{"text":"package elon\n\nconst FULL_BATTERY = 100\n\n\/\/ Car implements a remote controlled car.\ntype Car struct {\n\tspeed int\n\tbatteryDrain int\n\tbattery int\n\tdistance int\n}\n\n\/\/ Track implements a race track.\ntype Track struct {\n\tdistance int\n}\n\n\/\/ CreateCar creates a new car with given specifications.\nfunc CreateCar(speed, batteryDrain int) *Car {\n\treturn &Car{\n\t\tspeed: speed,\n\t\tbatteryDrain: batteryDrain,\n\t\tbattery: FULL_BATTERY,\n\t\tdistance: 0,\n\t}\n}\n\n\/\/ CreateTrack creates a new track with given distance.\nfunc CreateTrack(distance int) Track {\n\tpanic(\"Please implement CreateTrack() method\")\n}\n\n\/\/ Drive drives the car one time.\nfunc (car *Car) Drive() {\n\tpanic(\"Please implement Drive() method\")\n}\n\n\/\/ CanFinish checks if a car is able to finish a certain track.\nfunc (car *Car) CanFinish(track Track) bool {\n\tpanic(\"Please implement CanFinish() method\")\n}\n\n\/\/ DisplayDistance displays the distance the car is driven.\nfunc (car *Car) DisplayDistance() string {\n\tpanic(\"Please implement DisplayDistance() method\")\n}\n\n\/\/ DisplayBattery displays the battery level.\nfunc (car *Car) DisplayBattery() string {\n\tpanic(\"Please implement DisplayBattery() method\")\n}\nImplement CreateTrack and Drivepackage elon\n\nconst FULL_BATTERY = 100\n\n\/\/ Car implements a remote controlled car.\ntype Car struct {\n\tspeed int\n\tbatteryDrain int\n\tbattery int\n\tdistance int\n}\n\n\/\/ Track implements a race track.\ntype Track struct {\n\tdistance int\n}\n\n\/\/ CreateCar creates a new car with given specifications.\nfunc CreateCar(speed, batteryDrain int) *Car {\n\treturn &Car{\n\t\tspeed: speed,\n\t\tbatteryDrain: batteryDrain,\n\t\tbattery: FULL_BATTERY,\n\t\tdistance: 0,\n\t}\n}\n\n\/\/ CreateTrack creates a new track with given distance.\nfunc CreateTrack(distance int) Track {\n\treturn Track{\n\t\tdistance: distance,\n\t}\n}\n\n\/\/ Drive drives the car one time.\nfunc (car *Car) Drive() {\n\tcar.battery -= car.batteryDrain\n\tcar.distance += car.speed\n}\n\n\/\/ CanFinish checks if a car is able to finish a certain track.\nfunc (car *Car) CanFinish(track Track) bool {\n\tpanic(\"Please implement CanFinish() method\")\n}\n\n\/\/ DisplayDistance displays the distance the car is driven.\nfunc (car *Car) DisplayDistance() string {\n\tpanic(\"Please implement DisplayDistance() method\")\n}\n\n\/\/ DisplayBattery displays the battery level.\nfunc (car *Car) DisplayBattery() string {\n\tpanic(\"Please implement DisplayBattery() method\")\n}\n<|endoftext|>"} {"text":"package engine\n\n\/\/\n\/\/ engine.GPGImportKeyEngine is a class that selects key from the GPG keyring via\n\/\/ shell-out to the gpg command line client. It's useful in `client mykey select`\n\/\/ and other places in which the user picks existing PGP keys on the existing\n\/\/ system for use in Keybase tasks.\n\/\/\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\tkeybase1 \"github.com\/keybase\/client\/go\/protocol\"\n)\n\ntype GPGImportKeyArg struct {\n\tQuery string\n\tSigner libkb.GenericKey\n\tAllowMulti bool\n\tSkipImport bool\n\tOnlyImport bool\n\tMe *libkb.User\n\tLks *libkb.LKSec\n}\n\ntype GPGImportKeyEngine struct {\n\tlast *libkb.PGPKeyBundle\n\targ *GPGImportKeyArg\n\tduplicatedFingerprints []libkb.PGPFingerprint\n\tlibkb.Contextified\n}\n\nfunc NewGPGImportKeyEngine(arg *GPGImportKeyArg, g *libkb.GlobalContext) *GPGImportKeyEngine {\n\treturn &GPGImportKeyEngine{\n\t\targ: arg,\n\t\tContextified: libkb.NewContextified(g),\n\t}\n}\n\nfunc (e *GPGImportKeyEngine) Prereqs() Prereqs {\n\treturn Prereqs{\n\t\tSession: true,\n\t}\n}\n\nfunc (e *GPGImportKeyEngine) Name() string {\n\treturn \"GPGImportKeyEngine\"\n}\n\nfunc (e *GPGImportKeyEngine) RequiredUIs() []libkb.UIKind {\n\treturn []libkb.UIKind{\n\t\tlibkb.GPGUIKind,\n\t\tlibkb.SecretUIKind,\n\t}\n}\n\nfunc (e *GPGImportKeyEngine) SubConsumers() []libkb.UIConsumer {\n\treturn []libkb.UIConsumer{\n\t\tNewPGPKeyImportEngine(PGPKeyImportEngineArg{}),\n\t}\n}\n\nfunc (e *GPGImportKeyEngine) WantsGPG(ctx *Context) (bool, error) {\n\tgpg := e.G().GetGpgClient()\n\tcanExec, err := gpg.CanExec()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !canExec {\n\t\treturn false, nil\n\t}\n\n\t\/\/ they have gpg\n\n\tres, err := ctx.GPGUI.WantToAddGPGKey(0)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn res, nil\n}\n\nfunc (e *GPGImportKeyEngine) Run(ctx *Context) (err error) {\n\tgpg := e.G().GetGpgClient()\n\n\tme := e.arg.Me\n\tif me == nil {\n\t\tif me, err = libkb.LoadMe(libkb.NewLoadUserPubOptionalArg(e.G())); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif !e.arg.OnlyImport {\n\t\tif err = PGPCheckMulti(me, e.arg.AllowMulti); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err = gpg.Configure(); err != nil {\n\t\treturn err\n\t}\n\tindex, warns, err := gpg.Index(true, e.arg.Query)\n\tif err != nil {\n\t\treturn err\n\t}\n\twarns.Warn()\n\n\tvar gks []keybase1.GPGKey\n\tfor _, key := range index.Keys {\n\t\tgk := keybase1.GPGKey{\n\t\t\tAlgorithm: fmt.Sprintf(\"%d%s\", key.Bits, key.AlgoString()),\n\t\t\tKeyID: key.GetFingerprint().ToKeyID(),\n\t\t\tExpiration: key.ExpirationString(),\n\t\t\tIdentities: key.GetPGPIdentities(),\n\t\t}\n\t\tgks = append(gks, gk)\n\t}\n\n\tif len(gks) == 0 {\n\t\treturn fmt.Errorf(\"No PGP keys available to choose from.\")\n\t}\n\n\tres, err := ctx.GPGUI.SelectKeyAndPushOption(keybase1.SelectKeyAndPushOptionArg{Keys: gks})\n\tif err != nil {\n\t\treturn err\n\t}\n\te.G().Log.Debug(\"SelectKey result: %+v\", res)\n\n\tvar selected *libkb.GpgPrimaryKey\n\tfor _, key := range index.Keys {\n\t\tif key.GetFingerprint().ToKeyID() == res.KeyID {\n\t\t\tselected = key\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif selected == nil {\n\t\treturn nil\n\t}\n\n\tpublicKeys := me.GetActivePGPKeys(false)\n\tduplicate := false\n\tfor _, key := range publicKeys {\n\t\tif key.GetFingerprint().Eq(*(selected.GetFingerprint())) {\n\t\t\tduplicate = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif duplicate {\n\t\t\/\/ This key's already been posted to the server.\n\t\tres, err := ctx.GPGUI.ConfirmDuplicateKeyChosen(0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !res {\n\t\t\treturn libkb.SibkeyAlreadyExistsError{}\n\t\t}\n\t\t\/\/ We're sending a key update, then.\n\t\tfp := fmt.Sprintf(\"%s\", *(selected.GetFingerprint()))\n\t\teng := NewPGPUpdateEngine([]string{fp}, false, e.G())\n\t\terr = RunEngine(eng, ctx)\n\t\te.duplicatedFingerprints = eng.duplicatedFingerprints\n\n\t\treturn err\n\t}\n\n\tbundle, err := gpg.ImportKey(true, *(selected.GetFingerprint()))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"ImportKey error: %s\", err)\n\t}\n\n\tif err := bundle.Unlock(\"Import of key into keybase keyring\", ctx.SecretUI); err != nil {\n\t\treturn err\n\t}\n\n\te.G().Log.Info(\"Bundle unlocked: %s\", selected.GetFingerprint().ToKeyID())\n\n\teng := NewPGPKeyImportEngine(PGPKeyImportEngineArg{\n\t\tPregen: bundle,\n\t\tSigningKey: e.arg.Signer,\n\t\tMe: me,\n\t\tAllowMulti: e.arg.AllowMulti,\n\t\tNoSave: e.arg.SkipImport,\n\t\tOnlySave: e.arg.OnlyImport,\n\t\tLks: e.arg.Lks,\n\t})\n\n\tif err = RunEngine(eng, ctx); err != nil {\n\n\t\t\/\/ It's important to propagate a CanceledError unmolested,\n\t\t\/\/ since the UI needs to know that. See:\n\t\t\/\/ https:\/\/github.com\/keybase\/client\/issues\/226\n\t\tif _, ok := err.(libkb.CanceledError); !ok {\n\t\t\terr = libkb.KeyGenError{Msg: err.Error()}\n\t\t}\n\t\treturn\n\t}\n\n\te.G().Log.Info(\"Key %s imported\", selected.GetFingerprint().ToKeyID())\n\n\te.last = bundle\n\n\treturn nil\n}\n\nfunc (e *GPGImportKeyEngine) LastKey() *libkb.PGPKeyBundle {\n\treturn e.last\n}\nAdd a subconsumer for the new engine callpackage engine\n\n\/\/\n\/\/ engine.GPGImportKeyEngine is a class that selects key from the GPG keyring via\n\/\/ shell-out to the gpg command line client. It's useful in `client mykey select`\n\/\/ and other places in which the user picks existing PGP keys on the existing\n\/\/ system for use in Keybase tasks.\n\/\/\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\tkeybase1 \"github.com\/keybase\/client\/go\/protocol\"\n)\n\ntype GPGImportKeyArg struct {\n\tQuery string\n\tSigner libkb.GenericKey\n\tAllowMulti bool\n\tSkipImport bool\n\tOnlyImport bool\n\tMe *libkb.User\n\tLks *libkb.LKSec\n}\n\ntype GPGImportKeyEngine struct {\n\tlast *libkb.PGPKeyBundle\n\targ *GPGImportKeyArg\n\tduplicatedFingerprints []libkb.PGPFingerprint\n\tlibkb.Contextified\n}\n\nfunc NewGPGImportKeyEngine(arg *GPGImportKeyArg, g *libkb.GlobalContext) *GPGImportKeyEngine {\n\treturn &GPGImportKeyEngine{\n\t\targ: arg,\n\t\tContextified: libkb.NewContextified(g),\n\t}\n}\n\nfunc (e *GPGImportKeyEngine) Prereqs() Prereqs {\n\treturn Prereqs{\n\t\tSession: true,\n\t}\n}\n\nfunc (e *GPGImportKeyEngine) Name() string {\n\treturn \"GPGImportKeyEngine\"\n}\n\nfunc (e *GPGImportKeyEngine) RequiredUIs() []libkb.UIKind {\n\treturn []libkb.UIKind{\n\t\tlibkb.GPGUIKind,\n\t\tlibkb.SecretUIKind,\n\t}\n}\n\nfunc (e *GPGImportKeyEngine) SubConsumers() []libkb.UIConsumer {\n\treturn []libkb.UIConsumer{\n\t\tNewPGPKeyImportEngine(PGPKeyImportEngineArg{}),\n\t\tNewPGPUpdateEngine(PGPUpdateEngineArg{}),\n\t}\n}\n\nfunc (e *GPGImportKeyEngine) WantsGPG(ctx *Context) (bool, error) {\n\tgpg := e.G().GetGpgClient()\n\tcanExec, err := gpg.CanExec()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !canExec {\n\t\treturn false, nil\n\t}\n\n\t\/\/ they have gpg\n\n\tres, err := ctx.GPGUI.WantToAddGPGKey(0)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn res, nil\n}\n\nfunc (e *GPGImportKeyEngine) Run(ctx *Context) (err error) {\n\tgpg := e.G().GetGpgClient()\n\n\tme := e.arg.Me\n\tif me == nil {\n\t\tif me, err = libkb.LoadMe(libkb.NewLoadUserPubOptionalArg(e.G())); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif !e.arg.OnlyImport {\n\t\tif err = PGPCheckMulti(me, e.arg.AllowMulti); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err = gpg.Configure(); err != nil {\n\t\treturn err\n\t}\n\tindex, warns, err := gpg.Index(true, e.arg.Query)\n\tif err != nil {\n\t\treturn err\n\t}\n\twarns.Warn()\n\n\tvar gks []keybase1.GPGKey\n\tfor _, key := range index.Keys {\n\t\tgk := keybase1.GPGKey{\n\t\t\tAlgorithm: fmt.Sprintf(\"%d%s\", key.Bits, key.AlgoString()),\n\t\t\tKeyID: key.GetFingerprint().ToKeyID(),\n\t\t\tExpiration: key.ExpirationString(),\n\t\t\tIdentities: key.GetPGPIdentities(),\n\t\t}\n\t\tgks = append(gks, gk)\n\t}\n\n\tif len(gks) == 0 {\n\t\treturn fmt.Errorf(\"No PGP keys available to choose from.\")\n\t}\n\n\tres, err := ctx.GPGUI.SelectKeyAndPushOption(keybase1.SelectKeyAndPushOptionArg{Keys: gks})\n\tif err != nil {\n\t\treturn err\n\t}\n\te.G().Log.Debug(\"SelectKey result: %+v\", res)\n\n\tvar selected *libkb.GpgPrimaryKey\n\tfor _, key := range index.Keys {\n\t\tif key.GetFingerprint().ToKeyID() == res.KeyID {\n\t\t\tselected = key\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif selected == nil {\n\t\treturn nil\n\t}\n\n\tpublicKeys := me.GetActivePGPKeys(false)\n\tduplicate := false\n\tfor _, key := range publicKeys {\n\t\tif key.GetFingerprint().Eq(*(selected.GetFingerprint())) {\n\t\t\tduplicate = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif duplicate {\n\t\t\/\/ This key's already been posted to the server.\n\t\tres, err := ctx.GPGUI.ConfirmDuplicateKeyChosen(0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !res {\n\t\t\treturn libkb.SibkeyAlreadyExistsError{}\n\t\t}\n\t\t\/\/ We're sending a key update, then.\n\t\tfp := fmt.Sprintf(\"%s\", *(selected.GetFingerprint()))\n\t\teng := NewPGPUpdateEngine([]string{fp}, false, e.G())\n\t\terr = RunEngine(eng, ctx)\n\t\te.duplicatedFingerprints = eng.duplicatedFingerprints\n\n\t\treturn err\n\t}\n\n\tbundle, err := gpg.ImportKey(true, *(selected.GetFingerprint()))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"ImportKey error: %s\", err)\n\t}\n\n\tif err := bundle.Unlock(\"Import of key into keybase keyring\", ctx.SecretUI); err != nil {\n\t\treturn err\n\t}\n\n\te.G().Log.Info(\"Bundle unlocked: %s\", selected.GetFingerprint().ToKeyID())\n\n\teng := NewPGPKeyImportEngine(PGPKeyImportEngineArg{\n\t\tPregen: bundle,\n\t\tSigningKey: e.arg.Signer,\n\t\tMe: me,\n\t\tAllowMulti: e.arg.AllowMulti,\n\t\tNoSave: e.arg.SkipImport,\n\t\tOnlySave: e.arg.OnlyImport,\n\t\tLks: e.arg.Lks,\n\t})\n\n\tif err = RunEngine(eng, ctx); err != nil {\n\n\t\t\/\/ It's important to propagate a CanceledError unmolested,\n\t\t\/\/ since the UI needs to know that. See:\n\t\t\/\/ https:\/\/github.com\/keybase\/client\/issues\/226\n\t\tif _, ok := err.(libkb.CanceledError); !ok {\n\t\t\terr = libkb.KeyGenError{Msg: err.Error()}\n\t\t}\n\t\treturn\n\t}\n\n\te.G().Log.Info(\"Key %s imported\", selected.GetFingerprint().ToKeyID())\n\n\te.last = bundle\n\n\treturn nil\n}\n\nfunc (e *GPGImportKeyEngine) LastKey() *libkb.PGPKeyBundle {\n\treturn e.last\n}\n<|endoftext|>"} {"text":"package handlers\n\nimport (\n\t\"net\/http\"\n\t\"time\"\n\n\t\"google.golang.org\/appengine\"\n\t\"google.golang.org\/appengine\/datastore\"\n\t\"google.golang.org\/appengine\/log\"\n\t\"google.golang.org\/appengine\/memcache\"\n\t\"google.golang.org\/appengine\/user\"\n\n\t\"github.com\/icco\/natnatnat\/models\"\n\t\"github.com\/pilu\/traffic\"\n)\n\ntype ArchiveData struct {\n\tYears *map[int]Year\n\tPosts *[]models.Entry\n\tIsAdmin bool\n}\n\n\/\/ TODO(icco): Rewrite to fix map iteration problems.\ntype Year map[time.Month]Month\ntype Month []Day\ntype Day []int64\n\nvar months = [12]time.Month{\n\ttime.January,\n\ttime.February,\n\ttime.March,\n\ttime.April,\n\ttime.May,\n\ttime.June,\n\ttime.July,\n\ttime.August,\n\ttime.September,\n\ttime.October,\n\ttime.November,\n\ttime.December,\n}\n\nfunc ArchiveTaskHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\tc := appengine.NewContext(r.Request)\n\n\tentries, err := models.AllPosts(c)\n\tif err != nil {\n\t\tlog.Errorf(c, err.Error())\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\tlog.Infof(c, \"Retrieved data: %d.\", len(*entries))\n\n\tyears := make(map[int]Year)\n\n\toldest := (*entries)[len(*entries)-1].Datetime\n\tnewest := (*entries)[0].Datetime\n\n\tlog.Infof(c, \"Oldest: %v, Newest: %v\", oldest, newest)\n\n\tfor year := oldest.Year(); year <= newest.Year(); year += 1 {\n\t\tyears[year] = make(Year)\n\t\tlog.Infof(c, \"Adding %d.\", year)\n\t\tfor _, month := range months {\n\t\t\tif year < newest.Year() || (year == newest.Year() && month <= newest.Month()) {\n\t\t\t\tyears[year][month] = make([]Day, daysIn(month, year))\n\t\t\t\tlog.Debugf(c, \"Adding %d\/%d - %d days.\", year, month, len(years[year][month]))\n\t\t\t}\n\t\t}\n\t}\n\n\tq := models.ArchivePageQuery()\n\tt := q.Run(c)\n\tfor {\n\t\tvar p models.Entry\n\t\t_, err := t.Next(&p)\n\t\tif err == datastore.Done {\n\t\t\tbreak \/\/ No further entities match the query.\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Errorf(c, \"Error fetching next Entry: %v\", err)\n\t\t\tbreak\n\t\t}\n\n\t\tyear := p.Datetime.Year()\n\t\tmonth := p.Datetime.Month()\n\t\tday := p.Datetime.Day()\n\t\tlog.Infof(c, \"Trying post id %d\", p.Id)\n\n\t\tif years[year] == nil {\n\t\t\tyears[year] = make(Year)\n\t\t\tlog.Errorf(c, \"%d isn't a valid year.\", year)\n\t\t}\n\n\t\tif years[year][month] == nil {\n\t\t\tlog.Errorf(c, \"%d\/%d isn't a valid month.\", year, month)\n\t\t\tyears[year][month] = make([]Day, daysIn(month, year))\n\t\t}\n\n\t\tif years[year][month][day] == nil {\n\t\t\tlog.Infof(c, \"Making %d\/%d\/%d\", year, month, day)\n\t\t\tyears[year][month][day] = make(Day, 0)\n\t\t}\n\n\t\t\/\/ log.Infof(c, \"Appending %d\/%d\/%d: %+v\", year, month, day, years[year][month][day])\n\t\tyears[year][month][day] = append(years[year][month][day], p.Id)\n\t}\n\tlog.Infof(c, \"Added posts.\")\n\n\titem := &memcache.Item{\n\t\tKey: \"archive_data\",\n\t\tValue: years,\n\t}\n\n\t\/\/ Set the item, unconditionally\n\tif err := memcache.Set(c, item); err != nil {\n\t\tc.Errorf(\"error setting item: %v\", err)\n\t}\n}\n\nfunc ArchiveHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\tc := appengine.NewContext(r.Request)\n\n\tentries, err := models.AllPosts(c)\n\tif err != nil {\n\t\tlog.Errorf(c, err.Error())\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\tlog.Infof(c, \"Retrieved data: %d.\", len(*entries))\n\n\t\/\/ Get the item from the memcache\n\tif years, err := memcache.Get(c, \"archive_data\"); err == memcache.ErrCacheMiss {\n\t\tc.Infof(\"item not in the cache\")\n\t} else if err != nil {\n\t\tc.Errorf(\"error getting item: %v\", err)\n\t}\n\n\tdata := &ArchiveData{Years: years, IsAdmin: user.IsAdmin(c), Posts: entries}\n\tw.Render(\"archive\", data)\n}\n\n\/\/ daysIn returns the number of days in a month for a given year.\nfunc daysIn(m time.Month, year int) int {\n\t\/\/ This is equivalent to time.daysIn(m, year).\n\treturn time.Date(year, m+1, 0, 0, 0, 0, 0, time.UTC).Day()\n}\nfix logpackage handlers\n\nimport (\n\t\"net\/http\"\n\t\"time\"\n\n\t\"google.golang.org\/appengine\"\n\t\"google.golang.org\/appengine\/datastore\"\n\t\"google.golang.org\/appengine\/log\"\n\t\"google.golang.org\/appengine\/memcache\"\n\t\"google.golang.org\/appengine\/user\"\n\n\t\"github.com\/icco\/natnatnat\/models\"\n\t\"github.com\/pilu\/traffic\"\n)\n\ntype ArchiveData struct {\n\tYears *map[int]Year\n\tPosts *[]models.Entry\n\tIsAdmin bool\n}\n\n\/\/ TODO(icco): Rewrite to fix map iteration problems.\ntype Year map[time.Month]Month\ntype Month []Day\ntype Day []int64\n\nvar months = [12]time.Month{\n\ttime.January,\n\ttime.February,\n\ttime.March,\n\ttime.April,\n\ttime.May,\n\ttime.June,\n\ttime.July,\n\ttime.August,\n\ttime.September,\n\ttime.October,\n\ttime.November,\n\ttime.December,\n}\n\nfunc ArchiveTaskHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\tc := appengine.NewContext(r.Request)\n\n\tentries, err := models.AllPosts(c)\n\tif err != nil {\n\t\tlog.Errorf(c, err.Error())\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\tlog.Infof(c, \"Retrieved data: %d.\", len(*entries))\n\n\tyears := make(map[int]Year)\n\n\toldest := (*entries)[len(*entries)-1].Datetime\n\tnewest := (*entries)[0].Datetime\n\n\tlog.Infof(c, \"Oldest: %v, Newest: %v\", oldest, newest)\n\n\tfor year := oldest.Year(); year <= newest.Year(); year += 1 {\n\t\tyears[year] = make(Year)\n\t\tlog.Infof(c, \"Adding %d.\", year)\n\t\tfor _, month := range months {\n\t\t\tif year < newest.Year() || (year == newest.Year() && month <= newest.Month()) {\n\t\t\t\tyears[year][month] = make([]Day, daysIn(month, year))\n\t\t\t\tlog.Debugf(c, \"Adding %d\/%d - %d days.\", year, month, len(years[year][month]))\n\t\t\t}\n\t\t}\n\t}\n\n\tq := models.ArchivePageQuery()\n\tt := q.Run(c)\n\tfor {\n\t\tvar p models.Entry\n\t\t_, err := t.Next(&p)\n\t\tif err == datastore.Done {\n\t\t\tbreak \/\/ No further entities match the query.\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Errorf(c, \"Error fetching next Entry: %v\", err)\n\t\t\tbreak\n\t\t}\n\n\t\tyear := p.Datetime.Year()\n\t\tmonth := p.Datetime.Month()\n\t\tday := p.Datetime.Day()\n\t\tlog.Infof(c, \"Trying post id %d\", p.Id)\n\n\t\tif years[year] == nil {\n\t\t\tyears[year] = make(Year)\n\t\t\tlog.Errorf(c, \"%d isn't a valid year.\", year)\n\t\t}\n\n\t\tif years[year][month] == nil {\n\t\t\tlog.Errorf(c, \"%d\/%d isn't a valid month.\", year, month)\n\t\t\tyears[year][month] = make([]Day, daysIn(month, year))\n\t\t}\n\n\t\tif years[year][month][day] == nil {\n\t\t\tlog.Infof(c, \"Making %d\/%d\/%d\", year, month, day)\n\t\t\tyears[year][month][day] = make(Day, 0)\n\t\t}\n\n\t\t\/\/ log.Infof(c, \"Appending %d\/%d\/%d: %+v\", year, month, day, years[year][month][day])\n\t\tyears[year][month][day] = append(years[year][month][day], p.Id)\n\t}\n\tlog.Infof(c, \"Added posts.\")\n\n\titem := &memcache.Item{\n\t\tKey: \"archive_data\",\n\t\tValue: years,\n\t}\n\n\t\/\/ Set the item, unconditionally\n\tif err := memcache.Set(c, item); err != nil {\n\t\tlog.Errorf(log, \"error setting item: %v\", err)\n\t}\n}\n\nfunc ArchiveHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\tc := appengine.NewContext(r.Request)\n\n\tentries, err := models.AllPosts(c)\n\tif err != nil {\n\t\tlog.Errorf(c, err.Error())\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\tlog.Infof(c, \"Retrieved data: %d.\", len(*entries))\n\n\t\/\/ Get the item from the memcache\n\tif years, err := memcache.Get(c, \"archive_data\"); err == memcache.ErrCacheMiss {\n\t\tlog.Infof(c, \"item not in the cache\")\n\t} else if err != nil {\n\t\tlog.Errorf(c, \"error getting item: %v\", err)\n\t}\n\n\tdata := &ArchiveData{Years: years, IsAdmin: user.IsAdmin(c), Posts: entries}\n\tw.Render(\"archive\", data)\n}\n\n\/\/ daysIn returns the number of days in a month for a given year.\nfunc daysIn(m time.Month, year int) int {\n\t\/\/ This is equivalent to time.daysIn(m, year).\n\treturn time.Date(year, m+1, 0, 0, 0, 0, 0, time.UTC).Day()\n}\n<|endoftext|>"} {"text":"package evend\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/temoto\/vender\/engine\"\n)\n\ntype DeviceMixer struct {\n\tGeneric\n\n\tmoveTimeout time.Duration\n\tshakeTimeout time.Duration\n\tposClean uint8\n\tposReady uint8\n\tposShake uint8\n}\n\nfunc (self *DeviceMixer) Init(ctx context.Context) error {\n\t\/\/ TODO read config\n\tself.moveTimeout = 10 * time.Second\n\tself.shakeTimeout = 2 * 100 * time.Millisecond\n\tself.posClean = 70\n\tself.posReady = 0\n\tself.posShake = 100\n\terr := self.Generic.Init(ctx, 0xc8, \"mixer\", proto1)\n\n\te := engine.GetEngine(ctx)\n\te.Register(\"mdb.evend.mixer_shake_1\", self.NewShake(1, 100))\n\te.Register(\"mdb.evend.mixer_shake_2\", self.NewShake(2, 100))\n\te.Register(\"mdb.evend.mixer_shake_clean\", self.NewShake(10, 15))\n\te.Register(\"mdb.evend.mixer_fan_on\", self.NewFan(true))\n\te.Register(\"mdb.evend.mixer_fan_off\", self.NewFan(false))\n\te.Register(\"mdb.evend.mixer_move_clean\", self.NewMove(self.posClean))\n\te.Register(\"mdb.evend.mixer_move_ready\", self.NewMove(self.posReady))\n\te.Register(\"mdb.evend.mixer_move_shake\", self.NewMove(self.posShake))\n\n\treturn err\n}\n\n\/\/ 1step = 100ms\nfunc (self *DeviceMixer) NewShake(steps uint8, speed uint8) engine.Doer {\n\ttag := fmt.Sprintf(\"mdb.evend.mixer.shake:%d,%d\", steps, speed)\n\treturn engine.NewSeq(tag).\n\t\tAppend(self.NewWaitReady(tag)).\n\t\tAppend(self.Generic.NewAction(tag, 0x01, steps, speed)).\n\t\tAppend(self.NewWaitDone(tag, self.shakeTimeout*time.Duration(1+steps)))\n}\n\nfunc (self *DeviceMixer) NewFan(on bool) engine.Doer {\n\ttag := fmt.Sprintf(\"mdb.evend.mixer.fan:%t\", on)\n\targ := uint8(0)\n\tif on {\n\t\targ = 1\n\t}\n\treturn self.Generic.NewAction(tag, 0x02, arg, 0x00)\n}\n\nfunc (self *DeviceMixer) NewMove(position uint8) engine.Doer {\n\ttag := fmt.Sprintf(\"mdb.evend.mixer.move:%d\", position)\n\treturn engine.NewSeq(tag).\n\t\tAppend(self.NewWaitReady(tag)).\n\t\tAppend(self.Generic.NewAction(tag, 0x03, position, 0x64)).\n\t\tAppend(self.NewWaitDone(tag, self.moveTimeout))\n}\nevend.mixer shake\/clean speeds were reversedpackage evend\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/temoto\/vender\/engine\"\n)\n\ntype DeviceMixer struct {\n\tGeneric\n\n\tmoveTimeout time.Duration\n\tshakeTimeout time.Duration\n\tposClean uint8\n\tposReady uint8\n\tposShake uint8\n}\n\nfunc (self *DeviceMixer) Init(ctx context.Context) error {\n\t\/\/ TODO read config\n\tself.moveTimeout = 10 * time.Second\n\tself.shakeTimeout = 3 * 100 * time.Millisecond\n\tself.posClean = 70\n\tself.posReady = 0\n\tself.posShake = 100\n\terr := self.Generic.Init(ctx, 0xc8, \"mixer\", proto1)\n\n\te := engine.GetEngine(ctx)\n\te.Register(\"mdb.evend.mixer_shake_1\", self.NewShake(2, 15))\n\te.Register(\"mdb.evend.mixer_shake_2\", self.NewShake(4, 15))\n\te.Register(\"mdb.evend.mixer_shake_clean\", self.NewShake(1, 100))\n\te.Register(\"mdb.evend.mixer_fan_on\", self.NewFan(true))\n\te.Register(\"mdb.evend.mixer_fan_off\", self.NewFan(false))\n\te.Register(\"mdb.evend.mixer_move_clean\", self.NewMove(self.posClean))\n\te.Register(\"mdb.evend.mixer_move_ready\", self.NewMove(self.posReady))\n\te.Register(\"mdb.evend.mixer_move_shake\", self.NewMove(self.posShake))\n\n\treturn err\n}\n\n\/\/ 1step = 100ms\nfunc (self *DeviceMixer) NewShake(steps uint8, speed uint8) engine.Doer {\n\ttag := fmt.Sprintf(\"mdb.evend.mixer.shake:%d,%d\", steps, speed)\n\treturn engine.NewSeq(tag).\n\t\tAppend(self.NewWaitReady(tag)).\n\t\tAppend(self.Generic.NewAction(tag, 0x01, steps, speed)).\n\t\tAppend(self.NewWaitDone(tag, self.shakeTimeout*time.Duration(1+steps)))\n}\n\nfunc (self *DeviceMixer) NewFan(on bool) engine.Doer {\n\ttag := fmt.Sprintf(\"mdb.evend.mixer.fan:%t\", on)\n\targ := uint8(0)\n\tif on {\n\t\targ = 1\n\t}\n\treturn self.Generic.NewAction(tag, 0x02, arg, 0x00)\n}\n\nfunc (self *DeviceMixer) NewMove(position uint8) engine.Doer {\n\ttag := fmt.Sprintf(\"mdb.evend.mixer.move:%d\", position)\n\treturn engine.NewSeq(tag).\n\t\tAppend(self.NewWaitReady(tag)).\n\t\tAppend(self.Generic.NewAction(tag, 0x03, position, 0x64)).\n\t\tAppend(self.NewWaitDone(tag, self.moveTimeout))\n}\n<|endoftext|>"} {"text":"package systests\n\nimport (\n\t\"bytes\"\n\t\"net\/http\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/keybase\/client\/go\/client\"\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\tkeybase1 \"github.com\/keybase\/client\/go\/protocol\/keybase1\"\n\t\"github.com\/keybase\/client\/go\/protocol\/stellar1\"\n\t\"github.com\/keybase\/client\/go\/stellar\"\n\t\"github.com\/keybase\/client\/go\/teams\"\n\t\"github.com\/keybase\/stellarnet\"\n\t\"github.com\/stellar\/go\/build\"\n\t\"github.com\/stellar\/go\/clients\/horizon\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nconst disable = true\nconst disableMsg = \"friendbot issues\"\n\nfunc TestStellarNoteRoundtripAndResets(t *testing.T) {\n\tif disable {\n\t\tt.Skip(disableMsg)\n\t}\n\tctx := newSMUContext(t)\n\tdefer ctx.cleanup()\n\n\t\/\/ Sign up two users, bob and alice.\n\talice := ctx.installKeybaseForUser(\"alice\", 10)\n\talice.signup()\n\tdivDebug(ctx, \"Signed up alice (%s)\", alice.username)\n\tbob := ctx.installKeybaseForUser(\"bob\", 10)\n\tbob.signup()\n\tdivDebug(ctx, \"Signed up bob (%s)\", bob.username)\n\n\tt.Logf(\"note to self\")\n\tencB64, err := stellar.NoteEncryptB64(libkb.NewMetaContextBackground(alice.getPrimaryGlobalContext()), sampleNote(), nil)\n\trequire.NoError(t, err)\n\tnote, err := stellar.NoteDecryptB64(libkb.NewMetaContextBackground(alice.getPrimaryGlobalContext()), encB64)\n\trequire.NoError(t, err)\n\trequire.Equal(t, sampleNote(), note)\n\n\tt.Logf(\"note to both users\")\n\tother := bob.userVersion()\n\tencB64, err = stellar.NoteEncryptB64(libkb.NewMetaContextBackground(alice.getPrimaryGlobalContext()), sampleNote(), &other)\n\trequire.NoError(t, err)\n\n\tt.Logf(\"decrypt as self\")\n\tnote, err = stellar.NoteDecryptB64(libkb.NewMetaContextBackground(alice.getPrimaryGlobalContext()), encB64)\n\trequire.NoError(t, err)\n\trequire.Equal(t, sampleNote(), note)\n\n\tt.Logf(\"decrypt as other\")\n\tnote, err = stellar.NoteDecryptB64(libkb.NewMetaContextBackground(bob.getPrimaryGlobalContext()), encB64)\n\trequire.NoError(t, err)\n\trequire.Equal(t, sampleNote(), note)\n\n\tt.Logf(\"reset sender\")\n\talice.reset()\n\tdivDebug(ctx, \"Reset bob (%s)\", bob.username)\n\talice.loginAfterReset(10)\n\tdivDebug(ctx, \"Bob logged in after reset\")\n\n\tt.Logf(\"fail to decrypt as post-reset self\")\n\tnote, err = stellar.NoteDecryptB64(libkb.NewMetaContextBackground(alice.getPrimaryGlobalContext()), encB64)\n\trequire.Error(t, err)\n\trequire.Equal(t, \"note not encrypted for logged-in user\", err.Error())\n\n\tt.Logf(\"decrypt as other\")\n\tnote, err = stellar.NoteDecryptB64(libkb.NewMetaContextBackground(bob.getPrimaryGlobalContext()), encB64)\n\trequire.NoError(t, err)\n\trequire.Equal(t, sampleNote(), note)\n}\n\n\/\/ Test took 38s on a dev server 2018-06-07\nfunc TestStellarRelayAutoClaims(t *testing.T) {\n\tif disable {\n\t\tt.Skip(disableMsg)\n\t}\n\ttestStellarRelayAutoClaims(t, false, false)\n}\n\n\/\/ Test took 29s on a dev server 2018-06-07\nfunc TestStellarRelayAutoClaimsWithPUK(t *testing.T) {\n\tif disable {\n\t\tt.Skip(disableMsg)\n\t}\n\ttestStellarRelayAutoClaims(t, true, true)\n}\n\n\/\/ Part 1:\n\/\/ XLM is sent to a user before they have a [PUK \/ wallet].\n\/\/ In the form of multiple relay payments.\n\/\/ They then [get a PUK,] add a wallet, and enter the impteam,\n\/\/ which all kick the autoclaim into gear.\n\/\/\n\/\/ Part 2:\n\/\/ A relay payment is sent to the user who already has a wallet.\n\/\/ The funds should be claimed asap.\n\/\/\n\/\/ To debug this test use log filter \"stellar_test|poll-|AutoClaim|stellar.claim|pollfor\"\nfunc testStellarRelayAutoClaims(t *testing.T, startWithPUK, skipPart2 bool) {\n\ttt := newTeamTester(t)\n\tdefer tt.cleanup()\n\tuseStellarTestNet(t)\n\n\talice := tt.addUser(\"alice\")\n\tvar bob *userPlusDevice\n\tif startWithPUK {\n\t\tbob = tt.addUser(\"bob\")\n\t} else {\n\t\tbob = tt.addPuklessUser(\"bob\")\n\t}\n\talice.kickTeamRekeyd()\n\n\tt.Logf(\"alice gets funded\")\n\tacceptDisclaimer(alice)\n\n\tres, err := alice.stellarClient.GetWalletAccountsLocal(context.Background(), 0)\n\trequire.NoError(t, err)\n\tgift(t, res[0].AccountID)\n\n\tt.Logf(\"alice sends a first relay payment to bob P1\")\n\tattachIdentifyUI(t, alice.tc.G, newSimpleIdentifyUI())\n\tcmd := client.CmdWalletSend{\n\t\tContextified: libkb.NewContextified(alice.tc.G),\n\t\tRecipient: bob.username,\n\t\tAmount: \"50\",\n\t}\n\tfor i := 0; i < retryCount; i++ {\n\t\terr = cmd.Run()\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\trequire.NoError(t, err)\n\n\tt.Logf(\"alice sends a second relay payment to bob P2\")\n\tcmd = client.CmdWalletSend{\n\t\tContextified: libkb.NewContextified(alice.tc.G),\n\t\tRecipient: bob.username,\n\t\tAmount: \"30\",\n\t}\n\tfor i := 0; i < retryCount; i++ {\n\t\terr = cmd.Run()\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\trequire.NoError(t, err)\n\n\tt.Logf(\"get the impteam seqno to wait on later\")\n\tteam, _, _, err := teams.LookupImplicitTeam(context.Background(), alice.tc.G, alice.username+\",\"+bob.username, false, teams.ImplicitTeamOptions{})\n\trequire.NoError(t, err)\n\tnextSeqno := team.NextSeqno()\n\n\tif startWithPUK {\n\t\tt.Logf(\"bob gets a wallet\")\n\t\tacceptDisclaimer(bob)\n\t} else {\n\t\tt.Logf(\"bob gets a PUK and wallet\")\n\t\tbob.device.tctx.Tp.DisableUpgradePerUserKey = false\n\t\tacceptDisclaimer(bob)\n\n\t\tt.Logf(\"wait for alice to add bob to their impteam\")\n\t\talice.pollForTeamSeqnoLinkWithLoadArgs(keybase1.LoadTeamArg{ID: team.ID}, nextSeqno)\n\t}\n\n\tpollTime := 20 * time.Second\n\tif libkb.UseCITime(bob.tc.G) {\n\t\t\/\/ This test is especially slow.\n\t\tpollTime = 30 * time.Second\n\t}\n\n\tpollFor(t, \"claims to complete\", pollTime, bob.tc.G, func(i int) bool {\n\t\tres, err = bob.stellarClient.GetWalletAccountsLocal(context.Background(), 0)\n\t\trequire.NoError(t, err)\n\t\tt.Logf(\"poll-1-%v: %v\", i, res[0].BalanceDescription)\n\t\tif res[0].BalanceDescription == \"0 XLM\" {\n\t\t\treturn false\n\t\t}\n\t\tif res[0].BalanceDescription == \"49.9999800 XLM\" {\n\t\t\tt.Logf(\"poll-1-%v: received T1 but not T2\", i)\n\t\t\treturn false\n\t\t}\n\t\tif res[0].BalanceDescription == \"29.9999800 XLM\" {\n\t\t\tt.Logf(\"poll-1-%v: received T2 but not T1\", i)\n\t\t\treturn false\n\t\t}\n\t\tt.Logf(\"poll-1-%v: received both payments\", i)\n\t\trequire.Equal(t, \"79.9999700 XLM\", res[0].BalanceDescription)\n\t\treturn true\n\t})\n\n\tif skipPart2 {\n\t\tt.Logf(\"Skipping part 2\")\n\t\treturn\n\t}\n\n\tt.Logf(\"--------------------\")\n\tt.Logf(\"Part 2: Alice sends a relay payment to bob who now already has a wallet\")\n\tcmd = client.CmdWalletSend{\n\t\tContextified: libkb.NewContextified(alice.tc.G),\n\t\tRecipient: bob.username,\n\t\tAmount: \"10\",\n\t\tForceRelay: true,\n\t}\n\tfor i := 0; i < retryCount; i++ {\n\t\terr = cmd.Run()\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\trequire.NoError(t, err)\n\n\tpollFor(t, \"final claim to complete\", pollTime, bob.tc.G, func(i int) bool {\n\t\tres, err = bob.stellarClient.GetWalletAccountsLocal(context.Background(), 0)\n\t\trequire.NoError(t, err)\n\t\tt.Logf(\"poll-2-%v: %v\", i, res[0].BalanceDescription)\n\t\tif res[0].BalanceDescription == \"79.9999700 XLM\" {\n\t\t\treturn false\n\t\t}\n\t\tt.Logf(\"poll-1-%v: received final payment\", i)\n\t\trequire.Equal(t, \"89.9999600 XLM\", res[0].BalanceDescription)\n\t\treturn true\n\t})\n\n}\n\nfunc sampleNote() stellar1.NoteContents {\n\treturn stellar1.NoteContents{\n\t\tNote: \"wizbang\",\n\t\tStellarID: stellar1.TransactionID(\"6653fc2fdbc42ad51ccbe77ee0a3c29e258a5513c62fdc532cbfff91ab101abf\"),\n\t}\n}\n\n\/\/ Friendbot sends someone XLM\nfunc gift(t testing.TB, accountID stellar1.AccountID) {\n\tt.Logf(\"gift -> %v\", accountID)\n\turl := \"https:\/\/friendbot.stellar.org\/?addr=\" + accountID.String()\n\tfor i := 0; i < retryCount; i++ {\n\t\tt.Logf(\"gift url: %v\", url)\n\t\tres, err := http.Get(url)\n\t\tif err != nil {\n\t\t\tt.Logf(\"http get %s error: %s\", url, err)\n\t\t\tcontinue\n\t\t}\n\t\tbodyBuf := new(bytes.Buffer)\n\t\tbodyBuf.ReadFrom(res.Body)\n\t\tres.Body.Close()\n\t\tt.Logf(\"gift res: %v\", bodyBuf.String())\n\t\tif res.StatusCode == 200 {\n\t\t\treturn\n\t\t}\n\t\tt.Logf(\"gift status not ok: %d\", res.StatusCode)\n\t}\n\tt.Fatalf(\"gift to %s failed after multiple attempts\", accountID)\n}\n\nfunc useStellarTestNet(t testing.TB) {\n\tstellarnet.SetClientAndNetwork(horizon.DefaultTestNetClient, build.TestNetwork)\n}\n\nfunc acceptDisclaimer(u *userPlusDevice) {\n\terr := u.stellarClient.AcceptDisclaimerLocal(context.Background(), 0)\n\trequire.NoError(u.tc.T, err)\n}\nEnable stellar systestspackage systests\n\nimport (\n\t\"bytes\"\n\t\"net\/http\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/keybase\/client\/go\/client\"\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\tkeybase1 \"github.com\/keybase\/client\/go\/protocol\/keybase1\"\n\t\"github.com\/keybase\/client\/go\/protocol\/stellar1\"\n\t\"github.com\/keybase\/client\/go\/stellar\"\n\t\"github.com\/keybase\/client\/go\/teams\"\n\t\"github.com\/keybase\/stellarnet\"\n\t\"github.com\/stellar\/go\/build\"\n\t\"github.com\/stellar\/go\/clients\/horizon\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nconst disable = false\nconst disableMsg = \"friendbot issues\"\n\nfunc TestStellarNoteRoundtripAndResets(t *testing.T) {\n\tif disable {\n\t\tt.Skip(disableMsg)\n\t}\n\tctx := newSMUContext(t)\n\tdefer ctx.cleanup()\n\n\t\/\/ Sign up two users, bob and alice.\n\talice := ctx.installKeybaseForUser(\"alice\", 10)\n\talice.signup()\n\tdivDebug(ctx, \"Signed up alice (%s)\", alice.username)\n\tbob := ctx.installKeybaseForUser(\"bob\", 10)\n\tbob.signup()\n\tdivDebug(ctx, \"Signed up bob (%s)\", bob.username)\n\n\tt.Logf(\"note to self\")\n\tencB64, err := stellar.NoteEncryptB64(libkb.NewMetaContextBackground(alice.getPrimaryGlobalContext()), sampleNote(), nil)\n\trequire.NoError(t, err)\n\tnote, err := stellar.NoteDecryptB64(libkb.NewMetaContextBackground(alice.getPrimaryGlobalContext()), encB64)\n\trequire.NoError(t, err)\n\trequire.Equal(t, sampleNote(), note)\n\n\tt.Logf(\"note to both users\")\n\tother := bob.userVersion()\n\tencB64, err = stellar.NoteEncryptB64(libkb.NewMetaContextBackground(alice.getPrimaryGlobalContext()), sampleNote(), &other)\n\trequire.NoError(t, err)\n\n\tt.Logf(\"decrypt as self\")\n\tnote, err = stellar.NoteDecryptB64(libkb.NewMetaContextBackground(alice.getPrimaryGlobalContext()), encB64)\n\trequire.NoError(t, err)\n\trequire.Equal(t, sampleNote(), note)\n\n\tt.Logf(\"decrypt as other\")\n\tnote, err = stellar.NoteDecryptB64(libkb.NewMetaContextBackground(bob.getPrimaryGlobalContext()), encB64)\n\trequire.NoError(t, err)\n\trequire.Equal(t, sampleNote(), note)\n\n\tt.Logf(\"reset sender\")\n\talice.reset()\n\tdivDebug(ctx, \"Reset bob (%s)\", bob.username)\n\talice.loginAfterReset(10)\n\tdivDebug(ctx, \"Bob logged in after reset\")\n\n\tt.Logf(\"fail to decrypt as post-reset self\")\n\tnote, err = stellar.NoteDecryptB64(libkb.NewMetaContextBackground(alice.getPrimaryGlobalContext()), encB64)\n\trequire.Error(t, err)\n\trequire.Equal(t, \"note not encrypted for logged-in user\", err.Error())\n\n\tt.Logf(\"decrypt as other\")\n\tnote, err = stellar.NoteDecryptB64(libkb.NewMetaContextBackground(bob.getPrimaryGlobalContext()), encB64)\n\trequire.NoError(t, err)\n\trequire.Equal(t, sampleNote(), note)\n}\n\n\/\/ Test took 38s on a dev server 2018-06-07\nfunc TestStellarRelayAutoClaims(t *testing.T) {\n\tif disable {\n\t\tt.Skip(disableMsg)\n\t}\n\ttestStellarRelayAutoClaims(t, false, false)\n}\n\n\/\/ Test took 29s on a dev server 2018-06-07\nfunc TestStellarRelayAutoClaimsWithPUK(t *testing.T) {\n\tif disable {\n\t\tt.Skip(disableMsg)\n\t}\n\ttestStellarRelayAutoClaims(t, true, true)\n}\n\n\/\/ Part 1:\n\/\/ XLM is sent to a user before they have a [PUK \/ wallet].\n\/\/ In the form of multiple relay payments.\n\/\/ They then [get a PUK,] add a wallet, and enter the impteam,\n\/\/ which all kick the autoclaim into gear.\n\/\/\n\/\/ Part 2:\n\/\/ A relay payment is sent to the user who already has a wallet.\n\/\/ The funds should be claimed asap.\n\/\/\n\/\/ To debug this test use log filter \"stellar_test|poll-|AutoClaim|stellar.claim|pollfor\"\nfunc testStellarRelayAutoClaims(t *testing.T, startWithPUK, skipPart2 bool) {\n\ttt := newTeamTester(t)\n\tdefer tt.cleanup()\n\tuseStellarTestNet(t)\n\n\talice := tt.addUser(\"alice\")\n\tvar bob *userPlusDevice\n\tif startWithPUK {\n\t\tbob = tt.addUser(\"bob\")\n\t} else {\n\t\tbob = tt.addPuklessUser(\"bob\")\n\t}\n\talice.kickTeamRekeyd()\n\n\tt.Logf(\"alice gets funded\")\n\tacceptDisclaimer(alice)\n\n\tres, err := alice.stellarClient.GetWalletAccountsLocal(context.Background(), 0)\n\trequire.NoError(t, err)\n\tgift(t, res[0].AccountID)\n\n\tt.Logf(\"alice sends a first relay payment to bob P1\")\n\tattachIdentifyUI(t, alice.tc.G, newSimpleIdentifyUI())\n\tcmd := client.CmdWalletSend{\n\t\tContextified: libkb.NewContextified(alice.tc.G),\n\t\tRecipient: bob.username,\n\t\tAmount: \"50\",\n\t}\n\tfor i := 0; i < retryCount; i++ {\n\t\terr = cmd.Run()\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\trequire.NoError(t, err)\n\n\tt.Logf(\"alice sends a second relay payment to bob P2\")\n\tcmd = client.CmdWalletSend{\n\t\tContextified: libkb.NewContextified(alice.tc.G),\n\t\tRecipient: bob.username,\n\t\tAmount: \"30\",\n\t}\n\tfor i := 0; i < retryCount; i++ {\n\t\terr = cmd.Run()\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\trequire.NoError(t, err)\n\n\tt.Logf(\"get the impteam seqno to wait on later\")\n\tteam, _, _, err := teams.LookupImplicitTeam(context.Background(), alice.tc.G, alice.username+\",\"+bob.username, false, teams.ImplicitTeamOptions{})\n\trequire.NoError(t, err)\n\tnextSeqno := team.NextSeqno()\n\n\tif startWithPUK {\n\t\tt.Logf(\"bob gets a wallet\")\n\t\tacceptDisclaimer(bob)\n\t} else {\n\t\tt.Logf(\"bob gets a PUK and wallet\")\n\t\tbob.device.tctx.Tp.DisableUpgradePerUserKey = false\n\t\tacceptDisclaimer(bob)\n\n\t\tt.Logf(\"wait for alice to add bob to their impteam\")\n\t\talice.pollForTeamSeqnoLinkWithLoadArgs(keybase1.LoadTeamArg{ID: team.ID}, nextSeqno)\n\t}\n\n\tpollTime := 20 * time.Second\n\tif libkb.UseCITime(bob.tc.G) {\n\t\t\/\/ This test is especially slow.\n\t\tpollTime = 30 * time.Second\n\t}\n\n\tpollFor(t, \"claims to complete\", pollTime, bob.tc.G, func(i int) bool {\n\t\tres, err = bob.stellarClient.GetWalletAccountsLocal(context.Background(), 0)\n\t\trequire.NoError(t, err)\n\t\tt.Logf(\"poll-1-%v: %v\", i, res[0].BalanceDescription)\n\t\tif res[0].BalanceDescription == \"0 XLM\" {\n\t\t\treturn false\n\t\t}\n\t\tif res[0].BalanceDescription == \"49.9999800 XLM\" {\n\t\t\tt.Logf(\"poll-1-%v: received T1 but not T2\", i)\n\t\t\treturn false\n\t\t}\n\t\tif res[0].BalanceDescription == \"29.9999800 XLM\" {\n\t\t\tt.Logf(\"poll-1-%v: received T2 but not T1\", i)\n\t\t\treturn false\n\t\t}\n\t\tt.Logf(\"poll-1-%v: received both payments\", i)\n\t\trequire.Equal(t, \"79.9999700 XLM\", res[0].BalanceDescription)\n\t\treturn true\n\t})\n\n\tif skipPart2 {\n\t\tt.Logf(\"Skipping part 2\")\n\t\treturn\n\t}\n\n\tt.Logf(\"--------------------\")\n\tt.Logf(\"Part 2: Alice sends a relay payment to bob who now already has a wallet\")\n\tcmd = client.CmdWalletSend{\n\t\tContextified: libkb.NewContextified(alice.tc.G),\n\t\tRecipient: bob.username,\n\t\tAmount: \"10\",\n\t\tForceRelay: true,\n\t}\n\tfor i := 0; i < retryCount; i++ {\n\t\terr = cmd.Run()\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\trequire.NoError(t, err)\n\n\tpollFor(t, \"final claim to complete\", pollTime, bob.tc.G, func(i int) bool {\n\t\tres, err = bob.stellarClient.GetWalletAccountsLocal(context.Background(), 0)\n\t\trequire.NoError(t, err)\n\t\tt.Logf(\"poll-2-%v: %v\", i, res[0].BalanceDescription)\n\t\tif res[0].BalanceDescription == \"79.9999700 XLM\" {\n\t\t\treturn false\n\t\t}\n\t\tt.Logf(\"poll-1-%v: received final payment\", i)\n\t\trequire.Equal(t, \"89.9999600 XLM\", res[0].BalanceDescription)\n\t\treturn true\n\t})\n\n}\n\nfunc sampleNote() stellar1.NoteContents {\n\treturn stellar1.NoteContents{\n\t\tNote: \"wizbang\",\n\t\tStellarID: stellar1.TransactionID(\"6653fc2fdbc42ad51ccbe77ee0a3c29e258a5513c62fdc532cbfff91ab101abf\"),\n\t}\n}\n\n\/\/ Friendbot sends someone XLM\nfunc gift(t testing.TB, accountID stellar1.AccountID) {\n\tt.Logf(\"gift -> %v\", accountID)\n\turl := \"https:\/\/friendbot.stellar.org\/?addr=\" + accountID.String()\n\tfor i := 0; i < retryCount; i++ {\n\t\tt.Logf(\"gift url: %v\", url)\n\t\tres, err := http.Get(url)\n\t\tif err != nil {\n\t\t\tt.Logf(\"http get %s error: %s\", url, err)\n\t\t\tcontinue\n\t\t}\n\t\tbodyBuf := new(bytes.Buffer)\n\t\tbodyBuf.ReadFrom(res.Body)\n\t\tres.Body.Close()\n\t\tt.Logf(\"gift res: %v\", bodyBuf.String())\n\t\tif res.StatusCode == 200 {\n\t\t\treturn\n\t\t}\n\t\tt.Logf(\"gift status not ok: %d\", res.StatusCode)\n\t}\n\tt.Fatalf(\"gift to %s failed after multiple attempts\", accountID)\n}\n\nfunc useStellarTestNet(t testing.TB) {\n\tstellarnet.SetClientAndNetwork(horizon.DefaultTestNetClient, build.TestNetwork)\n}\n\nfunc acceptDisclaimer(u *userPlusDevice) {\n\terr := u.stellarClient.AcceptDisclaimerLocal(context.Background(), 0)\n\trequire.NoError(u.tc.T, err)\n}\n<|endoftext|>"} {"text":"package goltime\n\nimport (\n\t\"net\/http\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype Timestamp struct {\n\tYear, Month, Day, Hour, Min, Sec int\n}\n\nfunc CreateTimestamp(time_point []string) Timestamp {\n\tyear, _ := strconv.Atoi(time_point[0])\n\tmonth, _ := strconv.Atoi(time_point[1])\n\tday, _ := strconv.Atoi(time_point[2])\n\thour, _ := strconv.Atoi(time_point[3])\n\tmin, _ := strconv.Atoi(time_point[4])\n\tsec, _ := strconv.Atoi(time_point[5])\n\n\treturn Timestamp{\n\t\tYear: year,\n\t\tMonth: month,\n\t\tDay: day,\n\t\tHour: hour,\n\t\tMin: min,\n\t\tSec: sec,\n\t}\n}\n\nfunc TimestampFromHTTPRequest(req *http.Request) Timestamp {\n\treturn CreateTimestamp([]string{\n\t\treq.Form[\"year\"][0], req.Form[\"month\"][0], req.Form[\"day\"][0],\n\t\treq.Form[\"hour\"][0], req.Form[\"min\"][0], req.Form[\"sec\"][0],\n\t})\n}\n\nfunc (timestamp *Timestamp) Time() time.Time {\n\treturn time.Date(timestamp.Year, time.Month(timestamp.Month), timestamp.Day,\n\t\ttimestamp.Hour, timestamp.Min, timestamp.Sec, 0, time.UTC)\n}\ngoltime fixed FormValue call for Timestamp from HTTP Reqpackage goltime\n\nimport (\n\t\"net\/http\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype Timestamp struct {\n\tYear, Month, Day, Hour, Min, Sec int\n}\n\nfunc CreateTimestamp(time_point []string) Timestamp {\n\tyear, _ := strconv.Atoi(time_point[0])\n\tmonth, _ := strconv.Atoi(time_point[1])\n\tday, _ := strconv.Atoi(time_point[2])\n\thour, _ := strconv.Atoi(time_point[3])\n\tmin, _ := strconv.Atoi(time_point[4])\n\tsec, _ := strconv.Atoi(time_point[5])\n\n\treturn Timestamp{\n\t\tYear: year,\n\t\tMonth: month,\n\t\tDay: day,\n\t\tHour: hour,\n\t\tMin: min,\n\t\tSec: sec,\n\t}\n}\n\nfunc TimestampFromHTTPRequest(req *http.Request) Timestamp {\n\treturn CreateTimestamp([]string{\n\t\treq.FormValue(\"year\"), req.FormValue(\"month\"), req.FormValue(\"day\"),\n\t\treq.FormValue(\"hour\"), req.FormValue(\"min\"), req.FormValue(\"sec\"),\n\t})\n}\n\nfunc (timestamp *Timestamp) Time() time.Time {\n\treturn time.Date(timestamp.Year, time.Month(timestamp.Month), timestamp.Day,\n\t\ttimestamp.Hour, timestamp.Min, timestamp.Sec, 0, time.UTC)\n}\n<|endoftext|>"} {"text":"package message\n\n\/*\n#cgo LDFLAGS: -lthemis -lsoter\n#include \n#include \n#include \n#include \n#include \n#include \n\nstatic bool get_message_size(const void *priv, size_t priv_len, const void *public, size_t pub_len, const void *message, size_t message_len, bool is_wrap, size_t *out_len)\n{\n\tthemis_status_t res;\n\n\tif (is_wrap)\n\t{\n\t\tres = themis_secure_message_wrap(priv, priv_len, public, pub_len, message, message_len, NULL, out_len);\n\t}\n\telse\n\t{\n\t\tres = themis_secure_message_unwrap(priv, priv_len, public, pub_len, message, message_len, NULL, out_len);\n\t}\n\n\treturn THEMIS_BUFFER_TOO_SMALL == res;\n}\n\nstatic bool process(const void *priv, size_t priv_len, const void *public, size_t pub_len, const void *message, size_t message_len, bool is_wrap, void *out, size_t out_len)\n{\n\tthemis_status_t res;\n\n\tif (is_wrap)\n\t{\n\t\tres = themis_secure_message_wrap(priv, priv_len, public, pub_len, message, message_len, out, &out_len);\n\t}\n\telse\n\t{\n\t\tres = themis_secure_message_unwrap(priv, priv_len, public, pub_len, message, message_len, out, &out_len);\n\t}\n\n\treturn THEMIS_SUCCESS == res;\n}\n\n*\/\nimport \"C\"\nimport (\n\t\"errors\"\n\t\"github.com\/cossacklabs\/themis\/gothemis\/keys\"\n\t\"unsafe\"\n)\n\ntype SecureMessage struct {\n\tprivate *keys.PrivateKey\n\tpeerPublic *keys.PublicKey\n}\n\nfunc New(private *keys.PrivateKey, peerPublic *keys.PublicKey) *SecureMessage {\n\treturn &SecureMessage{private, peerPublic}\n}\n\nfunc messageProcess(private *keys.PrivateKey, peerPublic *keys.PublicKey, message []byte, is_wrap bool) ([]byte, error) {\n\tif nil == message {\n\t\treturn nil, errors.New(\"No message was provided\")\n\t}\n\n\tvar priv, pub unsafe.Pointer\n\tvar privLen, pubLen C.size_t\n\n\tif nil != private {\n\t\tpriv = unsafe.Pointer(&private.Value[0])\n\t\tprivLen = C.size_t(len(private.Value))\n\t}\n\n\tif nil != peerPublic {\n\t\tpub = unsafe.Pointer(&peerPublic.Value[0])\n\t\tpubLen = C.size_t(len(peerPublic.Value))\n\t}\n\n\tvar output_length C.size_t\n\tif !bool(C.get_message_size(priv,\n\t\tprivLen,\n\t\tpub,\n\t\tpubLen,\n\t\tunsafe.Pointer(&message[0]),\n\t\tC.size_t(len(message)),\n\t\tC.bool(is_wrap),\n\t\t&output_length)) {\n\t\treturn nil, errors.New(\"Failed to get ouput size\")\n\t}\n\n\toutput := make([]byte, int(output_length), int(output_length))\n\tif !bool(C.process(priv,\n\t\tprivLen,\n\t\tpub,\n\t\tpubLen,\n\t\tunsafe.Pointer(&message[0]),\n\t\tC.size_t(len(message)),\n\t\tC.bool(is_wrap),\n\t\tunsafe.Pointer(&output[0]),\n\t\toutput_length)) {\n\t\tif is_wrap {\n\t\t\treturn nil, errors.New(\"Failed to wrap message\")\n\t\t} else {\n\t\t\treturn nil, errors.New(\"Failed to unwrap message\")\n\t\t}\n\n\t}\n\n\treturn output, nil\n}\n\nfunc (sm *SecureMessage) Wrap(message []byte) ([]byte, error) {\n\tif nil == sm.private {\n\t\treturn nil, errors.New(\"Private key was not provided\")\n\t}\n\n\tif nil == sm.peerPublic {\n\t\treturn nil, errors.New(\"Peer public key was not provided\")\n\t}\n\treturn messageProcess(sm.private, sm.peerPublic, message, true)\n}\n\nfunc (sm *SecureMessage) Unwrap(message []byte) ([]byte, error) {\n\tif nil == sm.private {\n\t\treturn nil, errors.New(\"Private key was not provided\")\n\t}\n\n\tif nil == sm.peerPublic {\n\t\treturn nil, errors.New(\"Peer public key was not provided\")\n\t}\n\treturn messageProcess(sm.private, sm.peerPublic, message, false)\n}\n\nfunc (sm *SecureMessage) Sign(message []byte) ([]byte, error) {\n\tif nil == sm.private {\n\t\treturn nil, errors.New(\"Private key was not provided\")\n\t}\n\n\treturn messageProcess(sm.private, nil, message, true)\n}\n\nfunc (sm *SecureMessage) Verify(message []byte) ([]byte, error) {\n\tif nil == sm.peerPublic {\n\t\treturn nil, errors.New(\"Peer public key was not provided\")\n\t}\n\n\treturn messageProcess(nil, sm.peerPublic, message, false)\n}\nfix mistake in error message in message.gopackage message\n\n\/*\n#cgo LDFLAGS: -lthemis -lsoter\n#include \n#include \n#include \n#include \n#include \n#include \n\nstatic bool get_message_size(const void *priv, size_t priv_len, const void *public, size_t pub_len, const void *message, size_t message_len, bool is_wrap, size_t *out_len)\n{\n\tthemis_status_t res;\n\n\tif (is_wrap)\n\t{\n\t\tres = themis_secure_message_wrap(priv, priv_len, public, pub_len, message, message_len, NULL, out_len);\n\t}\n\telse\n\t{\n\t\tres = themis_secure_message_unwrap(priv, priv_len, public, pub_len, message, message_len, NULL, out_len);\n\t}\n\n\treturn THEMIS_BUFFER_TOO_SMALL == res;\n}\n\nstatic bool process(const void *priv, size_t priv_len, const void *public, size_t pub_len, const void *message, size_t message_len, bool is_wrap, void *out, size_t out_len)\n{\n\tthemis_status_t res;\n\n\tif (is_wrap)\n\t{\n\t\tres = themis_secure_message_wrap(priv, priv_len, public, pub_len, message, message_len, out, &out_len);\n\t}\n\telse\n\t{\n\t\tres = themis_secure_message_unwrap(priv, priv_len, public, pub_len, message, message_len, out, &out_len);\n\t}\n\n\treturn THEMIS_SUCCESS == res;\n}\n\n*\/\nimport \"C\"\nimport (\n\t\"errors\"\n\t\"github.com\/cossacklabs\/themis\/gothemis\/keys\"\n\t\"unsafe\"\n)\n\ntype SecureMessage struct {\n\tprivate *keys.PrivateKey\n\tpeerPublic *keys.PublicKey\n}\n\nfunc New(private *keys.PrivateKey, peerPublic *keys.PublicKey) *SecureMessage {\n\treturn &SecureMessage{private, peerPublic}\n}\n\nfunc messageProcess(private *keys.PrivateKey, peerPublic *keys.PublicKey, message []byte, is_wrap bool) ([]byte, error) {\n\tif nil == message {\n\t\treturn nil, errors.New(\"No message was provided\")\n\t}\n\n\tvar priv, pub unsafe.Pointer\n\tvar privLen, pubLen C.size_t\n\n\tif nil != private {\n\t\tpriv = unsafe.Pointer(&private.Value[0])\n\t\tprivLen = C.size_t(len(private.Value))\n\t}\n\n\tif nil != peerPublic {\n\t\tpub = unsafe.Pointer(&peerPublic.Value[0])\n\t\tpubLen = C.size_t(len(peerPublic.Value))\n\t}\n\n\tvar output_length C.size_t\n\tif !bool(C.get_message_size(priv,\n\t\tprivLen,\n\t\tpub,\n\t\tpubLen,\n\t\tunsafe.Pointer(&message[0]),\n\t\tC.size_t(len(message)),\n\t\tC.bool(is_wrap),\n\t\t&output_length)) {\n\t\treturn nil, errors.New(\"Failed to get output size\")\n\t}\n\n\toutput := make([]byte, int(output_length), int(output_length))\n\tif !bool(C.process(priv,\n\t\tprivLen,\n\t\tpub,\n\t\tpubLen,\n\t\tunsafe.Pointer(&message[0]),\n\t\tC.size_t(len(message)),\n\t\tC.bool(is_wrap),\n\t\tunsafe.Pointer(&output[0]),\n\t\toutput_length)) {\n\t\tif is_wrap {\n\t\t\treturn nil, errors.New(\"Failed to wrap message\")\n\t\t} else {\n\t\t\treturn nil, errors.New(\"Failed to unwrap message\")\n\t\t}\n\n\t}\n\n\treturn output, nil\n}\n\nfunc (sm *SecureMessage) Wrap(message []byte) ([]byte, error) {\n\tif nil == sm.private {\n\t\treturn nil, errors.New(\"Private key was not provided\")\n\t}\n\n\tif nil == sm.peerPublic {\n\t\treturn nil, errors.New(\"Peer public key was not provided\")\n\t}\n\treturn messageProcess(sm.private, sm.peerPublic, message, true)\n}\n\nfunc (sm *SecureMessage) Unwrap(message []byte) ([]byte, error) {\n\tif nil == sm.private {\n\t\treturn nil, errors.New(\"Private key was not provided\")\n\t}\n\n\tif nil == sm.peerPublic {\n\t\treturn nil, errors.New(\"Peer public key was not provided\")\n\t}\n\treturn messageProcess(sm.private, sm.peerPublic, message, false)\n}\n\nfunc (sm *SecureMessage) Sign(message []byte) ([]byte, error) {\n\tif nil == sm.private {\n\t\treturn nil, errors.New(\"Private key was not provided\")\n\t}\n\n\treturn messageProcess(sm.private, nil, message, true)\n}\n\nfunc (sm *SecureMessage) Verify(message []byte) ([]byte, error) {\n\tif nil == sm.peerPublic {\n\t\treturn nil, errors.New(\"Peer public key was not provided\")\n\t}\n\n\treturn messageProcess(nil, sm.peerPublic, message, false)\n}\n<|endoftext|>"} {"text":"package json\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"sync\"\n\n\t\"github.com\/segmentio\/objconv\"\n)\n\n\/\/ NewEncoder returns a new JSON encoder that writes to w.\nfunc NewEncoder(w io.Writer) *objconv.Encoder {\n\treturn objconv.NewEncoder(NewEmitter(w))\n}\n\n\/\/ NewStreamEncoder returns a new JSON stream encoder that writes to w.\nfunc NewStreamEncoder(w io.Writer) *objconv.StreamEncoder {\n\treturn objconv.NewStreamEncoder(NewEmitter(w))\n}\n\n\/\/ Marshal writes the JSON representation of v to a byte slice returned in b.\nfunc Marshal(v interface{}) (b []byte, err error) {\n\tm := marshalerPool.Get().(*marshaler)\n\tm.b.Truncate(0)\n\n\tif err = (objconv.Encoder{Emitter: m}).Encode(v); err == nil {\n\t\tb = make([]byte, m.b.Len())\n\t\tcopy(b, m.b.Bytes())\n\t}\n\n\tmarshalerPool.Put(m)\n\treturn\n}\n\nvar marshalerPool = sync.Pool{\n\tNew: func() interface{} { return newMarshaler() },\n}\n\ntype marshaler struct {\n\tEmitter\n\tb bytes.Buffer\n}\n\nfunc newMarshaler() *marshaler {\n\tm := &marshaler{}\n\tm.s = m.a[:0]\n\tm.w = &m.b\n\treturn m\n}\nadd constructors for pretty JSON encoderspackage json\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"sync\"\n\n\t\"github.com\/segmentio\/objconv\"\n)\n\n\/\/ NewEncoder returns a new JSON encoder that writes to w.\nfunc NewEncoder(w io.Writer) *objconv.Encoder {\n\treturn objconv.NewEncoder(NewEmitter(w))\n}\n\n\/\/ NewStreamEncoder returns a new JSON stream encoder that writes to w.\nfunc NewStreamEncoder(w io.Writer) *objconv.StreamEncoder {\n\treturn objconv.NewStreamEncoder(NewEmitter(w))\n}\n\n\/\/ NewPrettyEncoder returns a new JSON encoder that writes to w.\nfunc NewPrettyEncoder(w io.Writer) *objconv.Encoder {\n\treturn objconv.NewEncoder(NewPrettyEmitter(w))\n}\n\n\/\/ NewPrettyStreamEncoder returns a new JSON stream encoder that writes to w.\nfunc NewPrettyStreamEncoder(w io.Writer) *objconv.StreamEncoder {\n\treturn objconv.NewStreamEncoder(NewPrettyEmitter(w))\n}\n\n\/\/ Marshal writes the JSON representation of v to a byte slice returned in b.\nfunc Marshal(v interface{}) (b []byte, err error) {\n\tm := marshalerPool.Get().(*marshaler)\n\tm.b.Truncate(0)\n\n\tif err = (objconv.Encoder{Emitter: m}).Encode(v); err == nil {\n\t\tb = make([]byte, m.b.Len())\n\t\tcopy(b, m.b.Bytes())\n\t}\n\n\tmarshalerPool.Put(m)\n\treturn\n}\n\nvar marshalerPool = sync.Pool{\n\tNew: func() interface{} { return newMarshaler() },\n}\n\ntype marshaler struct {\n\tEmitter\n\tb bytes.Buffer\n}\n\nfunc newMarshaler() *marshaler {\n\tm := &marshaler{}\n\tm.s = m.a[:0]\n\tm.w = &m.b\n\treturn m\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"flag\"\n\t\"koding\/messaging\/rabbitmq\"\n\t\"koding\/tools\/config\"\n\t\"koding\/tools\/logger\"\n\t\"socialapi\/eventbus\"\n\ttopicfeed \"socialapi\/workers\/topicfeed\/lib\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/streadway\/amqp\"\n)\n\nfunc startHandler() func(delivery amqp.Delivery) {\n\tlog.Info(\"Worker Started to Consume\")\n\treturn func(delivery amqp.Delivery) {\n\t\terr := handler.HandleEvent(delivery.Type, delivery.Body)\n\t\tswitch err {\n\t\tcase nil:\n\t\t\tdelivery.Ack(false)\n\t\tcase topicfeed.HandlerNotFoundErr:\n\t\t\tlog.Notice(\"unknown event type (%s) recieved, \\n deleting message from RMQ\", delivery.Type)\n\t\t\tdelivery.Ack(false)\n\t\tcase gorm.RecordNotFound:\n\t\t\tlog.Warning(\"Record not found in our db (%s) recieved, \\n deleting message from RMQ\", string(delivery.Body))\n\t\t\tdelivery.Ack(false)\n\t\tdefault:\n\t\t\t\/\/ add proper error handling\n\t\t\t\/\/ instead of puttting message back to same queue, it is better\n\t\t\t\/\/ to put it to another maintenance queue\/exchange\n\t\t\tlog.Error(\"an error occured %s, \\n putting message back to queue\", err)\n\t\t\t\/\/ multiple false\n\t\t\t\/\/ reque true\n\t\t\tdelivery.Nack(false, true)\n\t\t}\n\t}\n}\n\nvar (\n\tlog = logger.New(\"TopicFeedWorker\")\n\tconf *config.Config\n\tflagProfile = flag.String(\"c\", \"\", \"Configuration profile from file\")\n\tflagDebug = flag.Bool(\"d\", false, \"Debug mode\")\n\thandler = topicfeed.NewTopicFeedController(log)\n)\n\nfunc main() {\n\tflag.Parse()\n\tif *flagProfile == \"\" {\n\t\tlog.Fatal(\"Please define config file with -c\")\n\t}\n\n\tconf = config.MustConfig(*flagProfile)\n\tsetLogLevel()\n\n\tif err := eventbus.Open(conf); err != nil {\n\t\tlog.Critical(\"Realtime operations will not work, this is not good, probably couldnt connect to RMQ. %v\", err.Error())\n\t}\n\tdefer eventbus.Close()\n\n\t\/\/ blocking\n\ttopicfeed.Listen(rabbitmq.New(conf), startHandler)\n\tdefer topicfeed.Consumer.Shutdown()\n}\n\nfunc setLogLevel() {\n\tvar logLevel logger.Level\n\n\tif *flagDebug {\n\t\tlogLevel = logger.DEBUG\n\t} else {\n\t\tlogLevel = logger.INFO\n\t}\n\tlog.SetLevel(logLevel)\n}\nSocial: change modelhelper to bongopackage main\n\nimport (\n\t\"flag\"\n\t\"koding\/messaging\/rabbitmq\"\n\t\"koding\/tools\/config\"\n\t\"socialapi\/db\"\n\ttopicfeed \"socialapi\/workers\/topicfeed\/lib\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/koding\/bongo\"\n\t\"github.com\/koding\/broker\"\n\t\"github.com\/koding\/logging\"\n\t\"github.com\/streadway\/amqp\"\n)\n\nfunc startHandler() func(delivery amqp.Delivery) {\n\tlog.Info(\"Worker Started to Consume\")\n\treturn func(delivery amqp.Delivery) {\n\t\terr := handler.HandleEvent(delivery.Type, delivery.Body)\n\t\tswitch err {\n\t\tcase nil:\n\t\t\tdelivery.Ack(false)\n\t\tcase topicfeed.HandlerNotFoundErr:\n\t\t\tlog.Notice(\"unknown event type (%s) recieved, \\n deleting message from RMQ\", delivery.Type)\n\t\t\tdelivery.Ack(false)\n\t\tcase gorm.RecordNotFound:\n\t\t\tlog.Warning(\"Record not found in our db (%s) recieved, \\n deleting message from RMQ\", string(delivery.Body))\n\t\t\tdelivery.Ack(false)\n\t\tdefault:\n\t\t\t\/\/ add proper error handling\n\t\t\t\/\/ instead of puttting message back to same queue, it is better\n\t\t\t\/\/ to put it to another maintenance queue\/exchange\n\t\t\tlog.Error(\"an error occured %s, \\n putting message back to queue\", err)\n\t\t\t\/\/ multiple false\n\t\t\t\/\/ reque true\n\t\t\tdelivery.Nack(false, true)\n\t\t}\n\t}\n}\n\nvar (\n\tBongo *bongo.Bongo\n\tlog = logging.NewLogger(\"TopicFeedWorker\")\n\tconf *config.Config\n\tflagProfile = flag.String(\"c\", \"\", \"Configuration profile from file\")\n\tflagDebug = flag.Bool(\"d\", false, \"Debug mode\")\n\thandler = topicfeed.NewTopicFeedController(log)\n)\n\nfunc main() {\n\tflag.Parse()\n\tif *flagProfile == \"\" {\n\t\tlog.Fatal(\"Please define config file with -c\", \"\")\n\t}\n\n\tconf = config.MustConfig(*flagProfile)\n\tsetLogLevel()\n\n\tinitBongo(conf)\n\t\/\/ blocking\n\ttopicfeed.Listen(rabbitmq.New(conf), startHandler)\n\tdefer topicfeed.Consumer.Shutdown()\n}\n\nfunc initBongo(c *config.Config) {\n\tbConf := &broker.Config{\n\t\tHost: c.Mq.Host,\n\t\tPort: c.Mq.Port,\n\t\tUsername: c.Mq.ComponentUser,\n\t\tPassword: c.Mq.Password,\n\t\tVhost: c.Mq.Vhost,\n\t}\n\n\tbroker := broker.New(bConf, log)\n\tBongo = bongo.New(broker, db.DB)\n\tBongo.Connect()\n}\n\nfunc setLogLevel() {\n\tvar logLevel logging.Level\n\n\tif *flagDebug {\n\t\tlogLevel = logging.DEBUG\n\t} else {\n\t\tlogLevel = logging.INFO\n\t}\n\tlog.SetLevel(logLevel)\n}\n<|endoftext|>"} {"text":"package core\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype DbType string\n\ntype Uri struct {\n\tDbType DbType\n\tProto string\n\tHost string\n\tPort string\n\tDbName string\n\tUser string\n\tPasswd string\n\tCharset string\n\tLaddr string\n\tRaddr string\n\tTimeout time.Duration\n\tSchema string\n}\n\n\/\/ a dialect is a driver's wrapper\ntype Dialect interface {\n\tSetLogger(logger ILogger)\n\tInit(*DB, *Uri, string, string) error\n\tURI() *Uri\n\tDB() *DB\n\tDBType() DbType\n\tSqlType(*Column) string\n\tFormatBytes(b []byte) string\n\n\tDriverName() string\n\tDataSourceName() string\n\n\tQuoteStr() string\n\tIsReserved(string) bool\n\tQuote(string) string\n\tAndStr() string\n\tOrStr() string\n\tEqStr() string\n\tRollBackStr() string\n\tAutoIncrStr() string\n\n\tSupportInsertMany() bool\n\tSupportEngine() bool\n\tSupportCharset() bool\n\tSupportDropIfExists() bool\n\tIndexOnTable() bool\n\tShowCreateNull() bool\n\n\tIndexCheckSql(tableName, idxName string) (string, []interface{})\n\tTableCheckSql(tableName string) (string, []interface{})\n\n\tIsColumnExist(tableName string, colName string) (bool, error)\n\n\tCreateTableSql(table *Table, tableName, storeEngine, charset string) string\n\tDropTableSql(tableName string) string\n\tCreateIndexSql(tableName string, index *Index) string\n\tDropIndexSql(tableName string, index *Index) string\n\n\tModifyColumnSql(tableName string, col *Column) string\n\n\tForUpdateSql(query string) string\n\n\t\/\/CreateTableIfNotExists(table *Table, tableName, storeEngine, charset string) error\n\t\/\/MustDropTable(tableName string) error\n\n\tGetColumns(tableName string) ([]string, map[string]*Column, error)\n\tGetTables() ([]*Table, error)\n\tGetIndexes(tableName string) (map[string]*Index, error)\n\n\tFilters() []Filter\n}\n\nfunc OpenDialect(dialect Dialect) (*DB, error) {\n\treturn Open(dialect.DriverName(), dialect.DataSourceName())\n}\n\ntype Base struct {\n\tdb *DB\n\tdialect Dialect\n\tdriverName string\n\tdataSourceName string\n\tlogger ILogger\n\t*Uri\n}\n\nfunc (b *Base) DB() *DB {\n\treturn b.db\n}\n\nfunc (b *Base) SetLogger(logger ILogger) {\n\tb.logger = logger\n}\n\nfunc (b *Base) Init(db *DB, dialect Dialect, uri *Uri, drivername, dataSourceName string) error {\n\tb.db, b.dialect, b.Uri = db, dialect, uri\n\tb.driverName, b.dataSourceName = drivername, dataSourceName\n\treturn nil\n}\n\nfunc (b *Base) URI() *Uri {\n\treturn b.Uri\n}\n\nfunc (b *Base) DBType() DbType {\n\treturn b.Uri.DbType\n}\n\nfunc (b *Base) FormatBytes(bs []byte) string {\n\treturn fmt.Sprintf(\"0x%x\", bs)\n}\n\nfunc (b *Base) DriverName() string {\n\treturn b.driverName\n}\n\nfunc (b *Base) ShowCreateNull() bool {\n\treturn true\n}\n\nfunc (b *Base) DataSourceName() string {\n\treturn b.dataSourceName\n}\n\nfunc (b *Base) AndStr() string {\n\treturn \"AND\"\n}\n\nfunc (b *Base) OrStr() string {\n\treturn \"OR\"\n}\n\nfunc (b *Base) EqStr() string {\n\treturn \"=\"\n}\n\nfunc (db *Base) RollBackStr() string {\n\treturn \"ROLL BACK\"\n}\n\nfunc (db *Base) SupportDropIfExists() bool {\n\treturn true\n}\n\nfunc (db *Base) DropTableSql(tableName string) string {\n\treturn fmt.Sprintf(\"DROP TABLE IF EXISTS `%s`\", tableName)\n}\n\nfunc (db *Base) HasRecords(query string, args ...interface{}) (bool, error) {\n\tdb.LogSQL(query, args)\n\trows, err := db.DB().Query(query, args...)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer rows.Close()\n\n\tif rows.Next() {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}\n\nfunc (db *Base) IsColumnExist(tableName, colName string) (bool, error) {\n\tquery := \"SELECT `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ? AND `COLUMN_NAME` = ?\"\n\tquery = strings.Replace(query, \"`\", db.dialect.QuoteStr(), -1)\n\treturn db.HasRecords(query, db.DbName, tableName, colName)\n}\n\n\/*\nfunc (db *Base) CreateTableIfNotExists(table *Table, tableName, storeEngine, charset string) error {\n\tsql, args := db.dialect.TableCheckSql(tableName)\n\trows, err := db.DB().Query(sql, args...)\n\tif db.Logger != nil {\n\t\tdb.Logger.Info(\"[sql]\", sql, args)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tif rows.Next() {\n\t\treturn nil\n\t}\n\n\tsql = db.dialect.CreateTableSql(table, tableName, storeEngine, charset)\n\t_, err = db.DB().Exec(sql)\n\tif db.Logger != nil {\n\t\tdb.Logger.Info(\"[sql]\", sql)\n\t}\n\treturn err\n}*\/\n\nfunc (db *Base) CreateIndexSql(tableName string, index *Index) string {\n\tquote := db.dialect.Quote\n\tvar unique string\n\tvar idxName string\n\tif index.Type == UniqueType {\n\t\tunique = \" UNIQUE\"\n\t}\n\tidxName = index.XName(tableName)\n\treturn fmt.Sprintf(\"CREATE%s INDEX %v ON %v (%v)\", unique,\n\t\tquote(idxName), quote(tableName),\n\t\tquote(strings.Join(index.Cols, quote(\",\"))))\n}\n\nfunc (db *Base) DropIndexSql(tableName string, index *Index) string {\n\tquote := db.dialect.Quote\n\tvar name string\n\tif index.IsRegular {\n\t\tname = index.XName(tableName)\n\t} else {\n\t\tname = index.Name\n\t}\n\treturn fmt.Sprintf(\"DROP INDEX %v ON %s\", quote(name), quote(tableName))\n}\n\nfunc (db *Base) ModifyColumnSql(tableName string, col *Column) string {\n\treturn fmt.Sprintf(\"alter table %s MODIFY COLUMN %s\", tableName, col.StringNoPk(db.dialect))\n}\n\nfunc (b *Base) CreateTableSql(table *Table, tableName, storeEngine, charset string) string {\n\tvar sql string\n\tsql = \"CREATE TABLE IF NOT EXISTS \"\n\tif tableName == \"\" {\n\t\ttableName = table.Name\n\t}\n\n\tsql += b.dialect.Quote(tableName)\n\tsql += \" (\"\n\n\tif len(table.ColumnsSeq()) > 0 {\n\t\tpkList := table.PrimaryKeys\n\n\t\tfor _, colName := range table.ColumnsSeq() {\n\t\t\tcol := table.GetColumn(colName)\n\t\t\tif col.IsPrimaryKey && len(pkList) == 1 {\n\t\t\t\tsql += col.String(b.dialect)\n\t\t\t} else {\n\t\t\t\tsql += col.StringNoPk(b.dialect)\n\t\t\t}\n\t\t\tsql = strings.TrimSpace(sql)\n\t\t\tsql += \", \"\n\t\t}\n\n\t\tif len(pkList) > 1 {\n\t\t\tsql += \"PRIMARY KEY ( \"\n\t\t\tsql += b.dialect.Quote(strings.Join(pkList, b.dialect.Quote(\",\")))\n\t\t\tsql += \" ), \"\n\t\t}\n\n\t\tsql = sql[:len(sql)-2]\n\t}\n\tsql += \")\"\n\n\tif b.dialect.SupportEngine() && storeEngine != \"\" {\n\t\tsql += \" ENGINE=\" + storeEngine\n\t}\n\tif b.dialect.SupportCharset() {\n\t\tif len(charset) == 0 {\n\t\t\tcharset = b.dialect.URI().Charset\n\t\t}\n\t\tif len(charset) > 0 {\n\t\t\tsql += \" DEFAULT CHARSET \" + charset\n\t\t}\n\t}\n\n\treturn sql\n}\n\nfunc (b *Base) ForUpdateSql(query string) string {\n\treturn query + \" FOR UPDATE\"\n}\n\nfunc (b *Base) LogSQL(sql string, args []interface{}) {\n\tif b.logger != nil && b.logger.IsShowSQL() {\n\t\tif len(args) > 0 {\n\t\t\tb.logger.Info(\"[sql]\", sql, args)\n\t\t} else {\n\t\t\tb.logger.Info(\"[sql]\", sql)\n\t\t}\n\t}\n}\n\nvar (\n\tdialects = map[string]func() Dialect{}\n)\n\n\/\/ RegisterDialect register database dialect\nfunc RegisterDialect(dbName DbType, dialectFunc func() Dialect) {\n\tif dialectFunc == nil {\n\t\tpanic(\"core: Register dialect is nil\")\n\t}\n\tdialects[strings.ToLower(string(dbName))] = dialectFunc \/\/ !nashtsai! allow override dialect\n}\n\n\/\/ QueryDialect query if registed database dialect\nfunc QueryDialect(dbName DbType) Dialect {\n\tif d, ok := dialects[strings.ToLower(string(dbName))]; ok {\n\t\treturn d()\n\t}\n\treturn nil\n}\nfix log spacepackage core\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype DbType string\n\ntype Uri struct {\n\tDbType DbType\n\tProto string\n\tHost string\n\tPort string\n\tDbName string\n\tUser string\n\tPasswd string\n\tCharset string\n\tLaddr string\n\tRaddr string\n\tTimeout time.Duration\n\tSchema string\n}\n\n\/\/ a dialect is a driver's wrapper\ntype Dialect interface {\n\tSetLogger(logger ILogger)\n\tInit(*DB, *Uri, string, string) error\n\tURI() *Uri\n\tDB() *DB\n\tDBType() DbType\n\tSqlType(*Column) string\n\tFormatBytes(b []byte) string\n\n\tDriverName() string\n\tDataSourceName() string\n\n\tQuoteStr() string\n\tIsReserved(string) bool\n\tQuote(string) string\n\tAndStr() string\n\tOrStr() string\n\tEqStr() string\n\tRollBackStr() string\n\tAutoIncrStr() string\n\n\tSupportInsertMany() bool\n\tSupportEngine() bool\n\tSupportCharset() bool\n\tSupportDropIfExists() bool\n\tIndexOnTable() bool\n\tShowCreateNull() bool\n\n\tIndexCheckSql(tableName, idxName string) (string, []interface{})\n\tTableCheckSql(tableName string) (string, []interface{})\n\n\tIsColumnExist(tableName string, colName string) (bool, error)\n\n\tCreateTableSql(table *Table, tableName, storeEngine, charset string) string\n\tDropTableSql(tableName string) string\n\tCreateIndexSql(tableName string, index *Index) string\n\tDropIndexSql(tableName string, index *Index) string\n\n\tModifyColumnSql(tableName string, col *Column) string\n\n\tForUpdateSql(query string) string\n\n\t\/\/CreateTableIfNotExists(table *Table, tableName, storeEngine, charset string) error\n\t\/\/MustDropTable(tableName string) error\n\n\tGetColumns(tableName string) ([]string, map[string]*Column, error)\n\tGetTables() ([]*Table, error)\n\tGetIndexes(tableName string) (map[string]*Index, error)\n\n\tFilters() []Filter\n}\n\nfunc OpenDialect(dialect Dialect) (*DB, error) {\n\treturn Open(dialect.DriverName(), dialect.DataSourceName())\n}\n\ntype Base struct {\n\tdb *DB\n\tdialect Dialect\n\tdriverName string\n\tdataSourceName string\n\tlogger ILogger\n\t*Uri\n}\n\nfunc (b *Base) DB() *DB {\n\treturn b.db\n}\n\nfunc (b *Base) SetLogger(logger ILogger) {\n\tb.logger = logger\n}\n\nfunc (b *Base) Init(db *DB, dialect Dialect, uri *Uri, drivername, dataSourceName string) error {\n\tb.db, b.dialect, b.Uri = db, dialect, uri\n\tb.driverName, b.dataSourceName = drivername, dataSourceName\n\treturn nil\n}\n\nfunc (b *Base) URI() *Uri {\n\treturn b.Uri\n}\n\nfunc (b *Base) DBType() DbType {\n\treturn b.Uri.DbType\n}\n\nfunc (b *Base) FormatBytes(bs []byte) string {\n\treturn fmt.Sprintf(\"0x%x\", bs)\n}\n\nfunc (b *Base) DriverName() string {\n\treturn b.driverName\n}\n\nfunc (b *Base) ShowCreateNull() bool {\n\treturn true\n}\n\nfunc (b *Base) DataSourceName() string {\n\treturn b.dataSourceName\n}\n\nfunc (b *Base) AndStr() string {\n\treturn \"AND\"\n}\n\nfunc (b *Base) OrStr() string {\n\treturn \"OR\"\n}\n\nfunc (b *Base) EqStr() string {\n\treturn \"=\"\n}\n\nfunc (db *Base) RollBackStr() string {\n\treturn \"ROLL BACK\"\n}\n\nfunc (db *Base) SupportDropIfExists() bool {\n\treturn true\n}\n\nfunc (db *Base) DropTableSql(tableName string) string {\n\treturn fmt.Sprintf(\"DROP TABLE IF EXISTS `%s`\", tableName)\n}\n\nfunc (db *Base) HasRecords(query string, args ...interface{}) (bool, error) {\n\tdb.LogSQL(query, args)\n\trows, err := db.DB().Query(query, args...)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer rows.Close()\n\n\tif rows.Next() {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}\n\nfunc (db *Base) IsColumnExist(tableName, colName string) (bool, error) {\n\tquery := \"SELECT `COLUMN_NAME` FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE `TABLE_SCHEMA` = ? AND `TABLE_NAME` = ? AND `COLUMN_NAME` = ?\"\n\tquery = strings.Replace(query, \"`\", db.dialect.QuoteStr(), -1)\n\treturn db.HasRecords(query, db.DbName, tableName, colName)\n}\n\n\/*\nfunc (db *Base) CreateTableIfNotExists(table *Table, tableName, storeEngine, charset string) error {\n\tsql, args := db.dialect.TableCheckSql(tableName)\n\trows, err := db.DB().Query(sql, args...)\n\tif db.Logger != nil {\n\t\tdb.Logger.Info(\"[sql]\", sql, args)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer rows.Close()\n\n\tif rows.Next() {\n\t\treturn nil\n\t}\n\n\tsql = db.dialect.CreateTableSql(table, tableName, storeEngine, charset)\n\t_, err = db.DB().Exec(sql)\n\tif db.Logger != nil {\n\t\tdb.Logger.Info(\"[sql]\", sql)\n\t}\n\treturn err\n}*\/\n\nfunc (db *Base) CreateIndexSql(tableName string, index *Index) string {\n\tquote := db.dialect.Quote\n\tvar unique string\n\tvar idxName string\n\tif index.Type == UniqueType {\n\t\tunique = \" UNIQUE\"\n\t}\n\tidxName = index.XName(tableName)\n\treturn fmt.Sprintf(\"CREATE%s INDEX %v ON %v (%v)\", unique,\n\t\tquote(idxName), quote(tableName),\n\t\tquote(strings.Join(index.Cols, quote(\",\"))))\n}\n\nfunc (db *Base) DropIndexSql(tableName string, index *Index) string {\n\tquote := db.dialect.Quote\n\tvar name string\n\tif index.IsRegular {\n\t\tname = index.XName(tableName)\n\t} else {\n\t\tname = index.Name\n\t}\n\treturn fmt.Sprintf(\"DROP INDEX %v ON %s\", quote(name), quote(tableName))\n}\n\nfunc (db *Base) ModifyColumnSql(tableName string, col *Column) string {\n\treturn fmt.Sprintf(\"alter table %s MODIFY COLUMN %s\", tableName, col.StringNoPk(db.dialect))\n}\n\nfunc (b *Base) CreateTableSql(table *Table, tableName, storeEngine, charset string) string {\n\tvar sql string\n\tsql = \"CREATE TABLE IF NOT EXISTS \"\n\tif tableName == \"\" {\n\t\ttableName = table.Name\n\t}\n\n\tsql += b.dialect.Quote(tableName)\n\tsql += \" (\"\n\n\tif len(table.ColumnsSeq()) > 0 {\n\t\tpkList := table.PrimaryKeys\n\n\t\tfor _, colName := range table.ColumnsSeq() {\n\t\t\tcol := table.GetColumn(colName)\n\t\t\tif col.IsPrimaryKey && len(pkList) == 1 {\n\t\t\t\tsql += col.String(b.dialect)\n\t\t\t} else {\n\t\t\t\tsql += col.StringNoPk(b.dialect)\n\t\t\t}\n\t\t\tsql = strings.TrimSpace(sql)\n\t\t\tsql += \", \"\n\t\t}\n\n\t\tif len(pkList) > 1 {\n\t\t\tsql += \"PRIMARY KEY ( \"\n\t\t\tsql += b.dialect.Quote(strings.Join(pkList, b.dialect.Quote(\",\")))\n\t\t\tsql += \" ), \"\n\t\t}\n\n\t\tsql = sql[:len(sql)-2]\n\t}\n\tsql += \")\"\n\n\tif b.dialect.SupportEngine() && storeEngine != \"\" {\n\t\tsql += \" ENGINE=\" + storeEngine\n\t}\n\tif b.dialect.SupportCharset() {\n\t\tif len(charset) == 0 {\n\t\t\tcharset = b.dialect.URI().Charset\n\t\t}\n\t\tif len(charset) > 0 {\n\t\t\tsql += \" DEFAULT CHARSET \" + charset\n\t\t}\n\t}\n\n\treturn sql\n}\n\nfunc (b *Base) ForUpdateSql(query string) string {\n\treturn query + \" FOR UPDATE\"\n}\n\nfunc (b *Base) LogSQL(sql string, args []interface{}) {\n\tif b.logger != nil && b.logger.IsShowSQL() {\n\t\tif len(args) > 0 {\n\t\t\tb.logger.Infof(\"[SQL] %v %v\", sql, args)\n\t\t} else {\n\t\t\tb.logger.Infof(\"[SQL] %v\", sql)\n\t\t}\n\t}\n}\n\nvar (\n\tdialects = map[string]func() Dialect{}\n)\n\n\/\/ RegisterDialect register database dialect\nfunc RegisterDialect(dbName DbType, dialectFunc func() Dialect) {\n\tif dialectFunc == nil {\n\t\tpanic(\"core: Register dialect is nil\")\n\t}\n\tdialects[strings.ToLower(string(dbName))] = dialectFunc \/\/ !nashtsai! allow override dialect\n}\n\n\/\/ QueryDialect query if registed database dialect\nfunc QueryDialect(dbName DbType) Dialect {\n\tif d, ok := dialects[strings.ToLower(string(dbName))]; ok {\n\t\treturn d()\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/*\nPackage apachelog is a library for logging the responses of an http.Handler. It uses formats and configuration\nsimilar to the Apache HTTP server.\n\nFormat strings:\n %% A literal %.\n %B Size of the full HTTP response in bytes, excluding headers.\n %b Size of the full HTTP response in bytes, excluding headers. This\n\t\t is '-' rather than 0.\n %D The time taken to serve the request, in microseconds. (Also see\n\t\t %T.)\n %h The client's IP address. (This is a best guess only -- see\n\t\t hutil.RemoteIP.)\n %H The request protocol.\n %{NAME}i The contents of the request header called NAME.\n %m The request method.\n %{NAME}o The contents of the response header called NAME.\n %q The query string (prepended with a ? if a query string exists;\n\t\t otherwise an empty string).\n %r First line of request (equivalent to '%m %U%q %H').\n %s Response status code.\n %t Time the request was received, formatted using ApacheTimeFormat\n\t\t and surrounded by '[]'.\n %{FORMAT}t Time the request was received, formatted using the supplied\n\t\t time.Format string FORMAT and surrounded by '[]'.\n %T The time taken to serve the request, in seconds. (Also see %D).\n %u The remote user. May be bogus if the request was\n\t\t unauthenticated.\n %U The URL path requested, not including a query string.\n*\/\npackage apachelog\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/cespare\/hutil\"\n)\n\nvar (\n\tApacheTimeFormat = `02\/Jan\/2006:15:04:05 -0700`\n\n\t\/\/ Predefined log formats.\n\tCommonLogFormat = `%h - %u %t \"%r\" %s %b`\n\tCombinedLogFormat = `%h - %u %t \"%r\" %s %b \"%{Referer}i\" \"%{User-Agent}i\"`\n\tRackCommonLoggerFormat = `%h - %u %{02\/Jan\/2006 15:04:05 -0700}t \"%r\" %s %b %T`\n)\n\ntype parsedFormat struct {\n\tchunks []chunk\n\tbuf *bytes.Buffer\n\n\tneededReqHeaders map[string]bool\n\tneededRespHeaders map[string]bool\n}\n\nfunc formatProvidedError(format byte) error {\n\treturn fmt.Errorf(\"Format %%%c doesn't take a custom formatter.\", format)\n}\n\nfunc newParsedFormat(format string) (*parsedFormat, error) {\n\tf := &parsedFormat{\n\t\tbuf: &bytes.Buffer{},\n\t\tneededReqHeaders: make(map[string]bool),\n\t\tneededRespHeaders: make(map[string]bool),\n\t}\n\tchunks := []chunk{}\n\n\t\/\/ Add a newline to the format if it's not already provided.\n\tif format[len(format)-1] != '\\n' {\n\t\tformat = format + \"\\n\"\n\t}\n\n\tvar literal []byte\n\tvar braceChunk []byte\n\tinBraceChunk := false \/\/ Whether we're in a brace-delimited formatter (e.g. %{NAME}i)\n\tescaped := false\nouter:\n\tfor _, c := range []byte(format) {\n\t\tif inBraceChunk {\n\t\t\tif c == '}' {\n\t\t\t\tinBraceChunk = false\n\t\t\t} else {\n\t\t\t\tbraceChunk = append(braceChunk, c)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif c == '%' {\n\t\t\tif escaped {\n\t\t\t\tliteral = append(literal, '%')\n\t\t\t} else {\n\t\t\t\tif len(literal) > 0 {\n\t\t\t\t\tchunks = append(chunks, literalChunk(literal))\n\t\t\t\t\tliteral = nil\n\t\t\t\t}\n\t\t\t}\n\t\t\tescaped = !escaped\n\t\t\tcontinue\n\t\t}\n\t\tif !escaped {\n\t\t\tliteral = append(literal, c)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar ch chunk\n\t\t\/\/ First do the codes that can take a format chunk\n\t\tswitch c {\n\t\tcase '{':\n\t\t\tinBraceChunk = true\n\t\t\tcontinue outer\n\t\tcase 'i':\n\t\t\theader := string(braceChunk)\n\t\t\tf.neededReqHeaders[header] = true\n\t\t\tch = reqHeaderChunk(header)\n\t\tcase 'o':\n\t\t\theader := string(braceChunk)\n\t\t\tf.neededRespHeaders[header] = true\n\t\t\tch = respHeaderChunk(header)\n\t\tcase 't':\n\t\t\tformatString := string(braceChunk)\n\t\t\tif braceChunk == nil {\n\t\t\t\tformatString = ApacheTimeFormat\n\t\t\t}\n\t\t\tch = startTimeChunk(formatString)\n\t\tdefault:\n\t\t\tif braceChunk != nil {\n\t\t\t\treturn nil, formatProvidedError(c)\n\t\t\t}\n\t\t\tswitch c {\n\t\t\tcase 'B':\n\t\t\t\tch = responseBytesChunk(false)\n\t\t\tcase 'b':\n\t\t\t\tch = responseBytesChunk(true)\n\t\t\tcase 'D':\n\t\t\t\tch = responseTimeMicros\n\t\t\tcase 'h':\n\t\t\t\tch = clientIPChunk\n\t\t\tcase 'H':\n\t\t\t\tch = protoChunk\n\t\t\tcase 'm':\n\t\t\t\tch = methodChunk\n\t\t\tcase 'q':\n\t\t\t\tch = queryChunk\n\t\t\tcase 'r':\n\t\t\t\tch = requestLineChunk\n\t\t\tcase 's':\n\t\t\t\tch = statusChunk\n\t\t\tcase 'T':\n\t\t\t\tch = responseTimeSeconds\n\t\t\tcase 'u':\n\t\t\t\tf.neededReqHeaders[\"Remote-User\"] = true\n\t\t\t\tch = userChunk\n\t\t\tcase 'U':\n\t\t\t\tch = pathChunk\n\t\t\tdefault:\n\t\t\t\treturn nil, fmt.Errorf(\"Unrecognized format code: %%%c\", c)\n\t\t\t}\n\t\t}\n\n\t\tchunks = append(chunks, ch)\n\t\tescaped = false\n\t\tbraceChunk = nil\n\t}\n\n\tif literal != nil {\n\t\tchunks = append(chunks, literalChunk(literal))\n\t}\n\tf.chunks = chunks\n\treturn f, nil\n}\n\nfunc (f *parsedFormat) Write(r *record, out io.Writer) {\n\tf.buf.Reset()\n\tfor _, c := range f.chunks {\n\t\tc(r, f.buf)\n\t}\n\tf.buf.WriteTo(out)\n}\n\ntype handler struct {\n\thttp.Handler\n\trecords chan *record\n\tout io.Writer\n\tpf *parsedFormat\n}\n\nfunc NewHandler(format string, h http.Handler, out io.Writer) http.Handler {\n\tpf, err := newParsedFormat(format)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\th2 := &handler{\n\t\tHandler: h,\n\t\trecords: make(chan *record), \/\/ TODO: buffered chan?\n\t\tout: out,\n\t\tpf: pf,\n\t}\n\tgo h2.Process()\n\treturn h2\n}\n\nfunc NewDefaultHandler(h http.Handler) http.Handler {\n\treturn NewHandler(RackCommonLoggerFormat, h, os.Stderr)\n}\n\nfunc (h *handler) Process() {\n\tfor r := range h.records {\n\t\th.pf.Write(r, h.out)\n\t}\n}\n\nfunc (h *handler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {\n\tstart := time.Now()\n\trec := &record{\n\t\tstatus: http.StatusOK, \/\/ Set to 200 to begin with because WriteHeader isn't called in the OK case.\n\t}\n\trec.ip = hutil.RemoteIP(r).String()\n\tif len(h.pf.neededReqHeaders) > 0 {\n\t\trec.reqHeaders = make(map[string]string)\n\t\tfor header := range h.pf.neededReqHeaders {\n\t\t\trec.reqHeaders[header] = r.Header.Get(header)\n\t\t}\n\t}\n\trec.startTime = start\n\trec.method = r.Method\n\trec.path = r.URL.Path\n\trec.query = r.URL.RawQuery\n\trec.proto = r.Proto\n\n\trec.ResponseWriter = rw\n\th.Handler.ServeHTTP(rec, r)\n\n\trec.elapsed = time.Since(start)\n\tif len(h.pf.neededRespHeaders) > 0 {\n\t\trec.respHeaders = make(map[string]string)\n\t\tfor header := range h.pf.neededRespHeaders {\n\t\t\trec.respHeaders[header] = rw.Header().Get(header)\n\t\t}\n\t}\n\n\th.records <- rec\n}\n\n\/\/ Only the necessary fields will be filled out.\ntype record struct {\n\thttp.ResponseWriter\n\t\/\/*handler \/\/ Need a reference back to the handler.\n\n\tip string\n\tresponseBytes int64\n\tstartTime time.Time\n\telapsed time.Duration\n\tproto string\n\treqHeaders map[string]string \/\/ Just the ones needed for the format, or nil if there are none\n\tmethod string\n\trespHeaders map[string]string\n\tquery string\n\tstatus int\n\tpath string\n}\n\n\/\/ Write proxies to the underlying ResponseWriter's Write method, while recording response size.\nfunc (r *record) Write(p []byte) (int, error) {\n\twritten, err := r.ResponseWriter.Write(p)\n\tr.responseBytes += int64(written)\n\treturn written, err\n}\n\nfunc (r *record) WriteHeader(status int) {\n\tr.status = status\n\tr.ResponseWriter.WriteHeader(status)\n}\n[apachelog] Remove a bunch of tabs in the doc comment (again)\/*\nPackage apachelog is a library for logging the responses of an http.Handler. It uses formats and configuration\nsimilar to the Apache HTTP server.\n\nFormat strings:\n %% A literal %.\n %B Size of the full HTTP response in bytes, excluding headers.\n %b Size of the full HTTP response in bytes, excluding headers. This\n is '-' rather than 0.\n %D The time taken to serve the request, in microseconds. (Also see\n %T.)\n %h The client's IP address. (This is a best guess only -- see\n hutil.RemoteIP.)\n %H The request protocol.\n %{NAME}i The contents of the request header called NAME.\n %m The request method.\n %{NAME}o The contents of the response header called NAME.\n %q The query string (prepended with a ? if a query string exists;\n otherwise an empty string).\n %r First line of request (equivalent to '%m %U%q %H').\n %s Response status code.\n %t Time the request was received, formatted using ApacheTimeFormat\n and surrounded by '[]'.\n %{FORMAT}t Time the request was received, formatted using the supplied\n time.Format string FORMAT and surrounded by '[]'.\n %T The time taken to serve the request, in seconds. (Also see %D).\n %u The remote user. May be bogus if the request was\n unauthenticated.\n %U The URL path requested, not including a query string.\n*\/\npackage apachelog\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/cespare\/hutil\"\n)\n\nvar (\n\tApacheTimeFormat = `02\/Jan\/2006:15:04:05 -0700`\n\n\t\/\/ Predefined log formats.\n\tCommonLogFormat = `%h - %u %t \"%r\" %s %b`\n\tCombinedLogFormat = `%h - %u %t \"%r\" %s %b \"%{Referer}i\" \"%{User-Agent}i\"`\n\tRackCommonLoggerFormat = `%h - %u %{02\/Jan\/2006 15:04:05 -0700}t \"%r\" %s %b %T`\n)\n\ntype parsedFormat struct {\n\tchunks []chunk\n\tbuf *bytes.Buffer\n\n\tneededReqHeaders map[string]bool\n\tneededRespHeaders map[string]bool\n}\n\nfunc formatProvidedError(format byte) error {\n\treturn fmt.Errorf(\"Format %%%c doesn't take a custom formatter.\", format)\n}\n\nfunc newParsedFormat(format string) (*parsedFormat, error) {\n\tf := &parsedFormat{\n\t\tbuf: &bytes.Buffer{},\n\t\tneededReqHeaders: make(map[string]bool),\n\t\tneededRespHeaders: make(map[string]bool),\n\t}\n\tchunks := []chunk{}\n\n\t\/\/ Add a newline to the format if it's not already provided.\n\tif format[len(format)-1] != '\\n' {\n\t\tformat = format + \"\\n\"\n\t}\n\n\tvar literal []byte\n\tvar braceChunk []byte\n\tinBraceChunk := false \/\/ Whether we're in a brace-delimited formatter (e.g. %{NAME}i)\n\tescaped := false\nouter:\n\tfor _, c := range []byte(format) {\n\t\tif inBraceChunk {\n\t\t\tif c == '}' {\n\t\t\t\tinBraceChunk = false\n\t\t\t} else {\n\t\t\t\tbraceChunk = append(braceChunk, c)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif c == '%' {\n\t\t\tif escaped {\n\t\t\t\tliteral = append(literal, '%')\n\t\t\t} else {\n\t\t\t\tif len(literal) > 0 {\n\t\t\t\t\tchunks = append(chunks, literalChunk(literal))\n\t\t\t\t\tliteral = nil\n\t\t\t\t}\n\t\t\t}\n\t\t\tescaped = !escaped\n\t\t\tcontinue\n\t\t}\n\t\tif !escaped {\n\t\t\tliteral = append(literal, c)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar ch chunk\n\t\t\/\/ First do the codes that can take a format chunk\n\t\tswitch c {\n\t\tcase '{':\n\t\t\tinBraceChunk = true\n\t\t\tcontinue outer\n\t\tcase 'i':\n\t\t\theader := string(braceChunk)\n\t\t\tf.neededReqHeaders[header] = true\n\t\t\tch = reqHeaderChunk(header)\n\t\tcase 'o':\n\t\t\theader := string(braceChunk)\n\t\t\tf.neededRespHeaders[header] = true\n\t\t\tch = respHeaderChunk(header)\n\t\tcase 't':\n\t\t\tformatString := string(braceChunk)\n\t\t\tif braceChunk == nil {\n\t\t\t\tformatString = ApacheTimeFormat\n\t\t\t}\n\t\t\tch = startTimeChunk(formatString)\n\t\tdefault:\n\t\t\tif braceChunk != nil {\n\t\t\t\treturn nil, formatProvidedError(c)\n\t\t\t}\n\t\t\tswitch c {\n\t\t\tcase 'B':\n\t\t\t\tch = responseBytesChunk(false)\n\t\t\tcase 'b':\n\t\t\t\tch = responseBytesChunk(true)\n\t\t\tcase 'D':\n\t\t\t\tch = responseTimeMicros\n\t\t\tcase 'h':\n\t\t\t\tch = clientIPChunk\n\t\t\tcase 'H':\n\t\t\t\tch = protoChunk\n\t\t\tcase 'm':\n\t\t\t\tch = methodChunk\n\t\t\tcase 'q':\n\t\t\t\tch = queryChunk\n\t\t\tcase 'r':\n\t\t\t\tch = requestLineChunk\n\t\t\tcase 's':\n\t\t\t\tch = statusChunk\n\t\t\tcase 'T':\n\t\t\t\tch = responseTimeSeconds\n\t\t\tcase 'u':\n\t\t\t\tf.neededReqHeaders[\"Remote-User\"] = true\n\t\t\t\tch = userChunk\n\t\t\tcase 'U':\n\t\t\t\tch = pathChunk\n\t\t\tdefault:\n\t\t\t\treturn nil, fmt.Errorf(\"Unrecognized format code: %%%c\", c)\n\t\t\t}\n\t\t}\n\n\t\tchunks = append(chunks, ch)\n\t\tescaped = false\n\t\tbraceChunk = nil\n\t}\n\n\tif literal != nil {\n\t\tchunks = append(chunks, literalChunk(literal))\n\t}\n\tf.chunks = chunks\n\treturn f, nil\n}\n\nfunc (f *parsedFormat) Write(r *record, out io.Writer) {\n\tf.buf.Reset()\n\tfor _, c := range f.chunks {\n\t\tc(r, f.buf)\n\t}\n\tf.buf.WriteTo(out)\n}\n\ntype handler struct {\n\thttp.Handler\n\trecords chan *record\n\tout io.Writer\n\tpf *parsedFormat\n}\n\nfunc NewHandler(format string, h http.Handler, out io.Writer) http.Handler {\n\tpf, err := newParsedFormat(format)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\th2 := &handler{\n\t\tHandler: h,\n\t\trecords: make(chan *record), \/\/ TODO: buffered chan?\n\t\tout: out,\n\t\tpf: pf,\n\t}\n\tgo h2.Process()\n\treturn h2\n}\n\nfunc NewDefaultHandler(h http.Handler) http.Handler {\n\treturn NewHandler(RackCommonLoggerFormat, h, os.Stderr)\n}\n\nfunc (h *handler) Process() {\n\tfor r := range h.records {\n\t\th.pf.Write(r, h.out)\n\t}\n}\n\nfunc (h *handler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {\n\tstart := time.Now()\n\trec := &record{\n\t\tstatus: http.StatusOK, \/\/ Set to 200 to begin with because WriteHeader isn't called in the OK case.\n\t}\n\trec.ip = hutil.RemoteIP(r).String()\n\tif len(h.pf.neededReqHeaders) > 0 {\n\t\trec.reqHeaders = make(map[string]string)\n\t\tfor header := range h.pf.neededReqHeaders {\n\t\t\trec.reqHeaders[header] = r.Header.Get(header)\n\t\t}\n\t}\n\trec.startTime = start\n\trec.method = r.Method\n\trec.path = r.URL.Path\n\trec.query = r.URL.RawQuery\n\trec.proto = r.Proto\n\n\trec.ResponseWriter = rw\n\th.Handler.ServeHTTP(rec, r)\n\n\trec.elapsed = time.Since(start)\n\tif len(h.pf.neededRespHeaders) > 0 {\n\t\trec.respHeaders = make(map[string]string)\n\t\tfor header := range h.pf.neededRespHeaders {\n\t\t\trec.respHeaders[header] = rw.Header().Get(header)\n\t\t}\n\t}\n\n\th.records <- rec\n}\n\n\/\/ Only the necessary fields will be filled out.\ntype record struct {\n\thttp.ResponseWriter\n\t\/\/*handler \/\/ Need a reference back to the handler.\n\n\tip string\n\tresponseBytes int64\n\tstartTime time.Time\n\telapsed time.Duration\n\tproto string\n\treqHeaders map[string]string \/\/ Just the ones needed for the format, or nil if there are none\n\tmethod string\n\trespHeaders map[string]string\n\tquery string\n\tstatus int\n\tpath string\n}\n\n\/\/ Write proxies to the underlying ResponseWriter's Write method, while recording response size.\nfunc (r *record) Write(p []byte) (int, error) {\n\twritten, err := r.ResponseWriter.Write(p)\n\tr.responseBytes += int64(written)\n\treturn written, err\n}\n\nfunc (r *record) WriteHeader(status int) {\n\tr.status = status\n\tr.ResponseWriter.WriteHeader(status)\n}\n<|endoftext|>"} {"text":"package api_test\n\nimport (\n\t\"archive\/zip\"\n\t\"cf\"\n\t. \"cf\/api\"\n\t\"cf\/configuration\"\n\t\"cf\/models\"\n\t\"cf\/net\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"runtime\"\n\t\"path\/filepath\"\n\t\"sort\"\n\ttestconfig \"testhelpers\/configuration\"\n\ttestnet \"testhelpers\/net\"\n)\n\nvar _ = Describe(\"BuildpackBitsRepository\", func() {\n\tvar (\n\t\tbuildpacksDir string\n\t\tconfigRepo configuration.Repository\n\t\trepo CloudControllerBuildpackBitsRepository\n\t\tbuildpack models.Buildpack\n\t)\n\n\tBeforeEach(func() {\n\t\tgateway := net.NewCloudControllerGateway()\n\t\tpwd, _ := os.Getwd()\n\n\t\tbuildpacksDir = filepath.Join(pwd, \"..\/..\/fixtures\/buildpacks\")\n\t\tconfigRepo = testconfig.NewRepositoryWithDefaults()\n\t\trepo = NewCloudControllerBuildpackBitsRepository(configRepo, gateway, cf.ApplicationZipper{})\n\t\tbuildpack = models.Buildpack{Name: \"my-cool-buildpack\", Guid: \"my-cool-buildpack-guid\"}\n\t})\n\n\tDescribe(\"#UploadBuildpack\", func() {\n\t\tIt(\"fails to upload a buildpack with an invalid directory\", func() {\n\t\t\tapiResponse := repo.UploadBuildpack(buildpack, \"\/foo\/bar\")\n\t\t\tExpect(apiResponse.IsNotSuccessful()).To(BeTrue())\n\t\t\tExpect(apiResponse.Message).To(ContainSubstring(\"Error opening buildpack file\"))\n\t\t})\n\n\t\tIt(\"uploads a valid buildpack directory\", func() {\n\t\t\tbuildpackPath := filepath.Join(buildpacksDir, \"example-buildpack\")\n\n\t\t\tos.Chmod(filepath.Join(buildpackPath, \"bin\/compile\"), 0755)\n\t\t\tos.Chmod(filepath.Join(buildpackPath, \"bin\/detect\"), 0755)\n\t\t\terr := os.Chmod(filepath.Join(buildpackPath, \"bin\/release\"), 0755)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tts, handler := testnet.NewTLSServer([]testnet.TestRequest{\n\t\t\t\tuploadBuildpackRequest(buildpackPath),\n\t\t\t})\n\t\t\tdefer ts.Close()\n\t\t\tconfigRepo.SetApiEndpoint(ts.URL)\n\n\t\t\tapiResponse := repo.UploadBuildpack(buildpack, buildpackPath)\n\t\t\tExpect(handler.AllRequestsCalled()).To(BeTrue())\n\t\t\tExpect(apiResponse.IsSuccessful()).To(BeTrue())\n\t\t})\n\n\t\tIt(\"uploads a valid zipped buildpack\", func() {\n\t\t\tbuildpackPath := filepath.Join(buildpacksDir, \"example-buildpack.zip\")\n\n\t\t\tts, handler := testnet.NewTLSServer([]testnet.TestRequest{\n\t\t\t\tuploadBuildpackRequest(buildpackPath),\n\t\t\t})\n\t\t\tdefer ts.Close()\n\n\t\t\tconfigRepo.SetApiEndpoint(ts.URL)\n\n\t\t\tapiResponse := repo.UploadBuildpack(buildpack, buildpackPath)\n\t\t\tExpect(handler.AllRequestsCalled()).To(BeTrue())\n\t\t\tExpect(apiResponse.IsSuccessful()).To(BeTrue())\n\t\t})\n\n\t\tDescribe(\"when the buildpack is wrapped in an extra top-level directory\", func() {\n\t\t\tIt(\"uploads a zip file containing only the actual buildpack\", func() {\n\t\t\t\tbuildpackPath := filepath.Join(buildpacksDir, \"example-buildpack-in-dir.zip\")\n\n\t\t\t\tts, handler := testnet.NewTLSServer([]testnet.TestRequest{\n\t\t\t\t\tuploadBuildpackRequest(buildpackPath),\n\t\t\t\t})\n\t\t\t\tdefer ts.Close()\n\n\t\t\t\tconfigRepo.SetApiEndpoint(ts.URL)\n\n\t\t\t\tapiResponse := repo.UploadBuildpack(buildpack, buildpackPath)\n\t\t\t\tExpect(handler.AllRequestsCalled()).To(BeTrue())\n\t\t\t\tExpect(apiResponse.IsSuccessful()).To(BeTrue())\n\t\t\t})\n\t\t})\n\n\t\tDescribe(\"when given the URL of a buildpack\", func() {\n\t\t\tvar handler *testnet.TestHandler\n\t\t\tvar apiServer *httptest.Server\n\n\t\t\tBeforeEach(func() {\n\t\t\t\tapiServer, handler = testnet.NewTLSServer([]testnet.TestRequest{\n\t\t\t\t\tuploadBuildpackRequest(\"example-buildpack.zip\"),\n\t\t\t\t})\n\t\t\t\tconfigRepo.SetApiEndpoint(apiServer.URL)\n\t\t\t})\n\n\t\t\tAfterEach(func() {\n\t\t\t\tapiServer.Close()\n\t\t\t})\n\n\t\t\tvar buildpackFileServerHandler = func(buildpackName string) http.HandlerFunc {\n\t\t\t\treturn func(writer http.ResponseWriter, request *http.Request) {\n\t\t\t\t\tExpect(request.URL.Path).To(Equal(\"\/place\/example-buildpack.zip\"))\n\t\t\t\t\tf, err := os.Open(filepath.Join(buildpacksDir, buildpackName))\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\tio.Copy(writer, f)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tIt(\"uploads the file over HTTP\", func() {\n\t\t\t\tfileServer := httptest.NewServer(buildpackFileServerHandler(\"example-buildpack.zip\"))\n\t\t\t\tdefer fileServer.Close()\n\n\t\t\t\tapiResponse := repo.UploadBuildpack(buildpack, fileServer.URL+\"\/place\/example-buildpack.zip\")\n\t\t\t\tExpect(handler.AllRequestsCalled()).To(BeTrue())\n\t\t\t\tExpect(apiResponse.IsSuccessful()).To(BeTrue())\n\t\t\t})\n\n\t\t\tIt(\"uploads the file over HTTPS\", func() {\n\t\t\t\tfileServer := httptest.NewTLSServer(buildpackFileServerHandler(\"example-buildpack.zip\"))\n\t\t\t\tdefer fileServer.Close()\n\n\t\t\t\tapiResponse := repo.UploadBuildpack(buildpack, fileServer.URL+\"\/place\/example-buildpack.zip\")\n\t\t\t\tExpect(handler.AllRequestsCalled()).To(BeTrue())\n\t\t\t\tExpect(apiResponse.IsSuccessful()).To(BeTrue())\n\t\t\t})\n\n\t\t\tDescribe(\"when the buildpack is wrapped in an extra top-level directory\", func() {\n\t\t\t\tIt(\"uploads a zip file containing only the actual buildpack\", func() {\n\t\t\t\t\tfileServer := httptest.NewTLSServer(buildpackFileServerHandler(\"example-buildpack-in-dir.zip\"))\n\t\t\t\t\tdefer fileServer.Close()\n\n\t\t\t\t\tapiResponse := repo.UploadBuildpack(buildpack, fileServer.URL+\"\/place\/example-buildpack.zip\")\n\t\t\t\t\tExpect(handler.AllRequestsCalled()).To(BeTrue())\n\t\t\t\t\tExpect(apiResponse.IsSuccessful()).To(BeTrue())\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tIt(\"returns an unsuccessful response when the server cannot be reached\", func() {\n\t\t\t\tapiResponse := repo.UploadBuildpack(buildpack, \"https:\/\/domain.bad-domain:223453\/no-place\/example-buildpack.zip\")\n\t\t\t\tExpect(handler.AllRequestsCalled()).To(BeFalse())\n\t\t\t\tExpect(apiResponse.IsSuccessful()).To(BeFalse())\n\t\t\t})\n\t\t})\n\t})\n})\n\nfunc uploadBuildpackRequest(filename string) testnet.TestRequest {\n\treturn testnet.TestRequest{\n\t\tMethod: \"PUT\",\n\t\tPath: \"\/v2\/buildpacks\/my-cool-buildpack-guid\/bits\",\n\t\tResponse: testnet.TestResponse{\n\t\t\tStatus: http.StatusCreated,\n\t\t\tBody: `{ \"metadata\":{ \"guid\": \"my-job-guid\" } }`,\n\t\t},\n\t\tMatcher: func(request *http.Request) {\n\t\t\terr := request.ParseMultipartForm(4096)\n\t\t\tdefer request.MultipartForm.RemoveAll()\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tExpect(len(request.MultipartForm.Value)).To(Equal(0))\n\t\t\tExpect(len(request.MultipartForm.File)).To(Equal(1))\n\n\t\t\tfiles, ok := request.MultipartForm.File[\"buildpack\"]\n\t\t\tExpect(ok).To(BeTrue(), \"Buildpack file part not present\")\n\t\t\tExpect(len(files)).To(Equal(1), \"Wrong number of files\")\n\n\t\t\tbuildpackFile := files[0]\n\t\t\tExpect(buildpackFile.Filename).To(Equal(filepath.Base(filename)), \"Wrong file name\")\n\n\t\t\tfile, err := buildpackFile.Open()\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tzipReader, err := zip.NewReader(file, 4096)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tactualFileNames := []string{}\n\t\t\tactualFileContents := []string{}\n\t\t\tfor _, f := range zipReader.File {\n\t\t\t\tactualFileNames = append(actualFileNames, f.Name)\n\t\t\t\tc, _ := f.Open()\n\t\t\t\tcontent, _ := ioutil.ReadAll(c)\n\t\t\t\tactualFileContents = append(actualFileContents, string(content))\n\t\t\t}\n\t\t\tsort.Strings(actualFileNames)\n\n\t\t\tExpect(actualFileNames).To(Equal([]string{\n\t\t\t\t\"bin\/compile\",\n\t\t\t\t\"bin\/detect\",\n\t\t\t\t\"bin\/release\",\n\t\t\t\t\"lib\/helper\",\n\t\t\t}))\n\t\t\tExpect(actualFileContents).To(Equal([]string{\n\t\t\t\t\"the-compile-script\\n\",\n\t\t\t\t\"the-detect-script\\n\",\n\t\t\t\t\"the-release-script\\n\",\n\t\t\t\t\"the-helper-script\\n\",\n\t\t\t}))\n\n\t\t\tif runtime.GOOS != \"windows\" {\n\t\t\t\tExpect(zipReader.File[0].Mode()).To(Equal(os.FileMode(0755)))\n\t\t\t\tExpect(zipReader.File[1].Mode()).To(Equal(os.FileMode(0755)))\n\t\t\t\tExpect(zipReader.File[2].Mode()).To(Equal(os.FileMode(0755)))\n\t\t\t}\n\t\t},\n\t}\n}\ngo fmtpackage api_test\n\nimport (\n\t\"archive\/zip\"\n\t\"cf\"\n\t. \"cf\/api\"\n\t\"cf\/configuration\"\n\t\"cf\/models\"\n\t\"cf\/net\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"sort\"\n\ttestconfig \"testhelpers\/configuration\"\n\ttestnet \"testhelpers\/net\"\n)\n\nvar _ = Describe(\"BuildpackBitsRepository\", func() {\n\tvar (\n\t\tbuildpacksDir string\n\t\tconfigRepo configuration.Repository\n\t\trepo CloudControllerBuildpackBitsRepository\n\t\tbuildpack models.Buildpack\n\t)\n\n\tBeforeEach(func() {\n\t\tgateway := net.NewCloudControllerGateway()\n\t\tpwd, _ := os.Getwd()\n\n\t\tbuildpacksDir = filepath.Join(pwd, \"..\/..\/fixtures\/buildpacks\")\n\t\tconfigRepo = testconfig.NewRepositoryWithDefaults()\n\t\trepo = NewCloudControllerBuildpackBitsRepository(configRepo, gateway, cf.ApplicationZipper{})\n\t\tbuildpack = models.Buildpack{Name: \"my-cool-buildpack\", Guid: \"my-cool-buildpack-guid\"}\n\t})\n\n\tDescribe(\"#UploadBuildpack\", func() {\n\t\tIt(\"fails to upload a buildpack with an invalid directory\", func() {\n\t\t\tapiResponse := repo.UploadBuildpack(buildpack, \"\/foo\/bar\")\n\t\t\tExpect(apiResponse.IsNotSuccessful()).To(BeTrue())\n\t\t\tExpect(apiResponse.Message).To(ContainSubstring(\"Error opening buildpack file\"))\n\t\t})\n\n\t\tIt(\"uploads a valid buildpack directory\", func() {\n\t\t\tbuildpackPath := filepath.Join(buildpacksDir, \"example-buildpack\")\n\n\t\t\tos.Chmod(filepath.Join(buildpackPath, \"bin\/compile\"), 0755)\n\t\t\tos.Chmod(filepath.Join(buildpackPath, \"bin\/detect\"), 0755)\n\t\t\terr := os.Chmod(filepath.Join(buildpackPath, \"bin\/release\"), 0755)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tts, handler := testnet.NewTLSServer([]testnet.TestRequest{\n\t\t\t\tuploadBuildpackRequest(buildpackPath),\n\t\t\t})\n\t\t\tdefer ts.Close()\n\t\t\tconfigRepo.SetApiEndpoint(ts.URL)\n\n\t\t\tapiResponse := repo.UploadBuildpack(buildpack, buildpackPath)\n\t\t\tExpect(handler.AllRequestsCalled()).To(BeTrue())\n\t\t\tExpect(apiResponse.IsSuccessful()).To(BeTrue())\n\t\t})\n\n\t\tIt(\"uploads a valid zipped buildpack\", func() {\n\t\t\tbuildpackPath := filepath.Join(buildpacksDir, \"example-buildpack.zip\")\n\n\t\t\tts, handler := testnet.NewTLSServer([]testnet.TestRequest{\n\t\t\t\tuploadBuildpackRequest(buildpackPath),\n\t\t\t})\n\t\t\tdefer ts.Close()\n\n\t\t\tconfigRepo.SetApiEndpoint(ts.URL)\n\n\t\t\tapiResponse := repo.UploadBuildpack(buildpack, buildpackPath)\n\t\t\tExpect(handler.AllRequestsCalled()).To(BeTrue())\n\t\t\tExpect(apiResponse.IsSuccessful()).To(BeTrue())\n\t\t})\n\n\t\tDescribe(\"when the buildpack is wrapped in an extra top-level directory\", func() {\n\t\t\tIt(\"uploads a zip file containing only the actual buildpack\", func() {\n\t\t\t\tbuildpackPath := filepath.Join(buildpacksDir, \"example-buildpack-in-dir.zip\")\n\n\t\t\t\tts, handler := testnet.NewTLSServer([]testnet.TestRequest{\n\t\t\t\t\tuploadBuildpackRequest(buildpackPath),\n\t\t\t\t})\n\t\t\t\tdefer ts.Close()\n\n\t\t\t\tconfigRepo.SetApiEndpoint(ts.URL)\n\n\t\t\t\tapiResponse := repo.UploadBuildpack(buildpack, buildpackPath)\n\t\t\t\tExpect(handler.AllRequestsCalled()).To(BeTrue())\n\t\t\t\tExpect(apiResponse.IsSuccessful()).To(BeTrue())\n\t\t\t})\n\t\t})\n\n\t\tDescribe(\"when given the URL of a buildpack\", func() {\n\t\t\tvar handler *testnet.TestHandler\n\t\t\tvar apiServer *httptest.Server\n\n\t\t\tBeforeEach(func() {\n\t\t\t\tapiServer, handler = testnet.NewTLSServer([]testnet.TestRequest{\n\t\t\t\t\tuploadBuildpackRequest(\"example-buildpack.zip\"),\n\t\t\t\t})\n\t\t\t\tconfigRepo.SetApiEndpoint(apiServer.URL)\n\t\t\t})\n\n\t\t\tAfterEach(func() {\n\t\t\t\tapiServer.Close()\n\t\t\t})\n\n\t\t\tvar buildpackFileServerHandler = func(buildpackName string) http.HandlerFunc {\n\t\t\t\treturn func(writer http.ResponseWriter, request *http.Request) {\n\t\t\t\t\tExpect(request.URL.Path).To(Equal(\"\/place\/example-buildpack.zip\"))\n\t\t\t\t\tf, err := os.Open(filepath.Join(buildpacksDir, buildpackName))\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\tio.Copy(writer, f)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tIt(\"uploads the file over HTTP\", func() {\n\t\t\t\tfileServer := httptest.NewServer(buildpackFileServerHandler(\"example-buildpack.zip\"))\n\t\t\t\tdefer fileServer.Close()\n\n\t\t\t\tapiResponse := repo.UploadBuildpack(buildpack, fileServer.URL+\"\/place\/example-buildpack.zip\")\n\t\t\t\tExpect(handler.AllRequestsCalled()).To(BeTrue())\n\t\t\t\tExpect(apiResponse.IsSuccessful()).To(BeTrue())\n\t\t\t})\n\n\t\t\tIt(\"uploads the file over HTTPS\", func() {\n\t\t\t\tfileServer := httptest.NewTLSServer(buildpackFileServerHandler(\"example-buildpack.zip\"))\n\t\t\t\tdefer fileServer.Close()\n\n\t\t\t\tapiResponse := repo.UploadBuildpack(buildpack, fileServer.URL+\"\/place\/example-buildpack.zip\")\n\t\t\t\tExpect(handler.AllRequestsCalled()).To(BeTrue())\n\t\t\t\tExpect(apiResponse.IsSuccessful()).To(BeTrue())\n\t\t\t})\n\n\t\t\tDescribe(\"when the buildpack is wrapped in an extra top-level directory\", func() {\n\t\t\t\tIt(\"uploads a zip file containing only the actual buildpack\", func() {\n\t\t\t\t\tfileServer := httptest.NewTLSServer(buildpackFileServerHandler(\"example-buildpack-in-dir.zip\"))\n\t\t\t\t\tdefer fileServer.Close()\n\n\t\t\t\t\tapiResponse := repo.UploadBuildpack(buildpack, fileServer.URL+\"\/place\/example-buildpack.zip\")\n\t\t\t\t\tExpect(handler.AllRequestsCalled()).To(BeTrue())\n\t\t\t\t\tExpect(apiResponse.IsSuccessful()).To(BeTrue())\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tIt(\"returns an unsuccessful response when the server cannot be reached\", func() {\n\t\t\t\tapiResponse := repo.UploadBuildpack(buildpack, \"https:\/\/domain.bad-domain:223453\/no-place\/example-buildpack.zip\")\n\t\t\t\tExpect(handler.AllRequestsCalled()).To(BeFalse())\n\t\t\t\tExpect(apiResponse.IsSuccessful()).To(BeFalse())\n\t\t\t})\n\t\t})\n\t})\n})\n\nfunc uploadBuildpackRequest(filename string) testnet.TestRequest {\n\treturn testnet.TestRequest{\n\t\tMethod: \"PUT\",\n\t\tPath: \"\/v2\/buildpacks\/my-cool-buildpack-guid\/bits\",\n\t\tResponse: testnet.TestResponse{\n\t\t\tStatus: http.StatusCreated,\n\t\t\tBody: `{ \"metadata\":{ \"guid\": \"my-job-guid\" } }`,\n\t\t},\n\t\tMatcher: func(request *http.Request) {\n\t\t\terr := request.ParseMultipartForm(4096)\n\t\t\tdefer request.MultipartForm.RemoveAll()\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tExpect(len(request.MultipartForm.Value)).To(Equal(0))\n\t\t\tExpect(len(request.MultipartForm.File)).To(Equal(1))\n\n\t\t\tfiles, ok := request.MultipartForm.File[\"buildpack\"]\n\t\t\tExpect(ok).To(BeTrue(), \"Buildpack file part not present\")\n\t\t\tExpect(len(files)).To(Equal(1), \"Wrong number of files\")\n\n\t\t\tbuildpackFile := files[0]\n\t\t\tExpect(buildpackFile.Filename).To(Equal(filepath.Base(filename)), \"Wrong file name\")\n\n\t\t\tfile, err := buildpackFile.Open()\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tzipReader, err := zip.NewReader(file, 4096)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tactualFileNames := []string{}\n\t\t\tactualFileContents := []string{}\n\t\t\tfor _, f := range zipReader.File {\n\t\t\t\tactualFileNames = append(actualFileNames, f.Name)\n\t\t\t\tc, _ := f.Open()\n\t\t\t\tcontent, _ := ioutil.ReadAll(c)\n\t\t\t\tactualFileContents = append(actualFileContents, string(content))\n\t\t\t}\n\t\t\tsort.Strings(actualFileNames)\n\n\t\t\tExpect(actualFileNames).To(Equal([]string{\n\t\t\t\t\"bin\/compile\",\n\t\t\t\t\"bin\/detect\",\n\t\t\t\t\"bin\/release\",\n\t\t\t\t\"lib\/helper\",\n\t\t\t}))\n\t\t\tExpect(actualFileContents).To(Equal([]string{\n\t\t\t\t\"the-compile-script\\n\",\n\t\t\t\t\"the-detect-script\\n\",\n\t\t\t\t\"the-release-script\\n\",\n\t\t\t\t\"the-helper-script\\n\",\n\t\t\t}))\n\n\t\t\tif runtime.GOOS != \"windows\" {\n\t\t\t\tExpect(zipReader.File[0].Mode()).To(Equal(os.FileMode(0755)))\n\t\t\t\tExpect(zipReader.File[1].Mode()).To(Equal(os.FileMode(0755)))\n\t\t\t\tExpect(zipReader.File[2].Mode()).To(Equal(os.FileMode(0755)))\n\t\t\t}\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"package grpcutil\n\nimport (\n\t\"sync\"\n\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/tracing\"\n\t\"google.golang.org\/grpc\"\n)\n\n\/\/ Dialer defines a grpc.ClientConn connection dialer.\ntype Dialer interface {\n\tDial(address string) (*grpc.ClientConn, error)\n\tCloseConns() error\n}\n\n\/\/ NewDialer creates a Dialer.\nfunc NewDialer(opts ...grpc.DialOption) Dialer {\n\treturn newDialer(opts...)\n}\n\ntype dialer struct {\n\topts []grpc.DialOption\n\t\/\/ A map from addresses to connections\n\tconnMap map[string]*grpc.ClientConn\n\tlock sync.Mutex\n}\n\nfunc newDialer(opts ...grpc.DialOption) *dialer {\n\treturn &dialer{\n\t\topts: opts,\n\t\tconnMap: make(map[string]*grpc.ClientConn),\n\t}\n}\n\nfunc (d *dialer) Dial(addr string) (*grpc.ClientConn, error) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tif conn, ok := d.connMap[addr]; ok {\n\t\treturn conn, nil\n\t}\n\topts := append(d.opts,\n\t\tgrpc.WithUnaryInterceptor(tracing.UnaryClientInterceptor()),\n\t\tgrpc.WithStreamInterceptor(tracing.StreamClientInterceptor()),\n\t)\n\tif (strings.Index(addr, \"dns:\/\/\/\") == -1) {\n\t addr = \"dns:\/\/\/\"+addr\n\t}\n\tconn, err := grpc.Dial(addr, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\td.connMap[addr] = conn\n\treturn conn, nil\n}\n\nfunc (d *dialer) CloseConns() error {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\tfor addr, conn := range d.connMap {\n\t\tif err := conn.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdelete(d.connMap, addr)\n\t}\n\treturn nil\n}\nFix importpackage grpcutil\n\nimport (\n\t\"sync\"\n\t\"strings\"\n\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/tracing\"\n\t\"google.golang.org\/grpc\"\n)\n\n\/\/ Dialer defines a grpc.ClientConn connection dialer.\ntype Dialer interface {\n\tDial(address string) (*grpc.ClientConn, error)\n\tCloseConns() error\n}\n\n\/\/ NewDialer creates a Dialer.\nfunc NewDialer(opts ...grpc.DialOption) Dialer {\n\treturn newDialer(opts...)\n}\n\ntype dialer struct {\n\topts []grpc.DialOption\n\t\/\/ A map from addresses to connections\n\tconnMap map[string]*grpc.ClientConn\n\tlock sync.Mutex\n}\n\nfunc newDialer(opts ...grpc.DialOption) *dialer {\n\treturn &dialer{\n\t\topts: opts,\n\t\tconnMap: make(map[string]*grpc.ClientConn),\n\t}\n}\n\nfunc (d *dialer) Dial(addr string) (*grpc.ClientConn, error) {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\n\tif conn, ok := d.connMap[addr]; ok {\n\t\treturn conn, nil\n\t}\n\topts := append(d.opts,\n\t\tgrpc.WithUnaryInterceptor(tracing.UnaryClientInterceptor()),\n\t\tgrpc.WithStreamInterceptor(tracing.StreamClientInterceptor()),\n\t)\n\tif (strings.Index(addr, \"dns:\/\/\/\") == -1) {\n\t addr = \"dns:\/\/\/\"+addr\n\t}\n\tconn, err := grpc.Dial(addr, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\td.connMap[addr] = conn\n\treturn conn, nil\n}\n\nfunc (d *dialer) CloseConns() error {\n\td.lock.Lock()\n\tdefer d.lock.Unlock()\n\tfor addr, conn := range d.connMap {\n\t\tif err := conn.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdelete(d.connMap, addr)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"package reseed\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"github.com\/PuerkitoBio\/throttled\"\n\t\"github.com\/PuerkitoBio\/throttled\/store\"\n\t\"github.com\/gorilla\/handlers\"\n\t\"github.com\/justinas\/alice\"\n)\n\nconst (\n\tI2P_USER_AGENT = \"Wget\/1.11.4\"\n)\n\ntype Server struct {\n\t*http.Server\n\tReseeder Reseeder\n}\n\nfunc NewServer(prefix string, trustProxy bool) *Server {\n\tconfig := &tls.Config{MinVersion: tls.VersionTLS10}\n\th := &http.Server{TLSConfig: config}\n\tserver := Server{h, nil}\n\n\tth := throttled.RateLimit(throttled.PerHour(120), &throttled.VaryBy{RemoteAddr: true}, store.NewMemStore(10000))\n\n\tmiddlewareChain := alice.New()\n\tif trustProxy {\n\t\tmiddlewareChain.Append(proxiedMiddleware)\n\t}\n\tmiddlewareChain = middlewareChain.Append(loggingMiddleware, verifyMiddleware, th.Throttle)\n\n\tmux := http.NewServeMux()\n\tmux.Handle(prefix+\"\/i2pseeds.su3\", middlewareChain.Then(http.HandlerFunc(server.reseedHandler)))\n\tserver.Handler = mux\n\n\treturn &server\n}\n\nfunc (s *Server) reseedHandler(w http.ResponseWriter, r *http.Request) {\n\tpeer := s.Reseeder.Peer(r)\n\n\tsu3, err := s.Reseeder.PeerSu3Bytes(peer)\n\tif nil != err {\n\t\thttp.Error(w, \"500 Unable to get SU3\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Disposition\", \"attachment; filename=i2pseeds.su3\")\n\tw.Header().Set(\"Content-Type\", \"application\/octet-stream\")\n\tw.Header().Set(\"Content-Length\", strconv.FormatInt(int64(len(su3)), 10))\n\n\tio.Copy(w, bytes.NewReader(su3))\n}\n\nfunc loggingMiddleware(next http.Handler) http.Handler {\n\treturn handlers.CombinedLoggingHandler(os.Stdout, next)\n}\n\nfunc verifyMiddleware(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tif I2P_USER_AGENT != r.UserAgent() {\n\t\t\thttp.Error(w, \"403 Forbidden\", http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}\n\nfunc proxiedMiddleware(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tif prior, ok := r.Header[\"X-Forwarded-For\"]; ok {\n\t\t\tr.RemoteAddr = prior[0]\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}\nadd a handler to log failed requestspackage reseed\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"github.com\/PuerkitoBio\/throttled\"\n\t\"github.com\/PuerkitoBio\/throttled\/store\"\n\t\"github.com\/gorilla\/handlers\"\n\t\"github.com\/justinas\/alice\"\n)\n\nconst (\n\tI2P_USER_AGENT = \"Wget\/1.11.4\"\n)\n\ntype Server struct {\n\t*http.Server\n\tReseeder Reseeder\n}\n\nfunc NewServer(prefix string, trustProxy bool) *Server {\n\tconfig := &tls.Config{MinVersion: tls.VersionTLS10}\n\th := &http.Server{TLSConfig: config}\n\tserver := Server{h, nil}\n\n\tth := throttled.RateLimit(throttled.PerHour(120), &throttled.VaryBy{RemoteAddr: true}, store.NewMemStore(10000))\n\n\tmiddlewareChain := alice.New()\n\tif trustProxy {\n\t\tmiddlewareChain.Append(proxiedMiddleware)\n\t}\n\n\terrorHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t})\n\n\tmux := http.NewServeMux()\n\tmux.Handle(\"\/\", middlewareChain.Append(loggingMiddleware).Then(errorHandler))\n\tmux.Handle(prefix+\"\/i2pseeds.su3\", middlewareChain.Append(loggingMiddleware, verifyMiddleware, th.Throttle).Then(http.HandlerFunc(server.reseedHandler)))\n\tserver.Handler = mux\n\n\treturn &server\n}\n\nfunc (s *Server) reseedHandler(w http.ResponseWriter, r *http.Request) {\n\tpeer := s.Reseeder.Peer(r)\n\n\tsu3, err := s.Reseeder.PeerSu3Bytes(peer)\n\tif nil != err {\n\t\thttp.Error(w, \"500 Unable to get SU3\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Disposition\", \"attachment; filename=i2pseeds.su3\")\n\tw.Header().Set(\"Content-Type\", \"application\/octet-stream\")\n\tw.Header().Set(\"Content-Length\", strconv.FormatInt(int64(len(su3)), 10))\n\n\tio.Copy(w, bytes.NewReader(su3))\n}\n\nfunc loggingMiddleware(next http.Handler) http.Handler {\n\treturn handlers.CombinedLoggingHandler(os.Stdout, next)\n}\n\nfunc verifyMiddleware(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tif I2P_USER_AGENT != r.UserAgent() {\n\t\t\thttp.Error(w, \"403 Forbidden\", http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}\n\nfunc proxiedMiddleware(next http.Handler) http.Handler {\n\tfn := func(w http.ResponseWriter, r *http.Request) {\n\t\tif prior, ok := r.Header[\"X-Forwarded-For\"]; ok {\n\t\t\tr.RemoteAddr = prior[0]\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\t}\n\treturn http.HandlerFunc(fn)\n}\n<|endoftext|>"} {"text":"package resource\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\n\t\"github.com\/dnaeon\/gru\/utils\"\n\t\"github.com\/hashicorp\/hcl\"\n\t\"github.com\/hashicorp\/hcl\/hcl\/ast\"\n\t\"github.com\/imdario\/mergo\"\n)\n\n\/\/ Name and description of the resource\nconst fileResourceType = \"file\"\nconst fileResourceDesc = \"manages files\"\n\n\/\/ BaseFileResource is the base resource for managing files\ntype BaseFileResource struct {\n\t\/\/ Path to the file\n\tPath string `hcl:\"path\"`\n\n\t\/\/ Permission bits to set on the file\n\tMode int `hcl:\"mode\"`\n\n\t\/\/ Owner of the file\n\tOwner string `hcl:\"owner\"`\n\n\t\/\/ Group of the file\n\tGroup string `hcl:\"group\"`\n\n\t\/\/ Source file to use when creating\/updating the file\n\tSource string `hcl:\"source\"`\n\n\t\/\/ The destination file we manage\n\tdstFile *utils.FileUtil\n}\n\n\/\/ permissionsChanged returns a boolean indicating whether the\n\/\/ permissions of the file managed by the resource is different than the\n\/\/ permissions defined by the resource\nfunc (bfr *BaseFileResource) permissionsChanged() (bool, error) {\n\tm, err := bfr.dstFile.Mode()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn m.Perm() != os.FileMode(bfr.Mode), nil\n}\n\n\/\/ ownerChanged returns a boolean indicating whether the\n\/\/ owner\/group of the file managed by the resource is different than the\n\/\/ owner\/group defined by the resource\nfunc (bfr *BaseFileResource) ownerChanged() (bool, error) {\n\towner, err := bfr.dstFile.Owner()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif bfr.Owner != owner.User.Username || bfr.Group != owner.Group.Name {\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}\n\n\/\/ contentChanged returns a boolean indicating whether the\n\/\/ content of the file managed by the resource is different than the\n\/\/ content of the source file defined by the resource\nfunc (bfr *BaseFileResource) contentChanged(srcPath string) (bool, error) {\n\tif bfr.Source == \"\" {\n\t\treturn false, nil\n\t}\n\n\tsrcFile := utils.NewFileUtil(srcPath)\n\tsrcMd5, err := srcFile.Md5()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tdstMd5, err := bfr.dstFile.Md5()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn srcMd5 != dstMd5, nil\n}\n\n\/\/ FileResource is a resource which manages files\ntype FileResource struct {\n\tBaseResource `hcl:\",squash\"`\n\tBaseFileResource `hcl:\",squash\"`\n}\n\n\/\/ NewFileResource creates a new resource for managing files\nfunc NewFileResource(title string, obj *ast.ObjectItem) (Resource, error) {\n\t\/\/ Defaults for owner and group\n\tcurrentUser, err := user.Current()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcurrentGroup, err := user.LookupGroupId(currentUser.Gid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefaultOwner := currentUser.Username\n\tdefaultGroup := currentGroup.Name\n\n\t\/\/ Resource defaults\n\tdefaults := FileResource{\n\t\tBaseResource: BaseResource{\n\t\t\tTitle: title,\n\t\t\tType: fileResourceType,\n\t\t\tState: StatePresent,\n\t\t},\n\t\tBaseFileResource: BaseFileResource{\n\t\t\tPath: title,\n\t\t\tMode: 0644,\n\t\t\tOwner: defaultOwner,\n\t\t\tGroup: defaultGroup,\n\t\t},\n\t}\n\n\tvar fr FileResource\n\terr = hcl.DecodeObject(&fr, obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Merge the decoded object with the resource defaults\n\terr = mergo.Merge(&fr, defaults)\n\n\t\/\/ The destination file we manage\n\tfr.dstFile = utils.NewFileUtil(fr.Path)\n\n\treturn &fr, err\n}\n\n\/\/ Evaluate evaluates the file resource\nfunc (fr *FileResource) Evaluate(w io.Writer, opts *Options) (State, error) {\n\trs := State{\n\t\tCurrent: StateUnknown,\n\t\tWant: fr.State,\n\t\tUpdate: false,\n\t}\n\n\t\/\/ File does not exist\n\tfi, err := os.Stat(fr.Path)\n\tif os.IsNotExist(err) {\n\t\trs.Current = StateAbsent\n\t\treturn rs, nil\n\t}\n\n\t\/\/ Ensure that the file we manage is a regular file\n\trs.Current = StatePresent\n\tif !fi.Mode().IsRegular() {\n\t\treturn rs, fmt.Errorf(\"%s is not a regular file\", fr.Path)\n\t}\n\n\t\/\/ Check file content\n\tsrcPath := filepath.Join(opts.SiteDir, \"data\", fr.Source)\n\tchanged, err := fr.contentChanged(srcPath)\n\tif err != nil {\n\t\treturn rs, err\n\t}\n\n\tif changed {\n\t\tfr.Printf(w, \"content is out of date\\n\")\n\t\trs.Update = true\n\t}\n\n\t\/\/ Check file permissions\n\tchanged, err = fr.permissionsChanged()\n\tif err != nil {\n\t\treturn rs, err\n\t}\n\n\tif changed {\n\t\tfr.Printf(w, \"permissions are out of date\\n\")\n\t\trs.Update = true\n\t}\n\n\t\/\/ Check ownership\n\tchanged, err = fr.ownerChanged()\n\tif err != nil {\n\t\treturn rs, err\n\t}\n\n\tif changed {\n\t\tfr.Printf(w, \"owner is out of date\\n\")\n\t\trs.Update = true\n\t}\n\n\treturn rs, nil\n}\n\n\/\/ Create creates the file\nfunc (fr *FileResource) Create(w io.Writer, opts *Options) error {\n\tfr.Printf(w, \"creating file\\n\")\n\n\t\/\/ Set file content\n\tswitch {\n\tcase fr.Source == \"\" && fr.dstFile.Exists():\n\t\t\/\/ Do nothing\n\t\tbreak\n\tcase fr.Source == \"\" && !fr.dstFile.Exists():\n\t\t\/\/ Create an empty file\n\t\tif _, err := os.Create(fr.Path); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase fr.Source != \"\" && fr.dstFile.Exists():\n\t\t\/\/ File exists and we have a source file\n\t\tsrcPath := filepath.Join(opts.SiteDir, \"data\", fr.Source)\n\t\tif err := fr.dstFile.CopyFrom(srcPath); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Set file owner\n\tif err := fr.dstFile.SetOwner(fr.Owner, fr.Group); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Set file permissions\n\treturn fr.dstFile.Chmod(os.FileMode(fr.Mode))\n}\n\n\/\/ Delete deletes the file\nfunc (fr *FileResource) Delete(w io.Writer, opts *Options) error {\n\tfr.Printf(w, \"removing file\\n\")\n\n\treturn fr.dstFile.Remove()\n}\n\n\/\/ Update updates the file managed by the resource\nfunc (fr *FileResource) Update(w io.Writer, opts *Options) error {\n\t\/\/ Update file content if needed\n\tsrcPath := filepath.Join(opts.SiteDir, \"data\", fr.Source)\n\tchanged, err := fr.contentChanged(srcPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif changed {\n\t\tsrcFile := utils.NewFileUtil(srcPath)\n\t\tsrcMd5, err := srcFile.Md5()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfr.Printf(w, \"updating content to md5:%s\\n\", srcMd5)\n\t\tif err := fr.dstFile.CopyFrom(srcFile.Path); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Fix permissions if needed\n\tchanged, err = fr.permissionsChanged()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif changed {\n\t\tfr.Printf(w, \"setting permissions to %#o\\n\", fr.Mode)\n\t\tfr.dstFile.Chmod(os.FileMode(fr.Mode))\n\t}\n\n\t\/\/ Fix ownership if needed\n\tchanged, err = fr.ownerChanged()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif changed {\n\t\tfr.Printf(w, \"setting owner to %s:%s\\n\", fr.Owner, fr.Group)\n\t\tif err := fr.dstFile.SetOwner(fr.Owner, fr.Group); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc init() {\n\titem := RegistryItem{\n\t\tName: fileResourceType,\n\t\tDescription: fileResourceDesc,\n\t\tProvider: NewFileResource,\n\t}\n\n\tRegister(item)\n}\nresource: refactor resource.FileResource typepackage resource\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\n\t\"github.com\/dnaeon\/gru\/utils\"\n\t\"github.com\/hashicorp\/hcl\"\n\t\"github.com\/hashicorp\/hcl\/hcl\/ast\"\n\t\"github.com\/imdario\/mergo\"\n)\n\n\/\/ Name and description of the resource\nconst fileResourceType = \"file\"\nconst fileResourceDesc = \"manages files\"\n\n\/\/ BaseFileResource is the base resource for managing files\ntype BaseFileResource struct {\n\t\/\/ Path to the file\n\tPath string `hcl:\"path\"`\n\n\t\/\/ Permission bits to set on the file\n\tMode int `hcl:\"mode\"`\n\n\t\/\/ Owner of the file\n\tOwner string `hcl:\"owner\"`\n\n\t\/\/ Group of the file\n\tGroup string `hcl:\"group\"`\n\n\t\/\/ Source file to use when creating\/updating the file\n\tSource string `hcl:\"source\"`\n}\n\n\/\/ FileResource is a resource which manages files\ntype FileResource struct {\n\tBaseResource `hcl:\",squash\"`\n\tBaseFileResource `hcl:\",squash\"`\n}\n\n\/\/ NewFileResource creates a new resource for managing files\nfunc NewFileResource(title string, obj *ast.ObjectItem) (Resource, error) {\n\t\/\/ Defaults for owner and group\n\tcurrentUser, err := user.Current()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcurrentGroup, err := user.LookupGroupId(currentUser.Gid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefaultOwner := currentUser.Username\n\tdefaultGroup := currentGroup.Name\n\n\t\/\/ Resource defaults\n\tdefaults := FileResource{\n\t\tBaseResource: BaseResource{\n\t\t\tTitle: title,\n\t\t\tType: fileResourceType,\n\t\t\tState: StatePresent,\n\t\t},\n\t\tBaseFileResource: BaseFileResource{\n\t\t\tPath: title,\n\t\t\tMode: 0644,\n\t\t\tOwner: defaultOwner,\n\t\t\tGroup: defaultGroup,\n\t\t},\n\t}\n\n\tvar fr FileResource\n\terr = hcl.DecodeObject(&fr, obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Merge the decoded object with the resource defaults\n\terr = mergo.Merge(&fr, defaults)\n\n\treturn &fr, err\n}\n\n\/\/ Evaluate evaluates the file resource\nfunc (fr *FileResource) Evaluate(w io.Writer, opts *Options) (State, error) {\n\trs := State{\n\t\tCurrent: StateUnknown,\n\t\tWant: fr.State,\n\t\tUpdate: false,\n\t}\n\n\t\/\/ The file we manage\n\tdst := utils.NewFileUtil(fr.Path)\n\n\t\/\/ File does not exist\n\tfi, err := os.Stat(fr.Path)\n\tif os.IsNotExist(err) {\n\t\trs.Current = StateAbsent\n\t\treturn rs, nil\n\t} else {\n\t\trs.Current = StatePresent\n\t}\n\n\t\/\/ Ensure that the file we manage is a regular file\n\tif !fi.Mode().IsRegular() {\n\t\treturn rs, fmt.Errorf(\"%s is not a regular file\", fr.Path)\n\t}\n\n\t\/\/ Check file content\n\tif fr.Source != \"\" {\n\t\tsrcPath := filepath.Join(opts.SiteDir, \"data\", fr.Source)\n\t\tsame, err := dst.SameContentWith(srcPath)\n\t\tif err != nil {\n\t\t\treturn rs, err\n\t\t}\n\t\tif !same {\n\t\t\tfr.Printf(w, \"content is out of date\\n\")\n\t\t\trs.Update = true\n\t\t}\n\t}\n\n\t\/\/ Check file permissions\n\tmode, err := dst.Mode()\n\tif err != nil {\n\t\treturn rs, err\n\t}\n\n\tif mode.Perm() != os.FileMode(fr.Mode) {\n\t\tfr.Printf(w, \"permissions are out of date\\n\")\n\t\trs.Update = true\n\t}\n\n\t\/\/ Check ownership\n\towner, err := dst.Owner()\n\tif err != nil {\n\t\treturn rs, err\n\t}\n\n\tif fr.Owner != owner.User.Username || fr.Group != owner.Group.Name {\n\t\tfr.Printf(w, \"owner is out of date\\n\")\n\t\trs.Update = true\n\t}\n\n\treturn rs, nil\n}\n\n\/\/ Create creates the file managed by the resource\nfunc (fr *FileResource) Create(w io.Writer, opts *Options) error {\n\tdst := utils.NewFileUtil(fr.Path)\n\tfr.Printf(w, \"creating file\\n\")\n\n\t\/\/ Set file content\n\tswitch {\n\tcase fr.Source == \"\" && dst.Exists():\n\t\t\/\/ Do nothing\n\t\tbreak\n\tcase fr.Source == \"\" && !dst.Exists():\n\t\t\/\/ Create an empty file\n\t\tif _, err := os.Create(fr.Path); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase fr.Source != \"\" && dst.Exists():\n\t\t\/\/ File exists and we have a source file\n\t\tsrcPath := filepath.Join(opts.SiteDir, \"data\", fr.Source)\n\t\tif err := dst.CopyFrom(srcPath); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Set file owner\n\tif err := dst.SetOwner(fr.Owner, fr.Group); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Set file permissions\n\treturn dst.Chmod(os.FileMode(fr.Mode))\n}\n\n\/\/ Delete deletes the file\nfunc (fr *FileResource) Delete(w io.Writer, opts *Options) error {\n\tfr.Printf(w, \"removing file\\n\")\n\tdst := utils.NewFileUtil(fr.Path)\n\n\treturn dst.Remove()\n}\n\n\/\/ Update updates the file managed by the resource\nfunc (fr *FileResource) Update(w io.Writer, opts *Options) error {\n\tdst := utils.NewFileUtil(fr.Path)\n\n\t\/\/ Update file content if needed\n\tif fr.Source != \"\" {\n\t\tsrcPath := filepath.Join(opts.SiteDir, \"data\", fr.Source)\n\t\tsame, err := dst.SameContentWith(srcPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif !same {\n\t\t\tsrcFile := utils.NewFileUtil(srcPath)\n\t\t\tsrcMd5, err := srcFile.Md5()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfr.Printf(w, \"updating content to md5:%s\\n\", srcMd5)\n\t\t\tif err := dst.CopyFrom(srcPath); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Fix permissions if needed\n\tmode, err := dst.Mode()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif mode.Perm() != os.FileMode(fr.Mode) {\n\t\tfr.Printf(w, \"setting permissions to %#o\\n\", fr.Mode)\n\t\tdst.Chmod(os.FileMode(fr.Mode))\n\t}\n\n\t\/\/ Fix ownership if needed\n\towner, err := dst.Owner()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif fr.Owner != owner.User.Username || fr.Group != owner.Group.Name {\n\t\tfr.Printf(w, \"setting owner to %s:%s\\n\", fr.Owner, fr.Group)\n\t\tif err := dst.SetOwner(fr.Owner, fr.Group); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc init() {\n\titem := RegistryItem{\n\t\tName: fileResourceType,\n\t\tDescription: fileResourceDesc,\n\t\tProvider: NewFileResource,\n\t}\n\n\tRegister(item)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright (c) 2014 Couchbase, Inc.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the\n\/\/ License. You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an \"AS\n\/\/ IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n\/\/ express or implied. See the License for the specific language\n\/\/ governing permissions and limitations under the License.\n\npackage rest\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"github.com\/couchbase\/cbgt\"\n)\n\n\/\/ TODO: Need to give the codebase a scrub of its log\n\/\/ messages and fmt.Errorf()'s.\n\n\/\/ LogGetHandler is a REST handler that retrieves recent log messages.\ntype LogGetHandler struct {\n\tmgr *cbgt.Manager\n\tmr *cbgt.MsgRing\n}\n\nfunc NewLogGetHandler(\n\tmgr *cbgt.Manager, mr *cbgt.MsgRing) *LogGetHandler {\n\treturn &LogGetHandler{mgr: mgr, mr: mr}\n}\n\nfunc (h *LogGetHandler) ServeHTTP(\n\tw http.ResponseWriter, req *http.Request) {\n\tw.Write([]byte(`{\"messages\":[`))\n\tfor i, message := range h.mr.Messages() {\n\t\tbuf, err := json.Marshal(string(message))\n\t\tif err == nil {\n\t\t\tif i > 0 {\n\t\t\t\tw.Write(cbgt.JsonComma)\n\t\t\t}\n\t\t\tw.Write(buf)\n\t\t}\n\t}\n\tw.Write([]byte(`],\"events\":[`))\n\tif h.mgr != nil {\n\t\tfirst := true\n\t\th.mgr.Lock()\n\t\tp := h.mgr.Events().Front()\n\t\tfor p != nil {\n\t\t\tif !first {\n\t\t\t\tw.Write(cbgt.JsonComma)\n\t\t\t}\n\t\t\tfirst = false\n\t\t\tw.Write(p.Value.([]byte))\n\t\t\tp = p.Next()\n\t\t}\n\t\th.mgr.Unlock()\n\t}\n\tw.Write([]byte(`]}`))\n}\nfixed #3 - check nil MsgRing in REST log handler\/\/ Copyright (c) 2014 Couchbase, Inc.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the\n\/\/ License. You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an \"AS\n\/\/ IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n\/\/ express or implied. See the License for the specific language\n\/\/ governing permissions and limitations under the License.\n\npackage rest\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"github.com\/couchbase\/cbgt\"\n)\n\n\/\/ TODO: Need to give the codebase a scrub of its log\n\/\/ messages and fmt.Errorf()'s.\n\n\/\/ LogGetHandler is a REST handler that retrieves recent log messages.\ntype LogGetHandler struct {\n\tmgr *cbgt.Manager\n\tmr *cbgt.MsgRing\n}\n\nfunc NewLogGetHandler(\n\tmgr *cbgt.Manager, mr *cbgt.MsgRing) *LogGetHandler {\n\treturn &LogGetHandler{mgr: mgr, mr: mr}\n}\n\nfunc (h *LogGetHandler) ServeHTTP(\n\tw http.ResponseWriter, req *http.Request) {\n\tw.Write([]byte(`{\"messages\":[`))\n\tif h.mr != nil {\n\t\tfor i, message := range h.mr.Messages() {\n\t\t\tbuf, err := json.Marshal(string(message))\n\t\t\tif err == nil {\n\t\t\t\tif i > 0 {\n\t\t\t\t\tw.Write(cbgt.JsonComma)\n\t\t\t\t}\n\t\t\t\tw.Write(buf)\n\t\t\t}\n\t\t}\n\t}\n\tw.Write([]byte(`],\"events\":[`))\n\tif h.mgr != nil {\n\t\tfirst := true\n\t\th.mgr.Lock()\n\t\tp := h.mgr.Events().Front()\n\t\tfor p != nil {\n\t\t\tif !first {\n\t\t\t\tw.Write(cbgt.JsonComma)\n\t\t\t}\n\t\t\tfirst = false\n\t\t\tw.Write(p.Value.([]byte))\n\t\t\tp = p.Next()\n\t\t}\n\t\th.mgr.Unlock()\n\t}\n\tw.Write([]byte(`]}`))\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage metrics\n\nimport (\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/procfs\"\n\t\"k8s.io\/klog\"\n\t\"os\"\n\t\"time\"\n)\n\nvar processStartTime = prometheus.NewGaugeVec(\n\tprometheus.GaugeOpts{\n\t\tName: \"process_start_time_seconds\",\n\t\tHelp: \"Start time of the process since unix epoch in seconds.\",\n\t},\n\t[]string{},\n)\n\n\/\/ RegisterProcessStartTime registers the process_start_time_seconds to\n\/\/ a prometheus registry. This metric needs to be included to ensure counter\n\/\/ data fidelity.\nfunc RegisterProcessStartTime(registerer prometheus.Registerer) error {\n\tstart, err := getProcessStart()\n\tif err != nil {\n\t\tklog.Errorf(\"Could not get process start time, %v\", err)\n\t\tstart = float64(time.Now().Unix())\n\t}\n\tprocessStartTime.WithLabelValues().Set(start)\n\treturn registerer.Register(processStartTime)\n}\n\nfunc getProcessStart() (float64, error) {\n\tpid := os.Getpid()\n\tp, err := procfs.NewProc(pid)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif stat, err := p.NewStat(); err == nil {\n\t\treturn stat.StartTime()\n\t}\n\treturn 0, err\n}\nmigrate kubelet's metrics\/probes & metrics endpoint to metrics stability framework\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage metrics\n\nimport (\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/procfs\"\n\t\"k8s.io\/klog\"\n\t\"os\"\n\t\"time\"\n)\n\nvar processStartTime = prometheus.NewGaugeVec(\n\tprometheus.GaugeOpts{\n\t\tName: \"process_start_time_seconds\",\n\t\tHelp: \"Start time of the process since unix epoch in seconds.\",\n\t},\n\t[]string{},\n)\n\n\/\/ Registerer is an interface expected by RegisterProcessStartTime in order to register the metric\ntype Registerer interface {\n\tRegister(prometheus.Collector) error\n}\n\n\/\/ RegisterProcessStartTime registers the process_start_time_seconds to\n\/\/ a prometheus registry. This metric needs to be included to ensure counter\n\/\/ data fidelity.\nfunc RegisterProcessStartTime(registrationFunc func(prometheus.Collector) error) error {\n\tstart, err := getProcessStart()\n\tif err != nil {\n\t\tklog.Errorf(\"Could not get process start time, %v\", err)\n\t\tstart = float64(time.Now().Unix())\n\t}\n\tprocessStartTime.WithLabelValues().Set(start)\n\treturn registrationFunc(processStartTime)\n}\n\nfunc getProcessStart() (float64, error) {\n\tpid := os.Getpid()\n\tp, err := procfs.NewProc(pid)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif stat, err := p.NewStat(); err == nil {\n\t\treturn stat.StartTime()\n\t}\n\treturn 0, err\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"net\/http\"\n\t\"flag\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"log\"\n\tph \"github.com\/advptr\/reverse-proxy\/proxyhandler\"\n\t\"crypto\/x509\"\n)\n\n\/\/ Main config\ntype Config struct {\n\tTrustFiles []string `json:\"trust-files\"`\n\tRoutes []ph.Route `json:\"routes\"\"`\n}\n\n\n\/\/ Program Options\ntype Options struct {\n\tServerAddress string\n\tConfigFile string\n}\n\n\n\/\/ Main Bootstrap\nfunc main() {\n\targs := args()\n\tconfig := config(args.ConfigFile)\n\n\tfor _, r := range config.Routes {\n\t\thandler := ph.NewWSHandler(r, config.trustedCertPool())\n\t\thttp.HandleFunc(handler.Path(), handler.Handle)\n\t}\n\tlog.Printf(\"reverse-proxy on %v\\n\", args.ServerAddress)\n\n\terr := http.ListenAndServe(args.ServerAddress, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\n\/\/ Parse command args\nfunc args() Options {\n\tconst (\n\t\tdefaultServerAddress = \":80\"\n\t\tserverAddressUsage = \"server address: ':80', '0.0.0.0:8080'...\"\n\t\tdefaultRouteConfig = \"config.json\"\n\t\trouteConfigUsage = \"configuration file: 'config.json'\"\n\t)\n\taddress := flag.String(\"address\", defaultServerAddress, serverAddressUsage)\n\tconfig := flag.String(\"config\", defaultRouteConfig, routeConfigUsage)\n\tflag.Parse()\n\n\treturn Options{*address, *config}\n}\n\n\/\/\nfunc (c *Config) trustedCertPool() (*x509.CertPool) {\n\ttrustedCertPool := x509.NewCertPool()\n\tfor _, file := range c.TrustFiles {\n\t\ttrustedCert, err := ioutil.ReadFile(file)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\ttrustedCertPool.AppendCertsFromPEM(trustedCert);\n\t}\n\n\treturn trustedCertPool\n}\n\n\n\/\/ Unmarshal routes from JSON configuration file\nfunc config(configFile string) Config {\n\tfile, err := ioutil.ReadFile(configFile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar config Config\n\terr = json.Unmarshal(file, &config)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn config\n}\n\none cert pool is good enough.package main\n\nimport (\n\t\"net\/http\"\n\t\"flag\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"log\"\n\tph \"github.com\/advptr\/reverse-proxy\/proxyhandler\"\n\t\"crypto\/x509\"\n)\n\n\/\/ Main config\ntype Config struct {\n\tTrustFiles []string `json:\"trust-files\"`\n\tRoutes []ph.Route `json:\"routes\"\"`\n}\n\n\n\/\/ Program Options\ntype Options struct {\n\tServerAddress string\n\tConfigFile string\n}\n\n\n\/\/ Main Bootstrap\nfunc main() {\n\targs := args()\n\tconfig := config(args.ConfigFile)\n\ttrustedCertPool := config.trustedCertPool()\n\tfor _, r := range config.Routes {\n\t\thandler := ph.NewWSHandler(r, trustedCertPool)\n\t\thttp.HandleFunc(handler.Path(), handler.Handle)\n\t}\n\tlog.Printf(\"reverse-proxy on %v\\n\", args.ServerAddress)\n\n\terr := http.ListenAndServe(args.ServerAddress, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\n\/\/ Parse command args\nfunc args() Options {\n\tconst (\n\t\tdefaultServerAddress = \":80\"\n\t\tserverAddressUsage = \"server address: ':80', '0.0.0.0:8080'...\"\n\t\tdefaultRouteConfig = \"config.json\"\n\t\trouteConfigUsage = \"configuration file: 'config.json'\"\n\t)\n\taddress := flag.String(\"address\", defaultServerAddress, serverAddressUsage)\n\tconfig := flag.String(\"config\", defaultRouteConfig, routeConfigUsage)\n\tflag.Parse()\n\n\treturn Options{*address, *config}\n}\n\n\/\/\nfunc (c *Config) trustedCertPool() (*x509.CertPool) {\n\ttrustedCertPool := x509.NewCertPool()\n\tfor _, file := range c.TrustFiles {\n\t\ttrustedCert, err := ioutil.ReadFile(file)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\ttrustedCertPool.AppendCertsFromPEM(trustedCert);\n\t}\n\n\treturn trustedCertPool\n}\n\n\n\/\/ Unmarshal routes from JSON configuration file\nfunc config(configFile string) Config {\n\tfile, err := ioutil.ReadFile(configFile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar config Config\n\terr = json.Unmarshal(file, &config)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn config\n}\n\n<|endoftext|>"} {"text":"package main\n\ntype Point struct {\n\tX int\n\tY int\n}\n\ntype Direction int\n\nconst (\n\tNorth Direction = iota\n\tSouth\n\tEast\n\tWest\n)\n\nfunc (d Direction) Reverse() Direction {\n\tswitch d {\n\tcase North:\n\t\treturn South\n\tcase South:\n\t\treturn North\n\tcase East:\n\t\treturn West\n\tcase West:\n\t\treturn East\n\tdefault:\n\t\treturn North\n\t}\n}\n\ntype Exits map[Direction]*Room\n\ntype Room struct {\n\tName string\n\tDescription string\n\tPosition Point\n\tExits Exits\n\tDungeon *Dungeon\n}\n\ntype Dungeon struct {\n\tRooms map[Point]*Room\n}\n\n\/\/ OpenExit creates a connection from the receiver's Room to another Room. If\n\/\/ a Room already exists in the Direction specified, Exits on both Rooms are\n\/\/ linked. If no Room exists, a new one is created and added to the Dungeon.\n\/\/ If an exit is already open in that Direction, returns false.\nfunc (r *Room) OpenExit(direction Direction) bool {\n\tif _, present := r.Exits[direction]; present {\n\t\treturn false\n\t}\n\n\tchangeX := 0\n\tchangeY := 0\n\n\tswitch direction {\n\tcase North:\n\t\tchangeY = -1\n\tcase South:\n\t\tchangeY = 1\n\tcase East:\n\t\tchangeX = 1\n\tcase West:\n\t\tchangeX = -1\n\tdefault:\n\t\treturn false\n\t}\n\n\tdestinationPosition := Point{\n\t\tX: r.Position.X + changeX,\n\t\tY: r.Position.Y + changeY,\n\t}\n\n\tif destinationRoom, roomPresent := r.Dungeon.Rooms[destinationPosition]; roomPresent {\n\t\tr.Exits[direction] = destinationRoom\n\t\tdestinationRoom.Exits[direction.Reverse()] = r\n\t}\n\n\treturn true\n}\nAdd Dungeon initializer NewDungeonpackage main\n\ntype Point struct {\n\tX int\n\tY int\n}\n\ntype Direction int\n\nconst (\n\tNorth Direction = iota\n\tSouth\n\tEast\n\tWest\n)\n\nfunc (d Direction) Reverse() Direction {\n\tswitch d {\n\tcase North:\n\t\treturn South\n\tcase South:\n\t\treturn North\n\tcase East:\n\t\treturn West\n\tcase West:\n\t\treturn East\n\tdefault:\n\t\treturn North\n\t}\n}\n\ntype Exits map[Direction]*Room\n\ntype Room struct {\n\tName string\n\tDescription string\n\tPosition Point\n\tExits Exits\n\tDungeon *Dungeon\n}\n\ntype Dungeon struct {\n\tRooms map[Point]*Room\n}\n\n\/\/ OpenExit creates a connection from the receiver's Room to another Room. If\n\/\/ a Room already exists in the Direction specified, Exits on both Rooms are\n\/\/ linked. If no Room exists, a new one is created and added to the Dungeon.\n\/\/ If an exit is already open in that Direction, returns false.\nfunc (r *Room) OpenExit(direction Direction) bool {\n\tif _, present := r.Exits[direction]; present {\n\t\treturn false\n\t}\n\n\tchangeX := 0\n\tchangeY := 0\n\n\tswitch direction {\n\tcase North:\n\t\tchangeY = -1\n\tcase South:\n\t\tchangeY = 1\n\tcase East:\n\t\tchangeX = 1\n\tcase West:\n\t\tchangeX = -1\n\tdefault:\n\t\treturn false\n\t}\n\n\tdestinationPosition := Point{\n\t\tX: r.Position.X + changeX,\n\t\tY: r.Position.Y + changeY,\n\t}\n\n\tif destinationRoom, roomPresent := r.Dungeon.Rooms[destinationPosition]; roomPresent {\n\t\tr.Exits[direction] = destinationRoom\n\t\tdestinationRoom.Exits[direction.Reverse()] = r\n\t}\n\n\treturn true\n}\n\n\/\/ NewDungeon returns a new Dungeon type initialized with one empty Room.\nfunc NewDungeon() *Dungeon {\n\tfirstRoom := &Room{}\n\trooms := make(map[Point]*Room)\n\trooms[Point{X: 0, Y: 0}] = firstRoom\n\n\treturn &Dungeon{\n\t\tRooms: rooms,\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ Package durafmt formats time.Duration into a human readable format.\npackage durafmt\n\nimport (\n\t\"errors\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\tunits = []string{\"years\", \"weeks\", \"days\", \"hours\", \"minutes\", \"seconds\", \"milliseconds\"}\n)\n\n\/\/ Durafmt holds the parsed duration and the original input duration.\ntype Durafmt struct {\n\tduration time.Duration\n\tinput string \/\/ Used as reference.\n}\n\n\/\/ Parse creates a new *Durafmt struct, returns error if input is invalid.\nfunc Parse(dinput time.Duration) *Durafmt {\n\tinput := dinput.String()\n\treturn &Durafmt{dinput, input}\n}\n\n\/\/ ParseString creates a new *Durafmt struct from a string.\n\/\/ returns an error if input is invalid.\nfunc ParseString(input string) (*Durafmt, error) {\n\tif input == \"0\" || input == \"-0\" {\n\t\treturn nil, errors.New(\"durafmt: missing unit in duration \" + input)\n\t}\n\tduration, err := time.ParseDuration(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Durafmt{duration, input}, nil\n}\n\n\/\/ String parses d *Durafmt into a human readable duration.\nfunc (d *Durafmt) String() string {\n\tvar duration string\n\n\t\/\/ Check for minus durations.\n\tif string(d.input[0]) == \"-\" {\n\t\tduration += \"-\"\n\t\td.duration = -d.duration\n\t}\n\n\t\/\/ Convert duration.\n\tseconds := int64(d.duration.Seconds()) % 60\n\tminutes := int64(d.duration.Minutes()) % 60\n\thours := int64(d.duration.Hours()) % 24\n\tdays := int64(d.duration\/(24*time.Hour)) % 365 % 7\n\tweeks := int64(d.duration\/(24*time.Hour)) \/ 7 % 52\n\tyears := int64(d.duration\/(24*time.Hour)) \/ 365\n\tmilliseconds := int64(d.duration\/time.Millisecond) -\n\t\t(seconds * 1000) - (minutes * 60000) - (hours * 3600000) -\n\t\t(days * 86400000) - (weeks * 604800000) - (years * 31536000000)\n\n\t\/\/ Create a map of the converted duration time.\n\tdurationMap := map[string]int64{\n\t\t\"milliseconds\": milliseconds,\n\t\t\"seconds\": seconds,\n\t\t\"minutes\": minutes,\n\t\t\"hours\": hours,\n\t\t\"days\": days,\n\t\t\"weeks\": weeks,\n\t\t\"years\": years,\n\t}\n\n\t\/\/ Construct duration string.\n\tfor _, u := range units {\n\t\tv := durationMap[u]\n\t\tstrval := strconv.FormatInt(v, 10)\n\t\tswitch {\n\t\t\/\/ add to the duration string if v > 1.\n\t\tcase v > 1:\n\t\t\tduration += strval + \" \" + u + \" \"\n\t\t\/\/ remove the plural 's', if v is 1.\n\t\tcase v == 1:\n\t\t\tduration += strval + \" \" + strings.TrimRight(u, \"s\") + \" \"\n\t\t\/\/ omit any value with 0s or 0.\n\t\tcase d.duration.String() == \"0\" || d.duration.String() == \"0s\":\n\t\t\t\/\/ note: milliseconds and minutes have the same suffix (m)\n\t\t\t\/\/ so we have to check if the units match with the suffix.\n\n\t\t\t\/\/ check for a suffix that is NOT the milliseconds suffix.\n\t\t\tif strings.HasSuffix(d.input, string(u[0])) && !strings.Contains(d.input, \"ms\") {\n\t\t\t\t\/\/ if it happens that the units are milliseconds, skip.\n\t\t\t\tif u == \"milliseconds\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tduration += strval + \" \" + u\n\t\t\t}\n\t\t\t\/\/ process milliseconds here.\n\t\t\tif u == \"milliseconds\" {\n\t\t\t\tif strings.Contains(d.input, \"ms\") {\n\t\t\t\t\tduration += strval + \" \" + u\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tbreak\n\t\t\/\/ omit any value with 0.\n\t\tcase v == 0:\n\t\t\tcontinue\n\t\t}\n\t}\n\t\/\/ trim any remaining spaces.\n\tduration = strings.TrimSpace(duration)\n\treturn duration\n}\nsolving the edge case between 364 and 365 days tracked as https:\/\/github.com\/hako\/durafmt\/issues\/7\/\/ Package durafmt formats time.Duration into a human readable format.\npackage durafmt\n\nimport (\n\t\"errors\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\tunits = []string{\"years\", \"weeks\", \"days\", \"hours\", \"minutes\", \"seconds\", \"milliseconds\"}\n)\n\n\/\/ Durafmt holds the parsed duration and the original input duration.\ntype Durafmt struct {\n\tduration time.Duration\n\tinput string \/\/ Used as reference.\n}\n\n\/\/ Parse creates a new *Durafmt struct, returns error if input is invalid.\nfunc Parse(dinput time.Duration) *Durafmt {\n\tinput := dinput.String()\n\treturn &Durafmt{dinput, input}\n}\n\n\/\/ ParseString creates a new *Durafmt struct from a string.\n\/\/ returns an error if input is invalid.\nfunc ParseString(input string) (*Durafmt, error) {\n\tif input == \"0\" || input == \"-0\" {\n\t\treturn nil, errors.New(\"durafmt: missing unit in duration \" + input)\n\t}\n\tduration, err := time.ParseDuration(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Durafmt{duration, input}, nil\n}\n\n\/\/ String parses d *Durafmt into a human readable duration.\nfunc (d *Durafmt) String() string {\n\tvar duration string\n\n\t\/\/ Check for minus durations.\n\tif string(d.input[0]) == \"-\" {\n\t\tduration += \"-\"\n\t\td.duration = -d.duration\n\t}\n\n\t\/\/ Convert duration.\n\tseconds := int64(d.duration.Seconds()) % 60\n\tminutes := int64(d.duration.Minutes()) % 60\n\thours := int64(d.duration.Hours()) % 24\n\tdays := int64(d.duration\/(24*time.Hour)) % 365 % 7\n\tweeks := int64(d.duration\/(24*time.Hour)) \/ 7 % 52\n\n\t\/\/ edge case between 364 and 365 days\n\ttotal_days := int64(d.duration\/(24*time.Hour))\n if total_days >= 364 && total_days < 365 {\n\t\tweeks = 52\n\t}\n\n\tyears := int64(d.duration\/(24*time.Hour)) \/ 365\n\tmilliseconds := int64(d.duration\/time.Millisecond) -\n\t\t(seconds * 1000) - (minutes * 60000) - (hours * 3600000) -\n\t\t(days * 86400000) - (weeks * 604800000) - (years * 31536000000)\n\n\t\/\/ Create a map of the converted duration time.\n\tdurationMap := map[string]int64{\n\t\t\"milliseconds\": milliseconds,\n\t\t\"seconds\": seconds,\n\t\t\"minutes\": minutes,\n\t\t\"hours\": hours,\n\t\t\"days\": days,\n\t\t\"weeks\": weeks,\n\t\t\"years\": years,\n\t}\n\n\t\/\/ Construct duration string.\n\tfor _, u := range units {\n\t\tv := durationMap[u]\n\t\tstrval := strconv.FormatInt(v, 10)\n\t\tswitch {\n\t\t\/\/ add to the duration string if v > 1.\n\t\tcase v > 1:\n\t\t\tduration += strval + \" \" + u + \" \"\n\t\t\/\/ remove the plural 's', if v is 1.\n\t\tcase v == 1:\n\t\t\tduration += strval + \" \" + strings.TrimRight(u, \"s\") + \" \"\n\t\t\/\/ omit any value with 0s or 0.\n\t\tcase d.duration.String() == \"0\" || d.duration.String() == \"0s\":\n\t\t\t\/\/ note: milliseconds and minutes have the same suffix (m)\n\t\t\t\/\/ so we have to check if the units match with the suffix.\n\n\t\t\t\/\/ check for a suffix that is NOT the milliseconds suffix.\n\t\t\tif strings.HasSuffix(d.input, string(u[0])) && !strings.Contains(d.input, \"ms\") {\n\t\t\t\t\/\/ if it happens that the units are milliseconds, skip.\n\t\t\t\tif u == \"milliseconds\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tduration += strval + \" \" + u\n\t\t\t}\n\t\t\t\/\/ process milliseconds here.\n\t\t\tif u == \"milliseconds\" {\n\t\t\t\tif strings.Contains(d.input, \"ms\") {\n\t\t\t\t\tduration += strval + \" \" + u\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tbreak\n\t\t\/\/ omit any value with 0.\n\t\tcase v == 0:\n\t\t\tcontinue\n\t\t}\n\t}\n\t\/\/ trim any remaining spaces.\n\tduration = strings.TrimSpace(duration)\n\treturn duration\n}\n<|endoftext|>"} {"text":"package logger\n\nimport (\n\t\"strconv\"\n\t\"time\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/kataras\/iris\"\n\t\"github.com\/iris-contrib\/logger\"\n\t\"github.com\/reconquest\/loreley\"\n)\n\ntype loggerMiddleware struct {\n\t*logger.Logger\n\tconfig Config\n}\n\nvar mapLock sync.RWMutex\n\n\/\/ Serve serves the middleware\nfunc (l *loggerMiddleware) Serve(ctx *iris.Context) {\n\t\/\/all except latency to string\n\tvar date, timed, latency, status, ip, method, path string\n\tvar startTime, endTime time.Time\n\tpath = ctx.PathString()\n\tmethod = ctx.MethodString()\n\n\tstartTime = time.Now()\n\n\tctx.Next()\n\t\/\/no time.Since in order to format it well after\n\tendTime = time.Now()\n\ttimed = rightPad2Len(endTime.Format(\"15:04:05.999999\"), \"0\", 15)\n\tdate = endTime.Format(\"02\/01\/2006\")\n\tlatency = endTime.Sub(startTime).String()\n\tparts := strings.Split(latency,\".\")\n\tif(len(parts) == 2) {\n\t \tif(!strings.Contains(parts[1],\"ms\")) {\n\t\t\tparts[1] = leftPad2Len(parts[1],\"0\",6)\n\t\t}\n\t\tlatency = parts[0] + \".\" + parts[1]\n\t}\n\tlatency = leftPad2Len(latency,\" \",14)\n\t\n\tif l.config.Status {\n\t\tstatus = strconv.Itoa(ctx.Response.StatusCode())\n\t}\n\n\tif l.config.IP {\n\t\tip = leftPad2Len(ctx.RemoteAddr(),\" \",15)\n\t}\n\n\tif !l.config.Method {\n\t\tmethod = \"\"\n\t}\n\n\tif !l.config.Path {\n\t\tpath = \"\"\n\t}\n\n\tgetText, _ := loreley.CompileAndExecuteToString(\n\t\t`{bold}{fg 15}{bg 40} GET {from \"\" 0}{reset}`,\n\t\tnil,\n\t\tnil,\n\t)\n\tpostText, _ := loreley.CompileAndExecuteToString(\n\t\t`{bold}{fg 15}{bg 21} POST {from \"\" 0}{reset}`,\n\t\tnil,\n\t\tnil,\n\t)\n\theadText, _ := loreley.CompileAndExecuteToString(\n\t\t`{bold}{fg 15}{bg 53} HEAD {from \"\" 0}{reset}`,\n\t\tnil,\n\t\tnil,\n\t)\n\tputText, _ := loreley.CompileAndExecuteToString(\n\t\t`{bold}{fg 15}{bg 208} PUT {from \"\" 0}{reset}`,\n\t\tnil,\n\t\tnil,\n\t)\n\tdelText, _ := loreley.CompileAndExecuteToString(\n\t\t`{bold}{fg 15}{bg 160} DEL {from \"\" 0}{reset}`,\n\t\tnil,\n\t\tnil,\n\t)\n\n\t\/\/finally print the logs\n\tif(method == \"GET\") {\n\t\tmapLock.RLock()\n\t\tl.printf(\"%s %s - %s | %v | %4v | %s | %s \\n\", getText, timed, date, status, latency, ip, path)\n\t\tmapLock.RUnlock()\n\t} else if method == \"POST\" {\n\t\tmapLock.RLock()\n\t\tl.printf(\"%s %s - %s | %v | %4v | %s | %s \\n\", postText, timed, date, status, latency, ip, path)\n\t\tmapLock.RUnlock()\n\t} else if method == \"PUT\" {\n\t\tmapLock.RLock()\n\t\tl.printf(\"%s %s - %s | %v | %4v | %s | %s \\n\", putText, timed, date, status, latency, ip, path)\n\t\tmapLock.RUnlock()\n\t} else if method == \"HEAD\" {\n\t\tmapLock.RLock()\n\t\tl.printf(\"%s %s - %s | %v | %4v | %s | %s \\n\", headText, timed, date, status, latency, ip, path)\n\t\tmapLock.RUnlock()\n\t} else if method == \"DELETE\" {\n\t\tmapLock.RLock()\n\t\tl.printf(\"%s %s - %s | %v | %4v | %s | %s \\n\", delText, timed, date, status, latency, ip, path)\n\t\tmapLock.RUnlock()\n\t} else {\n\t\tmapLock.RLock()\n\t\tl.printf(\"%s - %s %v %4v %s %s %s \\n\", timed, date, status, latency, ip, method, path)\n\t\tmapLock.RUnlock()\n\t}\n\n}\n\nfunc rightPad2Len(s string, padStr string, overallLen int) string{\n\tvar padCountInt int\n\tpadCountInt = 1 + ((overallLen-len(padStr))\/len(padStr))\n\tvar retStr = s + strings.Repeat(padStr, padCountInt)\n\treturn retStr[:overallLen]\n}\n\nfunc leftPad2Len(s string, padStr string, overallLen int) string{\n\tvar padCountInt int\n\tpadCountInt = 1 + ((overallLen-len(padStr))\/len(padStr))\n\tvar retStr = strings.Repeat(padStr, padCountInt) + s\n\treturn retStr[(len(retStr)-overallLen):]\n}\n\nfunc (l *loggerMiddleware) printf(format string, a ...interface{}) {\n\tif l.config.EnableColors {\n\t\tl.Logger.Otherf(format, a...)\n\t} else {\n\t\tl.Logger.Printf(format, a...)\n\t}\n}\n\n\/\/ New returns the logger middleware\n\/\/ receives two parameters, both of them optionals\n\/\/ first is the logger, which normally you set to the 'iris.Logger'\n\/\/ if logger is nil then the middlewares makes one with the default configs.\n\/\/ second is optional configs(logger.Config)\nfunc New(theLogger *logger.Logger, cfg ...Config) iris.HandlerFunc {\n\tif theLogger == nil {\n\t\ttheLogger = logger.New(logger.DefaultConfig())\n\t}\n\tc := DefaultConfig().Merge(cfg)\n\tl := &loggerMiddleware{Logger: theLogger, config: c}\n\n\treturn l.Serve\n}\nUpdate logger.gopackage logger\n\nimport (\n\t\"strconv\"\n\t\"time\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/kataras\/iris\"\n\t\"github.com\/iris-contrib\/logger\"\n\t\"github.com\/reconquest\/loreley\"\n\t\"github.com\/fatih\/color\"\n)\n\ntype loggerMiddleware struct {\n\t*logger.Logger\n\tconfig Config\n}\n\nvar mapLock sync.RWMutex\n\n\/\/ Serve serves the middleware\nfunc (l *loggerMiddleware) Serve(ctx *iris.Context) {\n\t\/\/all except latency to string\n\tvar date, timed, latency, status, ip, method, path string\n\tvar startTime, endTime time.Time\n\tpath = ctx.PathString()\n\tmethod = ctx.MethodString()\n\n\tstartTime = time.Now()\n\n\tctx.Next()\n\t\/\/no time.Since in order to format it well after\n\tendTime = time.Now()\n\ttimed = rightPad2Len(endTime.Format(\"15:04:05.999999\"), \"0\", 15)\n\tdate = endTime.Format(\"02\/01\/2006\")\n\tlatency = endTime.Sub(startTime).String()\n\tparts := strings.Split(latency,\".\")\n\tif(len(parts) == 2) {\n\t if(!strings.Contains(parts[1],\"ms\")) {\n\t\t\tparts[1] = leftPad2Len(parts[1],\"0\",6)\n\t\t}\n\t\tlatency = parts[0] + \".\" + parts[1]\n\t\tlatency = leftPad2Len(latency,\" \",15)\t\t\n\t} else {\n\t\tlatency = leftPad2Len(latency,\" \",14)\n\t}\n\t\n\tif l.config.Status {\n\t\tstatus = strconv.Itoa(ctx.Response.StatusCode())\n\t}\n\n\tif l.config.IP {\n\t\tip = leftPad2Len(ctx.RemoteAddr(),\" \",15)\n\t}\n\n\tif !l.config.Method {\n\t\tmethod = \"\"\n\t}\n\n\tif !l.config.Path {\n\t\tpath = \"\"\n\t}\n\n\tgetText, _ := loreley.CompileAndExecuteToString(\n\t\t`{bold}{fg 15}{bg 40} GET {from \"\" 0}{reset}`,\n\t\tnil,\n\t\tnil,\n\t)\n\tpostText, _ := loreley.CompileAndExecuteToString(\n\t\t`{bold}{fg 15}{bg 21} POST {from \"\" 0}{reset}`,\n\t\tnil,\n\t\tnil,\n\t)\n\theadText, _ := loreley.CompileAndExecuteToString(\n\t\t`{bold}{fg 15}{bg 53} HEAD {from \"\" 0}{reset}`,\n\t\tnil,\n\t\tnil,\n\t)\n\tputText, _ := loreley.CompileAndExecuteToString(\n\t\t`{bold}{fg 15}{bg 208} PUT {from \"\" 0}{reset}`,\n\t\tnil,\n\t\tnil,\n\t)\n\tdelText, _ := loreley.CompileAndExecuteToString(\n\t\t`{bold}{fg 15}{bg 160} DEL {from \"\" 0}{reset}`,\n\t\tnil,\n\t\tnil,\n\t)\n\t\n\tif(status == \"200\" || status == \"201\") {\n\t\tstatus = color.GreenString(status)\n\t} else if(status == \"404\" || status == \"500\" || status == \"403\") {\n\t\tstatus = color.RedString(status)\n\t}\n\n\t\/\/finally print the logs\n\tif(method == \"GET\") {\n\t\tmapLock.RLock()\n\t\tl.printf(\"%s %s - %s | %v | %4v | %s | %s \\n\", getText, timed, date, status, latency, ip, path)\n\t\tmapLock.RUnlock()\n\t} else if method == \"POST\" {\n\t\tmapLock.RLock()\n\t\tl.printf(\"%s %s - %s | %v | %4v | %s | %s \\n\", postText, timed, date, status, latency, ip, path)\n\t\tmapLock.RUnlock()\n\t} else if method == \"PUT\" {\n\t\tmapLock.RLock()\n\t\tl.printf(\"%s %s - %s | %v | %4v | %s | %s \\n\", putText, timed, date, status, latency, ip, path)\n\t\tmapLock.RUnlock()\n\t} else if method == \"HEAD\" {\n\t\tmapLock.RLock()\n\t\tl.printf(\"%s %s - %s | %v | %4v | %s | %s \\n\", headText, timed, date, status, latency, ip, path)\n\t\tmapLock.RUnlock()\n\t} else if method == \"DELETE\" {\n\t\tmapLock.RLock()\n\t\tl.printf(\"%s %s - %s | %v | %4v | %s | %s \\n\", delText, timed, date, status, latency, ip, path)\n\t\tmapLock.RUnlock()\n\t} else {\n\t\tmapLock.RLock()\n\t\tl.printf(\"%s - %s %v %4v %s %s %s \\n\", timed, date, status, latency, ip, method, path)\n\t\tmapLock.RUnlock()\n\t}\n\n}\n\nfunc rightPad2Len(s string, padStr string, overallLen int) string{\n\tvar padCountInt int\n\tpadCountInt = 1 + ((overallLen-len(padStr))\/len(padStr))\n\tvar retStr = s + strings.Repeat(padStr, padCountInt)\n\treturn retStr[:overallLen]\n}\n\nfunc leftPad2Len(s string, padStr string, overallLen int) string{\n\tvar padCountInt int\n\tpadCountInt = 1 + ((overallLen-len(padStr))\/len(padStr))\n\tvar retStr = strings.Repeat(padStr, padCountInt) + s\n\treturn retStr[(len(retStr)-overallLen):]\n}\n\nfunc (l *loggerMiddleware) printf(format string, a ...interface{}) {\n\tif l.config.EnableColors {\n\t\tl.Logger.Otherf(format, a...)\n\t} else {\n\t\tl.Logger.Printf(format, a...)\n\t}\n}\n\n\/\/ New returns the logger middleware\n\/\/ receives two parameters, both of them optionals\n\/\/ first is the logger, which normally you set to the 'iris.Logger'\n\/\/ if logger is nil then the middlewares makes one with the default configs.\n\/\/ second is optional configs(logger.Config)\nfunc New(theLogger *logger.Logger, cfg ...Config) iris.HandlerFunc {\n\tif theLogger == nil {\n\t\ttheLogger = logger.New(logger.DefaultConfig())\n\t}\n\tc := DefaultConfig().Merge(cfg)\n\tl := &loggerMiddleware{Logger: theLogger, config: c}\n\n\treturn l.Serve\n}\n<|endoftext|>"} {"text":"package goble\n\nimport (\n\t\"log\"\n\n\t\"github.com\/dim13\/goble\/xpc\"\n)\n\nconst ALL = \"__allEvents__\"\n\n\/\/ Event generated by blued, with associated data\ntype Event struct {\n\tName string\n\tState string\n\tDeviceUUID xpc.UUID\n\tServiceUuid string\n\tCharacteristicUuid string\n\tPeripheral Peripheral\n\tData []byte\n\tMtu int\n\tIsNotification bool\n}\n\n\/\/ The event handler function.\n\/\/ Return true to terminate\ntype EventHandlerFunc func(Event) bool\n\n\/\/ Emitter is an object to emit and handle Event(s)\ntype Emitter struct {\n\thandlers map[string]EventHandlerFunc\n\tevent chan Event\n\tverbose bool\n}\n\n\/\/ Init initialize the emitter and start a goroutine to execute the event handlers\nfunc (e *Emitter) Init() {\n\te.handlers = make(map[string]EventHandlerFunc)\n\te.event = make(chan Event)\n\n\t\/\/ event handler\n\tgo func() {\n\t\tdefer close(e.event) \/\/ FIXME: this causes new \"emits\" to panic.\n\t\tfor ev := range e.event {\n\t\t\tif fn, ok := e.handlers[ev.Name]; ok {\n\t\t\t\tif fn(ev) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else if fn, ok := e.handlers[ALL]; ok {\n\t\t\t\tif fn(ev) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif e.verbose {\n\t\t\t\t\tlog.Println(\"unhandled Emit\", ev)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (e *Emitter) SetVerbose(v bool) {\n\te.verbose = v\n}\n\n\/\/ Emit sends the event on the 'event' channel\nfunc (e *Emitter) Emit(ev Event) {\n\te.event <- ev\n}\n\n\/\/ Register an handler for the specified event\nfunc (e *Emitter) Register(event string, fn EventHandlerFunc) {\n\te.handlers[event] = fn\n}\n\n\/\/ Deregister an handler for the specified event\nfunc (e *Emitter) Deregister(event string) {\n\tdelete(e.handlers, event)\n}\nDon't panicpackage goble\n\nimport (\n\t\"log\"\n\n\t\"github.com\/dim13\/goble\/xpc\"\n)\n\nconst ALL = \"__allEvents__\"\n\n\/\/ Event generated by blued, with associated data\ntype Event struct {\n\tName string\n\tState string\n\tDeviceUUID xpc.UUID\n\tServiceUuid string\n\tCharacteristicUuid string\n\tPeripheral Peripheral\n\tData []byte\n\tMtu int\n\tIsNotification bool\n}\n\n\/\/ The event handler function.\n\/\/ Return true to terminate\ntype EventHandlerFunc func(Event) bool\n\n\/\/ Emitter is an object to emit and handle Event(s)\ntype Emitter struct {\n\thandlers map[string]EventHandlerFunc\n\tevent chan Event\n\tverbose bool\n}\n\n\/\/ Init initialize the emitter and start a goroutine to execute the event handlers\nfunc (e *Emitter) Init() {\n\te.handlers = make(map[string]EventHandlerFunc)\n\te.event = make(chan Event)\n\n\t\/\/ event handler\n\tgo func() {\n\t\tfor ev := range e.event {\n\t\t\tfn, ok := e.handlers[ev.Name]\n\t\t\tif !ok {\n\t\t\t\tfn, ok = e.handlers[ALL]\n\t\t\t}\n\t\t\tif ok {\n\t\t\t\tgo func(ev Event) {\n\t\t\t\t\tif fn(ev) {\n\t\t\t\t\t\tclose(e.event)\n\t\t\t\t\t}\n\t\t\t\t}(ev)\n\t\t\t} else if e.verbose {\n\t\t\t\tlog.Println(\"unhandled Emit\", ev)\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (e *Emitter) SetVerbose(v bool) {\n\te.verbose = v\n}\n\n\/\/ Emit sends the event on the 'event' channel\nfunc (e *Emitter) Emit(ev Event) {\n\tselect {\n\tcase e.event <- ev:\n\tdefault:\n\t\tlog.Println(\"skip Event\", ev)\n\t}\n}\n\n\/\/ Register an handler for the specified event\nfunc (e *Emitter) Register(event string, fn EventHandlerFunc) {\n\te.handlers[event] = fn\n}\n\n\/\/ Deregister an handler for the specified event\nfunc (e *Emitter) Deregister(event string) {\n\tdelete(e.handlers, event)\n}\n<|endoftext|>"} {"text":"package eventemitter\n\nimport (\n\t\"sync\"\n)\n\n\/\/ Emitter is the base struct which manages event subscriptions and calls all registered handlers on event emits.\ntype Emitter struct {\n\tsync.RWMutex\n\n\tasync bool\n\tcapturers []*Capturer\n\tlisteners map[EventType][]*Listener\n\tlistenersOnce map[EventType][]*Listener\n}\n\n\/\/ NewEmitter creates a new event emitter that implements the Observable interface.\n\/\/ Async determines whether events listeners fire in separate goroutines or not.\nfunc NewEmitter(async bool) (em *Emitter) {\n\tem = &Emitter{\n\t\tasync: async,\n\t\tlisteners: make(map[EventType][]*Listener),\n\t\tlistenersOnce: make(map[EventType][]*Listener),\n\t}\n\treturn em\n}\n\n\/\/ EmitEvent emits the given event to all listeners and capturers\nfunc (em *Emitter) EmitEvent(event EventType, arguments ...interface{}) {\n\t\/\/ If we have no single listeners for this event, skip\n\tif len(em.listenersOnce) > 0 {\n\t\t\/\/ Get a full lock, we are changing a map\n\t\tem.Lock()\n\t\t\/\/ Copy the slice\n\t\tlistenersOnce := em.listenersOnce[event]\n\t\t\/\/ Create new empty slice\n\t\tem.listenersOnce[event] = make([]*Listener, 0)\n\t\tem.Unlock()\n\n\t\t\/\/ No lock needed, we are working with an inaccessible copy\n\t\tem.emitListenerEvents(listenersOnce, arguments)\n\t}\n\n\t\/\/ If we have no listeners for this event, skip\n\tif len(em.listeners[event]) > 0 {\n\t\tem.RLock()\n\t\tem.emitListenerEvents(em.listeners[event], arguments)\n\t\tem.RUnlock()\n\t}\n\n\t\/\/ If we have no capturers, skip\n\tif len(em.capturers) > 0 {\n\t\tem.RLock()\n\t\tem.emitCapturerEvents(em.capturers, event, arguments)\n\t\tem.RUnlock()\n\t}\n}\n\nfunc (em *Emitter) emitListenerEvents(listeners []*Listener, arguments []interface{}) {\n\tfor _, listener := range listeners {\n\t\tif em.async {\n\t\t\tgo listener.handler(arguments...)\n\t\t\tcontinue\n\t\t}\n\t\tlistener.handler(arguments...)\n\t}\n}\n\nfunc (em *Emitter) emitCapturerEvents(capturers []*Capturer, event EventType, arguments []interface{}) {\n\tfor _, capturer := range capturers {\n\t\tif em.async {\n\t\t\tgo capturer.handler(event, arguments...)\n\t\t\tcontinue\n\t\t}\n\t\tcapturer.handler(event, arguments...)\n\t}\n}\n\n\/\/ AddListener adds a listener for the given event type\nfunc (em *Emitter) AddListener(event EventType, handler HandleFunc) (listener *Listener) {\n\tem.Lock()\n\tdefer em.Unlock()\n\n\tlistener = &Listener{\n\t\thandler: handler,\n\t}\n\tem.listeners[event] = append(em.listeners[event], listener)\n\treturn listener\n}\n\n\/\/ ListenOnce adds a listener for the given event type that removes itself after it has been fired once\nfunc (em *Emitter) ListenOnce(event EventType, handler HandleFunc) (listener *Listener) {\n\tem.Lock()\n\tdefer em.Unlock()\n\n\tlistener = &Listener{\n\t\thandler: handler,\n\t}\n\tem.listenersOnce[event] = append(em.listenersOnce[event], listener)\n\treturn listener\n}\n\n\/\/ AddCapturer adds an event capturer for all events\nfunc (em *Emitter) AddCapturer(handler CaptureFunc) (capturer *Capturer) {\n\tem.Lock()\n\tdefer em.Unlock()\n\n\tcapturer = &Capturer{\n\t\thandler: handler,\n\t}\n\tem.capturers = append(em.capturers, capturer)\n\treturn capturer\n}\n\n\/\/ RemoveListener removes the registered given listener for the given event\nfunc (em *Emitter) RemoveListener(event EventType, listener *Listener) {\n\tem.Lock()\n\tdefer em.Unlock()\n\n\tfor index, list := range em.listeners[event] {\n\t\tif list == listener {\n\t\t\tem.removeListenerAt(event, index)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ If it hasnt been found yet, remove from listeners once if present there\n\tfor index, list := range em.listenersOnce[event] {\n\t\tif list == listener {\n\t\t\tem.removeOnceListenerAt(event, index)\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ RemoveAllListenersForEvent removes all registered listeners for a given event type\nfunc (em *Emitter) RemoveAllListenersForEvent(event EventType) {\n\tem.Lock()\n\tdefer em.Unlock()\n\n\tem.listeners[event] = make([]*Listener, 0)\n}\n\n\/\/ RemoveAllListeners removes all registered listeners for all event types\nfunc (em *Emitter) RemoveAllListeners() {\n\tem.Lock()\n\tdefer em.Unlock()\n\n\tem.listeners = make(map[EventType][]*Listener)\n\tem.listenersOnce = make(map[EventType][]*Listener)\n}\n\n\/\/ RemoveCapturer removes the given capturer\nfunc (em *Emitter) RemoveCapturer(capturer *Capturer) {\n\tem.Lock()\n\tdefer em.Unlock()\n\n\tfor index, capt := range em.capturers {\n\t\tif capt == capturer {\n\t\t\tem.removeCapturerAt(index)\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ RemoveAllCapturers removes all registered capturers\nfunc (em *Emitter) RemoveAllCapturers() {\n\tem.Lock()\n\tdefer em.Unlock()\n\n\tem.capturers = make([]*Capturer, 0)\n}\n\nfunc (em *Emitter) removeListenerAt(event EventType, index int) {\n\tcopy(em.listeners[event][index:], em.listeners[event][index+1:])\n\tem.listeners[event][len(em.listeners[event])-1] = nil\n\tem.listeners[event] = em.listeners[event][:len(em.listeners[event])-1]\n}\n\nfunc (em *Emitter) removeOnceListenerAt(event EventType, index int) {\n\tcopy(em.listenersOnce[event][index:], em.listenersOnce[event][index+1:])\n\tem.listenersOnce[event][len(em.listenersOnce[event])-1] = nil\n\tem.listenersOnce[event] = em.listenersOnce[event][:len(em.listenersOnce[event])-1]\n}\n\nfunc (em *Emitter) removeCapturerAt(index int) {\n\tcopy(em.capturers[index:], em.capturers[index+1:])\n\tem.capturers[len(em.capturers)-1] = nil\n\tem.capturers = em.capturers[:len(em.capturers)-1]\n}\nMake the emitter lock private and fix data races when doing len() on mapspackage eventemitter\n\nimport (\n\t\"sync\"\n)\n\n\/\/ Emitter is the base struct which manages event subscriptions and calls all registered handlers on event emits.\ntype Emitter struct {\n\tmu sync.RWMutex\n\tasync bool\n\tcapturers []*Capturer\n\tlisteners map[EventType][]*Listener\n\tlistenersOnce map[EventType][]*Listener\n}\n\n\/\/ NewEmitter creates a new event emitter that implements the Observable interface.\n\/\/ Async determines whether events listeners fire in separate goroutines or not.\nfunc NewEmitter(async bool) (em *Emitter) {\n\tem = &Emitter{\n\t\tasync: async,\n\t\tlisteners: make(map[EventType][]*Listener),\n\t\tlistenersOnce: make(map[EventType][]*Listener),\n\t}\n\treturn em\n}\n\n\/\/ EmitEvent emits the given event to all listeners and capturers\nfunc (em *Emitter) EmitEvent(event EventType, arguments ...interface{}) {\n\t\/\/ If we have no single listeners for this event, skip\n\tem.mu.RLock()\n\tif len(em.listenersOnce) > 0 {\n\t\tem.mu.RUnlock()\n\t\t\/\/ Get a full lock, we are changing a map\n\t\tem.mu.Lock()\n\t\t\/\/ Copy the slice\n\t\tlistenersOnce := em.listenersOnce[event]\n\t\t\/\/ Create new empty slice\n\t\tem.listenersOnce[event] = make([]*Listener, 0)\n\t\tem.mu.Unlock()\n\n\t\t\/\/ No lock needed, we are working with an inaccessible copy\n\t\tem.emitListenerEvents(listenersOnce, arguments)\n\t} else {\n\t\tem.mu.RUnlock()\n\t}\n\n\tem.mu.RLock()\n\t\/\/ If we have no listeners for this event, skip\n\tif len(em.listeners[event]) > 0 {\n\t\tem.emitListenerEvents(em.listeners[event], arguments)\n\t}\n\tem.mu.RUnlock()\n\n\tem.mu.RLock()\n\t\/\/ If we have no capturers, skip\n\tif len(em.capturers) > 0 {\n\t\tem.emitCapturerEvents(em.capturers, event, arguments)\n\t}\n\tem.mu.RUnlock()\n}\n\nfunc (em *Emitter) emitListenerEvents(listeners []*Listener, arguments []interface{}) {\n\tfor _, listener := range listeners {\n\t\tif em.async {\n\t\t\tgo listener.handler(arguments...)\n\t\t\tcontinue\n\t\t}\n\t\tlistener.handler(arguments...)\n\t}\n}\n\nfunc (em *Emitter) emitCapturerEvents(capturers []*Capturer, event EventType, arguments []interface{}) {\n\tfor _, capturer := range capturers {\n\t\tif em.async {\n\t\t\tgo capturer.handler(event, arguments...)\n\t\t\tcontinue\n\t\t}\n\t\tcapturer.handler(event, arguments...)\n\t}\n}\n\n\/\/ AddListener adds a listener for the given event type\nfunc (em *Emitter) AddListener(event EventType, handler HandleFunc) (listener *Listener) {\n\tem.mu.Lock()\n\tdefer em.mu.Unlock()\n\n\tlistener = &Listener{\n\t\thandler: handler,\n\t}\n\tem.listeners[event] = append(em.listeners[event], listener)\n\treturn listener\n}\n\n\/\/ ListenOnce adds a listener for the given event type that removes itself after it has been fired once\nfunc (em *Emitter) ListenOnce(event EventType, handler HandleFunc) (listener *Listener) {\n\tem.mu.Lock()\n\tdefer em.mu.Unlock()\n\n\tlistener = &Listener{\n\t\thandler: handler,\n\t}\n\tem.listenersOnce[event] = append(em.listenersOnce[event], listener)\n\treturn listener\n}\n\n\/\/ AddCapturer adds an event capturer for all events\nfunc (em *Emitter) AddCapturer(handler CaptureFunc) (capturer *Capturer) {\n\tem.mu.Lock()\n\tdefer em.mu.Unlock()\n\n\tcapturer = &Capturer{\n\t\thandler: handler,\n\t}\n\tem.capturers = append(em.capturers, capturer)\n\treturn capturer\n}\n\n\/\/ RemoveListener removes the registered given listener for the given event\nfunc (em *Emitter) RemoveListener(event EventType, listener *Listener) {\n\tem.mu.Lock()\n\tdefer em.mu.Unlock()\n\n\tfor index, list := range em.listeners[event] {\n\t\tif list == listener {\n\t\t\tem.removeListenerAt(event, index)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ If it hasnt been found yet, remove from listeners once if present there\n\tfor index, list := range em.listenersOnce[event] {\n\t\tif list == listener {\n\t\t\tem.removeOnceListenerAt(event, index)\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ RemoveAllListenersForEvent removes all registered listeners for a given event type\nfunc (em *Emitter) RemoveAllListenersForEvent(event EventType) {\n\tem.mu.Lock()\n\tdefer em.mu.Unlock()\n\n\tem.listeners[event] = make([]*Listener, 0)\n}\n\n\/\/ RemoveAllListeners removes all registered listeners for all event types\nfunc (em *Emitter) RemoveAllListeners() {\n\tem.mu.Lock()\n\tdefer em.mu.Unlock()\n\n\tem.listeners = make(map[EventType][]*Listener)\n\tem.listenersOnce = make(map[EventType][]*Listener)\n}\n\n\/\/ RemoveCapturer removes the given capturer\nfunc (em *Emitter) RemoveCapturer(capturer *Capturer) {\n\tem.mu.Lock()\n\tdefer em.mu.Unlock()\n\n\tfor index, capt := range em.capturers {\n\t\tif capt == capturer {\n\t\t\tem.removeCapturerAt(index)\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ RemoveAllCapturers removes all registered capturers\nfunc (em *Emitter) RemoveAllCapturers() {\n\tem.mu.Lock()\n\tdefer em.mu.Unlock()\n\n\tem.capturers = make([]*Capturer, 0)\n}\n\nfunc (em *Emitter) removeListenerAt(event EventType, index int) {\n\tcopy(em.listeners[event][index:], em.listeners[event][index+1:])\n\tem.listeners[event][len(em.listeners[event])-1] = nil\n\tem.listeners[event] = em.listeners[event][:len(em.listeners[event])-1]\n}\n\nfunc (em *Emitter) removeOnceListenerAt(event EventType, index int) {\n\tcopy(em.listenersOnce[event][index:], em.listenersOnce[event][index+1:])\n\tem.listenersOnce[event][len(em.listenersOnce[event])-1] = nil\n\tem.listenersOnce[event] = em.listenersOnce[event][:len(em.listenersOnce[event])-1]\n}\n\nfunc (em *Emitter) removeCapturerAt(index int) {\n\tcopy(em.capturers[index:], em.capturers[index+1:])\n\tem.capturers[len(em.capturers)-1] = nil\n\tem.capturers = em.capturers[:len(em.capturers)-1]\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2016 Mender Software AS\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\npackage s3\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\"\n\t\"github.com\/mendersoftware\/artifacts\/models\/fileservice\"\n)\n\nconst (\n\tExpireMaxLimit = 7 * 24 * time.Hour\n\tExpireMinLimit = 1 * time.Minute\n)\n\n\/\/ AWS S3 client. Implements FileServiceModelI\ntype SimpleStorageService struct {\n\tclient *s3.S3\n\tbucket string\n}\n\n\/\/ NewSimpleStorageService create new S3 client model.\n\/\/ AWS authentication keys are automatically reloaded from env variables.\nfunc NewSimpleStorageServiceStatic(bucket, key, secret, region, token string) *SimpleStorageService {\n\n\tcredentials := credentials.NewStaticCredentials(key, secret, token)\n\tconfig := aws.NewConfig().WithCredentials(credentials).WithRegion(region)\n\n\tsess := session.New(config)\n\n\treturn &SimpleStorageService{\n\t\tclient: s3.New(sess),\n\t\tbucket: bucket,\n\t}\n}\n\n\/\/ NewSimpleStorageService create new S3 client model.\n\/\/ Use default authentication provides which looks at env variables,\n\/\/ aws profile file and ec2 iam role\nfunc NewSimpleStorageServiceDefaults(bucket, region string) *SimpleStorageService {\n\n\tsess := session.New(aws.NewConfig().WithRegion(region))\n\n\treturn &SimpleStorageService{\n\t\tclient: s3.New(sess),\n\t\tbucket: bucket,\n\t}\n}\n\n\/\/ makeFileId creates file s3 path based on object id and customer id.\n\/\/ Current structure used is directory per customer id: \/\nfunc (s *SimpleStorageService) makeFileId(customerId, objectId string) string {\n\treturn customerId + \"\/\" + objectId\n}\n\nfunc (s *SimpleStorageService) Delete(customerId, objectId string) error {\n\n\tid := s.makeFileId(customerId, objectId)\n\n\tparams := &s3.DeleteObjectInput{\n\t\t\/\/ Required\n\t\tBucket: aws.String(s.bucket),\n\t\tKey: aws.String(id),\n\n\t\t\/\/ Optional\n\t\tRequestPayer: aws.String(s3.RequestPayerRequester),\n\t}\n\n\t\/\/ ignore return response which contains charing info\n\t\/\/ and file versioning data which are not in interest\n\t_, err := s.client.DeleteObject(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *SimpleStorageService) Exists(customerId, objectId string) (bool, error) {\n\n\tid := s.makeFileId(customerId, objectId)\n\n\tparams := &s3.ListObjectsInput{\n\t\t\/\/ Required\n\t\tBucket: aws.String(s.bucket),\n\n\t\t\/\/ Optional\n\t\tMaxKeys: aws.Int64(1),\n\t\tPrefix: aws.String(id),\n\t}\n\n\tresp, err := s.client.ListObjects(params)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif len(resp.Contents) == 0 {\n\t\treturn false, nil\n\t}\n\n\t\/\/ Note: Response should contain max 1 object (MaxKetys=1)\n\t\/\/ Double check if it's exact match as object search matches prefix.\n\tif *resp.Contents[0].Key == id {\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}\n\n\/\/ PutRequest duration is limited to 7 days (AWS limitation)\nfunc (s *SimpleStorageService) PutRequest(customerId, objectId string, duration time.Duration) (*fileservice.Link, error) {\n\n\tif err := s.validateDurationLimits(duration); err != nil {\n\t\treturn nil, err\n\t}\n\n\tid := s.makeFileId(customerId, objectId)\n\n\tparams := &s3.PutObjectInput{\n\t\t\/\/ Required\n\t\tBucket: aws.String(s.bucket),\n\t\tKey: aws.String(id),\n\t}\n\n\t\/\/ Ignore out object\n\treq, _ := s.client.PutObjectRequest(params)\n\n\turi, err := req.Presign(duration)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn fileservice.NewLink(uri, req.Time), nil\n}\n\n\/\/ GetRequest duration is limited to 7 days (AWS limitation)\nfunc (s *SimpleStorageService) GetRequest(customerId, objectId string, duration time.Duration) (*fileservice.Link, error) {\n\n\tif err := s.validateDurationLimits(duration); err != nil {\n\t\treturn nil, err\n\t}\n\n\tid := s.makeFileId(customerId, objectId)\n\n\tparams := &s3.GetObjectInput{\n\t\tBucket: aws.String(s.bucket),\n\t\tKey: aws.String(id),\n\t}\n\n\t\/\/ Ignore out object\n\treq, _ := s.client.GetObjectRequest(params)\n\n\turi, err := req.Presign(duration)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn fileservice.NewLink(uri, req.Time), nil\n}\n\nfunc (s *SimpleStorageService) validateDurationLimits(duration time.Duration) error {\n\tif duration > ExpireMaxLimit || duration < ExpireMinLimit {\n\t\treturn errors.New(fmt.Sprintf(\"Expire duration out of range: %d[ns] allowed %d-%d[ns]\",\n\t\t\tduration, ExpireMinLimit, ExpireMaxLimit))\n\t}\n\n\treturn nil\n}\n\nfunc (s *SimpleStorageService) LastModified(customerId, objectId string) (time.Time, error) {\n\n\tid := s.makeFileId(customerId, objectId)\n\n\tparams := &s3.ListObjectsInput{\n\t\t\/\/ Required\n\t\tBucket: aws.String(s.bucket),\n\n\t\t\/\/ Optional\n\t\tMaxKeys: aws.Int64(1),\n\t\tPrefix: aws.String(id),\n\t}\n\n\tresp, err := s.client.ListObjects(params)\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\n\tif len(resp.Contents) == 0 {\n\t\treturn time.Time{}, fileservice.ErrNotFound\n\t}\n\n\t\/\/ Note: Response should contain max 1 object (MaxKetys=1)\n\t\/\/ Double check if it's exact match as object search matches prefix.\n\tif *resp.Contents[0].Key != id {\n\t\treturn time.Time{}, fileservice.ErrNotFound\n\t}\n\n\treturn *resp.Contents[0].LastModified, nil\n}\nfileservice\/s3: use fmt.Errorf where applicable\/\/ Copyright 2016 Mender Software AS\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\npackage s3\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\"\n\t\"github.com\/mendersoftware\/artifacts\/models\/fileservice\"\n)\n\nconst (\n\tExpireMaxLimit = 7 * 24 * time.Hour\n\tExpireMinLimit = 1 * time.Minute\n)\n\n\/\/ AWS S3 client. Implements FileServiceModelI\ntype SimpleStorageService struct {\n\tclient *s3.S3\n\tbucket string\n}\n\n\/\/ NewSimpleStorageService create new S3 client model.\n\/\/ AWS authentication keys are automatically reloaded from env variables.\nfunc NewSimpleStorageServiceStatic(bucket, key, secret, region, token string) *SimpleStorageService {\n\n\tcredentials := credentials.NewStaticCredentials(key, secret, token)\n\tconfig := aws.NewConfig().WithCredentials(credentials).WithRegion(region)\n\n\tsess := session.New(config)\n\n\treturn &SimpleStorageService{\n\t\tclient: s3.New(sess),\n\t\tbucket: bucket,\n\t}\n}\n\n\/\/ NewSimpleStorageService create new S3 client model.\n\/\/ Use default authentication provides which looks at env variables,\n\/\/ aws profile file and ec2 iam role\nfunc NewSimpleStorageServiceDefaults(bucket, region string) *SimpleStorageService {\n\n\tsess := session.New(aws.NewConfig().WithRegion(region))\n\n\treturn &SimpleStorageService{\n\t\tclient: s3.New(sess),\n\t\tbucket: bucket,\n\t}\n}\n\n\/\/ makeFileId creates file s3 path based on object id and customer id.\n\/\/ Current structure used is directory per customer id: \/\nfunc (s *SimpleStorageService) makeFileId(customerId, objectId string) string {\n\treturn customerId + \"\/\" + objectId\n}\n\nfunc (s *SimpleStorageService) Delete(customerId, objectId string) error {\n\n\tid := s.makeFileId(customerId, objectId)\n\n\tparams := &s3.DeleteObjectInput{\n\t\t\/\/ Required\n\t\tBucket: aws.String(s.bucket),\n\t\tKey: aws.String(id),\n\n\t\t\/\/ Optional\n\t\tRequestPayer: aws.String(s3.RequestPayerRequester),\n\t}\n\n\t\/\/ ignore return response which contains charing info\n\t\/\/ and file versioning data which are not in interest\n\t_, err := s.client.DeleteObject(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *SimpleStorageService) Exists(customerId, objectId string) (bool, error) {\n\n\tid := s.makeFileId(customerId, objectId)\n\n\tparams := &s3.ListObjectsInput{\n\t\t\/\/ Required\n\t\tBucket: aws.String(s.bucket),\n\n\t\t\/\/ Optional\n\t\tMaxKeys: aws.Int64(1),\n\t\tPrefix: aws.String(id),\n\t}\n\n\tresp, err := s.client.ListObjects(params)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif len(resp.Contents) == 0 {\n\t\treturn false, nil\n\t}\n\n\t\/\/ Note: Response should contain max 1 object (MaxKetys=1)\n\t\/\/ Double check if it's exact match as object search matches prefix.\n\tif *resp.Contents[0].Key == id {\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}\n\n\/\/ PutRequest duration is limited to 7 days (AWS limitation)\nfunc (s *SimpleStorageService) PutRequest(customerId, objectId string, duration time.Duration) (*fileservice.Link, error) {\n\n\tif err := s.validateDurationLimits(duration); err != nil {\n\t\treturn nil, err\n\t}\n\n\tid := s.makeFileId(customerId, objectId)\n\n\tparams := &s3.PutObjectInput{\n\t\t\/\/ Required\n\t\tBucket: aws.String(s.bucket),\n\t\tKey: aws.String(id),\n\t}\n\n\t\/\/ Ignore out object\n\treq, _ := s.client.PutObjectRequest(params)\n\n\turi, err := req.Presign(duration)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn fileservice.NewLink(uri, req.Time), nil\n}\n\n\/\/ GetRequest duration is limited to 7 days (AWS limitation)\nfunc (s *SimpleStorageService) GetRequest(customerId, objectId string, duration time.Duration) (*fileservice.Link, error) {\n\n\tif err := s.validateDurationLimits(duration); err != nil {\n\t\treturn nil, err\n\t}\n\n\tid := s.makeFileId(customerId, objectId)\n\n\tparams := &s3.GetObjectInput{\n\t\tBucket: aws.String(s.bucket),\n\t\tKey: aws.String(id),\n\t}\n\n\t\/\/ Ignore out object\n\treq, _ := s.client.GetObjectRequest(params)\n\n\turi, err := req.Presign(duration)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn fileservice.NewLink(uri, req.Time), nil\n}\n\nfunc (s *SimpleStorageService) validateDurationLimits(duration time.Duration) error {\n\tif duration > ExpireMaxLimit || duration < ExpireMinLimit {\n\t\treturn fmt.Errorf(\"Expire duration out of range: %d[ns] allowed %d-%d[ns]\",\n\t\t\tduration, ExpireMinLimit, ExpireMaxLimit)\n\t}\n\n\treturn nil\n}\n\nfunc (s *SimpleStorageService) LastModified(customerId, objectId string) (time.Time, error) {\n\n\tid := s.makeFileId(customerId, objectId)\n\n\tparams := &s3.ListObjectsInput{\n\t\t\/\/ Required\n\t\tBucket: aws.String(s.bucket),\n\n\t\t\/\/ Optional\n\t\tMaxKeys: aws.Int64(1),\n\t\tPrefix: aws.String(id),\n\t}\n\n\tresp, err := s.client.ListObjects(params)\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\n\tif len(resp.Contents) == 0 {\n\t\treturn time.Time{}, fileservice.ErrNotFound\n\t}\n\n\t\/\/ Note: Response should contain max 1 object (MaxKetys=1)\n\t\/\/ Double check if it's exact match as object search matches prefix.\n\tif *resp.Contents[0].Key != id {\n\t\treturn time.Time{}, fileservice.ErrNotFound\n\t}\n\n\treturn *resp.Contents[0].LastModified, nil\n}\n<|endoftext|>"} {"text":"package models_test\n\nimport (\n\t\"github.com\/APTrust\/exchange\/constants\"\n\t\"github.com\/APTrust\/exchange\/models\"\n\t\"github.com\/APTrust\/exchange\/util\/testutil\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"strings\"\n\t\"testing\"\n)\n\nconst digest = \"12345678901234567890123456789012\"\nconst md5_digest = \"md5:12345678901234567890123456789012\"\nconst sha256_digest = \"sha256:12345678901234567890123456789012\"\n\nfunc TestEventTypeValid(t *testing.T) {\n\tfor _, eventType := range constants.EventTypes {\n\t\tpremisEvent := &models.PremisEvent{\n\t\t\tEventType: eventType,\n\t\t}\n\t\tif premisEvent.EventTypeValid() == false {\n\t\t\tt.Errorf(\"EventType '%s' should be valid\", eventType)\n\t\t}\n\t}\n\tpremisEvent := &models.PremisEvent{\n\t\tEventType: \"pub_crawl\",\n\t}\n\tif premisEvent.EventTypeValid() == true {\n\t\tt.Errorf(\"EventType 'pub_crawl' should not be valid\")\n\t}\n}\n\nfunc TestNewEventObjectIngest(t *testing.T) {\n\tevent, err := models.NewEventObjectIngest(300)\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"ingestion\", event.EventType)\n\tassert.False(t, event.DateTime.IsZero())\n\tassert.Equal(t, \"Copied all files to perservation bucket\", event.Detail)\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, \"300 files copied\", event.OutcomeDetail)\n\tassert.Equal(t, \"goamz S3 client\", event.Object)\n\tassert.Equal(t, \"https:\/\/github.com\/crowdmob\/goamz\", event.Agent)\n\tassert.Equal(t, \"Multipart put using md5 checksum\", event.OutcomeInformation)\n}\n\nfunc TestNewEventObjectIdentifierAssignment(t *testing.T) {\n\tevent, err := models.NewEventObjectIdentifierAssignment(\"test.edu\/object001\")\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"identifier assignment\", event.EventType)\n\tassert.False(t, event.DateTime.IsZero())\n\tassert.Equal(t, \"Assigned bag identifier\", event.Detail)\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, \"test.edu\/object001\", event.OutcomeDetail)\n\tassert.Equal(t, \"APTrust exchange\", event.Object)\n\tassert.Equal(t, \"https:\/\/github.com\/APTrust\/exchange\", event.Agent)\n\tassert.Equal(t, \"Institution domain + tar file name\", event.OutcomeInformation)\n}\n\nfunc TestNewEventObjectRights(t *testing.T) {\n\tevent, err := models.NewEventObjectRights(\"institution\")\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"access assignment\", event.EventType)\n\tassert.False(t, event.DateTime.IsZero())\n\tassert.Equal(t, \"Assigned bag access rights\", event.Detail)\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, \"institution\", event.OutcomeDetail)\n\tassert.Equal(t, \"APTrust exchange\", event.Object)\n\tassert.Equal(t, \"https:\/\/github.com\/APTrust\/exchange\", event.Agent)\n\tassert.Equal(t, \"Set access to institution\", event.OutcomeInformation)\n}\n\nfunc TestNewEventGenericFileIngest(t *testing.T) {\n\tevent, err := models.NewEventGenericFileIngest(testutil.TEST_TIMESTAMP, digest)\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"ingestion\", event.EventType)\n\tassert.Equal(t, testutil.TEST_TIMESTAMP, event.DateTime)\n\tassert.Equal(t, \"Completed copy to S3\", event.Detail)\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, md5_digest, event.OutcomeDetail)\n\tassert.Equal(t, \"exchange + goamz S3 client\", event.Object)\n\tassert.Equal(t, \"https:\/\/github.com\/APTrust\/exchange\", event.Agent)\n\tassert.Equal(t, \"Put using md5 checksum\", event.OutcomeInformation)\n}\n\nfunc TestNewEventGenericFileFixityCheck(t *testing.T) {\n\tevent, err := models.NewEventGenericFileFixityCheck(testutil.TEST_TIMESTAMP, constants.AlgMd5,\n\t\tdigest, true)\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"fixity check\", event.EventType)\n\tassert.Equal(t, testutil.TEST_TIMESTAMP, event.DateTime)\n\tassert.Equal(t, \"Fixity check against registered hash\", event.Detail)\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, md5_digest, event.OutcomeDetail)\n\tassert.Equal(t, \"Go language crypto\/md5\", event.Object)\n\tassert.Equal(t, \"http:\/\/golang.org\/pkg\/crypto\/md5\/\", event.Agent)\n\tassert.Equal(t, \"Fixity matches\", event.OutcomeInformation)\n\n\tevent, err = models.NewEventGenericFileFixityCheck(testutil.TEST_TIMESTAMP, constants.AlgSha256,\n\t\tdigest, false)\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"fixity check\", event.EventType)\n\tassert.Equal(t, testutil.TEST_TIMESTAMP, event.DateTime)\n\tassert.Equal(t, \"Fixity check against registered hash\", event.Detail)\n\tassert.Equal(t, \"Failed\", event.Outcome)\n\tassert.Equal(t, sha256_digest, event.OutcomeDetail)\n\tassert.Equal(t, \"Go language crypto\/sha256\", event.Object)\n\tassert.Equal(t, \"http:\/\/golang.org\/pkg\/crypto\/sha256\/\", event.Agent)\n\tassert.Equal(t, \"Fixity did not match\", event.OutcomeInformation)\n}\n\nfunc TestNewEventGenericFileDigestCalculation(t *testing.T) {\n\tevent, err := models.NewEventGenericFileDigestCalculation(testutil.TEST_TIMESTAMP,\n\t\tconstants.AlgMd5, digest)\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"message digest calculation\", event.EventType)\n\tassert.Equal(t, testutil.TEST_TIMESTAMP, event.DateTime)\n\tassert.Equal(t, \"Calculated fixity value\", event.Detail)\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, md5_digest, event.OutcomeDetail)\n\tassert.Equal(t, \"Go language crypto\/md5\", event.Object)\n\tassert.Equal(t, \"http:\/\/golang.org\/pkg\/crypto\/md5\/\", event.Agent)\n\tassert.Equal(t, \"\", event.OutcomeInformation)\n\n\tevent, err = models.NewEventGenericFileDigestCalculation(testutil.TEST_TIMESTAMP, constants.AlgSha256, digest)\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"message digest calculation\", event.EventType)\n\tassert.Equal(t, testutil.TEST_TIMESTAMP, event.DateTime)\n\tassert.Equal(t, \"Calculated fixity value\", event.Detail)\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, sha256_digest, event.OutcomeDetail)\n\tassert.Equal(t, \"Go language crypto\/sha256\", event.Object)\n\tassert.Equal(t, \"http:\/\/golang.org\/pkg\/crypto\/sha256\/\", event.Agent)\n\tassert.Equal(t, \"\", event.OutcomeInformation)\n}\n\nfunc TestNewEventGenericFileIdentifierAssignment(t *testing.T) {\n\tevent, err := models.NewEventGenericFileIdentifierAssignment(testutil.TEST_TIMESTAMP, constants.IdTypeBagAndPath, \"blah.edu\/blah\/blah.txt\")\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"identifier assignment\", event.EventType)\n\tassert.Equal(t, testutil.TEST_TIMESTAMP, event.DateTime)\n\tassert.Equal(t, \"Assigned new institution.bag\/path identifier\", event.Detail)\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, \"blah.edu\/blah\/blah.txt\", event.OutcomeDetail)\n\tassert.Equal(t, \"APTrust exchange\/ingest processor\", event.Object)\n\tassert.Equal(t, \"https:\/\/github.com\/APTrust\/exchange\", event.Agent)\n\tassert.Equal(t, \"\", event.OutcomeInformation)\n\n\tevent, err = models.NewEventGenericFileIdentifierAssignment(testutil.TEST_TIMESTAMP, constants.IdTypeStorageURL, \"https:\/\/example.com\/000-000-999\")\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"identifier assignment\", event.EventType)\n\tassert.Equal(t, testutil.TEST_TIMESTAMP, event.DateTime)\n\tassert.True(t, strings.HasPrefix(event.Detail, \"Assigned new storage URL identifier\"))\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, \"https:\/\/example.com\/000-000-999\", event.OutcomeDetail)\n\tassert.Equal(t, \"Go uuid library + goamz S3 library\", event.Object)\n\tassert.Equal(t, \"http:\/\/github.com\/nu7hatch\/gouuid\", event.Agent)\n\tassert.Equal(t, \"\", event.OutcomeInformation)\n\n}\n\nfunc TestNewEventGenericFileReplication(t *testing.T) {\n\tevent, err := models.NewEventGenericFileReplication(testutil.TEST_TIMESTAMP, \"https:\/\/example.com\/123456789\")\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"replication\", event.EventType)\n\tassert.Equal(t, testutil.TEST_TIMESTAMP, event.DateTime)\n\tassert.Equal(t, \"Copied to replication storage and assigned replication URL identifier\", event.Detail)\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, \"https:\/\/example.com\/123456789\", event.OutcomeDetail)\n\tassert.Equal(t, \"Go uuid library + goamz S3 library\", event.Object)\n\tassert.Equal(t, \"http:\/\/github.com\/nu7hatch\/gouuid\", event.Agent)\n\tassert.Equal(t, \"\", event.OutcomeInformation)\n}\nTest required Premis event paramspackage models_test\n\nimport (\n\t\"github.com\/APTrust\/exchange\/constants\"\n\t\"github.com\/APTrust\/exchange\/models\"\n\t\"github.com\/APTrust\/exchange\/util\/testutil\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst digest = \"12345678901234567890123456789012\"\nconst md5_digest = \"md5:12345678901234567890123456789012\"\nconst sha256_digest = \"sha256:12345678901234567890123456789012\"\n\nfunc TestEventTypeValid(t *testing.T) {\n\tfor _, eventType := range constants.EventTypes {\n\t\tpremisEvent := &models.PremisEvent{\n\t\t\tEventType: eventType,\n\t\t}\n\t\tif premisEvent.EventTypeValid() == false {\n\t\t\tt.Errorf(\"EventType '%s' should be valid\", eventType)\n\t\t}\n\t}\n\tpremisEvent := &models.PremisEvent{\n\t\tEventType: \"pub_crawl\",\n\t}\n\tif premisEvent.EventTypeValid() == true {\n\t\tt.Errorf(\"EventType 'pub_crawl' should not be valid\")\n\t}\n}\n\nfunc TestNewEventObjectIngest(t *testing.T) {\n\t\/\/ Test with required params missing\n\t_, err := models.NewEventObjectIngest(0)\n\tassert.NotNil(t, err)\n\tif err != nil {\n\t\tassert.True(t, strings.HasPrefix(err.Error(), \"Param\"))\n\t}\n\n\tevent, err := models.NewEventObjectIngest(300)\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"ingestion\", event.EventType)\n\tassert.False(t, event.DateTime.IsZero())\n\tassert.Equal(t, \"Copied all files to perservation bucket\", event.Detail)\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, \"300 files copied\", event.OutcomeDetail)\n\tassert.Equal(t, \"goamz S3 client\", event.Object)\n\tassert.Equal(t, \"https:\/\/github.com\/crowdmob\/goamz\", event.Agent)\n\tassert.Equal(t, \"Multipart put using md5 checksum\", event.OutcomeInformation)\n}\n\nfunc TestNewEventObjectIdentifierAssignment(t *testing.T) {\n\t\/\/ Test with required params missing\n\t_, err := models.NewEventObjectIdentifierAssignment(\"\")\n\tassert.NotNil(t, err)\n\tif err != nil {\n\t\tassert.True(t, strings.HasPrefix(err.Error(), \"Param\"))\n\t}\n\n\tevent, err := models.NewEventObjectIdentifierAssignment(\"test.edu\/object001\")\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"identifier assignment\", event.EventType)\n\tassert.False(t, event.DateTime.IsZero())\n\tassert.Equal(t, \"Assigned bag identifier\", event.Detail)\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, \"test.edu\/object001\", event.OutcomeDetail)\n\tassert.Equal(t, \"APTrust exchange\", event.Object)\n\tassert.Equal(t, \"https:\/\/github.com\/APTrust\/exchange\", event.Agent)\n\tassert.Equal(t, \"Institution domain + tar file name\", event.OutcomeInformation)\n}\n\nfunc TestNewEventObjectRights(t *testing.T) {\n\t\/\/ Test with required params missing\n\t_, err := models.NewEventObjectRights(\"\")\n\tassert.NotNil(t, err)\n\tif err != nil {\n\t\tassert.True(t, strings.HasPrefix(err.Error(), \"Param\"))\n\t}\n\n\tevent, err := models.NewEventObjectRights(\"institution\")\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"access assignment\", event.EventType)\n\tassert.False(t, event.DateTime.IsZero())\n\tassert.Equal(t, \"Assigned bag access rights\", event.Detail)\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, \"institution\", event.OutcomeDetail)\n\tassert.Equal(t, \"APTrust exchange\", event.Object)\n\tassert.Equal(t, \"https:\/\/github.com\/APTrust\/exchange\", event.Agent)\n\tassert.Equal(t, \"Set access to institution\", event.OutcomeInformation)\n}\n\nfunc TestNewEventGenericFileIngest(t *testing.T) {\n\t\/\/ Test with required params missing\n\t_, err := models.NewEventGenericFileIngest(time.Time{}, digest)\n\tassert.NotNil(t, err)\n\tif err != nil {\n\t\tassert.True(t, strings.HasPrefix(err.Error(), \"Param\"))\n\t}\n\t_, err = models.NewEventGenericFileIngest(testutil.TEST_TIMESTAMP, \"\")\n\tassert.NotNil(t, err)\n\tif err != nil {\n\t\tassert.True(t, strings.HasPrefix(err.Error(), \"Param\"))\n\t}\n\n\tevent, err := models.NewEventGenericFileIngest(testutil.TEST_TIMESTAMP, digest)\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"ingestion\", event.EventType)\n\tassert.Equal(t, testutil.TEST_TIMESTAMP, event.DateTime)\n\tassert.Equal(t, \"Completed copy to S3\", event.Detail)\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, md5_digest, event.OutcomeDetail)\n\tassert.Equal(t, \"exchange + goamz S3 client\", event.Object)\n\tassert.Equal(t, \"https:\/\/github.com\/APTrust\/exchange\", event.Agent)\n\tassert.Equal(t, \"Put using md5 checksum\", event.OutcomeInformation)\n}\n\nfunc TestNewEventGenericFileFixityCheck(t *testing.T) {\n\t\/\/ Test with required params missing\n\t_, err := models.NewEventGenericFileFixityCheck(time.Time{}, constants.AlgMd5, digest, true)\n\tassert.NotNil(t, err)\n\tif err != nil {\n\t\tassert.True(t, strings.HasPrefix(err.Error(), \"Param\"))\n\t}\n\t_, err = models.NewEventGenericFileFixityCheck(testutil.TEST_TIMESTAMP, \"\", digest, true)\n\tassert.NotNil(t, err)\n\tif err != nil {\n\t\tassert.True(t, strings.HasPrefix(err.Error(), \"Param\"))\n\t}\n\t_, err = models.NewEventGenericFileFixityCheck(testutil.TEST_TIMESTAMP, constants.AlgMd5, \"\", true)\n\tassert.NotNil(t, err)\n\tif err != nil {\n\t\tassert.True(t, strings.HasPrefix(err.Error(), \"Param\"))\n\t}\n\n\tevent, err := models.NewEventGenericFileFixityCheck(testutil.TEST_TIMESTAMP, constants.AlgMd5,\n\t\tdigest, true)\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"fixity check\", event.EventType)\n\tassert.Equal(t, testutil.TEST_TIMESTAMP, event.DateTime)\n\tassert.Equal(t, \"Fixity check against registered hash\", event.Detail)\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, md5_digest, event.OutcomeDetail)\n\tassert.Equal(t, \"Go language crypto\/md5\", event.Object)\n\tassert.Equal(t, \"http:\/\/golang.org\/pkg\/crypto\/md5\/\", event.Agent)\n\tassert.Equal(t, \"Fixity matches\", event.OutcomeInformation)\n\n\tevent, err = models.NewEventGenericFileFixityCheck(testutil.TEST_TIMESTAMP, constants.AlgSha256,\n\t\tdigest, false)\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"fixity check\", event.EventType)\n\tassert.Equal(t, testutil.TEST_TIMESTAMP, event.DateTime)\n\tassert.Equal(t, \"Fixity check against registered hash\", event.Detail)\n\tassert.Equal(t, \"Failed\", event.Outcome)\n\tassert.Equal(t, sha256_digest, event.OutcomeDetail)\n\tassert.Equal(t, \"Go language crypto\/sha256\", event.Object)\n\tassert.Equal(t, \"http:\/\/golang.org\/pkg\/crypto\/sha256\/\", event.Agent)\n\tassert.Equal(t, \"Fixity did not match\", event.OutcomeInformation)\n}\n\nfunc TestNewEventGenericFileDigestCalculation(t *testing.T) {\n\t\/\/ Test with required params missing\n\t_, err := models.NewEventGenericFileDigestCalculation(time.Time{}, constants.AlgMd5, digest)\n\tassert.NotNil(t, err)\n\tif err != nil {\n\t\tassert.True(t, strings.HasPrefix(err.Error(), \"Param\"))\n\t}\n\t_, err = models.NewEventGenericFileDigestCalculation(testutil.TEST_TIMESTAMP, \"\", digest)\n\tassert.NotNil(t, err)\n\tif err != nil {\n\t\tassert.True(t, strings.HasPrefix(err.Error(), \"Param\"))\n\t}\n\t_, err = models.NewEventGenericFileDigestCalculation(testutil.TEST_TIMESTAMP, constants.AlgMd5, \"\")\n\tassert.NotNil(t, err)\n\tif err != nil {\n\t\tassert.True(t, strings.HasPrefix(err.Error(), \"Param\"))\n\t}\n\n\tevent, err := models.NewEventGenericFileDigestCalculation(testutil.TEST_TIMESTAMP,\n\t\tconstants.AlgMd5, digest)\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"message digest calculation\", event.EventType)\n\tassert.Equal(t, testutil.TEST_TIMESTAMP, event.DateTime)\n\tassert.Equal(t, \"Calculated fixity value\", event.Detail)\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, md5_digest, event.OutcomeDetail)\n\tassert.Equal(t, \"Go language crypto\/md5\", event.Object)\n\tassert.Equal(t, \"http:\/\/golang.org\/pkg\/crypto\/md5\/\", event.Agent)\n\tassert.Equal(t, \"\", event.OutcomeInformation)\n\n\tevent, err = models.NewEventGenericFileDigestCalculation(testutil.TEST_TIMESTAMP, constants.AlgSha256, digest)\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"message digest calculation\", event.EventType)\n\tassert.Equal(t, testutil.TEST_TIMESTAMP, event.DateTime)\n\tassert.Equal(t, \"Calculated fixity value\", event.Detail)\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, sha256_digest, event.OutcomeDetail)\n\tassert.Equal(t, \"Go language crypto\/sha256\", event.Object)\n\tassert.Equal(t, \"http:\/\/golang.org\/pkg\/crypto\/sha256\/\", event.Agent)\n\tassert.Equal(t, \"\", event.OutcomeInformation)\n}\n\nfunc TestNewEventGenericFileIdentifierAssignment(t *testing.T) {\n\t\/\/ Test with required params missing\n\t_, err := models.NewEventGenericFileIdentifierAssignment(time.Time{}, constants.AlgMd5, \"abc\/123\")\n\tassert.NotNil(t, err)\n\tif err != nil {\n\t\tassert.True(t, strings.HasPrefix(err.Error(), \"Param\"))\n\t}\n\t_, err = models.NewEventGenericFileIdentifierAssignment(testutil.TEST_TIMESTAMP, \"\", \"abc\/123\")\n\tassert.NotNil(t, err)\n\tif err != nil {\n\t\tassert.True(t, strings.HasPrefix(err.Error(), \"Param\"))\n\t}\n\t_, err = models.NewEventGenericFileIdentifierAssignment(testutil.TEST_TIMESTAMP, constants.AlgMd5, \"\")\n\tassert.NotNil(t, err)\n\tif err != nil {\n\t\tassert.True(t, strings.HasPrefix(err.Error(), \"Param\"))\n\t}\n\n\tevent, err := models.NewEventGenericFileIdentifierAssignment(testutil.TEST_TIMESTAMP, constants.IdTypeBagAndPath, \"blah.edu\/blah\/blah.txt\")\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"identifier assignment\", event.EventType)\n\tassert.Equal(t, testutil.TEST_TIMESTAMP, event.DateTime)\n\tassert.Equal(t, \"Assigned new institution.bag\/path identifier\", event.Detail)\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, \"blah.edu\/blah\/blah.txt\", event.OutcomeDetail)\n\tassert.Equal(t, \"APTrust exchange\/ingest processor\", event.Object)\n\tassert.Equal(t, \"https:\/\/github.com\/APTrust\/exchange\", event.Agent)\n\tassert.Equal(t, \"\", event.OutcomeInformation)\n\n\tevent, err = models.NewEventGenericFileIdentifierAssignment(testutil.TEST_TIMESTAMP, constants.IdTypeStorageURL, \"https:\/\/example.com\/000-000-999\")\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"identifier assignment\", event.EventType)\n\tassert.Equal(t, testutil.TEST_TIMESTAMP, event.DateTime)\n\tassert.True(t, strings.HasPrefix(event.Detail, \"Assigned new storage URL identifier\"))\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, \"https:\/\/example.com\/000-000-999\", event.OutcomeDetail)\n\tassert.Equal(t, \"Go uuid library + goamz S3 library\", event.Object)\n\tassert.Equal(t, \"http:\/\/github.com\/nu7hatch\/gouuid\", event.Agent)\n\tassert.Equal(t, \"\", event.OutcomeInformation)\n\n}\n\nfunc TestNewEventGenericFileReplication(t *testing.T) {\n\t\/\/ Test with required params missing\n\t_, err := models.NewEventGenericFileReplication(time.Time{}, \"https:\/\/example.com\/123456789\")\n\tassert.NotNil(t, err)\n\tif err != nil {\n\t\tassert.True(t, strings.HasPrefix(err.Error(), \"Param\"))\n\t}\n\t_, err = models.NewEventGenericFileReplication(testutil.TEST_TIMESTAMP, \"\")\n\tassert.NotNil(t, err)\n\tif err != nil {\n\t\tassert.True(t, strings.HasPrefix(err.Error(), \"Param\"))\n\t}\n\n\tevent, err := models.NewEventGenericFileReplication(testutil.TEST_TIMESTAMP, \"https:\/\/example.com\/123456789\")\n\tif err != nil {\n\t\tt.Errorf(\"Error creating PremisEvent: %v\", err)\n\t\treturn\n\t}\n\tassert.Len(t, event.Identifier, 36)\n\tassert.Equal(t, \"replication\", event.EventType)\n\tassert.Equal(t, testutil.TEST_TIMESTAMP, event.DateTime)\n\tassert.Equal(t, \"Copied to replication storage and assigned replication URL identifier\", event.Detail)\n\tassert.Equal(t, \"Success\", event.Outcome)\n\tassert.Equal(t, \"https:\/\/example.com\/123456789\", event.OutcomeDetail)\n\tassert.Equal(t, \"Go uuid library + goamz S3 library\", event.Object)\n\tassert.Equal(t, \"http:\/\/github.com\/nu7hatch\/gouuid\", event.Agent)\n\tassert.Equal(t, \"\", event.OutcomeInformation)\n}\n<|endoftext|>"} {"text":"package models_testadd TODOpackage models_test\n\n\/\/ TODO: add tests\n<|endoftext|>"} {"text":"\/\/ Copyright 2020 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"fmt\"\n\n\trepo_model \"code.gitea.io\/gitea\/models\/repo\"\n\t\"code.gitea.io\/gitea\/modules\/git\"\n\t\"code.gitea.io\/gitea\/modules\/graceful\"\n\t\"code.gitea.io\/gitea\/modules\/log\"\n\t\"code.gitea.io\/gitea\/modules\/process\"\n)\n\n\/\/ DBIndexer implements Indexer interface to use database's like search\ntype DBIndexer struct{}\n\n\/\/ Index repository status function\nfunc (db *DBIndexer) Index(id int64) error {\n\tctx, _, finished := process.GetManager().AddContext(graceful.GetManager().ShutdownContext(), fmt.Sprintf(\"Stats.DB Index Repo[%d]\", id))\n\tdefer finished()\n\n\trepo, err := repo_model.GetRepositoryByID(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif repo.IsEmpty {\n\t\treturn nil\n\t}\n\n\tstatus, err := repo_model.GetIndexerStatus(repo, repo_model.RepoIndexerTypeStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgitRepo, err := git.OpenRepositoryCtx(ctx, repo.RepoPath())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer gitRepo.Close()\n\n\t\/\/ Get latest commit for default branch\n\tcommitID, err := gitRepo.GetBranchCommitID(repo.DefaultBranch)\n\tif err != nil {\n\t\tif git.IsErrBranchNotExist(err) || git.IsErrNotExist(err) {\n\t\t\tlog.Debug(\"Unable to get commit ID for default branch %s in %s ... skipping this repository\", repo.DefaultBranch, repo.RepoPath())\n\t\t\treturn nil\n\t\t}\n\t\tlog.Error(\"Unable to get commit ID for default branch %s in %s. Error: %v\", repo.DefaultBranch, repo.RepoPath(), err)\n\t\treturn err\n\t}\n\n\t\/\/ Do not recalculate stats if already calculated for this commit\n\tif status.CommitSha == commitID {\n\t\treturn nil\n\t}\n\n\t\/\/ Calculate and save language statistics to database\n\tstats, err := gitRepo.GetLanguageStats(commitID)\n\tif err != nil {\n\t\tlog.Error(\"Unable to get language stats for ID %s for default branch %s in %s. Error: %v\", commitID, repo.DefaultBranch, repo.RepoPath(), err)\n\t\treturn err\n\t}\n\terr = repo_model.UpdateLanguageStats(repo, commitID, stats)\n\tif err != nil {\n\t\tlog.Error(\"Unable to update language stats for ID %s for default branch %s in %s. Error: %v\", commitID, repo.DefaultBranch, repo.RepoPath(), err)\n\t\treturn err\n\t}\n\n\tlog.Debug(\"DBIndexer completed language stats for ID %s for default branch %s in %s. stats count: %d\", commitID, repo.DefaultBranch, repo.RepoPath(), len(stats))\n\treturn nil\n}\n\n\/\/ Close dummy function\nfunc (db *DBIndexer) Close() {\n}\nPrevent Stats Indexer reporting error if repo dir missing (#18870)\/\/ Copyright 2020 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"fmt\"\n\n\trepo_model \"code.gitea.io\/gitea\/models\/repo\"\n\t\"code.gitea.io\/gitea\/modules\/git\"\n\t\"code.gitea.io\/gitea\/modules\/graceful\"\n\t\"code.gitea.io\/gitea\/modules\/log\"\n\t\"code.gitea.io\/gitea\/modules\/process\"\n)\n\n\/\/ DBIndexer implements Indexer interface to use database's like search\ntype DBIndexer struct{}\n\n\/\/ Index repository status function\nfunc (db *DBIndexer) Index(id int64) error {\n\tctx, _, finished := process.GetManager().AddContext(graceful.GetManager().ShutdownContext(), fmt.Sprintf(\"Stats.DB Index Repo[%d]\", id))\n\tdefer finished()\n\n\trepo, err := repo_model.GetRepositoryByID(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif repo.IsEmpty {\n\t\treturn nil\n\t}\n\n\tstatus, err := repo_model.GetIndexerStatus(repo, repo_model.RepoIndexerTypeStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgitRepo, err := git.OpenRepositoryCtx(ctx, repo.RepoPath())\n\tif err != nil {\n\t\tif err.Error() == \"no such file or directory\" {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tdefer gitRepo.Close()\n\n\t\/\/ Get latest commit for default branch\n\tcommitID, err := gitRepo.GetBranchCommitID(repo.DefaultBranch)\n\tif err != nil {\n\t\tif git.IsErrBranchNotExist(err) || git.IsErrNotExist(err) {\n\t\t\tlog.Debug(\"Unable to get commit ID for default branch %s in %s ... skipping this repository\", repo.DefaultBranch, repo.RepoPath())\n\t\t\treturn nil\n\t\t}\n\t\tlog.Error(\"Unable to get commit ID for default branch %s in %s. Error: %v\", repo.DefaultBranch, repo.RepoPath(), err)\n\t\treturn err\n\t}\n\n\t\/\/ Do not recalculate stats if already calculated for this commit\n\tif status.CommitSha == commitID {\n\t\treturn nil\n\t}\n\n\t\/\/ Calculate and save language statistics to database\n\tstats, err := gitRepo.GetLanguageStats(commitID)\n\tif err != nil {\n\t\tlog.Error(\"Unable to get language stats for ID %s for default branch %s in %s. Error: %v\", commitID, repo.DefaultBranch, repo.RepoPath(), err)\n\t\treturn err\n\t}\n\terr = repo_model.UpdateLanguageStats(repo, commitID, stats)\n\tif err != nil {\n\t\tlog.Error(\"Unable to update language stats for ID %s for default branch %s in %s. Error: %v\", commitID, repo.DefaultBranch, repo.RepoPath(), err)\n\t\treturn err\n\t}\n\n\tlog.Debug(\"DBIndexer completed language stats for ID %s for default branch %s in %s. stats count: %d\", commitID, repo.DefaultBranch, repo.RepoPath(), len(stats))\n\treturn nil\n}\n\n\/\/ Close dummy function\nfunc (db *DBIndexer) Close() {\n}\n<|endoftext|>"} {"text":"package uic\n\nimport (\n\t\"github.com\/Cepave\/fe\/g\"\n\t\"github.com\/Cepave\/fe\/http\/base\"\n\t. \"github.com\/Cepave\/fe\/model\/uic\"\n\t\"github.com\/Cepave\/fe\/utils\"\n\t\"github.com\/toolkits\/str\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype AuthController struct {\n\tbase.BaseController\n}\n\nfunc (this *AuthController) Logout() {\n\tu := this.Ctx.Input.GetData(\"CurrentUser\").(*User)\n\tRemoveSessionByUid(u.Id)\n\tthis.Ctx.SetCookie(\"sig\", \"\", 0, \"\/\")\n\tthis.Ctx.SetCookie(\"sig\", \"\", 0, \"\/\", \".owlemon.com\")\n\tthis.Redirect(\"\/auth\/login\", 302)\n}\n\nfunc (this *AuthController) LoginGet() {\n\tappSig := this.GetString(\"sig\", \"\")\n\tcallback := this.GetString(\"callback\", \"\")\n\n\tcookieSig := this.Ctx.GetCookie(\"sig\")\n\tif cookieSig == \"\" {\n\t\tthis.renderLoginPage(appSig, callback)\n\t\treturn\n\t}\n\n\tsessionObj := ReadSessionBySig(cookieSig)\n\tif sessionObj == nil {\n\t\tthis.renderLoginPage(appSig, callback)\n\t\treturn\n\t}\n\n\tif int64(sessionObj.Expired) < time.Now().Unix() {\n\t\tRemoveSessionByUid(sessionObj.Uid)\n\t\tthis.renderLoginPage(appSig, callback)\n\t\treturn\n\t}\n\n\tif appSig != \"\" && callback != \"\" {\n\t\tthis.Redirect(callback, 302)\n\t} else {\n\t\tthis.Redirect(\"\/me\/info\", 302)\n\t}\n}\n\nfunc (this *AuthController) LoginPost() {\n\tname := this.GetString(\"name\", \"\")\n\tpassword := this.GetString(\"password\", \"\")\n\n\tif name == \"\" || password == \"\" {\n\t\tthis.ServeErrJson(\"name or password is blank\")\n\t\treturn\n\t}\n\n\tvar u *User\n\n\tldapEnabled := this.MustGetBool(\"ldap\", false)\n\n\tif ldapEnabled {\n\t\tsucess, err := utils.LdapBind(g.Config().Ldap.Addr,\n\t\t\tg.Config().Ldap.BaseDN,\n\t\t\tg.Config().Ldap.BindDN,\n\t\t\tg.Config().Ldap.BindPasswd,\n\t\t\tg.Config().Ldap.UserField,\n\t\t\tname,\n\t\t\tpassword)\n\t\tif err != nil {\n\t\t\tthis.ServeErrJson(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tif !sucess {\n\t\t\tthis.ServeErrJson(\"name or password error\")\n\t\t\treturn\n\t\t}\n\n\t\tuser_attributes, err := utils.Ldapsearch(g.Config().Ldap.Addr,\n\t\t\tg.Config().Ldap.BaseDN,\n\t\t\tg.Config().Ldap.BindDN,\n\t\t\tg.Config().Ldap.BindPasswd,\n\t\t\tg.Config().Ldap.UserField,\n\t\t\tname,\n\t\t\tg.Config().Ldap.Attributes)\n\t\tuserSn := \"\"\n\t\tuserMail := \"\"\n\t\tuserTel := \"\"\n\t\tif err == nil {\n\t\t\tuserSn = user_attributes[\"sn\"]\n\t\t\tuserMail = user_attributes[\"mail\"]\n\t\t\tuserTel = user_attributes[\"telephoneNumber\"]\n\t\t}\n\n\t\tarr := strings.Split(name, \"@\")\n\t\tvar userName, userEmail string\n\t\tif len(arr) == 2 {\n\t\t\tuserName = arr[0]\n\t\t\tuserEmail = name\n\t\t} else {\n\t\t\tuserName = name\n\t\t\tuserEmail = userMail\n\t\t}\n\n\t\tu = ReadUserByName(userName)\n\t\tif u == nil {\n\t\t\t\/\/ 说明用户不存在\n\t\t\tu = &User{\n\t\t\t\tName: userName,\n\t\t\t\tPasswd: \"\",\n\t\t\t\tCnname: userSn,\n\t\t\t\tPhone: userTel,\n\t\t\t\tEmail: userEmail,\n\t\t\t}\n\t\t\t_, err = u.Save()\n\t\t\tif err != nil {\n\t\t\t\tthis.ServeErrJson(\"insert user fail \" + err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t} else {\n\t\tu = ReadUserByName(name)\n\t\tif u == nil {\n\t\t\tthis.ServeErrJson(\"no such user\")\n\t\t\treturn\n\t\t}\n\n\t\tif u.Passwd != str.Md5Encode(g.Config().Salt+password) {\n\t\t\tthis.ServeErrJson(\"password error\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tappSig := this.GetString(\"sig\", \"\")\n\tcallback := this.GetString(\"callback\", \"\")\n\tif appSig != \"\" && callback != \"\" {\n\t\tSaveSessionAttrs(u.Id, appSig, int(time.Now().Unix())+3600*24*30)\n\t} else {\n\t\tthis.CreateSession(u.Id, 3600*24*30)\n\t}\n\n\tthis.ServeDataJson(callback)\n}\n\nfunc (this *AuthController) renderLoginPage(sig, callback string) {\n\tthis.Data[\"CanRegister\"] = g.Config().CanRegister\n\tthis.Data[\"LdapEnabled\"] = g.Config().Ldap.Enabled\n\tthis.Data[\"Sig\"] = sig\n\tthis.Data[\"Callback\"] = callback\n\tthis.Data[\"Shortcut\"] = g.Config().Shortcut\n\tthis.TplNames = \"auth\/login.html\"\n}\n\nfunc (this *AuthController) RegisterGet() {\n\tthis.Data[\"CanRegister\"] = g.Config().CanRegister\n\tthis.Data[\"Shortcut\"] = g.Config().Shortcut\n\tthis.TplNames = \"auth\/register.html\"\n}\n\nfunc (this *AuthController) RegisterPost() {\n\tif !g.Config().CanRegister {\n\t\tthis.ServeErrJson(\"registration system is not open\")\n\t\treturn\n\t}\n\n\tname := strings.TrimSpace(this.GetString(\"name\", \"\"))\n\tpassword := strings.TrimSpace(this.GetString(\"password\", \"\"))\n\trepeatPassword := strings.TrimSpace(this.GetString(\"repeat_password\", \"\"))\n\n\tif password != repeatPassword {\n\t\tthis.ServeErrJson(\"password not equal the repeart one\")\n\t\treturn\n\t}\n\n\tif !utils.IsUsernameValid(name) {\n\t\tthis.ServeErrJson(\"name pattern is invalid\")\n\t\treturn\n\t}\n\n\tif ReadUserIdByName(name) > 0 {\n\t\tthis.ServeErrJson(\"name is already existent\")\n\t\treturn\n\t}\n\n\tlastId, err := InsertRegisterUser(name, str.Md5Encode(g.Config().Salt+password))\n\tif err != nil {\n\t\tthis.ServeErrJson(\"insert user fail \" + err.Error())\n\t\treturn\n\t}\n\n\tthis.CreateSession(lastId, 3600*24*30)\n\n\tthis.ServeOKJson()\n}\n\nfunc (this *AuthController) CreateSession(uid int64, maxAge int) int {\n\tsig := utils.GenerateUUID()\n\texpired := int(time.Now().Unix()) + maxAge\n\tSaveSessionAttrs(uid, sig, expired)\n\tthis.Ctx.SetCookie(\"sig\", sig, maxAge, \"\/\")\n\tthis.Ctx.SetCookie(\"sig\", sig, maxAge, \"\/\", \".owlemon.com\")\n\treturn expired\n}\n[OWL-186] add func (this *AuthController) LoginThirdParty()package uic\n\nimport (\n\t\"encoding\/base64\"\n\t\"github.com\/Cepave\/fe\/g\"\n\t\"github.com\/Cepave\/fe\/http\/base\"\n\t. \"github.com\/Cepave\/fe\/model\/uic\"\n\t\"github.com\/Cepave\/fe\/utils\"\n\t\"github.com\/toolkits\/str\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype AuthController struct {\n\tbase.BaseController\n}\n\nfunc (this *AuthController) Logout() {\n\tu := this.Ctx.Input.GetData(\"CurrentUser\").(*User)\n\tRemoveSessionByUid(u.Id)\n\tthis.Ctx.SetCookie(\"sig\", \"\", 0, \"\/\")\n\tthis.Ctx.SetCookie(\"sig\", \"\", 0, \"\/\", \".owlemon.com\")\n\tthis.Redirect(\"\/auth\/login\", 302)\n}\n\nfunc (this *AuthController) LoginGet() {\n\tappSig := this.GetString(\"sig\", \"\")\n\tcallback := this.GetString(\"callback\", \"\")\n\n\tcookieSig := this.Ctx.GetCookie(\"sig\")\n\tif cookieSig == \"\" {\n\t\tthis.renderLoginPage(appSig, callback)\n\t\treturn\n\t}\n\n\tsessionObj := ReadSessionBySig(cookieSig)\n\tif sessionObj == nil {\n\t\tthis.renderLoginPage(appSig, callback)\n\t\treturn\n\t}\n\n\tif int64(sessionObj.Expired) < time.Now().Unix() {\n\t\tRemoveSessionByUid(sessionObj.Uid)\n\t\tthis.renderLoginPage(appSig, callback)\n\t\treturn\n\t}\n\n\tif appSig != \"\" && callback != \"\" {\n\t\tthis.Redirect(callback, 302)\n\t} else {\n\t\tthis.Redirect(\"\/me\/info\", 302)\n\t}\n}\n\nfunc (this *AuthController) LoginPost() {\n\tname := this.GetString(\"name\", \"\")\n\tpassword := this.GetString(\"password\", \"\")\n\n\tif name == \"\" || password == \"\" {\n\t\tthis.ServeErrJson(\"name or password is blank\")\n\t\treturn\n\t}\n\n\tvar u *User\n\n\tldapEnabled := this.MustGetBool(\"ldap\", false)\n\n\tif ldapEnabled {\n\t\tsucess, err := utils.LdapBind(g.Config().Ldap.Addr,\n\t\t\tg.Config().Ldap.BaseDN,\n\t\t\tg.Config().Ldap.BindDN,\n\t\t\tg.Config().Ldap.BindPasswd,\n\t\t\tg.Config().Ldap.UserField,\n\t\t\tname,\n\t\t\tpassword)\n\t\tif err != nil {\n\t\t\tthis.ServeErrJson(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tif !sucess {\n\t\t\tthis.ServeErrJson(\"name or password error\")\n\t\t\treturn\n\t\t}\n\n\t\tuser_attributes, err := utils.Ldapsearch(g.Config().Ldap.Addr,\n\t\t\tg.Config().Ldap.BaseDN,\n\t\t\tg.Config().Ldap.BindDN,\n\t\t\tg.Config().Ldap.BindPasswd,\n\t\t\tg.Config().Ldap.UserField,\n\t\t\tname,\n\t\t\tg.Config().Ldap.Attributes)\n\t\tuserSn := \"\"\n\t\tuserMail := \"\"\n\t\tuserTel := \"\"\n\t\tif err == nil {\n\t\t\tuserSn = user_attributes[\"sn\"]\n\t\t\tuserMail = user_attributes[\"mail\"]\n\t\t\tuserTel = user_attributes[\"telephoneNumber\"]\n\t\t}\n\n\t\tarr := strings.Split(name, \"@\")\n\t\tvar userName, userEmail string\n\t\tif len(arr) == 2 {\n\t\t\tuserName = arr[0]\n\t\t\tuserEmail = name\n\t\t} else {\n\t\t\tuserName = name\n\t\t\tuserEmail = userMail\n\t\t}\n\n\t\tu = ReadUserByName(userName)\n\t\tif u == nil {\n\t\t\t\/\/ 说明用户不存在\n\t\t\tu = &User{\n\t\t\t\tName: userName,\n\t\t\t\tPasswd: \"\",\n\t\t\t\tCnname: userSn,\n\t\t\t\tPhone: userTel,\n\t\t\t\tEmail: userEmail,\n\t\t\t}\n\t\t\t_, err = u.Save()\n\t\t\tif err != nil {\n\t\t\t\tthis.ServeErrJson(\"insert user fail \" + err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t} else {\n\t\tu = ReadUserByName(name)\n\t\tif u == nil {\n\t\t\tthis.ServeErrJson(\"no such user\")\n\t\t\treturn\n\t\t}\n\n\t\tif u.Passwd != str.Md5Encode(g.Config().Salt+password) {\n\t\t\tthis.ServeErrJson(\"password error\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tappSig := this.GetString(\"sig\", \"\")\n\tcallback := this.GetString(\"callback\", \"\")\n\tif appSig != \"\" && callback != \"\" {\n\t\tSaveSessionAttrs(u.Id, appSig, int(time.Now().Unix())+3600*24*30)\n\t} else {\n\t\tthis.CreateSession(u.Id, 3600*24*30)\n\t}\n\n\tthis.ServeDataJson(callback)\n}\n\nfunc (this *AuthController) renderLoginPage(sig, callback string) {\n\tthis.Data[\"CanRegister\"] = g.Config().CanRegister\n\tthis.Data[\"LdapEnabled\"] = g.Config().Ldap.Enabled\n\tthis.Data[\"Sig\"] = sig\n\tthis.Data[\"Callback\"] = callback\n\tthis.Data[\"Shortcut\"] = g.Config().Shortcut\n\tthis.TplNames = \"auth\/login.html\"\n}\n\nfunc (this *AuthController) RegisterGet() {\n\tthis.Data[\"CanRegister\"] = g.Config().CanRegister\n\tthis.Data[\"Shortcut\"] = g.Config().Shortcut\n\tthis.TplNames = \"auth\/register.html\"\n}\n\nfunc (this *AuthController) RegisterPost() {\n\tif !g.Config().CanRegister {\n\t\tthis.ServeErrJson(\"registration system is not open\")\n\t\treturn\n\t}\n\n\tname := strings.TrimSpace(this.GetString(\"name\", \"\"))\n\tpassword := strings.TrimSpace(this.GetString(\"password\", \"\"))\n\trepeatPassword := strings.TrimSpace(this.GetString(\"repeat_password\", \"\"))\n\n\tif password != repeatPassword {\n\t\tthis.ServeErrJson(\"password not equal the repeart one\")\n\t\treturn\n\t}\n\n\tif !utils.IsUsernameValid(name) {\n\t\tthis.ServeErrJson(\"name pattern is invalid\")\n\t\treturn\n\t}\n\n\tif ReadUserIdByName(name) > 0 {\n\t\tthis.ServeErrJson(\"name is already existent\")\n\t\treturn\n\t}\n\n\tlastId, err := InsertRegisterUser(name, str.Md5Encode(g.Config().Salt+password))\n\tif err != nil {\n\t\tthis.ServeErrJson(\"insert user fail \" + err.Error())\n\t\treturn\n\t}\n\n\tthis.CreateSession(lastId, 3600*24*30)\n\n\tthis.ServeOKJson()\n}\n\nfunc (this *AuthController) CreateSession(uid int64, maxAge int) int {\n\tsig := utils.GenerateUUID()\n\texpired := int(time.Now().Unix()) + maxAge\n\tSaveSessionAttrs(uid, sig, expired)\n\tthis.Ctx.SetCookie(\"sig\", sig, maxAge, \"\/\")\n\tthis.Ctx.SetCookie(\"sig\", sig, maxAge, \"\/\", \".owlemon.com\")\n\treturn expired\n}\n\n\/**\n * @function name: func (this *AuthController) LoginThirdParty()\n * @description: This function returns third party login URL.\n * @related issues: OWL-206\n * @param: void\n * @return: void\n * @author: Don Hsieh\n * @since: 12\/17\/2015\n * @last modified: 12\/17\/2015\n * @called by: beego.Router(\"\/auth\/third-party\", &AuthController{}, \"post:LoginThirdParty\")\n * in fe\/http\/uic\/uic_routes.go\n *\/\nfunc (this *AuthController) LoginThirdParty() {\n\ts := g.Config().Api.Redirect\n\ts = base64.StdEncoding.EncodeToString([]byte(s))\n\tstrEncoded := url.QueryEscape(s)\n\tloginUrl := g.Config().Api.Login + \"\/\" + strEncoded\n\tthis.ServeDataJson(loginUrl)\n}\n<|endoftext|>"} {"text":"\/\/ Package junit defines a JUnit XML report and includes convenience methods\n\/\/ for working with these reports.\npackage junit\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jstemmer\/go-junit-report\/v2\/gtr\"\n)\n\n\/\/ Testsuites is a collection of JUnit testsuites.\ntype Testsuites struct {\n\tXMLName xml.Name `xml:\"testsuites\"`\n\n\tName string `xml:\"name,attr,omitempty\"`\n\tTime string `xml:\"time,attr,omitempty\"` \/\/ total duration in seconds\n\tTests int `xml:\"tests,attr,omitempty\"`\n\tErrors int `xml:\"errors,attr,omitempty\"`\n\tFailures int `xml:\"failures,attr,omitempty\"`\n\tSkipped int `xml:\"skipped,attr,omitempty\"`\n\tDisabled int `xml:\"disabled,attr,omitempty\"`\n\n\tSuites []Testsuite `xml:\"testsuite,omitempty\"`\n}\n\n\/\/ AddSuite adds a Testsuite and updates this testssuites' totals.\nfunc (t *Testsuites) AddSuite(ts Testsuite) {\n\tt.Suites = append(t.Suites, ts)\n\tt.Tests += ts.Tests\n\tt.Errors += ts.Errors\n\tt.Failures += ts.Failures\n\tt.Skipped += ts.Skipped\n\tt.Disabled += ts.Disabled\n}\n\n\/\/ Testsuite is a single JUnit testsuite containing testcases.\ntype Testsuite struct {\n\t\/\/ required attributes\n\tName string `xml:\"name,attr\"`\n\tTests int `xml:\"tests,attr\"`\n\tFailures int `xml:\"failures,attr\"`\n\tErrors int `xml:\"errors,attr\"`\n\tID int `xml:\"id,attr\"`\n\n\t\/\/ optional attributes\n\tDisabled int `xml:\"disabled,attr,omitempty\"`\n\tHostname string `xml:\"hostname,attr,omitempty\"`\n\tPackage string `xml:\"package,attr,omitempty\"`\n\tSkipped int `xml:\"skipped,attr,omitempty\"`\n\tTime string `xml:\"time,attr\"` \/\/ duration in seconds\n\tTimestamp string `xml:\"timestamp,attr,omitempty\"` \/\/ date and time in ISO8601\n\tFile string `xml:\"file,attr,omitempty\"`\n\n\tProperties *[]Property `xml:\"properties>property,omitempty\"`\n\tTestcases []Testcase `xml:\"testcase,omitempty\"`\n\tSystemOut *Output `xml:\"system-out,omitempty\"`\n\tSystemErr *Output `xml:\"system-err,omitempty\"`\n}\n\n\/\/ AddProperty adds a property with the given name and value to this Testsuite.\nfunc (t *Testsuite) AddProperty(name, value string) {\n\tprop := Property{Name: name, Value: value}\n\tif t.Properties == nil {\n\t\tt.Properties = &[]Property{prop}\n\t\treturn\n\t}\n\tprops := append(*t.Properties, prop)\n\tt.Properties = &props\n}\n\n\/\/ AddTestcase adds Testcase tc to this Testsuite.\nfunc (t *Testsuite) AddTestcase(tc Testcase) {\n\tt.Testcases = append(t.Testcases, tc)\n\tt.Tests++\n\n\tif tc.Error != nil {\n\t\tt.Errors++\n\t}\n\n\tif tc.Failure != nil {\n\t\tt.Failures++\n\t}\n\n\tif tc.Skipped != nil {\n\t\tt.Skipped++\n\t}\n}\n\n\/\/ SetTimestamp sets the timestamp in this Testsuite.\nfunc (t *Testsuite) SetTimestamp(timestamp time.Time) {\n\tt.Timestamp = timestamp.Format(time.RFC3339)\n}\n\n\/\/ Testcase represents a single test with its results.\ntype Testcase struct {\n\t\/\/ required attributes\n\tName string `xml:\"name,attr\"`\n\tClassname string `xml:\"classname,attr\"`\n\n\t\/\/ optional attributes\n\tTime string `xml:\"time,attr,omitempty\"` \/\/ duration in seconds\n\tStatus string `xml:\"status,attr,omitempty\"`\n\n\tSkipped *Result `xml:\"skipped,omitempty\"`\n\tError *Result `xml:\"error,omitempty\"`\n\tFailure *Result `xml:\"failure,omitempty\"`\n\tSystemOut *Output `xml:\"system-out,omitempty\"`\n\tSystemErr *Output `xml:\"system-err,omitempty\"`\n}\n\n\/\/ Property represents a key\/value pair.\ntype Property struct {\n\tName string `xml:\"name,attr\"`\n\tValue string `xml:\"value,attr\"`\n}\n\n\/\/ Result represents the result of a single test.\ntype Result struct {\n\tMessage string `xml:\"message,attr\"`\n\tType string `xml:\"type,attr,omitempty\"`\n\tData string `xml:\",cdata\"`\n}\n\n\/\/ Output represents output written to stdout or sderr.\ntype Output struct {\n\tData string `xml:\",cdata\"`\n}\n\n\/\/ CreateFromReport creates a JUnit representation of the given gtr.Report.\nfunc CreateFromReport(report gtr.Report, hostname string) Testsuites {\n\tvar suites Testsuites\n\tfor _, pkg := range report.Packages {\n\t\tvar duration time.Duration\n\t\tsuite := Testsuite{\n\t\t\tName: pkg.Name,\n\t\t\tHostname: hostname,\n\t\t\tID: len(suites.Suites),\n\t\t}\n\n\t\tif !pkg.Timestamp.IsZero() {\n\t\t\tsuite.SetTimestamp(pkg.Timestamp)\n\t\t}\n\n\t\tfor _, p := range pkg.Properties {\n\t\t\tsuite.AddProperty(p.Name, p.Value)\n\t\t}\n\n\t\tif len(pkg.Output) > 0 {\n\t\t\tsuite.SystemOut = &Output{Data: formatOutput(pkg.Output, 0)}\n\t\t}\n\n\t\tif pkg.Coverage > 0 {\n\t\t\tsuite.AddProperty(\"coverage.statements.pct\", fmt.Sprintf(\"%.2f\", pkg.Coverage))\n\t\t}\n\n\t\tfor _, test := range pkg.Tests {\n\t\t\tduration += test.Duration\n\t\t\tsuite.AddTestcase(createTestcaseForTest(pkg.Name, test))\n\t\t}\n\n\t\t\/\/ JUnit doesn't have a good way of dealing with build or runtime\n\t\t\/\/ errors that happen before a test has started, so we create a single\n\t\t\/\/ failing test that contains the build error details.\n\t\tif pkg.BuildError.Name != \"\" {\n\t\t\ttc := Testcase{\n\t\t\t\tClassname: pkg.BuildError.Name,\n\t\t\t\tName: pkg.BuildError.Cause,\n\t\t\t\tTime: formatDuration(0),\n\t\t\t\tError: &Result{\n\t\t\t\t\tMessage: \"Build error\",\n\t\t\t\t\tData: strings.Join(pkg.BuildError.Output, \"\\n\"),\n\t\t\t\t},\n\t\t\t}\n\t\t\tsuite.AddTestcase(tc)\n\t\t}\n\n\t\tif pkg.RunError.Name != \"\" {\n\t\t\ttc := Testcase{\n\t\t\t\tClassname: pkg.RunError.Name,\n\t\t\t\tName: \"Failure\",\n\t\t\t\tTime: formatDuration(0),\n\t\t\t\tError: &Result{\n\t\t\t\t\tMessage: \"Runtime error\",\n\t\t\t\t\tData: strings.Join(pkg.RunError.Output, \"\\n\"),\n\t\t\t\t},\n\t\t\t}\n\t\t\tsuite.AddTestcase(tc)\n\t\t}\n\n\t\tif (pkg.Duration) == 0 {\n\t\t\tsuite.Time = formatDuration(duration)\n\t\t} else {\n\t\t\tsuite.Time = formatDuration(pkg.Duration)\n\t\t}\n\t\tsuites.AddSuite(suite)\n\t}\n\treturn suites\n}\n\nfunc createTestcaseForTest(pkgName string, test gtr.Test) Testcase {\n\ttc := Testcase{\n\t\tClassname: pkgName,\n\t\tName: test.Name,\n\t\tTime: formatDuration(test.Duration),\n\t}\n\n\tif test.Result == gtr.Fail {\n\t\ttc.Failure = &Result{\n\t\t\tMessage: \"Failed\",\n\t\t\tData: formatOutput(test.Output, test.Level),\n\t\t}\n\t} else if test.Result == gtr.Skip {\n\t\ttc.Skipped = &Result{\n\t\t\tMessage: \"Skipped\",\n\t\t\tData: formatOutput(test.Output, test.Level),\n\t\t}\n\t} else if test.Result == gtr.Unknown {\n\t\ttc.Error = &Result{\n\t\t\tMessage: \"No test result found\",\n\t\t\tData: formatOutput(test.Output, test.Level),\n\t\t}\n\t} else if len(test.Output) > 0 {\n\t\ttc.SystemOut = &Output{Data: formatOutput(test.Output, test.Level)}\n\t}\n\treturn tc\n}\n\n\/\/ formatDuration returns the JUnit string representation of the given\n\/\/ duration.\nfunc formatDuration(d time.Duration) string {\n\treturn fmt.Sprintf(\"%.3f\", d.Seconds())\n}\n\n\/\/ formatOutput combines the lines from the given output into a single string.\nfunc formatOutput(output []string, indent int) string {\n\treturn strings.Join(output, \"\\n\")\n}\njunit: Remove unused indent parameter from formatOutput function\/\/ Package junit defines a JUnit XML report and includes convenience methods\n\/\/ for working with these reports.\npackage junit\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jstemmer\/go-junit-report\/v2\/gtr\"\n)\n\n\/\/ Testsuites is a collection of JUnit testsuites.\ntype Testsuites struct {\n\tXMLName xml.Name `xml:\"testsuites\"`\n\n\tName string `xml:\"name,attr,omitempty\"`\n\tTime string `xml:\"time,attr,omitempty\"` \/\/ total duration in seconds\n\tTests int `xml:\"tests,attr,omitempty\"`\n\tErrors int `xml:\"errors,attr,omitempty\"`\n\tFailures int `xml:\"failures,attr,omitempty\"`\n\tSkipped int `xml:\"skipped,attr,omitempty\"`\n\tDisabled int `xml:\"disabled,attr,omitempty\"`\n\n\tSuites []Testsuite `xml:\"testsuite,omitempty\"`\n}\n\n\/\/ AddSuite adds a Testsuite and updates this testssuites' totals.\nfunc (t *Testsuites) AddSuite(ts Testsuite) {\n\tt.Suites = append(t.Suites, ts)\n\tt.Tests += ts.Tests\n\tt.Errors += ts.Errors\n\tt.Failures += ts.Failures\n\tt.Skipped += ts.Skipped\n\tt.Disabled += ts.Disabled\n}\n\n\/\/ Testsuite is a single JUnit testsuite containing testcases.\ntype Testsuite struct {\n\t\/\/ required attributes\n\tName string `xml:\"name,attr\"`\n\tTests int `xml:\"tests,attr\"`\n\tFailures int `xml:\"failures,attr\"`\n\tErrors int `xml:\"errors,attr\"`\n\tID int `xml:\"id,attr\"`\n\n\t\/\/ optional attributes\n\tDisabled int `xml:\"disabled,attr,omitempty\"`\n\tHostname string `xml:\"hostname,attr,omitempty\"`\n\tPackage string `xml:\"package,attr,omitempty\"`\n\tSkipped int `xml:\"skipped,attr,omitempty\"`\n\tTime string `xml:\"time,attr\"` \/\/ duration in seconds\n\tTimestamp string `xml:\"timestamp,attr,omitempty\"` \/\/ date and time in ISO8601\n\tFile string `xml:\"file,attr,omitempty\"`\n\n\tProperties *[]Property `xml:\"properties>property,omitempty\"`\n\tTestcases []Testcase `xml:\"testcase,omitempty\"`\n\tSystemOut *Output `xml:\"system-out,omitempty\"`\n\tSystemErr *Output `xml:\"system-err,omitempty\"`\n}\n\n\/\/ AddProperty adds a property with the given name and value to this Testsuite.\nfunc (t *Testsuite) AddProperty(name, value string) {\n\tprop := Property{Name: name, Value: value}\n\tif t.Properties == nil {\n\t\tt.Properties = &[]Property{prop}\n\t\treturn\n\t}\n\tprops := append(*t.Properties, prop)\n\tt.Properties = &props\n}\n\n\/\/ AddTestcase adds Testcase tc to this Testsuite.\nfunc (t *Testsuite) AddTestcase(tc Testcase) {\n\tt.Testcases = append(t.Testcases, tc)\n\tt.Tests++\n\n\tif tc.Error != nil {\n\t\tt.Errors++\n\t}\n\n\tif tc.Failure != nil {\n\t\tt.Failures++\n\t}\n\n\tif tc.Skipped != nil {\n\t\tt.Skipped++\n\t}\n}\n\n\/\/ SetTimestamp sets the timestamp in this Testsuite.\nfunc (t *Testsuite) SetTimestamp(timestamp time.Time) {\n\tt.Timestamp = timestamp.Format(time.RFC3339)\n}\n\n\/\/ Testcase represents a single test with its results.\ntype Testcase struct {\n\t\/\/ required attributes\n\tName string `xml:\"name,attr\"`\n\tClassname string `xml:\"classname,attr\"`\n\n\t\/\/ optional attributes\n\tTime string `xml:\"time,attr,omitempty\"` \/\/ duration in seconds\n\tStatus string `xml:\"status,attr,omitempty\"`\n\n\tSkipped *Result `xml:\"skipped,omitempty\"`\n\tError *Result `xml:\"error,omitempty\"`\n\tFailure *Result `xml:\"failure,omitempty\"`\n\tSystemOut *Output `xml:\"system-out,omitempty\"`\n\tSystemErr *Output `xml:\"system-err,omitempty\"`\n}\n\n\/\/ Property represents a key\/value pair.\ntype Property struct {\n\tName string `xml:\"name,attr\"`\n\tValue string `xml:\"value,attr\"`\n}\n\n\/\/ Result represents the result of a single test.\ntype Result struct {\n\tMessage string `xml:\"message,attr\"`\n\tType string `xml:\"type,attr,omitempty\"`\n\tData string `xml:\",cdata\"`\n}\n\n\/\/ Output represents output written to stdout or sderr.\ntype Output struct {\n\tData string `xml:\",cdata\"`\n}\n\n\/\/ CreateFromReport creates a JUnit representation of the given gtr.Report.\nfunc CreateFromReport(report gtr.Report, hostname string) Testsuites {\n\tvar suites Testsuites\n\tfor _, pkg := range report.Packages {\n\t\tvar duration time.Duration\n\t\tsuite := Testsuite{\n\t\t\tName: pkg.Name,\n\t\t\tHostname: hostname,\n\t\t\tID: len(suites.Suites),\n\t\t}\n\n\t\tif !pkg.Timestamp.IsZero() {\n\t\t\tsuite.SetTimestamp(pkg.Timestamp)\n\t\t}\n\n\t\tfor _, p := range pkg.Properties {\n\t\t\tsuite.AddProperty(p.Name, p.Value)\n\t\t}\n\n\t\tif len(pkg.Output) > 0 {\n\t\t\tsuite.SystemOut = &Output{Data: formatOutput(pkg.Output)}\n\t\t}\n\n\t\tif pkg.Coverage > 0 {\n\t\t\tsuite.AddProperty(\"coverage.statements.pct\", fmt.Sprintf(\"%.2f\", pkg.Coverage))\n\t\t}\n\n\t\tfor _, test := range pkg.Tests {\n\t\t\tduration += test.Duration\n\t\t\tsuite.AddTestcase(createTestcaseForTest(pkg.Name, test))\n\t\t}\n\n\t\t\/\/ JUnit doesn't have a good way of dealing with build or runtime\n\t\t\/\/ errors that happen before a test has started, so we create a single\n\t\t\/\/ failing test that contains the build error details.\n\t\tif pkg.BuildError.Name != \"\" {\n\t\t\ttc := Testcase{\n\t\t\t\tClassname: pkg.BuildError.Name,\n\t\t\t\tName: pkg.BuildError.Cause,\n\t\t\t\tTime: formatDuration(0),\n\t\t\t\tError: &Result{\n\t\t\t\t\tMessage: \"Build error\",\n\t\t\t\t\tData: strings.Join(pkg.BuildError.Output, \"\\n\"),\n\t\t\t\t},\n\t\t\t}\n\t\t\tsuite.AddTestcase(tc)\n\t\t}\n\n\t\tif pkg.RunError.Name != \"\" {\n\t\t\ttc := Testcase{\n\t\t\t\tClassname: pkg.RunError.Name,\n\t\t\t\tName: \"Failure\",\n\t\t\t\tTime: formatDuration(0),\n\t\t\t\tError: &Result{\n\t\t\t\t\tMessage: \"Runtime error\",\n\t\t\t\t\tData: strings.Join(pkg.RunError.Output, \"\\n\"),\n\t\t\t\t},\n\t\t\t}\n\t\t\tsuite.AddTestcase(tc)\n\t\t}\n\n\t\tif (pkg.Duration) == 0 {\n\t\t\tsuite.Time = formatDuration(duration)\n\t\t} else {\n\t\t\tsuite.Time = formatDuration(pkg.Duration)\n\t\t}\n\t\tsuites.AddSuite(suite)\n\t}\n\treturn suites\n}\n\nfunc createTestcaseForTest(pkgName string, test gtr.Test) Testcase {\n\ttc := Testcase{\n\t\tClassname: pkgName,\n\t\tName: test.Name,\n\t\tTime: formatDuration(test.Duration),\n\t}\n\n\tif test.Result == gtr.Fail {\n\t\ttc.Failure = &Result{\n\t\t\tMessage: \"Failed\",\n\t\t\tData: formatOutput(test.Output),\n\t\t}\n\t} else if test.Result == gtr.Skip {\n\t\ttc.Skipped = &Result{\n\t\t\tMessage: \"Skipped\",\n\t\t\tData: formatOutput(test.Output),\n\t\t}\n\t} else if test.Result == gtr.Unknown {\n\t\ttc.Error = &Result{\n\t\t\tMessage: \"No test result found\",\n\t\t\tData: formatOutput(test.Output),\n\t\t}\n\t} else if len(test.Output) > 0 {\n\t\ttc.SystemOut = &Output{Data: formatOutput(test.Output)}\n\t}\n\treturn tc\n}\n\n\/\/ formatDuration returns the JUnit string representation of the given\n\/\/ duration.\nfunc formatDuration(d time.Duration) string {\n\treturn fmt.Sprintf(\"%.3f\", d.Seconds())\n}\n\n\/\/ formatOutput combines the lines from the given output into a single string.\nfunc formatOutput(output []string) string {\n\treturn strings.Join(output, \"\\n\")\n}\n<|endoftext|>"} {"text":"package kafkalogrus\n\nimport (\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/Shopify\/sarama\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ KafkaLogrusHook is the primary struct\ntype KafkaLogrusHook struct {\n\tid string\n\tdefaultTopic string\n\tinjectHostname bool\n\thostname string\n\tlevels []logrus.Level\n\tformatter logrus.Formatter\n\tproducer sarama.AsyncProducer\n}\n\n\/\/ NewKafkaLogrusHook creates a new KafkaHook\nfunc NewKafkaLogrusHook(id string,\n\tlevels []logrus.Level,\n\tformatter logrus.Formatter,\n\tbrokers []string,\n\tdefaultTopic string,\n\tinjectHostname bool,\n\ttls *tls.Config) (*KafkaLogrusHook, error) {\n\tvar err error\n\tvar producer sarama.AsyncProducer\n\tkafkaConfig := sarama.NewConfig()\n\tkafkaConfig.Producer.RequiredAcks = sarama.WaitForLocal \/\/ Only wait for the leader to ack\n\tkafkaConfig.Producer.Compression = sarama.CompressionSnappy \/\/ Compress messages\n\tkafkaConfig.Producer.Flush.Frequency = 500 * time.Millisecond \/\/ Flush batches every 500ms\n\n\t\/\/ check here if provided *tls.Config is not nil and assign to the sarama config\n\t\/\/ NOTE: we automatically enabled the TLS config because sarama would error out if our\n\t\/\/ config were non-nil but disabled. To avoid issue father down the stack, we enable.\n\tif tls != nil {\n\t\tkafkaConfig.Net.TLS.Enable = true\n\t\tkafkaConfig.Net.TLS.Config = tls\n\t}\n\n\tif producer, err = sarama.NewAsyncProducer(brokers, kafkaConfig); err != nil {\n\t\treturn nil, err\n\t}\n\n\tgo func() {\n\t\tfor err := range producer.Errors() {\n\t\t\tlog.Printf(\"Failed to send log entry to Kafka: %v\\n\", err)\n\t\t}\n\t}()\n\n\tvar hostname string\n\tif hostname, err = os.Hostname(); err != nil {\n\t\thostname = \"localhost\"\n\t}\n\n\thook := &KafkaLogrusHook{\n\t\tid,\n\t\tdefaultTopic,\n\t\tinjectHostname,\n\t\thostname,\n\t\tlevels,\n\t\tformatter,\n\t\tproducer,\n\t}\n\n\treturn hook, nil\n}\n\n\/\/ Id returns the internal ID of the hook\nfunc (hook *KafkaLogrusHook) Id() string {\n\treturn hook.id\n}\n\n\/\/ Levels is required to implement the hook interface from logrus\nfunc (hook *KafkaLogrusHook) Levels() []logrus.Level {\n\treturn hook.levels\n}\n\n\/\/ Fire is required to implement the hook interface from logrus\nfunc (hook *KafkaLogrusHook) Fire(entry *logrus.Entry) error {\n\tvar partitionKey sarama.ByteEncoder\n\tvar b []byte\n\tvar err error\n\n\tt, _ := entry.Data[\"time\"].(time.Time)\n\tif b, err = t.MarshalBinary(); err != nil {\n\t\treturn err\n\t}\n\tpartitionKey = sarama.ByteEncoder(b)\n\n\tif hook.injectHostname {\n\t\tif _, ok := entry.Data[\"host\"]; !ok {\n\t\t\tentry.Data[\"host\"] = hook.hostname\n\t\t}\n\t}\n\n\tif b, err = hook.formatter.Format(entry); err != nil {\n\t\treturn err\n\t}\n\tvalue := sarama.ByteEncoder(b)\n\n\ttopic := hook.defaultTopic\n\tif tsRaw, ok := entry.Data[\"topic\"]; ok {\n\t\tif ts, ok := tsRaw.(string); !ok {\n\t\t\treturn errors.New(\"Incorrect topic filed type (should be string)\")\n\t\t} else {\n\t\t\ttopic = ts\n\t\t}\n\t}\n\thook.producer.Input() <- &sarama.ProducerMessage{\n\t\tKey: partitionKey,\n\t\tTopic: topic,\n\t\tValue: value,\n\t}\n\treturn nil\n}\nFix bug reported on partitionner keypackage kafkalogrus\n\nimport (\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/Shopify\/sarama\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ KafkaLogrusHook is the primary struct\ntype KafkaLogrusHook struct {\n\tid string\n\tdefaultTopic string\n\tinjectHostname bool\n\thostname string\n\tlevels []logrus.Level\n\tformatter logrus.Formatter\n\tproducer sarama.AsyncProducer\n}\n\n\/\/ NewKafkaLogrusHook creates a new KafkaHook\nfunc NewKafkaLogrusHook(id string,\n\tlevels []logrus.Level,\n\tformatter logrus.Formatter,\n\tbrokers []string,\n\tdefaultTopic string,\n\tinjectHostname bool,\n\ttls *tls.Config) (*KafkaLogrusHook, error) {\n\tvar err error\n\tvar producer sarama.AsyncProducer\n\tkafkaConfig := sarama.NewConfig()\n\tkafkaConfig.Producer.RequiredAcks = sarama.WaitForLocal \/\/ Only wait for the leader to ack\n\tkafkaConfig.Producer.Compression = sarama.CompressionSnappy \/\/ Compress messages\n\tkafkaConfig.Producer.Flush.Frequency = 500 * time.Millisecond \/\/ Flush batches every 500ms\n\n\t\/\/ check here if provided *tls.Config is not nil and assign to the sarama config\n\t\/\/ NOTE: we automatically enabled the TLS config because sarama would error out if our\n\t\/\/ config were non-nil but disabled. To avoid issue futher down the stack, we enable.\n\tif tls != nil {\n\t\tkafkaConfig.Net.TLS.Enable = true\n\t\tkafkaConfig.Net.TLS.Config = tls\n\t}\n\n\tif producer, err = sarama.NewAsyncProducer(brokers, kafkaConfig); err != nil {\n\t\treturn nil, err\n\t}\n\n\tgo func() {\n\t\tfor err := range producer.Errors() {\n\t\t\tlog.Printf(\"Failed to send log entry to Kafka: %v\\n\", err)\n\t\t}\n\t}()\n\n\tvar hostname string\n\tif hostname, err = os.Hostname(); err != nil {\n\t\thostname = \"localhost\"\n\t}\n\n\thook := &KafkaLogrusHook{\n\t\tid,\n\t\tdefaultTopic,\n\t\tinjectHostname,\n\t\thostname,\n\t\tlevels,\n\t\tformatter,\n\t\tproducer,\n\t}\n\n\treturn hook, nil\n}\n\n\/\/ Id returns the internal ID of the hook\nfunc (hook *KafkaLogrusHook) Id() string {\n\treturn hook.id\n}\n\n\/\/ Levels is required to implement the hook interface from logrus\nfunc (hook *KafkaLogrusHook) Levels() []logrus.Level {\n\treturn hook.levels\n}\n\n\/\/ Fire is required to implement the hook interface from logrus\nfunc (hook *KafkaLogrusHook) Fire(entry *logrus.Entry) error {\n\tvar partitionKey sarama.ByteEncoder\n\tvar b []byte\n\tvar err error\n\n\tt := entry.Time\n\tif b, err = t.MarshalBinary(); err != nil {\n\t\treturn err\n\t}\n\tpartitionKey = sarama.ByteEncoder(b)\n\n\tif hook.injectHostname {\n\t\tif _, ok := entry.Data[\"host\"]; !ok {\n\t\t\tentry.Data[\"host\"] = hook.hostname\n\t\t}\n\t}\n\n\tif b, err = hook.formatter.Format(entry); err != nil {\n\t\treturn err\n\t}\n\tvalue := sarama.ByteEncoder(b)\n\n\ttopic := hook.defaultTopic\n\tif tsRaw, ok := entry.Data[\"topic\"]; ok {\n\t\tif ts, ok := tsRaw.(string); !ok {\n\t\t\treturn errors.New(\"Incorrect topic filed type (should be string)\")\n\t\t} else {\n\t\t\ttopic = ts\n\t\t}\n\t}\n\thook.producer.Input() <- &sarama.ProducerMessage{\n\t\tKey: partitionKey,\n\t\tTopic: topic,\n\t\tValue: value,\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/\/ Package rpc is a go-micro rpc handler.\npackage rpc\n\nimport (\n\t\"encoding\/json\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/joncalhoun\/qson\"\n\t\"github.com\/micro\/go-micro\/v2\/api\"\n\t\"github.com\/micro\/go-micro\/v2\/api\/handler\"\n\tproto \"github.com\/micro\/go-micro\/v2\/api\/internal\/proto\"\n\t\"github.com\/micro\/go-micro\/v2\/client\"\n\t\"github.com\/micro\/go-micro\/v2\/client\/selector\"\n\t\"github.com\/micro\/go-micro\/v2\/codec\"\n\t\"github.com\/micro\/go-micro\/v2\/codec\/jsonrpc\"\n\t\"github.com\/micro\/go-micro\/v2\/codec\/protorpc\"\n\t\"github.com\/micro\/go-micro\/v2\/errors\"\n\t\"github.com\/micro\/go-micro\/v2\/registry\"\n\t\"github.com\/micro\/go-micro\/v2\/util\/ctx\"\n)\n\nconst (\n\tHandler = \"rpc\"\n)\n\nvar (\n\t\/\/ supported json codecs\n\tjsonCodecs = []string{\n\t\t\"application\/grpc+json\",\n\t\t\"application\/json\",\n\t\t\"application\/json-rpc\",\n\t}\n\n\t\/\/ support proto codecs\n\tprotoCodecs = []string{\n\t\t\"application\/grpc\",\n\t\t\"application\/grpc+proto\",\n\t\t\"application\/proto\",\n\t\t\"application\/protobuf\",\n\t\t\"application\/proto-rpc\",\n\t\t\"application\/octet-stream\",\n\t}\n)\n\ntype rpcHandler struct {\n\topts handler.Options\n\ts *api.Service\n}\n\ntype buffer struct {\n\tio.ReadCloser\n}\n\nfunc (b *buffer) Write(_ []byte) (int, error) {\n\treturn 0, nil\n}\n\n\/\/ strategy is a hack for selection\nfunc strategy(services []*registry.Service) selector.Strategy {\n\treturn func(_ []*registry.Service) selector.Next {\n\t\t\/\/ ignore input to this function, use services above\n\t\treturn selector.Random(services)\n\t}\n}\n\nfunc (h *rpcHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\tvar service *api.Service\n\n\tif h.s != nil {\n\t\t\/\/ we were given the service\n\t\tservice = h.s\n\t} else if h.opts.Router != nil {\n\t\t\/\/ try get service from router\n\t\ts, err := h.opts.Router.Route(r)\n\t\tif err != nil {\n\t\t\twriteError(w, r, errors.InternalServerError(\"go.micro.api\", err.Error()))\n\t\t\treturn\n\t\t}\n\t\tservice = s\n\t} else {\n\t\t\/\/ we have no way of routing the request\n\t\twriteError(w, r, errors.InternalServerError(\"go.micro.api\", \"no route found\"))\n\t\treturn\n\t}\n\n\t\/\/ only allow post when we have the router\n\tif r.Method != \"GET\" && (h.opts.Router != nil && r.Method != \"POST\") {\n\t\thttp.Error(w, \"Method not allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tct := r.Header.Get(\"Content-Type\")\n\n\t\/\/ Strip charset from Content-Type (like `application\/json; charset=UTF-8`)\n\tif idx := strings.IndexRune(ct, ';'); idx >= 0 {\n\t\tct = ct[:idx]\n\t}\n\n\t\/\/ micro client\n\tc := h.opts.Service.Client()\n\n\t\/\/ create strategy\n\tso := selector.WithStrategy(strategy(service.Services))\n\n\t\/\/ get payload\n\tbr, err := requestPayload(r)\n\tif err != nil {\n\t\twriteError(w, r, err)\n\t\treturn\n\t}\n\n\t\/\/ create context\n\tcx := ctx.FromRequest(r)\n\n\tvar rsp []byte\n\n\tswitch {\n\t\/\/ proto codecs\n\tcase hasCodec(ct, protoCodecs):\n\t\trequest := &proto.Message{}\n\t\t\/\/ if the extracted payload isn't empty lets use it\n\t\tif len(br) > 0 {\n\t\t\trequest = proto.NewMessage(br)\n\t\t}\n\n\t\t\/\/ create request\/response\n\t\tresponse := &proto.Message{}\n\n\t\treq := c.NewRequest(\n\t\t\tservice.Name,\n\t\t\tservice.Endpoint.Name,\n\t\t\trequest,\n\t\t\tclient.WithContentType(ct),\n\t\t)\n\n\t\t\/\/ make the call\n\t\tif err := c.Call(cx, req, response, client.WithSelectOption(so)); err != nil {\n\t\t\twriteError(w, r, err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ marshall response\n\t\trsp, _ = response.Marshal()\n\tdefault:\n\t\t\/\/ if json codec is not present set to json\n\t\tif !hasCodec(ct, jsonCodecs) {\n\t\t\tct = \"application\/json\"\n\t\t}\n\n\t\t\/\/ default to trying json\n\t\tvar request json.RawMessage\n\t\t\/\/ if the extracted payload isn't empty lets use it\n\t\tif len(br) > 0 {\n\t\t\trequest = json.RawMessage(br)\n\t\t}\n\n\t\t\/\/ create request\/response\n\t\tvar response json.RawMessage\n\n\t\treq := c.NewRequest(\n\t\t\tservice.Name,\n\t\t\tservice.Endpoint.Name,\n\t\t\t&request,\n\t\t\tclient.WithContentType(ct),\n\t\t)\n\n\t\t\/\/ make the call\n\t\tif err := c.Call(cx, req, &response, client.WithSelectOption(so)); err != nil {\n\t\t\twriteError(w, r, err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ marshall response\n\t\trsp, _ = response.MarshalJSON()\n\t}\n\n\t\/\/ write the response\n\twriteResponse(w, r, rsp)\n}\n\nfunc (rh *rpcHandler) String() string {\n\treturn \"rpc\"\n}\n\nfunc hasCodec(ct string, codecs []string) bool {\n\tfor _, codec := range codecs {\n\t\tif ct == codec {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ requestPayload takes a *http.Request.\n\/\/ If the request is a GET the query string parameters are extracted and marshaled to JSON and the raw bytes are returned.\n\/\/ If the request method is a POST the request body is read and returned\nfunc requestPayload(r *http.Request) ([]byte, error) {\n\t\/\/ we have to decode json-rpc and proto-rpc because we suck\n\t\/\/ well actually because there's no proxy codec right now\n\tswitch r.Header.Get(\"Content-Type\") {\n\tcase \"application\/json-rpc\":\n\t\tmsg := codec.Message{\n\t\t\tType: codec.Request,\n\t\t\tHeader: make(map[string]string),\n\t\t}\n\t\tc := jsonrpc.NewCodec(&buffer{r.Body})\n\t\tif err := c.ReadHeader(&msg, codec.Request); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar raw json.RawMessage\n\t\tif err := c.ReadBody(&raw); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn ([]byte)(raw), nil\n\tcase \"application\/proto-rpc\", \"application\/octet-stream\":\n\t\tmsg := codec.Message{\n\t\t\tType: codec.Request,\n\t\t\tHeader: make(map[string]string),\n\t\t}\n\t\tc := protorpc.NewCodec(&buffer{r.Body})\n\t\tif err := c.ReadHeader(&msg, codec.Request); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar raw proto.Message\n\t\tif err := c.ReadBody(&raw); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tb, _ := raw.Marshal()\n\t\treturn b, nil\n\t}\n\n\t\/\/ otherwise as per usual\n\n\tswitch r.Method {\n\tcase \"GET\":\n\t\tif len(r.URL.RawQuery) > 0 {\n\t\t\treturn qson.ToJSON(r.URL.RawQuery)\n\t\t}\n\tcase \"PATCH\", \"POST\":\n\t\treturn ioutil.ReadAll(r.Body)\n\t}\n\n\treturn []byte{}, nil\n}\n\nfunc writeError(w http.ResponseWriter, r *http.Request, err error) {\n\tce := errors.Parse(err.Error())\n\n\tswitch ce.Code {\n\tcase 0:\n\t\t\/\/ assuming it's totally screwed\n\t\tce.Code = 500\n\t\tce.Id = \"go.micro.api\"\n\t\tce.Status = http.StatusText(500)\n\t\tce.Detail = \"error during request: \" + ce.Detail\n\t\tw.WriteHeader(500)\n\tdefault:\n\t\tw.WriteHeader(int(ce.Code))\n\t}\n\n\t\/\/ response content type\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\n\t\/\/ Set trailers\n\tif strings.Contains(r.Header.Get(\"Content-Type\"), \"application\/grpc\") {\n\t\tw.Header().Set(\"Trailer\", \"grpc-status\")\n\t\tw.Header().Set(\"Trailer\", \"grpc-message\")\n\t\tw.Header().Set(\"grpc-status\", \"13\")\n\t\tw.Header().Set(\"grpc-message\", ce.Detail)\n\t}\n\n\tw.Write([]byte(ce.Error()))\n}\n\nfunc writeResponse(w http.ResponseWriter, r *http.Request, rsp []byte) {\n\tw.Header().Set(\"Content-Type\", r.Header.Get(\"Content-Type\"))\n\tw.Header().Set(\"Content-Length\", strconv.Itoa(len(rsp)))\n\n\t\/\/ Set trailers\n\tif strings.Contains(r.Header.Get(\"Content-Type\"), \"application\/grpc\") {\n\t\tw.Header().Set(\"Trailer\", \"grpc-status\")\n\t\tw.Header().Set(\"Trailer\", \"grpc-message\")\n\t\tw.Header().Set(\"grpc-status\", \"0\")\n\t\tw.Header().Set(\"grpc-message\", \"\")\n\t}\n\n\t\/\/ write response\n\tw.Write(rsp)\n}\n\nfunc NewHandler(opts ...handler.Option) handler.Handler {\n\toptions := handler.NewOptions(opts...)\n\treturn &rpcHandler{\n\t\topts: options,\n\t}\n}\n\nfunc WithService(s *api.Service, opts ...handler.Option) handler.Handler {\n\toptions := handler.NewOptions(opts...)\n\treturn &rpcHandler{\n\t\topts: options,\n\t\ts: s,\n\t}\n}\nparse url encoded form in rpc handler (#1183)\/\/ Package rpc is a go-micro rpc handler.\npackage rpc\n\nimport (\n\t\"encoding\/json\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/joncalhoun\/qson\"\n\t\"github.com\/micro\/go-micro\/v2\/api\"\n\t\"github.com\/micro\/go-micro\/v2\/api\/handler\"\n\tproto \"github.com\/micro\/go-micro\/v2\/api\/internal\/proto\"\n\t\"github.com\/micro\/go-micro\/v2\/client\"\n\t\"github.com\/micro\/go-micro\/v2\/client\/selector\"\n\t\"github.com\/micro\/go-micro\/v2\/codec\"\n\t\"github.com\/micro\/go-micro\/v2\/codec\/jsonrpc\"\n\t\"github.com\/micro\/go-micro\/v2\/codec\/protorpc\"\n\t\"github.com\/micro\/go-micro\/v2\/errors\"\n\t\"github.com\/micro\/go-micro\/v2\/registry\"\n\t\"github.com\/micro\/go-micro\/v2\/util\/ctx\"\n)\n\nconst (\n\tHandler = \"rpc\"\n)\n\nvar (\n\t\/\/ supported json codecs\n\tjsonCodecs = []string{\n\t\t\"application\/grpc+json\",\n\t\t\"application\/json\",\n\t\t\"application\/json-rpc\",\n\t}\n\n\t\/\/ support proto codecs\n\tprotoCodecs = []string{\n\t\t\"application\/grpc\",\n\t\t\"application\/grpc+proto\",\n\t\t\"application\/proto\",\n\t\t\"application\/protobuf\",\n\t\t\"application\/proto-rpc\",\n\t\t\"application\/octet-stream\",\n\t}\n)\n\ntype rpcHandler struct {\n\topts handler.Options\n\ts *api.Service\n}\n\ntype buffer struct {\n\tio.ReadCloser\n}\n\nfunc (b *buffer) Write(_ []byte) (int, error) {\n\treturn 0, nil\n}\n\n\/\/ strategy is a hack for selection\nfunc strategy(services []*registry.Service) selector.Strategy {\n\treturn func(_ []*registry.Service) selector.Next {\n\t\t\/\/ ignore input to this function, use services above\n\t\treturn selector.Random(services)\n\t}\n}\n\nfunc (h *rpcHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\tvar service *api.Service\n\n\tif h.s != nil {\n\t\t\/\/ we were given the service\n\t\tservice = h.s\n\t} else if h.opts.Router != nil {\n\t\t\/\/ try get service from router\n\t\ts, err := h.opts.Router.Route(r)\n\t\tif err != nil {\n\t\t\twriteError(w, r, errors.InternalServerError(\"go.micro.api\", err.Error()))\n\t\t\treturn\n\t\t}\n\t\tservice = s\n\t} else {\n\t\t\/\/ we have no way of routing the request\n\t\twriteError(w, r, errors.InternalServerError(\"go.micro.api\", \"no route found\"))\n\t\treturn\n\t}\n\n\t\/\/ only allow post when we have the router\n\tif r.Method != \"GET\" && (h.opts.Router != nil && r.Method != \"POST\") {\n\t\thttp.Error(w, \"Method not allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tct := r.Header.Get(\"Content-Type\")\n\n\t\/\/ Strip charset from Content-Type (like `application\/json; charset=UTF-8`)\n\tif idx := strings.IndexRune(ct, ';'); idx >= 0 {\n\t\tct = ct[:idx]\n\t}\n\n\t\/\/ micro client\n\tc := h.opts.Service.Client()\n\n\t\/\/ create strategy\n\tso := selector.WithStrategy(strategy(service.Services))\n\n\t\/\/ get payload\n\tbr, err := requestPayload(r)\n\tif err != nil {\n\t\twriteError(w, r, err)\n\t\treturn\n\t}\n\n\t\/\/ create context\n\tcx := ctx.FromRequest(r)\n\n\tvar rsp []byte\n\n\tswitch {\n\t\/\/ proto codecs\n\tcase hasCodec(ct, protoCodecs):\n\t\trequest := &proto.Message{}\n\t\t\/\/ if the extracted payload isn't empty lets use it\n\t\tif len(br) > 0 {\n\t\t\trequest = proto.NewMessage(br)\n\t\t}\n\n\t\t\/\/ create request\/response\n\t\tresponse := &proto.Message{}\n\n\t\treq := c.NewRequest(\n\t\t\tservice.Name,\n\t\t\tservice.Endpoint.Name,\n\t\t\trequest,\n\t\t\tclient.WithContentType(ct),\n\t\t)\n\n\t\t\/\/ make the call\n\t\tif err := c.Call(cx, req, response, client.WithSelectOption(so)); err != nil {\n\t\t\twriteError(w, r, err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ marshall response\n\t\trsp, _ = response.Marshal()\n\tdefault:\n\t\t\/\/ if json codec is not present set to json\n\t\tif !hasCodec(ct, jsonCodecs) {\n\t\t\tct = \"application\/json\"\n\t\t}\n\n\t\t\/\/ default to trying json\n\t\tvar request json.RawMessage\n\t\t\/\/ if the extracted payload isn't empty lets use it\n\t\tif len(br) > 0 {\n\t\t\trequest = json.RawMessage(br)\n\t\t}\n\n\t\t\/\/ create request\/response\n\t\tvar response json.RawMessage\n\n\t\treq := c.NewRequest(\n\t\t\tservice.Name,\n\t\t\tservice.Endpoint.Name,\n\t\t\t&request,\n\t\t\tclient.WithContentType(ct),\n\t\t)\n\n\t\t\/\/ make the call\n\t\tif err := c.Call(cx, req, &response, client.WithSelectOption(so)); err != nil {\n\t\t\twriteError(w, r, err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ marshall response\n\t\trsp, _ = response.MarshalJSON()\n\t}\n\n\t\/\/ write the response\n\twriteResponse(w, r, rsp)\n}\n\nfunc (rh *rpcHandler) String() string {\n\treturn \"rpc\"\n}\n\nfunc hasCodec(ct string, codecs []string) bool {\n\tfor _, codec := range codecs {\n\t\tif ct == codec {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ requestPayload takes a *http.Request.\n\/\/ If the request is a GET the query string parameters are extracted and marshaled to JSON and the raw bytes are returned.\n\/\/ If the request method is a POST the request body is read and returned\nfunc requestPayload(r *http.Request) ([]byte, error) {\n\t\/\/ we have to decode json-rpc and proto-rpc because we suck\n\t\/\/ well actually because there's no proxy codec right now\n\tct := r.Header.Get(\"Content-Type\")\n\tswitch {\n\tcase strings.Contains(ct, \"application\/json-rpc\"):\n\t\tmsg := codec.Message{\n\t\t\tType: codec.Request,\n\t\t\tHeader: make(map[string]string),\n\t\t}\n\t\tc := jsonrpc.NewCodec(&buffer{r.Body})\n\t\tif err := c.ReadHeader(&msg, codec.Request); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar raw json.RawMessage\n\t\tif err := c.ReadBody(&raw); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn ([]byte)(raw), nil\n\tcase strings.Contains(ct, \"application\/proto-rpc\"), strings.Contains(ct, \"application\/octet-stream\"):\n\t\tmsg := codec.Message{\n\t\t\tType: codec.Request,\n\t\t\tHeader: make(map[string]string),\n\t\t}\n\t\tc := protorpc.NewCodec(&buffer{r.Body})\n\t\tif err := c.ReadHeader(&msg, codec.Request); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar raw proto.Message\n\t\tif err := c.ReadBody(&raw); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tb, _ := raw.Marshal()\n\t\treturn b, nil\n\tcase strings.Contains(ct, \"application\/www-x-form-urlencoded\"):\n\t\tr.ParseForm()\n\n\t\t\/\/ generate a new set of values from the form\n\t\tvals := make(map[string]string)\n\t\tfor k, v := range r.Form {\n\t\t\tvals[k] = strings.Join(v, \",\")\n\t\t}\n\n\t\t\/\/ marshal\n\t\tb, _ := json.Marshal(vals)\n\t\treturn b, nil\n\t\t\/\/ TODO: application\/grpc\n\t}\n\n\t\/\/ otherwise as per usual\n\n\tswitch r.Method {\n\tcase \"GET\":\n\t\tif len(r.URL.RawQuery) > 0 {\n\t\t\treturn qson.ToJSON(r.URL.RawQuery)\n\t\t}\n\tcase \"PATCH\", \"POST\":\n\t\treturn ioutil.ReadAll(r.Body)\n\t}\n\n\treturn []byte{}, nil\n}\n\nfunc writeError(w http.ResponseWriter, r *http.Request, err error) {\n\tce := errors.Parse(err.Error())\n\n\tswitch ce.Code {\n\tcase 0:\n\t\t\/\/ assuming it's totally screwed\n\t\tce.Code = 500\n\t\tce.Id = \"go.micro.api\"\n\t\tce.Status = http.StatusText(500)\n\t\tce.Detail = \"error during request: \" + ce.Detail\n\t\tw.WriteHeader(500)\n\tdefault:\n\t\tw.WriteHeader(int(ce.Code))\n\t}\n\n\t\/\/ response content type\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\n\t\/\/ Set trailers\n\tif strings.Contains(r.Header.Get(\"Content-Type\"), \"application\/grpc\") {\n\t\tw.Header().Set(\"Trailer\", \"grpc-status\")\n\t\tw.Header().Set(\"Trailer\", \"grpc-message\")\n\t\tw.Header().Set(\"grpc-status\", \"13\")\n\t\tw.Header().Set(\"grpc-message\", ce.Detail)\n\t}\n\n\tw.Write([]byte(ce.Error()))\n}\n\nfunc writeResponse(w http.ResponseWriter, r *http.Request, rsp []byte) {\n\tw.Header().Set(\"Content-Type\", r.Header.Get(\"Content-Type\"))\n\tw.Header().Set(\"Content-Length\", strconv.Itoa(len(rsp)))\n\n\t\/\/ Set trailers\n\tif strings.Contains(r.Header.Get(\"Content-Type\"), \"application\/grpc\") {\n\t\tw.Header().Set(\"Trailer\", \"grpc-status\")\n\t\tw.Header().Set(\"Trailer\", \"grpc-message\")\n\t\tw.Header().Set(\"grpc-status\", \"0\")\n\t\tw.Header().Set(\"grpc-message\", \"\")\n\t}\n\n\t\/\/ write response\n\tw.Write(rsp)\n}\n\nfunc NewHandler(opts ...handler.Option) handler.Handler {\n\toptions := handler.NewOptions(opts...)\n\treturn &rpcHandler{\n\t\topts: options,\n\t}\n}\n\nfunc WithService(s *api.Service, opts ...handler.Option) handler.Handler {\n\toptions := handler.NewOptions(opts...)\n\treturn &rpcHandler{\n\t\topts: options,\n\t\ts: s,\n\t}\n}\n<|endoftext|>"} {"text":"package batch\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/Jeffail\/benthos\/v3\/internal\/bloblang\"\n\t\"github.com\/Jeffail\/benthos\/v3\/internal\/bloblang\/mapping\"\n\t\"github.com\/Jeffail\/benthos\/v3\/lib\/condition\"\n\t\"github.com\/Jeffail\/benthos\/v3\/lib\/log\"\n\t\"github.com\/Jeffail\/benthos\/v3\/lib\/message\"\n\t\"github.com\/Jeffail\/benthos\/v3\/lib\/metrics\"\n\t\"github.com\/Jeffail\/benthos\/v3\/lib\/processor\"\n\t\"github.com\/Jeffail\/benthos\/v3\/lib\/types\"\n)\n\n\/\/ SanitisePolicyConfig returns a policy config structure ready to be marshalled\n\/\/ with irrelevant fields omitted.\nfunc SanitisePolicyConfig(policy PolicyConfig) (interface{}, error) {\n\tprocConfs := make([]interface{}, len(policy.Processors))\n\tfor i, pConf := range policy.Processors {\n\t\tvar err error\n\t\tif procConfs[i], err = processor.SanitiseConfig(pConf); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tbSanit := map[string]interface{}{\n\t\t\"byte_size\": policy.ByteSize,\n\t\t\"count\": policy.Count,\n\t\t\"check\": policy.Check,\n\t\t\"period\": policy.Period,\n\t\t\"processors\": procConfs,\n\t}\n\tif !isNoopCondition(policy.Condition) {\n\t\tcondSanit, err := condition.SanitiseConfig(policy.Condition)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbSanit[\"condition\"] = condSanit\n\t}\n\treturn bSanit, nil\n}\n\n\/\/------------------------------------------------------------------------------\n\nfunc isNoopCondition(conf condition.Config) bool {\n\treturn conf.Type == condition.TypeStatic && !conf.Static\n}\n\n\/\/ PolicyConfig contains configuration parameters for a batch policy.\ntype PolicyConfig struct {\n\tByteSize int `json:\"byte_size\" yaml:\"byte_size\"`\n\tCount int `json:\"count\" yaml:\"count\"`\n\tCondition condition.Config `json:\"condition\" yaml:\"condition\"`\n\tCheck string `json:\"check\" yaml:\"check\"`\n\tPeriod string `json:\"period\" yaml:\"period\"`\n\tProcessors []processor.Config `json:\"processors\" yaml:\"processors\"`\n}\n\n\/\/ NewPolicyConfig creates a default PolicyConfig.\nfunc NewPolicyConfig() PolicyConfig {\n\tcond := condition.NewConfig()\n\tcond.Type = \"static\"\n\tcond.Static = false\n\treturn PolicyConfig{\n\t\tByteSize: 0,\n\t\tCount: 0,\n\t\tCondition: cond,\n\t\tCheck: \"\",\n\t\tPeriod: \"\",\n\t\tProcessors: []processor.Config{},\n\t}\n}\n\n\/\/ IsNoop returns true if this batch policy configuration does nothing.\nfunc (p PolicyConfig) IsNoop() bool {\n\tif p.ByteSize > 0 {\n\t\treturn false\n\t}\n\tif p.Count > 1 {\n\t\treturn false\n\t}\n\tif !isNoopCondition(p.Condition) {\n\t\treturn false\n\t}\n\tif len(p.Check) > 0 {\n\t\treturn false\n\t}\n\tif len(p.Period) > 0 {\n\t\treturn false\n\t}\n\tif len(p.Processors) > 0 {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (p PolicyConfig) isLimited() bool {\n\tif p.ByteSize > 0 {\n\t\treturn true\n\t}\n\tif p.Count > 0 {\n\t\treturn true\n\t}\n\tif len(p.Period) > 0 {\n\t\treturn true\n\t}\n\tif !isNoopCondition(p.Condition) {\n\t\treturn true\n\t}\n\tif len(p.Check) > 0 {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/------------------------------------------------------------------------------\n\n\/\/ Policy implements a batching policy by buffering messages until, based on a\n\/\/ set of rules, the buffered messages are ready to be sent onwards as a batch.\ntype Policy struct {\n\tlog log.Modular\n\n\tbyteSize int\n\tcount int\n\tperiod time.Duration\n\tcond condition.Type\n\tcheck *mapping.Executor\n\tprocs []types.Processor\n\tsizeTally int\n\tparts []types.Part\n\n\ttriggered bool\n\tlastBatch time.Time\n\n\tmSizeBatch metrics.StatCounter\n\tmCountBatch metrics.StatCounter\n\tmPeriodBatch metrics.StatCounter\n\tmCheckBatch metrics.StatCounter\n\tmCondBatch metrics.StatCounter\n}\n\n\/\/ NewPolicy creates an empty policy with default rules.\nfunc NewPolicy(\n\tconf PolicyConfig,\n\tmgr types.Manager,\n\tlog log.Modular,\n\tstats metrics.Type,\n) (*Policy, error) {\n\tif !conf.isLimited() {\n\t\treturn nil, errors.New(\"batch policy must have at least one active trigger\")\n\t}\n\tvar cond types.Condition\n\tvar err error\n\tif !isNoopCondition(conf.Condition) {\n\t\tif cond, err = condition.New(\n\t\t\tconf.Condition, mgr, log.NewModule(\".condition\"), metrics.Namespaced(stats, \"condition\"),\n\t\t); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create condition: %v\", err)\n\t\t}\n\t}\n\tvar check *mapping.Executor\n\tif len(conf.Check) > 0 {\n\t\tif check, err = bloblang.NewMapping(\"\", conf.Check); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse check: %v\", err)\n\t\t}\n\t}\n\tvar period time.Duration\n\tif len(conf.Period) > 0 {\n\t\tif period, err = time.ParseDuration(conf.Period); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse duration string: %v\", err)\n\t\t}\n\t}\n\tvar procs []types.Processor\n\tfor i, pconf := range conf.Processors {\n\t\tprefix := fmt.Sprintf(\"%v\", i)\n\t\tproc, err := processor.New(pconf, mgr, log.NewModule(\".\"+prefix), metrics.Namespaced(stats, prefix))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create processor '%v': %v\", i, err)\n\t\t}\n\t\tprocs = append(procs, proc)\n\t}\n\treturn &Policy{\n\t\tlog: log,\n\n\t\tbyteSize: conf.ByteSize,\n\t\tcount: conf.Count,\n\t\tperiod: period,\n\t\tcond: cond,\n\t\tcheck: check,\n\t\tprocs: procs,\n\n\t\tlastBatch: time.Now(),\n\n\t\tmSizeBatch: stats.GetCounter(\"on_size\"),\n\t\tmCountBatch: stats.GetCounter(\"on_count\"),\n\t\tmPeriodBatch: stats.GetCounter(\"on_period\"),\n\t\tmCheckBatch: stats.GetCounter(\"on_check\"),\n\t\tmCondBatch: stats.GetCounter(\"on_condition\"),\n\t}, nil\n}\n\n\/\/------------------------------------------------------------------------------\n\n\/\/ Add a new message part to this batch policy. Returns true if this part\n\/\/ triggers the conditions of the policy.\nfunc (p *Policy) Add(part types.Part) bool {\n\tp.sizeTally += len(part.Get())\n\tp.parts = append(p.parts, part)\n\n\tif !p.triggered && p.count > 0 && len(p.parts) >= p.count {\n\t\tp.triggered = true\n\t\tp.mCountBatch.Incr(1)\n\t\tp.log.Traceln(\"Batching based on count\")\n\t}\n\tif !p.triggered && p.byteSize > 0 && p.sizeTally >= p.byteSize {\n\t\tp.triggered = true\n\t\tp.mSizeBatch.Incr(1)\n\t\tp.log.Traceln(\"Batching based on byte_size\")\n\t}\n\ttmpMsg := message.New(nil)\n\ttmpMsg.Append(part)\n\tif p.cond != nil && !p.triggered && p.cond.Check(tmpMsg) {\n\t\tp.triggered = true\n\t\tp.mCondBatch.Incr(1)\n\t\tp.log.Traceln(\"Batching based on condition\")\n\t}\n\ttmpMsg.SetAll(p.parts)\n\tif p.check != nil && !p.triggered {\n\t\ttest, err := p.check.QueryPart(tmpMsg.Len()-1, tmpMsg)\n\t\tif err != nil {\n\t\t\ttest = false\n\t\t\tp.log.Errorf(\"Failed to execute batch check query: %v\\n\", err)\n\t\t}\n\t\tif test {\n\t\t\tp.triggered = true\n\t\t\tp.mCheckBatch.Incr(1)\n\t\t\tp.log.Traceln(\"Batching based on check query\")\n\t\t}\n\t}\n\treturn p.triggered || (p.period > 0 && time.Since(p.lastBatch) > p.period)\n}\n\n\/\/ Flush clears all messages stored by this batch policy. Returns nil if the\n\/\/ policy is currently empty.\nfunc (p *Policy) Flush() types.Message {\n\tvar newMsg types.Message\n\n\tresultMsgs := p.FlushAny()\n\tif len(resultMsgs) == 1 {\n\t\tnewMsg = resultMsgs[0]\n\t} else if len(resultMsgs) > 1 {\n\t\tnewMsg = message.New(nil)\n\t\tvar parts []types.Part\n\t\tfor _, m := range resultMsgs {\n\t\t\tm.Iter(func(_ int, p types.Part) error {\n\t\t\t\tparts = append(parts, p)\n\t\t\t\treturn nil\n\t\t\t})\n\t\t}\n\t\tnewMsg.SetAll(parts)\n\t}\n\treturn newMsg\n}\n\n\/\/ FlushAny clears all messages stored by this batch policy and returns any\n\/\/ number of discrete message batches. Returns nil if the policy is currently\n\/\/ empty.\nfunc (p *Policy) FlushAny() []types.Message {\n\tvar newMsg types.Message\n\tif len(p.parts) > 0 {\n\t\tif !p.triggered && p.period > 0 && time.Since(p.lastBatch) > p.period {\n\t\t\tp.mPeriodBatch.Incr(1)\n\t\t\tp.log.Traceln(\"Batching based on period\")\n\t\t}\n\t\tnewMsg = message.New(nil)\n\t\tnewMsg.Append(p.parts...)\n\t}\n\tp.parts = nil\n\tp.sizeTally = 0\n\tp.lastBatch = time.Now()\n\tp.triggered = false\n\n\tif newMsg == nil {\n\t\treturn nil\n\t}\n\n\tif len(p.procs) > 0 {\n\t\tresultMsgs, res := processor.ExecuteAll(p.procs, newMsg)\n\t\tif res != nil {\n\t\t\tif err := res.Error(); err != nil {\n\t\t\t\tp.log.Errorf(\"Batch processors resulted in error: %v, the batch has been dropped.\", err)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\treturn resultMsgs\n\t}\n\n\treturn []types.Message{newMsg}\n}\n\n\/\/ Count returns the number of currently buffered message parts within this\n\/\/ policy.\nfunc (p *Policy) Count() int {\n\treturn len(p.parts)\n}\n\n\/\/ UntilNext returns a duration indicating how long until the current batch\n\/\/ should be flushed due to a configured period. A negative duration indicates\n\/\/ a period has not been set.\nfunc (p *Policy) UntilNext() time.Duration {\n\tif p.period <= 0 {\n\t\treturn -1\n\t}\n\treturn time.Until(p.lastBatch.Add(p.period))\n}\n\n\/\/------------------------------------------------------------------------------\n\n\/\/ CloseAsync shuts down the policy resources.\nfunc (p *Policy) CloseAsync() {\n\tfor _, c := range p.procs {\n\t\tc.CloseAsync()\n\t}\n}\n\n\/\/ WaitForClose blocks until the processor has closed down.\nfunc (p *Policy) WaitForClose(timeout time.Duration) error {\n\tstopBy := time.Now().Add(timeout)\n\tfor _, c := range p.procs {\n\t\tif err := c.WaitForClose(time.Until(stopBy)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/------------------------------------------------------------------------------\nAdd batch policy ceiling warning logpackage batch\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/Jeffail\/benthos\/v3\/internal\/bloblang\"\n\t\"github.com\/Jeffail\/benthos\/v3\/internal\/bloblang\/mapping\"\n\t\"github.com\/Jeffail\/benthos\/v3\/lib\/condition\"\n\t\"github.com\/Jeffail\/benthos\/v3\/lib\/log\"\n\t\"github.com\/Jeffail\/benthos\/v3\/lib\/message\"\n\t\"github.com\/Jeffail\/benthos\/v3\/lib\/metrics\"\n\t\"github.com\/Jeffail\/benthos\/v3\/lib\/processor\"\n\t\"github.com\/Jeffail\/benthos\/v3\/lib\/types\"\n)\n\n\/\/ SanitisePolicyConfig returns a policy config structure ready to be marshalled\n\/\/ with irrelevant fields omitted.\nfunc SanitisePolicyConfig(policy PolicyConfig) (interface{}, error) {\n\tprocConfs := make([]interface{}, len(policy.Processors))\n\tfor i, pConf := range policy.Processors {\n\t\tvar err error\n\t\tif procConfs[i], err = processor.SanitiseConfig(pConf); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tbSanit := map[string]interface{}{\n\t\t\"byte_size\": policy.ByteSize,\n\t\t\"count\": policy.Count,\n\t\t\"check\": policy.Check,\n\t\t\"period\": policy.Period,\n\t\t\"processors\": procConfs,\n\t}\n\tif !isNoopCondition(policy.Condition) {\n\t\tcondSanit, err := condition.SanitiseConfig(policy.Condition)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbSanit[\"condition\"] = condSanit\n\t}\n\treturn bSanit, nil\n}\n\n\/\/------------------------------------------------------------------------------\n\nfunc isNoopCondition(conf condition.Config) bool {\n\treturn conf.Type == condition.TypeStatic && !conf.Static\n}\n\n\/\/ PolicyConfig contains configuration parameters for a batch policy.\ntype PolicyConfig struct {\n\tByteSize int `json:\"byte_size\" yaml:\"byte_size\"`\n\tCount int `json:\"count\" yaml:\"count\"`\n\tCondition condition.Config `json:\"condition\" yaml:\"condition\"`\n\tCheck string `json:\"check\" yaml:\"check\"`\n\tPeriod string `json:\"period\" yaml:\"period\"`\n\tProcessors []processor.Config `json:\"processors\" yaml:\"processors\"`\n}\n\n\/\/ NewPolicyConfig creates a default PolicyConfig.\nfunc NewPolicyConfig() PolicyConfig {\n\tcond := condition.NewConfig()\n\tcond.Type = \"static\"\n\tcond.Static = false\n\treturn PolicyConfig{\n\t\tByteSize: 0,\n\t\tCount: 0,\n\t\tCondition: cond,\n\t\tCheck: \"\",\n\t\tPeriod: \"\",\n\t\tProcessors: []processor.Config{},\n\t}\n}\n\n\/\/ IsNoop returns true if this batch policy configuration does nothing.\nfunc (p PolicyConfig) IsNoop() bool {\n\tif p.ByteSize > 0 {\n\t\treturn false\n\t}\n\tif p.Count > 1 {\n\t\treturn false\n\t}\n\tif !isNoopCondition(p.Condition) {\n\t\treturn false\n\t}\n\tif len(p.Check) > 0 {\n\t\treturn false\n\t}\n\tif len(p.Period) > 0 {\n\t\treturn false\n\t}\n\tif len(p.Processors) > 0 {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (p PolicyConfig) isLimited() bool {\n\tif p.ByteSize > 0 {\n\t\treturn true\n\t}\n\tif p.Count > 0 {\n\t\treturn true\n\t}\n\tif len(p.Period) > 0 {\n\t\treturn true\n\t}\n\tif !isNoopCondition(p.Condition) {\n\t\treturn true\n\t}\n\tif len(p.Check) > 0 {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (p PolicyConfig) isHardLimited() bool {\n\tif p.ByteSize > 0 {\n\t\treturn true\n\t}\n\tif p.Count > 0 {\n\t\treturn true\n\t}\n\tif len(p.Period) > 0 {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/------------------------------------------------------------------------------\n\n\/\/ Policy implements a batching policy by buffering messages until, based on a\n\/\/ set of rules, the buffered messages are ready to be sent onwards as a batch.\ntype Policy struct {\n\tlog log.Modular\n\n\tbyteSize int\n\tcount int\n\tperiod time.Duration\n\tcond condition.Type\n\tcheck *mapping.Executor\n\tprocs []types.Processor\n\tsizeTally int\n\tparts []types.Part\n\n\ttriggered bool\n\tlastBatch time.Time\n\n\tmSizeBatch metrics.StatCounter\n\tmCountBatch metrics.StatCounter\n\tmPeriodBatch metrics.StatCounter\n\tmCheckBatch metrics.StatCounter\n\tmCondBatch metrics.StatCounter\n}\n\n\/\/ NewPolicy creates an empty policy with default rules.\nfunc NewPolicy(\n\tconf PolicyConfig,\n\tmgr types.Manager,\n\tlog log.Modular,\n\tstats metrics.Type,\n) (*Policy, error) {\n\tif !conf.isLimited() {\n\t\treturn nil, errors.New(\"batch policy must have at least one active trigger\")\n\t}\n\tif !conf.isHardLimited() {\n\t\tlog.Warnln(\"Batch policy should have at least one of count, period or byte_size set in order to provide a hard batch ceiling.\")\n\t}\n\tvar cond types.Condition\n\tvar err error\n\tif !isNoopCondition(conf.Condition) {\n\t\tif cond, err = condition.New(\n\t\t\tconf.Condition, mgr, log.NewModule(\".condition\"), metrics.Namespaced(stats, \"condition\"),\n\t\t); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create condition: %v\", err)\n\t\t}\n\t}\n\tvar check *mapping.Executor\n\tif len(conf.Check) > 0 {\n\t\tif check, err = bloblang.NewMapping(\"\", conf.Check); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse check: %v\", err)\n\t\t}\n\t}\n\tvar period time.Duration\n\tif len(conf.Period) > 0 {\n\t\tif period, err = time.ParseDuration(conf.Period); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse duration string: %v\", err)\n\t\t}\n\t}\n\tvar procs []types.Processor\n\tfor i, pconf := range conf.Processors {\n\t\tprefix := fmt.Sprintf(\"%v\", i)\n\t\tproc, err := processor.New(pconf, mgr, log.NewModule(\".\"+prefix), metrics.Namespaced(stats, prefix))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to create processor '%v': %v\", i, err)\n\t\t}\n\t\tprocs = append(procs, proc)\n\t}\n\treturn &Policy{\n\t\tlog: log,\n\n\t\tbyteSize: conf.ByteSize,\n\t\tcount: conf.Count,\n\t\tperiod: period,\n\t\tcond: cond,\n\t\tcheck: check,\n\t\tprocs: procs,\n\n\t\tlastBatch: time.Now(),\n\n\t\tmSizeBatch: stats.GetCounter(\"on_size\"),\n\t\tmCountBatch: stats.GetCounter(\"on_count\"),\n\t\tmPeriodBatch: stats.GetCounter(\"on_period\"),\n\t\tmCheckBatch: stats.GetCounter(\"on_check\"),\n\t\tmCondBatch: stats.GetCounter(\"on_condition\"),\n\t}, nil\n}\n\n\/\/------------------------------------------------------------------------------\n\n\/\/ Add a new message part to this batch policy. Returns true if this part\n\/\/ triggers the conditions of the policy.\nfunc (p *Policy) Add(part types.Part) bool {\n\tp.sizeTally += len(part.Get())\n\tp.parts = append(p.parts, part)\n\n\tif !p.triggered && p.count > 0 && len(p.parts) >= p.count {\n\t\tp.triggered = true\n\t\tp.mCountBatch.Incr(1)\n\t\tp.log.Traceln(\"Batching based on count\")\n\t}\n\tif !p.triggered && p.byteSize > 0 && p.sizeTally >= p.byteSize {\n\t\tp.triggered = true\n\t\tp.mSizeBatch.Incr(1)\n\t\tp.log.Traceln(\"Batching based on byte_size\")\n\t}\n\ttmpMsg := message.New(nil)\n\ttmpMsg.Append(part)\n\tif p.cond != nil && !p.triggered && p.cond.Check(tmpMsg) {\n\t\tp.triggered = true\n\t\tp.mCondBatch.Incr(1)\n\t\tp.log.Traceln(\"Batching based on condition\")\n\t}\n\ttmpMsg.SetAll(p.parts)\n\tif p.check != nil && !p.triggered {\n\t\ttest, err := p.check.QueryPart(tmpMsg.Len()-1, tmpMsg)\n\t\tif err != nil {\n\t\t\ttest = false\n\t\t\tp.log.Errorf(\"Failed to execute batch check query: %v\\n\", err)\n\t\t}\n\t\tif test {\n\t\t\tp.triggered = true\n\t\t\tp.mCheckBatch.Incr(1)\n\t\t\tp.log.Traceln(\"Batching based on check query\")\n\t\t}\n\t}\n\treturn p.triggered || (p.period > 0 && time.Since(p.lastBatch) > p.period)\n}\n\n\/\/ Flush clears all messages stored by this batch policy. Returns nil if the\n\/\/ policy is currently empty.\nfunc (p *Policy) Flush() types.Message {\n\tvar newMsg types.Message\n\n\tresultMsgs := p.FlushAny()\n\tif len(resultMsgs) == 1 {\n\t\tnewMsg = resultMsgs[0]\n\t} else if len(resultMsgs) > 1 {\n\t\tnewMsg = message.New(nil)\n\t\tvar parts []types.Part\n\t\tfor _, m := range resultMsgs {\n\t\t\tm.Iter(func(_ int, p types.Part) error {\n\t\t\t\tparts = append(parts, p)\n\t\t\t\treturn nil\n\t\t\t})\n\t\t}\n\t\tnewMsg.SetAll(parts)\n\t}\n\treturn newMsg\n}\n\n\/\/ FlushAny clears all messages stored by this batch policy and returns any\n\/\/ number of discrete message batches. Returns nil if the policy is currently\n\/\/ empty.\nfunc (p *Policy) FlushAny() []types.Message {\n\tvar newMsg types.Message\n\tif len(p.parts) > 0 {\n\t\tif !p.triggered && p.period > 0 && time.Since(p.lastBatch) > p.period {\n\t\t\tp.mPeriodBatch.Incr(1)\n\t\t\tp.log.Traceln(\"Batching based on period\")\n\t\t}\n\t\tnewMsg = message.New(nil)\n\t\tnewMsg.Append(p.parts...)\n\t}\n\tp.parts = nil\n\tp.sizeTally = 0\n\tp.lastBatch = time.Now()\n\tp.triggered = false\n\n\tif newMsg == nil {\n\t\treturn nil\n\t}\n\n\tif len(p.procs) > 0 {\n\t\tresultMsgs, res := processor.ExecuteAll(p.procs, newMsg)\n\t\tif res != nil {\n\t\t\tif err := res.Error(); err != nil {\n\t\t\t\tp.log.Errorf(\"Batch processors resulted in error: %v, the batch has been dropped.\", err)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\treturn resultMsgs\n\t}\n\n\treturn []types.Message{newMsg}\n}\n\n\/\/ Count returns the number of currently buffered message parts within this\n\/\/ policy.\nfunc (p *Policy) Count() int {\n\treturn len(p.parts)\n}\n\n\/\/ UntilNext returns a duration indicating how long until the current batch\n\/\/ should be flushed due to a configured period. A negative duration indicates\n\/\/ a period has not been set.\nfunc (p *Policy) UntilNext() time.Duration {\n\tif p.period <= 0 {\n\t\treturn -1\n\t}\n\treturn time.Until(p.lastBatch.Add(p.period))\n}\n\n\/\/------------------------------------------------------------------------------\n\n\/\/ CloseAsync shuts down the policy resources.\nfunc (p *Policy) CloseAsync() {\n\tfor _, c := range p.procs {\n\t\tc.CloseAsync()\n\t}\n}\n\n\/\/ WaitForClose blocks until the processor has closed down.\nfunc (p *Policy) WaitForClose(timeout time.Duration) error {\n\tstopBy := time.Now().Add(timeout)\n\tfor _, c := range p.procs {\n\t\tif err := c.WaitForClose(time.Until(stopBy)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/------------------------------------------------------------------------------\n<|endoftext|>"} {"text":"package movingmedian\n\nimport \"container\/heap\"\n\ntype item struct {\n\tf float64\n\tidx int\n}\n\ntype itemHeap []*item\n\nfunc (h itemHeap) Len() int { return len(h) }\nfunc (h itemHeap) Swap(i, j int) {\n\th[i], h[j] = h[j], h[i]\n\th[i].idx = i\n\th[j].idx = j\n}\n\nfunc (h *itemHeap) Push(x interface{}) {\n\te := x.(*item)\n\te.idx = len(*h)\n\t*h = append(*h, e)\n}\n\nfunc (h *itemHeap) Pop() interface{} {\n\told := *h\n\tn := len(old)\n\tx := old[n-1]\n\t*h = old[0 : n-1]\n\treturn x\n}\n\ntype minItemHeap struct {\n\titemHeap\n}\n\nfunc (h minItemHeap) Less(i, j int) bool { return h.itemHeap[i].f < h.itemHeap[j].f }\n\ntype maxItemHeap struct {\n\titemHeap\n}\n\nfunc (h maxItemHeap) Less(i, j int) bool { return h.itemHeap[i].f > h.itemHeap[j].f }\n\ntype MovingMedian struct {\n\tqueueIndex int\n\tnitems int\n\tqueue []item\n\tmaxHeap maxItemHeap\n\tminHeap minItemHeap\n}\n\nfunc NewMovingMedian(size int) MovingMedian {\n\tm := MovingMedian{\n\t\tqueue: make([]item, size),\n\t\tmaxHeap: maxItemHeap{},\n\t\tminHeap: minItemHeap{},\n\t}\n\n\theap.Init(&m.maxHeap)\n\theap.Init(&m.minHeap)\n\treturn m\n}\n\nfunc (m *MovingMedian) Push(v float64) {\n\tif len(m.queue) == 1 {\n\t\tm.queue[0].f = v\n\t\treturn\n\t}\n\n\titemPtr := &m.queue[m.queueIndex]\n\tm.queueIndex++\n\tif m.queueIndex >= len(m.queue) {\n\t\tm.queueIndex = 0\n\t}\n\n\tminHeapLen := m.minHeap.Len()\n\tif m.nitems == len(m.queue) {\n\t\theapIndex := itemPtr.idx\n\t\tif heapIndex < minHeapLen && itemPtr == m.minHeap.itemHeap[heapIndex] {\n\t\t\tif v >= m.minHeap.itemHeap[0].f || v >= m.maxHeap.itemHeap[0].f {\n\t\t\t\titemPtr.f = v\n\t\t\t\theap.Fix(&m.minHeap, heapIndex)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmoveItem := m.maxHeap.itemHeap[0]\n\t\t\tmoveItem.idx = heapIndex\n\t\t\tm.minHeap.itemHeap[heapIndex] = moveItem\n\t\t\titemPtr.f = v\n\t\t\titemPtr.idx = 0\n\t\t\tm.maxHeap.itemHeap[0] = itemPtr\n\n\t\t\theap.Fix(&m.minHeap, heapIndex)\n\t\t\theap.Fix(&m.maxHeap, 0)\n\t\t\treturn\n\t\t} else {\n\t\t\tif v <= m.maxHeap.itemHeap[0].f || v <= m.minHeap.itemHeap[0].f {\n\t\t\t\titemPtr.f = v\n\t\t\t\theap.Fix(&m.maxHeap, heapIndex)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmoveItem := m.minHeap.itemHeap[0]\n\t\t\tmoveItem.idx = heapIndex\n\t\t\tm.maxHeap.itemHeap[heapIndex] = moveItem\n\t\t\titemPtr.f = v\n\t\t\titemPtr.idx = 0\n\t\t\tm.minHeap.itemHeap[0] = itemPtr\n\n\t\t\theap.Fix(&m.maxHeap, heapIndex)\n\t\t\theap.Fix(&m.minHeap, 0)\n\t\t\treturn\n\t\t}\n\t}\n\n\tm.nitems++\n\titemPtr.f = v\n\tif minHeapLen == 0 {\n\t\theap.Push(&m.minHeap, itemPtr)\n\t} else if v > m.minHeap.itemHeap[0].f {\n\t\theap.Push(&m.minHeap, itemPtr)\n\t\tif minHeapLen > m.maxHeap.Len() {\n\t\t\tmoveItem := heap.Pop(&m.minHeap)\n\t\t\theap.Push(&m.maxHeap, moveItem)\n\t\t}\n\t} else {\n\t\theap.Push(&m.maxHeap, itemPtr)\n\t\tif m.maxHeap.Len() == (minHeapLen + 2) {\n\t\t\tmoveItem := heap.Pop(&m.maxHeap)\n\t\t\theap.Push(&m.minHeap, moveItem)\n\t\t}\n\t}\n}\n\nfunc (m *MovingMedian) Median() float64 {\n\tif len(m.queue) == 1 {\n\t\treturn m.queue[0].f\n\t}\n\n\tif (m.nitems % 2) == 0 {\n\t\treturn (m.maxHeap.itemHeap[0].f + m.minHeap.itemHeap[0].f) \/ 2\n\t}\n\n\tif m.maxHeap.Len() > m.minHeap.Len() {\n\t\treturn m.maxHeap.itemHeap[0].f\n\t}\n\n\treturn m.minHeap.itemHeap[0].f\n}\nclarify case when mean of heaps is used for medianpackage movingmedian\n\nimport \"container\/heap\"\n\ntype item struct {\n\tf float64\n\tidx int\n}\n\ntype itemHeap []*item\n\nfunc (h itemHeap) Len() int { return len(h) }\nfunc (h itemHeap) Swap(i, j int) {\n\th[i], h[j] = h[j], h[i]\n\th[i].idx = i\n\th[j].idx = j\n}\n\nfunc (h *itemHeap) Push(x interface{}) {\n\te := x.(*item)\n\te.idx = len(*h)\n\t*h = append(*h, e)\n}\n\nfunc (h *itemHeap) Pop() interface{} {\n\told := *h\n\tn := len(old)\n\tx := old[n-1]\n\t*h = old[0 : n-1]\n\treturn x\n}\n\ntype minItemHeap struct {\n\titemHeap\n}\n\nfunc (h minItemHeap) Less(i, j int) bool { return h.itemHeap[i].f < h.itemHeap[j].f }\n\ntype maxItemHeap struct {\n\titemHeap\n}\n\nfunc (h maxItemHeap) Less(i, j int) bool { return h.itemHeap[i].f > h.itemHeap[j].f }\n\ntype MovingMedian struct {\n\tqueueIndex int\n\tnitems int\n\tqueue []item\n\tmaxHeap maxItemHeap\n\tminHeap minItemHeap\n}\n\nfunc NewMovingMedian(size int) MovingMedian {\n\tm := MovingMedian{\n\t\tqueue: make([]item, size),\n\t\tmaxHeap: maxItemHeap{},\n\t\tminHeap: minItemHeap{},\n\t}\n\n\theap.Init(&m.maxHeap)\n\theap.Init(&m.minHeap)\n\treturn m\n}\n\nfunc (m *MovingMedian) Push(v float64) {\n\tif len(m.queue) == 1 {\n\t\tm.queue[0].f = v\n\t\treturn\n\t}\n\n\titemPtr := &m.queue[m.queueIndex]\n\tm.queueIndex++\n\tif m.queueIndex >= len(m.queue) {\n\t\tm.queueIndex = 0\n\t}\n\n\tminHeapLen := m.minHeap.Len()\n\tif m.nitems == len(m.queue) {\n\t\theapIndex := itemPtr.idx\n\t\tif heapIndex < minHeapLen && itemPtr == m.minHeap.itemHeap[heapIndex] {\n\t\t\tif v >= m.minHeap.itemHeap[0].f || v >= m.maxHeap.itemHeap[0].f {\n\t\t\t\titemPtr.f = v\n\t\t\t\theap.Fix(&m.minHeap, heapIndex)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmoveItem := m.maxHeap.itemHeap[0]\n\t\t\tmoveItem.idx = heapIndex\n\t\t\tm.minHeap.itemHeap[heapIndex] = moveItem\n\t\t\titemPtr.f = v\n\t\t\titemPtr.idx = 0\n\t\t\tm.maxHeap.itemHeap[0] = itemPtr\n\n\t\t\theap.Fix(&m.minHeap, heapIndex)\n\t\t\theap.Fix(&m.maxHeap, 0)\n\t\t\treturn\n\t\t} else {\n\t\t\tif v <= m.maxHeap.itemHeap[0].f || v <= m.minHeap.itemHeap[0].f {\n\t\t\t\titemPtr.f = v\n\t\t\t\theap.Fix(&m.maxHeap, heapIndex)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmoveItem := m.minHeap.itemHeap[0]\n\t\t\tmoveItem.idx = heapIndex\n\t\t\tm.maxHeap.itemHeap[heapIndex] = moveItem\n\t\t\titemPtr.f = v\n\t\t\titemPtr.idx = 0\n\t\t\tm.minHeap.itemHeap[0] = itemPtr\n\n\t\t\theap.Fix(&m.maxHeap, heapIndex)\n\t\t\theap.Fix(&m.minHeap, 0)\n\t\t\treturn\n\t\t}\n\t}\n\n\tm.nitems++\n\titemPtr.f = v\n\tif minHeapLen == 0 {\n\t\theap.Push(&m.minHeap, itemPtr)\n\t} else if v > m.minHeap.itemHeap[0].f {\n\t\theap.Push(&m.minHeap, itemPtr)\n\t\tif minHeapLen > m.maxHeap.Len() {\n\t\t\tmoveItem := heap.Pop(&m.minHeap)\n\t\t\theap.Push(&m.maxHeap, moveItem)\n\t\t}\n\t} else {\n\t\theap.Push(&m.maxHeap, itemPtr)\n\t\tif m.maxHeap.Len() == (minHeapLen + 2) {\n\t\t\tmoveItem := heap.Pop(&m.maxHeap)\n\t\t\theap.Push(&m.minHeap, moveItem)\n\t\t}\n\t}\n}\n\nfunc (m *MovingMedian) Median() float64 {\n\tif len(m.queue) == 1 {\n\t\treturn m.queue[0].f\n\t}\n\n\tif m.maxHeap.Len() == m.minHeap.Len() {\n\t\treturn (m.maxHeap.itemHeap[0].f + m.minHeap.itemHeap[0].f) \/ 2\n\t}\n\n\tif m.maxHeap.Len() > m.minHeap.Len() {\n\t\treturn m.maxHeap.itemHeap[0].f\n\t}\n\n\treturn m.minHeap.itemHeap[0].f\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"fmt\"\n\t\"ashumeow\/ml-kmeans\/meow-ml-kmeans\/meow-data-structures\"\n\t\"ashumeow\/ml-kmeans\/meow-ml-kmeans\/meow-ml\"\n)\n\nfunc main() {\n\n\t\/\/ still more to code...\n}update initpackage main\n\nimport (\n\t\"fmt\"\n\t\"ashumeow\/ml-kmeans\/meow-ml-kmeans\/meow-data-structures\"\n\t\"ashumeow\/ml-kmeans\/meow-ml-kmeans\/meow-ml\"\n\t\"ashumeow\/ml-kmeans\/meow-ml-kmeans\/\"\n)\n\nfunc main() {\n\n\t\/\/ still more to code...\n}<|endoftext|>"} {"text":"package rtda\n\nimport (\n\trtc \"jvmgo\/jvm\/rtda\/class\"\n\t\"jvmgo\/util\"\n)\n\nfunc JStringChars(jStr *rtc.Obj) []uint16 {\n\tcharArr := jStr.GetFieldValue(\"value\", \"[C\").(*rtc.Obj)\n\treturn charArr.Fields().([]uint16)\n}\n\n\/\/ todo: is there a better way to create String?\n\/\/ todo: add ClassLoaderGetter interface\nfunc NewJString(goStr string, frame *Frame) *rtc.Obj {\n\tchars := util.StringToUtf16(goStr)\n\tinternedStr := getInternedString(chars)\n\tif internedStr != nil {\n\t\treturn internedStr\n\t}\n\n\tclassLoader := frame.ClassLoader()\n\tstringClass := classLoader.StringClass()\n\tjCharArr := rtc.NewCharArray(chars, classLoader)\n\tjStr := stringClass.NewObj()\n\tjStr.SetFieldValue(\"value\", \"[C\", jCharArr)\n\treturn InternString(chars, jStr)\n}\n\nfunc GoString(jStr *rtc.Obj) string {\n\tutf16 := JStringChars(jStr)\n\treturn util.Utf16ToString(utf16)\n}\ncode refactorpackage rtda\n\nimport (\n\trtc \"jvmgo\/jvm\/rtda\/class\"\n\t\"jvmgo\/util\"\n)\n\n\/\/ todo: is there a better way to create String?\nfunc NewJString(goStr string, clg rtc.ClassLoaderGetter) *rtc.Obj {\n\tchars := util.StringToUtf16(goStr)\n\tinternedStr := getInternedString(chars)\n\tif internedStr != nil {\n\t\treturn internedStr\n\t}\n\n\tclassLoader := clg.ClassLoader()\n\tstringClass := classLoader.StringClass()\n\tjCharArr := rtc.NewCharArray(chars, classLoader)\n\tjStr := stringClass.NewObj()\n\tjStr.SetFieldValue(\"value\", \"[C\", jCharArr)\n\treturn InternString(chars, jStr)\n}\n\nfunc GoString(jStr *rtc.Obj) string {\n\tutf16 := JStringChars(jStr)\n\treturn util.Utf16ToString(utf16)\n}\n\nfunc JStringChars(jStr *rtc.Obj) []uint16 {\n\tcharArr := jStr.GetFieldValue(\"value\", \"[C\").(*rtc.Obj)\n\treturn charArr.Fields().([]uint16)\n}\n<|endoftext|>"} {"text":"\/*\n\nThe html transform package implements a html css selector and transformer.\n\nAn html doc can be inspected and queried using css selectors as well as\ntransformed.\n\n\tdoc := NewDoc(str)\n\tt := NewTransform(doc)\n\tt.Apply(CopyAnd(myModifiers...), \"li.menuitem\")\n\tt.Apply(Replace(Text(\"my new text\"), \"a\")\n\tnewDoc := t.Doc()\n*\/\npackage transform\n\n\/\/ TODO(jwall): Documentation...\nimport (\n\t. \"code.google.com\/p\/go-html-transform\/h5\"\n\t\"log\"\n)\n\n\/\/ The TransformFunc type is the type of a Node transformation function.\ntype TransformFunc func(*Node)\n\n\/\/ Transformer encapsulates a document under transformation.\ntype Transformer struct {\n\tdoc *Node\n}\n\n\/\/ Constructor for a Transformer. It makes a copy of the document\n\/\/ and transforms that instead of the original.\nfunc NewTransform(d *Node) *Transformer {\n\treturn &Transformer{doc: d.Clone()}\n}\n\n\/\/ The Doc method returns the document under transformation.\nfunc (t *Transformer) Doc() *Node {\n\treturn t.doc\n}\n\nfunc (t *Transformer) String() string {\n\treturn t.doc.String()\n}\n\nfunc (t *Transformer) Clone() *Transformer {\n\treturn NewTransform(t.Doc())\n}\n\n\/\/ TODO(jwall): TransformApplication type that we can process the doc in one\n\/\/ pass.\n\/\/ The Apply method applies a TransformFunc to the nodes returned from\n\/\/ the Selector query\nfunc (t *Transformer) Apply(f TransformFunc, sel ...string) *Transformer {\n\t\/\/ TODO come up with a way to walk tree once?\n\tsq := NewSelectorQuery(sel...)\n\tnodes := sq.Apply(t.doc)\n\tfor _, n := range nodes {\n\t\tf(n)\n\t}\n\treturn t\n}\n\ntype Transform struct {\n\tq []string\n\tf TransformFunc\n}\n\n\/\/ Spec creates a Transform that you can apply using ApplyAll.\nfunc Trans(f TransformFunc, sel1 string, sel ...string) Transform {\n\treturn Transform{f: f, q: append([]string{sel1}, sel...)}\n}\n\n\/\/ ApplyAll applies a series of Transforms to a document.\n\/\/ t.ApplyAll(Trans(f, sel1, sel2), Trans(f2, sel3, sel4))\nfunc (t *Transformer) ApplyAll(ts ...Transform) *Transformer {\n\tfor _, spec := range ts {\n\t\tt.Apply(spec.f, spec.q...)\n\t}\n\treturn t\n}\n\n\/\/ Compose a set of TransformFuncs into a single TransformFunc\nfunc Compose(fs ...TransformFunc) TransformFunc {\n\treturn func(n *Node) {\n\t\tfor _, f := range fs {\n\t\t\tf(n)\n\t\t}\n\t}\n}\n\n\/\/ AppendChildren creates a TransformFunc that appends the Children passed in.\nfunc AppendChildren(cs ...*Node) TransformFunc {\n\treturn func(n *Node) {\n\t\tsz := len(n.Children)\n\t\tnewChild := make([]*Node, sz+len(cs))\n\t\tcopy(newChild, n.Children)\n\t\tcopy(newChild[sz:], cs)\n\t\tn.Children = newChild\n\t}\n}\n\n\/\/ PrependChildren creates a TransformFunc that prepends the Children passed in.\nfunc PrependChildren(cs ...*Node) TransformFunc {\n\treturn func(n *Node) {\n\t\tsz := len(n.Children)\n\t\tsz2 := len(cs)\n\t\tnewChild := make([]*Node, sz+len(cs))\n\t\tcopy(newChild[sz2:], n.Children)\n\t\tcopy(newChild[0:sz2], cs)\n\t\tn.Children = newChild\n\t}\n}\n\n\/\/ RemoveChildren creates a TransformFunc that removes the Children of the node\n\/\/ it operates on.\nfunc RemoveChildren() TransformFunc {\n\treturn func(n *Node) {\n\t\tn.Children = make([]*Node, 0)\n\t}\n}\n\n\/\/ ReplaceChildren creates a TransformFunc that replaces the Children of the\n\/\/ node it operates on with the Children passed in.\nfunc ReplaceChildren(ns ...*Node) TransformFunc {\n\treturn func(n *Node) {\n\t\tn.Children = ns\n\t}\n}\n\nfunc Replace(ns ...*Node) TransformFunc {\n\treturn func(n *Node) {\n\t\tp := n.Parent\n\t\tswitch p {\n\t\tcase nil:\n\t\t\tn.Children = ns\n\t\tdefault:\n\t\t\tnewChildren := []*Node{}\n\t\t\tfor _, c := range p.Children {\n\t\t\t\tif c.String() != n.String() {\n\t\t\t\t\tnewChildren = append(newChildren, c)\n\t\t\t\t} else {\n\t\t\t\t\tnewChildren = append(newChildren, ns...)\n\t\t\t\t}\n\t\t\t}\n\t\t\tReplaceChildren(newChildren...)(p)\n\t\t}\n\t}\n}\n\n\/\/ ModifyAttrb creates a TransformFunc that modifies the attributes\n\/\/ of the node it operates on.\nfunc ModifyAttrib(key string, val string) TransformFunc {\n\treturn func(n *Node) {\n\t\tfound := false\n\t\tfor i, attr := range n.Attr {\n\t\t\tif attr.Name == key {\n\t\t\t\tn.Attr[i].Value = val\n\t\t\t\tfound = true\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tnewAttr := make([]*Attribute, len(n.Attr)+1)\n\t\t\tnewAttr[len(n.Attr)] = &Attribute{Name: key, Value: val}\n\t\t\tn.Attr = newAttr\n\t\t}\n\t}\n}\n\nfunc TransformAttrib(key string, f func(string) string) TransformFunc {\n\treturn func(n *Node) {\n\t\tfor i, attr := range n.Attr {\n\t\t\tif attr.Name == key {\n\t\t\t\tn.Attr[i].Value = f(n.Attr[i].Value)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc DoAll(fs ...TransformFunc) TransformFunc {\n\treturn func(n *Node) {\n\t\tfor _, f := range fs {\n\t\t\tf(n)\n\t\t}\n\t}\n}\n\n\/\/ ForEach takes a function and a list of Nodes and performs that\n\/\/ function for each node in the list.\n\/\/ The function should be of a type either func(...*Node) TransformFunc\n\/\/ or func(*Node) TransformFunc. Any other type will panic.\n\/\/ Returns a TransformFunc.\nfunc ForEach(f interface{}, ns ...*Node) TransformFunc {\n\tswitch t := f.(type) {\n\tcase func(...*Node) TransformFunc:\n\t\treturn func(n *Node) {\n\t\t\tfor _, n2 := range ns {\n\t\t\t\tf1 := f.(func(...*Node) TransformFunc)\n\t\t\t\tf2 := f1(n2)\n\t\t\t\tf2(n)\n\t\t\t}\n\t\t}\n\tcase func(*Node) TransformFunc:\n\t\treturn func(n *Node) {\n\t\t\tfor _, n2 := range ns {\n\t\t\t\tf1 := f.(func(*Node) TransformFunc)\n\t\t\t\tf2 := f1(n2)\n\t\t\t\tf2(n)\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tlog.Panicf(\"Wrong function type passed to ForEach %s\", t)\n\t}\n\treturn nil\n}\n\n\/\/ CopyAnd will construct a TransformFunc that will\n\/\/ make a copy of the node for each passed in TransformFunc\n\/\/ And replace the passed in node with the resulting transformed\n\/\/ Nodes.\n\/\/ Returns a TransformFunc\nfunc CopyAnd(fns ...TransformFunc) TransformFunc {\n\treturn func(n *Node) {\n\t\tnewNodes := make([]*Node, len(fns))\n\t\tfor i, fn := range fns {\n\t\t\tnode := n.Clone()\n\t\t\tfn(node)\n\t\t\tnewNodes[i] = node\n\t\t}\n\t\treplaceFn := Replace(newNodes...)\n\t\treplaceFn(n)\n\t}\n}\n\nfunc SubTransform(f TransformFunc, sel1 string, sels ...string) TransformFunc {\n\treturn func(n *Node) {\n\t\t\/\/ TODO This is perhaps not the most efficient way to do this.\n\t\ttf := NewTransform(n)\n\t\tq := append([]string{sel1}, sels...)\n\t\ttf.Apply(f, q...)\n\t\tReplace(tf.Doc())(n)\n\t}\n}\n\n\/\/ Copyright 2010 Jeremy Wall (jeremy@marzhillstudios.com)\n\/\/ Use of this source code is governed by the Artistic License 2.0.\n\/\/ That License is included in the LICENSE file.\nSome documenation fixes.\/\/ Copyright 2010 Jeremy Wall (jeremy@marzhillstudios.com)\n\/\/ Use of this source code is governed by the Artistic License 2.0.\n\/\/ That License is included in the LICENSE file.\n\/*\n\nThe html transform package implements a html css selector and transformer.\n\nAn html doc can be inspected and queried using css selectors as well as\ntransformed.\n\n\tdoc := NewDoc(str)\n\tt := NewTransform(doc)\n\tt.Apply(CopyAnd(myModifiers...), \"li.menuitem\")\n\tt.Apply(Replace(Text(\"my new text\"), \"a\")\n\tnewDoc := t.Doc()\n*\/\npackage transform\n\n\/\/ TODO(jwall): Documentation...\nimport (\n\t. \"code.google.com\/p\/go-html-transform\/h5\"\n\t\"log\"\n)\n\n\/\/ The TransformFunc type is the type of a Node transformation function.\ntype TransformFunc func(*Node)\n\n\/\/ Transformer encapsulates a document under transformation.\ntype Transformer struct {\n\tdoc *Node\n}\n\n\/\/ Constructor for a Transformer. It makes a copy of the document\n\/\/ and transforms that instead of the original.\nfunc NewTransform(d *Node) *Transformer {\n\treturn &Transformer{doc: d.Clone()}\n}\n\n\/\/ The Doc method returns the document under transformation.\nfunc (t *Transformer) Doc() *Node {\n\treturn t.doc\n}\n\nfunc (t *Transformer) String() string {\n\treturn t.doc.String()\n}\n\nfunc (t *Transformer) Clone() *Transformer {\n\treturn NewTransform(t.Doc())\n}\n\n\/\/ TODO(jwall): TransformApplication type that we can process the doc in one\n\/\/ pass.\n\/\/ The Apply method applies a TransformFunc to the nodes returned from\n\/\/ the Selector query\nfunc (t *Transformer) Apply(f TransformFunc, sel ...string) *Transformer {\n\t\/\/ TODO come up with a way to walk tree once?\n\tsq := NewSelectorQuery(sel...)\n\tnodes := sq.Apply(t.doc)\n\tfor _, n := range nodes {\n\t\tf(n)\n\t}\n\treturn t\n}\n\ntype Transform struct {\n\tq []string\n\tf TransformFunc\n}\n\n\/\/ Spec creates a Transform that you can apply using ApplyAll.\nfunc Trans(f TransformFunc, sel1 string, sel ...string) Transform {\n\treturn Transform{f: f, q: append([]string{sel1}, sel...)}\n}\n\n\/\/ ApplyAll applies a series of Transforms to a document.\n\/\/ t.ApplyAll(Trans(f, sel1, sel2), Trans(f2, sel3, sel4))\nfunc (t *Transformer) ApplyAll(ts ...Transform) *Transformer {\n\tfor _, spec := range ts {\n\t\tt.Apply(spec.f, spec.q...)\n\t}\n\treturn t\n}\n\n\/\/ Compose a set of TransformFuncs into a single TransformFunc\nfunc Compose(fs ...TransformFunc) TransformFunc {\n\treturn func(n *Node) {\n\t\tfor _, f := range fs {\n\t\t\tf(n)\n\t\t}\n\t}\n}\n\n\/\/ AppendChildren creates a TransformFunc that appends the Children passed in.\nfunc AppendChildren(cs ...*Node) TransformFunc {\n\treturn func(n *Node) {\n\t\tsz := len(n.Children)\n\t\tnewChild := make([]*Node, sz+len(cs))\n\t\tcopy(newChild, n.Children)\n\t\tcopy(newChild[sz:], cs)\n\t\tn.Children = newChild\n\t}\n}\n\n\/\/ PrependChildren creates a TransformFunc that prepends the Children passed in.\nfunc PrependChildren(cs ...*Node) TransformFunc {\n\treturn func(n *Node) {\n\t\tsz := len(n.Children)\n\t\tsz2 := len(cs)\n\t\tnewChild := make([]*Node, sz+len(cs))\n\t\tcopy(newChild[sz2:], n.Children)\n\t\tcopy(newChild[0:sz2], cs)\n\t\tn.Children = newChild\n\t}\n}\n\n\/\/ RemoveChildren creates a TransformFunc that removes the Children of the node\n\/\/ it operates on.\nfunc RemoveChildren() TransformFunc {\n\treturn func(n *Node) {\n\t\tn.Children = make([]*Node, 0)\n\t}\n}\n\n\/\/ ReplaceChildren creates a TransformFunc that replaces the Children of the\n\/\/ node it operates on with the Children passed in.\nfunc ReplaceChildren(ns ...*Node) TransformFunc {\n\treturn func(n *Node) {\n\t\tn.Children = ns\n\t}\n}\n\nfunc Replace(ns ...*Node) TransformFunc {\n\treturn func(n *Node) {\n\t\tp := n.Parent\n\t\tswitch p {\n\t\tcase nil:\n\t\t\tn.Children = ns\n\t\tdefault:\n\t\t\tnewChildren := []*Node{}\n\t\t\tfor _, c := range p.Children {\n\t\t\t\tif c.String() != n.String() {\n\t\t\t\t\tnewChildren = append(newChildren, c)\n\t\t\t\t} else {\n\t\t\t\t\tnewChildren = append(newChildren, ns...)\n\t\t\t\t}\n\t\t\t}\n\t\t\tReplaceChildren(newChildren...)(p)\n\t\t}\n\t}\n}\n\n\/\/ ModifyAttrb creates a TransformFunc that modifies the attributes\n\/\/ of the node it operates on.\nfunc ModifyAttrib(key string, val string) TransformFunc {\n\treturn func(n *Node) {\n\t\tfound := false\n\t\tfor i, attr := range n.Attr {\n\t\t\tif attr.Name == key {\n\t\t\t\tn.Attr[i].Value = val\n\t\t\t\tfound = true\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tnewAttr := make([]*Attribute, len(n.Attr)+1)\n\t\t\tnewAttr[len(n.Attr)] = &Attribute{Name: key, Value: val}\n\t\t\tn.Attr = newAttr\n\t\t}\n\t}\n}\n\nfunc TransformAttrib(key string, f func(string) string) TransformFunc {\n\treturn func(n *Node) {\n\t\tfor i, attr := range n.Attr {\n\t\t\tif attr.Name == key {\n\t\t\t\tn.Attr[i].Value = f(n.Attr[i].Value)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc DoAll(fs ...TransformFunc) TransformFunc {\n\treturn func(n *Node) {\n\t\tfor _, f := range fs {\n\t\t\tf(n)\n\t\t}\n\t}\n}\n\n\/\/ ForEach takes a function and a list of Nodes and performs that\n\/\/ function for each node in the list.\n\/\/ The function should be of a type either func(...*Node) TransformFunc\n\/\/ or func(*Node) TransformFunc. Any other type will panic.\n\/\/ Returns a TransformFunc.\nfunc ForEach(f interface{}, ns ...*Node) TransformFunc {\n\tswitch t := f.(type) {\n\tcase func(...*Node) TransformFunc:\n\t\treturn func(n *Node) {\n\t\t\tfor _, n2 := range ns {\n\t\t\t\tf1 := f.(func(...*Node) TransformFunc)\n\t\t\t\tf2 := f1(n2)\n\t\t\t\tf2(n)\n\t\t\t}\n\t\t}\n\tcase func(*Node) TransformFunc:\n\t\treturn func(n *Node) {\n\t\t\tfor _, n2 := range ns {\n\t\t\t\tf1 := f.(func(*Node) TransformFunc)\n\t\t\t\tf2 := f1(n2)\n\t\t\t\tf2(n)\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tlog.Panicf(\"Wrong function type passed to ForEach %s\", t)\n\t}\n\treturn nil\n}\n\n\/\/ CopyAnd will construct a TransformFunc that will\n\/\/ make a copy of the node for each passed in TransformFunc\n\/\/ And replace the passed in node with the resulting transformed\n\/\/ Nodes.\n\/\/ Returns a TransformFunc\nfunc CopyAnd(fns ...TransformFunc) TransformFunc {\n\treturn func(n *Node) {\n\t\tnewNodes := make([]*Node, len(fns))\n\t\tfor i, fn := range fns {\n\t\t\tnode := n.Clone()\n\t\t\tfn(node)\n\t\t\tnewNodes[i] = node\n\t\t}\n\t\treplaceFn := Replace(newNodes...)\n\t\treplaceFn(n)\n\t}\n}\n\nfunc SubTransform(f TransformFunc, sel1 string, sels ...string) TransformFunc {\n\treturn func(n *Node) {\n\t\t\/\/ TODO This is perhaps not the most efficient way to do this.\n\t\ttf := NewTransform(n)\n\t\tq := append([]string{sel1}, sels...)\n\t\ttf.Apply(f, q...)\n\t\tReplace(tf.Doc())(n)\n\t}\n}\n<|endoftext|>"} {"text":"package static_file\n\nimport (\n\t\"github.com\/ngmoco\/falcore\"\n\t\"mime\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\n\/\/ A falcore RequestFilter for serving static files\n\/\/ from the filesystem.\ntype Filter struct {\n\t\/\/ File system base path for serving files\n\tBasePath string\n\t\/\/ Prefix in URL path\n\tPathPrefix string\n}\n\nfunc (f *Filter) FilterRequest(req *falcore.Request) (res *http.Response) {\n\t\/\/ Clean asset path\n\tasset_path := filepath.Clean(filepath.FromSlash(req.HttpRequest.URL.Path))\n\n\t\/\/ Resolve PathPrefix\n\tif strings.HasPrefix(asset_path, f.PathPrefix) {\n\t\tasset_path = asset_path[len(f.PathPrefix):]\n\t} else {\n\t\tfalcore.Debug(\"%v doesn't match prefix %v\", asset_path, f.PathPrefix)\n\t\tres = falcore.SimpleResponse(req.HttpRequest, 404, nil, \"Not found.\")\n\t\treturn\n\t}\n\n\t\/\/ Resolve FSBase\n\tif f.BasePath != \"\" {\n\t\tasset_path = filepath.Join(f.BasePath, asset_path)\n\t} else {\n\t\tfalcore.Error(\"file_filter requires a BasePath\")\n\t\treturn falcore.SimpleResponse(req.HttpRequest, 500, nil, \"Server Error\\n\")\n\t}\n\n\tvar fileSize int64\n\tif stat, err := os.Stat(asset_path); err == nil {\n\t\tfileSize = stat.Size()\n\t} else {\n\t\tfalcore.Debug(\"Can't stat %v: %v\", asset_path, err)\n\t\treturn falcore.SimpleResponse(req.HttpRequest, 404, nil, \"File not found\\n\")\n\t}\n\n\t\/\/ Open File\n\tif file, err := os.Open(asset_path); err == nil {\n\t\t\/\/ Make sure it's an actual file\n\t\tif stat, err := file.Stat(); err == nil && stat.Mode() & os.ModeType == 0 {\n\t\t\tres = &http.Response{\n\t\t\t\tRequest: req.HttpRequest,\n\t\t\t\tStatusCode: 200,\n\t\t\t\tProto: \"HTTP\/1.1\",\n\t\t\t\tBody: file,\n\t\t\t\tHeader: make(http.Header),\n\t\t\t\tContentLength: fileSize,\n\t\t\t}\n\t\t\tif ct := mime.TypeByExtension(filepath.Ext(asset_path)); ct != \"\" {\n\t\t\t\tres.Header.Set(\"Content-Type\", ct)\n\t\t\t}\n\t\t} else {\n\t\t\tfile.Close()\n\t\t\treturn falcore.SimpleResponse(req.HttpRequest, 404, nil, \"File not found\\n\")\n\t\t}\n\t} else {\n\t\tfalcore.Debug(\"Can't open %v: %v\", asset_path, err)\n\t\tres = falcore.SimpleResponse(req.HttpRequest, 404, nil, \"File not found\\n\")\n\t}\n\n\treturn\n}\nfile_filter.go add http major\/minor versions to struct, prevents FF & IE from barfing on response with HTTP\/0.0package static_file\n\nimport (\n\t\"github.com\/ngmoco\/falcore\"\n\t\"mime\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\n\/\/ A falcore RequestFilter for serving static files\n\/\/ from the filesystem.\ntype Filter struct {\n\t\/\/ File system base path for serving files\n\tBasePath string\n\t\/\/ Prefix in URL path\n\tPathPrefix string\n}\n\nfunc (f *Filter) FilterRequest(req *falcore.Request) (res *http.Response) {\n\t\/\/ Clean asset path\n\tasset_path := filepath.Clean(filepath.FromSlash(req.HttpRequest.URL.Path))\n\n\t\/\/ Resolve PathPrefix\n\tif strings.HasPrefix(asset_path, f.PathPrefix) {\n\t\tasset_path = asset_path[len(f.PathPrefix):]\n\t} else {\n\t\tfalcore.Debug(\"%v doesn't match prefix %v\", asset_path, f.PathPrefix)\n\t\tres = falcore.SimpleResponse(req.HttpRequest, 404, nil, \"Not found.\")\n\t\treturn\n\t}\n\n\t\/\/ Resolve FSBase\n\tif f.BasePath != \"\" {\n\t\tasset_path = filepath.Join(f.BasePath, asset_path)\n\t} else {\n\t\tfalcore.Error(\"file_filter requires a BasePath\")\n\t\treturn falcore.SimpleResponse(req.HttpRequest, 500, nil, \"Server Error\\n\")\n\t}\n\n\tvar fileSize int64\n\tif stat, err := os.Stat(asset_path); err == nil {\n\t\tfileSize = stat.Size()\n\t} else {\n\t\tfalcore.Debug(\"Can't stat %v: %v\", asset_path, err)\n\t\treturn falcore.SimpleResponse(req.HttpRequest, 404, nil, \"File not found\\n\")\n\t}\n\n\t\/\/ Open File\n\tif file, err := os.Open(asset_path); err == nil {\n\t\t\/\/ Make sure it's an actual file\n\t\tif stat, err := file.Stat(); err == nil && stat.Mode() & os.ModeType == 0 {\n\t\t\tres = &http.Response{\n\t\t\t\tRequest: req.HttpRequest,\n\t\t\t\tStatusCode: 200,\n\t\t\t\tProto: \"HTTP\/1.1\",\n\t\t\t\tProtoMajor: 1,\n\t\t\t\tProtoMinor: 1,\n\t\t\t\tBody: file,\n\t\t\t\tHeader: make(http.Header),\n\t\t\t\tContentLength: fileSize,\n\t\t\t}\n\t\t\tif ct := mime.TypeByExtension(filepath.Ext(asset_path)); ct != \"\" {\n\t\t\t\tres.Header.Set(\"Content-Type\", ct)\n\t\t\t}\n\t\t} else {\n\t\t\tfile.Close()\n\t\t\treturn falcore.SimpleResponse(req.HttpRequest, 404, nil, \"File not found\\n\")\n\t\t}\n\t} else {\n\t\tfalcore.Debug(\"Can't open %v: %v\", asset_path, err)\n\t\tres = falcore.SimpleResponse(req.HttpRequest, 404, nil, \"File not found\\n\")\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"package version\n\n\/\/ Version of Functions\nvar Version = \"0.3.168\"\nfunctions: 0.3.169 release [skip ci]package version\n\n\/\/ Version of Functions\nvar Version = \"0.3.169\"\n<|endoftext|>"} {"text":"package version\n\n\/\/ Version of Functions\nvar Version = \"0.3.367\"\nfnserver: 0.3.368 release [skip ci]package version\n\n\/\/ Version of Functions\nvar Version = \"0.3.368\"\n<|endoftext|>"} {"text":"package version\n\n\/\/ Version of Functions\nvar Version = \"0.3.725\"\nfnserver: v0.3.726 release [skip ci]package version\n\n\/\/ Version of Functions\nvar Version = \"0.3.726\"\n<|endoftext|>"} {"text":"package version\n\n\/\/ Version of Functions\nvar Version = \"0.3.568\"\nfnserver: 0.3.569 release [skip ci]package version\n\n\/\/ Version of Functions\nvar Version = \"0.3.569\"\n<|endoftext|>"} {"text":"package version\n\n\/\/ Version of Functions\nvar Version = \"0.3.501\"\nfnserver: 0.3.502 release [skip ci]package version\n\n\/\/ Version of Functions\nvar Version = \"0.3.502\"\n<|endoftext|>"} {"text":"package version\n\n\/\/ Version of Functions\nvar Version = \"0.3.161\"\nfunctions: 0.3.162 release [skip ci]package version\n\n\/\/ Version of Functions\nvar Version = \"0.3.162\"\n<|endoftext|>"} {"text":"package apihelper\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"strconv\"\n\n\t\"github.com\/cloudfoundry\/cli\/plugin\"\n\t\"github.com\/krujos\/cfcurl\"\n)\n\nvar (\n\tErrOrgNotFound = errors.New(\"organization not found\")\n)\n\n\/\/Organization representation\ntype Organization struct {\n\tURL string\n\tName string\n\tQuotaURL string\n\tSpacesURL string\n}\n\n\/\/Space representation\ntype Space struct {\n\tName string\n\tAppsURL string\n}\n\n\/\/App representation\ntype App struct {\n\tInstances float64\n\tRAM float64\n\tRunning bool\n}\n\n\/\/CFAPIHelper to wrap cf curl results\ntype CFAPIHelper interface {\n\tGetOrgs() ([]Organization, error)\n\tGetOrg(string) (Organization, error)\n\tGetQuotaMemoryLimit(string) (float64, error)\n\tGetOrgMemoryUsage(Organization) (float64, error)\n\tGetOrgSpaces(string) ([]Space, error)\n\tGetSpaceApps(string) ([]App, error)\n}\n\n\/\/APIHelper implementation\ntype APIHelper struct {\n\tcli plugin.CliConnection\n}\n\nfunc New(cli plugin.CliConnection) CFAPIHelper {\n\treturn &APIHelper{cli}\n}\n\n\/\/GetOrgs returns a struct that represents critical fields in the JSON\nfunc (api *APIHelper) GetOrgs() ([]Organization, error) {\n\torgsJSON, err := cfcurl.Curl(api.cli, \"\/v2\/organizations\")\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\tpages := int(orgsJSON[\"total_pages\"].(float64))\n\torgs := []Organization{}\n\tfor i := 1; i <= pages; i++ {\n\t\tif 1 != i {\n\t\t\torgsJSON, err = cfcurl.Curl(api.cli, \"\/v2\/organizations?page=\"+strconv.Itoa(i))\n\t\t}\n\t\tfor _, o := range orgsJSON[\"resources\"].([]interface{}) {\n\t\t\ttheOrg := o.(map[string]interface{})\n\t\t\tentity := theOrg[\"entity\"].(map[string]interface{})\n\t\t\tmetadata := theOrg[\"metadata\"].(map[string]interface{})\n\t\t\torgs = append(orgs,\n\t\t\t\tOrganization{\n\t\t\t\t\tName: entity[\"name\"].(string),\n\t\t\t\t\tURL: metadata[\"url\"].(string),\n\t\t\t\t\tQuotaURL: entity[\"quota_definition_url\"].(string),\n\t\t\t\t\tSpacesURL: entity[\"spaces_url\"].(string),\n\t\t\t\t})\n\t\t}\n\t}\n\treturn orgs, nil\n}\n\n\/\/GetOrg returns a struct that represents critical fields in the JSON\nfunc (api *APIHelper) GetOrg(name string) (Organization, error) {\n\tquery := fmt.Sprintf(\"name:%s\", name)\n\tpath := fmt.Sprintf(\"\/v2\/organizations?q=%s&inline-relations-depth=1\", url.QueryEscape(query))\n\torgsJSON, err := cfcurl.Curl(api.cli, path)\n\tif nil != err {\n\t\treturn Organization{}, err\n\t}\n\n\tresults := int(orgsJSON[\"total_results\"].(float64))\n\tif results == 0 {\n\t\treturn Organization{}, ErrOrgNotFound\n\t}\n\n\torgResource := orgsJSON[\"resources\"].([]interface{})[0]\n\torg := api.orgResourceToOrg(orgResource)\n\n\treturn org, nil\n}\n\nfunc (api *APIHelper) orgResourceToOrg(o interface{}) Organization {\n\ttheOrg := o.(map[string]interface{})\n\tentity := theOrg[\"entity\"].(map[string]interface{})\n\tmetadata := theOrg[\"metadata\"].(map[string]interface{})\n\treturn Organization{\n\t\tName: entity[\"name\"].(string),\n\t\tURL: metadata[\"url\"].(string),\n\t\tQuotaURL: entity[\"quota_definition_url\"].(string),\n\t\tSpacesURL: entity[\"spaces_url\"].(string),\n\t}\n}\n\n\/\/GetQuotaMemoryLimit retruns the amount of memory (in MB) that the org is allowed\nfunc (api *APIHelper) GetQuotaMemoryLimit(quotaURL string) (float64, error) {\n\tquotaJSON, err := cfcurl.Curl(api.cli, quotaURL)\n\tif nil != err {\n\t\treturn 0, err\n\t}\n\treturn quotaJSON[\"entity\"].(map[string]interface{})[\"memory_limit\"].(float64), nil\n}\n\n\/\/GetOrgMemoryUsage returns the amount of memory (in MB) that the org is consuming\nfunc (api *APIHelper) GetOrgMemoryUsage(org Organization) (float64, error) {\n\tusageJSON, err := cfcurl.Curl(api.cli, org.URL+\"\/memory_usage\")\n\tif nil != err {\n\t\treturn 0, err\n\t}\n\treturn usageJSON[\"memory_usage_in_mb\"].(float64), nil\n}\n\n\/\/GetOrgSpaces returns the spaces in an org.\nfunc (api *APIHelper) GetOrgSpaces(spacesURL string) ([]Space, error) {\n\tspacesJSON, err := cfcurl.Curl(api.cli, spacesURL)\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\tspaces := []Space{}\n\tfor _, s := range spacesJSON[\"resources\"].([]interface{}) {\n\t\ttheSpace := s.(map[string]interface{})\n\t\tentity := theSpace[\"entity\"].(map[string]interface{})\n\t\tspaces = append(spaces,\n\t\t\tSpace{\n\t\t\t\tAppsURL: entity[\"apps_url\"].(string),\n\t\t\t\tName: entity[\"name\"].(string),\n\t\t\t})\n\t}\n\treturn spaces, nil\n}\n\n\/\/GetSpaceApps returns the apps in a space\nfunc (api *APIHelper) GetSpaceApps(appsURL string) ([]App, error) {\n\tappsJSON, err := cfcurl.Curl(api.cli, appsURL)\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\tapps := []App{}\n\tfor _, a := range appsJSON[\"resources\"].([]interface{}) {\n\t\ttheApp := a.(map[string]interface{})\n\t\tentity := theApp[\"entity\"].(map[string]interface{})\n\t\tapps = append(apps,\n\t\t\tApp{\n\t\t\t\tInstances: entity[\"instances\"].(float64),\n\t\t\t\tRAM: entity[\"memory\"].(float64),\n\t\t\t\tRunning: \"STARTED\" == entity[\"state\"].(string),\n\t\t\t})\n\t}\n\treturn apps, nil\n}\nGetOrgSpaces handle multiple pagespackage apihelper\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"strconv\"\n\n\t\"github.com\/cloudfoundry\/cli\/plugin\"\n\t\"github.com\/krujos\/cfcurl\"\n)\n\nvar (\n\tErrOrgNotFound = errors.New(\"organization not found\")\n)\n\n\/\/Organization representation\ntype Organization struct {\n\tURL string\n\tName string\n\tQuotaURL string\n\tSpacesURL string\n}\n\n\/\/Space representation\ntype Space struct {\n\tName string\n\tAppsURL string\n}\n\n\/\/App representation\ntype App struct {\n\tInstances float64\n\tRAM float64\n\tRunning bool\n}\n\n\/\/CFAPIHelper to wrap cf curl results\ntype CFAPIHelper interface {\n\tGetOrgs() ([]Organization, error)\n\tGetOrg(string) (Organization, error)\n\tGetQuotaMemoryLimit(string) (float64, error)\n\tGetOrgMemoryUsage(Organization) (float64, error)\n\tGetOrgSpaces(string) ([]Space, error)\n\tGetSpaceApps(string) ([]App, error)\n}\n\n\/\/APIHelper implementation\ntype APIHelper struct {\n\tcli plugin.CliConnection\n}\n\nfunc New(cli plugin.CliConnection) CFAPIHelper {\n\treturn &APIHelper{cli}\n}\n\n\/\/GetOrgs returns a struct that represents critical fields in the JSON\nfunc (api *APIHelper) GetOrgs() ([]Organization, error) {\n\torgsJSON, err := cfcurl.Curl(api.cli, \"\/v2\/organizations\")\n\tif nil != err {\n\t\treturn nil, err\n\t}\n\tpages := int(orgsJSON[\"total_pages\"].(float64))\n\torgs := []Organization{}\n\tfor i := 1; i <= pages; i++ {\n\t\tif 1 != i {\n\t\t\torgsJSON, err = cfcurl.Curl(api.cli, \"\/v2\/organizations?page=\"+strconv.Itoa(i))\n\t\t}\n\t\tfor _, o := range orgsJSON[\"resources\"].([]interface{}) {\n\t\t\ttheOrg := o.(map[string]interface{})\n\t\t\tentity := theOrg[\"entity\"].(map[string]interface{})\n\t\t\tmetadata := theOrg[\"metadata\"].(map[string]interface{})\n\t\t\torgs = append(orgs,\n\t\t\t\tOrganization{\n\t\t\t\t\tName: entity[\"name\"].(string),\n\t\t\t\t\tURL: metadata[\"url\"].(string),\n\t\t\t\t\tQuotaURL: entity[\"quota_definition_url\"].(string),\n\t\t\t\t\tSpacesURL: entity[\"spaces_url\"].(string),\n\t\t\t\t})\n\t\t}\n\t}\n\treturn orgs, nil\n}\n\n\/\/GetOrg returns a struct that represents critical fields in the JSON\nfunc (api *APIHelper) GetOrg(name string) (Organization, error) {\n\tquery := fmt.Sprintf(\"name:%s\", name)\n\tpath := fmt.Sprintf(\"\/v2\/organizations?q=%s&inline-relations-depth=1\", url.QueryEscape(query))\n\torgsJSON, err := cfcurl.Curl(api.cli, path)\n\tif nil != err {\n\t\treturn Organization{}, err\n\t}\n\n\tresults := int(orgsJSON[\"total_results\"].(float64))\n\tif results == 0 {\n\t\treturn Organization{}, ErrOrgNotFound\n\t}\n\n\torgResource := orgsJSON[\"resources\"].([]interface{})[0]\n\torg := api.orgResourceToOrg(orgResource)\n\n\treturn org, nil\n}\n\nfunc (api *APIHelper) orgResourceToOrg(o interface{}) Organization {\n\ttheOrg := o.(map[string]interface{})\n\tentity := theOrg[\"entity\"].(map[string]interface{})\n\tmetadata := theOrg[\"metadata\"].(map[string]interface{})\n\treturn Organization{\n\t\tName: entity[\"name\"].(string),\n\t\tURL: metadata[\"url\"].(string),\n\t\tQuotaURL: entity[\"quota_definition_url\"].(string),\n\t\tSpacesURL: entity[\"spaces_url\"].(string),\n\t}\n}\n\n\/\/GetQuotaMemoryLimit retruns the amount of memory (in MB) that the org is allowed\nfunc (api *APIHelper) GetQuotaMemoryLimit(quotaURL string) (float64, error) {\n\tquotaJSON, err := cfcurl.Curl(api.cli, quotaURL)\n\tif nil != err {\n\t\treturn 0, err\n\t}\n\treturn quotaJSON[\"entity\"].(map[string]interface{})[\"memory_limit\"].(float64), nil\n}\n\n\/\/GetOrgMemoryUsage returns the amount of memory (in MB) that the org is consuming\nfunc (api *APIHelper) GetOrgMemoryUsage(org Organization) (float64, error) {\n\tusageJSON, err := cfcurl.Curl(api.cli, org.URL+\"\/memory_usage\")\n\tif nil != err {\n\t\treturn 0, err\n\t}\n\treturn usageJSON[\"memory_usage_in_mb\"].(float64), nil\n}\n\n\/\/GetOrgSpaces returns the spaces in an org.\nfunc (api *APIHelper) GetOrgSpaces(spacesURL string) ([]Space, error) {\n\tnextURL := spacesURL\n\tspaces := []Space{}\n\tfor nextURL != \"\" {\n\t\tspacesJSON, err := cfcurl.Curl(api.cli, nextURL)\n\t\tif nil != err {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, s := range spacesJSON[\"resources\"].([]interface{}) {\n\t\t\ttheSpace := s.(map[string]interface{})\n\t\t\tentity := theSpace[\"entity\"].(map[string]interface{})\n\t\t\tspaces = append(spaces,\n\t\t\t\tSpace{\n\t\t\t\t\tAppsURL: entity[\"apps_url\"].(string),\n\t\t\t\t\tName: entity[\"name\"].(string),\n\t\t\t\t})\n\t\t}\n\t\tif next, ok := spacesJSON[\"next_url\"].(string); ok {\n\t\t\tnextURL = next\n\t\t} else {\n\t\t\tnextURL = \"\"\n\t\t}\n\t}\n\treturn spaces, nil\n}\n\n\/\/GetSpaceApps returns the apps in a space\nfunc (api *APIHelper) GetSpaceApps(appsURL string) ([]App, error) {\n\tnextURL := appsURL\n\tapps := []App{}\n\tfor nextURL != \"\" {\n\t\tappsJSON, err := cfcurl.Curl(api.cli, nextURL)\n\t\tif nil != err {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, a := range appsJSON[\"resources\"].([]interface{}) {\n\t\t\ttheApp := a.(map[string]interface{})\n\t\t\tentity := theApp[\"entity\"].(map[string]interface{})\n\t\t\tapps = append(apps,\n\t\t\t\tApp{\n\t\t\t\t\tInstances: entity[\"instances\"].(float64),\n\t\t\t\t\tRAM:\t\t\t entity[\"memory\"].(float64),\n\t\t\t\t\tRunning:\t \"STARTED\" == entity[\"state\"].(string),\n\t\t\t\t})\n\t\t}\n\t\tif next, ok := appsJSON[\"next_url\"].(string); ok {\n\t\t\tnextURL = next\n\t\t} else {\n\t\t\tnextURL = \"\"\n\t\t}\n\t}\n\treturn apps, nil\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage kv\n\nimport (\n\t\"github.com\/jacobsa\/comeback\/state\"\n)\n\n\/\/ Return a key\/value store that reads keys from the supplied string set to\n\/\/ serve Contains requests, and updates the sets upon successful calls to the\n\/\/ wrapped store's Set method.\nfunc NewExistingKeysStore(\n\texistingKeys state.StringSet,\n\twrapped Store,\n) (store Store) {\n\treturn &existingKeysStore{existingKeys, wrapped}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Implementation\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype existingKeysStore struct {\n\tkeys state.StringSet\n\twrapped Store\n}\n\nfunc (s *existingKeysStore) Set(key []byte, val []byte) (err error) {\n\terr = s.wrapped.Set(key, val)\n\tif err != nil {\n\t\ts.keys.Add(string(key))\n\t}\n\n\treturn\n}\n\nfunc (s *existingKeysStore) Get(key []byte) (val []byte, err error) {\n\treturn s.wrapped.Get(key)\n}\n\nfunc (s *existingKeysStore) Contains(key []byte) (res bool, err error) {\n\tres = s.keys.Contains(string(key))\n\treturn\n}\nFixed a bug.\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage kv\n\nimport (\n\t\"github.com\/jacobsa\/comeback\/state\"\n)\n\n\/\/ Return a key\/value store that reads keys from the supplied string set to\n\/\/ serve Contains requests, and updates the sets upon successful calls to the\n\/\/ wrapped store's Set method.\nfunc NewExistingKeysStore(\n\texistingKeys state.StringSet,\n\twrapped Store,\n) (store Store) {\n\treturn &existingKeysStore{existingKeys, wrapped}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Implementation\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype existingKeysStore struct {\n\tkeys state.StringSet\n\twrapped Store\n}\n\nfunc (s *existingKeysStore) Set(key []byte, val []byte) (err error) {\n\terr = s.wrapped.Set(key, val)\n\n\t\/\/ Store the key if the Set call was successful.\n\tif err == nil {\n\t\ts.keys.Add(string(key))\n\t}\n\n\treturn\n}\n\nfunc (s *existingKeysStore) Get(key []byte) (val []byte, err error) {\n\treturn s.wrapped.Get(key)\n}\n\nfunc (s *existingKeysStore) Contains(key []byte) (res bool, err error) {\n\tres = s.keys.Contains(string(key))\n\treturn\n}\n<|endoftext|>"} {"text":"package clusterquotamapping\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\n\tkapierrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\tutilruntime \"k8s.io\/apimachinery\/pkg\/util\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\t\"k8s.io\/client-go\/util\/workqueue\"\n\tkapi \"k8s.io\/kubernetes\/pkg\/api\"\n\tkcoreinformers \"k8s.io\/kubernetes\/pkg\/client\/informers\/informers_generated\/internalversion\/core\/internalversion\"\n\tkcorelisters \"k8s.io\/kubernetes\/pkg\/client\/listers\/core\/internalversion\"\n\t\"k8s.io\/kubernetes\/pkg\/controller\"\n\n\tocache \"github.com\/openshift\/origin\/pkg\/client\/cache\"\n\t\"github.com\/openshift\/origin\/pkg\/controller\/shared\"\n\tquotaapi \"github.com\/openshift\/origin\/pkg\/quota\/api\"\n)\n\n\/\/ Look out, here there be dragons!\n\/\/ There is a race when dealing with the DeltaFifo compression used to back a reflector for a controller that uses two\n\/\/ SharedInformers for both their watch events AND their caches. The scenario looks like this\n\/\/\n\/\/ 1. Add, Delete a namespace really fast, *before* the add is observed by the controller using the reflector.\n\/\/ 2. Add or Update a quota that matches the Add namespace\n\/\/ 3. The cache had the intermediate state for the namespace for some period of time. This makes the quota update the mapping indicating a match.\n\/\/ 4. The ns Delete is compressed out and never delivered to the controller, so the improper match is never cleared.\n\/\/\n\/\/ This sounds pretty bad, however, we fail in the \"safe\" direction and the consequences are detectable.\n\/\/ When going from quota to namespace, you can get back a namespace that doesn't exist. There are no resource in a non-existence\n\/\/ namespace, so you know to clear all referenced resources. In addition, this add\/delete has to happen so fast\n\/\/ that it would be nearly impossible for any resources to be created. If you do create resources, then we must be observing\n\/\/ their deletes. When quota is replenished, we'll see that we need to clear any charges.\n\/\/\n\/\/ When going from namespace to quota, you can get back a quota that doesn't exist. Since the cache is shared,\n\/\/ we know that a missing quota means that there isn't anything for us to bill against, so we can skip it.\n\/\/\n\/\/ If the mapping cache is wrong and a previously deleted quota or namespace is created, this controller\n\/\/ correctly adds the items back to the list and clears out all previous mappings.\n\/\/\n\/\/ In addition to those constraints, the timing threshold for actually hitting this problem is really tight. It's\n\/\/ basically a script that is creating and deleting things as fast as it possibly can. Sub-millisecond in the fuzz\n\/\/ test where I caught the problem.\n\n\/\/ NewClusterQuotaMappingController builds a mapping between namespaces and clusterresourcequotas\nfunc NewClusterQuotaMappingController(namespaceInformer kcoreinformers.NamespaceInformer, quotaInformer shared.ClusterResourceQuotaInformer) *ClusterQuotaMappingController {\n\tc := &ClusterQuotaMappingController{\n\t\tnamespaceQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"controller_clusterquotamappingcontroller_namespaces\"),\n\n\t\tquotaQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"controller_clusterquotamappingcontroller_clusterquotas\"),\n\n\t\tclusterQuotaMapper: NewClusterQuotaMapper(),\n\t}\n\n\tnamespaceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: c.addNamespace,\n\t\tUpdateFunc: c.updateNamespace,\n\t\tDeleteFunc: c.deleteNamespace,\n\t})\n\tc.namespaceLister = namespaceInformer.Lister()\n\tc.namespacesSynced = namespaceInformer.Informer().HasSynced\n\n\tquotaInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: c.addQuota,\n\t\tUpdateFunc: c.updateQuota,\n\t\tDeleteFunc: c.deleteQuota,\n\t})\n\tc.quotaLister = quotaInformer.Lister()\n\tc.quotasSynced = quotaInformer.Informer().HasSynced\n\n\treturn c\n}\n\ntype ClusterQuotaMappingController struct {\n\tnamespaceQueue workqueue.RateLimitingInterface\n\tnamespaceLister kcorelisters.NamespaceLister\n\tnamespacesSynced func() bool\n\n\tquotaQueue workqueue.RateLimitingInterface\n\tquotaLister *ocache.IndexerToClusterResourceQuotaLister\n\tquotasSynced func() bool\n\n\tclusterQuotaMapper *clusterQuotaMapper\n}\n\nfunc (c *ClusterQuotaMappingController) GetClusterQuotaMapper() ClusterQuotaMapper {\n\treturn c.clusterQuotaMapper\n}\n\nfunc (c *ClusterQuotaMappingController) Run(workers int, stopCh <-chan struct{}) {\n\tdefer utilruntime.HandleCrash()\n\n\t\/\/ Wait for the stores to sync before starting any work in this controller.\n\tready := make(chan struct{})\n\tgo c.waitForSyncedStores(ready, stopCh)\n\tselect {\n\tcase <-ready:\n\tcase <-stopCh:\n\t\treturn\n\t}\n\n\tglog.V(4).Infof(\"Starting workers for quota mapping controller workers\")\n\tfor i := 0; i < workers; i++ {\n\t\tgo wait.Until(c.namespaceWorker, time.Second, stopCh)\n\t\tgo wait.Until(c.quotaWorker, time.Second, stopCh)\n\t}\n\n\t<-stopCh\n\tglog.Infof(\"Shutting down quota mapping controller\")\n\tc.namespaceQueue.ShutDown()\n\tc.quotaQueue.ShutDown()\n}\n\nfunc (c *ClusterQuotaMappingController) syncQuota(quota *quotaapi.ClusterResourceQuota) error {\n\tmatcherFunc, err := quotaapi.GetMatcher(quota.Spec.Selector)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tallNamespaces, err := c.namespaceLister.List(labels.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range allNamespaces {\n\t\tnamespace := allNamespaces[i]\n\n\t\t\/\/ attempt to set the mapping. The quotas never collide with each other (same quota is never processed twice in parallel)\n\t\t\/\/ so this means that the project we have is out of date, pull a more recent copy from the cache and retest\n\t\tfor {\n\t\t\tmatches, err := matcherFunc(namespace)\n\t\t\tif err != nil {\n\t\t\t\tutilruntime.HandleError(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsuccess, quotaMatches, _ := c.clusterQuotaMapper.setMapping(quota, namespace, !matches)\n\t\t\tif success {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t\/\/ if the quota is mismatched, then someone has updated the quota or has deleted the entry entirely.\n\t\t\t\/\/ if we've been updated, we'll be rekicked, if we've been deleted we should stop. Either way, this\n\t\t\t\/\/ execution is finished\n\t\t\tif !quotaMatches {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tns, err := c.namespaceLister.Get(namespace.Name)\n\t\t\tif kapierrors.IsNotFound(err) {\n\t\t\t\t\/\/ if the namespace is gone, then the deleteNamespace path will be called, just continue\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tutilruntime.HandleError(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnamespace = ns\n\t\t}\n\n\t}\n\n\tc.clusterQuotaMapper.completeQuota(quota)\n\treturn nil\n}\n\nfunc (c *ClusterQuotaMappingController) syncNamespace(namespace *kapi.Namespace) error {\n\tallQuotas, err1 := c.quotaLister.List(metav1.ListOptions{})\n\tif err1 != nil {\n\t\treturn err1\n\t}\n\tfor i := range allQuotas {\n\t\tquota := allQuotas[i]\n\n\t\tfor {\n\t\t\tmatcherFunc, err := quotaapi.GetMatcher(quota.Spec.Selector)\n\t\t\tif err != nil {\n\t\t\t\tutilruntime.HandleError(err)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t\/\/ attempt to set the mapping. The namespaces never collide with each other (same namespace is never processed twice in parallel)\n\t\t\t\/\/ so this means that the quota we have is out of date, pull a more recent copy from the cache and retest\n\t\t\tmatches, err := matcherFunc(namespace)\n\t\t\tif err != nil {\n\t\t\t\tutilruntime.HandleError(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsuccess, _, namespaceMatches := c.clusterQuotaMapper.setMapping(quota, namespace, !matches)\n\t\t\tif success {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t\/\/ if the namespace is mismatched, then someone has updated the namespace or has deleted the entry entirely.\n\t\t\t\/\/ if we've been updated, we'll be rekicked, if we've been deleted we should stop. Either way, this\n\t\t\t\/\/ execution is finished\n\t\t\tif !namespaceMatches {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tquota, err = c.quotaLister.Get(quota.Name)\n\t\t\tif kapierrors.IsNotFound(err) {\n\t\t\t\t\/\/ if the quota is gone, then the deleteQuota path will be called, just continue\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tutilruntime.HandleError(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tc.clusterQuotaMapper.completeNamespace(namespace)\n\treturn nil\n}\n\nfunc (c *ClusterQuotaMappingController) quotaWork() bool {\n\tkey, quit := c.quotaQueue.Get()\n\tif quit {\n\t\treturn true\n\t}\n\tdefer c.quotaQueue.Done(key)\n\n\tquota, exists, err := c.quotaLister.GetByKey(key.(string))\n\tif !exists {\n\t\tc.quotaQueue.Forget(key)\n\t\treturn false\n\t}\n\tif err != nil {\n\t\tutilruntime.HandleError(err)\n\t\treturn false\n\t}\n\n\terr = c.syncQuota(quota.(*quotaapi.ClusterResourceQuota))\n\toutOfRetries := c.quotaQueue.NumRequeues(key) > 5\n\tswitch {\n\tcase err != nil && outOfRetries:\n\t\tutilruntime.HandleError(err)\n\t\tc.quotaQueue.Forget(key)\n\n\tcase err != nil && !outOfRetries:\n\t\tc.quotaQueue.AddRateLimited(key)\n\n\tdefault:\n\t\tc.quotaQueue.Forget(key)\n\t}\n\n\treturn false\n}\n\nfunc (c *ClusterQuotaMappingController) quotaWorker() {\n\tfor {\n\t\tif quit := c.quotaWork(); quit {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *ClusterQuotaMappingController) namespaceWork() bool {\n\tkey, quit := c.namespaceQueue.Get()\n\tif quit {\n\t\treturn true\n\t}\n\tdefer c.namespaceQueue.Done(key)\n\n\tnamespace, err := c.namespaceLister.Get(key.(string))\n\tif kapierrors.IsNotFound(err) {\n\t\tc.namespaceQueue.Forget(key)\n\t\treturn false\n\t}\n\tif err != nil {\n\t\tutilruntime.HandleError(err)\n\t\treturn false\n\t}\n\n\terr = c.syncNamespace(namespace)\n\toutOfRetries := c.namespaceQueue.NumRequeues(key) > 5\n\tswitch {\n\tcase err != nil && outOfRetries:\n\t\tutilruntime.HandleError(err)\n\t\tc.namespaceQueue.Forget(key)\n\n\tcase err != nil && !outOfRetries:\n\t\tc.namespaceQueue.AddRateLimited(key)\n\n\tdefault:\n\t\tc.namespaceQueue.Forget(key)\n\t}\n\n\treturn false\n}\n\nfunc (c *ClusterQuotaMappingController) namespaceWorker() {\n\tfor {\n\t\tif quit := c.namespaceWork(); quit {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *ClusterQuotaMappingController) waitForSyncedStores(ready chan<- struct{}, stopCh <-chan struct{}) {\n\tdefer utilruntime.HandleCrash()\n\n\tfor !c.namespacesSynced() || !c.quotasSynced() {\n\t\tglog.V(4).Infof(\"Waiting for the caches to sync before starting the quota mapping controller workers\")\n\t\tselect {\n\t\tcase <-time.After(100 * time.Millisecond):\n\t\tcase <-stopCh:\n\t\t\treturn\n\t\t}\n\t}\n\tclose(ready)\n}\n\nfunc (c *ClusterQuotaMappingController) deleteNamespace(obj interface{}) {\n\tns, ok1 := obj.(*kapi.Namespace)\n\tif !ok1 {\n\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\tif !ok {\n\t\t\tutilruntime.HandleError(fmt.Errorf(\"couldn't get object from tombstone %v\", obj))\n\t\t\treturn\n\t\t}\n\t\tns, ok = tombstone.Obj.(*kapi.Namespace)\n\t\tif !ok {\n\t\t\tutilruntime.HandleError(fmt.Errorf(\"tombstone contained object that is not a Namespace %v\", obj))\n\t\t\treturn\n\t\t}\n\t}\n\n\tc.clusterQuotaMapper.removeNamespace(ns.Name)\n}\n\nfunc (c *ClusterQuotaMappingController) addNamespace(cur interface{}) {\n\tc.enqueueNamespace(cur)\n}\nfunc (c *ClusterQuotaMappingController) updateNamespace(old, cur interface{}) {\n\tc.enqueueNamespace(cur)\n}\nfunc (c *ClusterQuotaMappingController) enqueueNamespace(obj interface{}) {\n\tns, ok := obj.(*kapi.Namespace)\n\tif !ok {\n\t\tutilruntime.HandleError(fmt.Errorf(\"not a Namespace %v\", obj))\n\t\treturn\n\t}\n\tif !c.clusterQuotaMapper.requireNamespace(ns) {\n\t\treturn\n\t}\n\n\tkey, err := controller.KeyFunc(ns)\n\tif err != nil {\n\t\tutilruntime.HandleError(err)\n\t\treturn\n\t}\n\tc.namespaceQueue.Add(key)\n}\n\nfunc (c *ClusterQuotaMappingController) deleteQuota(obj interface{}) {\n\tquota, ok1 := obj.(*quotaapi.ClusterResourceQuota)\n\tif !ok1 {\n\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\tif !ok {\n\t\t\tutilruntime.HandleError(fmt.Errorf(\"couldn't get object from tombstone %v\", obj))\n\t\t\treturn\n\t\t}\n\t\tquota, ok = tombstone.Obj.(*quotaapi.ClusterResourceQuota)\n\t\tif !ok {\n\t\t\tutilruntime.HandleError(fmt.Errorf(\"tombstone contained object that is not a Quota %v\", obj))\n\t\t\treturn\n\t\t}\n\t}\n\n\tc.clusterQuotaMapper.removeQuota(quota.Name)\n}\n\nfunc (c *ClusterQuotaMappingController) addQuota(cur interface{}) {\n\tc.enqueueQuota(cur)\n}\nfunc (c *ClusterQuotaMappingController) updateQuota(old, cur interface{}) {\n\tc.enqueueQuota(cur)\n}\nfunc (c *ClusterQuotaMappingController) enqueueQuota(obj interface{}) {\n\tquota, ok := obj.(*quotaapi.ClusterResourceQuota)\n\tif !ok {\n\t\tutilruntime.HandleError(fmt.Errorf(\"not a Quota %v\", obj))\n\t\treturn\n\t}\n\tif !c.clusterQuotaMapper.requireQuota(quota) {\n\t\treturn\n\t}\n\n\tkey, err := controller.KeyFunc(quota)\n\tif err != nil {\n\t\tutilruntime.HandleError(err)\n\t\treturn\n\t}\n\tc.quotaQueue.Add(key)\n}\nswitch clusterquotamapping to use the normal cache waitpackage clusterquotamapping\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\n\tkapierrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\tutilruntime \"k8s.io\/apimachinery\/pkg\/util\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\t\"k8s.io\/client-go\/util\/workqueue\"\n\tkapi \"k8s.io\/kubernetes\/pkg\/api\"\n\tkcoreinformers \"k8s.io\/kubernetes\/pkg\/client\/informers\/informers_generated\/internalversion\/core\/internalversion\"\n\tkcorelisters \"k8s.io\/kubernetes\/pkg\/client\/listers\/core\/internalversion\"\n\t\"k8s.io\/kubernetes\/pkg\/controller\"\n\n\tocache \"github.com\/openshift\/origin\/pkg\/client\/cache\"\n\t\"github.com\/openshift\/origin\/pkg\/controller\/shared\"\n\tquotaapi \"github.com\/openshift\/origin\/pkg\/quota\/api\"\n)\n\n\/\/ Look out, here there be dragons!\n\/\/ There is a race when dealing with the DeltaFifo compression used to back a reflector for a controller that uses two\n\/\/ SharedInformers for both their watch events AND their caches. The scenario looks like this\n\/\/\n\/\/ 1. Add, Delete a namespace really fast, *before* the add is observed by the controller using the reflector.\n\/\/ 2. Add or Update a quota that matches the Add namespace\n\/\/ 3. The cache had the intermediate state for the namespace for some period of time. This makes the quota update the mapping indicating a match.\n\/\/ 4. The ns Delete is compressed out and never delivered to the controller, so the improper match is never cleared.\n\/\/\n\/\/ This sounds pretty bad, however, we fail in the \"safe\" direction and the consequences are detectable.\n\/\/ When going from quota to namespace, you can get back a namespace that doesn't exist. There are no resource in a non-existence\n\/\/ namespace, so you know to clear all referenced resources. In addition, this add\/delete has to happen so fast\n\/\/ that it would be nearly impossible for any resources to be created. If you do create resources, then we must be observing\n\/\/ their deletes. When quota is replenished, we'll see that we need to clear any charges.\n\/\/\n\/\/ When going from namespace to quota, you can get back a quota that doesn't exist. Since the cache is shared,\n\/\/ we know that a missing quota means that there isn't anything for us to bill against, so we can skip it.\n\/\/\n\/\/ If the mapping cache is wrong and a previously deleted quota or namespace is created, this controller\n\/\/ correctly adds the items back to the list and clears out all previous mappings.\n\/\/\n\/\/ In addition to those constraints, the timing threshold for actually hitting this problem is really tight. It's\n\/\/ basically a script that is creating and deleting things as fast as it possibly can. Sub-millisecond in the fuzz\n\/\/ test where I caught the problem.\n\n\/\/ NewClusterQuotaMappingController builds a mapping between namespaces and clusterresourcequotas\nfunc NewClusterQuotaMappingController(namespaceInformer kcoreinformers.NamespaceInformer, quotaInformer shared.ClusterResourceQuotaInformer) *ClusterQuotaMappingController {\n\tc := &ClusterQuotaMappingController{\n\t\tnamespaceQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"controller_clusterquotamappingcontroller_namespaces\"),\n\n\t\tquotaQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"controller_clusterquotamappingcontroller_clusterquotas\"),\n\n\t\tclusterQuotaMapper: NewClusterQuotaMapper(),\n\t}\n\n\tnamespaceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: c.addNamespace,\n\t\tUpdateFunc: c.updateNamespace,\n\t\tDeleteFunc: c.deleteNamespace,\n\t})\n\tc.namespaceLister = namespaceInformer.Lister()\n\tc.namespacesSynced = namespaceInformer.Informer().HasSynced\n\n\tquotaInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: c.addQuota,\n\t\tUpdateFunc: c.updateQuota,\n\t\tDeleteFunc: c.deleteQuota,\n\t})\n\tc.quotaLister = quotaInformer.Lister()\n\tc.quotasSynced = quotaInformer.Informer().HasSynced\n\n\treturn c\n}\n\ntype ClusterQuotaMappingController struct {\n\tnamespaceQueue workqueue.RateLimitingInterface\n\tnamespaceLister kcorelisters.NamespaceLister\n\tnamespacesSynced func() bool\n\n\tquotaQueue workqueue.RateLimitingInterface\n\tquotaLister *ocache.IndexerToClusterResourceQuotaLister\n\tquotasSynced func() bool\n\n\tclusterQuotaMapper *clusterQuotaMapper\n}\n\nfunc (c *ClusterQuotaMappingController) GetClusterQuotaMapper() ClusterQuotaMapper {\n\treturn c.clusterQuotaMapper\n}\n\nfunc (c *ClusterQuotaMappingController) Run(workers int, stopCh <-chan struct{}) {\n\tdefer utilruntime.HandleCrash()\n\tdefer c.namespaceQueue.ShutDown()\n\tdefer c.quotaQueue.ShutDown()\n\n\tglog.Infof(\"Starting ClusterQuotaMappingController controller\")\n\tdefer glog.Infof(\"Shutting down ClusterQuotaMappingController controller\")\n\n\tif !cache.WaitForCacheSync(stopCh, c.namespacesSynced, c.quotasSynced) {\n\t\tutilruntime.HandleError(fmt.Errorf(\"timed out waiting for caches to sync\"))\n\t\treturn\n\t}\n\n\tglog.V(4).Infof(\"Starting workers for quota mapping controller workers\")\n\tfor i := 0; i < workers; i++ {\n\t\tgo wait.Until(c.namespaceWorker, time.Second, stopCh)\n\t\tgo wait.Until(c.quotaWorker, time.Second, stopCh)\n\t}\n\n\t<-stopCh\n}\n\nfunc (c *ClusterQuotaMappingController) syncQuota(quota *quotaapi.ClusterResourceQuota) error {\n\tmatcherFunc, err := quotaapi.GetMatcher(quota.Spec.Selector)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tallNamespaces, err := c.namespaceLister.List(labels.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range allNamespaces {\n\t\tnamespace := allNamespaces[i]\n\n\t\t\/\/ attempt to set the mapping. The quotas never collide with each other (same quota is never processed twice in parallel)\n\t\t\/\/ so this means that the project we have is out of date, pull a more recent copy from the cache and retest\n\t\tfor {\n\t\t\tmatches, err := matcherFunc(namespace)\n\t\t\tif err != nil {\n\t\t\t\tutilruntime.HandleError(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsuccess, quotaMatches, _ := c.clusterQuotaMapper.setMapping(quota, namespace, !matches)\n\t\t\tif success {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t\/\/ if the quota is mismatched, then someone has updated the quota or has deleted the entry entirely.\n\t\t\t\/\/ if we've been updated, we'll be rekicked, if we've been deleted we should stop. Either way, this\n\t\t\t\/\/ execution is finished\n\t\t\tif !quotaMatches {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tns, err := c.namespaceLister.Get(namespace.Name)\n\t\t\tif kapierrors.IsNotFound(err) {\n\t\t\t\t\/\/ if the namespace is gone, then the deleteNamespace path will be called, just continue\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tutilruntime.HandleError(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnamespace = ns\n\t\t}\n\n\t}\n\n\tc.clusterQuotaMapper.completeQuota(quota)\n\treturn nil\n}\n\nfunc (c *ClusterQuotaMappingController) syncNamespace(namespace *kapi.Namespace) error {\n\tallQuotas, err1 := c.quotaLister.List(metav1.ListOptions{})\n\tif err1 != nil {\n\t\treturn err1\n\t}\n\tfor i := range allQuotas {\n\t\tquota := allQuotas[i]\n\n\t\tfor {\n\t\t\tmatcherFunc, err := quotaapi.GetMatcher(quota.Spec.Selector)\n\t\t\tif err != nil {\n\t\t\t\tutilruntime.HandleError(err)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t\/\/ attempt to set the mapping. The namespaces never collide with each other (same namespace is never processed twice in parallel)\n\t\t\t\/\/ so this means that the quota we have is out of date, pull a more recent copy from the cache and retest\n\t\t\tmatches, err := matcherFunc(namespace)\n\t\t\tif err != nil {\n\t\t\t\tutilruntime.HandleError(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsuccess, _, namespaceMatches := c.clusterQuotaMapper.setMapping(quota, namespace, !matches)\n\t\t\tif success {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t\/\/ if the namespace is mismatched, then someone has updated the namespace or has deleted the entry entirely.\n\t\t\t\/\/ if we've been updated, we'll be rekicked, if we've been deleted we should stop. Either way, this\n\t\t\t\/\/ execution is finished\n\t\t\tif !namespaceMatches {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tquota, err = c.quotaLister.Get(quota.Name)\n\t\t\tif kapierrors.IsNotFound(err) {\n\t\t\t\t\/\/ if the quota is gone, then the deleteQuota path will be called, just continue\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tutilruntime.HandleError(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tc.clusterQuotaMapper.completeNamespace(namespace)\n\treturn nil\n}\n\nfunc (c *ClusterQuotaMappingController) quotaWork() bool {\n\tkey, quit := c.quotaQueue.Get()\n\tif quit {\n\t\treturn true\n\t}\n\tdefer c.quotaQueue.Done(key)\n\n\tquota, exists, err := c.quotaLister.GetByKey(key.(string))\n\tif !exists {\n\t\tc.quotaQueue.Forget(key)\n\t\treturn false\n\t}\n\tif err != nil {\n\t\tutilruntime.HandleError(err)\n\t\treturn false\n\t}\n\n\terr = c.syncQuota(quota.(*quotaapi.ClusterResourceQuota))\n\toutOfRetries := c.quotaQueue.NumRequeues(key) > 5\n\tswitch {\n\tcase err != nil && outOfRetries:\n\t\tutilruntime.HandleError(err)\n\t\tc.quotaQueue.Forget(key)\n\n\tcase err != nil && !outOfRetries:\n\t\tc.quotaQueue.AddRateLimited(key)\n\n\tdefault:\n\t\tc.quotaQueue.Forget(key)\n\t}\n\n\treturn false\n}\n\nfunc (c *ClusterQuotaMappingController) quotaWorker() {\n\tfor {\n\t\tif quit := c.quotaWork(); quit {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *ClusterQuotaMappingController) namespaceWork() bool {\n\tkey, quit := c.namespaceQueue.Get()\n\tif quit {\n\t\treturn true\n\t}\n\tdefer c.namespaceQueue.Done(key)\n\n\tnamespace, err := c.namespaceLister.Get(key.(string))\n\tif kapierrors.IsNotFound(err) {\n\t\tc.namespaceQueue.Forget(key)\n\t\treturn false\n\t}\n\tif err != nil {\n\t\tutilruntime.HandleError(err)\n\t\treturn false\n\t}\n\n\terr = c.syncNamespace(namespace)\n\toutOfRetries := c.namespaceQueue.NumRequeues(key) > 5\n\tswitch {\n\tcase err != nil && outOfRetries:\n\t\tutilruntime.HandleError(err)\n\t\tc.namespaceQueue.Forget(key)\n\n\tcase err != nil && !outOfRetries:\n\t\tc.namespaceQueue.AddRateLimited(key)\n\n\tdefault:\n\t\tc.namespaceQueue.Forget(key)\n\t}\n\n\treturn false\n}\n\nfunc (c *ClusterQuotaMappingController) namespaceWorker() {\n\tfor {\n\t\tif quit := c.namespaceWork(); quit {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *ClusterQuotaMappingController) deleteNamespace(obj interface{}) {\n\tns, ok1 := obj.(*kapi.Namespace)\n\tif !ok1 {\n\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\tif !ok {\n\t\t\tutilruntime.HandleError(fmt.Errorf(\"couldn't get object from tombstone %v\", obj))\n\t\t\treturn\n\t\t}\n\t\tns, ok = tombstone.Obj.(*kapi.Namespace)\n\t\tif !ok {\n\t\t\tutilruntime.HandleError(fmt.Errorf(\"tombstone contained object that is not a Namespace %v\", obj))\n\t\t\treturn\n\t\t}\n\t}\n\n\tc.clusterQuotaMapper.removeNamespace(ns.Name)\n}\n\nfunc (c *ClusterQuotaMappingController) addNamespace(cur interface{}) {\n\tc.enqueueNamespace(cur)\n}\nfunc (c *ClusterQuotaMappingController) updateNamespace(old, cur interface{}) {\n\tc.enqueueNamespace(cur)\n}\nfunc (c *ClusterQuotaMappingController) enqueueNamespace(obj interface{}) {\n\tns, ok := obj.(*kapi.Namespace)\n\tif !ok {\n\t\tutilruntime.HandleError(fmt.Errorf(\"not a Namespace %v\", obj))\n\t\treturn\n\t}\n\tif !c.clusterQuotaMapper.requireNamespace(ns) {\n\t\treturn\n\t}\n\n\tkey, err := controller.KeyFunc(ns)\n\tif err != nil {\n\t\tutilruntime.HandleError(err)\n\t\treturn\n\t}\n\tc.namespaceQueue.Add(key)\n}\n\nfunc (c *ClusterQuotaMappingController) deleteQuota(obj interface{}) {\n\tquota, ok1 := obj.(*quotaapi.ClusterResourceQuota)\n\tif !ok1 {\n\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\tif !ok {\n\t\t\tutilruntime.HandleError(fmt.Errorf(\"couldn't get object from tombstone %v\", obj))\n\t\t\treturn\n\t\t}\n\t\tquota, ok = tombstone.Obj.(*quotaapi.ClusterResourceQuota)\n\t\tif !ok {\n\t\t\tutilruntime.HandleError(fmt.Errorf(\"tombstone contained object that is not a Quota %v\", obj))\n\t\t\treturn\n\t\t}\n\t}\n\n\tc.clusterQuotaMapper.removeQuota(quota.Name)\n}\n\nfunc (c *ClusterQuotaMappingController) addQuota(cur interface{}) {\n\tc.enqueueQuota(cur)\n}\nfunc (c *ClusterQuotaMappingController) updateQuota(old, cur interface{}) {\n\tc.enqueueQuota(cur)\n}\nfunc (c *ClusterQuotaMappingController) enqueueQuota(obj interface{}) {\n\tquota, ok := obj.(*quotaapi.ClusterResourceQuota)\n\tif !ok {\n\t\tutilruntime.HandleError(fmt.Errorf(\"not a Quota %v\", obj))\n\t\treturn\n\t}\n\tif !c.clusterQuotaMapper.requireQuota(quota) {\n\t\treturn\n\t}\n\n\tkey, err := controller.KeyFunc(quota)\n\tif err != nil {\n\t\tutilruntime.HandleError(err)\n\t\treturn\n\t}\n\tc.quotaQueue.Add(key)\n}\n<|endoftext|>"} {"text":"package art\n\nimport (\n\t\"sync\"\n\n\t\"unsafe\"\n)\n\ntype nodeFactory interface {\n\tnewNode4() *artNode\n\tnewNode16() *artNode\n\tnewNode48() *artNode\n\tnewNode256() *artNode\n\tnewLeaf(key Key, value interface{}) *artNode\n\n\treleaseNode(n *artNode)\n}\n\nvar factory = newObjFactory()\n\nfunc newTree() *tree {\n\treturn &tree{}\n}\n\ntype poolObjFactory struct {\n\tartNodePool sync.Pool\n\tnode4Pool sync.Pool\n\tnode16Pool sync.Pool\n\tnode48Pool sync.Pool\n\tnode256Pool sync.Pool\n\tleafPool sync.Pool\n}\n\ntype objFactory struct{}\n\nfunc newObjFactory() nodeFactory {\n\treturn &objFactory{}\n}\n\n\/\/ Simple obj factory implementation\nfunc (f *objFactory) newNode4() *artNode {\n\treturn &artNode{kind: Node4, ref: unsafe.Pointer(new(node4))}\n}\n\nfunc (f *objFactory) newNode16() *artNode {\n\treturn &artNode{kind: Node16, ref: unsafe.Pointer(&node16{})}\n}\n\nfunc (f *objFactory) newNode48() *artNode {\n\treturn &artNode{kind: Node48, ref: unsafe.Pointer(&node48{})}\n}\n\nfunc (f *objFactory) newNode256() *artNode {\n\treturn &artNode{kind: Node256, ref: unsafe.Pointer(&node256{})}\n}\n\nfunc (f *objFactory) newLeaf(key Key, value interface{}) *artNode {\n\tclonedKey := make(Key, len(key))\n\tcopy(clonedKey, key)\n\treturn &artNode{kind: Leaf, ref: unsafe.Pointer(&leaf{key: clonedKey, value: value})}\n}\n\nfunc (f *objFactory) releaseNode(an *artNode) {\n\t\/\/ do nothing\n}\n\n\/\/ func newPoolObjFactory() nodeFactory {\n\/\/ \treturn &poolObjFactory{\n\/\/ \t\tartNodePool: sync.Pool{New: func() interface{} { return new(artNode) }},\n\/\/ \t\tnode4Pool: sync.Pool{New: func() interface{} { return new(node4) }},\n\/\/ \t\tnode16Pool: sync.Pool{New: func() interface{} { return new(node16) }},\n\/\/ \t\tnode48Pool: sync.Pool{New: func() interface{} { return new(node48) }},\n\/\/ \t\tnode256Pool: sync.Pool{New: func() interface{} { return new(node256) }},\n\/\/ \t\tleafPool: sync.Pool{New: func() interface{} { return new(leaf) }},\n\/\/ \t}\n\/\/ }\n\n\/\/ func initArtNode(an *artNode, kind Kind, ref unsafe.Pointer) {\n\/\/ \tan.kind = kind\n\/\/ \tan.ref = ref\n\n\/\/ \tswitch kind {\n\/\/ \tcase Node4, Node16, Node48, Node256:\n\/\/ \t\tn := an.node()\n\/\/ \t\tn.numChildren = 0\n\/\/ \t\tn.prefixLen = 0\n\/\/ \t\t\/\/ for i := range n.prefix {\n\/\/ \t\t\/\/ \tn.prefix[i] = 0\n\/\/ \t\t\/\/ }\n\/\/ \t}\n\n\/\/ \tswitch an.kind {\n\/\/ \tcase Node4:\n\/\/ \t\tn := an.node4()\n\/\/ \t\tfor i := range n.keys {\n\/\/ \t\t\tn.keys[i] = 0\n\/\/ \t\t\tn.children[i] = nil\n\/\/ \t\t}\n\/\/ \tcase Node16:\n\/\/ \t\tn := an.node16()\n\/\/ \t\tfor i := range n.keys {\n\/\/ \t\t\tn.keys[i] = 0\n\/\/ \t\t\tn.children[i] = nil\n\/\/ \t\t}\n\/\/ \tcase Node48:\n\/\/ \t\tn := an.node48()\n\/\/ \t\tfor i := range n.keys {\n\/\/ \t\t\tn.keys[i] = 0\n\/\/ \t\t}\n\/\/ \t\tfor i := range n.children {\n\/\/ \t\t\tn.children[i] = nil\n\/\/ \t\t}\n\/\/ \tcase Node256:\n\/\/ \t\tn := an.node256()\n\/\/ \t\tfor i := range n.children {\n\/\/ \t\t\tn.children[i] = nil\n\/\/ \t\t}\n\/\/ \tcase Leaf:\n\/\/ \t\tn := an.leaf()\n\/\/ \t\tn.key = nil\n\/\/ \t\tn.value = nil\n\/\/ \t}\n\/\/ }\n\n\/\/ \/\/ Pool based factory implementation\n\n\/\/ func (f *poolObjFactory) newNode4() *artNode {\n\/\/ \tan := f.artNodePool.Get().(*artNode)\n\/\/ \tnode := f.node4Pool.Get().(*node4)\n\/\/ \tinitArtNode(an, Node4, unsafe.Pointer(node))\n\/\/ \treturn an\n\/\/ }\n\n\/\/ func (f *poolObjFactory) newNode16() *artNode {\n\/\/ \tan := f.artNodePool.Get().(*artNode)\n\/\/ \tnode := f.node16Pool.Get().(*node16)\n\/\/ \tinitArtNode(an, Node16, unsafe.Pointer(node))\n\/\/ \treturn an\n\/\/ }\n\n\/\/ func (f *poolObjFactory) newNode48() *artNode {\n\/\/ \tan := f.artNodePool.Get().(*artNode)\n\/\/ \tnode := f.node48Pool.Get().(*node48)\n\/\/ \tinitArtNode(an, Node48, unsafe.Pointer(node))\n\/\/ \treturn an\n\/\/ }\n\n\/\/ func (f *poolObjFactory) newNode256() *artNode {\n\/\/ \tan := f.artNodePool.Get().(*artNode)\n\/\/ \tnode := f.node256Pool.Get().(*node256)\n\/\/ \tinitArtNode(an, Node256, unsafe.Pointer(node))\n\/\/ \treturn an\n\/\/ }\n\n\/\/ func (f *poolObjFactory) newLeaf(key Key, value interface{}) *artNode {\n\/\/ \tan := f.artNodePool.Get().(*artNode)\n\/\/ \tnode := f.leafPool.Get().(*leaf)\n\n\/\/ \tinitArtNode(an, Leaf, unsafe.Pointer(node))\n\n\/\/ \tclonedKey := make(Key, len(key))\n\/\/ \tcopy(clonedKey, key)\n\/\/ \tnode.key = clonedKey\n\/\/ \tnode.value = value\n\n\/\/ \treturn an\n\/\/ }\n\n\/\/ func (f *poolObjFactory) releaseNode(an *artNode) {\n\/\/ \tif an == nil {\n\/\/ \t\treturn\n\/\/ \t}\n\n\/\/ \t\/\/ fmt.Printf(\"releaseNode %p\\n\", an)\n\/\/ \t\/\/ return\n\n\/\/ \tswitch an.kind {\n\/\/ \tcase Node4:\n\/\/ \t\tf.node4Pool.Put(an.node4())\n\n\/\/ \tcase Node16:\n\/\/ \t\tf.node16Pool.Put(an.node16())\n\n\/\/ \tcase Node48:\n\/\/ \t\tf.node48Pool.Put(an.node48())\n\n\/\/ \tcase Node256:\n\/\/ \t\tf.node256Pool.Put(an.node256())\n\n\/\/ \tcase Leaf:\n\/\/ \t\tf.leafPool.Put(an.leaf())\n\/\/ \t}\n\n\/\/ \tf.artNodePool.Put(an)\n\/\/ }\nRemove unused package and sync.Pool reference; Support go1.1\/1.2package art\n\nimport (\n\t\/\/ \"sync\"\n\n\t\"unsafe\"\n)\n\ntype nodeFactory interface {\n\tnewNode4() *artNode\n\tnewNode16() *artNode\n\tnewNode48() *artNode\n\tnewNode256() *artNode\n\tnewLeaf(key Key, value interface{}) *artNode\n\n\treleaseNode(n *artNode)\n}\n\nvar factory = newObjFactory()\n\nfunc newTree() *tree {\n\treturn &tree{}\n}\n\n\/\/ type poolObjFactory struct {\n\/\/ \tartNodePool sync.Pool\n\/\/ \tnode4Pool sync.Pool\n\/\/ \tnode16Pool sync.Pool\n\/\/ \tnode48Pool sync.Pool\n\/\/ \tnode256Pool sync.Pool\n\/\/ \tleafPool sync.Pool\n\/\/ }\n\ntype objFactory struct{}\n\nfunc newObjFactory() nodeFactory {\n\treturn &objFactory{}\n}\n\n\/\/ Simple obj factory implementation\nfunc (f *objFactory) newNode4() *artNode {\n\treturn &artNode{kind: Node4, ref: unsafe.Pointer(new(node4))}\n}\n\nfunc (f *objFactory) newNode16() *artNode {\n\treturn &artNode{kind: Node16, ref: unsafe.Pointer(&node16{})}\n}\n\nfunc (f *objFactory) newNode48() *artNode {\n\treturn &artNode{kind: Node48, ref: unsafe.Pointer(&node48{})}\n}\n\nfunc (f *objFactory) newNode256() *artNode {\n\treturn &artNode{kind: Node256, ref: unsafe.Pointer(&node256{})}\n}\n\nfunc (f *objFactory) newLeaf(key Key, value interface{}) *artNode {\n\tclonedKey := make(Key, len(key))\n\tcopy(clonedKey, key)\n\treturn &artNode{kind: Leaf, ref: unsafe.Pointer(&leaf{key: clonedKey, value: value})}\n}\n\nfunc (f *objFactory) releaseNode(an *artNode) {\n\t\/\/ do nothing\n}\n\n\/\/ func newPoolObjFactory() nodeFactory {\n\/\/ \treturn &poolObjFactory{\n\/\/ \t\tartNodePool: sync.Pool{New: func() interface{} { return new(artNode) }},\n\/\/ \t\tnode4Pool: sync.Pool{New: func() interface{} { return new(node4) }},\n\/\/ \t\tnode16Pool: sync.Pool{New: func() interface{} { return new(node16) }},\n\/\/ \t\tnode48Pool: sync.Pool{New: func() interface{} { return new(node48) }},\n\/\/ \t\tnode256Pool: sync.Pool{New: func() interface{} { return new(node256) }},\n\/\/ \t\tleafPool: sync.Pool{New: func() interface{} { return new(leaf) }},\n\/\/ \t}\n\/\/ }\n\n\/\/ func initArtNode(an *artNode, kind Kind, ref unsafe.Pointer) {\n\/\/ \tan.kind = kind\n\/\/ \tan.ref = ref\n\n\/\/ \tswitch kind {\n\/\/ \tcase Node4, Node16, Node48, Node256:\n\/\/ \t\tn := an.node()\n\/\/ \t\tn.numChildren = 0\n\/\/ \t\tn.prefixLen = 0\n\/\/ \t\t\/\/ for i := range n.prefix {\n\/\/ \t\t\/\/ \tn.prefix[i] = 0\n\/\/ \t\t\/\/ }\n\/\/ \t}\n\n\/\/ \tswitch an.kind {\n\/\/ \tcase Node4:\n\/\/ \t\tn := an.node4()\n\/\/ \t\tfor i := range n.keys {\n\/\/ \t\t\tn.keys[i] = 0\n\/\/ \t\t\tn.children[i] = nil\n\/\/ \t\t}\n\/\/ \tcase Node16:\n\/\/ \t\tn := an.node16()\n\/\/ \t\tfor i := range n.keys {\n\/\/ \t\t\tn.keys[i] = 0\n\/\/ \t\t\tn.children[i] = nil\n\/\/ \t\t}\n\/\/ \tcase Node48:\n\/\/ \t\tn := an.node48()\n\/\/ \t\tfor i := range n.keys {\n\/\/ \t\t\tn.keys[i] = 0\n\/\/ \t\t}\n\/\/ \t\tfor i := range n.children {\n\/\/ \t\t\tn.children[i] = nil\n\/\/ \t\t}\n\/\/ \tcase Node256:\n\/\/ \t\tn := an.node256()\n\/\/ \t\tfor i := range n.children {\n\/\/ \t\t\tn.children[i] = nil\n\/\/ \t\t}\n\/\/ \tcase Leaf:\n\/\/ \t\tn := an.leaf()\n\/\/ \t\tn.key = nil\n\/\/ \t\tn.value = nil\n\/\/ \t}\n\/\/ }\n\n\/\/ \/\/ Pool based factory implementation\n\n\/\/ func (f *poolObjFactory) newNode4() *artNode {\n\/\/ \tan := f.artNodePool.Get().(*artNode)\n\/\/ \tnode := f.node4Pool.Get().(*node4)\n\/\/ \tinitArtNode(an, Node4, unsafe.Pointer(node))\n\/\/ \treturn an\n\/\/ }\n\n\/\/ func (f *poolObjFactory) newNode16() *artNode {\n\/\/ \tan := f.artNodePool.Get().(*artNode)\n\/\/ \tnode := f.node16Pool.Get().(*node16)\n\/\/ \tinitArtNode(an, Node16, unsafe.Pointer(node))\n\/\/ \treturn an\n\/\/ }\n\n\/\/ func (f *poolObjFactory) newNode48() *artNode {\n\/\/ \tan := f.artNodePool.Get().(*artNode)\n\/\/ \tnode := f.node48Pool.Get().(*node48)\n\/\/ \tinitArtNode(an, Node48, unsafe.Pointer(node))\n\/\/ \treturn an\n\/\/ }\n\n\/\/ func (f *poolObjFactory) newNode256() *artNode {\n\/\/ \tan := f.artNodePool.Get().(*artNode)\n\/\/ \tnode := f.node256Pool.Get().(*node256)\n\/\/ \tinitArtNode(an, Node256, unsafe.Pointer(node))\n\/\/ \treturn an\n\/\/ }\n\n\/\/ func (f *poolObjFactory) newLeaf(key Key, value interface{}) *artNode {\n\/\/ \tan := f.artNodePool.Get().(*artNode)\n\/\/ \tnode := f.leafPool.Get().(*leaf)\n\n\/\/ \tinitArtNode(an, Leaf, unsafe.Pointer(node))\n\n\/\/ \tclonedKey := make(Key, len(key))\n\/\/ \tcopy(clonedKey, key)\n\/\/ \tnode.key = clonedKey\n\/\/ \tnode.value = value\n\n\/\/ \treturn an\n\/\/ }\n\n\/\/ func (f *poolObjFactory) releaseNode(an *artNode) {\n\/\/ \tif an == nil {\n\/\/ \t\treturn\n\/\/ \t}\n\n\/\/ \t\/\/ fmt.Printf(\"releaseNode %p\\n\", an)\n\/\/ \t\/\/ return\n\n\/\/ \tswitch an.kind {\n\/\/ \tcase Node4:\n\/\/ \t\tf.node4Pool.Put(an.node4())\n\n\/\/ \tcase Node16:\n\/\/ \t\tf.node16Pool.Put(an.node16())\n\n\/\/ \tcase Node48:\n\/\/ \t\tf.node48Pool.Put(an.node48())\n\n\/\/ \tcase Node256:\n\/\/ \t\tf.node256Pool.Put(an.node256())\n\n\/\/ \tcase Leaf:\n\/\/ \t\tf.leafPool.Put(an.leaf())\n\/\/ \t}\n\n\/\/ \tf.artNodePool.Put(an)\n\/\/ }\n<|endoftext|>"} {"text":"package gs_test\n\nimport (\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"google.golang.org\/api\/option\"\n\t\"testing\"\n\t\"github.com\/viant\/toolbox\/storage\/gs\"\n)\n\nfunc TestService_List(t *testing.T) {\n\n\tcredential := option.WithServiceAccountFile(\"***\")\n\tservice := gs.NewService(credential)\n\tassert.NotNil(t, service)\n\n\t\/\/objects, err := service.List(\"gs:\/\/s3adlogs\/ad.log.go\")\n\t\/\/assert.Nil(t, err)\n\t\/\/assert.Equal(t, 1, len(objects));\n\n\t\/\/_, err := service.Download(objects[0])\n\t\/\/assert.Nil(t, err)\n\n\t\/\/content, err := ioutil.ReadAll(reader)\n\t\/\/assert.Nil(t, err)\n\t\/\/fmt.Printf(\"%v\\n\", string(content))\n\t\/\/assert.True(t, len(content) > 0)\n\t\/\/err = service.Upload(\"gs:\/\/s3adlogs\/ad1.log?expiry=10\", bytes.NewReader([]byte(\"abc\")))\n\t\/\/assert.Nil(t, err)\n\t\/\/\n\t\/\/object, err := service.Object(\"gs:\/\/s3adlogs\/ad1.log\")\n\t\/\/assert.Nil(t, err)\n\t\/\/err = service.Delete(object)\n\t\/\/assert.Nil(t, err)\n\n}\nReformatedpackage gs_test\n\nimport (\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/viant\/toolbox\/storage\/gs\"\n\t\"google.golang.org\/api\/option\"\n\t\"testing\"\n)\n\nfunc TestService_List(t *testing.T) {\n\n\tcredential := option.WithServiceAccountFile(\"***\")\n\tservice := gs.NewService(credential)\n\tassert.NotNil(t, service)\n\n\t\/\/objects, err := service.List(\"gs:\/\/s3adlogs\/ad.log.go\")\n\t\/\/assert.Nil(t, err)\n\t\/\/assert.Equal(t, 1, len(objects));\n\n\t\/\/_, err := service.Download(objects[0])\n\t\/\/assert.Nil(t, err)\n\n\t\/\/content, err := ioutil.ReadAll(reader)\n\t\/\/assert.Nil(t, err)\n\t\/\/fmt.Printf(\"%v\\n\", string(content))\n\t\/\/assert.True(t, len(content) > 0)\n\t\/\/err = service.Upload(\"gs:\/\/s3adlogs\/ad1.log?expiry=10\", bytes.NewReader([]byte(\"abc\")))\n\t\/\/assert.Nil(t, err)\n\t\/\/\n\t\/\/object, err := service.Object(\"gs:\/\/s3adlogs\/ad1.log\")\n\t\/\/assert.Nil(t, err)\n\t\/\/err = service.Delete(object)\n\t\/\/assert.Nil(t, err)\n\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2016 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package bundler supports bundling (batching) of items. Bundling amortizes an\n\/\/ action with fixed costs over multiple items. For example, if an API provides\n\/\/ an RPC that accepts a list of items as input, but clients would prefer\n\/\/ adding items one at a time, then a Bundler can accept individual items from\n\/\/ the client and bundle many of them into a single RPC.\n\/\/\n\/\/ This package is experimental and subject to change without notice.\npackage bundler\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tDefaultDelayThreshold = time.Second\n\tDefaultBundleCountThreshold = 10\n\tDefaultBundleByteThreshold = 1e6 \/\/ 1M\n\tDefaultBufferedByteLimit = 1e9 \/\/ 1G\n)\n\nvar (\n\t\/\/ ErrOverflow indicates that Bundler's stored bytes exceeds its BufferedByteLimit.\n\tErrOverflow = errors.New(\"bundler reached buffered byte limit\")\n\n\t\/\/ ErrOversizedItem indicates that an item's size exceeds the maximum bundle size.\n\tErrOversizedItem = errors.New(\"item size exceeds bundle byte limit\")\n)\n\n\/\/ A Bundler collects items added to it into a bundle until the bundle\n\/\/ exceeds a given size, then calls a user-provided function to handle the bundle.\ntype Bundler struct {\n\t\/\/ Starting from the time that the first message is added to a bundle, once\n\t\/\/ this delay has passed, handle the bundle. The default is DefaultDelayThreshold.\n\tDelayThreshold time.Duration\n\n\t\/\/ Once a bundle has this many items, handle the bundle. Since only one\n\t\/\/ item at a time is added to a bundle, no bundle will exceed this\n\t\/\/ threshold, so it also serves as a limit. The default is\n\t\/\/ DefaultBundleCountThreshold.\n\tBundleCountThreshold int\n\n\t\/\/ Once the number of bytes in current bundle reaches this threshold, handle\n\t\/\/ the bundle. The default is DefaultBundleByteThreshold. This triggers handling,\n\t\/\/ but does not cap the total size of a bundle.\n\tBundleByteThreshold int\n\n\t\/\/ The maximum size of a bundle, in bytes. Zero means unlimited.\n\tBundleByteLimit int\n\n\t\/\/ The maximum number of bytes that the Bundler will keep in memory before\n\t\/\/ returning ErrOverflow. The default is DefaultBufferedByteLimit.\n\tBufferedByteLimit int\n\n\thandler func(interface{}) \/\/ called to handle a bundle\n\titemSliceZero reflect.Value \/\/ nil (zero value) for slice of items\n\tflushTimer *time.Timer \/\/ implements DelayThreshold\n\n\tmu sync.Mutex\n\tspaceAvailable chan struct{} \/\/ closed and replaced when space is available\n\tbufferedSize int \/\/ total bytes buffered\n\tcurBundle bundle \/\/ incoming items added to this bundle\n\thandlingc <-chan struct{} \/\/ set to non-nil while a handler is running; closed when it returns\n}\n\ntype bundle struct {\n\titems reflect.Value \/\/ slice of item type\n\tsize int \/\/ size in bytes of all items\n}\n\n\/\/ NewBundler creates a new Bundler.\n\/\/\n\/\/ itemExample is a value of the type that will be bundled. For example, if you\n\/\/ want to create bundles of *Entry, you could pass &Entry{} for itemExample.\n\/\/\n\/\/ handler is a function that will be called on each bundle. If itemExample is\n\/\/ of type T, the argument to handler is of type []T. handler is always called\n\/\/ sequentially for each bundle, and never in parallel.\nfunc NewBundler(itemExample interface{}, handler func(interface{})) *Bundler {\n\tb := &Bundler{\n\t\tDelayThreshold: DefaultDelayThreshold,\n\t\tBundleCountThreshold: DefaultBundleCountThreshold,\n\t\tBundleByteThreshold: DefaultBundleByteThreshold,\n\t\tBufferedByteLimit: DefaultBufferedByteLimit,\n\n\t\thandler: handler,\n\t\titemSliceZero: reflect.Zero(reflect.SliceOf(reflect.TypeOf(itemExample))),\n\t}\n\tb.curBundle.items = b.itemSliceZero\n\treturn b\n}\n\n\/\/ Add adds item to the current bundle. It marks the bundle for handling and\n\/\/ starts a new one if any of the thresholds or limits are exceeded.\n\/\/\n\/\/ If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then\n\/\/ the item can never be handled. Add returns ErrOversizedItem in this case.\n\/\/\n\/\/ If adding the item would exceed the maximum memory allowed (Bundler.BufferedByteLimit),\n\/\/ Add returns ErrOverflow.\n\/\/\n\/\/ Add never blocks.\nfunc (b *Bundler) Add(item interface{}, size int) error {\n\t\/\/ If this item exceeds the maximum size of a bundle,\n\t\/\/ we can never send it.\n\tif b.BundleByteLimit > 0 && size > b.BundleByteLimit {\n\t\treturn ErrOversizedItem\n\t}\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\t\/\/ If adding this item would exceed our allotted memory\n\t\/\/ footprint, we can't accept it.\n\tif b.bufferedSize+size > b.BufferedByteLimit {\n\t\treturn ErrOverflow\n\t}\n\tb.addLocked(item, size)\n\treturn nil\n}\n\n\/\/ addLocked adds item to the current bundle. It marks the bundle for handling and\n\/\/ starts a new one if any of the thresholds or limits are exceeded.\n\/\/\n\/\/ addLocked is called with the lock held.\nfunc (b *Bundler) addLocked(item interface{}, size int) {\n\t\/\/ If adding this item to the current bundle would cause it to exceed the\n\t\/\/ maximum bundle size, close the current bundle and start a new one.\n\tif b.BundleByteLimit > 0 && b.curBundle.size+size > b.BundleByteLimit {\n\t\tb.startFlushLocked()\n\t}\n\t\/\/ Add the item.\n\tb.curBundle.items = reflect.Append(b.curBundle.items, reflect.ValueOf(item))\n\tb.curBundle.size += size\n\tb.bufferedSize += size\n\n\t\/\/ Start a timer to flush the item if one isn't already running.\n\t\/\/ startFlushLocked clears the timer and closes the bundle at the same time,\n\t\/\/ so we only allocate a new timer for the first item in each bundle.\n\t\/\/ (We could try to call Reset on the timer instead, but that would add a lot\n\t\/\/ of complexity to the code just to save one small allocation.)\n\tif b.flushTimer == nil {\n\t\tb.flushTimer = time.AfterFunc(b.DelayThreshold, b.Flush)\n\t}\n\n\t\/\/ If the current bundle equals the count threshold, close it.\n\tif b.curBundle.items.Len() == b.BundleCountThreshold {\n\t\tb.startFlushLocked()\n\t}\n\t\/\/ If the current bundle equals or exceeds the byte threshold, close it.\n\tif b.curBundle.size >= b.BundleByteThreshold {\n\t\tb.startFlushLocked()\n\t}\n}\n\n\/\/ AddWait adds item to the current bundle. It marks the bundle for handling and\n\/\/ starts a new one if any of the thresholds or limits are exceeded.\n\/\/\n\/\/ If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then\n\/\/ the item can never be handled. AddWait returns ErrOversizedItem in this case.\n\/\/\n\/\/ If adding the item would exceed the maximum memory allowed (Bundler.BufferedByteLimit),\n\/\/ AddWait blocks until space is available or ctx is done.\nfunc (b *Bundler) AddWait(ctx context.Context, item interface{}, size int) error {\n\t\/\/ If this item exceeds the maximum size of a bundle,\n\t\/\/ we can never send it.\n\tif b.BundleByteLimit > 0 && size > b.BundleByteLimit {\n\t\treturn ErrOversizedItem\n\t}\n\tb.mu.Lock()\n\t\/\/ If adding this item would exceed our allotted memory\n\t\/\/ footprint, block until space is available.\n\t\/\/ TODO(jba): avoid starvation of large items.\n\tfor b.bufferedSize+size > b.BufferedByteLimit {\n\t\tif b.spaceAvailable == nil {\n\t\t\tb.spaceAvailable = make(chan struct{})\n\t\t}\n\t\tavail := b.spaceAvailable\n\t\tb.mu.Unlock()\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tcase <-avail:\n\t\t\tb.mu.Lock()\n\t\t}\n\t}\n\tb.addLocked(item, size)\n\tb.mu.Unlock()\n\treturn nil\n}\n\n\/\/ Flush invokes the handler for all remaining items in the Bundler and waits\n\/\/ for it to return.\nfunc (b *Bundler) Flush() {\n\tb.mu.Lock()\n\tb.startFlushLocked()\n\tdone := b.handlingc\n\tb.mu.Unlock()\n\n\tif done != nil {\n\t\t<-done\n\t}\n}\n\nfunc (b *Bundler) startFlushLocked() {\n\tif b.flushTimer != nil {\n\t\tb.flushTimer.Stop()\n\t\tb.flushTimer = nil\n\t}\n\n\tif b.curBundle.items.Len() == 0 {\n\t\treturn\n\t}\n\tbun := b.curBundle\n\tb.curBundle = bundle{items: b.itemSliceZero}\n\n\tdone := make(chan struct{})\n\tvar running <-chan struct{}\n\trunning, b.handlingc = b.handlingc, done\n\n\tgo func() {\n\t\tdefer func() {\n\t\t\tb.mu.Lock()\n\t\t\tb.bufferedSize -= bun.size\n\t\t\tavail := b.spaceAvailable\n\t\t\tb.spaceAvailable = nil\n\t\t\tb.mu.Unlock()\n\n\t\t\tif avail != nil {\n\t\t\t\tclose(avail)\n\t\t\t}\n\t\t\tclose(done)\n\t\t}()\n\n\t\tif running != nil {\n\t\t\t\/\/ Wait for our turn to call the handler.\n\t\t\t<-running\n\t\t}\n\n\t\tb.handler(bun.items.Interface())\n\t}()\n}\nbundler: use weighted semaphore to enforced byte limit\/\/ Copyright 2016 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package bundler supports bundling (batching) of items. Bundling amortizes an\n\/\/ action with fixed costs over multiple items. For example, if an API provides\n\/\/ an RPC that accepts a list of items as input, but clients would prefer\n\/\/ adding items one at a time, then a Bundler can accept individual items from\n\/\/ the client and bundle many of them into a single RPC.\n\/\/\n\/\/ This package is experimental and subject to change without notice.\npackage bundler\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/sync\/semaphore\"\n)\n\nconst (\n\tDefaultDelayThreshold = time.Second\n\tDefaultBundleCountThreshold = 10\n\tDefaultBundleByteThreshold = 1e6 \/\/ 1M\n\tDefaultBufferedByteLimit = 1e9 \/\/ 1G\n)\n\nvar (\n\t\/\/ ErrOverflow indicates that Bundler's stored bytes exceeds its BufferedByteLimit.\n\tErrOverflow = errors.New(\"bundler reached buffered byte limit\")\n\n\t\/\/ ErrOversizedItem indicates that an item's size exceeds the maximum bundle size.\n\tErrOversizedItem = errors.New(\"item size exceeds bundle byte limit\")\n)\n\n\/\/ A Bundler collects items added to it into a bundle until the bundle\n\/\/ exceeds a given size, then calls a user-provided function to handle the bundle.\ntype Bundler struct {\n\t\/\/ Starting from the time that the first message is added to a bundle, once\n\t\/\/ this delay has passed, handle the bundle. The default is DefaultDelayThreshold.\n\tDelayThreshold time.Duration\n\n\t\/\/ Once a bundle has this many items, handle the bundle. Since only one\n\t\/\/ item at a time is added to a bundle, no bundle will exceed this\n\t\/\/ threshold, so it also serves as a limit. The default is\n\t\/\/ DefaultBundleCountThreshold.\n\tBundleCountThreshold int\n\n\t\/\/ Once the number of bytes in current bundle reaches this threshold, handle\n\t\/\/ the bundle. The default is DefaultBundleByteThreshold. This triggers handling,\n\t\/\/ but does not cap the total size of a bundle.\n\tBundleByteThreshold int\n\n\t\/\/ The maximum size of a bundle, in bytes. Zero means unlimited.\n\tBundleByteLimit int\n\n\t\/\/ The maximum number of bytes that the Bundler will keep in memory before\n\t\/\/ returning ErrOverflow. The default is DefaultBufferedByteLimit.\n\tBufferedByteLimit int\n\n\thandler func(interface{}) \/\/ called to handle a bundle\n\titemSliceZero reflect.Value \/\/ nil (zero value) for slice of items\n\tflushTimer *time.Timer \/\/ implements DelayThreshold\n\n\tmu sync.Mutex\n\tsem *semaphore.Weighted \/\/ enforces BufferedByteLimit\n\tsemOnce sync.Once\n\tcurBundle bundle \/\/ incoming items added to this bundle\n\thandlingc <-chan struct{} \/\/ set to non-nil while a handler is running; closed when it returns\n}\n\ntype bundle struct {\n\titems reflect.Value \/\/ slice of item type\n\tsize int \/\/ size in bytes of all items\n}\n\n\/\/ NewBundler creates a new Bundler.\n\/\/\n\/\/ itemExample is a value of the type that will be bundled. For example, if you\n\/\/ want to create bundles of *Entry, you could pass &Entry{} for itemExample.\n\/\/\n\/\/ handler is a function that will be called on each bundle. If itemExample is\n\/\/ of type T, the argument to handler is of type []T. handler is always called\n\/\/ sequentially for each bundle, and never in parallel.\n\/\/\n\/\/ Configure the Bundler by setting its thresholds and limits before calling\n\/\/ any of its methods.\nfunc NewBundler(itemExample interface{}, handler func(interface{})) *Bundler {\n\tb := &Bundler{\n\t\tDelayThreshold: DefaultDelayThreshold,\n\t\tBundleCountThreshold: DefaultBundleCountThreshold,\n\t\tBundleByteThreshold: DefaultBundleByteThreshold,\n\t\tBufferedByteLimit: DefaultBufferedByteLimit,\n\n\t\thandler: handler,\n\t\titemSliceZero: reflect.Zero(reflect.SliceOf(reflect.TypeOf(itemExample))),\n\t}\n\tb.curBundle.items = b.itemSliceZero\n\treturn b\n}\n\nfunc (b *Bundler) sema() *semaphore.Weighted {\n\t\/\/ Create the semaphore lazily, because the user may set BufferedByteLimit\n\t\/\/ after NewBundler.\n\tb.semOnce.Do(func() {\n\t\tb.sem = semaphore.NewWeighted(int64(b.BufferedByteLimit))\n\t})\n\treturn b.sem\n}\n\n\/\/ Add adds item to the current bundle. It marks the bundle for handling and\n\/\/ starts a new one if any of the thresholds or limits are exceeded.\n\/\/\n\/\/ If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then\n\/\/ the item can never be handled. Add returns ErrOversizedItem in this case.\n\/\/\n\/\/ If adding the item would exceed the maximum memory allowed\n\/\/ (Bundler.BufferedByteLimit) or an AddWait call is blocked waiting for\n\/\/ memory, Add returns ErrOverflow.\n\/\/\n\/\/ Add never blocks.\nfunc (b *Bundler) Add(item interface{}, size int) error {\n\t\/\/ If this item exceeds the maximum size of a bundle,\n\t\/\/ we can never send it.\n\tif b.BundleByteLimit > 0 && size > b.BundleByteLimit {\n\t\treturn ErrOversizedItem\n\t}\n\t\/\/ If adding this item would exceed our allotted memory\n\t\/\/ footprint, we can't accept it.\n\t\/\/ (TryAcquire also returns false if anything is waiting on the semaphore,\n\t\/\/ so calls to Add and AddWait shouldn't be mixed.)\n\tif !b.sema().TryAcquire(int64(size)) {\n\t\treturn ErrOverflow\n\t}\n\tb.add(item, size)\n\treturn nil\n}\n\n\/\/ add adds item to the current bundle. It marks the bundle for handling and\n\/\/ starts a new one if any of the thresholds or limits are exceeded.\nfunc (b *Bundler) add(item interface{}, size int) {\n\tb.mu.Lock()\n\tdefer b.mu.Unlock()\n\n\t\/\/ If adding this item to the current bundle would cause it to exceed the\n\t\/\/ maximum bundle size, close the current bundle and start a new one.\n\tif b.BundleByteLimit > 0 && b.curBundle.size+size > b.BundleByteLimit {\n\t\tb.startFlushLocked()\n\t}\n\t\/\/ Add the item.\n\tb.curBundle.items = reflect.Append(b.curBundle.items, reflect.ValueOf(item))\n\tb.curBundle.size += size\n\n\t\/\/ Start a timer to flush the item if one isn't already running.\n\t\/\/ startFlushLocked clears the timer and closes the bundle at the same time,\n\t\/\/ so we only allocate a new timer for the first item in each bundle.\n\t\/\/ (We could try to call Reset on the timer instead, but that would add a lot\n\t\/\/ of complexity to the code just to save one small allocation.)\n\tif b.flushTimer == nil {\n\t\tb.flushTimer = time.AfterFunc(b.DelayThreshold, b.Flush)\n\t}\n\n\t\/\/ If the current bundle equals the count threshold, close it.\n\tif b.curBundle.items.Len() == b.BundleCountThreshold {\n\t\tb.startFlushLocked()\n\t}\n\t\/\/ If the current bundle equals or exceeds the byte threshold, close it.\n\tif b.curBundle.size >= b.BundleByteThreshold {\n\t\tb.startFlushLocked()\n\t}\n}\n\n\/\/ AddWait adds item to the current bundle. It marks the bundle for handling and\n\/\/ starts a new one if any of the thresholds or limits are exceeded.\n\/\/\n\/\/ If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then\n\/\/ the item can never be handled. AddWait returns ErrOversizedItem in this case.\n\/\/\n\/\/ If adding the item would exceed the maximum memory allowed (Bundler.BufferedByteLimit),\n\/\/ AddWait blocks until space is available or ctx is done.\n\/\/\n\/\/ Calls to Add and AddWait should not be mixed on the same Bundler.\nfunc (b *Bundler) AddWait(ctx context.Context, item interface{}, size int) error {\n\t\/\/ If this item exceeds the maximum size of a bundle,\n\t\/\/ we can never send it.\n\tif b.BundleByteLimit > 0 && size > b.BundleByteLimit {\n\t\treturn ErrOversizedItem\n\t}\n\t\/\/ If adding this item would exceed our allotted memory footprint, block\n\t\/\/ until space is available. The semaphore is FIFO, so there will be no\n\t\/\/ starvation.\n\tif err := b.sema().Acquire(ctx, int64(size)); err != nil {\n\t\treturn err\n\t}\n\t\/\/ Here, we've reserved space for item. Other goroutines can call AddWait\n\t\/\/ and even acquire space, but no one can take away our reservation\n\t\/\/ (assuming sem.Release is used correctly). So there is no race condition\n\t\/\/ resulting from locking the mutex after sem.Acquire returns.\n\tb.add(item, size)\n\treturn nil\n}\n\n\/\/ Flush invokes the handler for all remaining items in the Bundler and waits\n\/\/ for it to return.\nfunc (b *Bundler) Flush() {\n\tb.mu.Lock()\n\tb.startFlushLocked()\n\tdone := b.handlingc\n\tb.mu.Unlock()\n\n\tif done != nil {\n\t\t<-done\n\t}\n}\n\nfunc (b *Bundler) startFlushLocked() {\n\tif b.flushTimer != nil {\n\t\tb.flushTimer.Stop()\n\t\tb.flushTimer = nil\n\t}\n\n\tif b.curBundle.items.Len() == 0 {\n\t\treturn\n\t}\n\tbun := b.curBundle\n\tb.curBundle = bundle{items: b.itemSliceZero}\n\n\tdone := make(chan struct{})\n\tvar running <-chan struct{}\n\trunning, b.handlingc = b.handlingc, done\n\n\tgo func() {\n\t\tdefer func() {\n\t\t\tb.sem.Release(int64(bun.size))\n\t\t\tclose(done)\n\t\t}()\n\n\t\tif running != nil {\n\t\t\t\/\/ Wait for our turn to call the handler.\n\t\t\t<-running\n\t\t}\n\n\t\tb.handler(bun.items.Interface())\n\t}()\n}\n<|endoftext|>"} {"text":"package v3action\n\nimport (\n\t\"code.cloudfoundry.org\/cli\/actor\/actionerror\"\n\t\"code.cloudfoundry.org\/cli\/actor\/versioncheck\"\n\t\"code.cloudfoundry.org\/cli\/api\/cloudcontroller\/ccv3\"\n\t\"code.cloudfoundry.org\/cli\/api\/cloudcontroller\/ccversion\"\n)\n\n\/\/ Organization represents a V3 actor organization.\ntype Organization ccv3.Organization\n\n\/\/ GetOrganizationByName returns the organization with the given name.\nfunc (actor Actor) GetOrganizationByName(name string) (Organization, Warnings, error) {\n\torgs, warnings, err := actor.CloudControllerClient.GetOrganizations(\n\t\tccv3.Query{Key: ccv3.NameFilter, Values: []string{name}},\n\t)\n\tif err != nil {\n\t\treturn Organization{}, Warnings(warnings), err\n\t}\n\n\tif len(orgs) == 0 {\n\t\treturn Organization{}, Warnings(warnings), actionerror.OrganizationNotFoundError{Name: name}\n\t}\n\n\treturn Organization(orgs[0]), Warnings(warnings), nil\n}\n\nfunc (actor Actor) GetOrganizationsByGUIDs(guids ...string) ([]Organization, Warnings, error) {\n\tcurrentV3Ver := actor.CloudControllerClient.CloudControllerAPIVersion()\n\n\tguidsSupport, err := versioncheck.IsMinimumAPIVersionMet(currentV3Ver, ccversion.MinVersionSpacesGUIDsParamV3)\n\tif err != nil {\n\t\tguidsSupport = false\n\t}\n\n\tqueries := []ccv3.Query{}\n\tif guidsSupport {\n\t\tqueries = []ccv3.Query{ccv3.Query{Key: ccv3.GUIDFilter, Values: guids}}\n\t}\n\n\torgs, warnings, err := actor.CloudControllerClient.GetOrganizations(queries...)\n\tif err != nil {\n\t\treturn []Organization{}, Warnings(warnings), err\n\t}\n\n\tguidToOrg := make(map[string]ccv3.Organization)\n\tfor _, org := range orgs {\n\t\tguidToOrg[org.GUID] = org\n\t}\n\n\tfilteredOrgs := make([]ccv3.Organization, 0)\n\tfor _, guid := range guids {\n\t\tfilteredOrgs = append(filteredOrgs, guidToOrg[guid])\n\t}\n\torgs = filteredOrgs\n\n\treturn convertCCToActorOrganizations(orgs), Warnings(warnings), nil\n}\n\nfunc (actor Actor) GetOrganizations() ([]Organization, Warnings, error) {\n\torgs, warnings, err := actor.CloudControllerClient.GetOrganizations()\n\tif err != nil {\n\t\treturn []Organization{}, Warnings(warnings), err\n\t}\n\treturn convertCCToActorOrganizations(orgs), Warnings(warnings), nil\n}\n\nfunc convertCCToActorOrganizations(v3orgs []ccv3.Organization) []Organization {\n\torgs := make([]Organization, len(v3orgs))\n\tfor i := range v3orgs {\n\t\torgs[i] = Organization(v3orgs[i])\n\t}\n\treturn orgs\n}\nSort organizations when fetchingpackage v3action\n\nimport (\n\t\"code.cloudfoundry.org\/cli\/actor\/actionerror\"\n\t\"code.cloudfoundry.org\/cli\/actor\/versioncheck\"\n\t\"code.cloudfoundry.org\/cli\/api\/cloudcontroller\/ccv3\"\n\t\"code.cloudfoundry.org\/cli\/api\/cloudcontroller\/ccversion\"\n)\n\n\/\/ Organization represents a V3 actor organization.\ntype Organization ccv3.Organization\n\n\/\/ GetOrganizationByName returns the organization with the given name.\nfunc (actor Actor) GetOrganizationByName(name string) (Organization, Warnings, error) {\n\torgs, warnings, err := actor.CloudControllerClient.GetOrganizations(\n\t\tccv3.Query{Key: ccv3.NameFilter, Values: []string{name}},\n\t)\n\tif err != nil {\n\t\treturn Organization{}, Warnings(warnings), err\n\t}\n\n\tif len(orgs) == 0 {\n\t\treturn Organization{}, Warnings(warnings), actionerror.OrganizationNotFoundError{Name: name}\n\t}\n\n\treturn Organization(orgs[0]), Warnings(warnings), nil\n}\n\nfunc (actor Actor) GetOrganizationsByGUIDs(guids ...string) ([]Organization, Warnings, error) {\n\tcurrentV3Ver := actor.CloudControllerClient.CloudControllerAPIVersion()\n\n\tguidsSupport, err := versioncheck.IsMinimumAPIVersionMet(currentV3Ver, ccversion.MinVersionSpacesGUIDsParamV3)\n\tif err != nil {\n\t\tguidsSupport = false\n\t}\n\n\tqueries := []ccv3.Query{}\n\tif guidsSupport {\n\t\tqueries = []ccv3.Query{ccv3.Query{Key: ccv3.GUIDFilter, Values: guids}}\n\t}\n\n\torgs, warnings, err := actor.CloudControllerClient.GetOrganizations(queries...)\n\tif err != nil {\n\t\treturn []Organization{}, Warnings(warnings), err\n\t}\n\n\tguidToOrg := make(map[string]ccv3.Organization)\n\tfor _, org := range orgs {\n\t\tguidToOrg[org.GUID] = org\n\t}\n\n\tfilteredOrgs := make([]ccv3.Organization, 0)\n\tfor _, guid := range guids {\n\t\tfilteredOrgs = append(filteredOrgs, guidToOrg[guid])\n\t}\n\torgs = filteredOrgs\n\n\treturn convertCCToActorOrganizations(orgs), Warnings(warnings), nil\n}\n\nfunc (actor Actor) GetOrganizations() ([]Organization, Warnings, error) {\n\torderBy := ccv3.Query{\n\t\tKey: \"order_by\",\n\t\tValues: []string{\"name\"},\n\t}\n\torgs, warnings, err := actor.CloudControllerClient.GetOrganizations(orderBy)\n\tif err != nil {\n\t\treturn []Organization{}, Warnings(warnings), err\n\t}\n\treturn convertCCToActorOrganizations(orgs), Warnings(warnings), nil\n}\n\nfunc convertCCToActorOrganizations(v3orgs []ccv3.Organization) []Organization {\n\torgs := make([]Organization, len(v3orgs))\n\tfor i := range v3orgs {\n\t\torgs[i] = Organization(v3orgs[i])\n\t}\n\treturn orgs\n}\n<|endoftext|>"} {"text":"package restic\n\nimport (\n\t\"os\"\n\t\"syscall\"\n\t\"time\"\n)\n\nfunc (node *Node) OpenForReading() (*os.File, error) {\n\tfile, err := os.OpenFile(node.path, os.O_RDONLY, 0)\n\tif os.IsPermission(err) {\n\t\treturn os.OpenFile(node.path, os.O_RDONLY, 0)\n\t}\n\treturn file, err\n}\n\nfunc (node *Node) fillTimes(stat *syscall.Stat_t) {\n\tnode.ChangeTime = time.Unix(stat.Ctimespec.Unix())\n\tnode.AccessTime = time.Unix(stat.Atimespec.Unix())\n}\n\nfunc changeTime(stat *syscall.Stat_t) time.Time {\n\treturn time.Unix(stat.Ctimespec.Unix())\n}\nCall open file once on FreeBSD.package restic\n\nimport (\n\t\"os\"\n\t\"syscall\"\n\t\"time\"\n)\n\nfunc (node *Node) OpenForReading() (*os.File, error) {\n\treturn os.OpenFile(node.path, os.O_RDONLY, 0)\n}\n\nfunc (node *Node) fillTimes(stat *syscall.Stat_t) {\n\tnode.ChangeTime = time.Unix(stat.Ctimespec.Unix())\n\tnode.AccessTime = time.Unix(stat.Atimespec.Unix())\n}\n\nfunc changeTime(stat *syscall.Stat_t) time.Time {\n\treturn time.Unix(stat.Ctimespec.Unix())\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2016 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage kubernetes\n\nimport (\n\t\"context\"\n\n\t\"github.com\/go-kit\/kit\/log\"\n\t\"github.com\/go-kit\/kit\/log\/level\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/prometheus\/common\/model\"\n\t\"k8s.io\/api\/networking\/v1beta1\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\t\"k8s.io\/client-go\/util\/workqueue\"\n\n\t\"github.com\/prometheus\/prometheus\/discovery\/targetgroup\"\n\t\"github.com\/prometheus\/prometheus\/util\/strutil\"\n)\n\nvar (\n\tingressAddCount = eventCount.WithLabelValues(\"ingress\", \"add\")\n\tingressUpdateCount = eventCount.WithLabelValues(\"ingress\", \"update\")\n\tingressDeleteCount = eventCount.WithLabelValues(\"ingress\", \"delete\")\n)\n\n\/\/ Ingress implements discovery of Kubernetes ingress.\ntype Ingress struct {\n\tlogger log.Logger\n\tinformer cache.SharedInformer\n\tstore cache.Store\n\tqueue *workqueue.Type\n}\n\n\/\/ NewIngress returns a new ingress discovery.\nfunc NewIngress(l log.Logger, inf cache.SharedInformer) *Ingress {\n\ts := &Ingress{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed(\"ingress\")}\n\ts.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(o interface{}) {\n\t\t\tingressAddCount.Inc()\n\t\t\ts.enqueue(o)\n\t\t},\n\t\tDeleteFunc: func(o interface{}) {\n\t\t\tingressDeleteCount.Inc()\n\t\t\ts.enqueue(o)\n\t\t},\n\t\tUpdateFunc: func(_, o interface{}) {\n\t\t\tingressUpdateCount.Inc()\n\t\t\ts.enqueue(o)\n\t\t},\n\t})\n\treturn s\n}\n\nfunc (i *Ingress) enqueue(obj interface{}) {\n\tkey, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ti.queue.Add(key)\n}\n\n\/\/ Run implements the Discoverer interface.\nfunc (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {\n\tdefer i.queue.ShutDown()\n\n\tif !cache.WaitForCacheSync(ctx.Done(), i.informer.HasSynced) {\n\t\tif ctx.Err() != context.Canceled {\n\t\t\tlevel.Error(i.logger).Log(\"msg\", \"ingress informer unable to sync cache\")\n\t\t}\n\t\treturn\n\t}\n\n\tgo func() {\n\t\tfor i.process(ctx, ch) {\n\t\t}\n\t}()\n\n\t\/\/ Block until the target provider is explicitly canceled.\n\t<-ctx.Done()\n}\n\nfunc (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {\n\tkeyObj, quit := i.queue.Get()\n\tif quit {\n\t\treturn false\n\t}\n\tdefer i.queue.Done(keyObj)\n\tkey := keyObj.(string)\n\n\tnamespace, name, err := cache.SplitMetaNamespaceKey(key)\n\tif err != nil {\n\t\treturn true\n\t}\n\n\to, exists, err := i.store.GetByKey(key)\n\tif err != nil {\n\t\treturn true\n\t}\n\tif !exists {\n\t\tsend(ctx, ch, &targetgroup.Group{Source: ingressSourceFromNamespaceAndName(namespace, name)})\n\t\treturn true\n\t}\n\teps, err := convertToIngress(o)\n\tif err != nil {\n\t\tlevel.Error(i.logger).Log(\"msg\", \"converting to Ingress object failed\", \"err\", err)\n\t\treturn true\n\t}\n\tsend(ctx, ch, i.buildIngress(eps))\n\treturn true\n}\n\nfunc convertToIngress(o interface{}) (*v1beta1.Ingress, error) {\n\tingress, ok := o.(*v1beta1.Ingress)\n\tif ok {\n\t\treturn ingress, nil\n\t}\n\n\treturn nil, errors.Errorf(\"received unexpected object: %v\", o)\n}\n\nfunc ingressSource(s *v1beta1.Ingress) string {\n\treturn ingressSourceFromNamespaceAndName(s.Namespace, s.Name)\n}\n\nfunc ingressSourceFromNamespaceAndName(namespace, name string) string {\n\treturn \"ingress\/\" + namespace + \"\/\" + name\n}\n\nconst (\n\tingressNameLabel = metaLabelPrefix + \"ingress_name\"\n\tingressLabelPrefix = metaLabelPrefix + \"ingress_label_\"\n\tingressLabelPresentPrefix = metaLabelPrefix + \"ingress_labelpresent_\"\n\tingressAnnotationPrefix = metaLabelPrefix + \"ingress_annotation_\"\n\tingressAnnotationPresentPrefix = metaLabelPrefix + \"ingress_annotationpresent_\"\n\tingressSchemeLabel = metaLabelPrefix + \"ingress_scheme\"\n\tingressHostLabel = metaLabelPrefix + \"ingress_host\"\n\tingressPathLabel = metaLabelPrefix + \"ingress_path\"\n\tingressClassNameLabel = metaLabelPrefix + \"ingress_class_name\"\n)\n\nfunc ingressLabels(ingress *v1beta1.Ingress) model.LabelSet {\n\t\/\/ Each label and annotation will create two key-value pairs in the map.\n\tls := make(model.LabelSet, 2*(len(ingress.Labels)+len(ingress.Annotations))+2)\n\tls[ingressNameLabel] = lv(ingress.Name)\n\tls[namespaceLabel] = lv(ingress.Namespace)\n\tif ingress.Spec.IngressClassName == nil {\n\t\tls[ingressClassNameLabel] = lv(\"\")\n\t} else {\n\t\tls[ingressClassNameLabel] = lv(*ingress.Spec.IngressClassName)\n\t}\n\n\tfor k, v := range ingress.Labels {\n\t\tln := strutil.SanitizeLabelName(k)\n\t\tls[model.LabelName(ingressLabelPrefix+ln)] = lv(v)\n\t\tls[model.LabelName(ingressLabelPresentPrefix+ln)] = presentValue\n\t}\n\n\tfor k, v := range ingress.Annotations {\n\t\tln := strutil.SanitizeLabelName(k)\n\t\tls[model.LabelName(ingressAnnotationPrefix+ln)] = lv(v)\n\t\tls[model.LabelName(ingressAnnotationPresentPrefix+ln)] = presentValue\n\t}\n\treturn ls\n}\n\nfunc pathsFromIngressRule(rv *v1beta1.IngressRuleValue) []string {\n\tif rv.HTTP == nil {\n\t\treturn []string{\"\/\"}\n\t}\n\tpaths := make([]string, len(rv.HTTP.Paths))\n\tfor n, p := range rv.HTTP.Paths {\n\t\tpath := p.Path\n\t\tif path == \"\" {\n\t\t\tpath = \"\/\"\n\t\t}\n\t\tpaths[n] = path\n\t}\n\treturn paths\n}\n\nfunc (i *Ingress) buildIngress(ingress *v1beta1.Ingress) *targetgroup.Group {\n\ttg := &targetgroup.Group{\n\t\tSource: ingressSource(ingress),\n\t}\n\ttg.Labels = ingressLabels(ingress)\n\n\ttlsHosts := make(map[string]struct{})\n\tfor _, tls := range ingress.Spec.TLS {\n\t\tfor _, host := range tls.Hosts {\n\t\t\ttlsHosts[host] = struct{}{}\n\t\t}\n\t}\n\n\tfor _, rule := range ingress.Spec.Rules {\n\t\tpaths := pathsFromIngressRule(&rule.IngressRuleValue)\n\n\t\tscheme := \"http\"\n\t\t_, isTLS := tlsHosts[rule.Host]\n\t\tif isTLS {\n\t\t\tscheme = \"https\"\n\t\t}\n\n\t\tfor _, path := range paths {\n\t\t\ttg.Targets = append(tg.Targets, model.LabelSet{\n\t\t\t\tmodel.AddressLabel: lv(rule.Host),\n\t\t\t\tingressSchemeLabel: lv(scheme),\n\t\t\t\tingressHostLabel: lv(rule.Host),\n\t\t\t\tingressPathLabel: lv(path),\n\t\t\t})\n\t\t}\n\t}\n\n\treturn tg\n}\nDon't set label if ingressClassName is not set\/\/ Copyright 2016 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage kubernetes\n\nimport (\n\t\"context\"\n\n\t\"github.com\/go-kit\/kit\/log\"\n\t\"github.com\/go-kit\/kit\/log\/level\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/prometheus\/common\/model\"\n\t\"k8s.io\/api\/networking\/v1beta1\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\t\"k8s.io\/client-go\/util\/workqueue\"\n\n\t\"github.com\/prometheus\/prometheus\/discovery\/targetgroup\"\n\t\"github.com\/prometheus\/prometheus\/util\/strutil\"\n)\n\nvar (\n\tingressAddCount = eventCount.WithLabelValues(\"ingress\", \"add\")\n\tingressUpdateCount = eventCount.WithLabelValues(\"ingress\", \"update\")\n\tingressDeleteCount = eventCount.WithLabelValues(\"ingress\", \"delete\")\n)\n\n\/\/ Ingress implements discovery of Kubernetes ingress.\ntype Ingress struct {\n\tlogger log.Logger\n\tinformer cache.SharedInformer\n\tstore cache.Store\n\tqueue *workqueue.Type\n}\n\n\/\/ NewIngress returns a new ingress discovery.\nfunc NewIngress(l log.Logger, inf cache.SharedInformer) *Ingress {\n\ts := &Ingress{logger: l, informer: inf, store: inf.GetStore(), queue: workqueue.NewNamed(\"ingress\")}\n\ts.informer.AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(o interface{}) {\n\t\t\tingressAddCount.Inc()\n\t\t\ts.enqueue(o)\n\t\t},\n\t\tDeleteFunc: func(o interface{}) {\n\t\t\tingressDeleteCount.Inc()\n\t\t\ts.enqueue(o)\n\t\t},\n\t\tUpdateFunc: func(_, o interface{}) {\n\t\t\tingressUpdateCount.Inc()\n\t\t\ts.enqueue(o)\n\t\t},\n\t})\n\treturn s\n}\n\nfunc (i *Ingress) enqueue(obj interface{}) {\n\tkey, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)\n\tif err != nil {\n\t\treturn\n\t}\n\n\ti.queue.Add(key)\n}\n\n\/\/ Run implements the Discoverer interface.\nfunc (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) {\n\tdefer i.queue.ShutDown()\n\n\tif !cache.WaitForCacheSync(ctx.Done(), i.informer.HasSynced) {\n\t\tif ctx.Err() != context.Canceled {\n\t\t\tlevel.Error(i.logger).Log(\"msg\", \"ingress informer unable to sync cache\")\n\t\t}\n\t\treturn\n\t}\n\n\tgo func() {\n\t\tfor i.process(ctx, ch) {\n\t\t}\n\t}()\n\n\t\/\/ Block until the target provider is explicitly canceled.\n\t<-ctx.Done()\n}\n\nfunc (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool {\n\tkeyObj, quit := i.queue.Get()\n\tif quit {\n\t\treturn false\n\t}\n\tdefer i.queue.Done(keyObj)\n\tkey := keyObj.(string)\n\n\tnamespace, name, err := cache.SplitMetaNamespaceKey(key)\n\tif err != nil {\n\t\treturn true\n\t}\n\n\to, exists, err := i.store.GetByKey(key)\n\tif err != nil {\n\t\treturn true\n\t}\n\tif !exists {\n\t\tsend(ctx, ch, &targetgroup.Group{Source: ingressSourceFromNamespaceAndName(namespace, name)})\n\t\treturn true\n\t}\n\teps, err := convertToIngress(o)\n\tif err != nil {\n\t\tlevel.Error(i.logger).Log(\"msg\", \"converting to Ingress object failed\", \"err\", err)\n\t\treturn true\n\t}\n\tsend(ctx, ch, i.buildIngress(eps))\n\treturn true\n}\n\nfunc convertToIngress(o interface{}) (*v1beta1.Ingress, error) {\n\tingress, ok := o.(*v1beta1.Ingress)\n\tif ok {\n\t\treturn ingress, nil\n\t}\n\n\treturn nil, errors.Errorf(\"received unexpected object: %v\", o)\n}\n\nfunc ingressSource(s *v1beta1.Ingress) string {\n\treturn ingressSourceFromNamespaceAndName(s.Namespace, s.Name)\n}\n\nfunc ingressSourceFromNamespaceAndName(namespace, name string) string {\n\treturn \"ingress\/\" + namespace + \"\/\" + name\n}\n\nconst (\n\tingressNameLabel = metaLabelPrefix + \"ingress_name\"\n\tingressLabelPrefix = metaLabelPrefix + \"ingress_label_\"\n\tingressLabelPresentPrefix = metaLabelPrefix + \"ingress_labelpresent_\"\n\tingressAnnotationPrefix = metaLabelPrefix + \"ingress_annotation_\"\n\tingressAnnotationPresentPrefix = metaLabelPrefix + \"ingress_annotationpresent_\"\n\tingressSchemeLabel = metaLabelPrefix + \"ingress_scheme\"\n\tingressHostLabel = metaLabelPrefix + \"ingress_host\"\n\tingressPathLabel = metaLabelPrefix + \"ingress_path\"\n\tingressClassNameLabel = metaLabelPrefix + \"ingress_class_name\"\n)\n\nfunc ingressLabels(ingress *v1beta1.Ingress) model.LabelSet {\n\t\/\/ Each label and annotation will create two key-value pairs in the map.\n\tls := make(model.LabelSet, 2*(len(ingress.Labels)+len(ingress.Annotations))+2)\n\tls[ingressNameLabel] = lv(ingress.Name)\n\tls[namespaceLabel] = lv(ingress.Namespace)\n\tif ingress.Spec.IngressClassName != nil {\n\t\tls[ingressClassNameLabel] = lv(*ingress.Spec.IngressClassName)\n\t}\n\n\tfor k, v := range ingress.Labels {\n\t\tln := strutil.SanitizeLabelName(k)\n\t\tls[model.LabelName(ingressLabelPrefix+ln)] = lv(v)\n\t\tls[model.LabelName(ingressLabelPresentPrefix+ln)] = presentValue\n\t}\n\n\tfor k, v := range ingress.Annotations {\n\t\tln := strutil.SanitizeLabelName(k)\n\t\tls[model.LabelName(ingressAnnotationPrefix+ln)] = lv(v)\n\t\tls[model.LabelName(ingressAnnotationPresentPrefix+ln)] = presentValue\n\t}\n\treturn ls\n}\n\nfunc pathsFromIngressRule(rv *v1beta1.IngressRuleValue) []string {\n\tif rv.HTTP == nil {\n\t\treturn []string{\"\/\"}\n\t}\n\tpaths := make([]string, len(rv.HTTP.Paths))\n\tfor n, p := range rv.HTTP.Paths {\n\t\tpath := p.Path\n\t\tif path == \"\" {\n\t\t\tpath = \"\/\"\n\t\t}\n\t\tpaths[n] = path\n\t}\n\treturn paths\n}\n\nfunc (i *Ingress) buildIngress(ingress *v1beta1.Ingress) *targetgroup.Group {\n\ttg := &targetgroup.Group{\n\t\tSource: ingressSource(ingress),\n\t}\n\ttg.Labels = ingressLabels(ingress)\n\n\ttlsHosts := make(map[string]struct{})\n\tfor _, tls := range ingress.Spec.TLS {\n\t\tfor _, host := range tls.Hosts {\n\t\t\ttlsHosts[host] = struct{}{}\n\t\t}\n\t}\n\n\tfor _, rule := range ingress.Spec.Rules {\n\t\tpaths := pathsFromIngressRule(&rule.IngressRuleValue)\n\n\t\tscheme := \"http\"\n\t\t_, isTLS := tlsHosts[rule.Host]\n\t\tif isTLS {\n\t\t\tscheme = \"https\"\n\t\t}\n\n\t\tfor _, path := range paths {\n\t\t\ttg.Targets = append(tg.Targets, model.LabelSet{\n\t\t\t\tmodel.AddressLabel: lv(rule.Host),\n\t\t\t\tingressSchemeLabel: lv(scheme),\n\t\t\t\tingressHostLabel: lv(rule.Host),\n\t\t\t\tingressPathLabel: lv(path),\n\t\t\t})\n\t\t}\n\t}\n\n\treturn tg\n}\n<|endoftext|>"} {"text":"\/\/ Copyright Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage controller\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\tv1 \"k8s.io\/api\/core\/v1\"\n\tcoreinformers \"k8s.io\/client-go\/informers\/core\/v1\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\n\t\"istio.io\/istio\/pilot\/pkg\/model\"\n\t\"istio.io\/istio\/pilot\/pkg\/serviceregistry\/kube\"\n\t\"istio.io\/istio\/pilot\/pkg\/util\/sets\"\n)\n\n\/\/ PodCache is an eventually consistent pod cache\ntype PodCache struct {\n\tinformer cache.SharedIndexInformer\n\n\tsync.RWMutex\n\t\/\/ podsByIP maintains stable pod IP to name key mapping\n\t\/\/ this allows us to retrieve the latest status by pod IP.\n\t\/\/ This should only contain RUNNING or PENDING pods with an allocated IP.\n\tpodsByIP map[string]string\n\t\/\/ IPByPods is a reverse map of podsByIP. This exists to allow us to prune stale entries in the\n\t\/\/ pod cache if a pod changes IP.\n\tIPByPods map[string]string\n\n\t\/\/ needResync is map of IP to endpoint names. This is used to requeue endpoint\n\t\/\/ events when pod event comes. This typically happens when pod is not available\n\t\/\/ in podCache when endpoint event comes.\n\tneedResync map[string]sets.Set\n\tqueueEndpointEvent func(string)\n\n\tc *Controller\n}\n\nfunc newPodCache(c *Controller, informer coreinformers.PodInformer, queueEndpointEvent func(string)) *PodCache {\n\tout := &PodCache{\n\t\tinformer: informer.Informer(),\n\t\tc: c,\n\t\tpodsByIP: make(map[string]string),\n\t\tIPByPods: make(map[string]string),\n\t\tneedResync: make(map[string]sets.Set),\n\t\tqueueEndpointEvent: queueEndpointEvent,\n\t}\n\n\treturn out\n}\n\n\/\/ onEvent updates the IP-based index (pc.podsByIP).\nfunc (pc *PodCache) onEvent(curr interface{}, ev model.Event) error {\n\tpc.Lock()\n\tdefer pc.Unlock()\n\n\t\/\/ When a pod is deleted obj could be an *v1.Pod or a DeletionFinalStateUnknown marker item.\n\tpod, ok := curr.(*v1.Pod)\n\tif !ok {\n\t\ttombstone, ok := curr.(cache.DeletedFinalStateUnknown)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"couldn't get object from tombstone %+v\", curr)\n\t\t}\n\t\tpod, ok = tombstone.Obj.(*v1.Pod)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"tombstone contained object that is not a pod %#v\", curr)\n\t\t}\n\t}\n\n\tip := pod.Status.PodIP\n\n\t\/\/ PodIP will be empty when pod is just created, but before the IP is assigned\n\t\/\/ via UpdateStatus.\n\tif len(ip) > 0 {\n\t\tkey := kube.KeyFunc(pod.Name, pod.Namespace)\n\t\tswitch ev {\n\t\tcase model.EventAdd:\n\t\t\tswitch pod.Status.Phase {\n\t\t\tcase v1.PodPending, v1.PodRunning:\n\t\t\t\tif key != pc.podsByIP[ip] {\n\t\t\t\t\t\/\/ add to cache if the pod is running or pending\n\t\t\t\t\tpc.update(ip, key)\n\t\t\t\t}\n\t\t\t}\n\t\tcase model.EventUpdate:\n\t\t\tif pod.DeletionTimestamp != nil {\n\t\t\t\t\/\/ delete only if this pod was in the cache\n\t\t\t\tif pc.podsByIP[ip] == key {\n\t\t\t\t\tpc.deleteIP(ip)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tswitch pod.Status.Phase {\n\t\t\t\tcase v1.PodPending, v1.PodRunning:\n\t\t\t\t\tif key != pc.podsByIP[ip] {\n\t\t\t\t\t\t\/\/ add to cache if the pod is running or pending\n\t\t\t\t\t\tpc.update(ip, key)\n\t\t\t\t\t}\n\n\t\t\t\tdefault:\n\t\t\t\t\t\/\/ delete if the pod switched to other states and is in the cache\n\t\t\t\t\tif pc.podsByIP[ip] == key {\n\t\t\t\t\t\tpc.deleteIP(ip)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase model.EventDelete:\n\t\t\t\/\/ delete only if this pod was in the cache\n\t\t\tif pc.podsByIP[ip] == key {\n\t\t\t\tpc.deleteIP(ip)\n\t\t\t}\n\t\t}\n\t\t\/\/ fire instance handles for workload\n\t\tfor _, handler := range pc.c.workloadHandlers {\n\t\t\tep := NewEndpointBuilder(pc.c, pod).buildIstioEndpoint(ip, 0, \"\")\n\t\t\thandler(&model.WorkloadInstance{\n\t\t\t\tNamespace: pod.Namespace,\n\t\t\t\tEndpoint: ep,\n\t\t\t\tPortMap: getPortMap(pod),\n\t\t\t}, ev)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getPortMap(pod *v1.Pod) map[string]uint32 {\n\tpmap := map[string]uint32{}\n\tfor _, c := range pod.Spec.Containers {\n\t\tfor _, port := range c.Ports {\n\t\t\tif port.Name == \"\" || port.Protocol != v1.ProtocolTCP {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ First port wins, per Kubernetes (https:\/\/github.com\/kubernetes\/kubernetes\/issues\/54213)\n\t\t\tif _, f := pmap[port.Name]; !f {\n\t\t\t\tpmap[port.Name] = uint32(port.ContainerPort)\n\t\t\t}\n\t\t}\n\t}\n\treturn pmap\n}\n\nfunc (pc *PodCache) deleteIP(ip string) {\n\tpod := pc.podsByIP[ip]\n\tdelete(pc.podsByIP, ip)\n\tdelete(pc.IPByPods, pod)\n}\n\nfunc (pc *PodCache) update(ip, key string) {\n\tif current, f := pc.IPByPods[key]; f {\n\t\t\/\/ The pod already exists, but with another IP Address. We need to clean up that\n\t\tdelete(pc.podsByIP, current)\n\t}\n\tpc.podsByIP[ip] = key\n\tpc.IPByPods[key] = ip\n\n\tif endpointsToUpdate, f := pc.needResync[ip]; f {\n\t\tdelete(pc.needResync, ip)\n\t\tfor ep := range endpointsToUpdate {\n\t\t\tpc.queueEndpointEvent(ep)\n\t\t}\n\t}\n\n\tpc.proxyUpdates(ip)\n}\n\n\/\/ queueEndpointEventOnPodArrival registers this endpoint and queues endpoint event\n\/\/ when the corresponding pod arrives.\nfunc (pc *PodCache) queueEndpointEventOnPodArrival(key, ip string) {\n\tpc.Lock()\n\tdefer pc.Unlock()\n\tif _, f := pc.needResync[ip]; !f {\n\t\tpc.needResync[ip] = sets.NewSet(key)\n\t} else {\n\t\tpc.needResync[ip].Insert(key)\n\t}\n\tendpointsPendingPodUpdate.Record(float64(len(pc.needResync)))\n}\n\n\/\/ endpointDeleted cleans up endpoint from resync endpoint list.\nfunc (pc *PodCache) endpointDeleted(key string, ip string) {\n\tpc.Lock()\n\tdefer pc.Unlock()\n\tdelete(pc.needResync[ip], key)\n\tif len(pc.needResync[ip]) == 0 {\n\t\tdelete(pc.needResync, ip)\n\t}\n\tendpointsPendingPodUpdate.Record(float64(len(pc.needResync)))\n}\n\nfunc (pc *PodCache) proxyUpdates(ip string) {\n\tif pc.c != nil && pc.c.xdsUpdater != nil {\n\t\tpc.c.xdsUpdater.ProxyUpdate(pc.c.clusterID, ip)\n\t}\n}\n\n\/\/ nolint: unparam\nfunc (pc *PodCache) getPodKey(addr string) (string, bool) {\n\tpc.RLock()\n\tdefer pc.RUnlock()\n\tkey, exists := pc.podsByIP[addr]\n\treturn key, exists\n}\n\n\/\/ getPodByIp returns the pod or nil if pod not found or an error occurred\nfunc (pc *PodCache) getPodByIP(addr string) *v1.Pod {\n\tkey, exists := pc.getPodKey(addr)\n\tif !exists {\n\t\treturn nil\n\t}\n\titem, exists, err := pc.informer.GetStore().GetByKey(key)\n\tif !exists || err != nil {\n\t\treturn nil\n\t}\n\treturn item.(*v1.Pod)\n}\nFix inaccurate endpointsPendingPodUpdate metric (#25899)\/\/ Copyright Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage controller\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\tv1 \"k8s.io\/api\/core\/v1\"\n\tcoreinformers \"k8s.io\/client-go\/informers\/core\/v1\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\n\t\"istio.io\/istio\/pilot\/pkg\/model\"\n\t\"istio.io\/istio\/pilot\/pkg\/serviceregistry\/kube\"\n\t\"istio.io\/istio\/pilot\/pkg\/util\/sets\"\n)\n\n\/\/ PodCache is an eventually consistent pod cache\ntype PodCache struct {\n\tinformer cache.SharedIndexInformer\n\n\tsync.RWMutex\n\t\/\/ podsByIP maintains stable pod IP to name key mapping\n\t\/\/ this allows us to retrieve the latest status by pod IP.\n\t\/\/ This should only contain RUNNING or PENDING pods with an allocated IP.\n\tpodsByIP map[string]string\n\t\/\/ IPByPods is a reverse map of podsByIP. This exists to allow us to prune stale entries in the\n\t\/\/ pod cache if a pod changes IP.\n\tIPByPods map[string]string\n\n\t\/\/ needResync is map of IP to endpoint names. This is used to requeue endpoint\n\t\/\/ events when pod event comes. This typically happens when pod is not available\n\t\/\/ in podCache when endpoint event comes.\n\tneedResync map[string]sets.Set\n\tqueueEndpointEvent func(string)\n\n\tc *Controller\n}\n\nfunc newPodCache(c *Controller, informer coreinformers.PodInformer, queueEndpointEvent func(string)) *PodCache {\n\tout := &PodCache{\n\t\tinformer: informer.Informer(),\n\t\tc: c,\n\t\tpodsByIP: make(map[string]string),\n\t\tIPByPods: make(map[string]string),\n\t\tneedResync: make(map[string]sets.Set),\n\t\tqueueEndpointEvent: queueEndpointEvent,\n\t}\n\n\treturn out\n}\n\n\/\/ onEvent updates the IP-based index (pc.podsByIP).\nfunc (pc *PodCache) onEvent(curr interface{}, ev model.Event) error {\n\tpc.Lock()\n\tdefer pc.Unlock()\n\n\t\/\/ When a pod is deleted obj could be an *v1.Pod or a DeletionFinalStateUnknown marker item.\n\tpod, ok := curr.(*v1.Pod)\n\tif !ok {\n\t\ttombstone, ok := curr.(cache.DeletedFinalStateUnknown)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"couldn't get object from tombstone %+v\", curr)\n\t\t}\n\t\tpod, ok = tombstone.Obj.(*v1.Pod)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"tombstone contained object that is not a pod %#v\", curr)\n\t\t}\n\t}\n\n\tip := pod.Status.PodIP\n\n\t\/\/ PodIP will be empty when pod is just created, but before the IP is assigned\n\t\/\/ via UpdateStatus.\n\tif len(ip) > 0 {\n\t\tkey := kube.KeyFunc(pod.Name, pod.Namespace)\n\t\tswitch ev {\n\t\tcase model.EventAdd:\n\t\t\tswitch pod.Status.Phase {\n\t\t\tcase v1.PodPending, v1.PodRunning:\n\t\t\t\tif key != pc.podsByIP[ip] {\n\t\t\t\t\t\/\/ add to cache if the pod is running or pending\n\t\t\t\t\tpc.update(ip, key)\n\t\t\t\t}\n\t\t\t}\n\t\tcase model.EventUpdate:\n\t\t\tif pod.DeletionTimestamp != nil {\n\t\t\t\t\/\/ delete only if this pod was in the cache\n\t\t\t\tif pc.podsByIP[ip] == key {\n\t\t\t\t\tpc.deleteIP(ip)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tswitch pod.Status.Phase {\n\t\t\t\tcase v1.PodPending, v1.PodRunning:\n\t\t\t\t\tif key != pc.podsByIP[ip] {\n\t\t\t\t\t\t\/\/ add to cache if the pod is running or pending\n\t\t\t\t\t\tpc.update(ip, key)\n\t\t\t\t\t}\n\n\t\t\t\tdefault:\n\t\t\t\t\t\/\/ delete if the pod switched to other states and is in the cache\n\t\t\t\t\tif pc.podsByIP[ip] == key {\n\t\t\t\t\t\tpc.deleteIP(ip)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase model.EventDelete:\n\t\t\t\/\/ delete only if this pod was in the cache\n\t\t\tif pc.podsByIP[ip] == key {\n\t\t\t\tpc.deleteIP(ip)\n\t\t\t}\n\t\t}\n\t\t\/\/ fire instance handles for workload\n\t\tfor _, handler := range pc.c.workloadHandlers {\n\t\t\tep := NewEndpointBuilder(pc.c, pod).buildIstioEndpoint(ip, 0, \"\")\n\t\t\thandler(&model.WorkloadInstance{\n\t\t\t\tNamespace: pod.Namespace,\n\t\t\t\tEndpoint: ep,\n\t\t\t\tPortMap: getPortMap(pod),\n\t\t\t}, ev)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getPortMap(pod *v1.Pod) map[string]uint32 {\n\tpmap := map[string]uint32{}\n\tfor _, c := range pod.Spec.Containers {\n\t\tfor _, port := range c.Ports {\n\t\t\tif port.Name == \"\" || port.Protocol != v1.ProtocolTCP {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ First port wins, per Kubernetes (https:\/\/github.com\/kubernetes\/kubernetes\/issues\/54213)\n\t\t\tif _, f := pmap[port.Name]; !f {\n\t\t\t\tpmap[port.Name] = uint32(port.ContainerPort)\n\t\t\t}\n\t\t}\n\t}\n\treturn pmap\n}\n\nfunc (pc *PodCache) deleteIP(ip string) {\n\tpod := pc.podsByIP[ip]\n\tdelete(pc.podsByIP, ip)\n\tdelete(pc.IPByPods, pod)\n}\n\nfunc (pc *PodCache) update(ip, key string) {\n\tif current, f := pc.IPByPods[key]; f {\n\t\t\/\/ The pod already exists, but with another IP Address. We need to clean up that\n\t\tdelete(pc.podsByIP, current)\n\t}\n\tpc.podsByIP[ip] = key\n\tpc.IPByPods[key] = ip\n\n\tif endpointsToUpdate, f := pc.needResync[ip]; f {\n\t\tdelete(pc.needResync, ip)\n\t\tfor ep := range endpointsToUpdate {\n\t\t\tpc.queueEndpointEvent(ep)\n\t\t}\n\t\tendpointsPendingPodUpdate.Record(float64(len(pc.needResync)))\n\t}\n\n\tpc.proxyUpdates(ip)\n}\n\n\/\/ queueEndpointEventOnPodArrival registers this endpoint and queues endpoint event\n\/\/ when the corresponding pod arrives.\nfunc (pc *PodCache) queueEndpointEventOnPodArrival(key, ip string) {\n\tpc.Lock()\n\tdefer pc.Unlock()\n\tif _, f := pc.needResync[ip]; !f {\n\t\tpc.needResync[ip] = sets.NewSet(key)\n\t} else {\n\t\tpc.needResync[ip].Insert(key)\n\t}\n\tendpointsPendingPodUpdate.Record(float64(len(pc.needResync)))\n}\n\n\/\/ endpointDeleted cleans up endpoint from resync endpoint list.\nfunc (pc *PodCache) endpointDeleted(key string, ip string) {\n\tpc.Lock()\n\tdefer pc.Unlock()\n\tdelete(pc.needResync[ip], key)\n\tif len(pc.needResync[ip]) == 0 {\n\t\tdelete(pc.needResync, ip)\n\t}\n\tendpointsPendingPodUpdate.Record(float64(len(pc.needResync)))\n}\n\nfunc (pc *PodCache) proxyUpdates(ip string) {\n\tif pc.c != nil && pc.c.xdsUpdater != nil {\n\t\tpc.c.xdsUpdater.ProxyUpdate(pc.c.clusterID, ip)\n\t}\n}\n\n\/\/ nolint: unparam\nfunc (pc *PodCache) getPodKey(addr string) (string, bool) {\n\tpc.RLock()\n\tdefer pc.RUnlock()\n\tkey, exists := pc.podsByIP[addr]\n\treturn key, exists\n}\n\n\/\/ getPodByIp returns the pod or nil if pod not found or an error occurred\nfunc (pc *PodCache) getPodByIP(addr string) *v1.Pod {\n\tkey, exists := pc.getPodKey(addr)\n\tif !exists {\n\t\treturn nil\n\t}\n\titem, exists, err := pc.informer.GetStore().GetByKey(key)\n\tif !exists || err != nil {\n\t\treturn nil\n\t}\n\treturn item.(*v1.Pod)\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2018 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage v1alpha1\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/knative\/pkg\/apis\"\n\t\"github.com\/knative\/serving\/pkg\/apis\/autoscaling\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\n\/\/ ValidateObjectMetadata validates that `metadata` stanza of the\n\/\/ resourse is correct.\nfunc ValidateObjectMetadata(meta metav1.Object) *apis.FieldError {\n\tname := meta.GetName()\n\n\tif strings.Contains(name, \".\") {\n\t\treturn &apis.FieldError{\n\t\t\tMessage: \"Invalid resource name: special character . must not be present\",\n\t\t\tPaths: []string{\"name\"},\n\t\t}\n\t}\n\n\tif len(name) > 63 {\n\t\treturn &apis.FieldError{\n\t\t\tMessage: \"Invalid resource name: length must be no more than 63 characters\",\n\t\t\tPaths: []string{\"name\"},\n\t\t}\n\t}\n\n\tif err := validateScaleBoundsAnnotations(meta.GetAnnotations()); err != nil {\n\t\treturn err.ViaField(\"annotations\")\n\t}\n\n\treturn nil\n}\n\nfunc getIntGT0(m map[string]string, k string) (int64, *apis.FieldError) {\n\tv, ok := m[k]\n\tif ok {\n\t\ti, err := strconv.ParseInt(v, 10, 32)\n\t\tif err != nil || i < 1 {\n\t\t\treturn 0, &apis.FieldError{\n\t\t\t\tMessage: fmt.Sprintf(\"Invalid %s annotation value: must be an integer greater than 0\", k),\n\t\t\t\tPaths: []string{k},\n\t\t\t}\n\t\t}\n\t\treturn i, nil\n\t}\n\treturn 0, nil\n}\n\nfunc validateScaleBoundsAnnotations(annotations map[string]string) *apis.FieldError {\n\tif annotations == nil {\n\t\treturn nil\n\t}\n\n\tmin, err := getIntGT0(annotations, autoscaling.MinScaleAnnotationKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\tmax, err := getIntGT0(annotations, autoscaling.MaxScaleAnnotationKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif max != 0 && max < min {\n\t\treturn &apis.FieldError{\n\t\t\tMessage: fmt.Sprintf(\"%s=%v is less than %s=%v\", autoscaling.MaxScaleAnnotationKey, max, autoscaling.MinScaleAnnotationKey, min),\n\t\t\tPaths: []string{autoscaling.MaxScaleAnnotationKey, autoscaling.MinScaleAnnotationKey},\n\t\t}\n\t}\n\n\treturn nil\n}\nFix spelling errors (#2699)\/*\nCopyright 2018 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage v1alpha1\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/knative\/pkg\/apis\"\n\t\"github.com\/knative\/serving\/pkg\/apis\/autoscaling\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\n\/\/ ValidateObjectMetadata validates that `metadata` stanza of the\n\/\/ resources is correct.\nfunc ValidateObjectMetadata(meta metav1.Object) *apis.FieldError {\n\tname := meta.GetName()\n\n\tif strings.Contains(name, \".\") {\n\t\treturn &apis.FieldError{\n\t\t\tMessage: \"Invalid resource name: special character . must not be present\",\n\t\t\tPaths: []string{\"name\"},\n\t\t}\n\t}\n\n\tif len(name) > 63 {\n\t\treturn &apis.FieldError{\n\t\t\tMessage: \"Invalid resource name: length must be no more than 63 characters\",\n\t\t\tPaths: []string{\"name\"},\n\t\t}\n\t}\n\n\tif err := validateScaleBoundsAnnotations(meta.GetAnnotations()); err != nil {\n\t\treturn err.ViaField(\"annotations\")\n\t}\n\n\treturn nil\n}\n\nfunc getIntGT0(m map[string]string, k string) (int64, *apis.FieldError) {\n\tv, ok := m[k]\n\tif ok {\n\t\ti, err := strconv.ParseInt(v, 10, 32)\n\t\tif err != nil || i < 1 {\n\t\t\treturn 0, &apis.FieldError{\n\t\t\t\tMessage: fmt.Sprintf(\"Invalid %s annotation value: must be an integer greater than 0\", k),\n\t\t\t\tPaths: []string{k},\n\t\t\t}\n\t\t}\n\t\treturn i, nil\n\t}\n\treturn 0, nil\n}\n\nfunc validateScaleBoundsAnnotations(annotations map[string]string) *apis.FieldError {\n\tif annotations == nil {\n\t\treturn nil\n\t}\n\n\tmin, err := getIntGT0(annotations, autoscaling.MinScaleAnnotationKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\tmax, err := getIntGT0(annotations, autoscaling.MaxScaleAnnotationKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif max != 0 && max < min {\n\t\treturn &apis.FieldError{\n\t\t\tMessage: fmt.Sprintf(\"%s=%v is less than %s=%v\", autoscaling.MaxScaleAnnotationKey, max, autoscaling.MinScaleAnnotationKey, min),\n\t\t\tPaths: []string{autoscaling.MaxScaleAnnotationKey, autoscaling.MinScaleAnnotationKey},\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2019 The Jetstack cert-manager contributors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage ca\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\"\n\t\"crypto\/rand\"\n\t\"crypto\/x509\"\n\t\"crypto\/x509\/pkix\"\n\t\"encoding\/asn1\"\n\t\"encoding\/pem\"\n\t\"errors\"\n\t\"testing\"\n\t\"time\"\n\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\tclientcorev1 \"k8s.io\/client-go\/listers\/core\/v1\"\n\tcoretesting \"k8s.io\/client-go\/testing\"\n\tfakeclock \"k8s.io\/utils\/clock\/testing\"\n\n\tapiutil \"github.com\/jetstack\/cert-manager\/pkg\/api\/util\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/apis\/certmanager\"\n\tcmapi \"github.com\/jetstack\/cert-manager\/pkg\/apis\/certmanager\/v1alpha2\"\n\tcmmeta \"github.com\/jetstack\/cert-manager\/pkg\/apis\/meta\/v1\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/controller\/certificaterequests\"\n\ttestpkg \"github.com\/jetstack\/cert-manager\/pkg\/controller\/test\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/util\/pki\"\n\t\"github.com\/jetstack\/cert-manager\/test\/unit\/gen\"\n\ttestlisters \"github.com\/jetstack\/cert-manager\/test\/unit\/listers\"\n)\n\nvar (\n\tfixedClockStart = time.Now()\n\tfixedClock = fakeclock.NewFakeClock(fixedClockStart)\n)\n\nfunc generateCSR(t *testing.T, secretKey crypto.Signer) []byte {\n\tasn1Subj, _ := asn1.Marshal(pkix.Name{\n\t\tCommonName: \"test\",\n\t}.ToRDNSequence())\n\ttemplate := x509.CertificateRequest{\n\t\tRawSubject: asn1Subj,\n\t\tSignatureAlgorithm: x509.SHA256WithRSA,\n\t}\n\n\tcsrBytes, err := x509.CreateCertificateRequest(rand.Reader, &template, secretKey)\n\tif err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\n\tcsr := pem.EncodeToMemory(&pem.Block{Type: \"CERTIFICATE REQUEST\", Bytes: csrBytes})\n\n\treturn csr\n}\n\nfunc generateSelfSignedCertFromCR(t *testing.T, cr *cmapi.CertificateRequest, key crypto.Signer,\n\tduration time.Duration) (*x509.Certificate, []byte) {\n\ttemplate, err := pki.GenerateTemplateFromCertificateRequest(cr)\n\tif err != nil {\n\t\tt.Errorf(\"error generating template: %v\", err)\n\t}\n\n\tderBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key)\n\tif err != nil {\n\t\tt.Errorf(\"error signing cert: %v\", err)\n\t\tt.FailNow()\n\t}\n\n\tpemByteBuffer := bytes.NewBuffer([]byte{})\n\terr = pem.Encode(pemByteBuffer, &pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes})\n\tif err != nil {\n\t\tt.Errorf(\"failed to encode cert: %v\", err)\n\t\tt.FailNow()\n\t}\n\n\treturn template, pemByteBuffer.Bytes()\n}\n\nfunc TestSign(t *testing.T) {\n\tbaseIssuer := gen.Issuer(\"test-issuer\",\n\t\tgen.SetIssuerCA(cmapi.CAIssuer{SecretName: \"root-ca-secret\"}),\n\t\tgen.AddIssuerCondition(cmapi.IssuerCondition{\n\t\t\tType: cmapi.IssuerConditionReady,\n\t\t\tStatus: cmmeta.ConditionTrue,\n\t\t}),\n\t)\n\n\t\/\/ Build root RSA CA\n\tskRSA, err := pki.GenerateRSAPrivateKey(2048)\n\tif err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\n\tskRSAPEM := pki.EncodePKCS1PrivateKey(skRSA)\n\trsaCSR := generateCSR(t, skRSA)\n\n\tbaseCR := gen.CertificateRequest(\"test-cr\",\n\t\tgen.SetCertificateRequestIsCA(true),\n\t\tgen.SetCertificateRequestCSR(rsaCSR),\n\t\tgen.SetCertificateRequestIssuer(cmmeta.ObjectReference{\n\t\t\tName: baseIssuer.DeepCopy().Name,\n\t\t\tGroup: certmanager.GroupName,\n\t\t\tKind: \"Issuer\",\n\t\t}),\n\t\tgen.SetCertificateRequestDuration(&metav1.Duration{Duration: time.Hour * 24 * 60}),\n\t)\n\n\t\/\/ generate a self signed root ca valid for 60d\n\t_, rsaPEMCert := generateSelfSignedCertFromCR(t, baseCR, skRSA, time.Hour*24*60)\n\trsaCASecret := &corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"root-ca-secret\",\n\t\t\tNamespace: gen.DefaultTestNamespace,\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\tcorev1.TLSPrivateKeyKey: skRSAPEM,\n\t\t\tcorev1.TLSCertKey: rsaPEMCert,\n\t\t},\n\t}\n\n\tbadDataSecret := rsaCASecret.DeepCopy()\n\tbadDataSecret.Data[corev1.TLSPrivateKeyKey] = []byte(\"bad key\")\n\n\ttemplate, err := pki.GenerateTemplateFromCertificateRequest(baseCR)\n\tif err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\tcertPEM, _, err := pki.SignCSRTemplate([]*x509.Certificate{template}, skRSA, template)\n\tif err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\n\tmetaFixedClockStart := metav1.NewTime(fixedClockStart)\n\ttests := map[string]testT{\n\t\t\"a missing CA key pair should set the condition to pending and wait for a re-sync\": {\n\t\t\tcertificateRequest: baseCR.DeepCopy(),\n\t\t\tbuilder: &testpkg.Builder{\n\t\t\t\tKubeObjects: []runtime.Object{},\n\t\t\t\tCertManagerObjects: []runtime.Object{baseCR.DeepCopy(), baseIssuer.DeepCopy()},\n\t\t\t\tExpectedEvents: []string{\n\t\t\t\t\t`Normal SecretMissing Referenced secret default-unit-test-ns\/root-ca-secret not found: secret \"root-ca-secret\" not found`,\n\t\t\t\t},\n\t\t\t\tExpectedActions: []testpkg.Action{\n\t\t\t\t\ttestpkg.NewAction(coretesting.NewUpdateSubresourceAction(\n\t\t\t\t\t\tcmapi.SchemeGroupVersion.WithResource(\"certificaterequests\"),\n\t\t\t\t\t\t\"status\",\n\t\t\t\t\t\tgen.DefaultTestNamespace,\n\t\t\t\t\t\tgen.CertificateRequestFrom(baseCR.DeepCopy(),\n\t\t\t\t\t\t\tgen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{\n\t\t\t\t\t\t\t\tType: cmapi.CertificateRequestConditionReady,\n\t\t\t\t\t\t\t\tStatus: cmmeta.ConditionFalse,\n\t\t\t\t\t\t\t\tReason: cmapi.CertificateRequestReasonPending,\n\t\t\t\t\t\t\t\tMessage: `Referenced secret default-unit-test-ns\/root-ca-secret not found: secret \"root-ca-secret\" not found`,\n\t\t\t\t\t\t\t\tLastTransitionTime: &metaFixedClockStart,\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"a secret with invalid data should set condition to pending and wait for re-sync\": {\n\t\t\tcertificateRequest: baseCR.DeepCopy(),\n\t\t\tbuilder: &testpkg.Builder{\n\t\t\t\tKubeObjects: []runtime.Object{badDataSecret},\n\t\t\t\tCertManagerObjects: []runtime.Object{baseCR.DeepCopy(),\n\t\t\t\t\tgen.IssuerFrom(baseIssuer.DeepCopy(),\n\t\t\t\t\t\tgen.SetIssuerCA(cmapi.CAIssuer{SecretName: badDataSecret.Name}),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tExpectedEvents: []string{\n\t\t\t\t\t\"Normal SecretInvalidData Failed to parse signing CA keypair from secret default-unit-test-ns\/root-ca-secret: error decoding private key PEM block\",\n\t\t\t\t},\n\t\t\t\tExpectedActions: []testpkg.Action{\n\t\t\t\t\ttestpkg.NewAction(coretesting.NewUpdateSubresourceAction(\n\t\t\t\t\t\tcmapi.SchemeGroupVersion.WithResource(\"certificaterequests\"),\n\t\t\t\t\t\t\"status\",\n\t\t\t\t\t\tgen.DefaultTestNamespace,\n\t\t\t\t\t\tgen.CertificateRequestFrom(baseCR.DeepCopy(),\n\t\t\t\t\t\t\tgen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{\n\t\t\t\t\t\t\t\tType: cmapi.CertificateRequestConditionReady,\n\t\t\t\t\t\t\t\tStatus: cmmeta.ConditionFalse,\n\t\t\t\t\t\t\t\tReason: cmapi.CertificateRequestReasonPending,\n\t\t\t\t\t\t\t\tMessage: \"Failed to parse signing CA keypair from secret default-unit-test-ns\/root-ca-secret: error decoding private key PEM block\",\n\t\t\t\t\t\t\t\tLastTransitionTime: &metaFixedClockStart,\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"a CertificateRequest that transiently fails a secret lookup should backoff error to retry\": {\n\t\t\tcertificateRequest: baseCR.DeepCopy(),\n\t\t\tbuilder: &testpkg.Builder{\n\t\t\t\tKubeObjects: []runtime.Object{rsaCASecret},\n\t\t\t\tCertManagerObjects: []runtime.Object{baseCR.DeepCopy(), baseIssuer.DeepCopy()},\n\t\t\t\tExpectedEvents: []string{\n\t\t\t\t\t`Normal SecretGetError Failed to get certificate key pair from secret default-unit-test-ns\/root-ca-secret: this is a network error`,\n\t\t\t\t},\n\t\t\t\tExpectedActions: []testpkg.Action{\n\t\t\t\t\ttestpkg.NewAction(coretesting.NewUpdateSubresourceAction(\n\t\t\t\t\t\tcmapi.SchemeGroupVersion.WithResource(\"certificaterequests\"),\n\t\t\t\t\t\t\"status\",\n\t\t\t\t\t\tgen.DefaultTestNamespace,\n\t\t\t\t\t\tgen.CertificateRequestFrom(baseCR,\n\t\t\t\t\t\t\tgen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{\n\t\t\t\t\t\t\t\tType: cmapi.CertificateRequestConditionReady,\n\t\t\t\t\t\t\t\tStatus: cmmeta.ConditionFalse,\n\t\t\t\t\t\t\t\tReason: cmapi.CertificateRequestReasonPending,\n\t\t\t\t\t\t\t\tMessage: \"Failed to get certificate key pair from secret default-unit-test-ns\/root-ca-secret: this is a network error\",\n\t\t\t\t\t\t\t\tLastTransitionTime: &metaFixedClockStart,\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t\tfakeLister: &testlisters.FakeSecretLister{\n\t\t\t\tSecretsFn: func(namespace string) clientcorev1.SecretNamespaceLister {\n\t\t\t\t\treturn &testlisters.FakeSecretNamespaceLister{\n\t\t\t\t\t\tGetFn: func(name string) (ret *corev1.Secret, err error) {\n\t\t\t\t\t\t\treturn nil, errors.New(\"this is a network error\")\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: true,\n\t\t},\n\t\t\"should exit nil and set status pending if referenced issuer is not ready\": {\n\t\t\tcertificateRequest: baseCR.DeepCopy(),\n\t\t\tbuilder: &testpkg.Builder{\n\t\t\t\tKubeObjects: []runtime.Object{rsaCASecret},\n\t\t\t\tCertManagerObjects: []runtime.Object{baseCR.DeepCopy(),\n\t\t\t\t\tgen.Issuer(baseIssuer.DeepCopy().Name,\n\t\t\t\t\t\tgen.SetIssuerCA(cmapi.CAIssuer{}),\n\t\t\t\t\t)},\n\t\t\t\tExpectedEvents: []string{\n\t\t\t\t\t\"Normal IssuerNotReady Referenced issuer does not have a Ready status condition\",\n\t\t\t\t},\n\t\t\t\tExpectedActions: []testpkg.Action{\n\t\t\t\t\ttestpkg.NewAction(coretesting.NewUpdateSubresourceAction(\n\t\t\t\t\t\tcmapi.SchemeGroupVersion.WithResource(\"certificaterequests\"),\n\t\t\t\t\t\t\"status\",\n\t\t\t\t\t\tgen.DefaultTestNamespace,\n\t\t\t\t\t\tgen.CertificateRequestFrom(baseCR,\n\t\t\t\t\t\t\tgen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{\n\t\t\t\t\t\t\t\tType: cmapi.CertificateRequestConditionReady,\n\t\t\t\t\t\t\t\tStatus: cmmeta.ConditionFalse,\n\t\t\t\t\t\t\t\tReason: \"Pending\",\n\t\t\t\t\t\t\t\tMessage: \"Referenced issuer does not have a Ready status condition\",\n\t\t\t\t\t\t\t\tLastTransitionTime: &metaFixedClockStart,\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"a secret that fails to sign due to failing to generate the certificate template should set condition to failed\": {\n\t\t\tcertificateRequest: baseCR.DeepCopy(),\n\t\t\ttemplateGenerator: func(*cmapi.CertificateRequest) (*x509.Certificate, error) {\n\t\t\t\treturn nil, errors.New(\"this is a template generate error\")\n\t\t\t},\n\t\t\tbuilder: &testpkg.Builder{\n\t\t\t\tKubeObjects: []runtime.Object{rsaCASecret},\n\t\t\t\tCertManagerObjects: []runtime.Object{baseCR.DeepCopy(), baseIssuer.DeepCopy()},\n\t\t\t\tExpectedEvents: []string{\n\t\t\t\t\t\"Warning SigningError Error generating certificate template: this is a template generate error\",\n\t\t\t\t},\n\t\t\t\tExpectedActions: []testpkg.Action{\n\t\t\t\t\ttestpkg.NewAction(coretesting.NewUpdateSubresourceAction(\n\t\t\t\t\t\tcmapi.SchemeGroupVersion.WithResource(\"certificaterequests\"),\n\t\t\t\t\t\t\"status\",\n\t\t\t\t\t\tgen.DefaultTestNamespace,\n\t\t\t\t\t\tgen.CertificateRequestFrom(baseCR.DeepCopy(),\n\t\t\t\t\t\t\tgen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{\n\t\t\t\t\t\t\t\tType: cmapi.CertificateRequestConditionReady,\n\t\t\t\t\t\t\t\tStatus: cmmeta.ConditionFalse,\n\t\t\t\t\t\t\t\tReason: cmapi.CertificateRequestReasonFailed,\n\t\t\t\t\t\t\t\tMessage: \"Error generating certificate template: this is a template generate error\",\n\t\t\t\t\t\t\t\tLastTransitionTime: &metaFixedClockStart,\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t\tgen.SetCertificateRequestFailureTime(metaFixedClockStart),\n\t\t\t\t\t\t),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"a successful signinig should set condition to Ready\": {\n\t\t\tcertificateRequest: baseCR.DeepCopy(),\n\t\t\ttemplateGenerator: func(cr *cmapi.CertificateRequest) (*x509.Certificate, error) {\n\t\t\t\t_, err := pki.GenerateTemplateFromCertificateRequest(cr)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treturn template, nil\n\t\t\t},\n\t\t\tbuilder: &testpkg.Builder{\n\t\t\t\tKubeObjects: []runtime.Object{rsaCASecret},\n\t\t\t\tCertManagerObjects: []runtime.Object{baseCR.DeepCopy(), baseIssuer.DeepCopy()},\n\t\t\t\tExpectedEvents: []string{\n\t\t\t\t\t\"Normal CertificateIssued Certificate fetched from issuer successfully\",\n\t\t\t\t},\n\t\t\t\tExpectedActions: []testpkg.Action{\n\t\t\t\t\ttestpkg.NewAction(coretesting.NewUpdateSubresourceAction(\n\t\t\t\t\t\tcmapi.SchemeGroupVersion.WithResource(\"certificaterequests\"),\n\t\t\t\t\t\t\"status\",\n\t\t\t\t\t\tgen.DefaultTestNamespace,\n\t\t\t\t\t\tgen.CertificateRequestFrom(baseCR,\n\t\t\t\t\t\t\tgen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{\n\t\t\t\t\t\t\t\tType: cmapi.CertificateRequestConditionReady,\n\t\t\t\t\t\t\t\tStatus: cmmeta.ConditionTrue,\n\t\t\t\t\t\t\t\tReason: cmapi.CertificateRequestReasonIssued,\n\t\t\t\t\t\t\t\tMessage: \"Certificate fetched from issuer successfully\",\n\t\t\t\t\t\t\t\tLastTransitionTime: &metaFixedClockStart,\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t\tgen.SetCertificateRequestCA(rsaPEMCert),\n\t\t\t\t\t\t\tgen.SetCertificateRequestCertificate(certPEM),\n\t\t\t\t\t\t),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfixedClock.SetTime(fixedClockStart)\n\t\t\ttest.builder.Clock = fixedClock\n\t\t\trunTest(t, test)\n\t\t})\n\t}\n}\n\ntype testT struct {\n\tbuilder *testpkg.Builder\n\tcertificateRequest *cmapi.CertificateRequest\n\ttemplateGenerator templateGenerator\n\n\texpectedErr bool\n\n\tfakeLister *testlisters.FakeSecretLister\n}\n\nfunc runTest(t *testing.T, test testT) {\n\ttest.builder.T = t\n\ttest.builder.Init()\n\tdefer test.builder.Stop()\n\n\tca := NewCA(test.builder.Context)\n\n\tif test.fakeLister != nil {\n\t\tca.secretsLister = test.fakeLister\n\t}\n\n\tif test.templateGenerator != nil {\n\t\tca.templateGenerator = test.templateGenerator\n\t}\n\n\tcontroller := certificaterequests.New(apiutil.IssuerCA, ca)\n\tcontroller.Register(test.builder.Context)\n\ttest.builder.Start()\n\n\terr := controller.Sync(context.Background(), test.certificateRequest)\n\tif err != nil && !test.expectedErr {\n\t\tt.Errorf(\"expected to not get an error, but got: %v\", err)\n\t}\n\tif err == nil && test.expectedErr {\n\t\tt.Errorf(\"expected to get an error but did not get one\")\n\t}\n\n\ttest.builder.CheckAndFinish(err)\n}\nspelling: signing\/*\nCopyright 2019 The Jetstack cert-manager contributors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage ca\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\"\n\t\"crypto\/rand\"\n\t\"crypto\/x509\"\n\t\"crypto\/x509\/pkix\"\n\t\"encoding\/asn1\"\n\t\"encoding\/pem\"\n\t\"errors\"\n\t\"testing\"\n\t\"time\"\n\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\tclientcorev1 \"k8s.io\/client-go\/listers\/core\/v1\"\n\tcoretesting \"k8s.io\/client-go\/testing\"\n\tfakeclock \"k8s.io\/utils\/clock\/testing\"\n\n\tapiutil \"github.com\/jetstack\/cert-manager\/pkg\/api\/util\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/apis\/certmanager\"\n\tcmapi \"github.com\/jetstack\/cert-manager\/pkg\/apis\/certmanager\/v1alpha2\"\n\tcmmeta \"github.com\/jetstack\/cert-manager\/pkg\/apis\/meta\/v1\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/controller\/certificaterequests\"\n\ttestpkg \"github.com\/jetstack\/cert-manager\/pkg\/controller\/test\"\n\t\"github.com\/jetstack\/cert-manager\/pkg\/util\/pki\"\n\t\"github.com\/jetstack\/cert-manager\/test\/unit\/gen\"\n\ttestlisters \"github.com\/jetstack\/cert-manager\/test\/unit\/listers\"\n)\n\nvar (\n\tfixedClockStart = time.Now()\n\tfixedClock = fakeclock.NewFakeClock(fixedClockStart)\n)\n\nfunc generateCSR(t *testing.T, secretKey crypto.Signer) []byte {\n\tasn1Subj, _ := asn1.Marshal(pkix.Name{\n\t\tCommonName: \"test\",\n\t}.ToRDNSequence())\n\ttemplate := x509.CertificateRequest{\n\t\tRawSubject: asn1Subj,\n\t\tSignatureAlgorithm: x509.SHA256WithRSA,\n\t}\n\n\tcsrBytes, err := x509.CreateCertificateRequest(rand.Reader, &template, secretKey)\n\tif err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\n\tcsr := pem.EncodeToMemory(&pem.Block{Type: \"CERTIFICATE REQUEST\", Bytes: csrBytes})\n\n\treturn csr\n}\n\nfunc generateSelfSignedCertFromCR(t *testing.T, cr *cmapi.CertificateRequest, key crypto.Signer,\n\tduration time.Duration) (*x509.Certificate, []byte) {\n\ttemplate, err := pki.GenerateTemplateFromCertificateRequest(cr)\n\tif err != nil {\n\t\tt.Errorf(\"error generating template: %v\", err)\n\t}\n\n\tderBytes, err := x509.CreateCertificate(rand.Reader, template, template, key.Public(), key)\n\tif err != nil {\n\t\tt.Errorf(\"error signing cert: %v\", err)\n\t\tt.FailNow()\n\t}\n\n\tpemByteBuffer := bytes.NewBuffer([]byte{})\n\terr = pem.Encode(pemByteBuffer, &pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes})\n\tif err != nil {\n\t\tt.Errorf(\"failed to encode cert: %v\", err)\n\t\tt.FailNow()\n\t}\n\n\treturn template, pemByteBuffer.Bytes()\n}\n\nfunc TestSign(t *testing.T) {\n\tbaseIssuer := gen.Issuer(\"test-issuer\",\n\t\tgen.SetIssuerCA(cmapi.CAIssuer{SecretName: \"root-ca-secret\"}),\n\t\tgen.AddIssuerCondition(cmapi.IssuerCondition{\n\t\t\tType: cmapi.IssuerConditionReady,\n\t\t\tStatus: cmmeta.ConditionTrue,\n\t\t}),\n\t)\n\n\t\/\/ Build root RSA CA\n\tskRSA, err := pki.GenerateRSAPrivateKey(2048)\n\tif err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\n\tskRSAPEM := pki.EncodePKCS1PrivateKey(skRSA)\n\trsaCSR := generateCSR(t, skRSA)\n\n\tbaseCR := gen.CertificateRequest(\"test-cr\",\n\t\tgen.SetCertificateRequestIsCA(true),\n\t\tgen.SetCertificateRequestCSR(rsaCSR),\n\t\tgen.SetCertificateRequestIssuer(cmmeta.ObjectReference{\n\t\t\tName: baseIssuer.DeepCopy().Name,\n\t\t\tGroup: certmanager.GroupName,\n\t\t\tKind: \"Issuer\",\n\t\t}),\n\t\tgen.SetCertificateRequestDuration(&metav1.Duration{Duration: time.Hour * 24 * 60}),\n\t)\n\n\t\/\/ generate a self signed root ca valid for 60d\n\t_, rsaPEMCert := generateSelfSignedCertFromCR(t, baseCR, skRSA, time.Hour*24*60)\n\trsaCASecret := &corev1.Secret{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"root-ca-secret\",\n\t\t\tNamespace: gen.DefaultTestNamespace,\n\t\t},\n\t\tData: map[string][]byte{\n\t\t\tcorev1.TLSPrivateKeyKey: skRSAPEM,\n\t\t\tcorev1.TLSCertKey: rsaPEMCert,\n\t\t},\n\t}\n\n\tbadDataSecret := rsaCASecret.DeepCopy()\n\tbadDataSecret.Data[corev1.TLSPrivateKeyKey] = []byte(\"bad key\")\n\n\ttemplate, err := pki.GenerateTemplateFromCertificateRequest(baseCR)\n\tif err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\tcertPEM, _, err := pki.SignCSRTemplate([]*x509.Certificate{template}, skRSA, template)\n\tif err != nil {\n\t\tt.Error(err)\n\t\tt.FailNow()\n\t}\n\n\tmetaFixedClockStart := metav1.NewTime(fixedClockStart)\n\ttests := map[string]testT{\n\t\t\"a missing CA key pair should set the condition to pending and wait for a re-sync\": {\n\t\t\tcertificateRequest: baseCR.DeepCopy(),\n\t\t\tbuilder: &testpkg.Builder{\n\t\t\t\tKubeObjects: []runtime.Object{},\n\t\t\t\tCertManagerObjects: []runtime.Object{baseCR.DeepCopy(), baseIssuer.DeepCopy()},\n\t\t\t\tExpectedEvents: []string{\n\t\t\t\t\t`Normal SecretMissing Referenced secret default-unit-test-ns\/root-ca-secret not found: secret \"root-ca-secret\" not found`,\n\t\t\t\t},\n\t\t\t\tExpectedActions: []testpkg.Action{\n\t\t\t\t\ttestpkg.NewAction(coretesting.NewUpdateSubresourceAction(\n\t\t\t\t\t\tcmapi.SchemeGroupVersion.WithResource(\"certificaterequests\"),\n\t\t\t\t\t\t\"status\",\n\t\t\t\t\t\tgen.DefaultTestNamespace,\n\t\t\t\t\t\tgen.CertificateRequestFrom(baseCR.DeepCopy(),\n\t\t\t\t\t\t\tgen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{\n\t\t\t\t\t\t\t\tType: cmapi.CertificateRequestConditionReady,\n\t\t\t\t\t\t\t\tStatus: cmmeta.ConditionFalse,\n\t\t\t\t\t\t\t\tReason: cmapi.CertificateRequestReasonPending,\n\t\t\t\t\t\t\t\tMessage: `Referenced secret default-unit-test-ns\/root-ca-secret not found: secret \"root-ca-secret\" not found`,\n\t\t\t\t\t\t\t\tLastTransitionTime: &metaFixedClockStart,\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"a secret with invalid data should set condition to pending and wait for re-sync\": {\n\t\t\tcertificateRequest: baseCR.DeepCopy(),\n\t\t\tbuilder: &testpkg.Builder{\n\t\t\t\tKubeObjects: []runtime.Object{badDataSecret},\n\t\t\t\tCertManagerObjects: []runtime.Object{baseCR.DeepCopy(),\n\t\t\t\t\tgen.IssuerFrom(baseIssuer.DeepCopy(),\n\t\t\t\t\t\tgen.SetIssuerCA(cmapi.CAIssuer{SecretName: badDataSecret.Name}),\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t\tExpectedEvents: []string{\n\t\t\t\t\t\"Normal SecretInvalidData Failed to parse signing CA keypair from secret default-unit-test-ns\/root-ca-secret: error decoding private key PEM block\",\n\t\t\t\t},\n\t\t\t\tExpectedActions: []testpkg.Action{\n\t\t\t\t\ttestpkg.NewAction(coretesting.NewUpdateSubresourceAction(\n\t\t\t\t\t\tcmapi.SchemeGroupVersion.WithResource(\"certificaterequests\"),\n\t\t\t\t\t\t\"status\",\n\t\t\t\t\t\tgen.DefaultTestNamespace,\n\t\t\t\t\t\tgen.CertificateRequestFrom(baseCR.DeepCopy(),\n\t\t\t\t\t\t\tgen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{\n\t\t\t\t\t\t\t\tType: cmapi.CertificateRequestConditionReady,\n\t\t\t\t\t\t\t\tStatus: cmmeta.ConditionFalse,\n\t\t\t\t\t\t\t\tReason: cmapi.CertificateRequestReasonPending,\n\t\t\t\t\t\t\t\tMessage: \"Failed to parse signing CA keypair from secret default-unit-test-ns\/root-ca-secret: error decoding private key PEM block\",\n\t\t\t\t\t\t\t\tLastTransitionTime: &metaFixedClockStart,\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"a CertificateRequest that transiently fails a secret lookup should backoff error to retry\": {\n\t\t\tcertificateRequest: baseCR.DeepCopy(),\n\t\t\tbuilder: &testpkg.Builder{\n\t\t\t\tKubeObjects: []runtime.Object{rsaCASecret},\n\t\t\t\tCertManagerObjects: []runtime.Object{baseCR.DeepCopy(), baseIssuer.DeepCopy()},\n\t\t\t\tExpectedEvents: []string{\n\t\t\t\t\t`Normal SecretGetError Failed to get certificate key pair from secret default-unit-test-ns\/root-ca-secret: this is a network error`,\n\t\t\t\t},\n\t\t\t\tExpectedActions: []testpkg.Action{\n\t\t\t\t\ttestpkg.NewAction(coretesting.NewUpdateSubresourceAction(\n\t\t\t\t\t\tcmapi.SchemeGroupVersion.WithResource(\"certificaterequests\"),\n\t\t\t\t\t\t\"status\",\n\t\t\t\t\t\tgen.DefaultTestNamespace,\n\t\t\t\t\t\tgen.CertificateRequestFrom(baseCR,\n\t\t\t\t\t\t\tgen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{\n\t\t\t\t\t\t\t\tType: cmapi.CertificateRequestConditionReady,\n\t\t\t\t\t\t\t\tStatus: cmmeta.ConditionFalse,\n\t\t\t\t\t\t\t\tReason: cmapi.CertificateRequestReasonPending,\n\t\t\t\t\t\t\t\tMessage: \"Failed to get certificate key pair from secret default-unit-test-ns\/root-ca-secret: this is a network error\",\n\t\t\t\t\t\t\t\tLastTransitionTime: &metaFixedClockStart,\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t\tfakeLister: &testlisters.FakeSecretLister{\n\t\t\t\tSecretsFn: func(namespace string) clientcorev1.SecretNamespaceLister {\n\t\t\t\t\treturn &testlisters.FakeSecretNamespaceLister{\n\t\t\t\t\t\tGetFn: func(name string) (ret *corev1.Secret, err error) {\n\t\t\t\t\t\t\treturn nil, errors.New(\"this is a network error\")\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: true,\n\t\t},\n\t\t\"should exit nil and set status pending if referenced issuer is not ready\": {\n\t\t\tcertificateRequest: baseCR.DeepCopy(),\n\t\t\tbuilder: &testpkg.Builder{\n\t\t\t\tKubeObjects: []runtime.Object{rsaCASecret},\n\t\t\t\tCertManagerObjects: []runtime.Object{baseCR.DeepCopy(),\n\t\t\t\t\tgen.Issuer(baseIssuer.DeepCopy().Name,\n\t\t\t\t\t\tgen.SetIssuerCA(cmapi.CAIssuer{}),\n\t\t\t\t\t)},\n\t\t\t\tExpectedEvents: []string{\n\t\t\t\t\t\"Normal IssuerNotReady Referenced issuer does not have a Ready status condition\",\n\t\t\t\t},\n\t\t\t\tExpectedActions: []testpkg.Action{\n\t\t\t\t\ttestpkg.NewAction(coretesting.NewUpdateSubresourceAction(\n\t\t\t\t\t\tcmapi.SchemeGroupVersion.WithResource(\"certificaterequests\"),\n\t\t\t\t\t\t\"status\",\n\t\t\t\t\t\tgen.DefaultTestNamespace,\n\t\t\t\t\t\tgen.CertificateRequestFrom(baseCR,\n\t\t\t\t\t\t\tgen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{\n\t\t\t\t\t\t\t\tType: cmapi.CertificateRequestConditionReady,\n\t\t\t\t\t\t\t\tStatus: cmmeta.ConditionFalse,\n\t\t\t\t\t\t\t\tReason: \"Pending\",\n\t\t\t\t\t\t\t\tMessage: \"Referenced issuer does not have a Ready status condition\",\n\t\t\t\t\t\t\t\tLastTransitionTime: &metaFixedClockStart,\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"a secret that fails to sign due to failing to generate the certificate template should set condition to failed\": {\n\t\t\tcertificateRequest: baseCR.DeepCopy(),\n\t\t\ttemplateGenerator: func(*cmapi.CertificateRequest) (*x509.Certificate, error) {\n\t\t\t\treturn nil, errors.New(\"this is a template generate error\")\n\t\t\t},\n\t\t\tbuilder: &testpkg.Builder{\n\t\t\t\tKubeObjects: []runtime.Object{rsaCASecret},\n\t\t\t\tCertManagerObjects: []runtime.Object{baseCR.DeepCopy(), baseIssuer.DeepCopy()},\n\t\t\t\tExpectedEvents: []string{\n\t\t\t\t\t\"Warning SigningError Error generating certificate template: this is a template generate error\",\n\t\t\t\t},\n\t\t\t\tExpectedActions: []testpkg.Action{\n\t\t\t\t\ttestpkg.NewAction(coretesting.NewUpdateSubresourceAction(\n\t\t\t\t\t\tcmapi.SchemeGroupVersion.WithResource(\"certificaterequests\"),\n\t\t\t\t\t\t\"status\",\n\t\t\t\t\t\tgen.DefaultTestNamespace,\n\t\t\t\t\t\tgen.CertificateRequestFrom(baseCR.DeepCopy(),\n\t\t\t\t\t\t\tgen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{\n\t\t\t\t\t\t\t\tType: cmapi.CertificateRequestConditionReady,\n\t\t\t\t\t\t\t\tStatus: cmmeta.ConditionFalse,\n\t\t\t\t\t\t\t\tReason: cmapi.CertificateRequestReasonFailed,\n\t\t\t\t\t\t\t\tMessage: \"Error generating certificate template: this is a template generate error\",\n\t\t\t\t\t\t\t\tLastTransitionTime: &metaFixedClockStart,\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t\tgen.SetCertificateRequestFailureTime(metaFixedClockStart),\n\t\t\t\t\t\t),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"a successful signing should set condition to Ready\": {\n\t\t\tcertificateRequest: baseCR.DeepCopy(),\n\t\t\ttemplateGenerator: func(cr *cmapi.CertificateRequest) (*x509.Certificate, error) {\n\t\t\t\t_, err := pki.GenerateTemplateFromCertificateRequest(cr)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\n\t\t\t\treturn template, nil\n\t\t\t},\n\t\t\tbuilder: &testpkg.Builder{\n\t\t\t\tKubeObjects: []runtime.Object{rsaCASecret},\n\t\t\t\tCertManagerObjects: []runtime.Object{baseCR.DeepCopy(), baseIssuer.DeepCopy()},\n\t\t\t\tExpectedEvents: []string{\n\t\t\t\t\t\"Normal CertificateIssued Certificate fetched from issuer successfully\",\n\t\t\t\t},\n\t\t\t\tExpectedActions: []testpkg.Action{\n\t\t\t\t\ttestpkg.NewAction(coretesting.NewUpdateSubresourceAction(\n\t\t\t\t\t\tcmapi.SchemeGroupVersion.WithResource(\"certificaterequests\"),\n\t\t\t\t\t\t\"status\",\n\t\t\t\t\t\tgen.DefaultTestNamespace,\n\t\t\t\t\t\tgen.CertificateRequestFrom(baseCR,\n\t\t\t\t\t\t\tgen.SetCertificateRequestStatusCondition(cmapi.CertificateRequestCondition{\n\t\t\t\t\t\t\t\tType: cmapi.CertificateRequestConditionReady,\n\t\t\t\t\t\t\t\tStatus: cmmeta.ConditionTrue,\n\t\t\t\t\t\t\t\tReason: cmapi.CertificateRequestReasonIssued,\n\t\t\t\t\t\t\t\tMessage: \"Certificate fetched from issuer successfully\",\n\t\t\t\t\t\t\t\tLastTransitionTime: &metaFixedClockStart,\n\t\t\t\t\t\t\t}),\n\t\t\t\t\t\t\tgen.SetCertificateRequestCA(rsaPEMCert),\n\t\t\t\t\t\t\tgen.SetCertificateRequestCertificate(certPEM),\n\t\t\t\t\t\t),\n\t\t\t\t\t)),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfixedClock.SetTime(fixedClockStart)\n\t\t\ttest.builder.Clock = fixedClock\n\t\t\trunTest(t, test)\n\t\t})\n\t}\n}\n\ntype testT struct {\n\tbuilder *testpkg.Builder\n\tcertificateRequest *cmapi.CertificateRequest\n\ttemplateGenerator templateGenerator\n\n\texpectedErr bool\n\n\tfakeLister *testlisters.FakeSecretLister\n}\n\nfunc runTest(t *testing.T, test testT) {\n\ttest.builder.T = t\n\ttest.builder.Init()\n\tdefer test.builder.Stop()\n\n\tca := NewCA(test.builder.Context)\n\n\tif test.fakeLister != nil {\n\t\tca.secretsLister = test.fakeLister\n\t}\n\n\tif test.templateGenerator != nil {\n\t\tca.templateGenerator = test.templateGenerator\n\t}\n\n\tcontroller := certificaterequests.New(apiutil.IssuerCA, ca)\n\tcontroller.Register(test.builder.Context)\n\ttest.builder.Start()\n\n\terr := controller.Sync(context.Background(), test.certificateRequest)\n\tif err != nil && !test.expectedErr {\n\t\tt.Errorf(\"expected to not get an error, but got: %v\", err)\n\t}\n\tif err == nil && test.expectedErr {\n\t\tt.Errorf(\"expected to get an error but did not get one\")\n\t}\n\n\ttest.builder.CheckAndFinish(err)\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage aggregator\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\trestful \"github.com\/emicklei\/go-restful\"\n\t\"github.com\/go-openapi\/spec\"\n\n\t\"k8s.io\/apiserver\/pkg\/server\"\n\t\"k8s.io\/kube-aggregator\/pkg\/apis\/apiregistration\"\n\t\"k8s.io\/kube-openapi\/pkg\/aggregator\"\n\t\"k8s.io\/kube-openapi\/pkg\/builder\"\n\t\"k8s.io\/kube-openapi\/pkg\/common\"\n\t\"k8s.io\/kube-openapi\/pkg\/handler\"\n)\n\n\/\/ SpecAggregator calls out to http handlers of APIServices and merges specs. It keeps state of the last\n\/\/ known specs including the http etag.\ntype SpecAggregator interface {\n\tAddUpdateAPIService(handler http.Handler, apiService *apiregistration.APIService) error\n\tUpdateAPIServiceSpec(apiServiceName string, spec *spec.Swagger, etag string) error\n\tRemoveAPIServiceSpec(apiServiceName string) error\n\tGetAPIServiceInfo(apiServiceName string) (handler http.Handler, etag string, exists bool)\n\tGetAPIServiceNames() []string\n}\n\nconst (\n\taggregatorUser = \"system:aggregator\"\n\tspecDownloadTimeout = 60 * time.Second\n\tlocalDelegateChainNamePrefix = \"k8s_internal_local_delegation_chain_\"\n\tlocalDelegateChainNamePattern = localDelegateChainNamePrefix + \"%010d\"\n\n\t\/\/ A randomly generated UUID to differentiate local and remote eTags.\n\tlocallyGeneratedEtagPrefix = \"\\\"6E8F849B434D4B98A569B9D7718876E9-\"\n)\n\n\/\/ IsLocalAPIService returns true for local specs from delegates.\nfunc IsLocalAPIService(apiServiceName string) bool {\n\treturn strings.HasPrefix(apiServiceName, localDelegateChainNamePrefix)\n}\n\n\/\/ GetAPIServicesName returns the names of APIServices recorded in specAggregator.openAPISpecs.\n\/\/ We use this function to pass the names of local APIServices to the controller in this package,\n\/\/ so that the controller can periodically sync the OpenAPI spec from delegation API servers.\nfunc (s *specAggregator) GetAPIServiceNames() []string {\n\tnames := make([]string, len(s.openAPISpecs))\n\tfor key := range s.openAPISpecs {\n\t\tnames = append(names, key)\n\t}\n\treturn names\n}\n\n\/\/ BuildAndRegisterAggregator registered OpenAPI aggregator handler. This function is not thread safe as it only being called on startup.\nfunc BuildAndRegisterAggregator(downloader *Downloader, delegationTarget server.DelegationTarget, webServices []*restful.WebService,\n\tconfig *common.Config, pathHandler common.PathHandler) (SpecAggregator, error) {\n\ts := &specAggregator{\n\t\topenAPISpecs: map[string]*openAPISpecInfo{},\n\t}\n\n\ti := 0\n\t\/\/ Build Aggregator's spec\n\taggregatorOpenAPISpec, err := builder.BuildOpenAPISpec(webServices, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Reserving non-name spec for aggregator's Spec.\n\ts.addLocalSpec(aggregatorOpenAPISpec, nil, fmt.Sprintf(localDelegateChainNamePattern, i), \"\")\n\ti++\n\tfor delegate := delegationTarget; delegate != nil; delegate = delegate.NextDelegate() {\n\t\thandler := delegate.UnprotectedHandler()\n\t\tif handler == nil {\n\t\t\tcontinue\n\t\t}\n\t\tdelegateSpec, etag, _, err := downloader.Download(handler, \"\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif delegateSpec == nil {\n\t\t\tcontinue\n\t\t}\n\t\ts.addLocalSpec(delegateSpec, handler, fmt.Sprintf(localDelegateChainNamePattern, i), etag)\n\t\ti++\n\t}\n\n\t\/\/ Build initial spec to serve.\n\tspecToServe, err := s.buildOpenAPISpec()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Install handler\n\ts.openAPIVersionedService, err = handler.RegisterOpenAPIVersionedService(\n\t\tspecToServe, \"\/openapi\/v2\", pathHandler)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}\n\ntype specAggregator struct {\n\t\/\/ mutex protects all members of this struct.\n\trwMutex sync.RWMutex\n\n\t\/\/ Map of API Services' OpenAPI specs by their name\n\topenAPISpecs map[string]*openAPISpecInfo\n\n\t\/\/ provided for dynamic OpenAPI spec\n\topenAPIVersionedService *handler.OpenAPIService\n}\n\nvar _ SpecAggregator = &specAggregator{}\n\n\/\/ This function is not thread safe as it only being called on startup.\nfunc (s *specAggregator) addLocalSpec(spec *spec.Swagger, localHandler http.Handler, name, etag string) {\n\tlocalAPIService := apiregistration.APIService{}\n\tlocalAPIService.Name = name\n\ts.openAPISpecs[name] = &openAPISpecInfo{\n\t\tetag: etag,\n\t\tapiService: localAPIService,\n\t\thandler: localHandler,\n\t\tspec: spec,\n\t}\n}\n\n\/\/ openAPISpecInfo is used to store OpenAPI spec with its priority.\n\/\/ It can be used to sort specs with their priorities.\ntype openAPISpecInfo struct {\n\tapiService apiregistration.APIService\n\n\t\/\/ Specification of this API Service. If null then the spec is not loaded yet.\n\tspec *spec.Swagger\n\thandler http.Handler\n\tetag string\n}\n\n\/\/ buildOpenAPISpec aggregates all OpenAPI specs. It is not thread-safe. The caller is responsible to hold proper locks.\nfunc (s *specAggregator) buildOpenAPISpec() (specToReturn *spec.Swagger, err error) {\n\tspecs := []openAPISpecInfo{}\n\tfor _, specInfo := range s.openAPISpecs {\n\t\tif specInfo.spec == nil {\n\t\t\tcontinue\n\t\t}\n\t\tspecs = append(specs, *specInfo)\n\t}\n\tif len(specs) == 0 {\n\t\treturn &spec.Swagger{}, nil\n\t}\n\tsortByPriority(specs)\n\tfor _, specInfo := range specs {\n\t\tif specToReturn == nil {\n\t\t\tspecToReturn = &spec.Swagger{}\n\t\t\t*specToReturn = *specInfo.spec\n\t\t\t\/\/ Paths and Definitions are set by MergeSpecsIgnorePathConflict\n\t\t\tspecToReturn.Paths = nil\n\t\t\tspecToReturn.Definitions = nil\n\t\t}\n\t\tif err := aggregator.MergeSpecsIgnorePathConflict(specToReturn, specInfo.spec); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn specToReturn, nil\n}\n\n\/\/ updateOpenAPISpec aggregates all OpenAPI specs. It is not thread-safe. The caller is responsible to hold proper locks.\nfunc (s *specAggregator) updateOpenAPISpec() error {\n\tif s.openAPIVersionedService == nil {\n\t\treturn nil\n\t}\n\tspecToServe, err := s.buildOpenAPISpec()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn s.openAPIVersionedService.UpdateSpec(specToServe)\n}\n\n\/\/ tryUpdatingServiceSpecs tries updating openAPISpecs map with specified specInfo, and keeps the map intact\n\/\/ if the update fails.\nfunc (s *specAggregator) tryUpdatingServiceSpecs(specInfo *openAPISpecInfo) error {\n\tif specInfo == nil {\n\t\treturn fmt.Errorf(\"invalid input: specInfo must be non-nil\")\n\t}\n\torgSpecInfo, exists := s.openAPISpecs[specInfo.apiService.Name]\n\t\/\/ Skip aggregation if OpenAPI spec didn't change\n\tif exists && orgSpecInfo != nil && orgSpecInfo.etag == specInfo.etag {\n\t\treturn nil\n\t}\n\ts.openAPISpecs[specInfo.apiService.Name] = specInfo\n\tif err := s.updateOpenAPISpec(); err != nil {\n\t\tif exists {\n\t\t\ts.openAPISpecs[specInfo.apiService.Name] = orgSpecInfo\n\t\t} else {\n\t\t\tdelete(s.openAPISpecs, specInfo.apiService.Name)\n\t\t}\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ tryDeleteServiceSpecs tries delete specified specInfo from openAPISpecs map, and keeps the map intact\n\/\/ if the update fails.\nfunc (s *specAggregator) tryDeleteServiceSpecs(apiServiceName string) error {\n\torgSpecInfo, exists := s.openAPISpecs[apiServiceName]\n\tif !exists {\n\t\treturn nil\n\t}\n\tdelete(s.openAPISpecs, apiServiceName)\n\tif err := s.updateOpenAPISpec(); err != nil {\n\t\ts.openAPISpecs[apiServiceName] = orgSpecInfo\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ UpdateAPIServiceSpec updates the api service's OpenAPI spec. It is thread safe.\nfunc (s *specAggregator) UpdateAPIServiceSpec(apiServiceName string, spec *spec.Swagger, etag string) error {\n\ts.rwMutex.Lock()\n\tdefer s.rwMutex.Unlock()\n\n\tspecInfo, existingService := s.openAPISpecs[apiServiceName]\n\tif !existingService {\n\t\treturn fmt.Errorf(\"APIService %q does not exists\", apiServiceName)\n\t}\n\n\t\/\/ For APIServices (non-local) specs, only merge their \/apis\/ prefixed endpoint as it is the only paths\n\t\/\/ proxy handler delegates.\n\tif specInfo.apiService.Spec.Service != nil {\n\t\tspec = aggregator.FilterSpecByPathsWithoutSideEffects(spec, []string{\"\/apis\/\"})\n\t}\n\n\treturn s.tryUpdatingServiceSpecs(&openAPISpecInfo{\n\t\tapiService: specInfo.apiService,\n\t\tspec: spec,\n\t\thandler: specInfo.handler,\n\t\tetag: etag,\n\t})\n}\n\n\/\/ AddUpdateAPIService adds or updates the api service. It is thread safe.\nfunc (s *specAggregator) AddUpdateAPIService(handler http.Handler, apiService *apiregistration.APIService) error {\n\ts.rwMutex.Lock()\n\tdefer s.rwMutex.Unlock()\n\n\tif apiService.Spec.Service == nil {\n\t\t\/\/ All local specs should be already aggregated using local delegate chain\n\t\treturn nil\n\t}\n\n\tnewSpec := &openAPISpecInfo{\n\t\tapiService: *apiService,\n\t\thandler: handler,\n\t}\n\tif specInfo, existingService := s.openAPISpecs[apiService.Name]; existingService {\n\t\tnewSpec.etag = specInfo.etag\n\t\tnewSpec.spec = specInfo.spec\n\t}\n\treturn s.tryUpdatingServiceSpecs(newSpec)\n}\n\n\/\/ RemoveAPIServiceSpec removes an api service from OpenAPI aggregation. If it does not exist, no error is returned.\n\/\/ It is thread safe.\nfunc (s *specAggregator) RemoveAPIServiceSpec(apiServiceName string) error {\n\ts.rwMutex.Lock()\n\tdefer s.rwMutex.Unlock()\n\n\tif _, existingService := s.openAPISpecs[apiServiceName]; !existingService {\n\t\treturn nil\n\t}\n\n\treturn s.tryDeleteServiceSpecs(apiServiceName)\n}\n\n\/\/ GetAPIServiceSpec returns api service spec info\nfunc (s *specAggregator) GetAPIServiceInfo(apiServiceName string) (handler http.Handler, etag string, exists bool) {\n\ts.rwMutex.RLock()\n\tdefer s.rwMutex.RUnlock()\n\n\tif info, existingService := s.openAPISpecs[apiServiceName]; existingService {\n\t\treturn info.handler, info.etag, true\n\t}\n\treturn nil, \"\", false\n}\nkube-aggregator: update existing openapi spec info if pre-existing\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage aggregator\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\trestful \"github.com\/emicklei\/go-restful\"\n\t\"github.com\/go-openapi\/spec\"\n\n\t\"k8s.io\/apiserver\/pkg\/server\"\n\t\"k8s.io\/kube-aggregator\/pkg\/apis\/apiregistration\"\n\t\"k8s.io\/kube-openapi\/pkg\/aggregator\"\n\t\"k8s.io\/kube-openapi\/pkg\/builder\"\n\t\"k8s.io\/kube-openapi\/pkg\/common\"\n\t\"k8s.io\/kube-openapi\/pkg\/handler\"\n)\n\n\/\/ SpecAggregator calls out to http handlers of APIServices and merges specs. It keeps state of the last\n\/\/ known specs including the http etag.\ntype SpecAggregator interface {\n\tAddUpdateAPIService(handler http.Handler, apiService *apiregistration.APIService) error\n\tUpdateAPIServiceSpec(apiServiceName string, spec *spec.Swagger, etag string) error\n\tRemoveAPIServiceSpec(apiServiceName string) error\n\tGetAPIServiceInfo(apiServiceName string) (handler http.Handler, etag string, exists bool)\n\tGetAPIServiceNames() []string\n}\n\nconst (\n\taggregatorUser = \"system:aggregator\"\n\tspecDownloadTimeout = 60 * time.Second\n\tlocalDelegateChainNamePrefix = \"k8s_internal_local_delegation_chain_\"\n\tlocalDelegateChainNamePattern = localDelegateChainNamePrefix + \"%010d\"\n\n\t\/\/ A randomly generated UUID to differentiate local and remote eTags.\n\tlocallyGeneratedEtagPrefix = \"\\\"6E8F849B434D4B98A569B9D7718876E9-\"\n)\n\n\/\/ IsLocalAPIService returns true for local specs from delegates.\nfunc IsLocalAPIService(apiServiceName string) bool {\n\treturn strings.HasPrefix(apiServiceName, localDelegateChainNamePrefix)\n}\n\n\/\/ GetAPIServicesName returns the names of APIServices recorded in specAggregator.openAPISpecs.\n\/\/ We use this function to pass the names of local APIServices to the controller in this package,\n\/\/ so that the controller can periodically sync the OpenAPI spec from delegation API servers.\nfunc (s *specAggregator) GetAPIServiceNames() []string {\n\tnames := make([]string, len(s.openAPISpecs))\n\tfor key := range s.openAPISpecs {\n\t\tnames = append(names, key)\n\t}\n\treturn names\n}\n\n\/\/ BuildAndRegisterAggregator registered OpenAPI aggregator handler. This function is not thread safe as it only being called on startup.\nfunc BuildAndRegisterAggregator(downloader *Downloader, delegationTarget server.DelegationTarget, webServices []*restful.WebService,\n\tconfig *common.Config, pathHandler common.PathHandler) (SpecAggregator, error) {\n\ts := &specAggregator{\n\t\topenAPISpecs: map[string]*openAPISpecInfo{},\n\t}\n\n\ti := 0\n\t\/\/ Build Aggregator's spec\n\taggregatorOpenAPISpec, err := builder.BuildOpenAPISpec(webServices, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Reserving non-name spec for aggregator's Spec.\n\ts.addLocalSpec(aggregatorOpenAPISpec, nil, fmt.Sprintf(localDelegateChainNamePattern, i), \"\")\n\ti++\n\tfor delegate := delegationTarget; delegate != nil; delegate = delegate.NextDelegate() {\n\t\thandler := delegate.UnprotectedHandler()\n\t\tif handler == nil {\n\t\t\tcontinue\n\t\t}\n\t\tdelegateSpec, etag, _, err := downloader.Download(handler, \"\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif delegateSpec == nil {\n\t\t\tcontinue\n\t\t}\n\t\ts.addLocalSpec(delegateSpec, handler, fmt.Sprintf(localDelegateChainNamePattern, i), etag)\n\t\ti++\n\t}\n\n\t\/\/ Build initial spec to serve.\n\tspecToServe, err := s.buildOpenAPISpec()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Install handler\n\ts.openAPIVersionedService, err = handler.RegisterOpenAPIVersionedService(\n\t\tspecToServe, \"\/openapi\/v2\", pathHandler)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s, nil\n}\n\ntype specAggregator struct {\n\t\/\/ mutex protects all members of this struct.\n\trwMutex sync.RWMutex\n\n\t\/\/ Map of API Services' OpenAPI specs by their name\n\topenAPISpecs map[string]*openAPISpecInfo\n\n\t\/\/ provided for dynamic OpenAPI spec\n\topenAPIVersionedService *handler.OpenAPIService\n}\n\nvar _ SpecAggregator = &specAggregator{}\n\n\/\/ This function is not thread safe as it only being called on startup.\nfunc (s *specAggregator) addLocalSpec(spec *spec.Swagger, localHandler http.Handler, name, etag string) {\n\tlocalAPIService := apiregistration.APIService{}\n\tlocalAPIService.Name = name\n\ts.openAPISpecs[name] = &openAPISpecInfo{\n\t\tetag: etag,\n\t\tapiService: localAPIService,\n\t\thandler: localHandler,\n\t\tspec: spec,\n\t}\n}\n\n\/\/ openAPISpecInfo is used to store OpenAPI spec with its priority.\n\/\/ It can be used to sort specs with their priorities.\ntype openAPISpecInfo struct {\n\tapiService apiregistration.APIService\n\n\t\/\/ Specification of this API Service. If null then the spec is not loaded yet.\n\tspec *spec.Swagger\n\thandler http.Handler\n\tetag string\n}\n\n\/\/ buildOpenAPISpec aggregates all OpenAPI specs. It is not thread-safe. The caller is responsible to hold proper locks.\nfunc (s *specAggregator) buildOpenAPISpec() (specToReturn *spec.Swagger, err error) {\n\tspecs := []openAPISpecInfo{}\n\tfor _, specInfo := range s.openAPISpecs {\n\t\tif specInfo.spec == nil {\n\t\t\tcontinue\n\t\t}\n\t\tspecs = append(specs, *specInfo)\n\t}\n\tif len(specs) == 0 {\n\t\treturn &spec.Swagger{}, nil\n\t}\n\tsortByPriority(specs)\n\tfor _, specInfo := range specs {\n\t\tif specToReturn == nil {\n\t\t\tspecToReturn = &spec.Swagger{}\n\t\t\t*specToReturn = *specInfo.spec\n\t\t\t\/\/ Paths and Definitions are set by MergeSpecsIgnorePathConflict\n\t\t\tspecToReturn.Paths = nil\n\t\t\tspecToReturn.Definitions = nil\n\t\t}\n\t\tif err := aggregator.MergeSpecsIgnorePathConflict(specToReturn, specInfo.spec); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn specToReturn, nil\n}\n\n\/\/ updateOpenAPISpec aggregates all OpenAPI specs. It is not thread-safe. The caller is responsible to hold proper locks.\nfunc (s *specAggregator) updateOpenAPISpec() error {\n\tif s.openAPIVersionedService == nil {\n\t\treturn nil\n\t}\n\tspecToServe, err := s.buildOpenAPISpec()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn s.openAPIVersionedService.UpdateSpec(specToServe)\n}\n\n\/\/ tryUpdatingServiceSpecs tries updating openAPISpecs map with specified specInfo, and keeps the map intact\n\/\/ if the update fails.\nfunc (s *specAggregator) tryUpdatingServiceSpecs(specInfo *openAPISpecInfo) error {\n\tif specInfo == nil {\n\t\treturn fmt.Errorf(\"invalid input: specInfo must be non-nil\")\n\t}\n\torigSpecInfo, existedBefore := s.openAPISpecs[specInfo.apiService.Name]\n\ts.openAPISpecs[specInfo.apiService.Name] = specInfo\n\n\t\/\/ Skip aggregation if OpenAPI spec didn't change\n\tif existedBefore && origSpecInfo != nil && origSpecInfo.etag == specInfo.etag {\n\t\treturn nil\n\t}\n\tif err := s.updateOpenAPISpec(); err != nil {\n\t\tif existedBefore {\n\t\t\ts.openAPISpecs[specInfo.apiService.Name] = origSpecInfo\n\t\t} else {\n\t\t\tdelete(s.openAPISpecs, specInfo.apiService.Name)\n\t\t}\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ tryDeleteServiceSpecs tries delete specified specInfo from openAPISpecs map, and keeps the map intact\n\/\/ if the update fails.\nfunc (s *specAggregator) tryDeleteServiceSpecs(apiServiceName string) error {\n\torgSpecInfo, exists := s.openAPISpecs[apiServiceName]\n\tif !exists {\n\t\treturn nil\n\t}\n\tdelete(s.openAPISpecs, apiServiceName)\n\tif err := s.updateOpenAPISpec(); err != nil {\n\t\ts.openAPISpecs[apiServiceName] = orgSpecInfo\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ UpdateAPIServiceSpec updates the api service's OpenAPI spec. It is thread safe.\nfunc (s *specAggregator) UpdateAPIServiceSpec(apiServiceName string, spec *spec.Swagger, etag string) error {\n\ts.rwMutex.Lock()\n\tdefer s.rwMutex.Unlock()\n\n\tspecInfo, existingService := s.openAPISpecs[apiServiceName]\n\tif !existingService {\n\t\treturn fmt.Errorf(\"APIService %q does not exists\", apiServiceName)\n\t}\n\n\t\/\/ For APIServices (non-local) specs, only merge their \/apis\/ prefixed endpoint as it is the only paths\n\t\/\/ proxy handler delegates.\n\tif specInfo.apiService.Spec.Service != nil {\n\t\tspec = aggregator.FilterSpecByPathsWithoutSideEffects(spec, []string{\"\/apis\/\"})\n\t}\n\n\treturn s.tryUpdatingServiceSpecs(&openAPISpecInfo{\n\t\tapiService: specInfo.apiService,\n\t\tspec: spec,\n\t\thandler: specInfo.handler,\n\t\tetag: etag,\n\t})\n}\n\n\/\/ AddUpdateAPIService adds or updates the api service. It is thread safe.\nfunc (s *specAggregator) AddUpdateAPIService(handler http.Handler, apiService *apiregistration.APIService) error {\n\ts.rwMutex.Lock()\n\tdefer s.rwMutex.Unlock()\n\n\tif apiService.Spec.Service == nil {\n\t\t\/\/ All local specs should be already aggregated using local delegate chain\n\t\treturn nil\n\t}\n\n\tnewSpec := &openAPISpecInfo{\n\t\tapiService: *apiService,\n\t\thandler: handler,\n\t}\n\tif specInfo, existingService := s.openAPISpecs[apiService.Name]; existingService {\n\t\tnewSpec.etag = specInfo.etag\n\t\tnewSpec.spec = specInfo.spec\n\t}\n\treturn s.tryUpdatingServiceSpecs(newSpec)\n}\n\n\/\/ RemoveAPIServiceSpec removes an api service from OpenAPI aggregation. If it does not exist, no error is returned.\n\/\/ It is thread safe.\nfunc (s *specAggregator) RemoveAPIServiceSpec(apiServiceName string) error {\n\ts.rwMutex.Lock()\n\tdefer s.rwMutex.Unlock()\n\n\tif _, existingService := s.openAPISpecs[apiServiceName]; !existingService {\n\t\treturn nil\n\t}\n\n\treturn s.tryDeleteServiceSpecs(apiServiceName)\n}\n\n\/\/ GetAPIServiceSpec returns api service spec info\nfunc (s *specAggregator) GetAPIServiceInfo(apiServiceName string) (handler http.Handler, etag string, exists bool) {\n\ts.rwMutex.RLock()\n\tdefer s.rwMutex.RUnlock()\n\n\tif info, existingService := s.openAPISpecs[apiServiceName]; existingService {\n\t\treturn info.handler, info.etag, true\n\t}\n\treturn nil, \"\", false\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/cli\"\n)\n\nfunc doInit(c *cli.Context) {\n\tif err := SetConfig(); err != nil {\n\t\tlog.Fatalf(\"failed to create a setting file\\n\", err)\n\t}\n}\n\nfunc doStart(c *cli.Context) {\n\tlog.Println(\"[start] Initialization...\")\n\n\t\/\/ read a configuration file\n\tconfig, err := GetConfig()\n\tif err != nil {\n\t\tlog.Fatalf(\"at first, \\\"$ surelock-homes init\\\"\\n\", err)\n\t}\n\n\t\/\/ command option check\n\tcmdOptionSerial := c.String(\"serial\")\n\tif cmdOptionSerial != \"SERIAL PORT\" {\n\t\tconfig.SerialPort.Serial = cmdOptionSerial\n\t}\n\n\t\/\/ twitter initial\n\ttoken := TwitterInit(config.Twitter)\n\t\/\/ serial initial\n\tserialObject, err := SerialInit(config.SerialPort)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to open the serial port\\n\", err)\n\t}\n\n\tlog.Println(\"[start] TwitterStreaming and BeaconScanning...\")\n\n\tfor {\n\t\ttimestamp := make(chan string)\n\t\tgo TwitterStreaming(timestamp, token, config.Twitter.ServerAccount)\n\t\tgo BeaconScan(timestamp, config.Bluetooth)\n\n\t\tts1 := <-timestamp\n\t\tts2 := <-timestamp\n\n\t\t\/\/ door doesn't open when the difference exceeds the 5 minutes\n\t\ttimediff := TimeDiff(ts1, ts2)\n\t\tif timediff >= 300 || timediff < 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ send a open command\n\t\terr = SerialWrite(serialObject, \"OC0\")\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to write\\n\", err)\n\t\t}\n\n\t\t\/\/ twitter post\n\t\tmessage := strings.Join([]string{\"@\", config.Twitter.ClientAccount, \" [from Surelock-Homes] The door has opened.\"}, \"\")\n\t\terr = TwitterPost(token, message)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to post a tweet\\n\", err)\n\t\t}\n\t}\n}\nadd: action door closepackage main\n\nimport (\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/cli\"\n)\n\nfunc doInit(c *cli.Context) {\n\tif err := SetConfig(); err != nil {\n\t\tlog.Fatalf(\"failed to create a setting file\\n\", err)\n\t}\n}\n\nfunc doStart(c *cli.Context) {\n\tlog.Println(\"[start] Initialization...\")\n\n\t\/\/ read a configuration file\n\tconfig, err := GetConfig()\n\tif err != nil {\n\t\tlog.Fatalf(\"at first, \\\"$ surelock-homes init\\\"\\n\", err)\n\t}\n\n\t\/\/ command option check\n\tcmdOptionSerial := c.String(\"serial\")\n\tif cmdOptionSerial != \"SERIAL PORT\" {\n\t\tconfig.SerialPort.Serial = cmdOptionSerial\n\t}\n\n\t\/\/ twitter initial\n\ttoken := TwitterInit(config.Twitter)\n\t\/\/ serial initial\n\tserialObject, err := SerialInit(config.SerialPort)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to open the serial port\\n\", err)\n\t}\n\n\tlog.Println(\"[start] TwitterStreaming and BeaconScanning...\")\n\n\tfor {\n\t\ttimestamp := make(chan string)\n\t\tgo TwitterStreaming(timestamp, token, config.Twitter.ServerAccount)\n\t\tgo BeaconScan(timestamp, config.Bluetooth)\n\n\t\tts1 := <-timestamp\n\t\tts2 := <-timestamp\n\n\t\t\/\/ send a close command\n\t\terr = SerialWrite(serialObject, \"OC1\")\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to write\\n\", err)\n\t\t}\n\n\t\t\/\/ twitter post\n\t\tmessage := \"[from Surelock-Homes] The door has closed.\"\n\t\terr = TwitterPost(token, message)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to post a tweet\\n\", err)\n\t\t}\n\n\t\t\/\/ door doesn't open when the difference exceeds the 5 minutes\n\t\ttimediff := TimeDiff(ts1, ts2)\n\t\tif timediff >= 300 || timediff < 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ send a open command\n\t\terr = SerialWrite(serialObject, \"OC0\")\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to write\\n\", err)\n\t\t}\n\n\t\t\/\/ twitter post\n\t\tmessage = strings.Join([]string{\"@\", config.Twitter.ClientAccount, \" [from Surelock-Homes] The door has opened.\"}, \"\")\n\t\terr = TwitterPost(token, message)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to post a tweet\\n\", err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"package cloudconfig\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\n\t\"github.com\/tmrts\/flamingo\/pkg\/sys\/identity\"\n\t\"github.com\/tmrts\/flamingo\/pkg\/sys\/ssh\"\n\n\t\"gopkg.in\/yaml.v2\"\n)\n\ntype WriteFile struct {\n\tPath string\n\tOwner string\n\tPermissions string\n\tEncoding string\n\n\tContent string\n}\n\nvar (\n\tErrNotACloudConfigFile = errors.New(\"cloudconfig: not a cloud-config file\")\n)\n\nfunc IsValid(rdr io.ReadCloser) error {\n\tbuf, err := ioutil.ReadAll(rdr)\n\tif err != nil {\n\t\treturn err\n\t}\n\trdr.Close()\n\n\tif contents := string(buf); strings.HasPrefix(contents, \"#cloud-config\\n\") != true {\n\t\treturn ErrNotACloudConfigFile\n\t}\n\n\treturn nil\n}\n\ntype user struct {\n\tidentity.User `yaml:\"\"`\n\tAuthorizedSSHKeys []string `yaml:\"ssh-authorized-keys\"`\n\tSSHImportID string `yaml:\"ssh-import-id\"`\n}\n\ntype cloudConfig struct {\n\tRunCMD []interface{}\n\tAuthorizedKeys []string `yaml:\"ssh_authorized_keys\"`\n\tSSHKeyPairs map[string]string `yaml:\"ssh_keys\"`\n\tUsers []identity.User `yaml:\"users\"`\n\tGroups []interface{} `yaml:\"groups\"`\n\tFiles []WriteFile `yaml:\"write_files\"`\n}\n\n\/\/ Digest is the parsed cloud-config file.\ntype Digest struct {\n\tCommands [][]string\n\tFiles []WriteFile\n\tGroups map[string][]string\n\tUsers map[string]identity.User\n\tAuthorizedKeys map[string][]ssh.Key\n\tSSHKeyPairs []ssh.KeyPair\n}\n\n\/\/ Parse parses the given cloud-config file when it's path is given.\nfunc Parse(rdr io.ReadCloser) (*Digest, error) {\n\tbuf, err := ioutil.ReadAll(rdr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trdr.Close()\n\n\tvar conf cloudConfig\n\tif err := yaml.Unmarshal(buf, &conf); err != nil {\n\t\treturn nil, err\n\t}\n\tvar c Digest\n\n\tc.Commands = parseCommands(conf.RunCMD)\n\n\tc.AuthorizedKeys = make(map[string][]ssh.Key)\n\tfor _, key := range conf.AuthorizedKeys {\n\t\tc.AuthorizedKeys[\"root\"] = append(c.AuthorizedKeys[\"root\"], ssh.Key(key))\n\t}\n\n\tpublic_keys, private_keys := make(map[string]string), make(map[string]string)\n\n\t\/\/ TODO: Extend ssh key syntax beyond cloud-config\n\tfor k, v := range conf.SSHKeyPairs {\n\t\tif strings.HasSuffix(k, \"private\") {\n\t\t\tprivate_keys[strings.TrimSuffix(k, \"_private\")] = v\n\t\t} else {\n\t\t\tpublic_keys[strings.TrimSuffix(k, \"_public\")] = v\n\t\t}\n\t}\n\n\tfor key, value := range public_keys {\n\t\tc.SSHKeyPairs = append(c.SSHKeyPairs, ssh.KeyPair{\n\t\t\tPublic: ssh.Key(value),\n\t\t\tPrivate: ssh.Key(private_keys[key]),\n\t\t})\n\t}\n\n\tc.Groups = parseGroups(conf.Groups)\n\tc.Users = parseUsers(conf.Users)\n\t\/*\n\t* BUG(yaml.v2): Embedded structs are not unmarshaled properly.\n\t* TODO(tmrts): Use another yaml library or extend identity.User.\n\t*for _, usr := range conf.Users {\n\t* for _, key := range usr.AuthorizedSSHKeys {\n\t* c.AuthorizedKeys[usr.Name] = append(c.AuthorizedKeys[usr.Name], ssh.Key(key))\n\t* }\n\t*}\n\t *\/\n\n\tc.Files = conf.Files\n\n\treturn &c, nil\n}\n\nfunc parseCommands(runcmd []interface{}) (commands [][]string) {\n\tvar cmds []string\n\n\tfor _, cmd := range runcmd {\n\t\tswitch cmd := cmd.(type) {\n\t\tcase string:\n\t\t\tcmds = []string{\"sh\", \"-c\", cmd}\n\t\tcase []interface{}:\n\t\t\tcmds = []string{}\n\t\t\tfor _, s := range cmd {\n\t\t\t\tcmds = append(cmds, s.(string))\n\t\t\t}\n\t\t}\n\n\t\tcommands = append(commands, cmds)\n\t}\n\n\treturn\n}\n\nfunc parseUsers(users []identity.User) map[string]identity.User {\n\tuserMap := make(map[string]identity.User)\n\n\tfor _, user := range users {\n\t\tuserMap[user.Name] = user\n\t}\n\n\treturn userMap\n}\n\nfunc parseGroups(vals []interface{}) map[string][]string {\n\tgroups := make(map[string][]string)\n\n\tfor _, group := range vals {\n\t\tswitch group := group.(type) {\n\t\tcase string:\n\t\t\tgroups[group] = []string{}\n\t\tcase map[interface{}]interface{}:\n\t\t\tfor name, users := range group {\n\t\t\t\tname := name.(string)\n\n\t\t\t\tmembers := []string{}\n\t\t\t\tfor _, m := range users.([]interface{}) {\n\t\t\t\t\tmembers = append(members, m.(string))\n\t\t\t\t}\n\n\t\t\t\tgroups[name] = members\n\t\t\t}\n\t\t}\n\t}\n\n\treturn groups\n}\nImprove cloudconfig pkg documentation\/\/ Package cloudconfig validates and parses a cloud-config data file.\npackage cloudconfig\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\n\t\"github.com\/tmrts\/flamingo\/pkg\/sys\/identity\"\n\t\"github.com\/tmrts\/flamingo\/pkg\/sys\/ssh\"\n\n\t\"gopkg.in\/yaml.v2\"\n)\n\n\/\/ WriteFile represents the write_file directive found in a\n\/\/ cloud-config file. It contains the fields necessary to create\n\/\/ a new file in the system.\ntype WriteFile struct {\n\tPath string\n\tOwner string\n\tPermissions string\n\tEncoding string\n\n\tContent string\n}\n\nvar (\n\tErrNotACloudConfigFile = errors.New(\"cloudconfig: not a cloud-config file\")\n)\n\n\/\/ IsValid reads from the given io.ReadCloser and determines\n\/\/ whether the read contents belong to a valid cloud-config file.\n\/\/ It returns an ErrNotACloudConfigFile if the file is not a valid cloud-config file.\n\/\/ If it encounters an error while reading from the io.ReadCloser it returns the error.\nfunc IsValid(rdr io.ReadCloser) error {\n\tbuf, err := ioutil.ReadAll(rdr)\n\tif err != nil {\n\t\treturn err\n\t}\n\trdr.Close()\n\n\tif contents := string(buf); strings.HasPrefix(contents, \"#cloud-config\\n\") != true {\n\t\treturn ErrNotACloudConfigFile\n\t}\n\n\treturn nil\n}\n\ntype user struct {\n\tidentity.User `yaml:\"\"`\n\tAuthorizedSSHKeys []string `yaml:\"ssh-authorized-keys\"`\n\tSSHImportID string `yaml:\"ssh-import-id\"`\n}\n\ntype cloudConfig struct {\n\tRunCMD []interface{}\n\tAuthorizedKeys []string `yaml:\"ssh_authorized_keys\"`\n\tSSHKeyPairs map[string]string `yaml:\"ssh_keys\"`\n\tUsers []identity.User `yaml:\"users\"`\n\tGroups []interface{} `yaml:\"groups\"`\n\tFiles []WriteFile `yaml:\"write_files\"`\n}\n\n\/\/ Digest is the parsed cloud-config file.\ntype Digest struct {\n\tCommands [][]string\n\tFiles []WriteFile\n\tGroups map[string][]string\n\tUsers map[string]identity.User\n\tAuthorizedKeys map[string][]ssh.Key\n\tSSHKeyPairs []ssh.KeyPair\n}\n\n\/\/ Parse reads from the given io.ReadCloser and\n\/\/ parses read contents of a cloud-config file.\nfunc Parse(rdr io.ReadCloser) (*Digest, error) {\n\tbuf, err := ioutil.ReadAll(rdr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trdr.Close()\n\n\tvar conf cloudConfig\n\tif err := yaml.Unmarshal(buf, &conf); err != nil {\n\t\treturn nil, err\n\t}\n\tvar c Digest\n\n\tc.Commands = parseCommands(conf.RunCMD)\n\n\tc.AuthorizedKeys = make(map[string][]ssh.Key)\n\tfor _, key := range conf.AuthorizedKeys {\n\t\tc.AuthorizedKeys[\"root\"] = append(c.AuthorizedKeys[\"root\"], ssh.Key(key))\n\t}\n\n\tpublic_keys, private_keys := make(map[string]string), make(map[string]string)\n\n\t\/\/ TODO: Extend ssh key syntax beyond cloud-config\n\tfor k, v := range conf.SSHKeyPairs {\n\t\tif strings.HasSuffix(k, \"private\") {\n\t\t\tprivate_keys[strings.TrimSuffix(k, \"_private\")] = v\n\t\t} else {\n\t\t\tpublic_keys[strings.TrimSuffix(k, \"_public\")] = v\n\t\t}\n\t}\n\n\tfor key, value := range public_keys {\n\t\tc.SSHKeyPairs = append(c.SSHKeyPairs, ssh.KeyPair{\n\t\t\tPublic: ssh.Key(value),\n\t\t\tPrivate: ssh.Key(private_keys[key]),\n\t\t})\n\t}\n\n\tc.Groups = parseGroups(conf.Groups)\n\tc.Users = parseUsers(conf.Users)\n\t\/*\n\t* BUG(yaml.v2): Embedded structs are not unmarshaled properly.\n\t* TODO(tmrts): Use another yaml library or extend identity.User.\n\t*for _, usr := range conf.Users {\n\t* for _, key := range usr.AuthorizedSSHKeys {\n\t* c.AuthorizedKeys[usr.Name] = append(c.AuthorizedKeys[usr.Name], ssh.Key(key))\n\t* }\n\t*}\n\t *\/\n\n\tc.Files = conf.Files\n\n\treturn &c, nil\n}\n\nfunc parseCommands(runcmd []interface{}) (commands [][]string) {\n\tvar cmds []string\n\n\tfor _, cmd := range runcmd {\n\t\tswitch cmd := cmd.(type) {\n\t\tcase string:\n\t\t\tcmds = []string{\"sh\", \"-c\", cmd}\n\t\tcase []interface{}:\n\t\t\tcmds = []string{}\n\t\t\tfor _, s := range cmd {\n\t\t\t\tcmds = append(cmds, s.(string))\n\t\t\t}\n\t\t}\n\n\t\tcommands = append(commands, cmds)\n\t}\n\n\treturn\n}\n\nfunc parseUsers(users []identity.User) map[string]identity.User {\n\tuserMap := make(map[string]identity.User)\n\n\tfor _, user := range users {\n\t\tuserMap[user.Name] = user\n\t}\n\n\treturn userMap\n}\n\nfunc parseGroups(vals []interface{}) map[string][]string {\n\tgroups := make(map[string][]string)\n\n\tfor _, group := range vals {\n\t\tswitch group := group.(type) {\n\t\tcase string:\n\t\t\tgroups[group] = []string{}\n\t\tcase map[interface{}]interface{}:\n\t\t\tfor name, users := range group {\n\t\t\t\tname := name.(string)\n\n\t\t\t\tmembers := []string{}\n\t\t\t\tfor _, m := range users.([]interface{}) {\n\t\t\t\t\tmembers = append(members, m.(string))\n\t\t\t\t}\n\n\t\t\t\tgroups[name] = members\n\t\t\t}\n\t\t}\n\t}\n\n\treturn groups\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\ntype Executor struct {\n\tdir string\n\tdata map[string]string\n}\n\nfunc NewExecutor(dir string, data map[string]string) *Executor {\n\treturn &Executor{dir, data}\n}\n\nfunc (this *Executor) Execute(actions ...*Action) error {\n\tfor _, action := range actions {\n\t\tif action.Mode == \"\" || action.Mode == _Mode {\n\t\t\tvar (\n\t\t\t\terr error\n\t\t\t\targs = strings.Split(action.Args, \" \")\n\t\t\t)\n\n\t\t\tswitch action.Name {\n\t\t\tcase \"copy\":\n\t\t\t\terr = this.Copy(args)\n\t\t\tcase \"replace\":\n\t\t\t\terr = this.Replace(args)\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"action [%s] is not supported\", action.Name)\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"execute action error > %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (this *Executor) Copy(args []string) error {\n\tif len(args) != 2 {\n\t\treturn fmt.Errorf(\"copy action take two arguments\")\n\t}\n\n\tsrc := ExpandString(args[0], this.data)\n\tdest := ExpandString(args[1], this.data)\n\n\tif !filepath.IsAbs(src) {\n\t\tsrc = filepath.Join(this.dir, src)\n\t}\n\tif !filepath.IsAbs(dest) {\n\t\tdest = filepath.Join(this.dir, dest)\n\t}\n\n\tif strings.ContainsAny(src, \"*?\") {\n\t\treturn this.CopyFile(src, dest)\n\t}\n\n\tsrcInfo, err := os.Stat(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif srcInfo.IsDir() {\n\t\treturn this.CopyDir(src, dest)\n\t} else {\n\t\treturn this.CopyFile(src, dest)\n\t}\n}\n\nfunc (this *Executor) CopyDir(src, dest string) error {\n\tsrcInfo, err := os.Stat(src)\n\n\tdest = filepath.Join(dest, filepath.Base(src))\n\t_, err = os.Stat(dest)\n\tif os.IsNotExist(err) {\n\t\terr = os.MkdirAll(dest, srcInfo.Mode())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn filepath.Walk(src, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trelatedPath := path[len(src):]\n\t\tdest := filepath.Join(dest, relatedPath)\n\t\tif info.IsDir() {\n\t\t\treturn CreateDir(dest, info.Mode())\n\t\t} else {\n\t\t\treturn CopyFile(path, dest, info.Mode())\n\t\t}\n\t})\n}\n\nfunc (this *Executor) CopyFile(src, dest string) error {\n\t\/\/ ioutil.ReadDir(dirname)\n\tfiles, err := filepath.Glob(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif files == nil {\n\t\treturn nil\n\t}\n\n\terr = CreateDir(dest, os.ModePerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, f := range files {\n\t\tfi, err := os.Stat(f)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = CopyFile(f, filepath.Join(dest, filepath.Base(f)), fi.Mode())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (this *Executor) Replace(args []string) error {\n\tvar useRegexp bool\n\tflags := flag.NewFlagSet(\"action\", flag.ContinueOnError)\n\tflags.BoolVar(&useRegexp, \"r\", false, \"use regular expression\")\n\terr := flags.Parse(args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\targs = flags.Args()\n\tif len(args) != 3 {\n\t\treturn fmt.Errorf(\"replace action should take 3 arguments\")\n\t}\n\n\tpath := ExpandString(args[0], this.data)\n\tif !filepath.IsAbs(path) {\n\t\tpath = filepath.Join(this.dir, path)\n\t}\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif useRegexp {\n\t\treg, err := regexp.Compile(args[1])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdata = reg.ReplaceAll(data, []byte(args[2]))\n\t} else {\n\t\tdata = bytes.Replace(data, []byte(args[1]), []byte(args[2]), -1)\n\t}\n\n\tfi, err := file.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(path, data, fi.Mode())\n}\n\nfunc (this *Executor) GZip(args []string) error {\n\treturn nil\n}\nadd support to execute system commandpackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\ntype Executor struct {\n\tdir string\n\tdata map[string]string\n}\n\nfunc NewExecutor(dir string, data map[string]string) *Executor {\n\treturn &Executor{dir, data}\n}\n\nfunc (this *Executor) Execute(actions ...*Action) error {\n\tfor _, action := range actions {\n\t\tif action.Mode == \"\" || action.Mode == _Mode {\n\t\t\tvar (\n\t\t\t\terr error\n\t\t\t\targs = strings.Split(action.Args, \" \")\n\t\t\t)\n\n\t\t\tswitch action.Name {\n\t\t\tcase \"exec\":\n\t\t\t\terr = this.Exec(args)\n\t\t\tcase \"copy\":\n\t\t\t\terr = this.Copy(args)\n\t\t\tcase \"replace\":\n\t\t\t\terr = this.Replace(args)\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"action [%s] is not supported\", action.Name)\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"execute action error > %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (this *Executor) Exec(args []string) error {\n\tif len(args) == 0 {\n\t\treturn fmt.Errorf(\"no command is specified\")\n\t}\n\n\tfor i := 1; i < len(args); i++ {\n\t\targs[i] = ExpandString(args[i], this.data)\n\t}\n\n\tcmd := exec.Command(args[0], args[1:]...)\n\tcmd.Dir = this.dir\n\n\tif result, err := cmd.CombinedOutput(); err != nil {\n\t\treturn fmt.Errorf(\"%v, output:\\n%s\", err, result)\n\t}\n\n\treturn nil\n}\n\nfunc (this *Executor) Copy(args []string) error {\n\tif len(args) != 2 {\n\t\treturn fmt.Errorf(\"copy action take two arguments\")\n\t}\n\n\tsrc := ExpandString(args[0], this.data)\n\tdest := ExpandString(args[1], this.data)\n\n\tif !filepath.IsAbs(src) {\n\t\tsrc = filepath.Join(this.dir, src)\n\t}\n\tif !filepath.IsAbs(dest) {\n\t\tdest = filepath.Join(this.dir, dest)\n\t}\n\n\tif strings.ContainsAny(src, \"*?\") {\n\t\treturn this.CopyFile(src, dest)\n\t}\n\n\tsrcInfo, err := os.Stat(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif srcInfo.IsDir() {\n\t\treturn this.CopyDir(src, dest)\n\t} else {\n\t\treturn this.CopyFile(src, dest)\n\t}\n}\n\nfunc (this *Executor) CopyDir(src, dest string) error {\n\tsrcInfo, err := os.Stat(src)\n\n\tdest = filepath.Join(dest, filepath.Base(src))\n\t_, err = os.Stat(dest)\n\tif os.IsNotExist(err) {\n\t\terr = os.MkdirAll(dest, srcInfo.Mode())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn filepath.Walk(src, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trelatedPath := path[len(src):]\n\t\tdest := filepath.Join(dest, relatedPath)\n\t\tif info.IsDir() {\n\t\t\treturn CreateDir(dest, info.Mode())\n\t\t} else {\n\t\t\treturn CopyFile(path, dest, info.Mode())\n\t\t}\n\t})\n}\n\nfunc (this *Executor) CopyFile(src, dest string) error {\n\t\/\/ ioutil.ReadDir(dirname)\n\tfiles, err := filepath.Glob(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif files == nil {\n\t\treturn nil\n\t}\n\n\terr = CreateDir(dest, os.ModePerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, f := range files {\n\t\tfi, err := os.Stat(f)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = CopyFile(f, filepath.Join(dest, filepath.Base(f)), fi.Mode())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (this *Executor) Replace(args []string) error {\n\tvar useRegexp bool\n\tflags := flag.NewFlagSet(\"action\", flag.ContinueOnError)\n\tflags.BoolVar(&useRegexp, \"r\", false, \"use regular expression\")\n\terr := flags.Parse(args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\targs = flags.Args()\n\tif len(args) != 3 {\n\t\treturn fmt.Errorf(\"replace action should take 3 arguments\")\n\t}\n\n\tpath := ExpandString(args[0], this.data)\n\tif !filepath.IsAbs(path) {\n\t\tpath = filepath.Join(this.dir, path)\n\t}\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdata, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif useRegexp {\n\t\treg, err := regexp.Compile(args[1])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdata = reg.ReplaceAll(data, []byte(args[2]))\n\t} else {\n\t\tdata = bytes.Replace(data, []byte(args[1]), []byte(args[2]), -1)\n\t}\n\n\tfi, err := file.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(path, data, fi.Mode())\n}\n\nfunc (this *Executor) GZip(args []string) error {\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cpumanager\n\nimport (\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n\n\tcadvisorapi \"github.com\/google\/cadvisor\/info\/v1\"\n\tv1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/cm\/cpumanager\/state\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/cm\/cpumanager\/topology\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/cm\/cpuset\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/cm\/topologymanager\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/cm\/topologymanager\/bitmask\"\n)\n\nfunc TestGetTopologyHints(t *testing.T) {\n\ttestPod1 := makePod(\"fakePod\", \"fakeContainer\", \"2\", \"2\")\n\ttestContainer1 := &testPod1.Spec.Containers[0]\n\ttestPod2 := makePod(\"fakePod\", \"fakeContainer\", \"5\", \"5\")\n\ttestContainer2 := &testPod2.Spec.Containers[0]\n\ttestPod3 := makePod(\"fakePod\", \"fakeContainer\", \"7\", \"7\")\n\ttestContainer3 := &testPod3.Spec.Containers[0]\n\ttestPod4 := makePod(\"fakePod\", \"fakeContainer\", \"11\", \"11\")\n\ttestContainer4 := &testPod4.Spec.Containers[0]\n\n\tfirstSocketMask, _ := bitmask.NewBitMask(0)\n\tsecondSocketMask, _ := bitmask.NewBitMask(1)\n\tcrossSocketMask, _ := bitmask.NewBitMask(0, 1)\n\n\tmachineInfo := cadvisorapi.MachineInfo{\n\t\tNumCores: 12,\n\t\tTopology: []cadvisorapi.Node{\n\t\t\t{Id: 0,\n\t\t\t\tCores: []cadvisorapi.Core{\n\t\t\t\t\t{Id: 0, Threads: []int{0, 6}},\n\t\t\t\t\t{Id: 1, Threads: []int{1, 7}},\n\t\t\t\t\t{Id: 2, Threads: []int{2, 8}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{Id: 1,\n\t\t\t\tCores: []cadvisorapi.Core{\n\t\t\t\t\t{Id: 0, Threads: []int{3, 9}},\n\t\t\t\t\t{Id: 1, Threads: []int{4, 10}},\n\t\t\t\t\t{Id: 2, Threads: []int{5, 11}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tnumaNodeInfo := topology.NUMANodeInfo{\n\t\t0: cpuset.NewCPUSet(0, 6, 1, 7, 2, 8),\n\t\t1: cpuset.NewCPUSet(3, 9, 4, 10, 5, 11),\n\t}\n\n\ttcases := []struct {\n\t\tname string\n\t\tpod v1.Pod\n\t\tcontainer v1.Container\n\t\tassignments state.ContainerCPUAssignments\n\t\tdefaultCPUSet cpuset.CPUSet\n\t\texpectedHints []topologymanager.TopologyHint\n\t}{\n\t\t{\n\t\t\tname: \"Request 2 CPUs, 4 available on NUMA 0, 6 available on NUMA 1\",\n\t\t\tpod: *testPod1,\n\t\t\tcontainer: *testContainer1,\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),\n\t\t\texpectedHints: []topologymanager.TopologyHint{\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: firstSocketMask,\n\t\t\t\t\tPreferred: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: secondSocketMask,\n\t\t\t\t\tPreferred: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: crossSocketMask,\n\t\t\t\t\tPreferred: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Request 5 CPUs, 4 available on NUMA 0, 6 available on NUMA 1\",\n\t\t\tpod: *testPod2,\n\t\t\tcontainer: *testContainer2,\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),\n\t\t\texpectedHints: []topologymanager.TopologyHint{\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: secondSocketMask,\n\t\t\t\t\tPreferred: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: crossSocketMask,\n\t\t\t\t\tPreferred: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Request 7 CPUs, 4 available on NUMA 0, 6 available on NUMA 1\",\n\t\t\tpod: *testPod3,\n\t\t\tcontainer: *testContainer3,\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),\n\t\t\texpectedHints: []topologymanager.TopologyHint{\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: crossSocketMask,\n\t\t\t\t\tPreferred: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Request 11 CPUs, 4 available on NUMA 0, 6 available on NUMA 1\",\n\t\t\tpod: *testPod4,\n\t\t\tcontainer: *testContainer4,\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),\n\t\t\texpectedHints: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"Request 2 CPUs, 1 available on NUMA 0, 1 available on NUMA 1\",\n\t\t\tpod: *testPod1,\n\t\t\tcontainer: *testContainer1,\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(0, 3),\n\t\t\texpectedHints: []topologymanager.TopologyHint{\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: crossSocketMask,\n\t\t\t\t\tPreferred: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Request more CPUs than available\",\n\t\t\tpod: *testPod2,\n\t\t\tcontainer: *testContainer2,\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3),\n\t\t\texpectedHints: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"Regenerate Single-Node NUMA Hints if already allocated 1\/2\",\n\t\t\tpod: *testPod1,\n\t\t\tcontainer: *testContainer1,\n\t\t\tassignments: state.ContainerCPUAssignments{\n\t\t\t\tstring(testPod1.UID): map[string]cpuset.CPUSet{\n\t\t\t\t\ttestContainer1.Name: cpuset.NewCPUSet(0, 6),\n\t\t\t\t},\n\t\t\t},\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(),\n\t\t\texpectedHints: []topologymanager.TopologyHint{\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: firstSocketMask,\n\t\t\t\t\tPreferred: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: crossSocketMask,\n\t\t\t\t\tPreferred: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Regenerate Single-Node NUMA Hints if already allocated 1\/2\",\n\t\t\tpod: *testPod1,\n\t\t\tcontainer: *testContainer1,\n\t\t\tassignments: state.ContainerCPUAssignments{\n\t\t\t\tstring(testPod1.UID): map[string]cpuset.CPUSet{\n\t\t\t\t\ttestContainer1.Name: cpuset.NewCPUSet(3, 9),\n\t\t\t\t},\n\t\t\t},\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(),\n\t\t\texpectedHints: []topologymanager.TopologyHint{\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: secondSocketMask,\n\t\t\t\t\tPreferred: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: crossSocketMask,\n\t\t\t\t\tPreferred: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Regenerate Cross-NUMA Hints if already allocated\",\n\t\t\tpod: *testPod4,\n\t\t\tcontainer: *testContainer4,\n\t\t\tassignments: state.ContainerCPUAssignments{\n\t\t\t\tstring(testPod4.UID): map[string]cpuset.CPUSet{\n\t\t\t\t\ttestContainer4.Name: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10),\n\t\t\t\t},\n\t\t\t},\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(),\n\t\t\texpectedHints: []topologymanager.TopologyHint{\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: crossSocketMask,\n\t\t\t\t\tPreferred: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Requested less than already allocated\",\n\t\t\tpod: *testPod1,\n\t\t\tcontainer: *testContainer1,\n\t\t\tassignments: state.ContainerCPUAssignments{\n\t\t\t\tstring(testPod1.UID): map[string]cpuset.CPUSet{\n\t\t\t\t\ttestContainer1.Name: cpuset.NewCPUSet(0, 6, 3, 9),\n\t\t\t\t},\n\t\t\t},\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(),\n\t\t\texpectedHints: []topologymanager.TopologyHint{},\n\t\t},\n\t\t{\n\t\t\tname: \"Requested more than already allocated\",\n\t\t\tpod: *testPod4,\n\t\t\tcontainer: *testContainer4,\n\t\t\tassignments: state.ContainerCPUAssignments{\n\t\t\t\tstring(testPod4.UID): map[string]cpuset.CPUSet{\n\t\t\t\t\ttestContainer4.Name: cpuset.NewCPUSet(0, 6, 3, 9),\n\t\t\t\t},\n\t\t\t},\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(),\n\t\t\texpectedHints: []topologymanager.TopologyHint{},\n\t\t},\n\t}\n\tfor _, tc := range tcases {\n\t\ttopology, _ := topology.Discover(&machineInfo, numaNodeInfo)\n\n\t\tm := manager{\n\t\t\tpolicy: &staticPolicy{\n\t\t\t\ttopology: topology,\n\t\t\t},\n\t\t\tstate: &mockState{\n\t\t\t\tassignments: tc.assignments,\n\t\t\t\tdefaultCPUSet: tc.defaultCPUSet,\n\t\t\t},\n\t\t\ttopology: topology,\n\t\t\tactivePods: func() []*v1.Pod { return nil },\n\t\t\tpodStatusProvider: mockPodStatusProvider{},\n\t\t\tsourcesReady: &sourcesReadyStub{},\n\t\t}\n\n\t\thints := m.GetTopologyHints(tc.pod, tc.container)[string(v1.ResourceCPU)]\n\t\tif len(tc.expectedHints) == 0 && len(hints) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tsort.SliceStable(hints, func(i, j int) bool {\n\t\t\treturn hints[i].LessThan(hints[j])\n\t\t})\n\t\tsort.SliceStable(tc.expectedHints, func(i, j int) bool {\n\t\t\treturn tc.expectedHints[i].LessThan(tc.expectedHints[j])\n\t\t})\n\t\tif !reflect.DeepEqual(tc.expectedHints, hints) {\n\t\t\tt.Errorf(\"Expected in result to be %v , got %v\", tc.expectedHints, hints)\n\t\t}\n\t}\n}\nAdd proper activePods list in TestGetTopologyHints for CPUManager\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cpumanager\n\nimport (\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n\n\tcadvisorapi \"github.com\/google\/cadvisor\/info\/v1\"\n\tv1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/cm\/cpumanager\/state\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/cm\/cpumanager\/topology\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/cm\/cpuset\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/cm\/topologymanager\"\n\t\"k8s.io\/kubernetes\/pkg\/kubelet\/cm\/topologymanager\/bitmask\"\n)\n\nfunc TestGetTopologyHints(t *testing.T) {\n\ttestPod1 := makePod(\"fakePod\", \"fakeContainer\", \"2\", \"2\")\n\ttestContainer1 := &testPod1.Spec.Containers[0]\n\ttestPod2 := makePod(\"fakePod\", \"fakeContainer\", \"5\", \"5\")\n\ttestContainer2 := &testPod2.Spec.Containers[0]\n\ttestPod3 := makePod(\"fakePod\", \"fakeContainer\", \"7\", \"7\")\n\ttestContainer3 := &testPod3.Spec.Containers[0]\n\ttestPod4 := makePod(\"fakePod\", \"fakeContainer\", \"11\", \"11\")\n\ttestContainer4 := &testPod4.Spec.Containers[0]\n\n\tfirstSocketMask, _ := bitmask.NewBitMask(0)\n\tsecondSocketMask, _ := bitmask.NewBitMask(1)\n\tcrossSocketMask, _ := bitmask.NewBitMask(0, 1)\n\n\tmachineInfo := cadvisorapi.MachineInfo{\n\t\tNumCores: 12,\n\t\tTopology: []cadvisorapi.Node{\n\t\t\t{Id: 0,\n\t\t\t\tCores: []cadvisorapi.Core{\n\t\t\t\t\t{Id: 0, Threads: []int{0, 6}},\n\t\t\t\t\t{Id: 1, Threads: []int{1, 7}},\n\t\t\t\t\t{Id: 2, Threads: []int{2, 8}},\n\t\t\t\t},\n\t\t\t},\n\t\t\t{Id: 1,\n\t\t\t\tCores: []cadvisorapi.Core{\n\t\t\t\t\t{Id: 0, Threads: []int{3, 9}},\n\t\t\t\t\t{Id: 1, Threads: []int{4, 10}},\n\t\t\t\t\t{Id: 2, Threads: []int{5, 11}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tnumaNodeInfo := topology.NUMANodeInfo{\n\t\t0: cpuset.NewCPUSet(0, 6, 1, 7, 2, 8),\n\t\t1: cpuset.NewCPUSet(3, 9, 4, 10, 5, 11),\n\t}\n\n\ttcases := []struct {\n\t\tname string\n\t\tpod v1.Pod\n\t\tcontainer v1.Container\n\t\tassignments state.ContainerCPUAssignments\n\t\tdefaultCPUSet cpuset.CPUSet\n\t\texpectedHints []topologymanager.TopologyHint\n\t}{\n\t\t{\n\t\t\tname: \"Request 2 CPUs, 4 available on NUMA 0, 6 available on NUMA 1\",\n\t\t\tpod: *testPod1,\n\t\t\tcontainer: *testContainer1,\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),\n\t\t\texpectedHints: []topologymanager.TopologyHint{\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: firstSocketMask,\n\t\t\t\t\tPreferred: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: secondSocketMask,\n\t\t\t\t\tPreferred: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: crossSocketMask,\n\t\t\t\t\tPreferred: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Request 5 CPUs, 4 available on NUMA 0, 6 available on NUMA 1\",\n\t\t\tpod: *testPod2,\n\t\t\tcontainer: *testContainer2,\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),\n\t\t\texpectedHints: []topologymanager.TopologyHint{\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: secondSocketMask,\n\t\t\t\t\tPreferred: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: crossSocketMask,\n\t\t\t\t\tPreferred: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Request 7 CPUs, 4 available on NUMA 0, 6 available on NUMA 1\",\n\t\t\tpod: *testPod3,\n\t\t\tcontainer: *testContainer3,\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),\n\t\t\texpectedHints: []topologymanager.TopologyHint{\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: crossSocketMask,\n\t\t\t\t\tPreferred: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Request 11 CPUs, 4 available on NUMA 0, 6 available on NUMA 1\",\n\t\t\tpod: *testPod4,\n\t\t\tcontainer: *testContainer4,\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),\n\t\t\texpectedHints: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"Request 2 CPUs, 1 available on NUMA 0, 1 available on NUMA 1\",\n\t\t\tpod: *testPod1,\n\t\t\tcontainer: *testContainer1,\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(0, 3),\n\t\t\texpectedHints: []topologymanager.TopologyHint{\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: crossSocketMask,\n\t\t\t\t\tPreferred: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Request more CPUs than available\",\n\t\t\tpod: *testPod2,\n\t\t\tcontainer: *testContainer2,\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3),\n\t\t\texpectedHints: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"Regenerate Single-Node NUMA Hints if already allocated 1\/2\",\n\t\t\tpod: *testPod1,\n\t\t\tcontainer: *testContainer1,\n\t\t\tassignments: state.ContainerCPUAssignments{\n\t\t\t\tstring(testPod1.UID): map[string]cpuset.CPUSet{\n\t\t\t\t\ttestContainer1.Name: cpuset.NewCPUSet(0, 6),\n\t\t\t\t},\n\t\t\t},\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(),\n\t\t\texpectedHints: []topologymanager.TopologyHint{\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: firstSocketMask,\n\t\t\t\t\tPreferred: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: crossSocketMask,\n\t\t\t\t\tPreferred: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Regenerate Single-Node NUMA Hints if already allocated 1\/2\",\n\t\t\tpod: *testPod1,\n\t\t\tcontainer: *testContainer1,\n\t\t\tassignments: state.ContainerCPUAssignments{\n\t\t\t\tstring(testPod1.UID): map[string]cpuset.CPUSet{\n\t\t\t\t\ttestContainer1.Name: cpuset.NewCPUSet(3, 9),\n\t\t\t\t},\n\t\t\t},\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(),\n\t\t\texpectedHints: []topologymanager.TopologyHint{\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: secondSocketMask,\n\t\t\t\t\tPreferred: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: crossSocketMask,\n\t\t\t\t\tPreferred: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Regenerate Cross-NUMA Hints if already allocated\",\n\t\t\tpod: *testPod4,\n\t\t\tcontainer: *testContainer4,\n\t\t\tassignments: state.ContainerCPUAssignments{\n\t\t\t\tstring(testPod4.UID): map[string]cpuset.CPUSet{\n\t\t\t\t\ttestContainer4.Name: cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10),\n\t\t\t\t},\n\t\t\t},\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(),\n\t\t\texpectedHints: []topologymanager.TopologyHint{\n\t\t\t\t{\n\t\t\t\t\tNUMANodeAffinity: crossSocketMask,\n\t\t\t\t\tPreferred: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Requested less than already allocated\",\n\t\t\tpod: *testPod1,\n\t\t\tcontainer: *testContainer1,\n\t\t\tassignments: state.ContainerCPUAssignments{\n\t\t\t\tstring(testPod1.UID): map[string]cpuset.CPUSet{\n\t\t\t\t\ttestContainer1.Name: cpuset.NewCPUSet(0, 6, 3, 9),\n\t\t\t\t},\n\t\t\t},\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(),\n\t\t\texpectedHints: []topologymanager.TopologyHint{},\n\t\t},\n\t\t{\n\t\t\tname: \"Requested more than already allocated\",\n\t\t\tpod: *testPod4,\n\t\t\tcontainer: *testContainer4,\n\t\t\tassignments: state.ContainerCPUAssignments{\n\t\t\t\tstring(testPod4.UID): map[string]cpuset.CPUSet{\n\t\t\t\t\ttestContainer4.Name: cpuset.NewCPUSet(0, 6, 3, 9),\n\t\t\t\t},\n\t\t\t},\n\t\t\tdefaultCPUSet: cpuset.NewCPUSet(),\n\t\t\texpectedHints: []topologymanager.TopologyHint{},\n\t\t},\n\t}\n\tfor _, tc := range tcases {\n\t\ttopology, _ := topology.Discover(&machineInfo, numaNodeInfo)\n\n\t\tvar activePods []*v1.Pod\n\t\tfor p := range tc.assignments {\n\t\t\tpod := v1.Pod{}\n\t\t\tpod.UID = types.UID(p)\n\t\t\tfor c := range tc.assignments[p] {\n\t\t\t\tcontainer := v1.Container{}\n\t\t\t\tcontainer.Name = c\n\t\t\t\tpod.Spec.Containers = append(pod.Spec.Containers, container)\n\t\t\t}\n\t\t\tactivePods = append(activePods, &pod)\n\t\t}\n\n\t\tm := manager{\n\t\t\tpolicy: &staticPolicy{\n\t\t\t\ttopology: topology,\n\t\t\t},\n\t\t\tstate: &mockState{\n\t\t\t\tassignments: tc.assignments,\n\t\t\t\tdefaultCPUSet: tc.defaultCPUSet,\n\t\t\t},\n\t\t\ttopology: topology,\n\t\t\tactivePods: func() []*v1.Pod { return activePods },\n\t\t\tpodStatusProvider: mockPodStatusProvider{},\n\t\t\tsourcesReady: &sourcesReadyStub{},\n\t\t}\n\n\t\thints := m.GetTopologyHints(tc.pod, tc.container)[string(v1.ResourceCPU)]\n\t\tif len(tc.expectedHints) == 0 && len(hints) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tsort.SliceStable(hints, func(i, j int) bool {\n\t\t\treturn hints[i].LessThan(hints[j])\n\t\t})\n\t\tsort.SliceStable(tc.expectedHints, func(i, j int) bool {\n\t\t\treturn tc.expectedHints[i].LessThan(tc.expectedHints[j])\n\t\t})\n\t\tif !reflect.DeepEqual(tc.expectedHints, hints) {\n\t\t\tt.Errorf(\"Expected in result to be %v , got %v\", tc.expectedHints, hints)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"package main\n\n\/\/FilterChannel cuts off unneeded images\nfunc FilterChannel(in ImageCh) (out ImageCh) {\n\n\tif opts.Filter {\n\t\treturn in\n\t}\n\tout = make(ImageCh)\n\tgo func() {\n\t\tfor imgdata := range in {\n\n\t\t\tif imgdata.Score >= opts.Score {\n\t\t\t\tout <- imgdata\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\telog.Println(\"Filtering \" + imgdata.Filename)\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}\nLogging in filter crashes the program. No idea whypackage main\n\n\/\/FilterChannel cuts off unneeded images\nfunc FilterChannel(in ImageCh) (out ImageCh) {\n\t\/\/elog.Println(\"filtering\")\n\tif !opts.Filter {\n\t\t\/\/elog.Println(\"Filter is off\")\n\t\treturn in\n\t}\n\t\/\/elog.Println(\"Filter is on\")\n\tout = make(ImageCh, 1)\n\tgo func() {\n\t\tfor imgdata := range in {\n\n\t\t\tif imgdata.Score >= opts.Score {\n\t\t\t\tout <- imgdata\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/elog.Println(\"Filtering \" + imgdata.Filename)\n\t\t}\n\t\tclose(out)\n\t}()\n\treturn out\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage defaulttolerationseconds\n\nimport (\n\t\"testing\"\n\n\t\"k8s.io\/apiserver\/pkg\/admission\"\n\tapi \"k8s.io\/kubernetes\/pkg\/apis\/core\"\n\t\"k8s.io\/kubernetes\/pkg\/apis\/core\/helper\"\n\t\"k8s.io\/kubernetes\/plugin\/pkg\/scheduler\/algorithm\"\n)\n\nfunc TestForgivenessAdmission(t *testing.T) {\n\tvar defaultTolerationSeconds int64 = 300\n\n\tgenTolerationSeconds := func(s int64) *int64 {\n\t\treturn &s\n\t}\n\n\thandler := NewDefaultTolerationSeconds()\n\ttests := []struct {\n\t\tdescription string\n\t\trequestedPod api.Pod\n\t\texpectedPod api.Pod\n\t}{\n\t\t{\n\t\t\tdescription: \"pod has no tolerations, expect add tolerations for `notReady:NoExecute` and `unreachable:NoExecute`\",\n\t\t\trequestedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{},\n\t\t\t},\n\t\t\texpectedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeNotReady,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: &defaultTolerationSeconds,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeUnreachable,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: &defaultTolerationSeconds,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"pod has tolerations, but none is for taint `notReady:NoExecute` or `unreachable:NoExecute`, expect add tolerations for `notReady:NoExecute` and `unreachable:NoExecute`\",\n\t\t\trequestedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: \"foo\",\n\t\t\t\t\t\t\tOperator: api.TolerationOpEqual,\n\t\t\t\t\t\t\tValue: \"bar\",\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoSchedule,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: \"foo\",\n\t\t\t\t\t\t\tOperator: api.TolerationOpEqual,\n\t\t\t\t\t\t\tValue: \"bar\",\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoSchedule,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeNotReady,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: &defaultTolerationSeconds,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeUnreachable,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: &defaultTolerationSeconds,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"pod specified a toleration for taint `notReady:NoExecute`, expect add toleration for `unreachable:NoExecute`\",\n\t\t\trequestedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeNotReady,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeNotReady,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeUnreachable,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: &defaultTolerationSeconds,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"pod specified a toleration for taint `unreachable:NoExecute`, expect add toleration for `notReady:NoExecute`\",\n\t\t\trequestedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeUnreachable,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeUnreachable,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeNotReady,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: &defaultTolerationSeconds,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"pod specified tolerations for both `notReady:NoExecute` and `unreachable:NoExecute`, expect no change\",\n\t\t\trequestedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeNotReady,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeUnreachable,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeNotReady,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeUnreachable,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"pod specified toleration for taint `unreachable`, expect add toleration for `notReady:NoExecute`\",\n\t\t\trequestedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeUnreachable,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeUnreachable,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeNotReady,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(300),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"pod has wildcard toleration for all kind of taints, expect no change\",\n\t\t\trequestedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{Operator: api.TolerationOpExists, TolerationSeconds: genTolerationSeconds(700)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\terr := handler.Admit(admission.NewAttributesRecord(&test.requestedPod, nil, api.Kind(\"Pod\").WithVersion(\"version\"), \"foo\", \"name\", api.Resource(\"pods\").WithVersion(\"version\"), \"\", \"ignored\", nil))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"[%s]: unexpected error %v for pod %+v\", test.description, err, test.requestedPod)\n\t\t}\n\n\t\tif !helper.Semantic.DeepEqual(test.expectedPod.Annotations, test.requestedPod.Annotations) {\n\t\t\tt.Errorf(\"[%s]: expected %#v got %#v\", test.description, test.expectedPod.Annotations, test.requestedPod.Annotations)\n\t\t}\n\t}\n}\n\nfunc TestHandles(t *testing.T) {\n\thandler := NewDefaultTolerationSeconds()\n\ttests := map[admission.Operation]bool{\n\t\tadmission.Update: true,\n\t\tadmission.Create: true,\n\t\tadmission.Delete: false,\n\t\tadmission.Connect: false,\n\t}\n\tfor op, expected := range tests {\n\t\tresult := handler.Handles(op)\n\t\tif result != expected {\n\t\t\tt.Errorf(\"Unexpected result for operation %s: %v\\n\", op, result)\n\t\t}\n\t}\n}\nFix TestForgivenessAdmission.\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage defaulttolerationseconds\n\nimport (\n\t\"testing\"\n\n\t\"k8s.io\/apiserver\/pkg\/admission\"\n\tapi \"k8s.io\/kubernetes\/pkg\/apis\/core\"\n\t\"k8s.io\/kubernetes\/pkg\/apis\/core\/helper\"\n\t\"k8s.io\/kubernetes\/plugin\/pkg\/scheduler\/algorithm\"\n)\n\nfunc TestForgivenessAdmission(t *testing.T) {\n\tvar defaultTolerationSeconds int64 = 300\n\n\tgenTolerationSeconds := func(s int64) *int64 {\n\t\treturn &s\n\t}\n\n\thandler := NewDefaultTolerationSeconds()\n\ttests := []struct {\n\t\tdescription string\n\t\trequestedPod api.Pod\n\t\texpectedPod api.Pod\n\t}{\n\t\t{\n\t\t\tdescription: \"pod has no tolerations, expect add tolerations for `notReady:NoExecute` and `unreachable:NoExecute`\",\n\t\t\trequestedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{},\n\t\t\t},\n\t\t\texpectedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeNotReady,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: &defaultTolerationSeconds,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeUnreachable,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: &defaultTolerationSeconds,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"pod has tolerations, but none is for taint `notReady:NoExecute` or `unreachable:NoExecute`, expect add tolerations for `notReady:NoExecute` and `unreachable:NoExecute`\",\n\t\t\trequestedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: \"foo\",\n\t\t\t\t\t\t\tOperator: api.TolerationOpEqual,\n\t\t\t\t\t\t\tValue: \"bar\",\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoSchedule,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: \"foo\",\n\t\t\t\t\t\t\tOperator: api.TolerationOpEqual,\n\t\t\t\t\t\t\tValue: \"bar\",\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoSchedule,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeNotReady,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: &defaultTolerationSeconds,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeUnreachable,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: &defaultTolerationSeconds,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"pod specified a toleration for taint `notReady:NoExecute`, expect add toleration for `unreachable:NoExecute`\",\n\t\t\trequestedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeNotReady,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeNotReady,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeUnreachable,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: &defaultTolerationSeconds,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"pod specified a toleration for taint `unreachable:NoExecute`, expect add toleration for `notReady:NoExecute`\",\n\t\t\trequestedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeUnreachable,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeUnreachable,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeNotReady,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: &defaultTolerationSeconds,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"pod specified tolerations for both `notReady:NoExecute` and `unreachable:NoExecute`, expect no change\",\n\t\t\trequestedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeNotReady,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeUnreachable,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeNotReady,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeUnreachable,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"pod specified toleration for taint `unreachable`, expect add toleration for `notReady:NoExecute`\",\n\t\t\trequestedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeUnreachable,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeUnreachable,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey: algorithm.TaintNodeNotReady,\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tEffect: api.TaintEffectNoExecute,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(300),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdescription: \"pod has wildcard toleration for all kind of taints, expect no change\",\n\t\t\trequestedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{Operator: api.TolerationOpExists, TolerationSeconds: genTolerationSeconds(700)},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedPod: api.Pod{\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tTolerations: []api.Toleration{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\t\tTolerationSeconds: genTolerationSeconds(700),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\terr := handler.Admit(admission.NewAttributesRecord(&test.requestedPod, nil, api.Kind(\"Pod\").WithVersion(\"version\"), \"foo\", \"name\", api.Resource(\"pods\").WithVersion(\"version\"), \"\", \"ignored\", nil))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"[%s]: unexpected error %v for pod %+v\", test.description, err, test.requestedPod)\n\t\t}\n\n\t\tif !helper.Semantic.DeepEqual(test.expectedPod.Spec.Tolerations, test.requestedPod.Spec.Tolerations) {\n\t\t\tt.Errorf(\"[%s]: expected %#v got %#v\", test.description, test.expectedPod.Spec.Tolerations, test.requestedPod.Spec.Tolerations)\n\t\t}\n\t}\n}\n\nfunc TestHandles(t *testing.T) {\n\thandler := NewDefaultTolerationSeconds()\n\ttests := map[admission.Operation]bool{\n\t\tadmission.Update: true,\n\t\tadmission.Create: true,\n\t\tadmission.Delete: false,\n\t\tadmission.Connect: false,\n\t}\n\tfor op, expected := range tests {\n\t\tresult := handler.Handles(op)\n\t\tif result != expected {\n\t\t\tt.Errorf(\"Unexpected result for operation %s: %v\\n\", op, result)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"9d02b9b0-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"e5c50886-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"349ef35e-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"6eb9c498-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"a55a920e-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"50d08836-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"4fc994d8-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"17f5b85a-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"5179df3a-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"ddf8b708-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"d861cfb4-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"e2d23bc8-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"0f536334-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"aec40cc6-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"ef6054fe-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"27ead9b2-2e56-11e5-9284-b827eb9e62be27f002c0-2e56-11e5-9284-b827eb9e62be27f002c0-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"2e9ac8e8-2e57-11e5-9284-b827eb9e62be2e9fea58-2e57-11e5-9284-b827eb9e62be2e9fea58-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"318fd2f0-2e57-11e5-9284-b827eb9e62be3194f1b8-2e57-11e5-9284-b827eb9e62be3194f1b8-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"626d0c5e-2e56-11e5-9284-b827eb9e62be62722de2-2e56-11e5-9284-b827eb9e62be62722de2-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"c333c1a0-2e55-11e5-9284-b827eb9e62bec338dbae-2e55-11e5-9284-b827eb9e62bec338dbae-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"dc6e62b4-2e56-11e5-9284-b827eb9e62bedc737dda-2e56-11e5-9284-b827eb9e62bedc737dda-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"952249d4-2e56-11e5-9284-b827eb9e62be95276c5c-2e56-11e5-9284-b827eb9e62be95276c5c-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"package lexer\n\nimport \"fmt\"\n\ntype tokenType int\n\nconst (\n\tNAME tokenType = iota\n\tHYPHEN\n\tCOLON\n\tCOMMA\n)\n\ntype Token struct {\n\tType tokenType\n\tText string\n}\nMake Token printable.package lexer\n\nimport \"fmt\"\n\ntype tokenType int\n\nconst (\n\tNAME tokenType = iota\n\tHYPHEN\n\tCOLON\n\tCOMMA\n)\n\ntype Token struct {\n\tType tokenType\n\tText string\n}\n\nfunc (t Token) String() string {\n\tvar typeStr string\n\tswitch t.Type {\n\tcase NAME:\n\t\ttypeStr = \"NAME\"\n\tcase HYPHEN:\n\t\ttypeStr = \"HYPHEN\"\n\tcase COLON:\n\t\ttypeStr = \"COLON\"\n\tcase COMMA:\n\t\ttypeStr = \"COMMA\"\n\t}\n\treturn fmt.Sprintf(\"Token(%s, %s)\", typeStr, t.Text)\n}\n<|endoftext|>"} {"text":"package blockexplorer\n\nimport (\n\t\"errors\"\n\n\t\"github.com\/NebulousLabs\/Sia\/build\"\n\t\"github.com\/NebulousLabs\/Sia\/crypto\"\n\t\"github.com\/NebulousLabs\/Sia\/encoding\"\n\t\"github.com\/NebulousLabs\/Sia\/modules\"\n\t\"github.com\/NebulousLabs\/Sia\/types\"\n\t\"github.com\/boltdb\/bolt\"\n)\n\n\/\/ addHashType adds an entry in the Hashes bucket for identifing that hash\nfunc addHashType(tx *bolt.Tx, hash crypto.Hash, hashType int) error {\n\tb := tx.Bucket([]byte(\"Hashes\"))\n\tif b == nil {\n\t\treturn errors.New(\"bucket Hashes does not exist\")\n\t}\n\n\treturn b.Put(encoding.Marshal(hash), encoding.Marshal(hashType))\n}\n\n\/\/ addAddress either creates a new list of transactions for the given\n\/\/ address, or adds the txid to the list if such a list already exists\nfunc addAddress(tx *bolt.Tx, addr types.UnlockHash, txid crypto.Hash) error {\n\terr := addHashType(tx, crypto.Hash(addr), hashUnlockHash)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb := tx.Bucket([]byte(\"Addresses\"))\n\tif b == nil {\n\t\treturn errors.New(\"Addresses bucket does not exist\")\n\t}\n\n\ttxBytes := b.Get(encoding.Marshal(addr))\n\tif txBytes == nil {\n\t\terr := b.Put(encoding.Marshal(addr), encoding.Marshal([]crypto.Hash{txid}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\tvar txns []crypto.Hash\n\terr = encoding.Unmarshal(txBytes, &txns)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttxns = append(txns, txid)\n\n\treturn b.Put(encoding.Marshal(addr), encoding.Marshal(txns))\n}\n\n\/\/ addSiacoinInput changes an existing outputTransactions struct to\n\/\/ point to the place where that output was used\nfunc addSiacoinInput(tx *bolt.Tx, outputID types.SiacoinOutputID, txid crypto.Hash) error {\n\tb := tx.Bucket([]byte(\"SiacoinOutputs\"))\n\tif b == nil {\n\t\treturn errors.New(\"bucket SiacoinOutputs does not exist\")\n\t}\n\n\toutputBytes := b.Get(encoding.Marshal(outputID))\n\tif outputBytes == nil {\n\t\treturn errors.New(\"output for id does not exist\")\n\t}\n\n\tvar ot outputTransactions\n\terr := encoding.Unmarshal(outputBytes, &ot)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tot.InputTx = txid\n\n\treturn b.Put(encoding.Marshal(outputID), encoding.Marshal(ot))\n}\n\n\/\/ addSiafundInpt does the same thing as addSiacoinInput except with siafunds\nfunc addSiafundInput(tx *bolt.Tx, outputID types.SiafundOutputID, txid crypto.Hash) error {\n\tb := tx.Bucket([]byte(\"SiafundOutputs\"))\n\tif b == nil {\n\t\treturn errors.New(\"bucket SaifundOutputs does not exist\")\n\t}\n\n\toutputBytes := b.Get(encoding.Marshal(outputID))\n\tif outputBytes == nil {\n\t\treturn errors.New(\"output for id does not exist\")\n\t}\n\n\tvar ot outputTransactions\n\terr := encoding.Unmarshal(outputBytes, &ot)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tot.InputTx = txid\n\n\treturn b.Put(encoding.Marshal(outputID), encoding.Marshal(ot))\n}\n\n\/\/ addFcRevision changes an existing fcInfo struct to contain the txid\n\/\/ of the contract revision\nfunc addFcRevision(tx *bolt.Tx, fcid types.FileContractID, txid crypto.Hash) error {\n\tb := tx.Bucket([]byte(\"FileContracts\"))\n\tif b == nil {\n\t\treturn errors.New(\"bucket FileContracts does not exist\")\n\t}\n\n\tfiBytes := b.Get(encoding.Marshal(fcid))\n\tif fiBytes == nil {\n\t\treturn errors.New(\"filecontract does not exist in database\")\n\t}\n\n\tvar fi fcInfo\n\terr := encoding.Unmarshal(fiBytes, &fi)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfi.Revisions = append(fi.Revisions, txid)\n\n\treturn b.Put(encoding.Marshal(fcid), encoding.Marshal(fi))\n}\n\n\/\/ addFcProof changes an existing fcInfo struct in the database to\n\/\/ contain the txid of its storage proof\nfunc addFcProof(tx *bolt.Tx, fcid types.FileContractID, txid crypto.Hash) error {\n\tb := tx.Bucket([]byte(\"FileContracts\"))\n\tif b == nil {\n\t\treturn errors.New(\"bucket FileContracts does not exist\")\n\t}\n\n\tfiBytes := b.Get(encoding.Marshal(fcid))\n\tif fiBytes == nil {\n\t\treturn errors.New(\"filecontract does not exist in database\")\n\t}\n\n\tvar fi fcInfo\n\terr := encoding.Unmarshal(fiBytes, &fi)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfi.Proof = txid\n\n\treturn b.Put(encoding.Marshal(fcid), encoding.Marshal(fi))\n}\n\n\/\/ addNewOutput creats a new outputTransactions struct and adds it to the database\nfunc addNewOutput(tx *bolt.Tx, outputID types.SiacoinOutputID, txid crypto.Hash) error {\n\terr := addHashType(tx, crypto.Hash(outputID), hashCoinOutputID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb := tx.Bucket([]byte(\"SiacoinOutputs\"))\n\tif b == nil {\n\t\treturn errors.New(\"bucket SiacoinOutputs does not exist\")\n\t}\n\n\treturn b.Put(encoding.Marshal(outputID), encoding.Marshal(outputTransactions{\n\t\tOutputTx: txid,\n\t}))\n}\n\n\/\/ addNewSFOutput does the same thing as addNewOutput does, except for siafunds\nfunc addNewSFOutput(tx *bolt.Tx, outputID types.SiafundOutputID, txid crypto.Hash) error {\n\tb := tx.Bucket([]byte(\"SiafundOutputs\"))\n\tif b == nil {\n\t\treturn errors.New(\"bucket SaifundOutputs does not exist\")\n\t}\n\n\treturn b.Put(encoding.Marshal(outputID), encoding.Marshal(outputTransactions{\n\t\tOutputTx: txid,\n\t}))\n}\n\n\/\/ addBlock creates a new blockData struct containing a block and adds\n\/\/ it to the database\nfunc addBlock(tx *bolt.Tx, id types.BlockID, bd blockData) error {\n\tb := tx.Bucket([]byte(\"Blocks\"))\n\tif b == nil {\n\t\treturn errors.New(\"bucket Blocks does not exist\")\n\t}\n\n\treturn b.Put(encoding.Marshal(id), encoding.Marshal(bd))\n}\n\n\/\/ addTxid creates a new txInfo struct and adds it to the database\nfunc addTxid(tx *bolt.Tx, txid crypto.Hash, ti txInfo) error {\n\terr := addHashType(tx, txid, hashTransaction)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb := tx.Bucket([]byte(\"Transactions\"))\n\tif b == nil {\n\t\treturn errors.New(\"bucket Transactions does not exist\")\n\t}\n\n\treturn b.Put(encoding.Marshal(txid), encoding.Marshal(ti))\n}\n\n\/\/ addFcid creates a new fcInfo struct about a file contract and adds\n\/\/ it to the database\nfunc addFcid(tx *bolt.Tx, fcid types.FileContractID, fi fcInfo) error {\n\terr := addHashType(tx, crypto.Hash(fcid), hashFilecontract)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb := tx.Bucket([]byte(\"FileContracts\"))\n\tif b == nil {\n\t\treturn errors.New(\"bucket FileContracts does not exist\")\n\t}\n\n\treturn b.Put(encoding.Marshal(fcid), encoding.Marshal(fi))\n}\n\n\/\/ addHeight adds a block summary (modules.ExplorerBlockData) to the\n\/\/ database with a height as the key\nfunc addHeight(tx *bolt.Tx, height types.BlockHeight, bs modules.ExplorerBlockData) error {\n\tb := tx.Bucket([]byte(\"Heights\"))\n\tif b == nil {\n\t\treturn errors.New(\"bucket Blocks does not exist\")\n\t}\n\n\treturn b.Put(encoding.Marshal(height), encoding.Marshal(bs))\n}\n\n\/\/ addBlockDB parses a block and adds it to the database\nfunc (be *BlockExplorer) addBlockDB(b types.Block) error {\n\t\/\/ Special case for the genesis block, which does not have a\n\t\/\/ valid parent, and for testing, as tests will not always use\n\t\/\/ blocks in consensus\n\tvar blocktarget types.Target\n\tif b.ID() == be.genesisBlockID {\n\t\tblocktarget = types.RootDepth\n\t} else {\n\t\tvar exists bool\n\t\tblocktarget, exists = be.cs.ChildTarget(b.ParentID)\n\t\tif build.DEBUG {\n\t\t\tif build.Release == \"testing\" {\n\t\t\t\tblocktarget = types.RootDepth\n\t\t\t}\n\t\t\tif !exists {\n\t\t\t\tpanic(\"Applied block not in consensus\")\n\t\t\t}\n\n\t\t}\n\t}\n\n\ttx, err := be.db.Begin(true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Rollback()\n\n\t\/\/ Construct the struct that will be inside the database\n\tblockStruct := blockData{\n\t\tBlock: b,\n\t\tHeight: be.blockchainHeight,\n\t}\n\n\terr = addBlock(tx, b.ID(), blockStruct)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbSum := modules.ExplorerBlockData{\n\t\tID: b.ID(),\n\t\tTimestamp: b.Timestamp,\n\t\tTarget: blocktarget,\n\t\tSize: uint64(len(encoding.Marshal(b))),\n\t}\n\n\terr = addHeight(tx, be.blockchainHeight, bSum)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = addHashType(tx, crypto.Hash(b.ID()), hashBlock)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Insert the miner payouts as new outputs\n\tfor i, payout := range b.MinerPayouts {\n\t\terr = addAddress(tx, payout.UnlockHash, crypto.Hash(b.ID()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = addNewOutput(tx, b.MinerPayoutID(i), crypto.Hash(b.ID()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Insert each transaction\n\tfor i, txn := range b.Transactions {\n\t\terr = addTxid(tx, txn.ID(), txInfo{b.ID(), i})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = be.addTransaction(tx, txn)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn tx.Commit()\n}\n\n\/\/ addTransaction is called from addBlockDB, and delegates the adding\n\/\/ of information to the database to the functions defined above\nfunc (be *BlockExplorer) addTransaction(btx *bolt.Tx, tx types.Transaction) error {\n\t\/\/ Store this for quick lookup\n\ttxid := tx.ID()\n\n\t\/\/ Append each input to the list of modifications\n\tfor _, input := range tx.SiacoinInputs {\n\t\terr := addSiacoinInput(btx, input.ParentID, txid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Handle all the transaction outputs\n\tfor i, output := range tx.SiacoinOutputs {\n\t\terr := addAddress(btx, output.UnlockHash, txid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = addNewOutput(btx, tx.SiacoinOutputID(i), txid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Handle each file contract individually\n\tfor i, contract := range tx.FileContracts {\n\t\tfcid := tx.FileContractID(i)\n\t\terr := addFcid(btx, fcid, fcInfo{\n\t\t\tContract: txid,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor j, output := range contract.ValidProofOutputs {\n\t\t\terr = addAddress(btx, output.UnlockHash, txid)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = addNewOutput(btx, fcid.StorageProofOutputID(true, j), txid)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tfor j, output := range contract.MissedProofOutputs {\n\t\t\terr = addAddress(btx, output.UnlockHash, txid)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = addNewOutput(btx, fcid.StorageProofOutputID(false, j), txid)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\terr = addAddress(btx, contract.UnlockHash, txid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Update the list of revisions\n\tfor _, revision := range tx.FileContractRevisions {\n\t\terr := addFcRevision(btx, revision.ParentID, txid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Note the old outputs will still be there in the\n\t\t\/\/ database. This is to provide information to the\n\t\t\/\/ people who may just need it.\n\t\tfor i, output := range revision.NewValidProofOutputs {\n\t\t\terr = addAddress(btx, output.UnlockHash, txid)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = addNewOutput(btx, revision.ParentID.StorageProofOutputID(true, i), txid)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tfor i, output := range revision.NewMissedProofOutputs {\n\t\t\terr = addAddress(btx, output.UnlockHash, txid)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = addNewOutput(btx, revision.ParentID.StorageProofOutputID(false, i), txid)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\taddAddress(btx, revision.NewUnlockHash, txid)\n\t}\n\n\t\/\/ Update the list of storage proofs\n\tfor _, proof := range tx.StorageProofs {\n\t\terr := addFcProof(btx, proof.ParentID, txid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Append all the siafund inputs to the modification list\n\tfor _, input := range tx.SiafundInputs {\n\t\terr := addSiafundInput(btx, input.ParentID, txid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Handle all the siafund outputs\n\tfor i, output := range tx.SiafundOutputs {\n\t\terr := addAddress(btx, output.UnlockHash, txid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = addNewSFOutput(btx, tx.SiafundOutputID(i), txid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\treturn addHashType(btx, txid, hashTransaction)\n}\nCombine helper functions in addblock.gopackage blockexplorer\n\nimport (\n\t\"errors\"\n\n\t\"github.com\/NebulousLabs\/Sia\/build\"\n\t\"github.com\/NebulousLabs\/Sia\/crypto\"\n\t\"github.com\/NebulousLabs\/Sia\/encoding\"\n\t\"github.com\/NebulousLabs\/Sia\/modules\"\n\t\"github.com\/NebulousLabs\/Sia\/types\"\n\t\"github.com\/boltdb\/bolt\"\n)\n\n\/\/ addHashType adds an entry in the Hashes bucket for identifing that hash\nfunc addHashType(tx *bolt.Tx, hash crypto.Hash, hashType int) error {\n\tb := tx.Bucket([]byte(\"Hashes\"))\n\tif b == nil {\n\t\treturn errors.New(\"bucket Hashes does not exist\")\n\t}\n\n\treturn b.Put(encoding.Marshal(hash), encoding.Marshal(hashType))\n}\n\n\/\/ addAddress either creates a new list of transactions for the given\n\/\/ address, or adds the txid to the list if such a list already exists\nfunc addAddress(tx *bolt.Tx, addr types.UnlockHash, txid crypto.Hash) error {\n\terr := addHashType(tx, crypto.Hash(addr), hashUnlockHash)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb := tx.Bucket([]byte(\"Addresses\"))\n\tif b == nil {\n\t\treturn errors.New(\"Addresses bucket does not exist\")\n\t}\n\n\ttxBytes := b.Get(encoding.Marshal(addr))\n\tif txBytes == nil {\n\t\terr := b.Put(encoding.Marshal(addr), encoding.Marshal([]crypto.Hash{txid}))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\tvar txns []crypto.Hash\n\terr = encoding.Unmarshal(txBytes, &txns)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttxns = append(txns, txid)\n\n\treturn b.Put(encoding.Marshal(addr), encoding.Marshal(txns))\n}\n\n\/\/ addSiacoinInput changes an existing outputTransactions struct to\n\/\/ point to the place where that output was used\nfunc addSiacoinInput(tx *bolt.Tx, outputID types.SiacoinOutputID, txid crypto.Hash) error {\n\tb := tx.Bucket([]byte(\"SiacoinOutputs\"))\n\tif b == nil {\n\t\treturn errors.New(\"bucket SiacoinOutputs does not exist\")\n\t}\n\n\toutputBytes := b.Get(encoding.Marshal(outputID))\n\tif outputBytes == nil {\n\t\treturn errors.New(\"output for id does not exist\")\n\t}\n\n\tvar ot outputTransactions\n\terr := encoding.Unmarshal(outputBytes, &ot)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tot.InputTx = txid\n\n\treturn b.Put(encoding.Marshal(outputID), encoding.Marshal(ot))\n}\n\n\/\/ addSiafundInpt does the same thing as addSiacoinInput except with siafunds\nfunc addSiafundInput(tx *bolt.Tx, outputID types.SiafundOutputID, txid crypto.Hash) error {\n\tb := tx.Bucket([]byte(\"SiafundOutputs\"))\n\tif b == nil {\n\t\treturn errors.New(\"bucket SaifundOutputs does not exist\")\n\t}\n\n\toutputBytes := b.Get(encoding.Marshal(outputID))\n\tif outputBytes == nil {\n\t\treturn errors.New(\"output for id does not exist\")\n\t}\n\n\tvar ot outputTransactions\n\terr := encoding.Unmarshal(outputBytes, &ot)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tot.InputTx = txid\n\n\treturn b.Put(encoding.Marshal(outputID), encoding.Marshal(ot))\n}\n\n\/\/ addFcRevision changes an existing fcInfo struct to contain the txid\n\/\/ of the contract revision\nfunc addFcRevision(tx *bolt.Tx, fcid types.FileContractID, txid crypto.Hash) error {\n\tb := tx.Bucket([]byte(\"FileContracts\"))\n\tif b == nil {\n\t\treturn errors.New(\"bucket FileContracts does not exist\")\n\t}\n\n\tfiBytes := b.Get(encoding.Marshal(fcid))\n\tif fiBytes == nil {\n\t\treturn errors.New(\"filecontract does not exist in database\")\n\t}\n\n\tvar fi fcInfo\n\terr := encoding.Unmarshal(fiBytes, &fi)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfi.Revisions = append(fi.Revisions, txid)\n\n\treturn b.Put(encoding.Marshal(fcid), encoding.Marshal(fi))\n}\n\n\/\/ addFcProof changes an existing fcInfo struct in the database to\n\/\/ contain the txid of its storage proof\nfunc addFcProof(tx *bolt.Tx, fcid types.FileContractID, txid crypto.Hash) error {\n\tb := tx.Bucket([]byte(\"FileContracts\"))\n\tif b == nil {\n\t\treturn errors.New(\"bucket FileContracts does not exist\")\n\t}\n\n\tfiBytes := b.Get(encoding.Marshal(fcid))\n\tif fiBytes == nil {\n\t\treturn errors.New(\"filecontract does not exist in database\")\n\t}\n\n\tvar fi fcInfo\n\terr := encoding.Unmarshal(fiBytes, &fi)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfi.Proof = txid\n\n\treturn b.Put(encoding.Marshal(fcid), encoding.Marshal(fi))\n}\n\nfunc addNewHash(tx *bolt.Tx, bucketName string, t int, hash crypto.Hash, value interface{}) error {\n\terr := addHashType(tx, hash, t)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb := tx.Bucket([]byte(bucketName))\n\tif b == nil {\n\t\treturn errors.New(\"bucket does not exist: \" + bucketName)\n\t}\n\treturn b.Put(encoding.Marshal(hash), encoding.Marshal(value))\n}\n\n\/\/ addNewOutput creats a new outputTransactions struct and adds it to the database\nfunc addNewOutput(tx *bolt.Tx, outputID types.SiacoinOutputID, txid crypto.Hash) error {\n\totx := outputTransactions{txid, crypto.Hash{}}\n\treturn addNewHash(tx, \"SiacoinOutputs\", hashCoinOutputID, crypto.Hash(outputID), otx)\n}\n\n\/\/ addNewSFOutput does the same thing as addNewOutput does, except for siafunds\nfunc addNewSFOutput(tx *bolt.Tx, outputID types.SiafundOutputID, txid crypto.Hash) error {\n\totx := outputTransactions{txid, crypto.Hash{}}\n\treturn addNewHash(tx, \"SiafundOutputs\", hashFundOutputID, crypto.Hash(outputID), otx)\n}\n\n\/\/ addHeight adds a block summary (modules.ExplorerBlockData) to the\n\/\/ database with a height as the key\nfunc addHeight(tx *bolt.Tx, height types.BlockHeight, bs modules.ExplorerBlockData) error {\n\tb := tx.Bucket([]byte(\"Heights\"))\n\tif b == nil {\n\t\treturn errors.New(\"bucket Blocks does not exist\")\n\t}\n\n\treturn b.Put(encoding.Marshal(height), encoding.Marshal(bs))\n}\n\n\/\/ addBlockDB parses a block and adds it to the database\nfunc (be *BlockExplorer) addBlockDB(b types.Block) error {\n\t\/\/ Special case for the genesis block, which does not have a\n\t\/\/ valid parent, and for testing, as tests will not always use\n\t\/\/ blocks in consensus\n\tvar blocktarget types.Target\n\tif b.ID() == be.genesisBlockID {\n\t\tblocktarget = types.RootDepth\n\t} else {\n\t\tvar exists bool\n\t\tblocktarget, exists = be.cs.ChildTarget(b.ParentID)\n\t\tif build.DEBUG {\n\t\t\tif build.Release == \"testing\" {\n\t\t\t\tblocktarget = types.RootDepth\n\t\t\t}\n\t\t\tif !exists {\n\t\t\t\tpanic(\"Applied block not in consensus\")\n\t\t\t}\n\n\t\t}\n\t}\n\n\ttx, err := be.db.Begin(true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tx.Rollback()\n\n\t\/\/ Construct the struct that will be inside the database\n\tblockStruct := blockData{\n\t\tBlock: b,\n\t\tHeight: be.blockchainHeight,\n\t}\n\n\terr = addNewHash(tx, \"Blocks\", hashBlock, crypto.Hash(b.ID()), blockStruct)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbSum := modules.ExplorerBlockData{\n\t\tID: b.ID(),\n\t\tTimestamp: b.Timestamp,\n\t\tTarget: blocktarget,\n\t\tSize: uint64(len(encoding.Marshal(b))),\n\t}\n\n\terr = addHeight(tx, be.blockchainHeight, bSum)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = addHashType(tx, crypto.Hash(b.ID()), hashBlock)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Insert the miner payouts as new outputs\n\tfor i, payout := range b.MinerPayouts {\n\t\terr = addAddress(tx, payout.UnlockHash, crypto.Hash(b.ID()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = addNewOutput(tx, b.MinerPayoutID(i), crypto.Hash(b.ID()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Insert each transaction\n\tfor i, txn := range b.Transactions {\n\t\terr = addNewHash(tx, \"Transactions\", hashTransaction, txn.ID(), txInfo{b.ID(), i})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = be.addTransaction(tx, txn)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn tx.Commit()\n}\n\n\/\/ addTransaction is called from addBlockDB, and delegates the adding\n\/\/ of information to the database to the functions defined above\nfunc (be *BlockExplorer) addTransaction(btx *bolt.Tx, tx types.Transaction) error {\n\t\/\/ Store this for quick lookup\n\ttxid := tx.ID()\n\n\t\/\/ Append each input to the list of modifications\n\tfor _, input := range tx.SiacoinInputs {\n\t\terr := addSiacoinInput(btx, input.ParentID, txid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Handle all the transaction outputs\n\tfor i, output := range tx.SiacoinOutputs {\n\t\terr := addAddress(btx, output.UnlockHash, txid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = addNewOutput(btx, tx.SiacoinOutputID(i), txid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Handle each file contract individually\n\tfor i, contract := range tx.FileContracts {\n\t\tfcid := tx.FileContractID(i)\n\t\terr := addNewHash(btx, \"FileContracts\", hashFilecontract, crypto.Hash(fcid), fcInfo{\n\t\t\tContract: txid,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor j, output := range contract.ValidProofOutputs {\n\t\t\terr = addAddress(btx, output.UnlockHash, txid)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = addNewOutput(btx, fcid.StorageProofOutputID(true, j), txid)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tfor j, output := range contract.MissedProofOutputs {\n\t\t\terr = addAddress(btx, output.UnlockHash, txid)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = addNewOutput(btx, fcid.StorageProofOutputID(false, j), txid)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\terr = addAddress(btx, contract.UnlockHash, txid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Update the list of revisions\n\tfor _, revision := range tx.FileContractRevisions {\n\t\terr := addFcRevision(btx, revision.ParentID, txid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Note the old outputs will still be there in the\n\t\t\/\/ database. This is to provide information to the\n\t\t\/\/ people who may just need it.\n\t\tfor i, output := range revision.NewValidProofOutputs {\n\t\t\terr = addAddress(btx, output.UnlockHash, txid)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = addNewOutput(btx, revision.ParentID.StorageProofOutputID(true, i), txid)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tfor i, output := range revision.NewMissedProofOutputs {\n\t\t\terr = addAddress(btx, output.UnlockHash, txid)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = addNewOutput(btx, revision.ParentID.StorageProofOutputID(false, i), txid)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\taddAddress(btx, revision.NewUnlockHash, txid)\n\t}\n\n\t\/\/ Update the list of storage proofs\n\tfor _, proof := range tx.StorageProofs {\n\t\terr := addFcProof(btx, proof.ParentID, txid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Append all the siafund inputs to the modification list\n\tfor _, input := range tx.SiafundInputs {\n\t\terr := addSiafundInput(btx, input.ParentID, txid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Handle all the siafund outputs\n\tfor i, output := range tx.SiafundOutputs {\n\t\terr := addAddress(btx, output.UnlockHash, txid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = addNewSFOutput(btx, tx.SiafundOutputID(i), txid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\treturn addHashType(btx, txid, hashTransaction)\n}\n<|endoftext|>"} {"text":"package primitive\n\nimport (\n\t\"fmt\"\n\t\"image\"\n\t\"time\"\n\n\t\"github.com\/fogleman\/gg\"\n)\n\ntype Model struct {\n\tW, H int\n\tTarget *image.RGBA\n\tCurrent *image.RGBA\n\tBuffer *image.RGBA\n\tScore float64\n\tContext *gg.Context\n}\n\nfunc NewModel(target image.Image) *Model {\n\tc := averageImageColor(target)\n\tsize := target.Bounds().Size()\n\tmodel := &Model{}\n\tmodel.W = size.X\n\tmodel.H = size.Y\n\tmodel.Target = imageToRGBA(target)\n\tmodel.Current = uniformRGBA(target.Bounds(), c)\n\tmodel.Buffer = uniformRGBA(target.Bounds(), c)\n\tmodel.Score = differenceFull(model.Target, model.Current)\n\tmodel.Context = gg.NewContext(model.W*4, model.H*4)\n\tmodel.Context.Scale(4, 4)\n\tmodel.Context.SetColor(c)\n\tmodel.Context.Clear()\n\treturn model\n}\n\nfunc (model *Model) Run() {\n\tframe := 0\n\tstart := time.Now()\n\tfor {\n\t\tmodel.Step()\n\t\telapsed := time.Since(start).Seconds()\n\t\tfmt.Printf(\"%d, %.3f, %.6f\\n\", frame, elapsed, model.Score)\n\t\tif frame%1 == 0 {\n\t\t\tpath := fmt.Sprintf(\"out%03d.png\", frame)\n\t\t\t\/\/ SavePNG(path, model.Current)\n\t\t\tmodel.Context.SavePNG(path)\n\t\t}\n\t\tframe++\n\t}\n}\n\nfunc (model *Model) Step() {\n\t\/\/ state := NewState(model, NewRandomTriangle(model.W, model.H))\n\t\/\/ state := NewState(model, NewRandomRectangle(model.W, model.H))\n\tstate := NewState(model, NewRandomCircle(model.W, model.H))\n\t\/\/ fmt.Println(PreAnneal(state, 10000))\n\tstate = Anneal(state, 0.2, 0.0001, 10000).(*State)\n\tmodel.Add(state.Shape)\n}\n\nfunc (model *Model) Add(shape Shape) {\n\tlines := shape.Rasterize()\n\tc := model.computeColor(lines, 128)\n\ts := model.computeScore(lines, c)\n\tDraw(model.Current, c, lines)\n\tmodel.Score = s\n\tshape.Draw(model.Context)\n\tmodel.Context.SetRGBA255(c.R, c.G, c.B, c.A)\n\tmodel.Context.Fill()\n}\n\nfunc (model *Model) computeColor(lines []Scanline, alpha int) Color {\n\tvar count int\n\tvar rsum, gsum, bsum float64\n\ta := float64(alpha) \/ 255\n\tfor _, line := range lines {\n\t\ti := model.Target.PixOffset(line.X1, line.Y)\n\t\tfor x := line.X1; x <= line.X2; x++ {\n\t\t\tcount++\n\t\t\ttr := float64(model.Target.Pix[i])\n\t\t\ttg := float64(model.Target.Pix[i+1])\n\t\t\ttb := float64(model.Target.Pix[i+2])\n\t\t\tcr := float64(model.Current.Pix[i])\n\t\t\tcg := float64(model.Current.Pix[i+1])\n\t\t\tcb := float64(model.Current.Pix[i+2])\n\t\t\ti += 4\n\t\t\trsum += (a*cr - cr + tr) \/ a\n\t\t\tgsum += (a*cg - cg + tg) \/ a\n\t\t\tbsum += (a*cb - cb + tb) \/ a\n\t\t}\n\t}\n\tif count == 0 {\n\t\treturn Color{}\n\t}\n\tr := clampInt(int(rsum\/float64(count)), 0, 255)\n\tg := clampInt(int(gsum\/float64(count)), 0, 255)\n\tb := clampInt(int(bsum\/float64(count)), 0, 255)\n\treturn Color{r, g, b, alpha}\n}\n\nfunc (model *Model) computeScore(lines []Scanline, c Color) float64 {\n\tcopy(model.Buffer.Pix, model.Current.Pix)\n\tDraw(model.Buffer, c, lines)\n\treturn differencePartial(model.Target, model.Current, model.Buffer, model.Score, lines)\n}\ntweakspackage primitive\n\nimport (\n\t\"fmt\"\n\t\"image\"\n\t\"image\/color\"\n\t\"time\"\n\n\t\"github.com\/fogleman\/gg\"\n)\n\nconst Scale = 4\n\ntype Model struct {\n\tW, H int\n\tTarget *image.RGBA\n\tCurrent *image.RGBA\n\tBuffer *image.RGBA\n\tScore float64\n\tContext *gg.Context\n}\n\nfunc NewModel(target image.Image) *Model {\n\tc := averageImageColor(target)\n\tc = color.White\n\t\/\/ c = color.Black\n\tsize := target.Bounds().Size()\n\tmodel := &Model{}\n\tmodel.W = size.X\n\tmodel.H = size.Y\n\tmodel.Target = imageToRGBA(target)\n\tmodel.Current = uniformRGBA(target.Bounds(), c)\n\tmodel.Buffer = uniformRGBA(target.Bounds(), c)\n\tmodel.Score = differenceFull(model.Target, model.Current)\n\tmodel.Context = gg.NewContext(model.W*Scale, model.H*Scale)\n\tmodel.Context.Scale(Scale, Scale)\n\tmodel.Context.SetColor(c)\n\tmodel.Context.Clear()\n\treturn model\n}\n\nfunc (model *Model) Run() {\n\tframe := 0\n\tstart := time.Now()\n\tfor {\n\t\tmodel.Step()\n\t\telapsed := time.Since(start).Seconds()\n\t\tfmt.Printf(\"%d, %.3f, %.6f\\n\", frame, elapsed, model.Score)\n\t\tif frame%1 == 0 {\n\t\t\tpath := fmt.Sprintf(\"out%03d.png\", frame)\n\t\t\t\/\/ SavePNG(path, model.Current)\n\t\t\tmodel.Context.SavePNG(path)\n\t\t}\n\t\tframe++\n\t}\n}\n\nfunc (model *Model) Step() {\n\t\/\/ state := NewState(model, NewRandomTriangle(model.W, model.H))\n\t\/\/ state := NewState(model, NewRandomRectangle(model.W, model.H))\n\tstate := NewState(model, NewRandomCircle(model.W, model.H))\n\t\/\/ fmt.Println(PreAnneal(state, 10000))\n\tstate = Anneal(state, 0.2, 0.0001, 50000).(*State)\n\tmodel.Add(state.Shape)\n}\n\nfunc (model *Model) Add(shape Shape) {\n\tlines := shape.Rasterize()\n\tc := model.computeColor(lines, 128)\n\ts := model.computeScore(lines, c)\n\tDraw(model.Current, c, lines)\n\tmodel.Score = s\n\tshape.Draw(model.Context)\n\tmodel.Context.SetRGBA255(c.R, c.G, c.B, c.A)\n\tmodel.Context.Fill()\n}\n\nfunc (model *Model) computeColor(lines []Scanline, alpha int) Color {\n\tvar count int\n\tvar rsum, gsum, bsum float64\n\ta := float64(alpha) \/ 255\n\tfor _, line := range lines {\n\t\ti := model.Target.PixOffset(line.X1, line.Y)\n\t\tfor x := line.X1; x <= line.X2; x++ {\n\t\t\tcount++\n\t\t\ttr := float64(model.Target.Pix[i])\n\t\t\ttg := float64(model.Target.Pix[i+1])\n\t\t\ttb := float64(model.Target.Pix[i+2])\n\t\t\tcr := float64(model.Current.Pix[i])\n\t\t\tcg := float64(model.Current.Pix[i+1])\n\t\t\tcb := float64(model.Current.Pix[i+2])\n\t\t\ti += 4\n\t\t\trsum += (a*cr - cr + tr) \/ a\n\t\t\tgsum += (a*cg - cg + tg) \/ a\n\t\t\tbsum += (a*cb - cb + tb) \/ a\n\t\t}\n\t}\n\tif count == 0 {\n\t\treturn Color{}\n\t}\n\tr := clampInt(int(rsum\/float64(count)), 0, 255)\n\tg := clampInt(int(gsum\/float64(count)), 0, 255)\n\tb := clampInt(int(bsum\/float64(count)), 0, 255)\n\treturn Color{r, g, b, alpha}\n}\n\nfunc (model *Model) computeScore(lines []Scanline, c Color) float64 {\n\tcopy(model.Buffer.Pix, model.Current.Pix)\n\tDraw(model.Buffer, c, lines)\n\treturn differencePartial(model.Target, model.Current, model.Buffer, model.Score, lines)\n}\n<|endoftext|>"} {"text":"package ds\n\nimport (\n\t\"strconv\"\n\n\t\"cloud.google.com\/go\/datastore\"\n)\n\n\/\/ Model is the base model\n\/\/ every model should have this Model\ntype Model struct {\n\tkey *datastore.Key\n\tID string `datastore:\"-\"`\n}\n\n\/\/ Key returns key from model\nfunc (x *Model) Key() *datastore.Key {\n\treturn x.key\n}\n\n\/\/ SetKey sets model key to given key\nfunc (x *Model) SetKey(key *datastore.Key) {\n\tx.key = key\n\tif key == nil {\n\t\tx.ID = \"\"\n\t\treturn\n\t}\n\tif key.Name != \"\" {\n\t\tx.ID = key.Name\n\t\treturn\n\t}\n\tif key.ID != 0 {\n\t\tx.ID = strconv.FormatInt(key.ID, 10)\n\t}\n}\n\n\/\/ KeyGetter interface\ntype KeyGetter interface {\n\tKey() *datastore.Key\n}\n\n\/\/ KeySetter interface\ntype KeySetter interface {\n\tSetKey(*datastore.Key)\n}\n\n\/\/ KeyGetSetter interface\ntype KeyGetSetter interface {\n\tKeyGetter\n\tKeySetter\n}\n\n\/\/ KindGetter interface\ntype KindGetter interface {\n\tKind() string\n}\nadd set id key and set name keypackage ds\n\nimport (\n\t\"strconv\"\n\n\t\"cloud.google.com\/go\/datastore\"\n)\n\n\/\/ Model is the base model\n\/\/ every model should have this Model\ntype Model struct {\n\tkey *datastore.Key\n\tID string `datastore:\"-\"`\n}\n\n\/\/ Key returns key from model\nfunc (x *Model) Key() *datastore.Key {\n\treturn x.key\n}\n\n\/\/ SetKey sets model key to given key\nfunc (x *Model) SetKey(key *datastore.Key) {\n\tx.key = key\n\tif key == nil {\n\t\tx.ID = \"\"\n\t\treturn\n\t}\n\tif key.Name != \"\" {\n\t\tx.ID = key.Name\n\t\treturn\n\t}\n\tif key.ID != 0 {\n\t\tx.ID = strconv.FormatInt(key.ID, 10)\n\t}\n}\n\n\/\/ SetIDKey sets id to model\nfunc (x *Model) SetIDKey(id string) {\n\tx.SetKey(datastore.IDKey(interface{}(x).(KindGetter).Kind(), parseID(id), nil))\n}\n\n\/\/ SetNameKey sets id to model\nfunc (x *Model) SetNameKey(name string) {\n\tx.SetKey(datastore.NameKey(interface{}(x).(KindGetter).Kind(), name, nil))\n}\n\n\/\/ KeyGetter interface\ntype KeyGetter interface {\n\tKey() *datastore.Key\n}\n\n\/\/ KeySetter interface\ntype KeySetter interface {\n\tSetKey(*datastore.Key)\n}\n\n\/\/ KeyGetSetter interface\ntype KeyGetSetter interface {\n\tKeyGetter\n\tKeySetter\n}\n\n\/\/ KindGetter interface\ntype KindGetter interface {\n\tKind() string\n}\n<|endoftext|>"} {"text":"package prompt\n\nimport (\n\t\"bytes\"\n\t\"strconv\"\n)\n\n\/\/ VT100Writer generates VT100 escape sequences.\ntype VT100Writer struct {\n\tbuffer []byte\n}\n\n\/\/ WriteRaw to write raw byte array\nfunc (w *VT100Writer) WriteRaw(data []byte) {\n\tw.buffer = append(w.buffer, data...)\n\treturn\n}\n\n\/\/ Write to write safety byte array by removing control sequences.\nfunc (w *VT100Writer) Write(data []byte) {\n\tw.WriteRaw(bytes.Replace(data, []byte{0x1b}, []byte{'?'}, -1))\n\treturn\n}\n\n\/\/ WriteRawStr to write raw string\nfunc (w *VT100Writer) WriteRawStr(data string) {\n\tw.WriteRaw([]byte(data))\n\treturn\n}\n\n\/\/ WriteStr to write safety string by removing control sequences.\nfunc (w *VT100Writer) WriteStr(data string) {\n\tw.Write([]byte(data))\n\treturn\n}\n\n\/* Erase *\/\n\n\/\/ EraseScreen erases the screen with the background colour and moves the cursor to home.\nfunc (w *VT100Writer) EraseScreen() {\n\tw.WriteRaw([]byte{0x1b, '[', '2', 'J'})\n\treturn\n}\n\n\/\/ EraseUp erases the screen from the current line up to the top of the screen.\nfunc (w *VT100Writer) EraseUp() {\n\tw.WriteRaw([]byte{0x1b, '[', '1', 'J'})\n\treturn\n}\n\n\/\/ EraseDown erases the screen from the current line down to the bottom of the screen.\nfunc (w *VT100Writer) EraseDown() {\n\tw.WriteRaw([]byte{0x1b, '[', 'J'})\n\treturn\n}\n\n\/\/ EraseStartOfLine erases from the current cursor position to the start of the current line.\nfunc (w *VT100Writer) EraseStartOfLine() {\n\tw.WriteRaw([]byte{0x1b, '[', '1', 'K'})\n\treturn\n}\n\n\/\/ EraseEndOfLine erases from the current cursor position to the end of the current line.\nfunc (w *VT100Writer) EraseEndOfLine() {\n\tw.WriteRaw([]byte{0x1b, '[', 'K'})\n\treturn\n}\n\n\/\/ EraseLine erases the entire current line.\nfunc (w *VT100Writer) EraseLine() {\n\tw.WriteRaw([]byte{0x1b, '[', '2', 'K'})\n\treturn\n}\n\n\/* Cursor *\/\n\n\/\/ ShowCursor stops blinking cursor and show.\nfunc (w *VT100Writer) ShowCursor() {\n\tw.WriteRaw([]byte{0x1b, '[', '?', '1', '2', 'l', 0x1b, '[', '?', '2', '5', 'h'})\n}\n\n\/\/ HideCursor hides cursor.\nfunc (w *VT100Writer) HideCursor() {\n\tw.WriteRaw([]byte{0x1b, '[', '?', '2', '5', 'l'})\n\treturn\n}\n\n\/\/ CursorGoTo sets the cursor position where subsequent text will begin.\nfunc (w *VT100Writer) CursorGoTo(row, col int) {\n\tif row == 0 && col == 0 {\n\t\t\/\/ If no row\/column parameters are provided (ie. [H), the cursor will move to the home position.\n\t\tw.WriteRaw([]byte{0x1b, '[', 'H'})\n\t\treturn\n\t}\n\tr := strconv.Itoa(row)\n\tc := strconv.Itoa(col)\n\tw.WriteRaw([]byte{0x1b, '['})\n\tw.WriteRaw([]byte(r))\n\tw.WriteRaw([]byte{';'})\n\tw.WriteRaw([]byte(c))\n\tw.WriteRaw([]byte{'H'})\n\treturn\n}\n\n\/\/ CursorUp moves the cursor up by 'n' rows; the default count is 1.\nfunc (w *VT100Writer) CursorUp(n int) {\n\tif n == 0 {\n\t\treturn\n\t} else if n < 0 {\n\t\tw.CursorDown(-n)\n\t\treturn\n\t}\n\ts := strconv.Itoa(n)\n\tw.WriteRaw([]byte{0x1b, '['})\n\tw.WriteRaw([]byte(s))\n\tw.WriteRaw([]byte{'A'})\n\treturn\n}\n\n\/\/ CursorDown moves the cursor down by 'n' rows; the default count is 1.\nfunc (w *VT100Writer) CursorDown(n int) {\n\tif n == 0 {\n\t\treturn\n\t} else if n < 0 {\n\t\tw.CursorUp(-n)\n\t\treturn\n\t}\n\ts := strconv.Itoa(n)\n\tw.WriteRaw([]byte{0x1b, '['})\n\tw.WriteRaw([]byte(s))\n\tw.WriteRaw([]byte{'B'})\n\treturn\n}\n\n\/\/ CursorForward moves the cursor forward by 'n' columns; the default count is 1.\nfunc (w *VT100Writer) CursorForward(n int) {\n\tif n == 0 {\n\t\treturn\n\t} else if n < 0 {\n\t\tw.CursorBackward(-n)\n\t\treturn\n\t}\n\ts := strconv.Itoa(n)\n\tw.WriteRaw([]byte{0x1b, '['})\n\tw.WriteRaw([]byte(s))\n\tw.WriteRaw([]byte{'C'})\n\treturn\n}\n\n\/\/ CursorBackward moves the cursor backward by 'n' columns; the default count is 1.\nfunc (w *VT100Writer) CursorBackward(n int) {\n\tif n == 0 {\n\t\treturn\n\t} else if n < 0 {\n\t\tw.CursorForward(-n)\n\t\treturn\n\t}\n\ts := strconv.Itoa(n)\n\tw.WriteRaw([]byte{0x1b, '['})\n\tw.WriteRaw([]byte(s))\n\tw.WriteRaw([]byte{'D'})\n\treturn\n}\n\n\/\/ AskForCPR asks for a cursor position report (CPR).\nfunc (w *VT100Writer) AskForCPR() {\n\t\/\/ CPR: Cursor Position Request.\n\tw.WriteRaw([]byte{0x1b, '[', '6', 'n'})\n\treturn\n}\n\n\/\/ SaveCursor saves current cursor position.\nfunc (w *VT100Writer) SaveCursor() {\n\tw.WriteRaw([]byte{0x1b, '[', 's'})\n\treturn\n}\n\n\/\/ UnSaveCursor restores cursor position after a Save Cursor.\nfunc (w *VT100Writer) UnSaveCursor() {\n\tw.WriteRaw([]byte{0x1b, '[', 'u'})\n\treturn\n}\n\n\/* Scrolling *\/\n\n\/\/ ScrollDown scrolls display down one line.\nfunc (w *VT100Writer) ScrollDown() {\n\tw.WriteRaw([]byte{0x1b, 'D'})\n\treturn\n}\n\n\/\/ ScrollUp scroll display up one line.\nfunc (w *VT100Writer) ScrollUp() {\n\tw.WriteRaw([]byte{0x1b, 'M'})\n\treturn\n}\n\n\/* Title *\/\n\n\/\/ SetTitle sets a title of terminal window.\nfunc (w *VT100Writer) SetTitle(title string) {\n\ttitleBytes := []byte(title)\n\tpatterns := []struct {\n\t\tfrom []byte\n\t\tto []byte\n\t}{\n\t\t{\n\t\t\tfrom: []byte{0x13},\n\t\t\tto: []byte{},\n\t\t},\n\t\t{\n\t\t\tfrom: []byte{0x07},\n\t\t\tto: []byte{},\n\t\t},\n\t}\n\tfor i := range patterns {\n\t\ttitleBytes = bytes.Replace(titleBytes, patterns[i].from, patterns[i].to, -1)\n\t}\n\n\tw.WriteRaw([]byte{0x1b, ']', '2', ';'})\n\tw.WriteRaw(titleBytes)\n\tw.WriteRaw([]byte{0x07})\n\treturn\n}\n\n\/\/ ClearTitle clears a title of terminal window.\nfunc (w *VT100Writer) ClearTitle() {\n\tw.WriteRaw([]byte{0x1b, ']', '2', ';', 0x07})\n\treturn\n}\n\n\/* Font *\/\n\n\/\/ SetColor sets text and background colors. and specify whether text is bold.\nfunc (w *VT100Writer) SetColor(fg, bg Color, bold bool) {\n\tif bold {\n\t\tw.SetDisplayAttributes(fg, bg, DisplayBold)\n\t} else {\n\t\tw.SetDisplayAttributes(fg, bg, DisplayDefaultFont, DisplayReset)\n\t}\n\treturn\n}\n\n\/\/ SetDisplayAttributes to set VT100 display attributes.\nfunc (w *VT100Writer) SetDisplayAttributes(fg, bg Color, attrs ...DisplayAttribute) {\n\tw.WriteRaw([]byte{0x1b, '['}) \/\/ control sequence introducer\n\tdefer w.WriteRaw([]byte{'m'}) \/\/ final character\n\n\tvar separator byte = ';'\n\tfor i := range attrs {\n\t\tp, ok := displayAttributeParameters[attrs[i]]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tw.WriteRaw(p)\n\t\tw.WriteRaw([]byte{separator})\n\t}\n\n\tf, ok := foregroundANSIColors[fg]\n\tif !ok {\n\t\tf = foregroundANSIColors[DefaultColor]\n\t}\n\tw.WriteRaw(f)\n\tw.WriteRaw([]byte{separator})\n\tb, ok := backgroundANSIColors[bg]\n\tif !ok {\n\t\tb = backgroundANSIColors[DefaultColor]\n\t}\n\tw.WriteRaw(b)\n\treturn\n}\n\nvar displayAttributeParameters = map[DisplayAttribute][]byte{\n\tDisplayReset: {'0'},\n\tDisplayBold: {'1'},\n\tDisplayLowIntensity: {'2'},\n\tDisplayItalic: {'3'},\n\tDisplayUnderline: {'4'},\n\tDisplayBlink: {'5'},\n\tDisplayRapidBlink: {'6'},\n\tDisplayReverse: {'7'},\n\tDisplayInvisible: {'8'},\n\tDisplayCrossedOut: {'9'},\n\tDisplayDefaultFont: {'1', '0'},\n}\n\nvar foregroundANSIColors = map[Color][]byte{\n\tDefaultColor: {'3', '9'},\n\n\t\/\/ Low intensity.\n\tBlack: {'3', '0'},\n\tDarkRed: {'3', '1'},\n\tDarkGreen: {'3', '2'},\n\tBrown: {'3', '3'},\n\tDarkBlue: {'3', '4'},\n\tPurple: {'3', '5'},\n\tCyan: {'3', '6'},\n\tLightGray: {'3', '7'},\n\n\t\/\/ High intensity.\n\tDarkGray: {'9', '0'},\n\tRed: {'9', '1'},\n\tGreen: {'9', '2'},\n\tYellow: {'9', '3'},\n\tBlue: {'9', '4'},\n\tFuchsia: {'9', '5'},\n\tTurquoise: {'9', '6'},\n\tWhite: {'9', '7'},\n}\n\nvar backgroundANSIColors = map[Color][]byte{\n\tDefaultColor: {'4', '9'},\n\n\t\/\/ Low intensity.\n\tBlack: {'4', '0'},\n\tDarkRed: {'4', '1'},\n\tDarkGreen: {'4', '2'},\n\tBrown: {'4', '3'},\n\tDarkBlue: {'4', '4'},\n\tPurple: {'4', '5'},\n\tCyan: {'4', '6'},\n\tLightGray: {'4', '7'},\n\n\t\/\/ High intensity\n\tDarkGray: {'1', '0', '0'},\n\tRed: {'1', '0', '1'},\n\tGreen: {'1', '0', '2'},\n\tYellow: {'1', '0', '3'},\n\tBlue: {'1', '0', '4'},\n\tFuchsia: {'1', '0', '5'},\n\tTurquoise: {'1', '0', '6'},\n\tWhite: {'1', '0', '7'},\n}\nFix resetting attributes for arch linuxpackage prompt\n\nimport (\n\t\"bytes\"\n\t\"strconv\"\n)\n\n\/\/ VT100Writer generates VT100 escape sequences.\ntype VT100Writer struct {\n\tbuffer []byte\n}\n\n\/\/ WriteRaw to write raw byte array\nfunc (w *VT100Writer) WriteRaw(data []byte) {\n\tw.buffer = append(w.buffer, data...)\n\treturn\n}\n\n\/\/ Write to write safety byte array by removing control sequences.\nfunc (w *VT100Writer) Write(data []byte) {\n\tw.WriteRaw(bytes.Replace(data, []byte{0x1b}, []byte{'?'}, -1))\n\treturn\n}\n\n\/\/ WriteRawStr to write raw string\nfunc (w *VT100Writer) WriteRawStr(data string) {\n\tw.WriteRaw([]byte(data))\n\treturn\n}\n\n\/\/ WriteStr to write safety string by removing control sequences.\nfunc (w *VT100Writer) WriteStr(data string) {\n\tw.Write([]byte(data))\n\treturn\n}\n\n\/* Erase *\/\n\n\/\/ EraseScreen erases the screen with the background colour and moves the cursor to home.\nfunc (w *VT100Writer) EraseScreen() {\n\tw.WriteRaw([]byte{0x1b, '[', '2', 'J'})\n\treturn\n}\n\n\/\/ EraseUp erases the screen from the current line up to the top of the screen.\nfunc (w *VT100Writer) EraseUp() {\n\tw.WriteRaw([]byte{0x1b, '[', '1', 'J'})\n\treturn\n}\n\n\/\/ EraseDown erases the screen from the current line down to the bottom of the screen.\nfunc (w *VT100Writer) EraseDown() {\n\tw.WriteRaw([]byte{0x1b, '[', 'J'})\n\treturn\n}\n\n\/\/ EraseStartOfLine erases from the current cursor position to the start of the current line.\nfunc (w *VT100Writer) EraseStartOfLine() {\n\tw.WriteRaw([]byte{0x1b, '[', '1', 'K'})\n\treturn\n}\n\n\/\/ EraseEndOfLine erases from the current cursor position to the end of the current line.\nfunc (w *VT100Writer) EraseEndOfLine() {\n\tw.WriteRaw([]byte{0x1b, '[', 'K'})\n\treturn\n}\n\n\/\/ EraseLine erases the entire current line.\nfunc (w *VT100Writer) EraseLine() {\n\tw.WriteRaw([]byte{0x1b, '[', '2', 'K'})\n\treturn\n}\n\n\/* Cursor *\/\n\n\/\/ ShowCursor stops blinking cursor and show.\nfunc (w *VT100Writer) ShowCursor() {\n\tw.WriteRaw([]byte{0x1b, '[', '?', '1', '2', 'l', 0x1b, '[', '?', '2', '5', 'h'})\n}\n\n\/\/ HideCursor hides cursor.\nfunc (w *VT100Writer) HideCursor() {\n\tw.WriteRaw([]byte{0x1b, '[', '?', '2', '5', 'l'})\n\treturn\n}\n\n\/\/ CursorGoTo sets the cursor position where subsequent text will begin.\nfunc (w *VT100Writer) CursorGoTo(row, col int) {\n\tif row == 0 && col == 0 {\n\t\t\/\/ If no row\/column parameters are provided (ie. [H), the cursor will move to the home position.\n\t\tw.WriteRaw([]byte{0x1b, '[', 'H'})\n\t\treturn\n\t}\n\tr := strconv.Itoa(row)\n\tc := strconv.Itoa(col)\n\tw.WriteRaw([]byte{0x1b, '['})\n\tw.WriteRaw([]byte(r))\n\tw.WriteRaw([]byte{';'})\n\tw.WriteRaw([]byte(c))\n\tw.WriteRaw([]byte{'H'})\n\treturn\n}\n\n\/\/ CursorUp moves the cursor up by 'n' rows; the default count is 1.\nfunc (w *VT100Writer) CursorUp(n int) {\n\tif n == 0 {\n\t\treturn\n\t} else if n < 0 {\n\t\tw.CursorDown(-n)\n\t\treturn\n\t}\n\ts := strconv.Itoa(n)\n\tw.WriteRaw([]byte{0x1b, '['})\n\tw.WriteRaw([]byte(s))\n\tw.WriteRaw([]byte{'A'})\n\treturn\n}\n\n\/\/ CursorDown moves the cursor down by 'n' rows; the default count is 1.\nfunc (w *VT100Writer) CursorDown(n int) {\n\tif n == 0 {\n\t\treturn\n\t} else if n < 0 {\n\t\tw.CursorUp(-n)\n\t\treturn\n\t}\n\ts := strconv.Itoa(n)\n\tw.WriteRaw([]byte{0x1b, '['})\n\tw.WriteRaw([]byte(s))\n\tw.WriteRaw([]byte{'B'})\n\treturn\n}\n\n\/\/ CursorForward moves the cursor forward by 'n' columns; the default count is 1.\nfunc (w *VT100Writer) CursorForward(n int) {\n\tif n == 0 {\n\t\treturn\n\t} else if n < 0 {\n\t\tw.CursorBackward(-n)\n\t\treturn\n\t}\n\ts := strconv.Itoa(n)\n\tw.WriteRaw([]byte{0x1b, '['})\n\tw.WriteRaw([]byte(s))\n\tw.WriteRaw([]byte{'C'})\n\treturn\n}\n\n\/\/ CursorBackward moves the cursor backward by 'n' columns; the default count is 1.\nfunc (w *VT100Writer) CursorBackward(n int) {\n\tif n == 0 {\n\t\treturn\n\t} else if n < 0 {\n\t\tw.CursorForward(-n)\n\t\treturn\n\t}\n\ts := strconv.Itoa(n)\n\tw.WriteRaw([]byte{0x1b, '['})\n\tw.WriteRaw([]byte(s))\n\tw.WriteRaw([]byte{'D'})\n\treturn\n}\n\n\/\/ AskForCPR asks for a cursor position report (CPR).\nfunc (w *VT100Writer) AskForCPR() {\n\t\/\/ CPR: Cursor Position Request.\n\tw.WriteRaw([]byte{0x1b, '[', '6', 'n'})\n\treturn\n}\n\n\/\/ SaveCursor saves current cursor position.\nfunc (w *VT100Writer) SaveCursor() {\n\tw.WriteRaw([]byte{0x1b, '[', 's'})\n\treturn\n}\n\n\/\/ UnSaveCursor restores cursor position after a Save Cursor.\nfunc (w *VT100Writer) UnSaveCursor() {\n\tw.WriteRaw([]byte{0x1b, '[', 'u'})\n\treturn\n}\n\n\/* Scrolling *\/\n\n\/\/ ScrollDown scrolls display down one line.\nfunc (w *VT100Writer) ScrollDown() {\n\tw.WriteRaw([]byte{0x1b, 'D'})\n\treturn\n}\n\n\/\/ ScrollUp scroll display up one line.\nfunc (w *VT100Writer) ScrollUp() {\n\tw.WriteRaw([]byte{0x1b, 'M'})\n\treturn\n}\n\n\/* Title *\/\n\n\/\/ SetTitle sets a title of terminal window.\nfunc (w *VT100Writer) SetTitle(title string) {\n\ttitleBytes := []byte(title)\n\tpatterns := []struct {\n\t\tfrom []byte\n\t\tto []byte\n\t}{\n\t\t{\n\t\t\tfrom: []byte{0x13},\n\t\t\tto: []byte{},\n\t\t},\n\t\t{\n\t\t\tfrom: []byte{0x07},\n\t\t\tto: []byte{},\n\t\t},\n\t}\n\tfor i := range patterns {\n\t\ttitleBytes = bytes.Replace(titleBytes, patterns[i].from, patterns[i].to, -1)\n\t}\n\n\tw.WriteRaw([]byte{0x1b, ']', '2', ';'})\n\tw.WriteRaw(titleBytes)\n\tw.WriteRaw([]byte{0x07})\n\treturn\n}\n\n\/\/ ClearTitle clears a title of terminal window.\nfunc (w *VT100Writer) ClearTitle() {\n\tw.WriteRaw([]byte{0x1b, ']', '2', ';', 0x07})\n\treturn\n}\n\n\/* Font *\/\n\n\/\/ SetColor sets text and background colors. and specify whether text is bold.\nfunc (w *VT100Writer) SetColor(fg, bg Color, bold bool) {\n\tif bold {\n\t\tw.SetDisplayAttributes(fg, bg, DisplayBold)\n\t} else {\n\t\t\/\/ If using `DisplayDefualt`, it will be broken in some environment.\n\t\t\/\/ Details are https:\/\/github.com\/c-bata\/go-prompt\/pull\/85\n\t\tw.SetDisplayAttributes(fg, bg, DisplayReset)\n\t}\n\treturn\n}\n\n\/\/ SetDisplayAttributes to set VT100 display attributes.\nfunc (w *VT100Writer) SetDisplayAttributes(fg, bg Color, attrs ...DisplayAttribute) {\n\tw.WriteRaw([]byte{0x1b, '['}) \/\/ control sequence introducer\n\tdefer w.WriteRaw([]byte{'m'}) \/\/ final character\n\n\tvar separator byte = ';'\n\tfor i := range attrs {\n\t\tp, ok := displayAttributeParameters[attrs[i]]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tw.WriteRaw(p)\n\t\tw.WriteRaw([]byte{separator})\n\t}\n\n\tf, ok := foregroundANSIColors[fg]\n\tif !ok {\n\t\tf = foregroundANSIColors[DefaultColor]\n\t}\n\tw.WriteRaw(f)\n\tw.WriteRaw([]byte{separator})\n\tb, ok := backgroundANSIColors[bg]\n\tif !ok {\n\t\tb = backgroundANSIColors[DefaultColor]\n\t}\n\tw.WriteRaw(b)\n\treturn\n}\n\nvar displayAttributeParameters = map[DisplayAttribute][]byte{\n\tDisplayReset: {'0'},\n\tDisplayBold: {'1'},\n\tDisplayLowIntensity: {'2'},\n\tDisplayItalic: {'3'},\n\tDisplayUnderline: {'4'},\n\tDisplayBlink: {'5'},\n\tDisplayRapidBlink: {'6'},\n\tDisplayReverse: {'7'},\n\tDisplayInvisible: {'8'},\n\tDisplayCrossedOut: {'9'},\n\tDisplayDefaultFont: {'1', '0'},\n}\n\nvar foregroundANSIColors = map[Color][]byte{\n\tDefaultColor: {'3', '9'},\n\n\t\/\/ Low intensity.\n\tBlack: {'3', '0'},\n\tDarkRed: {'3', '1'},\n\tDarkGreen: {'3', '2'},\n\tBrown: {'3', '3'},\n\tDarkBlue: {'3', '4'},\n\tPurple: {'3', '5'},\n\tCyan: {'3', '6'},\n\tLightGray: {'3', '7'},\n\n\t\/\/ High intensity.\n\tDarkGray: {'9', '0'},\n\tRed: {'9', '1'},\n\tGreen: {'9', '2'},\n\tYellow: {'9', '3'},\n\tBlue: {'9', '4'},\n\tFuchsia: {'9', '5'},\n\tTurquoise: {'9', '6'},\n\tWhite: {'9', '7'},\n}\n\nvar backgroundANSIColors = map[Color][]byte{\n\tDefaultColor: {'4', '9'},\n\n\t\/\/ Low intensity.\n\tBlack: {'4', '0'},\n\tDarkRed: {'4', '1'},\n\tDarkGreen: {'4', '2'},\n\tBrown: {'4', '3'},\n\tDarkBlue: {'4', '4'},\n\tPurple: {'4', '5'},\n\tCyan: {'4', '6'},\n\tLightGray: {'4', '7'},\n\n\t\/\/ High intensity\n\tDarkGray: {'1', '0', '0'},\n\tRed: {'1', '0', '1'},\n\tGreen: {'1', '0', '2'},\n\tYellow: {'1', '0', '3'},\n\tBlue: {'1', '0', '4'},\n\tFuchsia: {'1', '0', '5'},\n\tTurquoise: {'1', '0', '6'},\n\tWhite: {'1', '0', '7'},\n}\n<|endoftext|>"} {"text":"package internal\n\nimport (\n\t\"github.com\/kuzzleio\/sdk-go\/types\"\n\t\"github.com\/stretchr\/testify\/mock\"\n\t\"time\"\n)\n\nvar OfflineQueue []types.QueryObject\n\ntype MockedConnection struct {\n\tmock.Mock\n\tMockSend func([]byte, types.QueryOptions) types.KuzzleResponse\n\tMockEmitEvent func(int, interface{})\n\tMockGetRooms func() *types.RoomList\n}\n\nfunc (c MockedConnection) Send(query []byte, options types.QueryOptions, responseChannel chan<- types.KuzzleResponse, requestId string) error {\n\tif c.MockSend != nil {\n\t\tresponseChannel <- c.MockSend(query, options)\n\t}\n\n\treturn nil\n}\n\nfunc (c MockedConnection) Connect() (bool, error) {\n\tOfflineQueue = make([]types.QueryObject, 1)\n\treturn false, nil\n}\n\nfunc (c MockedConnection) Close() error {\n\treturn nil\n}\n\nfunc (c MockedConnection) AddListener(event int, channel chan<- interface{}) {}\n\nfunc (c MockedConnection) GetState() *int {\n\tstate := 0\n\treturn &state\n}\n\nfunc (c MockedConnection) GetOfflineQueue() *[]types.QueryObject {\n\treturn &OfflineQueue\n}\n\nfunc (c MockedConnection) EmitEvent(event int, arg interface{}) {\n\tif c.MockEmitEvent != nil {\n\t\tc.MockEmitEvent(event, arg)\n\t}\n}\n\nfunc (c MockedConnection) RegisterRoom(roomId, id string, room types.IRoom) {\n}\n\nfunc (c MockedConnection) UnregisterRoom(id string) {}\n\nfunc (c MockedConnection) GetRequestHistory() *map[string]time.Time {\n\tr := make(map[string]time.Time)\n\n\treturn &r\n}\n\nfunc (c MockedConnection) RenewSubscriptions() {}\n\nfunc (c MockedConnection) GetRooms() *types.RoomList {\n\tv := c.MockGetRooms()\n\n\treturn v\n}\nfix mockpackage internal\n\nimport (\n\t\"github.com\/kuzzleio\/sdk-go\/types\"\n\t\"github.com\/stretchr\/testify\/mock\"\n\t\"time\"\n)\n\nvar OfflineQueue []types.QueryObject\n\ntype MockedConnection struct {\n\tmock.Mock\n\tMockSend func([]byte, types.QueryOptions) types.KuzzleResponse\n\tMockEmitEvent func(int, interface{})\n\tMockGetRooms func() *types.RoomList\n}\n\nfunc (c MockedConnection) Send(query []byte, options types.QueryOptions, responseChannel chan<- types.KuzzleResponse, requestId string) error {\n\tif c.MockSend != nil {\n\t\tresponseChannel <- c.MockSend(query, options)\n\t}\n\n\treturn nil\n}\n\nfunc (c MockedConnection) Connect() (bool, error) {\n\tOfflineQueue = make([]types.QueryObject, 1)\n\treturn false, nil\n}\n\nfunc (c MockedConnection) Close() error {\n\treturn nil\n}\n\nfunc (c MockedConnection) AddListener(event int, channel chan<- interface{}) {}\n\nfunc (c MockedConnection) GetState() *int {\n\tstate := 0\n\treturn &state\n}\n\nfunc (c MockedConnection) GetOfflineQueue() *[]types.QueryObject {\n\treturn &OfflineQueue\n}\n\nfunc (c MockedConnection) EmitEvent(event int, arg interface{}) {\n\tif c.MockEmitEvent != nil {\n\t\tc.MockEmitEvent(event, arg)\n\t}\n}\n\nfunc (c MockedConnection) RegisterRoom(roomId, id string, room types.IRoom) {\n}\n\nfunc (c MockedConnection) UnregisterRoom(id string) {}\n\nfunc (c MockedConnection) GetRequestHistory() *map[string]time.Time {\n\tr := make(map[string]time.Time)\n\n\treturn &r\n}\n\nfunc (c MockedConnection) RenewSubscriptions() {}\n\nfunc (c MockedConnection) GetRooms() *types.RoomList {\n\tv := c.MockGetRooms()\n\n\treturn v\n}\n\nfunc (c MockedConnection) RemoveListener(event int) {}<|endoftext|>"} {"text":"package sdk\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n)\n\ntype httpClient interface {\n\tDo(*http.Request) (*http.Response, error)\n}\n\ntype HTTPSender struct {\n\tclient httpClient\n}\n\nfunc NewHTTPSender(client httpClient) *HTTPSender {\n\treturn &HTTPSender{client: client}\n}\n\nfunc (s *HTTPSender) Send(request *http.Request) (content []byte, err error) {\n\tresponse, err := s.client.Do(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontent, err = ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif response.StatusCode != http.StatusOK {\n\t\tresponse.Body.Close()\n\t\treturn nil, fmt.Errorf(\"Non-200 status: %s\\n%s\", response.Status, string(content))\n\t}\n\n\treturn content, response.Body.Close()\n\n}\nAdded TODO item related to a bug when not closing response.package sdk\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n)\n\ntype httpClient interface {\n\tDo(*http.Request) (*http.Response, error)\n}\n\ntype HTTPSender struct {\n\tclient httpClient\n}\n\nfunc NewHTTPSender(client httpClient) *HTTPSender {\n\treturn &HTTPSender{client: client}\n}\n\nfunc (s *HTTPSender) Send(request *http.Request) (content []byte, err error) {\n\tresponse, err := s.client.Do(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontent, err = ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\t\/\/ BUG: if we get a response (and it has a body), it should always be closed\n\t\treturn nil, err\n\t}\n\n\tif response.StatusCode != http.StatusOK {\n\t\tresponse.Body.Close()\n\t\treturn nil, fmt.Errorf(\"Non-200 status: %s\\n%s\", response.Status, string(content))\n\t}\n\n\treturn content, response.Body.Close()\n\n}\n<|endoftext|>"} {"text":"package version\n\n\/\/ Client returns the client version as a string.\n\/\/\nconst Client = \"7.0.0-SNAPSHOT\"\nUpdate the package version to 8.0.0-SNAPSHOTpackage version\n\n\/\/ Client returns the client version as a string.\n\/\/\nconst Client = \"8.0.0-SNAPSHOT\"\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\n\t\"github.com\/brnstz\/routine\/wikimg\"\n)\n\nvar (\n\t\/\/ print a blank line with the given 256 ANSI color\n\tfmtSpec = \"\\x1b[30;48;5;%dm%-80s\\x1b[0m\\n\"\n)\n\nfunc main() {\n\tvar max, workers, buffer int\n\n\tflag.IntVar(&max, \"max\", 10, \"maximum number of images to retrieve\")\n\tflag.IntVar(&workers, \"workers\", 5, \"number of background workers\")\n\tflag.IntVar(&buffer, \"buffer\", 0, \"buffer size of image channel\")\n\tflag.Parse()\n\n\t\/\/ Create a new image puller with our max\n\tp := wikimg.NewPuller(max)\n\n\t\/\/ Create a buffered channel for communicating between image\n\t\/\/ puller loop and workers\n\timgURLs := make(chan string, buffer)\n\n\twg := sync.WaitGroup{}\n\n\tfor i := 0; i < workers; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tfor imgURL := range imgURLs {\n\t\t\t\t\/\/ Get the top color in this image\n\t\t\t\tcolor, err := wikimg.OneColor(imgURL)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t\/\/ Print color to the terminal\n\t\t\t\tfmt.Printf(fmtSpec, color.XTermCode, \"\")\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\t\/\/ Loop to retrieve more images\n\tfor {\n\t\timgURL, err := p.Next()\n\n\t\tif err == wikimg.EndOfResults {\n\t\t\t\/\/ Break from loop when end of results is reached\n\t\t\tbreak\n\n\t\t} else if err != nil {\n\t\t\t\/\/ Log error and continue getting URLs\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\n\t\timgURLs <- imgURL\n\t}\n\tclose(imgURLs)\n\twg.Wait()\n\n}\nspacing fixpackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\n\t\"github.com\/brnstz\/routine\/wikimg\"\n)\n\nvar (\n\t\/\/ print a blank line with the given 256 ANSI color\n\tfmtSpec = \"\\x1b[30;48;5;%dm%-80s\\x1b[0m\\n\"\n)\n\nfunc main() {\n\tvar max, workers, buffer int\n\n\tflag.IntVar(&max, \"max\", 10, \"maximum number of images to retrieve\")\n\tflag.IntVar(&workers, \"workers\", 5, \"number of background workers\")\n\tflag.IntVar(&buffer, \"buffer\", 0, \"buffer size of image channel\")\n\tflag.Parse()\n\n\t\/\/ Create a new image puller with our max\n\tp := wikimg.NewPuller(max)\n\n\t\/\/ Create a buffered channel for communicating between image\n\t\/\/ puller loop and workers\n\timgURLs := make(chan string, buffer)\n\n\twg := sync.WaitGroup{}\n\n\tfor i := 0; i < workers; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tfor imgURL := range imgURLs {\n\t\t\t\t\/\/ Get the top color in this image\n\t\t\t\tcolor, err := wikimg.OneColor(imgURL)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t\/\/ Print color to the terminal\n\t\t\t\tfmt.Printf(fmtSpec, color.XTermCode, \"\")\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\t\/\/ Loop to retrieve more images\n\tfor {\n\t\timgURL, err := p.Next()\n\n\t\tif err == wikimg.EndOfResults {\n\t\t\t\/\/ Break from loop when end of results is reached\n\t\t\tbreak\n\n\t\t} else if err != nil {\n\t\t\t\/\/ Log error and continue getting URLs\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\n\t\timgURLs <- imgURL\n\t}\n\tclose(imgURLs)\n\twg.Wait()\n}\n<|endoftext|>"} {"text":"\/*\nReleased under MIT License = connectAttempts { \/\/ -1 for infinite attempts\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(delay()) \/\/ Increased delay to randomize network load\n\t}\n\treturn\n}\n\n\/\/ ARInGO represents one ARI connection\/application\ntype ARInGO struct {\n\thttpClient *http.Client\n\twsUrl string\n\twsOrigin string\n\tws *websocket.Conn\n\treconnects int\n\twsMux *sync.RWMutex\n\tevChannel chan *json.RawMessage \/\/ Events coming from Asterisk are posted here\n\terrChannel chan error \/\/ Errors are posted here\n\twsListenerExit chan struct{} \/\/ Signal dispatcher to stop listening\n\twsListenerMux *sync.Mutex \/\/ Use it to get access to wsListenerExit recreation\n}\n\n\/\/ wsDispatcher listens for JSON rawMessages and stores them into the evChannel\nfunc (ari *ARInGO) wsEventListener(chanExit chan struct{}) {\n\tfor {\n\t\tselect {\n\t\tcase <-chanExit:\n\t\t\tbreak\n\t\tdefault:\n\t\t\tvar data json.RawMessage\n\t\t\tif err := websocket.JSON.Receive(ari.ws, &data); err != nil { \/\/ ToDo: Add reconnects here\n\t\t\t\tari.disconnect()\n\t\t\t\tari.errChannel <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tari.evChannel <- &data\n\t\t}\n\t}\n}\n\n\/\/ connect connects to Asterisk Websocket and starts listener\nfunc (ari *ARInGO) connect() (err error) {\n\tari.wsMux.Lock()\n\tdefer ari.wsMux.Unlock()\n\tari.ws, err = websocket.Dial(ari.wsUrl, \"\", ari.wsOrigin)\n\tif err != nil {\n\t\treturn\n\t}\n\t\/\/ Connected, start listener\n\tari.wsListenerMux.Lock()\n\tif ari.wsListenerExit != nil {\n\t\tclose(ari.wsListenerExit) \/\/ Order previous listener to stop before proceeding\n\t}\n\tari.wsListenerExit = make(chan struct{})\n\tgo ari.wsEventListener(ari.wsListenerExit)\n\tari.wsListenerMux.Unlock()\n\treturn nil\n}\n\nfunc (ari *ARInGO) disconnect() error {\n\tari.wsListenerMux.Lock()\n\tclose(ari.wsListenerExit) \/\/ Order previous listener to stop\n\tari.wsListenerMux.Unlock()\n\treturn ari.ws.Close()\n}\n\n\/\/ Call represents one REST call to Asterisk using httpClient\n\/\/ Returns a http.Response so we can process additional data out of it\nfunc (ari *ARInGO) Call(url string, data url.Values, resp *http.Response) error {\n\tif reply, err := ari.httpClient.PostForm(url, data); err != nil {\n\t\treturn err\n\t} else {\n\t\t*resp = *reply\n\t}\n\treturn nil\n}\nUse map[string]interface{} to represent events coming from Asterisk\/*\nReleased under MIT License = connectAttempts { \/\/ -1 for infinite attempts\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(delay()) \/\/ Increased delay to randomize network load\n\t}\n\treturn\n}\n\n\/\/ ARInGO represents one ARI connection\/application\ntype ARInGO struct {\n\thttpClient *http.Client\n\twsUrl string\n\twsOrigin string\n\tws *websocket.Conn\n\treconnects int\n\twsMux *sync.RWMutex\n\tevChannel chan map[string]interface{} \/\/ Events coming from Asterisk are posted here\n\terrChannel chan error \/\/ Errors are posted here\n\twsListenerExit chan struct{} \/\/ Signal dispatcher to stop listening\n\twsListenerMux *sync.Mutex \/\/ Use it to get access to wsListenerExit recreation\n}\n\n\/\/ wsDispatcher listens for JSON rawMessages and stores them into the evChannel\nfunc (ari *ARInGO) wsEventListener(chanExit chan struct{}) {\n\tfor {\n\t\tselect {\n\t\tcase <-chanExit:\n\t\t\tbreak\n\t\tdefault:\n\t\t\tvar ev map[string]interface{}\n\t\t\tif err := websocket.JSON.Receive(ari.ws, &ev); err != nil { \/\/ ToDo: Add reconnects here\n\t\t\t\tari.disconnect()\n\t\t\t\tari.errChannel <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tari.evChannel <- ev\n\t\t}\n\t}\n}\n\n\/\/ connect connects to Asterisk Websocket and starts listener\nfunc (ari *ARInGO) connect() (err error) {\n\tari.wsMux.Lock()\n\tdefer ari.wsMux.Unlock()\n\tari.ws, err = websocket.Dial(ari.wsUrl, \"\", ari.wsOrigin)\n\tif err != nil {\n\t\treturn\n\t}\n\t\/\/ Connected, start listener\n\tari.wsListenerMux.Lock()\n\tif ari.wsListenerExit != nil {\n\t\tclose(ari.wsListenerExit) \/\/ Order previous listener to stop before proceeding\n\t}\n\tari.wsListenerExit = make(chan struct{})\n\tgo ari.wsEventListener(ari.wsListenerExit)\n\tari.wsListenerMux.Unlock()\n\treturn nil\n}\n\nfunc (ari *ARInGO) disconnect() error {\n\tari.wsListenerMux.Lock()\n\tclose(ari.wsListenerExit) \/\/ Order previous listener to stop\n\tari.wsListenerMux.Unlock()\n\treturn ari.ws.Close()\n}\n\n\/\/ Call represents one REST call to Asterisk using httpClient\n\/\/ Returns a http.Response so we can process additional data out of it\nfunc (ari *ARInGO) Call(url string, data url.Values, resp *http.Response) error {\n\tif reply, err := ari.httpClient.PostForm(url, data); err != nil {\n\t\treturn err\n\t} else {\n\t\t*resp = *reply\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/\/ package assert provides convenience assert methods to complement\n\/\/ the built in go testing library. It's intended to add onto standard\n\/\/ Go tests. Example usage:\n\/\/\tfunc TestSomething(t *testing.T) {\n\/\/\t\ti, err := doSomething()\n\/\/ assert.NoErr(err)\n\/\/ assert.Equal(i, 123, \"returned integer\")\n\/\/ }\npackage assert\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"testing\"\n\t\"reflect\"\n)\n\n\/\/ callerStr returns a string representation of the code numFrames stack\n\/\/ frames above the code that called callerStr\nfunc callerStr(numFrames int) string {\n\t_, file, line, _ := runtime.Caller(1 + numFrames)\n\treturn fmt.Sprintf(\"%s:%d\", file, line)\n}\n\n\/\/ callerStrErrorf calls t.Errorf with fmtStr and vals in it, prefixed\n\/\/ by a callerStr representation of the code numFrames above the caller of\n\/\/ this function\nfunc callerStrf(numFrames int, fmtStr string, vals ...interface{}) string {\n\torigStr := fmt.Sprintf(fmtStr, vals...)\n\treturn fmt.Sprintf(\"%s: %s\", callerStr(1+numFrames), origStr)\n}\n\n\/\/ True calls t.Errorf if the provided bool is false, does nothing\n\/\/ otherwise\nfunc True(t *testing.T, b bool, fmtStr string, vals ...interface{}) {\n\tif !b {\n\t\tt.Errorf(callerStrf(1, fmtStr, vals...))\n\t}\n}\n\n\/\/ False is the equivalent of True(t, !b, fmtStr, vals...)\nfunc False(t *testing.T, b bool, fmtStr string, vals ...interface{}) {\n\tif b {\n\t\tt.Errorf(callerStrf(1, fmtStr, vals...))\n\t}\n}\n\n\/\/ Nil calls t.Errorf if i is not nil\nfunc Nil(t *testing.T, i interface{}, fmtStr string, vals ...interface{}) {\n\tif i != nil {\n\t\tt.Errorf(callerStrf(1, fmtStr, vals...))\n\t}\n}\n\n\/\/ NoErr calls t.Errorf if e is not nil\nfunc NoErr(t *testing.T, e error) {\n\tif e != nil {\n\t\tt.Errorf(callerStrf(1, \"expected no error but got %s\", e))\n\t}\n}\n\n\/\/ Err calls t.Errorf if expected is not equal to actual\nfunc Err(t *testing.T, expected error, actual error) {\n\tif expected != actual {\n\t\tt.Errorf(callerStrf(1, \"expected error %s but got %s\", expected, actual))\n\t}\n}\n\n\/\/ Equal ensures that the actual value returned from a test was equal to an\n\/\/ expected. it uses reflect.DeepEqual to do so.\n\/\/ name is the name used to describe the values being compared.\n\/\/ it's used in the error string if actual != expected\nfunc Equal(t *testing.T, actual, expected interface{}, noun string) {\n\tif !reflect.DeepEqual(actual, expected) {\n\t\tt.Errorf(callerStrf(1, \"actual %s [%+v] != expected %s [%+v]\", noun, actual, noun, expected))\n\t}\n}\nfixing func Nil Fixes #3\/\/ package assert provides convenience assert methods to complement\n\/\/ the built in go testing library. It's intended to add onto standard\n\/\/ Go tests. Example usage:\n\/\/\tfunc TestSomething(t *testing.T) {\n\/\/\t\ti, err := doSomething()\n\/\/ assert.NoErr(err)\n\/\/ assert.Equal(i, 123, \"returned integer\")\n\/\/ }\npackage assert\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"testing\"\n\t\"reflect\"\n)\n\n\/\/ callerStr returns a string representation of the code numFrames stack\n\/\/ frames above the code that called callerStr\nfunc callerStr(numFrames int) string {\n\t_, file, line, _ := runtime.Caller(1 + numFrames)\n\treturn fmt.Sprintf(\"%s:%d\", file, line)\n}\n\n\/\/ callerStrErrorf calls t.Errorf with fmtStr and vals in it, prefixed\n\/\/ by a callerStr representation of the code numFrames above the caller of\n\/\/ this function\nfunc callerStrf(numFrames int, fmtStr string, vals ...interface{}) string {\n\torigStr := fmt.Sprintf(fmtStr, vals...)\n\treturn fmt.Sprintf(\"%s: %s\", callerStr(1+numFrames), origStr)\n}\n\n\/\/ True calls t.Errorf if the provided bool is false, does nothing\n\/\/ otherwise\nfunc True(t *testing.T, b bool, fmtStr string, vals ...interface{}) {\n\tif !b {\n\t\tt.Errorf(callerStrf(1, fmtStr, vals...))\n\t}\n}\n\n\/\/ False is the equivalent of True(t, !b, fmtStr, vals...)\nfunc False(t *testing.T, b bool, fmtStr string, vals ...interface{}) {\n\tif b {\n\t\tt.Errorf(callerStrf(1, fmtStr, vals...))\n\t}\n}\n\n\/\/ Nil calls t.Errorf if i is not nil\nfunc Nil(t *testing.T, i interface{}, fmtStr string, vals ...interface{}) {\n\tif !reflect.DeepEqual(i, nil) {\n\t\tt.Errorf(callerStrf(1, fmtStr, vals...))\n\t}\n}\n\n\/\/ NoErr calls t.Errorf if e is not nil\nfunc NoErr(t *testing.T, e error) {\n\tif e != nil {\n\t\tt.Errorf(callerStrf(1, \"expected no error but got %s\", e))\n\t}\n}\n\n\/\/ Err calls t.Errorf if expected is not equal to actual\nfunc Err(t *testing.T, expected error, actual error) {\n\tif expected != actual {\n\t\tt.Errorf(callerStrf(1, \"expected error %s but got %s\", expected, actual))\n\t}\n}\n\n\/\/ Equal ensures that the actual value returned from a test was equal to an\n\/\/ expected. it uses reflect.DeepEqual to do so.\n\/\/ name is the name used to describe the values being compared.\n\/\/ it's used in the error string if actual != expected\nfunc Equal(t *testing.T, actual, expected interface{}, noun string) {\n\tif !reflect.DeepEqual(actual, expected) {\n\t\tt.Errorf(callerStrf(1, \"actual %s [%+v] != expected %s [%+v]\", noun, actual, noun, expected))\n\t}\n}\n<|endoftext|>"} {"text":"package money\n\nimport (\n\t\"fmt\"\n\t\"math\/big\"\n\t\"strings\"\n\n\t\"github.com\/juju\/errors\"\n)\n\n\/\/ Money represents a monetary value\ntype Money struct {\n\trat *big.Rat\n}\n\n\/\/ New creates a new instance with a zero value.\nfunc New() *Money {\n\treturn &Money{\n\t\trat: big.NewRat(0, 1),\n\t}\n}\n\n\/\/ Parse a string to create a new money value. It can read `XX.YY` and `XX,YY`.\nfunc Parse(s string) (*Money, error) {\n\ts = strings.Replace(s, \",\", \".\", -1)\n\n\trat := new(big.Rat)\n\tif _, err := fmt.Sscan(s, rat); err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\treturn &Money{rat}, nil\n}\n\n\/\/ Cents returns the value with cents precision (2 decimal places) as a number.\nfunc (money *Money) Cents() int64 {\n\tcents := big.NewInt(100)\n\n\tv := money.rat.Num()\n\tv.Mul(v, cents)\n\tv.Quo(v, money.rat.Denom())\n\n\treturn v.Int64()\n}\n\n\/\/ Format the money value with a specific decimal precision.\nfunc (money *Money) Format(prec int) string {\n\treturn money.rat.FloatString(prec)\n}\n\n\/\/ Mul multiplies the money value n times and returns the result.\nfunc (money *Money) Mul(n int64) *Money {\n\tb := big.NewRat(n, 1)\n\tresult := New()\n\tresult.rat.Mul(money.rat, b)\n\treturn result\n}\n\n\/\/ Add two money values together and returns the result.\nfunc (money *Money) Add(other *Money) *Money {\n\tresult := New()\n\tresult.rat.Add(money.rat, other.rat)\n\treturn result\n}\n\n\/\/ Sub subtracts two money values and returns the result.\nfunc (money *Money) Sub(other *Money) *Money {\n\tresult := New()\n\tresult.rat.Sub(money.rat, other.rat)\n\treturn result\n}\n\n\/\/ Div divides two money values and returns the result.\nfunc (money *Money) Div(other *Money) *Money {\n\tresult := New()\n\tresult.rat.Quo(money.rat, other.rat)\n\treturn result\n}\n\n\/\/ LessThan returns true if a money value is less than the other.\nfunc (money *Money) LessThan(other *Money) bool {\n\treturn money.rat.Cmp(other.rat) == -1\n}\nParse an empty money value as zero.package money\n\nimport (\n\t\"fmt\"\n\t\"math\/big\"\n\t\"strings\"\n\n\t\"github.com\/juju\/errors\"\n)\n\n\/\/ Money represents a monetary value\ntype Money struct {\n\trat *big.Rat\n}\n\n\/\/ New creates a new instance with a zero value.\nfunc New() *Money {\n\treturn &Money{\n\t\trat: big.NewRat(0, 1),\n\t}\n}\n\n\/\/ Parse a string to create a new money value. It can read `XX.YY` and `XX,YY`.\n\/\/ An empty string is parsed as zero.\nfunc Parse(s string) (*Money, error) {\n\tif len(s) == 0 {\n\t\treturn New(), nil\n\t}\n\n\ts = strings.Replace(s, \",\", \".\", -1)\n\n\trat := new(big.Rat)\n\tif _, err := fmt.Sscan(s, rat); err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\treturn &Money{rat}, nil\n}\n\n\/\/ Cents returns the value with cents precision (2 decimal places) as a number.\nfunc (money *Money) Cents() int64 {\n\tcents := big.NewInt(100)\n\n\tv := money.rat.Num()\n\tv.Mul(v, cents)\n\tv.Quo(v, money.rat.Denom())\n\n\treturn v.Int64()\n}\n\n\/\/ Format the money value with a specific decimal precision.\nfunc (money *Money) Format(prec int) string {\n\treturn money.rat.FloatString(prec)\n}\n\n\/\/ Mul multiplies the money value n times and returns the result.\nfunc (money *Money) Mul(n int64) *Money {\n\tb := big.NewRat(n, 1)\n\tresult := New()\n\tresult.rat.Mul(money.rat, b)\n\treturn result\n}\n\n\/\/ Add two money values together and returns the result.\nfunc (money *Money) Add(other *Money) *Money {\n\tresult := New()\n\tresult.rat.Add(money.rat, other.rat)\n\treturn result\n}\n\n\/\/ Sub subtracts two money values and returns the result.\nfunc (money *Money) Sub(other *Money) *Money {\n\tresult := New()\n\tresult.rat.Sub(money.rat, other.rat)\n\treturn result\n}\n\n\/\/ Div divides two money values and returns the result.\nfunc (money *Money) Div(other *Money) *Money {\n\tresult := New()\n\tresult.rat.Quo(money.rat, other.rat)\n\treturn result\n}\n\n\/\/ LessThan returns true if a money value is less than the other.\nfunc (money *Money) LessThan(other *Money) bool {\n\treturn money.rat.Cmp(other.rat) == -1\n}\n<|endoftext|>"} {"text":"Added indentation to result JSON<|endoftext|>"} {"text":"disable heap check<|endoftext|>"} {"text":"package iface\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\/exec\"\n\t\"strconv\"\n\n\t\"github.com\/songgao\/water\"\n)\n\ntype Iface struct {\n\tname string\n\tip string\n\tmtu int\n\tifce *water.Interface\n}\n\nfunc New(name, ip string, mtu int) *Iface {\n\treturn &Iface{\n\t\tname: name,\n\t\tip: ip,\n\t\tmtu: mtu,\n\t}\n}\n\nfunc (i *Iface) Start() error {\n\tip, netIP, err := net.ParseCIDR(i.ip)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse IP err: %s\", err)\n\t}\n\tconfig := water.Config{\n\t\tDeviceType: water.TUN,\n\t}\n\t\/\/config.Name = i.name\n\ti.ifce, err = water.New(config)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"iface create err: %s\", err)\n\t}\n\tmask := netIP.Mask\n\tnetmask := fmt.Sprintf(\"%d.%d.%d.%d\", mask[0], mask[1], mask[2], mask[3])\n\tlog.Printf(\"iface name: %s\", i.Name())\n\tlog.Printf(\"ip: %s\", ip.String())\n\tcmd := exec.Command(\"ifconfig\", i.Name(),\n\t\tip.String(), \"netmask\", netmask,\n\t\t\"mtu\", strconv.Itoa(i.mtu), \"up\")\n\toutput, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"ifconfig run err: %s %s\", err, string(output))\n\t}\n\treturn nil\n}\n\nfunc (i *Iface) Name() string {\n\treturn i.ifce.Name()\n}\n\nfunc (i *Iface) Read(pkt PacketIP) (int, error) {\n\treturn i.ifce.Read(pkt)\n}\n\nfunc (i *Iface) Write(pkt PacketIP) (int, error) {\n\treturn i.ifce.Write(pkt)\n}\nplatform fixpackage iface\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\/exec\"\n\t\"strconv\"\n\n\t\"github.com\/songgao\/water\"\n)\n\nfunc (i *Iface) Start() error {\n\tip, netIP, err := net.ParseCIDR(i.ip)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse IP err: %s\", err)\n\t}\n\tconfig := water.Config{\n\t\tDeviceType: water.TUN,\n\t}\n\t\/\/config.Name = i.name\n\ti.ifce, err = water.New(config)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"iface create err: %s\", err)\n\t}\n\tmask := netIP.Mask\n\tnetmask := fmt.Sprintf(\"%d.%d.%d.%d\", mask[0], mask[1], mask[2], mask[3])\n\tlog.Printf(\"iface name: %s\", i.Name())\n\tlog.Printf(\"ip: %s\", ip.String())\n\tcmd := exec.Command(\"ifconfig\", i.Name(),\n\t\tip.String(), \"netmask\", netmask,\n\t\t\"mtu\", strconv.Itoa(i.mtu), \"up\")\n\toutput, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"ifconfig run err: %s %s\", err, string(output))\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"package rpm\n\nimport (\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestReadRPMFile(t *testing.T) {\n\tdir := \".\/rpms\"\n\n\t\/\/ list RPM files\n\tfiles, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\tvalid := 0\n\tfor _, f := range files {\n\t\tif strings.HasSuffix(f.Name(), \".rpm\") {\n\t\t\tpath := filepath.Join(dir, f.Name())\n\t\t\t_, err := OpenPackage(path)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Error loading RPM file %s: %s\", f.Name(), err)\n\t\t\t} else {\n\t\t\t\tvalid++\n\t\t\t}\n\t\t}\n\t}\n\n\tif valid == 0 {\n\t\tt.Errorf(\"No RPM files found for testing with in %s\", dir)\n\t} else {\n\t\tt.Logf(\"Validated %d RPM files\", valid)\n\t}\n}\nAdded envvar to package tests for rpm file locationpackage rpm\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestReadRPMFile(t *testing.T) {\n\t\/\/ get a directory full of rpms from RPM_DIR environment variable\n\tdir := os.Getenv(\"RPM_DIR\")\n\tif dir == \"\" {\n\t\tt.Fatalf(\"$RPM_DIR is not set.\")\n\t}\n\n\t\/\/ list RPM files\n\tfiles, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\tt.Fatalf(err.Error())\n\t}\n\n\tvalid := 0\n\tfor _, f := range files {\n\t\tif strings.HasSuffix(f.Name(), \".rpm\") {\n\t\t\tpath := filepath.Join(dir, f.Name())\n\t\t\t_, err := OpenPackage(path)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Error loading RPM file %s: %s\", f.Name(), err)\n\t\t\t} else {\n\t\t\t\tvalid++\n\t\t\t}\n\t\t}\n\t}\n\n\tif valid == 0 {\n\t\tt.Errorf(\"No RPM files found for testing with in %s\", dir)\n\t} else {\n\t\tt.Logf(\"Validated %d RPM files\", valid)\n\t}\n}\n<|endoftext|>"} {"text":"\/\/ IronMQ (elastic message queue) client library\npackage mq\n\nimport (\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com\/iron-io\/iron_go\/api\"\n\t\"github.com\/iron-io\/iron_go\/config\"\n)\n\ntype Queue struct {\n\tSettings config.Settings\n\tName string\n}\n\ntype QueueSubscriber struct {\n\tURL string `json:\"url\"`\n}\n\ntype QueueInfo struct {\n\tId string `json:\"id,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tSize int `json:\"size,omitempty\"`\n\tReserved int `json:\"reserved,omitempty\"`\n\tTotalMessages int `json:\"total_messages,omitempty\"`\n\tMaxReqPerMinute int `json:\"max_req_per_minute,omitempty\"`\n\tSubscribers []QueueSubscriber `json:\"subscribers,omitempty\"`\n\tPushType string `json:\"push_type,omitempty\"`\n}\n\ntype Message struct {\n\tId string `json:\"id,omitempty\"`\n\tBody string `json:\"body\"`\n\t\/\/ Timeout is the amount of time in seconds allowed for processing the\n\t\/\/ message.\n\tTimeout int64 `json:\"timeout,omitempty\"`\n\t\/\/ Delay is the amount of time in seconds to wait before adding the message\n\t\/\/ to the queue.\n\tDelay int64 `json:\"delay,omitempty\"`\n\tq Queue\n}\n\ntype PushStatus struct {\n\tRetried int `json:\"retried\"`\n\tStatusCode int `json:\"status_code\"`\n\tStatus string `json:\"status\"`\n}\n\ntype Subscriber struct {\n\tRetried int `json:\"retried\"`\n\tStatusCode int `json:\"status_code\"`\n\tStatus string `json:\"status\"`\n\tURLs []QueueSubscriber `json:\"urls\"`\n}\n\nfunc New(queueName string) *Queue {\n\treturn &Queue{Settings: config.Config(\"iron_mq\"), Name: queueName}\n}\n\nfunc (q Queue) queues(s ...string) *api.URL { return api.Action(q.Settings, \"queues\", s...) }\n\nfunc (q Queue) ListQueues(page, perPage int) (queues []Queue, err error) {\n\tout := []struct {\n\t\tId string\n\t\tProject_id string\n\t\tName string\n\t}{}\n\n\terr = q.queues().\n\t\tQueryAdd(\"page\", \"%d\", page).\n\t\tQueryAdd(\"per_page\", \"%d\", perPage).\n\t\tReq(\"GET\", nil, &out)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tqueues = make([]Queue, 0, len(out))\n\tfor _, item := range out {\n\t\tqueues = append(queues, Queue{\n\t\t\tSettings: q.Settings,\n\t\t\tName: item.Name,\n\t\t})\n\t}\n\n\treturn\n}\n\nfunc (q Queue) Info() (QueueInfo, error) {\n\tqi := QueueInfo{}\n\terr := q.queues(q.Name).Req(\"GET\", nil, &qi)\n\treturn qi, err\n}\n\nfunc (q Queue) Subscribe(pushType string, subscribers ...string) (err error) {\n\tin := QueueInfo{\n\t\tPushType: pushType,\n\t\tSubscribers: make([]QueueSubscriber, len(subscribers)),\n\t}\n\tfor i, subscriber := range subscribers {\n\t\tin.Subscribers[i].URL = subscriber\n\t}\n\treturn q.queues(q.Name).Req(\"POST\", &in, nil)\n}\n\nfunc (q Queue) PushString(body string) (id string, err error) {\n\tids, err := q.PushStrings(body)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn ids[0], nil\n}\n\n\/\/ Push adds one or more messages to the end of the queue using IronMQ's defaults:\n\/\/\ttimeout - 60 seconds\n\/\/\tdelay - none\n\/\/\n\/\/ Identical to PushMessages with Message{Timeout: 60, Delay: 0}\nfunc (q Queue) PushStrings(bodies ...string) (ids []string, err error) {\n\tmsgs := make([]*Message, 0, len(bodies))\n\tfor _, body := range bodies {\n\t\tmsgs = append(msgs, &Message{Body: body})\n\t}\n\n\treturn q.PushMessages(msgs...)\n}\n\nfunc (q Queue) PushMessage(msg *Message) (id string, err error) {\n\tids, err := q.PushMessages(msg)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn ids[0], nil\n}\n\nfunc (q Queue) PushMessages(msgs ...*Message) (ids []string, err error) {\n\tin := struct {\n\t\tMessages []*Message `json:\"messages\"`\n\t}{Messages: msgs}\n\n\tout := struct {\n\t\tIDs []string `json:\"ids\"`\n\t\tMsg string `json:\"msg\"`\n\t}{}\n\n\terr = q.queues(q.Name, \"messages\").Req(\"POST\", &in, &out)\n\treturn out.IDs, err\n}\n\n\/\/ Get reserves a message from the queue.\n\/\/ The message will not be deleted, but will be reserved until the timeout\n\/\/ expires. If the timeout expires before the message is deleted, the message\n\/\/ will be placed back onto the queue.\n\/\/ As a result, be sure to Delete a message after you're done with it.\nfunc (q Queue) Get() (msg *Message, err error) {\n\tmsgs, err := q.GetN(1)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(msgs) > 0 {\n\t\tmsg = msgs[0]\n\t} else {\n\t\terr = errors.New(\"Couldn't get a single message\")\n\t}\n\n\treturn\n}\n\n\/\/ get N messages\nfunc (q Queue) GetN(n int) (msgs []*Message, err error) {\n\tout := struct {\n\t\tMessages []*Message `json:\"messages\"`\n\t}{}\n\n\terr = q.queues(q.Name, \"messages\").\n\t\tQueryAdd(\"n\", \"%d\", n).\n\t\tReq(\"GET\", nil, &out)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, msg := range out.Messages {\n\t\tmsg.q = q\n\t}\n\n\treturn out.Messages, nil\n}\n\n\/\/ Delete all messages in the queue\nfunc (q Queue) Clear() (err error) {\n\treturn q.queues(q.Name, \"clear\").Req(\"POST\", nil, nil)\n}\n\n\/\/ Delete message from queue\nfunc (q Queue) DeleteMessage(msgId string) (err error) {\n\treturn q.queues(q.Name, \"messages\", msgId).Req(\"DELETE\", nil, nil)\n}\n\n\/\/ Reset timeout of message to keep it reserved\nfunc (q Queue) TouchMessage(msgId string) (err error) {\n\treturn q.queues(q.Name, \"messages\", msgId, \"touch\").Req(\"POST\", nil, nil)\n}\n\n\/\/ Put message back in the queue, message will be available after +delay+ seconds.\nfunc (q Queue) ReleaseMessage(msgId string, delay int64) (err error) {\n\tin := struct {\n\t\tDelay int64 `json:\"delay\"`\n\t}{Delay: delay}\n\treturn q.queues(q.Name, \"messages\", msgId, \"release\").Req(\"POST\", &in, nil)\n}\n\nfunc (q Queue) MessageSubscribers(msgId string) ([]*Subscriber, error) {\n\tout := struct {\n\t\tSubscribers []*Subscriber `json:\"subscribers\"`\n\t}{}\n\terr := q.queues(q.Name, \"messages\", msgId, \"subscribers\").Req(\"GET\", nil, &out)\n\treturn out.Subscribers, err\n}\n\nfunc (q Queue) MessageSubscribersPollN(msgId string, n int) ([]*Subscriber, error) {\n\tsubs, err := q.MessageSubscribers(msgId)\n\tfor {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\tsubs, err = q.MessageSubscribers(msgId)\n\t\tif err != nil {\n\t\t\treturn subs, err\n\t\t}\n\t\tif len(subs) >= n && actualPushStatus(subs) {\n\t\t\treturn subs, nil\n\t\t}\n\t}\n\treturn subs, err\n}\n\nfunc actualPushStatus(subs []*Subscriber) bool {\n\tfor _, sub := range subs {\n\t\tif sub.Status == \"queued\" {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\n\/\/ Delete message from queue\nfunc (m Message) Delete() (err error) {\n\treturn m.q.DeleteMessage(m.Id)\n}\n\n\/\/ Reset timeout of message to keep it reserved\nfunc (m Message) Touch() (err error) {\n\treturn m.q.TouchMessage(m.Id)\n}\n\n\/\/ Put message back in the queue, message will be available after +delay+ seconds.\nfunc (m Message) Release(delay int64) (err error) {\n\treturn m.q.ReleaseMessage(m.Id, delay)\n}\n\nfunc (m Message) Subscribers() (interface{}, error) {\n\treturn m.q.MessageSubscribers(m.Id)\n}\nsimplify subscriber\/\/ IronMQ (elastic message queue) client library\npackage mq\n\nimport (\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com\/iron-io\/iron_go\/api\"\n\t\"github.com\/iron-io\/iron_go\/config\"\n)\n\ntype Queue struct {\n\tSettings config.Settings\n\tName string\n}\n\ntype QueueSubscriber struct {\n\tURL string `json:\"url\"`\n}\n\ntype QueueInfo struct {\n\tId string `json:\"id,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tSize int `json:\"size,omitempty\"`\n\tReserved int `json:\"reserved,omitempty\"`\n\tTotalMessages int `json:\"total_messages,omitempty\"`\n\tMaxReqPerMinute int `json:\"max_req_per_minute,omitempty\"`\n\tSubscribers []QueueSubscriber `json:\"subscribers,omitempty\"`\n\tPushType string `json:\"push_type,omitempty\"`\n}\n\ntype Message struct {\n\tId string `json:\"id,omitempty\"`\n\tBody string `json:\"body\"`\n\t\/\/ Timeout is the amount of time in seconds allowed for processing the\n\t\/\/ message.\n\tTimeout int64 `json:\"timeout,omitempty\"`\n\t\/\/ Delay is the amount of time in seconds to wait before adding the message\n\t\/\/ to the queue.\n\tDelay int64 `json:\"delay,omitempty\"`\n\tq Queue\n}\n\ntype PushStatus struct {\n\tRetried int `json:\"retried\"`\n\tStatusCode int `json:\"status_code\"`\n\tStatus string `json:\"status\"`\n}\n\ntype Subscriber struct {\n\tRetried int `json:\"retried\"`\n\tStatusCode int `json:\"status_code\"`\n\tStatus string `json:\"status\"`\n\tURL string `json:\"url\"`\n}\n\nfunc New(queueName string) *Queue {\n\treturn &Queue{Settings: config.Config(\"iron_mq\"), Name: queueName}\n}\n\nfunc (q Queue) queues(s ...string) *api.URL { return api.Action(q.Settings, \"queues\", s...) }\n\nfunc (q Queue) ListQueues(page, perPage int) (queues []Queue, err error) {\n\tout := []struct {\n\t\tId string\n\t\tProject_id string\n\t\tName string\n\t}{}\n\n\terr = q.queues().\n\t\tQueryAdd(\"page\", \"%d\", page).\n\t\tQueryAdd(\"per_page\", \"%d\", perPage).\n\t\tReq(\"GET\", nil, &out)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tqueues = make([]Queue, 0, len(out))\n\tfor _, item := range out {\n\t\tqueues = append(queues, Queue{\n\t\t\tSettings: q.Settings,\n\t\t\tName: item.Name,\n\t\t})\n\t}\n\n\treturn\n}\n\nfunc (q Queue) Info() (QueueInfo, error) {\n\tqi := QueueInfo{}\n\terr := q.queues(q.Name).Req(\"GET\", nil, &qi)\n\treturn qi, err\n}\n\nfunc (q Queue) Subscribe(pushType string, subscribers ...string) (err error) {\n\tin := QueueInfo{\n\t\tPushType: pushType,\n\t\tSubscribers: make([]QueueSubscriber, len(subscribers)),\n\t}\n\tfor i, subscriber := range subscribers {\n\t\tin.Subscribers[i].URL = subscriber\n\t}\n\treturn q.queues(q.Name).Req(\"POST\", &in, nil)\n}\n\nfunc (q Queue) PushString(body string) (id string, err error) {\n\tids, err := q.PushStrings(body)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn ids[0], nil\n}\n\n\/\/ Push adds one or more messages to the end of the queue using IronMQ's defaults:\n\/\/\ttimeout - 60 seconds\n\/\/\tdelay - none\n\/\/\n\/\/ Identical to PushMessages with Message{Timeout: 60, Delay: 0}\nfunc (q Queue) PushStrings(bodies ...string) (ids []string, err error) {\n\tmsgs := make([]*Message, 0, len(bodies))\n\tfor _, body := range bodies {\n\t\tmsgs = append(msgs, &Message{Body: body})\n\t}\n\n\treturn q.PushMessages(msgs...)\n}\n\nfunc (q Queue) PushMessage(msg *Message) (id string, err error) {\n\tids, err := q.PushMessages(msg)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn ids[0], nil\n}\n\nfunc (q Queue) PushMessages(msgs ...*Message) (ids []string, err error) {\n\tin := struct {\n\t\tMessages []*Message `json:\"messages\"`\n\t}{Messages: msgs}\n\n\tout := struct {\n\t\tIDs []string `json:\"ids\"`\n\t\tMsg string `json:\"msg\"`\n\t}{}\n\n\terr = q.queues(q.Name, \"messages\").Req(\"POST\", &in, &out)\n\treturn out.IDs, err\n}\n\n\/\/ Get reserves a message from the queue.\n\/\/ The message will not be deleted, but will be reserved until the timeout\n\/\/ expires. If the timeout expires before the message is deleted, the message\n\/\/ will be placed back onto the queue.\n\/\/ As a result, be sure to Delete a message after you're done with it.\nfunc (q Queue) Get() (msg *Message, err error) {\n\tmsgs, err := q.GetN(1)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(msgs) > 0 {\n\t\tmsg = msgs[0]\n\t} else {\n\t\terr = errors.New(\"Couldn't get a single message\")\n\t}\n\n\treturn\n}\n\n\/\/ get N messages\nfunc (q Queue) GetN(n int) (msgs []*Message, err error) {\n\tout := struct {\n\t\tMessages []*Message `json:\"messages\"`\n\t}{}\n\n\terr = q.queues(q.Name, \"messages\").\n\t\tQueryAdd(\"n\", \"%d\", n).\n\t\tReq(\"GET\", nil, &out)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, msg := range out.Messages {\n\t\tmsg.q = q\n\t}\n\n\treturn out.Messages, nil\n}\n\n\/\/ Delete all messages in the queue\nfunc (q Queue) Clear() (err error) {\n\treturn q.queues(q.Name, \"clear\").Req(\"POST\", nil, nil)\n}\n\n\/\/ Delete message from queue\nfunc (q Queue) DeleteMessage(msgId string) (err error) {\n\treturn q.queues(q.Name, \"messages\", msgId).Req(\"DELETE\", nil, nil)\n}\n\n\/\/ Reset timeout of message to keep it reserved\nfunc (q Queue) TouchMessage(msgId string) (err error) {\n\treturn q.queues(q.Name, \"messages\", msgId, \"touch\").Req(\"POST\", nil, nil)\n}\n\n\/\/ Put message back in the queue, message will be available after +delay+ seconds.\nfunc (q Queue) ReleaseMessage(msgId string, delay int64) (err error) {\n\tin := struct {\n\t\tDelay int64 `json:\"delay\"`\n\t}{Delay: delay}\n\treturn q.queues(q.Name, \"messages\", msgId, \"release\").Req(\"POST\", &in, nil)\n}\n\nfunc (q Queue) MessageSubscribers(msgId string) ([]*Subscriber, error) {\n\tout := struct {\n\t\tSubscribers []*Subscriber `json:\"subscribers\"`\n\t}{}\n\terr := q.queues(q.Name, \"messages\", msgId, \"subscribers\").Req(\"GET\", nil, &out)\n\treturn out.Subscribers, err\n}\n\nfunc (q Queue) MessageSubscribersPollN(msgId string, n int) ([]*Subscriber, error) {\n\tsubs, err := q.MessageSubscribers(msgId)\n\tfor {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t\tsubs, err = q.MessageSubscribers(msgId)\n\t\tif err != nil {\n\t\t\treturn subs, err\n\t\t}\n\t\tif len(subs) >= n && actualPushStatus(subs) {\n\t\t\treturn subs, nil\n\t\t}\n\t}\n\treturn subs, err\n}\n\nfunc actualPushStatus(subs []*Subscriber) bool {\n\tfor _, sub := range subs {\n\t\tif sub.Status == \"queued\" {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\n\/\/ Delete message from queue\nfunc (m Message) Delete() (err error) {\n\treturn m.q.DeleteMessage(m.Id)\n}\n\n\/\/ Reset timeout of message to keep it reserved\nfunc (m Message) Touch() (err error) {\n\treturn m.q.TouchMessage(m.Id)\n}\n\n\/\/ Put message back in the queue, message will be available after +delay+ seconds.\nfunc (m Message) Release(delay int64) (err error) {\n\treturn m.q.ReleaseMessage(m.Id, delay)\n}\n\nfunc (m Message) Subscribers() (interface{}, error) {\n\treturn m.q.MessageSubscribers(m.Id)\n}\n<|endoftext|>"} {"text":"package scamp\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"crypto\/tls\"\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"sync\"\n\t\"sync\/atomic\"\n)\n\n\/\/ Two minute timeout on clients\nvar msgTimeout = time.Second * 120\n\ntype ServiceActionFunc func(*Message, *Client)\ntype ServiceAction struct {\n\tcallback ServiceActionFunc\n\tcrudTags string\n\tversion int\n}\n\ntype Service struct {\n\tserviceSpec string\n\tsector string\n\tname string\n\thumanName string\n\n\tlistener net.Listener\n\tlistenerIP net.IP\n\tlistenerPort int\n\n\tactions map[string]*ServiceAction\n\tisRunning bool\n\n\tclientsM sync.Mutex\n\tclients []*Client\n\n\t\/\/ requests ClientChan\n\n\tcert tls.Certificate\n\tpemCert []byte \/\/ just a copy of what was read off disk at tls cert load time\n\n\t\/\/ stats\n\tstatsCloseChan chan bool\n\tconnectionsAccepted uint64\n}\n\nfunc NewService(sector string, serviceSpec string, humanName string) (serv *Service, err error){\n\tif len(humanName) > 18 {\n\t\terr = fmt.Errorf(\"name `%s` is too long, must be less than 18 bytes\", humanName)\n\t\treturn\n\t}\n\n\tserv = new(Service)\n\tserv.sector = sector\n\tserv.serviceSpec = serviceSpec\n\tserv.humanName = humanName\n\tserv.generateRandomName()\n\n\tserv.actions = make(map[string]*ServiceAction)\n Info.Printf(\"NewService.Actions: %+v\", serv.actions)\n\n\tcrtPath := defaultConfig.ServiceCertPath(serv.humanName)\n\tkeyPath := defaultConfig.ServiceKeyPath(serv.humanName)\n\n\tif crtPath == nil || keyPath == nil {\n\t\terr = fmt.Errorf( \"could not find valid crt\/key pair for service %s (`%s`,`%s`)\", serv.humanName, crtPath, keyPath )\n\t\treturn\n\t}\n\n\t\/\/ Load keypair for tls socket library to use\n\tserv.cert, err = tls.LoadX509KeyPair( string(crtPath), string(keyPath) )\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Load cert in to memory for announce packet writing\n\tserv.pemCert, err = ioutil.ReadFile(string(crtPath))\n\tif err != nil {\n\t\treturn\n\t}\n\tserv.pemCert = bytes.TrimSpace(serv.pemCert)\n\n\t\/\/ Finally, get ready for incoming requests\n\terr = serv.listen()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tserv.statsCloseChan = make(chan bool)\n\tgo PrintStatsLoop(serv, time.Duration(15)*time.Second, serv.statsCloseChan)\n\n\tTrace.Printf(\"done initializing service\")\n\n\treturn\n}\n\n\/\/ TODO: port discovery and interface\/IP discovery should happen here\n\/\/ important to set values so announce packets are correct\nfunc (serv *Service)listen() (err error) {\n\tconfig := &tls.Config{\n\t\tCertificates: []tls.Certificate{ serv.cert },\n\t}\n\n\tInfo.Printf(\"starting service on %s\", serv.serviceSpec)\n\tserv.listener,err = tls.Listen(\"tcp\", serv.serviceSpec, config)\n\tif err != nil {\n\t\treturn err\n\t}\n\taddr := serv.listener.Addr()\n\tInfo.Printf(\"service now listening to %s\", addr.String())\n\n \/\/ TODO: get listenerIP to return 127.0.0.1 or something other than '::'\/nil\n \/\/ serv.listenerIP = serv.listener.Addr().(*net.TCPAddr).IP\n serv.listenerIP, err = IPForAnnouncePacket()\n Trace.Printf(\"serv.listenerIP: `%s`\", serv.listenerIP)\n\n \/\/TODO: remove in production! This is used to force local host in dev\n serv.listenerIP = net.ParseIP(\"127.0.0.1\")\n \/\/ Info.Printf(\"serv.listenerIP(after): %s\", serv.listenerIP)\n if err != nil {\n \treturn\n }\n\n\tserv.listenerPort = serv.listener.Addr().(*net.TCPAddr).Port\n\n\treturn\n}\n\/\/ TODO Register must handle name registration better, currenty appends everything before the last dot \".\"\n\/\/ infornt of all actions\nfunc (serv *Service)Register(name string, callback ServiceActionFunc) (err error) {\n\tif serv.isRunning {\n\t\terr = errors.New(\"cannot register handlers while server is running\")\n\t\treturn\n\t}\n\n\tserv.actions[name] = &ServiceAction {\n\t\tcallback: callback,\n\t\tversion: 1,\n\t}\n Info.Printf(\"\\nactions: %+v\\n\", serv.actions)\n\treturn\n}\n\nfunc (serv *Service)Run() {\n\n\tforLoop:\n\tfor {\n\t\tnetConn,err := serv.listener.Accept()\n\t\tif err != nil {\n\t\t\tInfo.Printf(\"exiting service service Run(): `%s`\", err)\n\t\t\tbreak forLoop\n\t\t}\n\t\tTrace.Printf(\"accepted new connection...\")\n\n\t\tvar tlsConn (*tls.Conn) = (netConn).(*tls.Conn)\n\t\tif tlsConn == nil {\n\t\t\tError.Fatalf(\"could not create tlsConn\")\n\t\t\tbreak forLoop\n\t\t}\n\n\t\tconn := NewConnection(tlsConn,\"service\")\n\t\tclient := NewClient(conn)\n\n\t\tserv.clientsM.Lock()\n\t\tserv.clients = append(serv.clients, client)\n\t\tserv.clientsM.Unlock()\n\n\t\tgo serv.Handle(client)\n\n\t\tatomic.AddUint64(&serv.connectionsAccepted, 1)\n\t}\n\n\tInfo.Printf(\"closing all registered objects\")\n\n\tserv.clientsM.Lock()\n\tdefer serv.clientsM.Unlock()\n\tfor _,client := range serv.clients {\n\t\tclient.Close()\n\t}\n\n\tserv.statsCloseChan <- true\n}\n\nfunc (serv *Service)Handle(client *Client) {\n\tvar action *ServiceAction\n\n\tHandlerLoop:\n\tfor {\n\t\tselect {\n\t\tcase msg,ok := <-client.Incoming():\n\t\t\tif !ok {\n\t\t\t\tbreak HandlerLoop\n\t\t\t}\n\t\t\taction = serv.actions[msg.Action]\n\n\t\t\tif action != nil{\n\t\t\t\t\/\/ yay\n\t\t\t\taction.callback(msg, client)\n\t\t\t} else {\n\t\t\t\tError.Printf(\"do not know how to handle action `%s`\", msg.Action)\n\n\t\t\t\treply := NewMessage()\n\t\t reply.SetMessageType(MESSAGE_TYPE_REPLY)\n\t\t reply.SetEnvelope(ENVELOPE_JSON)\n\t\t reply.SetRequestId(msg.RequestId)\n\t\t reply.Write([]byte(`{\"error\": \"no such action\"`))\n\t\t\t\t_,err := client.Send(reply)\n\t\t\t\tif err != nil {\n\t\t\t\t\tclient.Close()\n\t\t\t\t\tbreak HandlerLoop\n\t\t\t\t}\n\n\t\t\t}\n\t\tcase <- time.After(msgTimeout):\n\t\t\tError.Printf(\"timeout... dying!\")\n\t\t\tbreak HandlerLoop\n\t\t}\n\t}\n\n\tclient.Close()\n\tserv.RemoveClient(client)\n\n\tTrace.Printf(\"done handling client\")\n\n}\n\nfunc (serv *Service)RemoveClient(client *Client) (err error){\n\tserv.clientsM.Lock()\n\tdefer serv.clientsM.Unlock()\n\n\tindex := -1\n\tfor i,entry := range serv.clients {\n\t\tif client == entry {\n\t\t\tindex = i\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif index == -1 {\n\t\tError.Printf(\"tried removing client that wasn't being tracked\")\n\t\treturn fmt.Errorf(\"unknown client\") \/\/ TODO can I get the client's IP?\n\t}\n\n\tclient.Close()\n\tserv.clients = append(serv.clients[:index], serv.clients[index+1:]...)\n\n\treturn nil\n}\n\nfunc (serv *Service)Stop(){\n\t\/\/ Sometimes we Stop() before service after service has been init but before it is started\n\t\/\/ The usual case is a bad config in another plugin\n\tif serv.listener != nil {\n\t\tserv.listener.Close()\n\t}\n}\n\nfunc (serv *Service)MarshalText() (b []byte, err error){\n\tvar buf bytes.Buffer\n\n\tserviceProxy := ServiceAsServiceProxy(serv)\n Info.Printf(\"\\nserviceProxy: %s\\n\", serviceProxy)\n\n\tclassRecord,err := json.Marshal(&serviceProxy) \/\/Marshal is mangling service actions\n\tif err != nil {\n\t\treturn\n\t}\n Info.Printf(\"\\nclassRecord JSON: %s\\n\", classRecord)\n classRecord2 := `'[3,\"sdk_service-KdhNHkxWGxURvzIxV+M1IuYF\",\"channelmodule\",1,2500,\"beepish+tls:\/\/127.0.0.1:48386\",[\"json\"],[[\"SDK.orderpull”,[“order_fetch\",\"\",1]],[\"SDK.inventory\",[\"invpush_transmit\",\"\",1]]],1470865574.128266]'`\n\tsig, err := SignSHA256(classRecord2, serv.cert.PrivateKey.(*rsa.PrivateKey))\n\tif err != nil {\n\t\treturn\n\t}\n\tsigParts := stringToRows(sig, 76)\n\n\tbuf.Write(classRecord)\n\tbuf.WriteString(\"\\n\\n\")\n\tbuf.Write(serv.pemCert)\n\tbuf.WriteString(\"\\n\\n\")\n\t\/\/ buf.WriteString(sig)\n\t\/\/ buf.WriteString(\"\\n\\n\")\n\tfor _,part := range sigParts {\n\t\tbuf.WriteString(part)\n\t\tbuf.WriteString(\"\\n\")\n\t}\n\tbuf.WriteString(\"\\n\")\n\n\tb = buf.Bytes()\n Info.Printf(\"\\n\\nbuffer: %s\\n\\n\", string(b))\n\treturn\n}\n\nfunc stringToRows(input string, rowlen int) (output []string) {\n\toutput = make([]string,0)\n\n\tif len(input) <= 76 {\n\t\toutput = append(output, input)\n\t} else {\n\t\tsubstr := input[:]\n\t\tvar row string\n\t\tvar done bool = false\n\t\tfor {\n\t\t\tif len(substr) > 76 {\n\t\t\t\trow = substr[0:76]\n\t\t\t\tsubstr = substr[76:]\n\t\t\t} else {\n\t\t\t\trow = substr[:]\n\t\t\t\tdone = true\n\t\t\t}\n\t\t\toutput = append(output,row)\n\t\t\tif done {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (serv *Service)generateRandomName() {\n\trandBytes := make([]byte, 18, 18)\n\tread,err := rand.Read(randBytes)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"could not generate all rand bytes needed. only read %d of 18\", read)\n\t\treturn\n\t}\n\tbase64RandBytes := base64.StdEncoding.EncodeToString(randBytes)\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(serv.humanName)\n\tbuffer.WriteString(\"-\")\n\tbuffer.WriteString(base64RandBytes[0:])\n\tserv.name = string(buffer.Bytes())\n}\ndebugging Marshal issuepackage scamp\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"crypto\/tls\"\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"sync\"\n\t\"sync\/atomic\"\n)\n\n\/\/ Two minute timeout on clients\nvar msgTimeout = time.Second * 120\n\ntype ServiceActionFunc func(*Message, *Client)\ntype ServiceAction struct {\n\tcallback ServiceActionFunc\n\tcrudTags string\n\tversion int\n}\n\ntype Service struct {\n\tserviceSpec string\n\tsector string\n\tname string\n\thumanName string\n\n\tlistener net.Listener\n\tlistenerIP net.IP\n\tlistenerPort int\n\n\tactions map[string]*ServiceAction\n\tisRunning bool\n\n\tclientsM sync.Mutex\n\tclients []*Client\n\n\t\/\/ requests ClientChan\n\n\tcert tls.Certificate\n\tpemCert []byte \/\/ just a copy of what was read off disk at tls cert load time\n\n\t\/\/ stats\n\tstatsCloseChan chan bool\n\tconnectionsAccepted uint64\n}\n\nfunc NewService(sector string, serviceSpec string, humanName string) (serv *Service, err error){\n\tif len(humanName) > 18 {\n\t\terr = fmt.Errorf(\"name `%s` is too long, must be less than 18 bytes\", humanName)\n\t\treturn\n\t}\n\n\tserv = new(Service)\n\tserv.sector = sector\n\tserv.serviceSpec = serviceSpec\n\tserv.humanName = humanName\n\tserv.generateRandomName()\n\n\tserv.actions = make(map[string]*ServiceAction)\n Info.Printf(\"NewService.Actions: %+v\", serv.actions)\n\n\tcrtPath := defaultConfig.ServiceCertPath(serv.humanName)\n\tkeyPath := defaultConfig.ServiceKeyPath(serv.humanName)\n\n\tif crtPath == nil || keyPath == nil {\n\t\terr = fmt.Errorf( \"could not find valid crt\/key pair for service %s (`%s`,`%s`)\", serv.humanName, crtPath, keyPath )\n\t\treturn\n\t}\n\n\t\/\/ Load keypair for tls socket library to use\n\tserv.cert, err = tls.LoadX509KeyPair( string(crtPath), string(keyPath) )\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Load cert in to memory for announce packet writing\n\tserv.pemCert, err = ioutil.ReadFile(string(crtPath))\n\tif err != nil {\n\t\treturn\n\t}\n\tserv.pemCert = bytes.TrimSpace(serv.pemCert)\n\n\t\/\/ Finally, get ready for incoming requests\n\terr = serv.listen()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tserv.statsCloseChan = make(chan bool)\n\tgo PrintStatsLoop(serv, time.Duration(15)*time.Second, serv.statsCloseChan)\n\n\tTrace.Printf(\"done initializing service\")\n\n\treturn\n}\n\n\/\/ TODO: port discovery and interface\/IP discovery should happen here\n\/\/ important to set values so announce packets are correct\nfunc (serv *Service)listen() (err error) {\n\tconfig := &tls.Config{\n\t\tCertificates: []tls.Certificate{ serv.cert },\n\t}\n\n\tInfo.Printf(\"starting service on %s\", serv.serviceSpec)\n\tserv.listener,err = tls.Listen(\"tcp\", serv.serviceSpec, config)\n\tif err != nil {\n\t\treturn err\n\t}\n\taddr := serv.listener.Addr()\n\tInfo.Printf(\"service now listening to %s\", addr.String())\n\n \/\/ TODO: get listenerIP to return 127.0.0.1 or something other than '::'\/nil\n \/\/ serv.listenerIP = serv.listener.Addr().(*net.TCPAddr).IP\n serv.listenerIP, err = IPForAnnouncePacket()\n Trace.Printf(\"serv.listenerIP: `%s`\", serv.listenerIP)\n\n \/\/TODO: remove in production! This is used to force local host in dev\n serv.listenerIP = net.ParseIP(\"127.0.0.1\")\n \/\/ Info.Printf(\"serv.listenerIP(after): %s\", serv.listenerIP)\n if err != nil {\n \treturn\n }\n\n\tserv.listenerPort = serv.listener.Addr().(*net.TCPAddr).Port\n\n\treturn\n}\n\/\/ TODO Register must handle name registration better, currenty appends everything before the last dot \".\"\n\/\/ infornt of all actions\nfunc (serv *Service)Register(name string, callback ServiceActionFunc) (err error) {\n\tif serv.isRunning {\n\t\terr = errors.New(\"cannot register handlers while server is running\")\n\t\treturn\n\t}\n\n\tserv.actions[name] = &ServiceAction {\n\t\tcallback: callback,\n\t\tversion: 1,\n\t}\n Info.Printf(\"\\nactions: %+v\\n\", serv.actions)\n\treturn\n}\n\nfunc (serv *Service)Run() {\n\n\tforLoop:\n\tfor {\n\t\tnetConn,err := serv.listener.Accept()\n\t\tif err != nil {\n\t\t\tInfo.Printf(\"exiting service service Run(): `%s`\", err)\n\t\t\tbreak forLoop\n\t\t}\n\t\tTrace.Printf(\"accepted new connection...\")\n\n\t\tvar tlsConn (*tls.Conn) = (netConn).(*tls.Conn)\n\t\tif tlsConn == nil {\n\t\t\tError.Fatalf(\"could not create tlsConn\")\n\t\t\tbreak forLoop\n\t\t}\n\n\t\tconn := NewConnection(tlsConn,\"service\")\n\t\tclient := NewClient(conn)\n\n\t\tserv.clientsM.Lock()\n\t\tserv.clients = append(serv.clients, client)\n\t\tserv.clientsM.Unlock()\n\n\t\tgo serv.Handle(client)\n\n\t\tatomic.AddUint64(&serv.connectionsAccepted, 1)\n\t}\n\n\tInfo.Printf(\"closing all registered objects\")\n\n\tserv.clientsM.Lock()\n\tdefer serv.clientsM.Unlock()\n\tfor _,client := range serv.clients {\n\t\tclient.Close()\n\t}\n\n\tserv.statsCloseChan <- true\n}\n\nfunc (serv *Service)Handle(client *Client) {\n\tvar action *ServiceAction\n\n\tHandlerLoop:\n\tfor {\n\t\tselect {\n\t\tcase msg,ok := <-client.Incoming():\n\t\t\tif !ok {\n\t\t\t\tbreak HandlerLoop\n\t\t\t}\n\t\t\taction = serv.actions[msg.Action]\n\n\t\t\tif action != nil{\n\t\t\t\t\/\/ yay\n\t\t\t\taction.callback(msg, client)\n\t\t\t} else {\n\t\t\t\tError.Printf(\"do not know how to handle action `%s`\", msg.Action)\n\n\t\t\t\treply := NewMessage()\n\t\t reply.SetMessageType(MESSAGE_TYPE_REPLY)\n\t\t reply.SetEnvelope(ENVELOPE_JSON)\n\t\t reply.SetRequestId(msg.RequestId)\n\t\t reply.Write([]byte(`{\"error\": \"no such action\"`))\n\t\t\t\t_,err := client.Send(reply)\n\t\t\t\tif err != nil {\n\t\t\t\t\tclient.Close()\n\t\t\t\t\tbreak HandlerLoop\n\t\t\t\t}\n\n\t\t\t}\n\t\tcase <- time.After(msgTimeout):\n\t\t\tError.Printf(\"timeout... dying!\")\n\t\t\tbreak HandlerLoop\n\t\t}\n\t}\n\n\tclient.Close()\n\tserv.RemoveClient(client)\n\n\tTrace.Printf(\"done handling client\")\n\n}\n\nfunc (serv *Service)RemoveClient(client *Client) (err error){\n\tserv.clientsM.Lock()\n\tdefer serv.clientsM.Unlock()\n\n\tindex := -1\n\tfor i,entry := range serv.clients {\n\t\tif client == entry {\n\t\t\tindex = i\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif index == -1 {\n\t\tError.Printf(\"tried removing client that wasn't being tracked\")\n\t\treturn fmt.Errorf(\"unknown client\") \/\/ TODO can I get the client's IP?\n\t}\n\n\tclient.Close()\n\tserv.clients = append(serv.clients[:index], serv.clients[index+1:]...)\n\n\treturn nil\n}\n\nfunc (serv *Service)Stop(){\n\t\/\/ Sometimes we Stop() before service after service has been init but before it is started\n\t\/\/ The usual case is a bad config in another plugin\n\tif serv.listener != nil {\n\t\tserv.listener.Close()\n\t}\n}\n\nfunc (serv *Service)MarshalText() (b []byte, err error){\n\tvar buf bytes.Buffer\n\n\tserviceProxy := ServiceAsServiceProxy(serv)\n Info.Printf(\"\\nserviceProxy: %s\\n\", serviceProxy)\n\n\tclassRecord,err := json.Marshal(&serviceProxy) \/\/Marshal is mangling service actions\n\tif err != nil {\n\t\treturn\n\t}\n Info.Printf(\"\\nclassRecord JSON: %s\\n\", classRecord)\n classRecord2 := []byte([3,\"sdk_service-KdhNHkxWGxURvzIxV+M1IuYF\",\"channelmodule\",1,2500,\"beepish+tls:\/\/127.0.0.1:48386\",[\"json\"],[[\"SDK.orderpull”,[“order_fetch\",\"\",1]],[\"SDK.inventory\",[\"invpush_transmit\",\"\",1]]],1470865574.128266])\n\tsig, err := SignSHA256(classRecord2, serv.cert.PrivateKey.(*rsa.PrivateKey))\n\tif err != nil {\n\t\treturn\n\t}\n\tsigParts := stringToRows(sig, 76)\n\n\tbuf.Write(classRecord)\n\tbuf.WriteString(\"\\n\\n\")\n\tbuf.Write(serv.pemCert)\n\tbuf.WriteString(\"\\n\\n\")\n\t\/\/ buf.WriteString(sig)\n\t\/\/ buf.WriteString(\"\\n\\n\")\n\tfor _,part := range sigParts {\n\t\tbuf.WriteString(part)\n\t\tbuf.WriteString(\"\\n\")\n\t}\n\tbuf.WriteString(\"\\n\")\n\n\tb = buf.Bytes()\n Info.Printf(\"\\n\\nbuffer: %s\\n\\n\", string(b))\n\treturn\n}\n\nfunc stringToRows(input string, rowlen int) (output []string) {\n\toutput = make([]string,0)\n\n\tif len(input) <= 76 {\n\t\toutput = append(output, input)\n\t} else {\n\t\tsubstr := input[:]\n\t\tvar row string\n\t\tvar done bool = false\n\t\tfor {\n\t\t\tif len(substr) > 76 {\n\t\t\t\trow = substr[0:76]\n\t\t\t\tsubstr = substr[76:]\n\t\t\t} else {\n\t\t\t\trow = substr[:]\n\t\t\t\tdone = true\n\t\t\t}\n\t\t\toutput = append(output,row)\n\t\t\tif done {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (serv *Service)generateRandomName() {\n\trandBytes := make([]byte, 18, 18)\n\tread,err := rand.Read(randBytes)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"could not generate all rand bytes needed. only read %d of 18\", read)\n\t\treturn\n\t}\n\tbase64RandBytes := base64.StdEncoding.EncodeToString(randBytes)\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(serv.humanName)\n\tbuffer.WriteString(\"-\")\n\tbuffer.WriteString(base64RandBytes[0:])\n\tserv.name = string(buffer.Bytes())\n}\n<|endoftext|>"} {"text":"\/*\nSmall tool to try to debug unix.Getdents problems on CIFS mounts\n( https:\/\/github.com\/rfjakob\/gocryptfs\/issues\/483 )\n\nExample output:\n\n$ while sleep 1 ; do .\/getdents \/mnt\/synology\/public\/tmp\/g1 ; done\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=3192, err=\nunix.Getdents fd3: n=0, err=\ntotal 24072 bytes\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=-1, err=no such file or directory\ntotal 16704 bytes\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=3192, err=\nunix.Getdents fd3: n=0, err=\ntotal 24072 bytes\n\n\nFailure looks like this in strace:\n\n[pid 189974] getdents64(6, 0xc000105808, 10000) = -1 ENOENT (No such file or directory)\n*\/\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"golang.org\/x\/sys\/unix\"\n)\n\nconst (\n\tmyName = \"getdents\"\n)\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s PATH\\n\", myName)\n\t\tfmt.Fprintf(os.Stderr, \"Run getdents(2) on PATH in a 100ms loop until we hit an error\\n\")\n\t\tos.Exit(1)\n\t}\n\tflag.Parse()\n\tif flag.NArg() != 1 {\n\t\tflag.Usage()\n\t}\n\tpath := flag.Arg(0)\n\n\ttmp := make([]byte, 10000)\n\tfor i := 1; ; i++ {\n\t\tsum := 0\n\t\tfd, err := unix.Open(path, unix.O_RDONLY, 0)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"%3d: unix.Open returned err=%v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Printf(\"%3d: unix.Getdents: \", i)\n\t\tfor {\n\t\t\tn, err := unix.Getdents(fd, tmp)\n\t\t\tfmt.Printf(\"n=%d; \", n)\n\t\t\tif n <= 0 {\n\t\t\t\tfmt.Printf(\"err=%v; total %d bytes\\n\", err, sum)\n\t\t\t\tif err != nil {\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsum += n\n\t\t}\n\t\tunix.Close(fd)\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n}\ncontrib\/getdents-debug: fix function call missing argument from 22e3eec15302eac28c1a2ac3f9af29c2c9e82a3c\/*\nSmall tool to try to debug unix.Getdents problems on CIFS mounts\n( https:\/\/github.com\/rfjakob\/gocryptfs\/issues\/483 )\n\nExample output:\n\n$ while sleep 1 ; do .\/getdents \/mnt\/synology\/public\/tmp\/g1 ; done\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=3192, err=\nunix.Getdents fd3: n=0, err=\ntotal 24072 bytes\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=-1, err=no such file or directory\ntotal 16704 bytes\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=4176, err=\nunix.Getdents fd3: n=3192, err=\nunix.Getdents fd3: n=0, err=\ntotal 24072 bytes\n\n\nFailure looks like this in strace:\n\n[pid 189974] getdents64(6, 0xc000105808, 10000) = -1 ENOENT (No such file or directory)\n*\/\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"golang.org\/x\/sys\/unix\"\n)\n\nconst (\n\tmyName = \"getdents\"\n)\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s PATH\\n\", myName)\n\t\tfmt.Fprintf(os.Stderr, \"Run getdents(2) on PATH in a 100ms loop until we hit an error\\n\")\n\t\tos.Exit(1)\n\t}\n\tflag.Parse()\n\tif flag.NArg() != 1 {\n\t\tflag.Usage()\n\t}\n\tpath := flag.Arg(0)\n\n\ttmp := make([]byte, 10000)\n\tfor i := 1; ; i++ {\n\t\tsum := 0\n\t\tfd, err := unix.Open(path, unix.O_RDONLY, 0)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"%3d: unix.Open returned err=%v\\n\", i, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Printf(\"%3d: unix.Getdents: \", i)\n\t\tfor {\n\t\t\tn, err := unix.Getdents(fd, tmp)\n\t\t\tfmt.Printf(\"n=%d; \", n)\n\t\t\tif n <= 0 {\n\t\t\t\tfmt.Printf(\"err=%v; total %d bytes\\n\", err, sum)\n\t\t\t\tif err != nil {\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsum += n\n\t\t}\n\t\tunix.Close(fd)\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage v1\n\nimport (\n\t\"encoding\/json\"\n\t\"time\"\n)\n\n\/\/ Duration is a wrapper around time.Duration which supports correct\n\/\/ marshaling to YAML and JSON. In particular, it marshals into strings, which\n\/\/ can be used as map keys in json.\ntype Duration struct {\n\ttime.Duration `protobuf:\"varint,1,opt,name=duration,casttype=time.Duration\"`\n}\n\n\/\/ UnmarshalJSON implements the json.Unmarshaller interface.\nfunc (d *Duration) UnmarshalJSON(b []byte) error {\n\tvar str string\n\terr := json.Unmarshal(b, &str)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpd, err := time.ParseDuration(str)\n\tif err != nil {\n\t\treturn err\n\t}\n\td.Duration = pd\n\treturn nil\n}\n\n\/\/ MarshalJSON implements the json.Marshaler interface.\nfunc (d Duration) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(d.Duration.String())\n}\nAdd OpenAPI scheme methods for metav1.Duration\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage v1\n\nimport (\n\t\"encoding\/json\"\n\t\"time\"\n)\n\n\/\/ Duration is a wrapper around time.Duration which supports correct\n\/\/ marshaling to YAML and JSON. In particular, it marshals into strings, which\n\/\/ can be used as map keys in json.\ntype Duration struct {\n\ttime.Duration `protobuf:\"varint,1,opt,name=duration,casttype=time.Duration\"`\n}\n\n\/\/ UnmarshalJSON implements the json.Unmarshaller interface.\nfunc (d *Duration) UnmarshalJSON(b []byte) error {\n\tvar str string\n\terr := json.Unmarshal(b, &str)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpd, err := time.ParseDuration(str)\n\tif err != nil {\n\t\treturn err\n\t}\n\td.Duration = pd\n\treturn nil\n}\n\n\/\/ MarshalJSON implements the json.Marshaler interface.\nfunc (d Duration) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(d.Duration.String())\n}\n\n\/\/ OpenAPISchemaType is used by the kube-openapi generator when constructing\n\/\/ the OpenAPI spec of this type.\n\/\/\n\/\/ See: https:\/\/github.com\/kubernetes\/kube-openapi\/tree\/master\/pkg\/generators\nfunc (_ Duration) OpenAPISchemaType() []string { return []string{\"string\"} }\n\n\/\/ OpenAPISchemaFormat is used by the kube-openapi generator when constructing\n\/\/ the OpenAPI spec of this type.\nfunc (_ Duration) OpenAPISchemaFormat() string { return \"\" }\n<|endoftext|>"} {"text":"\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage handlers\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/serializer\/streaming\"\n\tutilruntime \"k8s.io\/apimachinery\/pkg\/util\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/watch\"\n\t\"k8s.io\/apiserver\/pkg\/endpoints\/handlers\/negotiation\"\n\t\"k8s.io\/apiserver\/pkg\/endpoints\/metrics\"\n\t\"k8s.io\/apiserver\/pkg\/util\/wsstream\"\n\n\t\"golang.org\/x\/net\/websocket\"\n)\n\n\/\/ nothing will ever be sent down this channel\nvar neverExitWatch <-chan time.Time = make(chan time.Time)\n\n\/\/ timeoutFactory abstracts watch timeout logic for testing\ntype TimeoutFactory interface {\n\tTimeoutCh() (<-chan time.Time, func() bool)\n}\n\n\/\/ realTimeoutFactory implements timeoutFactory\ntype realTimeoutFactory struct {\n\ttimeout time.Duration\n}\n\n\/\/ TimeoutCh returns a channel which will receive something when the watch times out,\n\/\/ and a cleanup function to call when this happens.\nfunc (w *realTimeoutFactory) TimeoutCh() (<-chan time.Time, func() bool) {\n\tif w.timeout == 0 {\n\t\treturn neverExitWatch, func() bool { return false }\n\t}\n\tt := time.NewTimer(w.timeout)\n\treturn t.C, t.Stop\n}\n\n\/\/ serveWatch will serve a watch response.\n\/\/ TODO: the functionality in this method and in WatchServer.Serve is not cleanly decoupled.\nfunc serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration) {\n\tdefer watcher.Stop()\n\n\toptions, err := optionsForTransform(mediaTypeOptions, req)\n\tif err != nil {\n\t\tscope.err(err, w, req)\n\t\treturn\n\t}\n\n\t\/\/ negotiate for the stream serializer from the scope's serializer\n\tserializer, err := negotiation.NegotiateOutputMediaTypeStream(req, scope.Serializer, scope)\n\tif err != nil {\n\t\tscope.err(err, w, req)\n\t\treturn\n\t}\n\tframer := serializer.StreamSerializer.Framer\n\tstreamSerializer := serializer.StreamSerializer.Serializer\n\tencoder := scope.Serializer.EncoderForVersion(streamSerializer, scope.Kind.GroupVersion())\n\tuseTextFraming := serializer.EncodesAsText\n\tif framer == nil {\n\t\tscope.err(fmt.Errorf(\"no framer defined for %q available for embedded encoding\", serializer.MediaType), w, req)\n\t\treturn\n\t}\n\t\/\/ TODO: next step, get back mediaTypeOptions from negotiate and return the exact value here\n\tmediaType := serializer.MediaType\n\tif mediaType != runtime.ContentTypeJSON {\n\t\tmediaType += \";stream=watch\"\n\t}\n\n\t\/\/ locate the appropriate embedded encoder based on the transform\n\tvar embeddedEncoder runtime.Encoder\n\tcontentKind, contentSerializer, transform := targetEncodingForTransform(scope, mediaTypeOptions, req)\n\tif transform {\n\t\tinfo, ok := runtime.SerializerInfoForMediaType(contentSerializer.SupportedMediaTypes(), serializer.MediaType)\n\t\tif !ok {\n\t\t\tscope.err(fmt.Errorf(\"no encoder for %q exists in the requested target %#v\", serializer.MediaType, contentSerializer), w, req)\n\t\t\treturn\n\t\t}\n\t\tembeddedEncoder = contentSerializer.EncoderForVersion(info.Serializer, contentKind.GroupVersion())\n\t} else {\n\t\tembeddedEncoder = scope.Serializer.EncoderForVersion(serializer.Serializer, contentKind.GroupVersion())\n\t}\n\n\tctx := req.Context()\n\n\tserver := &WatchServer{\n\t\tWatching: watcher,\n\t\tScope: scope,\n\n\t\tUseTextFraming: useTextFraming,\n\t\tMediaType: mediaType,\n\t\tFramer: framer,\n\t\tEncoder: encoder,\n\t\tEmbeddedEncoder: embeddedEncoder,\n\n\t\tFixup: func(obj runtime.Object) runtime.Object {\n\t\t\tresult, err := transformObject(ctx, obj, options, mediaTypeOptions, scope, req)\n\t\t\tif err != nil {\n\t\t\t\tutilruntime.HandleError(fmt.Errorf(\"failed to transform object %v: %v\", reflect.TypeOf(obj), err))\n\t\t\t\treturn obj\n\t\t\t}\n\t\t\t\/\/ When we are transformed to a table, use the table options as the state for whether we\n\t\t\t\/\/ should print headers - on watch, we only want to print table headers on the first object\n\t\t\t\/\/ and omit them on subsequent events.\n\t\t\tif tableOptions, ok := options.(*metav1.TableOptions); ok {\n\t\t\t\ttableOptions.NoHeaders = true\n\t\t\t}\n\t\t\treturn result\n\t\t},\n\n\t\tTimeoutFactory: &realTimeoutFactory{timeout},\n\t}\n\n\tserver.ServeHTTP(w, req)\n}\n\n\/\/ WatchServer serves a watch.Interface over a websocket or vanilla HTTP.\ntype WatchServer struct {\n\tWatching watch.Interface\n\tScope *RequestScope\n\n\t\/\/ true if websocket messages should use text framing (as opposed to binary framing)\n\tUseTextFraming bool\n\t\/\/ the media type this watch is being served with\n\tMediaType string\n\t\/\/ used to frame the watch stream\n\tFramer runtime.Framer\n\t\/\/ used to encode the watch stream event itself\n\tEncoder runtime.Encoder\n\t\/\/ used to encode the nested object in the watch stream\n\tEmbeddedEncoder runtime.Encoder\n\t\/\/ used to correct the object before we send it to the serializer\n\tFixup func(runtime.Object) runtime.Object\n\n\tTimeoutFactory TimeoutFactory\n}\n\n\/\/ ServeHTTP serves a series of encoded events via HTTP with Transfer-Encoding: chunked\n\/\/ or over a websocket connection.\nfunc (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tkind := s.Scope.Kind\n\tmetrics.RegisteredWatchers.WithContext(req.Context()).WithLabelValues(kind.Group, kind.Version, kind.Kind).Inc()\n\tdefer metrics.RegisteredWatchers.WithContext(req.Context()).WithLabelValues(kind.Group, kind.Version, kind.Kind).Dec()\n\n\tif wsstream.IsWebSocketRequest(req) {\n\t\tw.Header().Set(\"Content-Type\", s.MediaType)\n\t\twebsocket.Handler(s.HandleWS).ServeHTTP(w, req)\n\t\treturn\n\t}\n\n\tflusher, ok := w.(http.Flusher)\n\tif !ok {\n\t\terr := fmt.Errorf(\"unable to start watch - can't get http.Flusher: %#v\", w)\n\t\tutilruntime.HandleError(err)\n\t\ts.Scope.err(errors.NewInternalError(err), w, req)\n\t\treturn\n\t}\n\n\tframer := s.Framer.NewFrameWriter(w)\n\tif framer == nil {\n\t\t\/\/ programmer error\n\t\terr := fmt.Errorf(\"no stream framing support is available for media type %q\", s.MediaType)\n\t\tutilruntime.HandleError(err)\n\t\ts.Scope.err(errors.NewBadRequest(err.Error()), w, req)\n\t\treturn\n\t}\n\te := streaming.NewEncoder(framer, s.Encoder)\n\n\t\/\/ ensure the connection times out\n\ttimeoutCh, cleanup := s.TimeoutFactory.TimeoutCh()\n\tdefer cleanup()\n\n\t\/\/ begin the stream\n\tw.Header().Set(\"Content-Type\", s.MediaType)\n\tw.Header().Set(\"Transfer-Encoding\", \"chunked\")\n\tw.WriteHeader(http.StatusOK)\n\tflusher.Flush()\n\n\tvar unknown runtime.Unknown\n\tinternalEvent := &metav1.InternalEvent{}\n\toutEvent := &metav1.WatchEvent{}\n\tbuf := &bytes.Buffer{}\n\tch := s.Watching.ResultChan()\n\tdone := req.Context().Done()\n\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\treturn\n\t\tcase <-timeoutCh:\n\t\t\treturn\n\t\tcase event, ok := <-ch:\n\t\t\tif !ok {\n\t\t\t\t\/\/ End of results.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tmetrics.WatchEvents.WithContext(req.Context()).WithLabelValues(kind.Group, kind.Version, kind.Kind).Inc()\n\n\t\t\tobj := s.Fixup(event.Object)\n\t\t\tif err := s.EmbeddedEncoder.Encode(obj, buf); err != nil {\n\t\t\t\t\/\/ unexpected error\n\t\t\t\tutilruntime.HandleError(fmt.Errorf(\"unable to encode watch object %T: %v\", obj, err))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ ContentType is not required here because we are defaulting to the serializer\n\t\t\t\/\/ type\n\t\t\tunknown.Raw = buf.Bytes()\n\t\t\tevent.Object = &unknown\n\t\t\tmetrics.WatchEventsSizes.WithContext(req.Context()).WithLabelValues(kind.Group, kind.Version, kind.Kind).Observe(float64(len(unknown.Raw)))\n\n\t\t\t*outEvent = metav1.WatchEvent{}\n\n\t\t\t\/\/ create the external type directly and encode it. Clients will only recognize the serialization we provide.\n\t\t\t\/\/ The internal event is being reused, not reallocated so its just a few extra assignments to do it this way\n\t\t\t\/\/ and we get the benefit of using conversion functions which already have to stay in sync\n\t\t\t*internalEvent = metav1.InternalEvent(event)\n\t\t\terr := metav1.Convert_v1_InternalEvent_To_v1_WatchEvent(internalEvent, outEvent, nil)\n\t\t\tif err != nil {\n\t\t\t\tutilruntime.HandleError(fmt.Errorf(\"unable to convert watch object: %v\", err))\n\t\t\t\t\/\/ client disconnect.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := e.Encode(outEvent); err != nil {\n\t\t\t\tutilruntime.HandleError(fmt.Errorf(\"unable to encode watch object %T: %v (%#v)\", outEvent, err, e))\n\t\t\t\t\/\/ client disconnect.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(ch) == 0 {\n\t\t\t\tflusher.Flush()\n\t\t\t}\n\n\t\t\tbuf.Reset()\n\t\t}\n\t}\n}\n\n\/\/ HandleWS implements a websocket handler.\nfunc (s *WatchServer) HandleWS(ws *websocket.Conn) {\n\tdefer ws.Close()\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\tdefer utilruntime.HandleCrash()\n\t\t\/\/ This blocks until the connection is closed.\n\t\t\/\/ Client should not send anything.\n\t\twsstream.IgnoreReceives(ws, 0)\n\t\t\/\/ Once the client closes, we should also close\n\t\tclose(done)\n\t}()\n\n\tvar unknown runtime.Unknown\n\tinternalEvent := &metav1.InternalEvent{}\n\tbuf := &bytes.Buffer{}\n\tstreamBuf := &bytes.Buffer{}\n\tch := s.Watching.ResultChan()\n\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\treturn\n\t\tcase event, ok := <-ch:\n\t\t\tif !ok {\n\t\t\t\t\/\/ End of results.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tobj := s.Fixup(event.Object)\n\t\t\tif err := s.EmbeddedEncoder.Encode(obj, buf); err != nil {\n\t\t\t\t\/\/ unexpected error\n\t\t\t\tutilruntime.HandleError(fmt.Errorf(\"unable to encode watch object %T: %v\", obj, err))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ ContentType is not required here because we are defaulting to the serializer\n\t\t\t\/\/ type\n\t\t\tunknown.Raw = buf.Bytes()\n\t\t\tevent.Object = &unknown\n\n\t\t\t\/\/ the internal event will be versioned by the encoder\n\t\t\t\/\/ create the external type directly and encode it. Clients will only recognize the serialization we provide.\n\t\t\t\/\/ The internal event is being reused, not reallocated so its just a few extra assignments to do it this way\n\t\t\t\/\/ and we get the benefit of using conversion functions which already have to stay in sync\n\t\t\toutEvent := &metav1.WatchEvent{}\n\t\t\t*internalEvent = metav1.InternalEvent(event)\n\t\t\terr := metav1.Convert_v1_InternalEvent_To_v1_WatchEvent(internalEvent, outEvent, nil)\n\t\t\tif err != nil {\n\t\t\t\tutilruntime.HandleError(fmt.Errorf(\"unable to convert watch object: %v\", err))\n\t\t\t\t\/\/ client disconnect.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := s.Encoder.Encode(outEvent, streamBuf); err != nil {\n\t\t\t\t\/\/ encoding error\n\t\t\t\tutilruntime.HandleError(fmt.Errorf(\"unable to encode event: %v\", err))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif s.UseTextFraming {\n\t\t\t\tif err := websocket.Message.Send(ws, streamBuf.String()); err != nil {\n\t\t\t\t\t\/\/ Client disconnect.\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err := websocket.Message.Send(ws, streamBuf.Bytes()); err != nil {\n\t\t\t\t\t\/\/ Client disconnect.\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tbuf.Reset()\n\t\t\tstreamBuf.Reset()\n\t\t}\n\t}\n}\nupdate the watch server to use EncoderWithAllocator during object serialization.\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage handlers\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/websocket\"\n\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/serializer\/streaming\"\n\tutilruntime \"k8s.io\/apimachinery\/pkg\/util\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/watch\"\n\t\"k8s.io\/apiserver\/pkg\/endpoints\/handlers\/negotiation\"\n\t\"k8s.io\/apiserver\/pkg\/endpoints\/metrics\"\n\t\"k8s.io\/apiserver\/pkg\/util\/wsstream\"\n)\n\n\/\/ nothing will ever be sent down this channel\nvar neverExitWatch <-chan time.Time = make(chan time.Time)\n\n\/\/ timeoutFactory abstracts watch timeout logic for testing\ntype TimeoutFactory interface {\n\tTimeoutCh() (<-chan time.Time, func() bool)\n}\n\n\/\/ realTimeoutFactory implements timeoutFactory\ntype realTimeoutFactory struct {\n\ttimeout time.Duration\n}\n\n\/\/ TimeoutCh returns a channel which will receive something when the watch times out,\n\/\/ and a cleanup function to call when this happens.\nfunc (w *realTimeoutFactory) TimeoutCh() (<-chan time.Time, func() bool) {\n\tif w.timeout == 0 {\n\t\treturn neverExitWatch, func() bool { return false }\n\t}\n\tt := time.NewTimer(w.timeout)\n\treturn t.C, t.Stop\n}\n\n\/\/ serveWatch will serve a watch response.\n\/\/ TODO: the functionality in this method and in WatchServer.Serve is not cleanly decoupled.\nfunc serveWatch(watcher watch.Interface, scope *RequestScope, mediaTypeOptions negotiation.MediaTypeOptions, req *http.Request, w http.ResponseWriter, timeout time.Duration) {\n\tdefer watcher.Stop()\n\n\toptions, err := optionsForTransform(mediaTypeOptions, req)\n\tif err != nil {\n\t\tscope.err(err, w, req)\n\t\treturn\n\t}\n\n\t\/\/ negotiate for the stream serializer from the scope's serializer\n\tserializer, err := negotiation.NegotiateOutputMediaTypeStream(req, scope.Serializer, scope)\n\tif err != nil {\n\t\tscope.err(err, w, req)\n\t\treturn\n\t}\n\tframer := serializer.StreamSerializer.Framer\n\tstreamSerializer := serializer.StreamSerializer.Serializer\n\tencoder := scope.Serializer.EncoderForVersion(streamSerializer, scope.Kind.GroupVersion())\n\tuseTextFraming := serializer.EncodesAsText\n\tif framer == nil {\n\t\tscope.err(fmt.Errorf(\"no framer defined for %q available for embedded encoding\", serializer.MediaType), w, req)\n\t\treturn\n\t}\n\t\/\/ TODO: next step, get back mediaTypeOptions from negotiate and return the exact value here\n\tmediaType := serializer.MediaType\n\tif mediaType != runtime.ContentTypeJSON {\n\t\tmediaType += \";stream=watch\"\n\t}\n\n\t\/\/ locate the appropriate embedded encoder based on the transform\n\tvar embeddedEncoder runtime.Encoder\n\tcontentKind, contentSerializer, transform := targetEncodingForTransform(scope, mediaTypeOptions, req)\n\tif transform {\n\t\tinfo, ok := runtime.SerializerInfoForMediaType(contentSerializer.SupportedMediaTypes(), serializer.MediaType)\n\t\tif !ok {\n\t\t\tscope.err(fmt.Errorf(\"no encoder for %q exists in the requested target %#v\", serializer.MediaType, contentSerializer), w, req)\n\t\t\treturn\n\t\t}\n\t\tembeddedEncoder = contentSerializer.EncoderForVersion(info.Serializer, contentKind.GroupVersion())\n\t} else {\n\t\tembeddedEncoder = scope.Serializer.EncoderForVersion(serializer.Serializer, contentKind.GroupVersion())\n\t}\n\n\tctx := req.Context()\n\n\tserver := &WatchServer{\n\t\tWatching: watcher,\n\t\tScope: scope,\n\n\t\tUseTextFraming: useTextFraming,\n\t\tMediaType: mediaType,\n\t\tFramer: framer,\n\t\tEncoder: encoder,\n\t\tEmbeddedEncoder: embeddedEncoder,\n\n\t\tFixup: func(obj runtime.Object) runtime.Object {\n\t\t\tresult, err := transformObject(ctx, obj, options, mediaTypeOptions, scope, req)\n\t\t\tif err != nil {\n\t\t\t\tutilruntime.HandleError(fmt.Errorf(\"failed to transform object %v: %v\", reflect.TypeOf(obj), err))\n\t\t\t\treturn obj\n\t\t\t}\n\t\t\t\/\/ When we are transformed to a table, use the table options as the state for whether we\n\t\t\t\/\/ should print headers - on watch, we only want to print table headers on the first object\n\t\t\t\/\/ and omit them on subsequent events.\n\t\t\tif tableOptions, ok := options.(*metav1.TableOptions); ok {\n\t\t\t\ttableOptions.NoHeaders = true\n\t\t\t}\n\t\t\treturn result\n\t\t},\n\n\t\tTimeoutFactory: &realTimeoutFactory{timeout},\n\t}\n\n\tserver.ServeHTTP(w, req)\n}\n\n\/\/ WatchServer serves a watch.Interface over a websocket or vanilla HTTP.\ntype WatchServer struct {\n\tWatching watch.Interface\n\tScope *RequestScope\n\n\t\/\/ true if websocket messages should use text framing (as opposed to binary framing)\n\tUseTextFraming bool\n\t\/\/ the media type this watch is being served with\n\tMediaType string\n\t\/\/ used to frame the watch stream\n\tFramer runtime.Framer\n\t\/\/ used to encode the watch stream event itself\n\tEncoder runtime.Encoder\n\t\/\/ used to encode the nested object in the watch stream\n\tEmbeddedEncoder runtime.Encoder\n\t\/\/ used to correct the object before we send it to the serializer\n\tFixup func(runtime.Object) runtime.Object\n\n\tTimeoutFactory TimeoutFactory\n}\n\n\/\/ ServeHTTP serves a series of encoded events via HTTP with Transfer-Encoding: chunked\n\/\/ or over a websocket connection.\nfunc (s *WatchServer) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tkind := s.Scope.Kind\n\tmetrics.RegisteredWatchers.WithContext(req.Context()).WithLabelValues(kind.Group, kind.Version, kind.Kind).Inc()\n\tdefer metrics.RegisteredWatchers.WithContext(req.Context()).WithLabelValues(kind.Group, kind.Version, kind.Kind).Dec()\n\n\tif wsstream.IsWebSocketRequest(req) {\n\t\tw.Header().Set(\"Content-Type\", s.MediaType)\n\t\twebsocket.Handler(s.HandleWS).ServeHTTP(w, req)\n\t\treturn\n\t}\n\n\tflusher, ok := w.(http.Flusher)\n\tif !ok {\n\t\terr := fmt.Errorf(\"unable to start watch - can't get http.Flusher: %#v\", w)\n\t\tutilruntime.HandleError(err)\n\t\ts.Scope.err(errors.NewInternalError(err), w, req)\n\t\treturn\n\t}\n\n\tframer := s.Framer.NewFrameWriter(w)\n\tif framer == nil {\n\t\t\/\/ programmer error\n\t\terr := fmt.Errorf(\"no stream framing support is available for media type %q\", s.MediaType)\n\t\tutilruntime.HandleError(err)\n\t\ts.Scope.err(errors.NewBadRequest(err.Error()), w, req)\n\t\treturn\n\t}\n\n\tvar e streaming.Encoder\n\tvar memoryAllocator runtime.MemoryAllocator\n\n\tif encoder, supportsAllocator := s.Encoder.(runtime.EncoderWithAllocator); supportsAllocator {\n\t\tmemoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator)\n\t\tdefer runtime.AllocatorPool.Put(memoryAllocator)\n\t\te = streaming.NewEncoderWithAllocator(framer, encoder, memoryAllocator)\n\t} else {\n\t\te = streaming.NewEncoder(framer, s.Encoder)\n\t}\n\n\t\/\/ ensure the connection times out\n\ttimeoutCh, cleanup := s.TimeoutFactory.TimeoutCh()\n\tdefer cleanup()\n\n\t\/\/ begin the stream\n\tw.Header().Set(\"Content-Type\", s.MediaType)\n\tw.Header().Set(\"Transfer-Encoding\", \"chunked\")\n\tw.WriteHeader(http.StatusOK)\n\tflusher.Flush()\n\n\tvar unknown runtime.Unknown\n\tinternalEvent := &metav1.InternalEvent{}\n\toutEvent := &metav1.WatchEvent{}\n\tbuf := &bytes.Buffer{}\n\tch := s.Watching.ResultChan()\n\tdone := req.Context().Done()\n\n\tembeddedEncodeFn := s.EmbeddedEncoder.Encode\n\tif encoder, supportsAllocator := s.EmbeddedEncoder.(runtime.EncoderWithAllocator); supportsAllocator {\n\t\tif memoryAllocator == nil {\n\t\t\t\/\/ don't put the allocator inside the embeddedEncodeFn as that would allocate memory on every call.\n\t\t\t\/\/ instead, we allocate the buffer for the entire watch session and release it when we close the connection.\n\t\t\tmemoryAllocator = runtime.AllocatorPool.Get().(*runtime.Allocator)\n\t\t\tdefer runtime.AllocatorPool.Put(memoryAllocator)\n\t\t}\n\t\tembeddedEncodeFn = func(obj runtime.Object, w io.Writer) error {\n\t\t\treturn encoder.EncodeWithAllocator(obj, w, memoryAllocator)\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\treturn\n\t\tcase <-timeoutCh:\n\t\t\treturn\n\t\tcase event, ok := <-ch:\n\t\t\tif !ok {\n\t\t\t\t\/\/ End of results.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tmetrics.WatchEvents.WithContext(req.Context()).WithLabelValues(kind.Group, kind.Version, kind.Kind).Inc()\n\n\t\t\tobj := s.Fixup(event.Object)\n\t\t\tif err := embeddedEncodeFn(obj, buf); err != nil {\n\t\t\t\t\/\/ unexpected error\n\t\t\t\tutilruntime.HandleError(fmt.Errorf(\"unable to encode watch object %T: %v\", obj, err))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ ContentType is not required here because we are defaulting to the serializer\n\t\t\t\/\/ type\n\t\t\tunknown.Raw = buf.Bytes()\n\t\t\tevent.Object = &unknown\n\t\t\tmetrics.WatchEventsSizes.WithContext(req.Context()).WithLabelValues(kind.Group, kind.Version, kind.Kind).Observe(float64(len(unknown.Raw)))\n\n\t\t\t*outEvent = metav1.WatchEvent{}\n\n\t\t\t\/\/ create the external type directly and encode it. Clients will only recognize the serialization we provide.\n\t\t\t\/\/ The internal event is being reused, not reallocated so its just a few extra assignments to do it this way\n\t\t\t\/\/ and we get the benefit of using conversion functions which already have to stay in sync\n\t\t\t*internalEvent = metav1.InternalEvent(event)\n\t\t\terr := metav1.Convert_v1_InternalEvent_To_v1_WatchEvent(internalEvent, outEvent, nil)\n\t\t\tif err != nil {\n\t\t\t\tutilruntime.HandleError(fmt.Errorf(\"unable to convert watch object: %v\", err))\n\t\t\t\t\/\/ client disconnect.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := e.Encode(outEvent); err != nil {\n\t\t\t\tutilruntime.HandleError(fmt.Errorf(\"unable to encode watch object %T: %v (%#v)\", outEvent, err, e))\n\t\t\t\t\/\/ client disconnect.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif len(ch) == 0 {\n\t\t\t\tflusher.Flush()\n\t\t\t}\n\n\t\t\tbuf.Reset()\n\t\t}\n\t}\n}\n\n\/\/ HandleWS implements a websocket handler.\nfunc (s *WatchServer) HandleWS(ws *websocket.Conn) {\n\tdefer ws.Close()\n\tdone := make(chan struct{})\n\n\tgo func() {\n\t\tdefer utilruntime.HandleCrash()\n\t\t\/\/ This blocks until the connection is closed.\n\t\t\/\/ Client should not send anything.\n\t\twsstream.IgnoreReceives(ws, 0)\n\t\t\/\/ Once the client closes, we should also close\n\t\tclose(done)\n\t}()\n\n\tvar unknown runtime.Unknown\n\tinternalEvent := &metav1.InternalEvent{}\n\tbuf := &bytes.Buffer{}\n\tstreamBuf := &bytes.Buffer{}\n\tch := s.Watching.ResultChan()\n\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\treturn\n\t\tcase event, ok := <-ch:\n\t\t\tif !ok {\n\t\t\t\t\/\/ End of results.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tobj := s.Fixup(event.Object)\n\t\t\tif err := s.EmbeddedEncoder.Encode(obj, buf); err != nil {\n\t\t\t\t\/\/ unexpected error\n\t\t\t\tutilruntime.HandleError(fmt.Errorf(\"unable to encode watch object %T: %v\", obj, err))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ ContentType is not required here because we are defaulting to the serializer\n\t\t\t\/\/ type\n\t\t\tunknown.Raw = buf.Bytes()\n\t\t\tevent.Object = &unknown\n\n\t\t\t\/\/ the internal event will be versioned by the encoder\n\t\t\t\/\/ create the external type directly and encode it. Clients will only recognize the serialization we provide.\n\t\t\t\/\/ The internal event is being reused, not reallocated so its just a few extra assignments to do it this way\n\t\t\t\/\/ and we get the benefit of using conversion functions which already have to stay in sync\n\t\t\toutEvent := &metav1.WatchEvent{}\n\t\t\t*internalEvent = metav1.InternalEvent(event)\n\t\t\terr := metav1.Convert_v1_InternalEvent_To_v1_WatchEvent(internalEvent, outEvent, nil)\n\t\t\tif err != nil {\n\t\t\t\tutilruntime.HandleError(fmt.Errorf(\"unable to convert watch object: %v\", err))\n\t\t\t\t\/\/ client disconnect.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := s.Encoder.Encode(outEvent, streamBuf); err != nil {\n\t\t\t\t\/\/ encoding error\n\t\t\t\tutilruntime.HandleError(fmt.Errorf(\"unable to encode event: %v\", err))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif s.UseTextFraming {\n\t\t\t\tif err := websocket.Message.Send(ws, streamBuf.String()); err != nil {\n\t\t\t\t\t\/\/ Client disconnect.\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err := websocket.Message.Send(ws, streamBuf.Bytes()); err != nil {\n\t\t\t\t\t\/\/ Client disconnect.\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tbuf.Reset()\n\t\t\tstreamBuf.Reset()\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage rollout\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"k8s.io\/apimachinery\/pkg\/api\/meta\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/fields\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/watch\"\n\t\"k8s.io\/cli-runtime\/pkg\/genericclioptions\"\n\t\"k8s.io\/cli-runtime\/pkg\/resource\"\n\t\"k8s.io\/client-go\/dynamic\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\twatchtools \"k8s.io\/client-go\/tools\/watch\"\n\tcmdutil \"k8s.io\/kubectl\/pkg\/cmd\/util\"\n\t\"k8s.io\/kubectl\/pkg\/polymorphichelpers\"\n\t\"k8s.io\/kubectl\/pkg\/scheme\"\n\t\"k8s.io\/kubectl\/pkg\/util\/completion\"\n\t\"k8s.io\/kubectl\/pkg\/util\/i18n\"\n\t\"k8s.io\/kubectl\/pkg\/util\/interrupt\"\n\t\"k8s.io\/kubectl\/pkg\/util\/templates\"\n)\n\nvar (\n\tstatusLong = templates.LongDesc(i18n.T(`\n\t\tShow the status of the rollout.\n\n\t\tBy default 'rollout status' will watch the status of the latest rollout\n\t\tuntil it's done. If you don't want to wait for the rollout to finish then\n\t\tyou can use --watch=false. Note that if a new rollout starts in-between, then\n\t\t'rollout status' will continue watching the latest revision. If you want to\n\t\tpin to a specific revision and abort if it is rolled over by another revision,\n\t\tuse --revision=N where N is the revision you need to watch for.`))\n\n\tstatusExample = templates.Examples(`\n\t\t# Watch the rollout status of a deployment\n\t\tkubectl rollout status deployment\/nginx`)\n)\n\n\/\/ RolloutStatusOptions holds the command-line options for 'rollout status' sub command\ntype RolloutStatusOptions struct {\n\tPrintFlags *genericclioptions.PrintFlags\n\n\tNamespace string\n\tEnforceNamespace bool\n\tBuilderArgs []string\n\tLabelSelector string\n\n\tWatch bool\n\tRevision int64\n\tTimeout time.Duration\n\n\tStatusViewerFn func(*meta.RESTMapping) (polymorphichelpers.StatusViewer, error)\n\tBuilder func() *resource.Builder\n\tDynamicClient dynamic.Interface\n\n\tFilenameOptions *resource.FilenameOptions\n\tgenericclioptions.IOStreams\n}\n\n\/\/ NewRolloutStatusOptions returns an initialized RolloutStatusOptions instance\nfunc NewRolloutStatusOptions(streams genericclioptions.IOStreams) *RolloutStatusOptions {\n\treturn &RolloutStatusOptions{\n\t\tPrintFlags: genericclioptions.NewPrintFlags(\"\").WithTypeSetter(scheme.Scheme),\n\t\tFilenameOptions: &resource.FilenameOptions{},\n\t\tIOStreams: streams,\n\t\tWatch: true,\n\t\tTimeout: 0,\n\t}\n}\n\n\/\/ NewCmdRolloutStatus returns a Command instance for the 'rollout status' sub command\nfunc NewCmdRolloutStatus(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {\n\to := NewRolloutStatusOptions(streams)\n\n\tvalidArgs := []string{\"deployment\", \"daemonset\", \"statefulset\"}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"status (TYPE NAME | TYPE\/NAME) [flags]\",\n\t\tDisableFlagsInUseLine: true,\n\t\tShort: i18n.T(\"Show the status of the rollout\"),\n\t\tLong: statusLong,\n\t\tExample: statusExample,\n\t\tValidArgsFunction: completion.SpecifiedResourceTypeAndNameNoRepeatCompletionFunc(f, validArgs),\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tcmdutil.CheckErr(o.Complete(f, args))\n\t\t\tcmdutil.CheckErr(o.Validate())\n\t\t\tcmdutil.CheckErr(o.Run())\n\t\t},\n\t}\n\n\tusage := \"identifying the resource to get from a server.\"\n\tcmdutil.AddFilenameOptionFlags(cmd, o.FilenameOptions, usage)\n\tcmd.Flags().BoolVarP(&o.Watch, \"watch\", \"w\", o.Watch, \"Watch the status of the rollout until it's done.\")\n\tcmd.Flags().Int64Var(&o.Revision, \"revision\", o.Revision, \"Pin to a specific revision for showing its status. Defaults to 0 (last revision).\")\n\tcmd.Flags().DurationVar(&o.Timeout, \"timeout\", o.Timeout, \"The length of time to wait before ending watch, zero means never. Any other values should contain a corresponding time unit (e.g. 1s, 2m, 3h).\")\n\tcmdutil.AddLabelSelectorFlagVar(cmd, &o.LabelSelector)\n\n\treturn cmd\n}\n\n\/\/ Complete completes all the required options\nfunc (o *RolloutStatusOptions) Complete(f cmdutil.Factory, args []string) error {\n\to.Builder = f.NewBuilder\n\n\tvar err error\n\to.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\to.BuilderArgs = args\n\to.StatusViewerFn = polymorphichelpers.StatusViewerFn\n\n\tclientConfig, err := f.ToRESTConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\to.DynamicClient, err = dynamic.NewForConfig(clientConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Validate makes sure all the provided values for command-line options are valid\nfunc (o *RolloutStatusOptions) Validate() error {\n\tif len(o.BuilderArgs) == 0 && cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames, o.FilenameOptions.Kustomize) {\n\t\treturn fmt.Errorf(\"required resource not specified\")\n\t}\n\n\tif o.Revision < 0 {\n\t\treturn fmt.Errorf(\"revision must be a positive integer: %v\", o.Revision)\n\t}\n\n\treturn nil\n}\n\n\/\/ Run performs the execution of 'rollout status' sub command\nfunc (o *RolloutStatusOptions) Run() error {\n\tr := o.Builder().\n\t\tWithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).\n\t\tNamespaceParam(o.Namespace).DefaultNamespace().\n\t\tLabelSelectorParam(o.LabelSelector).\n\t\tFilenameParam(o.EnforceNamespace, o.FilenameOptions).\n\t\tResourceTypeOrNameArgs(true, o.BuilderArgs...).\n\t\tContinueOnError().\n\t\tLatest().\n\t\tDo()\n\n\terr := r.Err()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn r.Visit(func(info *resource.Info, err error) error {\n\t\tmapping := info.ResourceMapping()\n\t\tstatusViewer, err := o.StatusViewerFn(mapping)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfieldSelector := fields.OneTermEqualSelector(\"metadata.name\", info.Name).String()\n\t\tlw := &cache.ListWatch{\n\t\t\tListFunc: func(options metav1.ListOptions) (runtime.Object, error) {\n\t\t\t\toptions.FieldSelector = fieldSelector\n\t\t\t\treturn o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).List(context.TODO(), options)\n\t\t\t},\n\t\t\tWatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\t\toptions.FieldSelector = fieldSelector\n\t\t\t\treturn o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Watch(context.TODO(), options)\n\t\t\t},\n\t\t}\n\n\t\t\/\/ if the rollout isn't done yet, keep watching deployment status\n\t\tctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), o.Timeout)\n\t\tintr := interrupt.New(nil, cancel)\n\t\treturn intr.Run(func() error {\n\t\t\t_, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) {\n\t\t\t\tswitch t := e.Type; t {\n\t\t\t\tcase watch.Added, watch.Modified:\n\t\t\t\t\tstatus, done, err := statusViewer.Status(e.Object.(runtime.Unstructured), o.Revision)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false, err\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Fprintf(o.Out, \"%s\", status)\n\t\t\t\t\t\/\/ Quit waiting if the rollout is done\n\t\t\t\t\tif done {\n\t\t\t\t\t\treturn true, nil\n\t\t\t\t\t}\n\n\t\t\t\t\tshouldWatch := o.Watch\n\t\t\t\t\tif !shouldWatch {\n\t\t\t\t\t\treturn true, nil\n\t\t\t\t\t}\n\n\t\t\t\t\treturn false, nil\n\n\t\t\t\tcase watch.Deleted:\n\t\t\t\t\t\/\/ We need to abort to avoid cases of recreation and not to silently watch the wrong (new) object\n\t\t\t\t\treturn true, fmt.Errorf(\"object has been deleted\")\n\n\t\t\t\tdefault:\n\t\t\t\t\treturn true, fmt.Errorf(\"internal error: unexpected event %#v\", e)\n\t\t\t\t}\n\t\t\t})\n\t\t\treturn err\n\t\t})\n\t})\n}\nEnable resource builder flattening in rollout status\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage rollout\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"k8s.io\/apimachinery\/pkg\/api\/meta\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/fields\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/watch\"\n\t\"k8s.io\/cli-runtime\/pkg\/genericclioptions\"\n\t\"k8s.io\/cli-runtime\/pkg\/resource\"\n\t\"k8s.io\/client-go\/dynamic\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\twatchtools \"k8s.io\/client-go\/tools\/watch\"\n\tcmdutil \"k8s.io\/kubectl\/pkg\/cmd\/util\"\n\t\"k8s.io\/kubectl\/pkg\/polymorphichelpers\"\n\t\"k8s.io\/kubectl\/pkg\/scheme\"\n\t\"k8s.io\/kubectl\/pkg\/util\/completion\"\n\t\"k8s.io\/kubectl\/pkg\/util\/i18n\"\n\t\"k8s.io\/kubectl\/pkg\/util\/interrupt\"\n\t\"k8s.io\/kubectl\/pkg\/util\/templates\"\n)\n\nvar (\n\tstatusLong = templates.LongDesc(i18n.T(`\n\t\tShow the status of the rollout.\n\n\t\tBy default 'rollout status' will watch the status of the latest rollout\n\t\tuntil it's done. If you don't want to wait for the rollout to finish then\n\t\tyou can use --watch=false. Note that if a new rollout starts in-between, then\n\t\t'rollout status' will continue watching the latest revision. If you want to\n\t\tpin to a specific revision and abort if it is rolled over by another revision,\n\t\tuse --revision=N where N is the revision you need to watch for.`))\n\n\tstatusExample = templates.Examples(`\n\t\t# Watch the rollout status of a deployment\n\t\tkubectl rollout status deployment\/nginx`)\n)\n\n\/\/ RolloutStatusOptions holds the command-line options for 'rollout status' sub command\ntype RolloutStatusOptions struct {\n\tPrintFlags *genericclioptions.PrintFlags\n\n\tNamespace string\n\tEnforceNamespace bool\n\tBuilderArgs []string\n\tLabelSelector string\n\n\tWatch bool\n\tRevision int64\n\tTimeout time.Duration\n\n\tStatusViewerFn func(*meta.RESTMapping) (polymorphichelpers.StatusViewer, error)\n\tBuilder func() *resource.Builder\n\tDynamicClient dynamic.Interface\n\n\tFilenameOptions *resource.FilenameOptions\n\tgenericclioptions.IOStreams\n}\n\n\/\/ NewRolloutStatusOptions returns an initialized RolloutStatusOptions instance\nfunc NewRolloutStatusOptions(streams genericclioptions.IOStreams) *RolloutStatusOptions {\n\treturn &RolloutStatusOptions{\n\t\tPrintFlags: genericclioptions.NewPrintFlags(\"\").WithTypeSetter(scheme.Scheme),\n\t\tFilenameOptions: &resource.FilenameOptions{},\n\t\tIOStreams: streams,\n\t\tWatch: true,\n\t\tTimeout: 0,\n\t}\n}\n\n\/\/ NewCmdRolloutStatus returns a Command instance for the 'rollout status' sub command\nfunc NewCmdRolloutStatus(f cmdutil.Factory, streams genericclioptions.IOStreams) *cobra.Command {\n\to := NewRolloutStatusOptions(streams)\n\n\tvalidArgs := []string{\"deployment\", \"daemonset\", \"statefulset\"}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"status (TYPE NAME | TYPE\/NAME) [flags]\",\n\t\tDisableFlagsInUseLine: true,\n\t\tShort: i18n.T(\"Show the status of the rollout\"),\n\t\tLong: statusLong,\n\t\tExample: statusExample,\n\t\tValidArgsFunction: completion.SpecifiedResourceTypeAndNameNoRepeatCompletionFunc(f, validArgs),\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tcmdutil.CheckErr(o.Complete(f, args))\n\t\t\tcmdutil.CheckErr(o.Validate())\n\t\t\tcmdutil.CheckErr(o.Run())\n\t\t},\n\t}\n\n\tusage := \"identifying the resource to get from a server.\"\n\tcmdutil.AddFilenameOptionFlags(cmd, o.FilenameOptions, usage)\n\tcmd.Flags().BoolVarP(&o.Watch, \"watch\", \"w\", o.Watch, \"Watch the status of the rollout until it's done.\")\n\tcmd.Flags().Int64Var(&o.Revision, \"revision\", o.Revision, \"Pin to a specific revision for showing its status. Defaults to 0 (last revision).\")\n\tcmd.Flags().DurationVar(&o.Timeout, \"timeout\", o.Timeout, \"The length of time to wait before ending watch, zero means never. Any other values should contain a corresponding time unit (e.g. 1s, 2m, 3h).\")\n\tcmdutil.AddLabelSelectorFlagVar(cmd, &o.LabelSelector)\n\n\treturn cmd\n}\n\n\/\/ Complete completes all the required options\nfunc (o *RolloutStatusOptions) Complete(f cmdutil.Factory, args []string) error {\n\to.Builder = f.NewBuilder\n\n\tvar err error\n\to.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\to.BuilderArgs = args\n\to.StatusViewerFn = polymorphichelpers.StatusViewerFn\n\n\tclientConfig, err := f.ToRESTConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\to.DynamicClient, err = dynamic.NewForConfig(clientConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Validate makes sure all the provided values for command-line options are valid\nfunc (o *RolloutStatusOptions) Validate() error {\n\tif len(o.BuilderArgs) == 0 && cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames, o.FilenameOptions.Kustomize) {\n\t\treturn fmt.Errorf(\"required resource not specified\")\n\t}\n\n\tif o.Revision < 0 {\n\t\treturn fmt.Errorf(\"revision must be a positive integer: %v\", o.Revision)\n\t}\n\n\treturn nil\n}\n\n\/\/ Run performs the execution of 'rollout status' sub command\nfunc (o *RolloutStatusOptions) Run() error {\n\tr := o.Builder().\n\t\tWithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).\n\t\tNamespaceParam(o.Namespace).DefaultNamespace().\n\t\tLabelSelectorParam(o.LabelSelector).\n\t\tFilenameParam(o.EnforceNamespace, o.FilenameOptions).\n\t\tResourceTypeOrNameArgs(true, o.BuilderArgs...).\n\t\tContinueOnError().\n\t\tLatest().\n\t\tFlatten().\n\t\tDo()\n\n\terr := r.Err()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn r.Visit(func(info *resource.Info, err error) error {\n\t\tmapping := info.ResourceMapping()\n\t\tstatusViewer, err := o.StatusViewerFn(mapping)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfieldSelector := fields.OneTermEqualSelector(\"metadata.name\", info.Name).String()\n\t\tlw := &cache.ListWatch{\n\t\t\tListFunc: func(options metav1.ListOptions) (runtime.Object, error) {\n\t\t\t\toptions.FieldSelector = fieldSelector\n\t\t\t\treturn o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).List(context.TODO(), options)\n\t\t\t},\n\t\t\tWatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\t\toptions.FieldSelector = fieldSelector\n\t\t\t\treturn o.DynamicClient.Resource(info.Mapping.Resource).Namespace(info.Namespace).Watch(context.TODO(), options)\n\t\t\t},\n\t\t}\n\n\t\t\/\/ if the rollout isn't done yet, keep watching deployment status\n\t\tctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), o.Timeout)\n\t\tintr := interrupt.New(nil, cancel)\n\t\treturn intr.Run(func() error {\n\t\t\t_, err = watchtools.UntilWithSync(ctx, lw, &unstructured.Unstructured{}, nil, func(e watch.Event) (bool, error) {\n\t\t\t\tswitch t := e.Type; t {\n\t\t\t\tcase watch.Added, watch.Modified:\n\t\t\t\t\tstatus, done, err := statusViewer.Status(e.Object.(runtime.Unstructured), o.Revision)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn false, err\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Fprintf(o.Out, \"%s\", status)\n\t\t\t\t\t\/\/ Quit waiting if the rollout is done\n\t\t\t\t\tif done {\n\t\t\t\t\t\treturn true, nil\n\t\t\t\t\t}\n\n\t\t\t\t\tshouldWatch := o.Watch\n\t\t\t\t\tif !shouldWatch {\n\t\t\t\t\t\treturn true, nil\n\t\t\t\t\t}\n\n\t\t\t\t\treturn false, nil\n\n\t\t\t\tcase watch.Deleted:\n\t\t\t\t\t\/\/ We need to abort to avoid cases of recreation and not to silently watch the wrong (new) object\n\t\t\t\t\treturn true, fmt.Errorf(\"object has been deleted\")\n\n\t\t\t\tdefault:\n\t\t\t\t\treturn true, fmt.Errorf(\"internal error: unexpected event %#v\", e)\n\t\t\t\t}\n\t\t\t})\n\t\t\treturn err\n\t\t})\n\t})\n}\n<|endoftext|>"} {"text":"package builtin\n\nimport (\n\t\"testing\"\n)\n\nfunc TestContains(t *testing.T) {\n\tif !contains([]string{\"foo\", \"bar\"}, \"bar\") {\n\t\tt.Errorf(\"Error builtin contains\")\n\t}\n\tif contains([]string{\"foo\", \"bar\"}, \"spam\") {\n\t\tt.Errorf(\"Error builtin contains\")\n\t}\n}\nAdded coverage for containspackage builtin\n\nimport (\n\t\"testing\"\n)\n\nfunc TestContains(t *testing.T) {\n\tif !contains([]string{\"foo\", \"bar\"}, \"bar\") {\n\t\tt.Errorf(\"Error builtin contains\")\n\t}\n\tif contains([]string{\"foo\", \"bar\"}, \"spam\") {\n\t\tt.Errorf(\"Error builtin contains\")\n\t}\n\tif !contains([]interface{}{\"foo\", \"bar\"}, \"bar\") {\n\t\tt.Errorf(\"Error builtin contains\")\n\t}\n\tif contains([]interface{}{\"foo\", \"bar\"}, \"spam\") {\n\t\tt.Errorf(\"Error builtin contains\")\n\t}\n}\n<|endoftext|>"} {"text":"package pci_test\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com\/bobuhiro11\/gokvm\/pci\"\n)\n\nfunc TestSizeToBits(t *testing.T) {\n\tt.Parallel()\n\n\texpected := uint32(0xffffff00)\n\tactual := pci.SizeToBits(0x100)\n\n\tif expected != actual {\n\t\tt.Fatalf(\"expected: %v, actual: %v\", expected, actual)\n\t}\n}\n\nfunc TestBytesToNum(t *testing.T) {\n\tt.Parallel()\n\n\texpected := uint64(0x12345678)\n\tactual := pci.BytesToNum([]byte{0x78, 0x56, 0x34, 0x12})\n\n\tif expected != actual {\n\t\tt.Fatalf(\"expected: %v, actual: %v\", expected, actual)\n\t}\n}\n\nfunc TestNumToBytes8(t *testing.T) {\n\tt.Parallel()\n\n\texpected := []byte{0x12}\n\tactual := pci.NumToBytes(uint8(0x12))\n\n\tif !bytes.Equal(actual, expected) {\n\t\tt.Fatalf(\"expected: %v, actual: %v\", expected, actual)\n\t}\n}\n\nfunc TestNumToBytes16(t *testing.T) {\n\tt.Parallel()\n\n\texpected := []byte{0x34, 0x12}\n\tactual := pci.NumToBytes(uint16(0x1234))\n\n\tif !bytes.Equal(actual, expected) {\n\t\tt.Fatalf(\"expected: %v, actual: %v\", expected, actual)\n\t}\n}\n\nfunc TestNumToBytes32(t *testing.T) {\n\tt.Parallel()\n\n\texpected := []byte{0x78, 0x56, 0x34, 0x12}\n\tactual := pci.NumToBytes(uint32(0x12345678))\n\n\tif !bytes.Equal(actual, expected) {\n\t\tt.Fatalf(\"expected: %v, actual: %v\", expected, actual)\n\t}\n}\n\nfunc TestNumToBytes64(t *testing.T) {\n\tt.Parallel()\n\n\texpected := []byte{0x78, 0x56, 0x34, 0x12, 0x78, 0x56, 0x34, 0x12}\n\tactual := pci.NumToBytes(uint64(0x1234567812345678))\n\n\tif !bytes.Equal(actual, expected) {\n\t\tt.Fatalf(\"expected: %v, actual: %v\", expected, actual)\n\t}\n}\n\nfunc TestNumToBytesInvalid(t *testing.T) {\n\tt.Parallel()\n\n\tactual := pci.NumToBytes(-1)\n\texpected := []byte{}\n\n\tif !bytes.Equal(actual, expected) {\n\t\tt.Fatalf(\"expected: %v, actual: %v\", expected, actual)\n\t}\n}\n\nfunc TestProbingBAR0(t *testing.T) {\n\tt.Parallel()\n\n\tbr := pci.NewBridge()\n\tstart, end := br.GetIORange()\n\texpected := pci.SizeToBits(end - start)\n\n\tp := pci.New(br)\n\t_ = p.PciConfAddrOut(0x0, pci.NumToBytes(uint32(0x80000010))) \/\/ offset 0x10 for BAR0 with enable bit 0x80\n\t_ = p.PciConfDataOut(0xCFC, pci.NumToBytes(uint32(0xffffffff))) \/\/ all 1-bits for probing size of BAR0\n\n\tbytes := make([]byte, 4)\n\t_ = p.PciConfDataIn(0xCFC, bytes)\n\tactual := uint32(pci.BytesToNum(bytes))\n\n\tif expected != actual {\n\t\tt.Fatalf(\"expected: 0x%x, actual: 0x%x\", expected, actual)\n\t}\n}\npci: add test for bytes funcpackage pci_test\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com\/bobuhiro11\/gokvm\/pci\"\n)\n\nfunc TestSizeToBits(t *testing.T) {\n\tt.Parallel()\n\n\texpected := uint32(0xffffff00)\n\tactual := pci.SizeToBits(0x100)\n\n\tif expected != actual {\n\t\tt.Fatalf(\"expected: %v, actual: %v\", expected, actual)\n\t}\n}\n\nfunc TestBytesToNum(t *testing.T) {\n\tt.Parallel()\n\n\texpected := uint64(0x12345678)\n\tactual := pci.BytesToNum([]byte{0x78, 0x56, 0x34, 0x12})\n\n\tif expected != actual {\n\t\tt.Fatalf(\"expected: %v, actual: %v\", expected, actual)\n\t}\n}\n\nfunc TestNumToBytes8(t *testing.T) {\n\tt.Parallel()\n\n\texpected := []byte{0x12}\n\tactual := pci.NumToBytes(uint8(0x12))\n\n\tif !bytes.Equal(actual, expected) {\n\t\tt.Fatalf(\"expected: %v, actual: %v\", expected, actual)\n\t}\n}\n\nfunc TestNumToBytes16(t *testing.T) {\n\tt.Parallel()\n\n\texpected := []byte{0x34, 0x12}\n\tactual := pci.NumToBytes(uint16(0x1234))\n\n\tif !bytes.Equal(actual, expected) {\n\t\tt.Fatalf(\"expected: %v, actual: %v\", expected, actual)\n\t}\n}\n\nfunc TestNumToBytes32(t *testing.T) {\n\tt.Parallel()\n\n\texpected := []byte{0x78, 0x56, 0x34, 0x12}\n\tactual := pci.NumToBytes(uint32(0x12345678))\n\n\tif !bytes.Equal(actual, expected) {\n\t\tt.Fatalf(\"expected: %v, actual: %v\", expected, actual)\n\t}\n}\n\nfunc TestNumToBytes64(t *testing.T) {\n\tt.Parallel()\n\n\texpected := []byte{0x78, 0x56, 0x34, 0x12, 0x78, 0x56, 0x34, 0x12}\n\tactual := pci.NumToBytes(uint64(0x1234567812345678))\n\n\tif !bytes.Equal(actual, expected) {\n\t\tt.Fatalf(\"expected: %v, actual: %v\", expected, actual)\n\t}\n}\n\nfunc TestNumToBytesInvalid(t *testing.T) {\n\tt.Parallel()\n\n\tactual := pci.NumToBytes(-1)\n\texpected := []byte{}\n\n\tif !bytes.Equal(actual, expected) {\n\t\tt.Fatalf(\"expected: %v, actual: %v\", expected, actual)\n\t}\n}\n\nfunc TestProbingBAR0(t *testing.T) {\n\tt.Parallel()\n\n\tbr := pci.NewBridge()\n\tstart, end := br.GetIORange()\n\texpected := pci.SizeToBits(end - start)\n\n\tp := pci.New(br)\n\t_ = p.PciConfAddrOut(0x0, pci.NumToBytes(uint32(0x80000010))) \/\/ offset 0x10 for BAR0 with enable bit 0x80\n\t_ = p.PciConfDataOut(0xCFC, pci.NumToBytes(uint32(0xffffffff))) \/\/ all 1-bits for probing size of BAR0\n\n\tbytes := make([]byte, 4)\n\t_ = p.PciConfDataIn(0xCFC, bytes)\n\tactual := uint32(pci.BytesToNum(bytes))\n\n\tif expected != actual {\n\t\tt.Fatalf(\"expected: 0x%x, actual: 0x%x\", expected, actual)\n\t}\n}\n\nfunc TestBytes(t *testing.T) {\n\tt.Parallel()\n\n\tdh := pci.DeviceHeader{\n\t\tDeviceID: 1,\n\t\tVendorID: 1,\n\t\tHeaderType: 1,\n\t\tSubsystemID: 1,\n\t\tCommand: 1,\n\t\tBAR: [6]uint32{},\n\t\tInterruptPin: 1,\n\t\tInterruptLine: 1,\n\t}\n\n\tb, err := dh.Bytes()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif b[0] != byte(dh.VendorID) {\n\t\tt.Fatalf(\"invalid vendor id\")\n\t}\n}\n<|endoftext|>"} {"text":"package pdp\n\nimport \"net\/url\"\n\n\/\/ Selector provides a generic way to access external data may required\n\/\/ by policies.\ntype Selector interface {\n\t\/\/ Enabled returns true for active selector. Disabled selector isn't\n\t\/\/ initialized and can't be used in policies.\n\tEnabled() bool\n\t\/\/ Scheme returns a name of URI scheme associated with selector.\n\tScheme() string\n\t\/\/ Initialize is called for all registered and enabled selectors\n\t\/\/ by InitializeSelectors.\n\tInitialize()\n\t\/\/ SelectorFunc returns selector expression for given URI,\n\t\/\/ set of arguments and desired result type. The last two arguments\n\t\/\/ define the values selector to return in case of missing content value\n\t\/\/ or other errors\n\tSelectorFunc(*url.URL, []Expression, Type, Expression, Expression) (Expression, error)\n}\n\nvar selectorMap = make(map[string]Selector)\n\n\/\/ MakeSelector returns new selector for given uri with path as a set of\n\/\/ arguments and desired result type.\nfunc MakeSelector(uri *url.URL, path []Expression, t Type, def, err Expression) (Expression, error) {\n\ts := GetSelector(uri.Scheme)\n\tif s == nil {\n\t\treturn nil, newUnsupportedSelectorSchemeError(uri)\n\t}\n\tif !s.Enabled() {\n\t\treturn nil, newDisabledSelectorError(s)\n\t}\n\treturn s.SelectorFunc(uri, path, t, def, err)\n}\n\n\/\/ RegisterSelector puts given selector to PDP's registry.\nfunc RegisterSelector(s Selector) {\n\tselectorMap[s.Scheme()] = s\n}\n\n\/\/ GetSelector returns selector registered for given schema.\nfunc GetSelector(scheme string) Selector {\n\tif s, ok := selectorMap[scheme]; ok {\n\t\treturn s\n\t}\n\n\treturn nil\n}\n\n\/\/ InitializeSelectors initializes all registered and enabled selectors.\nfunc InitializeSelectors() {\n\tfor _, s := range selectorMap {\n\t\tif s.Enabled() {\n\t\t\ts.Initialize()\n\t\t}\n\t}\n}\nlog selectors being initializedpackage pdp\n\nimport (\n\t\"net\/url\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ Selector provides a generic way to access external data may required\n\/\/ by policies.\ntype Selector interface {\n\t\/\/ Enabled returns true for active selector. Disabled selector isn't\n\t\/\/ initialized and can't be used in policies.\n\tEnabled() bool\n\t\/\/ Scheme returns a name of URI scheme associated with selector.\n\tScheme() string\n\t\/\/ Initialize is called for all registered and enabled selectors\n\t\/\/ by InitializeSelectors.\n\tInitialize()\n\t\/\/ SelectorFunc returns selector expression for given URI,\n\t\/\/ set of arguments and desired result type. The last two arguments\n\t\/\/ define the values selector to return in case of missing content value\n\t\/\/ or other errors\n\tSelectorFunc(*url.URL, []Expression, Type, Expression, Expression) (Expression, error)\n}\n\nvar selectorMap = make(map[string]Selector)\n\n\/\/ MakeSelector returns new selector for given uri with path as a set of\n\/\/ arguments and desired result type.\nfunc MakeSelector(uri *url.URL, path []Expression, t Type, def, err Expression) (Expression, error) {\n\ts := GetSelector(uri.Scheme)\n\tif s == nil {\n\t\treturn nil, newUnsupportedSelectorSchemeError(uri)\n\t}\n\tif !s.Enabled() {\n\t\treturn nil, newDisabledSelectorError(s)\n\t}\n\treturn s.SelectorFunc(uri, path, t, def, err)\n}\n\n\/\/ RegisterSelector puts given selector to PDP's registry.\nfunc RegisterSelector(s Selector) {\n\tselectorMap[s.Scheme()] = s\n}\n\n\/\/ GetSelector returns selector registered for given schema.\nfunc GetSelector(scheme string) Selector {\n\tif s, ok := selectorMap[scheme]; ok {\n\t\treturn s\n\t}\n\n\treturn nil\n}\n\n\/\/ InitializeSelectors initializes all registered and enabled selectors.\nfunc InitializeSelectors() {\n\tfor _, s := range selectorMap {\n\t\tif s.Enabled() {\n\t\t\tlog.WithField(\"scheme\", s.Scheme()).Info(\"initializing selector\")\n\t\t\ts.Initialize()\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"Implemented Aws.removeMachines()<|endoftext|>"} {"text":"go doc<|endoftext|>"} {"text":"\/\/ Copyright 2016, Timothy Bogdala \n\/\/ See the LICENSE file for more details.\n\npackage glfwinput\n\nimport glfw \"github.com\/go-gl\/glfw\/v3.1\/glfw\"\n\n\/\/ KeyCallback is the type of the function that gets called for the key\n\/\/ callback events.\ntype KeyCallback func()\n\n\/\/ KeyboardModel is the way to bind keys to events.\ntype KeyboardModel struct {\n\t\/\/ KeyTriggerBindings are the functions to call when the given key is pressed\n\tKeyTriggerBindings map[glfw.Key]KeyCallback\n\n\t\/\/ KeyBindings are the functions to call when the given key is considered\n\t\/\/ 'pressed' when the KeyboardModel runs CheckKeyPresses.\n\tKeyBindings map[glfw.Key]KeyCallback\n\n\t\/\/ window is the GLFW window to poll for key input\n\twindow *glfw.Window\n\n\t\/\/ keyCallback is the function that will get passed to GLFW as the\n\t\/\/ callback handler for key presses\n\tKeyCallback glfw.KeyCallback\n}\n\n\/\/ NewKeyboardModel returns a newly created keyboard model object\nfunc NewKeyboardModel(w *glfw.Window) *KeyboardModel {\n\tkb := new(KeyboardModel)\n\tkb.KeyTriggerBindings = make(map[glfw.Key]KeyCallback)\n\tkb.KeyBindings = make(map[glfw.Key]KeyCallback)\n\tkb.window = w\n\n\t\/\/ use some default callbacks\n\tkb.KeyCallback = func(w *glfw.Window, key glfw.Key, scancode int, action glfw.Action, mods glfw.ModifierKey) {\n\t\t\/\/ for now, we're only interested in press actions\n\t\tif action != glfw.Press {\n\t\t\treturn\n\t\t}\n\n\t\tcb, okay := kb.KeyTriggerBindings[key]\n\t\tif okay && cb != nil {\n\t\t\tcb()\n\t\t}\n\t}\n\n\treturn kb\n}\n\n\/\/ SetupCallbacks sets the callback handlers for the window.\nfunc (kb *KeyboardModel) SetupCallbacks() {\n\tkb.window.SetKeyCallback(kb.KeyCallback)\n}\n\n\/\/ CheckKeyPresses runs through all of the KeyBindings and checks to see if that\n\/\/ key is held down -- if it is, then the callback is invoked.\nfunc (kb *KeyboardModel) CheckKeyPresses() {\n\tfor key, cb := range kb.KeyBindings {\n\t\tif kb.window.GetKey(key) == glfw.Press && cb != nil {\n\t\t\tcb()\n\t\t}\n\t}\n}\n\n\/\/ Bind binds a key press event with a callback that will get called when\n\/\/ CheckKeyPresses finds the key to be pressed.\nfunc (kb *KeyboardModel) Bind(key glfw.Key, f KeyCallback) {\n\tkb.KeyBindings[key] = f\n}\n\n\/\/ BindTrigger binds a key event that gets called when the key is pressed once.\nfunc (kb *KeyboardModel) BindTrigger(key glfw.Key, f KeyCallback) {\n\tkb.KeyTriggerBindings[key] = f\n}\nchained glfw input handlers together\/\/ Copyright 2016, Timothy Bogdala \n\/\/ See the LICENSE file for more details.\n\npackage glfwinput\n\nimport glfw \"github.com\/go-gl\/glfw\/v3.1\/glfw\"\n\n\/\/ KeyCallback is the type of the function that gets called for the key\n\/\/ callback events.\ntype KeyCallback func()\n\n\/\/ KeyboardModel is the way to bind keys to events.\ntype KeyboardModel struct {\n\t\/\/ KeyTriggerBindings are the functions to call when the given key is pressed\n\tKeyTriggerBindings map[glfw.Key]KeyCallback\n\n\t\/\/ KeyBindings are the functions to call when the given key is considered\n\t\/\/ 'pressed' when the KeyboardModel runs CheckKeyPresses.\n\tKeyBindings map[glfw.Key]KeyCallback\n\n\t\/\/ window is the GLFW window to poll for key input\n\twindow *glfw.Window\n\n\t\/\/ keyCallback is the function that will get passed to GLFW as the\n\t\/\/ callback handler for key presses\n\tKeyCallback glfw.KeyCallback\n\n\t\/\/ prevKeyCallback is the previously bound key callback from glfw.\n\t\/\/ This is used to chain input.\n\tprevKeyCallback glfw.KeyCallback\n}\n\n\/\/ NewKeyboardModel returns a newly created keyboard model object\nfunc NewKeyboardModel(w *glfw.Window) *KeyboardModel {\n\tkb := new(KeyboardModel)\n\tkb.KeyTriggerBindings = make(map[glfw.Key]KeyCallback)\n\tkb.KeyBindings = make(map[glfw.Key]KeyCallback)\n\tkb.window = w\n\n\t\/\/ use some default callbacks\n\tkb.KeyCallback = func(w *glfw.Window, key glfw.Key, scancode int, action glfw.Action, mods glfw.ModifierKey) {\n\t\t\/\/ for now, we're only interested in press actions\n\t\tif action != glfw.Press {\n\t\t\treturn\n\t\t}\n\n\t\tcb, okay := kb.KeyTriggerBindings[key]\n\t\tif okay && cb != nil {\n\t\t\tcb()\n\t\t}\n\n\t\t\/\/ chain the event handler to the previous one if it existed.\n\t\tif kb.prevKeyCallback != nil {\n\t\t\tkb.prevKeyCallback(w, key, scancode, action, mods)\n\t\t}\n\t}\n\n\treturn kb\n}\n\n\/\/ SetupCallbacks sets the callback handlers for the window.\nfunc (kb *KeyboardModel) SetupCallbacks() {\n\tkb.prevKeyCallback = kb.window.SetKeyCallback(kb.KeyCallback)\n}\n\n\/\/ CheckKeyPresses runs through all of the KeyBindings and checks to see if that\n\/\/ key is held down -- if it is, then the callback is invoked.\nfunc (kb *KeyboardModel) CheckKeyPresses() {\n\tfor key, cb := range kb.KeyBindings {\n\t\tif kb.window.GetKey(key) == glfw.Press && cb != nil {\n\t\t\tcb()\n\t\t}\n\t}\n}\n\n\/\/ Bind binds a key press event with a callback that will get called when\n\/\/ CheckKeyPresses finds the key to be pressed.\nfunc (kb *KeyboardModel) Bind(key glfw.Key, f KeyCallback) {\n\tkb.KeyBindings[key] = f\n}\n\n\/\/ BindTrigger binds a key event that gets called when the key is pressed once.\nfunc (kb *KeyboardModel) BindTrigger(key glfw.Key, f KeyCallback) {\n\tkb.KeyTriggerBindings[key] = f\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2018 gf Author(https:\/\/github.com\/gogf\/gf). All Rights Reserved.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the MIT License.\n\/\/ If a copy of the MIT was not distributed with this file,\n\/\/ You can obtain one at https:\/\/github.com\/gogf\/gf.\n\npackage ghttp_test\n\nimport (\n\t\"github.com\/gogf\/gf\/container\/garray\"\n\t\"github.com\/gogf\/gf\/os\/genv\"\n)\n\nvar (\n\tports = garray.NewIntArray(true)\n)\n\nfunc init() {\n\tgenv.Set(\"UNDER_TEST\", \"1\")\n\tfor i := 60000; i <= 61000; i++ {\n\t\tports.Append(i)\n\t}\n}\nchange the port range for unit testing cases of ghttp.Server\/\/ Copyright 2018 gf Author(https:\/\/github.com\/gogf\/gf). All Rights Reserved.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the MIT License.\n\/\/ If a copy of the MIT was not distributed with this file,\n\/\/ You can obtain one at https:\/\/github.com\/gogf\/gf.\n\npackage ghttp_test\n\nimport (\n\t\"github.com\/gogf\/gf\/container\/garray\"\n\t\"github.com\/gogf\/gf\/os\/genv\"\n)\n\nvar (\n\tports = garray.NewIntArray(true)\n)\n\nfunc init() {\n\tgenv.Set(\"UNDER_TEST\", \"1\")\n\tfor i := 7000; i <= 8000; i++ {\n\t\tports.Append(i)\n\t}\n}\n<|endoftext|>"} {"text":"package httputilmore\n\nimport (\n\t\"encoding\/json\"\n)\n\n\/\/ ResponseInfo is a generic struct to handle response info.\ntype ResponseInfo struct {\n\tStatusCode int `json:\"statusCode,omitempty\"`\n\tHeaders map[string]string `json:\"headers,omitempty\"`\n\tMessage string `json:\"message,omitempty\"`\n}\n\n\/\/ ToJson returns ResponseInfo as a JSON byte array, embedding json.Marshal\n\/\/ errors if encountered.\nfunc (eresp *ResponseInfo) ToJson() []byte {\n\tbytes, err := json.Marshal(eresp)\n\tif err != nil {\n\t\teresp2 := ResponseInfo{StatusCode: 500, Message: err.Error()}\n\t\tbytes, _ := json.Marshal(eresp2)\n\t\treturn bytes\n\t}\n\treturn bytes\n}\nupdate stylepackage httputilmore\n\nimport (\n\t\"encoding\/json\"\n)\n\n\/\/ ResponseInfo is a generic struct to handle response info.\ntype ResponseInfo struct {\n\tStatusCode int `json:\"statusCode,omitempty\"`\n\tHeaders map[string]string `json:\"headers,omitempty\"`\n\tMessage string `json:\"message,omitempty\"`\n}\n\n\/\/ ToJson returns ResponseInfo as a JSON byte array, embedding json.Marshal\n\/\/ errors if encountered.\nfunc (resIn *ResponseInfo) ToJson() []byte {\n\tbytes, err := json.Marshal(resIn)\n\tif err != nil {\n\t\tresIn2 := ResponseInfo{StatusCode: 500, Message: err.Error()}\n\t\tbytes, _ := json.Marshal(resIn2)\n\t\treturn bytes\n\t}\n\treturn bytes\n}\n<|endoftext|>"} {"text":"package main\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ ThemeVersion is the version of the current CSS-JS theme.\n\/\/ It is the name of the folder containing the theme files.\nconst ThemeVersion = \"default\"\n\n\/\/ ThemeDate is the prefix used for \"revving\" the static files and enable long-term HTTP cache.\n\/\/ It MUST end with underscore _ (see app.yaml)\nconst ThemeDate = \"20171211_\"\n\nvar r = mux.NewRouter()\n\nfunc init() {\n\tinitEnv()\n\tinitToggles()\n\tinitRoutes()\n\n\t\/\/ We want the random results to be different even if we reboot the server. Thus, we use\n\t\/\/ the clock to seed the default generator.\n\t\/\/ See https:\/\/www.programming-idioms.org\/idiom\/70\/use-clock-as-random-generator-seed\/346\/go\n\trand.Seed(time.Now().UnixNano())\n}\n\nfunc initRoutes() {\n\tif !toggles[\"online\"] {\n\t\thandle(\"\/\", makeWall(\" Under maintenance.<\/i>\"))\n\t\t\/\/r.HandleFunc(\"\/\", makeWall(\" Coming soon.<\/i>\"))\n\t} else {\n\t\t\/\/handle(\"\/\", makeWall(\" Coming soon.<\/i>\"))\n\t\thandle(\"\/\", home)\n\t\thandle(\"\/home\", home)\n\t\thandle(\"\/wall\", makeWall(\" Coming soon.<\/i>\"))\n\t\thandle(\"\/about\", about)\n\t\thandle(\"\/idiom\/{idiomId}\", idiomDetail)\n\t\thandle(\"\/idiom\/{idiomId}\/impl\/{implId}\", idiomDetail)\n\t\thandle(\"\/idiom\/{idiomId}\/{idiomTitle}\", idiomDetail)\n\t\thandle(\"\/idiom\/{idiomId}\/diff\/{v1}\/{v2}\", versionDiff)\n\t\thandle(\"\/idiom\/{idiomId}\/{idiomTitle}\/{implId}\/{implLang}\", idiomDetail)\n\t\thandle(\"\/history\/{idiomId}\", idiomHistory)\n\t\thandle(\"\/revert\", revertIdiomVersion)\n\t\thandle(\"\/history-restore\", restoreIdiomVersion)\n\t\thandle(\"\/all-idioms\", allIdioms)\n\t\thandle(\"\/random-idiom\/having\/{havingLang}\", randomIdiomHaving)\n\t\thandle(\"\/random-idiom\/not-having\/{notHavingLang}\", randomIdiomNotHaving)\n\t\thandle(\"\/random-idiom\", randomIdiom)\n\t\thandle(\"\/search\", searchRedirect)\n\t\thandle(\"\/search\/{q}\", search)\n\t\thandle(\"\/list-by-language\/{langs}\", listByLanguage)\n\t\thandle(\"\/missing-fields\/{lang}\", missingList)\n\t\thandle(\"\/idiom-picture\", idiomPicture)\n\t\thandle(\"\/rss-recently-created\", rssRecentlyCreated)\n\t\thandle(\"\/rss-recently-updated\", rssRecentlyUpdated)\n\t\thandle(\"\/my\/{nickname}\/{langs}\", bookmarkableUserURL)\n\t\thandle(\"\/my\/{langs}\", bookmarkableUserURL)\n\t\thandle(\"\/cheatsheet\/{lang}\", cheatsheet)\n\t\thandleAjax(\"\/typeahead-languages\", typeaheadLanguages)\n\t\thandleAjax(\"\/ajax-other-implementations\", ajaxOtherImplementations)\n\t\tif toggles[\"writable\"] {\n\t\t\t\/\/ When not in \"read-only\" mode\n\t\t\thandle(\"\/idiom-save\", idiomSave)\n\t\t\thandle(\"\/idiom-edit\/{idiomId}\", idiomEdit)\n\t\t\thandle(\"\/idiom-add-picture\/{idiomId}\", idiomAddPicture)\n\t\t\thandle(\"\/idiom-save-picture\", idiomSavePicture)\n\t\t\thandle(\"\/impl-edit\/{idiomId}\/{implId}\", implEdit)\n\t\t\t\/\/handle(\"\/fake-idiom-save\", fakeIdiomSave)\n\t\t\thandle(\"\/idiom-create\", idiomCreate)\n\t\t\thandle(\"\/impl-create\/{idiomId}\", implCreate)\n\t\t\thandle(\"\/impl-create\/{idiomId}\/{lang}\", implCreate)\n\t\t\thandle(\"\/impl-save\", implSave)\n\t\t\t\/\/ Ajax\n\t\t\thandleAjax(\"\/ajax-idiom-vote\", ajaxIdiomVote)\n\t\t\thandleAjax(\"\/ajax-impl-vote\", ajaxImplVote)\n\t\t\thandleAjax(\"\/ajax-demo-site-suggest\", ajaxDemoSiteSuggest)\n\t\t\thandleAjax(\"\/ajax-user-message-box\", userMessageBoxAjax)\n\t\t\thandleAjax(\"\/ajax-dismiss-user-message\", dismissUserMessage)\n\t\t\thandle(\"\/about-block-project\", ajaxAboutProject)\n\t\t\thandle(\"\/about-block-all-idioms\", ajaxAboutAllIdioms)\n\t\t\thandle(\"\/about-block-language-coverage\", ajaxAboutLanguageCoverage)\n\t\t\thandle(\"\/about-block-rss\", ajaxAboutRss)\n\t\t\thandle(\"\/about-block-cheatsheets\", ajaxAboutCheatsheets)\n\t\t\thandle(\"\/about-block-see-also\", ajaxAboutSeeAlso)\n\t\t\thandle(\"\/about-block-contact\", ajaxAboutContact)\n\t\t\t\/\/ Admin\n\t\t\thandle(\"\/admin\", admin)\n\t\t\thandle(\"\/admin-data-export\", adminExport)\n\t\t\thandle(\"\/admin-data-import\", adminImport)\n\t\t\thandle(\"\/admin-resave-entities\", adminResaveEntities)\n\t\t\thandleAjax(\"\/admin-repair-history-versions\", adminRepairHistoryVersions)\n\t\t\thandleAjax(\"\/admin-data-import-ajax\", adminImportAjax)\n\t\t\thandleAjax(\"\/admin-reindex-ajax\", adminReindexAjax)\n\t\t\thandleAjax(\"\/admin-refresh-toggles-ajax\", ajaxRefreshToggles)\n\t\t\thandleAjax(\"\/admin-set-toggle-ajax\", ajaxSetToggle)\n\t\t\thandleAjax(\"\/admin-create-relation-ajax\", ajaxCreateRelation)\n\t\t\thandleAjax(\"\/admin-idiom-delete\", idiomDelete)\n\t\t\thandleAjax(\"\/admin-impl-delete\", implDelete)\n\t\t\thandleAjax(\"\/admin-send-message-for-user\", sendMessageForUserAjax)\n\t\t}\n\n\t\thandle(\"\/auth\", handleAuth)\n\t\thandle(\"\/_ah\/login_required\", handleAuth)\n\t}\n\thttp.Handle(\"\/\", r)\n}\n\n\/\/ Request will fail if path parameters are missing\nvar neededPathVariables = map[string][]string{\n\t\"\/idiom\/{idiomId}\": {\"idiomId\"},\n\t\"\/idiom\/{idiomId}\/impl\/{implId}\": {\"idiomId\"},\n\t\"\/idiom\/{idiomId}\/{idiomTitle}\": {\"idiomId\"},\n\t\"\/idiom\/{idiomId}\/{idiomTitle}\/{implId}\/{implLang}\": {\"idiomId\"},\n\t\"\/search\/{q}\": {\"q\"},\n\t\"\/my\/{nickname}\/{langs}\": {\"nickname\", \"langs\"},\n\t\"\/idiom-edit\/{idiomId}\": {\"idiomId\"},\n\t\"\/idiom-add-picture\/{idiomId}\": {\"idiomId\"},\n\t\"\/impl-edit\/{idiomId}\/{implId}\": {\"idiomId\", \"implId\"},\n\t\"\/impl-create\/{idiomId}\": {\"idiomId\"},\n\t\"\/impl-create\/{idiomId}\/{lang}\": {\"idiomId\"},\n\t\"\/cheatsheet\/{lang}\": {\"lang\"},\n}\n\n\/\/ Request will fail if it doesn't provide the required GET or POST parameters\nvar neededParameters = map[string][]string{\n\t\"\/typeahead-languages\": { \/*todo*\/ },\n\t\"\/idiom-save\": {\"idiom_title\"},\n\t\"\/idiom-save-picture\": { \/*todo*\/ },\n\t\"\/impl-save\": {\"idiom_id\", \"impl_code\"},\n\t\"\/revert\": {\"idiomId\", \"version\"},\n\t\"\/ajax-idiom-vote\": {\"idiomId\", \"choice\"},\n\t\"\/ajax-impl-vote\": {\"implId\", \"choice\"},\n\t\"\/ajax-demo-site-suggest\": { \/*todo*\/ },\n\t\"\/ajax-dismiss-user-message\": {\"key\"},\n\t\"\/admin-data-export\": { \/*todo*\/ },\n\t\"\/admin-data-import\": { \/*todo*\/ },\n\t\"\/admin-data-import-ajax\": { \/*todo*\/ },\n\t\"\/admin-set-toggle-ajax\": {\"toggle\", \"value\"},\n\t\"\/admin-create-relation-ajax\": {\"idiomAId\", \"idiomBId\"},\n\t\"\/admin-idiom-delete\": {\"idiomId\"},\n\t\"\/admin-impl-delete\": {\"idiomId\", \"implId\"},\n\t\"\/admin-send-message-for-user\": {\"username\", \"message\"},\n}\n\n\/\/ Request will fail if corresponding toggle is off\nvar neededToggles = map[string][]string{\n\t\"\/home\": {\"online\"},\n\t\"\/search\": {\"searchable\"},\n\t\"\/search\/{q}\": {\"searchable\"},\n\t\"\/idiom-save\": {\"writable\"},\n\t\"\/idiom-edit\/{idiomId}\": {\"writable\", \"writable\", \"idiomEditing\"},\n\t\"\/idiom-add-picture\/{idiomId}\": {\"writable\", \"idiomEditing\"},\n\t\"\/idiom-save-picture\": {\"writable\", \"idiomEditing\"},\n\t\"\/impl-edit\/{idiomId}\/{implId}\": {\"writable\", \"implEditing\"},\n\t\"\/idiom-create\": {\"writable\"},\n\t\"\/impl-create\/{idiomId}\": {\"writable\", \"implAddition\"},\n\t\"\/impl-create\/{idiomId}\/{lang}\": {\"writable\", \"implAddition\"},\n\t\"\/impl-save\": {\"writable\"},\n\t\"\/ajax-idiom-vote\": {\"writable\"},\n\t\"\/ajax-impl-vote\": {\"writable\"},\n\t\"\/admin\": {\"administrable\"},\n\t\"\/admin-data-export\": {\"administrable\"},\n\t\"\/admin-data-import\": {\"administrable\"},\n\t\"\/admin-data-import-ajax\": {\"administrable\"},\n\t\"\/admin-set-toggle-ajax\": {\"administrable\"},\n\t\"\/admin-create-relation-ajax\": {\"administrable\"},\n\t\"\/admin-idiom-delete\": {\"administrable\"},\n\t\"\/admin-impl-delete\": {\"administrable\"},\n}\n\ntype standardHandler func(w http.ResponseWriter, r *http.Request)\ntype betterHandler func(w http.ResponseWriter, r *http.Request) error\n\n\/\/ Wrap HandleFunc with\n\/\/ - error handling\n\/\/ - mandatory path variables check\n\/\/ - mandatory parameters check\n\/\/ - toggles check\nfunc handle(path string, h betterHandler) {\n\tr.HandleFunc(path,\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tif isSpam(w, r) {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tif msg := recover(); msg != nil {\n\t\t\t\t\tmsgStr := fmt.Sprintf(\"%v\", msg)\n\t\t\t\t\terrorPage(w, r, PiError{msgStr, http.StatusInternalServerError})\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}()\n\t\t\tif configTime == \"0\" {\n\t\t\t\tc := r.Context()\n\t\t\t\t_ = refreshToggles(c)\n\t\t\t\t\/\/ If it fails... well, ignore for now and continue with non-fresh toggles.\n\t\t\t}\n\n\t\t\tif err := muxVarsMissing(w, r, neededPathVariables[path]...); err != nil {\n\t\t\t\terrorPage(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := togglesMissing(w, r, neededToggles[path]...); err != nil {\n\t\t\t\terrorPage(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := parametersMissing(w, r, neededParameters[path]...); err != nil {\n\t\t\t\terrorPage(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr := h(w, r)\n\t\t\tif err != nil {\n\t\t\t\terrorPage(w, r, err)\n\t\t\t}\n\t\t})\n}\n\nfunc handleAjax(path string, h betterHandler) {\n\tr.HandleFunc(path,\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tif isSpam(w, r) {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tif msg := recover(); msg != nil {\n\t\t\t\t\tmsgStr := fmt.Sprintf(\"%v\", msg)\n\t\t\t\t\terrorJSON(w, r, PiError{msgStr, http.StatusInternalServerError})\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}()\n\t\t\tif configTime == \"0\" {\n\t\t\t\tc := r.Context()\n\t\t\t\t_ = refreshToggles(c)\n\t\t\t\t\/\/ If it fails... well, ignore for now and continue with non-fresh toggles.\n\t\t\t}\n\n\t\t\tif err := muxVarsMissing(w, r, neededPathVariables[path]...); err != nil {\n\t\t\t\terrorJSON(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := togglesMissing(w, r, neededToggles[path]...); err != nil {\n\t\t\t\terrorJSON(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := parametersMissing(w, r, neededParameters[path]...); err != nil {\n\t\t\t\terrorJSON(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr := h(w, r)\n\t\t\tif err != nil {\n\t\t\t\terrorJSON(w, r, err)\n\t\t\t}\n\t\t})\n}\n\nvar datastoreDao = GaeDatastoreAccessor{}\nvar memcachedDao = MemcacheDatastoreAccessor{datastoreDao}\nvar dao = memcachedDao\n\nvar daoVotes = GaeVotesAccessor{}\n\nfunc parametersMissing(w http.ResponseWriter, r *http.Request, params ...string) error {\n\tmissing := []string{}\n\tfor _, param := range params {\n\t\tif r.FormValue(param) == \"\" {\n\t\t\tmissing = append(missing, param)\n\t\t}\n\t}\n\tif len(missing) > 0 {\n\t\treturn PiError{fmt.Sprintf(\"Missing parameters : %s\", missing), http.StatusBadRequest}\n\t}\n\treturn nil\n}\n\n\/\/ Looks in gorilla mux populated variables\nfunc muxVarsMissing(w http.ResponseWriter, r *http.Request, params ...string) error {\n\tmissing := []string{}\n\tmuxvars := mux.Vars(r)\n\tfor _, param := range params {\n\t\tif muxvars[param] == \"\" {\n\t\t\tmissing = append(missing, param)\n\t\t}\n\t}\n\tif len(missing) > 0 {\n\t\treturn PiError{fmt.Sprintf(\"Missing parameters : %s\", missing), http.StatusBadRequest}\n\t}\n\treturn nil\n}\n\nfunc validateURLFormat(urlStr string) error {\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !u.IsAbs() {\n\t\treturn fmt.Errorf(\"Requires an absolute URL\")\n\t}\n\treturn nil\n}\n\nfunc validateURLFormatOrEmpty(urlStr string) error {\n\tif urlStr == \"\" {\n\t\treturn nil\n\t}\n\treturn validateURLFormat(urlStr)\n}\n\n\/*\nfunc logIf(err error, logfunc func(format string, args ...interface{}), when string) {\n\tif err != nil {\n\t\tlogfunc(\"Problem on %v: %v\", when, err.Error())\n\t}\n}\n*\/\n\nfunc logIf(err error, logfunc func(c context.Context, format string, args ...interface{}), c context.Context, when string) {\n\tif err != nil {\n\t\tlogfunc(c, \"Problem on %v: %v\", when, err.Error())\n\t}\n}\nUpdated static files.package main\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ ThemeVersion is the version of the current CSS-JS theme.\n\/\/ It is the name of the folder containing the theme files.\nconst ThemeVersion = \"default\"\n\n\/\/ ThemeDate is the prefix used for \"revving\" the static files and enable long-term HTTP cache.\n\/\/ It MUST end with underscore _ (see app.yaml)\nconst ThemeDate = \"20190303_\"\n\nvar r = mux.NewRouter()\n\nfunc init() {\n\tinitEnv()\n\tinitToggles()\n\tinitRoutes()\n\n\t\/\/ We want the random results to be different even if we reboot the server. Thus, we use\n\t\/\/ the clock to seed the default generator.\n\t\/\/ See https:\/\/www.programming-idioms.org\/idiom\/70\/use-clock-as-random-generator-seed\/346\/go\n\trand.Seed(time.Now().UnixNano())\n}\n\nfunc initRoutes() {\n\tif !toggles[\"online\"] {\n\t\thandle(\"\/\", makeWall(\" Under maintenance.<\/i>\"))\n\t\t\/\/r.HandleFunc(\"\/\", makeWall(\" Coming soon.<\/i>\"))\n\t} else {\n\t\t\/\/handle(\"\/\", makeWall(\" Coming soon.<\/i>\"))\n\t\thandle(\"\/\", home)\n\t\thandle(\"\/home\", home)\n\t\thandle(\"\/wall\", makeWall(\" Coming soon.<\/i>\"))\n\t\thandle(\"\/about\", about)\n\t\thandle(\"\/idiom\/{idiomId}\", idiomDetail)\n\t\thandle(\"\/idiom\/{idiomId}\/impl\/{implId}\", idiomDetail)\n\t\thandle(\"\/idiom\/{idiomId}\/{idiomTitle}\", idiomDetail)\n\t\thandle(\"\/idiom\/{idiomId}\/diff\/{v1}\/{v2}\", versionDiff)\n\t\thandle(\"\/idiom\/{idiomId}\/{idiomTitle}\/{implId}\/{implLang}\", idiomDetail)\n\t\thandle(\"\/history\/{idiomId}\", idiomHistory)\n\t\thandle(\"\/revert\", revertIdiomVersion)\n\t\thandle(\"\/history-restore\", restoreIdiomVersion)\n\t\thandle(\"\/all-idioms\", allIdioms)\n\t\thandle(\"\/random-idiom\/having\/{havingLang}\", randomIdiomHaving)\n\t\thandle(\"\/random-idiom\/not-having\/{notHavingLang}\", randomIdiomNotHaving)\n\t\thandle(\"\/random-idiom\", randomIdiom)\n\t\thandle(\"\/search\", searchRedirect)\n\t\thandle(\"\/search\/{q}\", search)\n\t\thandle(\"\/list-by-language\/{langs}\", listByLanguage)\n\t\thandle(\"\/missing-fields\/{lang}\", missingList)\n\t\thandle(\"\/idiom-picture\", idiomPicture)\n\t\thandle(\"\/rss-recently-created\", rssRecentlyCreated)\n\t\thandle(\"\/rss-recently-updated\", rssRecentlyUpdated)\n\t\thandle(\"\/my\/{nickname}\/{langs}\", bookmarkableUserURL)\n\t\thandle(\"\/my\/{langs}\", bookmarkableUserURL)\n\t\thandle(\"\/cheatsheet\/{lang}\", cheatsheet)\n\t\thandleAjax(\"\/typeahead-languages\", typeaheadLanguages)\n\t\thandleAjax(\"\/ajax-other-implementations\", ajaxOtherImplementations)\n\t\tif toggles[\"writable\"] {\n\t\t\t\/\/ When not in \"read-only\" mode\n\t\t\thandle(\"\/idiom-save\", idiomSave)\n\t\t\thandle(\"\/idiom-edit\/{idiomId}\", idiomEdit)\n\t\t\thandle(\"\/idiom-add-picture\/{idiomId}\", idiomAddPicture)\n\t\t\thandle(\"\/idiom-save-picture\", idiomSavePicture)\n\t\t\thandle(\"\/impl-edit\/{idiomId}\/{implId}\", implEdit)\n\t\t\t\/\/handle(\"\/fake-idiom-save\", fakeIdiomSave)\n\t\t\thandle(\"\/idiom-create\", idiomCreate)\n\t\t\thandle(\"\/impl-create\/{idiomId}\", implCreate)\n\t\t\thandle(\"\/impl-create\/{idiomId}\/{lang}\", implCreate)\n\t\t\thandle(\"\/impl-save\", implSave)\n\t\t\t\/\/ Ajax\n\t\t\thandleAjax(\"\/ajax-idiom-vote\", ajaxIdiomVote)\n\t\t\thandleAjax(\"\/ajax-impl-vote\", ajaxImplVote)\n\t\t\thandleAjax(\"\/ajax-demo-site-suggest\", ajaxDemoSiteSuggest)\n\t\t\thandleAjax(\"\/ajax-user-message-box\", userMessageBoxAjax)\n\t\t\thandleAjax(\"\/ajax-dismiss-user-message\", dismissUserMessage)\n\t\t\thandle(\"\/about-block-project\", ajaxAboutProject)\n\t\t\thandle(\"\/about-block-all-idioms\", ajaxAboutAllIdioms)\n\t\t\thandle(\"\/about-block-language-coverage\", ajaxAboutLanguageCoverage)\n\t\t\thandle(\"\/about-block-rss\", ajaxAboutRss)\n\t\t\thandle(\"\/about-block-cheatsheets\", ajaxAboutCheatsheets)\n\t\t\thandle(\"\/about-block-see-also\", ajaxAboutSeeAlso)\n\t\t\thandle(\"\/about-block-contact\", ajaxAboutContact)\n\t\t\t\/\/ Admin\n\t\t\thandle(\"\/admin\", admin)\n\t\t\thandle(\"\/admin-data-export\", adminExport)\n\t\t\thandle(\"\/admin-data-import\", adminImport)\n\t\t\thandle(\"\/admin-resave-entities\", adminResaveEntities)\n\t\t\thandleAjax(\"\/admin-repair-history-versions\", adminRepairHistoryVersions)\n\t\t\thandleAjax(\"\/admin-data-import-ajax\", adminImportAjax)\n\t\t\thandleAjax(\"\/admin-reindex-ajax\", adminReindexAjax)\n\t\t\thandleAjax(\"\/admin-refresh-toggles-ajax\", ajaxRefreshToggles)\n\t\t\thandleAjax(\"\/admin-set-toggle-ajax\", ajaxSetToggle)\n\t\t\thandleAjax(\"\/admin-create-relation-ajax\", ajaxCreateRelation)\n\t\t\thandleAjax(\"\/admin-idiom-delete\", idiomDelete)\n\t\t\thandleAjax(\"\/admin-impl-delete\", implDelete)\n\t\t\thandleAjax(\"\/admin-send-message-for-user\", sendMessageForUserAjax)\n\t\t}\n\n\t\thandle(\"\/auth\", handleAuth)\n\t\thandle(\"\/_ah\/login_required\", handleAuth)\n\t}\n\thttp.Handle(\"\/\", r)\n}\n\n\/\/ Request will fail if path parameters are missing\nvar neededPathVariables = map[string][]string{\n\t\"\/idiom\/{idiomId}\": {\"idiomId\"},\n\t\"\/idiom\/{idiomId}\/impl\/{implId}\": {\"idiomId\"},\n\t\"\/idiom\/{idiomId}\/{idiomTitle}\": {\"idiomId\"},\n\t\"\/idiom\/{idiomId}\/{idiomTitle}\/{implId}\/{implLang}\": {\"idiomId\"},\n\t\"\/search\/{q}\": {\"q\"},\n\t\"\/my\/{nickname}\/{langs}\": {\"nickname\", \"langs\"},\n\t\"\/idiom-edit\/{idiomId}\": {\"idiomId\"},\n\t\"\/idiom-add-picture\/{idiomId}\": {\"idiomId\"},\n\t\"\/impl-edit\/{idiomId}\/{implId}\": {\"idiomId\", \"implId\"},\n\t\"\/impl-create\/{idiomId}\": {\"idiomId\"},\n\t\"\/impl-create\/{idiomId}\/{lang}\": {\"idiomId\"},\n\t\"\/cheatsheet\/{lang}\": {\"lang\"},\n}\n\n\/\/ Request will fail if it doesn't provide the required GET or POST parameters\nvar neededParameters = map[string][]string{\n\t\"\/typeahead-languages\": { \/*todo*\/ },\n\t\"\/idiom-save\": {\"idiom_title\"},\n\t\"\/idiom-save-picture\": { \/*todo*\/ },\n\t\"\/impl-save\": {\"idiom_id\", \"impl_code\"},\n\t\"\/revert\": {\"idiomId\", \"version\"},\n\t\"\/ajax-idiom-vote\": {\"idiomId\", \"choice\"},\n\t\"\/ajax-impl-vote\": {\"implId\", \"choice\"},\n\t\"\/ajax-demo-site-suggest\": { \/*todo*\/ },\n\t\"\/ajax-dismiss-user-message\": {\"key\"},\n\t\"\/admin-data-export\": { \/*todo*\/ },\n\t\"\/admin-data-import\": { \/*todo*\/ },\n\t\"\/admin-data-import-ajax\": { \/*todo*\/ },\n\t\"\/admin-set-toggle-ajax\": {\"toggle\", \"value\"},\n\t\"\/admin-create-relation-ajax\": {\"idiomAId\", \"idiomBId\"},\n\t\"\/admin-idiom-delete\": {\"idiomId\"},\n\t\"\/admin-impl-delete\": {\"idiomId\", \"implId\"},\n\t\"\/admin-send-message-for-user\": {\"username\", \"message\"},\n}\n\n\/\/ Request will fail if corresponding toggle is off\nvar neededToggles = map[string][]string{\n\t\"\/home\": {\"online\"},\n\t\"\/search\": {\"searchable\"},\n\t\"\/search\/{q}\": {\"searchable\"},\n\t\"\/idiom-save\": {\"writable\"},\n\t\"\/idiom-edit\/{idiomId}\": {\"writable\", \"writable\", \"idiomEditing\"},\n\t\"\/idiom-add-picture\/{idiomId}\": {\"writable\", \"idiomEditing\"},\n\t\"\/idiom-save-picture\": {\"writable\", \"idiomEditing\"},\n\t\"\/impl-edit\/{idiomId}\/{implId}\": {\"writable\", \"implEditing\"},\n\t\"\/idiom-create\": {\"writable\"},\n\t\"\/impl-create\/{idiomId}\": {\"writable\", \"implAddition\"},\n\t\"\/impl-create\/{idiomId}\/{lang}\": {\"writable\", \"implAddition\"},\n\t\"\/impl-save\": {\"writable\"},\n\t\"\/ajax-idiom-vote\": {\"writable\"},\n\t\"\/ajax-impl-vote\": {\"writable\"},\n\t\"\/admin\": {\"administrable\"},\n\t\"\/admin-data-export\": {\"administrable\"},\n\t\"\/admin-data-import\": {\"administrable\"},\n\t\"\/admin-data-import-ajax\": {\"administrable\"},\n\t\"\/admin-set-toggle-ajax\": {\"administrable\"},\n\t\"\/admin-create-relation-ajax\": {\"administrable\"},\n\t\"\/admin-idiom-delete\": {\"administrable\"},\n\t\"\/admin-impl-delete\": {\"administrable\"},\n}\n\ntype standardHandler func(w http.ResponseWriter, r *http.Request)\ntype betterHandler func(w http.ResponseWriter, r *http.Request) error\n\n\/\/ Wrap HandleFunc with\n\/\/ - error handling\n\/\/ - mandatory path variables check\n\/\/ - mandatory parameters check\n\/\/ - toggles check\nfunc handle(path string, h betterHandler) {\n\tr.HandleFunc(path,\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tif isSpam(w, r) {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tif msg := recover(); msg != nil {\n\t\t\t\t\tmsgStr := fmt.Sprintf(\"%v\", msg)\n\t\t\t\t\terrorPage(w, r, PiError{msgStr, http.StatusInternalServerError})\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}()\n\t\t\tif configTime == \"0\" {\n\t\t\t\tc := r.Context()\n\t\t\t\t_ = refreshToggles(c)\n\t\t\t\t\/\/ If it fails... well, ignore for now and continue with non-fresh toggles.\n\t\t\t}\n\n\t\t\tif err := muxVarsMissing(w, r, neededPathVariables[path]...); err != nil {\n\t\t\t\terrorPage(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := togglesMissing(w, r, neededToggles[path]...); err != nil {\n\t\t\t\terrorPage(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := parametersMissing(w, r, neededParameters[path]...); err != nil {\n\t\t\t\terrorPage(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr := h(w, r)\n\t\t\tif err != nil {\n\t\t\t\terrorPage(w, r, err)\n\t\t\t}\n\t\t})\n}\n\nfunc handleAjax(path string, h betterHandler) {\n\tr.HandleFunc(path,\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tif isSpam(w, r) {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tif msg := recover(); msg != nil {\n\t\t\t\t\tmsgStr := fmt.Sprintf(\"%v\", msg)\n\t\t\t\t\terrorJSON(w, r, PiError{msgStr, http.StatusInternalServerError})\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}()\n\t\t\tif configTime == \"0\" {\n\t\t\t\tc := r.Context()\n\t\t\t\t_ = refreshToggles(c)\n\t\t\t\t\/\/ If it fails... well, ignore for now and continue with non-fresh toggles.\n\t\t\t}\n\n\t\t\tif err := muxVarsMissing(w, r, neededPathVariables[path]...); err != nil {\n\t\t\t\terrorJSON(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := togglesMissing(w, r, neededToggles[path]...); err != nil {\n\t\t\t\terrorJSON(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := parametersMissing(w, r, neededParameters[path]...); err != nil {\n\t\t\t\terrorJSON(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr := h(w, r)\n\t\t\tif err != nil {\n\t\t\t\terrorJSON(w, r, err)\n\t\t\t}\n\t\t})\n}\n\nvar datastoreDao = GaeDatastoreAccessor{}\nvar memcachedDao = MemcacheDatastoreAccessor{datastoreDao}\nvar dao = memcachedDao\n\nvar daoVotes = GaeVotesAccessor{}\n\nfunc parametersMissing(w http.ResponseWriter, r *http.Request, params ...string) error {\n\tmissing := []string{}\n\tfor _, param := range params {\n\t\tif r.FormValue(param) == \"\" {\n\t\t\tmissing = append(missing, param)\n\t\t}\n\t}\n\tif len(missing) > 0 {\n\t\treturn PiError{fmt.Sprintf(\"Missing parameters : %s\", missing), http.StatusBadRequest}\n\t}\n\treturn nil\n}\n\n\/\/ Looks in gorilla mux populated variables\nfunc muxVarsMissing(w http.ResponseWriter, r *http.Request, params ...string) error {\n\tmissing := []string{}\n\tmuxvars := mux.Vars(r)\n\tfor _, param := range params {\n\t\tif muxvars[param] == \"\" {\n\t\t\tmissing = append(missing, param)\n\t\t}\n\t}\n\tif len(missing) > 0 {\n\t\treturn PiError{fmt.Sprintf(\"Missing parameters : %s\", missing), http.StatusBadRequest}\n\t}\n\treturn nil\n}\n\nfunc validateURLFormat(urlStr string) error {\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !u.IsAbs() {\n\t\treturn fmt.Errorf(\"Requires an absolute URL\")\n\t}\n\treturn nil\n}\n\nfunc validateURLFormatOrEmpty(urlStr string) error {\n\tif urlStr == \"\" {\n\t\treturn nil\n\t}\n\treturn validateURLFormat(urlStr)\n}\n\n\/*\nfunc logIf(err error, logfunc func(format string, args ...interface{}), when string) {\n\tif err != nil {\n\t\tlogfunc(\"Problem on %v: %v\", when, err.Error())\n\t}\n}\n*\/\n\nfunc logIf(err error, logfunc func(c context.Context, format string, args ...interface{}), c context.Context, when string) {\n\tif err != nil {\n\t\tlogfunc(c, \"Problem on %v: %v\", when, err.Error())\n\t}\n}\n<|endoftext|>"} {"text":"package controllers\n\nimport (\n\t\"github.com\/mrjones\/oauth\"\n\t\"github.com\/revel\/revel\"\n\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"chant\/conf\/my\"\n)\n\n\/\/ コンシューマの定義とプロバイダの定義を含んだ\n\/\/ *oauth.Consumerをつくる\nvar twitter = oauth.NewConsumer(\n\t\/\/ コンシューマの定義\n\tmy.AppTwitterConsumerKey,\n\tmy.AppTwitterConsumerSecret,\n\t\/\/ プロバイダの定義\n\toauth.ServiceProvider{\n\t\tAuthorizeTokenUrl: \"https:\/\/api.twitter.com\/oauth\/authorize\",\n\t\tRequestTokenUrl: \"https:\/\/api.twitter.com\/oauth\/request_token\",\n\t\tAccessTokenUrl: \"https:\/\/api.twitter.com\/oauth\/access_token\",\n\t},\n)\n\ntype Auth struct {\n\t\/\/ embed\n\t*revel.Controller\n}\n\nfunc (c Auth) Index(oauth_verifier string) revel.Result {\n\n\tif _, nameExists := c.Session[\"screenName\"]; nameExists {\n\t\t\/\/ 既にセッションを持っているのでルームにリダイレクトする\n\t\treturn c.Redirect(Room.Index)\n\t}\n\n\t\/\/ oauth_verifierが無い状態でこのURLを叩いたとき\n\t\/\/ つまり、ユーザの最初のAuthenticateへのアクセスである\n\n\t\/\/ まずはverifier獲得した状態でリダイレクトするように促す\n\t\/\/ このアプリケーションのコンシューマキーとコンシューマシークレットを用いて\n\t\/\/ 一時的に使えるrequestTokenの取得を試みる\n\thost, _ := revel.Config.String(\"http.host\")\n\tport, _ := revel.Config.String(\"http.port\")\n\tif port != \"\" {\n\t\tport = \":\" + port\n\t}\n\trequestToken, url, err := twitter.GetRequestTokenAndUrl(fmt.Sprintf(\"http:\/\/%s%s\/auth\/callback\", host, port))\n\tif err == nil {\n\t\t\/\/ 一時的に使えるrequestTokenが取得できたので、サーバ側で一次保存しておく\n\t\tc.Session[\"requestToken\"] = requestToken.Token\n\t\tc.Session[\"requestSecret\"] = requestToken.Secret\n\t\t\/\/ あとは、ユーザの問題\n\t\t\/\/ oauth_verifierを取ってきてもらう\n\t\treturn c.Redirect(url)\n\t} else {\n\t\trevel.ERROR.Println(\n\t\t\t\"そもそもコンシューマキーを用いてリクエストトークン取得できなかったで御座る\",\n\t\t\terr,\n\t\t)\n\t}\n\n\t\/\/ 何が起きてもとりあえずトップへ飛ばす\n\treturn c.Redirect(Application.Index)\n}\n\nfunc (c *Auth) Callback(oauth_verifier string) revel.Result {\n\n\t\/\/ TODO: oauth_verifierあるとか無いとか。\n\t\/\/   : URL直打ちだったらあり得る\n\n\t\/\/ RequestTokenの復元\n\trequestToken := &oauth.RequestToken{\n\t\tc.Session[\"requestToken\"],\n\t\tc.Session[\"requestSecret\"],\n\t}\n\t\/\/ 不要になったので捨てる\n\tdelete(c.Session, \"requestToken\")\n\tdelete(c.Session, \"requestSecret\")\n\t\/\/ これと、oauth_verifierを用いてaccess_tokenを獲得する\n\taccessToken, err := twitter.AuthorizeToken(requestToken, oauth_verifier)\n\tif err == nil {\n\t\t\/\/ 成功したので、これを用いてユーザ情報を取得する\n\t\tresp, _ := twitter.Get(\n\t\t\t\/\/\"https:\/\/api.twitter.com\/1.1\/statuses\/mentions_timeline.json\",\n\t\t\t\"https:\/\/api.twitter.com\/1.1\/account\/verify_credentials.json\",\n\t\t\tmap[string]string{},\n\t\t\taccessToken,\n\t\t)\n\t\tdefer resp.Body.Close()\n\t\taccount := struct {\n\t\t\tName string `json:\"name\"`\n\t\t\tProfileImageUrl string `json:\"profile_image_url\"`\n\t\t\tScreenName string `json:\"screen_name\"`\n\t\t}{}\n\t\t_ = json.NewDecoder(resp.Body).Decode(&account)\n\t\t\/\/ }}}\n\t\t\/\/ セッションに格納する\n\t\tc.Session[\"name\"] = account.Name\n\t\tc.Session[\"screenName\"] = account.ScreenName\n\t\tc.Session[\"profileImageUrl\"] = account.ProfileImageUrl\n\t} else {\n\t\t\/\/ 失敗したので、エラーを吐く\n\t\trevel.ERROR.Println(\"requestTokenとoauth_verifierを用いてaccessTokenを得たかったけど失敗したの図:\\t\", err)\n\t}\n\n\treturn c.Redirect(Application.Index)\n}\n\nfunc init() {\n\t\/\/ revel.Controller.*が実行されるときに必ず呼べる?\n\t\/\/ twitter.Debug(true)\n}\nRefactor a bitpackage controllers\n\nimport (\n\t\"github.com\/mrjones\/oauth\"\n\t\"github.com\/revel\/revel\"\n\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"chant\/conf\/my\"\n)\n\n\/\/ コンシューマの定義とプロバイダの定義を含んだ\n\/\/ *oauth.Consumerをつくる\nvar twitter = oauth.NewConsumer(\n\t\/\/ コンシューマの定義\n\tmy.AppTwitterConsumerKey,\n\tmy.AppTwitterConsumerSecret,\n\t\/\/ プロバイダの定義\n\toauth.ServiceProvider{\n\t\tAuthorizeTokenUrl: \"https:\/\/api.twitter.com\/oauth\/authorize\",\n\t\tRequestTokenUrl: \"https:\/\/api.twitter.com\/oauth\/request_token\",\n\t\tAccessTokenUrl: \"https:\/\/api.twitter.com\/oauth\/access_token\",\n\t},\n)\n\ntype Auth struct {\n\t\/\/ embed\n\t*revel.Controller\n}\n\nfunc getCallbackURL() string {\n\thost, _ := revel.Config.String(\"http.host\")\n\tport, _ := revel.Config.String(\"http.port\")\n\tif port != \"\" {\n\t\tport = \":\" + port\n\t}\n\treturn fmt.Sprintf(\"http:\/\/%s%s\/auth\/callback\", host, port)\n}\n\nfunc (c Auth) Index(oauth_verifier string) revel.Result {\n\n\tif _, nameExists := c.Session[\"screenName\"]; nameExists {\n\t\t\/\/ 既にセッションを持っているのでルームにリダイレクトする\n\t\treturn c.Redirect(Room.Index)\n\t}\n\n\t\/\/ oauth_verifierが無い状態でこのURLを叩いたとき\n\t\/\/ つまり、ユーザの最初のAuthenticateへのアクセスである\n\n\t\/\/ まずはverifier獲得した状態でリダイレクトするように促す\n\t\/\/ このアプリケーションのコンシューマキーとコンシューマシークレットを用いて\n\t\/\/ 一時的に使えるrequestTokenの取得を試みる\n\trequestToken, url, err := twitter.GetRequestTokenAndUrl(getCallbackURL())\n\tif err == nil {\n\t\t\/\/ 一時的に使えるrequestTokenが取得できたので、サーバ側で一次保存しておく\n\t\tc.Session[\"requestToken\"] = requestToken.Token\n\t\tc.Session[\"requestSecret\"] = requestToken.Secret\n\t\t\/\/ あとは、ユーザの問題\n\t\t\/\/ oauth_verifierを取ってきてもらう\n\t\treturn c.Redirect(url)\n\t} else {\n\t\trevel.ERROR.Println(\n\t\t\t\"そもそもコンシューマキーを用いてリクエストトークン取得できなかったで御座る\",\n\t\t\terr,\n\t\t)\n\t}\n\n\t\/\/ 何が起きてもとりあえずトップへ飛ばす\n\treturn c.Redirect(Application.Index)\n}\n\nfunc (c *Auth) Callback(oauth_verifier string) revel.Result {\n\n\t\/\/ TODO: oauth_verifierあるとか無いとか。\n\t\/\/   : URL直打ちだったらあり得る\n\n\t\/\/ RequestTokenの復元\n\trequestToken := &oauth.RequestToken{\n\t\tc.Session[\"requestToken\"],\n\t\tc.Session[\"requestSecret\"],\n\t}\n\t\/\/ 不要になったので捨てる\n\tdelete(c.Session, \"requestToken\")\n\tdelete(c.Session, \"requestSecret\")\n\t\/\/ これと、oauth_verifierを用いてaccess_tokenを獲得する\n\taccessToken, err := twitter.AuthorizeToken(requestToken, oauth_verifier)\n\tif err == nil {\n\t\t\/\/ 成功したので、これを用いてユーザ情報を取得する\n\t\tresp, _ := twitter.Get(\n\t\t\t\/\/\"https:\/\/api.twitter.com\/1.1\/statuses\/mentions_timeline.json\",\n\t\t\t\"https:\/\/api.twitter.com\/1.1\/account\/verify_credentials.json\",\n\t\t\tmap[string]string{},\n\t\t\taccessToken,\n\t\t)\n\t\tdefer resp.Body.Close()\n\t\taccount := struct {\n\t\t\tName string `json:\"name\"`\n\t\t\tProfileImageUrl string `json:\"profile_image_url\"`\n\t\t\tScreenName string `json:\"screen_name\"`\n\t\t}{}\n\t\t_ = json.NewDecoder(resp.Body).Decode(&account)\n\t\t\/\/ }}}\n\t\t\/\/ セッションに格納する\n\t\tc.Session[\"name\"] = account.Name\n\t\tc.Session[\"screenName\"] = account.ScreenName\n\t\tc.Session[\"profileImageUrl\"] = account.ProfileImageUrl\n\t} else {\n\t\t\/\/ 失敗したので、エラーを吐く\n\t\trevel.ERROR.Println(\"requestTokenとoauth_verifierを用いてaccessTokenを得たかったけど失敗したの図:\\t\", err)\n\t}\n\n\treturn c.Redirect(Application.Index)\n}\n\nfunc init() {\n\t\/\/ revel.Controller.*が実行されるときに必ず呼べる?\n\t\/\/ twitter.Debug(true)\n}\n<|endoftext|>"} {"text":"package cmd\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tcli \"github.com\/spf13\/cobra\"\n\n\t\"github.com\/tmrts\/boilr\/pkg\/boilr\"\n\t\"github.com\/tmrts\/boilr\/pkg\/util\/exec\"\n\t\"github.com\/tmrts\/boilr\/pkg\/util\/exit\"\n\t\"github.com\/tmrts\/boilr\/pkg\/util\/osutil\"\n\t\"github.com\/tmrts\/boilr\/pkg\/util\/validate\"\n)\n\n\/\/ Save contains the cli-command for saving templates to template registry.\nvar Save = &cli.Command{\n\tUse: \"save \",\n\tShort: \"Save a project template to local template registry\",\n\tRun: func(c *cli.Command, args []string) {\n\t\tMustValidateArgs(args, []validate.Argument{\n\t\t\t{\"template-path\", validate.UnixPath},\n\t\t\t{\"template-tag\", validate.AlphanumericExt},\n\t\t})\n\n\t\tMustValidateTemplateDir()\n\n\t\ttmplDir, templateName := args[0], args[1]\n\n\t\tMustValidateTemplate(tmplDir)\n\n\t\ttargetDir := filepath.Join(boilr.Configuration.TemplateDirPath, templateName)\n\n\t\tswitch exists, err := osutil.DirExists(targetDir); {\n\t\tcase err != nil:\n\t\t\texit.Error(fmt.Errorf(\"save: %s\", err))\n\t\tcase exists:\n\t\t\tshouldOverwrite := GetBoolFlag(c, \"force\")\n\n\t\t\tif err != nil {\n\t\t\t\texit.Error(fmt.Errorf(\"save: %v\", err))\n\t\t\t}\n\n\t\t\tif !shouldOverwrite {\n\t\t\t\texit.OK(\"Template %v already exists use -f to overwrite\", templateName)\n\t\t\t}\n\n\t\t\tif err := os.RemoveAll(targetDir); err != nil {\n\t\t\t\texit.Error(fmt.Errorf(\"save: %v\", err))\n\t\t\t}\n\t\t}\n\n\t\tif _, err := exec.Cmd(\"cp\", \"-r\", tmplDir, targetDir); err != nil {\n\t\t\texit.Error(err)\n\t\t}\n\n\t\tabsTemplateDir, err := filepath.Abs(tmplDir)\n\t\tif err != nil {\n\t\t\texit.Error(err)\n\t\t}\n\n\t\tif err := serializeMetadata(templateName, \"local:\"+absTemplateDir, targetDir); err != nil {\n\t\t\texit.Error(fmt.Errorf(\"save: %s\", err))\n\t\t}\n\n\t\texit.OK(\"Successfully saved the template %v\", templateName)\n\t},\n}\npkg\/cmd\/save: clarify the usage of the commandpackage cmd\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tcli \"github.com\/spf13\/cobra\"\n\n\t\"github.com\/tmrts\/boilr\/pkg\/boilr\"\n\t\"github.com\/tmrts\/boilr\/pkg\/util\/exec\"\n\t\"github.com\/tmrts\/boilr\/pkg\/util\/exit\"\n\t\"github.com\/tmrts\/boilr\/pkg\/util\/osutil\"\n\t\"github.com\/tmrts\/boilr\/pkg\/util\/validate\"\n)\n\n\/\/ Save contains the cli-command for saving templates to template registry.\nvar Save = &cli.Command{\n\tUse: \"save \",\n\tShort: \"Save a local project template to template registry\",\n\tRun: func(c *cli.Command, args []string) {\n\t\tMustValidateArgs(args, []validate.Argument{\n\t\t\t{\"template-path\", validate.UnixPath},\n\t\t\t{\"template-tag\", validate.AlphanumericExt},\n\t\t})\n\n\t\tMustValidateTemplateDir()\n\n\t\ttmplDir, templateName := args[0], args[1]\n\n\t\tMustValidateTemplate(tmplDir)\n\n\t\ttargetDir := filepath.Join(boilr.Configuration.TemplateDirPath, templateName)\n\n\t\tswitch exists, err := osutil.DirExists(targetDir); {\n\t\tcase err != nil:\n\t\t\texit.Error(fmt.Errorf(\"save: %s\", err))\n\t\tcase exists:\n\t\t\tshouldOverwrite := GetBoolFlag(c, \"force\")\n\n\t\t\tif err != nil {\n\t\t\t\texit.Error(fmt.Errorf(\"save: %v\", err))\n\t\t\t}\n\n\t\t\tif !shouldOverwrite {\n\t\t\t\texit.OK(\"Template %v already exists use -f to overwrite\", templateName)\n\t\t\t}\n\n\t\t\tif err := os.RemoveAll(targetDir); err != nil {\n\t\t\t\texit.Error(fmt.Errorf(\"save: %v\", err))\n\t\t\t}\n\t\t}\n\n\t\tif _, err := exec.Cmd(\"cp\", \"-r\", tmplDir, targetDir); err != nil {\n\t\t\texit.Error(err)\n\t\t}\n\n\t\tabsTemplateDir, err := filepath.Abs(tmplDir)\n\t\tif err != nil {\n\t\t\texit.Error(err)\n\t\t}\n\n\t\tif err := serializeMetadata(templateName, \"local:\"+absTemplateDir, targetDir); err != nil {\n\t\t\texit.Error(fmt.Errorf(\"save: %s\", err))\n\t\t}\n\n\t\texit.OK(\"Successfully saved the template %v\", templateName)\n\t},\n}\n<|endoftext|>"} {"text":"\/\/ Package fun contains Fun language AST and means to prettyprint its source code.\npackage fun\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strings\"\n)\n\nconst (\n\tlf = \"\\n\"\n\tundefined = \"undefined\"\n\tarrow = \" -> \"\n\ttypeSeparator = \" :: \"\n\tunit = \"()\"\n\tspace = \" \"\n\tcomma = \", \"\n\tbinding = \" = \"\n\tdot = \".\"\n\tintendation = \" \"\n\tdoDecl = \"do\" + lf\n\topenBracket = \"(\"\n\tcloseBracket = \")\"\n)\n\n\/\/ Module represents single source file.\ntype Module struct {\n\tName string\n\tImports []Import\n\tDecls []Decl\n}\n\n\/\/ Import represents import.\ntype Import struct {\n\tPath string\n\tAlias string\n}\n\n\/\/ Decl represents declaration.\ntype Decl interface {\n\tString() string\n\tdeclMarker()\n}\n\nfunc (fd FuncDecl) declMarker() {}\n\n\/\/ FuncBody represents function body.\ntype FuncBody interface {\n\tString() string\n\tfuncBodyMarker()\n}\n\nfunc (fa FuncApplication) funcBodyMarker() {}\nfunc (do DoBlock) funcBodyMarker() {}\nfunc (u Undef) funcBodyMarker() {}\n\n\/\/ Undef represents function body placeholder.\ntype Undef bool\n\n\/\/ Undefined is an Undef singleton.\nconst Undefined Undef = true\n\nfunc (u Undef) String() string {\n\treturn undefined\n}\n\n\/\/ FuncDecl represents function declaration.\ntype FuncDecl struct {\n\tName string\n\tParams Parameters\n\tResults Results\n\tBody FuncBody\n}\n\n\/\/ Parameter represents function parameter.\ntype Parameter struct {\n\tName string\n\tType Type\n}\n\n\/\/ Type represents type.\ntype Type string\n\nfunc (imp Import) String() string {\n\tif imp.Alias == \"\" {\n\t\treturn fmt.Sprintf(\"import \\\"%s\\\"\", imp.Path)\n\t}\n\treturn fmt.Sprintf(\"import \\\"%s\\\" as \\\"%s\\\"\", imp.Path, imp.Alias)\n}\n\nfunc (mod Module) String() string {\n\ttopLevels := make([]string, 1+1+len(mod.Decls))\n\n\t\/\/ Module name\n\ttopLevels[0] = fmt.Sprintf(\"module %s where\\n\", mod.Name)\n\n\t\/\/ Imports\n\timports := make([]string, len(mod.Imports))\n\tfor i, imp := range mod.Imports {\n\t\timports[i] = imp.String()\n\t}\n\ttopLevels[1] = strings.Join(imports, lf) + lf\n\n\t\/\/ Top-level declarations\n\tfor i, decl := range mod.Decls {\n\t\ttopLevels[2+i] = decl.String()\n\t}\n\n\treturn strings.Join(topLevels, lf) + lf\n}\n\nfunc (fd FuncDecl) String() string {\n\tvar out bytes.Buffer\n\t\/\/ Type signature\n\tfmt.Fprint(&out, fd.Name)\n\tfmt.Fprint(&out, typeSeparator)\n\tfmt.Fprint(&out, fd.Params)\n\tif len(fd.Params) > 0 {\n\t\tfmt.Fprint(&out, arrow)\n\t}\n\tfmt.Fprint(&out, fd.Results, lf)\n\n\t\/\/ Name and parameters\n\tfmt.Fprint(&out, fd.Name)\n\tif len(fd.Params) > 0 {\n\t\tfmt.Fprint(&out, space, fd.Params.Names())\n\t}\n\tfmt.Fprint(&out, binding)\n\n\t\/\/ TODO implement body\n\tif fd.Body == nil {\n\t\tfmt.Fprint(&out, undefined)\n\t} else {\n\t\tfmt.Fprint(&out, fd.Body)\n\t}\n\n\treturn out.String() + lf\n}\n\n\/\/ Parameters represents function parameters.\ntype Parameters []Parameter \/\/ TODO not only types\n\nfunc (ps Parameters) String() string {\n\tss := make([]string, len(ps))\n\tfor i := 0; i < len(ps); i++ {\n\t\tss[i] = string(ps[i].Type)\n\t}\n\treturn strings.Join(ss, arrow)\n}\n\n\/\/ Names build parameter list for binding.\nfunc (ps Parameters) Names() string {\n\tss := make([]string, len(ps))\n\tfor i := 0; i < len(ps); i++ {\n\t\tss[i] = string(ps[i].Name)\n\t}\n\treturn strings.Join(ss, space)\n}\n\n\/\/ Results represents function result list.\ntype Results []Type\n\nfunc (ts Results) String() string {\n\tswitch len(ts) {\n\tcase 0:\n\t\treturn unit\n\tcase 1:\n\t\treturn string(ts[0])\n\tdefault:\n\t\tss := make([]string, len(ts))\n\t\tfor i := 0; i < len(ts); i++ {\n\t\t\tss[i] = string(ts[i])\n\t\t}\n\t\treturn openBracket + strings.Join(ss, comma) + closeBracket\n\t}\n}\n\n\/\/ Expression is a pure function.\ntype Expression interface {\n\tfuncBodyMarker()\n\t\/\/ expressionMarker()\n}\n\n\/\/ Statement just performs side effects.\ntype Statement interface {\n\tfuncBodyMarker()\n\t\/\/ statementMarker()\n}\n\n\/\/ DoBlock represents raw Go code as a function body.\ntype DoBlock struct {\n\tText []string\n}\n\n\/\/ FuncApplication represents function application.\n\/\/ At least for now it may be both Statement and Expression.\ntype FuncApplication struct {\n\tName string\n\tModule string\n\tArguments []Argument\n\tKind funcApplicationKind\n}\n\ntype funcApplicationKind uint8\n\n\/\/ Kinds of a FuncApplication\nconst (\n\tEXPRESSION funcApplicationKind = iota\n\tSTATEMENT funcApplicationKind = iota\n)\n\nfunc (fa FuncApplication) String() string {\n\tvar buf bytes.Buffer\n\tif fa.Module != \"\" {\n\t\tfmt.Fprint(&buf, fa.Module, dot)\n\t}\n\tfmt.Fprint(&buf, fa.Name)\n\tif len(fa.Arguments) > 0 {\n\t\targs := make([]string, len(fa.Arguments))\n\t\tfor i := 0; i < len(args); i++ {\n\t\t\tswitch arg := fa.Arguments[i].(type) {\n\t\t\tcase FuncApplication:\n\t\t\t\targs[i] = fmt.Sprintf(\"(%s)\", arg)\n\t\t\tdefault:\n\t\t\t\targs[i] = fmt.Sprint(arg)\n\t\t\t}\n\t\t}\n\t\tfmt.Fprint(&buf, space, strings.Join(args, space))\n\t}\n\treturn buf.String()\n}\n\n\/\/ Assuming non-empty body, empty do block does not really makes sense.\nfunc (do DoBlock) String() string {\n\tbuf := bytes.NewBufferString(doDecl)\n\tfor _, line := range do.Text {\n\t\tfmt.Fprint(buf, intendation, line, lf)\n\t}\n\treturn strings.TrimSuffix(buf.String(), lf)\n}\n\n\/\/ Argument represents argument to which a function is applied.\ntype Argument interface {\n\targumentMarker()\n}\n\n\/\/ Literal represents language literals.\ntype Literal interface {\n\targumentMarker()\n\tliteralMarker()\n}\n\nfunc (fa FuncApplication) argumentMarker() {}\n\n\/\/ Int maps to Go int.\ntype Int string\n\nfunc (t Int) literalMarker() {}\nfunc (t Int) argumentMarker() {}\n\n\/\/ Float maps to Go float32.\ntype Float string\n\nfunc (t Float) literalMarker() {}\nfunc (t Float) argumentMarker() {}\n\n\/\/ Double maps to Go float64.\ntype Double string\n\nfunc (t Double) literalMarker() {}\nfunc (t Double) argumentMarker() {}\n\n\/\/ String wraps Go string.\ntype String string\n\nfunc (t String) literalMarker() {}\nfunc (t String) argumentMarker() {}\n\nfunc (t String) String() string {\n\treturn fmt.Sprintf(\"%#v\", t)\n}\n\n\/\/ Bool maps to Go bool.\ntype Bool string\n\nfunc (t Bool) literalMarker() {}\nfunc (t Bool) argumentMarker() {}\n\n\/\/ Char maps to Go char.\ntype Char string\n\nfunc (t Char) literalMarker() {}\nfunc (t Char) argumentMarker() {}\n\n\/\/ Imaginary maps to Go imaginary double.\ntype Imaginary string\n\nfunc (t Imaginary) literalMarker() {}\nfunc (t Imaginary) argumentMarker() {}\n\n\/\/ Var represents something passed by name.\ntype Var string\n\nfunc (v Var) argumentMarker() {}\nInterface cleanup\/\/ Package fun contains Fun language AST and means to prettyprint its source code.\npackage fun\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strings\"\n)\n\nconst (\n\tlf = \"\\n\"\n\tundefined = \"undefined\"\n\tarrow = \" -> \"\n\ttypeSeparator = \" :: \"\n\tunit = \"()\"\n\tspace = \" \"\n\tcomma = \", \"\n\tbinding = \" = \"\n\tdot = \".\"\n\tintendation = \" \"\n\tdoDecl = \"do\" + lf\n\topenBracket = \"(\"\n\tcloseBracket = \")\"\n)\n\n\/\/ Module represents single source file.\ntype Module struct {\n\tName string\n\tImports []Import\n\tDecls []Decl\n}\n\n\/\/ Import represents import.\ntype Import struct {\n\tPath string\n\tAlias string\n}\n\n\/\/ Decl represents declaration.\ntype Decl interface {\n\tdeclMarker()\n}\n\nfunc (fd FuncDecl) declMarker() {}\n\n\/\/ FuncBody represents function body.\ntype FuncBody interface {\n\tfuncBodyMarker()\n}\n\nfunc (fa FuncApplication) funcBodyMarker() {}\nfunc (do DoBlock) funcBodyMarker() {}\nfunc (u Undef) funcBodyMarker() {}\n\n\/\/ Undef represents function body placeholder.\ntype Undef bool\n\n\/\/ Undefined is an Undef singleton.\nconst Undefined Undef = true\n\nfunc (u Undef) String() string {\n\treturn undefined\n}\n\n\/\/ FuncDecl represents function declaration.\ntype FuncDecl struct {\n\tName string\n\tParams Parameters\n\tResults Results\n\tBody FuncBody\n}\n\n\/\/ Parameter represents function parameter.\ntype Parameter struct {\n\tName string\n\tType Type\n}\n\n\/\/ Type represents type.\ntype Type string\n\nfunc (imp Import) String() string {\n\tif imp.Alias == \"\" {\n\t\treturn fmt.Sprintf(\"import \\\"%s\\\"\", imp.Path)\n\t}\n\treturn fmt.Sprintf(\"import \\\"%s\\\" as \\\"%s\\\"\", imp.Path, imp.Alias)\n}\n\nfunc (mod Module) String() string {\n\ttopLevels := make([]string, 1+1+len(mod.Decls))\n\n\t\/\/ Module name\n\ttopLevels[0] = fmt.Sprintf(\"module %s where\\n\", mod.Name)\n\n\t\/\/ Imports\n\timports := make([]string, len(mod.Imports))\n\tfor i, imp := range mod.Imports {\n\t\timports[i] = imp.String()\n\t}\n\ttopLevels[1] = strings.Join(imports, lf) + lf\n\n\t\/\/ Top-level declarations\n\tfor i, decl := range mod.Decls {\n\t\ttopLevels[2+i] = fmt.Sprint(decl)\n\t}\n\n\treturn strings.Join(topLevels, lf) + lf\n}\n\nfunc (fd FuncDecl) String() string {\n\tvar out bytes.Buffer\n\t\/\/ Type signature\n\tfmt.Fprint(&out, fd.Name)\n\tfmt.Fprint(&out, typeSeparator)\n\tfmt.Fprint(&out, fd.Params)\n\tif len(fd.Params) > 0 {\n\t\tfmt.Fprint(&out, arrow)\n\t}\n\tfmt.Fprint(&out, fd.Results, lf)\n\n\t\/\/ Name and parameters\n\tfmt.Fprint(&out, fd.Name)\n\tif len(fd.Params) > 0 {\n\t\tfmt.Fprint(&out, space, fd.Params.Names())\n\t}\n\tfmt.Fprint(&out, binding)\n\n\t\/\/ TODO implement body\n\tif fd.Body == nil {\n\t\tfmt.Fprint(&out, undefined)\n\t} else {\n\t\tfmt.Fprint(&out, fd.Body)\n\t}\n\n\treturn out.String() + lf\n}\n\n\/\/ Parameters represents function parameters.\ntype Parameters []Parameter \/\/ TODO not only types\n\nfunc (ps Parameters) String() string {\n\tss := make([]string, len(ps))\n\tfor i := 0; i < len(ps); i++ {\n\t\tss[i] = string(ps[i].Type)\n\t}\n\treturn strings.Join(ss, arrow)\n}\n\n\/\/ Names build parameter list for binding.\nfunc (ps Parameters) Names() string {\n\tss := make([]string, len(ps))\n\tfor i := 0; i < len(ps); i++ {\n\t\tss[i] = string(ps[i].Name)\n\t}\n\treturn strings.Join(ss, space)\n}\n\n\/\/ Results represents function result list.\ntype Results []Type\n\nfunc (ts Results) String() string {\n\tswitch len(ts) {\n\tcase 0:\n\t\treturn unit\n\tcase 1:\n\t\treturn string(ts[0])\n\tdefault:\n\t\tss := make([]string, len(ts))\n\t\tfor i := 0; i < len(ts); i++ {\n\t\t\tss[i] = string(ts[i])\n\t\t}\n\t\treturn openBracket + strings.Join(ss, comma) + closeBracket\n\t}\n}\n\n\/\/ Expression is a pure function.\ntype Expression interface {\n\tfuncBodyMarker()\n\t\/\/ expressionMarker()\n}\n\n\/\/ Statement just performs side effects.\ntype Statement interface {\n\tfuncBodyMarker()\n\t\/\/ statementMarker()\n}\n\n\/\/ DoBlock represents raw Go code as a function body.\ntype DoBlock struct {\n\tText []string\n}\n\n\/\/ FuncApplication represents function application.\n\/\/ At least for now it may be both Statement and Expression.\ntype FuncApplication struct {\n\tName string\n\tModule string\n\tArguments []Argument\n\tKind funcApplicationKind\n}\n\ntype funcApplicationKind uint8\n\n\/\/ Kinds of a FuncApplication\nconst (\n\tEXPRESSION funcApplicationKind = iota\n\tSTATEMENT funcApplicationKind = iota\n)\n\nfunc (fa FuncApplication) String() string {\n\tvar buf bytes.Buffer\n\tif fa.Module != \"\" {\n\t\tfmt.Fprint(&buf, fa.Module, dot)\n\t}\n\tfmt.Fprint(&buf, fa.Name)\n\tif len(fa.Arguments) > 0 {\n\t\targs := make([]string, len(fa.Arguments))\n\t\tfor i := 0; i < len(args); i++ {\n\t\t\tswitch arg := fa.Arguments[i].(type) {\n\t\t\tcase FuncApplication:\n\t\t\t\targs[i] = fmt.Sprintf(\"(%s)\", arg)\n\t\t\tdefault:\n\t\t\t\targs[i] = fmt.Sprint(arg)\n\t\t\t}\n\t\t}\n\t\tfmt.Fprint(&buf, space, strings.Join(args, space))\n\t}\n\treturn buf.String()\n}\n\n\/\/ Assuming non-empty body, empty do block does not really makes sense.\nfunc (do DoBlock) String() string {\n\tbuf := bytes.NewBufferString(doDecl)\n\tfor _, line := range do.Text {\n\t\tfmt.Fprint(buf, intendation, line, lf)\n\t}\n\treturn strings.TrimSuffix(buf.String(), lf)\n}\n\n\/\/ Argument represents argument to which a function is applied.\ntype Argument interface {\n\targumentMarker()\n}\n\n\/\/ Literal represents language literals.\ntype Literal interface {\n\targumentMarker()\n\tliteralMarker()\n}\n\nfunc (fa FuncApplication) argumentMarker() {}\n\n\/\/ Int maps to Go int.\ntype Int string\n\nfunc (t Int) literalMarker() {}\nfunc (t Int) argumentMarker() {}\n\n\/\/ Float maps to Go float32.\ntype Float string\n\nfunc (t Float) literalMarker() {}\nfunc (t Float) argumentMarker() {}\n\n\/\/ Double maps to Go float64.\ntype Double string\n\nfunc (t Double) literalMarker() {}\nfunc (t Double) argumentMarker() {}\n\n\/\/ String wraps Go string.\ntype String string\n\nfunc (t String) literalMarker() {}\nfunc (t String) argumentMarker() {}\n\nfunc (t String) String() string {\n\treturn fmt.Sprintf(\"%#v\", t)\n}\n\n\/\/ Bool maps to Go bool.\ntype Bool string\n\nfunc (t Bool) literalMarker() {}\nfunc (t Bool) argumentMarker() {}\n\n\/\/ Char maps to Go char.\ntype Char string\n\nfunc (t Char) literalMarker() {}\nfunc (t Char) argumentMarker() {}\n\n\/\/ Imaginary maps to Go imaginary double.\ntype Imaginary string\n\nfunc (t Imaginary) literalMarker() {}\nfunc (t Imaginary) argumentMarker() {}\n\n\/\/ Var represents something passed by name.\ntype Var string\n\nfunc (v Var) argumentMarker() {}\n\n\/\/ InfixOperation represents\ntype InfixOperation struct {\n\tX, Y Expression\n\tOperator Operator\n}\n\nfunc (op InfixOperation) funcBodyMarker() {}\n\nfunc (op InfixOperation) String() string {\n\treturn fmt.Sprintf(\"%s %s %s\", op.X, op.Operator, op.Y)\n}\n\n\/\/ Operator represents binary operator\ntype Operator string\n\n\/\/ Tuple represents a group of values\ntype Tuple []Expression\n\nfunc (t Tuple) String() string {\n\tss := make([]string, len(t))\n\tfor i := 0; i < len(t); i++ {\n\t\tss[i] = fmt.Sprint(t[i])\n\t}\n\treturn openBracket + strings.Join(ss, comma) + closeBracket\n}\n<|endoftext|>"} {"text":"doc: add missing blurb<|endoftext|>"} {"text":"add some docs<|endoftext|>"} {"text":"Changing ResolveSocket and ResolveDir to exported funcs<|endoftext|>"} {"text":"\/\/ Copyright 2015 CloudAwan LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage thirdparty\n\nimport (\n\t\"github.com\/astaxie\/beego\"\n\t\"github.com\/cloudawan\/cloudone_gui\/controllers\/identity\"\n\t\"github.com\/cloudawan\/cloudone_gui\/controllers\/utility\/configuration\"\n\t\"github.com\/cloudawan\/cloudone_gui\/controllers\/utility\/guimessagedisplay\"\n\t\"github.com\/cloudawan\/cloudone_utility\/restclient\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Cluster struct {\n\tName string\n\tDescription string\n\tReplicationControllerJson string\n\tServiceJson string\n\tEnvironment map[string]string\n\tScriptType string\n\tScriptContent string\n}\n\ntype ClusterLaunch struct {\n\tSize int\n\tEnvironmentSlice []interface{}\n\tReplicationControllerExtraJsonMap map[string]interface{}\n}\n\ntype Region struct {\n\tName string\n\tLocationTagged bool\n\tZoneSlice []Zone\n}\n\ntype Zone struct {\n\tName string\n\tLocationTagged bool\n\tNodeSlice []Node\n}\n\ntype Node struct {\n\tName string\n\tAddress string\n\tCapacity Capacity\n}\n\ntype Capacity struct {\n\tCpu string\n\tMemory string\n}\n\ntype LaunchController struct {\n\tbeego.Controller\n}\n\nfunc (c *LaunchController) Get() {\n\tc.TplName = \"repository\/thirdparty\/launch.html\"\n\tguimessage := guimessagedisplay.GetGUIMessage(c)\n\n\t\/\/ Authorization for web page display\n\tc.Data[\"layoutMenu\"] = c.GetSession(\"layoutMenu\")\n\n\tcloudoneProtocol := beego.AppConfig.String(\"cloudoneProtocol\")\n\tcloudoneHost := beego.AppConfig.String(\"cloudoneHost\")\n\tcloudonePort := beego.AppConfig.String(\"cloudonePort\")\n\n\tname := c.GetString(\"name\")\n\n\turl := cloudoneProtocol + \":\/\/\" + cloudoneHost + \":\" + cloudonePort +\n\t\t\"\/api\/v1\/clusterapplications\/\" + name\n\tcluster := Cluster{}\n\n\ttokenHeaderMap, _ := c.GetSession(\"tokenHeaderMap\").(map[string]string)\n\n\t_, err := restclient.RequestGetWithStructure(url, &cluster, tokenHeaderMap)\n\n\tif identity.IsTokenInvalidAndRedirect(c, c.Ctx, err) {\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\tguimessage.AddDanger(\"Fail to get data with error\" + err.Error())\n\t\t\/\/ Redirect to list\n\t\tc.Ctx.Redirect(302, \"\/gui\/repository\/thirdparty\/list\")\n\n\t\tguimessage.RedirectMessage(c)\n\t\treturn\n\t}\n\n\tkubeapiHost, kubeapiPort, err := configuration.GetAvailableKubeapiHostAndPort()\n\tif err != nil {\n\t\t\/\/ Error\n\t\tguimessage.AddDanger(\"No availabe host and port with error \" + err.Error())\n\t\t\/\/ Redirect to list\n\t\tc.Ctx.Redirect(302, \"\/gui\/repository\/thirdparty\/list\")\n\n\t\tguimessage.RedirectMessage(c)\n\t\treturn\n\t}\n\n\turl = cloudoneProtocol + \":\/\/\" + cloudoneHost + \":\" + cloudonePort +\n\t\t\"\/api\/v1\/nodes\/topology?kubeapihost=\" + kubeapiHost + \"&kubeapiport=\" + strconv.Itoa(kubeapiPort)\n\n\tregionSlice := make([]Region, 0)\n\n\t_, err = restclient.RequestGetWithStructure(url, ®ionSlice, tokenHeaderMap)\n\n\tif identity.IsTokenInvalidAndRedirect(c, c.Ctx, err) {\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\tguimessage.AddDanger(\"Fail to get node topology with error\" + err.Error())\n\t\t\/\/ Redirect to list\n\t\tc.Ctx.Redirect(302, \"\/gui\/repository\/thirdparty\/list\")\n\n\t\tguimessage.RedirectMessage(c)\n\t\treturn\n\t}\n\n\tfilteredRegionSlice := make([]Region, 0)\n\tfor _, region := range regionSlice {\n\t\tif region.LocationTagged {\n\t\t\tfilteredRegionSlice = append(filteredRegionSlice, region)\n\t\t}\n\t}\n\n\tif cluster.Environment != nil {\n\t\tnamespace, _ := c.GetSession(\"namespace\").(string)\n\n\t\t\/\/ Try to set the known common parameter\n\t\tfor key, _ := range cluster.Environment {\n\t\t\tif key == \"SERVICE_NAME\" {\n\t\t\t\tcluster.Environment[key] = name\n\t\t\t}\n\t\t\tif key == \"NAMESPACE\" {\n\t\t\t\tcluster.Environment[key] = namespace\n\t\t\t}\n\t\t}\n\t}\n\n\tc.Data[\"actionButtonValue\"] = \"Launch\"\n\tc.Data[\"pageHeader\"] = \"Launch third party service\"\n\tc.Data[\"thirdPartyApplicationName\"] = name\n\tc.Data[\"environment\"] = cluster.Environment\n\tc.Data[\"regionSlice\"] = filteredRegionSlice\n\n\tguimessage.OutputMessage(c.Data)\n}\n\nfunc (c *LaunchController) Post() {\n\tguimessage := guimessagedisplay.GetGUIMessage(c)\n\n\tcloudoneProtocol := beego.AppConfig.String(\"cloudoneProtocol\")\n\tcloudoneHost := beego.AppConfig.String(\"cloudoneHost\")\n\tcloudonePort := beego.AppConfig.String(\"cloudonePort\")\n\tkubeapiHost, kubeapiPort, err := configuration.GetAvailableKubeapiHostAndPort()\n\tif err != nil {\n\t\t\/\/ Error\n\t\terrorJsonMap := make(map[string]interface{})\n\t\terrorJsonMap[\"error\"] = err.Error()\n\t\tc.Data[\"json\"] = errorJsonMap\n\t\tc.ServeJSON()\n\t\treturn\n\t}\n\n\tnamespace, _ := c.GetSession(\"namespace\").(string)\n\tname := c.GetString(\"name\")\n\tsize, _ := c.GetInt(\"size\")\n\n\tregion := c.GetString(\"region\")\n\tzone := c.GetString(\"zone\")\n\n\tif region == \"Any\" {\n\t\tregion = \"\"\n\t}\n\tif zone == \"Any\" {\n\t\tzone = \"\"\n\t}\n\n\tkeySlice := make([]string, 0)\n\tinputMap := c.Input()\n\tif inputMap != nil {\n\t\tfor key, _ := range inputMap {\n\t\t\t\/\/ Ignore the non environment field\n\t\t\tif strings.HasPrefix(key, \"environment_\") {\n\t\t\t\tkeySlice = append(keySlice, key)\n\t\t\t}\n\t\t}\n\t}\n\n\tenvironmentSlice := make([]interface{}, 0)\n\tfor _, key := range keySlice {\n\t\tvalue := c.GetString(key)\n\t\tif len(value) > 0 {\n\t\t\tenvironmentMap := make(map[string]string)\n\t\t\tenvironmentMap[\"name\"] = key[len(\"environment_\"):]\n\t\t\tenvironmentMap[\"value\"] = value\n\t\t\tenvironmentSlice = append(environmentSlice, environmentMap)\n\t\t}\n\t}\n\n\textraJsonMap := make(map[string]interface{})\n\tif len(region) > 0 {\n\t\textraJsonMap[\"spec\"] = make(map[string]interface{})\n\t\textraJsonMap[\"spec\"].(map[string]interface{})[\"template\"] = make(map[string]interface{})\n\t\textraJsonMap[\"spec\"].(map[string]interface{})[\"template\"].(map[string]interface{})[\"spec\"] = make(map[string]interface{})\n\t\textraJsonMap[\"spec\"].(map[string]interface{})[\"template\"].(map[string]interface{})[\"spec\"].(map[string]interface{})[\"nodeSelector\"] = make(map[string]interface{})\n\n\t\textraJsonMap[\"spec\"].(map[string]interface{})[\"template\"].(map[string]interface{})[\"spec\"].(map[string]interface{})[\"nodeSelector\"].(map[string]interface{})[\"region\"] = region\n\t\tif len(zone) > 0 {\n\t\t\textraJsonMap[\"spec\"].(map[string]interface{})[\"template\"].(map[string]interface{})[\"spec\"].(map[string]interface{})[\"nodeSelector\"].(map[string]interface{})[\"zone\"] = zone\n\t\t}\n\t} else {\n\t\textraJsonMap = nil\n\t}\n\n\tclusterLaunch := ClusterLaunch{\n\t\tsize,\n\t\tenvironmentSlice,\n\t\textraJsonMap,\n\t}\n\n\turl := cloudoneProtocol + \":\/\/\" + cloudoneHost + \":\" + cloudonePort +\n\t\t\"\/api\/v1\/clusterapplications\/launch\/\" + namespace + \"\/\" + name +\n\t\t\"?kubeapihost=\" + kubeapiHost + \"&kubeapiport=\" + strconv.Itoa(kubeapiPort)\n\tjsonMap := make(map[string]interface{})\n\n\ttokenHeaderMap, _ := c.GetSession(\"tokenHeaderMap\").(map[string]string)\n\n\t_, err = restclient.RequestPostWithStructure(url, clusterLaunch, &jsonMap, tokenHeaderMap)\n\n\tif identity.IsTokenInvalidAndRedirect(c, c.Ctx, err) {\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\t\/\/ Error\n\t\terrorMessage, _ := jsonMap[\"Error\"].(string)\n\t\tif strings.HasPrefix(errorMessage, \"Replication controller already exists\") {\n\t\t\tguimessage.AddDanger(\"Replication controller \" + name + \" already exists\")\n\t\t} else {\n\t\t\tguimessage.AddDanger(err.Error())\n\t\t}\n\t} else {\n\t\tguimessage.AddSuccess(\"Cluster application \" + name + \" is launched\")\n\t}\n\n\t\/\/ Redirect to list\n\tc.Ctx.Redirect(302, \"\/gui\/deploy\/deployclusterapplication\/list\")\n\n\tguimessage.RedirectMessage(c)\n}\nChange the error message\/\/ Copyright 2015 CloudAwan LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage thirdparty\n\nimport (\n\t\"github.com\/astaxie\/beego\"\n\t\"github.com\/cloudawan\/cloudone_gui\/controllers\/identity\"\n\t\"github.com\/cloudawan\/cloudone_gui\/controllers\/utility\/configuration\"\n\t\"github.com\/cloudawan\/cloudone_gui\/controllers\/utility\/guimessagedisplay\"\n\t\"github.com\/cloudawan\/cloudone_utility\/restclient\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Cluster struct {\n\tName string\n\tDescription string\n\tReplicationControllerJson string\n\tServiceJson string\n\tEnvironment map[string]string\n\tScriptType string\n\tScriptContent string\n}\n\ntype ClusterLaunch struct {\n\tSize int\n\tEnvironmentSlice []interface{}\n\tReplicationControllerExtraJsonMap map[string]interface{}\n}\n\ntype Region struct {\n\tName string\n\tLocationTagged bool\n\tZoneSlice []Zone\n}\n\ntype Zone struct {\n\tName string\n\tLocationTagged bool\n\tNodeSlice []Node\n}\n\ntype Node struct {\n\tName string\n\tAddress string\n\tCapacity Capacity\n}\n\ntype Capacity struct {\n\tCpu string\n\tMemory string\n}\n\ntype LaunchController struct {\n\tbeego.Controller\n}\n\nfunc (c *LaunchController) Get() {\n\tc.TplName = \"repository\/thirdparty\/launch.html\"\n\tguimessage := guimessagedisplay.GetGUIMessage(c)\n\n\t\/\/ Authorization for web page display\n\tc.Data[\"layoutMenu\"] = c.GetSession(\"layoutMenu\")\n\n\tcloudoneProtocol := beego.AppConfig.String(\"cloudoneProtocol\")\n\tcloudoneHost := beego.AppConfig.String(\"cloudoneHost\")\n\tcloudonePort := beego.AppConfig.String(\"cloudonePort\")\n\n\tname := c.GetString(\"name\")\n\n\turl := cloudoneProtocol + \":\/\/\" + cloudoneHost + \":\" + cloudonePort +\n\t\t\"\/api\/v1\/clusterapplications\/\" + name\n\tcluster := Cluster{}\n\n\ttokenHeaderMap, _ := c.GetSession(\"tokenHeaderMap\").(map[string]string)\n\n\t_, err := restclient.RequestGetWithStructure(url, &cluster, tokenHeaderMap)\n\n\tif identity.IsTokenInvalidAndRedirect(c, c.Ctx, err) {\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\tguimessage.AddDanger(\"Fail to get data with error\" + err.Error())\n\t\t\/\/ Redirect to list\n\t\tc.Ctx.Redirect(302, \"\/gui\/repository\/thirdparty\/list\")\n\n\t\tguimessage.RedirectMessage(c)\n\t\treturn\n\t}\n\n\tkubeapiHost, kubeapiPort, err := configuration.GetAvailableKubeapiHostAndPort()\n\tif err != nil {\n\t\t\/\/ Error\n\t\tguimessage.AddDanger(\"No availabe host and port with error \" + err.Error())\n\t\t\/\/ Redirect to list\n\t\tc.Ctx.Redirect(302, \"\/gui\/repository\/thirdparty\/list\")\n\n\t\tguimessage.RedirectMessage(c)\n\t\treturn\n\t}\n\n\turl = cloudoneProtocol + \":\/\/\" + cloudoneHost + \":\" + cloudonePort +\n\t\t\"\/api\/v1\/nodes\/topology?kubeapihost=\" + kubeapiHost + \"&kubeapiport=\" + strconv.Itoa(kubeapiPort)\n\n\tregionSlice := make([]Region, 0)\n\n\t_, err = restclient.RequestGetWithStructure(url, ®ionSlice, tokenHeaderMap)\n\n\tif identity.IsTokenInvalidAndRedirect(c, c.Ctx, err) {\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\tguimessage.AddDanger(\"Fail to get node topology with error\" + err.Error())\n\t\t\/\/ Redirect to list\n\t\tc.Ctx.Redirect(302, \"\/gui\/repository\/thirdparty\/list\")\n\n\t\tguimessage.RedirectMessage(c)\n\t\treturn\n\t}\n\n\tfilteredRegionSlice := make([]Region, 0)\n\tfor _, region := range regionSlice {\n\t\tif region.LocationTagged {\n\t\t\tfilteredRegionSlice = append(filteredRegionSlice, region)\n\t\t}\n\t}\n\n\tif cluster.Environment != nil {\n\t\tnamespace, _ := c.GetSession(\"namespace\").(string)\n\n\t\t\/\/ Try to set the known common parameter\n\t\tfor key, _ := range cluster.Environment {\n\t\t\tif key == \"SERVICE_NAME\" {\n\t\t\t\tcluster.Environment[key] = name\n\t\t\t}\n\t\t\tif key == \"NAMESPACE\" {\n\t\t\t\tcluster.Environment[key] = namespace\n\t\t\t}\n\t\t}\n\t}\n\n\tc.Data[\"actionButtonValue\"] = \"Launch\"\n\tc.Data[\"pageHeader\"] = \"Launch third party service\"\n\tc.Data[\"thirdPartyApplicationName\"] = name\n\tc.Data[\"environment\"] = cluster.Environment\n\tc.Data[\"regionSlice\"] = filteredRegionSlice\n\n\tguimessage.OutputMessage(c.Data)\n}\n\nfunc (c *LaunchController) Post() {\n\tguimessage := guimessagedisplay.GetGUIMessage(c)\n\n\tcloudoneProtocol := beego.AppConfig.String(\"cloudoneProtocol\")\n\tcloudoneHost := beego.AppConfig.String(\"cloudoneHost\")\n\tcloudonePort := beego.AppConfig.String(\"cloudonePort\")\n\tkubeapiHost, kubeapiPort, err := configuration.GetAvailableKubeapiHostAndPort()\n\tif err != nil {\n\t\t\/\/ Error\n\t\terrorJsonMap := make(map[string]interface{})\n\t\terrorJsonMap[\"error\"] = err.Error()\n\t\tc.Data[\"json\"] = errorJsonMap\n\t\tc.ServeJSON()\n\t\treturn\n\t}\n\n\tnamespace, _ := c.GetSession(\"namespace\").(string)\n\tname := c.GetString(\"name\")\n\tsize, _ := c.GetInt(\"size\")\n\n\tregion := c.GetString(\"region\")\n\tzone := c.GetString(\"zone\")\n\n\tif region == \"Any\" {\n\t\tregion = \"\"\n\t}\n\tif zone == \"Any\" {\n\t\tzone = \"\"\n\t}\n\n\tkeySlice := make([]string, 0)\n\tinputMap := c.Input()\n\tif inputMap != nil {\n\t\tfor key, _ := range inputMap {\n\t\t\t\/\/ Ignore the non environment field\n\t\t\tif strings.HasPrefix(key, \"environment_\") {\n\t\t\t\tkeySlice = append(keySlice, key)\n\t\t\t}\n\t\t}\n\t}\n\n\tenvironmentSlice := make([]interface{}, 0)\n\tfor _, key := range keySlice {\n\t\tvalue := c.GetString(key)\n\t\tif len(value) > 0 {\n\t\t\tenvironmentMap := make(map[string]string)\n\t\t\tenvironmentMap[\"name\"] = key[len(\"environment_\"):]\n\t\t\tenvironmentMap[\"value\"] = value\n\t\t\tenvironmentSlice = append(environmentSlice, environmentMap)\n\t\t}\n\t}\n\n\textraJsonMap := make(map[string]interface{})\n\tif len(region) > 0 {\n\t\textraJsonMap[\"spec\"] = make(map[string]interface{})\n\t\textraJsonMap[\"spec\"].(map[string]interface{})[\"template\"] = make(map[string]interface{})\n\t\textraJsonMap[\"spec\"].(map[string]interface{})[\"template\"].(map[string]interface{})[\"spec\"] = make(map[string]interface{})\n\t\textraJsonMap[\"spec\"].(map[string]interface{})[\"template\"].(map[string]interface{})[\"spec\"].(map[string]interface{})[\"nodeSelector\"] = make(map[string]interface{})\n\n\t\textraJsonMap[\"spec\"].(map[string]interface{})[\"template\"].(map[string]interface{})[\"spec\"].(map[string]interface{})[\"nodeSelector\"].(map[string]interface{})[\"region\"] = region\n\t\tif len(zone) > 0 {\n\t\t\textraJsonMap[\"spec\"].(map[string]interface{})[\"template\"].(map[string]interface{})[\"spec\"].(map[string]interface{})[\"nodeSelector\"].(map[string]interface{})[\"zone\"] = zone\n\t\t}\n\t} else {\n\t\textraJsonMap = nil\n\t}\n\n\tclusterLaunch := ClusterLaunch{\n\t\tsize,\n\t\tenvironmentSlice,\n\t\textraJsonMap,\n\t}\n\n\turl := cloudoneProtocol + \":\/\/\" + cloudoneHost + \":\" + cloudonePort +\n\t\t\"\/api\/v1\/clusterapplications\/launch\/\" + namespace + \"\/\" + name +\n\t\t\"?kubeapihost=\" + kubeapiHost + \"&kubeapiport=\" + strconv.Itoa(kubeapiPort)\n\tjsonMap := make(map[string]interface{})\n\n\ttokenHeaderMap, _ := c.GetSession(\"tokenHeaderMap\").(map[string]string)\n\n\t_, err = restclient.RequestPostWithStructure(url, clusterLaunch, &jsonMap, tokenHeaderMap)\n\n\tif identity.IsTokenInvalidAndRedirect(c, c.Ctx, err) {\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\t\/\/ Error\n\t\terrorMessage, _ := jsonMap[\"Error\"].(string)\n\t\tif errorMessage == \"The cluster application already exists\" {\n\t\t\tguimessage.AddDanger(\"Cluster application \" + name + \" already exists\")\n\t\t} else {\n\t\t\tguimessage.AddDanger(err.Error())\n\t\t}\n\t} else {\n\t\tguimessage.AddSuccess(\"Cluster application \" + name + \" is launched\")\n\t}\n\n\t\/\/ Redirect to list\n\tc.Ctx.Redirect(302, \"\/gui\/deploy\/deployclusterapplication\/list\")\n\n\tguimessage.RedirectMessage(c)\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The httptest package provides utilities for HTTP testing.\npackage httptest\n\nimport (\n\t\"bytes\"\n\t\"http\"\n\t\"os\"\n)\n\n\/\/ ResponseRecorder is an implementation of http.ResponseWriter that\n\/\/ records its mutations for later inspection in tests.\ntype ResponseRecorder struct {\n\tCode int \/\/ the HTTP response code from WriteHeader\n\tHeaderMap http.Header \/\/ the HTTP response headers\n\tBody *bytes.Buffer \/\/ if non-nil, the bytes.Buffer to append written data to\n\tFlushed bool\n}\n\n\/\/ NewRecorder returns an initialized ResponseRecorder.\nfunc NewRecorder() *ResponseRecorder {\n\treturn &ResponseRecorder{\n\t\tHeaderMap: make(http.Header),\n\t\tBody: new(bytes.Buffer),\n\t}\n}\n\n\/\/ DefaultRemoteAddr is the default remote address to return in RemoteAddr if\n\/\/ an explicit DefaultRemoteAddr isn't set on ResponseRecorder.\nconst DefaultRemoteAddr = \"1.2.3.4\"\n\n\/\/ Header returns the response headers.\nfunc (rw *ResponseRecorder) Header() http.Header {\n\treturn rw.HeaderMap\n}\n\n\/\/ Write always succeeds and writes to rw.Body, if not nil.\nfunc (rw *ResponseRecorder) Write(buf []byte) (int, os.Error) {\n\tif rw.Body != nil {\n\t\trw.Body.Write(buf)\n\t}\n\treturn len(buf), nil\n}\n\n\/\/ WriteHeader sets rw.Code.\nfunc (rw *ResponseRecorder) WriteHeader(code int) {\n\trw.Code = code\n}\n\n\/\/ Flush sets rw.Flushed to true.\nfunc (rw *ResponseRecorder) Flush() {\n\trw.Flushed = true\n}\nhttptest: default the Recorder status code to 200 on a Write\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The httptest package provides utilities for HTTP testing.\npackage httptest\n\nimport (\n\t\"bytes\"\n\t\"http\"\n\t\"os\"\n)\n\n\/\/ ResponseRecorder is an implementation of http.ResponseWriter that\n\/\/ records its mutations for later inspection in tests.\ntype ResponseRecorder struct {\n\tCode int \/\/ the HTTP response code from WriteHeader\n\tHeaderMap http.Header \/\/ the HTTP response headers\n\tBody *bytes.Buffer \/\/ if non-nil, the bytes.Buffer to append written data to\n\tFlushed bool\n}\n\n\/\/ NewRecorder returns an initialized ResponseRecorder.\nfunc NewRecorder() *ResponseRecorder {\n\treturn &ResponseRecorder{\n\t\tHeaderMap: make(http.Header),\n\t\tBody: new(bytes.Buffer),\n\t}\n}\n\n\/\/ DefaultRemoteAddr is the default remote address to return in RemoteAddr if\n\/\/ an explicit DefaultRemoteAddr isn't set on ResponseRecorder.\nconst DefaultRemoteAddr = \"1.2.3.4\"\n\n\/\/ Header returns the response headers.\nfunc (rw *ResponseRecorder) Header() http.Header {\n\treturn rw.HeaderMap\n}\n\n\/\/ Write always succeeds and writes to rw.Body, if not nil.\nfunc (rw *ResponseRecorder) Write(buf []byte) (int, os.Error) {\n\tif rw.Body != nil {\n\t\trw.Body.Write(buf)\n\t}\n\tif rw.Code == 0 {\n\t\trw.Code = http.StatusOK\n\t}\n\treturn len(buf), nil\n}\n\n\/\/ WriteHeader sets rw.Code.\nfunc (rw *ResponseRecorder) WriteHeader(code int) {\n\trw.Code = code\n}\n\n\/\/ Flush sets rw.Flushed to true.\nfunc (rw *ResponseRecorder) Flush() {\n\trw.Flushed = true\n}\n<|endoftext|>"} {"text":"\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build !windows,!plan9\n\npackage syslog\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc runPktSyslog(c net.PacketConn, done chan<- string) {\n\tvar buf [4096]byte\n\tvar rcvd string\n\tct := 0\n\tfor {\n\t\tvar n int\n\t\tvar err error\n\n\t\tc.SetReadDeadline(time.Now().Add(100 * time.Millisecond))\n\t\tn, _, err = c.ReadFrom(buf[:])\n\t\trcvd += string(buf[:n])\n\t\tif err != nil {\n\t\t\tif oe, ok := err.(*net.OpError); ok {\n\t\t\t\tif ct < 3 && oe.Temporary() {\n\t\t\t\t\tct++\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\tc.Close()\n\tdone <- rcvd\n}\n\nvar crashy = false\n\nfunc runStreamSyslog(l net.Listener, done chan<- string, wg *sync.WaitGroup) {\n\tfor {\n\t\tvar c net.Conn\n\t\tvar err error\n\t\tif c, err = l.Accept(); err != nil {\n\t\t\tfmt.Print(err)\n\t\t\treturn\n\t\t}\n\t\twg.Add(1)\n\t\tgo func(c net.Conn) {\n\t\t\tdefer wg.Done()\n\t\t\tc.SetReadDeadline(time.Now().Add(5 * time.Second))\n\t\t\tb := bufio.NewReader(c)\n\t\t\tfor ct := 1; !crashy || ct&7 != 0; ct++ {\n\t\t\t\ts, err := b.ReadString('\\n')\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tdone <- s\n\t\t\t}\n\t\t\tc.Close()\n\t\t}(c)\n\t}\n}\n\nfunc startServer(n, la string, done chan<- string) (addr string, wg *sync.WaitGroup) {\n\tif n == \"udp\" || n == \"tcp\" {\n\t\tla = \"127.0.0.1:0\"\n\t} else {\n\t\t\/\/ unix and unixgram: choose an address if none given\n\t\tif la == \"\" {\n\t\t\t\/\/ use ioutil.TempFile to get a name that is unique\n\t\t\tf, err := ioutil.TempFile(\"\", \"syslogtest\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(\"TempFile: \", err)\n\t\t\t}\n\t\t\tf.Close()\n\t\t\tla = f.Name()\n\t\t}\n\t\tos.Remove(la)\n\t}\n\n\twg = new(sync.WaitGroup)\n\tif n == \"udp\" || n == \"unixgram\" {\n\t\tl, e := net.ListenPacket(n, la)\n\t\tif e != nil {\n\t\t\tlog.Fatalf(\"startServer failed: %v\", e)\n\t\t}\n\t\taddr = l.LocalAddr().String()\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\trunPktSyslog(l, done)\n\t\t}()\n\t} else {\n\t\tl, e := net.Listen(n, la)\n\t\tif e != nil {\n\t\t\tlog.Fatalf(\"startServer failed: %v\", e)\n\t\t}\n\t\taddr = l.Addr().String()\n\t\tgo runStreamSyslog(l, done, wg)\n\t}\n\treturn\n}\n\nfunc TestWithSimulated(t *testing.T) {\n\tmsg := \"Test 123\"\n\ttransport := []string{\"unix\", \"unixgram\", \"udp\", \"tcp\"}\n\n\tfor _, tr := range transport {\n\t\tdone := make(chan string)\n\t\taddr, _ := startServer(tr, \"\", done)\n\t\tif tr == \"unix\" || tr == \"unixgram\" {\n\t\t\tdefer os.Remove(addr)\n\t\t}\n\t\ts, err := Dial(tr, addr, LOG_INFO|LOG_USER, \"syslog_test\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Dial() failed: %v\", err)\n\t\t}\n\t\terr = s.Info(msg)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"log failed: %v\", err)\n\t\t}\n\t\tcheck(t, msg, <-done)\n\t\ts.Close()\n\t}\n}\n\nfunc TestFlap(t *testing.T) {\n\tnet := \"unix\"\n\tdone := make(chan string)\n\taddr, _ := startServer(net, \"\", done)\n\tdefer os.Remove(addr)\n\n\ts, err := Dial(net, addr, LOG_INFO|LOG_USER, \"syslog_test\")\n\tif err != nil {\n\t\tt.Fatalf(\"Dial() failed: %v\", err)\n\t}\n\tmsg := \"Moo 2\"\n\terr = s.Info(msg)\n\tif err != nil {\n\t\tt.Fatalf(\"log failed: %v\", err)\n\t}\n\tcheck(t, msg, <-done)\n\n\t\/\/ restart the server\n\tstartServer(net, addr, done)\n\n\t\/\/ and try retransmitting\n\tmsg = \"Moo 3\"\n\terr = s.Info(msg)\n\tif err != nil {\n\t\tt.Fatalf(\"log failed: %v\", err)\n\t}\n\tcheck(t, msg, <-done)\n\n\ts.Close()\n}\n\nfunc TestNew(t *testing.T) {\n\tif LOG_LOCAL7 != 23<<3 {\n\t\tt.Fatalf(\"LOG_LOCAL7 has wrong value\")\n\t}\n\tif testing.Short() {\n\t\t\/\/ Depends on syslog daemon running, and sometimes it's not.\n\t\tt.Skip(\"skipping syslog test during -short\")\n\t}\n\n\ts, err := New(LOG_INFO|LOG_USER, \"the_tag\")\n\tif err != nil {\n\t\tt.Fatalf(\"New() failed: %s\", err)\n\t}\n\t\/\/ Don't send any messages.\n\ts.Close()\n}\n\nfunc TestNewLogger(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping syslog test during -short\")\n\t}\n\tf, err := NewLogger(LOG_USER|LOG_INFO, 0)\n\tif f == nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestDial(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping syslog test during -short\")\n\t}\n\tf, err := Dial(\"\", \"\", (LOG_LOCAL7|LOG_DEBUG)+1, \"syslog_test\")\n\tif f != nil {\n\t\tt.Fatalf(\"Should have trapped bad priority\")\n\t}\n\tf, err = Dial(\"\", \"\", -1, \"syslog_test\")\n\tif f != nil {\n\t\tt.Fatalf(\"Should have trapped bad priority\")\n\t}\n\tl, err := Dial(\"\", \"\", LOG_USER|LOG_ERR, \"syslog_test\")\n\tif err != nil {\n\t\tt.Fatalf(\"Dial() failed: %s\", err)\n\t}\n\tl.Close()\n}\n\nfunc check(t *testing.T, in, out string) {\n\ttmpl := fmt.Sprintf(\"<%d>%%s %%s syslog_test[%%d]: %s\\n\", LOG_USER+LOG_INFO, in)\n\tif hostname, err := os.Hostname(); err != nil {\n\t\tt.Error(\"Error retrieving hostname\")\n\t} else {\n\t\tvar parsedHostname, timestamp string\n\t\tvar pid int\n\t\tif n, err := fmt.Sscanf(out, tmpl, ×tamp, &parsedHostname, &pid); n != 3 || err != nil || hostname != parsedHostname {\n\t\t\tt.Errorf(\"Got %q, does not match template %q (%d %s)\", out, tmpl, n, err)\n\t\t}\n\t}\n}\n\nfunc TestWrite(t *testing.T) {\n\ttests := []struct {\n\t\tpri Priority\n\t\tpre string\n\t\tmsg string\n\t\texp string\n\t}{\n\t\t{LOG_USER | LOG_ERR, \"syslog_test\", \"\", \"%s %s syslog_test[%d]: \\n\"},\n\t\t{LOG_USER | LOG_ERR, \"syslog_test\", \"write test\", \"%s %s syslog_test[%d]: write test\\n\"},\n\t\t\/\/ Write should not add \\n if there already is one\n\t\t{LOG_USER | LOG_ERR, \"syslog_test\", \"write test 2\\n\", \"%s %s syslog_test[%d]: write test 2\\n\"},\n\t}\n\n\tif hostname, err := os.Hostname(); err != nil {\n\t\tt.Fatalf(\"Error retrieving hostname\")\n\t} else {\n\t\tfor _, test := range tests {\n\t\t\tdone := make(chan string)\n\t\t\taddr, _ := startServer(\"udp\", \"\", done)\n\t\t\tl, err := Dial(\"udp\", addr, test.pri, test.pre)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"syslog.Dial() failed: %v\", err)\n\t\t\t}\n\t\t\t_, err = io.WriteString(l, test.msg)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"WriteString() failed: %v\", err)\n\t\t\t}\n\t\t\trcvd := <-done\n\t\t\ttest.exp = fmt.Sprintf(\"<%d>\", test.pri) + test.exp\n\t\t\tvar parsedHostname, timestamp string\n\t\t\tvar pid int\n\t\t\tif n, err := fmt.Sscanf(rcvd, test.exp, ×tamp, &parsedHostname, &pid); n != 3 || err != nil || hostname != parsedHostname {\n\t\t\t\tt.Errorf(\"s.Info() = '%q', didn't match '%q' (%d %s)\", rcvd, test.exp, n, err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestConcurrentWrite(t *testing.T) {\n\taddr, _ := startServer(\"udp\", \"\", make(chan string))\n\tw, err := Dial(\"udp\", addr, LOG_USER|LOG_ERR, \"how's it going?\")\n\tif err != nil {\n\t\tt.Fatalf(\"syslog.Dial() failed: %v\", err)\n\t}\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\terr := w.Info(\"test\")\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Info() failed: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n}\n\nfunc TestConcurrentReconnect(t *testing.T) {\n\tcrashy = true\n\tdefer func() { crashy = false }()\n\n\tnet := \"unix\"\n\tdone := make(chan string)\n\taddr, srvWG := startServer(net, \"\", done)\n\tdefer os.Remove(addr)\n\n\t\/\/ count all the messages arriving\n\tcount := make(chan int)\n\tgo func() {\n\t\tct := 0\n\t\tfor _ = range done {\n\t\t\tct++\n\t\t\t\/\/ we are looking for 500 out of 1000 events\n\t\t\t\/\/ here because lots of log messages are lost\n\t\t\t\/\/ in buffers (kernel and\/or bufio)\n\t\t\tif ct > 500 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tcount <- ct\n\t}()\n\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tw, err := Dial(net, addr, LOG_USER|LOG_ERR, \"tag\")\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"syslog.Dial() failed: %v\", err)\n\t\t\t}\n\t\t\tfor i := 0; i < 100; i++ {\n\t\t\t\terr := w.Info(\"test\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"Info() failed: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n\tsrvWG.Wait()\n\tclose(done)\n\n\tselect {\n\tcase <-count:\n\tcase <-time.After(100 * time.Millisecond):\n\t\tt.Error(\"timeout in concurrent reconnect\")\n\t}\n}\nlog\/syslog: fix race in test between channel close and accept.\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build !windows,!plan9\n\npackage syslog\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc runPktSyslog(c net.PacketConn, done chan<- string) {\n\tvar buf [4096]byte\n\tvar rcvd string\n\tct := 0\n\tfor {\n\t\tvar n int\n\t\tvar err error\n\n\t\tc.SetReadDeadline(time.Now().Add(100 * time.Millisecond))\n\t\tn, _, err = c.ReadFrom(buf[:])\n\t\trcvd += string(buf[:n])\n\t\tif err != nil {\n\t\t\tif oe, ok := err.(*net.OpError); ok {\n\t\t\t\tif ct < 3 && oe.Temporary() {\n\t\t\t\t\tct++\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\tc.Close()\n\tdone <- rcvd\n}\n\nvar crashy = false\n\nfunc runStreamSyslog(l net.Listener, done chan<- string, wg *sync.WaitGroup) {\n\tfor {\n\t\tvar c net.Conn\n\t\tvar err error\n\t\tif c, err = l.Accept(); err != nil {\n\t\t\treturn\n\t\t}\n\t\twg.Add(1)\n\t\tgo func(c net.Conn) {\n\t\t\tdefer wg.Done()\n\t\t\tc.SetReadDeadline(time.Now().Add(5 * time.Second))\n\t\t\tb := bufio.NewReader(c)\n\t\t\tfor ct := 1; !crashy || ct&7 != 0; ct++ {\n\t\t\t\ts, err := b.ReadString('\\n')\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tdone <- s\n\t\t\t}\n\t\t\tc.Close()\n\t\t}(c)\n\t}\n}\n\nfunc startServer(n, la string, done chan<- string) (addr string, sock io.Closer, wg *sync.WaitGroup) {\n\tif n == \"udp\" || n == \"tcp\" {\n\t\tla = \"127.0.0.1:0\"\n\t} else {\n\t\t\/\/ unix and unixgram: choose an address if none given\n\t\tif la == \"\" {\n\t\t\t\/\/ use ioutil.TempFile to get a name that is unique\n\t\t\tf, err := ioutil.TempFile(\"\", \"syslogtest\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(\"TempFile: \", err)\n\t\t\t}\n\t\t\tf.Close()\n\t\t\tla = f.Name()\n\t\t}\n\t\tos.Remove(la)\n\t}\n\n\twg = new(sync.WaitGroup)\n\tif n == \"udp\" || n == \"unixgram\" {\n\t\tl, e := net.ListenPacket(n, la)\n\t\tif e != nil {\n\t\t\tlog.Fatalf(\"startServer failed: %v\", e)\n\t\t}\n\t\taddr = l.LocalAddr().String()\n\t\tsock = l\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\trunPktSyslog(l, done)\n\t\t}()\n\t} else {\n\t\tl, e := net.Listen(n, la)\n\t\tif e != nil {\n\t\t\tlog.Fatalf(\"startServer failed: %v\", e)\n\t\t}\n\t\taddr = l.Addr().String()\n\t\tsock = l\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\trunStreamSyslog(l, done, wg)\n\t\t}()\n\t}\n\treturn\n}\n\nfunc TestWithSimulated(t *testing.T) {\n\tmsg := \"Test 123\"\n\ttransport := []string{\"unix\", \"unixgram\", \"udp\", \"tcp\"}\n\n\tfor _, tr := range transport {\n\t\tdone := make(chan string)\n\t\taddr, _, _ := startServer(tr, \"\", done)\n\t\tif tr == \"unix\" || tr == \"unixgram\" {\n\t\t\tdefer os.Remove(addr)\n\t\t}\n\t\ts, err := Dial(tr, addr, LOG_INFO|LOG_USER, \"syslog_test\")\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Dial() failed: %v\", err)\n\t\t}\n\t\terr = s.Info(msg)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"log failed: %v\", err)\n\t\t}\n\t\tcheck(t, msg, <-done)\n\t\ts.Close()\n\t}\n}\n\nfunc TestFlap(t *testing.T) {\n\tnet := \"unix\"\n\tdone := make(chan string)\n\taddr, sock, _ := startServer(net, \"\", done)\n\tdefer os.Remove(addr)\n\tdefer sock.Close()\n\n\ts, err := Dial(net, addr, LOG_INFO|LOG_USER, \"syslog_test\")\n\tif err != nil {\n\t\tt.Fatalf(\"Dial() failed: %v\", err)\n\t}\n\tmsg := \"Moo 2\"\n\terr = s.Info(msg)\n\tif err != nil {\n\t\tt.Fatalf(\"log failed: %v\", err)\n\t}\n\tcheck(t, msg, <-done)\n\n\t\/\/ restart the server\n\t_, sock2, _ := startServer(net, addr, done)\n\tdefer sock2.Close()\n\n\t\/\/ and try retransmitting\n\tmsg = \"Moo 3\"\n\terr = s.Info(msg)\n\tif err != nil {\n\t\tt.Fatalf(\"log failed: %v\", err)\n\t}\n\tcheck(t, msg, <-done)\n\n\ts.Close()\n}\n\nfunc TestNew(t *testing.T) {\n\tif LOG_LOCAL7 != 23<<3 {\n\t\tt.Fatalf(\"LOG_LOCAL7 has wrong value\")\n\t}\n\tif testing.Short() {\n\t\t\/\/ Depends on syslog daemon running, and sometimes it's not.\n\t\tt.Skip(\"skipping syslog test during -short\")\n\t}\n\n\ts, err := New(LOG_INFO|LOG_USER, \"the_tag\")\n\tif err != nil {\n\t\tt.Fatalf(\"New() failed: %s\", err)\n\t}\n\t\/\/ Don't send any messages.\n\ts.Close()\n}\n\nfunc TestNewLogger(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping syslog test during -short\")\n\t}\n\tf, err := NewLogger(LOG_USER|LOG_INFO, 0)\n\tif f == nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestDial(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping syslog test during -short\")\n\t}\n\tf, err := Dial(\"\", \"\", (LOG_LOCAL7|LOG_DEBUG)+1, \"syslog_test\")\n\tif f != nil {\n\t\tt.Fatalf(\"Should have trapped bad priority\")\n\t}\n\tf, err = Dial(\"\", \"\", -1, \"syslog_test\")\n\tif f != nil {\n\t\tt.Fatalf(\"Should have trapped bad priority\")\n\t}\n\tl, err := Dial(\"\", \"\", LOG_USER|LOG_ERR, \"syslog_test\")\n\tif err != nil {\n\t\tt.Fatalf(\"Dial() failed: %s\", err)\n\t}\n\tl.Close()\n}\n\nfunc check(t *testing.T, in, out string) {\n\ttmpl := fmt.Sprintf(\"<%d>%%s %%s syslog_test[%%d]: %s\\n\", LOG_USER+LOG_INFO, in)\n\tif hostname, err := os.Hostname(); err != nil {\n\t\tt.Error(\"Error retrieving hostname\")\n\t} else {\n\t\tvar parsedHostname, timestamp string\n\t\tvar pid int\n\t\tif n, err := fmt.Sscanf(out, tmpl, ×tamp, &parsedHostname, &pid); n != 3 || err != nil || hostname != parsedHostname {\n\t\t\tt.Errorf(\"Got %q, does not match template %q (%d %s)\", out, tmpl, n, err)\n\t\t}\n\t}\n}\n\nfunc TestWrite(t *testing.T) {\n\ttests := []struct {\n\t\tpri Priority\n\t\tpre string\n\t\tmsg string\n\t\texp string\n\t}{\n\t\t{LOG_USER | LOG_ERR, \"syslog_test\", \"\", \"%s %s syslog_test[%d]: \\n\"},\n\t\t{LOG_USER | LOG_ERR, \"syslog_test\", \"write test\", \"%s %s syslog_test[%d]: write test\\n\"},\n\t\t\/\/ Write should not add \\n if there already is one\n\t\t{LOG_USER | LOG_ERR, \"syslog_test\", \"write test 2\\n\", \"%s %s syslog_test[%d]: write test 2\\n\"},\n\t}\n\n\tif hostname, err := os.Hostname(); err != nil {\n\t\tt.Fatalf(\"Error retrieving hostname\")\n\t} else {\n\t\tfor _, test := range tests {\n\t\t\tdone := make(chan string)\n\t\t\taddr, sock, _ := startServer(\"udp\", \"\", done)\n\t\t\tdefer sock.Close()\n\t\t\tl, err := Dial(\"udp\", addr, test.pri, test.pre)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"syslog.Dial() failed: %v\", err)\n\t\t\t}\n\t\t\t_, err = io.WriteString(l, test.msg)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"WriteString() failed: %v\", err)\n\t\t\t}\n\t\t\trcvd := <-done\n\t\t\ttest.exp = fmt.Sprintf(\"<%d>\", test.pri) + test.exp\n\t\t\tvar parsedHostname, timestamp string\n\t\t\tvar pid int\n\t\t\tif n, err := fmt.Sscanf(rcvd, test.exp, ×tamp, &parsedHostname, &pid); n != 3 || err != nil || hostname != parsedHostname {\n\t\t\t\tt.Errorf(\"s.Info() = '%q', didn't match '%q' (%d %s)\", rcvd, test.exp, n, err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestConcurrentWrite(t *testing.T) {\n\taddr, sock, _ := startServer(\"udp\", \"\", make(chan string))\n\tdefer sock.Close()\n\tw, err := Dial(\"udp\", addr, LOG_USER|LOG_ERR, \"how's it going?\")\n\tif err != nil {\n\t\tt.Fatalf(\"syslog.Dial() failed: %v\", err)\n\t}\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\terr := w.Info(\"test\")\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Info() failed: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n}\n\nfunc TestConcurrentReconnect(t *testing.T) {\n\tcrashy = true\n\tdefer func() { crashy = false }()\n\n\tnet := \"unix\"\n\tdone := make(chan string)\n\taddr, sock, srvWG := startServer(net, \"\", done)\n\tdefer os.Remove(addr)\n\n\t\/\/ count all the messages arriving\n\tcount := make(chan int)\n\tgo func() {\n\t\tct := 0\n\t\tfor _ = range done {\n\t\t\tct++\n\t\t\t\/\/ we are looking for 500 out of 1000 events\n\t\t\t\/\/ here because lots of log messages are lost\n\t\t\t\/\/ in buffers (kernel and\/or bufio)\n\t\t\tif ct > 500 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tcount <- ct\n\t}()\n\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tw, err := Dial(net, addr, LOG_USER|LOG_ERR, \"tag\")\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"syslog.Dial() failed: %v\", err)\n\t\t\t}\n\t\t\tfor i := 0; i < 100; i++ {\n\t\t\t\terr := w.Info(\"test\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"Info() failed: %v\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n\tsock.Close()\n\tsrvWG.Wait()\n\tclose(done)\n\n\tselect {\n\tcase <-count:\n\tcase <-time.After(100 * time.Millisecond):\n\t\tt.Error(\"timeout in concurrent reconnect\")\n\t}\n}\n<|endoftext|>"} {"text":"Made it work for channels. Also using channels for stdout and stderr.<|endoftext|>"} {"text":"package main\n\ntype Broker interface {\n\tPublish(msg []byte)\n\tSubscribe() chan []byte\n\tUnsubscribe(ch []byte)\n}\nRegistrar interface for registering channels.package main\n\ntype Broker interface {\n\tPublish(msg []byte)\n\tSubscribe() chan []byte\n\tUnsubscribe(ch []byte)\n}\n\ntype Registrar interface {\n\tRegister(id UUID) error\n\tIsRegistered(id UUID) bool\n}\n<|endoftext|>"} {"text":"package db\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/greensnark\/go-sequell\/action\"\n\t\"github.com\/greensnark\/go-sequell\/crawl\/data\"\n\tcdb \"github.com\/greensnark\/go-sequell\/crawl\/db\"\n\t\"github.com\/greensnark\/go-sequell\/ectx\"\n\t\"github.com\/greensnark\/go-sequell\/loader\"\n\t\"github.com\/greensnark\/go-sequell\/pg\"\n\t\"github.com\/greensnark\/go-sequell\/schema\"\n\t\"github.com\/greensnark\/go-sequell\/sources\"\n)\n\nvar DbExtensions = []string{\"citext\", \"orafce\"}\n\nfunc CrawlSchema() *cdb.CrawlSchema {\n\tschema, err := cdb.LoadSchema(data.Crawl)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn schema\n}\n\nfunc Sources() *sources.Servers {\n\tsrc, err := sources.Sources(data.Sources(), action.LogCache)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn src\n}\n\nfunc DumpSchema(dbspec pg.ConnSpec) error {\n\tdb, err := dbspec.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\ts, err := db.IntrospectSchema()\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.Sort().Write(schema.SelTablesIndexes, os.Stdout)\n\treturn nil\n}\n\nfunc CreateDB(admin, db pg.ConnSpec) error {\n\tpgdb, err := admin.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer pgdb.Close()\n\tdbexist, err := pgdb.DatabaseExists(db.Database)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !dbexist {\n\t\tfmt.Println(\"Creating database\", db.Database)\n\t\tif err = pgdb.CreateDatabase(db.Database); err != nil {\n\t\t\treturn ectx.Err(\"CreateDatabase\", err)\n\t\t}\n\t}\n\n\tif err = CreateExtensions(admin.SpecForDB(db.Database)); err != nil {\n\t\treturn ectx.Err(\"CreateExtensions\", err)\n\t}\n\n\tif err = CreateUser(pgdb, db); err != nil {\n\t\treturn ectx.Err(\"CreateUser\", err)\n\t}\n\treturn ectx.Err(\"GrantDBOwner\", pgdb.GrantDBOwner(db.Database, db.User))\n}\n\nfunc CreateUser(pgdb pg.DB, dbspec pg.ConnSpec) error {\n\tuserExist, err := pgdb.UserExists(dbspec.User)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !userExist {\n\t\tif err = pgdb.CreateUser(dbspec.User, dbspec.Password); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc CreateExtensions(db pg.ConnSpec) error {\n\tc, err := db.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer c.Close()\n\tfor _, ext := range DbExtensions {\n\t\textExists, err := c.ExtensionExists(ext)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !extExists {\n\t\t\tif err = c.CreateExtension(ext); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc PrintSchema(skipIndexes, dropIndexes, createIndexes bool) {\n\ts := CrawlSchema().Schema()\n\tsel := schema.SelTablesIndexes\n\tif skipIndexes {\n\t\tsel = schema.SelTables\n\t}\n\tif dropIndexes {\n\t\tsel = schema.SelDropIndexes\n\t}\n\tif createIndexes {\n\t\tsel = schema.SelIndexes\n\t}\n\ts.Sort().Write(sel, os.Stdout)\n}\n\nfunc CheckDBSchema(dbspec pg.ConnSpec, applyDelta bool) error {\n\tdb, err := dbspec.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\tactualSchema, err := db.IntrospectSchema()\n\tif err != nil {\n\t\treturn err\n\t}\n\twantedSchema := CrawlSchema().Schema()\n\tdiff := wantedSchema.DiffSchema(actualSchema)\n\tif len(diff.Tables) == 0 {\n\t\tfmt.Fprintf(os.Stderr, \"Schema is up-to-date.\\n\")\n\t\treturn nil\n\t}\n\n\tfmt.Fprintf(os.Stderr, \"Schema delta:\\n\")\n\tdiff.PrintDelta(os.Stderr)\n\tif applyDelta {\n\t\treturn nil\n\t}\n\treturn nil\n}\n\nfunc CreateDBSchema(db pg.ConnSpec) error {\n\tc, err := db.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer c.Close()\n\ts := CrawlSchema().Schema()\n\tfor _, sql := range s.SqlSel(schema.SelTables) {\n\t\tif _, err = c.Exec(sql); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc DropDB(admin pg.ConnSpec, db pg.ConnSpec, force bool) error {\n\tif !force {\n\t\treturn fmt.Errorf(\"Use --force to drop the database '%s'\", db.Database)\n\t}\n\tadminDB, err := admin.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println(\"Dropping database\", db.Database)\n\t_, err = adminDB.Exec(\"drop database \" + db.Database)\n\treturn err\n}\n\nfunc LoadLogs(db pg.ConnSpec) error {\n\tc, err := db.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\tldr := loader.New(c, Sources(), CrawlSchema(),\n\t\tdata.Crawl.StringMap(\"game-type-prefixes\"))\n\tfmt.Println(\"Loading logs...\")\n\treturn ldr.LoadCommit()\n}\n\nfunc CreateIndexes(db pg.ConnSpec) error {\n\tc, err := db.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsch := CrawlSchema().Schema().Sort()\n\tfor _, index := range sch.SqlSel(schema.SelIndexes) {\n\t\tfmt.Println(\"EXEC\", index)\n\t\tif _, err = c.Exec(index); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\nBe a little more verbose.package db\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/greensnark\/go-sequell\/action\"\n\t\"github.com\/greensnark\/go-sequell\/crawl\/data\"\n\tcdb \"github.com\/greensnark\/go-sequell\/crawl\/db\"\n\t\"github.com\/greensnark\/go-sequell\/ectx\"\n\t\"github.com\/greensnark\/go-sequell\/loader\"\n\t\"github.com\/greensnark\/go-sequell\/pg\"\n\t\"github.com\/greensnark\/go-sequell\/schema\"\n\t\"github.com\/greensnark\/go-sequell\/sources\"\n)\n\nvar DbExtensions = []string{\"citext\", \"orafce\"}\n\nfunc CrawlSchema() *cdb.CrawlSchema {\n\tschema, err := cdb.LoadSchema(data.Crawl)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn schema\n}\n\nfunc Sources() *sources.Servers {\n\tsrc, err := sources.Sources(data.Sources(), action.LogCache)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn src\n}\n\nfunc DumpSchema(dbspec pg.ConnSpec) error {\n\tdb, err := dbspec.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\ts, err := db.IntrospectSchema()\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.Sort().Write(schema.SelTablesIndexes, os.Stdout)\n\treturn nil\n}\n\nfunc CreateDB(admin, db pg.ConnSpec) error {\n\tpgdb, err := admin.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer pgdb.Close()\n\tdbexist, err := pgdb.DatabaseExists(db.Database)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !dbexist {\n\t\tfmt.Printf(\"Creating database \\\"%s\\\"\\n\", db.Database)\n\t\tif err = pgdb.CreateDatabase(db.Database); err != nil {\n\t\t\treturn ectx.Err(\"CreateDatabase\", err)\n\t\t}\n\t}\n\n\tif err = CreateExtensions(admin.SpecForDB(db.Database)); err != nil {\n\t\treturn ectx.Err(\"CreateExtensions\", err)\n\t}\n\n\tif err = CreateUser(pgdb, db); err != nil {\n\t\treturn ectx.Err(\"CreateUser\", err)\n\t}\n\treturn ectx.Err(\"GrantDBOwner\", pgdb.GrantDBOwner(db.Database, db.User))\n}\n\nfunc CreateUser(pgdb pg.DB, dbspec pg.ConnSpec) error {\n\tuserExist, err := pgdb.UserExists(dbspec.User)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !userExist {\n\t\tfmt.Printf(\"Creating user \\\"%s\\\"\\n\", dbspec.User)\n\t\tif err = pgdb.CreateUser(dbspec.User, dbspec.Password); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc CreateExtensions(db pg.ConnSpec) error {\n\tc, err := db.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer c.Close()\n\tfor _, ext := range DbExtensions {\n\t\textExists, err := c.ExtensionExists(ext)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !extExists {\n\t\t\tfmt.Printf(\"Creating extension \\\"%s\\\"\\n\", ext)\n\t\t\tif err = c.CreateExtension(ext); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc PrintSchema(skipIndexes, dropIndexes, createIndexes bool) {\n\ts := CrawlSchema().Schema()\n\tsel := schema.SelTablesIndexes\n\tif skipIndexes {\n\t\tsel = schema.SelTables\n\t}\n\tif dropIndexes {\n\t\tsel = schema.SelDropIndexes\n\t}\n\tif createIndexes {\n\t\tsel = schema.SelIndexes\n\t}\n\ts.Sort().Write(sel, os.Stdout)\n}\n\nfunc CheckDBSchema(dbspec pg.ConnSpec, applyDelta bool) error {\n\tdb, err := dbspec.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\tactualSchema, err := db.IntrospectSchema()\n\tif err != nil {\n\t\treturn err\n\t}\n\twantedSchema := CrawlSchema().Schema()\n\tdiff := wantedSchema.DiffSchema(actualSchema)\n\tif len(diff.Tables) == 0 {\n\t\tfmt.Fprintf(os.Stderr, \"Schema is up-to-date.\\n\")\n\t\treturn nil\n\t}\n\n\tfmt.Fprintf(os.Stderr, \"Schema delta:\\n\")\n\tdiff.PrintDelta(os.Stderr)\n\tif applyDelta {\n\t\treturn nil\n\t}\n\treturn nil\n}\n\nfunc CreateDBSchema(db pg.ConnSpec) error {\n\tc, err := db.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer c.Close()\n\ts := CrawlSchema().Schema()\n\tfmt.Printf(\"Creating tables in database \\\"%s\\\"\\n\", db.Database)\n\tfor _, sql := range s.SqlSel(schema.SelTables) {\n\t\tif _, err = c.Exec(sql); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc DropDB(admin pg.ConnSpec, db pg.ConnSpec, force bool) error {\n\tif !force {\n\t\treturn fmt.Errorf(\"Use --force to drop the database '%s'\", db.Database)\n\t}\n\tadminDB, err := admin.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"Dropping database \\\"%s\\\"\\n\", db.Database)\n\t_, err = adminDB.Exec(\"drop database \" + db.Database)\n\treturn err\n}\n\nfunc LoadLogs(db pg.ConnSpec) error {\n\tc, err := db.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\tldr := loader.New(c, Sources(), CrawlSchema(),\n\t\tdata.Crawl.StringMap(\"game-type-prefixes\"))\n\tfmt.Println(\"Loading logs...\")\n\treturn ldr.LoadCommit()\n}\n\nfunc CreateIndexes(db pg.ConnSpec) error {\n\tc, err := db.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsch := CrawlSchema().Schema().Sort()\n\tfor _, index := range sch.SqlSel(schema.SelIndexes) {\n\t\tfmt.Println(\"EXEC\", index)\n\t\tif _, err = c.Exec(index); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"\/\/\n\/\/ users.go\n\/\/ Copyright (C) 2016 wanglong \n\/\/\n\/\/ Distributed under terms of the MIT license.\n\/\/\n\npackage action\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/datawolf\/index-cli\/config\"\n\t\"github.com\/datawolf\/index-cli\/index\"\n\t\"golang.org\/x\/crypto\/ssh\/terminal\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nfunc CreateUser(c *cli.Context) {\n\tr := bufio.NewReader(os.Stdin)\n\tfmt.Print(\"Please input USERNAME you want to create: \")\n\tusername, _ := r.ReadString('\\n')\n\tusername = strings.TrimSpace(username)\n\n\tfmt.Print(\"Please input PASSWORD: \")\n\tbytePassword, _ := terminal.ReadPassword(int(syscall.Stdin))\n\tpassword := string(bytePassword)\n\tpassword = strings.TrimSpace(password)\n\n\tfmt.Printf(\"\\n\")\n\tfmt.Print(\"Please re-input PASSWORD: \")\n\tbytePassword, _ = terminal.ReadPassword(int(syscall.Stdin))\n\tpassword2 := string(bytePassword)\n\tpassword2 = strings.TrimSpace(password2)\n\n\tif password != password2 {\n\t\tfmt.Printf(\"\\nSorry, passwords do not match\\n\")\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Printf(\"\\n\")\n\tfmt.Print(\"Please input EMAIL: \")\n\temail, _ := r.ReadString('\\n')\n\temail = strings.TrimSpace(email)\n\n\tfmt.Print(\"Please input PHONE: \")\n\tphone, _ := r.ReadString('\\n')\n\tphone = strings.TrimSpace(phone)\n\n\tuser := &index.User{\n\t\tUsername: &username,\n\t\tPassword: &password,\n\t\tEmail: &email,\n\t\tPhone: &phone,\n\t}\n\n\tclient := index.NewClient(nil)\n\trel, err := url.Parse(index.EuropaURL)\n\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tclient.BaseURL = rel\n\n\tresult, _, err := client.Users.Create(user)\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Printf(\"%s\", result)\n}\n\nfunc UpdateUser(c *cli.Context) {\n\t\/\/ Get the username and password\n\tconfigFile, err := config.Load(\"\")\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to loading the config file\")\n\t}\n\n\tac := configFile.AuthConfigs[\"rnd-dockerhub.huawei.com\"]\n\tif ac.Username == \"\" && ac.Password == \"\" {\n\t\tlog.Fatal(\"Please login in the hub, using command \\\"index-cli login\\\"\")\n\t}\n\n\tusername := strings.TrimSpace(ac.Username)\n\tpassword := strings.TrimSpace(ac.Password)\n\n\t\/\/ Get the new password\n\tr := bufio.NewReader(os.Stdin)\n\tfmt.Print(\"Please input NEW PASSWORD: \")\n\tbytePassword, _ := terminal.ReadPassword(int(syscall.Stdin))\n\tnewPassword := string(bytePassword)\n\tnewPassword = strings.TrimSpace(newPassword)\n\n\tfmt.Printf(\"\\n\")\n\tfmt.Print(\"Please re-input NEW PASSWORD: \")\n\tbytePassword, _ = terminal.ReadPassword(int(syscall.Stdin))\n\tnewPassword2 := string(bytePassword)\n\tnewPassword2 = strings.TrimSpace(newPassword2)\n\n\tif newPassword != newPassword2 {\n\t\tfmt.Printf(\"\\nSorry, passwords do not match\\n\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Get the new Email and Phone\n\tfmt.Printf(\"\\n\")\n\tfmt.Print(\"Please input EMAIL: \")\n\temail, _ := r.ReadString('\\n')\n\temail = strings.TrimSpace(email)\n\n\tfmt.Print(\"Please input PHONE: \")\n\tphone, _ := r.ReadString('\\n')\n\tphone = strings.TrimSpace(phone)\n\n\tuser := &index.User{\n\t\tUsername: &username,\n\t\tPassword: &password,\n\t\tNewPassword: &newPassword,\n\t\tEmail: &email,\n\t\tPhone: &phone,\n\t}\n\n\tclient := index.NewClient(nil)\n\trel, err := url.Parse(index.EuropaURL)\n\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tclient.BaseURL = rel\n\n\tresult, _, err := client.Users.Update(user)\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Printf(\"%s\", result)\n}\nadd HTTP Basic for user update method\/\/\n\/\/ users.go\n\/\/ Copyright (C) 2016 wanglong \n\/\/\n\/\/ Distributed under terms of the MIT license.\n\/\/\n\npackage action\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/datawolf\/index-cli\/config\"\n\t\"github.com\/datawolf\/index-cli\/index\"\n\t\"golang.org\/x\/crypto\/ssh\/terminal\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nfunc CreateUser(c *cli.Context) {\n\tr := bufio.NewReader(os.Stdin)\n\tfmt.Print(\"Please input USERNAME you want to create: \")\n\tusername, _ := r.ReadString('\\n')\n\tusername = strings.TrimSpace(username)\n\n\tfmt.Print(\"Please input PASSWORD: \")\n\tbytePassword, _ := terminal.ReadPassword(int(syscall.Stdin))\n\tpassword := string(bytePassword)\n\tpassword = strings.TrimSpace(password)\n\n\tfmt.Printf(\"\\n\")\n\tfmt.Print(\"Please re-input PASSWORD: \")\n\tbytePassword, _ = terminal.ReadPassword(int(syscall.Stdin))\n\tpassword2 := string(bytePassword)\n\tpassword2 = strings.TrimSpace(password2)\n\n\tif password != password2 {\n\t\tfmt.Printf(\"\\nSorry, passwords do not match\\n\")\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Printf(\"\\n\")\n\tfmt.Print(\"Please input EMAIL: \")\n\temail, _ := r.ReadString('\\n')\n\temail = strings.TrimSpace(email)\n\n\tfmt.Print(\"Please input PHONE: \")\n\tphone, _ := r.ReadString('\\n')\n\tphone = strings.TrimSpace(phone)\n\n\tuser := &index.User{\n\t\tUsername: &username,\n\t\tPassword: &password,\n\t\tEmail: &email,\n\t\tPhone: &phone,\n\t}\n\n\tclient := index.NewClient(nil)\n\trel, err := url.Parse(index.EuropaURL)\n\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tclient.BaseURL = rel\n\n\tresult, _, err := client.Users.Create(user)\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Printf(\"%s\", result)\n}\n\nfunc UpdateUser(c *cli.Context) {\n\t\/\/ Get the username and password\n\tconfigFile, err := config.Load(\"\")\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to loading the config file\")\n\t}\n\n\tac := configFile.AuthConfigs[\"rnd-dockerhub.huawei.com\"]\n\tif ac.Username == \"\" && ac.Password == \"\" {\n\t\tlog.Fatal(\"Please login in the hub, using command \\\"index-cli login\\\"\")\n\t}\n\n\tusername := strings.TrimSpace(ac.Username)\n\tpassword := strings.TrimSpace(ac.Password)\n\n\tfmt.Printf(\"Username: %s\\n\", username)\n\t\/\/ Get the new password\n\tr := bufio.NewReader(os.Stdin)\n\tfmt.Print(\"Please input NEW PASSWORD: \")\n\tbytePassword, _ := terminal.ReadPassword(int(syscall.Stdin))\n\tnewPassword := string(bytePassword)\n\tnewPassword = strings.TrimSpace(newPassword)\n\n\tfmt.Printf(\"\\n\")\n\tfmt.Print(\"Please re-input NEW PASSWORD: \")\n\tbytePassword, _ = terminal.ReadPassword(int(syscall.Stdin))\n\tnewPassword2 := string(bytePassword)\n\tnewPassword2 = strings.TrimSpace(newPassword2)\n\n\tif newPassword != newPassword2 {\n\t\tfmt.Printf(\"\\nSorry, passwords do not match\\n\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Get the new Email and Phone\n\tfmt.Printf(\"\\n\")\n\tfmt.Print(\"Please input EMAIL: \")\n\temail, _ := r.ReadString('\\n')\n\temail = strings.TrimSpace(email)\n\n\tfmt.Print(\"Please input PHONE: \")\n\tphone, _ := r.ReadString('\\n')\n\tphone = strings.TrimSpace(phone)\n\n\tuser := &index.User{\n\t\tUsername: &username,\n\t\tPassword: &password,\n\t\tNewPassword: &newPassword,\n\t\tEmail: &email,\n\t\tPhone: &phone,\n\t}\n\n\ttp := index.BasicAuthTransport{\n\t\tUsername: strings.TrimSpace(ac.Username),\n\t\tPassword: strings.TrimSpace(ac.Password),\n\t}\n\tclient := index.NewClient(tp.Client())\n\trel, err := url.Parse(index.EuropaURL)\n\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tclient.BaseURL = rel\n\n\tresult, _, err := client.Users.Update(user)\n\tif err != nil {\n\t\tfmt.Printf(\"\\nerror: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Printf(\"%s\", result)\n}\n<|endoftext|>"} {"text":"package actions\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\t\"github.com\/bscott\/golangflow\/models\"\n\t\"github.com\/gobuffalo\/buffalo\"\n\t\"github.com\/gobuffalo\/buffalo\/render\"\n\t\"github.com\/gorilla\/feeds\"\n\t\"github.com\/markbates\/pop\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ HomeHandler is a default handler to serve up\n\/\/ a home page.\nfunc HomeHandler(c buffalo.Context) error {\n\t\/\/ Get the DB connection from the context\n\ttx := c.Value(\"tx\").(*pop.Connection)\n\tposts := &models.Posts{}\n\tq := tx.PaginateFromParams(c.Request().URL.Query())\n\t\/\/ You can order your list here. Just change\n\terr := q.Order(\"created_at desc\").All(posts)\n\t\/\/ to:\n\t\/\/ err := tx.Order(\"create_at desc\").All(posts)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\t\/\/ Make posts available inside the html template\n\tc.Set(\"posts\", posts)\n\tc.Set(\"pagination\", q.Paginator)\n\treturn c.Render(200, r.HTML(\"index.html\"))\n}\n\nfunc RSSFeed(c buffalo.Context) error {\n\ttx := c.Value(\"tx\").(*pop.Connection)\n\tposts := models.Posts{}\n\terr := tx.Order(\"created_at desc\").All(&posts)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tfeed := feeds.Feed{\n\t\tTitle: \"Golang Flow\",\n\t\tLink: &feeds.Link{Href: App().Host},\n\t\tDescription: \"All the Go news that's fit to print!\",\n\t\tAuthor: &feeds.Author{Name: \"Brian Scott\"},\n\t\tCreated: time.Now(),\n\t\tCopyright: \"This work is copyright © Brian Scott\",\n\t\tItems: make([]*feeds.Item, len(posts), len(posts)),\n\t}\n\n\tfor i, p := range posts {\n\t\tu := &models.User{}\n\t\terr := tx.Find(u, p.UserID)\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\tfeed.Items[i] = &feeds.Item{\n\t\t\tTitle: p.Title,\n\t\t\tLink: &feeds.Link{Href: fmt.Sprintf(\"\/posts\/%s\", p.ID)},\n\t\t\tDescription: p.Content,\n\t\t\tAuthor: &feeds.Author{Name: u.Name},\n\t\t\tCreated: p.CreatedAt,\n\t\t}\n\t}\n\n\treturn c.Render(200, r.Func(\"application\/rss+xml\", func(w io.Writer, d render.Data) error {\n\t\ts, err := feed.ToRss()\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\tw.Write([]byte(s))\n\t\treturn nil\n\t}))\n}\nAdded RSS MD strippackage actions\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\t\"github.com\/bscott\/golangflow\/models\"\n\t\"github.com\/gobuffalo\/buffalo\"\n\t\"github.com\/gobuffalo\/buffalo\/render\"\n\t\"github.com\/gorilla\/feeds\"\n\t\"github.com\/markbates\/pop\"\n\t\"github.com\/pkg\/errors\"\n\tstripmd \"github.com\/writeas\/go-strip-markdown\"\n)\n\n\/\/ HomeHandler is a default handler to serve up\n\/\/ a home page.\nfunc HomeHandler(c buffalo.Context) error {\n\t\/\/ Get the DB connection from the context\n\ttx := c.Value(\"tx\").(*pop.Connection)\n\tposts := &models.Posts{}\n\tq := tx.PaginateFromParams(c.Request().URL.Query())\n\t\/\/ You can order your list here. Just change\n\terr := q.Order(\"created_at desc\").All(posts)\n\t\/\/ to:\n\t\/\/ err := tx.Order(\"create_at desc\").All(posts)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\t\/\/ Make posts available inside the html template\n\tc.Set(\"posts\", posts)\n\tc.Set(\"pagination\", q.Paginator)\n\treturn c.Render(200, r.HTML(\"index.html\"))\n}\n\nfunc RSSFeed(c buffalo.Context) error {\n\ttx := c.Value(\"tx\").(*pop.Connection)\n\tposts := models.Posts{}\n\terr := tx.Order(\"created_at desc\").All(&posts)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tfeed := feeds.Feed{\n\t\tTitle: \"Golang Flow\",\n\t\tLink: &feeds.Link{Href: App().Host},\n\t\tDescription: \"All the Go news that's fit to print!\",\n\t\tAuthor: &feeds.Author{Name: \"Brian Scott\"},\n\t\tCreated: time.Now(),\n\t\tCopyright: \"This work is copyright © Brian Scott\",\n\t\tItems: make([]*feeds.Item, len(posts), len(posts)),\n\t}\n\n\tfor i, p := range posts {\n\t\tu := &models.User{}\n\t\terr := tx.Find(u, p.UserID)\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\tfeed.Items[i] = &feeds.Item{\n\t\t\tTitle: p.Title,\n\t\t\tLink: &feeds.Link{Href: fmt.Sprintf(\"\/posts\/%s\", p.ID)},\n\t\t\tDescription: stripmd.Strip(p.Content),\n\t\t\tAuthor: &feeds.Author{Name: u.Name},\n\t\t\tCreated: p.CreatedAt,\n\t\t}\n\t}\n\n\treturn c.Render(200, r.Func(\"application\/rss+xml\", func(w io.Writer, d render.Data) error {\n\t\ts, err := feed.ToRss()\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\tw.Write([]byte(s))\n\t\treturn nil\n\t}))\n}\n<|endoftext|>"} {"text":"package perpetua\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\n\t\"github.com\/thoj\/go-ircevent\"\n)\n\nconst version = \"perpetua quote bot v0.1a\"\n\nvar connection *irc.Connection\n\nfunc connect() {\n\tconnection = irc.IRC(options.IRC.Nickname, options.IRC.User)\n\tconnection.Version = version\n\tconnection.UseTLS = options.Server.UseTLS\n\tif options.Server.SkipVerify == true {\n\t\tconnection.TLSConfig = &tls.Config{InsecureSkipVerify: true}\n\t}\n\n\terr := connection.Connect(fmt.Sprintf(\"%s:%d\",\n\t\toptions.Server.Hostname,\n\t\toptions.Server.Port))\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc handleEvents() {\n\tconnection.AddCallback(\"001\", doWelcome)\n\tconnection.AddCallback(\"JOIN\", doJoin)\n\tconnection.AddCallback(\"PRIVMSG\", doPrivmsg)\n}\n\nfunc doWelcome(event *irc.Event) {\n\tconnection.Join(\"#\" + options.IRC.Channel)\n}\n\nfunc doJoin(event *irc.Event) {\n\tconnection.Privmsg(event.Arguments[0], \"Hello! I'm \"+version)\n}\n\nfunc doPrivmsg(event *irc.Event) {\n\tchannel := event.Arguments[0]\n\n\t\/\/ Don't speak in private!\n\tif channel == options.IRC.Nickname {\n\t\treturn\n\t}\n\tcommand, person := parseMessage(event.Message())\n\n\tif command != \"\" && person != \"\" {\n\n\t\tfmt.Println(person)\n\t}\n}\n\nfunc parseMessage(message string) (command, person string) {\n\n\tre := regexp.MustCompile(options.IRC.Nickname +\n\t\t`:?` +\n\t\t`\\s*` +\n\t\t`(?Pcita|cosa dice|quote|what does it say)` +\n\t\t`\\s*(?P[\\w\\s]+)`)\n\n\tres := re.FindStringSubmatch(message)\n\n\tnames := re.SubexpNames()\n\tmd := map[string]string{}\n\tfor i, n := range res {\n\t\tmd[names[i]] = n\n\t}\n\n\treturn md[\"command\"], md[\"person\"]\n}\n\nfunc startIRC() {\n\tconnect()\n\thandleEvents()\n\tconnection.Loop()\n}\nSend random quote to channelpackage perpetua\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\n\t\"github.com\/thoj\/go-ircevent\"\n)\n\nconst version = \"perpetua quote bot v0.1a\"\n\nvar connection *irc.Connection\n\nfunc connect() {\n\tconnection = irc.IRC(options.IRC.Nickname, options.IRC.User)\n\tconnection.Version = version\n\tconnection.UseTLS = options.Server.UseTLS\n\tif options.Server.SkipVerify == true {\n\t\tconnection.TLSConfig = &tls.Config{InsecureSkipVerify: true}\n\t}\n\n\terr := connection.Connect(fmt.Sprintf(\"%s:%d\",\n\t\toptions.Server.Hostname,\n\t\toptions.Server.Port))\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc handleEvents() {\n\tconnection.AddCallback(\"001\", doWelcome)\n\tconnection.AddCallback(\"JOIN\", doJoin)\n\tconnection.AddCallback(\"PRIVMSG\", doPrivmsg)\n}\n\nfunc doWelcome(event *irc.Event) {\n\tconnection.Join(\"#\" + options.IRC.Channel)\n}\n\nfunc doJoin(event *irc.Event) {\n\tconnection.Privmsg(event.Arguments[0], \"Hello! I'm \"+version)\n}\n\nfunc doPrivmsg(event *irc.Event) {\n\tchannel := event.Arguments[0]\n\n\t\/\/ Don't speak in private!\n\tif channel == options.IRC.Nickname {\n\t\treturn\n\t}\n\tcommand, person := parseMessage(event.Message())\n\n\tif command != \"\" && person != \"\" {\n\t\tconnection.Privmsg(event.Arguments[0], store.getQuote(person))\n\t}\n}\n\nfunc parseMessage(message string) (command, person string) {\n\n\tre := regexp.MustCompile(options.IRC.Nickname +\n\t\t`:?` +\n\t\t`\\s*` +\n\t\t`(?Pcita|cosa dice|quote|what does it say)` +\n\t\t`\\s*(?P[\\w\\s]+)`)\n\n\tres := re.FindStringSubmatch(message)\n\n\tnames := re.SubexpNames()\n\tmd := map[string]string{}\n\tfor i, n := range res {\n\t\tmd[names[i]] = n\n\t}\n\n\treturn md[\"command\"], md[\"person\"]\n}\n\nfunc startIRC() {\n\tconnect()\n\thandleEvents()\n\tconnection.Loop()\n}\n<|endoftext|>"} {"text":"Remove hardcode station<|endoftext|>"} {"text":"s\/newSEPosition\/newSEVec2\/<|endoftext|>"} {"text":"r: line insert<|endoftext|>"} {"text":"exclude vendor dir<|endoftext|>"} {"text":"\/\/ Copyright (C) 2015 Space Monkey, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage present\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/spacemonkeygo\/monkit\/v3\"\n)\n\n\/\/ Result writes the expected data to io.Writer and returns any errors if\n\/\/ found.\ntype Result func(io.Writer) error\n\nfunc curry(reg *monkit.Registry,\n\tf func(*monkit.Registry, io.Writer) error) func(io.Writer) error {\n\treturn func(w io.Writer) error {\n\t\treturn f(reg, w)\n\t}\n}\n\n\/\/ FromRequest takes a registry (usually the Default registry), an incoming\n\/\/ path, and optional query parameters, and returns a Result if possible.\n\/\/\n\/\/ FromRequest understands the following paths:\n\/\/ * \/ps, \/ps\/text - returns the result of SpansText\n\/\/ * \/ps\/dot - returns the result of SpansDot\n\/\/ * \/ps\/json - returns the result of SpansJSON\n\/\/ * \/funcs, \/funcs\/text - returns the result of FuncsText\n\/\/ * \/funcs\/dot - returns the result of FuncsDot\n\/\/ * \/funcs\/json - returns the result of FuncsJSON\n\/\/ * \/stats, \/stats\/text - returns the result of StatsText\n\/\/ * \/stats\/json - returns the result of StatsJSON\n\/\/ * \/trace\/svg - returns the result of TraceQuerySVG\n\/\/ * \/trace\/json - returns the result of TraceQueryJSON\n\/\/\n\/\/ The last two paths are worth discussing in more detail, as they take\n\/\/ query parameters. All trace endpoints require at least one of the following\n\/\/ two query parameters:\n\/\/ * regex - If provided, the very next Span that crosses a Func that has\n\/\/ a name that matches this regex will start a trace until that\n\/\/ triggering Span ends, provided the trace_id matches.\n\/\/ * trace_id - If provided, the very next Span on a trace with the given\n\/\/ trace id will start a trace until the triggering Span ends,\n\/\/ provided the regex matches. NOTE: the trace_id will be parsed\n\/\/ in hex.\n\/\/ By default, regular expressions are matched ahead of time against all known\n\/\/ Funcs, but perhaps the Func you want to trace hasn't been observed by the\n\/\/ process yet, in which case the regex will fail to match anything. You can\n\/\/ turn off this preselection behavior by providing preselect=false as an\n\/\/ additional query param. Be advised that until a trace completes, whether\n\/\/ or not it has started, it adds a small amount of overhead (a comparison or\n\/\/ two) to every monitored function.\nfunc FromRequest(reg *monkit.Registry, path string, query url.Values) (\n\tf Result, contentType string, err error) {\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t\/\/ wrap all functions with buffering\n\t\tunbuffered := f\n\t\tf = func(w io.Writer) (err error) {\n\t\t\tbuf := bufio.NewWriter(w)\n\t\t\terr = unbuffered(buf)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = buf.Flush()\n\t\t\treturn err\n\t\t}\n\t}()\n\n\tfirst, rest := shift(path)\n\tsecond, _ := shift(rest)\n\tswitch first {\n\tcase \"\":\n\t\treturn writeIndex, \"text\/html\", nil\n\tcase \"ps\":\n\t\tswitch second {\n\t\tcase \"\", \"text\":\n\t\t\treturn curry(reg, SpansText), \"text\/plain; charset=utf-8\", nil\n\t\tcase \"dot\":\n\t\t\treturn curry(reg, SpansDot), \"text\/plain; charset=utf-8\", nil\n\t\tcase \"json\":\n\t\t\treturn curry(reg, SpansJSON), \"application\/json; charset=utf-8\", nil\n\t\t}\n\n\tcase \"funcs\":\n\t\tswitch second {\n\t\tcase \"\", \"text\":\n\t\t\treturn curry(reg, FuncsText), \"text\/plain; charset=utf-8\", nil\n\t\tcase \"dot\":\n\t\t\treturn curry(reg, FuncsDot), \"text\/plain; charset=utf-8\", nil\n\t\tcase \"json\":\n\t\t\treturn curry(reg, FuncsJSON), \"application\/json; charset=utf-8\", nil\n\t\t}\n\n\tcase \"stats\":\n\t\tswitch second {\n\t\tcase \"\", \"text\", \"old\":\n\t\t\treturn func(w io.Writer) error {\n\t\t\t\treturn StatsText(reg, w)\n\t\t\t}, \"text\/plain; charset=utf-8\", nil\n\t\tcase \"json\":\n\t\t\treturn func(w io.Writer) error {\n\t\t\t\treturn StatsJSON(reg, w)\n\t\t\t}, \"application\/json; charset=utf-8\", nil\n\t\t}\n\n\tcase \"trace\":\n\t\tregexStr := query.Get(\"regex\")\n\t\ttraceIdStr := query.Get(\"trace_id\")\n\t\tif regexStr == \"\" && traceIdStr == \"\" {\n\t\t\treturn nil, \"\", errBadRequest.New(\"at least one of 'regex' or 'trace_id' \" +\n\t\t\t\t\"query parameters required\")\n\t\t}\n\t\tfnMatcher := func(*monkit.Func) bool { return true }\n\n\t\tif regexStr != \"\" {\n\t\t\tre, err := regexp.Compile(regexStr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, \"\", errBadRequest.New(\"invalid regex %#v: %v\",\n\t\t\t\t\tregexStr, err)\n\t\t\t}\n\t\t\tfnMatcher = func(f *monkit.Func) bool {\n\t\t\t\treturn re.MatchString(f.FullName())\n\t\t\t}\n\n\t\t\tpreselect := true\n\t\t\tif query.Get(\"preselect\") != \"\" {\n\t\t\t\tpreselect, err = strconv.ParseBool(query.Get(\"preselect\"))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, \"\", errBadRequest.New(\"invalid preselect %#v: %v\",\n\t\t\t\t\t\tquery.Get(\"preselect\"), err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif preselect {\n\t\t\t\tfuncs := map[*monkit.Func]bool{}\n\t\t\t\treg.Funcs(func(f *monkit.Func) {\n\t\t\t\t\tif fnMatcher(f) {\n\t\t\t\t\t\tfuncs[f] = true\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t\tif len(funcs) <= 0 {\n\t\t\t\t\treturn nil, \"\", errBadRequest.New(\"regex preselect matches 0 functions\")\n\t\t\t\t}\n\n\t\t\t\tfnMatcher = func(f *monkit.Func) bool { return funcs[f] }\n\t\t\t}\n\t\t}\n\n\t\tspanMatcher := func(s *monkit.Span) bool { return fnMatcher(s.Func()) }\n\n\t\tif traceIdStr != \"\" {\n\t\t\ttraceId, err := strconv.ParseUint(traceIdStr, 16, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, \"\", errBadRequest.New(\n\t\t\t\t\t\"trace_id expected to be hex unsigned 64 bit number: %#v\", traceIdStr)\n\t\t\t}\n\t\t\tspanMatcher = func(s *monkit.Span) bool {\n\t\t\t\treturn s.Trace().Id() == int64(traceId) && fnMatcher(s.Func())\n\t\t\t}\n\t\t}\n\n\t\tswitch second {\n\t\tcase \"svg\":\n\t\t\treturn func(w io.Writer) error {\n\t\t\t\treturn TraceQuerySVG(reg, w, spanMatcher)\n\t\t\t}, \"image\/svg+xml; charset=utf-8\", nil\n\t\tcase \"json\":\n\t\t\treturn func(w io.Writer) error {\n\t\t\t\treturn TraceQueryJSON(reg, w, spanMatcher)\n\t\t\t}, \"application\/json; charset=utf-8\", nil\n\t\t}\n\t}\n\treturn nil, \"\", errNotFound.New(\"path not found: %s\", path)\n}\n\nfunc shift(path string) (dir, left string) {\n\tpath = strings.TrimLeft(path, \"\/\")\n\tsplit := strings.Index(path, \"\/\")\n\tif split == -1 {\n\t\treturn path, \"\"\n\t}\n\treturn path[:split], path[split:]\n}\n\nfunc writeIndex(w io.Writer) error {\n\t_, err := w.Write([]byte(`\n\n\t\n\t\t\n\t\tMonkit<\/title>\n\t\t<meta http-equiv=\"refresh\" content=\"1\">\n\t<\/head>\n\t<body>\n\t\t<dl style=\"max-width: 80ch;\">\n\t\t\t<dt><a href=\"ps\">\/ps<\/a><\/dt>\n\t\t\t<dt><a href=\"ps\/json\">\/ps\/json<\/a><\/dt>\n\t\t\t<dt><a href=\"ps\/dot\">\/ps\/dot<\/a><\/dt>\n\t\t\t<dd>Information about active spans.<\/dd>\n\n\t\t\t<dt><a href=\"funcs\">\/funcs<\/a><\/dt>\n\t\t\t<dt><a href=\"funcs\/json\">\/funcs\/json<\/a><\/dt>\n\t\t\t<dt><a href=\"funcs\/dot\">\/funcs\/dot<\/a><\/dt>\n\t\t\t<dd>Information about the functions and their relations.<\/dd>\n\n\t\t\t<dt><a href=\"stats\">\/stats<\/a><\/dt>\n\t\t\t<dt><a href=\"stats\/json\">\/stats\/json<\/a><\/dt>\n\t\t\t<dt><a href=\"stats\/svg\">\/stats\/svg<\/a><\/dt>\n\t\t\t<dd>Statistics about all observed functions, scopes and values.<\/dd>\n\n\t\t\t<dt><a href=\"trace\/json\">\/trace\/json<\/a><\/dt>\n\t\t\t<dt><a href=\"trace\/svg\">\/trace\/svg<\/a><\/dt>\n\t\t\t<dd>Trace the next scope that matches one of the <code>?regex=<\/code> or <code>?trace_id=<\/code> query arguments. By default, regular expressions are matched ahead of time against all known Funcs, but perhaps the Func you want to trace hasn't been observed by the process yet, in which case the regex will fail to match anything. You can turn off this preselection behavior by providing <code>&preselect=false<\/code> as an additional query param. Be advised that until a trace completes, whether or not it has started, it adds a small amount of overhead (a comparison or two) to every monitored function.<\/dd>\n\t\t<\/dl>\n\t<\/body>\n<\/html>`))\n\treturn err\n}\n<commit_msg>present: remove auto-refresh<commit_after>\/\/ Copyright (C) 2015 Space Monkey, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage present\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/spacemonkeygo\/monkit\/v3\"\n)\n\n\/\/ Result writes the expected data to io.Writer and returns any errors if\n\/\/ found.\ntype Result func(io.Writer) error\n\nfunc curry(reg *monkit.Registry,\n\tf func(*monkit.Registry, io.Writer) error) func(io.Writer) error {\n\treturn func(w io.Writer) error {\n\t\treturn f(reg, w)\n\t}\n}\n\n\/\/ FromRequest takes a registry (usually the Default registry), an incoming\n\/\/ path, and optional query parameters, and returns a Result if possible.\n\/\/\n\/\/ FromRequest understands the following paths:\n\/\/ * \/ps, \/ps\/text - returns the result of SpansText\n\/\/ * \/ps\/dot - returns the result of SpansDot\n\/\/ * \/ps\/json - returns the result of SpansJSON\n\/\/ * \/funcs, \/funcs\/text - returns the result of FuncsText\n\/\/ * \/funcs\/dot - returns the result of FuncsDot\n\/\/ * \/funcs\/json - returns the result of FuncsJSON\n\/\/ * \/stats, \/stats\/text - returns the result of StatsText\n\/\/ * \/stats\/json - returns the result of StatsJSON\n\/\/ * \/trace\/svg - returns the result of TraceQuerySVG\n\/\/ * \/trace\/json - returns the result of TraceQueryJSON\n\/\/\n\/\/ The last two paths are worth discussing in more detail, as they take\n\/\/ query parameters. All trace endpoints require at least one of the following\n\/\/ two query parameters:\n\/\/ * regex - If provided, the very next Span that crosses a Func that has\n\/\/ a name that matches this regex will start a trace until that\n\/\/ triggering Span ends, provided the trace_id matches.\n\/\/ * trace_id - If provided, the very next Span on a trace with the given\n\/\/ trace id will start a trace until the triggering Span ends,\n\/\/ provided the regex matches. NOTE: the trace_id will be parsed\n\/\/ in hex.\n\/\/ By default, regular expressions are matched ahead of time against all known\n\/\/ Funcs, but perhaps the Func you want to trace hasn't been observed by the\n\/\/ process yet, in which case the regex will fail to match anything. You can\n\/\/ turn off this preselection behavior by providing preselect=false as an\n\/\/ additional query param. Be advised that until a trace completes, whether\n\/\/ or not it has started, it adds a small amount of overhead (a comparison or\n\/\/ two) to every monitored function.\nfunc FromRequest(reg *monkit.Registry, path string, query url.Values) (\n\tf Result, contentType string, err error) {\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t\/\/ wrap all functions with buffering\n\t\tunbuffered := f\n\t\tf = func(w io.Writer) (err error) {\n\t\t\tbuf := bufio.NewWriter(w)\n\t\t\terr = unbuffered(buf)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = buf.Flush()\n\t\t\treturn err\n\t\t}\n\t}()\n\n\tfirst, rest := shift(path)\n\tsecond, _ := shift(rest)\n\tswitch first {\n\tcase \"\":\n\t\treturn writeIndex, \"text\/html\", nil\n\tcase \"ps\":\n\t\tswitch second {\n\t\tcase \"\", \"text\":\n\t\t\treturn curry(reg, SpansText), \"text\/plain; charset=utf-8\", nil\n\t\tcase \"dot\":\n\t\t\treturn curry(reg, SpansDot), \"text\/plain; charset=utf-8\", nil\n\t\tcase \"json\":\n\t\t\treturn curry(reg, SpansJSON), \"application\/json; charset=utf-8\", nil\n\t\t}\n\n\tcase \"funcs\":\n\t\tswitch second {\n\t\tcase \"\", \"text\":\n\t\t\treturn curry(reg, FuncsText), \"text\/plain; charset=utf-8\", nil\n\t\tcase \"dot\":\n\t\t\treturn curry(reg, FuncsDot), \"text\/plain; charset=utf-8\", nil\n\t\tcase \"json\":\n\t\t\treturn curry(reg, FuncsJSON), \"application\/json; charset=utf-8\", nil\n\t\t}\n\n\tcase \"stats\":\n\t\tswitch second {\n\t\tcase \"\", \"text\", \"old\":\n\t\t\treturn func(w io.Writer) error {\n\t\t\t\treturn StatsText(reg, w)\n\t\t\t}, \"text\/plain; charset=utf-8\", nil\n\t\tcase \"json\":\n\t\t\treturn func(w io.Writer) error {\n\t\t\t\treturn StatsJSON(reg, w)\n\t\t\t}, \"application\/json; charset=utf-8\", nil\n\t\t}\n\n\tcase \"trace\":\n\t\tregexStr := query.Get(\"regex\")\n\t\ttraceIdStr := query.Get(\"trace_id\")\n\t\tif regexStr == \"\" && traceIdStr == \"\" {\n\t\t\treturn nil, \"\", errBadRequest.New(\"at least one of 'regex' or 'trace_id' \" +\n\t\t\t\t\"query parameters required\")\n\t\t}\n\t\tfnMatcher := func(*monkit.Func) bool { return true }\n\n\t\tif regexStr != \"\" {\n\t\t\tre, err := regexp.Compile(regexStr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, \"\", errBadRequest.New(\"invalid regex %#v: %v\",\n\t\t\t\t\tregexStr, err)\n\t\t\t}\n\t\t\tfnMatcher = func(f *monkit.Func) bool {\n\t\t\t\treturn re.MatchString(f.FullName())\n\t\t\t}\n\n\t\t\tpreselect := true\n\t\t\tif query.Get(\"preselect\") != \"\" {\n\t\t\t\tpreselect, err = strconv.ParseBool(query.Get(\"preselect\"))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, \"\", errBadRequest.New(\"invalid preselect %#v: %v\",\n\t\t\t\t\t\tquery.Get(\"preselect\"), err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif preselect {\n\t\t\t\tfuncs := map[*monkit.Func]bool{}\n\t\t\t\treg.Funcs(func(f *monkit.Func) {\n\t\t\t\t\tif fnMatcher(f) {\n\t\t\t\t\t\tfuncs[f] = true\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t\tif len(funcs) <= 0 {\n\t\t\t\t\treturn nil, \"\", errBadRequest.New(\"regex preselect matches 0 functions\")\n\t\t\t\t}\n\n\t\t\t\tfnMatcher = func(f *monkit.Func) bool { return funcs[f] }\n\t\t\t}\n\t\t}\n\n\t\tspanMatcher := func(s *monkit.Span) bool { return fnMatcher(s.Func()) }\n\n\t\tif traceIdStr != \"\" {\n\t\t\ttraceId, err := strconv.ParseUint(traceIdStr, 16, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, \"\", errBadRequest.New(\n\t\t\t\t\t\"trace_id expected to be hex unsigned 64 bit number: %#v\", traceIdStr)\n\t\t\t}\n\t\t\tspanMatcher = func(s *monkit.Span) bool {\n\t\t\t\treturn s.Trace().Id() == int64(traceId) && fnMatcher(s.Func())\n\t\t\t}\n\t\t}\n\n\t\tswitch second {\n\t\tcase \"svg\":\n\t\t\treturn func(w io.Writer) error {\n\t\t\t\treturn TraceQuerySVG(reg, w, spanMatcher)\n\t\t\t}, \"image\/svg+xml; charset=utf-8\", nil\n\t\tcase \"json\":\n\t\t\treturn func(w io.Writer) error {\n\t\t\t\treturn TraceQueryJSON(reg, w, spanMatcher)\n\t\t\t}, \"application\/json; charset=utf-8\", nil\n\t\t}\n\t}\n\treturn nil, \"\", errNotFound.New(\"path not found: %s\", path)\n}\n\nfunc shift(path string) (dir, left string) {\n\tpath = strings.TrimLeft(path, \"\/\")\n\tsplit := strings.Index(path, \"\/\")\n\tif split == -1 {\n\t\treturn path, \"\"\n\t}\n\treturn path[:split], path[split:]\n}\n\nfunc writeIndex(w io.Writer) error {\n\t_, err := w.Write([]byte(`<!DOCTYPE html>\n<html>\n\t<head>\n\t\t<meta charset=\"utf-8\">\n\t\t<title>Monkit<\/title>\n\t<\/head>\n\t<body>\n\t\t<dl style=\"max-width: 80ch;\">\n\t\t\t<dt><a href=\"ps\">\/ps<\/a><\/dt>\n\t\t\t<dt><a href=\"ps\/json\">\/ps\/json<\/a><\/dt>\n\t\t\t<dt><a href=\"ps\/dot\">\/ps\/dot<\/a><\/dt>\n\t\t\t<dd>Information about active spans.<\/dd>\n\n\t\t\t<dt><a href=\"funcs\">\/funcs<\/a><\/dt>\n\t\t\t<dt><a href=\"funcs\/json\">\/funcs\/json<\/a><\/dt>\n\t\t\t<dt><a href=\"funcs\/dot\">\/funcs\/dot<\/a><\/dt>\n\t\t\t<dd>Information about the functions and their relations.<\/dd>\n\n\t\t\t<dt><a href=\"stats\">\/stats<\/a><\/dt>\n\t\t\t<dt><a href=\"stats\/json\">\/stats\/json<\/a><\/dt>\n\t\t\t<dt><a href=\"stats\/svg\">\/stats\/svg<\/a><\/dt>\n\t\t\t<dd>Statistics about all observed functions, scopes and values.<\/dd>\n\n\t\t\t<dt><a href=\"trace\/json\">\/trace\/json<\/a><\/dt>\n\t\t\t<dt><a href=\"trace\/svg\">\/trace\/svg<\/a><\/dt>\n\t\t\t<dd>Trace the next scope that matches one of the <code>?regex=<\/code> or <code>?trace_id=<\/code> query arguments. By default, regular expressions are matched ahead of time against all known Funcs, but perhaps the Func you want to trace hasn't been observed by the process yet, in which case the regex will fail to match anything. You can turn off this preselection behavior by providing <code>&preselect=false<\/code> as an additional query param. Be advised that until a trace completes, whether or not it has started, it adds a small amount of overhead (a comparison or two) to every monitored function.<\/dd>\n\t\t<\/dl>\n\t<\/body>\n<\/html>`))\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Add retry logic to client's Validate method (#150)<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>- Refactored client.setNick<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>Check status code<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage integration\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gopkg.in\/check.v1\"\n)\n\nvar (\n\tT = NewCommand(\"tsuru\").WithArgs\n\tallPlatforms = []string{\n\t\t\"tsuru\/python\",\n\t\t\"tsuru\/go\",\n\t\t\"tsuru\/buildpack\",\n\t\t\"tsuru\/cordova\",\n\t\t\"tsuru\/elixir\",\n\t\t\"tsuru\/java\",\n\t\t\"tsuru\/nodejs\",\n\t\t\"tsuru\/php\",\n\t\t\"tsuru\/play\",\n\t\t\"tsuru\/pypy\",\n\t\t\"tsuru\/python3\",\n\t\t\"tsuru\/ruby\",\n\t\t\"tsuru\/static\",\n\t}\n\tallProvisioners = []string{\n\t\t\"docker\",\n\t\t\"swarm\",\n\t}\n\tflows = []ExecFlow{\n\t\tplatformsToInstall(),\n\t\tinstallerConfigTest(),\n\t\tinstallerComposeTest(),\n\t\tinstallerTest(),\n\t\ttargetTest(),\n\t\tloginTest(),\n\t\tremoveInstallNodes(),\n\t\tquotaTest(),\n\t\tteamTest(),\n\t\tpoolAdd(),\n\t\tplatformAdd(),\n\t\texampleApps(),\n\t}\n)\n\nvar installerConfig = `driver:\n name: virtualbox\n options:\n virtualbox-cpu-count: 2\n virtualbox-memory: 2048\nhosts:\n apps:\n size: 2\ncomponents:\n install-dashboard: false\n`\n\nfunc platformsToInstall() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"platformimages\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tfor _, platImg := range allPlatforms {\n\t\t\tenv.Add(\"platformimages\", platImg)\n\t\t}\n\t}\n\treturn flow\n}\n\nfunc installerConfigTest() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"installerconfig\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tf, err := ioutil.TempFile(\"\", \"installer-config\")\n\t\tc.Assert(err, check.IsNil)\n\t\tdefer f.Close()\n\t\tf.Write([]byte(installerConfig))\n\t\tc.Assert(err, check.IsNil)\n\t\tenv.Set(\"installerconfig\", f.Name())\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tres := NewCommand(\"rm\", \"{{.installerconfig}}\").Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc installerComposeTest() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"installercompose\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tcomposeFile, err := ioutil.TempFile(\"\", \"installer-compose\")\n\t\tc.Assert(err, check.IsNil)\n\t\tdefer composeFile.Close()\n\t\tf, err := ioutil.TempFile(\"\", \"installer-config\")\n\t\tc.Assert(err, check.IsNil)\n\t\tdefer func() {\n\t\t\tres := NewCommand(\"rm\", f.Name()).Run(env)\n\t\t\tc.Check(res, ResultOk)\n\t\t\tf.Close()\n\t\t}()\n\t\tres := T(\"install-config-init\", f.Name(), composeFile.Name()).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tcomposeData, err := ioutil.ReadFile(composeFile.Name())\n\t\tc.Assert(err, check.IsNil)\n\t\tcomposeData = []byte(strings.Replace(string(composeData), \"tsuru\/api:v1\", \"tsuru\/api:latest\", 1))\n\t\terr = ioutil.WriteFile(composeFile.Name(), composeData, 0644)\n\t\tenv.Set(\"installercompose\", composeFile.Name())\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tres := NewCommand(\"rm\", \"{{.installercompose}}\").Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc installerTest() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"targetaddr\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"install\", \"--config\", \"{{.installerconfig}}\", \"--compose\", \"{{.installercompose}}\").WithTimeout(30 * time.Minute).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tregex := regexp.MustCompile(`(?si).*Core Hosts:.*?([\\d.]+)\\s.*`)\n\t\tparts := regex.FindStringSubmatch(res.Stdout.String())\n\t\tc.Assert(parts, check.HasLen, 2)\n\t\ttargetHost := parts[1]\n\t\tregex = regexp.MustCompile(`(?si).*tsuru_tsuru.*?\\|\\s(\\d+)`)\n\t\tparts = regex.FindStringSubmatch(res.Stdout.String())\n\t\tc.Assert(parts, check.HasLen, 2)\n\t\ttargetPort := parts[1]\n\t\tenv.Set(\"targetaddr\", fmt.Sprintf(\"http:\/\/%s:%s\", targetHost, targetPort))\n\t\tregex = regexp.MustCompile(`\\| (https?[^\\s]+?) \\|`)\n\t\tallParts := regex.FindAllStringSubmatch(res.Stdout.String(), -1)\n\t\tfor _, parts = range allParts {\n\t\t\tc.Assert(parts, check.HasLen, 2)\n\t\t\tenv.Add(\"nodeopts\", fmt.Sprintf(\"--register address=%s --cacert ~\/.tsuru\/installs\/tsuru\/certs\/ca.pem --clientcert ~\/.tsuru\/installs\/tsuru\/certs\/cert.pem --clientkey ~\/.tsuru\/installs\/tsuru\/certs\/key.pem\", parts[1]))\n\t\t\tenv.Add(\"nodestoremove\", parts[1])\n\t\t}\n\t\tregex = regexp.MustCompile(`Username: ([[:print:]]+)`)\n\t\tparts = regex.FindStringSubmatch(res.Stdout.String())\n\t\tenv.Set(\"adminuser\", parts[1])\n\t\tregex = regexp.MustCompile(`Password: ([[:print:]]+)`)\n\t\tparts = regex.FindStringSubmatch(res.Stdout.String())\n\t\tenv.Set(\"adminpassword\", parts[1])\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tres := T(\"uninstall\", \"-y\").Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc targetTest() ExecFlow {\n\tflow := ExecFlow{}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\ttargetName := \"integration-target\"\n\t\tres := T(\"target-add\", targetName, \"{{.targetaddr}}\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tres = T(\"target-list\").Run(env)\n\t\tc.Assert(res, ResultMatches, Expected{Stdout: `\\s+` + targetName + ` .*`})\n\t\tres = T(\"target-set\", targetName).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc loginTest() ExecFlow {\n\tflow := ExecFlow{}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"login\", \"{{.adminuser}}\").WithInput(\"{{.adminpassword}}\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc removeInstallNodes() ExecFlow {\n\tflow := ExecFlow{\n\t\tmatrix: map[string]string{\n\t\t\t\"node\": \"nodestoremove\",\n\t\t},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"node-remove\", \"-y\", \"--no-rebalance\", \"{{.node}}\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc quotaTest() ExecFlow {\n\tflow := ExecFlow{\n\t\trequires: []string{\"adminuser\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"user-quota-change\", \"{{.adminuser}}\", \"100\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tres = T(\"user-quota-view\", \"{{.adminuser}}\").Run(env)\n\t\tc.Assert(res, ResultMatches, Expected{Stdout: `(?s)Apps usage.*\/100`})\n\t}\n\treturn flow\n}\n\nfunc teamTest() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"team\"},\n\t}\n\tteamName := \"integration-team\"\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"team-create\", teamName).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tenv.Set(\"team\", teamName)\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tres := T(\"team-remove\", \"-y\", teamName).Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc poolAdd() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"poolnames\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tfor _, prov := range allProvisioners {\n\t\t\tpoolName := \"ipool-\" + prov\n\t\t\tres := T(\"pool-add\", \"--provisioner\", prov, poolName).Run(env)\n\t\t\tc.Assert(res, ResultOk)\n\t\t\tenv.Add(\"poolnames\", poolName)\n\t\t\tres = T(\"pool-teams-add\", poolName, \"{{.team}}\").Run(env)\n\t\t\tc.Assert(res, ResultOk)\n\t\t\tres = T(\"node-add\", \"{{.nodeopts}}\", \"pool=\"+poolName).Run(env)\n\t\t\tc.Assert(res, ResultOk)\n\t\t\tres = T(\"event-list\").Run(env)\n\t\t\tc.Assert(res, ResultOk)\n\t\t\tnodeopts := env.All(\"nodeopts\")\n\t\t\tenv.Set(\"nodeopts\", append(nodeopts[1:], nodeopts[0])...)\n\t\t\tregex := regexp.MustCompile(`node.create.*?node:\\s+(.*?)\\s+`)\n\t\t\tparts := regex.FindStringSubmatch(res.Stdout.String())\n\t\t\tc.Assert(parts, check.HasLen, 2)\n\t\t\tenv.Add(\"nodeaddrs\", parts[1])\n\t\t\tregex = regexp.MustCompile(parts[1] + `.*?ready`)\n\t\t\tok := retry(time.Minute, func() bool {\n\t\t\t\tres = T(\"node-list\").Run(env)\n\t\t\t\treturn regex.MatchString(res.Stdout.String())\n\t\t\t})\n\t\t\tc.Assert(ok, check.Equals, true, check.Commentf(\"node not ready after 1 minute: %v\", res))\n\t\t}\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tfor _, node := range env.All(\"nodeaddrs\") {\n\t\t\tres := T(\"node-remove\", \"-y\", \"--no-rebalance\", node).Run(env)\n\t\t\tc.Check(res, ResultOk)\n\t\t}\n\t\tfor _, prov := range allProvisioners {\n\t\t\tpoolName := \"ipool-\" + prov\n\t\t\tres := T(\"pool-teams-remove\", poolName, \"{{.team}}\").Run(env)\n\t\t\tc.Check(res, ResultOk)\n\t\t\tres = T(\"pool-remove\", \"-y\", poolName).Run(env)\n\t\t\tc.Check(res, ResultOk)\n\t\t}\n\t}\n\treturn flow\n}\n\nfunc platformAdd() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"platforms\"},\n\t\tmatrix: map[string]string{\n\t\t\t\"platimg\": \"platformimages\",\n\t\t},\n\t\tparallel: true,\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\timg := env.Get(\"platimg\")\n\t\tsuffix := img[strings.LastIndex(img, \"\/\")+1:]\n\t\tplatName := \"iplat-\" + suffix\n\t\tres := T(\"platform-add\", platName, \"-i\", img).WithTimeout(15 * time.Minute).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tenv.Add(\"platforms\", platName)\n\t\tres = T(\"platform-list\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tc.Assert(res, ResultMatches, Expected{Stdout: \"(?s).*- \" + platName + \".*\"})\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\timg := env.Get(\"platimg\")\n\t\tsuffix := img[strings.LastIndex(img, \"\/\")+1:]\n\t\tplatName := \"iplat-\" + suffix\n\t\tres := T(\"platform-remove\", \"-y\", platName).Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc exampleApps() ExecFlow {\n\tflow := ExecFlow{\n\t\tmatrix: map[string]string{\n\t\t\t\"pool\": \"poolnames\",\n\t\t\t\"plat\": \"platforms\",\n\t\t},\n\t\tparallel: true,\n\t}\n\tappName := \"iapp-{{.plat}}-{{.pool}}\"\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"app-create\", appName, \"{{.plat}}\", \"-t\", \"{{.team}}\", \"-o\", \"{{.pool}}\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tres = T(\"app-info\", \"-a\", appName).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tplatRE := regexp.MustCompile(`(?s)Platform: (.*?)\\n`)\n\t\tparts := platRE.FindStringSubmatch(res.Stdout.String())\n\t\tc.Assert(parts, check.HasLen, 2)\n\t\tlang := strings.Replace(parts[1], \"iplat-\", \"\", -1)\n\t\tres = T(\"app-deploy\", \"-a\", appName, \"{{.examplesdir}}\/\"+lang+\"\/\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tres = T(\"app-info\", \"-a\", appName).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\taddrRE := regexp.MustCompile(`(?s)Address: (.*?)\\n`)\n\t\tparts = addrRE.FindStringSubmatch(res.Stdout.String())\n\t\tc.Assert(parts, check.HasLen, 2)\n\t\tcmd := NewCommand(\"curl\", \"-sSf\", \"http:\/\/\"+parts[1])\n\t\tok := retry(5*time.Minute, func() bool {\n\t\t\tres = cmd.Run(env)\n\t\t\treturn res.ExitCode == 0\n\t\t})\n\t\tc.Assert(ok, check.Equals, true, check.Commentf(\"invalid result: %v\", res))\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tres := T(\"app-remove\", \"-y\", \"-a\", appName).Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc (s *S) TestBase(c *check.C) {\n\tenv := NewEnvironment()\n\tif !env.Has(\"enabled\") {\n\t\treturn\n\t}\n\tvar executedFlows []*ExecFlow\n\tdefer func() {\n\t\tfor i := len(executedFlows) - 1; i >= 0; i-- {\n\t\t\texecutedFlows[i].Rollback(c, env)\n\t\t}\n\t}()\n\tfor i := range flows {\n\t\tf := &flows[i]\n\t\tif len(f.provides) > 0 {\n\t\t\tprovidesAll := true\n\t\t\tfor _, envVar := range f.provides {\n\t\t\t\tif env.Get(envVar) == \"\" {\n\t\t\t\t\tprovidesAll = false\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif providesAll {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\texecutedFlows = append(executedFlows, f)\n\t\tf.Run(c, env)\n\t}\n}\n<commit_msg>integration: uses installerconfig on uninstall<commit_after>\/\/ Copyright 2017 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage integration\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gopkg.in\/check.v1\"\n)\n\nvar (\n\tT = NewCommand(\"tsuru\").WithArgs\n\tallPlatforms = []string{\n\t\t\"tsuru\/python\",\n\t\t\"tsuru\/go\",\n\t\t\"tsuru\/buildpack\",\n\t\t\"tsuru\/cordova\",\n\t\t\"tsuru\/elixir\",\n\t\t\"tsuru\/java\",\n\t\t\"tsuru\/nodejs\",\n\t\t\"tsuru\/php\",\n\t\t\"tsuru\/play\",\n\t\t\"tsuru\/pypy\",\n\t\t\"tsuru\/python3\",\n\t\t\"tsuru\/ruby\",\n\t\t\"tsuru\/static\",\n\t}\n\tallProvisioners = []string{\n\t\t\"docker\",\n\t\t\"swarm\",\n\t}\n\tflows = []ExecFlow{\n\t\tplatformsToInstall(),\n\t\tinstallerConfigTest(),\n\t\tinstallerComposeTest(),\n\t\tinstallerTest(),\n\t\ttargetTest(),\n\t\tloginTest(),\n\t\tremoveInstallNodes(),\n\t\tquotaTest(),\n\t\tteamTest(),\n\t\tpoolAdd(),\n\t\tplatformAdd(),\n\t\texampleApps(),\n\t}\n)\n\nvar installerConfig = `driver:\n name: virtualbox\n options:\n virtualbox-cpu-count: 2\n virtualbox-memory: 2048\nhosts:\n apps:\n size: 2\ncomponents:\n install-dashboard: false\n`\n\nfunc platformsToInstall() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"platformimages\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tfor _, platImg := range allPlatforms {\n\t\t\tenv.Add(\"platformimages\", platImg)\n\t\t}\n\t}\n\treturn flow\n}\n\nfunc installerConfigTest() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"installerconfig\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tf, err := ioutil.TempFile(\"\", \"installer-config\")\n\t\tc.Assert(err, check.IsNil)\n\t\tdefer f.Close()\n\t\tf.Write([]byte(installerConfig))\n\t\tc.Assert(err, check.IsNil)\n\t\tenv.Set(\"installerconfig\", f.Name())\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tres := NewCommand(\"rm\", \"{{.installerconfig}}\").Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc installerComposeTest() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"installercompose\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tcomposeFile, err := ioutil.TempFile(\"\", \"installer-compose\")\n\t\tc.Assert(err, check.IsNil)\n\t\tdefer composeFile.Close()\n\t\tf, err := ioutil.TempFile(\"\", \"installer-config\")\n\t\tc.Assert(err, check.IsNil)\n\t\tdefer func() {\n\t\t\tres := NewCommand(\"rm\", f.Name()).Run(env)\n\t\t\tc.Check(res, ResultOk)\n\t\t\tf.Close()\n\t\t}()\n\t\tres := T(\"install-config-init\", f.Name(), composeFile.Name()).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tcomposeData, err := ioutil.ReadFile(composeFile.Name())\n\t\tc.Assert(err, check.IsNil)\n\t\tcomposeData = []byte(strings.Replace(string(composeData), \"tsuru\/api:v1\", \"tsuru\/api:latest\", 1))\n\t\terr = ioutil.WriteFile(composeFile.Name(), composeData, 0644)\n\t\tenv.Set(\"installercompose\", composeFile.Name())\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tres := NewCommand(\"rm\", \"{{.installercompose}}\").Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc installerTest() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"targetaddr\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"install\", \"--config\", \"{{.installerconfig}}\", \"--compose\", \"{{.installercompose}}\").WithTimeout(30 * time.Minute).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tregex := regexp.MustCompile(`(?si).*Core Hosts:.*?([\\d.]+)\\s.*`)\n\t\tparts := regex.FindStringSubmatch(res.Stdout.String())\n\t\tc.Assert(parts, check.HasLen, 2)\n\t\ttargetHost := parts[1]\n\t\tregex = regexp.MustCompile(`(?si).*tsuru_tsuru.*?\\|\\s(\\d+)`)\n\t\tparts = regex.FindStringSubmatch(res.Stdout.String())\n\t\tc.Assert(parts, check.HasLen, 2)\n\t\ttargetPort := parts[1]\n\t\tenv.Set(\"targetaddr\", fmt.Sprintf(\"http:\/\/%s:%s\", targetHost, targetPort))\n\t\tregex = regexp.MustCompile(`\\| (https?[^\\s]+?) \\|`)\n\t\tallParts := regex.FindAllStringSubmatch(res.Stdout.String(), -1)\n\t\tfor _, parts = range allParts {\n\t\t\tc.Assert(parts, check.HasLen, 2)\n\t\t\tenv.Add(\"nodeopts\", fmt.Sprintf(\"--register address=%s --cacert ~\/.tsuru\/installs\/tsuru\/certs\/ca.pem --clientcert ~\/.tsuru\/installs\/tsuru\/certs\/cert.pem --clientkey ~\/.tsuru\/installs\/tsuru\/certs\/key.pem\", parts[1]))\n\t\t\tenv.Add(\"nodestoremove\", parts[1])\n\t\t}\n\t\tregex = regexp.MustCompile(`Username: ([[:print:]]+)`)\n\t\tparts = regex.FindStringSubmatch(res.Stdout.String())\n\t\tenv.Set(\"adminuser\", parts[1])\n\t\tregex = regexp.MustCompile(`Password: ([[:print:]]+)`)\n\t\tparts = regex.FindStringSubmatch(res.Stdout.String())\n\t\tenv.Set(\"adminpassword\", parts[1])\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tres := T(\"uninstall\", \"--config\", \"{{.installerconfig}}\", \"-y\").Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc targetTest() ExecFlow {\n\tflow := ExecFlow{}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\ttargetName := \"integration-target\"\n\t\tres := T(\"target-add\", targetName, \"{{.targetaddr}}\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tres = T(\"target-list\").Run(env)\n\t\tc.Assert(res, ResultMatches, Expected{Stdout: `\\s+` + targetName + ` .*`})\n\t\tres = T(\"target-set\", targetName).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc loginTest() ExecFlow {\n\tflow := ExecFlow{}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"login\", \"{{.adminuser}}\").WithInput(\"{{.adminpassword}}\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc removeInstallNodes() ExecFlow {\n\tflow := ExecFlow{\n\t\tmatrix: map[string]string{\n\t\t\t\"node\": \"nodestoremove\",\n\t\t},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"node-remove\", \"-y\", \"--no-rebalance\", \"{{.node}}\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc quotaTest() ExecFlow {\n\tflow := ExecFlow{\n\t\trequires: []string{\"adminuser\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"user-quota-change\", \"{{.adminuser}}\", \"100\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tres = T(\"user-quota-view\", \"{{.adminuser}}\").Run(env)\n\t\tc.Assert(res, ResultMatches, Expected{Stdout: `(?s)Apps usage.*\/100`})\n\t}\n\treturn flow\n}\n\nfunc teamTest() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"team\"},\n\t}\n\tteamName := \"integration-team\"\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"team-create\", teamName).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tenv.Set(\"team\", teamName)\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tres := T(\"team-remove\", \"-y\", teamName).Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc poolAdd() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"poolnames\"},\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tfor _, prov := range allProvisioners {\n\t\t\tpoolName := \"ipool-\" + prov\n\t\t\tres := T(\"pool-add\", \"--provisioner\", prov, poolName).Run(env)\n\t\t\tc.Assert(res, ResultOk)\n\t\t\tenv.Add(\"poolnames\", poolName)\n\t\t\tres = T(\"pool-teams-add\", poolName, \"{{.team}}\").Run(env)\n\t\t\tc.Assert(res, ResultOk)\n\t\t\tres = T(\"node-add\", \"{{.nodeopts}}\", \"pool=\"+poolName).Run(env)\n\t\t\tc.Assert(res, ResultOk)\n\t\t\tres = T(\"event-list\").Run(env)\n\t\t\tc.Assert(res, ResultOk)\n\t\t\tnodeopts := env.All(\"nodeopts\")\n\t\t\tenv.Set(\"nodeopts\", append(nodeopts[1:], nodeopts[0])...)\n\t\t\tregex := regexp.MustCompile(`node.create.*?node:\\s+(.*?)\\s+`)\n\t\t\tparts := regex.FindStringSubmatch(res.Stdout.String())\n\t\t\tc.Assert(parts, check.HasLen, 2)\n\t\t\tenv.Add(\"nodeaddrs\", parts[1])\n\t\t\tregex = regexp.MustCompile(parts[1] + `.*?ready`)\n\t\t\tok := retry(time.Minute, func() bool {\n\t\t\t\tres = T(\"node-list\").Run(env)\n\t\t\t\treturn regex.MatchString(res.Stdout.String())\n\t\t\t})\n\t\t\tc.Assert(ok, check.Equals, true, check.Commentf(\"node not ready after 1 minute: %v\", res))\n\t\t}\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tfor _, node := range env.All(\"nodeaddrs\") {\n\t\t\tres := T(\"node-remove\", \"-y\", \"--no-rebalance\", node).Run(env)\n\t\t\tc.Check(res, ResultOk)\n\t\t}\n\t\tfor _, prov := range allProvisioners {\n\t\t\tpoolName := \"ipool-\" + prov\n\t\t\tres := T(\"pool-teams-remove\", poolName, \"{{.team}}\").Run(env)\n\t\t\tc.Check(res, ResultOk)\n\t\t\tres = T(\"pool-remove\", \"-y\", poolName).Run(env)\n\t\t\tc.Check(res, ResultOk)\n\t\t}\n\t}\n\treturn flow\n}\n\nfunc platformAdd() ExecFlow {\n\tflow := ExecFlow{\n\t\tprovides: []string{\"platforms\"},\n\t\tmatrix: map[string]string{\n\t\t\t\"platimg\": \"platformimages\",\n\t\t},\n\t\tparallel: true,\n\t}\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\timg := env.Get(\"platimg\")\n\t\tsuffix := img[strings.LastIndex(img, \"\/\")+1:]\n\t\tplatName := \"iplat-\" + suffix\n\t\tres := T(\"platform-add\", platName, \"-i\", img).WithTimeout(15 * time.Minute).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tenv.Add(\"platforms\", platName)\n\t\tres = T(\"platform-list\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tc.Assert(res, ResultMatches, Expected{Stdout: \"(?s).*- \" + platName + \".*\"})\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\timg := env.Get(\"platimg\")\n\t\tsuffix := img[strings.LastIndex(img, \"\/\")+1:]\n\t\tplatName := \"iplat-\" + suffix\n\t\tres := T(\"platform-remove\", \"-y\", platName).Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc exampleApps() ExecFlow {\n\tflow := ExecFlow{\n\t\tmatrix: map[string]string{\n\t\t\t\"pool\": \"poolnames\",\n\t\t\t\"plat\": \"platforms\",\n\t\t},\n\t\tparallel: true,\n\t}\n\tappName := \"iapp-{{.plat}}-{{.pool}}\"\n\tflow.forward = func(c *check.C, env *Environment) {\n\t\tres := T(\"app-create\", appName, \"{{.plat}}\", \"-t\", \"{{.team}}\", \"-o\", \"{{.pool}}\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tres = T(\"app-info\", \"-a\", appName).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tplatRE := regexp.MustCompile(`(?s)Platform: (.*?)\\n`)\n\t\tparts := platRE.FindStringSubmatch(res.Stdout.String())\n\t\tc.Assert(parts, check.HasLen, 2)\n\t\tlang := strings.Replace(parts[1], \"iplat-\", \"\", -1)\n\t\tres = T(\"app-deploy\", \"-a\", appName, \"{{.examplesdir}}\/\"+lang+\"\/\").Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\tres = T(\"app-info\", \"-a\", appName).Run(env)\n\t\tc.Assert(res, ResultOk)\n\t\taddrRE := regexp.MustCompile(`(?s)Address: (.*?)\\n`)\n\t\tparts = addrRE.FindStringSubmatch(res.Stdout.String())\n\t\tc.Assert(parts, check.HasLen, 2)\n\t\tcmd := NewCommand(\"curl\", \"-sSf\", \"http:\/\/\"+parts[1])\n\t\tok := retry(5*time.Minute, func() bool {\n\t\t\tres = cmd.Run(env)\n\t\t\treturn res.ExitCode == 0\n\t\t})\n\t\tc.Assert(ok, check.Equals, true, check.Commentf(\"invalid result: %v\", res))\n\t}\n\tflow.backward = func(c *check.C, env *Environment) {\n\t\tres := T(\"app-remove\", \"-y\", \"-a\", appName).Run(env)\n\t\tc.Check(res, ResultOk)\n\t}\n\treturn flow\n}\n\nfunc (s *S) TestBase(c *check.C) {\n\tenv := NewEnvironment()\n\tif !env.Has(\"enabled\") {\n\t\treturn\n\t}\n\tvar executedFlows []*ExecFlow\n\tdefer func() {\n\t\tfor i := len(executedFlows) - 1; i >= 0; i-- {\n\t\t\texecutedFlows[i].Rollback(c, env)\n\t\t}\n\t}()\n\tfor i := range flows {\n\t\tf := &flows[i]\n\t\tif len(f.provides) > 0 {\n\t\t\tprovidesAll := true\n\t\t\tfor _, envVar := range f.provides {\n\t\t\t\tif env.Get(envVar) == \"\" {\n\t\t\t\t\tprovidesAll = false\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif providesAll {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\texecutedFlows = append(executedFlows, f)\n\t\tf.Run(c, env)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 go-dockerclient authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/go:build docker_integration\n\/\/ +build docker_integration\n\npackage docker\n\nconst integrationDockerImage = \"mcr.microsoft.com\/dotnet\/sdk:latest\"\n\nfunc integrationCreateContainerOpts(imageName string, hostConfig *HostConfig) CreateContainerOptions {\n\treturn CreateContainerOptions{\n\t\tConfig: &Config{\n\t\t\tImage: imageName,\n\t\t\tCmd: []string{\"powershell\", \"-Command\", `Write-Host \"hello hello\"`},\n\t\t},\n\t\tHostConfig: hostConfig,\n\t}\n}\n<commit_msg>Try a different windows base image for integration tests<commit_after>\/\/ Copyright 2019 go-dockerclient authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/go:build docker_integration\n\/\/ +build docker_integration\n\npackage docker\n\nconst integrationDockerImage = \"mcr.microsoft.com\/windows\/servercore:ltsc2022\"\n\nfunc integrationCreateContainerOpts(imageName string, hostConfig *HostConfig) CreateContainerOptions {\n\treturn CreateContainerOptions{\n\t\tConfig: &Config{\n\t\t\tImage: imageName,\n\t\t\tCmd: []string{\"powershell\", \"-Command\", `Write-Host \"hello hello\"`},\n\t\t},\n\t\tHostConfig: hostConfig,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 by caixw, All rights reserved.\n\/\/ Use of this source code is governed by a MIT\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package watch 提供热编译功能。\n\/\/\n\/\/ 功能与 github.com\/caixw\/gobuild 相似。\npackage watch\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\n\t\"github.com\/caixw\/gobuild\"\n)\n\n\/\/ TODO 自动编译 .proto 文件?\n\nvar (\n\trecursive, showIgnore bool\n\tmainFiles, outputName, extString, appArgs string\n\n\tflagset = flag.NewFlagSet(\"watch\", flag.ExitOnError)\n)\n\nfunc init() {\n\tflagset.BoolVar(&recursive, \"r\", true, \"是否查找子目录;\")\n\tflagset.BoolVar(&showIgnore, \"i\", false, \"是否显示被标记为 IGNORE 的日志内容;\")\n\tflagset.StringVar(&outputName, \"o\", \"\", \"指定输出名称,程序的工作目录随之改变;\")\n\tflagset.StringVar(&appArgs, \"x\", \"\", \"传递给编译程序的参数;\")\n\tflagset.StringVar(&extString, \"ext\", \"go\", \"指定监视的文件扩展,区分大小写。* 表示监视所有类型文件,空值代表不监视任何文件;\")\n\tflagset.StringVar(&mainFiles, \"main\", \"\", \"指定需要编译的文件;\")\n}\n\n\/\/ Do 执行子命令\nfunc Do(output io.Writer) error {\n\tif err := flagset.Parse(os.Args[2:]); err != nil {\n\t\treturn err\n\t}\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdirs := append([]string{wd}, flag.Args()...)\n\n\tlogs := gobuild.NewConsoleLogs(showIgnore)\n\tdefer logs.Stop()\n\treturn gobuild.Build(logs.Logs, mainFiles, outputName, extString, recursive, appArgs, dirs...)\n}\n\n\/\/ Usage 当前子命令的用法\nfunc Usage(output io.Writer) {\n\tfmt.Fprintln(output, `热编译当前目录下的项目\n\n命令行语法:\n web watch [options] [dependents]\n\n options:`)\n\n\tflagset.SetOutput(output)\n\tflagset.PrintDefaults()\n\n\tfmt.Fprintln(output, `\n dependents:\n 指定其它依赖的目录,只能出现在命令的尾部。\n\n\n常见用法:\n\n web watch \n 监视当前目录,若有变动,则重新编译当前目录下的 *.go 文件;\n\n web watch -main=main.go\n 监视当前目录,若有变动,则重新编译当前目录下的 main.go 文件;\n\n web watch -main=\"main.go\" dir1 dir2\n 监视当前目录及 dir1 和 dir2,若有变动,则重新编译当前目录下的 main.go 文件;\n\n\nNOTE: 不会监视隐藏文件和隐藏目录下的文件。`)\n}\n<commit_msg>[cmd\/web] 更新文档<commit_after>\/\/ Copyright 2018 by caixw, All rights reserved.\n\/\/ Use of this source code is governed by a MIT\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package watch 提供热编译功能。\n\/\/\n\/\/ 功能与 github.com\/caixw\/gobuild 相同。\npackage watch\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\n\t\"github.com\/caixw\/gobuild\"\n)\n\nvar (\n\trecursive, showIgnore bool\n\tmainFiles, outputName, extString, appArgs string\n\n\tflagset = flag.NewFlagSet(\"watch\", flag.ExitOnError)\n)\n\nfunc init() {\n\tflagset.BoolVar(&recursive, \"r\", true, \"是否查找子目录;\")\n\tflagset.BoolVar(&showIgnore, \"i\", false, \"是否显示被标记为 IGNORE 的日志内容;\")\n\tflagset.StringVar(&outputName, \"o\", \"\", \"指定输出名称,程序的工作目录随之改变;\")\n\tflagset.StringVar(&appArgs, \"x\", \"\", \"传递给编译程序的参数;\")\n\tflagset.StringVar(&extString, \"ext\", \"go\", \"指定监视的文件扩展,区分大小写。* 表示监视所有类型文件,空值代表不监视任何文件;\")\n\tflagset.StringVar(&mainFiles, \"main\", \"\", \"指定需要编译的文件;\")\n}\n\n\/\/ Do 执行子命令\nfunc Do(output io.Writer) error {\n\tif err := flagset.Parse(os.Args[2:]); err != nil {\n\t\treturn err\n\t}\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdirs := append([]string{wd}, flag.Args()...)\n\n\tlogs := gobuild.NewConsoleLogs(showIgnore)\n\tdefer logs.Stop()\n\treturn gobuild.Build(logs.Logs, mainFiles, outputName, extString, recursive, appArgs, dirs...)\n}\n\n\/\/ Usage 当前子命令的用法\nfunc Usage(output io.Writer) {\n\tfmt.Fprintln(output, `热编译当前目录下的项目\n\n命令行语法:\n web watch [options] [dependents]\n\n options:`)\n\n\tflagset.SetOutput(output)\n\tflagset.PrintDefaults()\n\n\tfmt.Fprintln(output, `\n dependents:\n 指定其它依赖的目录,只能出现在命令的尾部。\n\n\n常见用法:\n\n web watch \n 监视当前目录,若有变动,则重新编译当前目录下的 *.go 文件;\n\n web watch -main=main.go\n 监视当前目录,若有变动,则重新编译当前目录下的 main.go 文件;\n\n web watch -main=\"main.go\" dir1 dir2\n 监视当前目录及 dir1 和 dir2,若有变动,则重新编译当前目录下的 main.go 文件;\n\n\nNOTE: 不会监视隐藏文件和隐藏目录下的文件。`)\n}\n<|endoftext|>"} {"text":"<commit_before>package hscan\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/\/ structMap contains the map of struct fields for target structs\n\/\/ indexed by the struct type.\ntype structMap struct {\n\tm sync.Map\n}\n\nfunc newStructMap() *structMap {\n\treturn new(structMap)\n}\n\nfunc (s *structMap) get(t reflect.Type) *structSpec {\n\tif v, ok := s.m.Load(t); ok {\n\t\treturn v.(*structSpec)\n\t}\n\n\tspec := newStructSpec(t, \"redis\")\n\ts.m.Store(t, spec)\n\treturn spec\n}\n\n\/\/------------------------------------------------------------------------------\n\n\/\/ structSpec contains the list of all fields in a target struct.\ntype structSpec struct {\n\tm map[string]*structField\n}\n\nfunc (s *structSpec) set(tag string, sf *structField) {\n\ts.m[tag] = sf\n}\n\nfunc (s *structSpec) get(tag string) (*structField, bool) {\n\tf, ok := s.m[tag]\n\treturn f, ok\n}\n\nfunc newStructSpec(t reflect.Type, fieldTag string) *structSpec {\n\tout := &structSpec{\n\t\tm: make(map[string]*structField),\n\t}\n\n\tnum := t.NumField()\n\tfor i := 0; i < num; i++ {\n\t\tf := t.Field(i)\n\n\t\ttag := f.Tag.Get(fieldTag)\n\t\tif tag == \"\" || tag == \"-\" {\n\t\t\tcontinue\n\t\t}\n\n\t\ttag = strings.Split(tag, \",\")[0]\n\t\tif tag == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Use the built-in decoder.\n\t\tout.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]})\n\t}\n\n\treturn out\n}\n\n\/\/------------------------------------------------------------------------------\n\n\/\/ structField represents a single field in a target struct.\ntype structField struct {\n\tindex int\n\tfn decoderFunc\n}\n\n\/\/------------------------------------------------------------------------------\n\ntype StructValue struct {\n\tspec *structSpec\n\tvalue reflect.Value\n}\n\nfunc (s StructValue) Scan(key string, value string) error {\n\tfield, ok := s.spec.m[key]\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn field.fn(s.value.Field(field.index), value)\n}\n<commit_msg>Fix build<commit_after>package hscan\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/\/ structMap contains the map of struct fields for target structs\n\/\/ indexed by the struct type.\ntype structMap struct {\n\tm sync.Map\n}\n\nfunc newStructMap() *structMap {\n\treturn new(structMap)\n}\n\nfunc (s *structMap) get(t reflect.Type) *structSpec {\n\tif v, ok := s.m.Load(t); ok {\n\t\treturn v.(*structSpec)\n\t}\n\n\tspec := newStructSpec(t, \"redis\")\n\ts.m.Store(t, spec)\n\treturn spec\n}\n\n\/\/------------------------------------------------------------------------------\n\n\/\/ structSpec contains the list of all fields in a target struct.\ntype structSpec struct {\n\tm map[string]*structField\n}\n\nfunc (s *structSpec) set(tag string, sf *structField) {\n\ts.m[tag] = sf\n}\n\nfunc newStructSpec(t reflect.Type, fieldTag string) *structSpec {\n\tout := &structSpec{\n\t\tm: make(map[string]*structField),\n\t}\n\n\tnum := t.NumField()\n\tfor i := 0; i < num; i++ {\n\t\tf := t.Field(i)\n\n\t\ttag := f.Tag.Get(fieldTag)\n\t\tif tag == \"\" || tag == \"-\" {\n\t\t\tcontinue\n\t\t}\n\n\t\ttag = strings.Split(tag, \",\")[0]\n\t\tif tag == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Use the built-in decoder.\n\t\tout.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]})\n\t}\n\n\treturn out\n}\n\n\/\/------------------------------------------------------------------------------\n\n\/\/ structField represents a single field in a target struct.\ntype structField struct {\n\tindex int\n\tfn decoderFunc\n}\n\n\/\/------------------------------------------------------------------------------\n\ntype StructValue struct {\n\tspec *structSpec\n\tvalue reflect.Value\n}\n\nfunc (s StructValue) Scan(key string, value string) error {\n\tfield, ok := s.spec.m[key]\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn field.fn(s.value.Field(field.index), value)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package version records versioning information about this module.\npackage version\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n\/\/ These constants determine the current version of this module.\n\/\/\n\/\/\n\/\/ For our release process, we enforce the following rules:\n\/\/\t* Tagged releases use a tag that is identical to String.\n\/\/\t* Tagged releases never reference a commit where the String\n\/\/\tcontains \"devel\".\n\/\/\t* The set of all commits in this repository where String\n\/\/\tdoes not contain \"devel\" must have a unique String.\n\/\/\n\/\/\n\/\/ Steps for tagging a new release:\n\/\/\t1. Create a new CL.\n\/\/\n\/\/\t2. Update Minor, Patch, and\/or PreRelease as necessary.\n\/\/\tPreRelease must not contain the string \"devel\".\n\/\/\n\/\/\t3. Since the last released minor version, have there been any changes to\n\/\/\tgenerator that relies on new functionality in the runtime?\n\/\/\tIf yes, then increment RequiredGenerated.\n\/\/\n\/\/\t4. Since the last released minor version, have there been any changes to\n\/\/\tthe runtime that removes support for old .pb.go source code?\n\/\/\tIf yes, then increment SupportMinimum.\n\/\/\n\/\/\t5. Send out the CL for review and submit it.\n\/\/\tNote that the next CL in step 8 must be submitted after this CL\n\/\/\twithout any other CLs in-between.\n\/\/\n\/\/\t6. Tag a new version, where the tag is is the current String.\n\/\/\n\/\/\t7. Write release notes for all notable changes\n\/\/\tbetween this release and the last release.\n\/\/\n\/\/\t8. Create a new CL.\n\/\/\n\/\/\t9. Update PreRelease to include the string \"devel\".\n\/\/\tFor example: \"\" -> \"devel\" or \"rc.1\" -> \"rc.1.devel\"\n\/\/\n\/\/\t10. Send out the CL for review and submit it.\nconst (\n\tMajor = 1\n\tMinor = 26\n\tPatch = 0\n\tPreRelease = \"devel\"\n)\n\n\/\/ String formats the version string for this module in semver format.\n\/\/\n\/\/ Examples:\n\/\/\tv1.20.1\n\/\/\tv1.21.0-rc.1\nfunc String() string {\n\tv := fmt.Sprintf(\"v%d.%d.%d\", Major, Minor, Patch)\n\tif PreRelease != \"\" {\n\t\tv += \"-\" + PreRelease\n\n\t\t\/\/ TODO: Add metadata about the commit or build hash.\n\t\t\/\/ See https:\/\/golang.org\/issue\/29814\n\t\t\/\/ See https:\/\/golang.org\/issue\/33533\n\t\tvar metadata string\n\t\tif strings.Contains(PreRelease, \"devel\") && metadata != \"\" {\n\t\t\tv += \"+\" + metadata\n\t\t}\n\t}\n\treturn v\n}\n<commit_msg>all: release v1.27.0<commit_after>\/\/ Copyright 2019 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package version records versioning information about this module.\npackage version\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n\/\/ These constants determine the current version of this module.\n\/\/\n\/\/\n\/\/ For our release process, we enforce the following rules:\n\/\/\t* Tagged releases use a tag that is identical to String.\n\/\/\t* Tagged releases never reference a commit where the String\n\/\/\tcontains \"devel\".\n\/\/\t* The set of all commits in this repository where String\n\/\/\tdoes not contain \"devel\" must have a unique String.\n\/\/\n\/\/\n\/\/ Steps for tagging a new release:\n\/\/\t1. Create a new CL.\n\/\/\n\/\/\t2. Update Minor, Patch, and\/or PreRelease as necessary.\n\/\/\tPreRelease must not contain the string \"devel\".\n\/\/\n\/\/\t3. Since the last released minor version, have there been any changes to\n\/\/\tgenerator that relies on new functionality in the runtime?\n\/\/\tIf yes, then increment RequiredGenerated.\n\/\/\n\/\/\t4. Since the last released minor version, have there been any changes to\n\/\/\tthe runtime that removes support for old .pb.go source code?\n\/\/\tIf yes, then increment SupportMinimum.\n\/\/\n\/\/\t5. Send out the CL for review and submit it.\n\/\/\tNote that the next CL in step 8 must be submitted after this CL\n\/\/\twithout any other CLs in-between.\n\/\/\n\/\/\t6. Tag a new version, where the tag is is the current String.\n\/\/\n\/\/\t7. Write release notes for all notable changes\n\/\/\tbetween this release and the last release.\n\/\/\n\/\/\t8. Create a new CL.\n\/\/\n\/\/\t9. Update PreRelease to include the string \"devel\".\n\/\/\tFor example: \"\" -> \"devel\" or \"rc.1\" -> \"rc.1.devel\"\n\/\/\n\/\/\t10. Send out the CL for review and submit it.\nconst (\n\tMajor = 1\n\tMinor = 27\n\tPatch = 0\n\tPreRelease = \"\"\n)\n\n\/\/ String formats the version string for this module in semver format.\n\/\/\n\/\/ Examples:\n\/\/\tv1.20.1\n\/\/\tv1.21.0-rc.1\nfunc String() string {\n\tv := fmt.Sprintf(\"v%d.%d.%d\", Major, Minor, Patch)\n\tif PreRelease != \"\" {\n\t\tv += \"-\" + PreRelease\n\n\t\t\/\/ TODO: Add metadata about the commit or build hash.\n\t\t\/\/ See https:\/\/golang.org\/issue\/29814\n\t\t\/\/ See https:\/\/golang.org\/issue\/33533\n\t\tvar metadata string\n\t\tif strings.Contains(PreRelease, \"devel\") && metadata != \"\" {\n\t\t\tv += \"+\" + metadata\n\t\t}\n\t}\n\treturn v\n}\n<|endoftext|>"} {"text":"<commit_before>package filesystem\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n\n\t. \"gopkg.in\/check.v1\"\n\t\"gopkg.in\/src-d\/go-billy.v3\"\n\t\"gopkg.in\/src-d\/go-billy.v3\/memfs\"\n\t\"gopkg.in\/src-d\/go-git.v4\/plumbing\"\n\t\"gopkg.in\/src-d\/go-git.v4\/utils\/merkletrie\"\n\t\"gopkg.in\/src-d\/go-git.v4\/utils\/merkletrie\/noder\"\n)\n\nfunc Test(t *testing.T) { TestingT(t) }\n\ntype NoderSuite struct{}\n\nvar _ = Suite(&NoderSuite{})\n\nfunc (s *NoderSuite) TestDiff(c *C) {\n\tfsA := memfs.New()\n\tWriteFile(fsA, \"foo\", []byte(\"foo\"), 0644)\n\tWriteFile(fsA, \"qux\/bar\", []byte(\"foo\"), 0644)\n\tWriteFile(fsA, \"qux\/qux\", []byte(\"foo\"), 0644)\n\tfsA.Symlink(\"foo\", \"bar\")\n\n\tfsB := memfs.New()\n\tWriteFile(fsB, \"foo\", []byte(\"foo\"), 0644)\n\tWriteFile(fsB, \"qux\/bar\", []byte(\"foo\"), 0644)\n\tWriteFile(fsB, \"qux\/qux\", []byte(\"foo\"), 0644)\n\tfsB.Symlink(\"foo\", \"bar\")\n\n\tch, err := merkletrie.DiffTree(\n\t\tNewRootNode(fsA, nil),\n\t\tNewRootNode(fsB, nil),\n\t\tIsEquals,\n\t)\n\n\tc.Assert(err, IsNil)\n\tc.Assert(ch, HasLen, 0)\n}\n\nfunc (s *NoderSuite) TestDiffChangeLink(c *C) {\n\tfsA := memfs.New()\n\tfsA.Symlink(\"qux\", \"foo\")\n\n\tfsB := memfs.New()\n\tfsB.Symlink(\"bar\", \"foo\")\n\n\tch, err := merkletrie.DiffTree(\n\t\tNewRootNode(fsA, nil),\n\t\tNewRootNode(fsB, nil),\n\t\tIsEquals,\n\t)\n\n\tc.Assert(err, IsNil)\n\tc.Assert(ch, HasLen, 1)\n}\n\nfunc (s *NoderSuite) TestDiffChangeContent(c *C) {\n\tfsA := memfs.New()\n\tWriteFile(fsA, \"foo\", []byte(\"foo\"), 0644)\n\tWriteFile(fsA, \"qux\/bar\", []byte(\"foo\"), 0644)\n\tWriteFile(fsA, \"qux\/qux\", []byte(\"foo\"), 0644)\n\n\tfsB := memfs.New()\n\tWriteFile(fsB, \"foo\", []byte(\"foo\"), 0644)\n\tWriteFile(fsB, \"qux\/bar\", []byte(\"bar\"), 0644)\n\tWriteFile(fsB, \"qux\/qux\", []byte(\"foo\"), 0644)\n\n\tch, err := merkletrie.DiffTree(\n\t\tNewRootNode(fsA, nil),\n\t\tNewRootNode(fsB, nil),\n\t\tIsEquals,\n\t)\n\n\tc.Assert(err, IsNil)\n\tc.Assert(ch, HasLen, 1)\n}\n\nfunc (s *NoderSuite) TestDiffChangeMissing(c *C) {\n\tfsA := memfs.New()\n\tWriteFile(fsA, \"foo\", []byte(\"foo\"), 0644)\n\n\tfsB := memfs.New()\n\tWriteFile(fsB, \"bar\", []byte(\"bar\"), 0644)\n\n\tch, err := merkletrie.DiffTree(\n\t\tNewRootNode(fsA, nil),\n\t\tNewRootNode(fsB, nil),\n\t\tIsEquals,\n\t)\n\n\tc.Assert(err, IsNil)\n\tc.Assert(ch, HasLen, 2)\n}\n\nfunc (s *NoderSuite) TestDiffChangeMode(c *C) {\n\tfsA := memfs.New()\n\tWriteFile(fsA, \"foo\", []byte(\"foo\"), 0644)\n\n\tfsB := memfs.New()\n\tWriteFile(fsB, \"foo\", []byte(\"foo\"), 0755)\n\n\tch, err := merkletrie.DiffTree(\n\t\tNewRootNode(fsA, nil),\n\t\tNewRootNode(fsB, nil),\n\t\tIsEquals,\n\t)\n\n\tc.Assert(err, IsNil)\n\tc.Assert(ch, HasLen, 1)\n}\n\nfunc (s *NoderSuite) TestDiffChangeModeNotRelevant(c *C) {\n\tfsA := memfs.New()\n\tWriteFile(fsA, \"foo\", []byte(\"foo\"), 0644)\n\n\tfsB := memfs.New()\n\tWriteFile(fsB, \"foo\", []byte(\"foo\"), 0655)\n\n\tch, err := merkletrie.DiffTree(\n\t\tNewRootNode(fsA, nil),\n\t\tNewRootNode(fsB, nil),\n\t\tIsEquals,\n\t)\n\n\tc.Assert(err, IsNil)\n\tc.Assert(ch, HasLen, 0)\n}\n\nfunc (s *NoderSuite) TestDiffDirectory(c *C) {\n\tdir := path.Join(\"qux\", \"bar\")\n\tfsA := memfs.New()\n\tfsA.MkdirAll(dir, 0644)\n\n\tfsB := memfs.New()\n\tfsB.MkdirAll(dir, 0644)\n\n\tch, err := merkletrie.DiffTree(\n\t\tNewRootNode(fsA, map[string]plumbing.Hash{\n\t\t\tdir: plumbing.NewHash(\"aa102815663d23f8b75a47e7a01965dcdc96468c\"),\n\t\t}),\n\t\tNewRootNode(fsB, map[string]plumbing.Hash{\n\t\t\tdir: plumbing.NewHash(\"19102815663d23f8b75a47e7a01965dcdc96468c\"),\n\t\t}),\n\t\tIsEquals,\n\t)\n\n\tc.Assert(err, IsNil)\n\tc.Assert(ch, HasLen, 1)\n\n\ta, err := ch[0].Action()\n\tc.Assert(err, IsNil)\n\tc.Assert(a, Equals, merkletrie.Modify)\n}\n\nfunc WriteFile(fs billy.Filesystem, filename string, data []byte, perm os.FileMode) error {\n\tf, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tn, err := f.Write(data)\n\tif err == nil && n < len(data) {\n\t\terr = io.ErrShortWrite\n\t}\n\tif err1 := f.Close(); err == nil {\n\t\terr = err1\n\t}\n\treturn err\n}\n\nvar empty = make([]byte, 24)\n\nfunc IsEquals(a, b noder.Hasher) bool {\n\tif bytes.Equal(a.Hash(), empty) || bytes.Equal(b.Hash(), empty) {\n\t\treturn false\n\t}\n\n\treturn bytes.Equal(a.Hash(), b.Hash())\n}\n<commit_msg>utils: merkletrie, filesystem fix symlinks to dir<commit_after>package filesystem\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n\n\t. \"gopkg.in\/check.v1\"\n\t\"gopkg.in\/src-d\/go-billy.v3\"\n\t\"gopkg.in\/src-d\/go-billy.v3\/memfs\"\n\t\"gopkg.in\/src-d\/go-git.v4\/plumbing\"\n\t\"gopkg.in\/src-d\/go-git.v4\/utils\/merkletrie\"\n\t\"gopkg.in\/src-d\/go-git.v4\/utils\/merkletrie\/noder\"\n)\n\nfunc Test(t *testing.T) { TestingT(t) }\n\ntype NoderSuite struct{}\n\nvar _ = Suite(&NoderSuite{})\n\nfunc (s *NoderSuite) TestDiff(c *C) {\n\tfsA := memfs.New()\n\tWriteFile(fsA, \"foo\", []byte(\"foo\"), 0644)\n\tWriteFile(fsA, \"qux\/bar\", []byte(\"foo\"), 0644)\n\tWriteFile(fsA, \"qux\/qux\", []byte(\"foo\"), 0644)\n\tfsA.Symlink(\"foo\", \"bar\")\n\n\tfsB := memfs.New()\n\tWriteFile(fsB, \"foo\", []byte(\"foo\"), 0644)\n\tWriteFile(fsB, \"qux\/bar\", []byte(\"foo\"), 0644)\n\tWriteFile(fsB, \"qux\/qux\", []byte(\"foo\"), 0644)\n\tfsB.Symlink(\"foo\", \"bar\")\n\n\tch, err := merkletrie.DiffTree(\n\t\tNewRootNode(fsA, nil),\n\t\tNewRootNode(fsB, nil),\n\t\tIsEquals,\n\t)\n\n\tc.Assert(err, IsNil)\n\tc.Assert(ch, HasLen, 0)\n}\n\nfunc (s *NoderSuite) TestDiffChangeLink(c *C) {\n\tfsA := memfs.New()\n\tfsA.Symlink(\"qux\", \"foo\")\n\n\tfsB := memfs.New()\n\tfsB.Symlink(\"bar\", \"foo\")\n\n\tch, err := merkletrie.DiffTree(\n\t\tNewRootNode(fsA, nil),\n\t\tNewRootNode(fsB, nil),\n\t\tIsEquals,\n\t)\n\n\tc.Assert(err, IsNil)\n\tc.Assert(ch, HasLen, 1)\n}\n\nfunc (s *NoderSuite) TestDiffChangeContent(c *C) {\n\tfsA := memfs.New()\n\tWriteFile(fsA, \"foo\", []byte(\"foo\"), 0644)\n\tWriteFile(fsA, \"qux\/bar\", []byte(\"foo\"), 0644)\n\tWriteFile(fsA, \"qux\/qux\", []byte(\"foo\"), 0644)\n\n\tfsB := memfs.New()\n\tWriteFile(fsB, \"foo\", []byte(\"foo\"), 0644)\n\tWriteFile(fsB, \"qux\/bar\", []byte(\"bar\"), 0644)\n\tWriteFile(fsB, \"qux\/qux\", []byte(\"foo\"), 0644)\n\n\tch, err := merkletrie.DiffTree(\n\t\tNewRootNode(fsA, nil),\n\t\tNewRootNode(fsB, nil),\n\t\tIsEquals,\n\t)\n\n\tc.Assert(err, IsNil)\n\tc.Assert(ch, HasLen, 1)\n}\n\nfunc (s *NoderSuite) TestDiffSymlinkDirOnA(c *C) {\n\tfsA := memfs.New()\n\tWriteFile(fsA, \"qux\/qux\", []byte(\"foo\"), 0644)\n\n\tfsB := memfs.New()\n\tfsB.Symlink(\"qux\", \"foo\")\n\tWriteFile(fsB, \"qux\/qux\", []byte(\"foo\"), 0644)\n\n\tch, err := merkletrie.DiffTree(\n\t\tNewRootNode(fsA, nil),\n\t\tNewRootNode(fsB, nil),\n\t\tIsEquals,\n\t)\n\n\tc.Assert(err, IsNil)\n\tc.Assert(ch, HasLen, 1)\n}\n\nfunc (s *NoderSuite) TestDiffSymlinkDirOnB(c *C) {\n\tfsA := memfs.New()\n\tfsA.Symlink(\"qux\", \"foo\")\n\tWriteFile(fsA, \"qux\/qux\", []byte(\"foo\"), 0644)\n\n\tfsB := memfs.New()\n\tWriteFile(fsB, \"qux\/qux\", []byte(\"foo\"), 0644)\n\n\tch, err := merkletrie.DiffTree(\n\t\tNewRootNode(fsA, nil),\n\t\tNewRootNode(fsB, nil),\n\t\tIsEquals,\n\t)\n\n\tc.Assert(err, IsNil)\n\tc.Assert(ch, HasLen, 1)\n}\n\nfunc (s *NoderSuite) TestDiffChangeMissing(c *C) {\n\tfsA := memfs.New()\n\tWriteFile(fsA, \"foo\", []byte(\"foo\"), 0644)\n\n\tfsB := memfs.New()\n\tWriteFile(fsB, \"bar\", []byte(\"bar\"), 0644)\n\n\tch, err := merkletrie.DiffTree(\n\t\tNewRootNode(fsA, nil),\n\t\tNewRootNode(fsB, nil),\n\t\tIsEquals,\n\t)\n\n\tc.Assert(err, IsNil)\n\tc.Assert(ch, HasLen, 2)\n}\n\nfunc (s *NoderSuite) TestDiffChangeMode(c *C) {\n\tfsA := memfs.New()\n\tWriteFile(fsA, \"foo\", []byte(\"foo\"), 0644)\n\n\tfsB := memfs.New()\n\tWriteFile(fsB, \"foo\", []byte(\"foo\"), 0755)\n\n\tch, err := merkletrie.DiffTree(\n\t\tNewRootNode(fsA, nil),\n\t\tNewRootNode(fsB, nil),\n\t\tIsEquals,\n\t)\n\n\tc.Assert(err, IsNil)\n\tc.Assert(ch, HasLen, 1)\n}\n\nfunc (s *NoderSuite) TestDiffChangeModeNotRelevant(c *C) {\n\tfsA := memfs.New()\n\tWriteFile(fsA, \"foo\", []byte(\"foo\"), 0644)\n\n\tfsB := memfs.New()\n\tWriteFile(fsB, \"foo\", []byte(\"foo\"), 0655)\n\n\tch, err := merkletrie.DiffTree(\n\t\tNewRootNode(fsA, nil),\n\t\tNewRootNode(fsB, nil),\n\t\tIsEquals,\n\t)\n\n\tc.Assert(err, IsNil)\n\tc.Assert(ch, HasLen, 0)\n}\n\nfunc (s *NoderSuite) TestDiffDirectory(c *C) {\n\tdir := path.Join(\"qux\", \"bar\")\n\tfsA := memfs.New()\n\tfsA.MkdirAll(dir, 0644)\n\n\tfsB := memfs.New()\n\tfsB.MkdirAll(dir, 0644)\n\n\tch, err := merkletrie.DiffTree(\n\t\tNewRootNode(fsA, map[string]plumbing.Hash{\n\t\t\tdir: plumbing.NewHash(\"aa102815663d23f8b75a47e7a01965dcdc96468c\"),\n\t\t}),\n\t\tNewRootNode(fsB, map[string]plumbing.Hash{\n\t\t\tdir: plumbing.NewHash(\"19102815663d23f8b75a47e7a01965dcdc96468c\"),\n\t\t}),\n\t\tIsEquals,\n\t)\n\n\tc.Assert(err, IsNil)\n\tc.Assert(ch, HasLen, 1)\n\n\ta, err := ch[0].Action()\n\tc.Assert(err, IsNil)\n\tc.Assert(a, Equals, merkletrie.Modify)\n}\n\nfunc WriteFile(fs billy.Filesystem, filename string, data []byte, perm os.FileMode) error {\n\tf, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tn, err := f.Write(data)\n\tif err == nil && n < len(data) {\n\t\terr = io.ErrShortWrite\n\t}\n\tif err1 := f.Close(); err == nil {\n\t\terr = err1\n\t}\n\treturn err\n}\n\nvar empty = make([]byte, 24)\n\nfunc IsEquals(a, b noder.Hasher) bool {\n\tif bytes.Equal(a.Hash(), empty) || bytes.Equal(b.Hash(), empty) {\n\t\treturn false\n\t}\n\n\treturn bytes.Equal(a.Hash(), b.Hash())\n}\n<|endoftext|>"} {"text":"<commit_before>package command\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/funkygao\/gocli\"\n\t\"github.com\/siddontang\/go-mysql\/canal\"\n)\n\ntype Canal struct {\n\tUi cli.Ui\n\tCmd string\n}\n\ntype myRowsEventHandler struct {\n}\n\nfunc (h *myRowsEventHandler) Do(e *canal.RowsEvent) error {\n\tfmt.Println(*e)\n\treturn nil\n}\n\nfunc (h *myRowsEventHandler) String() string {\n\treturn \"myRowsEventHandler\"\n}\n\nfunc (this *Canal) Run(args []string) (exitCode int) {\n\tcfg := canal.NewDefaultConfig()\n\n\tcmdFlags := flag.NewFlagSet(\"canal\", flag.ContinueOnError)\n\tcmdFlags.Usage = func() { this.Ui.Output(this.Help()) }\n\tcmdFlags.StringVar(&cfg.User, \"user\", \"\", \"\")\n\tcmdFlags.StringVar(&cfg.Password, \"pass\", \"\", \"\")\n\tcmdFlags.StringVar(&cfg.Addr, \"dsn\", \"\", \"\")\n\tcmdFlags.StringVar(&cfg.Dump.TableDB, \"db\", \"\", \"\")\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\tcfg.Dump.Tables = []string{\"logs\"}\n\n\tc, err := canal.NewCanal(cfg)\n\tif err != nil {\n\t\tthis.Ui.Error(err.Error())\n\t\treturn\n\t}\n\n\tc.RegRowsEventHandler(&myRowsEventHandler{})\n\n\t\/\/ Start canal\n\tc.Start()\n\n\treturn\n}\n\nfunc (*Canal) Synopsis() string {\n\treturn \"Sync binlog from MySQL master to elsewhere\"\n}\n\nfunc (this *Canal) Help() string {\n\thelp := fmt.Sprintf(`\nUsage: %s canal [options]\n\n %s\n\nOptions:\n\n -user db user\n\n -pass db password\n\n -dsn db dsn\n\n -db db name\n\n -table table name\n\n\n`, this.Cmd, this.Synopsis())\n\treturn strings.TrimSpace(help)\n}\n<commit_msg>canal<commit_after>package command\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/funkygao\/gocli\"\n\t\"github.com\/siddontang\/go-mysql\/canal\"\n)\n\ntype Canal struct {\n\tUi cli.Ui\n\tCmd string\n}\n\ntype myRowsEventHandler struct {\n}\n\nfunc (h *myRowsEventHandler) Do(e *canal.RowsEvent) error {\n\tfmt.Println(*e)\n\treturn nil\n}\n\nfunc (h *myRowsEventHandler) String() string {\n\treturn \"myRowsEventHandler\"\n}\n\nfunc (this *Canal) Run(args []string) (exitCode int) {\n\tcfg := canal.NewDefaultConfig()\n\n\tcmdFlags := flag.NewFlagSet(\"canal\", flag.ContinueOnError)\n\tcmdFlags.Usage = func() { this.Ui.Output(this.Help()) }\n\tcmdFlags.StringVar(&cfg.User, \"user\", \"\", \"\")\n\tcmdFlags.StringVar(&cfg.Password, \"pass\", \"\", \"\")\n\tcmdFlags.StringVar(&cfg.Addr, \"dsn\", \"\", \"\")\n\tcmdFlags.StringVar(&cfg.Dump.TableDB, \"db\", \"\", \"\")\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\tcfg.Dump.Tables = []string{\"logs\"}\n\n\tc, err := canal.NewCanal(cfg)\n\tif err != nil {\n\t\tthis.Ui.Error(err.Error())\n\t\treturn\n\t}\n\tdefer c.Close()\n\n\tc.RegRowsEventHandler(&myRowsEventHandler{})\n\n\t\/\/ Start canal\n\tc.Start()\n\n\ttime.Sleep(time.Hour)\n\n\treturn\n}\n\nfunc (*Canal) Synopsis() string {\n\treturn \"Sync binlog from MySQL master to elsewhere\"\n}\n\nfunc (this *Canal) Help() string {\n\thelp := fmt.Sprintf(`\nUsage: %s canal [options]\n\n %s\n\nOptions:\n\n -user db user\n\n -pass db password\n\n -dsn db dsn\n\n -db db name\n\n -table table name\n\n\n`, this.Cmd, this.Synopsis())\n\treturn strings.TrimSpace(help)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The linkcheck command finds missing links in the godoc website.\n\/\/ It crawls a URL recursively and notes URLs and URL fragments\n\/\/ that it's seen and prints a report of missing links at the end.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n)\n\nvar (\n\troot = flag.String(\"root\", \"http:\/\/localhost:6060\", \"Root to crawl\")\n\tverbose = flag.Bool(\"verbose\", false, \"verbose\")\n)\n\nvar wg sync.WaitGroup \/\/ outstanding fetches\nvar urlq = make(chan string) \/\/ URLs to crawl\n\n\/\/ urlFrag is a URL and its optional #fragment (without the #)\ntype urlFrag struct {\n\turl, frag string\n}\n\nvar (\n\tmu sync.Mutex\n\tcrawled = make(map[string]bool) \/\/ URL without fragment -> true\n\tneededFrags = make(map[urlFrag][]string) \/\/ URL#frag -> who needs it\n)\n\nvar aRx = regexp.MustCompile(`<a href=['\"]?(\/[^\\s'\">]+)`)\n\n\/\/ Owned by crawlLoop goroutine:\nvar (\n\tlinkSources = make(map[string][]string) \/\/ url no fragment -> sources\n\tfragExists = make(map[urlFrag]bool)\n\tproblems []string\n)\n\nfunc localLinks(body string) (links []string) {\n\tseen := map[string]bool{}\n\tmv := aRx.FindAllStringSubmatch(body, -1)\n\tfor _, m := range mv {\n\t\tref := m[1]\n\t\tif strings.HasPrefix(ref, \"\/src\/\") {\n\t\t\tcontinue\n\t\t}\n\t\tif !seen[ref] {\n\t\t\tseen[ref] = true\n\t\t\tlinks = append(links, m[1])\n\t\t}\n\t}\n\treturn\n}\n\nvar idRx = regexp.MustCompile(`\\bid=['\"]?([^\\s'\">]+)`)\n\nfunc pageIDs(body string) (ids []string) {\n\tmv := idRx.FindAllStringSubmatch(body, -1)\n\tfor _, m := range mv {\n\t\tids = append(ids, m[1])\n\t}\n\treturn\n}\n\n\/\/ url may contain a #fragment, and the fragment is then noted as needing to exist.\nfunc crawl(url string, sourceURL string) {\n\tif strings.Contains(url, \"\/devel\/release\") {\n\t\treturn\n\t}\n\tmu.Lock()\n\tdefer mu.Unlock()\n\tvar frag string\n\tif i := strings.Index(url, \"#\"); i >= 0 {\n\t\tfrag = url[i+1:]\n\t\turl = url[:i]\n\t\tif frag != \"\" {\n\t\t\tuf := urlFrag{url, frag}\n\t\t\tneededFrags[uf] = append(neededFrags[uf], sourceURL)\n\t\t}\n\t}\n\tif crawled[url] {\n\t\treturn\n\t}\n\tcrawled[url] = true\n\n\twg.Add(1)\n\tgo func() {\n\t\turlq <- url\n\t}()\n}\n\nfunc addProblem(url, errmsg string) {\n\tmsg := fmt.Sprintf(\"Error on %s: %s (from %s)\", url, errmsg, linkSources[url])\n\tlog.Print(msg)\n\tproblems = append(problems, msg)\n}\n\nfunc crawlLoop() {\n\tfor url := range urlq {\n\t\tres, err := http.Get(url)\n\t\tif err != nil {\n\t\t\taddProblem(url, fmt.Sprintf(\"Error fetching: %v\", err))\n\t\t\twg.Done()\n\t\t\tcontinue\n\t\t}\n\t\tif res.StatusCode != 200 {\n\t\t\taddProblem(url, fmt.Sprintf(\"Status code = %d\", res.StatusCode))\n\t\t\twg.Done()\n\t\t\tcontinue\n\t\t}\n\t\tslurp, err := ioutil.ReadAll(res.Body)\n\t\tres.Body.Close()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error reading %s body: %v\", url, err)\n\t\t}\n\t\tif *verbose {\n\t\t\tlog.Printf(\"Len of %s: %d\", url, len(slurp))\n\t\t}\n\t\tbody := string(slurp)\n\t\tfor _, ref := range localLinks(body) {\n\t\t\tif *verbose {\n\t\t\t\tlog.Printf(\" links to %s\", ref)\n\t\t\t}\n\t\t\tdest := *root + ref\n\t\t\tlinkSources[dest] = append(linkSources[dest], url)\n\t\t\tcrawl(dest, url)\n\t\t}\n\t\tfor _, id := range pageIDs(body) {\n\t\t\tif *verbose {\n\t\t\t\tlog.Printf(\" url %s has #%s\", url, id)\n\t\t\t}\n\t\t\tfragExists[urlFrag{url, id}] = true\n\t\t}\n\n\t\twg.Done()\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tgo crawlLoop()\n\tcrawl(*root, \"\")\n\tcrawl(*root+\"\/doc\/go1.1.html\", \"\")\n\n\twg.Wait()\n\tclose(urlq)\n\tfor uf, needers := range neededFrags {\n\t\tif !fragExists[uf] {\n\t\t\tproblems = append(problems, fmt.Sprintf(\"Missing fragment for %+v from %v\", uf, needers))\n\t\t}\n\t}\n\n\tfor _, s := range problems {\n\t\tfmt.Println(s)\n\t}\n}\n<commit_msg>misc\/linkcheck: better redirect handling, use meaningful exit code<commit_after>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The linkcheck command finds missing links in the godoc website.\n\/\/ It crawls a URL recursively and notes URLs and URL fragments\n\/\/ that it's seen and prints a report of missing links at the end.\npackage main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n)\n\nvar (\n\troot = flag.String(\"root\", \"http:\/\/localhost:6060\", \"Root to crawl\")\n\tverbose = flag.Bool(\"verbose\", false, \"verbose\")\n)\n\nvar wg sync.WaitGroup \/\/ outstanding fetches\nvar urlq = make(chan string) \/\/ URLs to crawl\n\n\/\/ urlFrag is a URL and its optional #fragment (without the #)\ntype urlFrag struct {\n\turl, frag string\n}\n\nvar (\n\tmu sync.Mutex\n\tcrawled = make(map[string]bool) \/\/ URL without fragment -> true\n\tneededFrags = make(map[urlFrag][]string) \/\/ URL#frag -> who needs it\n)\n\nvar aRx = regexp.MustCompile(`<a href=['\"]?(\/[^\\s'\">]+)`)\n\n\/\/ Owned by crawlLoop goroutine:\nvar (\n\tlinkSources = make(map[string][]string) \/\/ url no fragment -> sources\n\tfragExists = make(map[urlFrag]bool)\n\tproblems []string\n)\n\nfunc localLinks(body string) (links []string) {\n\tseen := map[string]bool{}\n\tmv := aRx.FindAllStringSubmatch(body, -1)\n\tfor _, m := range mv {\n\t\tref := m[1]\n\t\tif strings.HasPrefix(ref, \"\/src\/\") {\n\t\t\tcontinue\n\t\t}\n\t\tif !seen[ref] {\n\t\t\tseen[ref] = true\n\t\t\tlinks = append(links, m[1])\n\t\t}\n\t}\n\treturn\n}\n\nvar idRx = regexp.MustCompile(`\\bid=['\"]?([^\\s'\">]+)`)\n\nfunc pageIDs(body string) (ids []string) {\n\tmv := idRx.FindAllStringSubmatch(body, -1)\n\tfor _, m := range mv {\n\t\tids = append(ids, m[1])\n\t}\n\treturn\n}\n\n\/\/ url may contain a #fragment, and the fragment is then noted as needing to exist.\nfunc crawl(url string, sourceURL string) {\n\tif strings.Contains(url, \"\/devel\/release\") {\n\t\treturn\n\t}\n\tmu.Lock()\n\tdefer mu.Unlock()\n\tvar frag string\n\tif i := strings.Index(url, \"#\"); i >= 0 {\n\t\tfrag = url[i+1:]\n\t\turl = url[:i]\n\t\tif frag != \"\" {\n\t\t\tuf := urlFrag{url, frag}\n\t\t\tneededFrags[uf] = append(neededFrags[uf], sourceURL)\n\t\t}\n\t}\n\tif crawled[url] {\n\t\treturn\n\t}\n\tcrawled[url] = true\n\n\twg.Add(1)\n\tgo func() {\n\t\turlq <- url\n\t}()\n}\n\nfunc addProblem(url, errmsg string) {\n\tmsg := fmt.Sprintf(\"Error on %s: %s (from %s)\", url, errmsg, linkSources[url])\n\tif *verbose {\n\t\tlog.Print(msg)\n\t}\n\tproblems = append(problems, msg)\n}\n\nfunc crawlLoop() {\n\tfor url := range urlq {\n\t\tif err := doCrawl(url); err != nil {\n\t\t\taddProblem(url, err.Error())\n\t\t}\n\t}\n}\n\nfunc doCrawl(url string) error {\n\tdefer wg.Done()\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tres, err := http.DefaultTransport.RoundTrip(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Handle redirects.\n\tif res.StatusCode\/100 == 3 {\n\t\tnewURL, err := res.Location()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"resolving redirect: %v\", err)\n\t\t}\n\t\tif !strings.HasPrefix(newURL.String(), *root) {\n\t\t\t\/\/ Skip off-site redirects.\n\t\t\treturn nil\n\t\t}\n\t\tcrawl(newURL.String(), url)\n\t\treturn nil\n\t}\n\tif res.StatusCode != 200 {\n\t\treturn errors.New(res.Status)\n\t}\n\tslurp, err := ioutil.ReadAll(res.Body)\n\tres.Body.Close()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error reading %s body: %v\", url, err)\n\t}\n\tif *verbose {\n\t\tlog.Printf(\"Len of %s: %d\", url, len(slurp))\n\t}\n\tbody := string(slurp)\n\tfor _, ref := range localLinks(body) {\n\t\tif *verbose {\n\t\t\tlog.Printf(\" links to %s\", ref)\n\t\t}\n\t\tdest := *root + ref\n\t\tlinkSources[dest] = append(linkSources[dest], url)\n\t\tcrawl(dest, url)\n\t}\n\tfor _, id := range pageIDs(body) {\n\t\tif *verbose {\n\t\t\tlog.Printf(\" url %s has #%s\", url, id)\n\t\t}\n\t\tfragExists[urlFrag{url, id}] = true\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tgo crawlLoop()\n\tcrawl(*root, \"\")\n\n\twg.Wait()\n\tclose(urlq)\n\tfor uf, needers := range neededFrags {\n\t\tif !fragExists[uf] {\n\t\t\tproblems = append(problems, fmt.Sprintf(\"Missing fragment for %+v from %v\", uf, needers))\n\t\t}\n\t}\n\n\tfor _, s := range problems {\n\t\tfmt.Println(s)\n\t}\n\tif len(problems) > 0 {\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>[Bug fix] port by command-line option.<commit_after><|endoftext|>"} {"text":"<commit_before>package mmail\n\nimport (\n\t\"crypto\/tls\"\n\t\"net\/mail\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/emersion\/go-imap\"\n\tidle \"github.com\/emersion\/go-imap-idle\"\n\t\"github.com\/emersion\/go-imap\/client\"\n\t\"github.com\/rodcorsi\/mattermail\/model\"\n)\n\n\/\/ MailProviderImap implements MailProvider using imap\ntype MailProviderImap struct {\n\timapClient *client.Client\n\tcfg *model.Email\n\tlog Logger\n\tidle bool\n\tdebug bool\n}\n\nconst mailBox = \"INBOX\"\n\n\/\/ NewMailProviderImap creates a new MailProviderImap implementing MailProvider\nfunc NewMailProviderImap(cfg *model.Email, log Logger, debug bool) *MailProviderImap {\n\treturn &MailProviderImap{\n\t\tcfg: cfg,\n\t\tlog: log,\n\t\tdebug: debug,\n\t}\n}\n\n\/\/ CheckNewMessage gets new email from server\nfunc (m *MailProviderImap) CheckNewMessage(handler MailHandler) error {\n\tm.log.Debug(\"MailProviderImap.CheckNewMessage\")\n\n\tif err := m.checkConnection(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := m.selectMailBox(); err != nil {\n\t\treturn err\n\t}\n\n\tcriteria := &imap.SearchCriteria{\n\t\tWithoutFlags: []string{imap.SeenFlag},\n\t}\n\n\tuid, err := m.imapClient.UidSearch(criteria)\n\tif err != nil {\n\t\tm.log.Debug(\"MailProviderImap.CheckNewMessage: Error UIDSearch\")\n\t\treturn err\n\t}\n\n\tif len(uid) == 0 {\n\t\tm.log.Debug(\"MailProviderImap.CheckNewMessage: No new messages\")\n\t\treturn nil\n\t}\n\n\tm.log.Debugf(\"MailProviderImap.CheckNewMessage: found %v uid\", len(uid))\n\n\tseqset := &imap.SeqSet{}\n\tseqset.AddNum(uid...)\n\n\tmessages := make(chan *imap.Message, len(uid))\n\tdone := make(chan error, 1)\n\tgo func() {\n\t\tdone <- m.imapClient.UidFetch(seqset, []string{imap.EnvelopeMsgAttr, \"BODY[]\"}, messages)\n\t}()\n\n\temailPosted := make(map[uint32]bool)\n\tfor _, v := range uid {\n\t\temailPosted[v] = false\n\t}\n\n\tfor imapMsg := range messages {\n\t\tm.log.Debug(\"MailProviderImap.CheckNewMessage: PostMail uid:\", imapMsg.Uid)\n\t\tif emailPosted[imapMsg.Uid] {\n\t\t\tm.log.Debug(\"MailProviderImap.CheckNewMessage: Email was posted uid:\", imapMsg.Uid)\n\t\t\tcontinue\n\t\t}\n\n\t\tr := imapMsg.GetBody(\"BODY[]\")\n\t\tif r == nil {\n\t\t\tm.log.Error(\"MailProviderImap.CheckNewMessage: message.GetBody(BODY[]) returns nil\")\n\t\t\tcontinue\n\t\t}\n\n\t\tmsg, err := mail.ReadMessage(r)\n\t\tif err != nil {\n\t\t\tm.log.Error(\"MailProviderImap.CheckNewMessage: Error on parse imap\/message to mail\/message\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := handler(msg); err != nil {\n\t\t\tm.log.Debug(\"MailProviderImap.CheckNewMessage: Error handler:\", err.Error())\n\t\t\tcontinue\n\t\t} else {\n\t\t\temailPosted[imapMsg.Uid] = true\n\t\t}\n\t}\n\n\t\/\/ Check command completion status\n\tif err := <-done; err != nil {\n\t\tm.log.Error(\"MailProviderImap.CheckNewMessage: Error on terminate fetch command\")\n\t\treturn err\n\t}\n\n\terrorset := &imap.SeqSet{}\n\tfor k, posted := range emailPosted {\n\t\tif !posted {\n\t\t\terrorset.AddNum(k)\n\t\t}\n\t}\n\n\tif errorset.Empty() {\n\t\treturn nil\n\t}\n\n\t\/\/ Mark all valid messages as read\n\terr = m.imapClient.UidStore(errorset, imap.RemoveFlags, []interface{}{imap.SeenFlag}, nil)\n\tif err != nil {\n\t\tm.log.Error(\"MailProviderImap.CheckNewMessage: Error UIDStore UNSEEN\")\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ WaitNewMessage waits for a new message (idle or time.Sleep)\nfunc (m *MailProviderImap) WaitNewMessage(timeout int) error {\n\tm.log.Debug(\"MailProviderImap.WaitNewMessage\")\n\n\t\/\/ Idle mode\n\tif err := m.checkConnection(); err != nil {\n\t\treturn err\n\t}\n\n\tm.log.Debug(\"MailProviderImap.WaitNewMessage: idle mode:\", m.idle)\n\n\tif !m.idle {\n\t\ttime.Sleep(time.Second * time.Duration(timeout))\n\t\treturn nil\n\t}\n\n\tif err := m.selectMailBox(); err != nil {\n\t\treturn err\n\t}\n\n\tidleClient := idle.NewClient(m.imapClient)\n\n\t\/\/ Create a channel to receive mailbox updates\n\tstatuses := make(chan *imap.MailboxStatus)\n\tm.imapClient.MailboxUpdates = statuses\n\n\tstop := make(chan struct{})\n\tdone := make(chan error, 1)\n\tgo func() {\n\t\tdone <- idleClient.Idle(stop)\n\t}()\n\n\treset := time.After(time.Second * time.Duration(timeout))\n\n\tvar lock sync.Mutex\n\tclosed := false\n\tcloseChannel := func() {\n\t\tlock.Lock()\n\t\tif !closed {\n\t\t\tclose(stop)\n\t\t\tclosed = true\n\t\t}\n\t\tlock.Unlock()\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase status := <-statuses:\n\t\t\tm.log.Debug(\"MailProviderImap.WaitNewMessage: New mailbox status:\", status.Format())\n\t\t\tcloseChannel()\n\n\t\tcase err := <-done:\n\t\t\tif err != nil {\n\t\t\t\tm.log.Error(\"MailProviderImap.WaitNewMessage: Error on terminate idle\", err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\tcase <-reset:\n\t\t\tcloseChannel()\n\t\t}\n\t}\n}\n\nfunc (m *MailProviderImap) selectMailBox() error {\n\tif m.imapClient.Mailbox != nil && m.imapClient.Mailbox.Name == mailBox {\n\t\treturn nil\n\t}\n\n\t_, err := m.imapClient.Select(mailBox, false)\n\tif err != nil {\n\t\tm.log.Error(\"MailProviderImap.selectMailBox: Error on select\", mailBox)\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ checkConnection if is connected return nil or try to connect\nfunc (m *MailProviderImap) checkConnection() error {\n\tif m.imapClient != nil && m.imapClient.State != imap.LogoutState {\n\t\tm.log.Debug(\"MailProviderImap.CheckConnection: Connection state\", m.imapClient.State)\n\t\treturn nil\n\t}\n\n\tvar err error\n\n\t\/\/Start connection with server\n\tif strings.HasSuffix(m.cfg.ImapServer, \":993\") {\n\t\tm.log.Debug(\"MailProviderImap.CheckConnection: DialTLS\")\n\t\tm.imapClient, err = client.DialTLS(m.cfg.ImapServer, nil)\n\t} else {\n\t\tm.log.Debug(\"MailProviderImap.CheckConnection: Dial\")\n\t\tm.imapClient, err = client.Dial(m.cfg.ImapServer)\n\t}\n\n\tif err != nil {\n\t\tm.log.Error(\"MailProviderImap.CheckConnection: Unable to connect:\", m.cfg.ImapServer)\n\t\treturn err\n\t}\n\n\tif m.debug {\n\t\tm.imapClient.SetDebug(m.log)\n\t}\n\n\t\/\/ Max timeout awaiting a command\n\tm.imapClient.Timeout = time.Minute * 3\n\n\tif *m.cfg.StartTLS {\n\t\tstarttls, err := m.imapClient.SupportStartTLS()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif starttls {\n\t\t\tm.log.Debug(\"MailProviderImap.CheckConnection:StartTLS\")\n\t\t\tvar tconfig tls.Config\n\t\t\tif *m.cfg.TLSAcceptAllCerts {\n\t\t\t\ttconfig.InsecureSkipVerify = true\n\t\t\t}\n\t\t\terr = m.imapClient.StartTLS(&tconfig)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tm.log.Infof(\"Connected with %q\\n\", m.cfg.ImapServer)\n\n\terr = m.imapClient.Login(m.cfg.Address, m.cfg.Password)\n\tif err != nil {\n\t\tm.log.Error(\"MailProviderImap.CheckConnection: Unable to login:\", m.cfg.Address)\n\t\treturn err\n\t}\n\n\tif err = m.selectMailBox(); err != nil {\n\t\treturn err\n\t}\n\n\tidleClient := idle.NewClient(m.imapClient)\n\n\tm.idle, err = idleClient.SupportIdle()\n\tif err != nil {\n\t\tm.idle = false\n\t\tm.log.Error(\"MailProviderImap.CheckConnection: Error on check idle support\")\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Terminate imap connection\nfunc (m *MailProviderImap) Terminate() error {\n\tif m.imapClient != nil {\n\t\tm.log.Info(\"MailProviderImap.Terminate Logout\")\n\t\tif err := m.imapClient.Logout(); err != nil {\n\t\t\tm.log.Error(\"MailProviderImap.Terminate Error:\", err.Error())\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>remove mutex mail_provider<commit_after>package mmail\n\nimport (\n\t\"crypto\/tls\"\n\t\"net\/mail\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/emersion\/go-imap\"\n\tidle \"github.com\/emersion\/go-imap-idle\"\n\t\"github.com\/emersion\/go-imap\/client\"\n\t\"github.com\/rodcorsi\/mattermail\/model\"\n)\n\n\/\/ MailProviderImap implements MailProvider using imap\ntype MailProviderImap struct {\n\timapClient *client.Client\n\tcfg *model.Email\n\tlog Logger\n\tidle bool\n\tdebug bool\n}\n\nconst mailBox = \"INBOX\"\n\n\/\/ NewMailProviderImap creates a new MailProviderImap implementing MailProvider\nfunc NewMailProviderImap(cfg *model.Email, log Logger, debug bool) *MailProviderImap {\n\treturn &MailProviderImap{\n\t\tcfg: cfg,\n\t\tlog: log,\n\t\tdebug: debug,\n\t}\n}\n\n\/\/ CheckNewMessage gets new email from server\nfunc (m *MailProviderImap) CheckNewMessage(handler MailHandler) error {\n\tm.log.Debug(\"MailProviderImap.CheckNewMessage\")\n\n\tif err := m.checkConnection(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := m.selectMailBox(); err != nil {\n\t\treturn err\n\t}\n\n\tcriteria := &imap.SearchCriteria{\n\t\tWithoutFlags: []string{imap.SeenFlag},\n\t}\n\n\tuid, err := m.imapClient.UidSearch(criteria)\n\tif err != nil {\n\t\tm.log.Debug(\"MailProviderImap.CheckNewMessage: Error UIDSearch\")\n\t\treturn err\n\t}\n\n\tif len(uid) == 0 {\n\t\tm.log.Debug(\"MailProviderImap.CheckNewMessage: No new messages\")\n\t\treturn nil\n\t}\n\n\tm.log.Debugf(\"MailProviderImap.CheckNewMessage: found %v uid\", len(uid))\n\n\tseqset := &imap.SeqSet{}\n\tseqset.AddNum(uid...)\n\n\tmessages := make(chan *imap.Message, len(uid))\n\tdone := make(chan error, 1)\n\tgo func() {\n\t\tdone <- m.imapClient.UidFetch(seqset, []string{imap.EnvelopeMsgAttr, \"BODY[]\"}, messages)\n\t}()\n\n\temailPosted := make(map[uint32]bool)\n\tfor _, v := range uid {\n\t\temailPosted[v] = false\n\t}\n\n\tfor imapMsg := range messages {\n\t\tm.log.Debug(\"MailProviderImap.CheckNewMessage: PostMail uid:\", imapMsg.Uid)\n\t\tif emailPosted[imapMsg.Uid] {\n\t\t\tm.log.Debug(\"MailProviderImap.CheckNewMessage: Email was posted uid:\", imapMsg.Uid)\n\t\t\tcontinue\n\t\t}\n\n\t\tr := imapMsg.GetBody(\"BODY[]\")\n\t\tif r == nil {\n\t\t\tm.log.Error(\"MailProviderImap.CheckNewMessage: message.GetBody(BODY[]) returns nil\")\n\t\t\tcontinue\n\t\t}\n\n\t\tmsg, err := mail.ReadMessage(r)\n\t\tif err != nil {\n\t\t\tm.log.Error(\"MailProviderImap.CheckNewMessage: Error on parse imap\/message to mail\/message\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := handler(msg); err != nil {\n\t\t\tm.log.Debug(\"MailProviderImap.CheckNewMessage: Error handler:\", err.Error())\n\t\t\tcontinue\n\t\t} else {\n\t\t\temailPosted[imapMsg.Uid] = true\n\t\t}\n\t}\n\n\t\/\/ Check command completion status\n\tif err := <-done; err != nil {\n\t\tm.log.Error(\"MailProviderImap.CheckNewMessage: Error on terminate fetch command\")\n\t\treturn err\n\t}\n\n\terrorset := &imap.SeqSet{}\n\tfor k, posted := range emailPosted {\n\t\tif !posted {\n\t\t\terrorset.AddNum(k)\n\t\t}\n\t}\n\n\tif errorset.Empty() {\n\t\treturn nil\n\t}\n\n\t\/\/ Mark all valid messages as read\n\terr = m.imapClient.UidStore(errorset, imap.RemoveFlags, []interface{}{imap.SeenFlag}, nil)\n\tif err != nil {\n\t\tm.log.Error(\"MailProviderImap.CheckNewMessage: Error UIDStore UNSEEN\")\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ WaitNewMessage waits for a new message (idle or time.Sleep)\nfunc (m *MailProviderImap) WaitNewMessage(timeout int) error {\n\tm.log.Debug(\"MailProviderImap.WaitNewMessage\")\n\n\t\/\/ Idle mode\n\tif err := m.checkConnection(); err != nil {\n\t\treturn err\n\t}\n\n\tm.log.Debug(\"MailProviderImap.WaitNewMessage: idle mode:\", m.idle)\n\n\tif !m.idle {\n\t\ttime.Sleep(time.Second * time.Duration(timeout))\n\t\treturn nil\n\t}\n\n\tif err := m.selectMailBox(); err != nil {\n\t\treturn err\n\t}\n\n\tidleClient := idle.NewClient(m.imapClient)\n\n\t\/\/ Create a channel to receive mailbox updates\n\tstatuses := make(chan *imap.MailboxStatus)\n\tm.imapClient.MailboxUpdates = statuses\n\n\tstop := make(chan struct{})\n\tdone := make(chan error, 1)\n\tgo func() {\n\t\tdone <- idleClient.Idle(stop)\n\t}()\n\n\treset := time.After(time.Second * time.Duration(timeout))\n\n\tclosed := false\n\tcloseChannel := func() {\n\t\tif !closed {\n\t\t\tclose(stop)\n\t\t\tclosed = true\n\t\t}\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase status := <-statuses:\n\t\t\tm.log.Debug(\"MailProviderImap.WaitNewMessage: New mailbox status:\", status)\n\t\t\tcloseChannel()\n\n\t\tcase err := <-done:\n\t\t\tif err != nil {\n\t\t\t\tm.log.Error(\"MailProviderImap.WaitNewMessage: Error on terminate idle\", err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\tcase <-reset:\n\t\t\tcloseChannel()\n\t\t}\n\t}\n}\n\nfunc (m *MailProviderImap) selectMailBox() error {\n\tif m.imapClient.Mailbox != nil && m.imapClient.Mailbox.Name == mailBox {\n\t\treturn nil\n\t}\n\n\t_, err := m.imapClient.Select(mailBox, false)\n\tif err != nil {\n\t\tm.log.Error(\"MailProviderImap.selectMailBox: Error on select\", mailBox)\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ checkConnection if is connected return nil or try to connect\nfunc (m *MailProviderImap) checkConnection() error {\n\tif m.imapClient != nil && (m.imapClient.State == imap.AuthenticatedState || m.imapClient.State == imap.SelectedState) {\n\t\tm.log.Debug(\"MailProviderImap.CheckConnection: Connection state\", m.imapClient.State)\n\t\treturn nil\n\t}\n\n\tvar err error\n\n\t\/\/Start connection with server\n\tif strings.HasSuffix(m.cfg.ImapServer, \":993\") {\n\t\tm.log.Debug(\"MailProviderImap.CheckConnection: DialTLS\")\n\t\tm.imapClient, err = client.DialTLS(m.cfg.ImapServer, nil)\n\t} else {\n\t\tm.log.Debug(\"MailProviderImap.CheckConnection: Dial\")\n\t\tm.imapClient, err = client.Dial(m.cfg.ImapServer)\n\t}\n\n\tif err != nil {\n\t\tm.log.Error(\"MailProviderImap.CheckConnection: Unable to connect:\", m.cfg.ImapServer)\n\t\treturn err\n\t}\n\n\tif m.debug {\n\t\tm.imapClient.SetDebug(m.log)\n\t}\n\n\t\/\/ Max timeout awaiting a command\n\tm.imapClient.Timeout = time.Minute * 3\n\n\tif *m.cfg.StartTLS {\n\t\tstarttls, err := m.imapClient.SupportStartTLS()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif starttls {\n\t\t\tm.log.Debug(\"MailProviderImap.CheckConnection:StartTLS\")\n\t\t\tvar tconfig tls.Config\n\t\t\tif *m.cfg.TLSAcceptAllCerts {\n\t\t\t\ttconfig.InsecureSkipVerify = true\n\t\t\t}\n\t\t\terr = m.imapClient.StartTLS(&tconfig)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tm.log.Infof(\"Connected with %q\\n\", m.cfg.ImapServer)\n\n\terr = m.imapClient.Login(m.cfg.Address, m.cfg.Password)\n\tif err != nil {\n\t\tm.log.Error(\"MailProviderImap.CheckConnection: Unable to login:\", m.cfg.Address)\n\t\treturn err\n\t}\n\n\tif err = m.selectMailBox(); err != nil {\n\t\treturn err\n\t}\n\n\tidleClient := idle.NewClient(m.imapClient)\n\n\tm.idle, err = idleClient.SupportIdle()\n\tif err != nil {\n\t\tm.idle = false\n\t\tm.log.Error(\"MailProviderImap.CheckConnection: Error on check idle support\")\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Terminate imap connection\nfunc (m *MailProviderImap) Terminate() error {\n\tif m.imapClient != nil {\n\t\tm.log.Info(\"MailProviderImap.Terminate Logout\")\n\t\tif err := m.imapClient.Logout(); err != nil {\n\t\t\tm.log.Error(\"MailProviderImap.Terminate Error:\", err.Error())\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"sync\"\n\n\tdocker \"github.com\/fsouza\/go-dockerclient\"\n)\n\ntype Gateway struct {\n\tDestinations map[string]DestinationMap\n\t*sync.Mutex\n}\n\nfunc NewGateway() *Gateway {\n\treturn &Gateway{\n\t\tmap[string]DestinationMap{},\n\t\t&sync.Mutex{},\n\t}\n}\n\nfunc (gw *Gateway) fetchDomain(c *docker.Container) string {\n\tfor _, v := range c.Config.Env {\n\t\tif strings.Contains(v, \"APP_DOMAIN=\") {\n\t\t\treturn strings.Replace(v, \"APP_DOMAIN=\", \"\", 1)\n\t\t}\n\t}\n\n\treturn c.ID\n}\n\nfunc (gw *Gateway) Add(container *docker.Container) error {\n\tlog.Println(\"Adding container:\", container.ID)\n\n\tkey := gw.fetchDomain(container)\n\n\tif gw.Destinations[key][container.ID] != nil {\n\t\treturn fmt.Errorf(\"Destination alreaady exists!\")\n\t}\n\n\tif len(container.Config.ExposedPorts) == 0 {\n\t\tlog.Printf(\"Container %s does not have any exposed ports\\n\", container.ID)\n\t\treturn nil\n\t}\n\n\tdest, err := NewDestination(container)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgw.Lock()\n\tdefer gw.Unlock()\n\n\tif gw.Destinations[key] == nil {\n\t\tgw.Destinations[key] = DestinationMap{}\n\t}\n\n\tgw.Destinations[key][container.ID] = dest\n\treturn nil\n}\n\nfunc (gw *Gateway) Remove(container *docker.Container) error {\n\tlog.Println(\"Removing container:\", container.ID)\n\tkey := gw.fetchDomain(container)\n\n\tif len(gw.Destinations[key]) == 0 {\n\t\treturn nil\n\t}\n\n\tgw.Lock()\n\tdefer gw.Unlock()\n\n\tdelete(gw.Destinations[key], container.ID)\n\treturn nil\n}\n\nfunc (gw *Gateway) Find(host string) *Destination {\n\tif len(gw.Destinations[host]) == 0 {\n\t\treturn nil\n\t}\n\n\tlist := []*Destination{}\n\tfor _, dst := range gw.Destinations[host] {\n\t\tlist = append(list, dst)\n\t}\n\n\treturn list[rand.Intn(len(list))]\n}\n\nfunc (gw *Gateway) Handle(w http.ResponseWriter, r *http.Request) {\n\tdestination := gw.Find(r.Host)\n\n\tlog.Printf(\"Request method=%s host=%s path=%s -> %s\\n\", r.Method, r.Host, r.RequestURI, destination)\n\n\tif destination == nil {\n\t\thttp.Error(w, \"No route\", http.StatusBadGateway)\n\t\treturn\n\t}\n\n\tdestination.proxy.ServeHTTP(w, r)\n}\n\nfunc (gw *Gateway) RenderDestinations(w http.ResponseWriter, r *http.Request) {\n\tresult := map[string][]string{}\n\n\tfor k, dstMap := range gw.Destinations {\n\t\tfor _, dst := range dstMap {\n\t\t\tresult[k] = append(result[k], dst.String())\n\t\t}\n\t}\n\n\tdata, _ := json.Marshal(result)\n\tfmt.Fprintf(w, \"%s\", data)\n}\n\nfunc (gw *Gateway) Start(bind string) error {\n\tlog.Printf(\"Starting gateway server on http:\/\/%s\\n\", bind)\n\n\thttp.HandleFunc(\"\/_destinations\", gw.RenderDestinations)\n\thttp.HandleFunc(\"\/\", gw.Handle)\n\treturn http.ListenAndServe(bind, nil)\n}\n<commit_msg>Use DOMAIN var instead of APP_DOMAIN<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"sync\"\n\n\tdocker \"github.com\/fsouza\/go-dockerclient\"\n)\n\ntype Gateway struct {\n\tDestinations map[string]DestinationMap\n\t*sync.Mutex\n}\n\nfunc NewGateway() *Gateway {\n\treturn &Gateway{\n\t\tmap[string]DestinationMap{},\n\t\t&sync.Mutex{},\n\t}\n}\n\nfunc (gw *Gateway) fetchDomain(c *docker.Container) string {\n\tfor _, v := range c.Config.Env {\n\t\tif strings.Contains(v, \"DOMAIN=\") {\n\t\t\treturn strings.Replace(v, \"DOMAIN=\", \"\", 1)\n\t\t}\n\t}\n\n\treturn c.ID\n}\n\nfunc (gw *Gateway) Add(container *docker.Container) error {\n\tlog.Println(\"Adding container:\", container.ID)\n\n\tkey := gw.fetchDomain(container)\n\n\tif gw.Destinations[key][container.ID] != nil {\n\t\treturn fmt.Errorf(\"Destination alreaady exists!\")\n\t}\n\n\tif len(container.Config.ExposedPorts) == 0 {\n\t\tlog.Printf(\"Container %s does not have any exposed ports\\n\", container.ID)\n\t\treturn nil\n\t}\n\n\tdest, err := NewDestination(container)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgw.Lock()\n\tdefer gw.Unlock()\n\n\tif gw.Destinations[key] == nil {\n\t\tgw.Destinations[key] = DestinationMap{}\n\t}\n\n\tgw.Destinations[key][container.ID] = dest\n\treturn nil\n}\n\nfunc (gw *Gateway) Remove(container *docker.Container) error {\n\tlog.Println(\"Removing container:\", container.ID)\n\tkey := gw.fetchDomain(container)\n\n\tif len(gw.Destinations[key]) == 0 {\n\t\treturn nil\n\t}\n\n\tgw.Lock()\n\tdefer gw.Unlock()\n\n\tdelete(gw.Destinations[key], container.ID)\n\treturn nil\n}\n\nfunc (gw *Gateway) Find(host string) *Destination {\n\tif len(gw.Destinations[host]) == 0 {\n\t\treturn nil\n\t}\n\n\tlist := []*Destination{}\n\tfor _, dst := range gw.Destinations[host] {\n\t\tlist = append(list, dst)\n\t}\n\n\treturn list[rand.Intn(len(list))]\n}\n\nfunc (gw *Gateway) Handle(w http.ResponseWriter, r *http.Request) {\n\tdestination := gw.Find(r.Host)\n\n\tlog.Printf(\"Request method=%s host=%s path=%s -> %s\\n\", r.Method, r.Host, r.RequestURI, destination)\n\n\tif destination == nil {\n\t\thttp.Error(w, \"No route\", http.StatusBadGateway)\n\t\treturn\n\t}\n\n\tdestination.proxy.ServeHTTP(w, r)\n}\n\nfunc (gw *Gateway) RenderDestinations(w http.ResponseWriter, r *http.Request) {\n\tresult := map[string][]string{}\n\n\tfor k, dstMap := range gw.Destinations {\n\t\tfor _, dst := range dstMap {\n\t\t\tresult[k] = append(result[k], dst.String())\n\t\t}\n\t}\n\n\tdata, _ := json.Marshal(result)\n\tfmt.Fprintf(w, \"%s\", data)\n}\n\nfunc (gw *Gateway) Start(bind string) error {\n\tlog.Printf(\"Starting gateway server on http:\/\/%s\\n\", bind)\n\n\thttp.HandleFunc(\"\/_destinations\", gw.RenderDestinations)\n\thttp.HandleFunc(\"\/\", gw.Handle)\n\treturn http.ListenAndServe(bind, nil)\n}\n<|endoftext|>"} {"text":"<commit_before>package moneybird\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\n\/\/ Because Moneybird schedules a background job when you create a new invoice, this test will fail when running too soon after a previous run.\nfunc TestInvoiceGatewayListAndDelete(t *testing.T) {\n\tinvoices, err := testClient.Invoice().List()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tfor _, invoice := range invoices {\n\t\terr := testClient.Invoice().Delete(invoice)\n\t\tif err != nil {\n\t\t\t\/\/ let's ignore this error for now... (see func doc)\n\t\t\tif err.Error() == \"moneybird: Sales invoice cannot be destroyed\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tt.Error(err)\n\t\t}\n\t}\n\n}\n\nfunc TestInvoiceGatewayCRUD(t *testing.T) {\n\tvar err error\n\t\/\/ create contact\n\tcontact := &Contact{\n\t\tEmail: \"johndoe@email.com\",\n\t\tFirstName: \"John\",\n\t\tLastName: \"Doe\",\n\t}\n\tcontact, err = testClient.Contact().Create(contact)\n\tif err != nil {\n\t\tt.Fatalf(\"ContactGateway.Create: %s\", err)\n\t}\n\n\t\/\/ delete contact (deferred)\n\tdefer func() {\n\t\terr = testClient.Contact().Delete(contact)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"ContactGateway.Delete: %s\", err)\n\t\t}\n\t}()\n\n\tgateway := testClient.Invoice()\n\t\/\/ create invoice\n\tinvoice := &Invoice{\n\t\tContactID: contact.ID,\n\t\tInvoiceDate: time.Now().Format(\"2006-01-02\"),\n\t\tDetails: []*InvoiceDetails{\n\t\t\t&InvoiceDetails{\n\t\t\t\tAmount: \"1\",\n\t\t\t\tPrice: \"10.00\",\n\t\t\t\tDescription: \"Test Service\",\n\t\t\t},\n\t\t},\n\t}\n\tinvoice, err = gateway.Create(invoice)\n\tif err != nil {\n\t\tt.Fatalf(\"InvoiceGateway.Create: %s\", err) \/\/ abandon test if invoice creation fails\n\t}\n\n\t\/\/ update invoice\n\tinvoice.Reference = \"my-reference\"\n\tinvoice, err = gateway.Update(invoice)\n\tif err != nil {\n\t\tt.Errorf(\"InvoiceGateway.Update: %s\", err)\n\t}\n\n\tif invoice.Reference != \"my-reference\" {\n\t\tt.Error(\"InvoiceGateway.Update: reference was not properly updated\")\n\t}\n\n\t\/\/ get invoice\n\tinvoice, err = gateway.Get(invoice.ID)\n\tif err != nil {\n\t\tt.Errorf(\"InvoiceGateway.Get: %s\", err)\n\t}\n\n\tif invoice.Contact.ID != contact.ID {\n\t\tt.Errorf(\"InvoiceGateway.Get: invoice contact ID does not match, got %#v\", invoice.Contact.ID)\n\t}\n\n\t\/\/ create invoice sending (send invoice)\n\terr = testClient.InvoiceSending().Create(invoice, &InvoiceSending{\n\t\tDeliveryMethod: \"Manual\",\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"InvoiceSendingGateway.Create: %s\", err)\n\t}\n\n\t\/\/ create invoice payment (mark invoice as paid)\n\terr = testClient.InvoicePayment().Create(invoice, &InvoicePayment{\n\t\tPrice: invoice.TotalUnpaid,\n\t\tPriceBase: invoice.TotalUnpaid,\n\t\tPaymentDate: time.Now().Format(\"2006-01-02\"),\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"InvoicePaymentGateway.Create: %s \", err)\n\t}\n\n\t\/\/ create invoice note\n\tnote, err := testClient.InvoiceNote().Create(invoice, &InvoiceNote{\n\t\tNote: \"my note\",\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"InvoiceNoteGateway.Create: %s\", err)\n\t}\n\n\tif note.Note != \"my note\" {\n\t\tt.Errorf(\"InvoiceNoteGateway.Create: note does not match input string. Got %#v\", note.Note)\n\t}\n\n\t\/\/ delete invoice note\n\terr = testClient.InvoiceNote().Delete(invoice, note)\n\tif err != nil {\n\t\tt.Errorf(\"InvoiceNoteGateway.Delete: %s\", err)\n\t}\n\n}\n<commit_msg>Set PriceBase as TotalUnpaidBase<commit_after>package moneybird\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\n\/\/ Because Moneybird schedules a background job when you create a new invoice, this test will fail when running too soon after a previous run.\nfunc TestInvoiceGatewayListAndDelete(t *testing.T) {\n\tinvoices, err := testClient.Invoice().List()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tfor _, invoice := range invoices {\n\t\terr := testClient.Invoice().Delete(invoice)\n\t\tif err != nil {\n\t\t\t\/\/ let's ignore this error for now... (see func doc)\n\t\t\tif err.Error() == \"moneybird: Sales invoice cannot be destroyed\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tt.Error(err)\n\t\t}\n\t}\n\n}\n\nfunc TestInvoiceGatewayCRUD(t *testing.T) {\n\tvar err error\n\t\/\/ create contact\n\tcontact := &Contact{\n\t\tEmail: \"johndoe@email.com\",\n\t\tFirstName: \"John\",\n\t\tLastName: \"Doe\",\n\t}\n\tcontact, err = testClient.Contact().Create(contact)\n\tif err != nil {\n\t\tt.Fatalf(\"ContactGateway.Create: %s\", err)\n\t}\n\n\t\/\/ delete contact (deferred)\n\tdefer func() {\n\t\terr = testClient.Contact().Delete(contact)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"ContactGateway.Delete: %s\", err)\n\t\t}\n\t}()\n\n\tgateway := testClient.Invoice()\n\t\/\/ create invoice\n\tinvoice := &Invoice{\n\t\tContactID: contact.ID,\n\t\tInvoiceDate: time.Now().Format(\"2006-01-02\"),\n\t\tDetails: []*InvoiceDetails{\n\t\t\t&InvoiceDetails{\n\t\t\t\tAmount: \"1\",\n\t\t\t\tPrice: \"10.00\",\n\t\t\t\tDescription: \"Test Service\",\n\t\t\t},\n\t\t},\n\t}\n\tinvoice, err = gateway.Create(invoice)\n\tif err != nil {\n\t\tt.Fatalf(\"InvoiceGateway.Create: %s\", err) \/\/ abandon test if invoice creation fails\n\t}\n\n\t\/\/ update invoice\n\tinvoice.Reference = \"my-reference\"\n\tinvoice, err = gateway.Update(invoice)\n\tif err != nil {\n\t\tt.Errorf(\"InvoiceGateway.Update: %s\", err)\n\t}\n\n\tif invoice.Reference != \"my-reference\" {\n\t\tt.Error(\"InvoiceGateway.Update: reference was not properly updated\")\n\t}\n\n\t\/\/ get invoice\n\tinvoice, err = gateway.Get(invoice.ID)\n\tif err != nil {\n\t\tt.Errorf(\"InvoiceGateway.Get: %s\", err)\n\t}\n\n\tif invoice.Contact.ID != contact.ID {\n\t\tt.Errorf(\"InvoiceGateway.Get: invoice contact ID does not match, got %#v\", invoice.Contact.ID)\n\t}\n\n\t\/\/ create invoice sending (send invoice)\n\terr = testClient.InvoiceSending().Create(invoice, &InvoiceSending{\n\t\tDeliveryMethod: \"Manual\",\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"InvoiceSendingGateway.Create: %s\", err)\n\t}\n\n\t\/\/ create invoice payment (mark invoice as paid)\n\terr = testClient.InvoicePayment().Create(invoice, &InvoicePayment{\n\t\tPrice: invoice.TotalUnpaid,\n\t\tPriceBase: invoice.TotalUnpaidBase,\n\t\tPaymentDate: time.Now().Format(\"2006-01-02\"),\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"InvoicePaymentGateway.Create: %s \", err)\n\t}\n\n\t\/\/ create invoice note\n\tnote, err := testClient.InvoiceNote().Create(invoice, &InvoiceNote{\n\t\tNote: \"my note\",\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"InvoiceNoteGateway.Create: %s\", err)\n\t}\n\n\tif note.Note != \"my note\" {\n\t\tt.Errorf(\"InvoiceNoteGateway.Create: note does not match input string. Got %#v\", note.Note)\n\t}\n\n\t\/\/ delete invoice note\n\terr = testClient.InvoiceNote().Delete(invoice, note)\n\tif err != nil {\n\t\tt.Errorf(\"InvoiceNoteGateway.Delete: %s\", err)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage etcdserver\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\n\tpb \"github.com\/coreos\/etcd\/etcdserver\/etcdserverpb\"\n\t\"github.com\/coreos\/etcd\/migrate\"\n\t\"github.com\/coreos\/etcd\/pkg\/pbutil\"\n\t\"github.com\/coreos\/etcd\/pkg\/types\"\n\t\"github.com\/coreos\/etcd\/raft\/raftpb\"\n\t\"github.com\/coreos\/etcd\/snap\"\n\t\"github.com\/coreos\/etcd\/version\"\n\t\"github.com\/coreos\/etcd\/wal\"\n\t\"github.com\/coreos\/etcd\/wal\/walpb\"\n)\n\ntype Storage interface {\n\t\/\/ Save function saves ents and state to the underlying stable storage.\n\t\/\/ Save MUST block until st and ents are on stable storage.\n\tSave(st raftpb.HardState, ents []raftpb.Entry) error\n\t\/\/ SaveSnap function saves snapshot to the underlying stable storage.\n\tSaveSnap(snap raftpb.Snapshot) error\n\t\/\/ Close closes the Storage and performs finalization.\n\tClose() error\n}\n\ntype storage struct {\n\t*wal.WAL\n\t*snap.Snapshotter\n}\n\nfunc NewStorage(w *wal.WAL, s *snap.Snapshotter) Storage {\n\treturn &storage{w, s}\n}\n\n\/\/ SaveSnap saves the snapshot to disk and release the locked\n\/\/ wal files since they will not be used.\nfunc (st *storage) SaveSnap(snap raftpb.Snapshot) error {\n\twalsnap := walpb.Snapshot{\n\t\tIndex: snap.Metadata.Index,\n\t\tTerm: snap.Metadata.Term,\n\t}\n\terr := st.WAL.SaveSnapshot(walsnap)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = st.Snapshotter.SaveSnap(snap)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = st.WAL.ReleaseLockTo(snap.Metadata.Index)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) {\n\tvar (\n\t\terr error\n\t\twmetadata []byte\n\t)\n\n\tfor i := 0; i < 2; i++ {\n\t\tif w, err = wal.Open(waldir, snap); err != nil {\n\t\t\tlog.Fatalf(\"etcdserver: open wal error: %v\", err)\n\t\t}\n\t\tif wmetadata, st, ents, err = w.ReadAll(); err != nil {\n\t\t\tw.Close()\n\t\t\tif i != 0 || err != io.ErrUnexpectedEOF {\n\t\t\t\tlog.Fatalf(\"etcdserver: read wal error: %v\", err)\n\t\t\t}\n\t\t\tif !wal.Repair(waldir) {\n\t\t\t\tlog.Fatalf(\"etcdserver: WAL error (%v) cannot be repaired\", err)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"etcdserver: repaired WAL error (%v)\", err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\tvar metadata pb.Metadata\n\tpbutil.MustUnmarshal(&metadata, wmetadata)\n\tid = types.ID(metadata.NodeID)\n\tcid = types.ID(metadata.ClusterID)\n\treturn\n}\n\n\/\/ upgradeWAL converts an older version of the etcdServer data to the newest version.\n\/\/ It must ensure that, after upgrading, the most recent version is present.\nfunc upgradeDataDir(baseDataDir string, name string, ver version.DataDirVersion) error {\n\tswitch ver {\n\tcase version.DataDir0_4:\n\t\tlog.Print(\"etcdserver: converting v0.4 log to v2.0\")\n\t\terr := migrate.Migrate4To2(baseDataDir, name)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"etcdserver: failed migrating data-dir: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tfallthrough\n\tcase version.DataDir2_0:\n\t\terr := makeMemberDir(baseDataDir)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfallthrough\n\tcase version.DataDir2_0_1:\n\t\tfallthrough\n\tdefault:\n\t\tlog.Printf(\"etcdserver: datadir is valid for the 2.0.1 format\")\n\t}\n\treturn nil\n}\n\nfunc makeMemberDir(dir string) error {\n\tmembdir := path.Join(dir, \"member\")\n\t_, err := os.Stat(membdir)\n\tswitch {\n\tcase err == nil:\n\t\treturn nil\n\tcase !os.IsNotExist(err):\n\t\treturn err\n\t}\n\tif err := os.MkdirAll(membdir, 0700); err != nil {\n\t\treturn err\n\t}\n\tnames := []string{\"snap\", \"wal\"}\n\tfor _, name := range names {\n\t\tif err := os.Rename(path.Join(dir, name), path.Join(membdir, name)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>etcdserver: make the wal repairing logic clear<commit_after>\/\/ Copyright 2015 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage etcdserver\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\n\tpb \"github.com\/coreos\/etcd\/etcdserver\/etcdserverpb\"\n\t\"github.com\/coreos\/etcd\/migrate\"\n\t\"github.com\/coreos\/etcd\/pkg\/pbutil\"\n\t\"github.com\/coreos\/etcd\/pkg\/types\"\n\t\"github.com\/coreos\/etcd\/raft\/raftpb\"\n\t\"github.com\/coreos\/etcd\/snap\"\n\t\"github.com\/coreos\/etcd\/version\"\n\t\"github.com\/coreos\/etcd\/wal\"\n\t\"github.com\/coreos\/etcd\/wal\/walpb\"\n)\n\ntype Storage interface {\n\t\/\/ Save function saves ents and state to the underlying stable storage.\n\t\/\/ Save MUST block until st and ents are on stable storage.\n\tSave(st raftpb.HardState, ents []raftpb.Entry) error\n\t\/\/ SaveSnap function saves snapshot to the underlying stable storage.\n\tSaveSnap(snap raftpb.Snapshot) error\n\t\/\/ Close closes the Storage and performs finalization.\n\tClose() error\n}\n\ntype storage struct {\n\t*wal.WAL\n\t*snap.Snapshotter\n}\n\nfunc NewStorage(w *wal.WAL, s *snap.Snapshotter) Storage {\n\treturn &storage{w, s}\n}\n\n\/\/ SaveSnap saves the snapshot to disk and release the locked\n\/\/ wal files since they will not be used.\nfunc (st *storage) SaveSnap(snap raftpb.Snapshot) error {\n\twalsnap := walpb.Snapshot{\n\t\tIndex: snap.Metadata.Index,\n\t\tTerm: snap.Metadata.Term,\n\t}\n\terr := st.WAL.SaveSnapshot(walsnap)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = st.Snapshotter.SaveSnap(snap)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = st.WAL.ReleaseLockTo(snap.Metadata.Index)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) {\n\tvar (\n\t\terr error\n\t\twmetadata []byte\n\t)\n\n\trepaired := false\n\tfor {\n\t\tif w, err = wal.Open(waldir, snap); err != nil {\n\t\t\tlog.Fatalf(\"etcdserver: open wal error: %v\", err)\n\t\t}\n\t\tif wmetadata, st, ents, err = w.ReadAll(); err != nil {\n\t\t\tw.Close()\n\t\t\t\/\/ we can only repair ErrUnexpectedEOF and we never repair twice.\n\t\t\tif repaired || err != io.ErrUnexpectedEOF {\n\t\t\t\tlog.Fatalf(\"etcdserver: read wal error (%v) and cannot be repaired\", err)\n\t\t\t}\n\t\t\tif !wal.Repair(waldir) {\n\t\t\t\tlog.Fatalf(\"etcdserver: WAL error (%v) cannot be repaired\", err)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"etcdserver: repaired WAL error (%v)\", err)\n\t\t\t\trepaired = true\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\tvar metadata pb.Metadata\n\tpbutil.MustUnmarshal(&metadata, wmetadata)\n\tid = types.ID(metadata.NodeID)\n\tcid = types.ID(metadata.ClusterID)\n\treturn\n}\n\n\/\/ upgradeWAL converts an older version of the etcdServer data to the newest version.\n\/\/ It must ensure that, after upgrading, the most recent version is present.\nfunc upgradeDataDir(baseDataDir string, name string, ver version.DataDirVersion) error {\n\tswitch ver {\n\tcase version.DataDir0_4:\n\t\tlog.Print(\"etcdserver: converting v0.4 log to v2.0\")\n\t\terr := migrate.Migrate4To2(baseDataDir, name)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"etcdserver: failed migrating data-dir: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tfallthrough\n\tcase version.DataDir2_0:\n\t\terr := makeMemberDir(baseDataDir)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfallthrough\n\tcase version.DataDir2_0_1:\n\t\tfallthrough\n\tdefault:\n\t\tlog.Printf(\"etcdserver: datadir is valid for the 2.0.1 format\")\n\t}\n\treturn nil\n}\n\nfunc makeMemberDir(dir string) error {\n\tmembdir := path.Join(dir, \"member\")\n\t_, err := os.Stat(membdir)\n\tswitch {\n\tcase err == nil:\n\t\treturn nil\n\tcase !os.IsNotExist(err):\n\t\treturn err\n\t}\n\tif err := os.MkdirAll(membdir, 0700); err != nil {\n\t\treturn err\n\t}\n\tnames := []string{\"snap\", \"wal\"}\n\tfor _, name := range names {\n\t\tif err := os.Rename(path.Join(dir, name), path.Join(membdir, name)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 FactomProject Authors. All rights reserved.\n\/\/ Use of this source code is governed by the MIT license\n\/\/ that can be found in the LICENSE file.\n\npackage process\n\nimport (\n\t\"fmt\"\n\t\"github.com\/FactomProject\/FactomCode\/common\"\n\t\"github.com\/FactomProject\/FactomCode\/factomlog\"\n\t\"github.com\/FactomProject\/FactomCode\/util\"\n\t\"github.com\/FactomProject\/factoid\/block\"\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"sort\"\n)\n\nfunc GetEntryCreditBalance(pubKey *[32]byte) (int32, error) {\n\n\treturn eCreditMap[string(pubKey[:])], nil\n}\n\nfunc exportDChain(chain *common.DChain) {\n\tif len(chain.Blocks) == 0 || procLog.Level() < factomlog.Info {\n\t\t\/\/log.Println(\"no blocks to save for chain: \" + string (*chain.ChainID))\n\t\treturn\n\t}\n\n\t\/\/ get all ecBlocks from db\n\tdBlocks, _ := db.FetchAllDBlocks()\n\tsort.Sort(util.ByDBlockIDAccending(dBlocks))\n\n\tfor _, block := range dBlocks {\n\n\t\tdata, err := block.MarshalBinary()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tstrChainID := chain.ChainID.String()\n\t\tif fileNotExists(dataStorePath + strChainID) {\n\t\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\t\tif err == nil {\n\t\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t\t} else {\n\t\t\t\tprocLog.Error(err)\n\t\t\t}\n\t\t}\n\t\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.Header.DBHeight), data, 0777)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\nfunc exportEChain(chain *common.EChain) {\n\tif procLog.Level() < factomlog.Info {\n\t\treturn\n\t}\n\n\teBlocks, _ := db.FetchAllEBlocksByChain(chain.ChainID)\n\tsort.Sort(util.ByEBlockIDAccending(*eBlocks))\n\n\tfor _, block := range *eBlocks {\n\n\t\tdata, err := block.MarshalBinary()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tstrChainID := chain.ChainID.String()\n\t\tif fileNotExists(dataStorePath + strChainID) {\n\t\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\t\tif err == nil {\n\t\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t\t} else {\n\t\t\t\tprocLog.Error(err)\n\t\t\t}\n\t\t}\n\n\t\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.Header.EBHeight), data, 0777)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\nfunc exportECChain(chain *common.ECChain) {\n\tif procLog.Level() < factomlog.Info {\n\t\treturn\n\t}\n\t\/\/ get all ecBlocks from db\n\tecBlocks, _ := db.FetchAllECBlocks()\n\tsort.Sort(util.ByECBlockIDAccending(ecBlocks))\n\n\tfor _, block := range ecBlocks {\n\t\tdata, err := block.MarshalBinary()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tstrChainID := chain.ChainID.String()\n\t\tif fileNotExists(dataStorePath + strChainID) {\n\t\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\t\tif err == nil {\n\t\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t\t} else {\n\t\t\t\tprocLog.Error(err)\n\t\t\t}\n\t\t}\n\t\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.Header.DBHeight), data, 0777)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\nfunc exportAChain(chain *common.AdminChain) {\n\tif procLog.Level() < factomlog.Info {\n\t\treturn\n\t}\n\t\/\/ get all aBlocks from db\n\taBlocks, _ := db.FetchAllABlocks()\n\tsort.Sort(util.ByABlockIDAccending(aBlocks))\n\n\tfor _, block := range aBlocks {\n\n\t\tdata, err := block.MarshalBinary()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tstrChainID := chain.ChainID.String()\n\t\tif fileNotExists(dataStorePath + strChainID) {\n\t\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\t\tif err == nil {\n\t\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t\t} else {\n\t\t\t\tprocLog.Error(err)\n\t\t\t}\n\t\t}\n\t\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.Header.DBHeight), data, 0777)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\nfunc exportFctChain(chain *common.FctChain) {\n\tif procLog.Level() < factomlog.Info {\n\t\treturn\n\t}\n\t\/\/ get all aBlocks from db\n\tFBlocks, _ := db.FetchAllFBlocks()\n\tsort.Sort(util.ByFBlockIDAccending(FBlocks))\n\n\tfor _, block := range FBlocks {\n\n\t\tdata, err := block.MarshalBinary()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tstrChainID := chain.ChainID.String()\n\t\tif fileNotExists(dataStorePath + strChainID) {\n\t\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\t\tif err == nil {\n\t\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t\t} else {\n\t\t\t\tprocLog.Error(err)\n\t\t\t}\n\t\t}\n\t\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.GetDBHeight()), data, 0777)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\n\/\/ to export individual block once at a time - for debugging ------------------------\nfunc exportDBlock(block *common.DirectoryBlock) {\n\tif block == nil || procLog.Level() < factomlog.Info {\n\t\t\/\/log.Println(\"no blocks to save for chain: \" + string (*chain.ChainID))\n\t\treturn\n\t}\n\n\tdata, err := block.MarshalBinary()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tstrChainID := dchain.ChainID.String()\n\tif fileNotExists(dataStorePath + strChainID) {\n\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\tif err == nil {\n\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t} else {\n\t\t\tprocLog.Error(err)\n\t\t}\n\t}\n\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.Header.DBHeight), data, 0777)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}\n\nfunc exportEBlock(block *common.EBlock) {\n\tif block == nil || procLog.Level() < factomlog.Info {\n\t\treturn\n\t}\n\n\tdata, err := block.MarshalBinary()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tstrChainID := block.Header.ChainID.String()\n\tif fileNotExists(dataStorePath + strChainID) {\n\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\tif err == nil {\n\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t} else {\n\t\t\tprocLog.Error(err)\n\t\t}\n\t}\n\n\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.Header.DBHeight), data, 0777)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}\n\nfunc exportECBlock(block *common.ECBlock) {\n\tif block == nil || procLog.Level() < factomlog.Info {\n\t\treturn\n\t}\n\n\tdata, err := block.MarshalBinary()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tstrChainID := block.Header.ECChainID.String()\n\tif fileNotExists(dataStorePath + strChainID) {\n\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\tif err == nil {\n\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t} else {\n\t\t\tprocLog.Error(err)\n\t\t}\n\t}\n\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.Header.DBHeight), data, 0777)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}\n\nfunc exportABlock(block *common.AdminBlock) {\n\tif block == nil || procLog.Level() < factomlog.Info {\n\t\treturn\n\t}\n\n\tdata, err := block.MarshalBinary()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tstrChainID := block.Header.AdminChainID.String()\n\tif fileNotExists(dataStorePath + strChainID) {\n\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\tif err == nil {\n\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t} else {\n\t\t\tprocLog.Error(err)\n\t\t}\n\t}\n\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.Header.DBHeight), data, 0777)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}\n\nfunc exportFctBlock(block block.IFBlock) {\n\tif block == nil || procLog.Level() < factomlog.Info {\n\t\treturn\n\t}\n\n\tdata, err := block.MarshalBinary()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tstrChainID := block.GetChainID().String()\n\tif fileNotExists(dataStorePath + strChainID) {\n\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\tif err == nil {\n\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t} else {\n\t\t\tprocLog.Error(err)\n\t\t}\n\t}\n\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.GetDBHeight()), data, 0777)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}\n\n\/\/--------------------------------------\n\nfunc getPrePaidChainKey(entryHash *common.Hash, chainIDHash *common.Hash) string {\n\treturn chainIDHash.String() + entryHash.String()\n}\n\nfunc copyCreditMap(\n\toriginalMap map[string]int32,\n\tnewMap map[string]int32) {\n\tnewMap = make(map[string]int32)\n\n\t\/\/ copy every element from the original map\n\tfor k, v := range originalMap {\n\t\tnewMap[k] = v\n\t}\n\n}\n\nfunc printCreditMap() {\n\tprocLog.Debug(\"eCreditMap:\")\n\tfor key := range eCreditMap {\n\t\tprocLog.Debugf(\"Key: %x Value %d\\n\", key, eCreditMap[key])\n\t}\n}\n\nfunc fileNotExists(name string) bool {\n\t_, err := os.Stat(name)\n\tif os.IsNotExist(err) {\n\t\treturn true\n\t}\n\treturn err != nil\n}\n\n\/\/ HaveBlockInDB returns whether or not the chain instance has the block represented\n\/\/ by the passed hash. This includes checking the various places a block can\n\/\/ be like part of the main chain, on a side chain, or in the orphan pool.\n\/\/\n\/\/ This function is NOT safe for concurrent access.\nfunc HaveBlockInDB(hash *common.Hash) (bool, error) {\n\tutil.Trace(spew.Sdump(hash))\n\n\tif hash == nil || dchain.Blocks == nil || len(dchain.Blocks) == 0 {\n\t\treturn false, nil\n\t}\n\n\t\/\/ double check the block ids\n\tfor i := 0; i < len(dchain.Blocks); i = i + 1 {\n\t\tif dchain.Blocks[i] == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif dchain.Blocks[i].DBHash == nil {\n\t\t\tdchain.Blocks[i].DBHash, _ = common.CreateHash(dchain.Blocks[i])\n\t\t}\n\t\tif dchain.Blocks[i].DBHash.IsSameAs(hash) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}\n<commit_msg>Changed EBock num in export for debugging<commit_after>\/\/ Copyright 2015 FactomProject Authors. All rights reserved.\n\/\/ Use of this source code is governed by the MIT license\n\/\/ that can be found in the LICENSE file.\n\npackage process\n\nimport (\n\t\"fmt\"\n\t\"github.com\/FactomProject\/FactomCode\/common\"\n\t\"github.com\/FactomProject\/FactomCode\/factomlog\"\n\t\"github.com\/FactomProject\/FactomCode\/util\"\n\t\"github.com\/FactomProject\/factoid\/block\"\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"sort\"\n)\n\nfunc GetEntryCreditBalance(pubKey *[32]byte) (int32, error) {\n\n\treturn eCreditMap[string(pubKey[:])], nil\n}\n\nfunc exportDChain(chain *common.DChain) {\n\tif len(chain.Blocks) == 0 || procLog.Level() < factomlog.Info {\n\t\t\/\/log.Println(\"no blocks to save for chain: \" + string (*chain.ChainID))\n\t\treturn\n\t}\n\n\t\/\/ get all ecBlocks from db\n\tdBlocks, _ := db.FetchAllDBlocks()\n\tsort.Sort(util.ByDBlockIDAccending(dBlocks))\n\n\tfor _, block := range dBlocks {\n\n\t\tdata, err := block.MarshalBinary()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tstrChainID := chain.ChainID.String()\n\t\tif fileNotExists(dataStorePath + strChainID) {\n\t\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\t\tif err == nil {\n\t\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t\t} else {\n\t\t\t\tprocLog.Error(err)\n\t\t\t}\n\t\t}\n\t\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.Header.DBHeight), data, 0777)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\nfunc exportEChain(chain *common.EChain) {\n\tif procLog.Level() < factomlog.Info {\n\t\treturn\n\t}\n\n\teBlocks, _ := db.FetchAllEBlocksByChain(chain.ChainID)\n\tsort.Sort(util.ByEBlockIDAccending(*eBlocks))\n\n\tfor _, block := range *eBlocks {\n\n\t\tdata, err := block.MarshalBinary()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tstrChainID := chain.ChainID.String()\n\t\tif fileNotExists(dataStorePath + strChainID) {\n\t\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\t\tif err == nil {\n\t\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t\t} else {\n\t\t\t\tprocLog.Error(err)\n\t\t\t}\n\t\t}\n\n\t\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.Header.DBHeight), data, 0777)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\nfunc exportECChain(chain *common.ECChain) {\n\tif procLog.Level() < factomlog.Info {\n\t\treturn\n\t}\n\t\/\/ get all ecBlocks from db\n\tecBlocks, _ := db.FetchAllECBlocks()\n\tsort.Sort(util.ByECBlockIDAccending(ecBlocks))\n\n\tfor _, block := range ecBlocks {\n\t\tdata, err := block.MarshalBinary()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tstrChainID := chain.ChainID.String()\n\t\tif fileNotExists(dataStorePath + strChainID) {\n\t\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\t\tif err == nil {\n\t\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t\t} else {\n\t\t\t\tprocLog.Error(err)\n\t\t\t}\n\t\t}\n\t\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.Header.DBHeight), data, 0777)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\nfunc exportAChain(chain *common.AdminChain) {\n\tif procLog.Level() < factomlog.Info {\n\t\treturn\n\t}\n\t\/\/ get all aBlocks from db\n\taBlocks, _ := db.FetchAllABlocks()\n\tsort.Sort(util.ByABlockIDAccending(aBlocks))\n\n\tfor _, block := range aBlocks {\n\n\t\tdata, err := block.MarshalBinary()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tstrChainID := chain.ChainID.String()\n\t\tif fileNotExists(dataStorePath + strChainID) {\n\t\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\t\tif err == nil {\n\t\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t\t} else {\n\t\t\t\tprocLog.Error(err)\n\t\t\t}\n\t\t}\n\t\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.Header.DBHeight), data, 0777)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\nfunc exportFctChain(chain *common.FctChain) {\n\tif procLog.Level() < factomlog.Info {\n\t\treturn\n\t}\n\t\/\/ get all aBlocks from db\n\tFBlocks, _ := db.FetchAllFBlocks()\n\tsort.Sort(util.ByFBlockIDAccending(FBlocks))\n\n\tfor _, block := range FBlocks {\n\n\t\tdata, err := block.MarshalBinary()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tstrChainID := chain.ChainID.String()\n\t\tif fileNotExists(dataStorePath + strChainID) {\n\t\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\t\tif err == nil {\n\t\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t\t} else {\n\t\t\t\tprocLog.Error(err)\n\t\t\t}\n\t\t}\n\t\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.GetDBHeight()), data, 0777)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\n\/\/ to export individual block once at a time - for debugging ------------------------\nfunc exportDBlock(block *common.DirectoryBlock) {\n\tif block == nil || procLog.Level() < factomlog.Info {\n\t\t\/\/log.Println(\"no blocks to save for chain: \" + string (*chain.ChainID))\n\t\treturn\n\t}\n\n\tdata, err := block.MarshalBinary()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tstrChainID := dchain.ChainID.String()\n\tif fileNotExists(dataStorePath + strChainID) {\n\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\tif err == nil {\n\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t} else {\n\t\t\tprocLog.Error(err)\n\t\t}\n\t}\n\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.Header.DBHeight), data, 0777)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}\n\nfunc exportEBlock(block *common.EBlock) {\n\tif block == nil || procLog.Level() < factomlog.Info {\n\t\treturn\n\t}\n\n\tdata, err := block.MarshalBinary()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tstrChainID := block.Header.ChainID.String()\n\tif fileNotExists(dataStorePath + strChainID) {\n\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\tif err == nil {\n\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t} else {\n\t\t\tprocLog.Error(err)\n\t\t}\n\t}\n\n\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.Header.DBHeight), data, 0777)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}\n\nfunc exportECBlock(block *common.ECBlock) {\n\tif block == nil || procLog.Level() < factomlog.Info {\n\t\treturn\n\t}\n\n\tdata, err := block.MarshalBinary()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tstrChainID := block.Header.ECChainID.String()\n\tif fileNotExists(dataStorePath + strChainID) {\n\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\tif err == nil {\n\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t} else {\n\t\t\tprocLog.Error(err)\n\t\t}\n\t}\n\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.Header.DBHeight), data, 0777)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}\n\nfunc exportABlock(block *common.AdminBlock) {\n\tif block == nil || procLog.Level() < factomlog.Info {\n\t\treturn\n\t}\n\n\tdata, err := block.MarshalBinary()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tstrChainID := block.Header.AdminChainID.String()\n\tif fileNotExists(dataStorePath + strChainID) {\n\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\tif err == nil {\n\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t} else {\n\t\t\tprocLog.Error(err)\n\t\t}\n\t}\n\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.Header.DBHeight), data, 0777)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}\n\nfunc exportFctBlock(block block.IFBlock) {\n\tif block == nil || procLog.Level() < factomlog.Info {\n\t\treturn\n\t}\n\n\tdata, err := block.MarshalBinary()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tstrChainID := block.GetChainID().String()\n\tif fileNotExists(dataStorePath + strChainID) {\n\t\terr := os.MkdirAll(dataStorePath+strChainID, 0777)\n\t\tif err == nil {\n\t\t\tprocLog.Info(\"Created directory \" + dataStorePath + strChainID)\n\t\t} else {\n\t\t\tprocLog.Error(err)\n\t\t}\n\t}\n\terr = ioutil.WriteFile(fmt.Sprintf(dataStorePath+strChainID+\"\/store.%09d.block\", block.GetDBHeight()), data, 0777)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}\n\n\/\/--------------------------------------\n\nfunc getPrePaidChainKey(entryHash *common.Hash, chainIDHash *common.Hash) string {\n\treturn chainIDHash.String() + entryHash.String()\n}\n\nfunc copyCreditMap(\n\toriginalMap map[string]int32,\n\tnewMap map[string]int32) {\n\tnewMap = make(map[string]int32)\n\n\t\/\/ copy every element from the original map\n\tfor k, v := range originalMap {\n\t\tnewMap[k] = v\n\t}\n\n}\n\nfunc printCreditMap() {\n\tprocLog.Debug(\"eCreditMap:\")\n\tfor key := range eCreditMap {\n\t\tprocLog.Debugf(\"Key: %x Value %d\\n\", key, eCreditMap[key])\n\t}\n}\n\nfunc fileNotExists(name string) bool {\n\t_, err := os.Stat(name)\n\tif os.IsNotExist(err) {\n\t\treturn true\n\t}\n\treturn err != nil\n}\n\n\/\/ HaveBlockInDB returns whether or not the chain instance has the block represented\n\/\/ by the passed hash. This includes checking the various places a block can\n\/\/ be like part of the main chain, on a side chain, or in the orphan pool.\n\/\/\n\/\/ This function is NOT safe for concurrent access.\nfunc HaveBlockInDB(hash *common.Hash) (bool, error) {\n\tutil.Trace(spew.Sdump(hash))\n\n\tif hash == nil || dchain.Blocks == nil || len(dchain.Blocks) == 0 {\n\t\treturn false, nil\n\t}\n\n\t\/\/ double check the block ids\n\tfor i := 0; i < len(dchain.Blocks); i = i + 1 {\n\t\tif dchain.Blocks[i] == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif dchain.Blocks[i].DBHash == nil {\n\t\t\tdchain.Blocks[i].DBHash, _ = common.CreateHash(dchain.Blocks[i])\n\t\t}\n\t\tif dchain.Blocks[i].DBHash.IsSameAs(hash) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package gen\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"go\/format\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"text\/template\"\n\t\"time\"\n)\n\n\/\/ dirMetadata is the location of the x-common repository\n\/\/ on the filesystem.\n\/\/ We're making the assumption that the x-common repository\n\/\/ has been cloned to the same parent directory as the xgo\n\/\/ repository. E.g.\n\/\/\n\/\/ $ tree -L 1 .\n\/\/ .\n\/\/ ├── x-common\n\/\/ └── xgo\nvar dirMetadata string\n\n\/\/ dirExercise is the location that the test cases should be generated to.\n\/\/ This assumes that the generator script lives in the .meta directory within\n\/\/ the exercise directory. Falls back to the present working directory.\nvar dirExercise string\n\n\/\/ genClient creates an http client with a 10 second timeout so we don't get\n\/\/ stuck waiting for a response.\nvar genClient = &http.Client{Timeout: 10 * time.Second}\n\nconst (\n\t\/\/ canonicalDataURL is the URL for the raw canonical-data.json data,\n\t\/\/ requires exercise name.\n\tcanonicalDataURL = \"https:\/\/raw.githubusercontent.com\/exercism\/x-common\/master\/exercises\/%s\/canonical-data.json\"\n\t\/\/ commitsURL is the GitHub api endpoint for the canonical-data.json\n\t\/\/ file commit history, requires exercise name.\n\tcommitsURL = \"https:\/\/api.github.com\/repos\/exercism\/x-common\/commits?path=exercises\/%s\/canonical-data.json\"\n)\n\n\/\/ Header tells how the test data was generated, for display in the header of cases_test.go\ntype Header struct {\n\t\/\/ Ori is a deprecated short name for Origin.\n\t\/\/ TODO: Remove Ori once everything switches to Origin.\n\tOri string\n\tOrigin string\n\tCommit string\n\tVersion string\n}\n\nfunc (h Header) String() string {\n\ts := fmt.Sprintf(\"\/\/ Source: %s\\n\", h.Origin)\n\tif h.Commit != \"\" {\n\t\ts += fmt.Sprintf(\"\/\/ Commit: %s\\n\", h.Commit)\n\t}\n\tif h.Version != \"\" {\n\t\ts += fmt.Sprintf(\"\/\/ x-common version: %s\\n\", h.Version)\n\t}\n\treturn s\n}\n\nfunc init() {\n\tif _, path, _, ok := runtime.Caller(0); ok {\n\t\tdirMetadata = filepath.Join(path, \"..\", \"..\", \"..\", \"x-common\")\n\t}\n\tif _, path, _, ok := runtime.Caller(2); ok {\n\t\tdirExercise = filepath.Join(path, \"..\", \"..\")\n\t}\n\tif dirExercise == \"\" {\n\t\tdirExercise = \".\"\n\t}\n}\n\n\/\/ Gen generates the exercise cases_test.go file from the relevant canonical-data.json\nfunc Gen(exercise string, j interface{}, t *template.Template) error {\n\tif dirMetadata == \"\" {\n\t\treturn errors.New(\"unable to determine current path\")\n\t}\n\tjFile := filepath.Join(\"exercises\", exercise, \"canonical-data.json\")\n\t\/\/ try to find and read the local json source file\n\tlog.Printf(\"[LOCAL] fetching %s test data\\n\", exercise)\n\tjPath, jOrigin, jCommit := getLocal(jFile)\n\tif jPath != \"\" {\n\t\tlog.Printf(\"[LOCAL] source: %s\\n\", jPath)\n\t}\n\tjSrc, err := ioutil.ReadFile(filepath.Join(jPath, jFile))\n\tif err != nil {\n\t\t\/\/ fetch json data remotely if there's no local file\n\t\tlog.Println(\"[LOCAL] No test data found\")\n\t\tlog.Printf(\"[REMOTE] fetching %s test data\\n\", exercise)\n\t\tjSrc, jOrigin, jCommit, err = getRemote(exercise)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ unmarshal the json source to a Go structure\n\tif err = json.Unmarshal(jSrc, j); err != nil {\n\t\t\/\/ This error message is usually enough if the problem is a wrong\n\t\t\/\/ data structure defined here. Sadly it doesn't locate the error well\n\t\t\/\/ in the case of invalid JSON. Use a real validator tool if you can't\n\t\t\/\/ spot the problem right away.\n\t\treturn fmt.Errorf(`unexpected data structure: %v`, err)\n\t}\n\n\t\/\/ These fields are guaranteed to be in every problem\n\tvar commonMetadata struct {\n\t\tVersion string\n\t}\n\tif err := json.Unmarshal(jSrc, &commonMetadata); err != nil {\n\t\treturn fmt.Errorf(`Didn't contain version: %v`, err)\n\t}\n\n\t\/\/ package up a little meta data\n\td := struct {\n\t\tHeader\n\t\tJ interface{}\n\t}{Header{\n\t\tOri: jOrigin,\n\t\tOrigin: jOrigin,\n\t\tCommit: jCommit,\n\t\tVersion: commonMetadata.Version,\n\t}, j}\n\n\t\/\/ render the Go test cases\n\tvar b bytes.Buffer\n\tif err = t.Execute(&b, &d); err != nil {\n\t\treturn err\n\t}\n\t\/\/ clean it up\n\tsrc, err := format.Source(b.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ write output file for the Go test cases.\n\treturn ioutil.WriteFile(filepath.Join(dirExercise, \"cases_test.go\"), src, 0666)\n}\n\nfunc getLocal(jFile string) (jPath, jOrigin, jCommit string) {\n\t\/\/ Ideally draw from a .json which is pulled from the official x-common\n\t\/\/ repository. For development however, accept a file in current directory\n\t\/\/ if there is no .json in source control. Also allow an override in any\n\t\/\/ case by environment variable.\n\tif jPath = os.Getenv(\"EXTEST\"); jPath > \"\" {\n\t\treturn jPath, \"local file\", \"\" \/\/ override\n\t}\n\tc := exec.Command(\"git\", \"log\", \"-1\", \"--oneline\", jFile)\n\tc.Dir = dirMetadata\n\tori, err := c.Output()\n\tif err != nil {\n\t\treturn \"\", \"local file\", \"\" \/\/ no source control\n\t}\n\tif _, err = os.Stat(filepath.Join(c.Dir, jFile)); err != nil {\n\t\treturn \"\", \"local file\", \"\" \/\/ not in source control\n\t}\n\t\/\/ good. return source control dir and commit.\n\treturn c.Dir, \"exercism\/x-common\", string(bytes.TrimSpace(ori))\n}\n\nfunc getRemote(exercise string) (body []byte, jOrigin string, jCommit string, err error) {\n\turl := fmt.Sprintf(canonicalDataURL, exercise)\n\tresp, err := genClient.Get(url)\n\tif err != nil {\n\t\treturn []byte{}, \"\", \"\", err\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn []byte{}, \"\", \"\", fmt.Errorf(\"error fetching remote data: %s\", resp.Status)\n\t}\n\tdefer resp.Body.Close()\n\tbody, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn []byte{}, \"\", \"\", err\n\t}\n\tc, err := getRemoteCommit(exercise)\n\tif err != nil {\n\t\t\/\/ we always expect to have the commit in the cases_test.go\n\t\t\/\/ file, so return the error if we can't fetch it\n\t\treturn []byte{}, \"\", \"\", err\n\t}\n\tlog.Printf(\"[REMOTE] source: %s\\n\", url)\n\treturn body, \"exercism\/x-common\", c, nil\n}\n\nfunc getRemoteCommit(exercise string) (string, error) {\n\ttype Commits struct {\n\t\tSha string\n\t\tCommit struct {\n\t\t\tMessage string\n\t\t}\n\t}\n\tresp, err := genClient.Get(fmt.Sprintf(commitsURL, exercise))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\tvar c []Commits\n\terr = json.NewDecoder(resp.Body).Decode(&c)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn fmt.Sprintf(\"%s %s\", c[0].Sha[0:7], c[0].Commit.Message), nil\n}\n<commit_msg>gen.go: minor formatting fixes<commit_after>package gen\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"go\/format\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"text\/template\"\n\t\"time\"\n)\n\n\/\/ dirMetadata is the location of the x-common repository on the filesystem.\n\/\/ We're making the assumption that the x-common repository has been cloned to\n\/\/ the same parent directory as the xgo repository.\n\/\/ E.g.\n\/\/\n\/\/ $ tree -L 1 .\n\/\/ .\n\/\/ ├── x-common\n\/\/ └── xgo\nvar dirMetadata string\n\n\/\/ dirExercise is the location that the test cases should be generated to.\n\/\/ This assumes that the generator script lives in the .meta directory within\n\/\/ the exercise directory. Falls back to the present working directory.\nvar dirExercise string\n\n\/\/ genClient creates an http client with a 10 second timeout so we don't get\n\/\/ stuck waiting for a response.\nvar genClient = &http.Client{Timeout: 10 * time.Second}\n\nconst (\n\t\/\/ canonicalDataURL is the URL for the raw canonical-data.json data,\n\t\/\/ requires exercise name.\n\tcanonicalDataURL = \"https:\/\/raw.githubusercontent.com\/exercism\/x-common\/master\/exercises\/%s\/canonical-data.json\"\n\t\/\/ commitsURL is the GitHub api endpoint for the canonical-data.json\n\t\/\/ file commit history, requires exercise name.\n\tcommitsURL = \"https:\/\/api.github.com\/repos\/exercism\/x-common\/commits?path=exercises\/%s\/canonical-data.json\"\n)\n\n\/\/ Header tells how the test data was generated, for display in the header of\n\/\/ cases_test.go\ntype Header struct {\n\t\/\/ Ori is a deprecated short name for Origin.\n\t\/\/ TODO: Remove Ori once everything switches to Origin.\n\tOri string\n\tOrigin string\n\tCommit string\n\tVersion string\n}\n\nfunc (h Header) String() string {\n\ts := fmt.Sprintf(\"\/\/ Source: %s\\n\", h.Origin)\n\tif h.Commit != \"\" {\n\t\ts += fmt.Sprintf(\"\/\/ Commit: %s\\n\", h.Commit)\n\t}\n\tif h.Version != \"\" {\n\t\ts += fmt.Sprintf(\"\/\/ x-common version: %s\\n\", h.Version)\n\t}\n\treturn s\n}\n\nfunc init() {\n\tif _, path, _, ok := runtime.Caller(0); ok {\n\t\tdirMetadata = filepath.Join(path, \"..\", \"..\", \"..\", \"x-common\")\n\t}\n\tif _, path, _, ok := runtime.Caller(2); ok {\n\t\tdirExercise = filepath.Join(path, \"..\", \"..\")\n\t}\n\tif dirExercise == \"\" {\n\t\tdirExercise = \".\"\n\t}\n}\n\n\/\/ Gen generates the exercise cases_test.go file from the relevant canonical-data.json\nfunc Gen(exercise string, j interface{}, t *template.Template) error {\n\tif dirMetadata == \"\" {\n\t\treturn errors.New(\"unable to determine current path\")\n\t}\n\tjFile := filepath.Join(\"exercises\", exercise, \"canonical-data.json\")\n\t\/\/ try to find and read the local json source file\n\tlog.Printf(\"[LOCAL] fetching %s test data\\n\", exercise)\n\tjPath, jOrigin, jCommit := getLocal(jFile)\n\tif jPath != \"\" {\n\t\tlog.Printf(\"[LOCAL] source: %s\\n\", jPath)\n\t}\n\tjSrc, err := ioutil.ReadFile(filepath.Join(jPath, jFile))\n\tif err != nil {\n\t\t\/\/ fetch json data remotely if there's no local file\n\t\tlog.Println(\"[LOCAL] No test data found\")\n\t\tlog.Printf(\"[REMOTE] fetching %s test data\\n\", exercise)\n\t\tjSrc, jOrigin, jCommit, err = getRemote(exercise)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ unmarshal the json source to a Go structure\n\tif err = json.Unmarshal(jSrc, j); err != nil {\n\t\t\/\/ This error message is usually enough if the problem is a wrong\n\t\t\/\/ data structure defined here. Sadly it doesn't locate the error well\n\t\t\/\/ in the case of invalid JSON. Use a real validator tool if you can't\n\t\t\/\/ spot the problem right away.\n\t\treturn fmt.Errorf(`unexpected data structure: %v`, err)\n\t}\n\n\t\/\/ These fields are guaranteed to be in every problem\n\tvar commonMetadata struct {\n\t\tVersion string\n\t}\n\tif err := json.Unmarshal(jSrc, &commonMetadata); err != nil {\n\t\treturn fmt.Errorf(`didn't contain version: %v`, err)\n\t}\n\n\t\/\/ package up a little meta data\n\td := struct {\n\t\tHeader\n\t\tJ interface{}\n\t}{Header{\n\t\tOri: jOrigin,\n\t\tOrigin: jOrigin,\n\t\tCommit: jCommit,\n\t\tVersion: commonMetadata.Version,\n\t}, j}\n\n\t\/\/ render the Go test cases\n\tvar b bytes.Buffer\n\tif err := t.Execute(&b, &d); err != nil {\n\t\treturn err\n\t}\n\t\/\/ clean it up\n\tsrc, err := format.Source(b.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ write output file for the Go test cases.\n\treturn ioutil.WriteFile(filepath.Join(dirExercise, \"cases_test.go\"), src, 0666)\n}\n\nfunc getLocal(jFile string) (jPath, jOrigin, jCommit string) {\n\t\/\/ Ideally draw from a .json which is pulled from the official x-common\n\t\/\/ repository. For development however, accept a file in current directory\n\t\/\/ if there is no .json in source control. Also allow an override in any\n\t\/\/ case by environment variable.\n\tif jPath := os.Getenv(\"EXTEST\"); jPath > \"\" {\n\t\treturn jPath, \"local file\", \"\" \/\/ override\n\t}\n\tc := exec.Command(\"git\", \"log\", \"-1\", \"--oneline\", jFile)\n\tc.Dir = dirMetadata\n\torigin, err := c.Output()\n\tif err != nil {\n\t\treturn \"\", \"local file\", \"\" \/\/ no source control\n\t}\n\tif _, err = os.Stat(filepath.Join(c.Dir, jFile)); err != nil {\n\t\treturn \"\", \"local file\", \"\" \/\/ not in source control\n\t}\n\t\/\/ good. return source control dir and commit.\n\treturn c.Dir, \"exercism\/x-common\", string(bytes.TrimSpace(origin))\n}\n\nfunc getRemote(exercise string) (body []byte, jOrigin string, jCommit string, err error) {\n\turl := fmt.Sprintf(canonicalDataURL, exercise)\n\tresp, err := genClient.Get(url)\n\tif err != nil {\n\t\treturn []byte{}, \"\", \"\", err\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn []byte{}, \"\", \"\", fmt.Errorf(\"error fetching remote data: %s\", resp.Status)\n\t}\n\tdefer resp.Body.Close()\n\tbody, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn []byte{}, \"\", \"\", err\n\t}\n\tc, err := getRemoteCommit(exercise)\n\tif err != nil {\n\t\t\/\/ we always expect to have the commit in the cases_test.go\n\t\t\/\/ file, so return the error if we can't fetch it\n\t\treturn []byte{}, \"\", \"\", err\n\t}\n\tlog.Printf(\"[REMOTE] source: %s\\n\", url)\n\treturn body, \"exercism\/x-common\", c, nil\n}\n\nfunc getRemoteCommit(exercise string) (string, error) {\n\ttype Commits struct {\n\t\tSha string\n\t\tCommit struct {\n\t\t\tMessage string\n\t\t}\n\t}\n\tresp, err := genClient.Get(fmt.Sprintf(commitsURL, exercise))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\tvar c []Commits\n\terr = json.NewDecoder(resp.Body).Decode(&c)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn fmt.Sprintf(\"%s %s\", c[0].Sha[0:7], c[0].Commit.Message), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package gen\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"go\/format\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"text\/template\"\n)\n\n\/\/ dirMetadata is the location of the x-common repository\n\/\/ on the filesystem.\n\/\/ We're making the assumption that the x-common repository\n\/\/ has been cloned to the same parent directory as the xgo\n\/\/ repository. E.g.\n\/\/\n\/\/ $ tree -L 1 .\n\/\/ .\n\/\/ ├── x-common\n\/\/ └── xgo\nvar dirMetadata string\n\n\/\/ dirProblem is the location that the test cases should be generated to.\n\/\/ This assumes that the generator script lives in the same directory as\n\/\/ the problem.\n\/\/ Falls back to the present working directory.\nvar dirProblem string\n\nfunc init() {\n\tif _, path, _, ok := runtime.Caller(0); ok {\n\t\tdirMetadata = filepath.Join(path, \"..\", \"..\", \"..\", \"x-common\")\n\t}\n\tif _, path, _, ok := runtime.Caller(2); ok {\n\t\tdirProblem = filepath.Join(path, \"..\")\n\t}\n\tif dirProblem == \"\" {\n\t\tdirProblem = \".\"\n\t}\n}\n\nfunc Gen(exercise string, j interface{}, t *template.Template) error {\n\tif dirMetadata == \"\" {\n\t\treturn errors.New(\"unable to determine current path\")\n\t}\n\tjFile := filepath.Join(\"exercises\", exercise, \"canonical-data.json\")\n\t\/\/ find and read the json source file\n\tjPath, jOri, jCommit := getPath(jFile)\n\tjSrc, err := ioutil.ReadFile(filepath.Join(jPath, jFile))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ unmarshal the json source to a Go structure\n\tif err = json.Unmarshal(jSrc, j); err != nil {\n\t\t\/\/ This error message is usually enough if the problem is a wrong\n\t\t\/\/ data structure defined here. Sadly it doesn't locate the error well\n\t\t\/\/ in the case of invalid JSON. Use a real validator tool if you can't\n\t\t\/\/ spot the problem right away.\n\t\treturn fmt.Errorf(`unexpected data structure: %v`, err)\n\t}\n\n\t\/\/ package up a little meta data\n\td := struct {\n\t\tOri string\n\t\tCommit string\n\t\tJ interface{}\n\t}{jOri, jCommit, j}\n\n\t\/\/ render the Go test cases\n\tvar b bytes.Buffer\n\tif err = t.Execute(&b, &d); err != nil {\n\t\treturn err\n\t}\n\t\/\/ clean it up\n\tsrc, err := format.Source(b.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ write output file for the Go test cases.\n\treturn ioutil.WriteFile(filepath.Join(dirProblem, \"cases_test.go\"), src, 0777)\n}\n\nfunc getPath(jFile string) (jPath, jOri, jCommit string) {\n\t\/\/ Ideally draw from a .json which is pulled from the official x-common\n\t\/\/ repository. For development however, accept a file in current directory\n\t\/\/ if there is no .json in source control. Also allow an override in any\n\t\/\/ case by environment variable.\n\tif jPath = os.Getenv(\"EXTEST\"); jPath > \"\" {\n\t\treturn jPath, \"local file\", \"\" \/\/ override\n\t}\n\tc := exec.Command(\"git\", \"log\", \"-1\", \"--oneline\")\n\tc.Dir = dirMetadata\n\tori, err := c.Output()\n\tif err != nil {\n\t\treturn \"\", \"local file\", \"\" \/\/ no source control\n\t}\n\tif _, err = os.Stat(filepath.Join(c.Dir, jFile)); err != nil {\n\t\treturn \"\", \"local file\", \"\" \/\/ not in source control\n\t}\n\t\/\/ good. return source control dir and commit.\n\treturn c.Dir, \"exercism\/x-common\", string(bytes.TrimSpace(ori))\n}\n<commit_msg>gen.go: add filename to git log<commit_after>package gen\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"go\/format\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"text\/template\"\n)\n\n\/\/ dirMetadata is the location of the x-common repository\n\/\/ on the filesystem.\n\/\/ We're making the assumption that the x-common repository\n\/\/ has been cloned to the same parent directory as the xgo\n\/\/ repository. E.g.\n\/\/\n\/\/ $ tree -L 1 .\n\/\/ .\n\/\/ ├── x-common\n\/\/ └── xgo\nvar dirMetadata string\n\n\/\/ dirProblem is the location that the test cases should be generated to.\n\/\/ This assumes that the generator script lives in the same directory as\n\/\/ the problem.\n\/\/ Falls back to the present working directory.\nvar dirProblem string\n\nfunc init() {\n\tif _, path, _, ok := runtime.Caller(0); ok {\n\t\tdirMetadata = filepath.Join(path, \"..\", \"..\", \"..\", \"x-common\")\n\t}\n\tif _, path, _, ok := runtime.Caller(2); ok {\n\t\tdirProblem = filepath.Join(path, \"..\")\n\t}\n\tif dirProblem == \"\" {\n\t\tdirProblem = \".\"\n\t}\n}\n\nfunc Gen(exercise string, j interface{}, t *template.Template) error {\n\tif dirMetadata == \"\" {\n\t\treturn errors.New(\"unable to determine current path\")\n\t}\n\tjFile := filepath.Join(\"exercises\", exercise, \"canonical-data.json\")\n\t\/\/ find and read the json source file\n\tjPath, jOri, jCommit := getPath(jFile)\n\tjSrc, err := ioutil.ReadFile(filepath.Join(jPath, jFile))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ unmarshal the json source to a Go structure\n\tif err = json.Unmarshal(jSrc, j); err != nil {\n\t\t\/\/ This error message is usually enough if the problem is a wrong\n\t\t\/\/ data structure defined here. Sadly it doesn't locate the error well\n\t\t\/\/ in the case of invalid JSON. Use a real validator tool if you can't\n\t\t\/\/ spot the problem right away.\n\t\treturn fmt.Errorf(`unexpected data structure: %v`, err)\n\t}\n\n\t\/\/ package up a little meta data\n\td := struct {\n\t\tOri string\n\t\tCommit string\n\t\tJ interface{}\n\t}{jOri, jCommit, j}\n\n\t\/\/ render the Go test cases\n\tvar b bytes.Buffer\n\tif err = t.Execute(&b, &d); err != nil {\n\t\treturn err\n\t}\n\t\/\/ clean it up\n\tsrc, err := format.Source(b.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ write output file for the Go test cases.\n\treturn ioutil.WriteFile(filepath.Join(dirProblem, \"cases_test.go\"), src, 0777)\n}\n\nfunc getPath(jFile string) (jPath, jOri, jCommit string) {\n\t\/\/ Ideally draw from a .json which is pulled from the official x-common\n\t\/\/ repository. For development however, accept a file in current directory\n\t\/\/ if there is no .json in source control. Also allow an override in any\n\t\/\/ case by environment variable.\n\tif jPath = os.Getenv(\"EXTEST\"); jPath > \"\" {\n\t\treturn jPath, \"local file\", \"\" \/\/ override\n\t}\n\tc := exec.Command(\"git\", \"log\", \"-1\", \"--oneline\", jFile)\n\tc.Dir = dirMetadata\n\tori, err := c.Output()\n\tif err != nil {\n\t\treturn \"\", \"local file\", \"\" \/\/ no source control\n\t}\n\tif _, err = os.Stat(filepath.Join(c.Dir, jFile)); err != nil {\n\t\treturn \"\", \"local file\", \"\" \/\/ not in source control\n\t}\n\t\/\/ good. return source control dir and commit.\n\treturn c.Dir, \"exercism\/x-common\", string(bytes.TrimSpace(ori))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Code generated by protoc-gen-gogo.\n\/\/ source: envelope.proto\n\/\/ DO NOT EDIT!\n\n\/*\nPackage events is a generated protocol buffer package.\n\nIt is generated from these files:\n\tenvelope.proto\n\terror.proto\n\theartbeat.proto\n\thttp.proto\n\tlog.proto\n\tmetric.proto\n\tuuid.proto\n\nIt has these top-level messages:\n\tEnvelope\n*\/\npackage events\n\nimport proto \"github.com\/gogo\/protobuf\/proto\"\nimport math \"math\"\n\n\/\/ Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = math.Inf\n\n\/\/ \/ Type of the wrapped event.\ntype Envelope_EventType int32\n\nconst (\n\tEnvelope_Heartbeat Envelope_EventType = 1\n\tEnvelope_HttpStart Envelope_EventType = 2\n\tEnvelope_HttpStop Envelope_EventType = 3\n\tEnvelope_HttpStartStop Envelope_EventType = 4\n\tEnvelope_LogMessage Envelope_EventType = 5\n\tEnvelope_ValueMetric Envelope_EventType = 6\n\tEnvelope_CounterEvent Envelope_EventType = 7\n\tEnvelope_Error Envelope_EventType = 8\n\tEnvelope_ContainerMetric Envelope_EventType = 9\n)\n\nvar Envelope_EventType_name = map[int32]string{\n\t1: \"Heartbeat\",\n\t2: \"HttpStart\",\n\t3: \"HttpStop\",\n\t4: \"HttpStartStop\",\n\t5: \"LogMessage\",\n\t6: \"ValueMetric\",\n\t7: \"CounterEvent\",\n\t8: \"Error\",\n\t9: \"ContainerMetric\",\n}\nvar Envelope_EventType_value = map[string]int32{\n\t\"Heartbeat\": 1,\n\t\"HttpStart\": 2,\n\t\"HttpStop\": 3,\n\t\"HttpStartStop\": 4,\n\t\"LogMessage\": 5,\n\t\"ValueMetric\": 6,\n\t\"CounterEvent\": 7,\n\t\"Error\": 8,\n\t\"ContainerMetric\": 9,\n}\n\nfunc (x Envelope_EventType) Enum() *Envelope_EventType {\n\tp := new(Envelope_EventType)\n\t*p = x\n\treturn p\n}\nfunc (x Envelope_EventType) String() string {\n\treturn proto.EnumName(Envelope_EventType_name, int32(x))\n}\nfunc (x *Envelope_EventType) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(Envelope_EventType_value, data, \"Envelope_EventType\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = Envelope_EventType(value)\n\treturn nil\n}\n\n\/\/ \/ Envelope wraps an Event and adds metadata.\ntype Envelope struct {\n\tOrigin *string `protobuf:\"bytes,1,req,name=origin\" json:\"origin,omitempty\"`\n\tEventType *Envelope_EventType `protobuf:\"varint,2,req,name=eventType,enum=events.Envelope_EventType\" json:\"eventType,omitempty\"`\n\tTimestamp *int64 `protobuf:\"varint,6,opt,name=timestamp\" json:\"timestamp,omitempty\"`\n\tHeartbeat *Heartbeat `protobuf:\"bytes,3,opt,name=heartbeat\" json:\"heartbeat,omitempty\"`\n\tHttpStart *HttpStart `protobuf:\"bytes,4,opt,name=httpStart\" json:\"httpStart,omitempty\"`\n\tHttpStop *HttpStop `protobuf:\"bytes,5,opt,name=httpStop\" json:\"httpStop,omitempty\"`\n\tHttpStartStop *HttpStartStop `protobuf:\"bytes,7,opt,name=httpStartStop\" json:\"httpStartStop,omitempty\"`\n\tLogMessage *LogMessage `protobuf:\"bytes,8,opt,name=logMessage\" json:\"logMessage,omitempty\"`\n\tValueMetric *ValueMetric `protobuf:\"bytes,9,opt,name=valueMetric\" json:\"valueMetric,omitempty\"`\n\tCounterEvent *CounterEvent `protobuf:\"bytes,10,opt,name=counterEvent\" json:\"counterEvent,omitempty\"`\n\tError *Error `protobuf:\"bytes,11,opt,name=error\" json:\"error,omitempty\"`\n\tContainerMetric *ContainerMetric `protobuf:\"bytes,12,opt,name=containerMetric\" json:\"containerMetric,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Envelope) Reset() { *m = Envelope{} }\nfunc (m *Envelope) String() string { return proto.CompactTextString(m) }\nfunc (*Envelope) ProtoMessage() {}\n\nfunc (m *Envelope) GetOrigin() string {\n\tif m != nil && m.Origin != nil {\n\t\treturn *m.Origin\n\t}\n\treturn \"\"\n}\n\nfunc (m *Envelope) GetEventType() Envelope_EventType {\n\tif m != nil && m.EventType != nil {\n\t\treturn *m.EventType\n\t}\n\treturn Envelope_Heartbeat\n}\n\nfunc (m *Envelope) GetTimestamp() int64 {\n\tif m != nil && m.Timestamp != nil {\n\t\treturn *m.Timestamp\n\t}\n\treturn 0\n}\n\nfunc (m *Envelope) GetHeartbeat() *Heartbeat {\n\tif m != nil {\n\t\treturn m.Heartbeat\n\t}\n\treturn nil\n}\n\nfunc (m *Envelope) GetHttpStart() *HttpStart {\n\tif m != nil {\n\t\treturn m.HttpStart\n\t}\n\treturn nil\n}\n\nfunc (m *Envelope) GetHttpStop() *HttpStop {\n\tif m != nil {\n\t\treturn m.HttpStop\n\t}\n\treturn nil\n}\n\nfunc (m *Envelope) GetHttpStartStop() *HttpStartStop {\n\tif m != nil {\n\t\treturn m.HttpStartStop\n\t}\n\treturn nil\n}\n\nfunc (m *Envelope) GetLogMessage() *LogMessage {\n\tif m != nil {\n\t\treturn m.LogMessage\n\t}\n\treturn nil\n}\n\nfunc (m *Envelope) GetValueMetric() *ValueMetric {\n\tif m != nil {\n\t\treturn m.ValueMetric\n\t}\n\treturn nil\n}\n\nfunc (m *Envelope) GetCounterEvent() *CounterEvent {\n\tif m != nil {\n\t\treturn m.CounterEvent\n\t}\n\treturn nil\n}\n\nfunc (m *Envelope) GetError() *Error {\n\tif m != nil {\n\t\treturn m.Error\n\t}\n\treturn nil\n}\n\nfunc (m *Envelope) GetContainerMetric() *ContainerMetric {\n\tif m != nil {\n\t\treturn m.ContainerMetric\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tproto.RegisterEnum(\"events.Envelope_EventType\", Envelope_EventType_name, Envelope_EventType_value)\n}\n<commit_msg>Update to latest version of dropsonde-protocol<commit_after>\/\/ Code generated by protoc-gen-gogo.\n\/\/ source: envelope.proto\n\/\/ DO NOT EDIT!\n\n\/*\nPackage events is a generated protocol buffer package.\n\nIt is generated from these files:\n\tenvelope.proto\n\terror.proto\n\theartbeat.proto\n\thttp.proto\n\tlog.proto\n\tmetric.proto\n\tuuid.proto\n\nIt has these top-level messages:\n\tEnvelope\n\tTag\n*\/\npackage events\n\nimport proto \"github.com\/gogo\/protobuf\/proto\"\nimport math \"math\"\n\n\/\/ Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = math.Inf\n\n\/\/ \/ Type of the wrapped event.\ntype Envelope_EventType int32\n\nconst (\n\tEnvelope_Heartbeat Envelope_EventType = 1\n\tEnvelope_HttpStart Envelope_EventType = 2\n\tEnvelope_HttpStop Envelope_EventType = 3\n\tEnvelope_HttpStartStop Envelope_EventType = 4\n\tEnvelope_LogMessage Envelope_EventType = 5\n\tEnvelope_ValueMetric Envelope_EventType = 6\n\tEnvelope_CounterEvent Envelope_EventType = 7\n\tEnvelope_Error Envelope_EventType = 8\n\tEnvelope_ContainerMetric Envelope_EventType = 9\n)\n\nvar Envelope_EventType_name = map[int32]string{\n\t1: \"Heartbeat\",\n\t2: \"HttpStart\",\n\t3: \"HttpStop\",\n\t4: \"HttpStartStop\",\n\t5: \"LogMessage\",\n\t6: \"ValueMetric\",\n\t7: \"CounterEvent\",\n\t8: \"Error\",\n\t9: \"ContainerMetric\",\n}\nvar Envelope_EventType_value = map[string]int32{\n\t\"Heartbeat\": 1,\n\t\"HttpStart\": 2,\n\t\"HttpStop\": 3,\n\t\"HttpStartStop\": 4,\n\t\"LogMessage\": 5,\n\t\"ValueMetric\": 6,\n\t\"CounterEvent\": 7,\n\t\"Error\": 8,\n\t\"ContainerMetric\": 9,\n}\n\nfunc (x Envelope_EventType) Enum() *Envelope_EventType {\n\tp := new(Envelope_EventType)\n\t*p = x\n\treturn p\n}\nfunc (x Envelope_EventType) String() string {\n\treturn proto.EnumName(Envelope_EventType_name, int32(x))\n}\nfunc (x *Envelope_EventType) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(Envelope_EventType_value, data, \"Envelope_EventType\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = Envelope_EventType(value)\n\treturn nil\n}\n\n\/\/ \/ Envelope wraps an Event and adds metadata.\ntype Envelope struct {\n\tOrigin *string `protobuf:\"bytes,1,req,name=origin\" json:\"origin,omitempty\"`\n\tEventType *Envelope_EventType `protobuf:\"varint,2,req,name=eventType,enum=events.Envelope_EventType\" json:\"eventType,omitempty\"`\n\tTimestamp *int64 `protobuf:\"varint,6,opt,name=timestamp\" json:\"timestamp,omitempty\"`\n\tHeartbeat *Heartbeat `protobuf:\"bytes,3,opt,name=heartbeat\" json:\"heartbeat,omitempty\"`\n\tHttpStart *HttpStart `protobuf:\"bytes,4,opt,name=httpStart\" json:\"httpStart,omitempty\"`\n\tHttpStop *HttpStop `protobuf:\"bytes,5,opt,name=httpStop\" json:\"httpStop,omitempty\"`\n\tHttpStartStop *HttpStartStop `protobuf:\"bytes,7,opt,name=httpStartStop\" json:\"httpStartStop,omitempty\"`\n\tLogMessage *LogMessage `protobuf:\"bytes,8,opt,name=logMessage\" json:\"logMessage,omitempty\"`\n\tValueMetric *ValueMetric `protobuf:\"bytes,9,opt,name=valueMetric\" json:\"valueMetric,omitempty\"`\n\tCounterEvent *CounterEvent `protobuf:\"bytes,10,opt,name=counterEvent\" json:\"counterEvent,omitempty\"`\n\tError *Error `protobuf:\"bytes,11,opt,name=error\" json:\"error,omitempty\"`\n\tContainerMetric *ContainerMetric `protobuf:\"bytes,12,opt,name=containerMetric\" json:\"containerMetric,omitempty\"`\n\tTags []*Tag `protobuf:\"bytes,13,rep,name=tags\" json:\"tags,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Envelope) Reset() { *m = Envelope{} }\nfunc (m *Envelope) String() string { return proto.CompactTextString(m) }\nfunc (*Envelope) ProtoMessage() {}\n\nfunc (m *Envelope) GetOrigin() string {\n\tif m != nil && m.Origin != nil {\n\t\treturn *m.Origin\n\t}\n\treturn \"\"\n}\n\nfunc (m *Envelope) GetEventType() Envelope_EventType {\n\tif m != nil && m.EventType != nil {\n\t\treturn *m.EventType\n\t}\n\treturn Envelope_Heartbeat\n}\n\nfunc (m *Envelope) GetTimestamp() int64 {\n\tif m != nil && m.Timestamp != nil {\n\t\treturn *m.Timestamp\n\t}\n\treturn 0\n}\n\nfunc (m *Envelope) GetHeartbeat() *Heartbeat {\n\tif m != nil {\n\t\treturn m.Heartbeat\n\t}\n\treturn nil\n}\n\nfunc (m *Envelope) GetHttpStart() *HttpStart {\n\tif m != nil {\n\t\treturn m.HttpStart\n\t}\n\treturn nil\n}\n\nfunc (m *Envelope) GetHttpStop() *HttpStop {\n\tif m != nil {\n\t\treturn m.HttpStop\n\t}\n\treturn nil\n}\n\nfunc (m *Envelope) GetHttpStartStop() *HttpStartStop {\n\tif m != nil {\n\t\treturn m.HttpStartStop\n\t}\n\treturn nil\n}\n\nfunc (m *Envelope) GetLogMessage() *LogMessage {\n\tif m != nil {\n\t\treturn m.LogMessage\n\t}\n\treturn nil\n}\n\nfunc (m *Envelope) GetValueMetric() *ValueMetric {\n\tif m != nil {\n\t\treturn m.ValueMetric\n\t}\n\treturn nil\n}\n\nfunc (m *Envelope) GetCounterEvent() *CounterEvent {\n\tif m != nil {\n\t\treturn m.CounterEvent\n\t}\n\treturn nil\n}\n\nfunc (m *Envelope) GetError() *Error {\n\tif m != nil {\n\t\treturn m.Error\n\t}\n\treturn nil\n}\n\nfunc (m *Envelope) GetContainerMetric() *ContainerMetric {\n\tif m != nil {\n\t\treturn m.ContainerMetric\n\t}\n\treturn nil\n}\n\nfunc (m *Envelope) GetTags() []*Tag {\n\tif m != nil {\n\t\treturn m.Tags\n\t}\n\treturn nil\n}\n\n\/\/ \/ Tag holds a key-value pair\ntype Tag struct {\n\tKey *string `protobuf:\"bytes,1,req,name=key\" json:\"key,omitempty\"`\n\tValue *string `protobuf:\"bytes,2,req,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Tag) Reset() { *m = Tag{} }\nfunc (m *Tag) String() string { return proto.CompactTextString(m) }\nfunc (*Tag) ProtoMessage() {}\n\nfunc (m *Tag) GetKey() string {\n\tif m != nil && m.Key != nil {\n\t\treturn *m.Key\n\t}\n\treturn \"\"\n}\n\nfunc (m *Tag) GetValue() string {\n\tif m != nil && m.Value != nil {\n\t\treturn *m.Value\n\t}\n\treturn \"\"\n}\n\nfunc init() {\n\tproto.RegisterEnum(\"events.Envelope_EventType\", Envelope_EventType_name, Envelope_EventType_value)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright 2018 Padduck, LLC\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n \thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage api\n\nimport (\n\t\"fmt\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/pufferpanel\/pufferpanel\/v2\"\n\t\"github.com\/pufferpanel\/pufferpanel\/v2\/panel\/models\"\n\t\"github.com\/pufferpanel\/pufferpanel\/v2\/panel\/services\"\n\t\"github.com\/pufferpanel\/pufferpanel\/v2\/panel\/web\/handlers\"\n\t\"github.com\/pufferpanel\/pufferpanel\/v2\/response\"\n\t\"github.com\/pufferpanel\/pufferpanel\/v2\/scope\"\n\tuuid \"github.com\/satori\/go.uuid\"\n\t\"github.com\/spf13\/viper\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc registerNodes(g *gin.RouterGroup) {\n\tg.Handle(\"GET\", \"\", handlers.OAuth2Handler(scope.NodesView, false), getAllNodes)\n\tg.Handle(\"OPTIONS\", \"\", response.CreateOptions(\"GET\"))\n\tg.Handle(\"POST\", \"\", handlers.OAuth2Handler(scope.NodesEdit, false), createNode)\n\n\tg.Handle(\"GET\", \"\/:id\", handlers.OAuth2Handler(scope.NodesView, false), getNode)\n\tg.Handle(\"PUT\", \"\/:id\", handlers.OAuth2Handler(scope.NodesEdit, false), updateNode)\n\tg.Handle(\"DELETE\", \"\/:id\", handlers.OAuth2Handler(scope.NodesEdit, false), deleteNode)\n\tg.Handle(\"OPTIONS\", \"\/:id\", response.CreateOptions(\"PUT\", \"GET\", \"POST\", \"DELETE\"))\n\n\tg.Handle(\"GET\", \"\/:id\/deployment\", handlers.OAuth2Handler(scope.NodesDeploy, false), deployNode)\n\tg.Handle(\"OPTIONS\", \"\/:id\/deployment\", response.CreateOptions(\"GET\"))\n\n\t\/\/g.Handle(\"POST\", \"\/:id\/reset\", handlers.OAuth2(scope.NodesDeploy, false), response.NotImplemented)\n\t\/\/g.Handle(\"OPTIONS\", \"\/:id\/reset\", response.CreateOptions(\"POST\"))\n}\n\n\/\/ @Summary Get nodes\n\/\/ @Description Gets all nodes registered to the panel\n\/\/ @Accept json\n\/\/ @Produce json\n\/\/ @Success 200 {object} models.NodesView \"Nodes\"\n\/\/ @Failure 400 {object} response.Error\n\/\/ @Failure 403 {object} response.Error\n\/\/ @Failure 404 {object} response.Error\n\/\/ @Failure 500 {object} response.Error\n\/\/ @Router \/nodes [get]\nfunc getAllNodes(c *gin.Context) {\n\tvar err error\n\tdb := handlers.GetDatabase(c)\n\tns := &services.Node{DB: db}\n\n\tvar nodes *models.Nodes\n\tif nodes, err = ns.GetAll(); response.HandleError(c, err, http.StatusInternalServerError) {\n\t\treturn\n\t}\n\n\tdata := models.FromNodes(nodes)\n\n\tc.JSON(http.StatusOK, data)\n}\n\n\/\/ @Summary Get node\n\/\/ @Description Gets information about a single node\n\/\/ @Accept json\n\/\/ @Produce json\n\/\/ @Success 200 {object} models.NodeView \"Nodes\"\n\/\/ @Failure 400 {object} response.Error\n\/\/ @Failure 403 {object} response.Error\n\/\/ @Failure 404 {object} response.Error\n\/\/ @Failure 500 {object} response.Error\n\/\/ @Router \/nodes\/{id} [get]\nfunc getNode(c *gin.Context) {\n\tvar err error\n\tdb := handlers.GetDatabase(c)\n\tns := &services.Node{DB: db}\n\n\tid, ok := validateId(c)\n\tif !ok {\n\t\treturn\n\t}\n\n\tnode, err := ns.Get(id)\n\tif response.HandleError(c, err, http.StatusInternalServerError) {\n\t\treturn\n\t}\n\n\tdata := models.FromNode(node)\n\n\tc.JSON(http.StatusOK, data)\n}\n\n\/\/ @Summary Create node\n\/\/ @Description Creates a node\n\/\/ @Accept json\n\/\/ @Produce json\n\/\/ @Success 200 {object} models.NodeView \"Node created\"\n\/\/ @Failure 400 {object} response.Error\n\/\/ @Failure 403 {object} response.Error\n\/\/ @Failure 404 {object} response.Error\n\/\/ @Failure 500 {object} response.Error\n\/\/ @Param id path string true \"Node Identifier\"\n\/\/ @Router \/nodes [post]\nfunc createNode(c *gin.Context) {\n\tvar err error\n\tdb := handlers.GetDatabase(c)\n\tns := &services.Node{DB: db}\n\n\tmodel := &models.NodeView{}\n\tif err = c.BindJSON(model); response.HandleError(c, err, http.StatusBadRequest) {\n\t\treturn\n\t}\n\n\tif err = model.Valid(false); response.HandleError(c, err, http.StatusBadRequest) {\n\t\treturn\n\t}\n\n\tcreate := &models.Node{}\n\tmodel.CopyToModel(create)\n\tcreate.Secret = strings.Replace(uuid.NewV4().String(), \"-\", \"\", -1)\n\tif err = ns.Create(create); response.HandleError(c, err, http.StatusInternalServerError) {\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, create)\n}\n\n\/\/ @Summary Update node\n\/\/ @Description Updates a node with given information\n\/\/ @Accept json\n\/\/ @Produce json\n\/\/ @Success 204 {object} response.Empty\n\/\/ @Failure 400 {object} response.Error\n\/\/ @Failure 403 {object} response.Error\n\/\/ @Failure 404 {object} response.Error\n\/\/ @Failure 500 {object} response.Error\n\/\/ @Param id path string true \"Node Id\"\n\/\/ @Router \/nodes\/{id} [put]\nfunc updateNode(c *gin.Context) {\n\tvar err error\n\tdb := handlers.GetDatabase(c)\n\tns := &services.Node{DB: db}\n\n\tviewModel := &models.NodeView{}\n\tif err = c.BindJSON(viewModel); response.HandleError(c, err, http.StatusBadRequest) {\n\t\treturn\n\t}\n\n\tid, ok := validateId(c)\n\tif !ok {\n\t\treturn\n\t}\n\n\tif err = viewModel.Valid(true); response.HandleError(c, err, http.StatusBadRequest) {\n\t\treturn\n\t}\n\n\tnode, err := ns.Get(id)\n\tif response.HandleError(c, err, http.StatusInternalServerError) {\n\t\treturn\n\t}\n\n\tviewModel.CopyToModel(node)\n\tif err = ns.Update(node); response.HandleError(c, err, http.StatusInternalServerError) {\n\t\treturn\n\t}\n\n\tc.Status(http.StatusNoContent)\n}\n\n\/\/ @Summary Deletes a node\n\/\/ @Description Deletes the node\n\/\/ @Accept json\n\/\/ @Produce json\n\/\/ @Success 204 {object} response.Empty\n\/\/ @Failure 400 {object} response.Error\n\/\/ @Failure 403 {object} response.Error\n\/\/ @Failure 404 {object} response.Error\n\/\/ @Failure 500 {object} response.Error\n\/\/ @Param id path string true \"Node Id\"\n\/\/ @Router \/nodes\/{id} [delete]\nfunc deleteNode(c *gin.Context) {\n\tvar err error\n\tdb := handlers.GetDatabase(c)\n\tns := &services.Node{DB: db}\n\n\tid, ok := validateId(c)\n\tif !ok {\n\t\treturn\n\t}\n\n\tnode, err := ns.Get(id)\n\tif response.HandleError(c, err, http.StatusInternalServerError) {\n\t\treturn\n\t}\n\n\terr = ns.Delete(node.ID)\n\tif response.HandleError(c, err, http.StatusInternalServerError) {\n\t\treturn\n\t}\n\n\tc.Status(http.StatusNoContent)\n}\n\nfunc deployNode(c *gin.Context) {\n\tvar err error\n\tdb := handlers.GetDatabase(c)\n\tns := &services.Node{DB: db}\n\n\tid, ok := validateId(c)\n\tif !ok {\n\t\treturn\n\t}\n\n\tnode, err := ns.Get(id)\n\tif response.HandleError(c, err, http.StatusInternalServerError) {\n\t\treturn\n\t}\n\n\tservices.ValidateTokenLoaded()\n\tfile, err := ioutil.ReadFile(viper.GetString(\"panel.token.public\"))\n\tif response.HandleError(c, err, http.StatusInternalServerError) {\n\t\treturn\n\t}\n\n\tdata := &models.Deployment{\n\t\tClientId: fmt.Sprintf(\".node_%d\", node.ID),\n\t\tClientSecret: node.Secret,\n\t\tPublicKey: string(file),\n\t}\n\n\tc.JSON(http.StatusOK, data)\n}\n\nfunc validateId(c *gin.Context) (uint, bool) {\n\tparam := c.Param(\"id\")\n\n\tid, err := strconv.Atoi(param)\n\n\tif response.HandleError(c, err, http.StatusBadRequest) || id <= 0 {\n\t\tresponse.HandleError(c, pufferpanel.ErrFieldTooSmall(\"id\", 0), http.StatusBadRequest)\n\t\treturn 0, false\n\t}\n\n\treturn uint(id), true\n}\n<commit_msg>Add localhost workaround for displaying SFTP info [skip]<commit_after>\/*\n Copyright 2018 Padduck, LLC\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n \thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage api\n\nimport (\n\t\"fmt\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/pufferpanel\/pufferpanel\/v2\"\n\t\"github.com\/pufferpanel\/pufferpanel\/v2\/panel\/models\"\n\t\"github.com\/pufferpanel\/pufferpanel\/v2\/panel\/services\"\n\t\"github.com\/pufferpanel\/pufferpanel\/v2\/panel\/web\/handlers\"\n\t\"github.com\/pufferpanel\/pufferpanel\/v2\/response\"\n\t\"github.com\/pufferpanel\/pufferpanel\/v2\/scope\"\n\tuuid \"github.com\/satori\/go.uuid\"\n\t\"github.com\/spf13\/viper\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc registerNodes(g *gin.RouterGroup) {\n\tg.Handle(\"GET\", \"\", handlers.OAuth2Handler(scope.NodesView, false), getAllNodes)\n\tg.Handle(\"OPTIONS\", \"\", response.CreateOptions(\"GET\"))\n\tg.Handle(\"POST\", \"\", handlers.OAuth2Handler(scope.NodesEdit, false), createNode)\n\n\tg.Handle(\"GET\", \"\/:id\", handlers.OAuth2Handler(scope.NodesView, false), getNode)\n\tg.Handle(\"PUT\", \"\/:id\", handlers.OAuth2Handler(scope.NodesEdit, false), updateNode)\n\tg.Handle(\"DELETE\", \"\/:id\", handlers.OAuth2Handler(scope.NodesEdit, false), deleteNode)\n\tg.Handle(\"OPTIONS\", \"\/:id\", response.CreateOptions(\"PUT\", \"GET\", \"POST\", \"DELETE\"))\n\n\tg.Handle(\"GET\", \"\/:id\/deployment\", handlers.OAuth2Handler(scope.NodesDeploy, false), deployNode)\n\tg.Handle(\"OPTIONS\", \"\/:id\/deployment\", response.CreateOptions(\"GET\"))\n\n\t\/\/g.Handle(\"POST\", \"\/:id\/reset\", handlers.OAuth2(scope.NodesDeploy, false), response.NotImplemented)\n\t\/\/g.Handle(\"OPTIONS\", \"\/:id\/reset\", response.CreateOptions(\"POST\"))\n}\n\n\/\/ @Summary Get nodes\n\/\/ @Description Gets all nodes registered to the panel\n\/\/ @Accept json\n\/\/ @Produce json\n\/\/ @Success 200 {object} models.NodesView \"Nodes\"\n\/\/ @Failure 400 {object} response.Error\n\/\/ @Failure 403 {object} response.Error\n\/\/ @Failure 404 {object} response.Error\n\/\/ @Failure 500 {object} response.Error\n\/\/ @Router \/nodes [get]\nfunc getAllNodes(c *gin.Context) {\n\tvar err error\n\tdb := handlers.GetDatabase(c)\n\tns := &services.Node{DB: db}\n\n\tvar nodes *models.Nodes\n\tif nodes, err = ns.GetAll(); response.HandleError(c, err, http.StatusInternalServerError) {\n\t\treturn\n\t}\n\n\tdata := models.FromNodes(nodes)\n\n\t\/\/HACK: For our local node, we actually need to override the public IP\n\tfor _, d := range *data {\n\t\tif d.PrivateHost == \"127.0.0.1\" && d.PublicHost == \"127.0.0.1\" {\n\t\t\td.PublicHost = c.Request.Host\n\t\t\tbreak\n\t\t}\n\t}\n\n\tc.JSON(http.StatusOK, data)\n}\n\n\/\/ @Summary Get node\n\/\/ @Description Gets information about a single node\n\/\/ @Accept json\n\/\/ @Produce json\n\/\/ @Success 200 {object} models.NodeView \"Nodes\"\n\/\/ @Failure 400 {object} response.Error\n\/\/ @Failure 403 {object} response.Error\n\/\/ @Failure 404 {object} response.Error\n\/\/ @Failure 500 {object} response.Error\n\/\/ @Router \/nodes\/{id} [get]\nfunc getNode(c *gin.Context) {\n\tvar err error\n\tdb := handlers.GetDatabase(c)\n\tns := &services.Node{DB: db}\n\n\tid, ok := validateId(c)\n\tif !ok {\n\t\treturn\n\t}\n\n\tnode, err := ns.Get(id)\n\tif response.HandleError(c, err, http.StatusInternalServerError) {\n\t\treturn\n\t}\n\n\tdata := models.FromNode(node)\n\n\tc.JSON(http.StatusOK, data)\n}\n\n\/\/ @Summary Create node\n\/\/ @Description Creates a node\n\/\/ @Accept json\n\/\/ @Produce json\n\/\/ @Success 200 {object} models.NodeView \"Node created\"\n\/\/ @Failure 400 {object} response.Error\n\/\/ @Failure 403 {object} response.Error\n\/\/ @Failure 404 {object} response.Error\n\/\/ @Failure 500 {object} response.Error\n\/\/ @Param id path string true \"Node Identifier\"\n\/\/ @Router \/nodes [post]\nfunc createNode(c *gin.Context) {\n\tvar err error\n\tdb := handlers.GetDatabase(c)\n\tns := &services.Node{DB: db}\n\n\tmodel := &models.NodeView{}\n\tif err = c.BindJSON(model); response.HandleError(c, err, http.StatusBadRequest) {\n\t\treturn\n\t}\n\n\tif err = model.Valid(false); response.HandleError(c, err, http.StatusBadRequest) {\n\t\treturn\n\t}\n\n\tcreate := &models.Node{}\n\tmodel.CopyToModel(create)\n\tcreate.Secret = strings.Replace(uuid.NewV4().String(), \"-\", \"\", -1)\n\tif err = ns.Create(create); response.HandleError(c, err, http.StatusInternalServerError) {\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, create)\n}\n\n\/\/ @Summary Update node\n\/\/ @Description Updates a node with given information\n\/\/ @Accept json\n\/\/ @Produce json\n\/\/ @Success 204 {object} response.Empty\n\/\/ @Failure 400 {object} response.Error\n\/\/ @Failure 403 {object} response.Error\n\/\/ @Failure 404 {object} response.Error\n\/\/ @Failure 500 {object} response.Error\n\/\/ @Param id path string true \"Node Id\"\n\/\/ @Router \/nodes\/{id} [put]\nfunc updateNode(c *gin.Context) {\n\tvar err error\n\tdb := handlers.GetDatabase(c)\n\tns := &services.Node{DB: db}\n\n\tviewModel := &models.NodeView{}\n\tif err = c.BindJSON(viewModel); response.HandleError(c, err, http.StatusBadRequest) {\n\t\treturn\n\t}\n\n\tid, ok := validateId(c)\n\tif !ok {\n\t\treturn\n\t}\n\n\tif err = viewModel.Valid(true); response.HandleError(c, err, http.StatusBadRequest) {\n\t\treturn\n\t}\n\n\tnode, err := ns.Get(id)\n\tif response.HandleError(c, err, http.StatusInternalServerError) {\n\t\treturn\n\t}\n\n\tviewModel.CopyToModel(node)\n\tif err = ns.Update(node); response.HandleError(c, err, http.StatusInternalServerError) {\n\t\treturn\n\t}\n\n\tc.Status(http.StatusNoContent)\n}\n\n\/\/ @Summary Deletes a node\n\/\/ @Description Deletes the node\n\/\/ @Accept json\n\/\/ @Produce json\n\/\/ @Success 204 {object} response.Empty\n\/\/ @Failure 400 {object} response.Error\n\/\/ @Failure 403 {object} response.Error\n\/\/ @Failure 404 {object} response.Error\n\/\/ @Failure 500 {object} response.Error\n\/\/ @Param id path string true \"Node Id\"\n\/\/ @Router \/nodes\/{id} [delete]\nfunc deleteNode(c *gin.Context) {\n\tvar err error\n\tdb := handlers.GetDatabase(c)\n\tns := &services.Node{DB: db}\n\n\tid, ok := validateId(c)\n\tif !ok {\n\t\treturn\n\t}\n\n\tnode, err := ns.Get(id)\n\tif response.HandleError(c, err, http.StatusInternalServerError) {\n\t\treturn\n\t}\n\n\terr = ns.Delete(node.ID)\n\tif response.HandleError(c, err, http.StatusInternalServerError) {\n\t\treturn\n\t}\n\n\tc.Status(http.StatusNoContent)\n}\n\nfunc deployNode(c *gin.Context) {\n\tvar err error\n\tdb := handlers.GetDatabase(c)\n\tns := &services.Node{DB: db}\n\n\tid, ok := validateId(c)\n\tif !ok {\n\t\treturn\n\t}\n\n\tnode, err := ns.Get(id)\n\tif response.HandleError(c, err, http.StatusInternalServerError) {\n\t\treturn\n\t}\n\n\tservices.ValidateTokenLoaded()\n\tfile, err := ioutil.ReadFile(viper.GetString(\"panel.token.public\"))\n\tif response.HandleError(c, err, http.StatusInternalServerError) {\n\t\treturn\n\t}\n\n\tdata := &models.Deployment{\n\t\tClientId: fmt.Sprintf(\".node_%d\", node.ID),\n\t\tClientSecret: node.Secret,\n\t\tPublicKey: string(file),\n\t}\n\n\tc.JSON(http.StatusOK, data)\n}\n\nfunc validateId(c *gin.Context) (uint, bool) {\n\tparam := c.Param(\"id\")\n\n\tid, err := strconv.Atoi(param)\n\n\tif response.HandleError(c, err, http.StatusBadRequest) || id <= 0 {\n\t\tresponse.HandleError(c, pufferpanel.ErrFieldTooSmall(\"id\", 0), http.StatusBadRequest)\n\t\treturn 0, false\n\t}\n\n\treturn uint(id), true\n}\n<|endoftext|>"} {"text":"<commit_before>package vfsswift\n\nimport (\n\t\"errors\"\n\n\tmultierror \"github.com\/hashicorp\/go-multierror\"\n\t\"github.com\/ncw\/swift\"\n)\n\n\/\/ maxNbFilesToDelete is the maximal number of files that we will try to delete\n\/\/ in a single call to swift.\nconst maxNbFilesToDelete = 8000\n\n\/\/ maxSimultaneousCalls is the maximal number of simultaneous calls to Swift to\n\/\/ delete files in the same container.\nconst maxSimultaneousCalls = 8\n\nvar errFailFast = errors.New(\"fail fast\")\n\n\/\/ DeleteContainer removes all the files inside the given container, and then\n\/\/ deletes it.\nfunc DeleteContainer(c *swift.Connection, container string) error {\n\t_, _, err := c.Container(container)\n\tif err == swift.ContainerNotFound {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tobjectNames, err := c.ObjectNamesAll(container, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(objectNames) > 0 {\n\t\tif err = deleteContainerFiles(c, container, objectNames); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn c.ContainerDelete(container)\n}\n\nfunc deleteContainerFiles(c *swift.Connection, container string, objectNames []string) error {\n\tnb := 1 + (len(objectNames)-1)\/maxNbFilesToDelete\n\tch := make(chan error)\n\n\t\/\/ Use a system of tokens to limit the number of simultaneous calls to\n\t\/\/ Swift: only a goroutine that has a token can make a call.\n\ttokens := make(chan int, maxSimultaneousCalls)\n\tfor k := 0; k < maxSimultaneousCalls; k++ {\n\t\ttokens <- k\n\t}\n\n\tfor i := 0; i < nb; i++ {\n\t\tbegin := i * maxNbFilesToDelete\n\t\tend := (i + 1) * maxNbFilesToDelete\n\t\tif end > len(objectNames) {\n\t\t\tend = len(objectNames)\n\t\t}\n\t\tobjectToDelete := objectNames[begin:end]\n\t\tgo func() {\n\t\t\tk := <-tokens\n\t\t\t_, err := c.BulkDelete(container, objectToDelete)\n\t\t\tch <- err\n\t\t\ttokens <- k\n\t\t}()\n\t}\n\n\tvar errm error\n\tfor i := 0; i < nb; i++ {\n\t\tif err := <-ch; err != nil {\n\t\t\terrm = multierror.Append(errm, err)\n\t\t}\n\t}\n\t\/\/ Get back the tokens to ensure that each goroutine can finish.\n\tfor k := 0; k < maxSimultaneousCalls; k++ {\n\t\t<-tokens\n\t}\n\treturn errm\n}\n<commit_msg>Improve the Swift container deletion (#2143)<commit_after>package vfsswift\n\nimport (\n\t\"errors\"\n\t\"time\"\n\n\tmultierror \"github.com\/hashicorp\/go-multierror\"\n\t\"github.com\/ncw\/swift\"\n)\n\n\/\/ maxNbFilesToDelete is the maximal number of files that we will try to delete\n\/\/ in a single call to swift.\nconst maxNbFilesToDelete = 8000\n\n\/\/ maxSimultaneousCalls is the maximal number of simultaneous calls to Swift to\n\/\/ delete files in the same container.\nconst maxSimultaneousCalls = 8\n\nvar errFailFast = errors.New(\"fail fast\")\n\n\/\/ DeleteContainer removes all the files inside the given container, and then\n\/\/ deletes it.\nfunc DeleteContainer(c *swift.Connection, container string) error {\n\t_, _, err := c.Container(container)\n\tif err == swift.ContainerNotFound {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tobjectNames, err := c.ObjectNamesAll(container, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(objectNames) > 0 {\n\t\tif err = deleteContainerFiles(c, container, objectNames); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ XXX Swift has told us that all the files have been deleted on the bulk\n\t\/\/ delete, but it only means that they have been deleted on one object\n\t\/\/ server (at least). And, when we try to delete the container, Swift can\n\t\/\/ send an error as some container servers will still have objects\n\t\/\/ registered for this container. We will try several times to delete the\n\t\/\/ container to work-around this limitation.\n\tfor i := 0; i < 5; i++ {\n\t\terr = c.ContainerDelete(container)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(5 * time.Second)\n\t}\n\treturn err\n}\n\nfunc deleteContainerFiles(c *swift.Connection, container string, objectNames []string) error {\n\tnb := 1 + (len(objectNames)-1)\/maxNbFilesToDelete\n\tch := make(chan error)\n\n\t\/\/ Use a system of tokens to limit the number of simultaneous calls to\n\t\/\/ Swift: only a goroutine that has a token can make a call.\n\ttokens := make(chan int, maxSimultaneousCalls)\n\tfor k := 0; k < maxSimultaneousCalls; k++ {\n\t\ttokens <- k\n\t}\n\n\tfor i := 0; i < nb; i++ {\n\t\tbegin := i * maxNbFilesToDelete\n\t\tend := (i + 1) * maxNbFilesToDelete\n\t\tif end > len(objectNames) {\n\t\t\tend = len(objectNames)\n\t\t}\n\t\tobjectToDelete := objectNames[begin:end]\n\t\tgo func() {\n\t\t\tk := <-tokens\n\t\t\t_, err := c.BulkDelete(container, objectToDelete)\n\t\t\tch <- err\n\t\t\ttokens <- k\n\t\t}()\n\t}\n\n\tvar errm error\n\tfor i := 0; i < nb; i++ {\n\t\tif err := <-ch; err != nil {\n\t\t\terrm = multierror.Append(errm, err)\n\t\t}\n\t}\n\t\/\/ Get back the tokens to ensure that each goroutine can finish.\n\tfor k := 0; k < maxSimultaneousCalls; k++ {\n\t\t<-tokens\n\t}\n\treturn errm\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Only use ca cert if readable<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>first commit<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>Clean after your self, remove container at the end of the run<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>All major bugs fixed. Still needs tweaks<commit_after><|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n)\n\ntype line struct {\n\tip, hostname string\n}\n\nconst filename = \"\/etc\/hosts\"\n\ntype hostlist struct {\n\tlines []string\n}\n\nfunc (hl *hostlist) Read(fn string) error {\n\tb, err := ioutil.ReadFile(fn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thl.Parse(b)\n\treturn nil\n}\n\nfunc (hl *hostlist) Parse(b []byte) {\n\thl.lines = strings.Split(string(b), \"\\n\")\n}\n\nfunc (hl *hostlist) Write(fn string) error {\n\treturn ioutil.WriteFile(fn, hl.Bytes(), 0644)\n}\n\nfunc (hl *hostlist) Bytes() []byte {\n\treturn []byte(strings.Join(hl.lines, \"\\n\"))\n\nfunc (hl *hostlist) Contains(a, b string) (bool, error) {\n\tvar ip, hostname string\n\n\tif net.ParseIP(a) == nil && net.ParseIP(b) == nil {\n\t\treturn false, fmt.Errorf(\"neither %s or %s is a valid IP address\", a, b)\n\t}\n\n\tif net.ParseIP(a) == nil {\n\t\thostname = a\n\t\tip = b\n\t}\n\n\tfor _, line := range hl.lines {\n\t\tif line == fmt.Sprintf(\"%s\\t%s\", ip, hostname) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}\n\nfunc (hl *hostlist) Add(a, b string) error {\n\tvar ip, hostname string\n\n\tif net.ParseIP(a) == nil && net.ParseIP(b) == nil {\n\t\treturn fmt.Errorf(\"neither %s or %s is a valid IP address\", a, b)\n\t}\n\n\tif net.ParseIP(a) == nil {\n\t\thostname = a\n\t\tip = b\n\t}\n\n\thl.lines = append(hl.lines, fmt.Sprintf(\"%s\\t%s\", ip, hostname))\n\treturn nil\n}\n\nfunc containsPart(haystack, needle string) bool {\n\treturn strings.Contains(haystack, \"\\t\"+needle) || strings.Contains(haystack, needle+\"\\t\")\n}\n\nfunc reverse(a []int) []int {\n\tfor i := len(a)\/2 - 1; i >= 0; i-- {\n\t\topp := len(a) - 1 - i\n\t\ta[i], a[opp] = a[opp], a[i]\n\t}\n\n\treturn a\n}\n\nfunc (hl *hostlist) Remove(thing string) error {\n\tdeletes := []int{}\n\tfor i, line := range hl.lines {\n\t\tif containsPart(line, thing) {\n\t\t\tdeletes = append(deletes, i)\n\t\t}\n\t}\n\n\tfor _, i := range reverse(deletes) {\n\t\thl.lines = append(hl.lines[:i], hl.lines[i+1:]...)\n\t}\n\n\treturn nil\n}\n\nfunc (hl *hostlist) Comment(thing string) error {\n\tfor i, line := range hl.lines {\n\t\tif containsPart(line, thing) {\n\t\t\thl.lines[i] = \"#\" + line\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (hl *hostlist) Uncomment(thing string) error {\n\tfor i, line := range hl.lines {\n\t\tif containsPart(line, thing) {\n\t\t\thl.lines[i] = strings.TrimLeft(line, \"#\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc iAmRoot() bool {\n\tcmd := exec.Command(\"whoami\")\n\tuser, err := cmd.Output()\n\tif err != nil {\n\t\t\/\/ couldn't determine root due to error, run anyway - the user won't be able\n\t\t\/\/ to mod anything without root rights anyway\n\t\treturn true\n\t}\n\n\treturn strings.TrimSpace(string(user)) == \"root\"\n}\n\nfunc init() {\n\n}\n\nfunc main() {\n\tlog.SetFlags(0)\n\n\tif !iAmRoot() {\n\t\tlog.Fatal(\"Please run this program as Root!\")\n\t}\n\n\tif len(os.Args) <= 1 {\n\t\tlog.Fatal(\"Nothing to do, please specify command\")\n\t}\n\n\thosts := hostlist{}\n\thosts.Read(filename)\n\n\tcommand := string(os.Args[1])\n\n\t\/\/ fmt.Println(command)\n\tswitch command {\n\tdefault:\n\t\tlog.Fatalf(\"Unknown command: %s\", command)\n\n\tcase \"list\", \"ls\":\n\t\tfmt.Println(string(hosts.Bytes()))\n\n\tcase \"del\", \"rm\", \"-\":\n\t\tif len(os.Args) != 3 {\n\t\t\tlog.Fatal(\"Please give an IP or hostname to delete\")\n\t\t}\n\t\thosts.Remove(os.Args[2])\n\n\tcase \"ucom\":\n\t\tif len(os.Args) != 3 {\n\t\t\tlog.Fatal(\"Please give an IP or hostname to uncomment\")\n\t\t}\n\t\thosts.Uncomment(os.Args[2])\n\n\tcase \"com\":\n\t\tif len(os.Args) != 3 {\n\t\t\tlog.Fatal(\"Please give an IP or hostname to comment out\")\n\t\t}\n\t\thosts.Comment(os.Args[2])\n\n\tcase \"add\", \"+\":\n\t\tif len(os.Args) != 4 {\n\t\t\tlog.Fatal(\"Please give arguments in the form ip, hostname\")\n\t\t}\n\t\terr := hosts.Add(os.Args[2], os.Args[3])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\tcase \"has\", \"?\", \"contains\":\n\t\tif len(os.Args) != 4 {\n\t\t\tlog.Fatal(\"Please give arguments in the form ip, hostname\")\n\t\t}\n\n\t\tyes, err := hosts.Contains(os.Args[2], os.Args[3])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tif yes {\n\t\t\tos.Exit(0) \/\/ exit code 0 means it was contained within\n\t\t}\n\n\t\tos.Exit(1) \/\/ exit code 1 means not contained within\n\n\t}\n\n\thosts.Write(filename)\n}\n<commit_msg>move util functions to the top<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n)\n\ntype line struct {\n\tip, hostname string\n}\n\nconst filename = \"\/etc\/hosts\"\n\nfunc containsPart(haystack, needle string) bool {\n\treturn strings.Contains(haystack, \"\\t\"+needle) || strings.Contains(haystack, needle+\"\\t\")\n}\n\n\nfunc reverse(a []int) []int {\n\tfor i := len(a)\/2 - 1; i >= 0; i-- {\n\t\topp := len(a) - 1 - i\n\t\ta[i], a[opp] = a[opp], a[i]\n\t}\n\n\treturn a\n}\n\nfunc amIRoot() bool {\n\tcmd := exec.Command(\"whoami\")\n\tuser, err := cmd.Output()\n\tif err != nil {\n\t\t\/\/ couldn't determine root due to error, run anyway - the user won't be able\n\t\t\/\/ to mod anything without root rights anyway\n\t\treturn true\n\t}\n\n\treturn strings.TrimSpace(string(user)) == \"root\"\n}\n\ntype hostlist struct {\n\tlines []string\n}\n\nfunc (hl *hostlist) Read(fn string) error {\n\tb, err := ioutil.ReadFile(fn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thl.Parse(b)\n\treturn nil\n}\n\nfunc (hl *hostlist) Parse(b []byte) {\n\thl.lines = strings.Split(string(b), \"\\n\")\n}\n\nfunc (hl *hostlist) Write(fn string) error {\n\treturn ioutil.WriteFile(fn, hl.Bytes(), 0644)\n}\n\nfunc (hl *hostlist) Bytes() []byte {\n\treturn []byte(strings.Join(hl.lines, \"\\n\"))\n\nfunc (hl *hostlist) Contains(a, b string) (bool, error) {\n\tvar ip, hostname string\n\n\tif net.ParseIP(a) == nil && net.ParseIP(b) == nil {\n\t\treturn false, fmt.Errorf(\"neither %s or %s is a valid IP address\", a, b)\n\t}\n\n\tif net.ParseIP(a) == nil {\n\t\thostname = a\n\t\tip = b\n\t}\n\n\tfor _, line := range hl.lines {\n\t\tif line == fmt.Sprintf(\"%s\\t%s\", ip, hostname) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\treturn false, nil\n}\n\nfunc (hl *hostlist) Add(a, b string) error {\n\tvar ip, hostname string\n\n\tif net.ParseIP(a) == nil && net.ParseIP(b) == nil {\n\t\treturn fmt.Errorf(\"neither %s or %s is a valid IP address\", a, b)\n\t}\n\n\tif net.ParseIP(a) == nil {\n\t\thostname = a\n\t\tip = b\n\t}\n\n\thl.lines = append(hl.lines, fmt.Sprintf(\"%s\\t%s\", ip, hostname))\n\treturn nil\n}\n\nfunc (hl *hostlist) Remove(thing string) error {\n\tdeletes := []int{}\n\tfor i, line := range hl.lines {\n\t\tif containsPart(line, thing) {\n\t\t\tdeletes = append(deletes, i)\n\t\t}\n\t}\n\n\tfor _, i := range reverse(deletes) {\n\t\thl.lines = append(hl.lines[:i], hl.lines[i+1:]...)\n\t}\n\n\treturn nil\n}\n\nfunc (hl *hostlist) Comment(thing string) error {\n\tfor i, line := range hl.lines {\n\t\tif containsPart(line, thing) {\n\t\t\thl.lines[i] = \"#\" + line\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (hl *hostlist) Uncomment(thing string) error {\n\tfor i, line := range hl.lines {\n\t\tif containsPart(line, thing) {\n\t\t\thl.lines[i] = strings.TrimLeft(line, \"#\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc main() {\n\tlog.SetFlags(0)\n\n\tif !amIRoot() {\n\t\tlog.Fatal(\"Please run this program as Root!\")\n\t}\n\n\tif len(os.Args) <= 1 {\n\t\tlog.Fatal(\"Nothing to do, please specify command\")\n\t}\n\n\thosts := hostlist{}\n\thosts.Read(filename)\n\n\tcommand := string(os.Args[1])\n\n\t\/\/ fmt.Println(command)\n\tswitch command {\n\tdefault:\n\t\tlog.Fatalf(\"Unknown command: %s\", command)\n\n\tcase \"list\", \"ls\":\n\t\tfmt.Println(string(hosts.Bytes()))\n\n\tcase \"del\", \"rm\", \"-\":\n\t\tif len(os.Args) != 3 {\n\t\t\tlog.Fatal(\"Please give an IP or hostname to delete\")\n\t\t}\n\t\thosts.Remove(os.Args[2])\n\n\tcase \"ucom\":\n\t\tif len(os.Args) != 3 {\n\t\t\tlog.Fatal(\"Please give an IP or hostname to uncomment\")\n\t\t}\n\t\thosts.Uncomment(os.Args[2])\n\n\tcase \"com\":\n\t\tif len(os.Args) != 3 {\n\t\t\tlog.Fatal(\"Please give an IP or hostname to comment out\")\n\t\t}\n\t\thosts.Comment(os.Args[2])\n\n\tcase \"add\", \"+\":\n\t\tif len(os.Args) != 4 {\n\t\t\tlog.Fatal(\"Please give arguments in the form ip, hostname\")\n\t\t}\n\t\terr := hosts.Add(os.Args[2], os.Args[3])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\tcase \"has\", \"?\", \"contains\":\n\t\tif len(os.Args) != 4 {\n\t\t\tlog.Fatal(\"Please give arguments in the form ip, hostname\")\n\t\t}\n\n\t\tyes, err := hosts.Contains(os.Args[2], os.Args[3])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tif yes {\n\t\t\tos.Exit(0) \/\/ exit code 0 means it was contained within\n\t\t}\n\n\t\tos.Exit(1) \/\/ exit code 1 means not contained within\n\n\t}\n\n\thosts.Write(filename)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\nvar (\n\tcfgPath = kingpin.Flag(\"config\", `path to JSON configuration, defaults to \"config.json\"`).Short('c').String()\n\toutFile = kingpin.Flag(\"out\", `path to output file, defaults to \"config.go\"`).Short('o').String()\n\tpack = kingpin.Flag(\"package\", `name of go package containing config code`).Short('p').Default(\"main\").String()\n\n\t\/\/ Replace the variables below for testing\n\tfatalf = kingpin.Fatalf\n)\n\nfunc main() {\n\tkingpin.Parse()\n\trun()\n}\n\n\/\/ Make it easy for test to setup configuration values.\nfunc run() {\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tfatalf(\"failed to retrieve current directory: %s\", err)\n\t}\n\tif *cfgPath == \"\" {\n\t\t*cfgPath = path.Join(wd, \"config.json\")\n\t}\n\tif *cfgPath, err = filepath.Abs(*cfgPath); err != nil {\n\t\tfatalf(\"failed to compute absolute file path for config: %s\", err)\n\t}\n\tif *outFile == \"\" {\n\t\t*outFile = path.Join(wd, \"config.go\")\n\t}\n\tif _, err := os.Stat(*cfgPath); os.IsNotExist(err) {\n\t\tfatalf(\"no configuration file at %s\", *cfgPath)\n\t}\n\toutDir := filepath.Dir(*outFile)\n\ts, err := os.Stat(outDir)\n\tif os.IsNotExist(err) {\n\t\tkingpin.FatalIfError(os.MkdirAll(outDir, 0777), \"output directory\")\n\t} else if err != nil || !s.IsDir() {\n\t\tfatalf(\"not a valid output directory: %s\", outDir)\n\t}\n\tinput, err := os.Open(*cfgPath)\n\tif err != nil {\n\t\tfatalf(\"failed to open JSON file: %s\", err)\n\t}\n\tdefer input.Close()\n\tdecoder := json.NewDecoder(input)\n\tdecoder.UseNumber()\n\tvar data interface{}\n\terr = decoder.Decode(&data)\n\tif err != nil {\n\t\tfatalf(\"failed to unmarshal JSON: %s\", err)\n\t}\n\tvar tree Tree\n\ttree.Populate(data)\n\tif tree.Type != Struct {\n\t\tfatalf(\"invalid configuration file content, JSON must define an object\")\n\t}\n\ttree.Normalize()\n\ttree.Name = \"cfg\"\n\tconfigs := make([]*Tree, len(tree.Children))\n\tfor i, child := range tree.Children {\n\t\tconfigs[i] = &Tree{\n\t\t\tName: child.Name + \"Cfg\",\n\t\t\tChildren: child.Children,\n\t\t\tType: child.Type,\n\t\t\tList: child.List,\n\t\t}\n\t}\n\tvars := Variables{\n\t\tCmdLine: fmt.Sprintf(\"$ %s %s\", os.Args[0], strings.Join(os.Args[1:], \" \")),\n\t\tPack: *pack,\n\t\tTree: &tree,\n\t\tConfigs: configs,\n\t}\n\ttmpl, err := template.New(\"gonfig\").Funcs(template.FuncMap{\"NewVar\": NewVar}).Parse(configTemplate)\n\tif err != nil {\n\t\tfatalf(\"failed to compile template: %s\", err)\n\t}\n\toutF, err := os.Create(*outFile)\n\tif err != nil {\n\t\tfatalf(\"failed to create file: %s\", err)\n\t}\n\tdefer outF.Close()\n\terr = tmpl.Execute(outF, &vars)\n\n\t\/\/ Print something\n\tfmt.Println(*outFile)\n}\n\n\/\/ Variables defines the data structure fed to the template to generate the final code.\ntype Variables struct {\n\tCmdLine string \/\/ Command line used to invoke gonfig\n\tPack string \/\/ Name of target package\n\tTree *Tree \/\/ Top level data structure\n\tConfigs []*Tree \/\/ Configuration entries\n}\n\n\/\/ Internal count used to generate unique variable names\nvar varCount int\n\n\/\/ Generate unique Go variable name\nfunc NewVar() string {\n\tvarCount += 1\n\treturn fmt.Sprintf(\"v%d\", varCount)\n}\n\nconst configTemplate string = `\/\/************************************************************************\/\/\n\/\/ Configuration\n\/\/\n\/\/ Generated with:\n\/\/ {{.CmdLine}}\n\/\/\n\/\/ The content of this file is auto-generated, DO NOT MODIFY\n\/\/************************************************************************\/\/\n\npackage {{.Pack}}\n\nimport (\n\t\"encoding\/json\"\n \"os\"\n)\n\nvar ({{range .Tree.Children}}\n\t{{.Name}} {{.TypeName}}{{end}}\n)\n{{range .Configs}}{{if eq .Type.String \"struct\"}}\n{{.FormatRaw}}\n{{end}}{{end}}\n{{.Tree.FormatRaw}}\n\n\/\/ Load reads the JSON at the given path and initializes the package variables with the\n\/\/ corresponding values.\nfunc Load(path string) error {\n input, err := os.Open(path)\n if err != nil {\n return err\n }\n\tdecoder := json.NewDecoder(input)\n var c Cfg\n err = decoder.Decode(&c)\n if err != nil {\n return err\n\t}{{range .Tree.Children}}{{$v := NewVar}}{{if eq .Type.String \"struct\"}}\n\t{{$v}} := {{.Name}}Cfg(c.{{.Name}}){{end}}\n\t{{.Name}} = {{if eq .Type.String \"struct\"}}&{{$v}}{{else}}c.{{.Name}}{{end}}{{end}}\n\n return nil\n}\n`\n\n\/\/ Wrapper around tree.Format that exits the process in case of error.\nfunc (t *Tree) FormatRaw() string {\n\tb, err := t.Format()\n\tif err != nil {\n\t\tfatalf(\"failed to format data structure: %s\", err)\n\t}\n\treturn string(b)\n}\n\n\/\/ Produce type name for tree node\nfunc (t *Tree) TypeName() string {\n\tif t.Type == Struct {\n\t\treturn \"*\" + t.Name.String() + \"Cfg\"\n\t}\n\treturn t.Type.String()\n}\n<commit_msg>Remove extraneous blank lines in generated file.<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\nvar (\n\tcfgPath = kingpin.Flag(\"config\", `path to JSON configuration, defaults to \"config.json\"`).Short('c').String()\n\toutFile = kingpin.Flag(\"out\", `path to output file, defaults to \"config.go\"`).Short('o').String()\n\tpack = kingpin.Flag(\"package\", `name of go package containing config code`).Short('p').Default(\"main\").String()\n\n\t\/\/ Replace the variables below for testing\n\tfatalf = kingpin.Fatalf\n)\n\nfunc main() {\n\tkingpin.Parse()\n\trun()\n}\n\n\/\/ Make it easy for test to setup configuration values.\nfunc run() {\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tfatalf(\"failed to retrieve current directory: %s\", err)\n\t}\n\tif *cfgPath == \"\" {\n\t\t*cfgPath = path.Join(wd, \"config.json\")\n\t}\n\tif *cfgPath, err = filepath.Abs(*cfgPath); err != nil {\n\t\tfatalf(\"failed to compute absolute file path for config: %s\", err)\n\t}\n\tif *outFile == \"\" {\n\t\t*outFile = path.Join(wd, \"config.go\")\n\t}\n\tif _, err := os.Stat(*cfgPath); os.IsNotExist(err) {\n\t\tfatalf(\"no configuration file at %s\", *cfgPath)\n\t}\n\toutDir := filepath.Dir(*outFile)\n\ts, err := os.Stat(outDir)\n\tif os.IsNotExist(err) {\n\t\tkingpin.FatalIfError(os.MkdirAll(outDir, 0777), \"output directory\")\n\t} else if err != nil || !s.IsDir() {\n\t\tfatalf(\"not a valid output directory: %s\", outDir)\n\t}\n\tinput, err := os.Open(*cfgPath)\n\tif err != nil {\n\t\tfatalf(\"failed to open JSON file: %s\", err)\n\t}\n\tdefer input.Close()\n\tdecoder := json.NewDecoder(input)\n\tdecoder.UseNumber()\n\tvar data interface{}\n\terr = decoder.Decode(&data)\n\tif err != nil {\n\t\tfatalf(\"failed to unmarshal JSON: %s\", err)\n\t}\n\tvar tree Tree\n\ttree.Populate(data)\n\tif tree.Type != Struct {\n\t\tfatalf(\"invalid configuration file content, JSON must define an object\")\n\t}\n\ttree.Normalize()\n\ttree.Name = \"cfg\"\n\tconfigs := make([]*Tree, len(tree.Children))\n\tfor i, child := range tree.Children {\n\t\tconfigs[i] = &Tree{\n\t\t\tName: child.Name + \"Cfg\",\n\t\t\tChildren: child.Children,\n\t\t\tType: child.Type,\n\t\t\tList: child.List,\n\t\t}\n\t}\n\tvars := Variables{\n\t\tCmdLine: fmt.Sprintf(\"$ %s %s\", os.Args[0], strings.Join(os.Args[1:], \" \")),\n\t\tPack: *pack,\n\t\tTree: &tree,\n\t\tConfigs: configs,\n\t}\n\ttmpl, err := template.New(\"gonfig\").Funcs(template.FuncMap{\"NewVar\": NewVar}).Parse(configTemplate)\n\tif err != nil {\n\t\tfatalf(\"failed to compile template: %s\", err)\n\t}\n\toutF, err := os.Create(*outFile)\n\tif err != nil {\n\t\tfatalf(\"failed to create file: %s\", err)\n\t}\n\tdefer outF.Close()\n\terr = tmpl.Execute(outF, &vars)\n\n\t\/\/ Print something\n\tfmt.Println(*outFile)\n}\n\n\/\/ Variables defines the data structure fed to the template to generate the final code.\ntype Variables struct {\n\tCmdLine string \/\/ Command line used to invoke gonfig\n\tPack string \/\/ Name of target package\n\tTree *Tree \/\/ Top level data structure\n\tConfigs []*Tree \/\/ Configuration entries\n}\n\n\/\/ Internal count used to generate unique variable names\nvar varCount int\n\n\/\/ Generate unique Go variable name\nfunc NewVar() string {\n\tvarCount += 1\n\treturn fmt.Sprintf(\"v%d\", varCount)\n}\n\nconst configTemplate string = `\/\/************************************************************************\/\/\n\/\/ Configuration\n\/\/\n\/\/ Generated with:\n\/\/ {{.CmdLine}}\n\/\/\n\/\/ The content of this file is auto-generated, DO NOT MODIFY\n\/\/************************************************************************\/\/\n\npackage {{.Pack}}\n\nimport (\n\t\"encoding\/json\"\n \"os\"\n)\n\nvar ({{range .Tree.Children}}\n\t{{.Name}} {{.TypeName}}{{end}}\n)\n{{range .Configs}}{{if eq .Type.String \"struct\"}}\n{{.FormatRaw}}{{end}}{{end}}\n{{.Tree.FormatRaw}}\n\/\/ Load reads the JSON at the given path and initializes the package variables with the\n\/\/ corresponding values.\nfunc Load(path string) error {\n input, err := os.Open(path)\n if err != nil {\n return err\n }\n\tdecoder := json.NewDecoder(input)\n var c Cfg\n err = decoder.Decode(&c)\n if err != nil {\n return err\n\t}{{range .Tree.Children}}{{$v := NewVar}}{{if eq .Type.String \"struct\"}}\n\t{{$v}} := {{.Name}}Cfg(c.{{.Name}}){{end}}\n\t{{.Name}} = {{if eq .Type.String \"struct\"}}&{{$v}}{{else}}c.{{.Name}}{{end}}{{end}}\n\n return nil\n}\n`\n\n\/\/ Wrapper around tree.Format that exits the process in case of error.\nfunc (t *Tree) FormatRaw() string {\n\tb, err := t.Format()\n\tif err != nil {\n\t\tfatalf(\"failed to format data structure: %s\", err)\n\t}\n\treturn string(b)\n}\n\n\/\/ Produce type name for tree node\nfunc (t *Tree) TypeName() string {\n\tif t.Type == Struct {\n\t\treturn \"*\" + t.Name.String() + \"Cfg\"\n\t}\n\treturn t.Type.String()\n}\n<|endoftext|>"} {"text":"<commit_before>package parsers\n\nvar assetListTestCases = []Case{\n\t{\n\t\t\"Simple\",\n\t\t`Hurricane\t1\tCombat Battlecruiser`,\n\t\t&AssetList{\n\t\t\tItems: []AssetItem{{Name: \"Hurricane\", Group: \"Combat Battlecruiser\", Quantity: 1}},\n\t\t\tlines: []int{0},\n\t\t},\n\t\tInput{},\n\t\tfalse, \/\/ This clashes with the simple contract format\n\t}, {\n\t\t\"Typical\",\n\t\t`720mm Gallium Cannon\t1\tProjectile Weapon\tMedium\tHigh\t10 m3\nDamage Control II\t1\tDamage Control\t\tLow\t5 m3\nExperimental 10MN Microwarpdrive I\t1\tPropulsion Module\t\tMedium\t10 m3`,\n\t\t&AssetList{\n\t\t\tItems: []AssetItem{\n\t\t\t\t{Name: \"720mm Gallium Cannon\", Quantity: 1, Group: \"Projectile Weapon\", Category: \"Medium\", Slot: \"High\", Volume: 10},\n\t\t\t\t{Name: \"Damage Control II\", Quantity: 1, Group: \"Damage Control\", Slot: \"Low\", Volume: 5},\n\t\t\t\t{Name: \"Experimental 10MN Microwarpdrive I\", Quantity: 1, Group: \"Propulsion Module\", Size: \"Medium\", Volume: 10},\n\t\t\t},\n\t\t\tlines: []int{0, 1, 2}},\n\t\tInput{},\n\t\ttrue,\n\t}, {\n\t\t\"Full\",\n\t\t`200mm AutoCannon I\t1\tProjectile Weapon\tModule\tSmall\tHigh\t5 m3\t1\n10MN Afterburner II\t1\tPropulsion Module\tModule\tMedium\t5 m3\t5\t2\nWarrior II\t9`,\n\t\t&AssetList{\n\t\t\tItems: []AssetItem{\n\t\t\t\t{Name: \"10MN Afterburner II\", Quantity: 1, Group: \"Propulsion Module\", Category: \"Module\", Size: \"Medium\", MetaLevel: \"5\", TechLevel: \"2\", Volume: 5},\n\t\t\t\t{Name: \"200mm AutoCannon I\", Quantity: 1, Group: \"Projectile Weapon\", Category: \"Module\", Size: \"Small\", Slot: \"High\", MetaLevel: \"1\", Volume: 5},\n\t\t\t\t{Name: \"Warrior II\", Quantity: 9},\n\t\t\t},\n\t\t\tlines: []int{0, 1, 2},\n\t\t},\n\t\tInput{},\n\t\ttrue,\n\t}, {\n\t\t\"With Volumes\",\n\t\t`Sleeper Data Library\t1080\tSleeper Components\t\t\t10.82 m3`,\n\t\t&AssetList{\n\t\t\tItems: []AssetItem{{Name: \"Sleeper Data Library\", Quantity: 1080, Group: \"Sleeper Components\", Volume: 10.82}},\n\t\t\tlines: []int{0},\n\t\t},\n\t\tInput{},\n\t\ttrue,\n\t}, {\n\t\t\"With thousands separators\",\n\t\t`Sleeper Data Library\t1,080\nSleeper Data Library\t1'080\nSleeper Data Library\t1.080`,\n\t\t&AssetList{\n\t\t\tItems: []AssetItem{\n\t\t\t\t{Name: \"Sleeper Data Library\", Quantity: 1080},\n\t\t\t\t{Name: \"Sleeper Data Library\", Quantity: 1080},\n\t\t\t\t{Name: \"Sleeper Data Library\", Quantity: 1080},\n\t\t\t},\n\t\t\tlines: []int{0, 1, 2},\n\t\t},\n\t\tInput{},\n\t\tfalse,\n\t}, {\n\t\t\"With empty quantity\",\n\t\t`Sleeper Data Library\t`,\n\t\t&AssetList{\n\t\t\tItems: []AssetItem{\n\t\t\t\t{Name: \"Sleeper Data Library\", Quantity: 1},\n\t\t\t},\n\t\t\tlines: []int{0},\n\t\t},\n\t\tInput{},\n\t\tfalse,\n\t},\n}\n<commit_msg>Add test for asset parsing improvement<commit_after>package parsers\n\nvar assetListTestCases = []Case{\n\t{\n\t\t\"Simple\",\n\t\t`Hurricane\t1\tCombat Battlecruiser`,\n\t\t&AssetList{\n\t\t\tItems: []AssetItem{{Name: \"Hurricane\", Group: \"Combat Battlecruiser\", Quantity: 1}},\n\t\t\tlines: []int{0},\n\t\t},\n\t\tInput{},\n\t\tfalse, \/\/ This clashes with the simple contract format\n\t}, {\n\t\t\"Typical\",\n\t\t`720mm Gallium Cannon\t1\tProjectile Weapon\tMedium\tHigh\t10 m3\nDamage Control II\t1\tDamage Control\t\tLow\t5 m3\nExperimental 10MN Microwarpdrive I\t1\tPropulsion Module\t\tMedium\t10 m3`,\n\t\t&AssetList{\n\t\t\tItems: []AssetItem{\n\t\t\t\t{Name: \"720mm Gallium Cannon\", Quantity: 1, Group: \"Projectile Weapon\", Category: \"Medium\", Slot: \"High\", Volume: 10},\n\t\t\t\t{Name: \"Damage Control II\", Quantity: 1, Group: \"Damage Control\", Slot: \"Low\", Volume: 5},\n\t\t\t\t{Name: \"Experimental 10MN Microwarpdrive I\", Quantity: 1, Group: \"Propulsion Module\", Size: \"Medium\", Volume: 10},\n\t\t\t},\n\t\t\tlines: []int{0, 1, 2}},\n\t\tInput{},\n\t\ttrue,\n\t}, {\n\t\t\"Full\",\n\t\t`200mm AutoCannon I\t1\tProjectile Weapon\tModule\tSmall\tHigh\t5 m3\t1\n10MN Afterburner II\t1\tPropulsion Module\tModule\tMedium\t5 m3\t5\t2\nWarrior II\t9`,\n\t\t&AssetList{\n\t\t\tItems: []AssetItem{\n\t\t\t\t{Name: \"10MN Afterburner II\", Quantity: 1, Group: \"Propulsion Module\", Category: \"Module\", Size: \"Medium\", MetaLevel: \"5\", TechLevel: \"2\", Volume: 5},\n\t\t\t\t{Name: \"200mm AutoCannon I\", Quantity: 1, Group: \"Projectile Weapon\", Category: \"Module\", Size: \"Small\", Slot: \"High\", MetaLevel: \"1\", Volume: 5},\n\t\t\t\t{Name: \"Warrior II\", Quantity: 9},\n\t\t\t},\n\t\t\tlines: []int{0, 1, 2},\n\t\t},\n\t\tInput{},\n\t\ttrue,\n\t}, {\n\t\t\"With Volumes\",\n\t\t`Sleeper Data Library\t1080\tSleeper Components\t\t\t10.82 m3`,\n\t\t&AssetList{\n\t\t\tItems: []AssetItem{{Name: \"Sleeper Data Library\", Quantity: 1080, Group: \"Sleeper Components\", Volume: 10.82}},\n\t\t\tlines: []int{0},\n\t\t},\n\t\tInput{},\n\t\ttrue,\n\t}, {\n\t\t\"With thousands separators\",\n\t\t`Sleeper Data Library\t1,080\nSleeper Data Library\t1'080\nSleeper Data Library\t1.080`,\n\t\t&AssetList{\n\t\t\tItems: []AssetItem{\n\t\t\t\t{Name: \"Sleeper Data Library\", Quantity: 1080},\n\t\t\t\t{Name: \"Sleeper Data Library\", Quantity: 1080},\n\t\t\t\t{Name: \"Sleeper Data Library\", Quantity: 1080},\n\t\t\t},\n\t\t\tlines: []int{0, 1, 2},\n\t\t},\n\t\tInput{},\n\t\tfalse,\n\t}, {\n\t\t\"With empty quantity\",\n\t\t`Sleeper Data Library\t`,\n\t\t&AssetList{\n\t\t\tItems: []AssetItem{\n\t\t\t\t{Name: \"Sleeper Data Library\", Quantity: 1},\n\t\t\t},\n\t\t\tlines: []int{0},\n\t\t},\n\t\tInput{},\n\t\tfalse,\n\t}, {\n\t\t\"With asterisk\",\n\t\t`Armor Plates*\t477\tGeborgene Materialien*`,\n\t\t&AssetList{\n\t\t\tItems: []AssetItem{\n\t\t\t\t{Name: \"Armor Plates\", Quantity: 477, Group: \"Geborgene Materialien*\"},\n\t\t\t},\n\t\t\tlines: []int{0},\n\t\t},\n\t\tInput{},\n\t\tfalse,\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/najeira\/ltsv\"\n\t\"github.com\/olekukonko\/tablewriter\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n\t\"io\"\n\t\"log\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Profile struct {\n\tUri string\n\tCnt int\n\tMax float64\n\tMin float64\n\tSum float64\n\tAvg float64\n\tMethod string\n\tMaxBody float64\n\tMinBody float64\n\tSumBody float64\n\tAvgBody float64\n}\n\ntype Profiles []Profile\ntype ByMax struct{ Profiles }\ntype ByMin struct{ Profiles }\ntype BySum struct{ Profiles }\ntype ByAvg struct{ Profiles }\ntype ByUri struct{ Profiles }\ntype ByCnt struct{ Profiles }\ntype ByMethod struct{ Profiles }\ntype ByMaxBody struct{ Profiles }\ntype ByMinBody struct{ Profiles }\ntype BySumBody struct{ Profiles }\ntype ByAvgBody struct{ Profiles }\n\nfunc (s Profiles) Len() int { return len(s) }\nfunc (s Profiles) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\n\nfunc (s ByMax) Less(i, j int) bool { return s.Profiles[i].Max < s.Profiles[j].Max }\nfunc (s ByMin) Less(i, j int) bool { return s.Profiles[i].Min < s.Profiles[j].Min }\nfunc (s BySum) Less(i, j int) bool { return s.Profiles[i].Sum < s.Profiles[j].Sum }\nfunc (s ByAvg) Less(i, j int) bool { return s.Profiles[i].Avg < s.Profiles[j].Avg }\nfunc (s ByUri) Less(i, j int) bool { return s.Profiles[i].Uri < s.Profiles[j].Uri }\nfunc (s ByCnt) Less(i, j int) bool { return s.Profiles[i].Cnt < s.Profiles[j].Cnt }\nfunc (s ByMethod) Less(i, j int) bool { return s.Profiles[i].Method < s.Profiles[j].Method }\nfunc (s ByMaxBody) Less(i, j int) bool { return s.Profiles[i].MaxBody < s.Profiles[j].MaxBody }\nfunc (s ByMinBody) Less(i, j int) bool { return s.Profiles[i].MinBody < s.Profiles[j].MinBody }\nfunc (s BySumBody) Less(i, j int) bool { return s.Profiles[i].SumBody < s.Profiles[j].SumBody }\nfunc (s ByAvgBody) Less(i, j int) bool { return s.Profiles[i].AvgBody < s.Profiles[j].AvgBody }\n\nfunc AbsPath(fname string) (f string, err error) {\n\tvar fpath string\n\tmatched, _ := regexp.Match(\"^~\/\", []byte(fname))\n\tif matched {\n\t\tusr, _ := user.Current()\n\t\tfpath = strings.Replace(fname, \"~\", usr.HomeDir, 1)\n\t} else {\n\t\tfpath, err = filepath.Abs(fname)\n\t}\n\n\treturn fpath, err\n}\n\nfunc LoadFile(filename string) (f *os.File, err error) {\n\tfpath, err := AbsPath(filename)\n\tif err != nil {\n\t\treturn f, err\n\t}\n\tf, err = os.Open(fpath)\n\n\treturn f, err\n}\n\nfunc Round(f float64) string {\n\treturn fmt.Sprintf(\"%.3f\", f)\n}\n\nfunc Output(ps Profiles) {\n\tif *tsv {\n\t\tfmt.Printf(\"Count\\tMin\\tMax\\tSum\\tAvg\\tMax(Body)\\tMin(Body)\\tSum(Body)\\tAvg(Body)\\tMethod\\tUri%v\", eol)\n\n\t\tfor _, p := range ps {\n\t\t\tfmt.Printf(\"%v\\t%v\\t%v\\t%v\\t%v\\t%v\\t%v\\t%v\\t%v\\t%v\\t%v%v\",\n\t\t\t\tp.Cnt, Round(p.Min), Round(p.Max), Round(p.Sum), Round(p.Avg),\n\t\t\t\tRound(p.MinBody), Round(p.MaxBody), Round(p.SumBody), Round(p.AvgBody),\n\t\t\t\tp.Method, p.Uri, eol)\n\t\t}\n\t} else {\n\t\ttable := tablewriter.NewWriter(os.Stdout)\n\t\ttable.SetHeader([]string{\"Count\", \"Min\", \"Max\", \"Sum\", \"Avg\",\n\t\t\t\"Max(Body)\", \"Min(Body)\", \"Sum(Body)\", \"Avg(Body)\",\n\t\t\t\"Method\", \"Uri\"})\n\t\tfor _, p := range ps {\n\t\t\tdata := []string{\n\t\t\t\tfmt.Sprint(p.Cnt), Round(p.Min), Round(p.Max), Round(p.Sum), Round(p.Avg),\n\t\t\t\tRound(p.MinBody), Round(p.MaxBody), Round(p.SumBody), Round(p.AvgBody),\n\t\t\t\tp.Method, p.Uri}\n\t\t\ttable.Append(data)\n\t\t}\n\t\ttable.Render()\n\t}\n}\n\nfunc SortByMax(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(ByMax{ps}))\n\t} else {\n\t\tsort.Sort(ByMax{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortByMin(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(ByMin{ps}))\n\t} else {\n\t\tsort.Sort(ByMin{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortByAvg(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(ByAvg{ps}))\n\t} else {\n\t\tsort.Sort(ByAvg{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortBySum(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(BySum{ps}))\n\t} else {\n\t\tsort.Sort(BySum{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortByCnt(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(ByCnt{ps}))\n\t} else {\n\t\tsort.Sort(ByCnt{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortByUri(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(ByUri{ps}))\n\t} else {\n\t\tsort.Sort(ByUri{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortByMethod(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(ByMethod{ps}))\n\t} else {\n\t\tsort.Sort(ByMethod{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortByMaxBody(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(ByMaxBody{ps}))\n\t} else {\n\t\tsort.Sort(ByMaxBody{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortByMinBody(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(ByMinBody{ps}))\n\t} else {\n\t\tsort.Sort(ByMinBody{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortByAvgBody(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(ByAvgBody{ps}))\n\t} else {\n\t\tsort.Sort(ByAvgBody{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortBySumBody(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(BySumBody{ps}))\n\t} else {\n\t\tsort.Sort(BySumBody{ps})\n\t}\n\tOutput(ps)\n}\n\nvar (\n\tfile = kingpin.Flag(\"file\", \"access log file\").Short('f').String()\n\tmax = kingpin.Flag(\"max\", \"sort by max response time\").Bool()\n\tmin = kingpin.Flag(\"min\", \"sort by min response time\").Bool()\n\tavg = kingpin.Flag(\"avg\", \"sort by avg response time\").Bool()\n\tsum = kingpin.Flag(\"sum\", \"sort by sum response time\").Bool()\n\tcnt = kingpin.Flag(\"cnt\", \"sort by count\").Bool()\n\tsortUri = kingpin.Flag(\"uri\", \"sort by uri\").Bool()\n\tmethod = kingpin.Flag(\"method\", \"sort by method\").Bool()\n\tmaxBody = kingpin.Flag(\"max-body\", \"sort by max body size\").Bool()\n\tminBody = kingpin.Flag(\"min-body\", \"sort by min body size\").Bool()\n\tavgBody = kingpin.Flag(\"avg-body\", \"sort by avg body size\").Bool()\n\tsumBody = kingpin.Flag(\"sum-body\", \"sort by sum body size\").Bool()\n\treverse = kingpin.Flag(\"reverse\", \"reverse the result of comparisons\").Short('r').Bool()\n\tqueryString = kingpin.Flag(\"query-string\", \"include query string\").Short('q').Bool()\n\ttsv = kingpin.Flag(\"tsv\", \"tsv format (default: table)\").Bool()\n\tapptimeLabel = kingpin.Flag(\"apptime-label\", \"apptime label\").Default(\"apptime\").String()\n\tsizeLabel = kingpin.Flag(\"size-label\", \"size label\").Default(\"size\").String()\n\tmethodLabel = kingpin.Flag(\"method-label\", \"method label\").Default(\"method\").String()\n\turiLabel = kingpin.Flag(\"uri-label\", \"uri label\").Default(\"uri\").String()\n\tlimit = kingpin.Flag(\"limit\", \"set an upper limit of the target uri\").Default(\"5000\").Int()\n\tinclude = kingpin.Flag(\"include\", \"don't exclude uri matching PATTERN\").PlaceHolder(\"PATTERN\").String()\n\texclude = kingpin.Flag(\"exclude\", \"exclude uri matching PATTERN\").PlaceHolder(\"PATTERN\").String()\n\n\teol = \"\\n\"\n)\n\nfunc main() {\n\tkingpin.Version(\"0.0.4\")\n\tkingpin.Parse()\n\n\tvar f *os.File\n\tvar err error\n\n\tif *file != \"\" {\n\t\tf, err = LoadFile(*file)\n\t\tdefer f.Close()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t} else {\n\t\tf = os.Stdin\n\t}\n\n\tif runtime.GOOS == \"windows\" {\n\t\teol = \"\\r\\n\"\n\t}\n\n\tvar sortKey string\n\n\tif *max {\n\t\tsortKey = \"max\"\n\t} else if *min {\n\t\tsortKey = \"min\"\n\t} else if *avg {\n\t\tsortKey = \"avg\"\n\t} else if *sum {\n\t\tsortKey = \"sum\"\n\t} else if *cnt {\n\t\tsortKey = \"cnt\"\n\t} else if *sortUri {\n\t\tsortKey = \"uri\"\n\t} else if *method {\n\t\tsortKey = \"method\"\n\t} else if *maxBody {\n\t\tsortKey = \"maxBody\"\n\t} else if *minBody {\n\t\tsortKey = \"minBody\"\n\t} else if *avgBody {\n\t\tsortKey = \"avgBody\"\n\t} else if *sumBody {\n\t\tsortKey = \"sumBody\"\n\t} else {\n\t\tsortKey = \"max\"\n\t}\n\n\tvar uri string\n\tvar index string\n\tvar accessLog Profiles\n\turiHints := make(map[string]int)\n\tlength := 0\n\tcursor := 0\n\n\tr := ltsv.NewReader(f)\n\tfor {\n\t\tline, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tresTime, err := strconv.ParseFloat(line[*apptimeLabel], 64)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tbodySize, err := strconv.ParseFloat(line[*sizeLabel], 64)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tu, err := url.Parse(line[*uriLabel])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif *queryString {\n\t\t\tv := url.Values{}\n\t\t\tvalues := u.Query()\n\t\t\tfor q, _ := range values {\n\t\t\t\tv.Set(q, \"xxx\")\n\t\t\t}\n\t\t\turi = fmt.Sprintf(\"%s?%s\", u.Path, v.Encode())\n\t\t\tindex = fmt.Sprintf(\"%s_%s?%s\", line[*methodLabel], u.Path, v.Encode())\n\t\t} else {\n\t\t\turi = u.Path\n\t\t\tindex = fmt.Sprintf(\"%s_%s\", line[*methodLabel], u.Path)\n\t\t}\n\n\t\tif *include != \"\" {\n\t\t\tif ok, err := regexp.Match(*include, []byte(uri)); !ok && err == nil {\n\t\t\t\tcontinue\n\t\t\t} else if err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\n\t\tif *exclude != \"\" {\n\t\t\tif ok, err := regexp.Match(*exclude, []byte(uri)); ok && err == nil {\n\t\t\t\tcontinue\n\t\t\t} else if err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\n\t\tif _, ok := uriHints[index]; ok {\n\t\t\tcursor = uriHints[index]\n\t\t} else {\n\t\t\turiHints[index] = length\n\t\t\tcursor = length\n\t\t\tlength++\n\t\t\taccessLog = append(accessLog, Profile{Uri: uri})\n\t\t}\n\n\t\tif len(uriHints) > *limit {\n\t\t\tlog.Fatal(fmt.Sprintf(\"Too many uri (%d or less)\", *limit))\n\t\t}\n\n\t\tif accessLog[cursor].Max < resTime {\n\t\t\taccessLog[cursor].Max = resTime\n\t\t}\n\n\t\tif accessLog[cursor].Min >= resTime || accessLog[cursor].Min == 0 {\n\t\t\taccessLog[cursor].Min = resTime\n\t\t}\n\n\t\taccessLog[cursor].Cnt++\n\t\taccessLog[cursor].Sum += resTime\n\t\taccessLog[cursor].Method = line[*methodLabel]\n\n\t\tif accessLog[cursor].MaxBody < bodySize {\n\t\t\taccessLog[cursor].MaxBody = bodySize\n\t\t}\n\n\t\tif accessLog[cursor].MinBody >= bodySize || accessLog[cursor].MinBody == 0 {\n\t\t\taccessLog[cursor].MinBody = bodySize\n\t\t}\n\n\t\taccessLog[cursor].SumBody += bodySize\n\t}\n\n\tfor i, _ := range accessLog {\n\t\taccessLog[i].Avg = accessLog[i].Sum \/ float64(accessLog[i].Cnt)\n\t\taccessLog[i].AvgBody = accessLog[i].SumBody \/ float64(accessLog[i].Cnt)\n\t}\n\n\tswitch sortKey {\n\tcase \"max\":\n\t\tSortByMax(accessLog, *reverse)\n\tcase \"min\":\n\t\tSortByMin(accessLog, *reverse)\n\tcase \"avg\":\n\t\tSortByAvg(accessLog, *reverse)\n\tcase \"sum\":\n\t\tSortBySum(accessLog, *reverse)\n\tcase \"cnt\":\n\t\tSortByCnt(accessLog, *reverse)\n\tcase \"uri\":\n\t\tSortByUri(accessLog, *reverse)\n\tcase \"method\":\n\t\tSortByMethod(accessLog, *reverse)\n\tcase \"maxBody\":\n\t\tSortByMaxBody(accessLog, *reverse)\n\tcase \"minBody\":\n\t\tSortByMinBody(accessLog, *reverse)\n\tcase \"avgBody\":\n\t\tSortByAvgBody(accessLog, *reverse)\n\tcase \"sumBody\":\n\t\tSortBySumBody(accessLog, *reverse)\n\t}\n}\n<commit_msg>Added --noheaders<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/najeira\/ltsv\"\n\t\"github.com\/olekukonko\/tablewriter\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n\t\"io\"\n\t\"log\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Profile struct {\n\tUri string\n\tCnt int\n\tMax float64\n\tMin float64\n\tSum float64\n\tAvg float64\n\tMethod string\n\tMaxBody float64\n\tMinBody float64\n\tSumBody float64\n\tAvgBody float64\n}\n\ntype Profiles []Profile\ntype ByMax struct{ Profiles }\ntype ByMin struct{ Profiles }\ntype BySum struct{ Profiles }\ntype ByAvg struct{ Profiles }\ntype ByUri struct{ Profiles }\ntype ByCnt struct{ Profiles }\ntype ByMethod struct{ Profiles }\ntype ByMaxBody struct{ Profiles }\ntype ByMinBody struct{ Profiles }\ntype BySumBody struct{ Profiles }\ntype ByAvgBody struct{ Profiles }\n\nfunc (s Profiles) Len() int { return len(s) }\nfunc (s Profiles) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\n\nfunc (s ByMax) Less(i, j int) bool { return s.Profiles[i].Max < s.Profiles[j].Max }\nfunc (s ByMin) Less(i, j int) bool { return s.Profiles[i].Min < s.Profiles[j].Min }\nfunc (s BySum) Less(i, j int) bool { return s.Profiles[i].Sum < s.Profiles[j].Sum }\nfunc (s ByAvg) Less(i, j int) bool { return s.Profiles[i].Avg < s.Profiles[j].Avg }\nfunc (s ByUri) Less(i, j int) bool { return s.Profiles[i].Uri < s.Profiles[j].Uri }\nfunc (s ByCnt) Less(i, j int) bool { return s.Profiles[i].Cnt < s.Profiles[j].Cnt }\nfunc (s ByMethod) Less(i, j int) bool { return s.Profiles[i].Method < s.Profiles[j].Method }\nfunc (s ByMaxBody) Less(i, j int) bool { return s.Profiles[i].MaxBody < s.Profiles[j].MaxBody }\nfunc (s ByMinBody) Less(i, j int) bool { return s.Profiles[i].MinBody < s.Profiles[j].MinBody }\nfunc (s BySumBody) Less(i, j int) bool { return s.Profiles[i].SumBody < s.Profiles[j].SumBody }\nfunc (s ByAvgBody) Less(i, j int) bool { return s.Profiles[i].AvgBody < s.Profiles[j].AvgBody }\n\nfunc AbsPath(fname string) (f string, err error) {\n\tvar fpath string\n\tmatched, _ := regexp.Match(\"^~\/\", []byte(fname))\n\tif matched {\n\t\tusr, _ := user.Current()\n\t\tfpath = strings.Replace(fname, \"~\", usr.HomeDir, 1)\n\t} else {\n\t\tfpath, err = filepath.Abs(fname)\n\t}\n\n\treturn fpath, err\n}\n\nfunc LoadFile(filename string) (f *os.File, err error) {\n\tfpath, err := AbsPath(filename)\n\tif err != nil {\n\t\treturn f, err\n\t}\n\tf, err = os.Open(fpath)\n\n\treturn f, err\n}\n\nfunc Round(f float64) string {\n\treturn fmt.Sprintf(\"%.3f\", f)\n}\n\nfunc Output(ps Profiles) {\n\tif *tsv {\n\t\tif !*noHeaders {\n\t\t\tfmt.Printf(\"Count\\tMin\\tMax\\tSum\\tAvg\\tMax(Body)\\tMin(Body)\\tSum(Body)\\tAvg(Body)\\tMethod\\tUri%v\", eol)\n\t\t}\n\n\t\tfor _, p := range ps {\n\t\t\tfmt.Printf(\"%v\\t%v\\t%v\\t%v\\t%v\\t%v\\t%v\\t%v\\t%v\\t%v\\t%v%v\",\n\t\t\t\tp.Cnt, Round(p.Min), Round(p.Max), Round(p.Sum), Round(p.Avg),\n\t\t\t\tRound(p.MinBody), Round(p.MaxBody), Round(p.SumBody), Round(p.AvgBody),\n\t\t\t\tp.Method, p.Uri, eol)\n\t\t}\n\t} else {\n\t\ttable := tablewriter.NewWriter(os.Stdout)\n\t\ttable.SetHeader([]string{\"Count\", \"Min\", \"Max\", \"Sum\", \"Avg\",\n\t\t\t\"Max(Body)\", \"Min(Body)\", \"Sum(Body)\", \"Avg(Body)\",\n\t\t\t\"Method\", \"Uri\"})\n\t\tfor _, p := range ps {\n\t\t\tdata := []string{\n\t\t\t\tfmt.Sprint(p.Cnt), Round(p.Min), Round(p.Max), Round(p.Sum), Round(p.Avg),\n\t\t\t\tRound(p.MinBody), Round(p.MaxBody), Round(p.SumBody), Round(p.AvgBody),\n\t\t\t\tp.Method, p.Uri}\n\t\t\ttable.Append(data)\n\t\t}\n\t\ttable.Render()\n\t}\n}\n\nfunc SortByMax(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(ByMax{ps}))\n\t} else {\n\t\tsort.Sort(ByMax{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortByMin(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(ByMin{ps}))\n\t} else {\n\t\tsort.Sort(ByMin{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortByAvg(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(ByAvg{ps}))\n\t} else {\n\t\tsort.Sort(ByAvg{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortBySum(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(BySum{ps}))\n\t} else {\n\t\tsort.Sort(BySum{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortByCnt(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(ByCnt{ps}))\n\t} else {\n\t\tsort.Sort(ByCnt{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortByUri(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(ByUri{ps}))\n\t} else {\n\t\tsort.Sort(ByUri{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortByMethod(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(ByMethod{ps}))\n\t} else {\n\t\tsort.Sort(ByMethod{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortByMaxBody(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(ByMaxBody{ps}))\n\t} else {\n\t\tsort.Sort(ByMaxBody{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortByMinBody(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(ByMinBody{ps}))\n\t} else {\n\t\tsort.Sort(ByMinBody{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortByAvgBody(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(ByAvgBody{ps}))\n\t} else {\n\t\tsort.Sort(ByAvgBody{ps})\n\t}\n\tOutput(ps)\n}\n\nfunc SortBySumBody(ps Profiles, reverse bool) {\n\tif reverse {\n\t\tsort.Sort(sort.Reverse(BySumBody{ps}))\n\t} else {\n\t\tsort.Sort(BySumBody{ps})\n\t}\n\tOutput(ps)\n}\n\nvar (\n\tfile = kingpin.Flag(\"file\", \"access log file\").Short('f').String()\n\tmax = kingpin.Flag(\"max\", \"sort by max response time\").Bool()\n\tmin = kingpin.Flag(\"min\", \"sort by min response time\").Bool()\n\tavg = kingpin.Flag(\"avg\", \"sort by avg response time\").Bool()\n\tsum = kingpin.Flag(\"sum\", \"sort by sum response time\").Bool()\n\tcnt = kingpin.Flag(\"cnt\", \"sort by count\").Bool()\n\tsortUri = kingpin.Flag(\"uri\", \"sort by uri\").Bool()\n\tmethod = kingpin.Flag(\"method\", \"sort by method\").Bool()\n\tmaxBody = kingpin.Flag(\"max-body\", \"sort by max body size\").Bool()\n\tminBody = kingpin.Flag(\"min-body\", \"sort by min body size\").Bool()\n\tavgBody = kingpin.Flag(\"avg-body\", \"sort by avg body size\").Bool()\n\tsumBody = kingpin.Flag(\"sum-body\", \"sort by sum body size\").Bool()\n\treverse = kingpin.Flag(\"reverse\", \"reverse the result of comparisons\").Short('r').Bool()\n\tqueryString = kingpin.Flag(\"query-string\", \"include query string\").Short('q').Bool()\n\ttsv = kingpin.Flag(\"tsv\", \"tsv format (default: table)\").Bool()\n\tapptimeLabel = kingpin.Flag(\"apptime-label\", \"apptime label\").Default(\"apptime\").String()\n\tsizeLabel = kingpin.Flag(\"size-label\", \"size label\").Default(\"size\").String()\n\tmethodLabel = kingpin.Flag(\"method-label\", \"method label\").Default(\"method\").String()\n\turiLabel = kingpin.Flag(\"uri-label\", \"uri label\").Default(\"uri\").String()\n\tlimit = kingpin.Flag(\"limit\", \"set an upper limit of the target uri\").Default(\"5000\").Int()\n\tinclude = kingpin.Flag(\"include\", \"don't exclude uri matching PATTERN\").PlaceHolder(\"PATTERN\").String()\n\texclude = kingpin.Flag(\"exclude\", \"exclude uri matching PATTERN\").PlaceHolder(\"PATTERN\").String()\n\tnoHeaders = kingpin.Flag(\"noheaders\", \"print no header line at all (only --tsv)\").Bool()\n\n\teol = \"\\n\"\n)\n\nfunc main() {\n\tkingpin.Version(\"0.0.4\")\n\tkingpin.Parse()\n\n\tvar f *os.File\n\tvar err error\n\n\tif *file != \"\" {\n\t\tf, err = LoadFile(*file)\n\t\tdefer f.Close()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t} else {\n\t\tf = os.Stdin\n\t}\n\n\tif runtime.GOOS == \"windows\" {\n\t\teol = \"\\r\\n\"\n\t}\n\n\tvar sortKey string\n\n\tif *max {\n\t\tsortKey = \"max\"\n\t} else if *min {\n\t\tsortKey = \"min\"\n\t} else if *avg {\n\t\tsortKey = \"avg\"\n\t} else if *sum {\n\t\tsortKey = \"sum\"\n\t} else if *cnt {\n\t\tsortKey = \"cnt\"\n\t} else if *sortUri {\n\t\tsortKey = \"uri\"\n\t} else if *method {\n\t\tsortKey = \"method\"\n\t} else if *maxBody {\n\t\tsortKey = \"maxBody\"\n\t} else if *minBody {\n\t\tsortKey = \"minBody\"\n\t} else if *avgBody {\n\t\tsortKey = \"avgBody\"\n\t} else if *sumBody {\n\t\tsortKey = \"sumBody\"\n\t} else {\n\t\tsortKey = \"max\"\n\t}\n\n\tvar uri string\n\tvar index string\n\tvar accessLog Profiles\n\turiHints := make(map[string]int)\n\tlength := 0\n\tcursor := 0\n\n\tr := ltsv.NewReader(f)\n\tfor {\n\t\tline, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tresTime, err := strconv.ParseFloat(line[*apptimeLabel], 64)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tbodySize, err := strconv.ParseFloat(line[*sizeLabel], 64)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tu, err := url.Parse(line[*uriLabel])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif *queryString {\n\t\t\tv := url.Values{}\n\t\t\tvalues := u.Query()\n\t\t\tfor q, _ := range values {\n\t\t\t\tv.Set(q, \"xxx\")\n\t\t\t}\n\t\t\turi = fmt.Sprintf(\"%s?%s\", u.Path, v.Encode())\n\t\t\tindex = fmt.Sprintf(\"%s_%s?%s\", line[*methodLabel], u.Path, v.Encode())\n\t\t} else {\n\t\t\turi = u.Path\n\t\t\tindex = fmt.Sprintf(\"%s_%s\", line[*methodLabel], u.Path)\n\t\t}\n\n\t\tif *include != \"\" {\n\t\t\tif ok, err := regexp.Match(*include, []byte(uri)); !ok && err == nil {\n\t\t\t\tcontinue\n\t\t\t} else if err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\n\t\tif *exclude != \"\" {\n\t\t\tif ok, err := regexp.Match(*exclude, []byte(uri)); ok && err == nil {\n\t\t\t\tcontinue\n\t\t\t} else if err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\n\t\tif _, ok := uriHints[index]; ok {\n\t\t\tcursor = uriHints[index]\n\t\t} else {\n\t\t\turiHints[index] = length\n\t\t\tcursor = length\n\t\t\tlength++\n\t\t\taccessLog = append(accessLog, Profile{Uri: uri})\n\t\t}\n\n\t\tif len(uriHints) > *limit {\n\t\t\tlog.Fatal(fmt.Sprintf(\"Too many uri (%d or less)\", *limit))\n\t\t}\n\n\t\tif accessLog[cursor].Max < resTime {\n\t\t\taccessLog[cursor].Max = resTime\n\t\t}\n\n\t\tif accessLog[cursor].Min >= resTime || accessLog[cursor].Min == 0 {\n\t\t\taccessLog[cursor].Min = resTime\n\t\t}\n\n\t\taccessLog[cursor].Cnt++\n\t\taccessLog[cursor].Sum += resTime\n\t\taccessLog[cursor].Method = line[*methodLabel]\n\n\t\tif accessLog[cursor].MaxBody < bodySize {\n\t\t\taccessLog[cursor].MaxBody = bodySize\n\t\t}\n\n\t\tif accessLog[cursor].MinBody >= bodySize || accessLog[cursor].MinBody == 0 {\n\t\t\taccessLog[cursor].MinBody = bodySize\n\t\t}\n\n\t\taccessLog[cursor].SumBody += bodySize\n\t}\n\n\tfor i, _ := range accessLog {\n\t\taccessLog[i].Avg = accessLog[i].Sum \/ float64(accessLog[i].Cnt)\n\t\taccessLog[i].AvgBody = accessLog[i].SumBody \/ float64(accessLog[i].Cnt)\n\t}\n\n\tswitch sortKey {\n\tcase \"max\":\n\t\tSortByMax(accessLog, *reverse)\n\tcase \"min\":\n\t\tSortByMin(accessLog, *reverse)\n\tcase \"avg\":\n\t\tSortByAvg(accessLog, *reverse)\n\tcase \"sum\":\n\t\tSortBySum(accessLog, *reverse)\n\tcase \"cnt\":\n\t\tSortByCnt(accessLog, *reverse)\n\tcase \"uri\":\n\t\tSortByUri(accessLog, *reverse)\n\tcase \"method\":\n\t\tSortByMethod(accessLog, *reverse)\n\tcase \"maxBody\":\n\t\tSortByMaxBody(accessLog, *reverse)\n\tcase \"minBody\":\n\t\tSortByMinBody(accessLog, *reverse)\n\tcase \"avgBody\":\n\t\tSortByAvgBody(accessLog, *reverse)\n\tcase \"sumBody\":\n\t\tSortBySumBody(accessLog, *reverse)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"code.cloudfoundry.org\/cli\/cf\/cmd\"\n\t\"code.cloudfoundry.org\/cli\/commands\"\n\t\"code.cloudfoundry.org\/cli\/commands\/v2\"\n\t\"code.cloudfoundry.org\/cli\/utils\/configv3\"\n\t\"code.cloudfoundry.org\/cli\/utils\/panichandler\"\n\t\"code.cloudfoundry.org\/cli\/utils\/ui\"\n\t\"github.com\/jessevdk\/go-flags\"\n)\n\ntype UI interface {\n\tDisplayError(err ui.TranslatableError)\n\tDisplayErrorMessage(err string, keys ...map[string]interface{})\n}\n\nvar ErrFailed = errors.New(\"command failed\")\n\nfunc main() {\n\tdefer panichandler.HandlePanic()\n\tparse(os.Args[1:])\n}\n\nfunc parse(args []string) {\n\tparser := flags.NewParser(&v2.Commands, flags.HelpFlag)\n\tparser.CommandHandler = executionWrapper\n\textraArgs, err := parser.ParseArgs(args)\n\tif err == nil {\n\t\treturn\n\t}\n\n\tif flagErr, ok := err.(*flags.Error); ok {\n\t\tswitch flagErr.Type {\n\t\tcase flags.ErrHelp, flags.ErrUnknownFlag, flags.ErrExpectedArgument:\n\t\t\t_, found := reflect.TypeOf(v2.Commands).FieldByNameFunc(\n\t\t\t\tfunc(fieldName string) bool {\n\t\t\t\t\tfield, _ := reflect.TypeOf(v2.Commands).FieldByName(fieldName)\n\t\t\t\t\treturn parser.Active != nil && parser.Active.Name == field.Tag.Get(\"command\")\n\t\t\t\t},\n\t\t\t)\n\n\t\t\tif found && flagErr.Type == flags.ErrUnknownFlag && parser.Active.Name == \"set-env\" {\n\t\t\t\tnewArgs := []string{}\n\t\t\t\tfor _, arg := range args {\n\t\t\t\t\tif arg[0] == '-' {\n\t\t\t\t\t\tnewArgs = append(newArgs, fmt.Sprintf(\"%s%s\", v2.WorkAroundPrefix, arg))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnewArgs = append(newArgs, arg)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tparse(newArgs)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif flagErr.Type == flags.ErrUnknownFlag || flagErr.Type == flags.ErrExpectedArgument {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Incorrect Usage: %s\\n\\n\", flagErr.Error())\n\t\t\t}\n\n\t\t\tif found {\n\t\t\t\tparse([]string{\"help\", parser.Active.Name})\n\t\t\t} else {\n\t\t\t\tswitch len(extraArgs) {\n\t\t\t\tcase 0:\n\t\t\t\t\tparse([]string{\"help\"})\n\t\t\t\tcase 1:\n\t\t\t\t\tif !isOption(extraArgs[0]) || (len(args) > 1 && extraArgs[0] == \"-a\") {\n\t\t\t\t\t\tparse([]string{\"help\", extraArgs[0]})\n\t\t\t\t\t} else {\n\t\t\t\t\t\tparse([]string{\"help\"})\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\tif isCommand(extraArgs[0]) {\n\t\t\t\t\t\tparse([]string{\"help\", extraArgs[0]})\n\t\t\t\t\t} else {\n\t\t\t\t\t\tparse(extraArgs[1:])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif flagErr.Type == flags.ErrUnknownFlag || flagErr.Type == flags.ErrExpectedArgument {\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\tcase flags.ErrRequired:\n\t\t\tfmt.Fprintf(os.Stderr, \"Incorrect Usage: %s\\n\\n\", flagErr.Error())\n\t\t\tparse(append([]string{\"help\"}, args...))\n\t\t\tos.Exit(1)\n\t\tcase flags.ErrUnknownCommand:\n\t\t\tcmd.Main(os.Getenv(\"CF_TRACE\"), os.Args)\n\t\tcase flags.ErrCommandRequired:\n\t\t\tif v2.Commands.VerboseOrVersion {\n\t\t\t\tparse([]string{\"version\"})\n\t\t\t} else {\n\t\t\t\tparse([]string{\"help\"})\n\t\t\t}\n\t\tdefault:\n\t\t\tfmt.Fprintf(os.Stderr, \"Unexpected flag error\\ntype: %s\\nmessage: %s\\n\", flagErr.Type, flagErr.Error())\n\t\t}\n\t} else if err == ErrFailed {\n\t\tos.Exit(1)\n\t} else {\n\t\tfmt.Fprintf(os.Stderr, \"Unexpected error: %s\\n\", err.Error())\n\t\tos.Exit(1)\n\t}\n}\n\nfunc isCommand(s string) bool {\n\t_, found := reflect.TypeOf(v2.Commands).FieldByNameFunc(\n\t\tfunc(fieldName string) bool {\n\t\t\tfield, _ := reflect.TypeOf(v2.Commands).FieldByName(fieldName)\n\t\t\treturn s == field.Tag.Get(\"command\") || s == field.Tag.Get(\"alias\")\n\t\t})\n\n\treturn found\n}\nfunc isOption(s string) bool {\n\treturn strings.HasPrefix(s, \"-\")\n}\n\nfunc executionWrapper(cmd flags.Commander, args []string) error {\n\tcfConfig, err := configv3.LoadConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer configv3.WriteConfig(cfConfig)\n\n\tif extendedCmd, ok := cmd.(commands.ExtendedCommander); ok {\n\t\tcommandUI, err := ui.NewUI(cfConfig)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = extendedCmd.Setup(cfConfig, commandUI)\n\t\tif err != nil {\n\t\t\treturn handleError(err, commandUI)\n\t\t}\n\t\treturn handleError(extendedCmd.Execute(args), commandUI)\n\t}\n\n\treturn fmt.Errorf(\"command does not conform to ExtendedCommander\")\n}\n\nfunc handleError(err error, commandUI UI) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tif e, ok := err.(ui.TranslatableError); ok {\n\t\tcommandUI.DisplayError(e)\n\t} else {\n\t\tcommandUI.DisplayErrorMessage(err.Error())\n\t}\n\treturn ErrFailed\n}\n<commit_msg>shows help for a subcommand when running cf with missing required arg<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"code.cloudfoundry.org\/cli\/cf\/cmd\"\n\t\"code.cloudfoundry.org\/cli\/commands\"\n\t\"code.cloudfoundry.org\/cli\/commands\/v2\"\n\t\"code.cloudfoundry.org\/cli\/utils\/configv3\"\n\t\"code.cloudfoundry.org\/cli\/utils\/panichandler\"\n\t\"code.cloudfoundry.org\/cli\/utils\/ui\"\n\t\"github.com\/jessevdk\/go-flags\"\n)\n\ntype UI interface {\n\tDisplayError(err ui.TranslatableError)\n\tDisplayErrorMessage(err string, keys ...map[string]interface{})\n}\n\nvar ErrFailed = errors.New(\"command failed\")\n\nfunc main() {\n\tdefer panichandler.HandlePanic()\n\tparse(os.Args[1:])\n}\n\nfunc parse(args []string) {\n\tparser := flags.NewParser(&v2.Commands, flags.HelpFlag)\n\tparser.CommandHandler = executionWrapper\n\textraArgs, err := parser.ParseArgs(args)\n\tif err == nil {\n\t\treturn\n\t}\n\n\tif flagErr, ok := err.(*flags.Error); ok {\n\t\tswitch flagErr.Type {\n\t\tcase flags.ErrHelp, flags.ErrUnknownFlag, flags.ErrExpectedArgument:\n\t\t\t_, found := reflect.TypeOf(v2.Commands).FieldByNameFunc(\n\t\t\t\tfunc(fieldName string) bool {\n\t\t\t\t\tfield, _ := reflect.TypeOf(v2.Commands).FieldByName(fieldName)\n\t\t\t\t\treturn parser.Active != nil && parser.Active.Name == field.Tag.Get(\"command\")\n\t\t\t\t},\n\t\t\t)\n\n\t\t\tif found && flagErr.Type == flags.ErrUnknownFlag && parser.Active.Name == \"set-env\" {\n\t\t\t\tnewArgs := []string{}\n\t\t\t\tfor _, arg := range args {\n\t\t\t\t\tif arg[0] == '-' {\n\t\t\t\t\t\tnewArgs = append(newArgs, fmt.Sprintf(\"%s%s\", v2.WorkAroundPrefix, arg))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tnewArgs = append(newArgs, arg)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tparse(newArgs)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif flagErr.Type == flags.ErrUnknownFlag || flagErr.Type == flags.ErrExpectedArgument {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Incorrect Usage: %s\\n\\n\", flagErr.Error())\n\t\t\t}\n\n\t\t\tif found {\n\t\t\t\tparse([]string{\"help\", parser.Active.Name})\n\t\t\t} else {\n\t\t\t\tswitch len(extraArgs) {\n\t\t\t\tcase 0:\n\t\t\t\t\tparse([]string{\"help\"})\n\t\t\t\tcase 1:\n\t\t\t\t\tif !isOption(extraArgs[0]) || (len(args) > 1 && extraArgs[0] == \"-a\") {\n\t\t\t\t\t\tparse([]string{\"help\", extraArgs[0]})\n\t\t\t\t\t} else {\n\t\t\t\t\t\tparse([]string{\"help\"})\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\tif isCommand(extraArgs[0]) {\n\t\t\t\t\t\tparse([]string{\"help\", extraArgs[0]})\n\t\t\t\t\t} else {\n\t\t\t\t\t\tparse(extraArgs[1:])\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif flagErr.Type == flags.ErrUnknownFlag || flagErr.Type == flags.ErrExpectedArgument {\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\tcase flags.ErrRequired:\n\t\t\tfmt.Fprintf(os.Stderr, \"Incorrect Usage: %s\\n\\n\", flagErr.Error())\n\t\t\tparse([]string{\"help\", args[0]})\n\t\t\tos.Exit(1)\n\t\tcase flags.ErrUnknownCommand:\n\t\t\tcmd.Main(os.Getenv(\"CF_TRACE\"), os.Args)\n\t\tcase flags.ErrCommandRequired:\n\t\t\tif v2.Commands.VerboseOrVersion {\n\t\t\t\tparse([]string{\"version\"})\n\t\t\t} else {\n\t\t\t\tparse([]string{\"help\"})\n\t\t\t}\n\t\tdefault:\n\t\t\tfmt.Fprintf(os.Stderr, \"Unexpected flag error\\ntype: %s\\nmessage: %s\\n\", flagErr.Type, flagErr.Error())\n\t\t}\n\t} else if err == ErrFailed {\n\t\tos.Exit(1)\n\t} else {\n\t\tfmt.Fprintf(os.Stderr, \"Unexpected error: %s\\n\", err.Error())\n\t\tos.Exit(1)\n\t}\n}\n\nfunc isCommand(s string) bool {\n\t_, found := reflect.TypeOf(v2.Commands).FieldByNameFunc(\n\t\tfunc(fieldName string) bool {\n\t\t\tfield, _ := reflect.TypeOf(v2.Commands).FieldByName(fieldName)\n\t\t\treturn s == field.Tag.Get(\"command\") || s == field.Tag.Get(\"alias\")\n\t\t})\n\n\treturn found\n}\nfunc isOption(s string) bool {\n\treturn strings.HasPrefix(s, \"-\")\n}\n\nfunc executionWrapper(cmd flags.Commander, args []string) error {\n\tcfConfig, err := configv3.LoadConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer configv3.WriteConfig(cfConfig)\n\n\tif extendedCmd, ok := cmd.(commands.ExtendedCommander); ok {\n\t\tcommandUI, err := ui.NewUI(cfConfig)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = extendedCmd.Setup(cfConfig, commandUI)\n\t\tif err != nil {\n\t\t\treturn handleError(err, commandUI)\n\t\t}\n\t\treturn handleError(extendedCmd.Execute(args), commandUI)\n\t}\n\n\treturn fmt.Errorf(\"command does not conform to ExtendedCommander\")\n}\n\nfunc handleError(err error, commandUI UI) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tif e, ok := err.(ui.TranslatableError); ok {\n\t\tcommandUI.DisplayError(e)\n\t} else {\n\t\tcommandUI.DisplayErrorMessage(err.Error())\n\t}\n\treturn ErrFailed\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Tweaks<commit_after><|endoftext|>"} {"text":"<commit_before>package main\n\nimport(\n \"github.com\/TykTechnologies\/tykcommon\"\n\n \"encoding\/json\"\n \"fmt\"\n \"flag\"\n \"errors\"\n \"io\/ioutil\"\n \"os\"\n)\n\n\/\/ tyk-cli <module> <submodule> <command> [--options] args...\n\nvar module, submodule, command string\n\nfunc init() {\n}\n\n\/\/ main is the entrypoint.\nfunc main() {\n fmt.Println(\"tyk-cli:\", flag.CommandLine, os.Args)\n fmt.Println(\"os.Args (length) = \", len(os.Args))\n if len(os.Args) == 1 {\n fmt.Println(\"No module specified.\")\n os.Exit(1)\n } else if len(os.Args) == 2 {\n fmt.Println(\"No command specified.\")\n os.Exit(1)\n }\n\n\n module = os.Args[1]\n command = os.Args[2]\n\n fmt.Println(\"module =\", module)\n fmt.Println(\"command =\", command)\n\n var err error\n\n switch module {\n case \"bundle\":\n fmt.Println(\"Using bundle module.\")\n err = bundle(command)\n default:\n err = errors.New(\"Invalid module\")\n }\n\n if err != nil {\n fmt.Println(\"Error:\", err)\n os.Exit(1)\n }\n}\n\n\/\/ bundle will handle the bundle command calls.\nfunc bundle(command string) (err error) {\n switch command {\n case \"build\":\n var manifestPath = \".\/manifest.json\"\n if _, err := os.Stat(manifestPath); err == nil {\n var manifestData []byte\n manifestData, err = ioutil.ReadFile(manifestPath)\n\n var manifest tykcommon.BundleManifest\n err = json.Unmarshal(manifestData, &manifest)\n\n if err != nil {\n fmt.Println(\"Couldn't parse manifest file!\")\n break\n }\n\n err = bundleValidateManifest(&manifest)\n\n if err != nil {\n fmt.Println(\"Bundle validation error:\")\n fmt.Println(err)\n break\n }\n\n \/\/ The manifest is valid, we should do the checksum and sign step at this point.\n bundleBuild(&manifest)\n\n } else {\n err = errors.New(\"Manifest file doesn't exist.\")\n }\n default:\n err = errors.New(\"Invalid command.\")\n }\n return err\n}\n\n\/\/ bundleValidateManifest will validate the manifest file before building a bundle.\nfunc bundleValidateManifest(manifest *tykcommon.BundleManifest) (err error) {\n for _, file := range manifest.FileList {\n if _, statErr := os.Stat(file); statErr != nil {\n err = errors.New(\"Referencing a nonexistent file: \" + file)\n break\n }\n }\n \/\/ TODO: validate the custom middleware block.\n return err\n}\n\nfunc bundleBuild(manifest *tykcommon.BundleManifest) (err error) {\n return err\n}\n<commit_msg>tyk-85: set checksum.<commit_after>package main\n\nimport(\n \"github.com\/TykTechnologies\/tykcommon\"\n\n \"encoding\/json\"\n \"fmt\"\n \"flag\"\n \"errors\"\n \"io\/ioutil\"\n \"crypto\/md5\"\n \"strings\"\n \"os\"\n)\n\n\/\/ tyk-cli <module> <submodule> <command> [--options] args...\n\nvar module, submodule, command string\n\nfunc init() {\n}\n\n\/\/ main is the entrypoint.\nfunc main() {\n fmt.Println(\"tyk-cli:\", flag.CommandLine, os.Args)\n fmt.Println(\"os.Args (length) = \", len(os.Args))\n if len(os.Args) == 1 {\n fmt.Println(\"No module specified.\")\n os.Exit(1)\n } else if len(os.Args) == 2 {\n fmt.Println(\"No command specified.\")\n os.Exit(1)\n }\n\n\n module = os.Args[1]\n command = os.Args[2]\n\n fmt.Println(\"module =\", module)\n fmt.Println(\"command =\", command)\n\n var err error\n\n switch module {\n case \"bundle\":\n fmt.Println(\"Using bundle module.\")\n err = bundle(command)\n default:\n err = errors.New(\"Invalid module\")\n }\n\n if err != nil {\n fmt.Println(\"Error:\", err)\n os.Exit(1)\n }\n}\n\n\/\/ bundle will handle the bundle command calls.\nfunc bundle(command string) (err error) {\n switch command {\n case \"build\":\n var manifestPath = \".\/manifest.json\"\n if _, err := os.Stat(manifestPath); err == nil {\n var manifestData []byte\n manifestData, err = ioutil.ReadFile(manifestPath)\n\n var manifest tykcommon.BundleManifest\n err = json.Unmarshal(manifestData, &manifest)\n\n if err != nil {\n fmt.Println(\"Couldn't parse manifest file!\")\n break\n }\n\n err = bundleValidateManifest(&manifest)\n\n if err != nil {\n fmt.Println(\"Bundle validation error:\")\n fmt.Println(err)\n break\n }\n\n \/\/ The manifest is valid, we should do the checksum and sign step at this point.\n bundleBuild(&manifest)\n\n } else {\n err = errors.New(\"Manifest file doesn't exist.\")\n }\n default:\n err = errors.New(\"Invalid command.\")\n }\n return err\n}\n\n\/\/ bundleValidateManifest will validate the manifest file before building a bundle.\nfunc bundleValidateManifest(manifest *tykcommon.BundleManifest) (err error) {\n \/\/ Validate manifest file list:\n for _, file := range manifest.FileList {\n if _, statErr := os.Stat(file); statErr != nil {\n err = errors.New(\"Referencing a nonexistent file: \" + file)\n break\n }\n }\n\n \/\/ The custom middleware block must specify at least one hook:\n var definedHooks int\n definedHooks = len(manifest.CustomMiddleware.Pre) + len(manifest.CustomMiddleware.Post) + len(manifest.CustomMiddleware.PostKeyAuth)\n\n if manifest.CustomMiddleware.AuthCheck.Name != \"\" {\n definedHooks++\n }\n\n if definedHooks == 0 {\n err = errors.New(\"No hooks defined!\")\n return err\n }\n\n \/\/ The custom middleware block must specify a driver:\n if manifest.CustomMiddleware.Driver == \"\" {\n err = errors.New(\"No driver specified!\")\n return err\n }\n\n return err\n}\n\nfunc bundleBuild(manifest *tykcommon.BundleManifest) (err error) {\n var bundleChecksums []string\n for _, file := range manifest.FileList {\n var data []byte\n data, err = ioutil.ReadFile(file)\n if err != nil {\n return err\n }\n hash := fmt.Sprintf(\"%x\", md5.Sum(data))\n bundleChecksums = append(bundleChecksums, hash)\n }\n mergedChecksums := strings.Join(bundleChecksums, \"\")\n manifest.Checksum = fmt.Sprintf(\"%x\", md5.Sum([]byte(mergedChecksums)))\n return err\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Fixed HTTP auth issues<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>Add showComments<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>add some more printf logging to main events<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>test set<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>I suck at regexes<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Project Gonder.\n\/\/ Author Supme\n\/\/ Copyright Supme 2016\n\/\/ License http:\/\/opensource.org\/licenses\/MIT MIT License\n\/\/\n\/\/ THE SOFTWARE AND DOCUMENTATION ARE PROVIDED \"AS IS\" WITHOUT WARRANTY OF\n\/\/ ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n\/\/ IMPLIED WARRANTIES OF MERCHANTABILITY AND\/OR FITNESS FOR A PARTICULAR\n\/\/ PURPOSE.\n\/\/\n\/\/ Please see the License.txt file for more information.\n\/\/\npackage main\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/supme\/gonder\/api\"\n\t\"github.com\/supme\/gonder\/campaign\"\n\t\"github.com\/supme\/gonder\/models\"\n\t\"github.com\/supme\/gonder\/utm\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"syscall\"\n)\n\nfunc main() {\n\n\tl, err := os.OpenFile(models.FromRootDir(\"log\/main.log\"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n\tif err != nil {\n\t\tlog.Println(\"error opening log file: %v\", err)\n\t}\n\tdefer l.Close()\n\n\tml := io.MultiWriter(l, os.Stdout)\n\n\tlog.SetFlags(3)\n\tlog.SetOutput(ml)\n\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\t\/\/\tmodels.Config.Get()\n\tdefer models.Config.Close()\n\n\t\/\/ Start\n\tif len(os.Args) == 2 {\n\t\tvar err error\n\t\tif os.Args[1] == \"status\" {\n\t\t\terr = checkPid(\"api\")\n\t\t\tif err == nil {\n\t\t\t\tfmt.Println(\"Process api running\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Process api stoping\")\n\t\t\t}\n\t\t\terr = checkPid(\"sender\")\n\t\t\tif err == nil {\n\t\t\t\tfmt.Println(\"Process sender running\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Process sender stoping\")\n\t\t\t}\n\t\t\terr = checkPid(\"utm\")\n\t\t\tif err == nil {\n\t\t\t\tfmt.Println(\"Process utm running\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Process utm stoping\")\n\t\t\t}\n\t\t}\n\t\tif os.Args[1] == \"start\" {\n\t\t\terr = startProcess(\"api\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\terr = startProcess(\"sender\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\terr = startProcess(\"utm\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\tos.Exit(0)\n\t\t}\n\t\tif os.Args[1] == \"stop\" {\n\t\t\terr = stopProcess(\"api\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\terr = stopProcess(\"sender\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\terr = stopProcess(\"utm\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\tos.Exit(0)\n\t\t}\n\t\tif os.Args[1] == \"restart\" {\n\t\t\terr = stopProcess(\"api\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\terr = startProcess(\"api\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\terr = stopProcess(\"utm\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\terr = startProcess(\"utm\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\terr = stopProcess(\"sender\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\terr = startProcess(\"sender\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\n\tif len(os.Args) == 3 {\n\t\tif os.Args[1] == \"start\" {\n\t\t\tif os.Args[2] == \"api\" {\n\t\t\t\terr = startProcess(\"api\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tif os.Args[2] == \"sender\" {\n\t\t\t\terr = startProcess(\"sender\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tif os.Args[2] == \"utm\" {\n\t\t\t\terr = startProcess(\"utm\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\tif os.Args[1] == \"stop\" {\n\t\t\tif os.Args[2] == \"api\" {\n\t\t\t\terr = stopProcess(\"api\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tif os.Args[2] == \"sender\" {\n\t\t\t\terr = stopProcess(\"sender\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tif os.Args[2] == \"utm\" {\n\t\t\t\terr = stopProcess(\"utm\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\tif os.Args[1] == \"restart\" {\n\t\t\tif os.Args[2] == \"api\" {\n\t\t\t\terr = stopProcess(\"api\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t\terr = startProcess(\"api\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tif os.Args[2] == \"sender\" {\n\t\t\t\terr = stopProcess(\"sender\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t\terr = startProcess(\"sender\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tif os.Args[2] == \"utm\" {\n\t\t\t\terr = stopProcess(\"utm\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t\terr = startProcess(\"utm\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\tif os.Args[1] == \"daemonize\" {\n\n\t\t\tif os.Args[2] == \"api\" {\n\n\t\t\t\tfmt.Println(\"Start api http server\")\n\t\t\t\tapi.Run()\n\n\t\t\t\tfor {\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif os.Args[2] == \"sender\" {\n\n\t\t\t\tfmt.Println(\"Start database mailer\")\n\t\t\t\tcampaign.Run()\n\n\t\t\t\tfor {\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif os.Args[2] == \"utm\" {\n\n\t\t\t\tfmt.Println(\"Start utm http server\")\n\t\t\t\tutm.Run()\n\n\t\t\t\tfor {\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif len(os.Args) == 1 {\n\t\tfmt.Println(\"Start api http server\")\n\t\tgo api.Run()\n\n\t\tfmt.Println(\"Start database mailer\")\n\t\tgo campaign.Run()\n\n\t\tfmt.Println(\"Start utm http server\")\n\t\tgo utm.Run()\n\n\t\tfmt.Println(\"Press Enter for stop\")\n\t\tvar input string\n\t\tfmt.Scanln(&input)\n\t}\n\n}\n\nfunc startProcess(name string) error {\n\terr := checkPid(name)\n\tif err == nil {\n\t\treturn errors.New(\"Process \" + name + \" already running\")\n\t} else {\n\t\tp := exec.Command(os.Args[0], \"daemonize\", name)\n\t\tp.Start()\n\t\tfmt.Println(\"Started \"+name+\" pid\", p.Process.Pid)\n\t\terr := setPid(name, p.Process.Pid)\n\t\tif err != nil {\n\t\t\treturn errors.New(name + \" set PID error: \" + err.Error())\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc stopProcess(name string) error {\n\terr := checkPid(name)\n\tif err != nil {\n\t\tfmt.Println(\"Process \" + name + \" not found:\")\n\t\treturn err\n\t} else {\n\t\tfile, err := os.Open(models.FromRootDir(\"pid\/\" + name + \".pid\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treader := bufio.NewReader(file)\n\t\tpid, _, err := reader.ReadLine()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp, _ := strconv.Atoi(string(pid))\n\t\tprocess, _ := os.FindProcess(p)\n\t\terr = process.Kill()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tos.Remove(models.FromRootDir(\"pid\/\" + name + \".pid\"))\n\t}\n\tfmt.Println(\"Process \" + name + \" stoped\")\n\treturn nil\n}\n\nfunc setPid(name string, pid int) error {\n\tp := strconv.Itoa(pid)\n\tfile, err := os.Create(models.FromRootDir(\"pid\/\" + name + \".pid\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\t_, err = io.WriteString(file, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc checkPid(name string) error {\n\tfile, err := os.Open(models.FromRootDir(\"pid\/\" + name + \".pid\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\treader := bufio.NewReader(file)\n\tpid, _, err := reader.ReadLine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp, _ := strconv.Atoi(string(pid))\n\tprocess, err := os.FindProcess(p)\n\tif err != nil {\n\t\tos.Remove(models.FromRootDir(\"pid\/\" + name + \".pid\"))\n\t\treturn errors.New(\"Failed to find process\")\n\t} else {\n\t\terr := process.Signal(syscall.Signal(0))\n\t\tif err != nil {\n\t\t\tos.Remove(models.FromRootDir(\"pid\/\" + name + \".pid\"))\n\t\t\treturn errors.New(\"Process not response to signal.\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc checkErr(err error) {\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<commit_msg>add github.com\/google\/gops<commit_after>\/\/ Project Gonder.\n\/\/ Author Supme\n\/\/ Copyright Supme 2016\n\/\/ License http:\/\/opensource.org\/licenses\/MIT MIT License\n\/\/\n\/\/ THE SOFTWARE AND DOCUMENTATION ARE PROVIDED \"AS IS\" WITHOUT WARRANTY OF\n\/\/ ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n\/\/ IMPLIED WARRANTIES OF MERCHANTABILITY AND\/OR FITNESS FOR A PARTICULAR\n\/\/ PURPOSE.\n\/\/\n\/\/ Please see the License.txt file for more information.\n\/\/\npackage main\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/supme\/gonder\/api\"\n\t\"github.com\/supme\/gonder\/campaign\"\n\t\"github.com\/supme\/gonder\/models\"\n\t\"github.com\/supme\/gonder\/utm\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"syscall\"\n\t\"github.com\/google\/gops\/agent\"\n)\n\nfunc main() {\n\tif err := agent.Start(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tl, err := os.OpenFile(models.FromRootDir(\"log\/main.log\"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n\tif err != nil {\n\t\tlog.Println(\"error opening log file: %v\", err)\n\t}\n\tdefer l.Close()\n\n\tml := io.MultiWriter(l, os.Stdout)\n\n\tlog.SetFlags(3)\n\tlog.SetOutput(ml)\n\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\t\/\/\tmodels.Config.Get()\n\tdefer models.Config.Close()\n\n\t\/\/ Start\n\tif len(os.Args) == 2 {\n\t\tvar err error\n\t\tif os.Args[1] == \"status\" {\n\t\t\terr = checkPid(\"api\")\n\t\t\tif err == nil {\n\t\t\t\tfmt.Println(\"Process api running\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Process api stoping\")\n\t\t\t}\n\t\t\terr = checkPid(\"sender\")\n\t\t\tif err == nil {\n\t\t\t\tfmt.Println(\"Process sender running\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Process sender stoping\")\n\t\t\t}\n\t\t\terr = checkPid(\"utm\")\n\t\t\tif err == nil {\n\t\t\t\tfmt.Println(\"Process utm running\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Process utm stoping\")\n\t\t\t}\n\t\t}\n\t\tif os.Args[1] == \"start\" {\n\t\t\terr = startProcess(\"api\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\terr = startProcess(\"sender\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\terr = startProcess(\"utm\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\tos.Exit(0)\n\t\t}\n\t\tif os.Args[1] == \"stop\" {\n\t\t\terr = stopProcess(\"api\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\terr = stopProcess(\"sender\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\terr = stopProcess(\"utm\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\tos.Exit(0)\n\t\t}\n\t\tif os.Args[1] == \"restart\" {\n\t\t\terr = stopProcess(\"api\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\terr = startProcess(\"api\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\terr = stopProcess(\"utm\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\terr = startProcess(\"utm\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\terr = stopProcess(\"sender\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\terr = startProcess(\"sender\")\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err.Error())\n\t\t\t}\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\n\tif len(os.Args) == 3 {\n\t\tif os.Args[1] == \"start\" {\n\t\t\tif os.Args[2] == \"api\" {\n\t\t\t\terr = startProcess(\"api\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tif os.Args[2] == \"sender\" {\n\t\t\t\terr = startProcess(\"sender\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tif os.Args[2] == \"utm\" {\n\t\t\t\terr = startProcess(\"utm\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\tif os.Args[1] == \"stop\" {\n\t\t\tif os.Args[2] == \"api\" {\n\t\t\t\terr = stopProcess(\"api\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tif os.Args[2] == \"sender\" {\n\t\t\t\terr = stopProcess(\"sender\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tif os.Args[2] == \"utm\" {\n\t\t\t\terr = stopProcess(\"utm\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\tif os.Args[1] == \"restart\" {\n\t\t\tif os.Args[2] == \"api\" {\n\t\t\t\terr = stopProcess(\"api\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t\terr = startProcess(\"api\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tif os.Args[2] == \"sender\" {\n\t\t\t\terr = stopProcess(\"sender\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t\terr = startProcess(\"sender\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tif os.Args[2] == \"utm\" {\n\t\t\t\terr = stopProcess(\"utm\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t\terr = startProcess(\"utm\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\tif os.Args[1] == \"daemonize\" {\n\n\t\t\tif os.Args[2] == \"api\" {\n\n\t\t\t\tfmt.Println(\"Start api http server\")\n\t\t\t\tapi.Run()\n\n\t\t\t\tfor {\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif os.Args[2] == \"sender\" {\n\n\t\t\t\tfmt.Println(\"Start database mailer\")\n\t\t\t\tcampaign.Run()\n\n\t\t\t\tfor {\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif os.Args[2] == \"utm\" {\n\n\t\t\t\tfmt.Println(\"Start utm http server\")\n\t\t\t\tutm.Run()\n\n\t\t\t\tfor {\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif len(os.Args) == 1 {\n\t\tfmt.Println(\"Start api http server\")\n\t\tgo api.Run()\n\n\t\tfmt.Println(\"Start database mailer\")\n\t\tgo campaign.Run()\n\n\t\tfmt.Println(\"Start utm http server\")\n\t\tgo utm.Run()\n\n\t\tfmt.Println(\"Press Enter for stop\")\n\t\tvar input string\n\t\tfmt.Scanln(&input)\n\t}\n\n}\n\nfunc startProcess(name string) error {\n\terr := checkPid(name)\n\tif err == nil {\n\t\treturn errors.New(\"Process \" + name + \" already running\")\n\t} else {\n\t\tp := exec.Command(os.Args[0], \"daemonize\", name)\n\t\tp.Start()\n\t\tfmt.Println(\"Started \"+name+\" pid\", p.Process.Pid)\n\t\terr := setPid(name, p.Process.Pid)\n\t\tif err != nil {\n\t\t\treturn errors.New(name + \" set PID error: \" + err.Error())\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc stopProcess(name string) error {\n\terr := checkPid(name)\n\tif err != nil {\n\t\tfmt.Println(\"Process \" + name + \" not found:\")\n\t\treturn err\n\t} else {\n\t\tfile, err := os.Open(models.FromRootDir(\"pid\/\" + name + \".pid\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treader := bufio.NewReader(file)\n\t\tpid, _, err := reader.ReadLine()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp, _ := strconv.Atoi(string(pid))\n\t\tprocess, _ := os.FindProcess(p)\n\t\terr = process.Kill()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tos.Remove(models.FromRootDir(\"pid\/\" + name + \".pid\"))\n\t}\n\tfmt.Println(\"Process \" + name + \" stoped\")\n\treturn nil\n}\n\nfunc setPid(name string, pid int) error {\n\tp := strconv.Itoa(pid)\n\tfile, err := os.Create(models.FromRootDir(\"pid\/\" + name + \".pid\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\t_, err = io.WriteString(file, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc checkPid(name string) error {\n\tfile, err := os.Open(models.FromRootDir(\"pid\/\" + name + \".pid\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\treader := bufio.NewReader(file)\n\tpid, _, err := reader.ReadLine()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp, _ := strconv.Atoi(string(pid))\n\tprocess, err := os.FindProcess(p)\n\tif err != nil {\n\t\tos.Remove(models.FromRootDir(\"pid\/\" + name + \".pid\"))\n\t\treturn errors.New(\"Failed to find process\")\n\t} else {\n\t\terr := process.Signal(syscall.Signal(0))\n\t\tif err != nil {\n\t\t\tos.Remove(models.FromRootDir(\"pid\/\" + name + \".pid\"))\n\t\t\treturn errors.New(\"Process not response to signal.\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc checkErr(err error) {\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"encoding\/xml\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n)\n\ntype rss struct {\n\tChannel channel `xml:\"channel\"`\n}\n\ntype channel struct {\n\tTitle string `xml:\"title\"`\n\tLink string `xml:\"link\"`\n\tDescription string `xml:\"description\"`\n\tItems []item `xml:\"item\"`\n}\n\ntype item struct {\n\tEnclosure enclosure `xml:\"enclosure\"`\n\tLink string `xml:\"link\"`\n\tGuid string `xml:\"guid\"`\n}\n\ntype enclosure struct {\n\tUrl string `xml:\"url,attr\"`\n\tType string `xml:\"type,attr\"`\n}\n\ntype archive struct {\n\tGuid []string `json:\"guid\"`\n}\n\nfunc main() {\n\n\taccountPtr := flag.String(\"user\", \"\", \"soup.io username\")\n\tflag.Parse()\n\n\tif *accountPtr == \"\" {\n\t\tflag.Usage()\n\t\tos.Exit(0)\n\t}\n\n\turl := fmt.Sprintf(\"http:\/\/%s.soup.io\/rss\", *accountPtr)\n\tfeedResponse, err := http.Get(url)\n\tif err != nil {\n\t\tfmt.Printf(\"Error fetching %s: %s\", url, err)\n\t\tos.Exit(1)\n\t}\n\tdefer feedResponse.Body.Close()\n\tfeedBody, err := ioutil.ReadAll(feedResponse.Body)\n\tif err != nil {\n\t\tfmt.Printf(\"Error reading feed from %s: %s\", url, err)\n\t\tos.Exit(1)\n\t}\n\n\tvar feed rss\n\txml.Unmarshal(feedBody, &feed)\n\n\ta, err := readArchive()\n\tif err != nil {\n\t\tfmt.Println(\"No archive data found. Will create a fresh one\")\n\t}\n\n\tfor _, i := range feed.Channel.Items {\n\t\tif inArchive(i.Guid, a) {\n\t\t\tfmt.Printf(\"Skipping %s. Already in archive.\\n\", i.Enclosure.Url)\n\t\t\tcontinue\n\t\t}\n\t\tresponse, err := http.Get(i.Enclosure.Url)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error fetching %s: %s\\n\", i.Enclosure.Url, err)\n\t\t\tcontinue\n\t\t}\n\t\tfilepath := \"archive\/\" + path.Base(i.Enclosure.Url)\n\t\tfile, err := os.Create(filepath)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error opening file %s: %s\\n\", filepath, err)\n\t\t\tresponse.Body.Close()\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Printf(\"Saving %s...\\n\", i.Enclosure.Url)\n\t\t_, err = io.Copy(file, response.Body)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error writing file %s: %s\\n\", filepath, err)\n\t\t\tresponse.Body.Close()\n\t\t\tcontinue\n\t\t}\n\t\tresponse.Body.Close()\n\t\tfile.Close()\n\n\t\ta.Guid = append(a.Guid, i.Guid)\n\t\tsaveArchive(a)\n\t}\n}\n\nfunc saveArchive(a archive) {\n\tdata, err := json.Marshal(a)\n\tif err != nil {\n\t\tfmt.Println(\"Error marshalling archive data: \", err)\n\t\treturn\n\t}\n\tioutil.WriteFile(\"archive\/guids.json\", data, 0600)\n}\n\nfunc inArchive(guid string, a archive) bool {\n\tfor _, s := range a.Guid {\n\t\tif guid == s {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc readArchive() (archive, error) {\n\tvar a archive\n\n\tarchiveData, err := ioutil.ReadFile(\"archive\/guids.json\")\n\tif err != nil {\n\t\treturn a, err\n\t}\n\n\terr = json.Unmarshal(archiveData, &a)\n\tif err != nil {\n\t\treturn a, err\n\t}\n\n\treturn a, nil\n}\n<commit_msg>move log output to better represent the current workload<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"encoding\/xml\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n)\n\ntype rss struct {\n\tChannel channel `xml:\"channel\"`\n}\n\ntype channel struct {\n\tTitle string `xml:\"title\"`\n\tLink string `xml:\"link\"`\n\tDescription string `xml:\"description\"`\n\tItems []item `xml:\"item\"`\n}\n\ntype item struct {\n\tEnclosure enclosure `xml:\"enclosure\"`\n\tLink string `xml:\"link\"`\n\tGuid string `xml:\"guid\"`\n}\n\ntype enclosure struct {\n\tUrl string `xml:\"url,attr\"`\n\tType string `xml:\"type,attr\"`\n}\n\ntype archive struct {\n\tGuid []string `json:\"guid\"`\n}\n\nfunc main() {\n\n\taccountPtr := flag.String(\"user\", \"\", \"soup.io username\")\n\tflag.Parse()\n\n\tif *accountPtr == \"\" {\n\t\tflag.Usage()\n\t\tos.Exit(0)\n\t}\n\n\turl := fmt.Sprintf(\"http:\/\/%s.soup.io\/rss\", *accountPtr)\n\tfeedResponse, err := http.Get(url)\n\tif err != nil {\n\t\tfmt.Printf(\"Error fetching %s: %s\", url, err)\n\t\tos.Exit(1)\n\t}\n\tdefer feedResponse.Body.Close()\n\tfeedBody, err := ioutil.ReadAll(feedResponse.Body)\n\tif err != nil {\n\t\tfmt.Printf(\"Error reading feed from %s: %s\", url, err)\n\t\tos.Exit(1)\n\t}\n\n\tvar feed rss\n\txml.Unmarshal(feedBody, &feed)\n\n\ta, err := readArchive()\n\tif err != nil {\n\t\tfmt.Println(\"No archive data found. Will create a fresh one\")\n\t}\n\n\tfor _, i := range feed.Channel.Items {\n\t\tif inArchive(i.Guid, a) {\n\t\t\tfmt.Printf(\"Skipping %s. Already in archive.\\n\", i.Enclosure.Url)\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Printf(\"Saving %s...\\n\", i.Enclosure.Url)\n\t\tresponse, err := http.Get(i.Enclosure.Url)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error fetching %s: %s\\n\", i.Enclosure.Url, err)\n\t\t\tcontinue\n\t\t}\n\t\tfilepath := \"archive\/\" + path.Base(i.Enclosure.Url)\n\t\tfile, err := os.Create(filepath)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error opening file %s: %s\\n\", filepath, err)\n\t\t\tresponse.Body.Close()\n\t\t\tcontinue\n\t\t}\n\t\t_, err = io.Copy(file, response.Body)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error writing file %s: %s\\n\", filepath, err)\n\t\t\tresponse.Body.Close()\n\t\t\tcontinue\n\t\t}\n\t\tresponse.Body.Close()\n\t\tfile.Close()\n\n\t\ta.Guid = append(a.Guid, i.Guid)\n\t\tsaveArchive(a)\n\t}\n}\n\nfunc saveArchive(a archive) {\n\tdata, err := json.Marshal(a)\n\tif err != nil {\n\t\tfmt.Println(\"Error marshalling archive data: \", err)\n\t\treturn\n\t}\n\tioutil.WriteFile(\"archive\/guids.json\", data, 0600)\n}\n\nfunc inArchive(guid string, a archive) bool {\n\tfor _, s := range a.Guid {\n\t\tif guid == s {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc readArchive() (archive, error) {\n\tvar a archive\n\n\tarchiveData, err := ioutil.ReadFile(\"archive\/guids.json\")\n\tif err != nil {\n\t\treturn a, err\n\t}\n\n\terr = json.Unmarshal(archiveData, &a)\n\tif err != nil {\n\t\treturn a, err\n\t}\n\n\treturn a, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/starmanmartin\/simple-fs\"\n)\n\nconst (\n\tinstall = \"install\"\n\ttest = \"test\"\n)\n\nvar runTypes = []string{install, test}\n\nvar (\n\tlastPart *regexp.Regexp\n\tisTest, isBenchTest, isExecute, isWatch bool\n\tnewRoot, packageName, currentPath, outputString string\n\trestArgs []string\n)\n\nfunc init() {\n\tlastPart, _ = regexp.Compile(`[^\\\\\/]*$`)\n\n\tflag.BoolVar(&isTest, \"t\", false, \"Run as Test\")\n\tflag.BoolVar(&isBenchTest, \"b\", false, \"Bench tests (only if test)\")\n\tflag.BoolVar(&isExecute, \"e\", false, \"Execute (only if not test)\")\n\tflag.BoolVar(&isWatch, \"w\", false, \"Execute (only if not test)\")\n\tflag.StringVar(&outputString, \"p\", \"\", \"Make Package\")\n}\n\nfunc getCmd(cmdCommand []string) *exec.Cmd {\n\tparts := cmdCommand\n\thead := parts[0]\n\tparts = parts[1:len(parts)]\n\n\tcmd := exec.Command(head, parts...)\n\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\treturn cmd\n}\n\nfunc exeCmd(cmdCommand []string) (*exec.Cmd, error) {\n\tcmd := getCmd(cmdCommand)\n\n\tif err := cmd.Run(); err != nil {\n\t\treturn cmd, err\n\t}\n\n\treturn cmd, nil\n}\n\nfunc buildCommand(packageName string) []string {\n\tbuffer := make([]string, 0, 6)\n\n\tbuffer = append(buffer, \"go\")\n\n\tif isTest {\n\t\tbuffer = append(buffer, \"test\")\n\t\tif isBenchTest {\n\t\t\tbuffer = append(buffer, \"-bench=.\")\n\t\t}\n\t} else {\n\t\tbuffer = append(buffer, \"install\")\n\t}\n\n\tbuffer = append(buffer, \"-v\")\n\n\treturn buffer\n}\n\nfunc handelPathArgs() (string, string, []string, error) {\n\targs := flag.Args()\n\tif len(args) == 0 {\n\t\treturn \"\", \"\", nil, errors.New(\"No Args\")\n\t}\n\n\tif len(args) == 1 || args[0][:11] == \"github.com\/\" {\n\t\tdir, err := os.Getwd()\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", nil, (err)\n\t\t}\n\n\t\treturn dir, args[0], args[1:], nil\n\t}\n\n\tabsPath, err := filepath.Abs(args[0])\n\tif err != nil {\n\t\treturn \"\", \"\", nil, (err)\n\t}\n\n\treturn absPath, args[1], args[1:], nil\n}\n\nfunc copyPackage(dir, packageName, funcName string) (isPackage bool, err error) {\n\tif len(outputString) == 0 {\n\t\treturn\n\t}\n\n\tisPackage = true\n\tdest := dir + \"\/bin\/\" + funcName + \"\/\"\n\tsrc := dir + \"\/src\/\" + packageName + \"\/\"\n\n\toutput := strings.Split(outputString, \" \")\n\n\tfor _, dirName := range output {\n\t\terr = fs.CopyFolder(src+dirName, dest+dirName)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc main() {\n\tflag.Parse()\n\tvar err error\n\tnewRoot, packageName, restArgs, err = handelPathArgs()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\tcurrentPath := os.Getenv(\"GOPATH\")\n\tdefer func() {\n\t\tlog.Println(\"Done!!\")\n\t\tos.Setenv(\"GOPATH\", currentPath)\n\t}()\n\tnewPath := []string{newRoot, \";\", currentPath}\n\n\tos.Setenv(\"GOPATH\", strings.Join(newPath, \"\"))\n\trunBuild()\n}\n\nfunc runBuild() {\n\tbuildCommandList := buildCommand(packageName)\n\tbuildCommandList = append(buildCommandList, packageName)\n _, err := exeCmd(buildCommandList)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfuncName := lastPart.FindString(packageName)\n\tisPackage, err := copyPackage(newRoot, packageName, funcName)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t} else if isPackage {\n\t\tfs.SyncFile(newRoot+\"\/bin\/\"+funcName+\".exe\", newRoot+\"\/bin\/\"+funcName+\"\/\"+funcName+\".exe\")\n\t\tfuncName = funcName + \"\/\" + funcName\n\t}\n\n\tif isExecute && !isTest {\n\t\tlog.Printf(\"Running %s\\n\", funcName)\n\t\texecutionPath := newRoot + \"\/bin\/\" + funcName + \".exe\"\n\t\texArgs := []string{executionPath}\n\t\texArgs = append(exArgs, restArgs...)\n\t\tif isWatch {\n\t\t\twatch(exArgs, newRoot+\"\/src\/\"+packageName)\n\t\t} else {\n\t\t\t_, err := exeCmd(exArgs)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t} else {\n\t\tlog.Printf(\"Builded %s\\n\", funcName)\n\t}\n}\n\nfunc watch(args []string, rootPath string) {\n\tdone := make(chan error, 1)\n\tdoneWithoutErr := make(chan bool, 1)\n\n\tcmd := getCmd(args)\n\n\tgo func() {\n\t\terr := cmd.Run()\n\t\tif err != nil {\n\t\t\tdone <- err\n\t\t} else {\n\t\t\tdoneWithoutErr <- true\n\t\t}\n\t}()\n\n\trestart := make(chan bool, 1)\n\n\tgo func() {\n\t\tticker := time.NewTicker(5 * time.Second)\n\t\tlastChaek := time.Now()\n\t\tfor _ = range ticker.C {\n\t\t\tisUpdated, _ := fs.CheckIfFolderUpdated(rootPath, lastChaek)\n\t\t\tif isUpdated {\n\t\t\t\trestart <- true\n\t\t\t\tticker.Stop()\n\t\t\t}\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-restart:\n\t\tselect {\n\t\tcase <-doneWithoutErr:\n\t\t\tlog.Println(\"process restarted\")\n\t\t\trunBuild()\n\t\tdefault:\n\t\t\tif err := cmd.Process.Kill(); err != nil {\n\t\t\t\tlog.Fatal(\"failed to kill: \", err)\n\t\t\t}\n\n\t\t\tlog.Println(\"process restarted\")\n\t\t\trunBuild()\n\n\t\t}\n\n\tcase err := <-done:\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"process done with error = %v\", err)\n\t\t} else {\n\t\t\tlog.Print(\"process done gracefully without error\")\n\t\t}\n\t}\n\n}\n<commit_msg>removed package folder<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/starmanmartin\/simple-fs\"\n)\n\nconst (\n\tinstall = \"install\"\n\ttest = \"test\"\n)\n\nvar runTypes = []string{install, test}\n\nvar (\n\tlastPart *regexp.Regexp\n\tisTest, isBenchTest, isExecute, isWatch bool\n\tnewRoot, packageName, currentPath, outputString string\n\trestArgs []string\n)\n\nfunc init() {\n\tlastPart, _ = regexp.Compile(`[^\\\\\/]*$`)\n\n\tflag.BoolVar(&isTest, \"t\", false, \"Run as Test\")\n\tflag.BoolVar(&isBenchTest, \"b\", false, \"Bench tests (only if test)\")\n\tflag.BoolVar(&isExecute, \"e\", false, \"Execute (only if not test)\")\n\tflag.BoolVar(&isWatch, \"w\", false, \"Execute (only if not test)\")\n\tflag.StringVar(&outputString, \"p\", \"\", \"Make Package\")\n}\n\nfunc getCmd(cmdCommand []string) *exec.Cmd {\n\tparts := cmdCommand\n\thead := parts[0]\n\tparts = parts[1:len(parts)]\n\n\tcmd := exec.Command(head, parts...)\n\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\treturn cmd\n}\n\nfunc exeCmd(cmdCommand []string) (*exec.Cmd, error) {\n\tcmd := getCmd(cmdCommand)\n\n\tif err := cmd.Run(); err != nil {\n\t\treturn cmd, err\n\t}\n\n\treturn cmd, nil\n}\n\nfunc buildCommand(packageName string) []string {\n\tbuffer := make([]string, 0, 6)\n\n\tbuffer = append(buffer, \"go\")\n\n\tif isTest {\n\t\tbuffer = append(buffer, \"test\")\n\t\tif isBenchTest {\n\t\t\tbuffer = append(buffer, \"-bench=.\")\n\t\t}\n\t} else {\n\t\tbuffer = append(buffer, \"install\")\n\t}\n\n\tbuffer = append(buffer, \"-v\")\n\n\treturn buffer\n}\n\nfunc handelPathArgs() (string, string, []string, error) {\n\targs := flag.Args()\n\tif len(args) == 0 {\n\t\treturn \"\", \"\", nil, errors.New(\"No Args\")\n\t}\n\n\tif len(args) == 1 || args[0][:11] == \"github.com\/\" {\n\t\tdir, err := os.Getwd()\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", nil, (err)\n\t\t}\n\n\t\treturn dir, args[0], args[1:], nil\n\t}\n\n\tabsPath, err := filepath.Abs(args[0])\n\tif err != nil {\n\t\treturn \"\", \"\", nil, (err)\n\t}\n\n\treturn absPath, args[1], args[1:], nil\n}\n\nfunc copyPackage(dir, packageName string) (isPackage bool, err error) {\n\tif len(outputString) == 0 {\n\t\treturn\n\t}\n\n\tisPackage = true\n\tdest := dir + \"\/bin\/\"\n\tsrc := dir + \"\/src\/\" + packageName + \"\/\"\n\n\toutput := strings.Split(outputString, \" \")\n\n\tfor _, dirName := range output {\n\t\terr = fs.CopyFolder(src+dirName, dest+dirName)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc main() {\n\tflag.Parse()\n\tvar err error\n\tnewRoot, packageName, restArgs, err = handelPathArgs()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\tcurrentPath := os.Getenv(\"GOPATH\")\n\tdefer func() {\n\t\tlog.Println(\"Done!!\")\n\t\tos.Setenv(\"GOPATH\", currentPath)\n\t}()\n\tnewPath := []string{newRoot, \";\", currentPath}\n\n\tos.Setenv(\"GOPATH\", strings.Join(newPath, \"\"))\n\trunBuild()\n}\n\nfunc runBuild() {\n\tbuildCommandList := buildCommand(packageName)\n\tbuildCommandList = append(buildCommandList, packageName)\n _, err := exeCmd(buildCommandList)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfuncName := lastPart.FindString(packageName)\n\t_, err = copyPackage(newRoot, packageName)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t} \n\t\n\tif isExecute && !isTest {\n\t\tlog.Printf(\"Running %s\\n\", funcName)\n\t\texecutionPath := newRoot + \"\/bin\/\" + funcName + \".exe\"\n\t\texArgs := []string{executionPath}\n\t\texArgs = append(exArgs, restArgs...)\n\t\tif isWatch {\n\t\t\twatch(exArgs, newRoot+\"\/src\/\"+packageName)\n\t\t} else {\n\t\t\t_, err := exeCmd(exArgs)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t} else {\n\t\tlog.Printf(\"Builded %s\\n\", funcName)\n\t}\n}\n\nfunc watch(args []string, rootPath string) {\n\tdone := make(chan error, 1)\n\tdoneWithoutErr := make(chan bool, 1)\n\n\tcmd := getCmd(args)\n\n\tgo func() {\n\t\terr := cmd.Run()\n\t\tif err != nil {\n\t\t\tdone <- err\n\t\t} else {\n\t\t\tdoneWithoutErr <- true\n\t\t}\n\t}()\n\n\trestart := make(chan bool, 1)\n\n\tgo func() {\n\t\tticker := time.NewTicker(5 * time.Second)\n\t\tlastChaek := time.Now()\n\t\tfor _ = range ticker.C {\n\t\t\tisUpdated, _ := fs.CheckIfFolderUpdated(rootPath, lastChaek)\n\t\t\tif isUpdated {\n\t\t\t\trestart <- true\n\t\t\t\tticker.Stop()\n\t\t\t}\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-restart:\n\t\tselect {\n\t\tcase <-doneWithoutErr:\n\t\t\tlog.Println(\"process restarted\")\n\t\t\trunBuild()\n\t\tdefault:\n\t\t\tif err := cmd.Process.Kill(); err != nil {\n\t\t\t\tlog.Fatal(\"failed to kill: \", err)\n\t\t\t}\n\n\t\t\tlog.Println(\"process restarted\")\n\t\t\trunBuild()\n\n\t\t}\n\n\tcase err := <-done:\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"process done with error = %v\", err)\n\t\t} else {\n\t\t\tlog.Print(\"process done gracefully without error\")\n\t\t}\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tgit2go \"gopkg.in\/libgit2\/git2go.v24\"\n\n\t\"github.com\/josledp\/termcolor\"\n)\n\nfunc getPythonVirtualEnv() string {\n\tvirtualEnv, ve := os.LookupEnv(\"VIRTUAL_ENV\")\n\tif ve {\n\t\tave := strings.Split(virtualEnv, \"\/\")\n\t\tvirtualEnv = fmt.Sprintf(\"(%s) \", ave[len(ave)-1])\n\t}\n\treturn virtualEnv\n}\n\nfunc getAwsInfo() string {\n\trole := os.Getenv(\"AWS_ROLE\")\n\tif role != \"\" {\n\t\ttmp := strings.Split(role, \":\")\n\t\trole = tmp[0]\n\t\ttmp = strings.Split(tmp[1], \"-\")\n\t\trole += \":\" + tmp[2]\n\t}\n\treturn role\n}\n\nfunc main() {\n\t\/\/Get basicinfo\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Fatalln(\"Unable to get current path\", err)\n\t}\n\thome := os.Getenv(\"HOME\")\n\tif home != \"\" {\n\t\tpwd = strings.Replace(pwd, home, \"~\", -1)\n\t}\n\tuser := os.Getenv(\"USER\")\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\tlog.Fatalln(\"Unable to get hostname\", err)\n\t}\n\n\t\/\/Get Python VirtualEnv info\n\tvirtualEnv := getPythonVirtualEnv()\n\n\t\/\/AWS\n\tawsRole := getAwsInfo()\n\tiExpire, _ := strconv.ParseInt(os.Getenv(\"AWS_SESSION_EXPIRE\"), 10, 0)\n\tawsExpire := time.Unix(iExpire, int64(0))\n\n\t\/\/Get git information\n\t_ = git2go.Repository{}\n\n\tgitpath, err := git2go.Discover(\".\", false, []string{\"\/\"})\n\tif err == nil {\n\t\trepository, err := git2go.OpenRepository(gitpath)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error opening repository at %s: %v\", gitpath, err)\n\t\t}\n\t\tdefer repository.Free()\n\t\trepostate, err := repository.StatusList(nil)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Impsible to get repository status at %s: %v\", gitpath, err)\n\t\t}\n\t\tdefer repostate.Free()\n\t\tn, err := repostate.EntryCount()\n\t\tfor i := 0; i < n; i++ {\n\t\t\tentry, _ := repostate.ByIndex(i)\n\t\t\tfmt.Println(entry.Status)\n\t\t}\n\t}\n\n\t\/\/Formatting\n\tvar userInfo, pwdInfo, virtualEnvInfo, awsInfo string\n\n\tif user == \"root\" {\n\t\tuserInfo = termcolor.Format(hostname, termcolor.Bold, termcolor.FgRed)\n\t} else {\n\t\tuserInfo = termcolor.Format(hostname, termcolor.Bold, termcolor.FgGreen)\n\t}\n\tpwdInfo = termcolor.Format(pwd, termcolor.Bold, termcolor.FgBlue)\n\tvirtualEnvInfo = termcolor.Format(virtualEnv, termcolor.FgBlue)\n\n\tif awsRole != \"\" {\n\t\tt := termcolor.FgGreen\n\t\td := time.Until(awsExpire).Seconds()\n\t\tif d < 0 {\n\t\t\tt = termcolor.FgRed\n\t\t} else if d < 600 {\n\t\t\tt = termcolor.FgYellow\n\t\t}\n\t\tawsInfo = termcolor.Format(awsRole, t) + \"|\"\n\t}\n\n\tfmt.Printf(\"%s[%s%s %s]$ \", virtualEnvInfo, awsInfo, userInfo, pwdInfo)\n}\n<commit_msg>structure improvement<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tgit2go \"gopkg.in\/libgit2\/git2go.v24\"\n\n\t\"github.com\/josledp\/termcolor\"\n)\n\nfunc getPythonVirtualEnv() string {\n\tvirtualEnv, ve := os.LookupEnv(\"VIRTUAL_ENV\")\n\tif ve {\n\t\tave := strings.Split(virtualEnv, \"\/\")\n\t\tvirtualEnv = fmt.Sprintf(\"(%s) \", ave[len(ave)-1])\n\t}\n\treturn virtualEnv\n}\n\nfunc getAwsInfo() string {\n\trole := os.Getenv(\"AWS_ROLE\")\n\tif role != \"\" {\n\t\ttmp := strings.Split(role, \":\")\n\t\trole = tmp[0]\n\t\ttmp = strings.Split(tmp[1], \"-\")\n\t\trole += \":\" + tmp[2]\n\t}\n\treturn role\n}\n\ntype termInfo struct {\n\tpwd string\n\tuser string\n\thostname string\n\tvirtualEnv string\n\tawsRole string\n\tawsExpire time.Time\n}\n\nfunc main() {\n\tvar err error\n\n\tti := termInfo{}\n\t\/\/Get basicinfo\n\tti.pwd, err = os.Getwd()\n\tif err != nil {\n\t\tlog.Fatalln(\"Unable to get current path\", err)\n\t}\n\thome := os.Getenv(\"HOME\")\n\tif home != \"\" {\n\t\tti.pwd = strings.Replace(ti.pwd, home, \"~\", -1)\n\t}\n\tti.user = os.Getenv(\"USER\")\n\tti.hostname, err = os.Hostname()\n\tif err != nil {\n\t\tlog.Fatalln(\"Unable to get hostname\", err)\n\t}\n\n\t\/\/Get Python VirtualEnv info\n\tti.virtualEnv = getPythonVirtualEnv()\n\n\t\/\/AWS\n\tti.awsRole = getAwsInfo()\n\tiExpire, _ := strconv.ParseInt(os.Getenv(\"AWS_SESSION_EXPIRE\"), 10, 0)\n\tti.awsExpire = time.Unix(iExpire, int64(0))\n\n\t\/\/Get git information\n\t_ = git2go.Repository{}\n\n\tgitpath, err := git2go.Discover(\".\", false, []string{\"\/\"})\n\tif err == nil {\n\t\trepository, err := git2go.OpenRepository(gitpath)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error opening repository at %s: %v\", gitpath, err)\n\t\t}\n\t\tdefer repository.Free()\n\t\trepostate, err := repository.StatusList(nil)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Impsible to get repository status at %s: %v\", gitpath, err)\n\t\t}\n\t\tdefer repostate.Free()\n\t\tn, err := repostate.EntryCount()\n\t\tfor i := 0; i < n; i++ {\n\t\t\tentry, _ := repostate.ByIndex(i)\n\t\t\tfmt.Println(entry.Status)\n\t\t}\n\t}\n\n\tfmt.Println(makePrompt(ti))\n}\n\nfunc makePrompt(ti termInfo) string {\n\t\/\/Formatting\n\tvar userInfo, pwdInfo, virtualEnvInfo, awsInfo string\n\n\tif ti.user == \"root\" {\n\t\tuserInfo = termcolor.Format(ti.hostname, termcolor.Bold, termcolor.FgRed)\n\t} else {\n\t\tuserInfo = termcolor.Format(ti.hostname, termcolor.Bold, termcolor.FgGreen)\n\t}\n\tpwdInfo = termcolor.Format(ti.pwd, termcolor.Bold, termcolor.FgBlue)\n\tvirtualEnvInfo = termcolor.Format(ti.virtualEnv, termcolor.FgBlue)\n\n\tif ti.awsRole != \"\" {\n\t\tt := termcolor.FgGreen\n\t\td := time.Until(ti.awsExpire).Seconds()\n\t\tif d < 0 {\n\t\t\tt = termcolor.FgRed\n\t\t} else if d < 600 {\n\t\t\tt = termcolor.FgYellow\n\t\t}\n\t\tawsInfo = termcolor.Format(ti.awsRole, t) + \"|\"\n\t}\n\n\treturn fmt.Sprintf(\"%s[%s%s %s]$ \", virtualEnvInfo, awsInfo, userInfo, pwdInfo)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ lgts (looks good to slack) listens to slack event stream\n\/\/ and then reports back when certain emojis are used by certain\n\/\/ users on predetermined messages\n\n\/\/ Useful for instances where public callbacks are impossible\n\npackage main\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n)\n\nvar slackToken = os.Getenv(\"SLACK_TOKEN\")\nvar serverURL = os.Getenv(\"SERVER_URL\")\nvar stateFilePath = os.Getenv(\"STATE_FILE_PATH\") \/\/ Full path to save state of registered applications\nvar debug bool\nvar debugString = os.Getenv(\"DEBUG\")\n\nfunc init() {\n\n\tif slackToken == \"\" {\n\t\tlog.Fatal(\"$SLACK_TOKEN not set\")\n\t}\n\n\tif serverURL == \"\" {\n\t\tserverURL = \"localhost:8080\"\n\t}\n\n\tif stateFilePath == \"\" {\n\t\tstateFilePath = \".\/state\"\n\t}\n\n\tif debugString == \"\" {\n\t\tdebug = false\n\t} else {\n\t\tdebug, _ = strconv.ParseBool(debugString)\n\t}\n}\n\nfunc main() {\n\trouter := httprouter.New()\n\n\tlgts := *newlgts(stateFilePath)\n\tlgts.loadState()\n\tgo runrtm(&lgts, slackToken, debug)\n\n\trouter.GET(\"\/apps\", lgts.getApps)\n\trouter.POST(\"\/apps\", lgts.registerApp)\n\n\trouter.GET(\"\/messages\", lgts.getMessages)\n\trouter.POST(\"\/messages\", lgts.registerMessage)\n\n\tlog.Printf(\"Starting lgts server on %s\\n\", serverURL)\n\tlog.Fatal(http.ListenAndServe(serverURL, router))\n}\n<commit_msg>Better opening preamble<commit_after>\/\/ lgts (looks good to slack) is a web service which\n\/\/ allows applications to register messages to look out for,\n\/\/ listens to the slack event stream and then reports back\n\/\/ when certain emojis are used by certain users on those\n\/\/ predetermined messages\n\n\/\/ Provides a service much like interactive messages.\n\/\/ Useful for instances where public callbacks are impossible\n\npackage main\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n)\n\nvar slackToken = os.Getenv(\"SLACK_TOKEN\")\nvar serverURL = os.Getenv(\"SERVER_URL\")\nvar stateFilePath = os.Getenv(\"STATE_FILE_PATH\") \/\/ Full path to save state of registered applications\nvar debug bool\nvar debugString = os.Getenv(\"DEBUG\")\n\nfunc init() {\n\n\tif slackToken == \"\" {\n\t\tlog.Fatal(\"$SLACK_TOKEN not set\")\n\t}\n\n\tif serverURL == \"\" {\n\t\tserverURL = \"localhost:8080\"\n\t}\n\n\tif stateFilePath == \"\" {\n\t\tstateFilePath = \".\/state\"\n\t}\n\n\tif debugString == \"\" {\n\t\tdebug = false\n\t} else {\n\t\tdebug, _ = strconv.ParseBool(debugString)\n\t}\n}\n\nfunc main() {\n\trouter := httprouter.New()\n\n\tlgts := *newlgts(stateFilePath)\n\tlgts.loadState()\n\tgo runrtm(&lgts, slackToken, debug)\n\n\trouter.GET(\"\/apps\", lgts.getApps)\n\trouter.POST(\"\/apps\", lgts.registerApp)\n\n\trouter.GET(\"\/messages\", lgts.getMessages)\n\trouter.POST(\"\/messages\", lgts.registerMessage)\n\n\tlog.Printf(\"Starting lgts server on %s\\n\", serverURL)\n\tlog.Fatal(http.ListenAndServe(serverURL, router))\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Minimalist Object Storage, (C) 2015 Minio, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"errors\"\n\t\"reflect\"\n\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/minio-io\/cli\"\n\t\"github.com\/minio-io\/minio\/pkg\/api\"\n\t\"github.com\/minio-io\/minio\/pkg\/api\/web\"\n\t\"github.com\/minio-io\/minio\/pkg\/iodine\"\n\t\"github.com\/minio-io\/minio\/pkg\/server\"\n\t\"github.com\/minio-io\/minio\/pkg\/server\/httpserver\"\n\t\"github.com\/minio-io\/minio\/pkg\/storage\/drivers\/donut\"\n\t\"github.com\/minio-io\/minio\/pkg\/storage\/drivers\/memory\"\n\t\"github.com\/minio-io\/minio\/pkg\/utils\/log\"\n)\n\nvar globalDebugFlag = false\n\nvar commands = []cli.Command{\n\tmodeCmd,\n}\n\nvar modeCommands = []cli.Command{\n\tmemoryCmd,\n\tdonutCmd,\n}\n\nvar modeCmd = cli.Command{\n\tName: \"mode\",\n\tSubcommands: modeCommands,\n\tDescription: \"Mode of execution\",\n}\n\nvar memoryCmd = cli.Command{\n\tName: \"memory\",\n\tDescription: \"Limit maximum memory usage to SIZE in [B, KB, MB, GB]\",\n\tAction: runMemory,\n\tCustomHelpTemplate: `NAME:\n minio {{.Name}} - {{.Description}}\n\nUSAGE:\n minio {{.Name}} SIZE\n\nEXAMPLES:\n 1. Limit maximum memory usage to 64MB\n $ minio {{.Name}} 64MB\n\n 2. Limit maximum memory usage to 4GB\n $ minio {{.Name}} 4GB\n`,\n}\n\nvar donutCmd = cli.Command{\n\tName: \"donut\",\n\tDescription: \"Specify a path to instantiate donut\",\n\tAction: runDonut,\n\tCustomHelpTemplate: `NAME:\n minio {{.Name}} - {{.Description}}\n\nUSAGE:\n minio {{.Name}} PATH\n\nEXAMPLES:\n 1. Use a regular disk to create donut\n $ minio {{.Name}} \/mnt\/disk1\n\n 2. Use a lvm group to create donut\n $ minio {{.Name}} \/media\/lvm\/groups\n`,\n}\n\ntype memoryFactory struct {\n\tserver.Config\n\tmaxMemory uint64\n}\n\nfunc (f memoryFactory) getStartServerFunc() startServerFunc {\n\treturn func() (chan<- string, <-chan error) {\n\t\thttpConfig := httpserver.Config{}\n\t\thttpConfig.Address = f.Address\n\t\thttpConfig.TLS = f.TLS\n\t\thttpConfig.CertFile = f.CertFile\n\t\thttpConfig.KeyFile = f.KeyFile\n\t\thttpConfig.Websocket = false\n\t\t_, _, driver := memory.Start(f.maxMemory)\n\t\tctrl, status, _ := httpserver.Start(api.HTTPHandler(f.Domain, driver), httpConfig)\n\t\treturn ctrl, status\n\t}\n}\n\ntype webFactory struct {\n\tserver.Config\n}\n\nfunc (f webFactory) getStartServerFunc() startServerFunc {\n\treturn func() (chan<- string, <-chan error) {\n\t\thttpConfig := httpserver.Config{}\n\t\thttpConfig.Address = f.Address\n\t\thttpConfig.TLS = f.TLS\n\t\thttpConfig.CertFile = f.CertFile\n\t\thttpConfig.KeyFile = f.KeyFile\n\t\thttpConfig.Websocket = false\n\t\tctrl, status, _ := httpserver.Start(web.HTTPHandler(), httpConfig)\n\t\treturn ctrl, status\n\t}\n}\n\ntype donutFactory struct {\n\tserver.Config\n\tpath string\n}\n\nfunc (f donutFactory) getStartServerFunc() startServerFunc {\n\treturn func() (chan<- string, <-chan error) {\n\t\thttpConfig := httpserver.Config{}\n\t\thttpConfig.Address = f.Address\n\t\thttpConfig.TLS = f.TLS\n\t\thttpConfig.CertFile = f.CertFile\n\t\thttpConfig.KeyFile = f.KeyFile\n\t\thttpConfig.Websocket = false\n\t\t_, _, driver := donut.Start(f.path)\n\t\tctrl, status, _ := httpserver.Start(api.HTTPHandler(f.Domain, driver), httpConfig)\n\t\treturn ctrl, status\n\t}\n}\n\nvar flags = []cli.Flag{\n\tcli.StringFlag{\n\t\tName: \"domain,d\",\n\t\tValue: \"\",\n\t\tUsage: \"domain used for routing incoming API requests\",\n\t},\n\tcli.StringFlag{\n\t\tName: \"api-address,a\",\n\t\tValue: \":9000\",\n\t\tUsage: \"address for incoming API requests\",\n\t},\n\tcli.StringFlag{\n\t\tName: \"web-address,w\",\n\t\tValue: \":9001\",\n\t\tUsage: \"address for incoming Management UI requests\",\n\t},\n\tcli.StringFlag{\n\t\tName: \"cert,c\",\n\t\tHide: true,\n\t\tValue: \"\",\n\t\tUsage: \"cert.pem\",\n\t},\n\tcli.StringFlag{\n\t\tName: \"key,k\",\n\t\tHide: true,\n\t\tValue: \"\",\n\t\tUsage: \"key.pem\",\n\t},\n\tcli.BoolFlag{\n\t\tName: \"debug\",\n\t\tUsage: \"print debug information\",\n\t},\n}\n\nfunc init() {\n\t\/\/ Check for the environment early on and gracefuly report.\n\t_, err := user.Current()\n\tif err != nil {\n\t\tlog.Fatalf(\"minio: Unable to obtain user's home directory. \\nError: %s\\n\", err)\n\t}\n}\n\ntype startServerFunc func() (chan<- string, <-chan error)\n\nfunc runMemory(c *cli.Context) {\n\tif len(c.Args()) < 1 {\n\t\tcli.ShowCommandHelpAndExit(c, \"memory\", 1) \/\/ last argument is exit code\n\t}\n\tapiServerConfig := getAPIServerConfig(c)\n\tmaxMemory, err := humanize.ParseBytes(c.Args().First())\n\tif err != nil {\n\t\tlog.Fatalf(\"MaxMemory not a numeric value with reason: %s\", err)\n\t}\n\tmemoryDriver := memoryFactory{\n\t\tConfig: apiServerConfig,\n\t\tmaxMemory: maxMemory,\n\t}\n\tapiServer := memoryDriver.getStartServerFunc()\n\twebServer := getWebServerConfigFunc(c)\n\tservers := []startServerFunc{apiServer, webServer}\n\tstartMinio(servers)\n}\n\nfunc runDonut(c *cli.Context) {\n\tu, err := user.Current()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tif len(c.Args()) < 1 {\n\t\tcli.ShowCommandHelpAndExit(c, \"donut\", 1) \/\/ last argument is exit code\n\t}\n\tp := c.Args().First()\n\tif strings.TrimSpace(p) == \"\" {\n\t\tp = path.Join(u.HomeDir, \"minio-storage\", \"donut\")\n\t}\n\tapiServerConfig := getAPIServerConfig(c)\n\tdonutDriver := donutFactory{\n\t\tConfig: apiServerConfig,\n\t\tpath: p,\n\t}\n\tapiServer := donutDriver.getStartServerFunc()\n\twebServer := getWebServerConfigFunc(c)\n\tservers := []startServerFunc{apiServer, webServer}\n\tstartMinio(servers)\n}\n\nfunc getAPIServerConfig(c *cli.Context) server.Config {\n\tcertFile := c.String(\"cert\")\n\tkeyFile := c.String(\"key\")\n\tif (certFile != \"\" && keyFile == \"\") || (certFile == \"\" && keyFile != \"\") {\n\t\tlog.Fatalln(\"Both certificate and key must be provided to enable https\")\n\t}\n\ttls := (certFile != \"\" && keyFile != \"\")\n\treturn server.Config{\n\t\tDomain: c.GlobalString(\"domain\"),\n\t\tAddress: c.GlobalString(\"api-address\"),\n\t\tTLS: tls,\n\t\tCertFile: certFile,\n\t\tKeyFile: keyFile,\n\t}\n}\n\nfunc getWebServerConfigFunc(c *cli.Context) startServerFunc {\n\tconfig := server.Config{\n\t\tDomain: c.GlobalString(\"domain\"),\n\t\tAddress: c.GlobalString(\"web-address\"),\n\t\tTLS: false,\n\t\tCertFile: \"\",\n\t\tKeyFile: \"\",\n\t}\n\twebDrivers := webFactory{\n\t\tConfig: config,\n\t}\n\treturn webDrivers.getStartServerFunc()\n}\n\nfunc startMinio(servers []startServerFunc) {\n\tvar ctrlChannels []chan<- string\n\tvar errChannels []<-chan error\n\tfor _, server := range servers {\n\t\tctrlChannel, errChannel := server()\n\t\tctrlChannels = append(ctrlChannels, ctrlChannel)\n\t\terrChannels = append(errChannels, errChannel)\n\t}\n\tcases := createSelectCases(errChannels)\n\tfor len(cases) > 0 {\n\t\tchosen, value, recvOk := reflect.Select(cases)\n\t\tswitch recvOk {\n\t\tcase true:\n\t\t\t\/\/ Status Message Received\n\t\t\tswitch true {\n\t\t\tcase value.Interface() != nil:\n\t\t\t\t\/\/ For any error received cleanup all existing channels and fail\n\t\t\t\tfor _, ch := range ctrlChannels {\n\t\t\t\t\tclose(ch)\n\t\t\t\t}\n\t\t\t\tmsg := fmt.Sprintf(\"%q\", value.Interface())\n\t\t\t\tlog.Fatal(iodine.New(errors.New(msg), nil))\n\t\t\t}\n\t\tcase false:\n\t\t\t\/\/ Channel closed, remove from list\n\t\t\tvar aliveStatusChans []<-chan error\n\t\t\tfor i, ch := range errChannels {\n\t\t\t\tif i != chosen {\n\t\t\t\t\taliveStatusChans = append(aliveStatusChans, ch)\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ create new select cases without defunct channel\n\t\t\terrChannels = aliveStatusChans\n\t\t\tcases = createSelectCases(errChannels)\n\t\t}\n\t}\n}\n\nfunc createSelectCases(channels []<-chan error) []reflect.SelectCase {\n\tcases := make([]reflect.SelectCase, len(channels))\n\tfor i, ch := range channels {\n\t\tcases[i] = reflect.SelectCase{\n\t\t\tDir: reflect.SelectRecv,\n\t\t\tChan: reflect.ValueOf(ch),\n\t\t}\n\t}\n\treturn cases\n}\n\n\/\/ Tries to get os\/arch\/platform specific information\n\/\/ Returns a map of current os\/arch\/platform\/memstats\nfunc getSystemData() map[string]string {\n\thost, err := os.Hostname()\n\tif err != nil {\n\t\thost = \"\"\n\t}\n\tmemstats := &runtime.MemStats{}\n\truntime.ReadMemStats(memstats)\n\tmem := fmt.Sprintf(\"Used: %s | Allocated: %s | Used-Heap: %s | Allocated-Heap: %s\",\n\t\thumanize.Bytes(memstats.Alloc),\n\t\thumanize.Bytes(memstats.TotalAlloc),\n\t\thumanize.Bytes(memstats.HeapAlloc),\n\t\thumanize.Bytes(memstats.HeapSys))\n\tplatform := fmt.Sprintf(\"Host: %s | OS: %s | Arch: %s\",\n\t\thost,\n\t\truntime.GOOS,\n\t\truntime.GOARCH)\n\tgoruntime := fmt.Sprintf(\"Version: %s | CPUs: %s\", runtime.Version(), strconv.Itoa(runtime.NumCPU()))\n\treturn map[string]string{\n\t\t\"PLATFORM\": platform,\n\t\t\"RUNTIME\": goruntime,\n\t\t\"MEM\": mem,\n\t}\n}\n\nfunc main() {\n\t\/\/ set up iodine\n\tiodine.SetGlobalState(\"minio.git\", minioGitCommitHash)\n\tiodine.SetGlobalState(\"minio.starttime\", time.Now().Format(time.RFC3339))\n\n\t\/\/ set up app\n\tapp := cli.NewApp()\n\tapp.Name = \"minio\"\n\tapp.Version = minioGitCommitHash\n\tapp.Author = \"Minio.io\"\n\tapp.Usage = \"Minimalist Object Storage\"\n\tapp.Flags = flags\n\tapp.Commands = commands\n\tapp.Before = func(c *cli.Context) error {\n\t\tglobalDebugFlag = c.GlobalBool(\"debug\")\n\t\tif globalDebugFlag {\n\t\t\tapp.ExtraInfo = getSystemData()\n\t\t}\n\t\treturn nil\n\t}\n\tapp.RunAndExitOnError()\n}\n<commit_msg>Update command docs further<commit_after>\/*\n * Minimalist Object Storage, (C) 2015 Minio, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"errors\"\n\t\"reflect\"\n\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/minio-io\/cli\"\n\t\"github.com\/minio-io\/minio\/pkg\/api\"\n\t\"github.com\/minio-io\/minio\/pkg\/api\/web\"\n\t\"github.com\/minio-io\/minio\/pkg\/iodine\"\n\t\"github.com\/minio-io\/minio\/pkg\/server\"\n\t\"github.com\/minio-io\/minio\/pkg\/server\/httpserver\"\n\t\"github.com\/minio-io\/minio\/pkg\/storage\/drivers\/donut\"\n\t\"github.com\/minio-io\/minio\/pkg\/storage\/drivers\/memory\"\n\t\"github.com\/minio-io\/minio\/pkg\/utils\/log\"\n)\n\nvar globalDebugFlag = false\n\nvar commands = []cli.Command{\n\tmodeCmd,\n}\n\nvar modeCommands = []cli.Command{\n\tmemoryCmd,\n\tdonutCmd,\n}\n\nvar modeCmd = cli.Command{\n\tName: \"mode\",\n\tSubcommands: modeCommands,\n\tDescription: \"Mode of execution\",\n}\n\nvar memoryCmd = cli.Command{\n\tName: \"memory\",\n\tDescription: \"Limit maximum memory usage to SIZE in [B, KB, MB, GB]\",\n\tAction: runMemory,\n\tCustomHelpTemplate: `NAME:\n minio mode {{.Name}} - {{.Description}}\n\nUSAGE:\n minio mode {{.Name}} SIZE\n\nEXAMPLES:\n 1. Limit maximum memory usage to 64MB\n $ minio mode {{.Name}} 64MB\n\n 2. Limit maximum memory usage to 4GB\n $ minio mode {{.Name}} 4GB\n`,\n}\n\nvar donutCmd = cli.Command{\n\tName: \"donut\",\n\tDescription: \"Specify a path to instantiate donut\",\n\tAction: runDonut,\n\tCustomHelpTemplate: `NAME:\n minio mode {{.Name}} - {{.Description}}\n\nUSAGE:\n minio mode {{.Name}} PATH\n\nEXAMPLES:\n 1. Create a donut volume under \"\/mnt\/backup\"\n $ minio mode {{.Name}} \/mnt\/backup\n\n 2. Create a temporary donut volume under \"\/tmp\"\n $ minio mode {{.Name}} \/tmp\n\n`,\n}\n\ntype memoryFactory struct {\n\tserver.Config\n\tmaxMemory uint64\n}\n\nfunc (f memoryFactory) getStartServerFunc() startServerFunc {\n\treturn func() (chan<- string, <-chan error) {\n\t\thttpConfig := httpserver.Config{}\n\t\thttpConfig.Address = f.Address\n\t\thttpConfig.TLS = f.TLS\n\t\thttpConfig.CertFile = f.CertFile\n\t\thttpConfig.KeyFile = f.KeyFile\n\t\thttpConfig.Websocket = false\n\t\t_, _, driver := memory.Start(f.maxMemory)\n\t\tctrl, status, _ := httpserver.Start(api.HTTPHandler(f.Domain, driver), httpConfig)\n\t\treturn ctrl, status\n\t}\n}\n\ntype webFactory struct {\n\tserver.Config\n}\n\nfunc (f webFactory) getStartServerFunc() startServerFunc {\n\treturn func() (chan<- string, <-chan error) {\n\t\thttpConfig := httpserver.Config{}\n\t\thttpConfig.Address = f.Address\n\t\thttpConfig.TLS = f.TLS\n\t\thttpConfig.CertFile = f.CertFile\n\t\thttpConfig.KeyFile = f.KeyFile\n\t\thttpConfig.Websocket = false\n\t\tctrl, status, _ := httpserver.Start(web.HTTPHandler(), httpConfig)\n\t\treturn ctrl, status\n\t}\n}\n\ntype donutFactory struct {\n\tserver.Config\n\tpath string\n}\n\nfunc (f donutFactory) getStartServerFunc() startServerFunc {\n\treturn func() (chan<- string, <-chan error) {\n\t\thttpConfig := httpserver.Config{}\n\t\thttpConfig.Address = f.Address\n\t\thttpConfig.TLS = f.TLS\n\t\thttpConfig.CertFile = f.CertFile\n\t\thttpConfig.KeyFile = f.KeyFile\n\t\thttpConfig.Websocket = false\n\t\t_, _, driver := donut.Start(f.path)\n\t\tctrl, status, _ := httpserver.Start(api.HTTPHandler(f.Domain, driver), httpConfig)\n\t\treturn ctrl, status\n\t}\n}\n\nvar flags = []cli.Flag{\n\tcli.StringFlag{\n\t\tName: \"domain,d\",\n\t\tValue: \"\",\n\t\tUsage: \"domain used for routing incoming API requests\",\n\t},\n\tcli.StringFlag{\n\t\tName: \"api-address,a\",\n\t\tValue: \":9000\",\n\t\tUsage: \"address for incoming API requests\",\n\t},\n\tcli.StringFlag{\n\t\tName: \"web-address,w\",\n\t\tValue: \":9001\",\n\t\tUsage: \"address for incoming Management UI requests\",\n\t},\n\tcli.StringFlag{\n\t\tName: \"cert,c\",\n\t\tHide: true,\n\t\tValue: \"\",\n\t\tUsage: \"cert.pem\",\n\t},\n\tcli.StringFlag{\n\t\tName: \"key,k\",\n\t\tHide: true,\n\t\tValue: \"\",\n\t\tUsage: \"key.pem\",\n\t},\n\tcli.BoolFlag{\n\t\tName: \"debug\",\n\t\tUsage: \"print debug information\",\n\t},\n}\n\nfunc init() {\n\t\/\/ Check for the environment early on and gracefuly report.\n\t_, err := user.Current()\n\tif err != nil {\n\t\tlog.Fatalf(\"minio: Unable to obtain user's home directory. \\nError: %s\\n\", err)\n\t}\n}\n\ntype startServerFunc func() (chan<- string, <-chan error)\n\nfunc runMemory(c *cli.Context) {\n\tif len(c.Args()) < 1 {\n\t\tcli.ShowCommandHelpAndExit(c, \"memory\", 1) \/\/ last argument is exit code\n\t}\n\tapiServerConfig := getAPIServerConfig(c)\n\tmaxMemory, err := humanize.ParseBytes(c.Args().First())\n\tif err != nil {\n\t\tlog.Fatalf(\"MaxMemory not a numeric value with reason: %s\", err)\n\t}\n\tmemoryDriver := memoryFactory{\n\t\tConfig: apiServerConfig,\n\t\tmaxMemory: maxMemory,\n\t}\n\tapiServer := memoryDriver.getStartServerFunc()\n\twebServer := getWebServerConfigFunc(c)\n\tservers := []startServerFunc{apiServer, webServer}\n\tstartMinio(servers)\n}\n\nfunc runDonut(c *cli.Context) {\n\tu, err := user.Current()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tif len(c.Args()) < 1 {\n\t\tcli.ShowCommandHelpAndExit(c, \"donut\", 1) \/\/ last argument is exit code\n\t}\n\tp := c.Args().First()\n\tif strings.TrimSpace(p) == \"\" {\n\t\tp = path.Join(u.HomeDir, \"minio-storage\", \"donut\")\n\t}\n\tapiServerConfig := getAPIServerConfig(c)\n\tdonutDriver := donutFactory{\n\t\tConfig: apiServerConfig,\n\t\tpath: p,\n\t}\n\tapiServer := donutDriver.getStartServerFunc()\n\twebServer := getWebServerConfigFunc(c)\n\tservers := []startServerFunc{apiServer, webServer}\n\tstartMinio(servers)\n}\n\nfunc getAPIServerConfig(c *cli.Context) server.Config {\n\tcertFile := c.String(\"cert\")\n\tkeyFile := c.String(\"key\")\n\tif (certFile != \"\" && keyFile == \"\") || (certFile == \"\" && keyFile != \"\") {\n\t\tlog.Fatalln(\"Both certificate and key must be provided to enable https\")\n\t}\n\ttls := (certFile != \"\" && keyFile != \"\")\n\treturn server.Config{\n\t\tDomain: c.GlobalString(\"domain\"),\n\t\tAddress: c.GlobalString(\"api-address\"),\n\t\tTLS: tls,\n\t\tCertFile: certFile,\n\t\tKeyFile: keyFile,\n\t}\n}\n\nfunc getWebServerConfigFunc(c *cli.Context) startServerFunc {\n\tconfig := server.Config{\n\t\tDomain: c.GlobalString(\"domain\"),\n\t\tAddress: c.GlobalString(\"web-address\"),\n\t\tTLS: false,\n\t\tCertFile: \"\",\n\t\tKeyFile: \"\",\n\t}\n\twebDrivers := webFactory{\n\t\tConfig: config,\n\t}\n\treturn webDrivers.getStartServerFunc()\n}\n\nfunc startMinio(servers []startServerFunc) {\n\tvar ctrlChannels []chan<- string\n\tvar errChannels []<-chan error\n\tfor _, server := range servers {\n\t\tctrlChannel, errChannel := server()\n\t\tctrlChannels = append(ctrlChannels, ctrlChannel)\n\t\terrChannels = append(errChannels, errChannel)\n\t}\n\tcases := createSelectCases(errChannels)\n\tfor len(cases) > 0 {\n\t\tchosen, value, recvOk := reflect.Select(cases)\n\t\tswitch recvOk {\n\t\tcase true:\n\t\t\t\/\/ Status Message Received\n\t\t\tswitch true {\n\t\t\tcase value.Interface() != nil:\n\t\t\t\t\/\/ For any error received cleanup all existing channels and fail\n\t\t\t\tfor _, ch := range ctrlChannels {\n\t\t\t\t\tclose(ch)\n\t\t\t\t}\n\t\t\t\tmsg := fmt.Sprintf(\"%q\", value.Interface())\n\t\t\t\tlog.Fatal(iodine.New(errors.New(msg), nil))\n\t\t\t}\n\t\tcase false:\n\t\t\t\/\/ Channel closed, remove from list\n\t\t\tvar aliveStatusChans []<-chan error\n\t\t\tfor i, ch := range errChannels {\n\t\t\t\tif i != chosen {\n\t\t\t\t\taliveStatusChans = append(aliveStatusChans, ch)\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ create new select cases without defunct channel\n\t\t\terrChannels = aliveStatusChans\n\t\t\tcases = createSelectCases(errChannels)\n\t\t}\n\t}\n}\n\nfunc createSelectCases(channels []<-chan error) []reflect.SelectCase {\n\tcases := make([]reflect.SelectCase, len(channels))\n\tfor i, ch := range channels {\n\t\tcases[i] = reflect.SelectCase{\n\t\t\tDir: reflect.SelectRecv,\n\t\t\tChan: reflect.ValueOf(ch),\n\t\t}\n\t}\n\treturn cases\n}\n\n\/\/ Tries to get os\/arch\/platform specific information\n\/\/ Returns a map of current os\/arch\/platform\/memstats\nfunc getSystemData() map[string]string {\n\thost, err := os.Hostname()\n\tif err != nil {\n\t\thost = \"\"\n\t}\n\tmemstats := &runtime.MemStats{}\n\truntime.ReadMemStats(memstats)\n\tmem := fmt.Sprintf(\"Used: %s | Allocated: %s | Used-Heap: %s | Allocated-Heap: %s\",\n\t\thumanize.Bytes(memstats.Alloc),\n\t\thumanize.Bytes(memstats.TotalAlloc),\n\t\thumanize.Bytes(memstats.HeapAlloc),\n\t\thumanize.Bytes(memstats.HeapSys))\n\tplatform := fmt.Sprintf(\"Host: %s | OS: %s | Arch: %s\",\n\t\thost,\n\t\truntime.GOOS,\n\t\truntime.GOARCH)\n\tgoruntime := fmt.Sprintf(\"Version: %s | CPUs: %s\", runtime.Version(), strconv.Itoa(runtime.NumCPU()))\n\treturn map[string]string{\n\t\t\"PLATFORM\": platform,\n\t\t\"RUNTIME\": goruntime,\n\t\t\"MEM\": mem,\n\t}\n}\n\nfunc main() {\n\t\/\/ set up iodine\n\tiodine.SetGlobalState(\"minio.git\", minioGitCommitHash)\n\tiodine.SetGlobalState(\"minio.starttime\", time.Now().Format(time.RFC3339))\n\n\t\/\/ set up app\n\tapp := cli.NewApp()\n\tapp.Name = \"minio\"\n\tapp.Version = minioGitCommitHash\n\tapp.Author = \"Minio.io\"\n\tapp.Usage = \"Minimalist Object Storage\"\n\tapp.Flags = flags\n\tapp.Commands = commands\n\tapp.Before = func(c *cli.Context) error {\n\t\tglobalDebugFlag = c.GlobalBool(\"debug\")\n\t\tif globalDebugFlag {\n\t\t\tapp.ExtraInfo = getSystemData()\n\t\t}\n\t\treturn nil\n\t}\n\tapp.RunAndExitOnError()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/freeusd\/solebtc\/Godeps\/_workspace\/src\/github.com\/gin-gonic\/gin\"\n\t\"github.com\/freeusd\/solebtc\/Godeps\/_workspace\/src\/github.com\/robfig\/cron\"\n\t\"github.com\/freeusd\/solebtc\/controllers\/v1\"\n\t\"github.com\/freeusd\/solebtc\/errors\"\n\t\"github.com\/freeusd\/solebtc\/middlewares\"\n\t\"github.com\/freeusd\/solebtc\/models\"\n\t\"github.com\/freeusd\/solebtc\/services\/cache\"\n\t\"github.com\/freeusd\/solebtc\/services\/cache\/memory\"\n\t\"github.com\/freeusd\/solebtc\/services\/mail\"\n\t\"github.com\/freeusd\/solebtc\/services\/mail\/mandrill\"\n\t\"github.com\/freeusd\/solebtc\/services\/storage\"\n\t\"github.com\/freeusd\/solebtc\/services\/storage\/mysql\"\n\t\"github.com\/freeusd\/solebtc\/utils\"\n)\n\nvar (\n\tlogWriter io.Writer = os.Stdout\n\tpanicWriter io.Writer = os.Stderr\n\tmailer mail.Mailer\n\tstore storage.Storage\n\tmemoryCache cache.Cache\n)\n\nfunc init() {\n\t\/\/ ORDER MATTERs\n\tinitConfig()\n\tinitMailer()\n\tinitStorage()\n\tinitCache()\n\tinitCronjob()\n}\n\nfunc main() {\n\tgin.SetMode(ginEnvMode())\n\trouter := gin.New()\n\n\t\/\/ middlewares\n\trecovery := gin.RecoveryWithWriter(panicWriter)\n\tlogger := middlewares.LoggerWithWriter(logWriter)\n\tcors := middlewares.CORS()\n\terrorWriter := middlewares.ErrorWriter()\n\tauthRequired := middlewares.AuthRequired(store.GetAuthToken, config.AuthToken.Lifetime)\n\n\t\/\/ globally use middlewares\n\trouter.Use(recovery, logger, cors, errorWriter)\n\n\t\/\/ version 1 api endpoints\n\tv1Endpoints := router.Group(\"\/v1\")\n\n\t\/\/ user endpoints\n\tv1UserEndpoints := v1Endpoints.Group(\"\/users\")\n\tv1UserEndpoints.GET(\"\", authRequired, v1.UserInfo(store.GetUserByID))\n\tv1UserEndpoints.POST(\"\", v1.Signup(store.CreateUser, store.GetUserByID))\n\tv1UserEndpoints.PUT(\"\/:id\/status\", v1.VerifyEmail(store.GetSessionByToken, store.GetUserByID, store.UpdateUserStatus))\n\tv1UserEndpoints.GET(\"\/referees\", authRequired, v1.RefereeList(store.GetRefereesSince, store.GetRefereesUntil))\n\n\t\/\/ auth token endpoints\n\tv1AuthTokenEndpoints := v1Endpoints.Group(\"\/auth_tokens\")\n\tv1AuthTokenEndpoints.POST(\"\", v1.Login(store.GetUserByEmail, store.CreateAuthToken))\n\tv1AuthTokenEndpoints.DELETE(\"\", authRequired, v1.Logout(store.DeleteAuthToken))\n\n\t\/\/ session endpoints\n\tv1SessionEndpoints := v1Endpoints.Group(\"\/sessions\")\n\tv1SessionEndpoints.POST(\"\", authRequired, v1.RequestVerifyEmail(store.GetUserByID, store.UpsertSession, mailer.SendEmail))\n\n\t\/\/ income endpoints\n\tv1IncomeEndpoints := v1Endpoints.Group(\"\/incomes\", authRequired)\n\tv1IncomeEndpoints.POST(\"\/rewards\",\n\t\tv1.GetReward(store.GetUserByID,\n\t\t\tmemoryCache.GetLatestTotalReward,\n\t\t\tmemoryCache.GetLatestConfig,\n\t\t\tmemoryCache.GetRewardRatesByType,\n\t\t\tcreateRewardIncome))\n\tv1IncomeEndpoints.GET(\"\/rewards\", v1.RewardList(store.GetRewardIncomesSince, store.GetRewardIncomesUntil))\n\tv1IncomeEndpoints.GET(\"\/rewards\/referees\/:referee_id\", v1.RefereeRewardList(store.GetUserByID, store.GetRewardIncomesSince, store.GetRewardIncomesUntil))\n\n\tfmt.Fprintf(logWriter, \"SoleBTC is running on %s\\n\", config.HTTP.Port)\n\tpanicIfErrored(router.Run(config.HTTP.Port))\n}\n\nfunc createRewardIncome(income models.Income, now time.Time) *errors.Error {\n\tif err := store.CreateRewardIncome(income, now); err != nil {\n\t\treturn err\n\t}\n\n\ttotalReward := income.Income\n\tif income.RefererID > 0 {\n\t\ttotalReward += income.RefererIncome\n\t}\n\tmemoryCache.IncrementTotalReward(now, totalReward)\n\n\treturn nil\n}\n\nfunc initMailer() {\n\t\/\/ mailer\n\tmailer = mandrill.New(config.Mandrill.Key, config.Mandrill.FromEmail, config.Mandrill.FromName)\n}\n\nfunc initStorage() {\n\t\/\/ storage service\n\ts, err := mysql.New(config.DB.DataSourceName)\n\tpanicIfErrored(err)\n\ts.SetMaxOpenConns(config.DB.MaxOpenConns)\n\ts.SetMaxIdleConns(config.DB.MaxIdleConns)\n\tstore = s\n}\n\nfunc initCache() {\n\tmemoryCache = memory.New()\n\n\t\/\/ init config in cache\n\tconfig, err := store.GetLatestConfig()\n\tpanicIfErrored(err)\n\tmemoryCache.SetLatestConfig(config)\n\n\t\/\/ init rates in cache\n\tlessRates, err := store.GetRewardRatesByType(models.RewardRateTypeLess)\n\tpanicIfErrored(err)\n\tmemoryCache.SetRewardRates(models.RewardRateTypeLess, lessRates)\n\n\tmoreRates, err := store.GetRewardRatesByType(models.RewardRateTypeMore)\n\tpanicIfErrored(err)\n\tmemoryCache.SetRewardRates(models.RewardRateTypeMore, moreRates)\n\n\t\/\/ update bitcoin price on start\n\tupdateBitcoinPrice()\n}\n\nfunc initCronjob() {\n\tc := cron.New()\n\tpanicIfErrored(c.AddFunc(\"@every 1m\", updateBitcoinPrice))\n\tpanicIfErrored(c.AddFunc(\"@daily\", createWithdrawal))\n\tc.Start()\n}\n\n\/\/ update bitcoin price in cache\nfunc updateBitcoinPrice() {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tfmt.Fprintf(logWriter, \"Update bitcoin price panic: %v\\n\", err)\n\t\t}\n\t}()\n\n\t\/\/ get bitcoin price from blockchain.info\n\tp, err := utils.BitcoinPrice()\n\tif err != nil {\n\t\tfmt.Fprintf(logWriter, \"Fetch bitcoin price error: %v\\n\", err)\n\t\treturn\n\t}\n\n\t\/\/ update bitcoin price in database\n\tif err := store.UpdateLatestBitcoinPrice(p); err != nil {\n\t\tfmt.Fprintf(logWriter, \"Update bitcoin price in database error: %v\\n\", err)\n\t\treturn\n\t}\n\n\t\/\/ update bitcoin price in cache\n\tmemoryCache.UpdateBitcoinPrice(p)\n\n\tfmt.Fprintf(logWriter, \"Successfully update bitcoin price to %v\\n\", p)\n}\n\n\/\/ automatically create withdrawal\nfunc createWithdrawal() {\n\t\/\/ critical for the system\n\t\/\/ use panicWriter for error log instead of logWriter\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tfmt.Fprintf(panicWriter, \"Create withdrawal panic: %v\\n\", err)\n\t\t}\n\t}()\n\n\tusers, err := store.GetWithdrawableUsers()\n\tif err != nil {\n\t\tfmt.Fprintf(panicWriter, \"Get withdrawable users error: %v\\n\", err)\n\t\treturn\n\t}\n\n\tf := func(users []models.User, handler func(err error, u models.User)) {\n\t\tfor i := range users {\n\t\t\thandler(store.CreateWithdrawal(models.Withdrawal{\n\t\t\t\tUserID: users[i].ID,\n\t\t\t\tAmount: users[i].Balance,\n\t\t\t\tBitcoinAddress: users[i].BitcoinAddress,\n\t\t\t}), users[i])\n\t\t}\n\t}\n\n\t\/\/ create withdrawal, move errored ones into retry queue\n\tretryUsers := []models.User{}\n\tf(users, func(err error, u models.User) {\n\t\tif err != nil {\n\t\t\tretryUsers = append(retryUsers, u)\n\t\t}\n\t})\n\n\t\/\/ retry with error output\n\terrored := false\n\tf(retryUsers, func(err error, u models.User) {\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(panicWriter, \"Create withdrawal for user %v error: %v\\n\", u, err)\n\t\t\terrored = true\n\t\t}\n\t})\n\n\tif !errored {\n\t\tfmt.Fprintf(logWriter, \"Create withdrawals successfully...\\n\")\n\t}\n}\n\n\/\/ fail fast on initialization\nfunc panicIfErrored(err error) {\n\tif err != nil {\n\t\t\/\/ Tricky:\n\t\t\/\/ pass a nil *errors.Error into this function\n\t\t\/\/ err is not nil\n\t\t\/\/ see discussion here:\n\t\t\/\/ https:\/\/github.com\/go-playground\/validator\/issues\/134\n\t\t\/\/ or\n\t\t\/\/ http:\/\/stackoverflow.com\/questions\/29138591\/hiding-nil-values-understanding-why-golang-fails-here\/29138676#29138676\n\t\tif e, ok := err.(*errors.Error); ok {\n\t\t\tif e != nil {\n\t\t\t\tpanic(e.ErrStringForLogging)\n\t\t\t}\n\t\t} else {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n<commit_msg>Connects hub and websocket controller<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/freeusd\/solebtc\/Godeps\/_workspace\/src\/github.com\/gin-gonic\/gin\"\n\t\"github.com\/freeusd\/solebtc\/Godeps\/_workspace\/src\/github.com\/robfig\/cron\"\n\t\"github.com\/freeusd\/solebtc\/controllers\/v1\"\n\t\"github.com\/freeusd\/solebtc\/errors\"\n\t\"github.com\/freeusd\/solebtc\/middlewares\"\n\t\"github.com\/freeusd\/solebtc\/models\"\n\t\"github.com\/freeusd\/solebtc\/services\/cache\"\n\t\"github.com\/freeusd\/solebtc\/services\/cache\/memory\"\n\t\"github.com\/freeusd\/solebtc\/services\/hub\"\n\t\"github.com\/freeusd\/solebtc\/services\/hub\/list\"\n\t\"github.com\/freeusd\/solebtc\/services\/mail\"\n\t\"github.com\/freeusd\/solebtc\/services\/mail\/mandrill\"\n\t\"github.com\/freeusd\/solebtc\/services\/storage\"\n\t\"github.com\/freeusd\/solebtc\/services\/storage\/mysql\"\n\t\"github.com\/freeusd\/solebtc\/utils\"\n)\n\nvar (\n\tlogWriter io.Writer = os.Stdout\n\tpanicWriter io.Writer = os.Stderr\n\tmailer mail.Mailer\n\tstore storage.Storage\n\tmemoryCache cache.Cache\n\tconnsHub hub.Hub\n)\n\nfunc init() {\n\t\/\/ ORDER MATTERs\n\tinitConfig()\n\tinitMailer()\n\tinitStorage()\n\tinitCache()\n\tinitHub()\n\tinitCronjob()\n}\n\nfunc main() {\n\tgin.SetMode(ginEnvMode())\n\trouter := gin.New()\n\n\t\/\/ middlewares\n\trecovery := gin.RecoveryWithWriter(panicWriter)\n\tlogger := middlewares.LoggerWithWriter(logWriter)\n\tcors := middlewares.CORS()\n\terrorWriter := middlewares.ErrorWriter()\n\tauthRequired := middlewares.AuthRequired(store.GetAuthToken, config.AuthToken.Lifetime)\n\n\t\/\/ globally use middlewares\n\trouter.Use(recovery, logger, cors, errorWriter)\n\n\t\/\/ version 1 api endpoints\n\tv1Endpoints := router.Group(\"\/v1\")\n\n\t\/\/ user endpoints\n\tv1UserEndpoints := v1Endpoints.Group(\"\/users\")\n\tv1UserEndpoints.GET(\"\", authRequired, v1.UserInfo(store.GetUserByID))\n\tv1UserEndpoints.POST(\"\", v1.Signup(store.CreateUser, store.GetUserByID))\n\tv1UserEndpoints.PUT(\"\/:id\/status\", v1.VerifyEmail(store.GetSessionByToken, store.GetUserByID, store.UpdateUserStatus))\n\tv1UserEndpoints.GET(\"\/referees\", authRequired, v1.RefereeList(store.GetRefereesSince, store.GetRefereesUntil))\n\n\t\/\/ auth token endpoints\n\tv1AuthTokenEndpoints := v1Endpoints.Group(\"\/auth_tokens\")\n\tv1AuthTokenEndpoints.POST(\"\", v1.Login(store.GetUserByEmail, store.CreateAuthToken))\n\tv1AuthTokenEndpoints.DELETE(\"\", authRequired, v1.Logout(store.DeleteAuthToken))\n\n\t\/\/ session endpoints\n\tv1SessionEndpoints := v1Endpoints.Group(\"\/sessions\")\n\tv1SessionEndpoints.POST(\"\", authRequired, v1.RequestVerifyEmail(store.GetUserByID, store.UpsertSession, mailer.SendEmail))\n\n\t\/\/ income endpoints\n\tv1IncomeEndpoints := v1Endpoints.Group(\"\/incomes\", authRequired)\n\tv1IncomeEndpoints.POST(\"\/rewards\",\n\t\tv1.GetReward(store.GetUserByID,\n\t\t\tmemoryCache.GetLatestTotalReward,\n\t\t\tmemoryCache.GetLatestConfig,\n\t\t\tmemoryCache.GetRewardRatesByType,\n\t\t\tcreateRewardIncome))\n\tv1IncomeEndpoints.GET(\"\/rewards\", v1.RewardList(store.GetRewardIncomesSince, store.GetRewardIncomesUntil))\n\tv1IncomeEndpoints.GET(\"\/rewards\/referees\/:referee_id\", v1.RefereeRewardList(store.GetUserByID, store.GetRewardIncomesSince, store.GetRewardIncomesUntil))\n\n\t\/\/ websocket endpoint\n\tv1Endpoints.GET(\"\/websocket\", v1.Websocket(\n\t\tconnsHub.Len,\n\t\tmemoryCache.GetLatestConfig,\n\t\tmemoryCache.GetLatestIncomes,\n\t\tconnsHub.Broadcast,\n\t\thub.WrapPutWebsocketConn(connsHub.PutConn)),\n\t)\n\n\tfmt.Fprintf(logWriter, \"SoleBTC is running on %s\\n\", config.HTTP.Port)\n\tpanicIfErrored(router.Run(config.HTTP.Port))\n}\n\nfunc createRewardIncome(income models.Income, now time.Time) *errors.Error {\n\tif err := store.CreateRewardIncome(income, now); err != nil {\n\t\treturn err\n\t}\n\n\ttotalReward := income.Income\n\tif income.RefererID > 0 {\n\t\ttotalReward += income.RefererIncome\n\t}\n\tmemoryCache.IncrementTotalReward(now, totalReward)\n\n\treturn nil\n}\n\nfunc initMailer() {\n\t\/\/ mailer\n\tmailer = mandrill.New(config.Mandrill.Key, config.Mandrill.FromEmail, config.Mandrill.FromName)\n}\n\nfunc initStorage() {\n\t\/\/ storage service\n\ts, err := mysql.New(config.DB.DataSourceName)\n\tpanicIfErrored(err)\n\ts.SetMaxOpenConns(config.DB.MaxOpenConns)\n\ts.SetMaxIdleConns(config.DB.MaxIdleConns)\n\tstore = s\n}\n\nfunc initCache() {\n\tmemoryCache = memory.New()\n\n\t\/\/ init config in cache\n\tconfig, err := store.GetLatestConfig()\n\tpanicIfErrored(err)\n\tmemoryCache.SetLatestConfig(config)\n\n\t\/\/ init rates in cache\n\tlessRates, err := store.GetRewardRatesByType(models.RewardRateTypeLess)\n\tpanicIfErrored(err)\n\tmemoryCache.SetRewardRates(models.RewardRateTypeLess, lessRates)\n\n\tmoreRates, err := store.GetRewardRatesByType(models.RewardRateTypeMore)\n\tpanicIfErrored(err)\n\tmemoryCache.SetRewardRates(models.RewardRateTypeMore, moreRates)\n\n\t\/\/ update bitcoin price on start\n\tupdateBitcoinPrice()\n}\n\nfunc initHub() {\n\tconnsHub = list.New(func(err error) {\n\t\tfmt.Fprintf(panicWriter, \"Send message over hub error: %v\", err)\n\t})\n}\n\nfunc initCronjob() {\n\tc := cron.New()\n\tpanicIfErrored(c.AddFunc(\"@every 1m\", updateBitcoinPrice))\n\tpanicIfErrored(c.AddFunc(\"@daily\", createWithdrawal))\n\tc.Start()\n}\n\n\/\/ update bitcoin price in cache\nfunc updateBitcoinPrice() {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tfmt.Fprintf(logWriter, \"Update bitcoin price panic: %v\\n\", err)\n\t\t}\n\t}()\n\n\t\/\/ get bitcoin price from blockchain.info\n\tp, err := utils.BitcoinPrice()\n\tif err != nil {\n\t\tfmt.Fprintf(logWriter, \"Fetch bitcoin price error: %v\\n\", err)\n\t\treturn\n\t}\n\n\t\/\/ update bitcoin price in database\n\tif err := store.UpdateLatestBitcoinPrice(p); err != nil {\n\t\tfmt.Fprintf(logWriter, \"Update bitcoin price in database error: %v\\n\", err)\n\t\treturn\n\t}\n\n\t\/\/ update bitcoin price in cache\n\tmemoryCache.UpdateBitcoinPrice(p)\n\n\tfmt.Fprintf(logWriter, \"Successfully update bitcoin price to %v\\n\", p)\n}\n\n\/\/ automatically create withdrawal\nfunc createWithdrawal() {\n\t\/\/ critical for the system\n\t\/\/ use panicWriter for error log instead of logWriter\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tfmt.Fprintf(panicWriter, \"Create withdrawal panic: %v\\n\", err)\n\t\t}\n\t}()\n\n\tusers, err := store.GetWithdrawableUsers()\n\tif err != nil {\n\t\tfmt.Fprintf(panicWriter, \"Get withdrawable users error: %v\\n\", err)\n\t\treturn\n\t}\n\n\tf := func(users []models.User, handler func(err error, u models.User)) {\n\t\tfor i := range users {\n\t\t\thandler(store.CreateWithdrawal(models.Withdrawal{\n\t\t\t\tUserID: users[i].ID,\n\t\t\t\tAmount: users[i].Balance,\n\t\t\t\tBitcoinAddress: users[i].BitcoinAddress,\n\t\t\t}), users[i])\n\t\t}\n\t}\n\n\t\/\/ create withdrawal, move errored ones into retry queue\n\tretryUsers := []models.User{}\n\tf(users, func(err error, u models.User) {\n\t\tif err != nil {\n\t\t\tretryUsers = append(retryUsers, u)\n\t\t}\n\t})\n\n\t\/\/ retry with error output\n\terrored := false\n\tf(retryUsers, func(err error, u models.User) {\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(panicWriter, \"Create withdrawal for user %v error: %v\\n\", u, err)\n\t\t\terrored = true\n\t\t}\n\t})\n\n\tif !errored {\n\t\tfmt.Fprintf(logWriter, \"Create withdrawals successfully...\\n\")\n\t}\n}\n\n\/\/ fail fast on initialization\nfunc panicIfErrored(err error) {\n\tif err != nil {\n\t\t\/\/ Tricky:\n\t\t\/\/ pass a nil *errors.Error into this function\n\t\t\/\/ err is not nil\n\t\t\/\/ see discussion here:\n\t\t\/\/ https:\/\/github.com\/go-playground\/validator\/issues\/134\n\t\t\/\/ or\n\t\t\/\/ http:\/\/stackoverflow.com\/questions\/29138591\/hiding-nil-values-understanding-why-golang-fails-here\/29138676#29138676\n\t\tif e, ok := err.(*errors.Error); ok {\n\t\t\tif e != nil {\n\t\t\t\tpanic(e.ErrStringForLogging)\n\t\t\t}\n\t\t} else {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Initial commmit<commit_after><|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tgit2go \"gopkg.in\/libgit2\/git2go.v26\"\n\n\t\"github.com\/josledp\/termcolor\"\n)\n\nconst (\n\tdownArrow = \"↓\"\n\tupArrow = \"↑\"\n\tthreePoints = \"…\"\n\tdot = \"●\"\n\tcheck = \"✔\"\n)\n\nfunc getPythonVirtualEnv() string {\n\tvirtualEnv, ve := os.LookupEnv(\"VIRTUAL_ENV\")\n\tif ve {\n\t\tave := strings.Split(virtualEnv, \"\/\")\n\t\tvirtualEnv = fmt.Sprintf(\"(%s) \", ave[len(ave)-1])\n\t}\n\treturn virtualEnv\n}\n\nfunc getAwsInfo() string {\n\trole := os.Getenv(\"AWS_ROLE\")\n\tif role != \"\" {\n\t\ttmp := strings.Split(role, \":\")\n\t\trole = tmp[0]\n\t\ttmp = strings.Split(tmp[1], \"-\")\n\t\trole += \":\" + tmp[2]\n\t}\n\treturn role\n}\n\nfunc getGitInfo() gitInfo {\n\tgi := gitInfo{}\n\n\tgitpath, err := git2go.Discover(\".\", false, []string{\"\/\"})\n\tif err == nil {\n\t\trepository, err := git2go.OpenRepository(gitpath)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error opening repository at %s: %v\", gitpath, err)\n\t\t}\n\t\tdefer repository.Free()\n\n\t\t\/\/Get current tracked & untracked files status\n\t\tstatusOpts := git2go.StatusOptions{\n\t\t\tFlags: git2go.StatusOptIncludeUntracked | git2go.StatusOptRenamesHeadToIndex,\n\t\t}\n\t\trepostate, err := repository.StatusList(&statusOpts)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error getting repository status at %s: %v\", gitpath, err)\n\t\t}\n\t\tdefer repostate.Free()\n\t\tn, err := repostate.EntryCount()\n\t\tfor i := 0; i < n; i++ {\n\t\t\tentry, _ := repostate.ByIndex(i)\n\t\t\tgot := false\n\t\t\tif entry.Status&git2go.StatusCurrent > 0 {\n\t\t\t\tlog.Println(\"StatusCurrent\")\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexNew > 0 {\n\t\t\t\tlog.Println(\"StatusIndexNew\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexModified > 0 {\n\t\t\t\tlog.Println(\"StatusIndexModified\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexDeleted > 0 {\n\t\t\t\tlog.Println(\"StatusIndexDeleted\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexRenamed > 0 {\n\t\t\t\tlog.Println(\"StatusIndexRenamed\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexTypeChange > 0 {\n\t\t\t\tlog.Println(\"StatusIndexTypeChange\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtNew > 0 {\n\t\t\t\tlog.Println(\"StatusWtNew\")\n\t\t\t\tgi.untracked++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtModified > 0 {\n\t\t\t\tlog.Println(\"StatusWtModified\")\n\t\t\t\tgi.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtDeleted > 0 {\n\t\t\t\tlog.Println(\"StatusWtDeleted\")\n\t\t\t\tgi.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtTypeChange > 0 {\n\t\t\t\tlog.Println(\"StatusWtTypeChange\")\n\t\t\t\tgi.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtRenamed > 0 {\n\t\t\t\tlog.Println(\"StatusWtRenamed\")\n\t\t\t\tgi.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIgnored > 0 {\n\t\t\t\tlog.Println(\"StatusIgnored\")\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusConflicted > 0 {\n\t\t\t\tlog.Println(\"StatusConflicted\")\n\t\t\t\tgi.conflict = true\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif !got {\n\t\t\t\tlog.Println(\"Unknown: \", entry.Status)\n\t\t\t}\n\t\t}\n\t\t\/\/Get current branch name\n\t\tlocalRef, err := repository.Head()\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"error getting head: \", err)\n\t\t}\n\t\tdefer localRef.Free()\n\n\t\tref := strings.Split(localRef.Name(), \"\/\")\n\t\tgi.branch = ref[len(ref)-1]\n\t\t\/\/Get commits Ahead\/Behind\n\n\t\tlocalBranch := localRef.Branch()\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Error getting local branch: \", err)\n\t\t}\n\n\t\tremoteRef, err := localBranch.Upstream()\n\t\tif err == nil {\n\t\t\tdefer remoteRef.Free()\n\n\t\t\tif !remoteRef.Target().Equal(localRef.Target()) {\n\t\t\t\tlog.Println(\"Local & remore differ:\", remoteRef.Target().String(), localRef.Target().String())\n\t\t\t\t\/\/git rev-list --left-right localRef...remoteRef\n\t\t\t\toids, err := repository.MergeBases(localRef.Target(), remoteRef.Target())\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalln(\"Error getting merge bases\")\n\t\t\t\t}\n\t\t\t\tfor _, oid := range oids {\n\t\t\t\t\tlog.Println(oid.String())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\treturn gi\n}\n\ntype gitInfo struct {\n\tconflict bool\n\tchanged int\n\tstaged int\n\tuntracked int\n\tcommitsAhead int\n\tcommitsBehind int\n\tstashed int\n\tbranch string\n\tupstream bool\n}\n\ntype termInfo struct {\n\tpwd string\n\tuser string\n\thostname string\n\tvirtualEnv string\n\tawsRole string\n\tawsExpire time.Time\n\tgi gitInfo\n}\n\nfunc main() {\n\tvar err error\n\n\tti := termInfo{}\n\t\/\/Get basicinfo\n\tti.pwd, err = os.Getwd()\n\tif err != nil {\n\t\tlog.Fatalln(\"Unable to get current path\", err)\n\t}\n\thome := os.Getenv(\"HOME\")\n\tif home != \"\" {\n\t\tti.pwd = strings.Replace(ti.pwd, home, \"~\", -1)\n\t}\n\tti.user = os.Getenv(\"USER\")\n\tti.hostname, err = os.Hostname()\n\tif err != nil {\n\t\tlog.Fatalln(\"Unable to get hostname\", err)\n\t}\n\n\t\/\/Get Python VirtualEnv info\n\tti.virtualEnv = getPythonVirtualEnv()\n\n\t\/\/AWS\n\tti.awsRole = getAwsInfo()\n\tiExpire, _ := strconv.ParseInt(os.Getenv(\"AWS_SESSION_EXPIRE\"), 10, 0)\n\tti.awsExpire = time.Unix(iExpire, int64(0))\n\n\t\/\/Get git information\n\t_ = git2go.Repository{}\n\n\tti.gi = getGitInfo()\n\n\tfmt.Println(makePrompt(ti))\n}\n\nfunc makePrompt(ti termInfo) string {\n\t\/\/Formatting\n\tvar userInfo, pwdInfo, virtualEnvInfo, awsInfo string\n\tpromptEnd := \"$\"\n\n\tif ti.user == \"root\" {\n\t\tuserInfo = termcolor.EscapedFormat(ti.hostname, termcolor.Bold, termcolor.FgRed)\n\t\tpromptEnd = \"#\"\n\t} else {\n\t\tuserInfo = termcolor.EscapedFormat(ti.hostname, termcolor.Bold, termcolor.FgGreen)\n\t}\n\tpwdInfo = termcolor.EscapedFormat(ti.pwd, termcolor.Bold, termcolor.FgBlue)\n\tvirtualEnvInfo = termcolor.EscapedFormat(ti.virtualEnv, termcolor.FgBlue)\n\n\tif ti.awsRole != \"\" {\n\t\tt := termcolor.FgGreen\n\t\td := time.Until(ti.awsExpire).Seconds()\n\t\tif d < 0 {\n\t\t\tt = termcolor.FgRed\n\t\t} else if d < 600 {\n\t\t\tt = termcolor.FgYellow\n\t\t}\n\t\tawsInfo = termcolor.EscapedFormat(ti.awsRole, t) + \"|\"\n\t}\n\n\treturn fmt.Sprintf(\"%s[%s%s %s]%s \", virtualEnvInfo, awsInfo, userInfo, pwdInfo, promptEnd)\n}\n<commit_msg>logger + gitInfo almost complete (lacking stashed + minor things)<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tgit2go \"gopkg.in\/libgit2\/git2go.v26\"\n\n\t\"github.com\/josledp\/termcolor\"\n)\n\nconst (\n\tdownArrow = \"↓\"\n\tupArrow = \"↑\"\n\tthreePoints = \"…\"\n\tdot = \"●\"\n\tcheck = \"✔\"\n)\n\nvar logger *log.Logger\n\nfunc getPythonVirtualEnv() string {\n\tvirtualEnv, ve := os.LookupEnv(\"VIRTUAL_ENV\")\n\tif ve {\n\t\tave := strings.Split(virtualEnv, \"\/\")\n\t\tvirtualEnv = fmt.Sprintf(\"(%s) \", ave[len(ave)-1])\n\t}\n\treturn virtualEnv\n}\n\nfunc getAwsInfo() string {\n\trole := os.Getenv(\"AWS_ROLE\")\n\tif role != \"\" {\n\t\ttmp := strings.Split(role, \":\")\n\t\trole = tmp[0]\n\t\ttmp = strings.Split(tmp[1], \"-\")\n\t\trole += \":\" + tmp[2]\n\t}\n\treturn role\n}\n\nfunc getGitInfo() gitInfo {\n\tgi := gitInfo{}\n\n\tgitpath, err := git2go.Discover(\".\", false, []string{\"\/\"})\n\tif err == nil {\n\t\trepository, err := git2go.OpenRepository(gitpath)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error opening repository at %s: %v\", gitpath, err)\n\t\t}\n\t\tdefer repository.Free()\n\n\t\t\/\/Get current tracked & untracked files status\n\t\tstatusOpts := git2go.StatusOptions{\n\t\t\tFlags: git2go.StatusOptIncludeUntracked | git2go.StatusOptRenamesHeadToIndex,\n\t\t}\n\t\trepostate, err := repository.StatusList(&statusOpts)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Error getting repository status at %s: %v\", gitpath, err)\n\t\t}\n\t\tdefer repostate.Free()\n\t\tn, err := repostate.EntryCount()\n\t\tfor i := 0; i < n; i++ {\n\t\t\tentry, _ := repostate.ByIndex(i)\n\t\t\tgot := false\n\t\t\tif entry.Status&git2go.StatusCurrent > 0 {\n\t\t\t\tlogger.Println(\"StatusCurrent\")\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexNew > 0 {\n\t\t\t\tlogger.Println(\"StatusIndexNew\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexModified > 0 {\n\t\t\t\tlogger.Println(\"StatusIndexModified\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexDeleted > 0 {\n\t\t\t\tlogger.Println(\"StatusIndexDeleted\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexRenamed > 0 {\n\t\t\t\tlogger.Println(\"StatusIndexRenamed\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIndexTypeChange > 0 {\n\t\t\t\tlogger.Println(\"StatusIndexTypeChange\")\n\t\t\t\tgi.staged++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtNew > 0 {\n\t\t\t\tlogger.Println(\"StatusWtNew\")\n\t\t\t\tgi.untracked++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtModified > 0 {\n\t\t\t\tlogger.Println(\"StatusWtModified\")\n\t\t\t\tgi.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtDeleted > 0 {\n\t\t\t\tlogger.Println(\"StatusWtDeleted\")\n\t\t\t\tgi.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtTypeChange > 0 {\n\t\t\t\tlogger.Println(\"StatusWtTypeChange\")\n\t\t\t\tgi.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusWtRenamed > 0 {\n\t\t\t\tlogger.Println(\"StatusWtRenamed\")\n\t\t\t\tgi.changed++\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusIgnored > 0 {\n\t\t\t\tlogger.Println(\"StatusIgnored\")\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif entry.Status&git2go.StatusConflicted > 0 {\n\t\t\t\tlogger.Println(\"StatusConflicted\")\n\t\t\t\tgi.conflict = true\n\t\t\t\tgot = true\n\t\t\t}\n\t\t\tif !got {\n\t\t\t\tlogger.Println(\"Unknown: \", entry.Status)\n\t\t\t}\n\t\t}\n\t\t\/\/Get current branch name\n\t\tlocalRef, err := repository.Head()\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"error getting head: \", err)\n\t\t}\n\t\tdefer localRef.Free()\n\n\t\tref := strings.Split(localRef.Name(), \"\/\")\n\t\tgi.branch = ref[len(ref)-1]\n\t\t\/\/Get commits Ahead\/Behind\n\n\t\tlocalBranch := localRef.Branch()\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"Error getting local branch: \", err)\n\t\t}\n\n\t\tremoteRef, err := localBranch.Upstream()\n\t\tif err == nil {\n\t\t\tdefer remoteRef.Free()\n\n\t\t\tif !remoteRef.Target().Equal(localRef.Target()) {\n\t\t\t\tlogger.Println(\"Local & remore differ:\", remoteRef.Target().String(), localRef.Target().String())\n\t\t\t\t\/\/git rev-list --left-right localRef...remoteRef\n\t\t\t\toids, err := repository.MergeBases(localRef.Target(), remoteRef.Target())\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalln(\"Error getting merge bases\")\n\t\t\t\t}\n\n\t\t\t\tgi.commitsAhead = gitCount(repository, localRef.Target(), oids)\n\t\t\t\tgi.commitsBehind = gitCount(repository, remoteRef.Target(), oids)\n\t\t\t\tlogger.Println(gi.commitsAhead, gi.commitsBehind)\n\t\t\t}\n\t\t}\n\n\t}\n\treturn gi\n}\n\nfunc gitCount(r *git2go.Repository, oid *git2go.Oid, until []*git2go.Oid) int {\n\tc, err := r.LookupCommit(oid)\n\tif err != nil {\n\t\tlog.Fatalln(\"Error getting commit from oid \", oid, \": \", err)\n\t}\n\tmUntil := make(map[string]struct{})\n\tfor _, u := range until {\n\t\tmUntil[u.String()] = struct{}{}\n\t}\n\treturn _gitCount(r, c, mUntil)\n\n}\nfunc _gitCount(r *git2go.Repository, c *git2go.Commit, until map[string]struct{}) int {\n\tvar s int\n\tfor i := uint(0); i < c.ParentCount(); i++ {\n\t\ts++\n\t\tpc := c.ParentId(i)\n\t\tif _, ok := until[pc.String()]; !ok {\n\t\t\ts += _gitCount(r, c.Parent(i), until)\n\t\t}\n\n\t}\n\treturn s\n}\n\ntype gitInfo struct {\n\tconflict bool\n\tchanged int\n\tstaged int\n\tuntracked int\n\tcommitsAhead int\n\tcommitsBehind int\n\tstashed int\n\tbranch string\n\tupstream bool\n}\n\ntype termInfo struct {\n\tpwd string\n\tuser string\n\thostname string\n\tvirtualEnv string\n\tawsRole string\n\tawsExpire time.Time\n\tgi gitInfo\n}\n\nfunc main() {\n\tvar err error\n\tvar debug bool\n\n\tflag.BoolVar(&debug, \"debug\", false, \"enable debug messages\")\n\tflag.Parse()\n\tlogger = log.New(os.Stderr, \"\", log.LstdFlags)\n\n\tif !debug {\n\t\tlogger.SetOutput(ioutil.Discard)\n\t}\n\tti := termInfo{}\n\t\/\/Get basicinfo\n\tti.pwd, err = os.Getwd()\n\tif err != nil {\n\t\tlog.Fatalln(\"Unable to get current path\", err)\n\t}\n\thome := os.Getenv(\"HOME\")\n\tif home != \"\" {\n\t\tti.pwd = strings.Replace(ti.pwd, home, \"~\", -1)\n\t}\n\tti.user = os.Getenv(\"USER\")\n\tti.hostname, err = os.Hostname()\n\tif err != nil {\n\t\tlog.Fatalln(\"Unable to get hostname\", err)\n\t}\n\n\t\/\/Get Python VirtualEnv info\n\tti.virtualEnv = getPythonVirtualEnv()\n\n\t\/\/AWS\n\tti.awsRole = getAwsInfo()\n\tiExpire, _ := strconv.ParseInt(os.Getenv(\"AWS_SESSION_EXPIRE\"), 10, 0)\n\tti.awsExpire = time.Unix(iExpire, int64(0))\n\n\t\/\/Get git information\n\t_ = git2go.Repository{}\n\n\tti.gi = getGitInfo()\n\n\tfmt.Println(makePrompt(ti))\n}\n\nfunc makePrompt(ti termInfo) string {\n\t\/\/Formatting\n\tvar userInfo, pwdInfo, virtualEnvInfo, awsInfo, gitInfo string\n\tpromptEnd := \"$\"\n\n\tif ti.user == \"root\" {\n\t\tuserInfo = termcolor.EscapedFormat(ti.hostname, termcolor.Bold, termcolor.FgRed)\n\t\tpromptEnd = \"#\"\n\t} else {\n\t\tuserInfo = termcolor.EscapedFormat(ti.hostname, termcolor.Bold, termcolor.FgGreen)\n\t}\n\tpwdInfo = termcolor.EscapedFormat(ti.pwd, termcolor.Bold, termcolor.FgBlue)\n\tvirtualEnvInfo = termcolor.EscapedFormat(ti.virtualEnv, termcolor.FgBlue)\n\tif ti.gi.branch != \"\" {\n\t\tgitInfo = \" \" + termcolor.EscapedFormat(ti.gi.branch, termcolor.FgMagenta)\n\t\tspace := \" \"\n\t\tif ti.gi.commitsBehind > 0 {\n\t\t\tgitInfo += space + downArrow + \"·\" + strconv.Itoa(ti.gi.commitsBehind)\n\t\t\tspace = \"\"\n\t\t}\n\t\tif ti.gi.commitsAhead > 0 {\n\t\t\tgitInfo += space + upArrow + \"·\" + strconv.Itoa(ti.gi.commitsAhead)\n\t\t\tspace = \"\"\n\t\t}\n\t\tif !ti.gi.upstream {\n\t\t\tgitInfo += space + \"*\"\n\t\t\tspace = \"\"\n\t\t}\n\t\tgitInfo += \"|\"\n\t\tif ti.gi.staged > 0 {\n\t\t\tgitInfo += termcolor.EscapedFormat(dot+strconv.Itoa(ti.gi.staged), termcolor.FgCyan)\n\t\t}\n\t\tif ti.gi.changed > 0 {\n\t\t\tgitInfo += termcolor.EscapedFormat(\"+\"+strconv.Itoa(ti.gi.changed), termcolor.FgCyan)\n\t\t}\n\t\tif ti.gi.untracked > 0 {\n\t\t\tgitInfo += termcolor.EscapedFormat(threePoints+strconv.Itoa(ti.gi.untracked), termcolor.FgCyan)\n\t\t}\n\t}\n\tif ti.awsRole != \"\" {\n\t\tt := termcolor.FgGreen\n\t\td := time.Until(ti.awsExpire).Seconds()\n\t\tif d < 0 {\n\t\t\tt = termcolor.FgRed\n\t\t} else if d < 600 {\n\t\t\tt = termcolor.FgYellow\n\t\t}\n\t\tawsInfo = termcolor.EscapedFormat(ti.awsRole, t) + \"|\"\n\t}\n\n\treturn fmt.Sprintf(\"%s[%s%s %s%s]%s \", virtualEnvInfo, awsInfo, userInfo, pwdInfo, gitInfo, promptEnd)\n}\n<|endoftext|>"} {"text":"<commit_before>package proxy\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\n\t\"github.com\/fsouza\/go-dockerclient\"\n\t. \"github.com\/weaveworks\/weave\/common\"\n)\n\ntype client struct {\n\tinterceptor\n\tDial func() (net.Conn, error)\n}\n\nfunc newClient(dial func() (net.Conn, error), i interceptor) *client {\n\treturn &client{\n\t\tinterceptor: i,\n\t\tDial: dial,\n\t}\n}\n\nfunc (c *client) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\terr := c.InterceptRequest(r)\n\tif err == docker.ErrNoSuchImage {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\tif err != nil {\n\t\thttp.Error(w, \"Unable to intercept request\", http.StatusInternalServerError)\n\t\tWarning.Print(\"Error intercepting request: \", err)\n\t\treturn\n\t}\n\n\tconn, err := c.Dial()\n\tif err != nil {\n\t\thttp.Error(w, \"Could not connect to target\", http.StatusInternalServerError)\n\t\tWarning.Print(err)\n\t\treturn\n\t}\n\tclient := httputil.NewClientConn(conn, nil)\n\tdefer client.Close()\n\n\tresp, err := client.Do(r)\n\tif err != nil && err != httputil.ErrPersistEOF {\n\t\thttp.Error(w, fmt.Sprintf(\"Could not make request to target: %v\", err), http.StatusInternalServerError)\n\t\tWarning.Print(\"Error forwarding request: \", err)\n\t\treturn\n\t}\n\terr = c.InterceptResponse(resp)\n\tif err != nil {\n\t\thttp.Error(w, \"Unable to intercept response\", http.StatusInternalServerError)\n\t\tWarning.Print(\"Error intercepting response: \", err)\n\t\treturn\n\t}\n\n\thdr := w.Header()\n\tfor k, vs := range resp.Header {\n\t\tfor _, v := range vs {\n\t\t\thdr.Add(k, v)\n\t\t}\n\t}\n\tDebug.Printf(\"Response from target: %s %v\", resp.Status, w.Header())\n\n\tif resp.Header.Get(\"Content-Type\") == \"application\/vnd.docker.raw-stream\" {\n\t\tdoRawStream(w, resp, client)\n\t} else if resp.TransferEncoding != nil && resp.TransferEncoding[0] == \"chunked\" {\n\t\tdoChunkedResponse(w, resp, client)\n\t} else {\n\t\tw.WriteHeader(resp.StatusCode)\n\t\tif _, err := io.Copy(w, resp.Body); err != nil {\n\t\t\tWarning.Print(err)\n\t\t}\n\t}\n}\n\nfunc doRawStream(w http.ResponseWriter, resp *http.Response, client *httputil.ClientConn) {\n\tdown, downBuf, up, rem, err := hijack(w, client)\n\tif err != nil {\n\t\tError.Fatal(w, \"Unable to hijack connection for raw stream mode\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer down.Close()\n\tdefer up.Close()\n\n\tif _, err := down.Write([]byte(\"HTTP\/1.1 200 OK\\n\")); err != nil {\n\t\tWarning.Print(err)\n\t\treturn\n\t}\n\n\tif err := resp.Header.Write(down); err != nil {\n\t\tWarning.Print(err)\n\t\treturn\n\t}\n\n\tif _, err := down.Write([]byte(\"\\n\")); err != nil {\n\t\tWarning.Print(err)\n\t\treturn\n\t}\n\n\tupDone := make(chan struct{})\n\tdownDone := make(chan struct{})\n\tgo copyStream(down, io.MultiReader(rem, up), upDone)\n\tgo copyStream(up, downBuf, downDone)\n\t<-upDone\n\t<-downDone\n}\n\nfunc copyStream(dst io.Writer, src io.Reader, done chan struct{}) {\n\tdefer close(done)\n\tif _, err := io.Copy(dst, src); err != nil {\n\t\tWarning.Print(err)\n\t}\n\tif c, ok := dst.(interface {\n\t\tCloseWrite() error\n\t}); ok {\n\t\tif err := c.CloseWrite(); err != nil {\n\t\t\tWarning.Printf(\"Error closing connection: %s\", err)\n\t\t}\n\t}\n}\n\nfunc doChunkedResponse(w http.ResponseWriter, resp *http.Response, client *httputil.ClientConn) {\n\t\/\/ Because we can't go back to request\/response after we\n\t\/\/ hijack the connection, we need to close it and make the\n\t\/\/ client open another.\n\tw.Header().Add(\"Connection\", \"close\")\n\tw.WriteHeader(resp.StatusCode)\n\n\tdown, _, up, rem, err := hijack(w, client)\n\tif err != nil {\n\t\tError.Fatal(\"Unable to hijack response stream for chunked response\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer up.Close()\n\tdefer down.Close()\n\t\/\/ Copy the chunked response body to downstream,\n\t\/\/ stopping at the end of the chunked section.\n\trawResponseBody := io.MultiReader(rem, up)\n\tif _, err := io.Copy(ioutil.Discard, httputil.NewChunkedReader(io.TeeReader(rawResponseBody, down))); err != nil {\n\t\tError.Fatal(\"Error copying chunked response body\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tresp.Trailer.Write(down)\n\t\/\/ a chunked response ends with a CRLF\n\tdown.Write([]byte(\"\\r\\n\"))\n}\n\nfunc hijack(w http.ResponseWriter, client *httputil.ClientConn) (down net.Conn, downBuf *bufio.ReadWriter, up net.Conn, rem io.Reader, err error) {\n\thj, ok := w.(http.Hijacker)\n\tif !ok {\n\t\terr = errors.New(\"Unable to cast to Hijack\")\n\t\treturn\n\t}\n\tdown, downBuf, err = hj.Hijack()\n\tif err != nil {\n\t\treturn\n\t}\n\tup, rem = client.Hijack()\n\treturn\n}\n<commit_msg>proxy should be sending error code to client, instead of exiting<commit_after>package proxy\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\n\t\"github.com\/fsouza\/go-dockerclient\"\n\t. \"github.com\/weaveworks\/weave\/common\"\n)\n\ntype client struct {\n\tinterceptor\n\tDial func() (net.Conn, error)\n}\n\nfunc newClient(dial func() (net.Conn, error), i interceptor) *client {\n\treturn &client{\n\t\tinterceptor: i,\n\t\tDial: dial,\n\t}\n}\n\nfunc (c *client) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\terr := c.InterceptRequest(r)\n\tif err == docker.ErrNoSuchImage {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\tif err != nil {\n\t\thttp.Error(w, \"Unable to intercept request\", http.StatusInternalServerError)\n\t\tWarning.Print(\"Error intercepting request: \", err)\n\t\treturn\n\t}\n\n\tconn, err := c.Dial()\n\tif err != nil {\n\t\thttp.Error(w, \"Could not connect to target\", http.StatusInternalServerError)\n\t\tWarning.Print(err)\n\t\treturn\n\t}\n\tclient := httputil.NewClientConn(conn, nil)\n\tdefer client.Close()\n\n\tresp, err := client.Do(r)\n\tif err != nil && err != httputil.ErrPersistEOF {\n\t\thttp.Error(w, fmt.Sprintf(\"Could not make request to target: %v\", err), http.StatusInternalServerError)\n\t\tWarning.Print(\"Error forwarding request: \", err)\n\t\treturn\n\t}\n\terr = c.InterceptResponse(resp)\n\tif err != nil {\n\t\thttp.Error(w, \"Unable to intercept response\", http.StatusInternalServerError)\n\t\tWarning.Print(\"Error intercepting response: \", err)\n\t\treturn\n\t}\n\n\thdr := w.Header()\n\tfor k, vs := range resp.Header {\n\t\tfor _, v := range vs {\n\t\t\thdr.Add(k, v)\n\t\t}\n\t}\n\tDebug.Printf(\"Response from target: %s %v\", resp.Status, w.Header())\n\n\tif resp.Header.Get(\"Content-Type\") == \"application\/vnd.docker.raw-stream\" {\n\t\tdoRawStream(w, resp, client)\n\t} else if resp.TransferEncoding != nil && resp.TransferEncoding[0] == \"chunked\" {\n\t\tdoChunkedResponse(w, resp, client)\n\t} else {\n\t\tw.WriteHeader(resp.StatusCode)\n\t\tif _, err := io.Copy(w, resp.Body); err != nil {\n\t\t\tWarning.Print(err)\n\t\t}\n\t}\n}\n\nfunc doRawStream(w http.ResponseWriter, resp *http.Response, client *httputil.ClientConn) {\n\tdown, downBuf, up, rem, err := hijack(w, client)\n\tif err != nil {\n\t\thttp.Error(w, \"Unable to hijack connection for raw stream mode\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer down.Close()\n\tdefer up.Close()\n\n\tif _, err := down.Write([]byte(\"HTTP\/1.1 200 OK\\n\")); err != nil {\n\t\tWarning.Print(err)\n\t\treturn\n\t}\n\n\tif err := resp.Header.Write(down); err != nil {\n\t\tWarning.Print(err)\n\t\treturn\n\t}\n\n\tif _, err := down.Write([]byte(\"\\n\")); err != nil {\n\t\tWarning.Print(err)\n\t\treturn\n\t}\n\n\tupDone := make(chan struct{})\n\tdownDone := make(chan struct{})\n\tgo copyStream(down, io.MultiReader(rem, up), upDone)\n\tgo copyStream(up, downBuf, downDone)\n\t<-upDone\n\t<-downDone\n}\n\nfunc copyStream(dst io.Writer, src io.Reader, done chan struct{}) {\n\tdefer close(done)\n\tif _, err := io.Copy(dst, src); err != nil {\n\t\tWarning.Print(err)\n\t}\n\tif c, ok := dst.(interface {\n\t\tCloseWrite() error\n\t}); ok {\n\t\tif err := c.CloseWrite(); err != nil {\n\t\t\tWarning.Printf(\"Error closing connection: %s\", err)\n\t\t}\n\t}\n}\n\nfunc doChunkedResponse(w http.ResponseWriter, resp *http.Response, client *httputil.ClientConn) {\n\t\/\/ Because we can't go back to request\/response after we\n\t\/\/ hijack the connection, we need to close it and make the\n\t\/\/ client open another.\n\tw.Header().Add(\"Connection\", \"close\")\n\tw.WriteHeader(resp.StatusCode)\n\n\tdown, _, up, rem, err := hijack(w, client)\n\tif err != nil {\n\t\thttp.Error(w, \"Unable to hijack response stream for chunked response\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer up.Close()\n\tdefer down.Close()\n\t\/\/ Copy the chunked response body to downstream,\n\t\/\/ stopping at the end of the chunked section.\n\trawResponseBody := io.MultiReader(rem, up)\n\tif _, err := io.Copy(ioutil.Discard, httputil.NewChunkedReader(io.TeeReader(rawResponseBody, down))); err != nil {\n\t\thttp.Error(w, \"Error copying chunked response body\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tresp.Trailer.Write(down)\n\t\/\/ a chunked response ends with a CRLF\n\tdown.Write([]byte(\"\\r\\n\"))\n}\n\nfunc hijack(w http.ResponseWriter, client *httputil.ClientConn) (down net.Conn, downBuf *bufio.ReadWriter, up net.Conn, rem io.Reader, err error) {\n\thj, ok := w.(http.Hijacker)\n\tif !ok {\n\t\terr = errors.New(\"Unable to cast to Hijack\")\n\t\treturn\n\t}\n\tdown, downBuf, err = hj.Hijack()\n\tif err != nil {\n\t\treturn\n\t}\n\tup, rem = client.Hijack()\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package json\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t. \"github.com\/polydawn\/go-xlate\/testutil\"\n\t. \"github.com\/polydawn\/go-xlate\/tok\"\n)\n\nfunc TestJsonSerializer(t *testing.T) {\n\ttt := []struct {\n\t\ttitle string\n\t\ttokenSeq []Token\n\t\texpect string\n\t}{\n\t\t{\n\t\t\t\"flat string\",\n\t\t\t[]Token{\n\t\t\t\t\"value\",\n\t\t\t},\n\t\t\t`\"value\"`,\n\t\t},\n\t\t{\n\t\t\t\"single row map\",\n\t\t\t[]Token{\n\t\t\t\tToken_MapOpen,\n\t\t\t\t\"key\",\n\t\t\t\t\"value\",\n\t\t\t\tToken_MapClose,\n\t\t\t},\n\t\t\t`{\"key\":\"value\"}`,\n\t\t},\n\t\t{\n\t\t\t\"duo row map\",\n\t\t\t[]Token{\n\t\t\t\tToken_MapOpen,\n\t\t\t\t\"key\",\n\t\t\t\t\"value\",\n\t\t\t\t\"k2\",\n\t\t\t\t\"v2\",\n\t\t\t\tToken_MapClose,\n\t\t\t},\n\t\t\t`{\"key\":\"value\",\"k2\":\"v2\"}`,\n\t\t},\n\t\t{\n\t\t\t\"single entry array\",\n\t\t\t[]Token{\n\t\t\t\tToken_ArrOpen,\n\t\t\t\t\"value\",\n\t\t\t\tToken_ArrClose,\n\t\t\t},\n\t\t\t`[\"value\"]`,\n\t\t},\n\t\t{\n\t\t\t\"duo entry array\",\n\t\t\t[]Token{\n\t\t\t\tToken_ArrOpen,\n\t\t\t\t\"value\",\n\t\t\t\t\"v2\",\n\t\t\t\tToken_ArrClose,\n\t\t\t},\n\t\t\t`[\"value\",\"v2\"]`,\n\t\t},\n\t\t{\n\t\t\t\"empty map\",\n\t\t\t[]Token{\n\t\t\t\tToken_MapOpen,\n\t\t\t\tToken_MapClose,\n\t\t\t},\n\t\t\t`{}`,\n\t\t},\n\t\t{\n\t\t\t\"empty array\",\n\t\t\t[]Token{\n\t\t\t\tToken_ArrOpen,\n\t\t\t\tToken_ArrClose,\n\t\t\t},\n\t\t\t`[]`,\n\t\t},\n\t\t{\n\t\t\t\"array nested in map as non-first and final entry\",\n\t\t\t[]Token{\n\t\t\t\tToken_MapOpen,\n\t\t\t\t\"k1\",\n\t\t\t\t\"v1\",\n\t\t\t\t\"ke\",\n\t\t\t\tToken_ArrOpen,\n\t\t\t\t\"oh\",\n\t\t\t\t\"whee\",\n\t\t\t\t\"wow\",\n\t\t\t\tToken_ArrClose,\n\t\t\t\tToken_MapClose,\n\t\t\t},\n\t\t\t`{\"k1\":\"v1\",\"ke\":[\"oh\",\"whee\",\"wow\"]}`,\n\t\t},\n\t\t{\n\t\t\t\"array nested in map as first and non-final entry\",\n\t\t\t[]Token{\n\t\t\t\tToken_MapOpen,\n\t\t\t\t\"ke\",\n\t\t\t\tToken_ArrOpen,\n\t\t\t\t\"oh\",\n\t\t\t\t\"whee\",\n\t\t\t\t\"wow\",\n\t\t\t\tToken_ArrClose,\n\t\t\t\t\"k1\",\n\t\t\t\t\"v1\",\n\t\t\t\tToken_MapClose,\n\t\t\t},\n\t\t\t`{\"ke\":[\"oh\",\"whee\",\"wow\"],\"k1\":\"v1\"}`,\n\t\t},\n\t\t{\n\t\t\t\"maps nested in array\",\n\t\t\t[]Token{\n\t\t\t\tToken_ArrOpen,\n\t\t\t\tToken_MapOpen,\n\t\t\t\t\"k\",\n\t\t\t\t\"v\",\n\t\t\t\tToken_MapClose,\n\t\t\t\t\"whee\",\n\t\t\t\tToken_MapOpen,\n\t\t\t\t\"k1\",\n\t\t\t\t\"v1\",\n\t\t\t\tToken_MapClose,\n\t\t\t\tToken_ArrClose,\n\t\t\t},\n\t\t\t`[{\"k\":\"v\"},\"whee\",{\"k1\":\"v1\"}]`,\n\t\t},\n\t\t{\n\t\t\t\"arrays in arrays in arrays\",\n\t\t\t[]Token{\n\t\t\t\tToken_ArrOpen,\n\t\t\t\tToken_ArrOpen,\n\t\t\t\tToken_ArrOpen,\n\t\t\t\tToken_ArrClose,\n\t\t\t\tToken_ArrClose,\n\t\t\t\tToken_ArrClose,\n\t\t\t},\n\t\t\t`[[[]]]`,\n\t\t},\n\t}\n\tfor _, tr := range tt {\n\t\t\/\/ Set it up.\n\t\tbuf := &bytes.Buffer{}\n\t\tsink := NewSerializer(buf)\n\n\t\t\/\/ Run steps.\n\t\tvar done bool\n\t\tvar err error\n\t\tfor n, tok := range tr.tokenSeq {\n\t\t\tdone, err = sink.Step(&tok)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"test %q step %d (inputting %#v) errored: %s\", tr.title, n, tok, err)\n\t\t\t}\n\t\t\tif done && n != len(tr.tokenSeq)-1 {\n\t\t\t\tt.Errorf(\"test %q done early! on step %d out of %d tokens\", tr.title, n, len(tr.tokenSeq))\n\t\t\t}\n\t\t}\n\t\tif !done {\n\t\t\tt.Errorf(\"test %q still not done after %d tokens!\", tr.title, len(tr.tokenSeq))\n\t\t}\n\n\t\t\/\/ Assert final result.\n\t\tAssert(t, tr.title, tr.expect, buf.String())\n\t}\n}\n<commit_msg>Add test coverage for json serializer maps-within-maps.<commit_after>package json\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t. \"github.com\/polydawn\/go-xlate\/testutil\"\n\t. \"github.com\/polydawn\/go-xlate\/tok\"\n)\n\nfunc TestJsonSerializer(t *testing.T) {\n\ttt := []struct {\n\t\ttitle string\n\t\ttokenSeq []Token\n\t\texpect string\n\t}{\n\t\t{\n\t\t\t\"flat string\",\n\t\t\t[]Token{\n\t\t\t\t\"value\",\n\t\t\t},\n\t\t\t`\"value\"`,\n\t\t},\n\t\t{\n\t\t\t\"single row map\",\n\t\t\t[]Token{\n\t\t\t\tToken_MapOpen,\n\t\t\t\t\"key\",\n\t\t\t\t\"value\",\n\t\t\t\tToken_MapClose,\n\t\t\t},\n\t\t\t`{\"key\":\"value\"}`,\n\t\t},\n\t\t{\n\t\t\t\"duo row map\",\n\t\t\t[]Token{\n\t\t\t\tToken_MapOpen,\n\t\t\t\t\"key\",\n\t\t\t\t\"value\",\n\t\t\t\t\"k2\",\n\t\t\t\t\"v2\",\n\t\t\t\tToken_MapClose,\n\t\t\t},\n\t\t\t`{\"key\":\"value\",\"k2\":\"v2\"}`,\n\t\t},\n\t\t{\n\t\t\t\"single entry array\",\n\t\t\t[]Token{\n\t\t\t\tToken_ArrOpen,\n\t\t\t\t\"value\",\n\t\t\t\tToken_ArrClose,\n\t\t\t},\n\t\t\t`[\"value\"]`,\n\t\t},\n\t\t{\n\t\t\t\"duo entry array\",\n\t\t\t[]Token{\n\t\t\t\tToken_ArrOpen,\n\t\t\t\t\"value\",\n\t\t\t\t\"v2\",\n\t\t\t\tToken_ArrClose,\n\t\t\t},\n\t\t\t`[\"value\",\"v2\"]`,\n\t\t},\n\t\t{\n\t\t\t\"empty map\",\n\t\t\t[]Token{\n\t\t\t\tToken_MapOpen,\n\t\t\t\tToken_MapClose,\n\t\t\t},\n\t\t\t`{}`,\n\t\t},\n\t\t{\n\t\t\t\"empty array\",\n\t\t\t[]Token{\n\t\t\t\tToken_ArrOpen,\n\t\t\t\tToken_ArrClose,\n\t\t\t},\n\t\t\t`[]`,\n\t\t},\n\t\t{\n\t\t\t\"array nested in map as non-first and final entry\",\n\t\t\t[]Token{\n\t\t\t\tToken_MapOpen,\n\t\t\t\t\"k1\",\n\t\t\t\t\"v1\",\n\t\t\t\t\"ke\",\n\t\t\t\tToken_ArrOpen,\n\t\t\t\t\"oh\",\n\t\t\t\t\"whee\",\n\t\t\t\t\"wow\",\n\t\t\t\tToken_ArrClose,\n\t\t\t\tToken_MapClose,\n\t\t\t},\n\t\t\t`{\"k1\":\"v1\",\"ke\":[\"oh\",\"whee\",\"wow\"]}`,\n\t\t},\n\t\t{\n\t\t\t\"array nested in map as first and non-final entry\",\n\t\t\t[]Token{\n\t\t\t\tToken_MapOpen,\n\t\t\t\t\"ke\",\n\t\t\t\tToken_ArrOpen,\n\t\t\t\t\"oh\",\n\t\t\t\t\"whee\",\n\t\t\t\t\"wow\",\n\t\t\t\tToken_ArrClose,\n\t\t\t\t\"k1\",\n\t\t\t\t\"v1\",\n\t\t\t\tToken_MapClose,\n\t\t\t},\n\t\t\t`{\"ke\":[\"oh\",\"whee\",\"wow\"],\"k1\":\"v1\"}`,\n\t\t},\n\t\t{\n\t\t\t\"maps nested in array\",\n\t\t\t[]Token{\n\t\t\t\tToken_ArrOpen,\n\t\t\t\tToken_MapOpen,\n\t\t\t\t\"k\",\n\t\t\t\t\"v\",\n\t\t\t\tToken_MapClose,\n\t\t\t\t\"whee\",\n\t\t\t\tToken_MapOpen,\n\t\t\t\t\"k1\",\n\t\t\t\t\"v1\",\n\t\t\t\tToken_MapClose,\n\t\t\t\tToken_ArrClose,\n\t\t\t},\n\t\t\t`[{\"k\":\"v\"},\"whee\",{\"k1\":\"v1\"}]`,\n\t\t},\n\t\t{\n\t\t\t\"arrays in arrays in arrays\",\n\t\t\t[]Token{\n\t\t\t\tToken_ArrOpen,\n\t\t\t\tToken_ArrOpen,\n\t\t\t\tToken_ArrOpen,\n\t\t\t\tToken_ArrClose,\n\t\t\t\tToken_ArrClose,\n\t\t\t\tToken_ArrClose,\n\t\t\t},\n\t\t\t`[[[]]]`,\n\t\t},\n\t\t{\n\t\t\t\"maps nested in maps\",\n\t\t\t[]Token{\n\t\t\t\tToken_MapOpen,\n\t\t\t\t\"k\",\n\t\t\t\tToken_MapOpen,\n\t\t\t\t\"k2\",\n\t\t\t\t\"v2\",\n\t\t\t\tToken_MapClose,\n\t\t\t\tToken_MapClose,\n\t\t\t},\n\t\t\t`{\"k\":{\"k2\":\"v2\"}}`,\n\t\t},\n\t}\n\tfor _, tr := range tt {\n\t\t\/\/ Set it up.\n\t\tbuf := &bytes.Buffer{}\n\t\tsink := NewSerializer(buf)\n\n\t\t\/\/ Run steps.\n\t\tvar done bool\n\t\tvar err error\n\t\tfor n, tok := range tr.tokenSeq {\n\t\t\tdone, err = sink.Step(&tok)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"test %q step %d (inputting %#v) errored: %s\", tr.title, n, tok, err)\n\t\t\t}\n\t\t\tif done && n != len(tr.tokenSeq)-1 {\n\t\t\t\tt.Errorf(\"test %q done early! on step %d out of %d tokens\", tr.title, n, len(tr.tokenSeq))\n\t\t\t}\n\t\t}\n\t\tif !done {\n\t\t\tt.Errorf(\"test %q still not done after %d tokens!\", tr.title, len(tr.tokenSeq))\n\t\t}\n\n\t\t\/\/ Assert final result.\n\t\tAssert(t, tr.title, tr.expect, buf.String())\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/plugin\"\n\t\"github.com\/hashicorp\/terraform-provider-kubernetes\/kubernetes\"\n)\n\nfunc main() {\n\tplugin.Serve(&plugin.ServeOpts{\n\t\tProviderFunc: kubernetes.Provider})\n}\n<commit_msg>Add support for stand-alone debug mode (launch with -debug argument) (#1277)<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/plugin\"\n\t\"github.com\/hashicorp\/terraform-provider-kubernetes\/kubernetes\"\n)\n\nfunc main() {\n\tdebugFlag := flag.Bool(\"debug\", false, \"Start provider in stand-alone debug mode.\")\n\tflag.Parse()\n\n\tserveOpts := &plugin.ServeOpts{\n\t\tProviderFunc: kubernetes.Provider,\n\t}\n\tif debugFlag != nil && *debugFlag {\n\t\tplugin.Debug(context.Background(), \"registry.terraform.io\/hashicorp\/kubernetes\", serveOpts)\n\t} else {\n\t\tplugin.Serve(serveOpts)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package proxy\n\nimport (\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/ngaut\/arena\"\n\tstats \"github.com\/ngaut\/gostats\"\n\tlog \"github.com\/ngaut\/logging\"\n\t\"github.com\/ngaut\/tokenlimiter\"\n\t\"github.com\/wandoulabs\/cm\/config\"\n\t\"github.com\/wandoulabs\/cm\/mysql\"\n\t\"github.com\/wandoulabs\/cm\/vt\/tabletserver\"\n)\n\nvar (\n\tbaseConnId uint32 = 10000\n)\n\ntype Server struct {\n\tconfigFile string\n\tcfg *config.Config\n\taddr string\n\tuser string\n\tpassword string\n\tlistener net.Listener\n\tshards map[string]*Shard\n\tschemas map[string]*Schema\n\tautoSchamas map[string]*tabletserver.SchemaInfo\n\trwlock *sync.RWMutex\n\ttaskQ chan *execTask\n\tconcurrentLimiter *tokenlimiter.TokenLimiter\n\n\tcounter *stats.Counters\n\n\tclients map[uint32]*Conn\n}\n\ntype IServer interface {\n\tGetSchema(string) *Schema\n\tGetRowCacheSchema(string) (*tabletserver.SchemaInfo, bool)\n\tCfgGetPwd() string\n\tCfgIsSkipAuth() bool\n\tGetToken() *tokenlimiter.Token\n\tReleaseToken(token *tokenlimiter.Token)\n\tGetRWlock() *sync.RWMutex\n\tGetShard(name string) *Shard\n\tGetShardNames() []string\n\tAsynExec(task *execTask)\n\tIncCounter(key string)\n\tDecCounter(key string)\n}\n\nfunc (s *Server) IncCounter(key string) {\n\ts.counter.Add(key, 1)\n}\n\nfunc (s *Server) DecCounter(key string) {\n\ts.counter.Add(key, -1)\n}\n\nfunc (s *Server) GetToken() *tokenlimiter.Token {\n\treturn s.concurrentLimiter.Get()\n}\n\nfunc (s *Server) ReleaseToken(token *tokenlimiter.Token) {\n\ts.concurrentLimiter.Put(token)\n}\n\nfunc (s *Server) GetShard(name string) *Shard {\n\treturn s.shards[name]\n}\n\nfunc (s *Server) GetRowCacheSchema(db string) (*tabletserver.SchemaInfo, bool) {\n\tsi, ok := s.autoSchamas[db]\n\treturn si, ok\n}\n\nfunc (s *Server) GetShardNames() []string {\n\tret := make([]string, 0, len(s.shards))\n\tfor name, _ := range s.shards {\n\t\tret = append(ret, name)\n\t}\n\n\treturn ret\n}\n\nfunc (s *Server) parseShards() error {\n\tcfg := s.cfg\n\ts.shards = make(map[string]*Shard, len(cfg.Shards))\n\n\tfor _, v := range cfg.Shards {\n\t\tif _, ok := s.shards[v.Name]; ok {\n\t\t\treturn errors.Errorf(\"duplicate node [%s].\", v.Name)\n\t\t}\n\n\t\tn, err := s.parseShard(v)\n\t\tif err != nil {\n\t\t\treturn errors.Trace(err)\n\t\t}\n\n\t\ts.shards[v.Name] = n\n\t}\n\n\treturn nil\n}\n\nfunc (s *Server) parseShard(cfg config.ShardConfig) (*Shard, error) {\n\tn := &Shard{\n\t\tserver: s,\n\t\tcfg: cfg,\n\t}\n\tif len(cfg.Master) == 0 {\n\t\treturn nil, errors.Errorf(\"must setting master MySQL node.\")\n\t}\n\n\tvar err error\n\tif n.master, err = n.openDB(cfg.Master); err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\treturn n, nil\n}\n\nfunc (s *Server) newConn(co net.Conn) *Conn {\n\tlog.Info(\"newConn\", co.RemoteAddr().String())\n\tc := &Conn{\n\t\tc: co,\n\t\tpkg: mysql.NewPacketIO(co),\n\t\tserver: s,\n\t\tconnectionId: atomic.AddUint32(&baseConnId, 1),\n\t\tstatus: mysql.SERVER_STATUS_AUTOCOMMIT,\n\t\tcollation: mysql.DEFAULT_COLLATION_ID,\n\t\tcharset: mysql.DEFAULT_CHARSET,\n\t\talloc: arena.NewArenaAllocator(32 * 1024),\n\t\ttxConns: make(map[string]*mysql.SqlConn),\n\t}\n\tc.salt, _ = mysql.RandomBuf(20)\n\n\treturn c\n}\n\nfunc (s *Server) GetRWlock() *sync.RWMutex {\n\treturn s.rwlock\n}\n\nfunc (s *Server) AsynExec(task *execTask) {\n\ts.taskQ <- task\n}\n\nfunc (s *Server) CfgIsSkipAuth() bool {\n\treturn s.cfg.SkipAuth\n}\n\nfunc (s *Server) CfgGetPwd() string {\n\treturn s.cfg.Password\n}\n\nfunc (s *Server) loadSchemaInfo() error {\n\tif err := s.parseShards(); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tif err := s.parseSchemas(); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tfor _, v := range s.cfg.Schemas {\n\t\trc := v.RulesConifg\n\t\tvar overrides []tabletserver.SchemaOverride\n\t\tfor _, sc := range rc.ShardRule {\n\t\t\tor := tabletserver.SchemaOverride{Name: sc.Table}\n\t\t\tpks := strings.Split(sc.ShardingKey, \",\")\n\t\t\tfor _, pk := range pks {\n\t\t\t\tor.PKColumns = append(or.PKColumns, strings.TrimSpace(pk))\n\t\t\t}\n\t\t\tor.Cache = &tabletserver.OverrideCacheDesc{Type: sc.RowCacheType, Prefix: or.Name, Table: or.Name}\n\t\t\toverrides = append(overrides, or)\n\t\t}\n\n\t\t\/\/fix hard code node\n\t\tsc := rc.ShardRule[0]\n\t\ts.autoSchamas[v.DB] = tabletserver.NewSchemaInfo(s.cfg.RowCacheConf, s.cfg.Shards[0].Master, sc.User, sc.Password, v.DB, overrides)\n\t}\n\n\treturn nil\n}\n\nfunc makeServer(configFile string) *Server {\n\tcfg, err := config.ParseConfigFile(configFile)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn nil\n\t}\n\n\tlog.Warningf(\"%#v\", cfg)\n\n\ts := &Server{\n\t\tconfigFile: configFile,\n\t\tcfg: cfg,\n\t\taddr: cfg.Addr,\n\t\tuser: cfg.User,\n\t\tpassword: cfg.Password,\n\t\tautoSchamas: make(map[string]*tabletserver.SchemaInfo),\n\t\ttaskQ: make(chan *execTask, 100),\n\t\tconcurrentLimiter: tokenlimiter.NewTokenLimiter(100),\n\t\tcounter: stats.NewCounters(\"stats\"),\n\t\trwlock: &sync.RWMutex{},\n\t\tclients: make(map[uint32]*Conn),\n\t}\n\n\tf := func(wg *sync.WaitGroup, rs []interface{}, i int, co *mysql.SqlConn, sql string, args []interface{}) {\n\t\tr, err := co.Execute(sql, args...)\n\t\tif err != nil {\n\t\t\tlog.Warning(err)\n\t\t\trs[i] = err\n\t\t} else {\n\t\t\trs[i] = r\n\t\t}\n\n\t\twg.Done()\n\t}\n\n\tfor i := 0; i < 100; i++ {\n\t\tgo func() {\n\t\t\tfor task := range s.taskQ {\n\t\t\t\tf(task.wg, task.rs, task.idx, task.co, task.sql, task.args)\n\t\t\t}\n\t\t}()\n\t}\n\n\treturn s\n}\n\nfunc NewServer(configFile string) (*Server, error) {\n\ts := makeServer(configFile)\n\ts.loadSchemaInfo()\n\n\tnetProto := \"tcp\"\n\tif strings.Contains(netProto, \"\/\") {\n\t\tnetProto = \"unix\"\n\t}\n\n\tvar err error\n\ts.listener, err = net.Listen(netProto, s.addr)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tlog.Infof(\"Server run MySql Protocol Listen(%s) at [%s]\", netProto, s.addr)\n\treturn s, nil\n}\n\nfunc (s *Server) cleanup() {\n\tfor _, si := range s.autoSchamas {\n\t\tsi.Close()\n\t}\n}\n\nfunc (s *Server) resetSchemaInfo() error {\n\tfor _, c := range s.clients {\n\t\tif len(c.txConns) > 0 {\n\t\t\treturn errors.Errorf(\"transaction exist\")\n\t\t}\n\t}\n\n\ts.cleanup()\n\ts.autoSchamas = make(map[string]*tabletserver.SchemaInfo)\n\tfor _, n := range s.shards {\n\t\tn.Close()\n\t}\n\n\ts.shards = nil\n\ts.schemas = nil\n\n\tcfg, err := config.ParseConfigFile(s.configFile)\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\n\tlog.Warningf(\"%#v\", cfg)\n\n\ts.cfg = cfg\n\ts.loadSchemaInfo()\n\treturn nil\n}\n\nfunc (s *Server) HandleReload(w http.ResponseWriter, req *http.Request) {\n\tlog.Warning(\"trying to reload config\")\n\ts.rwlock.Lock()\n\tdefer s.rwlock.Unlock()\n\n\tif err := s.resetSchemaInfo(); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\treturn\n\t}\n\n\tio.WriteString(w, \"ok\")\n}\n\nfunc (s *Server) Run() error {\n\tfor {\n\t\tconn, err := s.listener.Accept()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"accept error %s\", err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tgo s.onConn(conn)\n\t}\n\n\treturn nil\n}\n\nfunc (s *Server) Close() {\n\ts.rwlock.Lock()\n\tdefer s.rwlock.Unlock()\n\n\tif s.listener != nil {\n\t\ts.listener.Close()\n\t\ts.listener = nil\n\t}\n\n\ts.cleanup()\n}\n\nfunc (s *Server) onConn(c net.Conn) {\n\tconn := s.newConn(c)\n\tif err := conn.Handshake(); err != nil {\n\t\tlog.Errorf(\"handshake error %s\", errors.ErrorStack(err))\n\t\tc.Close()\n\t\treturn\n\t}\n\n\tconst key = \"connections\"\n\n\ts.IncCounter(key)\n\tdefer s.DecCounter(key)\n\n\ts.rwlock.Lock()\n\ts.clients[conn.connectionId] = conn\n\ts.rwlock.Unlock()\n\n\tconn.Run()\n}\n<commit_msg>fix auth bug<commit_after>package proxy\n\nimport (\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/ngaut\/arena\"\n\tstats \"github.com\/ngaut\/gostats\"\n\tlog \"github.com\/ngaut\/logging\"\n\t\"github.com\/ngaut\/tokenlimiter\"\n\t\"github.com\/wandoulabs\/cm\/config\"\n\t\"github.com\/wandoulabs\/cm\/mysql\"\n\t\"github.com\/wandoulabs\/cm\/vt\/tabletserver\"\n)\n\nvar (\n\tbaseConnId uint32 = 10000\n)\n\ntype Server struct {\n\tconfigFile string\n\tcfg *config.Config\n\taddr string\n\tuser string\n\tpassword string\n\tlistener net.Listener\n\tshards map[string]*Shard\n\tschemas map[string]*Schema\n\tautoSchamas map[string]*tabletserver.SchemaInfo\n\trwlock *sync.RWMutex\n\ttaskQ chan *execTask\n\tconcurrentLimiter *tokenlimiter.TokenLimiter\n\n\tcounter *stats.Counters\n\n\tclients map[uint32]*Conn\n}\n\ntype IServer interface {\n\tGetSchema(string) *Schema\n\tGetRowCacheSchema(string) (*tabletserver.SchemaInfo, bool)\n\tCfgGetPwd() string\n\tCfgIsSkipAuth() bool\n\tGetToken() *tokenlimiter.Token\n\tReleaseToken(token *tokenlimiter.Token)\n\tGetRWlock() *sync.RWMutex\n\tGetShard(name string) *Shard\n\tGetShardNames() []string\n\tAsynExec(task *execTask)\n\tIncCounter(key string)\n\tDecCounter(key string)\n}\n\nfunc (s *Server) IncCounter(key string) {\n\ts.counter.Add(key, 1)\n}\n\nfunc (s *Server) DecCounter(key string) {\n\ts.counter.Add(key, -1)\n}\n\nfunc (s *Server) GetToken() *tokenlimiter.Token {\n\treturn s.concurrentLimiter.Get()\n}\n\nfunc (s *Server) ReleaseToken(token *tokenlimiter.Token) {\n\ts.concurrentLimiter.Put(token)\n}\n\nfunc (s *Server) GetShard(name string) *Shard {\n\treturn s.shards[name]\n}\n\nfunc (s *Server) GetRowCacheSchema(db string) (*tabletserver.SchemaInfo, bool) {\n\tsi, ok := s.autoSchamas[db]\n\treturn si, ok\n}\n\nfunc (s *Server) GetShardNames() []string {\n\tret := make([]string, 0, len(s.shards))\n\tfor name, _ := range s.shards {\n\t\tret = append(ret, name)\n\t}\n\n\treturn ret\n}\n\nfunc (s *Server) parseShards() error {\n\tcfg := s.cfg\n\ts.shards = make(map[string]*Shard, len(cfg.Shards))\n\n\tfor _, v := range cfg.Shards {\n\t\tif _, ok := s.shards[v.Name]; ok {\n\t\t\treturn errors.Errorf(\"duplicate node [%s].\", v.Name)\n\t\t}\n\n\t\tn, err := s.parseShard(v)\n\t\tif err != nil {\n\t\t\treturn errors.Trace(err)\n\t\t}\n\n\t\ts.shards[v.Name] = n\n\t}\n\n\treturn nil\n}\n\nfunc (s *Server) parseShard(cfg config.ShardConfig) (*Shard, error) {\n\tn := &Shard{\n\t\tserver: s,\n\t\tcfg: cfg,\n\t}\n\tif len(cfg.Master) == 0 {\n\t\treturn nil, errors.Errorf(\"must setting master MySQL node.\")\n\t}\n\n\tvar err error\n\tif n.master, err = n.openDB(cfg.Master); err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\treturn n, nil\n}\n\nfunc (s *Server) newConn(co net.Conn) *Conn {\n\tlog.Info(\"newConn\", co.RemoteAddr().String())\n\tc := &Conn{\n\t\tc: co,\n\t\tpkg: mysql.NewPacketIO(co),\n\t\tserver: s,\n\t\tconnectionId: atomic.AddUint32(&baseConnId, 1),\n\t\tstatus: mysql.SERVER_STATUS_AUTOCOMMIT,\n\t\tcollation: mysql.DEFAULT_COLLATION_ID,\n\t\tcharset: mysql.DEFAULT_CHARSET,\n\t\talloc: arena.NewArenaAllocator(32 * 1024),\n\t\ttxConns: make(map[string]*mysql.SqlConn),\n\t}\n\tc.salt, _ = mysql.RandomBuf(20)\n\n\treturn c\n}\n\nfunc (s *Server) GetRWlock() *sync.RWMutex {\n\treturn s.rwlock\n}\n\nfunc (s *Server) AsynExec(task *execTask) {\n\ts.taskQ <- task\n}\n\nfunc (s *Server) CfgIsSkipAuth() bool {\n\treturn s.cfg.SkipAuth\n}\n\nfunc (s *Server) CfgGetPwd() string {\n\treturn s.cfg.Password\n}\n\nfunc (s *Server) loadSchemaInfo() error {\n\tif err := s.parseShards(); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tif err := s.parseSchemas(); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tfor _, v := range s.cfg.Schemas {\n\t\trc := v.RulesConifg\n\t\tvar overrides []tabletserver.SchemaOverride\n\t\tfor _, sc := range rc.ShardRule {\n\t\t\tor := tabletserver.SchemaOverride{Name: sc.Table}\n\t\t\tpks := strings.Split(sc.ShardingKey, \",\")\n\t\t\tfor _, pk := range pks {\n\t\t\t\tor.PKColumns = append(or.PKColumns, strings.TrimSpace(pk))\n\t\t\t}\n\t\t\tor.Cache = &tabletserver.OverrideCacheDesc{Type: sc.RowCacheType, Prefix: or.Name, Table: or.Name}\n\t\t\toverrides = append(overrides, or)\n\t\t}\n\n\t\t\/\/fix hard code node\n\t\tsc := s.cfg.Shards[0]\n\t\ts.autoSchamas[v.DB] = tabletserver.NewSchemaInfo(s.cfg.RowCacheConf, s.cfg.Shards[0].Master, sc.User, sc.Password, v.DB, overrides)\n\t}\n\n\treturn nil\n}\n\nfunc makeServer(configFile string) *Server {\n\tcfg, err := config.ParseConfigFile(configFile)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn nil\n\t}\n\n\tlog.Warningf(\"%#v\", cfg)\n\n\ts := &Server{\n\t\tconfigFile: configFile,\n\t\tcfg: cfg,\n\t\taddr: cfg.Addr,\n\t\tuser: cfg.User,\n\t\tpassword: cfg.Password,\n\t\tautoSchamas: make(map[string]*tabletserver.SchemaInfo),\n\t\ttaskQ: make(chan *execTask, 100),\n\t\tconcurrentLimiter: tokenlimiter.NewTokenLimiter(100),\n\t\tcounter: stats.NewCounters(\"stats\"),\n\t\trwlock: &sync.RWMutex{},\n\t\tclients: make(map[uint32]*Conn),\n\t}\n\n\tf := func(wg *sync.WaitGroup, rs []interface{}, i int, co *mysql.SqlConn, sql string, args []interface{}) {\n\t\tr, err := co.Execute(sql, args...)\n\t\tif err != nil {\n\t\t\tlog.Warning(err)\n\t\t\trs[i] = err\n\t\t} else {\n\t\t\trs[i] = r\n\t\t}\n\n\t\twg.Done()\n\t}\n\n\tfor i := 0; i < 100; i++ {\n\t\tgo func() {\n\t\t\tfor task := range s.taskQ {\n\t\t\t\tf(task.wg, task.rs, task.idx, task.co, task.sql, task.args)\n\t\t\t}\n\t\t}()\n\t}\n\n\treturn s\n}\n\nfunc NewServer(configFile string) (*Server, error) {\n\ts := makeServer(configFile)\n\ts.loadSchemaInfo()\n\n\tnetProto := \"tcp\"\n\tif strings.Contains(netProto, \"\/\") {\n\t\tnetProto = \"unix\"\n\t}\n\n\tvar err error\n\ts.listener, err = net.Listen(netProto, s.addr)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tlog.Infof(\"Server run MySql Protocol Listen(%s) at [%s]\", netProto, s.addr)\n\treturn s, nil\n}\n\nfunc (s *Server) cleanup() {\n\tfor _, si := range s.autoSchamas {\n\t\tsi.Close()\n\t}\n}\n\nfunc (s *Server) resetSchemaInfo() error {\n\tfor _, c := range s.clients {\n\t\tif len(c.txConns) > 0 {\n\t\t\treturn errors.Errorf(\"transaction exist\")\n\t\t}\n\t}\n\n\ts.cleanup()\n\ts.autoSchamas = make(map[string]*tabletserver.SchemaInfo)\n\tfor _, n := range s.shards {\n\t\tn.Close()\n\t}\n\n\ts.shards = nil\n\ts.schemas = nil\n\n\tcfg, err := config.ParseConfigFile(s.configFile)\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\n\tlog.Warningf(\"%#v\", cfg)\n\n\ts.cfg = cfg\n\ts.loadSchemaInfo()\n\treturn nil\n}\n\nfunc (s *Server) HandleReload(w http.ResponseWriter, req *http.Request) {\n\tlog.Warning(\"trying to reload config\")\n\ts.rwlock.Lock()\n\tdefer s.rwlock.Unlock()\n\n\tif err := s.resetSchemaInfo(); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusConflict)\n\t\treturn\n\t}\n\n\tio.WriteString(w, \"ok\")\n}\n\nfunc (s *Server) Run() error {\n\tfor {\n\t\tconn, err := s.listener.Accept()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"accept error %s\", err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tgo s.onConn(conn)\n\t}\n\n\treturn nil\n}\n\nfunc (s *Server) Close() {\n\ts.rwlock.Lock()\n\tdefer s.rwlock.Unlock()\n\n\tif s.listener != nil {\n\t\ts.listener.Close()\n\t\ts.listener = nil\n\t}\n\n\ts.cleanup()\n}\n\nfunc (s *Server) onConn(c net.Conn) {\n\tconn := s.newConn(c)\n\tif err := conn.Handshake(); err != nil {\n\t\tlog.Errorf(\"handshake error %s\", errors.ErrorStack(err))\n\t\tc.Close()\n\t\treturn\n\t}\n\n\tconst key = \"connections\"\n\n\ts.IncCounter(key)\n\tdefer s.DecCounter(key)\n\n\ts.rwlock.Lock()\n\ts.clients[conn.connectionId] = conn\n\ts.rwlock.Unlock()\n\n\tconn.Run()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/fatih\/color\"\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"governator\"\n\tapp.Action = run\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"application, a\",\n\t\t\tEnvVar: \"GOVERNATOR_APPLICATION\",\n\t\t\tUsage: \"Name of the application that failed us\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"exit-code, e\",\n\t\t\tEnvVar: \"GOVERNATOR_EXIT_CODE\",\n\t\t\tUsage: \"Code that the service failed with\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"service, s\",\n\t\t\tEnvVar: \"GOVERNATOR_SERVICE\",\n\t\t\tUsage: \"Name of the service that is specifically at fault\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"uri, u\",\n\t\t\tEnvVar: \"GOVERNATOR_URI\",\n\t\t\tUsage: \"Uri to POST to with a cancellation notice\",\n\t\t},\n\t}\n\tapp.Run(os.Args)\n}\n\nfunc run(context *cli.Context) {\n\tapplicationName := context.String(\"application\")\n\texitCode := context.String(\"exit-code\")\n\tserviceName := context.String(\"service\")\n\turi := context.String(\"uri\")\n\n\tif applicationName == \"\" || exitCode == \"\" || serviceName == \"\" || uri == \"\" {\n\t\tcli.ShowAppHelp(context)\n\t\tcolor.Red(\" --application, --exit-code, --service, and --uri are all required\")\n\t\tos.Exit(1)\n\t}\n\n\tres, err := http.PostForm(uri, url.Values{\n\t\t\"applicationName\": {applicationName},\n\t\t\"exitCode\": {exitCode},\n\t\t\"serviceName\": {serviceName},\n\t})\n\tif err != nil {\n\t\tlog.Panicf(\"Failure to post to uri: %v\", err.Error())\n\t}\n\tif res.StatusCode != 201 {\n\t\tlog.Panicf(\"Cancellation failed with code '%v'\", res.StatusCode)\n\t}\n}\n<commit_msg>Better error message on missing parameters, v1.1.1<commit_after>package main\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/fatih\/color\"\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"governator\"\n\tapp.Action = run\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"application, a\",\n\t\t\tEnvVar: \"GOVERNATOR_APPLICATION\",\n\t\t\tUsage: \"Name of the application that failed us\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"exit-code, e\",\n\t\t\tEnvVar: \"GOVERNATOR_EXIT_CODE\",\n\t\t\tUsage: \"Code that the service failed with\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"service, s\",\n\t\t\tEnvVar: \"GOVERNATOR_SERVICE\",\n\t\t\tUsage: \"Name of the service that is specifically at fault\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"uri, u\",\n\t\t\tEnvVar: \"GOVERNATOR_URI\",\n\t\t\tUsage: \"Uri to POST to with a cancellation notice\",\n\t\t},\n\t}\n\tapp.Run(os.Args)\n}\n\nfunc run(context *cli.Context) {\n\tapplicationName := context.String(\"application\")\n\texitCode := context.String(\"exit-code\")\n\tserviceName := context.String(\"service\")\n\turi := context.String(\"uri\")\n\n\tif applicationName == \"\" || exitCode == \"\" || serviceName == \"\" || uri == \"\" {\n\t\tcli.ShowAppHelp(context)\n\n\t\tif applicationName == \"\" {\n\t\t\tcolor.Red(\" Missing required flag --application or GOVERNATOR_APPLICATION\")\n\t\t}\n\t\tif exitCode == \"\" {\n\t\t\tcolor.Red(\" Missing required flag --exit-code or GOVERNATOR_EXIT_CODE\")\n\t\t}\n\t\tif serviceName == \"\" {\n\t\t\tcolor.Red(\" Missing required flag --service or GOVERNATOR_SERVICE\")\n\t\t}\n\t\tif uri == \"\" {\n\t\t\tcolor.Red(\" Missing required flag --uri or GOVERNATOR_URI\")\n\t\t}\n\t\tos.Exit(1)\n\t}\n\n\tres, err := http.PostForm(uri, url.Values{\n\t\t\"applicationName\": {applicationName},\n\t\t\"exitCode\": {exitCode},\n\t\t\"serviceName\": {serviceName},\n\t})\n\tif err != nil {\n\t\tlog.Panicf(\"Failure to post to uri: %v\", err.Error())\n\t}\n\tif res.StatusCode != 201 {\n\t\tlog.Panicf(\"Cancellation failed with code '%v'\", res.StatusCode)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/fatih\/color\"\n\t\"github.com\/fsnotify\/fsnotify\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"time\"\n)\n\ntype featureFlag int\n\nconst (\n\tflgAutoIgnore featureFlag = 1 + iota\n\t\/\/ Particularly useful for VIM flury of events, see:\n\t\/\/ https:\/\/stackoverflow.com\/q\/10300835\/287374\n\tflgDebugOutput\n)\n\nfunc (flg featureFlag) String() string {\n\tswitch flg {\n\tcase flgAutoIgnore:\n\t\treturn \"flgAutoIgnore\"\n\tcase flgDebugOutput:\n\t\treturn \"flgDebugOutput\"\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unexpected flag, '%d'\", int(flg)))\n\t}\n}\n\ntype exitReason int\n\nconst (\n\texCommandline exitReason = 1 + iota\n\texWatcher\n\texFsevent\n)\n\ntype runDirective struct {\n\tShell string\n\tCommand string\n\tWatchTarget string\n\tInvertMatch *regexp.Regexp\n\tFeatures map[featureFlag]bool\n\tLastRun time.Time\n}\n\nfunc (run *runDirective) Exec(msgStdout bool) error {\n\trun.LastRun = time.Now()\n\tif msgStdout {\n\t\tfmt.Printf(\"%s\\t: `%s`\\n\",\n\t\t\tcolor.YellowString(\"running\"),\n\t\t\tcolor.HiRedString(run.Command))\n\t}\n\n\t\/\/ TODO(zacsh) find out a shell-agnostic way to run comands (eg: *bash*\n\t\/\/ specifically takes a \"-c\" flag)\n\tcmd := exec.Command(run.Shell, \"-c\", run.Command)\n\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\trunError := cmd.Run()\n\n\tif msgStdout {\n\t\tif runError == nil {\n\t\t\tfmt.Printf(\"%s\\n\", color.YellowString(\"done\"))\n\t\t} else {\n\t\t\tfmt.Printf(\"%s\\t: %s\\n\\n\",\n\t\t\t\tcolor.YellowString(\"done\"),\n\t\t\t\tcolor.New(color.Bold, color.FgRed).Sprintf(runError.Error()))\n\t\t}\n\t}\n\treturn runError\n}\n\nfunc (run *runDirective) hasRunRecently(since time.Duration) bool {\n\treturn time.Since(run.LastRun) <= since\n}\n\nfunc usage() string {\n\treturn fmt.Sprintf(`Runs a command everytime some filesystem events happen.\n Usage: COMMAND [DIR_TO_WATCH [FILE_IGNORE_PATTERN]]\n\n DIR_TO_WATCH defaults to the current working directory.\n FILE_IGNORE_PATTERN If provided, is used to match against the basename of the\n exact file whose event has been captured. If FILE_IGNORE_PATTERN expression\n matches said file, COMMAND will not be run.\n Valid arguments are those accepted by https:\/\/golang.org\/pkg\/regexp\/#Compile\n`)\n}\n\nfunc die(reason exitReason, e error) {\n\tvar reasonStr string\n\tswitch reason {\n\tcase exCommandline:\n\t\treasonStr = \"usage\"\n\tcase exWatcher:\n\t\treasonStr = \"watcher\"\n\tcase exFsevent:\n\t\treasonStr = \"event\"\n\t}\n\n\tfmt.Fprintf(os.Stderr, \"%s error: %s\\n\", reasonStr, e)\n\tos.Exit(int(reason))\n}\n\nfunc (c *runDirective) debugStr() string {\n\tinvertMatch := \"n\/a\"\n\tif c.InvertMatch != nil {\n\t\tinvertMatch = c.InvertMatch.String()\n\t}\n\n\tvar features string\n\tfor k, v := range c.Features {\n\t\tif v {\n\t\t\tvar sep string\n\t\t\tif len(features) > 0 {\n\t\t\t\tsep = \", \"\n\t\t\t}\n\t\t\tfeatures = fmt.Sprintf(\"%s%s%s\", features, sep, k.String())\n\t\t}\n\t}\n\n\treturn fmt.Sprintf(`\n run.Command: \"%s\"\n run.WatchTarget.Name(): \"%s\"\n run.InvertMatch: \"%s\"\n run.Shell: \"%s\"\n run.Features: %s\n `, c.Command, c.WatchTarget, invertMatch, c.Shell, features)\n}\n\nfunc main() {\n\tmagicFileRegexp := regexp.MustCompile(`^(\\.\\w.*sw[a-z]|4913)$`)\n\n\trun, perr := parseCli()\n\tif perr != nil {\n\t\tif perr.Stage == psHelp {\n\t\t\tfmt.Printf(usage())\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\tdie(exCommandline, perr)\n\t}\n\n\twatcher, e := fsnotify.NewWatcher()\n\tif e != nil {\n\t\tdie(exCommandline, e)\n\t}\n\tdefer watcher.Close()\n\n\tfmt.Printf(\"Watching `%s`\\n\", run.WatchTarget)\n\n\tif run.Features[flgDebugOutput] {\n\t\tfmt.Fprintf(\n\t\t\tos.Stderr,\n\t\t\t\"[debug] not yet implemented, but here's what you asked for:\\n%s\\n\",\n\t\t\trun.debugStr())\n\t}\n\n\trun.Exec(true \/*msgStdout*\/)\n\n\thaveActionableEvent := make(chan bool)\n\tdone := make(chan bool)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase e := <-watcher.Events:\n\t\t\t\tif run.Features[flgDebugOutput] {\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"[debug] [%s] %s\\n\", e.Op.String(), e.Name)\n\t\t\t\t}\n\n\t\t\t\tif run.Features[flgAutoIgnore] {\n\t\t\t\t\tif magicFileRegexp.MatchString(filepath.Base(e.Name)) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif run.InvertMatch != nil &&\n\t\t\t\t\trun.InvertMatch.MatchString(filepath.Base(e.Name)) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\thaveActionableEvent <- true\n\t\t\tcase err := <-watcher.Errors:\n\t\t\t\tdie(exFsevent, err)\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-haveActionableEvent:\n\t\t\t\tif run.hasRunRecently(2 * time.Second) {\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \".\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfmt.Fprintf(os.Stderr, \"\\n\")\n\t\t\t\trun.Exec(true \/*msgStdout*\/)\n\t\t\t}\n\t\t}\n\t}()\n\n\tif err := watcher.Add(run.WatchTarget); err != nil {\n\t\tdie(exWatcher, e)\n\t}\n\t<-done \/\/ hang main\n}\n<commit_msg>prevent long-running COMMAND being re-run mid-execution<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/fatih\/color\"\n\t\"github.com\/fsnotify\/fsnotify\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"time\"\n)\n\nconst defaultWaitTime time.Duration = 2 * time.Second\n\ntype featureFlag int\n\nconst (\n\tflgAutoIgnore featureFlag = 1 + iota\n\t\/\/ Particularly useful for VIM flury of events, see:\n\t\/\/ https:\/\/stackoverflow.com\/q\/10300835\/287374\n\tflgDebugOutput\n)\n\nfunc (flg featureFlag) String() string {\n\tswitch flg {\n\tcase flgAutoIgnore:\n\t\treturn \"flgAutoIgnore\"\n\tcase flgDebugOutput:\n\t\treturn \"flgDebugOutput\"\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unexpected flag, '%d'\", int(flg)))\n\t}\n}\n\ntype exitReason int\n\nconst (\n\texCommandline exitReason = 1 + iota\n\texWatcher\n\texFsevent\n)\n\ntype runDirective struct {\n\tShell string\n\tCommand string\n\tWatchTarget string\n\tInvertMatch *regexp.Regexp\n\tFeatures map[featureFlag]bool\n\tLastRun time.Time\n}\n\nfunc (run *runDirective) Exec(msgStdout bool) error {\n\tif msgStdout {\n\t\tfmt.Printf(\"%s\\t: `%s`\\n\",\n\t\t\tcolor.YellowString(\"running\"),\n\t\t\tcolor.HiRedString(run.Command))\n\t}\n\n\t\/\/ TODO(zacsh) find out a shell-agnostic way to run comands (eg: *bash*\n\t\/\/ specifically takes a \"-c\" flag)\n\tcmd := exec.Command(run.Shell, \"-c\", run.Command)\n\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\trun.LastRun = time.Time{}\n\trunError := cmd.Run()\n\trun.LastRun = time.Now()\n\n\tif msgStdout {\n\t\tif runError == nil {\n\t\t\tfmt.Printf(\"%s\\n\", color.YellowString(\"done\"))\n\t\t} else {\n\t\t\tfmt.Printf(\"%s\\t: %s\\n\\n\",\n\t\t\t\tcolor.YellowString(\"done\"),\n\t\t\t\tcolor.New(color.Bold, color.FgRed).Sprintf(runError.Error()))\n\t\t}\n\t}\n\treturn runError\n}\n\nfunc (run *runDirective) isOkToRun() bool {\n\treturn !(run.isRunning() || run.hasRunRecently(defaultWaitTime))\n}\n\nfunc (run *runDirective) isRunning() bool { return run.LastRun.IsZero() }\n\nfunc (run *runDirective) hasRunRecently(since time.Duration) bool {\n\treturn time.Since(run.LastRun) <= since\n}\n\nfunc usage() string {\n\treturn fmt.Sprintf(`Runs a command everytime some filesystem events happen.\n Usage: COMMAND [DIR_TO_WATCH [FILE_IGNORE_PATTERN]]\n\n DIR_TO_WATCH defaults to the current working directory.\n FILE_IGNORE_PATTERN If provided, is used to match against the basename of the\n exact file whose event has been captured. If FILE_IGNORE_PATTERN expression\n matches said file, COMMAND will not be run.\n Valid arguments are those accepted by https:\/\/golang.org\/pkg\/regexp\/#Compile\n`)\n}\n\nfunc die(reason exitReason, e error) {\n\tvar reasonStr string\n\tswitch reason {\n\tcase exCommandline:\n\t\treasonStr = \"usage\"\n\tcase exWatcher:\n\t\treasonStr = \"watcher\"\n\tcase exFsevent:\n\t\treasonStr = \"event\"\n\t}\n\n\tfmt.Fprintf(os.Stderr, \"%s error: %s\\n\", reasonStr, e)\n\tos.Exit(int(reason))\n}\n\nfunc (c *runDirective) debugStr() string {\n\tinvertMatch := \"n\/a\"\n\tif c.InvertMatch != nil {\n\t\tinvertMatch = c.InvertMatch.String()\n\t}\n\n\tvar features string\n\tfor k, v := range c.Features {\n\t\tif v {\n\t\t\tvar sep string\n\t\t\tif len(features) > 0 {\n\t\t\t\tsep = \", \"\n\t\t\t}\n\t\t\tfeatures = fmt.Sprintf(\"%s%s%s\", features, sep, k.String())\n\t\t}\n\t}\n\n\treturn fmt.Sprintf(`\n run.Command: \"%s\"\n run.WatchTarget.Name(): \"%s\"\n run.InvertMatch: \"%s\"\n run.Shell: \"%s\"\n run.Features: %s\n `, c.Command, c.WatchTarget, invertMatch, c.Shell, features)\n}\n\nfunc main() {\n\tmagicFileRegexp := regexp.MustCompile(`^(\\.\\w.*sw[a-z]|4913)$`)\n\n\trun, perr := parseCli()\n\tif perr != nil {\n\t\tif perr.Stage == psHelp {\n\t\t\tfmt.Printf(usage())\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\tdie(exCommandline, perr)\n\t}\n\n\twatcher, e := fsnotify.NewWatcher()\n\tif e != nil {\n\t\tdie(exCommandline, e)\n\t}\n\tdefer watcher.Close()\n\n\tfmt.Printf(\"Watching `%s`\\n\", run.WatchTarget)\n\n\tif run.Features[flgDebugOutput] {\n\t\tfmt.Fprintf(\n\t\t\tos.Stderr,\n\t\t\t\"[debug] not yet implemented, but here's what you asked for:\\n%s\\n\",\n\t\t\trun.debugStr())\n\t}\n\n\trun.Exec(true \/*msgStdout*\/)\n\n\thaveActionableEvent := make(chan bool)\n\tdone := make(chan bool)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase e := <-watcher.Events:\n\t\t\t\tif run.Features[flgDebugOutput] {\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"[debug] [%s] %s\\n\", e.Op.String(), e.Name)\n\t\t\t\t}\n\n\t\t\t\tif run.Features[flgAutoIgnore] {\n\t\t\t\t\tif magicFileRegexp.MatchString(filepath.Base(e.Name)) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif run.InvertMatch != nil &&\n\t\t\t\t\trun.InvertMatch.MatchString(filepath.Base(e.Name)) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\thaveActionableEvent <- true\n\t\t\tcase err := <-watcher.Errors:\n\t\t\t\tdie(exFsevent, err)\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-haveActionableEvent:\n\t\t\t\tif !run.isOkToRun() {\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \".\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfmt.Fprintf(os.Stderr, \"\\n\")\n\t\t\t\trun.Exec(true \/*msgStdout*\/)\n\t\t\t}\n\t\t}\n\t}()\n\n\tif err := watcher.Add(run.WatchTarget); err != nil {\n\t\tdie(exWatcher, e)\n\t}\n\t<-done \/\/ hang main\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\t\"time\"\n\n\t\"github.com\/micromdm\/micromdm\/version\"\n)\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n}\n\nfunc main() {\n\tif len(os.Args) < 2 {\n\t\tusage()\n\t\tos.Exit(1)\n\t}\n\tvar run func([]string) error\n\tswitch strings.ToLower(os.Args[1]) {\n\tcase \"version\", \"-version\":\n\t\tversion.Print()\n\t\treturn\n\tcase \"serve\":\n\t\trun = serve\n\tcase \"dep-token\":\n\t\trun = depToken\n\tcase \"get\":\n\t\trun = getResource\n\tcase \"apply\":\n\t\trun = applyResource\n\tcase \"dev\":\n\t\trun = dev\n\tdefault:\n\t\tusage()\n\t\tos.Exit(1)\n\t}\n\n\tif err := run(os.Args[2:]); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc usage() error {\n\thelpText := `USAGE: micromdm <COMMAND>\n\nAvailable Commands:\n\tserve\n\tget\n\tapply\n\tdep-token\n\tversion\n\nUse micromdm <command> -h for additional usage of each command.\nExample: micromdm serve -h\n`\n\tfmt.Println(helpText)\n\treturn nil\n}\n\nfunc usageFor(fs *flag.FlagSet, short string) func() {\n\treturn func() {\n\t\tfmt.Fprintf(os.Stderr, \"USAGE\\n\")\n\t\tfmt.Fprintf(os.Stderr, \" %s\\n\", short)\n\t\tfmt.Fprintf(os.Stderr, \"\\n\")\n\t\tfmt.Fprintf(os.Stderr, \"FLAGS\\n\")\n\t\tw := tabwriter.NewWriter(os.Stderr, 0, 2, 2, ' ', 0)\n\t\tfs.VisitAll(func(f *flag.Flag) {\n\t\t\tfmt.Fprintf(w, \"\\t-%s %s\\t%s\\n\", f.Name, f.DefValue, f.Usage)\n\t\t})\n\t\tw.Flush()\n\t\tfmt.Fprintf(os.Stderr, \"\\n\")\n\t}\n}\n<commit_msg>Add dev to usage<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\t\"time\"\n\n\t\"github.com\/micromdm\/micromdm\/version\"\n)\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n}\n\nfunc main() {\n\tif len(os.Args) < 2 {\n\t\tusage()\n\t\tos.Exit(1)\n\t}\n\tvar run func([]string) error\n\tswitch strings.ToLower(os.Args[1]) {\n\tcase \"version\", \"-version\":\n\t\tversion.Print()\n\t\treturn\n\tcase \"serve\":\n\t\trun = serve\n\tcase \"dep-token\":\n\t\trun = depToken\n\tcase \"get\":\n\t\trun = getResource\n\tcase \"apply\":\n\t\trun = applyResource\n\tcase \"dev\":\n\t\trun = dev\n\tdefault:\n\t\tusage()\n\t\tos.Exit(1)\n\t}\n\n\tif err := run(os.Args[2:]); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc usage() error {\n\thelpText := `USAGE: micromdm <COMMAND>\n\nAvailable Commands:\n\tserve\n\tdev\n\tget\n\tapply\n\tdep-token\n\tversion\n\nUse micromdm <command> -h for additional usage of each command.\nExample: micromdm serve -h\n`\n\tfmt.Println(helpText)\n\treturn nil\n}\n\nfunc usageFor(fs *flag.FlagSet, short string) func() {\n\treturn func() {\n\t\tfmt.Fprintf(os.Stderr, \"USAGE\\n\")\n\t\tfmt.Fprintf(os.Stderr, \" %s\\n\", short)\n\t\tfmt.Fprintf(os.Stderr, \"\\n\")\n\t\tfmt.Fprintf(os.Stderr, \"FLAGS\\n\")\n\t\tw := tabwriter.NewWriter(os.Stderr, 0, 2, 2, ' ', 0)\n\t\tfs.VisitAll(func(f *flag.Flag) {\n\t\t\tfmt.Fprintf(w, \"\\t-%s %s\\t%s\\n\", f.Name, f.DefValue, f.Usage)\n\t\t})\n\t\tw.Flush()\n\t\tfmt.Fprintf(os.Stderr, \"\\n\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"os\"\n\n\t\"fmt\"\n\t\"github.com\/dynport\/dgtk\/cli\"\n)\n\nfunc main() {\n\tRun()\n}\n\nfunc Run() {\n\tcallArgs, _ := ConfigCallArgs()\n\terr := router(callArgs).RunWithArgs()\n\tswitch err {\n\tcase cli.ErrorHelpRequested, cli.ErrorNoRoute:\n\t\tprintErr(err)\n\t\tos.Exit(1)\n\tcase nil:\n\t\tos.Exit(0)\n\tdefault:\n\t\tprintErr(err)\n\t\tfmt.Println(\"HERE\")\n\t\tos.Exit(1)\n\t}\n}\n<commit_msg>printErr should not be shown when HelpRequested and NoRoute error occurs<commit_after>package main\n\nimport (\n\t\"os\"\n\n\t\"fmt\"\n\t\"github.com\/dynport\/dgtk\/cli\"\n)\n\nfunc main() {\n\tRun()\n}\n\nfunc Run() {\n\tcallArgs, _ := ConfigCallArgs()\n\terr := router(callArgs).RunWithArgs()\n\tswitch err {\n\tcase cli.ErrorHelpRequested, cli.ErrorNoRoute:\n\t\tos.Exit(1)\n\tcase nil:\n\t\tos.Exit(0)\n\tdefault:\n\t\tprintErr(err)\n\t\tfmt.Println(\"HERE\")\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Command glow generates Go OpenGL bindings. See http:\/\/github.com\/errcw\/glow.\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar specURL = \"https:\/\/cvs.khronos.org\/svn\/repos\/ogl\/trunk\/doc\/registry\/public\/api\"\nvar specRegexp = regexp.MustCompile(`^(gl|glx|egl|wgl)\\.xml$`)\n\nvar docURLs = []string{\n\t\"https:\/\/cvs.khronos.org\/svn\/repos\/ogl\/trunk\/ecosystem\/public\/sdk\/docs\/man2\",\n\t\"https:\/\/cvs.khronos.org\/svn\/repos\/ogl\/trunk\/ecosystem\/public\/sdk\/docs\/man3\",\n\t\"https:\/\/cvs.khronos.org\/svn\/repos\/ogl\/trunk\/ecosystem\/public\/sdk\/docs\/man4\"}\nvar docRegexp = regexp.MustCompile(`^[ew]?gl[^u_].*\\.xml$`)\n\nfunc download(name string, args []string) {\n\tflags := flag.NewFlagSet(name, flag.ExitOnError)\n\txmlDir := flags.String(\"d\", \"xml\", \"XML directory\")\n\tflags.Parse(args)\n\n\tspecDir := filepath.Join(*xmlDir, \"spec\")\n\tif err := os.MkdirAll(specDir, 0755); err != nil {\n\t\tlog.Fatal(\"error creating specification output directory:\", err)\n\t}\n\n\tdocDir := filepath.Join(*xmlDir, \"doc\")\n\tif err := os.MkdirAll(docDir, 0755); err != nil {\n\t\tlog.Fatal(\"error creating documentation output directory:\", err)\n\t}\n\n\trev, err := DownloadSvnDir(specURL, specRegexp, specDir)\n\tif err != nil {\n\t\tlog.Fatal(\"error downloading specification files:\", err)\n\t}\n\n\tspecVersionFile := filepath.Join(specDir, \"REVISION\")\n\tif err := ioutil.WriteFile(specVersionFile, []byte(rev), 0644); err != nil {\n\t\tlog.Fatal(\"error writing spec revision metadata file:\", err)\n\t}\n\n\tfor _, url := range docURLs {\n\t\tif _, err := DownloadSvnDir(url, docRegexp, docDir); err != nil {\n\t\t\tlog.Fatal(\"error downloading documentation files:\", err)\n\t\t}\n\t}\n}\n\nfunc generate(name string, args []string) {\n\tflags := flag.NewFlagSet(name, flag.ExitOnError)\n\txmlDir := flags.String(\"xml\", \"xml\", \"XML directory\")\n\tapi := flags.String(\"api\", \"\", \"API to generate (e.g., gl)\")\n\tver := flags.String(\"version\", \"\", \"API version to generate (e.g., 4.1)\")\n\tprofile := flags.String(\"profile\", \"\", \"API profile to generate (e.g., core)\")\n\taddext := flags.String(\"addext\", \".*\", \"Regular expression of extensions to include (e.g., .*)\")\n\tremext := flags.String(\"remext\", \"$^\", \"Regular expression of extensions to exclude (e.g., .*)\")\n\trestrict := flags.String(\"restrict\", \"\", \"JSON file of symbols to restrict symbol generation\")\n\tlenientInit := flags.Bool(\"lenientInit\", false, \"When true missing functions do not fail Init\")\n\tflags.Parse(args)\n\n\tversion, err := ParseVersion(*ver)\n\tif err != nil {\n\t\tlog.Fatal(\"error parsing version:\", err)\n\t}\n\n\taddExtRegexp, err := regexp.Compile(*addext)\n\tif err != nil {\n\t\tlog.Fatal(\"error parsing extension inclusion regexp:\", err)\n\t}\n\n\tremExtRegexp, err := regexp.Compile(*remext)\n\tif err != nil {\n\t\tlog.Fatal(\"error parsing extension exclusion regexp:\", err)\n\t}\n\n\tpackageSpec := &PackageSpec{\n\t\tAPI: *api,\n\t\tVersion: version,\n\t\tProfile: *profile,\n\t\tAddExtRegexp: addExtRegexp,\n\t\tRemExtRegexp: remExtRegexp,\n\t\tLenientInit: *lenientInit,\n\t}\n\n\tspecs, rev := parseSpecifications(*xmlDir)\n\tdocs := parseDocumentation(*xmlDir)\n\n\tvar pkg *Package\n\tfor _, spec := range specs {\n\t\tif spec.HasPackage(packageSpec) {\n\t\t\tpkg = spec.ToPackage(packageSpec)\n\t\t\tpkg.SpecRev = rev\n\t\t\tdocs.AddDocs(pkg)\n\t\t\tif len(*restrict) > 0 {\n\t\t\t\tperformRestriction(pkg, *restrict)\n\t\t\t}\n\t\t\tif err := pkg.GeneratePackage(); err != nil {\n\t\t\t\tlog.Fatal(\"error generating package:\", err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\tif pkg == nil {\n\t\tlog.Fatal(\"unable to generate package:\", packageSpec)\n\t}\n\tlog.Println(\"generated package in\", pkg.Dir())\n}\n\n\/\/ Converts a slice string into a simple lookup map.\nfunc lookupMap(s []string) map[string]struct{} {\n\tlookup := make(map[string]struct{}, len(s))\n\tfor _, str := range s {\n\t\tlookup[str] = struct{}{}\n\t}\n\treturn lookup\n}\n\ntype jsonRestriction struct {\n\tEnums []string\n\tFunctions []string\n}\n\n\/\/ Reads the given JSON file path into jsonRestriction and filters the package\n\/\/ accordingly.\nfunc performRestriction(pkg *Package, jsonPath string) {\n\tdata, err := ioutil.ReadFile(jsonPath)\n\tif err != nil {\n\t\tlog.Fatal(\"error reading JSON restriction file:\", err)\n\t}\n\tvar r jsonRestriction\n\terr = json.Unmarshal(data, &r)\n\tif err != nil {\n\t\tlog.Fatal(\"error parsing JSON restriction file:\", err)\n\t}\n\tpkg.Filter(lookupMap(r.Enums), lookupMap(r.Functions))\n}\n\nfunc parseSpecifications(xmlDir string) ([]*Specification, string) {\n\tspecDir := filepath.Join(xmlDir, \"spec\")\n\tspecFiles, err := ioutil.ReadDir(specDir)\n\tif err != nil {\n\t\tlog.Fatal(\"error reading spec file entries:\", err)\n\t}\n\n\tspecs := make([]*Specification, 0, len(specFiles))\n\tfor _, specFile := range specFiles {\n\t\tif !strings.HasSuffix(specFile.Name(), \"xml\") {\n\t\t\tcontinue\n\t\t}\n\t\tspec, err := NewSpecification(filepath.Join(specDir, specFile.Name()))\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"error parsing specification:\", specFile.Name(), err)\n\t\t}\n\t\tspecs = append(specs, spec)\n\t}\n\n\trev, err := ioutil.ReadFile(filepath.Join(specDir, \"REVISION\"))\n\tif err != nil {\n\t\tlog.Fatal(\"error reading spec revision file:\", err)\n\t}\n\n\treturn specs, string(rev)\n}\n\nfunc parseDocumentation(xmlDir string) Documentation {\n\tdocDir := filepath.Join(xmlDir, \"doc\")\n\tdocFiles, err := ioutil.ReadDir(docDir)\n\tif err != nil {\n\t\tlog.Fatal(\"error reading doc file entries:\", err)\n\t}\n\n\tdocs := make([]string, 0, len(docFiles))\n\tfor _, docFile := range docFiles {\n\t\tdocs = append(docs, filepath.Join(docDir, docFile.Name()))\n\t}\n\n\tdoc, err := NewDocumentation(docs)\n\tif err != nil {\n\t\tlog.Fatal(\"error parsing documentation:\", err)\n\t}\n\n\treturn doc\n}\n\n\/\/ PackageSpec describes a package to be generated.\ntype PackageSpec struct {\n\tAPI string\n\tVersion Version\n\tProfile string \/\/ If \"all\" overrides the version spec\n\tAddExtRegexp *regexp.Regexp\n\tRemExtRegexp *regexp.Regexp\n\tLenientInit bool\n}\n\nfunc printUsage(name string) {\n\tfmt.Printf(\"Usage: %s command [arguments]\\n\", name)\n\tfmt.Println(\"Commands:\")\n\tfmt.Println(\" download Downloads specification and documentation XML files\")\n\tfmt.Println(\" generate Generates bindings\")\n\tfmt.Printf(\"Use %s <command> -help for a detailed command description\\n\", name)\n}\n\nfunc main() {\n\tname := os.Args[0]\n\targs := os.Args[1:]\n\n\tif len(args) < 1 {\n\t\tprintUsage(name)\n\t\tos.Exit(-1)\n\t}\n\n\tcommand := args[0]\n\tswitch command {\n\tcase \"download\":\n\t\tdownload(\"download\", args[1:])\n\tcase \"generate\":\n\t\tgenerate(\"generate\", args[1:])\n\tdefault:\n\t\tfmt.Printf(\"Unknown command: '%s'\\n\", command)\n\t\tprintUsage(name)\n\t\tos.Exit(-1)\n\t}\n}\n<commit_msg>Move error check for json.Unmarshal onto single line.<commit_after>\/\/ Command glow generates Go OpenGL bindings. See http:\/\/github.com\/errcw\/glow.\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar specURL = \"https:\/\/cvs.khronos.org\/svn\/repos\/ogl\/trunk\/doc\/registry\/public\/api\"\nvar specRegexp = regexp.MustCompile(`^(gl|glx|egl|wgl)\\.xml$`)\n\nvar docURLs = []string{\n\t\"https:\/\/cvs.khronos.org\/svn\/repos\/ogl\/trunk\/ecosystem\/public\/sdk\/docs\/man2\",\n\t\"https:\/\/cvs.khronos.org\/svn\/repos\/ogl\/trunk\/ecosystem\/public\/sdk\/docs\/man3\",\n\t\"https:\/\/cvs.khronos.org\/svn\/repos\/ogl\/trunk\/ecosystem\/public\/sdk\/docs\/man4\"}\nvar docRegexp = regexp.MustCompile(`^[ew]?gl[^u_].*\\.xml$`)\n\nfunc download(name string, args []string) {\n\tflags := flag.NewFlagSet(name, flag.ExitOnError)\n\txmlDir := flags.String(\"d\", \"xml\", \"XML directory\")\n\tflags.Parse(args)\n\n\tspecDir := filepath.Join(*xmlDir, \"spec\")\n\tif err := os.MkdirAll(specDir, 0755); err != nil {\n\t\tlog.Fatal(\"error creating specification output directory:\", err)\n\t}\n\n\tdocDir := filepath.Join(*xmlDir, \"doc\")\n\tif err := os.MkdirAll(docDir, 0755); err != nil {\n\t\tlog.Fatal(\"error creating documentation output directory:\", err)\n\t}\n\n\trev, err := DownloadSvnDir(specURL, specRegexp, specDir)\n\tif err != nil {\n\t\tlog.Fatal(\"error downloading specification files:\", err)\n\t}\n\n\tspecVersionFile := filepath.Join(specDir, \"REVISION\")\n\tif err := ioutil.WriteFile(specVersionFile, []byte(rev), 0644); err != nil {\n\t\tlog.Fatal(\"error writing spec revision metadata file:\", err)\n\t}\n\n\tfor _, url := range docURLs {\n\t\tif _, err := DownloadSvnDir(url, docRegexp, docDir); err != nil {\n\t\t\tlog.Fatal(\"error downloading documentation files:\", err)\n\t\t}\n\t}\n}\n\nfunc generate(name string, args []string) {\n\tflags := flag.NewFlagSet(name, flag.ExitOnError)\n\txmlDir := flags.String(\"xml\", \"xml\", \"XML directory\")\n\tapi := flags.String(\"api\", \"\", \"API to generate (e.g., gl)\")\n\tver := flags.String(\"version\", \"\", \"API version to generate (e.g., 4.1)\")\n\tprofile := flags.String(\"profile\", \"\", \"API profile to generate (e.g., core)\")\n\taddext := flags.String(\"addext\", \".*\", \"Regular expression of extensions to include (e.g., .*)\")\n\tremext := flags.String(\"remext\", \"$^\", \"Regular expression of extensions to exclude (e.g., .*)\")\n\trestrict := flags.String(\"restrict\", \"\", \"JSON file of symbols to restrict symbol generation\")\n\tlenientInit := flags.Bool(\"lenientInit\", false, \"When true missing functions do not fail Init\")\n\tflags.Parse(args)\n\n\tversion, err := ParseVersion(*ver)\n\tif err != nil {\n\t\tlog.Fatal(\"error parsing version:\", err)\n\t}\n\n\taddExtRegexp, err := regexp.Compile(*addext)\n\tif err != nil {\n\t\tlog.Fatal(\"error parsing extension inclusion regexp:\", err)\n\t}\n\n\tremExtRegexp, err := regexp.Compile(*remext)\n\tif err != nil {\n\t\tlog.Fatal(\"error parsing extension exclusion regexp:\", err)\n\t}\n\n\tpackageSpec := &PackageSpec{\n\t\tAPI: *api,\n\t\tVersion: version,\n\t\tProfile: *profile,\n\t\tAddExtRegexp: addExtRegexp,\n\t\tRemExtRegexp: remExtRegexp,\n\t\tLenientInit: *lenientInit,\n\t}\n\n\tspecs, rev := parseSpecifications(*xmlDir)\n\tdocs := parseDocumentation(*xmlDir)\n\n\tvar pkg *Package\n\tfor _, spec := range specs {\n\t\tif spec.HasPackage(packageSpec) {\n\t\t\tpkg = spec.ToPackage(packageSpec)\n\t\t\tpkg.SpecRev = rev\n\t\t\tdocs.AddDocs(pkg)\n\t\t\tif len(*restrict) > 0 {\n\t\t\t\tperformRestriction(pkg, *restrict)\n\t\t\t}\n\t\t\tif err := pkg.GeneratePackage(); err != nil {\n\t\t\t\tlog.Fatal(\"error generating package:\", err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\tif pkg == nil {\n\t\tlog.Fatal(\"unable to generate package:\", packageSpec)\n\t}\n\tlog.Println(\"generated package in\", pkg.Dir())\n}\n\n\/\/ Converts a slice string into a simple lookup map.\nfunc lookupMap(s []string) map[string]struct{} {\n\tlookup := make(map[string]struct{}, len(s))\n\tfor _, str := range s {\n\t\tlookup[str] = struct{}{}\n\t}\n\treturn lookup\n}\n\ntype jsonRestriction struct {\n\tEnums []string\n\tFunctions []string\n}\n\n\/\/ Reads the given JSON file path into jsonRestriction and filters the package\n\/\/ accordingly.\nfunc performRestriction(pkg *Package, jsonPath string) {\n\tdata, err := ioutil.ReadFile(jsonPath)\n\tif err != nil {\n\t\tlog.Fatal(\"error reading JSON restriction file:\", err)\n\t}\n\tvar r jsonRestriction\n\tif err = json.Unmarshal(data, &r); err != nil {\n\t\tlog.Fatal(\"error parsing JSON restriction file:\", err)\n\t}\n\tpkg.Filter(lookupMap(r.Enums), lookupMap(r.Functions))\n}\n\nfunc parseSpecifications(xmlDir string) ([]*Specification, string) {\n\tspecDir := filepath.Join(xmlDir, \"spec\")\n\tspecFiles, err := ioutil.ReadDir(specDir)\n\tif err != nil {\n\t\tlog.Fatal(\"error reading spec file entries:\", err)\n\t}\n\n\tspecs := make([]*Specification, 0, len(specFiles))\n\tfor _, specFile := range specFiles {\n\t\tif !strings.HasSuffix(specFile.Name(), \"xml\") {\n\t\t\tcontinue\n\t\t}\n\t\tspec, err := NewSpecification(filepath.Join(specDir, specFile.Name()))\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"error parsing specification:\", specFile.Name(), err)\n\t\t}\n\t\tspecs = append(specs, spec)\n\t}\n\n\trev, err := ioutil.ReadFile(filepath.Join(specDir, \"REVISION\"))\n\tif err != nil {\n\t\tlog.Fatal(\"error reading spec revision file:\", err)\n\t}\n\n\treturn specs, string(rev)\n}\n\nfunc parseDocumentation(xmlDir string) Documentation {\n\tdocDir := filepath.Join(xmlDir, \"doc\")\n\tdocFiles, err := ioutil.ReadDir(docDir)\n\tif err != nil {\n\t\tlog.Fatal(\"error reading doc file entries:\", err)\n\t}\n\n\tdocs := make([]string, 0, len(docFiles))\n\tfor _, docFile := range docFiles {\n\t\tdocs = append(docs, filepath.Join(docDir, docFile.Name()))\n\t}\n\n\tdoc, err := NewDocumentation(docs)\n\tif err != nil {\n\t\tlog.Fatal(\"error parsing documentation:\", err)\n\t}\n\n\treturn doc\n}\n\n\/\/ PackageSpec describes a package to be generated.\ntype PackageSpec struct {\n\tAPI string\n\tVersion Version\n\tProfile string \/\/ If \"all\" overrides the version spec\n\tAddExtRegexp *regexp.Regexp\n\tRemExtRegexp *regexp.Regexp\n\tLenientInit bool\n}\n\nfunc printUsage(name string) {\n\tfmt.Printf(\"Usage: %s command [arguments]\\n\", name)\n\tfmt.Println(\"Commands:\")\n\tfmt.Println(\" download Downloads specification and documentation XML files\")\n\tfmt.Println(\" generate Generates bindings\")\n\tfmt.Printf(\"Use %s <command> -help for a detailed command description\\n\", name)\n}\n\nfunc main() {\n\tname := os.Args[0]\n\targs := os.Args[1:]\n\n\tif len(args) < 1 {\n\t\tprintUsage(name)\n\t\tos.Exit(-1)\n\t}\n\n\tcommand := args[0]\n\tswitch command {\n\tcase \"download\":\n\t\tdownload(\"download\", args[1:])\n\tcase \"generate\":\n\t\tgenerate(\"generate\", args[1:])\n\tdefault:\n\t\tfmt.Printf(\"Unknown command: '%s'\\n\", command)\n\t\tprintUsage(name)\n\t\tos.Exit(-1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Set user agent<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Tor websocket server transport plugin.\n\/\/\n\/\/ Usage:\n\/\/ ServerTransportPlugin websocket exec .\/websocket-server --port 9901\n\npackage main\n\nimport (\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\"\n\t\"time\"\n)\n\nimport \"pt\"\nimport \"websocket\"\n\nconst ptMethodName = \"websocket\"\nconst requestTimeout = 10 * time.Second\n\n\/\/ \"4\/3+1\" accounts for possible base64 encoding.\nconst maxMessageSize = 64*1024*4\/3 + 1\n\nvar logFile = os.Stderr\n\nvar ptInfo pt.ServerInfo\n\n\/\/ When a connection handler starts, +1 is written to this channel; when it\n\/\/ ends, -1 is written.\nvar handlerChan = make(chan int)\n\nfunc usage() {\n\tfmt.Printf(\"Usage: %s [OPTIONS]\\n\", os.Args[0])\n\tfmt.Printf(\"WebSocket server pluggable transport for Tor.\\n\")\n\tfmt.Printf(\"Works only as a managed proxy.\\n\")\n\tfmt.Printf(\"\\n\")\n\tfmt.Printf(\" -h, --help show this help.\\n\")\n\tfmt.Printf(\" --log FILE log messages to FILE (default stderr).\\n\")\n\tfmt.Printf(\" --port PORT listen on PORT (overrides Tor's requested port).\\n\")\n}\n\nvar logMutex sync.Mutex\n\nfunc Log(format string, v ...interface{}) {\n\tdateStr := time.Now().Format(\"2006-01-02 15:04:05\")\n\tlogMutex.Lock()\n\tdefer logMutex.Unlock()\n\tmsg := fmt.Sprintf(format, v...)\n\tfmt.Fprintf(logFile, \"%s %s\\n\", dateStr, msg)\n}\n\n\/\/ An abstraction that makes an underlying WebSocket connection look like an\n\/\/ io.ReadWriteCloser. It internally takes care of things like base64 encoding\n\/\/ and decoding.\ntype webSocketConn struct {\n\tWs *websocket.WebSocket\n\tBase64 bool\n\tmessageBuf []byte\n}\n\n\/\/ Implements io.Reader.\nfunc (conn *webSocketConn) Read(b []byte) (n int, err error) {\n\tfor len(conn.messageBuf) == 0 {\n\t\tvar m websocket.Message\n\t\tm, err = conn.Ws.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif m.Opcode == 8 {\n\t\t\terr = io.EOF\n\t\t\treturn\n\t\t}\n\t\tif conn.Base64 {\n\t\t\tif m.Opcode != 1 {\n\t\t\t\terr = errors.New(fmt.Sprintf(\"got non-text opcode %d with the base64 subprotocol\", m.Opcode))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tconn.messageBuf = make([]byte, base64.StdEncoding.DecodedLen(len(m.Payload)))\n\t\t\tvar num int\n\t\t\tnum, err = base64.StdEncoding.Decode(conn.messageBuf, m.Payload)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tconn.messageBuf = conn.messageBuf[:num]\n\t\t} else {\n\t\t\tif m.Opcode != 2 {\n\t\t\t\terr = errors.New(fmt.Sprintf(\"got non-binary opcode %d with no subprotocol\", m.Opcode))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tconn.messageBuf = m.Payload\n\t\t}\n\t}\n\n\tn = copy(b, conn.messageBuf)\n\tconn.messageBuf = conn.messageBuf[n:]\n\n\treturn\n}\n\n\/\/ Implements io.Writer.\nfunc (conn *webSocketConn) Write(b []byte) (n int, err error) {\n\tif conn.Base64 {\n\t\tbuf := make([]byte, base64.StdEncoding.EncodedLen(len(b)))\n\t\tbase64.StdEncoding.Encode(buf, b)\n\t\terr = conn.Ws.WriteMessage(1, buf)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tn = len(b)\n\t} else {\n\t\terr = conn.Ws.WriteMessage(2, b)\n\t\tn = len(b)\n\t}\n\treturn\n}\n\n\/\/ Implements io.Closer.\nfunc (conn *webSocketConn) Close() error {\n\t\/\/ Ignore any error in trying to write a Close frame.\n\t_ = conn.Ws.WriteFrame(8, nil)\n\treturn conn.Ws.Conn.Close()\n}\n\n\/\/ Create a new webSocketConn.\nfunc NewWebSocketConn(ws *websocket.WebSocket) webSocketConn {\n\tvar conn webSocketConn\n\tconn.Ws = ws\n\tconn.Base64 = (ws.Subprotocol == \"base64\")\n\treturn conn\n}\n\n\/\/ Copy from WebSocket to socket and vice versa.\nfunc proxy(local *net.TCPConn, conn *webSocketConn) {\n\tvar wg sync.WaitGroup\n\n\twg.Add(2)\n\n\tgo func() {\n\t\t_, err := io.Copy(conn, local)\n\t\tif err != nil {\n\t\t\tLog(\"error copying ORPort to WebSocket\")\n\t\t}\n\t\tlocal.CloseRead()\n\t\tconn.Close()\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\t_, err := io.Copy(local, conn)\n\t\tif err != nil {\n\t\t\tLog(\"error copying WebSocket to ORPort\")\n\t\t}\n\t\tlocal.CloseWrite()\n\t\tconn.Close()\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n}\n\nfunc webSocketHandler(ws *websocket.WebSocket) {\n\t\/\/ Undo timeouts on HTTP request handling.\n\tws.Conn.SetDeadline(time.Time{})\n\tconn := NewWebSocketConn(ws)\n\n\thandlerChan <- 1\n\tdefer func() {\n\t\thandlerChan <- -1\n\t}()\n\n\ts, err := pt.ConnectOr(&ptInfo, ws.Conn, ptMethodName)\n\tif err != nil {\n\t\tLog(\"Failed to connect to ORPort: \" + err.Error())\n\t\treturn\n\t}\n\n\tproxy(s, &conn)\n}\n\nfunc startListener(addr *net.TCPAddr) (*net.TCPListener, error) {\n\tln, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgo func() {\n\t\tvar config websocket.Config\n\t\tconfig.Subprotocols = []string{\"base64\"}\n\t\tconfig.MaxMessageSize = maxMessageSize\n\t\ts := &http.Server{\n\t\t\tHandler: config.Handler(webSocketHandler),\n\t\t\tReadTimeout: requestTimeout,\n\t\t}\n\t\terr = s.Serve(ln)\n\t\tif err != nil {\n\t\t\tLog(\"http.Serve: \" + err.Error())\n\t\t}\n\t}()\n\treturn ln, nil\n}\n\nfunc main() {\n\tvar logFilename string\n\tvar port int\n\n\tflag.Usage = usage\n\tflag.StringVar(&logFilename, \"log\", \"\", \"log file to write to\")\n\tflag.IntVar(&port, \"port\", 0, \"port to listen on if unspecified by Tor\")\n\tflag.Parse()\n\n\tif logFilename != \"\" {\n\t\tf, err := os.OpenFile(logFilename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Can't open log file %q: %s.\\n\", logFilename, err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t\tlogFile = f\n\t}\n\n\tLog(\"starting\")\n\tptInfo = pt.ServerSetup([]string{ptMethodName})\n\n\tlisteners := make([]*net.TCPListener, 0)\n\tfor _, bindAddr := range ptInfo.BindAddrs {\n\t\t\/\/ Override tor's requested port (which is 0 if this transport\n\t\t\/\/ has not been run before) with the one requested by the --port\n\t\t\/\/ option.\n\t\tif port != 0 {\n\t\t\tbindAddr.Addr.Port = port\n\t\t}\n\n\t\tln, err := startListener(bindAddr.Addr)\n\t\tif err != nil {\n\t\t\tpt.SmethodError(bindAddr.MethodName, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tpt.Smethod(bindAddr.MethodName, ln.Addr())\n\t\tLog(\"listening on %s\", ln.Addr().String())\n\t\tlisteners = append(listeners, ln)\n\t}\n\tpt.SmethodsDone()\n\n\tvar numHandlers int = 0\n\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, os.Interrupt)\n\tvar sigint bool = false\n\tfor !sigint {\n\t\tselect {\n\t\tcase n := <-handlerChan:\n\t\t\tnumHandlers += n\n\t\tcase <-signalChan:\n\t\t\tLog(\"SIGINT\")\n\t\t\tsigint = true\n\t\t}\n\t}\n\n\tfor _, ln := range listeners {\n\t\tln.Close()\n\t}\n\n\tsigint = false\n\tfor numHandlers != 0 && !sigint {\n\t\tselect {\n\t\tcase n := <-handlerChan:\n\t\t\tnumHandlers += n\n\t\tcase <-signalChan:\n\t\t\tLog(\"SIGINT\")\n\t\t\tsigint = true\n\t\t}\n\t}\n}\n<commit_msg>No need to export these local functions.<commit_after>\/\/ Tor websocket server transport plugin.\n\/\/\n\/\/ Usage:\n\/\/ ServerTransportPlugin websocket exec .\/websocket-server --port 9901\n\npackage main\n\nimport (\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\"\n\t\"time\"\n)\n\nimport \"pt\"\nimport \"websocket\"\n\nconst ptMethodName = \"websocket\"\nconst requestTimeout = 10 * time.Second\n\n\/\/ \"4\/3+1\" accounts for possible base64 encoding.\nconst maxMessageSize = 64*1024*4\/3 + 1\n\nvar logFile = os.Stderr\n\nvar ptInfo pt.ServerInfo\n\n\/\/ When a connection handler starts, +1 is written to this channel; when it\n\/\/ ends, -1 is written.\nvar handlerChan = make(chan int)\n\nfunc usage() {\n\tfmt.Printf(\"Usage: %s [OPTIONS]\\n\", os.Args[0])\n\tfmt.Printf(\"WebSocket server pluggable transport for Tor.\\n\")\n\tfmt.Printf(\"Works only as a managed proxy.\\n\")\n\tfmt.Printf(\"\\n\")\n\tfmt.Printf(\" -h, --help show this help.\\n\")\n\tfmt.Printf(\" --log FILE log messages to FILE (default stderr).\\n\")\n\tfmt.Printf(\" --port PORT listen on PORT (overrides Tor's requested port).\\n\")\n}\n\nvar logMutex sync.Mutex\n\nfunc log(format string, v ...interface{}) {\n\tdateStr := time.Now().Format(\"2006-01-02 15:04:05\")\n\tlogMutex.Lock()\n\tdefer logMutex.Unlock()\n\tmsg := fmt.Sprintf(format, v...)\n\tfmt.Fprintf(logFile, \"%s %s\\n\", dateStr, msg)\n}\n\n\/\/ An abstraction that makes an underlying WebSocket connection look like an\n\/\/ io.ReadWriteCloser. It internally takes care of things like base64 encoding\n\/\/ and decoding.\ntype webSocketConn struct {\n\tWs *websocket.WebSocket\n\tBase64 bool\n\tmessageBuf []byte\n}\n\n\/\/ Implements io.Reader.\nfunc (conn *webSocketConn) Read(b []byte) (n int, err error) {\n\tfor len(conn.messageBuf) == 0 {\n\t\tvar m websocket.Message\n\t\tm, err = conn.Ws.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif m.Opcode == 8 {\n\t\t\terr = io.EOF\n\t\t\treturn\n\t\t}\n\t\tif conn.Base64 {\n\t\t\tif m.Opcode != 1 {\n\t\t\t\terr = errors.New(fmt.Sprintf(\"got non-text opcode %d with the base64 subprotocol\", m.Opcode))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tconn.messageBuf = make([]byte, base64.StdEncoding.DecodedLen(len(m.Payload)))\n\t\t\tvar num int\n\t\t\tnum, err = base64.StdEncoding.Decode(conn.messageBuf, m.Payload)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tconn.messageBuf = conn.messageBuf[:num]\n\t\t} else {\n\t\t\tif m.Opcode != 2 {\n\t\t\t\terr = errors.New(fmt.Sprintf(\"got non-binary opcode %d with no subprotocol\", m.Opcode))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tconn.messageBuf = m.Payload\n\t\t}\n\t}\n\n\tn = copy(b, conn.messageBuf)\n\tconn.messageBuf = conn.messageBuf[n:]\n\n\treturn\n}\n\n\/\/ Implements io.Writer.\nfunc (conn *webSocketConn) Write(b []byte) (n int, err error) {\n\tif conn.Base64 {\n\t\tbuf := make([]byte, base64.StdEncoding.EncodedLen(len(b)))\n\t\tbase64.StdEncoding.Encode(buf, b)\n\t\terr = conn.Ws.WriteMessage(1, buf)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tn = len(b)\n\t} else {\n\t\terr = conn.Ws.WriteMessage(2, b)\n\t\tn = len(b)\n\t}\n\treturn\n}\n\n\/\/ Implements io.Closer.\nfunc (conn *webSocketConn) Close() error {\n\t\/\/ Ignore any error in trying to write a Close frame.\n\t_ = conn.Ws.WriteFrame(8, nil)\n\treturn conn.Ws.Conn.Close()\n}\n\n\/\/ Create a new webSocketConn.\nfunc newWebSocketConn(ws *websocket.WebSocket) webSocketConn {\n\tvar conn webSocketConn\n\tconn.Ws = ws\n\tconn.Base64 = (ws.Subprotocol == \"base64\")\n\treturn conn\n}\n\n\/\/ Copy from WebSocket to socket and vice versa.\nfunc proxy(local *net.TCPConn, conn *webSocketConn) {\n\tvar wg sync.WaitGroup\n\n\twg.Add(2)\n\n\tgo func() {\n\t\t_, err := io.Copy(conn, local)\n\t\tif err != nil {\n\t\t\tlog(\"error copying ORPort to WebSocket\")\n\t\t}\n\t\tlocal.CloseRead()\n\t\tconn.Close()\n\t\twg.Done()\n\t}()\n\n\tgo func() {\n\t\t_, err := io.Copy(local, conn)\n\t\tif err != nil {\n\t\t\tlog(\"error copying WebSocket to ORPort\")\n\t\t}\n\t\tlocal.CloseWrite()\n\t\tconn.Close()\n\t\twg.Done()\n\t}()\n\n\twg.Wait()\n}\n\nfunc webSocketHandler(ws *websocket.WebSocket) {\n\t\/\/ Undo timeouts on HTTP request handling.\n\tws.Conn.SetDeadline(time.Time{})\n\tconn := newWebSocketConn(ws)\n\n\thandlerChan <- 1\n\tdefer func() {\n\t\thandlerChan <- -1\n\t}()\n\n\ts, err := pt.ConnectOr(&ptInfo, ws.Conn, ptMethodName)\n\tif err != nil {\n\t\tlog(\"Failed to connect to ORPort: \" + err.Error())\n\t\treturn\n\t}\n\n\tproxy(s, &conn)\n}\n\nfunc startListener(addr *net.TCPAddr) (*net.TCPListener, error) {\n\tln, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgo func() {\n\t\tvar config websocket.Config\n\t\tconfig.Subprotocols = []string{\"base64\"}\n\t\tconfig.MaxMessageSize = maxMessageSize\n\t\ts := &http.Server{\n\t\t\tHandler: config.Handler(webSocketHandler),\n\t\t\tReadTimeout: requestTimeout,\n\t\t}\n\t\terr = s.Serve(ln)\n\t\tif err != nil {\n\t\t\tlog(\"http.Serve: \" + err.Error())\n\t\t}\n\t}()\n\treturn ln, nil\n}\n\nfunc main() {\n\tvar logFilename string\n\tvar port int\n\n\tflag.Usage = usage\n\tflag.StringVar(&logFilename, \"log\", \"\", \"log file to write to\")\n\tflag.IntVar(&port, \"port\", 0, \"port to listen on if unspecified by Tor\")\n\tflag.Parse()\n\n\tif logFilename != \"\" {\n\t\tf, err := os.OpenFile(logFilename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Can't open log file %q: %s.\\n\", logFilename, err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t\tlogFile = f\n\t}\n\n\tlog(\"starting\")\n\tptInfo = pt.ServerSetup([]string{ptMethodName})\n\n\tlisteners := make([]*net.TCPListener, 0)\n\tfor _, bindAddr := range ptInfo.BindAddrs {\n\t\t\/\/ Override tor's requested port (which is 0 if this transport\n\t\t\/\/ has not been run before) with the one requested by the --port\n\t\t\/\/ option.\n\t\tif port != 0 {\n\t\t\tbindAddr.Addr.Port = port\n\t\t}\n\n\t\tln, err := startListener(bindAddr.Addr)\n\t\tif err != nil {\n\t\t\tpt.SmethodError(bindAddr.MethodName, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tpt.Smethod(bindAddr.MethodName, ln.Addr())\n\t\tlog(\"listening on %s\", ln.Addr().String())\n\t\tlisteners = append(listeners, ln)\n\t}\n\tpt.SmethodsDone()\n\n\tvar numHandlers int = 0\n\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, os.Interrupt)\n\tvar sigint bool = false\n\tfor !sigint {\n\t\tselect {\n\t\tcase n := <-handlerChan:\n\t\t\tnumHandlers += n\n\t\tcase <-signalChan:\n\t\t\tlog(\"SIGINT\")\n\t\t\tsigint = true\n\t\t}\n\t}\n\n\tfor _, ln := range listeners {\n\t\tln.Close()\n\t}\n\n\tsigint = false\n\tfor numHandlers != 0 && !sigint {\n\t\tselect {\n\t\tcase n := <-handlerChan:\n\t\t\tnumHandlers += n\n\t\tcase <-signalChan:\n\t\t\tlog(\"SIGINT\")\n\t\t\tsigint = true\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package githttp\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n)\n\ntype GitHttp struct {\n\t\/\/ Root directory to serve repos from\n\tProjectRoot string\n\n\t\/\/ Path to git binary\n\tGitBinPath string\n\n\t\/\/ Access rules\n\tUploadPack bool\n\tReceivePack bool\n\n\t\/\/ Event handling functions\n\tEventHandler func(ev Event)\n}\n\n\/\/ Implement the http.Handler interface\nfunc (g *GitHttp) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tg.requestHandler(w, r)\n\treturn\n}\n\n\/\/ Shorthand constructor for most common scenario\nfunc New(root string) *GitHttp {\n\treturn &GitHttp{\n\t\tProjectRoot: root,\n\t\tGitBinPath: \"\/usr\/bin\/git\",\n\t\tUploadPack: true,\n\t\tReceivePack: true,\n\t}\n}\n\n\/\/ Build root directory if doesn't exist\nfunc (g *GitHttp) Init() (*GitHttp, error) {\n\tif err := os.MkdirAll(g.ProjectRoot, os.ModePerm); err != nil {\n\t\treturn nil, err\n\t}\n\treturn g, nil\n}\n\n\/\/ Publish event if EventHandler is set\nfunc (g *GitHttp) event(e Event) {\n\tif g.EventHandler != nil {\n\t\tg.EventHandler(e)\n\t} else {\n\t\tfmt.Printf(\"EVENT: %q\\n\", e)\n\t}\n}\n\n\/\/ Actual command handling functions\n\nfunc (g *GitHttp) serviceRpc(hr HandlerReq) error {\n\tw, r, rpc, dir := hr.w, hr.r, hr.Rpc, hr.Dir\n\n\taccess, err := g.hasAccess(r, dir, rpc, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif access == false {\n\t\treturn &ErrorNoAccess{hr.Dir}\n\t}\n\n\t\/\/ Reader that decompresses if necessary\n\treader, err := requestReader(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Reader that scans for events\n\trpcReader := &RpcReader{\n\t\tReader: reader,\n\t\tRpc: rpc,\n\t}\n\n\t\/\/ Set content type\n\tw.Header().Set(\"Content-Type\", fmt.Sprintf(\"application\/x-git-%s-result\", rpc))\n\n\targs := []string{rpc, \"--stateless-rpc\", \".\"}\n\tcmd := exec.Command(g.GitBinPath, args...)\n\tcmd.Dir = dir\n\tstdin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stdin.Close()\n\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stdout.Close()\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Scan's git command's output for errors\n\tgitReader := &GitReader{\n\t\tReader: stdout,\n\t}\n\n\t\/\/ Copy input to git binary\n\tio.Copy(stdin, rpcReader)\n\n\t\/\/ Write git binary's output to http response\n\tio.Copy(w, gitReader)\n\n\t\/\/ Wait till command has completed\n\tmainError := cmd.Wait()\n\n\tif mainError == nil {\n\t\tmainError = gitReader.GitError\n\t}\n\n\t\/\/ Fire events\n\tfor _, e := range rpcReader.Events {\n\t\t\/\/ Set directory to current repo\n\t\te.Dir = dir\n\t\te.Request = hr.r\n\t\te.Error = mainError\n\n\t\t\/\/ Fire event\n\t\tg.event(e)\n\t}\n\n\t\/\/ May be nil if all is good\n\treturn mainError\n}\n\nfunc (g *GitHttp) getInfoRefs(hr HandlerReq) error {\n\tw, r, dir := hr.w, hr.r, hr.Dir\n\tservice_name := getServiceType(r)\n\taccess, err := g.hasAccess(r, dir, service_name, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !access {\n\t\tg.updateServerInfo(dir)\n\t\thdrNocache(w)\n\t\treturn sendFile(\"text\/plain; charset=utf-8\", hr)\n\t}\n\n\targs := []string{service_name, \"--stateless-rpc\", \"--advertise-refs\", \".\"}\n\trefs, err := g.gitCommand(dir, args...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thdrNocache(w)\n\tw.Header().Set(\"Content-Type\", fmt.Sprintf(\"application\/x-git-%s-advertisement\", service_name))\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(packetWrite(\"# service=git-\" + service_name + \"\\n\"))\n\tw.Write(packetFlush())\n\tw.Write(refs)\n\n\treturn nil\n}\n\nfunc (g *GitHttp) getInfoPacks(hr HandlerReq) error {\n\thdrCacheForever(hr.w)\n\treturn sendFile(\"text\/plain; charset=utf-8\", hr)\n}\n\nfunc (g *GitHttp) getLooseObject(hr HandlerReq) error {\n\thdrCacheForever(hr.w)\n\treturn sendFile(\"application\/x-git-loose-object\", hr)\n}\n\nfunc (g *GitHttp) getPackFile(hr HandlerReq) error {\n\thdrCacheForever(hr.w)\n\treturn sendFile(\"application\/x-git-packed-objects\", hr)\n}\n\nfunc (g *GitHttp) getIdxFile(hr HandlerReq) error {\n\thdrCacheForever(hr.w)\n\treturn sendFile(\"application\/x-git-packed-objects-toc\", hr)\n}\n\nfunc (g *GitHttp) getTextFile(hr HandlerReq) error {\n\thdrNocache(hr.w)\n\treturn sendFile(\"text\/plain\", hr)\n}\n\n\/\/ Logic helping functions\n\nfunc sendFile(content_type string, hr HandlerReq) error {\n\tw, r := hr.w, hr.r\n\treq_file := path.Join(hr.Dir, hr.File)\n\n\tf, err := os.Stat(req_file)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw.Header().Set(\"Content-Type\", content_type)\n\tw.Header().Set(\"Content-Length\", fmt.Sprintf(\"%d\", f.Size()))\n\tw.Header().Set(\"Last-Modified\", f.ModTime().Format(http.TimeFormat))\n\thttp.ServeFile(w, r, req_file)\n\n\treturn nil\n}\n\nfunc (g *GitHttp) getGitDir(file_path string) (string, error) {\n\troot := g.ProjectRoot\n\n\tif root == \"\" {\n\t\tcwd, err := os.Getwd()\n\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\troot = cwd\n\t}\n\n\tf := path.Join(root, file_path)\n\tif _, err := os.Stat(f); os.IsNotExist(err) {\n\t\treturn \"\", err\n\t}\n\n\treturn f, nil\n}\n\nfunc (g *GitHttp) hasAccess(r *http.Request, dir string, rpc string, check_content_type bool) (bool, error) {\n\tif check_content_type {\n\t\tif r.Header.Get(\"Content-Type\") != fmt.Sprintf(\"application\/x-git-%s-request\", rpc) {\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\tif !(rpc == \"upload-pack\" || rpc == \"receive-pack\") {\n\t\treturn false, nil\n\t}\n\tif rpc == \"receive-pack\" {\n\t\treturn g.ReceivePack, nil\n\t}\n\tif rpc == \"upload-pack\" {\n\t\treturn g.UploadPack, nil\n\t}\n\n\treturn g.getConfigSetting(rpc, dir)\n}\n\nfunc (g *GitHttp) getConfigSetting(service_name string, dir string) (bool, error) {\n\tservice_name = strings.Replace(service_name, \"-\", \"\", -1)\n\tsetting, err := g.getGitConfig(\"http.\"+service_name, dir)\n\tif err != nil {\n\t\treturn false, nil\n\t}\n\n\tif service_name == \"uploadpack\" {\n\t\treturn setting != \"false\", nil\n\t}\n\n\treturn setting == \"true\", nil\n}\n\nfunc (g *GitHttp) getGitConfig(config_name string, dir string) (string, error) {\n\targs := []string{\"config\", config_name}\n\tout, err := g.gitCommand(dir, args...)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(out)[0 : len(out)-1], nil\n}\n\nfunc (g *GitHttp) updateServerInfo(dir string) ([]byte, error) {\n\targs := []string{\"update-server-info\"}\n\treturn g.gitCommand(dir, args...)\n}\n\nfunc (g *GitHttp) gitCommand(dir string, args ...string) ([]byte, error) {\n\tcommand := exec.Command(g.GitBinPath, args...)\n\tcommand.Dir = dir\n\n\treturn command.Output()\n}\n<commit_msg>Improve: Close requestReader<commit_after>package githttp\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n)\n\ntype GitHttp struct {\n\t\/\/ Root directory to serve repos from\n\tProjectRoot string\n\n\t\/\/ Path to git binary\n\tGitBinPath string\n\n\t\/\/ Access rules\n\tUploadPack bool\n\tReceivePack bool\n\n\t\/\/ Event handling functions\n\tEventHandler func(ev Event)\n}\n\n\/\/ Implement the http.Handler interface\nfunc (g *GitHttp) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tg.requestHandler(w, r)\n\treturn\n}\n\n\/\/ Shorthand constructor for most common scenario\nfunc New(root string) *GitHttp {\n\treturn &GitHttp{\n\t\tProjectRoot: root,\n\t\tGitBinPath: \"\/usr\/bin\/git\",\n\t\tUploadPack: true,\n\t\tReceivePack: true,\n\t}\n}\n\n\/\/ Build root directory if doesn't exist\nfunc (g *GitHttp) Init() (*GitHttp, error) {\n\tif err := os.MkdirAll(g.ProjectRoot, os.ModePerm); err != nil {\n\t\treturn nil, err\n\t}\n\treturn g, nil\n}\n\n\/\/ Publish event if EventHandler is set\nfunc (g *GitHttp) event(e Event) {\n\tif g.EventHandler != nil {\n\t\tg.EventHandler(e)\n\t} else {\n\t\tfmt.Printf(\"EVENT: %q\\n\", e)\n\t}\n}\n\n\/\/ Actual command handling functions\n\nfunc (g *GitHttp) serviceRpc(hr HandlerReq) error {\n\tw, r, rpc, dir := hr.w, hr.r, hr.Rpc, hr.Dir\n\n\taccess, err := g.hasAccess(r, dir, rpc, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif access == false {\n\t\treturn &ErrorNoAccess{hr.Dir}\n\t}\n\n\t\/\/ Reader that decompresses if necessary\n\treader, err := requestReader(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer reader.Close()\n\n\t\/\/ Reader that scans for events\n\trpcReader := &RpcReader{\n\t\tReader: reader,\n\t\tRpc: rpc,\n\t}\n\n\t\/\/ Set content type\n\tw.Header().Set(\"Content-Type\", fmt.Sprintf(\"application\/x-git-%s-result\", rpc))\n\n\targs := []string{rpc, \"--stateless-rpc\", \".\"}\n\tcmd := exec.Command(g.GitBinPath, args...)\n\tcmd.Dir = dir\n\tstdin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stdin.Close()\n\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stdout.Close()\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Scan's git command's output for errors\n\tgitReader := &GitReader{\n\t\tReader: stdout,\n\t}\n\n\t\/\/ Copy input to git binary\n\tio.Copy(stdin, rpcReader)\n\n\t\/\/ Write git binary's output to http response\n\tio.Copy(w, gitReader)\n\n\t\/\/ Wait till command has completed\n\tmainError := cmd.Wait()\n\n\tif mainError == nil {\n\t\tmainError = gitReader.GitError\n\t}\n\n\t\/\/ Fire events\n\tfor _, e := range rpcReader.Events {\n\t\t\/\/ Set directory to current repo\n\t\te.Dir = dir\n\t\te.Request = hr.r\n\t\te.Error = mainError\n\n\t\t\/\/ Fire event\n\t\tg.event(e)\n\t}\n\n\t\/\/ May be nil if all is good\n\treturn mainError\n}\n\nfunc (g *GitHttp) getInfoRefs(hr HandlerReq) error {\n\tw, r, dir := hr.w, hr.r, hr.Dir\n\tservice_name := getServiceType(r)\n\taccess, err := g.hasAccess(r, dir, service_name, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !access {\n\t\tg.updateServerInfo(dir)\n\t\thdrNocache(w)\n\t\treturn sendFile(\"text\/plain; charset=utf-8\", hr)\n\t}\n\n\targs := []string{service_name, \"--stateless-rpc\", \"--advertise-refs\", \".\"}\n\trefs, err := g.gitCommand(dir, args...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thdrNocache(w)\n\tw.Header().Set(\"Content-Type\", fmt.Sprintf(\"application\/x-git-%s-advertisement\", service_name))\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(packetWrite(\"# service=git-\" + service_name + \"\\n\"))\n\tw.Write(packetFlush())\n\tw.Write(refs)\n\n\treturn nil\n}\n\nfunc (g *GitHttp) getInfoPacks(hr HandlerReq) error {\n\thdrCacheForever(hr.w)\n\treturn sendFile(\"text\/plain; charset=utf-8\", hr)\n}\n\nfunc (g *GitHttp) getLooseObject(hr HandlerReq) error {\n\thdrCacheForever(hr.w)\n\treturn sendFile(\"application\/x-git-loose-object\", hr)\n}\n\nfunc (g *GitHttp) getPackFile(hr HandlerReq) error {\n\thdrCacheForever(hr.w)\n\treturn sendFile(\"application\/x-git-packed-objects\", hr)\n}\n\nfunc (g *GitHttp) getIdxFile(hr HandlerReq) error {\n\thdrCacheForever(hr.w)\n\treturn sendFile(\"application\/x-git-packed-objects-toc\", hr)\n}\n\nfunc (g *GitHttp) getTextFile(hr HandlerReq) error {\n\thdrNocache(hr.w)\n\treturn sendFile(\"text\/plain\", hr)\n}\n\n\/\/ Logic helping functions\n\nfunc sendFile(content_type string, hr HandlerReq) error {\n\tw, r := hr.w, hr.r\n\treq_file := path.Join(hr.Dir, hr.File)\n\n\tf, err := os.Stat(req_file)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw.Header().Set(\"Content-Type\", content_type)\n\tw.Header().Set(\"Content-Length\", fmt.Sprintf(\"%d\", f.Size()))\n\tw.Header().Set(\"Last-Modified\", f.ModTime().Format(http.TimeFormat))\n\thttp.ServeFile(w, r, req_file)\n\n\treturn nil\n}\n\nfunc (g *GitHttp) getGitDir(file_path string) (string, error) {\n\troot := g.ProjectRoot\n\n\tif root == \"\" {\n\t\tcwd, err := os.Getwd()\n\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\troot = cwd\n\t}\n\n\tf := path.Join(root, file_path)\n\tif _, err := os.Stat(f); os.IsNotExist(err) {\n\t\treturn \"\", err\n\t}\n\n\treturn f, nil\n}\n\nfunc (g *GitHttp) hasAccess(r *http.Request, dir string, rpc string, check_content_type bool) (bool, error) {\n\tif check_content_type {\n\t\tif r.Header.Get(\"Content-Type\") != fmt.Sprintf(\"application\/x-git-%s-request\", rpc) {\n\t\t\treturn false, nil\n\t\t}\n\t}\n\n\tif !(rpc == \"upload-pack\" || rpc == \"receive-pack\") {\n\t\treturn false, nil\n\t}\n\tif rpc == \"receive-pack\" {\n\t\treturn g.ReceivePack, nil\n\t}\n\tif rpc == \"upload-pack\" {\n\t\treturn g.UploadPack, nil\n\t}\n\n\treturn g.getConfigSetting(rpc, dir)\n}\n\nfunc (g *GitHttp) getConfigSetting(service_name string, dir string) (bool, error) {\n\tservice_name = strings.Replace(service_name, \"-\", \"\", -1)\n\tsetting, err := g.getGitConfig(\"http.\"+service_name, dir)\n\tif err != nil {\n\t\treturn false, nil\n\t}\n\n\tif service_name == \"uploadpack\" {\n\t\treturn setting != \"false\", nil\n\t}\n\n\treturn setting == \"true\", nil\n}\n\nfunc (g *GitHttp) getGitConfig(config_name string, dir string) (string, error) {\n\targs := []string{\"config\", config_name}\n\tout, err := g.gitCommand(dir, args...)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(out)[0 : len(out)-1], nil\n}\n\nfunc (g *GitHttp) updateServerInfo(dir string) ([]byte, error) {\n\targs := []string{\"update-server-info\"}\n\treturn g.gitCommand(dir, args...)\n}\n\nfunc (g *GitHttp) gitCommand(dir string, args ...string) ([]byte, error) {\n\tcommand := exec.Command(g.GitBinPath, args...)\n\tcommand.Dir = dir\n\n\treturn command.Output()\n}\n<|endoftext|>"} {"text":"<commit_before>package gitmock\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/Masterminds\/semver\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/go:generate go run _tools\/gen.go\n\/\/ New returns new GitMock\nfunc New(opts ...string) (*GitMock, error) {\n\tgit := \"git\"\n\tif len(opts) > 0 {\n\t\tgit = opts[0]\n\t}\n\n\tcmd := exec.Command(git, \"version\")\n\tcmd.Env = append(os.Environ(), \"LANG=C\")\n\tvar b bytes.Buffer\n\tcmd.Stdout = &b\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create new GitMock\")\n\t}\n\tgitVer := b.String()\n\tarr := strings.Fields(gitVer)\n\tif len(arr) != 3 || arr[0] != \"git\" || arr[1] != \"version\" {\n\t\treturn nil, fmt.Errorf(\"output of `git version` looks strange: %s\", gitVer)\n\t}\n\tver, err := semver.NewVersion(arr[2])\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"git version looks strange\")\n\t}\n\tc, _ := semver.NewConstraint(\">= 1.8.5\")\n\tif !c.Check(ver) {\n\t\treturn nil, fmt.Errorf(\"git 1.8.5 or later required.\")\n\t}\n\n\tuser := \"\"\n\terr = exec.Command(git, \"config\", \"user.name\").Run()\n\tif err != nil {\n\t\tuser = \"gomock\"\n\t}\n\temail := \"\"\n\terr = exec.Command(git, \"config\", \"user.email\").Run()\n\tif err != nil {\n\t\temail = \"gomock@example.com\"\n\t}\n\n\treturn &GitMock{\n\t\tgitPath: git,\n\t\tuser: user,\n\t\temail: email,\n\t}, nil\n}\n\n\/\/ GitMock is git mock repository\ntype GitMock struct {\n\trepoPath string\n\tgitPath string\n\tuser string\n\temail string\n}\n\n\/\/ RepoPath returns repository path\nfunc (gm *GitMock) RepoPath() string {\n\tif gm.repoPath == \"\" {\n\t\tdir, err := ioutil.TempDir(\"\", \"gitmock\")\n\t\tif err != nil {\n\t\t\tlog.Fatal(err.Error())\n\t\t}\n\t\tgm.repoPath = dir\n\t}\n\treturn gm.repoPath\n}\n\nfunc (gm *GitMock) gitProg() string {\n\tif gm.gitPath != \"\" {\n\t\treturn gm.gitPath\n\t}\n\treturn \"git\"\n}\n\nfunc (gm *GitMock) env() (ret []string) {\n\tif gm.user != \"\" {\n\t\tenvs := []string{\"GIT_AUTHOR_NAME\", \"GIT_COMMITER_NAME\"}\n\t\tfor _, v := range envs {\n\t\t\tif env := os.Getenv(v); env == \"\" {\n\t\t\t\tret = append(ret, fmt.Sprintf(\"%s=%s\", v, gm.user))\n\t\t\t}\n\t\t}\n\t}\n\tif gm.email != \"\" {\n\t\tenvs := []string{\"GIT_AUTHOR_EMAIL\", \"GIT_COMMITER_EMAIL\"}\n\t\tfor _, v := range envs {\n\t\t\tif env := os.Getenv(v); env == \"\" {\n\t\t\t\tret = append(ret, fmt.Sprintf(\"%s=%s\", v, gm.email))\n\t\t\t}\n\t\t}\n\t}\n\treturn ret\n}\n\n\/\/ Do the git command\nfunc (gm *GitMock) Do(args ...string) (string, string, error) {\n\targ := []string{\"-C\", gm.RepoPath()}\n\targ = append(arg, args...)\n\tcmd := exec.Command(gm.gitProg(), arg...)\n\tcmd.Env = append(os.Environ(), \"LANG=C\")\n\tenv := gm.env()\n\tif len(env) > 0 {\n\t\tcmd.Env = append(cmd.Env, env...)\n\t}\n\tvar bout, berr bytes.Buffer\n\tcmd.Stdout = &bout\n\tcmd.Stderr = &berr\n\terr := cmd.Run()\n\treturn bout.String(), berr.String(), err\n}\n\n\/\/ PutFile put a file to repo\nfunc (gm *GitMock) PutFile(file, content string) error {\n\trepo := gm.RepoPath()\n\tfpath := filepath.Join(repo, file)\n\terr := os.MkdirAll(filepath.Dir(fpath), 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc := []byte(content)\n\treturn ioutil.WriteFile(fpath, c, 0644)\n}\n<commit_msg>adjust comment<commit_after>package gitmock\n\n\/\/go:generate go run _tools\/gen.go\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/Masterminds\/semver\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ New returns new GitMock\nfunc New(opts ...string) (*GitMock, error) {\n\tgit := \"git\"\n\tif len(opts) > 0 {\n\t\tgit = opts[0]\n\t}\n\n\tcmd := exec.Command(git, \"version\")\n\tcmd.Env = append(os.Environ(), \"LANG=C\")\n\tvar b bytes.Buffer\n\tcmd.Stdout = &b\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create new GitMock\")\n\t}\n\tgitVer := b.String()\n\tarr := strings.Fields(gitVer)\n\tif len(arr) != 3 || arr[0] != \"git\" || arr[1] != \"version\" {\n\t\treturn nil, fmt.Errorf(\"output of `git version` looks strange: %s\", gitVer)\n\t}\n\tver, err := semver.NewVersion(arr[2])\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"git version looks strange\")\n\t}\n\tc, _ := semver.NewConstraint(\">= 1.8.5\")\n\tif !c.Check(ver) {\n\t\treturn nil, fmt.Errorf(\"git 1.8.5 or later required.\")\n\t}\n\n\tuser := \"\"\n\terr = exec.Command(git, \"config\", \"user.name\").Run()\n\tif err != nil {\n\t\tuser = \"gomock\"\n\t}\n\temail := \"\"\n\terr = exec.Command(git, \"config\", \"user.email\").Run()\n\tif err != nil {\n\t\temail = \"gomock@example.com\"\n\t}\n\n\treturn &GitMock{\n\t\tgitPath: git,\n\t\tuser: user,\n\t\temail: email,\n\t}, nil\n}\n\n\/\/ GitMock is git mock repository\ntype GitMock struct {\n\trepoPath string\n\tgitPath string\n\tuser string\n\temail string\n}\n\n\/\/ RepoPath returns repository path\nfunc (gm *GitMock) RepoPath() string {\n\tif gm.repoPath == \"\" {\n\t\tdir, err := ioutil.TempDir(\"\", \"gitmock\")\n\t\tif err != nil {\n\t\t\tlog.Fatal(err.Error())\n\t\t}\n\t\tgm.repoPath = dir\n\t}\n\treturn gm.repoPath\n}\n\nfunc (gm *GitMock) gitProg() string {\n\tif gm.gitPath != \"\" {\n\t\treturn gm.gitPath\n\t}\n\treturn \"git\"\n}\n\nfunc (gm *GitMock) env() (ret []string) {\n\tif gm.user != \"\" {\n\t\tenvs := []string{\"GIT_AUTHOR_NAME\", \"GIT_COMMITER_NAME\"}\n\t\tfor _, v := range envs {\n\t\t\tif env := os.Getenv(v); env == \"\" {\n\t\t\t\tret = append(ret, fmt.Sprintf(\"%s=%s\", v, gm.user))\n\t\t\t}\n\t\t}\n\t}\n\tif gm.email != \"\" {\n\t\tenvs := []string{\"GIT_AUTHOR_EMAIL\", \"GIT_COMMITER_EMAIL\"}\n\t\tfor _, v := range envs {\n\t\t\tif env := os.Getenv(v); env == \"\" {\n\t\t\t\tret = append(ret, fmt.Sprintf(\"%s=%s\", v, gm.email))\n\t\t\t}\n\t\t}\n\t}\n\treturn ret\n}\n\n\/\/ Do the git command\nfunc (gm *GitMock) Do(args ...string) (string, string, error) {\n\targ := []string{\"-C\", gm.RepoPath()}\n\targ = append(arg, args...)\n\tcmd := exec.Command(gm.gitProg(), arg...)\n\tcmd.Env = append(os.Environ(), \"LANG=C\")\n\tenv := gm.env()\n\tif len(env) > 0 {\n\t\tcmd.Env = append(cmd.Env, env...)\n\t}\n\tvar bout, berr bytes.Buffer\n\tcmd.Stdout = &bout\n\tcmd.Stderr = &berr\n\terr := cmd.Run()\n\treturn bout.String(), berr.String(), err\n}\n\n\/\/ PutFile put a file to repo\nfunc (gm *GitMock) PutFile(file, content string) error {\n\trepo := gm.RepoPath()\n\tfpath := filepath.Join(repo, file)\n\terr := os.MkdirAll(filepath.Dir(fpath), 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc := []byte(content)\n\treturn ioutil.WriteFile(fpath, c, 0644)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2016 The Syncthing Authors.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at https:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage fs\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n)\n\nvar errNoHome = errors.New(\"no home directory found - set $HOME (or the platform equivalent)\")\n\nfunc ExpandTilde(path string) (string, error) {\n\tif path == \"~\" {\n\t\treturn getHomeDir()\n\t}\n\n\tpath = filepath.FromSlash(path)\n\tif !strings.HasPrefix(path, fmt.Sprintf(\"~%c\", PathSeparator)) {\n\t\treturn path, nil\n\t}\n\n\thome, err := getHomeDir()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(home, path[2:]), nil\n}\n\nfunc getHomeDir() (string, error) {\n\tif runtime.GOOS == \"windows\" {\n\t\t\/\/ Legacy -- we prioritize this for historical reasons, whereas\n\t\t\/\/ os.UserHomeDir uses %USERPROFILE% always.\n\t\thome := filepath.Join(os.Getenv(\"HomeDrive\"), os.Getenv(\"HomePath\"))\n\t\tif home != \"\" {\n\t\t\treturn home, nil\n\t\t}\n\t}\n\n\treturn os.UserHomeDir()\n}\n\nvar windowsDisallowedCharacters = string([]rune{\n\t'<', '>', ':', '\"', '|', '?', '*',\n\t0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,\n\t11, 12, 13, 14, 15, 16, 17, 18, 19, 20,\n\t21, 22, 23, 24, 25, 26, 27, 28, 29, 30,\n\t31,\n})\n\nfunc WindowsInvalidFilename(name string) bool {\n\t\/\/ None of the path components should end in space\n\tfor _, part := range strings.Split(name, `\\`) {\n\t\tif len(part) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif part[len(part)-1] == ' ' {\n\t\t\t\/\/ Names ending in space are not valid.\n\t\t\treturn true\n\t\t}\n\t}\n\n\t\/\/ The path must not contain any disallowed characters\n\treturn strings.ContainsAny(name, windowsDisallowedCharacters)\n}\n\n\/\/ IsParent compares paths purely lexicographically, meaning it returns false\n\/\/ if path and parent aren't both absolute or relative.\nfunc IsParent(path, parent string) bool {\n\tif parent == path {\n\t\t\/\/ Twice the same root on windows would not be caught at the end.\n\t\treturn false\n\t}\n\tif filepath.IsAbs(path) != filepath.IsAbs(parent) {\n\t\treturn false\n\t}\n\tif parent == \"\" || parent == \".\" {\n\t\t\/\/ The empty string is the parent of everything except the empty\n\t\t\/\/ string and \".\". (Avoids panic in the last step.)\n\t\treturn path != \"\" && path != \".\"\n\t}\n\tif parent == \"\/\" {\n\t\t\/\/ The root is the parent of everything except itself, which would\n\t\t\/\/ not be caught below.\n\t\treturn path != \"\/\"\n\t}\n\tif parent[len(parent)-1] != PathSeparator {\n\t\tparent += string(PathSeparator)\n\t}\n\treturn strings.HasPrefix(path, parent)\n}\n\nfunc CommonPrefix(first, second string) string {\n\tif filepath.IsAbs(first) != filepath.IsAbs(second) {\n\t\t\/\/ Whatever\n\t\treturn \"\"\n\t}\n\n\tfirstParts := strings.Split(filepath.Clean(first), string(PathSeparator))\n\tsecondParts := strings.Split(filepath.Clean(second), string(PathSeparator))\n\n\tisAbs := filepath.IsAbs(first) && filepath.IsAbs(second)\n\n\tcount := len(firstParts)\n\tif len(secondParts) < len(firstParts) {\n\t\tcount = len(secondParts)\n\t}\n\n\tcommon := make([]string, 0, count)\n\tfor i := 0; i < count; i++ {\n\t\tif firstParts[i] != secondParts[i] {\n\t\t\tbreak\n\t\t}\n\t\tcommon = append(common, firstParts[i])\n\t}\n\n\tif isAbs {\n\t\tif runtime.GOOS == \"windows\" && isVolumeNameOnly(common) {\n\t\t\t\/\/ Because strings.Split strips out path separators, if we're at the volume name, we end up without a separator\n\t\t\t\/\/ Wedge an empty element to be joined with.\n\t\t\tcommon = append(common, \"\")\n\t\t} else if len(common) == 1 {\n\t\t\t\/\/ If isAbs on non Windows, first element in both first and second is \"\", hence joining that returns nothing.\n\t\t\treturn string(PathSeparator)\n\t\t}\n\t}\n\n\t\/\/ This should only be true on Windows when drive letters are different or when paths are relative.\n\t\/\/ In case of UNC paths we should end up with more than a single element hence joining is fine\n\tif len(common) == 0 {\n\t\treturn \"\"\n\t}\n\n\t\/\/ This has to be strings.Join, because filepath.Join([]string{\"\", \"\", \"?\", \"C:\", \"Audrius\"}...) returns garbage\n\tresult := strings.Join(common, string(PathSeparator))\n\treturn filepath.Clean(result)\n}\n\nfunc isVolumeNameOnly(parts []string) bool {\n\tisNormalVolumeName := len(parts) == 1 && strings.HasSuffix(parts[0], \":\")\n\tisUNCVolumeName := len(parts) == 4 && strings.HasSuffix(parts[3], \":\")\n\treturn isNormalVolumeName || isUNCVolumeName\n}\n<commit_msg>lib\/model: Disallow reserved names, and names ending with period on Windows (fixes #7008) (#7010)<commit_after>\/\/ Copyright (C) 2016 The Syncthing Authors.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at https:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage fs\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n)\n\nvar errNoHome = errors.New(\"no home directory found - set $HOME (or the platform equivalent)\")\n\nfunc ExpandTilde(path string) (string, error) {\n\tif path == \"~\" {\n\t\treturn getHomeDir()\n\t}\n\n\tpath = filepath.FromSlash(path)\n\tif !strings.HasPrefix(path, fmt.Sprintf(\"~%c\", PathSeparator)) {\n\t\treturn path, nil\n\t}\n\n\thome, err := getHomeDir()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(home, path[2:]), nil\n}\n\nfunc getHomeDir() (string, error) {\n\tif runtime.GOOS == \"windows\" {\n\t\t\/\/ Legacy -- we prioritize this for historical reasons, whereas\n\t\t\/\/ os.UserHomeDir uses %USERPROFILE% always.\n\t\thome := filepath.Join(os.Getenv(\"HomeDrive\"), os.Getenv(\"HomePath\"))\n\t\tif home != \"\" {\n\t\t\treturn home, nil\n\t\t}\n\t}\n\n\treturn os.UserHomeDir()\n}\n\nvar windowsDisallowedCharacters = string([]rune{\n\t'<', '>', ':', '\"', '|', '?', '*',\n\t0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,\n\t11, 12, 13, 14, 15, 16, 17, 18, 19, 20,\n\t21, 22, 23, 24, 25, 26, 27, 28, 29, 30,\n\t31,\n})\n\nfunc WindowsInvalidFilename(name string) bool {\n\t\/\/ None of the path components should end in space or period, or be a\n\t\/\/ reserved name.\n\t\/\/ (https:\/\/docs.microsoft.com\/en-us\/windows\/win32\/fileio\/naming-a-file)\n\tfor _, part := range strings.Split(name, `\\`) {\n\t\tif len(part) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tswitch part[len(part)-1] {\n\t\tcase ' ', '.':\n\t\t\t\/\/ Names ending in space or period are not valid.\n\t\t\treturn true\n\t\t}\n\t\tswitch part {\n\t\tcase \"CON\", \"PRN\", \"AUX\", \"NUL\",\n\t\t\t\"COM1\", \"COM2\", \"COM3\", \"COM4\", \"COM5\", \"COM6\", \"COM7\", \"COM8\", \"COM9\",\n\t\t\t\"LPT1\", \"LPT2\", \"LPT3\", \"LPT4\", \"LPT5\", \"LPT6\", \"LPT7\", \"LPT8\", \"LPT9\":\n\t\t\t\/\/ These reserved names are not valid.\n\t\t\treturn true\n\t\t}\n\t}\n\n\t\/\/ The path must not contain any disallowed characters\n\treturn strings.ContainsAny(name, windowsDisallowedCharacters)\n}\n\n\/\/ IsParent compares paths purely lexicographically, meaning it returns false\n\/\/ if path and parent aren't both absolute or relative.\nfunc IsParent(path, parent string) bool {\n\tif parent == path {\n\t\t\/\/ Twice the same root on windows would not be caught at the end.\n\t\treturn false\n\t}\n\tif filepath.IsAbs(path) != filepath.IsAbs(parent) {\n\t\treturn false\n\t}\n\tif parent == \"\" || parent == \".\" {\n\t\t\/\/ The empty string is the parent of everything except the empty\n\t\t\/\/ string and \".\". (Avoids panic in the last step.)\n\t\treturn path != \"\" && path != \".\"\n\t}\n\tif parent == \"\/\" {\n\t\t\/\/ The root is the parent of everything except itself, which would\n\t\t\/\/ not be caught below.\n\t\treturn path != \"\/\"\n\t}\n\tif parent[len(parent)-1] != PathSeparator {\n\t\tparent += string(PathSeparator)\n\t}\n\treturn strings.HasPrefix(path, parent)\n}\n\nfunc CommonPrefix(first, second string) string {\n\tif filepath.IsAbs(first) != filepath.IsAbs(second) {\n\t\t\/\/ Whatever\n\t\treturn \"\"\n\t}\n\n\tfirstParts := strings.Split(filepath.Clean(first), string(PathSeparator))\n\tsecondParts := strings.Split(filepath.Clean(second), string(PathSeparator))\n\n\tisAbs := filepath.IsAbs(first) && filepath.IsAbs(second)\n\n\tcount := len(firstParts)\n\tif len(secondParts) < len(firstParts) {\n\t\tcount = len(secondParts)\n\t}\n\n\tcommon := make([]string, 0, count)\n\tfor i := 0; i < count; i++ {\n\t\tif firstParts[i] != secondParts[i] {\n\t\t\tbreak\n\t\t}\n\t\tcommon = append(common, firstParts[i])\n\t}\n\n\tif isAbs {\n\t\tif runtime.GOOS == \"windows\" && isVolumeNameOnly(common) {\n\t\t\t\/\/ Because strings.Split strips out path separators, if we're at the volume name, we end up without a separator\n\t\t\t\/\/ Wedge an empty element to be joined with.\n\t\t\tcommon = append(common, \"\")\n\t\t} else if len(common) == 1 {\n\t\t\t\/\/ If isAbs on non Windows, first element in both first and second is \"\", hence joining that returns nothing.\n\t\t\treturn string(PathSeparator)\n\t\t}\n\t}\n\n\t\/\/ This should only be true on Windows when drive letters are different or when paths are relative.\n\t\/\/ In case of UNC paths we should end up with more than a single element hence joining is fine\n\tif len(common) == 0 {\n\t\treturn \"\"\n\t}\n\n\t\/\/ This has to be strings.Join, because filepath.Join([]string{\"\", \"\", \"?\", \"C:\", \"Audrius\"}...) returns garbage\n\tresult := strings.Join(common, string(PathSeparator))\n\treturn filepath.Clean(result)\n}\n\nfunc isVolumeNameOnly(parts []string) bool {\n\tisNormalVolumeName := len(parts) == 1 && strings.HasSuffix(parts[0], \":\")\n\tisUNCVolumeName := len(parts) == 4 && strings.HasSuffix(parts[3], \":\")\n\treturn isNormalVolumeName || isUNCVolumeName\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2015, Peter Mrekaj. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE.txt file.\n\npackage search\n\n\/\/ MinMax returns the min and max values from an.\n\/\/ The time complexity is O(n). The O(1) additional space is needed.\nfunc MinMax(an []int) (min, max int) {\n\tif len(an) == 0 {\n\t\treturn 0, 0\n\t}\n\n\tmin, max = an[0], an[0]\n\tfor _, v := range an { \/\/ We start iterate again from index 0 to avoid array bounds checking and thus improve performance.\n\t\tif v < min {\n\t\t\tmin = v\n\t\t} else if v > max {\n\t\t\tmax = v\n\t\t}\n\t}\n\treturn min, max\n}\n\n\/\/ compare returns a, b if a < b, otherwise b, a.\nfunc compare(a, b int) (min, max int) {\n\tif a < b {\n\t\treturn a, b\n\t}\n\treturn b, a\n}\n\n\/\/ MinMaxAlt is an alternative function to MinMax for searching min and max in an.\n\/\/ The time complexity is O(n). The O(1) additional space is needed.\n\/\/ The time complexity hidden constant is more significant here than in the MaxMin.\nfunc MinMaxAlt(an []int) (min, max int) {\n\tswitch len(an) {\n\tcase 0:\n\t\treturn 0, 0\n\tcase 1:\n\t\treturn an[0], an[0]\n\t}\n\n\tmin, max = compare(an[0], an[1])\n\tfor i := 3; i < len(an); i += 2 { \/\/ Compare 3\/2n - 2 times.\n\t\tnMin, nMax := compare(an[i-1], an[i])\n\t\tmin, _ = compare(nMin, min)\n\t\t_, max = compare(nMax, max)\n\t}\n\n\tif len(an) != 0 { \/\/ We need to compare the last element if length of an is odd.\n\t\tv := an[len(an)-1]\n\t\tif v < min {\n\t\t\tmin = v\n\t\t} else if v > max {\n\t\t\tmax = v\n\t\t}\n\t}\n\n\treturn min, max\n}\n<commit_msg>Fix unnecessary comparison at the end of search.MinMaxAlt function<commit_after>\/\/ Copyright (c) 2015, Peter Mrekaj. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE.txt file.\n\npackage search\n\n\/\/ MinMax returns the min and max values from an.\n\/\/ The time complexity is O(n). The O(1) additional space is needed.\nfunc MinMax(an []int) (min, max int) {\n\tif len(an) == 0 {\n\t\treturn 0, 0\n\t}\n\n\tmin, max = an[0], an[0]\n\tfor _, v := range an { \/\/ We start iterate again from index 0 to avoid array bounds checking and thus improve performance.\n\t\tif v < min {\n\t\t\tmin = v\n\t\t} else if v > max {\n\t\t\tmax = v\n\t\t}\n\t}\n\treturn min, max\n}\n\n\/\/ compare returns a, b if a < b, otherwise b, a.\nfunc compare(a, b int) (min, max int) {\n\tif a < b {\n\t\treturn a, b\n\t}\n\treturn b, a\n}\n\n\/\/ MinMaxAlt is an alternative function to MinMax for searching min and max in an.\n\/\/ The time complexity is O(n). The O(1) additional space is needed.\n\/\/ The time complexity hidden constant is more significant here than in the MaxMin.\nfunc MinMaxAlt(an []int) (min, max int) {\n\tswitch len(an) {\n\tcase 0:\n\t\treturn 0, 0\n\tcase 1:\n\t\treturn an[0], an[0]\n\t}\n\n\tmin, max = compare(an[0], an[1])\n\tfor i := 3; i < len(an); i += 2 { \/\/ Compare 3\/2n - 2 times.\n\t\tnMin, nMax := compare(an[i-1], an[i])\n\t\tmin, _ = compare(nMin, min)\n\t\t_, max = compare(nMax, max)\n\t}\n\n\tif len(an)%2 != 0 { \/\/ We need to compare the last element if length of an is odd.\n\t\tv := an[len(an)-1]\n\t\tif v < min {\n\t\t\tmin = v\n\t\t} else if v > max {\n\t\t\tmax = v\n\t\t}\n\t}\n\n\treturn min, max\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Ferret\n * Copyright (c) 2016 Yieldbot, Inc.\n * For the full copyright and license information, please view the LICENSE.txt file.\n *\/\n\n\/\/ Package search provides search interface and functionality\npackage search\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strconv\"\n\t\"time\"\n\n\tprov \"github.com\/yieldbot\/ferret\/providers\"\n\t\"github.com\/yieldbot\/gocli\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar (\n\tgoCommand = \"open\"\n\tsearchTimeout = \"5000ms\"\n\tproviders = make(map[string]Provider)\n)\n\nfunc init() {\n\tif e := os.Getenv(\"FERRET_GOTO_CMD\"); e != \"\" {\n\t\tgoCommand = e\n\t}\n\tif e := os.Getenv(\"FERRET_SEARCH_TIMEOUT\"); e != \"\" {\n\t\tsearchTimeout = e\n\t}\n\n\tprov.Register(Register)\n}\n\n\/\/ Provider represents a provider\ntype Provider struct {\n\tName string\n\tTitle string\n\tEnabled bool\n\tNoui bool\n\tPriority int64\n\tDoer\n}\n\n\/\/ Providers returns a sorted list of the names of the providers\nfunc Providers() []string {\n\tl := []string{}\n\tfor n := range providers {\n\t\tl = append(l, n)\n\t}\n\tsort.Strings(l)\n\treturn l\n}\n\n\/\/ ProviderByName returns a provider by the given name\nfunc ProviderByName(name string) (Provider, error) {\n\tp, ok := providers[name]\n\tif !ok {\n\t\treturn p, errors.New(\"provider \" + name + \" couldn't be found\")\n\t}\n\treturn p, nil\n}\n\n\/\/ Doer is the interface that must be implemented by a search provider\ntype Doer interface {\n\t\/\/ Search makes a search\n\tSearch(ctx context.Context, args map[string]interface{}) ([]map[string]interface{}, error)\n}\n\n\/\/ Register registers a search provider\nfunc Register(provider interface{}) error {\n\n\t\/\/ Init provider\n\tp, ok := provider.(Doer)\n\tif !ok {\n\t\treturn errors.New(\"invalid provider\")\n\t}\n\n\tvar name, title string\n\tvar enabled, noui bool\n\tvar priority int64\n\n\t\/\/ Get the value of the provider\n\tv := reflect.Indirect(reflect.ValueOf(p))\n\t\/\/ Iterate the provider fields\n\tfor i := 0; i < v.NumField(); i++ {\n\t\tfn := v.Type().Field(i).Name\n\t\tft := v.Field(i).Type().Name()\n\n\t\tif fn == \"name\" && ft == \"string\" {\n\t\t\tname = v.Field(i).String()\n\t\t} else if fn == \"title\" && ft == \"string\" {\n\t\t\ttitle = v.Field(i).String()\n\t\t} else if fn == \"enabled\" && ft == \"bool\" {\n\t\t\tenabled = v.Field(i).Bool()\n\t\t} else if fn == \"noui\" && ft == \"bool\" {\n\t\t\tnoui = v.Field(i).Bool()\n\t\t} else if fn == \"priority\" && ft == \"int64\" {\n\t\t\tpriority = v.Field(i).Int()\n\t\t}\n\t}\n\tif name == \"\" {\n\t\treturn errors.New(\"invalid provider name\")\n\t}\n\tif title == \"\" {\n\t\ttitle = name\n\t}\n\n\t\/\/ Init provider\n\tif _, ok := providers[name]; ok {\n\t\treturn errors.New(\"search provider \" + name + \" is already registered\")\n\t}\n\tnp := Provider{\n\t\tName: name,\n\t\tTitle: title,\n\t\tEnabled: enabled,\n\t\tNoui: noui,\n\t\tPriority: priority,\n\t\tDoer: p,\n\t}\n\tproviders[name] = np\n\n\treturn nil\n}\n\n\/\/ Do makes a search query by the given query\nfunc Do(query Query) (Query, error) {\n\n\t\/\/ Provider\n\tprovider, ok := providers[query.Provider]\n\tif !ok {\n\t\tquery.HTTPStatus = http.StatusBadRequest\n\t\treturn query, fmt.Errorf(\"invalid search provider. Possible search providers are %s\", Providers())\n\t}\n\n\t\/\/ Keyword\n\tif query.Keyword == \"\" {\n\t\tquery.HTTPStatus = http.StatusBadRequest\n\t\treturn query, errors.New(\"missing keyword\")\n\t}\n\n\t\/\/ Page\n\tif query.Page <= 0 {\n\t\tquery.HTTPStatus = http.StatusBadRequest\n\t\treturn query, errors.New(\"invalid page #. It should be greater than 0\")\n\t}\n\n\t\/\/ Limit\n\tif query.Limit <= 0 {\n\t\tquery.HTTPStatus = http.StatusBadRequest\n\t\treturn query, errors.New(\"invalid limit. It should be greater than 0\")\n\t}\n\n\t\/\/ Search\n\tquery.Start = time.Now()\n\tctx, cancel := context.WithTimeout(context.Background(), query.Timeout)\n\tdefer cancel()\n\tsq := map[string]interface{}{\"page\": query.Page, \"limit\": query.Limit, \"keyword\": query.Keyword}\n\tsr, err := provider.Search(ctx, sq)\n\tif err != nil {\n\t\tif err == context.DeadlineExceeded {\n\t\t\tquery.HTTPStatus = http.StatusGatewayTimeout\n\t\t\treturn query, errors.New(\"timeout\")\n\t\t} else if err == context.Canceled {\n\t\t\tquery.HTTPStatus = http.StatusInternalServerError\n\t\t\treturn query, errors.New(\"canceled\")\n\t\t}\n\t\tquery.HTTPStatus = http.StatusInternalServerError\n\t\treturn query, errors.New(\"failed to search due to \" + err.Error())\n\t}\n\tquery.Elapsed = time.Since(query.Start)\n\tfor _, srv := range sr {\n\t\tvar d string\n\t\tif _, ok := srv[\"Description\"]; ok {\n\t\t\td = srv[\"Description\"].(string)\n\t\t}\n\n\t\tvar t time.Time\n\t\tif _, ok := srv[\"Date\"]; ok {\n\t\t\tt = srv[\"Date\"].(time.Time)\n\t\t}\n\n\t\tquery.Results = append(query.Results, Result{\n\t\t\tLink: srv[\"Link\"].(string),\n\t\t\tTitle: srv[\"Title\"].(string),\n\t\t\tDescription: d,\n\t\t\tDate: t,\n\t\t\tFrom: provider.Title,\n\t\t})\n\t}\n\n\t\/\/ Goto\n\tif query.Goto != 0 {\n\t\tif query.Goto < 0 || query.Goto > len(query.Results) {\n\t\t\treturn query, fmt.Errorf(\"invalid result # to go. It should be between 1 and %d\", len(query.Results))\n\t\t}\n\t\tlink := query.Results[query.Goto-1].Link\n\t\tif _, err = exec.Command(goCommand, link).Output(); err != nil {\n\t\t\treturn query, fmt.Errorf(\"failed to go to %s due to %s. Check FERRET_GOTO_CMD environment variable\", link, err.Error())\n\t\t}\n\t\treturn query, nil\n\t}\n\n\treturn query, nil\n}\n\n\/\/ PrintResults prints the given search results\nfunc PrintResults(query Query, err error) {\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tif query.Goto == 0 {\n\t\tt := gocli.Table{}\n\t\tt.AddRow(1, \"#\", \"TITLE\")\n\t\tfor i, v := range query.Results {\n\t\t\tts := \"\"\n\t\t\tif !v.Date.IsZero() {\n\t\t\t\tts = fmt.Sprintf(\" (%d-%02d-%02d)\", v.Date.Year(), v.Date.Month(), v.Date.Day())\n\t\t\t}\n\t\t\tt.AddRow(i+2, fmt.Sprintf(\"%d\", i+1), fmt.Sprintf(\"%s%s\", v.Title, ts))\n\t\t}\n\t\tt.PrintData()\n\t\tfmt.Printf(\"\\n%dms\\n\", int64(query.Elapsed\/time.Millisecond))\n\t}\n}\n\n\/\/ ParsePage parses page from a given string\nfunc ParsePage(page string) int {\n\tp := 1\n\tif page != \"\" {\n\t\ti, err := strconv.Atoi(page)\n\t\tif err == nil && i > 0 {\n\t\t\tp = i\n\t\t}\n\t}\n\treturn p\n}\n\n\/\/ ParseGoto parses goto from a given string\nfunc ParseGoto(gt string) int {\n\tg := 0\n\tif gt != \"\" {\n\t\ti, err := strconv.Atoi(gt)\n\t\tif err == nil && i > 0 {\n\t\t\tg = i\n\t\t}\n\t}\n\treturn g\n}\n\n\/\/ ParseTimeout parses timeout from a given string\nfunc ParseTimeout(timeout string) time.Duration {\n\tt := 5000 * time.Millisecond\n\tif timeout != \"\" {\n\t\td, err := time.ParseDuration(timeout)\n\t\tif err == nil {\n\t\t\tt = d\n\t\t}\n\t} else {\n\t\td, err := time.ParseDuration(searchTimeout)\n\t\tif err == nil {\n\t\t\tt = d\n\t\t}\n\t}\n\treturn t\n}\n\n\/\/ ParseLimit parses limit from a given string\nfunc ParseLimit(limit string) int {\n\tl := 10\n\tif limit != \"\" {\n\t\ti, err := strconv.Atoi(limit)\n\t\tif err == nil && i > 0 {\n\t\t\tl = i\n\t\t}\n\t}\n\treturn l\n}\n<commit_msg>Rename Register function to ProviderRegister<commit_after>\/*\n * Ferret\n * Copyright (c) 2016 Yieldbot, Inc.\n * For the full copyright and license information, please view the LICENSE.txt file.\n *\/\n\n\/\/ Package search provides search interface and functionality\npackage search\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strconv\"\n\t\"time\"\n\n\tprov \"github.com\/yieldbot\/ferret\/providers\"\n\t\"github.com\/yieldbot\/gocli\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar (\n\tgoCommand = \"open\"\n\tsearchTimeout = \"5000ms\"\n\tproviders = make(map[string]Provider)\n)\n\nfunc init() {\n\tif e := os.Getenv(\"FERRET_GOTO_CMD\"); e != \"\" {\n\t\tgoCommand = e\n\t}\n\tif e := os.Getenv(\"FERRET_SEARCH_TIMEOUT\"); e != \"\" {\n\t\tsearchTimeout = e\n\t}\n\n\tprov.Register(ProviderRegister)\n}\n\n\/\/ Provider represents a provider\ntype Provider struct {\n\tName string\n\tTitle string\n\tEnabled bool\n\tNoui bool\n\tPriority int64\n\tDoer\n}\n\n\/\/ Providers returns a sorted list of the names of the providers\nfunc Providers() []string {\n\tl := []string{}\n\tfor n := range providers {\n\t\tl = append(l, n)\n\t}\n\tsort.Strings(l)\n\treturn l\n}\n\n\/\/ ProviderByName returns a provider by the given name\nfunc ProviderByName(name string) (Provider, error) {\n\tp, ok := providers[name]\n\tif !ok {\n\t\treturn p, errors.New(\"provider \" + name + \" couldn't be found\")\n\t}\n\treturn p, nil\n}\n\n\/\/ ProviderRegister registers a search provider\nfunc ProviderRegister(provider interface{}) error {\n\n\t\/\/ Init provider\n\tp, ok := provider.(Doer)\n\tif !ok {\n\t\treturn errors.New(\"invalid provider\")\n\t}\n\n\tvar name, title string\n\tvar enabled, noui bool\n\tvar priority int64\n\n\t\/\/ Get the value of the provider\n\tv := reflect.Indirect(reflect.ValueOf(p))\n\t\/\/ Iterate the provider fields\n\tfor i := 0; i < v.NumField(); i++ {\n\t\tfn := v.Type().Field(i).Name\n\t\tft := v.Field(i).Type().Name()\n\n\t\tif fn == \"name\" && ft == \"string\" {\n\t\t\tname = v.Field(i).String()\n\t\t} else if fn == \"title\" && ft == \"string\" {\n\t\t\ttitle = v.Field(i).String()\n\t\t} else if fn == \"enabled\" && ft == \"bool\" {\n\t\t\tenabled = v.Field(i).Bool()\n\t\t} else if fn == \"noui\" && ft == \"bool\" {\n\t\t\tnoui = v.Field(i).Bool()\n\t\t} else if fn == \"priority\" && ft == \"int64\" {\n\t\t\tpriority = v.Field(i).Int()\n\t\t}\n\t}\n\tif name == \"\" {\n\t\treturn errors.New(\"invalid provider name\")\n\t}\n\tif title == \"\" {\n\t\ttitle = name\n\t}\n\n\t\/\/ Init provider\n\tif _, ok := providers[name]; ok {\n\t\treturn errors.New(\"search provider \" + name + \" is already registered\")\n\t}\n\tnp := Provider{\n\t\tName: name,\n\t\tTitle: title,\n\t\tEnabled: enabled,\n\t\tNoui: noui,\n\t\tPriority: priority,\n\t\tDoer: p,\n\t}\n\tproviders[name] = np\n\n\treturn nil\n}\n\n\/\/ Doer is the interface that must be implemented by a search provider\ntype Doer interface {\n\t\/\/ Search makes a search\n\tSearch(ctx context.Context, args map[string]interface{}) ([]map[string]interface{}, error)\n}\n\n\/\/ Do makes a search query by the given query\nfunc Do(query Query) (Query, error) {\n\n\t\/\/ Provider\n\tprovider, ok := providers[query.Provider]\n\tif !ok {\n\t\tquery.HTTPStatus = http.StatusBadRequest\n\t\treturn query, fmt.Errorf(\"invalid search provider. Possible search providers are %s\", Providers())\n\t}\n\n\t\/\/ Keyword\n\tif query.Keyword == \"\" {\n\t\tquery.HTTPStatus = http.StatusBadRequest\n\t\treturn query, errors.New(\"missing keyword\")\n\t}\n\n\t\/\/ Page\n\tif query.Page <= 0 {\n\t\tquery.HTTPStatus = http.StatusBadRequest\n\t\treturn query, errors.New(\"invalid page #. It should be greater than 0\")\n\t}\n\n\t\/\/ Limit\n\tif query.Limit <= 0 {\n\t\tquery.HTTPStatus = http.StatusBadRequest\n\t\treturn query, errors.New(\"invalid limit. It should be greater than 0\")\n\t}\n\n\t\/\/ Search\n\tquery.Start = time.Now()\n\tctx, cancel := context.WithTimeout(context.Background(), query.Timeout)\n\tdefer cancel()\n\tsq := map[string]interface{}{\"page\": query.Page, \"limit\": query.Limit, \"keyword\": query.Keyword}\n\tsr, err := provider.Search(ctx, sq)\n\tif err != nil {\n\t\tif err == context.DeadlineExceeded {\n\t\t\tquery.HTTPStatus = http.StatusGatewayTimeout\n\t\t\treturn query, errors.New(\"timeout\")\n\t\t} else if err == context.Canceled {\n\t\t\tquery.HTTPStatus = http.StatusInternalServerError\n\t\t\treturn query, errors.New(\"canceled\")\n\t\t}\n\t\tquery.HTTPStatus = http.StatusInternalServerError\n\t\treturn query, errors.New(\"failed to search due to \" + err.Error())\n\t}\n\tquery.Elapsed = time.Since(query.Start)\n\tfor _, srv := range sr {\n\t\tvar d string\n\t\tif _, ok := srv[\"Description\"]; ok {\n\t\t\td = srv[\"Description\"].(string)\n\t\t}\n\n\t\tvar t time.Time\n\t\tif _, ok := srv[\"Date\"]; ok {\n\t\t\tt = srv[\"Date\"].(time.Time)\n\t\t}\n\n\t\tquery.Results = append(query.Results, Result{\n\t\t\tLink: srv[\"Link\"].(string),\n\t\t\tTitle: srv[\"Title\"].(string),\n\t\t\tDescription: d,\n\t\t\tDate: t,\n\t\t\tFrom: provider.Title,\n\t\t})\n\t}\n\n\t\/\/ Goto\n\tif query.Goto != 0 {\n\t\tif query.Goto < 0 || query.Goto > len(query.Results) {\n\t\t\treturn query, fmt.Errorf(\"invalid result # to go. It should be between 1 and %d\", len(query.Results))\n\t\t}\n\t\tlink := query.Results[query.Goto-1].Link\n\t\tif _, err = exec.Command(goCommand, link).Output(); err != nil {\n\t\t\treturn query, fmt.Errorf(\"failed to go to %s due to %s. Check FERRET_GOTO_CMD environment variable\", link, err.Error())\n\t\t}\n\t\treturn query, nil\n\t}\n\n\treturn query, nil\n}\n\n\/\/ PrintResults prints the given search results\nfunc PrintResults(query Query, err error) {\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tif query.Goto == 0 {\n\t\tt := gocli.Table{}\n\t\tt.AddRow(1, \"#\", \"TITLE\")\n\t\tfor i, v := range query.Results {\n\t\t\tts := \"\"\n\t\t\tif !v.Date.IsZero() {\n\t\t\t\tts = fmt.Sprintf(\" (%d-%02d-%02d)\", v.Date.Year(), v.Date.Month(), v.Date.Day())\n\t\t\t}\n\t\t\tt.AddRow(i+2, fmt.Sprintf(\"%d\", i+1), fmt.Sprintf(\"%s%s\", v.Title, ts))\n\t\t}\n\t\tt.PrintData()\n\t\tfmt.Printf(\"\\n%dms\\n\", int64(query.Elapsed\/time.Millisecond))\n\t}\n}\n\n\/\/ ParsePage parses page from a given string\nfunc ParsePage(page string) int {\n\tp := 1\n\tif page != \"\" {\n\t\ti, err := strconv.Atoi(page)\n\t\tif err == nil && i > 0 {\n\t\t\tp = i\n\t\t}\n\t}\n\treturn p\n}\n\n\/\/ ParseGoto parses goto from a given string\nfunc ParseGoto(gt string) int {\n\tg := 0\n\tif gt != \"\" {\n\t\ti, err := strconv.Atoi(gt)\n\t\tif err == nil && i > 0 {\n\t\t\tg = i\n\t\t}\n\t}\n\treturn g\n}\n\n\/\/ ParseTimeout parses timeout from a given string\nfunc ParseTimeout(timeout string) time.Duration {\n\tt := 5000 * time.Millisecond\n\tif timeout != \"\" {\n\t\td, err := time.ParseDuration(timeout)\n\t\tif err == nil {\n\t\t\tt = d\n\t\t}\n\t} else {\n\t\td, err := time.ParseDuration(searchTimeout)\n\t\tif err == nil {\n\t\t\tt = d\n\t\t}\n\t}\n\treturn t\n}\n\n\/\/ ParseLimit parses limit from a given string\nfunc ParseLimit(limit string) int {\n\tl := 10\n\tif limit != \"\" {\n\t\ti, err := strconv.Atoi(limit)\n\t\tif err == nil && i > 0 {\n\t\t\tl = i\n\t\t}\n\t}\n\treturn l\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"code.gitea.io\/gitea\/models\"\n\t\"code.gitea.io\/gitea\/modules\/git\"\n\t\"code.gitea.io\/gitea\/modules\/log\"\n)\n\n\/\/ DBIndexer implements Indexer interface to use database's like search\ntype DBIndexer struct {\n}\n\n\/\/ Index repository status function\nfunc (db *DBIndexer) Index(id int64) error {\n\trepo, err := models.GetRepositoryByID(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif repo.IsEmpty {\n\t\treturn nil\n\t}\n\n\tstatus, err := repo.GetIndexerStatus(models.RepoIndexerTypeStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgitRepo, err := git.OpenRepository(repo.RepoPath())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer gitRepo.Close()\n\n\t\/\/ Get latest commit for default branch\n\tcommitID, err := gitRepo.GetBranchCommitID(repo.DefaultBranch)\n\tif err != nil {\n\t\tif git.IsErrBranchNotExist(err) {\n\t\t\tlog.Debug(\"Unable to get commit ID for defaultbranch %s in %s ... skipping this repository\", repo.DefaultBranch, repo.RepoPath())\n\t\t\treturn nil\n\t\t}\n\t\tlog.Error(\"Unable to get commit ID for defaultbranch %s in %s. Error: %v\", repo.DefaultBranch, repo.RepoPath(), err)\n\t\treturn err\n\t}\n\n\t\/\/ Do not recalculate stats if already calculated for this commit\n\tif status.CommitSha == commitID {\n\t\treturn nil\n\t}\n\n\t\/\/ Calculate and save language statistics to database\n\tstats, err := gitRepo.GetLanguageStats(commitID)\n\tif err != nil {\n\t\tlog.Error(\"Unable to get language stats for ID %s for defaultbranch %s in %s. Error: %v\", commitID, repo.DefaultBranch, repo.RepoPath(), err)\n\t\treturn err\n\t}\n\treturn repo.UpdateLanguageStats(commitID, stats)\n}\n\n\/\/ Close dummy function\nfunc (db *DBIndexer) Close() {\n}\n<commit_msg>If the default branch is not present do not report error on stats indexing (follow-up of #15546) (#15583)<commit_after>\/\/ Copyright 2020 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"code.gitea.io\/gitea\/models\"\n\t\"code.gitea.io\/gitea\/modules\/git\"\n\t\"code.gitea.io\/gitea\/modules\/log\"\n)\n\n\/\/ DBIndexer implements Indexer interface to use database's like search\ntype DBIndexer struct {\n}\n\n\/\/ Index repository status function\nfunc (db *DBIndexer) Index(id int64) error {\n\trepo, err := models.GetRepositoryByID(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif repo.IsEmpty {\n\t\treturn nil\n\t}\n\n\tstatus, err := repo.GetIndexerStatus(models.RepoIndexerTypeStats)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgitRepo, err := git.OpenRepository(repo.RepoPath())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer gitRepo.Close()\n\n\t\/\/ Get latest commit for default branch\n\tcommitID, err := gitRepo.GetBranchCommitID(repo.DefaultBranch)\n\tif err != nil {\n\t\tif git.IsErrBranchNotExist(err) || git.IsErrNotExist((err)) {\n\t\t\tlog.Debug(\"Unable to get commit ID for defaultbranch %s in %s ... skipping this repository\", repo.DefaultBranch, repo.RepoPath())\n\t\t\treturn nil\n\t\t}\n\t\tlog.Error(\"Unable to get commit ID for defaultbranch %s in %s. Error: %v\", repo.DefaultBranch, repo.RepoPath(), err)\n\t\treturn err\n\t}\n\n\t\/\/ Do not recalculate stats if already calculated for this commit\n\tif status.CommitSha == commitID {\n\t\treturn nil\n\t}\n\n\t\/\/ Calculate and save language statistics to database\n\tstats, err := gitRepo.GetLanguageStats(commitID)\n\tif err != nil {\n\t\tlog.Error(\"Unable to get language stats for ID %s for defaultbranch %s in %s. Error: %v\", commitID, repo.DefaultBranch, repo.RepoPath(), err)\n\t\treturn err\n\t}\n\treturn repo.UpdateLanguageStats(commitID, stats)\n}\n\n\/\/ Close dummy function\nfunc (db *DBIndexer) Close() {\n}\n<|endoftext|>"} {"text":"<commit_before>package java\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/ezbuy\/tgen\/global\"\n\t\"github.com\/samuel\/go-thrift\/parser\"\n)\n\nfunc TestGenerate(t *testing.T) {\n\t\/\/ 1 read thrift files from folder 'cases'\n\t\/\/ 2 generate & output\n\t\/\/ 3 read generated files, compared with corresponding files in folder 'test'\n\n\tcasedir, _ := filepath.Abs(\".\/..\/..\/example\/java\")\n\n\t\/\/ create output dir\n\toutdir, _ := filepath.Abs(\".\/output\")\n\t\/\/ if err := os.MkdirAll(outdir, 0755); err != nil {\n\t\/\/ \tt.Errorf(\"failed to create output directory %s\", outdir)\n\t\/\/ }\n\n\ttestdir, _ := filepath.Abs(\".\/..\/..\/example\/java\/ref\")\n\n\tgen := &JavaGen{}\n\tp := &parser.Parser{}\n\n\tvisitfunc := func(path string, info os.FileInfo, err error) error {\n\t\tif strings.HasPrefix(filepath.Base(path), \".\") || filepath.Ext(path) != \".thrift\" {\n\t\t\treturn nil\n\t\t}\n\n\t\tglobal.InputFile = path\n\n\t\tparsedThrift, _, err := p.ParseFile(path)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"parse error: %s\\n\", err.Error())\n\t\t}\n\n\t\tgen.Generate(outdir, parsedThrift)\n\n\t\tfor f, thrift := range parsedThrift {\n\t\t\tif f != global.InputFile {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tns := thrift.Namespaces[\"java\"]\n\t\t\tp := strings.Replace(ns, \".\", \"\/\", -1)\n\n\t\t\tfor _, m := range thrift.Structs {\n\t\t\t\tname := m.Name + \".java\"\n\n\t\t\t\t\/\/ jsonrpc\n\n\t\t\t\toutfile := filepath.Join(outdir, global.MODE_JSONRPC, p, name)\n\t\t\t\ttestfile := filepath.Join(testdir, global.MODE_JSONRPC, p, name)\n\n\t\t\t\tfileCompare(t, outfile, testfile)\n\n\t\t\t\t\/\/ rest\n\t\t\t\toutfile = filepath.Join(outdir, global.MODE_REST, p, name)\n\t\t\t\ttestfile = filepath.Join(testdir, global.MODE_REST, p, name)\n\n\t\t\t\tfileCompare(t, outfile, testfile)\n\t\t\t}\n\n\t\t\tfor _, s := range thrift.Services {\n\t\t\t\tname := s.Name + \"Service.java\"\n\n\t\t\t\t\/\/ jsonrpc\n\t\t\t\toutfile := filepath.Join(outdir, global.MODE_JSONRPC, p, name)\n\t\t\t\ttestfile := filepath.Join(testdir, global.MODE_JSONRPC, p, name)\n\n\t\t\t\tfileCompare(t, outfile, testfile)\n\n\t\t\t\t\/\/ rest\n\t\t\t\toutfile = filepath.Join(outdir, global.MODE_REST, p, name)\n\t\t\t\ttestfile = filepath.Join(testdir, global.MODE_REST, p, name)\n\n\t\t\t\tfileCompare(t, outfile, testfile)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tif err := filepath.Walk(casedir, visitfunc); err != nil {\n\t\tt.Errorf(\"walk error: %s\\n\", err.Error())\n\t}\n\n\t\/\/ do some clean\n\tos.RemoveAll(outdir)\n}\n\nfunc fileCompare(t *testing.T, src string, dest string) {\n\tif !pathexists(src) {\n\t\tt.Error(\"generate error\\n\")\n\t} else if !pathexists(dest) {\n\t\tt.Errorf(\"no test file found [%s]\\n\", dest)\n\t} else {\n\t\t\/\/ compare the output file with the case\n\t\tsrcdata, srcerr := ioutil.ReadFile(src)\n\t\tdestdata, desterr := ioutil.ReadFile(dest)\n\n\t\tif srcerr != nil || desterr != nil {\n\t\t\tt.Error(\"compare error [reading]\")\n\t\t} else if string(srcdata) != string(destdata) {\n\t\t\tt.Errorf(\"mismatch: [%s, %s]\", src, dest)\n\t\t} else {\n\t\t\tt.Log(\"PASS\")\n\t\t}\n\t}\n}\n\nfunc pathexists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\n\treturn false\n}\n<commit_msg>modify go test<commit_after>package java\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/ezbuy\/tgen\/global\"\n\t\"github.com\/samuel\/go-thrift\/parser\"\n)\n\nfunc TestGenerate(t *testing.T) {\n\t\/\/ 1 read thrift files from folder 'cases'\n\t\/\/ 2 generate & output\n\t\/\/ 3 read generated files, compared with corresponding files in folder 'test'\n\n\tcasedir, _ := filepath.Abs(\".\/..\/..\/example\/java\")\n\n\t\/\/ create output dir\n\toutdir, _ := filepath.Abs(\".\/output\")\n\t\/\/ if err := os.MkdirAll(outdir, 0755); err != nil {\n\t\/\/ \tt.Errorf(\"failed to create output directory %s\", outdir)\n\t\/\/ }\n\n\ttestdir, _ := filepath.Abs(\".\/..\/..\/example\/java\/ref\")\n\n\tgen := &JavaGen{}\n\tp := &parser.Parser{}\n\n\tvisitfunc := func(path string, info os.FileInfo, err error) error {\n\t\tif strings.HasPrefix(filepath.Base(path), \".\") || filepath.Ext(path) != \".thrift\" {\n\t\t\treturn nil\n\t\t}\n\n\t\tif !strings.HasSuffix(path, \"ShipForMe.thrift\") {\n\t\t\treturn nil\n\t\t}\n\n\t\tglobal.InputFile = path\n\n\t\tparsedThrift, _, err := p.ParseFile(path)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"parse error: %s\\n\", err.Error())\n\t\t}\n\n\t\tgen.Generate(outdir, parsedThrift)\n\n\t\tfor f, thrift := range parsedThrift {\n\t\t\tif f != global.InputFile {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tns := thrift.Namespaces[\"java\"]\n\t\t\tp := strings.Replace(ns, \".\", \"\/\", -1)\n\n\t\t\tfor _, m := range thrift.Structs {\n\t\t\t\tname := m.Name + \".java\"\n\n\t\t\t\t\/\/ jsonrpc\n\n\t\t\t\toutfile := filepath.Join(outdir, global.MODE_JSONRPC, p, name)\n\t\t\t\ttestfile := filepath.Join(testdir, global.MODE_JSONRPC, p, name)\n\n\t\t\t\tfileCompare(t, outfile, testfile)\n\n\t\t\t\t\/\/ rest\n\t\t\t\toutfile = filepath.Join(outdir, global.MODE_REST, p, name)\n\t\t\t\ttestfile = filepath.Join(testdir, global.MODE_REST, p, name)\n\n\t\t\t\tfileCompare(t, outfile, testfile)\n\t\t\t}\n\n\t\t\tfor _, s := range thrift.Services {\n\t\t\t\tname := s.Name + \"Service.java\"\n\n\t\t\t\t\/\/ jsonrpc\n\t\t\t\toutfile := filepath.Join(outdir, global.MODE_JSONRPC, p, name)\n\t\t\t\ttestfile := filepath.Join(testdir, global.MODE_JSONRPC, p, name)\n\n\t\t\t\tfileCompare(t, outfile, testfile)\n\n\t\t\t\t\/\/ rest\n\t\t\t\toutfile = filepath.Join(outdir, global.MODE_REST, p, name)\n\t\t\t\ttestfile = filepath.Join(testdir, global.MODE_REST, p, name)\n\n\t\t\t\tfileCompare(t, outfile, testfile)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tif err := filepath.Walk(casedir, visitfunc); err != nil {\n\t\tt.Errorf(\"walk error: %s\\n\", err.Error())\n\t}\n\n\t\/\/ do some clean\n\tos.RemoveAll(outdir)\n}\n\nfunc fileCompare(t *testing.T, src string, dest string) {\n\tif !pathexists(src) {\n\t\tt.Error(\"generate error\\n\")\n\t} else if !pathexists(dest) {\n\t\tt.Errorf(\"no test file found [%s]\\n\", dest)\n\t} else {\n\t\t\/\/ compare the output file with the case\n\t\tsrcdata, srcerr := ioutil.ReadFile(src)\n\t\tdestdata, desterr := ioutil.ReadFile(dest)\n\n\t\tif srcerr != nil || desterr != nil {\n\t\t\tt.Error(\"compare error [reading]\")\n\t\t} else if string(srcdata) != string(destdata) {\n\t\t\tt.Errorf(\"mismatch: [%s, %s]\", src, dest)\n\t\t} else {\n\t\t\tt.Log(\"PASS\")\n\t\t}\n\t}\n}\n\nfunc pathexists(path string) bool {\n\t_, err := os.Stat(path)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tif os.IsNotExist(err) {\n\t\treturn false\n\t}\n\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage chartutil\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"path\"\n\t\"strings\"\n\n\t\"github.com\/ghodss\/yaml\"\n\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/gobwas\/glob\"\n\t\"github.com\/golang\/protobuf\/ptypes\/any\"\n)\n\n\/\/ Files is a map of files in a chart that can be accessed from a template.\ntype Files map[string][]byte\n\n\/\/ NewFiles creates a new Files from chart files.\n\/\/ Given an []*any.Any (the format for files in a chart.Chart), extract a map of files.\nfunc NewFiles(from []*any.Any) Files {\n\tfiles := map[string][]byte{}\n\tif from != nil {\n\t\tfor _, f := range from {\n\t\t\tfiles[f.TypeUrl] = f.Value\n\t\t}\n\t}\n\treturn files\n}\n\n\/\/ GetBytes gets a file by path.\n\/\/\n\/\/ The returned data is raw. In a template context, this is identical to calling\n\/\/ {{index .Files $path}}.\n\/\/\n\/\/ This is intended to be accessed from within a template, so a missed key returns\n\/\/ an empty []byte.\nfunc (f Files) GetBytes(name string) []byte {\n\tv, ok := f[name]\n\tif !ok {\n\t\treturn []byte{}\n\t}\n\treturn v\n}\n\n\/\/ Get returns a string representation of the given file.\n\/\/\n\/\/ Fetch the contents of a file as a string. It is designed to be called in a\n\/\/ template.\n\/\/\n\/\/\t{{.Files.Get \"foo\"}}\nfunc (f Files) Get(name string) string {\n\treturn string(f.GetBytes(name))\n}\n\n\/\/ Glob takes a glob pattern and returns another files object only containing\n\/\/ matched files.\n\/\/\n\/\/ This is designed to be called from a template.\n\/\/\n\/\/ {{ range $name, $content := .Files.Glob(\"foo\/**\") }}\n\/\/ {{ $name }}: |\n\/\/ {{ .Files.Get($name) | indent 4 }}{{ end }}\nfunc (f Files) Glob(pattern string) Files {\n\tg, err := glob.Compile(pattern, '\/')\n\tif err != nil {\n\t\tg, _ = glob.Compile(\"**\")\n\t}\n\n\tnf := NewFiles(nil)\n\tfor name, contents := range f {\n\t\tif g.Match(name) {\n\t\t\tnf[name] = contents\n\t\t}\n\t}\n\n\treturn nf\n}\n\n\/\/ AsConfig turns a Files group and flattens it to a YAML map suitable for\n\/\/ including in the 'data' section of a Kubernetes ConfigMap definition.\n\/\/ Duplicate keys will be overwritten, so be aware that your file names\n\/\/ (regardless of path) should be unique.\n\/\/\n\/\/ This is designed to be called from a template, and will return empty string\n\/\/ (via ToYaml function) if it cannot be serialized to YAML, or if the Files\n\/\/ object is nil.\n\/\/\n\/\/ The output will not be indented, so you will want to pipe this to the\n\/\/ 'indent' template function.\n\/\/\n\/\/ data:\n\/\/ {{ .Files.Glob(\"config\/**\").AsConfig() | indent 4 }}\nfunc (f Files) AsConfig() string {\n\tif f == nil {\n\t\treturn \"\"\n\t}\n\n\tm := map[string]string{}\n\n\t\/\/ Explicitly convert to strings, and file names\n\tfor k, v := range f {\n\t\tm[path.Base(k)] = string(v)\n\t}\n\n\treturn ToYaml(m)\n}\n\n\/\/ AsSecrets returns the base64-encoded value of a Files object suitable for\n\/\/ including in the 'data' section of a Kubernetes Secret definition.\n\/\/ Duplicate keys will be overwritten, so be aware that your file names\n\/\/ (regardless of path) should be unique.\n\/\/\n\/\/ This is designed to be called from a template, and will return empty string\n\/\/ (via ToYaml function) if it cannot be serialized to YAML, or if the Files\n\/\/ object is nil.\n\/\/\n\/\/ The output will not be indented, so you will want to pipe this to the\n\/\/ 'indent' template function.\n\/\/\n\/\/ data:\n\/\/ {{ .Files.Glob(\"secrets\/*\").AsSecrets() }}\nfunc (f Files) AsSecrets() string {\n\tif f == nil {\n\t\treturn \"\"\n\t}\n\n\tm := map[string]string{}\n\n\tfor k, v := range f {\n\t\tm[path.Base(k)] = base64.StdEncoding.EncodeToString(v)\n\t}\n\n\treturn ToYaml(m)\n}\n\n\/\/ Lines returns each line of a named file (split by \"\\n\") as a slice, so it can\n\/\/ be ranged over in your templates.\n\/\/\n\/\/ This is designed to be called from a template.\n\/\/\n\/\/ {{ range .Files.Lines \"foo\/bar.html\" }}\n\/\/ {{ . }}{{ end }}\nfunc (f Files) Lines(path string) []string {\n\tif f == nil || f[path] == nil {\n\t\treturn []string{}\n\t}\n\n\treturn strings.Split(string(f[path]), \"\\n\")\n}\n\n\/\/ ToYaml takes an interface, marshals it to yaml, and returns a string. It will\n\/\/ always return a string, even on marshal error (empty string).\n\/\/\n\/\/ This is designed to be called from a template.\nfunc ToYaml(v interface{}) string {\n\tdata, err := yaml.Marshal(v)\n\tif err != nil {\n\t\t\/\/ Swallow errors inside of a template.\n\t\treturn \"\"\n\t}\n\treturn string(data)\n}\n\n\/\/ FromYaml converts a YAML document into a map[string]interface{}.\n\/\/\n\/\/ This is not a general-purpose YAML parser, and will not parse all valid\n\/\/ YAML documents. Additionally, because its intended use is within templates\n\/\/ it tolerates errors. It will insert the returned error message string into\n\/\/ m[\"Error\"] in the returned map.\nfunc FromYaml(str string) map[string]interface{} {\n\tm := map[string]interface{}{}\n\n\tif err := yaml.Unmarshal([]byte(str), &m); err != nil {\n\t\tm[\"Error\"] = err.Error()\n\t}\n\treturn m\n}\n\n\/\/ ToToml takes an interface, marshals it to toml, and returns a string. It will\n\/\/ always return a string, even on marshal error (empty string).\n\/\/\n\/\/ This is designed to be called from a template.\nfunc ToToml(v interface{}) string {\n\tb := bytes.NewBuffer(nil)\n\te := toml.NewEncoder(b)\n\terr := e.Encode(v)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\treturn b.String()\n}\n\n\/\/ ToJson takes an interface, marshals it to json, and returns a string. It will\n\/\/ always return a string, even on marshal error (empty string).\n\/\/\n\/\/ This is designed to be called from a template.\nfunc ToJson(v interface{}) string {\n\tdata, err := json.Marshal(v)\n\tif err != nil {\n\t\t\/\/ Swallow errors inside of a template.\n\t\treturn \"\"\n\t}\n\treturn string(data)\n}\n\n\/\/ FromJson converts a YAML document into a map[string]interface{}.\n\/\/\n\/\/ This is not a general-purpose JSON parser, and will not parse all valid\n\/\/ YAML documents. Additionally, because its intended use is within templates\n\/\/ it tolerates errors. It will insert the returned error message string into\n\/\/ m[\"error\"] in the returned map.\nfunc FromJson(str string) map[string]interface{} {\n\tm := map[string]interface{}{}\n\n\tif err := json.Unmarshal([]byte(str), &m); err != nil {\n\t\tm[\"Error\"] = err.Error()\n\t}\n\treturn m\n}\n<commit_msg>fix fromjson comment<commit_after>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage chartutil\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"path\"\n\t\"strings\"\n\n\t\"github.com\/ghodss\/yaml\"\n\n\t\"github.com\/BurntSushi\/toml\"\n\t\"github.com\/gobwas\/glob\"\n\t\"github.com\/golang\/protobuf\/ptypes\/any\"\n)\n\n\/\/ Files is a map of files in a chart that can be accessed from a template.\ntype Files map[string][]byte\n\n\/\/ NewFiles creates a new Files from chart files.\n\/\/ Given an []*any.Any (the format for files in a chart.Chart), extract a map of files.\nfunc NewFiles(from []*any.Any) Files {\n\tfiles := map[string][]byte{}\n\tif from != nil {\n\t\tfor _, f := range from {\n\t\t\tfiles[f.TypeUrl] = f.Value\n\t\t}\n\t}\n\treturn files\n}\n\n\/\/ GetBytes gets a file by path.\n\/\/\n\/\/ The returned data is raw. In a template context, this is identical to calling\n\/\/ {{index .Files $path}}.\n\/\/\n\/\/ This is intended to be accessed from within a template, so a missed key returns\n\/\/ an empty []byte.\nfunc (f Files) GetBytes(name string) []byte {\n\tv, ok := f[name]\n\tif !ok {\n\t\treturn []byte{}\n\t}\n\treturn v\n}\n\n\/\/ Get returns a string representation of the given file.\n\/\/\n\/\/ Fetch the contents of a file as a string. It is designed to be called in a\n\/\/ template.\n\/\/\n\/\/\t{{.Files.Get \"foo\"}}\nfunc (f Files) Get(name string) string {\n\treturn string(f.GetBytes(name))\n}\n\n\/\/ Glob takes a glob pattern and returns another files object only containing\n\/\/ matched files.\n\/\/\n\/\/ This is designed to be called from a template.\n\/\/\n\/\/ {{ range $name, $content := .Files.Glob(\"foo\/**\") }}\n\/\/ {{ $name }}: |\n\/\/ {{ .Files.Get($name) | indent 4 }}{{ end }}\nfunc (f Files) Glob(pattern string) Files {\n\tg, err := glob.Compile(pattern, '\/')\n\tif err != nil {\n\t\tg, _ = glob.Compile(\"**\")\n\t}\n\n\tnf := NewFiles(nil)\n\tfor name, contents := range f {\n\t\tif g.Match(name) {\n\t\t\tnf[name] = contents\n\t\t}\n\t}\n\n\treturn nf\n}\n\n\/\/ AsConfig turns a Files group and flattens it to a YAML map suitable for\n\/\/ including in the 'data' section of a Kubernetes ConfigMap definition.\n\/\/ Duplicate keys will be overwritten, so be aware that your file names\n\/\/ (regardless of path) should be unique.\n\/\/\n\/\/ This is designed to be called from a template, and will return empty string\n\/\/ (via ToYaml function) if it cannot be serialized to YAML, or if the Files\n\/\/ object is nil.\n\/\/\n\/\/ The output will not be indented, so you will want to pipe this to the\n\/\/ 'indent' template function.\n\/\/\n\/\/ data:\n\/\/ {{ .Files.Glob(\"config\/**\").AsConfig() | indent 4 }}\nfunc (f Files) AsConfig() string {\n\tif f == nil {\n\t\treturn \"\"\n\t}\n\n\tm := map[string]string{}\n\n\t\/\/ Explicitly convert to strings, and file names\n\tfor k, v := range f {\n\t\tm[path.Base(k)] = string(v)\n\t}\n\n\treturn ToYaml(m)\n}\n\n\/\/ AsSecrets returns the base64-encoded value of a Files object suitable for\n\/\/ including in the 'data' section of a Kubernetes Secret definition.\n\/\/ Duplicate keys will be overwritten, so be aware that your file names\n\/\/ (regardless of path) should be unique.\n\/\/\n\/\/ This is designed to be called from a template, and will return empty string\n\/\/ (via ToYaml function) if it cannot be serialized to YAML, or if the Files\n\/\/ object is nil.\n\/\/\n\/\/ The output will not be indented, so you will want to pipe this to the\n\/\/ 'indent' template function.\n\/\/\n\/\/ data:\n\/\/ {{ .Files.Glob(\"secrets\/*\").AsSecrets() }}\nfunc (f Files) AsSecrets() string {\n\tif f == nil {\n\t\treturn \"\"\n\t}\n\n\tm := map[string]string{}\n\n\tfor k, v := range f {\n\t\tm[path.Base(k)] = base64.StdEncoding.EncodeToString(v)\n\t}\n\n\treturn ToYaml(m)\n}\n\n\/\/ Lines returns each line of a named file (split by \"\\n\") as a slice, so it can\n\/\/ be ranged over in your templates.\n\/\/\n\/\/ This is designed to be called from a template.\n\/\/\n\/\/ {{ range .Files.Lines \"foo\/bar.html\" }}\n\/\/ {{ . }}{{ end }}\nfunc (f Files) Lines(path string) []string {\n\tif f == nil || f[path] == nil {\n\t\treturn []string{}\n\t}\n\n\treturn strings.Split(string(f[path]), \"\\n\")\n}\n\n\/\/ ToYaml takes an interface, marshals it to yaml, and returns a string. It will\n\/\/ always return a string, even on marshal error (empty string).\n\/\/\n\/\/ This is designed to be called from a template.\nfunc ToYaml(v interface{}) string {\n\tdata, err := yaml.Marshal(v)\n\tif err != nil {\n\t\t\/\/ Swallow errors inside of a template.\n\t\treturn \"\"\n\t}\n\treturn string(data)\n}\n\n\/\/ FromYaml converts a YAML document into a map[string]interface{}.\n\/\/\n\/\/ This is not a general-purpose YAML parser, and will not parse all valid\n\/\/ YAML documents. Additionally, because its intended use is within templates\n\/\/ it tolerates errors. It will insert the returned error message string into\n\/\/ m[\"Error\"] in the returned map.\nfunc FromYaml(str string) map[string]interface{} {\n\tm := map[string]interface{}{}\n\n\tif err := yaml.Unmarshal([]byte(str), &m); err != nil {\n\t\tm[\"Error\"] = err.Error()\n\t}\n\treturn m\n}\n\n\/\/ ToToml takes an interface, marshals it to toml, and returns a string. It will\n\/\/ always return a string, even on marshal error (empty string).\n\/\/\n\/\/ This is designed to be called from a template.\nfunc ToToml(v interface{}) string {\n\tb := bytes.NewBuffer(nil)\n\te := toml.NewEncoder(b)\n\terr := e.Encode(v)\n\tif err != nil {\n\t\treturn err.Error()\n\t}\n\treturn b.String()\n}\n\n\/\/ ToJson takes an interface, marshals it to json, and returns a string. It will\n\/\/ always return a string, even on marshal error (empty string).\n\/\/\n\/\/ This is designed to be called from a template.\nfunc ToJson(v interface{}) string {\n\tdata, err := json.Marshal(v)\n\tif err != nil {\n\t\t\/\/ Swallow errors inside of a template.\n\t\treturn \"\"\n\t}\n\treturn string(data)\n}\n\n\/\/ FromJson converts a YAML document into a map[string]interface{}.\n\/\/\n\/\/ This is not a general-purpose JSON parser, and will not parse all valid\n\/\/ YAML documents. Additionally, because its intended use is within templates\n\/\/ it tolerates errors. It will insert the returned error message string into\n\/\/ m[\"Error\"] in the returned map.\nfunc FromJson(str string) map[string]interface{} {\n\tm := map[string]interface{}{}\n\n\tif err := json.Unmarshal([]byte(str), &m); err != nil {\n\t\tm[\"Error\"] = err.Error()\n\t}\n\treturn m\n}\n<|endoftext|>"} {"text":"<commit_before>package handler\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/fusor\/ansible-service-broker\/pkg\/broker\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/pborman\/uuid\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n)\n\n\/\/ TODO: implement asynchronous operations\n\/\/ TODO: authentication \/ authorization\n\ntype handler struct {\n\trouter mux.Router\n\tbroker broker.Broker\n}\n\nfunc NewHandler(b broker.Broker) http.Handler {\n\th := handler{broker: b}\n\n\t\/\/ TODO: handle X-Broker-API-Version header, currently poorly defined\n\troot := h.router.Headers(\"X-Broker-API-Version\", \"2.9\").Subrouter()\n\n\troot.HandleFunc(\"\/v2\/bootstrap\", h.bootstrap).Methods(\"POST\")\n\troot.HandleFunc(\"\/v2\/catalog\", h.catalog).Methods(\"GET\")\n\troot.HandleFunc(\"\/v2\/service_instances\/{instance_uuid}\", h.provision).Methods(\"PUT\")\n\troot.HandleFunc(\"\/v2\/service_instances\/{instance_uuid}\", h.update).Methods(\"PATCH\")\n\troot.HandleFunc(\"\/v2\/service_instances\/{instance_uuid}\", h.deprovision).Methods(\"DELETE\")\n\troot.HandleFunc(\"\/v2\/service_instances\/{instance_uuid}\/service_bindings\/{binding_uuid}\", h.bind).Methods(\"PUT\")\n\troot.HandleFunc(\"\/v2\/service_instances\/{instance_uuid}\/service_bindings\/{binding_uuid}\", h.unbind).Methods(\"DELETE\")\n\n\treturn h\n}\n\nfunc (h handler) bootstrap(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\tresp, err := h.broker.Bootstrap()\n\twriteDefaultResponse(w, http.StatusOK, resp, err)\n}\n\nfunc (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\th.router.ServeHTTP(w, r)\n}\n\nfunc (h handler) catalog(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\tresp, err := h.broker.Catalog()\n\n\twriteDefaultResponse(w, http.StatusOK, resp, err)\n}\n\nfunc (h handler) provision(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\tinstanceUUID := uuid.Parse(mux.Vars(r)[\"instance_uuid\"])\n\tif instanceUUID == nil {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: \"invalid instance_uuid\"})\n\t\treturn\n\t}\n\n\tvar req *broker.ProvisionRequest\n\terr := readRequest(r, &req)\n\n\tif err != nil {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: \"could not read request: \" + err.Error()})\n\t\treturn\n\t}\n\n\tresp, err := h.broker.Provision(instanceUUID, req)\n\n\tif errors.IsNotFound(err) || errors.IsInvalid(err) {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: \"instance not found: \" + err.Error()})\n\t} else if errors.IsAlreadyExists(err) {\n\t\twriteResponse(w, http.StatusConflict, broker.ProvisionResponse{})\n\t} else {\n\t\twriteDefaultResponse(w, http.StatusCreated, resp, err)\n\t}\n}\n\nfunc (h handler) update(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\tinstanceUUID := uuid.Parse(mux.Vars(r)[\"instance_uuid\"])\n\tif instanceUUID == nil {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: \"invalid instance_uuid\"})\n\t\treturn\n\t}\n\n\tvar req *broker.UpdateRequest\n\tif err := readRequest(r, &req); err != nil {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: err.Error()})\n\t\treturn\n\t}\n\n\tresp, err := h.broker.Update(instanceUUID, req)\n\n\twriteDefaultResponse(w, http.StatusOK, resp, err)\n}\n\nfunc (h handler) deprovision(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\tinstanceUUID := uuid.Parse(mux.Vars(r)[\"instance_uuid\"])\n\tif instanceUUID == nil {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: \"invalid instance_uuid\"})\n\t\treturn\n\t}\n\n\tresp, err := h.broker.Deprovision(instanceUUID)\n\n\tif errors.IsNotFound(err) {\n\t\twriteResponse(w, http.StatusGone, broker.DeprovisionResponse{})\n\t} else {\n\t\twriteDefaultResponse(w, http.StatusOK, resp, err)\n\t}\n}\n\nfunc (h handler) bind(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\tinstanceUUID := uuid.Parse(mux.Vars(r)[\"instance_uuid\"])\n\tif instanceUUID == nil {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: \"invalid instance_uuid\"})\n\t\treturn\n\t}\n\n\tbindingUUID := uuid.Parse(mux.Vars(r)[\"binding_uuid\"])\n\tif bindingUUID == nil {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: \"invalid binding_uuid\"})\n\t\treturn\n\t}\n\n\tvar req *broker.BindRequest\n\tif err := readRequest(r, &req); err != nil {\n\t\twriteResponse(w, http.StatusInternalServerError, broker.ErrorResponse{Description: err.Error()})\n\t\treturn\n\t}\n\n\tresp, err := h.broker.Bind(instanceUUID, bindingUUID, req)\n\n\twriteDefaultResponse(w, http.StatusCreated, resp, err)\n}\n\nfunc (h handler) unbind(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\tinstanceUUID := uuid.Parse(mux.Vars(r)[\"instance_uuid\"])\n\tif instanceUUID == nil {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: \"invalid instance_uuid\"})\n\t\treturn\n\t}\n\n\tbindingUUID := uuid.Parse(mux.Vars(r)[\"binding_uuid\"])\n\tif bindingUUID == nil {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: \"invalid binding_uuid\"})\n\t\treturn\n\t}\n\n\terr := h.broker.Unbind(instanceUUID, bindingUUID)\n\n\tif errors.IsNotFound(err) {\n\t\twriteResponse(w, http.StatusGone, struct{}{})\n\t} else {\n\t\twriteDefaultResponse(w, http.StatusOK, struct{}{}, err)\n\t}\n\treturn\n}\n<commit_msg>Remove header restriction from router (#35)<commit_after>package handler\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/fusor\/ansible-service-broker\/pkg\/broker\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/pborman\/uuid\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n)\n\n\/\/ TODO: implement asynchronous operations\n\/\/ TODO: authentication \/ authorization\n\ntype handler struct {\n\trouter mux.Router\n\tbroker broker.Broker\n}\n\nfunc NewHandler(b broker.Broker) http.Handler {\n\th := handler{\n\t\trouter: *mux.NewRouter(),\n\t\tbroker: b,\n\t}\n\n\t\/\/ TODO: Reintroduce router restriction based on API version when settled upstream\n\t\/\/root := h.router.Headers(\"X-Broker-API-Version\", \"2.9\").Subrouter()\n\n\th.router.HandleFunc(\"\/v2\/bootstrap\", h.bootstrap).Methods(\"POST\")\n\th.router.HandleFunc(\"\/v2\/catalog\", h.catalog).Methods(\"GET\")\n\th.router.HandleFunc(\"\/v2\/service_instances\/{instance_uuid}\", h.provision).Methods(\"PUT\")\n\th.router.HandleFunc(\"\/v2\/service_instances\/{instance_uuid}\", h.update).Methods(\"PATCH\")\n\th.router.HandleFunc(\"\/v2\/service_instances\/{instance_uuid}\", h.deprovision).Methods(\"DELETE\")\n\th.router.HandleFunc(\"\/v2\/service_instances\/{instance_uuid}\/service_bindings\/{binding_uuid}\", h.bind).Methods(\"PUT\")\n\th.router.HandleFunc(\"\/v2\/service_instances\/{instance_uuid}\/service_bindings\/{binding_uuid}\", h.unbind).Methods(\"DELETE\")\n\n\treturn h\n}\n\nfunc (h handler) bootstrap(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\tresp, err := h.broker.Bootstrap()\n\twriteDefaultResponse(w, http.StatusOK, resp, err)\n}\n\nfunc (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\th.router.ServeHTTP(w, r)\n}\n\nfunc (h handler) catalog(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\tresp, err := h.broker.Catalog()\n\n\twriteDefaultResponse(w, http.StatusOK, resp, err)\n}\n\nfunc (h handler) provision(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\tinstanceUUID := uuid.Parse(mux.Vars(r)[\"instance_uuid\"])\n\tif instanceUUID == nil {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: \"invalid instance_uuid\"})\n\t\treturn\n\t}\n\n\tvar req *broker.ProvisionRequest\n\terr := readRequest(r, &req)\n\n\tif err != nil {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: \"could not read request: \" + err.Error()})\n\t\treturn\n\t}\n\n\tresp, err := h.broker.Provision(instanceUUID, req)\n\n\tif errors.IsNotFound(err) || errors.IsInvalid(err) {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: \"instance not found: \" + err.Error()})\n\t} else if errors.IsAlreadyExists(err) {\n\t\twriteResponse(w, http.StatusConflict, broker.ProvisionResponse{})\n\t} else {\n\t\twriteDefaultResponse(w, http.StatusCreated, resp, err)\n\t}\n}\n\nfunc (h handler) update(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\tinstanceUUID := uuid.Parse(mux.Vars(r)[\"instance_uuid\"])\n\tif instanceUUID == nil {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: \"invalid instance_uuid\"})\n\t\treturn\n\t}\n\n\tvar req *broker.UpdateRequest\n\tif err := readRequest(r, &req); err != nil {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: err.Error()})\n\t\treturn\n\t}\n\n\tresp, err := h.broker.Update(instanceUUID, req)\n\n\twriteDefaultResponse(w, http.StatusOK, resp, err)\n}\n\nfunc (h handler) deprovision(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\tinstanceUUID := uuid.Parse(mux.Vars(r)[\"instance_uuid\"])\n\tif instanceUUID == nil {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: \"invalid instance_uuid\"})\n\t\treturn\n\t}\n\n\tresp, err := h.broker.Deprovision(instanceUUID)\n\n\tif errors.IsNotFound(err) {\n\t\twriteResponse(w, http.StatusGone, broker.DeprovisionResponse{})\n\t} else {\n\t\twriteDefaultResponse(w, http.StatusOK, resp, err)\n\t}\n}\n\nfunc (h handler) bind(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\tinstanceUUID := uuid.Parse(mux.Vars(r)[\"instance_uuid\"])\n\tif instanceUUID == nil {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: \"invalid instance_uuid\"})\n\t\treturn\n\t}\n\n\tbindingUUID := uuid.Parse(mux.Vars(r)[\"binding_uuid\"])\n\tif bindingUUID == nil {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: \"invalid binding_uuid\"})\n\t\treturn\n\t}\n\n\tvar req *broker.BindRequest\n\tif err := readRequest(r, &req); err != nil {\n\t\twriteResponse(w, http.StatusInternalServerError, broker.ErrorResponse{Description: err.Error()})\n\t\treturn\n\t}\n\n\tresp, err := h.broker.Bind(instanceUUID, bindingUUID, req)\n\n\twriteDefaultResponse(w, http.StatusCreated, resp, err)\n}\n\nfunc (h handler) unbind(w http.ResponseWriter, r *http.Request) {\n\tdefer r.Body.Close()\n\n\tinstanceUUID := uuid.Parse(mux.Vars(r)[\"instance_uuid\"])\n\tif instanceUUID == nil {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: \"invalid instance_uuid\"})\n\t\treturn\n\t}\n\n\tbindingUUID := uuid.Parse(mux.Vars(r)[\"binding_uuid\"])\n\tif bindingUUID == nil {\n\t\twriteResponse(w, http.StatusBadRequest, broker.ErrorResponse{Description: \"invalid binding_uuid\"})\n\t\treturn\n\t}\n\n\terr := h.broker.Unbind(instanceUUID, bindingUUID)\n\n\tif errors.IsNotFound(err) {\n\t\twriteResponse(w, http.StatusGone, struct{}{})\n\t} else {\n\t\twriteDefaultResponse(w, http.StatusOK, struct{}{}, err)\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package rancher\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\n\tresponsewriter \"github.com\/rancher\/apiserver\/pkg\/middleware\"\n\t\"github.com\/rancher\/rancher\/pkg\/api\/norman\/customization\/kontainerdriver\"\n\t\"github.com\/rancher\/rancher\/pkg\/api\/norman\/customization\/podsecuritypolicytemplate\"\n\tsteveapi \"github.com\/rancher\/rancher\/pkg\/api\/steve\"\n\t\"github.com\/rancher\/rancher\/pkg\/api\/steve\/proxy\"\n\t\"github.com\/rancher\/rancher\/pkg\/auth\"\n\t\"github.com\/rancher\/rancher\/pkg\/auth\/audit\"\n\t\"github.com\/rancher\/rancher\/pkg\/auth\/requests\"\n\t\"github.com\/rancher\/rancher\/pkg\/controllers\/dashboard\"\n\t\"github.com\/rancher\/rancher\/pkg\/controllers\/dashboardapi\"\n\tmanagementauth \"github.com\/rancher\/rancher\/pkg\/controllers\/management\/auth\"\n\tcrds \"github.com\/rancher\/rancher\/pkg\/crds\/dashboard\"\n\tdashboarddata \"github.com\/rancher\/rancher\/pkg\/data\/dashboard\"\n\t\"github.com\/rancher\/rancher\/pkg\/features\"\n\t\"github.com\/rancher\/rancher\/pkg\/multiclustermanager\"\n\t\"github.com\/rancher\/rancher\/pkg\/tls\"\n\t\"github.com\/rancher\/rancher\/pkg\/ui\"\n\t\"github.com\/rancher\/rancher\/pkg\/websocket\"\n\t\"github.com\/rancher\/rancher\/pkg\/wrangler\"\n\tsteveauth \"github.com\/rancher\/steve\/pkg\/auth\"\n\tsteveserver \"github.com\/rancher\/steve\/pkg\/server\"\n\t\"github.com\/rancher\/wrangler\/pkg\/k8scheck\"\n\t\"github.com\/urfave\/cli\"\n\t\"k8s.io\/client-go\/rest\"\n\t\"k8s.io\/client-go\/tools\/clientcmd\"\n)\n\ntype Options struct {\n\tACMEDomains cli.StringSlice\n\tAddLocal string\n\tEmbedded bool\n\tBindHost string\n\tHTTPListenPort int\n\tHTTPSListenPort int\n\tK8sMode string\n\tDebug bool\n\tTrace bool\n\tNoCACerts bool\n\tAuditLogPath string\n\tAuditLogMaxage int\n\tAuditLogMaxsize int\n\tAuditLogMaxbackup int\n\tAuditLevel int\n\tAgent bool\n\tFeatures string\n}\n\ntype Rancher struct {\n\tAuth steveauth.Middleware\n\tHandler http.Handler\n\tWrangler *wrangler.Context\n\tSteve *steveserver.Server\n\n\tauditLog *audit.LogWriter\n\tauthServer *auth.Server\n\topts *Options\n}\n\nfunc New(ctx context.Context, clientConfg clientcmd.ClientConfig, opts *Options) (*Rancher, error) {\n\tvar (\n\t\tauthServer *auth.Server\n\t)\n\n\tif opts == nil {\n\t\topts = &Options{}\n\t}\n\n\trestConfig, err := clientConfg.ClientConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trestConfig, err = setupAndValidationRESTConfig(ctx, restConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlockID := \"cattle-controllers\"\n\tif opts.Agent {\n\t\tlockID = \"cattle-agent-controllers\"\n\t}\n\n\twranglerContext, err := wrangler.NewContext(ctx, lockID, clientConfg, restConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twranglerContext.MultiClusterManager = newMCM(wranglerContext, opts)\n\twranglerContext.Agent = opts.Agent\n\n\tpodsecuritypolicytemplate.RegisterIndexers(wranglerContext)\n\tkontainerdriver.RegisterIndexers(wranglerContext)\n\tmanagementauth.RegisterWranglerIndexers(wranglerContext)\n\n\tif err := crds.Create(ctx, restConfig); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Initialize Features as early as possible\n\tfeatures.InitializeFeatures(wranglerContext.Mgmt.Feature(), opts.Features)\n\n\tif opts.Agent {\n\t\tauthServer, err = auth.NewHeaderAuth()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfeatures.MCM.Disable()\n\t\tfeatures.Fleet.Disable()\n\t} else if features.Auth.Enabled() {\n\t\tauthServer, err = auth.NewServer(ctx, restConfig)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tauthServer, err = auth.NewAlwaysAdmin()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tsteve, err := steveserver.New(ctx, restConfig, &steveserver.Options{\n\t\tControllers: wranglerContext.Controllers,\n\t\tAccessSetLookup: wranglerContext.ASL,\n\t\tAuthMiddleware: steveauth.ExistingContext,\n\t\tNext: ui.New(wranglerContext.Mgmt.Preference().Cache()),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclusterProxy, err := proxy.NewProxyMiddleware(wranglerContext.K8s.AuthorizationV1().SubjectAccessReviews(),\n\t\twranglerContext.MultiClusterManager,\n\t\twranglerContext.Mgmt.Cluster().Cache(),\n\t\tlocalClusterEnabled(opts),\n\t\tsteve,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tadditionalAPI, err := steveapi.AdditionalAPIs(ctx, wranglerContext, steve)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tauditLogWriter := audit.NewLogWriter(opts.AuditLogPath, opts.AuditLevel, opts.AuditLogMaxage, opts.AuditLogMaxbackup, opts.AuditLogMaxsize)\n\tauditFilter := audit.NewAuditLogMiddleware(auditLogWriter)\n\n\treturn &Rancher{\n\t\tAuth: authServer.Authenticator.Chain(\n\t\t\tauditFilter),\n\t\tHandler: responsewriter.Chain{\n\t\t\tresponsewriter.ContentTypeOptions,\n\t\t\twebsocket.NewWebsocketHandler,\n\t\t\tproxy.RewriteLocalCluster,\n\t\t\tclusterProxy,\n\t\t\twranglerContext.MultiClusterManager.Middleware,\n\t\t\tauthServer.Management,\n\t\t\tadditionalAPI,\n\t\t\trequests.NewRequireAuthenticatedFilter(\"\/v1\/\"),\n\t\t}.Handler(steve),\n\t\tWrangler: wranglerContext,\n\t\tSteve: steve,\n\t\tauditLog: auditLogWriter,\n\t\tauthServer: authServer,\n\t\topts: opts,\n\t}, nil\n}\n\nfunc (r *Rancher) Start(ctx context.Context) error {\n\tif err := dashboarddata.EarlyData(ctx, r.Wrangler.K8s); err != nil {\n\t\treturn err\n\t}\n\n\tif err := dashboardapi.Register(ctx, r.Wrangler); err != nil {\n\t\treturn err\n\t}\n\n\tif err := steveapi.Setup(ctx, r.Steve, r.Wrangler); err != nil {\n\t\treturn err\n\t}\n\n\tr.Wrangler.OnLeader(func(ctx context.Context) error {\n\t\treturn r.Wrangler.StartWithTransaction(ctx, func(ctx context.Context) error {\n\t\t\tif err := dashboarddata.Add(ctx, r.Wrangler, localClusterEnabled(r.opts), r.opts.AddLocal == \"false\", r.opts.Embedded); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn dashboard.Register(ctx, r.Wrangler)\n\t\t})\n\t})\n\n\tif err := r.authServer.Start(ctx, false); err != nil {\n\t\treturn err\n\t}\n\n\tr.Wrangler.OnLeader(r.authServer.OnLeader)\n\tr.auditLog.Start(ctx)\n\n\treturn r.Wrangler.Start(ctx)\n}\n\nfunc (r *Rancher) ListenAndServe(ctx context.Context) error {\n\tif err := r.Start(ctx); err != nil {\n\t\treturn err\n\t}\n\n\tr.Wrangler.MultiClusterManager.Wait(ctx)\n\n\tif err := tls.ListenAndServe(ctx, r.Wrangler.RESTConfig,\n\t\tr.Auth(r.Handler),\n\t\tr.opts.BindHost,\n\t\tr.opts.HTTPSListenPort,\n\t\tr.opts.HTTPListenPort,\n\t\tr.opts.ACMEDomains,\n\t\tr.opts.NoCACerts); err != nil {\n\t\treturn err\n\t}\n\n\t<-ctx.Done()\n\treturn ctx.Err()\n}\n\nfunc newMCM(wrangler *wrangler.Context, opts *Options) wrangler.MultiClusterManager {\n\treturn multiclustermanager.NewDeferredServer(wrangler, &multiclustermanager.Options{\n\t\tRemoveLocalCluster: opts.AddLocal == \"false\",\n\t\tLocalClusterEnabled: localClusterEnabled(opts),\n\t\tEmbedded: opts.Embedded,\n\t\tHTTPSListenPort: opts.HTTPSListenPort,\n\t\tDebug: opts.Debug,\n\t\tTrace: opts.Trace,\n\t})\n}\n\nfunc setupAndValidationRESTConfig(ctx context.Context, restConfig *rest.Config) (*rest.Config, error) {\n\trestConfig = steveserver.RestConfigDefaults(restConfig)\n\treturn restConfig, k8scheck.Wait(ctx, *restConfig)\n}\n\nfunc localClusterEnabled(opts *Options) bool {\n\tif opts.AddLocal == \"true\" || opts.AddLocal == \"auto\" {\n\t\treturn true\n\t}\n\treturn false\n}\n<commit_msg>Add steve aggregation client to Rancher<commit_after>package rancher\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\n\tresponsewriter \"github.com\/rancher\/apiserver\/pkg\/middleware\"\n\t\"github.com\/rancher\/rancher\/pkg\/api\/norman\/customization\/kontainerdriver\"\n\t\"github.com\/rancher\/rancher\/pkg\/api\/norman\/customization\/podsecuritypolicytemplate\"\n\tsteveapi \"github.com\/rancher\/rancher\/pkg\/api\/steve\"\n\t\"github.com\/rancher\/rancher\/pkg\/api\/steve\/proxy\"\n\t\"github.com\/rancher\/rancher\/pkg\/auth\"\n\t\"github.com\/rancher\/rancher\/pkg\/auth\/audit\"\n\t\"github.com\/rancher\/rancher\/pkg\/auth\/requests\"\n\t\"github.com\/rancher\/rancher\/pkg\/controllers\/dashboard\"\n\t\"github.com\/rancher\/rancher\/pkg\/controllers\/dashboardapi\"\n\tmanagementauth \"github.com\/rancher\/rancher\/pkg\/controllers\/management\/auth\"\n\tcrds \"github.com\/rancher\/rancher\/pkg\/crds\/dashboard\"\n\tdashboarddata \"github.com\/rancher\/rancher\/pkg\/data\/dashboard\"\n\t\"github.com\/rancher\/rancher\/pkg\/features\"\n\t\"github.com\/rancher\/rancher\/pkg\/multiclustermanager\"\n\t\"github.com\/rancher\/rancher\/pkg\/namespace\"\n\t\"github.com\/rancher\/rancher\/pkg\/tls\"\n\t\"github.com\/rancher\/rancher\/pkg\/ui\"\n\t\"github.com\/rancher\/rancher\/pkg\/websocket\"\n\t\"github.com\/rancher\/rancher\/pkg\/wrangler\"\n\tsteveauth \"github.com\/rancher\/steve\/pkg\/auth\"\n\tsteveserver \"github.com\/rancher\/steve\/pkg\/server\"\n\t\"github.com\/rancher\/wrangler\/pkg\/k8scheck\"\n\t\"github.com\/urfave\/cli\"\n\t\"k8s.io\/client-go\/rest\"\n\t\"k8s.io\/client-go\/tools\/clientcmd\"\n)\n\ntype Options struct {\n\tACMEDomains cli.StringSlice\n\tAddLocal string\n\tEmbedded bool\n\tBindHost string\n\tHTTPListenPort int\n\tHTTPSListenPort int\n\tK8sMode string\n\tDebug bool\n\tTrace bool\n\tNoCACerts bool\n\tAuditLogPath string\n\tAuditLogMaxage int\n\tAuditLogMaxsize int\n\tAuditLogMaxbackup int\n\tAuditLevel int\n\tAgent bool\n\tFeatures string\n}\n\ntype Rancher struct {\n\tAuth steveauth.Middleware\n\tHandler http.Handler\n\tWrangler *wrangler.Context\n\tSteve *steveserver.Server\n\n\tauditLog *audit.LogWriter\n\tauthServer *auth.Server\n\topts *Options\n}\n\nfunc New(ctx context.Context, clientConfg clientcmd.ClientConfig, opts *Options) (*Rancher, error) {\n\tvar (\n\t\tauthServer *auth.Server\n\t)\n\n\tif opts == nil {\n\t\topts = &Options{}\n\t}\n\n\trestConfig, err := clientConfg.ClientConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trestConfig, err = setupAndValidationRESTConfig(ctx, restConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlockID := \"cattle-controllers\"\n\tif opts.Agent {\n\t\tlockID = \"cattle-agent-controllers\"\n\t}\n\n\twranglerContext, err := wrangler.NewContext(ctx, lockID, clientConfg, restConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twranglerContext.MultiClusterManager = newMCM(wranglerContext, opts)\n\twranglerContext.Agent = opts.Agent\n\n\tpodsecuritypolicytemplate.RegisterIndexers(wranglerContext)\n\tkontainerdriver.RegisterIndexers(wranglerContext)\n\tmanagementauth.RegisterWranglerIndexers(wranglerContext)\n\n\tif err := crds.Create(ctx, restConfig); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Initialize Features as early as possible\n\tfeatures.InitializeFeatures(wranglerContext.Mgmt.Feature(), opts.Features)\n\n\tif opts.Agent {\n\t\tauthServer, err = auth.NewHeaderAuth()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfeatures.MCM.Disable()\n\t\tfeatures.Fleet.Disable()\n\t} else if features.Auth.Enabled() {\n\t\tauthServer, err = auth.NewServer(ctx, restConfig)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tauthServer, err = auth.NewAlwaysAdmin()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tsteve, err := steveserver.New(ctx, restConfig, &steveserver.Options{\n\t\tControllers: wranglerContext.Controllers,\n\t\tAccessSetLookup: wranglerContext.ASL,\n\t\tAuthMiddleware: steveauth.ExistingContext,\n\t\tAggregationSecretNamespace: namespace.System,\n\t\tAggregationSecretName: \"steve-aggregation\",\n\t\tNext: ui.New(wranglerContext.Mgmt.Preference().Cache()),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclusterProxy, err := proxy.NewProxyMiddleware(wranglerContext.K8s.AuthorizationV1().SubjectAccessReviews(),\n\t\twranglerContext.MultiClusterManager,\n\t\twranglerContext.Mgmt.Cluster().Cache(),\n\t\tlocalClusterEnabled(opts),\n\t\tsteve,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tadditionalAPI, err := steveapi.AdditionalAPIs(ctx, wranglerContext, steve)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tauditLogWriter := audit.NewLogWriter(opts.AuditLogPath, opts.AuditLevel, opts.AuditLogMaxage, opts.AuditLogMaxbackup, opts.AuditLogMaxsize)\n\tauditFilter := audit.NewAuditLogMiddleware(auditLogWriter)\n\n\treturn &Rancher{\n\t\tAuth: authServer.Authenticator.Chain(\n\t\t\tauditFilter),\n\t\tHandler: responsewriter.Chain{\n\t\t\tresponsewriter.ContentTypeOptions,\n\t\t\twebsocket.NewWebsocketHandler,\n\t\t\tproxy.RewriteLocalCluster,\n\t\t\tclusterProxy,\n\t\t\twranglerContext.MultiClusterManager.Middleware,\n\t\t\tauthServer.Management,\n\t\t\tadditionalAPI,\n\t\t\trequests.NewRequireAuthenticatedFilter(\"\/v1\/\"),\n\t\t}.Handler(steve),\n\t\tWrangler: wranglerContext,\n\t\tSteve: steve,\n\t\tauditLog: auditLogWriter,\n\t\tauthServer: authServer,\n\t\topts: opts,\n\t}, nil\n}\n\nfunc (r *Rancher) Start(ctx context.Context) error {\n\tif err := dashboarddata.EarlyData(ctx, r.Wrangler.K8s); err != nil {\n\t\treturn err\n\t}\n\n\tif err := dashboardapi.Register(ctx, r.Wrangler); err != nil {\n\t\treturn err\n\t}\n\n\tif err := steveapi.Setup(ctx, r.Steve, r.Wrangler); err != nil {\n\t\treturn err\n\t}\n\n\tr.Wrangler.OnLeader(func(ctx context.Context) error {\n\t\treturn r.Wrangler.StartWithTransaction(ctx, func(ctx context.Context) error {\n\t\t\tif err := dashboarddata.Add(ctx, r.Wrangler, localClusterEnabled(r.opts), r.opts.AddLocal == \"false\", r.opts.Embedded); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn dashboard.Register(ctx, r.Wrangler)\n\t\t})\n\t})\n\n\tif err := r.authServer.Start(ctx, false); err != nil {\n\t\treturn err\n\t}\n\n\tr.Wrangler.OnLeader(r.authServer.OnLeader)\n\tr.auditLog.Start(ctx)\n\n\treturn r.Wrangler.Start(ctx)\n}\n\nfunc (r *Rancher) ListenAndServe(ctx context.Context) error {\n\tif err := r.Start(ctx); err != nil {\n\t\treturn err\n\t}\n\n\tr.Wrangler.MultiClusterManager.Wait(ctx)\n\n\tif err := tls.ListenAndServe(ctx, r.Wrangler.RESTConfig,\n\t\tr.Auth(r.Handler),\n\t\tr.opts.BindHost,\n\t\tr.opts.HTTPSListenPort,\n\t\tr.opts.HTTPListenPort,\n\t\tr.opts.ACMEDomains,\n\t\tr.opts.NoCACerts); err != nil {\n\t\treturn err\n\t}\n\n\t<-ctx.Done()\n\treturn ctx.Err()\n}\n\nfunc newMCM(wrangler *wrangler.Context, opts *Options) wrangler.MultiClusterManager {\n\treturn multiclustermanager.NewDeferredServer(wrangler, &multiclustermanager.Options{\n\t\tRemoveLocalCluster: opts.AddLocal == \"false\",\n\t\tLocalClusterEnabled: localClusterEnabled(opts),\n\t\tEmbedded: opts.Embedded,\n\t\tHTTPSListenPort: opts.HTTPSListenPort,\n\t\tDebug: opts.Debug,\n\t\tTrace: opts.Trace,\n\t})\n}\n\nfunc setupAndValidationRESTConfig(ctx context.Context, restConfig *rest.Config) (*rest.Config, error) {\n\trestConfig = steveserver.RestConfigDefaults(restConfig)\n\treturn restConfig, k8scheck.Wait(ctx, *restConfig)\n}\n\nfunc localClusterEnabled(opts *Options) bool {\n\tif opts.AddLocal == \"true\" || opts.AddLocal == \"auto\" {\n\t\treturn true\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ledcmd\n\nimport (\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/bazelbuild\/remote-apis-sdks\/go\/pkg\/client\"\n\t\"github.com\/bazelbuild\/remote-apis-sdks\/go\/pkg\/command\"\n\t\"github.com\/bazelbuild\/remote-apis-sdks\/go\/pkg\/digest\"\n\t\"github.com\/bazelbuild\/remote-apis-sdks\/go\/pkg\/filemetadata\"\n\t\"github.com\/mattn\/go-tty\"\n\n\t\"go.chromium.org\/luci\/auth\"\n\t\"go.chromium.org\/luci\/client\/cas\"\n\t\"go.chromium.org\/luci\/client\/downloader\"\n\t\"go.chromium.org\/luci\/common\/errors\"\n\t\"go.chromium.org\/luci\/common\/isolated\"\n\t\"go.chromium.org\/luci\/common\/isolatedclient\"\n\t\"go.chromium.org\/luci\/common\/logging\"\n\t\"go.chromium.org\/luci\/common\/retry\"\n\tapipb \"go.chromium.org\/luci\/swarming\/proto\/api\"\n\n\t\"go.chromium.org\/luci\/led\/job\"\n)\n\n\/\/ IsolatedTransformer is a function which receives a directory on the local\n\/\/ disk with the contents of an isolate and is expected to manipulate the\n\/\/ contents of that directory however it chooses.\n\/\/\n\/\/ EditIsolated takes these functions as a callback in order to manipulate the\n\/\/ isolated content of a job.Definition.\ntype IsolatedTransformer func(ctx context.Context, directory string) error\n\n\/\/ ProgramIsolatedTransformer returns an IsolatedTransformer which alters the\n\/\/ contents of the isolated by running a program specified with `args` in the\n\/\/ directory where the isolated content has been unpacked.\nfunc ProgramIsolatedTransformer(args ...string) IsolatedTransformer {\n\treturn func(ctx context.Context, dir string) error {\n\t\tlogging.Infof(ctx, \"Invoking transform_program: %q\", args)\n\t\ttProg := exec.CommandContext(ctx, args[0], args[1:]...)\n\t\ttProg.Stdout = os.Stderr\n\t\ttProg.Stderr = os.Stderr\n\t\ttProg.Dir = dir\n\t\treturn errors.Annotate(tProg.Run(), \"running transform_program\").Err()\n\t}\n}\n\n\/\/ PromptIsolatedTransformer returns an IsolatedTransformer which prompts the\n\/\/ user to navigate to the directory with the isolated content and manipulate\n\/\/ it manually. When the user is done they should press \"enter\" to indicate that\n\/\/ they're finished.\nfunc PromptIsolatedTransformer() IsolatedTransformer {\n\treturn func(ctx context.Context, dir string) error {\n\t\tlogging.Infof(ctx, \"\")\n\t\tlogging.Infof(ctx, \"Edit files as you wish in:\")\n\t\tlogging.Infof(ctx, \"\\t%s\", dir)\n\n\t\tterm, err := tty.Open()\n\t\tif err != nil {\n\t\t\treturn errors.Annotate(err, \"opening terminal\").Err()\n\t\t}\n\t\tdefer term.Close()\n\n\t\tlogging.Infof(ctx, \"When finished, press <enter> here to isolate it.\")\n\t\t_, err = term.ReadString()\n\t\treturn errors.Annotate(err, \"reading <enter>\").Err()\n\t}\n}\n\n\/\/ EditIsolated allows you to edit the isolated (input_ref or cas_input_root)\n\/\/ contents of the job.Definition.\n\/\/\n\/\/ This implicitly collapses all isolated sources in the job.Definition into\n\/\/ a single isolated source.\n\/\/ The output job.Definition always has cas_user_payload and no user_payload.\nfunc EditIsolated(ctx context.Context, authClient *http.Client, authOpts auth.Options, jd *job.Definition, xform IsolatedTransformer) error {\n\tlogging.Infof(ctx, \"editing isolated\")\n\n\ttdir, err := ioutil.TempDir(\"\", \"led-edit-isolated\")\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"failed to create tempdir\").Err()\n\t}\n\tdefer func() {\n\t\tif err = os.RemoveAll(tdir); err != nil {\n\t\t\tlogging.Errorf(ctx, \"failed to cleanup temp dir %q: %s\", tdir, err)\n\t\t}\n\t}()\n\n\tif err := ConsolidateIsolateSources(ctx, authClient, jd); err != nil {\n\t\treturn err\n\t}\n\tif err := ConsolidateRbeCasSources(ctx, authOpts, jd); err != nil {\n\t\treturn err\n\t}\n\n\tcurrent, err := jd.Info().CurrentIsolated()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif current.CASTree != nil && current.CASReference != nil {\n\t\treturn errors.Reason(\"job uses isolate and RBE-CAS at the same time - iso: %v\\ncas: %v\", current.CASTree, current.CASReference).Err()\n\t}\n\n\terr = jd.Edit(func(je job.Editor) {\n\t\tje.ClearCurrentIsolated()\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcasInstance := current.GetCasInstance()\n\tif casInstance == \"\" {\n\t\tif casInstance, err = jd.CasInstance(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tcasClient, err := cas.NewClient(ctx, casInstance, authOpts, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer casClient.Close()\n\n\tif err = downloadFromIso(ctx, current.CASTree, authClient, tdir); err != nil {\n\t\treturn err\n\t}\n\tif err = downloadFromCas(ctx, current.CASReference, casClient, tdir); err != nil {\n\t\treturn err\n\t}\n\n\tif err := xform(ctx, tdir); err != nil {\n\t\treturn err\n\t}\n\n\tlogging.Infof(ctx, \"uploading new isolated to RBE-CAS\")\n\tdigest, err := uploadToCas(ctx, casClient, tdir)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"errors in uploadToCas\").Err()\n\t}\n\tlogging.Infof(ctx, \"isolated upload: done\")\n\tjd.CasUserPayload = &apipb.CASReference{\n\t\tCasInstance: casInstance,\n\t\tDigest: &apipb.Digest{\n\t\t\tHash: digest.Hash,\n\t\t\tSizeBytes: digest.Size,\n\t\t},\n\t}\n\tjd.UserPayload = nil\n\treturn nil\n}\n\nfunc downloadFromCas(ctx context.Context, casRef *apipb.CASReference, casClient *client.Client, tdir string) error {\n\tif casRef.GetDigest().GetHash() == \"\" {\n\t\treturn nil\n\t}\n\td := digest.Digest{\n\t\tHash: casRef.Digest.Hash,\n\t\tSize: casRef.Digest.SizeBytes,\n\t}\n\tlogging.Infof(ctx, \"downloading from RBE-CAS...\")\n\t_, err := casClient.DownloadDirectory(ctx, d, tdir, filemetadata.NewNoopCache())\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"failed to download directory\").Err()\n\t}\n\treturn nil\n}\n\nfunc downloadFromIso(ctx context.Context, iso *apipb.CASTree, authClient *http.Client, tdir string) error {\n\tif iso.GetDigest() == \"\" {\n\t\treturn nil\n\t}\n\trawIsoClient := isolatedclient.NewClient(\n\t\tiso.Server,\n\t\tisolatedclient.WithAuthClient(authClient),\n\t\tisolatedclient.WithNamespace(iso.Namespace),\n\t\tisolatedclient.WithRetryFactory(retry.Default))\n\tvar statMu sync.Mutex\n\tvar previousStats *downloader.FileStats\n\n\tlogging.Infof(ctx, \"downloading from isolate...\")\n\tdl := downloader.New(ctx, rawIsoClient, isolated.HexDigest(iso.GetDigest()), tdir, &downloader.Options{\n\t\tFileStatsCallback: func(s downloader.FileStats, span time.Duration) {\n\t\t\tlogging.Infof(ctx, \"%s\", s.StatLine(previousStats, span))\n\t\t\tstatMu.Lock()\n\t\t\tpreviousStats = &s\n\t\t\tstatMu.Unlock()\n\t\t},\n\t})\n\tif err := dl.Wait(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc uploadToCas(ctx context.Context, client *client.Client, dir string) (*digest.Digest, error) {\n\tis := command.InputSpec{\n\t\tInputs: []string{\".\"}, \/\/ entire dir\n\t}\n\trootDg, entries, _, err := client.ComputeMerkleTree(dir, &is, filemetadata.NewNoopCache())\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"failed to compute Merkle Tree\").Err()\n\t}\n\n\t_, _, err = client.UploadIfMissing(ctx, entries...)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"failed to upload items\").Err()\n\t}\n\treturn &rootDg, nil\n}\n<commit_msg>led: build fix for deps roll<commit_after>\/\/ Copyright 2020 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ledcmd\n\nimport (\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/bazelbuild\/remote-apis-sdks\/go\/pkg\/client\"\n\t\"github.com\/bazelbuild\/remote-apis-sdks\/go\/pkg\/command\"\n\t\"github.com\/bazelbuild\/remote-apis-sdks\/go\/pkg\/digest\"\n\t\"github.com\/bazelbuild\/remote-apis-sdks\/go\/pkg\/filemetadata\"\n\t\"github.com\/mattn\/go-tty\"\n\n\t\"go.chromium.org\/luci\/auth\"\n\t\"go.chromium.org\/luci\/client\/cas\"\n\t\"go.chromium.org\/luci\/client\/downloader\"\n\t\"go.chromium.org\/luci\/common\/errors\"\n\t\"go.chromium.org\/luci\/common\/isolated\"\n\t\"go.chromium.org\/luci\/common\/isolatedclient\"\n\t\"go.chromium.org\/luci\/common\/logging\"\n\t\"go.chromium.org\/luci\/common\/retry\"\n\tapipb \"go.chromium.org\/luci\/swarming\/proto\/api\"\n\n\t\"go.chromium.org\/luci\/led\/job\"\n)\n\n\/\/ IsolatedTransformer is a function which receives a directory on the local\n\/\/ disk with the contents of an isolate and is expected to manipulate the\n\/\/ contents of that directory however it chooses.\n\/\/\n\/\/ EditIsolated takes these functions as a callback in order to manipulate the\n\/\/ isolated content of a job.Definition.\ntype IsolatedTransformer func(ctx context.Context, directory string) error\n\n\/\/ ProgramIsolatedTransformer returns an IsolatedTransformer which alters the\n\/\/ contents of the isolated by running a program specified with `args` in the\n\/\/ directory where the isolated content has been unpacked.\nfunc ProgramIsolatedTransformer(args ...string) IsolatedTransformer {\n\treturn func(ctx context.Context, dir string) error {\n\t\tlogging.Infof(ctx, \"Invoking transform_program: %q\", args)\n\t\ttProg := exec.CommandContext(ctx, args[0], args[1:]...)\n\t\ttProg.Stdout = os.Stderr\n\t\ttProg.Stderr = os.Stderr\n\t\ttProg.Dir = dir\n\t\treturn errors.Annotate(tProg.Run(), \"running transform_program\").Err()\n\t}\n}\n\n\/\/ PromptIsolatedTransformer returns an IsolatedTransformer which prompts the\n\/\/ user to navigate to the directory with the isolated content and manipulate\n\/\/ it manually. When the user is done they should press \"enter\" to indicate that\n\/\/ they're finished.\nfunc PromptIsolatedTransformer() IsolatedTransformer {\n\treturn func(ctx context.Context, dir string) error {\n\t\tlogging.Infof(ctx, \"\")\n\t\tlogging.Infof(ctx, \"Edit files as you wish in:\")\n\t\tlogging.Infof(ctx, \"\\t%s\", dir)\n\n\t\tterm, err := tty.Open()\n\t\tif err != nil {\n\t\t\treturn errors.Annotate(err, \"opening terminal\").Err()\n\t\t}\n\t\tdefer term.Close()\n\n\t\tlogging.Infof(ctx, \"When finished, press <enter> here to isolate it.\")\n\t\t_, err = term.ReadString()\n\t\treturn errors.Annotate(err, \"reading <enter>\").Err()\n\t}\n}\n\n\/\/ EditIsolated allows you to edit the isolated (input_ref or cas_input_root)\n\/\/ contents of the job.Definition.\n\/\/\n\/\/ This implicitly collapses all isolated sources in the job.Definition into\n\/\/ a single isolated source.\n\/\/ The output job.Definition always has cas_user_payload and no user_payload.\nfunc EditIsolated(ctx context.Context, authClient *http.Client, authOpts auth.Options, jd *job.Definition, xform IsolatedTransformer) error {\n\tlogging.Infof(ctx, \"editing isolated\")\n\n\ttdir, err := ioutil.TempDir(\"\", \"led-edit-isolated\")\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"failed to create tempdir\").Err()\n\t}\n\tdefer func() {\n\t\tif err = os.RemoveAll(tdir); err != nil {\n\t\t\tlogging.Errorf(ctx, \"failed to cleanup temp dir %q: %s\", tdir, err)\n\t\t}\n\t}()\n\n\tif err := ConsolidateIsolateSources(ctx, authClient, jd); err != nil {\n\t\treturn err\n\t}\n\tif err := ConsolidateRbeCasSources(ctx, authOpts, jd); err != nil {\n\t\treturn err\n\t}\n\n\tcurrent, err := jd.Info().CurrentIsolated()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif current.CASTree != nil && current.CASReference != nil {\n\t\treturn errors.Reason(\"job uses isolate and RBE-CAS at the same time - iso: %v\\ncas: %v\", current.CASTree, current.CASReference).Err()\n\t}\n\n\terr = jd.Edit(func(je job.Editor) {\n\t\tje.ClearCurrentIsolated()\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcasInstance := current.GetCasInstance()\n\tif casInstance == \"\" {\n\t\tif casInstance, err = jd.CasInstance(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tcasClient, err := cas.NewClient(ctx, casInstance, authOpts, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer casClient.Close()\n\n\tif err = downloadFromIso(ctx, current.CASTree, authClient, tdir); err != nil {\n\t\treturn err\n\t}\n\tif err = downloadFromCas(ctx, current.CASReference, casClient, tdir); err != nil {\n\t\treturn err\n\t}\n\n\tif err := xform(ctx, tdir); err != nil {\n\t\treturn err\n\t}\n\n\tlogging.Infof(ctx, \"uploading new isolated to RBE-CAS\")\n\tdigest, err := uploadToCas(ctx, casClient, tdir)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"errors in uploadToCas\").Err()\n\t}\n\tlogging.Infof(ctx, \"isolated upload: done\")\n\tjd.CasUserPayload = &apipb.CASReference{\n\t\tCasInstance: casInstance,\n\t\tDigest: &apipb.Digest{\n\t\t\tHash: digest.Hash,\n\t\t\tSizeBytes: digest.Size,\n\t\t},\n\t}\n\tjd.UserPayload = nil\n\treturn nil\n}\n\nfunc downloadFromCas(ctx context.Context, casRef *apipb.CASReference, casClient *client.Client, tdir string) error {\n\tif casRef.GetDigest().GetHash() == \"\" {\n\t\treturn nil\n\t}\n\td := digest.Digest{\n\t\tHash: casRef.Digest.Hash,\n\t\tSize: casRef.Digest.SizeBytes,\n\t}\n\tlogging.Infof(ctx, \"downloading from RBE-CAS...\")\n\t_, _, err := casClient.DownloadDirectory(ctx, d, tdir, filemetadata.NewNoopCache())\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"failed to download directory\").Err()\n\t}\n\treturn nil\n}\n\nfunc downloadFromIso(ctx context.Context, iso *apipb.CASTree, authClient *http.Client, tdir string) error {\n\tif iso.GetDigest() == \"\" {\n\t\treturn nil\n\t}\n\trawIsoClient := isolatedclient.NewClient(\n\t\tiso.Server,\n\t\tisolatedclient.WithAuthClient(authClient),\n\t\tisolatedclient.WithNamespace(iso.Namespace),\n\t\tisolatedclient.WithRetryFactory(retry.Default))\n\tvar statMu sync.Mutex\n\tvar previousStats *downloader.FileStats\n\n\tlogging.Infof(ctx, \"downloading from isolate...\")\n\tdl := downloader.New(ctx, rawIsoClient, isolated.HexDigest(iso.GetDigest()), tdir, &downloader.Options{\n\t\tFileStatsCallback: func(s downloader.FileStats, span time.Duration) {\n\t\t\tlogging.Infof(ctx, \"%s\", s.StatLine(previousStats, span))\n\t\t\tstatMu.Lock()\n\t\t\tpreviousStats = &s\n\t\t\tstatMu.Unlock()\n\t\t},\n\t})\n\tif err := dl.Wait(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc uploadToCas(ctx context.Context, client *client.Client, dir string) (*digest.Digest, error) {\n\tis := command.InputSpec{\n\t\tInputs: []string{\".\"}, \/\/ entire dir\n\t}\n\trootDg, entries, _, err := client.ComputeMerkleTree(dir, &is, filemetadata.NewNoopCache())\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"failed to compute Merkle Tree\").Err()\n\t}\n\n\t_, _, err = client.UploadIfMissing(ctx, entries...)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"failed to upload items\").Err()\n\t}\n\treturn &rootDg, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage plan\n\nimport (\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/pingcap\/tidb\/expression\"\n)\n\nfunc retrieveColumnsInExpression(expr expression.Expression, schema expression.Schema) (\n\texpression.Expression, error) {\n\tswitch v := expr.(type) {\n\tcase *expression.ScalarFunction:\n\t\tfor i, arg := range v.Args {\n\t\t\tnewExpr, err := retrieveColumnsInExpression(arg, schema)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tv.Args[i] = newExpr\n\t\t}\n\tcase *expression.Column:\n\t\tif !v.Correlated {\n\t\t\tnewColumn := schema.RetrieveColumn(v)\n\t\t\tif newColumn == nil {\n\t\t\t\treturn nil, errors.Errorf(\"Can't Find column %s.\", expr.ToString())\n\t\t\t}\n\t\t\treturn newColumn, nil\n\t\t}\n\t}\n\treturn expr, nil\n}\n\nfunc makeUsedList(usedCols []*expression.Column, schema expression.Schema) []bool {\n\tused := make([]bool, len(schema))\n\tfor _, col := range usedCols {\n\t\tidx := schema.GetIndex(col)\n\t\tused[idx] = true\n\t}\n\treturn used\n}\n\n\/\/ PruneColumnsAndResolveIndices prunes unused columns and resolves index for columns.\n\/\/ This function returns a column slice representing outer columns and an error.\nfunc pruneColumnsAndResolveIndices(p Plan, parentUsedCols []*expression.Column) ([]*expression.Column, error) {\n\tswitch v := p.(type) {\n\tcase *Projection:\n\t\t\/\/ Prune\n\t\tvar cols, outerCols []*expression.Column\n\t\tused := makeUsedList(parentUsedCols, p.GetSchema())\n\t\tfor i := len(used) - 1; i >= 0; i-- {\n\t\t\tif !used[i] {\n\t\t\t\tv.schema = append(v.schema[:i], v.schema[i+1:]...)\n\t\t\t\tv.Exprs = append(v.Exprs[:i], v.Exprs[i+1:]...)\n\t\t\t}\n\t\t}\n\t\tv.schema.InitIndices()\n\t\tfor _, expr := range v.Exprs {\n\t\t\tcols, outerCols = extractColumn(expr, cols, outerCols)\n\t\t}\n\t\touter, err := pruneColumnsAndResolveIndices(p.GetChildByIndex(0), cols)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tfor i, expr := range v.Exprs {\n\t\t\tv.Exprs[i], err = retrieveColumnsInExpression(expr, p.GetChildByIndex(0).GetSchema())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\t\treturn append(outer, outerCols...), nil\n\tcase *Selection:\n\t\tvar outerCols []*expression.Column\n\t\tfor _, cond := range v.Conditions {\n\t\t\tparentUsedCols, outerCols = extractColumn(cond, parentUsedCols, outerCols)\n\t\t}\n\t\touter, err := pruneColumnsAndResolveIndices(p.GetChildByIndex(0), parentUsedCols)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tv.SetSchema(p.GetChildByIndex(0).GetSchema())\n\t\tfor i, cond := range v.Conditions {\n\t\t\tv.Conditions[i], err = retrieveColumnsInExpression(cond, p.GetChildByIndex(0).GetSchema())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\t\treturn append(outer, outerCols...), nil\n\tcase *Apply:\n\t\treturn pruneApply(v, parentUsedCols)\n\tcase *Aggregation:\n\t\tused := makeUsedList(parentUsedCols, p.GetSchema())\n\t\tfor i := len(used) - 1; i >= 0; i-- {\n\t\t\tif !used[i] {\n\t\t\t\tv.schema = append(v.schema[:i], v.schema[i+1:]...)\n\t\t\t\tv.AggFuncs = append(v.AggFuncs[:i], v.AggFuncs[i+1:]...)\n\t\t\t}\n\t\t}\n\t\tvar cols, outerCols []*expression.Column\n\t\tfor _, aggrFunc := range v.AggFuncs {\n\t\t\tfor _, arg := range aggrFunc.GetArgs() {\n\t\t\t\tcols, outerCols = extractColumn(arg, cols, outerCols)\n\t\t\t}\n\t\t}\n\t\tfor _, expr := range v.GroupByItems {\n\t\t\tcols, outerCols = extractColumn(expr, cols, outerCols)\n\t\t}\n\t\touter, err := pruneColumnsAndResolveIndices(p.GetChildByIndex(0), cols)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tfor _, aggrFunc := range v.AggFuncs {\n\t\t\tfor i, arg := range aggrFunc.GetArgs() {\n\t\t\t\tvar newArg expression.Expression\n\t\t\t\tnewArg, err = retrieveColumnsInExpression(arg, p.GetChildByIndex(0).GetSchema())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t\t}\n\t\t\t\taggrFunc.SetArgs(i, newArg)\n\t\t\t}\n\t\t}\n\t\tv.schema.InitIndices()\n\t\treturn append(outer, outerCols...), nil\n\tcase *NewSort:\n\t\tvar outerCols []*expression.Column\n\t\tfor _, item := range v.ByItems {\n\t\t\tparentUsedCols, outerCols = extractColumn(item.Expr, parentUsedCols, outerCols)\n\t\t}\n\t\touter, err := pruneColumnsAndResolveIndices(p.GetChildByIndex(0), parentUsedCols)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tv.SetSchema(p.GetChildByIndex(0).GetSchema())\n\t\tfor _, item := range v.ByItems {\n\t\t\titem.Expr, err = retrieveColumnsInExpression(item.Expr, p.GetChildByIndex(0).GetSchema())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\t\treturn append(outer, outerCols...), nil\n\tcase *Union:\n\t\tvar outerCols []*expression.Column\n\t\tused := makeUsedList(parentUsedCols, p.GetSchema())\n\t\tfor i := len(used) - 1; i >= 0; i-- {\n\t\t\tif !used[i] {\n\t\t\t\tv.schema = append(v.schema[:i], v.schema[i+1:]...)\n\t\t\t}\n\t\t}\n\t\tv.schema.InitIndices()\n\t\tfor _, child := range p.GetChildren() {\n\t\t\tschema := child.GetSchema()\n\t\t\tvar newSchema []*expression.Column\n\t\t\tfor i, use := range used {\n\t\t\t\tif use {\n\t\t\t\t\tnewSchema = append(newSchema, schema[i])\n\t\t\t\t}\n\t\t\t}\n\t\t\touter, err := pruneColumnsAndResolveIndices(child, newSchema)\n\t\t\touterCols = append(outerCols, outer...)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\t\treturn outerCols, nil\n\tcase *NewTableScan:\n\t\tused := makeUsedList(parentUsedCols, p.GetSchema())\n\t\tfor i := len(used) - 1; i >= 0; i-- {\n\t\t\tif !used[i] {\n\t\t\t\tv.schema = append(v.schema[:i], v.schema[i+1:]...)\n\t\t\t\tv.Columns = append(v.Columns[:i], v.Columns[i+1:]...)\n\t\t\t}\n\t\t}\n\t\tv.schema.InitIndices()\n\t\treturn nil, nil\n\tcase *Limit, *MaxOneRow, *Distinct:\n\t\touter, err := pruneColumnsAndResolveIndices(p.GetChildByIndex(0), parentUsedCols)\n\t\tp.SetSchema(p.GetChildByIndex(0).GetSchema())\n\t\treturn outer, errors.Trace(err)\n\tcase *Trim:\n\t\tused := makeUsedList(parentUsedCols, v.schema)\n\t\tfor i := len(used) - 1; i >= 0; i-- {\n\t\t\tif !used[i] {\n\t\t\t\tv.schema = append(v.schema[:i], v.schema[i+1:]...)\n\t\t\t}\n\t\t}\n\t\touter, err := pruneColumnsAndResolveIndices(p.GetChildByIndex(0), parentUsedCols)\n\t\treturn outer, errors.Trace(err)\n\tcase *Exists:\n\t\treturn pruneColumnsAndResolveIndices(p.GetChildByIndex(0), nil)\n\tcase *Join:\n\t\tvar outerCols []*expression.Column\n\t\tfor _, eqCond := range v.EqualConditions {\n\t\t\tparentUsedCols, outerCols = extractColumn(eqCond, parentUsedCols, outerCols)\n\t\t}\n\t\tfor _, leftCond := range v.LeftConditions {\n\t\t\tparentUsedCols, outerCols = extractColumn(leftCond, parentUsedCols, outerCols)\n\t\t}\n\t\tfor _, rightCond := range v.RightConditions {\n\t\t\tparentUsedCols, outerCols = extractColumn(rightCond, parentUsedCols, outerCols)\n\t\t}\n\t\tfor _, otherCond := range v.OtherConditions {\n\t\t\tparentUsedCols, outerCols = extractColumn(otherCond, parentUsedCols, outerCols)\n\t\t}\n\t\tvar leftCols, rightCols []*expression.Column\n\t\tfor _, col := range parentUsedCols {\n\t\t\tif p.GetChildByIndex(0).GetSchema().GetIndex(col) != -1 {\n\t\t\t\tleftCols = append(leftCols, col)\n\t\t\t} else {\n\t\t\t\trightCols = append(rightCols, col)\n\t\t\t}\n\t\t}\n\t\touterLeft, err := pruneColumnsAndResolveIndices(p.GetChildByIndex(0), leftCols)\n\t\touterCols = append(outerCols, outerLeft...)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tfor i, leftCond := range v.LeftConditions {\n\t\t\tv.LeftConditions[i], err = retrieveColumnsInExpression(leftCond, p.GetChildByIndex(0).GetSchema())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\t\touterRight, err := pruneColumnsAndResolveIndices(p.GetChildByIndex(1), rightCols)\n\t\touterCols = append(outerCols, outerRight...)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tfor i, rightCond := range v.RightConditions {\n\t\t\tv.RightConditions[i], err = retrieveColumnsInExpression(rightCond, p.GetChildByIndex(1).GetSchema())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\t\tv.schema = append(v.GetChildByIndex(0).GetSchema().DeepCopy(), v.GetChildByIndex(1).GetSchema().DeepCopy()...)\n\t\tv.schema.InitIndices()\n\t\tfor i, otherCond := range v.OtherConditions {\n\t\t\tv.OtherConditions[i], err = retrieveColumnsInExpression(otherCond, p.GetSchema())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\t\tfor _, eqCond := range v.EqualConditions {\n\t\t\teqCond.Args[0], err = retrieveColumnsInExpression(eqCond.Args[0], p.GetChildByIndex(0).GetSchema())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\teqCond.Args[1], err = retrieveColumnsInExpression(eqCond.Args[1], p.GetChildByIndex(1).GetSchema())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\t\treturn outerCols, nil\n\tdefault:\n\t\treturn nil, nil\n\t}\n}\n\n\/\/ e.g. For query select b.c ,(select count(*) from a where a.id = b.id) from b. Its plan is Projection->Apply->TableScan.\n\/\/ The schema of b is (a,b,c,id). When Pruning Apply, the parentUsedCols is (c, extra), outerSchema is (a,b,c,id).\n\/\/ Then after pruning inner plan, the outer schema in apply becomes (id).\n\/\/ Now there're two columns in parentUsedCols, c is the column from Apply's child ---- TableScan, but extra isn't.\n\/\/ So only c in parentUsedCols and id in outerSchema can be passed to TableScan.\nfunc pruneApply(v *Apply, parentUsedCols []*expression.Column) ([]*expression.Column, error) {\n\touter, err := pruneColumnsAndResolveIndices(v.InnerPlan, v.InnerPlan.GetSchema())\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tused := makeUsedList(outer, v.OuterSchema)\n\tfor i := len(used) - 1; i >= 0; i-- {\n\t\tif !used[i] {\n\t\t\tv.OuterSchema = append(v.OuterSchema[:i], v.OuterSchema[i+1:]...)\n\t\t}\n\t}\n\tnewUsedCols := v.OuterSchema\n\tfor _, used := range parentUsedCols {\n\t\tif v.GetChildByIndex(0).GetSchema().GetIndex(used) != -1 {\n\t\t\tnewUsedCols = append(newUsedCols, used)\n\t\t}\n\t}\n\tif v.Checker != nil {\n\t\tcondUsedCols, _ := extractColumn(v.Checker.Condition, nil, nil)\n\t\tfor _, used := range condUsedCols {\n\t\t\tif v.GetChildByIndex(0).GetSchema().GetIndex(used) != -1 {\n\t\t\t\tnewUsedCols = append(newUsedCols, used)\n\t\t\t}\n\t\t}\n\t}\n\touter, err = pruneColumnsAndResolveIndices(v.GetChildByIndex(0), newUsedCols)\n\tfor _, col := range v.OuterSchema {\n\t\tcol.Index = v.GetChildByIndex(0).GetSchema().GetIndex(col)\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tcombinedSchema := append(v.GetChildByIndex(0).GetSchema().DeepCopy(), v.InnerPlan.GetSchema().DeepCopy()...)\n\tif v.Checker == nil {\n\t\tv.schema = combinedSchema\n\t} else {\n\t\tcombinedSchema.InitIndices()\n\t\tv.Checker.Condition, err = retrieveColumnsInExpression(v.Checker.Condition, combinedSchema)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tv.schema = append(v.GetChildByIndex(0).GetSchema().DeepCopy(), v.schema[len(v.schema)-1])\n\t}\n\tv.schema.InitIndices()\n\treturn outer, nil\n}\n<commit_msg>plan: tiny clean up<commit_after>\/\/ Copyright 2016 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage plan\n\nimport (\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/pingcap\/tidb\/expression\"\n)\n\nfunc retrieveColumnsInExpression(expr expression.Expression, schema expression.Schema) (\n\texpression.Expression, error) {\n\tswitch v := expr.(type) {\n\tcase *expression.ScalarFunction:\n\t\tfor i, arg := range v.Args {\n\t\t\tnewExpr, err := retrieveColumnsInExpression(arg, schema)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\tv.Args[i] = newExpr\n\t\t}\n\tcase *expression.Column:\n\t\tif !v.Correlated {\n\t\t\tnewColumn := schema.RetrieveColumn(v)\n\t\t\tif newColumn == nil {\n\t\t\t\treturn nil, errors.Errorf(\"Can't Find column %s.\", expr.ToString())\n\t\t\t}\n\t\t\treturn newColumn, nil\n\t\t}\n\t}\n\treturn expr, nil\n}\n\nfunc makeUsedList(usedCols []*expression.Column, schema expression.Schema) []bool {\n\tused := make([]bool, len(schema))\n\tfor _, col := range usedCols {\n\t\tidx := schema.GetIndex(col)\n\t\tused[idx] = true\n\t}\n\treturn used\n}\n\n\/\/ pruneColumnsAndResolveIndices prunes unused columns and resolves index for columns.\n\/\/ This function returns a column slice representing outer columns and an error.\nfunc pruneColumnsAndResolveIndices(p Plan, parentUsedCols []*expression.Column) ([]*expression.Column, error) {\n\tswitch v := p.(type) {\n\tcase *Projection:\n\t\t\/\/ Prune\n\t\tvar cols, outerCols []*expression.Column\n\t\tused := makeUsedList(parentUsedCols, p.GetSchema())\n\t\tfor i := len(used) - 1; i >= 0; i-- {\n\t\t\tif !used[i] {\n\t\t\t\tv.schema = append(v.schema[:i], v.schema[i+1:]...)\n\t\t\t\tv.Exprs = append(v.Exprs[:i], v.Exprs[i+1:]...)\n\t\t\t}\n\t\t}\n\t\tv.schema.InitIndices()\n\t\tfor _, expr := range v.Exprs {\n\t\t\tcols, outerCols = extractColumn(expr, cols, outerCols)\n\t\t}\n\t\touter, err := pruneColumnsAndResolveIndices(p.GetChildByIndex(0), cols)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tfor i, expr := range v.Exprs {\n\t\t\tv.Exprs[i], err = retrieveColumnsInExpression(expr, p.GetChildByIndex(0).GetSchema())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\t\treturn append(outer, outerCols...), nil\n\tcase *Selection:\n\t\tvar outerCols []*expression.Column\n\t\tfor _, cond := range v.Conditions {\n\t\t\tparentUsedCols, outerCols = extractColumn(cond, parentUsedCols, outerCols)\n\t\t}\n\t\touter, err := pruneColumnsAndResolveIndices(p.GetChildByIndex(0), parentUsedCols)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tv.SetSchema(p.GetChildByIndex(0).GetSchema())\n\t\tfor i, cond := range v.Conditions {\n\t\t\tv.Conditions[i], err = retrieveColumnsInExpression(cond, p.GetChildByIndex(0).GetSchema())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\t\treturn append(outer, outerCols...), nil\n\tcase *Apply:\n\t\treturn pruneApply(v, parentUsedCols)\n\tcase *Aggregation:\n\t\tused := makeUsedList(parentUsedCols, p.GetSchema())\n\t\tfor i := len(used) - 1; i >= 0; i-- {\n\t\t\tif !used[i] {\n\t\t\t\tv.schema = append(v.schema[:i], v.schema[i+1:]...)\n\t\t\t\tv.AggFuncs = append(v.AggFuncs[:i], v.AggFuncs[i+1:]...)\n\t\t\t}\n\t\t}\n\t\tvar cols, outerCols []*expression.Column\n\t\tfor _, aggrFunc := range v.AggFuncs {\n\t\t\tfor _, arg := range aggrFunc.GetArgs() {\n\t\t\t\tcols, outerCols = extractColumn(arg, cols, outerCols)\n\t\t\t}\n\t\t}\n\t\tfor _, expr := range v.GroupByItems {\n\t\t\tcols, outerCols = extractColumn(expr, cols, outerCols)\n\t\t}\n\t\touter, err := pruneColumnsAndResolveIndices(p.GetChildByIndex(0), cols)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tfor _, aggrFunc := range v.AggFuncs {\n\t\t\tfor i, arg := range aggrFunc.GetArgs() {\n\t\t\t\tvar newArg expression.Expression\n\t\t\t\tnewArg, err = retrieveColumnsInExpression(arg, p.GetChildByIndex(0).GetSchema())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t\t}\n\t\t\t\taggrFunc.SetArgs(i, newArg)\n\t\t\t}\n\t\t}\n\t\tv.schema.InitIndices()\n\t\treturn append(outer, outerCols...), nil\n\tcase *NewSort:\n\t\tvar outerCols []*expression.Column\n\t\tfor _, item := range v.ByItems {\n\t\t\tparentUsedCols, outerCols = extractColumn(item.Expr, parentUsedCols, outerCols)\n\t\t}\n\t\touter, err := pruneColumnsAndResolveIndices(p.GetChildByIndex(0), parentUsedCols)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tv.SetSchema(p.GetChildByIndex(0).GetSchema())\n\t\tfor _, item := range v.ByItems {\n\t\t\titem.Expr, err = retrieveColumnsInExpression(item.Expr, p.GetChildByIndex(0).GetSchema())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\t\treturn append(outer, outerCols...), nil\n\tcase *Union:\n\t\tvar outerCols []*expression.Column\n\t\tused := makeUsedList(parentUsedCols, p.GetSchema())\n\t\tfor i := len(used) - 1; i >= 0; i-- {\n\t\t\tif !used[i] {\n\t\t\t\tv.schema = append(v.schema[:i], v.schema[i+1:]...)\n\t\t\t}\n\t\t}\n\t\tv.schema.InitIndices()\n\t\tfor _, child := range p.GetChildren() {\n\t\t\tschema := child.GetSchema()\n\t\t\tvar newSchema []*expression.Column\n\t\t\tfor i, use := range used {\n\t\t\t\tif use {\n\t\t\t\t\tnewSchema = append(newSchema, schema[i])\n\t\t\t\t}\n\t\t\t}\n\t\t\touter, err := pruneColumnsAndResolveIndices(child, newSchema)\n\t\t\touterCols = append(outerCols, outer...)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\t\treturn outerCols, nil\n\tcase *NewTableScan:\n\t\tused := makeUsedList(parentUsedCols, p.GetSchema())\n\t\tfor i := len(used) - 1; i >= 0; i-- {\n\t\t\tif !used[i] {\n\t\t\t\tv.schema = append(v.schema[:i], v.schema[i+1:]...)\n\t\t\t\tv.Columns = append(v.Columns[:i], v.Columns[i+1:]...)\n\t\t\t}\n\t\t}\n\t\tv.schema.InitIndices()\n\t\treturn nil, nil\n\tcase *Limit, *MaxOneRow, *Distinct:\n\t\touter, err := pruneColumnsAndResolveIndices(p.GetChildByIndex(0), parentUsedCols)\n\t\tp.SetSchema(p.GetChildByIndex(0).GetSchema())\n\t\treturn outer, errors.Trace(err)\n\tcase *Trim:\n\t\tused := makeUsedList(parentUsedCols, v.schema)\n\t\tfor i := len(used) - 1; i >= 0; i-- {\n\t\t\tif !used[i] {\n\t\t\t\tv.schema = append(v.schema[:i], v.schema[i+1:]...)\n\t\t\t}\n\t\t}\n\t\touter, err := pruneColumnsAndResolveIndices(p.GetChildByIndex(0), parentUsedCols)\n\t\treturn outer, errors.Trace(err)\n\tcase *Exists:\n\t\treturn pruneColumnsAndResolveIndices(p.GetChildByIndex(0), nil)\n\tcase *Join:\n\t\tvar outerCols []*expression.Column\n\t\tfor _, eqCond := range v.EqualConditions {\n\t\t\tparentUsedCols, outerCols = extractColumn(eqCond, parentUsedCols, outerCols)\n\t\t}\n\t\tfor _, leftCond := range v.LeftConditions {\n\t\t\tparentUsedCols, outerCols = extractColumn(leftCond, parentUsedCols, outerCols)\n\t\t}\n\t\tfor _, rightCond := range v.RightConditions {\n\t\t\tparentUsedCols, outerCols = extractColumn(rightCond, parentUsedCols, outerCols)\n\t\t}\n\t\tfor _, otherCond := range v.OtherConditions {\n\t\t\tparentUsedCols, outerCols = extractColumn(otherCond, parentUsedCols, outerCols)\n\t\t}\n\t\tvar leftCols, rightCols []*expression.Column\n\t\tfor _, col := range parentUsedCols {\n\t\t\tif p.GetChildByIndex(0).GetSchema().GetIndex(col) != -1 {\n\t\t\t\tleftCols = append(leftCols, col)\n\t\t\t} else {\n\t\t\t\trightCols = append(rightCols, col)\n\t\t\t}\n\t\t}\n\t\touterLeft, err := pruneColumnsAndResolveIndices(p.GetChildByIndex(0), leftCols)\n\t\touterCols = append(outerCols, outerLeft...)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tfor i, leftCond := range v.LeftConditions {\n\t\t\tv.LeftConditions[i], err = retrieveColumnsInExpression(leftCond, p.GetChildByIndex(0).GetSchema())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\t\touterRight, err := pruneColumnsAndResolveIndices(p.GetChildByIndex(1), rightCols)\n\t\touterCols = append(outerCols, outerRight...)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tfor i, rightCond := range v.RightConditions {\n\t\t\tv.RightConditions[i], err = retrieveColumnsInExpression(rightCond, p.GetChildByIndex(1).GetSchema())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\t\tv.schema = append(v.GetChildByIndex(0).GetSchema().DeepCopy(), v.GetChildByIndex(1).GetSchema().DeepCopy()...)\n\t\tv.schema.InitIndices()\n\t\tfor i, otherCond := range v.OtherConditions {\n\t\t\tv.OtherConditions[i], err = retrieveColumnsInExpression(otherCond, p.GetSchema())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\t\tfor _, eqCond := range v.EqualConditions {\n\t\t\teqCond.Args[0], err = retrieveColumnsInExpression(eqCond.Args[0], p.GetChildByIndex(0).GetSchema())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t\teqCond.Args[1], err = retrieveColumnsInExpression(eqCond.Args[1], p.GetChildByIndex(1).GetSchema())\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Trace(err)\n\t\t\t}\n\t\t}\n\t\treturn outerCols, nil\n\tdefault:\n\t\treturn nil, nil\n\t}\n}\n\n\/\/ e.g. For query select b.c ,(select count(*) from a where a.id = b.id) from b. Its plan is Projection->Apply->TableScan.\n\/\/ The schema of b is (a,b,c,id). When Pruning Apply, the parentUsedCols is (c, extra), outerSchema is (a,b,c,id).\n\/\/ Then after pruning inner plan, the outer schema in apply becomes (id).\n\/\/ Now there're two columns in parentUsedCols, c is the column from Apply's child ---- TableScan, but extra isn't.\n\/\/ So only c in parentUsedCols and id in outerSchema can be passed to TableScan.\nfunc pruneApply(v *Apply, parentUsedCols []*expression.Column) ([]*expression.Column, error) {\n\touter, err := pruneColumnsAndResolveIndices(v.InnerPlan, v.InnerPlan.GetSchema())\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tused := makeUsedList(outer, v.OuterSchema)\n\tfor i := len(used) - 1; i >= 0; i-- {\n\t\tif !used[i] {\n\t\t\tv.OuterSchema = append(v.OuterSchema[:i], v.OuterSchema[i+1:]...)\n\t\t}\n\t}\n\tnewUsedCols := v.OuterSchema\n\tfor _, used := range parentUsedCols {\n\t\tif v.GetChildByIndex(0).GetSchema().GetIndex(used) != -1 {\n\t\t\tnewUsedCols = append(newUsedCols, used)\n\t\t}\n\t}\n\tif v.Checker != nil {\n\t\tcondUsedCols, _ := extractColumn(v.Checker.Condition, nil, nil)\n\t\tfor _, used := range condUsedCols {\n\t\t\tif v.GetChildByIndex(0).GetSchema().GetIndex(used) != -1 {\n\t\t\t\tnewUsedCols = append(newUsedCols, used)\n\t\t\t}\n\t\t}\n\t}\n\touter, err = pruneColumnsAndResolveIndices(v.GetChildByIndex(0), newUsedCols)\n\tfor _, col := range v.OuterSchema {\n\t\tcol.Index = v.GetChildByIndex(0).GetSchema().GetIndex(col)\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tcombinedSchema := append(v.GetChildByIndex(0).GetSchema().DeepCopy(), v.InnerPlan.GetSchema().DeepCopy()...)\n\tif v.Checker == nil {\n\t\tv.schema = combinedSchema\n\t} else {\n\t\tcombinedSchema.InitIndices()\n\t\tv.Checker.Condition, err = retrieveColumnsInExpression(v.Checker.Condition, combinedSchema)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tv.schema = append(v.GetChildByIndex(0).GetSchema().DeepCopy(), v.schema[len(v.schema)-1])\n\t}\n\tv.schema.InitIndices()\n\treturn outer, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package terminal\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\t\"github.com\/mattn\/go-isatty\"\n)\n\nvar (\n\tcursorFunctions = map[rune]func(c *Cursor) func(int){\n\t\t'A': func(c *Cursor) func(int) { return c.Up },\n\t\t'B': func(c *Cursor) func(int) { return c.Down },\n\t\t'C': func(c *Cursor) func(int) { return c.Forward },\n\t\t'D': func(c *Cursor) func(int) { return c.Back },\n\t\t'E': func(c *Cursor) func(int) { return c.NextLine },\n\t\t'F': func(c *Cursor) func(int) { return c.PreviousLine },\n\t\t'G': func(c *Cursor) func(int) { return c.HorizontalAbsolute },\n\t}\n)\n\nconst (\n\tforegroundBlue = 0x1\n\tforegroundGreen = 0x2\n\tforegroundRed = 0x4\n\tforegroundIntensity = 0x8\n\tforegroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)\n\tbackgroundBlue = 0x10\n\tbackgroundGreen = 0x20\n\tbackgroundRed = 0x40\n\tbackgroundIntensity = 0x80\n\tbackgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)\n)\n\ntype Writer struct {\n\tout FileWriter\n\thandle syscall.Handle\n\torgAttr word\n}\n\nfunc NewAnsiStdout(out FileWriter) io.Writer {\n\tvar csbi consoleScreenBufferInfo\n\tif !isatty.IsTerminal(out.Fd()) {\n\t\treturn out\n\t}\n\thandle := syscall.Handle(out.Fd())\n\tprocGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))\n\treturn &Writer{out: out, handle: handle, orgAttr: csbi.attributes}\n}\n\nfunc NewAnsiStderr(out FileWriter) io.Writer {\n\tvar csbi consoleScreenBufferInfo\n\tif !isatty.IsTerminal(out.Fd()) {\n\t\treturn out\n\t}\n\thandle := syscall.Handle(out.Fd())\n\tprocGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))\n\treturn &Writer{out: out, handle: handle, orgAttr: csbi.attributes}\n}\n\nfunc (w *Writer) Write(data []byte) (n int, err error) {\n\tr := bytes.NewReader(data)\n\n\tfor {\n\t\tvar ch rune\n\t\tvar size int\n\t\tch, size, err = r.ReadRune()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tn += size\n\n\t\tswitch ch {\n\t\tcase '\\x1b':\n\t\t\tsize, err = w.handleEscape(r)\n\t\t\tn += size\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\t_, err = fmt.Fprint(w.out, string(ch))\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (w *Writer) handleEscape(r *bytes.Reader) (n int, err error) {\n\tbuf := make([]byte, 0, 10)\n\tbuf = append(buf, \"\\x1b\"...)\n\n\t\/\/ Check '[' continues after \\x1b\n\tch, size, err := r.ReadRune()\n\tif err != nil {\n\t\tfmt.Fprint(w.out, string(buf))\n\t\treturn\n\t}\n\tn += size\n\tif ch != '[' {\n\t\tfmt.Fprint(w.out, string(buf))\n\t\treturn\n\t}\n\n\t\/\/ Parse escape code\n\tvar code rune\n\targBuf := make([]byte, 0, 10)\n\tfor {\n\t\tch, size, err = r.ReadRune()\n\t\tif err != nil {\n\t\t\tfmt.Fprint(w.out, string(buf))\n\t\t\treturn\n\t\t}\n\t\tn += size\n\t\tif ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') {\n\t\t\tcode = ch\n\t\t\tbreak\n\t\t}\n\t\targBuf = append(argBuf, string(ch)...)\n\t}\n\n\tw.applyEscapeCode(buf, string(argBuf), code)\n\treturn\n}\n\nfunc (w *Writer) applyEscapeCode(buf []byte, arg string, code rune) {\n\tc := &Cursor{Out: w.out}\n\n\tswitch arg + string(code) {\n\tcase \"?25h\":\n\t\tc.Show()\n\t\treturn\n\tcase \"?25l\":\n\t\tc.Hide()\n\t\treturn\n\t}\n\n\tif f, ok := cursorFunctions[code]; ok {\n\t\tif n, err := strconv.Atoi(arg); err == nil {\n\t\t\tf(c)(n)\n\t\t\treturn\n\t\t}\n\t}\n\n\tswitch code {\n\tcase 'm':\n\t\tw.applySelectGraphicRendition(arg)\n\tdefault:\n\t\tbuf = append(buf, string(code)...)\n\t\tfmt.Fprint(w.out, string(buf))\n\t}\n}\n\n\/\/ Original implementation: https:\/\/github.com\/mattn\/go-colorable\nfunc (w *Writer) applySelectGraphicRendition(arg string) {\n\tif arg == \"\" {\n\t\tprocSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.orgAttr))\n\t\treturn\n\t}\n\n\tvar csbi consoleScreenBufferInfo\n\tprocGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))\n\tattr := csbi.attributes\n\n\tfor _, param := range strings.Split(arg, \";\") {\n\t\tn, err := strconv.Atoi(param)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch {\n\t\tcase n == 0 || n == 100:\n\t\t\tattr = w.orgAttr\n\t\tcase 1 <= n && n <= 5:\n\t\t\tattr |= foregroundIntensity\n\t\tcase 30 <= n && n <= 37:\n\t\t\tattr = (attr & backgroundMask)\n\t\t\tif (n-30)&1 != 0 {\n\t\t\t\tattr |= foregroundRed\n\t\t\t}\n\t\t\tif (n-30)&2 != 0 {\n\t\t\t\tattr |= foregroundGreen\n\t\t\t}\n\t\t\tif (n-30)&4 != 0 {\n\t\t\t\tattr |= foregroundBlue\n\t\t\t}\n\t\tcase 40 <= n && n <= 47:\n\t\t\tattr = (attr & foregroundMask)\n\t\t\tif (n-40)&1 != 0 {\n\t\t\t\tattr |= backgroundRed\n\t\t\t}\n\t\t\tif (n-40)&2 != 0 {\n\t\t\t\tattr |= backgroundGreen\n\t\t\t}\n\t\t\tif (n-40)&4 != 0 {\n\t\t\t\tattr |= backgroundBlue\n\t\t\t}\n\t\tcase 90 <= n && n <= 97:\n\t\t\tattr = (attr & backgroundMask)\n\t\t\tattr |= foregroundIntensity\n\t\t\tif (n-90)&1 != 0 {\n\t\t\t\tattr |= foregroundRed\n\t\t\t}\n\t\t\tif (n-90)&2 != 0 {\n\t\t\t\tattr |= foregroundGreen\n\t\t\t}\n\t\t\tif (n-90)&4 != 0 {\n\t\t\t\tattr |= foregroundBlue\n\t\t\t}\n\t\tcase 100 <= n && n <= 107:\n\t\t\tattr = (attr & foregroundMask)\n\t\t\tattr |= backgroundIntensity\n\t\t\tif (n-100)&1 != 0 {\n\t\t\t\tattr |= backgroundRed\n\t\t\t}\n\t\t\tif (n-100)&2 != 0 {\n\t\t\t\tattr |= backgroundGreen\n\t\t\t}\n\t\t\tif (n-100)&4 != 0 {\n\t\t\t\tattr |= backgroundBlue\n\t\t\t}\n\t\t}\n\t}\n\n\tprocSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))\n}\n<commit_msg>Fix Survey output on Windows (#413)<commit_after>package terminal\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\t\"github.com\/mattn\/go-isatty\"\n)\n\nvar (\n\tcursorFunctions = map[rune]func(c *Cursor) func(int){\n\t\t'A': func(c *Cursor) func(int) { return c.Up },\n\t\t'B': func(c *Cursor) func(int) { return c.Down },\n\t\t'C': func(c *Cursor) func(int) { return c.Forward },\n\t\t'D': func(c *Cursor) func(int) { return c.Back },\n\t\t'E': func(c *Cursor) func(int) { return c.NextLine },\n\t\t'F': func(c *Cursor) func(int) { return c.PreviousLine },\n\t\t'G': func(c *Cursor) func(int) { return c.HorizontalAbsolute },\n\t}\n)\n\nconst (\n\tforegroundBlue = 0x1\n\tforegroundGreen = 0x2\n\tforegroundRed = 0x4\n\tforegroundIntensity = 0x8\n\tforegroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)\n\tbackgroundBlue = 0x10\n\tbackgroundGreen = 0x20\n\tbackgroundRed = 0x40\n\tbackgroundIntensity = 0x80\n\tbackgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)\n)\n\ntype Writer struct {\n\tout FileWriter\n\thandle syscall.Handle\n\torgAttr word\n}\n\nfunc NewAnsiStdout(out FileWriter) io.Writer {\n\tvar csbi consoleScreenBufferInfo\n\tif !isatty.IsTerminal(out.Fd()) {\n\t\treturn out\n\t}\n\thandle := syscall.Handle(out.Fd())\n\tprocGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))\n\treturn &Writer{out: out, handle: handle, orgAttr: csbi.attributes}\n}\n\nfunc NewAnsiStderr(out FileWriter) io.Writer {\n\tvar csbi consoleScreenBufferInfo\n\tif !isatty.IsTerminal(out.Fd()) {\n\t\treturn out\n\t}\n\thandle := syscall.Handle(out.Fd())\n\tprocGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))\n\treturn &Writer{out: out, handle: handle, orgAttr: csbi.attributes}\n}\n\nfunc (w *Writer) Write(data []byte) (n int, err error) {\n\tr := bytes.NewReader(data)\n\n\tfor {\n\t\tvar ch rune\n\t\tvar size int\n\t\tch, size, err = r.ReadRune()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tn += size\n\n\t\tswitch ch {\n\t\tcase '\\x1b':\n\t\t\tsize, err = w.handleEscape(r)\n\t\t\tn += size\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\t_, err = fmt.Fprint(w.out, string(ch))\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (w *Writer) handleEscape(r *bytes.Reader) (n int, err error) {\n\tbuf := make([]byte, 0, 10)\n\tbuf = append(buf, \"\\x1b\"...)\n\n\t\/\/ Check '[' continues after \\x1b\n\tch, size, err := r.ReadRune()\n\tif err != nil {\n\t\tfmt.Fprint(w.out, string(buf))\n\t\treturn\n\t}\n\tn += size\n\tif ch != '[' {\n\t\tfmt.Fprint(w.out, string(buf))\n\t\treturn\n\t}\n\n\t\/\/ Parse escape code\n\tvar code rune\n\targBuf := make([]byte, 0, 10)\n\tfor {\n\t\tch, size, err = r.ReadRune()\n\t\tif err != nil {\n\t\t\tfmt.Fprint(w.out, string(buf))\n\t\t\treturn\n\t\t}\n\t\tn += size\n\t\tif ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') {\n\t\t\tcode = ch\n\t\t\tbreak\n\t\t}\n\t\targBuf = append(argBuf, string(ch)...)\n\t}\n\n\tw.applyEscapeCode(buf, string(argBuf), code)\n\treturn\n}\n\nfunc (w *Writer) applyEscapeCode(buf []byte, arg string, code rune) {\n\tc := &Cursor{Out: w.out}\n\n\tswitch arg + string(code) {\n\tcase \"?25h\":\n\t\tc.Show()\n\t\treturn\n\tcase \"?25l\":\n\t\tc.Hide()\n\t\treturn\n\t}\n\n\tif f, ok := cursorFunctions[code]; ok {\n\t\tif n, err := strconv.Atoi(arg); err == nil {\n\t\t\tf(c)(n)\n\t\t\treturn\n\t\t}\n\t}\n\n\tswitch code {\n\tcase 'm':\n\t\tw.applySelectGraphicRendition(arg)\n\tdefault:\n\t\tbuf = append(buf, string(code)...)\n\t\tfmt.Fprint(w.out, string(buf))\n\t}\n}\n\n\/\/ Original implementation: https:\/\/github.com\/mattn\/go-colorable\nfunc (w *Writer) applySelectGraphicRendition(arg string) {\n\tif arg == \"\" {\n\t\tprocSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.orgAttr))\n\t\treturn\n\t}\n\n\tvar csbi consoleScreenBufferInfo\n\tprocGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))\n\tattr := csbi.attributes\n\n\tfor _, param := range strings.Split(arg, \";\") {\n\t\tn, err := strconv.Atoi(param)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch {\n\t\tcase n == 0 || n == 100:\n\t\t\tattr = w.orgAttr\n\t\tcase 1 <= n && n <= 5:\n\t\t\tattr |= foregroundIntensity\n\t\tcase 30 <= n && n <= 37:\n\t\t\tattr = (attr & backgroundMask)\n\t\t\tif (n-30)&1 != 0 {\n\t\t\t\tattr |= foregroundRed\n\t\t\t}\n\t\t\tif (n-30)&2 != 0 {\n\t\t\t\tattr |= foregroundGreen\n\t\t\t}\n\t\t\tif (n-30)&4 != 0 {\n\t\t\t\tattr |= foregroundBlue\n\t\t\t}\n\t\tcase 40 <= n && n <= 47:\n\t\t\tattr = (attr & foregroundMask)\n\t\t\tif (n-40)&1 != 0 {\n\t\t\t\tattr |= backgroundRed\n\t\t\t}\n\t\t\tif (n-40)&2 != 0 {\n\t\t\t\tattr |= backgroundGreen\n\t\t\t}\n\t\t\tif (n-40)&4 != 0 {\n\t\t\t\tattr |= backgroundBlue\n\t\t\t}\n\t\tcase 90 <= n && n <= 97:\n\t\t\tattr = (attr & backgroundMask)\n\t\t\tattr |= foregroundIntensity\n\t\t\tif (n-90)&1 != 0 {\n\t\t\t\tattr |= foregroundRed\n\t\t\t}\n\t\t\tif (n-90)&2 != 0 {\n\t\t\t\tattr |= foregroundGreen\n\t\t\t}\n\t\t\tif (n-90)&4 != 0 {\n\t\t\t\tattr |= foregroundBlue\n\t\t\t}\n\t\tcase 100 <= n && n <= 107:\n\t\t\tattr = (attr & foregroundMask)\n\t\t\tattr |= backgroundIntensity\n\t\t\tif (n-100)&1 != 0 {\n\t\t\t\tattr |= backgroundRed\n\t\t\t}\n\t\t\tif (n-100)&2 != 0 {\n\t\t\t\tattr |= backgroundGreen\n\t\t\t}\n\t\t\tif (n-100)&4 != 0 {\n\t\t\t\tattr |= backgroundBlue\n\t\t\t}\n\t\t}\n\t}\n\n\tprocSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))\n}\n<|endoftext|>"} {"text":"<commit_before>package lib\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/BytemarkHosting\/bytemark-client\/util\/log\"\n)\n\n\/\/ Request is the lower-level type that makes HTTP calls\ntype Request interface {\n\t\/\/ GetURL gets the URL this request will be \/ has been made to\n\tGetURL() url.URL\n\t\/\/ AllowInsecure allows the Request to be made over http instead of https\n\tAllowInsecure()\n\t\/\/ JSON-marshals the input and runs the request, unmarshalling the response to out if it's not nil\n\tMarshalAndRun(in interface{}, out interface{}) (statusCode int, responseBody []byte, err error)\n\t\/\/ Runs the request, reading the request body to reader and unmarshalling the response to responseObject (if not nil)\n\tRun(body io.Reader, responseObject interface{}) (statusCode int, responseBody []byte, err error)\n}\n\n\/\/ TokenType returns what kind of token is used for the given endpoint.\n\/\/ token for endpoints which expect an authorization header like \"Token token=blah\"\n\/\/ bearer for endpoints which expect one like \"Bearer blah\"\nfunc TokenType(ep Endpoint) string {\n\tswitch ep {\n\tcase BillingEndpoint:\n\t\treturn \"token\"\n\t}\n\treturn \"bearer\"\n}\n\n\/\/ RequestAlreadyRunError is returned if the Run method was already called for this Request.\ntype RequestAlreadyRunError struct {\n\tRequest Request\n}\n\n\/\/ InsecureConnectionError is returned if the endpoint isn't https but AllowInsecure was not called.\ntype InsecureConnectionError struct {\n\tRequest Request\n}\n\nfunc (e RequestAlreadyRunError) Error() string {\n\treturn \"A Request was Run twice\"\n}\n\nfunc (e InsecureConnectionError) Error() string {\n\treturn \"A Request to an insecure endpoint was attempted when AllowInsecureRequests had not been called.\"\n}\n\n\/\/ internalRequest is the workhorse of the bytemark-client\/lib - it builds up a request, then Run can be called to get its results.\ntype internalRequest struct {\n\tauthenticate bool\n\tclient Client\n\tendpoint Endpoint\n\turl *url.URL\n\tmethod string\n\tbody []byte\n\tallowInsecure bool\n\thasRun bool\n}\n\n\/\/ GetURL returns the URL that the Request is for.\nfunc (r *internalRequest) GetURL() url.URL {\n\tif r.url == nil {\n\t\treturn url.URL{}\n\t}\n\treturn *r.url\n}\n\n\/\/ BuildRequestNoAuth creates a new Request with the intention of not authenticating.\nfunc (c *bytemarkClient) BuildRequestNoAuth(method string, endpoint Endpoint, path string, parts ...string) (r Request, err error) {\n\turl, err := c.BuildURL(endpoint, path, parts...)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn &internalRequest{\n\t\tclient: c,\n\t\tendpoint: endpoint,\n\t\turl: url,\n\t\tmethod: method,\n\t\tallowInsecure: c.allowInsecure,\n\t}, nil\n}\n\n\/\/ BuildRequest builds a request that will be authenticated by the endpoint given.\nfunc (c *bytemarkClient) BuildRequest(method string, endpoint Endpoint, path string, parts ...string) (r Request, err error) {\n\turl, err := c.BuildURL(endpoint, path, parts...)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn &internalRequest{\n\t\tauthenticate: true,\n\t\tclient: c,\n\t\tendpoint: endpoint,\n\t\turl: url,\n\t\tmethod: method,\n\t\tallowInsecure: c.allowInsecure,\n\t}, nil\n}\n\n\/\/ AllowInsecure tells the Request that it's ok if the endpoint isn't communicated with over HTTPS.\nfunc (r *internalRequest) AllowInsecure() {\n\tr.allowInsecure = true\n}\n\n\/\/ mkHTTPClient creates an http.Client for this request. If the staging endpoint is used, InsecureSkipVerify is used because I guess we don't have a good cert for that brain.\nfunc (r *internalRequest) mkHTTPClient() (c *http.Client) {\n\tc = new(http.Client)\n\tif r.url.Host == \"staging.bigv.io\" {\n\t\tc.Transport = &http.Transport{\n\t\t\t\/\/ disable gas lint for this line (gas looks for insecure TLS settings, among other things)\n\t\t\t\/* #nosec *\/\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: true,\n\t\t\t},\n\t\t}\n\t}\n\treturn c\n}\n\n\/\/ mkHTTPRequest assembles an http.Request for this Request, adding Authorization headers as needed, setting the Content-Type correctly for whichever endpoint it's talking to.\nfunc (r *internalRequest) mkHTTPRequest(body io.Reader) (req *http.Request, err error) {\n\treq, err = http.NewRequest(r.method, r.url.String(), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Close = true\n\treq.Header.Add(\"User-Agent\", \"bytemark-client-\"+Version)\n\n\tif r.endpoint == SPPEndpoint {\n\t\treq.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\t} else {\n\t\treq.Header.Add(\"Accept\", \"application\/json\")\n\t\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\t}\n\tif r.authenticate {\n\t\tif r.client.GetSessionToken() == \"\" {\n\t\t\treturn nil, NilAuthError{}\n\t\t}\n\t\t\/\/ if we could settle on a single standard\n\t\t\/\/ rather than two basically-identical ones that'd be cool\n\t\tswitch TokenType(r.endpoint) {\n\t\tcase \"token\":\n\t\t\treq.Header.Add(\"Authorization\", \"Token token=\"+r.client.GetSessionToken())\n\t\tcase \"bearer\":\n\t\t\treq.Header.Add(\"Authorization\", \"Bearer \"+r.client.GetSessionToken())\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ MarshalAndRun marshals the 'in' object and passes that and 'out' to Run as the body and responseObject, respectively.\nfunc (r *internalRequest) MarshalAndRun(in interface{}, out interface{}) (statusCode int, responseBody []byte, err error) {\n\tvar b bytes.Buffer\n\terr = json.NewEncoder(&b).Encode(in)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn r.Run(&b, out)\n}\n\n\/\/ Run performs the request with the given body, and attempts to unmarshal a successful response into responseObject\nfunc (r *internalRequest) Run(body io.Reader, responseObject interface{}) (statusCode int, responseBody []byte, err error) {\n\tif r.hasRun {\n\t\terr = RequestAlreadyRunError{r}\n\t\treturn\n\t}\n\tr.hasRun = true\n\n\tif !r.allowInsecure && r.url.Scheme == \"http\" {\n\t\terr = InsecureConnectionError{r}\n\t\treturn\n\t}\n\tvar rb []byte\n\tif body != nil {\n\n\t\trb, err = ioutil.ReadAll(body)\n\t\tif err != nil {\n\t\t\treturn 0, nil, err\n\t\t}\n\t\tlog.Debugf(log.LvlHTTPData, \"request body: '%s'\\r\\n\", string(rb))\n\t}\n\n\tcli := r.mkHTTPClient()\n\n\t\/\/ This isnt actually returning errors if not authenticated. (when Auth or preflight has not been run, NilAuthError)\n\t\/\/ TODO: dig deep and find out it does not return this error (NilAuthError)\n\treq, err := r.mkHTTPRequest(bytes.NewBuffer(rb))\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(rb) > 0 {\n\t\treq.Header.Add(\"Content-Length\", fmt.Sprintf(\"%d\", len(rb)))\n\t}\n\tres, err := cli.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tstatusCode = res.StatusCode\n\n\tlog.Debugf(log.LvlOutline, \"%s %s: %d\\r\\n\", r.method, req.URL, res.StatusCode)\n\n\tresponseBody, err = r.handleResponse(req, rb, res, responseObject)\n\treturn\n}\n\n\/\/ handleResponse deals with the response coming back from the request - creating an error if required, unmarshalling responseObject if necessary\nfunc (r *internalRequest) handleResponse(req *http.Request, requestBody []byte, res *http.Response, responseObject interface{}) (body []byte, err error) {\n\tbody, err = ioutil.ReadAll(res.Body)\n\tlog.Debugf(log.LvlHTTPData, \"response body: '%s'\\r\\n\", body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tbaseErr := APIError{\n\t\tMethod: r.method,\n\t\tURL: req.URL,\n\t\tStatusCode: res.StatusCode,\n\t\tRequestBody: string(requestBody),\n\t}\n\n\tbaseErr.ResponseBody = string(body)\n\n\tswitch res.StatusCode {\n\tcase http.StatusBadRequest:\n\t\t\/\/ because we need to reference fields specific to BadRequestError later\n\t\terr = newBadRequestError(baseErr, body)\n\tcase http.StatusUnauthorized:\n\t\terr = UnauthorizedError{baseErr}\n\tcase http.StatusForbidden:\n\t\terr = ForbiddenError{baseErr}\n\tcase http.StatusNotFound:\n\t\terr = NotFoundError{baseErr}\n\tcase http.StatusInternalServerError:\n\t\terr = InternalServerError{baseErr}\n\tcase http.StatusServiceUnavailable:\n\t\terr = ServiceUnavailableError{baseErr}\n\tdefault:\n\t\tif 200 <= res.StatusCode && res.StatusCode <= 299 {\n\t\t\tif responseObject != nil {\n\t\t\t\tjsonErr := json.Unmarshal(body, responseObject)\n\t\t\t\tif jsonErr != nil {\n\t\t\t\t\treturn body, jsonErr\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\terr = UnknownStatusCodeError{baseErr}\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ BuildURL pieces together a URL from parts, escaping as necessary..\nfunc (c *bytemarkClient) BuildURL(endpoint Endpoint, format string, args ...string) (*url.URL, error) {\n\tarr := make([]interface{}, len(args))\n\tfor i, str := range args {\n\t\tarr[i] = url.QueryEscape(str)\n\t}\n\tendpointURL := \"\"\n\tswitch endpoint {\n\tcase APIEndpoint:\n\t\tendpointURL = c.urls.API\n\tcase BrainEndpoint:\n\t\tendpointURL = c.urls.Brain\n\tcase BillingEndpoint:\n\t\tendpointURL = c.urls.Billing\n\tcase SPPEndpoint:\n\t\tendpointURL = c.urls.SPP\n\tdefault:\n\t\treturn nil, UnsupportedEndpointError(endpoint)\n\t}\n\tif !strings.HasPrefix(format, \"\/\") {\n\t\treturn nil, UnsupportedEndpointError(-1)\n\t}\n\treturn url.Parse(endpointURL + fmt.Sprintf(format, arr...))\n}\n<commit_msg>changed a file on gitlab, pull that in<commit_after>package lib\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/BytemarkHosting\/bytemark-client\/util\/log\"\n)\n\n\/\/ Request is the lower-level type that makes HTTP calls\ntype Request interface {\n\t\/\/ GetURL gets the URL this request will be \/ has been made to\n\tGetURL() url.URL\n\t\/\/ AllowInsecure allows the Request to be made over http instead of https\n\tAllowInsecure()\n\t\/\/ JSON-marshals the input and runs the request, unmarshalling the response to out if it's not nil\n\tMarshalAndRun(in interface{}, out interface{}) (statusCode int, responseBody []byte, err error)\n\t\/\/ Runs the request, reading the request body to reader and unmarshalling the response to responseObject (if not nil)\n\tRun(body io.Reader, responseObject interface{}) (statusCode int, responseBody []byte, err error)\n}\n\n\/\/ TokenType returns what kind of token is used for the given endpoint.\n\/\/ token for endpoints which expect an authorization header like \"Token token=blah\"\n\/\/ bearer for endpoints which expect one like \"Bearer blah\"\nfunc TokenType(ep Endpoint) string {\n\tswitch ep {\n\tcase BillingEndpoint:\n\t\treturn \"token\"\n\t}\n\treturn \"bearer\"\n}\n\n\/\/ RequestAlreadyRunError is returned if the Run method was already called for this Request.\ntype RequestAlreadyRunError struct {\n\tRequest Request\n}\n\n\/\/ InsecureConnectionError is returned if the endpoint isn't https but AllowInsecure was not called.\ntype InsecureConnectionError struct {\n\tRequest Request\n}\n\nfunc (e RequestAlreadyRunError) Error() string {\n\treturn \"A Request was Run twice\"\n}\n\nfunc (e InsecureConnectionError) Error() string {\n\treturn \"A Request to an insecure endpoint was attempted when AllowInsecureRequests had not been called.\"\n}\n\n\/\/ internalRequest is the workhorse of the bytemark-client\/lib - it builds up a request, then Run can be called to get its results.\ntype internalRequest struct {\n\tauthenticate bool\n\tclient Client\n\tendpoint Endpoint\n\turl *url.URL\n\tmethod string\n\tbody []byte\n\tallowInsecure bool\n\thasRun bool\n}\n\n\/\/ GetURL returns the URL that the Request is for.\nfunc (r *internalRequest) GetURL() url.URL {\n\tif r.url == nil {\n\t\treturn url.URL{}\n\t}\n\treturn *r.url\n}\n\n\/\/ BuildRequestNoAuth creates a new Request with the intention of not authenticating.\nfunc (c *bytemarkClient) BuildRequestNoAuth(method string, endpoint Endpoint, path string, parts ...string) (r Request, err error) {\n\turl, err := c.BuildURL(endpoint, path, parts...)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn &internalRequest{\n\t\tclient: c,\n\t\tendpoint: endpoint,\n\t\turl: url,\n\t\tmethod: method,\n\t\tallowInsecure: c.allowInsecure,\n\t}, nil\n}\n\n\/\/ BuildRequest builds a request that will be authenticated by the endpoint given.\nfunc (c *bytemarkClient) BuildRequest(method string, endpoint Endpoint, path string, parts ...string) (r Request, err error) {\n\turl, err := c.BuildURL(endpoint, path, parts...)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn &internalRequest{\n\t\tauthenticate: true,\n\t\tclient: c,\n\t\tendpoint: endpoint,\n\t\turl: url,\n\t\tmethod: method,\n\t\tallowInsecure: c.allowInsecure,\n\t}, nil\n}\n\n\/\/ AllowInsecure tells the Request that it's ok if the endpoint isn't communicated with over HTTPS.\nfunc (r *internalRequest) AllowInsecure() {\n\tr.allowInsecure = true\n}\n\n\/\/ mkHTTPClient creates an http.Client for this request. If the staging endpoint is used, InsecureSkipVerify is used because I guess we don't have a good cert for that brain.\nfunc (r *internalRequest) mkHTTPClient() (c *http.Client) {\n\tc = new(http.Client)\n\tif r.url.Host == \"staging.bigv.io\" {\n\t\tc.Transport = &http.Transport{\n\t\t\t\/\/ disable gas lint for this line (gas looks for insecure TLS settings, among other things)\n\t\t\t\/* #nosec *\/\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tInsecureSkipVerify: true,\n\t\t\t},\n\t\t}\n\t}\n\treturn c\n}\n\n\/\/ mkHTTPRequest assembles an http.Request for this Request, adding Authorization headers as needed, setting the Content-Type correctly for whichever endpoint it's talking to.\nfunc (r *internalRequest) mkHTTPRequest(body io.Reader) (req *http.Request, err error) {\n\treq, err = http.NewRequest(r.method, r.url.String(), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Close = true\n\treq.Header.Add(\"User-Agent\", \"bytemark-client-\"+Version)\n\n\tif r.endpoint == SPPEndpoint {\n\t\treq.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\t} else {\n\t\treq.Header.Add(\"Accept\", \"application\/json\")\n\t\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\t}\n\tif r.authenticate {\n\t\tif r.client.GetSessionToken() == \"\" {\n\t\t\treturn nil, NilAuthError{}\n\t\t}\n\t\t\/\/ if we could settle on a single standard\n\t\t\/\/ rather than two basically-identical ones that'd be cool\n\t\tswitch TokenType(r.endpoint) {\n\t\tcase \"token\":\n\t\t\treq.Header.Add(\"Authorization\", \"Token token=\"+r.client.GetSessionToken())\n\t\tcase \"bearer\":\n\t\t\treq.Header.Add(\"Authorization\", \"Bearer \"+r.client.GetSessionToken())\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ MarshalAndRun marshals the 'in' object and passes that and 'out' to Run as the body and responseObject, respectively.\nfunc (r *internalRequest) MarshalAndRun(in interface{}, out interface{}) (statusCode int, responseBody []byte, err error) {\n\tvar b bytes.Buffer\n\terr = json.NewEncoder(&b).Encode(in)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn r.Run(&b, out)\n}\n\n\/\/ Run performs the request with the given body, and attempts to unmarshal a successful response into responseObject\nfunc (r *internalRequest) Run(body io.Reader, responseObject interface{}) (statusCode int, responseBody []byte, err error) {\n\tif r.hasRun {\n\t\terr = RequestAlreadyRunError{r}\n\t\treturn\n\t}\n\tr.hasRun = true\n\n\tif !r.allowInsecure && r.url.Scheme == \"http\" {\n\t\terr = InsecureConnectionError{r}\n\t\treturn\n\t}\n\tvar rb []byte\n\tif body != nil {\n\n\t\trb, err = ioutil.ReadAll(body)\n\t\tif err != nil {\n\t\t\treturn 0, nil, err\n\t\t}\n\t\tlog.Debugf(log.LvlHTTPData, \"request body: '%s'\\r\\n\", string(rb))\n\t}\n\n\tcli := r.mkHTTPClient()\n\n\t\/\/ This isnt actually returning errors if not authenticated. (when Auth or preflight has not been run)\n\t\/\/ TODO: dig deep and find out why it does not return this error (NilAuthError)\n\treq, err := r.mkHTTPRequest(bytes.NewBuffer(rb))\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(rb) > 0 {\n\t\treq.Header.Add(\"Content-Length\", fmt.Sprintf(\"%d\", len(rb)))\n\t}\n\tres, err := cli.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tstatusCode = res.StatusCode\n\n\tlog.Debugf(log.LvlOutline, \"%s %s: %d\\r\\n\", r.method, req.URL, res.StatusCode)\n\n\tresponseBody, err = r.handleResponse(req, rb, res, responseObject)\n\treturn\n}\n\n\/\/ handleResponse deals with the response coming back from the request - creating an error if required, unmarshalling responseObject if necessary\nfunc (r *internalRequest) handleResponse(req *http.Request, requestBody []byte, res *http.Response, responseObject interface{}) (body []byte, err error) {\n\tbody, err = ioutil.ReadAll(res.Body)\n\tlog.Debugf(log.LvlHTTPData, \"response body: '%s'\\r\\n\", body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tbaseErr := APIError{\n\t\tMethod: r.method,\n\t\tURL: req.URL,\n\t\tStatusCode: res.StatusCode,\n\t\tRequestBody: string(requestBody),\n\t}\n\n\tbaseErr.ResponseBody = string(body)\n\n\tswitch res.StatusCode {\n\tcase http.StatusBadRequest:\n\t\t\/\/ because we need to reference fields specific to BadRequestError later\n\t\terr = newBadRequestError(baseErr, body)\n\tcase http.StatusUnauthorized:\n\t\terr = UnauthorizedError{baseErr}\n\tcase http.StatusForbidden:\n\t\terr = ForbiddenError{baseErr}\n\tcase http.StatusNotFound:\n\t\terr = NotFoundError{baseErr}\n\tcase http.StatusInternalServerError:\n\t\terr = InternalServerError{baseErr}\n\tcase http.StatusServiceUnavailable:\n\t\terr = ServiceUnavailableError{baseErr}\n\tdefault:\n\t\tif 200 <= res.StatusCode && res.StatusCode <= 299 {\n\t\t\tif responseObject != nil {\n\t\t\t\tjsonErr := json.Unmarshal(body, responseObject)\n\t\t\t\tif jsonErr != nil {\n\t\t\t\t\treturn body, jsonErr\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\terr = UnknownStatusCodeError{baseErr}\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ BuildURL pieces together a URL from parts, escaping as necessary..\nfunc (c *bytemarkClient) BuildURL(endpoint Endpoint, format string, args ...string) (*url.URL, error) {\n\tarr := make([]interface{}, len(args))\n\tfor i, str := range args {\n\t\tarr[i] = url.QueryEscape(str)\n\t}\n\tendpointURL := \"\"\n\tswitch endpoint {\n\tcase APIEndpoint:\n\t\tendpointURL = c.urls.API\n\tcase BrainEndpoint:\n\t\tendpointURL = c.urls.Brain\n\tcase BillingEndpoint:\n\t\tendpointURL = c.urls.Billing\n\tcase SPPEndpoint:\n\t\tendpointURL = c.urls.SPP\n\tdefault:\n\t\treturn nil, UnsupportedEndpointError(endpoint)\n\t}\n\tif !strings.HasPrefix(format, \"\/\") {\n\t\treturn nil, UnsupportedEndpointError(-1)\n\t}\n\treturn url.Parse(endpointURL + fmt.Sprintf(format, arr...))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage gob\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"runtime\"\n\t\"testing\"\n)\n\ntype Bench struct {\n\tA int\n\tB float64\n\tC string\n\tD []byte\n}\n\nfunc benchmarkEndToEnd(b *testing.B, v interface{}, pipe func() (r io.Reader, w io.Writer, err error)) {\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tr, w, err := pipe()\n\t\tif err != nil {\n\t\t\tb.Fatal(\"can't get pipe:\", err)\n\t\t}\n\t\tenc := NewEncoder(w)\n\t\tdec := NewDecoder(r)\n\t\tfor pb.Next() {\n\t\t\tif err := enc.Encode(v); err != nil {\n\t\t\t\tb.Fatal(\"encode error:\", err)\n\t\t\t}\n\t\t\tif err := dec.Decode(v); err != nil {\n\t\t\t\tb.Fatal(\"decode error:\", err)\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc BenchmarkEndToEndPipe(b *testing.B) {\n\tv := &Bench{7, 3.2, \"now is the time\", bytes.Repeat([]byte(\"for all good men\"), 100)}\n\tbenchmarkEndToEnd(b, v, func() (r io.Reader, w io.Writer, err error) {\n\t\tr, w, err = os.Pipe()\n\t\treturn\n\t})\n}\n\nfunc BenchmarkEndToEndByteBuffer(b *testing.B) {\n\tv := &Bench{7, 3.2, \"now is the time\", bytes.Repeat([]byte(\"for all good men\"), 100)}\n\tbenchmarkEndToEnd(b, v, func() (r io.Reader, w io.Writer, err error) {\n\t\tvar buf bytes.Buffer\n\t\treturn &buf, &buf, nil\n\t})\n}\n\nfunc BenchmarkEndToEndSliceByteBuffer(b *testing.B) {\n\tv := &Bench{7, 3.2, \"now is the time\", nil}\n\tRegister(v)\n\tarr := make([]interface{}, 100)\n\tfor i := range arr {\n\t\tarr[i] = v\n\t}\n\tbenchmarkEndToEnd(b, &arr, func() (r io.Reader, w io.Writer, err error) {\n\t\tvar buf bytes.Buffer\n\t\treturn &buf, &buf, nil\n\t})\n}\n\nfunc TestCountEncodeMallocs(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping malloc count in short mode\")\n\t}\n\tif runtime.GOMAXPROCS(0) > 1 {\n\t\tt.Skip(\"skipping; GOMAXPROCS>1\")\n\t}\n\n\tconst N = 1000\n\n\tvar buf bytes.Buffer\n\tenc := NewEncoder(&buf)\n\tbench := &Bench{7, 3.2, \"now is the time\", []byte(\"for all good men\")}\n\n\tallocs := testing.AllocsPerRun(N, func() {\n\t\terr := enc.Encode(bench)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"encode:\", err)\n\t\t}\n\t})\n\tif allocs != 0 {\n\t\tt.Fatalf(\"mallocs per encode of type Bench: %v; wanted 0\\n\", allocs)\n\t}\n}\n\nfunc TestCountDecodeMallocs(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping malloc count in short mode\")\n\t}\n\tif runtime.GOMAXPROCS(0) > 1 {\n\t\tt.Skip(\"skipping; GOMAXPROCS>1\")\n\t}\n\n\tconst N = 1000\n\n\tvar buf bytes.Buffer\n\tenc := NewEncoder(&buf)\n\tbench := &Bench{7, 3.2, \"now is the time\", []byte(\"for all good men\")}\n\n\t\/\/ Fill the buffer with enough to decode\n\ttesting.AllocsPerRun(N, func() {\n\t\terr := enc.Encode(bench)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"encode:\", err)\n\t\t}\n\t})\n\n\tdec := NewDecoder(&buf)\n\tallocs := testing.AllocsPerRun(N, func() {\n\t\t*bench = Bench{}\n\t\terr := dec.Decode(&bench)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"decode:\", err)\n\t\t}\n\t})\n\tif allocs != 4 {\n\t\tt.Fatalf(\"mallocs per decode of type Bench: %v; wanted 4\\n\", allocs)\n\t}\n}\n<commit_msg>encoding\/gob: fix data races in benchmarks All goroutines decode into the same value.<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage gob\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"runtime\"\n\t\"testing\"\n)\n\ntype Bench struct {\n\tA int\n\tB float64\n\tC string\n\tD []byte\n}\n\nfunc benchmarkEndToEnd(b *testing.B, ctor func() interface{}, pipe func() (r io.Reader, w io.Writer, err error)) {\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tr, w, err := pipe()\n\t\tif err != nil {\n\t\t\tb.Fatal(\"can't get pipe:\", err)\n\t\t}\n\t\tv := ctor()\n\t\tenc := NewEncoder(w)\n\t\tdec := NewDecoder(r)\n\t\tfor pb.Next() {\n\t\t\tif err := enc.Encode(v); err != nil {\n\t\t\t\tb.Fatal(\"encode error:\", err)\n\t\t\t}\n\t\t\tif err := dec.Decode(v); err != nil {\n\t\t\t\tb.Fatal(\"decode error:\", err)\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc BenchmarkEndToEndPipe(b *testing.B) {\n\tbenchmarkEndToEnd(b, func() interface{} {\n\t\treturn &Bench{7, 3.2, \"now is the time\", bytes.Repeat([]byte(\"for all good men\"), 100)}\n\t}, func() (r io.Reader, w io.Writer, err error) {\n\t\tr, w, err = os.Pipe()\n\t\treturn\n\t})\n}\n\nfunc BenchmarkEndToEndByteBuffer(b *testing.B) {\n\tbenchmarkEndToEnd(b, func() interface{} {\n\t\treturn &Bench{7, 3.2, \"now is the time\", bytes.Repeat([]byte(\"for all good men\"), 100)}\n\t}, func() (r io.Reader, w io.Writer, err error) {\n\t\tvar buf bytes.Buffer\n\t\treturn &buf, &buf, nil\n\t})\n}\n\nfunc BenchmarkEndToEndSliceByteBuffer(b *testing.B) {\n\tbenchmarkEndToEnd(b, func() interface{} {\n\t\tv := &Bench{7, 3.2, \"now is the time\", nil}\n\t\tRegister(v)\n\t\tarr := make([]interface{}, 100)\n\t\tfor i := range arr {\n\t\t\tarr[i] = v\n\t\t}\n\t\treturn &arr\n\t}, func() (r io.Reader, w io.Writer, err error) {\n\t\tvar buf bytes.Buffer\n\t\treturn &buf, &buf, nil\n\t})\n}\n\nfunc TestCountEncodeMallocs(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping malloc count in short mode\")\n\t}\n\tif runtime.GOMAXPROCS(0) > 1 {\n\t\tt.Skip(\"skipping; GOMAXPROCS>1\")\n\t}\n\n\tconst N = 1000\n\n\tvar buf bytes.Buffer\n\tenc := NewEncoder(&buf)\n\tbench := &Bench{7, 3.2, \"now is the time\", []byte(\"for all good men\")}\n\n\tallocs := testing.AllocsPerRun(N, func() {\n\t\terr := enc.Encode(bench)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"encode:\", err)\n\t\t}\n\t})\n\tif allocs != 0 {\n\t\tt.Fatalf(\"mallocs per encode of type Bench: %v; wanted 0\\n\", allocs)\n\t}\n}\n\nfunc TestCountDecodeMallocs(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping malloc count in short mode\")\n\t}\n\tif runtime.GOMAXPROCS(0) > 1 {\n\t\tt.Skip(\"skipping; GOMAXPROCS>1\")\n\t}\n\n\tconst N = 1000\n\n\tvar buf bytes.Buffer\n\tenc := NewEncoder(&buf)\n\tbench := &Bench{7, 3.2, \"now is the time\", []byte(\"for all good men\")}\n\n\t\/\/ Fill the buffer with enough to decode\n\ttesting.AllocsPerRun(N, func() {\n\t\terr := enc.Encode(bench)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"encode:\", err)\n\t\t}\n\t})\n\n\tdec := NewDecoder(&buf)\n\tallocs := testing.AllocsPerRun(N, func() {\n\t\t*bench = Bench{}\n\t\terr := dec.Decode(&bench)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"decode:\", err)\n\t\t}\n\t})\n\tif allocs != 4 {\n\t\tt.Fatalf(\"mallocs per decode of type Bench: %v; wanted 4\\n\", allocs)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage types\n\nimport (\n\t\"go\/ast\"\n\t\"go\/build\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar gcPath string \/\/ Go compiler path\n\nfunc init() {\n\t\/\/ determine compiler\n\tvar gc string\n\tswitch runtime.GOARCH {\n\tcase \"386\":\n\t\tgc = \"8g\"\n\tcase \"amd64\":\n\t\tgc = \"6g\"\n\tcase \"arm\":\n\t\tgc = \"5g\"\n\tdefault:\n\t\tgcPath = \"unknown-GOARCH-compiler\"\n\t\treturn\n\t}\n\tgcPath = filepath.Join(build.ToolDir, gc)\n}\n\nfunc compile(t *testing.T, dirname, filename string) string {\n\tcmd := exec.Command(gcPath, filename)\n\tcmd.Dir = dirname\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Logf(\"%s\", out)\n\t\tt.Fatalf(\"%s %s failed: %s\", gcPath, filename, err)\n\t}\n\tarchCh, _ := build.ArchChar(runtime.GOARCH)\n\t\/\/ filename should end with \".go\"\n\treturn filepath.Join(dirname, filename[:len(filename)-2]+archCh)\n}\n\n\/\/ Use the same global imports map for all tests. The effect is\n\/\/ as if all tested packages were imported into a single package.\nvar imports = make(map[string]*ast.Object)\n\nfunc testPath(t *testing.T, path string) bool {\n\t_, err := GcImport(imports, path)\n\tif err != nil {\n\t\tt.Errorf(\"testPath(%s): %s\", path, err)\n\t\treturn false\n\t}\n\treturn true\n}\n\nconst maxTime = 3 * time.Second\n\nfunc testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {\n\tdirname := filepath.Join(runtime.GOROOT(), \"pkg\", runtime.GOOS+\"_\"+runtime.GOARCH, dir)\n\tlist, err := ioutil.ReadDir(dirname)\n\tif err != nil {\n\t\tt.Errorf(\"testDir(%s): %s\", dirname, err)\n\t}\n\tfor _, f := range list {\n\t\tif time.Now().After(endTime) {\n\t\t\tt.Log(\"testing time used up\")\n\t\t\treturn\n\t\t}\n\t\tswitch {\n\t\tcase !f.IsDir():\n\t\t\t\/\/ try extensions\n\t\t\tfor _, ext := range pkgExts {\n\t\t\t\tif strings.HasSuffix(f.Name(), ext) {\n\t\t\t\t\tname := f.Name()[0 : len(f.Name())-len(ext)] \/\/ remove extension\n\t\t\t\t\tif testPath(t, filepath.Join(dir, name)) {\n\t\t\t\t\t\tnimports++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase f.IsDir():\n\t\t\tnimports += testDir(t, filepath.Join(dir, f.Name()), endTime)\n\t\t}\n\t}\n\treturn\n}\n\nfunc TestGcImport(t *testing.T) {\n\t\/\/ On cross-compile builds, the path will not exist.\n\t\/\/ Need to use GOHOSTOS, which is not available.\n\tif _, err := os.Stat(gcPath); err != nil {\n\t\tt.Logf(\"skipping test: %v\", err)\n\t\treturn\n\t}\n\n\tif outFn := compile(t, \"testdata\", \"exports.go\"); outFn != \"\" {\n\t\tdefer os.Remove(outFn)\n\t}\n\n\tnimports := 0\n\tif testPath(t, \".\/testdata\/exports\") {\n\t\tnimports++\n\t}\n\tnimports += testDir(t, \"\", time.Now().Add(maxTime)) \/\/ installed packages\n\tt.Logf(\"tested %d imports\", nimports)\n}\n\nvar importedObjectTests = []struct {\n\tname string\n\tkind ast.ObjKind\n\ttyp string\n}{\n\t{\"unsafe.Pointer\", ast.Typ, \"Pointer\"},\n\t{\"math.Pi\", ast.Con, \"untyped float\"},\n\t{\"io.Reader\", ast.Typ, \"interface{Read(p []byte) (n int, err error)}\"},\n\t{\"io.ReadWriter\", ast.Typ, \"interface{Read(p []byte) (n int, err error); Write(p []byte) (n int, err error)}\"},\n\t{\"math.Sin\", ast.Fun, \"func(x·2 float64) (_ float64)\"},\n\t\/\/ TODO(gri) add more tests\n}\n\nfunc TestGcImportedTypes(t *testing.T) {\n\t\/\/ This package does not yet know how to read gccgo export data.\n\tif runtime.Compiler == \"gccgo\" {\n\t\treturn\n\t}\n\tfor _, test := range importedObjectTests {\n\t\ts := strings.Split(test.name, \".\")\n\t\tif len(s) != 2 {\n\t\t\tt.Fatal(\"inconsistent test data\")\n\t\t}\n\t\timportPath := s[0]\n\t\tobjName := s[1]\n\n\t\tpkg, err := GcImport(imports, importPath)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tobj := pkg.Data.(*ast.Scope).Lookup(objName)\n\t\tif obj.Kind != test.kind {\n\t\t\tt.Errorf(\"%s: got kind = %q; want %q\", test.name, obj.Kind, test.kind)\n\t\t}\n\t\ttyp := typeString(underlying(obj.Type.(Type)))\n\t\tif typ != test.typ {\n\t\t\tt.Errorf(\"%s: got type = %q; want %q\", test.name, typ, test.typ)\n\t\t}\n\t}\n}\n<commit_msg>go\/types: add more logging to gc import test<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage types\n\nimport (\n\t\"go\/ast\"\n\t\"go\/build\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar gcPath string \/\/ Go compiler path\n\nfunc init() {\n\t\/\/ determine compiler\n\tvar gc string\n\tswitch runtime.GOARCH {\n\tcase \"386\":\n\t\tgc = \"8g\"\n\tcase \"amd64\":\n\t\tgc = \"6g\"\n\tcase \"arm\":\n\t\tgc = \"5g\"\n\tdefault:\n\t\tgcPath = \"unknown-GOARCH-compiler\"\n\t\treturn\n\t}\n\tgcPath = filepath.Join(build.ToolDir, gc)\n}\n\nfunc compile(t *testing.T, dirname, filename string) string {\n\tcmd := exec.Command(gcPath, filename)\n\tcmd.Dir = dirname\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Logf(\"%s\", out)\n\t\tt.Fatalf(\"%s %s failed: %s\", gcPath, filename, err)\n\t}\n\tarchCh, _ := build.ArchChar(runtime.GOARCH)\n\t\/\/ filename should end with \".go\"\n\treturn filepath.Join(dirname, filename[:len(filename)-2]+archCh)\n}\n\n\/\/ Use the same global imports map for all tests. The effect is\n\/\/ as if all tested packages were imported into a single package.\nvar imports = make(map[string]*ast.Object)\n\nfunc testPath(t *testing.T, path string) bool {\n\tt0 := time.Now()\n\t_, err := GcImport(imports, path)\n\tif err != nil {\n\t\tt.Errorf(\"testPath(%s): %s\", path, err)\n\t\treturn false\n\t}\n\tt.Logf(\"testPath(%s): %v\", path, time.Since(t0))\n\treturn true\n}\n\nconst maxTime = 30 * time.Second\n\nfunc testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {\n\tdirname := filepath.Join(runtime.GOROOT(), \"pkg\", runtime.GOOS+\"_\"+runtime.GOARCH, dir)\n\tlist, err := ioutil.ReadDir(dirname)\n\tif err != nil {\n\t\tt.Fatalf(\"testDir(%s): %s\", dirname, err)\n\t}\n\tfor _, f := range list {\n\t\tif time.Now().After(endTime) {\n\t\t\tt.Log(\"testing time used up\")\n\t\t\treturn\n\t\t}\n\t\tswitch {\n\t\tcase !f.IsDir():\n\t\t\t\/\/ try extensions\n\t\t\tfor _, ext := range pkgExts {\n\t\t\t\tif strings.HasSuffix(f.Name(), ext) {\n\t\t\t\t\tname := f.Name()[0 : len(f.Name())-len(ext)] \/\/ remove extension\n\t\t\t\t\tif testPath(t, filepath.Join(dir, name)) {\n\t\t\t\t\t\tnimports++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase f.IsDir():\n\t\t\tnimports += testDir(t, filepath.Join(dir, f.Name()), endTime)\n\t\t}\n\t}\n\treturn\n}\n\nfunc TestGcImport(t *testing.T) {\n\t\/\/ On cross-compile builds, the path will not exist.\n\t\/\/ Need to use GOHOSTOS, which is not available.\n\tif _, err := os.Stat(gcPath); err != nil {\n\t\tt.Logf(\"skipping test: %v\", err)\n\t\treturn\n\t}\n\n\tif outFn := compile(t, \"testdata\", \"exports.go\"); outFn != \"\" {\n\t\tdefer os.Remove(outFn)\n\t}\n\n\tnimports := 0\n\tif testPath(t, \".\/testdata\/exports\") {\n\t\tnimports++\n\t}\n\tnimports += testDir(t, \"\", time.Now().Add(maxTime)) \/\/ installed packages\n\tt.Logf(\"tested %d imports\", nimports)\n}\n\nvar importedObjectTests = []struct {\n\tname string\n\tkind ast.ObjKind\n\ttyp string\n}{\n\t{\"unsafe.Pointer\", ast.Typ, \"Pointer\"},\n\t{\"math.Pi\", ast.Con, \"untyped float\"},\n\t{\"io.Reader\", ast.Typ, \"interface{Read(p []byte) (n int, err error)}\"},\n\t{\"io.ReadWriter\", ast.Typ, \"interface{Read(p []byte) (n int, err error); Write(p []byte) (n int, err error)}\"},\n\t{\"math.Sin\", ast.Fun, \"func(x·2 float64) (_ float64)\"},\n\t\/\/ TODO(gri) add more tests\n}\n\nfunc TestGcImportedTypes(t *testing.T) {\n\t\/\/ This package does not yet know how to read gccgo export data.\n\tif runtime.Compiler == \"gccgo\" {\n\t\treturn\n\t}\n\tfor _, test := range importedObjectTests {\n\t\ts := strings.Split(test.name, \".\")\n\t\tif len(s) != 2 {\n\t\t\tt.Fatal(\"inconsistent test data\")\n\t\t}\n\t\timportPath := s[0]\n\t\tobjName := s[1]\n\n\t\tpkg, err := GcImport(imports, importPath)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tobj := pkg.Data.(*ast.Scope).Lookup(objName)\n\t\tif obj.Kind != test.kind {\n\t\t\tt.Errorf(\"%s: got kind = %q; want %q\", test.name, obj.Kind, test.kind)\n\t\t}\n\t\ttyp := typeString(underlying(obj.Type.(Type)))\n\t\tif typ != test.typ {\n\t\t\tt.Errorf(\"%s: got type = %q; want %q\", test.name, typ, test.typ)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage types\n\nimport (\n\t\"go\/ast\"\n\t\"go\/build\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar gcPath string \/\/ Go compiler path\n\nfunc init() {\n\t\/\/ determine compiler\n\tvar gc string\n\tswitch runtime.GOARCH {\n\tcase \"386\":\n\t\tgc = \"8g\"\n\tcase \"amd64\":\n\t\tgc = \"6g\"\n\tcase \"arm\":\n\t\tgc = \"5g\"\n\tdefault:\n\t\tgcPath = \"unknown-GOARCH-compiler\"\n\t\treturn\n\t}\n\tgcPath = filepath.Join(build.ToolDir, gc)\n}\n\nfunc compile(t *testing.T, dirname, filename string) string {\n\tcmd := exec.Command(gcPath, filename)\n\tcmd.Dir = dirname\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Logf(\"%s\", out)\n\t\tt.Fatalf(\"%s %s failed: %s\", gcPath, filename, err)\n\t}\n\tarchCh, _ := build.ArchChar(runtime.GOARCH)\n\t\/\/ filename should end with \".go\"\n\treturn filepath.Join(dirname, filename[:len(filename)-2]+archCh)\n}\n\n\/\/ Use the same global imports map for all tests. The effect is\n\/\/ as if all tested packages were imported into a single package.\nvar imports = make(map[string]*ast.Object)\n\nfunc testPath(t *testing.T, path string) bool {\n\t_, err := GcImport(imports, path)\n\tif err != nil {\n\t\tt.Errorf(\"testPath(%s): %s\", path, err)\n\t\treturn false\n\t}\n\treturn true\n}\n\nconst maxTime = 3 * time.Second\n\nfunc testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {\n\tdirname := filepath.Join(runtime.GOROOT(), \"pkg\", runtime.GOOS+\"_\"+runtime.GOARCH, dir)\n\tlist, err := ioutil.ReadDir(dirname)\n\tif err != nil {\n\t\tt.Errorf(\"testDir(%s): %s\", dirname, err)\n\t}\n\tfor _, f := range list {\n\t\tif time.Now().After(endTime) {\n\t\t\tt.Log(\"testing time used up\")\n\t\t\treturn\n\t\t}\n\t\tswitch {\n\t\tcase !f.IsDir():\n\t\t\t\/\/ try extensions\n\t\t\tfor _, ext := range pkgExts {\n\t\t\t\tif strings.HasSuffix(f.Name(), ext) {\n\t\t\t\t\tname := f.Name()[0 : len(f.Name())-len(ext)] \/\/ remove extension\n\t\t\t\t\tif testPath(t, filepath.Join(dir, name)) {\n\t\t\t\t\t\tnimports++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase f.IsDir():\n\t\t\tnimports += testDir(t, filepath.Join(dir, f.Name()), endTime)\n\t\t}\n\t}\n\treturn\n}\n\nfunc TestGcImport(t *testing.T) {\n\t\/\/ On cross-compile builds, the path will not exist.\n\t\/\/ Need to use GOHOSTOS, which is not available.\n\tif _, err := os.Stat(gcPath); err != nil {\n\t\tt.Logf(\"skipping test: %v\", err)\n\t\treturn\n\t}\n\n\tif outFn := compile(t, \"testdata\", \"exports.go\"); outFn != \"\" {\n\t\tdefer os.Remove(outFn)\n\t}\n\n\tnimports := 0\n\tif testPath(t, \".\/testdata\/exports\") {\n\t\tnimports++\n\t}\n\tnimports += testDir(t, \"\", time.Now().Add(maxTime)) \/\/ installed packages\n\tt.Logf(\"tested %d imports\", nimports)\n}\n\nvar importedObjectTests = []struct {\n\tname string\n\tkind ast.ObjKind\n\ttyp string\n}{\n\t{\"unsafe.Pointer\", ast.Typ, \"Pointer\"},\n\t{\"math.Pi\", ast.Con, \"untyped float\"},\n\t{\"io.Reader\", ast.Typ, \"interface{Read(p []byte) (n int, err error)}\"},\n\t{\"io.ReadWriter\", ast.Typ, \"interface{Read(p []byte) (n int, err error); Write(p []byte) (n int, err error)}\"},\n\t{\"math.Sin\", ast.Fun, \"func(x·2 float64) (_ float64)\"},\n\t\/\/ TODO(gri) add more tests\n}\n\nfunc TestGcImportedTypes(t *testing.T) {\n\t\/\/ This package does not yet know how to read gccgo export data.\n\tif runtime.Compiler == \"gccgo\" {\n\t\treturn\n\t}\n\tfor _, test := range importedObjectTests {\n\t\ts := strings.Split(test.name, \".\")\n\t\tif len(s) != 2 {\n\t\t\tt.Fatal(\"inconsistent test data\")\n\t\t}\n\t\timportPath := s[0]\n\t\tobjName := s[1]\n\n\t\tpkg, err := GcImport(imports, importPath)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tobj := pkg.Data.(*ast.Scope).Lookup(objName)\n\t\tif obj.Kind != test.kind {\n\t\t\tt.Errorf(\"%s: got kind = %q; want %q\", test.name, obj.Kind, test.kind)\n\t\t}\n\t\ttyp := typeString(underlying(obj.Type.(Type)))\n\t\tif typ != test.typ {\n\t\t\tt.Errorf(\"%s: got type = %q; want %q\", test.name, typ, test.typ)\n\t\t}\n\t}\n}\n<commit_msg>go\/types: add more logging to gc import test<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage types\n\nimport (\n\t\"go\/ast\"\n\t\"go\/build\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar gcPath string \/\/ Go compiler path\n\nfunc init() {\n\t\/\/ determine compiler\n\tvar gc string\n\tswitch runtime.GOARCH {\n\tcase \"386\":\n\t\tgc = \"8g\"\n\tcase \"amd64\":\n\t\tgc = \"6g\"\n\tcase \"arm\":\n\t\tgc = \"5g\"\n\tdefault:\n\t\tgcPath = \"unknown-GOARCH-compiler\"\n\t\treturn\n\t}\n\tgcPath = filepath.Join(build.ToolDir, gc)\n}\n\nfunc compile(t *testing.T, dirname, filename string) string {\n\tcmd := exec.Command(gcPath, filename)\n\tcmd.Dir = dirname\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Logf(\"%s\", out)\n\t\tt.Fatalf(\"%s %s failed: %s\", gcPath, filename, err)\n\t}\n\tarchCh, _ := build.ArchChar(runtime.GOARCH)\n\t\/\/ filename should end with \".go\"\n\treturn filepath.Join(dirname, filename[:len(filename)-2]+archCh)\n}\n\n\/\/ Use the same global imports map for all tests. The effect is\n\/\/ as if all tested packages were imported into a single package.\nvar imports = make(map[string]*ast.Object)\n\nfunc testPath(t *testing.T, path string) bool {\n\tt0 := time.Now()\n\t_, err := GcImport(imports, path)\n\tif err != nil {\n\t\tt.Errorf(\"testPath(%s): %s\", path, err)\n\t\treturn false\n\t}\n\tt.Logf(\"testPath(%s): %v\", path, time.Since(t0))\n\treturn true\n}\n\nconst maxTime = 30 * time.Second\n\nfunc testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {\n\tdirname := filepath.Join(runtime.GOROOT(), \"pkg\", runtime.GOOS+\"_\"+runtime.GOARCH, dir)\n\tlist, err := ioutil.ReadDir(dirname)\n\tif err != nil {\n\t\tt.Fatalf(\"testDir(%s): %s\", dirname, err)\n\t}\n\tfor _, f := range list {\n\t\tif time.Now().After(endTime) {\n\t\t\tt.Log(\"testing time used up\")\n\t\t\treturn\n\t\t}\n\t\tswitch {\n\t\tcase !f.IsDir():\n\t\t\t\/\/ try extensions\n\t\t\tfor _, ext := range pkgExts {\n\t\t\t\tif strings.HasSuffix(f.Name(), ext) {\n\t\t\t\t\tname := f.Name()[0 : len(f.Name())-len(ext)] \/\/ remove extension\n\t\t\t\t\tif testPath(t, filepath.Join(dir, name)) {\n\t\t\t\t\t\tnimports++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase f.IsDir():\n\t\t\tnimports += testDir(t, filepath.Join(dir, f.Name()), endTime)\n\t\t}\n\t}\n\treturn\n}\n\nfunc TestGcImport(t *testing.T) {\n\t\/\/ On cross-compile builds, the path will not exist.\n\t\/\/ Need to use GOHOSTOS, which is not available.\n\tif _, err := os.Stat(gcPath); err != nil {\n\t\tt.Logf(\"skipping test: %v\", err)\n\t\treturn\n\t}\n\n\tif outFn := compile(t, \"testdata\", \"exports.go\"); outFn != \"\" {\n\t\tdefer os.Remove(outFn)\n\t}\n\n\tnimports := 0\n\tif testPath(t, \".\/testdata\/exports\") {\n\t\tnimports++\n\t}\n\tnimports += testDir(t, \"\", time.Now().Add(maxTime)) \/\/ installed packages\n\tt.Logf(\"tested %d imports\", nimports)\n}\n\nvar importedObjectTests = []struct {\n\tname string\n\tkind ast.ObjKind\n\ttyp string\n}{\n\t{\"unsafe.Pointer\", ast.Typ, \"Pointer\"},\n\t{\"math.Pi\", ast.Con, \"untyped float\"},\n\t{\"io.Reader\", ast.Typ, \"interface{Read(p []byte) (n int, err error)}\"},\n\t{\"io.ReadWriter\", ast.Typ, \"interface{Read(p []byte) (n int, err error); Write(p []byte) (n int, err error)}\"},\n\t{\"math.Sin\", ast.Fun, \"func(x·2 float64) (_ float64)\"},\n\t\/\/ TODO(gri) add more tests\n}\n\nfunc TestGcImportedTypes(t *testing.T) {\n\t\/\/ This package does not yet know how to read gccgo export data.\n\tif runtime.Compiler == \"gccgo\" {\n\t\treturn\n\t}\n\tfor _, test := range importedObjectTests {\n\t\ts := strings.Split(test.name, \".\")\n\t\tif len(s) != 2 {\n\t\t\tt.Fatal(\"inconsistent test data\")\n\t\t}\n\t\timportPath := s[0]\n\t\tobjName := s[1]\n\n\t\tpkg, err := GcImport(imports, importPath)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tobj := pkg.Data.(*ast.Scope).Lookup(objName)\n\t\tif obj.Kind != test.kind {\n\t\t\tt.Errorf(\"%s: got kind = %q; want %q\", test.name, obj.Kind, test.kind)\n\t\t}\n\t\ttyp := typeString(underlying(obj.Type.(Type)))\n\t\tif typ != test.typ {\n\t\t\tt.Errorf(\"%s: got type = %q; want %q\", test.name, typ, test.typ)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>auto-update<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>add documentation on rest of the functions<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>fix potential null pointer<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>Song parsing now strips punctuation from first line when using it as title<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2013 ActiveState Software Inc. All rights reserved.\n\npackage tail\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/ActiveState\/tail\/ratelimiter\"\n\t\"github.com\/ActiveState\/tail\/util\"\n\t\"github.com\/ActiveState\/tail\/watch\"\n\t\"gopkg.in\/tomb.v1\"\n)\n\nvar (\n\tErrStop = fmt.Errorf(\"tail should now stop\")\n)\n\ntype Line struct {\n\tText string\n\tTime time.Time\n\tErr error \/\/ Error from tail\n}\n\n\/\/ NewLine returns a Line with present time.\nfunc NewLine(text string) *Line {\n\treturn &Line{text, time.Now(), nil}\n}\n\n\/\/ SeekInfo represents arguments to `os.Seek`\ntype SeekInfo struct {\n\tOffset int64\n\tWhence int \/\/ os.SEEK_*\n}\n\n\/\/ Config is used to specify how a file must be tailed.\ntype Config struct {\n\t\/\/ File-specifc\n\tLocation *SeekInfo \/\/ Seek to this location before tailing\n\tReOpen bool \/\/ Reopen recreated files (tail -F)\n\tMustExist bool \/\/ Fail early if the file does not exist\n\tPoll bool \/\/ Poll for file changes instead of using inotify\n\tRateLimiter *ratelimiter.LeakyBucket\n\n\t\/\/ Generic IO\n\tFollow bool \/\/ Continue looking for new lines (tail -f)\n\tMaxLineSize int \/\/ If non-zero, split longer lines into multiple lines\n\n\t\/\/ Logger, when nil, is set to tail.DefaultLogger\n\t\/\/ To disable logging: set field to tail.DiscardingLogger\n\tLogger *log.Logger\n}\n\ntype Tail struct {\n\tFilename string\n\tLines chan *Line\n\tConfig\n\n\tfile *os.File\n\treader *bufio.Reader\n\ttracker *watch.InotifyTracker\n\n\twatcher watch.FileWatcher\n\tchanges *watch.FileChanges\n\n\ttomb.Tomb \/\/ provides: Done, Kill, Dying\n}\n\nvar (\n\t\/\/ DefaultLogger is used when Config.Logger == nil\n\tDefaultLogger = log.New(os.Stderr, \"\", log.LstdFlags)\n\t\/\/ DiscardingLogger can be used to disable logging output\n\tDiscardingLogger = log.New(ioutil.Discard, \"\", 0)\n)\n\n\/\/ TailFile begins tailing the file. Output stream is made available\n\/\/ via the `Tail.Lines` channel. To handle errors during tailing,\n\/\/ invoke the `Wait` or `Err` method after finishing reading from the\n\/\/ `Lines` channel.\nfunc TailFile(filename string, config Config) (*Tail, error) {\n\tif config.ReOpen && !config.Follow {\n\t\tutil.Fatal(\"cannot set ReOpen without Follow.\")\n\t}\n\n\tt := &Tail{\n\t\tFilename: filename,\n\t\tLines: make(chan *Line),\n\t\tConfig: config,\n\t}\n\n\t\/\/ when Logger was not specified in config, use default logger\n\tif t.Logger == nil {\n\t\tt.Logger = log.New(os.Stderr, \"\", log.LstdFlags)\n\t}\n\n\tif t.Poll {\n\t\tt.watcher = watch.NewPollingFileWatcher(filename)\n\t} else {\n\t\tt.tracker = watch.NewInotifyTracker()\n\t\tw, err := t.tracker.NewWatcher()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tt.watcher = watch.NewInotifyFileWatcher(filename, w)\n\t}\n\n\tif t.MustExist {\n\t\tvar err error\n\t\tt.file, err = OpenFile(t.Filename)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tgo t.tailFileSync()\n\n\treturn t, nil\n}\n\n\/\/ Return the file's current position, like stdio's ftell().\n\/\/ But this value is not very accurate.\n\/\/ it may readed one line in the chan(tail.Lines),\n\/\/ so it may lost one line.\nfunc (tail *Tail) Tell() (offset int64, err error) {\n\tif tail.file == nil {\n\t\treturn\n\t}\n\toffset, err = tail.file.Seek(0, os.SEEK_CUR)\n\tif err == nil {\n\t\toffset -= int64(tail.reader.Buffered())\n\t}\n\treturn\n}\n\n\/\/ Stop stops the tailing activity.\nfunc (tail *Tail) Stop() error {\n\ttail.Kill(nil)\n\treturn tail.Wait()\n}\n\nfunc (tail *Tail) close() {\n\tclose(tail.Lines)\n\tif tail.file != nil {\n\t\ttail.file.Close()\n\t}\n}\n\nfunc (tail *Tail) reopen() error {\n\tif tail.file != nil {\n\t\ttail.file.Close()\n\t}\n\tfor {\n\t\tvar err error\n\t\ttail.file, err = OpenFile(tail.Filename)\n\t\tif err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\ttail.Logger.Printf(\"Waiting for %s to appear...\", tail.Filename)\n\t\t\t\tif err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {\n\t\t\t\t\tif err == tomb.ErrDying {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\treturn fmt.Errorf(\"Failed to detect creation of %s: %s\", tail.Filename, err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"Unable to open file %s: %s\", tail.Filename, err)\n\t\t}\n\t\tbreak\n\t}\n\treturn nil\n}\n\nfunc (tail *Tail) readLine() (string, error) {\n\tline, err := tail.reader.ReadString('\\n')\n\tif err != nil {\n\t\t\/\/ Note ReadString \"returns the data read before the error\" in\n\t\t\/\/ case of an error, including EOF, so we return it as is. The\n\t\t\/\/ caller is expected to process it if err is EOF.\n\t\treturn line, err\n\t}\n\n\tline = strings.TrimRight(line, \"\\n\")\n\n\treturn line, err\n}\n\nfunc (tail *Tail) tailFileSync() {\n\tdefer tail.Done()\n\tdefer tail.close()\n\n\tif !tail.MustExist {\n\t\t\/\/ deferred first open.\n\t\terr := tail.reopen()\n\t\tif err != nil {\n\t\t\tif err != tomb.ErrDying {\n\t\t\t\ttail.Kill(err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Seek to requested location on first open of the file.\n\tif tail.Location != nil {\n\t\t_, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)\n\t\ttail.Logger.Printf(\"Seeked %s - %+v\\n\", tail.Filename, tail.Location)\n\t\tif err != nil {\n\t\t\ttail.Killf(\"Seek error on %s: %s\", tail.Filename, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\ttail.openReader()\n\n\t\/\/ Read line by line.\n\tfor {\n\t\t\/\/ grab the position in case we need to back up in the event of a half-line\n\t\toffset, err := tail.Tell()\n\t\tif err != nil {\n\t\t\ttail.Kill(err)\n\t\t\treturn\n\t\t}\n\n\t\tline, err := tail.readLine()\n\n\t\t\/\/ Process `line` even if err is EOF.\n\t\tif err == nil {\n\t\t\tcooloff := !tail.sendLine(line)\n\t\t\tif cooloff {\n\t\t\t\t\/\/ Wait a second before seeking till the end of\n\t\t\t\t\/\/ file when rate limit is reached.\n\t\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\t\"Too much log activity; waiting a second \" +\n\t\t\t\t\t\t\"before resuming tailing\")\n\t\t\t\ttail.Lines <- &Line{msg, time.Now(), fmt.Errorf(msg)}\n\t\t\t\tselect {\n\t\t\t\tcase <-time.After(time.Second):\n\t\t\t\tcase <-tail.Dying():\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr = tail.seekEnd()\n\t\t\t\tif err != nil {\n\t\t\t\t\ttail.Kill(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t} else if err == io.EOF {\n\t\t\tif !tail.Follow {\n\t\t\t\tif line != \"\" {\n\t\t\t\t\ttail.sendLine(line)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif tail.Follow && line != \"\" {\n\t\t\t\t\/\/ this has the potential to never return the last line if\n\t\t\t\t\/\/ it's not followed by a newline; seems a fair trade here\n\t\t\t\terr := tail.seekTo(SeekInfo{Offset: offset, Whence: 0})\n\t\t\t\tif err != nil {\n\t\t\t\t\ttail.Kill(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ When EOF is reached, wait for more data to become\n\t\t\t\/\/ available. Wait strategy is based on the `tail.watcher`\n\t\t\t\/\/ implementation (inotify or polling).\n\t\t\terr := tail.waitForChanges()\n\t\t\tif err != nil {\n\t\t\t\tif err != ErrStop {\n\t\t\t\t\ttail.Kill(err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ non-EOF error\n\t\t\ttail.Killf(\"Error reading %s: %s\", tail.Filename, err)\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-tail.Dying():\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t}\n}\n\n\/\/ waitForChanges waits until the file has been appended, deleted,\n\/\/ moved or truncated. When moved or deleted - the file will be\n\/\/ reopened if ReOpen is true. Truncated files are always reopened.\nfunc (tail *Tail) waitForChanges() error {\n\tif tail.changes == nil {\n\t\tst, err := tail.file.Stat()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttail.changes = tail.watcher.ChangeEvents(&tail.Tomb, st)\n\t}\n\n\tselect {\n\tcase <-tail.changes.Modified:\n\t\treturn nil\n\tcase <-tail.changes.Deleted:\n\t\ttail.changes = nil\n\t\tif tail.ReOpen {\n\t\t\t\/\/ XXX: we must not log from a library.\n\t\t\ttail.Logger.Printf(\"Re-opening moved\/deleted file %s ...\", tail.Filename)\n\t\t\tif err := tail.reopen(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttail.Logger.Printf(\"Successfully reopened %s\", tail.Filename)\n\t\t\ttail.openReader()\n\t\t\treturn nil\n\t\t} else {\n\t\t\ttail.Logger.Printf(\"Stopping tail as file no longer exists: %s\", tail.Filename)\n\t\t\treturn ErrStop\n\t\t}\n\tcase <-tail.changes.Truncated:\n\t\t\/\/ Always reopen truncated files (Follow is true)\n\t\ttail.Logger.Printf(\"Re-opening truncated file %s ...\", tail.Filename)\n\t\tif err := tail.reopen(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttail.Logger.Printf(\"Successfully reopened truncated %s\", tail.Filename)\n\t\ttail.openReader()\n\t\treturn nil\n\tcase <-tail.Dying():\n\t\treturn ErrStop\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc (tail *Tail) openReader() {\n\tif tail.MaxLineSize > 0 {\n\t\t\/\/ add 2 to account for newline characters\n\t\ttail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)\n\t} else {\n\t\ttail.reader = bufio.NewReader(tail.file)\n\t}\n}\n\nfunc (tail *Tail) seekEnd() error {\n\treturn tail.seekTo(SeekInfo{Offset: 0, Whence: 2})\n}\n\nfunc (tail *Tail) seekTo(pos SeekInfo) error {\n\t_, err := tail.file.Seek(pos.Offset, pos.Whence)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Seek error on %s: %s\", tail.Filename, err)\n\t}\n\t\/\/ Reset the read buffer whenever the file is re-seek'ed\n\ttail.reader.Reset(tail.file)\n\treturn nil\n}\n\n\/\/ sendLine sends the line(s) to Lines channel, splitting longer lines\n\/\/ if necessary. Return false if rate limit is reached.\nfunc (tail *Tail) sendLine(line string) bool {\n\tnow := time.Now()\n\tlines := []string{line}\n\n\t\/\/ Split longer lines\n\tif tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize {\n\t\tlines = util.PartitionString(line, tail.MaxLineSize)\n\t}\n\n\tfor _, line := range lines {\n\t\ttail.Lines <- &Line{line, now, nil}\n\t}\n\n\tif tail.Config.RateLimiter != nil {\n\t\tok := tail.Config.RateLimiter.Pour(uint16(len(lines)))\n\t\tif !ok {\n\t\t\ttail.Logger.Printf(\"Leaky bucket full (%v); entering 1s cooloff period.\\n\",\n\t\t\t\ttail.Filename)\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\n\/\/ Cleanup removes inotify watches added by the tail package. This function is\n\/\/ meant to be invoked from a process's exit handler. Linux kernel may not\n\/\/ automatically remove inotify watches after the process exits.\nfunc (tail *Tail) Cleanup() {\n\tif tail.tracker != nil {\n\t\ttail.tracker.CloseAll()\n\t}\n}\n<commit_msg>Do not seek in named pipe<commit_after>\/\/ Copyright (c) 2013 ActiveState Software Inc. All rights reserved.\n\npackage tail\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/ActiveState\/tail\/ratelimiter\"\n\t\"github.com\/ActiveState\/tail\/util\"\n\t\"github.com\/ActiveState\/tail\/watch\"\n\t\"gopkg.in\/tomb.v1\"\n)\n\nvar (\n\tErrStop = fmt.Errorf(\"tail should now stop\")\n)\n\ntype Line struct {\n\tText string\n\tTime time.Time\n\tErr error \/\/ Error from tail\n}\n\n\/\/ NewLine returns a Line with present time.\nfunc NewLine(text string) *Line {\n\treturn &Line{text, time.Now(), nil}\n}\n\n\/\/ SeekInfo represents arguments to `os.Seek`\ntype SeekInfo struct {\n\tOffset int64\n\tWhence int \/\/ os.SEEK_*\n}\n\n\/\/ Config is used to specify how a file must be tailed.\ntype Config struct {\n\t\/\/ File-specifc\n\tLocation *SeekInfo \/\/ Seek to this location before tailing\n\tReOpen bool \/\/ Reopen recreated files (tail -F)\n\tMustExist bool \/\/ Fail early if the file does not exist\n\tPoll bool \/\/ Poll for file changes instead of using inotify\n\tPipe bool \/\/ Is a named pipe (mkfifo)\n\tRateLimiter *ratelimiter.LeakyBucket\n\n\t\/\/ Generic IO\n\tFollow bool \/\/ Continue looking for new lines (tail -f)\n\tMaxLineSize int \/\/ If non-zero, split longer lines into multiple lines\n\n\t\/\/ Logger, when nil, is set to tail.DefaultLogger\n\t\/\/ To disable logging: set field to tail.DiscardingLogger\n\tLogger *log.Logger\n}\n\ntype Tail struct {\n\tFilename string\n\tLines chan *Line\n\tConfig\n\n\tfile *os.File\n\treader *bufio.Reader\n\ttracker *watch.InotifyTracker\n\n\twatcher watch.FileWatcher\n\tchanges *watch.FileChanges\n\n\ttomb.Tomb \/\/ provides: Done, Kill, Dying\n}\n\nvar (\n\t\/\/ DefaultLogger is used when Config.Logger == nil\n\tDefaultLogger = log.New(os.Stderr, \"\", log.LstdFlags)\n\t\/\/ DiscardingLogger can be used to disable logging output\n\tDiscardingLogger = log.New(ioutil.Discard, \"\", 0)\n)\n\n\/\/ TailFile begins tailing the file. Output stream is made available\n\/\/ via the `Tail.Lines` channel. To handle errors during tailing,\n\/\/ invoke the `Wait` or `Err` method after finishing reading from the\n\/\/ `Lines` channel.\nfunc TailFile(filename string, config Config) (*Tail, error) {\n\tif config.ReOpen && !config.Follow {\n\t\tutil.Fatal(\"cannot set ReOpen without Follow.\")\n\t}\n\n\tt := &Tail{\n\t\tFilename: filename,\n\t\tLines: make(chan *Line),\n\t\tConfig: config,\n\t}\n\n\t\/\/ when Logger was not specified in config, use default logger\n\tif t.Logger == nil {\n\t\tt.Logger = log.New(os.Stderr, \"\", log.LstdFlags)\n\t}\n\n\tif t.Poll {\n\t\tt.watcher = watch.NewPollingFileWatcher(filename)\n\t} else {\n\t\tt.tracker = watch.NewInotifyTracker()\n\t\tw, err := t.tracker.NewWatcher()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tt.watcher = watch.NewInotifyFileWatcher(filename, w)\n\t}\n\n\tif t.MustExist {\n\t\tvar err error\n\t\tt.file, err = OpenFile(t.Filename)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tgo t.tailFileSync()\n\n\treturn t, nil\n}\n\n\/\/ Return the file's current position, like stdio's ftell().\n\/\/ But this value is not very accurate.\n\/\/ it may readed one line in the chan(tail.Lines),\n\/\/ so it may lost one line.\nfunc (tail *Tail) Tell() (offset int64, err error) {\n\tif tail.file == nil {\n\t\treturn\n\t}\n\toffset, err = tail.file.Seek(0, os.SEEK_CUR)\n\tif err == nil {\n\t\toffset -= int64(tail.reader.Buffered())\n\t}\n\treturn\n}\n\n\/\/ Stop stops the tailing activity.\nfunc (tail *Tail) Stop() error {\n\ttail.Kill(nil)\n\treturn tail.Wait()\n}\n\nfunc (tail *Tail) close() {\n\tclose(tail.Lines)\n\tif tail.file != nil {\n\t\ttail.file.Close()\n\t}\n}\n\nfunc (tail *Tail) reopen() error {\n\tif tail.file != nil {\n\t\ttail.file.Close()\n\t}\n\tfor {\n\t\tvar err error\n\t\ttail.file, err = OpenFile(tail.Filename)\n\t\tif err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\ttail.Logger.Printf(\"Waiting for %s to appear...\", tail.Filename)\n\t\t\t\tif err := tail.watcher.BlockUntilExists(&tail.Tomb); err != nil {\n\t\t\t\t\tif err == tomb.ErrDying {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\treturn fmt.Errorf(\"Failed to detect creation of %s: %s\", tail.Filename, err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"Unable to open file %s: %s\", tail.Filename, err)\n\t\t}\n\t\tbreak\n\t}\n\treturn nil\n}\n\nfunc (tail *Tail) readLine() (string, error) {\n\tline, err := tail.reader.ReadString('\\n')\n\tif err != nil {\n\t\t\/\/ Note ReadString \"returns the data read before the error\" in\n\t\t\/\/ case of an error, including EOF, so we return it as is. The\n\t\t\/\/ caller is expected to process it if err is EOF.\n\t\treturn line, err\n\t}\n\n\tline = strings.TrimRight(line, \"\\n\")\n\n\treturn line, err\n}\n\nfunc (tail *Tail) tailFileSync() {\n\tdefer tail.Done()\n\tdefer tail.close()\n\n\tif !tail.MustExist {\n\t\t\/\/ deferred first open.\n\t\terr := tail.reopen()\n\t\tif err != nil {\n\t\t\tif err != tomb.ErrDying {\n\t\t\t\ttail.Kill(err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Seek to requested location on first open of the file.\n\tif tail.Location != nil {\n\t\t_, err := tail.file.Seek(tail.Location.Offset, tail.Location.Whence)\n\t\ttail.Logger.Printf(\"Seeked %s - %+v\\n\", tail.Filename, tail.Location)\n\t\tif err != nil {\n\t\t\ttail.Killf(\"Seek error on %s: %s\", tail.Filename, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\ttail.openReader()\n\n\tvar offset int64 = 0\n\tvar err error\n\t\/\/ Read line by line.\n\tfor {\n\t\t\/\/ do not seek in named pipes\n\t\tif !tail.Pipe {\n\t\t\t\/\/ grab the position in case we need to back up in the event of a half-line\n\t\t\toffset, err = tail.Tell()\n\t\t\tif err != nil {\n\t\t\t\ttail.Kill(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tline, err := tail.readLine()\n\n\t\t\/\/ Process `line` even if err is EOF.\n\t\tif err == nil {\n\t\t\tcooloff := !tail.sendLine(line)\n\t\t\tif cooloff {\n\t\t\t\t\/\/ Wait a second before seeking till the end of\n\t\t\t\t\/\/ file when rate limit is reached.\n\t\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\t\"Too much log activity; waiting a second \" +\n\t\t\t\t\t\t\"before resuming tailing\")\n\t\t\t\ttail.Lines <- &Line{msg, time.Now(), fmt.Errorf(msg)}\n\t\t\t\tselect {\n\t\t\t\tcase <-time.After(time.Second):\n\t\t\t\tcase <-tail.Dying():\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\terr = tail.seekEnd()\n\t\t\t\tif err != nil {\n\t\t\t\t\ttail.Kill(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t} else if err == io.EOF {\n\t\t\tif !tail.Follow {\n\t\t\t\tif line != \"\" {\n\t\t\t\t\ttail.sendLine(line)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif tail.Follow && line != \"\" {\n\t\t\t\t\/\/ this has the potential to never return the last line if\n\t\t\t\t\/\/ it's not followed by a newline; seems a fair trade here\n\t\t\t\terr := tail.seekTo(SeekInfo{Offset: offset, Whence: 0})\n\t\t\t\tif err != nil {\n\t\t\t\t\ttail.Kill(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ When EOF is reached, wait for more data to become\n\t\t\t\/\/ available. Wait strategy is based on the `tail.watcher`\n\t\t\t\/\/ implementation (inotify or polling).\n\t\t\terr := tail.waitForChanges()\n\t\t\tif err != nil {\n\t\t\t\tif err != ErrStop {\n\t\t\t\t\ttail.Kill(err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ non-EOF error\n\t\t\ttail.Killf(\"Error reading %s: %s\", tail.Filename, err)\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-tail.Dying():\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t}\n}\n\n\/\/ waitForChanges waits until the file has been appended, deleted,\n\/\/ moved or truncated. When moved or deleted - the file will be\n\/\/ reopened if ReOpen is true. Truncated files are always reopened.\nfunc (tail *Tail) waitForChanges() error {\n\tif tail.changes == nil {\n\t\tst, err := tail.file.Stat()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttail.changes = tail.watcher.ChangeEvents(&tail.Tomb, st)\n\t}\n\n\tselect {\n\tcase <-tail.changes.Modified:\n\t\treturn nil\n\tcase <-tail.changes.Deleted:\n\t\ttail.changes = nil\n\t\tif tail.ReOpen {\n\t\t\t\/\/ XXX: we must not log from a library.\n\t\t\ttail.Logger.Printf(\"Re-opening moved\/deleted file %s ...\", tail.Filename)\n\t\t\tif err := tail.reopen(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttail.Logger.Printf(\"Successfully reopened %s\", tail.Filename)\n\t\t\ttail.openReader()\n\t\t\treturn nil\n\t\t} else {\n\t\t\ttail.Logger.Printf(\"Stopping tail as file no longer exists: %s\", tail.Filename)\n\t\t\treturn ErrStop\n\t\t}\n\tcase <-tail.changes.Truncated:\n\t\t\/\/ Always reopen truncated files (Follow is true)\n\t\ttail.Logger.Printf(\"Re-opening truncated file %s ...\", tail.Filename)\n\t\tif err := tail.reopen(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttail.Logger.Printf(\"Successfully reopened truncated %s\", tail.Filename)\n\t\ttail.openReader()\n\t\treturn nil\n\tcase <-tail.Dying():\n\t\treturn ErrStop\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc (tail *Tail) openReader() {\n\tif tail.MaxLineSize > 0 {\n\t\t\/\/ add 2 to account for newline characters\n\t\ttail.reader = bufio.NewReaderSize(tail.file, tail.MaxLineSize+2)\n\t} else {\n\t\ttail.reader = bufio.NewReader(tail.file)\n\t}\n}\n\nfunc (tail *Tail) seekEnd() error {\n\treturn tail.seekTo(SeekInfo{Offset: 0, Whence: 2})\n}\n\nfunc (tail *Tail) seekTo(pos SeekInfo) error {\n\t_, err := tail.file.Seek(pos.Offset, pos.Whence)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Seek error on %s: %s\", tail.Filename, err)\n\t}\n\t\/\/ Reset the read buffer whenever the file is re-seek'ed\n\ttail.reader.Reset(tail.file)\n\treturn nil\n}\n\n\/\/ sendLine sends the line(s) to Lines channel, splitting longer lines\n\/\/ if necessary. Return false if rate limit is reached.\nfunc (tail *Tail) sendLine(line string) bool {\n\tnow := time.Now()\n\tlines := []string{line}\n\n\t\/\/ Split longer lines\n\tif tail.MaxLineSize > 0 && len(line) > tail.MaxLineSize {\n\t\tlines = util.PartitionString(line, tail.MaxLineSize)\n\t}\n\n\tfor _, line := range lines {\n\t\ttail.Lines <- &Line{line, now, nil}\n\t}\n\n\tif tail.Config.RateLimiter != nil {\n\t\tok := tail.Config.RateLimiter.Pour(uint16(len(lines)))\n\t\tif !ok {\n\t\t\ttail.Logger.Printf(\"Leaky bucket full (%v); entering 1s cooloff period.\\n\",\n\t\t\t\ttail.Filename)\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\n\/\/ Cleanup removes inotify watches added by the tail package. This function is\n\/\/ meant to be invoked from a process's exit handler. Linux kernel may not\n\/\/ automatically remove inotify watches after the process exits.\nfunc (tail *Tail) Cleanup() {\n\tif tail.tracker != nil {\n\t\ttail.tracker.CloseAll()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>A test binary.<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>make the field property of NoPropertyError public<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package sensor provides sensor events from various movement sensors.\npackage sensor\n\nimport (\n\t\"errors\"\n\t\"time\"\n)\n\n\/\/ Type represents a sensor type.\ntype Type int\n\nvar sensorNames = map[Type]string{\n\tAccelerometer: \"Accelerometer\",\n\tGyroscope: \"Gyrsocope\",\n\tMagnetometer: \"Magnetometer\",\n}\n\n\/\/ String returns the string representation of the sensor type.\nfunc (t Type) String() string {\n\tif n, ok := sensorNames[t]; ok {\n\t\treturn n\n\t}\n\treturn \"Unknown sensor\"\n}\n\nvar (\n\tAccelerometer = Type(0)\n\tGyroscope = Type(1)\n\tMagnetometer = Type(2)\n)\n\n\/\/ Event represents a sensor event.\ntype Event struct {\n\t\/\/ Sensor is the type of the sensor the event is coming from.\n\tSensor Type\n\n\t\/\/ Timestamp is a device specific event time in nanoseconds.\n\t\/\/ Timestamps are not Unix times, they represent a time that is\n\t\/\/ only valid for the device's default sensor.\n\tTimestamp int64\n\n\t\/\/ Data is the event data.\n\t\/\/\n\t\/\/ If the event source is Accelerometer,\n\t\/\/ - Data[0]: acceleration force in x axis in m\/s^2\n\t\/\/ - Data[1]: acceleration force in y axis in m\/s^2\n\t\/\/ - Data[2]: acceleration force in z axis in m\/s^2\n\t\/\/\n\t\/\/ If the event source is Gyroscope,\n\t\/\/ - Data[0]: rate of rotation around the x axis in rad\/s\n\t\/\/ - Data[1]: rate of rotation around the y axis in rad\/s\n\t\/\/ - Data[2]: rate of rotation around the z axis in rad\/s\n\t\/\/\n\t\/\/ If the event source is Magnetometer,\n\t\/\/ - Data[0]: force of gravity along the x axis in m\/s^2\n\t\/\/ - Data[1]: force of gravity along the y axis in m\/s^2\n\t\/\/ - Data[2]: force of gravity along the z axis in m\/s^2\n\t\/\/\n\tData []float64\n}\n\n\/\/ Manager multiplexes sensor event data from various sensor sources.\ntype Manager struct {\n\tm *manager \/\/ platform-specific implementation of the underlying manager\n}\n\n\/\/ Enable enables a sensor with the specified delay rate.\n\/\/ If there are multiple sensors of type t on the device, Enable uses\n\/\/ the default one.\n\/\/ If there is no default sensor of type t on the device, an error returned.\n\/\/ Valid sensor types supported by this package are Accelerometer,\n\/\/ Gyroscope, Magnetometer and Altimeter.\nfunc (m *Manager) Enable(t Type, delay time.Duration) error {\n\tif m.m == nil {\n\t\tm.m = new(manager)\n\t\tm.m.initialize()\n\t}\n\tif t < 0 || int(t) >= len(sensorNames) {\n\t\treturn errors.New(\"sensor: unknown sensor type\")\n\t}\n\treturn m.m.enable(t, delay)\n}\n\n\/\/ Disable disables to feed the manager with the specified sensor.\nfunc (m *Manager) Disable(t Type) error {\n\tif m.m == nil {\n\t\tm.m = new(manager)\n\t\tm.m.initialize()\n\t}\n\tif t < 0 || int(t) >= len(sensorNames) {\n\t\treturn errors.New(\"sensor: unknown sensor type\")\n\t}\n\treturn m.m.disable(t)\n}\n\n\/\/ Read reads a series of events from the manager.\n\/\/ It may read up to len(e) number of events, but will return\n\/\/ less events if timeout occurs.\nfunc (m *Manager) Read(e []Event) (n int, err error) {\n\tif m.m == nil {\n\t\tm.m = new(manager)\n\t\tm.m.initialize()\n\t}\n\treturn m.m.read(e)\n}\n\n\/\/ Close stops the manager and frees the related resources.\n\/\/ Once Close is called, Manager becomes invalid to use.\nfunc (m *Manager) Close() error {\n\tif m.m == nil {\n\t\treturn nil\n\t}\n\treturn m.m.close()\n}\n<commit_msg>sensor: make the global Type values consts instead of vars.<commit_after>\/\/ Copyright 2015 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package sensor provides sensor events from various movement sensors.\npackage sensor\n\nimport (\n\t\"errors\"\n\t\"time\"\n)\n\n\/\/ Type represents a sensor type.\ntype Type int\n\nvar sensorNames = map[Type]string{\n\tAccelerometer: \"Accelerometer\",\n\tGyroscope: \"Gyrsocope\",\n\tMagnetometer: \"Magnetometer\",\n}\n\n\/\/ String returns the string representation of the sensor type.\nfunc (t Type) String() string {\n\tif n, ok := sensorNames[t]; ok {\n\t\treturn n\n\t}\n\treturn \"Unknown sensor\"\n}\n\nconst (\n\tAccelerometer = Type(0)\n\tGyroscope = Type(1)\n\tMagnetometer = Type(2)\n)\n\n\/\/ Event represents a sensor event.\ntype Event struct {\n\t\/\/ Sensor is the type of the sensor the event is coming from.\n\tSensor Type\n\n\t\/\/ Timestamp is a device specific event time in nanoseconds.\n\t\/\/ Timestamps are not Unix times, they represent a time that is\n\t\/\/ only valid for the device's default sensor.\n\tTimestamp int64\n\n\t\/\/ Data is the event data.\n\t\/\/\n\t\/\/ If the event source is Accelerometer,\n\t\/\/ - Data[0]: acceleration force in x axis in m\/s^2\n\t\/\/ - Data[1]: acceleration force in y axis in m\/s^2\n\t\/\/ - Data[2]: acceleration force in z axis in m\/s^2\n\t\/\/\n\t\/\/ If the event source is Gyroscope,\n\t\/\/ - Data[0]: rate of rotation around the x axis in rad\/s\n\t\/\/ - Data[1]: rate of rotation around the y axis in rad\/s\n\t\/\/ - Data[2]: rate of rotation around the z axis in rad\/s\n\t\/\/\n\t\/\/ If the event source is Magnetometer,\n\t\/\/ - Data[0]: force of gravity along the x axis in m\/s^2\n\t\/\/ - Data[1]: force of gravity along the y axis in m\/s^2\n\t\/\/ - Data[2]: force of gravity along the z axis in m\/s^2\n\t\/\/\n\tData []float64\n}\n\n\/\/ Manager multiplexes sensor event data from various sensor sources.\ntype Manager struct {\n\tm *manager \/\/ platform-specific implementation of the underlying manager\n}\n\n\/\/ Enable enables a sensor with the specified delay rate.\n\/\/ If there are multiple sensors of type t on the device, Enable uses\n\/\/ the default one.\n\/\/ If there is no default sensor of type t on the device, an error returned.\n\/\/ Valid sensor types supported by this package are Accelerometer,\n\/\/ Gyroscope, Magnetometer and Altimeter.\nfunc (m *Manager) Enable(t Type, delay time.Duration) error {\n\tif m.m == nil {\n\t\tm.m = new(manager)\n\t\tm.m.initialize()\n\t}\n\tif t < 0 || int(t) >= len(sensorNames) {\n\t\treturn errors.New(\"sensor: unknown sensor type\")\n\t}\n\treturn m.m.enable(t, delay)\n}\n\n\/\/ Disable disables to feed the manager with the specified sensor.\nfunc (m *Manager) Disable(t Type) error {\n\tif m.m == nil {\n\t\tm.m = new(manager)\n\t\tm.m.initialize()\n\t}\n\tif t < 0 || int(t) >= len(sensorNames) {\n\t\treturn errors.New(\"sensor: unknown sensor type\")\n\t}\n\treturn m.m.disable(t)\n}\n\n\/\/ Read reads a series of events from the manager.\n\/\/ It may read up to len(e) number of events, but will return\n\/\/ less events if timeout occurs.\nfunc (m *Manager) Read(e []Event) (n int, err error) {\n\tif m.m == nil {\n\t\tm.m = new(manager)\n\t\tm.m.initialize()\n\t}\n\treturn m.m.read(e)\n}\n\n\/\/ Close stops the manager and frees the related resources.\n\/\/ Once Close is called, Manager becomes invalid to use.\nfunc (m *Manager) Close() error {\n\tif m.m == nil {\n\t\treturn nil\n\t}\n\treturn m.m.close()\n}\n<|endoftext|>"} {"text":"<commit_before>package sgr\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc MustParse(format string) string {\n\tstr, err := parse(true, false, format)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn str\n}\n\nfunc MustParseln(format string) string {\n\tstr, err := parse(true, true, format)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn str\n}\n\nfunc MustParseWithoutReset(format string) string {\n\tstr, err := parse(false, false, format)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn str\n}\n\nfunc Parse(format string) (string, error) {\n\treturn parse(true, false, format)\n}\n\nfunc Parseln(format string) (string, error) {\n\treturn parse(true, true, format)\n}\n\nfunc ParseWithoutReset(format string) (string, error) {\n\treturn parse(false, false, format)\n}\n\nfunc parse(reset bool, newline bool, format string) (string, error) {\n\t\/\/ Builder used to build the colored string.\n\tsb := new(sgrBuilder)\n\n\t\/\/ position in the parsing process\n\tpos := 0\n\n\t\/\/ index of the currenctly processing square bracket start\n\tidxStart := 0\n\tidxEnd := 0\n\n\tfor {\n\t\t\/\/ Find next square bracket, break loop when none was found.\n\t\trelBlockOpen := strings.IndexRune(format[pos:], '[')\n\t\tif relBlockOpen == -1 {\n\t\t\tsb.appendString(format[pos:])\n\t\t\tbreak\n\t\t}\n\t\tidxStart = pos + relBlockOpen\n\n\t\t\/\/ Test for escaped square bracket\n\t\tif format[idxStart+1] == '[' {\n\t\t\tsb.appendString(format[pos : idxStart+1])\n\t\t\tpos = idxStart + 2\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Add skipped string (if any)\n\t\tif idxStart > pos { \/\/idxStart > pos+1 ???\n\t\t\tsb.appendString(format[pos:idxStart])\n\t\t}\n\n\t\t\/\/ Find square bracket end\n\t\trelBlockClose := strings.IndexRune(format[idxStart:], ']')\n\t\tif relBlockClose == -1 {\n\t\t\treturn \"\", fmt.Errorf(\"Opened square bracket never closed at pos %d. If you want a literal bracket escape it: [[\", idxStart)\n\t\t}\n\t\tidxEnd = idxStart + relBlockClose\n\n\t\t\/\/ found a block\n\t\tblock := format[idxStart+1 : idxEnd]\n\t\tfields := strings.Fields(block)\n\t\tfor _, field := range fields {\n\n\t\t\tswitch field {\n\t\t\t\/\/ Options\n\t\t\tcase \"reset\":\n\t\t\t\tsb.appendSgr(Reset)\n\t\t\tcase \"fg-reset\":\n\t\t\t\tsb.appendSgr(ResetForegroundColor)\n\t\t\tcase \"bg-reset\":\n\t\t\t\tsb.appendSgr(ResetBackgroundColor)\n\t\t\tcase \"bold\":\n\t\t\t\tsb.appendSgr(Bold)\n\t\t\tcase \"boldOff\":\n\t\t\t\tsb.appendSgr(BoldOff)\n\t\t\tcase \"underline\":\n\t\t\t\tsb.appendSgr(Underline)\n\t\t\tcase \"underlineOff\":\n\t\t\t\tsb.appendSgr(UnderlineOff)\n\t\t\tcase \"blink\":\n\t\t\t\tsb.appendSgr(Blink)\n\t\t\tcase \"blinkOff\":\n\t\t\t\tsb.appendSgr(BlinkOff)\n\t\t\tcase \"imageNegative\":\n\t\t\t\tsb.appendSgr(ImageNegative)\n\t\t\tcase \"imagePositive\":\n\t\t\t\tsb.appendSgr(ImagePositive)\n\t\t\tcase \"framed\":\n\t\t\t\tsb.appendSgr(Framed)\n\t\t\tcase \"encircled\":\n\t\t\t\tsb.appendSgr(Encircled)\n\t\t\tcase \"framedEncircledOff\":\n\t\t\t\tsb.appendSgr(FramedEncircledOff)\n\t\t\tcase \"overlined\":\n\t\t\t\tsb.appendSgr(Overlined)\n\t\t\tcase \"overlinedOff\":\n\t\t\t\tsb.appendSgr(OverlinedOff)\n\n\t\t\t\/\/ Foreground Colors\n\t\t\tcase \"fg-black\":\n\t\t\t\tsb.appendSgr(FgBlack)\n\t\t\tcase \"fg-red\":\n\t\t\t\tsb.appendSgr(FgRed)\n\t\t\tcase \"fg-green\":\n\t\t\t\tsb.appendSgr(FgGreen)\n\t\t\tcase \"fg-yellow\":\n\t\t\t\tsb.appendSgr(FgYellow)\n\t\t\tcase \"fg-blue\":\n\t\t\t\tsb.appendSgr(FgBlue)\n\t\t\tcase \"fg-magenta\":\n\t\t\t\tsb.appendSgr(FgMagenta)\n\t\t\tcase \"fg-cyan\":\n\t\t\t\tsb.appendSgr(FgCyan)\n\t\t\tcase \"fg-grey\":\n\t\t\t\tsb.appendSgr(FgGrey)\n\t\t\tcase \"fg-white\":\n\t\t\t\tsb.appendSgr(FgWhite)\n\n\t\t\t\/\/ Background Colors\n\t\t\tcase \"bg-black\":\n\t\t\t\tsb.appendSgr(BgBlack)\n\t\t\tcase \"bg-red\":\n\t\t\t\tsb.appendSgr(BgRed)\n\t\t\tcase \"bg-green\":\n\t\t\t\tsb.appendSgr(BgGreen)\n\t\t\tcase \"bg-yellow\":\n\t\t\t\tsb.appendSgr(BgYellow)\n\t\t\tcase \"bg-blue\":\n\t\t\t\tsb.appendSgr(BgBlue)\n\t\t\tcase \"bg-magenta\":\n\t\t\t\tsb.appendSgr(BgMagenta)\n\t\t\tcase \"bg-cyan\":\n\t\t\t\tsb.appendSgr(BgCyan)\n\t\t\tcase \"bg-grey\":\n\t\t\t\tsb.appendSgr(BgGrey)\n\t\t\tcase \"bg-white\":\n\t\t\t\tsb.appendSgr(BgWhite)\n\n\t\t\t\/\/ Not Found\n\t\t\tdefault:\n\t\t\t\tisFgColor := strings.HasPrefix(field, \"fg-\")\n\t\t\t\tisBgColor := strings.HasPrefix(field, \"bg-\")\n\t\t\t\tif isFgColor || isBgColor {\n\t\t\t\t\t\/\/ Get the color number from the fg- or bg- code\n\t\t\t\t\tclr, err := strconv.Atoi(field[3:])\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tgoto invalidBlockCode\n\t\t\t\t\t}\n\t\t\t\t\tif clr < 0 || clr > 255 {\n\t\t\t\t\t\treturn \"\", fmt.Errorf(\"Invalid color code %s. Expecting 0-255 or a defined color.\", field[3:])\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Add actual fg or bg color to sgrBuilder\n\t\t\t\t\tif isFgColor {\n\t\t\t\t\t\tsb.appendSgr(FgColor(clr))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tsb.appendSgr(BgColor(clr))\n\t\t\t\t\t}\n\t\t\t\t\tcontinue \/\/ next field\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ not valid\n\t\t\t\t\tgoto invalidBlockCode\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tcontinue\n\t\tinvalidBlockCode:\n\t\t\treturn \"\", fmt.Errorf(\"Invalid block code '%s' in block at position %d.\", field, idxStart)\n\n\t\t}\n\n\t\t\/\/ Change starting position for next iteration.\n\t\tpos = idxEnd + 1\n\t}\n\n\tif reset {\n\t\tsb.appendSgr(Reset)\n\t}\n\n\tif newline {\n\t\tsb.appendString(\"\\n\")\n\t}\n\n\treturn sb.string(), nil\n}\n<commit_msg>s\/blockcode\/code<commit_after>package sgr\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc MustParse(format string) string {\n\tstr, err := parse(true, false, format)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn str\n}\n\nfunc MustParseln(format string) string {\n\tstr, err := parse(true, true, format)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn str\n}\n\nfunc MustParseWithoutReset(format string) string {\n\tstr, err := parse(false, false, format)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn str\n}\n\nfunc Parse(format string) (string, error) {\n\treturn parse(true, false, format)\n}\n\nfunc Parseln(format string) (string, error) {\n\treturn parse(true, true, format)\n}\n\nfunc ParseWithoutReset(format string) (string, error) {\n\treturn parse(false, false, format)\n}\n\nfunc parse(reset bool, newline bool, format string) (string, error) {\n\t\/\/ Builder used to build the colored string.\n\tsb := new(sgrBuilder)\n\n\t\/\/ position in the parsing process\n\tpos := 0\n\n\t\/\/ index of the currenctly processing square bracket start\n\tidxStart := 0\n\tidxEnd := 0\n\n\tfor {\n\t\t\/\/ Find next square bracket, break loop when none was found.\n\t\trelBlockOpen := strings.IndexRune(format[pos:], '[')\n\t\tif relBlockOpen == -1 {\n\t\t\tsb.appendString(format[pos:])\n\t\t\tbreak\n\t\t}\n\t\tidxStart = pos + relBlockOpen\n\n\t\t\/\/ Test for escaped square bracket\n\t\tif format[idxStart+1] == '[' {\n\t\t\tsb.appendString(format[pos : idxStart+1])\n\t\t\tpos = idxStart + 2\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Add skipped string (if any)\n\t\tif idxStart > pos { \/\/idxStart > pos+1 ???\n\t\t\tsb.appendString(format[pos:idxStart])\n\t\t}\n\n\t\t\/\/ Find square bracket end\n\t\trelBlockClose := strings.IndexRune(format[idxStart:], ']')\n\t\tif relBlockClose == -1 {\n\t\t\treturn \"\", fmt.Errorf(\"Opened square bracket never closed at pos %d. If you want a literal bracket escape it: [[\", idxStart)\n\t\t}\n\t\tidxEnd = idxStart + relBlockClose\n\n\t\t\/\/ found a block\n\t\tblock := format[idxStart+1 : idxEnd]\n\t\tfields := strings.Fields(block)\n\t\tfor _, field := range fields {\n\n\t\t\tswitch field {\n\t\t\t\/\/ Options\n\t\t\tcase \"reset\":\n\t\t\t\tsb.appendSgr(Reset)\n\t\t\tcase \"fg-reset\":\n\t\t\t\tsb.appendSgr(ResetForegroundColor)\n\t\t\tcase \"bg-reset\":\n\t\t\t\tsb.appendSgr(ResetBackgroundColor)\n\t\t\tcase \"bold\":\n\t\t\t\tsb.appendSgr(Bold)\n\t\t\tcase \"boldOff\":\n\t\t\t\tsb.appendSgr(BoldOff)\n\t\t\tcase \"underline\":\n\t\t\t\tsb.appendSgr(Underline)\n\t\t\tcase \"underlineOff\":\n\t\t\t\tsb.appendSgr(UnderlineOff)\n\t\t\tcase \"blink\":\n\t\t\t\tsb.appendSgr(Blink)\n\t\t\tcase \"blinkOff\":\n\t\t\t\tsb.appendSgr(BlinkOff)\n\t\t\tcase \"imageNegative\":\n\t\t\t\tsb.appendSgr(ImageNegative)\n\t\t\tcase \"imagePositive\":\n\t\t\t\tsb.appendSgr(ImagePositive)\n\t\t\tcase \"framed\":\n\t\t\t\tsb.appendSgr(Framed)\n\t\t\tcase \"encircled\":\n\t\t\t\tsb.appendSgr(Encircled)\n\t\t\tcase \"framedEncircledOff\":\n\t\t\t\tsb.appendSgr(FramedEncircledOff)\n\t\t\tcase \"overlined\":\n\t\t\t\tsb.appendSgr(Overlined)\n\t\t\tcase \"overlinedOff\":\n\t\t\t\tsb.appendSgr(OverlinedOff)\n\n\t\t\t\/\/ Foreground Colors\n\t\t\tcase \"fg-black\":\n\t\t\t\tsb.appendSgr(FgBlack)\n\t\t\tcase \"fg-red\":\n\t\t\t\tsb.appendSgr(FgRed)\n\t\t\tcase \"fg-green\":\n\t\t\t\tsb.appendSgr(FgGreen)\n\t\t\tcase \"fg-yellow\":\n\t\t\t\tsb.appendSgr(FgYellow)\n\t\t\tcase \"fg-blue\":\n\t\t\t\tsb.appendSgr(FgBlue)\n\t\t\tcase \"fg-magenta\":\n\t\t\t\tsb.appendSgr(FgMagenta)\n\t\t\tcase \"fg-cyan\":\n\t\t\t\tsb.appendSgr(FgCyan)\n\t\t\tcase \"fg-grey\":\n\t\t\t\tsb.appendSgr(FgGrey)\n\t\t\tcase \"fg-white\":\n\t\t\t\tsb.appendSgr(FgWhite)\n\n\t\t\t\/\/ Background Colors\n\t\t\tcase \"bg-black\":\n\t\t\t\tsb.appendSgr(BgBlack)\n\t\t\tcase \"bg-red\":\n\t\t\t\tsb.appendSgr(BgRed)\n\t\t\tcase \"bg-green\":\n\t\t\t\tsb.appendSgr(BgGreen)\n\t\t\tcase \"bg-yellow\":\n\t\t\t\tsb.appendSgr(BgYellow)\n\t\t\tcase \"bg-blue\":\n\t\t\t\tsb.appendSgr(BgBlue)\n\t\t\tcase \"bg-magenta\":\n\t\t\t\tsb.appendSgr(BgMagenta)\n\t\t\tcase \"bg-cyan\":\n\t\t\t\tsb.appendSgr(BgCyan)\n\t\t\tcase \"bg-grey\":\n\t\t\t\tsb.appendSgr(BgGrey)\n\t\t\tcase \"bg-white\":\n\t\t\t\tsb.appendSgr(BgWhite)\n\n\t\t\t\/\/ Not Found\n\t\t\tdefault:\n\t\t\t\tisFgColor := strings.HasPrefix(field, \"fg-\")\n\t\t\t\tisBgColor := strings.HasPrefix(field, \"bg-\")\n\t\t\t\tif isFgColor || isBgColor {\n\t\t\t\t\t\/\/ Get the color number from the fg- or bg- code\n\t\t\t\t\tclr, err := strconv.Atoi(field[3:])\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tgoto invalidCode\n\t\t\t\t\t}\n\t\t\t\t\tif clr < 0 || clr > 255 {\n\t\t\t\t\t\treturn \"\", fmt.Errorf(\"Invalid color code %s. Expecting 0-255 or a defined color.\", field[3:])\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Add actual fg or bg color to sgrBuilder\n\t\t\t\t\tif isFgColor {\n\t\t\t\t\t\tsb.appendSgr(FgColor(clr))\n\t\t\t\t\t} else {\n\t\t\t\t\t\tsb.appendSgr(BgColor(clr))\n\t\t\t\t\t}\n\t\t\t\t\tcontinue \/\/ next field\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ not valid\n\t\t\t\t\tgoto invalidCode\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tcontinue\n\t\tinvalidCode:\n\t\t\treturn \"\", fmt.Errorf(\"Invalid code '%s' in block at position %d.\", field, idxStart)\n\n\t\t}\n\n\t\t\/\/ Change starting position for next iteration.\n\t\tpos = idxEnd + 1\n\t}\n\n\tif reset {\n\t\tsb.appendSgr(Reset)\n\t}\n\n\tif newline {\n\t\tsb.appendString(\"\\n\")\n\t}\n\n\treturn sb.string(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>\n\/\/ See LICENSE for licensing information\n\npackage sh\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"regexp\"\n\t\"unicode\/utf8\"\n)\n\nfunc Parse(r io.Reader, name string) (Prog, error) {\n\tp := &parser{\n\t\tr: bufio.NewReader(r),\n\t\tname: name,\n\t\tnpos: position{\n\t\t\tline: 1,\n\t\t\tcol: 1,\n\t\t},\n\t}\n\tp.push(&p.prog.Stmts)\n\tp.next()\n\tp.program()\n\treturn p.prog, p.err\n}\n\ntype parser struct {\n\tr *bufio.Reader\n\tname string\n\n\terr error\n\n\tltok Token\n\ttok Token\n\tlval string\n\tval string\n\n\tlpos position\n\tpos position\n\tnpos position\n\n\tprog Prog\n\tstack []interface{}\n}\n\ntype position struct {\n\tline int\n\tcol int\n}\n\nvar reserved = map[rune]bool{\n\t'\\n': true,\n\t'#': true,\n\t'&': true,\n\t'>': true,\n\t'<': true,\n\t'|': true,\n\t';': true,\n\t'(': true,\n\t')': true,\n}\n\n\/\/ like reserved, but these can appear in the middle of a word\nvar starters = map[rune]bool{\n\t'{': true,\n\t'}': true,\n}\n\nvar space = map[rune]bool{\n\t' ': true,\n\t'\\t': true,\n}\n\nvar identRe = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*$`)\n\nfunc (p *parser) readRune() (rune, error) {\n\tr, _, err := p.r.ReadRune()\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\tp.eof()\n\t\t} else {\n\t\t\tp.errPass(err)\n\t\t}\n\t\treturn 0, err\n\t}\n\tp.moveWith(r)\n\treturn r, nil\n}\n\nfunc (p *parser) moveWith(r rune) {\n\tif r == '\\n' {\n\t\tp.npos.line++\n\t\tp.npos.col = 1\n\t} else {\n\t\tp.npos.col++\n\t}\n}\n\nfunc (p *parser) unreadRune() {\n\tif err := p.r.UnreadRune(); err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc (p *parser) readOnly(wanted rune) bool {\n\tr, _, err := p.r.ReadRune()\n\tif r == wanted {\n\t\tp.moveWith(r)\n\t\treturn true\n\t}\n\tif err == nil {\n\t\tp.unreadRune()\n\t}\n\treturn false\n}\n\nfunc (p *parser) next() {\n\tp.lpos = p.pos\n\tr := ' '\n\tvar err error\n\tfor space[r] {\n\t\tr, err = p.readRune()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tp.pos = p.npos\n\tp.pos.col--\n\tif r == '\\\\' && p.readOnly('\\n') {\n\t\tp.next()\n\t\treturn\n\t}\n\tp.lval = p.val\n\tif reserved[r] || starters[r] {\n\t\tswitch r {\n\t\tcase '#':\n\t\t\tp.advance(COMMENT, p.readUpTo('\\n'))\n\t\tcase '\\n':\n\t\t\tp.advance('\\n', \"\")\n\t\tdefault:\n\t\t\tp.setTok(p.doToken(r))\n\t\t}\n\t\treturn\n\t}\n\trs := []rune{r}\n\tq := rune(0)\n\tif r == '\"' || r == '\\'' {\n\t\tq = r\n\t}\n\tfor {\n\t\tr, err = p.readRune()\n\t\tif err != nil {\n\t\t\tif q != 0 {\n\t\t\t\tp.errWanted(Token(q))\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif q != 0 {\n\t\t\tif q == '\"' && r == '\\\\' {\n\t\t\t\trs = append(rs, r)\n\t\t\t\tr, _ = p.readRune()\n\t\t\t} else if r == q {\n\t\t\t\tq = 0\n\t\t\t}\n\t\t} else if r == '\"' || r == '\\'' {\n\t\t\tq = r\n\t\t} else if reserved[r] || space[r] {\n\t\t\tp.npos.col--\n\t\t\tp.unreadRune()\n\t\t\tbreak\n\t\t}\n\t\trs = append(rs, r)\n\t}\n\tp.setTok(WORD)\n\tp.val = string(rs)\n}\n\nfunc (p *parser) doToken(r rune) Token {\n\tswitch r {\n\tcase '&':\n\t\tif p.readOnly('&') {\n\t\t\treturn LAND\n\t\t}\n\t\treturn AND\n\tcase '|':\n\t\tif p.readOnly('|') {\n\t\t\treturn LOR\n\t\t}\n\t\treturn OR\n\tcase '(':\n\t\treturn LPAREN\n\tcase '{':\n\t\treturn LBRACE\n\tcase ')':\n\t\treturn RPAREN\n\tcase '}':\n\t\treturn RBRACE\n\tcase ';':\n\t\tif p.readOnly(';') {\n\t\t\treturn DSEMICOLON\n\t\t}\n\t\treturn SEMICOLON\n\tcase '<':\n\t\treturn LSS\n\tcase '>':\n\t\tif p.readOnly('>') {\n\t\t\treturn SHR\n\t\t}\n\t\treturn GTR\n\tdefault:\n\t\treturn Token(r)\n\t}\n}\n\nfunc (p *parser) advance(tok Token, val string) {\n\tp.setTok(tok)\n\tp.lval = p.val\n\tp.val = val\n}\n\nfunc (p *parser) setTok(tok Token) {\n\tp.ltok = p.tok\n\tp.tok = tok\n}\n\nfunc (p *parser) eof() {\n\tp.advance(EOF, \"EOF\")\n}\n\nfunc (p *parser) readUpTo(delim byte) string {\n\tb, err := p.r.ReadBytes(delim)\n\tcont := b\n\tif err == io.EOF {\n\t} else if err != nil {\n\t\tp.errPass(err)\n\t} else {\n\t\tcont = cont[:len(b)-1]\n\t}\n\tp.npos.col += utf8.RuneCount(b)\n\treturn string(cont)\n}\n\n\/\/ We can't simply have these as tokens as they can sometimes be valid\n\/\/ words, e.g. `echo if`.\nvar reservedWords = map[Token]string{\n\tIF: \"if\",\n\tTHEN: \"then\",\n\tELIF: \"elif\",\n\tELSE: \"else\",\n\tFI: \"fi\",\n\tWHILE: \"while\",\n\tDO: \"do\",\n\tDONE: \"done\",\n}\n\nfunc (p *parser) peek(tok Token) bool {\n\treturn p.tok == tok || (p.tok == WORD && p.val == reservedWords[tok])\n}\n\nfunc (p *parser) got(tok Token) bool {\n\tif p.peek(tok) {\n\t\tp.next()\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (p *parser) want(tok Token) {\n\tif !p.peek(tok) {\n\t\tp.errWanted(tok)\n\t\treturn\n\t}\n\tp.next()\n}\n\nfunc (p *parser) errPass(err error) {\n\tif p.err == nil {\n\t\tp.err = err\n\t}\n\tp.eof()\n}\n\nfunc (p *parser) posErr(pos position, format string, v ...interface{}) {\n\tprefix := fmt.Sprintf(\"%s:%d:%d: \", p.name, pos.line, pos.col)\n\tp.errPass(fmt.Errorf(prefix+format, v...))\n}\n\nfunc (p *parser) curErr(format string, v ...interface{}) {\n\tp.posErr(p.pos, format, v...)\n}\n\nfunc (p *parser) errWantedStr(s string) {\n\tif p.tok == EOF {\n\t\tp.pos = p.npos\n\t}\n\tp.curErr(\"unexpected token %s - wanted %s\", p.tok, s)\n}\n\nfunc (p *parser) errWanted(tok Token) {\n\tp.errWantedStr(tok.String())\n}\n\nfunc (p *parser) errAfterStr(s string) {\n\tp.curErr(\"unexpected token %s after %s\", p.tok, s)\n}\n\nfunc (p *parser) add(n Node) {\n\tcur := p.stack[len(p.stack)-1]\n\tswitch x := cur.(type) {\n\tcase *[]Node:\n\t\t*x = append(*x, n)\n\tcase *Node:\n\t\tif *x != nil {\n\t\t\tpanic(\"single node set twice\")\n\t\t}\n\t\t*x = n\n\tdefault:\n\t\tpanic(\"unknown type in the stack\")\n\t}\n}\n\nfunc (p *parser) pop() {\n\tp.stack = p.stack[:len(p.stack)-1]\n}\n\nfunc (p *parser) push(v interface{}) {\n\tp.stack = append(p.stack, v)\n}\n\nfunc (p *parser) popAdd(n Node) {\n\tp.pop()\n\tp.add(n)\n}\n\nfunc (p *parser) program() {\n\tp.commands()\n}\n\nfunc (p *parser) commands(stop ...Token) (count int) {\n\tfor p.tok != EOF {\n\t\tfor _, tok := range stop {\n\t\t\tif p.peek(tok) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tp.command()\n\t\tcount++\n\t}\n\treturn\n}\n\nfunc (p *parser) command() {\n\tswitch {\n\tcase p.got(COMMENT):\n\t\tcom := Comment{\n\t\t\tText: p.lval,\n\t\t}\n\t\tp.add(com)\n\tcase p.got('\\n'), p.got(COMMENT):\n\t\tif p.tok != EOF {\n\t\t\tp.command()\n\t\t}\n\tcase p.got(LPAREN):\n\t\tvar sub Subshell\n\t\tp.push(&sub.Stmts)\n\t\tif p.commands(RPAREN) == 0 {\n\t\t\tp.errWantedStr(\"command\")\n\t\t}\n\t\tp.want(RPAREN)\n\t\tp.popAdd(sub)\n\tcase p.got(LBRACE):\n\t\tvar bl Block\n\t\tp.push(&bl.Stmts)\n\t\tif p.commands(RBRACE) == 0 {\n\t\t\tp.errWantedStr(\"command\")\n\t\t}\n\t\tp.want(RBRACE)\n\t\tp.popAdd(bl)\n\tcase p.got(IF):\n\t\tvar ifs IfStmt\n\t\tp.push(&ifs.Cond)\n\t\tp.command()\n\t\tp.pop()\n\t\tp.want(THEN)\n\t\tp.push(&ifs.ThenStmts)\n\t\tp.commands(FI, ELIF, ELSE)\n\t\tp.pop()\n\t\tp.push(&ifs.Elifs)\n\t\tfor p.got(ELIF) {\n\t\t\tvar elf Elif\n\t\t\tp.push(&elf.Cond)\n\t\t\tp.command()\n\t\t\tp.pop()\n\t\t\tp.want(THEN)\n\t\t\tp.push(&elf.ThenStmts)\n\t\t\tp.commands(FI, ELIF, ELSE)\n\t\t\tp.popAdd(elf)\n\t\t}\n\t\tif p.got(ELSE) {\n\t\t\tp.pop()\n\t\t\tp.push(&ifs.ElseStmts)\n\t\t\tp.commands(FI)\n\t\t}\n\t\tp.want(FI)\n\t\tp.popAdd(ifs)\n\tcase p.got(WHILE):\n\t\tvar whl WhileStmt\n\t\tp.push(&whl.Cond)\n\t\tp.command()\n\t\tp.pop()\n\t\tp.want(DO)\n\t\tp.push(&whl.DoStmts)\n\t\tp.commands(DONE)\n\t\tp.want(DONE)\n\t\tp.popAdd(whl)\n\tcase p.got(WORD):\n\t\tvar cmd Command\n\t\tp.push(&cmd.Args)\n\t\tp.add(Lit{Val: p.lval})\n\t\tfirst := p.lpos\n\targs:\n\t\tfor p.tok != EOF {\n\t\t\tswitch {\n\t\t\tcase p.got(WORD):\n\t\t\t\tp.add(Lit{Val: p.lval})\n\t\t\tcase p.got(AND):\n\t\t\t\tcmd.Background = true\n\t\t\t\tbreak args\n\t\t\tcase p.got(LAND):\n\t\t\t\tp.binaryExpr(LAND, cmd)\n\t\t\t\treturn\n\t\t\tcase p.got(OR):\n\t\t\t\tp.binaryExpr(OR, cmd)\n\t\t\t\treturn\n\t\t\tcase p.got(LOR):\n\t\t\t\tp.binaryExpr(LOR, cmd)\n\t\t\t\treturn\n\t\t\tcase p.got(LPAREN):\n\t\t\t\tif !identRe.MatchString(p.lval) {\n\t\t\t\t\tp.posErr(first, \"invalid func name %q\", p.lval)\n\t\t\t\t\tbreak args\n\t\t\t\t}\n\t\t\t\tfun := FuncDecl{\n\t\t\t\t\tName: Lit{Val: p.lval},\n\t\t\t\t}\n\t\t\t\tp.want(RPAREN)\n\t\t\t\tp.push(&fun.Body)\n\t\t\t\tp.command()\n\t\t\t\tp.pop()\n\t\t\t\tp.popAdd(fun)\n\t\t\t\treturn\n\t\t\tcase p.gotRedirect():\n\t\t\tcase p.got(SEMICOLON):\n\t\t\t\tbreak args\n\t\t\tcase p.got('\\n'):\n\t\t\t\tbreak args\n\t\t\tdefault:\n\t\t\t\tp.errAfterStr(\"command\")\n\t\t\t}\n\t\t}\n\t\tp.popAdd(cmd)\n\tdefault:\n\t\tp.errWantedStr(\"command\")\n\t}\n}\n\nfunc (p *parser) binaryExpr(op Token, left Node) {\n\tb := BinaryExpr{Op: op}\n\tp.push(&b.Y)\n\tp.command()\n\tp.pop()\n\tb.X = left\n\tp.popAdd(b)\n}\n\nfunc (p *parser) gotRedirect() bool {\n\tvar r Redirect\n\tswitch {\n\tcase p.got(GTR):\n\t\tr.Op = GTR\n\t\tif p.got(AND) {\n\t\t\tp.want(WORD)\n\t\t\tr.Obj = Lit{Val: \"&\" + p.lval}\n\t\t} else {\n\t\t\tp.want(WORD)\n\t\t\tr.Obj = Lit{Val: p.lval}\n\t\t}\n\tcase p.got(SHR):\n\t\tr.Op = SHR\n\t\tp.want(WORD)\n\t\tr.Obj = Lit{Val: p.lval}\n\tcase p.got(LSS):\n\t\tr.Op = LSS\n\t\tp.want(WORD)\n\t\tr.Obj = Lit{Val: p.lval}\n\tdefault:\n\t\treturn false\n\t}\n\tp.add(r)\n\treturn true\n}\n<commit_msg>Rename eof() to setEOF() for clarity<commit_after>\/\/ Copyright (c) 2016, Daniel Martí <mvdan@mvdan.cc>\n\/\/ See LICENSE for licensing information\n\npackage sh\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"regexp\"\n\t\"unicode\/utf8\"\n)\n\nfunc Parse(r io.Reader, name string) (Prog, error) {\n\tp := &parser{\n\t\tr: bufio.NewReader(r),\n\t\tname: name,\n\t\tnpos: position{\n\t\t\tline: 1,\n\t\t\tcol: 1,\n\t\t},\n\t}\n\tp.push(&p.prog.Stmts)\n\tp.next()\n\tp.program()\n\treturn p.prog, p.err\n}\n\ntype parser struct {\n\tr *bufio.Reader\n\tname string\n\n\terr error\n\n\tltok Token\n\ttok Token\n\tlval string\n\tval string\n\n\tlpos position\n\tpos position\n\tnpos position\n\n\tprog Prog\n\tstack []interface{}\n}\n\ntype position struct {\n\tline int\n\tcol int\n}\n\nvar reserved = map[rune]bool{\n\t'\\n': true,\n\t'#': true,\n\t'&': true,\n\t'>': true,\n\t'<': true,\n\t'|': true,\n\t';': true,\n\t'(': true,\n\t')': true,\n}\n\n\/\/ like reserved, but these can appear in the middle of a word\nvar starters = map[rune]bool{\n\t'{': true,\n\t'}': true,\n}\n\nvar space = map[rune]bool{\n\t' ': true,\n\t'\\t': true,\n}\n\nvar identRe = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_]*$`)\n\nfunc (p *parser) readRune() (rune, error) {\n\tr, _, err := p.r.ReadRune()\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\tp.setEOF()\n\t\t} else {\n\t\t\tp.errPass(err)\n\t\t}\n\t\treturn 0, err\n\t}\n\tp.moveWith(r)\n\treturn r, nil\n}\n\nfunc (p *parser) moveWith(r rune) {\n\tif r == '\\n' {\n\t\tp.npos.line++\n\t\tp.npos.col = 1\n\t} else {\n\t\tp.npos.col++\n\t}\n}\n\nfunc (p *parser) unreadRune() {\n\tif err := p.r.UnreadRune(); err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc (p *parser) readOnly(wanted rune) bool {\n\tr, _, err := p.r.ReadRune()\n\tif r == wanted {\n\t\tp.moveWith(r)\n\t\treturn true\n\t}\n\tif err == nil {\n\t\tp.unreadRune()\n\t}\n\treturn false\n}\n\nfunc (p *parser) next() {\n\tp.lpos = p.pos\n\tr := ' '\n\tvar err error\n\tfor space[r] {\n\t\tr, err = p.readRune()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tp.pos = p.npos\n\tp.pos.col--\n\tif r == '\\\\' && p.readOnly('\\n') {\n\t\tp.next()\n\t\treturn\n\t}\n\tp.lval = p.val\n\tif reserved[r] || starters[r] {\n\t\tswitch r {\n\t\tcase '#':\n\t\t\tp.advance(COMMENT, p.readUpTo('\\n'))\n\t\tcase '\\n':\n\t\t\tp.advance('\\n', \"\")\n\t\tdefault:\n\t\t\tp.setTok(p.doToken(r))\n\t\t}\n\t\treturn\n\t}\n\trs := []rune{r}\n\tq := rune(0)\n\tif r == '\"' || r == '\\'' {\n\t\tq = r\n\t}\n\tfor {\n\t\tr, err = p.readRune()\n\t\tif err != nil {\n\t\t\tif q != 0 {\n\t\t\t\tp.errWanted(Token(q))\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif q != 0 {\n\t\t\tif q == '\"' && r == '\\\\' {\n\t\t\t\trs = append(rs, r)\n\t\t\t\tr, _ = p.readRune()\n\t\t\t} else if r == q {\n\t\t\t\tq = 0\n\t\t\t}\n\t\t} else if r == '\"' || r == '\\'' {\n\t\t\tq = r\n\t\t} else if reserved[r] || space[r] {\n\t\t\tp.npos.col--\n\t\t\tp.unreadRune()\n\t\t\tbreak\n\t\t}\n\t\trs = append(rs, r)\n\t}\n\tp.setTok(WORD)\n\tp.val = string(rs)\n}\n\nfunc (p *parser) doToken(r rune) Token {\n\tswitch r {\n\tcase '&':\n\t\tif p.readOnly('&') {\n\t\t\treturn LAND\n\t\t}\n\t\treturn AND\n\tcase '|':\n\t\tif p.readOnly('|') {\n\t\t\treturn LOR\n\t\t}\n\t\treturn OR\n\tcase '(':\n\t\treturn LPAREN\n\tcase '{':\n\t\treturn LBRACE\n\tcase ')':\n\t\treturn RPAREN\n\tcase '}':\n\t\treturn RBRACE\n\tcase ';':\n\t\tif p.readOnly(';') {\n\t\t\treturn DSEMICOLON\n\t\t}\n\t\treturn SEMICOLON\n\tcase '<':\n\t\treturn LSS\n\tcase '>':\n\t\tif p.readOnly('>') {\n\t\t\treturn SHR\n\t\t}\n\t\treturn GTR\n\tdefault:\n\t\treturn Token(r)\n\t}\n}\n\nfunc (p *parser) advance(tok Token, val string) {\n\tp.setTok(tok)\n\tp.lval = p.val\n\tp.val = val\n}\n\nfunc (p *parser) setTok(tok Token) {\n\tp.ltok = p.tok\n\tp.tok = tok\n}\n\nfunc (p *parser) setEOF() {\n\tp.advance(EOF, \"EOF\")\n}\n\nfunc (p *parser) readUpTo(delim byte) string {\n\tb, err := p.r.ReadBytes(delim)\n\tcont := b\n\tif err == io.EOF {\n\t} else if err != nil {\n\t\tp.errPass(err)\n\t} else {\n\t\tcont = cont[:len(b)-1]\n\t}\n\tp.npos.col += utf8.RuneCount(b)\n\treturn string(cont)\n}\n\n\/\/ We can't simply have these as tokens as they can sometimes be valid\n\/\/ words, e.g. `echo if`.\nvar reservedWords = map[Token]string{\n\tIF: \"if\",\n\tTHEN: \"then\",\n\tELIF: \"elif\",\n\tELSE: \"else\",\n\tFI: \"fi\",\n\tWHILE: \"while\",\n\tDO: \"do\",\n\tDONE: \"done\",\n}\n\nfunc (p *parser) peek(tok Token) bool {\n\treturn p.tok == tok || (p.tok == WORD && p.val == reservedWords[tok])\n}\n\nfunc (p *parser) got(tok Token) bool {\n\tif p.peek(tok) {\n\t\tp.next()\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (p *parser) want(tok Token) {\n\tif !p.peek(tok) {\n\t\tp.errWanted(tok)\n\t\treturn\n\t}\n\tp.next()\n}\n\nfunc (p *parser) errPass(err error) {\n\tif p.err == nil {\n\t\tp.err = err\n\t}\n\tp.setEOF()\n}\n\nfunc (p *parser) posErr(pos position, format string, v ...interface{}) {\n\tprefix := fmt.Sprintf(\"%s:%d:%d: \", p.name, pos.line, pos.col)\n\tp.errPass(fmt.Errorf(prefix+format, v...))\n}\n\nfunc (p *parser) curErr(format string, v ...interface{}) {\n\tp.posErr(p.pos, format, v...)\n}\n\nfunc (p *parser) errWantedStr(s string) {\n\tif p.tok == EOF {\n\t\tp.pos = p.npos\n\t}\n\tp.curErr(\"unexpected token %s - wanted %s\", p.tok, s)\n}\n\nfunc (p *parser) errWanted(tok Token) {\n\tp.errWantedStr(tok.String())\n}\n\nfunc (p *parser) errAfterStr(s string) {\n\tp.curErr(\"unexpected token %s after %s\", p.tok, s)\n}\n\nfunc (p *parser) add(n Node) {\n\tcur := p.stack[len(p.stack)-1]\n\tswitch x := cur.(type) {\n\tcase *[]Node:\n\t\t*x = append(*x, n)\n\tcase *Node:\n\t\tif *x != nil {\n\t\t\tpanic(\"single node set twice\")\n\t\t}\n\t\t*x = n\n\tdefault:\n\t\tpanic(\"unknown type in the stack\")\n\t}\n}\n\nfunc (p *parser) pop() {\n\tp.stack = p.stack[:len(p.stack)-1]\n}\n\nfunc (p *parser) push(v interface{}) {\n\tp.stack = append(p.stack, v)\n}\n\nfunc (p *parser) popAdd(n Node) {\n\tp.pop()\n\tp.add(n)\n}\n\nfunc (p *parser) program() {\n\tp.commands()\n}\n\nfunc (p *parser) commands(stop ...Token) (count int) {\n\tfor p.tok != EOF {\n\t\tfor _, tok := range stop {\n\t\t\tif p.peek(tok) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tp.command()\n\t\tcount++\n\t}\n\treturn\n}\n\nfunc (p *parser) command() {\n\tswitch {\n\tcase p.got(COMMENT):\n\t\tcom := Comment{\n\t\t\tText: p.lval,\n\t\t}\n\t\tp.add(com)\n\tcase p.got('\\n'), p.got(COMMENT):\n\t\tif p.tok != EOF {\n\t\t\tp.command()\n\t\t}\n\tcase p.got(LPAREN):\n\t\tvar sub Subshell\n\t\tp.push(&sub.Stmts)\n\t\tif p.commands(RPAREN) == 0 {\n\t\t\tp.errWantedStr(\"command\")\n\t\t}\n\t\tp.want(RPAREN)\n\t\tp.popAdd(sub)\n\tcase p.got(LBRACE):\n\t\tvar bl Block\n\t\tp.push(&bl.Stmts)\n\t\tif p.commands(RBRACE) == 0 {\n\t\t\tp.errWantedStr(\"command\")\n\t\t}\n\t\tp.want(RBRACE)\n\t\tp.popAdd(bl)\n\tcase p.got(IF):\n\t\tvar ifs IfStmt\n\t\tp.push(&ifs.Cond)\n\t\tp.command()\n\t\tp.pop()\n\t\tp.want(THEN)\n\t\tp.push(&ifs.ThenStmts)\n\t\tp.commands(FI, ELIF, ELSE)\n\t\tp.pop()\n\t\tp.push(&ifs.Elifs)\n\t\tfor p.got(ELIF) {\n\t\t\tvar elf Elif\n\t\t\tp.push(&elf.Cond)\n\t\t\tp.command()\n\t\t\tp.pop()\n\t\t\tp.want(THEN)\n\t\t\tp.push(&elf.ThenStmts)\n\t\t\tp.commands(FI, ELIF, ELSE)\n\t\t\tp.popAdd(elf)\n\t\t}\n\t\tif p.got(ELSE) {\n\t\t\tp.pop()\n\t\t\tp.push(&ifs.ElseStmts)\n\t\t\tp.commands(FI)\n\t\t}\n\t\tp.want(FI)\n\t\tp.popAdd(ifs)\n\tcase p.got(WHILE):\n\t\tvar whl WhileStmt\n\t\tp.push(&whl.Cond)\n\t\tp.command()\n\t\tp.pop()\n\t\tp.want(DO)\n\t\tp.push(&whl.DoStmts)\n\t\tp.commands(DONE)\n\t\tp.want(DONE)\n\t\tp.popAdd(whl)\n\tcase p.got(WORD):\n\t\tvar cmd Command\n\t\tp.push(&cmd.Args)\n\t\tp.add(Lit{Val: p.lval})\n\t\tfirst := p.lpos\n\targs:\n\t\tfor p.tok != EOF {\n\t\t\tswitch {\n\t\t\tcase p.got(WORD):\n\t\t\t\tp.add(Lit{Val: p.lval})\n\t\t\tcase p.got(AND):\n\t\t\t\tcmd.Background = true\n\t\t\t\tbreak args\n\t\t\tcase p.got(LAND):\n\t\t\t\tp.binaryExpr(LAND, cmd)\n\t\t\t\treturn\n\t\t\tcase p.got(OR):\n\t\t\t\tp.binaryExpr(OR, cmd)\n\t\t\t\treturn\n\t\t\tcase p.got(LOR):\n\t\t\t\tp.binaryExpr(LOR, cmd)\n\t\t\t\treturn\n\t\t\tcase p.got(LPAREN):\n\t\t\t\tif !identRe.MatchString(p.lval) {\n\t\t\t\t\tp.posErr(first, \"invalid func name %q\", p.lval)\n\t\t\t\t\tbreak args\n\t\t\t\t}\n\t\t\t\tfun := FuncDecl{\n\t\t\t\t\tName: Lit{Val: p.lval},\n\t\t\t\t}\n\t\t\t\tp.want(RPAREN)\n\t\t\t\tp.push(&fun.Body)\n\t\t\t\tp.command()\n\t\t\t\tp.pop()\n\t\t\t\tp.popAdd(fun)\n\t\t\t\treturn\n\t\t\tcase p.gotRedirect():\n\t\t\tcase p.got(SEMICOLON):\n\t\t\t\tbreak args\n\t\t\tcase p.got('\\n'):\n\t\t\t\tbreak args\n\t\t\tdefault:\n\t\t\t\tp.errAfterStr(\"command\")\n\t\t\t}\n\t\t}\n\t\tp.popAdd(cmd)\n\tdefault:\n\t\tp.errWantedStr(\"command\")\n\t}\n}\n\nfunc (p *parser) binaryExpr(op Token, left Node) {\n\tb := BinaryExpr{Op: op}\n\tp.push(&b.Y)\n\tp.command()\n\tp.pop()\n\tb.X = left\n\tp.popAdd(b)\n}\n\nfunc (p *parser) gotRedirect() bool {\n\tvar r Redirect\n\tswitch {\n\tcase p.got(GTR):\n\t\tr.Op = GTR\n\t\tif p.got(AND) {\n\t\t\tp.want(WORD)\n\t\t\tr.Obj = Lit{Val: \"&\" + p.lval}\n\t\t} else {\n\t\t\tp.want(WORD)\n\t\t\tr.Obj = Lit{Val: p.lval}\n\t\t}\n\tcase p.got(SHR):\n\t\tr.Op = SHR\n\t\tp.want(WORD)\n\t\tr.Obj = Lit{Val: p.lval}\n\tcase p.got(LSS):\n\t\tr.Op = LSS\n\t\tp.want(WORD)\n\t\tr.Obj = Lit{Val: p.lval}\n\tdefault:\n\t\treturn false\n\t}\n\tp.add(r)\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package packer\n\nimport (\n\t\"bytes\"\n\t\"cgl.tideland.biz\/asserts\"\n\t\"os\"\n\t\"testing\"\n)\n\nfunc testEnvironment() *Environment {\n\tconfig := &EnvironmentConfig{}\n\tconfig.Ui = &ReaderWriterUi{\n\t\tnew(bytes.Buffer),\n\t\tnew(bytes.Buffer),\n\t}\n\n\treturn NewEnvironment(config)\n}\n\nfunc TestEnvironment_Cli_CallsRun(t *testing.T) {\n\t\/\/_ := asserts.NewTestingAsserts(t, true)\n\n\t\/\/ TODO: Test that the call to `Run` is done with\n\t\/\/ proper arguments and such.\n}\n\nfunc TestEnvironment_DefaultCli_Empty(t *testing.T) {\n\tassert := asserts.NewTestingAsserts(t, true)\n\n\tdefaultEnv := testEnvironment()\n\n\tassert.Equal(defaultEnv.Cli([]string{}), 1, \"CLI with no args\")\n}\n\nfunc TestEnvironment_DefaultCli_Help(t *testing.T) {\n\tassert := asserts.NewTestingAsserts(t, true)\n\n\tdefaultEnv := testEnvironment()\n\n\t\/\/ Test the basic version options\n\tassert.Equal(defaultEnv.Cli([]string{\"--help\"}), 1, \"--help should print\")\n\tassert.Equal(defaultEnv.Cli([]string{\"-h\"}), 1, \"--help should print\")\n}\n\nfunc TestEnvironment_DefaultCli_Version(t *testing.T) {\n\tassert := asserts.NewTestingAsserts(t, true)\n\n\tdefaultEnv := testEnvironment()\n\n\t\/\/ Test the basic version options\n\tassert.Equal(defaultEnv.Cli([]string{\"version\"}), 0, \"version should work\")\n\tassert.Equal(defaultEnv.Cli([]string{\"--version\"}), 0, \"--version should work\")\n\tassert.Equal(defaultEnv.Cli([]string{\"-v\"}), 0, \"-v should work\")\n\n\t\/\/ Test the --version and -v can appear anywhere\n\tassert.Equal(defaultEnv.Cli([]string{\"bad\", \"-v\"}), 0, \"-v should work anywhere\")\n\tassert.Equal(defaultEnv.Cli([]string{\"bad\", \"--version\"}), 0, \"--version should work anywhere\")\n\n\t\/\/ Test that \"version\" can't appear anywhere\n\tassert.Equal(defaultEnv.Cli([]string{\"bad\", \"version\"}), 1, \"version should NOT work anywhere\")\n}\n\nfunc TestEnvironment_DefaultUi(t *testing.T) {\n\tassert := asserts.NewTestingAsserts(t, true)\n\n\tdefaultEnv := NewEnvironment(nil)\n\tassert.NotNil(defaultEnv.Ui(), \"default UI should not be nil\")\n\n\trwUi, ok := defaultEnv.Ui().(*ReaderWriterUi)\n\tassert.True(ok, \"default UI should be ReaderWriterUi\")\n\tassert.Equal(rwUi.Writer, os.Stdout, \"default UI should go to stdout\")\n\tassert.Equal(rwUi.Reader, os.Stdin, \"default UI should read from stdin\")\n}\n\nfunc TestEnvironment_PrintHelp(t *testing.T) {\n\t\/\/ Just call the function and verify that no panics occur\n\ttestEnvironment().PrintHelp()\n}\n\nfunc TestEnvironment_SettingUi(t *testing.T) {\n\tassert := asserts.NewTestingAsserts(t, true)\n\n\tui := &ReaderWriterUi{new(bytes.Buffer), new(bytes.Buffer)}\n\n\tconfig := &EnvironmentConfig{}\n\tconfig.Ui = ui\n\n\tenv := NewEnvironment(config)\n\n\tassert.Equal(env.Ui(), ui, \"UIs should be equal\")\n}\n<commit_msg>Test the CLI that Run is called on custom commands<commit_after>package packer\n\nimport (\n\t\"bytes\"\n\t\"cgl.tideland.biz\/asserts\"\n\t\"os\"\n\t\"testing\"\n)\n\ntype TestCommand struct {\n\trunCalled bool\n}\n\nfunc (tc *TestCommand) Run(env *Environment, args []string) int {\n\ttc.runCalled = true\n\treturn 0\n}\n\nfunc (tc *TestCommand) Synopsis() string {\n\treturn \"\"\n}\n\nfunc testEnvironment() *Environment {\n\tconfig := &EnvironmentConfig{}\n\tconfig.Ui = &ReaderWriterUi{\n\t\tnew(bytes.Buffer),\n\t\tnew(bytes.Buffer),\n\t}\n\n\treturn NewEnvironment(config)\n}\n\nfunc TestEnvironment_Cli_CallsRun(t *testing.T) {\n\tassert := asserts.NewTestingAsserts(t, true)\n\n\tcommand := &TestCommand{}\n\n\tconfig := &EnvironmentConfig{}\n\tconfig.Command = make(map[string]Command)\n\tconfig.Command[\"foo\"] = command\n\n\tenv := NewEnvironment(config)\n\tassert.Equal(env.Cli([]string{\"foo\"}), 0, \"runs foo command\")\n\tassert.True(command.runCalled, \"run should've been called\")\n}\n\nfunc TestEnvironment_DefaultCli_Empty(t *testing.T) {\n\tassert := asserts.NewTestingAsserts(t, true)\n\n\tdefaultEnv := testEnvironment()\n\n\tassert.Equal(defaultEnv.Cli([]string{}), 1, \"CLI with no args\")\n}\n\nfunc TestEnvironment_DefaultCli_Help(t *testing.T) {\n\tassert := asserts.NewTestingAsserts(t, true)\n\n\tdefaultEnv := testEnvironment()\n\n\t\/\/ Test the basic version options\n\tassert.Equal(defaultEnv.Cli([]string{\"--help\"}), 1, \"--help should print\")\n\tassert.Equal(defaultEnv.Cli([]string{\"-h\"}), 1, \"--help should print\")\n}\n\nfunc TestEnvironment_DefaultCli_Version(t *testing.T) {\n\tassert := asserts.NewTestingAsserts(t, true)\n\n\tdefaultEnv := testEnvironment()\n\n\t\/\/ Test the basic version options\n\tassert.Equal(defaultEnv.Cli([]string{\"version\"}), 0, \"version should work\")\n\tassert.Equal(defaultEnv.Cli([]string{\"--version\"}), 0, \"--version should work\")\n\tassert.Equal(defaultEnv.Cli([]string{\"-v\"}), 0, \"-v should work\")\n\n\t\/\/ Test the --version and -v can appear anywhere\n\tassert.Equal(defaultEnv.Cli([]string{\"bad\", \"-v\"}), 0, \"-v should work anywhere\")\n\tassert.Equal(defaultEnv.Cli([]string{\"bad\", \"--version\"}), 0, \"--version should work anywhere\")\n\n\t\/\/ Test that \"version\" can't appear anywhere\n\tassert.Equal(defaultEnv.Cli([]string{\"bad\", \"version\"}), 1, \"version should NOT work anywhere\")\n}\n\nfunc TestEnvironment_DefaultUi(t *testing.T) {\n\tassert := asserts.NewTestingAsserts(t, true)\n\n\tdefaultEnv := NewEnvironment(nil)\n\tassert.NotNil(defaultEnv.Ui(), \"default UI should not be nil\")\n\n\trwUi, ok := defaultEnv.Ui().(*ReaderWriterUi)\n\tassert.True(ok, \"default UI should be ReaderWriterUi\")\n\tassert.Equal(rwUi.Writer, os.Stdout, \"default UI should go to stdout\")\n\tassert.Equal(rwUi.Reader, os.Stdin, \"default UI should read from stdin\")\n}\n\nfunc TestEnvironment_PrintHelp(t *testing.T) {\n\t\/\/ Just call the function and verify that no panics occur\n\ttestEnvironment().PrintHelp()\n}\n\nfunc TestEnvironment_SettingUi(t *testing.T) {\n\tassert := asserts.NewTestingAsserts(t, true)\n\n\tui := &ReaderWriterUi{new(bytes.Buffer), new(bytes.Buffer)}\n\n\tconfig := &EnvironmentConfig{}\n\tconfig.Ui = ui\n\n\tenv := NewEnvironment(config)\n\n\tassert.Equal(env.Ui(), ui, \"UIs should be equal\")\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>add CreateDirForFileMust()<commit_after><|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/heroku\/busl\/assets\"\n\t\"github.com\/heroku\/busl\/broker\"\n\t\"github.com\/heroku\/busl\/storage\"\n\t\"github.com\/heroku\/busl\/util\"\n)\n\nfunc handleError(w http.ResponseWriter, r *http.Request, err error) {\n\tif err == broker.ErrNotRegistered || err == storage.ErrNoStorage {\n\t\tmessage := \"Channel is not registered.\"\n\t\tif r.Header.Get(\"Accept\") == \"text\/ascii; version=feral\" {\n\t\t\tmessage = assets.HttpCatGone\n\t\t}\n\n\t\thttp.Error(w, message, http.StatusNotFound)\n\n\t} else if err != nil {\n\t\tutil.CountWithData(\"server.handleError\", 1, \"error=%s\", err.Error())\n\t\thttp.Error(w, \"Internal Server Error\", http.StatusInternalServerError)\n\t}\n}\n<commit_msg>Use errNoContent to signal 204<commit_after>package server\n\nimport (\n\t\"errors\"\n\t\"net\/http\"\n\n\t\"github.com\/heroku\/busl\/assets\"\n\t\"github.com\/heroku\/busl\/broker\"\n\t\"github.com\/heroku\/busl\/storage\"\n\t\"github.com\/heroku\/busl\/util\"\n)\n\nvar errNoContent = errors.New(\"No Content\")\n\nfunc handleError(w http.ResponseWriter, r *http.Request, err error) {\n\tif err == broker.ErrNotRegistered || err == storage.ErrNoStorage {\n\t\tmessage := \"Channel is not registered.\"\n\t\tif r.Header.Get(\"Accept\") == \"text\/ascii; version=feral\" {\n\t\t\tmessage = assets.HttpCatGone\n\t\t}\n\n\t\thttp.Error(w, message, http.StatusNotFound)\n\n\t} else if err == errNoContent {\n\t\t\/\/ As indicated in the w3 spec[1] an SSE stream\n\t\t\/\/ that's already done should return a `204 No Content`\n\t\t\/\/ [1]: http:\/\/www.w3.org\/TR\/2012\/WD-eventsource-20120426\/\n\t\tw.WriteHeader(http.StatusNoContent)\n\n\t} else if err != nil {\n\t\tutil.CountWithData(\"server.handleError\", 1, \"error=%s\", err.Error())\n\t\thttp.Error(w, \"Internal Server Error\", http.StatusInternalServerError)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Fix race updating piece priority when piece being read is unexpectedly unavailable<commit_after><|endoftext|>"} {"text":"<commit_before>package storage\n\nimport \"testing\"\n\n\/**\n * ZooKeeper Storage Manager Test\n *\n * @author: Anant Bhardwaj\n * @date: 05\/11\/2014\n *\/\n\nfunc TestBasicLeaderModified(t *testing.T) {\n servers := \"localhost:2181\"\n sm := MakeStorageManager()\n sm.Open(servers)\n defer sm.Close()\n sm.Create(\"\/test\", \"\")\n sm.Create(\"\/test\/_var_tmp_824-1000_px-12568-basic-1\", \"\")\n sm.Create(\"\/test\/_var_tmp_824-1000_px-12568-basic-1\/store\", \"\")\n sm.Create(\"\/test\/_var_tmp_824-1000_px-12568-basic-1\/done\", \"\")\n sm.Create(\"\/test\/_var_tmp_824-1000_px-12568-basic-1\/store\/0\", \"6.824\")\n\n _ = sm.Write(\"\/test\/_var_tmp_824-1000_px-12568-basic-1\/store\/0\", \"6.824\")\n data, _ := sm.Read(\"\/test\/_var_tmp_824-1000_px-12568-basic-1\/store\/0\")\n if data != \"6.824\" {\n t.Fatalf(\"got=%v wanted=%v\", data, \"6.824\")\n }\n\n sm.Close(servers)\n sm.Open(servers)\n data, _ := sm.Read(\"\/test\/_var_tmp_824-1000_px-12568-basic-1\/store\/0\")\n if data != \"6.824\" {\n t.Fatalf(\"got=%v wanted=%v\", data, \"6.824\")\n }\n}\n\n<commit_msg>more tests<commit_after>package storage\n\nimport \"testing\"\n\n\/**\n * ZooKeeper Storage Manager Test\n *\n * @author: Anant Bhardwaj\n * @date: 05\/11\/2014\n *\/\n\nfunc TestBasicLeaderModified(t *testing.T) {\n servers := \"localhost:2181\"\n sm := MakeStorageManager()\n sm.Open(servers)\n defer sm.Close()\n sm.Create(\"\/test\", \"\")\n sm.Create(\"\/test\/_var_tmp_824-1000_px-12568-basic-1\", \"\")\n sm.Create(\"\/test\/_var_tmp_824-1000_px-12568-basic-1\/store\", \"\")\n sm.Create(\"\/test\/_var_tmp_824-1000_px-12568-basic-1\/done\", \"\")\n sm.Create(\"\/test\/_var_tmp_824-1000_px-12568-basic-1\/store\/0\", \"6.824\")\n\n _ = sm.Write(\"\/test\/_var_tmp_824-1000_px-12568-basic-1\/store\/0\", \"6.824\")\n data, _ := sm.Read(\"\/test\/_var_tmp_824-1000_px-12568-basic-1\/store\/0\")\n if data != \"6.824\" {\n t.Fatalf(\"got=%v wanted=%v\", data, \"6.824\")\n }\n\n sm.Close()\n sm.Open(servers)\n data, _ := sm.Read(\"\/test\/_var_tmp_824-1000_px-12568-basic-1\/store\/0\")\n if data != \"6.824\" {\n t.Fatalf(\"got=%v wanted=%v\", data, \"6.824\")\n }\n sm.Close()\n}\n\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"mime\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"net\/textproto\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strconv\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/cenkalti\/backoff\"\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/livepeer\/lpms\/ffmpeg\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"google.golang.org\/grpc\/credentials\"\n\t\"google.golang.org\/grpc\/status\"\n\n\t\"github.com\/livepeer\/go-livepeer\/common\"\n\t\"github.com\/livepeer\/go-livepeer\/core\"\n\t\"github.com\/livepeer\/go-livepeer\/net\"\n)\n\nconst protoVerLPT = \"Livepeer-Transcoder-1.0\"\nconst transcodingErrorMimeType = \"livepeer\/transcoding-error\"\n\nvar errSecret = errors.New(\"Invalid secret\")\nvar errZeroCapacity = errors.New(\"Zero capacity\")\n\n\/\/ Standalone Transcoder\n\n\/\/ RunTranscoder is main routing of standalone transcoder\n\/\/ Exiting it will terminate executable\nfunc RunTranscoder(n *core.LivepeerNode, orchAddr string, capacity int) {\n\texpb := backoff.NewExponentialBackOff()\n\texpb.MaxInterval = time.Minute\n\texpb.MaxElapsedTime = 0\n\tbackoff.Retry(func() error {\n\t\tglog.Info(\"Registering transcoder to \", orchAddr)\n\t\terr := runTranscoder(n, orchAddr, capacity)\n\t\tglog.Info(\"Unregistering transcoder: \", err)\n\t\tif _, fatal := err.(core.RemoteTranscoderFatalError); fatal {\n\t\t\tglog.Info(\"Terminating transcoder because of \", err)\n\t\t\t\/\/ Returning nil here will make `backoff` to stop trying to reconnect and exit\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ By returning error we tell `backoff` to try to connect again\n\t\treturn err\n\t}, expb)\n}\n\nfunc checkTranscoderError(err error) error {\n\tif err != nil {\n\t\ts := status.Convert(err)\n\t\tif s.Message() == errSecret.Error() { \/\/ consider this unrecoverable\n\t\t\treturn core.NewRemoteTranscoderFatalError(errSecret)\n\t\t}\n\t\tif s.Message() == errZeroCapacity.Error() { \/\/ consider this unrecoverable\n\t\t\treturn core.NewRemoteTranscoderFatalError(errZeroCapacity)\n\t\t}\n\t\tif status.Code(err) == codes.Canceled {\n\t\t\treturn core.NewRemoteTranscoderFatalError(fmt.Errorf(\"Execution interrupted\"))\n\t\t}\n\t}\n\treturn err\n}\n\nfunc runTranscoder(n *core.LivepeerNode, orchAddr string, capacity int) error {\n\ttlsConfig := &tls.Config{InsecureSkipVerify: true}\n\tconn, err := grpc.Dial(orchAddr,\n\t\tgrpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)))\n\tif err != nil {\n\t\tglog.Error(\"Did not connect transcoder to orchesrator: \", err)\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\tc := net.NewTranscoderClient(conn)\n\tctx := context.Background()\n\tctx, cancel := context.WithCancel(ctx)\n\t\/\/ Silence linter\n\tdefer cancel()\n\tr, err := c.RegisterTranscoder(ctx, &net.RegisterRequest{Secret: n.OrchSecret, Capacity: int64(capacity)})\n\tif err := checkTranscoderError(err); err != nil {\n\t\tglog.Error(\"Could not register transcoder to orchestrator \", err)\n\t\treturn err\n\t}\n\n\t\/\/ Catch interrupt signal to shut down transcoder\n\texitc := make(chan os.Signal)\n\tsignal.Notify(exitc, os.Interrupt, syscall.SIGTERM)\n\tdefer signal.Stop(exitc)\n\tgo func() {\n\t\tselect {\n\t\tcase sig := <-exitc:\n\t\t\tglog.Infof(\"Exiting Livepeer Transcoder: %v\", sig)\n\t\t\t\/\/ Cancelling context will close connection to orchestrator\n\t\t\tcancel()\n\t\t\treturn\n\t\t}\n\t}()\n\n\thttpc := &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}\n\tvar wg sync.WaitGroup\n\tfor {\n\t\tnotify, err := r.Recv()\n\t\tif err := checkTranscoderError(err); err != nil {\n\t\t\tglog.Infof(`End of stream receive cycle because of err=\"%v\", waiting for running transcode jobs to complete`, err)\n\t\t\twg.Wait()\n\t\t\treturn err\n\t\t}\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\trunTranscode(n, orchAddr, httpc, notify)\n\t\t\twg.Done()\n\t\t}()\n\t}\n}\n\nfunc runTranscode(n *core.LivepeerNode, orchAddr string, httpc *http.Client, notify *net.NotifySegment) {\n\tprofiles := []ffmpeg.VideoProfile{}\n\tif len(notify.FullProfiles) > 0 {\n\t\tprofiles = makeFfmpegVideoProfiles(notify.FullProfiles)\n\t} else if len(notify.Profiles) > 0 {\n\t\tprof, err := common.TxDataToVideoProfile(hex.EncodeToString(notify.Profiles))\n\t\tprofiles = prof\n\t\tif err != nil {\n\t\t\tglog.Error(\"Unable to deserialize profiles \", err)\n\t\t}\n\t}\n\n\tglog.Infof(\"Transcoding taskId=%d url=%s\", notify.TaskId, notify.Url)\n\tvar contentType string\n\tvar body bytes.Buffer\n\n\ttData, err := n.Transcoder.Transcode(notify.Job, notify.Url, profiles)\n\tglog.V(common.VERBOSE).Infof(\"Transcoding done for taskId=%d url=%s err=%v\", notify.TaskId, notify.Url, err)\n\tif err != nil {\n\t\tglog.Error(\"Unable to transcode \", err)\n\t\tbody.Write([]byte(err.Error()))\n\t\tcontentType = transcodingErrorMimeType\n\t} else {\n\t\tboundary := common.RandName()\n\t\tw := multipart.NewWriter(&body)\n\t\tfor _, v := range tData.Segments {\n\t\t\tw.SetBoundary(boundary)\n\t\t\thdrs := textproto.MIMEHeader{\n\t\t\t\t\"Content-Type\": {\"video\/MP2T\"},\n\t\t\t\t\"Content-Length\": {strconv.Itoa(len(v.Data))},\n\t\t\t\t\"Pixels\": {strconv.FormatInt(v.Pixels, 10)},\n\t\t\t}\n\t\t\tfw, err := w.CreatePart(hdrs)\n\t\t\tif err != nil {\n\t\t\t\tglog.Error(\"Could not create multipart part \", err)\n\t\t\t}\n\t\t\tio.Copy(fw, bytes.NewBuffer(v.Data))\n\t\t}\n\t\tw.Close()\n\t\tcontentType = \"multipart\/mixed; boundary=\" + boundary\n\t}\n\treq, err := http.NewRequest(\"POST\", \"https:\/\/\"+orchAddr+\"\/transcodeResults\", &body)\n\tif err != nil {\n\t\tglog.Error(\"Error posting results \", err)\n\t}\n\treq.Header.Set(\"Authorization\", protoVerLPT)\n\treq.Header.Set(\"Credentials\", n.OrchSecret)\n\treq.Header.Set(\"Content-Type\", contentType)\n\treq.Header.Set(\"TaskId\", strconv.FormatInt(notify.TaskId, 10))\n\tif tData != nil {\n\t\treq.Header.Set(\"Pixels\", strconv.FormatInt(tData.Pixels, 10))\n\t}\n\tresp, err := httpc.Do(req)\n\tif err != nil {\n\t\tglog.Error(\"Error submitting results \", err)\n\t} else {\n\t\tioutil.ReadAll(resp.Body)\n\t\tresp.Body.Close()\n\t}\n\tglog.V(common.VERBOSE).Infof(\"Transcoding done results sent for taskId=%d url=%s err=%v\", notify.TaskId, notify.Url, err)\n}\n\n\/\/ Orchestrator gRPC\n\nfunc (h *lphttp) RegisterTranscoder(req *net.RegisterRequest, stream net.Transcoder_RegisterTranscoderServer) error {\n\tfrom := common.GetConnectionAddr(stream.Context())\n\tglog.Infof(\"Got a RegisterTranscoder request from transcoder=%s capacity=%d\", from, req.Capacity)\n\n\tif req.Secret != h.orchestrator.TranscoderSecret() {\n\t\tglog.Info(errSecret.Error())\n\t\treturn errSecret\n\t}\n\tif req.Capacity <= 0 {\n\t\tglog.Info(errZeroCapacity.Error())\n\t\treturn errZeroCapacity\n\t}\n\n\t\/\/ blocks until stream is finished\n\th.orchestrator.ServeTranscoder(stream, int(req.Capacity))\n\treturn nil\n}\n\n\/\/ Orchestrator HTTP\n\nfunc (h *lphttp) TranscodeResults(w http.ResponseWriter, r *http.Request) {\n\torch := h.orchestrator\n\n\tauthType := r.Header.Get(\"Authorization\")\n\tcreds := r.Header.Get(\"Credentials\")\n\tif protoVerLPT != authType {\n\t\tglog.Error(\"Invalid auth type \", authType)\n\t\thttp.Error(w, \"Unauthorized\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tif creds != orch.TranscoderSecret() {\n\t\tglog.Error(\"Invalid shared secret\")\n\t\thttp.Error(w, \"Unauthorized\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tmediaType, params, err := mime.ParseMediaType(r.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tglog.Error(\"Error getting mime type \", err)\n\t\thttp.Error(w, err.Error(), http.StatusUnsupportedMediaType)\n\t\treturn\n\t}\n\n\ttid, err := strconv.ParseInt(r.Header.Get(\"TaskId\"), 10, 64)\n\tif err != nil {\n\t\tglog.Error(\"Could not parse task ID \", err)\n\t\thttp.Error(w, \"Invalid Task ID\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tdecodedPixels, err := strconv.ParseInt(r.Header.Get(\"Pixels\"), 10, 64)\n\tif err != nil {\n\t\tglog.Error(\"Could not parse decoded pixels\", err)\n\t\thttp.Error(w, \"Invalid Pixels\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar res core.RemoteTranscoderResult\n\tif transcodingErrorMimeType == mediaType {\n\t\tw.Write([]byte(\"OK\"))\n\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Unable to read transcoding error body taskID=%v err=%v\", tid, err)\n\t\t\tres.Err = err\n\t\t} else {\n\t\t\tres.Err = fmt.Errorf(string(body))\n\t\t}\n\t\tglog.Errorf(\"Trascoding error for taskID=%v err=%v\", tid, res.Err)\n\t\torch.TranscoderResults(tid, &res)\n\t\treturn\n\t}\n\n\tvar segments []*core.TranscodedSegmentData\n\tif \"multipart\/mixed\" == mediaType {\n\t\tmr := multipart.NewReader(r.Body, params[\"boundary\"])\n\t\tfor {\n\t\t\tp, err := mr.NextPart()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tglog.Error(\"Could not process multipart part \", err)\n\t\t\t\tres.Err = err\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tbody, err := ioutil.ReadAll(p)\n\t\t\tif err != nil {\n\t\t\t\tglog.Error(\"Error reading body \", err)\n\t\t\t\tres.Err = err\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tencodedPixels, err := strconv.ParseInt(p.Header.Get(\"Pixels\"), 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tglog.Error(\"Error getting pixels in header:\", err)\n\t\t\t\tres.Err = err\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tsegments = append(segments, &core.TranscodedSegmentData{Data: body, Pixels: encodedPixels})\n\t\t}\n\t\tres.TranscodeData = &core.TranscodeData{\n\t\t\tSegments: segments,\n\t\t\tPixels: decodedPixels,\n\t\t}\n\t\torch.TranscoderResults(tid, &res)\n\t}\n\tif res.Err != nil {\n\t\thttp.Error(w, res.Err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Write([]byte(\"OK\"))\n}\n<commit_msg>Explicitly use HTTP2 transport in communication between orchestrator and transcoder. Mitigates issue in the golang's implementation of HTTP2 https:\/\/github.com\/golang\/go\/issues\/32441<commit_after>package server\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"mime\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"net\/textproto\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strconv\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/cenkalti\/backoff\"\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/livepeer\/lpms\/ffmpeg\"\n\t\"golang.org\/x\/net\/http2\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"google.golang.org\/grpc\/credentials\"\n\t\"google.golang.org\/grpc\/status\"\n\n\t\"github.com\/livepeer\/go-livepeer\/common\"\n\t\"github.com\/livepeer\/go-livepeer\/core\"\n\t\"github.com\/livepeer\/go-livepeer\/net\"\n)\n\nconst protoVerLPT = \"Livepeer-Transcoder-1.0\"\nconst transcodingErrorMimeType = \"livepeer\/transcoding-error\"\n\nvar errSecret = errors.New(\"Invalid secret\")\nvar errZeroCapacity = errors.New(\"Zero capacity\")\n\n\/\/ Standalone Transcoder\n\n\/\/ RunTranscoder is main routing of standalone transcoder\n\/\/ Exiting it will terminate executable\nfunc RunTranscoder(n *core.LivepeerNode, orchAddr string, capacity int) {\n\texpb := backoff.NewExponentialBackOff()\n\texpb.MaxInterval = time.Minute\n\texpb.MaxElapsedTime = 0\n\tbackoff.Retry(func() error {\n\t\tglog.Info(\"Registering transcoder to \", orchAddr)\n\t\terr := runTranscoder(n, orchAddr, capacity)\n\t\tglog.Info(\"Unregistering transcoder: \", err)\n\t\tif _, fatal := err.(core.RemoteTranscoderFatalError); fatal {\n\t\t\tglog.Info(\"Terminating transcoder because of \", err)\n\t\t\t\/\/ Returning nil here will make `backoff` to stop trying to reconnect and exit\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ By returning error we tell `backoff` to try to connect again\n\t\treturn err\n\t}, expb)\n}\n\nfunc checkTranscoderError(err error) error {\n\tif err != nil {\n\t\ts := status.Convert(err)\n\t\tif s.Message() == errSecret.Error() { \/\/ consider this unrecoverable\n\t\t\treturn core.NewRemoteTranscoderFatalError(errSecret)\n\t\t}\n\t\tif s.Message() == errZeroCapacity.Error() { \/\/ consider this unrecoverable\n\t\t\treturn core.NewRemoteTranscoderFatalError(errZeroCapacity)\n\t\t}\n\t\tif status.Code(err) == codes.Canceled {\n\t\t\treturn core.NewRemoteTranscoderFatalError(fmt.Errorf(\"Execution interrupted\"))\n\t\t}\n\t}\n\treturn err\n}\n\nfunc runTranscoder(n *core.LivepeerNode, orchAddr string, capacity int) error {\n\ttlsConfig := &tls.Config{InsecureSkipVerify: true}\n\tconn, err := grpc.Dial(orchAddr,\n\t\tgrpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)))\n\tif err != nil {\n\t\tglog.Error(\"Did not connect transcoder to orchesrator: \", err)\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\tc := net.NewTranscoderClient(conn)\n\tctx := context.Background()\n\tctx, cancel := context.WithCancel(ctx)\n\t\/\/ Silence linter\n\tdefer cancel()\n\tr, err := c.RegisterTranscoder(ctx, &net.RegisterRequest{Secret: n.OrchSecret, Capacity: int64(capacity)})\n\tif err := checkTranscoderError(err); err != nil {\n\t\tglog.Error(\"Could not register transcoder to orchestrator \", err)\n\t\treturn err\n\t}\n\n\t\/\/ Catch interrupt signal to shut down transcoder\n\texitc := make(chan os.Signal)\n\tsignal.Notify(exitc, os.Interrupt, syscall.SIGTERM)\n\tdefer signal.Stop(exitc)\n\tgo func() {\n\t\tselect {\n\t\tcase sig := <-exitc:\n\t\t\tglog.Infof(\"Exiting Livepeer Transcoder: %v\", sig)\n\t\t\t\/\/ Cancelling context will close connection to orchestrator\n\t\t\tcancel()\n\t\t\treturn\n\t\t}\n\t}()\n\n\thttpc := &http.Client{Transport: &http2.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}\n\tvar wg sync.WaitGroup\n\tfor {\n\t\tnotify, err := r.Recv()\n\t\tif err := checkTranscoderError(err); err != nil {\n\t\t\tglog.Infof(`End of stream receive cycle because of err=\"%v\", waiting for running transcode jobs to complete`, err)\n\t\t\twg.Wait()\n\t\t\treturn err\n\t\t}\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\trunTranscode(n, orchAddr, httpc, notify)\n\t\t\twg.Done()\n\t\t}()\n\t}\n}\n\nfunc runTranscode(n *core.LivepeerNode, orchAddr string, httpc *http.Client, notify *net.NotifySegment) {\n\tprofiles := []ffmpeg.VideoProfile{}\n\tif len(notify.FullProfiles) > 0 {\n\t\tprofiles = makeFfmpegVideoProfiles(notify.FullProfiles)\n\t} else if len(notify.Profiles) > 0 {\n\t\tprof, err := common.TxDataToVideoProfile(hex.EncodeToString(notify.Profiles))\n\t\tprofiles = prof\n\t\tif err != nil {\n\t\t\tglog.Error(\"Unable to deserialize profiles \", err)\n\t\t}\n\t}\n\n\tglog.Infof(\"Transcoding taskId=%d url=%s\", notify.TaskId, notify.Url)\n\tvar contentType string\n\tvar body bytes.Buffer\n\n\ttData, err := n.Transcoder.Transcode(notify.Job, notify.Url, profiles)\n\tglog.V(common.VERBOSE).Infof(\"Transcoding done for taskId=%d url=%s err=%v\", notify.TaskId, notify.Url, err)\n\tif err != nil {\n\t\tglog.Error(\"Unable to transcode \", err)\n\t\tbody.Write([]byte(err.Error()))\n\t\tcontentType = transcodingErrorMimeType\n\t} else {\n\t\tboundary := common.RandName()\n\t\tw := multipart.NewWriter(&body)\n\t\tfor _, v := range tData.Segments {\n\t\t\tw.SetBoundary(boundary)\n\t\t\thdrs := textproto.MIMEHeader{\n\t\t\t\t\"Content-Type\": {\"video\/MP2T\"},\n\t\t\t\t\"Content-Length\": {strconv.Itoa(len(v.Data))},\n\t\t\t\t\"Pixels\": {strconv.FormatInt(v.Pixels, 10)},\n\t\t\t}\n\t\t\tfw, err := w.CreatePart(hdrs)\n\t\t\tif err != nil {\n\t\t\t\tglog.Error(\"Could not create multipart part \", err)\n\t\t\t}\n\t\t\tio.Copy(fw, bytes.NewBuffer(v.Data))\n\t\t}\n\t\tw.Close()\n\t\tcontentType = \"multipart\/mixed; boundary=\" + boundary\n\t}\n\treq, err := http.NewRequest(\"POST\", \"https:\/\/\"+orchAddr+\"\/transcodeResults\", &body)\n\tif err != nil {\n\t\tglog.Error(\"Error posting results \", err)\n\t}\n\treq.Header.Set(\"Authorization\", protoVerLPT)\n\treq.Header.Set(\"Credentials\", n.OrchSecret)\n\treq.Header.Set(\"Content-Type\", contentType)\n\treq.Header.Set(\"TaskId\", strconv.FormatInt(notify.TaskId, 10))\n\tif tData != nil {\n\t\treq.Header.Set(\"Pixels\", strconv.FormatInt(tData.Pixels, 10))\n\t}\n\tresp, err := httpc.Do(req)\n\tif err != nil {\n\t\tglog.Error(\"Error submitting results \", err)\n\t} else {\n\t\tioutil.ReadAll(resp.Body)\n\t\tresp.Body.Close()\n\t}\n\tglog.V(common.VERBOSE).Infof(\"Transcoding done results sent for taskId=%d url=%s err=%v\", notify.TaskId, notify.Url, err)\n}\n\n\/\/ Orchestrator gRPC\n\nfunc (h *lphttp) RegisterTranscoder(req *net.RegisterRequest, stream net.Transcoder_RegisterTranscoderServer) error {\n\tfrom := common.GetConnectionAddr(stream.Context())\n\tglog.Infof(\"Got a RegisterTranscoder request from transcoder=%s capacity=%d\", from, req.Capacity)\n\n\tif req.Secret != h.orchestrator.TranscoderSecret() {\n\t\tglog.Info(errSecret.Error())\n\t\treturn errSecret\n\t}\n\tif req.Capacity <= 0 {\n\t\tglog.Info(errZeroCapacity.Error())\n\t\treturn errZeroCapacity\n\t}\n\n\t\/\/ blocks until stream is finished\n\th.orchestrator.ServeTranscoder(stream, int(req.Capacity))\n\treturn nil\n}\n\n\/\/ Orchestrator HTTP\n\nfunc (h *lphttp) TranscodeResults(w http.ResponseWriter, r *http.Request) {\n\torch := h.orchestrator\n\n\tauthType := r.Header.Get(\"Authorization\")\n\tcreds := r.Header.Get(\"Credentials\")\n\tif protoVerLPT != authType {\n\t\tglog.Error(\"Invalid auth type \", authType)\n\t\thttp.Error(w, \"Unauthorized\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tif creds != orch.TranscoderSecret() {\n\t\tglog.Error(\"Invalid shared secret\")\n\t\thttp.Error(w, \"Unauthorized\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tmediaType, params, err := mime.ParseMediaType(r.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tglog.Error(\"Error getting mime type \", err)\n\t\thttp.Error(w, err.Error(), http.StatusUnsupportedMediaType)\n\t\treturn\n\t}\n\n\ttid, err := strconv.ParseInt(r.Header.Get(\"TaskId\"), 10, 64)\n\tif err != nil {\n\t\tglog.Error(\"Could not parse task ID \", err)\n\t\thttp.Error(w, \"Invalid Task ID\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tdecodedPixels, err := strconv.ParseInt(r.Header.Get(\"Pixels\"), 10, 64)\n\tif err != nil {\n\t\tglog.Error(\"Could not parse decoded pixels\", err)\n\t\thttp.Error(w, \"Invalid Pixels\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tvar res core.RemoteTranscoderResult\n\tif transcodingErrorMimeType == mediaType {\n\t\tw.Write([]byte(\"OK\"))\n\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Unable to read transcoding error body taskID=%v err=%v\", tid, err)\n\t\t\tres.Err = err\n\t\t} else {\n\t\t\tres.Err = fmt.Errorf(string(body))\n\t\t}\n\t\tglog.Errorf(\"Trascoding error for taskID=%v err=%v\", tid, res.Err)\n\t\torch.TranscoderResults(tid, &res)\n\t\treturn\n\t}\n\n\tvar segments []*core.TranscodedSegmentData\n\tif \"multipart\/mixed\" == mediaType {\n\t\tmr := multipart.NewReader(r.Body, params[\"boundary\"])\n\t\tfor {\n\t\t\tp, err := mr.NextPart()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tglog.Error(\"Could not process multipart part \", err)\n\t\t\t\tres.Err = err\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tbody, err := ioutil.ReadAll(p)\n\t\t\tif err != nil {\n\t\t\t\tglog.Error(\"Error reading body \", err)\n\t\t\t\tres.Err = err\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tencodedPixels, err := strconv.ParseInt(p.Header.Get(\"Pixels\"), 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tglog.Error(\"Error getting pixels in header:\", err)\n\t\t\t\tres.Err = err\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tsegments = append(segments, &core.TranscodedSegmentData{Data: body, Pixels: encodedPixels})\n\t\t}\n\t\tres.TranscodeData = &core.TranscodeData{\n\t\t\tSegments: segments,\n\t\t\tPixels: decodedPixels,\n\t\t}\n\t\torch.TranscoderResults(tid, &res)\n\t}\n\tif res.Err != nil {\n\t\thttp.Error(w, res.Err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Write([]byte(\"OK\"))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/NYTimes\/gziphandler\"\n\t\"github.com\/discoviking\/website\/server\/storage\"\n\t\"github.com\/gorilla\/mux\"\n\t\"net\/http\"\n)\n\nfunc createRouter(storageService storage.Service) *mux.Router {\n\t\/\/ Main Router.\n\tr := mux.NewRouter()\n\n\tr.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(w, r, \"..\/app\/build\/src\/index.html\")\n\t})\n\n\tr.HandleFunc(\"\/app\/{path:.*}\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(w, r, \"..\/app\/build\/src\/index.html\")\n\t})\n\n\tappHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(w, r, \"..\/app\/build\/src\/app.js\")\n\t})\n\tr.Handle(\"\/app.js\", gziphandler.GzipHandler(appHandler))\n\n\tfs := http.FileServer(http.Dir(\"..\/app\/build\/src\/assets\/\"))\n\tr.Handle(\"\/assets\/{assetPath:.*}\", http.StripPrefix(\"\/assets\/\", fs))\n\n\tstorageHandler := storage.NewHandler(storageService)\n\tr.Handle(\"\/storage\/{key}\", http.StripPrefix(\"\/storage\", storageHandler))\n\n\treturn r\n}\n<commit_msg>Simplify router a bit<commit_after>package main\n\nimport (\n\t\"github.com\/NYTimes\/gziphandler\"\n\t\"github.com\/discoviking\/website\/server\/storage\"\n\t\"github.com\/gorilla\/mux\"\n\t\"net\/http\"\n)\n\nfunc createRouter(storageService storage.Service) *mux.Router {\n\t\/\/ Main Router.\n\tr := mux.NewRouter()\n\n\tfs := http.FileServer(http.Dir(\"..\/app\/build\/src\/assets\/\"))\n\tr.Handle(\"\/assets\/{assetPath:.*}\", http.StripPrefix(\"\/assets\/\", fs))\n\n\tstorageHandler := storage.NewHandler(storageService)\n\tr.Handle(\"\/storage\/{key}\", http.StripPrefix(\"\/storage\", storageHandler))\n\n\tappHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(w, r, \"..\/app\/build\/src\/app.js\")\n\t})\n\tr.Handle(\"\/app.js\", gziphandler.GzipHandler(appHandler))\n\n\t\/\/ For all other paths just serve the app and defer to the front-end to handle it.\n\tr.HandleFunc(\"\/{path:.*}\", func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.ServeFile(w, r, \"..\/app\/build\/src\/index.html\")\n\t})\n\n\treturn r\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/rand\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Shopify\/sarama\"\n\tmetrics \"github.com\/rcrowley\/go-metrics\"\n)\n\nvar (\n\tsync = flag.Bool(\n\t\t\"sync\",\n\t\tfalse,\n\t\t\"Use a synchronous producer.\",\n\t)\n\tmessageLoad = flag.Int(\n\t\t\"message-load\",\n\t\t0,\n\t\t\"REQUIRED: The number of messages to produce to -topic.\",\n\t)\n\tmessageSize = flag.Int(\n\t\t\"message-size\",\n\t\t0,\n\t\t\"REQUIRED: The approximate size (in bytes) of each message to produce to -topic.\",\n\t)\n\tbrokers = flag.String(\n\t\t\"brokers\",\n\t\t\"\",\n\t\t\"REQUIRED: A comma separated list of broker addresses.\",\n\t)\n\ttopic = flag.String(\n\t\t\"topic\",\n\t\t\"\",\n\t\t\"REQUIRED: The topic to run the performance test on.\",\n\t)\n\tpartition = flag.Int(\n\t\t\"partition\",\n\t\t-1,\n\t\t\"The partition of -topic to run the performance test on.\",\n\t)\n\tthroughput = flag.Int(\n\t\t\"throughput\",\n\t\t0,\n\t\t\"The maximum number of messages to send per second (0 for no limit).\",\n\t)\n\tmaxMessageBytes = flag.Int(\n\t\t\"max-message-bytes\",\n\t\t1000000,\n\t\t\"The max permitted size of a message.\",\n\t)\n\trequiredAcks = flag.Int(\n\t\t\"required-acks\",\n\t\t1,\n\t\t\"The required number of acks needed from the broker (-1: all, 0: none, 1: local).\",\n\t)\n\ttimeout = flag.Duration(\n\t\t\"timeout\",\n\t\t10*time.Second,\n\t\t\"The duration the producer will wait to receive -required-acks.\",\n\t)\n\tpartitioner = flag.String(\n\t\t\"partitioner\",\n\t\t\"roundrobin\",\n\t\t\"The partitioning scheme to use (hash, manual, random, roundrobin).\",\n\t)\n\tcompression = flag.String(\n\t\t\"compression\",\n\t\t\"none\",\n\t\t\"The compression method to use (none, gzip, snappy, lz4).\",\n\t)\n\tflushFrequency = flag.Duration(\n\t\t\"flush-frequency\",\n\t\t0,\n\t\t\"The best-effort frequency of flushes.\",\n\t)\n\tflushBytes = flag.Int(\n\t\t\"flush-bytes\",\n\t\t0,\n\t\t\"The best-effort number of bytes needed to trigger a flush.\",\n\t)\n\tflushMessages = flag.Int(\n\t\t\"flush-messages\",\n\t\t0,\n\t\t\"The best-effort number of messages needed to trigger a flush.\",\n\t)\n\tflushMaxMessages = flag.Int(\n\t\t\"flush-max-messages\",\n\t\t0,\n\t\t\"The maximum number of messages the producer will send in a single request.\",\n\t)\n\tretryMax = flag.Int(\n\t\t\"retry-max\",\n\t\t3,\n\t\t\"The total number of times to retry sending a message.\",\n\t)\n\tretryBackoff = flag.Duration(\n\t\t\"retry-backoff\",\n\t\t100*time.Millisecond,\n\t\t\"The duration the producer will wait for the cluster to settle between retries.\",\n\t)\n\tclientID = flag.String(\n\t\t\"client-id\",\n\t\t\"sarama\",\n\t\t\"The client ID sent with every request to the brokers.\",\n\t)\n\tchannelBufferSize = flag.Int(\n\t\t\"channel-buffer-size\",\n\t\t256,\n\t\t\"The number of events to buffer in internal and external channels.\",\n\t)\n\tversion = flag.String(\n\t\t\"version\",\n\t\t\"0.8.2.0\",\n\t\t\"The assumed version of Kafka.\",\n\t)\n)\n\nfunc parseCompression(scheme string) sarama.CompressionCodec {\n\tswitch scheme {\n\tcase \"none\":\n\t\treturn sarama.CompressionNone\n\tcase \"gzip\":\n\t\treturn sarama.CompressionGZIP\n\tcase \"snappy\":\n\t\treturn sarama.CompressionSnappy\n\tcase \"lz4\":\n\t\treturn sarama.CompressionLZ4\n\tdefault:\n\t\tprintUsageErrorAndExit(fmt.Sprintf(\"Unknown -compression: %s\", scheme))\n\t}\n\tpanic(\"should not happen\")\n}\n\nfunc parsePartitioner(scheme string, partition int) sarama.PartitionerConstructor {\n\tif partition < 0 && scheme == \"manual\" {\n\t\tprintUsageErrorAndExit(\"-partition must not be -1 for -partitioning=manual\")\n\t}\n\tswitch scheme {\n\tcase \"manual\":\n\t\treturn sarama.NewManualPartitioner\n\tcase \"hash\":\n\t\treturn sarama.NewHashPartitioner\n\tcase \"random\":\n\t\treturn sarama.NewRandomPartitioner\n\tcase \"roundrobin\":\n\t\treturn sarama.NewRoundRobinPartitioner\n\tdefault:\n\t\tprintUsageErrorAndExit(fmt.Sprintf(\"Unknown -partitioning: %s\", scheme))\n\t}\n\tpanic(\"should not happen\")\n}\n\nfunc parseVersion(version string) sarama.KafkaVersion {\n\tresult, err := sarama.ParseKafkaVersion(version)\n\tif err != nil {\n\t\tprintUsageErrorAndExit(fmt.Sprintf(\"unknown -version: %s\", version))\n\t}\n\treturn result\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif *brokers == \"\" {\n\t\tprintUsageErrorAndExit(\"-brokers is required\")\n\t}\n\tif *topic == \"\" {\n\t\tprintUsageErrorAndExit(\"-topic is required\")\n\t}\n\tif *messageLoad <= 0 {\n\t\tprintUsageErrorAndExit(\"-message-load must be greater than 0\")\n\t}\n\tif *messageSize <= 0 {\n\t\tprintUsageErrorAndExit(\"-message-size must be greater than 0\")\n\t}\n\n\tconfig := sarama.NewConfig()\n\n\tconfig.Producer.MaxMessageBytes = *maxMessageBytes\n\tconfig.Producer.RequiredAcks = sarama.RequiredAcks(*requiredAcks)\n\tconfig.Producer.Timeout = *timeout\n\tconfig.Producer.Partitioner = parsePartitioner(*partitioner, *partition)\n\tconfig.Producer.Compression = parseCompression(*compression)\n\tconfig.Producer.Flush.Frequency = *flushFrequency\n\tconfig.Producer.Flush.Bytes = *flushBytes\n\tconfig.Producer.Flush.Messages = *flushMessages\n\tconfig.Producer.Flush.MaxMessages = *flushMaxMessages\n\tconfig.Producer.Return.Successes = true\n\tconfig.ClientID = *clientID\n\tconfig.ChannelBufferSize = *channelBufferSize\n\tconfig.Version = parseVersion(*version)\n\n\tif err := config.Validate(); err != nil {\n\t\tprintErrorAndExit(69, \"Invalid configuration: %s\", err)\n\t}\n\n\t\/\/ Construct -messageLoad messages of appoximately -messageSize random bytes.\n\tmessages := make([]*sarama.ProducerMessage, *messageLoad)\n\tfor i := 0; i < *messageLoad; i++ {\n\t\tpayload := make([]byte, *messageSize)\n\t\tif _, err := rand.Read(payload); err != nil {\n\t\t\tprintErrorAndExit(69, \"Failed to generate message payload: %s\", err)\n\t\t}\n\t\tmessages[i] = &sarama.ProducerMessage{\n\t\t\tTopic: *topic,\n\t\t\tPartition: int32(*partition),\n\t\t\tValue: sarama.ByteEncoder(payload),\n\t\t}\n\t}\n\n\t\/\/ Print out metrics periodically.\n\tctx, cancel := context.WithCancel(context.Background())\n\tgo func(ctx context.Context) {\n\t\tt := time.Tick(5 * time.Second)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-t:\n\t\t\t\tprintMetrics(os.Stdout, config.MetricRegistry)\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}(ctx)\n\n\tbrokers := strings.Split(*brokers, \",\")\n\tif *sync {\n\t\trunSyncProducer(config, brokers, messages, *throughput)\n\t} else {\n\t\trunAsyncProducer(config, brokers, messages, *throughput)\n\t}\n\n\tcancel()\n\t<-ctx.Done()\n\n\t\/\/ Print final metrics.\n\tprintMetrics(os.Stdout, config.MetricRegistry)\n}\n\nfunc runAsyncProducer(config *sarama.Config, brokers []string,\n\tmessages []*sarama.ProducerMessage, throughput int) {\n\tproducer, err := sarama.NewAsyncProducer(brokers, config)\n\tif err != nil {\n\t\tprintErrorAndExit(69, \"Failed to create producer: %s\", err)\n\t}\n\tdefer func() {\n\t\tif err := producer.Close(); err != nil {\n\t\t\tprintErrorAndExit(69, \"Failed to close producer: %s\", err)\n\t\t}\n\t}()\n\n\tmessagesDone := make(chan struct{})\n\tgo func() {\n\t\tfor i := 0; i < *messageLoad; i++ {\n\t\t\tselect {\n\t\t\tcase <-producer.Successes():\n\t\t\tcase err = <-producer.Errors():\n\t\t\t\tprintErrorAndExit(69, \"%s\", err)\n\t\t\t}\n\t\t}\n\t\tmessagesDone <- struct{}{}\n\t}()\n\n\tif throughput > 0 {\n\t\tticker := time.NewTicker(time.Second)\n\t\tfor _, message := range messages {\n\t\t\tfor i := 0; i < throughput; i++ {\n\t\t\t\tproducer.Input() <- message\n\t\t\t}\n\t\t\t<-ticker.C\n\t\t}\n\t\tticker.Stop()\n\t} else {\n\t\tfor _, message := range messages {\n\t\t\tproducer.Input() <- message\n\t\t}\n\t}\n\n\t<-messagesDone\n\tclose(messagesDone)\n}\n\nfunc runSyncProducer(config *sarama.Config, brokers []string,\n\tmessages []*sarama.ProducerMessage, throughput int) {\n\tproducer, err := sarama.NewSyncProducer(brokers, config)\n\tif err != nil {\n\t\tprintErrorAndExit(69, \"Failed to create producer: %s\", err)\n\t}\n\tdefer func() {\n\t\tif err := producer.Close(); err != nil {\n\t\t\tprintErrorAndExit(69, \"Failed to close producer: %s\", err)\n\t\t}\n\t}()\n\n\tif throughput > 0 {\n\t\tticker := time.NewTicker(time.Second)\n\t\tfor _, message := range messages {\n\t\t\tfor i := 0; i < throughput; i++ {\n\t\t\t\t_, _, err = producer.SendMessage(message)\n\t\t\t\tif err != nil {\n\t\t\t\t\tprintErrorAndExit(69, \"Failed to send message: %s\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t<-ticker.C\n\t\t}\n\t\tticker.Stop()\n\t} else {\n\t\tfor _, message := range messages {\n\t\t\t_, _, err = producer.SendMessage(message)\n\t\t\tif err != nil {\n\t\t\t\tprintErrorAndExit(69, \"Failed to send message: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc printMetrics(w io.Writer, r metrics.Registry) {\n\trecordSendRate := r.Get(\"record-send-rate\").(metrics.Meter).Snapshot()\n\trequestLatency := r.Get(\"request-latency-in-ms\").(metrics.Histogram).Snapshot()\n\trequestLatencyPercentiles := requestLatency.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})\n\tfmt.Fprintf(w, \"%d records sent, %.1f records\/sec (%.2f MB\/sec), \"+\n\t\t\"%.1f ms avg latency, %.1f ms stddev, %.1f ms 50th, %.1f ms 75th, \"+\n\t\t\"%.1f ms 95th, %.1f ms 99th, %.1f ms 99.9th\\n\",\n\t\trecordSendRate.Count(),\n\t\trecordSendRate.RateMean(),\n\t\trecordSendRate.RateMean()*float64(*messageSize)\/1024\/1024,\n\t\trequestLatency.Mean(),\n\t\trequestLatency.StdDev(),\n\t\trequestLatencyPercentiles[0],\n\t\trequestLatencyPercentiles[1],\n\t\trequestLatencyPercentiles[2],\n\t\trequestLatencyPercentiles[3],\n\t\trequestLatencyPercentiles[4],\n\t)\n}\n\nfunc printUsageErrorAndExit(message string) {\n\tfmt.Fprintln(os.Stderr, \"ERROR:\", message)\n\tfmt.Fprintln(os.Stderr)\n\tfmt.Fprintln(os.Stderr, \"Available command line options:\")\n\tflag.PrintDefaults()\n\tos.Exit(64)\n}\n\nfunc printErrorAndExit(code int, format string, values ...interface{}) {\n\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", fmt.Sprintf(format, values...))\n\tfmt.Fprintln(os.Stderr)\n\tos.Exit(code)\n}\n<commit_msg>Add routines for sync producer on performance tool<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/rand\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\tgosync \"sync\"\n\t\"time\"\n\n\t\"github.com\/Shopify\/sarama\"\n\tmetrics \"github.com\/rcrowley\/go-metrics\"\n)\n\nvar (\n\tsync = flag.Bool(\n\t\t\"sync\",\n\t\tfalse,\n\t\t\"Use a synchronous producer.\",\n\t)\n\tmessageLoad = flag.Int(\n\t\t\"message-load\",\n\t\t0,\n\t\t\"REQUIRED: The number of messages to produce to -topic.\",\n\t)\n\tmessageSize = flag.Int(\n\t\t\"message-size\",\n\t\t0,\n\t\t\"REQUIRED: The approximate size (in bytes) of each message to produce to -topic.\",\n\t)\n\tbrokers = flag.String(\n\t\t\"brokers\",\n\t\t\"\",\n\t\t\"REQUIRED: A comma separated list of broker addresses.\",\n\t)\n\ttopic = flag.String(\n\t\t\"topic\",\n\t\t\"\",\n\t\t\"REQUIRED: The topic to run the performance test on.\",\n\t)\n\tpartition = flag.Int(\n\t\t\"partition\",\n\t\t-1,\n\t\t\"The partition of -topic to run the performance test on.\",\n\t)\n\tthroughput = flag.Int(\n\t\t\"throughput\",\n\t\t0,\n\t\t\"The maximum number of messages to send per second (0 for no limit).\",\n\t)\n\tmaxMessageBytes = flag.Int(\n\t\t\"max-message-bytes\",\n\t\t1000000,\n\t\t\"The max permitted size of a message.\",\n\t)\n\trequiredAcks = flag.Int(\n\t\t\"required-acks\",\n\t\t1,\n\t\t\"The required number of acks needed from the broker (-1: all, 0: none, 1: local).\",\n\t)\n\ttimeout = flag.Duration(\n\t\t\"timeout\",\n\t\t10*time.Second,\n\t\t\"The duration the producer will wait to receive -required-acks.\",\n\t)\n\tpartitioner = flag.String(\n\t\t\"partitioner\",\n\t\t\"roundrobin\",\n\t\t\"The partitioning scheme to use (hash, manual, random, roundrobin).\",\n\t)\n\tcompression = flag.String(\n\t\t\"compression\",\n\t\t\"none\",\n\t\t\"The compression method to use (none, gzip, snappy, lz4).\",\n\t)\n\tflushFrequency = flag.Duration(\n\t\t\"flush-frequency\",\n\t\t0,\n\t\t\"The best-effort frequency of flushes.\",\n\t)\n\tflushBytes = flag.Int(\n\t\t\"flush-bytes\",\n\t\t0,\n\t\t\"The best-effort number of bytes needed to trigger a flush.\",\n\t)\n\tflushMessages = flag.Int(\n\t\t\"flush-messages\",\n\t\t0,\n\t\t\"The best-effort number of messages needed to trigger a flush.\",\n\t)\n\tflushMaxMessages = flag.Int(\n\t\t\"flush-max-messages\",\n\t\t0,\n\t\t\"The maximum number of messages the producer will send in a single request.\",\n\t)\n\tretryMax = flag.Int(\n\t\t\"retry-max\",\n\t\t3,\n\t\t\"The total number of times to retry sending a message.\",\n\t)\n\tretryBackoff = flag.Duration(\n\t\t\"retry-backoff\",\n\t\t100*time.Millisecond,\n\t\t\"The duration the producer will wait for the cluster to settle between retries.\",\n\t)\n\tclientID = flag.String(\n\t\t\"client-id\",\n\t\t\"sarama\",\n\t\t\"The client ID sent with every request to the brokers.\",\n\t)\n\tchannelBufferSize = flag.Int(\n\t\t\"channel-buffer-size\",\n\t\t256,\n\t\t\"The number of events to buffer in internal and external channels.\",\n\t)\n\troutines = flag.Int(\n\t\t\"routines\",\n\t\t1,\n\t\t\"The number of routines to send the messages from (-sync only).\",\n\t)\n\tversion = flag.String(\n\t\t\"version\",\n\t\t\"0.8.2.0\",\n\t\t\"The assumed version of Kafka.\",\n\t)\n)\n\nfunc parseCompression(scheme string) sarama.CompressionCodec {\n\tswitch scheme {\n\tcase \"none\":\n\t\treturn sarama.CompressionNone\n\tcase \"gzip\":\n\t\treturn sarama.CompressionGZIP\n\tcase \"snappy\":\n\t\treturn sarama.CompressionSnappy\n\tcase \"lz4\":\n\t\treturn sarama.CompressionLZ4\n\tdefault:\n\t\tprintUsageErrorAndExit(fmt.Sprintf(\"Unknown -compression: %s\", scheme))\n\t}\n\tpanic(\"should not happen\")\n}\n\nfunc parsePartitioner(scheme string, partition int) sarama.PartitionerConstructor {\n\tif partition < 0 && scheme == \"manual\" {\n\t\tprintUsageErrorAndExit(\"-partition must not be -1 for -partitioning=manual\")\n\t}\n\tswitch scheme {\n\tcase \"manual\":\n\t\treturn sarama.NewManualPartitioner\n\tcase \"hash\":\n\t\treturn sarama.NewHashPartitioner\n\tcase \"random\":\n\t\treturn sarama.NewRandomPartitioner\n\tcase \"roundrobin\":\n\t\treturn sarama.NewRoundRobinPartitioner\n\tdefault:\n\t\tprintUsageErrorAndExit(fmt.Sprintf(\"Unknown -partitioning: %s\", scheme))\n\t}\n\tpanic(\"should not happen\")\n}\n\nfunc parseVersion(version string) sarama.KafkaVersion {\n\tresult, err := sarama.ParseKafkaVersion(version)\n\tif err != nil {\n\t\tprintUsageErrorAndExit(fmt.Sprintf(\"unknown -version: %s\", version))\n\t}\n\treturn result\n}\n\nfunc generateMessages(topic string, partition, messageLoad, messageSize int) []*sarama.ProducerMessage {\n\tmessages := make([]*sarama.ProducerMessage, messageLoad)\n\tfor i := 0; i < messageLoad; i++ {\n\t\tpayload := make([]byte, messageSize)\n\t\tif _, err := rand.Read(payload); err != nil {\n\t\t\tprintErrorAndExit(69, \"Failed to generate message payload: %s\", err)\n\t\t}\n\t\tmessages[i] = &sarama.ProducerMessage{\n\t\t\tTopic: topic,\n\t\t\tPartition: int32(partition),\n\t\t\tValue: sarama.ByteEncoder(payload),\n\t\t}\n\t}\n\treturn messages\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif *brokers == \"\" {\n\t\tprintUsageErrorAndExit(\"-brokers is required\")\n\t}\n\tif *topic == \"\" {\n\t\tprintUsageErrorAndExit(\"-topic is required\")\n\t}\n\tif *messageLoad <= 0 {\n\t\tprintUsageErrorAndExit(\"-message-load must be greater than 0\")\n\t}\n\tif *messageSize <= 0 {\n\t\tprintUsageErrorAndExit(\"-message-size must be greater than 0\")\n\t}\n\tif *routines < 1 || *routines > *messageLoad {\n\t\tprintUsageErrorAndExit(\"-routines must be greater than 0 and less than or equal to -message-load\")\n\t}\n\n\tconfig := sarama.NewConfig()\n\n\tconfig.Producer.MaxMessageBytes = *maxMessageBytes\n\tconfig.Producer.RequiredAcks = sarama.RequiredAcks(*requiredAcks)\n\tconfig.Producer.Timeout = *timeout\n\tconfig.Producer.Partitioner = parsePartitioner(*partitioner, *partition)\n\tconfig.Producer.Compression = parseCompression(*compression)\n\tconfig.Producer.Flush.Frequency = *flushFrequency\n\tconfig.Producer.Flush.Bytes = *flushBytes\n\tconfig.Producer.Flush.Messages = *flushMessages\n\tconfig.Producer.Flush.MaxMessages = *flushMaxMessages\n\tconfig.Producer.Return.Successes = true\n\tconfig.ClientID = *clientID\n\tconfig.ChannelBufferSize = *channelBufferSize\n\tconfig.Version = parseVersion(*version)\n\n\tif err := config.Validate(); err != nil {\n\t\tprintErrorAndExit(69, \"Invalid configuration: %s\", err)\n\t}\n\n\t\/\/ Print out metrics periodically.\n\tctx, cancel := context.WithCancel(context.Background())\n\tgo func(ctx context.Context) {\n\t\tt := time.Tick(5 * time.Second)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-t:\n\t\t\t\tprintMetrics(os.Stdout, config.MetricRegistry)\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}(ctx)\n\n\tbrokers := strings.Split(*brokers, \",\")\n\tif *sync {\n\t\trunSyncProducer(*topic, *partition, *messageLoad, *messageSize, *routines,\n\t\t\tconfig, brokers, *throughput)\n\t} else {\n\t\trunAsyncProducer(*topic, *partition, *messageLoad, *messageSize,\n\t\t\tconfig, brokers, *throughput)\n\t}\n\n\tcancel()\n\t<-ctx.Done()\n\n\t\/\/ Print final metrics.\n\tprintMetrics(os.Stdout, config.MetricRegistry)\n}\n\nfunc runAsyncProducer(topic string, partition, messageLoad, messageSize int,\n\tconfig *sarama.Config, brokers []string, throughput int) {\n\tproducer, err := sarama.NewAsyncProducer(brokers, config)\n\tif err != nil {\n\t\tprintErrorAndExit(69, \"Failed to create producer: %s\", err)\n\t}\n\tdefer func() {\n\t\tif err := producer.Close(); err != nil {\n\t\t\tprintErrorAndExit(69, \"Failed to close producer: %s\", err)\n\t\t}\n\t}()\n\n\tmessages := generateMessages(topic, partition, messageLoad, messageSize)\n\n\tmessagesDone := make(chan struct{})\n\tgo func() {\n\t\tfor i := 0; i < messageLoad; i++ {\n\t\t\tselect {\n\t\t\tcase <-producer.Successes():\n\t\t\tcase err = <-producer.Errors():\n\t\t\t\tprintErrorAndExit(69, \"%s\", err)\n\t\t\t}\n\t\t}\n\t\tmessagesDone <- struct{}{}\n\t}()\n\n\tif throughput > 0 {\n\t\tticker := time.NewTicker(time.Second)\n\t\tfor _, message := range messages {\n\t\t\tfor i := 0; i < throughput; i++ {\n\t\t\t\tproducer.Input() <- message\n\t\t\t}\n\t\t\t<-ticker.C\n\t\t}\n\t\tticker.Stop()\n\t} else {\n\t\tfor _, message := range messages {\n\t\t\tproducer.Input() <- message\n\t\t}\n\t}\n\n\t<-messagesDone\n\tclose(messagesDone)\n}\n\nfunc runSyncProducer(topic string, partition, messageLoad, messageSize, routines int,\n\tconfig *sarama.Config, brokers []string, throughput int) {\n\tproducer, err := sarama.NewSyncProducer(brokers, config)\n\tif err != nil {\n\t\tprintErrorAndExit(69, \"Failed to create producer: %s\", err)\n\t}\n\tdefer func() {\n\t\tif err := producer.Close(); err != nil {\n\t\t\tprintErrorAndExit(69, \"Failed to close producer: %s\", err)\n\t\t}\n\t}()\n\n\tmessages := make([][]*sarama.ProducerMessage, routines)\n\tfor i := 0; i < routines; i++ {\n\t\tif i == routines-1 {\n\t\t\tmessages[i] = generateMessages(topic, partition, messageLoad\/routines+messageLoad%routines, messageSize)\n\t\t} else {\n\t\t\tmessages[i] = generateMessages(topic, partition, messageLoad\/routines, messageSize)\n\t\t}\n\t}\n\n\tvar wg gosync.WaitGroup\n\tif throughput > 0 {\n\t\tfor _, messages := range messages {\n\t\t\tmessages := messages\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tticker := time.NewTicker(time.Second)\n\t\t\t\tfor _, message := range messages {\n\t\t\t\t\tfor i := 0; i < throughput; i++ {\n\t\t\t\t\t\t_, _, err = producer.SendMessage(message)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tprintErrorAndExit(69, \"Failed to send message: %s\", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t<-ticker.C\n\t\t\t\t}\n\t\t\t\tticker.Stop()\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t}\n\t} else {\n\t\tfor _, messages := range messages {\n\t\t\tmessages := messages\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tfor _, message := range messages {\n\t\t\t\t\t_, _, err = producer.SendMessage(message)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tprintErrorAndExit(69, \"Failed to send message: %s\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t}\n\t}\n\twg.Wait()\n}\n\nfunc printMetrics(w io.Writer, r metrics.Registry) {\n\trecordSendRate := r.Get(\"record-send-rate\").(metrics.Meter).Snapshot()\n\trequestLatency := r.Get(\"request-latency-in-ms\").(metrics.Histogram).Snapshot()\n\trequestLatencyPercentiles := requestLatency.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})\n\tfmt.Fprintf(w, \"%d records sent, %.1f records\/sec (%.2f MB\/sec), \"+\n\t\t\"%.1f ms avg latency, %.1f ms stddev, %.1f ms 50th, %.1f ms 75th, \"+\n\t\t\"%.1f ms 95th, %.1f ms 99th, %.1f ms 99.9th\\n\",\n\t\trecordSendRate.Count(),\n\t\trecordSendRate.RateMean(),\n\t\trecordSendRate.RateMean()*float64(*messageSize)\/1024\/1024,\n\t\trequestLatency.Mean(),\n\t\trequestLatency.StdDev(),\n\t\trequestLatencyPercentiles[0],\n\t\trequestLatencyPercentiles[1],\n\t\trequestLatencyPercentiles[2],\n\t\trequestLatencyPercentiles[3],\n\t\trequestLatencyPercentiles[4],\n\t)\n}\n\nfunc printUsageErrorAndExit(message string) {\n\tfmt.Fprintln(os.Stderr, \"ERROR:\", message)\n\tfmt.Fprintln(os.Stderr)\n\tfmt.Fprintln(os.Stderr, \"Available command line options:\")\n\tflag.PrintDefaults()\n\tos.Exit(64)\n}\n\nfunc printErrorAndExit(code int, format string, values ...interface{}) {\n\tfmt.Fprintf(os.Stderr, \"ERROR: %s\\n\", fmt.Sprintf(format, values...))\n\tfmt.Fprintln(os.Stderr)\n\tos.Exit(code)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Jacob Taylor jacob@ablox.io\n\/\/ License: Apache2 - http:\/\/www.apache.org\/licenses\/LICENSE-2.0\npackage main\n\nimport (\n \"fmt\"\n \"net\"\n \"..\/utils\"\n \"bufio\"\n \"encoding\/binary\"\n \/\/\"time\"\n \"os\"\n \"bytes\"\n \"io\"\n \"io\/ioutil\"\n \"github.com\/urfave\/cli\"\n \"path\/filepath\"\n)\n\nconst nbd_folder = \"\/sample_disks\/\"\n\nvar characters_per_line = 100\nvar newline = 0\nvar line_number = 0\n\n\/\/ settings for the server\ntype Settings struct {\n ReadOnly bool\n AutoFlush bool\n Host string\n Port int\n Listen string\n File string\n Directory string\n}\n\ntype Connection struct {\n File string\n RemoteAddr string\n ReadOnly bool\n}\n\nvar connections = make(map[string][]Connection)\n\n\/*\n Add a new connection to the list of connections for a file. Make sure there is only one writable connection per filename\n returns true if the connection was added correctly. false otherwise\n *\/\nfunc addConnection(filename string, readOnly bool, remoteAddr string) bool {\n currentConnections, ok := connections[filename]\n if ok == false {\n currentConnections = make([]Connection, 4)\n }\n\n \/\/ If this a writable request, check to see if anybody else has a writable connection\n if !readOnly {\n for _, conn := range currentConnections {\n if !conn.ReadOnly {\n fmt.Printf(\"Error, too many writable connections. %s is already connected to %s\\n\", remoteAddr, filename)\n return false\n }\n }\n }\n\n newConnection := Connection{\n File: filename,\n RemoteAddr: remoteAddr,\n ReadOnly: readOnly,\n }\n\n connections[filename] = append(currentConnections, newConnection)\n return true\n}\n\n\n\nvar globalSettings Settings = Settings {\n ReadOnly: false,\n AutoFlush: true,\n Host: \"localhost\",\n Port: 8000,\n Listen: \"\",\n File: \"\",\n Directory: \"sample_disks\",\n}\n\nfunc send_export_list_item(output *bufio.Writer, options uint32, export_name string) {\n data := make([]byte, 1024)\n length := len(export_name)\n offset := 0\n\n \/\/ length of export name\n binary.BigEndian.PutUint32(data[offset:], uint32(length)) \/\/ length of string\n offset += 4\n\n \/\/ export name\n copy(data[offset:], export_name)\n offset += length\n\n reply_type := uint32(2) \/\/ reply_type: NBD_REP_SERVER\n send_message(output, options, reply_type, uint32(offset), data)\n}\n\nfunc send_ack(output *bufio.Writer, options uint32) {\n send_message(output, options, utils.NBD_COMMAND_ACK, 0, nil)\n}\n\nfunc export_name(output *bufio.Writer, conn net.Conn, payload_size int, payload []byte, options uint32, globalSettings Settings) {\n fmt.Printf(\"have request to bind to: %s\\n\", string(payload[:payload_size]))\n\n defer conn.Close()\n\n \/\/todo add support for file specification\n\n var filename bytes.Buffer\n readOnly := false\n\n var current_directory = globalSettings.Directory\n var err error\n if current_directory == \"\" {\n current_directory, err = os.Getwd()\n utils.ErrorCheck(err)\n }\n filename.WriteString(current_directory)\n filename.WriteString(nbd_folder)\n filename.Write(payload[:payload_size])\n\n fmt.Printf(\"Opening file: %s\\n\", filename.String())\n\n fileMode := os.O_RDWR\n if globalSettings.ReadOnly || (options & utils.NBD_OPT_READ_ONLY != 0) {\n fmt.Printf(\"Read Only is set\\n\")\n fileMode = os.O_RDONLY\n readOnly = true\n }\n\n file, err := os.OpenFile(filename.String(), fileMode, 0644)\n\n utils.ErrorCheck(err)\n if err != nil {\n return\n }\n\n buffer := make([]byte, 256)\n offset := 0\n\n fs, err := file.Stat()\n file_size := uint64(fs.Size())\n\n binary.BigEndian.PutUint64(buffer[offset:], file_size) \/\/ size\n offset += 8\n\n binary.BigEndian.PutUint16(buffer[offset:], 1) \/\/ flags\n offset += 2\n\n \/\/ if requested, pad with 124 zeros\n if (options & utils.NBD_FLAG_NO_ZEROES) != utils.NBD_FLAG_NO_ZEROES {\n offset += 124\n }\n\n _, err = output.Write(buffer[:offset])\n output.Flush()\n utils.ErrorCheck(err)\n\n buffer_limit := 2048*1024 \/\/ set the buffer to 2mb\n\n buffer = make([]byte, buffer_limit)\n conn_reader := bufio.NewReader(conn)\n for {\n waiting_for := 28 \/\/ wait for at least the minimum payload size\n\n _, err := io.ReadFull(conn_reader, buffer[:waiting_for])\n if err == io.EOF {\n fmt.Printf(\"Abort detected, escaping processing loop\\n\")\n break\n }\n utils.ErrorCheck(err)\n\n \/\/magic := binary.BigEndian.Uint32(buffer)\n command := binary.BigEndian.Uint32(buffer[4:8])\n \/\/handle := binary.BigEndian.Uint64(buffer[8:16])\n from := binary.BigEndian.Uint64(buffer[16:24])\n length := binary.BigEndian.Uint32(buffer[24:28])\n\n \/\/ Error out and drop the connection if there is an attempt to read too much\n if length > buffer_limit {\n fmt.Printf(\"E\")\n\n file.Sync()\n return\n }\n\n newline += 1;\n if newline % characters_per_line == 0 {\n line_number++\n fmt.Printf(\"\\n%5d: \", line_number * 100)\n newline -= characters_per_line\n }\n\n switch command {\n case utils.NBD_COMMAND_READ:\n fmt.Printf(\".\")\n\n _, err = file.ReadAt(buffer[16:16+length], int64(from))\n utils.ErrorCheck(err)\n\n binary.BigEndian.PutUint32(buffer[:4], utils.NBD_REPLY_MAGIC)\n binary.BigEndian.PutUint32(buffer[4:8], 0) \/\/ error bits\n\n conn.Write(buffer[:16+length])\n\n continue\n case utils.NBD_COMMAND_WRITE:\n if readOnly {\n fmt.Printf(\"E\")\n fmt.Printf(\"\\nAttempt to write to read only file blocked\\n\")\n\n continue\n }\n\n fmt.Printf(\"W\")\n\n _, err := io.ReadFull(conn_reader, buffer[28:28+length])\n if err == io.EOF {\n fmt.Printf(\"Abort detected, escaping processing loop\\n\")\n break\n }\n utils.ErrorCheck(err)\n\n _, err = file.WriteAt(buffer[28:28+length], int64(from))\n utils.ErrorCheck(err)\n\n if globalSettings.AutoFlush {\n file.Sync()\n }\n\n \/\/ let them know we are done\n binary.BigEndian.PutUint32(buffer[:4], utils.NBD_REPLY_MAGIC)\n binary.BigEndian.PutUint32(buffer[4:8], 0) \/\/ error bits\n\n conn.Write(buffer[:16])\n\n continue\n\n case utils.NBD_COMMAND_DISCONNECT:\n fmt.Printf(\"D\")\n\n file.Sync()\n return\n }\n }\n}\n\n\/*\nFirst check for a specific file. If one is specified, use it. If not, check for a directory. If that is not\navailable, use the CWD.\n *\/\nfunc send_export_list(output *bufio.Writer, options uint32, globalSettings Settings) {\n if globalSettings.File != \"\" {\n _, file := filepath.Split(globalSettings.File)\n\n send_export_list_item(output, options, file)\n send_ack(output, options)\n return\n }\n\n var current_directory string\n var err error\n if globalSettings.Directory == \"\" {\n current_directory, err = os.Getwd()\n utils.ErrorCheck(err)\n }\n\n files, err := ioutil.ReadDir(current_directory + nbd_folder)\n utils.ErrorCheck(err)\n\n for _, file := range files {\n send_export_list_item(output, options, file.Name())\n }\n\n send_ack(output, options)\n}\n\nfunc send_message(output *bufio.Writer, options uint32, reply_type uint32, length uint32, data []byte ) {\n endian := binary.BigEndian\n buffer := make([]byte, 1024)\n offset := 0\n\n endian.PutUint64(buffer[offset:], utils.NBD_SERVER_SEND_REPLY_MAGIC)\n offset += 8\n\n endian.PutUint32(buffer[offset:], options) \/\/ put out the server options\n offset += 4\n\n endian.PutUint32(buffer[offset:], reply_type) \/\/ reply_type: NBD_REP_SERVER\n offset += 4\n\n endian.PutUint32(buffer[offset:], length) \/\/ length of package\n offset += 4\n\n if data != nil {\n copy(buffer[offset:], data[0:length])\n offset += int(length)\n }\n\n data_to_send := buffer[:offset]\n output.Write(data_to_send)\n output.Flush()\n\n utils.LogData(\"Just sent:\", offset, data_to_send)\n}\n\nvar defaultOptions = []byte{0, 0}\n\nfunc main() {\n app := cli.NewApp()\n app.Name = \"AnyBlox\"\n app.Usage = \"block storage for the masses\"\n app.Action = func(c *cli.Context) error {\n fmt.Println(\"Please specify either a full 'listen' parameter (e.g. 'localhost:8000', '192.168.1.2:8000) or a host and port\\n\")\n return nil\n }\n\n app.Flags = []cli.Flag {\n cli.StringFlag{\n Name: \"host\",\n Value: globalSettings.Host,\n Usage: \"Hostname or IP address you want to serve traffic on. e.x. 'localhost', '192.168.1.2'\",\n Destination: &globalSettings.Host,\n },\n cli.IntFlag{\n Name: \"port\",\n Value: globalSettings.Port,\n Usage: \"Port you want to serve traffic on. e.x. '8000'\",\n Destination: &globalSettings.Port,\n },\n cli.StringFlag{\n Name: \"listen, l\",\n Destination: &globalSettings.Listen,\n Usage: \"Address and port the server should listen on. Listen will take priority over host and port parameters. hostname:port - e.x. 'localhost:8000', '192.168.1.2:8000'\",\n },\n cli.StringFlag{\n Name: \"file, f\",\n Destination: &globalSettings.File,\n Value: \"\",\n Usage: \"The file that should be shared by this server. 'file' overrides 'directory'. It is required to be a full absolute path that includes the filename\",\n },\n cli.StringFlag{\n Name: \"directory, d\",\n Destination: &globalSettings.Directory,\n Value: globalSettings.Directory,\n Usage: \"Specify a directory where the files to share are located. Default is 'sample_disks\",\n },\n }\n\n app.Run(os.Args)\n\n \/\/ Determine where the host should be listening to, depending on the arguments\n fmt.Printf(\"listen (%s) host (%s) port (%d)\\n\", globalSettings.Listen, globalSettings.Host, globalSettings.Port)\n hostingAddress := globalSettings.Listen\n if len(globalSettings.Listen) == 0 {\n if len(globalSettings.Host) == 0 || globalSettings.Port <= 0 {\n panic(\"You need to specify a host and port or specify a listen address (host:port)\\n\")\n }\n var port string\n fmt.Sprint(port, \"%d\", globalSettings.Port)\n hostingAddress = globalSettings.Host + \":\" + port\n }\n\n fmt.Printf(\"About to listen on %s\\n\", hostingAddress)\n listener, err := net.Listen(\"tcp\", hostingAddress)\n utils.ErrorCheck(err)\n\n fmt.Printf(\"aBlox server online\\n\")\n\n reply_magic := make([]byte, 4)\n binary.BigEndian.PutUint32(reply_magic, utils.NBD_REPLY_MAGIC)\n\n defer fmt.Printf(\"End of line\\n\")\n\n for {\n conn, err := listener.Accept()\n utils.ErrorCheck(err)\n\n fmt.Printf(\"We have a new connection from: %s\\n\", conn.RemoteAddr())\n output := bufio.NewWriter(conn)\n\n output.WriteString(\"NBDMAGIC\") \/\/ init password\n output.WriteString(\"IHAVEOPT\") \/\/ Magic\n\n output.Write(defaultOptions)\n\n output.Flush()\n\n \/\/ Fetch the data until we get the initial options\n data := make([]byte, 1024)\n offset := 0\n waiting_for := 16 \/\/ wait for at least the minimum payload size\n\n _, err = io.ReadFull(conn, data[:waiting_for])\n utils.ErrorCheck(err)\n\n options := binary.BigEndian.Uint32(data[:4])\n command := binary.BigEndian.Uint32(data[12:16])\n\n \/\/ If we are requesting an export, make sure we have the length of the data for the export name.\n if binary.BigEndian.Uint32(data[12:]) == utils.NBD_COMMAND_EXPORT_NAME {\n waiting_for += 4\n _, err = io.ReadFull(conn, data[16:20])\n utils.ErrorCheck(err)\n }\n payload_size := int(binary.BigEndian.Uint32(data[16:]))\n\n fmt.Printf(\"command is: %d\\npayload_size is: %d\\n\", command, payload_size)\n offset = waiting_for\n waiting_for += int(payload_size)\n _, err = io.ReadFull(conn, data[offset:waiting_for])\n utils.ErrorCheck(err)\n\n payload := make([]byte, payload_size)\n if payload_size > 0 {\n copy(payload, data[20:])\n }\n\n utils.LogData(\"Payload is:\", payload_size, payload)\n\n \/\/ At this point, we have the command, payload size, and payload.\n switch command {\n case utils.NBD_COMMAND_LIST:\n send_export_list(output, options, globalSettings)\n conn.Close()\n break\n case utils.NBD_COMMAND_EXPORT_NAME:\n go export_name(output, conn, payload_size, payload, options, globalSettings)\n break\n }\n }\n\n}\n<commit_msg>fixed #10 finally allowing users to specify their own value for the buffer limit<commit_after>\/\/ Copyright 2016 Jacob Taylor jacob@ablox.io\n\/\/ License: Apache2 - http:\/\/www.apache.org\/licenses\/LICENSE-2.0\npackage main\n\nimport (\n \"fmt\"\n \"net\"\n \"..\/utils\"\n \"bufio\"\n \"encoding\/binary\"\n \/\/\"time\"\n \"os\"\n \"bytes\"\n \"io\"\n \"io\/ioutil\"\n \"github.com\/urfave\/cli\"\n \"path\/filepath\"\n)\n\nconst nbd_folder = \"\/sample_disks\/\"\n\nvar characters_per_line = 100\nvar newline = 0\nvar line_number = 0\n\n\/\/ settings for the server\ntype Settings struct {\n ReadOnly bool\n AutoFlush bool\n Host string\n Port int\n Listen string\n File string\n Directory string\n BufferLimit int\n}\n\ntype Connection struct {\n File string\n RemoteAddr string\n ReadOnly bool\n}\n\nvar connections = make(map[string][]Connection)\n\n\/*\n Add a new connection to the list of connections for a file. Make sure there is only one writable connection per filename\n returns true if the connection was added correctly. false otherwise\n *\/\nfunc addConnection(filename string, readOnly bool, remoteAddr string) bool {\n currentConnections, ok := connections[filename]\n if ok == false {\n currentConnections = make([]Connection, 4)\n }\n\n \/\/ If this a writable request, check to see if anybody else has a writable connection\n if !readOnly {\n for _, conn := range currentConnections {\n if !conn.ReadOnly {\n fmt.Printf(\"Error, too many writable connections. %s is already connected to %s\\n\", remoteAddr, filename)\n return false\n }\n }\n }\n\n newConnection := Connection{\n File: filename,\n RemoteAddr: remoteAddr,\n ReadOnly: readOnly,\n }\n\n connections[filename] = append(currentConnections, newConnection)\n return true\n}\n\n\n\nvar globalSettings Settings = Settings {\n ReadOnly: false,\n AutoFlush: true,\n Host: \"localhost\",\n Port: 8000,\n Listen: \"\",\n File: \"\",\n Directory: \"sample_disks\",\n BufferLimit: 2048,\n}\n\nfunc send_export_list_item(output *bufio.Writer, options uint32, export_name string) {\n data := make([]byte, 1024)\n length := len(export_name)\n offset := 0\n\n \/\/ length of export name\n binary.BigEndian.PutUint32(data[offset:], uint32(length)) \/\/ length of string\n offset += 4\n\n \/\/ export name\n copy(data[offset:], export_name)\n offset += length\n\n reply_type := uint32(2) \/\/ reply_type: NBD_REP_SERVER\n send_message(output, options, reply_type, uint32(offset), data)\n}\n\nfunc send_ack(output *bufio.Writer, options uint32) {\n send_message(output, options, utils.NBD_COMMAND_ACK, 0, nil)\n}\n\nfunc export_name(output *bufio.Writer, conn net.Conn, payload_size int, payload []byte, options uint32, globalSettings Settings) {\n fmt.Printf(\"have request to bind to: %s\\n\", string(payload[:payload_size]))\n\n defer conn.Close()\n\n \/\/todo add support for file specification\n\n var filename bytes.Buffer\n readOnly := false\n\n var current_directory = globalSettings.Directory\n var err error\n if current_directory == \"\" {\n current_directory, err = os.Getwd()\n utils.ErrorCheck(err)\n }\n filename.WriteString(current_directory)\n filename.WriteString(nbd_folder)\n filename.Write(payload[:payload_size])\n\n fmt.Printf(\"Opening file: %s\\n\", filename.String())\n\n fileMode := os.O_RDWR\n if globalSettings.ReadOnly || (options & utils.NBD_OPT_READ_ONLY != 0) {\n fmt.Printf(\"Read Only is set\\n\")\n fileMode = os.O_RDONLY\n readOnly = true\n }\n\n file, err := os.OpenFile(filename.String(), fileMode, 0644)\n\n utils.ErrorCheck(err)\n if err != nil {\n return\n }\n\n buffer := make([]byte, 256)\n offset := 0\n\n fs, err := file.Stat()\n file_size := uint64(fs.Size())\n\n binary.BigEndian.PutUint64(buffer[offset:], file_size) \/\/ size\n offset += 8\n\n binary.BigEndian.PutUint16(buffer[offset:], 1) \/\/ flags\n offset += 2\n\n \/\/ if requested, pad with 124 zeros\n if (options & utils.NBD_FLAG_NO_ZEROES) != utils.NBD_FLAG_NO_ZEROES {\n offset += 124\n }\n\n _, err = output.Write(buffer[:offset])\n output.Flush()\n utils.ErrorCheck(err)\n\n buffer_limit := globalSettings.BufferLimit*1024 \/\/ set the buffer to 2mb\n\n buffer = make([]byte, buffer_limit)\n conn_reader := bufio.NewReader(conn)\n for {\n waiting_for := 28 \/\/ wait for at least the minimum payload size\n\n _, err := io.ReadFull(conn_reader, buffer[:waiting_for])\n if err == io.EOF {\n fmt.Printf(\"Abort detected, escaping processing loop\\n\")\n break\n }\n utils.ErrorCheck(err)\n\n \/\/magic := binary.BigEndian.Uint32(buffer)\n command := binary.BigEndian.Uint32(buffer[4:8])\n \/\/handle := binary.BigEndian.Uint64(buffer[8:16])\n from := binary.BigEndian.Uint64(buffer[16:24])\n length := binary.BigEndian.Uint32(buffer[24:28])\n\n \/\/ Error out and drop the connection if there is an attempt to read too much\n if length > buffer_limit {\n fmt.Printf(\"E\")\n\n file.Sync()\n return\n }\n\n newline += 1;\n if newline % characters_per_line == 0 {\n line_number++\n fmt.Printf(\"\\n%5d: \", line_number * 100)\n newline -= characters_per_line\n }\n\n switch command {\n case utils.NBD_COMMAND_READ:\n fmt.Printf(\".\")\n\n _, err = file.ReadAt(buffer[16:16+length], int64(from))\n utils.ErrorCheck(err)\n\n binary.BigEndian.PutUint32(buffer[:4], utils.NBD_REPLY_MAGIC)\n binary.BigEndian.PutUint32(buffer[4:8], 0) \/\/ error bits\n\n conn.Write(buffer[:16+length])\n\n continue\n case utils.NBD_COMMAND_WRITE:\n if readOnly {\n fmt.Printf(\"E\")\n fmt.Printf(\"\\nAttempt to write to read only file blocked\\n\")\n\n continue\n }\n\n fmt.Printf(\"W\")\n\n _, err := io.ReadFull(conn_reader, buffer[28:28+length])\n if err == io.EOF {\n fmt.Printf(\"Abort detected, escaping processing loop\\n\")\n break\n }\n utils.ErrorCheck(err)\n\n _, err = file.WriteAt(buffer[28:28+length], int64(from))\n utils.ErrorCheck(err)\n\n if globalSettings.AutoFlush {\n file.Sync()\n }\n\n \/\/ let them know we are done\n binary.BigEndian.PutUint32(buffer[:4], utils.NBD_REPLY_MAGIC)\n binary.BigEndian.PutUint32(buffer[4:8], 0) \/\/ error bits\n\n conn.Write(buffer[:16])\n\n continue\n\n case utils.NBD_COMMAND_DISCONNECT:\n fmt.Printf(\"D\")\n\n file.Sync()\n return\n }\n }\n}\n\n\/*\nFirst check for a specific file. If one is specified, use it. If not, check for a directory. If that is not\navailable, use the CWD.\n *\/\nfunc send_export_list(output *bufio.Writer, options uint32, globalSettings Settings) {\n if globalSettings.File != \"\" {\n _, file := filepath.Split(globalSettings.File)\n\n send_export_list_item(output, options, file)\n send_ack(output, options)\n return\n }\n\n var current_directory string\n var err error\n if globalSettings.Directory == \"\" {\n current_directory, err = os.Getwd()\n utils.ErrorCheck(err)\n }\n\n files, err := ioutil.ReadDir(current_directory + nbd_folder)\n utils.ErrorCheck(err)\n\n for _, file := range files {\n send_export_list_item(output, options, file.Name())\n }\n\n send_ack(output, options)\n}\n\nfunc send_message(output *bufio.Writer, options uint32, reply_type uint32, length uint32, data []byte ) {\n endian := binary.BigEndian\n buffer := make([]byte, 1024)\n offset := 0\n\n endian.PutUint64(buffer[offset:], utils.NBD_SERVER_SEND_REPLY_MAGIC)\n offset += 8\n\n endian.PutUint32(buffer[offset:], options) \/\/ put out the server options\n offset += 4\n\n endian.PutUint32(buffer[offset:], reply_type) \/\/ reply_type: NBD_REP_SERVER\n offset += 4\n\n endian.PutUint32(buffer[offset:], length) \/\/ length of package\n offset += 4\n\n if data != nil {\n copy(buffer[offset:], data[0:length])\n offset += int(length)\n }\n\n data_to_send := buffer[:offset]\n output.Write(data_to_send)\n output.Flush()\n\n utils.LogData(\"Just sent:\", offset, data_to_send)\n}\n\nvar defaultOptions = []byte{0, 0}\n\nfunc main() {\n app := cli.NewApp()\n app.Name = \"AnyBlox\"\n app.Usage = \"block storage for the masses\"\n app.Action = func(c *cli.Context) error {\n fmt.Println(\"Please specify either a full 'listen' parameter (e.g. 'localhost:8000', '192.168.1.2:8000) or a host and port\\n\")\n return nil\n }\n\n app.Flags = []cli.Flag {\n cli.StringFlag{\n Name: \"host\",\n Value: globalSettings.Host,\n Usage: \"Hostname or IP address you want to serve traffic on. e.x. 'localhost', '192.168.1.2'\",\n Destination: &globalSettings.Host,\n },\n cli.IntFlag{\n Name: \"port\",\n Value: globalSettings.Port,\n Usage: \"Port you want to serve traffic on. e.x. '8000'\",\n Destination: &globalSettings.Port,\n },\n cli.StringFlag{\n Name: \"listen, l\",\n Destination: &globalSettings.Listen,\n Usage: \"Address and port the server should listen on. Listen will take priority over host and port parameters. hostname:port - e.x. 'localhost:8000', '192.168.1.2:8000'\",\n },\n cli.StringFlag{\n Name: \"file, f\",\n Destination: &globalSettings.File,\n Value: \"\",\n Usage: \"The file that should be shared by this server. 'file' overrides 'directory'. It is required to be a full absolute path that includes the filename\",\n },\n cli.StringFlag{\n Name: \"directory, d\",\n Destination: &globalSettings.Directory,\n Value: globalSettings.Directory,\n Usage: \"Specify a directory where the files to share are located. Default is 'sample_disks\",\n },\n cli.IntFlag{\n Name: \"buffer\",\n Value: globalSettings.BufferLimit,\n Usage: \"The number of kilobytes in size of the maximum supported read request e.x. '2048'\",\n Destination: &globalSettings.Port,\n },\n }\n\n app.Run(os.Args)\n\n \/\/ Determine where the host should be listening to, depending on the arguments\n fmt.Printf(\"listen (%s) host (%s) port (%d)\\n\", globalSettings.Listen, globalSettings.Host, globalSettings.Port)\n hostingAddress := globalSettings.Listen\n if len(globalSettings.Listen) == 0 {\n if len(globalSettings.Host) == 0 || globalSettings.Port <= 0 {\n panic(\"You need to specify a host and port or specify a listen address (host:port)\\n\")\n }\n var port string\n fmt.Sprint(port, \"%d\", globalSettings.Port)\n hostingAddress = globalSettings.Host + \":\" + port\n }\n\n fmt.Printf(\"About to listen on %s\\n\", hostingAddress)\n listener, err := net.Listen(\"tcp\", hostingAddress)\n utils.ErrorCheck(err)\n\n fmt.Printf(\"aBlox server online\\n\")\n\n reply_magic := make([]byte, 4)\n binary.BigEndian.PutUint32(reply_magic, utils.NBD_REPLY_MAGIC)\n\n defer fmt.Printf(\"End of line\\n\")\n\n for {\n conn, err := listener.Accept()\n utils.ErrorCheck(err)\n\n fmt.Printf(\"We have a new connection from: %s\\n\", conn.RemoteAddr())\n output := bufio.NewWriter(conn)\n\n output.WriteString(\"NBDMAGIC\") \/\/ init password\n output.WriteString(\"IHAVEOPT\") \/\/ Magic\n\n output.Write(defaultOptions)\n\n output.Flush()\n\n \/\/ Fetch the data until we get the initial options\n data := make([]byte, 1024)\n offset := 0\n waiting_for := 16 \/\/ wait for at least the minimum payload size\n\n _, err = io.ReadFull(conn, data[:waiting_for])\n utils.ErrorCheck(err)\n\n options := binary.BigEndian.Uint32(data[:4])\n command := binary.BigEndian.Uint32(data[12:16])\n\n \/\/ If we are requesting an export, make sure we have the length of the data for the export name.\n if binary.BigEndian.Uint32(data[12:]) == utils.NBD_COMMAND_EXPORT_NAME {\n waiting_for += 4\n _, err = io.ReadFull(conn, data[16:20])\n utils.ErrorCheck(err)\n }\n payload_size := int(binary.BigEndian.Uint32(data[16:]))\n\n fmt.Printf(\"command is: %d\\npayload_size is: %d\\n\", command, payload_size)\n offset = waiting_for\n waiting_for += int(payload_size)\n _, err = io.ReadFull(conn, data[offset:waiting_for])\n utils.ErrorCheck(err)\n\n payload := make([]byte, payload_size)\n if payload_size > 0 {\n copy(payload, data[20:])\n }\n\n utils.LogData(\"Payload is:\", payload_size, payload)\n\n \/\/ At this point, we have the command, payload size, and payload.\n switch command {\n case utils.NBD_COMMAND_LIST:\n send_export_list(output, options, globalSettings)\n conn.Close()\n break\n case utils.NBD_COMMAND_EXPORT_NAME:\n go export_name(output, conn, payload_size, payload, options, globalSettings)\n break\n }\n }\n\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Adding ShortAlias<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Pilosa Corp.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package server contains the `pilosa server` subcommand which runs Pilosa\n\/\/ itself. The purpose of this package is to define an easily tested Command\n\/\/ object which handles interpreting configuration and setting up all the\n\/\/ objects that Pilosa needs.\n\npackage server\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strconv\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"crypto\/tls\"\n\n\t\"github.com\/pilosa\/pilosa\"\n\t\"github.com\/pilosa\/pilosa\/boltdb\"\n\t\"github.com\/pilosa\/pilosa\/encoding\/proto\"\n\t\"github.com\/pilosa\/pilosa\/gcnotify\"\n\t\"github.com\/pilosa\/pilosa\/gopsutil\"\n\t\"github.com\/pilosa\/pilosa\/gossip\"\n\t\"github.com\/pilosa\/pilosa\/http\"\n\t\"github.com\/pilosa\/pilosa\/statsd\"\n\t\"github.com\/pkg\/errors\"\n)\n\nfunc init() {\n\trand.Seed(time.Now().UTC().UnixNano())\n}\n\ntype loggerLogger interface {\n\tpilosa.Logger\n\tLogger() *log.Logger\n}\n\n\/\/ Command represents the state of the pilosa server command.\ntype Command struct {\n\tServer *pilosa.Server\n\n\t\/\/ Configuration.\n\tConfig *Config\n\n\t\/\/ Gossip transport\n\tgossipTransport *gossip.Transport\n\n\t\/\/ Standard input\/output\n\t*pilosa.CmdIO\n\n\t\/\/ Started will be closed once Command.Start is finished.\n\tStarted chan struct{}\n\t\/\/ done will be closed when Command.Close() is called\n\tdone chan struct{}\n\n\t\/\/ Passed to the Gossip implementation.\n\tlogOutput io.Writer\n\tlogger loggerLogger\n\n\tHandler pilosa.Handler\n\tAPI *pilosa.API\n\tln net.Listener\n\n\tserverOptions []pilosa.ServerOption\n}\n\ntype CommandOption func(c *Command) error\n\nfunc OptCommandServerOptions(opts ...pilosa.ServerOption) CommandOption {\n\treturn func(c *Command) error {\n\t\tc.serverOptions = append(c.serverOptions, opts...)\n\t\treturn nil\n\t}\n}\n\n\/\/ NewCommand returns a new instance of Main.\nfunc NewCommand(stdin io.Reader, stdout, stderr io.Writer, opts ...CommandOption) *Command {\n\tc := &Command{\n\t\tConfig: NewConfig(),\n\n\t\tCmdIO: pilosa.NewCmdIO(stdin, stdout, stderr),\n\n\t\tStarted: make(chan struct{}),\n\t\tdone: make(chan struct{}),\n\t}\n\n\tfor _, opt := range opts {\n\t\terr := opt(c)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t\t\/\/ TODO: Return error instead of panic?\n\t\t}\n\t}\n\n\treturn c\n}\n\n\/\/ Start starts the pilosa server - it returns once the server is running.\nfunc (m *Command) Start() (err error) {\n\tdefer close(m.Started)\n\n\t\/\/ SetupServer\n\terr = m.SetupServer()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"setting up server\")\n\t}\n\n\t\/\/ SetupNetworking\n\terr = m.setupNetworking()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"setting up networking\")\n\t}\n\tgo func() {\n\t\terr := m.Handler.Serve()\n\t\tif err != nil {\n\t\t\tm.logger.Printf(\"Handler serve error: %v\", err)\n\t\t}\n\t}()\n\n\t\/\/ Initialize server.\n\tif err = m.Server.Open(); err != nil {\n\t\treturn errors.Wrap(err, \"opening server\")\n\t}\n\n\tm.logger.Printf(\"Listening as %s\\n\", m.API.Node().URI)\n\n\treturn nil\n}\n\n\/\/ Wait waits for the server to be closed or interrupted.\nfunc (m *Command) Wait() error {\n\t\/\/ First SIGKILL causes server to shut down gracefully.\n\tc := make(chan os.Signal, 2)\n\tsignal.Notify(c, os.Interrupt, syscall.SIGTERM)\n\tselect {\n\tcase sig := <-c:\n\t\tm.logger.Printf(\"Received %s; gracefully shutting down...\\n\", sig.String())\n\n\t\t\/\/ Second signal causes a hard shutdown.\n\t\tgo func() { <-c; os.Exit(1) }()\n\t\treturn errors.Wrap(m.Close(), \"closing command\")\n\tcase <-m.done:\n\t\tm.logger.Printf(\"Server closed externally\")\n\t\treturn nil\n\t}\n}\n\n\/\/ setupLogger sets up the logger based on the configuration.\nfunc (m *Command) setupLogger() error {\n\tvar err error\n\tif m.Config.LogPath == \"\" {\n\t\tm.logOutput = m.Stderr\n\t} else {\n\t\tm.logOutput, err = os.OpenFile(m.Config.LogPath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"opening file\")\n\t\t}\n\t}\n\n\tif m.Config.Verbose {\n\t\tm.logger = pilosa.NewVerboseLogger(m.logOutput)\n\t} else {\n\t\tm.logger = pilosa.NewStandardLogger(m.logOutput)\n\t}\n\treturn nil\n}\n\n\/\/ SetupServer uses the cluster configuration to set up this server.\nfunc (m *Command) SetupServer() error {\n\terr := m.setupLogger()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"setting up logger\")\n\t}\n\n\tproductName := \"Pilosa\"\n\tif pilosa.EnterpriseEnabled {\n\t\tproductName += \" Enterprise\"\n\t}\n\tm.logger.Printf(\"%s %s, build time %s\\n\", productName, pilosa.Version, pilosa.BuildTime)\n\n\turi, err := pilosa.AddressWithDefaults(m.Config.Bind)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"processing bind address\")\n\t}\n\n\t\/\/ Setup TLS\n\tvar TLSConfig *tls.Config\n\tif uri.Scheme == \"https\" {\n\t\tif m.Config.TLS.CertificatePath == \"\" {\n\t\t\treturn errors.New(\"certificate path is required for TLS sockets\")\n\t\t}\n\t\tif m.Config.TLS.CertificateKeyPath == \"\" {\n\t\t\treturn errors.New(\"certificate key path is required for TLS sockets\")\n\t\t}\n\t\tcert, err := tls.LoadX509KeyPair(m.Config.TLS.CertificatePath, m.Config.TLS.CertificateKeyPath)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"load x509 key pair\")\n\t\t}\n\t\tTLSConfig = &tls.Config{\n\t\t\tCertificates: []tls.Certificate{cert},\n\t\t\tInsecureSkipVerify: m.Config.TLS.SkipVerify,\n\t\t}\n\t}\n\n\tdiagnosticsInterval := time.Duration(0)\n\tif m.Config.Metric.Diagnostics {\n\t\tdiagnosticsInterval = time.Duration(defaultDiagnosticsInterval)\n\t}\n\n\tstatsClient, err := NewStatsClient(m.Config.Metric.Service, m.Config.Metric.Host)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"new stats client\")\n\t}\n\n\tm.ln, err = getListener(*uri, TLSConfig)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting listener\")\n\t}\n\n\t\/\/ If port is 0, get auto-allocated port from listener\n\tif uri.Port == 0 {\n\t\turi.SetPort(uint16(m.ln.Addr().(*net.TCPAddr).Port))\n\t}\n\n\tc := http.GetHTTPClient(TLSConfig)\n\n\t\/\/ Setup connection to primary store if this is a replica.\n\tvar primaryTranslateStore pilosa.TranslateStore\n\tif m.Config.Translation.PrimaryURL != \"\" {\n\t\tprimaryTranslateStore = http.NewTranslateStore(m.Config.Translation.PrimaryURL)\n\t}\n\n\t\/\/ Set Coordinator.\n\tcoordinatorOpt := pilosa.OptServerIsCoordinator(false)\n\tif m.Config.Cluster.Coordinator || len(m.Config.Gossip.Seeds) == 0 {\n\t\tcoordinatorOpt = pilosa.OptServerIsCoordinator(true)\n\t}\n\n\tserverOptions := []pilosa.ServerOption{\n\t\tpilosa.OptServerAntiEntropyInterval(time.Duration(m.Config.AntiEntropy.Interval)),\n\t\tpilosa.OptServerLongQueryTime(time.Duration(m.Config.Cluster.LongQueryTime)),\n\t\tpilosa.OptServerDataDir(m.Config.DataDir),\n\t\tpilosa.OptServerReplicaN(m.Config.Cluster.ReplicaN),\n\t\tpilosa.OptServerMaxWritesPerRequest(m.Config.MaxWritesPerRequest),\n\t\tpilosa.OptServerMetricInterval(time.Duration(m.Config.Metric.PollInterval)),\n\t\tpilosa.OptServerDiagnosticsInterval(diagnosticsInterval),\n\n\t\tpilosa.OptServerLogger(m.logger),\n\t\tpilosa.OptServerAttrStoreFunc(boltdb.NewAttrStore),\n\t\tpilosa.OptServerSystemInfo(gopsutil.NewSystemInfo()),\n\t\tpilosa.OptServerGCNotifier(gcnotify.NewActiveGCNotifier()),\n\t\tpilosa.OptServerStatsClient(statsClient),\n\t\tpilosa.OptServerURI(uri),\n\t\tpilosa.OptServerInternalClient(http.NewInternalClientFromURI(uri, c)),\n\t\tpilosa.OptServerPrimaryTranslateStore(primaryTranslateStore),\n\t\tpilosa.OptServerClusterDisabled(m.Config.Cluster.Disabled, m.Config.Cluster.Hosts),\n\t\tpilosa.OptServerSerializer(proto.Serializer{}),\n\t\tcoordinatorOpt,\n\t}\n\n\tserverOptions = append(serverOptions, m.serverOptions...)\n\n\tm.Server, err = pilosa.NewServer(serverOptions...)\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"new server\")\n\t}\n\n\tm.API, err = pilosa.NewAPI(pilosa.OptAPIServer(m.Server))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"new api\")\n\t}\n\n\tm.Handler, err = http.NewHandler(\n\t\thttp.OptHandlerAllowedOrigins(m.Config.Handler.AllowedOrigins),\n\t\thttp.OptHandlerAPI(m.API),\n\t\thttp.OptHandlerLogger(m.logger),\n\t\thttp.OptHandlerListener(m.ln),\n\t)\n\treturn errors.Wrap(err, \"new handler\")\n\n}\n\n\/\/ setupNetworking sets up internode communication based on the configuration.\nfunc (m *Command) setupNetworking() error {\n\tif m.Config.Cluster.Disabled {\n\t\treturn nil\n\t}\n\n\tgossipPort, err := strconv.Atoi(m.Config.Gossip.Port)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"parsing port\")\n\t}\n\n\t\/\/ get the host portion of addr to use for binding\n\tgossipHost := m.API.Node().URI.Host\n\tm.gossipTransport, err = gossip.NewTransport(gossipHost, gossipPort, m.logger.Logger())\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting transport\")\n\t}\n\n\tgossipMemberSet, err := gossip.NewGossipMemberSet(\n\t\tm.Config.Gossip,\n\t\tm.API,\n\t\tgossip.WithLogger(m.logger.Logger()),\n\t\tgossip.WithTransport(m.gossipTransport),\n\t)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting memberset\")\n\t}\n\treturn errors.Wrap(gossipMemberSet.Open(), \"opening gossip memberset\")\n}\n\n\/\/ GossipTransport allows a caller to return the gossip transport created when\n\/\/ setting up the GossipMemberSet. This is useful if one needs to determine the\n\/\/ allocated ephemeral port programmatically. (usually used in tests)\nfunc (m *Command) GossipTransport() *gossip.Transport {\n\treturn m.gossipTransport\n}\n\n\/\/ Close shuts down the server.\nfunc (m *Command) Close() error {\n\tvar logErr error\n\thandlerErr := m.Handler.Close()\n\tserveErr := m.Server.Close()\n\tif closer, ok := m.logOutput.(io.Closer); ok {\n\t\tlogErr = closer.Close()\n\t}\n\tclose(m.done)\n\tif serveErr != nil || logErr != nil || handlerErr != nil {\n\t\treturn fmt.Errorf(\"closing server: '%v', closing logs: '%v', closing handler: '%v'\", serveErr, logErr, handlerErr)\n\t}\n\treturn nil\n}\n\n\/\/ NewStatsClient creates a stats client from the config\nfunc NewStatsClient(name string, host string) (pilosa.StatsClient, error) {\n\tswitch name {\n\tcase \"expvar\":\n\t\treturn pilosa.NewExpvarStatsClient(), nil\n\tcase \"statsd\":\n\t\treturn statsd.NewStatsClient(host)\n\tcase \"nop\", \"none\":\n\t\treturn pilosa.NopStatsClient, nil\n\tdefault:\n\t\treturn nil, errors.Errorf(\"'%v' not a valid stats client, choose from [expvar, statsd, none].\", name)\n\t}\n}\n\n\/\/ getListener gets a net.Listener based on the config.\nfunc getListener(uri pilosa.URI, tlsconf *tls.Config) (ln net.Listener, err error) {\n\t\/\/ If bind URI has the https scheme, enable TLS\n\tif uri.Scheme == \"https\" && tlsconf != nil {\n\t\tln, err = tls.Listen(\"tcp\", uri.HostPort(), tlsconf)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"tls.Listener\")\n\t\t}\n\t} else if uri.Scheme == \"http\" {\n\t\t\/\/ Open HTTP listener to determine port (if specified as :0).\n\t\tln, err = net.Listen(\"tcp\", uri.HostPort())\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"net.Listen\")\n\t\t}\n\t} else {\n\t\treturn nil, errors.Errorf(\"unsupported scheme: %s\", uri.Scheme)\n\t}\n\n\treturn ln, nil\n}\n<commit_msg>Unexport server.NewStatsClient<commit_after>\/\/ Copyright 2017 Pilosa Corp.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package server contains the `pilosa server` subcommand which runs Pilosa\n\/\/ itself. The purpose of this package is to define an easily tested Command\n\/\/ object which handles interpreting configuration and setting up all the\n\/\/ objects that Pilosa needs.\n\npackage server\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strconv\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"crypto\/tls\"\n\n\t\"github.com\/pilosa\/pilosa\"\n\t\"github.com\/pilosa\/pilosa\/boltdb\"\n\t\"github.com\/pilosa\/pilosa\/encoding\/proto\"\n\t\"github.com\/pilosa\/pilosa\/gcnotify\"\n\t\"github.com\/pilosa\/pilosa\/gopsutil\"\n\t\"github.com\/pilosa\/pilosa\/gossip\"\n\t\"github.com\/pilosa\/pilosa\/http\"\n\t\"github.com\/pilosa\/pilosa\/statsd\"\n\t\"github.com\/pkg\/errors\"\n)\n\nfunc init() {\n\trand.Seed(time.Now().UTC().UnixNano())\n}\n\ntype loggerLogger interface {\n\tpilosa.Logger\n\tLogger() *log.Logger\n}\n\n\/\/ Command represents the state of the pilosa server command.\ntype Command struct {\n\tServer *pilosa.Server\n\n\t\/\/ Configuration.\n\tConfig *Config\n\n\t\/\/ Gossip transport\n\tgossipTransport *gossip.Transport\n\n\t\/\/ Standard input\/output\n\t*pilosa.CmdIO\n\n\t\/\/ Started will be closed once Command.Start is finished.\n\tStarted chan struct{}\n\t\/\/ done will be closed when Command.Close() is called\n\tdone chan struct{}\n\n\t\/\/ Passed to the Gossip implementation.\n\tlogOutput io.Writer\n\tlogger loggerLogger\n\n\tHandler pilosa.Handler\n\tAPI *pilosa.API\n\tln net.Listener\n\n\tserverOptions []pilosa.ServerOption\n}\n\ntype CommandOption func(c *Command) error\n\nfunc OptCommandServerOptions(opts ...pilosa.ServerOption) CommandOption {\n\treturn func(c *Command) error {\n\t\tc.serverOptions = append(c.serverOptions, opts...)\n\t\treturn nil\n\t}\n}\n\n\/\/ NewCommand returns a new instance of Main.\nfunc NewCommand(stdin io.Reader, stdout, stderr io.Writer, opts ...CommandOption) *Command {\n\tc := &Command{\n\t\tConfig: NewConfig(),\n\n\t\tCmdIO: pilosa.NewCmdIO(stdin, stdout, stderr),\n\n\t\tStarted: make(chan struct{}),\n\t\tdone: make(chan struct{}),\n\t}\n\n\tfor _, opt := range opts {\n\t\terr := opt(c)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t\t\/\/ TODO: Return error instead of panic?\n\t\t}\n\t}\n\n\treturn c\n}\n\n\/\/ Start starts the pilosa server - it returns once the server is running.\nfunc (m *Command) Start() (err error) {\n\tdefer close(m.Started)\n\n\t\/\/ SetupServer\n\terr = m.SetupServer()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"setting up server\")\n\t}\n\n\t\/\/ SetupNetworking\n\terr = m.setupNetworking()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"setting up networking\")\n\t}\n\tgo func() {\n\t\terr := m.Handler.Serve()\n\t\tif err != nil {\n\t\t\tm.logger.Printf(\"Handler serve error: %v\", err)\n\t\t}\n\t}()\n\n\t\/\/ Initialize server.\n\tif err = m.Server.Open(); err != nil {\n\t\treturn errors.Wrap(err, \"opening server\")\n\t}\n\n\tm.logger.Printf(\"Listening as %s\\n\", m.API.Node().URI)\n\n\treturn nil\n}\n\n\/\/ Wait waits for the server to be closed or interrupted.\nfunc (m *Command) Wait() error {\n\t\/\/ First SIGKILL causes server to shut down gracefully.\n\tc := make(chan os.Signal, 2)\n\tsignal.Notify(c, os.Interrupt, syscall.SIGTERM)\n\tselect {\n\tcase sig := <-c:\n\t\tm.logger.Printf(\"Received %s; gracefully shutting down...\\n\", sig.String())\n\n\t\t\/\/ Second signal causes a hard shutdown.\n\t\tgo func() { <-c; os.Exit(1) }()\n\t\treturn errors.Wrap(m.Close(), \"closing command\")\n\tcase <-m.done:\n\t\tm.logger.Printf(\"Server closed externally\")\n\t\treturn nil\n\t}\n}\n\n\/\/ setupLogger sets up the logger based on the configuration.\nfunc (m *Command) setupLogger() error {\n\tvar err error\n\tif m.Config.LogPath == \"\" {\n\t\tm.logOutput = m.Stderr\n\t} else {\n\t\tm.logOutput, err = os.OpenFile(m.Config.LogPath, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"opening file\")\n\t\t}\n\t}\n\n\tif m.Config.Verbose {\n\t\tm.logger = pilosa.NewVerboseLogger(m.logOutput)\n\t} else {\n\t\tm.logger = pilosa.NewStandardLogger(m.logOutput)\n\t}\n\treturn nil\n}\n\n\/\/ SetupServer uses the cluster configuration to set up this server.\nfunc (m *Command) SetupServer() error {\n\terr := m.setupLogger()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"setting up logger\")\n\t}\n\n\tproductName := \"Pilosa\"\n\tif pilosa.EnterpriseEnabled {\n\t\tproductName += \" Enterprise\"\n\t}\n\tm.logger.Printf(\"%s %s, build time %s\\n\", productName, pilosa.Version, pilosa.BuildTime)\n\n\turi, err := pilosa.AddressWithDefaults(m.Config.Bind)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"processing bind address\")\n\t}\n\n\t\/\/ Setup TLS\n\tvar TLSConfig *tls.Config\n\tif uri.Scheme == \"https\" {\n\t\tif m.Config.TLS.CertificatePath == \"\" {\n\t\t\treturn errors.New(\"certificate path is required for TLS sockets\")\n\t\t}\n\t\tif m.Config.TLS.CertificateKeyPath == \"\" {\n\t\t\treturn errors.New(\"certificate key path is required for TLS sockets\")\n\t\t}\n\t\tcert, err := tls.LoadX509KeyPair(m.Config.TLS.CertificatePath, m.Config.TLS.CertificateKeyPath)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"load x509 key pair\")\n\t\t}\n\t\tTLSConfig = &tls.Config{\n\t\t\tCertificates: []tls.Certificate{cert},\n\t\t\tInsecureSkipVerify: m.Config.TLS.SkipVerify,\n\t\t}\n\t}\n\n\tdiagnosticsInterval := time.Duration(0)\n\tif m.Config.Metric.Diagnostics {\n\t\tdiagnosticsInterval = time.Duration(defaultDiagnosticsInterval)\n\t}\n\n\tstatsClient, err := newStatsClient(m.Config.Metric.Service, m.Config.Metric.Host)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"new stats client\")\n\t}\n\n\tm.ln, err = getListener(*uri, TLSConfig)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting listener\")\n\t}\n\n\t\/\/ If port is 0, get auto-allocated port from listener\n\tif uri.Port == 0 {\n\t\turi.SetPort(uint16(m.ln.Addr().(*net.TCPAddr).Port))\n\t}\n\n\tc := http.GetHTTPClient(TLSConfig)\n\n\t\/\/ Setup connection to primary store if this is a replica.\n\tvar primaryTranslateStore pilosa.TranslateStore\n\tif m.Config.Translation.PrimaryURL != \"\" {\n\t\tprimaryTranslateStore = http.NewTranslateStore(m.Config.Translation.PrimaryURL)\n\t}\n\n\t\/\/ Set Coordinator.\n\tcoordinatorOpt := pilosa.OptServerIsCoordinator(false)\n\tif m.Config.Cluster.Coordinator || len(m.Config.Gossip.Seeds) == 0 {\n\t\tcoordinatorOpt = pilosa.OptServerIsCoordinator(true)\n\t}\n\n\tserverOptions := []pilosa.ServerOption{\n\t\tpilosa.OptServerAntiEntropyInterval(time.Duration(m.Config.AntiEntropy.Interval)),\n\t\tpilosa.OptServerLongQueryTime(time.Duration(m.Config.Cluster.LongQueryTime)),\n\t\tpilosa.OptServerDataDir(m.Config.DataDir),\n\t\tpilosa.OptServerReplicaN(m.Config.Cluster.ReplicaN),\n\t\tpilosa.OptServerMaxWritesPerRequest(m.Config.MaxWritesPerRequest),\n\t\tpilosa.OptServerMetricInterval(time.Duration(m.Config.Metric.PollInterval)),\n\t\tpilosa.OptServerDiagnosticsInterval(diagnosticsInterval),\n\n\t\tpilosa.OptServerLogger(m.logger),\n\t\tpilosa.OptServerAttrStoreFunc(boltdb.NewAttrStore),\n\t\tpilosa.OptServerSystemInfo(gopsutil.NewSystemInfo()),\n\t\tpilosa.OptServerGCNotifier(gcnotify.NewActiveGCNotifier()),\n\t\tpilosa.OptServerStatsClient(statsClient),\n\t\tpilosa.OptServerURI(uri),\n\t\tpilosa.OptServerInternalClient(http.NewInternalClientFromURI(uri, c)),\n\t\tpilosa.OptServerPrimaryTranslateStore(primaryTranslateStore),\n\t\tpilosa.OptServerClusterDisabled(m.Config.Cluster.Disabled, m.Config.Cluster.Hosts),\n\t\tpilosa.OptServerSerializer(proto.Serializer{}),\n\t\tcoordinatorOpt,\n\t}\n\n\tserverOptions = append(serverOptions, m.serverOptions...)\n\n\tm.Server, err = pilosa.NewServer(serverOptions...)\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"new server\")\n\t}\n\n\tm.API, err = pilosa.NewAPI(pilosa.OptAPIServer(m.Server))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"new api\")\n\t}\n\n\tm.Handler, err = http.NewHandler(\n\t\thttp.OptHandlerAllowedOrigins(m.Config.Handler.AllowedOrigins),\n\t\thttp.OptHandlerAPI(m.API),\n\t\thttp.OptHandlerLogger(m.logger),\n\t\thttp.OptHandlerListener(m.ln),\n\t)\n\treturn errors.Wrap(err, \"new handler\")\n\n}\n\n\/\/ setupNetworking sets up internode communication based on the configuration.\nfunc (m *Command) setupNetworking() error {\n\tif m.Config.Cluster.Disabled {\n\t\treturn nil\n\t}\n\n\tgossipPort, err := strconv.Atoi(m.Config.Gossip.Port)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"parsing port\")\n\t}\n\n\t\/\/ get the host portion of addr to use for binding\n\tgossipHost := m.API.Node().URI.Host\n\tm.gossipTransport, err = gossip.NewTransport(gossipHost, gossipPort, m.logger.Logger())\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting transport\")\n\t}\n\n\tgossipMemberSet, err := gossip.NewGossipMemberSet(\n\t\tm.Config.Gossip,\n\t\tm.API,\n\t\tgossip.WithLogger(m.logger.Logger()),\n\t\tgossip.WithTransport(m.gossipTransport),\n\t)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting memberset\")\n\t}\n\treturn errors.Wrap(gossipMemberSet.Open(), \"opening gossip memberset\")\n}\n\n\/\/ GossipTransport allows a caller to return the gossip transport created when\n\/\/ setting up the GossipMemberSet. This is useful if one needs to determine the\n\/\/ allocated ephemeral port programmatically. (usually used in tests)\nfunc (m *Command) GossipTransport() *gossip.Transport {\n\treturn m.gossipTransport\n}\n\n\/\/ Close shuts down the server.\nfunc (m *Command) Close() error {\n\tvar logErr error\n\thandlerErr := m.Handler.Close()\n\tserveErr := m.Server.Close()\n\tif closer, ok := m.logOutput.(io.Closer); ok {\n\t\tlogErr = closer.Close()\n\t}\n\tclose(m.done)\n\tif serveErr != nil || logErr != nil || handlerErr != nil {\n\t\treturn fmt.Errorf(\"closing server: '%v', closing logs: '%v', closing handler: '%v'\", serveErr, logErr, handlerErr)\n\t}\n\treturn nil\n}\n\n\/\/ newStatsClient creates a stats client from the config\nfunc newStatsClient(name string, host string) (pilosa.StatsClient, error) {\n\tswitch name {\n\tcase \"expvar\":\n\t\treturn pilosa.NewExpvarStatsClient(), nil\n\tcase \"statsd\":\n\t\treturn statsd.NewStatsClient(host)\n\tcase \"nop\", \"none\":\n\t\treturn pilosa.NopStatsClient, nil\n\tdefault:\n\t\treturn nil, errors.Errorf(\"'%v' not a valid stats client, choose from [expvar, statsd, none].\", name)\n\t}\n}\n\n\/\/ getListener gets a net.Listener based on the config.\nfunc getListener(uri pilosa.URI, tlsconf *tls.Config) (ln net.Listener, err error) {\n\t\/\/ If bind URI has the https scheme, enable TLS\n\tif uri.Scheme == \"https\" && tlsconf != nil {\n\t\tln, err = tls.Listen(\"tcp\", uri.HostPort(), tlsconf)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"tls.Listener\")\n\t\t}\n\t} else if uri.Scheme == \"http\" {\n\t\t\/\/ Open HTTP listener to determine port (if specified as :0).\n\t\tln, err = net.Listen(\"tcp\", uri.HostPort())\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"net.Listen\")\n\t\t}\n\t} else {\n\t\treturn nil, errors.Errorf(\"unsupported scheme: %s\", uri.Scheme)\n\t}\n\n\treturn ln, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package finalize\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"bytes\"\n\n\t\"github.com\/cloudfoundry\/libbuildpack\"\n)\n\ntype Staticfile struct {\n\tRootDir string `yaml:\"root\"`\n\tHostDotFiles bool `yaml:\"host_dot_files\"`\n\tLocationInclude string `yaml:\"location_include\"`\n\tDirectoryIndex bool `yaml:\"directory\"`\n\tSSI bool `yaml:\"ssi\"`\n\tPushState bool `yaml:\"pushstate\"`\n\tHSTS bool `yaml:\"http_strict_transport_security\"`\n\tHSTSIncludeSubDomains bool `yaml:\"http_strict_transport_security_include_subdomains\"`\n\tHSTSPreload bool `yaml:\"http_strict_transport_security_preload\"`\n\tForceHTTPS bool `yaml:\"force_https\"`\n\tBasicAuth bool\n\tStatusCodes map[string]string `yaml:\"status_codes\"`\n}\n\ntype YAML interface {\n\tLoad(string, interface{}) error\n}\n\ntype Finalizer struct {\n\tBuildDir string\n\tDepDir string\n\tLog *libbuildpack.Logger\n\tConfig Staticfile\n\tYAML YAML\n}\ntype StaticfileTemp struct {\n\tRootDir string `yaml:\"root,omitempty\"`\n\tHostDotFiles string `yaml:\"host_dot_files,omitempty\"`\n\tLocationInclude string `yaml:\"location_include\"`\n\tDirectoryIndex string `yaml:\"directory\"`\n\tSSI string `yaml:\"ssi\"`\n\tPushState string `yaml:\"pushstate\"`\n\tHSTS string `yaml:\"http_strict_transport_security\"`\n\tHSTSIncludeSubDomains string `yaml:\"http_strict_transport_security_include_subdomains\"`\n\tHSTSPreload string `yaml:\"http_strict_transport_security_preload\"`\n\tForceHTTPS string `yaml:\"force_https\"`\n\tStatusCodes map[string]string `yaml:\"status_codes\"`\n}\n\nvar skipCopyFile = map[string]bool{\n\t\"Staticfile\": true,\n\t\"Staticfile.auth\": true,\n\t\"manifest.yml\": true,\n\t\".profile\": true,\n\t\".profile.d\": true,\n\t\"stackato.yml\": true,\n\t\".cloudfoundry\": true,\n\t\"nginx\": true,\n}\n\nfunc Run(sf *Finalizer) error {\n\tvar err error\n\n\terr = sf.LoadStaticfile()\n\tif err != nil {\n\t\tsf.Log.Error(\"Unable to load Staticfile: %s\", err.Error())\n\t\treturn err\n\t}\n\n\tappRootDir, err := sf.GetAppRootDir()\n\tif err != nil {\n\t\tsf.Log.Error(\"Invalid root directory: %s\", err.Error())\n\t\treturn err\n\t}\n\n\tsf.Warnings()\n\n\terr = sf.CopyFilesToPublic(appRootDir)\n\tif err != nil {\n\t\tsf.Log.Error(\"Unable to copy project files: %s\", err.Error())\n\t\treturn err\n\t}\n\n\terr = sf.ConfigureNginx()\n\tif err != nil {\n\t\tsf.Log.Error(\"Unable to configure nginx: %s\", err.Error())\n\t\treturn err\n\t}\n\n\terr = sf.WriteStartupFiles()\n\tif err != nil {\n\t\tsf.Log.Error(\"Unable to write startup file: %s\", err.Error())\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (sf *Finalizer) WriteStartupFiles() error {\n\tprofiledDir := filepath.Join(sf.DepDir, \"profile.d\")\n\terr := os.MkdirAll(profiledDir, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(filepath.Join(profiledDir, \"staticfile.sh\"), []byte(initScript), 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(filepath.Join(sf.BuildDir, \"start_logging.sh\"), []byte(startLoggingScript), 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbootScript := filepath.Join(sf.BuildDir, \"boot.sh\")\n\treturn ioutil.WriteFile(bootScript, []byte(startCommand), 0755)\n}\n\nfunc (sf *Finalizer) LoadStaticfile() error {\n\tvar hash StaticfileTemp\n\tconf := &sf.Config\n\n\terr := sf.YAML.Load(filepath.Join(sf.BuildDir, \"Staticfile\"), &hash)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\n\tisEnabled := func(value string) bool {\n\t\treturn (value == \"enabled\" || value == \"true\")\n\t}\n\n\tif hash.RootDir != \"\" {\n\t\tconf.RootDir = hash.RootDir\n\t}\n\n\tif isEnabled(hash.HostDotFiles) {\n\t\tsf.Log.BeginStep(\"Enabling hosting of dotfiles\")\n\t\tconf.HostDotFiles = true\n\t}\n\n\tconf.LocationInclude = hash.LocationInclude\n\tif conf.LocationInclude != \"\" {\n\t\tsf.Log.BeginStep(\"Enabling location include file %s\", conf.LocationInclude)\n\t}\n\n\tif hash.DirectoryIndex != \"\" {\n\t\tsf.Log.BeginStep(\"Enabling directory index for folders without index.html files\")\n\t\tconf.DirectoryIndex = true\n\t}\n\n\tif isEnabled(hash.SSI) {\n\t\tsf.Log.BeginStep(\"Enabling SSI\")\n\t\tconf.SSI = true\n\t}\n\n\tif isEnabled(hash.PushState) {\n\t\tsf.Log.BeginStep(\"Enabling pushstate\")\n\t\tconf.PushState = true\n\t}\n\n\tif isEnabled(hash.HSTS) {\n\t\tsf.Log.BeginStep(\"Enabling HSTS\")\n\t\tconf.HSTS = true\n\t}\n\tif isEnabled(hash.HSTSIncludeSubDomains) {\n\t\tsf.Log.BeginStep(\"Enabling HSTS includeSubDomains\")\n\t\tconf.HSTSIncludeSubDomains = true\n\t}\n\tif isEnabled(hash.HSTSPreload) {\n\t\tsf.Log.BeginStep(\"Enabling HSTS Preload\")\n\t\tconf.HSTSPreload = true\n\t}\n\tif isEnabled(hash.ForceHTTPS) {\n\t\tsf.Log.BeginStep(\"Enabling HTTPS redirect\")\n\t\tconf.ForceHTTPS = true\n\t}\n\tif len(hash.StatusCodes) > 0 {\n\t\tsf.Log.BeginStep(\"Enabling custom pages for status_codes\")\n\t\tconf.StatusCodes = sf.getStatusCodes(hash.StatusCodes)\n\t}\n\n\tif !conf.HSTS && (conf.HSTSIncludeSubDomains || conf.HSTSPreload) {\n\t\tsf.Log.Warning(\"http_strict_transport_security is not enabled while http_strict_transport_security_include_subdomains or http_strict_transport_security_preload have been enabled.\")\n\t\tsf.Log.Protip(\"http_strict_transport_security_include_subdomains and http_strict_transport_security_preload do nothing without http_strict_transport_security enabled.\", \"https:\/\/docs.cloudfoundry.org\/buildpacks\/staticfile\/index.html#strict-security\")\n\t}\n\n\tauthFile := filepath.Join(sf.BuildDir, \"Staticfile.auth\")\n\t_, err = os.Stat(authFile)\n\tif err == nil {\n\t\tconf.BasicAuth = true\n\t\tsf.Log.BeginStep(\"Enabling basic authentication using Staticfile.auth\")\n\t\tsf.Log.Protip(\"Learn about basic authentication\", \"https:\/\/docs.cloudfoundry.org\/buildpacks\/staticfile\/index.html#authentication\")\n\t}\n\n\treturn nil\n}\n\nfunc (sf *Finalizer) getStatusCodes(codes map[string]string) map[string]string {\n\tvar versions map[string]string\n\tversions = make(map[string]string)\n\tfor key, value := range codes {\n\t\tif strings.Contains(key, \"4xx\") {\n\t\t\tkey = \"400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 421 422 423 424 426 428 429 431 451\"\n\t\t} else if strings.Contains(key, \"5xx\") {\n\t\t\tkey = \"500 501 502 503 504 505 506 507 508 510 511\"\n\t\t}\n\t\tversions[key] = value\n\t}\n\treturn versions\n}\n\nfunc (sf *Finalizer) GetAppRootDir() (string, error) {\n\tvar rootDirRelative string\n\n\tif sf.Config.RootDir != \"\" {\n\t\trootDirRelative = sf.Config.RootDir\n\t} else {\n\t\trootDirRelative = \".\"\n\t}\n\n\trootDirAbs, err := filepath.Abs(filepath.Join(sf.BuildDir, rootDirRelative))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tsf.Log.BeginStep(\"Root folder %s\", rootDirAbs)\n\n\tdirInfo, err := os.Stat(rootDirAbs)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"the application Staticfile specifies a root directory %s that does not exist\", rootDirRelative)\n\t}\n\n\tif !dirInfo.IsDir() {\n\t\treturn \"\", fmt.Errorf(\"the application Staticfile specifies a root directory %s that is a plain file, but was expected to be a directory\", rootDirRelative)\n\t}\n\n\treturn rootDirAbs, nil\n}\n\nfunc (sf *Finalizer) CopyFilesToPublic(appRootDir string) error {\n\tsf.Log.BeginStep(\"Copying project files into public\")\n\n\tpublicDir := filepath.Join(sf.BuildDir, \"public\")\n\n\tif publicDir == appRootDir {\n\t\treturn nil\n\t}\n\n\ttmpDir, err := ioutil.TempDir(\"\", \"staticfile-buildpack.approot.\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfiles, err := ioutil.ReadDir(appRootDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, file := range files {\n\t\tif skipCopyFile[file.Name()] {\n\t\t\tcontinue\n\t\t}\n\n\t\tif strings.HasPrefix(file.Name(), \".\") && !sf.Config.HostDotFiles {\n\t\t\tcontinue\n\t\t}\n\n\t\terr = os.Rename(filepath.Join(appRootDir, file.Name()), filepath.Join(tmpDir, file.Name()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := os.RemoveAll(publicDir); err != nil {\n\t\treturn err\n\t}\n\n\tif err := os.Rename(tmpDir, publicDir); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (sf *Finalizer) Warnings() {\n\tif len(sf.Config.LocationInclude) > 0 && len(sf.Config.RootDir) == 0 {\n\t\tsf.Log.Warning(\"The location_include directive only works in conjunction with root.\\nPlease specify root to use location_include\")\n\t}\n\n\tif filepath.Clean(sf.Config.RootDir) == \".\" {\n\t\tfound, _ := libbuildpack.FileExists(filepath.Join(sf.BuildDir, \"nginx\", \"conf\"))\n\t\tif found {\n\t\t\tsf.Log.Info(\"\\n\\n\\n\")\n\t\t\tsf.Log.Warning(\"You have an nginx\/conf directory, but have not set *root*, or have set it to '.'.\\nIf you are using the nginx\/conf directory for nginx configuration, you probably need to also set the *root* directive.\")\n\t\t\tsf.Log.Info(\"\\n\\n\\n\")\n\t\t}\n\t}\n}\n\nfunc (sf *Finalizer) ConfigureNginx() error {\n\tvar err error\n\n\tsf.Log.BeginStep(\"Configuring nginx\")\n\n\tnginxConf, err := sf.generateNginxConf()\n\tif err != nil {\n\t\tsf.Log.Error(\"Unable to generate nginx.conf: %s\", err.Error())\n\t\treturn err\n\t}\n\n\tconfDir := filepath.Join(sf.BuildDir, \"nginx\", \"conf\")\n\tif err := os.MkdirAll(confDir, 0755); err != nil {\n\t\treturn err\n\t}\n\n\tlogsDir := filepath.Join(sf.BuildDir, \"nginx\", \"logs\")\n\tif err := os.MkdirAll(logsDir, 0755); err != nil {\n\t\treturn err\n\t}\n\n\tconfFiles := map[string]string{\n\t\t\"nginx.conf\": nginxConf,\n\t\t\"mime.types\": MimeTypes}\n\n\tfor file, contents := range confFiles {\n\t\tconfDest := filepath.Join(confDir, file)\n\t\tcustomConfFile := filepath.Join(sf.BuildDir, \"public\", file)\n\n\t\t_, err = os.Stat(customConfFile)\n\t\tif err == nil {\n\t\t\terr = os.Rename(customConfFile, confDest)\n\t\t\tif file == \"nginx.conf\" {\n\t\t\t\tsf.Log.Warning(\"overriding nginx.conf is deprecated and highly discouraged, as it breaks the functionality of the Staticfile and Staticfile.auth configuration directives. Please use the NGINX buildpack available at: https:\/\/github.com\/cloudfoundry\/nginx-buildpack\")\n\t\t\t}\n\t\t} else {\n\t\t\terr = ioutil.WriteFile(confDest, []byte(contents), 0644)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif sf.Config.BasicAuth {\n\t\tauthFile := filepath.Join(sf.BuildDir, \"Staticfile.auth\")\n\t\terr = libbuildpack.CopyFile(authFile, filepath.Join(confDir, \".htpasswd\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (sf *Finalizer) generateNginxConf() (string, error) {\n\tbuffer := new(bytes.Buffer)\n\n\tt := template.Must(template.New(\"nginx.conf\").Parse(nginxConfTemplate))\n\n\terr := t.Execute(buffer, sf.Config)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn buffer.String(), nil\n}\n<commit_msg>removes warning from finalize that is no longer applicable<commit_after>package finalize\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"bytes\"\n\n\t\"github.com\/cloudfoundry\/libbuildpack\"\n)\n\ntype Staticfile struct {\n\tRootDir string `yaml:\"root\"`\n\tHostDotFiles bool `yaml:\"host_dot_files\"`\n\tLocationInclude string `yaml:\"location_include\"`\n\tDirectoryIndex bool `yaml:\"directory\"`\n\tSSI bool `yaml:\"ssi\"`\n\tPushState bool `yaml:\"pushstate\"`\n\tHSTS bool `yaml:\"http_strict_transport_security\"`\n\tHSTSIncludeSubDomains bool `yaml:\"http_strict_transport_security_include_subdomains\"`\n\tHSTSPreload bool `yaml:\"http_strict_transport_security_preload\"`\n\tForceHTTPS bool `yaml:\"force_https\"`\n\tBasicAuth bool\n\tStatusCodes map[string]string `yaml:\"status_codes\"`\n}\n\ntype YAML interface {\n\tLoad(string, interface{}) error\n}\n\ntype Finalizer struct {\n\tBuildDir string\n\tDepDir string\n\tLog *libbuildpack.Logger\n\tConfig Staticfile\n\tYAML YAML\n}\ntype StaticfileTemp struct {\n\tRootDir string `yaml:\"root,omitempty\"`\n\tHostDotFiles string `yaml:\"host_dot_files,omitempty\"`\n\tLocationInclude string `yaml:\"location_include\"`\n\tDirectoryIndex string `yaml:\"directory\"`\n\tSSI string `yaml:\"ssi\"`\n\tPushState string `yaml:\"pushstate\"`\n\tHSTS string `yaml:\"http_strict_transport_security\"`\n\tHSTSIncludeSubDomains string `yaml:\"http_strict_transport_security_include_subdomains\"`\n\tHSTSPreload string `yaml:\"http_strict_transport_security_preload\"`\n\tForceHTTPS string `yaml:\"force_https\"`\n\tStatusCodes map[string]string `yaml:\"status_codes\"`\n}\n\nvar skipCopyFile = map[string]bool{\n\t\"Staticfile\": true,\n\t\"Staticfile.auth\": true,\n\t\"manifest.yml\": true,\n\t\".profile\": true,\n\t\".profile.d\": true,\n\t\"stackato.yml\": true,\n\t\".cloudfoundry\": true,\n\t\"nginx\": true,\n}\n\nfunc Run(sf *Finalizer) error {\n\tvar err error\n\n\terr = sf.LoadStaticfile()\n\tif err != nil {\n\t\tsf.Log.Error(\"Unable to load Staticfile: %s\", err.Error())\n\t\treturn err\n\t}\n\n\tappRootDir, err := sf.GetAppRootDir()\n\tif err != nil {\n\t\tsf.Log.Error(\"Invalid root directory: %s\", err.Error())\n\t\treturn err\n\t}\n\n\tsf.Warnings()\n\n\terr = sf.CopyFilesToPublic(appRootDir)\n\tif err != nil {\n\t\tsf.Log.Error(\"Unable to copy project files: %s\", err.Error())\n\t\treturn err\n\t}\n\n\terr = sf.ConfigureNginx()\n\tif err != nil {\n\t\tsf.Log.Error(\"Unable to configure nginx: %s\", err.Error())\n\t\treturn err\n\t}\n\n\terr = sf.WriteStartupFiles()\n\tif err != nil {\n\t\tsf.Log.Error(\"Unable to write startup file: %s\", err.Error())\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (sf *Finalizer) WriteStartupFiles() error {\n\tprofiledDir := filepath.Join(sf.DepDir, \"profile.d\")\n\terr := os.MkdirAll(profiledDir, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(filepath.Join(profiledDir, \"staticfile.sh\"), []byte(initScript), 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(filepath.Join(sf.BuildDir, \"start_logging.sh\"), []byte(startLoggingScript), 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbootScript := filepath.Join(sf.BuildDir, \"boot.sh\")\n\treturn ioutil.WriteFile(bootScript, []byte(startCommand), 0755)\n}\n\nfunc (sf *Finalizer) LoadStaticfile() error {\n\tvar hash StaticfileTemp\n\tconf := &sf.Config\n\n\terr := sf.YAML.Load(filepath.Join(sf.BuildDir, \"Staticfile\"), &hash)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\n\tisEnabled := func(value string) bool {\n\t\treturn (value == \"enabled\" || value == \"true\")\n\t}\n\n\tif hash.RootDir != \"\" {\n\t\tconf.RootDir = hash.RootDir\n\t}\n\n\tif isEnabled(hash.HostDotFiles) {\n\t\tsf.Log.BeginStep(\"Enabling hosting of dotfiles\")\n\t\tconf.HostDotFiles = true\n\t}\n\n\tconf.LocationInclude = hash.LocationInclude\n\tif conf.LocationInclude != \"\" {\n\t\tsf.Log.BeginStep(\"Enabling location include file %s\", conf.LocationInclude)\n\t}\n\n\tif hash.DirectoryIndex != \"\" {\n\t\tsf.Log.BeginStep(\"Enabling directory index for folders without index.html files\")\n\t\tconf.DirectoryIndex = true\n\t}\n\n\tif isEnabled(hash.SSI) {\n\t\tsf.Log.BeginStep(\"Enabling SSI\")\n\t\tconf.SSI = true\n\t}\n\n\tif isEnabled(hash.PushState) {\n\t\tsf.Log.BeginStep(\"Enabling pushstate\")\n\t\tconf.PushState = true\n\t}\n\n\tif isEnabled(hash.HSTS) {\n\t\tsf.Log.BeginStep(\"Enabling HSTS\")\n\t\tconf.HSTS = true\n\t}\n\tif isEnabled(hash.HSTSIncludeSubDomains) {\n\t\tsf.Log.BeginStep(\"Enabling HSTS includeSubDomains\")\n\t\tconf.HSTSIncludeSubDomains = true\n\t}\n\tif isEnabled(hash.HSTSPreload) {\n\t\tsf.Log.BeginStep(\"Enabling HSTS Preload\")\n\t\tconf.HSTSPreload = true\n\t}\n\tif isEnabled(hash.ForceHTTPS) {\n\t\tsf.Log.BeginStep(\"Enabling HTTPS redirect\")\n\t\tconf.ForceHTTPS = true\n\t}\n\tif len(hash.StatusCodes) > 0 {\n\t\tsf.Log.BeginStep(\"Enabling custom pages for status_codes\")\n\t\tconf.StatusCodes = sf.getStatusCodes(hash.StatusCodes)\n\t}\n\n\tif !conf.HSTS && (conf.HSTSIncludeSubDomains || conf.HSTSPreload) {\n\t\tsf.Log.Warning(\"http_strict_transport_security is not enabled while http_strict_transport_security_include_subdomains or http_strict_transport_security_preload have been enabled.\")\n\t\tsf.Log.Protip(\"http_strict_transport_security_include_subdomains and http_strict_transport_security_preload do nothing without http_strict_transport_security enabled.\", \"https:\/\/docs.cloudfoundry.org\/buildpacks\/staticfile\/index.html#strict-security\")\n\t}\n\n\tauthFile := filepath.Join(sf.BuildDir, \"Staticfile.auth\")\n\t_, err = os.Stat(authFile)\n\tif err == nil {\n\t\tconf.BasicAuth = true\n\t\tsf.Log.BeginStep(\"Enabling basic authentication using Staticfile.auth\")\n\t\tsf.Log.Protip(\"Learn about basic authentication\", \"https:\/\/docs.cloudfoundry.org\/buildpacks\/staticfile\/index.html#authentication\")\n\t}\n\n\treturn nil\n}\n\nfunc (sf *Finalizer) getStatusCodes(codes map[string]string) map[string]string {\n\tvar versions map[string]string\n\tversions = make(map[string]string)\n\tfor key, value := range codes {\n\t\tif strings.Contains(key, \"4xx\") {\n\t\t\tkey = \"400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 421 422 423 424 426 428 429 431 451\"\n\t\t} else if strings.Contains(key, \"5xx\") {\n\t\t\tkey = \"500 501 502 503 504 505 506 507 508 510 511\"\n\t\t}\n\t\tversions[key] = value\n\t}\n\treturn versions\n}\n\nfunc (sf *Finalizer) GetAppRootDir() (string, error) {\n\tvar rootDirRelative string\n\n\tif sf.Config.RootDir != \"\" {\n\t\trootDirRelative = sf.Config.RootDir\n\t} else {\n\t\trootDirRelative = \".\"\n\t}\n\n\trootDirAbs, err := filepath.Abs(filepath.Join(sf.BuildDir, rootDirRelative))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tsf.Log.BeginStep(\"Root folder %s\", rootDirAbs)\n\n\tdirInfo, err := os.Stat(rootDirAbs)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"the application Staticfile specifies a root directory %s that does not exist\", rootDirRelative)\n\t}\n\n\tif !dirInfo.IsDir() {\n\t\treturn \"\", fmt.Errorf(\"the application Staticfile specifies a root directory %s that is a plain file, but was expected to be a directory\", rootDirRelative)\n\t}\n\n\treturn rootDirAbs, nil\n}\n\nfunc (sf *Finalizer) CopyFilesToPublic(appRootDir string) error {\n\tsf.Log.BeginStep(\"Copying project files into public\")\n\n\tpublicDir := filepath.Join(sf.BuildDir, \"public\")\n\n\tif publicDir == appRootDir {\n\t\treturn nil\n\t}\n\n\ttmpDir, err := ioutil.TempDir(\"\", \"staticfile-buildpack.approot.\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfiles, err := ioutil.ReadDir(appRootDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, file := range files {\n\t\tif skipCopyFile[file.Name()] {\n\t\t\tcontinue\n\t\t}\n\n\t\tif strings.HasPrefix(file.Name(), \".\") && !sf.Config.HostDotFiles {\n\t\t\tcontinue\n\t\t}\n\n\t\terr = os.Rename(filepath.Join(appRootDir, file.Name()), filepath.Join(tmpDir, file.Name()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := os.RemoveAll(publicDir); err != nil {\n\t\treturn err\n\t}\n\n\tif err := os.Rename(tmpDir, publicDir); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (sf *Finalizer) Warnings() {\n\tif filepath.Clean(sf.Config.RootDir) == \".\" {\n\t\tfound, _ := libbuildpack.FileExists(filepath.Join(sf.BuildDir, \"nginx\", \"conf\"))\n\t\tif found {\n\t\t\tsf.Log.Info(\"\\n\\n\\n\")\n\t\t\tsf.Log.Warning(\"You have an nginx\/conf directory, but have not set *root*, or have set it to '.'.\\nIf you are using the nginx\/conf directory for nginx configuration, you probably need to also set the *root* directive.\")\n\t\t\tsf.Log.Info(\"\\n\\n\\n\")\n\t\t}\n\t}\n}\n\nfunc (sf *Finalizer) ConfigureNginx() error {\n\tvar err error\n\n\tsf.Log.BeginStep(\"Configuring nginx\")\n\n\tnginxConf, err := sf.generateNginxConf()\n\tif err != nil {\n\t\tsf.Log.Error(\"Unable to generate nginx.conf: %s\", err.Error())\n\t\treturn err\n\t}\n\n\tconfDir := filepath.Join(sf.BuildDir, \"nginx\", \"conf\")\n\tif err := os.MkdirAll(confDir, 0755); err != nil {\n\t\treturn err\n\t}\n\n\tlogsDir := filepath.Join(sf.BuildDir, \"nginx\", \"logs\")\n\tif err := os.MkdirAll(logsDir, 0755); err != nil {\n\t\treturn err\n\t}\n\n\tconfFiles := map[string]string{\n\t\t\"nginx.conf\": nginxConf,\n\t\t\"mime.types\": MimeTypes}\n\n\tfor file, contents := range confFiles {\n\t\tconfDest := filepath.Join(confDir, file)\n\t\tcustomConfFile := filepath.Join(sf.BuildDir, \"public\", file)\n\n\t\t_, err = os.Stat(customConfFile)\n\t\tif err == nil {\n\t\t\terr = os.Rename(customConfFile, confDest)\n\t\t\tif file == \"nginx.conf\" {\n\t\t\t\tsf.Log.Warning(\"overriding nginx.conf is deprecated and highly discouraged, as it breaks the functionality of the Staticfile and Staticfile.auth configuration directives. Please use the NGINX buildpack available at: https:\/\/github.com\/cloudfoundry\/nginx-buildpack\")\n\t\t\t}\n\t\t} else {\n\t\t\terr = ioutil.WriteFile(confDest, []byte(contents), 0644)\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif sf.Config.BasicAuth {\n\t\tauthFile := filepath.Join(sf.BuildDir, \"Staticfile.auth\")\n\t\terr = libbuildpack.CopyFile(authFile, filepath.Join(confDir, \".htpasswd\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (sf *Finalizer) generateNginxConf() (string, error) {\n\tbuffer := new(bytes.Buffer)\n\n\tt := template.Must(template.New(\"nginx.conf\").Parse(nginxConfTemplate))\n\n\terr := t.Execute(buffer, sf.Config)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn buffer.String(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Andreas Koch. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage server\n\nimport (\n\t\"code.google.com\/p\/go.net\/websocket\"\n\t\"fmt\"\n\t\"github.com\/andreaskoch\/allmark\/config\"\n\t\"github.com\/andreaskoch\/allmark\/path\"\n\t\"github.com\/andreaskoch\/allmark\/renderer\"\n\t\"github.com\/andreaskoch\/allmark\/repository\"\n\t\"github.com\/andreaskoch\/allmark\/util\"\n\t\"log\"\n\t\"math\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strings\"\n)\n\nvar (\n\troutes map[string]string\n\n\tuseTempDir = true\n)\n\nconst (\n\n\t\/\/ Dynamic Routes\n\tItemHandlerRoute = \"\/\"\n\tDebugHandlerRoute = \"\/debug\/index\"\n\tWebSocketHandlerRoute = \"\/ws\"\n\n\t\/\/ Static Routes\n\tThemeFolderRoute = \"\/theme\/\"\n)\n\ntype Server struct {\n\trepositoryPath string\n\tpathProvider *path.Provider\n\tconfig *config.Config\n\trenderer *renderer.Renderer\n}\n\nfunc New(repositoryPath string, config *config.Config, useTempDir bool) *Server {\n\n\treturn &Server{\n\t\trepositoryPath: repositoryPath,\n\t\tpathProvider: path.NewProvider(repositoryPath, useTempDir),\n\t\tconfig: config,\n\t\trenderer: renderer.New(repositoryPath, config, useTempDir),\n\t}\n\n}\n\nfunc (server *Server) Serve() {\n\n\tserver.renderer.Execute()\n\n\t\/\/ Initialize the routing table\n\tserver.initializeRoutes()\n\n\t\/\/ start the websocket hub\n\tgo h.run()\n\n\t\/\/ register handlers\n\thttp.HandleFunc(ItemHandlerRoute, itemHandler)\n\thttp.HandleFunc(DebugHandlerRoute, indexDebugger)\n\thttp.Handle(WebSocketHandlerRoute, websocket.Handler(webSocketHandler))\n\n\t\/\/ serve theme files\n\tif themeFolder := server.config.ThemeFolder(); util.DirectoryExists(themeFolder) {\n\t\thttp.Handle(ThemeFolderRoute, http.StripPrefix(ThemeFolderRoute, http.FileServer(http.Dir(themeFolder))))\n\t}\n\n\t\/\/ start http server: http\n\thttpBinding := server.getHttpBinding()\n\tfmt.Printf(\"Starting http server %q\\n\", httpBinding)\n\n\tif err := http.ListenAndServe(httpBinding, nil); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Server failed with error: %v\", err)\n\t}\n}\n\nfunc (server *Server) getHttpBinding() string {\n\n\t\/\/ validate the port\n\tport := server.config.Server.Http.Port\n\tif port < 1 || port > math.MaxUint16 {\n\t\tpanic(fmt.Sprintf(\"%q is an invalid value for a port. Ports can only be in the range of %v to %v,\", port, 1, math.MaxUint16))\n\t}\n\n\treturn fmt.Sprintf(\":%v\", port)\n}\n\nfunc (server *Server) initializeRoutes() {\n\n\troutes = make(map[string]string)\n\n\tgo func() {\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase item := <-server.renderer.Rendered:\n\t\t\t\tif item != nil {\n\t\t\t\t\tfmt.Printf(\"Registering item %s\\n\", item)\n\t\t\t\t\tserver.registerItem(item)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n}\n\nfunc (server *Server) registerItem(item *repository.Item) {\n\n\t\/\/ recurse for child items\n\tfor _, child := range item.Childs {\n\t\tserver.registerItem(child)\n\t}\n\n\t\/\/ get the item route and\n\t\/\/ add it to the routing table\n\tserver.registerRoute(item)\n\n\t\/\/ get the file routes and\n\t\/\/ add them to the routing table\n\tfor _, file := range item.Files.Items() {\n\t\tserver.registerRoute(file)\n\t}\n}\n\nfunc (server *Server) registerRoute(pather path.Pather) {\n\n\tif pather == nil {\n\t\tlog.Printf(\"Cannot add a route for an uninitialized item %q.\\n\", pather.Path())\n\t\treturn\n\t}\n\n\troute := server.pathProvider.GetWebRoute(pather)\n\tfilePath := server.pathProvider.GetFilepath(pather)\n\n\tif strings.TrimSpace(route) == \"\" {\n\t\tlog.Println(\"Cannot add an empty route to the routing table.\")\n\t\treturn\n\t}\n\n\troutes[route] = filePath\n}\n\nfunc openUrlInBrowser(url string) {\n\tvar err error\n\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\terr = exec.Command(\"xdg-open\", url).Start()\n\tcase \"windows\":\n\t\terr = exec.Command(`C:\\Windows\\System32\\rundll32.exe`, \"url.dll,FileProtocolHandler\", url).Start()\n\tcase \"darwin\":\n\t\terr = exec.Command(\"open\", url).Start()\n\tdefault:\n\t\tlog.Print(\"unsupported platform\")\n\t}\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n<commit_msg>Reconnected the websocket<commit_after>\/\/ Copyright 2013 Andreas Koch. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage server\n\nimport (\n\t\"code.google.com\/p\/go.net\/websocket\"\n\t\"fmt\"\n\t\"github.com\/andreaskoch\/allmark\/config\"\n\t\"github.com\/andreaskoch\/allmark\/path\"\n\t\"github.com\/andreaskoch\/allmark\/renderer\"\n\t\"github.com\/andreaskoch\/allmark\/repository\"\n\t\"github.com\/andreaskoch\/allmark\/util\"\n\t\"log\"\n\t\"math\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strings\"\n)\n\nvar (\n\troutes = make(map[string]string)\n\n\tuseTempDir = true\n)\n\nconst (\n\n\t\/\/ Dynamic Routes\n\tItemHandlerRoute = \"\/\"\n\tDebugHandlerRoute = \"\/debug\/index\"\n\tWebSocketHandlerRoute = \"\/ws\"\n\n\t\/\/ Static Routes\n\tThemeFolderRoute = \"\/theme\/\"\n)\n\ntype Server struct {\n\trepositoryPath string\n\tpathProvider *path.Provider\n\tconfig *config.Config\n\trenderer *renderer.Renderer\n}\n\nfunc New(repositoryPath string, config *config.Config, useTempDir bool) *Server {\n\n\treturn &Server{\n\t\trepositoryPath: repositoryPath,\n\t\tpathProvider: path.NewProvider(repositoryPath, useTempDir),\n\t\tconfig: config,\n\t\trenderer: renderer.New(repositoryPath, config, useTempDir),\n\t}\n\n}\n\nfunc (server *Server) Serve() {\n\n\t\/\/ start the renderer\n\tserver.renderer.Execute()\n\n\t\/\/ start a change listener\n\tserver.listenForChanges()\n\n\t\/\/ start the websocket hub\n\tgo h.run()\n\n\t\/\/ register handlers\n\thttp.HandleFunc(ItemHandlerRoute, itemHandler)\n\thttp.HandleFunc(DebugHandlerRoute, indexDebugger)\n\thttp.Handle(WebSocketHandlerRoute, websocket.Handler(webSocketHandler))\n\n\t\/\/ serve theme files\n\tif themeFolder := server.config.ThemeFolder(); util.DirectoryExists(themeFolder) {\n\t\thttp.Handle(ThemeFolderRoute, http.StripPrefix(ThemeFolderRoute, http.FileServer(http.Dir(themeFolder))))\n\t}\n\n\t\/\/ start http server: http\n\thttpBinding := server.getHttpBinding()\n\tfmt.Printf(\"Starting http server %q\\n\", httpBinding)\n\n\tif err := http.ListenAndServe(httpBinding, nil); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Server failed with error: %v\", err)\n\t}\n}\n\nfunc (server *Server) getHttpBinding() string {\n\n\t\/\/ validate the port\n\tport := server.config.Server.Http.Port\n\tif port < 1 || port > math.MaxUint16 {\n\t\tpanic(fmt.Sprintf(\"%q is an invalid value for a port. Ports can only be in the range of %v to %v,\", port, 1, math.MaxUint16))\n\t}\n\n\treturn fmt.Sprintf(\":%v\", port)\n}\n\nfunc (server *Server) listenForChanges() {\n\n\tgo func() {\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase item := <-server.renderer.Rendered:\n\t\t\t\tif item != nil {\n\t\t\t\t\t\/\/ register the item\n\t\t\t\t\tserver.registerItem(item)\n\n\t\t\t\t\t\/\/ send update event to connected browsers\n\t\t\t\t\th.broadcast <- UpdateMessage(item.Model)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n}\n\nfunc (server *Server) registerItem(item *repository.Item) {\n\n\t\/\/ recurse for child items\n\tfor _, child := range item.Childs {\n\t\tserver.registerItem(child)\n\t}\n\n\t\/\/ get the item route and\n\t\/\/ add it to the routing table\n\tserver.registerRoute(item)\n\n\t\/\/ get the file routes and\n\t\/\/ add them to the routing table\n\tfor _, file := range item.Files.Items() {\n\t\tserver.registerRoute(file)\n\t}\n}\n\nfunc (server *Server) registerRoute(pather path.Pather) {\n\n\tif pather == nil {\n\t\tlog.Printf(\"Cannot add a route for an uninitialized item %q.\\n\", pather.Path())\n\t\treturn\n\t}\n\n\troute := server.pathProvider.GetWebRoute(pather)\n\tfilePath := server.pathProvider.GetFilepath(pather)\n\n\tif strings.TrimSpace(route) == \"\" {\n\t\tlog.Println(\"Cannot add an empty route to the routing table.\")\n\t\treturn\n\t}\n\n\troutes[route] = filePath\n}\n\nfunc openUrlInBrowser(url string) {\n\tvar err error\n\n\tswitch runtime.GOOS {\n\tcase \"linux\":\n\t\terr = exec.Command(\"xdg-open\", url).Start()\n\tcase \"windows\":\n\t\terr = exec.Command(`C:\\Windows\\System32\\rundll32.exe`, \"url.dll,FileProtocolHandler\", url).Start()\n\tcase \"darwin\":\n\t\terr = exec.Command(\"open\", url).Start()\n\tdefault:\n\t\tlog.Print(\"unsupported platform\")\n\t}\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\/\/ General\n\t\"github.com\/golang\/glog\"\n\t\"flag\"\n\t\"github.com\/iambc\/xerrors\"\n\t\"reflect\"\n\t\"os\"\n\n\t\/\/API\n\t\"net\/http\"\n\t\"encoding\/json\"\n\n\t\/\/DB\n\t\"database\/sql\"\n\t_ \"github.com\/lib\/pq\"\n)\n\n\/*\nTODO:\n2) Add different input\/output formats for the API\n3) Add settings to the boards\n4) Add settings to the threads\n6) quote of the day\n*\/\n\n\ntype image_board_clusters struct {\n Id int\n Descr string\n LongDescr string\n BoardLimitCount int\n}\n\ntype boards struct {\n Id int\n Name string\n Descr string\n ImageBoardClusterId string\n MaxThreadCount int \/\/to be checked in insert thread\n MaxActiveThreadCount int \/\/to be checked in insert thread\n MaxPostsPerThread int \/\/ to be checked in insert thread\n AreAttachmentsAllowed bool \/\/ to be checked in insert post\n PostLimitsReachedActionId int \/\/ to be checked in insert post\n}\n\ntype threads struct{\n Id int\n Name string\n Descr string\n BoardId int\n MaxPostsPerThread int\n AreAttachmentsAllowed bool\n LimitsReachedActionId int\n}\n\ntype thread_posts struct{\n Id int\n Body string\n ThreadId int\n AttachmentUrl int\n}\n\ntype thread_limits_reached_actions struct{\n Id\t int\n Name string\n Descr string\n}\n\ntype api_request struct{\n Status string\n Msg\t *string\n Payload interface{}\n}\n\n\nfunc getBoards(res http.ResponseWriter, req *http.Request) ([]byte, error) {\n if req == nil || res == nil {\n\treturn []byte{}, xerrors.NewSysErr()\n }\n\n values := req.URL.Query()\n api_key := values[`api_key`][0]\n rows, err := dbh.Query(\"select b.id, b.name, b.descr from boards b join image_board_clusters ibc on ibc.id = b.image_board_cluster_id where api_key = $1;\", api_key)\n if err != nil {\n\treturn []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `002`, true)\n }\n defer rows.Close()\n\n var curr_boards []boards\n for rows.Next() {\n\tvar board boards\n\terr = rows.Scan(&board.Id, &board.Name, &board.Descr)\n\tif err != nil {\n\t return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `003`, true)\n\t}\n\tcurr_boards = append(curr_boards, board)\n }\n bytes, err1 := json.Marshal(api_request{\"ok\", nil, &curr_boards})\n if err1 != nil {\n\treturn []byte{}, xerrors.NewUIErr(err1.Error(), err1.Error(), `004`, true)\n }\n return bytes, nil\n}\n\n\nfunc getActiveThreadsForBoard(res http.ResponseWriter, req *http.Request) ([]byte, error) {\n if req == nil || res == nil {\n return []byte{}, xerrors.NewSysErr()\n }\n values := req.URL.Query()\n board_id, is_passed := values[`board_id`]\n if !is_passed {\n\treturn []byte{}, xerrors.NewUIErr(`Invalid params: No board_id given!`, `Invalid params: No board_id given!`, `005`, true)\n }\n\n api_key := values[`api_key`][0]\n rows, err := dbh.Query(`select t.id, t.name from threads t \n\t\t\t\tjoin boards b on b.id = t.board_id \n\t\t\t\tjoin image_board_clusters ibc on ibc.id = b.image_board_cluster_id \n\t\t\t where t.is_active = TRUE and t.board_id = $1 and ibc.api_key = $2;`, board_id[0], api_key)\n if err != nil {\n return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `006`, true)\n }\n defer rows.Close()\n\n var active_threads []threads\n for rows.Next() {\n\tglog.Info(\"Popped new thread\")\n var thread threads\n err = rows.Scan(&thread.Id, &thread.Name)\n if err != nil {\n return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `007`, true)\n }\n active_threads = append(active_threads, thread)\n }\n var bytes []byte\n var err1 error\n if(len(active_threads) == 0){\n errMsg := \"No objects returned.\"\n bytes, err1 = json.Marshal(api_request{\"error\", &errMsg, &active_threads})\n }else {\n bytes, err1 = json.Marshal(api_request{\"ok\", nil, &active_threads})\n }\n\n if err1 != nil {\n return []byte{}, xerrors.NewUIErr(err1.Error(), err1.Error(), `008`, true)\n }\n\n return bytes, nil\n}\n\n\nfunc getPostsForThread(res http.ResponseWriter, req *http.Request) ([]byte, error) {\n if req == nil || res == nil {\n return []byte{}, xerrors.NewSysErr()\n }\n values := req.URL.Query()\n thread_id, is_passed := values[`thread_id`]\n if !is_passed {\n return []byte{},xerrors.NewUIErr(`Invalid params: No thread_id given!`, `Invalid params: No thread_id given!`, `006`, true)\n }\n\n api_key := values[`api_key`][0]\n rows, err := dbh.Query(`select tp.id, tp.body \n\t\t\t from thread_posts tp join threads t on t.id = tp.thread_id \n\t\t\t\t\t\t join boards b on b.id = t.board_id \n\t\t\t\t\t\t join image_board_clusters ibc on ibc.id = b.image_board_cluster_id \n\t\t\t where tp.thread_id = $1 and ibc.api_key = $2;`, thread_id[0], api_key)\n if err != nil {\n\tglog.Error(err)\n return []byte{}, xerrors.NewSysErr()\n }\n defer rows.Close()\n\n var curr_posts []thread_posts\n for rows.Next() {\n\tglog.Info(\"new post for thread with id: \", thread_id[0])\n var curr_post thread_posts\n err = rows.Scan(&curr_post.Id, &curr_post.Body)\n if err != nil {\n return []byte{}, xerrors.NewSysErr()\n }\n curr_posts = append(curr_posts, curr_post)\n }\n\n var bytes []byte\n var err1 error\n if(len(curr_posts) == 0){\n\terrMsg := \"No objects returned.\"\n\tbytes, err1 = json.Marshal(api_request{\"error\", &errMsg, &curr_posts})\n }else {\n\tbytes, err1 = json.Marshal(api_request{\"ok\", nil, &curr_posts})\n }\n\n if err1 != nil {\n return []byte{}, xerrors.NewSysErr()\n }\n\n return bytes, nil\n}\n\n\nfunc addPostToThread(res http.ResponseWriter, req *http.Request) ([]byte,error) {\n if req == nil || res == nil{\n return []byte{}, xerrors.NewSysErr()\n }\n values := req.URL.Query()\n thread_id, is_passed := values[`thread_id`]\n if !is_passed {\n return []byte{}, xerrors.NewUIErr(`Invalid params: No thread_id given!`, `Invalid params: No thread_id given!`, `001`, true)\n }\n\n thread_body_post, is_passed := values[`thread_post_body`]\n if !is_passed {\n return []byte{}, xerrors.NewUIErr(`Invalid params: No thread_post_body given!`, `Invalid params: No thread_post_body given!`, `001`, true)\n }\n \n var is_limit_reached bool\n err := dbh.QueryRow(\"select (select count(*) from thread_posts where thread_id = $1) > max_posts_per_thread from threads where id = $1;\", thread_id[0]).Scan(&is_limit_reached)\n if err != nil {\n\treturn []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `009`, true)\n }\n\n if is_limit_reached {\n\tdbh.QueryRow(\"UPDATE threads set is_active = false where id = $1\", thread_id[0]).Scan()\n\treturn []byte{}, xerrors.NewUIErr(`Thread post limit reached!`, `Thread post limit reached!`, `010`, true)\n }\n\n attachment_urls, is_passed := values[`attachment_url`]\n var attachment_url *string\n if !is_passed{\n\tattachment_url = nil\n }else{\n\tattachment_url = &attachment_urls[0]\n }\n\n _, err = dbh.Query(\"INSERT INTO thread_posts(body, thread_id, attachment_url) VALUES($1, $2, $3)\", thread_body_post[0], thread_id[0], attachment_url)\n\n if err != nil {\n\tglog.Error(err)\n return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `011`, true)\n }\n\n bytes, err1 := json.Marshal(api_request{\"ok\", nil, nil})\n if err1 != nil {\n return []byte{}, xerrors.NewUIErr(err1.Error(), err1.Error(), `012`, true)\n }\n\n return bytes, nil\n}\n\n\nfunc addThread(res http.ResponseWriter, req *http.Request) ([]byte,error) {\n if req == nil || res == nil{\n return []byte{}, xerrors.NewSysErr()\n }\n values := req.URL.Query()\n\n thread_name, is_passed := values[`thread_name`]\n if !is_passed {\n return []byte{}, xerrors.NewUIErr(`Invalid params: No thread_name given!`, `Invalid params: No thread_name given!`, `013`, true)\n }\n\n board_id, is_passed := values[`board_id`]\n if !is_passed {\n return []byte{}, xerrors.NewUIErr(`Invalid params: No board_id given!`, `Invalid params: No board_id given!`, `014`, true)\n }\n\n\n var is_limit_reached bool\n err := dbh.QueryRow(\"select (select count(*) from threads where board_id = $1) > thread_setting_max_thread_count from boards where id = $1;\", board_id[0]).Scan(&is_limit_reached)\n if err != nil {\n\tglog.Error(\"COULD NOT SELECT thread_count\")\n\treturn []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `015`, true)\n }\n if is_limit_reached {\n\treturn []byte{}, xerrors.NewUIErr(`Thread limit reached!`, `Thread limit reached!`, `016`, true)\n }\n\n _, err = dbh.Query(\"INSERT INTO threads(name, board_id, limits_reached_action_id) VALUES($1, $2, 1)\", thread_name[0], board_id[0])\n\n if err != nil {\n\tglog.Error(\"INSERT FAILED\")\n return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `017`, true)\n }\n\n bytes, err1 := json.Marshal(api_request{\"ok\", nil, nil})\n if err1 != nil {\n return []byte{}, xerrors.NewUIErr(err1.Error(), err1.Error(), `018`, true)\n }\n\n return bytes, nil\n}\n\nvar dbConnString = ``\nvar dbh *sql.DB\n\n\/\/ sample usage\nfunc main() {\n flag.Parse()\n\n var err error\n dbConnString = os.Getenv(\"ABC_DB_CONN_STRING\") \/\/ DB will return error if empty string\n dbh, err = sql.Open(\"postgres\", dbConnString)\n if err != nil {\n\tglog.Fatal(err)\n }\n\n commands := map[string]func(http.ResponseWriter, *http.Request) ([]byte, error){\n\t\t\t\t\"getBoards\": getBoards,\n\t\t\t\t\"getActiveThreadsForBoard\": getActiveThreadsForBoard,\n\t\t\t\t\"getPostsForThread\": getPostsForThread,\n\t\t\t\t\"addPostToThread\": addPostToThread,\n\t\t\t\t\"addThread\": addThread,\n\t\t\t }\n\n http.HandleFunc(\"\/api\", func(res http.ResponseWriter, req *http.Request) {\n\t\t\t\t\tvalues := req.URL.Query()\n\t\t\t\t\tcommand, is_passed := values[`command`]\n\t\t\t\t\tif !is_passed {\n\t\t\t\t\t res.Write([]byte(`{\"Status\":\"error\",\"Msg\":\"Paremeter 'command' is undefined.\",\"Payload\":null}`))\n\t\t\t\t\t return\n\t\t\t\t\t}\n\n\n\t\t\t\t\t_, is_passed = values[`api_key`]\n\t\t\t\t\tif !is_passed {\n\t\t\t\t\t res.Write([]byte(`{\"Status\":\"error\",\"Msg\":\"Paremeter 'api_key' is undefined.\",\"Payload\":null}`))\n\t\t\t\t\t return\n\t\t\t\t\t}\n\n\t\t\t\t\t_, is_passed = commands[command[0]]\n\t\t\t\t\tif !is_passed{\n\t\t\t\t\t res.Write([]byte(`{\"Status\":\"error\",\"Msg\":\"No such command exists.\",\"Payload\":null}`))\n\t\t\t\t\t glog.Error(\"command: \", command[0])\n\t\t\t\t\t return\n\t\t\t\t\t}\n\n\t\t\t\t\tres.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t\t\t\t\tbytes, err := commands[command[0]](res, req)\n\n\n\t\t\t\t\tif err != nil{\n\t\t\t\t\t if string(reflect.TypeOf(err).Name()) == `SysErr` {\n\t\t\t\t\t\tres.Write([]byte(`{\"Status\":\"error\",\"Msg\":\"` + err.Error() +`\",\"Payload\":null}`))\n\t\t\t\t\t } else if string(reflect.TypeOf(err).Name()) == `UIErr` {\n\t\t\t\t\t\tres.Write([]byte(`{\"Status\":\"error\",\"Msg\":\"`+ err.Error() +`\",\"Payload\":null}`))\n\t\t\t\t\t } else {\n\t\t\t\t\t\tres.Write([]byte(`{\"Status\":\"error\",\"Msg\":\"Application Error!\",\"Payload\":null}`))\n\t\t\t\t\t }\n\t\t\t\t\t glog.Error(err)\n\t\t\t\t\t return\n\t\t\t\t\t}\n\n\t\t\t\t\tglog.Info(string(bytes))\n\t\t\t\t\tres.Write(bytes)\n })\n\n http.ListenAndServe(`:`+ os.Getenv(\"ABC_SERVER_ENDPOINT_URL\"), nil)\n}\n\n\n<commit_msg>feat: thread posts are seen only if thread is active<commit_after>package main\n\nimport (\n\t\/\/ General\n\t\"github.com\/golang\/glog\"\n\t\"flag\"\n\t\"github.com\/iambc\/xerrors\"\n\t\"reflect\"\n\t\"os\"\n\n\t\/\/API\n\t\"net\/http\"\n\t\"encoding\/json\"\n\n\t\/\/DB\n\t\"database\/sql\"\n\t_ \"github.com\/lib\/pq\"\n)\n\n\/*\nTODO:\n2) Add different input\/output formats for the API\n6) quote of the day\n*\/\n\n\ntype image_board_clusters struct {\n Id int\n Descr string\n LongDescr string\n BoardLimitCount int\n}\n\ntype boards struct {\n Id int\n Name string\n Descr string\n ImageBoardClusterId string\n MaxThreadCount int \/\/to be checked in insert thread\n MaxActiveThreadCount int \/\/to be checked in insert thread\n MaxPostsPerThread int \/\/ to be checked in insert thread\n AreAttachmentsAllowed bool \/\/ to be checked in insert post\n PostLimitsReachedActionId int \/\/ to be checked in insert post\n}\n\ntype threads struct{\n Id int\n Name string\n Descr string\n BoardId int\n MaxPostsPerThread int\n AreAttachmentsAllowed bool\n LimitsReachedActionId int\n}\n\ntype thread_posts struct{\n Id int\n Body string\n ThreadId int\n AttachmentUrl int\n}\n\ntype thread_limits_reached_actions struct{\n Id\t int\n Name string\n Descr string\n}\n\ntype api_request struct{\n Status string\n Msg\t *string\n Payload interface{}\n}\n\n\nfunc getBoards(res http.ResponseWriter, req *http.Request) ([]byte, error) {\n if req == nil || res == nil {\n\treturn []byte{}, xerrors.NewSysErr()\n }\n\n values := req.URL.Query()\n api_key := values[`api_key`][0]\n rows, err := dbh.Query(\"select b.id, b.name, b.descr from boards b join image_board_clusters ibc on ibc.id = b.image_board_cluster_id where api_key = $1;\", api_key)\n if err != nil {\n\treturn []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `002`, true)\n }\n defer rows.Close()\n\n var curr_boards []boards\n for rows.Next() {\n\tvar board boards\n\terr = rows.Scan(&board.Id, &board.Name, &board.Descr)\n\tif err != nil {\n\t return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `003`, true)\n\t}\n\tcurr_boards = append(curr_boards, board)\n }\n bytes, err1 := json.Marshal(api_request{\"ok\", nil, &curr_boards})\n if err1 != nil {\n\treturn []byte{}, xerrors.NewUIErr(err1.Error(), err1.Error(), `004`, true)\n }\n return bytes, nil\n}\n\n\nfunc getActiveThreadsForBoard(res http.ResponseWriter, req *http.Request) ([]byte, error) {\n if req == nil || res == nil {\n return []byte{}, xerrors.NewSysErr()\n }\n values := req.URL.Query()\n board_id, is_passed := values[`board_id`]\n if !is_passed {\n\treturn []byte{}, xerrors.NewUIErr(`Invalid params: No board_id given!`, `Invalid params: No board_id given!`, `005`, true)\n }\n\n api_key := values[`api_key`][0]\n rows, err := dbh.Query(`select t.id, t.name from threads t \n\t\t\t\tjoin boards b on b.id = t.board_id \n\t\t\t\tjoin image_board_clusters ibc on ibc.id = b.image_board_cluster_id \n\t\t\t where t.is_active = TRUE and t.board_id = $1 and ibc.api_key = $2;`, board_id[0], api_key)\n if err != nil {\n return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `006`, true)\n }\n defer rows.Close()\n\n var active_threads []threads\n for rows.Next() {\n\tglog.Info(\"Popped new thread\")\n var thread threads\n err = rows.Scan(&thread.Id, &thread.Name)\n if err != nil {\n return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `007`, true)\n }\n active_threads = append(active_threads, thread)\n }\n var bytes []byte\n var err1 error\n if(len(active_threads) == 0){\n errMsg := \"No objects returned.\"\n bytes, err1 = json.Marshal(api_request{\"error\", &errMsg, &active_threads})\n }else {\n bytes, err1 = json.Marshal(api_request{\"ok\", nil, &active_threads})\n }\n\n if err1 != nil {\n return []byte{}, xerrors.NewUIErr(err1.Error(), err1.Error(), `008`, true)\n }\n\n return bytes, nil\n}\n\n\nfunc getPostsForThread(res http.ResponseWriter, req *http.Request) ([]byte, error) {\n if req == nil || res == nil {\n return []byte{}, xerrors.NewSysErr()\n }\n values := req.URL.Query()\n thread_id, is_passed := values[`thread_id`]\n if !is_passed {\n return []byte{},xerrors.NewUIErr(`Invalid params: No thread_id given!`, `Invalid params: No thread_id given!`, `006`, true)\n }\n\n api_key := values[`api_key`][0]\n rows, err := dbh.Query(`select tp.id, tp.body \n\t\t\t from thread_posts tp join threads t on t.id = tp.thread_id \n\t\t\t\t\t\t join boards b on b.id = t.board_id \n\t\t\t\t\t\t join image_board_clusters ibc on ibc.id = b.image_board_cluster_id \n\t\t\t where tp.thread_id = $1 and ibc.api_key = $2 and t.is_active = true;`, thread_id[0], api_key)\n if err != nil {\n\tglog.Error(err)\n return []byte{}, xerrors.NewSysErr()\n }\n defer rows.Close()\n\n var curr_posts []thread_posts\n for rows.Next() {\n\tglog.Info(\"new post for thread with id: \", thread_id[0])\n var curr_post thread_posts\n err = rows.Scan(&curr_post.Id, &curr_post.Body)\n if err != nil {\n return []byte{}, xerrors.NewSysErr()\n }\n curr_posts = append(curr_posts, curr_post)\n }\n\n var bytes []byte\n var err1 error\n if(len(curr_posts) == 0){\n\terrMsg := \"No objects returned.\"\n\tbytes, err1 = json.Marshal(api_request{\"error\", &errMsg, &curr_posts})\n }else {\n\tbytes, err1 = json.Marshal(api_request{\"ok\", nil, &curr_posts})\n }\n\n if err1 != nil {\n return []byte{}, xerrors.NewSysErr()\n }\n\n return bytes, nil\n}\n\n\nfunc addPostToThread(res http.ResponseWriter, req *http.Request) ([]byte,error) {\n if req == nil || res == nil{\n return []byte{}, xerrors.NewSysErr()\n }\n values := req.URL.Query()\n thread_id, is_passed := values[`thread_id`]\n if !is_passed {\n return []byte{}, xerrors.NewUIErr(`Invalid params: No thread_id given!`, `Invalid params: No thread_id given!`, `001`, true)\n }\n\n thread_body_post, is_passed := values[`thread_post_body`]\n if !is_passed {\n return []byte{}, xerrors.NewUIErr(`Invalid params: No thread_post_body given!`, `Invalid params: No thread_post_body given!`, `001`, true)\n }\n \n var is_limit_reached bool\n err := dbh.QueryRow(\"select (select count(*) from thread_posts where thread_id = $1) > max_posts_per_thread from threads where id = $1;\", thread_id[0]).Scan(&is_limit_reached)\n if err != nil {\n\treturn []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `009`, true)\n }\n\n if is_limit_reached {\n\tdbh.QueryRow(\"UPDATE threads set is_active = false where id = $1\", thread_id[0]).Scan()\n\treturn []byte{}, xerrors.NewUIErr(`Thread post limit reached!`, `Thread post limit reached!`, `010`, true)\n }\n\n attachment_urls, is_passed := values[`attachment_url`]\n var attachment_url *string\n if !is_passed{\n\tattachment_url = nil\n }else{\n\tattachment_url = &attachment_urls[0]\n }\n\n _, err = dbh.Query(\"INSERT INTO thread_posts(body, thread_id, attachment_url) VALUES($1, $2, $3)\", thread_body_post[0], thread_id[0], attachment_url)\n\n if err != nil {\n\tglog.Error(err)\n return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `011`, true)\n }\n\n bytes, err1 := json.Marshal(api_request{\"ok\", nil, nil})\n if err1 != nil {\n return []byte{}, xerrors.NewUIErr(err1.Error(), err1.Error(), `012`, true)\n }\n\n return bytes, nil\n}\n\n\nfunc addThread(res http.ResponseWriter, req *http.Request) ([]byte,error) {\n if req == nil || res == nil{\n return []byte{}, xerrors.NewSysErr()\n }\n values := req.URL.Query()\n\n thread_name, is_passed := values[`thread_name`]\n if !is_passed {\n return []byte{}, xerrors.NewUIErr(`Invalid params: No thread_name given!`, `Invalid params: No thread_name given!`, `013`, true)\n }\n\n board_id, is_passed := values[`board_id`]\n if !is_passed {\n return []byte{}, xerrors.NewUIErr(`Invalid params: No board_id given!`, `Invalid params: No board_id given!`, `014`, true)\n }\n\n\n var is_limit_reached bool\n err := dbh.QueryRow(\"select (select count(*) from threads where board_id = $1) > thread_setting_max_thread_count from boards where id = $1;\", board_id[0]).Scan(&is_limit_reached)\n if err != nil {\n\tglog.Error(\"COULD NOT SELECT thread_count\")\n\treturn []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `015`, true)\n }\n if is_limit_reached {\n\treturn []byte{}, xerrors.NewUIErr(`Thread limit reached!`, `Thread limit reached!`, `016`, true)\n }\n\n _, err = dbh.Query(\"INSERT INTO threads(name, board_id, limits_reached_action_id, max_posts_per_thread) VALUES($1, $2, 1, 10)\", thread_name[0], board_id[0])\n\n if err != nil {\n\tglog.Error(\"INSERT FAILED\")\n return []byte{}, xerrors.NewUIErr(err.Error(), err.Error(), `017`, true)\n }\n\n bytes, err1 := json.Marshal(api_request{\"ok\", nil, nil})\n if err1 != nil {\n return []byte{}, xerrors.NewUIErr(err1.Error(), err1.Error(), `018`, true)\n }\n\n return bytes, nil\n}\n\nvar dbConnString = ``\nvar dbh *sql.DB\n\n\/\/ sample usage\nfunc main() {\n flag.Parse()\n\n var err error\n dbConnString = os.Getenv(\"ABC_DB_CONN_STRING\") \/\/ DB will return error if empty string\n dbh, err = sql.Open(\"postgres\", dbConnString)\n if err != nil {\n\tglog.Fatal(err)\n }\n\n commands := map[string]func(http.ResponseWriter, *http.Request) ([]byte, error){\n\t\t\t\t\"getBoards\": getBoards,\n\t\t\t\t\"getActiveThreadsForBoard\": getActiveThreadsForBoard,\n\t\t\t\t\"getPostsForThread\": getPostsForThread,\n\t\t\t\t\"addPostToThread\": addPostToThread,\n\t\t\t\t\"addThread\": addThread,\n\t\t\t }\n\n http.HandleFunc(\"\/api\", func(res http.ResponseWriter, req *http.Request) {\n\t\t\t\t\tvalues := req.URL.Query()\n\t\t\t\t\tcommand, is_passed := values[`command`]\n\t\t\t\t\tif !is_passed {\n\t\t\t\t\t res.Write([]byte(`{\"Status\":\"error\",\"Msg\":\"Paremeter 'command' is undefined.\",\"Payload\":null}`))\n\t\t\t\t\t return\n\t\t\t\t\t}\n\n\n\t\t\t\t\t_, is_passed = values[`api_key`]\n\t\t\t\t\tif !is_passed {\n\t\t\t\t\t res.Write([]byte(`{\"Status\":\"error\",\"Msg\":\"Paremeter 'api_key' is undefined.\",\"Payload\":null}`))\n\t\t\t\t\t return\n\t\t\t\t\t}\n\n\t\t\t\t\t_, is_passed = commands[command[0]]\n\t\t\t\t\tif !is_passed{\n\t\t\t\t\t res.Write([]byte(`{\"Status\":\"error\",\"Msg\":\"No such command exists.\",\"Payload\":null}`))\n\t\t\t\t\t glog.Error(\"command: \", command[0])\n\t\t\t\t\t return\n\t\t\t\t\t}\n\n\t\t\t\t\tres.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\t\t\t\t\tbytes, err := commands[command[0]](res, req)\n\n\n\t\t\t\t\tif err != nil{\n\t\t\t\t\t if string(reflect.TypeOf(err).Name()) == `SysErr` {\n\t\t\t\t\t\tres.Write([]byte(`{\"Status\":\"error\",\"Msg\":\"` + err.Error() +`\",\"Payload\":null}`))\n\t\t\t\t\t } else if string(reflect.TypeOf(err).Name()) == `UIErr` {\n\t\t\t\t\t\tres.Write([]byte(`{\"Status\":\"error\",\"Msg\":\"`+ err.Error() +`\",\"Payload\":null}`))\n\t\t\t\t\t } else {\n\t\t\t\t\t\tres.Write([]byte(`{\"Status\":\"error\",\"Msg\":\"Application Error!\",\"Payload\":null}`))\n\t\t\t\t\t }\n\t\t\t\t\t glog.Error(err)\n\t\t\t\t\t return\n\t\t\t\t\t}\n\n\t\t\t\t\tglog.Info(string(bytes))\n\t\t\t\t\tres.Write(bytes)\n })\n\n http.ListenAndServe(`:`+ os.Getenv(\"ABC_SERVER_ENDPOINT_URL\"), nil)\n}\n\n\n<|endoftext|>"} {"text":"<commit_before><commit_msg>fix import<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Package server implements the Tracer server.\npackage server\n\nimport (\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/tracer\/tracer\"\n)\n\n\/\/ A StorageTransportEngine returns an instance of a storage transport.\ntype StorageTransportEngine func(srv *Server, conf map[string]interface{}) (StorageTransport, error)\n\n\/\/ A QueryTransportEngine returns an instance of a query transport.\ntype QueryTransportEngine func(srv *Server, conf map[string]interface{}) (QueryTransport, error)\n\n\/\/ A StorageEngine returns an instance of a storage.\ntype StorageEngine func(conf map[string]interface{}) (Storage, error)\n\nvar storageTransportEngines = map[string]StorageTransportEngine{}\nvar queryTransportEngines = map[string]QueryTransportEngine{}\nvar storageEngines = map[string]StorageEngine{}\n\n\/\/ RegisterStorageTransport registers a storage transport.\nfunc RegisterStorageTransport(name string, engine StorageTransportEngine) {\n\tstorageTransportEngines[name] = engine\n}\n\n\/\/ GetStorageTransport returns a storage transport by name.\nfunc GetStorageTransport(name string) (StorageTransportEngine, bool) {\n\ttransport, ok := storageTransportEngines[name]\n\treturn transport, ok\n}\n\n\/\/ RegisterQueryTransport registers a query transport.\nfunc RegisterQueryTransport(name string, engine QueryTransportEngine) {\n\tqueryTransportEngines[name] = engine\n}\n\n\/\/ GetQueryTransport returns a query transport by name.\nfunc GetQueryTransport(name string) (QueryTransportEngine, bool) {\n\ttransport, ok := queryTransportEngines[name]\n\treturn transport, ok\n}\n\n\/\/ RegisterStorage registers a storage engine.\nfunc RegisterStorage(name string, engine StorageEngine) {\n\tstorageEngines[name] = engine\n}\n\n\/\/ GetStorage returns a storage engine by name.\nfunc GetStorage(name string) (StorageEngine, bool) {\n\tstorer, ok := storageEngines[name]\n\treturn storer, ok\n}\n\n\/\/ A StorageTransport accepts spans via some protocol and sends them\n\/\/ to a Storer.\ntype StorageTransport interface {\n\t\/\/ Start starts the transport.\n\tStart() error\n}\n\n\/\/ QueryTransport accepts requests via some protocol and answers them.\ntype QueryTransport interface {\n\t\/\/ Start starts the transport.\n\tStart() error\n}\n\n\/\/ Storage allows storing and querying spans.\ntype Storage interface {\n\ttracer.Storer\n\tQueryer\n}\n\n\/\/ A Purger can delete all traces starting before a certain date.\ntype Purger interface {\n\tPurge(before time.Time) error\n}\n\n\/\/ A Queryer is a backend that allows fetching traces and spans by ID\n\/\/ or via a more advanced query.\ntype Queryer interface {\n\t\/\/ TraceByID returns a trace with a specific ID.\n\tTraceByID(id uint64) (tracer.RawTrace, error)\n\t\/\/ SpanByID returns a span with a specific ID.\n\tSpanByID(id uint64) (tracer.RawSpan, error)\n\t\/\/ QueryTraces returns all traces that match a query.\n\tQueryTraces(q Query) ([]tracer.RawTrace, error)\n\n\tServices() ([]string, error)\n\t\/\/ TODO(dh): The current Spans function only really exists because\n\t\/\/ of the zipkin UI. We might want a QuerySpans instead.\n\tSpans(service string) ([]string, error)\n\tDependencies() ([]Dependency, error)\n}\n\n\/\/ Dependency describes the dependency of one service on another.\ntype Dependency struct {\n\tParent string\n\tChild string\n\tCount uint64\n}\n\n\/\/ QueryTag describes a single tag or log entry that should be queried\n\/\/ for.\ntype QueryTag struct {\n\t\/\/ The key of the tag.\n\tKey string\n\t\/\/ The value of the tag.\n\tValue string\n\t\/\/ Whether the value should be checked for.\n\tCheckValue bool\n}\n\n\/\/ A Query describes the various conditionals of a query for a trace.\n\/\/\n\/\/ All conditions are ANDed together. Zero values are understood as\n\/\/ the lack of a constraint.\ntype Query struct {\n\t\/\/ Only return traces that started at or after this time.\n\tStartTime time.Time\n\t\/\/ Only return traces that finished before or at this time.\n\tFinishTime time.Time\n\t\/\/ Only return traces where a span has this operation name.\n\tOperationName string\n\t\/\/ Only return traces that lasted at least this long.\n\tMinDuration time.Duration\n\t\/\/ Only return traces that lasted at most this long.\n\tMaxDuration time.Duration\n\t\/\/ Only return traces where all spans combined have all of these\n\t\/\/ tags.\n\tAndTags []QueryTag\n\t\/\/ Only return traces where all spans combined have at least one\n\t\/\/ of these tags.\n\tOrTags []QueryTag\n\t\/\/ How many traces to return. Zero means no limit.\n\tNum int\n}\n\n\/\/ Server is an instance of the Tracer application.\ntype Server struct {\n\tStorage Storage\n\tStorageTransport StorageTransport\n\tQueryTransports []QueryTransport\n}\n\ntype errors struct {\n\terrs []error\n}\n\nfunc (errs errors) Error() string {\n\tvar s []string\n\tfor _, err := range errs.errs {\n\t\ts = append(s, err.Error())\n\t}\n\treturn strings.Join(s, \"\\n\")\n}\n\nfunc (srv *Server) Start() error {\n\terrs := make(chan error)\n\tgo func() {\n\t\terrs <- srv.StorageTransport.Start()\n\t}()\n\tfor _, t := range srv.QueryTransports {\n\t\tt := t\n\t\tgo func() {\n\t\t\terrs <- t.Start()\n\t\t}()\n\t}\n\tvar out errors\n\tfor i := 0; i < len(srv.QueryTransports)+1; i++ {\n\t\tif err := <-errs; err != nil {\n\t\t\tout.errs = append(out.errs, err)\n\t\t}\n\t}\n\tif len(out.errs) == 0 {\n\t\treturn nil\n\t}\n\treturn out\n}\n<commit_msg>Document Queryer.Services and Queryer.Dependencies<commit_after>\/\/ Package server implements the Tracer server.\npackage server\n\nimport (\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/tracer\/tracer\"\n)\n\n\/\/ A StorageTransportEngine returns an instance of a storage transport.\ntype StorageTransportEngine func(srv *Server, conf map[string]interface{}) (StorageTransport, error)\n\n\/\/ A QueryTransportEngine returns an instance of a query transport.\ntype QueryTransportEngine func(srv *Server, conf map[string]interface{}) (QueryTransport, error)\n\n\/\/ A StorageEngine returns an instance of a storage.\ntype StorageEngine func(conf map[string]interface{}) (Storage, error)\n\nvar storageTransportEngines = map[string]StorageTransportEngine{}\nvar queryTransportEngines = map[string]QueryTransportEngine{}\nvar storageEngines = map[string]StorageEngine{}\n\n\/\/ RegisterStorageTransport registers a storage transport.\nfunc RegisterStorageTransport(name string, engine StorageTransportEngine) {\n\tstorageTransportEngines[name] = engine\n}\n\n\/\/ GetStorageTransport returns a storage transport by name.\nfunc GetStorageTransport(name string) (StorageTransportEngine, bool) {\n\ttransport, ok := storageTransportEngines[name]\n\treturn transport, ok\n}\n\n\/\/ RegisterQueryTransport registers a query transport.\nfunc RegisterQueryTransport(name string, engine QueryTransportEngine) {\n\tqueryTransportEngines[name] = engine\n}\n\n\/\/ GetQueryTransport returns a query transport by name.\nfunc GetQueryTransport(name string) (QueryTransportEngine, bool) {\n\ttransport, ok := queryTransportEngines[name]\n\treturn transport, ok\n}\n\n\/\/ RegisterStorage registers a storage engine.\nfunc RegisterStorage(name string, engine StorageEngine) {\n\tstorageEngines[name] = engine\n}\n\n\/\/ GetStorage returns a storage engine by name.\nfunc GetStorage(name string) (StorageEngine, bool) {\n\tstorer, ok := storageEngines[name]\n\treturn storer, ok\n}\n\n\/\/ A StorageTransport accepts spans via some protocol and sends them\n\/\/ to a Storer.\ntype StorageTransport interface {\n\t\/\/ Start starts the transport.\n\tStart() error\n}\n\n\/\/ QueryTransport accepts requests via some protocol and answers them.\ntype QueryTransport interface {\n\t\/\/ Start starts the transport.\n\tStart() error\n}\n\n\/\/ Storage allows storing and querying spans.\ntype Storage interface {\n\ttracer.Storer\n\tQueryer\n}\n\n\/\/ A Purger can delete all traces starting before a certain date.\ntype Purger interface {\n\tPurge(before time.Time) error\n}\n\n\/\/ A Queryer is a backend that allows fetching traces and spans by ID\n\/\/ or via a more advanced query.\ntype Queryer interface {\n\t\/\/ TraceByID returns a trace with a specific ID.\n\tTraceByID(id uint64) (tracer.RawTrace, error)\n\t\/\/ SpanByID returns a span with a specific ID.\n\tSpanByID(id uint64) (tracer.RawSpan, error)\n\t\/\/ QueryTraces returns all traces that match a query.\n\tQueryTraces(q Query) ([]tracer.RawTrace, error)\n\n\t\/\/ Services returns a list of all services.\n\tServices() ([]string, error)\n\t\/\/ TODO(dh): The current Spans function only really exists because\n\t\/\/ of the zipkin UI. We might want a QuerySpans instead.\n\tSpans(service string) ([]string, error)\n\t\/\/ Dependencies returns the dependencies between services.\n\tDependencies() ([]Dependency, error)\n}\n\n\/\/ Dependency describes the dependency of one service on another.\ntype Dependency struct {\n\tParent string\n\tChild string\n\tCount uint64\n}\n\n\/\/ QueryTag describes a single tag or log entry that should be queried\n\/\/ for.\ntype QueryTag struct {\n\t\/\/ The key of the tag.\n\tKey string\n\t\/\/ The value of the tag.\n\tValue string\n\t\/\/ Whether the value should be checked for.\n\tCheckValue bool\n}\n\n\/\/ A Query describes the various conditionals of a query for a trace.\n\/\/\n\/\/ All conditions are ANDed together. Zero values are understood as\n\/\/ the lack of a constraint.\ntype Query struct {\n\t\/\/ Only return traces that started at or after this time.\n\tStartTime time.Time\n\t\/\/ Only return traces that finished before or at this time.\n\tFinishTime time.Time\n\t\/\/ Only return traces where a span has this operation name.\n\tOperationName string\n\t\/\/ Only return traces that lasted at least this long.\n\tMinDuration time.Duration\n\t\/\/ Only return traces that lasted at most this long.\n\tMaxDuration time.Duration\n\t\/\/ Only return traces where all spans combined have all of these\n\t\/\/ tags.\n\tAndTags []QueryTag\n\t\/\/ Only return traces where all spans combined have at least one\n\t\/\/ of these tags.\n\tOrTags []QueryTag\n\t\/\/ How many traces to return. Zero means no limit.\n\tNum int\n}\n\n\/\/ Server is an instance of the Tracer application.\ntype Server struct {\n\tStorage Storage\n\tStorageTransport StorageTransport\n\tQueryTransports []QueryTransport\n}\n\ntype errors struct {\n\terrs []error\n}\n\nfunc (errs errors) Error() string {\n\tvar s []string\n\tfor _, err := range errs.errs {\n\t\ts = append(s, err.Error())\n\t}\n\treturn strings.Join(s, \"\\n\")\n}\n\nfunc (srv *Server) Start() error {\n\terrs := make(chan error)\n\tgo func() {\n\t\terrs <- srv.StorageTransport.Start()\n\t}()\n\tfor _, t := range srv.QueryTransports {\n\t\tt := t\n\t\tgo func() {\n\t\t\terrs <- t.Start()\n\t\t}()\n\t}\n\tvar out errors\n\tfor i := 0; i < len(srv.QueryTransports)+1; i++ {\n\t\tif err := <-errs; err != nil {\n\t\t\tout.errs = append(out.errs, err)\n\t\t}\n\t}\n\tif len(out.errs) == 0 {\n\t\treturn nil\n\t}\n\treturn out\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Server is the base tcpez server. It sets up a tcp listener on an address,\n\/\/ and given a RequestHandler, it parses the tcpez protocol format and turns\n\/\/ it into individual request\/responses. Each connection is handled on a\n\/\/ seperate goroutine and pipelined requests are first parsed then farmed\n\/\/ to seperate goroutines. Pipelined requests from the client are handled\n\/\/ seamlessly this way, each seperate request is passed to its own RequestHandler\n\/\/ with its own Span.\npackage tcpez\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"github.com\/op\/go-logging\"\n\t\"io\"\n\t\"net\"\n\t\"sync\"\n)\n\nvar log = logging.MustGetLogger(\"tcpez\")\nvar LogFormat = logging.MustStringFormatter(\"%{time:2006-01-02T15:04:05.999999999Z07:00} %{level} [%{module}] %{message}\")\n\nfunc init() {\n\tlogging.SetLevel(logging.INFO, \"tcpez\")\n\tlogging.SetFormatter(LogFormat)\n}\n\n\/\/ Server is the base struct that wraps the tcp listener and allows\n\/\/ for setting the RequestHandler that takes each request and returns\n\/\/ an response\ntype Server struct {\n\t\/\/ Address is the string address that the tcp listener is\n\t\/\/ bound to\n\tAddress string\n\t\/\/ Handler is a value that responds to the RequestHandler\n\t\/\/ interface.\n\tHandler RequestHandler\n\t\/\/ StatsRecorder is the value that delivers stats to a collection\n\t\/\/ agent. This is the DebugStatsRecorder by default (does nothing)\n\t\/\/ but can be swapped to send to StatsD. This is passed to each\n\t\/\/ Span created and passed to the RequestHandler\n\tStats StatsRecorder\n\t\/\/ UUIDGenerator generates UUIDs for each Span, by default this uses\n\t\/\/ a simple hash function but can be swapped out for something more\n\t\/\/ complex (a vector-clock style UUID generator for example)\n\tUUIDGenerator UUIDGenerator\n\n\tconn *net.TCPListener\n\tisClosed bool\n}\n\n\/\/ RequestHandler is the basic interface for setting up the request handling\n\/\/ logic of a tcpez server. The server handles all the request parsing and setup\n\/\/ as well as the response encoding. All you have to do to create a working server\n\/\/ is create an object that has a Respond() method that takes a byte slice (which\n\/\/ is the request) and a Span (which allows you to track timings and meta data\n\/\/ through the request) and then it returns a byte slice which is the response.\n\/\/\n\/\/ type MyHandler struct\n\/\/\n\/\/ func (h *MyHandler) Respond(req []byte, span *tcpez.Span) (res []byte, err error) {\n\/\/ if string(req) == \"PING\" {\n\/\/ span.Attr(\"command\", \"PING\")\n\/\/ return []byte{\"PONG\"}, nil\n\/\/ }\n\/\/ }\n\/\/\ntype RequestHandler interface {\n\tRespond([]byte, *Span) ([]byte, error)\n}\n\n\/\/ NewServer is the tcpez server intializer. It only requires two parameters,\n\/\/ an address to bind to (same format as net.ListenTCP) and a RequestHandler\n\/\/ which serves the requests.\nfunc NewServer(address string, handler RequestHandler) (s *Server, err error) {\n\ttcpAddr, err := net.ResolveTCPAddr(\"tcp\", address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tl, err := net.ListenTCP(\"tcp\", tcpAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Server{Address: address, conn: l, Handler: handler, Stats: new(DebugStatsRecorder), UUIDGenerator: DefaultUUIDGenerator}, nil\n}\n\n\/\/ Start starts the connection handling and request processing loop.\n\/\/ This is a blocking operation and can be started in a goroutine.\nfunc (s *Server) Start() {\n\tlog.Debug(\"Listening on %s\", s.conn.Addr().String())\n\tfor {\n\t\tif s.isClosed == true {\n\t\t\tbreak\n\t\t}\n\t\tclientConn, err := s.conn.Accept()\n\t\tif err != nil {\n\t\t\tlog.Warning(err.Error())\n\t\t\tbreak\n\t\t}\n\t\tgo s.handle(clientConn)\n\t}\n\tlog.Debug(\"Closing %s\", s.conn.Addr().String())\n}\n\n\/\/ Close closes the server listener to any more connections\nfunc (s *Server) Close() (err error) {\n\tif s.isClosed == false {\n\t\terr = s.conn.Close()\n\t\ts.isClosed = true\n\t\treturn\n\t}\n\treturn errors.New(\"Closing already closed connection\")\n}\n\nfunc (s *Server) handle(clientConn net.Conn) {\n\tlog.Debug(\"[tcpez] New client(%s)\", clientConn.RemoteAddr())\n\tfor {\n\t\theader, response, err := s.readHeaderAndHandleRequest(clientConn)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\t\/\/ EOF the client has disconnected\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlog.Error(err.Error())\n\t\t\ts.Stats.Increment(\"operation.failure\")\n\t\t\treturn\n\t\t}\n\t\terr = s.sendResponse(clientConn, header, response)\n\t\tif err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t\ts.Stats.Increment(\"operation.failure\")\n\t\t\treturn\n\t\t}\n\t\ts.Stats.Increment(\"operation.success\")\n\t}\n}\n\nfunc (s *Server) readHeaderAndHandleRequest(buf io.Reader) (header int32, response []byte, err error) {\n\tvar size int32\n\terr = binary.Read(buf, binary.BigEndian, &size)\n\tif err != nil {\n\t\treturn 0, nil, err\n\t}\n\tif size < 0 {\n\t\t\/\/ this is a pipelined request\n\t\tvar wg sync.WaitGroup\n\t\tcount := -size\n\t\trequests := make([][]byte, count)\n\t\tresponses := make([][]byte, count)\n\t\tfor r := 0; int32(r) < count; r++ {\n\t\t\trequest, err := s.parseRequest(buf, 0)\n\t\t\tif err == nil {\n\t\t\t\trequests[r] = request\n\t\t\t\twg.Add(1)\n\t\t\t\tgo func(index int) {\n\t\t\t\t\tres, err := s.handleRequest(requests[index], true)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tresponses[index] = res\n\t\t\t\t\t}\n\t\t\t\t\twg.Done()\n\t\t\t\t}(r)\n\t\t\t}\n\t\t}\n\t\twg.Wait()\n\t\toutput := bytes.NewBuffer(nil)\n\t\tfor j := 0; int32(j) < count; j++ {\n\t\t\tlength := int32(len(responses[j]))\n\t\t\terr = binary.Write(output, binary.BigEndian, length)\n\t\t\tif err == nil {\n\t\t\t\toutput.Write(responses[j])\n\t\t\t}\n\t\t}\n\t\treturn int32(-count), output.Bytes(), err\n\t} else {\n\t\trequest, err := s.parseRequest(buf, size)\n\t\tif err != nil {\n\t\t\treturn 0, nil, err\n\t\t}\n\t\tresponse, err := s.handleRequest(request, false)\n\t\tif err != nil {\n\t\t\treturn 0, nil, err\n\t\t}\n\t\treturn int32(len(response)), response, nil\n\t}\n\treturn\n}\n\nfunc (s *Server) sendResponse(w io.Writer, header int32, data []byte) (err error) {\n\terr = binary.Write(w, binary.BigEndian, header)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(data)\n\treturn\n}\n\nfunc (s *Server) parseRequest(buf io.Reader, size int32) (request []byte, err error) {\n\tif size == int32(0) {\n\t\terr = binary.Read(buf, binary.BigEndian, &size)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\trequest = make([]byte, size)\n\t_, err = io.ReadFull(buf, request)\n\tlog.Debug(\"Server Request: %s\", request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn\n}\n\nfunc (s *Server) handleRequest(request []byte, multi bool) (response []byte, err error) {\n\tspan := NewSpan(s.UUIDGenerator())\n\tif multi == true {\n\t\tspan.Attr(\"multi\", \"true\")\n\t}\n\tspan.Stats = s.Stats\n\tspan.Start(\"duration\")\n\tresponse, err = s.Handler.Respond(request, span)\n\tspan.Finish(\"duration\")\n\tlog.Info(\"%s\", span.JSON())\n\tspan.Record()\n\treturn\n}\n<commit_msg>Implement read timeouts on client connections (at 5 mins)<commit_after>\/\/ Server is the base tcpez server. It sets up a tcp listener on an address,\n\/\/ and given a RequestHandler, it parses the tcpez protocol format and turns\n\/\/ it into individual request\/responses. Each Connection is handled on a\n\/\/ seperate goroutine and pipelined requests are first parsed then farmed\n\/\/ to seperate goroutines. Pipelined requests from the client are handled\n\/\/ seamlessly this way, each seperate request is passed to its own RequestHandler\n\/\/ with its own Span.\npackage tcpez\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"github.com\/op\/go-logging\"\n\t\"io\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar log = logging.MustGetLogger(\"tcpez\")\nvar LogFormat = logging.MustStringFormatter(\"%{time:2006-01-02T15:04:05.999999999Z07:00} %{level} [%{module}] %{message}\")\n\nfunc init() {\n\tlogging.SetLevel(logging.INFO, \"tcpez\")\n\tlogging.SetFormatter(LogFormat)\n}\n\n\/\/ Server is the base struct that wraps the tcp listener and allows\n\/\/ for setting the RequestHandler that takes each request and returns\n\/\/ an response\ntype Server struct {\n\t\/\/ Address is the string address that the tcp listener is\n\t\/\/ bound to\n\tAddress string\n\t\/\/ Handler is a value that responds to the RequestHandler\n\t\/\/ interface.\n\tHandler RequestHandler\n\t\/\/ StatsRecorder is the value that delivers stats to a collection\n\t\/\/ agent. This is the DebugStatsRecorder by default (does nothing)\n\t\/\/ but can be swapped to send to StatsD. This is passed to each\n\t\/\/ Span created and passed to the RequestHandler\n\tStats StatsRecorder\n\t\/\/ UUIDGenerator generates UUIDs for each Span, by default this uses\n\t\/\/ a simple hash function but can be swapped out for something more\n\t\/\/ complex (a vector-clock style UUID generator for example)\n\tUUIDGenerator UUIDGenerator\n\n\t\/\/ The underlying TCP listener, access if you need to set timeouts, etc\n\tConn *net.TCPListener\n\tisClosed bool\n}\n\n\/\/ RequestHandler is the basic interface for setting up the request handling\n\/\/ logic of a tcpez server. The server handles all the request parsing and setup\n\/\/ as well as the response encoding. All you have to do to create a working server\n\/\/ is create an object that has a Respond() method that takes a byte slice (which\n\/\/ is the request) and a Span (which allows you to track timings and meta data\n\/\/ through the request) and then it returns a byte slice which is the response.\n\/\/\n\/\/ type MyHandler struct\n\/\/\n\/\/ func (h *MyHandler) Respond(req []byte, span *tcpez.Span) (res []byte, err error) {\n\/\/ if string(req) == \"PING\" {\n\/\/ span.Attr(\"command\", \"PING\")\n\/\/ return []byte{\"PONG\"}, nil\n\/\/ }\n\/\/ }\n\/\/\ntype RequestHandler interface {\n\tRespond([]byte, *Span) ([]byte, error)\n}\n\n\/\/ NewServer is the tcpez server intializer. It only requires two parameters,\n\/\/ an address to bind to (same format as net.ListenTCP) and a RequestHandler\n\/\/ which serves the requests.\nfunc NewServer(address string, handler RequestHandler) (s *Server, err error) {\n\ttcpAddr, err := net.ResolveTCPAddr(\"tcp\", address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tl, err := net.ListenTCP(\"tcp\", tcpAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Server{Address: address, Conn: l, Handler: handler, Stats: new(DebugStatsRecorder), UUIDGenerator: DefaultUUIDGenerator}, nil\n}\n\n\/\/ Start starts the Connection handling and request processing loop.\n\/\/ This is a blocking operation and can be started in a goroutine.\nfunc (s *Server) Start() {\n\tlog.Debug(\"Listening on %s\", s.Conn.Addr().String())\n\tfor {\n\t\tif s.isClosed == true {\n\t\t\tbreak\n\t\t}\n\t\tclientConn, err := s.Conn.Accept()\n\t\tif err != nil {\n\t\t\tlog.Warning(err.Error())\n\t\t\tbreak\n\t\t}\n\t\tgo s.handle(clientConn)\n\t}\n\tlog.Debug(\"Closing %s\", s.Conn.Addr().String())\n}\n\n\/\/ Close closes the server listener to any more Connections\nfunc (s *Server) Close() (err error) {\n\tif s.isClosed == false {\n\t\terr = s.Conn.Close()\n\t\ts.isClosed = true\n\t\treturn\n\t}\n\treturn errors.New(\"Closing already closed Connection\")\n}\n\nfunc (s *Server) handle(clientConn net.Conn) {\n\tlog.Debug(\"[tcpez] New client(%s)\", clientConn.RemoteAddr())\n\tfor {\n\t\t\/\/ Timeout the connection after 5 mins\n\t\tclientConn.SetReadDeadline(time.Now().Add(5 * time.Minute))\n\t\theader, response, err := s.readHeaderAndHandleRequest(clientConn)\n\t\tif err != nil {\n\t\t\tif err == io.EOF || err.(net.Error).Timeout() == true {\n\t\t\t\t\/\/ EOF the client has disConnected\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlog.Error(err.Error())\n\t\t\ts.Stats.Increment(\"operation.failure\")\n\t\t\treturn\n\t\t}\n\t\terr = s.sendResponse(clientConn, header, response)\n\t\tif err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t\ts.Stats.Increment(\"operation.failure\")\n\t\t\treturn\n\t\t}\n\t\ts.Stats.Increment(\"operation.success\")\n\t}\n}\n\nfunc (s *Server) readHeaderAndHandleRequest(buf io.Reader) (header int32, response []byte, err error) {\n\tvar size int32\n\terr = binary.Read(buf, binary.BigEndian, &size)\n\tif err != nil {\n\t\treturn 0, nil, err\n\t}\n\tif size < 0 {\n\t\t\/\/ this is a pipelined request\n\t\tvar wg sync.WaitGroup\n\t\tcount := -size\n\t\trequests := make([][]byte, count)\n\t\tresponses := make([][]byte, count)\n\t\tfor r := 0; int32(r) < count; r++ {\n\t\t\trequest, err := s.parseRequest(buf, 0)\n\t\t\tif err == nil {\n\t\t\t\trequests[r] = request\n\t\t\t\twg.Add(1)\n\t\t\t\tgo func(index int) {\n\t\t\t\t\tres, err := s.handleRequest(requests[index], true)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\tresponses[index] = res\n\t\t\t\t\t}\n\t\t\t\t\twg.Done()\n\t\t\t\t}(r)\n\t\t\t}\n\t\t}\n\t\twg.Wait()\n\t\toutput := bytes.NewBuffer(nil)\n\t\tfor j := 0; int32(j) < count; j++ {\n\t\t\tlength := int32(len(responses[j]))\n\t\t\terr = binary.Write(output, binary.BigEndian, length)\n\t\t\tif err == nil {\n\t\t\t\toutput.Write(responses[j])\n\t\t\t}\n\t\t}\n\t\treturn int32(-count), output.Bytes(), err\n\t} else {\n\t\trequest, err := s.parseRequest(buf, size)\n\t\tif err != nil {\n\t\t\treturn 0, nil, err\n\t\t}\n\t\tresponse, err := s.handleRequest(request, false)\n\t\tif err != nil {\n\t\t\treturn 0, nil, err\n\t\t}\n\t\treturn int32(len(response)), response, nil\n\t}\n\treturn\n}\n\nfunc (s *Server) sendResponse(w io.Writer, header int32, data []byte) (err error) {\n\terr = binary.Write(w, binary.BigEndian, header)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(data)\n\treturn\n}\n\nfunc (s *Server) parseRequest(buf io.Reader, size int32) (request []byte, err error) {\n\tif size == int32(0) {\n\t\terr = binary.Read(buf, binary.BigEndian, &size)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\trequest = make([]byte, size)\n\t_, err = io.ReadFull(buf, request)\n\tlog.Debug(\"Server Request: %s\", request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn\n}\n\nfunc (s *Server) handleRequest(request []byte, multi bool) (response []byte, err error) {\n\tspan := NewSpan(s.UUIDGenerator())\n\tif multi == true {\n\t\tspan.Attr(\"multi\", \"true\")\n\t}\n\tspan.Stats = s.Stats\n\tspan.Start(\"duration\")\n\tresponse, err = s.Handler.Respond(request, span)\n\tspan.Finish(\"duration\")\n\tlog.Info(\"%s\", span.JSON())\n\tspan.Record()\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2009--2013 The Web.go Authors\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ this file is about the actual handling of a request: it comes in, what\n\/\/ happens? routing determines which handler is responsible and that is then\n\/\/ wrapped appropriately and invoked.\n\npackage web\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go.net\/websocket\"\n)\n\ntype route struct {\n\trex *regexp.Regexp\n\tmethod string\n\thandler parametrizedHandler\n}\n\ntype ServerConfig struct {\n\tStaticDirs []string\n\tAddr string\n\tPort int\n\tCookieSecret string\n\tRecoverPanic bool\n\tCert string\n\tKey string\n\tColorOutput bool\n}\n\ntype Server struct {\n\tConfig ServerConfig\n\troutes []*route\n\tLogger *log.Logger\n\tEnv map[string]interface{}\n\t\/\/ Save the listener so it can be closed\n\tl net.Listener\n\t\/\/ Passed verbatim to every handler on every request\n\tUser interface{}\n\t\/\/ All requests are passed through this wrapper if defined\n\tWrappers []Wrapper\n}\n\nvar mainServer = NewServer()\n\n\/\/ Configuration of the shared server\nvar Config = &mainServer.Config\nvar exeFile string\n\n\/\/Stops the web server\nfunc (s *Server) Close() error {\n\tif s.l != nil {\n\t\treturn s.l.Close()\n\t}\n\treturn errors.New(\"closing non-listening web.go server\")\n}\n\n\/\/ Queue response wrapper that is called after all other wrappers\nfunc (s *Server) AddWrapper(wrap Wrapper) {\n\ts.Wrappers = append(s.Wrappers, wrap)\n}\n\nfunc (s *Server) SetLogger(logger *log.Logger) {\n\ts.Logger = logger\n}\n\nfunc (s *Server) addRoute(rawrex string, method string, handler interface{}) {\n\trex, err := regexp.Compile(rawrex)\n\tif err != nil {\n\t\ts.Logger.Printf(\"Error in route regex %q: %v\", rawrex, err)\n\t\treturn\n\t}\n\ts.routes = append(s.routes, &route{\n\t\trex: rex,\n\t\tmethod: method,\n\t\thandler: fixHandlerSignature(handler),\n\t})\n}\n\n\/\/ Calls function with recover block. The first return value is whatever the\n\/\/ function returns if it didnt panic. The second is what was passed to panic()\n\/\/ if it did.\nfunc (s *Server) safelyCall(f func() error) (softerr error, harderr interface{}) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tif !s.Config.RecoverPanic {\n\t\t\t\t\/\/ go back to panic\n\t\t\t\ts.Logger.Printf(\"Panic: %v\", err)\n\t\t\t\tpanic(err)\n\t\t\t} else {\n\t\t\t\tharderr = err\n\t\t\t\ts.Logger.Println(\"Handler crashed with error: \", err)\n\t\t\t\tfor i := 1; ; i += 1 {\n\t\t\t\t\t_, file, line, ok := runtime.Caller(i)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\ts.Logger.Println(file, line)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\treturn f(), nil\n}\n\n\/\/ Determine if this route matches this request purely on the basis of the method\nfunc matchRouteMethods(req *http.Request, route *route) bool {\n\tif req.Method == route.method {\n\t\treturn true\n\t}\n\tif req.Method == \"HEAD\" && route.method == \"GET\" {\n\t\treturn true\n\t}\n\tif req.Header.Get(\"Upgrade\") == \"websocket\" && route.method == \"WEBSOCKET\" {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ If this request matches this route return the group matches from the regular\n\/\/ expression otherwise return an empty slice. note on success the return value\n\/\/ includes the entire match as the first element.\nfunc matchRoute(req *http.Request, route *route) []string {\n\tif !matchRouteMethods(req, route) {\n\t\treturn nil\n\t}\n\tmatch := route.rex.FindStringSubmatch(req.URL.Path)\n\tif match == nil || len(match[0]) != len(req.URL.Path) {\n\t\treturn nil\n\t}\n\treturn match\n}\n\nfunc findMatchingRoute(req *http.Request, routes []*route) (*route, []string) {\n\tfor _, route := range routes {\n\t\tif match := matchRoute(req, route); match != nil {\n\t\t\treturn route, match\n\t\t}\n\t}\n\treturn nil, nil\n}\n\n\/\/ Apply the handler to this context and try to handle errors where possible\nfunc (s *Server) applyHandler(f SimpleHandler, ctx *Context) (err error) {\n\tsofterr, harderr := s.safelyCall(func() error {\n\t\treturn f(ctx)\n\t})\n\tif harderr != nil {\n\t\t\/\/there was an error or panic while calling the handler\n\t\tctx.Abort(500, \"Server Error\")\n\t\treturn fmt.Errorf(\"%v\", harderr)\n\t}\n\tif softerr != nil {\n\t\ts.Logger.Printf(\"Handler returned error: %v\", softerr)\n\t\tif werr, ok := softerr.(WebError); ok {\n\t\t\tctx.Abort(werr.Code, werr.Error())\n\t\t} else {\n\t\t\t\/\/ Non-web errors are not leaked to the outside\n\t\t\tctx.Abort(500, \"Server Error\")\n\t\t\terr = softerr\n\t\t}\n\t} else {\n\t\t\/\/ flush the writer by ensuring at least one Write call takes place\n\t\t_, err = ctx.Write([]byte{})\n\t}\n\tif err == nil {\n\t\terr = ctx.Response.Close()\n\t}\n\treturn\n}\n\nfunc dirExists(dir string) bool {\n\td, e := os.Stat(dir)\n\tswitch {\n\tcase e != nil:\n\t\treturn false\n\tcase !d.IsDir():\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc fileExists(dir string) bool {\n\tinfo, err := os.Stat(dir)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn !info.IsDir()\n}\n\n\/\/ Default\nfunc defaultStaticDir() string {\n\troot, _ := path.Split(exeFile)\n\treturn path.Join(root, \"static\")\n}\n\n\/\/ If this request corresponds to a static file return its path\nfunc (s *Server) findFile(req *http.Request) string {\n\t\/\/try to serve static files\n\tstaticDirs := s.Config.StaticDirs\n\tif len(staticDirs) == 0 {\n\t\tstaticDirs = []string{defaultStaticDir()}\n\t}\n\tfor _, staticDir := range staticDirs {\n\t\tstaticFile := path.Join(staticDir, req.URL.Path)\n\t\tif fileExists(staticFile) && (req.Method == \"GET\" || req.Method == \"HEAD\") {\n\t\t\treturn staticFile\n\t\t}\n\t}\n\n\t\/\/ Try to serve index.html || index.htm\n\tindexFilenames := []string{\"index.html\", \"index.htm\"}\n\tfor _, staticDir := range staticDirs {\n\t\tfor _, indexFilename := range indexFilenames {\n\t\t\tif indexPath := path.Join(path.Join(staticDir, req.URL.Path), indexFilename); fileExists(indexPath) {\n\t\t\t\treturn indexPath\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ Fully clothed request handler\nfunc (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tctx := &Context{\n\t\tRequest: req,\n\t\tRawBody: nil,\n\t\tParams: map[string]string{},\n\t\tServer: s,\n\t\tResponse: &ResponseWriter{ResponseWriter: w, BodyWriter: w},\n\t\tUser: s.User,\n\t}\n\n\t\/\/log the request\n\tif s.Config.ColorOutput {\n\t\ts.Logger.Printf(\"\\033[32;1m%s %s\\033[0m\", req.Method, req.URL.Path)\n\t} else {\n\t\ts.Logger.Printf(\"%s %s\", req.Method, req.URL.Path)\n\t}\n\n\t\/\/ignore errors from ParseForm because it's usually harmless.\n\treq.ParseForm()\n\tif len(req.Form) > 0 {\n\t\tfor k, v := range req.Form {\n\t\t\tctx.Params[k] = v[0]\n\t\t}\n\t\tif s.Config.ColorOutput {\n\t\t\ts.Logger.Printf(\"\\033[37;1mParams: %v\\033[0m\\n\", ctx.Params)\n\t\t} else {\n\t\t\ts.Logger.Printf(\"Params: %v\\n\", ctx.Params)\n\t\t}\n\n\t}\n\n\tvar simpleh SimpleHandler\n\troute, match := findMatchingRoute(req, s.routes)\n\tif route != nil {\n\t\tif route.method == \"WEBSOCKET\" {\n\t\t\t\/\/ Wrap websocket handler\n\t\t\topenh := func(ctx *Context, args ...string) (err error) {\n\t\t\t\t\/\/ yo dawg we heard you like wrapped functions\n\t\t\t\twebsocket.Handler(func(ws *websocket.Conn) {\n\t\t\t\t\tctx.WebsockConn = ws\n\t\t\t\t\terr = route.handler(ctx, args...)\n\t\t\t\t}).ServeHTTP(ctx.Response, req)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsimpleh = closeHandler(openh, match[1:]...)\n\t\t} else {\n\t\t\tsimpleh = closeHandler(route.handler, match[1:]...)\n\t\t}\n\t} else if path := s.findFile(req); path != \"\" {\n\t\t\/\/ no custom handler found but there is a file with this name\n\t\tsimpleh = func(ctx *Context) error {\n\t\t\thttp.ServeFile(ctx.Response, ctx.Request, path)\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\t\/\/ hopeless, 404\n\t\tsimpleh = func(ctx *Context) error {\n\t\t\treturn WebError{404, \"Page not found\"}\n\t\t}\n\t}\n\tfor _, wrap := range s.Wrappers {\n\t\tsimpleh = wrapHandler(wrap, simpleh)\n\t}\n\ts.applyHandler(simpleh, ctx)\n\treturn\n}\n\nfunc webTime(t time.Time) string {\n\tftime := t.Format(time.RFC1123)\n\tif strings.HasSuffix(ftime, \"UTC\") {\n\t\tftime = ftime[0:len(ftime)-3] + \"GMT\"\n\t}\n\treturn ftime\n}\n\nfunc NewServer() *Server {\n\tconf := ServerConfig{\n\t\tRecoverPanic: true,\n\t\tCert: \"\",\n\t\tKey: \"\",\n\t\tColorOutput: true,\n\t}\n\ts := &Server{\n\t\tConfig: conf,\n\t\tLogger: log.New(os.Stdout, \"\", log.Ldate|log.Ltime),\n\t\tEnv: map[string]interface{}{},\n\t}\n\t\/\/ Set some default headers\n\ts.AddWrapper(func(h SimpleHandler, ctx *Context) error {\n\t\tctx.Header().Set(\"Server\", \"web.go\")\n\t\ttm := time.Now().UTC()\n\t\tctx.Header().Set(\"Date\", webTime(tm))\n\t\treturn h(ctx)\n\t})\n\treturn s\n}\n\n\/\/ Package wide proxy functions for global web server object\n\n\/\/ Stop the global web server\nfunc Close() error {\n\treturn mainServer.Close()\n}\n\n\/\/ Set a logger to be used by the global web server\nfunc SetLogger(logger *log.Logger) {\n\tmainServer.SetLogger(logger)\n}\n\nfunc AddWrapper(wrap Wrapper) {\n\tmainServer.AddWrapper(wrap)\n}\n\n\/\/ The global web server as an object implementing the http.Handler interface\nfunc GetHTTPHandler() http.Handler {\n\treturn mainServer\n}\n\nfunc init() {\n\t\/\/ find the location of the executable\n\targ0 := path.Clean(os.Args[0])\n\twd, _ := os.Getwd()\n\tif strings.HasPrefix(arg0, \"\/\") {\n\t\texeFile = arg0\n\t} else {\n\t\t\/\/ TODO For robustness, search each directory in $PATH\n\t\texeFile = path.Join(wd, arg0)\n\t}\n\tregisterDefaultMimetypes()\n}\n<commit_msg>Revert \"Do not set content-type HTML by default\"<commit_after>\/\/ Copyright © 2009--2013 The Web.go Authors\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ this file is about the actual handling of a request: it comes in, what\n\/\/ happens? routing determines which handler is responsible and that is then\n\/\/ wrapped appropriately and invoked.\n\npackage web\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go.net\/websocket\"\n)\n\ntype route struct {\n\trex *regexp.Regexp\n\tmethod string\n\thandler parametrizedHandler\n}\n\ntype ServerConfig struct {\n\tStaticDirs []string\n\tAddr string\n\tPort int\n\tCookieSecret string\n\tRecoverPanic bool\n\tCert string\n\tKey string\n\tColorOutput bool\n}\n\ntype Server struct {\n\tConfig ServerConfig\n\troutes []*route\n\tLogger *log.Logger\n\tEnv map[string]interface{}\n\t\/\/ Save the listener so it can be closed\n\tl net.Listener\n\t\/\/ Passed verbatim to every handler on every request\n\tUser interface{}\n\t\/\/ All requests are passed through this wrapper if defined\n\tWrappers []Wrapper\n}\n\nvar mainServer = NewServer()\n\n\/\/ Configuration of the shared server\nvar Config = &mainServer.Config\nvar exeFile string\n\n\/\/Stops the web server\nfunc (s *Server) Close() error {\n\tif s.l != nil {\n\t\treturn s.l.Close()\n\t}\n\treturn errors.New(\"closing non-listening web.go server\")\n}\n\n\/\/ Queue response wrapper that is called after all other wrappers\nfunc (s *Server) AddWrapper(wrap Wrapper) {\n\ts.Wrappers = append(s.Wrappers, wrap)\n}\n\nfunc (s *Server) SetLogger(logger *log.Logger) {\n\ts.Logger = logger\n}\n\nfunc (s *Server) addRoute(rawrex string, method string, handler interface{}) {\n\trex, err := regexp.Compile(rawrex)\n\tif err != nil {\n\t\ts.Logger.Printf(\"Error in route regex %q: %v\", rawrex, err)\n\t\treturn\n\t}\n\ts.routes = append(s.routes, &route{\n\t\trex: rex,\n\t\tmethod: method,\n\t\thandler: fixHandlerSignature(handler),\n\t})\n}\n\n\/\/ Calls function with recover block. The first return value is whatever the\n\/\/ function returns if it didnt panic. The second is what was passed to panic()\n\/\/ if it did.\nfunc (s *Server) safelyCall(f func() error) (softerr error, harderr interface{}) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tif !s.Config.RecoverPanic {\n\t\t\t\t\/\/ go back to panic\n\t\t\t\ts.Logger.Printf(\"Panic: %v\", err)\n\t\t\t\tpanic(err)\n\t\t\t} else {\n\t\t\t\tharderr = err\n\t\t\t\ts.Logger.Println(\"Handler crashed with error: \", err)\n\t\t\t\tfor i := 1; ; i += 1 {\n\t\t\t\t\t_, file, line, ok := runtime.Caller(i)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\ts.Logger.Println(file, line)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\treturn f(), nil\n}\n\n\/\/ Determine if this route matches this request purely on the basis of the method\nfunc matchRouteMethods(req *http.Request, route *route) bool {\n\tif req.Method == route.method {\n\t\treturn true\n\t}\n\tif req.Method == \"HEAD\" && route.method == \"GET\" {\n\t\treturn true\n\t}\n\tif req.Header.Get(\"Upgrade\") == \"websocket\" && route.method == \"WEBSOCKET\" {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ If this request matches this route return the group matches from the regular\n\/\/ expression otherwise return an empty slice. note on success the return value\n\/\/ includes the entire match as the first element.\nfunc matchRoute(req *http.Request, route *route) []string {\n\tif !matchRouteMethods(req, route) {\n\t\treturn nil\n\t}\n\tmatch := route.rex.FindStringSubmatch(req.URL.Path)\n\tif match == nil || len(match[0]) != len(req.URL.Path) {\n\t\treturn nil\n\t}\n\treturn match\n}\n\nfunc findMatchingRoute(req *http.Request, routes []*route) (*route, []string) {\n\tfor _, route := range routes {\n\t\tif match := matchRoute(req, route); match != nil {\n\t\t\treturn route, match\n\t\t}\n\t}\n\treturn nil, nil\n}\n\n\/\/ Apply the handler to this context and try to handle errors where possible\nfunc (s *Server) applyHandler(f SimpleHandler, ctx *Context) (err error) {\n\tsofterr, harderr := s.safelyCall(func() error {\n\t\treturn f(ctx)\n\t})\n\tif harderr != nil {\n\t\t\/\/there was an error or panic while calling the handler\n\t\tctx.Abort(500, \"Server Error\")\n\t\treturn fmt.Errorf(\"%v\", harderr)\n\t}\n\tif softerr != nil {\n\t\ts.Logger.Printf(\"Handler returned error: %v\", softerr)\n\t\tif werr, ok := softerr.(WebError); ok {\n\t\t\tctx.Abort(werr.Code, werr.Error())\n\t\t} else {\n\t\t\t\/\/ Non-web errors are not leaked to the outside\n\t\t\tctx.Abort(500, \"Server Error\")\n\t\t\terr = softerr\n\t\t}\n\t} else {\n\t\t\/\/ flush the writer by ensuring at least one Write call takes place\n\t\t_, err = ctx.Write([]byte{})\n\t}\n\tif err == nil {\n\t\terr = ctx.Response.Close()\n\t}\n\treturn\n}\n\nfunc dirExists(dir string) bool {\n\td, e := os.Stat(dir)\n\tswitch {\n\tcase e != nil:\n\t\treturn false\n\tcase !d.IsDir():\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc fileExists(dir string) bool {\n\tinfo, err := os.Stat(dir)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn !info.IsDir()\n}\n\n\/\/ Default\nfunc defaultStaticDir() string {\n\troot, _ := path.Split(exeFile)\n\treturn path.Join(root, \"static\")\n}\n\n\/\/ If this request corresponds to a static file return its path\nfunc (s *Server) findFile(req *http.Request) string {\n\t\/\/try to serve static files\n\tstaticDirs := s.Config.StaticDirs\n\tif len(staticDirs) == 0 {\n\t\tstaticDirs = []string{defaultStaticDir()}\n\t}\n\tfor _, staticDir := range staticDirs {\n\t\tstaticFile := path.Join(staticDir, req.URL.Path)\n\t\tif fileExists(staticFile) && (req.Method == \"GET\" || req.Method == \"HEAD\") {\n\t\t\treturn staticFile\n\t\t}\n\t}\n\n\t\/\/ Try to serve index.html || index.htm\n\tindexFilenames := []string{\"index.html\", \"index.htm\"}\n\tfor _, staticDir := range staticDirs {\n\t\tfor _, indexFilename := range indexFilenames {\n\t\t\tif indexPath := path.Join(path.Join(staticDir, req.URL.Path), indexFilename); fileExists(indexPath) {\n\t\t\t\treturn indexPath\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ Fully clothed request handler\nfunc (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tctx := &Context{\n\t\tRequest: req,\n\t\tRawBody: nil,\n\t\tParams: map[string]string{},\n\t\tServer: s,\n\t\tResponse: &ResponseWriter{ResponseWriter: w, BodyWriter: w},\n\t\tUser: s.User,\n\t}\n\n\t\/\/log the request\n\tif s.Config.ColorOutput {\n\t\ts.Logger.Printf(\"\\033[32;1m%s %s\\033[0m\", req.Method, req.URL.Path)\n\t} else {\n\t\ts.Logger.Printf(\"%s %s\", req.Method, req.URL.Path)\n\t}\n\n\t\/\/ignore errors from ParseForm because it's usually harmless.\n\treq.ParseForm()\n\tif len(req.Form) > 0 {\n\t\tfor k, v := range req.Form {\n\t\t\tctx.Params[k] = v[0]\n\t\t}\n\t\tif s.Config.ColorOutput {\n\t\t\ts.Logger.Printf(\"\\033[37;1mParams: %v\\033[0m\\n\", ctx.Params)\n\t\t} else {\n\t\t\ts.Logger.Printf(\"Params: %v\\n\", ctx.Params)\n\t\t}\n\n\t}\n\n\tvar simpleh SimpleHandler\n\troute, match := findMatchingRoute(req, s.routes)\n\tif route != nil {\n\t\tif route.method == \"WEBSOCKET\" {\n\t\t\t\/\/ Wrap websocket handler\n\t\t\topenh := func(ctx *Context, args ...string) (err error) {\n\t\t\t\t\/\/ yo dawg we heard you like wrapped functions\n\t\t\t\twebsocket.Handler(func(ws *websocket.Conn) {\n\t\t\t\t\tctx.WebsockConn = ws\n\t\t\t\t\terr = route.handler(ctx, args...)\n\t\t\t\t}).ServeHTTP(ctx.Response, req)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tsimpleh = closeHandler(openh, match[1:]...)\n\t\t} else {\n\t\t\t\/\/ Set the default content-type\n\t\t\tctx.ContentType(\"text\/html; charset=utf-8\")\n\t\t\tsimpleh = closeHandler(route.handler, match[1:]...)\n\t\t}\n\t} else if path := s.findFile(req); path != \"\" {\n\t\t\/\/ no custom handler found but there is a file with this name\n\t\tsimpleh = func(ctx *Context) error {\n\t\t\thttp.ServeFile(ctx.Response, ctx.Request, path)\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\t\/\/ hopeless, 404\n\t\tsimpleh = func(ctx *Context) error {\n\t\t\treturn WebError{404, \"Page not found\"}\n\t\t}\n\t}\n\tfor _, wrap := range s.Wrappers {\n\t\tsimpleh = wrapHandler(wrap, simpleh)\n\t}\n\ts.applyHandler(simpleh, ctx)\n\treturn\n}\n\nfunc webTime(t time.Time) string {\n\tftime := t.Format(time.RFC1123)\n\tif strings.HasSuffix(ftime, \"UTC\") {\n\t\tftime = ftime[0:len(ftime)-3] + \"GMT\"\n\t}\n\treturn ftime\n}\n\nfunc NewServer() *Server {\n\tconf := ServerConfig{\n\t\tRecoverPanic: true,\n\t\tCert: \"\",\n\t\tKey: \"\",\n\t\tColorOutput: true,\n\t}\n\ts := &Server{\n\t\tConfig: conf,\n\t\tLogger: log.New(os.Stdout, \"\", log.Ldate|log.Ltime),\n\t\tEnv: map[string]interface{}{},\n\t}\n\t\/\/ Set some default headers\n\ts.AddWrapper(func(h SimpleHandler, ctx *Context) error {\n\t\tctx.Header().Set(\"Server\", \"web.go\")\n\t\ttm := time.Now().UTC()\n\t\tctx.Header().Set(\"Date\", webTime(tm))\n\t\treturn h(ctx)\n\t})\n\treturn s\n}\n\n\/\/ Package wide proxy functions for global web server object\n\n\/\/ Stop the global web server\nfunc Close() error {\n\treturn mainServer.Close()\n}\n\n\/\/ Set a logger to be used by the global web server\nfunc SetLogger(logger *log.Logger) {\n\tmainServer.SetLogger(logger)\n}\n\nfunc AddWrapper(wrap Wrapper) {\n\tmainServer.AddWrapper(wrap)\n}\n\n\/\/ The global web server as an object implementing the http.Handler interface\nfunc GetHTTPHandler() http.Handler {\n\treturn mainServer\n}\n\nfunc init() {\n\t\/\/ find the location of the executable\n\targ0 := path.Clean(os.Args[0])\n\twd, _ := os.Getwd()\n\tif strings.HasPrefix(arg0, \"\/\") {\n\t\texeFile = arg0\n\t} else {\n\t\t\/\/ TODO For robustness, search each directory in $PATH\n\t\texeFile = path.Join(wd, arg0)\n\t}\n\tregisterDefaultMimetypes()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Jacob Taylor jacob.taylor@gmail.com\n\/\/ License: Apache2\npackage main\n\nimport (\n \"fmt\"\n \"net\"\n \"..\/utils\"\n \"bufio\"\n \"encoding\/binary\"\n \"time\"\n \"os\"\n \"bytes\"\n \"io\"\n \"io\/ioutil\"\n \"log\"\n)\n\nconst nbd_folder = \"\/sample_disks\/\"\n\nvar characters_per_line = 100\nvar newline = 0\nvar line_number = 0\n\nfunc send_export_list_item(output *bufio.Writer, export_name string) {\n data := make([]byte, 1024)\n length := len(export_name)\n offset := 0\n\n \/\/ length of export name\n binary.BigEndian.PutUint32(data[offset:], uint32(length)) \/\/ length of string\n offset += 4\n\n \/\/ export name\n copy(data[offset:], export_name)\n offset += length\n\n reply_type := uint32(2) \/\/ reply_type: NBD_REP_SERVER\n send_message(output, reply_type, uint32(offset), data)\n}\n\nfunc send_ack(output *bufio.Writer) {\n send_message(output, utils.NBD_COMMAND_ACK, 0, nil)\n}\n\nfunc export_name(output *bufio.Writer, conn net.Conn, payload_size int, payload []byte, pad_with_zeros bool) {\n fmt.Printf(\"have request to bind to: %s\\n\", string(payload[:payload_size]))\n\n defer conn.Close()\n\n var filename bytes.Buffer\n current_directory, err := os.Getwd()\n utils.ErrorCheck(err)\n filename.WriteString(current_directory)\n filename.WriteString(nbd_folder)\n filename.Write(payload[:payload_size])\n\n fmt.Printf(\"Opening file: %s\\n\", filename.String())\n\n file, err := os.OpenFile(filename.String(), os.O_RDWR, 0644)\n\n utils.ErrorCheck(err)\n if err != nil {\n return\n }\n\n buffer := make([]byte, 256)\n offset := 0\n\n fs, err := file.Stat()\n file_size := uint64(fs.Size())\n\n binary.BigEndian.PutUint64(buffer[offset:], file_size) \/\/ size\n offset += 8\n\n binary.BigEndian.PutUint16(buffer[offset:], 1) \/\/ flags\n offset += 2\n\n if pad_with_zeros {\n offset += 124 \/\/ pad with 124 zeroes\n }\n\n _, err = output.Write(buffer[:offset])\n output.Flush()\n utils.ErrorCheck(err)\n\n buffer = make([]byte, 2048*1024) \/\/ set the buffer to 2mb\n conn_reader := bufio.NewReader(conn)\n for {\n waiting_for := 28 \/\/ wait for at least the minimum payload size\n\n _, err := io.ReadFull(conn_reader, buffer[:waiting_for])\n if err == io.EOF {\n fmt.Printf(\"Abort detected, escaping processing loop\\n\")\n break\n }\n utils.ErrorCheck(err)\n\n \/\/magic := binary.BigEndian.Uint32(buffer)\n command := binary.BigEndian.Uint32(buffer[4:8])\n \/\/handle := binary.BigEndian.Uint64(buffer[8:16])\n from := binary.BigEndian.Uint64(buffer[16:24])\n length := binary.BigEndian.Uint32(buffer[24:28])\n\n newline += 1;\n if newline % characters_per_line == 0 {\n line_number++\n fmt.Printf(\"\\n%5d: \", line_number * 100)\n newline -= characters_per_line\n }\n\n switch command {\n case utils.NBD_COMMAND_READ:\n fmt.Printf(\".\")\n\n _, err = file.ReadAt(buffer[16:16+length], int64(from))\n utils.ErrorCheck(err)\n\n binary.BigEndian.PutUint32(buffer[:4], utils.NBD_REPLY_MAGIC)\n binary.BigEndian.PutUint32(buffer[4:8], 0) \/\/ error bits\n\n conn.Write(buffer[:16+length])\n\n continue\n case utils.NBD_COMMAND_WRITE:\n fmt.Printf(\"W\")\n\n \/\/waiting_for += int(length) \/\/ wait for the additional payload\n \/\/fmt.Printf(\"About to read the data that we should be writing out.\")\n _, err := io.ReadFull(conn_reader, buffer[28:28+length])\n if err == io.EOF {\n fmt.Printf(\"Abort detected, escaping processing loop\\n\")\n break\n }\n utils.ErrorCheck(err)\n \/\/fmt.Printf(\"Done reading the data that should be written\")\n\n _, err = file.WriteAt(buffer[28:28+length], int64(from))\n utils.ErrorCheck(err)\n\n file.Sync()\n\n \/\/ let them know we are done\n binary.BigEndian.PutUint32(buffer[:4], utils.NBD_REPLY_MAGIC)\n binary.BigEndian.PutUint32(buffer[4:8], 0) \/\/ error bits\n\n conn.Write(buffer[:16])\n\n continue\n\n case utils.NBD_COMMAND_DISCONNECT:\n fmt.Printf(\"D\")\n\n file.Sync()\n return\n }\n }\n}\n\nfunc send_export_list(output *bufio.Writer) {\n current_directory, err := os.Getwd()\n files, err := ioutil.ReadDir(current_directory + nbd_folder)\n if err != nil {\n log.Fatal(err)\n }\n for _, file := range files {\n send_export_list_item(output, file.Name())\n }\n\n send_ack(output)\n}\n\nfunc send_message(output *bufio.Writer, reply_type uint32, length uint32, data []byte ) {\n endian := binary.BigEndian\n buffer := make([]byte, 1024)\n offset := 0\n\n endian.PutUint64(buffer[offset:], utils.NBD_SERVER_SEND_REPLY_MAGIC)\n offset += 8\n\n endian.PutUint32(buffer[offset:], uint32(3)) \/\/ not sure what this is....\n offset += 4\n\n endian.PutUint32(buffer[offset:], reply_type) \/\/ reply_type: NBD_REP_SERVER\n offset += 4\n\n endian.PutUint32(buffer[offset:], length) \/\/ length of package\n offset += 4\n\n if data != nil {\n copy(buffer[offset:], data[0:length])\n offset += int(length)\n }\n\n data_to_send := buffer[:offset]\n output.Write(data_to_send)\n output.Flush()\n\n utils.LogData(\"Just sent:\", offset, data_to_send)\n}\n\nfunc main() {\n\n if len(os.Args) < 3 {\n panic(\"missing arguments: (ipaddress) (portnumber)\")\n return\n }\n\n listener, err := net.Listen(\"tcp\", os.Args[1] + \":\" + os.Args[2])\n utils.ErrorCheck(err)\n\n fmt.Printf(\"Hello World, we have %v\\n\", listener)\n reply_magic := make([]byte, 4)\n binary.BigEndian.PutUint32(reply_magic, utils.NBD_REPLY_MAGIC)\n\n for {\n conn, err := listener.Accept()\n utils.ErrorCheck(err)\n\n fmt.Printf(\"We have a new connection from: %s\\n\", conn.RemoteAddr())\n output := bufio.NewWriter(conn)\n\n output.WriteString(\"NBDMAGIC\") \/\/ init password\n output.WriteString(\"IHAVEOPT\") \/\/ Magic\n \/\/fmt.printf(\"arg \")\n \/\/output.Write([]byte{0, byte(os.Args[3][1])})\n \/\/output.Write([]byte{0, 3}) \/\/ Ubuntu\n output.Write([]byte{0, 0}) \/\/ Qemu\n\n output.Flush()\n\n \/\/ Fetch the data until we get the initial options\n data := make([]byte, 1024)\n offset := 0\n waiting_for := 16 \/\/ wait for at least the minimum payload size\n\n packet_count := 0\n for offset < waiting_for {\n length, err := conn.Read(data[offset:])\n if length > 0 {\n packet_count += 1\n }\n offset += length\n utils.ErrorCheck(err)\n \/\/utils.LogData(\"Reading instruction\", offset, data)\n if offset < waiting_for {\n time.Sleep(5 * time.Millisecond)\n }\n \/\/ If we are requesting an export, make sure we have the length of the data for the export name.\n if offset > 15 && binary.BigEndian.Uint32(data[12:]) == utils.NBD_COMMAND_EXPORT_NAME {\n waiting_for = 20\n }\n }\n\n fmt.Printf(\"%d packets processed to get %d bytes\\n\", packet_count, offset)\n utils.LogData(\"Received from client\", offset, data)\n options := binary.BigEndian.Uint32(data[:4])\n command := binary.BigEndian.Uint32(data[12:])\n payload_size := int(binary.BigEndian.Uint32(data[16:]))\n fmt.Printf(\"Options are: %v\\n\", options)\n if (options & utils.NBD_FLAG_FIXED_NEW_STYLE) == utils.NBD_FLAG_FIXED_NEW_STYLE {\n fmt.Printf(\"Fixed New Style option requested\\n\")\n }\n pad_with_zeros := true\n if (options & utils.NBD_FLAG_NO_ZEROES) == utils.NBD_FLAG_NO_ZEROES {\n pad_with_zeros = false\n fmt.Printf(\"No Zero Padding option requested\\n\")\n }\n\n fmt.Sprintf(\"command is: %d\\npayload_size is: %d\\n\", command, payload_size)\n waiting_for += int(payload_size)\n for offset < waiting_for {\n length, err := conn.Read(data[offset:])\n offset += length\n utils.ErrorCheck(err)\n utils.LogData(\"Reading instruction\", offset, data)\n if offset < waiting_for {\n time.Sleep(5 * time.Millisecond)\n }\n }\n payload := make([]byte, payload_size)\n\n if payload_size > 0{\n copy(payload, data[20:])\n }\n\n utils.LogData(\"Payload is:\", payload_size, payload)\n fmt.Printf(\"command is: %v\\n\", command)\n\n \/\/ At this point, we have the command, payload size, and payload.\n switch command {\n case utils.NBD_COMMAND_LIST:\n send_export_list(output)\n conn.Close()\n break\n case utils.NBD_COMMAND_EXPORT_NAME:\n go export_name(output, conn, payload_size, payload, pad_with_zeros)\n break\n }\n }\n\n}\n<commit_msg>fixed #13 nicer printing messages on startup and shutdown<commit_after>\/\/ Copyright 2016 Jacob Taylor jacob.taylor@gmail.com\n\/\/ License: Apache2\npackage main\n\nimport (\n \"fmt\"\n \"net\"\n \"..\/utils\"\n \"bufio\"\n \"encoding\/binary\"\n \"time\"\n \"os\"\n \"bytes\"\n \"io\"\n \"io\/ioutil\"\n \"log\"\n)\n\nconst nbd_folder = \"\/sample_disks\/\"\n\nvar characters_per_line = 100\nvar newline = 0\nvar line_number = 0\n\nfunc send_export_list_item(output *bufio.Writer, export_name string) {\n data := make([]byte, 1024)\n length := len(export_name)\n offset := 0\n\n \/\/ length of export name\n binary.BigEndian.PutUint32(data[offset:], uint32(length)) \/\/ length of string\n offset += 4\n\n \/\/ export name\n copy(data[offset:], export_name)\n offset += length\n\n reply_type := uint32(2) \/\/ reply_type: NBD_REP_SERVER\n send_message(output, reply_type, uint32(offset), data)\n}\n\nfunc send_ack(output *bufio.Writer) {\n send_message(output, utils.NBD_COMMAND_ACK, 0, nil)\n}\n\nfunc export_name(output *bufio.Writer, conn net.Conn, payload_size int, payload []byte, pad_with_zeros bool) {\n fmt.Printf(\"have request to bind to: %s\\n\", string(payload[:payload_size]))\n\n defer conn.Close()\n\n var filename bytes.Buffer\n current_directory, err := os.Getwd()\n utils.ErrorCheck(err)\n filename.WriteString(current_directory)\n filename.WriteString(nbd_folder)\n filename.Write(payload[:payload_size])\n\n fmt.Printf(\"Opening file: %s\\n\", filename.String())\n\n file, err := os.OpenFile(filename.String(), os.O_RDWR, 0644)\n\n utils.ErrorCheck(err)\n if err != nil {\n return\n }\n\n buffer := make([]byte, 256)\n offset := 0\n\n fs, err := file.Stat()\n file_size := uint64(fs.Size())\n\n binary.BigEndian.PutUint64(buffer[offset:], file_size) \/\/ size\n offset += 8\n\n binary.BigEndian.PutUint16(buffer[offset:], 1) \/\/ flags\n offset += 2\n\n if pad_with_zeros {\n offset += 124 \/\/ pad with 124 zeroes\n }\n\n _, err = output.Write(buffer[:offset])\n output.Flush()\n utils.ErrorCheck(err)\n\n buffer = make([]byte, 2048*1024) \/\/ set the buffer to 2mb\n conn_reader := bufio.NewReader(conn)\n for {\n waiting_for := 28 \/\/ wait for at least the minimum payload size\n\n _, err := io.ReadFull(conn_reader, buffer[:waiting_for])\n if err == io.EOF {\n fmt.Printf(\"Abort detected, escaping processing loop\\n\")\n break\n }\n utils.ErrorCheck(err)\n\n \/\/magic := binary.BigEndian.Uint32(buffer)\n command := binary.BigEndian.Uint32(buffer[4:8])\n \/\/handle := binary.BigEndian.Uint64(buffer[8:16])\n from := binary.BigEndian.Uint64(buffer[16:24])\n length := binary.BigEndian.Uint32(buffer[24:28])\n\n newline += 1;\n if newline % characters_per_line == 0 {\n line_number++\n fmt.Printf(\"\\n%5d: \", line_number * 100)\n newline -= characters_per_line\n }\n\n switch command {\n case utils.NBD_COMMAND_READ:\n fmt.Printf(\".\")\n\n _, err = file.ReadAt(buffer[16:16+length], int64(from))\n utils.ErrorCheck(err)\n\n binary.BigEndian.PutUint32(buffer[:4], utils.NBD_REPLY_MAGIC)\n binary.BigEndian.PutUint32(buffer[4:8], 0) \/\/ error bits\n\n conn.Write(buffer[:16+length])\n\n continue\n case utils.NBD_COMMAND_WRITE:\n fmt.Printf(\"W\")\n\n \/\/waiting_for += int(length) \/\/ wait for the additional payload\n \/\/fmt.Printf(\"About to read the data that we should be writing out.\")\n _, err := io.ReadFull(conn_reader, buffer[28:28+length])\n if err == io.EOF {\n fmt.Printf(\"Abort detected, escaping processing loop\\n\")\n break\n }\n utils.ErrorCheck(err)\n \/\/fmt.Printf(\"Done reading the data that should be written\")\n\n _, err = file.WriteAt(buffer[28:28+length], int64(from))\n utils.ErrorCheck(err)\n\n file.Sync()\n\n \/\/ let them know we are done\n binary.BigEndian.PutUint32(buffer[:4], utils.NBD_REPLY_MAGIC)\n binary.BigEndian.PutUint32(buffer[4:8], 0) \/\/ error bits\n\n conn.Write(buffer[:16])\n\n continue\n\n case utils.NBD_COMMAND_DISCONNECT:\n fmt.Printf(\"D\")\n\n file.Sync()\n return\n }\n }\n}\n\nfunc send_export_list(output *bufio.Writer) {\n current_directory, err := os.Getwd()\n files, err := ioutil.ReadDir(current_directory + nbd_folder)\n if err != nil {\n log.Fatal(err)\n }\n for _, file := range files {\n send_export_list_item(output, file.Name())\n }\n\n send_ack(output)\n}\n\nfunc send_message(output *bufio.Writer, reply_type uint32, length uint32, data []byte ) {\n endian := binary.BigEndian\n buffer := make([]byte, 1024)\n offset := 0\n\n endian.PutUint64(buffer[offset:], utils.NBD_SERVER_SEND_REPLY_MAGIC)\n offset += 8\n\n endian.PutUint32(buffer[offset:], uint32(3)) \/\/ not sure what this is....\n offset += 4\n\n endian.PutUint32(buffer[offset:], reply_type) \/\/ reply_type: NBD_REP_SERVER\n offset += 4\n\n endian.PutUint32(buffer[offset:], length) \/\/ length of package\n offset += 4\n\n if data != nil {\n copy(buffer[offset:], data[0:length])\n offset += int(length)\n }\n\n data_to_send := buffer[:offset]\n output.Write(data_to_send)\n output.Flush()\n\n utils.LogData(\"Just sent:\", offset, data_to_send)\n}\n\nfunc main() {\n\n if len(os.Args) < 3 {\n panic(\"missing arguments: (ipaddress) (portnumber)\")\n return\n }\n\n listener, err := net.Listen(\"tcp\", os.Args[1] + \":\" + os.Args[2])\n utils.ErrorCheck(err)\n\n fmt.Printf(\"ABlox server online\\n\")\n\n reply_magic := make([]byte, 4)\n binary.BigEndian.PutUint32(reply_magic, utils.NBD_REPLY_MAGIC)\n\n defer fmt.Printf(\"End of line\\n\")\n\n for {\n conn, err := listener.Accept()\n utils.ErrorCheck(err)\n\n fmt.Printf(\"We have a new connection from: %s\\n\", conn.RemoteAddr())\n output := bufio.NewWriter(conn)\n\n output.WriteString(\"NBDMAGIC\") \/\/ init password\n output.WriteString(\"IHAVEOPT\") \/\/ Magic\n \/\/fmt.printf(\"arg \")\n \/\/output.Write([]byte{0, byte(os.Args[3][1])})\n \/\/output.Write([]byte{0, 3}) \/\/ Ubuntu\n output.Write([]byte{0, 0}) \/\/ Qemu\n\n output.Flush()\n\n \/\/ Fetch the data until we get the initial options\n data := make([]byte, 1024)\n offset := 0\n waiting_for := 16 \/\/ wait for at least the minimum payload size\n\n packet_count := 0\n for offset < waiting_for {\n length, err := conn.Read(data[offset:])\n if length > 0 {\n packet_count += 1\n }\n offset += length\n utils.ErrorCheck(err)\n \/\/utils.LogData(\"Reading instruction\", offset, data)\n if offset < waiting_for {\n time.Sleep(5 * time.Millisecond)\n }\n \/\/ If we are requesting an export, make sure we have the length of the data for the export name.\n if offset > 15 && binary.BigEndian.Uint32(data[12:]) == utils.NBD_COMMAND_EXPORT_NAME {\n waiting_for = 20\n }\n }\n\n fmt.Printf(\"%d packets processed to get %d bytes\\n\", packet_count, offset)\n utils.LogData(\"Received from client\", offset, data)\n options := binary.BigEndian.Uint32(data[:4])\n command := binary.BigEndian.Uint32(data[12:])\n payload_size := int(binary.BigEndian.Uint32(data[16:]))\n fmt.Printf(\"Options are: %v\\n\", options)\n if (options & utils.NBD_FLAG_FIXED_NEW_STYLE) == utils.NBD_FLAG_FIXED_NEW_STYLE {\n fmt.Printf(\"Fixed New Style option requested\\n\")\n }\n pad_with_zeros := true\n if (options & utils.NBD_FLAG_NO_ZEROES) == utils.NBD_FLAG_NO_ZEROES {\n pad_with_zeros = false\n fmt.Printf(\"No Zero Padding option requested\\n\")\n }\n\n fmt.Sprintf(\"command is: %d\\npayload_size is: %d\\n\", command, payload_size)\n waiting_for += int(payload_size)\n for offset < waiting_for {\n length, err := conn.Read(data[offset:])\n offset += length\n utils.ErrorCheck(err)\n utils.LogData(\"Reading instruction\", offset, data)\n if offset < waiting_for {\n time.Sleep(5 * time.Millisecond)\n }\n }\n payload := make([]byte, payload_size)\n\n if payload_size > 0{\n copy(payload, data[20:])\n }\n\n utils.LogData(\"Payload is:\", payload_size, payload)\n fmt.Printf(\"command is: %v\\n\", command)\n\n \/\/ At this point, we have the command, payload size, and payload.\n switch command {\n case utils.NBD_COMMAND_LIST:\n send_export_list(output)\n conn.Close()\n break\n case utils.NBD_COMMAND_EXPORT_NAME:\n go export_name(output, conn, payload_size, payload, pad_with_zeros)\n break\n }\n }\n\n}\n<|endoftext|>"} {"text":"<commit_before>package ostent\nimport (\n\t\"share\/assets\"\n\n\t\"os\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"flag\"\n\t\"bytes\"\n\t\"strings\"\n\t\"net\/http\"\n\n\t\"github.com\/justinas\/alice\"\n)\n\ntype bindValue struct {\n\tstring\n\tdefport string \/\/ const\n\tHost, Port string \/\/ available after flag.Parse()\n}\n\nfunc newBind(defstring, defport string) bindValue {\n\tbv := bindValue{defport: defport}\n\tbv.Set(defstring)\n\treturn bv\n}\n\n\/\/ satisfying flag.Value interface\nfunc(bv bindValue) String() string { return string(bv.string); }\nfunc(bv *bindValue) Set(input string) error {\n\tif input == \"\" {\n\t\treturn nil\n\t}\n\tif !strings.Contains(input, \":\") {\n\t\tinput = \":\" + input\n\t}\n\tvar err error\n\tbv.Host, bv.Port, err = net.SplitHostPort(input)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif bv.Host == \"*\" {\n\t\tbv.Host = \"\"\n\t} else if bv.Port == \"127\" {\n\t\tbv.Host = \"127.0.0.1\"\n\t\tbv.Port = bv.defport\n\t}\n\tif _, err = net.LookupPort(\"tcp\", bv.Port); err != nil {\n\t\tif bv.Host != \"\" {\n\t\t\treturn err\n\t\t}\n\t\tbv.Host, bv.Port = bv.Port, bv.defport\n\t}\n\n\tbv.string = bv.Host + \":\" + bv.Port\n\treturn nil\n}\n\nvar OstentBindFlag = newBind(\":8050\", \"8050\")\n\/\/ var CollectdBindFlag = newBind(\"\", \"8051\") \/\/ \"\" by default meaning DO NOT BIND\nfunc init() {\n\tflag.Var(&OstentBindFlag, \"b\", \"Bind address\")\n\tflag.Var(&OstentBindFlag, \"bind\", \"Bind address\")\n\t\/\/ flag.Var(&CollectdBindFlag, \"collectdb\", \"Bind address for collectd receiving\")\n\t\/\/ flag.Var(&CollectdBindFlag, \"collectdbind\", \"Bind address for collectd receiving\")\n}\n\ntype Muxmap map[string]http.HandlerFunc\n\nvar stdaccess *logger \/\/ a global, available after Serve call\n\nfunc Serve(listen net.Listener, production bool, extramap Muxmap) error {\n\tlogger := log.New(os.Stderr, \"[ostent] \", 0)\n\taccess := log.New(os.Stdout, \"\", 0)\n\n\tstdaccess = NewLogged(production, access)\n\trecovery := Recovery(production)\n\n\tchain := alice.New(\n\t\tstdaccess.Constructor,\n\t\trecovery .Constructor,\n\t)\n\tmux := NewMux(chain.Then)\n\n\tfor _, path := range assets.AssetNames() {\n\t\thf := chain.Then(serveContentFunc(path))\n\t\tmux.Handle(\"GET\", \"\/\"+ path, hf)\n\t\tmux.Handle(\"HEAD\", \"\/\"+ path, hf)\n\t}\n\n\/\/\tmux.Handle(\"GET\", \"\/ws\", chain.ThenFunc(slashws)) \/\/ that would include stdlogger\n\tmux.Handle(\"GET\", \"\/ws\", recovery.ConstructorFunc(slashws)) \/\/ slashws uses stdlogger itself\n\n\tmux.Handle(\"GET\", \"\/\", chain.ThenFunc(index))\n\tmux.Handle(\"HEAD\", \"\/\", chain.ThenFunc(index))\n\n\t\/\/ mux.Handle(\"GET\", \"\/panic\", chain.ThenFunc(func(http.ResponseWriter, *http.Request) { panic(fmt.Errorf(\"I'm panicing\")) }))\n\n\tif extramap != nil {\n\t\tfor path, handler := range extramap {\n\t\t\tfor _, METH := range []string{\"HEAD\", \"GET\", \"POST\"} {\n\t\t\t\tmux.Handle(METH, path, chain.Then(handler))\n\t\t\t}\n\t\t}\n\t}\n\n\tbanner(listen, logger)\n\n\tserver := &http.Server{Addr: listen.Addr().String(), Handler: mux}\n\treturn server.Serve(listen)\n}\n\nfunc serveContentFunc(path string) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\ttext, err := assets.Uncompressedasset(path)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tmodtime, err := assets.ModTime(\"assets\", path)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\thttp.ServeContent(w, r, path, modtime, bytes.NewReader(text))\n\t}\n}\n\nfunc banner(listen net.Listener, logger *log.Logger) {\n\thostname, _ := getHostname()\n\tlogger.Printf(\" %s\\n\", strings.Repeat(\"-\", len(hostname) + 7))\n\tif len(hostname) > 19 {\n\t\thostname = hostname[:16] +\"...\"\n\t}\n\tlogger.Printf(\" \/ %s ostent \\\\ \\n\", hostname)\n\tlogger.Printf(\"+------------------------------+\")\n\n\taddr := listen.Addr()\n\tif h, port, err := net.SplitHostPort(addr.String()); err == nil && h == \"::\" {\n\t\t\/\/ wildcard bind\n\n\t\t\/* _, IP := NewInterfaces()\n\t\tlogger.Printf(\" http:\/\/%s\", IP) \/\/ *\/\n\t\taddrs, err := net.InterfaceAddrs()\n\t\tif err == nil {\n\t\t\tfst := true\n\t\t\tfor _, a := range addrs {\n\t\t\t\tipnet, ok := a.(*net.IPNet)\n\t\t\t\tif !ok || strings.Contains(ipnet.IP.String(), \":\") {\n\t\t\t\t\tcontinue \/\/ no IPv6 for now\n\t\t\t\t}\n\t\t\t\tf := fmt.Sprintf(\"http:\/\/%s:%s\", ipnet.IP.String(), port)\n\t\t\t\tif len(f) < 28 {\n\t\t\t\t\tf += strings.Repeat(\" \", 28 - len(f))\n\t\t\t\t}\n\t\t\t\tif !fst {\n\t\t\t\t\tlogger.Printf(\"|------------------------------|\")\n\t\t\t\t}\n\t\t\t\tfst = false\n\t\t\t\tlogger.Printf(\"| %s |\", f)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tf := fmt.Sprintf(\"http:\/\/%s\", addr.String())\n\t\tif len(f) < 28 {\n\t\t\tf += strings.Repeat(\" \", 28 - len(f))\n\t\t}\n\t\tlogger.Printf(\"| %s |\", f)\n\t}\n\tlogger.Printf(\"+------------------------------+\")\n}\n\nconst VERSION = \"0.1.8\"\n<commit_msg>godoc, gofmt<commit_after>package ostent\n\nimport (\n\t\"share\/assets\"\n\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/justinas\/alice\"\n)\n\ntype bindValue struct {\n\tstring\n\tdefport string \/\/ const\n\tHost string \/\/ available after flag.Parse()\n\tPort string \/\/ available after flag.Parse()\n}\n\nfunc newBind(defstring, defport string) bindValue {\n\tbv := bindValue{defport: defport}\n\tbv.Set(defstring)\n\treturn bv\n}\n\n\/\/ satisfying flag.Value interface\nfunc (bv bindValue) String() string { return string(bv.string) }\nfunc (bv *bindValue) Set(input string) error {\n\tif input == \"\" {\n\t\treturn nil\n\t}\n\tif !strings.Contains(input, \":\") {\n\t\tinput = \":\" + input\n\t}\n\tvar err error\n\tbv.Host, bv.Port, err = net.SplitHostPort(input)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif bv.Host == \"*\" {\n\t\tbv.Host = \"\"\n\t} else if bv.Port == \"127\" {\n\t\tbv.Host = \"127.0.0.1\"\n\t\tbv.Port = bv.defport\n\t}\n\tif _, err = net.LookupPort(\"tcp\", bv.Port); err != nil {\n\t\tif bv.Host != \"\" {\n\t\t\treturn err\n\t\t}\n\t\tbv.Host, bv.Port = bv.Port, bv.defport\n\t}\n\n\tbv.string = bv.Host + \":\" + bv.Port\n\treturn nil\n}\n\n\/\/ OstentBindFlag is a bindValue hoding the ostent bind address.\nvar OstentBindFlag = newBind(\":8050\", \"8050\")\n\n\/\/ CollectdBindFlag is a bindValue hoding the ostent collectd bind address.\n\/\/ var CollectdBindFlag = newBind(\"\", \"8051\") \/\/ \"\" by default meaning DO NOT BIND\nfunc init() {\n\tflag.Var(&OstentBindFlag, \"b\", \"short for bind\")\n\tflag.Var(&OstentBindFlag, \"bind\", \"Bind address\")\n\t\/\/ flag.Var(&CollectdBindFlag, \"collectdb\", \"short for collectdbind\")\n\t\/\/ flag.Var(&CollectdBindFlag, \"collectdbind\", \"Bind address for collectd receiving\")\n}\n\n\/\/ Muxmap is a type of a map of pattern to HandlerFunc.\ntype Muxmap map[string]http.HandlerFunc\n\nvar stdaccess *logger \/\/ a global, available after Serve call\n\n\/\/ Serve does http.Serve with the listener l and constructed *TrieServeMux.\n\/\/ production is passed to logging and recoverying middleware.\n\/\/ Non-nil extramap is passed to the mux.\n\/\/ Returns http.Serve result.\nfunc Serve(l net.Listener, production bool, extramap Muxmap) error {\n\tlogger := log.New(os.Stderr, \"[ostent] \", 0)\n\taccess := log.New(os.Stdout, \"\", 0)\n\n\tstdaccess = NewLogged(production, access)\n\trecovery := Recovery(production)\n\n\tchain := alice.New(\n\t\tstdaccess.Constructor,\n\t\t recovery.Constructor,\n\t)\n\tmux := NewMux(chain.Then)\n\n\tfor _, path := range assets.AssetNames() {\n\t\thf := chain.Then(serveContentFunc(path))\n\t\tmux.Handle(\"GET\", \"\/\"+path, hf)\n\t\tmux.Handle(\"HEAD\", \"\/\"+path, hf)\n\t}\n\n\t\/\/\tchain.ThenFunc(slashws)) handler would include stdlogger\n\t\/\/ slashws uses stdlogger itself\n\tmux.Handle(\"GET\", \"\/ws\", recovery.ConstructorFunc(slashws))\n\n\tmux.Handle(\"GET\", \"\/\", chain.ThenFunc(index))\n\tmux.Handle(\"HEAD\", \"\/\", chain.ThenFunc(index))\n\n\t\/* panics := func(http.ResponseWriter, *http.Request) {\n\t\tpanic(fmt.Errorf(\"I'm panicing\"))\n\t}\n\tmux.Handle(\"GET\", \"\/panic\", chain.ThenFunc(panics)) \/\/ *\/\n\n\tif extramap != nil {\n\t\tfor path, handler := range extramap {\n\t\t\tfor _, METH := range []string{\"HEAD\", \"GET\", \"POST\"} {\n\t\t\t\tmux.Handle(METH, path, chain.Then(handler))\n\t\t\t}\n\t\t}\n\t}\n\n\tbanner(l, logger)\n\n\tserver := &http.Server{Addr: l.Addr().String(), Handler: mux}\n\treturn server.Serve(l)\n}\n\nfunc serveContentFunc(path string) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\ttext, err := assets.Uncompressedasset(path)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tmodtime, err := assets.ModTime(\"assets\", path)\n\t\tif err != nil {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\thttp.ServeContent(w, r, path, modtime, bytes.NewReader(text))\n\t}\n}\n\nfunc banner(listen net.Listener, logger *log.Logger) {\n\thostname, _ := getHostname()\n\tlogger.Printf(\" %s\\n\", strings.Repeat(\"-\", len(hostname)+7))\n\tif len(hostname) > 19 {\n\t\thostname = hostname[:16] + \"...\"\n\t}\n\tlogger.Printf(\" \/ %s ostent \\\\ \\n\", hostname)\n\tlogger.Printf(\"+------------------------------+\")\n\n\taddr := listen.Addr()\n\tif h, port, err := net.SplitHostPort(addr.String()); err == nil && h == \"::\" {\n\t\t\/\/ wildcard bind\n\n\t\t\/* _, IP := NewInterfaces()\n\t\tlogger.Printf(\" http:\/\/%s\", IP) \/\/ *\/\n\t\taddrs, err := net.InterfaceAddrs()\n\t\tif err == nil {\n\t\t\tfst := true\n\t\t\tfor _, a := range addrs {\n\t\t\t\tipnet, ok := a.(*net.IPNet)\n\t\t\t\tif !ok || strings.Contains(ipnet.IP.String(), \":\") {\n\t\t\t\t\tcontinue \/\/ no IPv6 for now\n\t\t\t\t}\n\t\t\t\tf := fmt.Sprintf(\"http:\/\/%s:%s\", ipnet.IP.String(), port)\n\t\t\t\tif len(f) < 28 {\n\t\t\t\t\tf += strings.Repeat(\" \", 28-len(f))\n\t\t\t\t}\n\t\t\t\tif !fst {\n\t\t\t\t\tlogger.Printf(\"|------------------------------|\")\n\t\t\t\t}\n\t\t\t\tfst = false\n\t\t\t\tlogger.Printf(\"| %s |\", f)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tf := fmt.Sprintf(\"http:\/\/%s\", addr.String())\n\t\tif len(f) < 28 {\n\t\t\tf += strings.Repeat(\" \", 28-len(f))\n\t\t}\n\t\tlogger.Printf(\"| %s |\", f)\n\t}\n\tlogger.Printf(\"+------------------------------+\")\n}\n\n\/\/ VERSION of the latest known release.\n\/\/ Unused in non-production mode.\n\/\/ Compared with in main.production.go.\nconst VERSION = \"0.1.8\"\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Copyright 2017 XLAB d.o.o.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n *\/\n\npackage server\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"net\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\n\t\"github.com\/grpc-ecosystem\/go-grpc-prometheus\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/xlab-si\/emmy\/config\"\n\t\"github.com\/xlab-si\/emmy\/crypto\/groups\"\n\t\"github.com\/xlab-si\/emmy\/log\"\n\tpb \"github.com\/xlab-si\/emmy\/protobuf\"\n\t\"github.com\/xlab-si\/emmy\/types\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/credentials\"\n)\n\nvar _ pb.ProtocolServer = (*Server)(nil)\n\ntype Server struct {\n\tgrpcServer *grpc.Server\n\tlogger log.Logger\n\t*sessionManager\n\t*registrationManager\n}\n\n\/\/ NewProtocolServer initializes an instance of the Server struct and returns a pointer.\n\/\/ It performs some default configuration (tracing of gRPC communication and interceptors)\n\/\/ and registers RPC protocol server with gRPC server. It requires TLS cert and keyfile\n\/\/ in order to establish a secure channel with clients.\nfunc NewProtocolServer(certFile, keyFile string, logger log.Logger) (*Server, error) {\n\tlogger.Info(\"Instantiating new protocol server\")\n\n\t\/\/ Register our generic service\n\tlogger.Info(\"Registering services\")\n\n\t\/\/ Obtain TLS credentials\n\tcreds, err := credentials.NewServerTLSFromFile(certFile, keyFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogger.Infof(\"Successfully read certificate [%s] and key [%s]\", certFile, keyFile)\n\n\tsessionManager, err := newSessionManager(config.LoadSessionKeyMinByteLen())\n\tif err != nil {\n\t\tlogger.Warning(err)\n\t}\n\n\tregistrationManager, err := NewRegistrationManager(config.LoadRegistrationDBAddress())\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn nil, err\n\t}\n\n\t\/\/ Allow as much concurrent streams as possible and register a gRPC stream interceptor\n\t\/\/ for logging and monitoring purposes.\n\tserver := &Server{\n\t\tgrpcServer: grpc.NewServer(\n\t\t\tgrpc.Creds(creds),\n\t\t\tgrpc.MaxConcurrentStreams(math.MaxUint32),\n\t\t\tgrpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),\n\t\t),\n\t\tlogger: logger,\n\t\tsessionManager: sessionManager,\n\t\tregistrationManager: registrationManager,\n\t}\n\n\t\/\/ Disable tracing by default, as is used for debugging purposes.\n\t\/\/ The user will be able to turn it on via Server's EnableTracing function.\n\tgrpc.EnableTracing = false\n\n\t\/\/ Register our services with the supporting gRPC server\n\tpb.RegisterProtocolServer(server.grpcServer, server)\n\tpb.RegisterInfoServer(server.grpcServer, server)\n\n\t\/\/ Initialize gRPC metrics offered by Prometheus package\n\tgrpc_prometheus.Register(server.grpcServer)\n\n\tlogger.Notice(\"gRPC Services registered\")\n\treturn server, nil\n}\n\n\/\/ Start configures and starts the protocol server at the requested port.\nfunc (s *Server) Start(port int) error {\n\tconnStr := fmt.Sprintf(\":%d\", port)\n\tlistener, err := net.Listen(\"tcp\", connStr)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not connect: %v\", err)\n\t}\n\n\t\/\/ Register Prometheus metrics handler and serve metrics page on the desired endpoint.\n\t\/\/ Metrics are handled via HTTP in a separate goroutine as gRPC requests,\n\t\/\/ as grpc server's performance over HTTP (grpcServer.ServeHTTP) is much worse.\n\thttp.Handle(\"\/metrics\", prometheus.Handler())\n\n\t\/\/ After this, \/metrics will be available, along with \/debug\/requests, \/debug\/events in\n\t\/\/ case server's EnableTracing function is called.\n\tgo http.ListenAndServe(\":8881\", nil)\n\n\t\/\/ From here on, gRPC server will accept connections\n\ts.logger.Noticef(\"Emmy server listening for connections on port %d\", port)\n\ts.grpcServer.Serve(listener)\n\treturn nil\n}\n\n\/\/ Teardown stops the protocol server by gracefully stopping enclosed gRPC server.\nfunc (s *Server) Teardown() {\n\ts.logger.Notice(\"Tearing down gRPC server\")\n\ts.grpcServer.GracefulStop()\n}\n\n\/\/ EnableTracing instructs the gRPC framework to enable its tracing capability, which\n\/\/ is mainly used for debugging purposes.\n\/\/ Although this function does not explicitly affect the Server struct, it is wired to Server\n\/\/ in order to provide a nicer API when setting up the server.\nfunc (s *Server) EnableTracing() {\n\tgrpc.EnableTracing = true\n\ts.logger.Notice(\"Enabled gRPC tracing\")\n}\n\nfunc (s *Server) send(msg *pb.Message, stream pb.Protocol_RunServer) error {\n\tif err := stream.Send(msg); err != nil {\n\t\treturn fmt.Errorf(\"Error sending message:\", err)\n\t}\n\ts.logger.Infof(\"Successfully sent response of type %T\", msg.Content)\n\ts.logger.Debugf(\"%+v\", msg)\n\n\treturn nil\n}\n\nfunc (s *Server) receive(stream pb.Protocol_RunServer) (*pb.Message, error) {\n\tresp, err := stream.Recv()\n\tif err == io.EOF {\n\t\treturn nil, err\n\t} else if err != nil {\n\t\treturn nil, fmt.Errorf(\"An error ocurred: %v\", err)\n\t}\n\ts.logger.Infof(\"Received request of type %T from the stream\", resp.Content)\n\ts.logger.Debugf(\"%+v\", resp)\n\n\treturn resp, nil\n}\n\nfunc (s *Server) Run(stream pb.Protocol_RunServer) error {\n\ts.logger.Info(\"Starting new RPC\")\n\n\treq, err := s.receive(stream)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treqClientId := req.ClientId\n\treqSchemaType := req.Schema\n\treqSchemaVariant := req.SchemaVariant\n\n\t\/\/ Check whether the client requested a valid schema\n\treqSchemaTypeStr, schemaValid := pb.SchemaType_name[int32(reqSchemaType)]\n\tif !schemaValid {\n\t\treturn fmt.Errorf(\"Client [\", reqClientId, \"] requested invalid schema: %v\", reqSchemaType)\n\t}\n\n\t\/\/ Check whether the client requested a valid schema variant\n\treqSchemaVariantStr, variantValid := pb.SchemaVariant_name[int32(reqSchemaVariant)]\n\tif !variantValid {\n\t\treturn fmt.Errorf(\"Client [ %v ] requested invalid schema variant: %v\", reqClientId, reqSchemaVariant)\n\t}\n\n\ts.logger.Noticef(\"Client [ %v ] requested schema %v, variant %v\", reqClientId, reqSchemaTypeStr, reqSchemaVariantStr)\n\n\t\/\/ Convert Sigma, ZKP or ZKPOK protocol type to a types type\n\tprotocolType := types.ToProtocolType(reqSchemaVariant)\n\t\/\/ This curve will be used for all schemes\n\tcurve := groups.P256\n\n\tswitch reqSchemaType {\n\tcase pb.SchemaType_PEDERSEN_EC:\n\t\terr = s.PedersenEC(curve, stream)\n\tcase pb.SchemaType_PEDERSEN:\n\t\tgroup := config.LoadGroup(\"pedersen\")\n\t\terr = s.Pedersen(group, stream)\n\tcase pb.SchemaType_SCHNORR:\n\t\tgroup := config.LoadGroup(\"schnorr\")\n\t\terr = s.Schnorr(req, group, protocolType, stream)\n\tcase pb.SchemaType_SCHNORR_EC:\n\t\terr = s.SchnorrEC(req, protocolType, stream, curve)\n\tcase pb.SchemaType_CSPAILLIER:\n\t\tkeyDir := config.LoadKeyDirFromConfig()\n\t\tsecKeyPath := filepath.Join(keyDir, \"cspaillierseckey.txt\")\n\t\terr = s.CSPaillier(req, secKeyPath, stream)\n\tcase pb.SchemaType_PSEUDONYMSYS_CA:\n\t\terr = s.PseudonymsysCA(req, stream)\n\tcase pb.SchemaType_PSEUDONYMSYS_NYM_GEN:\n\t\terr = s.PseudonymsysGenerateNym(req, stream)\n\tcase pb.SchemaType_PSEUDONYMSYS_ISSUE_CREDENTIAL:\n\t\terr = s.PseudonymsysIssueCredential(req, stream)\n\tcase pb.SchemaType_PSEUDONYMSYS_TRANSFER_CREDENTIAL:\n\t\terr = s.PseudonymsysTransferCredential(req, stream)\n\tcase pb.SchemaType_PSEUDONYMSYS_CA_EC:\n\t\terr = s.PseudonymsysCAEC(curve, req, stream)\n\tcase pb.SchemaType_PSEUDONYMSYS_NYM_GEN_EC:\n\t\terr = s.PseudonymsysGenerateNymEC(curve, req, stream)\n\tcase pb.SchemaType_PSEUDONYMSYS_ISSUE_CREDENTIAL_EC:\n\t\terr = s.PseudonymsysIssueCredentialEC(curve, req, stream)\n\tcase pb.SchemaType_PSEUDONYMSYS_TRANSFER_CREDENTIAL_EC:\n\t\terr = s.PseudonymsysTransferCredentialEC(curve, req, stream)\n\tcase pb.SchemaType_QR:\n\t\tgroup := config.LoadGroup(\"pseudonymsys\")\n\t\terr = s.QR(req, group, stream)\n\tcase pb.SchemaType_QNR:\n\t\tqr := config.LoadQRRSA(\"qrsmall\") \/\/ only for testing\n\t\terr = s.QNR(req, qr, stream)\n\t}\n\n\tif err != nil {\n\t\ts.logger.Error(\"Closing RPC due to previous errors\")\n\t\treturn fmt.Errorf(\"FAIL: %v\", err)\n\t}\n\n\ts.logger.Notice(\"RPC finished successfully\")\n\treturn nil\n}\n<commit_msg>Update calls to fmt.Errorf in server.go<commit_after>\/*\n * Copyright 2017 XLAB d.o.o.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n *\/\n\npackage server\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"net\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\n\t\"github.com\/grpc-ecosystem\/go-grpc-prometheus\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/xlab-si\/emmy\/config\"\n\t\"github.com\/xlab-si\/emmy\/crypto\/groups\"\n\t\"github.com\/xlab-si\/emmy\/log\"\n\tpb \"github.com\/xlab-si\/emmy\/protobuf\"\n\t\"github.com\/xlab-si\/emmy\/types\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/credentials\"\n)\n\nvar _ pb.ProtocolServer = (*Server)(nil)\n\ntype Server struct {\n\tgrpcServer *grpc.Server\n\tlogger log.Logger\n\t*sessionManager\n\t*registrationManager\n}\n\n\/\/ NewProtocolServer initializes an instance of the Server struct and returns a pointer.\n\/\/ It performs some default configuration (tracing of gRPC communication and interceptors)\n\/\/ and registers RPC protocol server with gRPC server. It requires TLS cert and keyfile\n\/\/ in order to establish a secure channel with clients.\nfunc NewProtocolServer(certFile, keyFile string, logger log.Logger) (*Server, error) {\n\tlogger.Info(\"Instantiating new protocol server\")\n\n\t\/\/ Register our generic service\n\tlogger.Info(\"Registering services\")\n\n\t\/\/ Obtain TLS credentials\n\tcreds, err := credentials.NewServerTLSFromFile(certFile, keyFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogger.Infof(\"Successfully read certificate [%s] and key [%s]\", certFile, keyFile)\n\n\tsessionManager, err := newSessionManager(config.LoadSessionKeyMinByteLen())\n\tif err != nil {\n\t\tlogger.Warning(err)\n\t}\n\n\tregistrationManager, err := NewRegistrationManager(config.LoadRegistrationDBAddress())\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn nil, err\n\t}\n\n\t\/\/ Allow as much concurrent streams as possible and register a gRPC stream interceptor\n\t\/\/ for logging and monitoring purposes.\n\tserver := &Server{\n\t\tgrpcServer: grpc.NewServer(\n\t\t\tgrpc.Creds(creds),\n\t\t\tgrpc.MaxConcurrentStreams(math.MaxUint32),\n\t\t\tgrpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),\n\t\t),\n\t\tlogger: logger,\n\t\tsessionManager: sessionManager,\n\t\tregistrationManager: registrationManager,\n\t}\n\n\t\/\/ Disable tracing by default, as is used for debugging purposes.\n\t\/\/ The user will be able to turn it on via Server's EnableTracing function.\n\tgrpc.EnableTracing = false\n\n\t\/\/ Register our services with the supporting gRPC server\n\tpb.RegisterProtocolServer(server.grpcServer, server)\n\tpb.RegisterInfoServer(server.grpcServer, server)\n\n\t\/\/ Initialize gRPC metrics offered by Prometheus package\n\tgrpc_prometheus.Register(server.grpcServer)\n\n\tlogger.Notice(\"gRPC Services registered\")\n\treturn server, nil\n}\n\n\/\/ Start configures and starts the protocol server at the requested port.\nfunc (s *Server) Start(port int) error {\n\tconnStr := fmt.Sprintf(\":%d\", port)\n\tlistener, err := net.Listen(\"tcp\", connStr)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not connect: %v\", err)\n\t}\n\n\t\/\/ Register Prometheus metrics handler and serve metrics page on the desired endpoint.\n\t\/\/ Metrics are handled via HTTP in a separate goroutine as gRPC requests,\n\t\/\/ as grpc server's performance over HTTP (grpcServer.ServeHTTP) is much worse.\n\thttp.Handle(\"\/metrics\", prometheus.Handler())\n\n\t\/\/ After this, \/metrics will be available, along with \/debug\/requests, \/debug\/events in\n\t\/\/ case server's EnableTracing function is called.\n\tgo http.ListenAndServe(\":8881\", nil)\n\n\t\/\/ From here on, gRPC server will accept connections\n\ts.logger.Noticef(\"Emmy server listening for connections on port %d\", port)\n\ts.grpcServer.Serve(listener)\n\treturn nil\n}\n\n\/\/ Teardown stops the protocol server by gracefully stopping enclosed gRPC server.\nfunc (s *Server) Teardown() {\n\ts.logger.Notice(\"Tearing down gRPC server\")\n\ts.grpcServer.GracefulStop()\n}\n\n\/\/ EnableTracing instructs the gRPC framework to enable its tracing capability, which\n\/\/ is mainly used for debugging purposes.\n\/\/ Although this function does not explicitly affect the Server struct, it is wired to Server\n\/\/ in order to provide a nicer API when setting up the server.\nfunc (s *Server) EnableTracing() {\n\tgrpc.EnableTracing = true\n\ts.logger.Notice(\"Enabled gRPC tracing\")\n}\n\nfunc (s *Server) send(msg *pb.Message, stream pb.Protocol_RunServer) error {\n\tif err := stream.Send(msg); err != nil {\n\t\treturn fmt.Errorf(\"Error sending message: %v\", err)\n\t}\n\ts.logger.Infof(\"Successfully sent response of type %T\", msg.Content)\n\ts.logger.Debugf(\"%+v\", msg)\n\n\treturn nil\n}\n\nfunc (s *Server) receive(stream pb.Protocol_RunServer) (*pb.Message, error) {\n\tresp, err := stream.Recv()\n\tif err == io.EOF {\n\t\treturn nil, err\n\t} else if err != nil {\n\t\treturn nil, fmt.Errorf(\"An error ocurred: %v\", err)\n\t}\n\ts.logger.Infof(\"Received request of type %T from the stream\", resp.Content)\n\ts.logger.Debugf(\"%+v\", resp)\n\n\treturn resp, nil\n}\n\nfunc (s *Server) Run(stream pb.Protocol_RunServer) error {\n\ts.logger.Info(\"Starting new RPC\")\n\n\treq, err := s.receive(stream)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treqClientId := req.ClientId\n\treqSchemaType := req.Schema\n\treqSchemaVariant := req.SchemaVariant\n\n\t\/\/ Check whether the client requested a valid schema\n\treqSchemaTypeStr, schemaValid := pb.SchemaType_name[int32(reqSchemaType)]\n\tif !schemaValid {\n\t\treturn fmt.Errorf(\"Client [ %d ] requested invalid schema: %v\", reqClientId, reqSchemaType)\n\t}\n\n\t\/\/ Check whether the client requested a valid schema variant\n\treqSchemaVariantStr, variantValid := pb.SchemaVariant_name[int32(reqSchemaVariant)]\n\tif !variantValid {\n\t\treturn fmt.Errorf(\"Client [ %d ] requested invalid schema variant: %v\", reqClientId, reqSchemaVariant)\n\t}\n\n\ts.logger.Noticef(\"Client [ %v ] requested schema %v, variant %v\", reqClientId, reqSchemaTypeStr, reqSchemaVariantStr)\n\n\t\/\/ Convert Sigma, ZKP or ZKPOK protocol type to a types type\n\tprotocolType := types.ToProtocolType(reqSchemaVariant)\n\t\/\/ This curve will be used for all schemes\n\tcurve := groups.P256\n\n\tswitch reqSchemaType {\n\tcase pb.SchemaType_PEDERSEN_EC:\n\t\terr = s.PedersenEC(curve, stream)\n\tcase pb.SchemaType_PEDERSEN:\n\t\tgroup := config.LoadGroup(\"pedersen\")\n\t\terr = s.Pedersen(group, stream)\n\tcase pb.SchemaType_SCHNORR:\n\t\tgroup := config.LoadGroup(\"schnorr\")\n\t\terr = s.Schnorr(req, group, protocolType, stream)\n\tcase pb.SchemaType_SCHNORR_EC:\n\t\terr = s.SchnorrEC(req, protocolType, stream, curve)\n\tcase pb.SchemaType_CSPAILLIER:\n\t\tkeyDir := config.LoadKeyDirFromConfig()\n\t\tsecKeyPath := filepath.Join(keyDir, \"cspaillierseckey.txt\")\n\t\terr = s.CSPaillier(req, secKeyPath, stream)\n\tcase pb.SchemaType_PSEUDONYMSYS_CA:\n\t\terr = s.PseudonymsysCA(req, stream)\n\tcase pb.SchemaType_PSEUDONYMSYS_NYM_GEN:\n\t\terr = s.PseudonymsysGenerateNym(req, stream)\n\tcase pb.SchemaType_PSEUDONYMSYS_ISSUE_CREDENTIAL:\n\t\terr = s.PseudonymsysIssueCredential(req, stream)\n\tcase pb.SchemaType_PSEUDONYMSYS_TRANSFER_CREDENTIAL:\n\t\terr = s.PseudonymsysTransferCredential(req, stream)\n\tcase pb.SchemaType_PSEUDONYMSYS_CA_EC:\n\t\terr = s.PseudonymsysCAEC(curve, req, stream)\n\tcase pb.SchemaType_PSEUDONYMSYS_NYM_GEN_EC:\n\t\terr = s.PseudonymsysGenerateNymEC(curve, req, stream)\n\tcase pb.SchemaType_PSEUDONYMSYS_ISSUE_CREDENTIAL_EC:\n\t\terr = s.PseudonymsysIssueCredentialEC(curve, req, stream)\n\tcase pb.SchemaType_PSEUDONYMSYS_TRANSFER_CREDENTIAL_EC:\n\t\terr = s.PseudonymsysTransferCredentialEC(curve, req, stream)\n\tcase pb.SchemaType_QR:\n\t\tgroup := config.LoadGroup(\"pseudonymsys\")\n\t\terr = s.QR(req, group, stream)\n\tcase pb.SchemaType_QNR:\n\t\tqr := config.LoadQRRSA(\"qrsmall\") \/\/ only for testing\n\t\terr = s.QNR(req, qr, stream)\n\t}\n\n\tif err != nil {\n\t\ts.logger.Error(\"Closing RPC due to previous errors\")\n\t\treturn fmt.Errorf(\"FAIL: %v\", err)\n\t}\n\n\ts.logger.Notice(\"RPC finished successfully\")\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"fmt\"\n\t\"github.com\/gansidui\/chatserver\/handlers\"\n\t\"github.com\/gansidui\/chatserver\/packet\"\n\t\"github.com\/gansidui\/chatserver\/report\"\n\t\"github.com\/gansidui\/chatserver\/utils\/convert\"\n\t\"github.com\/gansidui\/chatserver\/utils\/funcmap\"\n\t\"log\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype Server struct {\n\texitCh chan bool \/\/ 结束信号\n\twaitGroup *sync.WaitGroup \/\/ 等待goroutine\n\tfuncMap *funcmap.FuncMap \/\/ 映射消息处理函数(uint32 --> func)\n\tacceptTimeout time.Duration \/\/ 连接超时时间\n\treadTimeout time.Duration \/\/ 读超时时间,其实也就是心跳维持时间\n\twriteTimeout time.Duration \/\/ 写超时时间\n\treqMemPool *sync.Pool \/\/ 为每个conn分配一个固定的接收缓存\n}\n\nfunc NewServer() *Server {\n\treturn &Server{\n\t\texitCh: make(chan bool),\n\t\twaitGroup: &sync.WaitGroup{},\n\t\tfuncMap: funcmap.NewFuncMap(),\n\t\tacceptTimeout: 30,\n\t\treadTimeout: 60,\n\t\twriteTimeout: 60,\n\t\treqMemPool: &sync.Pool{\n\t\t\tNew: func() interface{} {\n\t\t\t\treturn make([]byte, 1024)\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc (this *Server) SetAcceptTimeout(acceptTimeout time.Duration) {\n\tthis.acceptTimeout = acceptTimeout\n}\n\nfunc (this *Server) SetReadTimeout(readTimeout time.Duration) {\n\tthis.readTimeout = readTimeout\n}\n\nfunc (this *Server) SetWriteTimeout(writeTimeout time.Duration) {\n\tthis.writeTimeout = writeTimeout\n}\n\nfunc (this *Server) Start(listener *net.TCPListener) {\n\tlog.Printf(\"Start listen on %v\\r\\n\", listener.Addr())\n\tthis.waitGroup.Add(1)\n\tdefer func() {\n\t\tlistener.Close()\n\t\tthis.waitGroup.Done()\n\t}()\n\n\t\/\/ 防止恶意连接\n\tgo this.dealSpamConn()\n\n\t\/\/ report记录,定时发送邮件\n\tgo report.Work()\n\n\tfor {\n\t\tselect {\n\t\tcase <-this.exitCh:\n\t\t\tlog.Printf(\"Stop listen on %v\\r\\n\", listener.Addr())\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\tlistener.SetDeadline(time.Now().Add(this.acceptTimeout))\n\t\tconn, err := listener.AcceptTCP()\n\n\t\tif err != nil {\n\t\t\tif opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {\n\t\t\t\t\/\/ log.Printf(\"Accept timeout: %v\\r\\n\", opErr)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treport.AddCount(report.TryConnect, 1)\n\t\t\tlog.Printf(\"Accept error: %v\\r\\n\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\treport.AddCount(report.SuccessConnect, 1)\n\n\t\t\/\/ 连接后等待登陆验证\n\t\thandlers.ConnMapLoginStatus.Set(conn, time.Now())\n\t\tlog.Printf(\"Accept: %v\\r\\n\", conn.RemoteAddr())\n\n\t\tgo this.handleClientConn(conn)\n\t}\n}\n\n\/\/ 处理恶意连接,定时检测。\n\/\/ 若conn的loginstatus为nil,则说明conn已经登陆成功。\n\/\/ 若conn的loginstatus不为nil,则表示loginstatus为该conn连接服务器时的时间戳(time.Time)\n\/\/ 判断这个时间戳是否已经超过登陆限制时间,若超过,则断开。\nfunc (this *Server) dealSpamConn() {\n\tlimitTime := 60 * time.Second\n\tticker := time.NewTicker(limitTime)\n\tfor _ = range ticker.C {\n\t\titems := handlers.ConnMapLoginStatus.Items()\n\t\tfor conn, loginstatus := range items {\n\t\t\tif loginstatus != nil {\n\t\t\t\tdeadline := loginstatus.(time.Time).Add(limitTime)\n\t\t\t\tif time.Now().After(deadline) {\n\t\t\t\t\tconn.(*net.TCPConn).Close()\n\t\t\t\t\thandlers.ConnMapLoginStatus.Delete(conn.(*net.TCPConn))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ 输出当前的连接数\n\t\tfmt.Println(\"当前连接数: \", handlers.ConnMapUuid.Size())\n\t}\n}\n\nfunc (this *Server) Stop() {\n\t\/\/ close后,所有的exitCh都返回false\n\tclose(this.exitCh)\n\tthis.waitGroup.Wait()\n}\n\nfunc (this *Server) BindMsgHandler(pacType uint32, fn interface{}) error {\n\treturn this.funcMap.Bind(pacType, fn)\n}\n\nfunc (this *Server) handleClientConn(conn *net.TCPConn) {\n\tthis.waitGroup.Add(1)\n\tdefer this.waitGroup.Done()\n\n\treceivePackets := make(chan *packet.Packet, 20) \/\/ 接收到的包\n\tchStop := make(chan bool) \/\/ 通知停止消息处理\n\taddr := conn.RemoteAddr().String()\n\n\tdefer func() {\n\t\tdefer func() {\n\t\t\tif e := recover(); e != nil {\n\t\t\t\tlog.Printf(\"Panic: %v\\r\\n\", e)\n\t\t\t}\n\t\t}()\n\n\t\thandlers.CloseConn(conn)\n\n\t\tlog.Printf(\"Disconnect: %v\\r\\n\", addr)\n\t\tchStop <- true\n\t}()\n\n\t\/\/ 处理接收到的包\n\tgo this.handlePackets(conn, receivePackets, chStop)\n\n\t\/\/ 接收数据\n\tlog.Printf(\"HandleClient: %v\\r\\n\", addr)\n\n\trequest := this.reqMemPool.Get().([]byte)\n\tdefer this.reqMemPool.Put(request)\n\n\tbuf := make([]byte, 0)\n\tvar bufLen uint32 = 0\n\n\tfor {\n\t\tselect {\n\t\tcase <-this.exitCh:\n\t\t\tlog.Printf(\"Stop handleClientConn\\r\\n\")\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\tconn.SetReadDeadline(time.Now().Add(this.readTimeout))\n\t\treadSize, err := conn.Read(request)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Read failed: %v\\r\\n\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif readSize > 0 {\n\t\t\tbuf = append(buf, request[:readSize]...)\n\t\t\tbufLen += uint32(readSize)\n\n\t\t\t\/\/ 包长(4) + 类型(4) + 包体(len([]byte))\n\t\t\tfor {\n\t\t\t\tif bufLen >= 8 {\n\t\t\t\t\tpacLen := convert.BytesToUint32(buf[0:4])\n\t\t\t\t\tif bufLen >= pacLen {\n\t\t\t\t\t\treceivePackets <- &packet.Packet{\n\t\t\t\t\t\t\tLen: pacLen,\n\t\t\t\t\t\t\tType: convert.BytesToUint32(buf[4:8]),\n\t\t\t\t\t\t\tData: buf[8:pacLen],\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbuf = buf[pacLen:]\n\t\t\t\t\t\tbufLen -= pacLen\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n}\n\nfunc (this *Server) handlePackets(conn *net.TCPConn, receivePackets <-chan *packet.Packet, chStop <-chan bool) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tlog.Printf(\"Panic: %v\\r\\n\", e)\n\t\t}\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-chStop:\n\t\t\tlog.Printf(\"Stop handle receivePackets.\\r\\n\")\n\t\t\treturn\n\n\t\t\/\/ 消息包处理\n\t\tcase p := <-receivePackets:\n\t\t\t\/\/ 防止模拟的客户端未经登陆就发送消息\n\t\t\t\/\/ 如果接收的不是登陆包,则检查是否已经在线,若没在线,则无视消息包,等待登陆检测机制处理。\n\t\t\tif p.Type != packet.PK_ClientLogin {\n\t\t\t\tif !handlers.ConnMapUuid.Check(conn) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif this.funcMap.Exist(p.Type) {\n\t\t\t\tthis.funcMap.Call(p.Type, conn, p)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Unknown packet type\\r\\n\")\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>add ring buffer<commit_after>package server\n\nimport (\n\t\"fmt\"\n\t\"github.com\/gansidui\/chatserver\/handlers\"\n\t\"github.com\/gansidui\/chatserver\/packet\"\n\t\"github.com\/gansidui\/chatserver\/report\"\n\t\"github.com\/gansidui\/chatserver\/utils\/convert\"\n\t\"github.com\/gansidui\/chatserver\/utils\/funcmap\"\n\t\"github.com\/gansidui\/code\/ringbuffer\"\n\t\"log\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype Server struct {\n\texitCh chan bool \/\/ 结束信号\n\twaitGroup *sync.WaitGroup \/\/ 等待goroutine\n\tfuncMap *funcmap.FuncMap \/\/ 映射消息处理函数(uint32 --> func)\n\tacceptTimeout time.Duration \/\/ 连接超时时间\n\treadTimeout time.Duration \/\/ 读超时时间,其实也就是心跳维持时间\n\twriteTimeout time.Duration \/\/ 写超时时间\n\treqMemPool *sync.Pool \/\/ 为每个conn分配一个固定的接收缓存\n\trbufMemPool *sync.Pool \/\/ 为每个conn分配一个环形缓冲区\n}\n\nfunc NewServer() *Server {\n\treturn &Server{\n\t\texitCh: make(chan bool),\n\t\twaitGroup: &sync.WaitGroup{},\n\t\tfuncMap: funcmap.NewFuncMap(),\n\t\tacceptTimeout: 30,\n\t\treadTimeout: 60,\n\t\twriteTimeout: 60,\n\t\treqMemPool: &sync.Pool{\n\t\t\tNew: func() interface{} {\n\t\t\t\treturn make([]byte, 1024)\n\t\t\t},\n\t\t},\n\t\trbufMemPool: &sync.Pool{\n\t\t\tNew: func() interface{} {\n\t\t\t\treturn ringbuffer.NewRingBuffer(1024)\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc (this *Server) SetAcceptTimeout(acceptTimeout time.Duration) {\n\tthis.acceptTimeout = acceptTimeout\n}\n\nfunc (this *Server) SetReadTimeout(readTimeout time.Duration) {\n\tthis.readTimeout = readTimeout\n}\n\nfunc (this *Server) SetWriteTimeout(writeTimeout time.Duration) {\n\tthis.writeTimeout = writeTimeout\n}\n\nfunc (this *Server) Start(listener *net.TCPListener) {\n\tlog.Printf(\"Start listen on %v\\r\\n\", listener.Addr())\n\tthis.waitGroup.Add(1)\n\tdefer func() {\n\t\tlistener.Close()\n\t\tthis.waitGroup.Done()\n\t}()\n\n\t\/\/ 防止恶意连接\n\tgo this.dealSpamConn()\n\n\t\/\/ report记录,定时发送邮件\n\tgo report.Work()\n\n\tfor {\n\t\tselect {\n\t\tcase <-this.exitCh:\n\t\t\tlog.Printf(\"Stop listen on %v\\r\\n\", listener.Addr())\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\tlistener.SetDeadline(time.Now().Add(this.acceptTimeout))\n\t\tconn, err := listener.AcceptTCP()\n\n\t\tif err != nil {\n\t\t\tif opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {\n\t\t\t\t\/\/ log.Printf(\"Accept timeout: %v\\r\\n\", opErr)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treport.AddCount(report.TryConnect, 1)\n\t\t\tlog.Printf(\"Accept error: %v\\r\\n\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\treport.AddCount(report.SuccessConnect, 1)\n\n\t\t\/\/ 连接后等待登陆验证\n\t\thandlers.ConnMapLoginStatus.Set(conn, time.Now())\n\t\tlog.Printf(\"Accept: %v\\r\\n\", conn.RemoteAddr())\n\n\t\tgo this.handleClientConn(conn)\n\t}\n}\n\n\/\/ 处理恶意连接,定时检测。\n\/\/ 若conn的loginstatus为nil,则说明conn已经登陆成功。\n\/\/ 若conn的loginstatus不为nil,则表示loginstatus为该conn连接服务器时的时间戳(time.Time)\n\/\/ 判断这个时间戳是否已经超过登陆限制时间,若超过,则断开。\nfunc (this *Server) dealSpamConn() {\n\tlimitTime := 60 * time.Second\n\tticker := time.NewTicker(limitTime)\n\tfor _ = range ticker.C {\n\t\titems := handlers.ConnMapLoginStatus.Items()\n\t\tfor conn, loginstatus := range items {\n\t\t\tif loginstatus != nil {\n\t\t\t\tdeadline := loginstatus.(time.Time).Add(limitTime)\n\t\t\t\tif time.Now().After(deadline) {\n\t\t\t\t\tconn.(*net.TCPConn).Close()\n\t\t\t\t\thandlers.ConnMapLoginStatus.Delete(conn.(*net.TCPConn))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ 输出当前的连接数\n\t\tfmt.Println(\"当前连接数: \", handlers.ConnMapUuid.Size())\n\t}\n}\n\nfunc (this *Server) Stop() {\n\t\/\/ close后,所有的exitCh都返回false\n\tclose(this.exitCh)\n\tthis.waitGroup.Wait()\n}\n\nfunc (this *Server) BindMsgHandler(pacType uint32, fn interface{}) error {\n\treturn this.funcMap.Bind(pacType, fn)\n}\n\nfunc (this *Server) handleClientConn(conn *net.TCPConn) {\n\tthis.waitGroup.Add(1)\n\tdefer this.waitGroup.Done()\n\n\treceivePackets := make(chan *packet.Packet, 20) \/\/ 接收到的包\n\tchStop := make(chan bool) \/\/ 通知停止消息处理\n\taddr := conn.RemoteAddr().String()\n\n\tdefer func() {\n\t\tdefer func() {\n\t\t\tif e := recover(); e != nil {\n\t\t\t\tlog.Printf(\"Panic: %v\\r\\n\", e)\n\t\t\t}\n\t\t}()\n\n\t\thandlers.CloseConn(conn)\n\n\t\tlog.Printf(\"Disconnect: %v\\r\\n\", addr)\n\t\tchStop <- true\n\t}()\n\n\t\/\/ 处理接收到的包\n\tgo this.handlePackets(conn, receivePackets, chStop)\n\n\t\/\/ 接收数据\n\tlog.Printf(\"HandleClient: %v\\r\\n\", addr)\n\n\trequest := this.reqMemPool.Get().([]byte)\n\tdefer this.reqMemPool.Put(request)\n\n\trbuf := this.rbufMemPool.Get().(*ringbuffer.RingBuffer)\n\tdefer this.rbufMemPool.Put(rbuf)\n\n\tfor {\n\t\tselect {\n\t\tcase <-this.exitCh:\n\t\t\tlog.Printf(\"Stop handleClientConn\\r\\n\")\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\tconn.SetReadDeadline(time.Now().Add(this.readTimeout))\n\t\treadSize, err := conn.Read(request)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Read failed: %v\\r\\n\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif readSize > 0 {\n\t\t\trbuf.Write(request[:readSize])\n\n\t\t\t\/\/ 包长(4) + 类型(4) + 包体(len([]byte))\n\t\t\tfor {\n\t\t\t\tif rbuf.Len() >= 8 {\n\t\t\t\t\tpacLen := convert.BytesToUint32(rbuf.Bytes(4))\n\t\t\t\t\tif rbuf.Len() >= int(pacLen) {\n\t\t\t\t\t\trbuf.Peek(4)\n\t\t\t\t\t\treceivePackets <- &packet.Packet{\n\t\t\t\t\t\t\tLen: pacLen,\n\t\t\t\t\t\t\tType: convert.BytesToUint32(rbuf.Read(4)),\n\t\t\t\t\t\t\tData: rbuf.Read(int(pacLen) - 8),\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n}\n\nfunc (this *Server) handlePackets(conn *net.TCPConn, receivePackets <-chan *packet.Packet, chStop <-chan bool) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tlog.Printf(\"Panic: %v\\r\\n\", e)\n\t\t}\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-chStop:\n\t\t\tlog.Printf(\"Stop handle receivePackets.\\r\\n\")\n\t\t\treturn\n\n\t\t\/\/ 消息包处理\n\t\tcase p := <-receivePackets:\n\t\t\t\/\/ 防止模拟的客户端未经登陆就发送消息\n\t\t\t\/\/ 如果接收的不是登陆包,则检查是否已经在线,若没在线,则无视消息包,等待登陆检测机制处理。\n\t\t\tif p.Type != packet.PK_ClientLogin {\n\t\t\t\tif !handlers.ConnMapUuid.Check(conn) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif this.funcMap.Exist(p.Type) {\n\t\t\t\tthis.funcMap.Call(p.Type, conn, p)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Unknown packet type\\r\\n\")\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/gorilla\/handlers\"\n\t\"net\/http\"\n)\n\nfunc main() {\n\tif httpsErr := BindHTTPS(); httpsErr != nil {\n\t\tlog.Print(httpsErr)\n\t\tif httpErr := BindHTTP(); httpErr != nil {\n\t\t\tlog.Print(httpErr)\n\t\t}\n\t} else {\n\t\tif httpErr := RedirectHTTP(); httpErr != nil {\n\t\t\tlog.Print(httpErr)\n\t\t}\n\t}\n\tselect {}\n}\n\nfunc BindHTTPS() error {\n\tbindAddress := os.Getenv(\"FLASHBACK_HTTPS_BIND\")\n\tif len(bindAddress) == 0 {\n\t\treturn fmt.Errorf(\"FLASHBACK_HTTPS_BIND notset, not serving HTTPS\")\n\t}\n\tlog.Printf(\"Serving static assets via HTTPS on %s\\n\", bindAddress)\n\tgo func() {\n\t\tlog.Fatal(http.ListenAndServeTLS(bindAddress, os.Getenv(\"FLASHBACK_SSL_CERT\"), os.Getenv(\"FLASHBACK_SSL_KEY\"),\n\t\t\thandlers.LoggingHandler(os.Stderr, http.FileServer(http.Dir(\"www\")))))\n\t}()\n\treturn nil\n}\n\nfunc BindHTTP() error {\n\tbindAddress := os.Getenv(\"FLASHBACK_HTTP_BIND\")\n\tif len(bindAddress) == 0 {\n\t\treturn fmt.Errorf(\"FLASHBACK_HTTP_BIND not set, not serving HTTP\")\n\t}\n\tlog.Printf(\"Redirecting for static assets via HTTP on %s\\n\\n\", bindAddress)\n\tgo func() {\n\t\tlog.Fatal(http.ListenAndServe(bindAddress, handlers.LoggingHandler(os.Stderr, http.FileServer(http.Dir(\"www\")))))\n\t}()\n\treturn nil\n}\n\nfunc RedirectHTTP() error {\n\tbindAddress := os.Getenv(\"FLASHBACK_HTTP_BIND\")\n\tif len(bindAddress) == 0 {\n\t\treturn fmt.Errorf(\"FLASHBACK_HTTP_BIND not set, not redirecting HTTP\")\n\t}\n\tlog.Printf(\"Serving static assets via HTTP on %s\\n\", bindAddress)\n\tgo func() {\n\t\tlog.Fatal(http.ListenAndServe(bindAddress, handlers.LoggingHandler(os.Stderr, http.HandlerFunc(RedirectHandler))))\n\t}()\n\treturn nil\n}\n\nfunc RedirectHandler(w http.ResponseWriter, r *http.Request) {\n\tbaseURI := os.Getenv(\"FLASHBACK_BASEURI\")\n\tif len(baseURI) == 0 {\n\t\tlog.Fatal(\"FLASHBACK_BASEURI must be set if FLASHBACK_HTTP_BIND is set\\n\")\n\t}\n\t\/\/ Redirect all HTTP requests to HTTPS\n\t\/\/ It is the responsibility of the admin to configure FLASHBACK_API_BASEURI\n\t\/\/ properly. I will not be held responsible for broken redirections\n\t\/\/ or redirect loops!\n\thttp.Redirect(w, r, baseURI, http.StatusFound)\n}\n<commit_msg>remove old server directory<commit_after><|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\n\t. \"github.com\/tendermint\/go-common\"\n\t\"github.com\/tendermint\/go-wire\"\n\t\"github.com\/tendermint\/tmsp\/types\"\n)\n\n\/\/ var maxNumberConnections = 2\n\nfunc StartListener(protoAddr string, app types.Application) (net.Listener, error) {\n\tvar mtx sync.Mutex \/\/ global mutex\n\tparts := strings.SplitN(protoAddr, \":\/\/\", 2)\n\tproto, addr := parts[0], parts[1]\n\tln, err := net.Listen(proto, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ A goroutine to accept a connection.\n\tgo func() {\n\t\t\/\/ semaphore := make(chan struct{}, maxNumberConnections)\n\n\t\tfor {\n\t\t\t\/\/ semaphore <- struct{}{}\n\n\t\t\t\/\/ Accept a connection\n\t\t\tfmt.Println(\"Waiting for new connection...\")\n\t\t\tconn, err := ln.Accept()\n\t\t\tif err != nil {\n\t\t\t\tExit(\"Failed to accept connection\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Accepted a new connection\")\n\t\t\t}\n\n\t\t\tcloseConn := make(chan error, 2) \/\/ Push to signal connection closed\n\t\t\tresponses := make(chan types.Response, 1000) \/\/ A channel to buffer responses\n\n\t\t\t\/\/ Read requests from conn and deal with them\n\t\t\tgo handleRequests(&mtx, app, closeConn, conn, responses)\n\t\t\t\/\/ Pull responses from 'responses' and write them to conn.\n\t\t\tgo handleResponses(closeConn, responses, conn)\n\n\t\t\tgo func() {\n\t\t\t\t\/\/ Wait until signal to close connection\n\t\t\t\terrClose := <-closeConn\n\t\t\t\tif errClose != nil {\n\t\t\t\t\tfmt.Printf(\"Connection error: %v\\n\", errClose)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"Connection was closed.\")\n\t\t\t\t}\n\n\t\t\t\t\/\/ Close the connection\n\t\t\t\terr := conn.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"Error in closing connection: %v\\n\", err)\n\t\t\t\t}\n\n\t\t\t\t\/\/ <-semaphore\n\t\t\t}()\n\t\t}\n\n\t}()\n\n\treturn ln, nil\n}\n\n\/\/ Read requests from conn and deal with them\nfunc handleRequests(mtx *sync.Mutex, app types.Application, closeConn chan error, conn net.Conn, responses chan<- types.Response) {\n\tvar count int\n\tvar bufReader = bufio.NewReader(conn)\n\tfor {\n\t\tvar n int\n\t\tvar err error\n\t\tvar req types.Request\n\t\twire.ReadBinaryPtrLengthPrefixed(&req, bufReader, 0, &n, &err)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tcloseConn <- fmt.Errorf(\"Connection closed by client\")\n\t\t\t} else {\n\t\t\t\tcloseConn <- fmt.Errorf(\"Error in handleRequests: %v\", err.Error())\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tmtx.Lock()\n\t\tcount++\n\t\thandleRequest(app, req, responses)\n\t\tmtx.Unlock()\n\t}\n}\n\nfunc handleRequest(app types.Application, req types.Request, responses chan<- types.Response) {\n\tswitch req := req.(type) {\n\tcase types.RequestEcho:\n\t\tresponses <- types.ResponseEcho{req.Message}\n\tcase types.RequestFlush:\n\t\tresponses <- types.ResponseFlush{}\n\tcase types.RequestInfo:\n\t\tdata := app.Info()\n\t\tresponses <- types.ResponseInfo{data}\n\tcase types.RequestSetOption:\n\t\tlogstr := app.SetOption(req.Key, req.Value)\n\t\tresponses <- types.ResponseSetOption{logstr}\n\tcase types.RequestAppendTx:\n\t\tcode, result, logstr := app.AppendTx(req.TxBytes)\n\t\tresponses <- types.ResponseAppendTx{code, result, logstr}\n\tcase types.RequestCheckTx:\n\t\tcode, result, logstr := app.CheckTx(req.TxBytes)\n\t\tresponses <- types.ResponseCheckTx{code, result, logstr}\n\tcase types.RequestGetHash:\n\t\thash, logstr := app.GetHash()\n\t\tresponses <- types.ResponseGetHash{hash, logstr}\n\tcase types.RequestQuery:\n\t\tresult, logstr := app.Query(req.QueryBytes)\n\t\tresponses <- types.ResponseQuery{result, logstr}\n\tdefault:\n\t\tresponses <- types.ResponseException{\"Unknown request\"}\n\t}\n}\n\n\/\/ Pull responses from 'responses' and write them to conn.\nfunc handleResponses(closeConn chan error, responses <-chan types.Response, conn net.Conn) {\n\tvar count int\n\tvar bufWriter = bufio.NewWriter(conn)\n\tfor {\n\t\tvar res = <-responses\n\t\tvar n int\n\t\tvar err error\n\t\twire.WriteBinaryLengthPrefixed(struct{ types.Response }{res}, bufWriter, &n, &err)\n\t\tif err != nil {\n\t\t\tcloseConn <- fmt.Errorf(\"Error in handleResponses: %v\", err.Error())\n\t\t\treturn\n\t\t}\n\t\tif _, ok := res.(types.ResponseFlush); ok {\n\t\t\terr = bufWriter.Flush()\n\t\t\tif err != nil {\n\t\t\t\tcloseConn <- fmt.Errorf(\"Error in handleResponses: %v\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tcount++\n\t}\n}\n<commit_msg>s\/logstr\/logStr\/g<commit_after>package server\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n\n\t. \"github.com\/tendermint\/go-common\"\n\t\"github.com\/tendermint\/go-wire\"\n\t\"github.com\/tendermint\/tmsp\/types\"\n)\n\n\/\/ var maxNumberConnections = 2\n\nfunc StartListener(protoAddr string, app types.Application) (net.Listener, error) {\n\tvar mtx sync.Mutex \/\/ global mutex\n\tparts := strings.SplitN(protoAddr, \":\/\/\", 2)\n\tproto, addr := parts[0], parts[1]\n\tln, err := net.Listen(proto, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ A goroutine to accept a connection.\n\tgo func() {\n\t\t\/\/ semaphore := make(chan struct{}, maxNumberConnections)\n\n\t\tfor {\n\t\t\t\/\/ semaphore <- struct{}{}\n\n\t\t\t\/\/ Accept a connection\n\t\t\tfmt.Println(\"Waiting for new connection...\")\n\t\t\tconn, err := ln.Accept()\n\t\t\tif err != nil {\n\t\t\t\tExit(\"Failed to accept connection\")\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"Accepted a new connection\")\n\t\t\t}\n\n\t\t\tcloseConn := make(chan error, 2) \/\/ Push to signal connection closed\n\t\t\tresponses := make(chan types.Response, 1000) \/\/ A channel to buffer responses\n\n\t\t\t\/\/ Read requests from conn and deal with them\n\t\t\tgo handleRequests(&mtx, app, closeConn, conn, responses)\n\t\t\t\/\/ Pull responses from 'responses' and write them to conn.\n\t\t\tgo handleResponses(closeConn, responses, conn)\n\n\t\t\tgo func() {\n\t\t\t\t\/\/ Wait until signal to close connection\n\t\t\t\terrClose := <-closeConn\n\t\t\t\tif errClose != nil {\n\t\t\t\t\tfmt.Printf(\"Connection error: %v\\n\", errClose)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"Connection was closed.\")\n\t\t\t\t}\n\n\t\t\t\t\/\/ Close the connection\n\t\t\t\terr := conn.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"Error in closing connection: %v\\n\", err)\n\t\t\t\t}\n\n\t\t\t\t\/\/ <-semaphore\n\t\t\t}()\n\t\t}\n\n\t}()\n\n\treturn ln, nil\n}\n\n\/\/ Read requests from conn and deal with them\nfunc handleRequests(mtx *sync.Mutex, app types.Application, closeConn chan error, conn net.Conn, responses chan<- types.Response) {\n\tvar count int\n\tvar bufReader = bufio.NewReader(conn)\n\tfor {\n\t\tvar n int\n\t\tvar err error\n\t\tvar req types.Request\n\t\twire.ReadBinaryPtrLengthPrefixed(&req, bufReader, 0, &n, &err)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tcloseConn <- fmt.Errorf(\"Connection closed by client\")\n\t\t\t} else {\n\t\t\t\tcloseConn <- fmt.Errorf(\"Error in handleRequests: %v\", err.Error())\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tmtx.Lock()\n\t\tcount++\n\t\thandleRequest(app, req, responses)\n\t\tmtx.Unlock()\n\t}\n}\n\nfunc handleRequest(app types.Application, req types.Request, responses chan<- types.Response) {\n\tswitch req := req.(type) {\n\tcase types.RequestEcho:\n\t\tresponses <- types.ResponseEcho{req.Message}\n\tcase types.RequestFlush:\n\t\tresponses <- types.ResponseFlush{}\n\tcase types.RequestInfo:\n\t\tdata := app.Info()\n\t\tresponses <- types.ResponseInfo{data}\n\tcase types.RequestSetOption:\n\t\tlogStr := app.SetOption(req.Key, req.Value)\n\t\tresponses <- types.ResponseSetOption{logStr}\n\tcase types.RequestAppendTx:\n\t\tcode, result, logStr := app.AppendTx(req.TxBytes)\n\t\tresponses <- types.ResponseAppendTx{code, result, logStr}\n\tcase types.RequestCheckTx:\n\t\tcode, result, logStr := app.CheckTx(req.TxBytes)\n\t\tresponses <- types.ResponseCheckTx{code, result, logStr}\n\tcase types.RequestGetHash:\n\t\thash, logStr := app.GetHash()\n\t\tresponses <- types.ResponseGetHash{hash, logStr}\n\tcase types.RequestQuery:\n\t\tresult, logStr := app.Query(req.QueryBytes)\n\t\tresponses <- types.ResponseQuery{result, logStr}\n\tdefault:\n\t\tresponses <- types.ResponseException{\"Unknown request\"}\n\t}\n}\n\n\/\/ Pull responses from 'responses' and write them to conn.\nfunc handleResponses(closeConn chan error, responses <-chan types.Response, conn net.Conn) {\n\tvar count int\n\tvar bufWriter = bufio.NewWriter(conn)\n\tfor {\n\t\tvar res = <-responses\n\t\tvar n int\n\t\tvar err error\n\t\twire.WriteBinaryLengthPrefixed(struct{ types.Response }{res}, bufWriter, &n, &err)\n\t\tif err != nil {\n\t\t\tcloseConn <- fmt.Errorf(\"Error in handleResponses: %v\", err.Error())\n\t\t\treturn\n\t\t}\n\t\tif _, ok := res.(types.ResponseFlush); ok {\n\t\t\terr = bufWriter.Flush()\n\t\t\tif err != nil {\n\t\t\t\tcloseConn <- fmt.Errorf(\"Error in handleResponses: %v\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tcount++\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/gophergala2016\/3wordgame\/validation\"\n\t\"net\"\n)\n\n\/\/ Client struct\ntype Client struct {\n\tincoming chan Message\n\toutgoing chan string\n\treader *bufio.Reader\n\twriter *bufio.Writer\n\taddress string\n}\n\ntype Message struct {\n\ttext string\n\taddress string\n}\n\n\/\/ Read line by line into the client.incoming\nfunc (client *Client) Read() {\n\tfor {\n\t\tline, _ := client.reader.ReadString('\\n')\n\t\tclient.incoming <- Message{\n\t\t\t\ttext: line,\n\t\t\t\taddress: client.address,\n\t\t\t}\n\t}\n}\n\n\/\/ Write client outgoing data to the client writer\nfunc (client *Client) Write() {\n\tfor data := range client.outgoing {\n\t\tclient.writer.WriteString(data)\n\t\tclient.writer.Flush()\n\t}\n}\n\n\/\/ Listen for reads and writes on the client\nfunc (client *Client) Listen() {\n\tgo client.Read()\n\tgo client.Write()\n}\n\n\/\/ NewClient returns new instance of client.\nfunc NewClient(connection net.Conn) *Client {\n\twriter := bufio.NewWriter(connection)\n\treader := bufio.NewReader(connection)\n\n\taddress := connection.RemoteAddr().String()\n\n\tclient := &Client{\n\t\tincoming: make(chan Message),\n\t\toutgoing: make(chan string),\n\t\treader: reader,\n\t\twriter: writer,\n\t\taddress: address,\n\t}\n\n\tclient.Listen()\n\n\treturn client\n}\n\n\/\/ ChatRoom struct\ntype ChatRoom struct {\n\tclients []*Client\n\tjoins chan net.Conn\n\tincoming chan Message\n\toutgoing chan string\n\tstory string\n\tlast_msg_user_address string\n}\n\n\/\/ Broadcast data to all connected chatRoom.clients\nfunc (chatRoom *ChatRoom) Broadcast(data string) {\n\tfor _, client := range chatRoom.clients {\n\t\tclient.outgoing <- data\n\t}\n}\n\n\/\/ Join attaches a new client to the chatRoom clients\nfunc (chatRoom *ChatRoom) Join(connection net.Conn) {\n\tclient := NewClient(connection)\n\tchatRoom.clients = append(chatRoom.clients, client)\n\tgo func() {\n\t\tfor {\n\t\t\tchatRoom.incoming <- <-client.incoming\n\t\t}\n\t}()\n}\n\n\/\/ Listen to all incoming messages for the chatRoom\nfunc (chatRoom *ChatRoom) Listen() {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase data := <-chatRoom.incoming:\n\t\t\t\tmsg, err := validation.ValidateMsg(data.text)\n\t\t\t\tif err == nil && chatRoom.last_msg_user_address != data.address {\n\t\t\t\t\tchatRoom.Broadcast(msg)\n\t\t\t\t\tchatRoom.story = fmt.Sprintf(\"%s %s\", chatRoom.story, msg)\n\t\t\t\t\tchatRoom.last_msg_user_address = data.address\n\t\t\t\t}\n\t\t\tcase conn := <-chatRoom.joins:\n\t\t\t\tchatRoom.Join(conn)\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ NewChatRoom factories a ChatRoom instance\nfunc NewChatRoom() *ChatRoom {\n\tchatRoom := &ChatRoom{\n\t\tclients: make([]*Client, 0),\n\t\tjoins: make(chan net.Conn),\n\t\tincoming: make(chan Message),\n\t\toutgoing: make(chan string),\n\t}\n\n\tchatRoom.Listen()\n\n\treturn chatRoom\n}\n\nfunc main() {\n\tvar server string\n\tvar port int\n\n\tflag.StringVar(&server, \"server\", \"127.0.0.1\", \"Server host\")\n\tflag.IntVar(&port, \"port\", 6666, \"Server port\")\n\tflag.Parse()\n\n\tlistener, _ := net.Listen(\"tcp\", fmt.Sprintf(\"%s:%d\", server, port))\n\n\tchatRoom := NewChatRoom()\n\n\tfor {\n\t\tconn, _ := listener.Accept()\n\t\tchatRoom.joins <- conn\n\t}\n}\n<commit_msg>Send story when joining ChatRoom<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/gophergala2016\/3wordgame\/validation\"\n\t\"net\"\n)\n\n\/\/ Client struct\ntype Client struct {\n\tincoming chan Message\n\toutgoing chan string\n\treader *bufio.Reader\n\twriter *bufio.Writer\n\taddress string\n}\n\ntype Message struct {\n\ttext string\n\taddress string\n}\n\n\/\/ Read line by line into the client.incoming\nfunc (client *Client) Read() {\n\tfor {\n\t\tline, _ := client.reader.ReadString('\\n')\n\t\tclient.incoming <- Message{\n\t\t\t\ttext: line,\n\t\t\t\taddress: client.address,\n\t\t\t}\n\t}\n}\n\n\/\/ Write client outgoing data to the client writer\nfunc (client *Client) Write() {\n\tfor data := range client.outgoing {\n\t\tclient.writer.WriteString(data)\n\t\tclient.writer.Flush()\n\t}\n}\n\n\/\/ Listen for reads and writes on the client\nfunc (client *Client) Listen() {\n\tgo client.Read()\n\tgo client.Write()\n}\n\n\/\/ NewClient returns new instance of client.\nfunc NewClient(connection net.Conn) *Client {\n\twriter := bufio.NewWriter(connection)\n\treader := bufio.NewReader(connection)\n\n\taddress := connection.RemoteAddr().String()\n\n\tclient := &Client{\n\t\tincoming: make(chan Message),\n\t\toutgoing: make(chan string),\n\t\treader: reader,\n\t\twriter: writer,\n\t\taddress: address,\n\t}\n\n\tclient.Listen()\n\n\treturn client\n}\n\n\/\/ ChatRoom struct\ntype ChatRoom struct {\n\tclients []*Client\n\tjoins chan net.Conn\n\tincoming chan Message\n\toutgoing chan string\n\tstory string\n\tlast_msg_user_address string\n}\n\n\/\/ Broadcast data to all connected chatRoom.clients\nfunc (chatRoom *ChatRoom) Broadcast(data string) {\n\tfor _, client := range chatRoom.clients {\n\t\tclient.outgoing <- data\n\t}\n}\n\n\/\/ Join attaches a new client to the chatRoom clients\nfunc (chatRoom *ChatRoom) Join(connection net.Conn) {\n\tclient := NewClient(connection)\n\tchatRoom.clients = append(chatRoom.clients, client)\n\tclient.outgoing <- chatRoom.story\n\tgo func() {\n\t\tfor {\n\t\t\tchatRoom.incoming <- <-client.incoming\n\t\t}\n\t}()\n}\n\n\/\/ Listen to all incoming messages for the chatRoom\nfunc (chatRoom *ChatRoom) Listen() {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase data := <-chatRoom.incoming:\n\t\t\t\tmsg, err := validation.ValidateMsg(data.text)\n\t\t\t\tif err == nil && chatRoom.last_msg_user_address != data.address {\n\t\t\t\t\tchatRoom.Broadcast(msg)\n\t\t\t\t\tif len(chatRoom.story) == 0 {\n\t\t\t\t\t\tchatRoom.story = msg\n\t\t\t\t\t} else {\n\t\t\t\t\t\tchatRoom.story = fmt.Sprintf(\"%s %s\", chatRoom.story, msg)\n\t\t\t\t\t}\n\t\t\t\t\tchatRoom.last_msg_user_address = data.address\n\t\t\t\t}\n\t\t\tcase conn := <-chatRoom.joins:\n\t\t\t\tchatRoom.Join(conn)\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ NewChatRoom factories a ChatRoom instance\nfunc NewChatRoom() *ChatRoom {\n\tchatRoom := &ChatRoom{\n\t\tclients: make([]*Client, 0),\n\t\tjoins: make(chan net.Conn),\n\t\tincoming: make(chan Message),\n\t\toutgoing: make(chan string),\n\t}\n\n\tchatRoom.Listen()\n\n\treturn chatRoom\n}\n\nfunc main() {\n\tvar server string\n\tvar port int\n\n\tflag.StringVar(&server, \"server\", \"127.0.0.1\", \"Server host\")\n\tflag.IntVar(&port, \"port\", 6666, \"Server port\")\n\tflag.Parse()\n\n\tlistener, _ := net.Listen(\"tcp\", fmt.Sprintf(\"%s:%d\", server, port))\n\n\tchatRoom := NewChatRoom()\n\n\tfor {\n\t\tconn, _ := listener.Accept()\n\t\tchatRoom.joins <- conn\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/**\n * @file advisory.go\n * @author Mikhail Klementyev jollheef<AT>riseup.net\n * @license GNU GPLv3\n * @date September, 2015\n * @brief routine for receive advisories from commands\n *\n * Provide tcp server for receive advisory.\n *\/\n\npackage receiver\n\nimport (\n\t\"bufio\"\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"regexp\"\n\t\"time\"\n)\n\nimport \"tinfoilhat\/steward\"\n\nfunc HasUnacceptableSymbols(s, regex string) bool {\n\n\tr, err := regexp.Compile(regex)\n\tif err != nil {\n\t\tlog.Println(\"Compile regex fail:\", err)\n\t\treturn true\n\t}\n\n\treturn !r.MatchString(s)\n}\n\nfunc AdvisoryHandler(conn net.Conn, db *sql.DB) {\n\n\taddr := conn.RemoteAddr().String()\n\n\tdefer conn.Close()\n\n\tround, err := steward.CurrentRound(db)\n\tif err != nil {\n\t\tlog.Println(\"Get current round fail:\", err)\n\t\tfmt.Fprint(conn, InternalErrorMsg)\n\t\treturn\n\t}\n\n\tround_end_time := round.StartTime.Add(round.Len)\n\n\tif time.Now().After(round_end_time) {\n\t\tfmt.Fprintln(conn, \"Current contest not runned\")\n\t\treturn\n\t}\n\n\tfmt.Fprint(conn, \"IBST.PSU CTF Advisory Receiver\\n\"+\n\t\t\"Insert empty line for close\\n\"+\n\t\t\"Input advisory: \")\n\n\tscanner := bufio.NewScanner(conn)\n\tvar advisory string\n\tfor scanner.Scan() {\n\t\tadvisory += scanner.Text() + \"\\n\"\n\t\tif len(advisory) > 2 {\n\t\t\tif advisory[len(advisory)-2:len(advisory)-1] == \"\\n\" {\n\t\t\t\t\/\/ remove last newline\n\t\t\t\tadvisory = advisory[:len(advisory)-1]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tr := `[ -~]`\n\tif HasUnacceptableSymbols(advisory, r) {\n\t\tfmt.Fprintf(conn, \"Accept only %s\\n\", r)\n\t\treturn\n\t}\n\n\tteam, err := TeamByAddr(db, addr)\n\tif err != nil {\n\t\tlog.Println(\"\\tGet team by ip failed:\", err)\n\t\tfmt.Fprint(conn, InvalidTeamMsg)\n\t\treturn\n\t}\n\n\t_, err = steward.AddAdvisory(db, team.Id, advisory)\n\tif err != nil {\n\t\tlog.Println(\"\\tAdd advisory failed:\", err)\n\t\tfmt.Fprint(conn, InternalErrorMsg)\n\t\treturn\n\t}\n\n\tfmt.Fprint(conn, \"Accepted\\n\")\n}\n\nfunc AdvisoryReceiver(db *sql.DB, addr string, timeout,\n\tsocket_timeout time.Duration) {\n\n\tlog.Println(\"Launching advisory receiver at\", addr, \"...\")\n\n\tconnects := make(map[string]time.Time) \/\/ { ip : last_connect_time }\n\n\tlistener, _ := net.Listen(\"tcp\", addr)\n\n\tfor {\n\t\tconn, _ := listener.Accept()\n\n\t\taddr := conn.RemoteAddr().String()\n\n\t\tlog.Printf(\"Connection accepted from %s\", addr)\n\n\t\tip, _, err := net.SplitHostPort(addr)\n\t\tif err != nil {\n\t\t\tlog.Println(\"\\tCannot split remote addr:\", err)\n\t\t\tfmt.Fprint(conn, InternalErrorMsg)\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\tif time.Now().Before(connects[ip].Add(timeout)) {\n\t\t\tlog.Println(\"\\tToo fast connects by\", ip)\n\t\t\tfmt.Fprintf(conn, \"Attempts limit exceeded (wait %s)\\n\",\n\t\t\t\tconnects[ip].Add(timeout).Sub(time.Now()))\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\terr = conn.SetDeadline(time.Now().Add(socket_timeout))\n\t\tif err != nil {\n\t\t\tlog.Println(\"Set deadline fail:\", err)\n\t\t\tfmt.Fprint(conn, InternalErrorMsg)\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\tgo AdvisoryHandler(conn, db)\n\n\t\tconnects[ip] = time.Now()\n\t}\n}\n<commit_msg>Prevent accept advisory for browser connect<commit_after>\/**\n * @file advisory.go\n * @author Mikhail Klementyev jollheef<AT>riseup.net\n * @license GNU GPLv3\n * @date September, 2015\n * @brief routine for receive advisories from commands\n *\n * Provide tcp server for receive advisory.\n *\/\n\npackage receiver\n\nimport (\n\t\"bufio\"\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"regexp\"\n\t\"time\"\n)\n\nimport \"tinfoilhat\/steward\"\n\nfunc HasUnacceptableSymbols(s, regex string) bool {\n\n\tr, err := regexp.Compile(regex)\n\tif err != nil {\n\t\tlog.Println(\"Compile regex fail:\", err)\n\t\treturn true\n\t}\n\n\treturn !r.MatchString(s)\n}\n\nfunc AdvisoryHandler(conn net.Conn, db *sql.DB) {\n\n\taddr := conn.RemoteAddr().String()\n\n\tdefer conn.Close()\n\n\tround, err := steward.CurrentRound(db)\n\tif err != nil {\n\t\tlog.Println(\"Get current round fail:\", err)\n\t\tfmt.Fprint(conn, InternalErrorMsg)\n\t\treturn\n\t}\n\n\tround_end_time := round.StartTime.Add(round.Len)\n\n\tif time.Now().After(round_end_time) {\n\t\tfmt.Fprintln(conn, \"Current contest not runned\")\n\t\treturn\n\t}\n\n\tfmt.Fprint(conn, \"IBST.PSU CTF Advisory Receiver\\n\"+\n\t\t\"Insert empty line for close\\n\"+\n\t\t\"Input advisory: \")\n\n\tscanner := bufio.NewScanner(conn)\n\tvar advisory string\n\tfor scanner.Scan() {\n\t\tadvisory += scanner.Text() + \"\\n\"\n\t\tif len(advisory) > 2 {\n\t\t\tif advisory[len(advisory)-2:len(advisory)-1] == \"\\n\" {\n\t\t\t\t\/\/ remove last newline\n\t\t\t\tadvisory = advisory[:len(advisory)-1]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\thttp_get_root := \"GET \/ HTTP\/1.1\"\n\tif len(advisory) > len(http_get_root) {\n\t\tif advisory[0:len(http_get_root)] == http_get_root {\n\t\t\tfmt.Fprintf(conn, \"\\n\\nIt's not a HTTP server! \"+\n\t\t\t\t\"Use netcat for communication.\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tr := `[ -~]`\n\tif HasUnacceptableSymbols(advisory, r) {\n\t\tfmt.Fprintf(conn, \"Accept only %s\\n\", r)\n\t\treturn\n\t}\n\n\tteam, err := TeamByAddr(db, addr)\n\tif err != nil {\n\t\tlog.Println(\"\\tGet team by ip failed:\", err)\n\t\tfmt.Fprint(conn, InvalidTeamMsg)\n\t\treturn\n\t}\n\n\t_, err = steward.AddAdvisory(db, team.Id, advisory)\n\tif err != nil {\n\t\tlog.Println(\"\\tAdd advisory failed:\", err)\n\t\tfmt.Fprint(conn, InternalErrorMsg)\n\t\treturn\n\t}\n\n\tfmt.Fprint(conn, \"Accepted\\n\")\n}\n\nfunc AdvisoryReceiver(db *sql.DB, addr string, timeout,\n\tsocket_timeout time.Duration) {\n\n\tlog.Println(\"Launching advisory receiver at\", addr, \"...\")\n\n\tconnects := make(map[string]time.Time) \/\/ { ip : last_connect_time }\n\n\tlistener, _ := net.Listen(\"tcp\", addr)\n\n\tfor {\n\t\tconn, _ := listener.Accept()\n\n\t\taddr := conn.RemoteAddr().String()\n\n\t\tlog.Printf(\"Connection accepted from %s\", addr)\n\n\t\tip, _, err := net.SplitHostPort(addr)\n\t\tif err != nil {\n\t\t\tlog.Println(\"\\tCannot split remote addr:\", err)\n\t\t\tfmt.Fprint(conn, InternalErrorMsg)\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\tif time.Now().Before(connects[ip].Add(timeout)) {\n\t\t\tlog.Println(\"\\tToo fast connects by\", ip)\n\t\t\tfmt.Fprintf(conn, \"Attempts limit exceeded (wait %s)\\n\",\n\t\t\t\tconnects[ip].Add(timeout).Sub(time.Now()))\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\terr = conn.SetDeadline(time.Now().Add(socket_timeout))\n\t\tif err != nil {\n\t\t\tlog.Println(\"Set deadline fail:\", err)\n\t\t\tfmt.Fprint(conn, InternalErrorMsg)\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\tgo AdvisoryHandler(conn, db)\n\n\t\tconnects[ip] = time.Now()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package scan\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"time\"\n\t\"vaddy\/args\"\n\t\"vaddy\/common\"\n\t\"vaddy\/config\"\n)\n\nfunc GetScanResult(scanSetting args.ScanSetting, scanId string) (ScanResult, error) {\n\tvalues := url.Values{}\n\tvalues.Add(\"user\", scanSetting.User)\n\tvalues.Add(\"fqdn\", scanSetting.Fqdn)\n\tvalues.Add(\"verification_code\", scanSetting.VerificationCode)\n\tvalues.Add(\"project_id\", scanSetting.ProjectId)\n\tvalues.Add(\"scan_id\", scanId)\n\n\tvar scanResult ScanResult\n\tvar retryCount int = 0\n\tfor {\n\t\tresult, err := httpRequestHandler.HttpGet(\"\/scan\/result\", scanSetting, values)\n\t\tif err != nil {\n\t\t\tfmt.Print(\"HTTP Get Request Error: \/scan\/result. \")\n\t\t\tfmt.Print(err)\n\t\t\t\/\/リトライするためにerrがあってもprintのみする\n\t\t}\n\n\t\terr2 := common.CheckHttpResponse(result)\n\t\tif err2 != nil {\n\t\t\tfmt.Print(\"HTTP Get Response Error: \/scan\/result. \")\n\t\t\tfmt.Print(err2)\n\t\t\t\/\/リトライするためにerr2があってもprintのみする\n\t\t}\n\n\t\tif err == nil && err2 == nil {\n\t\t\tcommon.ConvertJsonToStruct(result.Body, &scanResult)\n\t\t\treturn scanResult, nil\n\t\t}\n\n\t\tretryCount++\n\t\tif retryCount > config.NETWORK_RETRY_COUNT {\n\t\t\tfmt.Printf(\"-- getScanResult() retry max count: %d exit. --\", retryCount)\n\t\t\tfmt.Println(err)\n\t\t\treturn scanResult, errors.New(\"Error: get scan result retry over.\")\n\t\t}\n\t\tfmt.Printf(\"-- getScanResult() HTTP GET error: count %d --\\n\", retryCount)\n\t\ttime.Sleep(time.Duration(config.NETWORK_RETRY_WAIT_TIME) * time.Second)\n\t}\n}\n<commit_msg>JSONがデコードできない場合のエラーチェックを追加<commit_after>package scan\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"time\"\n\t\"vaddy\/args\"\n\t\"vaddy\/common\"\n\t\"vaddy\/config\"\n)\n\nfunc GetScanResult(scanSetting args.ScanSetting, scanId string) (ScanResult, error) {\n\tvalues := url.Values{}\n\tvalues.Add(\"user\", scanSetting.User)\n\tvalues.Add(\"fqdn\", scanSetting.Fqdn)\n\tvalues.Add(\"verification_code\", scanSetting.VerificationCode)\n\tvalues.Add(\"project_id\", scanSetting.ProjectId)\n\tvalues.Add(\"scan_id\", scanId)\n\n\tvar scanResult ScanResult\n\tvar retryCount int = 0\n\tfor {\n\t\tresult, err := httpRequestHandler.HttpGet(\"\/scan\/result\", scanSetting, values)\n\t\tif err != nil {\n\t\t\tfmt.Print(\"HTTP Get Request Error: \/scan\/result. \")\n\t\t\tfmt.Print(err)\n\t\t\t\/\/リトライするためにerrがあってもprintのみする\n\t\t}\n\n\t\terr2 := common.CheckHttpResponse(result)\n\t\tif err2 != nil {\n\t\t\tfmt.Print(\"HTTP Get Response Error: \/scan\/result. \")\n\t\t\tfmt.Print(err2)\n\t\t\t\/\/リトライするためにerr2があってもprintのみする\n\t\t}\n\n\t\tif err == nil && err2 == nil {\n\t\t\terr := common.ConvertJsonToStruct(result.Body, &scanResult)\n\t\t\tif err != nil {\n\t\t\t\treturn ScanResult{}, err\n\t\t\t}\n\t\t\treturn scanResult, nil\n\t\t}\n\n\t\tretryCount++\n\t\tif retryCount > config.NETWORK_RETRY_COUNT {\n\t\t\tfmt.Printf(\"-- getScanResult() retry max count: %d exit. --\", retryCount)\n\t\t\tfmt.Println(err)\n\t\t\treturn scanResult, errors.New(\"Error: get scan result retry over.\")\n\t\t}\n\t\tfmt.Printf(\"-- getScanResult() HTTP GET error: count %d --\\n\", retryCount)\n\t\ttime.Sleep(time.Duration(config.NETWORK_RETRY_WAIT_TIME) * time.Second)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package kontrol\n\nimport (\n\t\"container\/list\"\n\t\"koding\/newkite\/dnode\"\n\t\"koding\/newkite\/kite\"\n\t\"koding\/newkite\/protocol\"\n\t\"reflect\"\n\t\"sync\"\n)\n\ntype watcherHub struct {\n\tsync.RWMutex\n\n\t\/\/ Indexed by user to iterate faster when a notification comes.\n\t\/\/ Indexed by user because it is the first field in protocol.KontrolQuery.\n\twatchesByUser map[string]*list.List \/\/ List contains *watch\n\n\t\/\/ Indexed by Kite to remove them easily when Kite disconnects.\n\twatchesByKite map[*kite.RemoteKite][]*list.Element\n}\n\ntype watch struct {\n\tquery *protocol.KontrolQuery\n\tcallback dnode.Function\n}\n\nfunc newWatcherHub() *watcherHub {\n\treturn &watcherHub{\n\t\twatchesByUser: make(map[string]*list.List),\n\t\twatchesByKite: make(map[*kite.RemoteKite][]*list.Element),\n\t}\n}\n\n\/\/ RegisterWatcher saves the callbacks to invoke later\n\/\/ when a Kite is registered\/deregistered matching the query.\nfunc (h *watcherHub) RegisterWatcher(r *kite.RemoteKite, q *protocol.KontrolQuery, callback dnode.Function) {\n\th.Lock()\n\tdefer h.Unlock()\n\n\tr.OnDisconnect(func() {\n\t\th.Lock()\n\t\tdefer h.Unlock()\n\n\t\t\/\/ Delete watch from watchesByUser\n\t\tfor _, elem := range h.watchesByKite[r] {\n\t\t\tl := h.watchesByUser[q.Username]\n\t\t\tl.Remove(elem)\n\n\t\t\t\/\/ Delete the empty list.\n\t\t\tif l.Len() == 0 {\n\t\t\t\tdelete(h.watchesByUser, q.Username)\n\t\t\t}\n\t\t}\n\n\t\tdelete(h.watchesByKite, r)\n\t})\n\n\t\/\/ Get or create a new list.\n\tl, ok := h.watchesByUser[q.Username]\n\tif !ok {\n\t\tl = list.New()\n\t\th.watchesByUser[q.Username] = l\n\t}\n\n\telem := l.PushBack(&watch{q, callback})\n\th.watchesByKite[r] = append(h.watchesByKite[r], elem)\n}\n\n\/\/ Notify is called when a Kite is registered by the user of this watcherHub.\n\/\/ Calls the registered callbacks mathching to the kite.\nfunc (h *watcherHub) Notify(kite *protocol.Kite, action protocol.KiteAction, kodingKey string) {\n\th.RLock()\n\tdefer h.RUnlock()\n\n\tl, ok := h.watchesByUser[kite.Username]\n\tif ok {\n\t\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\t\twatch := e.Value.(*watch)\n\t\t\tif matches(kite, watch.query) {\n\t\t\t\tvar kiteWithToken *protocol.KiteWithToken\n\t\t\t\tvar err error\n\n\t\t\t\t\/\/ Register events needs a token attached.\n\t\t\t\tif action == protocol.Register {\n\t\t\t\t\tkiteWithToken, err = addTokenToKite(kite, watch.query.Username, kodingKey)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Error(\"watch notify: %s\", err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ We do not need to send token for deregister event.\n\t\t\t\t\tkiteWithToken = &protocol.KiteWithToken{Kite: *kite}\n\t\t\t\t}\n\n\t\t\t\tevent := protocol.KiteEvent{\n\t\t\t\t\tAction: action,\n\t\t\t\t\tKite: *kiteWithToken,\n\t\t\t\t}\n\t\t\t\tgo watch.callback(event)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ matches returns true if kite mathches to the query.\nfunc matches(kite *protocol.Kite, query *protocol.KontrolQuery) bool {\n\tqv := reflect.ValueOf(*query)\n\tqt := qv.Type()\n\n\tfor i := 0; i < qt.NumField(); i++ {\n\t\tqf := qv.Field(i)\n\n\t\t\/\/ Empty field in query matches everything.\n\t\tif qf.String() == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Compare field qf. query does not match if any field is different.\n\t\tkf := reflect.ValueOf(*kite).FieldByName(qt.Field(i).Name)\n\t\tif kf.String() != qf.String() {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n<commit_msg>kite: flatten ifs<commit_after>package kontrol\n\nimport (\n\t\"container\/list\"\n\t\"koding\/newkite\/dnode\"\n\t\"koding\/newkite\/kite\"\n\t\"koding\/newkite\/protocol\"\n\t\"reflect\"\n\t\"sync\"\n)\n\n\/\/ watcherHub allows watches to be registered on Kites and allows them to\n\/\/ be notified when a Kite changes (registered or deregistered).\ntype watcherHub struct {\n\tsync.RWMutex\n\n\t\/\/ Indexed by user to iterate faster when a notification comes.\n\t\/\/ Indexed by user because it is the first field in protocol.KontrolQuery.\n\twatchesByUser map[string]*list.List \/\/ List contains *watch\n\n\t\/\/ Indexed by Kite to remove them easily when Kite disconnects.\n\twatchesByKite map[*kite.RemoteKite][]*list.Element\n}\n\ntype watch struct {\n\tquery *protocol.KontrolQuery\n\tcallback dnode.Function\n}\n\nfunc newWatcherHub() *watcherHub {\n\treturn &watcherHub{\n\t\twatchesByUser: make(map[string]*list.List),\n\t\twatchesByKite: make(map[*kite.RemoteKite][]*list.Element),\n\t}\n}\n\n\/\/ RegisterWatcher saves the callbacks to invoke later\n\/\/ when a Kite is registered\/deregistered matching the query.\nfunc (h *watcherHub) RegisterWatcher(r *kite.RemoteKite, q *protocol.KontrolQuery, callback dnode.Function) {\n\th.Lock()\n\tdefer h.Unlock()\n\n\tr.OnDisconnect(func() {\n\t\th.Lock()\n\t\tdefer h.Unlock()\n\n\t\t\/\/ Delete watch from watchesByUser\n\t\tfor _, elem := range h.watchesByKite[r] {\n\t\t\tl := h.watchesByUser[q.Username]\n\t\t\tl.Remove(elem)\n\n\t\t\t\/\/ Delete the empty list.\n\t\t\tif l.Len() == 0 {\n\t\t\t\tdelete(h.watchesByUser, q.Username)\n\t\t\t}\n\t\t}\n\n\t\tdelete(h.watchesByKite, r)\n\t})\n\n\t\/\/ Get or create a new list.\n\tl, ok := h.watchesByUser[q.Username]\n\tif !ok {\n\t\tl = list.New()\n\t\th.watchesByUser[q.Username] = l\n\t}\n\n\telem := l.PushBack(&watch{q, callback})\n\th.watchesByKite[r] = append(h.watchesByKite[r], elem)\n}\n\n\/\/ Notify is called when a Kite is registered by the user of this watcherHub.\n\/\/ Calls the registered callbacks mathching to the kite.\nfunc (h *watcherHub) Notify(kite *protocol.Kite, action protocol.KiteAction, kodingKey string) {\n\th.RLock()\n\tdefer h.RUnlock()\n\n\tl, ok := h.watchesByUser[kite.Username]\n\tif !ok {\n\t\treturn\n\t}\n\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\twatch := e.Value.(*watch)\n\t\tif !matches(kite, watch.query) {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar kiteWithToken *protocol.KiteWithToken\n\t\tvar err error\n\n\t\t\/\/ Register events needs a token attached.\n\t\tif action == protocol.Register {\n\t\t\tkiteWithToken, err = addTokenToKite(kite, watch.query.Username, kodingKey)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"watch notify: %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t} else {\n\t\t\t\/\/ We do not need to send token for deregister event.\n\t\t\tkiteWithToken = &protocol.KiteWithToken{Kite: *kite}\n\t\t}\n\n\t\tevent := protocol.KiteEvent{\n\t\t\tAction: action,\n\t\t\tKite: *kiteWithToken,\n\t\t}\n\t\tgo watch.callback(event)\n\t}\n}\n\n\/\/ matches returns true if kite mathches to the query.\nfunc matches(kite *protocol.Kite, query *protocol.KontrolQuery) bool {\n\tqv := reflect.ValueOf(*query)\n\tqt := qv.Type()\n\n\tfor i := 0; i < qt.NumField(); i++ {\n\t\tqf := qv.Field(i)\n\n\t\t\/\/ Empty field in query matches everything.\n\t\tif qf.String() == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Compare field qf. query does not match if any field is different.\n\t\tkf := reflect.ValueOf(*kite).FieldByName(qt.Field(i).Name)\n\t\tif kf.String() != qf.String() {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package migrate\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/go-gorp\/gorp\"\n\n\t\"github.com\/blang\/semver\"\n\n\t\"github.com\/ovh\/cds\/sdk\"\n\t\"github.com\/ovh\/cds\/sdk\/log\"\n)\n\n\/\/ MinCompatibleRelease represent the minimum release which is working with these migrations, need to update when we delete migration in our codebase\nconst MinCompatibleRelease = \"0.38.1\"\n\nvar migrations = []sdk.Migration{}\n\n\/\/ Add usefull to add new migrations\nfunc Add(migration sdk.Migration) {\n\tif migration.Major == 0 && migration.Minor == 0 && migration.Patch == 0 && migration.Release != \"\" && !strings.HasPrefix(migration.Release, \"snapshot\") {\n\t\tv, err := semver.Parse(migration.Release)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Cannot parse your release reference : %v\", err)\n\t\t}\n\t\tmigration.Major = v.Major\n\t\tmigration.Minor = v.Minor\n\t\tmigration.Patch = v.Patch\n\t}\n\tmigrations = append(migrations, migration)\n}\n\n\/\/ Run run all local migrations\nfunc Run(ctx context.Context, db gorp.SqlExecutor, panicDump func(s string) (io.WriteCloser, error)) {\n\tfor _, migration := range migrations {\n\t\tfunc(currentMigration sdk.Migration) {\n\t\t\tsdk.GoRoutine(ctx, \"migrate_\"+migration.Name, func(contex context.Context) {\n\t\t\t\tvar mig *sdk.Migration\n\t\t\t\tvar errMig error\n\t\t\t\tmig, errMig = GetByName(db, currentMigration.Name)\n\t\t\t\tif errMig != nil {\n\t\t\t\t\tlog.Error(\"Cannot get migration %s : %v\", currentMigration.Name, errMig)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif mig != nil {\n\t\t\t\t\tif mig.Status == sdk.MigrationStatusDone || mig.Status == sdk.MigrationStatusCanceled {\n\t\t\t\t\t\tlog.Info(\"Migration> %s> Already done (status: %s)\", currentMigration.Name, mig.Status)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tcurrentMigration.Progress = \"Begin\"\n\t\t\t\t\tcurrentMigration.Status = sdk.MigrationStatusInProgress\n\t\t\t\t\tif err := Insert(db, ¤tMigration); err != nil {\n\t\t\t\t\t\tlog.Error(\"Cannot insert migration %s : %v\", currentMigration.Name, err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tlog.Info(\"Migration [%s]: begin\", currentMigration.Name)\n\t\t\t\tif err := currentMigration.ExecFunc(contex); err != nil {\n\t\t\t\t\tlog.Error(\"migration %s in ERROR : %v\", currentMigration.Name, err)\n\t\t\t\t\tcurrentMigration.Error = err.Error()\n\t\t\t\t}\n\t\t\t\tcurrentMigration.Progress = \"Migration done\"\n\t\t\t\tcurrentMigration.Done = time.Now()\n\t\t\t\tcurrentMigration.Status = sdk.MigrationStatusDone\n\n\t\t\t\tif err := Update(db, ¤tMigration); err != nil {\n\t\t\t\t\tlog.Error(\"Cannot update migration %s : %v\", currentMigration.Name, err)\n\t\t\t\t}\n\t\t\t\tlog.Info(\"Migration [%s]: Done\", currentMigration.Name)\n\t\t\t}, panicDump)\n\t\t}(migration)\n\t}\n}\n\n\/\/ CleanMigrationsList Delete all elements in local migrations\nfunc CleanMigrationsList() {\n\tmigrations = []sdk.Migration{}\n}\n\n\/\/ Status returns monitoring status, if there are cds migration in progress it returns WARN\nfunc Status(db gorp.SqlExecutor) sdk.MonitoringStatusLine {\n\tcount, err := db.SelectInt(\"SELECT COUNT(id) FROM cds_migration WHERE status <> $1 AND status <> $2\", sdk.MigrationStatusDone, sdk.MigrationStatusCanceled)\n\tif err != nil {\n\t\treturn sdk.MonitoringStatusLine{Component: \"CDS Migration\", Status: sdk.MonitoringStatusWarn, Value: fmt.Sprintf(\"KO Cannot request in database : %v\", err)}\n\t}\n\tstatus := sdk.MonitoringStatusOK\n\tif count > 0 {\n\t\tstatus = sdk.MonitoringStatusWarn\n\t}\n\treturn sdk.MonitoringStatusLine{Component: \"Nb of CDS Migrations in progress\", Value: fmt.Sprintf(\"%d\", count), Status: status}\n}\n<commit_msg>fix(migrate): if migration exists in database reuse to update (#4191)<commit_after>package migrate\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/blang\/semver\"\n\t\"github.com\/go-gorp\/gorp\"\n\n\t\"github.com\/ovh\/cds\/sdk\"\n\t\"github.com\/ovh\/cds\/sdk\/log\"\n)\n\n\/\/ MinCompatibleRelease represent the minimum release which is working with these migrations, need to update when we delete migration in our codebase\nconst MinCompatibleRelease = \"0.38.1\"\n\nvar migrations = []sdk.Migration{}\n\n\/\/ Add usefull to add new migrations\nfunc Add(migration sdk.Migration) {\n\tif migration.Major == 0 && migration.Minor == 0 && migration.Patch == 0 && migration.Release != \"\" && !strings.HasPrefix(migration.Release, \"snapshot\") {\n\t\tv, err := semver.Parse(migration.Release)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Cannot parse your release reference : %v\", err)\n\t\t}\n\t\tmigration.Major = v.Major\n\t\tmigration.Minor = v.Minor\n\t\tmigration.Patch = v.Patch\n\t}\n\tmigrations = append(migrations, migration)\n}\n\n\/\/ Run run all local migrations\nfunc Run(ctx context.Context, db gorp.SqlExecutor, panicDump func(s string) (io.WriteCloser, error)) {\n\tfor _, migration := range migrations {\n\t\tfunc(currentMigration sdk.Migration) {\n\t\t\tsdk.GoRoutine(ctx, \"migrate_\"+migration.Name, func(contex context.Context) {\n\t\t\t\tmig, errMig := GetByName(db, currentMigration.Name)\n\t\t\t\tif errMig != nil {\n\t\t\t\t\tlog.Error(\"Cannot get migration %s : %v\", currentMigration.Name, errMig)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif mig != nil {\n\t\t\t\t\tif mig.Status == sdk.MigrationStatusDone || mig.Status == sdk.MigrationStatusCanceled {\n\t\t\t\t\t\tlog.Info(\"Migration> %s> Already done (status: %s)\", currentMigration.Name, mig.Status)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ set the previous migration id for for the case where the migration was reset\n\t\t\t\t\tcurrentMigration.ID = mig.ID\n\t\t\t\t} else {\n\t\t\t\t\tcurrentMigration.Progress = \"Begin\"\n\t\t\t\t\tcurrentMigration.Status = sdk.MigrationStatusInProgress\n\t\t\t\t\tif err := Insert(db, ¤tMigration); err != nil {\n\t\t\t\t\t\tlog.Error(\"Cannot insert migration %s : %v\", currentMigration.Name, err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tlog.Info(\"Migration [%s]: begin\", currentMigration.Name)\n\t\t\t\tif err := currentMigration.ExecFunc(contex); err != nil {\n\t\t\t\t\tlog.Error(\"migration %s in ERROR : %v\", currentMigration.Name, err)\n\t\t\t\t\tcurrentMigration.Error = err.Error()\n\t\t\t\t}\n\t\t\t\tcurrentMigration.Progress = \"Migration done\"\n\t\t\t\tcurrentMigration.Done = time.Now()\n\t\t\t\tcurrentMigration.Status = sdk.MigrationStatusDone\n\n\t\t\t\tif err := Update(db, ¤tMigration); err != nil {\n\t\t\t\t\tlog.Error(\"Cannot update migration %s : %v\", currentMigration.Name, err)\n\t\t\t\t}\n\t\t\t\tlog.Info(\"Migration [%s]: Done\", currentMigration.Name)\n\t\t\t}, panicDump)\n\t\t}(migration)\n\t}\n}\n\n\/\/ CleanMigrationsList Delete all elements in local migrations\nfunc CleanMigrationsList() {\n\tmigrations = []sdk.Migration{}\n}\n\n\/\/ Status returns monitoring status, if there are cds migration in progress it returns WARN\nfunc Status(db gorp.SqlExecutor) sdk.MonitoringStatusLine {\n\tcount, err := db.SelectInt(\"SELECT COUNT(id) FROM cds_migration WHERE status <> $1 AND status <> $2\", sdk.MigrationStatusDone, sdk.MigrationStatusCanceled)\n\tif err != nil {\n\t\treturn sdk.MonitoringStatusLine{Component: \"CDS Migration\", Status: sdk.MonitoringStatusWarn, Value: fmt.Sprintf(\"KO Cannot request in database : %v\", err)}\n\t}\n\tstatus := sdk.MonitoringStatusOK\n\tif count > 0 {\n\t\tstatus = sdk.MonitoringStatusWarn\n\t}\n\treturn sdk.MonitoringStatusLine{Component: \"Nb of CDS Migrations in progress\", Value: fmt.Sprintf(\"%d\", count), Status: status}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/xitongsys\/parquet-go\/ParquetFile\"\n\t\"github.com\/xitongsys\/parquet-go\/ParquetReader\"\n\t\"github.com\/xitongsys\/parquet-go\/ParquetWriter\"\n\t\"log\"\n\t\"time\"\n)\n\ntype Student struct {\n\tName string `parquet:\"name=name, type=UTF8\"`\n\tAge int32 `parquet:\"name=age, type=INT32\"`\n\tId int64 `parquet:\"name=id, type=INT64\"`\n\tWeight float32 `parquet:\"name=weight, type=FLOAT\"`\n\tSex bool `parquet:\"name=sex, type=BOOLEAN\"`\n\tDay int32 `parquet:\"name=day, type=DATE\"`\n}\n\nfunc main() {\n\tfw, _ := ParquetFile.NewLocalFileWriter(\"flat.parquet\")\n\n\t\/\/write flat\n\tpw, _ := ParquetWriter.NewParquetWriter(fw, new(Student), 10)\n\tnum := 10\n\tfor i := 0; i < num; i++ {\n\t\tstu := Student{\n\t\t\tName: \"StudentName\",\n\t\t\tAge: int32(20 + i%5),\n\t\t\tId: int64(i),\n\t\t\tWeight: float32(50.0 + float32(i)*0.1),\n\t\t\tSex: bool(i%2 == 0),\n\t\t\tDay: int32(time.Now().Unix() \/ 3600 \/ 24),\n\t\t}\n\t\tpw.Write(stu)\n\t}\n\tpw.Flush(true)\n\tpw.WriteStop()\n\tlog.Println(\"Write Finished\")\n\tfw.Close()\n\n\t\/\/\/read flat\n\tfr, _ := ParquetFile.NewLocalFileReader(\"flat.parquet\")\n\tpr, err := ParquetReader.NewParquetReader(fr, new(Student), 1)\n\tif err != nil {\n\t\tlog.Println(\"Failed new reader\", err)\n\t}\n\tnum = int(pr.GetNumRows())\n\tfor i := 0; i < num; i++ {\n\t\tstus := make([]Student, 1)\n\t\tpr.Read(&stus)\n\t\tlog.Println(stus)\n\t}\n\n\tpr.ReadStop()\n\tfr.Close()\n\n}\n<commit_msg>add RLE in example<commit_after>package main\n\nimport (\n\t\"github.com\/xitongsys\/parquet-go\/ParquetFile\"\n\t\"github.com\/xitongsys\/parquet-go\/ParquetReader\"\n\t\"github.com\/xitongsys\/parquet-go\/ParquetWriter\"\n\t\"log\"\n\t\"time\"\n)\n\ntype Student struct {\n\tName string `parquet:\"name=name, type=UTF8\"`\n\tAge int32 `parquet:\"name=age, type=INT32\"`\n\tId int64 `parquet:\"name=id, type=INT64, encoding=RLE, length=10\"`\n\tWeight float32 `parquet:\"name=weight, type=FLOAT\"`\n\tSex bool `parquet:\"name=sex, type=BOOLEAN\"`\n\tDay int32 `parquet:\"name=day, type=DATE\"`\n}\n\nfunc main() {\n\tfw, _ := ParquetFile.NewLocalFileWriter(\"flat.parquet\")\n\n\t\/\/write flat\n\tpw, _ := ParquetWriter.NewParquetWriter(fw, new(Student), 10)\n\tnum := 10\n\tfor i := 0; i < num; i++ {\n\t\tstu := Student{\n\t\t\tName: \"StudentName\",\n\t\t\tAge: int32(20 + i%5),\n\t\t\tId: int64(i),\n\t\t\tWeight: float32(50.0 + float32(i)*0.1),\n\t\t\tSex: bool(i%2 == 0),\n\t\t\tDay: int32(time.Now().Unix() \/ 3600 \/ 24),\n\t\t}\n\t\tpw.Write(stu)\n\t}\n\tpw.Flush(true)\n\tpw.WriteStop()\n\tlog.Println(\"Write Finished\")\n\tfw.Close()\n\n\t\/\/\/read flat\n\tfr, _ := ParquetFile.NewLocalFileReader(\"flat.parquet\")\n\tpr, err := ParquetReader.NewParquetReader(fr, new(Student), 1)\n\tif err != nil {\n\t\tlog.Println(\"Failed new reader\", err)\n\t}\n\tnum = int(pr.GetNumRows())\n\tfor i := 0; i < num; i++ {\n\t\tstus := make([]Student, 1)\n\t\tpr.Read(&stus)\n\t\tlog.Println(stus)\n\t}\n\n\tpr.ReadStop()\n\tfr.Close()\n\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/buger\/jsonparser\"\n\t\"github.com\/imdario\/mergo\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/pterodactyl\/wings\/environment\"\n)\n\n\/\/ Merges data passed through in JSON form into the existing server object.\n\/\/ Any changes to the build settings will apply immediately in the environment\n\/\/ if the environment supports it.\n\/\/\n\/\/ The server will be marked as requiring a rebuild on the next boot sequence,\n\/\/ it is up to the specific environment to determine what needs to happen when\n\/\/ that is the case.\nfunc (s *Server) UpdateDataStructure(data []byte) error {\n\tsrc := new(Configuration)\n\tif err := json.Unmarshal(data, src); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\t\/\/ Don't allow obviously corrupted data to pass through into this function. If the UUID\n\t\/\/ doesn't match something has gone wrong and the API is attempting to meld this server\n\t\/\/ instance into a totally different one, which would be bad.\n\tif src.Uuid != \"\" && s.Id() != \"\" && src.Uuid != s.Id() {\n\t\treturn errors.New(\"attempting to merge a data stack with an invalid UUID\")\n\t}\n\n\t\/\/ Grab a copy of the configuration to work on.\n\tc := *s.Config()\n\n\t\/\/ Lock our copy of the configuration since the deferred unlock will end up acting upon this\n\t\/\/ new memory address rather than the old one. If we don't lock this, the deferred unlock will\n\t\/\/ cause a panic when it goes to run. However, since we only update s.cfg at the end, if there\n\t\/\/ is an error before that point we'll still properly unlock the original configuration for the\n\t\/\/ server.\n\tc.mu.Lock()\n\n\t\/\/ Lock the server configuration while we're doing this merge to avoid anything\n\t\/\/ trying to overwrite it or make modifications while we're sorting out what we\n\t\/\/ need to do.\n\ts.cfg.mu.Lock()\n\tdefer s.cfg.mu.Unlock()\n\n\t\/\/ Merge the new data object that we have received with the existing server data object\n\t\/\/ and then save it to the disk so it is persistent.\n\tif err := mergo.Merge(&c, src, mergo.WithOverride); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\t\/\/ Don't explode if we're setting CPU limits to 0. Mergo sees that as an empty value\n\t\/\/ so it won't override the value we've passed through in the API call. However, we can\n\t\/\/ safely assume that we're passing through valid data structures here. I foresee this\n\t\/\/ backfiring at some point, but until then...\n\t\/\/\n\t\/\/ We'll go ahead and do this with swap as well.\n\tc.Build.CpuLimit = src.Build.CpuLimit\n\tc.Build.Swap = src.Build.Swap\n\tc.Build.DiskSpace = src.Build.DiskSpace\n\n\t\/\/ Mergo can't quite handle this boolean value correctly, so for now we'll just\n\t\/\/ handle this edge case manually since none of the other data passed through in this\n\t\/\/ request is going to be boolean. Allegedly.\n\tif v, err := jsonparser.GetBoolean(data, \"container\", \"oom_disabled\"); err != nil {\n\t\tif err != jsonparser.KeyPathNotFoundError {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t} else {\n\t\tc.Build.OOMDisabled = v\n\t}\n\n\t\/\/ Mergo also cannot handle this boolean value.\n\tif v, err := jsonparser.GetBoolean(data, \"suspended\"); err != nil {\n\t\tif err != jsonparser.KeyPathNotFoundError {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t} else {\n\t\tc.Suspended = v\n\t}\n\n\tif v, err := jsonparser.GetBoolean(data, \"skip_egg_scripts\"); err != nil {\n\t\tif err != jsonparser.KeyPathNotFoundError {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t} else {\n\t\tc.SkipEggScripts = v\n\t}\n\n\t\/\/ Environment and Mappings should be treated as a full update at all times, never a\n\t\/\/ true patch, otherwise we can't know what we're passing along.\n\tif src.EnvVars != nil && len(src.EnvVars) > 0 {\n\t\tc.EnvVars = src.EnvVars\n\t}\n\n\tif src.Allocations.Mappings != nil && len(src.Allocations.Mappings) > 0 {\n\t\tc.Allocations.Mappings = src.Allocations.Mappings\n\t}\n\n\tif src.Mounts != nil && len(src.Mounts) > 0 {\n\t\tc.Mounts = src.Mounts\n\t}\n\n\t\/\/ Update the configuration once we have a lock on the configuration object.\n\ts.cfg = c\n\n\treturn nil\n}\n\n\/\/ Updates the environment for the server to match any of the changed data. This pushes new settings and\n\/\/ environment variables to the environment. In addition, the in-situ update method is called on the\n\/\/ environment which will allow environments that make use of it (such as Docker) to immediately apply\n\/\/ some settings without having to wait on a server to restart.\n\/\/\n\/\/ This functionality allows a server's resources limits to be modified on the fly and have them apply\n\/\/ right away allowing for dynamic resource allocation and responses to abusive server processes.\nfunc (s *Server) SyncWithEnvironment() {\n\ts.Log().Debug(\"syncing server settings with environment\")\n\n\t\/\/ Update the environment settings using the new information from this server.\n\ts.Environment.Config().SetSettings(environment.Settings{\n\t\tMounts: s.Mounts(),\n\t\tAllocations: s.Config().Allocations,\n\t\tLimits: s.Config().Build,\n\t})\n\n\t\/\/ If build limits are changed, environment variables also change. Plus, any modifications to\n\t\/\/ the startup command also need to be properly propagated to this environment.\n\t\/\/\n\t\/\/ @see https:\/\/github.com\/pterodactyl\/panel\/issues\/2255\n\ts.Environment.Config().SetEnvironmentVariables(s.GetEnvironmentVariables())\n\n\tif !s.IsSuspended() {\n\t\t\/\/ Update the environment in place, allowing memory and CPU usage to be adjusted\n\t\t\/\/ on the fly without the user needing to reboot (theoretically).\n\t\ts.Log().Info(\"performing server limit modification on-the-fly\")\n\t\tif err := s.Environment.InSituUpdate(); err != nil {\n\t\t\t\/\/ This is not a failure, the process is still running fine and will fix itself on the\n\t\t\t\/\/ next boot, or fail out entirely in a more logical position.\n\t\t\ts.Log().WithField(\"error\", err).Warn(\"failed to perform on-the-fly update of the server environment\")\n\t\t}\n\t} else {\n\t\t\/\/ Checks if the server is now in a suspended state. If so and a server process is currently running it\n\t\t\/\/ will be gracefully stopped (and terminated if it refuses to stop).\n\t\ts.Log().Info(\"server suspended with running process state, terminating now\")\n\n\t\tgo func(s *Server) {\n\t\t\tif err := s.Environment.WaitForStop(60, true); err != nil {\n\t\t\t\ts.Log().WithField(\"error\", err).Warn(\"failed to terminate server environment after suspension\")\n\t\t\t}\n\t\t}(s)\n\t}\n}\n<commit_msg>Safety logic check, don't try to stop a stopped server when suspending; closes #2318<commit_after>package server\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/buger\/jsonparser\"\n\t\"github.com\/imdario\/mergo\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/pterodactyl\/wings\/environment\"\n)\n\n\/\/ Merges data passed through in JSON form into the existing server object.\n\/\/ Any changes to the build settings will apply immediately in the environment\n\/\/ if the environment supports it.\n\/\/\n\/\/ The server will be marked as requiring a rebuild on the next boot sequence,\n\/\/ it is up to the specific environment to determine what needs to happen when\n\/\/ that is the case.\nfunc (s *Server) UpdateDataStructure(data []byte) error {\n\tsrc := new(Configuration)\n\tif err := json.Unmarshal(data, src); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\t\/\/ Don't allow obviously corrupted data to pass through into this function. If the UUID\n\t\/\/ doesn't match something has gone wrong and the API is attempting to meld this server\n\t\/\/ instance into a totally different one, which would be bad.\n\tif src.Uuid != \"\" && s.Id() != \"\" && src.Uuid != s.Id() {\n\t\treturn errors.New(\"attempting to merge a data stack with an invalid UUID\")\n\t}\n\n\t\/\/ Grab a copy of the configuration to work on.\n\tc := *s.Config()\n\n\t\/\/ Lock our copy of the configuration since the deferred unlock will end up acting upon this\n\t\/\/ new memory address rather than the old one. If we don't lock this, the deferred unlock will\n\t\/\/ cause a panic when it goes to run. However, since we only update s.cfg at the end, if there\n\t\/\/ is an error before that point we'll still properly unlock the original configuration for the\n\t\/\/ server.\n\tc.mu.Lock()\n\n\t\/\/ Lock the server configuration while we're doing this merge to avoid anything\n\t\/\/ trying to overwrite it or make modifications while we're sorting out what we\n\t\/\/ need to do.\n\ts.cfg.mu.Lock()\n\tdefer s.cfg.mu.Unlock()\n\n\t\/\/ Merge the new data object that we have received with the existing server data object\n\t\/\/ and then save it to the disk so it is persistent.\n\tif err := mergo.Merge(&c, src, mergo.WithOverride); err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\t\/\/ Don't explode if we're setting CPU limits to 0. Mergo sees that as an empty value\n\t\/\/ so it won't override the value we've passed through in the API call. However, we can\n\t\/\/ safely assume that we're passing through valid data structures here. I foresee this\n\t\/\/ backfiring at some point, but until then...\n\t\/\/\n\t\/\/ We'll go ahead and do this with swap as well.\n\tc.Build.CpuLimit = src.Build.CpuLimit\n\tc.Build.Swap = src.Build.Swap\n\tc.Build.DiskSpace = src.Build.DiskSpace\n\n\t\/\/ Mergo can't quite handle this boolean value correctly, so for now we'll just\n\t\/\/ handle this edge case manually since none of the other data passed through in this\n\t\/\/ request is going to be boolean. Allegedly.\n\tif v, err := jsonparser.GetBoolean(data, \"container\", \"oom_disabled\"); err != nil {\n\t\tif err != jsonparser.KeyPathNotFoundError {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t} else {\n\t\tc.Build.OOMDisabled = v\n\t}\n\n\t\/\/ Mergo also cannot handle this boolean value.\n\tif v, err := jsonparser.GetBoolean(data, \"suspended\"); err != nil {\n\t\tif err != jsonparser.KeyPathNotFoundError {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t} else {\n\t\tc.Suspended = v\n\t}\n\n\tif v, err := jsonparser.GetBoolean(data, \"skip_egg_scripts\"); err != nil {\n\t\tif err != jsonparser.KeyPathNotFoundError {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t} else {\n\t\tc.SkipEggScripts = v\n\t}\n\n\t\/\/ Environment and Mappings should be treated as a full update at all times, never a\n\t\/\/ true patch, otherwise we can't know what we're passing along.\n\tif src.EnvVars != nil && len(src.EnvVars) > 0 {\n\t\tc.EnvVars = src.EnvVars\n\t}\n\n\tif src.Allocations.Mappings != nil && len(src.Allocations.Mappings) > 0 {\n\t\tc.Allocations.Mappings = src.Allocations.Mappings\n\t}\n\n\tif src.Mounts != nil && len(src.Mounts) > 0 {\n\t\tc.Mounts = src.Mounts\n\t}\n\n\t\/\/ Update the configuration once we have a lock on the configuration object.\n\ts.cfg = c\n\n\treturn nil\n}\n\n\/\/ Updates the environment for the server to match any of the changed data. This pushes new settings and\n\/\/ environment variables to the environment. In addition, the in-situ update method is called on the\n\/\/ environment which will allow environments that make use of it (such as Docker) to immediately apply\n\/\/ some settings without having to wait on a server to restart.\n\/\/\n\/\/ This functionality allows a server's resources limits to be modified on the fly and have them apply\n\/\/ right away allowing for dynamic resource allocation and responses to abusive server processes.\nfunc (s *Server) SyncWithEnvironment() {\n\ts.Log().Debug(\"syncing server settings with environment\")\n\n\t\/\/ Update the environment settings using the new information from this server.\n\ts.Environment.Config().SetSettings(environment.Settings{\n\t\tMounts: s.Mounts(),\n\t\tAllocations: s.Config().Allocations,\n\t\tLimits: s.Config().Build,\n\t})\n\n\t\/\/ If build limits are changed, environment variables also change. Plus, any modifications to\n\t\/\/ the startup command also need to be properly propagated to this environment.\n\t\/\/\n\t\/\/ @see https:\/\/github.com\/pterodactyl\/panel\/issues\/2255\n\ts.Environment.Config().SetEnvironmentVariables(s.GetEnvironmentVariables())\n\n\tif !s.IsSuspended() {\n\t\t\/\/ Update the environment in place, allowing memory and CPU usage to be adjusted\n\t\t\/\/ on the fly without the user needing to reboot (theoretically).\n\t\ts.Log().Info(\"performing server limit modification on-the-fly\")\n\t\tif err := s.Environment.InSituUpdate(); err != nil {\n\t\t\t\/\/ This is not a failure, the process is still running fine and will fix itself on the\n\t\t\t\/\/ next boot, or fail out entirely in a more logical position.\n\t\t\ts.Log().WithField(\"error\", err).Warn(\"failed to perform on-the-fly update of the server environment\")\n\t\t}\n\t} else {\n\t\t\/\/ Checks if the server is now in a suspended state. If so and a server process is currently running it\n\t\t\/\/ will be gracefully stopped (and terminated if it refuses to stop).\n\t\tif s.GetState() != environment.ProcessOfflineState {\n\t\t\ts.Log().Info(\"server suspended with running process state, terminating now\")\n\n\t\t\tgo func(s *Server) {\n\t\t\t\tif err := s.Environment.WaitForStop(60, true); err != nil {\n\t\t\t\t\ts.Log().WithField(\"error\", err).Warn(\"failed to terminate server environment after suspension\")\n\t\t\t\t}\n\t\t\t}(s)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package service\n\n\/* CHECKLIST\n * [ ] Uses interfaces as appropriate\n * [ ] Private package variables use underscore prefix\n * [ ] All parameters validated\n * [ ] All errors handled\n * [ ] Reviewed for concurrency safety\n * [ ] Code complete\n * [ ] Full test coverage\n *\/\n\ntype Error struct {\n\tCode string `json:\"code,omitempty\"`\n\tDetail string `json:\"detail,omitempty\"`\n\tSource *Source `json:\"source,omitempty\"`\n\tStatus int `json:\"status,string,omitempty\"`\n\tTitle string `json:\"title,omitempty\"`\n}\n\ntype Source struct {\n\tParameter string `json:\"parameter,omitempty\"`\n\tPointer string `json:\"pointer,omitempty\"`\n}\n\ntype Errors struct {\n\terrors []*Error\n}\n\nfunc (e *Error) WithParameter(parameter string) *Error {\n\te.Source = &Source{Parameter: parameter}\n\treturn e\n}\n\nfunc (e *Error) WithPointer(pointer string) *Error {\n\te.Source = &Source{Pointer: pointer}\n\treturn e\n}\n\nfunc NewErrors() *Errors {\n\treturn &Errors{\n\t\terrors: []*Error{},\n\t}\n}\n\nfunc (e *Errors) HasErrors() bool {\n\treturn len(e.errors) > 0\n}\n\nfunc (e *Errors) GetError(index int) *Error {\n\tif index < 0 || index >= len(e.errors) {\n\t\treturn nil\n\t}\n\n\treturn e.errors[index]\n}\n\nfunc (e *Errors) GetErrors() []*Error {\n\treturn e.errors\n}\n\nfunc (e *Errors) AppendError(err *Error) {\n\te.errors = append(e.errors, err)\n}\n<commit_msg>Errors refactor (to be deprecated)<commit_after>package service\n\n\/* CHECKLIST\n * [ ] Uses interfaces as appropriate\n * [ ] Private package variables use underscore prefix\n * [ ] All parameters validated\n * [ ] All errors handled\n * [ ] Reviewed for concurrency safety\n * [ ] Code complete\n * [ ] Full test coverage\n *\/\n\ntype Error struct {\n\tCode string `json:\"code,omitempty\"`\n\tTitle string `json:\"title,omitempty\"`\n\tDetail string `json:\"detail,omitempty\"`\n\tStatus int `json:\"status,string,omitempty\"`\n\tSource *Source `json:\"source,omitempty\"`\n}\n\ntype Source struct {\n\tParameter string `json:\"parameter,omitempty\"`\n\tPointer string `json:\"pointer,omitempty\"`\n}\n\nfunc (e *Error) WithParameter(parameter string) *Error {\n\tif e.Source == nil {\n\t\te.Source = &Source{}\n\t}\n\te.Source.Parameter = parameter\n\treturn e\n}\n\nfunc (e *Error) WithPointer(pointer string) *Error {\n\tif e.Source == nil {\n\t\te.Source = &Source{}\n\t}\n\te.Source.Pointer = pointer\n\treturn e\n}\n\n\/\/ TODO: Deprecate below Errors struct\n\ntype Errors struct {\n\terrors []*Error\n}\n\nfunc NewErrors() *Errors {\n\treturn &Errors{\n\t\terrors: []*Error{},\n\t}\n}\n\nfunc (e *Errors) HasErrors() bool {\n\treturn len(e.errors) > 0\n}\n\nfunc (e *Errors) GetError(index int) *Error {\n\tif index < 0 || index >= len(e.errors) {\n\t\treturn nil\n\t}\n\n\treturn e.errors[index]\n}\n\nfunc (e *Errors) GetErrors() []*Error {\n\treturn e.errors\n}\n\nfunc (e *Errors) AppendError(err *Error) {\n\te.errors = append(e.errors, err)\n}\n<|endoftext|>"} {"text":"<commit_before>package services\n\nimport (\n\tcmdInfo \"github.com\/synw\/microb\/libmicrob\/cmd\/info\"\n\t\"github.com\/synw\/microb\/libmicrob\/datatypes\"\n\tcmdHttp \"github.com\/synw\/microb\/services\/httpServer\/cmd\"\n\thttpState \"github.com\/synw\/microb\/services\/httpServer\/state\"\n)\n\nvar infoCmds = []string{\"ping\"}\nvar httpCmds = []string{\"start\", \"stop\"}\n\nvar All = map[string]*datatypes.Service{\n\t\"info\": New(\"info\", infoCmds),\n\t\"http\": New(\"http\", httpCmds),\n}\n\nvar initState = map[string]interface{}{\n\t\"http\": httpState.InitState,\n}\nvar initDispatch = map[string]interface{}{\n\t\"http\": cmdHttp.Dispatch,\n\t\"info\": cmdInfo.Dispatch,\n}\n<commit_msg>Rename: services\/manifest.go<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage kafka\n\nimport (\n\t\"fmt\"\n\t\"github.com\/Shopify\/sarama\"\n\t\"github.com\/ligato\/cn-infra\/db\/keyval\"\n\t\"github.com\/ligato\/cn-infra\/flavors\/local\"\n\t\"github.com\/ligato\/cn-infra\/health\/statuscheck\"\n\t\"github.com\/ligato\/cn-infra\/logging\"\n\t\"github.com\/ligato\/cn-infra\/messaging\"\n\t\"github.com\/ligato\/cn-infra\/messaging\/kafka\/client\"\n\t\"github.com\/ligato\/cn-infra\/messaging\/kafka\/mux\"\n\t\"github.com\/ligato\/cn-infra\/utils\/safeclose\"\n)\n\nconst topic = \"status-check\"\n\n\/\/ Plugin provides API for interaction with kafka brokers.\ntype Plugin struct {\n\tDeps \/\/ inject\n\tmux *mux.Multiplexer\n\tsubscription chan (*client.ConsumerMessage)\n\n\t\/\/ Kafka plugin is using two clients. The first one is using 'hash' (default) partitioner. The second mux\n\t\/\/ uses manual partitioner which allows to send a message to specified partition and watching to desired partition\/offset\n\thsClient sarama.Client\n\tmanClient sarama.Client\n\n\tdisabled bool\n}\n\n\/\/ Deps is here to group injected dependencies of plugin\n\/\/ to not mix with other plugin fields.\ntype Deps struct {\n\tlocal.PluginInfraDeps \/\/inject\n}\n\n\/\/ FromExistingMux is used mainly for testing purposes.\nfunc FromExistingMux(mux *mux.Multiplexer) *Plugin {\n\treturn &Plugin{mux: mux}\n}\n\n\/\/ Init is called at plugin initialization.\nfunc (plugin *Plugin) Init() (err error) {\n\t\/\/ Prepare topic and subscription for status check client\n\tplugin.subscription = make(chan *client.ConsumerMessage)\n\n\t\/\/ Get muxCfg data (contains kafka brokers ip addresses)\n\tmuxCfg := &mux.Config{}\n\tfound, err := plugin.PluginConfig.GetValue(muxCfg)\n\tif !found {\n\t\tplugin.Log.Info(\"kafka config not found \", plugin.PluginConfig.GetConfigName(), \" - skip loading this plugin\")\n\t\tplugin.disabled = true\n\t\treturn nil \/\/skip loading the plugin\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ retrieve clientCfg\n\tclientCfg := plugin.getClientConfig(muxCfg, plugin.Log, topic)\n\n\t\/\/ init 'hash' sarama client\n\tplugin.hsClient, err = client.NewClient(clientCfg, client.Hash)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ init 'manual' sarama client\n\tplugin.manClient, err = client.NewClient(clientCfg, client.Manual)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Initialize both multiplexers to allow both, dynamic and manual mode\n\tif plugin.mux == nil {\n\t\tname := plugin.ServiceLabel.GetAgentLabel() + \"-hash\"\n\t\tplugin.mux, err = mux.InitMultiplexerWithConfig(clientCfg, plugin.hsClient, plugin.manClient, name, plugin.Log)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tplugin.Log.Debug(\"Default multiplexer initialized\")\n\t}\n\n\treturn err\n}\n\n\/\/ AfterInit is called in the second phase of the initialization. The kafka multiplexerNewWatcher\n\/\/ is started, all consumers have to be subscribed until this phase.\nfunc (plugin *Plugin) AfterInit() error {\n\tif plugin.mux != nil {\n\t\terr := plugin.mux.Start()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Register for providing status reports (polling mode)\n\tif plugin.StatusCheck != nil && !plugin.disabled {\n\t\tplugin.StatusCheck.Register(plugin.PluginName, func() (statuscheck.PluginState, error) {\n\t\t\tif plugin.mux.Consumer == nil || plugin.mux.Consumer.Client == nil {\n\t\t\t\treturn statuscheck.Error, fmt.Errorf(\"kafka client\/consumer not initialized\")\n\t\t\t}\n\t\t\t\/\/ Method 'RefreshMetadata()' returns error if kafka server is unavailable\n\t\t\terr := plugin.hsClient.RefreshMetadata(topic)\n\t\t\tif err == nil {\n\t\t\t\treturn statuscheck.OK, nil\n\t\t\t}\n\t\t\tplugin.Log.Errorf(\"Kafka server unavailable\")\n\t\t\treturn statuscheck.Error, err\n\t\t})\n\t} else {\n\t\tplugin.Log.Warnf(\"Unable to start status check for kafka\")\n\t}\n\n\treturn nil\n}\n\n\/\/ Close is called at plugin cleanup phase.\nfunc (plugin *Plugin) Close() error {\n\t_, err := safeclose.CloseAll(plugin.hsClient, plugin.manClient, plugin.mux)\n\treturn err\n}\n\n\/\/ NewBytesConnection returns a new instance of a connection to access kafka brokers. The connection allows to create\n\/\/ new kafka providers\/consumers on multiplexer with hash partitioner.\nfunc (plugin *Plugin) NewBytesConnection(name string) *mux.BytesConnection {\n\treturn plugin.mux.NewBytesConnection(name)\n}\n\n\/\/ NewBytesConnectionToPartition returns a new instance of a connection to access kafka brokers. The connection allows to create\n\/\/ new kafka providers\/consumers on multiplexer with manual partitioner which allows to send messages to specific partition\n\/\/ in kafka cluster and watch on partition\/offset.\nfunc (plugin *Plugin) NewBytesConnectionToPartition(name string) *mux.BytesConnection {\n\treturn plugin.mux.NewBytesConnection(name)\n}\n\n\/\/ NewProtoConnection returns a new instance of a connection to access kafka brokers. The connection allows to create\n\/\/ new kafka providers\/consumers on multiplexer with hash partitioner.The connection uses proto-modelled messages.\nfunc (plugin *Plugin) NewProtoConnection(name string) mux.Connection {\n\treturn plugin.mux.NewProtoConnection(name, &keyval.SerializerJSON{})\n}\n\n\/\/ NewProtoManualConnection returns a new instance of a connection to access kafka brokers. The connection allows to create\n\/\/ new kafka providers\/consumers on multiplexer with manual partitioner which allows to send messages to specific partition\n\/\/ in kafka cluster and watch on partition\/offset. The connection uses proto-modelled messages.\nfunc (plugin *Plugin) NewProtoManualConnection(name string) mux.ManualConnection {\n\treturn plugin.mux.NewProtoManualConnection(name, &keyval.SerializerJSON{})\n}\n\n\/\/ NewSyncPublisher creates a publisher that allows to publish messages using synchronous API. The publisher creates\n\/\/ new proto connection on multiplexer with default partitioner.\nfunc (plugin *Plugin) NewSyncPublisher(connectionName string, topic string) (messaging.ProtoPublisher, error) {\n\treturn plugin.NewProtoConnection(connectionName).NewSyncPublisher(topic)\n}\n\n\/\/ NewSyncPublisherToPartition creates a publisher that allows to publish messages to custom partition using synchronous API.\n\/\/ The publisher creates new proto connection on multiplexer with manual partitioner.\nfunc (plugin *Plugin) NewSyncPublisherToPartition(connectionName string, topic string, partition int32) (messaging.ProtoPublisher, error) {\n\treturn plugin.NewProtoManualConnection(connectionName).NewSyncPublisherToPartition(topic, partition)\n}\n\n\/\/ NewAsyncPublisher creates a publisher that allows to publish messages using asynchronous API. The publisher creates\n\/\/ new proto connection on multiplexer with default partitioner.\nfunc (plugin *Plugin) NewAsyncPublisher(connectionName string, topic string, successClb func(messaging.ProtoMessage), errorClb func(messaging.ProtoMessageErr)) (messaging.ProtoPublisher, error) {\n\treturn plugin.NewProtoConnection(connectionName).NewAsyncPublisher(topic, successClb, errorClb)\n}\n\n\/\/ NewAsyncPublisherToPartition creates a publisher that allows to publish messages to custom partition using asynchronous API.\n\/\/ The publisher creates new proto connection on multiplexer with manual partitioner.\nfunc (plugin *Plugin) NewAsyncPublisherToPartition(connectionName string, topic string, partition int32, successClb func(messaging.ProtoMessage), errorClb func(messaging.ProtoMessageErr)) (messaging.ProtoPublisher, error) {\n\treturn plugin.NewProtoManualConnection(connectionName).NewAsyncPublisherToPartition(topic, partition, successClb, errorClb)\n}\n\n\/\/ NewWatcher creates a watcher that allows to start\/stop consuming of messaging published to given topics.\nfunc (plugin *Plugin) NewWatcher(name string) messaging.ProtoWatcher {\n\treturn plugin.NewProtoConnection(name)\n}\n\n\/\/ NewPartitionWatcher creates a watcher that allows to start\/stop consuming of messaging published to given topics, offset and partition\nfunc (plugin *Plugin) NewPartitionWatcher(name string) messaging.ProtoPartitionWatcher {\n\treturn plugin.NewProtoManualConnection(name)\n}\n\n\/\/ Disabled if the plugin config was not found\nfunc (plugin *Plugin) Disabled() (disabled bool) {\n\treturn plugin.disabled\n}\n\n\/\/ Receive client config according to kafka config data\nfunc (plugin *Plugin) getClientConfig(config *mux.Config, logger logging.Logger, topic string) *client.Config {\n\tclientCfg := client.NewConfig(logger)\n\t\/\/ Set brokers obtained from kafka config. In case there are none available, use a default one\n\tif len(config.Addrs) > 0 {\n\t\tclientCfg.SetBrokers(config.Addrs...)\n\t} else {\n\t\tclientCfg.SetBrokers(mux.DefAddress)\n\t}\n\tclientCfg.SetGroup(plugin.ServiceLabel.GetAgentLabel())\n\tclientCfg.SetRecvMessageChan(plugin.subscription)\n\tclientCfg.SetInitialOffset(sarama.OffsetNewest)\n\tclientCfg.SetTopics(topic)\n\treturn clientCfg\n}\n<commit_msg>fixed kafka statuscheck<commit_after>\/\/ Copyright (c) 2017 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage kafka\n\nimport (\n\t\"fmt\"\n\t\"github.com\/Shopify\/sarama\"\n\t\"github.com\/ligato\/cn-infra\/db\/keyval\"\n\t\"github.com\/ligato\/cn-infra\/flavors\/local\"\n\t\"github.com\/ligato\/cn-infra\/health\/statuscheck\"\n\t\"github.com\/ligato\/cn-infra\/logging\"\n\t\"github.com\/ligato\/cn-infra\/messaging\"\n\t\"github.com\/ligato\/cn-infra\/messaging\/kafka\/client\"\n\t\"github.com\/ligato\/cn-infra\/messaging\/kafka\/mux\"\n\t\"github.com\/ligato\/cn-infra\/utils\/safeclose\"\n)\n\nconst topic = \"status-check\"\n\n\/\/ Plugin provides API for interaction with kafka brokers.\ntype Plugin struct {\n\tDeps \/\/ inject\n\tmux *mux.Multiplexer\n\tsubscription chan (*client.ConsumerMessage)\n\n\t\/\/ Kafka plugin is using two clients. The first one is using 'hash' (default) partitioner. The second mux\n\t\/\/ uses manual partitioner which allows to send a message to specified partition and watching to desired partition\/offset\n\thsClient sarama.Client\n\tmanClient sarama.Client\n\n\tdisabled bool\n}\n\n\/\/ Deps is here to group injected dependencies of plugin\n\/\/ to not mix with other plugin fields.\ntype Deps struct {\n\tlocal.PluginInfraDeps \/\/inject\n}\n\n\/\/ FromExistingMux is used mainly for testing purposes.\nfunc FromExistingMux(mux *mux.Multiplexer) *Plugin {\n\treturn &Plugin{mux: mux}\n}\n\n\/\/ Init is called at plugin initialization.\nfunc (plugin *Plugin) Init() (err error) {\n\t\/\/ Prepare topic and subscription for status check client\n\tplugin.subscription = make(chan *client.ConsumerMessage)\n\n\t\/\/ Get muxCfg data (contains kafka brokers ip addresses)\n\tmuxCfg := &mux.Config{}\n\tfound, err := plugin.PluginConfig.GetValue(muxCfg)\n\tif !found {\n\t\tplugin.Log.Info(\"kafka config not found \", plugin.PluginConfig.GetConfigName(), \" - skip loading this plugin\")\n\t\tplugin.disabled = true\n\t\treturn nil \/\/skip loading the plugin\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ retrieve clientCfg\n\tclientCfg := plugin.getClientConfig(muxCfg, plugin.Log, topic)\n\n\t\/\/ init 'hash' sarama client\n\tplugin.hsClient, err = client.NewClient(clientCfg, client.Hash)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ init 'manual' sarama client\n\tplugin.manClient, err = client.NewClient(clientCfg, client.Manual)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Initialize both multiplexers to allow both, dynamic and manual mode\n\tif plugin.mux == nil {\n\t\tname := plugin.ServiceLabel.GetAgentLabel() + \"-hash\"\n\t\tplugin.mux, err = mux.InitMultiplexerWithConfig(clientCfg, plugin.hsClient, plugin.manClient, name, plugin.Log)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tplugin.Log.Debug(\"Default multiplexer initialized\")\n\t}\n\n\treturn err\n}\n\n\/\/ AfterInit is called in the second phase of the initialization. The kafka multiplexerNewWatcher\n\/\/ is started, all consumers have to be subscribed until this phase.\nfunc (plugin *Plugin) AfterInit() error {\n\tif plugin.mux != nil {\n\t\terr := plugin.mux.Start()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Register for providing status reports (polling mode)\n\tif plugin.StatusCheck != nil && !plugin.disabled {\n\t\tplugin.StatusCheck.Register(plugin.PluginName, func() (statuscheck.PluginState, error) {\n\t\t\tif plugin.hsClient == nil || plugin.hsClient.Closed() {\n\t\t\t\treturn statuscheck.Error, fmt.Errorf(\"kafka client\/consumer not initialized\")\n\t\t\t}\n\t\t\t\/\/ Method 'RefreshMetadata()' returns error if kafka server is unavailable\n\t\t\terr := plugin.hsClient.RefreshMetadata(topic)\n\t\t\tif err == nil {\n\t\t\t\treturn statuscheck.OK, nil\n\t\t\t}\n\t\t\tplugin.Log.Errorf(\"Kafka server unavailable\")\n\t\t\treturn statuscheck.Error, err\n\t\t})\n\t} else {\n\t\tplugin.Log.Warnf(\"Unable to start status check for kafka\")\n\t}\n\n\treturn nil\n}\n\n\/\/ Close is called at plugin cleanup phase.\nfunc (plugin *Plugin) Close() error {\n\t_, err := safeclose.CloseAll(plugin.hsClient, plugin.manClient, plugin.mux)\n\treturn err\n}\n\n\/\/ NewBytesConnection returns a new instance of a connection to access kafka brokers. The connection allows to create\n\/\/ new kafka providers\/consumers on multiplexer with hash partitioner.\nfunc (plugin *Plugin) NewBytesConnection(name string) *mux.BytesConnection {\n\treturn plugin.mux.NewBytesConnection(name)\n}\n\n\/\/ NewBytesConnectionToPartition returns a new instance of a connection to access kafka brokers. The connection allows to create\n\/\/ new kafka providers\/consumers on multiplexer with manual partitioner which allows to send messages to specific partition\n\/\/ in kafka cluster and watch on partition\/offset.\nfunc (plugin *Plugin) NewBytesConnectionToPartition(name string) *mux.BytesConnection {\n\treturn plugin.mux.NewBytesConnection(name)\n}\n\n\/\/ NewProtoConnection returns a new instance of a connection to access kafka brokers. The connection allows to create\n\/\/ new kafka providers\/consumers on multiplexer with hash partitioner.The connection uses proto-modelled messages.\nfunc (plugin *Plugin) NewProtoConnection(name string) mux.Connection {\n\treturn plugin.mux.NewProtoConnection(name, &keyval.SerializerJSON{})\n}\n\n\/\/ NewProtoManualConnection returns a new instance of a connection to access kafka brokers. The connection allows to create\n\/\/ new kafka providers\/consumers on multiplexer with manual partitioner which allows to send messages to specific partition\n\/\/ in kafka cluster and watch on partition\/offset. The connection uses proto-modelled messages.\nfunc (plugin *Plugin) NewProtoManualConnection(name string) mux.ManualConnection {\n\treturn plugin.mux.NewProtoManualConnection(name, &keyval.SerializerJSON{})\n}\n\n\/\/ NewSyncPublisher creates a publisher that allows to publish messages using synchronous API. The publisher creates\n\/\/ new proto connection on multiplexer with default partitioner.\nfunc (plugin *Plugin) NewSyncPublisher(connectionName string, topic string) (messaging.ProtoPublisher, error) {\n\treturn plugin.NewProtoConnection(connectionName).NewSyncPublisher(topic)\n}\n\n\/\/ NewSyncPublisherToPartition creates a publisher that allows to publish messages to custom partition using synchronous API.\n\/\/ The publisher creates new proto connection on multiplexer with manual partitioner.\nfunc (plugin *Plugin) NewSyncPublisherToPartition(connectionName string, topic string, partition int32) (messaging.ProtoPublisher, error) {\n\treturn plugin.NewProtoManualConnection(connectionName).NewSyncPublisherToPartition(topic, partition)\n}\n\n\/\/ NewAsyncPublisher creates a publisher that allows to publish messages using asynchronous API. The publisher creates\n\/\/ new proto connection on multiplexer with default partitioner.\nfunc (plugin *Plugin) NewAsyncPublisher(connectionName string, topic string, successClb func(messaging.ProtoMessage), errorClb func(messaging.ProtoMessageErr)) (messaging.ProtoPublisher, error) {\n\treturn plugin.NewProtoConnection(connectionName).NewAsyncPublisher(topic, successClb, errorClb)\n}\n\n\/\/ NewAsyncPublisherToPartition creates a publisher that allows to publish messages to custom partition using asynchronous API.\n\/\/ The publisher creates new proto connection on multiplexer with manual partitioner.\nfunc (plugin *Plugin) NewAsyncPublisherToPartition(connectionName string, topic string, partition int32, successClb func(messaging.ProtoMessage), errorClb func(messaging.ProtoMessageErr)) (messaging.ProtoPublisher, error) {\n\treturn plugin.NewProtoManualConnection(connectionName).NewAsyncPublisherToPartition(topic, partition, successClb, errorClb)\n}\n\n\/\/ NewWatcher creates a watcher that allows to start\/stop consuming of messaging published to given topics.\nfunc (plugin *Plugin) NewWatcher(name string) messaging.ProtoWatcher {\n\treturn plugin.NewProtoConnection(name)\n}\n\n\/\/ NewPartitionWatcher creates a watcher that allows to start\/stop consuming of messaging published to given topics, offset and partition\nfunc (plugin *Plugin) NewPartitionWatcher(name string) messaging.ProtoPartitionWatcher {\n\treturn plugin.NewProtoManualConnection(name)\n}\n\n\/\/ Disabled if the plugin config was not found\nfunc (plugin *Plugin) Disabled() (disabled bool) {\n\treturn plugin.disabled\n}\n\n\/\/ Receive client config according to kafka config data\nfunc (plugin *Plugin) getClientConfig(config *mux.Config, logger logging.Logger, topic string) *client.Config {\n\tclientCfg := client.NewConfig(logger)\n\t\/\/ Set brokers obtained from kafka config. In case there are none available, use a default one\n\tif len(config.Addrs) > 0 {\n\t\tclientCfg.SetBrokers(config.Addrs...)\n\t} else {\n\t\tclientCfg.SetBrokers(mux.DefAddress)\n\t}\n\tclientCfg.SetGroup(plugin.ServiceLabel.GetAgentLabel())\n\tclientCfg.SetRecvMessageChan(plugin.subscription)\n\tclientCfg.SetInitialOffset(sarama.OffsetNewest)\n\tclientCfg.SetTopics(topic)\n\treturn clientCfg\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2021 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage controllers\n\nimport (\n\tk8smetrics \"k8s.io\/component-base\/metrics\"\n\t\"k8s.io\/component-base\/metrics\/legacyregistry\"\n)\n\nvar (\n\t\/\/ Metrics provides access to all controllerreadiness metrics.\n\tMetrics = newControllerMetrics()\n)\n\n\/\/ ControllerMetrics includes all the metrics of the proxy server.\ntype ControllerMetrics struct {\n\tcontrollerInstanceCount *k8smetrics.GaugeVec\n}\n\n\/\/ newControllerMetrics create a new ControllerMetrics, configured with default metric names.\nfunc newControllerMetrics() *ControllerMetrics {\n\tcontrollerInstanceCount := k8smetrics.NewGaugeVec(\n\t\t&k8smetrics.GaugeOpts{\n\t\t\tName: \"managed_controller_instance_count\",\n\t\t\tHelp: \"Instances of individual controllers currently running\",\n\t\t},\n\t\t[]string{\"controller_name\", \"controller_manager\"},\n\t)\n\tlegacyregistry.MustRegister(controllerInstanceCount)\n\treturn &ControllerMetrics{\n\t\tcontrollerInstanceCount: controllerInstanceCount,\n\t}\n}\n\n\/\/ ControllerStarted sets the controllerInstanceCount to 1.\nfunc (a *ControllerMetrics) ControllerStarted(controllerName string, controllerManager string) {\n\ta.controllerInstanceCount.With(k8smetrics.Labels{\"controller_name\": controllerName, \"controller_manager\": controllerManager}).Set(float64(1))\n}\n\n\/\/ ControllerStopped sets the controllerInstanceCount to 0.\nfunc (a *ControllerMetrics) ControllerStopped(controllerName string, controllerManager string) {\n\ta.controllerInstanceCount.With(k8smetrics.Labels{\"controller_name\": controllerName, \"controller_manager\": controllerManager}).Set(float64(0))\n}\n<commit_msg>updated to add StabilityLevel<commit_after>\/*\nCopyright 2021 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage controllers\n\nimport (\n\tk8smetrics \"k8s.io\/component-base\/metrics\"\n\t\"k8s.io\/component-base\/metrics\/legacyregistry\"\n)\n\nvar (\n\t\/\/ Metrics provides access to all controllerreadiness metrics.\n\tMetrics = newControllerMetrics()\n)\n\n\/\/ ControllerMetrics includes all the metrics of the proxy server.\ntype ControllerMetrics struct {\n\tcontrollerInstanceCount *k8smetrics.GaugeVec\n}\n\n\/\/ newControllerMetrics create a new ControllerMetrics, configured with default metric names.\nfunc newControllerMetrics() *ControllerMetrics {\n\tcontrollerInstanceCount := k8smetrics.NewGaugeVec(\n\t\t&k8smetrics.GaugeOpts{\n\t\t\tName: \"managed_controller_instance_count\",\n\t\t\tHelp: \"Instances of individual controllers currently running\",\n\t\t\tStabilityLevel: k8smetrics.ALPHA,\n\t\t},\n\t\t[]string{\"controller_name\", \"controller_manager\"},\n\t)\n\tlegacyregistry.MustRegister(controllerInstanceCount)\n\treturn &ControllerMetrics{\n\t\tcontrollerInstanceCount: controllerInstanceCount,\n\t}\n}\n\n\/\/ controller_name and controller_manager should be updated to validate against bounded lists\n\/\/ this will allow us to confirm they are populated with \"good\" values.\n\n\/\/ These values use set instead of inc\/dec to avoid accidentally double counting\n\/\/ a controller that starts but fails to properly signal when it crashes.\n\/\/ ControllerStarted sets the controllerInstanceCount to 1.\nfunc (a *ControllerMetrics) ControllerStarted(controllerName string, controllerManager string) {\n\ta.controllerInstanceCount.With(k8smetrics.Labels{\"controller_name\": controllerName, \"controller_manager\": controllerManager}).Set(float64(1))\n}\n\n\/\/ ControllerStopped sets the controllerInstanceCount to 0.\nfunc (a *ControllerMetrics) ControllerStopped(controllerName string, controllerManager string) {\n\ta.controllerInstanceCount.With(k8smetrics.Labels{\"controller_name\": controllerName, \"controller_manager\": controllerManager}).Set(float64(0))\n}\n<|endoftext|>"} {"text":"<commit_before>package mux\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"time\"\n\n\t\"v2ray.com\/core\/app\"\n\t\"v2ray.com\/core\/app\/dispatcher\"\n\t\"v2ray.com\/core\/app\/log\"\n\t\"v2ray.com\/core\/common\/buf\"\n\t\"v2ray.com\/core\/common\/errors\"\n\t\"v2ray.com\/core\/common\/net\"\n\t\"v2ray.com\/core\/common\/signal\"\n\t\"v2ray.com\/core\/proxy\"\n\t\"v2ray.com\/core\/transport\/ray\"\n)\n\nconst (\n\tmaxParallel = 8\n\tmaxTotal = 128\n)\n\ntype manager interface {\n\tremove(id uint16)\n}\n\ntype session struct {\n\tsync.Mutex\n\tinput ray.InputStream\n\toutput ray.OutputStream\n\tparent manager\n\tid uint16\n\tuplinkClosed bool\n\tdownlinkClosed bool\n}\n\nfunc (s *session) closeUplink() {\n\tvar allDone bool\n\ts.Lock()\n\ts.uplinkClosed = true\n\tallDone = s.uplinkClosed && s.downlinkClosed\n\ts.Unlock()\n\tif allDone {\n\t\ts.parent.remove(s.id)\n\t}\n}\n\nfunc (s *session) closeDownlink() {\n\tvar allDone bool\n\ts.Lock()\n\ts.downlinkClosed = true\n\tallDone = s.uplinkClosed && s.downlinkClosed\n\ts.Unlock()\n\tif allDone {\n\t\ts.parent.remove(s.id)\n\t}\n}\n\ntype ClientManager struct {\n\taccess sync.Mutex\n\tclients []*Client\n\tproxy proxy.Outbound\n\tdialer proxy.Dialer\n}\n\nfunc NewClientManager(p proxy.Outbound, d proxy.Dialer) *ClientManager {\n\treturn &ClientManager{\n\t\tproxy: p,\n\t\tdialer: d,\n\t}\n}\n\nfunc (m *ClientManager) Dispatch(ctx context.Context, outboundRay ray.OutboundRay) error {\n\tm.access.Lock()\n\tdefer m.access.Unlock()\n\n\tfor _, client := range m.clients {\n\t\tif client.Dispatch(ctx, outboundRay) {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tclient, err := NewClient(m.proxy, m.dialer, m)\n\tif err != nil {\n\t\treturn errors.Base(err).Message(\"Proxyman|Mux|ClientManager: Failed to create client.\")\n\t}\n\tm.clients = append(m.clients, client)\n\tclient.Dispatch(ctx, outboundRay)\n\treturn nil\n}\n\nfunc (m *ClientManager) onClientFinish() {\n\tm.access.Lock()\n\tdefer m.access.Unlock()\n\n\tif len(m.clients) < 10 {\n\t\treturn\n\t}\n\n\tactiveClients := make([]*Client, 0, len(m.clients))\n\n\tfor _, client := range m.clients {\n\t\tif !client.Closed() {\n\t\t\tactiveClients = append(activeClients, client)\n\t\t}\n\t}\n\tm.clients = activeClients\n}\n\ntype Client struct {\n\taccess sync.RWMutex\n\tcount uint16\n\tsessions map[uint16]*session\n\tinboundRay ray.InboundRay\n\tctx context.Context\n\tcancel context.CancelFunc\n\tmanager *ClientManager\n}\n\nvar muxCoolDestination = net.TCPDestination(net.DomainAddress(\"v1.mux.cool\"), net.Port(9527))\n\nfunc NewClient(p proxy.Outbound, dialer proxy.Dialer, m *ClientManager) (*Client, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tctx = proxy.ContextWithTarget(ctx, muxCoolDestination)\n\tpipe := ray.NewRay(ctx)\n\tgo p.Process(ctx, pipe, dialer)\n\tc := &Client{\n\t\tsessions: make(map[uint16]*session, 256),\n\t\tinboundRay: pipe,\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tmanager: m,\n\t\tcount: 0,\n\t}\n\tgo c.fetchOutput()\n\treturn c, nil\n}\n\nfunc (m *Client) remove(id uint16) {\n\tm.access.Lock()\n\tdefer m.access.Unlock()\n\n\tdelete(m.sessions, id)\n\n\tif len(m.sessions) == 0 {\n\t\tm.cancel()\n\t\tm.inboundRay.InboundInput().Close()\n\t\tgo m.manager.onClientFinish()\n\t}\n}\n\nfunc (m *Client) Closed() bool {\n\tselect {\n\tcase <-m.ctx.Done():\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc fetchInput(ctx context.Context, s *session, output buf.Writer) {\n\tdest, _ := proxy.TargetFromContext(ctx)\n\twriter := &Writer{\n\t\tdest: dest,\n\t\tid: s.id,\n\t\twriter: output,\n\t}\n\tdefer writer.Close()\n\tdefer s.closeUplink()\n\n\tlog.Info(\"Proxyman|Mux|Client: Dispatching request to \", dest)\n\tdata, _ := s.input.ReadTimeout(time.Millisecond * 500)\n\tif data != nil {\n\t\tif err := writer.Write(data); err != nil {\n\t\t\tlog.Info(\"Proxyman|Mux|Client: Failed to write first payload: \", err)\n\t\t\treturn\n\t\t}\n\t}\n\tif err := buf.PipeUntilEOF(signal.BackgroundTimer(), s.input, writer); err != nil {\n\t\tlog.Info(\"Proxyman|Mux|Client: Failed to fetch all input: \", err)\n\t}\n}\n\nfunc (m *Client) Dispatch(ctx context.Context, outboundRay ray.OutboundRay) bool {\n\tm.access.Lock()\n\tdefer m.access.Unlock()\n\n\tif len(m.sessions) >= maxParallel {\n\t\treturn false\n\t}\n\n\tif m.count >= maxTotal {\n\t\treturn false\n\t}\n\n\tselect {\n\tcase <-m.ctx.Done():\n\t\treturn false\n\tdefault:\n\t}\n\n\tm.count++\n\tid := m.count\n\ts := &session{\n\t\tinput: outboundRay.OutboundInput(),\n\t\toutput: outboundRay.OutboundOutput(),\n\t\tparent: m,\n\t\tid: id,\n\t}\n\tm.sessions[id] = s\n\tgo fetchInput(ctx, s, m.inboundRay.InboundInput())\n\treturn true\n}\n\nfunc drain(reader *Reader) error {\n\tfor {\n\t\tdata, more, err := reader.Read()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdata.Release()\n\t\tif !more {\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc pipe(reader *Reader, writer buf.Writer) error {\n\tfor {\n\t\tdata, more, err := reader.Read()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := writer.Write(data); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !more {\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (m *Client) fetchOutput() {\n\treader := NewReader(m.inboundRay.InboundOutput())\n\tfor {\n\t\tmeta, err := reader.ReadMetadata()\n\t\tif err != nil {\n\t\t\tlog.Info(\"Proxyman|Mux|Client: Failed to read metadata: \", err)\n\t\t\tbreak\n\t\t}\n\t\tm.access.RLock()\n\t\ts, found := m.sessions[meta.SessionID]\n\t\tm.access.RUnlock()\n\t\tif found && meta.SessionStatus == SessionStatusEnd {\n\t\t\ts.closeDownlink()\n\t\t\ts.output.Close()\n\t\t}\n\t\tif !meta.Option.Has(OptionData) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif found {\n\t\t\terr = pipe(reader, s.output)\n\t\t} else {\n\t\t\terr = drain(reader)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Info(\"Proxyman|Mux|Client: Failed to read data: \", err)\n\t\t\tbreak\n\t\t}\n\t}\n}\n\ntype Server struct {\n\tdispatcher dispatcher.Interface\n}\n\nfunc NewServer(ctx context.Context) *Server {\n\ts := &Server{}\n\tspace := app.SpaceFromContext(ctx)\n\tspace.OnInitialize(func() error {\n\t\td := dispatcher.FromSpace(space)\n\t\tif d == nil {\n\t\t\treturn errors.New(\"Proxyman|Mux: No dispatcher in space.\")\n\t\t}\n\t\ts.dispatcher = d\n\t\treturn nil\n\t})\n\treturn s\n}\n\nfunc (s *Server) Dispatch(ctx context.Context, dest net.Destination) (ray.InboundRay, error) {\n\tif dest != muxCoolDestination {\n\t\treturn s.dispatcher.Dispatch(ctx, dest)\n\t}\n\n\tray := ray.NewRay(ctx)\n\tworker := &ServerWorker{\n\t\tdispatcher: s.dispatcher,\n\t\toutboundRay: ray,\n\t\tsessions: make(map[uint16]*session),\n\t}\n\tgo worker.run(ctx)\n\treturn ray, nil\n}\n\ntype ServerWorker struct {\n\tdispatcher dispatcher.Interface\n\toutboundRay ray.OutboundRay\n\tsessions map[uint16]*session\n\taccess sync.RWMutex\n}\n\nfunc (w *ServerWorker) remove(id uint16) {\n\tw.access.Lock()\n\tdelete(w.sessions, id)\n\tw.access.Unlock()\n}\n\nfunc handle(ctx context.Context, s *session, output buf.Writer) {\n\twriter := NewResponseWriter(s.id, output)\n\tif err := buf.PipeUntilEOF(signal.BackgroundTimer(), s.input, writer); err != nil {\n\t\tlog.Info(\"Proxyman|Mux|ServerWorker: Session \", s.id, \" ends: \", err)\n\t}\n\twriter.Close()\n\ts.closeDownlink()\n}\n\nfunc (w *ServerWorker) run(ctx context.Context) {\n\tinput := w.outboundRay.OutboundInput()\n\treader := NewReader(input)\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\tmeta, err := reader.ReadMetadata()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tw.access.RLock()\n\t\ts, found := w.sessions[meta.SessionID]\n\t\tw.access.RUnlock()\n\n\t\tif found && meta.SessionStatus == SessionStatusEnd {\n\t\t\ts.closeUplink()\n\t\t\ts.output.Close()\n\t\t}\n\n\t\tif meta.SessionStatus == SessionStatusNew {\n\t\t\tlog.Info(\"Proxyman|Mux|Server: Received request for \", meta.Target)\n\t\t\tinboundRay, err := w.dispatcher.Dispatch(ctx, meta.Target)\n\t\t\tif err != nil {\n\t\t\t\tlog.Info(\"Proxyman|Mux: Failed to dispatch request: \", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ts = &session{\n\t\t\t\tinput: inboundRay.InboundOutput(),\n\t\t\t\toutput: inboundRay.InboundInput(),\n\t\t\t\tparent: w,\n\t\t\t\tid: meta.SessionID,\n\t\t\t}\n\t\t\tw.access.Lock()\n\t\t\tw.sessions[meta.SessionID] = s\n\t\t\tw.access.Unlock()\n\t\t\tgo handle(ctx, s, w.outboundRay.OutboundOutput())\n\t\t}\n\n\t\tif !meta.Option.Has(OptionData) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif s != nil {\n\t\t\terr = pipe(reader, s.output)\n\t\t} else {\n\t\t\terr = drain(reader)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Info(\"Proxyman|Mux|ServerWorker: Failed to read data: \", err)\n\t\t\tbreak\n\t\t}\n\t}\n}\n<commit_msg>wait for done<commit_after>package mux\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"time\"\n\n\t\"v2ray.com\/core\/app\"\n\t\"v2ray.com\/core\/app\/dispatcher\"\n\t\"v2ray.com\/core\/app\/log\"\n\t\"v2ray.com\/core\/common\/buf\"\n\t\"v2ray.com\/core\/common\/errors\"\n\t\"v2ray.com\/core\/common\/net\"\n\t\"v2ray.com\/core\/common\/signal\"\n\t\"v2ray.com\/core\/proxy\"\n\t\"v2ray.com\/core\/transport\/ray\"\n)\n\nconst (\n\tmaxParallel = 8\n\tmaxTotal = 128\n)\n\ntype manager interface {\n\tremove(id uint16)\n}\n\ntype session struct {\n\tsync.Mutex\n\tinput ray.InputStream\n\toutput ray.OutputStream\n\tparent manager\n\tid uint16\n\tuplinkClosed bool\n\tdownlinkClosed bool\n}\n\nfunc (s *session) closeUplink() {\n\tvar allDone bool\n\ts.Lock()\n\ts.uplinkClosed = true\n\tallDone = s.uplinkClosed && s.downlinkClosed\n\ts.Unlock()\n\tif allDone {\n\t\ts.parent.remove(s.id)\n\t}\n}\n\nfunc (s *session) closeDownlink() {\n\tvar allDone bool\n\ts.Lock()\n\ts.downlinkClosed = true\n\tallDone = s.uplinkClosed && s.downlinkClosed\n\ts.Unlock()\n\tif allDone {\n\t\ts.parent.remove(s.id)\n\t}\n}\n\ntype ClientManager struct {\n\taccess sync.Mutex\n\tclients []*Client\n\tproxy proxy.Outbound\n\tdialer proxy.Dialer\n}\n\nfunc NewClientManager(p proxy.Outbound, d proxy.Dialer) *ClientManager {\n\treturn &ClientManager{\n\t\tproxy: p,\n\t\tdialer: d,\n\t}\n}\n\nfunc (m *ClientManager) Dispatch(ctx context.Context, outboundRay ray.OutboundRay) error {\n\tm.access.Lock()\n\tdefer m.access.Unlock()\n\n\tfor _, client := range m.clients {\n\t\tif client.Dispatch(ctx, outboundRay) {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tclient, err := NewClient(m.proxy, m.dialer, m)\n\tif err != nil {\n\t\treturn errors.Base(err).Message(\"Proxyman|Mux|ClientManager: Failed to create client.\")\n\t}\n\tm.clients = append(m.clients, client)\n\tclient.Dispatch(ctx, outboundRay)\n\treturn nil\n}\n\nfunc (m *ClientManager) onClientFinish() {\n\tm.access.Lock()\n\tdefer m.access.Unlock()\n\n\tif len(m.clients) < 10 {\n\t\treturn\n\t}\n\n\tactiveClients := make([]*Client, 0, len(m.clients))\n\n\tfor _, client := range m.clients {\n\t\tif !client.Closed() {\n\t\t\tactiveClients = append(activeClients, client)\n\t\t}\n\t}\n\tm.clients = activeClients\n}\n\ntype Client struct {\n\taccess sync.RWMutex\n\tcount uint16\n\tsessions map[uint16]*session\n\tinboundRay ray.InboundRay\n\tctx context.Context\n\tcancel context.CancelFunc\n\tmanager *ClientManager\n}\n\nvar muxCoolDestination = net.TCPDestination(net.DomainAddress(\"v1.mux.cool\"), net.Port(9527))\n\nfunc NewClient(p proxy.Outbound, dialer proxy.Dialer, m *ClientManager) (*Client, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tctx = proxy.ContextWithTarget(ctx, muxCoolDestination)\n\tpipe := ray.NewRay(ctx)\n\tgo p.Process(ctx, pipe, dialer)\n\tc := &Client{\n\t\tsessions: make(map[uint16]*session, 256),\n\t\tinboundRay: pipe,\n\t\tctx: ctx,\n\t\tcancel: cancel,\n\t\tmanager: m,\n\t\tcount: 0,\n\t}\n\tgo c.fetchOutput()\n\treturn c, nil\n}\n\nfunc (m *Client) remove(id uint16) {\n\tm.access.Lock()\n\tdefer m.access.Unlock()\n\n\tdelete(m.sessions, id)\n\n\tif len(m.sessions) == 0 {\n\t\tm.cancel()\n\t\tm.inboundRay.InboundInput().Close()\n\t\tgo m.manager.onClientFinish()\n\t}\n}\n\nfunc (m *Client) Closed() bool {\n\tselect {\n\tcase <-m.ctx.Done():\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc fetchInput(ctx context.Context, s *session, output buf.Writer) {\n\tdest, _ := proxy.TargetFromContext(ctx)\n\twriter := &Writer{\n\t\tdest: dest,\n\t\tid: s.id,\n\t\twriter: output,\n\t}\n\tdefer writer.Close()\n\tdefer s.closeUplink()\n\n\tlog.Info(\"Proxyman|Mux|Client: Dispatching request to \", dest)\n\tdata, _ := s.input.ReadTimeout(time.Millisecond * 500)\n\tif data != nil {\n\t\tif err := writer.Write(data); err != nil {\n\t\t\tlog.Info(\"Proxyman|Mux|Client: Failed to write first payload: \", err)\n\t\t\treturn\n\t\t}\n\t}\n\tif err := buf.PipeUntilEOF(signal.BackgroundTimer(), s.input, writer); err != nil {\n\t\tlog.Info(\"Proxyman|Mux|Client: Failed to fetch all input: \", err)\n\t}\n}\n\nfunc waitForDone(ctx context.Context, s *session) {\n\t<-ctx.Done()\n\ts.closeUplink()\n\ts.closeDownlink()\n\ts.output.Close()\n}\n\nfunc (m *Client) Dispatch(ctx context.Context, outboundRay ray.OutboundRay) bool {\n\tm.access.Lock()\n\tdefer m.access.Unlock()\n\n\tif len(m.sessions) >= maxParallel {\n\t\treturn false\n\t}\n\n\tif m.count >= maxTotal {\n\t\treturn false\n\t}\n\n\tselect {\n\tcase <-m.ctx.Done():\n\t\treturn false\n\tdefault:\n\t}\n\n\tm.count++\n\tid := m.count\n\ts := &session{\n\t\tinput: outboundRay.OutboundInput(),\n\t\toutput: outboundRay.OutboundOutput(),\n\t\tparent: m,\n\t\tid: id,\n\t}\n\tm.sessions[id] = s\n\tgo fetchInput(ctx, s, m.inboundRay.InboundInput())\n\tgo waitForDone(ctx, s)\n\treturn true\n}\n\nfunc drain(reader *Reader) error {\n\tfor {\n\t\tdata, more, err := reader.Read()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdata.Release()\n\t\tif !more {\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc pipe(reader *Reader, writer buf.Writer) error {\n\tfor {\n\t\tdata, more, err := reader.Read()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := writer.Write(data); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !more {\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (m *Client) fetchOutput() {\n\treader := NewReader(m.inboundRay.InboundOutput())\n\tfor {\n\t\tmeta, err := reader.ReadMetadata()\n\t\tif err != nil {\n\t\t\tlog.Info(\"Proxyman|Mux|Client: Failed to read metadata: \", err)\n\t\t\tbreak\n\t\t}\n\t\tm.access.RLock()\n\t\ts, found := m.sessions[meta.SessionID]\n\t\tm.access.RUnlock()\n\t\tif found && meta.SessionStatus == SessionStatusEnd {\n\t\t\ts.closeDownlink()\n\t\t\ts.output.Close()\n\t\t}\n\t\tif !meta.Option.Has(OptionData) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif found {\n\t\t\terr = pipe(reader, s.output)\n\t\t} else {\n\t\t\terr = drain(reader)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Info(\"Proxyman|Mux|Client: Failed to read data: \", err)\n\t\t\tbreak\n\t\t}\n\t}\n}\n\ntype Server struct {\n\tdispatcher dispatcher.Interface\n}\n\nfunc NewServer(ctx context.Context) *Server {\n\ts := &Server{}\n\tspace := app.SpaceFromContext(ctx)\n\tspace.OnInitialize(func() error {\n\t\td := dispatcher.FromSpace(space)\n\t\tif d == nil {\n\t\t\treturn errors.New(\"Proxyman|Mux: No dispatcher in space.\")\n\t\t}\n\t\ts.dispatcher = d\n\t\treturn nil\n\t})\n\treturn s\n}\n\nfunc (s *Server) Dispatch(ctx context.Context, dest net.Destination) (ray.InboundRay, error) {\n\tif dest != muxCoolDestination {\n\t\treturn s.dispatcher.Dispatch(ctx, dest)\n\t}\n\n\tray := ray.NewRay(ctx)\n\tworker := &ServerWorker{\n\t\tdispatcher: s.dispatcher,\n\t\toutboundRay: ray,\n\t\tsessions: make(map[uint16]*session),\n\t}\n\tgo worker.run(ctx)\n\treturn ray, nil\n}\n\ntype ServerWorker struct {\n\tdispatcher dispatcher.Interface\n\toutboundRay ray.OutboundRay\n\tsessions map[uint16]*session\n\taccess sync.RWMutex\n}\n\nfunc (w *ServerWorker) remove(id uint16) {\n\tw.access.Lock()\n\tdelete(w.sessions, id)\n\tw.access.Unlock()\n}\n\nfunc handle(ctx context.Context, s *session, output buf.Writer) {\n\twriter := NewResponseWriter(s.id, output)\n\tif err := buf.PipeUntilEOF(signal.BackgroundTimer(), s.input, writer); err != nil {\n\t\tlog.Info(\"Proxyman|Mux|ServerWorker: Session \", s.id, \" ends: \", err)\n\t}\n\twriter.Close()\n\ts.closeDownlink()\n}\n\nfunc (w *ServerWorker) run(ctx context.Context) {\n\tinput := w.outboundRay.OutboundInput()\n\treader := NewReader(input)\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\tmeta, err := reader.ReadMetadata()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tw.access.RLock()\n\t\ts, found := w.sessions[meta.SessionID]\n\t\tw.access.RUnlock()\n\n\t\tif found && meta.SessionStatus == SessionStatusEnd {\n\t\t\ts.closeUplink()\n\t\t\ts.output.Close()\n\t\t}\n\n\t\tif meta.SessionStatus == SessionStatusNew {\n\t\t\tlog.Info(\"Proxyman|Mux|Server: Received request for \", meta.Target)\n\t\t\tinboundRay, err := w.dispatcher.Dispatch(ctx, meta.Target)\n\t\t\tif err != nil {\n\t\t\t\tlog.Info(\"Proxyman|Mux: Failed to dispatch request: \", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ts = &session{\n\t\t\t\tinput: inboundRay.InboundOutput(),\n\t\t\t\toutput: inboundRay.InboundInput(),\n\t\t\t\tparent: w,\n\t\t\t\tid: meta.SessionID,\n\t\t\t}\n\t\t\tw.access.Lock()\n\t\t\tw.sessions[meta.SessionID] = s\n\t\t\tw.access.Unlock()\n\t\t\tgo handle(ctx, s, w.outboundRay.OutboundOutput())\n\t\t}\n\n\t\tif !meta.Option.Has(OptionData) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif s != nil {\n\t\t\terr = pipe(reader, s.output)\n\t\t} else {\n\t\t\terr = drain(reader)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Info(\"Proxyman|Mux|ServerWorker: Failed to read data: \", err)\n\t\t\tbreak\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com\/bwmarrin\/dgvoice\"\n\t\"github.com\/bwmarrin\/discordgo\"\n)\n\nfunc main() {\n\n\t\/\/ NOTE: All of the below fields are required for this example to work correctly.\n\tvar (\n\t\tEmail = flag.String(\"e\", \"\", \"Discord account email.\")\n\t\tPassword = flag.String(\"p\", \"\", \"Discord account password.\")\n\t\tGuildID = flag.String(\"g\", \"\", \"Guild ID\")\n\t\tChannelID = flag.String(\"c\", \"\", \"Channel ID\")\n\t\terr error\n\t)\n\tflag.Parse()\n\n\t\/\/ Connect to Discord\n\tdiscord, err := discordgo.New(*Email, *Password)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\t\/\/ Open Websocket\n\terr = discord.Open()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\t\/\/ Connect to voice channel.\n\t\/\/ NOTE: Setting mute to false, deaf to true.\n\tdgv, err := discord.ChannelVoiceJoin(*GuildID, *ChannelID, false, true)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\t\/\/ Starts echo\n\tEcho(dgv)\n\n\t\/\/ Close connections\n\tdgv.Close()\n\tdiscord.Close()\n\n\treturn\n}\n\n\/\/ Takes inbound audio and sends it right back out.\nfunc Echo(v *discordgo.VoiceConnection) {\n\n\trecv := make(chan *discordgo.Packet, 2)\n\tgo dgvoice.ReceivePCM(v, recv)\n\n\tsend := make(chan []int16, 2)\n\tgo dgvoice.SendPCM(v, send)\n\n\tv.Speaking(true)\n\tdefer v.Speaking(false)\n\n\tfor {\n\n\t\tp, ok := <-recv\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\tsend <- p.PCM\n\t}\n}\n<commit_msg>Unexport Echo command.<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com\/bwmarrin\/dgvoice\"\n\t\"github.com\/bwmarrin\/discordgo\"\n)\n\nfunc main() {\n\n\t\/\/ NOTE: All of the below fields are required for this example to work correctly.\n\tvar (\n\t\tEmail = flag.String(\"e\", \"\", \"Discord account email.\")\n\t\tPassword = flag.String(\"p\", \"\", \"Discord account password.\")\n\t\tGuildID = flag.String(\"g\", \"\", \"Guild ID\")\n\t\tChannelID = flag.String(\"c\", \"\", \"Channel ID\")\n\t\terr error\n\t)\n\tflag.Parse()\n\n\t\/\/ Connect to Discord\n\tdiscord, err := discordgo.New(*Email, *Password)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\t\/\/ Open Websocket\n\terr = discord.Open()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\t\/\/ Connect to voice channel.\n\t\/\/ NOTE: Setting mute to false, deaf to true.\n\tdgv, err := discord.ChannelVoiceJoin(*GuildID, *ChannelID, false, true)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\t\/\/ Starts echo\n\techo(dgv)\n\n\t\/\/ Close connections\n\tdgv.Close()\n\tdiscord.Close()\n\n\treturn\n}\n\n\/\/ Takes inbound audio and sends it right back out.\nfunc echo(v *discordgo.VoiceConnection) {\n\n\trecv := make(chan *discordgo.Packet, 2)\n\tgo dgvoice.ReceivePCM(v, recv)\n\n\tsend := make(chan []int16, 2)\n\tgo dgvoice.SendPCM(v, send)\n\n\tv.Speaking(true)\n\tdefer v.Speaking(false)\n\n\tfor {\n\n\t\tp, ok := <-recv\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\tsend <- p.PCM\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cloudprovider\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n)\n\n\/\/ Interface is an abstract, pluggable interface for cloud providers.\ntype Interface interface {\n\t\/\/ LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise.\n\tLoadBalancer() (LoadBalancer, bool)\n\t\/\/ Instances returns an instances interface. Also returns true if the interface is supported, false otherwise.\n\tInstances() (Instances, bool)\n\t\/\/ Zones returns a zones interface. Also returns true if the interface is supported, false otherwise.\n\tZones() (Zones, bool)\n\t\/\/ Clusters returns a clusters interface. Also returns true if the interface is supported, false otherwise.\n\tClusters() (Clusters, bool)\n\t\/\/ Routes returns a routes interface along with whether the interface is supported.\n\tRoutes() (Routes, bool)\n\t\/\/ ProviderName returns the cloud provider ID.\n\tProviderName() string\n\t\/\/ ScrubDNS provides an opportunity for cloud-provider-specific code to process DNS settings for pods.\n\tScrubDNS(nameservers, searches []string) (nsOut, srchOut []string)\n}\n\n\/\/ Clusters is an abstract, pluggable interface for clusters of containers.\ntype Clusters interface {\n\t\/\/ ListClusters lists the names of the available clusters.\n\tListClusters() ([]string, error)\n\t\/\/ Master gets back the address (either DNS name or IP address) of the master node for the cluster.\n\tMaster(clusterName string) (string, error)\n}\n\n\/\/ TODO(#6812): Use a shorter name that's less likely to be longer than cloud\n\/\/ providers' name length limits.\nfunc GetLoadBalancerName(service *api.Service) string {\n\t\/\/GCE requires that the name of a load balancer starts with a lower case letter.\n\tret := \"a\" + string(service.UID)\n\tret = strings.Replace(ret, \"-\", \"\", -1)\n\t\/\/AWS requires that the name of a load balancer is shorter than 32 bytes.\n\tif len(ret) > 32 {\n\t\tret = ret[:32]\n\t}\n\treturn ret\n}\n\nfunc GetInstanceProviderID(cloud Interface, nodeName string) (string, error) {\n\tinstances, ok := cloud.Instances()\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"failed to get instances from cloud provider\")\n\t}\n\tinstanceID, err := instances.InstanceID(nodeName)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to get instance ID from cloud provider: %v\", err)\n\t}\n\treturn cloud.ProviderName() + \":\/\/\" + instanceID, nil\n}\n\n\/\/ LoadBalancer is an abstract, pluggable interface for load balancers.\ntype LoadBalancer interface {\n\t\/\/ TODO: Break this up into different interfaces (LB, etc) when we have more than one type of service\n\t\/\/ GetLoadBalancer returns whether the specified load balancer exists, and\n\t\/\/ if so, what its status is.\n\t\/\/ Implementations must treat the *api.Service parameter as read-only and not modify it.\n\tGetLoadBalancer(service *api.Service) (status *api.LoadBalancerStatus, exists bool, err error)\n\t\/\/ EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer\n\t\/\/ Implementations must treat the *api.Service parameter as read-only and not modify it.\n\tEnsureLoadBalancer(service *api.Service, hosts []string) (*api.LoadBalancerStatus, error)\n\t\/\/ UpdateLoadBalancer updates hosts under the specified load balancer.\n\t\/\/ Implementations must treat the *api.Service parameter as read-only and not modify it.\n\tUpdateLoadBalancer(service *api.Service, hosts []string) error\n\t\/\/ EnsureLoadBalancerDeleted deletes the specified load balancer if it\n\t\/\/ exists, returning nil if the load balancer specified either didn't exist or\n\t\/\/ was successfully deleted.\n\t\/\/ This construction is useful because many cloud providers' load balancers\n\t\/\/ have multiple underlying components, meaning a Get could say that the LB\n\t\/\/ doesn't exist even if some part of it is still laying around.\n\t\/\/ Implementations must treat the *api.Service parameter as read-only and not modify it.\n\tEnsureLoadBalancerDeleted(service *api.Service) error\n}\n\n\/\/ Instances is an abstract, pluggable interface for sets of instances.\ntype Instances interface {\n\t\/\/ NodeAddresses returns the addresses of the specified instance.\n\t\/\/ TODO(roberthbailey): This currently is only used in such a way that it\n\t\/\/ returns the address of the calling instance. We should do a rename to\n\t\/\/ make this clearer.\n\tNodeAddresses(name string) ([]api.NodeAddress, error)\n\t\/\/ ExternalID returns the cloud provider ID of the specified instance (deprecated).\n\tExternalID(name string) (string, error)\n\t\/\/ InstanceID returns the cloud provider ID of the specified instance.\n\t\/\/ Note that if the instance does not exist or is no longer running, we must return (\"\", cloudprovider.InstanceNotFound)\n\tInstanceID(name string) (string, error)\n\t\/\/ InstanceType returns the type of the specified instance.\n\t\/\/ Note that if the instance does not exist or is no longer running, we must return (\"\", cloudprovider.InstanceNotFound)\n\tInstanceType(name string) (string, error)\n\t\/\/ List lists instances that match 'filter' which is a regular expression which must match the entire instance name (fqdn)\n\tList(filter string) ([]string, error)\n\t\/\/ AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances\n\t\/\/ expected format for the key is standard ssh-keygen format: <protocol> <blob>\n\tAddSSHKeyToAllInstances(user string, keyData []byte) error\n\t\/\/ CurrentNodeName returns the name of the node we are currently running on\n\t\/\/ On most clouds (e.g. GCE) this is the hostname, so we provide the hostname\n\tCurrentNodeName(hostname string) (string, error)\n}\n\n\/\/ Route is a representation of an advanced routing rule.\ntype Route struct {\n\t\/\/ Name is the name of the routing rule in the cloud-provider.\n\t\/\/ It will be ignored in a Create (although nameHint may influence it)\n\tName string\n\t\/\/ TargetInstance is the name of the instance as specified in routing rules\n\t\/\/ for the cloud-provider (in gce: the Instance Name).\n\tTargetInstance string\n\t\/\/ DestinationCIDR is the CIDR format IP range that this routing rule\n\t\/\/ applies to.\n\tDestinationCIDR string\n}\n\n\/\/ Routes is an abstract, pluggable interface for advanced routing rules.\ntype Routes interface {\n\t\/\/ ListRoutes lists all managed routes that belong to the specified clusterName\n\tListRoutes(clusterName string) ([]*Route, error)\n\t\/\/ CreateRoute creates the described managed route\n\t\/\/ route.Name will be ignored, although the cloud-provider may use nameHint\n\t\/\/ to create a more user-meaningful name.\n\tCreateRoute(clusterName string, nameHint string, route *Route) error\n\t\/\/ DeleteRoute deletes the specified managed route\n\t\/\/ Route should be as returned by ListRoutes\n\tDeleteRoute(clusterName string, route *Route) error\n}\n\nvar InstanceNotFound = errors.New(\"instance not found\")\n\n\/\/ Zone represents the location of a particular machine.\ntype Zone struct {\n\tFailureDomain string\n\tRegion string\n}\n\n\/\/ Zones is an abstract, pluggable interface for zone enumeration.\ntype Zones interface {\n\t\/\/ GetZone returns the Zone containing the current failure zone and locality region that the program is running in\n\tGetZone() (Zone, error)\n}\n<commit_msg>Correctly document cloudprovider Instances contract<commit_after>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cloudprovider\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n)\n\n\/\/ Interface is an abstract, pluggable interface for cloud providers.\ntype Interface interface {\n\t\/\/ LoadBalancer returns a balancer interface. Also returns true if the interface is supported, false otherwise.\n\tLoadBalancer() (LoadBalancer, bool)\n\t\/\/ Instances returns an instances interface. Also returns true if the interface is supported, false otherwise.\n\tInstances() (Instances, bool)\n\t\/\/ Zones returns a zones interface. Also returns true if the interface is supported, false otherwise.\n\tZones() (Zones, bool)\n\t\/\/ Clusters returns a clusters interface. Also returns true if the interface is supported, false otherwise.\n\tClusters() (Clusters, bool)\n\t\/\/ Routes returns a routes interface along with whether the interface is supported.\n\tRoutes() (Routes, bool)\n\t\/\/ ProviderName returns the cloud provider ID.\n\tProviderName() string\n\t\/\/ ScrubDNS provides an opportunity for cloud-provider-specific code to process DNS settings for pods.\n\tScrubDNS(nameservers, searches []string) (nsOut, srchOut []string)\n}\n\n\/\/ Clusters is an abstract, pluggable interface for clusters of containers.\ntype Clusters interface {\n\t\/\/ ListClusters lists the names of the available clusters.\n\tListClusters() ([]string, error)\n\t\/\/ Master gets back the address (either DNS name or IP address) of the master node for the cluster.\n\tMaster(clusterName string) (string, error)\n}\n\n\/\/ TODO(#6812): Use a shorter name that's less likely to be longer than cloud\n\/\/ providers' name length limits.\nfunc GetLoadBalancerName(service *api.Service) string {\n\t\/\/GCE requires that the name of a load balancer starts with a lower case letter.\n\tret := \"a\" + string(service.UID)\n\tret = strings.Replace(ret, \"-\", \"\", -1)\n\t\/\/AWS requires that the name of a load balancer is shorter than 32 bytes.\n\tif len(ret) > 32 {\n\t\tret = ret[:32]\n\t}\n\treturn ret\n}\n\nfunc GetInstanceProviderID(cloud Interface, nodeName string) (string, error) {\n\tinstances, ok := cloud.Instances()\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"failed to get instances from cloud provider\")\n\t}\n\tinstanceID, err := instances.InstanceID(nodeName)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to get instance ID from cloud provider: %v\", err)\n\t}\n\treturn cloud.ProviderName() + \":\/\/\" + instanceID, nil\n}\n\n\/\/ LoadBalancer is an abstract, pluggable interface for load balancers.\ntype LoadBalancer interface {\n\t\/\/ TODO: Break this up into different interfaces (LB, etc) when we have more than one type of service\n\t\/\/ GetLoadBalancer returns whether the specified load balancer exists, and\n\t\/\/ if so, what its status is.\n\t\/\/ Implementations must treat the *api.Service parameter as read-only and not modify it.\n\tGetLoadBalancer(service *api.Service) (status *api.LoadBalancerStatus, exists bool, err error)\n\t\/\/ EnsureLoadBalancer creates a new load balancer 'name', or updates the existing one. Returns the status of the balancer\n\t\/\/ Implementations must treat the *api.Service parameter as read-only and not modify it.\n\tEnsureLoadBalancer(service *api.Service, hosts []string) (*api.LoadBalancerStatus, error)\n\t\/\/ UpdateLoadBalancer updates hosts under the specified load balancer.\n\t\/\/ Implementations must treat the *api.Service parameter as read-only and not modify it.\n\tUpdateLoadBalancer(service *api.Service, hosts []string) error\n\t\/\/ EnsureLoadBalancerDeleted deletes the specified load balancer if it\n\t\/\/ exists, returning nil if the load balancer specified either didn't exist or\n\t\/\/ was successfully deleted.\n\t\/\/ This construction is useful because many cloud providers' load balancers\n\t\/\/ have multiple underlying components, meaning a Get could say that the LB\n\t\/\/ doesn't exist even if some part of it is still laying around.\n\t\/\/ Implementations must treat the *api.Service parameter as read-only and not modify it.\n\tEnsureLoadBalancerDeleted(service *api.Service) error\n}\n\n\/\/ Instances is an abstract, pluggable interface for sets of instances.\ntype Instances interface {\n\t\/\/ NodeAddresses returns the addresses of the specified instance.\n\t\/\/ TODO(roberthbailey): This currently is only used in such a way that it\n\t\/\/ returns the address of the calling instance. We should do a rename to\n\t\/\/ make this clearer.\n\tNodeAddresses(name string) ([]api.NodeAddress, error)\n\t\/\/ ExternalID returns the cloud provider ID of the specified instance (deprecated).\n\t\/\/ Note that if the instance does not exist or is no longer running, we must return (\"\", cloudprovider.InstanceNotFound)\n\tExternalID(name string) (string, error)\n\t\/\/ InstanceID returns the cloud provider ID of the specified instance.\n\tInstanceID(name string) (string, error)\n\t\/\/ InstanceType returns the type of the specified instance.\n\tInstanceType(name string) (string, error)\n\t\/\/ List lists instances that match 'filter' which is a regular expression which must match the entire instance name (fqdn)\n\tList(filter string) ([]string, error)\n\t\/\/ AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances\n\t\/\/ expected format for the key is standard ssh-keygen format: <protocol> <blob>\n\tAddSSHKeyToAllInstances(user string, keyData []byte) error\n\t\/\/ CurrentNodeName returns the name of the node we are currently running on\n\t\/\/ On most clouds (e.g. GCE) this is the hostname, so we provide the hostname\n\tCurrentNodeName(hostname string) (string, error)\n}\n\n\/\/ Route is a representation of an advanced routing rule.\ntype Route struct {\n\t\/\/ Name is the name of the routing rule in the cloud-provider.\n\t\/\/ It will be ignored in a Create (although nameHint may influence it)\n\tName string\n\t\/\/ TargetInstance is the name of the instance as specified in routing rules\n\t\/\/ for the cloud-provider (in gce: the Instance Name).\n\tTargetInstance string\n\t\/\/ DestinationCIDR is the CIDR format IP range that this routing rule\n\t\/\/ applies to.\n\tDestinationCIDR string\n}\n\n\/\/ Routes is an abstract, pluggable interface for advanced routing rules.\ntype Routes interface {\n\t\/\/ ListRoutes lists all managed routes that belong to the specified clusterName\n\tListRoutes(clusterName string) ([]*Route, error)\n\t\/\/ CreateRoute creates the described managed route\n\t\/\/ route.Name will be ignored, although the cloud-provider may use nameHint\n\t\/\/ to create a more user-meaningful name.\n\tCreateRoute(clusterName string, nameHint string, route *Route) error\n\t\/\/ DeleteRoute deletes the specified managed route\n\t\/\/ Route should be as returned by ListRoutes\n\tDeleteRoute(clusterName string, route *Route) error\n}\n\nvar InstanceNotFound = errors.New(\"instance not found\")\n\n\/\/ Zone represents the location of a particular machine.\ntype Zone struct {\n\tFailureDomain string\n\tRegion string\n}\n\n\/\/ Zones is an abstract, pluggable interface for zone enumeration.\ntype Zones interface {\n\t\/\/ GetZone returns the Zone containing the current failure zone and locality region that the program is running in\n\tGetZone() (Zone, error)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage database\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n)\n\ntype OSType int\n\n\/\/ Display prints the name of the OSType. Note this CANNOT be named String()\n\/\/ because, if it is, Go's text\/template package will automatically call\n\/\/ String() and cause you to lose hours upon hours of your life debuggin when\n\/\/ forms are suddenly broken.\nfunc (o OSType) Display() string {\n\tswitch o {\n\tcase OSTypeIOS:\n\t\treturn \"iOS\"\n\tcase OSTypeAndroid:\n\t\treturn \"Android\"\n\tdefault:\n\t\treturn \"Unknown\"\n\t}\n}\n\nconst (\n\tOSTypeInvalid OSType = iota\n\tOSTypeIOS\n\tOSTypeAndroid\n)\n\nvar _ Auditable = (*MobileApp)(nil)\n\ntype MobileApp struct {\n\tgorm.Model\n\tErrorable\n\n\t\/\/ Name is the name of the app.\n\tName string `gorm:\"column:name; type:citext;\"`\n\n\t\/\/ RealmID is the id of the mobile app.\n\tRealmID uint `gorm:\"column:realm_id;\"`\n\n\t\/\/ URL is the link to the app in it's appstore.\n\tURL string `gorm:\"-\"`\n\tURLPtr *string `gorm:\"column:url; type:text\"`\n\n\t\/\/ OS is the type of the application we're using (eg, iOS, Android).\n\tOS OSType `gorm:\"column:os; type:int;\"`\n\n\t\/\/ IOSAppID is a unique string representing the app.\n\tAppID string `gorm:\"column:app_id; type:varchar(512);\"`\n\n\t\/\/ SHA is a unique hash of the app.\n\t\/\/ It is only present for Android devices, and should be of the form:\n\t\/\/ AA:BB:CC:DD...\n\tSHA string `gorm:\"column:sha; type:text;\"`\n}\n\nfunc (a *MobileApp) BeforeSave(tx *gorm.DB) error {\n\ta.Name = strings.TrimSpace(a.Name)\n\tif a.Name == \"\" {\n\t\ta.AddError(\"name\", \"is required\")\n\t}\n\n\tif a.RealmID == 0 {\n\t\ta.AddError(\"realm_id\", \"is required\")\n\t}\n\n\ta.AppID = strings.TrimSpace(a.AppID)\n\tif a.AppID == \"\" {\n\t\ta.AddError(\"app_id\", \"is required\")\n\t}\n\n\ta.URL = strings.TrimSpace(a.URL)\n\tif a.URL != \"\" {\n\t\ta.URLPtr = stringPtr(a.URL)\n\t}\n\n\t\/\/ Ensure OS is valid\n\tif a.OS < OSTypeIOS || a.OS > OSTypeAndroid {\n\t\ta.AddError(\"os\", \"is invalid\")\n\t}\n\n\t\/\/ SHA is required for Android\n\ta.SHA = strings.TrimSpace(a.SHA)\n\tif a.OS == OSTypeAndroid {\n\t\tif a.SHA == \"\" {\n\t\t\ta.AddError(\"sha\", \"is required for Android apps\")\n\t\t}\n\t}\n\n\t\/\/ Process and clean SHAs\n\tvar shas []string\n\tfor _, line := range strings.Split(a.SHA, \"\\n\") {\n\t\tfor _, entry := range strings.Split(line, \",\") {\n\t\t\tentry = strings.ToUpper(strings.TrimSpace(entry))\n\t\t\tif entry == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif len(entry) != 95 {\n\t\t\t\ta.AddError(\"sha\", \"is not 95 characters\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif entry != \"\" {\n\t\t\t\tshas = append(shas, entry)\n\t\t\t}\n\t\t}\n\t}\n\ta.SHA = strings.Join(shas, \"\\n\")\n\n\tif len(a.Errors()) > 0 {\n\t\treturn fmt.Errorf(\"validation failed\")\n\t}\n\n\treturn nil\n}\n\n\/\/ ListActiveAppsByOS finds mobile apps by their realm and OS.\nfunc (db *Database) ListActiveAppsByOS(realmID uint, os OSType) ([]*MobileApp, error) {\n\t\/\/ Find the apps.\n\tvar apps []*MobileApp\n\tif err := db.db.\n\t\tModel(&MobileApp{}).\n\t\tWhere(\"realm_id = ? AND os = ?\", realmID, os).\n\t\tFind(&apps).\n\t\tError; err != nil {\n\t\tif IsNotFound(err) {\n\t\t\treturn apps, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn apps, nil\n}\n\n\/\/ SaveMobileApp saves the mobile app.\nfunc (db *Database) SaveMobileApp(a *MobileApp, actor Auditable) error {\n\tif a == nil {\n\t\treturn fmt.Errorf(\"provided mobile app is nil\")\n\t}\n\n\tif actor == nil {\n\t\treturn fmt.Errorf(\"auditing actor is nil\")\n\t}\n\n\treturn db.db.Transaction(func(tx *gorm.DB) error {\n\t\tvar audits []*AuditEntry\n\n\t\tvar existing MobileApp\n\t\tif err := tx.\n\t\t\tUnscoped().\n\t\t\tModel(&MobileApp{}).\n\t\t\tWhere(\"id = ?\", a.ID).\n\t\t\tFirst(&existing).\n\t\t\tError; err != nil && !IsNotFound(err) {\n\t\t\treturn fmt.Errorf(\"failed to get existing mobile app\")\n\t\t}\n\n\t\t\/\/ Save the app\n\t\tif err := tx.Unscoped().Save(a).Error; err != nil {\n\t\t\treturn fmt.Errorf(\"failed to save mobile app: %w\", err)\n\t\t}\n\n\t\t\/\/ Brand new app?\n\t\tif existing.ID == 0 {\n\t\t\taudit := BuildAuditEntry(actor, \"created mobile app\", a, a.RealmID)\n\t\t\taudits = append(audits, audit)\n\t\t} else {\n\t\t\tif existing.Name != a.Name {\n\t\t\t\taudit := BuildAuditEntry(actor, \"updated mobile app name\", a, a.RealmID)\n\t\t\t\taudit.Diff = stringDiff(existing.Name, a.Name)\n\t\t\t\taudits = append(audits, audit)\n\t\t\t}\n\n\t\t\tif existing.OS != a.OS {\n\t\t\t\taudit := BuildAuditEntry(actor, \"updated mobile app os\", a, a.RealmID)\n\t\t\t\taudit.Diff = stringDiff(existing.OS.Display(), a.OS.Display())\n\t\t\t\taudits = append(audits, audit)\n\t\t\t}\n\n\t\t\tif existing.AppID != a.AppID {\n\t\t\t\taudit := BuildAuditEntry(actor, \"updated mobile app appID\", a, a.RealmID)\n\t\t\t\taudit.Diff = stringDiff(existing.AppID, a.AppID)\n\t\t\t\taudits = append(audits, audit)\n\t\t\t}\n\n\t\t\tif existing.SHA != a.SHA {\n\t\t\t\taudit := BuildAuditEntry(actor, \"updated mobile app sha\", a, a.RealmID)\n\t\t\t\taudit.Diff = stringDiff(existing.SHA, a.SHA)\n\t\t\t\taudits = append(audits, audit)\n\t\t\t}\n\n\t\t\tif existing.DeletedAt != a.DeletedAt {\n\t\t\t\taudit := BuildAuditEntry(actor, \"updated mobile app enabled\", a, a.RealmID)\n\t\t\t\taudit.Diff = boolDiff(existing.DeletedAt == nil, a.DeletedAt == nil)\n\t\t\t\taudits = append(audits, audit)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Save all audits\n\t\tfor _, audit := range audits {\n\t\t\tif err := tx.Save(audit).Error; err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to save audits: %w\", err)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\nfunc (a *MobileApp) AuditID() string {\n\treturn fmt.Sprintf(\"mobile_apps:%d\", a.ID)\n}\n\nfunc (a *MobileApp) AuditDisplay() string {\n\treturn fmt.Sprintf(\"%s (%s)\", a.Name, a.OS.Display())\n}\n\nfunc (a *MobileApp) AfterFind(tx *gorm.DB) error {\n\ta.URL = stringValue(a.URLPtr)\n\treturn nil\n}\n\n\/\/ PurgeMobileApps will delete mobile apps that have been deleted for more than\n\/\/ the specified time.\nfunc (db *Database) PurgeMobileApps(maxAge time.Duration) (int64, error) {\n\tif maxAge > 0 {\n\t\tmaxAge = -1 * maxAge\n\t}\n\tdeleteBefore := time.Now().UTC().Add(maxAge)\n\n\tresult := db.db.\n\t\tUnscoped().\n\t\tWhere(\"deleted_at IS NOT NULL AND deleted_at < ?\", deleteBefore).\n\t\tDelete(&MobileApp{})\n\treturn result.RowsAffected, result.Error\n}\n<commit_msg>Allow blank entry for URL. (#846)<commit_after>\/\/ Copyright 2020 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage database\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n)\n\ntype OSType int\n\n\/\/ Display prints the name of the OSType. Note this CANNOT be named String()\n\/\/ because, if it is, Go's text\/template package will automatically call\n\/\/ String() and cause you to lose hours upon hours of your life debuggin when\n\/\/ forms are suddenly broken.\nfunc (o OSType) Display() string {\n\tswitch o {\n\tcase OSTypeIOS:\n\t\treturn \"iOS\"\n\tcase OSTypeAndroid:\n\t\treturn \"Android\"\n\tdefault:\n\t\treturn \"Unknown\"\n\t}\n}\n\nconst (\n\tOSTypeInvalid OSType = iota\n\tOSTypeIOS\n\tOSTypeAndroid\n)\n\nvar _ Auditable = (*MobileApp)(nil)\n\ntype MobileApp struct {\n\tgorm.Model\n\tErrorable\n\n\t\/\/ Name is the name of the app.\n\tName string `gorm:\"column:name; type:citext;\"`\n\n\t\/\/ RealmID is the id of the mobile app.\n\tRealmID uint `gorm:\"column:realm_id;\"`\n\n\t\/\/ URL is the link to the app in it's appstore.\n\tURL string `gorm:\"-\"`\n\tURLPtr *string `gorm:\"column:url; type:text\"`\n\n\t\/\/ OS is the type of the application we're using (eg, iOS, Android).\n\tOS OSType `gorm:\"column:os; type:int;\"`\n\n\t\/\/ IOSAppID is a unique string representing the app.\n\tAppID string `gorm:\"column:app_id; type:varchar(512);\"`\n\n\t\/\/ SHA is a unique hash of the app.\n\t\/\/ It is only present for Android devices, and should be of the form:\n\t\/\/ AA:BB:CC:DD...\n\tSHA string `gorm:\"column:sha; type:text;\"`\n}\n\nfunc (a *MobileApp) BeforeSave(tx *gorm.DB) error {\n\ta.Name = strings.TrimSpace(a.Name)\n\tif a.Name == \"\" {\n\t\ta.AddError(\"name\", \"is required\")\n\t}\n\n\tif a.RealmID == 0 {\n\t\ta.AddError(\"realm_id\", \"is required\")\n\t}\n\n\ta.AppID = strings.TrimSpace(a.AppID)\n\tif a.AppID == \"\" {\n\t\ta.AddError(\"app_id\", \"is required\")\n\t}\n\n\ta.URL = strings.TrimSpace(a.URL)\n\ta.URLPtr = stringPtr(a.URL)\n\n\t\/\/ Ensure OS is valid\n\tif a.OS < OSTypeIOS || a.OS > OSTypeAndroid {\n\t\ta.AddError(\"os\", \"is invalid\")\n\t}\n\n\t\/\/ SHA is required for Android\n\ta.SHA = strings.TrimSpace(a.SHA)\n\tif a.OS == OSTypeAndroid {\n\t\tif a.SHA == \"\" {\n\t\t\ta.AddError(\"sha\", \"is required for Android apps\")\n\t\t}\n\t}\n\n\t\/\/ Process and clean SHAs\n\tvar shas []string\n\tfor _, line := range strings.Split(a.SHA, \"\\n\") {\n\t\tfor _, entry := range strings.Split(line, \",\") {\n\t\t\tentry = strings.ToUpper(strings.TrimSpace(entry))\n\t\t\tif entry == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif len(entry) != 95 {\n\t\t\t\ta.AddError(\"sha\", \"is not 95 characters\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif entry != \"\" {\n\t\t\t\tshas = append(shas, entry)\n\t\t\t}\n\t\t}\n\t}\n\ta.SHA = strings.Join(shas, \"\\n\")\n\n\tif len(a.Errors()) > 0 {\n\t\treturn fmt.Errorf(\"validation failed\")\n\t}\n\n\treturn nil\n}\n\n\/\/ ListActiveAppsByOS finds mobile apps by their realm and OS.\nfunc (db *Database) ListActiveAppsByOS(realmID uint, os OSType) ([]*MobileApp, error) {\n\t\/\/ Find the apps.\n\tvar apps []*MobileApp\n\tif err := db.db.\n\t\tModel(&MobileApp{}).\n\t\tWhere(\"realm_id = ? AND os = ?\", realmID, os).\n\t\tFind(&apps).\n\t\tError; err != nil {\n\t\tif IsNotFound(err) {\n\t\t\treturn apps, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\treturn apps, nil\n}\n\n\/\/ SaveMobileApp saves the mobile app.\nfunc (db *Database) SaveMobileApp(a *MobileApp, actor Auditable) error {\n\tif a == nil {\n\t\treturn fmt.Errorf(\"provided mobile app is nil\")\n\t}\n\n\tif actor == nil {\n\t\treturn fmt.Errorf(\"auditing actor is nil\")\n\t}\n\n\treturn db.db.Transaction(func(tx *gorm.DB) error {\n\t\tvar audits []*AuditEntry\n\n\t\tvar existing MobileApp\n\t\tif err := tx.\n\t\t\tUnscoped().\n\t\t\tModel(&MobileApp{}).\n\t\t\tWhere(\"id = ?\", a.ID).\n\t\t\tFirst(&existing).\n\t\t\tError; err != nil && !IsNotFound(err) {\n\t\t\treturn fmt.Errorf(\"failed to get existing mobile app\")\n\t\t}\n\n\t\t\/\/ Save the app\n\t\tif err := tx.Unscoped().Save(a).Error; err != nil {\n\t\t\treturn fmt.Errorf(\"failed to save mobile app: %w\", err)\n\t\t}\n\n\t\t\/\/ Brand new app?\n\t\tif existing.ID == 0 {\n\t\t\taudit := BuildAuditEntry(actor, \"created mobile app\", a, a.RealmID)\n\t\t\taudits = append(audits, audit)\n\t\t} else {\n\t\t\tif existing.Name != a.Name {\n\t\t\t\taudit := BuildAuditEntry(actor, \"updated mobile app name\", a, a.RealmID)\n\t\t\t\taudit.Diff = stringDiff(existing.Name, a.Name)\n\t\t\t\taudits = append(audits, audit)\n\t\t\t}\n\n\t\t\tif existing.OS != a.OS {\n\t\t\t\taudit := BuildAuditEntry(actor, \"updated mobile app os\", a, a.RealmID)\n\t\t\t\taudit.Diff = stringDiff(existing.OS.Display(), a.OS.Display())\n\t\t\t\taudits = append(audits, audit)\n\t\t\t}\n\n\t\t\tif existing.AppID != a.AppID {\n\t\t\t\taudit := BuildAuditEntry(actor, \"updated mobile app appID\", a, a.RealmID)\n\t\t\t\taudit.Diff = stringDiff(existing.AppID, a.AppID)\n\t\t\t\taudits = append(audits, audit)\n\t\t\t}\n\n\t\t\tif existing.SHA != a.SHA {\n\t\t\t\taudit := BuildAuditEntry(actor, \"updated mobile app sha\", a, a.RealmID)\n\t\t\t\taudit.Diff = stringDiff(existing.SHA, a.SHA)\n\t\t\t\taudits = append(audits, audit)\n\t\t\t}\n\n\t\t\tif existing.DeletedAt != a.DeletedAt {\n\t\t\t\taudit := BuildAuditEntry(actor, \"updated mobile app enabled\", a, a.RealmID)\n\t\t\t\taudit.Diff = boolDiff(existing.DeletedAt == nil, a.DeletedAt == nil)\n\t\t\t\taudits = append(audits, audit)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Save all audits\n\t\tfor _, audit := range audits {\n\t\t\tif err := tx.Save(audit).Error; err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to save audits: %w\", err)\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\nfunc (a *MobileApp) AuditID() string {\n\treturn fmt.Sprintf(\"mobile_apps:%d\", a.ID)\n}\n\nfunc (a *MobileApp) AuditDisplay() string {\n\treturn fmt.Sprintf(\"%s (%s)\", a.Name, a.OS.Display())\n}\n\nfunc (a *MobileApp) AfterFind(tx *gorm.DB) error {\n\ta.URL = stringValue(a.URLPtr)\n\treturn nil\n}\n\n\/\/ PurgeMobileApps will delete mobile apps that have been deleted for more than\n\/\/ the specified time.\nfunc (db *Database) PurgeMobileApps(maxAge time.Duration) (int64, error) {\n\tif maxAge > 0 {\n\t\tmaxAge = -1 * maxAge\n\t}\n\tdeleteBefore := time.Now().UTC().Add(maxAge)\n\n\tresult := db.db.\n\t\tUnscoped().\n\t\tWhere(\"deleted_at IS NOT NULL AND deleted_at < ?\", deleteBefore).\n\t\tDelete(&MobileApp{})\n\treturn result.RowsAffected, result.Error\n}\n<|endoftext|>"} {"text":"<commit_before>package datastore\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t_ \"github.com\/go-sql-driver\/mysql\" \/\/ mysql driver\n\t\"github.com\/jmoiron\/sqlx\"\n\t_ \"github.com\/lib\/pq\" \/\/ postgresql driver\n\t\"github.com\/rubenv\/sql-migrate\"\n\t\"log\"\n)\n\n\/\/ DB holds the deprecated SQL connection pool. Try to use exported methods in this package instead.\nvar DB *sql.DB\n\nvar dbx *sqlx.DB\n\n\/\/ ErrNoResults is returned when a query yielded 0 results\nvar ErrNoResults = errors.New(\"query returned 0 results\")\n\n\/\/ Init creates a database connection pool (using sqlx)\nfunc Init(driver string, host string, name string, user string, password string) *sqlx.DB {\n\tdbx = New(driver, getDSN(driver, host, name, user, password))\n\n\t\/\/ store old sql.DB in exported var for backwards compat\n\tDB = dbx.DB\n\n\t\/\/ run migrations\n\trunMigrations(driver)\n\n\treturn dbx\n}\n\n\/\/ New creates a new database pool\nfunc New(driver string, dsn string) *sqlx.DB {\n\tdbx := sqlx.MustConnect(driver, dsn)\n\treturn dbx\n}\n\n\/\/ Get returns the underlying sqlx.DB instance. Use at your own risk.\nfunc Get() *sqlx.DB {\n\treturn dbx\n}\n\nfunc getDSN(driver string, host string, name string, user string, password string) string {\n\tvar dsn = fmt.Sprintf(\"%s:%s@%s\/%s\", user, password, host, name)\n\n\tswitch driver {\n\tcase \"postgres\":\n\t\tdsn = \"postgres:\/\/\" + dsn\n\tcase \"mysql\":\n\t\tdsn = dsn + \"?parseTime=true\"\n\t}\n\n\treturn dsn\n}\n\nfunc runMigrations(driver string) {\n\tmigrations := migrate.FileMigrationSource{\n\t\tDir: \"pkg\/datastore\/migrations\", \/\/ TODO: Move to bindata\n\t}\n\n\tmigrate.SetTable(\"migrations\")\n\n\tn, err := migrate.Exec(dbx.DB, driver, migrations, migrate.Up)\n\n\tif err != nil {\n\t\tlog.Fatal(\"Database migrations failed: \", err)\n\t}\n\n\tif n > 0 {\n\t\tlog.Printf(\"Applied %d database migrations!\\n\", n)\n\t}\n}\n<commit_msg>stop exporting DB instance in datastore pkg. yay<commit_after>package datastore\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t_ \"github.com\/go-sql-driver\/mysql\" \/\/ mysql driver\n\t\"github.com\/jmoiron\/sqlx\"\n\t_ \"github.com\/lib\/pq\" \/\/ postgresql driver\n\t\"github.com\/rubenv\/sql-migrate\"\n\t\"log\"\n)\n\nvar dbx *sqlx.DB\n\n\/\/ ErrNoResults is returned when a query yielded 0 results\nvar ErrNoResults = errors.New(\"datastore: query returned 0 results\")\n\n\/\/ Init creates a database connection pool (using sqlx)\nfunc Init(driver string, host string, name string, user string, password string) *sqlx.DB {\n\tdbx = New(driver, getDSN(driver, host, name, user, password))\n\n\t\/\/ run migrations\n\trunMigrations(driver)\n\n\treturn dbx\n}\n\n\/\/ New creates a new database pool\nfunc New(driver string, dsn string) *sqlx.DB {\n\tdbx := sqlx.MustConnect(driver, dsn)\n\treturn dbx\n}\n\nfunc getDSN(driver string, host string, name string, user string, password string) string {\n\tvar dsn = fmt.Sprintf(\"%s:%s@%s\/%s\", user, password, host, name)\n\n\tswitch driver {\n\tcase \"postgres\":\n\t\tdsn = \"postgres:\/\/\" + dsn\n\tcase \"mysql\":\n\t\tdsn = dsn + \"?parseTime=true\"\n\t}\n\n\treturn dsn\n}\n\nfunc runMigrations(driver string) {\n\tmigrations := migrate.FileMigrationSource{\n\t\tDir: \"pkg\/datastore\/migrations\", \/\/ TODO: Move to bindata\n\t}\n\n\tmigrate.SetTable(\"migrations\")\n\n\tn, err := migrate.Exec(dbx.DB, driver, migrations, migrate.Up)\n\n\tif err != nil {\n\t\tlog.Fatal(\"Database migrations failed: \", err)\n\t}\n\n\tif n > 0 {\n\t\tlog.Printf(\"Applied %d database migrations!\\n\", n)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package writer\n\nimport (\n\t\"archive\/tar\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/mgoltzsche\/cntnr\/pkg\/fs\"\n\t\"github.com\/pkg\/errors\"\n)\n\nvar _ fs.Writer = &TarWriter{}\n\n\/\/ A mapping file system writer that secures root directory boundaries.\n\/\/ Derived from umoci's tar_extract.go to allow separate source\/dest interfaces\n\/\/ and filter archive contents on extraction\ntype TarWriter struct {\n\twriter *tar.Writer\n\twritten map[string]*fs.FileAttrs\n}\n\nfunc NewTarWriter(writer io.Writer) (w *TarWriter) {\n\treturn &TarWriter{tar.NewWriter(writer), map[string]*fs.FileAttrs{}}\n}\n\nfunc (w *TarWriter) Close() error {\n\treturn errors.Wrap(w.writer.Close(), \"close tar writer\")\n}\n\nfunc (w *TarWriter) Parent() error { return nil }\nfunc (w *TarWriter) LowerNode(path, name string, a *fs.NodeAttrs) error { return nil }\nfunc (w *TarWriter) LowerLink(path, target string, a *fs.NodeAttrs) error { return nil }\n\nfunc (w *TarWriter) Lazy(path, name string, src fs.LazySource, written map[fs.Source]string) (err error) {\n\treturn errors.Errorf(\"refused to write lazy source %s into tar writer directly at %s since resulting tar could contain overridden entries. lazy sources must be resolved first.\", src, path)\n}\n\nfunc (w *TarWriter) File(path string, src fs.FileSource) (r fs.Source, err error) {\n\ta := src.Attrs()\n\tif path, err = normalize(path); err != nil {\n\t\treturn\n\t}\n\tif err = w.writeTarHeader(path, a.FileAttrs); err != nil {\n\t\treturn\n\t}\n\n\tif a.NodeType != fs.TypeFile {\n\t\treturn src, nil\n\t}\n\n\t\/\/ Copy file\n\tf, err := src.Reader()\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif e := f.Close(); e != nil && err == nil {\n\t\t\terr = errors.Wrap(e, \"write tar\")\n\t\t}\n\t}()\n\tn, err := io.Copy(w.writer, f)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"write tar: file entry\")\n\t}\n\tif n != a.Size {\n\t\treturn nil, errors.Wrap(io.ErrShortWrite, \"write tar: file entry\")\n\t}\n\treturn src, nil\n}\n\nfunc (w *TarWriter) writeTarHeader(path string, a fs.FileAttrs) (err error) {\n\thdr, err := w.toTarHeader(path, a)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = w.writer.WriteHeader(hdr)\n\treturn errors.Wrapf(err, \"write tar header for %q\", path)\n}\n\nfunc (w *TarWriter) toTarHeader(path string, a fs.FileAttrs) (hdr *tar.Header, err error) {\n\ta.Mtime = time.Unix(a.Mtime.Unix(), 0) \/\/ use floor(mtime) to preserve mtime which otherwise is not guaranteed due to rounding to seconds within tar\n\thdr, err = tar.FileInfoHeader(fs.NewFileInfo(path, &a), a.Symlink)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"to tar header: %s\", path)\n\t}\n\thdr.AccessTime = a.Atime\n\thdr.Xattrs = a.Xattrs\n\tw.addWritten(path, &a)\n\treturn\n}\n\n\/\/ Taken from umoci\nfunc normalize(path string) (string, error) {\n\tpath = filepath.Clean(string(os.PathSeparator) + path)\n\tpath, _ = filepath.Rel(string(os.PathSeparator), path)\n\tpath = filepath.Clean(path)\n\tif !isValidPath(path) {\n\t\treturn \"\", errors.Errorf(\"tar writer: path outside tar root: %s\", path)\n\t}\n\treturn path, nil\n}\n\nfunc isValidPath(path string) bool {\n\tprfx := string(os.PathSeparator) + \"___\"\n\treturn filepath.HasPrefix(filepath.Join(prfx, path), prfx)\n}\n\nfunc (w *TarWriter) addWritten(path string, a *fs.FileAttrs) {\n\tw.written[path] = a\n}\n\nfunc (w *TarWriter) Link(path, target string) (err error) {\n\tif path, err = normalize(path); err != nil {\n\t\treturn\n\t}\n\tif !filepath.IsAbs(target) {\n\t\ttarget = filepath.Join(filepath.Dir(path), target)\n\t}\n\tif target, err = normalize(target); err != nil {\n\t\treturn errors.Wrap(err, \"link\")\n\t}\n\n\ta := w.written[target]\n\tif a == nil {\n\t\treturn errors.Errorf(\"write tar: link entry %s: target %s does not exist\", path, target)\n\t}\n\thdr, err := w.toTarHeader(path, *a)\n\tif err != nil {\n\t\treturn\n\t}\n\thdr.Typeflag = tar.TypeLink\n\thdr.Linkname = target\n\thdr.Size = 0\n\n\terr = w.writer.WriteHeader(hdr)\n\treturn errors.Wrap(err, \"tar writer: write link\")\n}\n\nfunc (w *TarWriter) Symlink(path string, a fs.FileAttrs) (err error) {\n\tif path, err = normalize(path); err != nil {\n\t\treturn\n\t}\n\n\ta.Mode |= os.ModeSymlink | 0777\n\ta.Symlink, err = normalizeLinkDest(path, a.Symlink)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn w.writeTarHeader(path, a)\n}\n\nfunc (w *TarWriter) Fifo(path string, a fs.DeviceAttrs) (err error) {\n\ta.Mode |= syscall.S_IFIFO\n\treturn w.device(path, &a)\n}\n\nfunc (w *TarWriter) device(path string, a *fs.DeviceAttrs) (err error) {\n\tif path, err = normalize(path); err != nil {\n\t\treturn\n\t}\n\thdr, err := w.toTarHeader(path, a.FileAttrs)\n\tif err != nil {\n\t\treturn\n\t}\n\thdr.Size = 0\n\thdr.Devmajor = a.Devmajor\n\thdr.Devminor = a.Devminor\n\terr = w.writer.WriteHeader(hdr)\n\treturn errors.Wrap(err, \"tar writer: write device\")\n}\n\nfunc (w *TarWriter) Device(path string, a fs.DeviceAttrs) (err error) {\n\treturn w.device(path, &a)\n}\n\nfunc (w *TarWriter) Mkdir(path string) (err error) {\n\tif path, err = normalize(path); err != nil {\n\t\treturn\n\t}\n\treturn w.writeTarHeader(path+string(os.PathSeparator), fs.FileAttrs{Mode: os.ModeDir | 0755})\n}\n\nfunc (w *TarWriter) Dir(path, base string, a fs.FileAttrs) (err error) {\n\tif path, err = normalize(path); err != nil {\n\t\treturn\n\t}\n\ta.Mode |= os.ModeDir\n\treturn w.writeTarHeader(path+string(os.PathSeparator), a)\n}\n\nfunc (w *TarWriter) Remove(path string) (err error) {\n\tif path, err = normalize(path); err != nil {\n\t\treturn\n\t}\n\tdelete(w.written, path)\n\tdir, file := filepath.Split(path)\n\tfile = fs.WhiteoutPrefix + file\n\tnow := time.Now()\n\t\/\/ Using current time for header values which leads to unreproducable layer\n\t\/\/ TODO: maybe change to fixed time instead of now()\n\treturn w.writeTarHeader(filepath.Join(dir, file), fs.FileAttrs{FileTimes: fs.FileTimes{Atime: now, Mtime: now}})\n}\n<commit_msg>Fixed tar\/user mapping<commit_after>package writer\n\nimport (\n\t\"archive\/tar\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/mgoltzsche\/cntnr\/pkg\/fs\"\n\t\"github.com\/pkg\/errors\"\n)\n\nvar _ fs.Writer = &TarWriter{}\n\n\/\/ A mapping file system writer that secures root directory boundaries.\n\/\/ Derived from umoci's tar_extract.go to allow separate source\/dest interfaces\n\/\/ and filter archive contents on extraction\ntype TarWriter struct {\n\twriter *tar.Writer\n\twritten map[string]*fs.FileAttrs\n}\n\nfunc NewTarWriter(writer io.Writer) (w *TarWriter) {\n\treturn &TarWriter{tar.NewWriter(writer), map[string]*fs.FileAttrs{}}\n}\n\nfunc (w *TarWriter) Close() error {\n\treturn errors.Wrap(w.writer.Close(), \"close tar writer\")\n}\n\nfunc (w *TarWriter) Parent() error { return nil }\nfunc (w *TarWriter) LowerNode(path, name string, a *fs.NodeAttrs) error { return nil }\nfunc (w *TarWriter) LowerLink(path, target string, a *fs.NodeAttrs) error { return nil }\n\nfunc (w *TarWriter) Lazy(path, name string, src fs.LazySource, written map[fs.Source]string) (err error) {\n\treturn errors.Errorf(\"refused to write lazy source %s into tar writer directly at %s since resulting tar could contain overridden entries. lazy sources must be resolved first.\", src, path)\n}\n\nfunc (w *TarWriter) File(path string, src fs.FileSource) (r fs.Source, err error) {\n\ta := src.Attrs()\n\tif path, err = normalize(path); err != nil {\n\t\treturn\n\t}\n\tif err = w.writeTarHeader(path, a.FileAttrs); err != nil {\n\t\treturn\n\t}\n\n\tif a.NodeType != fs.TypeFile {\n\t\treturn src, nil\n\t}\n\n\t\/\/ Copy file\n\tf, err := src.Reader()\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif e := f.Close(); e != nil && err == nil {\n\t\t\terr = errors.Wrap(e, \"write tar\")\n\t\t}\n\t}()\n\tn, err := io.Copy(w.writer, f)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"write tar: file entry\")\n\t}\n\tif n != a.Size {\n\t\treturn nil, errors.Wrap(io.ErrShortWrite, \"write tar: file entry\")\n\t}\n\treturn src, nil\n}\n\nfunc (w *TarWriter) writeTarHeader(path string, a fs.FileAttrs) (err error) {\n\thdr, err := w.toTarHeader(path, a)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = w.writer.WriteHeader(hdr)\n\treturn errors.Wrapf(err, \"write tar header for %q\", path)\n}\n\nfunc (w *TarWriter) toTarHeader(path string, a fs.FileAttrs) (hdr *tar.Header, err error) {\n\ta.Mtime = time.Unix(a.Mtime.Unix(), 0) \/\/ use floor(mtime) to preserve mtime which otherwise is not guaranteed due to rounding to seconds within tar\n\thdr, err = tar.FileInfoHeader(fs.NewFileInfo(path, &a), a.Symlink)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"to tar header: %s\", path)\n\t}\n\thdr.Uid = int(a.Uid)\n\thdr.Gid = int(a.Gid)\n\thdr.AccessTime = a.Atime\n\thdr.Xattrs = a.Xattrs\n\tw.addWritten(path, &a)\n\treturn\n}\n\n\/\/ Taken from umoci\nfunc normalize(path string) (string, error) {\n\tpath = filepath.Clean(string(os.PathSeparator) + path)\n\tpath, _ = filepath.Rel(string(os.PathSeparator), path)\n\tpath = filepath.Clean(path)\n\tif !isValidPath(path) {\n\t\treturn \"\", errors.Errorf(\"tar writer: path outside tar root: %s\", path)\n\t}\n\treturn path, nil\n}\n\nfunc isValidPath(path string) bool {\n\tprfx := string(os.PathSeparator) + \"___\"\n\treturn filepath.HasPrefix(filepath.Join(prfx, path), prfx)\n}\n\nfunc (w *TarWriter) addWritten(path string, a *fs.FileAttrs) {\n\tw.written[path] = a\n}\n\nfunc (w *TarWriter) Link(path, target string) (err error) {\n\tif path, err = normalize(path); err != nil {\n\t\treturn\n\t}\n\tif !filepath.IsAbs(target) {\n\t\ttarget = filepath.Join(filepath.Dir(path), target)\n\t}\n\tif target, err = normalize(target); err != nil {\n\t\treturn errors.Wrap(err, \"link\")\n\t}\n\n\ta := w.written[target]\n\tif a == nil {\n\t\treturn errors.Errorf(\"write tar: link entry %s: target %s does not exist\", path, target)\n\t}\n\thdr, err := w.toTarHeader(path, *a)\n\tif err != nil {\n\t\treturn\n\t}\n\thdr.Typeflag = tar.TypeLink\n\thdr.Linkname = target\n\thdr.Size = 0\n\n\terr = w.writer.WriteHeader(hdr)\n\treturn errors.Wrap(err, \"tar writer: write link\")\n}\n\nfunc (w *TarWriter) Symlink(path string, a fs.FileAttrs) (err error) {\n\tif path, err = normalize(path); err != nil {\n\t\treturn\n\t}\n\n\ta.Mode |= os.ModeSymlink | 0777\n\ta.Symlink, err = normalizeLinkDest(path, a.Symlink)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn w.writeTarHeader(path, a)\n}\n\nfunc (w *TarWriter) Fifo(path string, a fs.DeviceAttrs) (err error) {\n\ta.Mode |= syscall.S_IFIFO\n\treturn w.device(path, &a)\n}\n\nfunc (w *TarWriter) device(path string, a *fs.DeviceAttrs) (err error) {\n\tif path, err = normalize(path); err != nil {\n\t\treturn\n\t}\n\thdr, err := w.toTarHeader(path, a.FileAttrs)\n\tif err != nil {\n\t\treturn\n\t}\n\thdr.Size = 0\n\thdr.Devmajor = a.Devmajor\n\thdr.Devminor = a.Devminor\n\terr = w.writer.WriteHeader(hdr)\n\treturn errors.Wrap(err, \"tar writer: write device\")\n}\n\nfunc (w *TarWriter) Device(path string, a fs.DeviceAttrs) (err error) {\n\treturn w.device(path, &a)\n}\n\nfunc (w *TarWriter) Mkdir(path string) (err error) {\n\tif path, err = normalize(path); err != nil {\n\t\treturn\n\t}\n\treturn w.writeTarHeader(path+string(os.PathSeparator), fs.FileAttrs{Mode: os.ModeDir | 0755})\n}\n\nfunc (w *TarWriter) Dir(path, base string, a fs.FileAttrs) (err error) {\n\tif path, err = normalize(path); err != nil {\n\t\treturn\n\t}\n\ta.Mode |= os.ModeDir\n\treturn w.writeTarHeader(path+string(os.PathSeparator), a)\n}\n\nfunc (w *TarWriter) Remove(path string) (err error) {\n\tif path, err = normalize(path); err != nil {\n\t\treturn\n\t}\n\tdelete(w.written, path)\n\tdir, file := filepath.Split(path)\n\tfile = fs.WhiteoutPrefix + file\n\tnow := time.Now()\n\t\/\/ Using current time for header values which leads to unreproducable layer\n\t\/\/ TODO: maybe change to fixed time instead of now()\n\treturn w.writeTarHeader(filepath.Join(dir, file), fs.FileAttrs{FileTimes: fs.FileTimes{Atime: now, Mtime: now}})\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright 2010 Jeremy Wall (jeremy@marzhillstudios.com)\n Use of this source code is governed by the Artistic License 2.0.\n That License is included in the LICENSE file.\n*\/ \npackage transform\n\nimport (\n\t. \"html\"\n\t\"os\"\n\t\"log\"\n\t\"strings\"\n\tv \"container\/vector\"\n)\n\ntype NodeType int\n\nconst (\n\tTEXT NodeType = iota \/\/ 0 value so the default\n\tTAG\n)\n\ntype Node struct {\n\tnodeType NodeType\n\tnodeValue string\n\tnodeAttributes map[string] string\n\tchildren v.Vector\n}\n\nfunc lazyTokens(t *Tokenizer) <-chan Token {\n\ttokens := make(chan Token, 1)\n\tgo func() {\n\t\tfor {\n\t\t\ttt := t.Next()\n\t\t\tif tt == Error {\n\t\t\t\tswitch t.Error() {\n\t\t\t\tcase os.EOF:\n\t\t\t\t\tbreak\n\t\t\t\tdefault:\n\t\t\t\t\tlog.Panicf(\n\t\t\t\t\t\t\"Error tokenizing string: %s\",\n\t\t\t\t\t\tt.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\ttokens <- t.Token()\n\t\t}\n\t}()\n\treturn tokens\n}\n\ntype Document struct {\n\ttop Node\n}\n\nfunc transformAttributes(attrs []Attribute) map[string] string {\n\tattributes := make(map[string] string)\n\tfor _, attr := range attrs {\n\t\tattributes[attr.Key] = attr.Val\n\t}\n\treturn attributes\n}\n\nfunc typeFromToken(t Token) NodeType {\n\tif t.Type == Text {\n\t\treturn TEXT\n\t}\n\treturn TAG\n}\n\nfunc nodeFromToken(t Token) Node {\n\treturn Node{\n\t\tnodeType: typeFromToken(t),\n\t\tnodeValue: t.Data,\n\t\tnodeAttributes: transformAttributes(t.Attr),\n\t}\n}\n\nfunc NewDoc(s string) *Document {\n\tt := NewTokenizer(strings.NewReader(s))\n\ttokens := lazyTokens(t)\n\ttok1 := <-tokens\n\tdoc := Document{top: nodeFromToken(tok1)}\n\n\tqueue := new(v.Vector)\n\tqueue.Push(doc.top)\n\tfor tok := range tokens {\n\t\tcurr := queue.At(0).(Node)\n\t\tswitch tok.Type {\n\t\tcase SelfClosingTag, Text:\n\t\t\tcurr.children.Push(nodeFromToken(tok))\n\t\tcase StartTag:\n\t\t\tcurr.children.Push(nodeFromToken(tok))\n\t\t\tqueue.Push(nodeFromToken(tok))\n\t\tcase EndTag:\n\t\t\tqueue.Pop()\n\t\t}\n\t}\n\treturn &doc\n}\n\n\/\/ TODO(jwall): not sure I even need fragments but oh well\ntype Fragment struct {\n\t*Document\n}\n\nfunc NewFragment(s string) *Fragment {\n\treturn &Fragment{NewDoc(s)}\n}\n\n\/\/ TODO(jwall): css style addressing of elements\n<commit_msg>untested but compiling selector parser<commit_after>\/*\n Copyright 2010 Jeremy Wall (jeremy@marzhillstudios.com)\n Use of this source code is governed by the Artistic License 2.0.\n That License is included in the LICENSE file.\n*\/ \npackage transform\n\nimport (\n\tv \"container\/vector\"\n\t. \"html\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n)\n\ntype NodeType int\n\nconst (\n\tTEXT NodeType = iota \/\/ 0 value so the default\n\tTAG\n)\n\ntype Node struct {\n\tnodeType NodeType\n\tnodeValue string\n\tnodeAttributes map[string] string\n\tchildren v.Vector\n}\n\nfunc lazyTokens(t *Tokenizer) <-chan Token {\n\ttokens := make(chan Token, 1)\n\tgo func() {\n\t\tfor {\n\t\t\ttt := t.Next()\n\t\t\tif tt == Error {\n\t\t\t\tswitch t.Error() {\n\t\t\t\tcase os.EOF:\n\t\t\t\t\tbreak\n\t\t\t\tdefault:\n\t\t\t\t\tlog.Panicf(\n\t\t\t\t\t\t\"Error tokenizing string: %s\",\n\t\t\t\t\t\tt.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t\ttokens <- t.Token()\n\t\t}\n\t}()\n\treturn tokens\n}\n\ntype Document struct {\n\ttop Node\n}\n\nfunc transformAttributes(attrs []Attribute) map[string] string {\n\tattributes := make(map[string] string)\n\tfor _, attr := range attrs {\n\t\tattributes[attr.Key] = attr.Val\n\t}\n\treturn attributes\n}\n\nfunc typeFromToken(t Token) NodeType {\n\tif t.Type == Text {\n\t\treturn TEXT\n\t}\n\treturn TAG\n}\n\nfunc nodeFromToken(t Token) Node {\n\treturn Node{\n\t\tnodeType: typeFromToken(t),\n\t\tnodeValue: t.Data,\n\t\tnodeAttributes: transformAttributes(t.Attr),\n\t}\n}\n\nfunc NewDoc(s string) *Document {\n\tt := NewTokenizer(strings.NewReader(s))\n\ttokens := lazyTokens(t)\n\ttok1 := <-tokens\n\tdoc := Document{top: nodeFromToken(tok1)}\n\n\tqueue := new(v.Vector)\n\tqueue.Push(doc.top)\n\tfor tok := range tokens {\n\t\tcurr := queue.At(0).(Node)\n\t\tswitch tok.Type {\n\t\tcase SelfClosingTag, Text:\n\t\t\tcurr.children.Push(nodeFromToken(tok))\n\t\tcase StartTag:\n\t\t\tcurr.children.Push(nodeFromToken(tok))\n\t\t\tqueue.Push(nodeFromToken(tok))\n\t\tcase EndTag:\n\t\t\tqueue.Pop()\n\t\t}\n\t}\n\treturn &doc\n}\n\n\/\/ TODO(jwall): not sure I even need fragments but oh well\ntype Fragment struct {\n\t*Document\n}\n\nfunc NewFragment(s string) *Fragment {\n\treturn &Fragment{NewDoc(s)}\n}\n\n\/\/ TODO(jwall): css style addressing of elements\n<|endoftext|>"} {"text":"<commit_before>package landscaper\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"reflect\"\n\n\t\"github.com\/pmezard\/go-difflib\/difflib\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"google.golang.org\/grpc\"\n\t\"k8s.io\/helm\/pkg\/helm\"\n)\n\n\/\/ Executor is responsible for applying a desired landscape to the actual landscape\ntype Executor interface {\n\tApply(Components, Components) error\n\n\tCreateComponent(*Component) error\n\tUpdateComponent(*Component) error\n\tDeleteComponent(*Component) error\n}\n\ntype executor struct {\n\thelmClient helm.Interface\n\tchartLoader ChartLoader\n\tkubeSecrets SecretsWriteDeleter\n\tdryRun bool\n\twait bool\n\twaitTimeout int64\n\tdisabledStages []string\n}\n\n\/\/ NewExecutor is a factory method to create a new Executor\nfunc NewExecutor(helmClient helm.Interface, chartLoader ChartLoader, kubeSecrets SecretsWriteDeleter, dryRun bool, wait bool, waitTimeout int64, disabledStages []string) Executor {\n\treturn &executor{\n\t\thelmClient: helmClient,\n\t\tchartLoader: chartLoader,\n\t\tkubeSecrets: kubeSecrets,\n\t\tdryRun: dryRun,\n\t\twait: wait,\n\t\twaitTimeout: waitTimeout,\n\t\tdisabledStages: disabledStages,\n\t}\n}\n\n\/\/ gatherForcedUpdates returns a map that for each to-be-updated component indicates if it needs a forced update.\n\/\/ there may be several reasons to do so: releases that differ only in secret values are forced so that pods will restart with the new values; releases that differ in namespace cannot be updated\nfunc (e *executor) gatherForcedUpdates(current, update Components) (map[string]bool, error) {\n\tneedForcedUpdate := map[string]bool{}\n\n\tfor _, cmp := range update {\n\t\t\/\/ releases that differ only in secret values are forced so that pods will restart with the new values\n\t\tfor _, curCmp := range current {\n\t\t\tif curCmp.Name == cmp.Name && isOnlySecretValueDiff(*curCmp, *cmp) {\n\t\t\t\tlogrus.Infof(\"%s differs in secrets values only; don't update but delete + create instead\", cmp.Name)\n\t\t\t\tneedForcedUpdate[cmp.Name] = true\n\t\t\t}\n\t\t}\n\t\tif curCmp := current[cmp.Name]; curCmp != nil {\n\t\t\tif curCmp.Namespace != cmp.Namespace {\n\t\t\t\tlogrus.Infof(\"%s differs in namespace; don't update but delete + create instead\", cmp.Name)\n\t\t\t\tneedForcedUpdate[cmp.Name] = true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn needForcedUpdate, nil\n}\n\n\/\/ Apply transforms the current state into the desired state\nfunc (e *executor) Apply(desired, current Components) error {\n\tcreate, update, delete := diff(desired, current)\n\n\t\/\/ some to-be-updated components need a delete + create instead\n\tneedForcedUpdate, err := e.gatherForcedUpdates(current, update)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ delete+create pairs will never work in dry run since the dry-run \"deleted\" component will exist in create\n\tif !e.dryRun {\n\t\tcreate, update, delete = integrateForcedUpdates(current, create, update, delete, needForcedUpdate)\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\"create\": len(create), \"update\": len(update), \"delete\": len(delete)}).Info(\"Apply desired state\")\n\n\tif err := logDifferences(current, create, update, delete, e.stageEnabled(\"update\"), e.stageEnabled(\"delete\"), logrus.Infof); err != nil {\n\t\treturn err\n\t}\n\n\tif e.stageEnabled(\"delete\") {\n\t\tfor _, cmp := range delete {\n\t\t\tif err := e.DeleteComponent(cmp); err != nil {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\"error\": err}).Error(\"DeleteComponent failed\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif e.stageEnabled(\"update\") {\n\t\tfor _, cmp := range update {\n\t\t\tif err := e.UpdateComponent(cmp); err != nil {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\"error\": err}).Error(\"UpdateComponent failed\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif e.stageEnabled(\"create\") {\n\t\tfor _, cmp := range create {\n\t\t\tif err := e.CreateComponent(cmp); err != nil {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\"error\": err}).Error(\"CreateComponent failed\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\"created\": len(create), \"updated\": len(update), \"deleted\": len(delete)}).Info(\"Applied desired state successfully\")\n\treturn nil\n}\n\nfunc (e *executor) stageEnabled(stage string) bool {\n\tfor _, stageDisabled := range e.disabledStages {\n\t\tif stageDisabled == stage {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ CreateComponent creates the given Component\nfunc (e *executor) CreateComponent(cmp *Component) error {\n\t\/\/ We need to ensure the chart is available on the local system. LoadChart will ensure\n\t\/\/ this is the case by downloading the chart if it is not there yet\n\tchartRef, err := cmp.FullChartRef()\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, chartPath, err := e.chartLoader.Load(chartRef)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trawValues, err := cmp.Configuration.YAML()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"release\": cmp.Name,\n\t\t\"chart\": cmp.Release.Chart,\n\t\t\"chartPath\": chartPath,\n\t\t\"rawValues\": rawValues,\n\t\t\"values\": cmp.Configuration,\n\t\t\"dryrun\": e.dryRun,\n\t}).Debug(\"Create component\")\n\n\tif len(cmp.SecretValues) > 0 && !e.dryRun {\n\t\terr = e.kubeSecrets.Write(cmp.Name, cmp.Namespace, cmp.SecretValues)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t_, err = e.helmClient.InstallRelease(\n\t\tchartPath,\n\t\tcmp.Namespace,\n\t\thelm.ValueOverrides([]byte(rawValues)),\n\t\thelm.ReleaseName(cmp.Name),\n\t\thelm.InstallDryRun(e.dryRun),\n\t\thelm.InstallReuseName(true),\n\t\thelm.InstallWait(e.wait),\n\t\thelm.InstallTimeout(e.waitTimeout),\n\t)\n\tif err != nil {\n\t\treturn errors.New(grpc.ErrorDesc(err))\n\t}\n\n\treturn nil\n}\n\n\/\/ UpdateComponent updates the given Component\nfunc (e *executor) UpdateComponent(cmp *Component) error {\n\t\/\/ We need to ensure the chart is available on the local system. LoadChart will ensure\n\t\/\/ this is the case by downloading the chart if it is not there yet\n\tchartRef, err := cmp.FullChartRef()\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, chartPath, err := e.chartLoader.Load(chartRef)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trawValues, err := cmp.Configuration.YAML()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !e.dryRun {\n\t\tif e.stageEnabled(\"deleteSecrets\") || len(cmp.SecretValues) > 0 {\n\t\t\terr = e.kubeSecrets.Delete(cmp.Name, cmp.Namespace)\n\t\t}\n\n\t\tif len(cmp.SecretValues) > 0 {\n\t\t\terr = e.kubeSecrets.Write(cmp.Name, cmp.Namespace, cmp.SecretValues)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"release\": cmp.Name,\n\t\t\"chart\": cmp.Release.Chart,\n\t\t\"chartPath\": chartPath,\n\t\t\"values\": cmp.Configuration,\n\t\t\"dryrun\": e.dryRun,\n\t}).Debug(\"Update component\")\n\n\t_, err = e.helmClient.UpdateRelease(\n\t\tcmp.Name,\n\t\tchartPath,\n\t\thelm.UpdateValueOverrides([]byte(rawValues)),\n\t\thelm.UpgradeDryRun(e.dryRun),\n\t\thelm.UpgradeWait(e.wait),\n\t\thelm.UpgradeTimeout(e.waitTimeout),\n\t)\n\tif err != nil {\n\t\treturn errors.New(grpc.ErrorDesc(err))\n\t}\n\n\treturn nil\n}\n\n\/\/ DeleteComponent removes the given Component\nfunc (e *executor) DeleteComponent(cmp *Component) error {\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"release\": cmp.Name,\n\t\t\"values\": cmp.Configuration,\n\t\t\"dryrun\": e.dryRun,\n\t}).Debug(\"Delete component\")\n\n\tif len(cmp.SecretValues) > 0 && !e.dryRun {\n\t\terr := e.kubeSecrets.Delete(cmp.Name, cmp.Namespace)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif !e.dryRun {\n\t\t_, err := e.helmClient.DeleteRelease(\n\t\t\tcmp.Name,\n\t\t\thelm.DeletePurge(true),\n\t\t\thelm.DeleteDryRun(e.dryRun),\n\t\t)\n\t\tif err != nil {\n\t\t\treturn errors.New(grpc.ErrorDesc(err))\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ diff takes desired and current components, and returns the components to create, update and delete to get from current to desired\nfunc diff(desired, current Components) (Components, Components, Components) {\n\tcreate := Components{}\n\tupdate := Components{}\n\tdelete := Components{}\n\n\tfor name, desiredCmp := range desired {\n\t\tif currentCmp, ok := current[name]; ok {\n\t\t\tif !desiredCmp.Equals(currentCmp) {\n\t\t\t\tupdate[name] = desiredCmp\n\t\t\t}\n\t\t} else {\n\t\t\tcreate[name] = desiredCmp\n\t\t}\n\t}\n\n\tfor name, currentCmp := range current {\n\t\tif _, ok := desired[name]; !ok {\n\t\t\tdelete[name] = currentCmp\n\t\t}\n\t}\n\n\treturn create, update, delete\n}\n\n\/\/ componentDiffText returns a diff as text. current and desired can be nil and indicate non-existence (e.g. current nil and desired non-nil means: create)\nfunc componentDiffText(current, desired *Component) (string, error) {\n\tcText, dText := []string{}, []string{}\n\tcName, dName := \"<none>\", \"<none>\"\n\tif current != nil {\n\t\tcs, err := json.MarshalIndent(current, \"\", \" \")\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tcText = difflib.SplitLines(string(cs))\n\t\tcName = current.Name\n\t}\n\tif desired != nil {\n\t\tds, err := json.MarshalIndent(desired, \"\", \" \")\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tdText = difflib.SplitLines(string(ds))\n\t\tdName = desired.Name\n\t}\n\n\treturn difflib.GetUnifiedDiffString(difflib.UnifiedDiff{\n\t\tA: cText,\n\t\tFromFile: \"Current \" + cName,\n\t\tB: dText,\n\t\tToFile: \"Desired \" + dName,\n\t\tContext: 3,\n\t})\n}\n\n\/\/ logDifferences logs the Create, Update and Delete w.r.t. current to logf\nfunc logDifferences(current, creates, updates, deletes Components, updateStageEnabled bool, deleteStageEnabled bool, logf func(format string, args ...interface{})) error {\n\tlog := func(action string, current, desired *Component) error {\n\t\tdiff, err := componentDiffText(current, desired)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlogf(\"%s\", action)\n\t\tif diff != \"\" {\n\t\t\tlogf(\"Diff:\\n%s\", diff)\n\t\t}\n\t\tif current != nil && desired != nil && !reflect.DeepEqual(current.SecretValues, desired.SecretValues) {\n\t\t\tlogrus.Info(\"Diff: secrets have changed, not shown here\")\n\t\t}\n\t\treturn nil\n\t}\n\n\t\/\/ Log diffs only if delete is applied\n\tif deleteStageEnabled {\n\t\tfor _, d := range deletes {\n\t\t\tlogf(\"Delete: %s\", d.Name)\n\t\t}\n\t}\n\n\tfor _, d := range creates {\n\t\tif err := log(\"Create: \"+d.Name, nil, d); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Log diffs only if upadete is applied\n\tif updateStageEnabled {\n\t\tfor _, d := range updates {\n\t\t\tc := current[d.Name]\n\t\t\tif err := log(\"Update: \"+d.Name, c, d); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ integrateForcedUpdates removes forceUpdate from update and inserts it into delete + create\nfunc integrateForcedUpdates(current, create, update, delete Components, forceUpdate map[string]bool) (Components, Components, Components) {\n\tfixUpdate := Components{}\n\tfor _, cmp := range update {\n\t\tif forceUpdate[cmp.Name] {\n\t\t\tif currentCmp, ok := current[cmp.Name]; ok {\n\t\t\t\tdelete[currentCmp.Name] = currentCmp \/\/ delete the current component\n\t\t\t}\n\t\t\tcreate[cmp.Name] = cmp \/\/ create cmp, by definition a desired component\n\t\t} else {\n\t\t\tfixUpdate[cmp.Name] = cmp\n\t\t}\n\t}\n\treturn create, fixUpdate, delete\n}\n\n\/\/ isOnlySecretValueDiff tells whether the given Components differ in their .SecretValues fields and are identical otherwise\nfunc isOnlySecretValueDiff(a, b Component) bool {\n\tsecValsEqual := reflect.DeepEqual(a.SecretValues, b.SecretValues)\n\ta.SecretValues = SecretValues{}\n\tb.SecretValues = SecretValues{}\n\treturn !secValsEqual && reflect.DeepEqual(a, b)\n}\n<commit_msg>feat(log) Log which component has failed<commit_after>package landscaper\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"reflect\"\n\n\t\"github.com\/pmezard\/go-difflib\/difflib\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"google.golang.org\/grpc\"\n\t\"k8s.io\/helm\/pkg\/helm\"\n)\n\n\/\/ Executor is responsible for applying a desired landscape to the actual landscape\ntype Executor interface {\n\tApply(Components, Components) error\n\n\tCreateComponent(*Component) error\n\tUpdateComponent(*Component) error\n\tDeleteComponent(*Component) error\n}\n\ntype executor struct {\n\thelmClient helm.Interface\n\tchartLoader ChartLoader\n\tkubeSecrets SecretsWriteDeleter\n\tdryRun bool\n\twait bool\n\twaitTimeout int64\n\tdisabledStages []string\n}\n\n\/\/ NewExecutor is a factory method to create a new Executor\nfunc NewExecutor(helmClient helm.Interface, chartLoader ChartLoader, kubeSecrets SecretsWriteDeleter, dryRun bool, wait bool, waitTimeout int64, disabledStages []string) Executor {\n\treturn &executor{\n\t\thelmClient: helmClient,\n\t\tchartLoader: chartLoader,\n\t\tkubeSecrets: kubeSecrets,\n\t\tdryRun: dryRun,\n\t\twait: wait,\n\t\twaitTimeout: waitTimeout,\n\t\tdisabledStages: disabledStages,\n\t}\n}\n\n\/\/ gatherForcedUpdates returns a map that for each to-be-updated component indicates if it needs a forced update.\n\/\/ there may be several reasons to do so: releases that differ only in secret values are forced so that pods will restart with the new values; releases that differ in namespace cannot be updated\nfunc (e *executor) gatherForcedUpdates(current, update Components) (map[string]bool, error) {\n\tneedForcedUpdate := map[string]bool{}\n\n\tfor _, cmp := range update {\n\t\t\/\/ releases that differ only in secret values are forced so that pods will restart with the new values\n\t\tfor _, curCmp := range current {\n\t\t\tif curCmp.Name == cmp.Name && isOnlySecretValueDiff(*curCmp, *cmp) {\n\t\t\t\tlogrus.Infof(\"%s differs in secrets values only; don't update but delete + create instead\", cmp.Name)\n\t\t\t\tneedForcedUpdate[cmp.Name] = true\n\t\t\t}\n\t\t}\n\t\tif curCmp := current[cmp.Name]; curCmp != nil {\n\t\t\tif curCmp.Namespace != cmp.Namespace {\n\t\t\t\tlogrus.Infof(\"%s differs in namespace; don't update but delete + create instead\", cmp.Name)\n\t\t\t\tneedForcedUpdate[cmp.Name] = true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn needForcedUpdate, nil\n}\n\n\/\/ Apply transforms the current state into the desired state\nfunc (e *executor) Apply(desired, current Components) error {\n\tcreate, update, delete := diff(desired, current)\n\n\t\/\/ some to-be-updated components need a delete + create instead\n\tneedForcedUpdate, err := e.gatherForcedUpdates(current, update)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ delete+create pairs will never work in dry run since the dry-run \"deleted\" component will exist in create\n\tif !e.dryRun {\n\t\tcreate, update, delete = integrateForcedUpdates(current, create, update, delete, needForcedUpdate)\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\"create\": len(create), \"update\": len(update), \"delete\": len(delete)}).Info(\"Apply desired state\")\n\n\tif err := logDifferences(current, create, update, delete, e.stageEnabled(\"update\"), e.stageEnabled(\"delete\"), logrus.Infof); err != nil {\n\t\treturn err\n\t}\n\n\tif e.stageEnabled(\"delete\") {\n\t\tfor _, cmp := range delete {\n\t\t\tif err := e.DeleteComponent(cmp); err != nil {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\"error\": err, \"component\": cmp}).Error(\"DeleteComponent failed\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif e.stageEnabled(\"update\") {\n\t\tfor _, cmp := range update {\n\t\t\tif err := e.UpdateComponent(cmp); err != nil {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\"error\": err, \"component\": cmp}).Error(\"UpdateComponent failed\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif e.stageEnabled(\"create\") {\n\t\tfor _, cmp := range create {\n\t\t\tif err := e.CreateComponent(cmp); err != nil {\n\t\t\t\tlogrus.WithFields(logrus.Fields{\"error\": err, \"component\": cmp}).Error(\"CreateComponent failed\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\"created\": len(create), \"updated\": len(update), \"deleted\": len(delete)}).Info(\"Applied desired state successfully\")\n\treturn nil\n}\n\nfunc (e *executor) stageEnabled(stage string) bool {\n\tfor _, stageDisabled := range e.disabledStages {\n\t\tif stageDisabled == stage {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ CreateComponent creates the given Component\nfunc (e *executor) CreateComponent(cmp *Component) error {\n\t\/\/ We need to ensure the chart is available on the local system. LoadChart will ensure\n\t\/\/ this is the case by downloading the chart if it is not there yet\n\tchartRef, err := cmp.FullChartRef()\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, chartPath, err := e.chartLoader.Load(chartRef)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trawValues, err := cmp.Configuration.YAML()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"release\": cmp.Name,\n\t\t\"chart\": cmp.Release.Chart,\n\t\t\"chartPath\": chartPath,\n\t\t\"rawValues\": rawValues,\n\t\t\"values\": cmp.Configuration,\n\t\t\"dryrun\": e.dryRun,\n\t}).Debug(\"Create component\")\n\n\tif len(cmp.SecretValues) > 0 && !e.dryRun {\n\t\terr = e.kubeSecrets.Write(cmp.Name, cmp.Namespace, cmp.SecretValues)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t_, err = e.helmClient.InstallRelease(\n\t\tchartPath,\n\t\tcmp.Namespace,\n\t\thelm.ValueOverrides([]byte(rawValues)),\n\t\thelm.ReleaseName(cmp.Name),\n\t\thelm.InstallDryRun(e.dryRun),\n\t\thelm.InstallReuseName(true),\n\t\thelm.InstallWait(e.wait),\n\t\thelm.InstallTimeout(e.waitTimeout),\n\t)\n\tif err != nil {\n\t\treturn errors.New(grpc.ErrorDesc(err))\n\t}\n\n\treturn nil\n}\n\n\/\/ UpdateComponent updates the given Component\nfunc (e *executor) UpdateComponent(cmp *Component) error {\n\t\/\/ We need to ensure the chart is available on the local system. LoadChart will ensure\n\t\/\/ this is the case by downloading the chart if it is not there yet\n\tchartRef, err := cmp.FullChartRef()\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, chartPath, err := e.chartLoader.Load(chartRef)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trawValues, err := cmp.Configuration.YAML()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !e.dryRun {\n\t\tif e.stageEnabled(\"deleteSecrets\") || len(cmp.SecretValues) > 0 {\n\t\t\terr = e.kubeSecrets.Delete(cmp.Name, cmp.Namespace)\n\t\t}\n\n\t\tif len(cmp.SecretValues) > 0 {\n\t\t\terr = e.kubeSecrets.Write(cmp.Name, cmp.Namespace, cmp.SecretValues)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"release\": cmp.Name,\n\t\t\"chart\": cmp.Release.Chart,\n\t\t\"chartPath\": chartPath,\n\t\t\"values\": cmp.Configuration,\n\t\t\"dryrun\": e.dryRun,\n\t}).Debug(\"Update component\")\n\n\t_, err = e.helmClient.UpdateRelease(\n\t\tcmp.Name,\n\t\tchartPath,\n\t\thelm.UpdateValueOverrides([]byte(rawValues)),\n\t\thelm.UpgradeDryRun(e.dryRun),\n\t\thelm.UpgradeWait(e.wait),\n\t\thelm.UpgradeTimeout(e.waitTimeout),\n\t)\n\tif err != nil {\n\t\treturn errors.New(grpc.ErrorDesc(err))\n\t}\n\n\treturn nil\n}\n\n\/\/ DeleteComponent removes the given Component\nfunc (e *executor) DeleteComponent(cmp *Component) error {\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"release\": cmp.Name,\n\t\t\"values\": cmp.Configuration,\n\t\t\"dryrun\": e.dryRun,\n\t}).Debug(\"Delete component\")\n\n\tif len(cmp.SecretValues) > 0 && !e.dryRun {\n\t\terr := e.kubeSecrets.Delete(cmp.Name, cmp.Namespace)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif !e.dryRun {\n\t\t_, err := e.helmClient.DeleteRelease(\n\t\t\tcmp.Name,\n\t\t\thelm.DeletePurge(true),\n\t\t\thelm.DeleteDryRun(e.dryRun),\n\t\t)\n\t\tif err != nil {\n\t\t\treturn errors.New(grpc.ErrorDesc(err))\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ diff takes desired and current components, and returns the components to create, update and delete to get from current to desired\nfunc diff(desired, current Components) (Components, Components, Components) {\n\tcreate := Components{}\n\tupdate := Components{}\n\tdelete := Components{}\n\n\tfor name, desiredCmp := range desired {\n\t\tif currentCmp, ok := current[name]; ok {\n\t\t\tif !desiredCmp.Equals(currentCmp) {\n\t\t\t\tupdate[name] = desiredCmp\n\t\t\t}\n\t\t} else {\n\t\t\tcreate[name] = desiredCmp\n\t\t}\n\t}\n\n\tfor name, currentCmp := range current {\n\t\tif _, ok := desired[name]; !ok {\n\t\t\tdelete[name] = currentCmp\n\t\t}\n\t}\n\n\treturn create, update, delete\n}\n\n\/\/ componentDiffText returns a diff as text. current and desired can be nil and indicate non-existence (e.g. current nil and desired non-nil means: create)\nfunc componentDiffText(current, desired *Component) (string, error) {\n\tcText, dText := []string{}, []string{}\n\tcName, dName := \"<none>\", \"<none>\"\n\tif current != nil {\n\t\tcs, err := json.MarshalIndent(current, \"\", \" \")\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tcText = difflib.SplitLines(string(cs))\n\t\tcName = current.Name\n\t}\n\tif desired != nil {\n\t\tds, err := json.MarshalIndent(desired, \"\", \" \")\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tdText = difflib.SplitLines(string(ds))\n\t\tdName = desired.Name\n\t}\n\n\treturn difflib.GetUnifiedDiffString(difflib.UnifiedDiff{\n\t\tA: cText,\n\t\tFromFile: \"Current \" + cName,\n\t\tB: dText,\n\t\tToFile: \"Desired \" + dName,\n\t\tContext: 3,\n\t})\n}\n\n\/\/ logDifferences logs the Create, Update and Delete w.r.t. current to logf\nfunc logDifferences(current, creates, updates, deletes Components, updateStageEnabled bool, deleteStageEnabled bool, logf func(format string, args ...interface{})) error {\n\tlog := func(action string, current, desired *Component) error {\n\t\tdiff, err := componentDiffText(current, desired)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlogf(\"%s\", action)\n\t\tif diff != \"\" {\n\t\t\tlogf(\"Diff:\\n%s\", diff)\n\t\t}\n\t\tif current != nil && desired != nil && !reflect.DeepEqual(current.SecretValues, desired.SecretValues) {\n\t\t\tlogrus.Info(\"Diff: secrets have changed, not shown here\")\n\t\t}\n\t\treturn nil\n\t}\n\n\t\/\/ Log diffs only if delete is applied\n\tif deleteStageEnabled {\n\t\tfor _, d := range deletes {\n\t\t\tlogf(\"Delete: %s\", d.Name)\n\t\t}\n\t}\n\n\tfor _, d := range creates {\n\t\tif err := log(\"Create: \"+d.Name, nil, d); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Log diffs only if upadete is applied\n\tif updateStageEnabled {\n\t\tfor _, d := range updates {\n\t\t\tc := current[d.Name]\n\t\t\tif err := log(\"Update: \"+d.Name, c, d); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ integrateForcedUpdates removes forceUpdate from update and inserts it into delete + create\nfunc integrateForcedUpdates(current, create, update, delete Components, forceUpdate map[string]bool) (Components, Components, Components) {\n\tfixUpdate := Components{}\n\tfor _, cmp := range update {\n\t\tif forceUpdate[cmp.Name] {\n\t\t\tif currentCmp, ok := current[cmp.Name]; ok {\n\t\t\t\tdelete[currentCmp.Name] = currentCmp \/\/ delete the current component\n\t\t\t}\n\t\t\tcreate[cmp.Name] = cmp \/\/ create cmp, by definition a desired component\n\t\t} else {\n\t\t\tfixUpdate[cmp.Name] = cmp\n\t\t}\n\t}\n\treturn create, fixUpdate, delete\n}\n\n\/\/ isOnlySecretValueDiff tells whether the given Components differ in their .SecretValues fields and are identical otherwise\nfunc isOnlySecretValueDiff(a, b Component) bool {\n\tsecValsEqual := reflect.DeepEqual(a.SecretValues, b.SecretValues)\n\ta.SecretValues = SecretValues{}\n\tb.SecretValues = SecretValues{}\n\treturn !secValsEqual && reflect.DeepEqual(a, b)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build integration\n\npackage proxy\n\nimport (\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"testing\"\n\n\t\"github.com\/hellofresh\/janus\/pkg\/router\"\n\t\"github.com\/hellofresh\/janus\/pkg\/test\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nvar tests = []struct {\n\tdescription string\n\tmethod string\n\turl string\n\texpectedContentType string\n\texpectedCode int\n}{\n\t{\n\t\tdescription: \"Get example route\",\n\t\tmethod: \"GET\",\n\t\turl: \"\/example\",\n\t\texpectedContentType: \"application\/json; charset=utf-8\",\n\t\texpectedCode: http.StatusOK,\n\t}, {\n\t\tdescription: \"Get invalid route\",\n\t\tmethod: \"GET\",\n\t\turl: \"\/invalid-route\",\n\t\texpectedContentType: \"text\/plain; charset=utf-8\",\n\t\texpectedCode: http.StatusNotFound,\n\t},\n\t{\n\t\tdescription: \"Get one posts - strip path\",\n\t\tmethod: \"GET\",\n\t\turl: \"\/posts\/1\",\n\t\texpectedContentType: \"application\/json; charset=utf-8\",\n\t\texpectedCode: http.StatusOK,\n\t},\n\t{\n\t\tdescription: \"Get one posts - append path\",\n\t\tmethod: \"GET\",\n\t\turl: \"\/append\",\n\t\texpectedContentType: \"application\/json; charset=utf-8\",\n\t\texpectedCode: http.StatusOK,\n\t},\n\t{\n\t\tdescription: \"Get one recipe - parameter interpolation\",\n\t\turl: \"\/api\/recipes\/5252b1b5301bbf46038b473f\",\n\t\texpectedContentType: \"application\/json; charset=utf-8\",\n\t\texpectedCode: http.StatusOK,\n\t},\n\t{\n\t\tdescription: \"No parameter to interpolate\",\n\t\turl: \"\/api\/recipes\/search\",\n\t\texpectedContentType: \"application\/json\",\n\t\texpectedCode: http.StatusNotFound,\n\t},\n}\n\nfunc TestSuccessfulProxy(t *testing.T) {\n\tt.Parallel()\n\n\tlog.SetOutput(ioutil.Discard)\n\n\tts := test.NewServer(createRegisterAndRouter())\n\tdefer ts.Close()\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.description, func(t *testing.T) {\n\t\t\tres, err := ts.Do(tc.method, tc.url, make(map[string]string))\n\t\t\tassert.NoError(t, err)\n\t\t\tif res != nil {\n\t\t\t\tdefer res.Body.Close()\n\t\t\t}\n\n\t\t\tassert.Equal(t, tc.expectedContentType, res.Header.Get(\"Content-Type\"), tc.description)\n\t\t\tassert.Equal(t, tc.expectedCode, res.StatusCode, tc.description)\n\t\t})\n\t}\n}\n\nfunc createProxyDefinitions() []*Definition {\n\treturn []*Definition{\n\t\t{\n\t\t\tListenPath: \"\/example\/*\",\n\t\t\tUpstreams: &Upstreams{\n\t\t\t\tBalancing: \"roundrobin\",\n\t\t\t\tTargets: []*Target{{Target: \"http:\/\/localhost:9089\/hello-world\"}},\n\t\t\t},\n\t\t\tMethods: []string{\"ALL\"},\n\t\t},\n\t\t{\n\t\t\tListenPath: \"\/posts\/*\",\n\t\t\tStripPath: true,\n\t\t\tUpstreams: &Upstreams{\n\t\t\t\tBalancing: \"roundrobin\",\n\t\t\t\tTargets: []*Target{{Target: \"http:\/\/localhost:9089\/posts\"}},\n\t\t\t},\n\t\t\tMethods: []string{\"ALL\"},\n\t\t},\n\t\t{\n\t\t\tListenPath: \"\/append\/*\",\n\t\t\tUpstreams: &Upstreams{\n\t\t\t\tBalancing: \"roundrobin\",\n\t\t\t\tTargets: []*Target{{Target: \"http:\/\/localhost:9089\/hello-world\"}},\n\t\t\t},\n\t\t\tAppendPath: true,\n\t\t\tMethods: []string{\"GET\"},\n\t\t},\n\t\t{\n\t\t\tListenPath: \"\/api\/recipes\/{id:[\\\\da-f]{24}}\",\n\t\t\tUpstreams: &Upstreams{\n\t\t\t\tBalancing: \"roundrobin\",\n\t\t\t\tTargets: []*Target{{Target: \"http:\/\/localhost:9089\/recipes\/{id}\"}},\n\t\t\t},\n\t\t\tMethods: []string{\"GET\"},\n\t\t},\n\t\t{\n\t\t\tListenPath: \"\/api\/recipes\/search\",\n\t\t\tUpstreams: &Upstreams{\n\t\t\t\tBalancing: \"roundrobin\",\n\t\t\t\tTargets: []*Target{{Target: \"http:\/\/localhost:9089\/recipes\/{id}\"}},\n\t\t\t},\n\t\t\tMethods: []string{\"GET\"},\n\t\t},\n\t}\n}\n\nfunc createRegisterAndRouter() router.Router {\n\tr := router.NewChiRouter()\n\tcreateRegister(r)\n\treturn r\n}\n\nfunc createRegister(r router.Router) *Register {\n\tregister := NewRegister(r, Params{})\n\n\tdefinitions := createProxyDefinitions()\n\tfor _, def := range definitions {\n\t\tregister.Add(def)\n\t}\n\n\treturn register\n}\n<commit_msg>Fixed Register call<commit_after>\/\/ +build integration\n\npackage proxy\n\nimport (\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"testing\"\n\n\t\"github.com\/hellofresh\/janus\/pkg\/router\"\n\t\"github.com\/hellofresh\/janus\/pkg\/test\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nvar tests = []struct {\n\tdescription string\n\tmethod string\n\turl string\n\texpectedContentType string\n\texpectedCode int\n}{\n\t{\n\t\tdescription: \"Get example route\",\n\t\tmethod: \"GET\",\n\t\turl: \"\/example\",\n\t\texpectedContentType: \"application\/json; charset=utf-8\",\n\t\texpectedCode: http.StatusOK,\n\t}, {\n\t\tdescription: \"Get invalid route\",\n\t\tmethod: \"GET\",\n\t\turl: \"\/invalid-route\",\n\t\texpectedContentType: \"text\/plain; charset=utf-8\",\n\t\texpectedCode: http.StatusNotFound,\n\t},\n\t{\n\t\tdescription: \"Get one posts - strip path\",\n\t\tmethod: \"GET\",\n\t\turl: \"\/posts\/1\",\n\t\texpectedContentType: \"application\/json; charset=utf-8\",\n\t\texpectedCode: http.StatusOK,\n\t},\n\t{\n\t\tdescription: \"Get one posts - append path\",\n\t\tmethod: \"GET\",\n\t\turl: \"\/append\",\n\t\texpectedContentType: \"application\/json; charset=utf-8\",\n\t\texpectedCode: http.StatusOK,\n\t},\n\t{\n\t\tdescription: \"Get one recipe - parameter interpolation\",\n\t\turl: \"\/api\/recipes\/5252b1b5301bbf46038b473f\",\n\t\texpectedContentType: \"application\/json; charset=utf-8\",\n\t\texpectedCode: http.StatusOK,\n\t},\n\t{\n\t\tdescription: \"No parameter to interpolate\",\n\t\turl: \"\/api\/recipes\/search\",\n\t\texpectedContentType: \"application\/json\",\n\t\texpectedCode: http.StatusNotFound,\n\t},\n}\n\nfunc TestSuccessfulProxy(t *testing.T) {\n\tt.Parallel()\n\n\tlog.SetOutput(ioutil.Discard)\n\n\tts := test.NewServer(createRegisterAndRouter())\n\tdefer ts.Close()\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.description, func(t *testing.T) {\n\t\t\tres, err := ts.Do(tc.method, tc.url, make(map[string]string))\n\t\t\tassert.NoError(t, err)\n\t\t\tif res != nil {\n\t\t\t\tdefer res.Body.Close()\n\t\t\t}\n\n\t\t\tassert.Equal(t, tc.expectedContentType, res.Header.Get(\"Content-Type\"), tc.description)\n\t\t\tassert.Equal(t, tc.expectedCode, res.StatusCode, tc.description)\n\t\t})\n\t}\n}\n\nfunc createProxyDefinitions() []*Definition {\n\treturn []*Definition{\n\t\t{\n\t\t\tListenPath: \"\/example\/*\",\n\t\t\tUpstreams: &Upstreams{\n\t\t\t\tBalancing: \"roundrobin\",\n\t\t\t\tTargets: []*Target{{Target: \"http:\/\/localhost:9089\/hello-world\"}},\n\t\t\t},\n\t\t\tMethods: []string{\"ALL\"},\n\t\t},\n\t\t{\n\t\t\tListenPath: \"\/posts\/*\",\n\t\t\tStripPath: true,\n\t\t\tUpstreams: &Upstreams{\n\t\t\t\tBalancing: \"roundrobin\",\n\t\t\t\tTargets: []*Target{{Target: \"http:\/\/localhost:9089\/posts\"}},\n\t\t\t},\n\t\t\tMethods: []string{\"ALL\"},\n\t\t},\n\t\t{\n\t\t\tListenPath: \"\/append\/*\",\n\t\t\tUpstreams: &Upstreams{\n\t\t\t\tBalancing: \"roundrobin\",\n\t\t\t\tTargets: []*Target{{Target: \"http:\/\/localhost:9089\/hello-world\"}},\n\t\t\t},\n\t\t\tAppendPath: true,\n\t\t\tMethods: []string{\"GET\"},\n\t\t},\n\t\t{\n\t\t\tListenPath: \"\/api\/recipes\/{id:[\\\\da-f]{24}}\",\n\t\t\tUpstreams: &Upstreams{\n\t\t\t\tBalancing: \"roundrobin\",\n\t\t\t\tTargets: []*Target{{Target: \"http:\/\/localhost:9089\/recipes\/{id}\"}},\n\t\t\t},\n\t\t\tMethods: []string{\"GET\"},\n\t\t},\n\t\t{\n\t\t\tListenPath: \"\/api\/recipes\/search\",\n\t\t\tUpstreams: &Upstreams{\n\t\t\t\tBalancing: \"roundrobin\",\n\t\t\t\tTargets: []*Target{{Target: \"http:\/\/localhost:9089\/recipes\/{id}\"}},\n\t\t\t},\n\t\t\tMethods: []string{\"GET\"},\n\t\t},\n\t}\n}\n\nfunc createRegisterAndRouter() router.Router {\n\tr := router.NewChiRouter()\n\tcreateRegister(r)\n\treturn r\n}\n\nfunc createRegister(r router.Router) *Register {\n\tregister := NewRegister(WithRouter(r))\n\n\tdefinitions := createProxyDefinitions()\n\tfor _, def := range definitions {\n\t\tregister.Add(def)\n\t}\n\n\treturn register\n}\n<|endoftext|>"} {"text":"<commit_before>package application\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/thoas\/gokvstores\"\n\t\"github.com\/thoas\/muxer\"\n\t\"github.com\/thoas\/picfit\/extractors\"\n\t\"github.com\/thoas\/picfit\/hash\"\n\t\"github.com\/thoas\/picfit\/image\"\n\t\"github.com\/thoas\/picfit\/util\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\nvar Extractors = map[string]extractors.Extractor{\n\t\"op\": extractors.Operation,\n\t\"fmt\": extractors.Format,\n\t\"url\": extractors.URL,\n\t\"q\": extractors.Quality,\n\t\"path\": extractors.Path,\n}\n\nfunc NotFoundHandler() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.Error(w, \"404 not found\", http.StatusNotFound)\n\t})\n}\n\ntype Options struct {\n\tFormat string\n\tQuality int\n}\n\ntype Request struct {\n\t*muxer.Request\n\tOperation *image.Operation\n\tConnection gokvstores.KVStoreConnection\n\tKey string\n\tURL *url.URL\n\tFilepath string\n\tOptions *Options\n}\n\ntype Handler func(muxer.Response, *Request)\n\nfunc (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tcon := App.KVStore.Connection()\n\tdefer con.Close()\n\n\trequest := muxer.NewRequest(req)\n\n\tfor k, v := range request.Params {\n\t\trequest.QueryString[k] = v\n\t}\n\n\tres := muxer.NewResponse(w)\n\n\textracted := map[string]interface{}{}\n\n\tfor key, extractor := range Extractors {\n\t\tresult, err := extractor(key, request)\n\n\t\tif err != nil {\n\t\t\tApp.Logger.Info(err)\n\n\t\t\tres.BadRequest()\n\t\t\treturn\n\t\t}\n\n\t\textracted[key] = result\n\t}\n\n\tsorted := util.SortMapString(request.QueryString)\n\n\tvalid := App.IsValidSign(sorted)\n\n\tdelete(sorted, \"sig\")\n\n\tserialized := hash.Serialize(sorted)\n\n\tkey := hash.Tokey(serialized)\n\n\tApp.Logger.Infof(\"Generating key %s from request: %s\", key, serialized)\n\n\tvar u *url.URL\n\tvar path string\n\tvar format string\n\tvar quality int\n\n\tvalue, ok := extracted[\"url\"]\n\n\tif ok && value != nil {\n\t\tu = value.(*url.URL)\n\t}\n\n\tvalue, ok = extracted[\"path\"]\n\n\tif ok {\n\t\tpath = value.(string)\n\t}\n\n\tif !valid || (u == nil && path == \"\") {\n\t\tres.BadRequest()\n\t\treturn\n\t}\n\n\tvalue, ok = extracted[\"fmt\"]\n\n\tif ok {\n\t\tformat = value.(string)\n\t}\n\n\tvalue, ok = extracted[\"q\"]\n\n\tif ok && value != nil {\n\t\tquality = value.(int)\n\t}\n\n\toptions := &Options{Quality: quality, Format: format}\n\n\th(res, &Request{\n\t\trequest,\n\t\textracted[\"op\"].(*image.Operation),\n\t\tcon,\n\t\tkey,\n\t\tu,\n\t\tpath,\n\t\toptions,\n\t})\n}\n\nvar ImageHandler Handler = func(res muxer.Response, req *Request) {\n\tfile, err := App.ImageFileFromRequest(req, true, true)\n\n\tutil.PanicIf(err)\n\n\tcontent, err := file.ToBytes()\n\n\tutil.PanicIf(err)\n\n\tres.SetHeaders(file.Headers, true)\n\tres.ResponseWriter.Write(content)\n}\n\nvar GetHandler Handler = func(res muxer.Response, req *Request) {\n\tfile, err := App.ImageFileFromRequest(req, false, false)\n\n\tutil.PanicIf(err)\n\n\tcontent, err := json.Marshal(map[string]string{\n\t\t\"filename\": file.Filename(),\n\t\t\"path\": file.Path(),\n\t\t\"url\": file.URL(),\n\t})\n\n\tutil.PanicIf(err)\n\n\tres.ContentType(\"application\/json\")\n\tres.ResponseWriter.Write(content)\n}\n\nvar RedirectHandler Handler = func(res muxer.Response, req *Request) {\n\tfile, err := App.ImageFileFromRequest(req, false, false)\n\n\tutil.PanicIf(err)\n\n\tres.PermanentRedirect(file.URL())\n}\n<commit_msg>Fix nil reference on fmt and path<commit_after>package application\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/thoas\/gokvstores\"\n\t\"github.com\/thoas\/muxer\"\n\t\"github.com\/thoas\/picfit\/extractors\"\n\t\"github.com\/thoas\/picfit\/hash\"\n\t\"github.com\/thoas\/picfit\/image\"\n\t\"github.com\/thoas\/picfit\/util\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\nvar Extractors = map[string]extractors.Extractor{\n\t\"op\": extractors.Operation,\n\t\"fmt\": extractors.Format,\n\t\"url\": extractors.URL,\n\t\"q\": extractors.Quality,\n\t\"path\": extractors.Path,\n}\n\nfunc NotFoundHandler() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\thttp.Error(w, \"404 not found\", http.StatusNotFound)\n\t})\n}\n\ntype Options struct {\n\tFormat string\n\tQuality int\n}\n\ntype Request struct {\n\t*muxer.Request\n\tOperation *image.Operation\n\tConnection gokvstores.KVStoreConnection\n\tKey string\n\tURL *url.URL\n\tFilepath string\n\tOptions *Options\n}\n\ntype Handler func(muxer.Response, *Request)\n\nfunc (h Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tcon := App.KVStore.Connection()\n\tdefer con.Close()\n\n\trequest := muxer.NewRequest(req)\n\n\tfor k, v := range request.Params {\n\t\trequest.QueryString[k] = v\n\t}\n\n\tres := muxer.NewResponse(w)\n\n\textracted := map[string]interface{}{}\n\n\tfor key, extractor := range Extractors {\n\t\tresult, err := extractor(key, request)\n\n\t\tif err != nil {\n\t\t\tApp.Logger.Info(err)\n\n\t\t\tres.BadRequest()\n\t\t\treturn\n\t\t}\n\n\t\textracted[key] = result\n\t}\n\n\tsorted := util.SortMapString(request.QueryString)\n\n\tvalid := App.IsValidSign(sorted)\n\n\tdelete(sorted, \"sig\")\n\n\tserialized := hash.Serialize(sorted)\n\n\tkey := hash.Tokey(serialized)\n\n\tApp.Logger.Infof(\"Generating key %s from request: %s\", key, serialized)\n\n\tvar u *url.URL\n\tvar path string\n\tvar format string\n\tvar quality int\n\n\tvalue, ok := extracted[\"url\"]\n\n\tif ok && value != nil {\n\t\tu = value.(*url.URL)\n\t}\n\n\tvalue, ok = extracted[\"path\"]\n\n\tif ok && value != nil {\n\t\tpath = value.(string)\n\t}\n\n\tif !valid || (u == nil && path == \"\") {\n\t\tres.BadRequest()\n\t\treturn\n\t}\n\n\tvalue, ok = extracted[\"fmt\"]\n\n\tif ok && value != nil {\n\t\tformat = value.(string)\n\t}\n\n\tvalue, ok = extracted[\"q\"]\n\n\tif ok && value != nil {\n\t\tquality = value.(int)\n\t}\n\n\toptions := &Options{Quality: quality, Format: format}\n\n\th(res, &Request{\n\t\trequest,\n\t\textracted[\"op\"].(*image.Operation),\n\t\tcon,\n\t\tkey,\n\t\tu,\n\t\tpath,\n\t\toptions,\n\t})\n}\n\nvar ImageHandler Handler = func(res muxer.Response, req *Request) {\n\tfile, err := App.ImageFileFromRequest(req, true, true)\n\n\tutil.PanicIf(err)\n\n\tcontent, err := file.ToBytes()\n\n\tutil.PanicIf(err)\n\n\tres.SetHeaders(file.Headers, true)\n\tres.ResponseWriter.Write(content)\n}\n\nvar GetHandler Handler = func(res muxer.Response, req *Request) {\n\tfile, err := App.ImageFileFromRequest(req, false, false)\n\n\tutil.PanicIf(err)\n\n\tcontent, err := json.Marshal(map[string]string{\n\t\t\"filename\": file.Filename(),\n\t\t\"path\": file.Path(),\n\t\t\"url\": file.URL(),\n\t})\n\n\tutil.PanicIf(err)\n\n\tres.ContentType(\"application\/json\")\n\tres.ResponseWriter.Write(content)\n}\n\nvar RedirectHandler Handler = func(res muxer.Response, req *Request) {\n\tfile, err := App.ImageFileFromRequest(req, false, false)\n\n\tutil.PanicIf(err)\n\n\tres.PermanentRedirect(file.URL())\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package transform invokes the golang HTML parser, executes the\n\/\/ individual transfomers (unless overridden), and prints the output\n\/\/ to the provided string.\npackage transform\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/ampproject\/amppackager\/pkg\/printer\"\n\t\"github.com\/ampproject\/amppackager\/pkg\/transformer\"\n\t\"golang.org\/x\/net\/html\"\n)\n\n\/\/ Transformer functions must be added here in order to be passed in from\n\/\/ the command line or invoked from other languages. Please keep alphabetical.\n\/\/\n\/\/ NOTE: The string mapping is necessary as a cross-over to allow\n\/\/ invocation from C\/C++.\nvar transformerFunctionMap = map[string]func(*transformer.Engine){\n\t\"AMPBoilerplateTransformer\": transformer.AMPBoilerplateTransformer,\n\t\"ReorderHeadTransformer\": transformer.ReorderHeadTransformer,\n\t\"ServerSideRenderingTransformer\": transformer.ServerSideRenderingTransformer,\n\t\"URLTransformer\": transformer.URLTransformer,\n}\n\n\/\/ The transformers to execute, in the order in which to execute them.\nvar transformers = []string{\n\t\"URLTransformer\",\n\t\"AMPBoilerplateTransformer\",\n\t\"ServerSideRenderingTransformer\",\n\t\"ReorderHeadTransformer\",\n}\n\n\/\/ Process will parse the given HTML byte array, applying all the\n\/\/ transformers and return the transformed HTML, or an error.\n\/\/ TODO(b\/112356610): Clean up these args into a proto.\nfunc Process(data, docURL string) (string, error) {\n\treturn ProcessSome(data, docURL, transformers)\n}\n\n\/\/ Process will parse the given HTML byte array, and execute the named\n\/\/ transformers, returning the transformed HTML, or an error.\n\/\/ TODO(b\/112356610): Clean up these args into a proto.\nfunc ProcessSome(data, docURL string, transformers []string) (string, error) {\n\tdoc, err := html.Parse(strings.NewReader(data))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfns := []func(*transformer.Engine){}\n\tfor _, val := range transformers {\n\t\tfn, ok := transformerFunctionMap[val]\n\t\tif !ok {\n\t\t\treturn \"\", fmt.Errorf(\"transformer doesn't exist: %s\", val)\n\t\t}\n\t\tfns = append(fns, fn)\n\t}\n\tu, err := url.Parse(docURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\te := transformer.Engine{doc, u, fns}\n\te.Transform()\n\tvar o strings.Builder\n\terr = printer.Print(&o, e.Doc)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn o.String(), nil\n}\n<commit_msg>Point diff.sh to new \/\/third_party\/amppackager location.<commit_after>\/\/ Package transform invokes the golang HTML parser, executes the\n\/\/ individual transfomers (unless overridden), and prints the output\n\/\/ to the provided string.\npackage transform\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/ampproject\/amppackager\/pkg\/printer\"\n\t\"github.com\/ampproject\/amppackager\/pkg\/transformer\"\n\t\"golang.org\/x\/net\/html\"\n)\n\n\/\/ Transformer functions must be added here in order to be passed in from\n\/\/ the command line or invoked from other languages. Please keep alphabetical.\n\/\/\n\/\/ NOTE: The string mapping is necessary as a cross-over to allow\n\/\/ invocation from C\/C++.\nvar transformerFunctionMap = map[string]func(*transformer.Engine){\n\t\"AMPBoilerplateTransformer\": transformer.AMPBoilerplateTransformer,\n\t\"ReorderHeadTransformer\": transformer.ReorderHeadTransformer,\n\t\"ServerSideRenderingTransformer\": transformer.ServerSideRenderingTransformer,\n\t\"TransformedIdentifierTransformer\": transformer.TransformedIdentifierTransformer,\n\t\"URLTransformer\": transformer.URLTransformer,\n}\n\n\/\/ The transformers to execute, in the order in which to execute them.\nvar transformers = []string{\n\t\"URLTransformer\",\n\t\"AMPBoilerplateTransformer\",\n\t\"ServerSideRenderingTransformer\",\n\t\"TransformedIdentifierTransformer\",\n\t\"ReorderHeadTransformer\",\n}\n\n\/\/ Process will parse the given HTML byte array, applying all the\n\/\/ transformers and return the transformed HTML, or an error.\n\/\/ TODO(b\/112356610): Clean up these args into a proto.\nfunc Process(data, docURL string) (string, error) {\n\treturn ProcessSome(data, docURL, transformers)\n}\n\n\/\/ Process will parse the given HTML byte array, and execute the named\n\/\/ transformers, returning the transformed HTML, or an error.\n\/\/ TODO(b\/112356610): Clean up these args into a proto.\nfunc ProcessSome(data, docURL string, transformers []string) (string, error) {\n\tdoc, err := html.Parse(strings.NewReader(data))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfns := []func(*transformer.Engine){}\n\tfor _, val := range transformers {\n\t\tfn, ok := transformerFunctionMap[val]\n\t\tif !ok {\n\t\t\treturn \"\", fmt.Errorf(\"transformer doesn't exist: %s\", val)\n\t\t}\n\t\tfns = append(fns, fn)\n\t}\n\tu, err := url.Parse(docURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\te := transformer.Engine{doc, u, fns}\n\te.Transform()\n\tvar o strings.Builder\n\terr = printer.Print(&o, e.Doc)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn o.String(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package log\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\tkitlog \"github.com\/go-kit\/kit\/log\"\n\tuuid \"github.com\/satori\/go.uuid\"\n)\n\ntype key int\n\nconst (\n\tKubernikusRequestID key = 0\n)\n\nfunc RequestIDHandler(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(rw http.ResponseWriter, request *http.Request) {\n\t\tif id := request.Context().Value(KubernikusRequestID); id == nil {\n\t\t\trequest = request.WithContext(context.WithValue(request.Context(), KubernikusRequestID, uuid.NewV4()))\n\t\t}\n\t\tnext.ServeHTTP(rw, request)\n\t})\n}\n\nfunc LoggingHandler(logger kitlog.Logger, next http.Handler) http.Handler {\n\tingress_logger := kitlog.With(logger, \"api\", \"ingress\")\n\treturn http.HandlerFunc(func(rw http.ResponseWriter, request *http.Request) {\n\t\twrapper := makeWrapper(rw)\n\n\t\tinner_logger := kitlog.With(ingress_logger)\n\n\t\tid := \"\"\n\t\tif reqId := request.Context().Value(KubernikusRequestID); reqId != nil {\n\t\t\tid = fmt.Sprintf(\"%s\", reqId)\n\t\t\tinner_logger = kitlog.With(inner_logger, \"id\", id)\n\t\t}\n\t\trequest = request.WithContext(context.WithValue(request.Context(), \"logger\", inner_logger))\n\n\t\tdefer func(begin time.Time) {\n\t\t\tvar keyvals = make([]interface{}, 0, 4)\n\n\t\t\tkeyvals = append(keyvals,\n\t\t\t\t\"status\", wrapper.Status(),\n\t\t\t\t\"size\", wrapper.Size(),\n\t\t\t\t\"took\", time.Since(begin),\n\t\t\t)\n\n\t\t\tif id != \"\" {\n\t\t\t\tkeyvals = append(keyvals, \"id\", id)\n\t\t\t}\n\n\t\t\tlog(inner_logger, request, keyvals...)\n\t\t}(time.Now())\n\n\t\tnext.ServeHTTP(wrapper, request)\n\t})\n}\n\nfunc log(logger kitlog.Logger, request *http.Request, extra ...interface{}) {\n\tvar keyvals []interface{}\n\n\tsource_ip, _, err := net.SplitHostPort(request.RemoteAddr)\n\tif err != nil {\n\t\tsource_ip = request.RemoteAddr\n\t}\n\n\tif source_ip != \"\" {\n\t\tkeyvals = append(keyvals, \"source_ip\", source_ip)\n\t}\n\n\tkeyvals = append(keyvals, \"method\", request.Method)\n\n\thost, host_port, err := net.SplitHostPort(request.Host)\n\tif err == nil {\n\t\tif host != \"\" {\n\t\t\tkeyvals = append(keyvals,\n\t\t\t\t\"host\", host)\n\t\t}\n\t\tif host_port != \"\" {\n\t\t\tkeyvals = append(keyvals,\n\t\t\t\t\"port\", host_port)\n\t\t}\n\t}\n\n\tkeyvals = append(keyvals, \"path\", request.URL.EscapedPath())\n\n\tfor i, k := range request.URL.Query() {\n\t\tkeyvals = append(keyvals, i, strings.Join(k, \",\"))\n\t}\n\n\tkeyvals = append(keyvals, \"user_agent\", request.UserAgent())\n\tkeyvals = append(keyvals, extra...)\n\tlogger.Log(keyvals...)\n}\n\n\/\/ this stuff is copied from gorilla\n\nfunc makeWrapper(w http.ResponseWriter) loggingResponseWriter {\n\tvar logger loggingResponseWriter = &responseLogger{w: w, status: http.StatusOK}\n\tif _, ok := w.(http.Hijacker); ok {\n\t\tlogger = &hijackLogger{responseLogger{w: w, status: http.StatusOK}}\n\t}\n\th, ok1 := logger.(http.Hijacker)\n\tc, ok2 := w.(http.CloseNotifier)\n\tif ok1 && ok2 {\n\t\treturn hijackCloseNotifier{logger, h, c}\n\t}\n\tif ok2 {\n\t\treturn &closeNotifyWriter{logger, c}\n\t}\n\treturn logger\n}\n\ntype hijackLogger struct {\n\tresponseLogger\n}\n\nfunc (l *hijackLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\th := l.responseLogger.w.(http.Hijacker)\n\tconn, rw, err := h.Hijack()\n\tif err == nil && l.responseLogger.status == 0 {\n\t\t\/\/ The status will be StatusSwitchingProtocols if there was no error and\n\t\t\/\/ WriteHeader has not been called yet\n\t\tl.responseLogger.status = http.StatusSwitchingProtocols\n\t}\n\treturn conn, rw, err\n}\n\ntype closeNotifyWriter struct {\n\tloggingResponseWriter\n\thttp.CloseNotifier\n}\n\ntype hijackCloseNotifier struct {\n\tloggingResponseWriter\n\thttp.Hijacker\n\thttp.CloseNotifier\n}\n\ntype loggingResponseWriter interface {\n\tcommonLoggingResponseWriter\n\thttp.Pusher\n}\n\ntype commonLoggingResponseWriter interface {\n\thttp.ResponseWriter\n\thttp.Flusher\n\tStatus() int\n\tSize() int\n}\n\ntype responseLogger struct {\n\tw http.ResponseWriter\n\tstatus int\n\tsize int\n}\n\nfunc (l *responseLogger) Header() http.Header {\n\treturn l.w.Header()\n}\n\nfunc (l *responseLogger) Write(b []byte) (int, error) {\n\tsize, err := l.w.Write(b)\n\tl.size += size\n\treturn size, err\n}\n\nfunc (l *responseLogger) WriteHeader(s int) {\n\tl.w.WriteHeader(s)\n\tl.status = s\n}\n\nfunc (l *responseLogger) Status() int {\n\treturn l.status\n}\n\nfunc (l *responseLogger) Size() int {\n\treturn l.size\n}\n\nfunc (l *responseLogger) Flush() {\n\tf, ok := l.w.(http.Flusher)\n\tif ok {\n\t\tf.Flush()\n\t}\n}\n\nfunc (l *responseLogger) Push(target string, opts *http.PushOptions) error {\n\tp, ok := l.w.(http.Pusher)\n\tif !ok {\n\t\treturn fmt.Errorf(\"responseLogger does not implement http.Pusher\")\n\t}\n\treturn p.Push(target, opts)\n}\n<commit_msg>removes dublicate id<commit_after>package log\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\tkitlog \"github.com\/go-kit\/kit\/log\"\n\tuuid \"github.com\/satori\/go.uuid\"\n)\n\ntype key int\n\nconst (\n\tKubernikusRequestID key = 0\n)\n\nfunc RequestIDHandler(next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(rw http.ResponseWriter, request *http.Request) {\n\t\tif id := request.Context().Value(KubernikusRequestID); id == nil {\n\t\t\trequest = request.WithContext(context.WithValue(request.Context(), KubernikusRequestID, uuid.NewV4()))\n\t\t}\n\t\tnext.ServeHTTP(rw, request)\n\t})\n}\n\nfunc LoggingHandler(logger kitlog.Logger, next http.Handler) http.Handler {\n\tingress_logger := kitlog.With(logger, \"api\", \"ingress\")\n\treturn http.HandlerFunc(func(rw http.ResponseWriter, request *http.Request) {\n\t\twrapper := makeWrapper(rw)\n\n\t\tinner_logger := kitlog.With(ingress_logger)\n\n\t\tid := \"\"\n\t\tif reqId := request.Context().Value(KubernikusRequestID); reqId != nil {\n\t\t\tid = fmt.Sprintf(\"%s\", reqId)\n\t\t\tinner_logger = kitlog.With(inner_logger, \"id\", id)\n\t\t}\n\t\trequest = request.WithContext(context.WithValue(request.Context(), \"logger\", inner_logger))\n\n\t\tdefer func(begin time.Time) {\n\t\t\tvar keyvals = make([]interface{}, 0, 4)\n\n\t\t\tkeyvals = append(keyvals,\n\t\t\t\t\"status\", wrapper.Status(),\n\t\t\t\t\"size\", wrapper.Size(),\n\t\t\t\t\"took\", time.Since(begin),\n\t\t\t)\n\n\t\t\tlog(inner_logger, request, keyvals...)\n\t\t}(time.Now())\n\n\t\tnext.ServeHTTP(wrapper, request)\n\t})\n}\n\nfunc log(logger kitlog.Logger, request *http.Request, extra ...interface{}) {\n\tvar keyvals []interface{}\n\n\tsource_ip, _, err := net.SplitHostPort(request.RemoteAddr)\n\tif err != nil {\n\t\tsource_ip = request.RemoteAddr\n\t}\n\n\tif source_ip != \"\" {\n\t\tkeyvals = append(keyvals, \"source_ip\", source_ip)\n\t}\n\n\tkeyvals = append(keyvals, \"method\", request.Method)\n\n\thost, host_port, err := net.SplitHostPort(request.Host)\n\tif err == nil {\n\t\tif host != \"\" {\n\t\t\tkeyvals = append(keyvals,\n\t\t\t\t\"host\", host)\n\t\t}\n\t\tif host_port != \"\" {\n\t\t\tkeyvals = append(keyvals,\n\t\t\t\t\"port\", host_port)\n\t\t}\n\t}\n\n\tkeyvals = append(keyvals, \"path\", request.URL.EscapedPath())\n\n\tfor i, k := range request.URL.Query() {\n\t\tkeyvals = append(keyvals, i, strings.Join(k, \",\"))\n\t}\n\n\tkeyvals = append(keyvals, \"user_agent\", request.UserAgent())\n\tkeyvals = append(keyvals, extra...)\n\tlogger.Log(keyvals...)\n}\n\n\/\/ this stuff is copied from gorilla\n\nfunc makeWrapper(w http.ResponseWriter) loggingResponseWriter {\n\tvar logger loggingResponseWriter = &responseLogger{w: w, status: http.StatusOK}\n\tif _, ok := w.(http.Hijacker); ok {\n\t\tlogger = &hijackLogger{responseLogger{w: w, status: http.StatusOK}}\n\t}\n\th, ok1 := logger.(http.Hijacker)\n\tc, ok2 := w.(http.CloseNotifier)\n\tif ok1 && ok2 {\n\t\treturn hijackCloseNotifier{logger, h, c}\n\t}\n\tif ok2 {\n\t\treturn &closeNotifyWriter{logger, c}\n\t}\n\treturn logger\n}\n\ntype hijackLogger struct {\n\tresponseLogger\n}\n\nfunc (l *hijackLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\th := l.responseLogger.w.(http.Hijacker)\n\tconn, rw, err := h.Hijack()\n\tif err == nil && l.responseLogger.status == 0 {\n\t\t\/\/ The status will be StatusSwitchingProtocols if there was no error and\n\t\t\/\/ WriteHeader has not been called yet\n\t\tl.responseLogger.status = http.StatusSwitchingProtocols\n\t}\n\treturn conn, rw, err\n}\n\ntype closeNotifyWriter struct {\n\tloggingResponseWriter\n\thttp.CloseNotifier\n}\n\ntype hijackCloseNotifier struct {\n\tloggingResponseWriter\n\thttp.Hijacker\n\thttp.CloseNotifier\n}\n\ntype loggingResponseWriter interface {\n\tcommonLoggingResponseWriter\n\thttp.Pusher\n}\n\ntype commonLoggingResponseWriter interface {\n\thttp.ResponseWriter\n\thttp.Flusher\n\tStatus() int\n\tSize() int\n}\n\ntype responseLogger struct {\n\tw http.ResponseWriter\n\tstatus int\n\tsize int\n}\n\nfunc (l *responseLogger) Header() http.Header {\n\treturn l.w.Header()\n}\n\nfunc (l *responseLogger) Write(b []byte) (int, error) {\n\tsize, err := l.w.Write(b)\n\tl.size += size\n\treturn size, err\n}\n\nfunc (l *responseLogger) WriteHeader(s int) {\n\tl.w.WriteHeader(s)\n\tl.status = s\n}\n\nfunc (l *responseLogger) Status() int {\n\treturn l.status\n}\n\nfunc (l *responseLogger) Size() int {\n\treturn l.size\n}\n\nfunc (l *responseLogger) Flush() {\n\tf, ok := l.w.(http.Flusher)\n\tif ok {\n\t\tf.Flush()\n\t}\n}\n\nfunc (l *responseLogger) Push(target string, opts *http.PushOptions) error {\n\tp, ok := l.w.(http.Pusher)\n\tif !ok {\n\t\treturn fmt.Errorf(\"responseLogger does not implement http.Pusher\")\n\t}\n\treturn p.Push(target, opts)\n}\n<|endoftext|>"} {"text":"<commit_before>package utils\n\nimport (\n\t\"fmt\"\n\n\tbuildv1alpha1 \"github.com\/knative\/build\/pkg\/apis\/build\/v1alpha1\"\n\tservingv1alpha1 \"github.com\/knative\/serving\/pkg\/apis\/serving\/v1alpha1\"\n\truntimev1alpha1 \"github.com\/kyma-incubator\/runtime\/pkg\/apis\/runtime\/v1alpha1\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n)\n\n\/\/ GetServiceSpec gets ServiceSpec for a function\nfunc GetServiceSpec(imageName string, fn runtimev1alpha1.Function, rnInfo *RuntimeInfo) servingv1alpha1.ServiceSpec {\n\tdefaultMode := int32(420)\n\tbuildContainer := getBuildContainer(imageName, fn, rnInfo)\n\tvolumes := []corev1.Volume{\n\t\t{\n\t\t\tName: \"dockerfile-vol\",\n\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\tDefaultMode: &defaultMode,\n\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\tName: rnInfo.DockerFileConfigMapName(fn.Spec.Runtime),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"func-vol\",\n\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\tDefaultMode: &defaultMode,\n\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\tName: fn.Name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t\/\/ TODO: Make it constant for nodejs8\/nodejs6\n\tenvVarsForRevision := []corev1.EnvVar{\n\t\t{\n\t\t\tName: \"FUNC_HANDLER\",\n\t\t\tValue: \"main\",\n\t\t},\n\t\t{\n\t\t\tName: \"MOD_NAME\",\n\t\t\tValue: \"handler\",\n\t\t},\n\t\t{\n\t\t\tName: \"FUNC_TIMEOUT\",\n\t\t\tValue: \"180\",\n\t\t},\n\t\t{\n\t\t\tName: \"FUNC_RUNTIME\",\n\t\t\tValue: \"nodejs8\",\n\t\t},\n\t\t{\n\t\t\tName: \"FUNC_MEMORY_LIMIT\",\n\t\t\tValue: \"128Mi\",\n\t\t},\n\t\t{\n\t\t\tName: \"FUNC_PORT\",\n\t\t\tValue: \"8080\",\n\t\t},\n\t\t{\n\t\t\tName: \"NODE_PATH\",\n\t\t\tValue: \"$(KUBELESS_INSTALL_VOLUME)\/node_modules\",\n\t\t},\n\t}\n\n\treturn servingv1alpha1.ServiceSpec{\n\t\tRunLatest: &servingv1alpha1.RunLatestType{\n\t\t\tConfiguration: servingv1alpha1.ConfigurationSpec{\n\t\t\t\tBuild: &servingv1alpha1.RawExtension{\n\t\t\t\t\tBuildSpec: &buildv1alpha1.BuildSpec{\n\t\t\t\t\t\tServiceAccountName: rnInfo.ServiceAccount,\n\t\t\t\t\t\tSteps: []corev1.Container{\n\t\t\t\t\t\t\t*buildContainer,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tVolumes: volumes,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tRevisionTemplate: servingv1alpha1.RevisionTemplateSpec{\n\t\t\t\t\tSpec: servingv1alpha1.RevisionSpec{\n\t\t\t\t\t\tContainer: corev1.Container{\n\t\t\t\t\t\t\tImage: imageName,\n\t\t\t\t\t\t\tEnv: envVarsForRevision,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc getBuildContainer(imageName string, fn runtimev1alpha1.Function, riUtil *RuntimeInfo) *corev1.Container {\n\tdestination := fmt.Sprintf(\"--destination=%s\", imageName)\n\tbuildContainer := corev1.Container{\n\t\tName: \"build-and-push\",\n\t\tImage: \"gcr.io\/kaniko-project\/executor\",\n\t\tArgs: []string{\"--dockerfile=\/workspace\/Dockerfile\", destination},\n\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t{\n\t\t\t\tName: \"dockerfile-vol\", \/\/TODO: make it configurable\n\t\t\t\tMountPath: \"\/workspace\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"func-vol\",\n\t\t\t\tMountPath: \"\/src\",\n\t\t\t},\n\t\t},\n\t}\n\n\treturn &buildContainer\n}\n<commit_msg>add functionName as container name in service<commit_after>package utils\n\nimport (\n\t\"fmt\"\n\n\tbuildv1alpha1 \"github.com\/knative\/build\/pkg\/apis\/build\/v1alpha1\"\n\tservingv1alpha1 \"github.com\/knative\/serving\/pkg\/apis\/serving\/v1alpha1\"\n\truntimev1alpha1 \"github.com\/kyma-incubator\/runtime\/pkg\/apis\/runtime\/v1alpha1\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n)\n\n\/\/ GetServiceSpec gets ServiceSpec for a function\nfunc GetServiceSpec(imageName string, fn runtimev1alpha1.Function, rnInfo *RuntimeInfo) servingv1alpha1.ServiceSpec {\n\tdefaultMode := int32(420)\n\tbuildContainer := getBuildContainer(imageName, fn, rnInfo)\n\tvolumes := []corev1.Volume{\n\t\t{\n\t\t\tName: \"dockerfile-vol\",\n\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\tDefaultMode: &defaultMode,\n\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\tName: rnInfo.DockerFileConfigMapName(fn.Spec.Runtime),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"func-vol\",\n\t\t\tVolumeSource: corev1.VolumeSource{\n\t\t\t\tConfigMap: &corev1.ConfigMapVolumeSource{\n\t\t\t\t\tDefaultMode: &defaultMode,\n\t\t\t\t\tLocalObjectReference: corev1.LocalObjectReference{\n\t\t\t\t\t\tName: fn.Name,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t\/\/ TODO: Make it constant for nodejs8\/nodejs6\n\tenvVarsForRevision := []corev1.EnvVar{\n\t\t{\n\t\t\tName: \"FUNC_HANDLER\",\n\t\t\tValue: \"main\",\n\t\t},\n\t\t{\n\t\t\tName: \"MOD_NAME\",\n\t\t\tValue: \"handler\",\n\t\t},\n\t\t{\n\t\t\tName: \"FUNC_TIMEOUT\",\n\t\t\tValue: \"180\",\n\t\t},\n\t\t{\n\t\t\tName: \"FUNC_RUNTIME\",\n\t\t\tValue: \"nodejs8\",\n\t\t},\n\t\t{\n\t\t\tName: \"FUNC_MEMORY_LIMIT\",\n\t\t\tValue: \"128Mi\",\n\t\t},\n\t\t{\n\t\t\tName: \"FUNC_PORT\",\n\t\t\tValue: \"8080\",\n\t\t},\n\t\t{\n\t\t\tName: \"NODE_PATH\",\n\t\t\tValue: \"$(KUBELESS_INSTALL_VOLUME)\/node_modules\",\n\t\t},\n\t}\n\n\treturn servingv1alpha1.ServiceSpec{\n\t\tRunLatest: &servingv1alpha1.RunLatestType{\n\t\t\tConfiguration: servingv1alpha1.ConfigurationSpec{\n\t\t\t\tBuild: &servingv1alpha1.RawExtension{\n\t\t\t\t\tBuildSpec: &buildv1alpha1.BuildSpec{\n\t\t\t\t\t\tServiceAccountName: rnInfo.ServiceAccount,\n\t\t\t\t\t\tSteps: []corev1.Container{\n\t\t\t\t\t\t\t*buildContainer,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tVolumes: volumes,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tRevisionTemplate: servingv1alpha1.RevisionTemplateSpec{\n\t\t\t\t\tSpec: servingv1alpha1.RevisionSpec{\n\t\t\t\t\t\tContainer: corev1.Container{\n\t\t\t\t\t\t\tImage: imageName,\n\t\t\t\t\t\t\tEnv: envVarsForRevision,\n\t\t\t\t\t\t\tName: fn.Name,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc getBuildContainer(imageName string, fn runtimev1alpha1.Function, riUtil *RuntimeInfo) *corev1.Container {\n\tdestination := fmt.Sprintf(\"--destination=%s\", imageName)\n\tbuildContainer := corev1.Container{\n\t\tName: \"build-and-push\",\n\t\tImage: \"gcr.io\/kaniko-project\/executor\",\n\t\tArgs: []string{\"--dockerfile=\/workspace\/Dockerfile\", destination},\n\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t{\n\t\t\t\tName: \"dockerfile-vol\", \/\/TODO: make it configurable\n\t\t\t\tMountPath: \"\/workspace\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"func-vol\",\n\t\t\t\tMountPath: \"\/src\",\n\t\t\t},\n\t\t},\n\t}\n\n\treturn &buildContainer\n}\n<|endoftext|>"} {"text":"<commit_before>package vfs\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\n\t\"github.com\/cozy\/cozy-stack\/pkg\/consts\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\/mango\"\n)\n\ntype couchdbIndexer struct {\n\tdb couchdb.Database\n}\n\n\/\/ NewCouchdbIndexer creates an Indexer instance based on couchdb to store\n\/\/ files and directories metadata and index them.\nfunc NewCouchdbIndexer(db couchdb.Database) Indexer {\n\treturn &couchdbIndexer{\n\t\tdb: db,\n\t}\n}\n\nfunc (c *couchdbIndexer) InitIndex() error {\n\terr := couchdb.CreateNamedDocWithDB(c.db, &DirDoc{\n\t\tDocName: \"\",\n\t\tType: consts.DirType,\n\t\tDocID: consts.RootDirID,\n\t\tFullpath: \"\/\",\n\t\tDirID: \"\",\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = couchdb.CreateNamedDocWithDB(c.db, &DirDoc{\n\t\tDocName: path.Base(TrashDirName),\n\t\tType: consts.DirType,\n\t\tDocID: consts.TrashDirID,\n\t\tFullpath: TrashDirName,\n\t\tDirID: consts.RootDirID,\n\t})\n\tif err != nil && !couchdb.IsConflictError(err) {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *couchdbIndexer) DiskUsage() (int64, error) {\n\tvar doc couchdb.ViewResponse\n\terr := couchdb.ExecView(c.db, consts.DiskUsageView, &couchdb.ViewRequest{\n\t\tReduce: true,\n\t}, &doc)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif len(doc.Rows) == 0 {\n\t\treturn 0, nil\n\t}\n\t\/\/ Reduce of _count should give us a number value\n\tf64, ok := doc.Rows[0].Value.(float64)\n\tif !ok {\n\t\treturn 0, ErrWrongCouchdbState\n\t}\n\treturn int64(f64), nil\n}\n\nfunc (c *couchdbIndexer) CreateFileDoc(doc *FileDoc) error {\n\treturn couchdb.CreateDoc(c.db, doc)\n}\n\nfunc (c *couchdbIndexer) CreateNamedFileDoc(doc *FileDoc) error {\n\treturn couchdb.CreateNamedDoc(c.db, doc)\n}\n\nfunc (c *couchdbIndexer) UpdateFileDoc(olddoc, newdoc *FileDoc) error {\n\tnewdoc.SetID(olddoc.ID())\n\tnewdoc.SetRev(olddoc.Rev())\n\treturn couchdb.UpdateDoc(c.db, newdoc)\n}\n\nfunc (c *couchdbIndexer) UpdateFileDocs(docs []*FileDoc) error {\n\tif len(docs) == 0 {\n\t\treturn nil\n\t}\n\tcouchdocs := make([]couchdb.Doc, len(docs))\n\tfor i, doc := range docs {\n\t\tcouchdocs[i] = doc\n\t}\n\treturn couchdb.BulkUpdateDoc(c.db, couchdocs)\n}\n\nfunc (c *couchdbIndexer) DeleteFileDoc(doc *FileDoc) error {\n\treturn couchdb.DeleteDoc(c.db, doc)\n}\n\nfunc (c *couchdbIndexer) CreateDirDoc(doc *DirDoc) error {\n\treturn couchdb.CreateDoc(c.db, doc)\n}\n\nfunc (c *couchdbIndexer) CreateNamedDirDoc(doc *DirDoc) error {\n\treturn couchdb.CreateNamedDoc(c.db, doc)\n}\n\nfunc (c *couchdbIndexer) UpdateDirDoc(olddoc, newdoc *DirDoc) error {\n\tnewdoc.SetID(olddoc.ID())\n\tnewdoc.SetRev(olddoc.Rev())\n\tif newdoc.Fullpath != olddoc.Fullpath {\n\t\tif err := c.moveDir(olddoc.Fullpath, newdoc.Fullpath); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn couchdb.UpdateDoc(c.db, newdoc)\n}\n\nfunc (c *couchdbIndexer) DeleteDirDoc(doc *DirDoc) error {\n\treturn couchdb.DeleteDoc(c.db, doc)\n}\n\n\/\/ @TODO use couchdb bulk updates instead\nfunc (c *couchdbIndexer) moveDir(oldpath, newpath string) error {\n\tvar children []*DirDoc\n\tsel := mango.StartWith(\"path\", oldpath+\"\/\")\n\treq := &couchdb.FindRequest{\n\t\tUseIndex: \"dir-by-path\",\n\t\tSelector: sel,\n\t}\n\terr := couchdb.FindDocs(c.db, consts.Files, req, &children)\n\tif err != nil || len(children) == 0 {\n\t\treturn err\n\t}\n\n\terrc := make(chan error)\n\n\tfor _, child := range children {\n\t\tgo func(child *DirDoc) {\n\t\t\tif !strings.HasPrefix(child.Fullpath, oldpath+\"\/\") {\n\t\t\t\terrc <- fmt.Errorf(\"Child has wrong base directory\")\n\t\t\t} else {\n\t\t\t\tchild.Fullpath = path.Join(newpath, child.Fullpath[len(oldpath)+1:])\n\t\t\t\terrc <- couchdb.UpdateDoc(c.db, child)\n\t\t\t}\n\t\t}(child)\n\t}\n\n\tfor range children {\n\t\tif e := <-errc; e != nil {\n\t\t\terr = e\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc (c *couchdbIndexer) DirByID(fileID string) (*DirDoc, error) {\n\tdoc := &DirDoc{}\n\terr := couchdb.GetDoc(c.db, consts.Files, fileID, doc)\n\tif couchdb.IsNotFoundError(err) {\n\t\terr = os.ErrNotExist\n\t}\n\tif err != nil {\n\t\tif fileID == consts.RootDirID {\n\t\t\tpanic(\"Root directory is not in database\")\n\t\t}\n\t\tif fileID == consts.TrashDirID {\n\t\t\tpanic(\"Trash directory is not in database\")\n\t\t}\n\t\treturn nil, err\n\t}\n\tif doc.Type != consts.DirType {\n\t\treturn nil, os.ErrNotExist\n\t}\n\treturn doc, err\n}\n\nfunc (c *couchdbIndexer) DirByPath(name string) (*DirDoc, error) {\n\tif !path.IsAbs(name) {\n\t\treturn nil, ErrNonAbsolutePath\n\t}\n\tvar docs []*DirDoc\n\tsel := mango.Equal(\"path\", path.Clean(name))\n\treq := &couchdb.FindRequest{\n\t\tUseIndex: \"dir-by-path\",\n\t\tSelector: sel,\n\t\tLimit: 1,\n\t}\n\terr := couchdb.FindDocs(c.db, consts.Files, req, &docs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(docs) == 0 {\n\t\tif name == \"\/\" {\n\t\t\tpanic(\"Root directory is not in database\")\n\t\t}\n\t\treturn nil, os.ErrNotExist\n\t}\n\treturn docs[0], nil\n}\n\nfunc (c *couchdbIndexer) FileByID(fileID string) (*FileDoc, error) {\n\tdoc := &FileDoc{}\n\terr := couchdb.GetDoc(c.db, consts.Files, fileID, doc)\n\tif couchdb.IsNotFoundError(err) {\n\t\treturn nil, os.ErrNotExist\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif doc.Type != consts.FileType {\n\t\treturn nil, os.ErrNotExist\n\t}\n\treturn doc, nil\n}\n\nfunc (c *couchdbIndexer) FileByPath(name string) (*FileDoc, error) {\n\tif !path.IsAbs(name) {\n\t\treturn nil, ErrNonAbsolutePath\n\t}\n\tparent, err := c.DirByPath(path.Dir(name))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ consts.FilesByParentView keys are [parentID, type, name]\n\tvar res couchdb.ViewResponse\n\terr = couchdb.ExecView(c.db, consts.FilesByParentView, &couchdb.ViewRequest{\n\t\tKey: []string{parent.DocID, consts.FileType, path.Base(name)},\n\t\tIncludeDocs: true,\n\t}, &res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(res.Rows) == 0 {\n\t\treturn nil, os.ErrNotExist\n\t}\n\n\tvar fdoc FileDoc\n\terr = json.Unmarshal(*res.Rows[0].Doc, &fdoc)\n\treturn &fdoc, err\n}\n\nfunc (c *couchdbIndexer) FilePath(doc *FileDoc) (string, error) {\n\tvar parentPath string\n\tif doc.DirID == consts.RootDirID {\n\t\tparentPath = \"\/\"\n\t} else if doc.DirID == consts.TrashDirID {\n\t\tparentPath = TrashDirName\n\t} else {\n\t\tparent, err := c.DirByID(doc.DirID)\n\t\tif err != nil {\n\t\t\treturn \"\", ErrParentDoesNotExist\n\t\t}\n\t\tparentPath = parent.Fullpath\n\t}\n\treturn path.Join(parentPath, doc.DocName), nil\n}\n\nfunc (c *couchdbIndexer) DirOrFileByID(fileID string) (*DirDoc, *FileDoc, error) {\n\tdirOrFile := &DirOrFileDoc{}\n\terr := couchdb.GetDoc(c.db, consts.Files, fileID, dirOrFile)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdirDoc, fileDoc := dirOrFile.Refine()\n\treturn dirDoc, fileDoc, nil\n}\n\nfunc (c *couchdbIndexer) DirOrFileByPath(name string) (*DirDoc, *FileDoc, error) {\n\tdirDoc, err := c.DirByPath(name)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn nil, nil, err\n\t}\n\tif err == nil {\n\t\treturn dirDoc, nil, nil\n\t}\n\tfileDoc, err := c.FileByPath(name)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn nil, nil, err\n\t}\n\tif err == nil {\n\t\treturn nil, fileDoc, nil\n\t}\n\treturn nil, nil, err\n}\n\nfunc (c *couchdbIndexer) DirIterator(doc *DirDoc, opts *IteratorOptions) DirIterator {\n\treturn NewIterator(c.db, doc, opts)\n}\n\nfunc (c *couchdbIndexer) DirBatch(doc *DirDoc, cursor couchdb.Cursor) ([]DirOrFileDoc, error) {\n\t\/\/ consts.FilesByParentView keys are [parentID, type, name]\n\treq := couchdb.ViewRequest{\n\t\tStartKey: []string{doc.DocID, \"\"},\n\t\tEndKey: []string{doc.DocID, couchdb.MaxString},\n\t\tIncludeDocs: true,\n\t}\n\tvar res couchdb.ViewResponse\n\tcursor.ApplyTo(&req)\n\terr := couchdb.ExecView(c.db, consts.FilesByParentView, &req, &res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcursor.UpdateFrom(&res)\n\n\tdocs := make([]DirOrFileDoc, len(res.Rows))\n\tfor i, row := range res.Rows {\n\t\tvar doc DirOrFileDoc\n\t\terr := json.Unmarshal(*row.Doc, &doc)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdocs[i] = doc\n\t}\n\n\treturn docs, nil\n}\n\nfunc (c *couchdbIndexer) DirLength(doc *DirDoc) (int, error) {\n\treq := couchdb.ViewRequest{\n\t\tStartKey: []string{doc.DocID, \"\"},\n\t\tEndKey: []string{doc.DocID, couchdb.MaxString},\n\t\tReduce: true,\n\t\tGroupLevel: 1,\n\t}\n\tvar res couchdb.ViewResponse\n\terr := couchdb.ExecView(c.db, consts.FilesByParentView, &req, &res)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif len(res.Rows) == 0 {\n\t\treturn 0, nil\n\t}\n\n\t\/\/ Reduce of _count should give us a number value\n\tf64, ok := res.Rows[0].Value.(float64)\n\tif !ok {\n\t\treturn 0, ErrWrongCouchdbState\n\t}\n\treturn int(f64), nil\n}\n\nfunc (c *couchdbIndexer) DirChildExists(dirID, name string) (bool, error) {\n\tvar res couchdb.ViewResponse\n\n\t\/\/ consts.FilesByParentView keys are [parentID, type, name]\n\terr := couchdb.ExecView(c.db, consts.FilesByParentView, &couchdb.ViewRequest{\n\t\tKey: []string{dirID, consts.FileType, name},\n\t\tIncludeDocs: false,\n\t}, &res)\n\n\treturn len(res.Rows) > 0, err\n}\n<commit_msg>Check files and directories as child<commit_after>package vfs\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\n\t\"github.com\/cozy\/cozy-stack\/pkg\/consts\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\/mango\"\n)\n\ntype couchdbIndexer struct {\n\tdb couchdb.Database\n}\n\n\/\/ NewCouchdbIndexer creates an Indexer instance based on couchdb to store\n\/\/ files and directories metadata and index them.\nfunc NewCouchdbIndexer(db couchdb.Database) Indexer {\n\treturn &couchdbIndexer{\n\t\tdb: db,\n\t}\n}\n\nfunc (c *couchdbIndexer) InitIndex() error {\n\terr := couchdb.CreateNamedDocWithDB(c.db, &DirDoc{\n\t\tDocName: \"\",\n\t\tType: consts.DirType,\n\t\tDocID: consts.RootDirID,\n\t\tFullpath: \"\/\",\n\t\tDirID: \"\",\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = couchdb.CreateNamedDocWithDB(c.db, &DirDoc{\n\t\tDocName: path.Base(TrashDirName),\n\t\tType: consts.DirType,\n\t\tDocID: consts.TrashDirID,\n\t\tFullpath: TrashDirName,\n\t\tDirID: consts.RootDirID,\n\t})\n\tif err != nil && !couchdb.IsConflictError(err) {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *couchdbIndexer) DiskUsage() (int64, error) {\n\tvar doc couchdb.ViewResponse\n\terr := couchdb.ExecView(c.db, consts.DiskUsageView, &couchdb.ViewRequest{\n\t\tReduce: true,\n\t}, &doc)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif len(doc.Rows) == 0 {\n\t\treturn 0, nil\n\t}\n\t\/\/ Reduce of _count should give us a number value\n\tf64, ok := doc.Rows[0].Value.(float64)\n\tif !ok {\n\t\treturn 0, ErrWrongCouchdbState\n\t}\n\treturn int64(f64), nil\n}\n\nfunc (c *couchdbIndexer) CreateFileDoc(doc *FileDoc) error {\n\treturn couchdb.CreateDoc(c.db, doc)\n}\n\nfunc (c *couchdbIndexer) CreateNamedFileDoc(doc *FileDoc) error {\n\treturn couchdb.CreateNamedDoc(c.db, doc)\n}\n\nfunc (c *couchdbIndexer) UpdateFileDoc(olddoc, newdoc *FileDoc) error {\n\tnewdoc.SetID(olddoc.ID())\n\tnewdoc.SetRev(olddoc.Rev())\n\treturn couchdb.UpdateDoc(c.db, newdoc)\n}\n\nfunc (c *couchdbIndexer) UpdateFileDocs(docs []*FileDoc) error {\n\tif len(docs) == 0 {\n\t\treturn nil\n\t}\n\tcouchdocs := make([]couchdb.Doc, len(docs))\n\tfor i, doc := range docs {\n\t\tcouchdocs[i] = doc\n\t}\n\treturn couchdb.BulkUpdateDoc(c.db, couchdocs)\n}\n\nfunc (c *couchdbIndexer) DeleteFileDoc(doc *FileDoc) error {\n\treturn couchdb.DeleteDoc(c.db, doc)\n}\n\nfunc (c *couchdbIndexer) CreateDirDoc(doc *DirDoc) error {\n\treturn couchdb.CreateDoc(c.db, doc)\n}\n\nfunc (c *couchdbIndexer) CreateNamedDirDoc(doc *DirDoc) error {\n\treturn couchdb.CreateNamedDoc(c.db, doc)\n}\n\nfunc (c *couchdbIndexer) UpdateDirDoc(olddoc, newdoc *DirDoc) error {\n\tnewdoc.SetID(olddoc.ID())\n\tnewdoc.SetRev(olddoc.Rev())\n\tif newdoc.Fullpath != olddoc.Fullpath {\n\t\tif err := c.moveDir(olddoc.Fullpath, newdoc.Fullpath); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn couchdb.UpdateDoc(c.db, newdoc)\n}\n\nfunc (c *couchdbIndexer) DeleteDirDoc(doc *DirDoc) error {\n\treturn couchdb.DeleteDoc(c.db, doc)\n}\n\n\/\/ @TODO use couchdb bulk updates instead\nfunc (c *couchdbIndexer) moveDir(oldpath, newpath string) error {\n\tvar children []*DirDoc\n\tsel := mango.StartWith(\"path\", oldpath+\"\/\")\n\treq := &couchdb.FindRequest{\n\t\tUseIndex: \"dir-by-path\",\n\t\tSelector: sel,\n\t}\n\terr := couchdb.FindDocs(c.db, consts.Files, req, &children)\n\tif err != nil || len(children) == 0 {\n\t\treturn err\n\t}\n\n\terrc := make(chan error)\n\n\tfor _, child := range children {\n\t\tgo func(child *DirDoc) {\n\t\t\tif !strings.HasPrefix(child.Fullpath, oldpath+\"\/\") {\n\t\t\t\terrc <- fmt.Errorf(\"Child has wrong base directory\")\n\t\t\t} else {\n\t\t\t\tchild.Fullpath = path.Join(newpath, child.Fullpath[len(oldpath)+1:])\n\t\t\t\terrc <- couchdb.UpdateDoc(c.db, child)\n\t\t\t}\n\t\t}(child)\n\t}\n\n\tfor range children {\n\t\tif e := <-errc; e != nil {\n\t\t\terr = e\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc (c *couchdbIndexer) DirByID(fileID string) (*DirDoc, error) {\n\tdoc := &DirDoc{}\n\terr := couchdb.GetDoc(c.db, consts.Files, fileID, doc)\n\tif couchdb.IsNotFoundError(err) {\n\t\terr = os.ErrNotExist\n\t}\n\tif err != nil {\n\t\tif fileID == consts.RootDirID {\n\t\t\tpanic(\"Root directory is not in database\")\n\t\t}\n\t\tif fileID == consts.TrashDirID {\n\t\t\tpanic(\"Trash directory is not in database\")\n\t\t}\n\t\treturn nil, err\n\t}\n\tif doc.Type != consts.DirType {\n\t\treturn nil, os.ErrNotExist\n\t}\n\treturn doc, err\n}\n\nfunc (c *couchdbIndexer) DirByPath(name string) (*DirDoc, error) {\n\tif !path.IsAbs(name) {\n\t\treturn nil, ErrNonAbsolutePath\n\t}\n\tvar docs []*DirDoc\n\tsel := mango.Equal(\"path\", path.Clean(name))\n\treq := &couchdb.FindRequest{\n\t\tUseIndex: \"dir-by-path\",\n\t\tSelector: sel,\n\t\tLimit: 1,\n\t}\n\terr := couchdb.FindDocs(c.db, consts.Files, req, &docs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(docs) == 0 {\n\t\tif name == \"\/\" {\n\t\t\tpanic(\"Root directory is not in database\")\n\t\t}\n\t\treturn nil, os.ErrNotExist\n\t}\n\treturn docs[0], nil\n}\n\nfunc (c *couchdbIndexer) FileByID(fileID string) (*FileDoc, error) {\n\tdoc := &FileDoc{}\n\terr := couchdb.GetDoc(c.db, consts.Files, fileID, doc)\n\tif couchdb.IsNotFoundError(err) {\n\t\treturn nil, os.ErrNotExist\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif doc.Type != consts.FileType {\n\t\treturn nil, os.ErrNotExist\n\t}\n\treturn doc, nil\n}\n\nfunc (c *couchdbIndexer) FileByPath(name string) (*FileDoc, error) {\n\tif !path.IsAbs(name) {\n\t\treturn nil, ErrNonAbsolutePath\n\t}\n\tparent, err := c.DirByPath(path.Dir(name))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ consts.FilesByParentView keys are [parentID, type, name]\n\tvar res couchdb.ViewResponse\n\terr = couchdb.ExecView(c.db, consts.FilesByParentView, &couchdb.ViewRequest{\n\t\tKey: []string{parent.DocID, consts.FileType, path.Base(name)},\n\t\tIncludeDocs: true,\n\t}, &res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(res.Rows) == 0 {\n\t\treturn nil, os.ErrNotExist\n\t}\n\n\tvar fdoc FileDoc\n\terr = json.Unmarshal(*res.Rows[0].Doc, &fdoc)\n\treturn &fdoc, err\n}\n\nfunc (c *couchdbIndexer) FilePath(doc *FileDoc) (string, error) {\n\tvar parentPath string\n\tif doc.DirID == consts.RootDirID {\n\t\tparentPath = \"\/\"\n\t} else if doc.DirID == consts.TrashDirID {\n\t\tparentPath = TrashDirName\n\t} else {\n\t\tparent, err := c.DirByID(doc.DirID)\n\t\tif err != nil {\n\t\t\treturn \"\", ErrParentDoesNotExist\n\t\t}\n\t\tparentPath = parent.Fullpath\n\t}\n\treturn path.Join(parentPath, doc.DocName), nil\n}\n\nfunc (c *couchdbIndexer) DirOrFileByID(fileID string) (*DirDoc, *FileDoc, error) {\n\tdirOrFile := &DirOrFileDoc{}\n\terr := couchdb.GetDoc(c.db, consts.Files, fileID, dirOrFile)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdirDoc, fileDoc := dirOrFile.Refine()\n\treturn dirDoc, fileDoc, nil\n}\n\nfunc (c *couchdbIndexer) DirOrFileByPath(name string) (*DirDoc, *FileDoc, error) {\n\tdirDoc, err := c.DirByPath(name)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn nil, nil, err\n\t}\n\tif err == nil {\n\t\treturn dirDoc, nil, nil\n\t}\n\tfileDoc, err := c.FileByPath(name)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn nil, nil, err\n\t}\n\tif err == nil {\n\t\treturn nil, fileDoc, nil\n\t}\n\treturn nil, nil, err\n}\n\nfunc (c *couchdbIndexer) DirIterator(doc *DirDoc, opts *IteratorOptions) DirIterator {\n\treturn NewIterator(c.db, doc, opts)\n}\n\nfunc (c *couchdbIndexer) DirBatch(doc *DirDoc, cursor couchdb.Cursor) ([]DirOrFileDoc, error) {\n\t\/\/ consts.FilesByParentView keys are [parentID, type, name]\n\treq := couchdb.ViewRequest{\n\t\tStartKey: []string{doc.DocID, \"\"},\n\t\tEndKey: []string{doc.DocID, couchdb.MaxString},\n\t\tIncludeDocs: true,\n\t}\n\tvar res couchdb.ViewResponse\n\tcursor.ApplyTo(&req)\n\terr := couchdb.ExecView(c.db, consts.FilesByParentView, &req, &res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcursor.UpdateFrom(&res)\n\n\tdocs := make([]DirOrFileDoc, len(res.Rows))\n\tfor i, row := range res.Rows {\n\t\tvar doc DirOrFileDoc\n\t\terr := json.Unmarshal(*row.Doc, &doc)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdocs[i] = doc\n\t}\n\n\treturn docs, nil\n}\n\nfunc (c *couchdbIndexer) DirLength(doc *DirDoc) (int, error) {\n\treq := couchdb.ViewRequest{\n\t\tStartKey: []string{doc.DocID, \"\"},\n\t\tEndKey: []string{doc.DocID, couchdb.MaxString},\n\t\tReduce: true,\n\t\tGroupLevel: 1,\n\t}\n\tvar res couchdb.ViewResponse\n\terr := couchdb.ExecView(c.db, consts.FilesByParentView, &req, &res)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif len(res.Rows) == 0 {\n\t\treturn 0, nil\n\t}\n\n\t\/\/ Reduce of _count should give us a number value\n\tf64, ok := res.Rows[0].Value.(float64)\n\tif !ok {\n\t\treturn 0, ErrWrongCouchdbState\n\t}\n\treturn int(f64), nil\n}\n\nfunc (c *couchdbIndexer) DirChildExists(dirID, name string) (bool, error) {\n\tvar res couchdb.ViewResponse\n\n\t\/\/ consts.FilesByParentView keys are [parentID, type, name]\n\terr := couchdb.ExecView(c.db, consts.FilesByParentView, &couchdb.ViewRequest{\n\t\tKeys: []interface{}{\n\t\t\t[]string{dirID, consts.FileType, name},\n\t\t\t[]string{dirID, consts.DirType, name},\n\t\t},\n\t\tIncludeDocs: false,\n\t}, &res)\n\n\treturn len(res.Rows) > 0, err\n}\n<|endoftext|>"} {"text":"<commit_before>package websocket\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\nconst (\n\t\/\/ Time allowed to write a message to the peer.\n\twriteWait = 10 * time.Second\n\n\t\/\/ Time allowed to read the next pong message from the peer.\n\tpongWait = 10 * time.Second\n\n\t\/\/ Send pings to peer with this period. Must be less than pongWait.\n\tpingPeriod = (pongWait * 9) \/ 10\n\n\treconnectWait = 2 * time.Second\n)\n\n\/\/ Websocket implements a websocket client\ntype Websocket interface {\n\tOnConnect(cb func())\n\tConnectContext(ctx context.Context, addr string, headers http.Header) error\n\tConnectWithRetry(parentCtx context.Context, addr string, headers http.Header)\n\tWait()\n\tRead() <-chan []byte\n\t\/\/ WriteJSON writes interface{} encoded as JSON to our connection\n\tWriteJSON(v interface{}) error\n\tSetTLSConfig(c *tls.Config)\n}\n\ntype websocketClient struct {\n\tconn *websocket.Conn\n\ttlsClientConfig *tls.Config\n\twrite chan func()\n\tread chan []byte\n\twg *sync.WaitGroup\n\tdisconnected chan error\n\tconnected chan struct{}\n\tonConnect func()\n}\n\n\/\/ New creates a new Websocket\nfunc New() Websocket {\n\treturn &websocketClient{\n\t\twrite: make(chan func()),\n\t\tread: make(chan []byte, 100),\n\t\twg: &sync.WaitGroup{},\n\t\tdisconnected: make(chan error),\n\t\tconnected: make(chan struct{}),\n\t}\n}\n\nfunc (ws *websocketClient) SetTLSConfig(c *tls.Config) {\n\tws.tlsClientConfig = c\n}\n\nfunc (ws *websocketClient) OnConnect(cb func()) {\n\tws.onConnect = cb\n}\n\nfunc (ws *websocketClient) ConnectContext(ctx context.Context, addr string, headers http.Header) error {\n\tvar err error\n\tvar c *websocket.Conn\n\tlogrus.Info(\"websocket: connecting to \", addr)\n\tif ws.tlsClientConfig != nil {\n\t\tdialer := &websocket.Dialer{\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\tHandshakeTimeout: 45 * time.Second,\n\t\t\tTLSClientConfig: ws.tlsClientConfig,\n\t\t}\n\t\tc, _, err = dialer.DialContext(ctx, addr, headers)\n\t} else {\n\t\tc, _, err = websocket.DefaultDialer.DialContext(ctx, addr, headers)\n\t}\n\tif err != nil {\n\t\tws.wasDisconnected(err)\n\t\treturn err\n\t}\n\tlogrus.Infof(\"websocket: connected to %s\", addr)\n\tws.wasConnected()\n\tws.conn = c\n\tws.readPump()\n\tws.writePump(ctx) <- struct{}{}\n\n\tif ws.onConnect != nil {\n\t\tws.onConnect()\n\t}\n\treturn nil\n}\n\n\/\/ ConnectWithRetry tries to connect and blocks until connected.\n\/\/ if disconnected because an error tries to reconnect again every 5th second\nfunc (ws *websocketClient) ConnectWithRetry(parentCtx context.Context, addr string, headers http.Header) {\n\n\tctx, cancel := context.WithCancel(parentCtx)\n\tws.wg.Add(1)\n\tgo func() {\n\t\tdefer ws.wg.Done()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-parentCtx.Done():\n\t\t\t\tlogrus.Info(\"websocket: stopping reconnect because err: \", parentCtx.Err())\n\t\t\t\treturn\n\t\t\tcase err := <-ws.disconnected:\n\t\t\t\tcancel() \/\/ Stop any write\/read pumps so we dont get duplicate write panic\n\t\t\t\tlogrus.Error(\"websocket: disconnected\")\n\t\t\t\tif websocket.IsCloseError(err, websocket.CloseNormalClosure) {\n\t\t\t\t\tlogrus.Info(\"websocket: Skipping reconnect due to CloseNormalClosure\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlogrus.Info(\"websocket: Reconnect because error: \", err)\n\t\t\t\tgo func() {\n\t\t\t\t\ttime.Sleep(5 * time.Second)\n\t\t\t\t\tctx, cancel = context.WithCancel(parentCtx)\n\t\t\t\t\terr := ws.ConnectContext(ctx, addr, headers)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogrus.Error(\"websocket: Reconnect failed with error: \", err)\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t}\n\t\t}\n\t}()\n\tgo ws.ConnectContext(ctx, addr, headers)\n\t<-ws.connected\n\treturn\n}\n\nfunc (ws *websocketClient) Wait() {\n\tws.wg.Wait()\n}\n\nfunc (ws *websocketClient) Read() <-chan []byte {\n\treturn ws.read\n}\n\n\/\/ WriteJSON writes interface{} encoded as JSON to our connection\nfunc (ws *websocketClient) WriteJSON(v interface{}) error {\n\terrCh := make(chan error, 1)\n\tselect {\n\tcase ws.write <- func() {\n\t\terr := ws.conn.WriteJSON(v)\n\t\terrCh <- err\n\t}:\n\tdefault:\n\t\terrCh <- fmt.Errorf(\"websocket: no one listening on write channel\")\n\t}\n\treturn <-errCh\n}\n\nfunc (ws *websocketClient) readPump() {\n\tgo func() {\n\t\tws.wg.Add(1)\n\t\tdefer ws.wg.Done()\n\t\tws.conn.SetReadDeadline(time.Now().Add(pongWait))\n\t\tws.conn.SetPongHandler(func(string) error { ws.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil })\n\t\tfor {\n\t\t\t_, message, err := ws.conn.ReadMessage()\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Error(\"websocket: readPump error:\", err)\n\t\t\t\tws.wasDisconnected(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlogrus.Debugf(\"websocket: readPump got msg: %s\", message)\n\t\t\tselect {\n\t\t\tcase ws.read <- message:\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (ws *websocketClient) writePump(ctx context.Context) chan struct{} {\n\tready := make(chan struct{})\n\tgo func() {\n\t\tws.wg.Add(1)\n\t\tdefer ws.wg.Done()\n\t\tticker := time.NewTicker(pingPeriod)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase t := <-ws.write:\n\t\t\t\tt()\n\t\t\tcase <-ctx.Done():\n\t\t\t\tlogrus.Error(\"websocket: Stopping writePump because err: \", ctx.Err())\n\t\t\t\terr := ws.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, \"\"))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Error(\"websocket: write close:\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tif err := ws.conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(writeWait)); err != nil {\n\t\t\t\t\tlogrus.Error(\"websocket: ping:\", err)\n\t\t\t\t}\n\t\t\tcase <-ready:\n\t\t\t}\n\t\t}\n\t}()\n\treturn ready\n}\n\nfunc (ws *websocketClient) wasDisconnected(err error) {\n\tselect {\n\tcase ws.disconnected <- err:\n\tdefault:\n\t}\n}\n\nfunc (ws *websocketClient) wasConnected() {\n\tselect {\n\tcase ws.connected <- struct{}{}:\n\tdefault:\n\t}\n}\n<commit_msg>Fixed careless mistakes<commit_after>package websocket\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\nconst (\n\t\/\/ Time allowed to write a message to the peer.\n\twriteWait = 10 * time.Second\n\n\t\/\/ Time allowed to read the next pong message from the peer.\n\tpongWait = 10 * time.Second\n\n\t\/\/ Send pings to peer with this period. Must be less than pongWait.\n\tpingPeriod = (pongWait * 9) \/ 10\n\n\treconnectWait = 2 * time.Second\n)\n\n\/\/ Websocket implements a websocket client\ntype Websocket interface {\n\tOnConnect(cb func())\n\tConnectContext(ctx context.Context, addr string, headers http.Header) error\n\tConnectWithRetry(parentCtx context.Context, addr string, headers http.Header)\n\tWait()\n\tRead() <-chan []byte\n\t\/\/ WriteJSON writes interface{} encoded as JSON to our connection\n\tWriteJSON(v interface{}) error\n\tSetTLSConfig(c *tls.Config)\n}\n\ntype websocketClient struct {\n\tconn *websocket.Conn\n\ttlsClientConfig *tls.Config\n\twrite chan func()\n\tread chan []byte\n\twg *sync.WaitGroup\n\tdisconnected chan error\n\tconnected chan struct{}\n\tonConnect func()\n}\n\n\/\/ New creates a new Websocket\nfunc New() Websocket {\n\treturn &websocketClient{\n\t\twrite: make(chan func()),\n\t\tread: make(chan []byte, 100),\n\t\twg: &sync.WaitGroup{},\n\t\tdisconnected: make(chan error),\n\t\tconnected: make(chan struct{}),\n\t}\n}\n\nfunc (ws *websocketClient) SetTLSConfig(c *tls.Config) {\n\tws.tlsClientConfig = c\n}\n\nfunc (ws *websocketClient) OnConnect(cb func()) {\n\tws.onConnect = cb\n}\n\nfunc (ws *websocketClient) ConnectContext(ctx context.Context, addr string, headers http.Header) error {\n\tvar err error\n\tvar c *websocket.Conn\n\tlogrus.Info(\"websocket: connecting to \", addr)\n\tif ws.tlsClientConfig != nil {\n\t\tdialer := &websocket.Dialer{\n\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\tHandshakeTimeout: 45 * time.Second,\n\t\t\tTLSClientConfig: ws.tlsClientConfig,\n\t\t}\n\t\tc, _, err = dialer.DialContext(ctx, addr, headers)\n\t} else {\n\t\tc, _, err = websocket.DefaultDialer.DialContext(ctx, addr, headers)\n\t}\n\tif err != nil {\n\t\tws.wasDisconnected(err)\n\t\treturn err\n\t}\n\tlogrus.Infof(\"websocket: connected to %s\", addr)\n\tws.wasConnected()\n\tws.conn = c\n\tws.readPump()\n\tws.writePump(ctx) <- struct{}{}\n\n\tif ws.onConnect != nil {\n\t\tws.onConnect()\n\t}\n\treturn nil\n}\n\n\/\/ ConnectWithRetry tries to connect and blocks until connected.\n\/\/ if disconnected because an error tries to reconnect again every 5th second\nfunc (ws *websocketClient) ConnectWithRetry(parentCtx context.Context, addr string, headers http.Header) {\n\n\tctx, cancel := context.WithCancel(parentCtx)\n\tws.wg.Add(1)\n\tgo func() {\n\t\tdefer ws.wg.Done()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-parentCtx.Done():\n\t\t\t\tlogrus.Info(\"websocket: stopping reconnect because err: \", parentCtx.Err())\n\t\t\t\treturn\n\t\t\tcase err := <-ws.disconnected:\n\t\t\t\tcancel() \/\/ Stop any write\/read pumps so we dont get duplicate write panic\n\t\t\t\tlogrus.Error(\"websocket: disconnected\")\n\t\t\t\tif websocket.IsCloseError(err, websocket.CloseNormalClosure) {\n\t\t\t\t\tlogrus.Info(\"websocket: Skipping reconnect due to CloseNormalClosure\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tlogrus.Info(\"websocket: Reconnect because error: \", err)\n\t\t\t\tgo func() {\n\t\t\t\t\ttime.Sleep(5 * time.Second)\n\t\t\t\t\tctx, cancel = context.WithCancel(parentCtx)\n\t\t\t\t\terr := ws.ConnectContext(ctx, addr, headers)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlogrus.Error(\"websocket: Reconnect failed with error: \", err)\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t}\n\t\t}\n\t}()\n\tgo ws.ConnectContext(ctx, addr, headers)\n\t<-ws.connected\n\treturn\n}\n\nfunc (ws *websocketClient) Wait() {\n\tws.wg.Wait()\n}\n\nfunc (ws *websocketClient) Read() <-chan []byte {\n\treturn ws.read\n}\n\n\/\/ WriteJSON writes interface{} encoded as JSON to our connection\nfunc (ws *websocketClient) WriteJSON(v interface{}) error {\n\terrCh := make(chan error, 1)\n\tselect {\n\tcase ws.write <- func() {\n\t\terr := ws.conn.WriteJSON(v)\n\t\terrCh <- err\n\t}:\n\tdefault:\n\t\terrCh <- fmt.Errorf(\"websocket: no one listening on write channel\")\n\t}\n\treturn <-errCh\n}\n\nfunc (ws *websocketClient) readPump() {\n\tws.wg.Add(1)\n\tgo func() {\n\t\tdefer ws.wg.Done()\n\t\tws.conn.SetReadDeadline(time.Now().Add(pongWait))\n\t\tws.conn.SetPongHandler(func(string) error { ws.conn.SetReadDeadline(time.Now().Add(pongWait)); return nil })\n\t\tfor {\n\t\t\t_, message, err := ws.conn.ReadMessage()\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Error(\"websocket: readPump error:\", err)\n\t\t\t\tws.wasDisconnected(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlogrus.Debugf(\"websocket: readPump got msg: %s\", message)\n\t\t\tselect {\n\t\t\tcase ws.read <- message:\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (ws *websocketClient) writePump(ctx context.Context) chan struct{} {\n\tready := make(chan struct{})\n\tws.wg.Add(1)\n\tgo func() {\n\t\tdefer ws.wg.Done()\n\t\tticker := time.NewTicker(pingPeriod)\n\t\tdefer ticker.Stop()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase t := <-ws.write:\n\t\t\t\tt()\n\t\t\tcase <-ctx.Done():\n\t\t\t\tlogrus.Error(\"websocket: Stopping writePump because err: \", ctx.Err())\n\t\t\t\terr := ws.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, \"\"))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Error(\"websocket: write close:\", err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t\tif err := ws.conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(writeWait)); err != nil {\n\t\t\t\t\tlogrus.Error(\"websocket: ping:\", err)\n\t\t\t\t}\n\t\t\tcase <-ready:\n\t\t\t}\n\t\t}\n\t}()\n\treturn ready\n}\n\nfunc (ws *websocketClient) wasDisconnected(err error) {\n\tselect {\n\tcase ws.disconnected <- err:\n\tdefault:\n\t}\n}\n\nfunc (ws *websocketClient) wasConnected() {\n\tselect {\n\tcase ws.connected <- struct{}{}:\n\tdefault:\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Added sorting functions, by name or by level+name.<commit_after><|endoftext|>"} {"text":"<commit_before>package textmate\n\nimport (\n\t\"code.google.com\/p\/log4go\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/quarnster\/parser\"\n\t\"io\/ioutil\"\n\t\"lime\/3rdparty\/libs\/rubex\"\n\t\"lime\/backend\/loaders\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst maxiter = 10000\n\ntype (\n\tRegex struct {\n\t\tre *rubex.Regexp\n\t\tlastIndex int\n\t\tlastFound int\n\t\tlastData string\n\t}\n\n\tLanguage struct {\n\t\tUnpatchedLanguage\n\t}\n\n\tLanguageProvider map[string]*Language\n\n\tUnpatchedLanguage struct {\n\t\tFileTypes []string\n\t\tFirstLineMatch string\n\t\tRootPattern RootPattern `json:\"patterns\"`\n\t\tRepository map[string]*Pattern\n\t\tScopeName string\n\t}\n\n\tNamed struct {\n\t\tName string\n\t}\n\n\tCaptures map[string]Named\n\n\tMatchObject []int\n\n\tPattern struct {\n\t\tNamed\n\t\tInclude string\n\t\tMatch Regex\n\t\tCaptures Captures\n\t\tBegin Regex\n\t\tBeginCaptures Captures\n\t\tEnd Regex\n\t\tEndCaptures Captures\n\t\tPatterns []*Pattern\n\t\towner *Language \/\/ needed for include directives\n\t\tcachedData string\n\t\tcachedPat *Pattern\n\t\tcachedPatterns []*Pattern\n\t\tcachedMatch MatchObject\n\t\thits int\n\t\tmisses int\n\t}\n\tRootPattern struct {\n\t\tPattern\n\t}\n\n\tLanguageParser struct {\n\t\tLanguage *Language\n\t\troot parser.Node\n\t}\n)\n\nvar (\n\tProvider = make(LanguageProvider)\n\tfailed = make(map[string]bool)\n)\n\nfunc (t LanguageProvider) GetLanguage(id string) (*Language, error) {\n\tif v, ok := t[id]; !ok {\n\t\treturn nil, errors.New(\"Can't handle id \" + id)\n\t} else {\n\t\treturn v, nil\n\t}\n}\n\nfunc (t LanguageProvider) Load(fn string) error {\n\tif d, err := ioutil.ReadFile(fn); err != nil {\n\t\treturn fmt.Errorf(\"Couldn't load file %s: %s\", fn, err)\n\t} else {\n\t\tvar l Language\n\t\tif err := loaders.LoadPlist(d, &l); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\tt[l.ScopeName] = &l\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (p Pattern) String() (ret string) {\n\tret = fmt.Sprintf(`---------------------------------------\nName: %s\nMatch: %s\nBegin: %s\nEnd: %s\nInclude: %s\n`, p.Name, p.Match, p.Begin, p.End, p.Include)\n\tret += fmt.Sprintf(\"<Sub-Patterns>\\n\")\n\tfor i := range p.Patterns {\n\t\tinner := fmt.Sprintf(\"%s\", p.Patterns[i])\n\t\tret += fmt.Sprintf(\"\\t%s\\n\", strings.Replace(strings.Replace(inner, \"\\t\", \"\\t\\t\", -1), \"\\n\", \"\\n\\t\", -1))\n\t}\n\tret += fmt.Sprintf(\"<\/Sub-Patterns>\\n---------------------------------------\")\n\treturn\n}\n\nfunc (r Regex) String() string {\n\tif r.re == nil {\n\t\treturn \"nil\"\n\t}\n\treturn fmt.Sprintf(\"%s \/\/ %d, %d\", r.re.String(), r.lastIndex, r.lastFound)\n}\n\nfunc (r *RootPattern) String() (ret string) {\n\tfor i := range r.Patterns {\n\t\tret += fmt.Sprintf(\"\\t%s\\n\", r.Patterns[i])\n\t}\n\treturn\n}\n\nfunc (s *Language) String() string {\n\treturn fmt.Sprintf(\"%s\\n%s\", s.ScopeName, s.RootPattern)\n}\n\nfunc (p *Pattern) setOwner(l *Language) {\n\tp.owner = l\n\tfor i := range p.Patterns {\n\t\tp.Patterns[i].setOwner(l)\n\t}\n}\n\nfunc (l *Language) UnmarshalJSON(data []byte) error {\n\tif err := json.Unmarshal(data, &l.UnpatchedLanguage); err != nil {\n\t\treturn err\n\t}\n\tl.RootPattern.setOwner(l)\n\tfor k := range l.Repository {\n\t\tl.Repository[k].setOwner(l)\n\t}\n\treturn nil\n}\n\nfunc (r *RootPattern) UnmarshalJSON(data []byte) error {\n\treturn json.Unmarshal(data, &r.Patterns)\n}\n\nfunc (r *Regex) UnmarshalJSON(data []byte) error {\n\tstr := string(data[1 : len(data)-1])\n\tstr = strings.Replace(str, \"\\\\\\\\\", \"\\\\\", -1)\n\tstr = strings.Replace(str, \"\\\\n\", \"\\n\", -1)\n\tstr = strings.Replace(str, \"\\\\t\", \"\\t\", -1)\n\tif re, err := rubex.Compile(str); err != nil {\n\t\tlog4go.Warn(\"Couldn't compile language pattern %s: %s\", str, err)\n\t} else {\n\t\tr.re = re\n\t}\n\treturn nil\n}\n\nfunc (m MatchObject) fix(add int) {\n\tfor i := range m {\n\t\tif m[i] != -1 {\n\t\t\tm[i] += add\n\t\t}\n\t}\n}\n\nfunc (r *Regex) Find(data string, pos int) MatchObject {\n\tif r.lastData != data || r.lastIndex > pos {\n\t\tr.lastData = data\n\t\tr.lastFound = 0\n\t}\n\tr.lastIndex = pos\n\tfor r.lastFound < len(data) {\n\t\tret := r.re.FindStringSubmatchIndex(data[r.lastFound:])\n\t\tif ret == nil {\n\t\t\tbreak\n\t\t} else if (ret[0] + r.lastFound) < pos {\n\t\t\tif ret[0] == 0 {\n\t\t\t\tr.lastFound++\n\t\t\t} else {\n\t\t\t\tr.lastFound += ret[0]\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tmo := MatchObject(ret)\n\t\tmo.fix(r.lastFound)\n\t\treturn mo\n\t}\n\treturn nil\n}\n\nfunc (p *Pattern) FirstMatch(data string, pos int) (pat *Pattern, ret MatchObject) {\n\tstartIdx := -1\n\tfor i := 0; i < len(p.cachedPatterns); {\n\t\tip, im := p.cachedPatterns[i].Cache(data, pos)\n\t\tif im != nil && im[0] != im[1] {\n\t\t\tif startIdx < 0 || startIdx > im[0] {\n\t\t\t\tstartIdx, pat, ret = im[0], ip, im\n\t\t\t\t\/\/ This match is right at the start, we're not going to find a better pattern than this,\n\t\t\t\t\/\/ so stop the search\n\t\t\t\tif im[0] == pos {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\ti++\n\t\t} else {\n\t\t\t\/\/ If it wasn't found now, it'll never be found, so the pattern can be popped from the cache\n\t\t\tcopy(p.cachedPatterns[i:], p.cachedPatterns[i+1:])\n\t\t\tp.cachedPatterns = p.cachedPatterns[:len(p.cachedPatterns)-1]\n\t\t}\n\t}\n\treturn\n}\n\nfunc (p *Pattern) Cache(data string, pos int) (pat *Pattern, ret MatchObject) {\n\tif p.cachedData == data {\n\t\tif p.cachedMatch == nil {\n\t\t\treturn nil, nil\n\t\t}\n\t\tif p.cachedMatch[0] >= pos {\n\t\t\tp.hits++\n\t\t\treturn p.cachedPat, p.cachedMatch\n\t\t}\n\t} else {\n\t\tp.cachedPatterns = nil\n\t}\n\tif p.cachedPatterns == nil {\n\t\tp.cachedPatterns = make([]*Pattern, len(p.Patterns))\n\t\tcopy(p.cachedPatterns, p.Patterns)\n\t}\n\tp.misses++\n\n\tif p.Match.re != nil {\n\t\tpat, ret = p, p.Match.Find(data, pos)\n\t} else if p.Begin.re != nil {\n\t\tpat, ret = p, p.Begin.Find(data, pos)\n\t} else if p.Include != \"\" {\n\t\tif z := p.Include[0]; z == '#' {\n\t\t\tkey := p.Include[1:]\n\t\t\tif p2, ok := p.owner.Repository[key]; ok {\n\t\t\t\tpat, ret = p2.Cache(data, pos)\n\t\t\t} else {\n\t\t\t\tlog4go.Warn(\"Not found in repository: %s\", p.Include)\n\t\t\t}\n\t\t} else if z == '$' {\n\t\t\t\/\/ TODO(q): Implement tmLanguage $ include directives\n\t\t\tlog4go.Warn(\"Unhandled include directive: %s\", p.Include)\n\t\t} else if l, err := Provider.GetLanguage(p.Include); err != nil {\n\t\t\tif !failed[p.Include] {\n\t\t\t\tlog4go.Warn(\"Include directive %s failed: %s\", p.Include, err)\n\t\t\t}\n\t\t\tfailed[p.Include] = true\n\t\t} else {\n\t\t\treturn l.RootPattern.Cache(data, pos)\n\t\t}\n\t} else {\n\t\tpat, ret = p.FirstMatch(data, pos)\n\t}\n\tp.cachedData = data\n\tp.cachedMatch = ret\n\tp.cachedPat = pat\n\n\treturn\n}\n\nfunc (p *Pattern) CreateCaptureNodes(data string, pos int, d parser.DataSource, mo MatchObject, parent *parser.Node, cap Captures) {\n\tranges := make([]parser.Range, len(mo)\/2)\n\tparentIndex := make([]int, len(ranges))\n\tparents := make([]*parser.Node, len(parentIndex))\n\tfor i := range ranges {\n\t\tranges[i] = parser.Range{mo[i*2+0], mo[i*2+1]}\n\t\tif i < 2 {\n\t\t\tparents[i] = parent\n\t\t\tcontinue\n\t\t}\n\t\tr := ranges[i]\n\t\tfor j := i - 1; j >= 0; j-- {\n\t\t\tif ranges[j].Contains(r) {\n\t\t\t\tparentIndex[i] = j\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tfor k, v := range cap {\n\t\ti64, err := strconv.ParseInt(k, 10, 32)\n\t\tif i := int(i64); err == nil && i < len(parents) {\n\t\t\tif ranges[i].Start == -1 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tchild := &parser.Node{Name: v.Name, Range: ranges[i], P: d}\n\t\t\tparents[i] = child\n\t\t\tif i == 0 {\n\t\t\t\tparent.Append(child)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar p *parser.Node\n\t\t\tfor p == nil {\n\t\t\t\ti = parentIndex[i]\n\t\t\t\tp = parents[i]\n\t\t\t}\n\t\t\tp.Append(child)\n\t\t}\n\t}\n}\n\nfunc (p *Pattern) CreateNode(data string, pos int, d parser.DataSource, mo MatchObject) *parser.Node {\n\tret := parser.Node{Name: p.Name, Range: parser.Range{mo[0], mo[1]}, P: d}\n\tif p.Match.re != nil {\n\t\tp.CreateCaptureNodes(data, pos, d, mo, &ret, p.Captures)\n\t} else if p.Begin.re != nil {\n\t\tif len(p.BeginCaptures) > 0 {\n\t\t\tp.CreateCaptureNodes(data, pos, d, mo, &ret, p.BeginCaptures)\n\t\t} else {\n\t\t\tp.CreateCaptureNodes(data, pos, d, mo, &ret, p.Captures)\n\t\t}\n\n\t\tif p.End.re != nil {\n\t\t\tvar (\n\t\t\t\tfound = false\n\t\t\t\ti, end int\n\t\t\t)\n\t\t\tfor i, end = ret.Range.End, len(data); i < len(data); {\n\t\t\t\tendmatch := p.End.Find(data, i)\n\t\t\t\tif endmatch != nil {\n\t\t\t\t\tend = endmatch[1]\n\t\t\t\t} else {\n\t\t\t\t\tif !found {\n\t\t\t\t\t\t\/\/ oops.. no end found at all, set it to the next line\n\t\t\t\t\t\tif e2 := strings.IndexRune(data[i:], '\\n'); e2 != -1 {\n\t\t\t\t\t\t\tend = i + e2\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak\n\t\t\t\t\t} else {\n\t\t\t\t\t\tend = i\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif (endmatch == nil || (endmatch != nil && endmatch[0] != i)) && len(p.cachedPatterns) > 0 {\n\t\t\t\t\t\/\/ Might be more recursive patterns to apply BEFORE the end is reached\n\t\t\t\t\tpattern2, match2 := p.FirstMatch(data, i)\n\t\t\t\t\tif match2 != nil &&\n\t\t\t\t\t\t((endmatch == nil && match2[0] < end) ||\n\t\t\t\t\t\t\t(endmatch != nil && match2[0] < endmatch[0])) {\n\n\t\t\t\t\t\tfound = true\n\t\t\t\t\t\tr := pattern2.CreateNode(data, i, d, match2)\n\t\t\t\t\t\tret.Append(r)\n\t\t\t\t\t\ti = r.Range.End\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif endmatch != nil {\n\t\t\t\t\tif len(p.EndCaptures) > 0 {\n\t\t\t\t\t\tp.CreateCaptureNodes(data, i, d, endmatch, &ret, p.EndCaptures)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tp.CreateCaptureNodes(data, i, d, endmatch, &ret, p.Captures)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tret.Range.End = end\n\t\t}\n\t}\n\tret.UpdateRange()\n\treturn &ret\n}\n\ntype dp struct {\n\tdata []rune\n}\n\nfunc (d *dp) Data(a, b int) string {\n\treturn string(d.data[a:b])\n}\n\nfunc (lp *LanguageParser) RootNode() *parser.Node {\n\treturn &lp.root\n}\n\nfunc (lp *LanguageParser) patch(lut []int, node *parser.Node) {\n\tnode.Range.Start = lut[node.Range.Start]\n\tnode.Range.End = lut[node.Range.End]\n\tfor _, child := range node.Children {\n\t\tlp.patch(lut, child)\n\t}\n}\n\nfunc (lp *LanguageParser) Parse(data string) bool {\n\td := &dp{[]rune(data)}\n\tlp.root = parser.Node{P: d, Name: lp.Language.ScopeName}\n\titer := maxiter\n\tfor i := 0; i < len(data) && iter > 0; iter-- {\n\t\tpat, ret := lp.Language.RootPattern.Cache(data, i)\n\t\tnl := strings.IndexAny(data[i:], \"\\n\\r\")\n\n\t\tif nl != -1 {\n\t\t\tnl += i\n\t\t}\n\t\tif ret == nil {\n\t\t\tbreak\n\t\t} else if nl > 0 && nl <= ret[0] {\n\t\t\ti = nl\n\t\t\tfor i < len(data) && (data[i] == '\\n' || data[i] == '\\r') {\n\t\t\t\ti++\n\t\t\t}\n\t\t} else {\n\t\t\tn := pat.CreateNode(data, i, d, ret)\n\t\t\tlp.root.Append(n)\n\n\t\t\ti = n.Range.End\n\t\t}\n\t}\n\tlp.root.UpdateRange()\n\tlut := make([]int, len(data))\n\tj := 0\n\tfor i := range data {\n\t\tlut[i] = j\n\t\tj++\n\t}\n\tlp.patch(lut, &lp.root)\n\treturn true\n}\n<commit_msg>Don't patch empty data.<commit_after>package textmate\n\nimport (\n\t\"code.google.com\/p\/log4go\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/quarnster\/parser\"\n\t\"io\/ioutil\"\n\t\"lime\/3rdparty\/libs\/rubex\"\n\t\"lime\/backend\/loaders\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst maxiter = 10000\n\ntype (\n\tRegex struct {\n\t\tre *rubex.Regexp\n\t\tlastIndex int\n\t\tlastFound int\n\t\tlastData string\n\t}\n\n\tLanguage struct {\n\t\tUnpatchedLanguage\n\t}\n\n\tLanguageProvider map[string]*Language\n\n\tUnpatchedLanguage struct {\n\t\tFileTypes []string\n\t\tFirstLineMatch string\n\t\tRootPattern RootPattern `json:\"patterns\"`\n\t\tRepository map[string]*Pattern\n\t\tScopeName string\n\t}\n\n\tNamed struct {\n\t\tName string\n\t}\n\n\tCaptures map[string]Named\n\n\tMatchObject []int\n\n\tPattern struct {\n\t\tNamed\n\t\tInclude string\n\t\tMatch Regex\n\t\tCaptures Captures\n\t\tBegin Regex\n\t\tBeginCaptures Captures\n\t\tEnd Regex\n\t\tEndCaptures Captures\n\t\tPatterns []*Pattern\n\t\towner *Language \/\/ needed for include directives\n\t\tcachedData string\n\t\tcachedPat *Pattern\n\t\tcachedPatterns []*Pattern\n\t\tcachedMatch MatchObject\n\t\thits int\n\t\tmisses int\n\t}\n\tRootPattern struct {\n\t\tPattern\n\t}\n\n\tLanguageParser struct {\n\t\tLanguage *Language\n\t\troot parser.Node\n\t}\n)\n\nvar (\n\tProvider = make(LanguageProvider)\n\tfailed = make(map[string]bool)\n)\n\nfunc (t LanguageProvider) GetLanguage(id string) (*Language, error) {\n\tif v, ok := t[id]; !ok {\n\t\treturn nil, errors.New(\"Can't handle id \" + id)\n\t} else {\n\t\treturn v, nil\n\t}\n}\n\nfunc (t LanguageProvider) Load(fn string) error {\n\tif d, err := ioutil.ReadFile(fn); err != nil {\n\t\treturn fmt.Errorf(\"Couldn't load file %s: %s\", fn, err)\n\t} else {\n\t\tvar l Language\n\t\tif err := loaders.LoadPlist(d, &l); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\tt[l.ScopeName] = &l\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (p Pattern) String() (ret string) {\n\tret = fmt.Sprintf(`---------------------------------------\nName: %s\nMatch: %s\nBegin: %s\nEnd: %s\nInclude: %s\n`, p.Name, p.Match, p.Begin, p.End, p.Include)\n\tret += fmt.Sprintf(\"<Sub-Patterns>\\n\")\n\tfor i := range p.Patterns {\n\t\tinner := fmt.Sprintf(\"%s\", p.Patterns[i])\n\t\tret += fmt.Sprintf(\"\\t%s\\n\", strings.Replace(strings.Replace(inner, \"\\t\", \"\\t\\t\", -1), \"\\n\", \"\\n\\t\", -1))\n\t}\n\tret += fmt.Sprintf(\"<\/Sub-Patterns>\\n---------------------------------------\")\n\treturn\n}\n\nfunc (r Regex) String() string {\n\tif r.re == nil {\n\t\treturn \"nil\"\n\t}\n\treturn fmt.Sprintf(\"%s \/\/ %d, %d\", r.re.String(), r.lastIndex, r.lastFound)\n}\n\nfunc (r *RootPattern) String() (ret string) {\n\tfor i := range r.Patterns {\n\t\tret += fmt.Sprintf(\"\\t%s\\n\", r.Patterns[i])\n\t}\n\treturn\n}\n\nfunc (s *Language) String() string {\n\treturn fmt.Sprintf(\"%s\\n%s\", s.ScopeName, s.RootPattern)\n}\n\nfunc (p *Pattern) setOwner(l *Language) {\n\tp.owner = l\n\tfor i := range p.Patterns {\n\t\tp.Patterns[i].setOwner(l)\n\t}\n}\n\nfunc (l *Language) UnmarshalJSON(data []byte) error {\n\tif err := json.Unmarshal(data, &l.UnpatchedLanguage); err != nil {\n\t\treturn err\n\t}\n\tl.RootPattern.setOwner(l)\n\tfor k := range l.Repository {\n\t\tl.Repository[k].setOwner(l)\n\t}\n\treturn nil\n}\n\nfunc (r *RootPattern) UnmarshalJSON(data []byte) error {\n\treturn json.Unmarshal(data, &r.Patterns)\n}\n\nfunc (r *Regex) UnmarshalJSON(data []byte) error {\n\tstr := string(data[1 : len(data)-1])\n\tstr = strings.Replace(str, \"\\\\\\\\\", \"\\\\\", -1)\n\tstr = strings.Replace(str, \"\\\\n\", \"\\n\", -1)\n\tstr = strings.Replace(str, \"\\\\t\", \"\\t\", -1)\n\tif re, err := rubex.Compile(str); err != nil {\n\t\tlog4go.Warn(\"Couldn't compile language pattern %s: %s\", str, err)\n\t} else {\n\t\tr.re = re\n\t}\n\treturn nil\n}\n\nfunc (m MatchObject) fix(add int) {\n\tfor i := range m {\n\t\tif m[i] != -1 {\n\t\t\tm[i] += add\n\t\t}\n\t}\n}\n\nfunc (r *Regex) Find(data string, pos int) MatchObject {\n\tif r.lastData != data || r.lastIndex > pos {\n\t\tr.lastData = data\n\t\tr.lastFound = 0\n\t}\n\tr.lastIndex = pos\n\tfor r.lastFound < len(data) {\n\t\tret := r.re.FindStringSubmatchIndex(data[r.lastFound:])\n\t\tif ret == nil {\n\t\t\tbreak\n\t\t} else if (ret[0] + r.lastFound) < pos {\n\t\t\tif ret[0] == 0 {\n\t\t\t\tr.lastFound++\n\t\t\t} else {\n\t\t\t\tr.lastFound += ret[0]\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tmo := MatchObject(ret)\n\t\tmo.fix(r.lastFound)\n\t\treturn mo\n\t}\n\treturn nil\n}\n\nfunc (p *Pattern) FirstMatch(data string, pos int) (pat *Pattern, ret MatchObject) {\n\tstartIdx := -1\n\tfor i := 0; i < len(p.cachedPatterns); {\n\t\tip, im := p.cachedPatterns[i].Cache(data, pos)\n\t\tif im != nil && im[0] != im[1] {\n\t\t\tif startIdx < 0 || startIdx > im[0] {\n\t\t\t\tstartIdx, pat, ret = im[0], ip, im\n\t\t\t\t\/\/ This match is right at the start, we're not going to find a better pattern than this,\n\t\t\t\t\/\/ so stop the search\n\t\t\t\tif im[0] == pos {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\ti++\n\t\t} else {\n\t\t\t\/\/ If it wasn't found now, it'll never be found, so the pattern can be popped from the cache\n\t\t\tcopy(p.cachedPatterns[i:], p.cachedPatterns[i+1:])\n\t\t\tp.cachedPatterns = p.cachedPatterns[:len(p.cachedPatterns)-1]\n\t\t}\n\t}\n\treturn\n}\n\nfunc (p *Pattern) Cache(data string, pos int) (pat *Pattern, ret MatchObject) {\n\tif p.cachedData == data {\n\t\tif p.cachedMatch == nil {\n\t\t\treturn nil, nil\n\t\t}\n\t\tif p.cachedMatch[0] >= pos {\n\t\t\tp.hits++\n\t\t\treturn p.cachedPat, p.cachedMatch\n\t\t}\n\t} else {\n\t\tp.cachedPatterns = nil\n\t}\n\tif p.cachedPatterns == nil {\n\t\tp.cachedPatterns = make([]*Pattern, len(p.Patterns))\n\t\tcopy(p.cachedPatterns, p.Patterns)\n\t}\n\tp.misses++\n\n\tif p.Match.re != nil {\n\t\tpat, ret = p, p.Match.Find(data, pos)\n\t} else if p.Begin.re != nil {\n\t\tpat, ret = p, p.Begin.Find(data, pos)\n\t} else if p.Include != \"\" {\n\t\tif z := p.Include[0]; z == '#' {\n\t\t\tkey := p.Include[1:]\n\t\t\tif p2, ok := p.owner.Repository[key]; ok {\n\t\t\t\tpat, ret = p2.Cache(data, pos)\n\t\t\t} else {\n\t\t\t\tlog4go.Warn(\"Not found in repository: %s\", p.Include)\n\t\t\t}\n\t\t} else if z == '$' {\n\t\t\t\/\/ TODO(q): Implement tmLanguage $ include directives\n\t\t\tlog4go.Warn(\"Unhandled include directive: %s\", p.Include)\n\t\t} else if l, err := Provider.GetLanguage(p.Include); err != nil {\n\t\t\tif !failed[p.Include] {\n\t\t\t\tlog4go.Warn(\"Include directive %s failed: %s\", p.Include, err)\n\t\t\t}\n\t\t\tfailed[p.Include] = true\n\t\t} else {\n\t\t\treturn l.RootPattern.Cache(data, pos)\n\t\t}\n\t} else {\n\t\tpat, ret = p.FirstMatch(data, pos)\n\t}\n\tp.cachedData = data\n\tp.cachedMatch = ret\n\tp.cachedPat = pat\n\n\treturn\n}\n\nfunc (p *Pattern) CreateCaptureNodes(data string, pos int, d parser.DataSource, mo MatchObject, parent *parser.Node, cap Captures) {\n\tranges := make([]parser.Range, len(mo)\/2)\n\tparentIndex := make([]int, len(ranges))\n\tparents := make([]*parser.Node, len(parentIndex))\n\tfor i := range ranges {\n\t\tranges[i] = parser.Range{mo[i*2+0], mo[i*2+1]}\n\t\tif i < 2 {\n\t\t\tparents[i] = parent\n\t\t\tcontinue\n\t\t}\n\t\tr := ranges[i]\n\t\tfor j := i - 1; j >= 0; j-- {\n\t\t\tif ranges[j].Contains(r) {\n\t\t\t\tparentIndex[i] = j\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tfor k, v := range cap {\n\t\ti64, err := strconv.ParseInt(k, 10, 32)\n\t\tif i := int(i64); err == nil && i < len(parents) {\n\t\t\tif ranges[i].Start == -1 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tchild := &parser.Node{Name: v.Name, Range: ranges[i], P: d}\n\t\t\tparents[i] = child\n\t\t\tif i == 0 {\n\t\t\t\tparent.Append(child)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar p *parser.Node\n\t\t\tfor p == nil {\n\t\t\t\ti = parentIndex[i]\n\t\t\t\tp = parents[i]\n\t\t\t}\n\t\t\tp.Append(child)\n\t\t}\n\t}\n}\n\nfunc (p *Pattern) CreateNode(data string, pos int, d parser.DataSource, mo MatchObject) *parser.Node {\n\tret := parser.Node{Name: p.Name, Range: parser.Range{mo[0], mo[1]}, P: d}\n\tif p.Match.re != nil {\n\t\tp.CreateCaptureNodes(data, pos, d, mo, &ret, p.Captures)\n\t} else if p.Begin.re != nil {\n\t\tif len(p.BeginCaptures) > 0 {\n\t\t\tp.CreateCaptureNodes(data, pos, d, mo, &ret, p.BeginCaptures)\n\t\t} else {\n\t\t\tp.CreateCaptureNodes(data, pos, d, mo, &ret, p.Captures)\n\t\t}\n\n\t\tif p.End.re != nil {\n\t\t\tvar (\n\t\t\t\tfound = false\n\t\t\t\ti, end int\n\t\t\t)\n\t\t\tfor i, end = ret.Range.End, len(data); i < len(data); {\n\t\t\t\tendmatch := p.End.Find(data, i)\n\t\t\t\tif endmatch != nil {\n\t\t\t\t\tend = endmatch[1]\n\t\t\t\t} else {\n\t\t\t\t\tif !found {\n\t\t\t\t\t\t\/\/ oops.. no end found at all, set it to the next line\n\t\t\t\t\t\tif e2 := strings.IndexRune(data[i:], '\\n'); e2 != -1 {\n\t\t\t\t\t\t\tend = i + e2\n\t\t\t\t\t\t}\n\t\t\t\t\t\tbreak\n\t\t\t\t\t} else {\n\t\t\t\t\t\tend = i\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif (endmatch == nil || (endmatch != nil && endmatch[0] != i)) && len(p.cachedPatterns) > 0 {\n\t\t\t\t\t\/\/ Might be more recursive patterns to apply BEFORE the end is reached\n\t\t\t\t\tpattern2, match2 := p.FirstMatch(data, i)\n\t\t\t\t\tif match2 != nil &&\n\t\t\t\t\t\t((endmatch == nil && match2[0] < end) ||\n\t\t\t\t\t\t\t(endmatch != nil && match2[0] < endmatch[0])) {\n\n\t\t\t\t\t\tfound = true\n\t\t\t\t\t\tr := pattern2.CreateNode(data, i, d, match2)\n\t\t\t\t\t\tret.Append(r)\n\t\t\t\t\t\ti = r.Range.End\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif endmatch != nil {\n\t\t\t\t\tif len(p.EndCaptures) > 0 {\n\t\t\t\t\t\tp.CreateCaptureNodes(data, i, d, endmatch, &ret, p.EndCaptures)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tp.CreateCaptureNodes(data, i, d, endmatch, &ret, p.Captures)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tret.Range.End = end\n\t\t}\n\t}\n\tret.UpdateRange()\n\treturn &ret\n}\n\ntype dp struct {\n\tdata []rune\n}\n\nfunc (d *dp) Data(a, b int) string {\n\treturn string(d.data[a:b])\n}\n\nfunc (lp *LanguageParser) RootNode() *parser.Node {\n\treturn &lp.root\n}\n\nfunc (lp *LanguageParser) patch(lut []int, node *parser.Node) {\n\tnode.Range.Start = lut[node.Range.Start]\n\tnode.Range.End = lut[node.Range.End]\n\tfor _, child := range node.Children {\n\t\tlp.patch(lut, child)\n\t}\n}\n\nfunc (lp *LanguageParser) Parse(data string) bool {\n\td := &dp{[]rune(data)}\n\tlp.root = parser.Node{P: d, Name: lp.Language.ScopeName}\n\titer := maxiter\n\tfor i := 0; i < len(data) && iter > 0; iter-- {\n\t\tpat, ret := lp.Language.RootPattern.Cache(data, i)\n\t\tnl := strings.IndexAny(data[i:], \"\\n\\r\")\n\n\t\tif nl != -1 {\n\t\t\tnl += i\n\t\t}\n\t\tif ret == nil {\n\t\t\tbreak\n\t\t} else if nl > 0 && nl <= ret[0] {\n\t\t\ti = nl\n\t\t\tfor i < len(data) && (data[i] == '\\n' || data[i] == '\\r') {\n\t\t\t\ti++\n\t\t\t}\n\t\t} else {\n\t\t\tn := pat.CreateNode(data, i, d, ret)\n\t\t\tlp.root.Append(n)\n\n\t\t\ti = n.Range.End\n\t\t}\n\t}\n\tlp.root.UpdateRange()\n\tif len(data) != 0 {\n\t\tlut := make([]int, len(data))\n\t\tj := 0\n\t\tfor i := range data {\n\t\t\tlut[i] = j\n\t\t\tj++\n\t\t}\n\t\tlp.patch(lut, &lp.root)\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\n\tfabric \"github.com\/nimona\/go-nimona-fabric\"\n)\n\nfunc main() {\n\tpeerA, err := newPeer(\"PeerA\")\n\tif err != nil {\n\t\tlog.Fatal(\"Could not create peer A\", err)\n\t}\n\n\tpeerB, err := newPeer(\"PeerB\")\n\tif err != nil {\n\t\tlog.Fatal(\"Could not create peer B\", err)\n\t}\n\n\tlog.Println(\"Peer A address:\", peerA.GetAddresses())\n\n\tfor _, addr := range peerA.GetAddresses() {\n\t\tendpoint := addr + \"\/tls\/yamux\/router\/identity\/ping\"\n\t\tlog.Println(\"-------- Dialing\", endpoint)\n\t\tif err := peerB.DialContext(context.Background(), endpoint); err != nil {\n\t\t\tlog.Println(\"Dial error\", err)\n\t\t}\n\t\tendpoint = addr + \"\/tls\/yamux\/router\/ping\"\n\t\tlog.Println(\"-------- SECOND Dial\", endpoint)\n\t\tif err := peerB.DialContext(context.Background(), endpoint); err != nil {\n\t\t\tlog.Println(\"Dial error\", err)\n\t\t}\n\t}\n}\n\nfunc newPeer(peerID string) (*fabric.Fabric, error) {\n\tctx := context.Background()\n\tcrt, err := GenX509KeyPair()\n\tif err != nil {\n\t\tfmt.Println(\"Cert creation error\", err)\n\t\treturn nil, err\n\t}\n\n\tyamux := fabric.NewYamux()\n\trouter := fabric.NewRouter()\n\tidentity := &fabric.IdentityProtocol{Local: peerID}\n\ttls := &fabric.SecProtocol{\n\t\tConfig: tls.Config{\n\t\t\tCertificates: []tls.Certificate{crt},\n\t\t\tInsecureSkipVerify: true,\n\t\t},\n\t}\n\tping := &Ping{}\n\n\ttcp := fabric.NewTransportTCP(\"0.0.0.0\", 0)\n\t\/\/ ws := fabric.NewTransportWebsocket(\"0.0.0.0\", 0)\n\n\tf := fabric.New(ctx)\n\n\tf.AddTransport(yamux, []fabric.Protocol{router})\n\tf.AddTransport(tcp, []fabric.Protocol{tls, yamux, router})\n\t\/\/ f.AddTransport(ws, []fabric.Protocol{tls, yamux, router})\n\n\tf.AddProtocol(router)\n\tf.AddProtocol(tls)\n\tf.AddProtocol(yamux)\n\tf.AddProtocol(identity)\n\tf.AddProtocol(ping)\n\n\trouter.AddRoute(ping)\n\trouter.AddRoute(identity, ping)\n\n\tif err := f.Listen(ctx); err != nil {\n\t\tlog.Fatal(\"Could not listen for peer A\", err)\n\t}\n\n\treturn f, nil\n}\n<commit_msg>Fix example<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\n\tfabric \"github.com\/nimona\/go-nimona-fabric\"\n)\n\nfunc main() {\n\tpeerA, err := newPeer(\"PeerA\")\n\tif err != nil {\n\t\tlog.Fatal(\"Could not create peer A\", err)\n\t}\n\n\tpeerB, err := newPeer(\"PeerB\")\n\tif err != nil {\n\t\tlog.Fatal(\"Could not create peer B\", err)\n\t}\n\n\tlog.Println(\"Peer A address:\", peerA.GetAddresses())\n\n\tfor _, addr := range peerA.GetAddresses() {\n\t\tendpoint := addr + \"\/tls\/yamux\/router\/identity\/ping\"\n\t\tlog.Println(\"-------- Dialing\", endpoint)\n\t\tif err := peerB.DialContext(context.Background(), endpoint); err != nil {\n\t\t\tlog.Println(\"Dial error\", err)\n\t\t}\n\t\tendpoint = addr + \"\/tls\/yamux\/router\/ping\"\n\t\tlog.Println(\"-------- SECOND Dial\", endpoint)\n\t\tif err := peerB.DialContext(context.Background(), endpoint); err != nil {\n\t\t\tlog.Println(\"Dial error\", err)\n\t\t}\n\t}\n}\n\nfunc newPeer(peerID string) (*fabric.Fabric, error) {\n\tctx := context.Background()\n\tcrt, err := GenX509KeyPair()\n\tif err != nil {\n\t\tfmt.Println(\"Cert creation error\", err)\n\t\treturn nil, err\n\t}\n\n\tyamux := fabric.NewYamux()\n\trouter := fabric.NewRouter()\n\tidentity := &fabric.IdentityProtocol{Local: peerID}\n\ttls := &fabric.SecProtocol{\n\t\tConfig: tls.Config{\n\t\t\tCertificates: []tls.Certificate{crt},\n\t\t\tInsecureSkipVerify: true,\n\t\t},\n\t}\n\tping := &Ping{}\n\n\ttcp := fabric.NewTransportTCP(\"0.0.0.0\", 0)\n\tws := fabric.NewTransportWebsocket(\"0.0.0.0\", 0)\n\n\tf := fabric.New(ctx)\n\n\tf.AddTransport(yamux, []fabric.Protocol{router})\n\tf.AddTransport(tcp, []fabric.Protocol{tls, yamux, router})\n\tf.AddTransport(ws, []fabric.Protocol{tls, yamux, router})\n\n\tf.AddProtocol(router)\n\tf.AddProtocol(tls)\n\tf.AddProtocol(yamux)\n\tf.AddProtocol(identity)\n\tf.AddProtocol(ping)\n\n\trouter.AddRoute(ping)\n\trouter.AddRoute(identity, ping)\n\n\treturn f, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package e2e\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\n\t. \"github.com\/argoproj\/argo-cd\/pkg\/apis\/application\/v1alpha1\"\n\t\"github.com\/argoproj\/argo-cd\/test\/e2e\/fixture\"\n\t. \"github.com\/argoproj\/argo-cd\/test\/e2e\/fixture\/app\"\n)\n\nfunc TestKustomize2AppSource(t *testing.T) {\n\n\tpatchLabelMatchesFor := func(kind string) func(app *Application) {\n\t\treturn func(app *Application) {\n\t\t\tname := \"k2-patched-guestbook-ui\"\n\t\t\tlabelValue, err := fixture.Run(\n\t\t\t\t\"\", \"kubectl\", \"-n=\"+fixture.DeploymentNamespace(),\n\t\t\t\t\"get\", kind, name,\n\t\t\t\t\"-ojsonpath={.metadata.labels.patched-by}\")\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, \"argo-cd\", labelValue, \"wrong value of 'patched-by' label of %s %s\", kind, name)\n\t\t}\n\t}\n\n\tGiven(t).\n\t\tPath(guestbookPath).\n\t\tNamePrefix(\"k2-\").\n\t\tWhen().\n\t\tCreate().\n\t\tRefresh(RefreshTypeHard).\n\t\tPatchApp(`[\n\t\t\t{\n\t\t\t\t\"op\": \"replace\",\n\t\t\t\t\"path\": \"\/spec\/source\/kustomize\/namePrefix\",\n\t\t\t\t\"value\": \"k2-patched-\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"op\": \"add\",\n\t\t\t\t\"path\": \"\/spec\/source\/kustomize\/commonLabels\",\n\t\t\t\t\"value\": {\n\t\t\t\t\t\"patched-by\": \"argo-cd\"\n\t\t\t\t}\n\t\t\t}\n\t\t]`).\n\t\tThen().\n\t\tExpect(Success(\"\")).\n\t\tWhen().\n\t\tSync().\n\t\tThen().\n\t\tAnd(patchLabelMatchesFor(\"Service\")).\n\t\tAnd(patchLabelMatchesFor(\"Deployment\"))\n}\n\n\/\/ when we have a config map generator, AND the ignore annotation, it is ignored in the app's sync status\nfunc TestSyncStatusOptionIgnore(t *testing.T) {\n\tvar mapName string\n\tGiven(t).\n\t\tPath(\"kustomize-cm-gen\").\n\t\tWhen().\n\t\tCreate().\n\t\tSync().\n\t\tThen().\n\t\tExpect(OperationPhaseIs(OperationSucceeded)).\n\t\tExpect(SyncStatusIs(SyncStatusCodeSynced)).\n\t\tExpect(HealthIs(HealthStatusHealthy)).\n\t\tAnd(func(app *Application) {\n\t\t\tresourceStatus := app.Status.Resources[0]\n\t\t\tassert.Contains(t, resourceStatus.Name, \"my-map-\")\n\t\t\tassert.Equal(t, SyncStatusCodeSynced, resourceStatus.Status)\n\n\t\t\tmapName = resourceStatus.Name\n\t\t}).\n\t\tWhen().\n\t\t\/\/ we now force generation of a second CM\n\t\tPatchFile(\"kustomization.yaml\", `[{\"op\": \"replace\", \"path\": \"\/configMapGenerator\/0\/literals\/0\", \"value\": \"foo=baz\"}]`).\n\t\tRefresh(RefreshTypeHard).\n\t\tThen().\n\t\t\/\/ this is standard logging from the command - tough one - true statement\n\t\tWhen().\n\t\tSync().\n\t\tThen().\n\t\tExpect(Error(\"1 resources require pruning\")).\n\t\tExpect(OperationPhaseIs(OperationSucceeded)).\n\t\t\/\/ this is a key check - we expect the app to be healthy because, even though we have a resources that needs\n\t\t\/\/ pruning, because it is annotated with IgnoreExtraneous it should not contribute to the sync status\n\t\tExpect(SyncStatusIs(SyncStatusCodeSynced)).\n\t\tExpect(HealthIs(HealthStatusHealthy)).\n\t\tAnd(func(app *Application) {\n\t\t\tassert.Equal(t, 2, len(app.Status.Resources))\n\t\t\t\/\/ new map in-sync\n\t\t\t{\n\t\t\t\tresourceStatus := app.Status.Resources[0]\n\t\t\t\tassert.Contains(t, resourceStatus.Name, \"my-map-\")\n\t\t\t\t\/\/ make sure we've a new map with changed name\n\t\t\t\tassert.NotEqual(t, mapName, resourceStatus.Name)\n\t\t\t\tassert.Equal(t, SyncStatusCodeSynced, resourceStatus.Status)\n\t\t\t}\n\t\t\t\/\/ old map is out of sync\n\t\t\t{\n\t\t\t\tresourceStatus := app.Status.Resources[1]\n\t\t\t\tassert.Equal(t, mapName, resourceStatus.Name)\n\t\t\t\tassert.Equal(t, SyncStatusCodeOutOfSync, resourceStatus.Status)\n\t\t\t}\n\t\t})\n}\n\n\/\/ make sure we can create an app which has a SSH remote base\nfunc TestKustomizeSSHRemoteBase(t *testing.T) {\n\tGiven(t).\n\t\t\/\/ not the best test, as we should have two remote repos both with the same SSH private key\n\t\tSSHRepo().\n\t\tPath(\"remote-base\").\n\t\tWhen().\n\t\tCreate().\n\t\tSync().\n\t\tThen().\n\t\tExpect(OperationPhaseIs(OperationSucceeded)).\n\t\tExpect(ResourceSyncStatusIs(\"ConfigMap\", \"my-map\", SyncStatusCodeSynced))\n}\n\n\/\/ make sure we can create an app which has a SSH remote base\nfunc TestKustomizeDeclarativeInvalidApp(t *testing.T) {\n\tGiven(t).\n\t\tPath(\"invalid-kustomize\").\n\t\tWhen().\n\t\tDeclarative(\"declarative-apps\/app.yaml\").\n\t\tThen().\n\t\tExpect(Success(\"\")).\n\t\tExpect(HealthIs(HealthStatusHealthy)).\n\t\tExpect(SyncStatusIs(SyncStatusCodeUnknown)).\n\t\tExpect(Condition(ApplicationConditionComparisonError, \"invalid-kustomize\/does-not-exist.yaml: no such file or directory\"))\n}\n<commit_msg>Fixes TestKustomize2AppSource. Closes #1800 (#1801)<commit_after>package e2e\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\n\t. \"github.com\/argoproj\/argo-cd\/pkg\/apis\/application\/v1alpha1\"\n\t\"github.com\/argoproj\/argo-cd\/test\/e2e\/fixture\"\n\t. \"github.com\/argoproj\/argo-cd\/test\/e2e\/fixture\/app\"\n)\n\nfunc TestKustomize2AppSource(t *testing.T) {\n\n\tpatchLabelMatchesFor := func(kind string) func(app *Application) {\n\t\treturn func(app *Application) {\n\t\t\tname := \"k2-patched-guestbook-ui\"\n\t\t\tlabelValue, err := fixture.Run(\n\t\t\t\t\"\", \"kubectl\", \"-n=\"+fixture.DeploymentNamespace(),\n\t\t\t\t\"get\", kind, name,\n\t\t\t\t\"-ojsonpath={.metadata.labels.patched-by}\")\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, \"argo-cd\", labelValue, \"wrong value of 'patched-by' label of %s %s\", kind, name)\n\t\t}\n\t}\n\n\tGiven(t).\n\t\tPath(guestbookPath).\n\t\tNamePrefix(\"k2-\").\n\t\tWhen().\n\t\tCreate().\n\t\tRefresh(RefreshTypeHard).\n\t\tPatchApp(`[\n\t\t\t{\n\t\t\t\t\"op\": \"replace\",\n\t\t\t\t\"path\": \"\/spec\/source\/kustomize\/namePrefix\",\n\t\t\t\t\"value\": \"k2-patched-\"\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"op\": \"add\",\n\t\t\t\t\"path\": \"\/spec\/source\/kustomize\/commonLabels\",\n\t\t\t\t\"value\": {\n\t\t\t\t\t\"patched-by\": \"argo-cd\"\n\t\t\t\t}\n\t\t\t}\n\t\t]`).\n\t\tThen().\n\t\tExpect(Success(\"\")).\n\t\tWhen().\n\t\tSync().\n\t\tThen().\n\t\tExpect(Success(\"\")).\n\t\tExpect(OperationPhaseIs(OperationSucceeded)).\n\t\tExpect(SyncStatusIs(SyncStatusCodeSynced)).\n\t\tExpect(HealthIs(HealthStatusHealthy)).\n\t\tAnd(patchLabelMatchesFor(\"Service\")).\n\t\tAnd(patchLabelMatchesFor(\"Deployment\"))\n}\n\n\/\/ when we have a config map generator, AND the ignore annotation, it is ignored in the app's sync status\nfunc TestSyncStatusOptionIgnore(t *testing.T) {\n\tvar mapName string\n\tGiven(t).\n\t\tPath(\"kustomize-cm-gen\").\n\t\tWhen().\n\t\tCreate().\n\t\tSync().\n\t\tThen().\n\t\tExpect(OperationPhaseIs(OperationSucceeded)).\n\t\tExpect(SyncStatusIs(SyncStatusCodeSynced)).\n\t\tExpect(HealthIs(HealthStatusHealthy)).\n\t\tAnd(func(app *Application) {\n\t\t\tresourceStatus := app.Status.Resources[0]\n\t\t\tassert.Contains(t, resourceStatus.Name, \"my-map-\")\n\t\t\tassert.Equal(t, SyncStatusCodeSynced, resourceStatus.Status)\n\n\t\t\tmapName = resourceStatus.Name\n\t\t}).\n\t\tWhen().\n\t\t\/\/ we now force generation of a second CM\n\t\tPatchFile(\"kustomization.yaml\", `[{\"op\": \"replace\", \"path\": \"\/configMapGenerator\/0\/literals\/0\", \"value\": \"foo=baz\"}]`).\n\t\tRefresh(RefreshTypeHard).\n\t\tThen().\n\t\t\/\/ this is standard logging from the command - tough one - true statement\n\t\tWhen().\n\t\tSync().\n\t\tThen().\n\t\tExpect(Error(\"1 resources require pruning\")).\n\t\tExpect(OperationPhaseIs(OperationSucceeded)).\n\t\t\/\/ this is a key check - we expect the app to be healthy because, even though we have a resources that needs\n\t\t\/\/ pruning, because it is annotated with IgnoreExtraneous it should not contribute to the sync status\n\t\tExpect(SyncStatusIs(SyncStatusCodeSynced)).\n\t\tExpect(HealthIs(HealthStatusHealthy)).\n\t\tAnd(func(app *Application) {\n\t\t\tassert.Equal(t, 2, len(app.Status.Resources))\n\t\t\t\/\/ new map in-sync\n\t\t\t{\n\t\t\t\tresourceStatus := app.Status.Resources[0]\n\t\t\t\tassert.Contains(t, resourceStatus.Name, \"my-map-\")\n\t\t\t\t\/\/ make sure we've a new map with changed name\n\t\t\t\tassert.NotEqual(t, mapName, resourceStatus.Name)\n\t\t\t\tassert.Equal(t, SyncStatusCodeSynced, resourceStatus.Status)\n\t\t\t}\n\t\t\t\/\/ old map is out of sync\n\t\t\t{\n\t\t\t\tresourceStatus := app.Status.Resources[1]\n\t\t\t\tassert.Equal(t, mapName, resourceStatus.Name)\n\t\t\t\tassert.Equal(t, SyncStatusCodeOutOfSync, resourceStatus.Status)\n\t\t\t}\n\t\t})\n}\n\n\/\/ make sure we can create an app which has a SSH remote base\nfunc TestKustomizeSSHRemoteBase(t *testing.T) {\n\tGiven(t).\n\t\t\/\/ not the best test, as we should have two remote repos both with the same SSH private key\n\t\tSSHRepo().\n\t\tPath(\"remote-base\").\n\t\tWhen().\n\t\tCreate().\n\t\tSync().\n\t\tThen().\n\t\tExpect(OperationPhaseIs(OperationSucceeded)).\n\t\tExpect(ResourceSyncStatusIs(\"ConfigMap\", \"my-map\", SyncStatusCodeSynced))\n}\n\n\/\/ make sure we can create an app which has a SSH remote base\nfunc TestKustomizeDeclarativeInvalidApp(t *testing.T) {\n\tGiven(t).\n\t\tPath(\"invalid-kustomize\").\n\t\tWhen().\n\t\tDeclarative(\"declarative-apps\/app.yaml\").\n\t\tThen().\n\t\tExpect(Success(\"\")).\n\t\tExpect(HealthIs(HealthStatusHealthy)).\n\t\tExpect(SyncStatusIs(SyncStatusCodeUnknown)).\n\t\tExpect(Condition(ApplicationConditionComparisonError, \"invalid-kustomize\/does-not-exist.yaml: no such file or directory\"))\n}\n<|endoftext|>"} {"text":"<commit_before>package plugins\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cenk\/backoff\"\n\t\"github.com\/fatih\/color\"\n\tmarathon \"github.com\/gambol99\/go-marathon\"\n\tconsul \"github.com\/hashicorp\/consul\/api\"\n\n\t\"github.com\/InnovaCo\/serve\/manifest\"\n)\n\nfunc init() {\n\tmanifest.PluginRegestry.Add(\"deploy.marathon\", DeployMarathon{})\n}\n\ntype DeployMarathon struct{}\n\nfunc (p DeployMarathon) Run(data manifest.Manifest) error {\n\tif data.GetBool(\"purge\") {\n\t\treturn p.Uninstall(data)\n\t} else {\n\t\treturn p.Install(data)\n\t}\n}\n\nfunc (p DeployMarathon) Install(data manifest.Manifest) error {\n\tmarathonApi, err := MarathonClient(data.GetString(\"marathon-host\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfullName := data.GetString(\"app-name\")\n\n\tbs, bf, bmax := 1.0, 2.0, 30.0\n\tapp := &marathon.Application{\n\t\tBackoffSeconds: &bs,\n\t\tBackoffFactor: &bf,\n\t\tMaxLaunchDelaySeconds: &bmax,\n\t}\n\n\tapp.Name(fullName)\n\tapp.Command(fmt.Sprintf(\"serve-tools consul supervisor --service '%s' --port $PORT0 start %s\", fullName, data.GetString(\"cmd\")))\n\tapp.Count(data.GetInt(\"instances\"))\n\tapp.Memory(float64(data.GetInt(\"mem\")))\n\n\tif cpu, err := strconv.ParseFloat(data.GetString(\"cpu\"), 64); err == nil {\n\t\tapp.CPU(cpu)\n\t}\n\n\tif constrs := data.GetString(\"constraints\"); constrs != \"\" {\n\t\tcs := strings.SplitN(constrs, \":\", 2)\n\t\tapp.AddConstraint(cs[0], \"CLUSTER\", cs[1])\n\t\tapp.AddLabel(cs[0], cs[1])\n\t}\n\n\tfor k, v := range data.GetMap(\"environment\") {\n\t\tapp.AddEnv(k, fmt.Sprintf(\"%s\", v.Unwrap()))\n\t}\n\n\tapp.AddUris(data.GetString(\"package-uri\"))\n\n\tif _, err := marathonApi.UpdateApplication(app, false); err != nil {\n\t\tcolor.Yellow(\"marathon <- %s\", app)\n\t\treturn err\n\t}\n\n\tcolor.Green(\"marathon <- %s\", app)\n\n\tconsulApi, err := ConsulClient(data.GetString(\"consul-host\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := registerPluginData(\"deploy.marathon\", data.GetString(\"app-name\"), data.String(), data.GetString(\"consul-host\")); err != nil {\n\t\treturn err\n\t}\n\n\treturn backoff.Retry(func() error {\n\t\tservices, _, err := consulApi.Health().Service(fullName, \"\", true, nil)\n\n\t\tif err != nil {\n\t\t\tlog.Println(color.RedString(\"Error in check health in consul: %v\", err))\n\t\t\treturn err\n\t\t}\n\n\t\tif len(services) == 0 {\n\t\t\tlog.Printf(\"Service `%s` not started yet! Retry...\", fullName)\n\t\t\treturn fmt.Errorf(\"Service `%s` not started!\", fullName)\n\t\t}\n\n\t\tlog.Println(color.GreenString(\"Service `%s` successfully started!\", fullName))\n\t\treturn nil\n\t}, backoff.NewExponentialBackOff())\n}\n\nfunc (p DeployMarathon) Uninstall(data manifest.Manifest) error {\n\tmarathonApi, err := MarathonClient(data.GetString(\"marathon-host\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tname := data.GetString(\"app-name\")\n\n\tif _, err := marathonApi.Application(name); err == nil {\n\t\tif _, err := marathonApi.DeleteApplication(name, false); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tlog.Println(color.YellowString(\"App `%s` doesnt exists in marathon!\", name))\n\t}\n\n\treturn deletePluginData(\"deploy.marathon\", name, data.GetString(\"consul-host\"))\n}\n\nfunc MarathonClient(marathonHost string) (marathon.Marathon, error) {\n\tconf := marathon.NewDefaultConfig()\n\tconf.URL = fmt.Sprintf(\"http:\/\/%s:8080\", marathonHost)\n\tconf.LogOutput = os.Stdout\n\treturn marathon.NewClient(conf)\n}\n\nfunc ConsulClient(consulHost string) (*consul.Client, error) {\n\tconf := consul.DefaultConfig()\n\tconf.Address = consulHost + \":8500\"\n\treturn consul.NewClient(conf)\n}\n\nfunc putConsulKv(client *consul.Client, key string, value string) error {\n\tlog.Printf(\"consul put `%s`: %s\", key, value)\n\t_, err := client.KV().Put(&consul.KVPair{Key: strings.TrimPrefix(key, \"\/\"), Value: []byte(value)}, nil)\n\treturn err\n}\n\nfunc listConsulKv(client *consul.Client, prefix string, q *consul.QueryOptions) (consul.KVPairs, error) {\n\tlog.Printf(\"consul list `%s`\", prefix)\n\tlist, _, err := client.KV().List(prefix, q)\n\treturn list, err\n}\n\nfunc delConsulKv(client *consul.Client, key string) error {\n\tlog.Printf(\"consul delete `%s`\", key)\n\t_, err := client.KV().Delete(strings.TrimPrefix(key, \"\/\"), nil)\n\treturn err\n}\n\nfunc registerPluginData(plugin string, packageName string, data string, consulHost string) error {\n\tconsulApi, err := ConsulClient(consulHost)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn putConsulKv(consulApi, \"services\/data\/\"+packageName+\"\/\"+plugin, data)\n}\n\nfunc deletePluginData(plugin string, packageName string, consulHost string) error {\n\tlog.Println(color.YellowString(\"Delete %s for %s package in consul\", plugin, packageName))\n\tconsulApi, err := ConsulClient(consulHost)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn delConsulKv(consulApi, \"services\/data\/\"+packageName+\"\/\"+plugin)\n}\n\nfunc markAsOutdated(client *consul.Client, name string, delay time.Duration) error {\n\tlog.Printf(\"Mark service `%s` as outdated\\n\", name)\n\tjson := fmt.Sprintf(`{\"endOfLife\":%d}`, time.Now().Add(delay).UnixNano()\/int64(time.Millisecond))\n\treturn putConsulKv(client, \"services\/outdated\/\"+name, json)\n}\n<commit_msg>= marathon: log requests to os.Stderr<commit_after>package plugins\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cenk\/backoff\"\n\t\"github.com\/fatih\/color\"\n\tmarathon \"github.com\/gambol99\/go-marathon\"\n\tconsul \"github.com\/hashicorp\/consul\/api\"\n\n\t\"github.com\/InnovaCo\/serve\/manifest\"\n)\n\nfunc init() {\n\tmanifest.PluginRegestry.Add(\"deploy.marathon\", DeployMarathon{})\n}\n\ntype DeployMarathon struct{}\n\nfunc (p DeployMarathon) Run(data manifest.Manifest) error {\n\tif data.GetBool(\"purge\") {\n\t\treturn p.Uninstall(data)\n\t} else {\n\t\treturn p.Install(data)\n\t}\n}\n\nfunc (p DeployMarathon) Install(data manifest.Manifest) error {\n\tmarathonApi, err := MarathonClient(data.GetString(\"marathon-host\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfullName := data.GetString(\"app-name\")\n\n\tbs, bf, bmax := 1.0, 2.0, 30.0\n\tapp := &marathon.Application{\n\t\tBackoffSeconds: &bs,\n\t\tBackoffFactor: &bf,\n\t\tMaxLaunchDelaySeconds: &bmax,\n\t}\n\n\tapp.Name(fullName)\n\tapp.Command(fmt.Sprintf(\"serve-tools consul supervisor --service '%s' --port $PORT0 start %s\", fullName, data.GetString(\"cmd\")))\n\tapp.Count(data.GetInt(\"instances\"))\n\tapp.Memory(float64(data.GetInt(\"mem\")))\n\n\tif cpu, err := strconv.ParseFloat(data.GetString(\"cpu\"), 64); err == nil {\n\t\tapp.CPU(cpu)\n\t}\n\n\tif constrs := data.GetString(\"constraints\"); constrs != \"\" {\n\t\tcs := strings.SplitN(constrs, \":\", 2)\n\t\tapp.AddConstraint(cs[0], \"CLUSTER\", cs[1])\n\t\tapp.AddLabel(cs[0], cs[1])\n\t}\n\n\tfor k, v := range data.GetMap(\"environment\") {\n\t\tapp.AddEnv(k, fmt.Sprintf(\"%s\", v.Unwrap()))\n\t}\n\n\tapp.AddUris(data.GetString(\"package-uri\"))\n\n\tif _, err := marathonApi.UpdateApplication(app, false); err != nil {\n\t\tcolor.Yellow(\"marathon <- %s\", app)\n\t\treturn err\n\t}\n\n\tcolor.Green(\"marathon <- %s\", app)\n\n\tconsulApi, err := ConsulClient(data.GetString(\"consul-host\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := registerPluginData(\"deploy.marathon\", data.GetString(\"app-name\"), data.String(), data.GetString(\"consul-host\")); err != nil {\n\t\treturn err\n\t}\n\n\treturn backoff.Retry(func() error {\n\t\tservices, _, err := consulApi.Health().Service(fullName, \"\", true, nil)\n\n\t\tif err != nil {\n\t\t\tlog.Println(color.RedString(\"Error in check health in consul: %v\", err))\n\t\t\treturn err\n\t\t}\n\n\t\tif len(services) == 0 {\n\t\t\tlog.Printf(\"Service `%s` not started yet! Retry...\", fullName)\n\t\t\treturn fmt.Errorf(\"Service `%s` not started!\", fullName)\n\t\t}\n\n\t\tlog.Println(color.GreenString(\"Service `%s` successfully started!\", fullName))\n\t\treturn nil\n\t}, backoff.NewExponentialBackOff())\n}\n\nfunc (p DeployMarathon) Uninstall(data manifest.Manifest) error {\n\tmarathonApi, err := MarathonClient(data.GetString(\"marathon-host\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tname := data.GetString(\"app-name\")\n\n\tif _, err := marathonApi.Application(name); err == nil {\n\t\tif _, err := marathonApi.DeleteApplication(name, false); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tlog.Println(color.YellowString(\"App `%s` doesnt exists in marathon!\", name))\n\t}\n\n\treturn deletePluginData(\"deploy.marathon\", name, data.GetString(\"consul-host\"))\n}\n\nfunc MarathonClient(marathonHost string) (marathon.Marathon, error) {\n\tconf := marathon.NewDefaultConfig()\n\tconf.URL = fmt.Sprintf(\"http:\/\/%s:8080\", marathonHost)\n\tconf.LogOutput = os.Stderr\n\treturn marathon.NewClient(conf)\n}\n\nfunc ConsulClient(consulHost string) (*consul.Client, error) {\n\tconf := consul.DefaultConfig()\n\tconf.Address = consulHost + \":8500\"\n\treturn consul.NewClient(conf)\n}\n\nfunc putConsulKv(client *consul.Client, key string, value string) error {\n\tlog.Printf(\"consul put `%s`: %s\", key, value)\n\t_, err := client.KV().Put(&consul.KVPair{Key: strings.TrimPrefix(key, \"\/\"), Value: []byte(value)}, nil)\n\treturn err\n}\n\nfunc listConsulKv(client *consul.Client, prefix string, q *consul.QueryOptions) (consul.KVPairs, error) {\n\tlog.Printf(\"consul list `%s`\", prefix)\n\tlist, _, err := client.KV().List(prefix, q)\n\treturn list, err\n}\n\nfunc delConsulKv(client *consul.Client, key string) error {\n\tlog.Printf(\"consul delete `%s`\", key)\n\t_, err := client.KV().Delete(strings.TrimPrefix(key, \"\/\"), nil)\n\treturn err\n}\n\nfunc registerPluginData(plugin string, packageName string, data string, consulHost string) error {\n\tconsulApi, err := ConsulClient(consulHost)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn putConsulKv(consulApi, \"services\/data\/\"+packageName+\"\/\"+plugin, data)\n}\n\nfunc deletePluginData(plugin string, packageName string, consulHost string) error {\n\tlog.Println(color.YellowString(\"Delete %s for %s package in consul\", plugin, packageName))\n\tconsulApi, err := ConsulClient(consulHost)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn delConsulKv(consulApi, \"services\/data\/\"+packageName+\"\/\"+plugin)\n}\n\nfunc markAsOutdated(client *consul.Client, name string, delay time.Duration) error {\n\tlog.Printf(\"Mark service `%s` as outdated\\n\", name)\n\tjson := fmt.Sprintf(`{\"endOfLife\":%d}`, time.Now().Add(delay).UnixNano()\/int64(time.Millisecond))\n\treturn putConsulKv(client, \"services\/outdated\/\"+name, json)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/petemoore\/taskcluster-client-go\/utils\"\n\t\"github.com\/streadway\/amqp\"\n)\n\nfunc main() {\n\t\/\/ connection, err := amqp.Dial(\"amqps:\/\/pulse.mozilla.org\/exchange\/taskcluster-queue\/v1\/task-defined\")\n\tconnection, err := amqp.Dial(\"amqp:\/\/localhost\/\")\n\tutils.ExitOnFail(err)\n\tdefer connection.Close()\n\tchannel, err := connection.Channel()\n\tutils.ExitOnFail(err)\n\t_, err = channel.QueueDeclare(\"hello\", false, false, false, false, nil)\n\tmsg := amqp.Publishing{\n\t\tContentType: \"application\/json\",\n\t\tBody: []byte(`{\"name\":\"pmoore\"}`)}\n\tchannel.Publish(\"\", \"hello\", false, false, msg)\n\tutils.ExitOnFail(err)\n\tfmt.Println(\"Published message\")\n}\n<commit_msg>Work in progress<commit_after>package exchange\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n)\n\ntype TaskDefined struct {\n\tRoutingKeyKind string `mwords:\"*\"`\n\tTaskId string `mwords:\"*\"`\n\tRunId string `mwords:\"*\"`\n\tWorkerGroup string `mwords:\"*\"`\n\tWorkerId string `mwords:\"*\"`\n\tProvisionerId string `mwords:\"*\"`\n\tWorkerType string `mwords:\"*\"`\n\tSchedulerId string `mwords:\"*\"`\n\tTaskGroupId string `mwords:\"*\"`\n\tReserved string `mwords:\"#\"`\n}\n\nfunc (x TaskDefined) RoutingKey() string {\n\treturn generateRoutingKey(&x)\n}\n\nfunc (x TaskDefined) ExchangeName() string {\n\treturn \"exchange\/taskcluster-queue\/v1\/task-defined\"\n}\n\nfunc generateRoutingKey(x *TaskDefined) string {\n\tval := reflect.ValueOf(x).Elem()\n\tp := make([]string, 0, val.NumField())\n\tfor i := 0; i < val.NumField(); i++ {\n\t\tvalueField := val.Field(i)\n\t\ttypeField := val.Type().Field(i)\n\t\ttag := typeField.Tag\n\t\tif t := tag.Get(\"mwords\"); t != \"\" {\n\t\t\tif v := valueField.Interface(); v == \"\" {\n\t\t\t\tp = append(p, t)\n\t\t\t} else {\n\t\t\t\tp = append(p, v.(string))\n\t\t\t}\n\t\t}\n\t}\n\treturn strings.Join(p, \".\")\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Add getDigit into MBS<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ The experiments package holds various experiments with NEAT.\npackage experiments\n\nimport (\n\t\"github.com\/yaricom\/goNEAT\/neat\/genetics\"\n\t\"github.com\/yaricom\/goNEAT\/neat\"\n\t\"fmt\"\n\t\"time\"\n)\n\n\/\/ The interface describing evaluator for one epoch of evolution.\ntype EpochEvaluator interface {\n\tEpochEvaluate(pop *genetics.Population, epoch *Epoch, context *neat.NeatContext) (err error)\n}\n\n\n\/\/ The Experiment execution entry point\nfunc (ex *Experiment) Execute(context *neat.NeatContext, start_genome *genetics.Genome, epoch_executor EpochEvaluator) (err error) {\n\tif ex.Trials == nil {\n\t\tex.Trials = make(Trials, context.NumRuns)\n\t}\n\n\tvar pop *genetics.Population\n\tfor run := 0; run < context.NumRuns; run++ {\n\t\tneat.InfoLog(\"\\n>>>>> Spawning new population \")\n\t\tpop, err = genetics.NewPopulation(start_genome, context)\n\t\tif err != nil {\n\t\t\tneat.InfoLog(\"Failed to spawn new population from start genome\")\n\t\t\treturn err\n\t\t} else {\n\t\t\tneat.InfoLog(\"OK <<<<<\")\n\t\t}\n\t\tneat.InfoLog(\">>>>> Verifying spawned population \")\n\t\t_, err = pop.Verify()\n\t\tif err != nil {\n\t\t\tneat.ErrorLog(\"\\n!!!!! Population verification failed !!!!!\")\n\t\t\treturn err\n\t\t} else {\n\t\t\tneat.InfoLog(\"OK <<<<<\")\n\t\t}\n\n\t\t\/\/ start new trial\n\t\ttrial := Trial {\n\t\t\tId:run,\n\t\t}\n\n\t\tfor gen := 0; gen < context.NumGenerations; gen++ {\n\t\t\tneat.InfoLog(fmt.Sprintf(\">>>>> Epoch:%3d\\tRun: %d\\n\", gen, run))\n\t\t\tepoch := Epoch{\n\t\t\t\tId:gen,\n\t\t\t}\n\t\t\terr = epoch_executor.EpochEvaluate(pop, &epoch, context)\n\t\t\tif err != nil {\n\t\t\t\tneat.InfoLog(fmt.Sprintf(\"!!!!! Epoch %d evaluation failed !!!!!\\n\", gen))\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tepoch.Executed = time.Now()\n\t\t\ttrial.Epochs = append(trial.Epochs, epoch)\n\t\t\tif epoch.Solved {\n\t\t\t\tneat.InfoLog(fmt.Sprintf(\">>>>> The winner organism found in epoch %d! <<<<<\\n\", gen))\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t\/\/ store trial into experiment\n\t\tex.Trials[run] = trial\n\t}\n\n\treturn nil\n}\n<commit_msg>Added trial run observer interface<commit_after>\/\/ The experiments package holds various experiments with NEAT.\npackage experiments\n\nimport (\n\t\"github.com\/yaricom\/goNEAT\/neat\/genetics\"\n\t\"github.com\/yaricom\/goNEAT\/neat\"\n\t\"fmt\"\n\t\"time\"\n)\n\n\/\/ The interface describing evaluator for one epoch of evolution.\ntype EpochEvaluator interface {\n\t\/\/ Invoked to evaluate one epoch of evolution over provided population of organisms within given\n\t\/\/ execution context.\n\tEpochEvaluate(pop *genetics.Population, epoch *Epoch, context *neat.NeatContext) (err error)\n}\n\n\/\/ The interface to describe trial lifecycle observer interested to receive lifecycle notifications\ntype TrialRunObserver interface {\n\t\/\/ Invoked to notify that new trial run just started before any epoch evaluation in that trial run\n\tTrialRunStarted(trial *Trial)\n}\n\n\n\/\/ The Experiment execution entry point\nfunc (ex *Experiment) Execute(context *neat.NeatContext, start_genome *genetics.Genome, executor interface{}) (err error) {\n\tif ex.Trials == nil {\n\t\tex.Trials = make(Trials, context.NumRuns)\n\t}\n\n\tvar pop *genetics.Population\n\tfor run := 0; run < context.NumRuns; run++ {\n\t\tneat.InfoLog(\"\\n>>>>> Spawning new population \")\n\t\tpop, err = genetics.NewPopulation(start_genome, context)\n\t\tif err != nil {\n\t\t\tneat.InfoLog(\"Failed to spawn new population from start genome\")\n\t\t\treturn err\n\t\t} else {\n\t\t\tneat.InfoLog(\"OK <<<<<\")\n\t\t}\n\t\tneat.InfoLog(\">>>>> Verifying spawned population \")\n\t\t_, err = pop.Verify()\n\t\tif err != nil {\n\t\t\tneat.ErrorLog(\"\\n!!!!! Population verification failed !!!!!\")\n\t\t\treturn err\n\t\t} else {\n\t\t\tneat.InfoLog(\"OK <<<<<\")\n\t\t}\n\n\t\t\/\/ start new trial\n\t\ttrial := Trial {\n\t\t\tId:run,\n\t\t}\n\n\t\tif trialObserver, ok := executor.(TrialRunObserver); ok {\n\t\t\ttrialObserver.TrialRunStarted(&trial) \/\/ optional\n\t\t}\n\n\t\tepoch_evaluator := executor.(EpochEvaluator) \/\/ mandatory\n\n\t\tfor gen := 0; gen < context.NumGenerations; gen++ {\n\t\t\tneat.InfoLog(fmt.Sprintf(\">>>>> Epoch:%3d\\tRun: %d\\n\", gen, run))\n\t\t\tepoch := Epoch{\n\t\t\t\tId:gen,\n\t\t\t}\n\t\t\terr = epoch_evaluator.EpochEvaluate(pop, &epoch, context)\n\t\t\tif err != nil {\n\t\t\t\tneat.InfoLog(fmt.Sprintf(\"!!!!! Epoch %d evaluation failed !!!!!\\n\", gen))\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tepoch.Executed = time.Now()\n\t\t\ttrial.Epochs = append(trial.Epochs, epoch)\n\t\t\tif epoch.Solved {\n\t\t\t\tneat.InfoLog(fmt.Sprintf(\">>>>> The winner organism found in epoch %d! <<<<<\\n\", gen))\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t\/\/ store trial into experiment\n\t\tex.Trials[run] = trial\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package exporter\n\nimport (\n\t\"testing\"\n\t\"strings\"\n)\n\nfunc TestAllRegexpsCompile(t *testing.T) {\n\tpatterns := loadPatternDir(t)\n\tfor pattern := range *patterns {\n\t\t_, err := Compile(\"%{\"+pattern+\"}\", patterns)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%v\", err.Error())\n\t\t}\n\t}\n}\n\nfunc TestUnknownGrokPattern(t *testing.T) {\n\tpatterns := loadPatternDir(t)\n\t_, err := Compile(\"%{USER} [a-z] %{SOME_UNKNOWN_PATTERN}.*\", patterns)\n\tif err == nil || ! strings.Contains(err.Error(), \"SOME_UNKNOWN_PATTERN\") {\n\t\tt.Error(\"expected error message saying which pattern is undefined.\")\n\t}\n}\n\nfunc TestInvalidRegexp(t *testing.T) {\n\tpatterns := loadPatternDir(t)\n\t_, err := Compile(\"%{USER} [a-z] \\\\\", patterns) \/\/ wrong because regex cannot end with backslash\n\tif err == nil || ! strings.Contains(err.Error(), \"%{USER} [a-z] \\\\\") {\n\t\tt.Error(\"expected error message saying which pattern is invalid.\")\n\t}\n}\n\nfunc TestNamedCaptureGroup(t *testing.T) {\n\tpatterns := loadPatternDir(t)\n\tregex, err := Compile(\"User %{USER:user} has logged in.\", patterns)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tfound := regex.Gsub(\"User fabian has logged in.\", \"\\\\k<user>\")\n\tif found != \"fabian\" {\n\t\tt.Errorf(\"Expected to capture 'fabian', but captured '%v'.\", found)\n\t}\n}\n<commit_msg>go fmt<commit_after>package exporter\n\nimport (\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestAllRegexpsCompile(t *testing.T) {\n\tpatterns := loadPatternDir(t)\n\tfor pattern := range *patterns {\n\t\t_, err := Compile(\"%{\"+pattern+\"}\", patterns)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%v\", err.Error())\n\t\t}\n\t}\n}\n\nfunc TestUnknownGrokPattern(t *testing.T) {\n\tpatterns := loadPatternDir(t)\n\t_, err := Compile(\"%{USER} [a-z] %{SOME_UNKNOWN_PATTERN}.*\", patterns)\n\tif err == nil || !strings.Contains(err.Error(), \"SOME_UNKNOWN_PATTERN\") {\n\t\tt.Error(\"expected error message saying which pattern is undefined.\")\n\t}\n}\n\nfunc TestInvalidRegexp(t *testing.T) {\n\tpatterns := loadPatternDir(t)\n\t_, err := Compile(\"%{USER} [a-z] \\\\\", patterns) \/\/ wrong because regex cannot end with backslash\n\tif err == nil || !strings.Contains(err.Error(), \"%{USER} [a-z] \\\\\") {\n\t\tt.Error(\"expected error message saying which pattern is invalid.\")\n\t}\n}\n\nfunc TestNamedCaptureGroup(t *testing.T) {\n\tpatterns := loadPatternDir(t)\n\tregex, err := Compile(\"User %{USER:user} has logged in.\", patterns)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tfound := regex.Gsub(\"User fabian has logged in.\", \"\\\\k<user>\")\n\tif found != \"fabian\" {\n\t\tt.Errorf(\"Expected to capture 'fabian', but captured '%v'.\", found)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright (c) 2014-2015, Percona LLC and\/or its affiliates. All rights reserved.\n\n This program is free software: you can redistribute it and\/or modify\n it under the terms of the GNU Affero General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU Affero General Public License for more details.\n\n You should have received a copy of the GNU Affero General Public License\n along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>\n*\/\n\npackage qan\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"runtime\/debug\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/percona\/percona-agent\/data\"\n\t\"github.com\/percona\/percona-agent\/mysql\"\n\t\"github.com\/percona\/percona-agent\/pct\"\n\t\"github.com\/percona\/percona-agent\/ticker\"\n)\n\n\/\/ A Worker gets queries, aggregates them, and returns a Result. Workers are ran\n\/\/ by Analyzers. When ran, MySQL is presumed to be configured and ready.\ntype Worker interface {\n\tSetup(*Interval) error\n\tRun() (*Result, error)\n\tStop() error\n\tCleanup() error\n\tStatus() map[string]string\n}\n\n\/\/ An Analyzer runs a Worker at each Interval. Analyzers are responsible for\n\/\/ MySQL: configuring, restarts, etc. The Worker is only ran when MySQL is\n\/\/ configured and ready. Analyzers are also responsible for making Reports from\n\/\/ the Results returned by Workers. The Worker determines the type of Analyzer:\n\/\/ slowlog or perfschema. Analyzers are ran by the QAN Manager.\ntype Analyzer interface {\n\tStart() error\n\tStop() error\n\tStatus() map[string]string\n\tString() string\n}\n\n\/\/ An AnalyzerFactory makes an Analyzer, real or mock.\ntype AnalyzerFactory interface {\n\tMake(config Config, name string, mysqlConn mysql.Connector, restartChan <-chan bool, tickChan chan time.Time) Analyzer\n}\n\n\/\/ --------------------------------------------------------------------------\n\ntype RealAnalyzer struct {\n\tlogger *pct.Logger\n\tconfig Config\n\titer IntervalIter\n\tmysqlConn mysql.Connector\n\trestartChan <-chan bool\n\tworker Worker\n\tclock ticker.Manager\n\tspool data.Spooler\n\t\/\/ --\n\tname string\n\tmysqlConfiguredChan chan bool\n\tworkerDoneChan chan *Interval\n\tstatus *pct.Status\n\trunSync *pct.SyncChan\n\tconfigureMySQLSync *pct.SyncChan\n\trunning bool\n\tmux *sync.RWMutex\n}\n\nfunc NewRealAnalyzer(logger *pct.Logger, config Config, iter IntervalIter, mysqlConn mysql.Connector, restartChan <-chan bool, worker Worker, clock ticker.Manager, spool data.Spooler) *RealAnalyzer {\n\tname := logger.Service()\n\ta := &RealAnalyzer{\n\t\tlogger: logger,\n\t\tconfig: config,\n\t\titer: iter,\n\t\tmysqlConn: mysqlConn,\n\t\trestartChan: restartChan,\n\t\tworker: worker,\n\t\tclock: clock,\n\t\tspool: spool,\n\t\t\/\/ --\n\t\tname: name,\n\t\tmysqlConfiguredChan: make(chan bool, 1),\n\t\tworkerDoneChan: make(chan *Interval, 1),\n\t\tstatus: pct.NewStatus([]string{name, name + \"-last-interval\", name + \"-next-interval\"}),\n\t\trunSync: pct.NewSyncChan(),\n\t\tconfigureMySQLSync: pct.NewSyncChan(),\n\t\tmux: &sync.RWMutex{},\n\t}\n\treturn a\n}\n\nfunc (a *RealAnalyzer) String() string {\n\treturn a.name\n}\n\nfunc (a *RealAnalyzer) Start() error {\n\ta.logger.Debug(\"Start:call\")\n\tdefer a.logger.Debug(\"Start:return\")\n\ta.mux.Lock()\n\tdefer a.mux.Unlock()\n\tif a.running {\n\t\treturn nil\n\t}\n\tgo a.run()\n\ta.running = true\n\treturn nil\n}\n\nfunc (a *RealAnalyzer) Stop() error {\n\ta.logger.Debug(\"Stop:call\")\n\tdefer a.logger.Debug(\"Stop:return\")\n\ta.mux.Lock()\n\tdefer a.mux.Unlock()\n\tif !a.running {\n\t\treturn nil\n\t}\n\ta.runSync.Stop()\n\ta.runSync.Wait()\n\ta.running = false\n\treturn nil\n}\n\nfunc (a *RealAnalyzer) Status() map[string]string {\n\ta.mux.RLock()\n\tdefer a.mux.RUnlock()\n\tif a.running {\n\t\ta.status.Update(a.name+\"-next-interval\", fmt.Sprintf(\"%.1fs\", a.clock.ETA(a.iter.TickChan())))\n\t} else {\n\t\ta.status.Update(a.name+\"-next-interval\", \"\")\n\t}\n\treturn a.status.Merge(a.worker.Status())\n}\n\n\/\/ --------------------------------------------------------------------------\n\nfunc (a *RealAnalyzer) configureMySQL(config []mysql.Query, tryLimit int) {\n\ta.logger.Debug(\"configureMySQL:call\")\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\ta.logger.Error(a.name+\":configureMySQL crashed: \", err)\n\t\t}\n\t\ta.logger.Debug(\"configureMySQL:return\")\n\t}()\n\n\ttry := 0\n\tfor (tryLimit == 0) || (try <= tryLimit) {\n\t\ttry++\n\n\t\tselect {\n\t\tcase <-a.configureMySQLSync.StopChan:\n\t\t\ta.logger.Debug(\"configureMySQL:stop\")\n\t\t\ta.configureMySQLSync.Done()\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\t\/\/ Connect handles backoff internally.\n\t\ta.logger.Debug(\"configureMySQL:connecting\")\n\t\tif err := a.mysqlConn.Connect(1); err != nil {\n\t\t\ta.logger.Warn(\"Cannot connect to MySQL:\", err)\n\t\t\tcontinue\n\t\t}\n\t\tdefer a.mysqlConn.Close()\n\n\t\ta.logger.Debug(\"configureMySQL:configuring\")\n\t\tif err := a.mysqlConn.Set(config); err != nil {\n\t\t\ta.mysqlConn.Close()\n\t\t\ta.logger.Warn(\"Cannot configure MySQL:\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\ta.logger.Debug(\"configureMySQL:configured\")\n\t\tselect {\n\t\tcase a.mysqlConfiguredChan <- true:\n\t\t\treturn \/\/ success\n\t\tcase <-a.configureMySQLSync.StopChan:\n\t\t\ta.logger.Debug(\"configureMySQL:stop\")\n\t\t\ta.configureMySQLSync.Done()\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (a *RealAnalyzer) run() {\n\ta.logger.Debug(\"run:call\")\n\tdefer a.logger.Debug(\"run:return\")\n\n\tmysqlConfigured := false\n\tgo a.configureMySQL(a.config.Start, 0) \/\/ try forever\n\n\tdefer func() {\n\t\ta.status.Update(a.name, \"Stopping worker\")\n\t\ta.logger.Info(\"Stopping worker\")\n\t\ta.worker.Stop()\n\n\t\ta.status.Update(a.name, \"Stopping interval iter\")\n\t\ta.logger.Info(\"Stopping interval iter\")\n\t\ta.iter.Stop()\n\n\t\tif !mysqlConfigured {\n\t\t\ta.status.Update(a.name, \"Stopping MySQL config\")\n\t\t\ta.logger.Info(\"Stopping MySQL config\")\n\t\t\ta.configureMySQLSync.Stop()\n\t\t\ta.configureMySQLSync.Wait()\n\t\t}\n\n\t\ta.status.Update(a.name, \"Stopping QAN on MySQL\")\n\t\ta.logger.Info(\"Stopping QAN on MySQL\")\n\t\ta.configureMySQL(a.config.Stop, 1) \/\/ try once\n\n\t\tif err := recover(); err != nil {\n\t\t\ta.logger.Error(a.name+\" crashed: \", err)\n\t\t\ta.status.Update(a.name, \"Crashed\")\n\t\t} else {\n\t\t\ta.status.Update(a.name, \"Stopped\")\n\t\t}\n\n\t\ta.runSync.Done()\n\t}()\n\n\tworkerRunning := false\n\tlastTs := time.Time{}\n\tcurrentInterval := &Interval{}\n\tfor {\n\t\ta.logger.Debug(\"run:idle\")\n\t\tif mysqlConfigured {\n\t\t\tif workerRunning {\n\t\t\t\ta.status.Update(a.name, \"Running\")\n\t\t\t} else {\n\t\t\t\ta.status.Update(a.name, \"Idle\")\n\t\t\t}\n\t\t} else {\n\t\t\ta.status.Update(a.name, \"Trying to configure MySQL\")\n\t\t}\n\n\t\tselect {\n\t\tcase interval := <-a.iter.IntervalChan():\n\t\t\tif !mysqlConfigured {\n\t\t\t\ta.logger.Debug(fmt.Sprintf(\"run:interval:%d:skip (mysql not configured)\", interval.Number))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif workerRunning {\n\t\t\t\ta.logger.Warn(fmt.Sprintf(\"Skipping interval '%s' because interval '%s' is still being parsed\",\n\t\t\t\t\tinterval, currentInterval))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ta.status.Update(a.name, fmt.Sprintf(\"Starting interval '%s'\", interval))\n\t\t\ta.logger.Debug(fmt.Sprintf(\"run:interval:%s\", interval))\n\t\t\tcurrentInterval = interval\n\n\t\t\t\/\/ Run the worker, timing it, make a report from its results, spool\n\t\t\t\/\/ the report. When done the interval is returned on workerDoneChan.\n\t\t\tgo a.runWorker(interval)\n\t\t\tworkerRunning = true\n\t\tcase interval := <-a.workerDoneChan:\n\t\t\ta.logger.Debug(\"run:worker:done\")\n\t\t\ta.status.Update(a.name, fmt.Sprintf(\"Cleaning up after interval '%s'\", interval))\n\t\t\tworkerRunning = false\n\n\t\t\tif interval.StartTime.After(lastTs) {\n\t\t\t\tt0 := interval.StartTime.Format(\"2006-01-02 15:04:05\")\n\t\t\t\tif a.config.CollectFrom == \"slowlog\" {\n\t\t\t\t\tt1 := interval.StopTime.Format(\"15:04:05 MST\")\n\t\t\t\t\ta.status.Update(a.name+\"-last-interval\", fmt.Sprintf(\"%s to %s\", t0, t1))\n\t\t\t\t} else {\n\t\t\t\t\ta.status.Update(a.name+\"-last-interval\", fmt.Sprintf(\"%s\", t0))\n\t\t\t\t}\n\t\t\t\tlastTs = interval.StartTime\n\t\t\t}\n\t\tcase mysqlConfigured = <-a.mysqlConfiguredChan:\n\t\t\ta.logger.Debug(\"run:mysql:configured\")\n\t\t\t\/\/ Start the IntervalIter once MySQL has been configured.\n\t\t\t\/\/ This avoids no data or partial data, e.g. slow log verbosity\n\t\t\t\/\/ not set yet.\n\t\t\ta.iter.Start()\n\n\t\t\t\/\/ If the next interval is more than 1 minute in the future,\n\t\t\t\/\/ simulate a clock tick now to start the iter early. For example,\n\t\t\t\/\/ if the interval is 5m and it's currently 01:00, the next interval\n\t\t\t\/\/ starts in 4m and stops in 9m, so data won't be reported for about\n\t\t\t\/\/ 10m. Instead, tick now so start interval=01:00 and end interval\n\t\t\t\/\/ =05:00 and data is reported in about 6m.\n\t\t\ttickChan := a.iter.TickChan()\n\t\t\tt := a.clock.ETA(tickChan)\n\t\t\tif t > 60 {\n\t\t\t\tbegan := ticker.Began(a.config.Interval, uint(time.Now().UTC().Unix()))\n\t\t\t\ta.logger.Info(\"First interval began at\", began)\n\t\t\t\ttickChan <- began\n\t\t\t} else {\n\t\t\t\ta.logger.Info(fmt.Sprintf(\"First interval begins in %.1f seconds\", t))\n\t\t\t}\n\t\tcase <-a.restartChan:\n\t\t\ta.logger.Debug(\"run:mysql:restart\")\n\t\t\t\/\/ If MySQL is not configured, then configureMySQL() should already\n\t\t\t\/\/ be running, trying to configure it. Else, we need to run\n\t\t\t\/\/ configureMySQL again.\n\t\t\tif mysqlConfigured {\n\t\t\t\tmysqlConfigured = false\n\t\t\t\ta.iter.Stop()\n\t\t\t\tgo a.configureMySQL(a.config.Start, 0) \/\/ try forever\n\t\t\t}\n\t\tcase <-a.runSync.StopChan:\n\t\t\ta.logger.Debug(\"run:stop\")\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (a *RealAnalyzer) runWorker(interval *Interval) {\n\ta.logger.Debug(fmt.Sprintf(\"runWorker:call:%d\", interval.Number))\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\terrMsg := fmt.Sprintf(a.name+\"-worker crashed: '%s': %s\", interval, err)\n\t\t\tlog.Println(errMsg)\n\t\t\tdebug.PrintStack()\n\t\t\ta.logger.Error(errMsg)\n\t\t}\n\t\ta.workerDoneChan <- interval\n\t\ta.logger.Debug(fmt.Sprintf(\"runWorker:return:%d\", interval.Number))\n\t}()\n\n\t\/\/ Let worker do whatever it needs before it starts processing\n\t\/\/ the interval. This mostly makes testing easier.\n\tif err := a.worker.Setup(interval); err != nil {\n\t\ta.logger.Warn(err)\n\t\treturn\n\t}\n\n\t\/\/ Let worker do whatever it needs after processing the interval.\n\t\/\/ This mostly maske testing easier.\n\tdefer func() {\n\t\tif err := a.worker.Cleanup(); err != nil {\n\t\t\ta.logger.Warn(err)\n\t\t}\n\t}()\n\n\t\/\/ Run the worker to process the interval.\n\tt0 := time.Now()\n\tresult, err := a.worker.Run()\n\tt1 := time.Now()\n\tif err != nil {\n\t\ta.logger.Error(err)\n\t\treturn\n\t}\n\tif result == nil {\n\t\tif a.config.CollectFrom == \"slowlog\" {\n\t\t\t\/\/ This shouldn't happen. If it does, the slow log worker has a bug\n\t\t\t\/\/ because it should have returned an error above.\n\t\t\ta.logger.Error(\"Nil result\", interval)\n\t\t}\n\t\treturn\n\t}\n\tresult.RunTime = t1.Sub(t0).Seconds()\n\n\t\/\/ Translate the results into a report and spool.\n\t\/\/ NOTE: \"qan\" here is correct; do not use a.name.\n\treport := MakeReport(a.config, interval, result)\n\tif err := a.spool.Write(\"qan\", report); err != nil {\n\t\ta.logger.Warn(\"Lost report:\", err)\n\t}\n}\n<commit_msg>Fix tryLimit in qan\/analyzer.go.<commit_after>\/*\n Copyright (c) 2014-2015, Percona LLC and\/or its affiliates. All rights reserved.\n\n This program is free software: you can redistribute it and\/or modify\n it under the terms of the GNU Affero General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU Affero General Public License for more details.\n\n You should have received a copy of the GNU Affero General Public License\n along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>\n*\/\n\npackage qan\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"runtime\/debug\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/percona\/percona-agent\/data\"\n\t\"github.com\/percona\/percona-agent\/mysql\"\n\t\"github.com\/percona\/percona-agent\/pct\"\n\t\"github.com\/percona\/percona-agent\/ticker\"\n)\n\n\/\/ A Worker gets queries, aggregates them, and returns a Result. Workers are ran\n\/\/ by Analyzers. When ran, MySQL is presumed to be configured and ready.\ntype Worker interface {\n\tSetup(*Interval) error\n\tRun() (*Result, error)\n\tStop() error\n\tCleanup() error\n\tStatus() map[string]string\n}\n\n\/\/ An Analyzer runs a Worker at each Interval. Analyzers are responsible for\n\/\/ MySQL: configuring, restarts, etc. The Worker is only ran when MySQL is\n\/\/ configured and ready. Analyzers are also responsible for making Reports from\n\/\/ the Results returned by Workers. The Worker determines the type of Analyzer:\n\/\/ slowlog or perfschema. Analyzers are ran by the QAN Manager.\ntype Analyzer interface {\n\tStart() error\n\tStop() error\n\tStatus() map[string]string\n\tString() string\n}\n\n\/\/ An AnalyzerFactory makes an Analyzer, real or mock.\ntype AnalyzerFactory interface {\n\tMake(config Config, name string, mysqlConn mysql.Connector, restartChan <-chan bool, tickChan chan time.Time) Analyzer\n}\n\n\/\/ --------------------------------------------------------------------------\n\ntype RealAnalyzer struct {\n\tlogger *pct.Logger\n\tconfig Config\n\titer IntervalIter\n\tmysqlConn mysql.Connector\n\trestartChan <-chan bool\n\tworker Worker\n\tclock ticker.Manager\n\tspool data.Spooler\n\t\/\/ --\n\tname string\n\tmysqlConfiguredChan chan bool\n\tworkerDoneChan chan *Interval\n\tstatus *pct.Status\n\trunSync *pct.SyncChan\n\tconfigureMySQLSync *pct.SyncChan\n\trunning bool\n\tmux *sync.RWMutex\n}\n\nfunc NewRealAnalyzer(logger *pct.Logger, config Config, iter IntervalIter, mysqlConn mysql.Connector, restartChan <-chan bool, worker Worker, clock ticker.Manager, spool data.Spooler) *RealAnalyzer {\n\tname := logger.Service()\n\ta := &RealAnalyzer{\n\t\tlogger: logger,\n\t\tconfig: config,\n\t\titer: iter,\n\t\tmysqlConn: mysqlConn,\n\t\trestartChan: restartChan,\n\t\tworker: worker,\n\t\tclock: clock,\n\t\tspool: spool,\n\t\t\/\/ --\n\t\tname: name,\n\t\tmysqlConfiguredChan: make(chan bool, 1),\n\t\tworkerDoneChan: make(chan *Interval, 1),\n\t\tstatus: pct.NewStatus([]string{name, name + \"-last-interval\", name + \"-next-interval\"}),\n\t\trunSync: pct.NewSyncChan(),\n\t\tconfigureMySQLSync: pct.NewSyncChan(),\n\t\tmux: &sync.RWMutex{},\n\t}\n\treturn a\n}\n\nfunc (a *RealAnalyzer) String() string {\n\treturn a.name\n}\n\nfunc (a *RealAnalyzer) Start() error {\n\ta.logger.Debug(\"Start:call\")\n\tdefer a.logger.Debug(\"Start:return\")\n\ta.mux.Lock()\n\tdefer a.mux.Unlock()\n\tif a.running {\n\t\treturn nil\n\t}\n\tgo a.run()\n\ta.running = true\n\treturn nil\n}\n\nfunc (a *RealAnalyzer) Stop() error {\n\ta.logger.Debug(\"Stop:call\")\n\tdefer a.logger.Debug(\"Stop:return\")\n\ta.mux.Lock()\n\tdefer a.mux.Unlock()\n\tif !a.running {\n\t\treturn nil\n\t}\n\ta.runSync.Stop()\n\ta.runSync.Wait()\n\ta.running = false\n\treturn nil\n}\n\nfunc (a *RealAnalyzer) Status() map[string]string {\n\ta.mux.RLock()\n\tdefer a.mux.RUnlock()\n\tif a.running {\n\t\ta.status.Update(a.name+\"-next-interval\", fmt.Sprintf(\"%.1fs\", a.clock.ETA(a.iter.TickChan())))\n\t} else {\n\t\ta.status.Update(a.name+\"-next-interval\", \"\")\n\t}\n\treturn a.status.Merge(a.worker.Status())\n}\n\n\/\/ --------------------------------------------------------------------------\n\nfunc (a *RealAnalyzer) configureMySQL(config []mysql.Query, tryLimit int) {\n\ta.logger.Debug(\"configureMySQL:call\")\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\ta.logger.Error(a.name+\":configureMySQL crashed: \", err)\n\t\t}\n\t\ta.logger.Debug(\"configureMySQL:return\")\n\t}()\n\n\ttry := 0\n\tfor (tryLimit == 0) || (try < tryLimit) {\n\t\ttry++\n\n\t\tselect {\n\t\tcase <-a.configureMySQLSync.StopChan:\n\t\t\ta.logger.Debug(\"configureMySQL:stop\")\n\t\t\ta.configureMySQLSync.Done()\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\t\/\/ Connect handles backoff internally.\n\t\ta.logger.Debug(\"configureMySQL:connecting\")\n\t\tif err := a.mysqlConn.Connect(1); err != nil {\n\t\t\ta.logger.Warn(\"Cannot connect to MySQL:\", err)\n\t\t\tcontinue\n\t\t}\n\t\tdefer a.mysqlConn.Close()\n\n\t\ta.logger.Debug(\"configureMySQL:configuring\")\n\t\tif err := a.mysqlConn.Set(config); err != nil {\n\t\t\ta.mysqlConn.Close()\n\t\t\ta.logger.Warn(\"Cannot configure MySQL:\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\ta.logger.Debug(\"configureMySQL:configured\")\n\t\tselect {\n\t\tcase a.mysqlConfiguredChan <- true:\n\t\t\treturn \/\/ success\n\t\tcase <-a.configureMySQLSync.StopChan:\n\t\t\ta.logger.Debug(\"configureMySQL:stop\")\n\t\t\ta.configureMySQLSync.Done()\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (a *RealAnalyzer) run() {\n\ta.logger.Debug(\"run:call\")\n\tdefer a.logger.Debug(\"run:return\")\n\n\tmysqlConfigured := false\n\tgo a.configureMySQL(a.config.Start, 0) \/\/ try forever\n\n\tdefer func() {\n\t\ta.status.Update(a.name, \"Stopping worker\")\n\t\ta.logger.Info(\"Stopping worker\")\n\t\ta.worker.Stop()\n\n\t\ta.status.Update(a.name, \"Stopping interval iter\")\n\t\ta.logger.Info(\"Stopping interval iter\")\n\t\ta.iter.Stop()\n\n\t\tif !mysqlConfigured {\n\t\t\ta.status.Update(a.name, \"Stopping MySQL config\")\n\t\t\ta.logger.Info(\"Stopping MySQL config\")\n\t\t\ta.configureMySQLSync.Stop()\n\t\t\ta.configureMySQLSync.Wait()\n\t\t}\n\n\t\ta.status.Update(a.name, \"Stopping QAN on MySQL\")\n\t\ta.logger.Info(\"Stopping QAN on MySQL\")\n\t\ta.configureMySQL(a.config.Stop, 1) \/\/ try once\n\n\t\tif err := recover(); err != nil {\n\t\t\ta.logger.Error(a.name+\" crashed: \", err)\n\t\t\ta.status.Update(a.name, \"Crashed\")\n\t\t} else {\n\t\t\ta.status.Update(a.name, \"Stopped\")\n\t\t}\n\n\t\ta.runSync.Done()\n\t}()\n\n\tworkerRunning := false\n\tlastTs := time.Time{}\n\tcurrentInterval := &Interval{}\n\tfor {\n\t\ta.logger.Debug(\"run:idle\")\n\t\tif mysqlConfigured {\n\t\t\tif workerRunning {\n\t\t\t\ta.status.Update(a.name, \"Running\")\n\t\t\t} else {\n\t\t\t\ta.status.Update(a.name, \"Idle\")\n\t\t\t}\n\t\t} else {\n\t\t\ta.status.Update(a.name, \"Trying to configure MySQL\")\n\t\t}\n\n\t\tselect {\n\t\tcase interval := <-a.iter.IntervalChan():\n\t\t\tif !mysqlConfigured {\n\t\t\t\ta.logger.Debug(fmt.Sprintf(\"run:interval:%d:skip (mysql not configured)\", interval.Number))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif workerRunning {\n\t\t\t\ta.logger.Warn(fmt.Sprintf(\"Skipping interval '%s' because interval '%s' is still being parsed\",\n\t\t\t\t\tinterval, currentInterval))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ta.status.Update(a.name, fmt.Sprintf(\"Starting interval '%s'\", interval))\n\t\t\ta.logger.Debug(fmt.Sprintf(\"run:interval:%s\", interval))\n\t\t\tcurrentInterval = interval\n\n\t\t\t\/\/ Run the worker, timing it, make a report from its results, spool\n\t\t\t\/\/ the report. When done the interval is returned on workerDoneChan.\n\t\t\tgo a.runWorker(interval)\n\t\t\tworkerRunning = true\n\t\tcase interval := <-a.workerDoneChan:\n\t\t\ta.logger.Debug(\"run:worker:done\")\n\t\t\ta.status.Update(a.name, fmt.Sprintf(\"Cleaning up after interval '%s'\", interval))\n\t\t\tworkerRunning = false\n\n\t\t\tif interval.StartTime.After(lastTs) {\n\t\t\t\tt0 := interval.StartTime.Format(\"2006-01-02 15:04:05\")\n\t\t\t\tif a.config.CollectFrom == \"slowlog\" {\n\t\t\t\t\tt1 := interval.StopTime.Format(\"15:04:05 MST\")\n\t\t\t\t\ta.status.Update(a.name+\"-last-interval\", fmt.Sprintf(\"%s to %s\", t0, t1))\n\t\t\t\t} else {\n\t\t\t\t\ta.status.Update(a.name+\"-last-interval\", fmt.Sprintf(\"%s\", t0))\n\t\t\t\t}\n\t\t\t\tlastTs = interval.StartTime\n\t\t\t}\n\t\tcase mysqlConfigured = <-a.mysqlConfiguredChan:\n\t\t\ta.logger.Debug(\"run:mysql:configured\")\n\t\t\t\/\/ Start the IntervalIter once MySQL has been configured.\n\t\t\t\/\/ This avoids no data or partial data, e.g. slow log verbosity\n\t\t\t\/\/ not set yet.\n\t\t\ta.iter.Start()\n\n\t\t\t\/\/ If the next interval is more than 1 minute in the future,\n\t\t\t\/\/ simulate a clock tick now to start the iter early. For example,\n\t\t\t\/\/ if the interval is 5m and it's currently 01:00, the next interval\n\t\t\t\/\/ starts in 4m and stops in 9m, so data won't be reported for about\n\t\t\t\/\/ 10m. Instead, tick now so start interval=01:00 and end interval\n\t\t\t\/\/ =05:00 and data is reported in about 6m.\n\t\t\ttickChan := a.iter.TickChan()\n\t\t\tt := a.clock.ETA(tickChan)\n\t\t\tif t > 60 {\n\t\t\t\tbegan := ticker.Began(a.config.Interval, uint(time.Now().UTC().Unix()))\n\t\t\t\ta.logger.Info(\"First interval began at\", began)\n\t\t\t\ttickChan <- began\n\t\t\t} else {\n\t\t\t\ta.logger.Info(fmt.Sprintf(\"First interval begins in %.1f seconds\", t))\n\t\t\t}\n\t\tcase <-a.restartChan:\n\t\t\ta.logger.Debug(\"run:mysql:restart\")\n\t\t\t\/\/ If MySQL is not configured, then configureMySQL() should already\n\t\t\t\/\/ be running, trying to configure it. Else, we need to run\n\t\t\t\/\/ configureMySQL again.\n\t\t\tif mysqlConfigured {\n\t\t\t\tmysqlConfigured = false\n\t\t\t\ta.iter.Stop()\n\t\t\t\tgo a.configureMySQL(a.config.Start, 0) \/\/ try forever\n\t\t\t}\n\t\tcase <-a.runSync.StopChan:\n\t\t\ta.logger.Debug(\"run:stop\")\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (a *RealAnalyzer) runWorker(interval *Interval) {\n\ta.logger.Debug(fmt.Sprintf(\"runWorker:call:%d\", interval.Number))\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\terrMsg := fmt.Sprintf(a.name+\"-worker crashed: '%s': %s\", interval, err)\n\t\t\tlog.Println(errMsg)\n\t\t\tdebug.PrintStack()\n\t\t\ta.logger.Error(errMsg)\n\t\t}\n\t\ta.workerDoneChan <- interval\n\t\ta.logger.Debug(fmt.Sprintf(\"runWorker:return:%d\", interval.Number))\n\t}()\n\n\t\/\/ Let worker do whatever it needs before it starts processing\n\t\/\/ the interval. This mostly makes testing easier.\n\tif err := a.worker.Setup(interval); err != nil {\n\t\ta.logger.Warn(err)\n\t\treturn\n\t}\n\n\t\/\/ Let worker do whatever it needs after processing the interval.\n\t\/\/ This mostly maske testing easier.\n\tdefer func() {\n\t\tif err := a.worker.Cleanup(); err != nil {\n\t\t\ta.logger.Warn(err)\n\t\t}\n\t}()\n\n\t\/\/ Run the worker to process the interval.\n\tt0 := time.Now()\n\tresult, err := a.worker.Run()\n\tt1 := time.Now()\n\tif err != nil {\n\t\ta.logger.Error(err)\n\t\treturn\n\t}\n\tif result == nil {\n\t\tif a.config.CollectFrom == \"slowlog\" {\n\t\t\t\/\/ This shouldn't happen. If it does, the slow log worker has a bug\n\t\t\t\/\/ because it should have returned an error above.\n\t\t\ta.logger.Error(\"Nil result\", interval)\n\t\t}\n\t\treturn\n\t}\n\tresult.RunTime = t1.Sub(t0).Seconds()\n\n\t\/\/ Translate the results into a report and spool.\n\t\/\/ NOTE: \"qan\" here is correct; do not use a.name.\n\treport := MakeReport(a.config, interval, result)\n\tif err := a.spool.Write(\"qan\", report); err != nil {\n\t\ta.logger.Warn(\"Lost report:\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package ksonnet\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/ksonnet\/ksonnet-lib\/ksonnet-gen\/kubespec\"\n)\n\nconst constructorName = \"new\"\n\nvar specialProperties = map[kubespec.PropertyName]kubespec.PropertyName{\n\t\"apiVersion\": \"apiVersion\",\n\t\"metadata\": \"metadata\",\n\t\"kind\": \"kind\",\n}\n\nfunc isSpecialProperty(pn kubespec.PropertyName) bool {\n\t_, ok := specialProperties[pn]\n\treturn ok\n}\n\nfunc getSHARevision(dir string) string {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Fatalf(\"Could get working directory:\\n%v\", err)\n\t}\n\n\terr = os.Chdir(dir)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could cd to directory of repository at '%s':\\n%v\", dir, err)\n\t}\n\n\tsha, err := exec.Command(\"sh\", \"-c\", \"git rev-parse HEAD\").Output()\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not find SHA of HEAD:\\n%v\", err)\n\t}\n\n\terr = os.Chdir(cwd)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could cd back to current directory '%s':\\n%v\", cwd, err)\n\t}\n\n\treturn strings.TrimSpace(string(sha))\n}\n<commit_msg>Remove metadata from special properties list<commit_after>package ksonnet\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/ksonnet\/ksonnet-lib\/ksonnet-gen\/kubespec\"\n)\n\nconst constructorName = \"new\"\n\nvar specialProperties = map[kubespec.PropertyName]kubespec.PropertyName{\n\t\"apiVersion\": \"apiVersion\",\n\t\"kind\": \"kind\",\n}\n\nfunc isSpecialProperty(pn kubespec.PropertyName) bool {\n\t_, ok := specialProperties[pn]\n\treturn ok\n}\n\nfunc getSHARevision(dir string) string {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Fatalf(\"Could get working directory:\\n%v\", err)\n\t}\n\n\terr = os.Chdir(dir)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could cd to directory of repository at '%s':\\n%v\", dir, err)\n\t}\n\n\tsha, err := exec.Command(\"sh\", \"-c\", \"git rev-parse HEAD\").Output()\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not find SHA of HEAD:\\n%v\", err)\n\t}\n\n\terr = os.Chdir(cwd)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could cd back to current directory '%s':\\n%v\", cwd, err)\n\t}\n\n\treturn strings.TrimSpace(string(sha))\n}\n<|endoftext|>"} {"text":"<commit_before>package si7021\n\nimport (\n\t\"github.com\/explicite\/i2c\/driver\"\n)\n\nconst (\n\t\/\/ Measure Relative Humidity, Hold Master Mode.\n\tRhHm = 0xE5\n\n\t\/\/ Measure Relative Humidity, No Hold Master Mode.\n\tRh = 0xF5\n\n\t\/\/ Measure Temperature, Hold Master Mode.\n\tTmpHm = 0xE3\n\n\t\/\/ Measure Temperature, No Hold Master Mode.\n\tTmp = 0xF3\n\n\t\/\/ Read Temperature Value from Previous RH Measurement.\n\tReadTmpPrev = 0xE0\n\n\t\/\/ Reset.\n\tReset = 0xFE\n\n\t\/\/ Write RH\/T User Register 1\n\tWriteRhtUr1 = 0xE6\n\n\t\/\/ Read RH\/T User Register 1\n\tReadRhtUr1 = 0xE7\n\n\t\/\/ Write Heater Control Register\n\tWriteHcr = 0x51\n\n\t\/\/ Read Heater Control Register\n\tReadHcr = 0x11\n\n\t\/\/ Read Electronic ID 1st Byte\n\tReadEid1p1 = 0xFA\n\tReadEid1p2 = 0x0F\n\n\t\/\/ Read Electronic ID 2nd Byte\n\tReadEid2p1 = 0xFC\n\tReadEid2p2 = 0xC9\n\n\t\/\/ Read Firmware Revision\n\tReadFr1 = 0x84\n\tReadFr2 = 0xB8\n)\n\ntype SI7021 struct{ driver.Driver }\n\nfunc (s *SI7021) Init(addr byte, bus byte) error {\n\treturn s.Load(addr, bus)\n}\n\nfunc (s *SI7021) RelativeHumidity(hm bool) (float64, error) {\n\t\/\/TODO\n\tif hm == true {\n\t\ts.Write(RhHm, 0x01)\n\t} else {\n\t\ts.Write(Rh, 0x01)\n\t}\n\treturn float64(1), nil\n}\n\nfunc (s *SI7021) Temperature(hm bool) (float64, error) {\n\t\/\/TODO\n\tif hm == true {\n\t\ts.Write(TmpHm, 0x01)\n\t} else {\n\t\ts.Write(Tmp, 0x01)\n\t}\n\treturn float64(1), nil\n}\n\nfunc (s *SI7021) ESN() (string, error) {\n\t\/\/TODO\n\treturn \"todo\", nil\n}\n\nfunc (s *SI7021) Rev() (string, error) {\n\t\/\/TODO\n\treturn \"todo\", nil\n}\n\nfunc (s *SI7021) Active() error {\n\treturn s.On()\n}\n\nfunc (s *SI7021) Deactive() error {\n\treturn s.Off()\n}\n<commit_msg>SI7021: comments for exported consts<commit_after>package si7021\n\nimport (\n\t\"github.com\/explicite\/i2c\/driver\"\n)\n\nconst (\n\t\/\/ RhHm code Measure Relative Humidity, Hold Master Mode.\n\tRhHm = 0xE5\n\n\t\/\/ Rh code Measure Relative Humidity, No Hold Master Mode.\n\tRh = 0xF5\n\n\t\/\/ TmpHm code Measure Temperature, Hold Master Mode.\n\tTmpHm = 0xE3\n\n\t\/\/ Tmp code Measure Temperature, No Hold Master Mode.\n\tTmp = 0xF3\n\n\t\/\/ ReadTmpPrev code Read Temperature Value from Previous RH Measurement.\n\tReadTmpPrev = 0xE0\n\n\t\/\/ Reset code.\n\tReset = 0xFE\n\n\t\/\/ WriteRhtUr1 code Write RH\/T User Register 1\n\tWriteRhtUr1 = 0xE6\n\n\t\/\/ ReadRhtUr1 code Read RH\/T User Register 1\n\tReadRhtUr1 = 0xE7\n\n\t\/\/ WriteHcr code Write Heater Control Register\n\tWriteHcr = 0x51\n\n\t\/\/ ReadHcr code Read Heater Control Register\n\tReadHcr = 0x11\n\n\t\/\/ ReadEid1p1 code Read Electronic ID 1st Byte part 1\n\tReadEid1p1 = 0xFA\n\t\/\/ ReadEid1p2 code Read Electronic ID 1st Byte part 2\n\tReadEid1p2 = 0x0F\n\n\t\/\/ ReadEid2p1 code Read Electronic ID 2nd Byte part 1\n\tReadEid2p1 = 0xFC\n\t\/\/ ReadEid2p2 code Read Electronic ID 2nd Byte part 2\n\tReadEid2p2 = 0xC9\n\n\t\/\/ ReadFr1 code Read Firmware Revision part 1\n\tReadFr1 = 0x84\n\t\/\/ ReadFr2 code Read Firmware Revision part 2\n\tReadFr2 = 0xB8\n)\n\ntype SI7021 struct{ driver.Driver }\n\nfunc (s *SI7021) Init(addr byte, bus byte) error {\n\treturn s.Load(addr, bus)\n}\n\nfunc (s *SI7021) RelativeHumidity(hm bool) (float64, error) {\n\t\/\/TODO\n\tif hm == true {\n\t\ts.Write(RhHm, 0x01)\n\t} else {\n\t\ts.Write(Rh, 0x01)\n\t}\n\treturn float64(1), nil\n}\n\nfunc (s *SI7021) Temperature(hm bool) (float64, error) {\n\t\/\/TODO\n\tif hm == true {\n\t\ts.Write(TmpHm, 0x01)\n\t} else {\n\t\ts.Write(Tmp, 0x01)\n\t}\n\treturn float64(1), nil\n}\n\nfunc (s *SI7021) ESN() (string, error) {\n\t\/\/TODO\n\treturn \"todo\", nil\n}\n\nfunc (s *SI7021) Rev() (string, error) {\n\t\/\/TODO\n\treturn \"todo\", nil\n}\n\nfunc (s *SI7021) Active() error {\n\treturn s.On()\n}\n\nfunc (s *SI7021) Deactive() error {\n\treturn s.Off()\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t_ \"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/CenturyLinkLabs\/dray\/job\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/mock\"\n)\n\nvar (\n\tj *job.Job\n\tjm *mockJobManager\n\tsvr *httptest.Server\n\tclient *http.Client\n\tserverErr error\n\tnotFoundErr error\n)\n\nfunc init() {\n\tlog.SetLevel(log.PanicLevel)\n}\n\ntype mockJobManager struct {\n\tmock.Mock\n}\n\nfunc (m *mockJobManager) ListAll() ([]job.Job, error) {\n\tvar jobs []job.Job\n\targs := m.Mock.Called()\n\n\tif jobsArg := args.Get(0); jobsArg != nil {\n\t\tjobs = jobsArg.([]job.Job)\n\t}\n\treturn jobs, args.Error(1)\n}\n\nfunc (m *mockJobManager) GetByID(jobID string) (*job.Job, error) {\n\tvar j *job.Job\n\targs := m.Mock.Called(jobID)\n\n\tif jobArg := args.Get(0); jobArg != nil {\n\t\tj = jobArg.(*job.Job)\n\t}\n\n\treturn j, args.Error(1)\n}\n\nfunc (m *mockJobManager) Create(j *job.Job) error {\n\targs := m.Mock.Called(j)\n\treturn args.Error(0)\n}\n\nfunc (m *mockJobManager) Execute(j *job.Job) error {\n\targs := m.Mock.Called(j)\n\treturn args.Error(0)\n}\n\nfunc (m *mockJobManager) GetLog(j *job.Job, index int) (*job.JobLog, error) {\n\tvar jl *job.JobLog\n\targs := m.Mock.Called(j, index)\n\n\tif logArg := args.Get(0); logArg != nil {\n\t\tjl = logArg.(*job.JobLog)\n\t}\n\n\treturn jl, args.Error(1)\n}\n\nfunc (m *mockJobManager) Delete(job *job.Job) error {\n\targs := m.Mock.Called(job)\n\treturn args.Error(0)\n}\n\nfunc setUp() {\n\tj = &job.Job{ID: \"123\"}\n\tjm = &mockJobManager{}\n\tjobServer := NewServer(jm)\n\tsvr = httptest.NewServer(jobServer.createRouter())\n\tclient = &http.Client{}\n\tserverErr = errors.New(\"oops\")\n\tnotFoundErr = job.NotFoundError(j.ID)\n}\n\nfunc TestListJobsSuccess(t *testing.T) {\n\tsetUp()\n\n\tjobs := []job.Job{*j}\n\tjm.On(\"ListAll\").Return(jobs, nil)\n\n\tres, _ := http.Get(url(\"jobs\"))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tassert.Equal(t, http.StatusOK, res.StatusCode)\n\tassert.Equal(t, \"application\/json\", res.Header[\"Content-Type\"][0])\n\tassert.Equal(t, \"[{\\\"id\\\":\\\"123\\\"}]\\n\", string(body))\n\tjm.Mock.AssertExpectations(t)\n}\n\nfunc TestListJobsError(t *testing.T) {\n\tsetUp()\n\n\tjm.On(\"ListAll\").Return(nil, serverErr)\n\n\tres, _ := http.Get(url(\"jobs\"))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tassert.Equal(t, http.StatusInternalServerError, res.StatusCode)\n\tassert.Equal(t, \"text\/plain; charset=utf-8\", res.Header[\"Content-Type\"][0])\n\tassert.Equal(t, \"\", string(body))\n\tjm.Mock.AssertExpectations(t)\n}\n\nfunc TestCreateJobSuccess(t *testing.T) {\n\tsetUp()\n\tpayload := \"{\\\"name\\\":\\\"foo\\\"}\\n\"\n\n\tjm.On(\"Create\", mock.AnythingOfType(\"*job.Job\")).Return(nil)\n\tjm.On(\"Execute\", mock.AnythingOfType(\"*job.Job\")).Return(nil)\n\n\tres, _ := http.Post(url(\"jobs\"), \"application\/json\", bytes.NewBufferString(payload))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\ttime.Sleep(time.Millisecond)\n\n\tassert.Equal(t, http.StatusOK, res.StatusCode)\n\tassert.Equal(t, payload, string(body))\n\tjm.Mock.AssertExpectations(t)\n}\n\nfunc TestCreateJobJSONError(t *testing.T) {\n\tsetUp()\n\n\tres, _ := http.Post(url(\"jobs\"), \"application\/json\", bytes.NewBufferString(\"\"))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tassert.Equal(t, http.StatusInternalServerError, res.StatusCode)\n\tassert.Equal(t, \"\", string(body))\n\tjm.Mock.AssertExpectations(t)\n}\n\nfunc TestCreateJobError(t *testing.T) {\n\tsetUp()\n\n\tjm.On(\"Create\", mock.AnythingOfType(\"*job.Job\")).Return(serverErr)\n\n\tres, _ := http.Post(url(\"jobs\"), \"application\/json\", bytes.NewBufferString(\"{}\"))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tassert.Equal(t, http.StatusInternalServerError, res.StatusCode)\n\tassert.Equal(t, \"\", string(body))\n\tjm.Mock.AssertExpectations(t)\n}\n\nfunc TestGetJobSuccess(t *testing.T) {\n\tsetUp()\n\n\tjm.On(\"GetByID\", j.ID).Return(j, nil)\n\n\tres, _ := http.Get(url(\"jobs\/\" + j.ID))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tassert.Equal(t, http.StatusOK, res.StatusCode)\n\tassert.Equal(t, \"{\\\"id\\\":\\\"123\\\"}\\n\", string(body))\n\tjm.Mock.AssertExpectations(t)\n}\n\nfunc TestGetJobNotFound(t *testing.T) {\n\tsetUp()\n\n\tjm.On(\"GetByID\", j.ID).Return(nil, notFoundErr)\n\n\tres, _ := http.Get(url(\"jobs\/\" + j.ID))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tassert.Equal(t, http.StatusNotFound, res.StatusCode)\n\tassert.Equal(t, \"\", string(body))\n\tjm.Mock.AssertExpectations(t)\n}\n\nfunc TestGetJobServerError(t *testing.T) {\n\tsetUp()\n\n\tjm.On(\"GetByID\", j.ID).Return(nil, serverErr)\n\n\tres, _ := http.Get(url(\"jobs\/\" + j.ID))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tassert.Equal(t, http.StatusInternalServerError, res.StatusCode)\n\tassert.Equal(t, \"\", string(body))\n\tjm.Mock.AssertExpectations(t)\n}\n\nfunc TestGetJobLogSuccess(t *testing.T) {\n\tsetUp()\n\tindex := 99\n\tjobLog := &job.JobLog{Lines: []string{\"foo\", \"bar\"}}\n\n\tjm.On(\"GetByID\", j.ID).Return(j, nil)\n\tjm.On(\"GetLog\", j, index).Return(jobLog, nil)\n\n\tres, _ := http.Get(url(\"jobs\/\" + j.ID + \"\/log\" + \"?index=\" + strconv.Itoa(index)))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tassert.Equal(t, http.StatusOK, res.StatusCode)\n\tassert.Equal(t, \"{\\\"lines\\\":[\\\"foo\\\",\\\"bar\\\"]}\\n\", string(body))\n\tjm.Mock.AssertExpectations(t)\n}\n\nfunc TestGetJobLogNotFound(t *testing.T) {\n\tsetUp()\n\n\tjm.On(\"GetByID\", j.ID).Return(nil, notFoundErr)\n\n\tres, _ := http.Get(url(\"jobs\/\" + j.ID + \"\/log\"))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tassert.Equal(t, http.StatusNotFound, res.StatusCode)\n\tassert.Equal(t, \"\", string(body))\n\tjm.Mock.AssertExpectations(t)\n}\n\nfunc TestGetJobLogError(t *testing.T) {\n\tsetUp()\n\tindex := 99\n\n\tjm.On(\"GetByID\", j.ID).Return(j, nil)\n\tjm.On(\"GetLog\", j, index).Return(nil, serverErr)\n\n\tres, _ := http.Get(url(\"jobs\/\" + j.ID + \"\/log\" + \"?index=\" + strconv.Itoa(index)))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tassert.Equal(t, http.StatusInternalServerError, res.StatusCode)\n\tassert.Equal(t, \"\", string(body))\n\tjm.Mock.AssertExpectations(t)\n}\n\nfunc TestDeleteJobSuccess(t *testing.T) {\n\tsetUp()\n\n\tjm.On(\"GetByID\", j.ID).Return(j, nil)\n\tjm.On(\"Delete\", j).Return(nil)\n\n\treq, _ := http.NewRequest(\"DELETE\", url(\"jobs\/\"+j.ID), nil)\n\tres, _ := client.Do(req)\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tassert.Equal(t, http.StatusNoContent, res.StatusCode)\n\tassert.Equal(t, \"\", string(body))\n\tjm.Mock.AssertExpectations(t)\n}\n\nfunc TestDeleteJobNotFound(t *testing.T) {\n\tsetUp()\n\n\tjm.On(\"GetByID\", j.ID).Return(nil, notFoundErr)\n\n\treq, _ := http.NewRequest(\"DELETE\", url(\"jobs\/\"+j.ID), nil)\n\tres, _ := client.Do(req)\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tassert.Equal(t, http.StatusNotFound, res.StatusCode)\n\tassert.Equal(t, \"\", string(body))\n\tjm.Mock.AssertExpectations(t)\n}\n\nfunc TestDeleteJobError(t *testing.T) {\n\tsetUp()\n\n\tjm.On(\"GetByID\", j.ID).Return(j, nil)\n\tjm.On(\"Delete\", j).Return(serverErr)\n\n\treq, _ := http.NewRequest(\"DELETE\", url(\"jobs\/\"+j.ID), nil)\n\tres, _ := client.Do(req)\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tassert.Equal(t, http.StatusInternalServerError, res.StatusCode)\n\tassert.Equal(t, \"\", string(body))\n\tjm.Mock.AssertExpectations(t)\n}\n\nfunc url(path string) string {\n\treturn fmt.Sprintf(\"%s\/%s\", svr.URL, path)\n}\n<commit_msg>Adopt testify\/suite for API tests<commit_after>package api\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/CenturyLinkLabs\/dray\/job\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/stretchr\/testify\/mock\"\n\t\"github.com\/stretchr\/testify\/suite\"\n)\n\nfunc init() {\n\tlog.SetLevel(log.PanicLevel)\n}\n\ntype mockJobManager struct {\n\tmock.Mock\n}\n\nfunc (m *mockJobManager) ListAll() ([]job.Job, error) {\n\tvar jobs []job.Job\n\targs := m.Mock.Called()\n\n\tif jobsArg := args.Get(0); jobsArg != nil {\n\t\tjobs = jobsArg.([]job.Job)\n\t}\n\treturn jobs, args.Error(1)\n}\n\nfunc (m *mockJobManager) GetByID(jobID string) (*job.Job, error) {\n\tvar j *job.Job\n\targs := m.Mock.Called(jobID)\n\n\tif jobArg := args.Get(0); jobArg != nil {\n\t\tj = jobArg.(*job.Job)\n\t}\n\n\treturn j, args.Error(1)\n}\n\nfunc (m *mockJobManager) Create(j *job.Job) error {\n\targs := m.Mock.Called(j)\n\treturn args.Error(0)\n}\n\nfunc (m *mockJobManager) Execute(j *job.Job) error {\n\targs := m.Mock.Called(j)\n\treturn args.Error(0)\n}\n\nfunc (m *mockJobManager) GetLog(j *job.Job, index int) (*job.JobLog, error) {\n\tvar jl *job.JobLog\n\targs := m.Mock.Called(j, index)\n\n\tif logArg := args.Get(0); logArg != nil {\n\t\tjl = logArg.(*job.JobLog)\n\t}\n\n\treturn jl, args.Error(1)\n}\n\nfunc (m *mockJobManager) Delete(job *job.Job) error {\n\targs := m.Mock.Called(job)\n\treturn args.Error(0)\n}\n\ntype APITestSuite struct {\n\tsuite.Suite\n\n\tj *job.Job\n\tjm *mockJobManager\n\tsvr *httptest.Server\n\tclient *http.Client\n\tserverErr error\n\tnotFoundErr error\n}\n\nfunc (suite *APITestSuite) SetupTest() {\n\tsuite.j = &job.Job{ID: \"123\"}\n\tsuite.jm = &mockJobManager{}\n\n\tsuite.svr = httptest.NewServer(NewServer(suite.jm).createRouter())\n\tsuite.client = &http.Client{}\n\n\tsuite.serverErr = errors.New(\"oops\")\n\tsuite.notFoundErr = job.NotFoundError(suite.j.ID)\n}\n\nfunc (suite *APITestSuite) TestListJobsSuccess() {\n\tjobs := []job.Job{*suite.j}\n\tsuite.jm.On(\"ListAll\").Return(jobs, nil)\n\n\tres, _ := http.Get(suite.url(\"jobs\"))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tsuite.Equal(http.StatusOK, res.StatusCode)\n\tsuite.Equal(\"application\/json\", res.Header[\"Content-Type\"][0])\n\tsuite.Equal(\"[{\\\"id\\\":\\\"123\\\"}]\\n\", string(body))\n\tsuite.jm.Mock.AssertExpectations(suite.T())\n}\n\nfunc (suite *APITestSuite) TestListJobsError() {\n\tsuite.jm.On(\"ListAll\").Return(nil, suite.serverErr)\n\n\tres, _ := http.Get(suite.url(\"jobs\"))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tsuite.Equal(http.StatusInternalServerError, res.StatusCode)\n\tsuite.Equal(\"text\/plain; charset=utf-8\", res.Header[\"Content-Type\"][0])\n\tsuite.Equal(\"\", string(body))\n\tsuite.jm.Mock.AssertExpectations(suite.T())\n}\n\nfunc (suite *APITestSuite) TestCreateJobSuccess() {\n\tpayload := \"{\\\"name\\\":\\\"foo\\\"}\\n\"\n\n\tsuite.jm.On(\"Create\", mock.AnythingOfType(\"*job.Job\")).Return(nil)\n\tsuite.jm.On(\"Execute\", mock.AnythingOfType(\"*job.Job\")).Return(nil)\n\n\tres, _ := http.Post(suite.url(\"jobs\"), \"application\/json\", bytes.NewBufferString(payload))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\ttime.Sleep(time.Millisecond)\n\n\tsuite.Equal(http.StatusOK, res.StatusCode)\n\tsuite.Equal(payload, string(body))\n\tsuite.jm.Mock.AssertExpectations(suite.T())\n}\n\nfunc (suite *APITestSuite) TestCreateJobJSONError() {\n\tres, _ := http.Post(suite.url(\"jobs\"), \"application\/json\", bytes.NewBufferString(\"\"))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tsuite.Equal(http.StatusInternalServerError, res.StatusCode)\n\tsuite.Equal(\"\", string(body))\n\tsuite.jm.Mock.AssertExpectations(suite.T())\n}\n\nfunc (suite *APITestSuite) TestCreateJobError() {\n\tsuite.jm.On(\"Create\", mock.AnythingOfType(\"*job.Job\")).Return(suite.serverErr)\n\n\tres, _ := http.Post(suite.url(\"jobs\"), \"application\/json\", bytes.NewBufferString(\"{}\"))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tsuite.Equal(http.StatusInternalServerError, res.StatusCode)\n\tsuite.Equal(\"\", string(body))\n\tsuite.jm.Mock.AssertExpectations(suite.T())\n}\n\nfunc (suite *APITestSuite) TestGetJobSuccess() {\n\tsuite.jm.On(\"GetByID\", suite.j.ID).Return(suite.j, nil)\n\n\tres, _ := http.Get(suite.url(\"jobs\", suite.j.ID))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tsuite.Equal(http.StatusOK, res.StatusCode)\n\tsuite.Equal(\"{\\\"id\\\":\\\"123\\\"}\\n\", string(body))\n\tsuite.jm.Mock.AssertExpectations(suite.T())\n}\n\nfunc (suite *APITestSuite) TestGetJobNotFound() {\n\tsuite.jm.On(\"GetByID\", suite.j.ID).Return(nil, suite.notFoundErr)\n\n\tres, _ := http.Get(suite.url(\"jobs\", suite.j.ID))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tsuite.Equal(http.StatusNotFound, res.StatusCode)\n\tsuite.Equal(\"\", string(body))\n\tsuite.jm.Mock.AssertExpectations(suite.T())\n}\n\nfunc (suite *APITestSuite) TestGetJobServerError() {\n\tsuite.jm.On(\"GetByID\", suite.j.ID).Return(nil, suite.serverErr)\n\n\tres, _ := http.Get(suite.url(\"jobs\", suite.j.ID))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tsuite.Equal(http.StatusInternalServerError, res.StatusCode)\n\tsuite.Equal(\"\", string(body))\n\tsuite.jm.Mock.AssertExpectations(suite.T())\n}\n\nfunc (suite *APITestSuite) TestGetJobLogSuccess() {\n\tindex := 99\n\tjobLog := &job.JobLog{Lines: []string{\"foo\", \"bar\"}}\n\n\tsuite.jm.On(\"GetByID\", suite.j.ID).Return(suite.j, nil)\n\tsuite.jm.On(\"GetLog\", suite.j, index).Return(jobLog, nil)\n\n\tres, _ := http.Get(suite.url(\"jobs\", suite.j.ID, \"log\") + \"?index=\" + strconv.Itoa(index))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tsuite.Equal(http.StatusOK, res.StatusCode)\n\tsuite.Equal(\"{\\\"lines\\\":[\\\"foo\\\",\\\"bar\\\"]}\\n\", string(body))\n\tsuite.jm.Mock.AssertExpectations(suite.T())\n}\n\nfunc (suite *APITestSuite) TestGetJobLogNotFound() {\n\tsuite.jm.On(\"GetByID\", suite.j.ID).Return(nil, suite.notFoundErr)\n\n\tres, _ := http.Get(suite.url(\"jobs\", suite.j.ID, \"log\"))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tsuite.Equal(http.StatusNotFound, res.StatusCode)\n\tsuite.Equal(\"\", string(body))\n\tsuite.jm.Mock.AssertExpectations(suite.T())\n}\n\nfunc (suite *APITestSuite) TestGetJobLogError() {\n\tindex := 99\n\n\tsuite.jm.On(\"GetByID\", suite.j.ID).Return(suite.j, nil)\n\tsuite.jm.On(\"GetLog\", suite.j, index).Return(nil, suite.serverErr)\n\n\tres, _ := http.Get(suite.url(\"jobs\", suite.j.ID, \"log\") + \"?index=\" + strconv.Itoa(index))\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tsuite.Equal(http.StatusInternalServerError, res.StatusCode)\n\tsuite.Equal(\"\", string(body))\n\tsuite.jm.Mock.AssertExpectations(suite.T())\n}\n\nfunc (suite *APITestSuite) TestDeleteJobSuccess() {\n\tsuite.jm.On(\"GetByID\", suite.j.ID).Return(suite.j, nil)\n\tsuite.jm.On(\"Delete\", suite.j).Return(nil)\n\n\treq, _ := http.NewRequest(\"DELETE\", suite.url(\"jobs\", suite.j.ID), nil)\n\tres, _ := suite.client.Do(req)\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tsuite.Equal(http.StatusNoContent, res.StatusCode)\n\tsuite.Equal(\"\", string(body))\n\tsuite.jm.Mock.AssertExpectations(suite.T())\n}\n\nfunc (suite *APITestSuite) TestDeleteJobNotFound() {\n\tsuite.jm.On(\"GetByID\", suite.j.ID).Return(nil, suite.notFoundErr)\n\n\treq, _ := http.NewRequest(\"DELETE\", suite.url(\"jobs\", suite.j.ID), nil)\n\tres, _ := suite.client.Do(req)\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tsuite.Equal(http.StatusNotFound, res.StatusCode)\n\tsuite.Equal(\"\", string(body))\n\tsuite.jm.Mock.AssertExpectations(suite.T())\n}\n\nfunc (suite *APITestSuite) TestDeleteJobError() {\n\tsuite.jm.On(\"GetByID\", suite.j.ID).Return(suite.j, nil)\n\tsuite.jm.On(\"Delete\", suite.j).Return(suite.serverErr)\n\n\treq, _ := http.NewRequest(\"DELETE\", suite.url(\"jobs\", suite.j.ID), nil)\n\tres, _ := suite.client.Do(req)\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\tsuite.Equal(http.StatusInternalServerError, res.StatusCode)\n\tsuite.Equal(\"\", string(body))\n\tsuite.jm.Mock.AssertExpectations(suite.T())\n}\n\nfunc (suite *APITestSuite) url(parts ...string) string {\n\tparts = append([]string{suite.svr.URL}, parts...)\n\treturn strings.Join(parts, \"\/\")\n}\n\nfunc TestAPITestSuite(t *testing.T) {\n\tsuite.Run(t, new(APITestSuite))\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/ajvb\/kala\/job\"\n\n\t\"testing\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/suite\"\n)\n\nfunc generateNewJobMap() map[string]string {\n\tscheduleTime := time.Now().Add(time.Minute * 5)\n\trepeat := 1\n\tdelay := \"P1DT10M10S\"\n\tparsedTime := scheduleTime.Format(time.RFC3339)\n\tscheduleStr := fmt.Sprintf(\"R%d\/%s\/%s\", repeat, parsedTime, delay)\n\n\treturn map[string]string{\n\t\t\"schedule\": scheduleStr,\n\t\t\"name\": \"mock_job\",\n\t\t\"command\": \"bash -c 'date'\",\n\t\t\"owner\": \"aj@ajvb.me\",\n\t}\n}\n\nfunc generateNewRemoteJobMap() map[string]interface{} {\n\treturn map[string]interface{}{\n\t\t\"name\": \"mock_remote_job\",\n\t\t\"owner\": \"aj@ajvb.me\",\n\t\t\"type\": 1,\n\t\t\"remote_properties\": map[string]string{\n\t\t\t\"url\": \"http:\/\/example.com\",\n\t\t},\n\t}\n}\n\nfunc generateJobAndCache() (*job.LockFreeJobCache, *job.Job) {\n\tcache := job.NewMockCache()\n\tj := job.GetMockJobWithGenericSchedule()\n\tj.Init(cache)\n\treturn cache, j\n}\n\ntype ApiTestSuite struct {\n\tsuite.Suite\n}\n\nfunc TestApiTestSuite(t *testing.T) {\n\tsuite.Run(t, new(ApiTestSuite))\n}\n\nfunc (a *ApiTestSuite) TestHandleAddJob() {\n\tt := a.T()\n\tcache := job.NewMockCache()\n\tjobMap := generateNewJobMap()\n\tjobMap[\"owner\"] = \"\"\n\tdefaultOwner := \"aj+tester@ajvb.me\"\n\thandler := HandleAddJob(cache, defaultOwner)\n\n\tjsonJobMap, err := json.Marshal(jobMap)\n\ta.NoError(err)\n\tw, req := setupTestReq(t, \"POST\", ApiJobPath, jsonJobMap)\n\thandler(w, req)\n\n\tvar addJobResp AddJobResponse\n\terr = json.Unmarshal(w.Body.Bytes(), &addJobResp)\n\ta.NoError(err)\n\tretrievedJob, err := cache.Get(addJobResp.Id)\n\ta.NoError(err)\n\ta.Equal(jobMap[\"name\"], retrievedJob.Name)\n\ta.NotEqual(jobMap[\"owner\"], retrievedJob.Owner)\n\ta.Equal(defaultOwner, retrievedJob.Owner)\n\ta.Equal(w.Code, http.StatusCreated)\n}\n\nfunc (a *ApiTestSuite) TestHandleAddRemoteJob() {\n\tt := a.T()\n\tcache := job.NewMockCache()\n\tjobMap := generateNewRemoteJobMap()\n\tjobMap[\"owner\"] = \"\"\n\tdefaultOwner := \"aj+tester@ajvb.me\"\n\thandler := HandleAddJob(cache, defaultOwner)\n\n\tjsonJobMap, err := json.Marshal(jobMap)\n\ta.NoError(err)\n\tw, req := setupTestReq(t, \"POST\", ApiJobPath, jsonJobMap)\n\thandler(w, req)\n\n\tvar addJobResp AddJobResponse\n\terr = json.Unmarshal(w.Body.Bytes(), &addJobResp)\n\ta.NoError(err)\n\tretrievedJob, err := cache.Get(addJobResp.Id)\n\ta.NoError(err)\n\ta.Equal(jobMap[\"name\"], retrievedJob.Name)\n\ta.NotEqual(jobMap[\"owner\"], retrievedJob.Owner)\n\ta.Equal(defaultOwner, retrievedJob.Owner)\n\ta.Equal(w.Code, http.StatusCreated)\n}\n\nfunc (a *ApiTestSuite) TestHandleAddJobFailureBadJson() {\n\tt := a.T()\n\tcache := job.NewMockCache()\n\thandler := HandleAddJob(cache, \"\")\n\n\tw, req := setupTestReq(t, \"POST\", ApiJobPath, []byte(\"asd\"))\n\thandler(w, req)\n\ta.Equal(w.Code, http.StatusBadRequest)\n}\nfunc (a *ApiTestSuite) TestHandleAddJobFailureBadSchedule() {\n\tt := a.T()\n\tcache := job.NewMockCache()\n\tjobMap := generateNewJobMap()\n\thandler := HandleAddJob(cache, \"\")\n\n\t\/\/ Mess up schedule\n\tjobMap[\"schedule\"] = \"asdf\"\n\n\tjsonJobMap, err := json.Marshal(jobMap)\n\ta.NoError(err)\n\tw, req := setupTestReq(t, \"POST\", ApiJobPath, jsonJobMap)\n\thandler(w, req)\n\ta.Equal(w.Code, http.StatusBadRequest)\n\ta.True(strings.Contains(bytes.NewBuffer(w.Body.Bytes()).String(), \"when initializing\"))\n}\n\nfunc (a *ApiTestSuite) TestDeleteJobSuccess() {\n\tt := a.T()\n\tdb := &job.MockDB{}\n\tcache, job := generateJobAndCache()\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(ApiJobPath+\"{id}\", HandleJobRequest(cache, db)).Methods(\"DELETE\", \"GET\")\n\tts := httptest.NewServer(r)\n\n\t_, req := setupTestReq(t, \"DELETE\", ts.URL+ApiJobPath+job.Id, nil)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\ta.NoError(err)\n\ta.Equal(resp.StatusCode, http.StatusNoContent)\n\n\ta.Nil(cache.Get(job.Id))\n}\n\nfunc (a *ApiTestSuite) TestHandleJobRequestJobDoesNotExist() {\n\tt := a.T()\n\tdb := &job.MockDB{}\n\tcache := job.NewMockCache()\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(ApiJobPath+\"{id}\", HandleJobRequest(cache, db)).Methods(\"DELETE\", \"GET\")\n\tts := httptest.NewServer(r)\n\n\t_, req := setupTestReq(t, \"DELETE\", ts.URL+ApiJobPath+\"not-a-real-id\", nil)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\ta.NoError(err)\n\n\ta.Equal(resp.StatusCode, http.StatusNotFound)\n}\n\nfunc (a *ApiTestSuite) TestGetJobSuccess() {\n\tt := a.T()\n\tdb := &job.MockDB{}\n\tcache, job := generateJobAndCache()\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(ApiJobPath+\"{id}\", HandleJobRequest(cache, db)).Methods(\"DELETE\", \"GET\")\n\tts := httptest.NewServer(r)\n\n\t_, req := setupTestReq(t, \"GET\", ts.URL+ApiJobPath+job.Id, nil)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\ta.NoError(err)\n\n\tvar jobResp JobResponse\n\tbody, err := ioutil.ReadAll(resp.Body)\n\ta.NoError(err)\n\tresp.Body.Close()\n\terr = json.Unmarshal(body, &jobResp)\n\ta.NoError(err)\n\ta.Equal(job.Id, jobResp.Job.Id)\n\ta.Equal(job.Owner, jobResp.Job.Owner)\n\ta.Equal(job.Name, jobResp.Job.Name)\n\ta.Equal(resp.StatusCode, http.StatusOK)\n}\n\nfunc (a *ApiTestSuite) TestHandleListJobStatsRequest() {\n\tcache, job := generateJobAndCache()\n\tjob.Run(cache)\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(ApiJobPath+\"stats\/{id}\", HandleListJobStatsRequest(cache)).Methods(\"GET\")\n\tts := httptest.NewServer(r)\n\n\t_, req := setupTestReq(a.T(), \"GET\", ts.URL+ApiJobPath+\"stats\/\"+job.Id, nil)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\ta.NoError(err)\n\n\tvar jobStatsResp ListJobStatsResponse\n\tbody, err := ioutil.ReadAll(resp.Body)\n\ta.NoError(err)\n\tresp.Body.Close()\n\terr = json.Unmarshal(body, &jobStatsResp)\n\ta.NoError(err)\n\n\ta.Equal(len(jobStatsResp.JobStats), 1)\n\ta.Equal(jobStatsResp.JobStats[0].JobId, job.Id)\n\ta.Equal(jobStatsResp.JobStats[0].NumberOfRetries, uint(0))\n\ta.True(jobStatsResp.JobStats[0].Success)\n}\nfunc (a *ApiTestSuite) TestHandleListJobStatsRequestNotFound() {\n\tcache, _ := generateJobAndCache()\n\tr := mux.NewRouter()\n\n\tr.HandleFunc(ApiJobPath+\"stats\/{id}\", HandleListJobStatsRequest(cache)).Methods(\"GET\")\n\tts := httptest.NewServer(r)\n\n\t_, req := setupTestReq(a.T(), \"GET\", ts.URL+ApiJobPath+\"stats\/not-a-real-id\", nil)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\ta.NoError(err)\n\n\ta.Equal(resp.StatusCode, http.StatusNotFound)\n}\n\nfunc (a *ApiTestSuite) TestHandleListJobsRequest() {\n\tcache, jobOne := generateJobAndCache()\n\tjobTwo := job.GetMockJobWithGenericSchedule()\n\tjobTwo.Init(cache)\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(ApiJobPath, HandleListJobsRequest(cache)).Methods(\"GET\")\n\tts := httptest.NewServer(r)\n\n\t_, req := setupTestReq(a.T(), \"GET\", ts.URL+ApiJobPath, nil)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\ta.NoError(err)\n\n\tvar jobsResp ListJobsResponse\n\tunmarshallRequestBody(a.T(), resp, &jobsResp)\n\n\ta.Equal(len(jobsResp.Jobs), 2)\n\ta.Equal(jobsResp.Jobs[jobOne.Id].Schedule, jobOne.Schedule)\n\ta.Equal(jobsResp.Jobs[jobOne.Id].Name, jobOne.Name)\n\ta.Equal(jobsResp.Jobs[jobOne.Id].Owner, jobOne.Owner)\n\ta.Equal(jobsResp.Jobs[jobOne.Id].Command, jobOne.Command)\n\n\ta.Equal(jobsResp.Jobs[jobTwo.Id].Schedule, jobTwo.Schedule)\n\ta.Equal(jobsResp.Jobs[jobTwo.Id].Name, jobTwo.Name)\n\ta.Equal(jobsResp.Jobs[jobTwo.Id].Owner, jobTwo.Owner)\n\ta.Equal(jobsResp.Jobs[jobTwo.Id].Command, jobTwo.Command)\n}\n\nfunc (a *ApiTestSuite) TestHandleStartJobRequest() {\n\tt := a.T()\n\tcache, job := generateJobAndCache()\n\tr := mux.NewRouter()\n\tr.HandleFunc(ApiJobPath+\"start\/{id}\", HandleStartJobRequest(cache)).Methods(\"POST\")\n\tts := httptest.NewServer(r)\n\n\t_, req := setupTestReq(t, \"POST\", ts.URL+ApiJobPath+\"start\/\"+job.Id, nil)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\ta.NoError(err)\n\n\tnow := time.Now()\n\n\ta.Equal(resp.StatusCode, http.StatusNoContent)\n\n\ta.Equal(job.Metadata.SuccessCount, uint(1))\n\ta.WithinDuration(job.Metadata.LastSuccess, now, 2*time.Second)\n\ta.WithinDuration(job.Metadata.LastAttemptedRun, now, 2*time.Second)\n}\nfunc (a *ApiTestSuite) TestHandleStartJobRequestNotFound() {\n\tt := a.T()\n\tcache := job.NewMockCache()\n\thandler := HandleStartJobRequest(cache)\n\tw, req := setupTestReq(t, \"POST\", ApiJobPath+\"start\/asdasd\", nil)\n\thandler(w, req)\n\ta.Equal(w.Code, http.StatusNotFound)\n}\n\nfunc (a *ApiTestSuite) TestHandleKalaStatsRequest() {\n\tcache, _ := generateJobAndCache()\n\tjobTwo := job.GetMockJobWithGenericSchedule()\n\tjobTwo.Init(cache)\n\tjobTwo.Run(cache)\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(ApiUrlPrefix+\"stats\", HandleKalaStatsRequest(cache)).Methods(\"GET\")\n\tts := httptest.NewServer(r)\n\n\t_, req := setupTestReq(a.T(), \"GET\", ts.URL+ApiUrlPrefix+\"stats\", nil)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\ta.NoError(err)\n\n\tnow := time.Now()\n\n\tvar statsResp KalaStatsResponse\n\tunmarshallRequestBody(a.T(), resp, &statsResp)\n\n\ta.Equal(statsResp.Stats.Jobs, 2)\n\ta.Equal(statsResp.Stats.ActiveJobs, 2)\n\ta.Equal(statsResp.Stats.DisabledJobs, 0)\n\n\ta.Equal(statsResp.Stats.ErrorCount, uint(0))\n\ta.Equal(statsResp.Stats.SuccessCount, uint(1))\n\n\ta.WithinDuration(statsResp.Stats.LastAttemptedRun, now, 2*time.Second)\n\ta.WithinDuration(statsResp.Stats.CreatedAt, now, 2*time.Second)\n}\n\nfunc (a *ApiTestSuite) TestSetupApiRoutes() {\n\tdb := &job.MockDB{}\n\tcache := job.NewMockCache()\n\tr := mux.NewRouter()\n\n\tSetupApiRoutes(r, cache, db, \"\")\n\n\ta.NotNil(r)\n\ta.IsType(r, mux.NewRouter())\n}\n\n\/\/ setupTestReq constructs the writer recorder and request obj for use in tests\nfunc setupTestReq(t assert.TestingT, method, path string, data []byte) (*httptest.ResponseRecorder, *http.Request) {\n\tw := httptest.NewRecorder()\n\treq, err := http.NewRequest(method, path, bytes.NewReader(data))\n\tassert.NoError(t, err)\n\treturn w, req\n}\n\nfunc unmarshallRequestBody(t assert.TestingT, resp *http.Response, obj interface{}) {\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tassert.NoError(t, err)\n\tresp.Body.Close()\n\terr = json.Unmarshal(body, obj)\n\tassert.NoError(t, err)\n}\n<commit_msg>Add missing api tests<commit_after>package api\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/ajvb\/kala\/job\"\n\n\t\"testing\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/suite\"\n)\n\nfunc generateNewJobMap() map[string]string {\n\tscheduleTime := time.Now().Add(time.Minute * 5)\n\trepeat := 1\n\tdelay := \"P1DT10M10S\"\n\tparsedTime := scheduleTime.Format(time.RFC3339)\n\tscheduleStr := fmt.Sprintf(\"R%d\/%s\/%s\", repeat, parsedTime, delay)\n\n\treturn map[string]string{\n\t\t\"schedule\": scheduleStr,\n\t\t\"name\": \"mock_job\",\n\t\t\"command\": \"bash -c 'date'\",\n\t\t\"owner\": \"aj@ajvb.me\",\n\t}\n}\n\nfunc generateNewRemoteJobMap() map[string]interface{} {\n\treturn map[string]interface{}{\n\t\t\"name\": \"mock_remote_job\",\n\t\t\"owner\": \"aj@ajvb.me\",\n\t\t\"type\": 1,\n\t\t\"remote_properties\": map[string]string{\n\t\t\t\"url\": \"http:\/\/example.com\",\n\t\t},\n\t}\n}\n\nfunc generateJobAndCache() (*job.LockFreeJobCache, *job.Job) {\n\tcache := job.NewMockCache()\n\tj := job.GetMockJobWithGenericSchedule()\n\tj.Init(cache)\n\treturn cache, j\n}\n\ntype ApiTestSuite struct {\n\tsuite.Suite\n}\n\nfunc TestApiTestSuite(t *testing.T) {\n\tsuite.Run(t, new(ApiTestSuite))\n}\n\nfunc (a *ApiTestSuite) TestHandleAddJob() {\n\tt := a.T()\n\tcache := job.NewMockCache()\n\tjobMap := generateNewJobMap()\n\tjobMap[\"owner\"] = \"\"\n\tdefaultOwner := \"aj+tester@ajvb.me\"\n\thandler := HandleAddJob(cache, defaultOwner)\n\n\tjsonJobMap, err := json.Marshal(jobMap)\n\ta.NoError(err)\n\tw, req := setupTestReq(t, \"POST\", ApiJobPath, jsonJobMap)\n\thandler(w, req)\n\n\tvar addJobResp AddJobResponse\n\terr = json.Unmarshal(w.Body.Bytes(), &addJobResp)\n\ta.NoError(err)\n\tretrievedJob, err := cache.Get(addJobResp.Id)\n\ta.NoError(err)\n\ta.Equal(jobMap[\"name\"], retrievedJob.Name)\n\ta.NotEqual(jobMap[\"owner\"], retrievedJob.Owner)\n\ta.Equal(defaultOwner, retrievedJob.Owner)\n\ta.Equal(w.Code, http.StatusCreated)\n}\n\nfunc (a *ApiTestSuite) TestHandleAddRemoteJob() {\n\tt := a.T()\n\tcache := job.NewMockCache()\n\tjobMap := generateNewRemoteJobMap()\n\tjobMap[\"owner\"] = \"\"\n\tdefaultOwner := \"aj+tester@ajvb.me\"\n\thandler := HandleAddJob(cache, defaultOwner)\n\n\tjsonJobMap, err := json.Marshal(jobMap)\n\ta.NoError(err)\n\tw, req := setupTestReq(t, \"POST\", ApiJobPath, jsonJobMap)\n\thandler(w, req)\n\n\tvar addJobResp AddJobResponse\n\terr = json.Unmarshal(w.Body.Bytes(), &addJobResp)\n\ta.NoError(err)\n\tretrievedJob, err := cache.Get(addJobResp.Id)\n\ta.NoError(err)\n\ta.Equal(jobMap[\"name\"], retrievedJob.Name)\n\ta.NotEqual(jobMap[\"owner\"], retrievedJob.Owner)\n\ta.Equal(defaultOwner, retrievedJob.Owner)\n\ta.Equal(w.Code, http.StatusCreated)\n}\n\nfunc (a *ApiTestSuite) TestHandleAddJobFailureBadJson() {\n\tt := a.T()\n\tcache := job.NewMockCache()\n\thandler := HandleAddJob(cache, \"\")\n\n\tw, req := setupTestReq(t, \"POST\", ApiJobPath, []byte(\"asd\"))\n\thandler(w, req)\n\ta.Equal(w.Code, http.StatusBadRequest)\n}\nfunc (a *ApiTestSuite) TestHandleAddJobFailureBadSchedule() {\n\tt := a.T()\n\tcache := job.NewMockCache()\n\tjobMap := generateNewJobMap()\n\thandler := HandleAddJob(cache, \"\")\n\n\t\/\/ Mess up schedule\n\tjobMap[\"schedule\"] = \"asdf\"\n\n\tjsonJobMap, err := json.Marshal(jobMap)\n\ta.NoError(err)\n\tw, req := setupTestReq(t, \"POST\", ApiJobPath, jsonJobMap)\n\thandler(w, req)\n\ta.Equal(w.Code, http.StatusBadRequest)\n\ta.True(strings.Contains(bytes.NewBuffer(w.Body.Bytes()).String(), \"when initializing\"))\n}\n\nfunc (a *ApiTestSuite) TestDeleteJobSuccess() {\n\tt := a.T()\n\tdb := &job.MockDB{}\n\tcache, job := generateJobAndCache()\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(ApiJobPath+\"{id}\", HandleJobRequest(cache, db)).Methods(\"DELETE\", \"GET\")\n\tts := httptest.NewServer(r)\n\n\t_, req := setupTestReq(t, \"DELETE\", ts.URL+ApiJobPath+job.Id, nil)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\ta.NoError(err)\n\ta.Equal(resp.StatusCode, http.StatusNoContent)\n\n\ta.Nil(cache.Get(job.Id))\n}\n\nfunc (a *ApiTestSuite) TestHandleJobRequestJobDoesNotExist() {\n\tt := a.T()\n\tdb := &job.MockDB{}\n\tcache := job.NewMockCache()\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(ApiJobPath+\"{id}\", HandleJobRequest(cache, db)).Methods(\"DELETE\", \"GET\")\n\tts := httptest.NewServer(r)\n\n\t_, req := setupTestReq(t, \"DELETE\", ts.URL+ApiJobPath+\"not-a-real-id\", nil)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\ta.NoError(err)\n\n\ta.Equal(resp.StatusCode, http.StatusNotFound)\n}\n\nfunc (a *ApiTestSuite) TestGetJobSuccess() {\n\tt := a.T()\n\tdb := &job.MockDB{}\n\tcache, job := generateJobAndCache()\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(ApiJobPath+\"{id}\", HandleJobRequest(cache, db)).Methods(\"DELETE\", \"GET\")\n\tts := httptest.NewServer(r)\n\n\t_, req := setupTestReq(t, \"GET\", ts.URL+ApiJobPath+job.Id, nil)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\ta.NoError(err)\n\n\tvar jobResp JobResponse\n\tbody, err := ioutil.ReadAll(resp.Body)\n\ta.NoError(err)\n\tresp.Body.Close()\n\terr = json.Unmarshal(body, &jobResp)\n\ta.NoError(err)\n\ta.Equal(job.Id, jobResp.Job.Id)\n\ta.Equal(job.Owner, jobResp.Job.Owner)\n\ta.Equal(job.Name, jobResp.Job.Name)\n\ta.Equal(resp.StatusCode, http.StatusOK)\n}\n\nfunc (a *ApiTestSuite) TestHandleListJobStatsRequest() {\n\tcache, job := generateJobAndCache()\n\tjob.Run(cache)\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(ApiJobPath+\"stats\/{id}\", HandleListJobStatsRequest(cache)).Methods(\"GET\")\n\tts := httptest.NewServer(r)\n\n\t_, req := setupTestReq(a.T(), \"GET\", ts.URL+ApiJobPath+\"stats\/\"+job.Id, nil)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\ta.NoError(err)\n\n\tvar jobStatsResp ListJobStatsResponse\n\tbody, err := ioutil.ReadAll(resp.Body)\n\ta.NoError(err)\n\tresp.Body.Close()\n\terr = json.Unmarshal(body, &jobStatsResp)\n\ta.NoError(err)\n\n\ta.Equal(len(jobStatsResp.JobStats), 1)\n\ta.Equal(jobStatsResp.JobStats[0].JobId, job.Id)\n\ta.Equal(jobStatsResp.JobStats[0].NumberOfRetries, uint(0))\n\ta.True(jobStatsResp.JobStats[0].Success)\n}\nfunc (a *ApiTestSuite) TestHandleListJobStatsRequestNotFound() {\n\tcache, _ := generateJobAndCache()\n\tr := mux.NewRouter()\n\n\tr.HandleFunc(ApiJobPath+\"stats\/{id}\", HandleListJobStatsRequest(cache)).Methods(\"GET\")\n\tts := httptest.NewServer(r)\n\n\t_, req := setupTestReq(a.T(), \"GET\", ts.URL+ApiJobPath+\"stats\/not-a-real-id\", nil)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\ta.NoError(err)\n\n\ta.Equal(resp.StatusCode, http.StatusNotFound)\n}\n\nfunc (a *ApiTestSuite) TestHandleListJobsRequest() {\n\tcache, jobOne := generateJobAndCache()\n\tjobTwo := job.GetMockJobWithGenericSchedule()\n\tjobTwo.Init(cache)\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(ApiJobPath, HandleListJobsRequest(cache)).Methods(\"GET\")\n\tts := httptest.NewServer(r)\n\n\t_, req := setupTestReq(a.T(), \"GET\", ts.URL+ApiJobPath, nil)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\ta.NoError(err)\n\n\tvar jobsResp ListJobsResponse\n\tunmarshallRequestBody(a.T(), resp, &jobsResp)\n\n\ta.Equal(len(jobsResp.Jobs), 2)\n\ta.Equal(jobsResp.Jobs[jobOne.Id].Schedule, jobOne.Schedule)\n\ta.Equal(jobsResp.Jobs[jobOne.Id].Name, jobOne.Name)\n\ta.Equal(jobsResp.Jobs[jobOne.Id].Owner, jobOne.Owner)\n\ta.Equal(jobsResp.Jobs[jobOne.Id].Command, jobOne.Command)\n\n\ta.Equal(jobsResp.Jobs[jobTwo.Id].Schedule, jobTwo.Schedule)\n\ta.Equal(jobsResp.Jobs[jobTwo.Id].Name, jobTwo.Name)\n\ta.Equal(jobsResp.Jobs[jobTwo.Id].Owner, jobTwo.Owner)\n\ta.Equal(jobsResp.Jobs[jobTwo.Id].Command, jobTwo.Command)\n}\n\nfunc (a *ApiTestSuite) TestHandleStartJobRequest() {\n\tt := a.T()\n\tcache, job := generateJobAndCache()\n\tr := mux.NewRouter()\n\tr.HandleFunc(ApiJobPath+\"start\/{id}\", HandleStartJobRequest(cache)).Methods(\"POST\")\n\tts := httptest.NewServer(r)\n\n\t_, req := setupTestReq(t, \"POST\", ts.URL+ApiJobPath+\"start\/\"+job.Id, nil)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\ta.NoError(err)\n\n\tnow := time.Now()\n\n\ta.Equal(resp.StatusCode, http.StatusNoContent)\n\n\ta.Equal(job.Metadata.SuccessCount, uint(1))\n\ta.WithinDuration(job.Metadata.LastSuccess, now, 2*time.Second)\n\ta.WithinDuration(job.Metadata.LastAttemptedRun, now, 2*time.Second)\n}\nfunc (a *ApiTestSuite) TestHandleStartJobRequestNotFound() {\n\tt := a.T()\n\tcache := job.NewMockCache()\n\thandler := HandleStartJobRequest(cache)\n\tw, req := setupTestReq(t, \"POST\", ApiJobPath+\"start\/asdasd\", nil)\n\thandler(w, req)\n\ta.Equal(w.Code, http.StatusNotFound)\n}\n\nfunc (a *ApiTestSuite) TestHandleEnableJobRequest() {\n\tt := a.T()\n\tcache, job := generateJobAndCache()\n\tr := mux.NewRouter()\n\tr.HandleFunc(ApiJobPath+\"enable\/{id}\", HandleEnableJobRequest(cache)).Methods(\"POST\")\n\tts := httptest.NewServer(r)\n\n\tjob.Disable()\n\n\t_, req := setupTestReq(t, \"POST\", ts.URL+ApiJobPath+\"enable\/\"+job.Id, nil)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\ta.NoError(err)\n\n\ta.Equal(http.StatusNoContent, resp.StatusCode)\n\n\ta.Equal(false, job.Disabled)\n}\nfunc (a *ApiTestSuite) TestHandleEnableJobRequestNotFound() {\n\tt := a.T()\n\tcache := job.NewMockCache()\n\thandler := HandleEnableJobRequest(cache)\n\tw, req := setupTestReq(t, \"POST\", ApiJobPath+\"enable\/asdasd\", nil)\n\thandler(w, req)\n\ta.Equal(w.Code, http.StatusNotFound)\n}\n\nfunc (a *ApiTestSuite) TestHandleDisableJobRequest() {\n\tt := a.T()\n\tcache, job := generateJobAndCache()\n\tr := mux.NewRouter()\n\tr.HandleFunc(ApiJobPath+\"disable\/{id}\", HandleDisableJobRequest(cache)).Methods(\"POST\")\n\tts := httptest.NewServer(r)\n\n\t_, req := setupTestReq(t, \"POST\", ts.URL+ApiJobPath+\"disable\/\"+job.Id, nil)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\ta.NoError(err)\n\n\ta.Equal(http.StatusNoContent, resp.StatusCode)\n\n\ta.Equal(true, job.Disabled)\n}\nfunc (a *ApiTestSuite) TestHandleDisableJobRequestNotFound() {\n\tt := a.T()\n\tcache := job.NewMockCache()\n\thandler := HandleDisableJobRequest(cache)\n\tw, req := setupTestReq(t, \"POST\", ApiJobPath+\"disable\/asdasd\", nil)\n\thandler(w, req)\n\ta.Equal(w.Code, http.StatusNotFound)\n}\n\nfunc (a *ApiTestSuite) TestHandleKalaStatsRequest() {\n\tcache, _ := generateJobAndCache()\n\tjobTwo := job.GetMockJobWithGenericSchedule()\n\tjobTwo.Init(cache)\n\tjobTwo.Run(cache)\n\n\tr := mux.NewRouter()\n\tr.HandleFunc(ApiUrlPrefix+\"stats\", HandleKalaStatsRequest(cache)).Methods(\"GET\")\n\tts := httptest.NewServer(r)\n\n\t_, req := setupTestReq(a.T(), \"GET\", ts.URL+ApiUrlPrefix+\"stats\", nil)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\ta.NoError(err)\n\n\tnow := time.Now()\n\n\tvar statsResp KalaStatsResponse\n\tunmarshallRequestBody(a.T(), resp, &statsResp)\n\n\ta.Equal(statsResp.Stats.Jobs, 2)\n\ta.Equal(statsResp.Stats.ActiveJobs, 2)\n\ta.Equal(statsResp.Stats.DisabledJobs, 0)\n\n\ta.Equal(statsResp.Stats.ErrorCount, uint(0))\n\ta.Equal(statsResp.Stats.SuccessCount, uint(1))\n\n\ta.WithinDuration(statsResp.Stats.LastAttemptedRun, now, 2*time.Second)\n\ta.WithinDuration(statsResp.Stats.CreatedAt, now, 2*time.Second)\n}\n\nfunc (a *ApiTestSuite) TestSetupApiRoutes() {\n\tdb := &job.MockDB{}\n\tcache := job.NewMockCache()\n\tr := mux.NewRouter()\n\n\tSetupApiRoutes(r, cache, db, \"\")\n\n\ta.NotNil(r)\n\ta.IsType(r, mux.NewRouter())\n}\n\n\/\/ setupTestReq constructs the writer recorder and request obj for use in tests\nfunc setupTestReq(t assert.TestingT, method, path string, data []byte) (*httptest.ResponseRecorder, *http.Request) {\n\tw := httptest.NewRecorder()\n\treq, err := http.NewRequest(method, path, bytes.NewReader(data))\n\tassert.NoError(t, err)\n\treturn w, req\n}\n\nfunc unmarshallRequestBody(t assert.TestingT, resp *http.Response, obj interface{}) {\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tassert.NoError(t, err)\n\tresp.Body.Close()\n\terr = json.Unmarshal(body, obj)\n\tassert.NoError(t, err)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2022 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package pgxv4 provides a Cloud SQL Postgres driver that uses pgx v4 and works\n\/\/ with the database\/sql package.\npackage pgxv4\n\nimport (\n\t\"context\"\n\t\"database\/sql\"\n\t\"database\/sql\/driver\"\n\t\"net\"\n\n\t\"cloud.google.com\/go\/cloudsqlconn\"\n\t\"github.com\/jackc\/pgx\/v4\"\n\t\"github.com\/jackc\/pgx\/v4\/stdlib\"\n)\n\n\/\/ RegisterDriver registers a Postgres driver that uses the cloudsqlconn.Dialer\n\/\/ configured with the provided options. The choice of name is entirely up to\n\/\/ the caller and may be used to distinguish between multiple registrations of\n\/\/ differently configured Dialers. The driver uses pgx\/v4 internally.\n\/\/ RegisterDriver returns a cleanup function that should be called one the\n\/\/ database connection is no longer needed.\nfunc RegisterDriver(name string, opts ...cloudsqlconn.Option) (func() error, error) {\n\td, err := cloudsqlconn.NewDialer(context.Background(), opts...)\n\tif err != nil {\n\t\treturn func() error { return nil }, err\n\t}\n\tsql.Register(name, &pgDriver{\n\t\td: d,\n\t})\n\treturn func() error { return d.Close() }, nil\n}\n\ntype pgDriver struct {\n\td *cloudsqlconn.Dialer\n}\n\n\/\/ Open accepts a keyword\/value formatted connection string and returns a\n\/\/ connection to the database using cloudsqlconn.Dialer. The Cloud SQL instance\n\/\/ connection name should be specified in the host field. For example:\n\/\/\n\/\/ \"host=my-project:us-central1:my-db-instance user=myuser password=mypass\"\nfunc (p *pgDriver) Open(name string) (driver.Conn, error) {\n\tconfig, err := pgx.ParseConfig(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinstConnName := config.Config.Host \/\/ Extract instance connection name\n\tconfig.Config.Host = \"localhost\" \/\/ Replace it with a default value\n\tconfig.DialFunc = func(ctx context.Context, _, _ string) (net.Conn, error) {\n\t\treturn p.d.Dial(ctx, instConnName)\n\t}\n\tdbURI := stdlib.RegisterConnConfig(config)\n\treturn stdlib.GetDefaultDriver().Open(dbURI)\n}\n<commit_msg>fix: memory leak in database\/sql integration (#162)<commit_after>\/\/ Copyright 2022 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package pgxv4 provides a Cloud SQL Postgres driver that uses pgx v4 and works\n\/\/ with the database\/sql package.\npackage pgxv4\n\nimport (\n\t\"context\"\n\t\"database\/sql\"\n\t\"database\/sql\/driver\"\n\t\"net\"\n\t\"sync\"\n\n\t\"cloud.google.com\/go\/cloudsqlconn\"\n\t\"github.com\/jackc\/pgx\/v4\"\n\t\"github.com\/jackc\/pgx\/v4\/stdlib\"\n)\n\n\/\/ RegisterDriver registers a Postgres driver that uses the cloudsqlconn.Dialer\n\/\/ configured with the provided options. The choice of name is entirely up to\n\/\/ the caller and may be used to distinguish between multiple registrations of\n\/\/ differently configured Dialers. The driver uses pgx\/v4 internally.\n\/\/ RegisterDriver returns a cleanup function that should be called one the\n\/\/ database connection is no longer needed.\nfunc RegisterDriver(name string, opts ...cloudsqlconn.Option) (func() error, error) {\n\td, err := cloudsqlconn.NewDialer(context.Background(), opts...)\n\tif err != nil {\n\t\treturn func() error { return nil }, err\n\t}\n\tsql.Register(name, &pgDriver{\n\t\td: d,\n\t\tdbURIs: make(map[string]string),\n\t})\n\treturn func() error { return d.Close() }, nil\n}\n\ntype pgDriver struct {\n\td *cloudsqlconn.Dialer\n\tmu sync.RWMutex\n\t\/\/ dbURIs is a map of DSN to DB URI for registered connection names.\n\tdbURIs map[string]string\n}\n\n\/\/ Open accepts a keyword\/value formatted connection string and returns a\n\/\/ connection to the database using cloudsqlconn.Dialer. The Cloud SQL instance\n\/\/ connection name should be specified in the host field. For example:\n\/\/\n\/\/ \"host=my-project:us-central1:my-db-instance user=myuser password=mypass\"\nfunc (p *pgDriver) Open(name string) (driver.Conn, error) {\n\tvar (\n\t\tdbURI string\n\t\tok bool\n\t)\n\n\tp.mu.RLock()\n\tdbURI, ok = p.dbURIs[name]\n\tp.mu.RUnlock()\n\n\tif ok {\n\t\treturn stdlib.GetDefaultDriver().Open(dbURI)\n\t}\n\n\tconfig, err := pgx.ParseConfig(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinstConnName := config.Config.Host \/\/ Extract instance connection name\n\tconfig.Config.Host = \"localhost\" \/\/ Replace it with a default value\n\tconfig.DialFunc = func(ctx context.Context, _, _ string) (net.Conn, error) {\n\t\treturn p.d.Dial(ctx, instConnName)\n\t}\n\n\tp.mu.Lock()\n\tdbURI, ok = p.dbURIs[name] \/\/ check again if another goroutine already registered config\n\tif !ok {\n\t\tdbURI = stdlib.RegisterConnConfig(config)\n\t\tp.dbURIs[name] = dbURI\n\t}\n\tp.mu.Unlock()\n\treturn stdlib.GetDefaultDriver().Open(dbURI)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\r\n\r\nimport (\r\n\t\"bufio\"\r\n\t\/\/w \"eaciit\/wfdemo-git\/processapp\/opcdata\/filewatcher\"\r\n\td \"eaciit\/wfdemo-git\/processapp\/opcdata\/datareader\"\r\n\t\"fmt\"\r\n\t\"os\"\r\n\t\"strings\"\r\n\t\"time\"\r\n\r\n\t\"github.com\/eaciit\/dbox\"\r\n\t_ \"github.com\/eaciit\/dbox\/dbc\/mongo\"\r\n\t_ \"github.com\/eaciit\/orm\"\r\n\ttk \"github.com\/eaciit\/toolkit\"\r\n)\r\n\r\nvar (\r\n\twd = func() string {\r\n\t\td, _ := os.Getwd()\r\n\t\treturn d + \"\/\"\r\n\t}()\r\n)\r\n\r\nfunc main() {\r\n\tFileWatcher()\r\n}\r\nfunc FileIsExist(dirSources string) (bool, string) {\r\n\tnow := time.Now()\r\n\tyear, month, day := now.Date()\r\n\thour, _, _ := now.Clock()\r\n\r\n\ttargetFileName := fmt.Sprintf(\"DataFile%d%02d%02d-%02d.csv\", year, month, day, hour)\r\n\ttk.Println(dirSources + \"\\\\\" + targetFileName)\r\n\t_, err := os.Open(dirSources + \"\\\\\" + targetFileName)\r\n\tif err != nil {\r\n\t\tif os.IsNotExist(err) {\r\n\t\t\ttk.Println(\"File Not Exist\")\r\n\r\n\t\t} else {\r\n\t\t\ttk.Println(err.Error())\r\n\t\t}\r\n\t\treturn false, \"\"\r\n\t}\r\n\treturn true, targetFileName\r\n}\r\nfunc FileWatcher() {\r\n\tconfig := ReadConfig()\r\n\tdirSources := config[\"FileSources\"]\r\n\tdirProcess := config[\"FileProcess\"]\r\n\tscpDir := config[\"UploadDirectory\"]\r\n\tsshUser := config[\"SSHUser\"]\r\n\tsshServer := config[\"SSHServer\"]\r\n\tvar FileName string\r\n\tvar e bool\r\n\r\n\tnow := time.Now()\r\n\tindx:=6\r\n\tarrDataReader := []d.DataReader{}\r\n\tfor ;indx>=0;indx--{\r\n\t\tprev:=now.Add(time.Duration(-indx)*time.Hour)\r\n\t\tyear,month,day:=prev.Date()\r\n\t\thour,_,_:=prev.Clock()\r\n\t\tmodFileName := fmt.Sprintf(\"%d%02d%02d\",year,month,day)\r\n\t\tmodified = ReadModified(modFileName)\r\n\t\tprocessed = ReadProcessed(modFileName)\r\n\t\ttargetFileName := fmt.Sprintf(\"DataFile%d%02d%02d-%02d.csv\",year,month,day,hour)\r\n\t\t\t\r\n\t\t\/\/watcher := w.NewFileWatcher(dirSources, dirProcess, wd,scpDir)\r\n\t\tif e=FileIsExist(dirSources,targetFileName);!e{\r\n\t\t\tcontinue\r\n\t\t}\r\n\t\tfmt.Println(dirSources+pathSep+targetFileName)\r\n\t\tinfo, _ := os.Stat(dirSources+pathSep+targetFileName)\r\n\t\tmodifiedTimeFS := info.ModTime()\r\n\t\tmodifiedTimeFS,_ = time.Parse(\"02-Jan-2006 15:04:05\",modifiedTimeFS.Format(\"02-Jan-2006 15:04:05\"))\r\n\t\tif _,ok:=modified[targetFileName];ok{\r\n\t\t\tlastModTimeLog,_ := time.Parse(\"02-Jan-2006 15:04:05\",modified[targetFileName])\r\n\t\t\tfmt.Println(lastModTimeLog.Format(\"02-Jan-2006 15:04:05\"),modifiedTimeFS.Format(\"02-Jan-2006 15:04:05\"))\r\n\t\t\tif modifiedTimeFS.After(lastModTimeLog){\r\n\t\t\t\tfmt.Println(\"File \",targetFileName,\"Modified\")\r\n\t\t\t\tdr := d.NewDataReader(dirSources+\"\\\\\"+targetFileName, dirProcess, wd,scpDir,sshUser,sshServer)\r\n\t\t\t\t_,e,start,end,rows:=dr.Start(0)\r\n\t\t\t\tarrDataReader = append(arrDataReader,*dr)\r\n\t\t\t\tif e==nil{\r\n\t\t\t\t\tmodified[targetFileName]=modifiedTimeFS.Format(\"02-Jan-2006 15:04:05\")\r\n\t\t\t\t\tnewPf:=processed[targetFileName]\r\n\t\t\t\t\tnewPf.StartTime = start.Format(\"02-Jan-2006 15:04:05\")\r\n\t\t\t\t\tnewPf.EndTime = end.Format(\"02-Jan-2006 15:04:05\")\r\n\t\t\t\t\tnewPf.RowIndex = rows\r\n\t\t\t\t\tprocessed[targetFileName]=newPf\r\n\t\t\t\t}else{\r\n\t\t\t\t\tfmt.Println(e.Error())\r\n\t\t\t\t}\r\n\t\t\t}else{\r\n\t\t\t\tfmt.Println(lastModTimeLog.Nanosecond(),modifiedTimeFS.Nanosecond())\r\n\t\t\t}\r\n\t\t}else{\r\n\t\t\tdr := d.NewDataReader(dirSources+\"\\\\\"+targetFileName, dirProcess, wd,scpDir,sshUser,sshServer)\r\n\t\t\t_,e,start,end,rows:=dr.Start(0)\r\n\t\t\tarrDataReader = append(arrDataReader,*dr)\r\n\t\t\tif e==nil{\r\n\t\t\t\tmodified[targetFileName]=modifiedTimeFS.Format(\"02-Jan-2006 15:04:05\")\r\n\t\t\t\tnewPf:=new(model.ProcessedLog)\r\n\t\t\t\t(*newPf).Filename = targetFileName\r\n\t\t\t\t(*newPf).StartTime = start.Format(\"02-Jan-2006 15:04:05\")\r\n\t\t\t\t(*newPf).EndTime = end.Format(\"02-Jan-2006 15:04:05\")\r\n\t\t\t\t(*newPf).RowIndex = rows\r\n\t\t\t\tprocessed[targetFileName]=*newPf\r\n\t\t\t}\r\n\t\t\t\r\n\t\t}\r\n\t\t\r\n\t\tWriteModified(modFileName)\r\n\t\tWriteProcessed(modFileName)\r\n\t\t\r\n\t}\r\n\tfor oo:=len(arrDataReader)-1;oo>=0;oo--{\r\n\t\tarrDataReader[oo].SendFile(arrDataReader[oo].ZipName)\r\n\t}\r\n\t\r\n\/\/=======\r\n\t\/\/watcher := w.NewFileWatcher(dirSources, dirProcess, wd,scpDir)\r\n\/\/\tif e, FileName = FileIsExist(dirSources); !e {\r\n\/\/\t\tos.Exit(1)\r\n\/\/\t}\r\n\/\/\td.NewDataReader(dirSources+\"\\\\\"+FileName, dirProcess, wd, scpDir, sshUser, sshServer).Start()\r\n\/\/>>>>>>> e93aaba7699185484bd3b42e861e2779bb99342c\r\n\t\/\/watcher.StartWatcher()\r\n}\r\n\r\nfunc CsvExtractor() {\r\n\t\/\/conn, err := PrepareConnection()\r\n\t\/\/ if err != nil {\r\n\t\/\/ \ttk.Println(\"Error connection: \", err.Error())\r\n\t\/\/ }\r\n\t\/\/ctx := orm.New(conn)\r\n\r\n\t\/\/ config := ReadConfig()\r\n\t\/\/ dirSources := config[\"FileSources\"]\r\n}\r\n\r\nfunc PrepareConnection() (dbox.IConnection, error) {\r\n\tconfig := ReadConfig()\r\n\r\n\tci := &dbox.ConnectionInfo{config[\"host\"], config[\"database\"], config[\"username\"], config[\"password\"], tk.M{}.Set(\"timeout\", 3000)}\r\n\r\n\tc, e := dbox.NewConnection(\"mongo\", ci)\r\n\r\n\tif e != nil {\r\n\t\treturn nil, e\r\n\t}\r\n\r\n\te = c.Connect()\r\n\tif e != nil {\r\n\t\treturn nil, e\r\n\t}\r\n\r\n\treturn c, nil\r\n}\r\n\r\nfunc ReadConfig() map[string]string {\r\n\tret := make(map[string]string)\r\n\tfile, err := os.Open(wd + \"conf\/app.conf\")\r\n\tif err == nil {\r\n\t\tdefer file.Close()\r\n\r\n\t\treader := bufio.NewReader(file)\r\n\t\tfor {\r\n\t\t\tline, _, e := reader.ReadLine()\r\n\t\t\tif e != nil {\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\r\n\t\t\tsval := strings.Split(string(line), \"=\")\r\n\t\t\tret[sval[0]] = sval[1]\r\n\t\t}\r\n\t} else {\r\n\t\ttk.Println(err.Error())\r\n\t}\r\n\r\n\treturn ret\r\n}\r\n<commit_msg>add function that somehow lost in my repository but exist on git<commit_after>package main\r\n\r\nimport (\r\n\t\"bufio\"\r\n\t\/\/w \"eaciit\/wfdemo-git\/processapp\/opcdata\/filewatcher\"\r\n\td \"eaciit\/wfdemo-git\/processapp\/opcdata\/datareader\"\r\n\t\"fmt\"\r\n\t\"os\"\r\n\t\"strings\"\r\n\t\"time\"\r\n\t\"io\/ioutil\"\r\n\t\"github.com\/eaciit\/dbox\"\r\n\t\"eaciit\/wfdemo-git\/processapp\/opcdata\/model\"\r\n\t_ \"github.com\/eaciit\/dbox\/dbc\/mongo\"\r\n\t_ \"github.com\/eaciit\/orm\"\r\n\ttk \"github.com\/eaciit\/toolkit\"\r\n)\r\n\r\nvar (\r\n\twd = func() string {\r\n\t\td, _ := os.Getwd()\r\n\t\treturn d + \"\/\"\r\n\t}()\r\n)\r\nvar pathSep = string(os.PathSeparator) \r\nvar modified map[string]string\r\nvar\tprocessed map[string]model.ProcessedLog\r\nfunc main() {\r\n\tFileWatcher()\r\n}\r\nfunc FileIsExist(dirSources string,targetFileName string) bool {\r\n\t_,err:=os.Open(dirSources+\"\\\\\"+targetFileName)\r\n\tif err!=nil{\r\n\t\tif os.IsNotExist(err){\r\n\t\t\ttk.Println(\"File Not Exist\")\r\n\t\t\t\r\n\t\t}else{\r\n\t\t\ttk.Println(err.Error())\r\n\t\t}\r\n\t\treturn false\r\n\t}\r\n\treturn true\r\n}\r\nfunc FileWatcher() {\r\n\tconfig := ReadConfig()\r\n\tdirSources := config[\"FileSources\"]\r\n\tdirProcess := config[\"FileProcess\"]\r\n\tscpDir := config[\"UploadDirectory\"]\r\n\tsshUser := config[\"SSHUser\"]\r\n\tsshServer := config[\"SSHServer\"]\r\n\t\/\/var FileName string\r\n\tvar e bool\r\n\r\n\tnow := time.Now()\r\n\tindx:=6\r\n\tarrDataReader := []d.DataReader{}\r\n\tfor ;indx>=0;indx--{\r\n\t\tprev:=now.Add(time.Duration(-indx)*time.Hour)\r\n\t\tyear,month,day:=prev.Date()\r\n\t\thour,_,_:=prev.Clock()\r\n\t\tmodFileName := fmt.Sprintf(\"%d%02d%02d\",year,month,day)\r\n\t\tmodified = ReadModified(modFileName)\r\n\t\tprocessed = ReadProcessed(modFileName)\r\n\t\ttargetFileName := fmt.Sprintf(\"DataFile%d%02d%02d-%02d.csv\",year,month,day,hour)\r\n\t\t\t\r\n\t\t\/\/watcher := w.NewFileWatcher(dirSources, dirProcess, wd,scpDir)\r\n\t\tif e=FileIsExist(dirSources,targetFileName);!e{\r\n\t\t\tcontinue\r\n\t\t}\r\n\t\tfmt.Println(dirSources+pathSep+targetFileName)\r\n\t\tinfo, _ := os.Stat(dirSources+pathSep+targetFileName)\r\n\t\tmodifiedTimeFS := info.ModTime()\r\n\t\tmodifiedTimeFS,_ = time.Parse(\"02-Jan-2006 15:04:05\",modifiedTimeFS.Format(\"02-Jan-2006 15:04:05\"))\r\n\t\tif _,ok:=modified[targetFileName];ok{\r\n\t\t\tlastModTimeLog,_ := time.Parse(\"02-Jan-2006 15:04:05\",modified[targetFileName])\r\n\t\t\tfmt.Println(lastModTimeLog.Format(\"02-Jan-2006 15:04:05\"),modifiedTimeFS.Format(\"02-Jan-2006 15:04:05\"))\r\n\t\t\tif modifiedTimeFS.After(lastModTimeLog){\r\n\t\t\t\tfmt.Println(\"File \",targetFileName,\"Modified\")\r\n\t\t\t\tdr := d.NewDataReader(dirSources+\"\\\\\"+targetFileName, dirProcess, wd,scpDir,sshUser,sshServer)\r\n\t\t\t\t_,e,start,end,rows:=dr.Start(0)\r\n\t\t\t\tarrDataReader = append(arrDataReader,*dr)\r\n\t\t\t\tif e==nil{\r\n\t\t\t\t\tmodified[targetFileName]=modifiedTimeFS.Format(\"02-Jan-2006 15:04:05\")\r\n\t\t\t\t\tnewPf:=processed[targetFileName]\r\n\t\t\t\t\tnewPf.StartTime = start.Format(\"02-Jan-2006 15:04:05\")\r\n\t\t\t\t\tnewPf.EndTime = end.Format(\"02-Jan-2006 15:04:05\")\r\n\t\t\t\t\tnewPf.RowIndex = rows\r\n\t\t\t\t\tprocessed[targetFileName]=newPf\r\n\t\t\t\t}else{\r\n\t\t\t\t\tfmt.Println(e.Error())\r\n\t\t\t\t}\r\n\t\t\t}else{\r\n\t\t\t\tfmt.Println(lastModTimeLog.Nanosecond(),modifiedTimeFS.Nanosecond())\r\n\t\t\t}\r\n\t\t}else{\r\n\t\t\tdr := d.NewDataReader(dirSources+\"\\\\\"+targetFileName, dirProcess, wd,scpDir,sshUser,sshServer)\r\n\t\t\t_,e,start,end,rows:=dr.Start(0)\r\n\t\t\tarrDataReader = append(arrDataReader,*dr)\r\n\t\t\tif e==nil{\r\n\t\t\t\tmodified[targetFileName]=modifiedTimeFS.Format(\"02-Jan-2006 15:04:05\")\r\n\t\t\t\tnewPf:=new(model.ProcessedLog)\r\n\t\t\t\t(*newPf).Filename = targetFileName\r\n\t\t\t\t(*newPf).StartTime = start.Format(\"02-Jan-2006 15:04:05\")\r\n\t\t\t\t(*newPf).EndTime = end.Format(\"02-Jan-2006 15:04:05\")\r\n\t\t\t\t(*newPf).RowIndex = rows\r\n\t\t\t\tprocessed[targetFileName]=*newPf\r\n\t\t\t}\r\n\t\t\t\r\n\t\t}\r\n\t\t\r\n\t\tWriteModified(modFileName)\r\n\t\tWriteProcessed(modFileName)\r\n\t\t\r\n\t}\r\n\tfor oo:=len(arrDataReader)-1;oo>=0;oo--{\r\n\t\tarrDataReader[oo].SendFile(arrDataReader[oo].ZipName)\r\n\t}\r\n\t\r\n\/\/=======\r\n\t\/\/watcher := w.NewFileWatcher(dirSources, dirProcess, wd,scpDir)\r\n\/\/\tif e, FileName = FileIsExist(dirSources); !e {\r\n\/\/\t\tos.Exit(1)\r\n\/\/\t}\r\n\/\/\td.NewDataReader(dirSources+\"\\\\\"+FileName, dirProcess, wd, scpDir, sshUser, sshServer).Start()\r\n\/\/>>>>>>> e93aaba7699185484bd3b42e861e2779bb99342c\r\n\t\/\/watcher.StartWatcher()\r\n}\r\nfunc ReadModified(filename string) map[string]string {\r\n\tret := make(map[string]string)\r\n \tfile, err := os.Open(wd + \"log\"+pathSep+\"modified_\"+filename+\".csv\")\r\n\tif err == nil {\r\n\t\tdefer file.Close()\r\n\r\n\t\treader := bufio.NewReader(file)\r\n\t\tfor {\r\n\t\t\tline, _, e := reader.ReadLine()\r\n\t\t\tif e != nil {\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\r\n\t\t\tsval := strings.Split(string(line), \";\")\r\n\t\t\tret[sval[0]] = sval[1]\r\n\t\t}\r\n\t} else {\r\n\t\ttk.Println(err.Error())\r\n\t}\r\n\r\n\treturn ret\r\n}\r\nfunc WriteModified(filename string){\r\n\tstrBuf:=\"\"\r\n\tfor val,key:=range modified{\r\n\t\tstrBuf+=val+\";\"+key+\"\\n\"\r\n\t}\r\n\tbuff:=[]byte(strBuf)\r\n\tfmt.Println(\"Write Modified\",wd+\"log\"+pathSep+\"modified_\"+filename+\".csv\")\r\n\tioutil.WriteFile(wd+\"log\"+pathSep+\"modified_\"+filename+\".csv\",buff,0644)\r\n}\r\nfunc WriteProcessed(filename string){\r\n\tstrBuf:=\"\"\r\n\tfor _,val:=range processed{\r\n\t\tstrBuf+=val.ToString()\r\n\t}\r\n\tbuff:=[]byte(strBuf)\r\n\tfmt.Println(\"Write Processed\",wd+\"log\"+pathSep+\"processed_\"+filename+\".csv\")\r\n\tioutil.WriteFile(wd+\"log\"+pathSep+\"processed_\"+filename+\".csv\",buff,0644)\r\n}\r\nfunc ReadProcessed(filename string) map[string]model.ProcessedLog {\r\n\tret := make(map[string]model.ProcessedLog)\r\n\tfile, err := os.Open(wd + \"log\"+pathSep+\"processed_\"+filename+\".csv\")\r\n\tif err == nil {\r\n\t\tdefer file.Close()\r\n\t\treader := bufio.NewReader(file)\r\n\t\tfor {\r\n\t\t\tline, _, e := reader.ReadLine()\r\n\t\t\tif e != nil {\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t\tnewPF := model.FromString(string(line))\r\n\t\t\tret[(*newPF).Filename] = *newPF\r\n\t\t}\r\n\t}\r\n\treturn ret\r\n}\r\nfunc CsvExtractor() {\r\n\t\/\/conn, err := PrepareConnection()\r\n\t\/\/ if err != nil {\r\n\t\/\/ \ttk.Println(\"Error connection: \", err.Error())\r\n\t\/\/ }\r\n\t\/\/ctx := orm.New(conn)\r\n\r\n\t\/\/ config := ReadConfig()\r\n\t\/\/ dirSources := config[\"FileSources\"]\r\n}\r\n\r\nfunc PrepareConnection() (dbox.IConnection, error) {\r\n\tconfig := ReadConfig()\r\n\r\n\tci := &dbox.ConnectionInfo{config[\"host\"], config[\"database\"], config[\"username\"], config[\"password\"], tk.M{}.Set(\"timeout\", 3000)}\r\n\r\n\tc, e := dbox.NewConnection(\"mongo\", ci)\r\n\r\n\tif e != nil {\r\n\t\treturn nil, e\r\n\t}\r\n\r\n\te = c.Connect()\r\n\tif e != nil {\r\n\t\treturn nil, e\r\n\t}\r\n\r\n\treturn c, nil\r\n}\r\n\r\nfunc ReadConfig() map[string]string {\r\n\tret := make(map[string]string)\r\n\tfile, err := os.Open(wd + \"conf\/app.conf\")\r\n\tif err == nil {\r\n\t\tdefer file.Close()\r\n\r\n\t\treader := bufio.NewReader(file)\r\n\t\tfor {\r\n\t\t\tline, _, e := reader.ReadLine()\r\n\t\t\tif e != nil {\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\r\n\t\t\tsval := strings.Split(string(line), \"=\")\r\n\t\t\tret[sval[0]] = sval[1]\r\n\t\t}\r\n\t} else {\r\n\t\ttk.Println(err.Error())\r\n\t}\r\n\r\n\treturn ret\r\n}\r\n<|endoftext|>"} {"text":"<commit_before>\/\/ Used to represent empty bools. Go types are always instantiated with a default value, for bool the default value is false.\n\/\/ This makes it difficult to update an SObject without overwriting any boolean field to false.\n\/\/ This package solves the issue by representing a bool as an int and implementing the marshal\/unmarshal json interface.\n\/\/ It is a drop in replacement for the bool type.\n\/\/ 1 is true\n\/\/ 0 is nil\n\/\/ -1 is false\n\/\/ Unmarshalling: false will be unmarshalled to -1, true will be unmarshalled to 1\n\/\/ If no value is set the unmarshaller will skip the field and the int will default to 0.\n\/\/ Marshalling: -1 will be marshaled to false, 1 will be marshaled to true, and\n\/\/ 0 will be marshaled to nothing (assuming the field has the omitempty json tag `json:\",omitempty\"`)\npackage sobjects\n\nimport \"encoding\/json\"\n\ntype SFBool int\n\nfunc (t *SFBool) MarshalJSON() ([]byte, error) {\n\tif *t == 1 {\n\t\treturn json.Marshal(true)\n\t} else if *t == -1 {\n\t\treturn json.Marshal(false)\n\t}\n\treturn json.Marshal(0)\n}\n\nfunc (t *SFBool) UnmarshalJSON(data []byte) error {\n\tb := string(data)\n\tif b == \"true\" {\n\t\t*t = 1\n\t} else if b == \"false\" {\n\t\t*t = -1\n\t}\n\treturn nil\n}\n\nfunc (t *SFBool) Bool() bool {\n\tif *t == 1 {\n\t\treturn true\n\t}\n\treturn false\n}\n<commit_msg>Don't use warning for SFBool ;)<commit_after>\/\/ Don't use this! It was an interesting effort but in reality all you need is a ptr to a bool. *bool will solve all your problems. :)\n\/\/ Used to represent empty bools. Go types are always instantiated with a default value, for bool the default value is false.\n\/\/ This makes it difficult to update an SObject without overwriting any boolean field to false.\n\/\/ This package solves the issue by representing a bool as an int and implementing the marshal\/unmarshal json interface.\n\/\/ It is a drop in replacement for the bool type.\n\/\/ 1 is true\n\/\/ 0 is nil\n\/\/ -1 is false\n\/\/ Unmarshalling: false will be unmarshalled to -1, true will be unmarshalled to 1\n\/\/ If no value is set the unmarshaller will skip the field and the int will default to 0.\n\/\/ Marshalling: -1 will be marshaled to false, 1 will be marshaled to true, and\n\/\/ 0 will be marshaled to nothing (assuming the field has the omitempty json tag `json:\",omitempty\"`)\npackage sobjects\n\nimport \"encoding\/json\"\n\ntype SFBool int\n\nfunc (t *SFBool) MarshalJSON() ([]byte, error) {\n\tif *t == 1 {\n\t\treturn json.Marshal(true)\n\t} else if *t == -1 {\n\t\treturn json.Marshal(false)\n\t}\n\treturn json.Marshal(0)\n}\n\nfunc (t *SFBool) UnmarshalJSON(data []byte) error {\n\tb := string(data)\n\tif b == \"true\" {\n\t\t*t = 1\n\t} else if b == \"false\" {\n\t\t*t = -1\n\t}\n\treturn nil\n}\n\nfunc (t *SFBool) Bool() bool {\n\tif *t == 1 {\n\t\treturn true\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n\tPort of https:\/\/github.com\/coreos\/locksmith\/blob\/master\/lock\/etcd.go\n\tfrom etcd to Consul\n*\/\n\npackage lock\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\n\tapi \"github.com\/armon\/consul-api\"\n)\n\nvar (\n\tCheckAndSetFailedErr = errors.New(\"Someone else modified the semaphore\")\n)\n\n\/\/ ConsulLockClient is a wrapper around the consul-api client\n\/\/ that provides simple primitives to operate on a named semaphore\n\/\/ stored as a Consul KV.\ntype ConsulLockClient struct {\n\tPath string\n\tclient *api.Client\n}\n\nfunc NewConsulLockClient(apiClient *api.Client) (client *ConsulLockClient, err error) {\n\tclient = &ConsulLockClient{client: apiClient}\n\terr = nil\n\treturn\n}\n\nfunc (c *ConsulLockClient) SetPath(path string) error {\n\tc.Path = path\n\treturn nil\n}\n\nfunc (c *ConsulLockClient) Init() (err error) {\n\tif c.Path == \"\" {\n\t\treturn errors.New(\"cannot initialize semaphore without a path\")\n\t}\n\n\tsem := newSemaphore()\n\tb, err := json.Marshal(sem)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkv := c.client.KV()\n\n\tpair, _, err := kv.Get(c.Path, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif pair == nil {\n\t\tp := &api.KVPair{Key: c.Path, Value: b}\n\t\t_, err := kv.Put(p, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *ConsulLockClient) Get() (sem *Semaphore, err error) {\n\tkv := c.client.KV()\n\tqo := &api.QueryOptions{\n\t\tAllowStale: false,\n\t\tRequireConsistent: true,\n\t}\n\tpair, _, err := kv.Get(c.Path, qo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsem = &Semaphore{}\n\terr = json.Unmarshal([]byte(pair.Value), sem)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsem.Index = pair.ModifyIndex\n\n\treturn sem, nil\n}\n\nfunc (c *ConsulLockClient) Set(sem *Semaphore) (err error) {\n\tif sem == nil {\n\t\treturn errors.New(\"cannot set nil semaphore\")\n\t}\n\tb, err := json.Marshal(sem)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpair := &api.KVPair{Key: c.Path, Value: b}\n\tpair.ModifyIndex = sem.Index\n\n\tkv := c.client.KV()\n\n\twritten, _, err := kv.CAS(pair, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif written != true {\n\t\t\/\/ TODO: Should I handle CAS errors in .Set()?\n\t\t\/\/ Makes the API cleaner, but makes Set potentially blocking\n\t\treturn CheckAndSetFailedErr\n\t}\n\n\treturn nil\n}\n\nfunc (c *ConsulLockClient) Watch(sem *Semaphore) (changed bool, err error) {\n\n\tkv := c.client.KV()\n\tqo := &api.QueryOptions{\n\t\tAllowStale: false,\n\t\tRequireConsistent: true,\n\t\tWaitIndex: sem.Index,\n\t}\n\tpair, meta, err := kv.Get(c.Path, qo)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\n\tif meta == nil {\n\t\treturn true, errors.New(\"Returned with nil metadata, see https:\/\/github.com\/hashicorp\/consul-template\/issues\/72\")\n\t}\n\n\tchanged = meta.LastIndex == sem.Index\n\n\t\/\/ NOTE: modifies input argument..\n\terr = json.Unmarshal([]byte(pair.Value), sem)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\n\tsem.Index = pair.ModifyIndex\n\treturn\n}\n<commit_msg>Changed logic was backwards..<commit_after>\/*\n\tPort of https:\/\/github.com\/coreos\/locksmith\/blob\/master\/lock\/etcd.go\n\tfrom etcd to Consul\n*\/\n\npackage lock\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\n\tapi \"github.com\/armon\/consul-api\"\n)\n\nvar (\n\tCheckAndSetFailedErr = errors.New(\"Someone else modified the semaphore\")\n)\n\n\/\/ ConsulLockClient is a wrapper around the consul-api client\n\/\/ that provides simple primitives to operate on a named semaphore\n\/\/ stored as a Consul KV.\ntype ConsulLockClient struct {\n\tPath string\n\tclient *api.Client\n}\n\nfunc NewConsulLockClient(apiClient *api.Client) (client *ConsulLockClient, err error) {\n\tclient = &ConsulLockClient{client: apiClient}\n\terr = nil\n\treturn\n}\n\nfunc (c *ConsulLockClient) SetPath(path string) error {\n\tc.Path = path\n\treturn nil\n}\n\nfunc (c *ConsulLockClient) Init() (err error) {\n\tif c.Path == \"\" {\n\t\treturn errors.New(\"cannot initialize semaphore without a path\")\n\t}\n\n\tsem := newSemaphore()\n\tb, err := json.Marshal(sem)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkv := c.client.KV()\n\n\tpair, _, err := kv.Get(c.Path, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif pair == nil {\n\t\tp := &api.KVPair{Key: c.Path, Value: b}\n\t\t_, err := kv.Put(p, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *ConsulLockClient) Get() (sem *Semaphore, err error) {\n\tkv := c.client.KV()\n\tqo := &api.QueryOptions{\n\t\tAllowStale: false,\n\t\tRequireConsistent: true,\n\t}\n\tpair, _, err := kv.Get(c.Path, qo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsem = &Semaphore{}\n\terr = json.Unmarshal([]byte(pair.Value), sem)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsem.Index = pair.ModifyIndex\n\n\treturn sem, nil\n}\n\nfunc (c *ConsulLockClient) Set(sem *Semaphore) (err error) {\n\tif sem == nil {\n\t\treturn errors.New(\"cannot set nil semaphore\")\n\t}\n\tb, err := json.Marshal(sem)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpair := &api.KVPair{Key: c.Path, Value: b}\n\tpair.ModifyIndex = sem.Index\n\n\tkv := c.client.KV()\n\n\twritten, _, err := kv.CAS(pair, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif written != true {\n\t\t\/\/ TODO: Should I handle CAS errors in .Set()?\n\t\t\/\/ Makes the API cleaner, but makes Set potentially blocking\n\t\treturn CheckAndSetFailedErr\n\t}\n\n\treturn nil\n}\n\nfunc (c *ConsulLockClient) Watch(sem *Semaphore) (changed bool, err error) {\n\n\tkv := c.client.KV()\n\tqo := &api.QueryOptions{\n\t\tAllowStale: false,\n\t\tRequireConsistent: true,\n\t\tWaitIndex: sem.Index,\n\t}\n\tpair, meta, err := kv.Get(c.Path, qo)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\n\tif meta == nil {\n\t\treturn true, errors.New(\"Returned with nil metadata, see https:\/\/github.com\/hashicorp\/consul-template\/issues\/72\")\n\t}\n\n\tchanged = meta.LastIndex != sem.Index\n\n\t\/\/ NOTE: modifies input argument..\n\terr = json.Unmarshal([]byte(pair.Value), sem)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\n\tsem.Index = pair.ModifyIndex\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"database\/sql\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\n\t_ \"github.com\/go-sql-driver\/mysql\"\n)\n\nvar (\n\tdeptNo string\n\tdeptName string\n)\n\nconst USER string = \"simar\"\nconst PASSWORD string = \"password\"\n\nvar operationPtr = flag.String(\"operator\", \"\", \"Operation: SELECT, DELETE, UPDATE, INSERT\")\nvar countPtr = flag.Int(\"count\", 1, \"Repeat: Number of times to repeat the benchmark.\")\nvar dbPtr = flag.String(\"db\", \"\", \"Database: Name of the DB to perform operations on.\")\nvar tablePtr = flag.String(\"table\", \"\", \"Table: Name of the table to perform operations on.\")\nvar conditionPtr = flag.String(\"condition\", \"\", \"Condition: Constraint on the transaction.\")\n\nfunc validateInput() {\n\tif *tablePtr == \"\" {\n\t\tlog.Fatal(\"Please specify a MySQL table using the --table option.\")\n\t} else if *dbPtr == \"\" {\n\t\tlog.Fatal(\"Please specify a MySQL database using the --database option.\")\n\t} else if *operationPtr == \"\" {\n\t\tlog.Fatal(\"Please specify a MySQL operation using the --operator option.\")\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tvalidateInput()\n\n\tdb, err := sql.Open(\"mysql\", fmt.Sprintf(\"%s:%s@\/%s\", USER, PASSWORD, *dbPtr))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer db.Close()\n\n\tstmtOut, err := db.Prepare(fmt.Sprintf(\"%s FROM %s %s\", *operationPtr, *tablePtr, *conditionPtr))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\trows, err := stmtOut.Query()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\terr := rows.Scan(&deptNo, &deptName)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlog.Println(deptNo, deptName)\n\t}\n\terr = rows.Err()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer stmtOut.Close()\n}\n<commit_msg>lomax: Refactoring code to live as functions<commit_after>package main\n\nimport (\n\t\"database\/sql\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\n\t_ \"github.com\/go-sql-driver\/mysql\"\n)\n\nconst USER string = \"simar\"\nconst PASSWORD string = \"password\"\n\nvar (\n\tdeptNo string\n\tdeptName string\n\tempNo int\n\tfromDate string\n\ttoDate string\n\tbirthDate string\n\tfirstName string\n\tlastName string\n\tgender string\n\thireDate string\n\tsalary int\n\ttitle string\n)\n\nvar operationPtr = flag.String(\"operator\", \"\", \"Operation: SELECT, DELETE, UPDATE, INSERT\")\nvar countPtr = flag.Int(\"count\", 1, \"Repeat: Number of times to repeat the benchmark.\")\nvar dbPtr = flag.String(\"db\", \"\", \"Database: Name of the DB to perform operations on.\")\nvar tablePtr = flag.String(\"table\", \"\", \"Table: Name of the table to perform operations on.\")\nvar conditionPtr = flag.String(\"condition\", \"\", \"Condition: Constraint on the transaction.\")\n\nfunc validateInput() {\n\tif *tablePtr == \"\" {\n\t\tlog.Fatal(\"Please specify a MySQL table using the --table option.\")\n\t} else if *dbPtr == \"\" {\n\t\tlog.Fatal(\"Please specify a MySQL database using the --database option.\")\n\t} else if *operationPtr == \"\" {\n\t\tlog.Fatal(\"Please specify a MySQL operation using the --operator option.\")\n\t}\n}\n\nfunc initializeDB() *sql.DB {\n\tdb, err := sql.Open(\"mysql\", fmt.Sprintf(\"%s:%s@\/%s\", USER, PASSWORD, *dbPtr))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn db\n}\nfunc prepareStatement(db *sql.DB, operationPtr string, tablePtr string, conditionPtr string) *sql.Rows {\n\tstmtOut, err := db.Prepare(fmt.Sprintf(\"%s FROM %s %s\", operationPtr, tablePtr, conditionPtr))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\trows, err := stmtOut.Query()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer stmtOut.Close()\n\treturn rows\n}\n\nfunc processData(rows *sql.Rows) {\n\tfor rows.Next() {\n\t\terr := rows.Scan(&deptNo, &deptName)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlog.Println(deptNo, deptName)\n\t}\n\terr := rows.Err()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tvalidateInput()\n\n\tdb := initializeDB()\n\tdefer db.Close()\n\n\trows := prepareStatement(db, *operationPtr, *tablePtr, *conditionPtr)\n\tdefer rows.Close()\n\n\tprocessData(rows)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/lxc\/lxd\"\n\t\"github.com\/lxc\/lxd\/i18n\"\n\t\"github.com\/lxc\/lxd\/shared\/gnuflag\"\n\n\t\"github.com\/lxc\/lxd\/shared\"\n)\n\ntype publishCmd struct {\n\tpAliases aliasList \/\/ aliasList defined in lxc\/image.go\n\tmakePublic bool\n\tForce bool\n}\n\nfunc (c *publishCmd) showByDefault() bool {\n\treturn true\n}\n\nfunc (c *publishCmd) usage() string {\n\treturn i18n.G(\n\t\t`Publish containers as images.\n\nlxc publish [remote:]container [remote:] [--alias=ALIAS]... [prop-key=prop-value]...`)\n}\n\nfunc (c *publishCmd) flags() {\n\tgnuflag.BoolVar(&c.makePublic, \"public\", false, i18n.G(\"Make the image public\"))\n\tgnuflag.Var(&c.pAliases, \"alias\", i18n.G(\"New alias to define at target\"))\n\tgnuflag.BoolVar(&c.Force, \"force\", false, i18n.G(\"Stop the container if currently running\"))\n\tgnuflag.BoolVar(&c.Force, \"f\", false, i18n.G(\"Stop the container if currently running\"))\n}\n\nfunc (c *publishCmd) run(config *lxd.Config, args []string) error {\n\tvar cRemote string\n\tvar cName string\n\tiName := \"\"\n\tiRemote := \"\"\n\tproperties := map[string]string{}\n\tfirstprop := 1 \/\/ first property is arg[2] if arg[1] is image remote, else arg[1]\n\n\tif len(args) < 1 {\n\t\treturn errArgs\n\t}\n\n\tcRemote, cName = config.ParseRemoteAndContainer(args[0])\n\tif len(args) >= 2 && !strings.Contains(args[1], \"=\") {\n\t\tfirstprop = 2\n\t\tiRemote, iName = config.ParseRemoteAndContainer(args[1])\n\t} else {\n\t\tiRemote, iName = config.ParseRemoteAndContainer(\"\")\n\t}\n\n\tif cName == \"\" {\n\t\treturn fmt.Errorf(i18n.G(\"Container name is mandatory\"))\n\t}\n\tif iName != \"\" {\n\t\treturn fmt.Errorf(i18n.G(\"There is no \\\"image name\\\". Did you want an alias?\"))\n\t}\n\n\td, err := lxd.NewClient(config, iRemote)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tct, err := d.ContainerInfo(cName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twasRunning := ct.StatusCode != 0 && ct.StatusCode != shared.Stopped\n\twasEphemeral := ct.Ephemeral\n\n\tif wasRunning {\n\t\tif !c.Force {\n\t\t\treturn fmt.Errorf(\"The container is currently running. Use --force to have it stopped and restarted.\")\n\t\t}\n\n\t\tif ct.Ephemeral {\n\t\t\tct.Ephemeral = false\n\t\t\terr := d.UpdateContainerConfig(cName, ct.Brief())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tresp, err := d.Action(cName, shared.Stop, -1, true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\top, err := d.WaitFor(resp.Operation)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif op.StatusCode == shared.Failure {\n\t\t\treturn fmt.Errorf(i18n.G(\"Stopping container failed!\"))\n\t\t}\n\t\tdefer d.Action(cName, shared.Start, -1, true)\n\n\t\tif wasEphemeral {\n\t\t\tct.Ephemeral = true\n\t\t\terr := d.UpdateContainerConfig(cName, ct.Brief())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i := firstprop; i < len(args); i++ {\n\t\tentry := strings.SplitN(args[i], \"=\", 2)\n\t\tif len(entry) < 2 {\n\t\t\treturn errArgs\n\t\t}\n\t\tproperties[entry[0]] = entry[1]\n\t}\n\n\tvar fp string\n\n\t\/\/ Optimized local publish\n\tif cRemote == iRemote {\n\t\tfp, err = d.ImageFromContainer(cName, c.makePublic, c.pAliases, properties)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Printf(i18n.G(\"Container published with fingerprint: %s\")+\"\\n\", fp)\n\t\treturn nil\n\t}\n\n\ts, err := lxd.NewClient(config, cRemote)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfp, err = s.ImageFromContainer(cName, false, nil, properties)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer s.DeleteImage(fp)\n\n\terr = s.CopyImage(fp, d, false, c.pAliases, c.makePublic, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(i18n.G(\"Container published with fingerprint: %s\")+\"\\n\", fp)\n\n\treturn nil\n}\n<commit_msg>Fix publishing snapshots<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/lxc\/lxd\"\n\t\"github.com\/lxc\/lxd\/i18n\"\n\t\"github.com\/lxc\/lxd\/shared\/gnuflag\"\n\n\t\"github.com\/lxc\/lxd\/shared\"\n)\n\ntype publishCmd struct {\n\tpAliases aliasList \/\/ aliasList defined in lxc\/image.go\n\tmakePublic bool\n\tForce bool\n}\n\nfunc (c *publishCmd) showByDefault() bool {\n\treturn true\n}\n\nfunc (c *publishCmd) usage() string {\n\treturn i18n.G(\n\t\t`Publish containers as images.\n\nlxc publish [remote:]container [remote:] [--alias=ALIAS]... [prop-key=prop-value]...`)\n}\n\nfunc (c *publishCmd) flags() {\n\tgnuflag.BoolVar(&c.makePublic, \"public\", false, i18n.G(\"Make the image public\"))\n\tgnuflag.Var(&c.pAliases, \"alias\", i18n.G(\"New alias to define at target\"))\n\tgnuflag.BoolVar(&c.Force, \"force\", false, i18n.G(\"Stop the container if currently running\"))\n\tgnuflag.BoolVar(&c.Force, \"f\", false, i18n.G(\"Stop the container if currently running\"))\n}\n\nfunc (c *publishCmd) run(config *lxd.Config, args []string) error {\n\tvar cRemote string\n\tvar cName string\n\tiName := \"\"\n\tiRemote := \"\"\n\tproperties := map[string]string{}\n\tfirstprop := 1 \/\/ first property is arg[2] if arg[1] is image remote, else arg[1]\n\n\tif len(args) < 1 {\n\t\treturn errArgs\n\t}\n\n\tcRemote, cName = config.ParseRemoteAndContainer(args[0])\n\tif len(args) >= 2 && !strings.Contains(args[1], \"=\") {\n\t\tfirstprop = 2\n\t\tiRemote, iName = config.ParseRemoteAndContainer(args[1])\n\t} else {\n\t\tiRemote, iName = config.ParseRemoteAndContainer(\"\")\n\t}\n\n\tif cName == \"\" {\n\t\treturn fmt.Errorf(i18n.G(\"Container name is mandatory\"))\n\t}\n\tif iName != \"\" {\n\t\treturn fmt.Errorf(i18n.G(\"There is no \\\"image name\\\". Did you want an alias?\"))\n\t}\n\n\td, err := lxd.NewClient(config, iRemote)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !shared.IsSnapshot(cName) {\n\t\tct, err := d.ContainerInfo(cName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\twasRunning := ct.StatusCode != 0 && ct.StatusCode != shared.Stopped\n\t\twasEphemeral := ct.Ephemeral\n\n\t\tif wasRunning {\n\t\t\tif !c.Force {\n\t\t\t\treturn fmt.Errorf(\"The container is currently running. Use --force to have it stopped and restarted.\")\n\t\t\t}\n\n\t\t\tif ct.Ephemeral {\n\t\t\t\tct.Ephemeral = false\n\t\t\t\terr := d.UpdateContainerConfig(cName, ct.Brief())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tresp, err := d.Action(cName, shared.Stop, -1, true)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\top, err := d.WaitFor(resp.Operation)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif op.StatusCode == shared.Failure {\n\t\t\t\treturn fmt.Errorf(i18n.G(\"Stopping container failed!\"))\n\t\t\t}\n\t\t\tdefer d.Action(cName, shared.Start, -1, true)\n\n\t\t\tif wasEphemeral {\n\t\t\t\tct.Ephemeral = true\n\t\t\t\terr := d.UpdateContainerConfig(cName, ct.Brief())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i := firstprop; i < len(args); i++ {\n\t\tentry := strings.SplitN(args[i], \"=\", 2)\n\t\tif len(entry) < 2 {\n\t\t\treturn errArgs\n\t\t}\n\t\tproperties[entry[0]] = entry[1]\n\t}\n\n\tvar fp string\n\n\t\/\/ Optimized local publish\n\tif cRemote == iRemote {\n\t\tfp, err = d.ImageFromContainer(cName, c.makePublic, c.pAliases, properties)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Printf(i18n.G(\"Container published with fingerprint: %s\")+\"\\n\", fp)\n\t\treturn nil\n\t}\n\n\ts, err := lxd.NewClient(config, cRemote)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfp, err = s.ImageFromContainer(cName, false, nil, properties)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer s.DeleteImage(fp)\n\n\terr = s.CopyImage(fp, d, false, c.pAliases, c.makePublic, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(i18n.G(\"Container published with fingerprint: %s\")+\"\\n\", fp)\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package myconfig\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"path\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/clientv3\"\n)\n\nconst (\n\tdialTimeout = 5 * time.Second\n\trequestTimeout = 1 * time.Second\n)\n\n\/\/ Config holds configuration data.\ntype Config struct {\n\tEndpoints []string\n\tEnv string\n\tService string\n}\n\n\/\/ Client is configuration client.\ntype Client struct {\n\tetcdClient *clientv3.Client\n\tglobalPrefix string\n\tservicePrefix string\n\tstorage *data\n}\n\n\/\/ New creates configuration client.\nfunc New(cfg Config) (*Client, error) {\n\n\tec, err := clientv3.New(clientv3.Config{\n\t\tEndpoints: cfg.Endpoints,\n\t\tDialTimeout: dialTimeout,\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &Client{\n\t\tetcdClient: ec,\n\t\tstorage: newData(),\n\t}\n\n\tif err := c.setPrefixes(cfg.Env, cfg.Service); err != nil {\n\t\treturn nil, fmt.Errorf(\"prefixes: %v\", err)\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\n\tgo c.updateStorage(&wg, c.globalPrefix)\n\n\tgo c.updateStorage(&wg, c.servicePrefix)\n\n\twg.Wait()\n\n\treturn c, nil\n\n}\n\nfunc (c *Client) String(key string) (string, bool) {\n\tv, ok := c.storage.get(key)\n\treturn v, ok\n}\n\nfunc (c *Client) updateStorage(wg *sync.WaitGroup, prefix string) {\n\tdefer wg.Done()\n\tdm, err := c.get(prefix)\n\tif err != nil {\n\t\tlog.Printf(\"update client: %s: %v\", prefix, err)\n\t}\n\tc.storage.update(dm)\n}\n\nconst companyKey = \"com\"\n\nfunc (c *Client) setPrefixes(env, service string) error {\n\tif env == \"\" {\n\t\treturn errors.New(\"empty env\")\n\t}\n\tif service == \"\" {\n\t\treturn errors.New(\"empty service\")\n\t}\n\troot := fmt.Sprintf(\"\/%s\/%s\", companyKey, env)\n\tc.globalPrefix = fmt.Sprintf(\"%s\/global\", root)\n\tc.servicePrefix = fmt.Sprintf(\"%s\/%s\", root, service)\n\treturn nil\n}\n\nfunc (c *Client) get(prefix string) (dataMap, error) {\n\n\tctx, cancel := context.WithTimeout(context.Background(), requestTimeout)\n\tresp, err := c.etcdClient.Get(\n\t\tctx,\n\t\tprefix,\n\t\tclientv3.WithPrefix(),\n\t\tclientv3.WithSerializable(),\n\t)\n\n\tdefer cancel()\n\n\tdm := dataMap{}\n\n\tif err != nil {\n\t\treturn dm, err\n\t}\n\n\tif len(resp.Kvs) < 1 {\n\t\treturn dm, errors.New(\"not exists\")\n\t}\n\n\tvar k, v string\n\n\tfor _, ev := range resp.Kvs {\n\t\tk = string(ev.Key)\n\t\tif strings.HasPrefix(k, c.globalPrefix) {\n\t\t\tk = \"\/global\" + strings.TrimPrefix(k, c.globalPrefix)\n\t\t}\n\t\tk = strings.TrimPrefix(k, c.servicePrefix)\n\n\t\tv = string(ev.Value)\n\n\t\tdm[k] = v\n\t}\n\treturn dm, nil\n}\n\n\/\/ Close closes client.\nfunc Close(c *Client) {\n\n\terr := c.etcdClient.Close()\n\tif err != nil {\n\t\t\/\/ TODO(dvrkps): add better logging\n\t\tlog.Print(err)\n\t}\n}\n\nfunc checkKey(key string) error {\n\tif key == \"\" || key == \"\/\" {\n\t\treturn errors.New(\"empty key\")\n\t}\n\tif path.IsAbs(key) {\n\t\treturn errors.New(\"relative key\")\n\t}\n\treturn nil\n}\n<commit_msg>etcdconfig\/myconfig: fix checkKey<commit_after>package myconfig\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"path\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/clientv3\"\n)\n\nconst (\n\tdialTimeout = 5 * time.Second\n\trequestTimeout = 1 * time.Second\n)\n\n\/\/ Config holds configuration data.\ntype Config struct {\n\tEndpoints []string\n\tEnv string\n\tService string\n}\n\n\/\/ Client is configuration client.\ntype Client struct {\n\tetcdClient *clientv3.Client\n\tglobalPrefix string\n\tservicePrefix string\n\tstorage *data\n}\n\n\/\/ New creates configuration client.\nfunc New(cfg Config) (*Client, error) {\n\n\tec, err := clientv3.New(clientv3.Config{\n\t\tEndpoints: cfg.Endpoints,\n\t\tDialTimeout: dialTimeout,\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &Client{\n\t\tetcdClient: ec,\n\t\tstorage: newData(),\n\t}\n\n\tif err := c.setPrefixes(cfg.Env, cfg.Service); err != nil {\n\t\treturn nil, fmt.Errorf(\"prefixes: %v\", err)\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\n\tgo c.updateStorage(&wg, c.globalPrefix)\n\n\tgo c.updateStorage(&wg, c.servicePrefix)\n\n\twg.Wait()\n\n\treturn c, nil\n\n}\n\nfunc (c *Client) String(key string) (string, bool) {\n\tv, ok := c.storage.get(key)\n\treturn v, ok\n}\n\nfunc (c *Client) updateStorage(wg *sync.WaitGroup, prefix string) {\n\tdefer wg.Done()\n\tdm, err := c.get(prefix)\n\tif err != nil {\n\t\tlog.Printf(\"update client: %s: %v\", prefix, err)\n\t}\n\tc.storage.update(dm)\n}\n\nconst companyKey = \"com\"\n\nfunc (c *Client) setPrefixes(env, service string) error {\n\tif env == \"\" {\n\t\treturn errors.New(\"empty env\")\n\t}\n\tif service == \"\" {\n\t\treturn errors.New(\"empty service\")\n\t}\n\troot := fmt.Sprintf(\"\/%s\/%s\", companyKey, env)\n\tc.globalPrefix = fmt.Sprintf(\"%s\/global\", root)\n\tc.servicePrefix = fmt.Sprintf(\"%s\/%s\", root, service)\n\treturn nil\n}\n\nfunc (c *Client) get(prefix string) (dataMap, error) {\n\n\tctx, cancel := context.WithTimeout(context.Background(), requestTimeout)\n\tresp, err := c.etcdClient.Get(\n\t\tctx,\n\t\tprefix,\n\t\tclientv3.WithPrefix(),\n\t\tclientv3.WithSerializable(),\n\t)\n\n\tdefer cancel()\n\n\tdm := dataMap{}\n\n\tif err != nil {\n\t\treturn dm, err\n\t}\n\n\tif len(resp.Kvs) < 1 {\n\t\treturn dm, errors.New(\"not exists\")\n\t}\n\n\tvar k, v string\n\n\tfor _, ev := range resp.Kvs {\n\t\tk = string(ev.Key)\n\t\tif strings.HasPrefix(k, c.globalPrefix) {\n\t\t\tk = \"\/global\" + strings.TrimPrefix(k, c.globalPrefix)\n\t\t}\n\t\tk = strings.TrimPrefix(k, c.servicePrefix)\n\n\t\tv = string(ev.Value)\n\n\t\tdm[k] = v\n\t}\n\treturn dm, nil\n}\n\n\/\/ Close closes client.\nfunc Close(c *Client) {\n\n\terr := c.etcdClient.Close()\n\tif err != nil {\n\t\t\/\/ TODO(dvrkps): add better logging\n\t\tlog.Print(err)\n\t}\n}\n\nfunc checkKey(key string) error {\n\tif key == \"\" || key == \"\/\" {\n\t\treturn errors.New(\"empty key\")\n\t}\n\tif !path.IsAbs(key) {\n\t\treturn errors.New(\"relative key\")\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package gordon\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\nconst (\n\tMaintainerFileName = \"MAINTAINERS\"\n\tNumWorkers = 10\n)\n\n\/\/ GetMaintainersFromRepo returns the maintainers for a repo with the username\n\/\/ as the key and the file's that they own as a slice in the value\nfunc GetMaintainersFromRepo(repoPath string) (map[string][]string, error) {\n\tcurrent := make(map[string][]string)\n\n\tif err := getMaintainersForDirectory(repoPath, repoPath, current); err != nil {\n\t\treturn nil, err\n\t}\n\treturn current, nil\n}\n\nfunc getMaintainersForDirectory(root, dir string, current map[string][]string) error {\n\tmaintainersPerFile, err := getMaintainersFromFile(dir)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\n\tfor m, files := range maintainersPerFile {\n\t\tfor _, f := range files {\n\t\t\tp, err := filepath.Rel(root, filepath.Join(dir, f))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcurrent[m] = append(current[m], p)\n\t\t}\n\t}\n\n\tcontents, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, fi := range contents {\n\t\tif fi.IsDir() && fi.Name() != \".git\" {\n\t\t\tif err := getMaintainersForDirectory(root, filepath.Join(dir, fi.Name()), current); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getMaintainersFromFile(dir string) (map[string][]string, error) {\n\tmaintainerFile := filepath.Join(dir, MaintainerFileName)\n\tf, err := os.Open(maintainerFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tvar (\n\t\tmaintainer = make(map[string][]string)\n\t\ts = bufio.NewScanner(f)\n\t)\n\tfor s.Scan() {\n\t\tif err := s.Err(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tt := s.Text()\n\t\tif t == \"\" || t[0] == '#' {\n\t\t\tcontinue\n\t\t}\n\t\tm := parseMaintainer(t)\n\t\tif m.Email == \"\" || m.Username == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"invalid maintainer file format %s in %s\", t, maintainerFile)\n\t\t}\n\t\ttarget := m.Target\n\t\tif target == \"\" {\n\t\t\ttarget = \"*\"\n\t\t}\n\t\tmaintainer[m.Username] = append(maintainer[m.Username], target)\n\t}\n\treturn maintainer, nil\n}\n\n\/\/ this function basically reverses the maintainers format so that file paths can be looked\n\/\/ up by path and the maintainers are the value. We have to parse the directories differently\n\/\/ at first then lookup per path when we actually have the files so that it is much faster\n\/\/ and cleaner than walking a fill dir tree looking at files and placing them into memeory.\n\/\/\n\/\/ I swear I'm not crazy\nfunc buildFileIndex(maintainers map[string][]string) map[string]map[string]bool {\n\tindex := make(map[string]map[string]bool)\n\n\tfor m, files := range maintainers {\n\t\tfor _, f := range files {\n\t\t\tnm, exists := index[f]\n\t\t\tif !exists {\n\t\t\t\tnm = make(map[string]bool)\n\t\t\t\tindex[f] = nm\n\t\t\t}\n\t\t\tnm[m] = true\n\t\t}\n\t}\n\treturn index\n}\n<commit_msg>Use email and not github name<commit_after>package gordon\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\nconst (\n\tMaintainerFileName = \"MAINTAINERS\"\n\tNumWorkers = 10\n)\n\n\/\/ GetMaintainersFromRepo returns the maintainers for a repo with the username\n\/\/ as the key and the file's that they own as a slice in the value\nfunc GetMaintainersFromRepo(repoPath string) (map[string][]string, error) {\n\tcurrent := make(map[string][]string)\n\n\tif err := getMaintainersForDirectory(repoPath, repoPath, current); err != nil {\n\t\treturn nil, err\n\t}\n\treturn current, nil\n}\n\nfunc getMaintainersForDirectory(root, dir string, current map[string][]string) error {\n\tmaintainersPerFile, err := getMaintainersFromFile(dir)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\n\tfor m, files := range maintainersPerFile {\n\t\tfor _, f := range files {\n\t\t\tp, err := filepath.Rel(root, filepath.Join(dir, f))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcurrent[m] = append(current[m], p)\n\t\t}\n\t}\n\n\tcontents, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, fi := range contents {\n\t\tif fi.IsDir() && fi.Name() != \".git\" {\n\t\t\tif err := getMaintainersForDirectory(root, filepath.Join(dir, fi.Name()), current); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getMaintainersFromFile(dir string) (map[string][]string, error) {\n\tmaintainerFile := filepath.Join(dir, MaintainerFileName)\n\tf, err := os.Open(maintainerFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tvar (\n\t\tmaintainer = make(map[string][]string)\n\t\ts = bufio.NewScanner(f)\n\t)\n\tfor s.Scan() {\n\t\tif err := s.Err(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tt := s.Text()\n\t\tif t == \"\" || t[0] == '#' {\n\t\t\tcontinue\n\t\t}\n\t\tm := parseMaintainer(t)\n\t\tif m.Email == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"invalid maintainer file format %s in %s\", t, maintainerFile)\n\t\t}\n\t\ttarget := m.Target\n\t\tif target == \"\" {\n\t\t\ttarget = \"*\"\n\t\t}\n\t\tmaintainer[m.Email] = append(maintainer[m.Email], target)\n\t}\n\treturn maintainer, nil\n}\n\n\/\/ this function basically reverses the maintainers format so that file paths can be looked\n\/\/ up by path and the maintainers are the value. We have to parse the directories differently\n\/\/ at first then lookup per path when we actually have the files so that it is much faster\n\/\/ and cleaner than walking a fill dir tree looking at files and placing them into memeory.\n\/\/\n\/\/ I swear I'm not crazy\nfunc buildFileIndex(maintainers map[string][]string) map[string]map[string]bool {\n\tindex := make(map[string]map[string]bool)\n\n\tfor m, files := range maintainers {\n\t\tfor _, f := range files {\n\t\t\tnm, exists := index[f]\n\t\t\tif !exists {\n\t\t\t\tnm = make(map[string]bool)\n\t\t\t\tindex[f] = nm\n\t\t\t}\n\t\t\tnm[m] = true\n\t\t}\n\t}\n\treturn index\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017, OpenCensus Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package zipkin contains an exporter for Zipkin.\n\/\/\n\/\/ Example:\n\/\/\n\/\/ \timport (\n\/\/ \t\topenzipkin \"github.com\/openzipkin\/zipkin-go\"\n\/\/ \t\t\"github.com\/openzipkin\/zipkin-go\/reporter\/http\"\n\/\/ \t\t\"go.opencensus.io\/trace\/adaptor\/zipkin\"\n\/\/ \t)\n\/\/\t...\n\/\/\t\tlocalEndpoint, err := openzipkin.NewEndpoint(\"server\", \"server:5454\")\n\/\/ \t\tif err != nil {\n\/\/ \t\t\tlog.Print(err)\n\/\/ \t\t}\n\/\/ \t\treporter := http.NewReporter(\"http:\/\/localhost:9411\/api\/v2\/spans\")\n\/\/ \t\texporter := zipkin.NewExporter(reporter, localEndpoint)\n\/\/ \t\ttrace.RegisterExporter(exporter)\npackage zipkin\n\nimport (\n\t\"encoding\/binary\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/openzipkin\/zipkin-go\/model\"\n\t\"github.com\/openzipkin\/zipkin-go\/reporter\"\n\t\"go.opencensus.io\/trace\"\n)\n\n\/\/ Exporter is an implementation of trace.Exporter that uploads spans to a\n\/\/ Zipkin server.\ntype Exporter struct {\n\treporter reporter.Reporter\n\tlocalEndpoint *model.Endpoint\n}\n\n\/\/ NewExporter returns an implementation of trace.Exporter that uploads spans\n\/\/ to a Zipkin server.\n\/\/\n\/\/ reporter is a Zipkin Reporter which will be used to send the spans. These\n\/\/ can be created with the openzipkin library, using one of the packages under\n\/\/ github.com\/openzipkin\/zipkin-go\/reporter.\n\/\/\n\/\/ localEndpoint sets the local endpoint of exported spans. It can be\n\/\/ constructed with github.com\/openzipkin\/zipkin-go.NewEndpoint, e.g.:\n\/\/ \tlocalEndpoint, err := NewEndpoint(\"my server\", listener.Addr().String())\n\/\/ localEndpoint can be nil.\nfunc NewExporter(reporter reporter.Reporter, localEndpoint *model.Endpoint) *Exporter {\n\treturn &Exporter{\n\t\treporter: reporter,\n\t\tlocalEndpoint: localEndpoint,\n\t}\n}\n\n\/\/ Export exports a span to a Zipkin server.\nfunc (e *Exporter) Export(s *trace.SpanData) {\n\te.reporter.Send(zipkinSpan(s, e.localEndpoint))\n}\n\nconst (\n\tstatusCodeTagKey = \"error\"\n\tstatusDescriptionTagKey = \"opencensus.status_description\"\n)\n\nvar (\n\tsampledTrue = true\n\tcanonicalCodes = [...]string{\n\t\t\"OK\",\n\t\t\"CANCELLED\",\n\t\t\"UNKNOWN\",\n\t\t\"INVALID_ARGUMENT\",\n\t\t\"DEADLINE_EXCEEDED\",\n\t\t\"NOT_FOUND\",\n\t\t\"ALREADY_EXISTS\",\n\t\t\"PERMISSION_DENIED\",\n\t\t\"RESOURCE_EXHAUSTED\",\n\t\t\"FAILED_PRECONDITION\",\n\t\t\"ABORTED\",\n\t\t\"OUT_OF_RANGE\",\n\t\t\"UNIMPLEMENTED\",\n\t\t\"INTERNAL\",\n\t\t\"UNAVAILABLE\",\n\t\t\"DATA_LOSS\",\n\t\t\"UNAUTHENTICATED\",\n\t}\n)\n\nfunc canonicalCodeString(code int32) string {\n\tif code < 0 || int(code) >= len(canonicalCodes) {\n\t\treturn \"error code \" + strconv.FormatInt(int64(code), 10)\n\t}\n\treturn canonicalCodes[code]\n}\n\nfunc convertTraceID(t trace.TraceID) model.TraceID {\n\treturn model.TraceID{\n\t\tHigh: binary.BigEndian.Uint64(t[:8]),\n\t\tLow: binary.BigEndian.Uint64(t[8:]),\n\t}\n}\n\nfunc convertSpanID(s trace.SpanID) model.ID {\n\treturn model.ID(binary.BigEndian.Uint64(s[:]))\n}\n\nfunc spanKind(s *trace.SpanData) model.Kind {\n\tif s.HasRemoteParent {\n\t\treturn model.Server\n\t}\n\tif strings.HasPrefix(s.Name, \"Sent.\") {\n\t\treturn model.Client\n\t}\n\tif strings.HasPrefix(s.Name, \"Recv.\") {\n\t\treturn model.Server\n\t}\n\tif len(s.MessageEvents) > 0 {\n\t\tswitch s.MessageEvents[0].EventType {\n\t\tcase trace.MessageEventTypeSent:\n\t\t\treturn model.Client\n\t\tcase trace.MessageEventTypeRecv:\n\t\t\treturn model.Server\n\t\t}\n\t}\n\treturn model.Undetermined\n}\n\nfunc zipkinSpan(s *trace.SpanData, localEndpoint *model.Endpoint) model.SpanModel {\n\tsc := s.SpanContext\n\tz := model.SpanModel{\n\t\tSpanContext: model.SpanContext{\n\t\t\tTraceID: convertTraceID(sc.TraceID),\n\t\t\tID: convertSpanID(sc.SpanID),\n\t\t\tSampled: &sampledTrue,\n\t\t},\n\t\tKind: spanKind(s),\n\t\tName: s.Name,\n\t\tTimestamp: s.StartTime,\n\t\tShared: false,\n\t\tLocalEndpoint: localEndpoint,\n\t}\n\n\tif s.ParentSpanID != (trace.SpanID{}) {\n\t\tid := convertSpanID(s.ParentSpanID)\n\t\tz.ParentID = &id\n\t}\n\n\tif s, e := s.StartTime, s.EndTime; !s.IsZero() && !e.IsZero() {\n\t\tz.Duration = e.Sub(s)\n\t}\n\n\t\/\/ construct Tags from s.Attributes and s.Status.\n\tif len(s.Attributes) != 0 {\n\t\tm := make(map[string]string, len(s.Attributes)+2)\n\t\tfor key, value := range s.Attributes {\n\t\t\tswitch v := value.(type) {\n\t\t\tcase string:\n\t\t\t\tm[key] = v\n\t\t\tcase bool:\n\t\t\t\tif v {\n\t\t\t\t\tm[key] = \"true\"\n\t\t\t\t} else {\n\t\t\t\t\tm[key] = \"false\"\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tm[key] = strconv.FormatInt(v, 10)\n\t\t\t}\n\t\t}\n\t\tz.Tags = m\n\t}\n\tif s.Status.Code != 0 || s.Status.Message != \"\" {\n\t\tif z.Tags == nil {\n\t\t\tz.Tags = make(map[string]string, 2)\n\t\t}\n\t\tif s.Status.Code != 0 {\n\t\t\tz.Tags[statusCodeTagKey] = canonicalCodeString(s.Status.Code)\n\t\t}\n\t\tif s.Status.Message != \"\" {\n\t\t\tz.Tags[statusDescriptionTagKey] = s.Status.Message\n\t\t}\n\t}\n\n\t\/\/ construct Annotations from s.Annotations and s.MessageEvents.\n\tif len(s.Annotations) != 0 || len(s.MessageEvents) != 0 {\n\t\tz.Annotations = make([]model.Annotation, 0, len(s.Annotations)+len(s.MessageEvents))\n\t\tfor _, a := range s.Annotations {\n\t\t\tz.Annotations = append(z.Annotations, model.Annotation{\n\t\t\t\tTimestamp: a.Time,\n\t\t\t\tValue: a.Message,\n\t\t\t})\n\t\t}\n\t\tfor _, m := range s.MessageEvents {\n\t\t\ta := model.Annotation{\n\t\t\t\tTimestamp: m.Time,\n\t\t\t}\n\t\t\tswitch m.EventType {\n\t\t\tcase trace.MessageEventTypeSent:\n\t\t\t\ta.Value = \"SENT\"\n\t\t\tcase trace.MessageEventTypeRecv:\n\t\t\t\ta.Value = \"RECV\"\n\t\t\tdefault:\n\t\t\t\ta.Value = \"<?>\"\n\t\t\t}\n\t\t\tz.Annotations = append(z.Annotations, a)\n\t\t}\n\t}\n\n\treturn z\n}\n<commit_msg>Fix zipkin example (#274)<commit_after>\/\/ Copyright 2017, OpenCensus Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package zipkin contains an exporter for Zipkin.\n\/\/\n\/\/ Example:\n\/\/\n\/\/ \timport (\n\/\/ \t\topenzipkin \"github.com\/openzipkin\/zipkin-go\"\n\/\/ \t\t\"github.com\/openzipkin\/zipkin-go\/reporter\/http\"\n\/\/ \t\t\"go.opencensus.io\/exporter\/trace\/zipkin\"\n\/\/ \t)\n\/\/\t...\n\/\/\t\tlocalEndpoint, err := openzipkin.NewEndpoint(\"server\", \"server:5454\")\n\/\/ \t\tif err != nil {\n\/\/ \t\t\tlog.Print(err)\n\/\/ \t\t}\n\/\/ \t\treporter := http.NewReporter(\"http:\/\/localhost:9411\/api\/v2\/spans\")\n\/\/ \t\texporter := zipkin.NewExporter(reporter, localEndpoint)\n\/\/ \t\ttrace.RegisterExporter(exporter)\npackage zipkin\n\nimport (\n\t\"encoding\/binary\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/openzipkin\/zipkin-go\/model\"\n\t\"github.com\/openzipkin\/zipkin-go\/reporter\"\n\t\"go.opencensus.io\/trace\"\n)\n\n\/\/ Exporter is an implementation of trace.Exporter that uploads spans to a\n\/\/ Zipkin server.\ntype Exporter struct {\n\treporter reporter.Reporter\n\tlocalEndpoint *model.Endpoint\n}\n\n\/\/ NewExporter returns an implementation of trace.Exporter that uploads spans\n\/\/ to a Zipkin server.\n\/\/\n\/\/ reporter is a Zipkin Reporter which will be used to send the spans. These\n\/\/ can be created with the openzipkin library, using one of the packages under\n\/\/ github.com\/openzipkin\/zipkin-go\/reporter.\n\/\/\n\/\/ localEndpoint sets the local endpoint of exported spans. It can be\n\/\/ constructed with github.com\/openzipkin\/zipkin-go.NewEndpoint, e.g.:\n\/\/ \tlocalEndpoint, err := NewEndpoint(\"my server\", listener.Addr().String())\n\/\/ localEndpoint can be nil.\nfunc NewExporter(reporter reporter.Reporter, localEndpoint *model.Endpoint) *Exporter {\n\treturn &Exporter{\n\t\treporter: reporter,\n\t\tlocalEndpoint: localEndpoint,\n\t}\n}\n\n\/\/ Export exports a span to a Zipkin server.\nfunc (e *Exporter) Export(s *trace.SpanData) {\n\te.reporter.Send(zipkinSpan(s, e.localEndpoint))\n}\n\nconst (\n\tstatusCodeTagKey = \"error\"\n\tstatusDescriptionTagKey = \"opencensus.status_description\"\n)\n\nvar (\n\tsampledTrue = true\n\tcanonicalCodes = [...]string{\n\t\t\"OK\",\n\t\t\"CANCELLED\",\n\t\t\"UNKNOWN\",\n\t\t\"INVALID_ARGUMENT\",\n\t\t\"DEADLINE_EXCEEDED\",\n\t\t\"NOT_FOUND\",\n\t\t\"ALREADY_EXISTS\",\n\t\t\"PERMISSION_DENIED\",\n\t\t\"RESOURCE_EXHAUSTED\",\n\t\t\"FAILED_PRECONDITION\",\n\t\t\"ABORTED\",\n\t\t\"OUT_OF_RANGE\",\n\t\t\"UNIMPLEMENTED\",\n\t\t\"INTERNAL\",\n\t\t\"UNAVAILABLE\",\n\t\t\"DATA_LOSS\",\n\t\t\"UNAUTHENTICATED\",\n\t}\n)\n\nfunc canonicalCodeString(code int32) string {\n\tif code < 0 || int(code) >= len(canonicalCodes) {\n\t\treturn \"error code \" + strconv.FormatInt(int64(code), 10)\n\t}\n\treturn canonicalCodes[code]\n}\n\nfunc convertTraceID(t trace.TraceID) model.TraceID {\n\treturn model.TraceID{\n\t\tHigh: binary.BigEndian.Uint64(t[:8]),\n\t\tLow: binary.BigEndian.Uint64(t[8:]),\n\t}\n}\n\nfunc convertSpanID(s trace.SpanID) model.ID {\n\treturn model.ID(binary.BigEndian.Uint64(s[:]))\n}\n\nfunc spanKind(s *trace.SpanData) model.Kind {\n\tif s.HasRemoteParent {\n\t\treturn model.Server\n\t}\n\tif strings.HasPrefix(s.Name, \"Sent.\") {\n\t\treturn model.Client\n\t}\n\tif strings.HasPrefix(s.Name, \"Recv.\") {\n\t\treturn model.Server\n\t}\n\tif len(s.MessageEvents) > 0 {\n\t\tswitch s.MessageEvents[0].EventType {\n\t\tcase trace.MessageEventTypeSent:\n\t\t\treturn model.Client\n\t\tcase trace.MessageEventTypeRecv:\n\t\t\treturn model.Server\n\t\t}\n\t}\n\treturn model.Undetermined\n}\n\nfunc zipkinSpan(s *trace.SpanData, localEndpoint *model.Endpoint) model.SpanModel {\n\tsc := s.SpanContext\n\tz := model.SpanModel{\n\t\tSpanContext: model.SpanContext{\n\t\t\tTraceID: convertTraceID(sc.TraceID),\n\t\t\tID: convertSpanID(sc.SpanID),\n\t\t\tSampled: &sampledTrue,\n\t\t},\n\t\tKind: spanKind(s),\n\t\tName: s.Name,\n\t\tTimestamp: s.StartTime,\n\t\tShared: false,\n\t\tLocalEndpoint: localEndpoint,\n\t}\n\n\tif s.ParentSpanID != (trace.SpanID{}) {\n\t\tid := convertSpanID(s.ParentSpanID)\n\t\tz.ParentID = &id\n\t}\n\n\tif s, e := s.StartTime, s.EndTime; !s.IsZero() && !e.IsZero() {\n\t\tz.Duration = e.Sub(s)\n\t}\n\n\t\/\/ construct Tags from s.Attributes and s.Status.\n\tif len(s.Attributes) != 0 {\n\t\tm := make(map[string]string, len(s.Attributes)+2)\n\t\tfor key, value := range s.Attributes {\n\t\t\tswitch v := value.(type) {\n\t\t\tcase string:\n\t\t\t\tm[key] = v\n\t\t\tcase bool:\n\t\t\t\tif v {\n\t\t\t\t\tm[key] = \"true\"\n\t\t\t\t} else {\n\t\t\t\t\tm[key] = \"false\"\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tm[key] = strconv.FormatInt(v, 10)\n\t\t\t}\n\t\t}\n\t\tz.Tags = m\n\t}\n\tif s.Status.Code != 0 || s.Status.Message != \"\" {\n\t\tif z.Tags == nil {\n\t\t\tz.Tags = make(map[string]string, 2)\n\t\t}\n\t\tif s.Status.Code != 0 {\n\t\t\tz.Tags[statusCodeTagKey] = canonicalCodeString(s.Status.Code)\n\t\t}\n\t\tif s.Status.Message != \"\" {\n\t\t\tz.Tags[statusDescriptionTagKey] = s.Status.Message\n\t\t}\n\t}\n\n\t\/\/ construct Annotations from s.Annotations and s.MessageEvents.\n\tif len(s.Annotations) != 0 || len(s.MessageEvents) != 0 {\n\t\tz.Annotations = make([]model.Annotation, 0, len(s.Annotations)+len(s.MessageEvents))\n\t\tfor _, a := range s.Annotations {\n\t\t\tz.Annotations = append(z.Annotations, model.Annotation{\n\t\t\t\tTimestamp: a.Time,\n\t\t\t\tValue: a.Message,\n\t\t\t})\n\t\t}\n\t\tfor _, m := range s.MessageEvents {\n\t\t\ta := model.Annotation{\n\t\t\t\tTimestamp: m.Time,\n\t\t\t}\n\t\t\tswitch m.EventType {\n\t\t\tcase trace.MessageEventTypeSent:\n\t\t\t\ta.Value = \"SENT\"\n\t\t\tcase trace.MessageEventTypeRecv:\n\t\t\t\ta.Value = \"RECV\"\n\t\t\tdefault:\n\t\t\t\ta.Value = \"<?>\"\n\t\t\t}\n\t\t\tz.Annotations = append(z.Annotations, a)\n\t\t}\n\t}\n\n\treturn z\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n)\n\nfunc main() {\n\tif err := RootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(-1)\n\t}\n}\n<commit_msg>Use log.Fatalf<commit_after>package main\n\nimport (\n\t\"log\"\n)\n\nfunc main() {\n\tif err := RootCmd.Execute(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>remove sha1 field<commit_after><|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"bytes\"\n\t\"database\/sql\"\n\t\"image\/png\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/mdlayher\/wavepipe\/data\"\n\n\t\"github.com\/gorilla\/context\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/mdlayher\/waveform\"\n\t\"github.com\/unrolled\/render\"\n)\n\n\/\/ waveformCache stores encoded waveform images in-memory, for re-use\n\/\/ through multiple HTTP calls\nvar waveformCache = map[int][]byte{}\n\n\/\/ GetWaveform generates and returns a waveform image from wavepipe. On success, this API will\n\/\/ return a binary stream. On failure, it will return a JSON error.\nfunc GetWaveform(w http.ResponseWriter, r *http.Request) {\n\t\/\/ Retrieve render\n\tren := context.Get(r, CtxRender).(*render.Render)\n\n\t\/\/ Check API version\n\tif version, ok := mux.Vars(r)[\"version\"]; ok {\n\t\t\/\/ Check if this API call is supported in the advertised version\n\t\tif !apiVersionSet.Has(version) {\n\t\t\tren.JSON(w, 400, errRes(400, \"unsupported API version: \"+version))\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Check for an ID parameter\n\tpID, ok := mux.Vars(r)[\"id\"]\n\tif !ok {\n\t\tren.JSON(w, 400, errRes(400, \"no integer song ID provided\"))\n\t\treturn\n\t}\n\n\t\/\/ Verify valid integer ID\n\tid, err := strconv.Atoi(pID)\n\tif err != nil {\n\t\tren.JSON(w, 400, errRes(400, \"invalid integer song ID\"))\n\t\treturn\n\t}\n\n\t\/\/ Attempt to load the song with matching ID\n\tsong := &data.Song{ID: id}\n\tif err := song.Load(); err != nil {\n\t\t\/\/ Check for invalid ID\n\t\tif err == sql.ErrNoRows {\n\t\t\tren.JSON(w, 404, errRes(404, \"song ID not found\"))\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ All other errors\n\t\tlog.Println(err)\n\t\tren.JSON(w, 500, serverErr)\n\t\treturn\n\t}\n\n\t\/\/ Check for a cached waveform\n\tif _, ok := waveformCache[id]; ok {\n\t\t\/\/ Send cached data to HTTP writer\n\t\tif _, err := io.Copy(w, bytes.NewReader(waveformCache[id])); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\treturn\n\t}\n\n\t\/\/ Open song's backing stream\n\tstream, err := song.Stream()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tren.JSON(w, 500, serverErr)\n\t\treturn\n\t}\n\n\t\/\/ Generate a waveform from this song\n\timg, err := waveform.New(stream, &waveform.Options{\n\t\tScaleX: 2,\n\t\tScaleY: 2,\n\t\tResolution: 2,\n\t\tScaleRMS: true,\n\t\tSharpness: 1,\n\t})\n\tif err != nil {\n\t\t\/\/ If unknown format, return JSON error\n\t\tif err == waveform.ErrFormat {\n\t\t\tren.JSON(w, 501, errRes(501, \"unsupported audio format\"))\n\t\t\treturn\n\t\t}\n\n\t\tlog.Println(err)\n\t\tren.JSON(w, 500, serverErr)\n\t\treturn\n\t}\n\n\t\/\/ Encode as PNG into buffer\n\tbuf := bytes.NewBuffer(nil)\n\tif err := png.Encode(buf, img); err != nil {\n\t\tlog.Println(err)\n\t}\n\n\t\/\/ Store cached image\n\twaveformCache[id] = buf.Bytes()\n\n\t\/\/ Send over HTTP\n\tif _, err := io.Copy(w, buf); err != nil {\n\t\tlog.Println(err)\n\t}\n}\n<commit_msg>api\/waveform: limit waveform cache to 10 entries<commit_after>package api\n\nimport (\n\t\"bytes\"\n\t\"database\/sql\"\n\t\"image\/png\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/mdlayher\/wavepipe\/data\"\n\n\t\"github.com\/gorilla\/context\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/mdlayher\/waveform\"\n\t\"github.com\/unrolled\/render\"\n)\n\n\/\/ waveformCache stores encoded waveform images in-memory, for re-use\n\/\/ through multiple HTTP calls\nvar waveformCache = map[int][]byte{}\n\n\/\/ waveformList tracks insertion order for cached waveforms, and enables the removal\n\/\/ of the oldest waveform once a threshold is reached\nvar waveformList = []int{}\n\n\/\/ GetWaveform generates and returns a waveform image from wavepipe. On success, this API will\n\/\/ return a binary stream. On failure, it will return a JSON error.\nfunc GetWaveform(w http.ResponseWriter, r *http.Request) {\n\t\/\/ Retrieve render\n\tren := context.Get(r, CtxRender).(*render.Render)\n\n\t\/\/ Check API version\n\tif version, ok := mux.Vars(r)[\"version\"]; ok {\n\t\t\/\/ Check if this API call is supported in the advertised version\n\t\tif !apiVersionSet.Has(version) {\n\t\t\tren.JSON(w, 400, errRes(400, \"unsupported API version: \"+version))\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Check for an ID parameter\n\tpID, ok := mux.Vars(r)[\"id\"]\n\tif !ok {\n\t\tren.JSON(w, 400, errRes(400, \"no integer song ID provided\"))\n\t\treturn\n\t}\n\n\t\/\/ Verify valid integer ID\n\tid, err := strconv.Atoi(pID)\n\tif err != nil {\n\t\tren.JSON(w, 400, errRes(400, \"invalid integer song ID\"))\n\t\treturn\n\t}\n\n\t\/\/ Attempt to load the song with matching ID\n\tsong := &data.Song{ID: id}\n\tif err := song.Load(); err != nil {\n\t\t\/\/ Check for invalid ID\n\t\tif err == sql.ErrNoRows {\n\t\t\tren.JSON(w, 404, errRes(404, \"song ID not found\"))\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ All other errors\n\t\tlog.Println(err)\n\t\tren.JSON(w, 500, serverErr)\n\t\treturn\n\t}\n\n\t\/\/ Check for a cached waveform\n\tif _, ok := waveformCache[id]; ok {\n\t\t\/\/ Send cached data to HTTP writer\n\t\tif _, err := io.Copy(w, bytes.NewReader(waveformCache[id])); err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\treturn\n\t}\n\n\t\/\/ Open song's backing stream\n\tstream, err := song.Stream()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tren.JSON(w, 500, serverErr)\n\t\treturn\n\t}\n\n\t\/\/ Generate a waveform from this song\n\timg, err := waveform.New(stream, &waveform.Options{\n\t\tScaleX: 2,\n\t\tScaleY: 2,\n\t\tResolution: 2,\n\t\tScaleRMS: true,\n\t\tSharpness: 1,\n\t})\n\tif err != nil {\n\t\t\/\/ If unknown format, return JSON error\n\t\tif err == waveform.ErrFormat {\n\t\t\tren.JSON(w, 501, errRes(501, \"unsupported audio format\"))\n\t\t\treturn\n\t\t}\n\n\t\tlog.Println(err)\n\t\tren.JSON(w, 500, serverErr)\n\t\treturn\n\t}\n\n\t\/\/ Encode as PNG into buffer\n\tbuf := bytes.NewBuffer(nil)\n\tif err := png.Encode(buf, img); err != nil {\n\t\tlog.Println(err)\n\t}\n\n\t\/\/ Store cached image, append to cache list\n\twaveformCache[id] = buf.Bytes()\n\twaveformList = append(waveformList, id)\n\n\t\/\/ If threshold reached, remove oldest waveform from cache\n\tif len(waveformList) > 2 {\n\t\toldest := waveformList[0]\n\t\twaveformList = waveformList[1:]\n\t\tdelete(waveformCache, oldest)\n\t}\n\n\t\/\/ Send over HTTP\n\tif _, err := io.Copy(w, buf); err != nil {\n\t\tlog.Println(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>getAllCategoriesExcept works<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Andreas Koch. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage video\n\nimport (\n\t\"fmt\"\n\t\"github.com\/andreaskoch\/allmark2\/common\/paths\"\n\t\"github.com\/andreaskoch\/allmark2\/model\"\n\t\"github.com\/andreaskoch\/allmark2\/services\/converter\/markdowntohtml\/pattern\"\n\t\"github.com\/andreaskoch\/allmark2\/services\/converter\/markdowntohtml\/util\"\n\t\"mime\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar (\n\t\/\/ video: [*description text*](*a link to a youtube video or to a video file*)\n\tmarkdownPattern = regexp.MustCompile(`video: \\[([^\\]]+)\\]\\(([^)]+)\\)`)\n\n\t\/\/ youtube video link pattern\n\tyouTubeVideoPattern = regexp.MustCompile(`http[s]?:\/\/www\\.youtube\\.com\/watch\\?v=([^&]+)`)\n\n\t\/\/ vimeo video link pattern\n\tvimeoVideoPattern = regexp.MustCompile(`http[s]?:\/\/vimeo\\.com\/([\\d]+)`)\n)\n\nfunc New(pathProvider paths.Pather, files []*model.File) *VideoExtension {\n\treturn &VideoExtension{\n\t\tpathProvider: pathProvider,\n\t\tfiles: files,\n\t}\n}\n\ntype VideoExtension struct {\n\tpathProvider paths.Pather\n\tfiles []*model.File\n}\n\nfunc (converter *VideoExtension) Convert(markdown string) (convertedContent string, converterError error) {\n\n\tconvertedContent = markdown\n\n\tfor {\n\n\t\tfound, matches := pattern.IsMatch(convertedContent, markdownPattern)\n\t\tif !found || (found && len(matches) != 3) {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ parameters\n\t\toriginalText := strings.TrimSpace(matches[0])\n\t\ttitle := strings.TrimSpace(matches[1])\n\t\tpath := strings.TrimSpace(matches[2])\n\n\t\t\/\/ get the code\n\t\trenderedCode := converter.getVideoCode(title, path)\n\n\t\t\/\/ replace markdown\n\t\tconvertedContent = strings.Replace(convertedContent, originalText, renderedCode, 1)\n\n\t}\n\n\treturn convertedContent, nil\n}\n\nfunc (converter *VideoExtension) getMatchingFile(path string) *model.File {\n\tfor _, file := range converter.files {\n\t\tif file.Route().IsMatch(path) && util.IsVideoFile(file) {\n\t\t\treturn file\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (converter *VideoExtension) getVideoCode(title, path string) string {\n\n\tfallback := util.GetHtmlLinkCode(title, path)\n\n\t\/\/ internal video file\n\tif util.IsInternalLink(path) {\n\n\t\tif videoFile := converter.getMatchingFile(path); videoFile != nil {\n\n\t\t\tif mimeType, err := util.GetMimeType(videoFile); err == nil {\n\t\t\t\tfilepath := converter.pathProvider.Path(videoFile.Route().Value())\n\t\t\t\treturn renderVideoFileLink(title, filepath, mimeType)\n\t\t\t}\n\n\t\t}\n\n\t} else {\n\n\t\t\/\/ external: youtube\n\t\tif isYouTube, videoId := isYouTubeLink(path); isYouTube {\n\t\t\treturn renderYouTubeVideo(title, videoId)\n\t\t}\n\n\t\t\/\/ external: vimeo\n\t\tif isVimeo, videoId := isVimeoLink(path); isVimeo {\n\t\t\treturn renderVimeoVideo(title, videoId)\n\t\t}\n\n\t\t\/\/ external: html5 video file\n\t\tif isVideoFile, mimeType := isVideoFileLink(path); isVideoFile {\n\t\t\treturn renderVideoFileLink(title, path, mimeType)\n\t\t}\n\n\t}\n\n\t\/\/ return the fallback handler\n\treturn fallback\n}\n\nfunc isYouTubeLink(link string) (isYouTubeLink bool, videoId string) {\n\tif found, matches := pattern.IsMatch(link, youTubeVideoPattern); found && len(matches) == 2 {\n\t\treturn true, matches[1]\n\t}\n\n\treturn false, \"\"\n}\n\nfunc renderYouTubeVideo(title, videoId string) string {\n\treturn fmt.Sprintf(`<section class=\"video video-external video-youtube\">\n\t\t<header><a href=\"\/\/www.youtube.com\/watch?v=%s\" target=\"_blank\" title=\"%s\">%s<\/a><\/header>\n\t\t<iframe width=\"560\" height=\"315\" src=\"\/\/www.youtube.com\/embed\/%s\" frameborder=\"0\" allowfullscreen><\/iframe>\n\t<\/section>`, videoId, title, title, videoId)\n}\n\nfunc isVimeoLink(link string) (isVimeoLink bool, videoId string) {\n\tif found, matches := pattern.IsMatch(link, vimeoVideoPattern); found && len(matches) == 2 {\n\t\treturn true, matches[1]\n\t}\n\n\treturn false, \"\"\n}\n\nfunc renderVimeoVideo(title, videoId string) string {\n\treturn fmt.Sprintf(`<section class=\"video video-external video-vimeo\">\n\t\t<header><a href=\"\/\/vimeo.com\/%s\" target=\"_blank\" title=\"%s\">%s<\/a><\/header>\n\t\t<iframe src=\"\/\/player.vimeo.com\/video\/%s\" width=\"560\" height=\"315\" frameborder=\"0\" webkitAllowFullScreen mozallowfullscreen allowFullScreen><\/iframe>\n\t<\/section>`, videoId, title, title, videoId)\n}\n\nfunc isVideoFileLink(link string) (isVideoFile bool, mimeType string) {\n\n\t\/\/ abort if the link does not contain a dot\n\tif !strings.Contains(link, \".\") {\n\t\treturn false, \"\"\n\t}\n\n\tnormalizedLink := strings.ToLower(link)\n\tfileExtension := normalizedLink[strings.LastIndex(normalizedLink, \".\"):]\n\tmimeType = mime.TypeByExtension(fileExtension)\n\n\tswitch fileExtension {\n\tcase \".mp4\", \".ogg\", \".ogv\", \".webm\", \".3gp\":\n\t\treturn true, mimeType\n\tdefault:\n\t\treturn false, \"\"\n\t}\n\n\tpanic(\"Unreachable\")\n}\n\nfunc renderVideoFileLink(title, link, mimetype string) string {\n\treturn fmt.Sprintf(`<section class=\"video video-file\">\n\t\t<header><a href=\"%s\" target=\"_blank\" title=\"%s\">%s<\/a><\/header>\n\t\t<video width=\"560\" height=\"315\" controls>\n\t\t\t<source src=\"%s\" type=\"%s\">\n\t\t<\/video>\n\t<\/section>`, link, title, title, link, mimetype)\n}\n<commit_msg>Always embed youtube and video links via https<commit_after>\/\/ Copyright 2014 Andreas Koch. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage video\n\nimport (\n\t\"fmt\"\n\t\"github.com\/andreaskoch\/allmark2\/common\/paths\"\n\t\"github.com\/andreaskoch\/allmark2\/model\"\n\t\"github.com\/andreaskoch\/allmark2\/services\/converter\/markdowntohtml\/pattern\"\n\t\"github.com\/andreaskoch\/allmark2\/services\/converter\/markdowntohtml\/util\"\n\t\"mime\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar (\n\t\/\/ video: [*description text*](*a link to a youtube video or to a video file*)\n\tmarkdownPattern = regexp.MustCompile(`video: \\[([^\\]]+)\\]\\(([^)]+)\\)`)\n\n\t\/\/ youtube video link pattern\n\tyouTubeVideoPattern = regexp.MustCompile(`http[s]?:\/\/www\\.youtube\\.com\/watch\\?v=([^&]+)`)\n\n\t\/\/ vimeo video link pattern\n\tvimeoVideoPattern = regexp.MustCompile(`http[s]?:\/\/vimeo\\.com\/([\\d]+)`)\n)\n\nfunc New(pathProvider paths.Pather, files []*model.File) *VideoExtension {\n\treturn &VideoExtension{\n\t\tpathProvider: pathProvider,\n\t\tfiles: files,\n\t}\n}\n\ntype VideoExtension struct {\n\tpathProvider paths.Pather\n\tfiles []*model.File\n}\n\nfunc (converter *VideoExtension) Convert(markdown string) (convertedContent string, converterError error) {\n\n\tconvertedContent = markdown\n\n\tfor {\n\n\t\tfound, matches := pattern.IsMatch(convertedContent, markdownPattern)\n\t\tif !found || (found && len(matches) != 3) {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ parameters\n\t\toriginalText := strings.TrimSpace(matches[0])\n\t\ttitle := strings.TrimSpace(matches[1])\n\t\tpath := strings.TrimSpace(matches[2])\n\n\t\t\/\/ get the code\n\t\trenderedCode := converter.getVideoCode(title, path)\n\n\t\t\/\/ replace markdown\n\t\tconvertedContent = strings.Replace(convertedContent, originalText, renderedCode, 1)\n\n\t}\n\n\treturn convertedContent, nil\n}\n\nfunc (converter *VideoExtension) getMatchingFile(path string) *model.File {\n\tfor _, file := range converter.files {\n\t\tif file.Route().IsMatch(path) && util.IsVideoFile(file) {\n\t\t\treturn file\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (converter *VideoExtension) getVideoCode(title, path string) string {\n\n\tfallback := util.GetHtmlLinkCode(title, path)\n\n\t\/\/ internal video file\n\tif util.IsInternalLink(path) {\n\n\t\tif videoFile := converter.getMatchingFile(path); videoFile != nil {\n\n\t\t\tif mimeType, err := util.GetMimeType(videoFile); err == nil {\n\t\t\t\tfilepath := converter.pathProvider.Path(videoFile.Route().Value())\n\t\t\t\treturn renderVideoFileLink(title, filepath, mimeType)\n\t\t\t}\n\n\t\t}\n\n\t} else {\n\n\t\t\/\/ external: youtube\n\t\tif isYouTube, videoId := isYouTubeLink(path); isYouTube {\n\t\t\treturn renderYouTubeVideo(title, videoId)\n\t\t}\n\n\t\t\/\/ external: vimeo\n\t\tif isVimeo, videoId := isVimeoLink(path); isVimeo {\n\t\t\treturn renderVimeoVideo(title, videoId)\n\t\t}\n\n\t\t\/\/ external: html5 video file\n\t\tif isVideoFile, mimeType := isVideoFileLink(path); isVideoFile {\n\t\t\treturn renderVideoFileLink(title, path, mimeType)\n\t\t}\n\n\t}\n\n\t\/\/ return the fallback handler\n\treturn fallback\n}\n\nfunc isYouTubeLink(link string) (isYouTubeLink bool, videoId string) {\n\tif found, matches := pattern.IsMatch(link, youTubeVideoPattern); found && len(matches) == 2 {\n\t\treturn true, matches[1]\n\t}\n\n\treturn false, \"\"\n}\n\nfunc renderYouTubeVideo(title, videoId string) string {\n\treturn fmt.Sprintf(`<section class=\"video video-external video-youtube\">\n\t\t<header><a href=\"https:\/\/www.youtube.com\/watch?v=%s\" target=\"_blank\" title=\"%s\">%s<\/a><\/header>\n\t\t<iframe width=\"560\" height=\"315\" src=\"https:\/\/www.youtube.com\/embed\/%s\" frameborder=\"0\" allowfullscreen><\/iframe>\n\t<\/section>`, videoId, title, title, videoId)\n}\n\nfunc isVimeoLink(link string) (isVimeoLink bool, videoId string) {\n\tif found, matches := pattern.IsMatch(link, vimeoVideoPattern); found && len(matches) == 2 {\n\t\treturn true, matches[1]\n\t}\n\n\treturn false, \"\"\n}\n\nfunc renderVimeoVideo(title, videoId string) string {\n\treturn fmt.Sprintf(`<section class=\"video video-external video-vimeo\">\n\t\t<header><a href=\"https:\/\/vimeo.com\/%s\" target=\"_blank\" title=\"%s\">%s<\/a><\/header>\n\t\t<iframe src=\"https:\/\/player.vimeo.com\/video\/%s\" width=\"560\" height=\"315\" frameborder=\"0\" webkitAllowFullScreen mozallowfullscreen allowFullScreen><\/iframe>\n\t<\/section>`, videoId, title, title, videoId)\n}\n\nfunc isVideoFileLink(link string) (isVideoFile bool, mimeType string) {\n\n\t\/\/ abort if the link does not contain a dot\n\tif !strings.Contains(link, \".\") {\n\t\treturn false, \"\"\n\t}\n\n\tnormalizedLink := strings.ToLower(link)\n\tfileExtension := normalizedLink[strings.LastIndex(normalizedLink, \".\"):]\n\tmimeType = mime.TypeByExtension(fileExtension)\n\n\tswitch fileExtension {\n\tcase \".mp4\", \".ogg\", \".ogv\", \".webm\", \".3gp\":\n\t\treturn true, mimeType\n\tdefault:\n\t\treturn false, \"\"\n\t}\n\n\tpanic(\"Unreachable\")\n}\n\nfunc renderVideoFileLink(title, link, mimetype string) string {\n\treturn fmt.Sprintf(`<section class=\"video video-file\">\n\t\t<header><a href=\"%s\" target=\"_blank\" title=\"%s\">%s<\/a><\/header>\n\t\t<video width=\"560\" height=\"315\" controls>\n\t\t\t<source src=\"%s\" type=\"%s\">\n\t\t<\/video>\n\t<\/section>`, link, title, title, link, mimetype)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"fmt\"\nfunc fib(n int) int {\n if n == 0 {\n return 0\n }\n if n == 1 {\n return 1\n }\n return fib(n-1)+fib(n-2)\n}\nfunc main() {\n fmt.Print(\"Введите число: \")\n var n int\n fmt.Scanf(\"%d\", &n)\n fmt.Println(fib(n))\n }\n<commit_msg>телефонная книга<commit_after>package main\n\nimport \"fmt\"\n\nfunc main() {\n i:= 0\n fmt.Print(\n \"1. Показать книгу\"\n \"2. Добавить контакт\"\n \"3.Удалить контакт\"\n \"4. Найти контакт\"\n )\n fmt.Scanf(\"%i\",&i)\n switch i{\n case 1: \n }\n \n var name string\n fmt.Print(\"Введите имя: \")\n fmt.Scanf(\"%s\", &name)\n var number string\n fmt.Print(\"Введите телефон: \")\n fmt.Scanf(\"%s\", &number)\n entry := map[string]string{\n name: number,\n }\n \n for i, v:= range entry{\n fmt.Println( i,v)\n }\n \n}<|endoftext|>"} {"text":"<commit_before>package maven\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\/exec\"\n)\n\nfunc Clean(localCache, projectDirectory string) ([]byte, error) {\n\t\/\/ fmt.Printf(\"in clean function.\\n\") \/\/ debug\n\n\t\/\/ run the clean command for the specified\n\t\/\/ project directory\n\tcleanCommand := exec.Command(\n\t\t\"mvn\",\n\t\t\"clean\",\n\t\t\"-f\",\n\t\tprojectDirectory,\n\t)\n\n\t\/\/ add localcache option flag\n\tif localCache != \"\" {\n\t\tmavenOpts := fmt.Sprintf(\n\t\t\t\"-Dmaven.repo.local=%s\", localCache)\n\t\t\/\/ fmt.Printf(mavenOpts) \/\/ debug\n\t\tcleanCommand = exec.Command(\n\t\t\t\"mvn\",\n\t\t\t\"clean\",\n\t\t\t\"-f\",\n\t\t\tprojectDirectory,\n\t\t\tmavenOpts,\n\t\t)\n\t}\n\toutput, err := cleanCommand.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = cleanCommand.Start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\toutputBytes, err := ioutil.ReadAll(output)\n\tif err != nil {\n\t\tfmt.Printf(\"err:\\n%v\\n\", err)\n\t\treturn nil, err\n\t}\n\n\tcleanCommand.Wait()\n\n\treturn outputBytes, nil\n}\n<commit_msg>remove clean command.<commit_after><|endoftext|>"} {"text":"<commit_before>package mcmap\n\nimport (\n\t\"errors\"\n\t\"github.com\/kch42\/gonbt\/nbt\"\n\t\"time\"\n)\n\nfunc calcBlockOffset(x, y, z int) int {\n\tif (x < 0) || (y < 0) || (z < 0) || (x >= ChunkSizeXZ) || (y >= ChunkSizeY) || (z >= ChunkSizeXZ) {\n\t\treturn -1\n\t}\n\n\treturn x | (z << 4) | (y << 8)\n}\n\nfunc offsetToPos(off int) (x, y, z int) {\n\tx = off & 0xf\n\tz = (off >> 4) & 0xf\n\ty = (off >> 8) & 0xff\n\treturn\n}\n\n\/\/ BlockToChunk calculates the chunk (cx, cz) and the block position in this chunk(rbx, rbz) of a block position given global coordinates.\nfunc BlockToChunk(bx, bz int) (cx, cz, rbx, rbz int) {\n\tcx = bx << 4\n\tcz = bz << 4\n\trbx = ((cx % ChunkSizeXZ) + ChunkSizeXZ) % ChunkSizeXZ\n\trbz = ((cz % ChunkSizeXZ) + ChunkSizeXZ) % ChunkSizeXZ\n\treturn\n}\n\n\/\/ ChunkToBlock calculates the global position of a block, given the chunk position (cx, cz) and the plock position in that chunk (rbx, rbz).\nfunc ChunkToBlock(cx, cz, rbx, rbz int) (bx, bz int) {\n\tbx = cx*ChunkSizeXZ + rbx\n\tbz = cz*ChunkSizeXZ + rbz\n\treturn\n}\n\n\/\/ Chunk represents a 16*16*256 Chunk of the region.\ntype Chunk struct {\n\tEntities []nbt.TagCompound\n\n\tx, z int32\n\n\tlastUpdate int64\n\tpopulated bool\n\tinhabitedTime int64\n\tts time.Time\n\n\theightMap []int32 \/\/ Ordered ZX\n\n\tmodified bool\n\tblocks []Block \/\/ Ordered YZX\n\tbiomes []Biome \/\/ Ordered ZX\n\n\treg *Region\n}\n\n\/\/ MarkModified needs to be called, if some data of the chunk was modified.\nfunc (c *Chunk) MarkModified() { c.modified = true }\n\n\/\/ Coords returns the Chunk's coordinates.\nfunc (c *Chunk) Coords() (X, Z int32) { return c.x, c.z }\n\n\/\/ Block gives you a reference to the Block located at x, y, z. If you modify the block data, you need to call the MarkModified() function of the chunk.\n\/\/\n\/\/ x and z must be in [0, 15], y in [0, 255]. Otherwise a nil pointer is returned.\nfunc (c *Chunk) Block(x, y, z int) *Block {\n\toff := calcBlockOffset(x, y, z)\n\tif off < 0 {\n\t\treturn nil\n\t}\n\n\treturn &(c.blocks[off])\n}\n\n\/\/ Height returns the height at x, z.\n\/\/\n\/\/ x and z must be in [0, 15]. Height will panic, if this is violated!\nfunc (c *Chunk) Height(x, z int) int {\n\tif (x < 0) || (x >= ChunkSizeXZ) || (z < 0) || (z >= ChunkSizeXZ) {\n\t\tpanic(errors.New(\"x or z parameter was out of range\"))\n\t}\n\n\treturn int(c.heightMap[z*ChunkSizeXZ+x])\n}\n\n\/\/ Iter iterates ofer all blocks of this chunk and calls the function fx with the coords (x,y,z) and a pointer to the block.\nfunc (c *Chunk) Iter(fx func(int, int, int, *Block)) {\n\tfor x := 0; x < ChunkSizeXZ; x++ {\n\t\tfor y := 0; y < ChunkSizeY; y++ {\n\t\t\tfor z := 0; z < ChunkSizeXZ; z++ {\n\t\t\t\tfx(x, y, z, &(c.blocks[calcBlockOffset(x, y, z)]))\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Biome gets the Biome at x,z.\nfunc (c *Chunk) Biome(x, z int) Biome { return c.biomes[z*ChunkSizeXZ+x] }\n\n\/\/ SetBiome sets the biome at x,z.\nfunc (c *Chunk) SetBiome(x, z int, bio Biome) { c.biomes[z*ChunkSizeXZ+x] = bio }\n\n\/\/ MarkUnused marks the chunk as unused. If all chunks of a superchunk are marked as unused, the superchunk will be unloaded and saved (if needed).\n\/\/\n\/\/ You must not use the chunk any longer, after you called this function.\n\/\/\n\/\/ If the chunk was modified, call MarkModified BEFORE.\nfunc (c *Chunk) MarkUnused() error { return c.reg.unloadChunk(int(c.x), int(c.z)) }\n\n\/\/ RecalcHeightMap recalculates the internal height map.\n\/\/\n\/\/ You should use this function before marking the chunk as unused, if you modified the chunk\n\/\/ (unless you know, your changes wouldn't affect the height map).\nfunc (c *Chunk) RecalcHeightMap() {\n\ti := 0\n\tfor z := 0; z < ChunkSizeXZ; z++ {\n\t\tfor x := 0; x < ChunkSizeXZ; x++ {\n\t\t\tfor y := ChunkSizeY; y >= 0; y-- {\n\t\t\t\tblkid := c.blocks[calcBlockOffset(x, y, z)].ID\n\t\t\t\tif (blkid != BlkAir) && (blkid != BlkGlass) && (blkid != BlkGlassPane) {\n\t\t\t\t\tc.heightMap[i] = int32(y)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t}\n}\n<commit_msg>Fixed BlockToChunk<commit_after>package mcmap\n\nimport (\n\t\"errors\"\n\t\"github.com\/kch42\/gonbt\/nbt\"\n\t\"time\"\n)\n\nfunc calcBlockOffset(x, y, z int) int {\n\tif (x < 0) || (y < 0) || (z < 0) || (x >= ChunkSizeXZ) || (y >= ChunkSizeY) || (z >= ChunkSizeXZ) {\n\t\treturn -1\n\t}\n\n\treturn x | (z << 4) | (y << 8)\n}\n\nfunc offsetToPos(off int) (x, y, z int) {\n\tx = off & 0xf\n\tz = (off >> 4) & 0xf\n\ty = (off >> 8) & 0xff\n\treturn\n}\n\n\/\/ BlockToChunk calculates the chunk (cx, cz) and the block position in this chunk(rbx, rbz) of a block position given global coordinates.\nfunc BlockToChunk(bx, bz int) (cx, cz, rbx, rbz int) {\n\tcx = bx >> 4\n\tcz = bz >> 4\n\trbx = ((cx % ChunkSizeXZ) + ChunkSizeXZ) % ChunkSizeXZ\n\trbz = ((cz % ChunkSizeXZ) + ChunkSizeXZ) % ChunkSizeXZ\n\treturn\n}\n\n\/\/ ChunkToBlock calculates the global position of a block, given the chunk position (cx, cz) and the plock position in that chunk (rbx, rbz).\nfunc ChunkToBlock(cx, cz, rbx, rbz int) (bx, bz int) {\n\tbx = cx*ChunkSizeXZ + rbx\n\tbz = cz*ChunkSizeXZ + rbz\n\treturn\n}\n\n\/\/ Chunk represents a 16*16*256 Chunk of the region.\ntype Chunk struct {\n\tEntities []nbt.TagCompound\n\n\tx, z int32\n\n\tlastUpdate int64\n\tpopulated bool\n\tinhabitedTime int64\n\tts time.Time\n\n\theightMap []int32 \/\/ Ordered ZX\n\n\tmodified bool\n\tblocks []Block \/\/ Ordered YZX\n\tbiomes []Biome \/\/ Ordered ZX\n\n\treg *Region\n}\n\n\/\/ MarkModified needs to be called, if some data of the chunk was modified.\nfunc (c *Chunk) MarkModified() { c.modified = true }\n\n\/\/ Coords returns the Chunk's coordinates.\nfunc (c *Chunk) Coords() (X, Z int32) { return c.x, c.z }\n\n\/\/ Block gives you a reference to the Block located at x, y, z. If you modify the block data, you need to call the MarkModified() function of the chunk.\n\/\/\n\/\/ x and z must be in [0, 15], y in [0, 255]. Otherwise a nil pointer is returned.\nfunc (c *Chunk) Block(x, y, z int) *Block {\n\toff := calcBlockOffset(x, y, z)\n\tif off < 0 {\n\t\treturn nil\n\t}\n\n\treturn &(c.blocks[off])\n}\n\n\/\/ Height returns the height at x, z.\n\/\/\n\/\/ x and z must be in [0, 15]. Height will panic, if this is violated!\nfunc (c *Chunk) Height(x, z int) int {\n\tif (x < 0) || (x >= ChunkSizeXZ) || (z < 0) || (z >= ChunkSizeXZ) {\n\t\tpanic(errors.New(\"x or z parameter was out of range\"))\n\t}\n\n\treturn int(c.heightMap[z*ChunkSizeXZ+x])\n}\n\n\/\/ Iter iterates ofer all blocks of this chunk and calls the function fx with the coords (x,y,z) and a pointer to the block.\nfunc (c *Chunk) Iter(fx func(int, int, int, *Block)) {\n\tfor x := 0; x < ChunkSizeXZ; x++ {\n\t\tfor y := 0; y < ChunkSizeY; y++ {\n\t\t\tfor z := 0; z < ChunkSizeXZ; z++ {\n\t\t\t\tfx(x, y, z, &(c.blocks[calcBlockOffset(x, y, z)]))\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Biome gets the Biome at x,z.\nfunc (c *Chunk) Biome(x, z int) Biome { return c.biomes[z*ChunkSizeXZ+x] }\n\n\/\/ SetBiome sets the biome at x,z.\nfunc (c *Chunk) SetBiome(x, z int, bio Biome) { c.biomes[z*ChunkSizeXZ+x] = bio }\n\n\/\/ MarkUnused marks the chunk as unused. If all chunks of a superchunk are marked as unused, the superchunk will be unloaded and saved (if needed).\n\/\/\n\/\/ You must not use the chunk any longer, after you called this function.\n\/\/\n\/\/ If the chunk was modified, call MarkModified BEFORE.\nfunc (c *Chunk) MarkUnused() error { return c.reg.unloadChunk(int(c.x), int(c.z)) }\n\n\/\/ RecalcHeightMap recalculates the internal height map.\n\/\/\n\/\/ You should use this function before marking the chunk as unused, if you modified the chunk\n\/\/ (unless you know, your changes wouldn't affect the height map).\nfunc (c *Chunk) RecalcHeightMap() {\n\ti := 0\n\tfor z := 0; z < ChunkSizeXZ; z++ {\n\t\tfor x := 0; x < ChunkSizeXZ; x++ {\n\t\t\tfor y := ChunkSizeY; y >= 0; y-- {\n\t\t\t\tblkid := c.blocks[calcBlockOffset(x, y, z)].ID\n\t\t\t\tif (blkid != BlkAir) && (blkid != BlkGlass) && (blkid != BlkGlassPane) {\n\t\t\t\t\tc.heightMap[i] = int32(y)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package veneur\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/DataDog\/datadog-go\/statsd\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/stripe\/veneur\/samplers\"\n\t\"github.com\/stripe\/veneur\/trace\"\n)\n\ntype metricSink interface {\n\tName() string\n\tFlush(context.Context, []samplers.InterMetric) error\n\t\/\/ This one is temporary?\n\tFlushEventsChecks(ctx context.Context, events []samplers.UDPEvent, checks []samplers.UDPServiceCheck)\n}\n\ntype datadogMetricSink struct {\n\tHTTPClient *http.Client\n\tddHostname string\n\thostname string\n\tapiKey string\n\tflushMaxPerBody int\n\tstatsd *statsd.Client\n\ttags []string\n\tinterval float64\n}\n\n\/\/ NewDatadogMetricSink creates a new Datadog sink for trace spans.\nfunc NewDatadogMetricSink(config *Config, interval float64, httpClient *http.Client, stats *statsd.Client) (*datadogMetricSink, error) {\n\treturn &datadogMetricSink{\n\t\tHTTPClient: httpClient,\n\t\tstatsd: stats,\n\t\tinterval: interval,\n\t\tflushMaxPerBody: config.FlushMaxPerBody,\n\t\tddHostname: config.DatadogAPIHostname,\n\t\tapiKey: config.DatadogAPIKey,\n\t}, nil\n}\n\n\/\/ Name returns the name of this sink.\nfunc (dd *datadogMetricSink) Name() string {\n\treturn \"datadog\"\n}\n\nfunc (dd *datadogMetricSink) Flush(ctx context.Context, interMetrics []samplers.InterMetric) error {\n\tspan, _ := trace.StartSpanFromContext(ctx, \"\")\n\tdefer span.Finish()\n\n\tmetrics := dd.finalizeMetrics(interMetrics)\n\n\t\/\/ break the metrics into chunks of approximately equal size, such that\n\t\/\/ each chunk is less than the limit\n\t\/\/ we compute the chunks using rounding-up integer division\n\tworkers := ((len(metrics) - 1) \/ dd.flushMaxPerBody) + 1\n\tchunkSize := ((len(metrics) - 1) \/ workers) + 1\n\tlog.WithField(\"workers\", workers).Debug(\"Worker count chosen\")\n\tlog.WithField(\"chunkSize\", chunkSize).Debug(\"Chunk size chosen\")\n\tvar wg sync.WaitGroup\n\tflushStart := time.Now()\n\tfor i := 0; i < workers; i++ {\n\t\tchunk := metrics[i*chunkSize:]\n\t\tif i < workers-1 {\n\t\t\t\/\/ trim to chunk size unless this is the last one\n\t\t\tchunk = chunk[:chunkSize]\n\t\t}\n\t\twg.Add(1)\n\t\tgo dd.flushPart(span.Attach(ctx), chunk, &wg)\n\t}\n\twg.Wait()\n\tdd.statsd.TimeInMilliseconds(\"flush.total_duration_ns\", float64(time.Since(flushStart).Nanoseconds()), []string{\"part:post\"}, 1.0)\n\n\tlog.WithField(\"metrics\", len(metrics)).Info(\"Completed flush to Datadog\")\n\treturn nil\n}\n\nfunc (dd *datadogMetricSink) FlushEventsChecks(ctx context.Context, events []samplers.UDPEvent, checks []samplers.UDPServiceCheck) {\n\tspan, _ := trace.StartSpanFromContext(ctx, \"\")\n\tdefer span.Finish()\n\n\t\/\/ fill in the default hostname for packets that didn't set it\n\tfor i := range events {\n\t\tif events[i].Hostname == \"\" {\n\t\t\tevents[i].Hostname = dd.hostname\n\t\t}\n\t\tevents[i].Tags = append(events[i].Tags, dd.tags...)\n\t}\n\tfor i := range checks {\n\t\tif checks[i].Hostname == \"\" {\n\t\t\tchecks[i].Hostname = dd.hostname\n\t\t}\n\t\tchecks[i].Tags = append(checks[i].Tags, dd.tags...)\n\t}\n\n\tif len(events) != 0 {\n\t\t\/\/ this endpoint is not documented at all, its existence is only known from\n\t\t\/\/ the official dd-agent\n\t\t\/\/ we don't actually pass all the body keys that dd-agent passes here... but\n\t\t\/\/ it still works\n\t\terr := postHelper(context.TODO(), dd.HTTPClient, dd.statsd, fmt.Sprintf(\"%s\/intake?api_key=%s\"), map[string]map[string][]samplers.UDPEvent{\n\t\t\t\"events\": {\n\t\t\t\t\"api\": events,\n\t\t\t},\n\t\t}, \"flush_events\", true)\n\t\tif err == nil {\n\t\t\tlog.WithField(\"events\", len(events)).Info(\"Completed flushing events to Datadog\")\n\t\t} else {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"events\": len(events),\n\t\t\t\tlogrus.ErrorKey: err}).Warn(\"Error flushing events to Datadog\")\n\t\t}\n\t}\n\n\tif len(checks) != 0 {\n\t\t\/\/ this endpoint is not documented to take an array... but it does\n\t\t\/\/ another curious constraint of this endpoint is that it does not\n\t\t\/\/ support \"Content-Encoding: deflate\"\n\t\terr := postHelper(context.TODO(), dd.HTTPClient, dd.statsd, fmt.Sprintf(\"%s\/api\/v1\/check_run?api_key=%s\"), checks, \"flush_checks\", false)\n\t\tif err == nil {\n\t\t\tlog.WithField(\"checks\", len(checks)).Info(\"Completed flushing service checks to Datadog\")\n\t\t} else {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"checks\": len(checks),\n\t\t\t\tlogrus.ErrorKey: err}).Warn(\"Error flushing checks to Datadog\")\n\t\t}\n\t}\n}\n\nfunc (dd *datadogMetricSink) finalizeMetrics(metrics []samplers.InterMetric) []samplers.DDMetric {\n\tddMetrics := make([]samplers.DDMetric, len(metrics))\n\tfor i, m := range metrics {\n\t\t\/\/ Defensively copy tags since we're gonna mutate it\n\t\ttags := make([]string, len(dd.tags))\n\t\tcopy(tags, dd.tags)\n\t\tmetricType := m.MetricType\n\t\tvalue := m.Value\n\t\t\/\/ We convert Datadog counters into rates\n\t\tif m.MetricType == \"counter\" {\n\t\t\tmetricType = \"rate\"\n\t\t\tvalue = m.Value \/ dd.interval\n\t\t}\n\t\tddMetric := samplers.DDMetric{\n\t\t\tName: m.Name,\n\t\t\tValue: [1][2]float64{\n\t\t\t\t[2]float64{\n\t\t\t\t\tfloat64(m.Timestamp), value,\n\t\t\t\t},\n\t\t\t},\n\t\t\tTags: tags,\n\t\t\tMetricType: metricType,\n\t\t\tInterval: int32(dd.interval),\n\t\t}\n\n\t\t\/\/ Let's look for \"magic tags\" that override metric fields host and device.\n\t\tfor _, tag := range m.Tags {\n\t\t\t\/\/ This overrides hostname\n\t\t\tif strings.HasPrefix(tag, \"host:\") {\n\t\t\t\t\/\/ Override the hostname with the tag, trimming off the prefix.\n\t\t\t\tddMetric.Hostname = tag[5:]\n\t\t\t} else if strings.HasPrefix(tag, \"device:\") {\n\t\t\t\t\/\/ Same as above, but device this time\n\t\t\t\tddMetric.DeviceName = tag[7:]\n\t\t\t} else {\n\t\t\t\t\/\/ Add it, no reason to exclude it.\n\t\t\t\tddMetric.Tags = append(ddMetric.Tags, tag)\n\t\t\t}\n\t\t}\n\t\tif ddMetric.Hostname == \"\" {\n\t\t\t\/\/ No magic tag, set the hostname\n\t\t\tddMetric.Hostname = dd.hostname\n\t\t}\n\t\tddMetrics[i] = ddMetric\n\t}\n\n\treturn ddMetrics\n}\n\nfunc (dd *datadogMetricSink) flushPart(ctx context.Context, metricSlice []samplers.DDMetric, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tpostHelper(ctx, dd.HTTPClient, dd.statsd, fmt.Sprintf(\"%s\/api\/v1\/series?api_key=%s\", dd.ddHostname, dd.apiKey), map[string][]samplers.DDMetric{\n\t\t\"series\": metricSlice,\n\t}, \"flush\", true)\n}\n<commit_msg>Fix small field name change difference.<commit_after>package veneur\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/DataDog\/datadog-go\/statsd\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/stripe\/veneur\/samplers\"\n\t\"github.com\/stripe\/veneur\/trace\"\n)\n\ntype metricSink interface {\n\tName() string\n\tFlush(context.Context, []samplers.InterMetric) error\n\t\/\/ This one is temporary?\n\tFlushEventsChecks(ctx context.Context, events []samplers.UDPEvent, checks []samplers.UDPServiceCheck)\n}\n\ntype datadogMetricSink struct {\n\tHTTPClient *http.Client\n\tddHostname string\n\thostname string\n\tapiKey string\n\tflushMaxPerBody int\n\tstatsd *statsd.Client\n\ttags []string\n\tinterval float64\n}\n\n\/\/ NewDatadogMetricSink creates a new Datadog sink for trace spans.\nfunc NewDatadogMetricSink(config *Config, interval float64, httpClient *http.Client, stats *statsd.Client) (*datadogMetricSink, error) {\n\treturn &datadogMetricSink{\n\t\tHTTPClient: httpClient,\n\t\tstatsd: stats,\n\t\tinterval: interval,\n\t\tflushMaxPerBody: config.FlushMaxPerBody,\n\t\tddHostname: config.DatadogAPIHostname,\n\t\tapiKey: config.DatadogAPIKey,\n\t}, nil\n}\n\n\/\/ Name returns the name of this sink.\nfunc (dd *datadogMetricSink) Name() string {\n\treturn \"datadog\"\n}\n\nfunc (dd *datadogMetricSink) Flush(ctx context.Context, interMetrics []samplers.InterMetric) error {\n\tspan, _ := trace.StartSpanFromContext(ctx, \"\")\n\tdefer span.Finish()\n\n\tmetrics := dd.finalizeMetrics(interMetrics)\n\n\t\/\/ break the metrics into chunks of approximately equal size, such that\n\t\/\/ each chunk is less than the limit\n\t\/\/ we compute the chunks using rounding-up integer division\n\tworkers := ((len(metrics) - 1) \/ dd.flushMaxPerBody) + 1\n\tchunkSize := ((len(metrics) - 1) \/ workers) + 1\n\tlog.WithField(\"workers\", workers).Debug(\"Worker count chosen\")\n\tlog.WithField(\"chunkSize\", chunkSize).Debug(\"Chunk size chosen\")\n\tvar wg sync.WaitGroup\n\tflushStart := time.Now()\n\tfor i := 0; i < workers; i++ {\n\t\tchunk := metrics[i*chunkSize:]\n\t\tif i < workers-1 {\n\t\t\t\/\/ trim to chunk size unless this is the last one\n\t\t\tchunk = chunk[:chunkSize]\n\t\t}\n\t\twg.Add(1)\n\t\tgo dd.flushPart(span.Attach(ctx), chunk, &wg)\n\t}\n\twg.Wait()\n\tdd.statsd.TimeInMilliseconds(\"flush.total_duration_ns\", float64(time.Since(flushStart).Nanoseconds()), []string{\"part:post\"}, 1.0)\n\n\tlog.WithField(\"metrics\", len(metrics)).Info(\"Completed flush to Datadog\")\n\treturn nil\n}\n\nfunc (dd *datadogMetricSink) FlushEventsChecks(ctx context.Context, events []samplers.UDPEvent, checks []samplers.UDPServiceCheck) {\n\tspan, _ := trace.StartSpanFromContext(ctx, \"\")\n\tdefer span.Finish()\n\n\t\/\/ fill in the default hostname for packets that didn't set it\n\tfor i := range events {\n\t\tif events[i].Hostname == \"\" {\n\t\t\tevents[i].Hostname = dd.hostname\n\t\t}\n\t\tevents[i].Tags = append(events[i].Tags, dd.tags...)\n\t}\n\tfor i := range checks {\n\t\tif checks[i].Hostname == \"\" {\n\t\t\tchecks[i].Hostname = dd.hostname\n\t\t}\n\t\tchecks[i].Tags = append(checks[i].Tags, dd.tags...)\n\t}\n\n\tif len(events) != 0 {\n\t\t\/\/ this endpoint is not documented at all, its existence is only known from\n\t\t\/\/ the official dd-agent\n\t\t\/\/ we don't actually pass all the body keys that dd-agent passes here... but\n\t\t\/\/ it still works\n\t\terr := postHelper(context.TODO(), dd.HTTPClient, dd.statsd, fmt.Sprintf(\"%s\/intake?api_key=%s\"), map[string]map[string][]samplers.UDPEvent{\n\t\t\t\"events\": {\n\t\t\t\t\"api\": events,\n\t\t\t},\n\t\t}, \"flush_events\", true)\n\t\tif err == nil {\n\t\t\tlog.WithField(\"events\", len(events)).Info(\"Completed flushing events to Datadog\")\n\t\t} else {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"events\": len(events),\n\t\t\t\tlogrus.ErrorKey: err}).Warn(\"Error flushing events to Datadog\")\n\t\t}\n\t}\n\n\tif len(checks) != 0 {\n\t\t\/\/ this endpoint is not documented to take an array... but it does\n\t\t\/\/ another curious constraint of this endpoint is that it does not\n\t\t\/\/ support \"Content-Encoding: deflate\"\n\t\terr := postHelper(context.TODO(), dd.HTTPClient, dd.statsd, fmt.Sprintf(\"%s\/api\/v1\/check_run?api_key=%s\"), checks, \"flush_checks\", false)\n\t\tif err == nil {\n\t\t\tlog.WithField(\"checks\", len(checks)).Info(\"Completed flushing service checks to Datadog\")\n\t\t} else {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"checks\": len(checks),\n\t\t\t\tlogrus.ErrorKey: err}).Warn(\"Error flushing checks to Datadog\")\n\t\t}\n\t}\n}\n\nfunc (dd *datadogMetricSink) finalizeMetrics(metrics []samplers.InterMetric) []samplers.DDMetric {\n\tddMetrics := make([]samplers.DDMetric, len(metrics))\n\tfor i, m := range metrics {\n\t\t\/\/ Defensively copy tags since we're gonna mutate it\n\t\ttags := make([]string, len(dd.tags))\n\t\tcopy(tags, dd.tags)\n\t\tmetricType := m.Type.String()\n\t\tvalue := m.Value\n\t\t\/\/ We convert Datadog counters into rates\n\t\tif metricType == \"counter\" {\n\t\t\tmetricType = \"rate\"\n\t\t\tvalue = m.Value \/ dd.interval\n\t\t}\n\t\tddMetric := samplers.DDMetric{\n\t\t\tName: m.Name,\n\t\t\tValue: [1][2]float64{\n\t\t\t\t[2]float64{\n\t\t\t\t\tfloat64(m.Timestamp), value,\n\t\t\t\t},\n\t\t\t},\n\t\t\tTags: tags,\n\t\t\tMetricType: metricType,\n\t\t\tInterval: int32(dd.interval),\n\t\t}\n\n\t\t\/\/ Let's look for \"magic tags\" that override metric fields host and device.\n\t\tfor _, tag := range m.Tags {\n\t\t\t\/\/ This overrides hostname\n\t\t\tif strings.HasPrefix(tag, \"host:\") {\n\t\t\t\t\/\/ Override the hostname with the tag, trimming off the prefix.\n\t\t\t\tddMetric.Hostname = tag[5:]\n\t\t\t} else if strings.HasPrefix(tag, \"device:\") {\n\t\t\t\t\/\/ Same as above, but device this time\n\t\t\t\tddMetric.DeviceName = tag[7:]\n\t\t\t} else {\n\t\t\t\t\/\/ Add it, no reason to exclude it.\n\t\t\t\tddMetric.Tags = append(ddMetric.Tags, tag)\n\t\t\t}\n\t\t}\n\t\tif ddMetric.Hostname == \"\" {\n\t\t\t\/\/ No magic tag, set the hostname\n\t\t\tddMetric.Hostname = dd.hostname\n\t\t}\n\t\tddMetrics[i] = ddMetric\n\t}\n\n\treturn ddMetrics\n}\n\nfunc (dd *datadogMetricSink) flushPart(ctx context.Context, metricSlice []samplers.DDMetric, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tpostHelper(ctx, dd.HTTPClient, dd.statsd, fmt.Sprintf(\"%s\/api\/v1\/series?api_key=%s\", dd.ddHostname, dd.apiKey), map[string][]samplers.DDMetric{\n\t\t\"series\": metricSlice,\n\t}, \"flush\", true)\n}\n<|endoftext|>"} {"text":"<commit_before>package structs_test\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/fatih\/structs\"\n\t\"github.com\/mitchellh\/mapstructure\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n)\n\ntype Book struct {\n\tTitle string\n\tAuthor string\n\tYear int\n}\n\nconst jsonData string = `{\n\t\"name\": \"Enrique Vila-Matas\",\n\t\"nationality\": \"Spain\",\n\t\"books\": [{\n\t\t\"title\": \"El mal de Montano\",\n\t\t\"year\": 2002\n\t}, {\n\t\t\"title\": \"Paris no se acaba nunca\"\n\t}, {\n\t\t\"title\": \"Doctor Pasavento\",\n\t\t\"year\": 2005,\n\t\t\"publisher\": \"Anagrama\"\n\t}, {\n\t\t\"title\": \"Dublinesca\",\n\t\t\"year\": null\n\t}]\n}`\n\nfunc Test(t *testing.T) {\n\tassert.Nil(t, nil)\n\n\tbook := Book{\"El mal de Montano\", \"E. Vila-Matas\", 2002}\n\tassert.Equal(t, \"E. Vila-Matas\", book.Author)\n\n\tbookUpdated1 := Book{}\n\tgetUpdatedStruct(book, &bookUpdated1, `{\"author\": \"Enrique Vila-Matas\"}`)\n\tassert.Equal(t, \"Enrique Vila-Matas\", bookUpdated1.Author)\n\n\tbookUpdated2 := Book{}\n\tgetUpdatedStruct(bookUpdated1, &bookUpdated2, `{\"publisher\": \"Anagrama\"}`)\n\tassert.Equal(t, bookUpdated2, bookUpdated1)\n\n\tbookUpdated3 := Book{}\n\tgetUpdatedStruct(bookUpdated1, &bookUpdated3, `{\"year\": null}`)\n\tassert.Equal(t, bookUpdated3.Title, bookUpdated1.Title)\n\tassert.Equal(t, bookUpdated3.Author, bookUpdated1.Author)\n\tassert.Equal(t, bookUpdated3.Year, 0)\n\n\tbookUpdated4 := Book{}\n\tgetUpdatedStruct(bookUpdated1, &bookUpdated4, `{\"title\": null}`)\n\tassert.Equal(t, bookUpdated4.Title, \"\")\n\tassert.Equal(t, bookUpdated4.Author, bookUpdated1.Author)\n\tassert.Equal(t, bookUpdated4.Year, bookUpdated1.Year)\n}\n\nfunc getUpdatedStruct(orig interface{}, dest interface{}, changeJson string) {\n\torigMap := structs.Map(orig)\n\tfmt.Println(\"---\")\n\tjson.NewEncoder(os.Stdout).Encode(origMap)\n\n\tvar changeMap map[string]interface{}\n\tjson.Unmarshal([]byte(changeJson), &changeMap)\n\tjson.NewEncoder(os.Stdout).Encode(changeMap)\n\n\tdestMap := make(map[string]interface{})\n\tfor key, value := range origMap {\n\t\tkey := strings.ToLower(key)\n\t\tdestMap[key] = value\n\t}\n\tfor key, value := range changeMap {\n\t\tkey := strings.ToLower(key)\n\t\tdestMap[key] = value\n\t}\n\n\tmapstructure.Decode(destMap, dest)\n\tjson.NewEncoder(os.Stdout).Encode(dest)\n}\n<commit_msg>update<commit_after>package structs_test\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/fatih\/structs\"\n\t\"github.com\/mitchellh\/mapstructure\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n)\n\ntype Book struct {\n\tTitle string\n\tAuthor string\n\tYear int\n}\n\nfunc Test(t *testing.T) {\n\tassert.Nil(t, nil)\n\n\tbook := Book{\"El mal de Montano\", \"E. Vila-Matas\", 2002}\n\tassert.Equal(t, \"E. Vila-Matas\", book.Author)\n\n\tbookUpdated1 := Book{}\n\tgetUpdatedStruct(book, &bookUpdated1, `{\"author\": \"Enrique Vila-Matas\"}`)\n\tassert.Equal(t, \"Enrique Vila-Matas\", bookUpdated1.Author)\n\n\tbookUpdated2 := Book{}\n\tgetUpdatedStruct(bookUpdated1, &bookUpdated2, `{\"publisher\": \"Anagrama\"}`)\n\tassert.Equal(t, bookUpdated2, bookUpdated1)\n\n\tbookUpdated3 := Book{}\n\tgetUpdatedStruct(bookUpdated1, &bookUpdated3, `{\"year\": null}`)\n\tassert.Equal(t, bookUpdated3.Title, bookUpdated1.Title)\n\tassert.Equal(t, bookUpdated3.Author, bookUpdated1.Author)\n\tassert.Equal(t, bookUpdated3.Year, 0)\n\n\tbookUpdated4 := Book{}\n\tgetUpdatedStruct(bookUpdated1, &bookUpdated4, `{\"title\": null}`)\n\tassert.Equal(t, bookUpdated4.Title, \"\")\n\tassert.Equal(t, bookUpdated4.Author, bookUpdated1.Author)\n\tassert.Equal(t, bookUpdated4.Year, bookUpdated1.Year)\n}\n\nfunc getUpdatedStruct(orig interface{}, dest interface{}, changeJson string) {\n\torigMap := structs.Map(orig)\n\tfmt.Println(\"---\")\n\tjson.NewEncoder(os.Stdout).Encode(origMap)\n\n\tvar changeMap map[string]interface{}\n\tjson.Unmarshal([]byte(changeJson), &changeMap)\n\tjson.NewEncoder(os.Stdout).Encode(changeMap)\n\n\tdestMap := make(map[string]interface{})\n\tfor key, value := range origMap {\n\t\tkey := strings.ToLower(key)\n\t\tdestMap[key] = value\n\t}\n\tfor key, value := range changeMap {\n\t\tkey := strings.ToLower(key)\n\t\tdestMap[key] = value\n\t}\n\n\tmapstructure.Decode(destMap, dest)\n\tjson.NewEncoder(os.Stdout).Encode(dest)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e_metric_only\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n\t\"time\"\n\n\t\"k8s.io\/node-problem-detector\/pkg\/util\/tomb\"\n\t\"k8s.io\/node-problem-detector\/test\/e2e\/lib\/gce\"\n\t\"k8s.io\/test-infra\/boskos\/client\"\n\n\t\"github.com\/onsi\/ginkgo\"\n\t\"github.com\/onsi\/ginkgo\/config\"\n\t\"github.com\/onsi\/ginkgo\/reporters\"\n\t. \"github.com\/onsi\/gomega\"\n\tcompute \"google.golang.org\/api\/compute\/v1\"\n)\n\nvar zone = flag.String(\"zone\", \"\", \"gce zone the hosts live in\")\nvar project = flag.String(\"project\", \"\", \"gce project the hosts live in\")\nvar image = flag.String(\"image\", \"\", \"image to test\")\nvar imageFamily = flag.String(\"image-family\", \"\", \"image family to pick up the test image. Ignored when -image is set.\")\nvar imageProject = flag.String(\"image-project\", \"\", \"gce project of the OS image\")\nvar jobName = flag.String(\"job-name\", \"\", \"name of the Prow job running the test\")\nvar sshKey = flag.String(\"ssh-key\", \"\", \"path to ssh private key.\")\nvar sshUser = flag.String(\"ssh-user\", \"\", \"use predefined user for ssh.\")\nvar npdBuildTar = flag.String(\"npd-build-tar\", \"\", \"tarball containing NPD to be tested.\")\nvar artifactsDir = flag.String(\"artifacts-dir\", \"\", \"local directory to save test artifacts into.\")\nvar boskosProjectType = flag.String(\"boskos-project-type\", \"gce-project\",\n\t\"specifies which project type to select from Boskos.\")\nvar boskosServerURL = flag.String(\"boskos-server-url\", \"http:\/\/boskos.test-pods.svc.cluster.local\",\n\t\"specifies Boskos server URL.\")\nvar boskosWaitDuration = flag.Duration(\"boskos-wait-duration\", 2*time.Minute,\n\t\"Duration to wait before quitting getting Boskos resource.\")\n\nvar computeService *compute.Service\n\n\/\/ boskosClient helps renting project from Boskos, and is only initialized on Ginkgo node 1.\nvar boskosClient *client.Client\n\n\/\/ boskosRenewingTomb stops the goroutine keep renewing the Boskos resources.\nvar boskosRenewingTomb *tomb.Tomb\n\n\/\/ SynchronizedBeforeSuite and SynchronizedAfterSuite help manages singleton resource (a Boskos project) across Ginkgo nodes.\nvar _ = ginkgo.SynchronizedBeforeSuite(rentBoskosProjectIfNeededOnNode1, acceptBoskosProjectIfNeededFromNode1)\nvar _ = ginkgo.SynchronizedAfterSuite(func() {}, releaseBoskosResourcesOnNode1)\n\nfunc TestNPD(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping test in short mode.\")\n\t}\n\n\tvar err error\n\tcomputeService, err = gce.GetComputeClient()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unable to create gcloud compute service using defaults. Make sure you are authenticated. %v\", err))\n\t}\n\n\tif *artifactsDir != \"\" {\n\t\t_, err := os.Stat(*artifactsDir)\n\t\tif err != nil && os.IsNotExist(err) {\n\t\t\tos.MkdirAll(*artifactsDir, os.ModeDir|0755)\n\t\t}\n\t}\n\n\t\/\/ The junit formatted result output is for showing test results on testgrid.\n\tjunitReporter := reporters.NewJUnitReporter(path.Join(*artifactsDir, fmt.Sprintf(\"junit-%02d.xml\", config.GinkgoConfig.ParallelNode)))\n\tginkgo.RunSpecsWithDefaultAndCustomReporters(t, \"NPD Metric-only Suite\", []ginkgo.Reporter{junitReporter})\n}\n\n\/\/ rentBoskosProjectIfNeededOnNode1 rents a GCP project from Boskos if no GCP project is specified.\n\/\/\n\/\/ rentBoskosProjectIfNeededOnNode1 returns a byte slice containing the project name.\n\/\/ rentBoskosProjectIfNeededOnNode1 also initializes boskosClient if necessary.\n\/\/ When the tests run in parallel mode in Ginkgo, this rentBoskosProjectIfNeededOnNode1 runs only on\n\/\/ Ginkgo node 1. The output should be shared with all other Gingko nodes so that they all use the same\n\/\/ GCP project.\nfunc rentBoskosProjectIfNeededOnNode1() []byte {\n\tif *project != \"\" {\n\t\treturn []byte{}\n\t}\n\n\tfmt.Printf(\"Renting project from Boskos\\n\")\n\tboskosClient = client.NewClient(*jobName, *boskosServerURL)\n\tboskosRenewingTomb = tomb.NewTomb()\n\n\tctx, cancel := context.WithTimeout(context.Background(), *boskosWaitDuration)\n\tdefer cancel()\n\tp, err := boskosClient.AcquireWait(ctx, *boskosProjectType, \"free\", \"busy\")\n\tExpect(err).NotTo(HaveOccurred(), fmt.Sprintf(\"Unable to rent project from Boskos: %v\\n\", err))\n\tfmt.Printf(\"Rented project %q from Boskos\\n\", p.Name)\n\n\tgo renewBoskosProject(boskosClient, p.Name, boskosRenewingTomb)\n\n\treturn []byte(p.Name)\n}\n\n\/\/ acceptBoskosProjectIfNeededFromNode1 accepts a GCP project rented from Boskos by Ginkgo node 1.\n\/\/\n\/\/ acceptBoskosProjectIfNeededFromNode1 takes the output of rentBoskosProjectIfNeededOnNode1.\n\/\/ When the tests run in parallel mode in Ginkgo, this function runs on all Ginkgo nodes.\nfunc acceptBoskosProjectIfNeededFromNode1(data []byte) {\n\tif *project != \"\" {\n\t\treturn\n\t}\n\n\tboskosProject := string(data)\n\tfmt.Printf(\"Received Boskos project %q from Ginkgo node 1.\\n\", boskosProject)\n\t*project = boskosProject\n}\n\nfunc renewBoskosProject(boskosClient *client.Client, projectName string, boskosRenewingTomb *tomb.Tomb) {\n\tdefer boskosRenewingTomb.Done()\n\tfor {\n\t\tselect {\n\t\tcase <-time.Tick(5 * time.Minute):\n\t\t\tfmt.Printf(\"Renewing boskosProject %q\\n\", projectName)\n\t\t\tif err := boskosClient.UpdateOne(projectName, \"busy\", nil); err != nil {\n\t\t\t\tfmt.Printf(\"Failed to update status for project %q with Boskos: %v\\n\", projectName, err)\n\t\t\t}\n\t\tcase <-boskosRenewingTomb.Stopping():\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ releaseBoskosResourcesOnNode1 releases all rented Boskos resources if there is any.\nfunc releaseBoskosResourcesOnNode1() {\n\tif boskosClient == nil {\n\t\treturn\n\t}\n\tboskosRenewingTomb.Stop()\n\tif !boskosClient.HasResource() {\n\t\treturn\n\t}\n\tfmt.Printf(\"Releasing all Boskos resources.\\n\")\n\terr := boskosClient.ReleaseAll(\"dirty\")\n\tExpect(err).NotTo(HaveOccurred(), fmt.Sprintf(\"Failed to release project to Boskos: %v\", err))\n}\n\nfunc TestMain(m *testing.M) {\n\tRegisterFailHandler(ginkgo.Fail)\n\tflag.Parse()\n\n\tos.Exit(m.Run())\n}\n<commit_msg>Use Fatal instead of panic for go tests.<commit_after>\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e_metric_only\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n\t\"time\"\n\n\t\"k8s.io\/node-problem-detector\/pkg\/util\/tomb\"\n\t\"k8s.io\/node-problem-detector\/test\/e2e\/lib\/gce\"\n\t\"k8s.io\/test-infra\/boskos\/client\"\n\n\t\"github.com\/onsi\/ginkgo\"\n\t\"github.com\/onsi\/ginkgo\/config\"\n\t\"github.com\/onsi\/ginkgo\/reporters\"\n\t. \"github.com\/onsi\/gomega\"\n\tcompute \"google.golang.org\/api\/compute\/v1\"\n)\n\nvar zone = flag.String(\"zone\", \"\", \"gce zone the hosts live in\")\nvar project = flag.String(\"project\", \"\", \"gce project the hosts live in\")\nvar image = flag.String(\"image\", \"\", \"image to test\")\nvar imageFamily = flag.String(\"image-family\", \"\", \"image family to pick up the test image. Ignored when -image is set.\")\nvar imageProject = flag.String(\"image-project\", \"\", \"gce project of the OS image\")\nvar jobName = flag.String(\"job-name\", \"\", \"name of the Prow job running the test\")\nvar sshKey = flag.String(\"ssh-key\", \"\", \"path to ssh private key.\")\nvar sshUser = flag.String(\"ssh-user\", \"\", \"use predefined user for ssh.\")\nvar npdBuildTar = flag.String(\"npd-build-tar\", \"\", \"tarball containing NPD to be tested.\")\nvar artifactsDir = flag.String(\"artifacts-dir\", \"\", \"local directory to save test artifacts into.\")\nvar boskosProjectType = flag.String(\"boskos-project-type\", \"gce-project\",\n\t\"specifies which project type to select from Boskos.\")\nvar boskosServerURL = flag.String(\"boskos-server-url\", \"http:\/\/boskos.test-pods.svc.cluster.local\",\n\t\"specifies Boskos server URL.\")\nvar boskosWaitDuration = flag.Duration(\"boskos-wait-duration\", 2*time.Minute,\n\t\"Duration to wait before quitting getting Boskos resource.\")\n\nvar computeService *compute.Service\n\n\/\/ boskosClient helps renting project from Boskos, and is only initialized on Ginkgo node 1.\nvar boskosClient *client.Client\n\n\/\/ boskosRenewingTomb stops the goroutine keep renewing the Boskos resources.\nvar boskosRenewingTomb *tomb.Tomb\n\n\/\/ SynchronizedBeforeSuite and SynchronizedAfterSuite help manages singleton resource (a Boskos project) across Ginkgo nodes.\nvar _ = ginkgo.SynchronizedBeforeSuite(rentBoskosProjectIfNeededOnNode1, acceptBoskosProjectIfNeededFromNode1)\nvar _ = ginkgo.SynchronizedAfterSuite(func() {}, releaseBoskosResourcesOnNode1)\n\nfunc TestNPD(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping test in short mode.\")\n\t}\n\n\tvar err error\n\tcomputeService, err = gce.GetComputeClient()\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to create gcloud compute service using defaults. Make sure you are authenticated. %v\", err)\n\t}\n\n\tif *artifactsDir != \"\" {\n\t\t_, err := os.Stat(*artifactsDir)\n\t\tif err != nil && os.IsNotExist(err) {\n\t\t\tos.MkdirAll(*artifactsDir, os.ModeDir|0755)\n\t\t}\n\t}\n\n\t\/\/ The junit formatted result output is for showing test results on testgrid.\n\tjunitReporter := reporters.NewJUnitReporter(path.Join(*artifactsDir, fmt.Sprintf(\"junit-%02d.xml\", config.GinkgoConfig.ParallelNode)))\n\tginkgo.RunSpecsWithDefaultAndCustomReporters(t, \"NPD Metric-only Suite\", []ginkgo.Reporter{junitReporter})\n}\n\n\/\/ rentBoskosProjectIfNeededOnNode1 rents a GCP project from Boskos if no GCP project is specified.\n\/\/\n\/\/ rentBoskosProjectIfNeededOnNode1 returns a byte slice containing the project name.\n\/\/ rentBoskosProjectIfNeededOnNode1 also initializes boskosClient if necessary.\n\/\/ When the tests run in parallel mode in Ginkgo, this rentBoskosProjectIfNeededOnNode1 runs only on\n\/\/ Ginkgo node 1. The output should be shared with all other Gingko nodes so that they all use the same\n\/\/ GCP project.\nfunc rentBoskosProjectIfNeededOnNode1() []byte {\n\tif *project != \"\" {\n\t\treturn []byte{}\n\t}\n\n\tfmt.Printf(\"Renting project from Boskos\\n\")\n\tboskosClient = client.NewClient(*jobName, *boskosServerURL)\n\tboskosRenewingTomb = tomb.NewTomb()\n\n\tctx, cancel := context.WithTimeout(context.Background(), *boskosWaitDuration)\n\tdefer cancel()\n\tp, err := boskosClient.AcquireWait(ctx, *boskosProjectType, \"free\", \"busy\")\n\tExpect(err).NotTo(HaveOccurred(), fmt.Sprintf(\"Unable to rent project from Boskos: %v\\n\", err))\n\tfmt.Printf(\"Rented project %q from Boskos\\n\", p.Name)\n\n\tgo renewBoskosProject(boskosClient, p.Name, boskosRenewingTomb)\n\n\treturn []byte(p.Name)\n}\n\n\/\/ acceptBoskosProjectIfNeededFromNode1 accepts a GCP project rented from Boskos by Ginkgo node 1.\n\/\/\n\/\/ acceptBoskosProjectIfNeededFromNode1 takes the output of rentBoskosProjectIfNeededOnNode1.\n\/\/ When the tests run in parallel mode in Ginkgo, this function runs on all Ginkgo nodes.\nfunc acceptBoskosProjectIfNeededFromNode1(data []byte) {\n\tif *project != \"\" {\n\t\treturn\n\t}\n\n\tboskosProject := string(data)\n\tfmt.Printf(\"Received Boskos project %q from Ginkgo node 1.\\n\", boskosProject)\n\t*project = boskosProject\n}\n\nfunc renewBoskosProject(boskosClient *client.Client, projectName string, boskosRenewingTomb *tomb.Tomb) {\n\tdefer boskosRenewingTomb.Done()\n\tfor {\n\t\tselect {\n\t\tcase <-time.Tick(5 * time.Minute):\n\t\t\tfmt.Printf(\"Renewing boskosProject %q\\n\", projectName)\n\t\t\tif err := boskosClient.UpdateOne(projectName, \"busy\", nil); err != nil {\n\t\t\t\tfmt.Printf(\"Failed to update status for project %q with Boskos: %v\\n\", projectName, err)\n\t\t\t}\n\t\tcase <-boskosRenewingTomb.Stopping():\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ releaseBoskosResourcesOnNode1 releases all rented Boskos resources if there is any.\nfunc releaseBoskosResourcesOnNode1() {\n\tif boskosClient == nil {\n\t\treturn\n\t}\n\tboskosRenewingTomb.Stop()\n\tif !boskosClient.HasResource() {\n\t\treturn\n\t}\n\tfmt.Printf(\"Releasing all Boskos resources.\\n\")\n\terr := boskosClient.ReleaseAll(\"dirty\")\n\tExpect(err).NotTo(HaveOccurred(), fmt.Sprintf(\"Failed to release project to Boskos: %v\", err))\n}\n\nfunc TestMain(m *testing.M) {\n\tRegisterFailHandler(ginkgo.Fail)\n\tflag.Parse()\n\n\tos.Exit(m.Run())\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2020 The cert-manager Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage approval\n\nimport (\n\t\"context\"\n\t\"crypto\/x509\"\n\t\"fmt\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\trbacv1 \"k8s.io\/api\/rbac\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\n\tapiutil \"github.com\/jetstack\/cert-manager\/pkg\/api\/util\"\n\tcmapi \"github.com\/jetstack\/cert-manager\/pkg\/apis\/certmanager\/v1\"\n\tcmmeta \"github.com\/jetstack\/cert-manager\/pkg\/apis\/meta\/v1\"\n\tclientset \"github.com\/jetstack\/cert-manager\/pkg\/client\/clientset\/versioned\"\n\t\"github.com\/jetstack\/cert-manager\/test\/e2e\/framework\"\n\ttestutil \"github.com\/jetstack\/cert-manager\/test\/e2e\/framework\/util\"\n\t\"github.com\/jetstack\/cert-manager\/test\/unit\/gen\"\n)\n\n\/\/ TODO\nvar _ = framework.CertManagerDescribe(\"Approval CertificateRequests\", func() {\n\tf := framework.NewDefaultFramework(\"approval-certificaterequests\")\n\n\tvar (\n\t\tsa *corev1.ServiceAccount\n\t\tsaclient clientset.Interface\n\t\trequest *cmapi.CertificateRequest\n\t)\n\n\tJustBeforeEach(func() {\n\t\tvar err error\n\n\t\tsa, err = f.KubeClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), &corev1.ServiceAccount{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tGenerateName: \"test-sa-\",\n\t\t\t\tNamespace: f.Namespace.Name,\n\t\t\t},\n\t\t}, metav1.CreateOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\trole, err := f.KubeClientSet.RbacV1().Roles(f.Namespace.Name).Create(context.TODO(), &rbacv1.Role{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tGenerateName: \"certificaterequest-creator-\",\n\t\t\t\tNamespace: f.Namespace.Name,\n\t\t\t},\n\t\t\tRules: []rbacv1.PolicyRule{\n\t\t\t\t{\n\t\t\t\t\tVerbs: []string{\"create\"},\n\t\t\t\t\tAPIGroups: []string{\"cert-manager.io\"},\n\t\t\t\t\tResources: []string{\"certificaterequests\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tVerbs: []string{\"update\"},\n\t\t\t\t\tAPIGroups: []string{\"cert-manager.io\"},\n\t\t\t\t\tResources: []string{\"certificaterequests\/status\"},\n\t\t\t\t},\n\t\t\t},\n\t\t}, metav1.CreateOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tBy(\"Creating certificaterequest-creator rolebinding for ServiceAccount\")\n\t\t_, err = f.KubeClientSet.RbacV1().RoleBindings(f.Namespace.Name).Create(context.TODO(), &rbacv1.RoleBinding{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tGenerateName: \"certificaterequest-creator-\",\n\t\t\t\tNamespace: f.Namespace.Name,\n\t\t\t},\n\t\t\tSubjects: []rbacv1.Subject{\n\t\t\t\t{\n\t\t\t\t\tKind: \"ServiceAccount\",\n\t\t\t\t\tName: sa.Name,\n\t\t\t\t\tNamespace: f.Namespace.Name,\n\t\t\t\t},\n\t\t\t},\n\t\t\tRoleRef: rbacv1.RoleRef{\n\t\t\t\tAPIGroup: \"rbac.authorization.k8s.io\",\n\t\t\t\tKind: \"Role\",\n\t\t\t\tName: role.Name,\n\t\t\t},\n\t\t}, metav1.CreateOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\terr = wait.PollImmediate(time.Second, time.Second*10,\n\t\t\tfunc() (bool, error) {\n\t\t\t\tsa, err = f.KubeClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), sa.Name, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\n\t\t\t\tif len(sa.Secrets) == 0 {\n\t\t\t\t\treturn false, nil\n\t\t\t\t}\n\n\t\t\t\treturn true, nil\n\t\t\t},\n\t\t)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tBy(\"Building ServiceAccount kubernetes clientset\")\n\t\tsec, err := f.KubeClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), sa.Secrets[0].Name, metav1.GetOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tkubeConfig, err := testutil.LoadConfig(f.Config.KubeConfig, f.Config.KubeContext)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tkubeConfig.BearerToken = fmt.Sprintf(\"%s\", sec.Data[\"token\"])\n\t\tkubeConfig.CertData = nil\n\t\tkubeConfig.KeyData = nil\n\n\t\tsaclient, err = clientset.NewForConfig(kubeConfig)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tcsr, _, err := gen.CSR(x509.RSA)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\trequest = gen.CertificateRequest(\"\",\n\t\t\tgen.SetCertificateRequestNamespace(f.Namespace.Name),\n\t\t\tgen.SetCertificateRequestCSR(csr),\n\t\t\tgen.SetCertificateRequestIssuer(cmmeta.ObjectReference{\n\t\t\t\tName: \"test-issuer\",\n\t\t\t\tKind: \"Issuer\",\n\t\t\t\tGroup: \"example.io\",\n\t\t\t}),\n\t\t)\n\t\trequest.GenerateName = \"test-request-\"\n\n\t\trequest, err = saclient.CertmanagerV1().CertificateRequests(f.Namespace.Name).Create(context.TODO(), request, metav1.CreateOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t})\n\n\tJustAfterEach(func() {\n\t\terr := f.KubeClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Delete(context.TODO(), sa.Name, metav1.DeleteOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\terr = f.CertManagerClientSet.CertmanagerV1().CertificateRequests(f.Namespace.Name).Delete(context.TODO(), request.Name, metav1.DeleteOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t})\n\n\tIt(\"attempting to approve a certificate request without the approve permission should error\", func() {\n\t\tapprovedCR := request.DeepCopy()\n\t\tapiutil.SetCertificateRequestCondition(approvedCR, cmapi.CertificateRequestConditionApproved, cmmeta.ConditionTrue, \"cert-manager.io\", \"integration\")\n\t\t_, err := saclient.CertmanagerV1().CertificateRequests(f.Namespace.Name).UpdateStatus(context.TODO(), approvedCR, metav1.UpdateOptions{})\n\t\tExpect(err).To(HaveOccurred())\n\t})\n\n\tIt(\"attempting to deny a certificate request without the approve permission should error\", func() {\n\t\tapprovedCR := request.DeepCopy()\n\t\tapiutil.SetCertificateRequestCondition(approvedCR, cmapi.CertificateRequestConditionDenied, cmmeta.ConditionTrue, \"cert-manager.io\", \"integration\")\n\t\t_, err := saclient.CertmanagerV1().CertificateRequests(f.Namespace.Name).UpdateStatus(context.TODO(), approvedCR, metav1.UpdateOptions{})\n\t\tExpect(err).To(HaveOccurred())\n\t})\n\n\tIt(\"a service account with the approve permissions for issuer.example.io\/* should be able to approve requests\", func() {\n\t\tbindServiceAccountToApprove(f, sa, \"issuer.example.io\/*\")\n\n\t\tapprovedCR, err := f.CertManagerClientSet.CertmanagerV1().CertificateRequests(f.Namespace.Name).Get(context.TODO(), request.Name, metav1.GetOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tapiutil.SetCertificateRequestCondition(approvedCR, cmapi.CertificateRequestConditionApproved, cmmeta.ConditionTrue, \"cert-manager.io\", \"integration\")\n\t\t_, err = saclient.CertmanagerV1().CertificateRequests(f.Namespace.Name).UpdateStatus(context.TODO(), approvedCR, metav1.UpdateOptions{})\n\t\tExpect(err).ToNot(HaveOccurred())\n\t})\n\n\tIt(\"a service account with the approve permissions for issuer.example.io\/* should be able to deny requests\", func() {\n\t\tbindServiceAccountToApprove(f, sa, \"issuer.example.io\/*\")\n\n\t\tdeniedCR, err := f.CertManagerClientSet.CertmanagerV1().CertificateRequests(f.Namespace.Name).Get(context.TODO(), request.Name, metav1.GetOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tapiutil.SetCertificateRequestCondition(deniedCR, cmapi.CertificateRequestConditionDenied, cmmeta.ConditionTrue, \"cert-manager.io\", \"integration\")\n\t\t_, err = saclient.CertmanagerV1().CertificateRequests(f.Namespace.Name).UpdateStatus(context.TODO(), deniedCR, metav1.UpdateOptions{})\n\t\tExpect(err).ToNot(HaveOccurred())\n\t})\n\n\tIt(\"a service account with the approve permissions for issuer.example.io\/test-issuer should be able to approve requests\", func() {\n\t\tbindServiceAccountToApprove(f, sa, \"issuer.example.io\/test-issuer\")\n\n\t\tapprovedCR, err := f.CertManagerClientSet.CertmanagerV1().CertificateRequests(f.Namespace.Name).Get(context.TODO(), request.Name, metav1.GetOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tapiutil.SetCertificateRequestCondition(approvedCR, cmapi.CertificateRequestConditionApproved, cmmeta.ConditionTrue, \"cert-manager.io\", \"integration\")\n\t\t_, err = saclient.CertmanagerV1().CertificateRequests(f.Namespace.Name).UpdateStatus(context.TODO(), approvedCR, metav1.UpdateOptions{})\n\t\tExpect(err).ToNot(HaveOccurred())\n\t})\n\n\tIt(\"a service account with the approve permissions for issuer.example.io\/test-issuer should be able to deny requests\", func() {\n\t\tbindServiceAccountToApprove(f, sa, \"issuer.example.io\/test-issuer\")\n\n\t\tdeniedCR, err := f.CertManagerClientSet.CertmanagerV1().CertificateRequests(f.Namespace.Name).Get(context.TODO(), request.Name, metav1.GetOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tapiutil.SetCertificateRequestCondition(deniedCR, cmapi.CertificateRequestConditionDenied, cmmeta.ConditionTrue, \"cert-manager.io\", \"integration\")\n\t\t_, err = saclient.CertmanagerV1().CertificateRequests(f.Namespace.Name).UpdateStatus(context.TODO(), deniedCR, metav1.UpdateOptions{})\n\t\tExpect(err).ToNot(HaveOccurred())\n\t})\n})\n\nfunc bindServiceAccountToApprove(f *framework.Framework, sa *corev1.ServiceAccount, resourceName string) {\n\tclusterrole, err := f.KubeClientSet.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tGenerateName: \"certificaterequest-approver-\",\n\t\t},\n\t\tRules: []rbacv1.PolicyRule{\n\t\t\t{\n\t\t\t\tAPIGroups: []string{\"cert-manager.io\"},\n\t\t\t\tResources: []string{\"signers\"},\n\t\t\t\tVerbs: []string{\"approve\"},\n\t\t\t\tResourceNames: []string{resourceName},\n\t\t\t},\n\t\t},\n\t}, metav1.CreateOptions{})\n\tExpect(err).NotTo(HaveOccurred())\n\n\t_, err = f.KubeClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tGenerateName: \"certificaterequest-approver-\",\n\t\t},\n\t\tSubjects: []rbacv1.Subject{\n\t\t\t{\n\t\t\t\tKind: \"ServiceAccount\",\n\t\t\t\tName: sa.Name,\n\t\t\t\tNamespace: sa.Namespace,\n\t\t\t},\n\t\t},\n\t\tRoleRef: rbacv1.RoleRef{\n\t\t\tAPIGroup: \"rbac.authorization.k8s.io\",\n\t\t\tKind: \"ClusterRole\",\n\t\t\tName: clusterrole.Name,\n\t\t},\n\t}, metav1.CreateOptions{})\n\tExpect(err).NotTo(HaveOccurred())\n}\n<commit_msg>Adds test comment to approval e2e test<commit_after>\/*\nCopyright 2020 The cert-manager Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage approval\n\nimport (\n\t\"context\"\n\t\"crypto\/x509\"\n\t\"fmt\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\trbacv1 \"k8s.io\/api\/rbac\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\n\tapiutil \"github.com\/jetstack\/cert-manager\/pkg\/api\/util\"\n\tcmapi \"github.com\/jetstack\/cert-manager\/pkg\/apis\/certmanager\/v1\"\n\tcmmeta \"github.com\/jetstack\/cert-manager\/pkg\/apis\/meta\/v1\"\n\tclientset \"github.com\/jetstack\/cert-manager\/pkg\/client\/clientset\/versioned\"\n\t\"github.com\/jetstack\/cert-manager\/test\/e2e\/framework\"\n\ttestutil \"github.com\/jetstack\/cert-manager\/test\/e2e\/framework\/util\"\n\t\"github.com\/jetstack\/cert-manager\/test\/unit\/gen\"\n)\n\n\/\/ This test ensures that the approval condition may only be set by users who\n\/\/ have the correct RBAC permissions.\nvar _ = framework.CertManagerDescribe(\"Approval CertificateRequests\", func() {\n\tf := framework.NewDefaultFramework(\"approval-certificaterequests\")\n\n\tvar (\n\t\tsa *corev1.ServiceAccount\n\t\tsaclient clientset.Interface\n\t\trequest *cmapi.CertificateRequest\n\t)\n\n\tJustBeforeEach(func() {\n\t\tvar err error\n\n\t\tsa, err = f.KubeClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), &corev1.ServiceAccount{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tGenerateName: \"test-sa-\",\n\t\t\t\tNamespace: f.Namespace.Name,\n\t\t\t},\n\t\t}, metav1.CreateOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\trole, err := f.KubeClientSet.RbacV1().Roles(f.Namespace.Name).Create(context.TODO(), &rbacv1.Role{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tGenerateName: \"certificaterequest-creator-\",\n\t\t\t\tNamespace: f.Namespace.Name,\n\t\t\t},\n\t\t\tRules: []rbacv1.PolicyRule{\n\t\t\t\t{\n\t\t\t\t\tVerbs: []string{\"create\"},\n\t\t\t\t\tAPIGroups: []string{\"cert-manager.io\"},\n\t\t\t\t\tResources: []string{\"certificaterequests\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tVerbs: []string{\"update\"},\n\t\t\t\t\tAPIGroups: []string{\"cert-manager.io\"},\n\t\t\t\t\tResources: []string{\"certificaterequests\/status\"},\n\t\t\t\t},\n\t\t\t},\n\t\t}, metav1.CreateOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tBy(\"Creating certificaterequest-creator rolebinding for ServiceAccount\")\n\t\t_, err = f.KubeClientSet.RbacV1().RoleBindings(f.Namespace.Name).Create(context.TODO(), &rbacv1.RoleBinding{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tGenerateName: \"certificaterequest-creator-\",\n\t\t\t\tNamespace: f.Namespace.Name,\n\t\t\t},\n\t\t\tSubjects: []rbacv1.Subject{\n\t\t\t\t{\n\t\t\t\t\tKind: \"ServiceAccount\",\n\t\t\t\t\tName: sa.Name,\n\t\t\t\t\tNamespace: f.Namespace.Name,\n\t\t\t\t},\n\t\t\t},\n\t\t\tRoleRef: rbacv1.RoleRef{\n\t\t\t\tAPIGroup: \"rbac.authorization.k8s.io\",\n\t\t\t\tKind: \"Role\",\n\t\t\t\tName: role.Name,\n\t\t\t},\n\t\t}, metav1.CreateOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\terr = wait.PollImmediate(time.Second, time.Second*10,\n\t\t\tfunc() (bool, error) {\n\t\t\t\tsa, err = f.KubeClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), sa.Name, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\n\t\t\t\tif len(sa.Secrets) == 0 {\n\t\t\t\t\treturn false, nil\n\t\t\t\t}\n\n\t\t\t\treturn true, nil\n\t\t\t},\n\t\t)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tBy(\"Building ServiceAccount kubernetes clientset\")\n\t\tsec, err := f.KubeClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), sa.Secrets[0].Name, metav1.GetOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tkubeConfig, err := testutil.LoadConfig(f.Config.KubeConfig, f.Config.KubeContext)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tkubeConfig.BearerToken = fmt.Sprintf(\"%s\", sec.Data[\"token\"])\n\t\tkubeConfig.CertData = nil\n\t\tkubeConfig.KeyData = nil\n\n\t\tsaclient, err = clientset.NewForConfig(kubeConfig)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tcsr, _, err := gen.CSR(x509.RSA)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\trequest = gen.CertificateRequest(\"\",\n\t\t\tgen.SetCertificateRequestNamespace(f.Namespace.Name),\n\t\t\tgen.SetCertificateRequestCSR(csr),\n\t\t\tgen.SetCertificateRequestIssuer(cmmeta.ObjectReference{\n\t\t\t\tName: \"test-issuer\",\n\t\t\t\tKind: \"Issuer\",\n\t\t\t\tGroup: \"example.io\",\n\t\t\t}),\n\t\t)\n\t\trequest.GenerateName = \"test-request-\"\n\n\t\trequest, err = saclient.CertmanagerV1().CertificateRequests(f.Namespace.Name).Create(context.TODO(), request, metav1.CreateOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t})\n\n\tJustAfterEach(func() {\n\t\terr := f.KubeClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Delete(context.TODO(), sa.Name, metav1.DeleteOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\terr = f.CertManagerClientSet.CertmanagerV1().CertificateRequests(f.Namespace.Name).Delete(context.TODO(), request.Name, metav1.DeleteOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t})\n\n\tIt(\"attempting to approve a certificate request without the approve permission should error\", func() {\n\t\tapprovedCR := request.DeepCopy()\n\t\tapiutil.SetCertificateRequestCondition(approvedCR, cmapi.CertificateRequestConditionApproved, cmmeta.ConditionTrue, \"cert-manager.io\", \"integration\")\n\t\t_, err := saclient.CertmanagerV1().CertificateRequests(f.Namespace.Name).UpdateStatus(context.TODO(), approvedCR, metav1.UpdateOptions{})\n\t\tExpect(err).To(HaveOccurred())\n\t})\n\n\tIt(\"attempting to deny a certificate request without the approve permission should error\", func() {\n\t\tapprovedCR := request.DeepCopy()\n\t\tapiutil.SetCertificateRequestCondition(approvedCR, cmapi.CertificateRequestConditionDenied, cmmeta.ConditionTrue, \"cert-manager.io\", \"integration\")\n\t\t_, err := saclient.CertmanagerV1().CertificateRequests(f.Namespace.Name).UpdateStatus(context.TODO(), approvedCR, metav1.UpdateOptions{})\n\t\tExpect(err).To(HaveOccurred())\n\t})\n\n\tIt(\"a service account with the approve permissions for issuer.example.io\/* should be able to approve requests\", func() {\n\t\tbindServiceAccountToApprove(f, sa, \"issuer.example.io\/*\")\n\n\t\tapprovedCR, err := f.CertManagerClientSet.CertmanagerV1().CertificateRequests(f.Namespace.Name).Get(context.TODO(), request.Name, metav1.GetOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tapiutil.SetCertificateRequestCondition(approvedCR, cmapi.CertificateRequestConditionApproved, cmmeta.ConditionTrue, \"cert-manager.io\", \"integration\")\n\t\t_, err = saclient.CertmanagerV1().CertificateRequests(f.Namespace.Name).UpdateStatus(context.TODO(), approvedCR, metav1.UpdateOptions{})\n\t\tExpect(err).ToNot(HaveOccurred())\n\t})\n\n\tIt(\"a service account with the approve permissions for issuer.example.io\/* should be able to deny requests\", func() {\n\t\tbindServiceAccountToApprove(f, sa, \"issuer.example.io\/*\")\n\n\t\tdeniedCR, err := f.CertManagerClientSet.CertmanagerV1().CertificateRequests(f.Namespace.Name).Get(context.TODO(), request.Name, metav1.GetOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tapiutil.SetCertificateRequestCondition(deniedCR, cmapi.CertificateRequestConditionDenied, cmmeta.ConditionTrue, \"cert-manager.io\", \"integration\")\n\t\t_, err = saclient.CertmanagerV1().CertificateRequests(f.Namespace.Name).UpdateStatus(context.TODO(), deniedCR, metav1.UpdateOptions{})\n\t\tExpect(err).ToNot(HaveOccurred())\n\t})\n\n\tIt(\"a service account with the approve permissions for issuer.example.io\/test-issuer should be able to approve requests\", func() {\n\t\tbindServiceAccountToApprove(f, sa, \"issuer.example.io\/test-issuer\")\n\n\t\tapprovedCR, err := f.CertManagerClientSet.CertmanagerV1().CertificateRequests(f.Namespace.Name).Get(context.TODO(), request.Name, metav1.GetOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tapiutil.SetCertificateRequestCondition(approvedCR, cmapi.CertificateRequestConditionApproved, cmmeta.ConditionTrue, \"cert-manager.io\", \"integration\")\n\t\t_, err = saclient.CertmanagerV1().CertificateRequests(f.Namespace.Name).UpdateStatus(context.TODO(), approvedCR, metav1.UpdateOptions{})\n\t\tExpect(err).ToNot(HaveOccurred())\n\t})\n\n\tIt(\"a service account with the approve permissions for issuer.example.io\/test-issuer should be able to deny requests\", func() {\n\t\tbindServiceAccountToApprove(f, sa, \"issuer.example.io\/test-issuer\")\n\n\t\tdeniedCR, err := f.CertManagerClientSet.CertmanagerV1().CertificateRequests(f.Namespace.Name).Get(context.TODO(), request.Name, metav1.GetOptions{})\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tapiutil.SetCertificateRequestCondition(deniedCR, cmapi.CertificateRequestConditionDenied, cmmeta.ConditionTrue, \"cert-manager.io\", \"integration\")\n\t\t_, err = saclient.CertmanagerV1().CertificateRequests(f.Namespace.Name).UpdateStatus(context.TODO(), deniedCR, metav1.UpdateOptions{})\n\t\tExpect(err).ToNot(HaveOccurred())\n\t})\n})\n\nfunc bindServiceAccountToApprove(f *framework.Framework, sa *corev1.ServiceAccount, resourceName string) {\n\tclusterrole, err := f.KubeClientSet.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tGenerateName: \"certificaterequest-approver-\",\n\t\t},\n\t\tRules: []rbacv1.PolicyRule{\n\t\t\t{\n\t\t\t\tAPIGroups: []string{\"cert-manager.io\"},\n\t\t\t\tResources: []string{\"signers\"},\n\t\t\t\tVerbs: []string{\"approve\"},\n\t\t\t\tResourceNames: []string{resourceName},\n\t\t\t},\n\t\t},\n\t}, metav1.CreateOptions{})\n\tExpect(err).NotTo(HaveOccurred())\n\n\t_, err = f.KubeClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tGenerateName: \"certificaterequest-approver-\",\n\t\t},\n\t\tSubjects: []rbacv1.Subject{\n\t\t\t{\n\t\t\t\tKind: \"ServiceAccount\",\n\t\t\t\tName: sa.Name,\n\t\t\t\tNamespace: sa.Namespace,\n\t\t\t},\n\t\t},\n\t\tRoleRef: rbacv1.RoleRef{\n\t\t\tAPIGroup: \"rbac.authorization.k8s.io\",\n\t\t\tKind: \"ClusterRole\",\n\t\t\tName: clusterrole.Name,\n\t\t},\n\t}, metav1.CreateOptions{})\n\tExpect(err).NotTo(HaveOccurred())\n}\n<|endoftext|>"} {"text":"<commit_before>package draw\n\nimport (\n\t\"image\"\n\t\"testing\"\n\n\t\"github.com\/mum4k\/termdash\/area\"\n\t\"github.com\/mum4k\/termdash\/canvas\/braille\"\n\t\"github.com\/mum4k\/termdash\/canvas\/braille\/testbraille\"\n\t\"github.com\/mum4k\/termdash\/cell\"\n\t\"github.com\/mum4k\/termdash\/terminal\/faketerm\"\n)\n\nfunc TestBrailleLine(t *testing.T) {\n\ttests := []struct {\n\t\tdesc string\n\t\tcanvas image.Rectangle\n\t\tstart image.Point\n\t\tend image.Point\n\t\topts []BrailleLineOption\n\t\twant func(size image.Point) *faketerm.Terminal\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tdesc: \"fails when start has negative X\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{-1, 0},\n\t\t\tend: image.Point{0, 0},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tdesc: \"fails when start has negative Y\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, -1},\n\t\t\tend: image.Point{0, 0},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tdesc: \"fails when end has negative X\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, 0},\n\t\t\tend: image.Point{-1, 0},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tdesc: \"fails when end has negative Y\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, 0},\n\t\t\tend: image.Point{0, -1},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tdesc: \"high line, fails on start point outside of the canvas\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{2, 2},\n\t\t\tend: image.Point{2, 2},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tdesc: \"low line, fails on end point outside of the canvas\",\n\t\t\tcanvas: image.Rect(0, 0, 3, 1),\n\t\t\tstart: image.Point{0, 0},\n\t\t\tend: image.Point{6, 3},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tdesc: \"high line, fails on end point outside of the canvas\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, 0},\n\t\t\tend: image.Point{2, 2},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws single point\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, 0},\n\t\t\tend: image.Point{0, 0},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 0})\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws single point with cell options\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, 0},\n\t\t\tend: image.Point{0, 0},\n\t\t\topts: []BrailleLineOption{\n\t\t\t\tBrailleLineCellOpts(\n\t\t\t\t\tcell.FgColor(cell.ColorRed),\n\t\t\t\t),\n\t\t\t},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 0}, cell.FgColor(cell.ColorRed))\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws high line, octant SE\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, 0},\n\t\t\tend: image.Point{1, 3},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 3})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws high line, octant NW\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{1, 3},\n\t\t\tend: image.Point{0, 0},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 3})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws high line, octant SW\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{1, 0},\n\t\t\tend: image.Point{0, 3},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 3})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws high line, octant NE\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, 3},\n\t\t\tend: image.Point{1, 0},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 3})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws low line, octant SE\",\n\t\t\tcanvas: image.Rect(0, 0, 3, 1),\n\t\t\tstart: image.Point{0, 0},\n\t\t\tend: image.Point{4, 3},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{2, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{3, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{4, 3})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws low line, octant NW\",\n\t\t\tcanvas: image.Rect(0, 0, 3, 1),\n\t\t\tstart: image.Point{4, 3},\n\t\t\tend: image.Point{0, 0},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{2, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{3, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{4, 3})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws high line, octant SW\",\n\t\t\tcanvas: image.Rect(0, 0, 3, 1),\n\t\t\tstart: image.Point{4, 0},\n\t\t\tend: image.Point{0, 3},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{4, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{3, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{2, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 3})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws high line, octant NE\",\n\t\t\tcanvas: image.Rect(0, 0, 3, 1),\n\t\t\tstart: image.Point{0, 3},\n\t\t\tend: image.Point{4, 0},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{4, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{3, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{2, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 3})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.desc, func(t *testing.T) {\n\t\t\tbc, err := braille.New(tc.canvas)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"braille.New => unexpected error: %v\", err)\n\t\t\t}\n\n\t\t\terr = BrailleLine(bc, tc.start, tc.end, tc.opts...)\n\t\t\tif (err != nil) != tc.wantErr {\n\t\t\t\tt.Errorf(\"BrailleLine => unexpected error: %v, wantErr: %v\", err, tc.wantErr)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tsize := area.Size(tc.canvas)\n\t\t\twant := faketerm.MustNew(size)\n\t\t\tif tc.want != nil {\n\t\t\t\twant = tc.want(size)\n\t\t\t}\n\n\t\t\tgot, err := faketerm.New(size)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"faketerm.New => unexpected error: %v\", err)\n\t\t\t}\n\t\t\tif err := bc.Apply(got); err != nil {\n\t\t\t\tt.Fatalf(\"bc.Apply => unexpected error: %v\", err)\n\t\t\t}\n\t\t\tif diff := faketerm.Diff(want, got); diff != \"\" {\n\t\t\t\tt.Fatalf(\"BrailleLine => %v\", diff)\n\t\t\t}\n\n\t\t})\n\t}\n}\n<commit_msg>More test coverage.<commit_after>package draw\n\nimport (\n\t\"image\"\n\t\"testing\"\n\n\t\"github.com\/mum4k\/termdash\/area\"\n\t\"github.com\/mum4k\/termdash\/canvas\/braille\"\n\t\"github.com\/mum4k\/termdash\/canvas\/braille\/testbraille\"\n\t\"github.com\/mum4k\/termdash\/cell\"\n\t\"github.com\/mum4k\/termdash\/terminal\/faketerm\"\n)\n\nfunc TestBrailleLine(t *testing.T) {\n\ttests := []struct {\n\t\tdesc string\n\t\tcanvas image.Rectangle\n\t\tstart image.Point\n\t\tend image.Point\n\t\topts []BrailleLineOption\n\t\twant func(size image.Point) *faketerm.Terminal\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tdesc: \"fails when start has negative X\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{-1, 0},\n\t\t\tend: image.Point{0, 0},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tdesc: \"fails when start has negative Y\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, -1},\n\t\t\tend: image.Point{0, 0},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tdesc: \"fails when end has negative X\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, 0},\n\t\t\tend: image.Point{-1, 0},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tdesc: \"fails when end has negative Y\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, 0},\n\t\t\tend: image.Point{0, -1},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tdesc: \"high line, fails on start point outside of the canvas\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{2, 2},\n\t\t\tend: image.Point{2, 2},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tdesc: \"low line, fails on end point outside of the canvas\",\n\t\t\tcanvas: image.Rect(0, 0, 3, 1),\n\t\t\tstart: image.Point{0, 0},\n\t\t\tend: image.Point{6, 3},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tdesc: \"high line, fails on end point outside of the canvas\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, 0},\n\t\t\tend: image.Point{2, 2},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws single point\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, 0},\n\t\t\tend: image.Point{0, 0},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 0})\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws single point with cell options\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, 0},\n\t\t\tend: image.Point{0, 0},\n\t\t\topts: []BrailleLineOption{\n\t\t\t\tBrailleLineCellOpts(\n\t\t\t\t\tcell.FgColor(cell.ColorRed),\n\t\t\t\t),\n\t\t\t},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 0}, cell.FgColor(cell.ColorRed))\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws high line, octant SE\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, 0},\n\t\t\tend: image.Point{1, 3},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 3})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws high line, octant NW\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{1, 3},\n\t\t\tend: image.Point{0, 0},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 3})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws high line, octant SW\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{1, 0},\n\t\t\tend: image.Point{0, 3},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 3})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws high line, octant NE\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, 3},\n\t\t\tend: image.Point{1, 0},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 3})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws low line, octant SE\",\n\t\t\tcanvas: image.Rect(0, 0, 3, 1),\n\t\t\tstart: image.Point{0, 0},\n\t\t\tend: image.Point{4, 3},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{2, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{3, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{4, 3})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws low line, octant NW\",\n\t\t\tcanvas: image.Rect(0, 0, 3, 1),\n\t\t\tstart: image.Point{4, 3},\n\t\t\tend: image.Point{0, 0},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{2, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{3, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{4, 3})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws high line, octant SW\",\n\t\t\tcanvas: image.Rect(0, 0, 3, 1),\n\t\t\tstart: image.Point{4, 0},\n\t\t\tend: image.Point{0, 3},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{4, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{3, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{2, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 3})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws high line, octant NE\",\n\t\t\tcanvas: image.Rect(0, 0, 3, 1),\n\t\t\tstart: image.Point{0, 3},\n\t\t\tend: image.Point{4, 0},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{4, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{3, 1})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{2, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 2})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 3})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws horizontal line, octant E\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, 0},\n\t\t\tend: image.Point{1, 0},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 0})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws horizontal line, octant W\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{1, 0},\n\t\t\tend: image.Point{0, 0},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{1, 0})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws vertical line, octant S\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, 0},\n\t\t\tend: image.Point{0, 1},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 1})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"draws vertical line, octant N\",\n\t\t\tcanvas: image.Rect(0, 0, 1, 1),\n\t\t\tstart: image.Point{0, 1},\n\t\t\tend: image.Point{0, 0},\n\t\t\twant: func(size image.Point) *faketerm.Terminal {\n\t\t\t\tft := faketerm.MustNew(size)\n\t\t\t\tbc := testbraille.MustNew(ft.Area())\n\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 0})\n\t\t\t\ttestbraille.MustSetPixel(bc, image.Point{0, 1})\n\n\t\t\t\ttestbraille.MustApply(bc, ft)\n\t\t\t\treturn ft\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.desc, func(t *testing.T) {\n\t\t\tbc, err := braille.New(tc.canvas)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"braille.New => unexpected error: %v\", err)\n\t\t\t}\n\n\t\t\terr = BrailleLine(bc, tc.start, tc.end, tc.opts...)\n\t\t\tif (err != nil) != tc.wantErr {\n\t\t\t\tt.Errorf(\"BrailleLine => unexpected error: %v, wantErr: %v\", err, tc.wantErr)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tsize := area.Size(tc.canvas)\n\t\t\twant := faketerm.MustNew(size)\n\t\t\tif tc.want != nil {\n\t\t\t\twant = tc.want(size)\n\t\t\t}\n\n\t\t\tgot, err := faketerm.New(size)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"faketerm.New => unexpected error: %v\", err)\n\t\t\t}\n\t\t\tif err := bc.Apply(got); err != nil {\n\t\t\t\tt.Fatalf(\"bc.Apply => unexpected error: %v\", err)\n\t\t\t}\n\t\t\tif diff := faketerm.Diff(want, got); diff != \"\" {\n\t\t\t\tt.Fatalf(\"BrailleLine => %v\", diff)\n\t\t\t}\n\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Mesosphere, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage http\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/coreos\/go-systemd\/activation\"\n\t\"github.com\/dcos\/dcos-go\/store\"\n\t\"github.com\/dcos\/dcos-metrics\/producers\"\n)\n\nvar httpLog = log.WithFields(log.Fields{\n\t\"producer\": \"http\",\n})\n\n\/\/ Config for the HTTP producer\ntype Config struct {\n\tPort int `yaml:\"port\"`\n\tIP string\n\tCacheExpiry time.Duration\n\tDCOSRole string\n}\n\ntype producerImpl struct {\n\tconfig Config\n\tstore store.Store\n\tmetricsChan chan producers.MetricsMessage\n}\n\n\/\/ New creates a new instance of the HTTP producer with the provided configuration.\nfunc New(cfg Config) (producers.MetricsProducer, chan producers.MetricsMessage) {\n\tp := producerImpl{\n\t\tconfig: cfg,\n\t\tstore: store.New(),\n\t\tmetricsChan: make(chan producers.MetricsMessage),\n\t}\n\treturn &p, p.metricsChan\n}\n\n\/\/ Run a HTTP server and serve the various metrics API endpoints.\n\/\/ This function should be run in its own goroutine.\nfunc (p *producerImpl) Run() error {\n\thttpLog.Info(\"Starting HTTP producer garbage collection service\")\n\tgo p.janitor()\n\n\tgo func() {\n\t\thttpLog.Debug(\"HTTP producer listening for incoming messages on metricsChan\")\n\t\tfor {\n\t\t\t\/\/ read messages off the channel,\n\t\t\t\/\/ and give them a unique name in the store\n\t\t\tmessage := <-p.metricsChan\n\t\t\thttpLog.Debugf(\"Received message '%+v' with timestamp %s\",\n\t\t\t\tmessage, time.Unix(message.Timestamp, 0).Format(time.RFC3339))\n\n\t\t\tvar name string\n\t\t\tswitch message.Name {\n\n\t\t\tcase producers.NodeMetricPrefix:\n\t\t\t\tname = strings.Join([]string{\n\t\t\t\t\tmessage.Name,\n\t\t\t\t\tmessage.Dimensions.MesosID,\n\t\t\t\t}, producers.MetricNamespaceSep)\n\n\t\t\tcase producers.ContainerMetricPrefix:\n\t\t\t\tname = strings.Join([]string{\n\t\t\t\t\tmessage.Name,\n\t\t\t\t\tmessage.Dimensions.ContainerID,\n\t\t\t\t}, producers.MetricNamespaceSep)\n\n\t\t\tcase producers.AppMetricPrefix:\n\t\t\t\tname = strings.Join([]string{\n\t\t\t\t\tmessage.Name,\n\t\t\t\t\tmessage.Dimensions.ContainerID,\n\t\t\t\t}, producers.MetricNamespaceSep)\n\t\t\t}\n\t\t\thttpLog.Debugf(\"Setting store object '%s' with timestamp %s\",\n\t\t\t\tname, time.Unix(message.Timestamp, 0).Format(time.RFC3339))\n\n\t\t\tp.store.Set(name, message) \/\/ overwrite existing object with the same name\n\t\t}\n\t}()\n\n\tr := newRouter(p)\n\tlisteners, err := activation.Listeners(true)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to get listeners: %s\", err)\n\t}\n\t\/\/ If a listener is avialable, use that. If it is not avialable,\n\t\/\/ listen on the default TCP socket and port.\n\tif len(listeners) == 1 {\n\t\thttpLog.Infof(\"HTTP Producer serving requests on %s\", listeners[0].Addr().String())\n\t\treturn http.Serve(listeners[0], r)\n\t}\n\thttpLog.Infof(\"HTTP Producer serving requests on %s:%d\", p.config.IP, p.config.Port)\n\treturn http.ListenAndServe(fmt.Sprintf(\"%s:%d\", p.config.IP, p.config.Port), r)\n}\n\n\/\/ janitor analyzes the objects in the store and removes stale objects. An\n\/\/ object is considered stale when the top-level timestamp of its MetricsMessage\n\/\/ has exceeded the CacheExpiry, which is calculated as a multiple of the\n\/\/ collector's polling period. This function should be run in its own goroutine.\nfunc (p *producerImpl) janitor() {\n\tticker := time.NewTicker(time.Duration(60 * time.Second))\n\tfor {\n\t\tselect {\n\t\tcase _ = <-ticker.C:\n\t\t\tfor _, obj := range p.store.Objects() {\n\t\t\t\to := obj.(producers.MetricsMessage)\n\n\t\t\t\tage := time.Since(time.Unix(o.Timestamp, 0))\n\t\t\t\tif age > p.config.CacheExpiry {\n\t\t\t\t\thttpLog.Debugf(\"Removing stale object %s; last updated %d seconds ago\", o.Name, age*time.Second)\n\t\t\t\t\tp.store.Delete(o.Name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>resolve bugs with http producer garbage collector<commit_after>\/\/ Copyright 2016 Mesosphere, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage http\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/coreos\/go-systemd\/activation\"\n\t\"github.com\/dcos\/dcos-go\/store\"\n\t\"github.com\/dcos\/dcos-metrics\/producers\"\n)\n\nvar httpLog = log.WithFields(log.Fields{\n\t\"producer\": \"http\",\n})\n\n\/\/ Config for the HTTP producer\ntype Config struct {\n\tPort int `yaml:\"port\"`\n\tIP string\n\tCacheExpiry time.Duration\n\tDCOSRole string\n}\n\ntype producerImpl struct {\n\tconfig Config\n\tstore store.Store\n\tmetricsChan chan producers.MetricsMessage\n\tjanitorRunInterval time.Duration\n}\n\n\/\/ New creates a new instance of the HTTP producer with the provided configuration.\nfunc New(cfg Config) (producers.MetricsProducer, chan producers.MetricsMessage) {\n\tp := producerImpl{\n\t\tconfig: cfg,\n\t\tstore: store.New(),\n\t\tmetricsChan: make(chan producers.MetricsMessage),\n\t\tjanitorRunInterval: 60 * time.Second,\n\t}\n\treturn &p, p.metricsChan\n}\n\n\/\/ Run a HTTP server and serve the various metrics API endpoints.\n\/\/ This function should be run in its own goroutine.\nfunc (p *producerImpl) Run() error {\n\thttpLog.Info(\"Starting HTTP producer garbage collection service\")\n\tgo p.janitor()\n\n\tgo func() {\n\t\thttpLog.Debug(\"HTTP producer listening for incoming messages on metricsChan\")\n\t\tfor {\n\t\t\t\/\/ read messages off the channel,\n\t\t\t\/\/ and give them a unique name in the store\n\t\t\tmessage := <-p.metricsChan\n\t\t\thttpLog.Debugf(\"Received message '%+v' with timestamp %s\",\n\t\t\t\tmessage, time.Unix(message.Timestamp, 0).Format(time.RFC3339))\n\n\t\t\tvar name string\n\t\t\tswitch message.Name {\n\n\t\t\tcase producers.NodeMetricPrefix:\n\t\t\t\tname = strings.Join([]string{\n\t\t\t\t\tmessage.Name,\n\t\t\t\t\tmessage.Dimensions.MesosID,\n\t\t\t\t}, producers.MetricNamespaceSep)\n\n\t\t\tcase producers.ContainerMetricPrefix:\n\t\t\t\tname = strings.Join([]string{\n\t\t\t\t\tmessage.Name,\n\t\t\t\t\tmessage.Dimensions.ContainerID,\n\t\t\t\t}, producers.MetricNamespaceSep)\n\n\t\t\tcase producers.AppMetricPrefix:\n\t\t\t\tname = strings.Join([]string{\n\t\t\t\t\tmessage.Name,\n\t\t\t\t\tmessage.Dimensions.ContainerID,\n\t\t\t\t}, producers.MetricNamespaceSep)\n\t\t\t}\n\t\t\thttpLog.Debugf(\"Setting store object '%s' with timestamp %s\",\n\t\t\t\tname, time.Unix(message.Timestamp, 0).Format(time.RFC3339))\n\n\t\t\tp.store.Set(name, message) \/\/ overwrite existing object with the same name\n\t\t}\n\t}()\n\n\tr := newRouter(p)\n\tlisteners, err := activation.Listeners(true)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to get listeners: %s\", err)\n\t}\n\t\/\/ If a listener is avialable, use that. If it is not avialable,\n\t\/\/ listen on the default TCP socket and port.\n\tif len(listeners) == 1 {\n\t\thttpLog.Infof(\"HTTP Producer serving requests on %s\", listeners[0].Addr().String())\n\t\treturn http.Serve(listeners[0], r)\n\t}\n\thttpLog.Infof(\"HTTP Producer serving requests on %s:%d\", p.config.IP, p.config.Port)\n\treturn http.ListenAndServe(fmt.Sprintf(\"%s:%d\", p.config.IP, p.config.Port), r)\n}\n\n\/\/ janitor analyzes the objects in the store and removes stale objects. An\n\/\/ object is considered stale when the top-level timestamp of its MetricsMessage\n\/\/ has exceeded the CacheExpiry, which is calculated as a multiple of the\n\/\/ collector's polling period. This function should be run in its own goroutine.\nfunc (p *producerImpl) janitor() {\n\tticker := time.NewTicker(p.janitorRunInterval)\n\tfor {\n\t\tselect {\n\t\tcase _ = <-ticker.C:\n\t\t\tfor k, v := range p.store.Objects() {\n\t\t\t\to := v.(producers.MetricsMessage)\n\n\t\t\t\tage := time.Now().Sub(time.Unix(o.Timestamp, 0))\n\t\t\t\tif age > p.config.CacheExpiry {\n\t\t\t\t\thttpLog.Debugf(\"Removing stale object %s; last updated %d seconds ago\", k, age\/time.Second)\n\t\t\t\t\tp.store.Delete(k)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package isolated\n\nimport (\n\t\"code.cloudfoundry.org\/cli\/integration\/helpers\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gbytes\"\n\t. \"github.com\/onsi\/gomega\/gexec\"\n)\n\nvar _ = Describe(\"auth command\", func() {\n\tContext(\"Help\", func() {\n\t\tIt(\"displays the help information\", func() {\n\t\t\tsession := helpers.CF(\"auth\", \"--help\")\n\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\t\t\tEventually(session).Should(Say(\"auth - Authenticate non-interactively\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"USAGE:\"))\n\t\t\tEventually(session).Should(Say(\"cf auth USERNAME PASSWORD\\n\"))\n\t\t\tEventually(session).Should(Say(\"cf auth CLIENT_ID CLIENT_SECRET --client-credentials\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"ENVIRONMENT VARIABLES:\"))\n\t\t\tEventually(session).Should(Say(\"CF_USERNAME=user\\\\s+Authenticating user. Overridden if USERNAME argument is provided.\"))\n\t\t\tEventually(session).Should(Say(\"CF_PASSWORD=password\\\\s+Password associated with user. Overriden if PASSWORD argument is provided.\"))\n\n\t\t\tEventually(session).Should(Say(\"WARNING:\"))\n\t\t\tEventually(session).Should(Say(\"Providing your password as a command line option is highly discouraged\"))\n\t\t\tEventually(session).Should(Say(\"Your password may be visible to others and may be recorded in your shell history\\n\"))\n\t\t\tEventually(session).Should(Say(\"Consider using the CF_PASSWORD environment variable instead\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"EXAMPLES:\"))\n\t\t\tEventually(session).Should(Say(\"cf auth name@example\\\\.com \\\"my password\\\" \\\\(use quotes for passwords with a space\\\\)\"))\n\t\t\tEventually(session).Should(Say(\"cf auth name@example\\\\.com \\\\\\\"\\\\\\\\\\\"password\\\\\\\\\\\"\\\\\\\" \\\\(escape quotes if used in password\\\\)\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"OPTIONS:\"))\n\t\t\tEventually(session).Should(Say(\"--client-credentials\\\\s+Use \\\\(non-user\\\\) service account \\\\(also called client credentials\\\\)\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"SEE ALSO:\"))\n\t\t\tEventually(session).Should(Say(\"api, login, target\"))\n\n\t\t\tEventually(session).Should(Exit(0))\n\t\t})\n\t})\n\n\tContext(\"when no positional arguments are provided\", func() {\n\t\tContext(\"and no env variables are provided\", func() {\n\t\t\tIt(\"errors-out with the help information\", func() {\n\t\t\t\tsession := helpers.CF(\"auth\")\n\t\t\t\tEventually(session.Err).Should(Say(\"Username and password not provided.\"))\n\t\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when env variables are provided\", func() {\n\t\t\tIt(\"authenticates the user\", func() {\n\t\t\t\tusername, password := helpers.GetCredentials()\n\t\t\t\tenv := map[string]string{\n\t\t\t\t\t\"CF_USERNAME\": username,\n\t\t\t\t\t\"CF_PASSWORD\": password,\n\t\t\t\t}\n\t\t\t\tsession := helpers.CFWithEnv(env, \"auth\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\tEventually(session).Should(Say(\"Use 'cf target' to view or set your target org and space\"))\n\n\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when only a username is provided\", func() {\n\t\tIt(\"errors-out with a password required error and the help information\", func() {\n\t\t\tsession := helpers.CF(\"auth\", \"some-user\")\n\t\t\tEventually(session.Err).Should(Say(\"Password not provided.\"))\n\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\n\t\t\tEventually(session).Should(Exit(1))\n\t\t})\n\t})\n\n\tContext(\"when only a password is provided\", func() {\n\t\tIt(\"errors-out with a username required error and the help information\", func() {\n\t\t\tenv := map[string]string{\n\t\t\t\t\"CF_PASSWORD\": \"some-pass\",\n\t\t\t}\n\t\t\tsession := helpers.CFWithEnv(env, \"auth\")\n\t\t\tEventually(session.Err).Should(Say(\"Username not provided.\"))\n\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\n\t\t\tEventually(session).Should(Exit(1))\n\t\t})\n\t})\n\n\tContext(\"when too many arguments are provided\", func() {\n\t\tIt(\"displays an 'unknown flag' error message\", func() {\n\t\t\tsession := helpers.CF(\"auth\", \"some-username\", \"some-password\", \"-a\", \"api.bosh-lite.com\")\n\n\t\t\tEventually(session.Err).Should(Say(\"Incorrect Usage: unknown flag `a'\"))\n\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\n\t\t\tEventually(session).Should(Exit(1))\n\t\t})\n\t})\n\n\tContext(\"when the API endpoint is not set\", func() {\n\t\tBeforeEach(func() {\n\t\t\thelpers.UnsetAPI()\n\t\t})\n\n\t\tIt(\"displays an error message\", func() {\n\t\t\tsession := helpers.CF(\"auth\", \"some-username\", \"some-password\")\n\n\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\tEventually(session.Err).Should(Say(\"No API endpoint set\\\\. Use 'cf login' or 'cf api' to target an endpoint\\\\.\"))\n\n\t\t\tEventually(session).Should(Exit(1))\n\t\t})\n\t})\n\n\tContext(\"when no flags are set (logging in with password grant type)\", func() {\n\t\tContext(\"when the user provides an invalid username\/password combo\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.LoginCF()\n\t\t\t\thelpers.TargetOrgAndSpace(ReadOnlyOrg, ReadOnlySpace)\n\t\t\t})\n\n\t\t\tIt(\"clears the cached tokens and target info, then displays an error message\", func() {\n\t\t\t\tsession := helpers.CF(\"auth\", \"some-username\", \"some-password\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"Credentials were rejected, please try again\\\\.\"))\n\t\t\t\tEventually(session).Should(Exit(1))\n\n\t\t\t\t\/\/ Verify that the user is not logged-in\n\t\t\t\ttargetSession1 := helpers.CF(\"target\")\n\t\t\t\tEventually(targetSession1.Err).Should(Say(\"Not logged in\\\\. Use 'cf login' to log in\\\\.\"))\n\t\t\t\tEventually(targetSession1).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(targetSession1).Should(Exit(1))\n\n\t\t\t\t\/\/ Verify that neither org nor space is targeted\n\t\t\t\thelpers.LoginCF()\n\t\t\t\ttargetSession2 := helpers.CF(\"target\")\n\t\t\t\tEventually(targetSession2).Should(Say(\"No org or space targeted, use 'cf target -o ORG -s SPACE'\"))\n\t\t\t\tEventually(targetSession2).Should(Exit(0))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the username and password are valid\", func() {\n\t\t\tIt(\"authenticates the user\", func() {\n\t\t\t\tusername, password := helpers.GetCredentials()\n\t\t\t\tsession := helpers.CF(\"auth\", username, password)\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\tEventually(session).Should(Say(\"Use 'cf target' to view or set your target org and space\"))\n\n\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when the 'client-credentials' flag is set\", func() {\n\t\tContext(\"when the user provides an invalid client id\/secret combo\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.LoginCF()\n\t\t\t\thelpers.TargetOrgAndSpace(ReadOnlyOrg, ReadOnlySpace)\n\t\t\t})\n\n\t\t\tIt(\"clears the cached tokens and target info, then displays an error message\", func() {\n\t\t\t\tsession := helpers.CF(\"auth\", \"some-client-id\", \"some-client-secret\", \"--client-credentials\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"Credentials were rejected, please try again\\\\.\"))\n\t\t\t\tEventually(session).Should(Exit(1))\n\n\t\t\t\t\/\/ Verify that the user is not logged-in\n\t\t\t\ttargetSession1 := helpers.CF(\"target\")\n\t\t\t\tEventually(targetSession1.Err).Should(Say(\"Not logged in\\\\. Use 'cf login' to log in\\\\.\"))\n\t\t\t\tEventually(targetSession1).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(targetSession1).Should(Exit(1))\n\n\t\t\t\t\/\/ Verify that neither org nor space is targeted\n\t\t\t\thelpers.LoginCF()\n\t\t\t\ttargetSession2 := helpers.CF(\"target\")\n\t\t\t\tEventually(targetSession2).Should(Say(\"No org or space targeted, use 'cf target -o ORG -s SPACE'\"))\n\t\t\t\tEventually(targetSession2).Should(Exit(0))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the client id and client secret are valid\", func() {\n\t\t\tIt(\"authenticates the user\", func() {\n\t\t\t\tclientID, clientSecret := helpers.SkipIfClientCredentialsNotSet()\n\t\t\t\tsession := helpers.CF(\"auth\", clientID, clientSecret, \"--client-credentials\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\tEventually(session).Should(Say(\"Use 'cf target' to view or set your target org and space\"))\n\n\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when a user authenticates with valid client credentials\", func() {\n\t\tBeforeEach(func() {\n\t\t\tclientID, clientSecret := helpers.SkipIfClientCredentialsNotSet()\n\t\t\tsession := helpers.CF(\"auth\", clientID, clientSecret, \"--client-credentials\")\n\t\t\tEventually(session).Should(Exit(0))\n\t\t})\n\n\t\tContext(\"when a different user authenticates with valid password credentials\", func() {\n\t\t\tIt(\"should fail authentication and display an error informing the user they need to log out\", func() {\n\t\t\t\tusername, password := helpers.GetCredentials()\n\t\t\t\tsession := helpers.CF(\"auth\", username, password)\n\n\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"Service account currently logged in\\\\. Use 'cf logout' to log out service account and try again\\\\.\"))\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t})\n\t\t})\n\t})\n})\n<commit_msg>fix help test for cf auth<commit_after>package isolated\n\nimport (\n\t\"code.cloudfoundry.org\/cli\/integration\/helpers\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gbytes\"\n\t. \"github.com\/onsi\/gomega\/gexec\"\n)\n\nvar _ = Describe(\"auth command\", func() {\n\tContext(\"Help\", func() {\n\t\tIt(\"displays the help information\", func() {\n\t\t\tsession := helpers.CF(\"auth\", \"--help\")\n\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\t\t\tEventually(session).Should(Say(\"auth - Authenticate non-interactively\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"USAGE:\"))\n\t\t\tEventually(session).Should(Say(\"cf auth USERNAME PASSWORD\\n\"))\n\t\t\tEventually(session).Should(Say(\"cf auth CLIENT_ID CLIENT_SECRET --client-credentials\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"ENVIRONMENT VARIABLES:\"))\n\t\t\tEventually(session).Should(Say(\"CF_USERNAME=user\\\\s+Authenticating user. Overridden if USERNAME argument is provided.\"))\n\t\t\tEventually(session).Should(Say(\"CF_PASSWORD=password\\\\s+Password associated with user. Overriden if PASSWORD argument is provided.\"))\n\n\t\t\tEventually(session).Should(Say(\"WARNING:\"))\n\t\t\tEventually(session).Should(Say(\"Providing your password as a command line option is highly discouraged\"))\n\t\t\tEventually(session).Should(Say(\"Your password may be visible to others and may be recorded in your shell history\\n\"))\n\t\t\tEventually(session).Should(Say(\"Consider using the CF_PASSWORD environment variable instead\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"EXAMPLES:\"))\n\t\t\tEventually(session).Should(Say(\"cf auth name@example\\\\.com \\\"my password\\\" \\\\(use quotes for passwords with a space\\\\)\"))\n\t\t\tEventually(session).Should(Say(\"cf auth name@example\\\\.com \\\\\\\"\\\\\\\\\\\"password\\\\\\\\\\\"\\\\\\\" \\\\(escape quotes if used in password\\\\)\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"OPTIONS:\"))\n\t\t\tEventually(session).Should(Say(\"--client-credentials\\\\s+Use \\\\(non-user\\\\) service account \\\\(also called client credentials\\\\)\\n\"))\n\t\t\tEventually(session).Should(Say(\"--origin\\\\s+Indicates the identity provider to be used for authentication\\n\\n\"))\n\n\t\t\tEventually(session).Should(Say(\"SEE ALSO:\"))\n\t\t\tEventually(session).Should(Say(\"api, login, target\"))\n\n\t\t\tEventually(session).Should(Exit(0))\n\t\t})\n\t})\n\n\tContext(\"when no positional arguments are provided\", func() {\n\t\tContext(\"and no env variables are provided\", func() {\n\t\t\tIt(\"errors-out with the help information\", func() {\n\t\t\t\tsession := helpers.CF(\"auth\")\n\t\t\t\tEventually(session.Err).Should(Say(\"Username and password not provided.\"))\n\t\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when env variables are provided\", func() {\n\t\t\tIt(\"authenticates the user\", func() {\n\t\t\t\tusername, password := helpers.GetCredentials()\n\t\t\t\tenv := map[string]string{\n\t\t\t\t\t\"CF_USERNAME\": username,\n\t\t\t\t\t\"CF_PASSWORD\": password,\n\t\t\t\t}\n\t\t\t\tsession := helpers.CFWithEnv(env, \"auth\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\tEventually(session).Should(Say(\"Use 'cf target' to view or set your target org and space\"))\n\n\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when only a username is provided\", func() {\n\t\tIt(\"errors-out with a password required error and the help information\", func() {\n\t\t\tsession := helpers.CF(\"auth\", \"some-user\")\n\t\t\tEventually(session.Err).Should(Say(\"Password not provided.\"))\n\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\n\t\t\tEventually(session).Should(Exit(1))\n\t\t})\n\t})\n\n\tContext(\"when only a password is provided\", func() {\n\t\tIt(\"errors-out with a username required error and the help information\", func() {\n\t\t\tenv := map[string]string{\n\t\t\t\t\"CF_PASSWORD\": \"some-pass\",\n\t\t\t}\n\t\t\tsession := helpers.CFWithEnv(env, \"auth\")\n\t\t\tEventually(session.Err).Should(Say(\"Username not provided.\"))\n\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\n\t\t\tEventually(session).Should(Exit(1))\n\t\t})\n\t})\n\n\tContext(\"when too many arguments are provided\", func() {\n\t\tIt(\"displays an 'unknown flag' error message\", func() {\n\t\t\tsession := helpers.CF(\"auth\", \"some-username\", \"some-password\", \"-a\", \"api.bosh-lite.com\")\n\n\t\t\tEventually(session.Err).Should(Say(\"Incorrect Usage: unknown flag `a'\"))\n\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\n\t\t\tEventually(session).Should(Exit(1))\n\t\t})\n\t})\n\n\tContext(\"when the API endpoint is not set\", func() {\n\t\tBeforeEach(func() {\n\t\t\thelpers.UnsetAPI()\n\t\t})\n\n\t\tIt(\"displays an error message\", func() {\n\t\t\tsession := helpers.CF(\"auth\", \"some-username\", \"some-password\")\n\n\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\tEventually(session.Err).Should(Say(\"No API endpoint set\\\\. Use 'cf login' or 'cf api' to target an endpoint\\\\.\"))\n\n\t\t\tEventually(session).Should(Exit(1))\n\t\t})\n\t})\n\n\tContext(\"when no flags are set (logging in with password grant type)\", func() {\n\t\tContext(\"when the user provides an invalid username\/password combo\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.LoginCF()\n\t\t\t\thelpers.TargetOrgAndSpace(ReadOnlyOrg, ReadOnlySpace)\n\t\t\t})\n\n\t\t\tIt(\"clears the cached tokens and target info, then displays an error message\", func() {\n\t\t\t\tsession := helpers.CF(\"auth\", \"some-username\", \"some-password\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"Credentials were rejected, please try again\\\\.\"))\n\t\t\t\tEventually(session).Should(Exit(1))\n\n\t\t\t\t\/\/ Verify that the user is not logged-in\n\t\t\t\ttargetSession1 := helpers.CF(\"target\")\n\t\t\t\tEventually(targetSession1.Err).Should(Say(\"Not logged in\\\\. Use 'cf login' to log in\\\\.\"))\n\t\t\t\tEventually(targetSession1).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(targetSession1).Should(Exit(1))\n\n\t\t\t\t\/\/ Verify that neither org nor space is targeted\n\t\t\t\thelpers.LoginCF()\n\t\t\t\ttargetSession2 := helpers.CF(\"target\")\n\t\t\t\tEventually(targetSession2).Should(Say(\"No org or space targeted, use 'cf target -o ORG -s SPACE'\"))\n\t\t\t\tEventually(targetSession2).Should(Exit(0))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the username and password are valid\", func() {\n\t\t\tIt(\"authenticates the user\", func() {\n\t\t\t\tusername, password := helpers.GetCredentials()\n\t\t\t\tsession := helpers.CF(\"auth\", username, password)\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\tEventually(session).Should(Say(\"Use 'cf target' to view or set your target org and space\"))\n\n\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when the 'client-credentials' flag is set\", func() {\n\t\tContext(\"when the user provides an invalid client id\/secret combo\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.LoginCF()\n\t\t\t\thelpers.TargetOrgAndSpace(ReadOnlyOrg, ReadOnlySpace)\n\t\t\t})\n\n\t\t\tIt(\"clears the cached tokens and target info, then displays an error message\", func() {\n\t\t\t\tsession := helpers.CF(\"auth\", \"some-client-id\", \"some-client-secret\", \"--client-credentials\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"Credentials were rejected, please try again\\\\.\"))\n\t\t\t\tEventually(session).Should(Exit(1))\n\n\t\t\t\t\/\/ Verify that the user is not logged-in\n\t\t\t\ttargetSession1 := helpers.CF(\"target\")\n\t\t\t\tEventually(targetSession1.Err).Should(Say(\"Not logged in\\\\. Use 'cf login' to log in\\\\.\"))\n\t\t\t\tEventually(targetSession1).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(targetSession1).Should(Exit(1))\n\n\t\t\t\t\/\/ Verify that neither org nor space is targeted\n\t\t\t\thelpers.LoginCF()\n\t\t\t\ttargetSession2 := helpers.CF(\"target\")\n\t\t\t\tEventually(targetSession2).Should(Say(\"No org or space targeted, use 'cf target -o ORG -s SPACE'\"))\n\t\t\t\tEventually(targetSession2).Should(Exit(0))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the client id and client secret are valid\", func() {\n\t\t\tIt(\"authenticates the user\", func() {\n\t\t\t\tclientID, clientSecret := helpers.SkipIfClientCredentialsNotSet()\n\t\t\t\tsession := helpers.CF(\"auth\", clientID, clientSecret, \"--client-credentials\")\n\n\t\t\t\tEventually(session).Should(Say(\"API endpoint: %s\", helpers.GetAPI()))\n\t\t\t\tEventually(session).Should(Say(\"Authenticating\\\\.\\\\.\\\\.\"))\n\t\t\t\tEventually(session).Should(Say(\"OK\"))\n\t\t\t\tEventually(session).Should(Say(\"Use 'cf target' to view or set your target org and space\"))\n\n\t\t\t\tEventually(session).Should(Exit(0))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when a user authenticates with valid client credentials\", func() {\n\t\tBeforeEach(func() {\n\t\t\tclientID, clientSecret := helpers.SkipIfClientCredentialsNotSet()\n\t\t\tsession := helpers.CF(\"auth\", clientID, clientSecret, \"--client-credentials\")\n\t\t\tEventually(session).Should(Exit(0))\n\t\t})\n\n\t\tContext(\"when a different user authenticates with valid password credentials\", func() {\n\t\t\tIt(\"should fail authentication and display an error informing the user they need to log out\", func() {\n\t\t\t\tusername, password := helpers.GetCredentials()\n\t\t\t\tsession := helpers.CF(\"auth\", username, password)\n\n\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\tEventually(session.Err).Should(Say(\"Service account currently logged in\\\\. Use 'cf logout' to log out service account and try again\\\\.\"))\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package dockerwrapper\n\nimport (\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\ntype DockerWrapper interface {\n\tPullImage(image, tag string) error\n\tRun(runList []string, portMappings map[int]int, image, tag string) error\n}\n\ntype wrapper struct {\n\trunner CommandRunner\n}\n\nfunc New() DockerWrapper {\n\trunner := NewDockerRunner()\n\treturn wrapper{runner: runner}\n}\n\nfunc NewWithRunner(commandRunner CommandRunner) DockerWrapper {\n\treturn wrapper{runner: commandRunner}\n}\n\nfunc (w wrapper) PullImage(image, tag string) error {\n\treturn w.runner.Run(\"pull\", []string{image + \":\" + tag})\n}\n\nfunc (w wrapper) Run(runList []string, portMappings map[int]int, image, tag string) error {\n\tdockerCommand := strings.Join(runList, \"&&\")\n\n\targs := append(w.defaultStaticParams(), w.portsMapToArgsParams(portMappings)...)\n\targs = append(args, w.mountPointParams()...)\n\targs = append(args, image+\":\"+tag, \"-c\", dockerCommand)\n\n\treturn w.runner.Run(\"run\", args)\n}\n\nfunc (w wrapper) defaultStaticParams() []string {\n\treturn []string{\"--tty\", \"-i\", \"--rm\", \"-w\", \"\/workdir\", \"--entrypoint\", \"\/bin\/sh\"}\n}\n\nfunc (w wrapper) portsMapToArgsParams(portMappings map[int]int) []string {\n\tports := []string{}\n\n\tfor hostPort, dockerPort := range portMappings {\n\t\tports = append(ports, []string{\"-p\", fmt.Sprintf(\"%d:%d\", hostPort, dockerPort)}...)\n\t}\n\n\treturn ports\n}\n\nfunc (w wrapper) mountPointParams() []string {\n\tworkingDir, _ := filepath.Abs(\"\")\n\n\treturn []string{\n\t\t\"-v\",\n\t\tfmt.Sprintf(\"%s:\/workdir\", workingDir),\n\t}\n}\n<commit_msg>Change dockerwrapper\/command runner api<commit_after>package dockerwrapper\n\nimport (\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\ntype DockerWrapper interface {\n\tPullImage(image, tag string) error\n\tRun(runList []string, portMappings map[int]int, image, tag string) error\n}\n\ntype wrapper struct {\n\trunner CommandRunner\n}\n\nfunc New() DockerWrapper {\n\trunner := dockerRunner{}\n\treturn wrapper{runner: runner}\n}\n\nfunc NewWithRunner(commandRunner CommandRunner) DockerWrapper {\n\treturn wrapper{runner: commandRunner}\n}\n\nfunc (w wrapper) PullImage(image, tag string) error {\n\treturn w.runner.Run(\"pull\", []string{image + \":\" + tag})\n}\n\nfunc (w wrapper) Run(runList []string, portMappings map[int]int, image, tag string) error {\n\tdockerCommand := strings.Join(runList, \"&&\")\n\n\targs := append(w.defaultStaticParams(), w.portsMapToArgsParams(portMappings)...)\n\targs = append(args, w.mountPointParams()...)\n\targs = append(args, image+\":\"+tag, \"-c\", dockerCommand)\n\n\treturn w.runner.Run(\"run\", args)\n}\n\nfunc (w wrapper) defaultStaticParams() []string {\n\treturn []string{\"--tty\", \"-i\", \"--rm\", \"-w\", \"\/workdir\", \"--entrypoint\", \"\/bin\/sh\"}\n}\n\nfunc (w wrapper) portsMapToArgsParams(portMappings map[int]int) []string {\n\tports := []string{}\n\n\tfor hostPort, dockerPort := range portMappings {\n\t\tports = append(ports, []string{\"-p\", fmt.Sprintf(\"%d:%d\", hostPort, dockerPort)}...)\n\t}\n\n\treturn ports\n}\n\nfunc (w wrapper) mountPointParams() []string {\n\tworkingDir, _ := filepath.Abs(\"\")\n\n\treturn []string{\n\t\t\"-v\",\n\t\tfmt.Sprintf(\"%s:\/workdir\", workingDir),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package models\n\nimport (\n\t\"time\"\n\t\"github.com\/lifei6671\/mindoc\/conf\"\n\t\"github.com\/astaxie\/beego\/orm\"\n\t\"github.com\/astaxie\/beego\"\n)\n\n\/\/博文表\ntype Blog struct {\n\tBlogId int\t\t`orm:\"pk;auto;unique;column(blog_id)\" json:\"blog_id\"`\n\t\/\/文章标题\n\tBlogTitle string\t`orm:\"column(blog_title);size(500)\" json:\"blog_title\"`\n\t\/\/文章标识\n\tBlogIdentify string\t`orm:\"column(blog_identify);size(100);unique\" json:\"blog_identify\"`\n\t\/\/排序序号\n\tOrderIndex int \t\t`orm:\"column(order_index);type(int);default(0)\" json:\"order_index\"`\n\t\/\/所属用户\n\tMemberId int\t\t`orm:\"column(member_id);type(int);default(0):index\" json:\"member_id\"`\n\t\/\/用户头像\n\tMemberAvatar string\t\t`orm:\"-\" json:\"member_avatar\"`\n\t\/\/文章类型:0 普通文章\/1 链接文章\n\tBlogType int\t\t`orm:\"column(blog_type);type(int);default(0)\" json:\"blog_type\"`\n\t\/\/链接到的项目中的文档ID\n\tDocumentId int\t\t`orm:\"column(document_id);type(int);default(0)\" json:\"document_id\"`\n\t\/\/文章的标识\n\tDocumentIdentify string `orm:\"-\" json:\"document_identify\"`\n\t\/\/关联文档的项目标识\n\tBookIdentify string \t`orm:\"-\" json:\"book_identify\"`\n\t\/\/关联文档的项目ID\n\tBookId int \t\t\t\t`orm:\"-\" json:\"book_id\"`\n\t\/\/文章摘要\n\tBlogExcerpt string\t`orm:\"column(blog_excerpt);size(1500)\" json:\"blog_excerpt\"`\n\t\/\/文章内容\n\tBlogContent string\t`orm:\"column(blog_content);type(text);null\" json:\"blog_content\"`\n\t\/\/发布后的文章内容\n\tBlogRelease string \t`orm:\"column(blog_release);type(text);null\" json:\"blog_release\"`\n\t\/\/文章当前的状态,枚举enum(’publish’,’draft’,’password’)值,publish为已 发表,draft为草稿,password 为私人内容(不会被公开) 。默认为publish。\n\tBlogStatus string\t`orm:\"column(blog_status);size(100);default(publish)\" json:\"blog_status\"`\n\t\/\/文章密码,varchar(100)值。文章编辑才可为文章设定一个密码,凭这个密码才能对文章进行重新强加或修改。\n\tPassword string\t\t`orm:\"column(password);size(100)\" json:\"-\"`\n\t\/\/最后修改时间\n\tModified time.Time\t`orm:\"column(modify_time);type(datetime);auto_now\" json:\"modify_time\"`\n\t\/\/修改人id\n\tModifyAt int\t\t`orm:\"column(modify_at);type(int)\" json:\"-\"`\n\tModifyRealName string `orm:\"-\" json:\"modify_real_name\"`\n\t\/\/创建时间\n\tCreated time.Time\t`orm:\"column(create_time);type(datetime);auto_now_add\" json:\"create_time\"`\n\tCreateName string \t`orm:\"-\" json:\"create_name\"`\n\t\/\/版本号\n\tVersion int64 `orm:\"type(bigint);column(version)\" json:\"version\"`\n\t\/\/附件列表\n\tAttachList []*Attachment `orm:\"-\" json:\"attach_list\"`\n}\n\n\/\/ 多字段唯一键\nfunc (m *Blog) TableUnique() [][]string {\n\treturn [][]string{\n\t\t{\"blog_id\", \"blog_identify\"},\n\t}\n}\n\n\/\/ TableName 获取对应数据库表名.\nfunc (m *Blog) TableName() string {\n\treturn \"blogs\"\n}\n\n\/\/ TableEngine 获取数据使用的引擎.\nfunc (m *Blog) TableEngine() string {\n\treturn \"INNODB\"\n}\n\nfunc (m *Blog) TableNameWithPrefix() string {\n\treturn conf.GetDatabasePrefix() + m.TableName()\n}\n\nfunc NewBlog() *Blog {\n\treturn &Blog{\n\t\tBlogStatus: \"public\",\n\t}\n}\n\n\/\/根据文章ID查询文章\nfunc (b *Blog) Find(blogId int) (*Blog,error) {\n\to := orm.NewOrm()\n\n\terr := o.QueryTable(b.TableNameWithPrefix()).Filter(\"blog_id\",blogId).One(b)\n\tif err != nil {\n\t\tbeego.Error(\"查询文章时失败 -> \",err)\n\t\treturn nil,err\n\t}\n\n\n\treturn b.Link()\n}\n\/\/查找指定用户的指定文章\nfunc (b *Blog) FindByIdAndMemberId(blogId,memberId int) (*Blog,error) {\n\to := orm.NewOrm()\n\n\terr := o.QueryTable(b.TableNameWithPrefix()).Filter(\"blog_id\",blogId).Filter(\"member_id\",memberId).One(b)\n\tif err != nil {\n\t\tbeego.Error(\"查询文章时失败 -> \",err)\n\t\treturn nil,err\n\t}\n\n\treturn b.Link()\n}\n\/\/根据文章标识查询文章\nfunc (b *Blog) FindByIdentify(identify string) (*Blog,error) {\n\to := orm.NewOrm()\n\n\terr := o.QueryTable(b.TableNameWithPrefix()).Filter(\"blog_identify\",identify).One(b)\n\tif err != nil {\n\t\tbeego.Error(\"查询文章时失败 -> \",err)\n\t\treturn nil,err\n\t}\n\treturn b,nil\n}\n\n\/\/获取指定文章的链接内容\nfunc (b *Blog)Link() (*Blog,error) {\n\to := orm.NewOrm()\n\t\/\/如果是链接文章,则需要从链接的项目中查找文章内容\n\tif b.BlogType == 1 && b.DocumentId > 0 {\n\t\tdoc := NewDocument()\n\t\tif err := o.QueryTable(doc.TableNameWithPrefix()).Filter(\"document_id\",b.DocumentId).One(doc,\"release\",\"markdown\",\"identify\",\"book_id\");err != nil {\n\t\t\tbeego.Error(\"查询文章链接对象时出错 -> \",err)\n\t\t}else{\n\t\t\tb.DocumentIdentify = doc.Identify\n\t\t\tb.BlogRelease = doc.Release\n\t\t\t\/\/目前仅支持markdown文档进行链接\n\t\t\tb.BlogContent = doc.Markdown\n\t\t\tbook := NewBook()\n\t\t\tif err := o.QueryTable(book.TableNameWithPrefix()).Filter(\"book_id\",doc.BookId).One(book,\"identify\");err != nil {\n\t\t\t\tbeego.Error(\"查询关联文档的项目时出错 ->\",err)\n\t\t\t}else{\n\t\t\t\tb.BookIdentify = book.Identify\n\t\t\t\tb.BookId = doc.BookId\n\t\t\t}\n\t\t}\n\t}\n\n\tif b.ModifyAt > 0{\n\t\tmember := NewMember()\n\t\tif err := o.QueryTable(member.TableNameWithPrefix()).Filter(\"member_id\",b.ModifyAt).One(member,\"real_name\",\"account\"); err == nil {\n\t\t\tif member.RealName != \"\"{\n\t\t\t\tb.ModifyRealName = member.RealName\n\t\t\t}else{\n\t\t\t\tb.ModifyRealName = member.Account\n\t\t\t}\n\t\t}\n\t}\n\tif b.MemberId > 0 {\n\t\tmember := NewMember()\n\t\tif err := o.QueryTable(member.TableNameWithPrefix()).Filter(\"member_id\",b.MemberId).One(member,\"real_name\",\"account\",\"avatar\"); err == nil {\n\t\t\tif member.RealName != \"\"{\n\t\t\t\tb.CreateName = member.RealName\n\t\t\t}else{\n\t\t\t\tb.CreateName = member.Account\n\t\t\t}\n\t\t\tb.MemberAvatar = member.Avatar\n\t\t}\n\t}\n\n\treturn b,nil\n}\n\n\/\/判断指定的文章标识是否存在\nfunc (b *Blog) IsExist(identify string) bool {\n\to := orm.NewOrm()\n\n\treturn o.QueryTable(b.TableNameWithPrefix()).Filter(\"blog_identify\",identify).Exist()\n}\n\n\/\/保存文章\nfunc (b *Blog) Save(cols ...string) error {\n\to := orm.NewOrm()\n\n\tif b.OrderIndex <= 0 {\n\t\tblog := NewBlog()\n\t\tif err :=o.QueryTable(b.TableNameWithPrefix()).OrderBy(\"-blog_id\").One(blog,\"blog_id\");err == nil{\n\t\t\tb.OrderIndex = b.BlogId + 1;\n\t\t}else{\n\t\t\tc,_ := o.QueryTable(b.TableNameWithPrefix()).Count()\n\t\t\tb.OrderIndex = int(c) + 1\n\t\t}\n\t}\n\tvar err error\n\tb.Version = time.Now().Unix()\n\tif b.BlogId > 0 {\n\t\tb.Modified = time.Now()\n\t\t_,err = o.Update(b,cols...)\n\t}else{\n\n\t\tb.Created = time.Now()\n\t\t_,err = o.Insert(b)\n\t}\n\treturn err\n}\n\n\/\/分页查询文章列表\nfunc (b *Blog) FindToPager(pageIndex, pageSize int,memberId int,status string) (blogList []*Blog, totalCount int, err error) {\n\n\to := orm.NewOrm()\n\n\toffset := (pageIndex - 1) * pageSize\n\n\tquery := o.QueryTable(b.TableNameWithPrefix());\n\n\tif memberId > 0 {\n\t\tquery = query.Filter(\"member_id\",memberId)\n\t}\n\tif status != \"\" {\n\t\tquery = query.Filter(\"blog_status\",status)\n\t}\n\n\n\t_,err = query.OrderBy(\"-order_index\",\"-blog_id\").Offset(offset).Limit(pageSize).All(&blogList)\n\n\tif err != nil {\n\t\tif err == orm.ErrNoRows {\n\t\t\treturn\n\t\t}\n\t\tbeego.Error(\"获取文章列表时出错 ->\",err)\n\t\treturn\n\t}\n\tcount,err := query.Count()\n\n\tif err != nil {\n\t\tbeego.Error(\"获取文章数量时出错 ->\",err)\n\t\treturn nil,0,err\n\t}\n\ttotalCount = int(count)\n\tfor _,blog := range blogList {\n\t\tif blog.BlogType == 1 {\n\t\t\tblog.Link()\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/删除文章\nfunc (b *Blog) Delete(blogId int) error {\n\to := orm.NewOrm()\n\n\t_,err := o.QueryTable(b.TableNameWithPrefix()).Filter(\"blog_id\",blogId).Delete()\n\tif err != nil {\n\t\tbeego.Error(\"删除文章失败 ->\",err)\n\t}\n\treturn err\n}\n\n\/\/查询下一篇文章\nfunc (b *Blog) QueryNext(blogId int) (*Blog,error) {\n\to := orm.NewOrm()\n\tblog := NewBlog()\n\n\tif err := o.QueryTable(b.TableNameWithPrefix()).Filter(\"blog_id\",blogId).One(blog,\"order_index\"); err != nil {\n\t\tbeego.Error(\"查询文章时出错 ->\",err)\n\t\treturn b,err\n\t}\n\n\terr := o.QueryTable(b.TableNameWithPrefix()).Filter(\"order_index__gte\",blog.OrderIndex).Filter(\"blog_id__gt\",blogId).OrderBy(\"-order_index\",\"-blog_id\").One(blog)\n\n\tif err != nil {\n\t\tbeego.Error(\"查询文章时出错 ->\",err)\n\t}\n\treturn blog,err\n}\n\n\/\/查询下一篇文章\nfunc (b *Blog) QueryPrevious(blogId int) (*Blog,error) {\n\to := orm.NewOrm()\n\tblog := NewBlog()\n\n\tif err := o.QueryTable(b.TableNameWithPrefix()).Filter(\"blog_id\",blogId).One(blog,\"order_index\"); err != nil {\n\t\tbeego.Error(\"查询文章时出错 ->\",err)\n\t\treturn b,err\n\t}\n\n\terr := o.QueryTable(b.TableNameWithPrefix()).Filter(\"order_index__lte\",blog.OrderIndex).Filter(\"blog_id__lt\",blogId).OrderBy(\"-order_index\",\"-blog_id\").One(blog)\n\n\tif err != nil {\n\t\tbeego.Error(\"查询文章时出错 ->\",err)\n\t}\n\treturn blog,err\n}\n\n\/\/关联文章附件\nfunc (b *Blog) LinkAttach() (err error) {\n\n\to := orm.NewOrm()\n\n\tvar attachList []*Attachment\n\t\/\/当不是关联文章时,用文章ID去查询附件\n\tif b.BlogType != 1 || b.DocumentId <= 0 {\n\t\t_, err = o.QueryTable(NewAttachment().TableNameWithPrefix()).Filter(\"document_id\", b.BlogId).Filter(\"book_id\",0).All(&attachList)\n\t\tif err != nil {\n\t\t\tbeego.Error(\"查询文章附件时出错 ->\", err)\n\t\t}\n\t}else {\n\t\t_, err = o.QueryTable(NewAttachment().TableNameWithPrefix()).Filter(\"document_id\", b.DocumentId).Filter(\"book_id\", b.BookId).All(&attachList)\n\n\t\tif err != nil {\n\t\t\tbeego.Error(\"查询文章附件时出错 ->\", err)\n\t\t}\n\t}\n\tb.AttachList = attachList\n\treturn\n}<commit_msg>修复文章排序错误<commit_after>package models\n\nimport (\n\t\"time\"\n\t\"github.com\/lifei6671\/mindoc\/conf\"\n\t\"github.com\/astaxie\/beego\/orm\"\n\t\"github.com\/astaxie\/beego\"\n)\n\n\/\/博文表\ntype Blog struct {\n\tBlogId int\t\t`orm:\"pk;auto;unique;column(blog_id)\" json:\"blog_id\"`\n\t\/\/文章标题\n\tBlogTitle string\t`orm:\"column(blog_title);size(500)\" json:\"blog_title\"`\n\t\/\/文章标识\n\tBlogIdentify string\t`orm:\"column(blog_identify);size(100);unique\" json:\"blog_identify\"`\n\t\/\/排序序号\n\tOrderIndex int \t\t`orm:\"column(order_index);type(int);default(0)\" json:\"order_index\"`\n\t\/\/所属用户\n\tMemberId int\t\t`orm:\"column(member_id);type(int);default(0):index\" json:\"member_id\"`\n\t\/\/用户头像\n\tMemberAvatar string\t\t`orm:\"-\" json:\"member_avatar\"`\n\t\/\/文章类型:0 普通文章\/1 链接文章\n\tBlogType int\t\t`orm:\"column(blog_type);type(int);default(0)\" json:\"blog_type\"`\n\t\/\/链接到的项目中的文档ID\n\tDocumentId int\t\t`orm:\"column(document_id);type(int);default(0)\" json:\"document_id\"`\n\t\/\/文章的标识\n\tDocumentIdentify string `orm:\"-\" json:\"document_identify\"`\n\t\/\/关联文档的项目标识\n\tBookIdentify string \t`orm:\"-\" json:\"book_identify\"`\n\t\/\/关联文档的项目ID\n\tBookId int \t\t\t\t`orm:\"-\" json:\"book_id\"`\n\t\/\/文章摘要\n\tBlogExcerpt string\t`orm:\"column(blog_excerpt);size(1500)\" json:\"blog_excerpt\"`\n\t\/\/文章内容\n\tBlogContent string\t`orm:\"column(blog_content);type(text);null\" json:\"blog_content\"`\n\t\/\/发布后的文章内容\n\tBlogRelease string \t`orm:\"column(blog_release);type(text);null\" json:\"blog_release\"`\n\t\/\/文章当前的状态,枚举enum(’publish’,’draft’,’password’)值,publish为已 发表,draft为草稿,password 为私人内容(不会被公开) 。默认为publish。\n\tBlogStatus string\t`orm:\"column(blog_status);size(100);default(publish)\" json:\"blog_status\"`\n\t\/\/文章密码,varchar(100)值。文章编辑才可为文章设定一个密码,凭这个密码才能对文章进行重新强加或修改。\n\tPassword string\t\t`orm:\"column(password);size(100)\" json:\"-\"`\n\t\/\/最后修改时间\n\tModified time.Time\t`orm:\"column(modify_time);type(datetime);auto_now\" json:\"modify_time\"`\n\t\/\/修改人id\n\tModifyAt int\t\t`orm:\"column(modify_at);type(int)\" json:\"-\"`\n\tModifyRealName string `orm:\"-\" json:\"modify_real_name\"`\n\t\/\/创建时间\n\tCreated time.Time\t`orm:\"column(create_time);type(datetime);auto_now_add\" json:\"create_time\"`\n\tCreateName string \t`orm:\"-\" json:\"create_name\"`\n\t\/\/版本号\n\tVersion int64 `orm:\"type(bigint);column(version)\" json:\"version\"`\n\t\/\/附件列表\n\tAttachList []*Attachment `orm:\"-\" json:\"attach_list\"`\n}\n\n\/\/ 多字段唯一键\nfunc (m *Blog) TableUnique() [][]string {\n\treturn [][]string{\n\t\t{\"blog_id\", \"blog_identify\"},\n\t}\n}\n\n\/\/ TableName 获取对应数据库表名.\nfunc (m *Blog) TableName() string {\n\treturn \"blogs\"\n}\n\n\/\/ TableEngine 获取数据使用的引擎.\nfunc (m *Blog) TableEngine() string {\n\treturn \"INNODB\"\n}\n\nfunc (m *Blog) TableNameWithPrefix() string {\n\treturn conf.GetDatabasePrefix() + m.TableName()\n}\n\nfunc NewBlog() *Blog {\n\treturn &Blog{\n\t\tBlogStatus: \"public\",\n\t}\n}\n\n\/\/根据文章ID查询文章\nfunc (b *Blog) Find(blogId int) (*Blog,error) {\n\to := orm.NewOrm()\n\n\terr := o.QueryTable(b.TableNameWithPrefix()).Filter(\"blog_id\",blogId).One(b)\n\tif err != nil {\n\t\tbeego.Error(\"查询文章时失败 -> \",err)\n\t\treturn nil,err\n\t}\n\n\n\treturn b.Link()\n}\n\/\/查找指定用户的指定文章\nfunc (b *Blog) FindByIdAndMemberId(blogId,memberId int) (*Blog,error) {\n\to := orm.NewOrm()\n\n\terr := o.QueryTable(b.TableNameWithPrefix()).Filter(\"blog_id\",blogId).Filter(\"member_id\",memberId).One(b)\n\tif err != nil {\n\t\tbeego.Error(\"查询文章时失败 -> \",err)\n\t\treturn nil,err\n\t}\n\n\treturn b.Link()\n}\n\/\/根据文章标识查询文章\nfunc (b *Blog) FindByIdentify(identify string) (*Blog,error) {\n\to := orm.NewOrm()\n\n\terr := o.QueryTable(b.TableNameWithPrefix()).Filter(\"blog_identify\",identify).One(b)\n\tif err != nil {\n\t\tbeego.Error(\"查询文章时失败 -> \",err)\n\t\treturn nil,err\n\t}\n\treturn b,nil\n}\n\n\/\/获取指定文章的链接内容\nfunc (b *Blog)Link() (*Blog,error) {\n\to := orm.NewOrm()\n\t\/\/如果是链接文章,则需要从链接的项目中查找文章内容\n\tif b.BlogType == 1 && b.DocumentId > 0 {\n\t\tdoc := NewDocument()\n\t\tif err := o.QueryTable(doc.TableNameWithPrefix()).Filter(\"document_id\",b.DocumentId).One(doc,\"release\",\"markdown\",\"identify\",\"book_id\");err != nil {\n\t\t\tbeego.Error(\"查询文章链接对象时出错 -> \",err)\n\t\t}else{\n\t\t\tb.DocumentIdentify = doc.Identify\n\t\t\tb.BlogRelease = doc.Release\n\t\t\t\/\/目前仅支持markdown文档进行链接\n\t\t\tb.BlogContent = doc.Markdown\n\t\t\tbook := NewBook()\n\t\t\tif err := o.QueryTable(book.TableNameWithPrefix()).Filter(\"book_id\",doc.BookId).One(book,\"identify\");err != nil {\n\t\t\t\tbeego.Error(\"查询关联文档的项目时出错 ->\",err)\n\t\t\t}else{\n\t\t\t\tb.BookIdentify = book.Identify\n\t\t\t\tb.BookId = doc.BookId\n\t\t\t}\n\t\t}\n\t}\n\n\tif b.ModifyAt > 0{\n\t\tmember := NewMember()\n\t\tif err := o.QueryTable(member.TableNameWithPrefix()).Filter(\"member_id\",b.ModifyAt).One(member,\"real_name\",\"account\"); err == nil {\n\t\t\tif member.RealName != \"\"{\n\t\t\t\tb.ModifyRealName = member.RealName\n\t\t\t}else{\n\t\t\t\tb.ModifyRealName = member.Account\n\t\t\t}\n\t\t}\n\t}\n\tif b.MemberId > 0 {\n\t\tmember := NewMember()\n\t\tif err := o.QueryTable(member.TableNameWithPrefix()).Filter(\"member_id\",b.MemberId).One(member,\"real_name\",\"account\",\"avatar\"); err == nil {\n\t\t\tif member.RealName != \"\"{\n\t\t\t\tb.CreateName = member.RealName\n\t\t\t}else{\n\t\t\t\tb.CreateName = member.Account\n\t\t\t}\n\t\t\tb.MemberAvatar = member.Avatar\n\t\t}\n\t}\n\n\treturn b,nil\n}\n\n\/\/判断指定的文章标识是否存在\nfunc (b *Blog) IsExist(identify string) bool {\n\to := orm.NewOrm()\n\n\treturn o.QueryTable(b.TableNameWithPrefix()).Filter(\"blog_identify\",identify).Exist()\n}\n\n\/\/保存文章\nfunc (b *Blog) Save(cols ...string) error {\n\to := orm.NewOrm()\n\n\tif b.OrderIndex <= 0 {\n\t\tblog := NewBlog()\n\t\tif err := o.QueryTable(blog.TableNameWithPrefix()).OrderBy(\"-blog_id\").Limit(1).One(blog,\"blog_id\");err == nil{\n\t\t\tb.OrderIndex = blog.BlogId + 1;\n\t\t}else{\n\t\t\tc,_ := o.QueryTable(b.TableNameWithPrefix()).Count()\n\t\t\tb.OrderIndex = int(c) + 1\n\t\t}\n\t}\n\tvar err error\n\tb.Version = time.Now().Unix()\n\tif b.BlogId > 0 {\n\t\tb.Modified = time.Now()\n\t\t_,err = o.Update(b,cols...)\n\t}else{\n\n\t\tb.Created = time.Now()\n\t\t_,err = o.Insert(b)\n\t}\n\treturn err\n}\n\n\/\/分页查询文章列表\nfunc (b *Blog) FindToPager(pageIndex, pageSize int,memberId int,status string) (blogList []*Blog, totalCount int, err error) {\n\n\to := orm.NewOrm()\n\n\toffset := (pageIndex - 1) * pageSize\n\n\tquery := o.QueryTable(b.TableNameWithPrefix());\n\n\tif memberId > 0 {\n\t\tquery = query.Filter(\"member_id\",memberId)\n\t}\n\tif status != \"\" {\n\t\tquery = query.Filter(\"blog_status\",status)\n\t}\n\n\n\t_,err = query.OrderBy(\"-order_index\",\"-blog_id\").Offset(offset).Limit(pageSize).All(&blogList)\n\n\tif err != nil {\n\t\tif err == orm.ErrNoRows {\n\t\t\treturn\n\t\t}\n\t\tbeego.Error(\"获取文章列表时出错 ->\",err)\n\t\treturn\n\t}\n\tcount,err := query.Count()\n\n\tif err != nil {\n\t\tbeego.Error(\"获取文章数量时出错 ->\",err)\n\t\treturn nil,0,err\n\t}\n\ttotalCount = int(count)\n\tfor _,blog := range blogList {\n\t\tif blog.BlogType == 1 {\n\t\t\tblog.Link()\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/删除文章\nfunc (b *Blog) Delete(blogId int) error {\n\to := orm.NewOrm()\n\n\t_,err := o.QueryTable(b.TableNameWithPrefix()).Filter(\"blog_id\",blogId).Delete()\n\tif err != nil {\n\t\tbeego.Error(\"删除文章失败 ->\",err)\n\t}\n\treturn err\n}\n\n\/\/查询下一篇文章\nfunc (b *Blog) QueryNext(blogId int) (*Blog,error) {\n\to := orm.NewOrm()\n\tblog := NewBlog()\n\n\tif err := o.QueryTable(b.TableNameWithPrefix()).Filter(\"blog_id\",blogId).One(blog,\"order_index\"); err != nil {\n\t\tbeego.Error(\"查询文章时出错 ->\",err)\n\t\treturn b,err\n\t}\n\n\terr := o.QueryTable(b.TableNameWithPrefix()).Filter(\"order_index__gte\",blog.OrderIndex).Filter(\"blog_id__gt\",blogId).OrderBy(\"-order_index\",\"-blog_id\").One(blog)\n\n\tif err != nil {\n\t\tbeego.Error(\"查询文章时出错 ->\",err)\n\t}\n\treturn blog,err\n}\n\n\/\/查询下一篇文章\nfunc (b *Blog) QueryPrevious(blogId int) (*Blog,error) {\n\to := orm.NewOrm()\n\tblog := NewBlog()\n\n\tif err := o.QueryTable(b.TableNameWithPrefix()).Filter(\"blog_id\",blogId).One(blog,\"order_index\"); err != nil {\n\t\tbeego.Error(\"查询文章时出错 ->\",err)\n\t\treturn b,err\n\t}\n\n\terr := o.QueryTable(b.TableNameWithPrefix()).Filter(\"order_index__lte\",blog.OrderIndex).Filter(\"blog_id__lt\",blogId).OrderBy(\"-order_index\",\"-blog_id\").One(blog)\n\n\tif err != nil {\n\t\tbeego.Error(\"查询文章时出错 ->\",err)\n\t}\n\treturn blog,err\n}\n\n\/\/关联文章附件\nfunc (b *Blog) LinkAttach() (err error) {\n\n\to := orm.NewOrm()\n\n\tvar attachList []*Attachment\n\t\/\/当不是关联文章时,用文章ID去查询附件\n\tif b.BlogType != 1 || b.DocumentId <= 0 {\n\t\t_, err = o.QueryTable(NewAttachment().TableNameWithPrefix()).Filter(\"document_id\", b.BlogId).Filter(\"book_id\",0).All(&attachList)\n\t\tif err != nil {\n\t\t\tbeego.Error(\"查询文章附件时出错 ->\", err)\n\t\t}\n\t}else {\n\t\t_, err = o.QueryTable(NewAttachment().TableNameWithPrefix()).Filter(\"document_id\", b.DocumentId).Filter(\"book_id\", b.BookId).All(&attachList)\n\n\t\tif err != nil {\n\t\t\tbeego.Error(\"查询文章附件时出错 ->\", err)\n\t\t}\n\t}\n\tb.AttachList = attachList\n\treturn\n}<|endoftext|>"} {"text":"<commit_before>package models\n\nimport (\n\t\"fmt\"\n)\n\n\/\/ User stores login user name and GitHub username\ntype User struct {\n\tLoginName string\n\tGitHubUsername string\n}\n\n\/\/ NewUser creates new User instance\nfunc NewUser(loginName string, githubUsername string) *User {\n\treturn &User{\n\t\tLoginName: loginName,\n\t\tGitHubUsername: githubUsername,\n\t}\n}\n\nfunc (u *User) String() string {\n\treturn fmt.Sprintf(\"%v:@%v\", u.LoginName, u.GitHubUsername)\n}\n<commit_msg>Add SlackUsername<commit_after>package models\n\nimport (\n\t\"fmt\"\n)\n\n\/\/ User stores login user name and GitHub username\ntype User struct {\n\tLoginName string\n\tGitHubUsername string\n\tSlackUsername string\n}\n\n\/\/ NewUser creates new User instance\nfunc NewUser(loginName string, githubUsername string, slackUsername string) *User {\n\treturn &User{\n\t\tLoginName: loginName,\n\t\tGitHubUsername: githubUsername,\n\t\tSlackUsername: slackUsername,\n\t}\n}\n\nfunc (u *User) String() string {\n\treturn fmt.Sprintf(\"%v:@%v\", u.LoginName, u.GitHubUsername)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright (c) 2009 Samuel Tesla <samuel.tesla@gmail.com>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*\/\npackage main\n\nimport (\n\t\"bytes\";\n\t\"os\";\n)\n\nconst (\n\tbufferSize\t= 512;\n\tchanSize\t= 8;\n)\n\nfunc newBuffer() []byte\t{ return make([]byte, bufferSize)[0:0] }\n\nfunc fillBuf(ch <-chan []byte, buf *bytes.Buffer) (isClosed bool) {\n\tbytes := <-ch;\n\tif bytes != nil {\n\t\tbuf.Write(bytes)\n\t} else {\n\t\tisClosed = closed(ch)\n\t}\n\treturn;\n}\n\nfunc mockConnection() (rwc *mockServer, client *mockClient) {\n\tin := make(chan []byte, chanSize);\n\tout := make(chan []byte, chanSize);\n\trwc = &mockServer{in: in, out: out, buf: bytes.NewBuffer(newBuffer())};\n\tclient = &mockClient{in: in, out: out, buf: bytes.NewBuffer(newBuffer())};\n\treturn;\n}\n\ntype mockServer struct {\n\tin\t<-chan []byte;\n\tout\tchan<- []byte;\n\n\tclosed\tbool;\n\tbuf\t*bytes.Buffer;\n}\n\nfunc (self *mockServer) Read(b []byte) (n int, err os.Error) {\n\tif self.buf.Len() >= len(b) {\n\t\treturn self.buf.Read(b)\n\t}\n\n\tfor !self.closed && self.buf.Len() < len(b) {\n\t\tself.closed = fillBuf(self.in, self.buf)\n\t}\n\treturn self.buf.Read(b);\n}\n\nfunc (self *mockServer) Write(b []byte) (n int, err os.Error) {\n\tself.out <- b;\n\treturn len(b), nil;\n}\n\nfunc (self *mockServer) Close() os.Error {\n\tclose(self.out);\n\treturn nil;\n}\n\ntype mockClient struct {\n\tin\tchan<- []byte;\n\tout\t<-chan []byte;\n\n\tclosed\tbool;\n\tbuf\t*bytes.Buffer;\n}\n\nfunc (self *mockClient) Close()\t{ close(self.in) }\n\nfunc (self *mockClient) Closed() bool {\n\tif !self.closed {\n\t\tbytes, ok := <-self.out;\n\t\tif ok {\n\t\t\tself.buf.Write(bytes);\n\t\t\tself.closed = closed(self.out);\n\t\t}\n\t}\n\treturn self.closed;\n}\n\nfunc (self *mockClient) Read(b []byte) (n int, err os.Error) {\n\tif self.buf.Len() >= len(b) {\n\t\treturn self.buf.Read(b)\n\t}\n\n\tfor !self.closed && self.buf.Len() < len(b) {\n\t\tself.closed = fillBuf(self.out, self.buf)\n\t}\n\treturn self.buf.Read(b);\n}\n\nfunc (self *mockClient) Send(b []byte)\t{ self.in <- b }\n<commit_msg>Put mockConnection at the top of the file<commit_after>\/*\nCopyright (c) 2009 Samuel Tesla <samuel.tesla@gmail.com>\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n*\/\npackage main\n\nimport (\n\t\"bytes\";\n\t\"os\";\n)\n\nconst (\n\tbufferSize\t= 512;\n\tchanSize\t= 8;\n)\n\nfunc mockConnection() (rwc *mockServer, client *mockClient) {\n\tin := make(chan []byte, chanSize);\n\tout := make(chan []byte, chanSize);\n\trwc = &mockServer{in: in, out: out, buf: bytes.NewBuffer(newBuffer())};\n\tclient = &mockClient{in: in, out: out, buf: bytes.NewBuffer(newBuffer())};\n\treturn;\n}\n\nfunc newBuffer() []byte\t{ return make([]byte, bufferSize)[0:0] }\n\nfunc fillBuf(ch <-chan []byte, buf *bytes.Buffer) (isClosed bool) {\n\tbytes := <-ch;\n\tif bytes != nil {\n\t\tbuf.Write(bytes)\n\t} else {\n\t\tisClosed = closed(ch)\n\t}\n\treturn;\n}\n\ntype mockServer struct {\n\tin\t<-chan []byte;\n\tout\tchan<- []byte;\n\n\tclosed\tbool;\n\tbuf\t*bytes.Buffer;\n}\n\nfunc (self *mockServer) Read(b []byte) (n int, err os.Error) {\n\tif self.buf.Len() >= len(b) {\n\t\treturn self.buf.Read(b)\n\t}\n\n\tfor !self.closed && self.buf.Len() < len(b) {\n\t\tself.closed = fillBuf(self.in, self.buf)\n\t}\n\treturn self.buf.Read(b);\n}\n\nfunc (self *mockServer) Write(b []byte) (n int, err os.Error) {\n\tself.out <- b;\n\treturn len(b), nil;\n}\n\nfunc (self *mockServer) Close() os.Error {\n\tclose(self.out);\n\treturn nil;\n}\n\ntype mockClient struct {\n\tin\tchan<- []byte;\n\tout\t<-chan []byte;\n\n\tclosed\tbool;\n\tbuf\t*bytes.Buffer;\n}\n\nfunc (self *mockClient) Close()\t{ close(self.in) }\n\nfunc (self *mockClient) Closed() bool {\n\tif !self.closed {\n\t\tbytes, ok := <-self.out;\n\t\tif ok {\n\t\t\tself.buf.Write(bytes);\n\t\t\tself.closed = closed(self.out);\n\t\t}\n\t}\n\treturn self.closed;\n}\n\nfunc (self *mockClient) Read(b []byte) (n int, err os.Error) {\n\tif self.buf.Len() >= len(b) {\n\t\treturn self.buf.Read(b)\n\t}\n\n\tfor !self.closed && self.buf.Len() < len(b) {\n\t\tself.closed = fillBuf(self.out, self.buf)\n\t}\n\treturn self.buf.Read(b);\n}\n\nfunc (self *mockClient) Send(b []byte)\t{ self.in <- b }\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build go1.13\n\npackage mount\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strconv\"\n\n\t\"github.com\/moby\/sys\/mountinfo\"\n)\n\n\/\/ mountError records an error from mount or unmount operation\ntype mountError struct {\n\top string\n\tsource, target string\n\tflags uintptr\n\tdata string\n\terr error\n}\n\nfunc (e *mountError) Error() string {\n\tout := e.op + \" \"\n\n\tif e.source != \"\" {\n\t\tout += e.source + \":\" + e.target\n\t} else {\n\t\tout += e.target\n\t}\n\n\tif e.flags != uintptr(0) {\n\t\tout += \", flags: 0x\" + strconv.FormatUint(uint64(e.flags), 16)\n\t}\n\tif e.data != \"\" {\n\t\tout += \", data: \" + e.data\n\t}\n\n\tout += \": \" + e.err.Error()\n\treturn out\n}\n\n\/\/ Cause returns the underlying cause of the error.\n\/\/ This is a convention used in github.com\/pkg\/errors\nfunc (e *mountError) Cause() error {\n\treturn e.err\n}\n\n\/\/ Unwrap returns the underlying error.\n\/\/ This is a convention used in golang 1.13+\nfunc (e *mountError) Unwrap() error {\n\treturn e.err\n}\n\n\/\/ Mount will mount filesystem according to the specified configuration.\n\/\/ Options must be specified like the mount or fstab unix commands:\n\/\/ \"opt1=val1,opt2=val2\". See flags.go for supported option flags.\nfunc Mount(device, target, mType, options string) error {\n\tflag, data := parseOptions(options)\n\treturn mount(device, target, mType, uintptr(flag), data)\n}\n\n\/\/ ForceMount will mount filesystem according to the specified configuration.\n\/\/ Options must be specified like the mount or fstab unix commands:\n\/\/ \"opt1=val1,opt2=val2\". See flags.go for supported option flags.\n\/\/\n\/\/ Deprecated: use Mount instead.\nfunc ForceMount(device, target, mType, options string) error {\n\tflag, data := parseOptions(options)\n\treturn mount(device, target, mType, uintptr(flag), data)\n}\n\n\/\/ Unmount lazily unmounts a filesystem on supported platforms, otherwise\n\/\/ does a normal unmount.\nfunc Unmount(target string) error {\n\treturn unmount(target, mntDetach)\n}\n\n\/\/ RecursiveUnmount unmounts the target and all mounts underneath, starting with\n\/\/ the deepsest mount first.\nfunc RecursiveUnmount(target string) error {\n\tmounts, err := mountinfo.GetMounts(mountinfo.PrefixFilter(target))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Make the deepest mount be first\n\tsort.Slice(mounts, func(i, j int) bool {\n\t\treturn len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint)\n\t})\n\n\tvar suberr error\n\tfor i, m := range mounts {\n\t\terr = unmount(m.Mountpoint, mntDetach)\n\t\tif err != nil {\n\t\t\tif i == len(mounts)-1 { \/\/ last mount\n\t\t\t\treturn fmt.Errorf(\"%w (possible cause: %s)\", err, suberr)\n\t\t\t}\n\t\t\t\/\/ This is a submount, we can ignore the error for now,\n\t\t\t\/\/ the final unmount will fail if this is a real problem.\n\t\t\t\/\/ With that in mind, the _first_ failed unmount error\n\t\t\t\/\/ might be the real error cause, so let's keep it.\n\t\t\tif suberr == nil {\n\t\t\t\tsuberr = err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>mount: rm ForceMount<commit_after>\/\/ +build go1.13\n\npackage mount\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strconv\"\n\n\t\"github.com\/moby\/sys\/mountinfo\"\n)\n\n\/\/ mountError records an error from mount or unmount operation\ntype mountError struct {\n\top string\n\tsource, target string\n\tflags uintptr\n\tdata string\n\terr error\n}\n\nfunc (e *mountError) Error() string {\n\tout := e.op + \" \"\n\n\tif e.source != \"\" {\n\t\tout += e.source + \":\" + e.target\n\t} else {\n\t\tout += e.target\n\t}\n\n\tif e.flags != uintptr(0) {\n\t\tout += \", flags: 0x\" + strconv.FormatUint(uint64(e.flags), 16)\n\t}\n\tif e.data != \"\" {\n\t\tout += \", data: \" + e.data\n\t}\n\n\tout += \": \" + e.err.Error()\n\treturn out\n}\n\n\/\/ Cause returns the underlying cause of the error.\n\/\/ This is a convention used in github.com\/pkg\/errors\nfunc (e *mountError) Cause() error {\n\treturn e.err\n}\n\n\/\/ Unwrap returns the underlying error.\n\/\/ This is a convention used in golang 1.13+\nfunc (e *mountError) Unwrap() error {\n\treturn e.err\n}\n\n\/\/ Mount will mount filesystem according to the specified configuration.\n\/\/ Options must be specified like the mount or fstab unix commands:\n\/\/ \"opt1=val1,opt2=val2\". See flags.go for supported option flags.\nfunc Mount(device, target, mType, options string) error {\n\tflag, data := parseOptions(options)\n\treturn mount(device, target, mType, uintptr(flag), data)\n}\n\n\/\/ Unmount lazily unmounts a filesystem on supported platforms, otherwise\n\/\/ does a normal unmount.\nfunc Unmount(target string) error {\n\treturn unmount(target, mntDetach)\n}\n\n\/\/ RecursiveUnmount unmounts the target and all mounts underneath, starting with\n\/\/ the deepsest mount first.\nfunc RecursiveUnmount(target string) error {\n\tmounts, err := mountinfo.GetMounts(mountinfo.PrefixFilter(target))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Make the deepest mount be first\n\tsort.Slice(mounts, func(i, j int) bool {\n\t\treturn len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint)\n\t})\n\n\tvar suberr error\n\tfor i, m := range mounts {\n\t\terr = unmount(m.Mountpoint, mntDetach)\n\t\tif err != nil {\n\t\t\tif i == len(mounts)-1 { \/\/ last mount\n\t\t\t\treturn fmt.Errorf(\"%w (possible cause: %s)\", err, suberr)\n\t\t\t}\n\t\t\t\/\/ This is a submount, we can ignore the error for now,\n\t\t\t\/\/ the final unmount will fail if this is a real problem.\n\t\t\t\/\/ With that in mind, the _first_ failed unmount error\n\t\t\t\/\/ might be the real error cause, so let's keep it.\n\t\t\tif suberr == nil {\n\t\t\t\tsuberr = err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package mount provides a Datastore that has other Datastores\n\/\/ mounted at various key prefixes and is threadsafe\npackage mount\n\nimport (\n\t\"container\/heap\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\n\tds \"github.com\/ipfs\/go-datastore\"\n\t\"github.com\/ipfs\/go-datastore\/query\"\n)\n\nvar (\n\tErrNoMount = errors.New(\"no datastore mounted for this key\")\n)\n\ntype Mount struct {\n\tPrefix ds.Key\n\tDatastore ds.Datastore\n}\n\nfunc New(mounts []Mount) *Datastore {\n\t\/\/ make a copy so we're sure it doesn't mutate\n\tm := make([]Mount, len(mounts))\n\tfor i, v := range mounts {\n\t\tm[i] = v\n\t}\n\tsort.Slice(m, func(i, j int) bool { return m[i].Prefix.String() > m[j].Prefix.String() })\n\treturn &Datastore{mounts: m}\n}\n\ntype Datastore struct {\n\tmounts []Mount\n}\n\nvar _ ds.Datastore = (*Datastore)(nil)\n\nfunc (d *Datastore) lookup(key ds.Key) (ds.Datastore, ds.Key, ds.Key) {\n\tfor _, m := range d.mounts {\n\t\tif m.Prefix.Equal(key) || m.Prefix.IsAncestorOf(key) {\n\t\t\ts := strings.TrimPrefix(key.String(), m.Prefix.String())\n\t\t\tk := ds.NewKey(s)\n\t\t\treturn m.Datastore, m.Prefix, k\n\t\t}\n\t}\n\treturn nil, ds.NewKey(\"\/\"), key\n}\n\ntype queryResults struct {\n\tmount ds.Key\n\tresults query.Results\n\tnext query.Result\n}\n\nfunc (qr *queryResults) advance() bool {\n\tif qr.results == nil {\n\t\treturn false\n\t}\n\n\tqr.next = query.Result{}\n\tr, more := qr.results.NextSync()\n\tif !more {\n\t\terr := qr.results.Close()\n\t\tqr.results = nil\n\t\tif err != nil {\n\t\t\t\/\/ One more result, the error.\n\t\t\tqr.next = query.Result{Error: err}\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tr.Key = qr.mount.Child(ds.RawKey(r.Key)).String()\n\tqr.next = r\n\treturn true\n}\n\ntype querySet struct {\n\tquery query.Query\n\theads []*queryResults\n}\n\nfunc (h *querySet) Len() int {\n\treturn len(h.heads)\n}\n\nfunc (h *querySet) Less(i, j int) bool {\n\treturn query.Less(h.query.Orders, h.heads[i].next.Entry, h.heads[j].next.Entry)\n}\n\nfunc (h *querySet) Swap(i, j int) {\n\th.heads[i], h.heads[j] = h.heads[j], h.heads[i]\n}\n\nfunc (h *querySet) Push(x interface{}) {\n\th.heads = append(h.heads, x.(*queryResults))\n}\n\nfunc (h *querySet) Pop() interface{} {\n\ti := len(h.heads) - 1\n\tlast := h.heads[i]\n\th.heads[i] = nil\n\th.heads = h.heads[:i]\n\treturn last\n}\n\nfunc (h *querySet) close() error {\n\tvar errs []error\n\tfor _, qr := range h.heads {\n\t\terr := qr.results.Close()\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\th.heads = nil\n\tif len(errs) > 0 {\n\t\treturn errs[0]\n\t}\n\treturn nil\n}\n\nfunc (h *querySet) addResults(mount ds.Key, results query.Results) {\n\tr := &queryResults{\n\t\tresults: results,\n\t\tmount: mount,\n\t}\n\tif r.advance() {\n\t\theap.Push(h, r)\n\t}\n}\n\nfunc (h *querySet) next() (query.Result, bool) {\n\tif len(h.heads) == 0 {\n\t\treturn query.Result{}, false\n\t}\n\thead := h.heads[0]\n\tnext := head.next\n\n\tfor head.advance() {\n\t\theap.Fix(h, 0)\n\t\treturn next, true\n\t}\n\theap.Remove(h, 0)\n\treturn next, true\n}\n\n\/\/ lookupAll returns all mounts that might contain keys that are descendant of <key>\n\/\/\n\/\/ Matching: \/ao\/e\n\/\/\n\/\/ \/ B \/ao\/e\n\/\/ \/a\/ not matching\n\/\/ \/ao\/ B \/e\n\/\/ \/ao\/e\/ A \/\n\/\/ \/ao\/e\/uh\/ A \/\n\/\/ \/aoe\/ not matching\nfunc (d *Datastore) lookupAll(key ds.Key) (dst []ds.Datastore, mountpoint, rest []ds.Key) {\n\tfor _, m := range d.mounts {\n\t\tp := m.Prefix.String()\n\t\tif len(p) > 1 {\n\t\t\tp = p + \"\/\"\n\t\t}\n\n\t\tif strings.HasPrefix(p, key.String()) {\n\t\t\tdst = append(dst, m.Datastore)\n\t\t\tmountpoint = append(mountpoint, m.Prefix)\n\t\t\trest = append(rest, ds.NewKey(\"\/\"))\n\t\t} else if strings.HasPrefix(key.String(), p) {\n\t\t\tr := strings.TrimPrefix(key.String(), m.Prefix.String())\n\n\t\t\tdst = append(dst, m.Datastore)\n\t\t\tmountpoint = append(mountpoint, m.Prefix)\n\t\t\trest = append(rest, ds.NewKey(r))\n\t\t}\n\t}\n\treturn dst, mountpoint, rest\n}\n\nfunc (d *Datastore) Put(key ds.Key, value []byte) error {\n\tcds, _, k := d.lookup(key)\n\tif cds == nil {\n\t\treturn ErrNoMount\n\t}\n\treturn cds.Put(k, value)\n}\n\nfunc (d *Datastore) Get(key ds.Key) (value []byte, err error) {\n\tcds, _, k := d.lookup(key)\n\tif cds == nil {\n\t\treturn nil, ds.ErrNotFound\n\t}\n\treturn cds.Get(k)\n}\n\nfunc (d *Datastore) Has(key ds.Key) (exists bool, err error) {\n\tcds, _, k := d.lookup(key)\n\tif cds == nil {\n\t\treturn false, nil\n\t}\n\treturn cds.Has(k)\n}\n\nfunc (d *Datastore) GetSize(key ds.Key) (size int, err error) {\n\tcds, _, k := d.lookup(key)\n\tif cds == nil {\n\t\treturn -1, ds.ErrNotFound\n\t}\n\treturn cds.GetSize(k)\n}\n\nfunc (d *Datastore) Delete(key ds.Key) error {\n\tcds, _, k := d.lookup(key)\n\tif cds == nil {\n\t\treturn ds.ErrNotFound\n\t}\n\treturn cds.Delete(k)\n}\n\nfunc (d *Datastore) Query(master query.Query) (query.Results, error) {\n\tchildQuery := query.Query{\n\t\tPrefix: master.Prefix,\n\t\tLimit: master.Limit,\n\t\tOrders: master.Orders,\n\t\tKeysOnly: master.KeysOnly,\n\t\tReturnExpirations: master.ReturnExpirations,\n\t}\n\n\tprefix := ds.NewKey(childQuery.Prefix)\n\tdses, mounts, rests := d.lookupAll(prefix)\n\n\tqueries := &querySet{\n\t\tquery: childQuery,\n\t\theads: make([]*queryResults, 0, len(dses)),\n\t}\n\n\tfor i := range dses {\n\t\tmount := mounts[i]\n\t\tdstore := dses[i]\n\t\trest := rests[i]\n\n\t\tqi := childQuery\n\t\tqi.Prefix = rest.String()\n\t\tresults, err := dstore.Query(qi)\n\n\t\tif err != nil {\n\t\t\t_ = queries.close()\n\t\t\treturn nil, err\n\t\t}\n\t\tqueries.addResults(mount, results)\n\t}\n\n\tqr := query.ResultsFromIterator(childQuery, query.Iterator{\n\t\tNext: queries.next,\n\t\tClose: queries.close,\n\t})\n\n\tif len(master.Filters) > 0 {\n\t\tfor _, f := range master.Filters {\n\t\t\tqr = query.NaiveFilter(qr, f)\n\t\t}\n\t}\n\n\tif master.Offset > 0 {\n\t\tqr = query.NaiveOffset(qr, master.Offset)\n\t}\n\n\tif childQuery.Limit > 0 {\n\t\tqr = query.NaiveLimit(qr, childQuery.Limit)\n\t}\n\n\treturn qr, nil\n}\n\nfunc (d *Datastore) Close() error {\n\tfor _, d := range d.mounts {\n\t\terr := d.Datastore.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ DiskUsage returns the sum of DiskUsages for the mounted datastores.\n\/\/ Non PersistentDatastores will not be accounted.\nfunc (d *Datastore) DiskUsage() (uint64, error) {\n\tvar duTotal uint64 = 0\n\tfor _, d := range d.mounts {\n\t\tdu, err := ds.DiskUsage(d.Datastore)\n\t\tduTotal += du\n\t\tif err != nil {\n\t\t\treturn duTotal, err\n\t\t}\n\t}\n\treturn duTotal, nil\n}\n\ntype mountBatch struct {\n\tmounts map[string]ds.Batch\n\tlk sync.Mutex\n\n\td *Datastore\n}\n\nfunc (d *Datastore) Batch() (ds.Batch, error) {\n\treturn &mountBatch{\n\t\tmounts: make(map[string]ds.Batch),\n\t\td: d,\n\t}, nil\n}\n\nfunc (mt *mountBatch) lookupBatch(key ds.Key) (ds.Batch, ds.Key, error) {\n\tmt.lk.Lock()\n\tdefer mt.lk.Unlock()\n\n\tchild, loc, rest := mt.d.lookup(key)\n\tt, ok := mt.mounts[loc.String()]\n\tif !ok {\n\t\tbds, ok := child.(ds.Batching)\n\t\tif !ok {\n\t\t\treturn nil, ds.NewKey(\"\"), ds.ErrBatchUnsupported\n\t\t}\n\t\tvar err error\n\t\tt, err = bds.Batch()\n\t\tif err != nil {\n\t\t\treturn nil, ds.NewKey(\"\"), err\n\t\t}\n\t\tmt.mounts[loc.String()] = t\n\t}\n\treturn t, rest, nil\n}\n\nfunc (mt *mountBatch) Put(key ds.Key, val []byte) error {\n\tt, rest, err := mt.lookupBatch(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn t.Put(rest, val)\n}\n\nfunc (mt *mountBatch) Delete(key ds.Key) error {\n\tt, rest, err := mt.lookupBatch(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn t.Delete(rest)\n}\n\nfunc (mt *mountBatch) Commit() error {\n\tmt.lk.Lock()\n\tdefer mt.lk.Unlock()\n\n\tfor _, t := range mt.mounts {\n\t\terr := t.Commit()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *Datastore) Check() error {\n\tfor _, m := range d.mounts {\n\t\tif c, ok := m.Datastore.(ds.CheckedDatastore); ok {\n\t\t\tif err := c.Check(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"checking datastore at %s: %s\", m.Prefix.String(), err.Error())\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *Datastore) Scrub() error {\n\tfor _, m := range d.mounts {\n\t\tif c, ok := m.Datastore.(ds.ScrubbedDatastore); ok {\n\t\t\tif err := c.Scrub(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"scrubbing datastore at %s: %s\", m.Prefix.String(), err.Error())\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *Datastore) CollectGarbage() error {\n\tfor _, m := range d.mounts {\n\t\tif c, ok := m.Datastore.(ds.GCDatastore); ok {\n\t\t\tif err := c.CollectGarbage(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"gc on datastore at %s: %s\", m.Prefix.String(), err.Error())\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Run go fmt on mount.go<commit_after>\/\/ Package mount provides a Datastore that has other Datastores\n\/\/ mounted at various key prefixes and is threadsafe\npackage mount\n\nimport (\n\t\"container\/heap\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\n\tds \"github.com\/ipfs\/go-datastore\"\n\t\"github.com\/ipfs\/go-datastore\/query\"\n)\n\nvar (\n\tErrNoMount = errors.New(\"no datastore mounted for this key\")\n)\n\ntype Mount struct {\n\tPrefix ds.Key\n\tDatastore ds.Datastore\n}\n\nfunc New(mounts []Mount) *Datastore {\n\t\/\/ make a copy so we're sure it doesn't mutate\n\tm := make([]Mount, len(mounts))\n\tfor i, v := range mounts {\n\t\tm[i] = v\n\t}\n\tsort.Slice(m, func(i, j int) bool { return m[i].Prefix.String() > m[j].Prefix.String() })\n\treturn &Datastore{mounts: m}\n}\n\ntype Datastore struct {\n\tmounts []Mount\n}\n\nvar _ ds.Datastore = (*Datastore)(nil)\n\nfunc (d *Datastore) lookup(key ds.Key) (ds.Datastore, ds.Key, ds.Key) {\n\tfor _, m := range d.mounts {\n\t\tif m.Prefix.Equal(key) || m.Prefix.IsAncestorOf(key) {\n\t\t\ts := strings.TrimPrefix(key.String(), m.Prefix.String())\n\t\t\tk := ds.NewKey(s)\n\t\t\treturn m.Datastore, m.Prefix, k\n\t\t}\n\t}\n\treturn nil, ds.NewKey(\"\/\"), key\n}\n\ntype queryResults struct {\n\tmount ds.Key\n\tresults query.Results\n\tnext query.Result\n}\n\nfunc (qr *queryResults) advance() bool {\n\tif qr.results == nil {\n\t\treturn false\n\t}\n\n\tqr.next = query.Result{}\n\tr, more := qr.results.NextSync()\n\tif !more {\n\t\terr := qr.results.Close()\n\t\tqr.results = nil\n\t\tif err != nil {\n\t\t\t\/\/ One more result, the error.\n\t\t\tqr.next = query.Result{Error: err}\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tr.Key = qr.mount.Child(ds.RawKey(r.Key)).String()\n\tqr.next = r\n\treturn true\n}\n\ntype querySet struct {\n\tquery query.Query\n\theads []*queryResults\n}\n\nfunc (h *querySet) Len() int {\n\treturn len(h.heads)\n}\n\nfunc (h *querySet) Less(i, j int) bool {\n\treturn query.Less(h.query.Orders, h.heads[i].next.Entry, h.heads[j].next.Entry)\n}\n\nfunc (h *querySet) Swap(i, j int) {\n\th.heads[i], h.heads[j] = h.heads[j], h.heads[i]\n}\n\nfunc (h *querySet) Push(x interface{}) {\n\th.heads = append(h.heads, x.(*queryResults))\n}\n\nfunc (h *querySet) Pop() interface{} {\n\ti := len(h.heads) - 1\n\tlast := h.heads[i]\n\th.heads[i] = nil\n\th.heads = h.heads[:i]\n\treturn last\n}\n\nfunc (h *querySet) close() error {\n\tvar errs []error\n\tfor _, qr := range h.heads {\n\t\terr := qr.results.Close()\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\th.heads = nil\n\tif len(errs) > 0 {\n\t\treturn errs[0]\n\t}\n\treturn nil\n}\n\nfunc (h *querySet) addResults(mount ds.Key, results query.Results) {\n\tr := &queryResults{\n\t\tresults: results,\n\t\tmount: mount,\n\t}\n\tif r.advance() {\n\t\theap.Push(h, r)\n\t}\n}\n\nfunc (h *querySet) next() (query.Result, bool) {\n\tif len(h.heads) == 0 {\n\t\treturn query.Result{}, false\n\t}\n\thead := h.heads[0]\n\tnext := head.next\n\n\tfor head.advance() {\n\t\theap.Fix(h, 0)\n\t\treturn next, true\n\t}\n\theap.Remove(h, 0)\n\treturn next, true\n}\n\n\/\/ lookupAll returns all mounts that might contain keys that are descendant of <key>\n\/\/\n\/\/ Matching: \/ao\/e\n\/\/\n\/\/ \/ B \/ao\/e\n\/\/ \/a\/ not matching\n\/\/ \/ao\/ B \/e\n\/\/ \/ao\/e\/ A \/\n\/\/ \/ao\/e\/uh\/ A \/\n\/\/ \/aoe\/ not matching\nfunc (d *Datastore) lookupAll(key ds.Key) (dst []ds.Datastore, mountpoint, rest []ds.Key) {\n\tfor _, m := range d.mounts {\n\t\tp := m.Prefix.String()\n\t\tif len(p) > 1 {\n\t\t\tp = p + \"\/\"\n\t\t}\n\n\t\tif strings.HasPrefix(p, key.String()) {\n\t\t\tdst = append(dst, m.Datastore)\n\t\t\tmountpoint = append(mountpoint, m.Prefix)\n\t\t\trest = append(rest, ds.NewKey(\"\/\"))\n\t\t} else if strings.HasPrefix(key.String(), p) {\n\t\t\tr := strings.TrimPrefix(key.String(), m.Prefix.String())\n\n\t\t\tdst = append(dst, m.Datastore)\n\t\t\tmountpoint = append(mountpoint, m.Prefix)\n\t\t\trest = append(rest, ds.NewKey(r))\n\t\t}\n\t}\n\treturn dst, mountpoint, rest\n}\n\nfunc (d *Datastore) Put(key ds.Key, value []byte) error {\n\tcds, _, k := d.lookup(key)\n\tif cds == nil {\n\t\treturn ErrNoMount\n\t}\n\treturn cds.Put(k, value)\n}\n\nfunc (d *Datastore) Get(key ds.Key) (value []byte, err error) {\n\tcds, _, k := d.lookup(key)\n\tif cds == nil {\n\t\treturn nil, ds.ErrNotFound\n\t}\n\treturn cds.Get(k)\n}\n\nfunc (d *Datastore) Has(key ds.Key) (exists bool, err error) {\n\tcds, _, k := d.lookup(key)\n\tif cds == nil {\n\t\treturn false, nil\n\t}\n\treturn cds.Has(k)\n}\n\nfunc (d *Datastore) GetSize(key ds.Key) (size int, err error) {\n\tcds, _, k := d.lookup(key)\n\tif cds == nil {\n\t\treturn -1, ds.ErrNotFound\n\t}\n\treturn cds.GetSize(k)\n}\n\nfunc (d *Datastore) Delete(key ds.Key) error {\n\tcds, _, k := d.lookup(key)\n\tif cds == nil {\n\t\treturn ds.ErrNotFound\n\t}\n\treturn cds.Delete(k)\n}\n\nfunc (d *Datastore) Query(master query.Query) (query.Results, error) {\n\tchildQuery := query.Query{\n\t\tPrefix: master.Prefix,\n\t\tLimit: master.Limit,\n\t\tOrders: master.Orders,\n\t\tKeysOnly: master.KeysOnly,\n\t\tReturnExpirations: master.ReturnExpirations,\n\t}\n\n\tprefix := ds.NewKey(childQuery.Prefix)\n\tdses, mounts, rests := d.lookupAll(prefix)\n\n\tqueries := &querySet{\n\t\tquery: childQuery,\n\t\theads: make([]*queryResults, 0, len(dses)),\n\t}\n\n\tfor i := range dses {\n\t\tmount := mounts[i]\n\t\tdstore := dses[i]\n\t\trest := rests[i]\n\n\t\tqi := childQuery\n\t\tqi.Prefix = rest.String()\n\t\tresults, err := dstore.Query(qi)\n\n\t\tif err != nil {\n\t\t\t_ = queries.close()\n\t\t\treturn nil, err\n\t\t}\n\t\tqueries.addResults(mount, results)\n\t}\n\n\tqr := query.ResultsFromIterator(childQuery, query.Iterator{\n\t\tNext: queries.next,\n\t\tClose: queries.close,\n\t})\n\n\tif len(master.Filters) > 0 {\n\t\tfor _, f := range master.Filters {\n\t\t\tqr = query.NaiveFilter(qr, f)\n\t\t}\n\t}\n\n\tif master.Offset > 0 {\n\t\tqr = query.NaiveOffset(qr, master.Offset)\n\t}\n\n\tif childQuery.Limit > 0 {\n\t\tqr = query.NaiveLimit(qr, childQuery.Limit)\n\t}\n\n\treturn qr, nil\n}\n\nfunc (d *Datastore) Close() error {\n\tfor _, d := range d.mounts {\n\t\terr := d.Datastore.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ DiskUsage returns the sum of DiskUsages for the mounted datastores.\n\/\/ Non PersistentDatastores will not be accounted.\nfunc (d *Datastore) DiskUsage() (uint64, error) {\n\tvar duTotal uint64 = 0\n\tfor _, d := range d.mounts {\n\t\tdu, err := ds.DiskUsage(d.Datastore)\n\t\tduTotal += du\n\t\tif err != nil {\n\t\t\treturn duTotal, err\n\t\t}\n\t}\n\treturn duTotal, nil\n}\n\ntype mountBatch struct {\n\tmounts map[string]ds.Batch\n\tlk sync.Mutex\n\n\td *Datastore\n}\n\nfunc (d *Datastore) Batch() (ds.Batch, error) {\n\treturn &mountBatch{\n\t\tmounts: make(map[string]ds.Batch),\n\t\td: d,\n\t}, nil\n}\n\nfunc (mt *mountBatch) lookupBatch(key ds.Key) (ds.Batch, ds.Key, error) {\n\tmt.lk.Lock()\n\tdefer mt.lk.Unlock()\n\n\tchild, loc, rest := mt.d.lookup(key)\n\tt, ok := mt.mounts[loc.String()]\n\tif !ok {\n\t\tbds, ok := child.(ds.Batching)\n\t\tif !ok {\n\t\t\treturn nil, ds.NewKey(\"\"), ds.ErrBatchUnsupported\n\t\t}\n\t\tvar err error\n\t\tt, err = bds.Batch()\n\t\tif err != nil {\n\t\t\treturn nil, ds.NewKey(\"\"), err\n\t\t}\n\t\tmt.mounts[loc.String()] = t\n\t}\n\treturn t, rest, nil\n}\n\nfunc (mt *mountBatch) Put(key ds.Key, val []byte) error {\n\tt, rest, err := mt.lookupBatch(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn t.Put(rest, val)\n}\n\nfunc (mt *mountBatch) Delete(key ds.Key) error {\n\tt, rest, err := mt.lookupBatch(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn t.Delete(rest)\n}\n\nfunc (mt *mountBatch) Commit() error {\n\tmt.lk.Lock()\n\tdefer mt.lk.Unlock()\n\n\tfor _, t := range mt.mounts {\n\t\terr := t.Commit()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *Datastore) Check() error {\n\tfor _, m := range d.mounts {\n\t\tif c, ok := m.Datastore.(ds.CheckedDatastore); ok {\n\t\t\tif err := c.Check(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"checking datastore at %s: %s\", m.Prefix.String(), err.Error())\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *Datastore) Scrub() error {\n\tfor _, m := range d.mounts {\n\t\tif c, ok := m.Datastore.(ds.ScrubbedDatastore); ok {\n\t\t\tif err := c.Scrub(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"scrubbing datastore at %s: %s\", m.Prefix.String(), err.Error())\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *Datastore) CollectGarbage() error {\n\tfor _, m := range d.mounts {\n\t\tif c, ok := m.Datastore.(ds.GCDatastore); ok {\n\t\t\tif err := c.CollectGarbage(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"gc on datastore at %s: %s\", m.Prefix.String(), err.Error())\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"strconv\"\n \"fmt\"\n \"encoding\/json\"\n\t\".\/mqttc\"\n\t\"git.eclipse.org\/gitroot\/paho\/org.eclipse.paho.mqtt.golang.git\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/marpaia\/graphite-golang\"\n\t\"gopkg.in\/alecthomas\/kingpin.v1\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\tGraphite *graphite.Graphite\n\tNOOP = false\n)\n\ntype Host struct {\n\tIP string `json:\"ip\"`\n\tName string `json:\"hostname\"`\n\tHop int `json:\"hop-number\"`\n\tSent int `json:\"sent\"`\n\tLostPercent float64 `json:\"lost-percent\"`\n\tLast float64 `json:\"last\"`\n\tAvg float64 `json:\"avg\"`\n\tBest float64 `json:\"best\"`\n\tWorst float64 `json:\"worst\"`\n\tStDev float64 `json:\"standard-dev\"`\n}\n\ntype Report struct {\n\tTime time.Time `json:\"time\"`\n\tHosts []*Host `json:\"hosts\"`\n\tHops int `json:\"hops\"`\n\tElapsedTime time.Duration `json:\"elapsed_time\"`\n\tLocation *ReportLocation `json:\"location\"`\n}\n\n\/\/ slightly simpler struct than the one provided by geoipc\ntype ReportLocation struct {\n\tIP string `json:\"ip\"`\n\tCountryCode string `json:\"country_code\"`\n\tCountryName string `json:\"country_name\"`\n\tCity string `json:\"city\"`\n\tLatitude float64 `json:\"latitude\"`\n\tLongitude float64 `json:\"longitude\"`\n}\n\nfunc sendMetrics(client *mqtt.MqttClient, msg mqtt.Message) {\n\tvar report Report\n\terr := json.Unmarshal(msg.Payload(), &report)\n\tif err != nil {\n\t\tlog.Error(\"Error decoding json report\")\n\t}\n\n ip := report.Location.IP\n\tif ip == \"\" {\n\t\tlog.Warnf(\"Discarding report with no source IP\")\n\t\treturn\n\t}\n\tip = strings.Replace(ip, \".\", \"_\", -1)\n\n cc := report.Location.CountryCode\n\tif cc == \"\" {\n\t\tlog.Warnf(\"Discarding report with no country code\")\n\t\treturn\n\t}\n\n\tcity := strings.Replace(report.Location.City, \" \", \"_\", -1)\n\thops := report.Hops\n\tif city == \"\" {\n\t\tcity = \"nil\"\n\t}\n\tlast_hop := report.Hosts[len(report.Hosts) - 1]\n\n\tmetric_prefix := strings.ToLower(fmt.Sprintf(\"push-mtr.%s.%s.%s\", ip, cc, city))\n\tmHops := metric_prefix + \".hops\"\n\tmLastHopAvg := metric_prefix + \".last_hop.avg\"\n\tmLastHopBest := metric_prefix + \".last_hop.best\"\n\tmLastHopWorst := metric_prefix + \".last_hop.worst\"\n\n\tsendMetric(mHops, strconv.Itoa(hops))\n\tsendMetric(mLastHopAvg, strconv.FormatFloat(last_hop.Avg, 'f', 3, 64))\n\tsendMetric(mLastHopBest, strconv.FormatFloat(last_hop.Best, 'f', 3, 64))\n\tsendMetric(mLastHopWorst, strconv.FormatFloat(last_hop.Worst, 'f', 3, 64))\n}\n\nfunc sendMetric(key, value string) {\n\tif NOOP {\n\t\tlog.Infof(\"NOOP: Sending metric %s %s\", key, value)\n\t\treturn\n\t}\n\terr := Graphite.SimpleSend(key, value)\n\tif err != nil {\n\t\tlog.Debugf(\"Error sending metric %s: %s\", key, err)\n\t}\n}\n\nfunc parseBrokerUrls(brokerUrls string) []string {\n\ttokens := strings.Split(brokerUrls, \",\")\n\tfor i, url := range tokens {\n\t\ttokens[i] = strings.TrimSpace(url)\n\t}\n\n\treturn tokens\n}\n\nfunc main() {\n\tkingpin.Version(PKG_VERSION)\n\n\tbrokerUrls := kingpin.Flag(\"broker-urls\", \"Comman separated MQTT broker URLs\").\n\t\tRequired().Default(\"\").OverrideDefaultFromEnvar(\"MQTT_URLS\").String()\n\n\tcafile := kingpin.Flag(\"cafile\", \"CA certificate when using TLS (optional)\").\n\t\tString()\n\n\ttopic := kingpin.Flag(\"topic\", \"MQTT topic\").\n\t\tDefault(\"\/metrics\/mtr\").String()\n\n\tgraphiteHost := kingpin.Flag(\"graphiteHost\", \"Graphite host\").\n\t\tDefault(\"localhost\").String()\n\n\tgraphitePort := kingpin.Flag(\"graphitePort\", \"Graphite port\").\n\t\tDefault(\"2003\").Int()\n\n\tgraphitePing := kingpin.Flag(\"graphitePing\", \"Try to reconnect to graphite every X seconds\").\n\t\tDefault(\"15\").Int()\n\n\tinsecure := kingpin.Flag(\"insecure\", \"Don't verify the server's certificate chain and host name.\").\n\t\tDefault(\"false\").Bool()\n\n\tdebug := kingpin.Flag(\"debug\", \"Print debugging messages\").\n\t\tDefault(\"false\").Bool()\n\n\tnoop := kingpin.Flag(\"noop\", \"Print the metrics being sent instead of sending them\").\n\t\tDefault(\"false\").Bool()\n\n\tkingpin.Parse()\n\n\tif *debug {\n\t\tlog.SetLevel(log.DebugLevel)\n\t}\n\n\tNOOP = *noop\n\n\tif *cafile != \"\" {\n\t\tif _, err := os.Stat(*cafile); err != nil {\n\t\t\tlog.Fatalf(\"Error reading CA certificate %s\", err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\turlList := parseBrokerUrls(*brokerUrls)\n\n\tvar gerr error\n\tGraphite, gerr = graphite.NewGraphite(*graphiteHost, *graphitePort)\n\tif gerr != nil {\n\t\tlog.Warn(\"Error connecting to graphite\")\n\t\tos.Exit(1)\n\t} else {\n\t\tlog.Info(\"Connected to graphite\")\n\t\tlog.Debugf(\"Loaded Graphite connection: %#v\", Graphite)\n\t}\n\n\tfor _, urlStr := range urlList {\n\t\targs := mqttc.Args{\n\t\t\tBrokerURLs: []string{urlStr},\n\t\t\tClientID: \"mtr-collect\",\n\t\t\tTopic: *topic,\n\t\t\tTLSCACertPath: *cafile,\n\t\t\tTLSSkipVerify: *insecure,\n\t\t}\n\n\t\turi, _ := url.Parse(urlStr)\n\t\tc := mqttc.Subscribe(sendMetrics, &args)\n\t\tdefer c.Disconnect(0)\n\t\thost := strings.Split(uri.Host, \":\")[0]\n\t\thost = strings.Replace(host, \".\", \"_\", -1)\n\t}\n\n\t\/\/ Try to reconnect every graphitePing sec if sending fails by sending\n\t\/\/ a fake ping metric\n\t\/\/ FIXME: better handled by the Graphite client\n\tgo func() {\n\t\tfor {\n\t\t\ttime.Sleep(time.Duration(*graphitePing) * time.Second)\n\t\t\terr := Graphite.SimpleSend(\"ping metric\", \"\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Warn(\"Ping metric failed, trying to reconnect\")\n\t\t\t\terr = Graphite.Connect()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"Reconnecting to graphite failed\")\n\t\t\t\t} else {\n\t\t\t\t\tlog.Info(\"Reconnected to graphite\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ wait endlessly\n\tvar loop chan bool\n\tloop <- true\n}\n<commit_msg>Add debugging and send new metric<commit_after>package main\n\nimport (\n\t\".\/mqttc\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"git.eclipse.org\/gitroot\/paho\/org.eclipse.paho.mqtt.golang.git\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/marpaia\/graphite-golang\"\n\t\"gopkg.in\/alecthomas\/kingpin.v1\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\tGraphite *graphite.Graphite\n\tNOOP = false\n\tVERBOSE = true\n)\n\ntype Host struct {\n\tIP string `json:\"ip\"`\n\tName string `json:\"hostname\"`\n\tHop int `json:\"hop-number\"`\n\tSent int `json:\"sent\"`\n\tLostPercent float64 `json:\"lost-percent\"`\n\tLast float64 `json:\"last\"`\n\tAvg float64 `json:\"avg\"`\n\tBest float64 `json:\"best\"`\n\tWorst float64 `json:\"worst\"`\n\tStDev float64 `json:\"standard-dev\"`\n}\n\ntype Report struct {\n\tTime time.Time `json:\"time\"`\n\tHosts []*Host `json:\"hosts\"`\n\tHops int `json:\"hops\"`\n\tElapsedTime time.Duration `json:\"elapsed_time\"`\n\tLocation *ReportLocation `json:\"location\"`\n}\n\n\/\/ slightly simpler struct than the one provided by geoipc\ntype ReportLocation struct {\n\tIP string `json:\"ip\"`\n\tCountryCode string `json:\"country_code\"`\n\tCountryName string `json:\"country_name\"`\n\tCity string `json:\"city\"`\n\tLatitude float64 `json:\"latitude\"`\n\tLongitude float64 `json:\"longitude\"`\n}\n\nfunc sendMetrics(client *mqtt.MqttClient, msg mqtt.Message) {\n\tvar report Report\n\terr := json.Unmarshal(msg.Payload(), &report)\n\tif err != nil {\n\t\tlog.Error(\"Error decoding json report\")\n\t}\n\n\tip := report.Location.IP\n\tif ip == \"\" {\n\t\tlog.Warnf(\"Discarding report with no source IP\")\n\t\treturn\n\t}\n\tip = strings.Replace(ip, \".\", \"_\", -1)\n\n\tcc := report.Location.CountryCode\n\tif cc == \"\" {\n\t\tlog.Warnf(\"Discarding report with no country code\")\n\t\treturn\n\t}\n\n\tcity := strings.Replace(report.Location.City, \" \", \"_\", -1)\n\thops := report.Hops\n\tif city == \"\" {\n\t\tcity = \"nil\"\n\t}\n\n\tpktLossHops := 0\n\tfor _, host := range report.Hosts {\n\t\tif host.LostPercent != 0 && host.LostPercent != 100 {\n\t\t\tpktLossHops += 1\n\t\t}\n\t}\n\n\tlast_hop := report.Hosts[len(report.Hosts)-1]\n\n\tmetric_prefix := strings.ToLower(fmt.Sprintf(\"push-mtr.%s.%s.%s\", ip, cc, city))\n\tmHops := metric_prefix + \".hops\"\n\tmLastHopAvg := metric_prefix + \".last_hop.avg\"\n\tmLastHopBest := metric_prefix + \".last_hop.best\"\n\tmLastHopWorst := metric_prefix + \".last_hop.worst\"\n\n\tsendMetric(metric_prefix+\".pkt_loss_hops\", strconv.Itoa(pktLossHops))\n\tsendMetric(mHops, strconv.Itoa(hops))\n\tsendMetric(mLastHopAvg, strconv.FormatFloat(last_hop.Avg, 'f', 3, 64))\n\tsendMetric(mLastHopBest, strconv.FormatFloat(last_hop.Best, 'f', 3, 64))\n\tsendMetric(mLastHopWorst, strconv.FormatFloat(last_hop.Worst, 'f', 3, 64))\n}\n\nfunc sendMetric(key, value string) {\n\tif NOOP {\n\t\tlog.Infof(\"NOOP: Sending metric %s %s\", key, value)\n\t\treturn\n\t}\n\terr := Graphite.SimpleSend(key, value)\n\tif err != nil {\n\t\tlog.Debugf(\"Error sending metric %s: %s\", key, err)\n\t}\n}\n\nfunc parseBrokerUrls(brokerUrls string) []string {\n\ttokens := strings.Split(brokerUrls, \",\")\n\tfor i, url := range tokens {\n\t\ttokens[i] = strings.TrimSpace(url)\n\t}\n\n\treturn tokens\n}\n\nfunc main() {\n\tkingpin.Version(PKG_VERSION)\n\n\tbrokerUrls := kingpin.Flag(\"broker-urls\", \"Comman separated MQTT broker URLs\").\n\t\tRequired().Default(\"\").OverrideDefaultFromEnvar(\"MQTT_URLS\").String()\n\n\tcafile := kingpin.Flag(\"cafile\", \"CA certificate when using TLS (optional)\").\n\t\tString()\n\n\ttopic := kingpin.Flag(\"topic\", \"MQTT topic\").\n\t\tDefault(\"\/metrics\/mtr\").String()\n\n\tgraphiteHost := kingpin.Flag(\"graphiteHost\", \"Graphite host\").\n\t\tDefault(\"localhost\").String()\n\n\tclientID := kingpin.Flag(\"clientid\", \"Use a custom MQTT client ID\").String()\n\n\tgraphitePort := kingpin.Flag(\"graphitePort\", \"Graphite port\").\n\t\tDefault(\"2003\").Int()\n\n\tgraphitePing := kingpin.Flag(\"graphitePing\", \"Try to reconnect to graphite every X seconds\").\n\t\tDefault(\"15\").Int()\n\n\tinsecure := kingpin.Flag(\"insecure\", \"Don't verify the server's certificate chain and host name.\").\n\t\tDefault(\"false\").Bool()\n\n\tdebug := kingpin.Flag(\"debug\", \"Print debugging messages\").\n\t\tDefault(\"false\").Bool()\n\n\tVERBOSE = *(kingpin.Flag(\"verbose\", \"Print metrics being sent\").\n\t\tDefault(\"false\").Bool())\n\n\tnoop := kingpin.Flag(\"noop\", \"Print the metrics being sent instead of sending them\").\n\t\tDefault(\"false\").Bool()\n\n\tkingpin.Parse()\n\n\tif *debug {\n\t\tlog.SetLevel(log.DebugLevel)\n\t}\n\n\tNOOP = *noop\n\tvar err error\n\n\tif *cafile != \"\" {\n\t\tif _, err := os.Stat(*cafile); err != nil {\n\t\t\tlog.Fatalf(\"Error reading CA certificate %s\", err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\turlList := parseBrokerUrls(*brokerUrls)\n\n\tvar gerr error\n\tGraphite, gerr = graphite.NewGraphite(*graphiteHost, *graphitePort)\n\tif gerr != nil {\n\t\tlog.Warn(\"Error connecting to graphite\")\n\t\tos.Exit(1)\n\t} else {\n\t\tlog.Info(\"Connected to graphite\")\n\t\tlog.Debugf(\"Loaded Graphite connection: %#v\", Graphite)\n\t}\n\n\tif *clientID == \"\" {\n\t\t*clientID, err = os.Hostname()\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Can't get the hostname to use it as the ClientID, use --clientid option\")\n\t\t}\n\t}\n\tlog.Debugf(\"MQTT Client ID: %s\", *clientID)\n\n\tfor _, urlStr := range urlList {\n\t\targs := mqttc.Args{\n\t\t\tBrokerURLs: []string{urlStr},\n\t\t\tClientID: *clientID,\n\t\t\tTopic: *topic,\n\t\t\tTLSCACertPath: *cafile,\n\t\t\tTLSSkipVerify: *insecure,\n\t\t}\n\n\t\turi, _ := url.Parse(urlStr)\n\t\tlog.Debug(\"Starting mqttc client\")\n\t\tc := mqttc.Subscribe(sendMetrics, &args)\n\t\tdefer c.Disconnect(0)\n\t\thost := strings.Split(uri.Host, \":\")[0]\n\t\thost = strings.Replace(host, \".\", \"_\", -1)\n\t}\n\n\t\/\/ Try to reconnect every graphitePing sec if sending fails by sending\n\t\/\/ a fake ping metric\n\t\/\/ FIXME: better handled by the Graphite client\n\tgo func() {\n\t\tfor {\n\t\t\ttime.Sleep(time.Duration(*graphitePing) * time.Second)\n\t\t\terr := Graphite.SimpleSend(\"ping metric\", \"\")\n\t\t\tif err != nil {\n\t\t\t\tlog.Warn(\"Ping metric failed, trying to reconnect\")\n\t\t\t\terr = Graphite.Connect()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"Reconnecting to graphite failed\")\n\t\t\t\t} else {\n\t\t\t\t\tlog.Info(\"Reconnected to graphite\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ wait endlessly\n\tvar loop chan bool\n\tloop <- true\n}\n<|endoftext|>"} {"text":"<commit_before>package virtualbox\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\nvar (\n\treVMNameUUID = regexp.MustCompile(`\"(.+)\" {([0-9a-f-]+)}`)\n\treVMInfoLine = regexp.MustCompile(`(?:\"(.+)\"|(.+))=(?:\"(.*)\"|(.*))`)\n\treColonLine = regexp.MustCompile(`(.+):\\s+(.*)`)\n\treMachineNotFound = regexp.MustCompile(`Could not find a registered machine named '(.+)'`)\n)\n\nvar (\n\tErrMachineExist = errors.New(\"machine already exists\")\n\tErrMachineNotExist = errors.New(\"machine does not exist\")\n\tErrVBMNotFound = errors.New(\"VBoxManage not found\")\n\tvboxManageCmd = setVBoxManageCmd()\n)\n\n\/\/ detect the VBoxManage cmd's path if needed\nfunc setVBoxManageCmd() string {\n\tcmd := \"VBoxManage\"\n\tif path, err := exec.LookPath(cmd); err == nil {\n\t\treturn path\n\t}\n\tif runtime.GOOS == \"windows\" {\n\t\tif p := os.Getenv(\"VBOX_INSTALL_PATH\"); p != \"\" {\n\t\t\tif path, err := exec.LookPath(filepath.Join(p, cmd)); err == nil {\n\t\t\t\treturn path\n\t\t\t}\n\t\t}\n\t\tif p := os.Getenv(\"VBOX_MSI_INSTALL_PATH\"); p != \"\" {\n\t\t\tif path, err := exec.LookPath(filepath.Join(p, cmd)); err == nil {\n\t\t\t\treturn path\n\t\t\t}\n\t\t}\n\t\t\/\/ look at HKEY_LOCAL_MACHINE\\SOFTWARE\\Oracle\\VirtualBox\\InstallDir\n\t\tp := \"C:\\\\Program Files\\\\Oracle\\\\VirtualBox\"\n\t\tif path, err := exec.LookPath(filepath.Join(p, cmd)); err == nil {\n\t\t\treturn path\n\t\t}\n\t}\n\treturn cmd\n}\n\nfunc vbm(args ...string) error {\n\tcmd := exec.Command(vboxManageCmd, args...)\n\tif os.Getenv(\"DEBUG\") != \"\" {\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\t}\n\tlog.Debugf(\"executing: %v %v\", vboxManageCmd, strings.Join(args, \" \"))\n\tif err := cmd.Run(); err != nil {\n\t\tif ee, ok := err.(*exec.Error); ok && ee == exec.ErrNotFound {\n\t\t\treturn ErrVBMNotFound\n\t\t}\n\t\treturn fmt.Errorf(\"%v %v failed: %v\", vboxManageCmd, strings.Join(args, \" \"), err)\n\t}\n\treturn nil\n}\n\nfunc vbmOut(args ...string) (string, error) {\n\tcmd := exec.Command(vboxManageCmd, args...)\n\tif os.Getenv(\"DEBUG\") != \"\" {\n\t\tcmd.Stderr = os.Stderr\n\t}\n\tlog.Debugf(\"executing: %v %v\", vboxManageCmd, strings.Join(args, \" \"))\n\n\tb, err := cmd.Output()\n\tif err != nil {\n\t\tif ee, ok := err.(*exec.Error); ok && ee == exec.ErrNotFound {\n\t\t\terr = ErrVBMNotFound\n\t\t}\n\t}\n\treturn string(b), err\n}\n\nfunc vbmOutErr(args ...string) (string, string, error) {\n\tcmd := exec.Command(vboxManageCmd, args...)\n\tlog.Debugf(\"executing: %v %v\", vboxManageCmd, strings.Join(args, \" \"))\n\tvar stdout bytes.Buffer\n\tvar stderr bytes.Buffer\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tif ee, ok := err.(*exec.Error); ok && ee == exec.ErrNotFound {\n\t\t\terr = ErrVBMNotFound\n\t\t}\n\t}\n\treturn stdout.String(), stderr.String(), err\n}\n<commit_msg>Detect vboxmanage error conditions and pass those on as errors<commit_after>package virtualbox\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\nvar (\n\treVMNameUUID = regexp.MustCompile(`\"(.+)\" {([0-9a-f-]+)}`)\n\treVMInfoLine = regexp.MustCompile(`(?:\"(.+)\"|(.+))=(?:\"(.*)\"|(.*))`)\n\treColonLine = regexp.MustCompile(`(.+):\\s+(.*)`)\n\treMachineNotFound = regexp.MustCompile(`Could not find a registered machine named '(.+)'`)\n)\n\nvar (\n\tErrMachineExist = errors.New(\"machine already exists\")\n\tErrMachineNotExist = errors.New(\"machine does not exist\")\n\tErrVBMNotFound = errors.New(\"VBoxManage not found\")\n\tvboxManageCmd = setVBoxManageCmd()\n)\n\n\/\/ detect the VBoxManage cmd's path if needed\nfunc setVBoxManageCmd() string {\n\tcmd := \"VBoxManage\"\n\tif path, err := exec.LookPath(cmd); err == nil {\n\t\treturn path\n\t}\n\tif runtime.GOOS == \"windows\" {\n\t\tif p := os.Getenv(\"VBOX_INSTALL_PATH\"); p != \"\" {\n\t\t\tif path, err := exec.LookPath(filepath.Join(p, cmd)); err == nil {\n\t\t\t\treturn path\n\t\t\t}\n\t\t}\n\t\tif p := os.Getenv(\"VBOX_MSI_INSTALL_PATH\"); p != \"\" {\n\t\t\tif path, err := exec.LookPath(filepath.Join(p, cmd)); err == nil {\n\t\t\t\treturn path\n\t\t\t}\n\t\t}\n\t\t\/\/ look at HKEY_LOCAL_MACHINE\\SOFTWARE\\Oracle\\VirtualBox\\InstallDir\n\t\tp := \"C:\\\\Program Files\\\\Oracle\\\\VirtualBox\"\n\t\tif path, err := exec.LookPath(filepath.Join(p, cmd)); err == nil {\n\t\t\treturn path\n\t\t}\n\t}\n\treturn cmd\n}\n\nfunc vbm(args ...string) error {\n\t_, _, err := vbmOutErr(args...)\n\treturn err\n}\n\nfunc vbmOut(args ...string) (string, error) {\n\tstdout, _, err := vbmOutErr(args...)\n\treturn stdout, err\n}\n\nfunc vbmOutErr(args ...string) (string, string, error) {\n\tcmd := exec.Command(vboxManageCmd, args...)\n\tlog.Debugf(\"executing: %v %v\", vboxManageCmd, strings.Join(args, \" \"))\n\tvar stdout bytes.Buffer\n\tvar stderr bytes.Buffer\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\terr := cmd.Run()\n\tstderrStr := stderr.String()\n\tlog.Debugf(\"STDOUT: %v\", stdout.String())\n\tlog.Debugf(\"STDERR: %v\", stderrStr)\n\tif err != nil {\n\t\tif ee, ok := err.(*exec.Error); ok && ee == exec.ErrNotFound {\n\t\t\terr = ErrVBMNotFound\n\t\t}\n\t} else {\n\t\t\/\/ VBoxManage will sometimes not set the return code, but has a fatal error\n\t\t\/\/ such as VBoxManage.exe: error: VT-x is not available. (VERR_VMX_NO_VMX)\n\t\tif strings.Contains(stderrStr, \"error:\") {\n\t\t\terr = fmt.Errorf(\"%v %v failed: %v\", vboxManageCmd, strings.Join(args, \" \"), stderrStr)\n\t\t}\n\t}\n\treturn stdout.String(), stderrStr, err\n}\n<|endoftext|>"} {"text":"<commit_before>package build\n\nimport (\n\t\"os\"\n\t\"text\/template\"\n\n\t\"github.com\/drone\/drone-cli\/drone\/internal\"\n\t\"github.com\/drone\/drone-go\/drone\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar buildListCmd = cli.Command{\n\tName: \"ls\",\n\tUsage: \"show build history\",\n\tArgsUsage: \"<repo\/name>\",\n\tAction: buildList,\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"format\",\n\t\t\tUsage: \"format output\",\n\t\t\tValue: tmplBuildList,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"branch\",\n\t\t\tUsage: \"branch filter\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"event\",\n\t\t\tUsage: \"event filter\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"status\",\n\t\t\tUsage: \"status filter\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"limit\",\n\t\t\tUsage: \"limit the list size\",\n\t\t\tValue: 25,\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"page\",\n\t\t\tUsage: \"page number\",\n\t\t\tValue: 1,\n\t\t},\n\t},\n}\n\nfunc buildList(c *cli.Context) error {\n\trepo := c.Args().First()\n\towner, name, err := internal.ParseRepo(repo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient, err := internal.NewClient(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuilds, err := client.BuildList(owner, name, &drone.ListOptions{Page: c.Int(\"page\")})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttmpl, err := template.New(\"_\").Parse(c.String(\"format\") + \"\\n\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbranch := c.String(\"branch\")\n\tevent := c.String(\"event\")\n\tstatus := c.String(\"status\")\n\tlimit := c.Int(\"limit\")\n\n\tvar count int\n\tfor _, build := range builds {\n\t\tif count >= limit {\n\t\t\tbreak\n\t\t}\n\t\tif branch != \"\" && build.Target != branch {\n\t\t\tcontinue\n\t\t}\n\t\tif event != \"\" && build.Event != event {\n\t\t\tcontinue\n\t\t}\n\t\tif status != \"\" && build.Status != status {\n\t\t\tcontinue\n\t\t}\n\t\ttmpl.Execute(os.Stdout, build)\n\t\tcount++\n\t}\n\treturn nil\n}\n\n\/\/ template for build list information\nvar tmplBuildList = \"\\x1b[33mBuild #{{ .Number }} \\x1b[0m\" + `\nStatus: {{ .Status }}\nEvent: {{ .Event }}\nCommit: {{ .After }}\nBranch: {{ .Target }}\nRef: {{ .Ref }}\nAuthor: {{ .Author }} {{ if .AuthorEmail }}<{{.AuthorEmail}}>{{ end }}\nMessage: {{ .Message }}\n`\n<commit_msg>Removed pointer when passing ListOptions parameter<commit_after>package build\n\nimport (\n\t\"os\"\n\t\"text\/template\"\n\n\t\"github.com\/drone\/drone-cli\/drone\/internal\"\n\t\"github.com\/drone\/drone-go\/drone\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar buildListCmd = cli.Command{\n\tName: \"ls\",\n\tUsage: \"show build history\",\n\tArgsUsage: \"<repo\/name>\",\n\tAction: buildList,\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"format\",\n\t\t\tUsage: \"format output\",\n\t\t\tValue: tmplBuildList,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"branch\",\n\t\t\tUsage: \"branch filter\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"event\",\n\t\t\tUsage: \"event filter\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"status\",\n\t\t\tUsage: \"status filter\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"limit\",\n\t\t\tUsage: \"limit the list size\",\n\t\t\tValue: 25,\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"page\",\n\t\t\tUsage: \"page number\",\n\t\t\tValue: 1,\n\t\t},\n\t},\n}\n\nfunc buildList(c *cli.Context) error {\n\trepo := c.Args().First()\n\towner, name, err := internal.ParseRepo(repo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient, err := internal.NewClient(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuilds, err := client.BuildList(owner, name, drone.ListOptions{Page: c.Int(\"page\")})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttmpl, err := template.New(\"_\").Parse(c.String(\"format\") + \"\\n\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbranch := c.String(\"branch\")\n\tevent := c.String(\"event\")\n\tstatus := c.String(\"status\")\n\tlimit := c.Int(\"limit\")\n\n\tvar count int\n\tfor _, build := range builds {\n\t\tif count >= limit {\n\t\t\tbreak\n\t\t}\n\t\tif branch != \"\" && build.Target != branch {\n\t\t\tcontinue\n\t\t}\n\t\tif event != \"\" && build.Event != event {\n\t\t\tcontinue\n\t\t}\n\t\tif status != \"\" && build.Status != status {\n\t\t\tcontinue\n\t\t}\n\t\ttmpl.Execute(os.Stdout, build)\n\t\tcount++\n\t}\n\treturn nil\n}\n\n\/\/ template for build list information\nvar tmplBuildList = \"\\x1b[33mBuild #{{ .Number }} \\x1b[0m\" + `\nStatus: {{ .Status }}\nEvent: {{ .Event }}\nCommit: {{ .After }}\nBranch: {{ .Target }}\nRef: {{ .Ref }}\nAuthor: {{ .Author }} {{ if .AuthorEmail }}<{{.AuthorEmail}}>{{ end }}\nMessage: {{ .Message }}\n`\n<|endoftext|>"} {"text":"<commit_before>package prometheus\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"runtime\/debug\"\n\t\"time\"\n)\n\ntype goCollector struct {\n\tgoroutines Gauge\n\tgcDesc *Desc\n\n\t\/\/ metrics to describe and collect\n\tmetrics memStatsMetrics\n}\n\n\/\/ NewGoCollector returns a collector which exports metrics about the current\n\/\/ go process.\nfunc NewGoCollector() Collector {\n\treturn &goCollector{\n\t\tgoroutines: NewGauge(GaugeOpts{\n\t\t\tNamespace: \"go\",\n\t\t\tName: \"goroutines\",\n\t\t\tHelp: \"Number of goroutines that currently exist.\",\n\t\t}),\n\t\tgcDesc: NewDesc(\n\t\t\t\"go_gc_duration_seconds\",\n\t\t\t\"A summary of the GC invocation durations.\",\n\t\t\tnil, nil),\n\t\tmetrics: memStatsMetrics{\n\t\t\t{\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"alloc_bytes\"),\n\t\t\t\t\t\"Number of bytes allocated and still in use.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"alloc_bytes_total\"),\n\t\t\t\t\t\"Total number of bytes allocated, even if freed.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },\n\t\t\t\tvalType: CounterValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"sys_bytes\"),\n\t\t\t\t\t\"Number of bytes obtained by system. Sum of all system allocations.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"lookups_total\"),\n\t\t\t\t\t\"Total number of pointer lookups.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },\n\t\t\t\tvalType: CounterValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"mallocs_total\"),\n\t\t\t\t\t\"Total number of mallocs.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },\n\t\t\t\tvalType: CounterValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"frees_total\"),\n\t\t\t\t\t\"Total number of frees.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },\n\t\t\t\tvalType: CounterValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"heap_alloc_bytes\"),\n\t\t\t\t\t\"Number of heap bytes allocated and still in use.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"heap_sys_bytes\"),\n\t\t\t\t\t\"Number of heap bytes obtained from system.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"heap_idle_bytes\"),\n\t\t\t\t\t\"Number of heap bytes waiting to be used.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"heap_inuse_bytes\"),\n\t\t\t\t\t\"Number of heap bytes that are in use.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"heap_released_bytes_total\"),\n\t\t\t\t\t\"Total number of heap bytes released to OS.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },\n\t\t\t\tvalType: CounterValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"heap_objects\"),\n\t\t\t\t\t\"Number of allocated objects.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"stack_inuse_bytes\"),\n\t\t\t\t\t\"Number of bytes in use by the stack allocator.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"stack_sys_bytes\"),\n\t\t\t\t\t\"Number of bytes obtained from system for stack allocator.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"mspan_inuse_bytes\"),\n\t\t\t\t\t\"Number of bytes in use by mspan structures.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"mspan_sys_bytes\"),\n\t\t\t\t\t\"Number of bytes used for mspan structures obtained from system.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"mcache_inuse_bytes\"),\n\t\t\t\t\t\"Number of bytes in use by mcache structures.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"mcache_sys_bytes\"),\n\t\t\t\t\t\"Number of bytes used for mcache structures obtained from system.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"buck_hash_sys_bytes\"),\n\t\t\t\t\t\"Number of bytes used by the profiling bucket hash table.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"gc_sys_bytes\"),\n\t\t\t\t\t\"Number of bytes used for garbage collection system metadata.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"other_sys_bytes\"),\n\t\t\t\t\t\"Number of bytes used for other system allocations.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"next_gc_bytes\"),\n\t\t\t\t\t\"Number of heap bytes when next garbage collection will take place.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"last_gc_time_seconds\"),\n\t\t\t\t\t\"Number of seconds since 1970 of last garbage collection.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) \/ 1e9 },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc memstatNamespace(s string) string {\n\treturn fmt.Sprintf(\"go_memstats_%s\", s)\n}\n\n\/\/ Describe returns all descriptions of the collector.\nfunc (c *goCollector) Describe(ch chan<- *Desc) {\n\tch <- c.goroutines.Desc()\n\tch <- c.gcDesc\n\n\tfor _, i := range c.metrics {\n\t\tch <- i.desc\n\t}\n}\n\n\/\/ Collect returns the current state of all metrics of the collector.\nfunc (c *goCollector) Collect(ch chan<- Metric) {\n\tc.goroutines.Set(float64(runtime.NumGoroutine()))\n\tch <- c.goroutines\n\n\tvar stats debug.GCStats\n\tstats.PauseQuantiles = make([]time.Duration, 5)\n\tdebug.ReadGCStats(&stats)\n\n\tquantiles := make(map[float64]float64)\n\tfor idx, pq := range stats.PauseQuantiles[1:] {\n\t\tquantiles[float64(idx+1)\/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()\n\t}\n\tquantiles[0.0] = stats.PauseQuantiles[0].Seconds()\n\tch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)\n\n\tms := &runtime.MemStats{}\n\truntime.ReadMemStats(ms)\n\tfor _, i := range c.metrics {\n\t\tch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))\n\t}\n}\n\n\/\/ memStatsMetrics provide description, value, and value type for memstat metrics.\ntype memStatsMetrics []struct {\n\tdesc *Desc\n\teval func(*runtime.MemStats) float64\n\tvalType ValueType\n}\n<commit_msg>Make heap_released_bytes a Gauge.<commit_after>package prometheus\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"runtime\/debug\"\n\t\"time\"\n)\n\ntype goCollector struct {\n\tgoroutines Gauge\n\tgcDesc *Desc\n\n\t\/\/ metrics to describe and collect\n\tmetrics memStatsMetrics\n}\n\n\/\/ NewGoCollector returns a collector which exports metrics about the current\n\/\/ go process.\nfunc NewGoCollector() Collector {\n\treturn &goCollector{\n\t\tgoroutines: NewGauge(GaugeOpts{\n\t\t\tNamespace: \"go\",\n\t\t\tName: \"goroutines\",\n\t\t\tHelp: \"Number of goroutines that currently exist.\",\n\t\t}),\n\t\tgcDesc: NewDesc(\n\t\t\t\"go_gc_duration_seconds\",\n\t\t\t\"A summary of the GC invocation durations.\",\n\t\t\tnil, nil),\n\t\tmetrics: memStatsMetrics{\n\t\t\t{\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"alloc_bytes\"),\n\t\t\t\t\t\"Number of bytes allocated and still in use.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"alloc_bytes_total\"),\n\t\t\t\t\t\"Total number of bytes allocated, even if freed.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },\n\t\t\t\tvalType: CounterValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"sys_bytes\"),\n\t\t\t\t\t\"Number of bytes obtained from system.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"lookups_total\"),\n\t\t\t\t\t\"Total number of pointer lookups.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },\n\t\t\t\tvalType: CounterValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"mallocs_total\"),\n\t\t\t\t\t\"Total number of mallocs.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },\n\t\t\t\tvalType: CounterValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"frees_total\"),\n\t\t\t\t\t\"Total number of frees.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },\n\t\t\t\tvalType: CounterValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"heap_alloc_bytes\"),\n\t\t\t\t\t\"Number of heap bytes allocated and still in use.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"heap_sys_bytes\"),\n\t\t\t\t\t\"Number of heap bytes obtained from system.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"heap_idle_bytes\"),\n\t\t\t\t\t\"Number of heap bytes waiting to be used.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"heap_inuse_bytes\"),\n\t\t\t\t\t\"Number of heap bytes that are in use.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"heap_released_bytes\"),\n\t\t\t\t\t\"Number of heap bytes released to OS.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"heap_objects\"),\n\t\t\t\t\t\"Number of allocated objects.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"stack_inuse_bytes\"),\n\t\t\t\t\t\"Number of bytes in use by the stack allocator.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"stack_sys_bytes\"),\n\t\t\t\t\t\"Number of bytes obtained from system for stack allocator.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"mspan_inuse_bytes\"),\n\t\t\t\t\t\"Number of bytes in use by mspan structures.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"mspan_sys_bytes\"),\n\t\t\t\t\t\"Number of bytes used for mspan structures obtained from system.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"mcache_inuse_bytes\"),\n\t\t\t\t\t\"Number of bytes in use by mcache structures.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"mcache_sys_bytes\"),\n\t\t\t\t\t\"Number of bytes used for mcache structures obtained from system.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"buck_hash_sys_bytes\"),\n\t\t\t\t\t\"Number of bytes used by the profiling bucket hash table.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"gc_sys_bytes\"),\n\t\t\t\t\t\"Number of bytes used for garbage collection system metadata.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"other_sys_bytes\"),\n\t\t\t\t\t\"Number of bytes used for other system allocations.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"next_gc_bytes\"),\n\t\t\t\t\t\"Number of heap bytes when next garbage collection will take place.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"last_gc_time_seconds\"),\n\t\t\t\t\t\"Number of seconds since 1970 of last garbage collection.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) \/ 1e9 },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc memstatNamespace(s string) string {\n\treturn fmt.Sprintf(\"go_memstats_%s\", s)\n}\n\n\/\/ Describe returns all descriptions of the collector.\nfunc (c *goCollector) Describe(ch chan<- *Desc) {\n\tch <- c.goroutines.Desc()\n\tch <- c.gcDesc\n\n\tfor _, i := range c.metrics {\n\t\tch <- i.desc\n\t}\n}\n\n\/\/ Collect returns the current state of all metrics of the collector.\nfunc (c *goCollector) Collect(ch chan<- Metric) {\n\tc.goroutines.Set(float64(runtime.NumGoroutine()))\n\tch <- c.goroutines\n\n\tvar stats debug.GCStats\n\tstats.PauseQuantiles = make([]time.Duration, 5)\n\tdebug.ReadGCStats(&stats)\n\n\tquantiles := make(map[float64]float64)\n\tfor idx, pq := range stats.PauseQuantiles[1:] {\n\t\tquantiles[float64(idx+1)\/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()\n\t}\n\tquantiles[0.0] = stats.PauseQuantiles[0].Seconds()\n\tch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)\n\n\tms := &runtime.MemStats{}\n\truntime.ReadMemStats(ms)\n\tfor _, i := range c.metrics {\n\t\tch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))\n\t}\n}\n\n\/\/ memStatsMetrics provide description, value, and value type for memstat metrics.\ntype memStatsMetrics []struct {\n\tdesc *Desc\n\teval func(*runtime.MemStats) float64\n\tvalType ValueType\n}\n<|endoftext|>"} {"text":"<commit_before>package fuze\n\nimport (\n\t\"encoding\/json\"\n\t\"strconv\"\n \"errors\"\n \"fmt\"\n\n\tfuze \"github.com\/coreos\/container-linux-config-transpiler\/config\"\n\t\"github.com\/hashicorp\/terraform\/helper\/hashcode\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nfunc dataSourceFuzeConfig() *schema.Resource {\n\treturn &schema.Resource{\n\t\tRead: dataSourceFuzeConfigRead,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"content\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\t\"pretty_print\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: false,\n\t\t\t},\n\t\t\t\"rendered\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t\tDescription: \"rendered ignition configuration\",\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc dataSourceFuzeConfigRead(d *schema.ResourceData, meta interface{}) error {\n\trendered, err := renderFuzeConfig(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.Set(\"rendered\", rendered)\n\td.SetId(strconv.Itoa(hashcode.String(rendered)))\n\treturn nil\n}\n\nfunc renderFuzeConfig(d *schema.ResourceData) (string, error) {\n\tpretty := d.Get(\"pretty_print\").(bool)\n\tconfig := d.Get(\"content\").(string)\n\n\tignition, pR := fuze.Parse([]byte(config))\n\tif len(pR.Entries) > 0 {\n\t\treturn \"\", errors.New(pR.String())\n\t}\n\n\tif pretty {\n\t\tignitionJSON, pErr := json.MarshalIndent(&ignition, \"\", \" \")\n\t\treturn string(ignitionJSON), pErr\n\t}\n\n fmt.Println(ignition)\n converted, cR := fuze.ConvertAs2_0_0(ignition)\n\tif len(cR.Entries) > 0 {\n\t\treturn \"\", errors.New(cR.String())\n\t}\n fmt.Println(converted)\n\n\tignitionJSON, mErr := json.Marshal(&converted)\n\treturn string(ignitionJSON), mErr\n}\n\n<commit_msg>tweaks.<commit_after>package fuze\n\nimport (\n\t\"encoding\/json\"\n\t\"strconv\"\n \"errors\"\n \"fmt\"\n\n\tfuze \"github.com\/coreos\/container-linux-config-transpiler\/config\"\n\t\"github.com\/hashicorp\/terraform\/helper\/hashcode\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nfunc dataSourceFuzeConfig() *schema.Resource {\n\treturn &schema.Resource{\n\t\tRead: dataSourceFuzeConfigRead,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"content\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\t\"pretty_print\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: false,\n\t\t\t},\n\t\t\t\"rendered\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t\tDescription: \"rendered ignition configuration\",\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc dataSourceFuzeConfigRead(d *schema.ResourceData, meta interface{}) error {\n\trendered, err := renderFuzeConfig(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.Set(\"rendered\", rendered)\n\td.SetId(strconv.Itoa(hashcode.String(rendered)))\n\treturn nil\n}\n\nfunc renderFuzeConfig(d *schema.ResourceData) (string, error) {\n\tpretty := d.Get(\"pretty_print\").(bool)\n\tconfig := d.Get(\"content\").(string)\n\n\tignition, pR := fuze.Parse([]byte(config))\n\tif len(pR.Entries) > 0 {\n\t\treturn \"\", errors.New(pR.String())\n\t}\n\n\tif pretty {\n\t\tignitionJSON, pErr := json.MarshalIndent(&ignition, \"\", \" \")\n\t\treturn string(ignitionJSON), pErr\n\t}\n\n converted, cR := fuze.ConvertAs2_0_0(ignition)\n\tif len(cR.Entries) > 0 {\n\t\treturn \"\", errors.New(cR.String())\n\t}\n\n\tignitionJSON, mErr := json.Marshal(&converted)\n\treturn string(ignitionJSON), mErr\n}\n\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux,!noaio\n\n\/\/ The above build tag specifies this file is only to be built on linux (because\n\/\/ goaio only supports linux). If you are having difficulty building on linux\n\/\/ or want to build without librados present, then use\n\/\/ go build -tags 'noaio'\n\npackage nbd\n\nimport (\n\t\"github.com\/traetox\/goaio\"\n\t\"golang.org\/x\/net\/context\"\n\t\"os\"\n)\n\n\/\/ AioFileBackend implements Backend\ntype AioFileBackend struct {\n\taio *goaio.AIO\n\tsize uint64\n}\n\n\/\/ WriteAt implements Backend.WriteAt\nfunc (afb *AioFileBackend) WriteAt(ctx context.Context, b []byte, offset int64, fua bool) (int, error) {\n\tif err := afb.aio.Wait(); err != nil {\n\t\treturn 0, err\n\t}\n\trequestId, err := afb.aio.WriteAt(b, offset)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif err := afb.aio.WaitFor(requestId); err != nil {\n\t\treturn 0, err\n\t}\n\tif err := afb.aio.Ack(requestId); err != nil {\n\t\treturn 0, err\n\t}\n\tif fua {\n\t\tif err := afb.aio.Flush(); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn len(b), err\n}\n\n\/\/ ReadAt implements Backend.ReadAt\nfunc (afb *AioFileBackend) ReadAt(ctx context.Context, b []byte, offset int64) (int, error) {\n\tif err := afb.aio.Wait(); err != nil {\n\t\treturn 0, err\n\t}\n\trequestId, err := afb.aio.ReadAt(b, offset)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif err := afb.aio.WaitFor(requestId); err != nil {\n\t\treturn 0, err\n\t}\n\tif err := afb.aio.Ack(requestId); err != nil {\n\t\treturn 0, err\n\t}\n\treturn len(b), err\n}\n\n\/\/ TrimAt implements Backend.TrimAt\nfunc (afb *AioFileBackend) TrimAt(ctx context.Context, length int, offset int64) (int, error) {\n\treturn length, nil\n}\n\n\/\/ Flush implements Backend.Flush\nfunc (afb *AioFileBackend) Flush(ctx context.Context) error {\n\treturn afb.aio.Flush()\n}\n\n\/\/ Close implements Backend.Close\nfunc (afb *AioFileBackend) Close(ctx context.Context) error {\n\treturn afb.aio.Close()\n}\n\n\/\/ Size implements Backend.Size\nfunc (afb *AioFileBackend) Geometry(ctx context.Context) (uint64, uint64, uint64, uint64, error) {\n\treturn afb.size, 1, 4096, 128 * 1024 * 1024, nil\n}\n\n\/\/ Size implements Backend.HasFua\nfunc (afb *AioFileBackend) HasFua(ctx context.Context) bool {\n\treturn false\n}\n\n\/\/ Size implements Backend.HasFua\nfunc (afb *AioFileBackend) HasFlush(ctx context.Context) bool {\n\treturn false\n}\n\n\/\/ Generate a new aio backend\nfunc NewAioFileBackend(ctx context.Context, ec *ExportConfig) (Backend, error) {\n\tperms := os.O_RDWR\n\tif ec.ReadOnly {\n\t\tperms = os.O_RDONLY\n\t}\n\tif s, err := isTrue(ec.DriverParameters[\"sync\"]); err != nil {\n\t\treturn nil, err\n\t} else if s {\n\t\tperms |= os.O_SYNC\n\t}\n\taio, err := goaio.NewAIO(ec.DriverParameters[\"path\"], perms, 0666)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstat, err := aio.FD().Stat()\n\tif err != nil {\n\t\taio.Close()\n\t\treturn nil, err\n\t}\n\treturn &AioFileBackend{\n\t\taio: aio,\n\t\tsize: uint64(stat.Size()),\n\t}, nil\n}\n\n\/\/ Register our backend\nfunc init() {\n\tRegisterBackend(\"aiofile\", NewAioFileBackend)\n}\n<commit_msg>removed the need for Ack. WaitFor returns an error if one ocurred<commit_after>\/\/ +build linux,!noaio\n\n\/\/ The above build tag specifies this file is only to be built on linux (because\n\/\/ goaio only supports linux). If you are having difficulty building on linux\n\/\/ or want to build without librados present, then use\n\/\/ go build -tags 'noaio'\n\npackage nbd\n\nimport (\n\t\"github.com\/traetox\/goaio\"\n\t\"golang.org\/x\/net\/context\"\n\t\"os\"\n)\n\n\/\/ AioFileBackend implements Backend\ntype AioFileBackend struct {\n\taio *goaio.AIO\n\tsize uint64\n}\n\n\/\/ WriteAt implements Backend.WriteAt\nfunc (afb *AioFileBackend) WriteAt(ctx context.Context, b []byte, offset int64, fua bool) (int, error) {\n\tif err := afb.aio.Wait(); err != nil {\n\t\treturn 0, err\n\t}\n\trequestId, err := afb.aio.WriteAt(b, offset)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif err := afb.aio.WaitFor(requestId); err != nil {\n\t\treturn 0, err\n\t}\n\tif fua {\n\t\tif err := afb.aio.Flush(); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn len(b), err\n}\n\n\/\/ ReadAt implements Backend.ReadAt\nfunc (afb *AioFileBackend) ReadAt(ctx context.Context, b []byte, offset int64) (int, error) {\n\tif err := afb.aio.Wait(); err != nil {\n\t\treturn 0, err\n\t}\n\trequestId, err := afb.aio.ReadAt(b, offset)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif err := afb.aio.WaitFor(requestId); err != nil {\n\t\treturn 0, err\n\t}\n\treturn len(b), err\n}\n\n\/\/ TrimAt implements Backend.TrimAt\nfunc (afb *AioFileBackend) TrimAt(ctx context.Context, length int, offset int64) (int, error) {\n\treturn length, nil\n}\n\n\/\/ Flush implements Backend.Flush\nfunc (afb *AioFileBackend) Flush(ctx context.Context) error {\n\treturn afb.aio.Flush()\n}\n\n\/\/ Close implements Backend.Close\nfunc (afb *AioFileBackend) Close(ctx context.Context) error {\n\treturn afb.aio.Close()\n}\n\n\/\/ Size implements Backend.Size\nfunc (afb *AioFileBackend) Geometry(ctx context.Context) (uint64, uint64, uint64, uint64, error) {\n\treturn afb.size, 1, 4096, 128 * 1024 * 1024, nil\n}\n\n\/\/ Size implements Backend.HasFua\nfunc (afb *AioFileBackend) HasFua(ctx context.Context) bool {\n\treturn false\n}\n\n\/\/ Size implements Backend.HasFua\nfunc (afb *AioFileBackend) HasFlush(ctx context.Context) bool {\n\treturn false\n}\n\n\/\/ Generate a new aio backend\nfunc NewAioFileBackend(ctx context.Context, ec *ExportConfig) (Backend, error) {\n\tperms := os.O_RDWR\n\tif ec.ReadOnly {\n\t\tperms = os.O_RDONLY\n\t}\n\tif s, err := isTrue(ec.DriverParameters[\"sync\"]); err != nil {\n\t\treturn nil, err\n\t} else if s {\n\t\tperms |= os.O_SYNC\n\t}\n\taio, err := goaio.NewAIO(ec.DriverParameters[\"path\"], perms, 0666)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstat, err := aio.FD().Stat()\n\tif err != nil {\n\t\taio.Close()\n\t\treturn nil, err\n\t}\n\treturn &AioFileBackend{\n\t\taio: aio,\n\t\tsize: uint64(stat.Size()),\n\t}, nil\n}\n\n\/\/ Register our backend\nfunc init() {\n\tRegisterBackend(\"aiofile\", NewAioFileBackend)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package handlers is used by go generate for analizing\n\/\/ controller package's files and generation of handlers.\npackage handlers\n\nimport (\n\t\"go\/ast\"\n\t\"path\/filepath\"\n\n\t\"github.com\/anonx\/sunplate\/command\"\n\t\"github.com\/anonx\/sunplate\/generation\/output\"\n\t\"github.com\/anonx\/sunplate\/reflect\"\n)\n\nconst (\n\t\/\/ ActionInterfaceImport is a GOPATH to the Result interface\n\t\/\/ that should be implemented by types returned by actions.\n\tActionInterfaceImport = \"github.com\/anonx\/sunplate\/action\"\n\n\t\/\/ ActionInterfaceName is an interface that should be implemented\n\t\/\/ by types that are returned from actions.\n\tActionInterfaceName = \"Result\"\n\n\t\/\/ MagicMethodBefore is a name of the magic method that will be executed\n\t\/\/ before every action.\n\tMagicMethodBefore = \"Before\"\n\n\t\/\/ MagicMethodAfter is a name of the magic method that will be executed\n\t\/\/ after every action.\n\tMagicMethodAfter = \"After\"\n\n\t\/\/ MagicMethodFinally is a name of the magic method that will be executed\n\t\/\/ after every action no matter what.\n\tMagicMethodFinally = \"Finally\"\n)\n\n\/\/ Controller is a type that represents application controller,\n\/\/ a structure that has actions.\ntype Controller struct {\n\tActions reflect.Funcs \/\/ Actions are methods that implement action.Result interface.\n\tAfter reflect.Funcs \/\/ Magic methods that are executed after actions if they return nil.\n\tBefore reflect.Funcs \/\/ Magic methods that are executed before every action.\n\tFinally reflect.Funcs \/\/ Finally is executed at the end of every request no matter what.\n\tStruct reflect.Struct \/\/ Structure of the controller (its name, fields, etc).\n}\n\n\/\/ Start is an entry point of the generate handlers command.\nfunc Start(basePath string, params command.Data) {\n\t\/\/ Generate and save a new package.\n\tt := output.NewType(\n\t\tparams.Default(\"--package\", \"handlers\"), filepath.Join(basePath, \".\/handlers.go.template\"),\n\t)\n\tt.CreateDir(params.Default(\"--output\", \".\/assets\/handlers\/\"))\n\tt.Extension = \".go\" \/\/ Save generated file as a .go source.\n\tt.Context = map[string]interface{}{\n\t\t\"rootPath\": params.Default(\"--path\", \".\/controllers\/\"),\n\t}\n\tt.Generate()\n}\n\n\/\/ extractControllers gets a reflect.Package type and returns\n\/\/ a slice of controllers that are found there.\nfunc extractControllers(pkg *reflect.Package) (cs []Controller) {\n\t\/\/ Actions are required to return action.Result as the first argument.\n\t\/\/ actionImportName is used to store information on how the action package is named.\n\t\/\/ For illustration, here is an example:\n\t\/\/\timport (\n\t\/\/\t\tqwerty \"github.com\/anonx\/sunplate\/action\"\n\t\/\/\t)\n\t\/\/ In the example above action package will be imported as \"qwerty\".\n\t\/\/ So, we are saving this name to actionImportName[FILE_NAME_WHERE_WE_IMPORT_THIS]\n\t\/\/ to eliminate the need of iterating over all imports over and over again.\n\tactionImportName := map[string]string{}\n\n\t\/\/ Files that should be excluded from search of actions\n\t\/\/ as they do not have action package imported.\n\t\/\/ We are using this as a cache.\n\tignoreFiles := map[string]bool{}\n\n\t\/\/ isAction gets a pointer to reflect.Func structure\n\t\/\/ and checks whether it represents action or one of magic methods.\n\t\/\/ If so, it return true.\n\tisAction := func(f *reflect.Func) bool {\n\t\t\/\/ Check whether the file where this method located\n\t\t\/\/ is ignored due to the lack of action subpackage import.\n\t\tif ignoreFiles[f.File] {\n\t\t\treturn false\n\t\t}\n\n\t\t\/\/ Check whether the method returns at least one parameter.\n\t\tif len(f.Results) == 0 {\n\t\t\treturn false\n\t\t}\n\n\t\t\/\/ Make sure the method we are checking is Exported.\n\t\t\/\/ Private ones are ignored.\n\t\tif !ast.IsExported(f.Name) {\n\t\t\treturn false\n\t\t}\n\n\t\t\/\/ Check whether we already know from previous iterations\n\t\t\/\/ how action subpackage is imported (its name).\n\t\tif _, ok := actionImportName[f.File]; !ok {\n\t\t\t\/\/ If not, try to find it out.\n\t\t\tn, ok := pkg.Imports.Name(f.File, ActionInterfaceImport)\n\t\t\tif !ok {\n\t\t\t\t\/\/ Action subpackage import path is not found in this file.\n\t\t\t\t\/\/ So, this is not an action method.\n\t\t\t\t\/\/ Ignore this file (and all methods inside it) in future.\n\t\t\t\tignoreFiles[f.File] = true\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tactionImportName[f.File] = n \/\/ Save the import name to use in future iterations.\n\t\t}\n\n\t\t\/\/ Make sure the first result is of type action.Result.\n\t\tcorrectPackage := f.Results[0].Type.Package == actionImportName[f.File]\n\t\tcorrectName := f.Results[0].Type.Name == ActionInterfaceName\n\t\tif !correctPackage || !correctName {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\n\t\/\/ isBefore gets a Func and checks whether it is a Before magic method.\n\tisBefore := func(f *reflect.Func) bool {\n\t\tif f.Name == MagicMethodBefore {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\t\/\/ isAfter gets a Func and checks whether it is an After magic method.\n\tisAfter := func(f *reflect.Func) bool {\n\t\tif f.Name == MagicMethodAfter {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\t\/\/ isFinally gets a Func and checks whether it is a Finally magic method.\n\tisFinally := func(f *reflect.Func) bool {\n\t\tif f.Name == MagicMethodFinally {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\t\/\/ notMagicMethod gets a Func and makes sure it is not a magic method but a usual action.\n\tnotMagicMethod := func(f *reflect.Func) bool {\n\t\tif isBefore(f) || isAfter(f) || isFinally(f) {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\n\t\/\/ Iterating through all available structures and checking\n\t\/\/ whether those structures are controllers (i.e. whether they have actions).\n\tfor i := 0; i < len(pkg.Structs); i++ {\n\t\t\/\/ Make sure the structure has methods.\n\t\tms, ok := pkg.Methods[pkg.Structs[i].Name]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Check whether there are actions among those methods.\n\t\tas := ms.Filter(isAction, notMagicMethod, isAfter, isBefore, isFinally)\n\t\tif len(as) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Add a new controller to the list of results.\n\t\tcs = append(cs, Controller{\n\t\t\tActions: as[0],\n\t\t\tAfter: as[1],\n\t\t\tBefore: as[2],\n\t\t\tFinally: as[3],\n\t\t\tStruct: pkg.Structs[i],\n\t\t})\n\t}\n\treturn\n}\n<commit_msg>Get rid of nested functions<commit_after>\/\/ Package handlers is used by go generate for analizing\n\/\/ controller package's files and generation of handlers.\npackage handlers\n\nimport (\n\t\"go\/ast\"\n\t\"path\/filepath\"\n\n\t\"github.com\/anonx\/sunplate\/command\"\n\t\"github.com\/anonx\/sunplate\/generation\/output\"\n\t\"github.com\/anonx\/sunplate\/reflect\"\n)\n\nconst (\n\t\/\/ ActionInterfaceImport is a GOPATH to the Result interface\n\t\/\/ that should be implemented by types returned by actions.\n\tActionInterfaceImport = \"github.com\/anonx\/sunplate\/action\"\n\n\t\/\/ ActionInterfaceName is an interface that should be implemented\n\t\/\/ by types that are returned from actions.\n\tActionInterfaceName = \"Result\"\n\n\t\/\/ MagicMethodBefore is a name of the magic method that will be executed\n\t\/\/ before every action.\n\tMagicMethodBefore = \"Before\"\n\n\t\/\/ MagicMethodAfter is a name of the magic method that will be executed\n\t\/\/ after every action.\n\tMagicMethodAfter = \"After\"\n\n\t\/\/ MagicMethodFinally is a name of the magic method that will be executed\n\t\/\/ after every action no matter what.\n\tMagicMethodFinally = \"Finally\"\n)\n\n\/\/ Controller is a type that represents application controller,\n\/\/ a structure that has actions.\ntype Controller struct {\n\tActions reflect.Funcs \/\/ Actions are methods that implement action.Result interface.\n\tAfter reflect.Funcs \/\/ Magic methods that are executed after actions if they return nil.\n\tBefore reflect.Funcs \/\/ Magic methods that are executed before every action.\n\tFinally reflect.Funcs \/\/ Finally is executed at the end of every request no matter what.\n\tStruct reflect.Struct \/\/ Structure of the controller (its name, fields, etc).\n}\n\n\/\/ Start is an entry point of the generate handlers command.\nfunc Start(basePath string, params command.Data) {\n\t\/\/ Generate and save a new package.\n\tt := output.NewType(\n\t\tparams.Default(\"--package\", \"handlers\"), filepath.Join(basePath, \".\/handlers.go.template\"),\n\t)\n\tt.CreateDir(params.Default(\"--output\", \".\/assets\/handlers\/\"))\n\tt.Extension = \".go\" \/\/ Save generated file as a .go source.\n\tt.Context = map[string]interface{}{\n\t\t\"rootPath\": params.Default(\"--path\", \".\/controllers\/\"),\n\t}\n\tt.Generate()\n}\n\n\/\/ extractControllers gets a reflect.Package type and returns\n\/\/ a slice of controllers that are found there.\nfunc extractControllers(pkg *reflect.Package) (cs []Controller) {\n\t\/\/ Actions are required to return action.Result as the first argument.\n\t\/\/ actionImportName is used to store information on how the action package is named.\n\t\/\/ For illustration, here is an example:\n\t\/\/\timport (\n\t\/\/\t\tqwerty \"github.com\/anonx\/sunplate\/action\"\n\t\/\/\t)\n\t\/\/ In the example above action package will be imported as \"qwerty\".\n\t\/\/ So, we are saving this name to actionImportName[FILE_NAME_WHERE_WE_IMPORT_THIS]\n\t\/\/ to eliminate the need of iterating over all imports over and over again.\n\tactionImportName := map[string]string{}\n\n\t\/\/ Files that should be excluded from search of actions\n\t\/\/ as they do not have action package imported.\n\t\/\/ We are using this as a cache.\n\tignoreFiles := map[string]bool{}\n\n\t\/\/ isAction gets a pointer to reflect.Func structure\n\t\/\/ and checks whether it represents action or one of magic methods.\n\t\/\/ If so, it return true.\n\tisAction := func(f *reflect.Func) bool {\n\t\t\/\/ Check whether the file where this method located\n\t\t\/\/ is ignored due to the lack of action subpackage import.\n\t\tif ignoreFiles[f.File] {\n\t\t\treturn false\n\t\t}\n\n\t\t\/\/ Check whether the method returns at least one parameter.\n\t\tif len(f.Results) == 0 {\n\t\t\treturn false\n\t\t}\n\n\t\t\/\/ Make sure the method we are checking is Exported.\n\t\t\/\/ Private ones are ignored.\n\t\tif !ast.IsExported(f.Name) {\n\t\t\treturn false\n\t\t}\n\n\t\t\/\/ Check whether we already know from previous iterations\n\t\t\/\/ how action subpackage is imported (its name).\n\t\tif _, ok := actionImportName[f.File]; !ok {\n\t\t\t\/\/ If not, try to find it out.\n\t\t\tn, ok := pkg.Imports.Name(f.File, ActionInterfaceImport)\n\t\t\tif !ok {\n\t\t\t\t\/\/ Action subpackage import path is not found in this file.\n\t\t\t\t\/\/ So, this is not an action method.\n\t\t\t\t\/\/ Ignore this file (and all methods inside it) in future.\n\t\t\t\tignoreFiles[f.File] = true\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tactionImportName[f.File] = n \/\/ Save the import name to use in future iterations.\n\t\t}\n\n\t\t\/\/ Make sure the first result is of type action.Result.\n\t\tcorrectPackage := f.Results[0].Type.Package == actionImportName[f.File]\n\t\tcorrectName := f.Results[0].Type.Name == ActionInterfaceName\n\t\tif !correctPackage || !correctName {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\n\t\/\/ Iterating through all available structures and checking\n\t\/\/ whether those structures are controllers (i.e. whether they have actions).\n\tfor i := 0; i < len(pkg.Structs); i++ {\n\t\t\/\/ Make sure the structure has methods.\n\t\tms, ok := pkg.Methods[pkg.Structs[i].Name]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Check whether there are actions among those methods.\n\t\tas, count := ms.Filter(isAction, notMagicMethod, isAfter, isBefore, isFinally)\n\t\tif count == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Add a new controller to the list of results.\n\t\tcs = append(cs, Controller{\n\t\t\tActions: as[0],\n\t\t\tAfter: as[1],\n\t\t\tBefore: as[2],\n\t\t\tFinally: as[3],\n\t\t\tStruct: pkg.Structs[i],\n\t\t})\n\t}\n\treturn\n}\n\n\/\/ isBefore gets a Func and checks whether it is a Before magic method.\nfunc isBefore(f *reflect.Func) bool {\n\tif f.Name == MagicMethodBefore {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ isAfter gets a Func and checks whether it is an After magic method.\nfunc isAfter(f *reflect.Func) bool {\n\tif f.Name == MagicMethodAfter {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ isFinally gets a Func and checks whether it is a Finally magic method.\nfunc isFinally(f *reflect.Func) bool {\n\tif f.Name == MagicMethodFinally {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ notMagicMethod gets a Func and makes sure it is not a magic method but a usual action.\nfunc notMagicMethod(f *reflect.Func) bool {\n\tif isBefore(f) || isAfter(f) || isFinally(f) {\n\t\treturn false\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Godless is a peer-to-peer database running over IPFS.\n\/\/\n\/\/ Godless uses a Consistent Replicated Data Type called a Namespace to share schemaless data with peers.\n\/\/\n\/\/ This package is a facade to Godless internals.\n\/\/\n\/\/ Godless is in alpha, and should be considered experimental software.\npackage godless\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\tgohttp \"net\/http\"\n\n\t\"github.com\/johnny-morrice\/godless\/api\"\n\t\"github.com\/johnny-morrice\/godless\/cache\"\n\t\"github.com\/johnny-morrice\/godless\/crdt\"\n\t\"github.com\/johnny-morrice\/godless\/crypto\"\n\t\"github.com\/johnny-morrice\/godless\/internal\/http\"\n\t\"github.com\/johnny-morrice\/godless\/internal\/ipfs\"\n\t\"github.com\/johnny-morrice\/godless\/internal\/service\"\n\t\"github.com\/johnny-morrice\/godless\/log\"\n\t\"github.com\/johnny-morrice\/godless\/query\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ Godless options.\ntype Options struct {\n\t\/\/ IpfsServiceUrl is required, unless specifying your own DataPeer or RemoteStore.\n\tIpfsServiceUrl string\n\t\/\/ DataPeer is optional.\n\tDataPeer api.DataPeer\n\t\/\/ RemoteStore is optional.\n\tRemoteStore api.RemoteStore\n\t\/\/ KeyStore is required. A private Key store.\n\tKeyStore api.KeyStore\n\t\/\/ MemoryImage is required.\n\tMemoryImage api.MemoryImage\n\t\/\/ WebServiceAddr is optional. If not set, the webservice will be disabled.\n\tWebServiceAddr string\n\t\/\/ IndexHash is optional. Set to load an existing index from IPFS.\n\tIndexHash string\n\t\/\/ FailEarly will cause the godless process to crash if it cannot contact IPFS on startup.\n\tFailEarly bool\n\t\/\/ ReplicateInterval is optional. The duration between peer-to-peer replications.\n\tReplicateInterval time.Duration\n\tPulse time.Duration\n\t\/\/ Topics is optional. Two godless servers which share a topic will replicate indices. An empty topics slice will disable replication.\n\tTopics []string\n\t\/\/ IpfsClient is optional. Specify a HTTP client for IPFS.\n\tIpfsClient *gohttp.Client\n\t\/\/ IpfsPingTimeout is optional. Specify a lower timeout for \"Am I Connected?\" checks.\n\tIpfsPingTimeout time.Duration\n\t\/\/ Cache is optional. Build a 12-factor app by supplying your own remote cache.\n\t\/\/ HeadCache, IndexCache and NamespaceCache can be used to specify different caches for different data types.\n\tCache api.Cache\n\t\/\/ HeadCache is optional. Build a 12-factor app by supplying your own remote cache.\n\tHeadCache api.HeadCache\n\t\/\/ IndexCache is optional. Build a 12-factor app by supplying your own remote cache.\n\tIndexCache api.IndexCache\n\t\/\/ NamespaceCache is optional. Build a 12-factor app by supplying your own remote cache.\n\tNamespaceCache api.NamespaceCache\n\t\/\/ PriorityQueue is optional. Build a 12-factor app by supplying your own remote cache.\n\tPriorityQueue api.RequestPriorityQueue\n\t\/\/ APIQueryLimit is optional. Tune performance by setting the number of simultaneous queries.\n\tAPIQueryLimit int\n\t\/\/ PublicServer is optional. If false, the index will only be updated from peers who are in your public key list.\n\tPublicServer bool\n}\n\n\/\/ Godless is a peer-to-peer database. It shares structured data between peers, using IPFS as a backing store.\n\/\/ The core datastructure is a CRDT namespace which resembles a relational scheme in that it has tables, rows, and entries.\ntype Godless struct {\n\tOptions\n\terrch chan error\n\terrwg sync.WaitGroup\n\tstopch chan struct{}\n\tstoppers []chan<- struct{}\n\tremote api.RemoteNamespace\n\tapi api.APIService\n}\n\n\/\/ New creates a godless instance, connecting to any services, and providing any services, specified in the options.\nfunc New(options Options) (*Godless, error) {\n\tgodless := &Godless{Options: options}\n\n\tmissing := godless.findMissingParameters()\n\n\tif missing != nil {\n\t\treturn nil, missing\n\t}\n\n\tsetupFuncs := []func() error{\n\t\tgodless.connectDataPeer,\n\t\tgodless.connectRemoteStore,\n\t\tgodless.connectCache,\n\t\tgodless.setupNamespace,\n\t\tgodless.launchAPI,\n\t\tgodless.serveWeb,\n\t\tgodless.replicate,\n\t}\n\n\terr := breakOnError(setupFuncs)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgodless.report()\n\n\treturn godless, nil\n}\n\nfunc (godless *Godless) report() {\n\tif godless.PublicServer {\n\t\tlog.Info(\"Running public Godless API\")\n\t} else {\n\t\tlog.Info(\"Running private Godless API\")\n\t}\n\n\tprivCount := len(godless.KeyStore.GetAllPrivateKeys())\n\tpubCount := len(godless.KeyStore.GetAllPublicKeys())\n\n\tlog.Info(\"Godless API using %v private and %v public keys\", privCount, pubCount)\n}\n\nfunc (godless *Godless) findMissingParameters() error {\n\tvar missing error\n\n\tif godless.KeyStore == nil {\n\t\tmsg := godless.missingParameterText(\"KeyStore\")\n\t\tmissing = addErrorMessage(missing, msg)\n\t}\n\n\tif godless.MemoryImage == nil {\n\t\tmsg := godless.missingParameterText(\"MemoryImage\")\n\t\tmissing = addErrorMessage(missing, msg)\n\t}\n\n\treturn missing\n}\n\nfunc addErrorMessage(err error, msg string) error {\n\tif err == nil {\n\t\treturn errors.New(msg)\n\t} else {\n\t\treturn errors.Wrap(err, msg)\n\t}\n}\n\nfunc (godless *Godless) missingParameterText(param string) string {\n\treturn fmt.Sprintf(\"Missing required parameter '%v'\", param)\n}\n\n\/\/ Errors provides a stream of errors from godless. Godless will attempt to handle any errors it can. Any errors received here indicate that bad things have happened.\nfunc (godless *Godless) Errors() <-chan error {\n\treturn godless.errch\n}\n\n\/\/ Shutdown stops all godless processes. It does not wait for those goroutines to stop.\nfunc (godless *Godless) Shutdown() {\n\tgodless.stopch <- struct{}{}\n}\n\nfunc (godless *Godless) connectDataPeer() error {\n\tif godless.RemoteStore != nil {\n\t\treturn nil\n\t}\n\n\tif godless.DataPeer == nil {\n\t\tif godless.IpfsServiceUrl == \"\" {\n\t\t\tmsg := godless.missingParameterText(\"IpfsServiceUrl\")\n\t\t\treturn errors.New(msg)\n\t\t}\n\n\t\tpeer := &ipfs.WebServiceClient{\n\t\t\tUrl: godless.IpfsServiceUrl,\n\t\t\tPingTimeout: godless.IpfsPingTimeout,\n\t\t\tHttp: godless.IpfsClient,\n\t\t}\n\n\t\tgodless.DataPeer = peer\n\t}\n\n\treturn nil\n}\n\nfunc (godless *Godless) connectRemoteStore() error {\n\tif godless.RemoteStore == nil {\n\t\tipfs := &ipfs.IpfsRemoteStore{\n\t\t\tShell: godless.DataPeer,\n\t\t}\n\n\t\tif godless.FailEarly {\n\t\t\terr := ipfs.Connect()\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tgodless.RemoteStore = ipfs\n\t}\n\n\treturn nil\n}\n\nfunc (godless *Godless) makeCacheStopper(cache api.Cache) chan<- struct{} {\n\tstopper := make(chan struct{}, 1)\n\tgo func() {\n\t\t<-stopper\n\t\terr := cache.CloseCache()\n\t\tif err == nil {\n\t\t\tlog.Info(\"Closed cache\")\n\t\t} else {\n\t\t\tlog.Error(\"Error closing cache: %v\", err.Error())\n\t\t}\n\t}()\n\n\treturn stopper\n}\n\nfunc (godless *Godless) connectCache() error {\n\tif godless.Cache != nil {\n\t\tgodless.HeadCache = godless.Cache\n\t\tgodless.IndexCache = godless.Cache\n\t\tgodless.NamespaceCache = godless.Cache\n\t\tgodless.addStopper(godless.makeCacheStopper(godless.Cache))\n\t\treturn nil\n\t}\n\n\tif godless.HeadCache == nil {\n\t\tgodless.HeadCache = cache.MakeResidentHeadCache()\n\t}\n\n\tif godless.IndexCache == nil {\n\t\tgodless.IndexCache = cache.MakeResidentIndexCache(__UNKNOWN_BUFFER_SIZE)\n\t}\n\n\tif godless.NamespaceCache == nil {\n\t\tgodless.NamespaceCache = cache.MakeResidentNamespaceCache(__UNKNOWN_BUFFER_SIZE)\n\t}\n\n\treturn nil\n}\n\nfunc (godless *Godless) setupNamespace() error {\n\tif godless.IndexHash != \"\" {\n\t\thead := crdt.IPFSPath(godless.IndexHash)\n\n\t\terr := godless.HeadCache.SetHead(head)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tnamespaceOptions := service.RemoteNamespaceOptions{\n\t\tPulse: godless.Pulse,\n\t\tStore: godless.RemoteStore,\n\t\tHeadCache: godless.HeadCache,\n\t\tIndexCache: godless.IndexCache,\n\t\tNamespaceCache: godless.NamespaceCache,\n\t\tKeyStore: godless.KeyStore,\n\t\tIsPublicIndex: godless.PublicServer,\n\t\tMemoryImage: godless.MemoryImage,\n\t}\n\n\tgodless.remote = service.MakeRemoteNamespace(namespaceOptions)\n\treturn nil\n}\n\nfunc (godless *Godless) launchAPI() error {\n\tlimit := godless.APIQueryLimit\n\n\tif limit == 0 {\n\t\tlimit = 1\n\t}\n\n\tqueue := godless.PriorityQueue\n\n\tif queue == nil {\n\t\tqueue = cache.MakeResidentBufferQueue(__UNKNOWN_BUFFER_SIZE)\n\t}\n\n\tapi, errch := service.LaunchKeyValueStore(godless.remote, queue, limit)\n\n\tgodless.addErrors(errch)\n\tgodless.api = api\n\n\treturn nil\n}\n\n\/\/ Serve serves the Godless webservice.\nfunc (godless *Godless) serveWeb() error {\n\taddr := godless.WebServiceAddr\n\n\tif addr == \"\" {\n\t\treturn nil\n\t}\n\n\twebService := &service.WebService{API: godless.api}\n\tstopch, err := http.Serve(addr, webService.Handler())\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgodless.addStopper(stopch)\n\treturn nil\n}\n\n\/\/ Replicate shares data via the IPFS pubsub mechanism.\nfunc (godless *Godless) replicate() error {\n\ttopics := godless.Topics\n\tinterval := godless.ReplicateInterval\n\n\tif len(topics) == 0 {\n\t\treturn nil\n\t}\n\n\tpubsubTopics := make([]api.PubSubTopic, len(topics))\n\n\tfor i, t := range topics {\n\t\tpubsubTopics[i] = api.PubSubTopic(t)\n\t}\n\n\toptions := service.ReplicateOptions{\n\t\tAPI: godless.api,\n\t\tRemoteStore: godless.RemoteStore,\n\t\tInterval: interval,\n\t\tTopics: pubsubTopics,\n\t\tKeyStore: godless.KeyStore,\n\t}\n\tstopch, errch := service.Replicate(options)\n\tgodless.addStopper(stopch)\n\tgodless.addErrors(errch)\n\treturn nil\n}\n\nfunc (godless *Godless) addStopper(stopch chan<- struct{}) {\n\tif godless.stopch == nil {\n\t\tgodless.stopch = make(chan struct{})\n\t\tgo func() {\n\t\t\tgodless.handleShutdown()\n\t\t}()\n\t}\n\n\tgodless.stoppers = append(godless.stoppers, stopch)\n}\n\nfunc (godless *Godless) handleShutdown() {\n\t<-godless.stopch\n\tlog.Info(\"Shutting down\")\n\tfor _, stopper := range godless.stoppers {\n\t\tgo close(stopper)\n\t}\n\n}\n\nfunc (godless *Godless) addErrors(errch <-chan error) {\n\tgodless.errwg.Add(1)\n\n\tif godless.errch == nil {\n\t\tgodless.errch = make(chan error)\n\t\tgo func() {\n\t\t\tgodless.errwg.Wait()\n\t\t\tclose(godless.errch)\n\t\t}()\n\t}\n\n\tgo func() {\n\t\tfor err := range errch {\n\t\t\tgodless.errch <- err\n\t\t}\n\t\tgodless.errwg.Done()\n\t}()\n}\n\n\/\/ Client is a Godless HTTP client.\ntype Client interface {\n\tSendQuery(*query.Query) (api.APIResponse, error)\n\tSendReflection(api.APIReflectionType) (api.APIResponse, error)\n}\n\n\/\/ MakeClient creates a Godless HTTP Client.\nfunc MakeClient(serviceAddr string) Client {\n\treturn service.MakeClient(serviceAddr)\n}\n\nfunc MakeClientWithHttp(serviceAddr string, webClient *gohttp.Client) Client {\n\treturn service.MakeClientWithHttp(serviceAddr, webClient)\n}\n\nfunc MakeKeyStore() api.KeyStore {\n\treturn &crypto.KeyStore{}\n}\n\nfunc breakOnError(pipeline []func() error) error {\n\tfor _, f := range pipeline {\n\t\terr := f()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ We don't know the right buffer size here, so let the cache package handle it.\nconst __UNKNOWN_BUFFER_SIZE = -1\n<commit_msg>Call godless via root lib<commit_after>\/\/ Godless is a peer-to-peer database running over IPFS.\n\/\/\n\/\/ Godless uses a Consistent Replicated Data Type called a Namespace to share schemaless data with peers.\n\/\/\n\/\/ This package is a facade to Godless internals.\n\/\/\n\/\/ Godless is in alpha, and should be considered experimental software.\npackage godless\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\tgohttp \"net\/http\"\n\n\t\"github.com\/johnny-morrice\/godless\/api\"\n\t\"github.com\/johnny-morrice\/godless\/cache\"\n\t\"github.com\/johnny-morrice\/godless\/crdt\"\n\t\"github.com\/johnny-morrice\/godless\/crypto\"\n\t\"github.com\/johnny-morrice\/godless\/internal\/http\"\n\t\"github.com\/johnny-morrice\/godless\/internal\/ipfs\"\n\t\"github.com\/johnny-morrice\/godless\/internal\/service\"\n\t\"github.com\/johnny-morrice\/godless\/log\"\n\t\"github.com\/johnny-morrice\/godless\/query\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ Godless options.\ntype Options struct {\n\t\/\/ IpfsServiceUrl is required, unless specifying your own DataPeer or RemoteStore.\n\tIpfsServiceUrl string\n\t\/\/ DataPeer is optional. If specified, none of the IPFS options will be used.\n\tDataPeer api.DataPeer\n\t\/\/ RemoteStore is optional. If specified, the DataPeer will not be used, nor any of the IPFS options.\n\tRemoteStore api.RemoteStore\n\t\/\/ KeyStore is required. A private Key store.\n\tKeyStore api.KeyStore\n\t\/\/ MemoryImage is required.\n\tMemoryImage api.MemoryImage\n\t\/\/ WebServiceAddr is optional. If not set, the webservice will be disabled.\n\tWebServiceAddr string\n\t\/\/ IndexHash is optional. Set to load an existing index from IPFS.\n\tIndexHash string\n\t\/\/ FailEarly will cause the godless process to crash if it cannot contact IPFS on startup.\n\tFailEarly bool\n\t\/\/ ReplicateInterval is optional. The duration between peer-to-peer replications.\n\tReplicateInterval time.Duration\n\t\/\/ Pulse is optional. The duration between flushes of the index to IPFS.\n\tPulse time.Duration\n\t\/\/ Topics is optional. Two godless servers which share a topic will replicate indices. An empty topics slice will disable replication.\n\tTopics []string\n\t\/\/ IpfsClient is optional. Specify a HTTP client for IPFS.\n\tIpfsClient *gohttp.Client\n\t\/\/ IpfsPingTimeout is optional. Specify a lower timeout for \"Am I Connected?\" checks.\n\tIpfsPingTimeout time.Duration\n\t\/\/ Cache is optional. Build a 12-factor app by supplying your own remote cache.\n\t\/\/ HeadCache, IndexCache and NamespaceCache can be used to specify different caches for different data types.\n\tCache api.Cache\n\t\/\/ HeadCache is optional. Build a 12-factor app by supplying your own remote cache.\n\tHeadCache api.HeadCache\n\t\/\/ IndexCache is optional. Build a 12-factor app by supplying your own remote cache.\n\tIndexCache api.IndexCache\n\t\/\/ NamespaceCache is optional. Build a 12-factor app by supplying your own remote cache.\n\tNamespaceCache api.NamespaceCache\n\t\/\/ PriorityQueue is optional. Build a 12-factor app by supplying your own remote cache.\n\tPriorityQueue api.RequestPriorityQueue\n\t\/\/ APIQueryLimit is optional. Tune performance by setting the number of simultaneous queries.\n\tAPIQueryLimit int\n\t\/\/ PublicServer is optional. If false, the index will only be updated from peers who are in your public key list.\n\tPublicServer bool\n}\n\n\/\/ Godless is a peer-to-peer database. It shares structured data between peers, using IPFS as a backing store.\n\/\/ The core datastructure is a CRDT namespace which resembles a relational scheme in that it has tables, rows, and entries.\ntype Godless struct {\n\tOptions\n\terrch chan error\n\terrwg sync.WaitGroup\n\tstopch chan struct{}\n\tstoppers []chan<- struct{}\n\tremote api.RemoteNamespace\n\tapi api.APIService\n}\n\n\/\/ New creates a godless instance, connecting to any services, and providing any services, specified in the options.\nfunc New(options Options) (*Godless, error) {\n\tgodless := &Godless{Options: options}\n\n\tmissing := godless.findMissingParameters()\n\n\tif missing != nil {\n\t\treturn nil, missing\n\t}\n\n\tsetupFuncs := []func() error{\n\t\tgodless.connectDataPeer,\n\t\tgodless.connectRemoteStore,\n\t\tgodless.connectCache,\n\t\tgodless.setupNamespace,\n\t\tgodless.launchAPI,\n\t\tgodless.serveWeb,\n\t\tgodless.replicate,\n\t}\n\n\terr := breakOnError(setupFuncs)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgodless.report()\n\n\treturn godless, nil\n}\n\nfunc (godless *Godless) Call(request api.APIRequest) (<-chan api.APIResponse, error) {\n\treturn godless.api.Call(request)\n}\n\nfunc (godless *Godless) report() {\n\tif godless.PublicServer {\n\t\tlog.Info(\"Running public Godless API\")\n\t} else {\n\t\tlog.Info(\"Running private Godless API\")\n\t}\n\n\tprivCount := len(godless.KeyStore.GetAllPrivateKeys())\n\tpubCount := len(godless.KeyStore.GetAllPublicKeys())\n\n\tlog.Info(\"Godless API using %v private and %v public keys\", privCount, pubCount)\n}\n\nfunc (godless *Godless) findMissingParameters() error {\n\tvar missing error\n\n\tif godless.KeyStore == nil {\n\t\tmsg := godless.missingParameterText(\"KeyStore\")\n\t\tmissing = addErrorMessage(missing, msg)\n\t}\n\n\tif godless.MemoryImage == nil {\n\t\tmsg := godless.missingParameterText(\"MemoryImage\")\n\t\tmissing = addErrorMessage(missing, msg)\n\t}\n\n\treturn missing\n}\n\nfunc addErrorMessage(err error, msg string) error {\n\tif err == nil {\n\t\treturn errors.New(msg)\n\t} else {\n\t\treturn errors.Wrap(err, msg)\n\t}\n}\n\nfunc (godless *Godless) missingParameterText(param string) string {\n\treturn fmt.Sprintf(\"Missing required parameter '%v'\", param)\n}\n\n\/\/ Errors provides a stream of errors from godless. Godless will attempt to handle any errors it can. Any errors received here indicate that bad things have happened.\nfunc (godless *Godless) Errors() <-chan error {\n\treturn godless.errch\n}\n\n\/\/ Shutdown stops all godless processes. It does not wait for those goroutines to stop.\nfunc (godless *Godless) Shutdown() {\n\tgodless.stopch <- struct{}{}\n}\n\nfunc (godless *Godless) connectDataPeer() error {\n\tif godless.RemoteStore != nil {\n\t\treturn nil\n\t}\n\n\tif godless.DataPeer == nil {\n\t\tif godless.IpfsServiceUrl == \"\" {\n\t\t\tmsg := godless.missingParameterText(\"IpfsServiceUrl\")\n\t\t\treturn errors.New(msg)\n\t\t}\n\n\t\tpeer := &ipfs.WebServiceClient{\n\t\t\tUrl: godless.IpfsServiceUrl,\n\t\t\tPingTimeout: godless.IpfsPingTimeout,\n\t\t\tHttp: godless.IpfsClient,\n\t\t}\n\n\t\tgodless.DataPeer = peer\n\t}\n\n\treturn nil\n}\n\nfunc (godless *Godless) connectRemoteStore() error {\n\tif godless.RemoteStore == nil {\n\t\tipfs := &ipfs.IpfsRemoteStore{\n\t\t\tShell: godless.DataPeer,\n\t\t}\n\n\t\tif godless.FailEarly {\n\t\t\terr := ipfs.Connect()\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tgodless.RemoteStore = ipfs\n\t}\n\n\treturn nil\n}\n\nfunc (godless *Godless) makeCacheStopper(cache api.Cache) chan<- struct{} {\n\tstopper := make(chan struct{}, 1)\n\tgo func() {\n\t\t<-stopper\n\t\terr := cache.CloseCache()\n\t\tif err == nil {\n\t\t\tlog.Info(\"Closed cache\")\n\t\t} else {\n\t\t\tlog.Error(\"Error closing cache: %v\", err.Error())\n\t\t}\n\t}()\n\n\treturn stopper\n}\n\nfunc (godless *Godless) connectCache() error {\n\tif godless.Cache != nil {\n\t\tgodless.HeadCache = godless.Cache\n\t\tgodless.IndexCache = godless.Cache\n\t\tgodless.NamespaceCache = godless.Cache\n\t\tgodless.addStopper(godless.makeCacheStopper(godless.Cache))\n\t\treturn nil\n\t}\n\n\tif godless.HeadCache == nil {\n\t\tgodless.HeadCache = cache.MakeResidentHeadCache()\n\t}\n\n\tif godless.IndexCache == nil {\n\t\tgodless.IndexCache = cache.MakeResidentIndexCache(__UNKNOWN_BUFFER_SIZE)\n\t}\n\n\tif godless.NamespaceCache == nil {\n\t\tgodless.NamespaceCache = cache.MakeResidentNamespaceCache(__UNKNOWN_BUFFER_SIZE)\n\t}\n\n\treturn nil\n}\n\nfunc (godless *Godless) setupNamespace() error {\n\tif godless.IndexHash != \"\" {\n\t\thead := crdt.IPFSPath(godless.IndexHash)\n\n\t\terr := godless.HeadCache.SetHead(head)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tnamespaceOptions := service.RemoteNamespaceOptions{\n\t\tPulse: godless.Pulse,\n\t\tStore: godless.RemoteStore,\n\t\tHeadCache: godless.HeadCache,\n\t\tIndexCache: godless.IndexCache,\n\t\tNamespaceCache: godless.NamespaceCache,\n\t\tKeyStore: godless.KeyStore,\n\t\tIsPublicIndex: godless.PublicServer,\n\t\tMemoryImage: godless.MemoryImage,\n\t}\n\n\tgodless.remote = service.MakeRemoteNamespace(namespaceOptions)\n\treturn nil\n}\n\nfunc (godless *Godless) launchAPI() error {\n\tlimit := godless.APIQueryLimit\n\n\tif limit == 0 {\n\t\tlimit = 1\n\t}\n\n\tqueue := godless.PriorityQueue\n\n\tif queue == nil {\n\t\tqueue = cache.MakeResidentBufferQueue(__UNKNOWN_BUFFER_SIZE)\n\t}\n\n\tapi, errch := service.LaunchKeyValueStore(godless.remote, queue, limit)\n\n\tgodless.addErrors(errch)\n\tgodless.api = api\n\n\treturn nil\n}\n\n\/\/ Serve serves the Godless webservice.\nfunc (godless *Godless) serveWeb() error {\n\taddr := godless.WebServiceAddr\n\n\tif addr == \"\" {\n\t\treturn nil\n\t}\n\n\twebService := &service.WebService{API: godless.api}\n\tstopch, err := http.Serve(addr, webService.Handler())\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tgodless.addStopper(stopch)\n\treturn nil\n}\n\n\/\/ Replicate shares data via the IPFS pubsub mechanism.\nfunc (godless *Godless) replicate() error {\n\ttopics := godless.Topics\n\tinterval := godless.ReplicateInterval\n\n\tif len(topics) == 0 {\n\t\treturn nil\n\t}\n\n\tpubsubTopics := make([]api.PubSubTopic, len(topics))\n\n\tfor i, t := range topics {\n\t\tpubsubTopics[i] = api.PubSubTopic(t)\n\t}\n\n\toptions := service.ReplicateOptions{\n\t\tAPI: godless.api,\n\t\tRemoteStore: godless.RemoteStore,\n\t\tInterval: interval,\n\t\tTopics: pubsubTopics,\n\t\tKeyStore: godless.KeyStore,\n\t}\n\tstopch, errch := service.Replicate(options)\n\tgodless.addStopper(stopch)\n\tgodless.addErrors(errch)\n\treturn nil\n}\n\nfunc (godless *Godless) addStopper(stopch chan<- struct{}) {\n\tif godless.stopch == nil {\n\t\tgodless.stopch = make(chan struct{})\n\t\tgo func() {\n\t\t\tgodless.handleShutdown()\n\t\t}()\n\t}\n\n\tgodless.stoppers = append(godless.stoppers, stopch)\n}\n\nfunc (godless *Godless) handleShutdown() {\n\t<-godless.stopch\n\tlog.Info(\"Shutting down\")\n\tfor _, stopper := range godless.stoppers {\n\t\tgo close(stopper)\n\t}\n\n}\n\nfunc (godless *Godless) addErrors(errch <-chan error) {\n\tgodless.errwg.Add(1)\n\n\tif godless.errch == nil {\n\t\tgodless.errch = make(chan error)\n\t\tgo func() {\n\t\t\tgodless.errwg.Wait()\n\t\t\tclose(godless.errch)\n\t\t}()\n\t}\n\n\tgo func() {\n\t\tfor err := range errch {\n\t\t\tgodless.errch <- err\n\t\t}\n\t\tgodless.errwg.Done()\n\t}()\n}\n\n\/\/ Client is a Godless HTTP client.\ntype Client interface {\n\tSendQuery(*query.Query) (api.APIResponse, error)\n\tSendReflection(api.APIReflectionType) (api.APIResponse, error)\n}\n\n\/\/ MakeClient creates a Godless HTTP Client.\nfunc MakeClient(serviceAddr string) Client {\n\treturn service.MakeClient(serviceAddr)\n}\n\nfunc MakeClientWithHttp(serviceAddr string, webClient *gohttp.Client) Client {\n\treturn service.MakeClientWithHttp(serviceAddr, webClient)\n}\n\nfunc MakeKeyStore() api.KeyStore {\n\treturn &crypto.KeyStore{}\n}\n\nfunc breakOnError(pipeline []func() error) error {\n\tfor _, f := range pipeline {\n\t\terr := f()\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ We don't know the right buffer size here, so let the cache package handle it.\nconst __UNKNOWN_BUFFER_SIZE = -1\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 Anapaya Systems\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package launcher includes the shared application execution boilerplate of all\n\/\/ SCION servers.\npackage launcher\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"syscall\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/viper\"\n\n\t\"github.com\/scionproto\/scion\/go\/lib\/config\"\n\tlibconfig \"github.com\/scionproto\/scion\/go\/lib\/config\"\n\t\"github.com\/scionproto\/scion\/go\/lib\/env\"\n\t\"github.com\/scionproto\/scion\/go\/lib\/fatal\"\n\t\"github.com\/scionproto\/scion\/go\/lib\/log\"\n\t\"github.com\/scionproto\/scion\/go\/lib\/metrics\"\n\t\"github.com\/scionproto\/scion\/go\/lib\/prom\"\n\t\"github.com\/scionproto\/scion\/go\/lib\/serrors\"\n\t\"github.com\/scionproto\/scion\/go\/pkg\/command\"\n)\n\n\/\/ Configuration keys used by the launcher\nconst (\n\tcfgLogConsoleLevel = \"log.console.level\"\n\tcfgLogConsoleFormat = \"log.console.format\"\n\tcfgLogConsoleStacktraceLevel = \"log.console.stacktrace_level\"\n\tcfgGeneralID = \"general.id\"\n\tcfgConfigFile = \"config\"\n)\n\n\/\/ Application models a SCION server application.\ntype Application struct {\n\t\/\/ TOMLConfig holds the Go data structure for the application-specific\n\t\/\/ TOML configuration. The Application launcher will check if the TOMLConfig\n\t\/\/ supports additional methods (e.g., custom logging or instance ID) and\n\t\/\/ extract them from the config if that is the case. See the XxxConfig interfaces\n\t\/\/ in this package for more information.\n\tTOMLConfig libconfig.Config\n\n\t\/\/ Samplers contains additional configuration samplers to be included\n\t\/\/ under the sample subcommand. If empty, no additional samplers are\n\t\/\/ listed.\n\t\/\/\n\t\/\/ DEPRECATED. This field will be removed once Anapaya\/scion#5000 is implemented.\n\tSamplers []func(command.Pather) *cobra.Command\n\n\t\/\/ ShortName is the short name of the application. If empty, the executable name is used.\n\t\/\/ The ShortName could be, for example, \"SCION Daemon\" for the SCION Daemon.\n\tShortName string\n\n\t\/\/ Main is the custom logic of the application. If nil, no custom logic is executed\n\t\/\/ (and only the setup\/teardown harness runs). If Main returns an error, the\n\t\/\/ Run method will return a non-zero exit code.\n\tMain func() error\n\n\t\/\/ ErrorWriter specifies where error output should be printed. If nil, os.Stderr is used.\n\tErrorWriter io.Writer\n\n\t\/\/ config contains the Viper configuration KV store.\n\tconfig *viper.Viper\n}\n\n\/\/ Run sets up the common SCION server harness, and then passes control to the Main\n\/\/ function (if one exists).\n\/\/\n\/\/ Run uses the following globals:\n\/\/ os.Args\n\/\/\n\/\/ Run will exit the application if it encounters a fatal error.\nfunc (a *Application) Run() {\n\tif err := a.run(); err != nil {\n\t\tfmt.Fprintf(a.getErrorWriter(), \"fatal error: %v\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc (a *Application) run() error {\n\texecutable := filepath.Base(os.Args[0])\n\tshortName := a.getShortName(executable)\n\n\tcmd := newCommandTemplate(executable, shortName, a.TOMLConfig, a.Samplers...)\n\tcmd.RunE = func(cmd *cobra.Command, args []string) error {\n\t\treturn a.executeCommand(shortName)\n\t}\n\ta.config = viper.New()\n\ta.config.SetDefault(cfgLogConsoleLevel, log.DefaultConsoleLevel)\n\ta.config.SetDefault(cfgLogConsoleFormat, \"human\")\n\ta.config.SetDefault(cfgLogConsoleStacktraceLevel, log.DefaultStacktraceLevel)\n\ta.config.SetDefault(cfgGeneralID, executable)\n\t\/\/ The configuration file location is specified through command-line flags.\n\t\/\/ Once the comand-line flags are parsed, we register the location of the\n\t\/\/ config file with the viper config.\n\ta.config.BindPFlag(cfgConfigFile, cmd.Flags().Lookup(cfgConfigFile))\n\n\t\/\/ All servers accept SIGTERM to perform clean shutdown (for example, this\n\t\/\/ is used behind the scenes by docker stop to cleanly shut down a container).\n\tsigterm := make(chan os.Signal)\n\tsignal.Notify(sigterm, syscall.SIGTERM)\n\tgo func() {\n\t\tdefer log.HandlePanic()\n\t\t<-sigterm\n\t\tlog.Info(\"Received SIGTERM signal, exiting...\")\n\t\t\/\/ FIXME(scrye): Use context.Context and clean context propagation to\n\t\t\/\/ server modules instead of a global cancelation signal.\n\t\tfatal.Shutdown(env.ShutdownGraceInterval)\n\t}()\n\n\treturn cmd.Execute()\n}\n\nfunc (a *Application) getShortName(executable string) string {\n\tif a.ShortName != \"\" {\n\t\treturn a.ShortName\n\t}\n\treturn executable\n}\n\nfunc (a *Application) executeCommand(shortName string) error {\n\tos.Setenv(\"TZ\", \"UTC\")\n\tfatal.Init()\n\n\t\/\/ Load launcher configurations from the same config file as the custom\n\t\/\/ application configuration.\n\ta.config.SetConfigType(\"toml\")\n\ta.config.SetConfigFile(a.config.GetString(cfgConfigFile))\n\tif err := a.config.ReadInConfig(); err != nil {\n\t\treturn serrors.WrapStr(\"loading generic server config from file\", err,\n\t\t\t\"file\", a.config.GetString(cfgConfigFile))\n\t}\n\n\tif err := libconfig.LoadFile(a.config.GetString(cfgConfigFile), a.TOMLConfig); err != nil {\n\t\treturn serrors.WrapStr(\"loading config from file\", err,\n\t\t\t\"file\", a.config.GetString(cfgConfigFile))\n\t}\n\ta.TOMLConfig.InitDefaults()\n\n\tlogEntriesTotal := metrics.NewPromCounterFrom(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"lib_log_emitted_entries_total\",\n\t\t\tHelp: \"Total number of log entries emitted.\",\n\t\t},\n\t\t[]string{\"level\"},\n\t)\n\topt := log.WithEntriesCounter(log.EntriesCounter{\n\t\tDebug: logEntriesTotal.With(\"level\", \"debug\"),\n\t\tInfo: logEntriesTotal.With(\"level\", \"info\"),\n\t\tError: logEntriesTotal.With(\"level\", \"error\"),\n\t})\n\n\tif err := log.Setup(a.getLogging(), opt); err != nil {\n\t\treturn serrors.WrapStr(\"initialize logging\", err)\n\t}\n\tdefer log.Flush()\n\tif err := env.LogAppStarted(shortName, a.config.GetString(cfgGeneralID)); err != nil {\n\t\treturn err\n\t}\n\tdefer env.LogAppStopped(shortName, a.config.GetString(cfgGeneralID))\n\tdefer log.HandlePanic()\n\n\tprom.ExportElementID(a.config.GetString(cfgGeneralID))\n\tif err := a.TOMLConfig.Validate(); err != nil {\n\t\treturn serrors.WrapStr(\"validate config\", err)\n\t}\n\n\tif a.Main == nil {\n\t\treturn nil\n\t}\n\treturn a.Main()\n}\n\nfunc (a *Application) getLogging() log.Config {\n\treturn log.Config{\n\t\tConsole: log.ConsoleConfig{\n\t\t\tLevel: a.config.GetString(cfgLogConsoleLevel),\n\t\t\tFormat: a.config.GetString(cfgLogConsoleFormat),\n\t\t\tStacktraceLevel: a.config.GetString(cfgLogConsoleStacktraceLevel),\n\t\t},\n\t}\n}\n\nfunc (a *Application) getErrorWriter() io.Writer {\n\tif a.ErrorWriter != nil {\n\t\treturn a.ErrorWriter\n\t}\n\treturn os.Stderr\n}\n\n\/\/ LoggingConfig is implemented by configurations that define logging behavior.\n\/\/ If a application configuration does not implement this interface, then a\n\/\/ default logging configuration is used.\ntype LoggingConfig interface {\n\t\/\/ Logging returns the logging configuration. The Get prefix is used to\n\t\/\/ avoid collisions with data members named Logging.\n\tGetLogging() log.Config\n}\n\n\/\/ IDConfig is implemented by configurations that define a SCION instance ID.\n\/\/ If an application configuration does not implement this interface, then the\n\/\/ SCION instance ID is equal to the application binary name.\ntype IDConfig interface {\n\t\/\/ ID returns the SCION instance ID of the application. The Get prefix is\n\t\/\/ used to avoid collisions with data members named ID.\n\tGetID() string\n}\n\n\/\/ newCommandTemplate returns a cobra command template for a SCION server application.\nfunc newCommandTemplate(executable string, shortName string, config config.Sampler,\n\tsamplers ...func(command.Pather) *cobra.Command) *cobra.Command {\n\n\tcmd := &cobra.Command{\n\t\tUse: executable,\n\t\tShort: shortName,\n\t\tExample: fmt.Sprintf(\" %s --config %s\", executable, \"config.toml\"),\n\t\tSilenceErrors: true,\n\t\tSilenceUsage: true,\n\t\tArgs: cobra.NoArgs,\n\t}\n\tcmd.AddCommand(\n\t\tcommand.NewCompletion(cmd),\n\t\tcommand.NewSample(\n\t\t\tcmd,\n\t\t\tappend(samplers, command.NewSampleConfig(config))...,\n\t\t),\n\t\tcommand.NewVersion(cmd),\n\t)\n\tcmd.Flags().String(cfgConfigFile, \"\", \"Configuration file (required)\")\n\tcmd.MarkFlagRequired(cfgConfigFile)\n\treturn cmd\n}\n<commit_msg>app: add newline on fatal error<commit_after>\/\/ Copyright 2020 Anapaya Systems\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package launcher includes the shared application execution boilerplate of all\n\/\/ SCION servers.\npackage launcher\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"syscall\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/viper\"\n\n\t\"github.com\/scionproto\/scion\/go\/lib\/config\"\n\tlibconfig \"github.com\/scionproto\/scion\/go\/lib\/config\"\n\t\"github.com\/scionproto\/scion\/go\/lib\/env\"\n\t\"github.com\/scionproto\/scion\/go\/lib\/fatal\"\n\t\"github.com\/scionproto\/scion\/go\/lib\/log\"\n\t\"github.com\/scionproto\/scion\/go\/lib\/metrics\"\n\t\"github.com\/scionproto\/scion\/go\/lib\/prom\"\n\t\"github.com\/scionproto\/scion\/go\/lib\/serrors\"\n\t\"github.com\/scionproto\/scion\/go\/pkg\/command\"\n)\n\n\/\/ Configuration keys used by the launcher\nconst (\n\tcfgLogConsoleLevel = \"log.console.level\"\n\tcfgLogConsoleFormat = \"log.console.format\"\n\tcfgLogConsoleStacktraceLevel = \"log.console.stacktrace_level\"\n\tcfgGeneralID = \"general.id\"\n\tcfgConfigFile = \"config\"\n)\n\n\/\/ Application models a SCION server application.\ntype Application struct {\n\t\/\/ TOMLConfig holds the Go data structure for the application-specific\n\t\/\/ TOML configuration. The Application launcher will check if the TOMLConfig\n\t\/\/ supports additional methods (e.g., custom logging or instance ID) and\n\t\/\/ extract them from the config if that is the case. See the XxxConfig interfaces\n\t\/\/ in this package for more information.\n\tTOMLConfig libconfig.Config\n\n\t\/\/ Samplers contains additional configuration samplers to be included\n\t\/\/ under the sample subcommand. If empty, no additional samplers are\n\t\/\/ listed.\n\t\/\/\n\t\/\/ DEPRECATED. This field will be removed once Anapaya\/scion#5000 is implemented.\n\tSamplers []func(command.Pather) *cobra.Command\n\n\t\/\/ ShortName is the short name of the application. If empty, the executable name is used.\n\t\/\/ The ShortName could be, for example, \"SCION Daemon\" for the SCION Daemon.\n\tShortName string\n\n\t\/\/ Main is the custom logic of the application. If nil, no custom logic is executed\n\t\/\/ (and only the setup\/teardown harness runs). If Main returns an error, the\n\t\/\/ Run method will return a non-zero exit code.\n\tMain func() error\n\n\t\/\/ ErrorWriter specifies where error output should be printed. If nil, os.Stderr is used.\n\tErrorWriter io.Writer\n\n\t\/\/ config contains the Viper configuration KV store.\n\tconfig *viper.Viper\n}\n\n\/\/ Run sets up the common SCION server harness, and then passes control to the Main\n\/\/ function (if one exists).\n\/\/\n\/\/ Run uses the following globals:\n\/\/ os.Args\n\/\/\n\/\/ Run will exit the application if it encounters a fatal error.\nfunc (a *Application) Run() {\n\tif err := a.run(); err != nil {\n\t\tfmt.Fprintf(a.getErrorWriter(), \"fatal error: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc (a *Application) run() error {\n\texecutable := filepath.Base(os.Args[0])\n\tshortName := a.getShortName(executable)\n\n\tcmd := newCommandTemplate(executable, shortName, a.TOMLConfig, a.Samplers...)\n\tcmd.RunE = func(cmd *cobra.Command, args []string) error {\n\t\treturn a.executeCommand(shortName)\n\t}\n\ta.config = viper.New()\n\ta.config.SetDefault(cfgLogConsoleLevel, log.DefaultConsoleLevel)\n\ta.config.SetDefault(cfgLogConsoleFormat, \"human\")\n\ta.config.SetDefault(cfgLogConsoleStacktraceLevel, log.DefaultStacktraceLevel)\n\ta.config.SetDefault(cfgGeneralID, executable)\n\t\/\/ The configuration file location is specified through command-line flags.\n\t\/\/ Once the comand-line flags are parsed, we register the location of the\n\t\/\/ config file with the viper config.\n\ta.config.BindPFlag(cfgConfigFile, cmd.Flags().Lookup(cfgConfigFile))\n\n\t\/\/ All servers accept SIGTERM to perform clean shutdown (for example, this\n\t\/\/ is used behind the scenes by docker stop to cleanly shut down a container).\n\tsigterm := make(chan os.Signal)\n\tsignal.Notify(sigterm, syscall.SIGTERM)\n\tgo func() {\n\t\tdefer log.HandlePanic()\n\t\t<-sigterm\n\t\tlog.Info(\"Received SIGTERM signal, exiting...\")\n\t\t\/\/ FIXME(scrye): Use context.Context and clean context propagation to\n\t\t\/\/ server modules instead of a global cancelation signal.\n\t\tfatal.Shutdown(env.ShutdownGraceInterval)\n\t}()\n\n\treturn cmd.Execute()\n}\n\nfunc (a *Application) getShortName(executable string) string {\n\tif a.ShortName != \"\" {\n\t\treturn a.ShortName\n\t}\n\treturn executable\n}\n\nfunc (a *Application) executeCommand(shortName string) error {\n\tos.Setenv(\"TZ\", \"UTC\")\n\tfatal.Init()\n\n\t\/\/ Load launcher configurations from the same config file as the custom\n\t\/\/ application configuration.\n\ta.config.SetConfigType(\"toml\")\n\ta.config.SetConfigFile(a.config.GetString(cfgConfigFile))\n\tif err := a.config.ReadInConfig(); err != nil {\n\t\treturn serrors.WrapStr(\"loading generic server config from file\", err,\n\t\t\t\"file\", a.config.GetString(cfgConfigFile))\n\t}\n\n\tif err := libconfig.LoadFile(a.config.GetString(cfgConfigFile), a.TOMLConfig); err != nil {\n\t\treturn serrors.WrapStr(\"loading config from file\", err,\n\t\t\t\"file\", a.config.GetString(cfgConfigFile))\n\t}\n\ta.TOMLConfig.InitDefaults()\n\n\tlogEntriesTotal := metrics.NewPromCounterFrom(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"lib_log_emitted_entries_total\",\n\t\t\tHelp: \"Total number of log entries emitted.\",\n\t\t},\n\t\t[]string{\"level\"},\n\t)\n\topt := log.WithEntriesCounter(log.EntriesCounter{\n\t\tDebug: logEntriesTotal.With(\"level\", \"debug\"),\n\t\tInfo: logEntriesTotal.With(\"level\", \"info\"),\n\t\tError: logEntriesTotal.With(\"level\", \"error\"),\n\t})\n\n\tif err := log.Setup(a.getLogging(), opt); err != nil {\n\t\treturn serrors.WrapStr(\"initialize logging\", err)\n\t}\n\tdefer log.Flush()\n\tif err := env.LogAppStarted(shortName, a.config.GetString(cfgGeneralID)); err != nil {\n\t\treturn err\n\t}\n\tdefer env.LogAppStopped(shortName, a.config.GetString(cfgGeneralID))\n\tdefer log.HandlePanic()\n\n\tprom.ExportElementID(a.config.GetString(cfgGeneralID))\n\tif err := a.TOMLConfig.Validate(); err != nil {\n\t\treturn serrors.WrapStr(\"validate config\", err)\n\t}\n\n\tif a.Main == nil {\n\t\treturn nil\n\t}\n\treturn a.Main()\n}\n\nfunc (a *Application) getLogging() log.Config {\n\treturn log.Config{\n\t\tConsole: log.ConsoleConfig{\n\t\t\tLevel: a.config.GetString(cfgLogConsoleLevel),\n\t\t\tFormat: a.config.GetString(cfgLogConsoleFormat),\n\t\t\tStacktraceLevel: a.config.GetString(cfgLogConsoleStacktraceLevel),\n\t\t},\n\t}\n}\n\nfunc (a *Application) getErrorWriter() io.Writer {\n\tif a.ErrorWriter != nil {\n\t\treturn a.ErrorWriter\n\t}\n\treturn os.Stderr\n}\n\n\/\/ LoggingConfig is implemented by configurations that define logging behavior.\n\/\/ If a application configuration does not implement this interface, then a\n\/\/ default logging configuration is used.\ntype LoggingConfig interface {\n\t\/\/ Logging returns the logging configuration. The Get prefix is used to\n\t\/\/ avoid collisions with data members named Logging.\n\tGetLogging() log.Config\n}\n\n\/\/ IDConfig is implemented by configurations that define a SCION instance ID.\n\/\/ If an application configuration does not implement this interface, then the\n\/\/ SCION instance ID is equal to the application binary name.\ntype IDConfig interface {\n\t\/\/ ID returns the SCION instance ID of the application. The Get prefix is\n\t\/\/ used to avoid collisions with data members named ID.\n\tGetID() string\n}\n\n\/\/ newCommandTemplate returns a cobra command template for a SCION server application.\nfunc newCommandTemplate(executable string, shortName string, config config.Sampler,\n\tsamplers ...func(command.Pather) *cobra.Command) *cobra.Command {\n\n\tcmd := &cobra.Command{\n\t\tUse: executable,\n\t\tShort: shortName,\n\t\tExample: fmt.Sprintf(\" %s --config %s\", executable, \"config.toml\"),\n\t\tSilenceErrors: true,\n\t\tSilenceUsage: true,\n\t\tArgs: cobra.NoArgs,\n\t}\n\tcmd.AddCommand(\n\t\tcommand.NewCompletion(cmd),\n\t\tcommand.NewSample(\n\t\t\tcmd,\n\t\t\tappend(samplers, command.NewSampleConfig(config))...,\n\t\t),\n\t\tcommand.NewVersion(cmd),\n\t)\n\tcmd.Flags().String(cfgConfigFile, \"\", \"Configuration file (required)\")\n\tcmd.MarkFlagRequired(cfgConfigFile)\n\treturn cmd\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012, Google Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage binlog\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\tlog \"github.com\/golang\/glog\"\n\tmproto \"github.com\/youtube\/vitess\/go\/mysql\/proto\"\n\t\"github.com\/youtube\/vitess\/go\/stats\"\n\t\"github.com\/youtube\/vitess\/go\/sync2\"\n\t\"github.com\/youtube\/vitess\/go\/tb\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/binlog\/proto\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/key\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/mysqlctl\"\n\tmyproto \"github.com\/youtube\/vitess\/go\/vt\/mysqlctl\/proto\"\n\n\tpb \"github.com\/youtube\/vitess\/go\/vt\/proto\/topodata\"\n)\n\n\/* API and config for UpdateStream Service *\/\n\nconst (\n\tusDisabled int64 = iota\n\tusEnabled\n)\n\nvar usStateNames = map[int64]string{\n\tusEnabled: \"Enabled\",\n\tusDisabled: \"Disabled\",\n}\n\nvar (\n\tstreamCount = stats.NewCounters(\"UpdateStreamStreamCount\")\n\tupdateStreamErrors = stats.NewCounters(\"UpdateStreamErrors\")\n\tupdateStreamEvents = stats.NewCounters(\"UpdateStreamEvents\")\n\tkeyrangeStatements = stats.NewInt(\"UpdateStreamKeyRangeStatements\")\n\tkeyrangeTransactions = stats.NewInt(\"UpdateStreamKeyRangeTransactions\")\n\ttablesStatements = stats.NewInt(\"UpdateStreamTablesStatements\")\n\ttablesTransactions = stats.NewInt(\"UpdateStreamTablesTransactions\")\n)\n\n\/\/ UpdateStreamControl is the interface an UpdateStream service implements\n\/\/ to bring it up or down.\ntype UpdateStreamControl interface {\n\t\/\/ Enable will allow any new RPC calls\n\tEnable()\n\n\t\/\/ Disable will interrupt all current calls, and disallow any new call\n\tDisable()\n\n\t\/\/ IsEnabled returns true iff the service is enabled\n\tIsEnabled() bool\n}\n\n\/\/ UpdateStreamControlMock is an implementation of UpdateStreamControl\n\/\/ to be used in tests\ntype UpdateStreamControlMock struct {\n\tenabled bool\n}\n\n\/\/ NewUpdateStreamControlMock creates a new UpdateStreamControlMock\nfunc NewUpdateStreamControlMock() *UpdateStreamControlMock {\n\treturn &UpdateStreamControlMock{}\n}\n\n\/\/ Enable is part of UpdateStreamControl\nfunc (m *UpdateStreamControlMock) Enable() {\n\tm.enabled = true\n}\n\n\/\/ Disable is part of UpdateStreamControl\nfunc (m *UpdateStreamControlMock) Disable() {\n\tm.enabled = false\n}\n\n\/\/ IsEnabled is part of UpdateStreamControl\nfunc (m *UpdateStreamControlMock) IsEnabled() bool {\n\treturn m.enabled\n}\n\n\/\/ UpdateStream is the real implementation of proto.UpdateStream\n\/\/ and UpdateStreamControl\ntype UpdateStream struct {\n\t\/\/ the following variables are set at construction time\n\n\tmysqld mysqlctl.MysqlDaemon\n\tdbname string\n\n\t\/\/ actionLock protects the following variables\n\tactionLock sync.Mutex\n\tstate sync2.AtomicInt64\n\tstateWaitGroup sync.WaitGroup\n\tstreams streamList\n}\n\ntype streamList struct {\n\tsync.Mutex\n\tstreams map[*sync2.ServiceManager]bool\n}\n\nfunc (sl *streamList) Init() {\n\tsl.Lock()\n\tsl.streams = make(map[*sync2.ServiceManager]bool)\n\tsl.Unlock()\n}\n\nfunc (sl *streamList) Add(e *sync2.ServiceManager) {\n\tsl.Lock()\n\tsl.streams[e] = true\n\tsl.Unlock()\n}\n\nfunc (sl *streamList) Delete(e *sync2.ServiceManager) {\n\tsl.Lock()\n\tdelete(sl.streams, e)\n\tsl.Unlock()\n}\n\nfunc (sl *streamList) Stop() {\n\tsl.Lock()\n\tfor stream := range sl.streams {\n\t\tstream.Stop()\n\t}\n\tsl.Unlock()\n}\n\n\/\/ RegisterUpdateStreamServiceFunc is the type to use for delayed\n\/\/ registration of RPC servers until we have all the objects\ntype RegisterUpdateStreamServiceFunc func(proto.UpdateStream)\n\n\/\/ RegisterUpdateStreamServices is the list of all registration\n\/\/ callbacks to invoke\nvar RegisterUpdateStreamServices []RegisterUpdateStreamServiceFunc\n\n\/\/ NewUpdateStream returns a new UpdateStream object\nfunc NewUpdateStream(mysqld mysqlctl.MysqlDaemon, dbname string) *UpdateStream {\n\treturn &UpdateStream{\n\t\tmysqld: mysqld,\n\t\tdbname: dbname,\n\t}\n}\n\n\/\/ RegisterService needs to be called to publish stats, and to start listening\n\/\/ to clients. Only once instance can call this in a process.\nfunc (updateStream *UpdateStream) RegisterService() {\n\t\/\/ publish the stats\n\tstats.Publish(\"UpdateStreamState\", stats.StringFunc(func() string {\n\t\treturn usStateNames[updateStream.state.Get()]\n\t}))\n\n\t\/\/ and register all the RPC protocols\n\tfor _, f := range RegisterUpdateStreamServices {\n\t\tf(updateStream)\n\t}\n}\n\nfunc logError() {\n\tif x := recover(); x != nil {\n\t\tlog.Errorf(\"%s at\\n%s\", x.(error).Error(), tb.Stack(4))\n\t}\n}\n\n\/\/ Enable will allow connections to the service\nfunc (updateStream *UpdateStream) Enable() {\n\tdefer logError()\n\tupdateStream.actionLock.Lock()\n\tdefer updateStream.actionLock.Unlock()\n\tif updateStream.IsEnabled() {\n\t\treturn\n\t}\n\n\tupdateStream.state.Set(usEnabled)\n\tupdateStream.streams.Init()\n\tlog.Infof(\"Enabling update stream, dbname: %s\", updateStream.dbname)\n}\n\n\/\/ Disable will disallow any connection to the service\nfunc (updateStream *UpdateStream) Disable() {\n\tdefer logError()\n\tupdateStream.actionLock.Lock()\n\tdefer updateStream.actionLock.Unlock()\n\tif !updateStream.IsEnabled() {\n\t\treturn\n\t}\n\n\tupdateStream.state.Set(usDisabled)\n\tupdateStream.streams.Stop()\n\tupdateStream.stateWaitGroup.Wait()\n\tlog.Infof(\"Update Stream Disabled\")\n}\n\n\/\/ IsEnabled returns true if UpdateStream is enabled\nfunc (updateStream *UpdateStream) IsEnabled() bool {\n\treturn updateStream.state.Get() == usEnabled\n}\n\n\/\/ ServeUpdateStream is part of the proto.UpdateStream interface\nfunc (updateStream *UpdateStream) ServeUpdateStream(position string, sendReply func(reply *proto.StreamEvent) error) (err error) {\n\tpos, err := myproto.DecodeReplicationPosition(position)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tupdateStream.actionLock.Lock()\n\tif !updateStream.IsEnabled() {\n\t\tupdateStream.actionLock.Unlock()\n\t\tlog.Errorf(\"Unable to serve client request: update stream service is not enabled\")\n\t\treturn fmt.Errorf(\"update stream service is not enabled\")\n\t}\n\tupdateStream.stateWaitGroup.Add(1)\n\tupdateStream.actionLock.Unlock()\n\tdefer updateStream.stateWaitGroup.Done()\n\n\tstreamCount.Add(\"Updates\", 1)\n\tdefer streamCount.Add(\"Updates\", -1)\n\tlog.Infof(\"ServeUpdateStream starting @ %#v\", pos)\n\n\tevs := NewEventStreamer(updateStream.dbname, updateStream.mysqld, pos, func(reply *proto.StreamEvent) error {\n\t\tif reply.Category == \"ERR\" {\n\t\t\tupdateStreamErrors.Add(\"UpdateStream\", 1)\n\t\t} else {\n\t\t\tupdateStreamEvents.Add(reply.Category, 1)\n\t\t}\n\t\treturn sendReply(reply)\n\t})\n\n\tsvm := &sync2.ServiceManager{}\n\tsvm.Go(evs.Stream)\n\tupdateStream.streams.Add(svm)\n\tdefer updateStream.streams.Delete(svm)\n\treturn svm.Join()\n}\n\n\/\/ StreamKeyRange is part of the proto.UpdateStream interface\nfunc (updateStream *UpdateStream) StreamKeyRange(position string, keyspaceIDType key.KeyspaceIdType, keyRange *pb.KeyRange, charset *mproto.Charset, sendReply func(reply *proto.BinlogTransaction) error) (err error) {\n\tpos, err := myproto.DecodeReplicationPosition(position)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tupdateStream.actionLock.Lock()\n\tif !updateStream.IsEnabled() {\n\t\tupdateStream.actionLock.Unlock()\n\t\tlog.Errorf(\"Unable to serve client request: Update stream service is not enabled\")\n\t\treturn fmt.Errorf(\"update stream service is not enabled\")\n\t}\n\tupdateStream.stateWaitGroup.Add(1)\n\tupdateStream.actionLock.Unlock()\n\tdefer updateStream.stateWaitGroup.Done()\n\n\tstreamCount.Add(\"KeyRange\", 1)\n\tdefer streamCount.Add(\"KeyRange\", -1)\n\tlog.Infof(\"ServeUpdateStream starting @ %#v\", pos)\n\n\t\/\/ Calls cascade like this: BinlogStreamer->KeyRangeFilterFunc->func(*proto.BinlogTransaction)->sendReply\n\tf := KeyRangeFilterFunc(keyspaceIDType, keyRange, func(reply *proto.BinlogTransaction) error {\n\t\tkeyrangeStatements.Add(int64(len(reply.Statements)))\n\t\tkeyrangeTransactions.Add(1)\n\t\treturn sendReply(reply)\n\t})\n\tbls := NewBinlogStreamer(updateStream.dbname, updateStream.mysqld, charset, pos, f)\n\n\tsvm := &sync2.ServiceManager{}\n\tsvm.Go(bls.Stream)\n\tupdateStream.streams.Add(svm)\n\tdefer updateStream.streams.Delete(svm)\n\treturn svm.Join()\n}\n\n\/\/ StreamTables is part of the proto.UpdateStream interface\nfunc (updateStream *UpdateStream) StreamTables(position string, tables []string, charset *mproto.Charset, sendReply func(reply *proto.BinlogTransaction) error) (err error) {\n\tpos, err := myproto.DecodeReplicationPosition(position)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tupdateStream.actionLock.Lock()\n\tif !updateStream.IsEnabled() {\n\t\tupdateStream.actionLock.Unlock()\n\t\tlog.Errorf(\"Unable to serve client request: Update stream service is not enabled\")\n\t\treturn fmt.Errorf(\"update stream service is not enabled\")\n\t}\n\tupdateStream.stateWaitGroup.Add(1)\n\tupdateStream.actionLock.Unlock()\n\tdefer updateStream.stateWaitGroup.Done()\n\n\tstreamCount.Add(\"Tables\", 1)\n\tdefer streamCount.Add(\"Tables\", -1)\n\tlog.Infof(\"ServeUpdateStream starting @ %#v\", pos)\n\n\t\/\/ Calls cascade like this: BinlogStreamer->TablesFilterFunc->func(*proto.BinlogTransaction)->sendReply\n\tf := TablesFilterFunc(tables, func(reply *proto.BinlogTransaction) error {\n\t\tkeyrangeStatements.Add(int64(len(reply.Statements)))\n\t\tkeyrangeTransactions.Add(1)\n\t\treturn sendReply(reply)\n\t})\n\tbls := NewBinlogStreamer(updateStream.dbname, updateStream.mysqld, charset, pos, f)\n\n\tsvm := &sync2.ServiceManager{}\n\tsvm.Go(bls.Stream)\n\tupdateStream.streams.Add(svm)\n\tdefer updateStream.streams.Delete(svm)\n\treturn svm.Join()\n}\n\n\/\/ HandlePanic is part of the proto.UpdateStream interface\nfunc (updateStream *UpdateStream) HandlePanic(err *error) {\n\tif x := recover(); x != nil {\n\t\tlog.Errorf(\"Uncaught panic:\\n%v\\n%s\", x, tb.Stack(4))\n\t\t*err = fmt.Errorf(\"uncaught panic: %v\", x)\n\t}\n}\n<commit_msg>Fix data race in UpdateStreamControlMock.<commit_after>\/\/ Copyright 2012, Google Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage binlog\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\tlog \"github.com\/golang\/glog\"\n\tmproto \"github.com\/youtube\/vitess\/go\/mysql\/proto\"\n\t\"github.com\/youtube\/vitess\/go\/stats\"\n\t\"github.com\/youtube\/vitess\/go\/sync2\"\n\t\"github.com\/youtube\/vitess\/go\/tb\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/binlog\/proto\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/key\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/mysqlctl\"\n\tmyproto \"github.com\/youtube\/vitess\/go\/vt\/mysqlctl\/proto\"\n\n\tpb \"github.com\/youtube\/vitess\/go\/vt\/proto\/topodata\"\n)\n\n\/* API and config for UpdateStream Service *\/\n\nconst (\n\tusDisabled int64 = iota\n\tusEnabled\n)\n\nvar usStateNames = map[int64]string{\n\tusEnabled: \"Enabled\",\n\tusDisabled: \"Disabled\",\n}\n\nvar (\n\tstreamCount = stats.NewCounters(\"UpdateStreamStreamCount\")\n\tupdateStreamErrors = stats.NewCounters(\"UpdateStreamErrors\")\n\tupdateStreamEvents = stats.NewCounters(\"UpdateStreamEvents\")\n\tkeyrangeStatements = stats.NewInt(\"UpdateStreamKeyRangeStatements\")\n\tkeyrangeTransactions = stats.NewInt(\"UpdateStreamKeyRangeTransactions\")\n\ttablesStatements = stats.NewInt(\"UpdateStreamTablesStatements\")\n\ttablesTransactions = stats.NewInt(\"UpdateStreamTablesTransactions\")\n)\n\n\/\/ UpdateStreamControl is the interface an UpdateStream service implements\n\/\/ to bring it up or down.\ntype UpdateStreamControl interface {\n\t\/\/ Enable will allow any new RPC calls\n\tEnable()\n\n\t\/\/ Disable will interrupt all current calls, and disallow any new call\n\tDisable()\n\n\t\/\/ IsEnabled returns true iff the service is enabled\n\tIsEnabled() bool\n}\n\n\/\/ UpdateStreamControlMock is an implementation of UpdateStreamControl\n\/\/ to be used in tests\ntype UpdateStreamControlMock struct {\n\tenabled bool\n\tsync.Mutex\n}\n\n\/\/ NewUpdateStreamControlMock creates a new UpdateStreamControlMock\nfunc NewUpdateStreamControlMock() *UpdateStreamControlMock {\n\treturn &UpdateStreamControlMock{}\n}\n\n\/\/ Enable is part of UpdateStreamControl\nfunc (m *UpdateStreamControlMock) Enable() {\n\tm.Lock()\n\tm.enabled = true\n\tm.Unlock()\n}\n\n\/\/ Disable is part of UpdateStreamControl\nfunc (m *UpdateStreamControlMock) Disable() {\n\tm.Lock()\n\tm.enabled = false\n\tm.Unlock()\n}\n\n\/\/ IsEnabled is part of UpdateStreamControl\nfunc (m *UpdateStreamControlMock) IsEnabled() bool {\n\tm.Lock()\n\tdefer m.Unlock()\n\treturn m.enabled\n}\n\n\/\/ UpdateStream is the real implementation of proto.UpdateStream\n\/\/ and UpdateStreamControl\ntype UpdateStream struct {\n\t\/\/ the following variables are set at construction time\n\n\tmysqld mysqlctl.MysqlDaemon\n\tdbname string\n\n\t\/\/ actionLock protects the following variables\n\tactionLock sync.Mutex\n\tstate sync2.AtomicInt64\n\tstateWaitGroup sync.WaitGroup\n\tstreams streamList\n}\n\ntype streamList struct {\n\tsync.Mutex\n\tstreams map[*sync2.ServiceManager]bool\n}\n\nfunc (sl *streamList) Init() {\n\tsl.Lock()\n\tsl.streams = make(map[*sync2.ServiceManager]bool)\n\tsl.Unlock()\n}\n\nfunc (sl *streamList) Add(e *sync2.ServiceManager) {\n\tsl.Lock()\n\tsl.streams[e] = true\n\tsl.Unlock()\n}\n\nfunc (sl *streamList) Delete(e *sync2.ServiceManager) {\n\tsl.Lock()\n\tdelete(sl.streams, e)\n\tsl.Unlock()\n}\n\nfunc (sl *streamList) Stop() {\n\tsl.Lock()\n\tfor stream := range sl.streams {\n\t\tstream.Stop()\n\t}\n\tsl.Unlock()\n}\n\n\/\/ RegisterUpdateStreamServiceFunc is the type to use for delayed\n\/\/ registration of RPC servers until we have all the objects\ntype RegisterUpdateStreamServiceFunc func(proto.UpdateStream)\n\n\/\/ RegisterUpdateStreamServices is the list of all registration\n\/\/ callbacks to invoke\nvar RegisterUpdateStreamServices []RegisterUpdateStreamServiceFunc\n\n\/\/ NewUpdateStream returns a new UpdateStream object\nfunc NewUpdateStream(mysqld mysqlctl.MysqlDaemon, dbname string) *UpdateStream {\n\treturn &UpdateStream{\n\t\tmysqld: mysqld,\n\t\tdbname: dbname,\n\t}\n}\n\n\/\/ RegisterService needs to be called to publish stats, and to start listening\n\/\/ to clients. Only once instance can call this in a process.\nfunc (updateStream *UpdateStream) RegisterService() {\n\t\/\/ publish the stats\n\tstats.Publish(\"UpdateStreamState\", stats.StringFunc(func() string {\n\t\treturn usStateNames[updateStream.state.Get()]\n\t}))\n\n\t\/\/ and register all the RPC protocols\n\tfor _, f := range RegisterUpdateStreamServices {\n\t\tf(updateStream)\n\t}\n}\n\nfunc logError() {\n\tif x := recover(); x != nil {\n\t\tlog.Errorf(\"%s at\\n%s\", x.(error).Error(), tb.Stack(4))\n\t}\n}\n\n\/\/ Enable will allow connections to the service\nfunc (updateStream *UpdateStream) Enable() {\n\tdefer logError()\n\tupdateStream.actionLock.Lock()\n\tdefer updateStream.actionLock.Unlock()\n\tif updateStream.IsEnabled() {\n\t\treturn\n\t}\n\n\tupdateStream.state.Set(usEnabled)\n\tupdateStream.streams.Init()\n\tlog.Infof(\"Enabling update stream, dbname: %s\", updateStream.dbname)\n}\n\n\/\/ Disable will disallow any connection to the service\nfunc (updateStream *UpdateStream) Disable() {\n\tdefer logError()\n\tupdateStream.actionLock.Lock()\n\tdefer updateStream.actionLock.Unlock()\n\tif !updateStream.IsEnabled() {\n\t\treturn\n\t}\n\n\tupdateStream.state.Set(usDisabled)\n\tupdateStream.streams.Stop()\n\tupdateStream.stateWaitGroup.Wait()\n\tlog.Infof(\"Update Stream Disabled\")\n}\n\n\/\/ IsEnabled returns true if UpdateStream is enabled\nfunc (updateStream *UpdateStream) IsEnabled() bool {\n\treturn updateStream.state.Get() == usEnabled\n}\n\n\/\/ ServeUpdateStream is part of the proto.UpdateStream interface\nfunc (updateStream *UpdateStream) ServeUpdateStream(position string, sendReply func(reply *proto.StreamEvent) error) (err error) {\n\tpos, err := myproto.DecodeReplicationPosition(position)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tupdateStream.actionLock.Lock()\n\tif !updateStream.IsEnabled() {\n\t\tupdateStream.actionLock.Unlock()\n\t\tlog.Errorf(\"Unable to serve client request: update stream service is not enabled\")\n\t\treturn fmt.Errorf(\"update stream service is not enabled\")\n\t}\n\tupdateStream.stateWaitGroup.Add(1)\n\tupdateStream.actionLock.Unlock()\n\tdefer updateStream.stateWaitGroup.Done()\n\n\tstreamCount.Add(\"Updates\", 1)\n\tdefer streamCount.Add(\"Updates\", -1)\n\tlog.Infof(\"ServeUpdateStream starting @ %#v\", pos)\n\n\tevs := NewEventStreamer(updateStream.dbname, updateStream.mysqld, pos, func(reply *proto.StreamEvent) error {\n\t\tif reply.Category == \"ERR\" {\n\t\t\tupdateStreamErrors.Add(\"UpdateStream\", 1)\n\t\t} else {\n\t\t\tupdateStreamEvents.Add(reply.Category, 1)\n\t\t}\n\t\treturn sendReply(reply)\n\t})\n\n\tsvm := &sync2.ServiceManager{}\n\tsvm.Go(evs.Stream)\n\tupdateStream.streams.Add(svm)\n\tdefer updateStream.streams.Delete(svm)\n\treturn svm.Join()\n}\n\n\/\/ StreamKeyRange is part of the proto.UpdateStream interface\nfunc (updateStream *UpdateStream) StreamKeyRange(position string, keyspaceIDType key.KeyspaceIdType, keyRange *pb.KeyRange, charset *mproto.Charset, sendReply func(reply *proto.BinlogTransaction) error) (err error) {\n\tpos, err := myproto.DecodeReplicationPosition(position)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tupdateStream.actionLock.Lock()\n\tif !updateStream.IsEnabled() {\n\t\tupdateStream.actionLock.Unlock()\n\t\tlog.Errorf(\"Unable to serve client request: Update stream service is not enabled\")\n\t\treturn fmt.Errorf(\"update stream service is not enabled\")\n\t}\n\tupdateStream.stateWaitGroup.Add(1)\n\tupdateStream.actionLock.Unlock()\n\tdefer updateStream.stateWaitGroup.Done()\n\n\tstreamCount.Add(\"KeyRange\", 1)\n\tdefer streamCount.Add(\"KeyRange\", -1)\n\tlog.Infof(\"ServeUpdateStream starting @ %#v\", pos)\n\n\t\/\/ Calls cascade like this: BinlogStreamer->KeyRangeFilterFunc->func(*proto.BinlogTransaction)->sendReply\n\tf := KeyRangeFilterFunc(keyspaceIDType, keyRange, func(reply *proto.BinlogTransaction) error {\n\t\tkeyrangeStatements.Add(int64(len(reply.Statements)))\n\t\tkeyrangeTransactions.Add(1)\n\t\treturn sendReply(reply)\n\t})\n\tbls := NewBinlogStreamer(updateStream.dbname, updateStream.mysqld, charset, pos, f)\n\n\tsvm := &sync2.ServiceManager{}\n\tsvm.Go(bls.Stream)\n\tupdateStream.streams.Add(svm)\n\tdefer updateStream.streams.Delete(svm)\n\treturn svm.Join()\n}\n\n\/\/ StreamTables is part of the proto.UpdateStream interface\nfunc (updateStream *UpdateStream) StreamTables(position string, tables []string, charset *mproto.Charset, sendReply func(reply *proto.BinlogTransaction) error) (err error) {\n\tpos, err := myproto.DecodeReplicationPosition(position)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tupdateStream.actionLock.Lock()\n\tif !updateStream.IsEnabled() {\n\t\tupdateStream.actionLock.Unlock()\n\t\tlog.Errorf(\"Unable to serve client request: Update stream service is not enabled\")\n\t\treturn fmt.Errorf(\"update stream service is not enabled\")\n\t}\n\tupdateStream.stateWaitGroup.Add(1)\n\tupdateStream.actionLock.Unlock()\n\tdefer updateStream.stateWaitGroup.Done()\n\n\tstreamCount.Add(\"Tables\", 1)\n\tdefer streamCount.Add(\"Tables\", -1)\n\tlog.Infof(\"ServeUpdateStream starting @ %#v\", pos)\n\n\t\/\/ Calls cascade like this: BinlogStreamer->TablesFilterFunc->func(*proto.BinlogTransaction)->sendReply\n\tf := TablesFilterFunc(tables, func(reply *proto.BinlogTransaction) error {\n\t\tkeyrangeStatements.Add(int64(len(reply.Statements)))\n\t\tkeyrangeTransactions.Add(1)\n\t\treturn sendReply(reply)\n\t})\n\tbls := NewBinlogStreamer(updateStream.dbname, updateStream.mysqld, charset, pos, f)\n\n\tsvm := &sync2.ServiceManager{}\n\tsvm.Go(bls.Stream)\n\tupdateStream.streams.Add(svm)\n\tdefer updateStream.streams.Delete(svm)\n\treturn svm.Join()\n}\n\n\/\/ HandlePanic is part of the proto.UpdateStream interface\nfunc (updateStream *UpdateStream) HandlePanic(err *error) {\n\tif x := recover(); x != nil {\n\t\tlog.Errorf(\"Uncaught panic:\\n%v\\n%s\", x, tb.Stack(4))\n\t\t*err = fmt.Errorf(\"uncaught panic: %v\", x)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright IBM Corp. All Rights Reserved.\n\nSPDX-License-Identifier: Apache-2.0\n*\/\n\npackage pull\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/hyperledger\/fabric\/gossip\/comm\"\n\t\"github.com\/hyperledger\/fabric\/gossip\/common\"\n\t\"github.com\/hyperledger\/fabric\/gossip\/discovery\"\n\t\"github.com\/hyperledger\/fabric\/gossip\/gossip\/algo\"\n\t\"github.com\/hyperledger\/fabric\/gossip\/util\"\n\tproto \"github.com\/hyperledger\/fabric\/protos\/gossip\"\n\t\"github.com\/pkg\/errors\"\n\t\"go.uber.org\/zap\/zapcore\"\n)\n\n\/\/ Constants go here.\nconst (\n\tHelloMsgType MsgType = iota\n\tDigestMsgType\n\tRequestMsgType\n\tResponseMsgType\n)\n\n\/\/ MsgType defines the type of a message that is sent to the PullStore\ntype MsgType int\n\n\/\/ MessageHook defines a function that will run after a certain pull message is received\ntype MessageHook func(itemIDs []string, items []*proto.SignedGossipMessage, msg proto.ReceivedMessage)\n\n\/\/ Sender sends messages to remote peers\ntype Sender interface {\n\t\/\/ Send sends a message to a list of remote peers\n\tSend(msg *proto.SignedGossipMessage, peers ...*comm.RemotePeer)\n}\n\n\/\/ MembershipService obtains membership information of alive peers\ntype MembershipService interface {\n\t\/\/ GetMembership returns the membership of\n\tGetMembership() []discovery.NetworkMember\n}\n\n\/\/ Config defines the configuration of the pull mediator\ntype Config struct {\n\tID string\n\tPullInterval time.Duration \/\/ Duration between pull invocations\n\tChannel common.ChainID\n\tPeerCountToSelect int \/\/ Number of peers to initiate pull with\n\tTag proto.GossipMessage_Tag\n\tMsgType proto.PullMsgType\n}\n\n\/\/ IngressDigestFilter filters out entities in digests that are received from remote peers\ntype IngressDigestFilter func(digestMsg *proto.DataDigest) *proto.DataDigest\n\n\/\/ EgressDigestFilter filters digests to be sent to a remote peer, that\n\/\/ sent a hello with the following message\ntype EgressDigestFilter func(helloMsg proto.ReceivedMessage) func(digestItem string) bool\n\n\/\/ byContext converts this EgressDigFilter to an algo.DigestFilter\nfunc (df EgressDigestFilter) byContext() algo.DigestFilter {\n\treturn func(context interface{}) func(digestItem string) bool {\n\t\treturn func(digestItem string) bool {\n\t\t\treturn df(context.(proto.ReceivedMessage))(digestItem)\n\t\t}\n\t}\n}\n\n\/\/ PullAdapter defines methods of the pullStore to interact\n\/\/ with various modules of gossip\ntype PullAdapter struct {\n\tSndr Sender\n\tMemSvc MembershipService\n\tIdExtractor proto.IdentifierExtractor\n\tMsgCons proto.MsgConsumer\n\tEgressDigFilter EgressDigestFilter\n\tIngressDigFilter IngressDigestFilter\n}\n\n\/\/ Mediator is a component wrap a PullEngine and provides the methods\n\/\/ it needs to perform pull synchronization.\n\/\/ The specialization of a pull mediator to a certain type of message is\n\/\/ done by the configuration, a IdentifierExtractor, IdentifierExtractor\n\/\/ given at construction, and also hooks that can be registered for each\n\/\/ type of pullMsgType (hello, digest, req, res).\ntype Mediator interface {\n\t\/\/ Stop stop the Mediator\n\tStop()\n\n\t\/\/ RegisterMsgHook registers a message hook to a specific type of pull message\n\tRegisterMsgHook(MsgType, MessageHook)\n\n\t\/\/ Add adds a GossipMessage to the Mediator\n\tAdd(*proto.SignedGossipMessage)\n\n\t\/\/ Remove removes a GossipMessage from the Mediator with a matching digest,\n\t\/\/ if such a message exits\n\tRemove(digest string)\n\n\t\/\/ HandleMessage handles a message from some remote peer\n\tHandleMessage(msg proto.ReceivedMessage)\n}\n\n\/\/ pullMediatorImpl is an implementation of Mediator\ntype pullMediatorImpl struct {\n\tsync.RWMutex\n\t*PullAdapter\n\tmsgType2Hook map[MsgType][]MessageHook\n\tconfig Config\n\tlogger util.Logger\n\titemID2Msg map[string]*proto.SignedGossipMessage\n\tengine *algo.PullEngine\n}\n\n\/\/ NewPullMediator returns a new Mediator\nfunc NewPullMediator(config Config, adapter *PullAdapter) Mediator {\n\tegressDigFilter := adapter.EgressDigFilter\n\n\tacceptAllFilter := func(_ proto.ReceivedMessage) func(string) bool {\n\t\treturn func(_ string) bool {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tif egressDigFilter == nil {\n\t\tegressDigFilter = acceptAllFilter\n\t}\n\n\tp := &pullMediatorImpl{\n\t\tPullAdapter: adapter,\n\t\tmsgType2Hook: make(map[MsgType][]MessageHook),\n\t\tconfig: config,\n\t\tlogger: util.GetLogger(util.PullLogger, config.ID),\n\t\titemID2Msg: make(map[string]*proto.SignedGossipMessage),\n\t}\n\n\tp.engine = algo.NewPullEngineWithFilter(p, config.PullInterval, egressDigFilter.byContext())\n\n\tif adapter.IngressDigFilter == nil {\n\t\t\/\/ Create accept all filter\n\t\tadapter.IngressDigFilter = func(digestMsg *proto.DataDigest) *proto.DataDigest {\n\t\t\treturn digestMsg\n\t\t}\n\t}\n\treturn p\n\n}\n\nfunc (p *pullMediatorImpl) HandleMessage(m proto.ReceivedMessage) {\n\tif m.GetGossipMessage() == nil || !m.GetGossipMessage().IsPullMsg() {\n\t\treturn\n\t}\n\tmsg := m.GetGossipMessage()\n\tmsgType := msg.GetPullMsgType()\n\tif msgType != p.config.MsgType {\n\t\treturn\n\t}\n\n\tp.logger.Debug(msg)\n\n\titemIDs := []string{}\n\titems := []*proto.SignedGossipMessage{}\n\tvar pullMsgType MsgType\n\n\tif helloMsg := msg.GetHello(); helloMsg != nil {\n\t\tpullMsgType = HelloMsgType\n\t\tp.engine.OnHello(helloMsg.Nonce, m)\n\t}\n\tif digest := msg.GetDataDig(); digest != nil {\n\t\td := p.PullAdapter.IngressDigFilter(digest)\n\t\titemIDs = util.BytesToStrings(d.Digests)\n\t\tpullMsgType = DigestMsgType\n\t\tp.engine.OnDigest(itemIDs, d.Nonce, m)\n\t}\n\tif req := msg.GetDataReq(); req != nil {\n\t\titemIDs = util.BytesToStrings(req.Digests)\n\t\tpullMsgType = RequestMsgType\n\t\tp.engine.OnReq(itemIDs, req.Nonce, m)\n\t}\n\tif res := msg.GetDataUpdate(); res != nil {\n\t\titemIDs = make([]string, len(res.Data))\n\t\titems = make([]*proto.SignedGossipMessage, len(res.Data))\n\t\tpullMsgType = ResponseMsgType\n\t\tfor i, pulledMsg := range res.Data {\n\t\t\tmsg, err := pulledMsg.ToGossipMessage()\n\t\t\tif err != nil {\n\t\t\t\tp.logger.Warningf(\"Data update contains an invalid message: %+v\", errors.WithStack(err))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tp.MsgCons(msg)\n\t\t\titemIDs[i] = p.IdExtractor(msg)\n\t\t\titems[i] = msg\n\t\t\tp.Lock()\n\t\t\tp.itemID2Msg[itemIDs[i]] = msg\n\t\t\tp.Unlock()\n\t\t}\n\t\tp.engine.OnRes(itemIDs, res.Nonce)\n\t}\n\n\t\/\/ Invoke hooks for relevant message type\n\tfor _, h := range p.hooksByMsgType(pullMsgType) {\n\t\th(itemIDs, items, m)\n\t}\n}\n\nfunc (p *pullMediatorImpl) Stop() {\n\tp.engine.Stop()\n}\n\n\/\/ RegisterMsgHook registers a message hook to a specific type of pull message\nfunc (p *pullMediatorImpl) RegisterMsgHook(pullMsgType MsgType, hook MessageHook) {\n\tp.Lock()\n\tdefer p.Unlock()\n\tp.msgType2Hook[pullMsgType] = append(p.msgType2Hook[pullMsgType], hook)\n\n}\n\n\/\/ Add adds a GossipMessage to the store\nfunc (p *pullMediatorImpl) Add(msg *proto.SignedGossipMessage) {\n\tp.Lock()\n\tdefer p.Unlock()\n\titemID := p.IdExtractor(msg)\n\tp.itemID2Msg[itemID] = msg\n\tp.engine.Add(itemID)\n}\n\n\/\/ Remove removes a GossipMessage from the Mediator with a matching digest,\n\/\/ if such a message exits\nfunc (p *pullMediatorImpl) Remove(digest string) {\n\tp.Lock()\n\tdefer p.Unlock()\n\tdelete(p.itemID2Msg, digest)\n\tp.engine.Remove(digest)\n}\n\n\/\/ SelectPeers returns a slice of peers which the engine will initiate the protocol with\nfunc (p *pullMediatorImpl) SelectPeers() []string {\n\tremotePeers := SelectEndpoints(p.config.PeerCountToSelect, p.MemSvc.GetMembership())\n\tendpoints := make([]string, len(remotePeers))\n\tfor i, peer := range remotePeers {\n\t\tendpoints[i] = peer.Endpoint\n\t}\n\treturn endpoints\n}\n\n\/\/ Hello sends a hello message to initiate the protocol\n\/\/ and returns an NONCE that is expected to be returned\n\/\/ in the digest message.\nfunc (p *pullMediatorImpl) Hello(dest string, nonce uint64) {\n\thelloMsg := &proto.GossipMessage{\n\t\tChannel: p.config.Channel,\n\t\tTag: p.config.Tag,\n\t\tContent: &proto.GossipMessage_Hello{\n\t\t\tHello: &proto.GossipHello{\n\t\t\t\tNonce: nonce,\n\t\t\t\tMetadata: nil,\n\t\t\t\tMsgType: p.config.MsgType,\n\t\t\t},\n\t\t},\n\t}\n\n\tp.logger.Debug(\"Sending\", p.config.MsgType, \"hello to\", dest)\n\tsMsg, err := helloMsg.NoopSign()\n\tif err != nil {\n\t\tp.logger.Errorf(\"Failed creating SignedGossipMessage: %+v\", errors.WithStack(err))\n\t\treturn\n\t}\n\tp.Sndr.Send(sMsg, p.peersWithEndpoints(dest)...)\n}\n\n\/\/ SendDigest sends a digest to a remote PullEngine.\n\/\/ The context parameter specifies the remote engine to send to.\nfunc (p *pullMediatorImpl) SendDigest(digest []string, nonce uint64, context interface{}) {\n\tdigMsg := &proto.GossipMessage{\n\t\tChannel: p.config.Channel,\n\t\tTag: p.config.Tag,\n\t\tNonce: 0,\n\t\tContent: &proto.GossipMessage_DataDig{\n\t\t\tDataDig: &proto.DataDigest{\n\t\t\t\tMsgType: p.config.MsgType,\n\t\t\t\tNonce: nonce,\n\t\t\t\tDigests: util.StringsToBytes(digest),\n\t\t\t},\n\t\t},\n\t}\n\tremotePeer := context.(proto.ReceivedMessage).GetConnectionInfo()\n\tif p.logger.IsEnabledFor(zapcore.DebugLevel) {\n\t\tp.logger.Debug(\"Sending\", p.config.MsgType, \"digest:\", digMsg.GetDataDig().FormattedDigests(), \"to\", remotePeer)\n\t}\n\n\tcontext.(proto.ReceivedMessage).Respond(digMsg)\n}\n\n\/\/ SendReq sends an array of items to a certain remote PullEngine identified\n\/\/ by a string\nfunc (p *pullMediatorImpl) SendReq(dest string, items []string, nonce uint64) {\n\treq := &proto.GossipMessage{\n\t\tChannel: p.config.Channel,\n\t\tTag: p.config.Tag,\n\t\tNonce: 0,\n\t\tContent: &proto.GossipMessage_DataReq{\n\t\t\tDataReq: &proto.DataRequest{\n\t\t\t\tMsgType: p.config.MsgType,\n\t\t\t\tNonce: nonce,\n\t\t\t\tDigests: util.StringsToBytes(items),\n\t\t\t},\n\t\t},\n\t}\n\tif p.logger.IsEnabledFor(zapcore.DebugLevel) {\n\t\tp.logger.Debug(\"Sending\", req.GetDataReq().FormattedDigests(), \"to\", dest)\n\t}\n\tsMsg, err := req.NoopSign()\n\tif err != nil {\n\t\tp.logger.Warningf(\"Failed creating SignedGossipMessage: %+v\", errors.WithStack(err))\n\t\treturn\n\t}\n\tp.Sndr.Send(sMsg, p.peersWithEndpoints(dest)...)\n}\n\n\/\/ SendRes sends an array of items to a remote PullEngine identified by a context.\nfunc (p *pullMediatorImpl) SendRes(items []string, context interface{}, nonce uint64) {\n\titems2return := []*proto.Envelope{}\n\tp.RLock()\n\tdefer p.RUnlock()\n\tfor _, item := range items {\n\t\tif msg, exists := p.itemID2Msg[item]; exists {\n\t\t\titems2return = append(items2return, msg.Envelope)\n\t\t}\n\t}\n\treturnedUpdate := &proto.GossipMessage{\n\t\tChannel: p.config.Channel,\n\t\tTag: p.config.Tag,\n\t\tNonce: 0,\n\t\tContent: &proto.GossipMessage_DataUpdate{\n\t\t\tDataUpdate: &proto.DataUpdate{\n\t\t\t\tMsgType: p.config.MsgType,\n\t\t\t\tNonce: nonce,\n\t\t\t\tData: items2return,\n\t\t\t},\n\t\t},\n\t}\n\tremotePeer := context.(proto.ReceivedMessage).GetConnectionInfo()\n\tp.logger.Debug(\"Sending\", len(returnedUpdate.GetDataUpdate().Data), p.config.MsgType, \"items to\", remotePeer)\n\tcontext.(proto.ReceivedMessage).Respond(returnedUpdate)\n}\n\nfunc (p *pullMediatorImpl) peersWithEndpoints(endpoints ...string) []*comm.RemotePeer {\n\tpeers := []*comm.RemotePeer{}\n\tfor _, member := range p.MemSvc.GetMembership() {\n\t\tfor _, endpoint := range endpoints {\n\t\t\tif member.PreferredEndpoint() == endpoint {\n\t\t\t\tpeers = append(peers, &comm.RemotePeer{Endpoint: member.PreferredEndpoint(), PKIID: member.PKIid})\n\t\t\t}\n\t\t}\n\t}\n\treturn peers\n}\n\nfunc (p *pullMediatorImpl) hooksByMsgType(msgType MsgType) []MessageHook {\n\tp.RLock()\n\tdefer p.RUnlock()\n\treturnedHooks := []MessageHook{}\n\tfor _, h := range p.msgType2Hook[msgType] {\n\t\treturnedHooks = append(returnedHooks, h)\n\t}\n\treturn returnedHooks\n}\n\n\/\/ SelectEndpoints select k peers from peerPool and returns them.\nfunc SelectEndpoints(k int, peerPool []discovery.NetworkMember) []*comm.RemotePeer {\n\tif len(peerPool) < k {\n\t\tk = len(peerPool)\n\t}\n\n\tindices := util.GetRandomIndices(k, len(peerPool)-1)\n\tendpoints := make([]*comm.RemotePeer, len(indices))\n\tfor i, j := range indices {\n\t\tendpoints[i] = &comm.RemotePeer{Endpoint: peerPool[j].PreferredEndpoint(), PKIID: peerPool[j].PKIid}\n\t}\n\treturn endpoints\n}\n<commit_msg>[FAB-13204] refactor gossip pull message class<commit_after>\/*\nCopyright IBM Corp. All Rights Reserved.\n\nSPDX-License-Identifier: Apache-2.0\n*\/\n\npackage pull\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/hyperledger\/fabric\/gossip\/comm\"\n\t\"github.com\/hyperledger\/fabric\/gossip\/common\"\n\t\"github.com\/hyperledger\/fabric\/gossip\/discovery\"\n\t\"github.com\/hyperledger\/fabric\/gossip\/gossip\/algo\"\n\t\"github.com\/hyperledger\/fabric\/gossip\/util\"\n\tproto \"github.com\/hyperledger\/fabric\/protos\/gossip\"\n\t\"github.com\/pkg\/errors\"\n\t\"go.uber.org\/zap\/zapcore\"\n)\n\n\/\/ Constants go here.\nconst (\n\tHelloMsgType MsgType = iota\n\tDigestMsgType\n\tRequestMsgType\n\tResponseMsgType\n)\n\n\/\/ MsgType defines the type of a message that is sent to the PullStore\ntype MsgType int\n\n\/\/ MessageHook defines a function that will run after a certain pull message is received\ntype MessageHook func(itemIDs []string, items []*proto.SignedGossipMessage, msg proto.ReceivedMessage)\n\n\/\/ Sender sends messages to remote peers\ntype Sender interface {\n\t\/\/ Send sends a message to a list of remote peers\n\tSend(msg *proto.SignedGossipMessage, peers ...*comm.RemotePeer)\n}\n\n\/\/ MembershipService obtains membership information of alive peers\ntype MembershipService interface {\n\t\/\/ GetMembership returns the membership of\n\tGetMembership() []discovery.NetworkMember\n}\n\n\/\/ Config defines the configuration of the pull mediator\ntype Config struct {\n\tID string\n\tPullInterval time.Duration \/\/ Duration between pull invocations\n\tChannel common.ChainID\n\tPeerCountToSelect int \/\/ Number of peers to initiate pull with\n\tTag proto.GossipMessage_Tag\n\tMsgType proto.PullMsgType\n}\n\n\/\/ IngressDigestFilter filters out entities in digests that are received from remote peers\ntype IngressDigestFilter func(digestMsg *proto.DataDigest) *proto.DataDigest\n\n\/\/ EgressDigestFilter filters digests to be sent to a remote peer, that\n\/\/ sent a hello with the following message\ntype EgressDigestFilter func(helloMsg proto.ReceivedMessage) func(digestItem string) bool\n\n\/\/ byContext converts this EgressDigFilter to an algo.DigestFilter\nfunc (df EgressDigestFilter) byContext() algo.DigestFilter {\n\treturn func(context interface{}) func(digestItem string) bool {\n\t\treturn func(digestItem string) bool {\n\t\t\treturn df(context.(proto.ReceivedMessage))(digestItem)\n\t\t}\n\t}\n}\n\n\/\/ PullAdapter defines methods of the pullStore to interact\n\/\/ with various modules of gossip\ntype PullAdapter struct {\n\tSndr Sender\n\tMemSvc MembershipService\n\tIdExtractor proto.IdentifierExtractor\n\tMsgCons proto.MsgConsumer\n\tEgressDigFilter EgressDigestFilter\n\tIngressDigFilter IngressDigestFilter\n}\n\n\/\/ Mediator is a component wrap a PullEngine and provides the methods\n\/\/ it needs to perform pull synchronization.\n\/\/ The specialization of a pull mediator to a certain type of message is\n\/\/ done by the configuration, a IdentifierExtractor, IdentifierExtractor\n\/\/ given at construction, and also hooks that can be registered for each\n\/\/ type of pullMsgType (hello, digest, req, res).\ntype Mediator interface {\n\t\/\/ Stop stop the Mediator\n\tStop()\n\n\t\/\/ RegisterMsgHook registers a message hook to a specific type of pull message\n\tRegisterMsgHook(MsgType, MessageHook)\n\n\t\/\/ Add adds a GossipMessage to the Mediator\n\tAdd(*proto.SignedGossipMessage)\n\n\t\/\/ Remove removes a GossipMessage from the Mediator with a matching digest,\n\t\/\/ if such a message exits\n\tRemove(digest string)\n\n\t\/\/ HandleMessage handles a message from some remote peer\n\tHandleMessage(msg proto.ReceivedMessage)\n}\n\n\/\/ pullMediatorImpl is an implementation of Mediator\ntype pullMediatorImpl struct {\n\tsync.RWMutex\n\t*PullAdapter\n\tmsgType2Hook map[MsgType][]MessageHook\n\tconfig Config\n\tlogger util.Logger\n\titemID2Msg map[string]*proto.SignedGossipMessage\n\tengine *algo.PullEngine\n}\n\n\/\/ NewPullMediator returns a new Mediator\nfunc NewPullMediator(config Config, adapter *PullAdapter) Mediator {\n\tegressDigFilter := adapter.EgressDigFilter\n\n\tacceptAllFilter := func(_ proto.ReceivedMessage) func(string) bool {\n\t\treturn func(_ string) bool {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tif egressDigFilter == nil {\n\t\tegressDigFilter = acceptAllFilter\n\t}\n\n\tp := &pullMediatorImpl{\n\t\tPullAdapter: adapter,\n\t\tmsgType2Hook: make(map[MsgType][]MessageHook),\n\t\tconfig: config,\n\t\tlogger: util.GetLogger(util.PullLogger, config.ID),\n\t\titemID2Msg: make(map[string]*proto.SignedGossipMessage),\n\t}\n\n\tp.engine = algo.NewPullEngineWithFilter(p, config.PullInterval, egressDigFilter.byContext())\n\n\tif adapter.IngressDigFilter == nil {\n\t\t\/\/ Create accept all filter\n\t\tadapter.IngressDigFilter = func(digestMsg *proto.DataDigest) *proto.DataDigest {\n\t\t\treturn digestMsg\n\t\t}\n\t}\n\treturn p\n\n}\n\nfunc (p *pullMediatorImpl) HandleMessage(m proto.ReceivedMessage) {\n\tif m.GetGossipMessage() == nil || !m.GetGossipMessage().IsPullMsg() {\n\t\treturn\n\t}\n\tmsg := m.GetGossipMessage()\n\tmsgType := msg.GetPullMsgType()\n\tif msgType != p.config.MsgType {\n\t\treturn\n\t}\n\n\tp.logger.Debug(msg)\n\n\titemIDs := []string{}\n\titems := []*proto.SignedGossipMessage{}\n\tvar pullMsgType MsgType\n\n\tif helloMsg := msg.GetHello(); helloMsg != nil {\n\t\tpullMsgType = HelloMsgType\n\t\tp.engine.OnHello(helloMsg.Nonce, m)\n\t} else if digest := msg.GetDataDig(); digest != nil {\n\t\td := p.PullAdapter.IngressDigFilter(digest)\n\t\titemIDs = util.BytesToStrings(d.Digests)\n\t\tpullMsgType = DigestMsgType\n\t\tp.engine.OnDigest(itemIDs, d.Nonce, m)\n\t} else if req := msg.GetDataReq(); req != nil {\n\t\titemIDs = util.BytesToStrings(req.Digests)\n\t\tpullMsgType = RequestMsgType\n\t\tp.engine.OnReq(itemIDs, req.Nonce, m)\n\t} else if res := msg.GetDataUpdate(); res != nil {\n\t\titemIDs = make([]string, len(res.Data))\n\t\titems = make([]*proto.SignedGossipMessage, len(res.Data))\n\t\tpullMsgType = ResponseMsgType\n\t\tfor i, pulledMsg := range res.Data {\n\t\t\tmsg, err := pulledMsg.ToGossipMessage()\n\t\t\tif err != nil {\n\t\t\t\tp.logger.Warningf(\"Data update contains an invalid message: %+v\", errors.WithStack(err))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tp.MsgCons(msg)\n\t\t\titemIDs[i] = p.IdExtractor(msg)\n\t\t\titems[i] = msg\n\t\t\tp.Lock()\n\t\t\tp.itemID2Msg[itemIDs[i]] = msg\n\t\t\tp.Unlock()\n\t\t}\n\t\tp.engine.OnRes(itemIDs, res.Nonce)\n\t}\n\n\t\/\/ Invoke hooks for relevant message type\n\tfor _, h := range p.hooksByMsgType(pullMsgType) {\n\t\th(itemIDs, items, m)\n\t}\n}\n\nfunc (p *pullMediatorImpl) Stop() {\n\tp.engine.Stop()\n}\n\n\/\/ RegisterMsgHook registers a message hook to a specific type of pull message\nfunc (p *pullMediatorImpl) RegisterMsgHook(pullMsgType MsgType, hook MessageHook) {\n\tp.Lock()\n\tdefer p.Unlock()\n\tp.msgType2Hook[pullMsgType] = append(p.msgType2Hook[pullMsgType], hook)\n\n}\n\n\/\/ Add adds a GossipMessage to the store\nfunc (p *pullMediatorImpl) Add(msg *proto.SignedGossipMessage) {\n\tp.Lock()\n\tdefer p.Unlock()\n\titemID := p.IdExtractor(msg)\n\tp.itemID2Msg[itemID] = msg\n\tp.engine.Add(itemID)\n}\n\n\/\/ Remove removes a GossipMessage from the Mediator with a matching digest,\n\/\/ if such a message exits\nfunc (p *pullMediatorImpl) Remove(digest string) {\n\tp.Lock()\n\tdefer p.Unlock()\n\tdelete(p.itemID2Msg, digest)\n\tp.engine.Remove(digest)\n}\n\n\/\/ SelectPeers returns a slice of peers which the engine will initiate the protocol with\nfunc (p *pullMediatorImpl) SelectPeers() []string {\n\tremotePeers := SelectEndpoints(p.config.PeerCountToSelect, p.MemSvc.GetMembership())\n\tendpoints := make([]string, len(remotePeers))\n\tfor i, peer := range remotePeers {\n\t\tendpoints[i] = peer.Endpoint\n\t}\n\treturn endpoints\n}\n\n\/\/ Hello sends a hello message to initiate the protocol\n\/\/ and returns an NONCE that is expected to be returned\n\/\/ in the digest message.\nfunc (p *pullMediatorImpl) Hello(dest string, nonce uint64) {\n\thelloMsg := &proto.GossipMessage{\n\t\tChannel: p.config.Channel,\n\t\tTag: p.config.Tag,\n\t\tContent: &proto.GossipMessage_Hello{\n\t\t\tHello: &proto.GossipHello{\n\t\t\t\tNonce: nonce,\n\t\t\t\tMetadata: nil,\n\t\t\t\tMsgType: p.config.MsgType,\n\t\t\t},\n\t\t},\n\t}\n\n\tp.logger.Debug(\"Sending\", p.config.MsgType, \"hello to\", dest)\n\tsMsg, err := helloMsg.NoopSign()\n\tif err != nil {\n\t\tp.logger.Errorf(\"Failed creating SignedGossipMessage: %+v\", errors.WithStack(err))\n\t\treturn\n\t}\n\tp.Sndr.Send(sMsg, p.peersWithEndpoints(dest)...)\n}\n\n\/\/ SendDigest sends a digest to a remote PullEngine.\n\/\/ The context parameter specifies the remote engine to send to.\nfunc (p *pullMediatorImpl) SendDigest(digest []string, nonce uint64, context interface{}) {\n\tdigMsg := &proto.GossipMessage{\n\t\tChannel: p.config.Channel,\n\t\tTag: p.config.Tag,\n\t\tNonce: 0,\n\t\tContent: &proto.GossipMessage_DataDig{\n\t\t\tDataDig: &proto.DataDigest{\n\t\t\t\tMsgType: p.config.MsgType,\n\t\t\t\tNonce: nonce,\n\t\t\t\tDigests: util.StringsToBytes(digest),\n\t\t\t},\n\t\t},\n\t}\n\tremotePeer := context.(proto.ReceivedMessage).GetConnectionInfo()\n\tif p.logger.IsEnabledFor(zapcore.DebugLevel) {\n\t\tp.logger.Debug(\"Sending\", p.config.MsgType, \"digest:\", digMsg.GetDataDig().FormattedDigests(), \"to\", remotePeer)\n\t}\n\n\tcontext.(proto.ReceivedMessage).Respond(digMsg)\n}\n\n\/\/ SendReq sends an array of items to a certain remote PullEngine identified\n\/\/ by a string\nfunc (p *pullMediatorImpl) SendReq(dest string, items []string, nonce uint64) {\n\treq := &proto.GossipMessage{\n\t\tChannel: p.config.Channel,\n\t\tTag: p.config.Tag,\n\t\tNonce: 0,\n\t\tContent: &proto.GossipMessage_DataReq{\n\t\t\tDataReq: &proto.DataRequest{\n\t\t\t\tMsgType: p.config.MsgType,\n\t\t\t\tNonce: nonce,\n\t\t\t\tDigests: util.StringsToBytes(items),\n\t\t\t},\n\t\t},\n\t}\n\tif p.logger.IsEnabledFor(zapcore.DebugLevel) {\n\t\tp.logger.Debug(\"Sending\", req.GetDataReq().FormattedDigests(), \"to\", dest)\n\t}\n\tsMsg, err := req.NoopSign()\n\tif err != nil {\n\t\tp.logger.Warningf(\"Failed creating SignedGossipMessage: %+v\", errors.WithStack(err))\n\t\treturn\n\t}\n\tp.Sndr.Send(sMsg, p.peersWithEndpoints(dest)...)\n}\n\n\/\/ SendRes sends an array of items to a remote PullEngine identified by a context.\nfunc (p *pullMediatorImpl) SendRes(items []string, context interface{}, nonce uint64) {\n\titems2return := []*proto.Envelope{}\n\tp.RLock()\n\tdefer p.RUnlock()\n\tfor _, item := range items {\n\t\tif msg, exists := p.itemID2Msg[item]; exists {\n\t\t\titems2return = append(items2return, msg.Envelope)\n\t\t}\n\t}\n\treturnedUpdate := &proto.GossipMessage{\n\t\tChannel: p.config.Channel,\n\t\tTag: p.config.Tag,\n\t\tNonce: 0,\n\t\tContent: &proto.GossipMessage_DataUpdate{\n\t\t\tDataUpdate: &proto.DataUpdate{\n\t\t\t\tMsgType: p.config.MsgType,\n\t\t\t\tNonce: nonce,\n\t\t\t\tData: items2return,\n\t\t\t},\n\t\t},\n\t}\n\tremotePeer := context.(proto.ReceivedMessage).GetConnectionInfo()\n\tp.logger.Debug(\"Sending\", len(returnedUpdate.GetDataUpdate().Data), p.config.MsgType, \"items to\", remotePeer)\n\tcontext.(proto.ReceivedMessage).Respond(returnedUpdate)\n}\n\nfunc (p *pullMediatorImpl) peersWithEndpoints(endpoints ...string) []*comm.RemotePeer {\n\tpeers := []*comm.RemotePeer{}\n\tfor _, member := range p.MemSvc.GetMembership() {\n\t\tfor _, endpoint := range endpoints {\n\t\t\tif member.PreferredEndpoint() == endpoint {\n\t\t\t\tpeers = append(peers, &comm.RemotePeer{Endpoint: member.PreferredEndpoint(), PKIID: member.PKIid})\n\t\t\t}\n\t\t}\n\t}\n\treturn peers\n}\n\nfunc (p *pullMediatorImpl) hooksByMsgType(msgType MsgType) []MessageHook {\n\tp.RLock()\n\tdefer p.RUnlock()\n\treturnedHooks := []MessageHook{}\n\tfor _, h := range p.msgType2Hook[msgType] {\n\t\treturnedHooks = append(returnedHooks, h)\n\t}\n\treturn returnedHooks\n}\n\n\/\/ SelectEndpoints select k peers from peerPool and returns them.\nfunc SelectEndpoints(k int, peerPool []discovery.NetworkMember) []*comm.RemotePeer {\n\tif len(peerPool) < k {\n\t\tk = len(peerPool)\n\t}\n\n\tindices := util.GetRandomIndices(k, len(peerPool)-1)\n\tendpoints := make([]*comm.RemotePeer, len(indices))\n\tfor i, j := range indices {\n\t\tendpoints[i] = &comm.RemotePeer{Endpoint: peerPool[j].PreferredEndpoint(), PKIID: peerPool[j].PKIid}\n\t}\n\treturn endpoints\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011, 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage ec2\n\nimport (\n\tgc \"launchpad.net\/gocheck\"\n\n\t\"launchpad.net\/juju-core\/constraints\"\n\t\"launchpad.net\/juju-core\/environs\/imagemetadata\"\n\t\"launchpad.net\/juju-core\/environs\/instances\"\n\t\"launchpad.net\/juju-core\/environs\/simplestreams\"\n\t\"launchpad.net\/juju-core\/testing\/testbase\"\n)\n\ntype imageSuite struct {\n\ttestbase.LoggingSuite\n}\n\nvar _ = gc.Suite(&imageSuite{})\n\nfunc (s *imageSuite) SetUpSuite(c *gc.C) {\n\ts.LoggingSuite.SetUpSuite(c)\n\tUseTestImageData(TestImagesData)\n}\n\nfunc (s *imageSuite) TearDownSuite(c *gc.C) {\n\tUseTestImageData(nil)\n\ts.LoggingSuite.TearDownTest(c)\n}\n\ntype specSuite struct {\n\ttestbase.LoggingSuite\n}\n\nvar _ = gc.Suite(&specSuite{})\n\nfunc (s *specSuite) SetUpSuite(c *gc.C) {\n\ts.LoggingSuite.SetUpSuite(c)\n\tUseTestImageData(TestImagesData)\n\tUseTestInstanceTypeData(TestInstanceTypeCosts)\n\tUseTestRegionData(TestRegions)\n}\n\nfunc (s *specSuite) TearDownSuite(c *gc.C) {\n\tUseTestInstanceTypeData(nil)\n\tUseTestImageData(nil)\n\tUseTestRegionData(nil)\n\ts.LoggingSuite.TearDownSuite(c)\n}\n\nvar findInstanceSpecTests = []struct {\n\tseries string\n\tarches []string\n\tcons string\n\titype string\n\timage string\n}{\n\t{\n\t\tseries: \"precise\",\n\t\tarches: both,\n\t\titype: \"m1.small\",\n\t\timage: \"ami-00000033\",\n\t}, {\n\t\tseries: \"quantal\",\n\t\tarches: both,\n\t\titype: \"m1.small\",\n\t\timage: \"ami-01000034\",\n\t}, {\n\t\tseries: \"precise\",\n\t\tarches: both,\n\t\tcons: \"cpu-cores=4\",\n\t\titype: \"m1.xlarge\",\n\t\timage: \"ami-00000033\",\n\t}, {\n\t\tseries: \"precise\",\n\t\tarches: both,\n\t\tcons: \"cpu-cores=2 arch=i386\",\n\t\titype: \"c1.medium\",\n\t\timage: \"ami-00000034\",\n\t}, {\n\t\tseries: \"precise\",\n\t\tarches: both,\n\t\tcons: \"mem=10G\",\n\t\titype: \"m1.xlarge\",\n\t\timage: \"ami-00000033\",\n\t}, {\n\t\tseries: \"precise\",\n\t\tarches: both,\n\t\tcons: \"mem=\",\n\t\titype: \"m1.small\",\n\t\timage: \"ami-00000033\",\n\t}, {\n\t\tseries: \"precise\",\n\t\tarches: both,\n\t\tcons: \"cpu-power=\",\n\t\titype: \"t1.micro\",\n\t\timage: \"ami-00000033\",\n\t}, {\n\t\tseries: \"precise\",\n\t\tarches: both,\n\t\tcons: \"cpu-power=800\",\n\t\titype: \"m1.xlarge\",\n\t\timage: \"ami-00000033\",\n\t}, {\n\t\tseries: \"precise\",\n\t\tarches: both,\n\t\tcons: \"cpu-power=500 arch=i386\",\n\t\titype: \"c1.medium\",\n\t\timage: \"ami-00000034\",\n\t}, {\n\t\tseries: \"precise\",\n\t\tarches: []string{\"i386\"},\n\t\tcons: \"cpu-power=400\",\n\t\titype: \"c1.medium\",\n\t\timage: \"ami-00000034\",\n\t}, {\n\t\tseries: \"quantal\",\n\t\tarches: both,\n\t\tcons: \"arch=amd64\",\n\t\titype: \"cc1.4xlarge\",\n\t\timage: \"ami-01000035\",\n\t},\n}\n\nfunc (s *specSuite) TestFindInstanceSpec(c *gc.C) {\n\tfor i, t := range findInstanceSpecTests {\n\t\tc.Logf(\"test %d\", i)\n\t\tstor := ebsStorage\n\t\tspec, err := findInstanceSpec(\n\t\t\t[]simplestreams.DataSource{simplestreams.NewURLDataSource(\"test:\", simplestreams.VerifySSLHostnames), simplestreams.VerifySSLHostnames},\n\t\t\t&instances.InstanceConstraint{\n\t\t\t\tRegion: \"test\",\n\t\t\t\tSeries: t.series,\n\t\t\t\tArches: t.arches,\n\t\t\t\tConstraints: constraints.MustParse(t.cons),\n\t\t\t\tStorage: &stor,\n\t\t\t})\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Check(spec.InstanceType.Name, gc.Equals, t.itype)\n\t\tc.Check(spec.Image.Id, gc.Equals, t.image)\n\t}\n}\n\nvar findInstanceSpecErrorTests = []struct {\n\tseries string\n\tarches []string\n\tcons string\n\terr string\n}{\n\t{\n\t\tseries: \"bad\",\n\t\tarches: both,\n\t\terr: `invalid series \"bad\"`,\n\t}, {\n\t\tseries: \"precise\",\n\t\tarches: []string{\"arm\"},\n\t\terr: `no \"precise\" images in test with arches \\[arm\\]`,\n\t}, {\n\t\tseries: \"raring\",\n\t\tarches: both,\n\t\tcons: \"mem=4G\",\n\t\terr: `no \"raring\" images in test matching instance types \\[m1.large m1.xlarge c1.xlarge cc1.4xlarge cc2.8xlarge\\]`,\n\t},\n}\n\nfunc (s *specSuite) TestFindInstanceSpecErrors(c *gc.C) {\n\tfor i, t := range findInstanceSpecErrorTests {\n\t\tc.Logf(\"test %d\", i)\n\t\t_, err := findInstanceSpec(\n\t\t\t[]simplestreams.DataSource{simplestreams.NewURLDataSource(\"test:\")}, &instances.InstanceConstraint{\n\t\t\t\tRegion: \"test\",\n\t\t\t\tSeries: t.series,\n\t\t\t\tArches: t.arches,\n\t\t\t\tConstraints: constraints.MustParse(t.cons),\n\t\t\t})\n\t\tc.Check(err, gc.ErrorMatches, t.err)\n\t}\n}\n\nfunc (*specSuite) TestFilterImagesAcceptsNil(c *gc.C) {\n\tc.Check(filterImages(nil), gc.HasLen, 0)\n}\n\nfunc (*specSuite) TestFilterImagesAcceptsImageWithEBSStorage(c *gc.C) {\n\tinput := []*imagemetadata.ImageMetadata{{Id: \"yay\", Storage: \"ebs\"}}\n\tc.Check(filterImages(input), gc.DeepEquals, input)\n}\n\nfunc (*specSuite) TestFilterImagesRejectsImageWithoutEBSStorage(c *gc.C) {\n\tinput := []*imagemetadata.ImageMetadata{{Id: \"boo\", Storage: \"ftp\"}}\n\tc.Check(filterImages(input), gc.HasLen, 0)\n}\n\nfunc (*specSuite) TestFilterImagesReturnsSelectively(c *gc.C) {\n\tgood := imagemetadata.ImageMetadata{Id: \"good\", Storage: \"ebs\"}\n\tbad := imagemetadata.ImageMetadata{Id: \"bad\", Storage: \"ftp\"}\n\tinput := []*imagemetadata.ImageMetadata{&good, &bad}\n\texpectation := []*imagemetadata.ImageMetadata{&good}\n\tc.Check(filterImages(input), gc.DeepEquals, expectation)\n}\n\nfunc (*specSuite) TestFilterImagesMaintainsOrdering(c *gc.C) {\n\tinput := []*imagemetadata.ImageMetadata{\n\t\t{Id: \"one\", Storage: \"ebs\"},\n\t\t{Id: \"two\", Storage: \"ebs\"},\n\t\t{Id: \"three\", Storage: \"ebs\"},\n\t}\n\tc.Check(filterImages(input), gc.DeepEquals, input)\n}\n<commit_msg>Fix some typos<commit_after>\/\/ Copyright 2011, 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage ec2\n\nimport (\n\tgc \"launchpad.net\/gocheck\"\n\n\t\"launchpad.net\/juju-core\/constraints\"\n\t\"launchpad.net\/juju-core\/environs\/imagemetadata\"\n\t\"launchpad.net\/juju-core\/environs\/instances\"\n\t\"launchpad.net\/juju-core\/environs\/simplestreams\"\n\t\"launchpad.net\/juju-core\/testing\/testbase\"\n)\n\ntype imageSuite struct {\n\ttestbase.LoggingSuite\n}\n\nvar _ = gc.Suite(&imageSuite{})\n\nfunc (s *imageSuite) SetUpSuite(c *gc.C) {\n\ts.LoggingSuite.SetUpSuite(c)\n\tUseTestImageData(TestImagesData)\n}\n\nfunc (s *imageSuite) TearDownSuite(c *gc.C) {\n\tUseTestImageData(nil)\n\ts.LoggingSuite.TearDownTest(c)\n}\n\ntype specSuite struct {\n\ttestbase.LoggingSuite\n}\n\nvar _ = gc.Suite(&specSuite{})\n\nfunc (s *specSuite) SetUpSuite(c *gc.C) {\n\ts.LoggingSuite.SetUpSuite(c)\n\tUseTestImageData(TestImagesData)\n\tUseTestInstanceTypeData(TestInstanceTypeCosts)\n\tUseTestRegionData(TestRegions)\n}\n\nfunc (s *specSuite) TearDownSuite(c *gc.C) {\n\tUseTestInstanceTypeData(nil)\n\tUseTestImageData(nil)\n\tUseTestRegionData(nil)\n\ts.LoggingSuite.TearDownSuite(c)\n}\n\nvar findInstanceSpecTests = []struct {\n\tseries string\n\tarches []string\n\tcons string\n\titype string\n\timage string\n}{\n\t{\n\t\tseries: \"precise\",\n\t\tarches: both,\n\t\titype: \"m1.small\",\n\t\timage: \"ami-00000033\",\n\t}, {\n\t\tseries: \"quantal\",\n\t\tarches: both,\n\t\titype: \"m1.small\",\n\t\timage: \"ami-01000034\",\n\t}, {\n\t\tseries: \"precise\",\n\t\tarches: both,\n\t\tcons: \"cpu-cores=4\",\n\t\titype: \"m1.xlarge\",\n\t\timage: \"ami-00000033\",\n\t}, {\n\t\tseries: \"precise\",\n\t\tarches: both,\n\t\tcons: \"cpu-cores=2 arch=i386\",\n\t\titype: \"c1.medium\",\n\t\timage: \"ami-00000034\",\n\t}, {\n\t\tseries: \"precise\",\n\t\tarches: both,\n\t\tcons: \"mem=10G\",\n\t\titype: \"m1.xlarge\",\n\t\timage: \"ami-00000033\",\n\t}, {\n\t\tseries: \"precise\",\n\t\tarches: both,\n\t\tcons: \"mem=\",\n\t\titype: \"m1.small\",\n\t\timage: \"ami-00000033\",\n\t}, {\n\t\tseries: \"precise\",\n\t\tarches: both,\n\t\tcons: \"cpu-power=\",\n\t\titype: \"t1.micro\",\n\t\timage: \"ami-00000033\",\n\t}, {\n\t\tseries: \"precise\",\n\t\tarches: both,\n\t\tcons: \"cpu-power=800\",\n\t\titype: \"m1.xlarge\",\n\t\timage: \"ami-00000033\",\n\t}, {\n\t\tseries: \"precise\",\n\t\tarches: both,\n\t\tcons: \"cpu-power=500 arch=i386\",\n\t\titype: \"c1.medium\",\n\t\timage: \"ami-00000034\",\n\t}, {\n\t\tseries: \"precise\",\n\t\tarches: []string{\"i386\"},\n\t\tcons: \"cpu-power=400\",\n\t\titype: \"c1.medium\",\n\t\timage: \"ami-00000034\",\n\t}, {\n\t\tseries: \"quantal\",\n\t\tarches: both,\n\t\tcons: \"arch=amd64\",\n\t\titype: \"cc1.4xlarge\",\n\t\timage: \"ami-01000035\",\n\t},\n}\n\nfunc (s *specSuite) TestFindInstanceSpec(c *gc.C) {\n\tfor i, t := range findInstanceSpecTests {\n\t\tc.Logf(\"test %d\", i)\n\t\tstor := ebsStorage\n\t\tspec, err := findInstanceSpec(\n\t\t\t[]simplestreams.DataSource{simplestreams.NewURLDataSource(\"test:\", simplestreams.VerifySSLHostnames)},\n\t\t\t&instances.InstanceConstraint{\n\t\t\t\tRegion: \"test\",\n\t\t\t\tSeries: t.series,\n\t\t\t\tArches: t.arches,\n\t\t\t\tConstraints: constraints.MustParse(t.cons),\n\t\t\t\tStorage: &stor,\n\t\t\t})\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Check(spec.InstanceType.Name, gc.Equals, t.itype)\n\t\tc.Check(spec.Image.Id, gc.Equals, t.image)\n\t}\n}\n\nvar findInstanceSpecErrorTests = []struct {\n\tseries string\n\tarches []string\n\tcons string\n\terr string\n}{\n\t{\n\t\tseries: \"bad\",\n\t\tarches: both,\n\t\terr: `invalid series \"bad\"`,\n\t}, {\n\t\tseries: \"precise\",\n\t\tarches: []string{\"arm\"},\n\t\terr: `no \"precise\" images in test with arches \\[arm\\]`,\n\t}, {\n\t\tseries: \"raring\",\n\t\tarches: both,\n\t\tcons: \"mem=4G\",\n\t\terr: `no \"raring\" images in test matching instance types \\[m1.large m1.xlarge c1.xlarge cc1.4xlarge cc2.8xlarge\\]`,\n\t},\n}\n\nfunc (s *specSuite) TestFindInstanceSpecErrors(c *gc.C) {\n\tfor i, t := range findInstanceSpecErrorTests {\n\t\tc.Logf(\"test %d\", i)\n\t\t_, err := findInstanceSpec(\n\t\t\t[]simplestreams.DataSource{simplestreams.NewURLDataSource(\"test:\", simplestreams.VerifySSLHostnames)}, &instances.InstanceConstraint{\n\t\t\t\tRegion: \"test\",\n\t\t\t\tSeries: t.series,\n\t\t\t\tArches: t.arches,\n\t\t\t\tConstraints: constraints.MustParse(t.cons),\n\t\t\t})\n\t\tc.Check(err, gc.ErrorMatches, t.err)\n\t}\n}\n\nfunc (*specSuite) TestFilterImagesAcceptsNil(c *gc.C) {\n\tc.Check(filterImages(nil), gc.HasLen, 0)\n}\n\nfunc (*specSuite) TestFilterImagesAcceptsImageWithEBSStorage(c *gc.C) {\n\tinput := []*imagemetadata.ImageMetadata{{Id: \"yay\", Storage: \"ebs\"}}\n\tc.Check(filterImages(input), gc.DeepEquals, input)\n}\n\nfunc (*specSuite) TestFilterImagesRejectsImageWithoutEBSStorage(c *gc.C) {\n\tinput := []*imagemetadata.ImageMetadata{{Id: \"boo\", Storage: \"ftp\"}}\n\tc.Check(filterImages(input), gc.HasLen, 0)\n}\n\nfunc (*specSuite) TestFilterImagesReturnsSelectively(c *gc.C) {\n\tgood := imagemetadata.ImageMetadata{Id: \"good\", Storage: \"ebs\"}\n\tbad := imagemetadata.ImageMetadata{Id: \"bad\", Storage: \"ftp\"}\n\tinput := []*imagemetadata.ImageMetadata{&good, &bad}\n\texpectation := []*imagemetadata.ImageMetadata{&good}\n\tc.Check(filterImages(input), gc.DeepEquals, expectation)\n}\n\nfunc (*specSuite) TestFilterImagesMaintainsOrdering(c *gc.C) {\n\tinput := []*imagemetadata.ImageMetadata{\n\t\t{Id: \"one\", Storage: \"ebs\"},\n\t\t{Id: \"two\", Storage: \"ebs\"},\n\t\t{Id: \"three\", Storage: \"ebs\"},\n\t}\n\tc.Check(filterImages(input), gc.DeepEquals, input)\n}\n<|endoftext|>"} {"text":"<commit_before>package network\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com\/NebulousLabs\/Sia\/encoding\"\n)\n\n\/\/ reflection helpers\nvar (\n\ttypeOfError = reflect.TypeOf((*error)(nil)).Elem()\n\ttypeOfConn = reflect.TypeOf((*net.Conn)(nil)).Elem()\n)\n\n\/\/ handlerName truncates a string to 8 bytes. If len(name) < 8, the remaining\n\/\/ bytes are 0. A handlerName is specified at the beginning of each network\n\/\/ call, indicating which function should handle the connection.\nfunc handlerName(name string) []byte {\n\tb := make([]byte, 8)\n\tcopy(b, name)\n\treturn b\n}\n\n\/\/ Call establishes a TCP connection to the Address, calls the provided\n\/\/ function on it, and closes the connection.\nfunc (na Address) Call(name string, fn func(net.Conn) error) error {\n\tconn, err := net.DialTimeout(\"tcp\", string(na), timeout)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\t\/\/ set default deadline\n\t\/\/ note: fn can extend this deadline as needed\n\tconn.SetDeadline(time.Now().Add(timeout))\n\t\/\/ write header\n\tif _, err := conn.Write(handlerName(name)); err != nil {\n\t\treturn err\n\t}\n\treturn fn(conn)\n}\n\n\/\/ RPC performs a Remote Procedure Call by sending the procedure name and\n\/\/ encoded argument, and decoding the response into the supplied object.\n\/\/ 'resp' must be a pointer. If arg is nil, no object is sent. If 'resp' is\n\/\/ nil, no response is read.\nfunc (na *Address) RPC(name string, arg, resp interface{}) error {\n\treturn na.Call(name, func(conn net.Conn) error {\n\t\t\/\/ write arg\n\t\tif arg != nil {\n\t\t\tif _, err := encoding.WriteObject(conn, arg); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\t\/\/ read resp\n\t\tif resp != nil {\n\t\t\tif err := encoding.ReadObject(conn, resp, maxMsgLen); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\t\/\/ read err\n\t\tvar errStr string\n\t\tif err := encoding.ReadObject(conn, &errStr, maxMsgLen); err != nil {\n\t\t\treturn err\n\t\t} else if errStr != \"\" {\n\t\t\treturn errors.New(errStr)\n\t\t}\n\t\treturn nil\n\t})\n}\n\n\/\/ Broadcast calls the RPC on each peer in the address book.\nfunc (tcps *TCPServer) Broadcast(name string, arg, resp interface{}) {\n\tfor _, addr := range tcps.AddressBook() {\n\t\terr := addr.RPC(name, arg, resp)\n\t\t\/\/ remove unresponsive peers\n\t\tif err != nil {\n\t\t\ttcps.RemovePeer(addr)\n\t\t}\n\t}\n}\n\n\/\/ RegisterRPC registers a function as an RPC handler for a given identifier.\n\/\/ The function must be one of four possible types:\n\/\/ func(net.Conn) error\n\/\/ func(Type) (Type, error)\n\/\/ func(Type) error\n\/\/ func() (Type, error)\n\/\/ To call an RPC, use Address.RPC, supplying the same identifier given to\n\/\/ RegisterRPC. Identifiers should always use PascalCase.\nfunc (tcps *TCPServer) RegisterRPC(name string, fn interface{}) error {\n\t\/\/ all handlers are functions with 0 or 1 ins and 1 or 2 outs, the last of\n\t\/\/ which must be an error.\n\tval, typ := reflect.ValueOf(fn), reflect.TypeOf(fn)\n\tif typ.Kind() != reflect.Func || typ.NumIn() > 1 || typ.NumOut() > 2 ||\n\t\ttyp.NumOut() < 1 || typ.Out(typ.NumOut()-1) != typeOfError {\n\t\tpanic(\"registered function has wrong type signature\")\n\t}\n\n\tvar handler func(net.Conn) error\n\tswitch {\n\t\/\/ func(net.Conn) error\n\tcase typ.NumIn() == 1 && typ.NumOut() == 1 && typ.In(0) == typeOfConn:\n\t\thandler = fn.(func(net.Conn) error)\n\t\/\/ func(Type) (Type, error)\n\tcase typ.NumIn() == 1 && typ.NumOut() == 2:\n\t\thandler = registerRPC(val, typ)\n\t\/\/ func(Type) error\n\tcase typ.NumIn() == 1 && typ.NumOut() == 1:\n\t\thandler = registerArg(val, typ)\n\t\/\/ func() (Type, error)\n\tcase typ.NumIn() == 0 && typ.NumOut() == 2:\n\t\thandler = registerResp(val, typ)\n\n\tdefault:\n\t\tpanic(\"registered function has wrong type signature\")\n\t}\n\n\tident := string(handlerName(name))\n\ttcps.Lock()\n\ttcps.handlerMap[ident] = handler\n\ttcps.Unlock()\n\n\treturn nil\n}\n\n\/\/ registerRPC is for handlers that take an argument return a value. The input\n\/\/ is decoded and passed to fn, whose return value is written back to the\n\/\/ caller. fn must have the type signature:\n\/\/ func(Type, *Type) error\nfunc registerRPC(fn reflect.Value, typ reflect.Type) func(net.Conn) error {\n\treturn func(conn net.Conn) error {\n\t\t\/\/ read arg\n\t\targ := reflect.New(typ.In(0))\n\t\tif err := encoding.ReadObject(conn, arg.Interface(), maxMsgLen); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ call fn\n\t\tretvals := fn.Call([]reflect.Value{arg.Elem()})\n\t\tresp, errInter := retvals[0].Interface(), retvals[1].Interface()\n\t\t\/\/ write resp\n\t\tif _, err := encoding.WriteObject(conn, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ write err\n\t\tvar errStr string\n\t\tif errInter != nil {\n\t\t\terrStr = errInter.(error).Error()\n\t\t}\n\t\t_, err := encoding.WriteObject(conn, errStr)\n\t\treturn err\n\t}\n}\n\n\/\/ registerArg is for RPCs that do not return a value.\nfunc registerArg(fn reflect.Value, typ reflect.Type) func(net.Conn) error {\n\treturn func(conn net.Conn) error {\n\t\t\/\/ read arg\n\t\targ := reflect.New(typ.In(0))\n\t\tif err := encoding.ReadObject(conn, arg.Interface(), maxMsgLen); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ call fn on object\n\t\terrInter := fn.Call([]reflect.Value{arg.Elem()})[0].Interface()\n\t\t\/\/ write err\n\t\tvar errStr string\n\t\tif errInter != nil {\n\t\t\terrStr = errInter.(error).Error()\n\t\t}\n\t\t_, err := encoding.WriteObject(conn, errStr)\n\t\treturn err\n\t}\n}\n\n\/\/ registerResp is for RPCs that do not take a value.\nfunc registerResp(fn reflect.Value, typ reflect.Type) func(net.Conn) error {\n\treturn func(conn net.Conn) error {\n\t\t\/\/ call fn\n\t\tretvals := fn.Call(nil)\n\t\tresp, errInter := retvals[0].Interface(), retvals[1].Interface()\n\t\t\/\/ write resp\n\t\tif _, err := encoding.WriteObject(conn, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ write err\n\t\tvar errStr string\n\t\tif errInter != nil {\n\t\t\terrStr = errInter.(error).Error()\n\t\t}\n\t\t_, err := encoding.WriteObject(conn, errStr)\n\t\treturn err\n\t}\n}\n<commit_msg>HOTFIX: prevent bootstrap panic<commit_after>package network\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com\/NebulousLabs\/Sia\/encoding\"\n)\n\n\/\/ reflection helpers\nvar (\n\ttypeOfError = reflect.TypeOf((*error)(nil)).Elem()\n\ttypeOfConn = reflect.TypeOf((*net.Conn)(nil)).Elem()\n)\n\n\/\/ handlerName truncates a string to 8 bytes. If len(name) < 8, the remaining\n\/\/ bytes are 0. A handlerName is specified at the beginning of each network\n\/\/ call, indicating which function should handle the connection.\nfunc handlerName(name string) []byte {\n\tb := make([]byte, 8)\n\tcopy(b, name)\n\treturn b\n}\n\n\/\/ Call establishes a TCP connection to the Address, calls the provided\n\/\/ function on it, and closes the connection.\nfunc (na Address) Call(name string, fn func(net.Conn) error) error {\n\tconn, err := net.DialTimeout(\"tcp\", string(na), timeout)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\t\/\/ set default deadline\n\t\/\/ note: fn can extend this deadline as needed\n\tconn.SetDeadline(time.Now().Add(timeout))\n\t\/\/ write header\n\tif _, err := conn.Write(handlerName(name)); err != nil {\n\t\treturn err\n\t}\n\treturn fn(conn)\n}\n\n\/\/ RPC performs a Remote Procedure Call by sending the procedure name and\n\/\/ encoded argument, and decoding the response into the supplied object.\n\/\/ 'resp' must be a pointer. If arg is nil, no object is sent. If 'resp' is\n\/\/ nil, no response is read.\nfunc (na *Address) RPC(name string, arg, resp interface{}) error {\n\treturn na.Call(name, func(conn net.Conn) error {\n\t\t\/\/ write arg\n\t\tif arg != nil {\n\t\t\tif _, err := encoding.WriteObject(conn, arg); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\t\/\/ read resp\n\t\tif resp != nil {\n\t\t\tif err := encoding.ReadObject(conn, resp, maxMsgLen); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\t\/\/ read err\n\t\tvar errStr string\n\t\tif err := encoding.ReadObject(conn, &errStr, maxMsgLen); err != nil {\n\t\t\treturn err\n\t\t} else if errStr != \"\" {\n\t\t\treturn errors.New(errStr)\n\t\t}\n\t\treturn nil\n\t})\n}\n\n\/\/ Broadcast calls the RPC on each peer in the address book.\nfunc (tcps *TCPServer) Broadcast(name string, arg, resp interface{}) {\n\tfor _, addr := range tcps.AddressBook() {\n\t\t\/\/ TODO: remove unresponsive peers\n\t\t_ = addr.RPC(name, arg, resp)\n\t}\n}\n\n\/\/ RegisterRPC registers a function as an RPC handler for a given identifier.\n\/\/ The function must be one of four possible types:\n\/\/ func(net.Conn) error\n\/\/ func(Type) (Type, error)\n\/\/ func(Type) error\n\/\/ func() (Type, error)\n\/\/ To call an RPC, use Address.RPC, supplying the same identifier given to\n\/\/ RegisterRPC. Identifiers should always use PascalCase.\nfunc (tcps *TCPServer) RegisterRPC(name string, fn interface{}) error {\n\t\/\/ all handlers are functions with 0 or 1 ins and 1 or 2 outs, the last of\n\t\/\/ which must be an error.\n\tval, typ := reflect.ValueOf(fn), reflect.TypeOf(fn)\n\tif typ.Kind() != reflect.Func || typ.NumIn() > 1 || typ.NumOut() > 2 ||\n\t\ttyp.NumOut() < 1 || typ.Out(typ.NumOut()-1) != typeOfError {\n\t\tpanic(\"registered function has wrong type signature\")\n\t}\n\n\tvar handler func(net.Conn) error\n\tswitch {\n\t\/\/ func(net.Conn) error\n\tcase typ.NumIn() == 1 && typ.NumOut() == 1 && typ.In(0) == typeOfConn:\n\t\thandler = fn.(func(net.Conn) error)\n\t\/\/ func(Type) (Type, error)\n\tcase typ.NumIn() == 1 && typ.NumOut() == 2:\n\t\thandler = registerRPC(val, typ)\n\t\/\/ func(Type) error\n\tcase typ.NumIn() == 1 && typ.NumOut() == 1:\n\t\thandler = registerArg(val, typ)\n\t\/\/ func() (Type, error)\n\tcase typ.NumIn() == 0 && typ.NumOut() == 2:\n\t\thandler = registerResp(val, typ)\n\n\tdefault:\n\t\tpanic(\"registered function has wrong type signature\")\n\t}\n\n\tident := string(handlerName(name))\n\ttcps.Lock()\n\ttcps.handlerMap[ident] = handler\n\ttcps.Unlock()\n\n\treturn nil\n}\n\n\/\/ registerRPC is for handlers that take an argument return a value. The input\n\/\/ is decoded and passed to fn, whose return value is written back to the\n\/\/ caller. fn must have the type signature:\n\/\/ func(Type, *Type) error\nfunc registerRPC(fn reflect.Value, typ reflect.Type) func(net.Conn) error {\n\treturn func(conn net.Conn) error {\n\t\t\/\/ read arg\n\t\targ := reflect.New(typ.In(0))\n\t\tif err := encoding.ReadObject(conn, arg.Interface(), maxMsgLen); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ call fn\n\t\tretvals := fn.Call([]reflect.Value{arg.Elem()})\n\t\tresp, errInter := retvals[0].Interface(), retvals[1].Interface()\n\t\t\/\/ write resp\n\t\tif _, err := encoding.WriteObject(conn, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ write err\n\t\tvar errStr string\n\t\tif errInter != nil {\n\t\t\terrStr = errInter.(error).Error()\n\t\t}\n\t\t_, err := encoding.WriteObject(conn, errStr)\n\t\treturn err\n\t}\n}\n\n\/\/ registerArg is for RPCs that do not return a value.\nfunc registerArg(fn reflect.Value, typ reflect.Type) func(net.Conn) error {\n\treturn func(conn net.Conn) error {\n\t\t\/\/ read arg\n\t\targ := reflect.New(typ.In(0))\n\t\tif err := encoding.ReadObject(conn, arg.Interface(), maxMsgLen); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ call fn on object\n\t\terrInter := fn.Call([]reflect.Value{arg.Elem()})[0].Interface()\n\t\t\/\/ write err\n\t\tvar errStr string\n\t\tif errInter != nil {\n\t\t\terrStr = errInter.(error).Error()\n\t\t}\n\t\t_, err := encoding.WriteObject(conn, errStr)\n\t\treturn err\n\t}\n}\n\n\/\/ registerResp is for RPCs that do not take a value.\nfunc registerResp(fn reflect.Value, typ reflect.Type) func(net.Conn) error {\n\treturn func(conn net.Conn) error {\n\t\t\/\/ call fn\n\t\tretvals := fn.Call(nil)\n\t\tresp, errInter := retvals[0].Interface(), retvals[1].Interface()\n\t\t\/\/ write resp\n\t\tif _, err := encoding.WriteObject(conn, resp); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ write err\n\t\tvar errStr string\n\t\tif errInter != nil {\n\t\t\terrStr = errInter.(error).Error()\n\t\t}\n\t\t_, err := encoding.WriteObject(conn, errStr)\n\t\treturn err\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package gitlab implements the OAuth2 protocol for authenticating users through gitlab.\n\/\/ This package can be used as a reference implementation of an OAuth2 provider for Goth.\npackage gitlab\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\n\t\"github.com\/markbates\/goth\"\n\t\"golang.org\/x\/oauth2\"\n\t\"fmt\"\n)\n\n\/\/ These vars define the Authentication, Token, and Profile URLS for Gitlab. If\n\/\/ using Gitlab CE or EE, you should change these values before calling New.\n\/\/\n\/\/ Examples:\n\/\/\tgitlab.AuthURL = \"https:\/\/gitlab.acme.com\/oauth\/authorize\n\/\/\tgitlab.TokenURL = \"https:\/\/gitlab.acme.com\/oauth\/token\n\/\/\tgitlab.ProfileURL = \"https:\/\/gitlab.acme.com\/api\/v3\/user\nvar (\n\tAuthURL = \"https:\/\/gitlab.com\/oauth\/authorize\"\n\tTokenURL = \"https:\/\/gitlab.com\/oauth\/token\"\n\tProfileURL = \"https:\/\/gitlab.com\/api\/v3\/user\"\n)\n\n\/\/ Provider is the implementation of `goth.Provider` for accessing Gitlab.\ntype Provider struct {\n\tClientKey string\n\tSecret string\n\tCallbackURL string\n\tHTTPClient *http.Client\n\tconfig *oauth2.Config\n\tproviderName string\n}\n\n\/\/ New creates a new Gitlab provider and sets up important connection details.\n\/\/ You should always call `gitlab.New` to get a new provider. Never try to\n\/\/ create one manually.\nfunc New(clientKey, secret, callbackURL string, scopes ...string) *Provider {\n\tp := &Provider{\n\t\tClientKey: clientKey,\n\t\tSecret: secret,\n\t\tCallbackURL: callbackURL,\n\t\tproviderName: \"gitlab\",\n\t}\n\tp.config = newConfig(p, scopes)\n\treturn p\n}\n\n\/\/ Name is the name used to retrieve this provider later.\nfunc (p *Provider) Name() string {\n\treturn p.providerName\n}\n\n\/\/ SetName is to update the name of the provider (needed in case of multiple providers of 1 type)\nfunc (p *Provider) SetName(name string) {\n\tp.providerName = name\n}\n\nfunc (p *Provider) Client() *http.Client {\n\treturn goth.HTTPClientWithFallBack(p.HTTPClient)\n}\n\n\/\/ Debug is a no-op for the gitlab package.\nfunc (p *Provider) Debug(debug bool) {}\n\n\/\/ BeginAuth asks Gitlab for an authentication end-point.\nfunc (p *Provider) BeginAuth(state string) (goth.Session, error) {\n\treturn &Session{\n\t\tAuthURL: p.config.AuthCodeURL(state),\n\t}, nil\n}\n\n\/\/ FetchUser will go to Gitlab and access basic information about the user.\nfunc (p *Provider) FetchUser(session goth.Session) (goth.User, error) {\n\tsess := session.(*Session)\n\tuser := goth.User{\n\t\tAccessToken: sess.AccessToken,\n\t\tProvider: p.Name(),\n\t\tRefreshToken: sess.RefreshToken,\n\t\tExpiresAt: sess.ExpiresAt,\n\t}\n\n\tif user.AccessToken == \"\" {\n\t\t\/\/ data is not yet retrieved since accessToken is still empty\n\t\treturn user, fmt.Errorf(\"%s cannot get user information without accessToken\", p.providerName)\n\t}\n\n\tresponse, err := p.Client().Get(ProfileURL + \"?access_token=\" + url.QueryEscape(sess.AccessToken))\n\tif err != nil {\n\t\tif response != nil {\n\t\t\tresponse.Body.Close()\n\t\t}\n\t\treturn user, err\n\t}\n\n\tdefer response.Body.Close()\n\n\tif response.StatusCode != http.StatusOK {\n\t\treturn user, fmt.Errorf(\"%s responded with a %d trying to fetch user information\", p.providerName, response.StatusCode)\n\t}\n\n\tbits, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn user, err\n\t}\n\n\terr = json.NewDecoder(bytes.NewReader(bits)).Decode(&user.RawData)\n\tif err != nil {\n\t\treturn user, err\n\t}\n\n\terr = userFromReader(bytes.NewReader(bits), &user)\n\n\treturn user, err\n}\n\nfunc newConfig(provider *Provider, scopes []string) *oauth2.Config {\n\tc := &oauth2.Config{\n\t\tClientID: provider.ClientKey,\n\t\tClientSecret: provider.Secret,\n\t\tRedirectURL: provider.CallbackURL,\n\t\tEndpoint: oauth2.Endpoint{\n\t\t\tAuthURL: AuthURL,\n\t\t\tTokenURL: TokenURL,\n\t\t},\n\t\tScopes: []string{},\n\t}\n\n\tif len(scopes) > 0 {\n\t\tfor _, scope := range scopes {\n\t\t\tc.Scopes = append(c.Scopes, scope)\n\t\t}\n\t}\n\treturn c\n}\n\nfunc userFromReader(r io.Reader, user *goth.User) error {\n\tu := struct {\n\t\tName string `json:\"name\"`\n\t\tEmail string `json:\"email\"`\n\t\tNickName string `json:\"username\"`\n\t\tID int `json:\"id\"`\n\t\tAvatarURL string `json:\"avatar_url\"`\n\t}{}\n\terr := json.NewDecoder(r).Decode(&u)\n\tif err != nil {\n\t\treturn err\n\t}\n\tuser.Email = u.Email\n\tuser.Name = u.Name\n\tuser.NickName = u.NickName\n\tuser.UserID = strconv.Itoa(u.ID)\n\tuser.AvatarURL = u.AvatarURL\n\treturn nil\n}\n\n\/\/RefreshTokenAvailable refresh token is provided by auth provider or not\nfunc (p *Provider) RefreshTokenAvailable() bool {\n\treturn true\n}\n\n\/\/RefreshToken get new access token based on the refresh token\nfunc (p *Provider) RefreshToken(refreshToken string) (*oauth2.Token, error) {\n\ttoken := &oauth2.Token{RefreshToken: refreshToken}\n\tts := p.config.TokenSource(goth.ContextForClient(p.Client()), token)\n\tnewToken, err := ts.Token()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newToken, err\n}\n<commit_msg>gitlab add new customised url function<commit_after>\/\/ Package gitlab implements the OAuth2 protocol for authenticating users through gitlab.\n\/\/ This package can be used as a reference implementation of an OAuth2 provider for Goth.\npackage gitlab\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\n\t\"github.com\/markbates\/goth\"\n\t\"golang.org\/x\/oauth2\"\n\t\"fmt\"\n)\n\n\/\/ These vars define the Authentication, Token, and Profile URLS for Gitlab. If\n\/\/ using Gitlab CE or EE, you should change these values before calling New.\n\/\/\n\/\/ Examples:\n\/\/\tgitlab.AuthURL = \"https:\/\/gitlab.acme.com\/oauth\/authorize\n\/\/\tgitlab.TokenURL = \"https:\/\/gitlab.acme.com\/oauth\/token\n\/\/\tgitlab.ProfileURL = \"https:\/\/gitlab.acme.com\/api\/v3\/user\nvar (\n\tAuthURL = \"https:\/\/gitlab.com\/oauth\/authorize\"\n\tTokenURL = \"https:\/\/gitlab.com\/oauth\/token\"\n\tProfileURL = \"https:\/\/gitlab.com\/api\/v3\/user\"\n)\n\n\/\/ Provider is the implementation of `goth.Provider` for accessing Gitlab.\ntype Provider struct {\n\tClientKey string\n\tSecret string\n\tCallbackURL string\n\tHTTPClient *http.Client\n\tconfig *oauth2.Config\n\tproviderName string\n\tauthURL string\n\ttokenURL string\n\tprofileURL string\n}\n\n\/\/ New creates a new Gitlab provider and sets up important connection details.\n\/\/ You should always call `gitlab.New` to get a new provider. Never try to\n\/\/ create one manually.\nfunc New(clientKey, secret, callbackURL string, scopes ...string) *Provider {\n\treturn NewCustomisedURL(clientKey, secret, callbackURL, AuthURL, TokenURL, ProfileURL, scopes...)\n}\n\n\/\/ NewCustomisedURL is similar to New(...) but can be used to set custom URLs to connect to\nfunc NewCustomisedURL(clientKey, secret, callbackURL, authURL, tokenURL, profileURL string, scopes ...string) *Provider {\n\tp := &Provider{\n\t\tClientKey: clientKey,\n\t\tSecret: secret,\n\t\tCallbackURL: callbackURL,\n\t\tproviderName: \"gitlab\",\n\t\tauthURL: authURL,\n\t\ttokenURL: tokenURL,\n\t\tprofileURL: profileURL,\n\t}\n\tp.config = newConfig(p, scopes)\n\treturn p\n}\n\n\/\/ Name is the name used to retrieve this provider later.\nfunc (p *Provider) Name() string {\n\treturn p.providerName\n}\n\n\/\/ SetName is to update the name of the provider (needed in case of multiple providers of 1 type)\nfunc (p *Provider) SetName(name string) {\n\tp.providerName = name\n}\n\nfunc (p *Provider) Client() *http.Client {\n\treturn goth.HTTPClientWithFallBack(p.HTTPClient)\n}\n\n\/\/ Debug is a no-op for the gitlab package.\nfunc (p *Provider) Debug(debug bool) {}\n\n\/\/ BeginAuth asks Gitlab for an authentication end-point.\nfunc (p *Provider) BeginAuth(state string) (goth.Session, error) {\n\treturn &Session{\n\t\tAuthURL: p.config.AuthCodeURL(state),\n\t}, nil\n}\n\n\/\/ FetchUser will go to Gitlab and access basic information about the user.\nfunc (p *Provider) FetchUser(session goth.Session) (goth.User, error) {\n\tsess := session.(*Session)\n\tuser := goth.User{\n\t\tAccessToken: sess.AccessToken,\n\t\tProvider: p.Name(),\n\t\tRefreshToken: sess.RefreshToken,\n\t\tExpiresAt: sess.ExpiresAt,\n\t}\n\n\tif user.AccessToken == \"\" {\n\t\t\/\/ data is not yet retrieved since accessToken is still empty\n\t\treturn user, fmt.Errorf(\"%s cannot get user information without accessToken\", p.providerName)\n\t}\n\n\tresponse, err := p.Client().Get(p.profileURL + \"?access_token=\" + url.QueryEscape(sess.AccessToken))\n\tif err != nil {\n\t\tif response != nil {\n\t\t\tresponse.Body.Close()\n\t\t}\n\t\treturn user, err\n\t}\n\n\tdefer response.Body.Close()\n\n\tif response.StatusCode != http.StatusOK {\n\t\treturn user, fmt.Errorf(\"%s responded with a %d trying to fetch user information\", p.providerName, response.StatusCode)\n\t}\n\n\tbits, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn user, err\n\t}\n\n\terr = json.NewDecoder(bytes.NewReader(bits)).Decode(&user.RawData)\n\tif err != nil {\n\t\treturn user, err\n\t}\n\n\terr = userFromReader(bytes.NewReader(bits), &user)\n\n\treturn user, err\n}\n\nfunc newConfig(provider *Provider, scopes []string) *oauth2.Config {\n\tc := &oauth2.Config{\n\t\tClientID: provider.ClientKey,\n\t\tClientSecret: provider.Secret,\n\t\tRedirectURL: provider.CallbackURL,\n\t\tEndpoint: oauth2.Endpoint{\n\t\t\tAuthURL: provider.authURL,\n\t\t\tTokenURL: provider.tokenURL,\n\t\t},\n\t\tScopes: []string{},\n\t}\n\n\tif len(scopes) > 0 {\n\t\tfor _, scope := range scopes {\n\t\t\tc.Scopes = append(c.Scopes, scope)\n\t\t}\n\t}\n\treturn c\n}\n\nfunc userFromReader(r io.Reader, user *goth.User) error {\n\tu := struct {\n\t\tName string `json:\"name\"`\n\t\tEmail string `json:\"email\"`\n\t\tNickName string `json:\"username\"`\n\t\tID int `json:\"id\"`\n\t\tAvatarURL string `json:\"avatar_url\"`\n\t}{}\n\terr := json.NewDecoder(r).Decode(&u)\n\tif err != nil {\n\t\treturn err\n\t}\n\tuser.Email = u.Email\n\tuser.Name = u.Name\n\tuser.NickName = u.NickName\n\tuser.UserID = strconv.Itoa(u.ID)\n\tuser.AvatarURL = u.AvatarURL\n\treturn nil\n}\n\n\/\/RefreshTokenAvailable refresh token is provided by auth provider or not\nfunc (p *Provider) RefreshTokenAvailable() bool {\n\treturn true\n}\n\n\/\/RefreshToken get new access token based on the refresh token\nfunc (p *Provider) RefreshToken(refreshToken string) (*oauth2.Token, error) {\n\ttoken := &oauth2.Token{RefreshToken: refreshToken}\n\tts := p.config.TokenSource(goth.ContextForClient(p.Client()), token)\n\tnewToken, err := ts.Token()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn newToken, err\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/crowdmob\/goamz\/aws\"\n\t\"github.com\/crowdmob\/goamz\/s3\"\n\t\"github.com\/goji\/httpauth\"\n\t\"github.com\/zenazn\/goji\"\n\t\"github.com\/zenazn\/goji\/web\"\n)\n\nvar (\n\tbucketName string\n\tbaseURL string\n)\n\nfunc init() {\n\tflag.StringVar(&bucketName, \"b\", \"\", \"Bucket Name\")\n\tflag.StringVar(&baseURL, \"u\", \"\", \"Base URL\")\n}\n\nfunc rootHandler(c web.C, w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"Nothing to see here!\")\n}\n\nfunc tweetbot(c web.C, w http.ResponseWriter, r *http.Request) {\n\tmultiReader, err := r.MultipartReader()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t\/\/ Open Bucket\n\tauth, err := aws.EnvAuth()\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\ts := s3.New(auth, aws.EUWest)\n\tbucket := s.Bucket(bucketName)\n\n\t\/\/ We only want the first part, the media\n\tpart, err := multiReader.NextPart()\n\tif err == io.EOF {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\toriginalFilename := part.FileName()\n\ttimeStamp := time.Now().Unix()\n\trandomValue := rand.Intn(999999)\n\tfilename := fmt.Sprintf(\"%x-%x-%s\", timeStamp, randomValue, originalFilename)\n\tpath := fmt.Sprintf(\"tweetbot\/%s\", filename)\n\n\tcontentType := part.Header.Get(\"Content-Type\")\n\tcontentLength, err := strconv.ParseInt(part.Header.Get(\"Content-Length\"), 10, 64)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr = bucket.PutReader(path, part, contentLength, contentType, s3.PublicRead, s3.Options{CacheControl: \"public, max-age=315360000\"})\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\tfmt.Printf(\"\\nFile %s (%s) uploaded successfully.\\n\", originalFilename, path)\n\n\turl := fmt.Sprintf(\"%s\/%s\", baseURL, path)\n\n\tresponseMap := map[string]string{\"url\": url}\n\tjsonResponse, _ := json.Marshal(responseMap)\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tfmt.Fprintf(w, string(jsonResponse))\n}\n\nfunc main() {\n\tgoji.Use(httpauth.SimpleBasicAuth(\"x\", \"password\"))\n\n\tgoji.Get(\"\/\", rootHandler)\n\n\tre := regexp.MustCompile(\"\/tweetbot\")\n\tgoji.Post(re, tweetbot)\n\n\tgoji.Serve()\n}\n<commit_msg>Better filenames<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\/big\"\n\t\"mime\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/crowdmob\/goamz\/aws\"\n\t\"github.com\/crowdmob\/goamz\/s3\"\n\t\"github.com\/goji\/httpauth\"\n\t\"github.com\/tv42\/base58\"\n\t\"github.com\/zenazn\/goji\"\n\t\"github.com\/zenazn\/goji\/web\"\n)\n\nvar (\n\tbucketName string\n\tbaseURL string\n)\n\nfunc init() {\n\tflag.StringVar(&bucketName, \"b\", \"\", \"Bucket Name\")\n\tflag.StringVar(&baseURL, \"u\", \"\", \"Base URL\")\n}\n\nfunc rootHandler(c web.C, w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"Nothing to see here!\")\n}\n\nfunc tweetbot(c web.C, w http.ResponseWriter, r *http.Request) {\n\tmultiReader, err := r.MultipartReader()\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t\/\/ Open Bucket\n\tauth, err := aws.EnvAuth()\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\ts := s3.New(auth, aws.EUWest)\n\tbucket := s.Bucket(bucketName)\n\n\t\/\/ We only want the first part, the media\n\tpart, err := multiReader.NextPart()\n\tif err == io.EOF {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\toriginalFilename := part.FileName()\n\tfileExt := filepath.Ext(originalFilename)\n\tunixTime := time.Now().UTC().Unix()\n\tb58buf := base58.EncodeBig(nil, big.NewInt(unixTime))\n\n\tfilename := fmt.Sprintf(\"%s%s\", b58buf, fileExt)\n\tpath := \"tweetbot\/\" + filename\n\n\tcontentType := part.Header.Get(\"Content-Type\")\n\tif contentType == \"\" {\n\t\tcontentType = mime.TypeByExtension(fileExt)\n\n\t\tif contentType == \"\" {\n\t\t\tcontentType = \"application\/octet-stream\"\n\t\t}\n\t}\n\n\tcontentLength, err := strconv.ParseInt(part.Header.Get(\"Content-Length\"), 10, 64)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr = bucket.PutReader(path, part, contentLength, contentType, s3.PublicRead, s3.Options{CacheControl: \"public, max-age=315360000\"})\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\t\/\/ fmt.Printf(\"\\nFile %s (%s) uploaded successfully.\\n\", originalFilename, path)\n\n\turl := fmt.Sprintf(\"%s\/%s\", baseURL, path)\n\n\tresponseMap := map[string]string{\"url\": url}\n\tjsonResponse, _ := json.Marshal(responseMap)\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tfmt.Fprintf(w, string(jsonResponse))\n}\n\nfunc main() {\n\tgoji.Use(httpauth.SimpleBasicAuth(\"x\", \"password\"))\n\n\tgoji.Get(\"\/\", rootHandler)\n\n\tre := regexp.MustCompile(\"\/tweetbot\")\n\tgoji.Post(re, tweetbot)\n\n\tgoji.Serve()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package goutils provides common utilities\n\/\/\n\/\/ See also:\n\/\/ https:\/\/godoc.org\/github.com\/palsivertsen\/goutils\/converters\n\/\/ https:\/\/godoc.org\/github.com\/palsivertsen\/goutils\/validators\npackage goutils\n<commit_msg>Removed links to sub packages as they are listed implicitly<commit_after>\/\/ Package goutils provides common utilities\npackage goutils\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2014 Datacratic. All rights reserved.\n\npackage nfork\n\nimport (\n\t\"encoding\/json\"\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n)\n\n\n\/\/ Stats contains the stats of an outbound at a given point in time.\ntype Stats struct {\n\n\t\/\/ Requests counts the number of requests made.\n\tRequests uint64\n\n\t\/\/ Errors counts the number of errors encountered.\n\tErrors uint64\n\n\t\/\/ Timeouts counts the number of timeouts encountered.\n\tTimeouts uint64\n\n\t\/\/ Latency is the latency distribution of all requests.\n\tLatency Distribution\n\n\t\/\/ Responses counts the number of responses received for an HTTP status\n\t\/\/ code.\n\tResponses map[int]uint64\n}\n\n\/\/ MarshalJSON defines a custom JSON format for encoding\/json.\nfunc (stats *Stats) MarshalJSON() ([]byte, error) {\n\tvar statsJSON struct {\n\t\tRequests uint64 `json:\"requests\"`\n\t\tErrors uint64 `json:\"errors\"`\n\t\tTimeouts uint64 `json:\"timeouts\"`\n\t\tLatency map[string]string `json:\"latency\"`\n\t\tResponses map[string]uint64 `json:\"responses\"`\n\t}\n\n\tstatsJSON.Requests = stats.Requests\n\tstatsJSON.Errors = stats.Errors\n\tstatsJSON.Timeouts = stats.Timeouts\n\tstatsJSON.Latency = make(map[string]string)\n\tstatsJSON.Responses = make(map[string]uint64)\n\n\tp50, p90, p99, max := stats.Latency.Percentiles()\n\tstatsJSON.Latency[\"p50\"] = time.Duration(p50).String()\n\tstatsJSON.Latency[\"p90\"] = time.Duration(p90).String()\n\tstatsJSON.Latency[\"p99\"] = time.Duration(p99).String()\n\tstatsJSON.Latency[\"pmx\"] = time.Duration(max).String()\n\n\tfor code, count := range stats.Responses {\n\t\tstatsJSON.Responses[strconv.Itoa(code)] = count\n\t}\n\n\treturn json.Marshal(&statsJSON)\n}\n\n\/\/ Event contains the outcome of an HTTP request.\ntype Event struct {\n\n\t\/\/ Error indicates that an error occured.\n\tError bool\n\n\t\/\/ Timeout indicates that the request timed out.\n\tTimeout bool\n\n\t\/\/ Response is the HTTP response code received.\n\tResponse int\n\n\t\/\/ Latency mesures the latency of the request.\n\tLatency time.Duration\n}\n\n\/\/ DefaultSampleRate is used if Rate is not set set in StatsRecorder.\nconst DefaultSampleRate = 10 * time.Second\n\n\/\/ StatsRecorder records stats for a given outbound and updates them at a\n\/\/ given rate.\ntype StatsRecorder struct {\n\n\t\/\/ Rate at which stats are updated.\n\tRate time.Duration\n\n\t\/\/ Rand is the RNG used for stats sampling.\n\tRand *rand.Rand\n\n\n\tinitialize sync.Once\n\n\tmutex sync.Mutex\n\tcurrent, prev *Stats\n\n\tshutdownC chan int\n}\n\n\/\/ Init initializes the object.\nfunc (recorder *StatsRecorder) Init() {\n\trecorder.initialize.Do(recorder.init)\n}\n\nfunc (recorder *StatsRecorder) init() {\n\tif recorder.Rate == 0 {\n\t\trecorder.Rate = DefaultSampleRate\n\t}\n\n\tif recorder.Rand == nil {\n\t\trecorder.Rand = rand.New(rand.NewSource(0))\n\t}\n\n\trecorder.prev = new(Stats)\n\trecorder.current = new(Stats)\n\n\trecorder.shutdownC = make(chan int)\n\tgo recorder.run()\n}\n\n\/\/ Close terminates the stats recorder.\nfunc (recorder *StatsRecorder) Close() {\n\trecorder.Init()\n\trecorder.shutdownC <- 1\n}\n\n\/\/ Record records the given outcome.\nfunc (recorder *StatsRecorder) Record(event Event) {\n\trecorder.Init()\n\trecorder.mutex.Lock()\n\n\tstats := recorder.current\n\n\tstats.Requests++\n\tstats.Latency.Sample(uint64(event.Latency))\n\n\tif event.Error {\n\t\tstats.Errors++\n\n\t} else if event.Timeout {\n\t\tstats.Timeouts++\n\n\t} else {\n\t\tif stats.Responses == nil {\n\t\t\tstats.Responses = make(map[int]uint64)\n\t\t}\n\t\tstats.Responses[event.Response]++\n\t}\n\n\trecorder.mutex.Unlock()\n}\n\n\/\/ Read returns the last updated stats.\nfunc (recorder *StatsRecorder) Read() (stats *Stats) {\n\trecorder.Init()\n\trecorder.mutex.Lock()\n\n\tstats = recorder.prev\n\n\trecorder.mutex.Unlock()\n\treturn\n}\n\nfunc (recorder *StatsRecorder) run() {\n\ttick := time.NewTicker(recorder.Rate)\n\tfor {\n\t\tselect {\n\t\tcase <-tick.C:\n\t\t\trecorder.mutex.Lock()\n\n\t\t\trecorder.prev = recorder.current\n\t\t\trecorder.current = new(Stats)\n\n\t\t\trecorder.mutex.Unlock()\n\n\t\tcase <-recorder.shutdownC:\n\t\t\ttick.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}\n<commit_msg>undo unnecessary white spaces<commit_after>\/\/ Copyright (c) 2014 Datacratic. All rights reserved.\n\npackage nfork\n\nimport (\n\t\"encoding\/json\"\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ Stats contains the stats of an outbound at a given point in time.\ntype Stats struct {\n\n\t\/\/ Requests counts the number of requests made.\n\tRequests uint64\n\n\t\/\/ Errors counts the number of errors encountered.\n\tErrors uint64\n\n\t\/\/ Timeouts counts the number of timeouts encountered.\n\tTimeouts uint64\n\n\t\/\/ Latency is the latency distribution of all requests.\n\tLatency Distribution\n\n\t\/\/ Responses counts the number of responses received for an HTTP status\n\t\/\/ code.\n\tResponses map[int]uint64\n}\n\n\/\/ MarshalJSON defines a custom JSON format for encoding\/json.\nfunc (stats *Stats) MarshalJSON() ([]byte, error) {\n\tvar statsJSON struct {\n\t\tRequests uint64 `json:\"requests\"`\n\t\tErrors uint64 `json:\"errors\"`\n\t\tTimeouts uint64 `json:\"timeouts\"`\n\t\tLatency map[string]string `json:\"latency\"`\n\t\tResponses map[string]uint64 `json:\"responses\"`\n\t}\n\n\tstatsJSON.Requests = stats.Requests\n\tstatsJSON.Errors = stats.Errors\n\tstatsJSON.Timeouts = stats.Timeouts\n\tstatsJSON.Latency = make(map[string]string)\n\tstatsJSON.Responses = make(map[string]uint64)\n\n\tp50, p90, p99, max := stats.Latency.Percentiles()\n\tstatsJSON.Latency[\"p50\"] = time.Duration(p50).String()\n\tstatsJSON.Latency[\"p90\"] = time.Duration(p90).String()\n\tstatsJSON.Latency[\"p99\"] = time.Duration(p99).String()\n\tstatsJSON.Latency[\"pmx\"] = time.Duration(max).String()\n\n\tfor code, count := range stats.Responses {\n\t\tstatsJSON.Responses[strconv.Itoa(code)] = count\n\t}\n\n\treturn json.Marshal(&statsJSON)\n}\n\n\/\/ Event contains the outcome of an HTTP request.\ntype Event struct {\n\n\t\/\/ Error indicates that an error occured.\n\tError bool\n\n\t\/\/ Timeout indicates that the request timed out.\n\tTimeout bool\n\n\t\/\/ Response is the HTTP response code received.\n\tResponse int\n\n\t\/\/ Latency mesures the latency of the request.\n\tLatency time.Duration\n}\n\n\/\/ DefaultSampleRate is used if Rate is not set set in StatsRecorder.\nconst DefaultSampleRate = 10 * time.Second\n\n\/\/ StatsRecorder records stats for a given outbound and updates them at a\n\/\/ given rate.\ntype StatsRecorder struct {\n\n\t\/\/ Rate at which stats are updated.\n\tRate time.Duration\n\n\t\/\/ Rand is the RNG used for stats sampling.\n\tRand *rand.Rand\n\n\tinitialize sync.Once\n\n\tmutex sync.Mutex\n\tcurrent, prev *Stats\n\n\tshutdownC chan int\n}\n\n\/\/ Init initializes the object.\nfunc (recorder *StatsRecorder) Init() {\n\trecorder.initialize.Do(recorder.init)\n}\n\nfunc (recorder *StatsRecorder) init() {\n\tif recorder.Rate == 0 {\n\t\trecorder.Rate = DefaultSampleRate\n\t}\n\n\tif recorder.Rand == nil {\n\t\trecorder.Rand = rand.New(rand.NewSource(0))\n\t}\n\n\trecorder.prev = new(Stats)\n\trecorder.current = new(Stats)\n\n\trecorder.shutdownC = make(chan int)\n\tgo recorder.run()\n}\n\n\/\/ Close terminates the stats recorder.\nfunc (recorder *StatsRecorder) Close() {\n\trecorder.Init()\n\trecorder.shutdownC <- 1\n}\n\n\/\/ Record records the given outcome.\nfunc (recorder *StatsRecorder) Record(event Event) {\n\trecorder.Init()\n\trecorder.mutex.Lock()\n\n\tstats := recorder.current\n\n\tstats.Requests++\n\tstats.Latency.Sample(uint64(event.Latency))\n\n\tif event.Error {\n\t\tstats.Errors++\n\n\t} else if event.Timeout {\n\t\tstats.Timeouts++\n\n\t} else {\n\t\tif stats.Responses == nil {\n\t\t\tstats.Responses = make(map[int]uint64)\n\t\t}\n\t\tstats.Responses[event.Response]++\n\t}\n\n\trecorder.mutex.Unlock()\n}\n\n\/\/ Read returns the last updated stats.\nfunc (recorder *StatsRecorder) Read() (stats *Stats) {\n\trecorder.Init()\n\trecorder.mutex.Lock()\n\n\tstats = recorder.prev\n\n\trecorder.mutex.Unlock()\n\treturn\n}\n\nfunc (recorder *StatsRecorder) run() {\n\ttick := time.NewTicker(recorder.Rate)\n\tfor {\n\t\tselect {\n\t\tcase <-tick.C:\n\t\t\trecorder.mutex.Lock()\n\n\t\t\trecorder.prev = recorder.current\n\t\t\trecorder.current = new(Stats)\n\n\t\t\trecorder.mutex.Unlock()\n\n\t\tcase <-recorder.shutdownC:\n\t\t\ttick.Stop()\n\t\t\treturn\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package nyb\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/hako\/durafmt\"\n\t\"github.com\/ugjka\/dumbirc\"\n\t\"gopkg.in\/ugjka\/go-tz.v2\/tz\"\n)\n\nvar nickChangeInterval = time.Second * 5\n\nfunc (bot *Settings) addCallbacks() {\n\tirc := bot.IrcConn\n\t\/\/On any message send a signal to ping timer to be ready\n\n\tirc.AddCallback(dumbirc.WELCOME, func(msg *dumbirc.Message) {\n\t\tif irc.Password != \"\" {\n\t\t\tconfirmErr := fmt.Errorf(\"did not get identification confirmation\")\n\t\t\terr := irc.WaitFor(func(m *dumbirc.Message) bool {\n\t\t\t\treturn m.Command == dumbirc.NOTICE && strings.Contains(m.Content, \"You are now identified for\")\n\t\t\t},\n\t\t\t\tfunc() {},\n\t\t\t\ttime.Second*30,\n\t\t\t\tconfirmErr,\n\t\t\t)\n\t\t\tif err == confirmErr {\n\t\t\t\tlog.Println(err)\n\t\t\t\tlog.Println(\"trying to start the bot anyway\")\n\t\t\t} else if err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tirc.Join(bot.IrcChans)\n\t\t\/\/Prevent early start\n\t\tbot.Do(func() {\n\t\t\tclose(bot.start)\n\t\t})\n\t})\n}\n\nfunc (bot *Settings) addTriggers() {\n\tirc := bot.IrcConn\n\t\/\/Trigger for !help\n\tstHelp := \"%s: Query location: '%s <location>', Time in location: '%s !time <location>', Next zone: '%s !next', Last zone: '%s !last', Remaining: '%s !remaining', Source code: https:\/\/github.com\/ugjka\/newyearsbot\"\n\tirc.AddTrigger(dumbirc.Trigger{\n\t\tCondition: func(msg *dumbirc.Message) bool {\n\t\t\treturn msg.Command == dumbirc.PRIVMSG &&\n\t\t\t\tmsg.Content == fmt.Sprintf(\"%s !help\", bot.IrcTrigger)\n\t\t},\n\t\tResponse: func(msg *dumbirc.Message) {\n\t\t\tlog.Println(\"Querying !help...\")\n\t\t\tirc.Reply(msg, fmt.Sprintf(stHelp, msg.Name, bot.IrcTrigger, bot.IrcTrigger, bot.IrcTrigger, bot.IrcTrigger, bot.IrcTrigger))\n\t\t},\n\t})\n\t\/\/Trigger for !next\n\tirc.AddTrigger(dumbirc.Trigger{\n\t\tCondition: func(msg *dumbirc.Message) bool {\n\t\t\treturn msg.Command == dumbirc.PRIVMSG &&\n\t\t\t\tmsg.Content == fmt.Sprintf(\"%s !next\", bot.IrcTrigger)\n\t\t},\n\t\tResponse: func(msg *dumbirc.Message) {\n\t\t\tlog.Println(\"Querying !next...\")\n\t\t\tdur := time.Minute * time.Duration(bot.next.Offset*60)\n\t\t\tif timeNow().UTC().Add(dur).After(target) {\n\t\t\t\tirc.Reply(msg, fmt.Sprintf(\"No more next, %d is here AoE\", target.Year()))\n\t\t\t\treturn\n\t\t\t}\n\t\t\thumandur := durafmt.Parse(target.Sub(timeNow().UTC().Add(dur)))\n\t\t\tirc.Reply(msg, fmt.Sprintf(\"Next New Year in %s in %s\",\n\t\t\t\tremoveMilliseconds(humandur), bot.next))\n\t\t},\n\t})\n\t\/\/Trigger for !last\n\tirc.AddTrigger(dumbirc.Trigger{\n\t\tCondition: func(msg *dumbirc.Message) bool {\n\t\t\treturn msg.Command == dumbirc.PRIVMSG &&\n\t\t\t\tmsg.Content == fmt.Sprintf(\"%s !last\", bot.IrcTrigger)\n\t\t},\n\t\tResponse: func(msg *dumbirc.Message) {\n\t\t\tlog.Println(\"Querying !last...\")\n\t\t\tdur := time.Minute * time.Duration(bot.last.Offset*60)\n\t\t\thumandur := durafmt.Parse(timeNow().UTC().Add(dur).Sub(target))\n\t\t\tif bot.last.Offset == -12 {\n\t\t\t\thumandur = durafmt.Parse(timeNow().UTC().Add(dur).Sub(target.AddDate(-1, 0, 0)))\n\t\t\t}\n\t\t\tirc.Reply(msg, fmt.Sprintf(\"Last New Year %s ago in %s\",\n\t\t\t\tremoveMilliseconds(humandur), bot.last))\n\t\t},\n\t})\n\t\/\/Trigger for !remaining\n\tirc.AddTrigger(dumbirc.Trigger{\n\t\tCondition: func(msg *dumbirc.Message) bool {\n\t\t\treturn msg.Command == dumbirc.PRIVMSG &&\n\t\t\t\tmsg.Content == fmt.Sprintf(\"%s !remaining\", bot.IrcTrigger)\n\t\t},\n\t\tResponse: func(msg *dumbirc.Message) {\n\t\t\tlog.Println(\"Querying !remaining...\")\n\t\t\tss := \"s\"\n\t\t\tif bot.remaining == 1 {\n\t\t\t\tss = \"\"\n\t\t\t}\n\t\t\tirc.Reply(msg, fmt.Sprintf(\"%s: %d timezone%s remaining\", msg.Name, bot.remaining, ss))\n\t\t},\n\t})\n\t\/\/Trigger for time in location\n\tirc.AddTrigger(dumbirc.Trigger{\n\t\tCondition: func(msg *dumbirc.Message) bool {\n\t\t\treturn msg.Command == dumbirc.PRIVMSG &&\n\t\t\t\tstrings.HasPrefix(msg.Content, fmt.Sprintf(\"%s !time \", bot.IrcTrigger))\n\t\t},\n\t\tResponse: func(msg *dumbirc.Message) {\n\t\t\tlog.Println(\"Querying !time...\")\n\t\t\tres, err := bot.getTime(msg.Content[len(bot.IrcTrigger)+7:])\n\t\t\tif err == errNoZone || err == errNoPlace {\n\t\t\t\tlog.Println(\"Query error:\", err)\n\t\t\t\tirc.Reply(msg, fmt.Sprintf(\"%s: %s\", msg.Name, err))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Query error:\", err)\n\t\t\t\tirc.Reply(msg, fmt.Sprintf(\"%s: Some error occurred!\", msg.Name))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tirc.Reply(msg, res)\n\t\t},\n\t})\n\t\/\/UTC\n\tirc.AddTrigger(dumbirc.Trigger{\n\t\tCondition: func(msg *dumbirc.Message) bool {\n\t\t\treturn msg.Command == dumbirc.PRIVMSG &&\n\t\t\t\tmsg.Content == fmt.Sprintf(\"%s !time\", bot.IrcTrigger)\n\t\t},\n\t\tResponse: func(msg *dumbirc.Message) {\n\t\t\tlog.Println(\"Querying !time...\")\n\t\t\tres := fmt.Sprintf(\"Time is %s\", time.Now().UTC().Format(\"Mon Jan 2 15:04:05 -0700 MST 2006\"))\n\t\t\tirc.Reply(msg, res)\n\t\t},\n\t})\n\t\/\/Trigger for location queries\n\tirc.AddTrigger(dumbirc.Trigger{\n\t\tCondition: func(msg *dumbirc.Message) bool {\n\t\t\treturn msg.Command == dumbirc.PRIVMSG &&\n\t\t\t\t!strings.Contains(msg.Content, \"!time\") &&\n\t\t\t\t!strings.Contains(msg.Content, \"!next\") &&\n\t\t\t\t!strings.Contains(msg.Content, \"!last\") &&\n\t\t\t\t!strings.Contains(msg.Content, \"!help\") &&\n\t\t\t\t!strings.Contains(msg.Content, \"!remaining\") &&\n\t\t\t\tstrings.HasPrefix(msg.Content, fmt.Sprintf(\"%s \", bot.IrcTrigger))\n\t\t},\n\t\tResponse: func(msg *dumbirc.Message) {\n\t\t\ttz, err := bot.getNewYear(msg.Content[len(bot.IrcTrigger)+1:])\n\t\t\tif err == errNoZone || err == errNoPlace {\n\t\t\t\tlog.Println(\"Query error:\", err)\n\t\t\t\tirc.Reply(msg, fmt.Sprintf(\"%s: %s\", msg.Name, err))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Query error:\", err)\n\t\t\t\tirc.Reply(msg, fmt.Sprintf(\"%s: Some error occurred!\", msg.Name))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tirc.Reply(msg, fmt.Sprintf(\"%s: %s\", msg.Name, tz))\n\t\t},\n\t})\n}\n\nvar (\n\terrNoZone = errors.New(\"couldn't get the timezone for that location\")\n\terrNoPlace = errors.New(\"Couldn't find that place\")\n)\n\nfunc (bot *Settings) getNominatimReqURL(location *string) string {\n\tmaps := url.Values{}\n\tmaps.Add(\"q\", *location)\n\tmaps.Add(\"format\", \"json\")\n\tmaps.Add(\"accept-language\", \"en\")\n\tmaps.Add(\"limit\", \"1\")\n\tmaps.Add(\"email\", bot.Email)\n\treturn bot.Nominatim + NominatimEndpoint + maps.Encode()\n}\n\nvar stNewYearWillHappen = \"New Year in %s will happen in %s\"\nvar stNewYearHappenned = \"New Year in %s happened %s ago\"\n\nfunc (bot *Settings) getTime(location string) (string, error) {\n\tlog.Println(\"Querying location:\", location)\n\tdata, err := NominatimGetter(bot.getNominatimReqURL(&location))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn \"\", err\n\t}\n\tvar res NominatimResults\n\tif err = json.Unmarshal(data, &res); err != nil {\n\t\tlog.Println(err)\n\t\treturn \"\", err\n\t}\n\tif len(res) == 0 {\n\t\treturn \"\", errNoPlace\n\t}\n\tp := tz.Point{\n\t\tLat: res[0].Lat,\n\t\tLon: res[0].Lon,\n\t}\n\ttzid, err := tz.GetZone(p)\n\tif err != nil {\n\t\treturn \"\", errNoZone\n\t}\n\tzone, err := time.LoadLocation(tzid[0])\n\tif err != nil {\n\t\treturn \"\", errNoZone\n\t}\n\taddress := res[0].DisplayName\n\treturn fmt.Sprintf(\"Time in %s is %s\", address, time.Now().In(zone).Format(\"Mon Jan 2 15:04:05 -0700 MST 2006\")), nil\n}\n\nfunc (bot *Settings) getNewYear(location string) (string, error) {\n\tlog.Println(\"Querying location:\", location)\n\tdata, err := NominatimGetter(bot.getNominatimReqURL(&location))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn \"\", err\n\t}\n\tvar res NominatimResults\n\tif err = json.Unmarshal(data, &res); err != nil {\n\t\tlog.Println(err)\n\t\treturn \"\", err\n\t}\n\tif len(res) == 0 {\n\t\treturn \"\", errNoPlace\n\t}\n\tp := tz.Point{\n\t\tLat: res[0].Lat,\n\t\tLon: res[0].Lon,\n\t}\n\ttzid, err := tz.GetZone(p)\n\tif err != nil {\n\t\treturn \"\", errNoZone\n\t}\n\tzone, err := time.LoadLocation(tzid[0])\n\tif err != nil {\n\t\treturn \"\", errNoZone\n\t}\n\toffset := time.Second * time.Duration(getOffset(target, zone))\n\taddress := res[0].DisplayName\n\n\tif timeNow().UTC().Add(offset).Before(target) {\n\t\thumandur := durafmt.Parse(target.Sub(timeNow().UTC().Add(offset)))\n\t\treturn fmt.Sprintf(stNewYearWillHappen, address, removeMilliseconds(humandur)), nil\n\t}\n\thumandur := durafmt.Parse(timeNow().UTC().Add(offset).Sub(target))\n\treturn fmt.Sprintf(stNewYearHappenned, address, removeMilliseconds(humandur)), nil\n}\n<commit_msg>improve trigger checking<commit_after>package nyb\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/hako\/durafmt\"\n\t\"github.com\/ugjka\/dumbirc\"\n\t\"gopkg.in\/ugjka\/go-tz.v2\/tz\"\n)\n\nvar nickChangeInterval = time.Second * 5\n\nfunc (bot *Settings) addCallbacks() {\n\tirc := bot.IrcConn\n\t\/\/On any message send a signal to ping timer to be ready\n\n\tirc.AddCallback(dumbirc.WELCOME, func(msg *dumbirc.Message) {\n\t\tif irc.Password != \"\" {\n\t\t\tconfirmErr := fmt.Errorf(\"did not get identification confirmation\")\n\t\t\terr := irc.WaitFor(func(m *dumbirc.Message) bool {\n\t\t\t\treturn m.Command == dumbirc.NOTICE && strings.Contains(m.Content, \"You are now identified for\")\n\t\t\t},\n\t\t\t\tfunc() {},\n\t\t\t\ttime.Second*30,\n\t\t\t\tconfirmErr,\n\t\t\t)\n\t\t\tif err == confirmErr {\n\t\t\t\tlog.Println(err)\n\t\t\t\tlog.Println(\"trying to start the bot anyway\")\n\t\t\t} else if err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tirc.Join(bot.IrcChans)\n\t\t\/\/Prevent early start\n\t\tbot.Do(func() {\n\t\t\tclose(bot.start)\n\t\t})\n\t})\n}\n\nfunc (bot *Settings) addTriggers() {\n\tirc := bot.IrcConn\n\t\/\/Trigger for !help\n\tstHelp := \"Query location: '%s <location>', Time in location: '%s !time <location>', Next zone: '%s !next', Last zone: '%s !last', Remaining: '%s !remaining'\"\n\tstSource := \"Source code: https:\/\/github.com\/ugjka\/newyearsbot\"\n\tirc.AddTrigger(dumbirc.Trigger{\n\t\tCondition: func(msg *dumbirc.Message) bool {\n\t\t\treturn msg.Command == dumbirc.PRIVMSG &&\n\t\t\t\tstrings.HasPrefix(msg.Content, fmt.Sprintf(\"%s !help\", bot.IrcTrigger)) ||\n\t\t\t\tmsg.Content == fmt.Sprintf(\"%s help\", bot.IrcTrigger)\n\t\t},\n\t\tResponse: func(msg *dumbirc.Message) {\n\t\t\tlog.Println(\"Querying !help...\")\n\t\t\tirc.Reply(msg, fmt.Sprintf(stHelp+\", \"+stSource, bot.IrcTrigger, bot.IrcTrigger, bot.IrcTrigger, bot.IrcTrigger, bot.IrcTrigger))\n\t\t},\n\t})\n\t\/\/Trigger for !next\n\tirc.AddTrigger(dumbirc.Trigger{\n\t\tCondition: func(msg *dumbirc.Message) bool {\n\t\t\treturn msg.Command == dumbirc.PRIVMSG &&\n\t\t\t\tstrings.HasPrefix(msg.Content, fmt.Sprintf(\"%s !next\", bot.IrcTrigger)) ||\n\t\t\t\tmsg.Content == fmt.Sprintf(\"%s next\", bot.IrcTrigger)\n\t\t},\n\t\tResponse: func(msg *dumbirc.Message) {\n\t\t\tlog.Println(\"Querying !next...\")\n\t\t\tdur := time.Minute * time.Duration(bot.next.Offset*60)\n\t\t\tif timeNow().UTC().Add(dur).After(target) {\n\t\t\t\tirc.Reply(msg, fmt.Sprintf(\"No more next, %d is here AoE\", target.Year()))\n\t\t\t\treturn\n\t\t\t}\n\t\t\thumandur := durafmt.Parse(target.Sub(timeNow().UTC().Add(dur)))\n\t\t\tirc.Reply(msg, fmt.Sprintf(\"Next New Year in %s in %s\",\n\t\t\t\tremoveMilliseconds(humandur), bot.next))\n\t\t},\n\t})\n\t\/\/Trigger for !last\n\tirc.AddTrigger(dumbirc.Trigger{\n\t\tCondition: func(msg *dumbirc.Message) bool {\n\t\t\treturn msg.Command == dumbirc.PRIVMSG &&\n\t\t\t\tstrings.HasPrefix(msg.Content, fmt.Sprintf(\"%s !last\", bot.IrcTrigger)) ||\n\t\t\t\tmsg.Content == fmt.Sprintf(\"%s last\", bot.IrcTrigger)\n\t\t},\n\t\tResponse: func(msg *dumbirc.Message) {\n\t\t\tlog.Println(\"Querying !last...\")\n\t\t\tdur := time.Minute * time.Duration(bot.last.Offset*60)\n\t\t\thumandur := durafmt.Parse(timeNow().UTC().Add(dur).Sub(target))\n\t\t\tif bot.last.Offset == -12 {\n\t\t\t\thumandur = durafmt.Parse(timeNow().UTC().Add(dur).Sub(target.AddDate(-1, 0, 0)))\n\t\t\t}\n\t\t\tirc.Reply(msg, fmt.Sprintf(\"Last New Year %s ago in %s\",\n\t\t\t\tremoveMilliseconds(humandur), bot.last))\n\t\t},\n\t})\n\t\/\/Trigger for !remaining\n\tirc.AddTrigger(dumbirc.Trigger{\n\t\tCondition: func(msg *dumbirc.Message) bool {\n\t\t\treturn msg.Command == dumbirc.PRIVMSG &&\n\t\t\t\tstrings.HasPrefix(msg.Content, fmt.Sprintf(\"%s !remaining\", bot.IrcTrigger)) ||\n\t\t\t\tmsg.Content == fmt.Sprintf(\"%s remaining\", bot.IrcTrigger)\n\t\t},\n\t\tResponse: func(msg *dumbirc.Message) {\n\t\t\tlog.Println(\"Querying !remaining...\")\n\t\t\tss := \"s\"\n\t\t\tif bot.remaining == 1 {\n\t\t\t\tss = \"\"\n\t\t\t}\n\t\t\tirc.Reply(msg, fmt.Sprintf(\"%s: %d timezone%s remaining\", msg.Name, bot.remaining, ss))\n\t\t},\n\t})\n\t\/\/Trigger for time in location\n\tirc.AddTrigger(dumbirc.Trigger{\n\t\tCondition: func(msg *dumbirc.Message) bool {\n\t\t\treturn msg.Command == dumbirc.PRIVMSG &&\n\t\t\t\tstrings.HasPrefix(msg.Content, fmt.Sprintf(\"%s !time \", bot.IrcTrigger))\n\t\t},\n\t\tResponse: func(msg *dumbirc.Message) {\n\t\t\tlog.Println(\"Querying !time...\")\n\t\t\tres, err := bot.getTime(msg.Content[len(bot.IrcTrigger)+7:])\n\t\t\tif err == errNoZone || err == errNoPlace {\n\t\t\t\tlog.Println(\"Query error:\", err)\n\t\t\t\tirc.Reply(msg, fmt.Sprintf(\"%s: %s\", msg.Name, err))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Query error:\", err)\n\t\t\t\tirc.Reply(msg, fmt.Sprintf(\"%s: Some error occurred!\", msg.Name))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tirc.Reply(msg, res)\n\t\t},\n\t})\n\t\/\/UTC\n\tirc.AddTrigger(dumbirc.Trigger{\n\t\tCondition: func(msg *dumbirc.Message) bool {\n\t\t\treturn msg.Command == dumbirc.PRIVMSG &&\n\t\t\t\tmsg.Content == fmt.Sprintf(\"%s !time\", bot.IrcTrigger)\n\t\t},\n\t\tResponse: func(msg *dumbirc.Message) {\n\t\t\tlog.Println(\"Querying !time...\")\n\t\t\tres := fmt.Sprintf(\"Time is %s\", time.Now().UTC().Format(\"Mon Jan 2 15:04:05 -0700 MST 2006\"))\n\t\t\tirc.Reply(msg, res)\n\t\t},\n\t})\n\n\t\/\/Trigger for incorrect querries\n\tirc.AddTrigger(dumbirc.Trigger{\n\t\tCondition: func(msg *dumbirc.Message) bool {\n\t\t\treturn msg.Command == dumbirc.PRIVMSG &&\n\t\t\t\tmsg.Content != fmt.Sprintf(\"%s !time\", bot.IrcTrigger) &&\n\t\t\t\t!strings.HasPrefix(msg.Content, fmt.Sprintf(\"%s !time \", bot.IrcTrigger)) &&\n\t\t\t\t!strings.HasPrefix(msg.Content, fmt.Sprintf(\"%s !next\", bot.IrcTrigger)) &&\n\t\t\t\t!strings.HasPrefix(msg.Content, fmt.Sprintf(\"%s !last\", bot.IrcTrigger)) &&\n\t\t\t\t!strings.HasPrefix(msg.Content, fmt.Sprintf(\"%s !help\", bot.IrcTrigger)) &&\n\t\t\t\t!strings.HasPrefix(msg.Content, fmt.Sprintf(\"%s !remaining\", bot.IrcTrigger)) &&\n\t\t\t\tstrings.HasPrefix(msg.Content, fmt.Sprintf(\"%s !\", bot.IrcTrigger))\n\t\t},\n\t\tResponse: func(msg *dumbirc.Message) {\n\t\t\tirc.Reply(msg, fmt.Sprintf(\"Invalid command, valid commands are: \"+stHelp, bot.IrcTrigger, bot.IrcTrigger, bot.IrcTrigger, bot.IrcTrigger, bot.IrcTrigger))\n\t\t},\n\t})\n\n\t\/\/Trigger for location queries\n\tirc.AddTrigger(dumbirc.Trigger{\n\t\tCondition: func(msg *dumbirc.Message) bool {\n\t\t\treturn msg.Command == dumbirc.PRIVMSG &&\n\t\t\t\t!strings.HasPrefix(msg.Content, fmt.Sprintf(\"%s !\", bot.IrcTrigger)) &&\n\t\t\t\tmsg.Content != fmt.Sprintf(\"%s next\", bot.IrcTrigger) &&\n\t\t\t\tmsg.Content != fmt.Sprintf(\"%s last\", bot.IrcTrigger) &&\n\t\t\t\tmsg.Content != fmt.Sprintf(\"%s help\", bot.IrcTrigger) &&\n\t\t\t\tmsg.Content != fmt.Sprintf(\"%s remaining\", bot.IrcTrigger) &&\n\t\t\t\tstrings.HasPrefix(msg.Content, fmt.Sprintf(\"%s \", bot.IrcTrigger))\n\t\t},\n\t\tResponse: func(msg *dumbirc.Message) {\n\t\t\ttz, err := bot.getNewYear(msg.Content[len(bot.IrcTrigger)+1:])\n\t\t\tif err == errNoZone || err == errNoPlace {\n\t\t\t\tlog.Println(\"Query error:\", err)\n\t\t\t\tirc.Reply(msg, fmt.Sprintf(\"%s: %s\", msg.Name, err))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Query error:\", err)\n\t\t\t\tirc.Reply(msg, fmt.Sprintf(\"%s: Some error occurred!\", msg.Name))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tirc.Reply(msg, fmt.Sprintf(\"%s: %s\", msg.Name, tz))\n\t\t},\n\t})\n}\n\nvar (\n\terrNoZone = errors.New(\"couldn't get the timezone for that location\")\n\terrNoPlace = errors.New(\"Couldn't find that place\")\n)\n\nfunc (bot *Settings) getNominatimReqURL(location *string) string {\n\tmaps := url.Values{}\n\tmaps.Add(\"q\", *location)\n\tmaps.Add(\"format\", \"json\")\n\tmaps.Add(\"accept-language\", \"en\")\n\tmaps.Add(\"limit\", \"1\")\n\tmaps.Add(\"email\", bot.Email)\n\treturn bot.Nominatim + NominatimEndpoint + maps.Encode()\n}\n\nvar stNewYearWillHappen = \"New Year in %s will happen in %s\"\nvar stNewYearHappenned = \"New Year in %s happened %s ago\"\n\nfunc (bot *Settings) getTime(location string) (string, error) {\n\tlog.Println(\"Querying location:\", location)\n\tdata, err := NominatimGetter(bot.getNominatimReqURL(&location))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn \"\", err\n\t}\n\tvar res NominatimResults\n\tif err = json.Unmarshal(data, &res); err != nil {\n\t\tlog.Println(err)\n\t\treturn \"\", err\n\t}\n\tif len(res) == 0 {\n\t\treturn \"\", errNoPlace\n\t}\n\tp := tz.Point{\n\t\tLat: res[0].Lat,\n\t\tLon: res[0].Lon,\n\t}\n\ttzid, err := tz.GetZone(p)\n\tif err != nil {\n\t\treturn \"\", errNoZone\n\t}\n\tzone, err := time.LoadLocation(tzid[0])\n\tif err != nil {\n\t\treturn \"\", errNoZone\n\t}\n\taddress := res[0].DisplayName\n\treturn fmt.Sprintf(\"Time in %s is %s\", address, time.Now().In(zone).Format(\"Mon Jan 2 15:04:05 -0700 MST 2006\")), nil\n}\n\nfunc (bot *Settings) getNewYear(location string) (string, error) {\n\tlog.Println(\"Querying location:\", location)\n\tdata, err := NominatimGetter(bot.getNominatimReqURL(&location))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn \"\", err\n\t}\n\tvar res NominatimResults\n\tif err = json.Unmarshal(data, &res); err != nil {\n\t\tlog.Println(err)\n\t\treturn \"\", err\n\t}\n\tif len(res) == 0 {\n\t\treturn \"\", errNoPlace\n\t}\n\tp := tz.Point{\n\t\tLat: res[0].Lat,\n\t\tLon: res[0].Lon,\n\t}\n\ttzid, err := tz.GetZone(p)\n\tif err != nil {\n\t\treturn \"\", errNoZone\n\t}\n\tzone, err := time.LoadLocation(tzid[0])\n\tif err != nil {\n\t\treturn \"\", errNoZone\n\t}\n\toffset := time.Second * time.Duration(getOffset(target, zone))\n\taddress := res[0].DisplayName\n\n\tif timeNow().UTC().Add(offset).Before(target) {\n\t\thumandur := durafmt.Parse(target.Sub(timeNow().UTC().Add(offset)))\n\t\treturn fmt.Sprintf(stNewYearWillHappen, address, removeMilliseconds(humandur)), nil\n\t}\n\thumandur := durafmt.Parse(timeNow().UTC().Add(offset).Sub(target))\n\treturn fmt.Sprintf(stNewYearHappenned, address, removeMilliseconds(humandur)), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The oauth package provides support for making\n\/\/ OAuth2-authenticated HTTP requests.\n\/\/\n\/\/ Example usage:\n\/\/\n\/\/\t\/\/ Specify your configuration. (typically as a global variable)\n\/\/\tvar config = &oauth.Config{\n\/\/\t\tClientId: YOUR_CLIENT_ID,\n\/\/\t\tClientSecret: YOUR_CLIENT_SECRET,\n\/\/\t\tScope: \"https:\/\/www.googleapis.com\/auth\/buzz\",\n\/\/\t\tAuthURL: \"https:\/\/accounts.google.com\/o\/oauth2\/auth\",\n\/\/\t\tTokenURL: \"https:\/\/accounts.google.com\/o\/oauth2\/token\",\n\/\/\t\tRedirectURL: \"http:\/\/you.example.org\/handler\",\n\/\/\t}\n\/\/\n\/\/\t\/\/ A landing page redirects to the OAuth provider to get the auth code.\n\/\/\tfunc landing(w http.ResponseWriter, r *http.Request) {\n\/\/\t\thttp.Redirect(w, r, config.AuthCodeURL(\"foo\"), http.StatusFound)\n\/\/\t}\n\/\/\n\/\/\t\/\/ The user will be redirected back to this handler, that takes the\n\/\/\t\/\/ \"code\" query parameter and Exchanges it for an access token.\n\/\/\tfunc handler(w http.ResponseWriter, r *http.Request) {\n\/\/\t\tt := &oauth.Transport{Config: config}\n\/\/\t\tt.Exchange(r.FormValue(\"code\"))\n\/\/\t\t\/\/ The Transport now has a valid Token. Create an *http.Client\n\/\/\t\t\/\/ with which we can make authenticated API requests.\n\/\/\t\tc := t.Client()\n\/\/\t\tc.Post(...)\n\/\/\t\t\/\/ ...\n\/\/\t\t\/\/ btw, r.FormValue(\"state\") == \"foo\"\n\/\/\t}\n\/\/\npackage oauth\n\n\/\/ TODO(adg): A means of automatically saving credentials when updated.\n\nimport (\n\t\"http\"\n\t\"json\"\n\t\"os\"\n\t\"time\"\n)\n\n\/\/ Config is the configuration of an OAuth consumer.\ntype Config struct {\n\tClientId string\n\tClientSecret string\n\tScope string\n\tAuthURL string\n\tTokenURL string\n\tRedirectURL string \/\/ Defaults to out-of-band mode if empty.\n}\n\nfunc (c *Config) redirectURL() string {\n\tif c.RedirectURL != \"\" {\n\t\treturn c.RedirectURL\n\t}\n\treturn \"oob\"\n}\n\n\/\/ Token contains an end-user's tokens.\n\/\/ This is the data you must store to persist authentication.\ntype Token struct {\n\tAccessToken string \"access_token\"\n\tRefreshToken string \"refresh_token\"\n\tTokenExpiry int64 \"expires_in\"\n}\n\n\/\/ Transport implements http.RoundTripper. When configured with a valid\n\/\/ Config and Token it can be used to make authenticated HTTP requests.\n\/\/\n\/\/\tt := &oauth.Transport{config}\n\/\/ t.Exchange(code)\n\/\/ \/\/ t now contains a valid Token\n\/\/\tr, _, err := t.Client().Get(\"http:\/\/example.org\/url\/requiring\/auth\")\n\/\/\n\/\/ It will automatically refresh the Token if it can,\n\/\/ updating the supplied Token in place.\ntype Transport struct {\n\t*Config\n\t*Token\n\n\t\/\/ Transport is the HTTP transport to use when making requests.\n\t\/\/ It will default to http.DefaultTransport if nil.\n\t\/\/ (It should never be an oauth.Transport.)\n\tTransport http.RoundTripper\n}\n\n\/\/ Client returns an *http.Client that makes OAuth-authenticated requests.\nfunc (t *Transport) Client() *http.Client {\n\treturn &http.Client{Transport: t}\n}\n\nfunc (t *Transport) transport() http.RoundTripper {\n\tif t.Transport != nil {\n\t\treturn t.Transport\n\t}\n\treturn http.DefaultTransport\n}\n\n\/\/ AuthCodeURL returns a URL that the end-user should be redirected to,\n\/\/ so that they may obtain an authorization code.\nfunc (c *Config) AuthCodeURL(state string) string {\n\turl, err := http.ParseURL(c.AuthURL)\n\tif err != nil {\n\t\tpanic(\"AuthURL malformed: \" + err.String())\n\t}\n\tq := http.EncodeQuery(map[string][]string{\n\t\t\"response_type\": {\"code\"},\n\t\t\"client_id\": {c.ClientId},\n\t\t\"redirect_uri\": {c.redirectURL()},\n\t\t\"scope\": {c.Scope},\n\t\t\"state\": {state},\n\t})\n\tif url.RawQuery == \"\" {\n\t\turl.RawQuery = q\n\t} else {\n\t\turl.RawQuery += \"&\" + q\n\t}\n\treturn url.String()\n}\n\n\/\/ Exchange takes a code and gets access Token from the remote server.\nfunc (t *Transport) Exchange(code string) (tok *Token, err os.Error) {\n\tif t.Config == nil {\n\t\treturn nil, os.NewError(\"no Config supplied\")\n\t}\n\ttok = new(Token)\n\terr = t.updateToken(tok, map[string]string{\n\t\t\"grant_type\": \"authorization_code\",\n\t\t\"redirect_uri\": t.redirectURL(),\n\t\t\"scope\": t.Scope,\n\t\t\"code\": code,\n\t})\n\tif err == nil {\n\t\tt.Token = tok\n\t}\n\treturn\n}\n\n\/\/ RoundTrip executes a single HTTP transaction using the Transport's\n\/\/ Token as authorization headers.\nfunc (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err os.Error) {\n\tif t.Config == nil {\n\t\treturn nil, os.NewError(\"no Config supplied\")\n\t}\n\tif t.Token == nil {\n\t\treturn nil, os.NewError(\"no Token supplied\")\n\t}\n\n\t\/\/ Make the HTTP request\n\treq.Header.Set(\"Authorization\", \"OAuth \"+t.AccessToken)\n\tif resp, err = t.transport().RoundTrip(req); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Refresh credentials if they're stale and try again\n\tif resp.StatusCode == 401 {\n\t\tif err = t.refresh(); err != nil {\n\t\t\treturn\n\t\t}\n\t\tresp, err = t.transport().RoundTrip(req)\n\t}\n\n\treturn\n}\n\nfunc (t *Transport) refresh() os.Error {\n\treturn t.updateToken(t.Token, map[string]string{\n\t\t\"grant_type\": \"refresh_token\",\n\t\t\"refresh_token\": t.RefreshToken,\n\t})\n}\n\nfunc (t *Transport) updateToken(tok *Token, form map[string]string) os.Error {\n\tform[\"client_id\"] = t.ClientId\n\tform[\"client_secret\"] = t.ClientSecret\n\tr, err := (&http.Client{Transport: t.transport()}).PostForm(t.TokenURL, form)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Body.Close()\n\tif r.StatusCode != 200 {\n\t\treturn os.NewError(\"invalid response: \" + r.Status)\n\t}\n\tif err = json.NewDecoder(r.Body).Decode(tok); err != nil {\n\t\treturn err\n\t}\n\tif tok.TokenExpiry != 0 {\n\t\ttok.TokenExpiry = time.Seconds() + tok.TokenExpiry\n\t}\n\treturn nil\n}\n<commit_msg>update to latest http package'<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ The oauth package provides support for making\n\/\/ OAuth2-authenticated HTTP requests.\n\/\/\n\/\/ Example usage:\n\/\/\n\/\/\t\/\/ Specify your configuration. (typically as a global variable)\n\/\/\tvar config = &oauth.Config{\n\/\/\t\tClientId: YOUR_CLIENT_ID,\n\/\/\t\tClientSecret: YOUR_CLIENT_SECRET,\n\/\/\t\tScope: \"https:\/\/www.googleapis.com\/auth\/buzz\",\n\/\/\t\tAuthURL: \"https:\/\/accounts.google.com\/o\/oauth2\/auth\",\n\/\/\t\tTokenURL: \"https:\/\/accounts.google.com\/o\/oauth2\/token\",\n\/\/\t\tRedirectURL: \"http:\/\/you.example.org\/handler\",\n\/\/\t}\n\/\/\n\/\/\t\/\/ A landing page redirects to the OAuth provider to get the auth code.\n\/\/\tfunc landing(w http.ResponseWriter, r *http.Request) {\n\/\/\t\thttp.Redirect(w, r, config.AuthCodeURL(\"foo\"), http.StatusFound)\n\/\/\t}\n\/\/\n\/\/\t\/\/ The user will be redirected back to this handler, that takes the\n\/\/\t\/\/ \"code\" query parameter and Exchanges it for an access token.\n\/\/\tfunc handler(w http.ResponseWriter, r *http.Request) {\n\/\/\t\tt := &oauth.Transport{Config: config}\n\/\/\t\tt.Exchange(r.FormValue(\"code\"))\n\/\/\t\t\/\/ The Transport now has a valid Token. Create an *http.Client\n\/\/\t\t\/\/ with which we can make authenticated API requests.\n\/\/\t\tc := t.Client()\n\/\/\t\tc.Post(...)\n\/\/\t\t\/\/ ...\n\/\/\t\t\/\/ btw, r.FormValue(\"state\") == \"foo\"\n\/\/\t}\n\/\/\npackage oauth\n\n\/\/ TODO(adg): A means of automatically saving credentials when updated.\n\nimport (\n\t\"http\"\n\t\"json\"\n\t\"os\"\n\t\"time\"\n)\n\n\/\/ Config is the configuration of an OAuth consumer.\ntype Config struct {\n\tClientId string\n\tClientSecret string\n\tScope string\n\tAuthURL string\n\tTokenURL string\n\tRedirectURL string \/\/ Defaults to out-of-band mode if empty.\n}\n\nfunc (c *Config) redirectURL() string {\n\tif c.RedirectURL != \"\" {\n\t\treturn c.RedirectURL\n\t}\n\treturn \"oob\"\n}\n\n\/\/ Token contains an end-user's tokens.\n\/\/ This is the data you must store to persist authentication.\ntype Token struct {\n\tAccessToken string \"access_token\"\n\tRefreshToken string \"refresh_token\"\n\tTokenExpiry int64 \"expires_in\"\n}\n\n\/\/ Transport implements http.RoundTripper. When configured with a valid\n\/\/ Config and Token it can be used to make authenticated HTTP requests.\n\/\/\n\/\/\tt := &oauth.Transport{config}\n\/\/ t.Exchange(code)\n\/\/ \/\/ t now contains a valid Token\n\/\/\tr, _, err := t.Client().Get(\"http:\/\/example.org\/url\/requiring\/auth\")\n\/\/\n\/\/ It will automatically refresh the Token if it can,\n\/\/ updating the supplied Token in place.\ntype Transport struct {\n\t*Config\n\t*Token\n\n\t\/\/ Transport is the HTTP transport to use when making requests.\n\t\/\/ It will default to http.DefaultTransport if nil.\n\t\/\/ (It should never be an oauth.Transport.)\n\tTransport http.RoundTripper\n}\n\n\/\/ Client returns an *http.Client that makes OAuth-authenticated requests.\nfunc (t *Transport) Client() *http.Client {\n\treturn &http.Client{Transport: t}\n}\n\nfunc (t *Transport) transport() http.RoundTripper {\n\tif t.Transport != nil {\n\t\treturn t.Transport\n\t}\n\treturn http.DefaultTransport\n}\n\n\/\/ AuthCodeURL returns a URL that the end-user should be redirected to,\n\/\/ so that they may obtain an authorization code.\nfunc (c *Config) AuthCodeURL(state string) string {\n\turl, err := http.ParseURL(c.AuthURL)\n\tif err != nil {\n\t\tpanic(\"AuthURL malformed: \" + err.String())\n\t}\n\tq := http.Values{\n\t\t\"response_type\": {\"code\"},\n\t\t\"client_id\": {c.ClientId},\n\t\t\"redirect_uri\": {c.redirectURL()},\n\t\t\"scope\": {c.Scope},\n\t\t\"state\": {state},\n\t}.Encode()\n\tif url.RawQuery == \"\" {\n\t\turl.RawQuery = q\n\t} else {\n\t\turl.RawQuery += \"&\" + q\n\t}\n\treturn url.String()\n}\n\n\/\/ Exchange takes a code and gets access Token from the remote server.\nfunc (t *Transport) Exchange(code string) (tok *Token, err os.Error) {\n\tif t.Config == nil {\n\t\treturn nil, os.NewError(\"no Config supplied\")\n\t}\n\ttok = new(Token)\n\terr = t.updateToken(tok, http.Values{\n\t\t\"grant_type\": {\"authorization_code\"},\n\t\t\"redirect_uri\": {t.redirectURL()},\n\t\t\"scope\": {t.Scope},\n\t\t\"code\": {code},\n\t})\n\tif err == nil {\n\t\tt.Token = tok\n\t}\n\treturn\n}\n\n\/\/ RoundTrip executes a single HTTP transaction using the Transport's\n\/\/ Token as authorization headers.\nfunc (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err os.Error) {\n\tif t.Config == nil {\n\t\treturn nil, os.NewError(\"no Config supplied\")\n\t}\n\tif t.Token == nil {\n\t\treturn nil, os.NewError(\"no Token supplied\")\n\t}\n\n\t\/\/ Make the HTTP request\n\treq.Header.Set(\"Authorization\", \"OAuth \"+t.AccessToken)\n\tif resp, err = t.transport().RoundTrip(req); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Refresh credentials if they're stale and try again\n\tif resp.StatusCode == 401 {\n\t\tif err = t.refresh(); err != nil {\n\t\t\treturn\n\t\t}\n\t\tresp, err = t.transport().RoundTrip(req)\n\t}\n\n\treturn\n}\n\nfunc (t *Transport) refresh() os.Error {\n\treturn t.updateToken(t.Token, http.Values{\n\t\t\"grant_type\": {\"refresh_token\"},\n\t\t\"refresh_token\": {t.RefreshToken},\n\t})\n}\n\nfunc (t *Transport) updateToken(tok *Token, v http.Values) os.Error {\n\tv.Set(\"client_id\", t.ClientId)\n\tv.Set(\"client_secret\", t.ClientSecret)\n\tr, err := (&http.Client{Transport: t.transport()}).PostForm(t.TokenURL, v)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Body.Close()\n\tif r.StatusCode != 200 {\n\t\treturn os.NewError(\"invalid response: \" + r.Status)\n\t}\n\tif err = json.NewDecoder(r.Body).Decode(tok); err != nil {\n\t\treturn err\n\t}\n\tif tok.TokenExpiry != 0 {\n\t\ttok.TokenExpiry = time.Seconds() + tok.TokenExpiry\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage docker\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/dotcloud\/docker\"\n\tdclient \"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/globocom\/config\"\n\t\"github.com\/globocom\/docker-cluster\/cluster\"\n\t\"github.com\/globocom\/tsuru\/fs\"\n\t\"github.com\/globocom\/tsuru\/log\"\n\t\"github.com\/globocom\/tsuru\/provision\"\n\t\"io\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t\"strings\"\n)\n\nvar dockerCluster *cluster.Cluster\n\nfunc init() {\n\tdockerCluster, _ = cluster.New(\n\t\tcluster.Node{ID: \"server\", Address: \"http:\/\/localhost:4243\"},\n\t)\n}\n\nvar fsystem fs.Fs\n\nfunc filesystem() fs.Fs {\n\tif fsystem == nil {\n\t\tfsystem = fs.OsFs{}\n\t}\n\treturn fsystem\n}\n\n\/\/ runCmd executes commands and log the given stdout and stderror.\nfunc runCmd(cmd string, args ...string) (string, error) {\n\tout := bytes.Buffer{}\n\terr := executor().Execute(cmd, args, nil, &out, &out)\n\tlog.Printf(\"running the cmd: %s with the args: %s\", cmd, args)\n\tif err != nil {\n\t\treturn \"\", &cmdError{cmd: cmd, args: args, err: err, out: out.String()}\n\t}\n\treturn out.String(), nil\n}\n\nfunc getPort() (string, error) {\n\treturn config.GetString(\"docker:run-cmd:port\")\n}\n\ntype container struct {\n\tID string `bson:\"_id\"`\n\tAppName string\n\tType string\n\tIP string\n\tPort string\n\tHostPort string\n\tStatus string\n\tVersion string\n\tImage string\n}\n\nfunc (c *container) getAddress() string {\n\thostAddr, err := config.Get(\"docker:host-address\")\n\tif err != nil {\n\t\tlog.Printf(\"Failed to obtain container address: %s\", err.Error())\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"http:\/\/%s:%s\", hostAddr, c.HostPort)\n}\n\n\/\/ newContainer creates a new container in Docker and stores it in the database.\n\/\/\n\/\/ TODO (flaviamissi): make it atomic\nfunc newContainer(app provision.App, imageId string, commands []string) (*container, error) {\n\tappName := app.GetName()\n\tc := container{\n\t\tAppName: appName,\n\t\tType: app.GetPlatform(),\n\t}\n\terr := c.create(imageId, commands)\n\tif err != nil {\n\t\tlog.Printf(\"Error creating container for the app %q: %s\", appName, err)\n\t\treturn nil, err\n\t}\n\treturn &c, nil\n}\n\n\/\/ hostPort returns the host port mapped for the container.\nfunc (c *container) hostPort() (string, error) {\n\tif c.Port == \"\" {\n\t\treturn \"\", errors.New(\"Container does not contain any mapped port\")\n\t}\n\tdockerContainer, err := dockerCluster.InspectContainer(c.ID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif dockerContainer.NetworkSettings != nil {\n\t\tmappedPorts := dockerContainer.NetworkSettings.PortMapping\n\t\tif port, ok := mappedPorts[c.Port]; ok {\n\t\t\treturn port, nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"Container port %s is not mapped to any host port\", c.Port)\n}\n\n\/\/ ip returns the ip for the container.\nfunc (c *container) ip() (string, error) {\n\tdockerContainer, err := dockerCluster.InspectContainer(c.ID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif dockerContainer.NetworkSettings == nil {\n\t\tmsg := \"Error when getting container information. NetworkSettings is missing.\"\n\t\tlog.Print(msg)\n\t\treturn \"\", errors.New(msg)\n\t}\n\tinstanceIP := dockerContainer.NetworkSettings.IPAddress\n\tif instanceIP == \"\" {\n\t\tmsg := \"error: Can't get ipaddress...\"\n\t\tlog.Print(msg)\n\t\treturn \"\", errors.New(msg)\n\t}\n\tlog.Printf(\"Instance IpAddress: %s\", instanceIP)\n\treturn instanceIP, nil\n}\n\n\/\/ create creates a docker container, stores it on the database and adds a route to it.\n\/\/\n\/\/ It receives the related application in order to choose the correct\n\/\/ docker image and the repository to pass to the script that will take\n\/\/ care of the deploy, and a function to generate the correct command ran by\n\/\/ docker, which might be to deploy a container or to run and expose a\n\/\/ container for an application.\nfunc (c *container) create(imageId string, commands []string) error {\n\tport, err := getPort()\n\tif err != nil {\n\t\treturn err\n\t}\n\tuser, err := config.GetString(\"docker:ssh:user\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tconfig := docker.Config{\n\t\tImage: imageId,\n\t\tCmd: commands,\n\t\tPortSpecs: []string{port},\n\t\tUser: user,\n\t\tAttachStdin: false,\n\t\tAttachStdout: false,\n\t\tAttachStderr: false,\n\t}\n\t_, cont, err := dockerCluster.CreateContainer(&config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.ID = cont.ID\n\tc.Port = port\n\tip, err := c.ip()\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.IP = ip\n\thostPort, err := c.hostPort()\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.HostPort = hostPort\n\tc.Status = \"created\"\n\tcoll := collection()\n\tdefer coll.Database.Session.Close()\n\tif err := coll.Insert(c); err != nil {\n\t\tlog.Print(err)\n\t\treturn err\n\t}\n\tr, err := getRouter()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn r.AddRoute(c.AppName, c.getAddress())\n}\n\nfunc (c *container) setStatus(status string) error {\n\tc.Status = status\n\tcoll := collection()\n\tdefer coll.Database.Session.Close()\n\treturn coll.UpdateId(c.ID, c)\n}\n\nfunc (c *container) setImage(imageId string) error {\n\tc.Image = imageId\n\tcoll := collection()\n\tdefer coll.Database.Session.Close()\n\treturn coll.UpdateId(c.ID, c)\n}\n\nfunc deploy(app provision.App, version string, w io.Writer) (string, error) {\n\tcommands, err := deployCmds(app, version)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\timageId := getImage(app)\n\tc, err := newContainer(app, imageId, commands)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor {\n\t\tresult, err := c.stopped()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif result {\n\t\t\tbreak\n\t\t}\n\t}\n\terr = c.logs(w)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\timageId, err = c.commit()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\terr = c.remove()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn imageId, nil\n}\n\nfunc start(app provision.App, imageId string, w io.Writer) (*container, error) {\n\tcommands, err := runCmds()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc, err := newContainer(app, imageId, commands)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = c.setImage(imageId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = c.setStatus(\"running\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}\n\n\/\/ remove removes a docker container.\nfunc (c *container) remove() error {\n\taddress := c.getAddress()\n\tlog.Printf(\"Removing container %s from docker\", c.ID)\n\terr := dockerCluster.RemoveContainer(c.ID)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to remove container from docker: %s\", err)\n\t\treturn err\n\t}\n\trunCmd(\"ssh-keygen\", \"-R\", c.IP)\n\tlog.Printf(\"Removing container %s from database\", c.ID)\n\tcoll := collection()\n\tdefer coll.Database.Session.Close()\n\tif err := coll.RemoveId(c.ID); err != nil {\n\t\tlog.Printf(\"Failed to remove container from database: %s\", err.Error())\n\t\treturn err\n\t}\n\tr, err := getRouter()\n\tif err != nil {\n\t\tlog.Printf(\"Failed to obtain router: %s\", err.Error())\n\t\treturn err\n\t}\n\tif err := r.RemoveRoute(c.AppName, address); err != nil {\n\t\tlog.Printf(\"Failed to remove route: %s\", err.Error())\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *container) ssh(stdout, stderr io.Writer, cmd string, args ...string) error {\n\tstderr = &filter{w: stderr, content: []byte(\"unable to resolve host\")}\n\tuser, err := config.GetString(\"docker:ssh:user\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tsshArgs := []string{c.IP, \"-l\", user, \"-o\", \"StrictHostKeyChecking no\"}\n\tif keyFile, err := config.GetString(\"docker:ssh:private-key\"); err == nil {\n\t\tsshArgs = append(sshArgs, \"-i\", keyFile)\n\t}\n\tsshArgs = append(sshArgs, \"--\", cmd)\n\tsshArgs = append(sshArgs, args...)\n\treturn executor().Execute(\"ssh\", sshArgs, nil, stdout, stderr)\n}\n\n\/\/ commit commits an image in docker based in the container\nfunc (c *container) commit() (string, error) {\n\topts := dclient.CommitContainerOptions{Container: c.ID}\n\timage, err := dockerCluster.CommitContainer(opts)\n\tif err != nil {\n\t\tlog.Printf(\"Could not commit docker image: %s\", err.Error())\n\t\treturn \"\", err\n\t}\n\treturn image.ID, nil\n}\n\n\/\/ stopped returns true if the container is stopped.\nfunc (c *container) stopped() (bool, error) {\n\tdockerContainer, err := dockerCluster.InspectContainer(c.ID)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\trunning := dockerContainer.State.Running\n\treturn !running, nil\n}\n\n\/\/ logs returns logs for the container.\nfunc (c *container) logs(w io.Writer) error {\n\topts := dclient.AttachToContainerOptions{\n\t\tContainer: c.ID,\n\t\tLogs: true,\n\t\tOutputStream: w,\n\t}\n\treturn dockerCluster.AttachToContainer(opts)\n}\n\nfunc getContainer(id string) (*container, error) {\n\tvar c container\n\tcoll := collection()\n\tdefer coll.Database.Session.Close()\n\terr := coll.Find(bson.M{\"_id\": id}).One(&c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &c, nil\n}\n\nfunc listAppContainers(appName string) ([]container, error) {\n\tvar containers []container\n\terr := collection().Find(bson.M{\"appname\": appName}).All(&containers)\n\treturn containers, err\n}\n\n\/\/ getImage returns the image name or id from an app.\nfunc getImage(app provision.App) string {\n\tvar c container\n\tcollection().Find(bson.M{\"appname\": app.GetName()}).One(&c)\n\tif c.Image != \"\" {\n\t\treturn c.Image\n\t}\n\trepoNamespace, err := config.GetString(\"docker:repository-namespace\")\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"%s\/%s\", repoNamespace, app.GetPlatform())\n}\n\n\/\/ removeImage removes an image from docker registry\nfunc removeImage(imageId string) error {\n\treturn dockerCluster.RemoveImage(imageId)\n}\n\ntype cmdError struct {\n\tcmd string\n\targs []string\n\terr error\n\tout string\n}\n\nfunc (e *cmdError) Error() string {\n\tcommand := e.cmd + \" \" + strings.Join(e.args, \" \")\n\treturn fmt.Sprintf(\"Failed to run command %q (%s): %s.\", command, e.err, e.out)\n}\n<commit_msg>docker: igoring errors when ip or port is none.<commit_after>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage docker\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/dotcloud\/docker\"\n\tdclient \"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/globocom\/config\"\n\t\"github.com\/globocom\/docker-cluster\/cluster\"\n\t\"github.com\/globocom\/tsuru\/fs\"\n\t\"github.com\/globocom\/tsuru\/log\"\n\t\"github.com\/globocom\/tsuru\/provision\"\n\t\"io\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t\"strings\"\n)\n\nvar dockerCluster *cluster.Cluster\n\nfunc init() {\n\tdockerCluster, _ = cluster.New(\n\t\tcluster.Node{ID: \"server\", Address: \"http:\/\/localhost:4243\"},\n\t)\n}\n\nvar fsystem fs.Fs\n\nfunc filesystem() fs.Fs {\n\tif fsystem == nil {\n\t\tfsystem = fs.OsFs{}\n\t}\n\treturn fsystem\n}\n\n\/\/ runCmd executes commands and log the given stdout and stderror.\nfunc runCmd(cmd string, args ...string) (string, error) {\n\tout := bytes.Buffer{}\n\terr := executor().Execute(cmd, args, nil, &out, &out)\n\tlog.Printf(\"running the cmd: %s with the args: %s\", cmd, args)\n\tif err != nil {\n\t\treturn \"\", &cmdError{cmd: cmd, args: args, err: err, out: out.String()}\n\t}\n\treturn out.String(), nil\n}\n\nfunc getPort() (string, error) {\n\treturn config.GetString(\"docker:run-cmd:port\")\n}\n\ntype container struct {\n\tID string `bson:\"_id\"`\n\tAppName string\n\tType string\n\tIP string\n\tPort string\n\tHostPort string\n\tStatus string\n\tVersion string\n\tImage string\n}\n\nfunc (c *container) getAddress() string {\n\thostAddr, err := config.Get(\"docker:host-address\")\n\tif err != nil {\n\t\tlog.Printf(\"Failed to obtain container address: %s\", err.Error())\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"http:\/\/%s:%s\", hostAddr, c.HostPort)\n}\n\n\/\/ newContainer creates a new container in Docker and stores it in the database.\n\/\/\n\/\/ TODO (flaviamissi): make it atomic\nfunc newContainer(app provision.App, imageId string, commands []string) (*container, error) {\n\tappName := app.GetName()\n\tc := container{\n\t\tAppName: appName,\n\t\tType: app.GetPlatform(),\n\t}\n\terr := c.create(imageId, commands)\n\tif err != nil {\n\t\tlog.Printf(\"Error creating container for the app %q: %s\", appName, err)\n\t\treturn nil, err\n\t}\n\treturn &c, nil\n}\n\n\/\/ hostPort returns the host port mapped for the container.\nfunc (c *container) hostPort() (string, error) {\n\tif c.Port == \"\" {\n\t\treturn \"\", errors.New(\"Container does not contain any mapped port\")\n\t}\n\tdockerContainer, err := dockerCluster.InspectContainer(c.ID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif dockerContainer.NetworkSettings != nil {\n\t\tmappedPorts := dockerContainer.NetworkSettings.PortMapping\n\t\tif port, ok := mappedPorts[c.Port]; ok {\n\t\t\treturn port, nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"Container port %s is not mapped to any host port\", c.Port)\n}\n\n\/\/ ip returns the ip for the container.\nfunc (c *container) ip() (string, error) {\n\tdockerContainer, err := dockerCluster.InspectContainer(c.ID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif dockerContainer.NetworkSettings == nil {\n\t\tmsg := \"Error when getting container information. NetworkSettings is missing.\"\n\t\tlog.Print(msg)\n\t\treturn \"\", errors.New(msg)\n\t}\n\tinstanceIP := dockerContainer.NetworkSettings.IPAddress\n\tif instanceIP == \"\" {\n\t\tmsg := \"error: Can't get ipaddress...\"\n\t\tlog.Print(msg)\n\t\treturn \"\", errors.New(msg)\n\t}\n\tlog.Printf(\"Instance IpAddress: %s\", instanceIP)\n\treturn instanceIP, nil\n}\n\n\/\/ create creates a docker container, stores it on the database and adds a route to it.\n\/\/\n\/\/ It receives the related application in order to choose the correct\n\/\/ docker image and the repository to pass to the script that will take\n\/\/ care of the deploy, and a function to generate the correct command ran by\n\/\/ docker, which might be to deploy a container or to run and expose a\n\/\/ container for an application.\nfunc (c *container) create(imageId string, commands []string) error {\n\tport, err := getPort()\n\tif err != nil {\n\t\treturn err\n\t}\n\tuser, err := config.GetString(\"docker:ssh:user\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tconfig := docker.Config{\n\t\tImage: imageId,\n\t\tCmd: commands,\n\t\tPortSpecs: []string{port},\n\t\tUser: user,\n\t\tAttachStdin: false,\n\t\tAttachStdout: false,\n\t\tAttachStderr: false,\n\t}\n\t_, cont, err := dockerCluster.CreateContainer(&config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.ID = cont.ID\n\tc.Port = port\n\tip, err := c.ip()\n\t\/\/ if err != nil {\n\t\/\/ \treturn err\n\t\/\/ }\n\tc.IP = ip\n\thostPort, err := c.hostPort()\n\t\/\/ if err != nil {\n\t\/\/ \treturn err\n\t\/\/ }\n\tc.HostPort = hostPort\n\tc.Status = \"created\"\n\tcoll := collection()\n\tdefer coll.Database.Session.Close()\n\tif err := coll.Insert(c); err != nil {\n\t\tlog.Print(err)\n\t\treturn err\n\t}\n\tr, err := getRouter()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn r.AddRoute(c.AppName, c.getAddress())\n}\n\nfunc (c *container) setStatus(status string) error {\n\tc.Status = status\n\tcoll := collection()\n\tdefer coll.Database.Session.Close()\n\treturn coll.UpdateId(c.ID, c)\n}\n\nfunc (c *container) setImage(imageId string) error {\n\tc.Image = imageId\n\tcoll := collection()\n\tdefer coll.Database.Session.Close()\n\treturn coll.UpdateId(c.ID, c)\n}\n\nfunc deploy(app provision.App, version string, w io.Writer) (string, error) {\n\tcommands, err := deployCmds(app, version)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\timageId := getImage(app)\n\tc, err := newContainer(app, imageId, commands)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor {\n\t\tresult, err := c.stopped()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif result {\n\t\t\tbreak\n\t\t}\n\t}\n\terr = c.logs(w)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\timageId, err = c.commit()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\terr = c.remove()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn imageId, nil\n}\n\nfunc start(app provision.App, imageId string, w io.Writer) (*container, error) {\n\tcommands, err := runCmds()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc, err := newContainer(app, imageId, commands)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = c.setImage(imageId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = c.setStatus(\"running\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}\n\n\/\/ remove removes a docker container.\nfunc (c *container) remove() error {\n\taddress := c.getAddress()\n\tlog.Printf(\"Removing container %s from docker\", c.ID)\n\terr := dockerCluster.RemoveContainer(c.ID)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to remove container from docker: %s\", err)\n\t\treturn err\n\t}\n\trunCmd(\"ssh-keygen\", \"-R\", c.IP)\n\tlog.Printf(\"Removing container %s from database\", c.ID)\n\tcoll := collection()\n\tdefer coll.Database.Session.Close()\n\tif err := coll.RemoveId(c.ID); err != nil {\n\t\tlog.Printf(\"Failed to remove container from database: %s\", err.Error())\n\t\treturn err\n\t}\n\tr, err := getRouter()\n\tif err != nil {\n\t\tlog.Printf(\"Failed to obtain router: %s\", err.Error())\n\t\treturn err\n\t}\n\tif err := r.RemoveRoute(c.AppName, address); err != nil {\n\t\tlog.Printf(\"Failed to remove route: %s\", err.Error())\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *container) ssh(stdout, stderr io.Writer, cmd string, args ...string) error {\n\tstderr = &filter{w: stderr, content: []byte(\"unable to resolve host\")}\n\tuser, err := config.GetString(\"docker:ssh:user\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tsshArgs := []string{c.IP, \"-l\", user, \"-o\", \"StrictHostKeyChecking no\"}\n\tif keyFile, err := config.GetString(\"docker:ssh:private-key\"); err == nil {\n\t\tsshArgs = append(sshArgs, \"-i\", keyFile)\n\t}\n\tsshArgs = append(sshArgs, \"--\", cmd)\n\tsshArgs = append(sshArgs, args...)\n\treturn executor().Execute(\"ssh\", sshArgs, nil, stdout, stderr)\n}\n\n\/\/ commit commits an image in docker based in the container\nfunc (c *container) commit() (string, error) {\n\topts := dclient.CommitContainerOptions{Container: c.ID}\n\timage, err := dockerCluster.CommitContainer(opts)\n\tif err != nil {\n\t\tlog.Printf(\"Could not commit docker image: %s\", err.Error())\n\t\treturn \"\", err\n\t}\n\treturn image.ID, nil\n}\n\n\/\/ stopped returns true if the container is stopped.\nfunc (c *container) stopped() (bool, error) {\n\tdockerContainer, err := dockerCluster.InspectContainer(c.ID)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\trunning := dockerContainer.State.Running\n\treturn !running, nil\n}\n\n\/\/ logs returns logs for the container.\nfunc (c *container) logs(w io.Writer) error {\n\topts := dclient.AttachToContainerOptions{\n\t\tContainer: c.ID,\n\t\tLogs: true,\n\t\tOutputStream: w,\n\t}\n\treturn dockerCluster.AttachToContainer(opts)\n}\n\nfunc getContainer(id string) (*container, error) {\n\tvar c container\n\tcoll := collection()\n\tdefer coll.Database.Session.Close()\n\terr := coll.Find(bson.M{\"_id\": id}).One(&c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &c, nil\n}\n\nfunc listAppContainers(appName string) ([]container, error) {\n\tvar containers []container\n\terr := collection().Find(bson.M{\"appname\": appName}).All(&containers)\n\treturn containers, err\n}\n\n\/\/ getImage returns the image name or id from an app.\nfunc getImage(app provision.App) string {\n\tvar c container\n\tcollection().Find(bson.M{\"appname\": app.GetName()}).One(&c)\n\tif c.Image != \"\" {\n\t\treturn c.Image\n\t}\n\trepoNamespace, err := config.GetString(\"docker:repository-namespace\")\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"%s\/%s\", repoNamespace, app.GetPlatform())\n}\n\n\/\/ removeImage removes an image from docker registry\nfunc removeImage(imageId string) error {\n\treturn dockerCluster.RemoveImage(imageId)\n}\n\ntype cmdError struct {\n\tcmd string\n\targs []string\n\terr error\n\tout string\n}\n\nfunc (e *cmdError) Error() string {\n\tcommand := e.cmd + \" \" + strings.Join(e.args, \" \")\n\treturn fmt.Sprintf(\"Failed to run command %q (%s): %s.\", command, e.err, e.out)\n}\n<|endoftext|>"} {"text":"<commit_before>package casper\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/cascades-fbp\/cascades\/runtime\"\n\t. \"github.com\/gogap\/base_component\"\n)\n\nconst (\n\tREQ_X_API = \"X-API\"\n\tREQ_TIMEOUT = time.Duration(15) * time.Second\n)\n\ntype Response struct {\n\tCode uint64 `json:\"code\"`\n\tMessage string `json:\"message,omitempty\"`\n\tResult interface{} `json:\"result,omitempty\"`\n}\n\nfunc handle(p *App) func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tp.logger.Debug(\"http Handler:\", r.Method, r.RequestURI)\n\n\t\tapiName := r.Header.Get(REQ_X_API)\n\t\tif apiName == \"\" {\n\t\t\tp.logger.Errorln(\"request api name nil\")\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\t\tport := p.GetApi(apiName)\n\t\tif port == nil {\n\t\t\tp.logger.Errorln(\"request api 404\")\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\n\t\treqBody, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tp.logger.Errorln(\"request body err:\", p.Name, err.Error())\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tw.Write([]byte(\"Read request body error\"))\n\t\t\treturn\n\t\t}\n\t\tp.logger.Debug(\"req:\", apiName, string(reqBody))\n\n\t\tcomponentMsg, _ := NewComponentMessage()\n\t\tcomponentMsg.Payload.Result = reqBody\n\n\t\tch := p.AddRequest(componentMsg.ID)\n\t\tdefer p.DelRequest(componentMsg.ID)\n\t\tdefer close(ch)\n\n\t\t\/\/ Send Component message\n\t\tmsgBytes, err := componentMsg.Serialize()\n\t\tif err != nil {\n\t\t\tp.logger.Errorln(\"Service Internal Error\")\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tw.Write([]byte(\"Service Internal Error\"))\n\t\t\treturn\n\t\t}\n\t\tp.logger.Infoln(\"ToNextComponent:\", port.outPort[0].Url, string(msgBytes))\n\t\tport.outPort[0].Socket.SendMessage(runtime.NewPacket(msgBytes))\n\n\t\t\/\/ Wait for response from IN port\n\t\tp.logger.Debug(\"Waiting for response from a channel port (from INPUT port)\")\n\t\tvar load *Payload\n\t\tselect {\n\t\tcase load = <-ch:\n\t\t\tbreak\n\t\tcase <-time.Tick(REQ_TIMEOUT):\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tw.Write([]byte(\"Couldn't process request in a given time\"))\n\t\t\treturn\n\t\t}\n\n\t\tp.logger.Infoln(\"Data arrived. Responding to HTTP response...\")\n\t\tobjResp := Response{\n\t\t\tCode: load.Code,\n\t\t\tMessage: load.Message,\n\t\t\tResult: load.Result}\n\n\t\tbResp, _ := json.Marshal(objResp)\n\t\tw.Write(bResp)\n\t}\n}\n<commit_msg>update<commit_after>package casper\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/cascades-fbp\/cascades\/runtime\"\n\t. \"github.com\/gogap\/base_component\"\n)\n\nconst (\n\tREQ_X_API = \"X-API\"\n\tREQ_TIMEOUT = time.Duration(15) * time.Second\n)\n\ntype Response struct {\n\tCode uint64 `json:\"code\"`\n\tMessage string `json:\"message,omitempty\"`\n\tResult interface{} `json:\"result,omitempty\"`\n}\n\nfunc handle(p *App) func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tp.logger.Debug(\"http Handler:\", r.Method, r.RequestURI)\n\n\t\tapiName := r.Header.Get(REQ_X_API)\n\t\tif apiName == \"\" {\n\t\t\tp.logger.Errorln(\"request api name nil\")\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\t\tport := p.GetApi(apiName)\n\t\tif port == nil {\n\t\t\tp.logger.Errorln(\"request api 404\")\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\n\t\treqBody, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\tp.logger.Errorln(\"request body err:\", p.Name, err.Error())\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tw.Write([]byte(\"Read request body error\"))\n\t\t\treturn\n\t\t}\n\t\tp.logger.Debug(\"req:\", apiName, string(reqBody))\n\n\t\tcomponentMsg, _ := NewComponentMessage()\n\t\tcomponentMsg.Payload.Result = reqBody\n\n\t\tch := p.AddRequest(componentMsg.ID)\n\t\tdefer p.DelRequest(componentMsg.ID)\n\t\tdefer close(ch)\n\n\t\t\/\/ Send Component message\n\t\tmsgBytes, err := componentMsg.Serialize()\n\t\tif err != nil {\n\t\t\tp.logger.Errorln(\"Service Internal Error\")\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tw.Write([]byte(\"Service Internal Error\"))\n\t\t\treturn\n\t\t}\n\t\tp.logger.Infoln(\"ToNextComponent:\", port.outPort[0].Url, string(msgBytes))\n\t\tport.outPort[0].Socket.SendMessage(runtime.NewPacket(msgBytes))\n\n\t\t\/\/ Wait for response from IN port\n\t\tp.logger.Debug(\"Waiting for response from a channel port (from INPUT port)\")\n\t\tvar load *Payload\n\t\tselect {\n\t\tcase load = <-ch:\n\t\t\tbreak\n\t\tcase <-time.Tick(REQ_TIMEOUT):\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tw.Write([]byte(\"Couldn't process request in a given time\"))\n\t\t\treturn\n\t\t}\n\n\t\tobjResp := Response{\n\t\t\tCode: load.Code,\n\t\t\tMessage: load.Message,\n\t\t\tResult: load.Result}\n\n\t\tbResp, _ := json.Marshal(objResp)\n\t\tw.Write(bResp)\n\t\tp.logger.Infoln(\"Data arrived. Responding to HTTP response...\", string(bResp))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/traetox\/pty\"\n\t\"github.com\/traetox\/sshForShits\/fakeshell\"\n\t\"golang.org\/x\/crypto\/ssh\"\n)\n\nconst (\n\tpmpt = \"sh-4.3$ \"\n)\n\nfunc mainHandler(conn *ssh.ServerConn, chans <-chan ssh.NewChannel, reqs <-chan *ssh.Request, handler fakeshell.Handler, dg datagram) {\n\twg := sync.WaitGroup{}\n\twg.Add(2)\n\tgo handleRequests(reqs, &wg)\n\tgo handleChannels(chans, &wg, handler, dg)\n\twg.Wait()\n\tconn.Close()\n\tconn.Conn.Close()\n}\n\nfunc handleRequests(reqs <-chan *ssh.Request, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tfor _ = range reqs {\n\t}\n}\n\nfunc handleChannels(chans <-chan ssh.NewChannel, wg *sync.WaitGroup, handler fakeshell.Handler, dg datagram) {\n\tdefer wg.Done()\n\t\/\/ Service the incoming Channel channel.\n\tfor newChannel := range chans {\n\t\t\/\/ Channels have a type, depending on the application level\n\t\t\/\/ protocol intended. In the case of a shell, the type is\n\t\t\/\/ \"session\" and ServerShell may be used to present a simple\n\t\t\/\/ terminal interface.\n\t\tif t := newChannel.ChannelType(); t != \"session\" {\n\t\t\tnewChannel.Reject(ssh.UnknownChannelType, fmt.Sprintf(\"unknown channel type: %s\", t))\n\t\t\tcontinue\n\t\t}\n\t\tchannel, requests, err := newChannel.Accept()\n\t\tif err != nil {\n\t\t\terrLog.Printf(\"Failed to Accept new channel: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/fire up our fake shell\n\t\tc := fakeshell.New(\"sh-4.3$ \", handler, &dg)\n\t\tf, err := pty.StartFaker(c)\n\t\tif err != nil {\n\t\t\terrLog.Printf(\"Failed to start faker: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/teardown session\n\t\tvar once sync.Once\n\t\tclose := func() {\n\t\t\tchannel.Close()\n\t\t\tc.Wait()\n\t\t\tdg.Logout = time.Now().Format(time.RFC3339Nano)\n\t\t\tif len(dg.ShellActivity) > 0 {\n\t\t\t\tif err := activityClient.Write(dg); err != nil {\n\t\t\t\t\terrLog.Printf(\"Failed to write session: %v\", err)\n\t\t\t\t\tif err := activityClient.Login(); err != nil {\n\t\t\t\t\t\terrLog.Printf(\"Failed to re-login after Write error: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/pipe session to bash and vice-versa\n\t\tgo func() {\n\t\t\tio.Copy(channel, f)\n\t\t\tonce.Do(close)\n\t\t}()\n\t\tgo func() {\n\t\t\tio.Copy(f, channel)\n\t\t\tonce.Do(close)\n\t\t}()\n\n\t\t\/\/ Sessions have out-of-band requests such as \"shell\", \"pty-req\" and \"env\"\n\t\tgo func(in <-chan *ssh.Request) {\n\t\t\tfor req := range in {\n\t\t\t\tswitch req.Type {\n\t\t\t\tcase \"shell\":\n\t\t\t\t\t\/\/ We don't accept any commands (Payload),\n\t\t\t\t\t\/\/ only the default shell.\n\t\t\t\t\tif len(req.Payload) == 0 {\n\t\t\t\t\t\treq.Reply(true, nil)\n\t\t\t\t\t} else {\n\t\t\t\t\t\treq.Reply(false, nil)\n\t\t\t\t\t}\n\t\t\t\tcase \"pty-req\":\n\t\t\t\t\t\/\/ Responding 'ok' here will let the client\n\t\t\t\t\t\/\/ know we have a pty ready for input\n\t\t\t\t\treq.Reply(true, nil)\n\t\t\t\tcase \"window-change\":\n\t\t\t\t\tcontinue \/\/no response\n\t\t\t\t}\n\t\t\t}\n\t\t}(requests)\n\t}\n}\n<commit_msg>closing PTY device after fake shell exits<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/traetox\/pty\"\n\t\"github.com\/traetox\/sshForShits\/fakeshell\"\n\t\"golang.org\/x\/crypto\/ssh\"\n)\n\nconst (\n\tpmpt = \"sh-4.3$ \"\n)\n\nfunc mainHandler(conn *ssh.ServerConn, chans <-chan ssh.NewChannel, reqs <-chan *ssh.Request, handler fakeshell.Handler, dg datagram) {\n\twg := sync.WaitGroup{}\n\twg.Add(2)\n\tgo handleRequests(reqs, &wg)\n\tgo handleChannels(chans, &wg, handler, dg)\n\twg.Wait()\n\tconn.Close()\n\tconn.Conn.Close()\n}\n\nfunc handleRequests(reqs <-chan *ssh.Request, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tfor _ = range reqs {\n\t}\n}\n\nfunc handleChannels(chans <-chan ssh.NewChannel, wg *sync.WaitGroup, handler fakeshell.Handler, dg datagram) {\n\tdefer wg.Done()\n\t\/\/ Service the incoming Channel channel.\n\tfor newChannel := range chans {\n\t\t\/\/ Channels have a type, depending on the application level\n\t\t\/\/ protocol intended. In the case of a shell, the type is\n\t\t\/\/ \"session\" and ServerShell may be used to present a simple\n\t\t\/\/ terminal interface.\n\t\tif t := newChannel.ChannelType(); t != \"session\" {\n\t\t\tnewChannel.Reject(ssh.UnknownChannelType, fmt.Sprintf(\"unknown channel type: %s\", t))\n\t\t\tcontinue\n\t\t}\n\t\tchannel, requests, err := newChannel.Accept()\n\t\tif err != nil {\n\t\t\terrLog.Printf(\"Failed to Accept new channel: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/fire up our fake shell\n\t\tc := fakeshell.New(\"sh-4.3$ \", handler, &dg)\n\t\tf, err := pty.StartFaker(c)\n\t\tif err != nil {\n\t\t\terrLog.Printf(\"Failed to start faker: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/teardown session\n\t\tvar once sync.Once\n\t\tclose := func() {\n\t\t\tchannel.Close()\n\t\t\tc.Wait()\n\t\t\tf.Close() \/\/close the PTY device\n\t\t\tdg.Logout = time.Now().Format(time.RFC3339Nano)\n\t\t\tif len(dg.ShellActivity) > 0 {\n\t\t\t\tif err := activityClient.Write(dg); err != nil {\n\t\t\t\t\terrLog.Printf(\"Failed to write session: %v\", err)\n\t\t\t\t\tif err := activityClient.Login(); err != nil {\n\t\t\t\t\t\terrLog.Printf(\"Failed to re-login after Write error: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/pipe session to bash and vice-versa\n\t\tgo func() {\n\t\t\tio.Copy(channel, f)\n\t\t\tonce.Do(close)\n\t\t}()\n\t\tgo func() {\n\t\t\tio.Copy(f, channel)\n\t\t\tonce.Do(close)\n\t\t}()\n\n\t\t\/\/ Sessions have out-of-band requests such as \"shell\", \"pty-req\" and \"env\"\n\t\tgo func(in <-chan *ssh.Request) {\n\t\t\tfor req := range in {\n\t\t\t\tswitch req.Type {\n\t\t\t\tcase \"shell\":\n\t\t\t\t\t\/\/ We don't accept any commands (Payload),\n\t\t\t\t\t\/\/ only the default shell.\n\t\t\t\t\tif len(req.Payload) == 0 {\n\t\t\t\t\t\treq.Reply(true, nil)\n\t\t\t\t\t} else {\n\t\t\t\t\t\treq.Reply(false, nil)\n\t\t\t\t\t}\n\t\t\t\tcase \"pty-req\":\n\t\t\t\t\t\/\/ Responding 'ok' here will let the client\n\t\t\t\t\t\/\/ know we have a pty ready for input\n\t\t\t\t\treq.Reply(true, nil)\n\t\t\t\tcase \"window-change\":\n\t\t\t\t\tcontinue \/\/no response\n\t\t\t\t}\n\t\t\t}\n\t\t}(requests)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package fweight\n\nimport (\n\t\"net\/http\"\n)\n\n\/\/Function HandlerOf returns a Handler of a http.Handler, which\n\/\/can be used to terminatr and handle routes.\nfunc HandlerOf(h http.Handler) Handler {\n\treturn Handler{\n\t\th,\n\t}\n}\n\ntype Handler struct {\n\thttp.Handler\n}\n\nfunc (h Handler) RouteHTTP(rq *http.Request) Router {\n\treturn h\n}\n<commit_msg>HandlerFunc<commit_after>package fweight\n\nimport (\n\t\"net\/http\"\n)\n\n\/\/Function HandlerOf returns a Handler of a http.Handler, which\n\/\/can be used to terminatr and handle routes.\nfunc HandlerOf(h http.Handler) Handler {\n\treturn Handler{\n\t\th,\n\t}\n}\n\nfunc HandlerFunc(hf http.HandlerFunc) Handler {\n\treturn HandlerOf(hf)\n}\n\ntype Handler struct {\n\thttp.Handler\n}\n\nfunc (h Handler) RouteHTTP(rq *http.Request) Router {\n\treturn h\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/miekg\/dns\"\n)\n\nfunc recurse(w dns.ResponseWriter, req *dns.Msg) {\n\tif recurseTo == \"\" {\n\t\tdns.HandleFailed(w, req)\n\t\treturn\n\t} else {\n\t\tc := new(dns.Client)\n\tRedo:\n\t\tif in, _, err := c.Exchange(req, recurseTo); err == nil { \/\/ Second return value is RTT\n\t\t\tif in.MsgHdr.Truncated {\n\t\t\t\tc.Net = \"tcp\"\n\t\t\t\tgoto Redo\n\t\t\t}\n\n\t\t\tw.WriteMsg(in)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Printf(\"Recursive error: %+v\\n\", err)\n\t\t\tdns.HandleFailed(w, req)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc handleDNS(w dns.ResponseWriter, req *dns.Msg) {\n\t\/\/ BIND does not support answering multiple questions so we won't\n\tvar zone *Zone\n\tvar name string\n\n\tzones.RLock()\n\tdefer zones.RUnlock()\n\n\tif len(req.Question) != 1 {\n\t\tm := new(dns.Msg)\n\t\tm.SetReply(req)\n\t\tm.SetRcodeFormatError(req)\n\t\tw.WriteMsg(m)\n\t\treturn\n\t}\n\treq.Question[0].Name = strings.ToLower(req.Question[0].Name)\n\n\tzone, name = zones.match(req.Question[0].Name, req.Question[0].Qtype)\n\tif zone == nil {\n\t\tif recurseTo != \"\" {\n\t\t\trecurse(w, req)\n\t\t} else {\n\t\t\tm := new(dns.Msg)\n\t\t\tm.SetReply(req)\n\t\t\tm.SetRcode(req, dns.RcodeRefused)\n\t\t\tw.WriteMsg(m)\n\t\t}\n\t\treturn\n\t}\n\n\tzones.hits_mx.Lock()\n\tzones.hits[name]++\n\tzones.hits_mx.Unlock()\n\n\tm := new(dns.Msg)\n\tm.SetReply(req)\n\tm.Authoritative = true\n\tm.RecursionAvailable = false\n\n\tvar (\n\t\tanswerKnown bool\n\t\trrsetExists bool\n\t)\n\n\tfor _, r := range (*zone)[dns.RR_Header{Name: req.Question[0].Name, Class: req.Question[0].Qclass}] {\n\t\trrsetExists = true\n\t\tif r.Header().Rrtype == req.Question[0].Qtype {\n\t\t\tm.Answer = append(m.Answer, r)\n\t\t\tanswerKnown = true\n\t\t}\n\t}\n\n\t\/\/ check for a wildcard record (*.zone)\n\tif !answerKnown {\n\t\tfor _, r := range (*zone)[dns.RR_Header{Name: \"*.\" + name, Class: req.Question[0].Qclass}] {\n\t\t\trrsetExists = true\n\t\t\tif r.Header().Rrtype == req.Question[0].Qtype {\n\t\t\t\tr.Header().Name = dns.Fqdn(req.Question[0].Name)\n\t\t\t\tm.Answer = append(m.Answer, r)\n\t\t\t\tanswerKnown = true\n\t\t\t}\n\t\t}\n\t}\n\n\tif !answerKnown && rrsetExists {\n\t\t\/\/ we don't have a direct response but may find alternative record type: CNAME\n\t\tfor _, r := range (*zone)[dns.RR_Header{Name: req.Question[0].Name, Rrtype: dns.TypeCNAME, Class: req.Question[0].Qclass}] {\n\t\t\tm.Answer = append(m.Answer, r)\n\t\t\tanswerKnown = true\n\t\t}\n\n\t\t\/\/ we don't have a direct response but may find alternative record type: NS\n\t\tfor _, r := range (*zone)[dns.RR_Header{Name: req.Question[0].Name, Rrtype: dns.TypeNS, Class: req.Question[0].Qclass}] {\n\t\t\tm.Answer = append(m.Answer, r)\n\t\t\tanswerKnown = true\n\t\t}\n\t}\n\n\t\/\/ now we try recursion if enabled\n\tif !answerKnown && recurseTo != \"\" { \/\/ we don't want to handleFailed when recursion is disabled\n\t\trecurse(w, req)\n\t\treturn\n\t}\n\n\tif !answerKnown && !rrsetExists {\n\t\t\/\/ we really don't have any record to offer\n\t\tm.SetRcode(req, dns.RcodeNameError)\n\t\tm.Ns = (*zone)[dns.RR_Header{Name: name, Rrtype: dns.TypeSOA, Class: dns.ClassINET}]\n\t} else if !answerKnown && rrsetExists {\n\t\tm.Ns = (*zone)[dns.RR_Header{Name: name, Rrtype: dns.TypeSOA, Class: dns.ClassINET}]\n\t} else {\n\t\t\/\/ Add Authority section\n\t\tfor _, r := range (*zone)[dns.RR_Header{Name: name, Rrtype: dns.TypeNS, Class: dns.ClassINET}] {\n\t\t\tm.Ns = append(m.Ns, r)\n\n\t\t\t\/\/ Resolve Authority if possible and serve as ADDITIONAL SECTION\n\t\t\tzone2, _ := zones.match(r.(*dns.NS).Ns, dns.TypeA)\n\t\t\tif zone2 != nil {\n\t\t\t\tfor _, r := range (*zone2)[dns.RR_Header{Name: r.(*dns.NS).Ns, Rrtype: dns.TypeA, Class: dns.ClassINET}] {\n\t\t\t\t\tm.Extra = append(m.Extra, r)\n\t\t\t\t}\n\t\t\t\tfor _, r := range (*zone2)[dns.RR_Header{Name: r.(*dns.NS).Ns, Rrtype: dns.TypeAAAA, Class: dns.ClassINET}] {\n\t\t\t\t\tm.Extra = append(m.Extra, r)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Resolve extra lookups for CNAMEs, SRVs, etc. and put in ADDITIONAL SECTION\n\tfor _, r := range m.Answer {\n\t\tswitch r.Header().Rrtype {\n\t\tcase dns.TypeCNAME:\n\t\t\tzone2, _ := zones.match(r.(*dns.CNAME).Target, dns.TypeA)\n\t\t\tif zone2 != nil {\n\t\t\t\tfor _, r := range (*zone2)[dns.RR_Header{Name: r.(*dns.CNAME).Target, Rrtype: dns.TypeA, Class: dns.ClassINET}] {\n\t\t\t\t\tm.Extra = append(m.Extra, r)\n\t\t\t\t}\n\t\t\t\tfor _, r := range (*zone2)[dns.RR_Header{Name: r.(*dns.CNAME).Target, Rrtype: dns.TypeAAAA, Class: dns.ClassINET}] {\n\t\t\t\t\tm.Extra = append(m.Extra, r)\n\t\t\t\t}\n\t\t\t}\n\t\tcase dns.TypeSRV:\n\t\t\tzone2, _ := zones.match(r.(*dns.SRV).Target, dns.TypeA)\n\t\t\tif zone2 != nil {\n\t\t\t\tfor _, r := range (*zone2)[dns.RR_Header{Name: r.(*dns.SRV).Target, Rrtype: dns.TypeA, Class: dns.ClassINET}] {\n\t\t\t\t\tm.Extra = append(m.Extra, r)\n\t\t\t\t}\n\t\t\t\tfor _, r := range (*zone2)[dns.RR_Header{Name: r.(*dns.SRV).Target, Rrtype: dns.TypeAAAA, Class: dns.ClassINET}] {\n\t\t\t\t\tm.Extra = append(m.Extra, r)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tm.Answer = dns.Dedup(m.Answer, nil)\n\tm.Extra = dns.Dedup(m.Extra, nil)\n\tw.WriteMsg(m)\n}\n\nfunc UnFqdn(s string) string {\n\tif dns.IsFqdn(s) {\n\t\treturn s[:len(s)-1]\n\t}\n\treturn s\n}\n<commit_msg>Fix (not) adding authority section and NS records<commit_after>package main\n\nimport (\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/miekg\/dns\"\n)\n\nfunc recurse(w dns.ResponseWriter, req *dns.Msg) {\n\tif recurseTo == \"\" {\n\t\tdns.HandleFailed(w, req)\n\t\treturn\n\t} else {\n\t\tc := new(dns.Client)\n\tRedo:\n\t\tif in, _, err := c.Exchange(req, recurseTo); err == nil { \/\/ Second return value is RTT\n\t\t\tif in.MsgHdr.Truncated {\n\t\t\t\tc.Net = \"tcp\"\n\t\t\t\tgoto Redo\n\t\t\t}\n\n\t\t\tw.WriteMsg(in)\n\t\t\treturn\n\t\t} else {\n\t\t\tlog.Printf(\"Recursive error: %+v\\n\", err)\n\t\t\tdns.HandleFailed(w, req)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc handleDNS(w dns.ResponseWriter, req *dns.Msg) {\n\t\/\/ BIND does not support answering multiple questions so we won't\n\tvar zone *Zone\n\tvar name string\n\n\tzones.RLock()\n\tdefer zones.RUnlock()\n\n\tif len(req.Question) != 1 {\n\t\tm := new(dns.Msg)\n\t\tm.SetReply(req)\n\t\tm.SetRcodeFormatError(req)\n\t\tw.WriteMsg(m)\n\t\treturn\n\t}\n\treq.Question[0].Name = strings.ToLower(req.Question[0].Name)\n\n\tzone, name = zones.match(req.Question[0].Name, req.Question[0].Qtype)\n\tif zone == nil {\n\t\tif recurseTo != \"\" {\n\t\t\trecurse(w, req)\n\t\t} else {\n\t\t\tm := new(dns.Msg)\n\t\t\tm.SetReply(req)\n\t\t\tm.SetRcode(req, dns.RcodeRefused)\n\t\t\tw.WriteMsg(m)\n\t\t}\n\t\treturn\n\t}\n\n\tzones.hits_mx.Lock()\n\tzones.hits[name]++\n\tzones.hits_mx.Unlock()\n\n\tm := new(dns.Msg)\n\tm.SetReply(req)\n\tm.Authoritative = true\n\tm.RecursionAvailable = false\n\n\tvar (\n\t\tanswerKnown bool\n\t\trrsetExists bool\n\t\tnoAuthority bool\n\t)\n\n\tfor _, r := range (*zone)[dns.RR_Header{Name: req.Question[0].Name, Class: req.Question[0].Qclass}] {\n\t\trrsetExists = true\n\t\tif r.Header().Rrtype == req.Question[0].Qtype {\n\t\t\tm.Answer = append(m.Answer, r)\n\t\t\tanswerKnown = true\n\t\t}\n\t}\n\n\t\/\/ check for a wildcard record (*.zone)\n\tif !answerKnown {\n\t\tfor _, r := range (*zone)[dns.RR_Header{Name: \"*.\" + name, Class: req.Question[0].Qclass}] {\n\t\t\trrsetExists = true\n\t\t\tif r.Header().Rrtype == req.Question[0].Qtype {\n\t\t\t\tr.Header().Name = dns.Fqdn(req.Question[0].Name)\n\t\t\t\tm.Answer = append(m.Answer, r)\n\t\t\t\tanswerKnown = true\n\t\t\t}\n\t\t}\n\t}\n\n\tif !answerKnown && rrsetExists {\n\t\t\/\/ we don't have a direct response but may find alternative record type: CNAME\n\t\tfor _, r := range (*zone)[dns.RR_Header{Name: req.Question[0].Name, Rrtype: dns.TypeCNAME, Class: req.Question[0].Qclass}] {\n\t\t\tm.Answer = append(m.Answer, r)\n\t\t\tanswerKnown = true\n\t\t\tnoAuthority = true\n\t\t}\n\n\t\t\/\/ we don't have a direct response but may find alternative record type: NS\n\t\tif (*zone)[dns.RR_Header{Name: req.Question[0].Name, Rrtype: dns.TypeNS, Class: req.Question[0].Qclass}] != nil {\n\t\t\tm.Ns = (*zone)[dns.RR_Header{Name: req.Question[0].Name, Rrtype: dns.TypeNS, Class: req.Question[0].Qclass}]\n\t\t\tanswerKnown = true\n\t\t\tnoAuthority = true\n\t\t}\n\t}\n\n\t\/\/ now we try recursion if enabled\n\tif !answerKnown && recurseTo != \"\" { \/\/ we don't want to handleFailed when recursion is disabled\n\t\trecurse(w, req)\n\t\treturn\n\t}\n\n\tif !answerKnown && !rrsetExists {\n\t\t\/\/ we really don't have any record to offer\n\t\tm.SetRcode(req, dns.RcodeNameError)\n\t\tm.Ns = (*zone)[dns.RR_Header{Name: name, Rrtype: dns.TypeSOA, Class: dns.ClassINET}]\n\t} else if !answerKnown && rrsetExists {\n\t\tm.Ns = (*zone)[dns.RR_Header{Name: name, Rrtype: dns.TypeSOA, Class: dns.ClassINET}]\n\t} else if !noAuthority {\n\t\t\/\/ Add Authority section\n\t\tfor _, r := range (*zone)[dns.RR_Header{Name: name, Rrtype: dns.TypeNS, Class: dns.ClassINET}] {\n\t\t\tm.Ns = append(m.Ns, r)\n\n\t\t\t\/\/ Resolve Authority if possible and serve as ADDITIONAL SECTION\n\t\t\tzone2, _ := zones.match(r.(*dns.NS).Ns, dns.TypeA)\n\t\t\tif zone2 != nil {\n\t\t\t\tfor _, r := range (*zone2)[dns.RR_Header{Name: r.(*dns.NS).Ns, Rrtype: dns.TypeA, Class: dns.ClassINET}] {\n\t\t\t\t\tm.Extra = append(m.Extra, r)\n\t\t\t\t}\n\t\t\t\tfor _, r := range (*zone2)[dns.RR_Header{Name: r.(*dns.NS).Ns, Rrtype: dns.TypeAAAA, Class: dns.ClassINET}] {\n\t\t\t\t\tm.Extra = append(m.Extra, r)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Resolve extra lookups for CNAMEs, SRVs, etc. and put in ADDITIONAL SECTION\n\tfor _, r := range m.Answer {\n\t\tswitch r.Header().Rrtype {\n\t\tcase dns.TypeCNAME:\n\t\t\tzone2, _ := zones.match(r.(*dns.CNAME).Target, dns.TypeA)\n\t\t\tif zone2 != nil {\n\t\t\t\tfor _, r := range (*zone2)[dns.RR_Header{Name: r.(*dns.CNAME).Target, Rrtype: dns.TypeA, Class: dns.ClassINET}] {\n\t\t\t\t\tm.Extra = append(m.Extra, r)\n\t\t\t\t}\n\t\t\t\tfor _, r := range (*zone2)[dns.RR_Header{Name: r.(*dns.CNAME).Target, Rrtype: dns.TypeAAAA, Class: dns.ClassINET}] {\n\t\t\t\t\tm.Extra = append(m.Extra, r)\n\t\t\t\t}\n\t\t\t}\n\t\tcase dns.TypeSRV:\n\t\t\tzone2, _ := zones.match(r.(*dns.SRV).Target, dns.TypeA)\n\t\t\tif zone2 != nil {\n\t\t\t\tfor _, r := range (*zone2)[dns.RR_Header{Name: r.(*dns.SRV).Target, Rrtype: dns.TypeA, Class: dns.ClassINET}] {\n\t\t\t\t\tm.Extra = append(m.Extra, r)\n\t\t\t\t}\n\t\t\t\tfor _, r := range (*zone2)[dns.RR_Header{Name: r.(*dns.SRV).Target, Rrtype: dns.TypeAAAA, Class: dns.ClassINET}] {\n\t\t\t\t\tm.Extra = append(m.Extra, r)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tm.Answer = dns.Dedup(m.Answer, nil)\n\tm.Extra = dns.Dedup(m.Extra, nil)\n\tw.WriteMsg(m)\n}\n\nfunc UnFqdn(s string) string {\n\tif dns.IsFqdn(s) {\n\t\treturn s[:len(s)-1]\n\t}\n\treturn s\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Go implementation of http:\/\/www.hashids.org under MIT license\n\/\/ Generates hashes from an array of integers, eg. for YouTube like hashes\n\/\/ Setup: go get github.com\/speps\/go-hashids\n\/\/ Original implementations by Ivan Akimov at https:\/\/github.com\/ivanakimov\n\/\/ Thanks to Rémy Oudompheng and Peter Hellberg for code review and fixes\n\npackage hashids\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"strings\"\n)\n\nconst (\n\t\/\/ Version is the version number of the library\n\tVersion string = \"1.0.0\"\n\n\t\/\/ DefaultAlphabet is the default alphabet used by go-hashids\n\tDefaultAlphabet string = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890\"\n\n\tminAlphabetLength int = 16\n\tsepDiv float64 = 3.5\n\tguardDiv float64 = 12.0\n)\n\nvar sepsOriginal = []rune(\"cfhistuCFHISTU\")\n\n\/\/ HashID contains everything needed to encode\/decode hashids\ntype HashID struct {\n\talphabet []rune\n\tminLength int\n\tmaxLengthPerNumber int\n\tsalt []rune\n\tseps []rune\n\tguards []rune\n}\n\n\/\/ HashIDData contains the information needed to generate hashids\ntype HashIDData struct {\n\t\/\/ Alphabet is the alphabet used to generate new ids\n\tAlphabet string\n\n\t\/\/ MinLength is the minimum length of a generated id\n\tMinLength int\n\n\t\/\/ Salt is the secret used to make the generated id harder to guess\n\tSalt string\n}\n\n\/\/ NewData creates a new HashIDData with the DefaultAlphabet already set.\nfunc NewData() *HashIDData {\n\treturn &HashIDData{Alphabet: DefaultAlphabet}\n}\n\n\/\/ New creates a new HashID\nfunc New() (*HashID, error) {\n\treturn NewWithData(NewData())\n}\n\n\/\/ NewWithData creates a new HashID with the provided HashIDData\nfunc NewWithData(data *HashIDData) (*HashID, error) {\n\tif len(data.Alphabet) < minAlphabetLength {\n\t\treturn nil, fmt.Errorf(\"alphabet must contain at least %d characters\", minAlphabetLength)\n\t}\n\tif strings.Contains(data.Alphabet, \" \") {\n\t\treturn nil, fmt.Errorf(\"alphabet may not contain spaces\")\n\t}\n\t\/\/ Check if all characters are unique in Alphabet\n\tuniqueCheck := make(map[rune]bool, len(data.Alphabet))\n\tfor _, a := range data.Alphabet {\n\t\tif _, found := uniqueCheck[a]; found {\n\t\t\treturn nil, fmt.Errorf(\"duplicate character in alphabet: %s\", string([]rune{a}))\n\t\t}\n\t\tuniqueCheck[a] = true\n\t}\n\n\talphabet := []rune(data.Alphabet)\n\tsalt := []rune(data.Salt)\n\n\tseps := make([]rune, len(sepsOriginal))\n\tcopy(seps, sepsOriginal)\n\n\t\/\/ seps should contain only characters present in alphabet; alphabet should not contains seps\n\tfor i := 0; i < len(seps); i++ {\n\t\tfoundIndex := -1\n\t\tfor j, a := range alphabet {\n\t\t\tif a == seps[i] {\n\t\t\t\tfoundIndex = j\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif foundIndex == -1 {\n\t\t\tseps = append(seps[:i], seps[i+1:]...)\n\t\t\ti--\n\t\t} else {\n\t\t\talphabet = append(alphabet[:foundIndex], alphabet[foundIndex+1:]...)\n\t\t}\n\t}\n\tseps = consistentShuffle(seps, salt)\n\n\tif len(seps) == 0 || float64(len(alphabet))\/float64(len(seps)) > sepDiv {\n\t\tsepsLength := int(math.Ceil(float64(len(alphabet)) \/ sepDiv))\n\t\tif sepsLength == 1 {\n\t\t\tsepsLength++\n\t\t}\n\t\tif sepsLength > len(seps) {\n\t\t\tdiff := sepsLength - len(seps)\n\t\t\tseps = append(seps, alphabet[:diff]...)\n\t\t\talphabet = alphabet[diff:]\n\t\t} else {\n\t\t\tseps = seps[:sepsLength]\n\t\t}\n\t}\n\talphabet = consistentShuffle(alphabet, salt)\n\n\tguardCount := int(math.Ceil(float64(len(alphabet)) \/ guardDiv))\n\tvar guards []rune\n\tif len(alphabet) < 3 {\n\t\tguards = seps[:guardCount]\n\t\tseps = seps[guardCount:]\n\t} else {\n\t\tguards = alphabet[:guardCount]\n\t\talphabet = alphabet[guardCount:]\n\t}\n\n\thid := &HashID{\n\t\talphabet: alphabet,\n\t\tminLength: data.MinLength,\n\t\tsalt: salt,\n\t\tseps: seps,\n\t\tguards: guards,\n\t}\n\n\t\/\/ Calculate the maximum possible string length by hashing the maximum possible id\n\tencoded, err := hid.EncodeInt64([]int64{math.MaxInt64})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to encode maximum int64 to find max encoded value length: %s\", err)\n\t}\n\thid.maxLengthPerNumber = len(encoded)\n\n\treturn hid, nil\n}\n\n\/\/ Encode hashes an array of int to a string containing at least MinLength characters taken from the Alphabet.\n\/\/ Use Decode using the same Alphabet and Salt to get back the array of int.\nfunc (h *HashID) Encode(numbers []int) (string, error) {\n\tnumbers64 := make([]int64, 0, len(numbers))\n\tfor _, id := range numbers {\n\t\tnumbers64 = append(numbers64, int64(id))\n\t}\n\treturn h.EncodeInt64(numbers64)\n}\n\n\/\/ EncodeInt64 hashes an array of int64 to a string containing at least MinLength characters taken from the Alphabet.\n\/\/ Use DecodeInt64 using the same Alphabet and Salt to get back the array of int64.\nfunc (h *HashID) EncodeInt64(numbers []int64) (string, error) {\n\tif len(numbers) == 0 {\n\t\treturn \"\", errors.New(\"encoding empty array of numbers makes no sense\")\n\t}\n\tfor _, n := range numbers {\n\t\tif n < 0 {\n\t\t\treturn \"\", errors.New(\"negative number not supported\")\n\t\t}\n\t}\n\n\talphabet := make([]rune, len(h.alphabet))\n\tcopy(alphabet, h.alphabet)\n\n\tnumbersHash := int64(0)\n\tfor i, n := range numbers {\n\t\tnumbersHash += (n % int64(i+100))\n\t}\n\n\tmaxRuneLength := h.maxLengthPerNumber * len(numbers)\n\tif maxRuneLength < h.minLength {\n\t\tmaxRuneLength = h.minLength\n\t}\n\n\tresult := make([]rune, 0, maxRuneLength)\n\tlottery := alphabet[numbersHash%int64(len(alphabet))]\n\tresult = append(result, lottery)\n\n\tfor i, n := range numbers {\n\t\tbuffer := append([]rune{lottery}, append(h.salt, alphabet...)...)\n\t\talphabet = consistentShuffle(alphabet, buffer[:len(alphabet)])\n\t\thash := hash(n, maxRuneLength, alphabet)\n\t\tresult = append(result, hash...)\n\n\t\tif i+1 < len(numbers) {\n\t\t\tn %= int64(hash[0]) + int64(i)\n\t\t\tresult = append(result, h.seps[n%int64(len(h.seps))])\n\t\t}\n\t}\n\n\tif len(result) < h.minLength {\n\t\tguardIndex := (numbersHash + int64(result[0])) % int64(len(h.guards))\n\t\tresult = append([]rune{h.guards[guardIndex]}, result...)\n\n\t\tif len(result) < h.minLength {\n\t\t\tguardIndex = (numbersHash + int64(result[2])) % int64(len(h.guards))\n\t\t\tresult = append(result, h.guards[guardIndex])\n\t\t}\n\t}\n\n\thalfLength := len(alphabet) \/ 2\n\tfor len(result) < h.minLength {\n\t\talphabet = consistentShuffle(alphabet, alphabet)\n\t\tresult = append(alphabet[halfLength:], append(result, alphabet[:halfLength]...)...)\n\t\texcess := len(result) - h.minLength\n\t\tif excess > 0 {\n\t\t\tresult = result[excess\/2 : excess\/2+h.minLength]\n\t\t}\n\t}\n\n\treturn string(result), nil\n}\n\n\/\/ DEPRECATED: Use DecodeWithError instead\n\/\/ Decode unhashes the string passed to an array of int.\n\/\/ It is symmetric with Encode if the Alphabet and Salt are the same ones which were used to hash.\n\/\/ MinLength has no effect on Decode.\nfunc (h *HashID) Decode(hash string) []int {\n\tresult, err := h.DecodeWithError(hash)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn result\n}\n\n\/\/ Decode unhashes the string passed to an array of int.\n\/\/ It is symmetric with Encode if the Alphabet and Salt are the same ones which were used to hash.\n\/\/ MinLength has no effect on Decode.\nfunc (h *HashID) DecodeWithError(hash string) ([]int, error) {\n\tresult64, err := h.DecodeInt64WithError(hash)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult := make([]int, 0, len(result64))\n\tfor _, id := range result64 {\n\t\tresult = append(result, int(id))\n\t}\n\treturn result, nil\n}\n\n\/\/ DEPRECATED: Use DecodeInt64WithError instead\n\/\/ DecodeInt64 unhashes the string passed to an array of int64.\n\/\/ It is symmetric with EncodeInt64 if the Alphabet and Salt are the same ones which were used to hash.\n\/\/ MinLength has no effect on DecodeInt64.\nfunc (h *HashID) DecodeInt64(hash string) []int64 {\n\tresult, err := h.DecodeInt64WithError(hash)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn result\n}\n\n\/\/ DecodeInt64 unhashes the string passed to an array of int64.\n\/\/ It is symmetric with EncodeInt64 if the Alphabet and Salt are the same ones which were used to hash.\n\/\/ MinLength has no effect on DecodeInt64.\nfunc (h *HashID) DecodeInt64WithError(hash string) ([]int64, error) {\n\thashes := splitRunes([]rune(hash), h.guards)\n\thashIndex := 0\n\tif len(hashes) == 2 || len(hashes) == 3 {\n\t\thashIndex = 1\n\t}\n\n\tresult := make([]int64, 0)\n\n\thashBreakdown := hashes[hashIndex]\n\tif len(hashBreakdown) > 0 {\n\t\tlottery := hashBreakdown[0]\n\t\thashBreakdown = hashBreakdown[1:]\n\t\thashes = splitRunes(hashBreakdown, h.seps)\n\t\talphabet := []rune(h.alphabet)\n\t\tfor _, subHash := range hashes {\n\t\t\tbuffer := append([]rune{lottery}, append(h.salt, alphabet...)...)\n\t\t\talphabet = consistentShuffle(alphabet, buffer[:len(alphabet)])\n\t\t\tnumber, err := unhash(subHash, alphabet)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresult = append(result, number)\n\t\t}\n\t}\n\n\tsanityCheck, _ := h.EncodeInt64(result)\n\tif sanityCheck != hash {\n\t\treturn result, errors.New(\"mismatch between encode and decode\")\n\t}\n\n\treturn result, nil\n}\n\nfunc splitRunes(input, seps []rune) [][]rune {\n\tsplitIndices := make([]int, 0)\n\tfor i, inputRune := range input {\n\t\tfor _, sepsRune := range seps {\n\t\t\tif inputRune == sepsRune {\n\t\t\t\tsplitIndices = append(splitIndices, i)\n\t\t\t}\n\t\t}\n\t}\n\n\tresult := make([][]rune, 0, len(splitIndices)+1)\n\tinputLeft := input[:]\n\tfor _, splitIndex := range splitIndices {\n\t\tsplitIndex -= len(input) - len(inputLeft)\n\t\tsubInput := make([]rune, splitIndex)\n\t\tcopy(subInput, inputLeft[:splitIndex])\n\t\tresult = append(result, subInput)\n\t\tinputLeft = inputLeft[splitIndex+1:]\n\t}\n\tresult = append(result, inputLeft)\n\n\treturn result\n}\n\nfunc hash(input int64, maxRuneLength int, alphabet []rune) []rune {\n\tresult := make([]rune, 0, maxRuneLength)\n\tfor {\n\t\tr := alphabet[input%int64(len(alphabet))]\n\t\tresult = append(result, r)\n\t\tinput \/= int64(len(alphabet))\n\t\tif input == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\tfor i := len(result)\/2 - 1; i >= 0; i-- {\n\t\topp := len(result) - 1 - i\n\t\tresult[i], result[opp] = result[opp], result[i]\n\t}\n\treturn result\n}\n\nfunc unhash(input, alphabet []rune) (int64, error) {\n\tresult := int64(0)\n\tfor _, inputRune := range input {\n\t\talphabetPos := -1\n\t\tfor pos, alphabetRune := range alphabet {\n\t\t\tif inputRune == alphabetRune {\n\t\t\t\talphabetPos = pos\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif alphabetPos == -1 {\n\t\t\treturn 0, errors.New(\"alphabet used for hash was different\")\n\t\t}\n\n\t\tresult = result*int64(len(alphabet)) + int64(alphabetPos)\n\t}\n\treturn result, nil\n}\n\nfunc consistentShuffle(alphabet, salt []rune) []rune {\n\tif len(salt) == 0 {\n\t\treturn alphabet\n\t}\n\n\tresult := make([]rune, len(alphabet))\n\tcopy(result, alphabet)\n\tfor i, v, p := len(result)-1, 0, 0; i > 0; i-- {\n\t\tp += int(salt[v])\n\t\tj := (int(salt[v]) + v + p) % i\n\t\tresult[i], result[j] = result[j], result[i]\n\t\tv = (v + 1) % len(salt)\n\t}\n\n\treturn result\n}\n<commit_msg>Add a function for duplicating a slice of runes<commit_after>\/\/ Go implementation of http:\/\/www.hashids.org under MIT license\n\/\/ Generates hashes from an array of integers, eg. for YouTube like hashes\n\/\/ Setup: go get github.com\/speps\/go-hashids\n\/\/ Original implementations by Ivan Akimov at https:\/\/github.com\/ivanakimov\n\/\/ Thanks to Rémy Oudompheng and Peter Hellberg for code review and fixes\n\npackage hashids\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"strings\"\n)\n\nconst (\n\t\/\/ Version is the version number of the library\n\tVersion string = \"1.0.0\"\n\n\t\/\/ DefaultAlphabet is the default alphabet used by go-hashids\n\tDefaultAlphabet string = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890\"\n\n\tminAlphabetLength int = 16\n\tsepDiv float64 = 3.5\n\tguardDiv float64 = 12.0\n)\n\nvar sepsOriginal = []rune(\"cfhistuCFHISTU\")\n\n\/\/ HashID contains everything needed to encode\/decode hashids\ntype HashID struct {\n\talphabet []rune\n\tminLength int\n\tmaxLengthPerNumber int\n\tsalt []rune\n\tseps []rune\n\tguards []rune\n}\n\n\/\/ HashIDData contains the information needed to generate hashids\ntype HashIDData struct {\n\t\/\/ Alphabet is the alphabet used to generate new ids\n\tAlphabet string\n\n\t\/\/ MinLength is the minimum length of a generated id\n\tMinLength int\n\n\t\/\/ Salt is the secret used to make the generated id harder to guess\n\tSalt string\n}\n\n\/\/ NewData creates a new HashIDData with the DefaultAlphabet already set.\nfunc NewData() *HashIDData {\n\treturn &HashIDData{Alphabet: DefaultAlphabet}\n}\n\n\/\/ New creates a new HashID\nfunc New() (*HashID, error) {\n\treturn NewWithData(NewData())\n}\n\n\/\/ NewWithData creates a new HashID with the provided HashIDData\nfunc NewWithData(data *HashIDData) (*HashID, error) {\n\tif len(data.Alphabet) < minAlphabetLength {\n\t\treturn nil, fmt.Errorf(\"alphabet must contain at least %d characters\", minAlphabetLength)\n\t}\n\tif strings.Contains(data.Alphabet, \" \") {\n\t\treturn nil, fmt.Errorf(\"alphabet may not contain spaces\")\n\t}\n\t\/\/ Check if all characters are unique in Alphabet\n\tuniqueCheck := make(map[rune]bool, len(data.Alphabet))\n\tfor _, a := range data.Alphabet {\n\t\tif _, found := uniqueCheck[a]; found {\n\t\t\treturn nil, fmt.Errorf(\"duplicate character in alphabet: %s\", string([]rune{a}))\n\t\t}\n\t\tuniqueCheck[a] = true\n\t}\n\n\talphabet := []rune(data.Alphabet)\n\tsalt := []rune(data.Salt)\n\n\tseps := duplicateRuneSlice(sepsOriginal)\n\n\t\/\/ seps should contain only characters present in alphabet; alphabet should not contains seps\n\tfor i := 0; i < len(seps); i++ {\n\t\tfoundIndex := -1\n\t\tfor j, a := range alphabet {\n\t\t\tif a == seps[i] {\n\t\t\t\tfoundIndex = j\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif foundIndex == -1 {\n\t\t\tseps = append(seps[:i], seps[i+1:]...)\n\t\t\ti--\n\t\t} else {\n\t\t\talphabet = append(alphabet[:foundIndex], alphabet[foundIndex+1:]...)\n\t\t}\n\t}\n\tseps = consistentShuffle(seps, salt)\n\n\tif len(seps) == 0 || float64(len(alphabet))\/float64(len(seps)) > sepDiv {\n\t\tsepsLength := int(math.Ceil(float64(len(alphabet)) \/ sepDiv))\n\t\tif sepsLength == 1 {\n\t\t\tsepsLength++\n\t\t}\n\t\tif sepsLength > len(seps) {\n\t\t\tdiff := sepsLength - len(seps)\n\t\t\tseps = append(seps, alphabet[:diff]...)\n\t\t\talphabet = alphabet[diff:]\n\t\t} else {\n\t\t\tseps = seps[:sepsLength]\n\t\t}\n\t}\n\talphabet = consistentShuffle(alphabet, salt)\n\n\tguardCount := int(math.Ceil(float64(len(alphabet)) \/ guardDiv))\n\tvar guards []rune\n\tif len(alphabet) < 3 {\n\t\tguards = seps[:guardCount]\n\t\tseps = seps[guardCount:]\n\t} else {\n\t\tguards = alphabet[:guardCount]\n\t\talphabet = alphabet[guardCount:]\n\t}\n\n\thid := &HashID{\n\t\talphabet: alphabet,\n\t\tminLength: data.MinLength,\n\t\tsalt: salt,\n\t\tseps: seps,\n\t\tguards: guards,\n\t}\n\n\t\/\/ Calculate the maximum possible string length by hashing the maximum possible id\n\tencoded, err := hid.EncodeInt64([]int64{math.MaxInt64})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Unable to encode maximum int64 to find max encoded value length: %s\", err)\n\t}\n\thid.maxLengthPerNumber = len(encoded)\n\n\treturn hid, nil\n}\n\n\/\/ Encode hashes an array of int to a string containing at least MinLength characters taken from the Alphabet.\n\/\/ Use Decode using the same Alphabet and Salt to get back the array of int.\nfunc (h *HashID) Encode(numbers []int) (string, error) {\n\tnumbers64 := make([]int64, 0, len(numbers))\n\tfor _, id := range numbers {\n\t\tnumbers64 = append(numbers64, int64(id))\n\t}\n\treturn h.EncodeInt64(numbers64)\n}\n\n\/\/ EncodeInt64 hashes an array of int64 to a string containing at least MinLength characters taken from the Alphabet.\n\/\/ Use DecodeInt64 using the same Alphabet and Salt to get back the array of int64.\nfunc (h *HashID) EncodeInt64(numbers []int64) (string, error) {\n\tif len(numbers) == 0 {\n\t\treturn \"\", errors.New(\"encoding empty array of numbers makes no sense\")\n\t}\n\tfor _, n := range numbers {\n\t\tif n < 0 {\n\t\t\treturn \"\", errors.New(\"negative number not supported\")\n\t\t}\n\t}\n\n\talphabet := duplicateRuneSlice(h.alphabet)\n\n\tnumbersHash := int64(0)\n\tfor i, n := range numbers {\n\t\tnumbersHash += (n % int64(i+100))\n\t}\n\n\tmaxRuneLength := h.maxLengthPerNumber * len(numbers)\n\tif maxRuneLength < h.minLength {\n\t\tmaxRuneLength = h.minLength\n\t}\n\n\tresult := make([]rune, 0, maxRuneLength)\n\tlottery := alphabet[numbersHash%int64(len(alphabet))]\n\tresult = append(result, lottery)\n\n\tfor i, n := range numbers {\n\t\tbuffer := append([]rune{lottery}, append(h.salt, alphabet...)...)\n\t\talphabet = consistentShuffle(alphabet, buffer[:len(alphabet)])\n\t\thash := hash(n, maxRuneLength, alphabet)\n\t\tresult = append(result, hash...)\n\n\t\tif i+1 < len(numbers) {\n\t\t\tn %= int64(hash[0]) + int64(i)\n\t\t\tresult = append(result, h.seps[n%int64(len(h.seps))])\n\t\t}\n\t}\n\n\tif len(result) < h.minLength {\n\t\tguardIndex := (numbersHash + int64(result[0])) % int64(len(h.guards))\n\t\tresult = append([]rune{h.guards[guardIndex]}, result...)\n\n\t\tif len(result) < h.minLength {\n\t\t\tguardIndex = (numbersHash + int64(result[2])) % int64(len(h.guards))\n\t\t\tresult = append(result, h.guards[guardIndex])\n\t\t}\n\t}\n\n\thalfLength := len(alphabet) \/ 2\n\tfor len(result) < h.minLength {\n\t\talphabet = consistentShuffle(alphabet, alphabet)\n\t\tresult = append(alphabet[halfLength:], append(result, alphabet[:halfLength]...)...)\n\t\texcess := len(result) - h.minLength\n\t\tif excess > 0 {\n\t\t\tresult = result[excess\/2 : excess\/2+h.minLength]\n\t\t}\n\t}\n\n\treturn string(result), nil\n}\n\n\/\/ DEPRECATED: Use DecodeWithError instead\n\/\/ Decode unhashes the string passed to an array of int.\n\/\/ It is symmetric with Encode if the Alphabet and Salt are the same ones which were used to hash.\n\/\/ MinLength has no effect on Decode.\nfunc (h *HashID) Decode(hash string) []int {\n\tresult, err := h.DecodeWithError(hash)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn result\n}\n\n\/\/ Decode unhashes the string passed to an array of int.\n\/\/ It is symmetric with Encode if the Alphabet and Salt are the same ones which were used to hash.\n\/\/ MinLength has no effect on Decode.\nfunc (h *HashID) DecodeWithError(hash string) ([]int, error) {\n\tresult64, err := h.DecodeInt64WithError(hash)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult := make([]int, 0, len(result64))\n\tfor _, id := range result64 {\n\t\tresult = append(result, int(id))\n\t}\n\treturn result, nil\n}\n\n\/\/ DEPRECATED: Use DecodeInt64WithError instead\n\/\/ DecodeInt64 unhashes the string passed to an array of int64.\n\/\/ It is symmetric with EncodeInt64 if the Alphabet and Salt are the same ones which were used to hash.\n\/\/ MinLength has no effect on DecodeInt64.\nfunc (h *HashID) DecodeInt64(hash string) []int64 {\n\tresult, err := h.DecodeInt64WithError(hash)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn result\n}\n\n\/\/ DecodeInt64 unhashes the string passed to an array of int64.\n\/\/ It is symmetric with EncodeInt64 if the Alphabet and Salt are the same ones which were used to hash.\n\/\/ MinLength has no effect on DecodeInt64.\nfunc (h *HashID) DecodeInt64WithError(hash string) ([]int64, error) {\n\thashes := splitRunes([]rune(hash), h.guards)\n\thashIndex := 0\n\tif len(hashes) == 2 || len(hashes) == 3 {\n\t\thashIndex = 1\n\t}\n\n\tresult := make([]int64, 0)\n\n\thashBreakdown := hashes[hashIndex]\n\tif len(hashBreakdown) > 0 {\n\t\tlottery := hashBreakdown[0]\n\t\thashBreakdown = hashBreakdown[1:]\n\t\thashes = splitRunes(hashBreakdown, h.seps)\n\t\talphabet := []rune(h.alphabet)\n\t\tfor _, subHash := range hashes {\n\t\t\tbuffer := append([]rune{lottery}, append(h.salt, alphabet...)...)\n\t\t\talphabet = consistentShuffle(alphabet, buffer[:len(alphabet)])\n\t\t\tnumber, err := unhash(subHash, alphabet)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresult = append(result, number)\n\t\t}\n\t}\n\n\tsanityCheck, _ := h.EncodeInt64(result)\n\tif sanityCheck != hash {\n\t\treturn result, errors.New(\"mismatch between encode and decode\")\n\t}\n\n\treturn result, nil\n}\n\nfunc splitRunes(input, seps []rune) [][]rune {\n\tsplitIndices := make([]int, 0)\n\tfor i, inputRune := range input {\n\t\tfor _, sepsRune := range seps {\n\t\t\tif inputRune == sepsRune {\n\t\t\t\tsplitIndices = append(splitIndices, i)\n\t\t\t}\n\t\t}\n\t}\n\n\tresult := make([][]rune, 0, len(splitIndices)+1)\n\tinputLeft := input[:]\n\tfor _, splitIndex := range splitIndices {\n\t\tsplitIndex -= len(input) - len(inputLeft)\n\t\tsubInput := make([]rune, splitIndex)\n\t\tcopy(subInput, inputLeft[:splitIndex])\n\t\tresult = append(result, subInput)\n\t\tinputLeft = inputLeft[splitIndex+1:]\n\t}\n\tresult = append(result, inputLeft)\n\n\treturn result\n}\n\nfunc hash(input int64, maxRuneLength int, alphabet []rune) []rune {\n\tresult := make([]rune, 0, maxRuneLength)\n\tfor {\n\t\tr := alphabet[input%int64(len(alphabet))]\n\t\tresult = append(result, r)\n\t\tinput \/= int64(len(alphabet))\n\t\tif input == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\tfor i := len(result)\/2 - 1; i >= 0; i-- {\n\t\topp := len(result) - 1 - i\n\t\tresult[i], result[opp] = result[opp], result[i]\n\t}\n\treturn result\n}\n\nfunc unhash(input, alphabet []rune) (int64, error) {\n\tresult := int64(0)\n\tfor _, inputRune := range input {\n\t\talphabetPos := -1\n\t\tfor pos, alphabetRune := range alphabet {\n\t\t\tif inputRune == alphabetRune {\n\t\t\t\talphabetPos = pos\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif alphabetPos == -1 {\n\t\t\treturn 0, errors.New(\"alphabet used for hash was different\")\n\t\t}\n\n\t\tresult = result*int64(len(alphabet)) + int64(alphabetPos)\n\t}\n\treturn result, nil\n}\n\nfunc consistentShuffle(alphabet, salt []rune) []rune {\n\tif len(salt) == 0 {\n\t\treturn alphabet\n\t}\n\n\tresult := duplicateRuneSlice(alphabet)\n\tfor i, v, p := len(result)-1, 0, 0; i > 0; i-- {\n\t\tp += int(salt[v])\n\t\tj := (int(salt[v]) + v + p) % i\n\t\tresult[i], result[j] = result[j], result[i]\n\t\tv = (v + 1) % len(salt)\n\t}\n\n\treturn result\n}\n\nfunc duplicateRuneSlice(data []rune) []rune {\n\tresult := make([]rune, len(data))\n\tcopy(result, data)\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>package hashmap\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"unsafe\"\n)\n\n\/\/ MaxFillRate is the maximum fill rate for the slice before a resize will happen.\nconst MaxFillRate = 50\n\ntype (\n\thashMapData struct {\n\t\tkeyRightShifts uint64 \/\/ 64 - log2 of array size, to be used as index in the data array\n\t\tdata unsafe.Pointer \/\/ pointer to slice data array\n\t\tslice []*ListElement \/\/ storage for the slice for the garbage collector to not clean it up\n\t\tcount uint64 \/\/ count of filled elements in the slice\n\t}\n\n\t\/\/ HashMap implements a read optimized hash map.\n\tHashMap struct {\n\t\tmapData unsafe.Pointer \/\/ pointer to a map instance that gets replaced if the map resizes\n\t\tlinkedList *List \/\/ key sorted linked list of elements\n\t\tsync.Mutex \/\/ mutex that is only used for resize operations\n\t}\n\n\t\/\/ KeyValue represents a key\/value that is returned by the iterator.\n\tKeyValue struct {\n\t\tKey interface{}\n\t\tValue unsafe.Pointer\n\t}\n)\n\n\/\/ New returns a new HashMap.\nfunc New() *HashMap {\n\treturn NewSize(8)\n}\n\n\/\/ NewSize returns a new HashMap instance with a specific initialization size.\nfunc NewSize(size uint64) *HashMap {\n\thashmap := &HashMap{\n\t\tlinkedList: NewList(),\n\t}\n\thashmap.Grow(size)\n\treturn hashmap\n}\n\n\/\/ Len returns the number of elements within the map.\nfunc (m *HashMap) Len() uint64 {\n\treturn m.linkedList.Len()\n}\n\n\/\/ Fillrate returns the fill rate of the map as an percentage integer.\nfunc (m *HashMap) Fillrate() uint64 {\n\tmapData := (*hashMapData)(atomic.LoadPointer(&m.mapData))\n\tcount := atomic.LoadUint64(&mapData.count)\n\tsliceLen := uint64(len(mapData.slice))\n\treturn (count * 100) \/ sliceLen\n}\n\nfunc (m *HashMap) getSliceItemForKey(hashedKey uint64) (mapData *hashMapData, item *ListElement) {\n\tmapData = (*hashMapData)(atomic.LoadPointer(&m.mapData))\n\tindex := hashedKey >> mapData.keyRightShifts\n\tsliceDataIndexPointer := (*unsafe.Pointer)(unsafe.Pointer(uintptr(mapData.data) + uintptr(index*intSizeBytes)))\n\titem = (*ListElement)(atomic.LoadPointer(sliceDataIndexPointer))\n\treturn\n}\n\n\/\/ Get retrieves an element from the map under given hash key.\nfunc (m *HashMap) Get(hashedKey uint64) (unsafe.Pointer, bool) {\n\t\/\/ inline HashMap.getSliceItemForKey()\n\tmapData := (*hashMapData)(atomic.LoadPointer(&m.mapData))\n\tindex := hashedKey >> mapData.keyRightShifts\n\tsliceDataIndexPointer := (*unsafe.Pointer)(unsafe.Pointer(uintptr(mapData.data) + uintptr(index*intSizeBytes)))\n\tentry := (*ListElement)(atomic.LoadPointer(sliceDataIndexPointer))\n\n\tfor entry != nil {\n\t\tif entry.keyHash == hashedKey {\n\t\t\tif atomic.LoadUint64(&entry.deleted) == 1 { \/\/ inline ListElement.Deleted()\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\treturn atomic.LoadPointer(&entry.value), true \/\/ inline ListElement.Value()\n\t\t}\n\n\t\tif entry.keyHash > hashedKey {\n\t\t\treturn nil, false\n\t\t}\n\n\t\tentry = (*ListElement)(atomic.LoadPointer(&entry.nextElement)) \/\/ inline ListElement.Next()\n\t}\n\treturn nil, false\n}\n\n\/\/ Del deletes the hashed key from the map.\nfunc (m *HashMap) Del(hashedKey uint64) {\n\tfor _, entry := m.getSliceItemForKey(hashedKey); entry != nil; entry = entry.Next() {\n\t\tif entry.keyHash == hashedKey {\n\t\t\tm.linkedList.Delete(entry)\n\t\t\treturn\n\t\t}\n\n\t\tif entry.keyHash > hashedKey {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Set sets the value under the specified hash key to the map. An existing item for this key will be overwritten.\n\/\/ Do not use non hashes as keys for this function, the performance would decrease!\n\/\/ If a resizing operation is happening concurrently while calling Set, the item might show up in the map only after the resize operation is finished.\nfunc (m *HashMap) Set(hashedKey uint64, value unsafe.Pointer) {\n\tnewEntry := &ListElement{\n\t\tkey: hashedKey,\n\t\tkeyHash: hashedKey,\n\t\tvalue: value,\n\t}\n\n\tfor {\n\t\tmapData, sliceItem := m.getSliceItemForKey(hashedKey)\n\t\tif !m.linkedList.Add(newEntry, sliceItem) {\n\t\t\tcontinue \/\/ a concurrent add did interfere, try again\n\t\t}\n\n\t\tnewSliceCount := m.addItemToIndex(newEntry, mapData)\n\t\tif newSliceCount != 0 {\n\t\t\tsliceLen := uint64(len(mapData.slice))\n\t\t\tfillRate := (newSliceCount * 100) \/ sliceLen\n\n\t\t\tif fillRate > MaxFillRate { \/\/ check if the slice needs to be resized\n\t\t\t\tm.Lock()\n\t\t\t\tcurrentMapData := (*hashMapData)(atomic.LoadPointer(&m.mapData))\n\t\t\t\tif mapData == currentMapData { \/\/ double check that no other resize happened\n\t\t\t\t\tm.grow(0)\n\t\t\t\t}\n\t\t\t\tm.Unlock()\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n}\n\n\/\/ adds an item to the index if needed and returns the new item counter if it changed, otherwise 0\nfunc (m *HashMap) addItemToIndex(item *ListElement, mapData *hashMapData) uint64 {\n\tindex := item.keyHash >> mapData.keyRightShifts\n\tsliceDataIndexPointer := (*unsafe.Pointer)(unsafe.Pointer(uintptr(mapData.data) + uintptr(index*intSizeBytes)))\n\n\tfor { \/\/ loop until the smallest key hash is in the index\n\t\tsliceItem := (*ListElement)(atomic.LoadPointer(sliceDataIndexPointer)) \/\/ get the current item in the index\n\t\tif sliceItem == nil { \/\/ no item yet at this index\n\t\t\tif atomic.CompareAndSwapPointer((*unsafe.Pointer)(sliceDataIndexPointer), nil, unsafe.Pointer(item)) {\n\t\t\t\treturn atomic.AddUint64(&mapData.count, 1)\n\t\t\t}\n\t\t\tcontinue \/\/ a new item was inserted concurrently, retry\n\t\t}\n\n\t\tif item.keyHash < sliceItem.keyHash {\n\t\t\t\/\/ the new item is the smallest for this index?\n\t\t\tif !atomic.CompareAndSwapPointer((*unsafe.Pointer)(sliceDataIndexPointer), unsafe.Pointer(sliceItem), unsafe.Pointer(item)) {\n\t\t\t\tcontinue \/\/ a new item was inserted concurrently, retry\n\t\t\t}\n\t\t}\n\t\treturn 0\n\t}\n}\n\n\/\/ Grow resizes the hashmap to a new size, gets rounded up to next power of 2.\n\/\/ To double the size of the hashmap use newSize 0.\nfunc (m *HashMap) Grow(newSize uint64) {\n\tm.Lock()\n\tm.grow(newSize)\n\tm.Unlock()\n}\n\nfunc (m *HashMap) grow(newSize uint64) {\n\tmapData := (*hashMapData)(atomic.LoadPointer(&m.mapData))\n\tif newSize == 0 {\n\t\tnewSize = uint64(len(mapData.slice)) << 1\n\t} else {\n\t\tnewSize = roundUpPower2(newSize)\n\t}\n\n\tnewSlice := make([]*ListElement, newSize)\n\theader := (*reflect.SliceHeader)(unsafe.Pointer(&newSlice))\n\n\tnewMapData := &hashMapData{\n\t\tkeyRightShifts: 64 - log2(newSize),\n\t\tdata: unsafe.Pointer(header.Data), \/\/ use address of slice data storage\n\t\tslice: newSlice,\n\t}\n\n\tm.fillIndexItems(newMapData) \/\/ initialize new index slice with longer keys\n\n\tatomic.StorePointer(&m.mapData, unsafe.Pointer(newMapData))\n\n\tm.fillIndexItems(newMapData) \/\/ make sure that the new index is up to date with the current state of the linked list\n}\n\nfunc (m *HashMap) fillIndexItems(mapData *hashMapData) {\n\tfirst := m.linkedList.First()\n\titem := first\n\tlastIndex := uint64(0)\n\n\tfor item != nil {\n\t\tindex := item.keyHash >> mapData.keyRightShifts\n\t\tif item == first || index != lastIndex { \/\/ store item with smallest hash key for every index\n\t\t\tif !item.Deleted() {\n\t\t\t\tm.addItemToIndex(item, mapData)\n\t\t\t\tlastIndex = index\n\t\t\t}\n\t\t}\n\t\titem = item.Next()\n\t}\n}\n\n\/\/ String returns the map as a string, only hashed keys are printed.\nfunc (m *HashMap) String() string {\n\tbuffer := bytes.NewBufferString(\"\")\n\tbuffer.WriteRune('[')\n\n\tfirst := m.linkedList.First()\n\titem := first\n\n\tfor item != nil {\n\t\tif !item.Deleted() {\n\t\t\tif item != first {\n\t\t\t\tbuffer.WriteRune(',')\n\t\t\t}\n\t\t\tfmt.Fprint(buffer, item.keyHash)\n\t\t}\n\n\t\titem = item.Next()\n\t}\n\tbuffer.WriteRune(']')\n\treturn buffer.String()\n}\n\n\/\/ Iter returns an iterator which could be used in a for range loop.\n\/\/ The order of the items is sorted by hash keys.\nfunc (m *HashMap) Iter() <-chan KeyValue {\n\tch := make(chan KeyValue) \/\/ do not use a size here since items can get added during iteration\n\n\tgo func() {\n\t\titem := m.linkedList.First()\n\t\tfor item != nil {\n\t\t\tif !item.Deleted() {\n\t\t\t\tch <- KeyValue{item.key, item.Value()}\n\t\t\t}\n\t\t\titem = item.Next()\n\t\t}\n\t\tclose(ch)\n\t}()\n\n\treturn ch\n}\n<commit_msg>Code refactoring<commit_after>package hashmap\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"unsafe\"\n)\n\n\/\/ MaxFillRate is the maximum fill rate for the slice before a resize will happen.\nconst MaxFillRate = 50\n\ntype (\n\thashMapData struct {\n\t\tkeyRightShifts uint64 \/\/ 64 - log2 of array size, to be used as index in the data array\n\t\tdata unsafe.Pointer \/\/ pointer to slice data array\n\t\tslice []*ListElement \/\/ storage for the slice for the garbage collector to not clean it up\n\t\tcount uint64 \/\/ count of filled elements in the slice\n\t}\n\n\t\/\/ HashMap implements a read optimized hash map.\n\tHashMap struct {\n\t\tmapDataPtr unsafe.Pointer \/\/ pointer to a map instance that gets replaced if the map resizes\n\t\tlinkedList *List \/\/ key sorted linked list of elements\n\t\tsync.Mutex \/\/ mutex that is only used for resize operations\n\t}\n\n\t\/\/ KeyValue represents a key\/value that is returned by the iterator.\n\tKeyValue struct {\n\t\tKey interface{}\n\t\tValue unsafe.Pointer\n\t}\n)\n\n\/\/ New returns a new HashMap.\nfunc New() *HashMap {\n\treturn NewSize(8)\n}\n\n\/\/ NewSize returns a new HashMap instance with a specific initialization size.\nfunc NewSize(size uint64) *HashMap {\n\thashmap := &HashMap{\n\t\tlinkedList: NewList(),\n\t}\n\thashmap.Grow(size)\n\treturn hashmap\n}\n\n\/\/ Len returns the number of elements within the map.\nfunc (m *HashMap) Len() uint64 {\n\treturn m.linkedList.Len()\n}\n\nfunc (m *HashMap) mapData() *hashMapData {\n\treturn (*hashMapData)(atomic.LoadPointer(&m.mapDataPtr))\n}\n\n\/\/ Fillrate returns the fill rate of the map as an percentage integer.\nfunc (m *HashMap) Fillrate() uint64 {\n\tmapData := m.mapData()\n\tcount := atomic.LoadUint64(&mapData.count)\n\tsliceLen := uint64(len(mapData.slice))\n\treturn (count * 100) \/ sliceLen\n}\n\nfunc (m *HashMap) getSliceItemForKey(hashedKey uint64) (mapData *hashMapData, item *ListElement) {\n\tmapData = m.mapData()\n\tindex := hashedKey >> mapData.keyRightShifts\n\tsliceDataIndexPointer := (*unsafe.Pointer)(unsafe.Pointer(uintptr(mapData.data) + uintptr(index*intSizeBytes)))\n\titem = (*ListElement)(atomic.LoadPointer(sliceDataIndexPointer))\n\treturn\n}\n\n\/\/ Get retrieves an element from the map under given hash key.\nfunc (m *HashMap) Get(hashedKey uint64) (unsafe.Pointer, bool) {\n\t\/\/ inline HashMap.getSliceItemForKey()\n\tmapData := (*hashMapData)(atomic.LoadPointer(&m.mapDataPtr))\n\tindex := hashedKey >> mapData.keyRightShifts\n\tsliceDataIndexPointer := (*unsafe.Pointer)(unsafe.Pointer(uintptr(mapData.data) + uintptr(index*intSizeBytes)))\n\tentry := (*ListElement)(atomic.LoadPointer(sliceDataIndexPointer))\n\n\tfor entry != nil {\n\t\tif entry.keyHash == hashedKey {\n\t\t\tif atomic.LoadUint64(&entry.deleted) == 1 { \/\/ inline ListElement.Deleted()\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\treturn atomic.LoadPointer(&entry.value), true \/\/ inline ListElement.Value()\n\t\t}\n\n\t\tif entry.keyHash > hashedKey {\n\t\t\treturn nil, false\n\t\t}\n\n\t\tentry = (*ListElement)(atomic.LoadPointer(&entry.nextElement)) \/\/ inline ListElement.Next()\n\t}\n\treturn nil, false\n}\n\n\/\/ Del deletes the hashed key from the map.\nfunc (m *HashMap) Del(hashedKey uint64) {\n\tfor _, entry := m.getSliceItemForKey(hashedKey); entry != nil; entry = entry.Next() {\n\t\tif entry.keyHash == hashedKey {\n\t\t\tm.linkedList.Delete(entry)\n\t\t\treturn\n\t\t}\n\n\t\tif entry.keyHash > hashedKey {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Set sets the value under the specified hash key to the map. An existing item for this key will be overwritten.\n\/\/ Do not use non hashes as keys for this function, the performance would decrease!\n\/\/ If a resizing operation is happening concurrently while calling Set, the item might show up in the map only after the resize operation is finished.\nfunc (m *HashMap) Set(hashedKey uint64, value unsafe.Pointer) {\n\tnewEntry := &ListElement{\n\t\tkey: hashedKey,\n\t\tkeyHash: hashedKey,\n\t\tvalue: value,\n\t}\n\n\tfor {\n\t\tmapData, sliceItem := m.getSliceItemForKey(hashedKey)\n\t\tif !m.linkedList.Add(newEntry, sliceItem) {\n\t\t\tcontinue \/\/ a concurrent add did interfere, try again\n\t\t}\n\n\t\tnewSliceCount := mapData.addItemToIndex(newEntry)\n\t\tif newSliceCount != 0 {\n\t\t\tsliceLen := uint64(len(mapData.slice))\n\t\t\tfillRate := (newSliceCount * 100) \/ sliceLen\n\n\t\t\tif fillRate > MaxFillRate { \/\/ check if the slice needs to be resized\n\t\t\t\tm.Lock()\n\t\t\t\tcurrentMapData := m.mapData()\n\t\t\t\tif mapData == currentMapData { \/\/ double check that no other resize happened\n\t\t\t\t\tm.grow(0)\n\t\t\t\t}\n\t\t\t\tm.Unlock()\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n}\n\n\/\/ adds an item to the index if needed and returns the new item counter if it changed, otherwise 0\nfunc (mapData *hashMapData) addItemToIndex(item *ListElement) uint64 {\n\tindex := item.keyHash >> mapData.keyRightShifts\n\tsliceDataIndexPointer := (*unsafe.Pointer)(unsafe.Pointer(uintptr(mapData.data) + uintptr(index*intSizeBytes)))\n\n\tfor { \/\/ loop until the smallest key hash is in the index\n\t\tsliceItem := (*ListElement)(atomic.LoadPointer(sliceDataIndexPointer)) \/\/ get the current item in the index\n\t\tif sliceItem == nil { \/\/ no item yet at this index\n\t\t\tif atomic.CompareAndSwapPointer((*unsafe.Pointer)(sliceDataIndexPointer), nil, unsafe.Pointer(item)) {\n\t\t\t\treturn atomic.AddUint64(&mapData.count, 1)\n\t\t\t}\n\t\t\tcontinue \/\/ a new item was inserted concurrently, retry\n\t\t}\n\n\t\tif item.keyHash < sliceItem.keyHash {\n\t\t\t\/\/ the new item is the smallest for this index?\n\t\t\tif !atomic.CompareAndSwapPointer((*unsafe.Pointer)(sliceDataIndexPointer), unsafe.Pointer(sliceItem), unsafe.Pointer(item)) {\n\t\t\t\tcontinue \/\/ a new item was inserted concurrently, retry\n\t\t\t}\n\t\t}\n\t\treturn 0\n\t}\n}\n\n\/\/ Grow resizes the hashmap to a new size, gets rounded up to next power of 2.\n\/\/ To double the size of the hashmap use newSize 0.\nfunc (m *HashMap) Grow(newSize uint64) {\n\tm.Lock()\n\tm.grow(newSize)\n\tm.Unlock()\n}\n\nfunc (m *HashMap) grow(newSize uint64) {\n\tmapData := m.mapData()\n\tif newSize == 0 {\n\t\tnewSize = uint64(len(mapData.slice)) << 1\n\t} else {\n\t\tnewSize = roundUpPower2(newSize)\n\t}\n\n\tnewSlice := make([]*ListElement, newSize)\n\theader := (*reflect.SliceHeader)(unsafe.Pointer(&newSlice))\n\n\tnewMapData := &hashMapData{\n\t\tkeyRightShifts: 64 - log2(newSize),\n\t\tdata: unsafe.Pointer(header.Data), \/\/ use address of slice data storage\n\t\tslice: newSlice,\n\t}\n\n\tm.fillIndexItems(newMapData) \/\/ initialize new index slice with longer keys\n\n\tatomic.StorePointer(&m.mapDataPtr, unsafe.Pointer(newMapData))\n\n\tm.fillIndexItems(newMapData) \/\/ make sure that the new index is up to date with the current state of the linked list\n}\n\nfunc (m *HashMap) fillIndexItems(mapData *hashMapData) {\n\tfirst := m.linkedList.First()\n\titem := first\n\tlastIndex := uint64(0)\n\n\tfor item != nil {\n\t\tindex := item.keyHash >> mapData.keyRightShifts\n\t\tif item == first || index != lastIndex { \/\/ store item with smallest hash key for every index\n\t\t\tif !item.Deleted() {\n\t\t\t\tmapData.addItemToIndex(item)\n\t\t\t\tlastIndex = index\n\t\t\t}\n\t\t}\n\t\titem = item.Next()\n\t}\n}\n\n\/\/ String returns the map as a string, only hashed keys are printed.\nfunc (m *HashMap) String() string {\n\tbuffer := bytes.NewBufferString(\"\")\n\tbuffer.WriteRune('[')\n\n\tfirst := m.linkedList.First()\n\titem := first\n\n\tfor item != nil {\n\t\tif !item.Deleted() {\n\t\t\tif item != first {\n\t\t\t\tbuffer.WriteRune(',')\n\t\t\t}\n\t\t\tfmt.Fprint(buffer, item.keyHash)\n\t\t}\n\n\t\titem = item.Next()\n\t}\n\tbuffer.WriteRune(']')\n\treturn buffer.String()\n}\n\n\/\/ Iter returns an iterator which could be used in a for range loop.\n\/\/ The order of the items is sorted by hash keys.\nfunc (m *HashMap) Iter() <-chan KeyValue {\n\tch := make(chan KeyValue) \/\/ do not use a size here since items can get added during iteration\n\n\tgo func() {\n\t\titem := m.linkedList.First()\n\t\tfor item != nil {\n\t\t\tif !item.Deleted() {\n\t\t\t\tch <- KeyValue{item.key, item.Value()}\n\t\t\t}\n\t\t\titem = item.Next()\n\t\t}\n\t\tclose(ch)\n\t}()\n\n\treturn ch\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"reflect\"\n)\n\nfunc assertSeq(obj Object, msg string) Seq {\n\tswitch s := obj.(type) {\n\tcase Seq:\n\t\treturn s\n\tcase Sequenceable:\n\t\treturn s.Seq()\n\tdefault:\n\t\tpanic(RT.newError(msg))\n\t}\n}\n\nfunc ensureSeq(args []Object, index int) Seq {\n\tswitch s := args[index].(type) {\n\tcase Seq:\n\t\treturn s\n\tcase Sequenceable:\n\t\treturn s.Seq()\n\tdefault:\n\t\tpanic(RT.newArgTypeError(index, \"Seq\"))\n\t}\n}\n\nfunc ensureNumber(args []Object, index int) Number {\n\tswitch obj := args[index].(type) {\n\tcase Number:\n\t\treturn obj\n\tdefault:\n\t\tpanic(RT.newArgTypeError(index, \"Number\"))\n\t}\n}\n\nfunc ensureString(args []Object, index int) String {\n\tswitch obj := args[index].(type) {\n\tcase String:\n\t\treturn obj\n\tdefault:\n\t\tpanic(RT.newArgTypeError(index, \"String\"))\n\t}\n}\n\nfunc ensureType(args []Object, index int) *Type {\n\tswitch obj := args[index].(type) {\n\tcase *Type:\n\t\treturn obj\n\tdefault:\n\t\tpanic(RT.newArgTypeError(index, \"Type\"))\n\t}\n}\n\nfunc ensureMap(args []Object, index int) *ArrayMap {\n\tswitch obj := args[index].(type) {\n\tcase *ArrayMap:\n\t\treturn obj\n\tdefault:\n\t\tpanic(RT.newArgTypeError(index, \"Map\"))\n\t}\n}\n\nfunc ensureMeta(args []Object, index int) Meta {\n\tswitch obj := args[index].(type) {\n\tcase Meta:\n\t\treturn obj\n\tdefault:\n\t\tpanic(RT.newArgTypeError(index, \"Meta\"))\n\t}\n}\n\nvar procMeta Proc = func(args []Object) Object {\n\tswitch obj := args[0].(type) {\n\tcase Meta:\n\t\tmeta := obj.GetMeta()\n\t\tif meta != nil {\n\t\t\treturn meta\n\t\t}\n\t}\n\treturn NIL\n}\n\nvar procWithMeta Proc = func(args []Object) Object {\n\tcheckArity(args, 2, 2)\n\treturn ensureMeta(args, 0).WithMeta(ensureMap(args, 1))\n}\n\nvar procIsZero Proc = func(args []Object) Object {\n\t\/\/ checkArity(args, 1, \"zero?\")\n\tn := ensureNumber(args, 0)\n\tops := GetOps(ensureNumber(args, 0))\n\treturn Bool{b: ops.IsZero(n)}\n}\n\nvar procAdd Proc = func(args []Object) Object {\n\tvar res Number = Int{i: 0}\n\tfor i, n := range args {\n\t\tops := GetOps(res).Combine(GetOps(n))\n\t\tres = ops.Add(res, ensureNumber(args, i))\n\t}\n\treturn res\n}\n\nvar procMultiply Proc = func(args []Object) Object {\n\tvar res Number = Int{i: 1}\n\tfor i, n := range args {\n\t\tops := GetOps(res).Combine(GetOps(n))\n\t\tres = ops.Multiply(res, ensureNumber(args, i))\n\t}\n\treturn res\n}\n\nvar procSubtract Proc = func(args []Object) Object {\n\tif len(args) == 0 {\n\t\tpanicArity(0)\n\t}\n\tvar res Number = Int{i: 0}\n\tstart := 0\n\tif len(args) > 1 {\n\t\tres = ensureNumber(args, 0)\n\t\tstart = 1\n\t}\n\tfor i := start; i < len(args); i++ {\n\t\tops := GetOps(res).Combine(GetOps(args[i]))\n\t\tres = ops.Subtract(res, ensureNumber(args, i))\n\t}\n\treturn res\n}\n\nvar procDivide Proc = func(args []Object) Object {\n\tif len(args) == 0 {\n\t\tpanicArity(0)\n\t}\n\tvar res Number = Int{i: 1}\n\tstart := 0\n\tif len(args) > 1 {\n\t\tres = ensureNumber(args, 0)\n\t\tstart = 1\n\t}\n\tfor i := start; i < len(args); i++ {\n\t\tops := GetOps(res).Combine(GetOps(args[i]))\n\t\tres = ops.Divide(res, ensureNumber(args, i))\n\t}\n\treturn res\n}\n\nvar procExInfo Proc = func(args []Object) Object {\n\tcheckArity(args, 2, 2)\n\treturn &ExInfo{\n\t\tmsg: ensureString(args, 0),\n\t\tdata: ensureMap(args, 1),\n\t\trt: RT.clone(),\n\t}\n}\n\nvar procPrint Proc = func(args []Object) Object {\n\tn := len(args)\n\tif n > 0 {\n\t\tfor _, arg := range args[:n-1] {\n\t\t\tprint(arg.ToString(false))\n\t\t\tprint(\" \")\n\t\t}\n\t\tprint(args[n-1].ToString(false))\n\t}\n\treturn NIL\n}\n\nvar procSetMacro Proc = func(args []Object) Object {\n\tvr := args[0].(*Var)\n\tvr.isMacro = true\n\treturn vr\n}\n\nvar procList Proc = func(args []Object) Object {\n\treturn NewListFrom(args...)\n}\n\nvar procCons Proc = func(args []Object) Object {\n\tcheckArity(args, 2, 2)\n\ts := ensureSeq(args, 1)\n\treturn s.Cons(args[0])\n}\n\nvar procFirst Proc = func(args []Object) Object {\n\tcheckArity(args, 1, 1)\n\ts := ensureSeq(args, 0)\n\treturn s.First()\n}\n\nvar procNext Proc = func(args []Object) Object {\n\tcheckArity(args, 1, 1)\n\ts := ensureSeq(args, 0)\n\tres := s.Rest()\n\tif res.IsEmpty() {\n\t\treturn NIL\n\t}\n\treturn res\n}\n\nvar procRest Proc = func(args []Object) Object {\n\tcheckArity(args, 1, 1)\n\ts := ensureSeq(args, 0)\n\treturn s.Rest()\n}\n\nvar procConj Proc = func(args []Object) Object {\n\tswitch c := args[0].(type) {\n\tcase Nil:\n\t\treturn NewListFrom(args[1])\n\tcase Conjable:\n\t\treturn c.Conj(args[1])\n\tcase Seq:\n\t\treturn c.Cons(args[1])\n\tdefault:\n\t\tpanic(RT.newError(\"conj's first argument must be a collection\"))\n\t}\n}\n\nvar procSeq Proc = func(args []Object) Object {\n\tcheckArity(args, 1, 1)\n\treturn ensureSeq(args, 0)\n}\n\nvar procIsInstance Proc = func(args []Object) Object {\n\tcheckArity(args, 2, 2)\n\tswitch t := args[0].(type) {\n\tcase *Type:\n\t\tif t.reflectType.Kind() == reflect.Interface {\n\t\t\treturn Bool{b: args[1].GetType().reflectType.Implements(t.reflectType)}\n\t\t} else {\n\t\t\treturn Bool{b: args[1].GetType().reflectType == t.reflectType}\n\t\t}\n\tdefault:\n\t\tpanic(RT.newError(\"First argument to instance? must be a type\"))\n\t}\n}\n\nvar procAssoc Proc = func(args []Object) Object {\n\treturn ensureMap(args, 0).Assoc(args[1], args[2])\n}\n\nvar procEquals Proc = func(args []Object) Object {\n\treturn Bool{b: args[0].Equals(args[1])}\n}\n\nvar procCount Proc = func(args []Object) Object {\n\tswitch obj := args[0].(type) {\n\tcase Counted:\n\t\treturn Int{i: obj.Count()}\n\tdefault:\n\t\ts := assertSeq(obj, \"count not supported on this type: \"+obj.GetType().ToString(false))\n\t\tc := 0\n\t\tfor !s.IsEmpty() {\n\t\t\tc++\n\t\t\ts = s.Rest()\n\t\t\tswitch obj := s.(type) {\n\t\t\tcase Counted:\n\t\t\t\treturn Int{i: c + obj.Count()}\n\t\t\t}\n\t\t}\n\t\treturn Int{i: c}\n\t}\n}\n\nvar procSubvec Proc = func(args []Object) Object {\n\t\/\/ TODO: implement proper Subvector structure\n\tv := args[0].(*Vector)\n\tstart := args[1].(Int).i\n\tend := args[2].(Int).i\n\tsubv := make([]Object, 0, end-start)\n\tfor i := start; i < end; i++ {\n\t\tsubv = append(subv, v.at(i))\n\t}\n\treturn NewVectorFrom(subv...)\n}\n\nvar procCast Proc = func(args []Object) Object {\n\tt := ensureType(args, 0)\n\tif t.reflectType.Kind() == reflect.Interface &&\n\t\targs[1].GetType().reflectType.Implements(t.reflectType) ||\n\t\targs[1].GetType().reflectType == t.reflectType {\n\t\treturn args[1]\n\t}\n\tpanic(RT.newError(\"Cannot cast \" + args[1].GetType().ToString(false) + \" to \" + t.ToString(false)))\n}\n\nvar procVector Proc = func(args []Object) Object {\n\treturn NewVectorFrom(args...)\n}\n\nvar procVec Proc = func(args []Object) Object {\n\treturn NewVectorFromSeq(ensureSeq(args, 0))\n}\n\nvar coreNamespace = GLOBAL_ENV.namespaces[MakeSymbol(\"gclojure.core\").name]\n\nfunc intern(name string, proc Proc) {\n\tcoreNamespace.intern(MakeSymbol(name)).value = proc\n}\n\nfunc init() {\n\tintern(\"list*\", procList)\n\tintern(\"cons*\", procCons)\n\tintern(\"first*\", procFirst)\n\tintern(\"next*\", procNext)\n\tintern(\"rest*\", procRest)\n\tintern(\"conj*\", procConj)\n\tintern(\"seq*\", procSeq)\n\tintern(\"instance?*\", procIsInstance)\n\tintern(\"assoc*\", procAssoc)\n\tintern(\"meta*\", procMeta)\n\tintern(\"with-meta*\", procWithMeta)\n\tintern(\"=*\", procEquals)\n\tintern(\"count*\", procCount)\n\tintern(\"subvec*\", procSubvec)\n\tintern(\"cast*\", procCast)\n\tintern(\"vector*\", procVector)\n\tintern(\"vec*\", procVec)\n\n\tintern(\"zero?\", procIsZero)\n\tintern(\"+\", procAdd)\n\tintern(\"-\", procSubtract)\n\tintern(\"*\", procMultiply)\n\tintern(\"\/\", procDivide)\n\tintern(\"ex-info\", procExInfo)\n\tintern(\"print\", procPrint)\n\tintern(\"set-macro*\", procSetMacro)\n\n\tcurrentNamespace := GLOBAL_ENV.currentNamespace\n\tGLOBAL_ENV.currentNamespace = coreNamespace\n\tprocessFile(\"core.clj\", EVAL)\n\tGLOBAL_ENV.currentNamespace = currentNamespace\n}\n<commit_msg>Remove unused code<commit_after>package main\n\nimport (\n\t\"reflect\"\n)\n\nfunc assertSeq(obj Object, msg string) Seq {\n\tswitch s := obj.(type) {\n\tcase Seq:\n\t\treturn s\n\tcase Sequenceable:\n\t\treturn s.Seq()\n\tdefault:\n\t\tpanic(RT.newError(msg))\n\t}\n}\n\nfunc ensureSeq(args []Object, index int) Seq {\n\tswitch s := args[index].(type) {\n\tcase Seq:\n\t\treturn s\n\tcase Sequenceable:\n\t\treturn s.Seq()\n\tdefault:\n\t\tpanic(RT.newArgTypeError(index, \"Seq\"))\n\t}\n}\n\nfunc ensureNumber(args []Object, index int) Number {\n\tswitch obj := args[index].(type) {\n\tcase Number:\n\t\treturn obj\n\tdefault:\n\t\tpanic(RT.newArgTypeError(index, \"Number\"))\n\t}\n}\n\nfunc ensureString(args []Object, index int) String {\n\tswitch obj := args[index].(type) {\n\tcase String:\n\t\treturn obj\n\tdefault:\n\t\tpanic(RT.newArgTypeError(index, \"String\"))\n\t}\n}\n\nfunc ensureType(args []Object, index int) *Type {\n\tswitch obj := args[index].(type) {\n\tcase *Type:\n\t\treturn obj\n\tdefault:\n\t\tpanic(RT.newArgTypeError(index, \"Type\"))\n\t}\n}\n\nfunc ensureMap(args []Object, index int) *ArrayMap {\n\tswitch obj := args[index].(type) {\n\tcase *ArrayMap:\n\t\treturn obj\n\tdefault:\n\t\tpanic(RT.newArgTypeError(index, \"Map\"))\n\t}\n}\n\nfunc ensureMeta(args []Object, index int) Meta {\n\tswitch obj := args[index].(type) {\n\tcase Meta:\n\t\treturn obj\n\tdefault:\n\t\tpanic(RT.newArgTypeError(index, \"Meta\"))\n\t}\n}\n\nvar procMeta Proc = func(args []Object) Object {\n\tswitch obj := args[0].(type) {\n\tcase Meta:\n\t\tmeta := obj.GetMeta()\n\t\tif meta != nil {\n\t\t\treturn meta\n\t\t}\n\t}\n\treturn NIL\n}\n\nvar procWithMeta Proc = func(args []Object) Object {\n\tcheckArity(args, 2, 2)\n\treturn ensureMeta(args, 0).WithMeta(ensureMap(args, 1))\n}\n\nvar procIsZero Proc = func(args []Object) Object {\n\t\/\/ checkArity(args, 1, \"zero?\")\n\tn := ensureNumber(args, 0)\n\tops := GetOps(ensureNumber(args, 0))\n\treturn Bool{b: ops.IsZero(n)}\n}\n\nvar procAdd Proc = func(args []Object) Object {\n\tvar res Number = Int{i: 0}\n\tfor i, n := range args {\n\t\tops := GetOps(res).Combine(GetOps(n))\n\t\tres = ops.Add(res, ensureNumber(args, i))\n\t}\n\treturn res\n}\n\nvar procMultiply Proc = func(args []Object) Object {\n\tvar res Number = Int{i: 1}\n\tfor i, n := range args {\n\t\tops := GetOps(res).Combine(GetOps(n))\n\t\tres = ops.Multiply(res, ensureNumber(args, i))\n\t}\n\treturn res\n}\n\nvar procSubtract Proc = func(args []Object) Object {\n\tif len(args) == 0 {\n\t\tpanicArity(0)\n\t}\n\tvar res Number = Int{i: 0}\n\tstart := 0\n\tif len(args) > 1 {\n\t\tres = ensureNumber(args, 0)\n\t\tstart = 1\n\t}\n\tfor i := start; i < len(args); i++ {\n\t\tops := GetOps(res).Combine(GetOps(args[i]))\n\t\tres = ops.Subtract(res, ensureNumber(args, i))\n\t}\n\treturn res\n}\n\nvar procDivide Proc = func(args []Object) Object {\n\tif len(args) == 0 {\n\t\tpanicArity(0)\n\t}\n\tvar res Number = Int{i: 1}\n\tstart := 0\n\tif len(args) > 1 {\n\t\tres = ensureNumber(args, 0)\n\t\tstart = 1\n\t}\n\tfor i := start; i < len(args); i++ {\n\t\tops := GetOps(res).Combine(GetOps(args[i]))\n\t\tres = ops.Divide(res, ensureNumber(args, i))\n\t}\n\treturn res\n}\n\nvar procExInfo Proc = func(args []Object) Object {\n\tcheckArity(args, 2, 2)\n\treturn &ExInfo{\n\t\tmsg: ensureString(args, 0),\n\t\tdata: ensureMap(args, 1),\n\t\trt: RT.clone(),\n\t}\n}\n\nvar procPrint Proc = func(args []Object) Object {\n\tn := len(args)\n\tif n > 0 {\n\t\tfor _, arg := range args[:n-1] {\n\t\t\tprint(arg.ToString(false))\n\t\t\tprint(\" \")\n\t\t}\n\t\tprint(args[n-1].ToString(false))\n\t}\n\treturn NIL\n}\n\nvar procSetMacro Proc = func(args []Object) Object {\n\tvr := args[0].(*Var)\n\tvr.isMacro = true\n\treturn vr\n}\n\nvar procList Proc = func(args []Object) Object {\n\treturn NewListFrom(args...)\n}\n\nvar procCons Proc = func(args []Object) Object {\n\tcheckArity(args, 2, 2)\n\ts := ensureSeq(args, 1)\n\treturn s.Cons(args[0])\n}\n\nvar procFirst Proc = func(args []Object) Object {\n\tcheckArity(args, 1, 1)\n\ts := ensureSeq(args, 0)\n\treturn s.First()\n}\n\nvar procNext Proc = func(args []Object) Object {\n\tcheckArity(args, 1, 1)\n\ts := ensureSeq(args, 0)\n\tres := s.Rest()\n\tif res.IsEmpty() {\n\t\treturn NIL\n\t}\n\treturn res\n}\n\nvar procRest Proc = func(args []Object) Object {\n\tcheckArity(args, 1, 1)\n\ts := ensureSeq(args, 0)\n\treturn s.Rest()\n}\n\nvar procConj Proc = func(args []Object) Object {\n\tswitch c := args[0].(type) {\n\tcase Nil:\n\t\treturn NewListFrom(args[1])\n\tcase Conjable:\n\t\treturn c.Conj(args[1])\n\tcase Seq:\n\t\treturn c.Cons(args[1])\n\tdefault:\n\t\tpanic(RT.newError(\"conj's first argument must be a collection\"))\n\t}\n}\n\nvar procSeq Proc = func(args []Object) Object {\n\tcheckArity(args, 1, 1)\n\treturn ensureSeq(args, 0)\n}\n\nvar procIsInstance Proc = func(args []Object) Object {\n\tcheckArity(args, 2, 2)\n\tswitch t := args[0].(type) {\n\tcase *Type:\n\t\tif t.reflectType.Kind() == reflect.Interface {\n\t\t\treturn Bool{b: args[1].GetType().reflectType.Implements(t.reflectType)}\n\t\t} else {\n\t\t\treturn Bool{b: args[1].GetType().reflectType == t.reflectType}\n\t\t}\n\tdefault:\n\t\tpanic(RT.newError(\"First argument to instance? must be a type\"))\n\t}\n}\n\nvar procAssoc Proc = func(args []Object) Object {\n\treturn ensureMap(args, 0).Assoc(args[1], args[2])\n}\n\nvar procEquals Proc = func(args []Object) Object {\n\treturn Bool{b: args[0].Equals(args[1])}\n}\n\nvar procCount Proc = func(args []Object) Object {\n\tswitch obj := args[0].(type) {\n\tcase Counted:\n\t\treturn Int{i: obj.Count()}\n\tdefault:\n\t\ts := assertSeq(obj, \"count not supported on this type: \"+obj.GetType().ToString(false))\n\t\tc := 0\n\t\tfor !s.IsEmpty() {\n\t\t\tc++\n\t\t\ts = s.Rest()\n\t\t\tswitch obj := s.(type) {\n\t\t\tcase Counted:\n\t\t\t\treturn Int{i: c + obj.Count()}\n\t\t\t}\n\t\t}\n\t\treturn Int{i: c}\n\t}\n}\n\nvar procSubvec Proc = func(args []Object) Object {\n\t\/\/ TODO: implement proper Subvector structure\n\tv := args[0].(*Vector)\n\tstart := args[1].(Int).i\n\tend := args[2].(Int).i\n\tsubv := make([]Object, 0, end-start)\n\tfor i := start; i < end; i++ {\n\t\tsubv = append(subv, v.at(i))\n\t}\n\treturn NewVectorFrom(subv...)\n}\n\nvar procCast Proc = func(args []Object) Object {\n\tt := ensureType(args, 0)\n\tif t.reflectType.Kind() == reflect.Interface &&\n\t\targs[1].GetType().reflectType.Implements(t.reflectType) ||\n\t\targs[1].GetType().reflectType == t.reflectType {\n\t\treturn args[1]\n\t}\n\tpanic(RT.newError(\"Cannot cast \" + args[1].GetType().ToString(false) + \" to \" + t.ToString(false)))\n}\n\nvar procVec Proc = func(args []Object) Object {\n\treturn NewVectorFromSeq(ensureSeq(args, 0))\n}\n\nvar coreNamespace = GLOBAL_ENV.namespaces[MakeSymbol(\"gclojure.core\").name]\n\nfunc intern(name string, proc Proc) {\n\tcoreNamespace.intern(MakeSymbol(name)).value = proc\n}\n\nfunc init() {\n\tintern(\"list*\", procList)\n\tintern(\"cons*\", procCons)\n\tintern(\"first*\", procFirst)\n\tintern(\"next*\", procNext)\n\tintern(\"rest*\", procRest)\n\tintern(\"conj*\", procConj)\n\tintern(\"seq*\", procSeq)\n\tintern(\"instance?*\", procIsInstance)\n\tintern(\"assoc*\", procAssoc)\n\tintern(\"meta*\", procMeta)\n\tintern(\"with-meta*\", procWithMeta)\n\tintern(\"=*\", procEquals)\n\tintern(\"count*\", procCount)\n\tintern(\"subvec*\", procSubvec)\n\tintern(\"cast*\", procCast)\n\tintern(\"vec*\", procVec)\n\n\tintern(\"zero?\", procIsZero)\n\tintern(\"+\", procAdd)\n\tintern(\"-\", procSubtract)\n\tintern(\"*\", procMultiply)\n\tintern(\"\/\", procDivide)\n\tintern(\"ex-info\", procExInfo)\n\tintern(\"print\", procPrint)\n\tintern(\"set-macro*\", procSetMacro)\n\n\tcurrentNamespace := GLOBAL_ENV.currentNamespace\n\tGLOBAL_ENV.currentNamespace = coreNamespace\n\tprocessFile(\"core.clj\", EVAL)\n\tGLOBAL_ENV.currentNamespace = currentNamespace\n}\n<|endoftext|>"} {"text":"<commit_before>package proxy\n\nimport (\n\t\"errors\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/koding\/websocketproxy\"\n)\n\ntype ProxyOptions struct {\n\t\/\/ Number of times a request should be tried\n\tRetries int\n\n\t\/\/ Period to wait between retries\n\tPeriod time.Duration\n\n\t\/\/ Returns a url that we should proxy to for a given request\n\tBalancer func(req *http.Request) (string, error)\n\n\t\/\/ A static backend to route to\n\tBackend string\n}\n\ntype Proxy struct {\n\t*ProxyOptions\n\n\t\/\/ Http proxy\n\thttpProxy http.Handler\n\n\t\/\/ Websocket proxy\n\twebsocketProxy http.Handler\n}\n\n\/\/ New returns a new Proxy instance based on the provided ProxyOptions\n\/\/ either 'Backend' (static) or 'Balancer' must be provided\nfunc New(opts ProxyOptions) (*Proxy, error) {\n\t\/\/ Validate Balancer and Backend options\n\tif opts.Balancer == nil {\n\t\tif opts.Backend == \"\" {\n\t\t\treturn nil, errors.New(\"Please provide a Backend or a Balancer\")\n\t\t} else if err := validateUrl(opts.Backend); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Default for Retries\n\tif opts.Retries == 0 {\n\t\topts.Retries = 1\n\t}\n\n\t\/\/ Default for Period\n\tif opts.Period == 0 {\n\t\topts.Period = 100 * time.Millisecond\n\t}\n\n\tp := &Proxy{\n\t\tProxyOptions: &opts,\n\t}\n\n\treturn p.init(), nil\n}\n\n\/\/ ServeHTTP allows us to comply to the http.Handler interface\nfunc (p *Proxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\tif isWebsocket(req) {\n\t\t\/\/ we don't use https explicitly, ssl termination is done here\n\t\treq.URL.Scheme = \"ws\"\n\t\tp.websocketProxy.ServeHTTP(rw, req)\n\t\treturn\n\t}\n\n\tp.httpProxy.ServeHTTP(rw, req)\n}\n\n\/\/ init sets up proxies and other stuff based on options\nfunc (p *Proxy) init() *Proxy {\n\t\/\/ Setup http proxy\n\tp.httpProxy = &httputil.ReverseProxy{\n\t\tDirector: p.director,\n\t}\n\n\t\/\/ Setup websocket proxy\n\tp.websocketProxy = &websocketproxy.WebsocketProxy{\n\t\tBackend: func(req *http.Request) *url.URL {\n\t\t\turl, _ := p.backend(req)\n\t\t\treturn url\n\t\t},\n\t\tUpgrader: &websocket.Upgrader{\n\t\t\tReadBufferSize: 4096,\n\t\t\tWriteBufferSize: 4096,\n\t\t\tCheckOrigin: func(r *http.Request) bool {\n\t\t\t\treturn true\n\t\t\t},\n\t\t},\n\t}\n\n\treturn p\n}\n\n\/\/ director rewrites a http.Request to route to the correct host\nfunc (p *Proxy) director(req *http.Request) {\n\turl, err := p.backend(req)\n\tif url == nil || err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Rewrite outgoing request url\n\treq.URL.Scheme = url.Scheme\n\treq.URL.Host = url.Host\n\treq.URL.Path = url.Path\n\n\treq.Host = url.Host\n}\n\n\/\/ backend parses the result of getBackend and ensures it's validity\nfunc (p *Proxy) backend(req *http.Request) (*url.URL, error) {\n\trawurl, err := p.getBackend(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := validateUrl(rawurl); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn url.Parse(rawurl)\n}\n\n\/\/ getBackend gets the backend selected by the balancer or the static one set by the 'Backend' attribute\nfunc (p *Proxy) getBackend(req *http.Request) (string, error) {\n\tif p.Balancer == nil && p.Backend != \"\" {\n\t\treturn p.Backend, nil\n\t}\n\treturn p.Balancer(req)\n}\n\n\/\/ validateUrl generates an error if the the url isn't absolute or valid\nfunc validateUrl(rawurl string) error {\n\tparsed, err := url.Parse(rawurl)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Ensure url is absolute\n\tif !parsed.IsAbs() {\n\t\treturn errors.New(\"Proxy must only proxy to absolute URLs\")\n\t}\n\n\t\/\/ All is good\n\treturn nil\n}\n\n\/\/ isWebsocket checks wether the incoming request is a part of websocket\n\/\/ handshake\nfunc isWebsocket(req *http.Request) bool {\n\tif strings.ToLower(req.Header.Get(\"Upgrade\")) != \"websocket\" ||\n\t\t!strings.Contains(strings.ToLower(req.Header.Get(\"Connection\")), \"upgrade\") {\n\t\treturn false\n\t}\n\treturn true\n}\n<commit_msg>Add websocketScheme normalization func<commit_after>package proxy\n\nimport (\n\t\"errors\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/koding\/websocketproxy\"\n)\n\ntype ProxyOptions struct {\n\t\/\/ Number of times a request should be tried\n\tRetries int\n\n\t\/\/ Period to wait between retries\n\tPeriod time.Duration\n\n\t\/\/ Returns a url that we should proxy to for a given request\n\tBalancer func(req *http.Request) (string, error)\n\n\t\/\/ A static backend to route to\n\tBackend string\n}\n\ntype Proxy struct {\n\t*ProxyOptions\n\n\t\/\/ Http proxy\n\thttpProxy http.Handler\n\n\t\/\/ Websocket proxy\n\twebsocketProxy http.Handler\n}\n\n\/\/ New returns a new Proxy instance based on the provided ProxyOptions\n\/\/ either 'Backend' (static) or 'Balancer' must be provided\nfunc New(opts ProxyOptions) (*Proxy, error) {\n\t\/\/ Validate Balancer and Backend options\n\tif opts.Balancer == nil {\n\t\tif opts.Backend == \"\" {\n\t\t\treturn nil, errors.New(\"Please provide a Backend or a Balancer\")\n\t\t} else if err := validateUrl(opts.Backend); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ Default for Retries\n\tif opts.Retries == 0 {\n\t\topts.Retries = 1\n\t}\n\n\t\/\/ Default for Period\n\tif opts.Period == 0 {\n\t\topts.Period = 100 * time.Millisecond\n\t}\n\n\tp := &Proxy{\n\t\tProxyOptions: &opts,\n\t}\n\n\treturn p.init(), nil\n}\n\n\/\/ ServeHTTP allows us to comply to the http.Handler interface\nfunc (p *Proxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\tif isWebsocket(req) {\n\t\t\/\/ we don't use https explicitly, ssl termination is done here\n\t\treq.URL.Scheme = \"ws\"\n\t\tp.websocketProxy.ServeHTTP(rw, req)\n\t\treturn\n\t}\n\n\tp.httpProxy.ServeHTTP(rw, req)\n}\n\n\/\/ init sets up proxies and other stuff based on options\nfunc (p *Proxy) init() *Proxy {\n\t\/\/ Setup http proxy\n\tp.httpProxy = &httputil.ReverseProxy{\n\t\tDirector: p.director,\n\t}\n\n\t\/\/ Setup websocket proxy\n\tp.websocketProxy = &websocketproxy.WebsocketProxy{\n\t\tBackend: func(req *http.Request) *url.URL {\n\t\t\turl, _ := p.backend(req)\n\t\t\treturn url\n\t\t},\n\t\tUpgrader: &websocket.Upgrader{\n\t\t\tReadBufferSize: 4096,\n\t\t\tWriteBufferSize: 4096,\n\t\t\tCheckOrigin: func(r *http.Request) bool {\n\t\t\t\treturn true\n\t\t\t},\n\t\t},\n\t}\n\n\treturn p\n}\n\n\/\/ director rewrites a http.Request to route to the correct host\nfunc (p *Proxy) director(req *http.Request) {\n\turl, err := p.backend(req)\n\tif url == nil || err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Rewrite outgoing request url\n\treq.URL.Scheme = url.Scheme\n\treq.URL.Host = url.Host\n\treq.URL.Path = url.Path\n\n\treq.Host = url.Host\n}\n\n\/\/ backend parses the result of getBackend and ensures it's validity\nfunc (p *Proxy) backend(req *http.Request) (*url.URL, error) {\n\trawurl, err := p.getBackend(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := validateUrl(rawurl); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn url.Parse(rawurl)\n}\n\n\/\/ getBackend gets the backend selected by the balancer or the static one set by the 'Backend' attribute\nfunc (p *Proxy) getBackend(req *http.Request) (string, error) {\n\tif p.Balancer == nil && p.Backend != \"\" {\n\t\treturn p.Backend, nil\n\t}\n\treturn p.Balancer(req)\n}\n\n\/\/ validateUrl generates an error if the the url isn't absolute or valid\nfunc validateUrl(rawurl string) error {\n\tparsed, err := url.Parse(rawurl)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Ensure url is absolute\n\tif !parsed.IsAbs() {\n\t\treturn errors.New(\"Proxy must only proxy to absolute URLs\")\n\t}\n\n\t\/\/ All is good\n\treturn nil\n}\n\n\/\/ websocketScheme picks a suitable websocket scheme\nfunc websocketScheme(scheme string) string {\n\tswitch scheme {\n\t\tcase \"http\":\n\t\t\treturn \"ws\"\n\t\tcase \"https\":\n\t\t\treturn \"wss\"\n\t\tcase \"ws\":\n\t\tcase \"wss\":\n\t\t\treturn scheme\n\t}\n\t\/\/ Default\n\treturn \"ws\"\n}\n\n\/\/ isWebsocket checks wether the incoming request is a part of websocket handshake\nfunc isWebsocket(req *http.Request) bool {\n\tif strings.ToLower(req.Header.Get(\"Upgrade\")) != \"websocket\" ||\n\t\t!strings.Contains(strings.ToLower(req.Header.Get(\"Connection\")), \"upgrade\") {\n\t\treturn false\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"flag\"\n \"fmt\"\n \"github.com\/miekg\/dns\"\n \"io\"\n \"log\"\n \"net\"\n \"os\"\n \"os\/signal\"\n \"strings\"\n \"syscall\"\n)\n\nvar zones map[string]net.IP\n\nfunc ProxyMsg(m *dns.Msg) *dns.Msg {\n if len(m.Question) == 0 {\n return nil\n }\n q := m.Question[0]\n\n ip, exists := zones[q.Name]\n if !exists {\n return nil\n }\n\n if q.Qtype != dns.TypeA {\n response := new(dns.Msg)\n response.SetReply(m)\n return response\n }\n\n response := new(dns.Msg)\n response.SetReply(m)\n\n rr := new(dns.A)\n rr.Hdr = dns.RR_Header{Name: q.Name, Rrtype: dns.TypeA,\n Class: dns.ClassINET, Ttl: 0}\n rr.A = ip.To4()\n response.Answer = append(m.Answer, rr)\n\n return response\n}\n\nfunc dnsHandler(w dns.ResponseWriter, m *dns.Msg) {\n if msg := ProxyMsg(m); msg != nil {\n log.Printf(\"Proxying request for %s IN A from %s\",\n msg.Question[0].Name, w.RemoteAddr())\n w.WriteMsg(msg)\n return\n }\n\n c := new(dns.Client)\n c.Net = \"udp\"\n r, _, err := c.Exchange(m, \"8.8.8.8:53\")\n if err != nil {\n log.Print(err)\n return\n }\n w.WriteMsg(r)\n}\n\nfunc tcpProxy(local net.Conn, remoteAddr string) {\n remote, err := net.Dial(\"tcp\", remoteAddr)\n if err != nil {\n log.Printf(\"Failed to connect to %s: %s\", remoteAddr, err)\n return\n }\n go io.Copy(local, remote)\n go io.Copy(remote, local)\n}\n\nfunc listenAndServe() {\n go func() {\n err := dns.ListenAndServe(\":53\", \"udp\", dns.HandlerFunc(dnsHandler))\n if err != nil {\n log.Fatal(err)\n }\n }()\n go func() {\n err := dns.ListenAndServe(\":53\", \"tcp\", dns.HandlerFunc(dnsHandler))\n if err != nil {\n log.Fatal(err)\n }\n }()\n go func() {\n listener, err := net.Listen(\"tcp\", \":80\")\n if err != nil {\n log.Fatal(err)\n }\n for {\n conn, err := listener.Accept()\n if err != nil {\n log.Print(err)\n }\n go tcpProxy(conn, \"movies.netflix.com:80\")\n }\n }()\n go func() {\n listener, err := net.Listen(\"tcp\", \":443\")\n if err != nil {\n log.Fatal(err)\n }\n for {\n conn, err := listener.Accept()\n if err != nil {\n log.Print(err)\n }\n go tcpProxy(conn, \"cbp-us.nccp.netflix.com:443\")\n }\n }()\n}\n\nfunc printfErr(format string, a ...interface{}) {\n fmt.Fprintf(os.Stderr, format+\"\\n\", a...)\n os.Exit(2)\n}\n\nfunc main() {\n flag.Parse()\n if flag.NArg() == 0 {\n printfErr(\"usage: %s zone:ip [zone:ip ...]\", os.Args[0])\n }\n\n zones = make(map[string]net.IP, flag.NArg())\n for _, arg := range flag.Args() {\n zoneAndIp := strings.SplitN(arg, \":\", 2)\n if len(zoneAndIp) != 2 {\n printfErr(\"Invalid zone mapping: %s\", arg)\n }\n zone := dns.Fqdn(zoneAndIp[0])\n ip := net.ParseIP(zoneAndIp[1])\n if ip == nil {\n printfErr(\"Invalid IP address: %s\", zoneAndIp[1])\n }\n zones[zone] = ip\n log.Printf(\"Answering %s with %s\", zone, ip)\n }\n\n listenAndServe()\n\n sig := make(chan os.Signal)\n signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)\n for {\n select {\n case s := <-sig:\n log.Fatalf(\"Signal (%d) received, stopping\\n\", s)\n }\n }\n}\n<commit_msg>Ensure resources are cleaned up<commit_after>package main\n\nimport (\n \"flag\"\n \"fmt\"\n \"github.com\/miekg\/dns\"\n \"io\"\n \"log\"\n \"net\"\n \"os\"\n \"os\/signal\"\n \"strings\"\n \"syscall\"\n)\n\nvar zones map[string]net.IP\n\nfunc ProxyMsg(m *dns.Msg) *dns.Msg {\n if len(m.Question) == 0 {\n return nil\n }\n q := m.Question[0]\n\n ip, exists := zones[q.Name]\n if !exists {\n return nil\n }\n\n if q.Qtype != dns.TypeA {\n response := new(dns.Msg)\n response.SetReply(m)\n return response\n }\n\n response := new(dns.Msg)\n response.SetReply(m)\n\n rr := new(dns.A)\n rr.Hdr = dns.RR_Header{Name: q.Name, Rrtype: dns.TypeA,\n Class: dns.ClassINET, Ttl: 0}\n rr.A = ip.To4()\n response.Answer = append(m.Answer, rr)\n\n return response\n}\n\nfunc dnsHandler(w dns.ResponseWriter, m *dns.Msg) {\n if msg := ProxyMsg(m); msg != nil {\n log.Printf(\"Proxying request for %s IN A from %s\",\n msg.Question[0].Name, w.RemoteAddr())\n w.WriteMsg(msg)\n return\n }\n\n c := new(dns.Client)\n c.Net = \"udp\"\n r, _, err := c.Exchange(m, \"8.8.8.8:53\")\n if err != nil {\n log.Print(err)\n return\n }\n w.WriteMsg(r)\n}\n\nfunc copy(dst io.ReadWriteCloser, src io.ReadWriteCloser) {\n if _, err := io.Copy(dst, src); err != nil {\n log.Print(err)\n }\n dst.Close()\n src.Close()\n}\n\nfunc handleConn(local net.Conn, remoteAddr string) {\n remote, err := net.Dial(\"tcp\", remoteAddr)\n if err != nil {\n log.Printf(\"Failed to connect to %s: %s\", remoteAddr, err)\n return\n }\n go copy(local, remote)\n go copy(remote, local)\n}\n\nfunc tcpProxy(listenAddr string, remoteAddr string) {\n listener, err := net.Listen(\"tcp\", listenAddr)\n if err != nil {\n log.Fatal(err)\n }\n defer listener.Close()\n for {\n conn, err := listener.Accept()\n if err != nil {\n log.Print(err)\n }\n go handleConn(conn, remoteAddr)\n }\n}\n\nfunc listenAndServe() {\n go func() {\n err := dns.ListenAndServe(\":53\", \"udp\", dns.HandlerFunc(dnsHandler))\n if err != nil {\n log.Fatal(err)\n }\n }()\n go func() {\n err := dns.ListenAndServe(\":53\", \"tcp\", dns.HandlerFunc(dnsHandler))\n if err != nil {\n log.Fatal(err)\n }\n }()\n go tcpProxy(\":80\", \"movies.netflix.com:80\")\n go tcpProxy(\":443\", \"cbp-us.nccp.netflix.com:443\")\n}\n\nfunc printfErr(format string, a ...interface{}) {\n fmt.Fprintf(os.Stderr, format+\"\\n\", a...)\n os.Exit(2)\n}\n\nfunc main() {\n flag.Parse()\n if flag.NArg() == 0 {\n printfErr(\"usage: %s zone:ip [zone:ip ...]\", os.Args[0])\n }\n\n zones = make(map[string]net.IP, flag.NArg())\n for _, arg := range flag.Args() {\n zoneAndIp := strings.SplitN(arg, \":\", 2)\n if len(zoneAndIp) != 2 {\n printfErr(\"Invalid zone mapping: %s\", arg)\n }\n zone := dns.Fqdn(zoneAndIp[0])\n ip := net.ParseIP(zoneAndIp[1])\n if ip == nil {\n printfErr(\"Invalid IP address: %s\", zoneAndIp[1])\n }\n zones[zone] = ip\n log.Printf(\"Answering %s with %s\", zone, ip)\n }\n\n listenAndServe()\n\n sig := make(chan os.Signal)\n signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)\n for {\n select {\n case s := <-sig:\n log.Fatalf(\"Signal (%d) received, stopping\\n\", s)\n }\n }\n}\n<|endoftext|>"} {"text":"<commit_before>package httplog\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\t\/* This default icon is empty with a long lived cache *\/\n\tDefaultFavIcon FavIcon = defaultFavIcon{}\n)\n\ntype defaultFavIcon struct {\n}\n\nfunc (dfi defaultFavIcon) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tLogRequest(r, 200)\n\tw.Header().Set(\"Cache-Control\", \"max-age=315360000\")\n}\n\n\/* simple interface for a favicon *\/\ntype FavIcon interface {\n\tServeHTTP(w http.ResponseWriter, r *http.Request)\n}\n\n\/\/ for debugging request headers\nfunc LogHeaders(r *http.Request) {\n\tfmt.Printf(\"HEADERS:\\n\")\n\tfor k, v := range r.Header {\n\t\tfmt.Printf(\"\\t%s\\n\", k)\n\t\tfor i, _ := range v {\n\t\t\tfmt.Printf(\"\\t\\t%s\\n\", v[i])\n\t\t}\n\t}\n}\n\n\/* kindof a common log type output *\/\nfunc LogRequest(r *http.Request, statusCode int) {\n\tvar addr string\n\tvar user_agent string\n\n\tuser_agent = \"\"\n\taddr = RealIP(r)\n\n\tfor k, v := range r.Header {\n\t\tif k == \"User-Agent\" {\n\t\t\tuser_agent = strings.Join(v, \" \")\n\t\t}\n\t\tif k == \"X-Forwarded-For\" {\n\t\t\taddr = strings.Join(v, \" \")\n\t\t}\n\t}\n\n\tfmt.Printf(\"%s - - [%s] \\\"%s %s\\\" \\\"%s\\\" %d %d\\n\",\n\t\taddr,\n\t\ttime.Now().Format(time.RFC1123Z),\n\t\tr.Method,\n\t\tr.URL.String(),\n\t\tuser_agent,\n\t\tstatusCode,\n\t\tr.ContentLength)\n}\n\nfunc RealIP(r *http.Request) string {\n\trip := RealIPs(r)\n\treturn rip[len(rip)]\n}\n\nfunc RealIPs(r *http.Request) (ips []string) {\n\tip := r.RemoteAddr\n\n\tport_pos := strings.LastIndex(ip, \":\")\n\tif port_pos != -1 {\n\t\tip = ip[0:port_pos]\n\t}\n\tif ip != \"\" {\n\t\tips = append(ips, ip)\n\t}\n\n\tval := r.Header.Get(\"X-Forwarded-For\")\n\tif val != \"\" {\n\t\tfor _, ip := range strings.Split(val, \", \") {\n\t\t\tips = append(ips, ip)\n\t\t}\n\t}\n\treturn ips\n}\n<commit_msg>httplog: off by one ...<commit_after>package httplog\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\t\/* This default icon is empty with a long lived cache *\/\n\tDefaultFavIcon FavIcon = defaultFavIcon{}\n)\n\ntype defaultFavIcon struct {\n}\n\nfunc (dfi defaultFavIcon) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tLogRequest(r, 200)\n\tw.Header().Set(\"Cache-Control\", \"max-age=315360000\")\n}\n\n\/* simple interface for a favicon *\/\ntype FavIcon interface {\n\tServeHTTP(w http.ResponseWriter, r *http.Request)\n}\n\n\/\/ for debugging request headers\nfunc LogHeaders(r *http.Request) {\n\tfmt.Printf(\"HEADERS:\\n\")\n\tfor k, v := range r.Header {\n\t\tfmt.Printf(\"\\t%s\\n\", k)\n\t\tfor i, _ := range v {\n\t\t\tfmt.Printf(\"\\t\\t%s\\n\", v[i])\n\t\t}\n\t}\n}\n\n\/* kindof a common log type output *\/\nfunc LogRequest(r *http.Request, statusCode int) {\n\tvar addr string\n\tvar user_agent string\n\n\tuser_agent = \"\"\n\taddr = RealIP(r)\n\n\tfor k, v := range r.Header {\n\t\tif k == \"User-Agent\" {\n\t\t\tuser_agent = strings.Join(v, \" \")\n\t\t}\n\t\tif k == \"X-Forwarded-For\" {\n\t\t\taddr = strings.Join(v, \" \")\n\t\t}\n\t}\n\n\tfmt.Printf(\"%s - - [%s] \\\"%s %s\\\" \\\"%s\\\" %d %d\\n\",\n\t\taddr,\n\t\ttime.Now().Format(time.RFC1123Z),\n\t\tr.Method,\n\t\tr.URL.String(),\n\t\tuser_agent,\n\t\tstatusCode,\n\t\tr.ContentLength)\n}\n\nfunc RealIP(r *http.Request) string {\n\trip := RealIPs(r)\n\treturn rip[len(rip)-1]\n}\n\nfunc RealIPs(r *http.Request) (ips []string) {\n\tip := r.RemoteAddr\n\n\tport_pos := strings.LastIndex(ip, \":\")\n\tif port_pos != -1 {\n\t\tip = ip[0:port_pos]\n\t}\n\tif ip != \"\" {\n\t\tips = append(ips, ip)\n\t}\n\n\tval := r.Header.Get(\"X-Forwarded-For\")\n\tif val != \"\" {\n\t\tfor _, ip := range strings.Split(val, \", \") {\n\t\t\tips = append(ips, ip)\n\t\t}\n\t}\n\treturn ips\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/*\nPackage pango provides a type-safe way to construct pango markup.\nUsing nested Span and Text nodes, pango formatted output can be easily constructed\nwith compile-time validation of nesting and automatic escaping.\n\nFor example, to construct pango markup for:\n <span color=\"#ff0000\">Red <span weight=\"bold\">Bold Text<\/span><\/span>\n\nthe go code would be:\n pango.New(\n pango.Text(\"Red \"),\n pango.Text(\"Bold Text\").Bold()).\n Color(colors.Hex(\"#ff0000\"))\n\nor:\n pango.Text(\"Red \").\n Color(colors.Hex(\"#ff0000\")).\n Append(pango.Text(\"Bold Text\").Bold())\n*\/\npackage pango\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"html\"\n\n\t\"github.com\/soumya92\/barista\/bar\"\n)\n\ntype nodeType int\n\nconst (\n\t\/\/ ntElement is an element node with attributes and\/or children.\n\tntElement nodeType = iota\n\t\/\/ ntText is a text node with no markup or children.\n\tntText\n\t\/\/ ntSizer is a <big> or <small> tag. It has no attributes,\n\t\/\/ and must be the only child of its parent.\n\t\/\/ It exists to support calls like:\n\t\/\/ Text(\"x\").Size(10.0).Smaller().Smaller().AppendText(\"y\")\n\t\/\/ which would otherwise produce:\n\t\/\/ <span size=\"smaller\">xy<\/span>\n\t\/\/ but should actually produce:\n\t\/\/ <span size=\"10240\"><small><small>xy<\/small><\/small><\/span>\n\tntSizer\n)\n\n\/\/ Node represents a node in a pango \"document\".\ntype Node struct {\n\tnodeType nodeType\n\t\/\/ For element nodes, this holds the tag name (\"\" = 'markup' node).\n\t\/\/ For text nodes, this holds the text content.\n\tcontent string\n\tchildren []*Node\n\tattributes map[string]string\n}\n\n\/\/ Append adds one or more nodes as children of the current node.\n\/\/ The new nodes will inherit styles by virtue of being descendants,\n\/\/ to insert them *adjacent* to the current node, use .Parent().Append(...).\nfunc (n *Node) Append(nodes ...*Node) *Node {\n\tvar insertPoint = n\n\tfor len(insertPoint.children) == 1 &&\n\t\tinsertPoint.children[0].nodeType == ntSizer {\n\t\tinsertPoint = insertPoint.children[0]\n\t}\n\tfor _, node := range nodes {\n\t\tif node.nodeType == ntElement && node.content == \"\" {\n\t\t\t\/\/ Collapse empty element nodes when appending, to reduce nesting.\n\t\t\tinsertPoint.children = append(insertPoint.children, node.children...)\n\t\t} else {\n\t\t\tinsertPoint.children = append(insertPoint.children, node)\n\t\t}\n\t}\n\treturn n\n}\n\n\/\/ AppendText is a shortcut for Append(pango.Text(...), pango.Text(...), ...)\nfunc (n *Node) AppendText(texts ...string) *Node {\n\tnodes := make([]*Node, len(texts))\n\tfor i, t := range texts {\n\t\tnodes[i] = &Node{nodeType: ntText, content: t}\n\t}\n\treturn n.Append(nodes...)\n}\n\n\/\/ AppendTextf is a shortcut for Append(pango.Textf(...))\nfunc (n *Node) AppendTextf(format string, args ...interface{}) *Node {\n\treturn n.Append(&Node{\n\t\tnodeType: ntText,\n\t\tcontent: fmt.Sprintf(format, args...),\n\t})\n}\n\n\/\/ Concat adds the given nodes as siblings rather than children of the\n\/\/ current node, and returns a wrapping node for further operations.\n\/\/\n\/\/ For example,\n\/\/ Text(\"c\").Condensed().Color(red).Concat(Text(\"foo\")).UnderlineError()\n\/\/ will create\n\/\/ <span underline='error'><span stretch='condensed' color='#ff0000'>c<\/span>foo<\/span>\n\/\/ where the appended \"foo\" is not condensed or red, and everything is underlined.\nfunc (n *Node) Concat(nodes ...*Node) *Node {\n\tif n.nodeType != ntElement || n.content != \"\" {\n\t\texistingNode := *n\n\t\tn.nodeType = ntElement\n\t\tn.attributes = nil\n\t\tn.content = \"\"\n\t\tn.children = []*Node{&existingNode}\n\t}\n\treturn n.Append(nodes...)\n}\n\n\/\/ Pango returns a pango-formatted version of the node.\nfunc (n *Node) Pango() string {\n\tif n.nodeType == ntText {\n\t\treturn html.EscapeString(n.content)\n\t}\n\tvar out bytes.Buffer\n\tif n.content != \"\" {\n\t\tout.WriteString(\"<\")\n\t\tout.WriteString(n.content)\n\t\tfor attrName, attrVal := range n.attributes {\n\t\t\tout.WriteString(\" \")\n\t\t\tout.WriteString(attrName)\n\t\t\tout.WriteString(\"='\")\n\t\t\tout.WriteString(html.EscapeString(attrVal))\n\t\t\tout.WriteString(\"'\")\n\t\t}\n\t\tout.WriteString(\">\")\n\t}\n\tfor _, c := range n.children {\n\t\tout.WriteString(c.Pango())\n\t}\n\tif n.content != \"\" {\n\t\tout.WriteString(\"<\/\")\n\t\tout.WriteString(n.content)\n\t\tout.WriteString(\">\")\n\t}\n\treturn out.String()\n}\n\n\/\/ Segments implements bar.Output for a single pango Node.\nfunc (n *Node) Segments() []*bar.Segment {\n\treturn []*bar.Segment{bar.PangoSegment(n.Pango())}\n}\n\n\/\/ New constructs a markup node that wraps the given Nodes.\nfunc New(children ...*Node) *Node {\n\treturn &Node{children: children}\n}\n\n\/\/ Text constructs a text node.\nfunc Text(s string) *Node {\n\t\/\/ Wrapped in a node to allow formatting, since formatting methods\n\t\/\/ don't work directly on text nodes.\n\treturn New(&Node{nodeType: ntText, content: s})\n}\n\n\/\/ Textf constructs a text node by interpolating arguments.\n\/\/ Note that it will escape both the format string and arguments,\n\/\/ so you should use pango constructs to add formatting.\n\/\/ i.e.,\n\/\/ Textf(\"<span color='%s'>%s<\/span>\", \"red\", \"text\")\n\/\/ won't give you red text.\nfunc Textf(format string, args ...interface{}) *Node {\n\treturn Text(fmt.Sprintf(format, args...))\n}\n<commit_msg>Update comment to reflect the Parent() -> Concat() change<commit_after>\/\/ Copyright 2017 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/*\nPackage pango provides a type-safe way to construct pango markup.\nUsing nested Span and Text nodes, pango formatted output can be easily constructed\nwith compile-time validation of nesting and automatic escaping.\n\nFor example, to construct pango markup for:\n <span color=\"#ff0000\">Red <span weight=\"bold\">Bold Text<\/span><\/span>\n\nthe go code would be:\n pango.New(\n pango.Text(\"Red \"),\n pango.Text(\"Bold Text\").Bold()).\n Color(colors.Hex(\"#ff0000\"))\n\nor:\n pango.Text(\"Red \").\n Color(colors.Hex(\"#ff0000\")).\n Append(pango.Text(\"Bold Text\").Bold())\n*\/\npackage pango\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"html\"\n\n\t\"github.com\/soumya92\/barista\/bar\"\n)\n\ntype nodeType int\n\nconst (\n\t\/\/ ntElement is an element node with attributes and\/or children.\n\tntElement nodeType = iota\n\t\/\/ ntText is a text node with no markup or children.\n\tntText\n\t\/\/ ntSizer is a <big> or <small> tag. It has no attributes,\n\t\/\/ and must be the only child of its parent.\n\t\/\/ It exists to support calls like:\n\t\/\/ Text(\"x\").Size(10.0).Smaller().Smaller().AppendText(\"y\")\n\t\/\/ which would otherwise produce:\n\t\/\/ <span size=\"smaller\">xy<\/span>\n\t\/\/ but should actually produce:\n\t\/\/ <span size=\"10240\"><small><small>xy<\/small><\/small><\/span>\n\tntSizer\n)\n\n\/\/ Node represents a node in a pango \"document\".\ntype Node struct {\n\tnodeType nodeType\n\t\/\/ For element nodes, this holds the tag name (\"\" = 'markup' node).\n\t\/\/ For text nodes, this holds the text content.\n\tcontent string\n\tchildren []*Node\n\tattributes map[string]string\n}\n\n\/\/ Append adds one or more nodes as children of the current node.\n\/\/ The new nodes will inherit styles by virtue of being descendants,\n\/\/ to insert them *adjacent* to the current node, use .Concat(...).\nfunc (n *Node) Append(nodes ...*Node) *Node {\n\tvar insertPoint = n\n\tfor len(insertPoint.children) == 1 &&\n\t\tinsertPoint.children[0].nodeType == ntSizer {\n\t\tinsertPoint = insertPoint.children[0]\n\t}\n\tfor _, node := range nodes {\n\t\tif node.nodeType == ntElement && node.content == \"\" {\n\t\t\t\/\/ Collapse empty element nodes when appending, to reduce nesting.\n\t\t\tinsertPoint.children = append(insertPoint.children, node.children...)\n\t\t} else {\n\t\t\tinsertPoint.children = append(insertPoint.children, node)\n\t\t}\n\t}\n\treturn n\n}\n\n\/\/ AppendText is a shortcut for Append(pango.Text(...), pango.Text(...), ...)\nfunc (n *Node) AppendText(texts ...string) *Node {\n\tnodes := make([]*Node, len(texts))\n\tfor i, t := range texts {\n\t\tnodes[i] = &Node{nodeType: ntText, content: t}\n\t}\n\treturn n.Append(nodes...)\n}\n\n\/\/ AppendTextf is a shortcut for Append(pango.Textf(...))\nfunc (n *Node) AppendTextf(format string, args ...interface{}) *Node {\n\treturn n.Append(&Node{\n\t\tnodeType: ntText,\n\t\tcontent: fmt.Sprintf(format, args...),\n\t})\n}\n\n\/\/ Concat adds the given nodes as siblings rather than children of the\n\/\/ current node, and returns a wrapping node for further operations.\n\/\/\n\/\/ For example,\n\/\/ Text(\"c\").Condensed().Color(red).Concat(Text(\"foo\")).UnderlineError()\n\/\/ will create\n\/\/ <span underline='error'><span stretch='condensed' color='#ff0000'>c<\/span>foo<\/span>\n\/\/ where the appended \"foo\" is not condensed or red, and everything is underlined.\nfunc (n *Node) Concat(nodes ...*Node) *Node {\n\tif n.nodeType != ntElement || n.content != \"\" {\n\t\texistingNode := *n\n\t\tn.nodeType = ntElement\n\t\tn.attributes = nil\n\t\tn.content = \"\"\n\t\tn.children = []*Node{&existingNode}\n\t}\n\treturn n.Append(nodes...)\n}\n\n\/\/ Pango returns a pango-formatted version of the node.\nfunc (n *Node) Pango() string {\n\tif n.nodeType == ntText {\n\t\treturn html.EscapeString(n.content)\n\t}\n\tvar out bytes.Buffer\n\tif n.content != \"\" {\n\t\tout.WriteString(\"<\")\n\t\tout.WriteString(n.content)\n\t\tfor attrName, attrVal := range n.attributes {\n\t\t\tout.WriteString(\" \")\n\t\t\tout.WriteString(attrName)\n\t\t\tout.WriteString(\"='\")\n\t\t\tout.WriteString(html.EscapeString(attrVal))\n\t\t\tout.WriteString(\"'\")\n\t\t}\n\t\tout.WriteString(\">\")\n\t}\n\tfor _, c := range n.children {\n\t\tout.WriteString(c.Pango())\n\t}\n\tif n.content != \"\" {\n\t\tout.WriteString(\"<\/\")\n\t\tout.WriteString(n.content)\n\t\tout.WriteString(\">\")\n\t}\n\treturn out.String()\n}\n\n\/\/ Segments implements bar.Output for a single pango Node.\nfunc (n *Node) Segments() []*bar.Segment {\n\treturn []*bar.Segment{bar.PangoSegment(n.Pango())}\n}\n\n\/\/ New constructs a markup node that wraps the given Nodes.\nfunc New(children ...*Node) *Node {\n\treturn &Node{children: children}\n}\n\n\/\/ Text constructs a text node.\nfunc Text(s string) *Node {\n\t\/\/ Wrapped in a node to allow formatting, since formatting methods\n\t\/\/ don't work directly on text nodes.\n\treturn New(&Node{nodeType: ntText, content: s})\n}\n\n\/\/ Textf constructs a text node by interpolating arguments.\n\/\/ Note that it will escape both the format string and arguments,\n\/\/ so you should use pango constructs to add formatting.\n\/\/ i.e.,\n\/\/ Textf(\"<span color='%s'>%s<\/span>\", \"red\", \"text\")\n\/\/ won't give you red text.\nfunc Textf(format string, args ...interface{}) *Node {\n\treturn Text(fmt.Sprintf(format, args...))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ The idx package provides a metadata index for metrics\n\npackage idx\n\nimport (\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tschema \"gopkg.in\/raintank\/schema.v1\"\n)\n\nvar (\n\tBothBranchAndLeaf = errors.New(\"node can't be both branch and leaf\")\n\tBranchUnderLeaf = errors.New(\"can't add branch under leaf\")\n\terrInvalidQuery = errors.New(\"invalid query\")\n\terrInvalidIdString = errors.New(\"invalid ID string\")\n)\n\n\/\/go:generate msgp\ntype Node struct {\n\tPath string\n\tLeaf bool\n\tDefs []Archive\n\tHasChildren bool\n}\n\ntype Archive struct {\n\tschema.MetricDefinition\n\tSchemaId uint16 \/\/ index in mdata.schemas (not persisted)\n\tAggId uint16 \/\/ index in mdata.aggregations (not persisted)\n\tLastSave uint32 \/\/ last time the metricDefinition was saved to a backend store (cassandra)\n}\n\ntype MetricID struct {\n\torg int\n\tkey [16]byte\n}\n\nfunc NewMetricIDFromString(s string) (MetricID, error) {\n\tid := MetricID{}\n\terr := id.FromString(s)\n\treturn id, err\n}\n\nfunc (id *MetricID) FromString(s string) error {\n\tsplits := strings.Split(s, \".\")\n\tif len(splits) != 2 || len(splits[1]) != 32 {\n\t\treturn errInvalidIdString\n\t}\n\n\tvar err error\n\tid.org, err = strconv.Atoi(splits[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdst := make([]byte, 16)\n\tn, err := hex.Decode(dst, []byte(splits[1]))\n\tif n != 16 {\n\t\treturn errInvalidIdString\n\t}\n\tcopy(id.key[:], dst)\n\treturn nil\n}\n\nfunc (id *MetricID) String() string {\n\treturn fmt.Sprintf(\"%d.%x\", id.org, id.key)\n}\n\n\/\/ used primarily by tests, for convenience\nfunc NewArchiveBare(name string) Archive {\n\treturn Archive{\n\t\tMetricDefinition: schema.MetricDefinition{\n\t\t\tName: name,\n\t\t},\n\t}\n}\n\n\/*\nCurrently the index is solely used for supporting Graphite style queries.\nSo, the index only needs to be able to search by a pattern that matches the\nMetricDefinition.Name field. In future we plan to extend the searching\ncapabilities to include the other fields in the definition.\n\nNote:\n\n* metrictank is a multi-tenant system where different orgs cannot see each\n other's data\n\n* any given metric may appear multiple times, under different organisations\n\n* Each metric path can be mapped to multiple metricDefinitions in the case that\n fields other then the Name vary. The most common occurrence of this is when\n the Interval at which the metric is being collected has changed.\n\nInterface\n\n* Init()\n This is the initialization step performed at startup. This method should\n block until the index is ready to handle searches.\n\n* Stop():\n This will be called when metrictank is shutting down.\n\n* AddOrUpdate(*schema.MetricData, int32) Archive:\n Every metric received will result in a call to this method to ensure the\n metric has been added to the index. The method is passed the metricData\n payload and the partition id of the metric\n\n* Get(string) (Archive, bool):\n This method should return the MetricDefintion with the passed Id.\n\n* GetPath(string) (Archive, bool) []Archive:\n This method should return the archives under the given path\n\n* List(int) []Archive:\n This method should return all MetricDefinitions for the passed OrgId. If the\n passed OrgId is \"-1\", then all metricDefinitions across all organisations\n should be returned.\n\n* Find(int, string, int64) ([]Node, error):\n This method provides searches. The method is passed an OrgId, a query\n pattern and a unix timestamp. Searches should return all nodes that match for\n the given OrgId and OrgId -1. The pattern should be handled in the same way\n Graphite would. see https:\/\/graphite.readthedocs.io\/en\/latest\/render_api.html#paths-and-wildcards\n And the unix stimestamp is used to ignore series that have been stale since\n the timestamp.\n\n* Delete(int, string) ([]Archive, error):\n This method is used for deleting items from the index. The method is passed\n an OrgId and a query pattern. If the pattern matches a branch node, then\n all leaf nodes on that branch should also be deleted. So if the pattern is\n \"*\", all items in the index should be deleted. A copy of all of the\n metricDefinitions deleted are returned.\n\n* Prune(int, time.Time) ([]Archive, error):\n This method should delete all metrics from the index for the passed org where\n the last time the metric was seen is older then the passed timestamp. If the org\n passed is -1, then the all orgs should be examined for stale metrics to be deleted.\n The method returns a list of the metricDefinitions deleted from the index and any\n error encountered.\n\n* TagList(int, string, int64) ([]string, error):\n This method returns a list of all tag keys associated with the metrics of a given\n organization. The return values are filtered by the regex in the second parameter.\n If the third parameter is >0 then only metrics will be account of which the\n LastUpdate time is >= the given value.\n\n* TagDetails(int, string, string, int64) map[string]uint32:\n This method returns a list of all values associated with a given tag key in the\n given org. The occurences of each value is counted and the count is referred to by\n the series ids in the returned map.\n If the third parameter is not \"\" it will be used as a regular expression to filter\n the values before accouting for them.\n If the fourth parameter is > 0 then the metrics\n will be filtered and only those of which the LastUpdate time is >= the from\n timestamp will be considered while the others are being ignored.\n\n* FindByTag(int, []string, int64) ([]string, error):\n This method takes a list of expressions in the format key<operator>value.\n The allowed operators are: =, !=, =~, !=~.\n It returns a slice of Metric names that match the given conditions, the\n conditions are logically AND-ed.\n If the third argument is > 0 then the results will be filtered and only those\n where the LastUpdate time is >= from will be returned as results.\n*\/\n\ntype MetricIndex interface {\n\tInit() error\n\tStop()\n\tAddOrUpdate(*schema.MetricData, int32) Archive\n\tGet(string) (Archive, bool)\n\tGetPath(int, string) []Archive\n\tDelete(int, string) ([]Archive, error)\n\tFind(int, string, int64) ([]Node, error)\n\tList(int) []Archive\n\tPrune(int, time.Time) ([]Archive, error)\n\tFindByTag(int, []string, int64) ([]string, error)\n\tTags(int, string, int64) ([]string, error)\n\tTagDetails(int, string, string, int64) (map[string]uint64, error)\n}\n<commit_msg>comments<commit_after>\/\/ The idx package provides a metadata index for metrics\n\npackage idx\n\nimport (\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tschema \"gopkg.in\/raintank\/schema.v1\"\n)\n\nvar (\n\tBothBranchAndLeaf = errors.New(\"node can't be both branch and leaf\")\n\tBranchUnderLeaf = errors.New(\"can't add branch under leaf\")\n\terrInvalidQuery = errors.New(\"invalid query\")\n\terrInvalidIdString = errors.New(\"invalid ID string\")\n)\n\n\/\/go:generate msgp\ntype Node struct {\n\tPath string\n\tLeaf bool\n\tDefs []Archive\n\tHasChildren bool\n}\n\ntype Archive struct {\n\tschema.MetricDefinition\n\tSchemaId uint16 \/\/ index in mdata.schemas (not persisted)\n\tAggId uint16 \/\/ index in mdata.aggregations (not persisted)\n\tLastSave uint32 \/\/ last time the metricDefinition was saved to a backend store (cassandra)\n}\n\ntype MetricID struct {\n\torg int\n\tkey [16]byte\n}\n\nfunc NewMetricIDFromString(s string) (MetricID, error) {\n\tid := MetricID{}\n\terr := id.FromString(s)\n\treturn id, err\n}\n\nfunc (id *MetricID) FromString(s string) error {\n\tsplits := strings.Split(s, \".\")\n\tif len(splits) != 2 || len(splits[1]) != 32 {\n\t\treturn errInvalidIdString\n\t}\n\n\tvar err error\n\tid.org, err = strconv.Atoi(splits[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdst := make([]byte, 16)\n\tn, err := hex.Decode(dst, []byte(splits[1]))\n\tif n != 16 {\n\t\treturn errInvalidIdString\n\t}\n\tcopy(id.key[:], dst)\n\treturn nil\n}\n\nfunc (id *MetricID) String() string {\n\treturn fmt.Sprintf(\"%d.%x\", id.org, id.key)\n}\n\n\/\/ used primarily by tests, for convenience\nfunc NewArchiveBare(name string) Archive {\n\treturn Archive{\n\t\tMetricDefinition: schema.MetricDefinition{\n\t\t\tName: name,\n\t\t},\n\t}\n}\n\n\/*\nCurrently the index is solely used for supporting Graphite style queries.\nSo, the index only needs to be able to search by a pattern that matches the\nMetricDefinition.Name field. In future we plan to extend the searching\ncapabilities to include the other fields in the definition.\n\nNote:\n\n* metrictank is a multi-tenant system where different orgs cannot see each\n other's data\n\n* any given metric may appear multiple times, under different organisations\n\n* Each metric path can be mapped to multiple metricDefinitions in the case that\n fields other then the Name vary. The most common occurrence of this is when\n the Interval at which the metric is being collected has changed.\n\nInterface\n\n* Init()\n This is the initialization step performed at startup. This method should\n block until the index is ready to handle searches.\n\n* Stop():\n This will be called when metrictank is shutting down.\n\n* AddOrUpdate(*schema.MetricData, int32) Archive:\n Every metric received will result in a call to this method to ensure the\n metric has been added to the index. The method is passed the metricData\n payload and the partition id of the metric\n\n* Get(string) (Archive, bool):\n This method should return the MetricDefintion with the passed Id.\n\n* GetPath(string) (Archive, bool) []Archive:\n This method should return the archives under the given path\n\n* List(int) []Archive:\n This method should return all MetricDefinitions for the passed OrgId. If the\n passed OrgId is \"-1\", then all metricDefinitions across all organisations\n should be returned.\n\n* Find(int, string, int64) ([]Node, error):\n This method provides searches. The method is passed an OrgId, a query\n pattern and a unix timestamp. Searches should return all nodes that match for\n the given OrgId and OrgId -1. The pattern should be handled in the same way\n Graphite would. see https:\/\/graphite.readthedocs.io\/en\/latest\/render_api.html#paths-and-wildcards\n And the unix stimestamp is used to ignore series that have been stale since\n the timestamp.\n\n* Delete(int, string) ([]Archive, error):\n This method is used for deleting items from the index. The method is passed\n an OrgId and a query pattern. If the pattern matches a branch node, then\n all leaf nodes on that branch should also be deleted. So if the pattern is\n \"*\", all items in the index should be deleted. A copy of all of the\n metricDefinitions deleted are returned.\n\n* Prune(int, time.Time) ([]Archive, error):\n This method should delete all metrics from the index for the passed org where\n the last time the metric was seen is older then the passed timestamp. If the org\n passed is -1, then the all orgs should be examined for stale metrics to be deleted.\n The method returns a list of the metricDefinitions deleted from the index and any\n error encountered.\n\n* Tags(int, string, int64) ([]string, error):\n This method returns a list of all tag keys associated with the metrics of a given\n organization. The return values are filtered by the regex in the second parameter.\n If the third parameter is >0 then only metrics will be accounted of which the\n LastUpdate time is >= the given value.\n\n* TagDetails(int, string, string, int64) map[string]uint64:\n This method returns a list of all values associated with a given tag key in the\n given org. The occurences of each value is counted and the count is referred to by\n the metric names in the returned map.\n If the third parameter is not \"\" it will be used as a regular expression to filter\n the values before accouting for them.\n If the fourth parameter is > 0 then the metrics will be filtered and only those\n of which the LastUpdate time is >= the from timestamp will be considered while\n the others are being ignored.\n\n* FindByTag(int, []string, int64) ([]string, error):\n This method takes a list of expressions in the format key<operator>value.\n The allowed operators are: =, !=, =~, !=~.\n It returns a slice of metric names that match the given conditions, the\n conditions are logically AND-ed.\n If the third argument is > 0 then the results will be filtered and only those\n where the LastUpdate time is >= from will be returned as results.\n*\/\n\ntype MetricIndex interface {\n\tInit() error\n\tStop()\n\tAddOrUpdate(*schema.MetricData, int32) Archive\n\tGet(string) (Archive, bool)\n\tGetPath(int, string) []Archive\n\tDelete(int, string) ([]Archive, error)\n\tFind(int, string, int64) ([]Node, error)\n\tList(int) []Archive\n\tPrune(int, time.Time) ([]Archive, error)\n\tFindByTag(int, []string, int64) ([]string, error)\n\tTags(int, string, int64) ([]string, error)\n\tTagDetails(int, string, string, int64) (map[string]uint64, error)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ BaruwaAPI Golang bindings for Baruwa REST API\n\/\/ Copyright (C) 2019 Andrew Colin Kissa <andrew@topdog.za.net>\n\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage api\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\n\t\"github.com\/google\/go-querystring\/query\"\n)\n\n\/\/ FallBackServerOrg holds fallback server organization\ntype FallBackServerOrg struct {\n\tID int `json:\"id\"`\n\tName string `json:\"name\"`\n}\n\n\/\/ FallBackServer holds organization fallback servers\ntype FallBackServer struct {\n\tID int `json:\"id,omitempty\"`\n\tAddress string `json:\"address\"`\n\tProtocol int `json:\"protocol\"`\n\tPort int `json:\"port\"`\n\tRequireTLS bool `json:\"require_tls\"`\n\tEnabled bool `json:\"enabled\"`\n\tOrganization FallBackServerOrg `json:\"organization\"`\n}\n\n\/\/ GetFallBackServer returns radius settings\n\/\/ https:\/\/www.baruwa.com\/docs\/api\/#retrieve-a-fallback-server\nfunc (c *Client) GetFallBackServer(serverID int) (server *FallBackServer, err error) {\n\tif serverID <= 0 {\n\t\terr = fmt.Errorf(\"The serverID param should be > 0\")\n\t\treturn\n\t}\n\n\terr = c.get(fmt.Sprintf(\"failbackservers\/%d\", serverID), nil, server)\n\n\treturn\n}\n\n\/\/ CreateFallBackServer creates radius settings\n\/\/ https:\/\/www.baruwa.com\/docs\/api\/#create-a-fallback-server\nfunc (c *Client) CreateFallBackServer(organizationID int, server *FallBackServer) (err error) {\n\tvar v url.Values\n\n\tif organizationID <= 0 {\n\t\terr = fmt.Errorf(\"The organizationID param should be > 0\")\n\t\treturn\n\t}\n\n\tif server == nil {\n\t\terr = fmt.Errorf(\"The server param cannot be nil\")\n\t\treturn\n\t}\n\n\tif v, err = query.Values(server); err != nil {\n\t\treturn\n\t}\n\n\terr = c.post(fmt.Sprintf(\"failbackservers\/%d\", organizationID), v, server)\n\n\treturn\n}\n\n\/\/ UpdateFallBackServer updates radius settings\n\/\/ https:\/\/www.baruwa.com\/docs\/api\/#update-a-fallback-server\nfunc (c *Client) UpdateFallBackServer(server *FallBackServer) (err error) {\n\tvar v url.Values\n\n\tif server == nil {\n\t\terr = fmt.Errorf(\"The server param cannot be nil\")\n\t\treturn\n\t}\n\n\tif server.ID <= 0 {\n\t\terr = fmt.Errorf(\"The server.ID param should be > 0\")\n\t\treturn\n\t}\n\n\tif v, err = query.Values(server); err != nil {\n\t\treturn\n\t}\n\n\terr = c.put(fmt.Sprintf(\"failbackservers\/%d\", server.ID), v, server)\n\n\treturn\n}\n\n\/\/ DeleteFallBackServer deletes radius settings\n\/\/ https:\/\/www.baruwa.com\/docs\/api\/#delete-a-fallback-server\nfunc (c *Client) DeleteFallBackServer(server *FallBackServer) (err error) {\n\tvar v url.Values\n\n\tif server == nil {\n\t\terr = fmt.Errorf(\"The server param cannot be nil\")\n\t\treturn\n\t}\n\n\tif server.ID <= 0 {\n\t\terr = fmt.Errorf(\"The server.ID param should be > 0\")\n\t\treturn\n\t}\n\n\tif v, err = query.Values(server); err != nil {\n\t\treturn\n\t}\n\n\terr = c.delete(fmt.Sprintf(\"failbackservers\/%d\", server.ID), v)\n\n\treturn\n}\n<commit_msg>FET: Add Fallback Server Implementation<commit_after>\/\/ BaruwaAPI Golang bindings for Baruwa REST API\n\/\/ Copyright (C) 2019 Andrew Colin Kissa <andrew@topdog.za.net>\n\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage api\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\n\t\"github.com\/google\/go-querystring\/query\"\n)\n\n\/\/ FallBackServerOrg holds fallback server organization\ntype FallBackServerOrg struct {\n\tID int `json:\"id\"`\n\tName string `json:\"name\"`\n}\n\n\/\/ FallBackServer holds organization fallback servers\ntype FallBackServer struct {\n\tID int `json:\"id,omitempty\"`\n\tAddress string `json:\"address\"`\n\tProtocol int `json:\"protocol\"`\n\tPort int `json:\"port\"`\n\tRequireTLS bool `json:\"require_tls\"`\n\tEnabled bool `json:\"enabled\"`\n\tOrganization *FallBackServerOrg `json:\"organization\"`\n}\n\n\/\/ FallBackServerList holds users\ntype FallBackServerList struct {\n\tItems []FallBackServer `json:\"items\"`\n\tLinks Links `json:\"links\"`\n\tMeta Meta `json:\"meta\"`\n}\n\n\/\/ GetFallBackServers returns a FallBackServerList object\n\/\/ This contains a paginated list of fallback servers and links\n\/\/ to the neigbouring pages.\n\/\/ https:\/\/www.baruwa.com\/docs\/api\/#fallback-servers\nfunc (c *Client) GetFallBackServers(organizationID int, opts *ListOptions) (l *FallBackServerList, err error) {\n\tif organizationID <= 0 {\n\t\terr = fmt.Errorf(organizationIDError)\n\t\treturn\n\t}\n\n\tl = &FallBackServerList{}\n\n\terr = c.get(fmt.Sprintf(\"failbackservers\/%d\", organizationID), opts, l)\n\n\treturn\n}\n\n\/\/ GetFallBackServer returns radius settings\n\/\/ https:\/\/www.baruwa.com\/docs\/api\/#retrieve-a-fallback-server\nfunc (c *Client) GetFallBackServer(serverID int) (server *FallBackServer, err error) {\n\tif serverID <= 0 {\n\t\terr = fmt.Errorf(serverIDError)\n\t\treturn\n\t}\n\n\tserver = &FallBackServer{}\n\n\terr = c.get(fmt.Sprintf(\"failbackservers\/%d\", serverID), nil, server)\n\n\treturn\n}\n\n\/\/ CreateFallBackServer creates radius settings\n\/\/ https:\/\/www.baruwa.com\/docs\/api\/#create-a-fallback-server\nfunc (c *Client) CreateFallBackServer(organizationID int, server *FallBackServer) (err error) {\n\tvar v url.Values\n\n\tif organizationID <= 0 {\n\t\terr = fmt.Errorf(organizationIDError)\n\t\treturn\n\t}\n\n\tif server == nil {\n\t\terr = fmt.Errorf(serverParamError)\n\t\treturn\n\t}\n\n\tif v, err = query.Values(server); err != nil {\n\t\treturn\n\t}\n\n\terr = c.post(fmt.Sprintf(\"failbackservers\/%d\", organizationID), v, server)\n\n\treturn\n}\n\n\/\/ UpdateFallBackServer updates radius settings\n\/\/ https:\/\/www.baruwa.com\/docs\/api\/#update-a-fallback-server\nfunc (c *Client) UpdateFallBackServer(server *FallBackServer) (err error) {\n\tvar v url.Values\n\n\tif server == nil {\n\t\terr = fmt.Errorf(serverParamError)\n\t\treturn\n\t}\n\n\tif server.ID <= 0 {\n\t\terr = fmt.Errorf(serverSIDError)\n\t\treturn\n\t}\n\n\tif v, err = query.Values(server); err != nil {\n\t\treturn\n\t}\n\n\terr = c.put(fmt.Sprintf(\"failbackservers\/%d\", server.ID), v, server)\n\n\treturn\n}\n\n\/\/ DeleteFallBackServer deletes radius settings\n\/\/ https:\/\/www.baruwa.com\/docs\/api\/#delete-a-fallback-server\nfunc (c *Client) DeleteFallBackServer(server *FallBackServer) (err error) {\n\tvar v url.Values\n\n\tif server == nil {\n\t\terr = fmt.Errorf(serverParamError)\n\t\treturn\n\t}\n\n\tif server.ID <= 0 {\n\t\terr = fmt.Errorf(serverSIDError)\n\t\treturn\n\t}\n\n\tif v, err = query.Values(server); err != nil {\n\t\treturn\n\t}\n\n\terr = c.delete(fmt.Sprintf(\"failbackservers\/%d\", server.ID), v)\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package gitosis\n\nimport (\n\t\"github.com\/timeredbull\/tsuru\/config\"\n\t. \"launchpad.net\/gocheck\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"testing\"\n)\n\nfunc Test(t *testing.T) { TestingT(t) }\n\ntype S struct {\n\tgitRoot string\n\tgitosisBare string\n\tgitosisRepo string\n}\n\nvar _ = Suite(&S{})\n\nfunc (s *S) SetUpSuite(c *C) {\n\terr := config.ReadConfigFile(\"..\/..\/..\/etc\/tsuru.conf\")\n\tc.Assert(err, IsNil)\n\ts.gitRoot, err = config.GetString(\"git:root\")\n\tc.Assert(err, IsNil)\n\ts.gitosisBare, err = config.GetString(\"git:gitosis-bare\")\n\tc.Assert(err, IsNil)\n\ts.gitosisRepo, err = config.GetString(\"git:gitosis-repo\")\n\tcurrentDir := os.Getenv(\"PWD\")\n\terr = os.Mkdir(s.gitRoot, 0777)\n\tc.Assert(err, IsNil)\n\terr = os.Chdir(s.gitRoot)\n\tc.Assert(err, IsNil)\n\terr = exec.Command(\"ls\").Run()\n\tc.Assert(err, IsNil)\n\terr = exec.Command(\"git\", \"init\", \"--bare\", \"gitosis-admin.git\").Run()\n\tc.Assert(err, IsNil)\n\terr = exec.Command(\"git\", \"clone\", \"gitosis-admin.git\").Run()\n\tc.Assert(err, IsNil)\n\terr = os.Chdir(currentDir)\n\tc.Assert(err, IsNil)\n}\n\nfunc (s *S) SetUpTest(c *C) {\n\tpwd, err := os.Getwd()\n\tc.Assert(err, IsNil)\n\terr = os.Chdir(path.Join(s.gitRoot, \"gitosis-admin\"))\n\t_, err = os.Create(\"gitosis.conf\")\n\tc.Assert(err, IsNil)\n\terr = os.Chdir(pwd)\n}\n\nfunc (s *S) TearDownSuite(c *C) {\n\terr := os.RemoveAll(s.gitRoot)\n\tc.Assert(err, IsNil)\n}\n\nfunc (s *S) TearDownTest(c *C) {\n\tpwd, err := os.Getwd()\n\tc.Assert(err, IsNil)\n\terr = os.Chdir(path.Join(s.gitRoot, \"gitosis-admin\"))\n\terr = exec.Command(\"git\", \"rm\", \"gitosis.conf\").Run()\n\tif err == nil {\n\t\terr = pushToGitosis(\"removing test file\")\n\t\tc.Assert(err, IsNil)\n\t}\n\terr = os.Chdir(pwd)\n}\n\nfunc (s *S) lastBareCommit(c *C) string {\n\tpwd, err := os.Getwd()\n\tc.Assert(err, IsNil)\n\tos.Chdir(s.gitosisBare)\n\tbareOutput, err := exec.Command(\"git\", \"log\", \"-1\", \"--pretty=format:%s\").CombinedOutput()\n\tc.Assert(err, IsNil)\n\tos.Chdir(pwd)\n\treturn string(bareOutput)\n}\n<commit_msg>gitosis: using MkdirAll instead of Mkdir<commit_after>package gitosis\n\nimport (\n\t\"github.com\/timeredbull\/tsuru\/config\"\n\t. \"launchpad.net\/gocheck\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"testing\"\n)\n\nfunc Test(t *testing.T) { TestingT(t) }\n\ntype S struct {\n\tgitRoot string\n\tgitosisBare string\n\tgitosisRepo string\n}\n\nvar _ = Suite(&S{})\n\nfunc (s *S) SetUpSuite(c *C) {\n\terr := config.ReadConfigFile(\"..\/..\/..\/etc\/tsuru.conf\")\n\tc.Assert(err, IsNil)\n\ts.gitRoot, err = config.GetString(\"git:root\")\n\tc.Assert(err, IsNil)\n\ts.gitosisBare, err = config.GetString(\"git:gitosis-bare\")\n\tc.Assert(err, IsNil)\n\ts.gitosisRepo, err = config.GetString(\"git:gitosis-repo\")\n\tcurrentDir := os.Getenv(\"PWD\")\n\terr = os.MkdirAll(s.gitRoot, 0777)\n\tc.Assert(err, IsNil)\n\terr = os.Chdir(s.gitRoot)\n\tc.Assert(err, IsNil)\n\terr = exec.Command(\"ls\").Run()\n\tc.Assert(err, IsNil)\n\terr = exec.Command(\"git\", \"init\", \"--bare\", \"gitosis-admin.git\").Run()\n\tc.Assert(err, IsNil)\n\terr = exec.Command(\"git\", \"clone\", \"gitosis-admin.git\").Run()\n\tc.Assert(err, IsNil)\n\terr = os.Chdir(currentDir)\n\tc.Assert(err, IsNil)\n}\n\nfunc (s *S) SetUpTest(c *C) {\n\tpwd, err := os.Getwd()\n\tc.Assert(err, IsNil)\n\terr = os.Chdir(path.Join(s.gitRoot, \"gitosis-admin\"))\n\t_, err = os.Create(\"gitosis.conf\")\n\tc.Assert(err, IsNil)\n\terr = os.Chdir(pwd)\n}\n\nfunc (s *S) TearDownSuite(c *C) {\n\terr := os.RemoveAll(s.gitRoot)\n\tc.Assert(err, IsNil)\n}\n\nfunc (s *S) TearDownTest(c *C) {\n\tpwd, err := os.Getwd()\n\tc.Assert(err, IsNil)\n\terr = os.Chdir(path.Join(s.gitRoot, \"gitosis-admin\"))\n\terr = exec.Command(\"git\", \"rm\", \"gitosis.conf\").Run()\n\tif err == nil {\n\t\terr = pushToGitosis(\"removing test file\")\n\t\tc.Assert(err, IsNil)\n\t}\n\terr = os.Chdir(pwd)\n}\n\nfunc (s *S) lastBareCommit(c *C) string {\n\tpwd, err := os.Getwd()\n\tc.Assert(err, IsNil)\n\tos.Chdir(s.gitosisBare)\n\tbareOutput, err := exec.Command(\"git\", \"log\", \"-1\", \"--pretty=format:%s\").CombinedOutput()\n\tc.Assert(err, IsNil)\n\tos.Chdir(pwd)\n\treturn string(bareOutput)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The rkt Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage testutils\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\n\t\"github.com\/coreos\/rkt\/Godeps\/_workspace\/src\/github.com\/vishvananda\/netlink\"\n)\n\nfunc getDefaultGW(family int) (string, error) {\n\tl, err := netlink.LinkByName(\"lo\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\troutes, err := netlink.RouteList(l, family)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn routes[0].Gw.String(), nil\n}\nfunc GetDefaultGWv4() (string, error) {\n\treturn getDefaultGW(netlink.FAMILY_V4)\n}\n\nfunc GetDefaultGWv6() (string, error) {\n\treturn getDefaultGW(netlink.FAMILY_V6)\n}\n\nfunc GetIPs(ifaceWanted string, familyWanted int) ([]string, error) {\n\tips := make([]string, 0)\n\tifaces, err := net.Interfaces()\n\tif err != nil {\n\t\treturn ips, err\n\t}\n\tfor _, iface := range ifaces {\n\t\tif iface.Name != ifaceWanted {\n\t\t\tcontinue\n\t\t}\n\n\t\taddrs, _ := iface.Addrs()\n\t\tfor _, addr := range addrs {\n\t\t\taddrString := addr.String()\n\t\t\tip, _, err := net.ParseCIDR(addrString)\n\t\t\tif err != nil {\n\t\t\t\treturn ips, err\n\t\t\t}\n\n\t\t\tif strings.Contains(addrString, \".\") && familyWanted == netlink.FAMILY_V4 ||\n\t\t\t\tstrings.Contains(addrString, \":\") && familyWanted == netlink.FAMILY_V6 {\n\t\t\t\tips = append(ips, ip.String())\n\t\t\t}\n\t\t}\n\t}\n\treturn ips, err\n}\n\nfunc GetIPsv4(iface string) ([]string, error) {\n\treturn GetIPs(iface, netlink.FAMILY_V4)\n}\nfunc GetIPsv6(iface string) ([]string, error) {\n\treturn GetIPs(iface, netlink.FAMILY_V6)\n}\n\nfunc GetGW(iface string, family int) (string, error) {\n\treturn \"\", fmt.Errorf(\"Not implemented\")\n}\nfunc GetGWv4(iface string) (string, error) {\n\treturn GetGW(iface, netlink.FAMILY_V4)\n}\n\nfunc GetGWv6(iface string) (string, error) {\n\treturn GetGW(iface, netlink.FAMILY_V4)\n}\n<commit_msg>testutils: extend iputils with iface discovery<commit_after>\/\/ Copyright 2015 The rkt Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage testutils\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\n\t\"github.com\/coreos\/rkt\/Godeps\/_workspace\/src\/github.com\/vishvananda\/netlink\"\n)\n\nfunc getDefaultGW(family int) (string, error) {\n\tl, err := netlink.LinkByName(\"lo\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\troutes, err := netlink.RouteList(l, family)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn routes[0].Gw.String(), nil\n}\nfunc GetDefaultGWv4() (string, error) {\n\treturn getDefaultGW(netlink.FAMILY_V4)\n}\n\nfunc GetDefaultGWv6() (string, error) {\n\treturn getDefaultGW(netlink.FAMILY_V6)\n}\n\nfunc GetIPs(ifaceWanted string, familyWanted int) ([]string, error) {\n\tips := make([]string, 0)\n\tifaces, err := net.Interfaces()\n\tif err != nil {\n\t\treturn ips, err\n\t}\n\tfor _, iface := range ifaces {\n\t\tif iface.Name != ifaceWanted {\n\t\t\tcontinue\n\t\t}\n\n\t\taddrs, _ := iface.Addrs()\n\t\tfor _, addr := range addrs {\n\t\t\taddrString := addr.String()\n\t\t\tip, _, err := net.ParseCIDR(addrString)\n\t\t\tif err != nil {\n\t\t\t\treturn ips, err\n\t\t\t}\n\n\t\t\tif strings.Contains(addrString, \".\") && familyWanted == netlink.FAMILY_V4 ||\n\t\t\t\tstrings.Contains(addrString, \":\") && familyWanted == netlink.FAMILY_V6 {\n\t\t\t\tips = append(ips, ip.String())\n\t\t\t}\n\t\t}\n\t}\n\treturn ips, err\n}\n\nfunc GetIPsv4(iface string) ([]string, error) {\n\treturn GetIPs(iface, netlink.FAMILY_V4)\n}\nfunc GetIPsv6(iface string) ([]string, error) {\n\treturn GetIPs(iface, netlink.FAMILY_V6)\n}\n\nfunc GetGW(iface string, family int) (string, error) {\n\treturn \"\", fmt.Errorf(\"Not implemented\")\n}\nfunc GetGWv4(iface string) (string, error) {\n\treturn GetGW(iface, netlink.FAMILY_V4)\n}\n\nfunc GetGWv6(iface string) (string, error) {\n\treturn GetGW(iface, netlink.FAMILY_V4)\n}\n\nfunc GetNonLoIfaceWithAddrs() (iface net.Interface, err error) {\n\tifaces, err := net.Interfaces()\n\tif err != nil {\n\t\treturn iface, err\n\t}\n\n\tfor _, i := range ifaces {\n\t\tif i.Flags&net.FlagLoopback == 0 {\n\t\t\tifaceNameLower := strings.ToLower(i.Name)\n\t\t\t\/\/ Don't use rkt's interfaces\n\t\t\tif strings.HasSuffix(ifaceNameLower, \"cni\") ||\n\t\t\t\tstrings.HasSuffix(ifaceNameLower, \"veth\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\taddrs, err := i.Addrs()\n\t\t\tif err != nil {\n\t\t\t\treturn iface, fmt.Errorf(\"Cannot get IPV4 address for interface %v: %v\", i.Name, err)\n\t\t\t}\n\t\t\tif len(addrs) > 0 {\n\t\t\t\tiface = i\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn iface, err\n}\n\nfunc GetNonLoIfaceIPv4() (string, error) {\n\tiface, err := GetNonLoIfaceWithAddrs()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error while getting non-lo host interface: %v\\n\", err)\n\t}\n\tif iface.Name == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\tifaceIPsv4, err := GetIPsv4(iface.Name)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Cannot get IPV4 address for interface %v: %v\", iface.Name, err)\n\t}\n\tif len(ifaceIPsv4) == 0 {\n\t\treturn \"\", nil\n\t}\n\treturn ifaceIPsv4[0], nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ Majordomo Protocol client example - asynchronous.\n\/\/ Uses the mdcli API to hide all MDP aspects\n\/\/\n\/\/ Lets us build this source without creating a library\npackage main\n\nimport (\n\t\"github.com\/pebbe\/zmq2\/examples\/mdapi\"\n\n\t\"fmt\"\n\t\"os\"\n)\n\nfunc main() {\n\tvar verbose bool\n\tif len(os.Args) > 1 && os.Args[1] == \"-v\" {\n\t\tverbose = true\n\t}\n\tsession, _ := mdapi.NewMdcli2(\"tcp:\/\/localhost:5555\", verbose)\n\n var count int\n for count = 0; count < 100000; count++ {\n\t\tsession.Send(\"echo\", \"Hello world\")\n }\n for count = 0; count < 100000; count++ {\n\t\t_, err := session.Recv()\n\t\tif err != nil {\n\t\t\tbreak\t \/\/ Interrupted by Ctrl-C\n\t\t}\n }\n fmt.Printf(\"%d replies received\\n\", count)\n}\n<commit_msg>Modified example: mdclient2<commit_after>\/\/\n\/\/ Majordomo Protocol client example - asynchronous.\n\/\/ Uses the mdcli API to hide all MDP aspects\n\/\/\n\/\/ Lets us build this source without creating a library\npackage main\n\nimport (\n\t\"github.com\/pebbe\/zmq2\/examples\/mdapi\"\n\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n)\n\nfunc main() {\n\tvar verbose bool\n\tif len(os.Args) > 1 && os.Args[1] == \"-v\" {\n\t\tverbose = true\n\t}\n\tsession, _ := mdapi.NewMdcli2(\"tcp:\/\/localhost:5555\", verbose)\n\n\tvar count int\n\tfor count = 0; count < 100000; count++ {\n\t\terr := session.Send(\"echo\", \"Hello world\")\n\t\tif err != nil {\n\t\t\tlog.Println(\"Send:\", err)\n\t\t\tbreak\n\t\t}\n\t}\n\tfor count = 0; count < 100000; count++ {\n\t\t_, err := session.Recv()\n\t\tif err != nil {\n\t\t\tlog.Println(\"Recv:\", err)\n\t\t\tbreak\n\t\t}\n\t}\n\tfmt.Printf(\"%d replies received\\n\", count)\n}\n<|endoftext|>"} {"text":"<commit_before>package scene\n\nimport (\n\t\"image\/color\"\n\t\"math\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/pankona\/gomo-simra\/simra\"\n\t\"github.com\/pankona\/gomo-simra\/simra\/image\"\n)\n\n\/\/ Title represents a scene object for Title\ntype Title struct {\n\tsimra simra.Simraer\n\tscreenWidth int\n\tscreenHeight int\n\tsprites []simra.Spriter\n\tnumOfSprite simra.Spriter\n\tfps int\n\tfpsText simra.Spriter\n\tmu sync.Mutex\n\tkokeshiTex *simra.Texture\n}\n\n\/\/ Initialize initializes title scene\n\/\/ This is called from simra.\n\/\/ simra.SetDesiredScreenSize should be called to determine\n\/\/ screen size of this scene.\nfunc (t *Title) Initialize(sim simra.Simraer) {\n\tt.simra = sim\n\n\tt.screenHeight = 1080 \/ 2\n\tt.screenWidth = 1920 \/ 2\n\tt.simra.SetDesiredScreenSize((float32)(t.screenHeight), (float32)(t.screenWidth))\n\n\t\/\/ initialize sprites\n\tt.initialize()\n\n\tt.numOfSprite = t.simra.NewSprite()\n\tt.numOfSprite.SetPosition(float32(t.screenWidth\/2), 100)\n\tt.numOfSprite.SetScale(float32(t.screenWidth), 80)\n\tt.simra.AddSprite(t.numOfSprite)\n\n\ttex := t.simra.NewTextTexture(\"0\",\n\t\t60, color.RGBA{255, 255, 255, 255}, image.Rect(0, 0, float32(t.screenWidth), 80))\n\tt.numOfSprite.ReplaceTexture(tex)\n\n\tt.fpsText = t.simra.NewSprite()\n\tt.fpsText.SetPosition(float32(t.screenWidth\/4), 100)\n\tt.fpsText.SetScale(float32(t.screenWidth), 80)\n\tt.simra.AddSprite(t.fpsText)\n\n\ttex = t.simra.NewTextTexture(\"0\",\n\t\t60, color.RGBA{255, 255, 255, 255}, image.Rect(0, 0, float32(t.screenWidth), 80))\n\tt.fpsText.ReplaceTexture(tex)\n\tgo func() {\n\t\tfor {\n\t\t\t<-time.After(1 * time.Second)\n\t\t\ttex = t.simra.NewTextTexture(strconv.Itoa(t.fps),\n\t\t\t\t60, color.RGBA{255, 255, 255, 255}, image.Rect(0, 0, float32(t.screenWidth), 80))\n\t\t\tt.fpsText.ReplaceTexture(tex)\n\t\t\tt.mu.Lock()\n\t\t\tt.fps = 0\n\t\t\tt.mu.Unlock()\n\t\t}\n\t}()\n\n\tt.kokeshiTex = t.simra.NewImageTexture(\"sample2.png\", image.Rect(0, 0, 64, 64))\n}\n\nfunc (t *Title) initialize() {\n\tt.simra.AddTouchListener(t)\n}\n\nvar degree int\n\n\/\/ Drive is called from simra.\n\/\/ This is used to update sprites position.\n\/\/ This will be called 60 times per sec.\nfunc (t *Title) Drive() {\n\tdegree = (degree - 1) % 360\n\tfor i := range t.sprites {\n\t\tr := float32(degree) * math.Pi \/ 180\n\t\tt.sprites[i].SetRotate(r)\n\t}\n\tt.mu.Lock()\n\tt.fps++\n\tt.mu.Unlock()\n\t\/\/runtime.GC()\n}\n\n\/\/ OnTouchBegin is called when Title scene is Touched.\n\/\/ It is caused by calling AddtouchListener for title.background sprite.\nfunc (t *Title) OnTouchBegin(x, y float32) {\n\tt.spawnKokeshi(x, y)\n}\n\n\/\/ OnTouchMove is called when Title scene is Touched and moved.\n\/\/ It is caused by calling AddtouchListener for title.background sprite.\nfunc (t *Title) OnTouchMove(x, y float32) {\n\tt.spawnKokeshi(x, y)\n}\n\n\/\/ OnTouchEnd is called when Title scene is Touched and it is released.\n\/\/ It is caused by calling AddtouchListener for title.background sprite.\nfunc (t *Title) OnTouchEnd(x, y float32) {\n\tt.spawnKokeshi(x, y)\n}\n\nfunc (t *Title) spawnKokeshi(x, y float32) {\n\t\/\/ scene end. go to next scene\n\tsprite := t.simra.NewSprite()\n\tsprite.SetPosition(x, y)\n\tsprite.SetScale(128, 128)\n\tt.simra.AddSprite(sprite)\n\tt.sprites = append(t.sprites, sprite)\n\tsprite.ReplaceTexture(t.kokeshiTex)\n\n\ttex := t.simra.NewTextTexture(strconv.Itoa(len(t.sprites)),\n\t\t60, color.RGBA{255, 255, 255, 255}, image.Rect(0, 0, float32(t.screenWidth), 80))\n\tt.numOfSprite.ReplaceTexture(tex)\n\n\t\/\/ later sprite goes far side\n\tt.simra.SetZIndex(sprite, len(t.sprites))\n}\n<commit_msg>[#66] remove inappropriate comments<commit_after>package scene\n\nimport (\n\t\"image\/color\"\n\t\"math\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/pankona\/gomo-simra\/simra\"\n\t\"github.com\/pankona\/gomo-simra\/simra\/image\"\n)\n\n\/\/ Title represents a scene object for Title\ntype Title struct {\n\tsimra simra.Simraer\n\tscreenWidth int\n\tscreenHeight int\n\tsprites []simra.Spriter\n\tnumOfSprite simra.Spriter\n\tfps int\n\tfpsText simra.Spriter\n\tmu sync.Mutex\n\tkokeshiTex *simra.Texture\n}\n\n\/\/ Initialize initializes title scene\n\/\/ This is called from simra.\n\/\/ simra.SetDesiredScreenSize should be called to determine\n\/\/ screen size of this scene.\nfunc (t *Title) Initialize(sim simra.Simraer) {\n\tt.simra = sim\n\n\tt.screenHeight = 1080 \/ 2\n\tt.screenWidth = 1920 \/ 2\n\tt.simra.SetDesiredScreenSize((float32)(t.screenHeight), (float32)(t.screenWidth))\n\n\t\/\/ initialize sprites\n\tt.initialize()\n\n\tt.numOfSprite = t.simra.NewSprite()\n\tt.numOfSprite.SetPosition(float32(t.screenWidth\/2), 100)\n\tt.numOfSprite.SetScale(float32(t.screenWidth), 80)\n\tt.simra.AddSprite(t.numOfSprite)\n\n\ttex := t.simra.NewTextTexture(\"0\",\n\t\t60, color.RGBA{255, 255, 255, 255}, image.Rect(0, 0, float32(t.screenWidth), 80))\n\tt.numOfSprite.ReplaceTexture(tex)\n\n\tt.fpsText = t.simra.NewSprite()\n\tt.fpsText.SetPosition(float32(t.screenWidth\/4), 100)\n\tt.fpsText.SetScale(float32(t.screenWidth), 80)\n\tt.simra.AddSprite(t.fpsText)\n\n\ttex = t.simra.NewTextTexture(\"0\",\n\t\t60, color.RGBA{255, 255, 255, 255}, image.Rect(0, 0, float32(t.screenWidth), 80))\n\tt.fpsText.ReplaceTexture(tex)\n\tgo func() {\n\t\tfor {\n\t\t\t<-time.After(1 * time.Second)\n\t\t\ttex = t.simra.NewTextTexture(strconv.Itoa(t.fps),\n\t\t\t\t60, color.RGBA{255, 255, 255, 255}, image.Rect(0, 0, float32(t.screenWidth), 80))\n\t\t\tt.fpsText.ReplaceTexture(tex)\n\t\t\tt.mu.Lock()\n\t\t\tt.fps = 0\n\t\t\tt.mu.Unlock()\n\t\t}\n\t}()\n\n\tt.kokeshiTex = t.simra.NewImageTexture(\"sample2.png\", image.Rect(0, 0, 64, 64))\n}\n\nfunc (t *Title) initialize() {\n\tt.simra.AddTouchListener(t)\n}\n\nvar degree int\n\n\/\/ Drive is called from simra.\n\/\/ This is used to update sprites position.\n\/\/ This will be called 60 times per sec.\nfunc (t *Title) Drive() {\n\tdegree = (degree - 1) % 360\n\tfor i := range t.sprites {\n\t\tr := float32(degree) * math.Pi \/ 180\n\t\tt.sprites[i].SetRotate(r)\n\t}\n\tt.mu.Lock()\n\tt.fps++\n\tt.mu.Unlock()\n\t\/\/runtime.GC()\n}\n\n\/\/ OnTouchBegin is called when Title scene is Touched.\n\/\/ It is caused by calling AddtouchListener for title.background sprite.\nfunc (t *Title) OnTouchBegin(x, y float32) {\n\tt.spawnKokeshi(x, y)\n}\n\n\/\/ OnTouchMove is called when Title scene is Touched and moved.\n\/\/ It is caused by calling AddtouchListener for title.background sprite.\nfunc (t *Title) OnTouchMove(x, y float32) {\n\tt.spawnKokeshi(x, y)\n}\n\n\/\/ OnTouchEnd is called when Title scene is Touched and it is released.\n\/\/ It is caused by calling AddtouchListener for title.background sprite.\nfunc (t *Title) OnTouchEnd(x, y float32) {\n\tt.spawnKokeshi(x, y)\n}\n\nfunc (t *Title) spawnKokeshi(x, y float32) {\n\tsprite := t.simra.NewSprite()\n\tsprite.SetPosition(x, y)\n\tsprite.SetScale(128, 128)\n\tt.simra.AddSprite(sprite)\n\tt.sprites = append(t.sprites, sprite)\n\tsprite.ReplaceTexture(t.kokeshiTex)\n\n\ttex := t.simra.NewTextTexture(strconv.Itoa(len(t.sprites)),\n\t\t60, color.RGBA{255, 255, 255, 255}, image.Rect(0, 0, float32(t.screenWidth), 80))\n\tt.numOfSprite.ReplaceTexture(tex)\n\n\t\/\/ later sprite goes far side\n\tt.simra.SetZIndex(sprite, len(t.sprites))\n}\n<|endoftext|>"} {"text":"<commit_before>package paasio\n\nimport (\n\t\"bytes\"\n\t\"crypto\/rand\"\n\t\"io\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\n\/\/ testVersion identifies the API tested by the test program.\nconst targetTestVersion = 3\n\nfunc TestMultiThreaded(t *testing.T) {\n\tif testVersion != targetTestVersion {\n\t\tt.Errorf(\"Found testVersion = %v, want %v.\", testVersion, targetTestVersion)\n\t}\n\tmincpu := 2\n\tminproc := 2\n\tncpu := runtime.NumCPU()\n\tif ncpu < mincpu {\n\t\tt.Fatalf(\"at least %d cpu cores are required\", mincpu)\n\t}\n\tnproc := runtime.GOMAXPROCS(0)\n\tif nproc < minproc {\n\t\tt.Errorf(\"at least %d threads are required; rerun the tests\", minproc)\n\t\tt.Errorf(\"\")\n\t\tt.Errorf(\"\\tgo test -cpu %d ...\", minproc)\n\t}\n}\n\n\/\/ this test could be improved to test that error conditions are preserved.\nfunc testWrite(t *testing.T, writer func(io.Writer) WriteCounter) {\n\tfor i, test := range []struct {\n\t\twrites []string\n\t}{\n\t\t{nil},\n\t\t{[]string{\"\"}},\n\t\t{[]string{\"I\", \" \", \"never met \", \"\", \"a gohper\"}},\n\t} {\n\t\tvar buf bytes.Buffer\n\t\tbuft := writer(&buf)\n\t\tfor _, s := range test.writes {\n\t\t\tn, err := buft.Write([]byte(s))\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"test %d: Write(%q) unexpected error: %v\", i, s, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif n != len(s) {\n\t\t\t\tt.Errorf(\"test %d: Write(%q) unexpected number of bytes written: %v\", i, s, n)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tout := buf.String()\n\t\tif out != strings.Join(test.writes, \"\") {\n\t\t\tt.Errorf(\"test %d: unexpected content in underlying writer: %q\", i, out)\n\t\t}\n\t}\n}\n\nfunc TestWriteWriter(t *testing.T) {\n\ttestWrite(t, NewWriteCounter)\n}\n\nfunc TestWriteReadWriter(t *testing.T) {\n\ttestWrite(t, func(w io.Writer) WriteCounter {\n\t\tvar r nopReader\n\t\treturn NewReadWriteCounter(readWriter{r, w})\n\t})\n}\n\n\/\/ this test could be improved to test exact number of operations as well as\n\/\/ ensure that error conditions are preserved.\nfunc testRead(t *testing.T, reader func(io.Reader) ReadCounter) {\n\tchunkLen := 10 << 20 \/\/ 10MB\n\torig := make([]byte, 10<<20)\n\t_, err := rand.Read(orig)\n\tif err != nil {\n\t\tt.Fatalf(\"error reading random data\")\n\t}\n\tbuf := bytes.NewBuffer(orig)\n\trc := reader(buf)\n\tvar obuf bytes.Buffer\n\tncopy, err := io.Copy(&obuf, rc)\n\tif err != nil {\n\t\tt.Fatalf(\"error reading: %v\", err)\n\t}\n\tif ncopy != int64(chunkLen) {\n\t\tt.Fatalf(\"copied %d bytes instead of %d\", ncopy, chunkLen)\n\t}\n\tif string(orig) != obuf.String() {\n\t\tt.Fatalf(\"unexpected output from Read()\")\n\t}\n\tn, nops := rc.ReadCount()\n\tif n != int64(chunkLen) {\n\t\tt.Fatalf(\"reported %d bytes read instead of %d\", n, chunkLen)\n\t}\n\tif nops < 2 {\n\t\tt.Fatalf(\"unexpected number of reads: %v\", nops)\n\t}\n}\n\nfunc TestReadReader(t *testing.T) {\n\ttestRead(t, NewReadCounter)\n}\n\nfunc TestReadReadWriter(t *testing.T) {\n\ttestRead(t, func(r io.Reader) ReadCounter {\n\t\tvar w nopWriter\n\t\treturn NewReadWriteCounter(readWriter{r, w})\n\t})\n}\n\nfunc testReadTotal(t *testing.T, rc ReadCounter) {\n\tnumGo := 8000\n\tnumBytes := 50\n\ttotalBytes := int64(numGo) * int64(numBytes)\n\tp := make([]byte, numBytes)\n\n\tt.Logf(\"Calling Read() for %d*%d=%d bytes\", numGo, numBytes, totalBytes)\n\twg := new(sync.WaitGroup)\n\twg.Add(numGo)\n\tstart := make(chan struct{})\n\tfor i := 0; i < numGo; i++ {\n\t\tgo func() {\n\t\t\t<-start\n\t\t\trc.Read(p)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\tclose(start)\n\n\twg.Wait()\n\tn, nops := rc.ReadCount()\n\tif n != totalBytes {\n\t\tt.Errorf(\"expected %d bytes read; %d bytes reported\", totalBytes, n)\n\t}\n\tif nops != numGo {\n\t\tt.Errorf(\"expected %d read operations; %d operations reported\", numGo, nops)\n\t}\n}\n\nfunc TestReadTotalReader(t *testing.T) {\n\tvar r nopReader\n\ttestReadTotal(t, NewReadCounter(r))\n}\n\nfunc TestReadTotalReadWriter(t *testing.T) {\n\tvar rw nopReadWriter\n\ttestReadTotal(t, NewReadWriteCounter(rw))\n}\n\nfunc testWriteTotal(t *testing.T, wt WriteCounter) {\n\tnumGo := 8000\n\tnumBytes := 50\n\ttotalBytes := int64(numGo) * int64(numBytes)\n\tp := make([]byte, numBytes)\n\n\tt.Logf(\"Calling Write() with %d*%d=%d bytes\", numGo, numBytes, totalBytes)\n\twg := new(sync.WaitGroup)\n\twg.Add(numGo)\n\tstart := make(chan struct{})\n\tfor i := 0; i < numGo; i++ {\n\t\tgo func() {\n\t\t\t<-start\n\t\t\twt.Write(p)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\tclose(start)\n\n\twg.Wait()\n\tn, nops := wt.WriteCount()\n\tif n != totalBytes {\n\t\tt.Errorf(\"expected %d bytes written; %d bytes reported\", totalBytes, n)\n\t}\n\tif nops != numGo {\n\t\tt.Errorf(\"expected %d write operations; %d operations reported\", numGo, nops)\n\t}\n}\n\nfunc TestWriteTotalWriter(t *testing.T) {\n\tvar w nopWriter\n\ttestWriteTotal(t, NewWriteCounter(w))\n}\n\nfunc TestWriteTotalReadWriter(t *testing.T) {\n\tvar rw nopReadWriter\n\ttestWriteTotal(t, NewReadWriteCounter(rw))\n}\n\ntype nopWriter struct{ error }\n\nfunc (w nopWriter) Write(p []byte) (int, error) {\n\ttime.Sleep(1)\n\tif w.error != nil {\n\t\treturn 0, w.error\n\t}\n\treturn len(p), nil\n}\n\ntype nopReader struct{ error }\n\nfunc (r nopReader) Read(p []byte) (int, error) {\n\ttime.Sleep(1)\n\tif r.error != nil {\n\t\treturn 0, r.error\n\t}\n\treturn len(p), nil\n}\n\ntype nopReadWriter struct {\n\tnopReader\n\tnopWriter\n}\n\ntype readWriter struct {\n\tio.Reader\n\tio.Writer\n}\n<commit_msg>paasio: Ensure test versioning consistency with other exercises (#542)<commit_after>package paasio\n\nimport (\n\t\"bytes\"\n\t\"crypto\/rand\"\n\t\"io\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\n\/\/ targetTestVersion identifies the API tested by the test program.\nconst targetTestVersion = 3\n\nfunc TestTestVersion(t *testing.T) {\n\tif testVersion != targetTestVersion {\n\t\tt.Fatalf(\"Found testVersion = %v, want %v\", testVersion, targetTestVersion)\n\t}\n}\n\nfunc TestMultiThreaded(t *testing.T) {\n\tmincpu := 2\n\tminproc := 2\n\tncpu := runtime.NumCPU()\n\tif ncpu < mincpu {\n\t\tt.Fatalf(\"at least %d cpu cores are required\", mincpu)\n\t}\n\tnproc := runtime.GOMAXPROCS(0)\n\tif nproc < minproc {\n\t\tt.Errorf(\"at least %d threads are required; rerun the tests\", minproc)\n\t\tt.Errorf(\"\")\n\t\tt.Errorf(\"\\tgo test -cpu %d ...\", minproc)\n\t}\n}\n\n\/\/ this test could be improved to test that error conditions are preserved.\nfunc testWrite(t *testing.T, writer func(io.Writer) WriteCounter) {\n\tfor i, test := range []struct {\n\t\twrites []string\n\t}{\n\t\t{nil},\n\t\t{[]string{\"\"}},\n\t\t{[]string{\"I\", \" \", \"never met \", \"\", \"a gohper\"}},\n\t} {\n\t\tvar buf bytes.Buffer\n\t\tbuft := writer(&buf)\n\t\tfor _, s := range test.writes {\n\t\t\tn, err := buft.Write([]byte(s))\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"test %d: Write(%q) unexpected error: %v\", i, s, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif n != len(s) {\n\t\t\t\tt.Errorf(\"test %d: Write(%q) unexpected number of bytes written: %v\", i, s, n)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tout := buf.String()\n\t\tif out != strings.Join(test.writes, \"\") {\n\t\t\tt.Errorf(\"test %d: unexpected content in underlying writer: %q\", i, out)\n\t\t}\n\t}\n}\n\nfunc TestWriteWriter(t *testing.T) {\n\ttestWrite(t, NewWriteCounter)\n}\n\nfunc TestWriteReadWriter(t *testing.T) {\n\ttestWrite(t, func(w io.Writer) WriteCounter {\n\t\tvar r nopReader\n\t\treturn NewReadWriteCounter(readWriter{r, w})\n\t})\n}\n\n\/\/ this test could be improved to test exact number of operations as well as\n\/\/ ensure that error conditions are preserved.\nfunc testRead(t *testing.T, reader func(io.Reader) ReadCounter) {\n\tchunkLen := 10 << 20 \/\/ 10MB\n\torig := make([]byte, 10<<20)\n\t_, err := rand.Read(orig)\n\tif err != nil {\n\t\tt.Fatalf(\"error reading random data\")\n\t}\n\tbuf := bytes.NewBuffer(orig)\n\trc := reader(buf)\n\tvar obuf bytes.Buffer\n\tncopy, err := io.Copy(&obuf, rc)\n\tif err != nil {\n\t\tt.Fatalf(\"error reading: %v\", err)\n\t}\n\tif ncopy != int64(chunkLen) {\n\t\tt.Fatalf(\"copied %d bytes instead of %d\", ncopy, chunkLen)\n\t}\n\tif string(orig) != obuf.String() {\n\t\tt.Fatalf(\"unexpected output from Read()\")\n\t}\n\tn, nops := rc.ReadCount()\n\tif n != int64(chunkLen) {\n\t\tt.Fatalf(\"reported %d bytes read instead of %d\", n, chunkLen)\n\t}\n\tif nops < 2 {\n\t\tt.Fatalf(\"unexpected number of reads: %v\", nops)\n\t}\n}\n\nfunc TestReadReader(t *testing.T) {\n\ttestRead(t, NewReadCounter)\n}\n\nfunc TestReadReadWriter(t *testing.T) {\n\ttestRead(t, func(r io.Reader) ReadCounter {\n\t\tvar w nopWriter\n\t\treturn NewReadWriteCounter(readWriter{r, w})\n\t})\n}\n\nfunc testReadTotal(t *testing.T, rc ReadCounter) {\n\tnumGo := 8000\n\tnumBytes := 50\n\ttotalBytes := int64(numGo) * int64(numBytes)\n\tp := make([]byte, numBytes)\n\n\tt.Logf(\"Calling Read() for %d*%d=%d bytes\", numGo, numBytes, totalBytes)\n\twg := new(sync.WaitGroup)\n\twg.Add(numGo)\n\tstart := make(chan struct{})\n\tfor i := 0; i < numGo; i++ {\n\t\tgo func() {\n\t\t\t<-start\n\t\t\trc.Read(p)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\tclose(start)\n\n\twg.Wait()\n\tn, nops := rc.ReadCount()\n\tif n != totalBytes {\n\t\tt.Errorf(\"expected %d bytes read; %d bytes reported\", totalBytes, n)\n\t}\n\tif nops != numGo {\n\t\tt.Errorf(\"expected %d read operations; %d operations reported\", numGo, nops)\n\t}\n}\n\nfunc TestReadTotalReader(t *testing.T) {\n\tvar r nopReader\n\ttestReadTotal(t, NewReadCounter(r))\n}\n\nfunc TestReadTotalReadWriter(t *testing.T) {\n\tvar rw nopReadWriter\n\ttestReadTotal(t, NewReadWriteCounter(rw))\n}\n\nfunc testWriteTotal(t *testing.T, wt WriteCounter) {\n\tnumGo := 8000\n\tnumBytes := 50\n\ttotalBytes := int64(numGo) * int64(numBytes)\n\tp := make([]byte, numBytes)\n\n\tt.Logf(\"Calling Write() with %d*%d=%d bytes\", numGo, numBytes, totalBytes)\n\twg := new(sync.WaitGroup)\n\twg.Add(numGo)\n\tstart := make(chan struct{})\n\tfor i := 0; i < numGo; i++ {\n\t\tgo func() {\n\t\t\t<-start\n\t\t\twt.Write(p)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\tclose(start)\n\n\twg.Wait()\n\tn, nops := wt.WriteCount()\n\tif n != totalBytes {\n\t\tt.Errorf(\"expected %d bytes written; %d bytes reported\", totalBytes, n)\n\t}\n\tif nops != numGo {\n\t\tt.Errorf(\"expected %d write operations; %d operations reported\", numGo, nops)\n\t}\n}\n\nfunc TestWriteTotalWriter(t *testing.T) {\n\tvar w nopWriter\n\ttestWriteTotal(t, NewWriteCounter(w))\n}\n\nfunc TestWriteTotalReadWriter(t *testing.T) {\n\tvar rw nopReadWriter\n\ttestWriteTotal(t, NewReadWriteCounter(rw))\n}\n\ntype nopWriter struct{ error }\n\nfunc (w nopWriter) Write(p []byte) (int, error) {\n\ttime.Sleep(1)\n\tif w.error != nil {\n\t\treturn 0, w.error\n\t}\n\treturn len(p), nil\n}\n\ntype nopReader struct{ error }\n\nfunc (r nopReader) Read(p []byte) (int, error) {\n\ttime.Sleep(1)\n\tif r.error != nil {\n\t\treturn 0, r.error\n\t}\n\treturn len(p), nil\n}\n\ntype nopReadWriter struct {\n\tnopReader\n\tnopWriter\n}\n\ntype readWriter struct {\n\tio.Reader\n\tio.Writer\n}\n<|endoftext|>"} {"text":"<commit_before>package thrift_nats\n\nimport (\n\t\"log\"\n\t\"runtime\/debug\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.apache.org\/thrift.git\/lib\/go\/thrift\"\n\t\"github.com\/nats-io\/nats\"\n)\n\nconst (\n\tqueue = \"rpc\"\n\tdefaultHeartbeatDeadline = 5 * time.Second\n\tmaxMissedHeartbeats = 3\n)\n\ntype natsServer struct {\n\tconn *nats.Conn\n\tsubject string\n\tclientTimeout time.Duration\n\theartbeatDeadline time.Duration\n\tclients map[string]thrift.TTransport\n\tmu sync.Mutex\n\tquit chan struct{}\n\tprocessorFactory thrift.TProcessorFactory\n\tserverTransport *natsServerTransport\n\tinputTransportFactory thrift.TTransportFactory\n\toutputTransportFactory thrift.TTransportFactory\n\tinputProtocolFactory thrift.TProtocolFactory\n\toutputProtocolFactory thrift.TProtocolFactory\n}\n\n\/\/ NewNATSServer returns a Thrift TServer which uses the NATS messaging system\n\/\/ as the underlying transport. The subject is the NATS subject used for\n\/\/ connection handshakes. The client timeout controls the read timeout on the\n\/\/ client connection (negative value for no timeout). The heartbeat deadline\n\/\/ controls how long clients have to respond with a heartbeat (negative value\n\/\/ for no heartbeats).\nfunc NewNATSServer(\n\tconn *nats.Conn,\n\tsubject string,\n\tclientTimeout time.Duration,\n\theartbeatDeadline time.Duration,\n\tprocessor thrift.TProcessor,\n\ttransportFactory thrift.TTransportFactory,\n\tprotocolFactory thrift.TProtocolFactory) thrift.TServer {\n\n\treturn NewNATSServerFactory7(\n\t\tconn,\n\t\tsubject,\n\t\tclientTimeout,\n\t\theartbeatDeadline,\n\t\tthrift.NewTProcessorFactory(processor),\n\t\ttransportFactory,\n\t\tprotocolFactory,\n\t)\n}\n\nfunc NewNATSServerFactory7(\n\tconn *nats.Conn,\n\tsubject string,\n\tclientTimeout time.Duration,\n\theartbeatDeadline time.Duration,\n\tprocessorFactory thrift.TProcessorFactory,\n\ttransportFactory thrift.TTransportFactory,\n\tprotocolFactory thrift.TProtocolFactory) thrift.TServer {\n\n\treturn NewNATSServerFactory9(\n\t\tconn,\n\t\tsubject,\n\t\tclientTimeout,\n\t\theartbeatDeadline,\n\t\tprocessorFactory,\n\t\ttransportFactory,\n\t\ttransportFactory,\n\t\tprotocolFactory,\n\t\tprotocolFactory,\n\t)\n}\n\nfunc NewNATSServerFactory9(\n\tconn *nats.Conn,\n\tsubject string,\n\tclientTimeout time.Duration,\n\theartbeatDeadline time.Duration,\n\tprocessorFactory thrift.TProcessorFactory,\n\tinputTransportFactory thrift.TTransportFactory,\n\toutputTransportFactory thrift.TTransportFactory,\n\tinputProtocolFactory thrift.TProtocolFactory,\n\toutputProtocolFactory thrift.TProtocolFactory) thrift.TServer {\n\n\treturn &natsServer{\n\t\tconn: conn,\n\t\tsubject: subject,\n\t\tclientTimeout: clientTimeout,\n\t\theartbeatDeadline: heartbeatDeadline,\n\t\tclients: make(map[string]thrift.TTransport),\n\t\tprocessorFactory: processorFactory,\n\t\tserverTransport: newNATSServerTransport(conn),\n\t\tinputTransportFactory: inputTransportFactory,\n\t\toutputTransportFactory: outputTransportFactory,\n\t\tinputProtocolFactory: inputProtocolFactory,\n\t\toutputProtocolFactory: outputProtocolFactory,\n\t\tquit: make(chan struct{}, 1),\n\t}\n}\n\nfunc (n *natsServer) ProcessorFactory() thrift.TProcessorFactory {\n\treturn n.processorFactory\n}\n\nfunc (n *natsServer) ServerTransport() thrift.TServerTransport {\n\treturn n.serverTransport\n}\n\nfunc (n *natsServer) InputTransportFactory() thrift.TTransportFactory {\n\treturn n.inputTransportFactory\n}\n\nfunc (n *natsServer) OutputTransportFactory() thrift.TTransportFactory {\n\treturn n.outputTransportFactory\n}\n\nfunc (n *natsServer) InputProtocolFactory() thrift.TProtocolFactory {\n\treturn n.inputProtocolFactory\n}\n\nfunc (n *natsServer) OutputProtocolFactory() thrift.TProtocolFactory {\n\treturn n.outputProtocolFactory\n}\n\nfunc (n *natsServer) Listen() error {\n\treturn n.serverTransport.Listen()\n}\n\nfunc (n *natsServer) AcceptLoop() error {\n\tsub, err := n.conn.QueueSubscribe(n.subject, queue, func(msg *nats.Msg) {\n\t\tif msg.Reply != \"\" {\n\t\t\tvar (\n\t\t\t\theartbeat = nats.NewInbox()\n\t\t\t\tlistenTo = nats.NewInbox()\n\t\t\t\tclient, err = n.accept(listenTo, msg.Reply, heartbeat)\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"thrift_nats: error accepting client transport:\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif n.isHeartbeating() {\n\t\t\t\tn.mu.Lock()\n\t\t\t\tn.clients[heartbeat] = client\n\t\t\t\tn.mu.Unlock()\n\t\t\t}\n\n\t\t\tconnectMsg := heartbeat + \" \" + strconv.FormatInt(int64(n.heartbeatDeadline), 10)\n\t\t\tif err := n.conn.PublishRequest(msg.Reply, listenTo, []byte(connectMsg)); err != nil {\n\t\t\t\tlog.Println(\"thrift_nats: error publishing transport inbox:\", err)\n\t\t\t\tif n.isHeartbeating() {\n\t\t\t\t\tn.remove(heartbeat)\n\t\t\t\t}\n\t\t\t} else if n.isHeartbeating() {\n\t\t\t\tgo n.acceptHeartbeat(heartbeat)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Printf(\"thrift_nats: discarding invalid connect message %+v\", msg)\n\t\t}\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tn.conn.Flush()\n\n\tlog.Println(\"thrift_nats: server running...\")\n\t<-n.quit\n\treturn sub.Unsubscribe()\n}\n\nfunc (n *natsServer) remove(heartbeat string) {\n\tn.mu.Lock()\n\tdefer n.mu.Unlock()\n\tclient, ok := n.clients[heartbeat]\n\tif !ok {\n\t\treturn\n\t}\n\tclient.Close()\n\tdelete(n.clients, heartbeat)\n}\n\nfunc (n *natsServer) acceptHeartbeat(heartbeat string) {\n\tmissed := 0\n\trecvHeartbeat := make(chan struct{})\n\n\tsub, err := n.conn.Subscribe(heartbeat, func(msg *nats.Msg) {\n\t\trecvHeartbeat <- struct{}{}\n\t})\n\tif err != nil {\n\t\tlog.Println(\"thrift_nats: error subscribing to heartbeat\", heartbeat)\n\t\treturn\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(time.Duration(n.heartbeatDeadline)):\n\t\t\tmissed += 1\n\t\t\tif missed >= maxMissedHeartbeats {\n\t\t\t\tlog.Println(\"thrift_nats: client heartbeat expired\")\n\t\t\t\tn.remove(heartbeat)\n\t\t\t\tsub.Unsubscribe()\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-recvHeartbeat:\n\t\t\tmissed = 0\n\t\t}\n\t}\n}\n\nfunc (n *natsServer) accept(listenTo, replyTo, heartbeat string) (thrift.TTransport, error) {\n\tclient := n.serverTransport.AcceptNATS(listenTo, replyTo, n.clientTimeout)\n\tif err := client.Open(); err != nil {\n\t\treturn nil, err\n\t}\n\tgo func() {\n\t\tif err := n.processRequests(client); err != nil {\n\t\t\tlog.Println(\"thrift_nats: error processing request:\", err)\n\t\t}\n\t\tn.remove(heartbeat)\n\t}()\n\treturn client, nil\n}\n\nfunc (n *natsServer) Serve() error {\n\tif err := n.Listen(); err != nil {\n\t\treturn err\n\t}\n\tn.AcceptLoop()\n\treturn nil\n}\n\nfunc (n *natsServer) Stop() error {\n\tn.quit <- struct{}{}\n\tn.serverTransport.Interrupt()\n\treturn nil\n}\n\nfunc (n *natsServer) processRequests(client thrift.TTransport) error {\n\tprocessor := n.processorFactory.GetProcessor(client)\n\tinputTransport := n.inputTransportFactory.GetTransport(client)\n\toutputTransport := n.outputTransportFactory.GetTransport(client)\n\tinputProtocol := n.inputProtocolFactory.GetProtocol(inputTransport)\n\toutputProtocol := n.outputProtocolFactory.GetProtocol(outputTransport)\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tlog.Printf(\"panic in processor: %s: %s\", e, debug.Stack())\n\t\t}\n\t}()\n\tif inputTransport != nil {\n\t\tdefer inputTransport.Close()\n\t}\n\tif outputTransport != nil {\n\t\tdefer outputTransport.Close()\n\t}\n\tfor {\n\t\tok, err := processor.Process(inputProtocol, outputProtocol)\n\t\tif err, ok := err.(thrift.TTransportException); ok && err.TypeId() == thrift.END_OF_FILE {\n\t\t\treturn nil\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (n *natsServer) isHeartbeating() bool {\n\treturn n.heartbeatDeadline > 0\n}\n<commit_msg>Fix heartbeat deadline to use milliseconds<commit_after>package thrift_nats\n\nimport (\n\t\"log\"\n\t\"runtime\/debug\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"git.apache.org\/thrift.git\/lib\/go\/thrift\"\n\t\"github.com\/nats-io\/nats\"\n)\n\nconst (\n\tqueue = \"rpc\"\n\tmaxMissedHeartbeats = 3\n)\n\ntype natsServer struct {\n\tconn *nats.Conn\n\tsubject string\n\tclientTimeout time.Duration\n\theartbeatDeadline time.Duration\n\tclients map[string]thrift.TTransport\n\tmu sync.Mutex\n\tquit chan struct{}\n\tprocessorFactory thrift.TProcessorFactory\n\tserverTransport *natsServerTransport\n\tinputTransportFactory thrift.TTransportFactory\n\toutputTransportFactory thrift.TTransportFactory\n\tinputProtocolFactory thrift.TProtocolFactory\n\toutputProtocolFactory thrift.TProtocolFactory\n}\n\n\/\/ NewNATSServer returns a Thrift TServer which uses the NATS messaging system\n\/\/ as the underlying transport. The subject is the NATS subject used for\n\/\/ connection handshakes. The client timeout controls the read timeout on the\n\/\/ client connection (negative value for no timeout). The heartbeat deadline\n\/\/ controls how long clients have to respond with a heartbeat (negative value\n\/\/ for no heartbeats).\nfunc NewNATSServer(\n\tconn *nats.Conn,\n\tsubject string,\n\tclientTimeout time.Duration,\n\theartbeatDeadline time.Duration,\n\tprocessor thrift.TProcessor,\n\ttransportFactory thrift.TTransportFactory,\n\tprotocolFactory thrift.TProtocolFactory) thrift.TServer {\n\n\treturn NewNATSServerFactory7(\n\t\tconn,\n\t\tsubject,\n\t\tclientTimeout,\n\t\theartbeatDeadline,\n\t\tthrift.NewTProcessorFactory(processor),\n\t\ttransportFactory,\n\t\tprotocolFactory,\n\t)\n}\n\nfunc NewNATSServerFactory7(\n\tconn *nats.Conn,\n\tsubject string,\n\tclientTimeout time.Duration,\n\theartbeatDeadline time.Duration,\n\tprocessorFactory thrift.TProcessorFactory,\n\ttransportFactory thrift.TTransportFactory,\n\tprotocolFactory thrift.TProtocolFactory) thrift.TServer {\n\n\treturn NewNATSServerFactory9(\n\t\tconn,\n\t\tsubject,\n\t\tclientTimeout,\n\t\theartbeatDeadline,\n\t\tprocessorFactory,\n\t\ttransportFactory,\n\t\ttransportFactory,\n\t\tprotocolFactory,\n\t\tprotocolFactory,\n\t)\n}\n\nfunc NewNATSServerFactory9(\n\tconn *nats.Conn,\n\tsubject string,\n\tclientTimeout time.Duration,\n\theartbeatDeadline time.Duration,\n\tprocessorFactory thrift.TProcessorFactory,\n\tinputTransportFactory thrift.TTransportFactory,\n\toutputTransportFactory thrift.TTransportFactory,\n\tinputProtocolFactory thrift.TProtocolFactory,\n\toutputProtocolFactory thrift.TProtocolFactory) thrift.TServer {\n\n\treturn &natsServer{\n\t\tconn: conn,\n\t\tsubject: subject,\n\t\tclientTimeout: clientTimeout,\n\t\theartbeatDeadline: heartbeatDeadline,\n\t\tclients: make(map[string]thrift.TTransport),\n\t\tprocessorFactory: processorFactory,\n\t\tserverTransport: newNATSServerTransport(conn),\n\t\tinputTransportFactory: inputTransportFactory,\n\t\toutputTransportFactory: outputTransportFactory,\n\t\tinputProtocolFactory: inputProtocolFactory,\n\t\toutputProtocolFactory: outputProtocolFactory,\n\t\tquit: make(chan struct{}, 1),\n\t}\n}\n\nfunc (n *natsServer) ProcessorFactory() thrift.TProcessorFactory {\n\treturn n.processorFactory\n}\n\nfunc (n *natsServer) ServerTransport() thrift.TServerTransport {\n\treturn n.serverTransport\n}\n\nfunc (n *natsServer) InputTransportFactory() thrift.TTransportFactory {\n\treturn n.inputTransportFactory\n}\n\nfunc (n *natsServer) OutputTransportFactory() thrift.TTransportFactory {\n\treturn n.outputTransportFactory\n}\n\nfunc (n *natsServer) InputProtocolFactory() thrift.TProtocolFactory {\n\treturn n.inputProtocolFactory\n}\n\nfunc (n *natsServer) OutputProtocolFactory() thrift.TProtocolFactory {\n\treturn n.outputProtocolFactory\n}\n\nfunc (n *natsServer) Listen() error {\n\treturn n.serverTransport.Listen()\n}\n\nfunc (n *natsServer) AcceptLoop() error {\n\tsub, err := n.conn.QueueSubscribe(n.subject, queue, func(msg *nats.Msg) {\n\t\tif msg.Reply != \"\" {\n\t\t\tvar (\n\t\t\t\theartbeat = nats.NewInbox()\n\t\t\t\tlistenTo = nats.NewInbox()\n\t\t\t\tclient, err = n.accept(listenTo, msg.Reply, heartbeat)\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"thrift_nats: error accepting client transport:\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif n.isHeartbeating() {\n\t\t\t\tn.mu.Lock()\n\t\t\t\tn.clients[heartbeat] = client\n\t\t\t\tn.mu.Unlock()\n\t\t\t}\n\n\t\t\tconnectMsg := heartbeat + \" \" + strconv.FormatInt(int64(n.heartbeatDeadline*time.Millisecond), 10)\n\t\t\tif err := n.conn.PublishRequest(msg.Reply, listenTo, []byte(connectMsg)); err != nil {\n\t\t\t\tlog.Println(\"thrift_nats: error publishing transport inbox:\", err)\n\t\t\t\tif n.isHeartbeating() {\n\t\t\t\t\tn.remove(heartbeat)\n\t\t\t\t}\n\t\t\t} else if n.isHeartbeating() {\n\t\t\t\tgo n.acceptHeartbeat(heartbeat)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Printf(\"thrift_nats: discarding invalid connect message %+v\", msg)\n\t\t}\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tn.conn.Flush()\n\n\tlog.Println(\"thrift_nats: server running...\")\n\t<-n.quit\n\treturn sub.Unsubscribe()\n}\n\nfunc (n *natsServer) remove(heartbeat string) {\n\tn.mu.Lock()\n\tdefer n.mu.Unlock()\n\tclient, ok := n.clients[heartbeat]\n\tif !ok {\n\t\treturn\n\t}\n\tclient.Close()\n\tdelete(n.clients, heartbeat)\n}\n\nfunc (n *natsServer) acceptHeartbeat(heartbeat string) {\n\tmissed := 0\n\trecvHeartbeat := make(chan struct{})\n\n\tsub, err := n.conn.Subscribe(heartbeat, func(msg *nats.Msg) {\n\t\trecvHeartbeat <- struct{}{}\n\t})\n\tif err != nil {\n\t\tlog.Println(\"thrift_nats: error subscribing to heartbeat\", heartbeat)\n\t\treturn\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(time.Duration(n.heartbeatDeadline)):\n\t\t\tmissed += 1\n\t\t\tif missed >= maxMissedHeartbeats {\n\t\t\t\tlog.Println(\"thrift_nats: client heartbeat expired\")\n\t\t\t\tn.remove(heartbeat)\n\t\t\t\tsub.Unsubscribe()\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-recvHeartbeat:\n\t\t\tmissed = 0\n\t\t}\n\t}\n}\n\nfunc (n *natsServer) accept(listenTo, replyTo, heartbeat string) (thrift.TTransport, error) {\n\tclient := n.serverTransport.AcceptNATS(listenTo, replyTo, n.clientTimeout)\n\tif err := client.Open(); err != nil {\n\t\treturn nil, err\n\t}\n\tgo func() {\n\t\tif err := n.processRequests(client); err != nil {\n\t\t\tlog.Println(\"thrift_nats: error processing request:\", err)\n\t\t}\n\t\tn.remove(heartbeat)\n\t}()\n\treturn client, nil\n}\n\nfunc (n *natsServer) Serve() error {\n\tif err := n.Listen(); err != nil {\n\t\treturn err\n\t}\n\tn.AcceptLoop()\n\treturn nil\n}\n\nfunc (n *natsServer) Stop() error {\n\tn.quit <- struct{}{}\n\tn.serverTransport.Interrupt()\n\treturn nil\n}\n\nfunc (n *natsServer) processRequests(client thrift.TTransport) error {\n\tprocessor := n.processorFactory.GetProcessor(client)\n\tinputTransport := n.inputTransportFactory.GetTransport(client)\n\toutputTransport := n.outputTransportFactory.GetTransport(client)\n\tinputProtocol := n.inputProtocolFactory.GetProtocol(inputTransport)\n\toutputProtocol := n.outputProtocolFactory.GetProtocol(outputTransport)\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tlog.Printf(\"panic in processor: %s: %s\", e, debug.Stack())\n\t\t}\n\t}()\n\tif inputTransport != nil {\n\t\tdefer inputTransport.Close()\n\t}\n\tif outputTransport != nil {\n\t\tdefer outputTransport.Close()\n\t}\n\tfor {\n\t\tok, err := processor.Process(inputProtocol, outputProtocol)\n\t\tif err, ok := err.(thrift.TTransportException); ok && err.TypeId() == thrift.END_OF_FILE {\n\t\t\treturn nil\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (n *natsServer) isHeartbeating() bool {\n\treturn n.heartbeatDeadline > 0\n}\n<|endoftext|>"} {"text":"<commit_before>package protoface\n\ntype Transform interface {\n\tProtoBuff\n\t\/\/ func GetObjects() []*ScriptObject\n\tfunc IGetNthObject(n int) ScriptObject\n\tfunc ISetNthObject(n int, s ScriptObject)\n\tfunc GetPkg() *Package\n}<commit_msg>rename this to match the pb name<commit_after><|endoftext|>"} {"text":"<commit_before>package clickhouse\n\nimport (\n\t\"context\"\n\t\"encoding\/binary\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"html\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/lomik\/graphite-clickhouse\/pkg\/scope\"\n\n\t\"go.uber.org\/zap\"\n\t\"go.uber.org\/zap\/zapcore\"\n)\n\ntype ErrDataParse struct {\n\terr string\n\tdata string\n}\n\nfunc NewErrDataParse(err string, data string) error {\n\treturn &ErrDataParse{err, data}\n}\n\nfunc (e *ErrDataParse) Error() string {\n\treturn fmt.Sprintf(\"%s: %s\", e.err, e.data)\n}\n\nfunc (e *ErrDataParse) PrependDescription(test string) {\n\te.data = test + e.data\n}\n\ntype ErrorWithCode struct {\n\terr string\n\tCode int \/\/ error code\n}\n\nfunc NewErrorWithCode(err string, code int) error {\n\treturn &ErrorWithCode{err, code}\n}\n\nfunc (e *ErrorWithCode) Error() string { return e.err }\n\nvar ErrUvarintRead = errors.New(\"ReadUvarint: Malformed array\")\nvar ErrUvarintOverflow = errors.New(\"ReadUvarint: varint overflows a 64-bit integer\")\nvar ErrClickHouseResponse = errors.New(\"Malformed response from clickhouse\")\n\nfunc HandleError(w http.ResponseWriter, err error) {\n\tif errors.Is(err, context.Canceled) {\n\t\thttp.Error(w, \"Storage read context canceled\", http.StatusGatewayTimeout)\n\t\treturn\n\t}\n\tnetErr, ok := err.(net.Error)\n\tif ok {\n\t\tif netErr.Timeout() {\n\t\t\thttp.Error(w, \"Storage read timeout\", http.StatusGatewayTimeout)\n\t\t} else if strings.HasSuffix(err.Error(), \"connect: no route to host\") ||\n\t\t\tstrings.HasSuffix(err.Error(), \"connect: connection refused\") ||\n\t\t\tstrings.HasSuffix(err.Error(), \": connection reset by peer\") ||\n\t\t\tstrings.HasPrefix(err.Error(), \"dial tcp: lookup \") { \/\/ DNS lookup\n\t\t\thttp.Error(w, \"Storage error\", http.StatusServiceUnavailable)\n\t\t} else {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\terrCode, ok := err.(*ErrorWithCode)\n\tif ok {\n\t\tif (errCode.Code > 500 && errCode.Code < 512) ||\n\t\t\terrCode.Code == http.StatusBadRequest || errCode.Code == http.StatusForbidden {\n\t\t\thttp.Error(w, html.EscapeString(errCode.Error()), errCode.Code)\n\t\t} else {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\t_, ok = err.(*ErrDataParse)\n\tif ok || strings.HasPrefix(err.Error(), \"clickhouse response status 500: Code:\") {\n\t\tif strings.Contains(err.Error(), \": Limit for \") {\n\t\t\t\/\/logger.Info(\"limit\", zap.Error(err))\n\t\t\thttp.Error(w, \"Storage read limit\", http.StatusForbidden)\n\t\t} else if !ok && strings.HasPrefix(err.Error(), \"clickhouse response status 500: Code: 170,\") {\n\t\t\t\/\/ distributed table configuration error\n\t\t\t\/\/ clickhouse response status 500: Code: 170, e.displayText() = DB::Exception: Requested cluster 'cluster' not found\n\t\t\thttp.Error(w, \"Storage configuration error\", http.StatusServiceUnavailable)\n\t\t}\n\t} else {\n\t\t\/\/logger.Debug(\"query\", zap.Error(err))\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}\n\ntype Options struct {\n\tTimeout time.Duration\n\tConnectTimeout time.Duration\n}\n\ntype loggedReader struct {\n\treader io.ReadCloser\n\tlogger *zap.Logger\n\tstart time.Time\n\tfinished bool\n\tqueryID string\n}\n\nfunc (r *loggedReader) Read(p []byte) (int, error) {\n\tn, err := r.reader.Read(p)\n\tif err != nil && !r.finished {\n\t\tr.finished = true\n\t\tr.logger.Info(\"query\", zap.String(\"query_id\", r.queryID), zap.Duration(\"time\", time.Since(r.start)))\n\t}\n\treturn n, err\n}\n\nfunc (r *loggedReader) Close() error {\n\terr := r.reader.Close()\n\tif !r.finished {\n\t\tr.finished = true\n\t\tr.logger.Info(\"query\", zap.String(\"query_id\", r.queryID), zap.Duration(\"time\", time.Since(r.start)))\n\t}\n\treturn err\n}\n\nfunc formatSQL(q string) string {\n\ts := strings.Split(q, \"\\n\")\n\tfor i := 0; i < len(s); i++ {\n\t\ts[i] = strings.TrimSpace(s[i])\n\t}\n\n\treturn strings.Join(s, \" \")\n}\n\nfunc Query(ctx context.Context, dsn string, query string, opts Options, extData *ExternalData) ([]byte, error) {\n\treturn Post(ctx, dsn, query, nil, opts, extData)\n}\n\nfunc Post(ctx context.Context, dsn string, query string, postBody io.Reader, opts Options, extData *ExternalData) ([]byte, error) {\n\treturn do(ctx, dsn, query, postBody, false, opts, extData)\n}\n\nfunc PostGzip(ctx context.Context, dsn string, query string, postBody io.Reader, opts Options, extData *ExternalData) ([]byte, error) {\n\treturn do(ctx, dsn, query, postBody, true, opts, extData)\n}\n\nfunc Reader(ctx context.Context, dsn string, query string, opts Options, extData *ExternalData) (io.ReadCloser, error) {\n\treturn reader(ctx, dsn, query, nil, false, opts, extData)\n}\n\nfunc reader(ctx context.Context, dsn string, query string, postBody io.Reader, gzip bool, opts Options, extData *ExternalData) (bodyReader io.ReadCloser, err error) {\n\tif postBody != nil && extData != nil {\n\t\terr = fmt.Errorf(\"postBody and extData could not be passed in one request\")\n\t\treturn\n\t}\n\n\tvar chQueryID string\n\n\tstart := time.Now()\n\n\trequestID := scope.RequestID(ctx)\n\n\tqueryForLogger := query\n\tif len(queryForLogger) > 500 {\n\t\tqueryForLogger = queryForLogger[:395] + \"<...>\" + queryForLogger[len(queryForLogger)-100:]\n\t}\n\tlogger := scope.Logger(ctx).With(zap.String(\"query\", formatSQL(queryForLogger)))\n\n\tdefer func() {\n\t\t\/\/ fmt.Println(time.Since(start), formatSQL(queryForLogger))\n\t\tif err != nil {\n\t\t\tlogger.Error(\"query\", zap.Error(err), zap.Duration(\"time\", time.Since(start)))\n\t\t}\n\t}()\n\n\tp, err := url.Parse(dsn)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar b [8]byte\n\tbinary.LittleEndian.PutUint64(b[:], rand.Uint64())\n\tqueryID := fmt.Sprintf(\"%x\", b)\n\n\tq := p.Query()\n\tq.Set(\"query_id\", fmt.Sprintf(\"%s::%s\", requestID, queryID))\n\t\/\/ Get X-Clickhouse-Summary header\n\t\/\/ TODO: remove when https:\/\/github.com\/ClickHouse\/ClickHouse\/issues\/16207 is done\n\tq.Set(\"send_progress_in_http_headers\", \"1\")\n\tq.Set(\"http_headers_progress_interval_ms\", \"10000\")\n\tp.RawQuery = q.Encode()\n\n\tvar contentHeader string\n\tif postBody != nil {\n\t\tq := p.Query()\n\t\tq.Set(\"query\", query)\n\t\tp.RawQuery = q.Encode()\n\t} else if extData != nil {\n\t\tq := p.Query()\n\t\tq.Set(\"query\", query)\n\t\tp.RawQuery = q.Encode()\n\t\tpostBody, contentHeader, err = extData.buildBody(ctx, p)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tpostBody = strings.NewReader(query)\n\t}\n\n\turl := p.String()\n\n\treq, err := http.NewRequest(\"POST\", url, postBody)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treq.Header.Add(\"User-Agent\", scope.ClickhouseUserAgent(ctx))\n\tif contentHeader != \"\" {\n\t\treq.Header.Add(\"Content-Type\", contentHeader)\n\t}\n\n\tif gzip {\n\t\treq.Header.Add(\"Content-Encoding\", \"gzip\")\n\t}\n\n\tclient := &http.Client{\n\t\tTimeout: opts.Timeout,\n\t\tTransport: &http.Transport{\n\t\t\tDial: (&net.Dialer{\n\t\t\t\tTimeout: opts.ConnectTimeout,\n\t\t\t}).Dial,\n\t\t\tDisableKeepAlives: true,\n\t\t},\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ chproxy overwrite our query id. So read it again\n\tchQueryID = resp.Header.Get(\"X-ClickHouse-Query-Id\")\n\n\tsummary := make(map[string]string)\n\terr = json.Unmarshal([]byte(resp.Header.Get(\"X-Clickhouse-Summary\")), &summary)\n\tif err == nil {\n\t\t\/\/ TODO: use in carbon metrics sender when it will be implemented\n\t\tfields := make([]zapcore.Field, 0, len(summary))\n\t\tfor k, v := range summary {\n\t\t\tfields = append(fields, zap.String(k, v))\n\t\t}\n\t\tlogger = logger.With(fields...)\n\t}\n\n\t\/\/ check for return 5xx error, may be 502 code if clickhouse accesed via reverse proxy\n\tif resp.StatusCode > 500 && resp.StatusCode < 512 {\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\tresp.Body.Close()\n\t\terr = NewErrorWithCode(string(body), resp.StatusCode)\n\t\treturn\n\t} else if resp.StatusCode != 200 {\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\tresp.Body.Close()\n\t\terr = fmt.Errorf(\"clickhouse response status %d: %s\", resp.StatusCode, string(body))\n\t\treturn\n\t}\n\n\tbodyReader = &loggedReader{\n\t\treader: resp.Body,\n\t\tlogger: logger,\n\t\tstart: start,\n\t\tqueryID: chQueryID,\n\t}\n\n\treturn\n}\n\nfunc do(ctx context.Context, dsn string, query string, postBody io.Reader, gzip bool, opts Options, extData *ExternalData) ([]byte, error) {\n\tbodyReader, err := reader(ctx, dsn, query, postBody, gzip, opts, extData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody, err := ioutil.ReadAll(bodyReader)\n\tbodyReader.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}\n\nfunc ReadUvarint(array []byte) (uint64, int, error) {\n\tvar x uint64\n\tvar s uint\n\tl := len(array) - 1\n\tfor i := 0; ; i++ {\n\t\tif i > l {\n\t\t\treturn x, i + 1, ErrUvarintRead\n\t\t}\n\t\tif array[i] < 0x80 {\n\t\t\tif i > 9 || i == 9 && array[i] > 1 {\n\t\t\t\treturn x, i + 1, ErrUvarintOverflow\n\t\t\t}\n\t\t\treturn x | uint64(array[i])<<s, i + 1, nil\n\t\t}\n\t\tx |= uint64(array[i]&0x7f) << s\n\t\ts += 7\n\t}\n}\n<commit_msg>fix: logging for clickhouse query summary (no header)<commit_after>package clickhouse\n\nimport (\n\t\"context\"\n\t\"encoding\/binary\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"html\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/lomik\/graphite-clickhouse\/pkg\/scope\"\n\n\t\"go.uber.org\/zap\"\n\t\"go.uber.org\/zap\/zapcore\"\n)\n\ntype ErrDataParse struct {\n\terr string\n\tdata string\n}\n\nfunc NewErrDataParse(err string, data string) error {\n\treturn &ErrDataParse{err, data}\n}\n\nfunc (e *ErrDataParse) Error() string {\n\treturn fmt.Sprintf(\"%s: %s\", e.err, e.data)\n}\n\nfunc (e *ErrDataParse) PrependDescription(test string) {\n\te.data = test + e.data\n}\n\ntype ErrorWithCode struct {\n\terr string\n\tCode int \/\/ error code\n}\n\nfunc NewErrorWithCode(err string, code int) error {\n\treturn &ErrorWithCode{err, code}\n}\n\nfunc (e *ErrorWithCode) Error() string { return e.err }\n\nvar ErrUvarintRead = errors.New(\"ReadUvarint: Malformed array\")\nvar ErrUvarintOverflow = errors.New(\"ReadUvarint: varint overflows a 64-bit integer\")\nvar ErrClickHouseResponse = errors.New(\"Malformed response from clickhouse\")\n\nfunc HandleError(w http.ResponseWriter, err error) {\n\tif errors.Is(err, context.Canceled) {\n\t\thttp.Error(w, \"Storage read context canceled\", http.StatusGatewayTimeout)\n\t\treturn\n\t}\n\tnetErr, ok := err.(net.Error)\n\tif ok {\n\t\tif netErr.Timeout() {\n\t\t\thttp.Error(w, \"Storage read timeout\", http.StatusGatewayTimeout)\n\t\t} else if strings.HasSuffix(err.Error(), \"connect: no route to host\") ||\n\t\t\tstrings.HasSuffix(err.Error(), \"connect: connection refused\") ||\n\t\t\tstrings.HasSuffix(err.Error(), \": connection reset by peer\") ||\n\t\t\tstrings.HasPrefix(err.Error(), \"dial tcp: lookup \") { \/\/ DNS lookup\n\t\t\thttp.Error(w, \"Storage error\", http.StatusServiceUnavailable)\n\t\t} else {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\terrCode, ok := err.(*ErrorWithCode)\n\tif ok {\n\t\tif (errCode.Code > 500 && errCode.Code < 512) ||\n\t\t\terrCode.Code == http.StatusBadRequest || errCode.Code == http.StatusForbidden {\n\t\t\thttp.Error(w, html.EscapeString(errCode.Error()), errCode.Code)\n\t\t} else {\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\t_, ok = err.(*ErrDataParse)\n\tif ok || strings.HasPrefix(err.Error(), \"clickhouse response status 500: Code:\") {\n\t\tif strings.Contains(err.Error(), \": Limit for \") {\n\t\t\t\/\/logger.Info(\"limit\", zap.Error(err))\n\t\t\thttp.Error(w, \"Storage read limit\", http.StatusForbidden)\n\t\t} else if !ok && strings.HasPrefix(err.Error(), \"clickhouse response status 500: Code: 170,\") {\n\t\t\t\/\/ distributed table configuration error\n\t\t\t\/\/ clickhouse response status 500: Code: 170, e.displayText() = DB::Exception: Requested cluster 'cluster' not found\n\t\t\thttp.Error(w, \"Storage configuration error\", http.StatusServiceUnavailable)\n\t\t}\n\t} else {\n\t\t\/\/logger.Debug(\"query\", zap.Error(err))\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}\n\ntype Options struct {\n\tTimeout time.Duration\n\tConnectTimeout time.Duration\n}\n\ntype loggedReader struct {\n\treader io.ReadCloser\n\tlogger *zap.Logger\n\tstart time.Time\n\tfinished bool\n\tqueryID string\n}\n\nfunc (r *loggedReader) Read(p []byte) (int, error) {\n\tn, err := r.reader.Read(p)\n\tif err != nil && !r.finished {\n\t\tr.finished = true\n\t\tr.logger.Info(\"query\", zap.String(\"query_id\", r.queryID), zap.Duration(\"time\", time.Since(r.start)))\n\t}\n\treturn n, err\n}\n\nfunc (r *loggedReader) Close() error {\n\terr := r.reader.Close()\n\tif !r.finished {\n\t\tr.finished = true\n\t\tr.logger.Info(\"query\", zap.String(\"query_id\", r.queryID), zap.Duration(\"time\", time.Since(r.start)))\n\t}\n\treturn err\n}\n\nfunc formatSQL(q string) string {\n\ts := strings.Split(q, \"\\n\")\n\tfor i := 0; i < len(s); i++ {\n\t\ts[i] = strings.TrimSpace(s[i])\n\t}\n\n\treturn strings.Join(s, \" \")\n}\n\nfunc Query(ctx context.Context, dsn string, query string, opts Options, extData *ExternalData) ([]byte, error) {\n\treturn Post(ctx, dsn, query, nil, opts, extData)\n}\n\nfunc Post(ctx context.Context, dsn string, query string, postBody io.Reader, opts Options, extData *ExternalData) ([]byte, error) {\n\treturn do(ctx, dsn, query, postBody, false, opts, extData)\n}\n\nfunc PostGzip(ctx context.Context, dsn string, query string, postBody io.Reader, opts Options, extData *ExternalData) ([]byte, error) {\n\treturn do(ctx, dsn, query, postBody, true, opts, extData)\n}\n\nfunc Reader(ctx context.Context, dsn string, query string, opts Options, extData *ExternalData) (io.ReadCloser, error) {\n\treturn reader(ctx, dsn, query, nil, false, opts, extData)\n}\n\nfunc reader(ctx context.Context, dsn string, query string, postBody io.Reader, gzip bool, opts Options, extData *ExternalData) (bodyReader io.ReadCloser, err error) {\n\tif postBody != nil && extData != nil {\n\t\terr = fmt.Errorf(\"postBody and extData could not be passed in one request\")\n\t\treturn\n\t}\n\n\tvar chQueryID string\n\n\tstart := time.Now()\n\n\trequestID := scope.RequestID(ctx)\n\n\tqueryForLogger := query\n\tif len(queryForLogger) > 500 {\n\t\tqueryForLogger = queryForLogger[:395] + \"<...>\" + queryForLogger[len(queryForLogger)-100:]\n\t}\n\tlogger := scope.Logger(ctx).With(zap.String(\"query\", formatSQL(queryForLogger)))\n\n\tdefer func() {\n\t\t\/\/ fmt.Println(time.Since(start), formatSQL(queryForLogger))\n\t\tif err != nil {\n\t\t\tlogger.Error(\"query\", zap.Error(err), zap.Duration(\"time\", time.Since(start)))\n\t\t}\n\t}()\n\n\tp, err := url.Parse(dsn)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar b [8]byte\n\tbinary.LittleEndian.PutUint64(b[:], rand.Uint64())\n\tqueryID := fmt.Sprintf(\"%x\", b)\n\n\tq := p.Query()\n\tq.Set(\"query_id\", fmt.Sprintf(\"%s::%s\", requestID, queryID))\n\t\/\/ Get X-Clickhouse-Summary header\n\t\/\/ TODO: remove when https:\/\/github.com\/ClickHouse\/ClickHouse\/issues\/16207 is done\n\tq.Set(\"send_progress_in_http_headers\", \"1\")\n\tq.Set(\"http_headers_progress_interval_ms\", \"10000\")\n\tp.RawQuery = q.Encode()\n\n\tvar contentHeader string\n\tif postBody != nil {\n\t\tq := p.Query()\n\t\tq.Set(\"query\", query)\n\t\tp.RawQuery = q.Encode()\n\t} else if extData != nil {\n\t\tq := p.Query()\n\t\tq.Set(\"query\", query)\n\t\tp.RawQuery = q.Encode()\n\t\tpostBody, contentHeader, err = extData.buildBody(ctx, p)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tpostBody = strings.NewReader(query)\n\t}\n\n\turl := p.String()\n\n\treq, err := http.NewRequest(\"POST\", url, postBody)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treq.Header.Add(\"User-Agent\", scope.ClickhouseUserAgent(ctx))\n\tif contentHeader != \"\" {\n\t\treq.Header.Add(\"Content-Type\", contentHeader)\n\t}\n\n\tif gzip {\n\t\treq.Header.Add(\"Content-Encoding\", \"gzip\")\n\t}\n\n\tclient := &http.Client{\n\t\tTimeout: opts.Timeout,\n\t\tTransport: &http.Transport{\n\t\t\tDial: (&net.Dialer{\n\t\t\t\tTimeout: opts.ConnectTimeout,\n\t\t\t}).Dial,\n\t\t\tDisableKeepAlives: true,\n\t\t},\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ chproxy overwrite our query id. So read it again\n\tchQueryID = resp.Header.Get(\"X-ClickHouse-Query-Id\")\n\n\tsummaryHeader := resp.Header.Get(\"X-Clickhouse-Summary\")\n\tif len(summaryHeader) > 0 {\n\t\tsummary := make(map[string]string)\n\t\terr = json.Unmarshal([]byte(summaryHeader), &summary)\n\t\tif err == nil {\n\t\t\t\/\/ TODO: use in carbon metrics sender when it will be implemented\n\t\t\tfields := make([]zapcore.Field, 0, len(summary))\n\t\t\tfor k, v := range summary {\n\t\t\t\tfields = append(fields, zap.String(k, v))\n\t\t\t}\n\t\t\tlogger = logger.With(fields...)\n\t\t} else {\n\t\t\tlogger.Warn(\"query\", zap.Error(err), zap.String(\"clickhouse-summary\", summaryHeader))\n\t\t\terr = nil\n\t\t}\n\t}\n\n\t\/\/ check for return 5xx error, may be 502 code if clickhouse accesed via reverse proxy\n\tif resp.StatusCode > 500 && resp.StatusCode < 512 {\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\tresp.Body.Close()\n\t\terr = NewErrorWithCode(string(body), resp.StatusCode)\n\t\treturn\n\t} else if resp.StatusCode != 200 {\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\tresp.Body.Close()\n\t\terr = fmt.Errorf(\"clickhouse response status %d: %s\", resp.StatusCode, string(body))\n\t\treturn\n\t}\n\n\tbodyReader = &loggedReader{\n\t\treader: resp.Body,\n\t\tlogger: logger,\n\t\tstart: start,\n\t\tqueryID: chQueryID,\n\t}\n\n\treturn\n}\n\nfunc do(ctx context.Context, dsn string, query string, postBody io.Reader, gzip bool, opts Options, extData *ExternalData) ([]byte, error) {\n\tbodyReader, err := reader(ctx, dsn, query, postBody, gzip, opts, extData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbody, err := ioutil.ReadAll(bodyReader)\n\tbodyReader.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}\n\nfunc ReadUvarint(array []byte) (uint64, int, error) {\n\tvar x uint64\n\tvar s uint\n\tl := len(array) - 1\n\tfor i := 0; ; i++ {\n\t\tif i > l {\n\t\t\treturn x, i + 1, ErrUvarintRead\n\t\t}\n\t\tif array[i] < 0x80 {\n\t\t\tif i > 9 || i == 9 && array[i] > 1 {\n\t\t\t\treturn x, i + 1, ErrUvarintOverflow\n\t\t\t}\n\t\t\treturn x | uint64(array[i])<<s, i + 1, nil\n\t\t}\n\t\tx |= uint64(array[i]&0x7f) << s\n\t\ts += 7\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ This code does not have any restrictions, use it as you please.\n\/\/ Instead of copyright stuff, let's have some cool phrases:\n\/\/ \"The absolute transformation of everything that we ever thought about music will\n\/\/ take place within 10 years, and nothing is going to be able to stop it. I see\n\/\/ absolutely no point in pretending that it's not going to happen.\n\/\/ I'm fully confident that copyright, for instance, will no longer exist in 10 years.\"\n\/\/ -BOWIE, David\n\n\/\/ Package parse allows you to parse the raw content of a robots.txt file into a Robots struct.\npackage parse\n\nimport (\n\t\"github.com\/lucasfcosta\/gobotto\/models\"\n\t\"strings\"\n)\n\nfunc Parse(text string) models.Robots {\n\trobots := models.NewRobots()\n\n\tlines := strings.Split(text, \"\\n\")\n\n\tvar lastUserAgent string\n\n\t\/\/ For each line\n\tfor _, line := range lines {\n\t\tnormalized := strings.ToLower(line)\n\n\t\t\/\/ Detect the semantic value of a line\n\t\tisComment := strings.HasPrefix(normalized, \"#\")\n\t\tisUserAgent := strings.HasPrefix(normalized, \"user-agent\")\n\t\tisAllow := strings.HasPrefix(normalized, \"allow\")\n\t\tisDisallow := strings.HasPrefix(normalized, \"disallow\")\n\n\t\t\/\/ Handle that line according to its semantic value\n\t\tif isComment {\n\t\t\tcomment := strings.TrimLeft(strings.Split(line, \"#\")[1], \" \")\n\t\t\trobots.Comments = append(robots.Comments, comment)\n\t\t} else if isUserAgent {\n\t\t\tlastUserAgent = strings.Split(line, \" \")[1]\n\t\t\t_, exists := robots.Rules[lastUserAgent]\n\t\t\tif !exists {\n\t\t\t\trobots.Rules[lastUserAgent] = models.NewRules()\n\t\t\t}\n\t\t} else if isAllow {\n\t\t\tpath := strings.Split(line, \" \")[1]\n\t\t\trobots.Rules[lastUserAgent].Allow[path] = precision(path)\n\t\t} else if isDisallow {\n\t\t\tpath := strings.Split(line, \" \")[1]\n\t\t\trobots.Rules[lastUserAgent].Disallow[path] = precision(path)\n\t\t}\n\t}\n\n\treturn robots\n}\n\nfunc precision(path string) int {\n\tsubpaths := strings.Split(path, \"\/\")\n\tcount := 0\n\tfor _, subpath := range subpaths {\n\t\tif subpath != \"\" {\n\t\t\tcount++\n\t\t}\n\t}\n\n\treturn count\n}\n<commit_msg>Trim trailing and leading spaces on line normalization for parse<commit_after>\/\/ This code does not have any restrictions, use it as you please.\n\/\/ Instead of copyright stuff, let's have some cool phrases:\n\/\/ \"The absolute transformation of everything that we ever thought about music will\n\/\/ take place within 10 years, and nothing is going to be able to stop it. I see\n\/\/ absolutely no point in pretending that it's not going to happen.\n\/\/ I'm fully confident that copyright, for instance, will no longer exist in 10 years.\"\n\/\/ -BOWIE, David\n\n\/\/ Package parse allows you to parse the raw content of a robots.txt file into a Robots struct.\npackage parse\n\nimport (\n\t\"github.com\/lucasfcosta\/gobotto\/models\"\n\t\"strings\"\n)\n\nfunc Parse(text string) models.Robots {\n\trobots := models.NewRobots()\n\n\tlines := strings.Split(text, \"\\n\")\n\n\tvar lastUserAgent string\n\n\t\/\/ For each line\n\tfor _, line := range lines {\n\t\tnormalized := strings.Trim(strings.ToLower(line), \" \")\n\n\t\t\/\/ Detect the semantic value of a line\n\t\tisComment := strings.HasPrefix(normalized, \"#\")\n\t\tisUserAgent := strings.HasPrefix(normalized, \"user-agent\")\n\t\tisAllow := strings.HasPrefix(normalized, \"allow\")\n\t\tisDisallow := strings.HasPrefix(normalized, \"disallow\")\n\n\t\t\/\/ Handle that line according to its semantic value\n\t\tif isComment {\n\t\t\tcomment := strings.TrimLeft(strings.Split(line, \"#\")[1], \" \")\n\t\t\trobots.Comments = append(robots.Comments, comment)\n\t\t} else if isUserAgent {\n\t\t\tlastUserAgent = strings.Split(line, \" \")[1]\n\t\t\t_, exists := robots.Rules[lastUserAgent]\n\t\t\tif !exists {\n\t\t\t\trobots.Rules[lastUserAgent] = models.NewRules()\n\t\t\t}\n\t\t} else if isAllow {\n\t\t\tpath := strings.Split(line, \" \")[1]\n\t\t\trobots.Rules[lastUserAgent].Allow[path] = precision(path)\n\t\t} else if isDisallow {\n\t\t\tpath := strings.Split(line, \" \")[1]\n\t\t\trobots.Rules[lastUserAgent].Disallow[path] = precision(path)\n\t\t}\n\t}\n\n\treturn robots\n}\n\nfunc precision(path string) int {\n\tsubpaths := strings.Split(path, \"\/\")\n\tcount := 0\n\tfor _, subpath := range subpaths {\n\t\tif subpath != \"\" {\n\t\t\tcount++\n\t\t}\n\t}\n\n\treturn count\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"archive\/zip\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/aarzilli\/sandblast\"\n\t\"golang.org\/x\/net\/html\"\n\t\"golang.org\/x\/net\/html\/charset\"\n\t\"golang.org\/x\/text\/transform\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\t\"unicode\"\n)\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \".\/qa run <dataset.zip>\\n\")\n\tfmt.Fprintf(os.Stderr, \".\/qa rebuild <dataset.zip> <out.zip>\\n\")\n\tfmt.Fprintf(os.Stderr, \".\/qa one <dataset.zip> <testname>\\n\")\n\tos.Exit(1)\n}\n\nfunc must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\ntype test struct {\n\tname string\n\tinput *zip.File\n\ttarget *zip.File\n}\n\ntype Dataset struct {\n\tdatazip *zip.ReadCloser\n\tindex *zip.File\n\ttests []test\n}\n\nfunc (d *Dataset) Close() error {\n\treturn d.datazip.Close()\n}\n\nfunc collapseWhitespace(in []rune) []rune {\n\tvar b []rune = make([]rune, len(in))\n\td := 0\n\tspaceSeen := true\n\tfor s := range in {\n\t\tif spaceSeen {\n\t\t\tif !unicode.IsSpace(in[s]) {\n\t\t\t\tspaceSeen = false\n\t\t\t\tb[d] = in[s]\n\t\t\t\td++\n\t\t\t}\n\t\t} else {\n\t\t\tif unicode.IsSpace(in[s]) {\n\t\t\t\tb[d] = ' '\n\t\t\t\td++\n\t\t\t\tspaceSeen = true\n\t\t\t} else {\n\t\t\t\tb[d] = in[s]\n\t\t\t\td++\n\t\t\t}\n\t\t}\n\t}\n\treturn b[:d]\n}\n\nfunc openDataset(datapath string) *Dataset {\n\tdataset, err := zip.OpenReader(datapath)\n\tmust(err)\n\n\tins := map[string]*zip.File{}\n\touts := map[string]*zip.File{}\n\tvar index *zip.File\n\n\tfor _, file := range dataset.File {\n\t\tif file.Name == \"index.txt\" {\n\t\t\tindex = file\n\t\t\tcontinue\n\t\t}\n\t\tv := strings.Split(file.Name, \".\")\n\t\tif len(v) != 2 {\n\t\t\tpanic(fmt.Errorf(\"wrong name in dataset: %s\\n\", file.Name))\n\t\t}\n\n\t\tswitch v[1] {\n\t\tcase \"html\":\n\t\t\tins[v[0]] = file\n\t\tcase \"target\":\n\t\t\touts[v[0]] = file\n\t\tdefault:\n\t\t\tpanic(fmt.Errorf(\"wrong name in dataset: %s\\n\", file.Name))\n\t\t}\n\t}\n\n\ttests := make([]test, 0, len(ins))\n\tfor k := range ins {\n\t\tin, inok := ins[k]\n\t\tout, outok := outs[k]\n\t\tif !inok || !outok {\n\t\t\tpanic(fmt.Errorf(\"problem with dataset: %s\", k))\n\t\t}\n\t\ttests = append(tests, test{name: k, input: in, target: out})\n\t}\n\n\treturn &Dataset{index: index, datazip: dataset, tests: tests}\n}\n\nfunc qarun(datapath string) {\n\tdataset := openDataset(datapath)\n\tdefer dataset.Close()\n\n\tos.Mkdir(\"work\", 0770)\n\n\tcount := 0\n\tfor _, test := range dataset.tests {\n\t\tfmt.Printf(\"Processing %s\\n\", test.name)\n\t\tif !qaruntest(test, false) {\n\t\t\tcount++\n\t\t}\n\t\tif count > 10 {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc qaone(datapath string, name string) {\n\tdataset := openDataset(datapath)\n\tdefer dataset.Close()\n\n\tos.Mkdir(\"work\", 0770)\n\n\tfor _, test := range dataset.tests {\n\t\tif test.name == name {\n\t\t\tqaruntest(test, true)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc extractTest(test test, writeextract bool) ([]byte, string) {\n\tin, err := test.input.Open()\n\tmust(err)\n\tdefer in.Close()\n\n\tbody, err := ioutil.ReadAll(in)\n\tmust(err)\n\n\te, _, _ := charset.DetermineEncoding(body, \"UTF-8\")\n\tr := transform.NewReader(bytes.NewReader(body), e.NewDecoder())\n\tnode, err := html.Parse(r)\n\tmust(err)\n\n\t_, output, simplified, flattened, cleaned, err := sandblast.ExtractEx(node)\n\tmust(err)\n\n\tif writeextract {\n\t\tfmt.Printf(\"SIMPLIFIED:\\n%s\\n\", simplified.DebugString())\n\t\tfmt.Printf(\"FLATTENED:\\n%s\\n\", flattened.DebugString())\n\t\tfmt.Printf(\"CLEANED:\\n%s\\n\", cleaned.DebugString())\n\t}\n\n\treturn body, output\n}\n\nfunc qaruntest(test test, writein bool) bool {\n\tbody, output := extractTest(test, writein)\n\n\ttgt, err := test.target.Open()\n\tmust(err)\n\tdefer tgt.Close()\n\n\ttgtbody, err := ioutil.ReadAll(tgt)\n\tmust(err)\n\ttarget := strings.TrimSpace(string(tgtbody))\n\n\tif string(collapseWhitespace([]rune(target))) != string(collapseWhitespace([]rune(output))) {\n\t\tfmt.Printf(\"%s output and target differ\\n\", test.name)\n\t\ttgtout, err := os.Create(fmt.Sprintf(\"work\/%s.target\", test.name))\n\t\tmust(err)\n\t\tio.WriteString(tgtout, target)\n\t\tio.WriteString(tgtout, \"\\n\")\n\t\ttgtout.Close()\n\t\toutout, err := os.Create(fmt.Sprintf(\"work\/%s.out\", test.name))\n\t\tmust(err)\n\t\tio.WriteString(outout, output)\n\t\tio.WriteString(outout, \"\\n\")\n\t\toutout.Close()\n\n\t\tif writein {\n\t\t\tinout, err := os.Create(fmt.Sprintf(\"work\/%s.html\", test.name))\n\t\t\tmust(err)\n\t\t\tinout.Write(body)\n\t\t\tinout.Close()\n\t\t}\n\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc qarebuild(datapath, outpath string) {\n\tdataset := openDataset(datapath)\n\tdefer dataset.Close()\n\n\toutw, err := os.Create(outpath)\n\tmust(err)\n\tdefer outw.Close()\n\n\toutzip := zip.NewWriter(outw)\n\tdefer outzip.Close()\n\n\tcopyFile(outzip, \"index.txt\", dataset.index)\n\n\tfor _, test := range dataset.tests {\n\t\tfmt.Printf(\"processing %s\\n\", test.name)\n\t\tcopyFile(outzip, fmt.Sprintf(\"%s.html\", test.name), test.input)\n\t\t_, output := extractTest(test, false)\n\t\tw, err := outzip.Create(fmt.Sprintf(\"%s.target\", test.name))\n\t\tmust(err)\n\t\t_, err = io.WriteString(w, output)\n\t\tmust(err)\n\t}\n}\n\nfunc copyFile(outzip *zip.Writer, name string, in *zip.File) {\n\tw, err := outzip.Create(name)\n\tmust(err)\n\tr, err := in.Open()\n\tmust(err)\n\tdefer r.Close()\n\t_, err = io.Copy(w, r)\n\tmust(err)\n}\n\nfunc main() {\n\tif len(os.Args) < 1 {\n\t\tusage()\n\t}\n\tswitch os.Args[1] {\n\tcase \"run\":\n\t\tif len(os.Args) < 3 {\n\t\t\tusage()\n\t\t}\n\t\tqarun(os.Args[2])\n\tcase \"one\":\n\t\tqaone(os.Args[2], os.Args[3])\n\tcase \"rebuild\":\n\t\tif len(os.Args) < 4 {\n\t\t\tusage()\n\t\t}\n\t\tqarebuild(os.Args[2], os.Args[3])\n\tcase \"help\":\n\t\tusage()\n\tdefault:\n\t\tusage()\n\t}\n}\n<commit_msg>qa: two messages<commit_after>package main\n\nimport (\n\t\"archive\/zip\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/aarzilli\/sandblast\"\n\t\"golang.org\/x\/net\/html\"\n\t\"golang.org\/x\/net\/html\/charset\"\n\t\"golang.org\/x\/text\/transform\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\t\"unicode\"\n)\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \".\/qa run <dataset.zip>\\n\")\n\tfmt.Fprintf(os.Stderr, \".\/qa rebuild <dataset.zip> <out.zip>\\n\")\n\tfmt.Fprintf(os.Stderr, \".\/qa one <dataset.zip> <testname>\\n\")\n\tos.Exit(1)\n}\n\nfunc must(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\ntype test struct {\n\tname string\n\tinput *zip.File\n\ttarget *zip.File\n}\n\ntype Dataset struct {\n\tdatazip *zip.ReadCloser\n\tindex *zip.File\n\ttests []test\n}\n\nfunc (d *Dataset) Close() error {\n\treturn d.datazip.Close()\n}\n\nfunc collapseWhitespace(in []rune) []rune {\n\tvar b []rune = make([]rune, len(in))\n\td := 0\n\tspaceSeen := true\n\tfor s := range in {\n\t\tif spaceSeen {\n\t\t\tif !unicode.IsSpace(in[s]) {\n\t\t\t\tspaceSeen = false\n\t\t\t\tb[d] = in[s]\n\t\t\t\td++\n\t\t\t}\n\t\t} else {\n\t\t\tif unicode.IsSpace(in[s]) {\n\t\t\t\tb[d] = ' '\n\t\t\t\td++\n\t\t\t\tspaceSeen = true\n\t\t\t} else {\n\t\t\t\tb[d] = in[s]\n\t\t\t\td++\n\t\t\t}\n\t\t}\n\t}\n\treturn b[:d]\n}\n\nfunc openDataset(datapath string) *Dataset {\n\tdataset, err := zip.OpenReader(datapath)\n\tmust(err)\n\n\tins := map[string]*zip.File{}\n\touts := map[string]*zip.File{}\n\tvar index *zip.File\n\n\tfor _, file := range dataset.File {\n\t\tif file.Name == \"index.txt\" {\n\t\t\tindex = file\n\t\t\tcontinue\n\t\t}\n\t\tv := strings.Split(file.Name, \".\")\n\t\tif len(v) != 2 {\n\t\t\tpanic(fmt.Errorf(\"wrong name in dataset: %s\\n\", file.Name))\n\t\t}\n\n\t\tswitch v[1] {\n\t\tcase \"html\":\n\t\t\tins[v[0]] = file\n\t\tcase \"target\":\n\t\t\touts[v[0]] = file\n\t\tdefault:\n\t\t\tpanic(fmt.Errorf(\"wrong name in dataset: %s\\n\", file.Name))\n\t\t}\n\t}\n\n\ttests := make([]test, 0, len(ins))\n\tfor k := range ins {\n\t\tin, inok := ins[k]\n\t\tout, outok := outs[k]\n\t\tif !inok || !outok {\n\t\t\tpanic(fmt.Errorf(\"problem with dataset: %s\", k))\n\t\t}\n\t\ttests = append(tests, test{name: k, input: in, target: out})\n\t}\n\n\treturn &Dataset{index: index, datazip: dataset, tests: tests}\n}\n\nfunc qarun(datapath string) {\n\tdataset := openDataset(datapath)\n\tdefer dataset.Close()\n\n\tos.Mkdir(\"work\", 0770)\n\n\tcount := 0\n\tfor _, test := range dataset.tests {\n\t\tfmt.Printf(\"Processing %s\\n\", test.name)\n\t\tif !qaruntest(test, false) {\n\t\t\tcount++\n\t\t}\n\t\tif count > 10 {\n\t\t\tfmt.Printf(\"Too many differences\\n\")\n\t\t\treturn\n\t\t}\n\t}\n\tfmt.Printf(\"All ok\\n\")\n}\n\nfunc qaone(datapath string, name string) {\n\tdataset := openDataset(datapath)\n\tdefer dataset.Close()\n\n\tos.Mkdir(\"work\", 0770)\n\n\tfor _, test := range dataset.tests {\n\t\tif test.name == name {\n\t\t\tqaruntest(test, true)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc extractTest(test test, writeextract bool) ([]byte, string) {\n\tin, err := test.input.Open()\n\tmust(err)\n\tdefer in.Close()\n\n\tbody, err := ioutil.ReadAll(in)\n\tmust(err)\n\n\te, _, _ := charset.DetermineEncoding(body, \"UTF-8\")\n\tr := transform.NewReader(bytes.NewReader(body), e.NewDecoder())\n\tnode, err := html.Parse(r)\n\tmust(err)\n\n\t_, output, simplified, flattened, cleaned, err := sandblast.ExtractEx(node)\n\tmust(err)\n\n\tif writeextract {\n\t\tfmt.Printf(\"SIMPLIFIED:\\n%s\\n\", simplified.DebugString())\n\t\tfmt.Printf(\"FLATTENED:\\n%s\\n\", flattened.DebugString())\n\t\tfmt.Printf(\"CLEANED:\\n%s\\n\", cleaned.DebugString())\n\t}\n\n\treturn body, output\n}\n\nfunc qaruntest(test test, writein bool) bool {\n\tbody, output := extractTest(test, writein)\n\n\ttgt, err := test.target.Open()\n\tmust(err)\n\tdefer tgt.Close()\n\n\ttgtbody, err := ioutil.ReadAll(tgt)\n\tmust(err)\n\ttarget := strings.TrimSpace(string(tgtbody))\n\n\tif string(collapseWhitespace([]rune(target))) != string(collapseWhitespace([]rune(output))) {\n\t\tfmt.Printf(\"%s output and target differ\\n\", test.name)\n\t\ttgtout, err := os.Create(fmt.Sprintf(\"work\/%s.target\", test.name))\n\t\tmust(err)\n\t\tio.WriteString(tgtout, target)\n\t\tio.WriteString(tgtout, \"\\n\")\n\t\ttgtout.Close()\n\t\toutout, err := os.Create(fmt.Sprintf(\"work\/%s.out\", test.name))\n\t\tmust(err)\n\t\tio.WriteString(outout, output)\n\t\tio.WriteString(outout, \"\\n\")\n\t\toutout.Close()\n\n\t\tif writein {\n\t\t\tinout, err := os.Create(fmt.Sprintf(\"work\/%s.html\", test.name))\n\t\t\tmust(err)\n\t\t\tinout.Write(body)\n\t\t\tinout.Close()\n\t\t}\n\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc qarebuild(datapath, outpath string) {\n\tdataset := openDataset(datapath)\n\tdefer dataset.Close()\n\n\toutw, err := os.Create(outpath)\n\tmust(err)\n\tdefer outw.Close()\n\n\toutzip := zip.NewWriter(outw)\n\tdefer outzip.Close()\n\n\tcopyFile(outzip, \"index.txt\", dataset.index)\n\n\tfor _, test := range dataset.tests {\n\t\tfmt.Printf(\"processing %s\\n\", test.name)\n\t\tcopyFile(outzip, fmt.Sprintf(\"%s.html\", test.name), test.input)\n\t\t_, output := extractTest(test, false)\n\t\tw, err := outzip.Create(fmt.Sprintf(\"%s.target\", test.name))\n\t\tmust(err)\n\t\t_, err = io.WriteString(w, output)\n\t\tmust(err)\n\t}\n}\n\nfunc copyFile(outzip *zip.Writer, name string, in *zip.File) {\n\tw, err := outzip.Create(name)\n\tmust(err)\n\tr, err := in.Open()\n\tmust(err)\n\tdefer r.Close()\n\t_, err = io.Copy(w, r)\n\tmust(err)\n}\n\nfunc main() {\n\tif len(os.Args) < 1 {\n\t\tusage()\n\t}\n\tswitch os.Args[1] {\n\tcase \"run\":\n\t\tif len(os.Args) < 3 {\n\t\t\tusage()\n\t\t}\n\t\tqarun(os.Args[2])\n\tcase \"one\":\n\t\tqaone(os.Args[2], os.Args[3])\n\tcase \"rebuild\":\n\t\tif len(os.Args) < 4 {\n\t\t\tusage()\n\t\t}\n\t\tqarebuild(os.Args[2], os.Args[3])\n\tcase \"help\":\n\t\tusage()\n\tdefault:\n\t\tusage()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package gosnow\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"testing\"\n)\n\nconst simpleSrc string = `\n# My API\n## GET \/message\n + Response 200 (text\/plain)\n\n Hello World\n`\n\nconst namelessSrc string = `\nFORMAT: 1A\n\n# Group Messages\n\n# Message [\/messages\/{id}]\n\n## Retrieve Message [GET]\n+ Response 200 (text\/plain)\n\n Hello World!\n\n`\n\nconst warningSrc string = `\nFORMAT: 1A\n\n# Group Messages\n\n# Message [\/messages\/{id}]\n\n+ Model (text\/plain)\n\n Hello World\n\n## Retrieve Message [GET]\n+ Response 200 (text\/plain)\n\n Hello World!\n\n## Retrieve Message [GET]\n+ Response 200 (text\/plain)\n\n Hello World!\n\n`\n\nvar (\n\tapibFile = \"test\/fixtures\/sample-api.apib\"\n\tastFile = \"test\/fixtures\/sample-api-ast.json\"\n\tsourcemapFile = \"test\/fixtures\/sample-api-sourcemap.json\"\n)\n\n\/\/ replace the variables with the contents of the file they point to\nfunc init() {\n\tif c, err := ioutil.ReadFile(apibFile); err != nil {\n\t\tpanic(\"apibFile not found\")\n\t} else {\n\t\tapibFile = string(c)\n\t}\n\n\tif c, err := ioutil.ReadFile(astFile); err != nil {\n\t\tpanic(\"astFile not found\")\n\t} else {\n\t\tastFile = string(c)\n\t}\n\n\tif c, err := ioutil.ReadFile(sourcemapFile); err != nil {\n\t\tpanic(\"sourcemapFile not found\")\n\t} else {\n\t\tsourcemapFile = string(c)\n\t}\n}\n\nfunc TestParse(t *testing.T) {\n\tres, err := Parse(apibFile)\n\tif err != nil {\n\t\tt.Fatalf(\"Parse failed with error: %v\", err)\n\t}\n\tif res == nil {\n\t\tt.Fatal(\"Parse returned nil result\")\n\t}\n\t\/\/ v, _ := json.MarshalIndent(res, \"\", \" \")\n\t\/\/ fmt.Println(string(v))\n}\n\n\/\/ ensure that the option parse with a 0 does the same thing as the simple parse\nfunc TestParseEquality(t *testing.T) {\n\tres1, err := Parse(simpleSrc)\n\tif err != nil {\n\t\tt.Fatalf(\"Parse failed with err: %v\", err)\n\t}\n\tres2, err := OptionParse(simpleSrc, 0)\n\tif err != nil {\n\t\tt.Fatalf(\"OptionParse failed with err: %v\", err)\n\t}\n\n\tv1, err := json.Marshal(res1)\n\tif err != nil {\n\t\tt.Fatalf(\"json marshal error: %v\", err)\n\t}\n\tv2, err := json.Marshal(res2)\n\tif err != nil {\n\t\tt.Fatalf(\"json marshal error: %v\", err)\n\t}\n\tif string(v1) != string(v2) {\n\t\tt.Error(\"Results should be equal\")\n\t}\n}\n\nfunc TestParseError(t *testing.T) {\n\tjunk := \"*#(*(R$#&)$#)R*(Y@#_RH\"\n\tres, err := OptionParse(junk, -1)\n\tif err == nil {\n\t\tt.Errorf(\"OptionParse did not fail for junk input\")\n\t}\n\tif res != nil {\n\t\tt.Errorf(\"OptionParse returned non=empty result for junk input\")\n\t}\n}\n\nfunc TestFilesOptionParse(t *testing.T) {\n\tres, err := OptionParse(apibFile, ScRenderDescriptionsOptionKey)\n\tif err != nil {\n\t\tt.Errorf(\"OptionParse failed for key ScRenderDescriptionsOptionKey with error: %v\", err)\n\t} else if res == nil {\n\t\tt.Errorf(\"OptionParse for key ScRenderDescriptionsOptionKey returned empty result\")\n\t}\n\n\t_, err = OptionParse(namelessSrc, RequireBlueprintNameOptionKey)\n\tif err == nil {\n\t\tt.Errorf(\"strict OptionParse did not fail for key RequireBlueprintNameOptionKey\")\n\t}\n\n\tres, err = OptionParse(apibFile, ExportSourcemapOptionKey)\n\tif err != nil {\n\t\tt.Errorf(\"OptionParse failed for ExportSourcemapOptionKey with error: %v\", err)\n\t} else if res == nil {\n\t\tt.Errorf(\"OptionParse for key ExportSourcemapOptionKey returned empty result\")\n\t}\n}\n<commit_msg>added some tests for new parser methods<commit_after>package gosnow\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"testing\"\n)\n\nconst simpleSrc string = `\n# My API\n## GET \/message\n + Response 200 (text\/plain)\n\n Hello World\n`\n\nconst namelessSrc string = `\nFORMAT: 1A\n\n# Group Messages\n\n# Message [\/messages\/{id}]\n\n## Retrieve Message [GET]\n+ Response 200 (text\/plain)\n\n Hello World!\n\n`\n\nconst warningSrc string = `\nFORMAT: 1A\n\n# Group Messages\n\n# Message [\/messages\/{id}]\n\n+ Model (text\/plain)\n\n Hello World\n\n## Retrieve Message [GET]\n+ Response 200 (text\/plain)\n\n Hello World!\n\n## Retrieve Message [GET]\n+ Response 200 (text\/plain)\n\n Hello World!\n\n`\n\nvar (\n\tapibFile = \"test\/fixtures\/sample-api.apib\"\n\tastFile = \"test\/fixtures\/sample-api-ast.json\"\n\tsourcemapFile = \"test\/fixtures\/sample-api-sourcemap.json\"\n)\n\n\/\/ replace the variables with the contents of the file they point to\nfunc init() {\n\tif c, err := ioutil.ReadFile(apibFile); err != nil {\n\t\tpanic(\"apibFile not found\")\n\t} else {\n\t\tapibFile = string(c)\n\t}\n\n\tif c, err := ioutil.ReadFile(astFile); err != nil {\n\t\tpanic(\"astFile not found\")\n\t} else {\n\t\tastFile = string(c)\n\t}\n\n\tif c, err := ioutil.ReadFile(sourcemapFile); err != nil {\n\t\tpanic(\"sourcemapFile not found\")\n\t} else {\n\t\tsourcemapFile = string(c)\n\t}\n}\n\nfunc TestSourceAnnotationOk(t *testing.T) {\n\tsa := new(SourceAnnotation)\n\tif !sa.Ok() {\n\t\tt.Error(\"empty source annotation should have zero value indicating ok\")\n\t}\n\tsa.Code = 2\n\tif sa.Ok() {\n\t\tt.Error(\"source annotation should have non zero value indicating not ok\")\n\t}\n}\n\nfunc TestNewPR(t *testing.T) {\n\t_, err := newPR([]byte(`{\"unrelated\": \"json\"}`))\n\tif err != nil {\n\t\tt.Errorf(\"newPR errored for valid json %v\", err)\n\t}\n}\n\nfunc TestNewPRFailure(t *testing.T) {\n\tjunk := []byte(`*#(*(R$#&)$#)R*(Y@#_RH`)\n\t_, err := newPR(junk)\n\tif err == nil {\n\t\tt.Error(\"newPR should have errored and did not\")\n\t}\n\tif e, ok := err.(*json.SyntaxError); !ok {\n\t\tt.Errorf(\"Expected json.SyntaxError, got %T\", e)\n\t}\n}\n\nfunc TestParse(t *testing.T) {\n\tres, err := Parse(apibFile)\n\tif err != nil {\n\t\tt.Fatalf(\"Parse failed with error: %v\", err)\n\t}\n\tif res == nil {\n\t\tt.Fatal(\"Parse returned nil result\")\n\t}\n\t\/\/ v, _ := json.MarshalIndent(res, \"\", \" \")\n\t\/\/ fmt.Println(string(v))\n}\n\n\/\/ ensure that the option parse with a 0 does the same thing as the simple parse\nfunc TestParseEquality(t *testing.T) {\n\tres1, err := Parse(simpleSrc)\n\tif err != nil {\n\t\tt.Fatalf(\"Parse failed with err: %v\", err)\n\t}\n\tres2, err := OptionParse(simpleSrc, 0)\n\tif err != nil {\n\t\tt.Fatalf(\"OptionParse failed with err: %v\", err)\n\t}\n\n\tv1, err := json.Marshal(res1)\n\tif err != nil {\n\t\tt.Fatalf(\"json marshal error: %v\", err)\n\t}\n\tv2, err := json.Marshal(res2)\n\tif err != nil {\n\t\tt.Fatalf(\"json marshal error: %v\", err)\n\t}\n\tif string(v1) != string(v2) {\n\t\tt.Error(\"Results should be equal\")\n\t}\n}\n\nfunc TestParseError(t *testing.T) {\n\tjunk := \"*#(*(R$#&)$#)R*(Y@#_RH\"\n\tres, err := OptionParse(junk, -1)\n\tif err == nil {\n\t\tt.Errorf(\"OptionParse did not fail for junk input\")\n\t}\n\tif res != nil {\n\t\tt.Errorf(\"OptionParse returned non=empty result for junk input\")\n\t}\n}\n\nfunc TestFilesOptionParse(t *testing.T) {\n\tres, err := OptionParse(apibFile, ScRenderDescriptionsOptionKey)\n\tif err != nil {\n\t\tt.Errorf(\"OptionParse failed for key ScRenderDescriptionsOptionKey with error: %v\", err)\n\t} else if res == nil {\n\t\tt.Errorf(\"OptionParse for key ScRenderDescriptionsOptionKey returned empty result\")\n\t}\n\n\t_, err = OptionParse(namelessSrc, RequireBlueprintNameOptionKey)\n\tif err == nil {\n\t\tt.Errorf(\"strict OptionParse did not fail for key RequireBlueprintNameOptionKey\")\n\t}\n\n\tres, err = OptionParse(apibFile, ExportSourcemapOptionKey)\n\tif err != nil {\n\t\tt.Errorf(\"OptionParse failed for ExportSourcemapOptionKey with error: %v\", err)\n\t} else if res == nil {\n\t\tt.Errorf(\"OptionParse for key ExportSourcemapOptionKey returned empty result\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"strconv\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/dustin\/go-couchstore\"\n\t\"github.com\/dustin\/go-jsonpointer\"\n)\n\nvar timeoutError = errors.New(\"query timed out\")\n\ntype ptrval struct {\n\tdi *couchstore.DocInfo\n\tval *string\n\tincluded bool\n}\n\ntype Reducer func(input chan ptrval) interface{}\n\ntype processOut struct {\n\tcacheKey string\n\tkey int64\n\tvalue []interface{}\n\terr error\n\tcacheOpaque uint32\n}\n\nfunc (p processOut) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(map[string]interface{}{\"v\": p.value})\n}\n\ntype processIn struct {\n\tcacheKey string\n\tdbname string\n\tkey int64\n\tinfos []*couchstore.DocInfo\n\tnextInfo *couchstore.DocInfo\n\tptrs []string\n\treds []string\n\tbefore time.Time\n\tfilters\t []string\n\tfiltervals []string\n\tout chan<- *processOut\n}\n\ntype queryIn struct {\n\tdbname string\n\tfrom string\n\tto string\n\tgroup int\n\tptrs []string\n\treds []string\n\tstart time.Time\n\tbefore time.Time\n\tfilters\t []string\n\tfiltervals []string\n\tstarted int32\n\ttotalKeys int32\n\tout chan *processOut\n\tcherr chan error\n}\n\nfunc processDoc(di *couchstore.DocInfo, chs []chan ptrval,\n\tdoc []byte, ptrs []string, filters []string, filtervals []string, included bool) {\n\n\tpv := ptrval{di, nil, included}\n\n\tj := map[string]interface{}{}\n\terr := json.Unmarshal(doc, &j)\n\tif err != nil {\n\t\tfor i := range ptrs {\n\t\t\tchs[i] <- pv\n\t\t}\n\t\treturn\n\t}\n\tfor i, p := range filters {\n\t\tval := jsonpointer.Get(j, p)\n\t\tcheckVal := filtervals[i]\n\t\tswitch val.(type) {\n\t\tcase string:\n\t\t\tif (val != checkVal) {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase int, uint, int64, float64, uint64, bool:\n\t\t\tv := fmt.Sprintf(\"%v\", val)\n\t\t\tif (v != checkVal) {\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor i, p := range ptrs {\n\t\tval := jsonpointer.Get(j, p)\n\t\tswitch x := val.(type) {\n\t\tcase string:\n\t\t\tpv.val = &x\n\t\t\tchs[i] <- pv\n\t\tcase int, uint, int64, float64, uint64, bool:\n\t\t\tv := fmt.Sprintf(\"%v\", val)\n\t\t\tpv.val = &v\n\t\t\tchs[i] <- pv\n\t\tdefault:\n\t\t\tlog.Printf(\"Ignoring %T\", val)\n\t\t\tchs[i] <- pv\n\t\t}\n\t}\n}\n\nfunc process_docs(pi *processIn) {\n\n\tresult := processOut{pi.cacheKey, pi.key, nil, nil, 0}\n\n\tdb, err := dbopen(pi.dbname)\n\tif err != nil {\n\t\tresult.err = err\n\t\tpi.out <- &result\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\tchans := make([]chan ptrval, 0, len(pi.ptrs))\n\tresultchs := make([]chan interface{}, 0, len(pi.ptrs))\n\tfor i, r := range pi.reds {\n\t\tchans = append(chans, make(chan ptrval))\n\t\tresultchs = append(resultchs, make(chan interface{}))\n\n\t\tgo func() {\n\t\t\tresultchs[i] <- reducers[r](chans[i])\n\t\t}()\n\t}\n\n\tgo func() {\n\t\tdefer closeAll(chans)\n\n\t\tdodoc := func(di *couchstore.DocInfo, included bool) {\n\t\t\tdoc, err := db.GetFromDocInfo(di)\n\t\t\tif err == nil {\n\t\t\t\tprocessDoc(di, chans, doc.Value(), pi.ptrs, pi.filters, pi.filtervals, included)\n\t\t\t} else {\n\t\t\t\tfor i := range pi.ptrs {\n\t\t\t\t\tchans[i] <- ptrval{di, nil, included}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, di := range pi.infos {\n\t\t\tdodoc(di, true)\n\t\t}\n\t\tif pi.nextInfo != nil {\n\t\t\tdodoc(pi.nextInfo, false)\n\t\t}\n\t}()\n\n\tresults := make([]interface{}, len(pi.ptrs))\n\tfor i := 0; i < len(pi.ptrs); i++ {\n\t\tresults[i] = <-resultchs[i]\n\t\tif f, fok := results[i].(float64); fok && math.IsNaN(f) {\n\t\t\tresults[i] = nil\n\t\t}\n\t}\n\tresult.value = results\n\n\tif result.cacheOpaque == 0 && result.cacheKey != \"\" {\n\t\t\/\/ It's OK if we can't store our newly pulled item in\n\t\t\/\/ the cache, but it's most definitely not OK to stop\n\t\t\/\/ here because of this.\n\t\tselect {\n\t\tcase cacheInputSet <- &result:\n\t\tdefault:\n\t\t}\n\t}\n\tpi.out <- &result\n}\n\nfunc docProcessor(ch <-chan *processIn) {\n\tfor pi := range ch {\n\t\tif time.Now().Before(pi.before) {\n\t\t\tprocess_docs(pi)\n\t\t} else {\n\t\t\tpi.out <- &processOut{\"\", pi.key, nil, timeoutError, 0}\n\t\t}\n\t}\n}\n\nfunc fetchDocs(dbname string, key int64, infos []*couchstore.DocInfo,\n\tnextInfo *couchstore.DocInfo, ptrs []string, reds []string,\n\tbefore time.Time, filters []string, filtervals []string, out chan<- *processOut) {\n\n\ti := processIn{\"\", dbname, key, infos, nextInfo,\n\t\tptrs, reds, before, filters, filtervals, out}\n\n\tcacheInput <- &i\n}\n\nfunc runQuery(q *queryIn) {\n\tdb, err := dbopen(q.dbname)\n\tif err != nil {\n\t\tlog.Printf(\"Error opening db: %v - %v\", q.dbname, err)\n\t\tq.cherr <- err\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\tchunk := int64(time.Duration(q.group) * time.Millisecond)\n\n\tinfos := []*couchstore.DocInfo{}\n\tg := int64(0)\n\tnextg := \"\"\n\n\terr = db.Walk(q.from, func(d *couchstore.Couchstore,\n\t\tdi *couchstore.DocInfo) error {\n\t\tkstr := di.ID()\n\t\tvar err error\n\t\tif q.to != \"\" && kstr >= q.to {\n\t\t\terr = couchstore.StopIteration\n\t\t}\n\n\n\n\t\tatomic.AddInt32(&q.totalKeys, 1)\n\n\t\tif kstr >= nextg {\n\t\t\tif len(infos) > 0 {\n\t\t\t\tatomic.AddInt32(&q.started, 1)\n\t\t\t\tfetchDocs(q.dbname, g, infos, di,\n\t\t\t\t\tq.ptrs, q.reds, q.before, q.filters, q.filtervals, q.out)\n\n\t\t\t\tinfos = make([]*couchstore.DocInfo, 0, len(infos))\n\t\t\t}\n\n\t\t\tk := parseKey(kstr)\n\t\t\tg = (k \/ chunk) * chunk\n\t\t\tnextgi := g + chunk\n\t\t\tnextgt := time.Unix(nextgi\/1e9, nextgi%1e9).UTC()\n\t\t\tnextg = nextgt.Format(time.RFC3339Nano)\n\t\t}\n\t\tinfos = append(infos, di)\n\n\t\treturn err\n\t})\n\n\tif err == nil && len(infos) > 0 {\n\t\tatomic.AddInt32(&q.started, 1)\n\t\tfetchDocs(q.dbname, g, infos, nil,\n\t\t\tq.ptrs, q.reds, q.before, q.filters, q.filtervals, q.out)\n\t}\n\n\tq.cherr <- err\n}\n\nfunc queryExecutor(ch <-chan *queryIn) {\n\tfor q := range ch {\n\t\tif time.Now().Before(q.before) {\n\t\t\trunQuery(q)\n\t\t} else {\n\t\t\tlog.Printf(\"Timed out query that's %v late\",\n\t\t\t\ttime.Since(q.before))\n\t\t\tq.cherr <- timeoutError\n\t\t}\n\t}\n}\n\nfunc executeQuery(dbname, from, to string, group int,\n\tptrs []string, reds []string, filters []string, filtervals []string) *queryIn {\n\n\tnow := time.Now()\n\n\trv := &queryIn{\n\t\tdbname: dbname,\n\t\tfrom: from,\n\t\tto: to,\n\t\tgroup: group,\n\t\tptrs: ptrs,\n\t\treds: reds,\n\t\tstart: now,\n\t\tbefore: now.Add(*queryTimeout),\n\t filters: filters,\n\t\tfiltervals: filtervals,\n\t\tout: make(chan *processOut),\n\t\tcherr: make(chan error),\n\t}\n\tqueryInput <- rv\n\treturn rv\n}\n\nvar processorInput chan *processIn\nvar queryInput chan *queryIn\n\nfunc convertTofloat64(in chan ptrval) chan float64 {\n\tch := make(chan float64)\n\tgo func() {\n\t\tdefer close(ch)\n\t\tfor v := range in {\n\t\t\tif v.included && v.val != nil {\n\t\t\t\tx, err := strconv.ParseFloat(*v.val, 64)\n\t\t\t\tif err == nil {\n\t\t\t\t\tch <- x\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn ch\n}\n\nfunc convertTofloat64Rate(in chan ptrval) chan float64 {\n\tch := make(chan float64)\n\tgo func() {\n\t\tdefer close(ch)\n\t\tvar prevts int64\n\t\tvar preval float64\n\n\t\t\/\/ First, find a part of the stream that has usable data.\n\t\tfor v := range in {\n\t\t\tif v.di != nil && v.val != nil {\n\t\t\t\tx, err := strconv.ParseFloat(*v.val, 64)\n\t\t\t\tif err == nil {\n\t\t\t\t\tprevts = parseKey(v.di.ID())\n\t\t\t\t\tpreval = x\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\/\/ Then emit floats based on deltas from previous values.\n\t\tfor v := range in {\n\t\t\tif v.di != nil && v.val != nil {\n\t\t\t\tx, err := strconv.ParseFloat(*v.val, 64)\n\t\t\t\tif err == nil {\n\t\t\t\t\tthists := parseKey(v.di.ID())\n\n\t\t\t\t\tval := ((x - preval) \/\n\t\t\t\t\t\t(float64(thists-prevts) \/ 1e9))\n\n\t\t\t\t\tif !math.IsNaN(val) {\n\t\t\t\t\t\tch <- val\n\t\t\t\t\t}\n\n\t\t\t\t\tprevts = thists\n\t\t\t\t\tpreval = x\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn ch\n}\n\nvar reducers = map[string]Reducer{\n\t\"identity\": func(input chan ptrval) interface{} {\n\t\trv := []*string{}\n\t\tfor s := range input {\n\t\t\tif s.included {\n\t\t\t\trv = append(rv, s.val)\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"any\": func(input chan ptrval) interface{} {\n\t\tfor v := range input {\n\t\t\tif v.included && v.val != nil {\n\t\t\t\treturn *v.val\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t},\n\t\"count\": func(input chan ptrval) interface{} {\n\t\trv := 0\n\t\tfor v := range input {\n\t\t\tif v.included && v.val != nil {\n\t\t\t\trv++\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"sum\": func(input chan ptrval) interface{} {\n\t\trv := float64(0)\n\t\tfor v := range convertTofloat64(input) {\n\t\t\trv += v\n\t\t}\n\t\treturn rv\n\t},\n\t\"sumsq\": func(input chan ptrval) interface{} {\n\t\trv := float64(0)\n\t\tfor v := range convertTofloat64(input) {\n\t\t\trv += (v * v)\n\t\t}\n\t\treturn rv\n\t},\n\t\"max\": func(input chan ptrval) interface{} {\n\t\trv := math.NaN()\n\t\tfor v := range convertTofloat64(input) {\n\t\t\tif v > rv || math.IsNaN(rv) {\n\t\t\t\trv = v\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"min\": func(input chan ptrval) interface{} {\n\t\trv := math.NaN()\n\t\tfor v := range convertTofloat64(input) {\n\t\t\tif v < rv || math.IsNaN(rv) {\n\t\t\t\trv = v\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"avg\": func(input chan ptrval) interface{} {\n\t\tnums := float64(0)\n\t\tsum := float64(0)\n\t\tfor v := range convertTofloat64(input) {\n\t\t\tnums++\n\t\t\tsum += v\n\t\t}\n\t\tif nums > 0 {\n\t\t\treturn sum \/ nums\n\t\t}\n\t\treturn math.NaN()\n\t},\n\t\"c_min\": func(input chan ptrval) interface{} {\n\t\trv := math.NaN()\n\t\tfor v := range convertTofloat64Rate(input) {\n\t\t\tif v < rv || math.IsNaN(rv) {\n\t\t\t\trv = v\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"c_avg\": func(input chan ptrval) interface{} {\n\t\tnums := float64(0)\n\t\tsum := float64(0)\n\t\tfor v := range convertTofloat64Rate(input) {\n\t\t\tnums++\n\t\t\tsum += v\n\t\t}\n\t\tif nums > 0 {\n\t\t\treturn sum \/ nums\n\t\t}\n\t\treturn math.NaN()\n\t},\n\t\"c_max\": func(input chan ptrval) interface{} {\n\t\trv := math.NaN()\n\t\tfor v := range convertTofloat64Rate(input) {\n\t\t\tif v > rv || math.IsNaN(rv) {\n\t\t\t\trv = v\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n}\n<commit_msg>Minor cleanups.<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"strconv\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/dustin\/go-couchstore\"\n\t\"github.com\/dustin\/go-jsonpointer\"\n)\n\nvar timeoutError = errors.New(\"query timed out\")\n\ntype ptrval struct {\n\tdi *couchstore.DocInfo\n\tval *string\n\tincluded bool\n}\n\ntype Reducer func(input chan ptrval) interface{}\n\ntype processOut struct {\n\tcacheKey string\n\tkey int64\n\tvalue []interface{}\n\terr error\n\tcacheOpaque uint32\n}\n\nfunc (p processOut) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(map[string]interface{}{\"v\": p.value})\n}\n\ntype processIn struct {\n\tcacheKey string\n\tdbname string\n\tkey int64\n\tinfos []*couchstore.DocInfo\n\tnextInfo *couchstore.DocInfo\n\tptrs []string\n\treds []string\n\tbefore time.Time\n\tfilters []string\n\tfiltervals []string\n\tout chan<- *processOut\n}\n\ntype queryIn struct {\n\tdbname string\n\tfrom string\n\tto string\n\tgroup int\n\tptrs []string\n\treds []string\n\tstart time.Time\n\tbefore time.Time\n\tfilters []string\n\tfiltervals []string\n\tstarted int32\n\ttotalKeys int32\n\tout chan *processOut\n\tcherr chan error\n}\n\nfunc processDoc(di *couchstore.DocInfo, chs []chan ptrval,\n\tdoc []byte, ptrs []string,\n\tfilters []string, filtervals []string,\n\tincluded bool) {\n\n\tpv := ptrval{di, nil, included}\n\n\tj := map[string]interface{}{}\n\terr := json.Unmarshal(doc, &j)\n\tif err != nil {\n\t\tfor i := range ptrs {\n\t\t\tchs[i] <- pv\n\t\t}\n\t\treturn\n\t}\n\tfor i, p := range filters {\n\t\tval := jsonpointer.Get(j, p)\n\t\tcheckVal := filtervals[i]\n\t\tswitch val.(type) {\n\t\tcase string:\n\t\t\tif val != checkVal {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase int, uint, int64, float64, uint64, bool:\n\t\t\tv := fmt.Sprintf(\"%v\", val)\n\t\t\tif v != checkVal {\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor i, p := range ptrs {\n\t\tval := jsonpointer.Get(j, p)\n\t\tswitch x := val.(type) {\n\t\tcase string:\n\t\t\tpv.val = &x\n\t\t\tchs[i] <- pv\n\t\tcase int, uint, int64, float64, uint64, bool:\n\t\t\tv := fmt.Sprintf(\"%v\", val)\n\t\t\tpv.val = &v\n\t\t\tchs[i] <- pv\n\t\tdefault:\n\t\t\tlog.Printf(\"Ignoring %T\", val)\n\t\t\tchs[i] <- pv\n\t\t}\n\t}\n}\n\nfunc process_docs(pi *processIn) {\n\n\tresult := processOut{pi.cacheKey, pi.key, nil, nil, 0}\n\n\tdb, err := dbopen(pi.dbname)\n\tif err != nil {\n\t\tresult.err = err\n\t\tpi.out <- &result\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\tchans := make([]chan ptrval, 0, len(pi.ptrs))\n\tresultchs := make([]chan interface{}, 0, len(pi.ptrs))\n\tfor i, r := range pi.reds {\n\t\tchans = append(chans, make(chan ptrval))\n\t\tresultchs = append(resultchs, make(chan interface{}))\n\n\t\tgo func() {\n\t\t\tresultchs[i] <- reducers[r](chans[i])\n\t\t}()\n\t}\n\n\tgo func() {\n\t\tdefer closeAll(chans)\n\n\t\tdodoc := func(di *couchstore.DocInfo, included bool) {\n\t\t\tdoc, err := db.GetFromDocInfo(di)\n\t\t\tif err == nil {\n\t\t\t\tprocessDoc(di, chans, doc.Value(), pi.ptrs,\n\t\t\t\t\tpi.filters, pi.filtervals, included)\n\t\t\t} else {\n\t\t\t\tfor i := range pi.ptrs {\n\t\t\t\t\tchans[i] <- ptrval{di, nil, included}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, di := range pi.infos {\n\t\t\tdodoc(di, true)\n\t\t}\n\t\tif pi.nextInfo != nil {\n\t\t\tdodoc(pi.nextInfo, false)\n\t\t}\n\t}()\n\n\tresults := make([]interface{}, len(pi.ptrs))\n\tfor i := 0; i < len(pi.ptrs); i++ {\n\t\tresults[i] = <-resultchs[i]\n\t\tif f, fok := results[i].(float64); fok && math.IsNaN(f) {\n\t\t\tresults[i] = nil\n\t\t}\n\t}\n\tresult.value = results\n\n\tif result.cacheOpaque == 0 && result.cacheKey != \"\" {\n\t\t\/\/ It's OK if we can't store our newly pulled item in\n\t\t\/\/ the cache, but it's most definitely not OK to stop\n\t\t\/\/ here because of this.\n\t\tselect {\n\t\tcase cacheInputSet <- &result:\n\t\tdefault:\n\t\t}\n\t}\n\tpi.out <- &result\n}\n\nfunc docProcessor(ch <-chan *processIn) {\n\tfor pi := range ch {\n\t\tif time.Now().Before(pi.before) {\n\t\t\tprocess_docs(pi)\n\t\t} else {\n\t\t\tpi.out <- &processOut{\"\", pi.key, nil, timeoutError, 0}\n\t\t}\n\t}\n}\n\nfunc fetchDocs(dbname string, key int64, infos []*couchstore.DocInfo,\n\tnextInfo *couchstore.DocInfo, ptrs []string, reds []string,\n\tfilters []string, filtervals []string,\n\tbefore time.Time, out chan<- *processOut) {\n\n\ti := processIn{\"\", dbname, key, infos, nextInfo,\n\t\tptrs, reds, before, filters, filtervals, out}\n\n\tcacheInput <- &i\n}\n\nfunc runQuery(q *queryIn) {\n\tdb, err := dbopen(q.dbname)\n\tif err != nil {\n\t\tlog.Printf(\"Error opening db: %v - %v\", q.dbname, err)\n\t\tq.cherr <- err\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\tchunk := int64(time.Duration(q.group) * time.Millisecond)\n\n\tinfos := []*couchstore.DocInfo{}\n\tg := int64(0)\n\tnextg := \"\"\n\n\terr = db.Walk(q.from, func(d *couchstore.Couchstore,\n\t\tdi *couchstore.DocInfo) error {\n\t\tkstr := di.ID()\n\t\tvar err error\n\t\tif q.to != \"\" && kstr >= q.to {\n\t\t\terr = couchstore.StopIteration\n\t\t}\n\n\t\tatomic.AddInt32(&q.totalKeys, 1)\n\n\t\tif kstr >= nextg {\n\t\t\tif len(infos) > 0 {\n\t\t\t\tatomic.AddInt32(&q.started, 1)\n\t\t\t\tfetchDocs(q.dbname, g, infos, di,\n\t\t\t\t\tq.ptrs, q.reds, q.filters, q.filtervals,\n\t\t\t\t\tq.before, q.out)\n\n\t\t\t\tinfos = make([]*couchstore.DocInfo, 0, len(infos))\n\t\t\t}\n\n\t\t\tk := parseKey(kstr)\n\t\t\tg = (k \/ chunk) * chunk\n\t\t\tnextgi := g + chunk\n\t\t\tnextgt := time.Unix(nextgi\/1e9, nextgi%1e9).UTC()\n\t\t\tnextg = nextgt.Format(time.RFC3339Nano)\n\t\t}\n\t\tinfos = append(infos, di)\n\n\t\treturn err\n\t})\n\n\tif err == nil && len(infos) > 0 {\n\t\tatomic.AddInt32(&q.started, 1)\n\t\tfetchDocs(q.dbname, g, infos, nil,\n\t\t\tq.ptrs, q.reds, q.filters, q.filtervals,\n\t\t\tq.before, q.out)\n\t}\n\n\tq.cherr <- err\n}\n\nfunc queryExecutor(ch <-chan *queryIn) {\n\tfor q := range ch {\n\t\tif time.Now().Before(q.before) {\n\t\t\trunQuery(q)\n\t\t} else {\n\t\t\tlog.Printf(\"Timed out query that's %v late\",\n\t\t\t\ttime.Since(q.before))\n\t\t\tq.cherr <- timeoutError\n\t\t}\n\t}\n}\n\nfunc executeQuery(dbname, from, to string, group int,\n\tptrs []string, reds []string, filters []string, filtervals []string) *queryIn {\n\n\tnow := time.Now()\n\n\trv := &queryIn{\n\t\tdbname: dbname,\n\t\tfrom: from,\n\t\tto: to,\n\t\tgroup: group,\n\t\tptrs: ptrs,\n\t\treds: reds,\n\t\tstart: now,\n\t\tbefore: now.Add(*queryTimeout),\n\t\tfilters: filters,\n\t\tfiltervals: filtervals,\n\t\tout: make(chan *processOut),\n\t\tcherr: make(chan error),\n\t}\n\tqueryInput <- rv\n\treturn rv\n}\n\nvar processorInput chan *processIn\nvar queryInput chan *queryIn\n\nfunc convertTofloat64(in chan ptrval) chan float64 {\n\tch := make(chan float64)\n\tgo func() {\n\t\tdefer close(ch)\n\t\tfor v := range in {\n\t\t\tif v.included && v.val != nil {\n\t\t\t\tx, err := strconv.ParseFloat(*v.val, 64)\n\t\t\t\tif err == nil {\n\t\t\t\t\tch <- x\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn ch\n}\n\nfunc convertTofloat64Rate(in chan ptrval) chan float64 {\n\tch := make(chan float64)\n\tgo func() {\n\t\tdefer close(ch)\n\t\tvar prevts int64\n\t\tvar preval float64\n\n\t\t\/\/ First, find a part of the stream that has usable data.\n\t\tfor v := range in {\n\t\t\tif v.di != nil && v.val != nil {\n\t\t\t\tx, err := strconv.ParseFloat(*v.val, 64)\n\t\t\t\tif err == nil {\n\t\t\t\t\tprevts = parseKey(v.di.ID())\n\t\t\t\t\tpreval = x\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\/\/ Then emit floats based on deltas from previous values.\n\t\tfor v := range in {\n\t\t\tif v.di != nil && v.val != nil {\n\t\t\t\tx, err := strconv.ParseFloat(*v.val, 64)\n\t\t\t\tif err == nil {\n\t\t\t\t\tthists := parseKey(v.di.ID())\n\n\t\t\t\t\tval := ((x - preval) \/\n\t\t\t\t\t\t(float64(thists-prevts) \/ 1e9))\n\n\t\t\t\t\tif !math.IsNaN(val) {\n\t\t\t\t\t\tch <- val\n\t\t\t\t\t}\n\n\t\t\t\t\tprevts = thists\n\t\t\t\t\tpreval = x\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn ch\n}\n\nvar reducers = map[string]Reducer{\n\t\"identity\": func(input chan ptrval) interface{} {\n\t\trv := []*string{}\n\t\tfor s := range input {\n\t\t\tif s.included {\n\t\t\t\trv = append(rv, s.val)\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"any\": func(input chan ptrval) interface{} {\n\t\tfor v := range input {\n\t\t\tif v.included && v.val != nil {\n\t\t\t\treturn *v.val\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t},\n\t\"count\": func(input chan ptrval) interface{} {\n\t\trv := 0\n\t\tfor v := range input {\n\t\t\tif v.included && v.val != nil {\n\t\t\t\trv++\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"sum\": func(input chan ptrval) interface{} {\n\t\trv := float64(0)\n\t\tfor v := range convertTofloat64(input) {\n\t\t\trv += v\n\t\t}\n\t\treturn rv\n\t},\n\t\"sumsq\": func(input chan ptrval) interface{} {\n\t\trv := float64(0)\n\t\tfor v := range convertTofloat64(input) {\n\t\t\trv += (v * v)\n\t\t}\n\t\treturn rv\n\t},\n\t\"max\": func(input chan ptrval) interface{} {\n\t\trv := math.NaN()\n\t\tfor v := range convertTofloat64(input) {\n\t\t\tif v > rv || math.IsNaN(rv) {\n\t\t\t\trv = v\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"min\": func(input chan ptrval) interface{} {\n\t\trv := math.NaN()\n\t\tfor v := range convertTofloat64(input) {\n\t\t\tif v < rv || math.IsNaN(rv) {\n\t\t\t\trv = v\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"avg\": func(input chan ptrval) interface{} {\n\t\tnums := float64(0)\n\t\tsum := float64(0)\n\t\tfor v := range convertTofloat64(input) {\n\t\t\tnums++\n\t\t\tsum += v\n\t\t}\n\t\tif nums > 0 {\n\t\t\treturn sum \/ nums\n\t\t}\n\t\treturn math.NaN()\n\t},\n\t\"c_min\": func(input chan ptrval) interface{} {\n\t\trv := math.NaN()\n\t\tfor v := range convertTofloat64Rate(input) {\n\t\t\tif v < rv || math.IsNaN(rv) {\n\t\t\t\trv = v\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"c_avg\": func(input chan ptrval) interface{} {\n\t\tnums := float64(0)\n\t\tsum := float64(0)\n\t\tfor v := range convertTofloat64Rate(input) {\n\t\t\tnums++\n\t\t\tsum += v\n\t\t}\n\t\tif nums > 0 {\n\t\t\treturn sum \/ nums\n\t\t}\n\t\treturn math.NaN()\n\t},\n\t\"c_max\": func(input chan ptrval) interface{} {\n\t\trv := math.NaN()\n\t\tfor v := range convertTofloat64Rate(input) {\n\t\t\tif v > rv || math.IsNaN(rv) {\n\t\t\t\trv = v\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before>package lytics\n\nimport (\n\t\"net\/url\"\n\t\"time\"\n)\n\nconst (\n\tqueryEndpoint = \"query\/:id\"\n\tqueryListEndpoint = \"query\"\n\tqueryTestEndpoint = \"query\/_test\"\n\tqueryValidateEndpoint = \"query\/_validate\"\n)\n\ntype (\n\t\/\/ Query represents an LQL Statement structure\n\tQuery struct {\n\t\tId string `json:\"id\"`\n\t\tCreated time.Time `json:\"created\"`\n\t\tUpdated time.Time `json:\"updated\"`\n\t\tAlias string `json:\"alias\"`\n\t\tTable string `json:\"table\"`\n\t\tFrom string `json:\"from\"`\n\t\tText string `json:\"text\"`\n\t\tFields map[string]QueryField `json:\"fields\"`\n\t}\n\t\/\/ A field in a query\n\t\/\/ - very similar to catalog, query fields create catalog fields\n\tQueryField struct {\n\t\tAs string `json:\"as\"`\n\t\tIsBy bool `json:\"is_by\"`\n\t\tType string `json:\"type\"`\n\t\tShortDesc string `json:\"shortdesc\"`\n\t\tLongDesc string `json:\"longdesc\"`\n\t\tIdentities []string `json:\"identities\"`\n\t\tFroms []string `json:\"froms\"`\n\t}\n)\n\n\/\/ GetQueries returns a list of all queries associated with this account\n\/\/ https:\/\/www.getlytics.com\/developers\/rest-api#query\nfunc (l *Client) GetQueries() ([]Query, error) {\n\tres := ApiResp{}\n\tdata := []Query{}\n\n\t\/\/ make the request\n\terr := l.Get(queryListEndpoint, nil, nil, &res, &data)\n\tif err != nil {\n\t\treturn data, err\n\t}\n\n\treturn data, nil\n}\n\n\/\/ GetQueries returns a list of all queries associated with this account\n\/\/ https:\/\/www.getlytics.com\/developers\/rest-api#query\nfunc (l *Client) GetQuery(alias string) (Query, error) {\n\tres := ApiResp{}\n\tdata := Query{}\n\n\t\/\/ make the request\n\terr := l.Get(parseLyticsURL(queryEndpoint, map[string]string{\"id\": alias}), nil, nil, &res, &data)\n\tif err != nil {\n\t\treturn data, err\n\t}\n\n\treturn data, nil\n}\n\n\/\/ GetQueryTest returns the evaluated entity from given query\n\/\/ https:\/\/www.getlytics.com\/developers\/rest-api#query\nfunc (l *Client) GetQueryTest(qs url.Values, query string) (Entity, error) {\n\tres := ApiResp{}\n\tdata := Entity{}\n\n\t\/\/ make the request\n\terr := l.Post(queryTestEndpoint, qs, query, &res, &data)\n\n\tif err != nil {\n\t\treturn data, err\n\t}\n\n\treturn data, nil\n}\n\n\/\/ PostQueryValidate returns the query and how it is interpreted\n\/\/ https:\/\/www.getlytics.com\/developers\/rest-api#query\nfunc (l *Client) PostQueryValidate(query string) (Query, error) {\n\tres := ApiResp{}\n\tdata := Query{}\n\n\t\/\/ make the request\n\terr := l.PostType(\"text\/plain\", queryValidateEndpoint, nil, query, &res, &data)\n\n\tif err != nil {\n\t\treturn data, err\n\t}\n\n\treturn data, nil\n}\n<commit_msg>Queries now return arrays<commit_after>package lytics\n\nimport (\n\t\"net\/url\"\n\t\"time\"\n)\n\nconst (\n\tqueryEndpoint = \"query\/:id\"\n\tqueryListEndpoint = \"query\"\n\tqueryTestEndpoint = \"query\/_test\"\n\tqueryValidateEndpoint = \"query\/_validate\"\n)\n\ntype (\n\t\/\/ Query represents an LQL Statement structure\n\tQuery struct {\n\t\tId string `json:\"id\"`\n\t\tCreated time.Time `json:\"created\"`\n\t\tUpdated time.Time `json:\"updated\"`\n\t\tAlias string `json:\"alias\"`\n\t\tTable string `json:\"table\"`\n\t\tFrom string `json:\"from\"`\n\t\tText string `json:\"text\"`\n\t\tFields map[string]QueryField `json:\"fields\"`\n\t}\n\t\/\/ A field in a query\n\t\/\/ - very similar to catalog, query fields create catalog fields\n\tQueryField struct {\n\t\tAs string `json:\"as\"`\n\t\tIsBy bool `json:\"is_by\"`\n\t\tType string `json:\"type\"`\n\t\tShortDesc string `json:\"shortdesc\"`\n\t\tLongDesc string `json:\"longdesc\"`\n\t\tIdentities []string `json:\"identities\"`\n\t\tFroms []string `json:\"froms\"`\n\t}\n)\n\n\/\/ GetQueries returns a list of all queries associated with this account\n\/\/ https:\/\/www.getlytics.com\/developers\/rest-api#query\nfunc (l *Client) GetQueries() ([]Query, error) {\n\tres := ApiResp{}\n\tdata := []Query{}\n\n\t\/\/ make the request\n\terr := l.Get(queryListEndpoint, nil, nil, &res, &data)\n\tif err != nil {\n\t\treturn data, err\n\t}\n\n\treturn data, nil\n}\n\n\/\/ GetQueries returns a list of all queries associated with this account\n\/\/ https:\/\/www.getlytics.com\/developers\/rest-api#query\nfunc (l *Client) GetQuery(alias string) (Query, error) {\n\tres := ApiResp{}\n\tdata := Query{}\n\n\t\/\/ make the request\n\terr := l.Get(parseLyticsURL(queryEndpoint, map[string]string{\"id\": alias}), nil, nil, &res, &data)\n\tif err != nil {\n\t\treturn data, err\n\t}\n\n\treturn data, nil\n}\n\n\/\/ GetQueryTest returns the evaluated entity from given query\n\/\/ https:\/\/www.getlytics.com\/developers\/rest-api#query\nfunc (l *Client) GetQueryTest(qs url.Values, query string) (Entity, error) {\n\tres := ApiResp{}\n\tdata := Entity{}\n\n\t\/\/ make the request\n\terr := l.Post(queryTestEndpoint, qs, query, &res, &data)\n\n\tif err != nil {\n\t\treturn data, err\n\t}\n\n\treturn data, nil\n}\n\n\/\/ PostQueryValidate returns the query and how it is interpreted\n\/\/ https:\/\/www.getlytics.com\/developers\/rest-api#query\nfunc (l *Client) PostQueryValidate(query string) ([]Query, error) {\n\tres := ApiResp{}\n\tdata := []Query{}\n\n\t\/\/ make the request\n\terr := l.PostType(\"text\/plain\", queryValidateEndpoint, nil, query, &res, &data)\n\n\tif err != nil {\n\t\treturn data, err\n\t}\n\n\treturn data, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"strconv\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/dustin\/go-couchstore\"\n\t\"github.com\/dustin\/go-jsonpointer\"\n)\n\nvar timeoutError = errors.New(\"query timed out\")\n\ntype ptrval struct {\n\tdi *couchstore.DocInfo\n\tval *string\n\tincluded bool\n}\n\ntype Reducer func(input chan ptrval) interface{}\n\ntype processOut struct {\n\tcacheKey string\n\tkey int64\n\tvalue []interface{}\n\terr error\n\tcacheOpaque uint32\n}\n\nfunc (p processOut) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(map[string]interface{}{\"v\": p.value})\n}\n\ntype processIn struct {\n\tcacheKey string\n\tdbname string\n\tkey int64\n\tinfos []*couchstore.DocInfo\n\tnextInfo *couchstore.DocInfo\n\tptrs []string\n\treds []string\n\tbefore time.Time\n\tfilters []string\n\tfiltervals []string\n\tout chan<- *processOut\n}\n\ntype queryIn struct {\n\tdbname string\n\tfrom string\n\tto string\n\tgroup int\n\tptrs []string\n\treds []string\n\tstart time.Time\n\tbefore time.Time\n\tfilters []string\n\tfiltervals []string\n\tstarted int32\n\ttotalKeys int32\n\tout chan *processOut\n\tcherr chan error\n}\n\nfunc processDoc(di *couchstore.DocInfo, chs []chan ptrval,\n\tdoc []byte, ptrs []string,\n\tfilters []string, filtervals []string,\n\tincluded bool) {\n\n\tpv := ptrval{di, nil, included}\n\n\tj := map[string]interface{}{}\n\terr := json.Unmarshal(doc, &j)\n\tif err != nil {\n\t\tfor i := range ptrs {\n\t\t\tchs[i] <- pv\n\t\t}\n\t\treturn\n\t}\n\tfor i, p := range filters {\n\t\tval := jsonpointer.Get(j, p)\n\t\tcheckVal := filtervals[i]\n\t\tswitch val.(type) {\n\t\tcase string:\n\t\t\tif val != checkVal {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase int, uint, int64, float64, uint64, bool:\n\t\t\tv := fmt.Sprintf(\"%v\", val)\n\t\t\tif v != checkVal {\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor i, p := range ptrs {\n\t\tval := jsonpointer.Get(j, p)\n\t\tswitch x := val.(type) {\n\t\tcase string:\n\t\t\tpv.val = &x\n\t\t\tchs[i] <- pv\n\t\tcase int, uint, int64, float64, uint64, bool:\n\t\t\tv := fmt.Sprintf(\"%v\", val)\n\t\t\tpv.val = &v\n\t\t\tchs[i] <- pv\n\t\tdefault:\n\t\t\tlog.Printf(\"Ignoring %T\", val)\n\t\t\tchs[i] <- pv\n\t\t}\n\t}\n}\n\nfunc process_docs(pi *processIn) {\n\n\tresult := processOut{pi.cacheKey, pi.key, nil, nil, 0}\n\n\tif len(pi.ptrs) == 0 {\n\t\tlog.Panicf(\"No pointers specified in query: %#v\", *pi)\n\t}\n\n\tdb, err := dbopen(pi.dbname)\n\tif err != nil {\n\t\tresult.err = err\n\t\tpi.out <- &result\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\tchans := make([]chan ptrval, 0, len(pi.ptrs))\n\tresultchs := make([]chan interface{}, 0, len(pi.ptrs))\n\tfor i, r := range pi.reds {\n\t\tchans = append(chans, make(chan ptrval))\n\t\tresultchs = append(resultchs, make(chan interface{}))\n\n\t\tgo func(fi int, fr string) {\n\t\t\tresultchs[fi] <- reducers[fr](chans[fi])\n\t\t}(i, r)\n\t}\n\n\tgo func() {\n\t\tdefer closeAll(chans)\n\n\t\tdodoc := func(di *couchstore.DocInfo, included bool) {\n\t\t\tdoc, err := db.GetFromDocInfo(di)\n\t\t\tif err == nil {\n\t\t\t\tprocessDoc(di, chans, doc.Value(), pi.ptrs,\n\t\t\t\t\tpi.filters, pi.filtervals, included)\n\t\t\t} else {\n\t\t\t\tfor i := range pi.ptrs {\n\t\t\t\t\tchans[i] <- ptrval{di, nil, included}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, di := range pi.infos {\n\t\t\tdodoc(di, true)\n\t\t}\n\t\tif pi.nextInfo != nil {\n\t\t\tdodoc(pi.nextInfo, false)\n\t\t}\n\t}()\n\n\tresults := make([]interface{}, len(pi.ptrs))\n\tfor i := 0; i < len(pi.ptrs); i++ {\n\t\tresults[i] = <-resultchs[i]\n\t\tif f, fok := results[i].(float64); fok && math.IsNaN(f) {\n\t\t\tresults[i] = nil\n\t\t}\n\t}\n\tresult.value = results\n\n\tif result.cacheOpaque == 0 && result.cacheKey != \"\" {\n\t\t\/\/ It's OK if we can't store our newly pulled item in\n\t\t\/\/ the cache, but it's most definitely not OK to stop\n\t\t\/\/ here because of this.\n\t\tselect {\n\t\tcase cacheInputSet <- &result:\n\t\tdefault:\n\t\t}\n\t}\n\tpi.out <- &result\n}\n\nfunc docProcessor(ch <-chan *processIn) {\n\tfor pi := range ch {\n\t\tif time.Now().Before(pi.before) {\n\t\t\tprocess_docs(pi)\n\t\t} else {\n\t\t\tpi.out <- &processOut{\"\", pi.key, nil, timeoutError, 0}\n\t\t}\n\t}\n}\n\nfunc fetchDocs(dbname string, key int64, infos []*couchstore.DocInfo,\n\tnextInfo *couchstore.DocInfo, ptrs []string, reds []string,\n\tfilters []string, filtervals []string,\n\tbefore time.Time, out chan<- *processOut) {\n\n\ti := processIn{\"\", dbname, key, infos, nextInfo,\n\t\tptrs, reds, before, filters, filtervals, out}\n\n\tcacheInput <- &i\n}\n\nfunc runQuery(q *queryIn) {\n\tif len(q.ptrs) == 0 {\n\t\tq.cherr <- fmt.Errorf(\"At least one pointer is required\")\n\t\treturn\n\t}\n\n\tdb, err := dbopen(q.dbname)\n\tif err != nil {\n\t\tlog.Printf(\"Error opening db: %v - %v\", q.dbname, err)\n\t\tq.cherr <- err\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\tchunk := int64(time.Duration(q.group) * time.Millisecond)\n\n\tinfos := []*couchstore.DocInfo{}\n\tg := int64(0)\n\tnextg := \"\"\n\n\terr = db.Walk(q.from, func(d *couchstore.Couchstore,\n\t\tdi *couchstore.DocInfo) error {\n\t\tkstr := di.ID()\n\t\tvar err error\n\t\tif q.to != \"\" && kstr >= q.to {\n\t\t\terr = couchstore.StopIteration\n\t\t}\n\n\t\tatomic.AddInt32(&q.totalKeys, 1)\n\n\t\tif kstr >= nextg {\n\t\t\tif len(infos) > 0 {\n\t\t\t\tatomic.AddInt32(&q.started, 1)\n\t\t\t\tfetchDocs(q.dbname, g, infos, di,\n\t\t\t\t\tq.ptrs, q.reds, q.filters, q.filtervals,\n\t\t\t\t\tq.before, q.out)\n\n\t\t\t\tinfos = make([]*couchstore.DocInfo, 0, len(infos))\n\t\t\t}\n\n\t\t\tk := parseKey(kstr)\n\t\t\tg = (k \/ chunk) * chunk\n\t\t\tnextgi := g + chunk\n\t\t\tnextgt := time.Unix(nextgi\/1e9, nextgi%1e9).UTC()\n\t\t\tnextg = nextgt.Format(time.RFC3339Nano)\n\t\t}\n\t\tinfos = append(infos, di)\n\n\t\treturn err\n\t})\n\n\tif err == nil && len(infos) > 0 {\n\t\tatomic.AddInt32(&q.started, 1)\n\t\tfetchDocs(q.dbname, g, infos, nil,\n\t\t\tq.ptrs, q.reds, q.filters, q.filtervals,\n\t\t\tq.before, q.out)\n\t}\n\n\tq.cherr <- err\n}\n\nfunc queryExecutor(ch <-chan *queryIn) {\n\tfor q := range ch {\n\t\tif time.Now().Before(q.before) {\n\t\t\trunQuery(q)\n\t\t} else {\n\t\t\tlog.Printf(\"Timed out query that's %v late\",\n\t\t\t\ttime.Since(q.before))\n\t\t\tq.cherr <- timeoutError\n\t\t}\n\t}\n}\n\nfunc executeQuery(dbname, from, to string, group int,\n\tptrs []string, reds []string, filters []string, filtervals []string) *queryIn {\n\n\tnow := time.Now()\n\n\trv := &queryIn{\n\t\tdbname: dbname,\n\t\tfrom: from,\n\t\tto: to,\n\t\tgroup: group,\n\t\tptrs: ptrs,\n\t\treds: reds,\n\t\tstart: now,\n\t\tbefore: now.Add(*queryTimeout),\n\t\tfilters: filters,\n\t\tfiltervals: filtervals,\n\t\tout: make(chan *processOut),\n\t\tcherr: make(chan error),\n\t}\n\tqueryInput <- rv\n\treturn rv\n}\n\nvar processorInput chan *processIn\nvar queryInput chan *queryIn\n\nfunc convertTofloat64(in chan ptrval) chan float64 {\n\tch := make(chan float64)\n\tgo func() {\n\t\tdefer close(ch)\n\t\tfor v := range in {\n\t\t\tif v.included && v.val != nil {\n\t\t\t\tx, err := strconv.ParseFloat(*v.val, 64)\n\t\t\t\tif err == nil {\n\t\t\t\t\tch <- x\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn ch\n}\n\nfunc convertTofloat64Rate(in chan ptrval) chan float64 {\n\tch := make(chan float64)\n\tgo func() {\n\t\tdefer close(ch)\n\t\tvar prevts int64\n\t\tvar preval float64\n\n\t\t\/\/ First, find a part of the stream that has usable data.\n\t\tfor v := range in {\n\t\t\tif v.di != nil && v.val != nil {\n\t\t\t\tx, err := strconv.ParseFloat(*v.val, 64)\n\t\t\t\tif err == nil {\n\t\t\t\t\tprevts = parseKey(v.di.ID())\n\t\t\t\t\tpreval = x\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\/\/ Then emit floats based on deltas from previous values.\n\t\tfor v := range in {\n\t\t\tif v.di != nil && v.val != nil {\n\t\t\t\tx, err := strconv.ParseFloat(*v.val, 64)\n\t\t\t\tif err == nil {\n\t\t\t\t\tthists := parseKey(v.di.ID())\n\n\t\t\t\t\tval := ((x - preval) \/\n\t\t\t\t\t\t(float64(thists-prevts) \/ 1e9))\n\n\t\t\t\t\tif !math.IsNaN(val) {\n\t\t\t\t\t\tch <- val\n\t\t\t\t\t}\n\n\t\t\t\t\tprevts = thists\n\t\t\t\t\tpreval = x\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn ch\n}\n\nvar reducers = map[string]Reducer{\n\t\"identity\": func(input chan ptrval) interface{} {\n\t\trv := []*string{}\n\t\tfor s := range input {\n\t\t\tif s.included {\n\t\t\t\trv = append(rv, s.val)\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"any\": func(input chan ptrval) interface{} {\n\t\tfor v := range input {\n\t\t\tif v.included && v.val != nil {\n\t\t\t\treturn *v.val\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t},\n\t\"count\": func(input chan ptrval) interface{} {\n\t\trv := 0\n\t\tfor v := range input {\n\t\t\tif v.included && v.val != nil {\n\t\t\t\trv++\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"sum\": func(input chan ptrval) interface{} {\n\t\trv := float64(0)\n\t\tfor v := range convertTofloat64(input) {\n\t\t\trv += v\n\t\t}\n\t\treturn rv\n\t},\n\t\"sumsq\": func(input chan ptrval) interface{} {\n\t\trv := float64(0)\n\t\tfor v := range convertTofloat64(input) {\n\t\t\trv += (v * v)\n\t\t}\n\t\treturn rv\n\t},\n\t\"max\": func(input chan ptrval) interface{} {\n\t\trv := math.NaN()\n\t\tfor v := range convertTofloat64(input) {\n\t\t\tif v > rv || math.IsNaN(rv) {\n\t\t\t\trv = v\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"min\": func(input chan ptrval) interface{} {\n\t\trv := math.NaN()\n\t\tfor v := range convertTofloat64(input) {\n\t\t\tif v < rv || math.IsNaN(rv) {\n\t\t\t\trv = v\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"avg\": func(input chan ptrval) interface{} {\n\t\tnums := float64(0)\n\t\tsum := float64(0)\n\t\tfor v := range convertTofloat64(input) {\n\t\t\tnums++\n\t\t\tsum += v\n\t\t}\n\t\tif nums > 0 {\n\t\t\treturn sum \/ nums\n\t\t}\n\t\treturn math.NaN()\n\t},\n\t\"c_min\": func(input chan ptrval) interface{} {\n\t\trv := math.NaN()\n\t\tfor v := range convertTofloat64Rate(input) {\n\t\t\tif v < rv || math.IsNaN(rv) {\n\t\t\t\trv = v\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"c_avg\": func(input chan ptrval) interface{} {\n\t\tnums := float64(0)\n\t\tsum := float64(0)\n\t\tfor v := range convertTofloat64Rate(input) {\n\t\t\tnums++\n\t\t\tsum += v\n\t\t}\n\t\tif nums > 0 {\n\t\t\treturn sum \/ nums\n\t\t}\n\t\treturn math.NaN()\n\t},\n\t\"c_max\": func(input chan ptrval) interface{} {\n\t\trv := math.NaN()\n\t\tfor v := range convertTofloat64Rate(input) {\n\t\t\tif v > rv || math.IsNaN(rv) {\n\t\t\t\trv = v\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n}\n<commit_msg>Use new streaming JSON pointer impl.<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"strconv\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/dustin\/go-couchstore\"\n\t\"github.com\/dustin\/go-jsonpointer\"\n)\n\nvar timeoutError = errors.New(\"query timed out\")\n\ntype ptrval struct {\n\tdi *couchstore.DocInfo\n\tval *string\n\tincluded bool\n}\n\ntype Reducer func(input chan ptrval) interface{}\n\ntype processOut struct {\n\tcacheKey string\n\tkey int64\n\tvalue []interface{}\n\terr error\n\tcacheOpaque uint32\n}\n\nfunc (p processOut) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(map[string]interface{}{\"v\": p.value})\n}\n\ntype processIn struct {\n\tcacheKey string\n\tdbname string\n\tkey int64\n\tinfos []*couchstore.DocInfo\n\tnextInfo *couchstore.DocInfo\n\tptrs []string\n\treds []string\n\tbefore time.Time\n\tfilters []string\n\tfiltervals []string\n\tout chan<- *processOut\n}\n\ntype queryIn struct {\n\tdbname string\n\tfrom string\n\tto string\n\tgroup int\n\tptrs []string\n\treds []string\n\tstart time.Time\n\tbefore time.Time\n\tfilters []string\n\tfiltervals []string\n\tstarted int32\n\ttotalKeys int32\n\tout chan *processOut\n\tcherr chan error\n}\n\nfunc pget(j []byte, s string) (rv interface{}) {\n\tb, err := jsonpointer.Find(j, s)\n\tif err != nil {\n\t\treturn nil\n\t}\n\terr = json.Unmarshal(b, &rv)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn\n}\n\nfunc processDoc(di *couchstore.DocInfo, chs []chan ptrval,\n\tdoc []byte, ptrs []string,\n\tfilters []string, filtervals []string,\n\tincluded bool) {\n\n\tpv := ptrval{di, nil, included}\n\n\tfor i, p := range filters {\n\t\tval := pget(doc, p)\n\t\tcheckVal := filtervals[i]\n\t\tswitch val.(type) {\n\t\tcase string:\n\t\t\tif val != checkVal {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase int, uint, int64, float64, uint64, bool:\n\t\t\tv := fmt.Sprintf(\"%v\", val)\n\t\t\tif v != checkVal {\n\t\t\t\treturn\n\t\t\t}\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor i, p := range ptrs {\n\t\tval := pget(doc, p)\n\t\tswitch x := val.(type) {\n\t\tcase string:\n\t\t\tpv.val = &x\n\t\t\tchs[i] <- pv\n\t\tcase int, uint, int64, float64, uint64, bool:\n\t\t\tv := fmt.Sprintf(\"%v\", val)\n\t\t\tpv.val = &v\n\t\t\tchs[i] <- pv\n\t\tdefault:\n\t\t\tlog.Printf(\"Ignoring %T\", val)\n\t\t\tchs[i] <- pv\n\t\t}\n\t}\n}\n\nfunc process_docs(pi *processIn) {\n\n\tresult := processOut{pi.cacheKey, pi.key, nil, nil, 0}\n\n\tif len(pi.ptrs) == 0 {\n\t\tlog.Panicf(\"No pointers specified in query: %#v\", *pi)\n\t}\n\n\tdb, err := dbopen(pi.dbname)\n\tif err != nil {\n\t\tresult.err = err\n\t\tpi.out <- &result\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\tchans := make([]chan ptrval, 0, len(pi.ptrs))\n\tresultchs := make([]chan interface{}, 0, len(pi.ptrs))\n\tfor i, r := range pi.reds {\n\t\tchans = append(chans, make(chan ptrval))\n\t\tresultchs = append(resultchs, make(chan interface{}))\n\n\t\tgo func(fi int, fr string) {\n\t\t\tresultchs[fi] <- reducers[fr](chans[fi])\n\t\t}(i, r)\n\t}\n\n\tgo func() {\n\t\tdefer closeAll(chans)\n\n\t\tdodoc := func(di *couchstore.DocInfo, included bool) {\n\t\t\tdoc, err := db.GetFromDocInfo(di)\n\t\t\tif err == nil {\n\t\t\t\tprocessDoc(di, chans, doc.Value(), pi.ptrs,\n\t\t\t\t\tpi.filters, pi.filtervals, included)\n\t\t\t} else {\n\t\t\t\tfor i := range pi.ptrs {\n\t\t\t\t\tchans[i] <- ptrval{di, nil, included}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, di := range pi.infos {\n\t\t\tdodoc(di, true)\n\t\t}\n\t\tif pi.nextInfo != nil {\n\t\t\tdodoc(pi.nextInfo, false)\n\t\t}\n\t}()\n\n\tresults := make([]interface{}, len(pi.ptrs))\n\tfor i := 0; i < len(pi.ptrs); i++ {\n\t\tresults[i] = <-resultchs[i]\n\t\tif f, fok := results[i].(float64); fok && math.IsNaN(f) {\n\t\t\tresults[i] = nil\n\t\t}\n\t}\n\tresult.value = results\n\n\tif result.cacheOpaque == 0 && result.cacheKey != \"\" {\n\t\t\/\/ It's OK if we can't store our newly pulled item in\n\t\t\/\/ the cache, but it's most definitely not OK to stop\n\t\t\/\/ here because of this.\n\t\tselect {\n\t\tcase cacheInputSet <- &result:\n\t\tdefault:\n\t\t}\n\t}\n\tpi.out <- &result\n}\n\nfunc docProcessor(ch <-chan *processIn) {\n\tfor pi := range ch {\n\t\tif time.Now().Before(pi.before) {\n\t\t\tprocess_docs(pi)\n\t\t} else {\n\t\t\tpi.out <- &processOut{\"\", pi.key, nil, timeoutError, 0}\n\t\t}\n\t}\n}\n\nfunc fetchDocs(dbname string, key int64, infos []*couchstore.DocInfo,\n\tnextInfo *couchstore.DocInfo, ptrs []string, reds []string,\n\tfilters []string, filtervals []string,\n\tbefore time.Time, out chan<- *processOut) {\n\n\ti := processIn{\"\", dbname, key, infos, nextInfo,\n\t\tptrs, reds, before, filters, filtervals, out}\n\n\tcacheInput <- &i\n}\n\nfunc runQuery(q *queryIn) {\n\tif len(q.ptrs) == 0 {\n\t\tq.cherr <- fmt.Errorf(\"At least one pointer is required\")\n\t\treturn\n\t}\n\n\tdb, err := dbopen(q.dbname)\n\tif err != nil {\n\t\tlog.Printf(\"Error opening db: %v - %v\", q.dbname, err)\n\t\tq.cherr <- err\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\tchunk := int64(time.Duration(q.group) * time.Millisecond)\n\n\tinfos := []*couchstore.DocInfo{}\n\tg := int64(0)\n\tnextg := \"\"\n\n\terr = db.Walk(q.from, func(d *couchstore.Couchstore,\n\t\tdi *couchstore.DocInfo) error {\n\t\tkstr := di.ID()\n\t\tvar err error\n\t\tif q.to != \"\" && kstr >= q.to {\n\t\t\terr = couchstore.StopIteration\n\t\t}\n\n\t\tatomic.AddInt32(&q.totalKeys, 1)\n\n\t\tif kstr >= nextg {\n\t\t\tif len(infos) > 0 {\n\t\t\t\tatomic.AddInt32(&q.started, 1)\n\t\t\t\tfetchDocs(q.dbname, g, infos, di,\n\t\t\t\t\tq.ptrs, q.reds, q.filters, q.filtervals,\n\t\t\t\t\tq.before, q.out)\n\n\t\t\t\tinfos = make([]*couchstore.DocInfo, 0, len(infos))\n\t\t\t}\n\n\t\t\tk := parseKey(kstr)\n\t\t\tg = (k \/ chunk) * chunk\n\t\t\tnextgi := g + chunk\n\t\t\tnextgt := time.Unix(nextgi\/1e9, nextgi%1e9).UTC()\n\t\t\tnextg = nextgt.Format(time.RFC3339Nano)\n\t\t}\n\t\tinfos = append(infos, di)\n\n\t\treturn err\n\t})\n\n\tif err == nil && len(infos) > 0 {\n\t\tatomic.AddInt32(&q.started, 1)\n\t\tfetchDocs(q.dbname, g, infos, nil,\n\t\t\tq.ptrs, q.reds, q.filters, q.filtervals,\n\t\t\tq.before, q.out)\n\t}\n\n\tq.cherr <- err\n}\n\nfunc queryExecutor(ch <-chan *queryIn) {\n\tfor q := range ch {\n\t\tif time.Now().Before(q.before) {\n\t\t\trunQuery(q)\n\t\t} else {\n\t\t\tlog.Printf(\"Timed out query that's %v late\",\n\t\t\t\ttime.Since(q.before))\n\t\t\tq.cherr <- timeoutError\n\t\t}\n\t}\n}\n\nfunc executeQuery(dbname, from, to string, group int,\n\tptrs []string, reds []string, filters []string, filtervals []string) *queryIn {\n\n\tnow := time.Now()\n\n\trv := &queryIn{\n\t\tdbname: dbname,\n\t\tfrom: from,\n\t\tto: to,\n\t\tgroup: group,\n\t\tptrs: ptrs,\n\t\treds: reds,\n\t\tstart: now,\n\t\tbefore: now.Add(*queryTimeout),\n\t\tfilters: filters,\n\t\tfiltervals: filtervals,\n\t\tout: make(chan *processOut),\n\t\tcherr: make(chan error),\n\t}\n\tqueryInput <- rv\n\treturn rv\n}\n\nvar processorInput chan *processIn\nvar queryInput chan *queryIn\n\nfunc convertTofloat64(in chan ptrval) chan float64 {\n\tch := make(chan float64)\n\tgo func() {\n\t\tdefer close(ch)\n\t\tfor v := range in {\n\t\t\tif v.included && v.val != nil {\n\t\t\t\tx, err := strconv.ParseFloat(*v.val, 64)\n\t\t\t\tif err == nil {\n\t\t\t\t\tch <- x\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn ch\n}\n\nfunc convertTofloat64Rate(in chan ptrval) chan float64 {\n\tch := make(chan float64)\n\tgo func() {\n\t\tdefer close(ch)\n\t\tvar prevts int64\n\t\tvar preval float64\n\n\t\t\/\/ First, find a part of the stream that has usable data.\n\t\tfor v := range in {\n\t\t\tif v.di != nil && v.val != nil {\n\t\t\t\tx, err := strconv.ParseFloat(*v.val, 64)\n\t\t\t\tif err == nil {\n\t\t\t\t\tprevts = parseKey(v.di.ID())\n\t\t\t\t\tpreval = x\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\/\/ Then emit floats based on deltas from previous values.\n\t\tfor v := range in {\n\t\t\tif v.di != nil && v.val != nil {\n\t\t\t\tx, err := strconv.ParseFloat(*v.val, 64)\n\t\t\t\tif err == nil {\n\t\t\t\t\tthists := parseKey(v.di.ID())\n\n\t\t\t\t\tval := ((x - preval) \/\n\t\t\t\t\t\t(float64(thists-prevts) \/ 1e9))\n\n\t\t\t\t\tif !math.IsNaN(val) {\n\t\t\t\t\t\tch <- val\n\t\t\t\t\t}\n\n\t\t\t\t\tprevts = thists\n\t\t\t\t\tpreval = x\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn ch\n}\n\nvar reducers = map[string]Reducer{\n\t\"identity\": func(input chan ptrval) interface{} {\n\t\trv := []*string{}\n\t\tfor s := range input {\n\t\t\tif s.included {\n\t\t\t\trv = append(rv, s.val)\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"any\": func(input chan ptrval) interface{} {\n\t\tfor v := range input {\n\t\t\tif v.included && v.val != nil {\n\t\t\t\treturn *v.val\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t},\n\t\"count\": func(input chan ptrval) interface{} {\n\t\trv := 0\n\t\tfor v := range input {\n\t\t\tif v.included && v.val != nil {\n\t\t\t\trv++\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"sum\": func(input chan ptrval) interface{} {\n\t\trv := float64(0)\n\t\tfor v := range convertTofloat64(input) {\n\t\t\trv += v\n\t\t}\n\t\treturn rv\n\t},\n\t\"sumsq\": func(input chan ptrval) interface{} {\n\t\trv := float64(0)\n\t\tfor v := range convertTofloat64(input) {\n\t\t\trv += (v * v)\n\t\t}\n\t\treturn rv\n\t},\n\t\"max\": func(input chan ptrval) interface{} {\n\t\trv := math.NaN()\n\t\tfor v := range convertTofloat64(input) {\n\t\t\tif v > rv || math.IsNaN(rv) {\n\t\t\t\trv = v\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"min\": func(input chan ptrval) interface{} {\n\t\trv := math.NaN()\n\t\tfor v := range convertTofloat64(input) {\n\t\t\tif v < rv || math.IsNaN(rv) {\n\t\t\t\trv = v\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"avg\": func(input chan ptrval) interface{} {\n\t\tnums := float64(0)\n\t\tsum := float64(0)\n\t\tfor v := range convertTofloat64(input) {\n\t\t\tnums++\n\t\t\tsum += v\n\t\t}\n\t\tif nums > 0 {\n\t\t\treturn sum \/ nums\n\t\t}\n\t\treturn math.NaN()\n\t},\n\t\"c_min\": func(input chan ptrval) interface{} {\n\t\trv := math.NaN()\n\t\tfor v := range convertTofloat64Rate(input) {\n\t\t\tif v < rv || math.IsNaN(rv) {\n\t\t\t\trv = v\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n\t\"c_avg\": func(input chan ptrval) interface{} {\n\t\tnums := float64(0)\n\t\tsum := float64(0)\n\t\tfor v := range convertTofloat64Rate(input) {\n\t\t\tnums++\n\t\t\tsum += v\n\t\t}\n\t\tif nums > 0 {\n\t\t\treturn sum \/ nums\n\t\t}\n\t\treturn math.NaN()\n\t},\n\t\"c_max\": func(input chan ptrval) interface{} {\n\t\trv := math.NaN()\n\t\tfor v := range convertTofloat64Rate(input) {\n\t\t\tif v > rv || math.IsNaN(rv) {\n\t\t\t\trv = v\n\t\t\t}\n\t\t}\n\t\treturn rv\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before>package GoSDK\n\nimport ()\n\n\/\/ Filter is the atomic structure inside a query it contains\n\/\/ A field a value and an operator\ntype Filter struct {\n\tField string\n\tValue interface{}\n\tOperator string\n}\n\ntype Ordering struct {\n\tSortOrder bool\n\tOrderKey string\n}\n\ntype Query struct {\n\tFilters [][]Filter\n\tPageSize int\n\tPageNumber int\n\tOrder []Ordering\n}\n\nfunc NewQuery() *Query {\n\tquery := &Query{\n\t\tFilters: [][]Filter{[]Filter{}},\n\t\tOrder: []Ordering{},\n\t}\n\treturn query\n}\n\nfunc (q *Query) EqualTo(field string, value interface{}) {\n\tf := Filter{\n\t\tField: field,\n\t\tValue: value,\n\t\tOperator: \"=\",\n\t}\n\tq.Filters[0] = append(q.Filters[0], f)\n}\n\nfunc (q *Query) GreaterThan(field string, value interface{}) {\n\tf := Filter{\n\t\tField: field,\n\t\tValue: value,\n\t\tOperator: \">\",\n\t}\n\tq.Filters[0] = append(q.Filters[0], f)\n}\n\nfunc (q *Query) GreaterThanEqualTo(field string, value interface{}) {\n\tf := Filter{\n\t\tField: field,\n\t\tValue: value,\n\t\tOperator: \">=\",\n\t}\n\tq.Filters[0] = append(q.Filters[0], f)\n}\n\nfunc (q *Query) LessThan(field string, value interface{}) {\n\tf := Filter{\n\t\tField: field,\n\t\tValue: value,\n\t\tOperator: \"<\",\n\t}\n\tq.Filters[0] = append(q.Filters[0], f)\n}\n\nfunc (q *Query) LessThanEqualTo(field string, value interface{}) {\n\tf := Filter{\n\t\tField: field,\n\t\tValue: value,\n\t\tOperator: \"<=\",\n\t}\n\tq.Filters[0] = append(q.Filters[0], f)\n}\n\nfunc (q *Query) NotEqualTo(field string, value interface{}) {\n\tf := Filter{\n\t\tField: field,\n\t\tValue: value,\n\t\tOperator: \"!=\",\n\t}\n\tq.Filters[0] = append(q.Filters[0], f)\n}\n\nfunc (q *Query) Matches(field, regex string) {\n\tf := Filter{\n\t\tField: field,\n\t\tValue: regex,\n\t\tOperator: \"~\",\n\t}\n\tq.Filters[0] = append(q.Filters[0], f)\n}\n\nfunc (q *Query) Or(orQuery *Query) {\n\tq.Filters = append(q.Filters, orQuery.Filters...)\n}\n\n\/\/ Map will produce the kind of thing that is sent as a query\n\/\/ either as the body of a request or as a queryString\nfunc (q *Query) serialize() map[string]interface{} {\n\tqrMap := make(map[string]interface{})\n\tqrMap[\"PAGENUM\"] = q.PageNumber\n\tqrMap[\"PAGESIZE\"] = q.PageSize\n\tsortMap := make([]map[string]interface{}, len(q.Order))\n\tfor i, ordering := range q.Order {\n\t\tsortMap[i] = make(map[string]interface{})\n\t\tif ordering.SortOrder {\n\t\t\tsortMap[i][\"ASC\"] = ordering.OrderKey\n\t\t} else {\n\t\t\tsortMap[i][\"DESC\"] = ordering.OrderKey\n\t\t}\n\t}\n\tqrMap[\"SORT\"] = sortMap\n\tfilterSlice := make([][]map[string]interface{}, len(q.Filters))\n\tfor i, querySlice := range q.Filters {\n\t\tqm := make([]map[string]interface{}, len(querySlice))\n\t\tfor j, query := range querySlice {\n\t\t\tmapForQuery := make(map[string]interface{})\n\t\t\tvar op string\n\t\t\tswitch query.Operator {\n\t\t\tcase \"=\":\n\t\t\t\top = \"EQ\"\n\t\t\tcase \">\":\n\t\t\t\top = \"GT\"\n\t\t\tcase \"<\":\n\t\t\t\top = \"LT\"\n\t\t\tcase \">=\":\n\t\t\t\top = \"GTE\"\n\t\t\tcase \"<=\":\n\t\t\t\top = \"LTE\"\n\t\t\tcase \"\/=\":\n\t\t\t\top = \"NEQ\"\n\t\t\tcase \"~\":\n\t\t\t\top = \"RE\"\n\t\t\tdefault:\n\t\t\t\top = \"EQ\"\n\t\t\t}\n\t\t\tmapForQuery[op] = []map[string]interface{}{map[string]interface{}{query.Field: query.Value}}\n\t\t\tqm[j] = mapForQuery\n\t\t}\n\t\tfilterSlice[i] = qm\n\t}\n\tqrMap[\"FILTERS\"] = filterSlice\n\treturn qrMap\n}\n<commit_msg>Add changes to allow selecting of specific fields<commit_after>package GoSDK\n\nimport ()\n\n\/\/ Filter is the atomic structure inside a query it contains\n\/\/ A field a value and an operator\ntype Filter struct {\n\tField string\n\tValue interface{}\n\tOperator string\n}\n\ntype Ordering struct {\n\tSortOrder bool\n\tOrderKey string\n}\n\ntype Query struct {\n\tFilters [][]Filter\n\tPageSize int\n\tPageNumber int\n\tOrder []Ordering\n\tColumns []string\n}\n\nfunc NewQuery() *Query {\n\tquery := &Query{\n\t\tFilters: [][]Filter{[]Filter{}},\n\t\tOrder: []Ordering{},\n\t}\n\treturn query\n}\n\nfunc (q *Query) EqualTo(field string, value interface{}) {\n\tf := Filter{\n\t\tField: field,\n\t\tValue: value,\n\t\tOperator: \"=\",\n\t}\n\tq.Filters[0] = append(q.Filters[0], f)\n}\n\nfunc (q *Query) GreaterThan(field string, value interface{}) {\n\tf := Filter{\n\t\tField: field,\n\t\tValue: value,\n\t\tOperator: \">\",\n\t}\n\tq.Filters[0] = append(q.Filters[0], f)\n}\n\nfunc (q *Query) GreaterThanEqualTo(field string, value interface{}) {\n\tf := Filter{\n\t\tField: field,\n\t\tValue: value,\n\t\tOperator: \">=\",\n\t}\n\tq.Filters[0] = append(q.Filters[0], f)\n}\n\nfunc (q *Query) LessThan(field string, value interface{}) {\n\tf := Filter{\n\t\tField: field,\n\t\tValue: value,\n\t\tOperator: \"<\",\n\t}\n\tq.Filters[0] = append(q.Filters[0], f)\n}\n\nfunc (q *Query) LessThanEqualTo(field string, value interface{}) {\n\tf := Filter{\n\t\tField: field,\n\t\tValue: value,\n\t\tOperator: \"<=\",\n\t}\n\tq.Filters[0] = append(q.Filters[0], f)\n}\n\nfunc (q *Query) NotEqualTo(field string, value interface{}) {\n\tf := Filter{\n\t\tField: field,\n\t\tValue: value,\n\t\tOperator: \"!=\",\n\t}\n\tq.Filters[0] = append(q.Filters[0], f)\n}\n\nfunc (q *Query) Matches(field, regex string) {\n\tf := Filter{\n\t\tField: field,\n\t\tValue: regex,\n\t\tOperator: \"~\",\n\t}\n\tq.Filters[0] = append(q.Filters[0], f)\n}\n\nfunc (q *Query) Or(orQuery *Query) {\n\tq.Filters = append(q.Filters, orQuery.Filters...)\n}\n\n\/\/ Map will produce the kind of thing that is sent as a query\n\/\/ either as the body of a request or as a queryString\nfunc (q *Query) serialize() map[string]interface{} {\n\tqrMap := make(map[string]interface{})\n\tqrMap[\"PAGENUM\"] = q.PageNumber\n\tqrMap[\"PAGESIZE\"] = q.PageSize\n\tqrMap[\"SELECTCOLUMNS\"] = q.Columns\n\tsortMap := make([]map[string]interface{}, len(q.Order))\n\tfor i, ordering := range q.Order {\n\t\tsortMap[i] = make(map[string]interface{})\n\t\tif ordering.SortOrder {\n\t\t\tsortMap[i][\"ASC\"] = ordering.OrderKey\n\t\t} else {\n\t\t\tsortMap[i][\"DESC\"] = ordering.OrderKey\n\t\t}\n\t}\n\tqrMap[\"SORT\"] = sortMap\n\tfilterSlice := make([][]map[string]interface{}, len(q.Filters))\n\tfor i, querySlice := range q.Filters {\n\t\tqm := make([]map[string]interface{}, len(querySlice))\n\t\tfor j, query := range querySlice {\n\t\t\tmapForQuery := make(map[string]interface{})\n\t\t\tvar op string\n\t\t\tswitch query.Operator {\n\t\t\tcase \"=\":\n\t\t\t\top = \"EQ\"\n\t\t\tcase \">\":\n\t\t\t\top = \"GT\"\n\t\t\tcase \"<\":\n\t\t\t\top = \"LT\"\n\t\t\tcase \">=\":\n\t\t\t\top = \"GTE\"\n\t\t\tcase \"<=\":\n\t\t\t\top = \"LTE\"\n\t\t\tcase \"\/=\":\n\t\t\t\top = \"NEQ\"\n\t\t\tcase \"~\":\n\t\t\t\top = \"RE\"\n\t\t\tdefault:\n\t\t\t\top = \"EQ\"\n\t\t\t}\n\t\t\tmapForQuery[op] = []map[string]interface{}{map[string]interface{}{query.Field: query.Value}}\n\t\t\tqm[j] = mapForQuery\n\t\t}\n\t\tfilterSlice[i] = qm\n\t}\n\tqrMap[\"FILTERS\"] = filterSlice\n\treturn qrMap\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Copyright (c) 2013 Matt Jibson <matt.jibson@gmail.com>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n *\/\n\npackage miniprofiler_gae\n\nimport (\n\t\"appengine\"\n\t\"appengine\/memcache\"\n\t\"appengine\/user\"\n\t\"appengine_internal\"\n\t\"fmt\"\n\t\"github.com\/mjibson\/MiniProfiler\/go\/miniprofiler\"\n\t\"github.com\/mjibson\/appstats\"\n\t\"net\/http\"\n)\n\nfunc init() {\n\tminiprofiler.Enable = EnableIfAdminOrDev\n\tminiprofiler.Get = GetMemcache\n\tminiprofiler.Store = StoreMemcache\n\tminiprofiler.MachineName = Instance\n}\n\n\/\/ EnableIfAdminOrDev returns true if this is the dev server or the current\n\/\/ user is an admin. This is the default for miniprofiler.Enable.\nfunc EnableIfAdminOrDev(r *http.Request) bool {\n\tif appengine.IsDevAppServer() {\n\t\treturn true\n\t}\n\tc := appengine.NewContext(r)\n\tif u := user.Current(c); u != nil {\n\t\treturn u.Admin\n\t}\n\treturn false\n}\n\n\/\/ Instance returns the app engine instance id, or the hostname on dev.\n\/\/ This is the default for miniprofiler.MachineName.\nfunc Instance() string {\n\tif i := appengine.InstanceID(); i != \"\" {\n\t\treturn i[len(i)-8:]\n\t}\n\treturn miniprofiler.Hostname()\n}\n\n\/\/ StoreMemcache stores the Profile in memcache. This is the default for\n\/\/ miniprofiler.Store.\nfunc StoreMemcache(r *http.Request, p *miniprofiler.Profile) {\n\titem := &memcache.Item{\n\t\tKey: mp_key(string(p.Id)),\n\t\tValue: p.Json(),\n\t}\n\tc := appengine.NewContext(r)\n\tmemcache.Set(c, item)\n}\n\n\/\/ GetMemcache gets the Profile from memcache. This is the default for\n\/\/ miniprofiler.Get.\nfunc GetMemcache(r *http.Request, id string) *miniprofiler.Profile {\n\tc := appengine.NewContext(r)\n\titem, err := memcache.Get(c, mp_key(id))\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn miniprofiler.ProfileFromJson(item.Value)\n}\n\ntype Context struct {\n\tappstats.Context\n\tP *miniprofiler.Profile\n}\n\nfunc (c Context) Call(service, method string, in, out appengine_internal.ProtoMessage, opts *appengine_internal.CallOptions) error {\n\terr := c.Context.Call(service, method, in, out, opts)\n\tif service == \"__go__\" {\n\t\tv := c.Context.Stats.RPCStats[len(c.Context.Stats.RPCStats)-1]\n\t\tc.P.AddCustomTiming(\"RPC\", &miniprofiler.CustomTiming{\n\t\t\tStartMilliseconds: float64(v.Offset.Nanoseconds()) \/ 1000000,\n\t\t\tDurationMilliseconds: float64(v.Duration.Nanoseconds()) \/ 1000000,\n\t\t})\n\t}\n\treturn err\n}\n\n\/\/ NewHandler returns a profiled, appstats-aware appengine.Context.\nfunc NewHandler(f func(Context, http.ResponseWriter, *http.Request)) appstats.Handler {\n\treturn appstats.NewHandler(func(c appengine.Context, w http.ResponseWriter, r *http.Request) {\n\t\tpc := Context{\n\t\t\tContext: c.(appstats.Context),\n\t\t}\n\t\tpc.P = miniprofiler.NewProfile(w, r, miniprofiler.FuncName(f))\n\t\tf(pc, w, r)\n\n\t\tif pc.P.Root != nil {\n\t\t\tpc.P.CustomLink = pc.URL()\n\t\t\tpc.P.CustomLinkName = \"appstats\"\n\t\t\tpc.P.Finalize()\n\t\t}\n\t})\n}\n\nfunc mp_key(id string) string {\n\treturn fmt.Sprintf(\"mini-profiler-results:%s\", id)\n}\n<commit_msg>Oops, fix compare<commit_after>\/*\n * Copyright (c) 2013 Matt Jibson <matt.jibson@gmail.com>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n *\/\n\npackage miniprofiler_gae\n\nimport (\n\t\"appengine\"\n\t\"appengine\/memcache\"\n\t\"appengine\/user\"\n\t\"appengine_internal\"\n\t\"fmt\"\n\t\"github.com\/mjibson\/MiniProfiler\/go\/miniprofiler\"\n\t\"github.com\/mjibson\/appstats\"\n\t\"net\/http\"\n)\n\nfunc init() {\n\tminiprofiler.Enable = EnableIfAdminOrDev\n\tminiprofiler.Get = GetMemcache\n\tminiprofiler.Store = StoreMemcache\n\tminiprofiler.MachineName = Instance\n}\n\n\/\/ EnableIfAdminOrDev returns true if this is the dev server or the current\n\/\/ user is an admin. This is the default for miniprofiler.Enable.\nfunc EnableIfAdminOrDev(r *http.Request) bool {\n\tif appengine.IsDevAppServer() {\n\t\treturn true\n\t}\n\tc := appengine.NewContext(r)\n\tif u := user.Current(c); u != nil {\n\t\treturn u.Admin\n\t}\n\treturn false\n}\n\n\/\/ Instance returns the app engine instance id, or the hostname on dev.\n\/\/ This is the default for miniprofiler.MachineName.\nfunc Instance() string {\n\tif i := appengine.InstanceID(); i != \"\" {\n\t\treturn i[len(i)-8:]\n\t}\n\treturn miniprofiler.Hostname()\n}\n\n\/\/ StoreMemcache stores the Profile in memcache. This is the default for\n\/\/ miniprofiler.Store.\nfunc StoreMemcache(r *http.Request, p *miniprofiler.Profile) {\n\titem := &memcache.Item{\n\t\tKey: mp_key(string(p.Id)),\n\t\tValue: p.Json(),\n\t}\n\tc := appengine.NewContext(r)\n\tmemcache.Set(c, item)\n}\n\n\/\/ GetMemcache gets the Profile from memcache. This is the default for\n\/\/ miniprofiler.Get.\nfunc GetMemcache(r *http.Request, id string) *miniprofiler.Profile {\n\tc := appengine.NewContext(r)\n\titem, err := memcache.Get(c, mp_key(id))\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn miniprofiler.ProfileFromJson(item.Value)\n}\n\ntype Context struct {\n\tappstats.Context\n\tP *miniprofiler.Profile\n}\n\nfunc (c Context) Call(service, method string, in, out appengine_internal.ProtoMessage, opts *appengine_internal.CallOptions) error {\n\terr := c.Context.Call(service, method, in, out, opts)\n\tif service != \"__go__\" {\n\t\tv := c.Context.Stats.RPCStats[len(c.Context.Stats.RPCStats)-1]\n\t\tc.P.AddCustomTiming(\"RPC\", &miniprofiler.CustomTiming{\n\t\t\tStartMilliseconds: float64(v.Offset.Nanoseconds()) \/ 1000000,\n\t\t\tDurationMilliseconds: float64(v.Duration.Nanoseconds()) \/ 1000000,\n\t\t})\n\t}\n\treturn err\n}\n\n\/\/ NewHandler returns a profiled, appstats-aware appengine.Context.\nfunc NewHandler(f func(Context, http.ResponseWriter, *http.Request)) appstats.Handler {\n\treturn appstats.NewHandler(func(c appengine.Context, w http.ResponseWriter, r *http.Request) {\n\t\tpc := Context{\n\t\t\tContext: c.(appstats.Context),\n\t\t}\n\t\tpc.P = miniprofiler.NewProfile(w, r, miniprofiler.FuncName(f))\n\t\tf(pc, w, r)\n\n\t\tif pc.P.Root != nil {\n\t\t\tpc.P.CustomLink = pc.URL()\n\t\t\tpc.P.CustomLinkName = \"appstats\"\n\t\t\tpc.P.Finalize()\n\t\t}\n\t})\n}\n\nfunc mp_key(id string) string {\n\treturn fmt.Sprintf(\"mini-profiler-results:%s\", id)\n}\n<|endoftext|>"} {"text":"<commit_before>package bongo\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/jinzhu\/gorm\"\n)\n\nfunc (b *Bongo) Fetch(i Modellable) error {\n\tif i.GetId() == 0 {\n\t\treturn errors.New(fmt.Sprintf(\"Id is not set for %s\", i.TableName()))\n\t}\n\n\tif err := b.DB.First(i.Self(), i.GetId()).Error; err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (b *Bongo) FetchByIds(i Modellable, data interface{}, ids []int64) error {\n\tif len(ids) == 0 {\n\t\treturn nil\n\t}\n\n\treturn b.DB.\n\t\tTable(i.TableName()).\n\t\tWhere(ids).\n\t\tFind(data).\n\t\tError\n\n}\n\nfunc (b *Bongo) Create(i Modellable) error {\n\tif err := b.DB.Save(i).Error; err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (b *Bongo) Update(i Modellable) error {\n\tif i.GetId() == 0 {\n\t\treturn errors.New(fmt.Sprintf(\"Id is not set for %s\", i.TableName()))\n\t}\n\n\treturn b.Create(i)\n}\n\n\/\/ selector, set\nfunc (b *Bongo) UpdatePartial(i Modellable, rest ...map[string]interface{}) error {\n\tvar set, selector map[string]interface{}\n\n\tswitch len(rest) {\n\tcase 1:\n\t\tset = rest[0]\n\t\tselector = nil\n\tcase 2:\n\t\tselector = rest[0]\n\t\tset = rest[1]\n\tdefault:\n\t\treturn errors.New(\"Update partial parameter list is wrong\")\n\t}\n\n\tquery := b.DB.Table(i.TableName())\n\n\tif i.GetId() != 0 {\n\t\tquery = query.Where(i.GetId())\n\t} else {\n\t\t\/\/add selector\n\t\tquery = addWhere(query, selector)\n\t}\n\n\tif err := query.Model(i.Self()).Update(set).Error; err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (b *Bongo) Delete(i Modellable) error {\n\tif i.GetId() == 0 {\n\t\treturn errors.New(fmt.Sprintf(\"Id is not set for %s\", i.TableName()))\n\t}\n\n\tif err := b.DB.Delete(i.Self()).Error; err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (b *Bongo) Count(i Modellable, where ...interface{}) (int, error) {\n\tvar count int\n\n\t\/\/ init query\n\tquery := b.DB\n\n\t\/\/ add table name\n\tquery = query.Table(i.TableName())\n\n\t\/\/ add query\n\tquery = query.Where(where[0], where[1:len(where)]...)\n\n\treturn count, query.Count(&count).Error\n}\n\nfunc (b *Bongo) Some(i Modellable, data interface{}, rest ...map[string]interface{}) error {\n\n\tvar selector, options, plucked map[string]interface{}\n\tswitch len(rest) {\n\n\tcase 1: \/\/ just filter\n\t\tselector = rest[0]\n\tcase 2: \/\/filter and sort\n\t\tselector = rest[0]\n\t\toptions = rest[1]\n\tcase 3: \/\/ filter, sort and only get some data of the result set\n\t\tselector = rest[0]\n\t\toptions = rest[1]\n\t\tplucked = rest[2]\n\tdefault:\n\t\treturn errors.New(\"Some parameter list is wrong\")\n\t}\n\n\t\/\/ init query\n\tquery := b.DB\n\n\t\/\/ add pluck data\n\tquery = addPluck(query, plucked)\n\n\t\/\/ add sort options\n\tquery = addSort(query, options)\n\n\t\/\/ add table name\n\tquery = query.Table(i.TableName())\n\n\t\/\/ add selector\n\tquery = addWhere(query, selector)\n\terr := query.Find(data).Error\n\tif err == gorm.RecordNotFound {\n\t\treturn nil\n\t}\n\treturn err\n}\n\nfunc (b *Bongo) One(i Modellable, data interface{}, selector map[string]interface{}) error {\n\n\t\/\/ init query\n\tquery := b.DB\n\n\t\/\/ add table name\n\tquery = query.Table(i.TableName())\n\n\t\/\/ add selector\n\tquery = addWhere(query, selector)\n\n\t\/\/ add limit\n\tquery.Limit(1)\n\n\treturn query.Find(data).Error\n}\n\nfunc (b *Bongo) AfterCreate(i Modellable) {\n\teventName := fmt.Sprintf(\"%s_created\", i.TableName())\n\tdata, err := json.Marshal(i.Self())\n\tif err != nil {\n\t\t\/\/ here try to resend this message to RMQ again, than\n\t\t\/\/ persist it to somewhere!#!##@$%#?\n\t\t\/\/ those messages are really important now\n\t\tfmt.Println(\"Error occured\", err)\n\t\treturn\n\t}\n\terr = b.Broker.Publish(eventName, data)\n\tif err != nil {\n\t\tfmt.Println(\"jhasdjhadsjdasj\", err)\n\t}\n}\n\nfunc (b *Bongo) AfterUpdate(i Modellable) {\n\teventName := fmt.Sprintf(\"%s_updated\", i.TableName())\n\tdata, err := json.Marshal(i.Self())\n\tif err != nil {\n\t\t\/\/ here try to resend this message to RMQ again, than\n\t\t\/\/ persist it to somewhere!#!##@$%#?\n\t\t\/\/ those messages are really important now\n\t\tfmt.Println(\"Error occured\", err)\n\t\treturn\n\t}\n\terr = b.Broker.Publish(eventName, data)\n\tif err != nil {\n\t\tfmt.Println(\"jhasdjhadsjdasj\", err)\n\t}\n}\n\nfunc (b *Bongo) AfterDelete(i Modellable) {\n\teventName := fmt.Sprintf(\"%s_deleted\", i.TableName())\n\tdata, err := json.Marshal(i.Self())\n\tif err != nil {\n\t\t\/\/ here try to resend this message to RMQ again, than\n\t\t\/\/ persist it to somewhere!#!##@$%#?\n\t\t\/\/ those messages are really important now\n\t\tfmt.Println(\"Error occured\", err)\n\t\treturn\n\t}\n\terr = b.Broker.Publish(eventName, data)\n\tif err != nil {\n\t\tfmt.Println(\"jhasdjhadsjdasj\", err)\n\t}\n}\n\nfunc addSort(query *gorm.DB, options map[string]interface{}) *gorm.DB {\n\n\tif options == nil {\n\t\treturn query\n\t}\n\n\tvar opts []string\n\tfor key, val := range options {\n\t\topts = append(opts, fmt.Sprintf(\"%s %v\", key, val))\n\t}\n\treturn query.Order(strings.Join(opts, \",\"))\n}\n\nfunc addPluck(query *gorm.DB, plucked map[string]interface{}) *gorm.DB {\n\n\tif plucked == nil {\n\t\treturn query\n\t}\n\n\tvar opts []string\n\tfor key := range plucked {\n\t\topts = append(opts, fmt.Sprintf(\"%s\", key))\n\t}\n\tfmt.Println(strings.Join(opts, \",\"))\n\treturn query.Select(strings.Join(opts, \",\"))\n}\n\nfunc addWhere(query *gorm.DB, selector map[string]interface{}) *gorm.DB {\n\tif selector == nil {\n\t\treturn query\n\t}\n\treturn query.Where(selector)\n}\n<commit_msg>Social: add documentation<commit_after>package bongo\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/jinzhu\/gorm\"\n)\n\nfunc (b *Bongo) Fetch(i Modellable) error {\n\tif i.GetId() == 0 {\n\t\treturn errors.New(fmt.Sprintf(\"Id is not set for %s\", i.TableName()))\n\t}\n\n\tif err := b.DB.First(i.Self(), i.GetId()).Error; err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (b *Bongo) FetchByIds(i Modellable, data interface{}, ids []int64) error {\n\tif len(ids) == 0 {\n\t\treturn nil\n\t}\n\n\treturn b.DB.\n\t\tTable(i.TableName()).\n\t\tWhere(ids).\n\t\tFind(data).\n\t\tError\n\n}\n\nfunc (b *Bongo) Create(i Modellable) error {\n\tif err := b.DB.Save(i).Error; err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (b *Bongo) Update(i Modellable) error {\n\tif i.GetId() == 0 {\n\t\treturn errors.New(fmt.Sprintf(\"Id is not set for %s\", i.TableName()))\n\t}\n\n\t\/\/ Update and Create is using the Save method, so they are\n\t\/\/ same functions but GORM handles, AfterCreate and AfterUpdate\n\t\/\/ in correct manner\n\treturn b.Create(i)\n}\n\n\/\/ selector, set\nfunc (b *Bongo) UpdatePartial(i Modellable, rest ...map[string]interface{}) error {\n\tvar set, selector map[string]interface{}\n\n\tswitch len(rest) {\n\tcase 1:\n\t\tset = rest[0]\n\t\tselector = nil\n\tcase 2:\n\t\tselector = rest[0]\n\t\tset = rest[1]\n\tdefault:\n\t\treturn errors.New(\"Update partial parameter list is wrong\")\n\t}\n\n\tquery := b.DB.Table(i.TableName())\n\n\tif i.GetId() != 0 {\n\t\tquery = query.Where(i.GetId())\n\t} else {\n\t\t\/\/add selector\n\t\tquery = addWhere(query, selector)\n\t}\n\n\tif err := query.Model(i.Self()).Update(set).Error; err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (b *Bongo) Delete(i Modellable) error {\n\tif i.GetId() == 0 {\n\t\treturn errors.New(fmt.Sprintf(\"Id is not set for %s\", i.TableName()))\n\t}\n\n\tif err := b.DB.Delete(i.Self()).Error; err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (b *Bongo) Count(i Modellable, where ...interface{}) (int, error) {\n\tvar count int\n\n\t\/\/ init query\n\tquery := b.DB\n\n\t\/\/ add table name\n\tquery = query.Table(i.TableName())\n\n\t\/\/ add query\n\tquery = query.Where(where[0], where[1:len(where)]...)\n\n\treturn count, query.Count(&count).Error\n}\n\nfunc (b *Bongo) Some(i Modellable, data interface{}, rest ...map[string]interface{}) error {\n\n\tvar selector, options, plucked map[string]interface{}\n\tswitch len(rest) {\n\n\tcase 1: \/\/ just filter\n\t\tselector = rest[0]\n\tcase 2: \/\/filter and sort\n\t\tselector = rest[0]\n\t\toptions = rest[1]\n\tcase 3: \/\/ filter, sort and only get some data of the result set\n\t\tselector = rest[0]\n\t\toptions = rest[1]\n\t\tplucked = rest[2]\n\tdefault:\n\t\treturn errors.New(\"Some parameter list is wrong\")\n\t}\n\n\t\/\/ init query\n\tquery := b.DB\n\n\t\/\/ add pluck data\n\tquery = addPluck(query, plucked)\n\n\t\/\/ add sort options\n\tquery = addSort(query, options)\n\n\t\/\/ add table name\n\tquery = query.Table(i.TableName())\n\n\t\/\/ add selector\n\tquery = addWhere(query, selector)\n\terr := query.Find(data).Error\n\tif err == gorm.RecordNotFound {\n\t\treturn nil\n\t}\n\treturn err\n}\n\nfunc (b *Bongo) One(i Modellable, data interface{}, selector map[string]interface{}) error {\n\n\t\/\/ init query\n\tquery := b.DB\n\n\t\/\/ add table name\n\tquery = query.Table(i.TableName())\n\n\t\/\/ add selector\n\tquery = addWhere(query, selector)\n\n\t\/\/ add limit\n\tquery.Limit(1)\n\n\treturn query.Find(data).Error\n}\n\nfunc (b *Bongo) AfterCreate(i Modellable) {\n\teventName := fmt.Sprintf(\"%s_created\", i.TableName())\n\tdata, err := json.Marshal(i.Self())\n\tif err != nil {\n\t\t\/\/ here try to resend this message to RMQ again, than\n\t\t\/\/ persist it to somewhere!#!##@$%#?\n\t\t\/\/ those messages are really important now\n\t\tfmt.Println(\"Error occured\", err)\n\t\treturn\n\t}\n\terr = b.Broker.Publish(eventName, data)\n\tif err != nil {\n\t\tfmt.Println(\"jhasdjhadsjdasj\", err)\n\t}\n}\n\nfunc (b *Bongo) AfterUpdate(i Modellable) {\n\teventName := fmt.Sprintf(\"%s_updated\", i.TableName())\n\tdata, err := json.Marshal(i.Self())\n\tif err != nil {\n\t\t\/\/ here try to resend this message to RMQ again, than\n\t\t\/\/ persist it to somewhere!#!##@$%#?\n\t\t\/\/ those messages are really important now\n\t\tfmt.Println(\"Error occured\", err)\n\t\treturn\n\t}\n\terr = b.Broker.Publish(eventName, data)\n\tif err != nil {\n\t\tfmt.Println(\"jhasdjhadsjdasj\", err)\n\t}\n}\n\nfunc (b *Bongo) AfterDelete(i Modellable) {\n\teventName := fmt.Sprintf(\"%s_deleted\", i.TableName())\n\tdata, err := json.Marshal(i.Self())\n\tif err != nil {\n\t\t\/\/ here try to resend this message to RMQ again, than\n\t\t\/\/ persist it to somewhere!#!##@$%#?\n\t\t\/\/ those messages are really important now\n\t\tfmt.Println(\"Error occured\", err)\n\t\treturn\n\t}\n\terr = b.Broker.Publish(eventName, data)\n\tif err != nil {\n\t\tfmt.Println(\"jhasdjhadsjdasj\", err)\n\t}\n}\n\nfunc addSort(query *gorm.DB, options map[string]interface{}) *gorm.DB {\n\n\tif options == nil {\n\t\treturn query\n\t}\n\n\tvar opts []string\n\tfor key, val := range options {\n\t\topts = append(opts, fmt.Sprintf(\"%s %v\", key, val))\n\t}\n\treturn query.Order(strings.Join(opts, \",\"))\n}\n\nfunc addPluck(query *gorm.DB, plucked map[string]interface{}) *gorm.DB {\n\n\tif plucked == nil {\n\t\treturn query\n\t}\n\n\tvar opts []string\n\tfor key := range plucked {\n\t\topts = append(opts, fmt.Sprintf(\"%s\", key))\n\t}\n\tfmt.Println(strings.Join(opts, \",\"))\n\treturn query.Select(strings.Join(opts, \",\"))\n}\n\nfunc addWhere(query *gorm.DB, selector map[string]interface{}) *gorm.DB {\n\tif selector == nil {\n\t\treturn query\n\t}\n\treturn query.Where(selector)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nconst (\n\tpostInstall = `#!\/bin\/bash\n\nKITE_PLIST=\"\/Library\/LaunchAgents\/com.koding.kite.{{.}}.plist\"\nchown root:wheel ${KITE_PLIST}\nchmod 644 ${KITE_PLIST}\n\necho $USER\nsu $USER -c \"\/bin\/launchctl load ${KITE_PLIST}\"\n\nexit 0\n`\n\n\tpreInstall = `#!\/bin\/sh\n\necho \"Checking for plist\"\nif \/bin\/launchctl list \"com.koding.kite.{{.}}.plist\" &> \/dev\/null; then\n echo \"Unloading plist\"\n \/bin\/launchctl unload \"\/Library\/LaunchAgents\/com.koding.kite.{{.}}.plist\"\nfi\n\nKDFILE=\/usr\/local\/bin\/{{.}}\n\necho \"Removing previous installation\"\nif [ -f $KDFILE ]; then\n rm -r $KDFILE\nfi\n\nexit 0\n`\n\n\tdistribution = `<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"no\"?>\n<installer-script minSpecVersion=\"1.000000\">\n <title>Koding Kite<\/title>\n <background mime-type=\"image\/png\" file=\"bg.png\"\/>\n <options customize=\"never\" allow-external-scripts=\"no\"\/>\n <!-- <domains enable_localSystem=\"true\" \/> -->\n <options rootVolumeOnly=\"true\" \/>\n <installation-check script=\"installCheck();\"\/>\n <script>\nfunction installCheck() {\n if(system.files.fileExistsAtPath('\/usr\/local\/bin\/{{.}}')) {\n my.result.title = 'Previous Installation Detected';\n my.result.message = 'A previous installation of Koding {{.}} Kite exists at \/usr\/local\/bin. This installer will remove the previous installation prior to installing. Please back up any data before proceeding.';\n my.result.type = 'Warning';\n return false;\n }\n return true;\n}\n <\/script>\n <!-- List all component packages -->\n <pkg-ref\n id=\"com.koding.kite.{{.}}.pkg\"\n auth=\"root\">com.koding.kite.{{.}}.pkg<\/pkg-ref>\n <choices-outline>\n <line choice=\"com.koding.kite.{{.}}.choice\"\/>\n <\/choices-outline>\n <choice\n id=\"com.koding.kite.{{.}}.choice\"\n title=\"Koding Kite\"\n customLocation=\"\/\">\n <pkg-ref id=\"com.koding.kite.{{.}}.pkg\"\/>\n <\/choice>\n<\/installer-script>\n`\n\n\tlaunchAgent = `<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-\/\/Apple\/\/DTD PLIST 1.0\/\/EN\" \"http:\/\/www.apple.com\/DTDs\/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n <key>KeepAlive<\/key>\n <dict>\n <key>NetworkState<\/key>\n <true\/>\n <\/dict>\n <key>Label<\/key>\n <string>com.koding.kite.{{.}}<\/string>\n <key>ProgramArguments<\/key>\n <array>\n <string>\/usr\/local\/bin\/{{.}}<\/string>\n <\/array>\n <key>RunAtLoad<\/key>\n <true\/>\n<\/dict>\n<\/plist>\n`\n)\n<commit_msg>kd\/build: fix reloading of launchAgents, now it stops and starts the agent<commit_after>package main\n\nconst (\n\tpreInstall = `#!\/bin\/sh\n\nKITE_PLIST=\"\/Library\/LaunchAgents\/com.koding.kite.{{.}}.plist\"\n\n# see: https:\/\/lists.macosforge.org\/pipermail\/launchd-dev\/2011-January\/000890.html\necho \"Checking to unload plist\"\nfor pid_uid in $(ps -axo pid,uid,args | grep -i \"[l]oginwindow.app\" | awk '{print $1 \",\" $2}'); do\n pid=$(echo $pid_uid | cut -d, -f1)\n uid=$(echo $pid_uid | cut -d, -f2)\n echo \"unloading launch agent\"\n launchctl bsexec \"$pid\" chroot -u \"$uid\" \/ launchctl unload ${KITE_PLIST}\ndone\n\nKDFILE=\/usr\/local\/bin\/{{.}}\n\necho \"Removing previous installation\"\nif [ -f $KDFILE ]; then\n rm -r $KDFILE\nfi\n\nexit 0\n`\n\tpostInstall = `#!\/bin\/bash\n\nKITE_PLIST=\"\/Library\/LaunchAgents\/com.koding.kite.{{.}}.plist\"\nchown root:wheel ${KITE_PLIST}\nchmod 644 ${KITE_PLIST}\n\n# this is simpler than below, but it doesn't get the USER env always, don't know why.\n# echo $USER\n# su $USER -c \"\/bin\/launchctl load ${KITE_PLIST}\"\n\n# see: https:\/\/lists.macosforge.org\/pipermail\/launchd-dev\/2011-January\/000890.html\necho \"running postinstall actions for all logged in users.\"\nfor pid_uid in $(ps -axo pid,uid,args | grep -i \"[l]oginwindow.app\" | awk '{print $1 \",\" $2}'); do\n pid=$(echo $pid_uid | cut -d, -f1)\n uid=$(echo $pid_uid | cut -d, -f2)\n echo \"loading launch agent\"\n launchctl bsexec \"$pid\" chroot -u \"$uid\" \/ launchctl load ${KITE_PLIST}\ndone\n\nexit 0\n`\n\n\tdistribution = `<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"no\"?>\n<installer-script minSpecVersion=\"1.000000\">\n <title>Koding Kite<\/title>\n <background mime-type=\"image\/png\" file=\"bg.png\"\/>\n <options customize=\"never\" allow-external-scripts=\"no\"\/>\n <!-- <domains enable_localSystem=\"true\" \/> -->\n <options rootVolumeOnly=\"true\" \/>\n <installation-check script=\"installCheck();\"\/>\n <script>\nfunction installCheck() {\n if(system.files.fileExistsAtPath('\/usr\/local\/bin\/{{.}}')) {\n my.result.title = 'Previous Installation Detected';\n my.result.message = 'A previous installation of Koding {{.}} Kite exists at \/usr\/local\/bin. This installer will remove the previous installation prior to installing. Please back up any data before proceeding.';\n my.result.type = 'Warning';\n return false;\n }\n return true;\n}\n <\/script>\n <!-- List all component packages -->\n <pkg-ref\n id=\"com.koding.kite.{{.}}.pkg\"\n auth=\"root\">com.koding.kite.{{.}}.pkg<\/pkg-ref>\n <choices-outline>\n <line choice=\"com.koding.kite.{{.}}.choice\"\/>\n <\/choices-outline>\n <choice\n id=\"com.koding.kite.{{.}}.choice\"\n title=\"Koding Kite\"\n customLocation=\"\/\">\n <pkg-ref id=\"com.koding.kite.{{.}}.pkg\"\/>\n <\/choice>\n<\/installer-script>\n`\n\n\tlaunchAgent = `<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-\/\/Apple\/\/DTD PLIST 1.0\/\/EN\" \"http:\/\/www.apple.com\/DTDs\/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n <key>KeepAlive<\/key>\n <dict>\n <key>NetworkState<\/key>\n <true\/>\n <\/dict>\n <key>Label<\/key>\n <string>com.koding.kite.{{.}}<\/string>\n <key>ProgramArguments<\/key>\n <array>\n <string>\/usr\/local\/bin\/{{.}}<\/string>\n <\/array>\n <key>RunAtLoad<\/key>\n <true\/>\n<\/dict>\n<\/plist>\n`\n)\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2021 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"text\/template\"\n)\n\ntype label struct {\n\tName string `json:\"name\"`\n}\n\ntype prInfo struct {\n\tLabels []label `json:\"labels\"`\n\tNumber int `json:\"number\"`\n\tTitle string `json:\"title\"`\n}\n\nconst (\n\tmarkdownTemplate = `\n{{- range $typeName, $components := . }}\n## {{ $typeName }}\n{{- range $componentName, $component := $components }} \n### {{ $componentName}}\n{{- range $prInfo := $component }}\n - {{ $prInfo.Title }} #{{ $prInfo.Number }}\n{{- end }}\n{{- end }}\n{{- end }}\n`\n\n\tprefixType = \"Type: \"\n\tprefixComponent = \"Component: \"\n)\n\nfunc loadMergedPRs(from, to string) ([]string, error) {\n\tcmd := exec.Command(\"git\", \"log\", \"--oneline\", fmt.Sprintf(\"%s...%s\", from, to))\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\texecErr := err.(*exec.ExitError)\n\t\treturn nil, fmt.Errorf(\"%s:\\nstderr: %s\\nstdout: %s\", err.Error(), execErr.Stderr, out)\n\t}\n\n\tvar prs []string\n\trgx := regexp.MustCompile(`Merge pull request #(\\d+)`)\n\tlines := strings.Split(string(out), \"\\n\")\n\tfor _, line := range lines {\n\t\tlineInfo := rgx.FindStringSubmatch(line)\n\t\tif len(lineInfo) == 2 {\n\t\t\tprs = append(prs, lineInfo[1])\n\t\t}\n\t}\n\n\tsort.Strings(prs)\n\treturn prs, nil\n}\n\nfunc loadPRinfo(pr string) (prInfo, error) {\n\tcmd := exec.Command(\"gh\", \"pr\", \"view\", pr, \"--json\", \"title,number,labels\")\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\texecErr := err.(*exec.ExitError)\n\t\treturn prInfo{}, fmt.Errorf(\"%s:\\nstderr: %s\\nstdout: %s\", err.Error(), execErr.Stderr, out)\n\t}\n\tvar prInfo prInfo\n\terr = json.Unmarshal(out, &prInfo)\n\treturn prInfo, err\n}\n\nfunc loadAllPRs(prs []string) ([]prInfo, error) {\n\terrChan := make(chan error)\n\twgDone := make(chan bool)\n\tprChan := make(chan string, len(prs))\n\t\/\/ fill the work queue\n\tfor _, s := range prs {\n\t\tprChan <- s\n\t}\n\tclose(prChan)\n\n\tvar prInfos []prInfo\n\n\twg := sync.WaitGroup{}\n\tmu := sync.Mutex{}\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\t\/\/ load meta data about PRs\n\t\t\tdefer wg.Done()\n\t\t\tfor b := range prChan {\n\t\t\t\tfmt.Print(\".\")\n\t\t\t\tprInfo, err := loadPRinfo(b)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrChan <- err\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tmu.Lock()\n\t\t\t\tprInfos = append(prInfos, prInfo)\n\t\t\t\tmu.Unlock()\n\t\t\t}\n\t\t}()\n\t}\n\n\tgo func() {\n\t\t\/\/ wait for the loading to finish\n\t\twg.Wait()\n\t\tclose(wgDone)\n\t}()\n\n\tvar err error\n\tselect {\n\tcase <-wgDone:\n\t\tbreak\n\tcase err = <-errChan:\n\t\tbreak\n\t}\n\n\tfmt.Println()\n\treturn prInfos, err\n}\n\nfunc groupPRs(prInfos []prInfo) map[string]map[string][]prInfo {\n\tprPerType := map[string]map[string][]prInfo{}\n\n\tfor _, info := range prInfos {\n\t\tvar typ, component string\n\t\tfor _, lbl := range info.Labels {\n\t\t\tswitch {\n\t\t\tcase strings.HasPrefix(lbl.Name, prefixType):\n\t\t\t\ttyp = strings.TrimPrefix(lbl.Name, prefixType)\n\t\t\tcase strings.HasPrefix(lbl.Name, prefixComponent):\n\t\t\t\tcomponent = strings.TrimPrefix(lbl.Name, prefixComponent)\n\t\t\t}\n\t\t}\n\t\tif typ == \"\" {\n\t\t\ttyp = \"Other\"\n\t\t}\n\t\tif component == \"\" {\n\t\t\tcomponent = \"Other\"\n\t\t}\n\t\tcomponents, exists := prPerType[typ]\n\t\tif !exists {\n\t\t\tcomponents = map[string][]prInfo{}\n\t\t\tprPerType[typ] = components\n\t\t}\n\n\t\tprsPerComponentAndType := components[component]\n\t\tcomponents[component] = append(prsPerComponentAndType, info)\n\t}\n\treturn prPerType\n}\n\nfunc writePrInfos(fileout string, prPerType map[string]map[string][]prInfo) (err error) {\n\twriteTo := os.Stdout\n\tif fileout != \"\" {\n\t\twriteTo, err = os.OpenFile(fileout, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tt := template.Must(template.New(\"markdownTemplate\").Parse(markdownTemplate))\n\terr = t.ExecuteTemplate(writeTo, \"markdownTemplate\", prPerType)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tfrom := flag.String(\"from\", \"\", \"from sha\/tag\/branch\")\n\tto := flag.String(\"to\", \"HEAD\", \"to sha\/tag\/branch\")\n\tfileout := flag.String(\"file\", \"\", \"file on which to write release notes, stdout if empty\")\n\n\tflag.Parse()\n\n\tprs, err := loadMergedPRs(*from, *to)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tprInfos, err := loadAllPRs(prs)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tprPerType := groupPRs(prInfos)\n\n\terr = writePrInfos(*fileout, prPerType)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<commit_msg>best commit ever<commit_after>\/*\nCopyright 2021 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"text\/template\"\n)\n\ntype label struct {\n\tName string `json:\"name\"`\n}\n\ntype prInfo struct {\n\tLabels []label `json:\"labels\"`\n\tNumber int `json:\"number\"`\n\tTitle string `json:\"title\"`\n}\n\nconst (\n\tmarkdownTemplate = `\n{{- range $typeName, $components := . }}\n## {{ $typeName }}\n{{- range $componentName, $component := $components }} \n### {{ $componentName}}\n{{- range $prInfo := $component }}\n - {{ $prInfo.Title }} #{{ $prInfo.Number }}\n{{- end }}\n{{- end }}\n{{- end }}\n`\n\n\tprefixType = \"Type: \"\n\tprefixComponent = \"Component: \"\n)\n\nfunc loadMergedPRs(from, to string) ([]string, error) {\n\tcmd := exec.Command(\"git\", \"log\", \"--oneline\", fmt.Sprintf(\"%s..%s\", from, to))\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\texecErr := err.(*exec.ExitError)\n\t\treturn nil, fmt.Errorf(\"%s:\\nstderr: %s\\nstdout: %s\", err.Error(), execErr.Stderr, out)\n\t}\n\n\tvar prs []string\n\trgx := regexp.MustCompile(`Merge pull request #(\\d+)`)\n\tlines := strings.Split(string(out), \"\\n\")\n\tfor _, line := range lines {\n\t\tlineInfo := rgx.FindStringSubmatch(line)\n\t\tif len(lineInfo) == 2 {\n\t\t\tprs = append(prs, lineInfo[1])\n\t\t}\n\t}\n\n\tsort.Strings(prs)\n\treturn prs, nil\n}\n\nfunc loadPRinfo(pr string) (prInfo, error) {\n\tcmd := exec.Command(\"gh\", \"pr\", \"view\", pr, \"--json\", \"title,number,labels\")\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\texecErr := err.(*exec.ExitError)\n\t\treturn prInfo{}, fmt.Errorf(\"%s:\\nstderr: %s\\nstdout: %s\", err.Error(), execErr.Stderr, out)\n\t}\n\tvar prInfo prInfo\n\terr = json.Unmarshal(out, &prInfo)\n\treturn prInfo, err\n}\n\nfunc loadAllPRs(prs []string) ([]prInfo, error) {\n\terrChan := make(chan error)\n\twgDone := make(chan bool)\n\tprChan := make(chan string, len(prs))\n\t\/\/ fill the work queue\n\tfor _, s := range prs {\n\t\tprChan <- s\n\t}\n\tclose(prChan)\n\n\tvar prInfos []prInfo\n\n\twg := sync.WaitGroup{}\n\tmu := sync.Mutex{}\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\t\/\/ load meta data about PRs\n\t\t\tdefer wg.Done()\n\t\t\tfor b := range prChan {\n\t\t\t\tfmt.Print(\".\")\n\t\t\t\tprInfo, err := loadPRinfo(b)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrChan <- err\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tmu.Lock()\n\t\t\t\tprInfos = append(prInfos, prInfo)\n\t\t\t\tmu.Unlock()\n\t\t\t}\n\t\t}()\n\t}\n\n\tgo func() {\n\t\t\/\/ wait for the loading to finish\n\t\twg.Wait()\n\t\tclose(wgDone)\n\t}()\n\n\tvar err error\n\tselect {\n\tcase <-wgDone:\n\t\tbreak\n\tcase err = <-errChan:\n\t\tbreak\n\t}\n\n\tfmt.Println()\n\treturn prInfos, err\n}\n\nfunc groupPRs(prInfos []prInfo) map[string]map[string][]prInfo {\n\tprPerType := map[string]map[string][]prInfo{}\n\n\tfor _, info := range prInfos {\n\t\tvar typ, component string\n\t\tfor _, lbl := range info.Labels {\n\t\t\tswitch {\n\t\t\tcase strings.HasPrefix(lbl.Name, prefixType):\n\t\t\t\ttyp = strings.TrimPrefix(lbl.Name, prefixType)\n\t\t\tcase strings.HasPrefix(lbl.Name, prefixComponent):\n\t\t\t\tcomponent = strings.TrimPrefix(lbl.Name, prefixComponent)\n\t\t\t}\n\t\t}\n\t\tif typ == \"\" {\n\t\t\ttyp = \"Other\"\n\t\t}\n\t\tif component == \"\" {\n\t\t\tcomponent = \"Other\"\n\t\t}\n\t\tcomponents, exists := prPerType[typ]\n\t\tif !exists {\n\t\t\tcomponents = map[string][]prInfo{}\n\t\t\tprPerType[typ] = components\n\t\t}\n\n\t\tprsPerComponentAndType := components[component]\n\t\tcomponents[component] = append(prsPerComponentAndType, info)\n\t}\n\treturn prPerType\n}\n\nfunc writePrInfos(fileout string, prPerType map[string]map[string][]prInfo) (err error) {\n\twriteTo := os.Stdout\n\tif fileout != \"\" {\n\t\twriteTo, err = os.OpenFile(fileout, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tt := template.Must(template.New(\"markdownTemplate\").Parse(markdownTemplate))\n\terr = t.ExecuteTemplate(writeTo, \"markdownTemplate\", prPerType)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tfrom := flag.String(\"from\", \"\", \"from sha\/tag\/branch\")\n\tto := flag.String(\"to\", \"HEAD\", \"to sha\/tag\/branch\")\n\tfileout := flag.String(\"file\", \"\", \"file on which to write release notes, stdout if empty\")\n\n\tflag.Parse()\n\n\tprs, err := loadMergedPRs(*from, *to)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tprInfos, err := loadAllPRs(prs)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tprPerType := groupPRs(prInfos)\n\n\terr = writePrInfos(*fileout, prPerType)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package golang\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"text\/template\"\n\n\t\"github.com\/sourcegraph\/go-vcsurl\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/buildstore\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/config\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/container\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/dep2\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/repo\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/toolchain\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/unit\"\n)\n\nfunc init() {\n\t\/\/ Register the Go toolchain.\n\ttoolchain.Register(\"golang\", defaultGoVersion)\n}\n\n\/\/ goVersion represents a Go release: where to download it, how to create graph\n\/\/ references to it, etc.\ntype goVersion struct {\n\t\/\/ VersionString is the version string for this Go version, as listed at\n\t\/\/ https:\/\/code.google.com\/p\/go\/downloads\/list. (E.g., \"go1.2.1\" or\n\t\/\/ \"go1.2rc5\".)\n\tVersionString string\n\n\tRepositoryCloneURL string\n\tRepositoryURI repo.URI\n\tRepositoryVCS vcsurl.VCS\n\tVCSRevision string\n\tBaseImportPath string\n\tBasePkgDir string\n\n\tresolveCache map[string]*dep2.ResolvedTarget\n\tresolveCacheMu sync.Mutex\n}\n\nvar goVersions = map[string]*goVersion{\n\t\"1.2.1\": &goVersion{\n\t\tVersionString: \"go1.2.1\",\n\t\tRepositoryCloneURL: \"https:\/\/code.google.com\/p\/go\",\n\t\tRepositoryURI: \"code.google.com\/p\/go\",\n\t\tRepositoryVCS: vcsurl.Mercurial,\n\t\tVCSRevision: \"go1.2.1\",\n\t\tBaseImportPath: \"code.google.com\/p\/go\/src\/pkg\",\n\t\tBasePkgDir: \"src\/pkg\",\n\t},\n}\n\nvar defaultGoVersion = goVersions[\"1.2.1\"]\n\nfunc (v *goVersion) baseDockerfile() ([]byte, error) {\n\tvar buf bytes.Buffer\n\terr := template.Must(template.New(\"\").Parse(baseDockerfile)).Execute(&buf, struct {\n\t\tGoVersion *goVersion\n\t\tGOPATH string\n\t}{\n\t\tGoVersion: v,\n\t\tGOPATH: containerGOPATH,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}\n\nfunc (v *goVersion) containerForRepo(dir string, unit unit.SourceUnit, c *config.Repository) (*container.Container, error) {\n\tdockerfile, err := v.baseDockerfile()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgoConfig := v.goConfig(c)\n\tmountDir := filepath.Join(containerGOPATH, \"src\", goConfig.BaseImportPath)\n\tcontainerDir := mountDir\n\n\tvar preCmdDockerfile []byte\n\tvar addDirs, addFiles [][2]string\n\tif c.URI == v.RepositoryURI {\n\t\t\/\/ Go stdlib. This is fairly hacky. We want stdlib package paths to not\n\t\t\/\/ be prefixed with \"code.google.com\/p\/go\" everywhere (just\n\t\tdockerfile = append(dockerfile, []byte(fmt.Sprintf(`\n# Adjust for Go stdlib\nENV GOROOT \/tmp\/go\nRUN apt-get update -qqy\nRUN apt-get install -qqy build-essential\nRUN apt-get install -qqy mercurial\n\t`))...)\n\n\t\t\/\/ Add all dirs needed for make.bash. Exclude dirs that change when\n\t\t\/\/ we build, so that we can take advantage of ADD caching and not\n\t\t\/\/ recompile the Go stdlib for each package.\n\t\tentries, err := ioutil.ReadDir(dir)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, e := range entries {\n\t\t\tif n := e.Name(); n == \".\" || n == \"test\" || n == \"api\" || n == \"..\" || n == \"pkg\" || n == \"bin\" || n == buildstore.BuildDataDirName {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !e.Mode().IsDir() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\taddDirs = append(addDirs, [2]string{e.Name(), filepath.Join(\"\/tmp\/go\", e.Name())})\n\t\t}\n\n\t\t\/\/ We need to actually build the version of Go we want to analyze.\n\t\tpreCmdDockerfile = []byte(fmt.Sprintf(`\nRUN cd \/tmp\/go\/src && .\/make.bash\n`))\n\n\t\tcontainerDir = \"\/tmp\/go\"\n\t}\n\n\treturn &container.Container{\n\t\tDockerfile: dockerfile,\n\t\tRunOptions: []string{\"-v\", dir + \":\" + mountDir},\n\t\tPreCmdDockerfile: preCmdDockerfile,\n\t\tDir: containerDir,\n\t\tAddDirs: addDirs,\n\t\tAddFiles: addFiles,\n\t}, nil\n}\n\nconst containerGOPATH = \"\/tmp\/sg\/gopath\"\n\nconst baseDockerfile = `FROM ubuntu:14.04\nRUN apt-get update -qq\nRUN apt-get install -qqy curl\n\n# Install Go {{.GoVersion.VersionString}}.\nRUN curl -o \/tmp\/golang.tgz https:\/\/go.googlecode.com\/files\/{{.GoVersion.VersionString}}.linux-amd64.tar.gz\nRUN tar -xzf \/tmp\/golang.tgz -C \/usr\/local\nENV GOROOT \/usr\/local\/go\n\n# Add \"go\" to the PATH.\nENV PATH \/usr\/local\/go\/bin:\/usr\/local\/sbin:\/usr\/local\/bin:\/usr\/sbin:\/usr\/bin:\/sbin:\/bin\n\nENV GOPATH {{.GOPATH}}\n`\n\ntype baseBuild struct {\n\tStdlib *goVersion\n\tGOPATH string\n}\n<commit_msg>use go 1.3 in go grapher (closes #871)<commit_after>package golang\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"text\/template\"\n\n\t\"github.com\/sourcegraph\/go-vcsurl\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/buildstore\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/config\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/container\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/dep2\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/repo\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/toolchain\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/unit\"\n)\n\nfunc init() {\n\t\/\/ Register the Go toolchain.\n\ttoolchain.Register(\"golang\", defaultGoVersion)\n}\n\n\/\/ goVersion represents a Go release: where to download it, how to create graph\n\/\/ references to it, etc.\ntype goVersion struct {\n\t\/\/ VersionString is the version string for this Go version, as listed at\n\t\/\/ https:\/\/code.google.com\/p\/go\/downloads\/list. (E.g., \"go1.2.1\" or\n\t\/\/ \"go1.2rc5\".)\n\tVersionString string\n\n\tRepositoryCloneURL string\n\tRepositoryURI repo.URI\n\tRepositoryVCS vcsurl.VCS\n\tVCSRevision string\n\tBaseImportPath string\n\tBasePkgDir string\n\n\tresolveCache map[string]*dep2.ResolvedTarget\n\tresolveCacheMu sync.Mutex\n}\n\nvar goVersions = map[string]*goVersion{\n\t\"1.3\": &goVersion{\n\t\tVersionString: \"go1.3\",\n\t\tRepositoryCloneURL: \"https:\/\/code.google.com\/p\/go\",\n\t\tRepositoryURI: \"code.google.com\/p\/go\",\n\t\tRepositoryVCS: vcsurl.Mercurial,\n\t\tVCSRevision: \"go1.3\",\n\t\tBaseImportPath: \"code.google.com\/p\/go\/src\/pkg\",\n\t\tBasePkgDir: \"src\/pkg\",\n\t},\n}\n\nvar defaultGoVersion = goVersions[\"1.3\"]\n\nfunc (v *goVersion) baseDockerfile() ([]byte, error) {\n\tvar buf bytes.Buffer\n\terr := template.Must(template.New(\"\").Parse(baseDockerfile)).Execute(&buf, struct {\n\t\tGoVersion *goVersion\n\t\tGOPATH string\n\t}{\n\t\tGoVersion: v,\n\t\tGOPATH: containerGOPATH,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf.Bytes(), nil\n}\n\nfunc (v *goVersion) containerForRepo(dir string, unit unit.SourceUnit, c *config.Repository) (*container.Container, error) {\n\tdockerfile, err := v.baseDockerfile()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgoConfig := v.goConfig(c)\n\tmountDir := filepath.Join(containerGOPATH, \"src\", goConfig.BaseImportPath)\n\tcontainerDir := mountDir\n\n\tvar preCmdDockerfile []byte\n\tvar addDirs, addFiles [][2]string\n\tif c.URI == v.RepositoryURI {\n\t\t\/\/ Go stdlib. This is fairly hacky. We want stdlib package paths to not\n\t\t\/\/ be prefixed with \"code.google.com\/p\/go\" everywhere (just\n\t\tdockerfile = append(dockerfile, []byte(fmt.Sprintf(`\n# Adjust for Go stdlib\nENV GOROOT \/tmp\/go\nRUN apt-get update -qqy\nRUN apt-get install -qqy build-essential\nRUN apt-get install -qqy mercurial\n\t`))...)\n\n\t\t\/\/ Add all dirs needed for make.bash. Exclude dirs that change when\n\t\t\/\/ we build, so that we can take advantage of ADD caching and not\n\t\t\/\/ recompile the Go stdlib for each package.\n\t\tentries, err := ioutil.ReadDir(dir)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, e := range entries {\n\t\t\tif n := e.Name(); n == \".\" || n == \"test\" || n == \"api\" || n == \"..\" || n == \"pkg\" || n == \"bin\" || n == buildstore.BuildDataDirName {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !e.Mode().IsDir() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\taddDirs = append(addDirs, [2]string{e.Name(), filepath.Join(\"\/tmp\/go\", e.Name())})\n\t\t}\n\n\t\t\/\/ We need to actually build the version of Go we want to analyze.\n\t\tpreCmdDockerfile = []byte(fmt.Sprintf(`\nRUN cd \/tmp\/go\/src && .\/make.bash\n`))\n\n\t\tcontainerDir = \"\/tmp\/go\"\n\t}\n\n\treturn &container.Container{\n\t\tDockerfile: dockerfile,\n\t\tRunOptions: []string{\"-v\", dir + \":\" + mountDir},\n\t\tPreCmdDockerfile: preCmdDockerfile,\n\t\tDir: containerDir,\n\t\tAddDirs: addDirs,\n\t\tAddFiles: addFiles,\n\t}, nil\n}\n\nconst containerGOPATH = \"\/tmp\/sg\/gopath\"\n\nconst baseDockerfile = `FROM ubuntu:14.04\nRUN apt-get update -qq\nRUN apt-get install -qqy curl\n\n# Install Go {{.GoVersion.VersionString}}.\nRUN curl -Lo \/tmp\/golang.tgz http:\/\/golang.org\/dl\/{{.GoVersion.VersionString}}.linux-amd64.tar.gz\nRUN tar -xzf \/tmp\/golang.tgz -C \/usr\/local\nENV GOROOT \/usr\/local\/go\n\n# Add \"go\" to the PATH.\nENV PATH \/usr\/local\/go\/bin:\/usr\/local\/sbin:\/usr\/local\/bin:\/usr\/sbin:\/usr\/bin:\/sbin:\/bin\n\nENV GOPATH {{.GOPATH}}\n`\n\ntype baseBuild struct {\n\tStdlib *goVersion\n\tGOPATH string\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage local\n\nimport (\n\t\"github.com\/globocom\/config\"\n\t\"github.com\/globocom\/tsuru\/fs\"\n\t\"github.com\/globocom\/tsuru\/log\"\n\t\"io\/ioutil\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar fsystem fs.Fs\n\nfunc filesystem() fs.Fs {\n\tif fsystem == nil {\n\t\tfsystem = fs.OsFs{}\n\t}\n\treturn fsystem\n}\n\n\/\/ container represents an lxc container with the given name.\ntype container struct {\n\tname string\n}\n\n\/\/ runCmd executes commands and log the given stdout and stderror.\nfunc runCmd(cmd string, args ...string) error {\n\tcommand := exec.Command(cmd, args...)\n\tout, err := command.CombinedOutput()\n\tlog.Printf(\"running the cmd: %s with the args: %s\", cmd, args)\n\tlog.Print(string(out))\n\treturn err\n}\n\n\/\/ ip returns the ip for the container.\nfunc (c *container) ip() string {\n\ttimeout, err := config.GetInt(\"local:ip-timeout\")\n\tif err != nil {\n\t\ttimeout = 60\n\t}\n\tquit := time.After(time.Duration(timeout) * time.Second)\n\ttick := time.Tick(2 * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-tick:\n\t\t\tfile, _ := filesystem().Open(\"\/var\/lib\/misc\/dnsmasq.leases\")\n\t\t\tdata, _ := ioutil.ReadAll(file)\n\t\t\tlog.Print(\"dnsmasq.leases\")\n\t\t\tlog.Print(string(data))\n\t\t\tfor _, line := range strings.Split(string(data), \"\\n\") {\n\t\t\t\tif strings.Index(line, c.name) != -1 {\n\t\t\t\t\tlog.Printf(\"ip in %s\", line)\n\t\t\t\t\treturn strings.Split(line, \" \")[2]\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-quit:\n\t\t\treturn \"\"\n\t\tdefault:\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t}\n\t}\n}\n\n\/\/ create creates a lxc container with ubuntu template by default.\nfunc (c *container) create() error {\n\tkeyPath, err := config.GetString(\"local:authorized-key-path\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn runCmd(\"sudo\", \"lxc-create\", \"-t\", \"ubuntu\", \"-n\", c.name, \"--\", \"-S\", keyPath)\n}\n\n\/\/ start starts a lxc container.\nfunc (c *container) start() error {\n\treturn runCmd(\"sudo\", \"lxc-start\", \"--daemon\", \"-n\", c.name)\n}\n\n\/\/ stop stops a lxc container.\nfunc (c *container) stop() error {\n\treturn runCmd(\"sudo\", \"lxc-stop\", \"-n\", c.name)\n}\n\n\/\/ destroy destory a lxc container.\nfunc (c *container) destroy() error {\n\treturn runCmd(\"sudo\", \"lxc-destroy\", \"-n\", c.name)\n}\n<commit_msg>provision\/local: fix code<commit_after>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage local\n\nimport (\n\t\"github.com\/globocom\/config\"\n\t\"github.com\/globocom\/tsuru\/fs\"\n\t\"github.com\/globocom\/tsuru\/log\"\n\t\"io\/ioutil\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar fsystem fs.Fs\n\nfunc filesystem() fs.Fs {\n\tif fsystem == nil {\n\t\tfsystem = fs.OsFs{}\n\t}\n\treturn fsystem\n}\n\n\/\/ container represents an lxc container with the given name.\ntype container struct {\n\tname string\n}\n\n\/\/ runCmd executes commands and log the given stdout and stderror.\nfunc runCmd(cmd string, args ...string) error {\n\tcommand := exec.Command(cmd, args...)\n\tout, err := command.CombinedOutput()\n\tlog.Printf(\"running the cmd: %s with the args: %s\", cmd, args)\n\tlog.Print(string(out))\n\treturn err\n}\n\n\/\/ ip returns the ip for the container.\nfunc (c *container) ip() string {\n\ttimeout, err := config.GetInt(\"local:ip-timeout\")\n\tif err != nil {\n\t\ttimeout = 60\n\t}\n\tquit := time.After(time.Duration(timeout) * time.Second)\n\ttick := time.Tick(2 * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-tick:\n\t\t\tfile, _ := filesystem().Open(\"\/var\/lib\/misc\/dnsmasq.leases\")\n\t\t\tdata, _ := ioutil.ReadAll(file)\n\t\t\tlog.Print(\"dnsmasq.leases\")\n\t\t\tlog.Print(string(data))\n\t\t\tfor _, line := range strings.Split(string(data), \"\\n\") {\n\t\t\t\tif strings.Index(line, c.name) != -1 {\n\t\t\t\t\tlog.Printf(\"ip in %s\", line)\n\t\t\t\t\treturn strings.Split(line, \" \")[2]\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-quit:\n\t\t\treturn \"\"\n\t\tdefault:\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ create creates a lxc container with ubuntu template by default.\nfunc (c *container) create() error {\n\tkeyPath, err := config.GetString(\"local:authorized-key-path\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn runCmd(\"sudo\", \"lxc-create\", \"-t\", \"ubuntu\", \"-n\", c.name, \"--\", \"-S\", keyPath)\n}\n\n\/\/ start starts a lxc container.\nfunc (c *container) start() error {\n\treturn runCmd(\"sudo\", \"lxc-start\", \"--daemon\", \"-n\", c.name)\n}\n\n\/\/ stop stops a lxc container.\nfunc (c *container) stop() error {\n\treturn runCmd(\"sudo\", \"lxc-stop\", \"-n\", c.name)\n}\n\n\/\/ destroy destory a lxc container.\nfunc (c *container) destroy() error {\n\treturn runCmd(\"sudo\", \"lxc-destroy\", \"-n\", c.name)\n}\n<|endoftext|>"} {"text":"<commit_before>package cellnet\n\nimport (\n\t\"runtime\/debug\"\n)\n\ntype EventQueue interface {\n\tStartLoop()\n\n\tStopLoop(result int)\n\n\t\/\/ 等待退出\n\tWait() int\n\n\t\/\/ 投递事件, 通过队列到达消费者端\n\tPost(callback func())\n\n\t\/\/ 是否捕获异常\n\tEnableCapturePanic(v bool)\n}\n\ntype eventQueue struct {\n\tqueue chan func()\n\n\texitSignal chan int\n\n\tcapturePanic bool\n}\n\n\/\/ 启动崩溃捕获\nfunc (q *eventQueue) EnableCapturePanic(v bool) {\n\tq.capturePanic = v\n}\n\n\/\/ 派发事件处理回调到队列中\nfunc (q *eventQueue) Post(callback func()) {\n\n\tif callback == nil {\n\t\treturn\n\t}\n\n\tq.queue <- callback\n}\n\n\/\/ 保护调用用户函数\nfunc (q *eventQueue) protectedCall(callback func()) {\n\n\tif callback == nil {\n\t\treturn\n\t}\n\n\tif q.capturePanic {\n\t\tdefer func() {\n\n\t\t\tif err := recover(); err != nil {\n\n\t\t\t\tdebug.PrintStack()\n\t\t\t}\n\n\t\t}()\n\t}\n\n\tcallback()\n}\n\n\/\/ 开启事件循环\nfunc (q *eventQueue) StartLoop() {\n\n\tgo func() {\n\t\tfor callback := range q.queue {\n\t\t\tq.protectedCall(callback)\n\t\t}\n\t}()\n}\n\n\/\/ 停止事件循环\nfunc (q *eventQueue) StopLoop(result int) {\n\tq.exitSignal <- result\n}\n\n\/\/ 等待退出消息\nfunc (q *eventQueue) Wait() int {\n\treturn <-q.exitSignal\n}\n\nconst DefaultQueueSize = 100\n\n\/\/ 创建默认长度的队列\nfunc NewEventQueue() EventQueue {\n\n\treturn NewEventQueueByLen(DefaultQueueSize)\n}\n\n\/\/ 创建指定长度的队列\nfunc NewEventQueueByLen(l int) EventQueue {\n\tself := &eventQueue{\n\t\tqueue: make(chan func(), l),\n\t\texitSignal: make(chan int),\n\t}\n\n\treturn self\n}\n\nfunc QueuedCall(ses Session, callback func()) {\n\tif ses == nil {\n\t\treturn\n\t}\n\n\tq := ses.Peer().EventQueue()\n\n\t\/\/ Peer有队列时,在队列线程调用用户处理函数\n\tif q != nil {\n\t\tq.Post(callback)\n\n\t} else {\n\n\t\t\/\/ 在I\/O线程调用用户处理函数\n\t\tcallback()\n\t}\n}\n<commit_msg>修改: 队列会在处理完所有数据后再退出<commit_after>package cellnet\n\nimport (\n\t\"runtime\/debug\"\n\t\"sync\"\n)\n\ntype EventQueue interface {\n\tStartLoop()\n\n\tStopLoop()\n\n\t\/\/ 等待退出\n\tWait()\n\n\t\/\/ 投递事件, 通过队列到达消费者端\n\tPost(callback func())\n\n\t\/\/ 是否捕获异常\n\tEnableCapturePanic(v bool)\n}\n\ntype eventQueue struct {\n\tqueue chan func()\n\n\tendSignal sync.WaitGroup\n\n\tcapturePanic bool\n}\n\n\/\/ 启动崩溃捕获\nfunc (self *eventQueue) EnableCapturePanic(v bool) {\n\tself.capturePanic = v\n}\n\n\/\/ 派发事件处理回调到队列中\nfunc (self *eventQueue) Post(callback func()) {\n\n\tif callback == nil {\n\t\treturn\n\t}\n\n\tself.queue <- callback\n}\n\n\/\/ 保护调用用户函数\nfunc (self *eventQueue) protectedCall(callback func()) {\n\n\tif self.capturePanic {\n\t\tdefer func() {\n\n\t\t\tif err := recover(); err != nil {\n\n\t\t\t\tdebug.PrintStack()\n\t\t\t}\n\n\t\t}()\n\t}\n\n\tcallback()\n}\n\n\/\/ 开启事件循环\nfunc (self *eventQueue) StartLoop() {\n\n\tgo func() {\n\n\t\tfor callback := range self.queue {\n\n\t\t\tif callback == nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tself.protectedCall(callback)\n\t\t}\n\n\t\tself.endSignal.Done()\n\t}()\n}\n\n\/\/ 停止事件循环\nfunc (self *eventQueue) StopLoop() {\n\tself.queue <- nil\n}\n\n\/\/ 等待退出消息\nfunc (self *eventQueue) Wait() {\n\tself.endSignal.Wait()\n}\n\nconst DefaultQueueSize = 100\n\n\/\/ 创建默认长度的队列\nfunc NewEventQueue() EventQueue {\n\n\treturn &eventQueue{\n\t\tqueue: make(chan func(), DefaultQueueSize),\n\t}\n}\n\nfunc QueuedCall(ses Session, callback func()) {\n\tif ses == nil {\n\t\treturn\n\t}\n\n\tq := ses.Peer().EventQueue()\n\n\t\/\/ Peer有队列时,在队列线程调用用户处理函数\n\tif q != nil {\n\t\tq.Post(callback)\n\n\t} else {\n\n\t\t\/\/ 在I\/O线程调用用户处理函数\n\t\tcallback()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ Copyright 2017 Pedro Salgado\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage resque\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"gopkg.in\/redis.v5\"\n)\n\n\/\/ Queue a job queue.\ntype Queue struct {\n\tredis *redis.Client\n\tjobClassName string\n\tName string\n}\n\n\/\/ newQueue links a job queue to a redis instance.\nfunc newQueue(jcn string, c *redis.Client) *Queue {\n\treturn &Queue{\n\t\tredis: c,\n\t\tjobClassName: jcn,\n\t\tName: fmt.Sprintf(\"resque:queue:%s\", jcn),\n\t}\n}\n\n\/\/ Receive gets a job from the queue.\nfunc (q Queue) Receive() (*Job, error) {\n\tcmd := q.redis.LPop(q.Name)\n\tjsonStr, err := cmd.Bytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tjob := &Job{}\n\terr = json.Unmarshal(jsonStr, job)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn job, err\n}\n\n\/\/ Send places a job on the queue.\nfunc (q Queue) Send(args []JobArgument) error {\n\tjsonStr, err := json.Marshal(Job{Class: q.jobClassName, Args: args})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn q.redis.RPush(q.Name, jsonStr).Err()\n}\n<commit_msg>queue.go: Receive() returned error when list was empty.<commit_after>\/\/\n\/\/ Copyright 2017 Pedro Salgado\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage resque\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"gopkg.in\/redis.v5\"\n)\n\n\/\/ Queue a job queue.\ntype Queue struct {\n\tredis *redis.Client\n\tjobClassName string\n\tName string\n}\n\n\/\/ newQueue links a job queue to a redis instance.\nfunc newQueue(jcn string, c *redis.Client) *Queue {\n\treturn &Queue{\n\t\tredis: c,\n\t\tjobClassName: jcn,\n\t\tName: fmt.Sprintf(\"resque:queue:%s\", jcn),\n\t}\n}\n\n\/\/ Receive gets a job from the queue.\nfunc (q Queue) Receive() (*Job, error) {\n\tcmd := q.redis.LPop(q.Name)\n\tif cmd.Err() != nil {\n\t\tif cmd.Err().Error() == \"redis: nil\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, cmd.Err()\n\t}\n\n\tjsonStr, err := cmd.Bytes()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tjob := &Job{}\n\terr = json.Unmarshal(jsonStr, job)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn job, err\n}\n\n\/\/ Send places a job on the queue.\nfunc (q Queue) Send(args []JobArgument) error {\n\tjsonStr, err := json.Marshal(Job{Class: q.jobClassName, Args: args})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn q.redis.RPush(q.Name, jsonStr).Err()\n}\n<|endoftext|>"} {"text":"<commit_before>package pq\n\nimport (\n\t\"container\/heap\"\n\t\"fmt\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync\/atomic\"\n)\n\nvar ErrQueueNotStarted = fmt.Errorf(\"Queue not started or closed\")\nvar ErrQueueAlreadyStarted = fmt.Errorf(\"Queue already started\")\n\ntype Queue struct {\n\tnumWorkers int\n\tpq priorityQueue\n\twork chan *item\n\tcond *sync.Cond\n\twg sync.WaitGroup\n\tworking bool\n\ttaskRunning int32\n}\n\n\/\/ Starts work. You can add tasks only after starting queue\nfunc (q *Queue) Start(numWorkers int) (err error) {\n\tif q.cond == nil {\n\t\tq.cond = sync.NewCond(&sync.Mutex{})\n\t}\n\tq.cond.L.Lock()\n\tdefer q.cond.L.Unlock()\n\tif q.working {\n\t\treturn ErrQueueAlreadyStarted\n\t}\n\tq.numWorkers = numWorkers\n\tq.pq = make(priorityQueue, 0)\n\tq.work = make(chan *item)\n\tif q.numWorkers <= 0 {\n\t\tq.numWorkers = runtime.NumCPU()\n\t}\n\tq.runWorkers()\n\tgo q.dispatcher()\n\treturn\n}\n\n\/\/ Add func() to queue\nfunc (q *Queue) AddFunc(f func() error, priority int) (err error) {\n\ttask := &funcTask{\n\t\tf: f,\n\t\tp: priority,\n\t}\n\treturn q.AddTask(task)\n}\n\n\/\/ Add func() to queue and wait while tasks will be done\nfunc (q *Queue) WaitFunc(f func() error, priority int) (err error) {\n\ttask := &funcTask{\n\t\tf: f,\n\t\tp: priority,\n\t}\n\treturn q.WaitTask(task)\n}\n\n\/\/ Just add group of tasks\nfunc (q *Queue) AddGroup(tasks []Task) (err error) {\n\tfor _, t := range tasks {\n\t\tif err = q.AddTask(t); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ Add group of tasks and waits while all tasks will be done\nfunc (q *Queue) WaitGroup(tasks []Task) (err error) {\n\tif len(tasks) == 0 {\n\t\treturn\n\t}\n\n\tctrl := &itemCtrl{count: int32(len(tasks)), m: new(sync.Mutex), done: make(chan error)}\n\n\tfor _, t := range tasks {\n\t\tit := &item{task: t, ctrl: ctrl}\n\t\tif err = q.addItem(it); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\terr = <-ctrl.done\n\treturn\n}\n\n\/\/ Add single task to queue\nfunc (q *Queue) AddTask(task Task) (err error) {\n\tit := &item{task: task}\n\treturn q.addItem(it)\n}\n\n\/\/ Add single task to queue and waits while task will be done\nfunc (q *Queue) WaitTask(task Task) (err error) {\n\treturn q.WaitGroup([]Task{task})\n}\n\n\/\/ Size of queue\nfunc (q *Queue) Len() int {\n\treturn len(q.pq)\n}\n\n\/\/ How much workers do work at this moment\nfunc (q *Queue) TaskRunning() int {\n\treturn int(atomic.LoadInt32(&q.taskRunning))\n}\n\nfunc (q *Queue) addItem(it *item) (err error) {\n\tq.cond.L.Lock()\n\tif !q.working {\n\t\treturn ErrQueueNotStarted\n\t}\n\theap.Push(&q.pq, it)\n\tq.cond.L.Unlock()\n\tq.cond.Signal()\n\treturn\n}\n\nfunc (q *Queue) runWorkers() {\n\tfor i := 0; i < q.numWorkers; i++ {\n\t\tgo q.worker()\n\t}\n\tq.working = true\n}\n\nfunc (q *Queue) dispatcher() {\n\tfor {\n\t\tq.cond.L.Lock()\n\t\tfor q.pq.Len() == 0 {\n\t\t\tq.cond.Wait()\n\t\t}\n\t\tif !q.working {\n\t\t\tbreak\n\t\t}\n\t\tit := heap.Pop(&q.pq)\n\t\tq.work <- it.(*item)\n\t\tq.cond.L.Unlock()\n\t}\n}\n\nfunc (q *Queue) worker() {\n\tq.wg.Add(1)\n\tfor it := range q.work {\n\t\tq.runTask(it)\n\t}\n\tq.wg.Done()\n}\n\nfunc (q *Queue) runTask(it *item) {\n\tvar err error\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = fmt.Errorf(\"PQ. Panic while executing task: %v\", r)\n\t\t}\n\t\tit.done(err)\n\t}()\n\n\tatomic.AddInt32(&q.taskRunning, 1)\n\tdefer atomic.AddInt32(&q.taskRunning, -1)\n\tif it.can() {\n\t\terr = it.task.Run()\n\t}\n\treturn\n}\n\n\/\/ Stopping queue. Wait while all workers finish current tasks\nfunc (q *Queue) Stop() {\n\tq.cond.L.Lock()\n\tdefer q.cond.L.Unlock()\n\tclose(q.work)\n\tq.working = false\n\tq.wg.Wait()\n}\n<commit_msg>fixes<commit_after>package pq\n\nimport (\n\t\"container\/heap\"\n\t\"fmt\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync\/atomic\"\n)\n\nvar ErrQueueNotStarted = fmt.Errorf(\"Queue not started or closed\")\nvar ErrQueueAlreadyStarted = fmt.Errorf(\"Queue already started\")\n\ntype Queue struct {\n\tnumWorkers int\n\tpq priorityQueue\n\twork chan *item\n\tcond *sync.Cond\n\twg sync.WaitGroup\n\tworking bool\n\ttaskRunning int32\n}\n\n\/\/ Starts work. You can add tasks only after starting queue\nfunc (q *Queue) Start(numWorkers int) (err error) {\n\tif q.cond == nil {\n\t\tq.cond = sync.NewCond(&sync.Mutex{})\n\t}\n\tq.cond.L.Lock()\n\tdefer q.cond.L.Unlock()\n\tif q.working {\n\t\treturn ErrQueueAlreadyStarted\n\t}\n\tq.numWorkers = numWorkers\n\tq.pq = make(priorityQueue, 0)\n\tq.work = make(chan *item)\n\tif q.numWorkers <= 0 {\n\t\tq.numWorkers = runtime.NumCPU()\n\t}\n\tq.runWorkers()\n\tgo q.dispatcher()\n\treturn\n}\n\n\/\/ Add func() to queue\nfunc (q *Queue) AddFunc(f func() error, priority int) (err error) {\n\ttask := &funcTask{\n\t\tf: f,\n\t\tp: priority,\n\t}\n\treturn q.AddTask(task)\n}\n\n\/\/ Add func() to queue and wait while tasks will be done\nfunc (q *Queue) WaitFunc(f func() error, priority int) (err error) {\n\ttask := &funcTask{\n\t\tf: f,\n\t\tp: priority,\n\t}\n\treturn q.WaitTask(task)\n}\n\n\/\/ Just add group of tasks\nfunc (q *Queue) AddGroup(tasks []Task) (err error) {\n\tfor _, t := range tasks {\n\t\tif err = q.AddTask(t); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ Add group of tasks and waits while all tasks will be done\nfunc (q *Queue) WaitGroup(tasks []Task) (err error) {\n\tif len(tasks) == 0 {\n\t\treturn\n\t}\n\n\tctrl := &itemCtrl{count: int32(len(tasks)), m: new(sync.Mutex), done: make(chan error)}\n\n\tfor _, t := range tasks {\n\t\tit := &item{task: t, ctrl: ctrl}\n\t\tif err = q.addItem(it); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\terr = <-ctrl.done\n\treturn\n}\n\n\/\/ Add single task to queue\nfunc (q *Queue) AddTask(task Task) (err error) {\n\tit := &item{task: task}\n\treturn q.addItem(it)\n}\n\n\/\/ Add single task to queue and waits while task will be done\nfunc (q *Queue) WaitTask(task Task) (err error) {\n\treturn q.WaitGroup([]Task{task})\n}\n\n\/\/ Size of queue\nfunc (q *Queue) Len() int {\n\treturn len(q.pq)\n}\n\n\/\/ How much workers do work at this moment\nfunc (q *Queue) TaskRunning() int {\n\treturn int(atomic.LoadInt32(&q.taskRunning))\n}\n\nfunc (q *Queue) addItem(it *item) (err error) {\n\tq.cond.L.Lock()\n\tif !q.working {\n\t\treturn ErrQueueNotStarted\n\t\tq.cond.L.Unlock()\n\t}\n\theap.Push(&q.pq, it)\n\tq.cond.L.Unlock()\n\tq.cond.Signal()\n\treturn\n}\n\nfunc (q *Queue) runWorkers() {\n\tfor i := 0; i < q.numWorkers; i++ {\n\t\tgo q.worker()\n\t}\n\tq.working = true\n}\n\nfunc (q *Queue) dispatcher() {\n\tfor {\n\t\tq.cond.L.Lock()\n\t\tfor q.pq.Len() == 0 {\n\t\t\tq.cond.Wait()\n\t\t}\n\t\tif !q.working {\n\t\t\tq.cond.L.Unlock()\n\t\t\tbreak\n\t\t}\n\t\tit := heap.Pop(&q.pq)\n\t\tq.work <- it.(*item)\n\t\tq.cond.L.Unlock()\n\t}\n}\n\nfunc (q *Queue) worker() {\n\tq.wg.Add(1)\n\tfor it := range q.work {\n\t\tq.runTask(it)\n\t}\n\tq.wg.Done()\n}\n\nfunc (q *Queue) runTask(it *item) {\n\tvar err error\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = fmt.Errorf(\"PQ. Panic while executing task: %v\", r)\n\t\t}\n\t\tit.done(err)\n\t}()\n\n\tatomic.AddInt32(&q.taskRunning, 1)\n\tdefer atomic.AddInt32(&q.taskRunning, -1)\n\tif it.can() {\n\t\terr = it.task.Run()\n\t}\n\treturn\n}\n\n\/\/ Stopping queue. Wait while all workers finish current tasks\nfunc (q *Queue) Stop() {\n\tq.cond.L.Lock()\n\tdefer q.cond.L.Unlock()\n\tclose(q.work)\n\tq.working = false\n\tq.wg.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before>package term\n\nimport (\n\t\"io\"\n\t\"os\"\n\n\t\"github.com\/docker\/docker\/pkg\/term\"\n)\n\ntype TTY struct {\n\tIn io.Reader\n\tOut io.Writer\n}\n\nfunc (t *TTY) Run(remoteIn io.Reader, remoteOut io.WriteCloser, resize func(h, w uint16) error) error {\n\tinFd, inIsTerm := term.GetFdInfo(t.In)\n\toutFd, outIsTerm := term.GetFdInfo(t.Out)\n\n\tif inIsTerm {\n\t\tsize := winsize(outFd)\n\t\tif err := resize(size.Height, size.Width); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar state *term.State\n\t\tstate, err := term.SetRawTerminal(inFd)\n\t\tif err == nil {\n\t\t\tdefer term.RestoreTerminal(inFd, state)\n\t\t}\n\t}\n\n\tgo func() {\n\t\tdefer remoteOut.Close()\n\t\tio.Copy(remoteOut, t.In)\n\t}()\n\n\tif outIsTerm {\n\t\tr, done := resized()\n\t\tdefer done()\n\t\tgo t.resize(r, resize, outFd)\n\t}\n\n\tio.Copy(t.Out, remoteIn)\n\treturn nil\n}\n\nfunc (t *TTY) resize(resized <-chan os.Signal, resize func(h, w uint16) error, fd uintptr) {\n\tvar h, w uint16\n\tfor range resized {\n\t\tsize := winsize(fd)\n\t\tif size.Height == h && size.Width == w {\n\t\t\tcontinue\n\t\t}\n\n\t\tresize(size.Height, size.Width)\n\t\th, w = size.Height, size.Width\n\t}\n}\n\nfunc winsize(fd uintptr) *term.Winsize {\n\tsize, err := term.GetWinsize(fd)\n\tif err != nil {\n\t\tsize = &term.Winsize{\n\t\t\tHeight: 43,\n\t\t\tWidth: 80,\n\t\t}\n\t}\n\treturn size\n}\n<commit_msg>Address edge cases for run -t option<commit_after>package term\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/docker\/docker\/pkg\/term\"\n)\n\ntype TTY struct {\n\tIn io.Reader\n\tOut io.Writer\n}\n\nfunc (t *TTY) Run(remoteIn io.Reader, remoteOut io.WriteCloser, resize func(h, w uint16) error) error {\n\tinFd, inIsTerm := term.GetFdInfo(t.In)\n\toutFd, outIsTerm := term.GetFdInfo(t.Out)\n\n\tif !inIsTerm {\n\t\treturn errors.New(\"stdin must be an interactive terminal\")\n\t}\n\n\tsize := winsize(outFd)\n\tif err := resize(size.Height, size.Width); err != nil {\n\t\ttime.Sleep(time.Second)\n\t\tsize = winsize(outFd)\n\t\tif err := resize(size.Height, size.Width); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif state, err := term.SetRawTerminal(inFd); err == nil {\n\t\tdefer term.RestoreTerminal(inFd, state)\n\t}\n\n\tgo func() {\n\t\tdefer remoteOut.Close()\n\t\tio.Copy(remoteOut, t.In)\n\t}()\n\n\tif outIsTerm {\n\t\tr, done := resized()\n\t\tdefer done()\n\t\tgo t.resize(r, resize, outFd)\n\t}\n\n\tio.Copy(t.Out, remoteIn)\n\treturn nil\n}\n\nfunc (t *TTY) resize(resized <-chan os.Signal, resize func(h, w uint16) error, fd uintptr) {\n\tvar h, w uint16\n\tfor range resized {\n\t\tsize := winsize(fd)\n\t\tif size.Height == h && size.Width == w {\n\t\t\tcontinue\n\t\t}\n\n\t\tresize(size.Height, size.Width)\n\t\th, w = size.Height, size.Width\n\t}\n}\n\nfunc winsize(fd uintptr) *term.Winsize {\n\tsize, err := term.GetWinsize(fd)\n\tif err != nil {\n\t\tsize = &term.Winsize{\n\t\t\tHeight: 43,\n\t\t\tWidth: 80,\n\t\t}\n\t}\n\treturn size\n}\n<|endoftext|>"} {"text":"<commit_before>package pgbroadcast\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/lib\/pq\"\n)\n\ntype pgnotification struct {\n\tTable string `json:\"table\"`\n\tAction string `json:\"action\"`\n\tData map[string]interface{} `json:\"data\"`\n}\n\nfunc Run(pgconninfo string, pglistenchannel string) error {\n\tgo h.run()\n\terr := startPgListener(pgconninfo, pglistenchannel, h.broadcast)\n\treturn err\n}\n\nfunc startPgListener(pgconninfo string, pglistenchannel string, broadcastingchannel chan pgnotification) error {\n\t\/\/ callback func\n\tpgEventCallback := func(ev pq.ListenerEventType, err error) {\n\t\tif err != nil {\n\t\t\tfmt.Println(\"pgbroadcast: \", err.Error())\n\t\t}\n\t}\n\n\t\/\/ create listener and start listening\n\tl := pq.NewListener(pgconninfo, 10*time.Second, time.Minute, pgEventCallback)\n\n\terr := l.Listen(pglistenchannel)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Wait for notifications in goroutine, pass them to the broadcastingchannel.\n\tgo waitForNotification(l)\n\tfmt.Println(\"pgbroadcast: listening for notifications\")\n\treturn nil\n}\n\nfunc waitForNotification(l *pq.Listener) {\n\tfor {\n\t\tselect {\n\t\tcase n := <-l.Notify:\n\t\t\t\/\/\n\t\t\tif n == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ Unmarshal JSON in pgnotification struct\n\t\t\tvar pgn pgnotification\n\t\t\terr := json.Unmarshal([]byte(n.Extra), &pgn)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"pgbroadcast: error processing JSON: \", err)\n\t\t\t} else {\n\t\t\t\th.broadcast <- pgn\n\t\t\t}\n\t\tcase <-time.After(60 * time.Second):\n\t\t\t\/\/ received no events for 60 seconds, ping connection\")\n\t\t\tgo func() {\n\t\t\t\tl.Ping()\n\t\t\t}()\n\t\t}\n\t}\n}\n<commit_msg>small comment fix<commit_after>package pgbroadcast\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/lib\/pq\"\n)\n\ntype pgnotification struct {\n\tTable string `json:\"table\"`\n\tAction string `json:\"action\"`\n\tData map[string]interface{} `json:\"data\"`\n}\n\nfunc Run(pgconninfo string, pglistenchannel string) error {\n\tgo h.run()\n\terr := startPgListener(pgconninfo, pglistenchannel, h.broadcast)\n\treturn err\n}\n\nfunc startPgListener(pgconninfo string, pglistenchannel string, broadcastingchannel chan pgnotification) error {\n\t\/\/ callback func\n\tpgEventCallback := func(ev pq.ListenerEventType, err error) {\n\t\tif err != nil {\n\t\t\tfmt.Println(\"pgbroadcast: \", err.Error())\n\t\t}\n\t}\n\n\t\/\/ create listener and start listening\n\tl := pq.NewListener(pgconninfo, 10*time.Second, time.Minute, pgEventCallback)\n\n\terr := l.Listen(pglistenchannel)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Wait for notifications in goroutine, pass them to the broadcastingchannel.\n\tgo waitForNotification(l)\n\tfmt.Println(\"pgbroadcast: listening for notifications\")\n\treturn nil\n}\n\nfunc waitForNotification(l *pq.Listener) {\n\tfor {\n\t\tselect {\n\t\tcase n := <-l.Notify:\n\t\t\t\/\/ For some reason after connection loss with the postgres database,\n\t\t\t\/\/ the first notifications is a nil notification. Ignore it.\n\t\t\tif n == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ Unmarshal JSON in pgnotification struct\n\t\t\tvar pgn pgnotification\n\t\t\terr := json.Unmarshal([]byte(n.Extra), &pgn)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(\"pgbroadcast: error processing JSON: \", err)\n\t\t\t} else {\n\t\t\t\th.broadcast <- pgn\n\t\t\t}\n\t\tcase <-time.After(60 * time.Second):\n\t\t\t\/\/ received no events for 60 seconds, ping connection\")\n\t\t\tgo func() {\n\t\t\t\tl.Ping()\n\t\t\t}()\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Andreas Koch. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage themefiles\n\nconst PresentationJs = `\n$(function() {\n\tvar presentationSelector = 'body > article.presentation > .content';\n\n\tif ($(presentationSelector).length == 0) {\n\t\t\/\/ this document is not a presentation\n\t\treturn;\n\t}\n\n\t\/**\n\t * Toggle the page header elements\n\t *\/\n\tvar togglePresentationMode = function() {\n\t\t$(\"body>nav.toplevel\").toggle();\n\t\t$(\"body>nav.breadcrumb\").toggle();\n\t\t$(\".presentation>header\").toggle();\n\t\t$(\".presentation>.description\").toggle();\n\t};\n\n\t\/\/ render the presentation\n\t$.deck('.slide', {\n\t\tselectors: {\n\t\t\tcontainer: presentationSelector\n\t\t},\n\t\t\n\t\tkeys: {\n\t\t\tgoto: 71 \/\/ 'g'\n\t\t}\n\t});\n\n\t\/\/ handle keyboard shortcuts\n\t$(document).keydown(function(e) {\n\n\t\t\/* <ctrl> + <shift> *\/\n\t\tif (e.ctrlKey && (e.which === 16) ) {\n\t\t\tconsole.log( \"You pressed Ctrl + Shift\" );\n\t\t\ttogglePresentationMode();\n\t\t}\n\n\t});\n\n});`<commit_msg>Default Theme Presentations Bug Fix: Toggle the footer when in presentation mode<commit_after>\/\/ Copyright 2013 Andreas Koch. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage themefiles\n\nconst PresentationJs = `\n$(function() {\n\tvar presentationSelector = 'body > article.presentation > .content';\n\n\tif ($(presentationSelector).length == 0) {\n\t\t\/\/ this document is not a presentation\n\t\treturn;\n\t}\n\n\t\/**\n\t * Toggle the page header elements\n\t *\/\n\tvar togglePresentationMode = function() {\n\t\t$(\"body>nav.toplevel\").toggle();\n\t\t$(\"body>nav.breadcrumb\").toggle();\n\t\t$(\".presentation>header\").toggle();\n\t\t$(\".presentation>.description\").toggle();\n\t\t$(\"body>footer\").toggle();\n\t};\n\n\t\/\/ render the presentation\n\t$.deck('.slide', {\n\t\tselectors: {\n\t\t\tcontainer: presentationSelector\n\t\t},\n\t\t\n\t\tkeys: {\n\t\t\tgoto: 71 \/\/ 'g'\n\t\t}\n\t});\n\n\t\/\/ handle keyboard shortcuts\n\t$(document).keydown(function(e) {\n\n\t\t\/* <ctrl> + <shift> *\/\n\t\tif (e.ctrlKey && (e.which === 16) ) {\n\t\t\tconsole.log( \"You pressed Ctrl + Shift\" );\n\t\t\ttogglePresentationMode();\n\t\t}\n\n\t});\n\n});`\n<|endoftext|>"} {"text":"<commit_before>package proxmox\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n\t\"log\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype ConfigQemu struct {\n\tName string `json:\"name\"`\n\tDescription string `json:\"desc\"`\n\tMemory int `json:\"memory\"`\n\tDiskSize float64 `json:\"diskGB\"`\n\tStorage string `json:\"storage\"`\n\tQemuOs string `json:\"os\"`\n\tQemuCores int `json:\"cores\"`\n\tQemuSockets int `json:\"sockets\"`\n\tQemuIso string `json:\"iso\"`\n\tQemuNicModel string `json:\"nic\"`\n\tQemuBrige string `json:\"bridge\"`\n\tQemuVlanTag int `json:\"vlan\"`\n}\n\nfunc (config ConfigQemu) CreateVm(vmr *VmRef, client *Client) (err error) {\n\tvmr.SetVmType(\"qemu\")\n\tnetwork := config.QemuNicModel + \",bridge=\" + config.QemuBrige\n\tif config.QemuVlanTag > 0 {\n\t\tnetwork = network + \",tag=\" + string(config.QemuVlanTag)\n\t}\n\tparams := map[string]string{\n\t\t\"vmid\": strconv.Itoa(vmr.vmId),\n\t\t\"name\": config.Name,\n\t\t\"ide2\": config.QemuIso + \",media=cdrom\",\n\t\t\"ostype\": config.QemuOs,\n\t\t\"virtio0\": config.Storage + \":\" + strconv.FormatFloat(config.DiskSize, 'f', -1, 64),\n\t\t\"sockets\": strconv.Itoa(config.QemuSockets),\n\t\t\"cores\": strconv.Itoa(config.QemuCores),\n\t\t\"cpu\": \"host\",\n\t\t\"memory\": strconv.Itoa(config.Memory),\n\t\t\"net0\": network,\n\t\t\"description\": config.Description,\n\t}\n\n\t_, err = client.CreateQemuVm(vmr.node, params)\n\treturn\n}\n\n\/*\n\nCloneVm\nExample: Request\n\nnodes\/proxmox1-xx\/qemu\/1012\/clone\n\nnewid:145\nname:tf-clone1\ntarget:proxmox1-xx\nfull:1\nstorage:xxx\n\n*\/\nfunc (config ConfigQemu) CloneVm(sourceVmr *VmRef, vmr *VmRef, client *Client) (err error) {\n\tvmr.SetVmType(\"qemu\")\n\tparams := map[string]string{\n\t\t\"newid\": strconv.Itoa(vmr.vmId),\n\t\t\"target\": vmr.node,\n\t\t\"name\": config.Name,\n\t\t\"storage\": config.Storage,\n\t\t\"full\": \"1\",\n\t}\n\t_, err = client.CloneQemuVm(sourceVmr, params)\n\tif err != nil {\n\t\treturn\n\t}\n\tconfigParams := map[string]string{\n\t\t\"sockets\": strconv.Itoa(config.QemuSockets),\n\t\t\"cores\": strconv.Itoa(config.QemuCores),\n\t\t\"memory\": strconv.Itoa(config.Memory),\n\t\t\"description\": config.Description,\n\t}\n\t_, err = client.SetVmConfig(vmr, configParams)\n\treturn\n}\n\nfunc NewConfigQemuFromJson(io io.Reader) (config *ConfigQemu, err error) {\n\tconfig = &ConfigQemu{QemuVlanTag: -1}\n\terr = json.NewDecoder(io).Decode(config)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn nil, err\n\t}\n\tlog.Println(config)\n\treturn\n}\n\nvar rxStorage = regexp.MustCompile(\"(.*?):.*?,size=(\\\\d+)G\")\nvar rxIso = regexp.MustCompile(\"(.*?),media\")\nvar rxNetwork = regexp.MustCompile(\"(.*?)=.*?,bridge=([^,]+)(?:,tag=)?(.*)\")\n\nfunc NewConfigQemuFromApi(vmr *VmRef, client *Client) (config *ConfigQemu, err error) {\n\tvmConfig, err := client.GetVmConfig(vmr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn nil, err\n\t}\n\n\t\/\/ vmConfig Sample: map[ cpu:host\n\t\/\/ net0:virtio=62:DF:XX:XX:XX:XX,bridge=vmbr0\n\t\/\/ ide2:local:iso\/xxx-xx.iso,media=cdrom memory:2048\n\t\/\/ smbios1:uuid=8b3bf833-aad8-4545-xxx-xxxxxxx digest:aa6ce5xxxxx1b9ce33e4aaeff564d4 sockets:1\n\t\/\/ name:terraform-ubuntu1404-template bootdisk:virtio0\n\t\/\/ virtio0:ProxmoxxxxISCSI:vm-1014-disk-2,size=4G\n\t\/\/ description:Base image\n\t\/\/ cores:2 ostype:l26 ]\n\tconfig = &ConfigQemu{\n\t\tName: vmConfig[\"name\"].(string),\n\t\tDescription: vmConfig[\"description\"].(string),\n\t\tQemuOs: vmConfig[\"ostype\"].(string),\n\t\tMemory: int(vmConfig[\"memory\"].(float64)),\n\t\tQemuCores: int(vmConfig[\"cores\"].(float64)),\n\t\tQemuSockets: int(vmConfig[\"sockets\"].(float64)),\n\t\tQemuVlanTag: -1,\n\t}\n\n\tstorageMatch := rxStorage.FindStringSubmatch(vmConfig[\"virtio0\"].(string))\n\tconfig.Storage = storageMatch[1]\n\tconfig.DiskSize, _ = strconv.ParseFloat(storageMatch[2], 64)\n\n\tisoMatch := rxIso.FindStringSubmatch(vmConfig[\"ide2\"].(string))\n\tconfig.QemuIso = isoMatch[1]\n\n\tnetMatch := rxNetwork.FindStringSubmatch(vmConfig[\"net0\"].(string))\n\tconfig.QemuNicModel = netMatch[1]\n\tconfig.QemuBrige = netMatch[2]\n\tif netMatch[3] != \"\" {\n\t\tconfig.QemuVlanTag, _ = strconv.Atoi(netMatch[3])\n\t}\n\n\treturn\n}\n\n\/\/ Useful waiting for ISO install to complete\nfunc WaitForShutdown(vmr *VmRef, client *Client) (err error) {\n\tfor ii := 0; ii < 100; ii++ {\n\t\tvmState, err := client.GetVmState(vmr)\n\t\tif err != nil {\n\t\t\tlog.Print(\"Wait error:\")\n\t\t\tlog.Println(err)\n\t\t} else if vmState[\"status\"] == \"stopped\" {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(5 * time.Second)\n\t}\n\treturn errors.New(\"Not shutdown within wait time\")\n}\n\n\/\/ This is because proxmox create\/config API won't let us make usernet devices\nfunc SshForwardUsernet(vmr *VmRef, client *Client) (sshPort string, err error) {\n\tvmState, err := client.GetVmState(vmr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif vmState[\"status\"] == \"stopped\" {\n\t\treturn \"\", errors.New(\"VM must be running first\")\n\t}\n\tsshPort = strconv.Itoa(vmr.VmId() + 22000)\n\t_, err = client.MonitorCmd(vmr, \"netdev_add user,id=net1,hostfwd=tcp::\"+sshPort+\"-:22\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t_, err = client.MonitorCmd(vmr, \"device_add virtio-net-pci,id=net1,netdev=net1,addr=0x13\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn\n}\n\nfunc MaxVmId(client *Client) (max int, err error) {\n\tresp, err := client.GetVmList()\n\tvms := resp[\"data\"].([]interface{})\n\tmax = 0\n\tfor vmii := range vms {\n\t\tvm := vms[vmii].(map[string]interface{})\n\t\tvmid := int(vm[\"vmid\"].(float64))\n\t\tif vmid > max {\n\t\t\tmax = vmid\n\t\t}\n\t}\n\treturn\n}\n<commit_msg>trim description<commit_after>package proxmox\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n\t\"log\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype ConfigQemu struct {\n\tName string `json:\"name\"`\n\tDescription string `json:\"desc\"`\n\tMemory int `json:\"memory\"`\n\tDiskSize float64 `json:\"diskGB\"`\n\tStorage string `json:\"storage\"`\n\tQemuOs string `json:\"os\"`\n\tQemuCores int `json:\"cores\"`\n\tQemuSockets int `json:\"sockets\"`\n\tQemuIso string `json:\"iso\"`\n\tQemuNicModel string `json:\"nic\"`\n\tQemuBrige string `json:\"bridge\"`\n\tQemuVlanTag int `json:\"vlan\"`\n}\n\nfunc (config ConfigQemu) CreateVm(vmr *VmRef, client *Client) (err error) {\n\tvmr.SetVmType(\"qemu\")\n\tnetwork := config.QemuNicModel + \",bridge=\" + config.QemuBrige\n\tif config.QemuVlanTag > 0 {\n\t\tnetwork = network + \",tag=\" + string(config.QemuVlanTag)\n\t}\n\tparams := map[string]string{\n\t\t\"vmid\": strconv.Itoa(vmr.vmId),\n\t\t\"name\": config.Name,\n\t\t\"ide2\": config.QemuIso + \",media=cdrom\",\n\t\t\"ostype\": config.QemuOs,\n\t\t\"virtio0\": config.Storage + \":\" + strconv.FormatFloat(config.DiskSize, 'f', -1, 64),\n\t\t\"sockets\": strconv.Itoa(config.QemuSockets),\n\t\t\"cores\": strconv.Itoa(config.QemuCores),\n\t\t\"cpu\": \"host\",\n\t\t\"memory\": strconv.Itoa(config.Memory),\n\t\t\"net0\": network,\n\t\t\"description\": config.Description,\n\t}\n\n\t_, err = client.CreateQemuVm(vmr.node, params)\n\treturn\n}\n\n\/*\n\nCloneVm\nExample: Request\n\nnodes\/proxmox1-xx\/qemu\/1012\/clone\n\nnewid:145\nname:tf-clone1\ntarget:proxmox1-xx\nfull:1\nstorage:xxx\n\n*\/\nfunc (config ConfigQemu) CloneVm(sourceVmr *VmRef, vmr *VmRef, client *Client) (err error) {\n\tvmr.SetVmType(\"qemu\")\n\tparams := map[string]string{\n\t\t\"newid\": strconv.Itoa(vmr.vmId),\n\t\t\"target\": vmr.node,\n\t\t\"name\": config.Name,\n\t\t\"storage\": config.Storage,\n\t\t\"full\": \"1\",\n\t}\n\t_, err = client.CloneQemuVm(sourceVmr, params)\n\tif err != nil {\n\t\treturn\n\t}\n\tconfigParams := map[string]string{\n\t\t\"sockets\": strconv.Itoa(config.QemuSockets),\n\t\t\"cores\": strconv.Itoa(config.QemuCores),\n\t\t\"memory\": strconv.Itoa(config.Memory),\n\t\t\"description\": config.Description,\n\t}\n\t_, err = client.SetVmConfig(vmr, configParams)\n\treturn\n}\n\nfunc NewConfigQemuFromJson(io io.Reader) (config *ConfigQemu, err error) {\n\tconfig = &ConfigQemu{QemuVlanTag: -1}\n\terr = json.NewDecoder(io).Decode(config)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn nil, err\n\t}\n\tlog.Println(config)\n\treturn\n}\n\nvar rxStorage = regexp.MustCompile(\"(.*?):.*?,size=(\\\\d+)G\")\nvar rxIso = regexp.MustCompile(\"(.*?),media\")\nvar rxNetwork = regexp.MustCompile(\"(.*?)=.*?,bridge=([^,]+)(?:,tag=)?(.*)\")\n\nfunc NewConfigQemuFromApi(vmr *VmRef, client *Client) (config *ConfigQemu, err error) {\n\tvmConfig, err := client.GetVmConfig(vmr)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn nil, err\n\t}\n\n\t\/\/ vmConfig Sample: map[ cpu:host\n\t\/\/ net0:virtio=62:DF:XX:XX:XX:XX,bridge=vmbr0\n\t\/\/ ide2:local:iso\/xxx-xx.iso,media=cdrom memory:2048\n\t\/\/ smbios1:uuid=8b3bf833-aad8-4545-xxx-xxxxxxx digest:aa6ce5xxxxx1b9ce33e4aaeff564d4 sockets:1\n\t\/\/ name:terraform-ubuntu1404-template bootdisk:virtio0\n\t\/\/ virtio0:ProxmoxxxxISCSI:vm-1014-disk-2,size=4G\n\t\/\/ description:Base image\n\t\/\/ cores:2 ostype:l26 ]\n\tconfig = &ConfigQemu{\n\t\tName: vmConfig[\"name\"].(string),\n\t\tDescription: strings.TrimSpace(vmConfig[\"description\"].(string)),\n\t\tQemuOs: vmConfig[\"ostype\"].(string),\n\t\tMemory: int(vmConfig[\"memory\"].(float64)),\n\t\tQemuCores: int(vmConfig[\"cores\"].(float64)),\n\t\tQemuSockets: int(vmConfig[\"sockets\"].(float64)),\n\t\tQemuVlanTag: -1,\n\t}\n\n\tstorageMatch := rxStorage.FindStringSubmatch(vmConfig[\"virtio0\"].(string))\n\tconfig.Storage = storageMatch[1]\n\tconfig.DiskSize, _ = strconv.ParseFloat(storageMatch[2], 64)\n\n\tisoMatch := rxIso.FindStringSubmatch(vmConfig[\"ide2\"].(string))\n\tconfig.QemuIso = isoMatch[1]\n\n\tnetMatch := rxNetwork.FindStringSubmatch(vmConfig[\"net0\"].(string))\n\tconfig.QemuNicModel = netMatch[1]\n\tconfig.QemuBrige = netMatch[2]\n\tif netMatch[3] != \"\" {\n\t\tconfig.QemuVlanTag, _ = strconv.Atoi(netMatch[3])\n\t}\n\n\treturn\n}\n\n\/\/ Useful waiting for ISO install to complete\nfunc WaitForShutdown(vmr *VmRef, client *Client) (err error) {\n\tfor ii := 0; ii < 100; ii++ {\n\t\tvmState, err := client.GetVmState(vmr)\n\t\tif err != nil {\n\t\t\tlog.Print(\"Wait error:\")\n\t\t\tlog.Println(err)\n\t\t} else if vmState[\"status\"] == \"stopped\" {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(5 * time.Second)\n\t}\n\treturn errors.New(\"Not shutdown within wait time\")\n}\n\n\/\/ This is because proxmox create\/config API won't let us make usernet devices\nfunc SshForwardUsernet(vmr *VmRef, client *Client) (sshPort string, err error) {\n\tvmState, err := client.GetVmState(vmr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif vmState[\"status\"] == \"stopped\" {\n\t\treturn \"\", errors.New(\"VM must be running first\")\n\t}\n\tsshPort = strconv.Itoa(vmr.VmId() + 22000)\n\t_, err = client.MonitorCmd(vmr, \"netdev_add user,id=net1,hostfwd=tcp::\"+sshPort+\"-:22\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t_, err = client.MonitorCmd(vmr, \"device_add virtio-net-pci,id=net1,netdev=net1,addr=0x13\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn\n}\n\nfunc MaxVmId(client *Client) (max int, err error) {\n\tresp, err := client.GetVmList()\n\tvms := resp[\"data\"].([]interface{})\n\tmax = 0\n\tfor vmii := range vms {\n\t\tvm := vms[vmii].(map[string]interface{})\n\t\tvmid := int(vm[\"vmid\"].(float64))\n\t\tif vmid > max {\n\t\t\tmax = vmid\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/package phonecountry provide information about a phone number\npackage phonecountry\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/maknahar\/phonecountry\/internal\/content\"\n)\n\nvar phoneMap map[string]string\nvar reversePhoneMap map[string]string\n\nfunc init() {\n\n\tphoneMap = make(map[string]string)\n\n\t\/\/ Decode JSON into our map\n\terr := json.Unmarshal([]byte(content.PhoneCode), &phoneMap)\n\tif err != nil {\n\t\tprintln(err)\n\t}\n\n\treversePhoneMap = make(map[string]string)\n\tfor k, v := range phoneMap {\n\t\treversePhoneMap[strings.ToUpper(v)] = k\n\t}\n}\n\n\/\/GetCountryNameFromPhone returns country common name for given phone number\n\/\/Example +919445454528 - India\nfunc GetCountryNameFromPhone(phoneNumber string) (countryName string, err error) {\n\t\/\/strip + from starting\n\tphoneNumber = strings.TrimSpace(phoneNumber)\n\tphoneNumber = strings.TrimLeft(phoneNumber, \"+\")\n\n\t\/\/check minimum length\n\tif len(phoneNumber) < 7 {\n\t\treturn \"\", errors.New(\"Minimum length of a number should be atleast 7 digit\")\n\t}\n\n\t\/\/handle special cases\n\tswitch phoneNumber[0:1] {\n\tcase \"1\":\n\t\tc, ok := phoneMap[phoneNumber[0:4]]\n\t\tif ok {\n\t\t\treturn c, nil\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"No country found with country code %s\", phoneNumber[0:4])\n\n\tcase \"3\":\n\t\tif phoneNumber[0:2] == \"39\" && phoneNumber[0:5] == \"39066\" {\n\t\t\treturn phoneMap[\"39066\"], nil\n\t\t}\n\n\tcase \"4\":\n\t\t\/\/Guernsey\n\t\tif phoneNumber[0:6] == \"441481\" || phoneNumber[0:6] == \"447839\" ||\n\t\t\tphoneNumber[0:6] == \"447911\" {\n\t\t\treturn phoneMap[\"39066\"], nil\n\t\t}\n\t\t\/\/Isle of Man\n\t\tif phoneNumber[0:6] == \"441624\" || phoneNumber[0:6] == \"447524\" ||\n\t\t\tphoneNumber[0:6] == \"447624\" || phoneNumber[0:6] == \"447924\" {\n\t\t\treturn phoneMap[\"447924\"], nil\n\t\t}\n\t\t\/\/Jersey\n\t\tif phoneNumber[0:6] == \"441534\" || phoneNumber[0:6] == \"447509\" ||\n\t\t\tphoneNumber[0:6] == \"447797\" || phoneNumber[0:6] == \"447937\" ||\n\t\t\tphoneNumber[0:6] == \"447700\" || phoneNumber[0:6] == \"447829\" {\n\t\t\treturn phoneMap[\"447829\"], nil\n\t\t}\n\tcase \"6\":\n\t\tfmt.Println(\"here\")\n\t\tif phoneNumber[0:4] == \"6721\" {\n\t\t\treturn phoneMap[\"6721\"], nil\n\t\t}\n\t\tif phoneNumber[0:4] == \"6723\" {\n\t\t\treturn phoneMap[\"6723\"], nil\n\t\t}\n\t\t\/\/Christmas Island\n\t\tif phoneNumber[0:7] == \"6189162\" {\n\t\t\treturn phoneMap[\"6189162\"], nil\n\t\t}\n\t\t\/\/Christmas Island\n\t\tif phoneNumber[0:7] == \"6189164\" {\n\t\t\treturn phoneMap[\"6189164\"], nil\n\t\t}\n\tcase \"7\":\n\t\tif phoneNumber[0:2] == \"76\" || phoneNumber[0:2] == \"77\" {\n\t\t\treturn phoneMap[phoneNumber[0:2]], nil\n\t\t}\n\t\treturn phoneMap[\"7\"], nil\n\t}\n\tc, ok := phoneMap[phoneNumber[0:2]]\n\tif ok {\n\t\treturn c, nil\n\t}\n\tc, ok = phoneMap[phoneNumber[0:3]]\n\tif ok {\n\t\treturn c, nil\n\t}\n\treturn \"\", fmt.Errorf(\"No country code found for %s\", phoneNumber)\n}\n\n\/\/GetCountryISO2Code returns country iso2 code of given number\n\/\/For Example +919445454528 => IN\nfunc GetCountryISO2Code(phoneNumber string) (countryCode string, err error) {\n\tcountryName, err := GetCountryNameFromPhone(phoneNumber)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcountryCode = content.CountryInfo[countryName].Cca2\n\tif countryCode == \"\" {\n\t\treturn \"\", errors.New(fmt.Sprintf(\"country name %s not found\", countryName))\n\t}\n\treturn\n}\n<commit_msg>removed print<commit_after>\/\/package phonecountry provide information about a phone number\npackage phonecountry\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/maknahar\/phonecountry\/internal\/content\"\n)\n\nvar phoneMap map[string]string\n\nfunc init() {\n\n\tphoneMap = make(map[string]string)\n\n\t\/\/ Decode JSON into our map\n\terr := json.Unmarshal([]byte(content.PhoneCode), &phoneMap)\n\tif err != nil {\n\t\tprintln(err)\n\t}\n}\n\n\/\/GetCountryNameFromPhone returns country common name for given phone number\n\/\/Example +919445454528 => India\nfunc GetCountryNameFromPhone(phoneNumber string) (countryName string, err error) {\n\t\/\/strip + from starting\n\tphoneNumber = strings.TrimSpace(phoneNumber)\n\tphoneNumber = strings.TrimLeft(phoneNumber, \"+\")\n\n\t\/\/check minimum length\n\tif len(phoneNumber) < 7 {\n\t\treturn \"\", errors.New(\"Minimum length of a number should be atleast 7 digit\")\n\t}\n\n\t\/\/handle special cases\n\tswitch phoneNumber[0:1] {\n\tcase \"1\":\n\t\tc, ok := phoneMap[phoneNumber[0:4]]\n\t\tif ok {\n\t\t\treturn c, nil\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"No country found with country code %s\", phoneNumber[0:4])\n\n\tcase \"3\":\n\t\tif phoneNumber[0:2] == \"39\" && phoneNumber[0:5] == \"39066\" {\n\t\t\treturn phoneMap[\"39066\"], nil\n\t\t}\n\n\tcase \"4\":\n\t\t\/\/Guernsey\n\t\tif phoneNumber[0:6] == \"441481\" || phoneNumber[0:6] == \"447839\" ||\n\t\t\tphoneNumber[0:6] == \"447911\" {\n\t\t\treturn phoneMap[\"39066\"], nil\n\t\t}\n\t\t\/\/Isle of Man\n\t\tif phoneNumber[0:6] == \"441624\" || phoneNumber[0:6] == \"447524\" ||\n\t\t\tphoneNumber[0:6] == \"447624\" || phoneNumber[0:6] == \"447924\" {\n\t\t\treturn phoneMap[\"447924\"], nil\n\t\t}\n\t\t\/\/Jersey\n\t\tif phoneNumber[0:6] == \"441534\" || phoneNumber[0:6] == \"447509\" ||\n\t\t\tphoneNumber[0:6] == \"447797\" || phoneNumber[0:6] == \"447937\" ||\n\t\t\tphoneNumber[0:6] == \"447700\" || phoneNumber[0:6] == \"447829\" {\n\t\t\treturn phoneMap[\"447829\"], nil\n\t\t}\n\tcase \"6\":\n\t\tif phoneNumber[0:4] == \"6721\" {\n\t\t\treturn phoneMap[\"6721\"], nil\n\t\t}\n\t\tif phoneNumber[0:4] == \"6723\" {\n\t\t\treturn phoneMap[\"6723\"], nil\n\t\t}\n\t\t\/\/Christmas Island\n\t\tif phoneNumber[0:7] == \"6189162\" {\n\t\t\treturn phoneMap[\"6189162\"], nil\n\t\t}\n\t\t\/\/Christmas Island\n\t\tif phoneNumber[0:7] == \"6189164\" {\n\t\t\treturn phoneMap[\"6189164\"], nil\n\t\t}\n\tcase \"7\":\n\t\tif phoneNumber[0:2] == \"76\" || phoneNumber[0:2] == \"77\" {\n\t\t\treturn phoneMap[phoneNumber[0:2]], nil\n\t\t}\n\t\treturn phoneMap[\"7\"], nil\n\t}\n\tc, ok := phoneMap[phoneNumber[0:2]]\n\tif ok {\n\t\treturn c, nil\n\t}\n\tc, ok = phoneMap[phoneNumber[0:3]]\n\tif ok {\n\t\treturn c, nil\n\t}\n\treturn \"\", fmt.Errorf(\"No country code found for %s\", phoneNumber)\n}\n\n\/\/GetCountryISO2Code returns country iso2 code of given number\n\/\/For Example +919445454528 => IN\nfunc GetCountryISO2Code(phoneNumber string) (countryCode string, err error) {\n\tcountryName, err := GetCountryNameFromPhone(phoneNumber)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcountryCode = content.CountryInfo[countryName].Cca2\n\tif countryCode == \"\" {\n\t\treturn \"\", errors.New(fmt.Sprintf(\"country name %s not found\", countryName))\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2014 Ludovic Fauvet\n\/\/ Licensed under the MIT license\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"time\"\n)\n\nconst (\n\tredisConnectionTimeout = 200 * time.Millisecond\n\tredisReadWriteTimeout = 300 * time.Second\n)\n\nvar (\n\terrUnreachable = errors.New(\"endpoint unreachable\")\n)\n\ntype redisobj struct {\n\tpool *redis.Pool\n}\n\nfunc NewRedis() *redisobj {\n\tr := &redisobj{}\n\n\tr.pool = &redis.Pool{\n\t\tMaxIdle: 10,\n\t\tIdleTimeout: 240 * time.Second,\n\t\tDial: func() (redis.Conn, error) {\n\t\t\treturn r.connect()\n\t\t},\n\t\tTestOnBorrow: func(c redis.Conn, t time.Time) error {\n\t\t\t_, err := c.Do(\"PING\")\n\t\t\treturn err\n\t\t},\n\t}\n\n\treturn r\n}\n\nfunc (r *redisobj) Close() {\n\tr.pool.Close()\n}\n\nfunc (r *redisobj) connect() (redis.Conn, error) {\n\tsentinels := GetConfig().RedisSentinels\n\n\tif len(sentinels) > 0 {\n\n\t\tif len(GetConfig().RedisSentinelMasterName) == 0 {\n\t\t\tlog.Error(\"Config: RedisSentinelMasterName cannot be empty!\")\n\t\t\tgoto single\n\t\t}\n\n\t\tfor _, s := range sentinels {\n\t\t\tlog.Debug(\"Connecting to redis sentinel %s\", s.Host)\n\t\t\tvar master []string\n\t\t\tvar masterhost string\n\t\t\tvar cm redis.Conn\n\n\t\t\tc, err := r.connectTo(s.Host)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Sentinel: %s\", err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/AUTH?\n\t\t\trole, err := r.askRole(c)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Sentinel: %s\", err.Error())\n\t\t\t\tgoto closeSentinel\n\t\t\t}\n\t\t\tif role != \"sentinel\" {\n\t\t\t\tlog.Error(\"Sentinel: %s is not a sentinel but a %s\", s.Host, role)\n\t\t\t\tgoto closeSentinel\n\t\t\t}\n\n\t\t\tmaster, err = redis.Strings(c.Do(\"SENTINEL\", \"get-master-addr-by-name\", GetConfig().RedisSentinelMasterName))\n\t\t\tif err == redis.ErrNil {\n\t\t\t\tlog.Error(\"Sentinel: %s doesn't know the master-name %s\", s.Host, GetConfig().RedisSentinelMasterName)\n\t\t\t\tgoto closeSentinel\n\t\t\t} else if err != nil {\n\t\t\t\tlog.Error(\"Sentinel: %s\", err.Error())\n\t\t\t\tgoto closeSentinel\n\t\t\t}\n\n\t\t\tmasterhost = fmt.Sprintf(\"%s:%s\", master[0], master[1])\n\n\t\t\tcm, err = r.connectTo(masterhost)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Redis master: %s\", err.Error())\n\t\t\t\tgoto closeSentinel\n\t\t\t}\n\n\t\t\tif r.auth(cm) != nil {\n\t\t\t\tlog.Error(\"Redis master: auth failed\")\n\t\t\t\tgoto closeMaster\n\t\t\t}\n\n\t\t\trole, err = r.askRole(cm)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"Redis master: %s\", err.Error())\n\t\t\t\tgoto closeMaster\n\t\t\t}\n\t\t\tif role != \"master\" {\n\t\t\t\tlog.Error(\"Redis master: %s is not a master but a %s\", masterhost, role)\n\t\t\t\tgoto closeMaster\n\t\t\t}\n\n\t\t\t\/\/ Close the connection to the sentinel\n\t\t\tc.Close()\n\n\t\t\tlog.Debug(\"Connected to redis master %s\", masterhost)\n\t\t\treturn cm, nil\n\n\t\tcloseMaster:\n\t\t\tcm.Close()\n\n\t\tcloseSentinel:\n\t\t\tc.Close()\n\t\t}\n\t}\n\nsingle:\n\n\tif len(GetConfig().RedisAddress) == 0 {\n\t\tif len(sentinels) == 0 {\n\t\t\tlog.Error(\"No redis master available\")\n\t\t}\n\t\treturn nil, errUnreachable\n\t}\n\n\tlog.Warning(\"No redis master available, trying using the configured RedisAddress as fallback\")\n\n\tc, err := r.connectTo(GetConfig().RedisAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif r.auth(c); err != nil {\n\t\tc.Close()\n\t\treturn nil, err\n\t}\n\trole, err := r.askRole(c)\n\tif err != nil {\n\t\tlog.Error(\"Redis master: %s\", err.Error())\n\t\treturn nil, errUnreachable\n\t}\n\tif role != \"master\" {\n\t\tlog.Error(\"Redis master: %s is not a master but a %s\", GetConfig().RedisAddress, role)\n\t\treturn nil, errUnreachable\n\t}\n\tlog.Debug(\"Connected to redis master %s\", GetConfig().RedisAddress)\n\treturn c, err\n\n}\n\nfunc (r *redisobj) connectTo(address string) (redis.Conn, error) {\n\treturn redis.DialTimeout(\"tcp\", address, redisConnectionTimeout, redisReadWriteTimeout, redisReadWriteTimeout)\n}\n\nfunc (r *redisobj) askRole(c redis.Conn) (string, error) {\n\troleReply, err := redis.Values(c.Do(\"ROLE\"))\n\trole, err := redis.String(roleReply[0], err)\n\treturn role, err\n}\n\nfunc (r *redisobj) auth(c redis.Conn) (err error) {\n\tif GetConfig().RedisPassword != \"\" {\n\t\t_, err = c.Do(\"AUTH\", GetConfig().RedisPassword)\n\t}\n\treturn\n}\n<commit_msg>redis: don't spam the logs with connection errors<commit_after>\/\/ Copyright (c) 2014 Ludovic Fauvet\n\/\/ Licensed under the MIT license\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"time\"\n)\n\nconst (\n\tredisConnectionTimeout = 200 * time.Millisecond\n\tredisReadWriteTimeout = 300 * time.Second\n)\n\nvar (\n\terrUnreachable = errors.New(\"endpoint unreachable\")\n)\n\ntype redisobj struct {\n\tpool *redis.Pool\n\tfailure bool\n}\n\nfunc NewRedis() *redisobj {\n\tr := &redisobj{}\n\n\tr.pool = &redis.Pool{\n\t\tMaxIdle: 10,\n\t\tIdleTimeout: 240 * time.Second,\n\t\tDial: func() (redis.Conn, error) {\n\t\t\treturn r.connect()\n\t\t},\n\t\tTestOnBorrow: func(c redis.Conn, t time.Time) error {\n\t\t\t_, err := c.Do(\"PING\")\n\t\t\treturn err\n\t\t},\n\t}\n\n\treturn r\n}\n\nfunc (r *redisobj) Close() {\n\tr.pool.Close()\n}\n\nfunc (r *redisobj) connect() (redis.Conn, error) {\n\tsentinels := GetConfig().RedisSentinels\n\n\tif len(sentinels) > 0 {\n\n\t\tif len(GetConfig().RedisSentinelMasterName) == 0 {\n\t\t\tr.logError(\"Config: RedisSentinelMasterName cannot be empty!\")\n\t\t\tgoto single\n\t\t}\n\n\t\tfor _, s := range sentinels {\n\t\t\tlog.Debug(\"Connecting to redis sentinel %s\", s.Host)\n\t\t\tvar master []string\n\t\t\tvar masterhost string\n\t\t\tvar cm redis.Conn\n\n\t\t\tc, err := r.connectTo(s.Host)\n\t\t\tif err != nil {\n\t\t\t\tr.logError(\"Sentinel: %s\", err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/AUTH?\n\t\t\trole, err := r.askRole(c)\n\t\t\tif err != nil {\n\t\t\t\tr.logError(\"Sentinel: %s\", err.Error())\n\t\t\t\tgoto closeSentinel\n\t\t\t}\n\t\t\tif role != \"sentinel\" {\n\t\t\t\tr.logError(\"Sentinel: %s is not a sentinel but a %s\", s.Host, role)\n\t\t\t\tgoto closeSentinel\n\t\t\t}\n\n\t\t\tmaster, err = redis.Strings(c.Do(\"SENTINEL\", \"get-master-addr-by-name\", GetConfig().RedisSentinelMasterName))\n\t\t\tif err == redis.ErrNil {\n\t\t\t\tr.logError(\"Sentinel: %s doesn't know the master-name %s\", s.Host, GetConfig().RedisSentinelMasterName)\n\t\t\t\tgoto closeSentinel\n\t\t\t} else if err != nil {\n\t\t\t\tr.logError(\"Sentinel: %s\", err.Error())\n\t\t\t\tgoto closeSentinel\n\t\t\t}\n\n\t\t\tmasterhost = fmt.Sprintf(\"%s:%s\", master[0], master[1])\n\n\t\t\tcm, err = r.connectTo(masterhost)\n\t\t\tif err != nil {\n\t\t\t\tr.logError(\"Redis master: %s\", err.Error())\n\t\t\t\tgoto closeSentinel\n\t\t\t}\n\n\t\t\tif r.auth(cm) != nil {\n\t\t\t\tr.logError(\"Redis master: auth failed\")\n\t\t\t\tgoto closeMaster\n\t\t\t}\n\n\t\t\trole, err = r.askRole(cm)\n\t\t\tif err != nil {\n\t\t\t\tr.logError(\"Redis master: %s\", err.Error())\n\t\t\t\tgoto closeMaster\n\t\t\t}\n\t\t\tif role != \"master\" {\n\t\t\t\tr.logError(\"Redis master: %s is not a master but a %s\", masterhost, role)\n\t\t\t\tgoto closeMaster\n\t\t\t}\n\n\t\t\t\/\/ Close the connection to the sentinel\n\t\t\tc.Close()\n\t\t\tr.failure = false\n\n\t\t\tlog.Debug(\"Connected to redis master %s\", masterhost)\n\t\t\treturn cm, nil\n\n\t\tcloseMaster:\n\t\t\tcm.Close()\n\n\t\tcloseSentinel:\n\t\t\tc.Close()\n\t\t}\n\t}\n\nsingle:\n\n\tif len(GetConfig().RedisAddress) == 0 {\n\t\tif len(sentinels) == 0 {\n\t\t\tlog.Error(\"No redis master available\")\n\t\t}\n\t\tr.failure = true\n\t\treturn nil, errUnreachable\n\t}\n\n\tif r.failure == false {\n\t\tlog.Warning(\"No redis master available, trying using the configured RedisAddress as fallback\")\n\t}\n\n\tc, err := r.connectTo(GetConfig().RedisAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif r.auth(c); err != nil {\n\t\tc.Close()\n\t\treturn nil, err\n\t}\n\trole, err := r.askRole(c)\n\tif err != nil {\n\t\tr.logError(\"Redis master: %s\", err.Error())\n\t\tr.failure = true\n\t\treturn nil, errUnreachable\n\t}\n\tif role != \"master\" {\n\t\tr.logError(\"Redis master: %s is not a master but a %s\", GetConfig().RedisAddress, role)\n\t\tr.failure = true\n\t\treturn nil, errUnreachable\n\t}\n\tr.failure = false\n\tlog.Debug(\"Connected to redis master %s\", GetConfig().RedisAddress)\n\treturn c, err\n\n}\n\nfunc (r *redisobj) connectTo(address string) (redis.Conn, error) {\n\treturn redis.DialTimeout(\"tcp\", address, redisConnectionTimeout, redisReadWriteTimeout, redisReadWriteTimeout)\n}\n\nfunc (r *redisobj) askRole(c redis.Conn) (string, error) {\n\troleReply, err := redis.Values(c.Do(\"ROLE\"))\n\trole, err := redis.String(roleReply[0], err)\n\treturn role, err\n}\n\nfunc (r *redisobj) auth(c redis.Conn) (err error) {\n\tif GetConfig().RedisPassword != \"\" {\n\t\t_, err = c.Do(\"AUTH\", GetConfig().RedisPassword)\n\t}\n\treturn\n}\n\nfunc (r *redisobj) logError(format string, args ...interface{}) {\n\tif r.failure {\n\t\tlog.Debug(format, args...)\n\t} else {\n\t\tlog.Error(format, args...)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package jwt\n\nimport (\n\t\"errors\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\tjwt \"github.com\/dgrijalva\/jwt-go\"\n\t\"github.com\/go-kit\/kit\/endpoint\"\n)\n\ntype contextKey string\n\nconst (\n\t\/\/ JWTTokenContextKey holds the key used to store a JWT Token in the context\n\tJWTTokenContextKey contextKey = \"JWTToken\"\n\t\/\/ JWTClaimsContxtKey holds the key used to store the JWT Claims in the context\n\tJWTClaimsContextKey contextKey = \"JWTClaims\"\n)\n\nvar (\n\tErrTokenContextMissing = errors.New(\"Token up for parsing was not passed through the context\")\n\tErrTokenInvalid = errors.New(\"JWT Token was invalid\")\n\tErrTokenExpired = errors.New(\"JWT Token is expired\")\n\tErrTokenMalformed = errors.New(\"JWT Token is malformed\")\n\tErrTokenNotActive = errors.New(\"Token is not valid yet\")\n\tErrUnexpectedSigningMethod = errors.New(\"Unexpected signing method\")\n)\n\ntype Claims map[string]interface{}\n\ntype KeySet map[string]struct {\n\tMethod jwt.SigningMethod\n\tKey []byte\n}\n\n\/\/ Create a new JWT token generating middleware, specifying signing method and the claims\n\/\/ you would like it to contain. Particularly useful for clients.\nfunc NewSigner(kid string, key []byte, method jwt.SigningMethod, claims Claims) endpoint.Middleware {\n\treturn func(next endpoint.Endpoint) endpoint.Endpoint {\n\t\treturn func(ctx context.Context, request interface{}) (response interface{}, err error) {\n\t\t\ttoken := jwt.NewWithClaims(method, jwt.MapClaims(claims))\n\t\t\ttoken.Header[\"kid\"] = kid\n\n\t\t\t\/\/ Sign and get the complete encoded token as a string using the secret\n\t\t\ttokenString, err := token.SignedString(key)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tctx = context.WithValue(ctx, JWTTokenContextKey, tokenString)\n\n\t\t\treturn next(ctx, request)\n\t\t}\n\t}\n}\n\n\/\/ Create a new JWT token parsing middleware, specifying a jwt.Keyfunc interface and the\n\/\/ signing method. Adds the resulting claims to endpoint context or returns error on invalid\n\/\/ token. Particularly useful for servers.\nfunc NewParser(keyFunc jwt.Keyfunc, method jwt.SigningMethod) endpoint.Middleware {\n\treturn func(next endpoint.Endpoint) endpoint.Endpoint {\n\t\treturn func(ctx context.Context, request interface{}) (response interface{}, err error) {\n\t\t\t\/\/ tokenString is stored in the context from the transport handlers\n\t\t\ttokenString, ok := ctx.Value(JWTTokenContextKey).(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, ErrTokenContextMissing\n\t\t\t}\n\n\t\t\t\/\/ Parse takes the token string and a function for looking up the key. The latter is especially\n\t\t\t\/\/ useful if you use multiple keys for your application. The standard is to use 'kid' in the\n\t\t\t\/\/ head of the token to identify which key to use, but the parsed token (head and claims) is provided\n\t\t\t\/\/ to the callback, providing flexibility.\n\t\t\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\t\t\t\/\/ Don't forget to validate the alg is what you expect:\n\t\t\t\tif token.Method != method {\n\t\t\t\t\treturn nil, ErrUnexpectedSigningMethod\n\t\t\t\t}\n\n\t\t\t\treturn keyFunc(token)\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tif e, ok := err.(*jwt.ValidationError); ok && e.Inner != nil {\n\t\t\t\t\tif e.Errors&jwt.ValidationErrorMalformed != 0 {\n\t\t\t\t\t\t\/\/ Token is malformed\n\t\t\t\t\t\treturn nil, ErrTokenMalformed\n\t\t\t\t\t} else if e.Errors&jwt.ValidationErrorExpired != 0 {\n\t\t\t\t\t\t\/\/ Token is expired\n\t\t\t\t\t\treturn nil, ErrTokenExpired\n\t\t\t\t\t} else if e.Errors&jwt.ValidationErrorNotValidYet != 0 {\n\t\t\t\t\t\t\/\/ Token is not active yet\n\t\t\t\t\t\treturn nil, ErrTokenNotActive\n\t\t\t\t\t}\n\n\t\t\t\t\treturn nil, e.Inner\n\t\t\t\t}\n\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif !token.Valid {\n\t\t\t\treturn nil, ErrTokenInvalid\n\t\t\t}\n\n\t\t\tif claims, ok := token.Claims.(jwt.MapClaims); ok {\n\t\t\t\tctx = context.WithValue(ctx, JWTClaimsContextKey, Claims(claims))\n\t\t\t}\n\n\t\t\treturn next(ctx, request)\n\t\t}\n\t}\n}\n<commit_msg>Remove unused keyset struct<commit_after>package jwt\n\nimport (\n\t\"errors\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\tjwt \"github.com\/dgrijalva\/jwt-go\"\n\t\"github.com\/go-kit\/kit\/endpoint\"\n)\n\ntype contextKey string\n\nconst (\n\t\/\/ JWTTokenContextKey holds the key used to store a JWT Token in the context\n\tJWTTokenContextKey contextKey = \"JWTToken\"\n\t\/\/ JWTClaimsContxtKey holds the key used to store the JWT Claims in the context\n\tJWTClaimsContextKey contextKey = \"JWTClaims\"\n)\n\nvar (\n\tErrTokenContextMissing = errors.New(\"Token up for parsing was not passed through the context\")\n\tErrTokenInvalid = errors.New(\"JWT Token was invalid\")\n\tErrTokenExpired = errors.New(\"JWT Token is expired\")\n\tErrTokenMalformed = errors.New(\"JWT Token is malformed\")\n\tErrTokenNotActive = errors.New(\"Token is not valid yet\")\n\tErrUnexpectedSigningMethod = errors.New(\"Unexpected signing method\")\n)\n\ntype Claims map[string]interface{}\n\n\/\/ Create a new JWT token generating middleware, specifying signing method and the claims\n\/\/ you would like it to contain. Particularly useful for clients.\nfunc NewSigner(kid string, key []byte, method jwt.SigningMethod, claims Claims) endpoint.Middleware {\n\treturn func(next endpoint.Endpoint) endpoint.Endpoint {\n\t\treturn func(ctx context.Context, request interface{}) (response interface{}, err error) {\n\t\t\ttoken := jwt.NewWithClaims(method, jwt.MapClaims(claims))\n\t\t\ttoken.Header[\"kid\"] = kid\n\n\t\t\t\/\/ Sign and get the complete encoded token as a string using the secret\n\t\t\ttokenString, err := token.SignedString(key)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tctx = context.WithValue(ctx, JWTTokenContextKey, tokenString)\n\n\t\t\treturn next(ctx, request)\n\t\t}\n\t}\n}\n\n\/\/ Create a new JWT token parsing middleware, specifying a jwt.Keyfunc interface and the\n\/\/ signing method. Adds the resulting claims to endpoint context or returns error on invalid\n\/\/ token. Particularly useful for servers.\nfunc NewParser(keyFunc jwt.Keyfunc, method jwt.SigningMethod) endpoint.Middleware {\n\treturn func(next endpoint.Endpoint) endpoint.Endpoint {\n\t\treturn func(ctx context.Context, request interface{}) (response interface{}, err error) {\n\t\t\t\/\/ tokenString is stored in the context from the transport handlers\n\t\t\ttokenString, ok := ctx.Value(JWTTokenContextKey).(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, ErrTokenContextMissing\n\t\t\t}\n\n\t\t\t\/\/ Parse takes the token string and a function for looking up the key. The latter is especially\n\t\t\t\/\/ useful if you use multiple keys for your application. The standard is to use 'kid' in the\n\t\t\t\/\/ head of the token to identify which key to use, but the parsed token (head and claims) is provided\n\t\t\t\/\/ to the callback, providing flexibility.\n\t\t\ttoken, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {\n\t\t\t\t\/\/ Don't forget to validate the alg is what you expect:\n\t\t\t\tif token.Method != method {\n\t\t\t\t\treturn nil, ErrUnexpectedSigningMethod\n\t\t\t\t}\n\n\t\t\t\treturn keyFunc(token)\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tif e, ok := err.(*jwt.ValidationError); ok && e.Inner != nil {\n\t\t\t\t\tif e.Errors&jwt.ValidationErrorMalformed != 0 {\n\t\t\t\t\t\t\/\/ Token is malformed\n\t\t\t\t\t\treturn nil, ErrTokenMalformed\n\t\t\t\t\t} else if e.Errors&jwt.ValidationErrorExpired != 0 {\n\t\t\t\t\t\t\/\/ Token is expired\n\t\t\t\t\t\treturn nil, ErrTokenExpired\n\t\t\t\t\t} else if e.Errors&jwt.ValidationErrorNotValidYet != 0 {\n\t\t\t\t\t\t\/\/ Token is not active yet\n\t\t\t\t\t\treturn nil, ErrTokenNotActive\n\t\t\t\t\t}\n\n\t\t\t\t\treturn nil, e.Inner\n\t\t\t\t}\n\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif !token.Valid {\n\t\t\t\treturn nil, ErrTokenInvalid\n\t\t\t}\n\n\t\t\tif claims, ok := token.Claims.(jwt.MapClaims); ok {\n\t\t\t\tctx = context.WithValue(ctx, JWTClaimsContextKey, Claims(claims))\n\t\t\t}\n\n\t\t\treturn next(ctx, request)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package picoborgrev allows users to control pico borg reverse motor controllers over I2C using the gobot.io robotic framework.\n\/\/ See: https:\/\/www.piborg.org\/, https:\/\/gobot.io\/\npackage picoborgrev\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\tmultierror \"github.com\/hashicorp\/go-multierror\"\n\t\"gobot.io\/x\/gobot\"\n\t\"gobot.io\/x\/gobot\/drivers\/i2c\"\n)\n\nvar _ gobot.Driver = (*Driver)(nil)\n\nconst (\n\tpicoborgAddress = 0x44 \/\/ Default address\n\tpwmMax = 255 \/\/ Max pwm value\n\ti2cMaxLen = 4 \/\/ Max len of I2C message\n\ti2cIDPicoborgRev = 0x15 \/\/ i2c id of picoborg rev board\n\tcommandSetLED = 0x1 \/\/ Set the LED status\n\tcommandGetLED = 0x2 \/\/ Get the LED status\n\tcommandSetAFWD = 0x3 \/\/ Set motor 2 PWM rate in a forwards direction\n\tcommandSetAREV = 0x4 \/\/ Set motor 2 PWM rate in a reverse direction\n\tcommandGetA = 0x5 \/\/ Get motor 2 direction and PWM rate\n\tcommandSetBFWD = 0x6 \/\/ Set motor 1 PWM rate in a forwards direction\n\tcommandSetBREV = 0x7 \/\/ Set motor 1 PWM rate in a reverse direction\n\tcommandGetB = 0x8 \/\/ Get motor 1 direction and PWM rate\n\tcommandAllOFF = 0x9 \/\/ Switch everything off\n\tcommandResetEPO = 0x10 \/\/ Resets the EPO flag, use after EPO has been tripped and switch is now clear\n\tcommandGetEPO = 0x11 \/\/ Get the EPO latched flag\n\tcommandSetEPOIgnore = 0x12 \/\/ Set the EPO ignored flag, allows the system to run without an EPO\n\tcommandGetEPOIgnore = 0x13 \/\/ Get the EPO ignored flag\n\tcommadGetDriveFault = 0x14 \/\/ Get the drive fault flag, indicates faults such as short-circuits and under voltage\n\tcommandSetAllFWD = 0x15 \/\/ Set all motors PWM rate in a forwards direction\n\tcommandSetAllREV = 0x16 \/\/ Set all motors PWM rate in a reverse direction\n\tcommandSetFailsafe = 0x17 \/\/ Set the failsafe flag, turns the motors off if communication is interrupted\n\tcommandGetFailsafe = 0x18 \/\/ Get the failsafe flag\n\tcommandSetENCMode = 0x19 \/\/ Set the board into encoder or speed mode\n\tcommandGetENCMode = 0x20 \/\/ Get the boards current mode, encoder or speed\n\tcommandMoveAFWD = 0x21 \/\/ Move motor 2 forward by n encoder ticks\n\tcommandMoveAREV = 0x22 \/\/ Move motor 2 reverse by n encoder ticks\n\tcommandMoveBFWD = 0x23 \/\/ Move motor 1 forward by n encoder ticks\n\tcommandMoveBREV = 0x24 \/\/ Move motor 1 reverse by n encoder ticks\n\tcommandMoveAllFWD = 0x25 \/\/ Move all motors forward by n encoder ticks\n\tcommandMoveAllREV = 0x26 \/\/ Move all motors reverse by n encoder ticks\n\tcommandGetENCMoving = 0x27 \/\/ Get the status of encoders moving\n\tcommandENCSpeed = 0x28 \/\/ Set the maximum PWM rate in encoder mode\n\tcommandGetENCSpeed = 0x29 \/\/ Get the maximum PWM rate in encoder mode\n\tcommandGetID = 0x99 \/\/ Get the board identifier\n\tcommandSetI2cAddr = 0xAA \/\/ Set a new I2C address\n\n\tcommandValueFWD = 0x1 \/\/ I2C value representing forward\n\tcommandValueREV = 0x2 \/\/ I2C value representing reverse\n\n\tcommandValueOn = 0x1 \/\/ I2C value representing on\n\tcommandValueOff = 0x0 \/\/ I2C value representing off\n\n)\n\n\/\/ RevDriver pico borg rev driver interace\ntype RevDriver interface {\n\tName() string\n\tConnection() gobot.Connection\n\tStart() error\n\tHalt() error\n\tResetEPO() error\n\tGetEPO() (bool, error)\n\tSetMotorA(float32) error\n\tSetMotorB(float32) error\n\tStopAllMotors() error\n}\n\n\/\/ Driver struct\ntype Driver struct {\n\tname string\n\tconnector i2c.Connector\n\tconnection i2c.Connection\n\ti2c.Config\n\tlock sync.Mutex\n}\n\n\/\/ NewDriver creates a new driver with specified name and i2c interface\n\/\/\n\/\/ Params:\n\/\/\t\tconn Connector - the Adaptor to use with this Driver\n\/\/\n\/\/ Optional params:\n\/\/\t\ti2c.WithBus(int):\tbus to use with this driver\n\/\/\t\ti2c.WithAddress(int):\taddress to use with this driver\n\/\/\n\/\/\nfunc NewDriver(a i2c.Connector, options ...func(i2c.Config)) *Driver {\n\td := &Driver{\n\t\tname: gobot.DefaultName(\"PicoBorg\"),\n\t\tconnector: a,\n\t\tConfig: i2c.NewConfig(),\n\t\tlock: sync.Mutex{},\n\t}\n\n\tfor _, option := range options {\n\t\toption(d)\n\t}\n\treturn d\n}\n\n\/\/ Name returns the name of the device\nfunc (h *Driver) Name() string { return h.name }\n\n\/\/ SetName of the device\nfunc (h *Driver) SetName(n string) { h.name = n }\n\n\/\/ Connection returns the connection\nfunc (h *Driver) Connection() gobot.Connection { return h.connection.(gobot.Connection) }\n\n\/\/ Start initialized the picoborgrev\nfunc (h *Driver) Start() (err error) {\n\tbus := h.GetBusOrDefault(h.connector.GetDefaultBus())\n\taddress := h.GetAddressOrDefault(picoborgAddress)\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\th.connection, err = h.connector.GetConnection(address, bus)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\th.connection.Write([]byte{commandGetID})\n\tdata := []byte{0, 0, 0, 0}\n\n\tread, err := h.connection.Read(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif read == i2cMaxLen {\n\t\tif data[1] != i2cIDPicoborgRev {\n\t\t\terr := fmt.Errorf(\"Found a device but it is not a PicoBorg Revers (ID %X instead of %X)\", data[1], i2cIDPicoborgRev)\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\terr := fmt.Errorf(\"Device not found\")\n\t\treturn err\n\t}\n\treturn\n}\n\n\/\/ Halt stops all motors\nfunc (h *Driver) Halt() (errs error) {\n\terr := h.StopAllMotors()\n\tif err != nil {\n\t\tmultierror.Append(errs, err)\n\t}\n\treturn nil\n}\n\n\/\/ StopAllMotors will stop all motors\nfunc (h *Driver) StopAllMotors() error {\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\n\terr := h.connection.WriteByte(byte(commandAllOFF))\n\treturn err\n}\n\n\/\/ ResetEPO latch state, use to allow movement again after the EPO has been tripped\nfunc (h *Driver) ResetEPO() error {\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\n\terr := h.connection.WriteByte(byte(commandResetEPO))\n\treturn err\n}\n\n\/\/ GetEPO Reads the system EPO latch state.\nfunc (h *Driver) GetEPO() (bool, error) {\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\n\terr := h.connection.WriteByte(byte(commandGetEPO))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tdata, err := h.connection.ReadByte()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif int(data) == commandValueOff {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}\n\n\/\/ SetMotorA generic set motor speed function\nfunc (h *Driver) SetMotorA(power float32) error {\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\n\tvar command byte\n\tvar pwm int\n\tif power < 0 {\n\t\tcommand = commandSetAREV\n\t\tpwm = -int(pwmMax * power)\n\t} else {\n\t\tcommand = commandSetAFWD\n\t\tpwm = int(pwmMax * power)\n\t}\n\n\terr := h.connection.WriteByteData(byte(command), byte(pwm))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ SetMotorB generic set motor speed function\nfunc (h *Driver) SetMotorB(power float32) error {\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\tvar command byte\n\tvar pwm int\n\tif power < 0 {\n\t\tcommand = commandSetBREV\n\t\tpwm = -int(pwmMax * power)\n\t} else {\n\t\tcommand = commandSetBFWD\n\t\tpwm = int(pwmMax * power)\n\t}\n\n\terr := h.connection.WriteByteData(byte(command), byte(pwm))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<commit_msg>Minor changes<commit_after>\/\/ Package picoborgrev allows users to control pico borg reverse motor controllers over I2C using the gobot.io robotic framework.\n\/\/ See: https:\/\/www.piborg.org\/, https:\/\/gobot.io\/\npackage picoborgrev\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\tmultierror \"github.com\/hashicorp\/go-multierror\"\n\t\"gobot.io\/x\/gobot\"\n\t\"gobot.io\/x\/gobot\/drivers\/i2c\"\n)\n\nvar _ gobot.Driver = (*Driver)(nil)\n\nconst (\n\tpicoborgAddress = 0x44 \/\/ Default address\n\tpwmMax = 255 \/\/ Max pwm value\n\ti2cMaxLen = 4 \/\/ Max len of I2C message\n\ti2cIDPicoborgRev = 0x15 \/\/ i2c id of picoborg rev board\n\tcommandSetLED = 0x1 \/\/ Set the LED status\n\tcommandGetLED = 0x2 \/\/ Get the LED status\n\tcommandSetAFWD = 0x3 \/\/ Set motor 2 PWM rate in a forwards direction\n\tcommandSetAREV = 0x4 \/\/ Set motor 2 PWM rate in a reverse direction\n\tcommandGetA = 0x5 \/\/ Get motor 2 direction and PWM rate\n\tcommandSetBFWD = 0x6 \/\/ Set motor 1 PWM rate in a forwards direction\n\tcommandSetBREV = 0x7 \/\/ Set motor 1 PWM rate in a reverse direction\n\tcommandGetB = 0x8 \/\/ Get motor 1 direction and PWM rate\n\tcommandAllOFF = 0x9 \/\/ Switch everything off\n\tcommandResetEPO = 0x10 \/\/ Resets the EPO flag, use after EPO has been tripped and switch is now clear\n\tcommandGetEPO = 0x11 \/\/ Get the EPO latched flag\n\tcommandSetEPOIgnore = 0x12 \/\/ Set the EPO ignored flag, allows the system to run without an EPO\n\tcommandGetEPOIgnore = 0x13 \/\/ Get the EPO ignored flag\n\tcommadGetDriveFault = 0x14 \/\/ Get the drive fault flag, indicates faults such as short-circuits and under voltage\n\tcommandSetAllFWD = 0x15 \/\/ Set all motors PWM rate in a forwards direction\n\tcommandSetAllREV = 0x16 \/\/ Set all motors PWM rate in a reverse direction\n\tcommandSetFailsafe = 0x17 \/\/ Set the failsafe flag, turns the motors off if communication is interrupted\n\tcommandGetFailsafe = 0x18 \/\/ Get the failsafe flag\n\tcommandSetENCMode = 0x19 \/\/ Set the board into encoder or speed mode\n\tcommandGetENCMode = 0x20 \/\/ Get the boards current mode, encoder or speed\n\tcommandMoveAFWD = 0x21 \/\/ Move motor 2 forward by n encoder ticks\n\tcommandMoveAREV = 0x22 \/\/ Move motor 2 reverse by n encoder ticks\n\tcommandMoveBFWD = 0x23 \/\/ Move motor 1 forward by n encoder ticks\n\tcommandMoveBREV = 0x24 \/\/ Move motor 1 reverse by n encoder ticks\n\tcommandMoveAllFWD = 0x25 \/\/ Move all motors forward by n encoder ticks\n\tcommandMoveAllREV = 0x26 \/\/ Move all motors reverse by n encoder ticks\n\tcommandGetENCMoving = 0x27 \/\/ Get the status of encoders moving\n\tcommandENCSpeed = 0x28 \/\/ Set the maximum PWM rate in encoder mode\n\tcommandGetENCSpeed = 0x29 \/\/ Get the maximum PWM rate in encoder mode\n\tcommandGetID = 0x99 \/\/ Get the board identifier\n\tcommandSetI2cAddr = 0xAA \/\/ Set a new I2C address\n\n\tcommandValueFWD = 0x1 \/\/ I2C value representing forward\n\tcommandValueREV = 0x2 \/\/ I2C value representing reverse\n\n\tcommandValueOn = 0x1 \/\/ I2C value representing on\n\tcommandValueOff = 0x0 \/\/ I2C value representing off\n\n)\n\n\/\/ RevDriver pico borg rev driver interace\ntype RevDriver interface {\n\tName() string\n\tConnection() gobot.Connection\n\tStart() error\n\tHalt() error\n\tResetEPO() error\n\tGetEPO() (bool, error)\n\tSetMotorA(float32) error\n\tSetMotorB(float32) error\n\tStopAllMotors() error\n}\n\n\/\/ Driver struct\ntype Driver struct {\n\tname string\n\tconnector i2c.Connector\n\tconnection i2c.Connection\n\ti2c.Config\n\tlock sync.Mutex\n}\n\n\/\/ NewDriver creates a new driver with specified name and i2c interface\n\/\/\n\/\/ Params:\n\/\/\t\tconn Connector - the Adaptor to use with this Driver\n\/\/\n\/\/ Optional params:\n\/\/\t\ti2c.WithBus(int):\tbus to use with this driver\n\/\/\t\ti2c.WithAddress(int):\taddress to use with this driver\n\/\/\n\/\/\nfunc NewDriver(a i2c.Connector, options ...func(i2c.Config)) *Driver {\n\td := &Driver{\n\t\tname: gobot.DefaultName(\"PicoBorg\"),\n\t\tconnector: a,\n\t\tConfig: i2c.NewConfig(),\n\t\tlock: sync.Mutex{},\n\t}\n\n\tfor _, option := range options {\n\t\toption(d)\n\t}\n\treturn d\n}\n\n\/\/ Name returns the name of the device\nfunc (h *Driver) Name() string { return h.name }\n\n\/\/ SetName of the device\nfunc (h *Driver) SetName(n string) { h.name = n }\n\n\/\/ Connection returns the connection\nfunc (h *Driver) Connection() gobot.Connection { return h.connection.(gobot.Connection) }\n\n\/\/ Start initialized the picoborgrev\nfunc (h *Driver) Start() (err error) {\n\tbus := h.GetBusOrDefault(h.connector.GetDefaultBus())\n\taddress := h.GetAddressOrDefault(picoborgAddress)\n\th.connection, err = h.connector.GetConnection(address, bus)\n\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\th.connection.Write([]byte{commandGetID})\n\tdata := []byte{0, 0, 0, 0}\n\n\tread, err := h.connection.Read(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif read == i2cMaxLen {\n\t\tif data[1] != i2cIDPicoborgRev {\n\t\t\terr := fmt.Errorf(\"Found a device but it is not a PicoBorg Revers (ID %X instead of %X)\", data[1], i2cIDPicoborgRev)\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\terr := fmt.Errorf(\"Device not found\")\n\t\treturn err\n\t}\n\treturn\n}\n\n\/\/ Halt stops all motors\nfunc (h *Driver) Halt() (errs error) {\n\terr := h.StopAllMotors()\n\tif err != nil {\n\t\tmultierror.Append(errs, err)\n\t}\n\treturn nil\n}\n\n\/\/ StopAllMotors will stop all motors\nfunc (h *Driver) StopAllMotors() error {\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\n\terr := h.connection.WriteByte(byte(commandAllOFF))\n\treturn err\n}\n\n\/\/ ResetEPO latch state, use to allow movement again after the EPO has been tripped\nfunc (h *Driver) ResetEPO() error {\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\n\terr := h.connection.WriteByte(byte(commandResetEPO))\n\treturn err\n}\n\n\/\/ GetEPO Reads the system EPO latch state.\nfunc (h *Driver) GetEPO() (bool, error) {\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\n\terr := h.connection.WriteByte(byte(commandGetEPO))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tdata, err := h.connection.ReadByte()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif int(data) == commandValueOff {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}\n\n\/\/ SetMotorA generic set motor speed function\nfunc (h *Driver) SetMotorA(power float32) error {\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\n\tvar command byte\n\tvar pwm int\n\tif power < 0 {\n\t\tcommand = commandSetAREV\n\t\tpwm = -int(pwmMax * power)\n\t} else {\n\t\tcommand = commandSetAFWD\n\t\tpwm = int(pwmMax * power)\n\t}\n\n\terr := h.connection.WriteByteData(byte(command), byte(pwm))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ SetMotorB generic set motor speed function\nfunc (h *Driver) SetMotorB(power float32) error {\n\th.lock.Lock()\n\tdefer h.lock.Unlock()\n\tvar command byte\n\tvar pwm int\n\tif power < 0 {\n\t\tcommand = commandSetBREV\n\t\tpwm = -int(pwmMax * power)\n\t} else {\n\t\tcommand = commandSetBFWD\n\t\tpwm = int(pwmMax * power)\n\t}\n\n\terr := h.connection.WriteByteData(byte(command), byte(pwm))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage contentaddressable contains tools for writing content addressable files.\nFiles are written to a temporary location, and only renamed to the final\nlocation after the file's OID (Object ID) has been verified.\n\n filename := \"path\/to\/01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b\"\n file, err := contentaddressable.NewFile(filename)\n if err != nil {\n panic(err)\n }\n defer file.Close()\n\n file.Oid \/\/ 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b\n\n written, err := io.Copy(file, someReader)\n\n if err == nil {\n \/\/ Move file to final location if OID is verified.\n err = file.Accept()\n }\n\n if err != nil {\n panic(err)\n }\n\nCurrently SHA-256 is used for a file's OID.\n*\/\npackage contentaddressable\n<commit_msg>update docs with proposed reader interface<commit_after>\/*\nPackage contentaddressable contains tools for reading and writing content\naddressable files. Files are written to a temporary location, and only renamed\nto the final location after the file's OID (Object ID) has been verified.\n\n filename := \"path\/to\/01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b\"\n file, err := contentaddressable.NewFile(filename)\n if err != nil {\n panic(err)\n }\n defer file.Close()\n\n file.Oid \/\/ 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b\n\n written, err := io.Copy(file, someReader)\n\n if err == nil {\n \/\/ Move file to final location if OID is verified.\n err = file.Accept()\n }\n\n if err != nil {\n panic(err)\n }\n\nCurrently SHA-256 is used for a file's OID.\n\nYou can also read files, while verifying that they are not corrupt.\n\n filename := \"path\/to\/01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b\"\n\n \/\/ get this from doing an os.Stat() or something\n expectedSize := 123\n\n \/\/ returns a contentaddressable.ReadCloser, with some extra functions on top\n \/\/ of io.ReadCloser.\n reader, err := contentaddressable.Open(filename)\n if err != nil {\n panic(err)\n }\n defer file.Close()\n\n written, err := io.Copy(ioutil.Discard, reader)\n if err != nil {\n panic(err)\n }\n\n seenBytes := reader.SeenBytes()\n\n if written != seenBytes {\n panic(\"reader is broken\")\n }\n\n if seenBytes < expected {\n panic(\"partial read\")\n }\n\n if reader.Oid() != \"01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b\" {\n panic(\"SHA-256 signature doesn't match expected\")\n }\n*\/\npackage contentaddressable\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ A tool to add missing fields to an existing GCS bucket, for #18.\n\/\/\n\/\/ Input is on stdin, and is of the form \"<SHA-1> <CRC32C> <MD5>\", e.g.:\n\/\/\n\/\/ e04b25d650dee1dff6ab1743724fa7c184282e94 0x12e9bf88 2bad5bb78f17232ef8c727f59eb82325\n\/\/\npackage main\n\nimport (\n\t\"bufio\"\n\t\"crypto\/md5\"\n\t\"crypto\/sha1\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/jacobsa\/comeback\/blob\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\"\n\t\"github.com\/jacobsa\/gcloud\/oauthutil\"\n\t\"github.com\/jacobsa\/gcloud\/syncutil\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar fKeyFile = flag.String(\"key_file\", \"\", \"\")\nvar fBucket = flag.String(\"bucket\", \"\", \"\")\nvar fToken = flag.String(\"token\", \"\", \"Initial continuation token. Be careful.\")\nvar fUpdateParallelism = flag.Int(\"parallelism\", 128, \"\")\n\ntype crc32cChecksum uint32\ntype md5Hash [md5.Size]byte\ntype sha1Hash [sha1.Size]byte\n\ntype checksums struct {\n\tcrc32c crc32cChecksum\n\tmd5 md5Hash\n}\n\n\/\/ A mapping from SHA-1 to CRC32C and MD5.\ntype checksumMap map[sha1Hash]checksums\n\nconst (\n\t\/\/ Cf. blob_store.go\n\tblobObjectNamePrefix = \"blobs\/\"\n\n\t\/\/ Cf. gcs_store.go\n\tmetadataKey_SHA1 = \"comeback_sha1\"\n\tmetadataKey_CRC32C = \"comeback_crc32c\"\n\tmetadataKey_MD5 = \"comeback_md5\"\n)\n\nvar gInputLineRe = regexp.MustCompile(\n\t\"^([0-9a-f]{40}) (0x[0-9a-f]{8}) ([0-9a-f]{32})$\")\n\nfunc parseInputLine(line []byte) (sha1 sha1Hash, c checksums, err error) {\n\t\/\/ Match against the regexp.\n\tmatches := gInputLineRe.FindSubmatch(line)\n\tif matches == nil {\n\t\terr = errors.New(\"No match.\")\n\t\treturn\n\t}\n\n\t\/\/ Parse each component.\n\t_, err = hex.Decode(sha1[:], matches[1])\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unexpected decode error for %q: %v\", matches[1], err))\n\t}\n\n\tcrc32c64, err := strconv.ParseUint(string(matches[2]), 0, 32)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unexpected decode error for %q: %v\", matches[2], err))\n\t}\n\n\tc.crc32c = crc32cChecksum(crc32c64)\n\n\t_, err = hex.Decode(c.md5[:], matches[3])\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unexpected decode error for %q: %v\", matches[3], err))\n\t}\n\n\treturn\n}\n\n\/\/ Read the supplied input file, producing a checksum map.\nfunc parseInput(in io.Reader) (m checksumMap, err error) {\n\tm = make(checksumMap)\n\n\t\/\/ Scan each line.\n\tscanner := bufio.NewScanner(in)\n\tfor scanner.Scan() {\n\t\tvar sha1 sha1Hash\n\t\tvar c checksums\n\t\tsha1, c, err = parseInputLine(scanner.Bytes())\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Parsing input line %q: %v\", scanner.Text(), err)\n\t\t\treturn\n\t\t}\n\n\t\tm[sha1] = c\n\t}\n\n\t\/\/ Was there an error scanning?\n\tif scanner.Err() != nil {\n\t\terr = fmt.Errorf(\"Scanning: %v\", scanner.Err())\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ List all blob objects in the GCS bucket into the channel.\nfunc listBlobObjects(\n\tctx context.Context,\n\tbucket gcs.Bucket,\n\tobjects chan<- *gcs.Object) (err error) {\n\treq := &gcs.ListObjectsRequest{\n\t\tPrefix: blobObjectNamePrefix,\n\t\tContinuationToken: *fToken,\n\t}\n\n\t\/\/ List until we run out.\n\tfor {\n\t\t\/\/ Fetch the next batch.\n\t\tvar listing *gcs.Listing\n\t\tlisting, err = bucket.ListObjects(ctx, req)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"ListObjects: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Pass on each object.\n\t\tfor _, o := range listing.Objects {\n\t\t\t\/\/ Special case: for gcsfuse compatibility, we allow blobObjectNamePrefix\n\t\t\t\/\/ to exist as its own object name. Skip it.\n\t\t\tif o.Name == blobObjectNamePrefix {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase objects <- o:\n\n\t\t\t\t\/\/ Cancelled?\n\t\t\tcase <-ctx.Done():\n\t\t\t\terr = ctx.Err()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Are we done?\n\t\tif listing.ContinuationToken == \"\" {\n\t\t\tbreak\n\t\t}\n\n\t\treq.ContinuationToken = listing.ContinuationToken\n\t\tlog.Printf(\"Continuation token: %q\", req.ContinuationToken)\n\t}\n\n\treturn\n}\n\n\/\/ Filter to names of objects that lack the appropriate metadata keys.\nfunc filterToProblematicNames(\n\tctx context.Context,\n\tobjects <-chan *gcs.Object,\n\tnames chan<- string) (err error) {\n\tfor o := range objects {\n\t\t\/\/ Skip objects that already have all of the keys.\n\t\t_, ok0 := o.Metadata[metadataKey_SHA1]\n\t\t_, ok1 := o.Metadata[metadataKey_CRC32C]\n\t\t_, ok2 := o.Metadata[metadataKey_MD5]\n\n\t\tif ok0 && ok1 && ok2 {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Pass on the names of others.\n\t\tselect {\n\t\tcase names <- o.Name:\n\n\t\t\t\/\/ Cancelled?\n\t\tcase <-ctx.Done():\n\t\t\terr = ctx.Err()\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ Parse the object name into its expected SHA-1 hash.\nfunc parseObjectName(name string) (sha1 sha1Hash, err error) {\n\tif !strings.HasPrefix(name, blobObjectNamePrefix) {\n\t\terr = fmt.Errorf(\"Expected prefix\")\n\t\treturn\n\t}\n\n\thexSha1 := strings.TrimPrefix(name, blobObjectNamePrefix)\n\tscore, err := blob.ParseHexScore(hexSha1)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"ParseHexScore: %v\", err)\n\t\treturn\n\t}\n\n\tsha1 = sha1Hash(score)\n\treturn\n}\n\n\/\/ For each object name, issue a request to set the appropriate metadata keys\n\/\/ based on the contents of the supplied map. Write out the names of the\n\/\/ objects processed, and those for whom info wasn't available.\nfunc fixProblematicObjects(\n\tctx context.Context,\n\tbucket gcs.Bucket,\n\tinfo checksumMap,\n\tnames <-chan string,\n\tprocessed chan<- string,\n\tunknown chan<- string) (err error) {\n\tfor name := range names {\n\t\t\/\/ Parse the name.\n\t\tvar sha1 sha1Hash\n\t\tsha1, err = parseObjectName(name)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Parsing object name %q: %v\", name, err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Do we have info for this object?\n\t\tc, ok := info[sha1]\n\t\tif !ok {\n\t\t\tselect {\n\t\t\tcase unknown <- name:\n\n\t\t\t\t\/\/ Cancelled?\n\t\t\tcase <-ctx.Done():\n\t\t\t\terr = ctx.Err()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Fix it up.\n\t\t\/\/\n\t\t\/\/ Formats cf. gcs_store.go.\n\t\tsha1Str := hex.EncodeToString(sha1[:])\n\t\tcrc32cStr := fmt.Sprintf(\"%#08x\", c.crc32c)\n\t\tmd5Str := hex.EncodeToString(c.md5[:])\n\n\t\treq := &gcs.UpdateObjectRequest{\n\t\t\tName: name,\n\t\t\tMetadata: map[string]*string{\n\t\t\t\tmetadataKey_SHA1: &sha1Str,\n\t\t\t\tmetadataKey_CRC32C: &crc32cStr,\n\t\t\t\tmetadataKey_MD5: &md5Str,\n\t\t\t},\n\t\t}\n\n\t\t_, err = bucket.UpdateObject(ctx, req)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"UpdateObject: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Pass on the name as processed.\n\t\tselect {\n\t\tcase processed <- name:\n\n\t\t\t\/\/ Cancelled?\n\t\tcase <-ctx.Done():\n\t\t\terr = ctx.Err()\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ Log status updates, and at the end log the objects that were not\n\/\/ processed, returning an error if non-zero.\nfunc monitorProgress(\n\tctx context.Context,\n\tprocessedChan <-chan string,\n\tunknownChan <-chan string) (err error) {\n\tvar processed int\n\tvar unknown int\n\n\t\/\/ Set up a ticker for logging status updates.\n\tconst period = time.Second\n\tticker := time.NewTicker(period)\n\tdefer ticker.Stop()\n\n\t\/\/ Keep going until both channels are closed.\n\tfor processedChan != nil || unknownChan != nil {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tlog.Printf(\"%v processed successfully, %v unknown\", processed, unknown)\n\n\t\tcase _, ok := <-processedChan:\n\t\t\tif ok {\n\t\t\t\tprocessed++\n\t\t\t} else {\n\t\t\t\tprocessedChan = nil\n\t\t\t}\n\n\t\tcase name, ok := <-unknownChan:\n\t\t\tif ok {\n\t\t\t\tlog.Printf(\"Unknown object: %q\", name)\n\t\t\t\tunknown++\n\t\t\t} else {\n\t\t\t\tunknownChan = nil\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Return an error if any object was unknown.\n\tif unknown != 0 {\n\t\terr = fmt.Errorf(\"%v unknown objects\", unknown)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc run(\n\tbucket gcs.Bucket,\n\tinfo checksumMap) (err error) {\n\tb := syncutil.NewBundle(context.Background())\n\tdefer func() { err = b.Join() }()\n\n\t\/\/ List all of the blob objects.\n\tobjectRecords := make(chan *gcs.Object, 10000)\n\tb.Add(func(ctx context.Context) (err error) {\n\t\tdefer close(objectRecords)\n\t\terr = listBlobObjects(ctx, bucket, objectRecords)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"listBlobObjects: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\treturn\n\t})\n\n\t\/\/ Filter to the ones we need to fix up.\n\tproblematicNames := make(chan string, 100)\n\tb.Add(func(ctx context.Context) (err error) {\n\t\tdefer close(problematicNames)\n\t\terr = filterToProblematicNames(ctx, objectRecords, problematicNames)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"filterToProblematicNames: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\treturn\n\t})\n\n\t\/\/ Fix those objects with some parallelism.\n\tconst parallelism = 128\n\tvar wg sync.WaitGroup\n\n\tprocessed := make(chan string, 100)\n\tunknown := make(chan string, 100)\n\n\tfor i := 0; i < *fUpdateParallelism; i++ {\n\t\twg.Add(1)\n\t\tb.Add(func(ctx context.Context) (err error) {\n\t\t\tdefer wg.Done()\n\t\t\terr = fixProblematicObjects(\n\t\t\t\tctx,\n\t\t\t\tbucket,\n\t\t\t\tinfo,\n\t\t\t\tproblematicNames,\n\t\t\t\tprocessed,\n\t\t\t\tunknown)\n\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"fixProblematicObjects: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\treturn\n\t\t})\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(processed)\n\t\tclose(unknown)\n\t}()\n\n\t\/\/ Log status updates, and at the end log the objects that were not\n\t\/\/ processed, returning an error if non-zero.\n\tb.Add(func(ctx context.Context) (err error) {\n\t\terr = monitorProgress(ctx, processed, unknown)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"monitorProgress: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\treturn\n\t})\n\n\treturn\n}\n\nfunc panicIf(err *error) {\n\tif *err != nil {\n\t\tpanic(*err)\n\t}\n}\n\nfunc getHTTPClient() (client *http.Client, err error) {\n\tif *fKeyFile == \"\" {\n\t\terr = errors.New(\"You must set --key_file.\")\n\t\treturn\n\t}\n\n\tconst scope = gcs.Scope_FullControl\n\tclient, err = oauthutil.NewJWTHttpClient(*fKeyFile, []string{scope})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"oauthutil.NewJWTHttpClient: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc bucketName() (name string, err error) {\n\tname = *fBucket\n\tif name == \"\" {\n\t\terr = errors.New(\"You must set --bucket.\")\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc getBucket() (b gcs.Bucket) {\n\tvar err error\n\tdefer panicIf(&err)\n\n\t\/\/ Get the HTTP client,\n\tclient, err := getHTTPClient()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"IntegrationTestHTTPClient: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Find the bucket name.\n\tname, err := bucketName()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"bucketName: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Create a connection.\n\tcfg := &gcs.ConnConfig{\n\t\tHTTPClient: client,\n\t\tMaxBackoffSleep: 30 * time.Second,\n\t}\n\n\tconn, err := gcs.NewConn(cfg)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"gcs.NewConn: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Extract the bucket.\n\tb = conn.GetBucket(name)\n\n\treturn\n}\n\nfunc main() {\n\tflag.Parse()\n\tbucket := getBucket()\n\tlog.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)\n\n\t\/\/ Panic if anything below fails.\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}()\n\n\t\/\/ Parse the input.\n\tlog.Println(\"Parsing input...\")\n\tinfo, err := parseInput(os.Stdin)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"parseInput: %v\", err)\n\t\treturn\n\t}\n\n\tlog.Println(\"Done parsing input.\")\n\n\t\/\/ Run.\n\terr = run(bucket, info)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"run: %v\", err)\n\t\treturn\n\t}\n}\n<commit_msg>Use for fix_metadata.<commit_after>\/\/ Copyright 2015 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ A tool to add missing fields to an existing GCS bucket, for #18.\n\/\/\n\/\/ Input is on stdin, and is of the form \"<SHA-1> <CRC32C> <MD5>\", e.g.:\n\/\/\n\/\/ e04b25d650dee1dff6ab1743724fa7c184282e94 0x12e9bf88 2bad5bb78f17232ef8c727f59eb82325\n\/\/\npackage main\n\nimport (\n\t\"bufio\"\n\t\"crypto\/md5\"\n\t\"crypto\/sha1\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/jacobsa\/comeback\/blob\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\"\n\t\"github.com\/jacobsa\/gcloud\/oauthutil\"\n\t\"github.com\/jacobsa\/gcloud\/syncutil\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar fKeyFile = flag.String(\"key_file\", \"\", \"\")\nvar fBucket = flag.String(\"bucket\", \"\", \"\")\nvar fToken = flag.String(\"token\", \"\", \"Initial continuation token. Be careful.\")\nvar fUpdateParallelism = flag.Int(\"parallelism\", 128, \"\")\n\ntype crc32cChecksum uint32\ntype md5Hash [md5.Size]byte\ntype sha1Hash [sha1.Size]byte\n\ntype checksums struct {\n\tcrc32c crc32cChecksum\n\tmd5 md5Hash\n}\n\n\/\/ A mapping from SHA-1 to CRC32C and MD5.\ntype checksumMap map[sha1Hash]checksums\n\nconst (\n\t\/\/ Cf. blob_store.go\n\tblobObjectNamePrefix = \"blobs\/\"\n\n\t\/\/ Cf. gcs_store.go\n\tmetadataKey_SHA1 = \"comeback_sha1\"\n\tmetadataKey_CRC32C = \"comeback_crc32c\"\n\tmetadataKey_MD5 = \"comeback_md5\"\n)\n\nvar gInputLineRe = regexp.MustCompile(\n\t\"^([0-9a-f]{40}) (0x[0-9a-f]{8}) ([0-9a-f]{32})$\")\n\nfunc parseInputLine(line []byte) (sha1 sha1Hash, c checksums, err error) {\n\t\/\/ Match against the regexp.\n\tmatches := gInputLineRe.FindSubmatch(line)\n\tif matches == nil {\n\t\terr = errors.New(\"No match.\")\n\t\treturn\n\t}\n\n\t\/\/ Parse each component.\n\t_, err = hex.Decode(sha1[:], matches[1])\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unexpected decode error for %q: %v\", matches[1], err))\n\t}\n\n\tcrc32c64, err := strconv.ParseUint(string(matches[2]), 0, 32)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unexpected decode error for %q: %v\", matches[2], err))\n\t}\n\n\tc.crc32c = crc32cChecksum(crc32c64)\n\n\t_, err = hex.Decode(c.md5[:], matches[3])\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unexpected decode error for %q: %v\", matches[3], err))\n\t}\n\n\treturn\n}\n\n\/\/ Read the supplied input file, producing a checksum map.\nfunc parseInput(in io.Reader) (m checksumMap, err error) {\n\tm = make(checksumMap)\n\n\t\/\/ Scan each line.\n\tscanner := bufio.NewScanner(in)\n\tfor scanner.Scan() {\n\t\tvar sha1 sha1Hash\n\t\tvar c checksums\n\t\tsha1, c, err = parseInputLine(scanner.Bytes())\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Parsing input line %q: %v\", scanner.Text(), err)\n\t\t\treturn\n\t\t}\n\n\t\tm[sha1] = c\n\t}\n\n\t\/\/ Was there an error scanning?\n\tif scanner.Err() != nil {\n\t\terr = fmt.Errorf(\"Scanning: %v\", scanner.Err())\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ Filter to names of objects that lack the appropriate metadata keys.\nfunc filterToProblematicNames(\n\tctx context.Context,\n\tobjects <-chan *gcs.Object,\n\tnames chan<- string) (err error) {\n\tfor o := range objects {\n\t\t\/\/ Skip objects that already have all of the keys.\n\t\t_, ok0 := o.Metadata[metadataKey_SHA1]\n\t\t_, ok1 := o.Metadata[metadataKey_CRC32C]\n\t\t_, ok2 := o.Metadata[metadataKey_MD5]\n\n\t\tif ok0 && ok1 && ok2 {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Pass on the names of others.\n\t\tselect {\n\t\tcase names <- o.Name:\n\n\t\t\t\/\/ Cancelled?\n\t\tcase <-ctx.Done():\n\t\t\terr = ctx.Err()\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ Parse the object name into its expected SHA-1 hash.\nfunc parseObjectName(name string) (sha1 sha1Hash, err error) {\n\tif !strings.HasPrefix(name, blobObjectNamePrefix) {\n\t\terr = fmt.Errorf(\"Expected prefix\")\n\t\treturn\n\t}\n\n\thexSha1 := strings.TrimPrefix(name, blobObjectNamePrefix)\n\tscore, err := blob.ParseHexScore(hexSha1)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"ParseHexScore: %v\", err)\n\t\treturn\n\t}\n\n\tsha1 = sha1Hash(score)\n\treturn\n}\n\n\/\/ For each object name, issue a request to set the appropriate metadata keys\n\/\/ based on the contents of the supplied map. Write out the names of the\n\/\/ objects processed, and those for whom info wasn't available.\nfunc fixProblematicObjects(\n\tctx context.Context,\n\tbucket gcs.Bucket,\n\tinfo checksumMap,\n\tnames <-chan string,\n\tprocessed chan<- string,\n\tunknown chan<- string) (err error) {\n\tfor name := range names {\n\t\t\/\/ Parse the name.\n\t\tvar sha1 sha1Hash\n\t\tsha1, err = parseObjectName(name)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Parsing object name %q: %v\", name, err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Do we have info for this object?\n\t\tc, ok := info[sha1]\n\t\tif !ok {\n\t\t\tselect {\n\t\t\tcase unknown <- name:\n\n\t\t\t\t\/\/ Cancelled?\n\t\t\tcase <-ctx.Done():\n\t\t\t\terr = ctx.Err()\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Fix it up.\n\t\t\/\/\n\t\t\/\/ Formats cf. gcs_store.go.\n\t\tsha1Str := hex.EncodeToString(sha1[:])\n\t\tcrc32cStr := fmt.Sprintf(\"%#08x\", c.crc32c)\n\t\tmd5Str := hex.EncodeToString(c.md5[:])\n\n\t\treq := &gcs.UpdateObjectRequest{\n\t\t\tName: name,\n\t\t\tMetadata: map[string]*string{\n\t\t\t\tmetadataKey_SHA1: &sha1Str,\n\t\t\t\tmetadataKey_CRC32C: &crc32cStr,\n\t\t\t\tmetadataKey_MD5: &md5Str,\n\t\t\t},\n\t\t}\n\n\t\t_, err = bucket.UpdateObject(ctx, req)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"UpdateObject: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Pass on the name as processed.\n\t\tselect {\n\t\tcase processed <- name:\n\n\t\t\t\/\/ Cancelled?\n\t\tcase <-ctx.Done():\n\t\t\terr = ctx.Err()\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ Log status updates, and at the end log the objects that were not\n\/\/ processed, returning an error if non-zero.\nfunc monitorProgress(\n\tctx context.Context,\n\tprocessedChan <-chan string,\n\tunknownChan <-chan string) (err error) {\n\tvar processed int\n\tvar unknown int\n\n\t\/\/ Set up a ticker for logging status updates.\n\tconst period = time.Second\n\tticker := time.NewTicker(period)\n\tdefer ticker.Stop()\n\n\t\/\/ Keep going until both channels are closed.\n\tfor processedChan != nil || unknownChan != nil {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tlog.Printf(\"%v processed successfully, %v unknown\", processed, unknown)\n\n\t\tcase _, ok := <-processedChan:\n\t\t\tif ok {\n\t\t\t\tprocessed++\n\t\t\t} else {\n\t\t\t\tprocessedChan = nil\n\t\t\t}\n\n\t\tcase name, ok := <-unknownChan:\n\t\t\tif ok {\n\t\t\t\tlog.Printf(\"Unknown object: %q\", name)\n\t\t\t\tunknown++\n\t\t\t} else {\n\t\t\t\tunknownChan = nil\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Return an error if any object was unknown.\n\tif unknown != 0 {\n\t\terr = fmt.Errorf(\"%v unknown objects\", unknown)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc run(\n\tbucket gcs.Bucket,\n\tinfo checksumMap) (err error) {\n\tb := syncutil.NewBundle(context.Background())\n\tdefer func() { err = b.Join() }()\n\n\t\/\/ List all of the blob objects.\n\tobjectRecords := make(chan *gcs.Object, 10000)\n\tb.Add(func(ctx context.Context) (err error) {\n\t\tdefer close(objectRecords)\n\t\terr = blob.ListBlobObjects(ctx, bucket, blobObjectNamePrefix, objectRecords)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"ListBlobObjects: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\treturn\n\t})\n\n\t\/\/ Filter to the ones we need to fix up.\n\tproblematicNames := make(chan string, 100)\n\tb.Add(func(ctx context.Context) (err error) {\n\t\tdefer close(problematicNames)\n\t\terr = filterToProblematicNames(ctx, objectRecords, problematicNames)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"filterToProblematicNames: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\treturn\n\t})\n\n\t\/\/ Fix those objects with some parallelism.\n\tconst parallelism = 128\n\tvar wg sync.WaitGroup\n\n\tprocessed := make(chan string, 100)\n\tunknown := make(chan string, 100)\n\n\tfor i := 0; i < *fUpdateParallelism; i++ {\n\t\twg.Add(1)\n\t\tb.Add(func(ctx context.Context) (err error) {\n\t\t\tdefer wg.Done()\n\t\t\terr = fixProblematicObjects(\n\t\t\t\tctx,\n\t\t\t\tbucket,\n\t\t\t\tinfo,\n\t\t\t\tproblematicNames,\n\t\t\t\tprocessed,\n\t\t\t\tunknown)\n\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"fixProblematicObjects: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\treturn\n\t\t})\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(processed)\n\t\tclose(unknown)\n\t}()\n\n\t\/\/ Log status updates, and at the end log the objects that were not\n\t\/\/ processed, returning an error if non-zero.\n\tb.Add(func(ctx context.Context) (err error) {\n\t\terr = monitorProgress(ctx, processed, unknown)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"monitorProgress: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\treturn\n\t})\n\n\treturn\n}\n\nfunc panicIf(err *error) {\n\tif *err != nil {\n\t\tpanic(*err)\n\t}\n}\n\nfunc getHTTPClient() (client *http.Client, err error) {\n\tif *fKeyFile == \"\" {\n\t\terr = errors.New(\"You must set --key_file.\")\n\t\treturn\n\t}\n\n\tconst scope = gcs.Scope_FullControl\n\tclient, err = oauthutil.NewJWTHttpClient(*fKeyFile, []string{scope})\n\tif err != nil {\n\t\terr = fmt.Errorf(\"oauthutil.NewJWTHttpClient: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc bucketName() (name string, err error) {\n\tname = *fBucket\n\tif name == \"\" {\n\t\terr = errors.New(\"You must set --bucket.\")\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc getBucket() (b gcs.Bucket) {\n\tvar err error\n\tdefer panicIf(&err)\n\n\t\/\/ Get the HTTP client,\n\tclient, err := getHTTPClient()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"IntegrationTestHTTPClient: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Find the bucket name.\n\tname, err := bucketName()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"bucketName: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Create a connection.\n\tcfg := &gcs.ConnConfig{\n\t\tHTTPClient: client,\n\t\tMaxBackoffSleep: 30 * time.Second,\n\t}\n\n\tconn, err := gcs.NewConn(cfg)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"gcs.NewConn: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Extract the bucket.\n\tb = conn.GetBucket(name)\n\n\treturn\n}\n\nfunc main() {\n\tflag.Parse()\n\tbucket := getBucket()\n\tlog.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)\n\n\t\/\/ Panic if anything below fails.\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}()\n\n\t\/\/ Parse the input.\n\tlog.Println(\"Parsing input...\")\n\tinfo, err := parseInput(os.Stdin)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"parseInput: %v\", err)\n\t\treturn\n\t}\n\n\tlog.Println(\"Done parsing input.\")\n\n\t\/\/ Run.\n\terr = run(bucket, info)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"run: %v\", err)\n\t\treturn\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Upspin Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage grpcauth\n\nimport (\n\t\"crypto\/tls\"\n\t\"math\/rand\"\n\t\"strings\"\n\t\"time\"\n\n\tgContext \"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/credentials\"\n\t\"google.golang.org\/grpc\/metadata\"\n\n\t\"upspin.io\/log\"\n\t\"upspin.io\/upspin\"\n\t\"upspin.io\/upspin\/proto\"\n)\n\n\/\/ GRPCCommon is an interface that all GRPC services implement for authentication and ping as part of upspin.Service.\ntype GRPCCommon interface {\n\t\/\/ Authenticate is the GRPC call for Authenticate.\n\tAuthenticate(ctx gContext.Context, in *proto.AuthenticateRequest, opts ...grpc.CallOption) (*proto.AuthenticateResponse, error)\n\t\/\/ Ping is the GRPC call for Ping.\n\tPing(ctx gContext.Context, in *proto.PingRequest, opts ...grpc.CallOption) (*proto.PingResponse, error)\n}\n\n\/\/ AuthClientService is a partial Service that uses GRPC as transport and implements Authentication.\ntype AuthClientService struct {\n\tgrpcCommon GRPCCommon\n\tgrpcConn *grpc.ClientConn\n\tcontext upspin.Context\n\tauthToken string\n\tlastTokenRefresh time.Time\n\n\tkeepAliveInterval time.Duration \/\/ interval of keep-alive packets.\n\tlastNetActivity time.Time \/\/ last known time of some network activity.\n\tcloseKeepAlive chan bool \/\/ channel used to tell the keep-alive routine to exit.\n\tkeepAliveRound uint64 \/\/ counts iterations of the keep-alive routine. Mostly for tests.\n}\n\nconst (\n\t\/\/ AllowSelfSignedCertificate is used for documenting the parameter with same name in NewGRPCClient.\n\tAllowSelfSignedCertificate = true\n\n\t\/\/ KeepAliveInterval is a suggested interval between keep-alive ping requests to the server.\n\t\/\/ A value of 0 means keep-alives are disabled. Google Cloud Platform (GCP) times out connections\n\t\/\/ every 10 minutes so a smaller values are recommended for talking to servers on GCP.\n\tKeepAliveInterval = 5 * time.Minute\n)\n\n\/\/ To be safe, we refresh the token 1 hour ahead of time.\nvar tokenFreshnessDuration = authTokenDuration - time.Hour\n\n\/\/ NewGRPCClient returns new GRPC client connected securely (with TLS) to a GRPC server at a net address.\n\/\/ The address is expected to be a raw network address with port number, as in domain.com:5580. However, for convenience,\n\/\/ it is optionally accepted for the time being to use one of the following prefixes:\n\/\/ https:\/\/, http:\/\/, grpc:\/\/. This may change in the future.\n\/\/ A keep alive interval indicates the amount of time to send ping requests to the server. A duration of 0 disables\n\/\/ keep-alive packets.\n\/\/ If allowSelfSignedCertificates is true, the client will connect with a server with a self-signed certificate.\n\/\/ Otherwise it will reject it. Mostly only useful for testing a local server.\nfunc NewGRPCClient(context *upspin.Context, netAddr upspin.NetAddr, keepAliveInterval time.Duration, allowSelfSignedCertificate bool) (*AuthClientService, error) {\n\tif keepAliveInterval != 0 && keepAliveInterval < time.Minute {\n\t\tlog.Info.Printf(\"Keep-alive interval too short. You may overload the server and be throttled\")\n\t}\n\taddr := string(netAddr)\n\tisHTTP := strings.HasPrefix(addr, \"http:\/\/\")\n\tisHTTPS := strings.HasPrefix(addr, \"https:\/\/\")\n\tisGRPC := strings.HasPrefix(addr, \"grpc:\/\/\")\n\tskip := 0\n\tswitch {\n\tcase isHTTP, isGRPC:\n\t\tskip = 7\n\tcase isHTTPS:\n\t\tskip = 8\n\t}\n\tconn, err := grpc.Dial(addr[skip:],\n\t\tgrpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{InsecureSkipVerify: allowSelfSignedCertificate})),\n\t\tgrpc.WithBlock(),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tac := &AuthClientService{\n\t\tgrpcConn: conn,\n\t\tcontext: *context,\n\t\tkeepAliveInterval: keepAliveInterval,\n\t\tcloseKeepAlive: make(chan bool, 1),\n\t}\n\tif keepAliveInterval != 0 {\n\t\tgo ac.keepAlive()\n\t}\n\treturn ac, nil\n}\n\n\/\/ keepAlive loops forever pinging the server every keepAliveInterval. It skips pings if there has been network\n\/\/ activity more recently than the keep alive interval. It must run on a separate go routine.\nfunc (ac *AuthClientService) keepAlive() {\n\tlog.Debug.Printf(\"Starting keep alive client\")\n\tsleepFor := ac.keepAliveInterval\n\tfor {\n\t\tac.keepAliveRound++\n\t\tselect {\n\t\tcase <-time.After(sleepFor):\n\t\t\tlastIdleness := time.Since(ac.lastNetActivity)\n\t\t\tif lastIdleness < ac.keepAliveInterval {\n\t\t\t\tsleepFor = ac.keepAliveInterval - lastIdleness\n\t\t\t\tlog.Debug.Printf(\"New ping in %v\", sleepFor)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsleepFor = ac.keepAliveInterval\n\t\t\tif !ac.Ping() {\n\t\t\t\tlog.Error.Printf(\"grpcauth: keepAlive: ping failed\")\n\t\t\t}\n\t\t\tlog.Debug.Printf(\"grpcAuth: keepAlive: ping okay\")\n\t\t\tac.lastNetActivity = time.Now()\n\t\tcase <-ac.closeKeepAlive:\n\t\t\tlog.Debug.Printf(\"grpcauth: keepAlive: exiting keep alive routine\")\n\t\t\tac.keepAliveRound = 0\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ SetService sets the underlying RPC service which was obtained with proto.NewSERVICENAMEClient, where SERVICENAME is\n\/\/ the RPC service definition from the proto file.\nfunc (ac *AuthClientService) SetService(common GRPCCommon) {\n\tac.grpcCommon = common\n}\n\n\/\/ GRPCConn returns the grpc client connection used to dial the server.\nfunc (ac *AuthClientService) GRPCConn() *grpc.ClientConn {\n\treturn ac.grpcConn\n}\n\n\/\/ Authenticate implements upspin.Service.\nfunc (ac *AuthClientService) Authenticate(ctx *upspin.Context) error {\n\treq := &proto.AuthenticateRequest{\n\t\tUserName: string(ctx.UserName),\n\t\tNow: time.Now().UTC().Format(time.ANSIC), \/\/ to discourage signature replay\n\t}\n\tsig, err := ctx.Factotum.UserSign([]byte(string(req.UserName) + \" Authenticate \" + req.Now))\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Signature = &proto.Signature{\n\t\tR: sig.R.String(),\n\t\tS: sig.S.String(),\n\t}\n\tresp, err := ac.grpcCommon.Authenticate(gContext.Background(), req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Debug.Printf(\"Authenticate: got authtoken for user %s: %s\", req.UserName, resp.Token)\n\tac.authToken = resp.Token\n\tnow := time.Now()\n\tac.lastTokenRefresh = now\n\tac.lastNetActivity = now\n\treturn nil\n}\n\n\/\/ Ping implements uspin.Service.\nfunc (ac *AuthClientService) Ping() bool {\n\tseq := rand.Int31()\n\treq := &proto.PingRequest{\n\t\tPingSequence: seq,\n\t}\n\tgctx, _ := gContext.WithTimeout(gContext.Background(), 3*time.Second) \/\/ ignore the cancel function.\n\tresp, err := ac.grpcCommon.Ping(gctx, req)\n\tif err != nil {\n\t\tlog.Printf(\"Ping error: %s\", err)\n\t}\n\tac.lastNetActivity = time.Now()\n\treturn err == nil && resp.PingSequence == seq\n}\n\nfunc (ac *AuthClientService) isAuthTokenExpired() bool {\n\treturn ac.authToken == \"\" || ac.lastTokenRefresh.Add(tokenFreshnessDuration).Before(time.Now())\n}\n\n\/\/ NewAuthContext creates a new RPC context with the required authentication tokens set and ensures re-authentication\n\/\/ is done if necessary.\nfunc (ac *AuthClientService) NewAuthContext() (gContext.Context, error) {\n\tvar err error\n\tif ac.isAuthTokenExpired() {\n\t\terr = ac.Authenticate(&ac.context)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tlog.Debug.Printf(\"SetAuthContext: set auth token: %s\", ac.authToken)\n\treturn metadata.NewContext(gContext.Background(), metadata.Pairs(authTokenKey, ac.authToken)), nil\n}\n\n\/\/ Close implements upspin.Service.\nfunc (ac *AuthClientService) Close() {\n\tselect { \/\/ prevents blocking if Close is called more than once.\n\tcase ac.closeKeepAlive <- true:\n\tdefault:\n\t}\n\t\/\/ The only error returned is ErrClientConnClosing, meaning something else has already caused it to close.\n\t_ = ac.grpcConn.Close() \/\/ explicitly ignore the error as there's nothing we can do.\n}\n<commit_msg>auth\/grpcauth: enable TCP keepalives on GRPC connections<commit_after>\/\/ Copyright 2016 The Upspin Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage grpcauth\n\nimport (\n\t\"crypto\/tls\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"strings\"\n\t\"time\"\n\n\tgContext \"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/credentials\"\n\t\"google.golang.org\/grpc\/metadata\"\n\n\t\"upspin.io\/log\"\n\t\"upspin.io\/upspin\"\n\t\"upspin.io\/upspin\/proto\"\n)\n\n\/\/ GRPCCommon is an interface that all GRPC services implement for authentication and ping as part of upspin.Service.\ntype GRPCCommon interface {\n\t\/\/ Authenticate is the GRPC call for Authenticate.\n\tAuthenticate(ctx gContext.Context, in *proto.AuthenticateRequest, opts ...grpc.CallOption) (*proto.AuthenticateResponse, error)\n\t\/\/ Ping is the GRPC call for Ping.\n\tPing(ctx gContext.Context, in *proto.PingRequest, opts ...grpc.CallOption) (*proto.PingResponse, error)\n}\n\n\/\/ AuthClientService is a partial Service that uses GRPC as transport and implements Authentication.\ntype AuthClientService struct {\n\tgrpcCommon GRPCCommon\n\tgrpcConn *grpc.ClientConn\n\tcontext upspin.Context\n\tauthToken string\n\tlastTokenRefresh time.Time\n\n\tkeepAliveInterval time.Duration \/\/ interval of keep-alive packets.\n\tlastNetActivity time.Time \/\/ last known time of some network activity.\n\tcloseKeepAlive chan bool \/\/ channel used to tell the keep-alive routine to exit.\n\tkeepAliveRound uint64 \/\/ counts iterations of the keep-alive routine. Mostly for tests.\n}\n\nconst (\n\t\/\/ AllowSelfSignedCertificate is used for documenting the parameter with same name in NewGRPCClient.\n\tAllowSelfSignedCertificate = true\n\n\t\/\/ KeepAliveInterval is a suggested interval between keep-alive ping requests to the server.\n\t\/\/ A value of 0 means keep-alives are disabled. Google Cloud Platform (GCP) times out connections\n\t\/\/ every 10 minutes so a smaller values are recommended for talking to servers on GCP.\n\tKeepAliveInterval = 5 * time.Minute\n)\n\n\/\/ To be safe, we refresh the token 1 hour ahead of time.\nvar tokenFreshnessDuration = authTokenDuration - time.Hour\n\n\/\/ NewGRPCClient returns new GRPC client connected securely (with TLS) to a GRPC server at a net address.\n\/\/ The address is expected to be a raw network address with port number, as in domain.com:5580. However, for convenience,\n\/\/ it is optionally accepted for the time being to use one of the following prefixes:\n\/\/ https:\/\/, http:\/\/, grpc:\/\/. This may change in the future.\n\/\/ A keep alive interval indicates the amount of time to send ping requests to the server. A duration of 0 disables\n\/\/ keep-alive packets.\n\/\/ If allowSelfSignedCertificates is true, the client will connect with a server with a self-signed certificate.\n\/\/ Otherwise it will reject it. Mostly only useful for testing a local server.\nfunc NewGRPCClient(context *upspin.Context, netAddr upspin.NetAddr, keepAliveInterval time.Duration, allowSelfSignedCertificate bool) (*AuthClientService, error) {\n\tif keepAliveInterval != 0 && keepAliveInterval < time.Minute {\n\t\tlog.Info.Printf(\"Keep-alive interval too short. You may overload the server and be throttled\")\n\t}\n\taddr := string(netAddr)\n\tisHTTP := strings.HasPrefix(addr, \"http:\/\/\")\n\tisHTTPS := strings.HasPrefix(addr, \"https:\/\/\")\n\tisGRPC := strings.HasPrefix(addr, \"grpc:\/\/\")\n\tskip := 0\n\tswitch {\n\tcase isHTTP, isGRPC:\n\t\tskip = 7\n\tcase isHTTPS:\n\t\tskip = 8\n\t}\n\tconn, err := grpc.Dial(addr[skip:],\n\t\tgrpc.WithBlock(),\n\t\tgrpc.WithDialer(dialWithKeepAlive),\n\t\tgrpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{InsecureSkipVerify: allowSelfSignedCertificate})),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tac := &AuthClientService{\n\t\tgrpcConn: conn,\n\t\tcontext: *context,\n\t\tkeepAliveInterval: keepAliveInterval,\n\t\tcloseKeepAlive: make(chan bool, 1),\n\t}\n\tif keepAliveInterval != 0 {\n\t\tgo ac.keepAlive()\n\t}\n\treturn ac, nil\n}\n\n\/\/ keepAlive loops forever pinging the server every keepAliveInterval. It skips pings if there has been network\n\/\/ activity more recently than the keep alive interval. It must run on a separate go routine.\nfunc (ac *AuthClientService) keepAlive() {\n\tlog.Debug.Printf(\"Starting keep alive client\")\n\tsleepFor := ac.keepAliveInterval\n\tfor {\n\t\tac.keepAliveRound++\n\t\tselect {\n\t\tcase <-time.After(sleepFor):\n\t\t\tlastIdleness := time.Since(ac.lastNetActivity)\n\t\t\tif lastIdleness < ac.keepAliveInterval {\n\t\t\t\tsleepFor = ac.keepAliveInterval - lastIdleness\n\t\t\t\tlog.Debug.Printf(\"New ping in %v\", sleepFor)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsleepFor = ac.keepAliveInterval\n\t\t\tif !ac.Ping() {\n\t\t\t\tlog.Error.Printf(\"grpcauth: keepAlive: ping failed\")\n\t\t\t}\n\t\t\tlog.Debug.Printf(\"grpcAuth: keepAlive: ping okay\")\n\t\t\tac.lastNetActivity = time.Now()\n\t\tcase <-ac.closeKeepAlive:\n\t\t\tlog.Debug.Printf(\"grpcauth: keepAlive: exiting keep alive routine\")\n\t\t\tac.keepAliveRound = 0\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ SetService sets the underlying RPC service which was obtained with proto.NewSERVICENAMEClient, where SERVICENAME is\n\/\/ the RPC service definition from the proto file.\nfunc (ac *AuthClientService) SetService(common GRPCCommon) {\n\tac.grpcCommon = common\n}\n\n\/\/ GRPCConn returns the grpc client connection used to dial the server.\nfunc (ac *AuthClientService) GRPCConn() *grpc.ClientConn {\n\treturn ac.grpcConn\n}\n\nfunc dialWithKeepAlive(target string, timeout time.Duration) (net.Conn, error) {\n\tc, err := net.DialTimeout(\"tcp\", target, timeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif tc, ok := c.(*net.TCPConn); ok {\n\t\tif err := tc.SetKeepAlive(true); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err := tc.SetKeepAlivePeriod(KeepAliveInterval); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn c, nil\n}\n\n\/\/ Authenticate implements upspin.Service.\nfunc (ac *AuthClientService) Authenticate(ctx *upspin.Context) error {\n\treq := &proto.AuthenticateRequest{\n\t\tUserName: string(ctx.UserName),\n\t\tNow: time.Now().UTC().Format(time.ANSIC), \/\/ to discourage signature replay\n\t}\n\tsig, err := ctx.Factotum.UserSign([]byte(string(req.UserName) + \" Authenticate \" + req.Now))\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Signature = &proto.Signature{\n\t\tR: sig.R.String(),\n\t\tS: sig.S.String(),\n\t}\n\tresp, err := ac.grpcCommon.Authenticate(gContext.Background(), req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Debug.Printf(\"Authenticate: got authtoken for user %s: %s\", req.UserName, resp.Token)\n\tac.authToken = resp.Token\n\tnow := time.Now()\n\tac.lastTokenRefresh = now\n\tac.lastNetActivity = now\n\treturn nil\n}\n\n\/\/ Ping implements uspin.Service.\nfunc (ac *AuthClientService) Ping() bool {\n\tseq := rand.Int31()\n\treq := &proto.PingRequest{\n\t\tPingSequence: seq,\n\t}\n\tgctx, _ := gContext.WithTimeout(gContext.Background(), 3*time.Second) \/\/ ignore the cancel function.\n\tresp, err := ac.grpcCommon.Ping(gctx, req)\n\tif err != nil {\n\t\tlog.Printf(\"Ping error: %s\", err)\n\t}\n\tac.lastNetActivity = time.Now()\n\treturn err == nil && resp.PingSequence == seq\n}\n\nfunc (ac *AuthClientService) isAuthTokenExpired() bool {\n\treturn ac.authToken == \"\" || ac.lastTokenRefresh.Add(tokenFreshnessDuration).Before(time.Now())\n}\n\n\/\/ NewAuthContext creates a new RPC context with the required authentication tokens set and ensures re-authentication\n\/\/ is done if necessary.\nfunc (ac *AuthClientService) NewAuthContext() (gContext.Context, error) {\n\tvar err error\n\tif ac.isAuthTokenExpired() {\n\t\terr = ac.Authenticate(&ac.context)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tlog.Debug.Printf(\"SetAuthContext: set auth token: %s\", ac.authToken)\n\treturn metadata.NewContext(gContext.Background(), metadata.Pairs(authTokenKey, ac.authToken)), nil\n}\n\n\/\/ Close implements upspin.Service.\nfunc (ac *AuthClientService) Close() {\n\tselect { \/\/ prevents blocking if Close is called more than once.\n\tcase ac.closeKeepAlive <- true:\n\tdefault:\n\t}\n\t\/\/ The only error returned is ErrClientConnClosing, meaning something else has already caused it to close.\n\t_ = ac.grpcConn.Close() \/\/ explicitly ignore the error as there's nothing we can do.\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/bobziuchkovski\/writ\"\n\t\"github.com\/jkomoros\/boardgame\/boardgame-util\/lib\/gamepkg\"\n\t\"github.com\/jkomoros\/boardgame\/boardgame-util\/lib\/golden\"\n)\n\ntype Golden struct {\n\tServe\n}\n\nfunc (g *Golden) Run(p writ.Path, positional []string) {\n\tg.Prod = false\n\tg.Storage = \"filesystem\"\n\n\tpkg, err := gamepkg.NewFromPath(\".\", \"\")\n\n\tif err != nil {\n\t\tg.Base().errAndQuit(\"Current directory is not a valid package. You must run this command sitting in the root of a valid package. \" + err.Error())\n\t}\n\n\tfmt.Println(\"Creating golden structures with \" + pkg.AbsolutePath())\n\n\tif err := golden.MakeGoldenTest(pkg); err != nil {\n\t\tg.Base().errAndQuit(\"Couldn't create golden directory: \" + err.Error())\n\t}\n\n\tg.doServe(p, positional, []*gamepkg.Pkg{pkg}, `\"`+golden.GameRecordsFolder+`\"`)\n\n\tfmt.Println(\"Cleaning golden folder...\")\n\tif err := golden.CleanGoldenTest(pkg); err != nil {\n\t\tg.Base().errAndQuit(\"Couldn't clean golden: \" + err.Error())\n\t}\n}\n\nfunc (g *Golden) Name() string {\n\treturn \"golden\"\n}\n\nfunc (g *Golden) Description() string {\n\treturn \"Helps create golden test files for the current package\"\n}\n\nfunc (g *Golden) HelpText() string {\n\treturn g.Name() + ` helps create golden example games to test the current game package.\n\nYou run it sittig in the root of a game package, and it will create a stub server (with similiar behavior to what you'd get with 'boardgame-util serve'), but with only one game, and the games will all be persisted to a testdata folder, with a golden_test.go created.\n\nThis is useful for saving runs of games that are known good so that you can ensure you don't mess with the game logic later.\n`\n}\n\nfunc (s *Golden) WritOptions() []*writ.Option {\n\t\/\/Skip the first two, which are not valid for us.\n\treturn s.Serve.WritOptions()[2:]\n}\n<commit_msg>Switch the name of golden command to `create-golden` to make it stand out more as a weird mode. Part of #648.<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/bobziuchkovski\/writ\"\n\t\"github.com\/jkomoros\/boardgame\/boardgame-util\/lib\/gamepkg\"\n\t\"github.com\/jkomoros\/boardgame\/boardgame-util\/lib\/golden\"\n)\n\ntype Golden struct {\n\tServe\n}\n\nfunc (g *Golden) Run(p writ.Path, positional []string) {\n\tg.Prod = false\n\tg.Storage = \"filesystem\"\n\n\tpkg, err := gamepkg.NewFromPath(\".\", \"\")\n\n\tif err != nil {\n\t\tg.Base().errAndQuit(\"Current directory is not a valid package. You must run this command sitting in the root of a valid package. \" + err.Error())\n\t}\n\n\tfmt.Println(\"Creating golden structures with \" + pkg.AbsolutePath())\n\n\tif err := golden.MakeGoldenTest(pkg); err != nil {\n\t\tg.Base().errAndQuit(\"Couldn't create golden directory: \" + err.Error())\n\t}\n\n\tg.doServe(p, positional, []*gamepkg.Pkg{pkg}, `\"`+golden.GameRecordsFolder+`\"`)\n\n\tfmt.Println(\"Cleaning golden folder...\")\n\tif err := golden.CleanGoldenTest(pkg); err != nil {\n\t\tg.Base().errAndQuit(\"Couldn't clean golden: \" + err.Error())\n\t}\n}\n\nfunc (g *Golden) Name() string {\n\treturn \"create-golden\"\n}\n\nfunc (g *Golden) Description() string {\n\treturn \"Helps create golden test files for the current package\"\n}\n\nfunc (g *Golden) HelpText() string {\n\treturn g.Name() + ` helps create golden example games to test the current game package.\n\nYou run it sitting in the root of a game package, and it will create a stub server (with similiar behavior to what you'd get with 'boardgame-util serve'), but with only one game, and the games will all be persisted to a testdata folder, with a golden_test.go created.\n\nThis is useful for saving runs of games that are known good so that you can ensure you don't mess with the game logic later.\n`\n}\n\nfunc (s *Golden) WritOptions() []*writ.Option {\n\t\/\/Skip the first two, which are not valid for us.\n\treturn s.Serve.WritOptions()[2:]\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright IBM Corp. All Rights Reserved.\nSPDX-License-Identifier: Apache-2.0\n*\/\n\npackage lockbasedtxmgr\n\nimport (\n\t\"sync\"\n\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/pvtdatapolicy\"\n\n\t\"github.com\/hyperledger\/fabric\/common\/flogging\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\/bookkeeping\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\/txmgmt\/privacyenabledstate\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\/txmgmt\/pvtstatepurgemgmt\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\/txmgmt\/validator\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\/txmgmt\/validator\/valimpl\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\/txmgmt\/version\"\n\t\"github.com\/hyperledger\/fabric\/protos\/common\"\n\t\"github.com\/hyperledger\/fabric\/protos\/ledger\/rwset\/kvrwset\"\n)\n\nvar logger = flogging.MustGetLogger(\"lockbasedtxmgr\")\n\n\/\/ LockBasedTxMgr a simple implementation of interface `txmgmt.TxMgr`.\n\/\/ This implementation uses a read-write lock to prevent conflicts between transaction simulation and committing\ntype LockBasedTxMgr struct {\n\tledgerid string\n\tdb privacyenabledstate.DB\n\tpvtdataPurgeMgr *pvtdataPurgeMgr\n\tvalidator validator.Validator\n\tstateListeners []ledger.StateListener\n\tcommitRWLock sync.RWMutex\n\tcurrent *current\n}\n\ntype current struct {\n\tblock *common.Block\n\tbatch *privacyenabledstate.UpdateBatch\n\tlisteners []ledger.StateListener\n}\n\nfunc (c *current) blockNum() uint64 {\n\treturn c.block.Header.Number\n}\n\nfunc (c *current) maxTxNumber() uint64 {\n\treturn uint64(len(c.block.Data.Data)) - 1\n}\n\n\/\/ NewLockBasedTxMgr constructs a new instance of NewLockBasedTxMgr\nfunc NewLockBasedTxMgr(ledgerid string, db privacyenabledstate.DB, stateListeners []ledger.StateListener,\n\tbtlPolicy pvtdatapolicy.BTLPolicy, bookkeepingProvider bookkeeping.Provider) (*LockBasedTxMgr, error) {\n\tdb.Open()\n\ttxmgr := &LockBasedTxMgr{ledgerid: ledgerid, db: db, stateListeners: stateListeners}\n\tpvtstatePurgeMgr, err := pvtstatepurgemgmt.InstantiatePurgeMgr(ledgerid, db, btlPolicy, bookkeepingProvider)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttxmgr.pvtdataPurgeMgr = &pvtdataPurgeMgr{pvtstatePurgeMgr, false}\n\ttxmgr.validator = valimpl.NewStatebasedValidator(txmgr, db)\n\treturn txmgr, nil\n}\n\n\/\/ GetLastSavepoint returns the block num recorded in savepoint,\n\/\/ returns 0 if NO savepoint is found\nfunc (txmgr *LockBasedTxMgr) GetLastSavepoint() (*version.Height, error) {\n\treturn txmgr.db.GetLatestSavePoint()\n}\n\n\/\/ NewQueryExecutor implements method in interface `txmgmt.TxMgr`\nfunc (txmgr *LockBasedTxMgr) NewQueryExecutor(txid string) (ledger.QueryExecutor, error) {\n\tqe := newQueryExecutor(txmgr, txid)\n\ttxmgr.commitRWLock.RLock()\n\treturn qe, nil\n}\n\n\/\/ NewTxSimulator implements method in interface `txmgmt.TxMgr`\nfunc (txmgr *LockBasedTxMgr) NewTxSimulator(txid string) (ledger.TxSimulator, error) {\n\tlogger.Debugf(\"constructing new tx simulator\")\n\ts, err := newLockBasedTxSimulator(txmgr, txid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttxmgr.commitRWLock.RLock()\n\treturn s, nil\n}\n\n\/\/ ValidateAndPrepare implements method in interface `txmgmt.TxMgr`\nfunc (txmgr *LockBasedTxMgr) ValidateAndPrepare(blockAndPvtdata *ledger.BlockAndPvtData, doMVCCValidation bool) error {\n\tblock := blockAndPvtdata.Block\n\tlogger.Debugf(\"Validating new block with num trans = [%d]\", len(block.Data.Data))\n\tbatch, err := txmgr.validator.ValidateAndPrepareBatch(blockAndPvtdata, doMVCCValidation)\n\tif err != nil {\n\t\ttxmgr.reset()\n\t\treturn err\n\t}\n\ttxmgr.current = ¤t{block: block, batch: batch}\n\tif err := txmgr.invokeNamespaceListeners(); err != nil {\n\t\ttxmgr.reset()\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (txmgr *LockBasedTxMgr) invokeNamespaceListeners() error {\n\tfor _, listener := range txmgr.stateListeners {\n\t\tstateUpdatesForListener := extractStateUpdates(txmgr.current.batch, listener.InterestedInNamespaces())\n\t\tif len(stateUpdatesForListener) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\ttxmgr.current.listeners = append(txmgr.current.listeners, listener)\n\t\tif err := listener.HandleStateUpdates(txmgr.ledgerid, stateUpdatesForListener, txmgr.current.blockNum()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlogger.Debugf(\"Invoking listener for state changes:%s\", listener)\n\t}\n\treturn nil\n}\n\n\/\/ Shutdown implements method in interface `txmgmt.TxMgr`\nfunc (txmgr *LockBasedTxMgr) Shutdown() {\n\ttxmgr.db.Close()\n}\n\n\/\/ Commit implements method in interface `txmgmt.TxMgr`\nfunc (txmgr *LockBasedTxMgr) Commit() error {\n\t\/\/ When using the purge manager for the first block commit after peer start, the asynchronous function\n\t\/\/ 'PrepareForExpiringKeys' is invoked in-line. However, for the subsequent blocks commits, this function is invoked\n\t\/\/ in advance for the next block\n\tif !txmgr.pvtdataPurgeMgr.usedOnce {\n\t\ttxmgr.pvtdataPurgeMgr.PrepareForExpiringKeys(txmgr.current.blockNum())\n\t\ttxmgr.pvtdataPurgeMgr.usedOnce = true\n\t}\n\tdefer func() {\n\t\ttxmgr.pvtdataPurgeMgr.BlockCommitDone()\n\t\ttxmgr.pvtdataPurgeMgr.PrepareForExpiringKeys(txmgr.current.blockNum() + 1)\n\t\ttxmgr.reset()\n\t\ttxmgr.commitRWLock.Unlock()\n\t}()\n\n\tlogger.Debugf(\"Committing updates to state database\")\n\ttxmgr.commitRWLock.Lock()\n\tlogger.Debugf(\"Write lock acquired for committing updates to state database\")\n\tif txmgr.current == nil {\n\t\tpanic(\"validateAndPrepare() method should have been called before calling commit()\")\n\t}\n\n\tif err := txmgr.pvtdataPurgeMgr.DeleteExpiredAndUpdateBookkeeping(\n\t\ttxmgr.current.batch.PvtUpdates, txmgr.current.batch.HashUpdates); err != nil {\n\t\treturn err\n\t}\n\n\tcommitHeight := version.NewHeight(txmgr.current.blockNum(), txmgr.current.maxTxNumber())\n\tif err := txmgr.db.ApplyPrivacyAwareUpdates(txmgr.current.batch, commitHeight); err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Debugf(\"Updates committed to state database\")\n\t\/\/ In the case of error state listeners will not recieve this call - instead a peer panic is caused by the ledger upon receiveing\n\t\/\/ an error from this function\n\ttxmgr.updateStateListeners()\n\treturn nil\n}\n\n\/\/ Rollback implements method in interface `txmgmt.TxMgr`\nfunc (txmgr *LockBasedTxMgr) Rollback() {\n\ttxmgr.reset()\n}\n\n\/\/ clearCache empty the cache maintained by the statedb implementation\nfunc (txmgr *LockBasedTxMgr) clearCache() {\n\tif txmgr.db.IsBulkOptimizable() {\n\t\ttxmgr.db.ClearCachedVersions()\n\t}\n}\n\n\/\/ ShouldRecover implements method in interface kvledger.Recoverer\nfunc (txmgr *LockBasedTxMgr) ShouldRecover(lastAvailableBlock uint64) (bool, uint64, error) {\n\tsavepoint, err := txmgr.GetLastSavepoint()\n\tif err != nil {\n\t\treturn false, 0, err\n\t}\n\tif savepoint == nil {\n\t\treturn true, 0, nil\n\t}\n\treturn savepoint.BlockNum != lastAvailableBlock, savepoint.BlockNum + 1, nil\n}\n\n\/\/ CommitLostBlock implements method in interface kvledger.Recoverer\nfunc (txmgr *LockBasedTxMgr) CommitLostBlock(blockAndPvtdata *ledger.BlockAndPvtData) error {\n\tblock := blockAndPvtdata.Block\n\tlogger.Debugf(\"Constructing updateSet for the block %d\", block.Header.Number)\n\tif err := txmgr.ValidateAndPrepare(blockAndPvtdata, false); err != nil {\n\t\treturn err\n\t}\n\tlogger.Debugf(\"Committing block %d to state database\", block.Header.Number)\n\treturn txmgr.Commit()\n}\n\nfunc extractStateUpdates(batch *privacyenabledstate.UpdateBatch, namespaces []string) ledger.StateUpdates {\n\tstateupdates := make(ledger.StateUpdates)\n\tfor _, namespace := range namespaces {\n\t\tupdatesMap := batch.PubUpdates.GetUpdates(namespace)\n\t\tvar kvwrites []*kvrwset.KVWrite\n\t\tfor key, versionedValue := range updatesMap {\n\t\t\tkvwrites = append(kvwrites, &kvrwset.KVWrite{Key: key, IsDelete: versionedValue.Value == nil, Value: versionedValue.Value})\n\t\t\tif len(kvwrites) > 0 {\n\t\t\t\tstateupdates[namespace] = kvwrites\n\t\t\t}\n\t\t}\n\t}\n\treturn stateupdates\n}\n\nfunc (txmgr *LockBasedTxMgr) updateStateListeners() {\n\tfor _, l := range txmgr.current.listeners {\n\t\tl.StateCommitDone(txmgr.ledgerid)\n\t}\n}\n\nfunc (txmgr *LockBasedTxMgr) reset() {\n\ttxmgr.current = nil\n\t\/\/ If statedb implementation needed bulk read optimization, cache might have been populated by\n\t\/\/ ValidateAndPrepare(). Once the block is validated and committed, populated cache needs to\n\t\/\/ be cleared.\n\tdefer txmgr.clearCache()\n}\n\n\/\/ pvtdataPurgeMgr wraps the actual purge manager and an additional flag 'usedOnce'\n\/\/ for usage of this additional flag, see the relevant comments in the txmgr.Commit() function above\ntype pvtdataPurgeMgr struct {\n\tpvtstatepurgemgmt.PurgeMgr\n\tusedOnce bool\n}\n<commit_msg>[FAB-8347] Fix re-entrant lock isuue<commit_after>\/*\nCopyright IBM Corp. All Rights Reserved.\nSPDX-License-Identifier: Apache-2.0\n*\/\n\npackage lockbasedtxmgr\n\nimport (\n\t\"sync\"\n\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/pvtdatapolicy\"\n\n\t\"github.com\/hyperledger\/fabric\/common\/flogging\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\/bookkeeping\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\/txmgmt\/privacyenabledstate\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\/txmgmt\/pvtstatepurgemgmt\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\/txmgmt\/validator\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\/txmgmt\/validator\/valimpl\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\/txmgmt\/version\"\n\t\"github.com\/hyperledger\/fabric\/protos\/common\"\n\t\"github.com\/hyperledger\/fabric\/protos\/ledger\/rwset\/kvrwset\"\n)\n\nvar logger = flogging.MustGetLogger(\"lockbasedtxmgr\")\n\n\/\/ LockBasedTxMgr a simple implementation of interface `txmgmt.TxMgr`.\n\/\/ This implementation uses a read-write lock to prevent conflicts between transaction simulation and committing\ntype LockBasedTxMgr struct {\n\tledgerid string\n\tdb privacyenabledstate.DB\n\tpvtdataPurgeMgr *pvtdataPurgeMgr\n\tvalidator validator.Validator\n\tstateListeners []ledger.StateListener\n\tcommitRWLock sync.RWMutex\n\tcurrent *current\n}\n\ntype current struct {\n\tblock *common.Block\n\tbatch *privacyenabledstate.UpdateBatch\n\tlisteners []ledger.StateListener\n}\n\nfunc (c *current) blockNum() uint64 {\n\treturn c.block.Header.Number\n}\n\nfunc (c *current) maxTxNumber() uint64 {\n\treturn uint64(len(c.block.Data.Data)) - 1\n}\n\n\/\/ NewLockBasedTxMgr constructs a new instance of NewLockBasedTxMgr\nfunc NewLockBasedTxMgr(ledgerid string, db privacyenabledstate.DB, stateListeners []ledger.StateListener,\n\tbtlPolicy pvtdatapolicy.BTLPolicy, bookkeepingProvider bookkeeping.Provider) (*LockBasedTxMgr, error) {\n\tdb.Open()\n\ttxmgr := &LockBasedTxMgr{ledgerid: ledgerid, db: db, stateListeners: stateListeners}\n\tpvtstatePurgeMgr, err := pvtstatepurgemgmt.InstantiatePurgeMgr(ledgerid, db, btlPolicy, bookkeepingProvider)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttxmgr.pvtdataPurgeMgr = &pvtdataPurgeMgr{pvtstatePurgeMgr, false}\n\ttxmgr.validator = valimpl.NewStatebasedValidator(txmgr, db)\n\treturn txmgr, nil\n}\n\n\/\/ GetLastSavepoint returns the block num recorded in savepoint,\n\/\/ returns 0 if NO savepoint is found\nfunc (txmgr *LockBasedTxMgr) GetLastSavepoint() (*version.Height, error) {\n\treturn txmgr.db.GetLatestSavePoint()\n}\n\n\/\/ NewQueryExecutor implements method in interface `txmgmt.TxMgr`\nfunc (txmgr *LockBasedTxMgr) NewQueryExecutor(txid string) (ledger.QueryExecutor, error) {\n\tqe := newQueryExecutor(txmgr, txid)\n\ttxmgr.commitRWLock.RLock()\n\treturn qe, nil\n}\n\n\/\/ NewTxSimulator implements method in interface `txmgmt.TxMgr`\nfunc (txmgr *LockBasedTxMgr) NewTxSimulator(txid string) (ledger.TxSimulator, error) {\n\tlogger.Debugf(\"constructing new tx simulator\")\n\ts, err := newLockBasedTxSimulator(txmgr, txid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttxmgr.commitRWLock.RLock()\n\treturn s, nil\n}\n\n\/\/ ValidateAndPrepare implements method in interface `txmgmt.TxMgr`\nfunc (txmgr *LockBasedTxMgr) ValidateAndPrepare(blockAndPvtdata *ledger.BlockAndPvtData, doMVCCValidation bool) error {\n\tblock := blockAndPvtdata.Block\n\tlogger.Debugf(\"Validating new block with num trans = [%d]\", len(block.Data.Data))\n\tbatch, err := txmgr.validator.ValidateAndPrepareBatch(blockAndPvtdata, doMVCCValidation)\n\tif err != nil {\n\t\ttxmgr.reset()\n\t\treturn err\n\t}\n\ttxmgr.current = ¤t{block: block, batch: batch}\n\tif err := txmgr.invokeNamespaceListeners(); err != nil {\n\t\ttxmgr.reset()\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (txmgr *LockBasedTxMgr) invokeNamespaceListeners() error {\n\tfor _, listener := range txmgr.stateListeners {\n\t\tstateUpdatesForListener := extractStateUpdates(txmgr.current.batch, listener.InterestedInNamespaces())\n\t\tif len(stateUpdatesForListener) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\ttxmgr.current.listeners = append(txmgr.current.listeners, listener)\n\t\tif err := listener.HandleStateUpdates(txmgr.ledgerid, stateUpdatesForListener, txmgr.current.blockNum()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlogger.Debugf(\"Invoking listener for state changes:%s\", listener)\n\t}\n\treturn nil\n}\n\n\/\/ Shutdown implements method in interface `txmgmt.TxMgr`\nfunc (txmgr *LockBasedTxMgr) Shutdown() {\n\ttxmgr.db.Close()\n}\n\n\/\/ Commit implements method in interface `txmgmt.TxMgr`\nfunc (txmgr *LockBasedTxMgr) Commit() error {\n\t\/\/ When using the purge manager for the first block commit after peer start, the asynchronous function\n\t\/\/ 'PrepareForExpiringKeys' is invoked in-line. However, for the subsequent blocks commits, this function is invoked\n\t\/\/ in advance for the next block\n\tif !txmgr.pvtdataPurgeMgr.usedOnce {\n\t\ttxmgr.pvtdataPurgeMgr.PrepareForExpiringKeys(txmgr.current.blockNum())\n\t\ttxmgr.pvtdataPurgeMgr.usedOnce = true\n\t}\n\tdefer func() {\n\t\ttxmgr.pvtdataPurgeMgr.BlockCommitDone()\n\t\ttxmgr.pvtdataPurgeMgr.PrepareForExpiringKeys(txmgr.current.blockNum() + 1)\n\t\ttxmgr.reset()\n\t\ttxmgr.commitRWLock.Unlock()\n\t}()\n\n\tlogger.Debugf(\"Committing updates to state database\")\n\tif txmgr.current == nil {\n\t\tpanic(\"validateAndPrepare() method should have been called before calling commit()\")\n\t}\n\n\tif err := txmgr.pvtdataPurgeMgr.DeleteExpiredAndUpdateBookkeeping(\n\t\ttxmgr.current.batch.PvtUpdates, txmgr.current.batch.HashUpdates); err != nil {\n\t\treturn err\n\t}\n\n\ttxmgr.commitRWLock.Lock()\n\tlogger.Debugf(\"Write lock acquired for committing updates to state database\")\n\tcommitHeight := version.NewHeight(txmgr.current.blockNum(), txmgr.current.maxTxNumber())\n\tif err := txmgr.db.ApplyPrivacyAwareUpdates(txmgr.current.batch, commitHeight); err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Debugf(\"Updates committed to state database\")\n\t\/\/ In the case of error state listeners will not recieve this call - instead a peer panic is caused by the ledger upon receiveing\n\t\/\/ an error from this function\n\ttxmgr.updateStateListeners()\n\treturn nil\n}\n\n\/\/ Rollback implements method in interface `txmgmt.TxMgr`\nfunc (txmgr *LockBasedTxMgr) Rollback() {\n\ttxmgr.reset()\n}\n\n\/\/ clearCache empty the cache maintained by the statedb implementation\nfunc (txmgr *LockBasedTxMgr) clearCache() {\n\tif txmgr.db.IsBulkOptimizable() {\n\t\ttxmgr.db.ClearCachedVersions()\n\t}\n}\n\n\/\/ ShouldRecover implements method in interface kvledger.Recoverer\nfunc (txmgr *LockBasedTxMgr) ShouldRecover(lastAvailableBlock uint64) (bool, uint64, error) {\n\tsavepoint, err := txmgr.GetLastSavepoint()\n\tif err != nil {\n\t\treturn false, 0, err\n\t}\n\tif savepoint == nil {\n\t\treturn true, 0, nil\n\t}\n\treturn savepoint.BlockNum != lastAvailableBlock, savepoint.BlockNum + 1, nil\n}\n\n\/\/ CommitLostBlock implements method in interface kvledger.Recoverer\nfunc (txmgr *LockBasedTxMgr) CommitLostBlock(blockAndPvtdata *ledger.BlockAndPvtData) error {\n\tblock := blockAndPvtdata.Block\n\tlogger.Debugf(\"Constructing updateSet for the block %d\", block.Header.Number)\n\tif err := txmgr.ValidateAndPrepare(blockAndPvtdata, false); err != nil {\n\t\treturn err\n\t}\n\tlogger.Debugf(\"Committing block %d to state database\", block.Header.Number)\n\treturn txmgr.Commit()\n}\n\nfunc extractStateUpdates(batch *privacyenabledstate.UpdateBatch, namespaces []string) ledger.StateUpdates {\n\tstateupdates := make(ledger.StateUpdates)\n\tfor _, namespace := range namespaces {\n\t\tupdatesMap := batch.PubUpdates.GetUpdates(namespace)\n\t\tvar kvwrites []*kvrwset.KVWrite\n\t\tfor key, versionedValue := range updatesMap {\n\t\t\tkvwrites = append(kvwrites, &kvrwset.KVWrite{Key: key, IsDelete: versionedValue.Value == nil, Value: versionedValue.Value})\n\t\t\tif len(kvwrites) > 0 {\n\t\t\t\tstateupdates[namespace] = kvwrites\n\t\t\t}\n\t\t}\n\t}\n\treturn stateupdates\n}\n\nfunc (txmgr *LockBasedTxMgr) updateStateListeners() {\n\tfor _, l := range txmgr.current.listeners {\n\t\tl.StateCommitDone(txmgr.ledgerid)\n\t}\n}\n\nfunc (txmgr *LockBasedTxMgr) reset() {\n\ttxmgr.current = nil\n\t\/\/ If statedb implementation needed bulk read optimization, cache might have been populated by\n\t\/\/ ValidateAndPrepare(). Once the block is validated and committed, populated cache needs to\n\t\/\/ be cleared.\n\tdefer txmgr.clearCache()\n}\n\n\/\/ pvtdataPurgeMgr wraps the actual purge manager and an additional flag 'usedOnce'\n\/\/ for usage of this additional flag, see the relevant comments in the txmgr.Commit() function above\ntype pvtdataPurgeMgr struct {\n\tpvtstatepurgemgmt.PurgeMgr\n\tusedOnce bool\n}\n<|endoftext|>"} {"text":"<commit_before>package tests\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/deis\/deis\/tests\/dockercli\"\n\t\"github.com\/deis\/deis\/tests\/etcdutils\"\n\t\"github.com\/deis\/deis\/tests\/utils\"\n)\n\nfunc TestLogger(t *testing.T) {\n\tvar err error\n\ttag, etcdPort := utils.BuildTag(), utils.RandomPort()\n\timageName := utils.ImagePrefix() + \"logger\" + \":\" + tag\n\n\t\/\/start etcd container\n\tetcdName := \"deis-etcd-\" + tag\n\tcli, stdout, stdoutPipe := dockercli.NewClient()\n\tdockercli.RunTestEtcd(t, etcdName, etcdPort)\n\tdefer cli.CmdRm(\"-f\", etcdName)\n\n\thost, port := utils.HostAddress(), utils.RandomPort()\n\tfmt.Printf(\"--- Run %s at %s:%s\\n\", imageName, host, port)\n\tname := \"deis-logger-\" + tag\n\tdefer cli.CmdRm(\"-f\", name)\n\tgo func() {\n\t\t_ = cli.CmdRm(\"-f\", name)\n\t\terr = dockercli.RunContainer(cli,\n\t\t\t\"--name\", name,\n\t\t\t\"--rm\",\n\t\t\t\"-p\", port+\":514\/udp\",\n\t\t\t\"-e\", \"EXTERNAL_PORT=\"+port,\n\t\t\t\"-e\", \"HOST=\"+host,\n\t\t\t\"-e\", \"ETCD_PORT=\"+etcdPort,\n\t\t\timageName)\n\t}()\n\tdockercli.PrintToStdout(t, stdout, stdoutPipe, \"deis-logger running\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ FIXME: Wait until etcd keys are published\n\ttime.Sleep(5000 * time.Millisecond)\n\tdockercli.DeisServiceTest(t, name, port, \"udp\")\n\tetcdutils.VerifyEtcdValue(t, \"\/deis\/logs\/host\", host, etcdPort)\n\tetcdutils.VerifyEtcdValue(t, \"\/deis\/logs\/port\", port, etcdPort)\n}\n<commit_msg>test(logger): update functional tests<commit_after>package tests\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/deis\/deis\/tests\/dockercli\"\n\t\"github.com\/deis\/deis\/tests\/etcdutils\"\n\t\"github.com\/deis\/deis\/tests\/utils\"\n)\n\nfunc TestLogger(t *testing.T) {\n\tvar err error\n\ttag, etcdPort := utils.BuildTag(), utils.RandomPort()\n\timageName := utils.ImagePrefix() + \"logger\" + \":\" + tag\n\n\t\/\/start etcd container\n\tetcdName := \"deis-etcd-\" + tag\n\tcli, stdout, stdoutPipe := dockercli.NewClient()\n\tdockercli.RunTestEtcd(t, etcdName, etcdPort)\n\tdefer cli.CmdRm(\"-f\", etcdName)\n\n\thost, port := utils.HostAddress(), utils.RandomPort()\n\tfmt.Printf(\"--- Run %s at %s:%s\\n\", imageName, host, port)\n\tname := \"deis-logger-\" + tag\n\tdefer cli.CmdRm(\"-f\", name)\n\tgo func() {\n\t\t_ = cli.CmdRm(\"-f\", name)\n\t\terr = dockercli.RunContainer(cli,\n\t\t\t\"--name\", name,\n\t\t\t\"--rm\",\n\t\t\t\"-p\", port+\":514\/udp\",\n\t\t\timageName,\n\t\t\t\"--publish\",\n\t\t\t\"--log-port=\"+port,\n\t\t\t\"--publish-host=\"+host,\n\t\t\t\"--publish-port=\"+etcdPort)\n\t}()\n\tdockercli.PrintToStdout(t, stdout, stdoutPipe, \"deis-logger running\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ FIXME: Wait until etcd keys are published\n\ttime.Sleep(5000 * time.Millisecond)\n\tdockercli.DeisServiceTest(t, name, port, \"udp\")\n\tetcdutils.VerifyEtcdValue(t, \"\/deis\/logs\/host\", host, etcdPort)\n\tetcdutils.VerifyEtcdValue(t, \"\/deis\/logs\/port\", port, etcdPort)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/howeyc\/fsnotify\"\n\t\"go\/build\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n)\n\nfunc install(buildpath, lastError string) (installed bool, errorOutput string, err error) {\n\tcmdline := []string{\"go\", \"get\", \"-v\", buildpath}\n\n\tcmd := exec.Command(\"go\", cmdline[1:]...)\n\tbufOut := bytes.NewBuffer([]byte{})\n\tbufErr := bytes.NewBuffer([]byte{})\n\tcmd.Stdout = bufOut\n\tcmd.Stderr = bufErr\n\n\terr = cmd.Run()\n\n\tif bufOut.Len() != 0 {\n\t\terrorOutput = bufOut.String()\n\t\tif errorOutput != lastError {\n\t\t\tfmt.Print(bufOut)\n\t\t}\n\t\terr = errors.New(\"compile error\")\n\t\treturn\n\t}\n\n\tinstalled = bufErr.Len() != 0\n\n\treturn\n}\n\nfunc run(binName, binPath string, args []string) (runch chan bool) {\n\trunch = make(chan bool)\n\tgo func() {\n\t\tcmdline := append([]string{binName}, args...)\n\t\tvar proc *os.Process\n\t\tfor _ = range runch {\n\t\t\tif proc != nil {\n\t\t\t\tproc.Kill()\n\t\t\t}\n\t\t\tcmd := exec.Command(binPath, args...)\n\t\t\tcmd.Stdout = os.Stdout\n\t\t\tcmd.Stderr = os.Stderr\n\t\t\tlog.Print(cmdline)\n\t\t\terr := cmd.Start()\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error on starting process: '%s'\\n\", err)\n\t\t\t}\n\t\t\tproc = cmd.Process\n\t\t}\n\t}()\n\treturn\n}\n\nfunc getWatcher(buildpath string) (watcher *fsnotify.Watcher, err error) {\n\twatcher, err = fsnotify.NewWatcher()\n\taddToWatcher(watcher, buildpath, map[string]bool{})\n\treturn\n}\n\nfunc addToWatcher(watcher *fsnotify.Watcher, importpath string, watching map[string]bool) {\n\tpkg, err := build.Import(importpath, \"\", 0)\n\tif err != nil {\n\t\treturn\n\t}\n\tif pkg.Goroot {\n\t\treturn\n\t}\n\twatcher.Watch(pkg.Dir)\n\twatching[importpath] = true\n\tfor _, imp := range pkg.Imports {\n\t\tif !watching[imp] {\n\t\t\taddToWatcher(watcher, imp, watching)\n\t\t}\n\t}\n}\n\nfunc rerun(buildpath string, args []string) (err error) {\n\tlog.Printf(\"setting up %s %v\", buildpath, args)\n\n\tpkg, err := build.Import(buildpath, \"\", 0)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif pkg.Name != \"main\" {\n\t\terr = errors.New(fmt.Sprintf(\"expected package %q, got %q\", \"main\", pkg.Name))\n\t\treturn\n\t}\n\n\t_, binName := path.Split(buildpath)\n\tvar binPath string\n\tif gobin := os.Getenv(\"GOBIN\"); gobin != \"\" {\n\t\tbinPath = filepath.Join(gobin, binName)\n\t} else {\n\t\tbinPath = filepath.Join(pkg.BinDir, binName)\n\t}\n\n\trunch := run(binName, binPath, args)\n\n\tvar errorOutput string\n\t_, errorOutput, ierr := install(buildpath, errorOutput)\n\tif ierr == nil {\n\t\trunch <- true\n\t}\n\n\tvar watcher *fsnotify.Watcher\n\twatcher, err = getWatcher(buildpath)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor {\n\t\t\/\/ read event from the watcher\n\t\twe, _ := <-watcher.Event\n\t\tvar installed bool\n\t\tinstalled, errorOutput, _ = install(buildpath, errorOutput)\n\t\tif installed {\n\t\t\tlog.Print(we.Name)\n\t\t\t\/\/ re-build and re-run the application\n\t\t\trunch <- true\n\t\t\t\/\/ close the watcher\n\t\t\twatcher.Close()\n\t\t\t\/\/ to clean things up: read events from the watcher until events chan is closed.\n\t\t\tgo func(events chan *fsnotify.FileEvent) {\n\t\t\t\tfor _ = range events {\n\n\t\t\t\t}\n\t\t\t}(watcher.Event)\n\t\t\t\/\/ create a new watcher\n\t\t\tlog.Println(\"rescanning\")\n\t\t\twatcher, err = getWatcher(buildpath)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/\/ we don't need the errors from the new watcher.\n\t\t\t\/\/ therfore we continiously discard them from the channel to avoid a deadlock.\n\t\t\tgo func(errors chan error) {\n\t\t\t\tfor _ = range errors {\n\n\t\t\t\t}\n\t\t\t}(watcher.Error)\n\t\t}\n\t}\n\treturn\n}\n\nfunc main() {\n\tif len(os.Args) < 2 {\n\t\tlog.Fatal(\"Usage: rerun <import path> [arg]*\")\n\t}\n\terr := rerun(os.Args[1], os.Args[2:])\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n}\n<commit_msg>Update\/fix for changes to the go1.1 tool. Apparently, the new version of the go tool sends compile error messages on stdErr (was: stdOut).<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/howeyc\/fsnotify\"\n\t\"go\/build\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n)\n\nfunc install(buildpath, lastError string) (installed bool, errorOutput string, err error) {\n\tcmdline := []string{\"go\", \"get\", \"-v\", buildpath}\n\n\tcmd := exec.Command(\"go\", cmdline[1:]...)\n\tbufOut := bytes.NewBuffer([]byte{})\n\tbufErr := bytes.NewBuffer([]byte{})\n\tcmd.Stdout = bufOut\n\tcmd.Stderr = bufErr\n\n\terr = cmd.Run()\n\n\tif bufErr.Len() != 0 {\n\t\terrorOutput = bufErr.String()\n\t\tif errorOutput != lastError {\n\t\t\tfmt.Print(bufErr)\n\t\t}\n\t\terr = errors.New(\"compile error\")\n\t\treturn\n\t}\n\n\tinstalled = bufErr.Len() != 0\n\n\treturn\n}\n\nfunc run(binName, binPath string, args []string) (runch chan bool) {\n\trunch = make(chan bool)\n\tgo func() {\n\t\tcmdline := append([]string{binName}, args...)\n\t\tvar proc *os.Process\n\t\tfor _ = range runch {\n\t\t\tif proc != nil {\n\t\t\t\tproc.Kill()\n\t\t\t}\n\t\t\tcmd := exec.Command(binPath, args...)\n\t\t\tcmd.Stdout = os.Stdout\n\t\t\tcmd.Stderr = os.Stderr\n\t\t\tlog.Print(cmdline)\n\t\t\terr := cmd.Start()\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"error on starting process: '%s'\\n\", err)\n\t\t\t}\n\t\t\tproc = cmd.Process\n\t\t}\n\t}()\n\treturn\n}\n\nfunc getWatcher(buildpath string) (watcher *fsnotify.Watcher, err error) {\n\twatcher, err = fsnotify.NewWatcher()\n\taddToWatcher(watcher, buildpath, map[string]bool{})\n\treturn\n}\n\nfunc addToWatcher(watcher *fsnotify.Watcher, importpath string, watching map[string]bool) {\n\tpkg, err := build.Import(importpath, \"\", 0)\n\tif err != nil {\n\t\treturn\n\t}\n\tif pkg.Goroot {\n\t\treturn\n\t}\n\twatcher.Watch(pkg.Dir)\n\twatching[importpath] = true\n\tfor _, imp := range pkg.Imports {\n\t\tif !watching[imp] {\n\t\t\taddToWatcher(watcher, imp, watching)\n\t\t}\n\t}\n}\n\nfunc rerun(buildpath string, args []string) (err error) {\n\tlog.Printf(\"setting up %s %v\", buildpath, args)\n\n\tpkg, err := build.Import(buildpath, \"\", 0)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif pkg.Name != \"main\" {\n\t\terr = errors.New(fmt.Sprintf(\"expected package %q, got %q\", \"main\", pkg.Name))\n\t\treturn\n\t}\n\n\t_, binName := path.Split(buildpath)\n\tvar binPath string\n\tif gobin := os.Getenv(\"GOBIN\"); gobin != \"\" {\n\t\tbinPath = filepath.Join(gobin, binName)\n\t} else {\n\t\tbinPath = filepath.Join(pkg.BinDir, binName)\n\t}\n\n\trunch := run(binName, binPath, args)\n\n\tvar errorOutput string\n\t_, errorOutput, ierr := install(buildpath, errorOutput)\n\tif ierr == nil {\n\t\trunch <- true\n\t}\n\n\tvar watcher *fsnotify.Watcher\n\twatcher, err = getWatcher(buildpath)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor {\n\t\t\/\/ read event from the watcher\n\t\twe, _ := <-watcher.Event\n\t\tvar installed bool\n\t\tinstalled, errorOutput, _ = install(buildpath, errorOutput)\n\t\tif installed {\n\t\t\tlog.Print(we.Name)\n\t\t\t\/\/ re-build and re-run the application\n\t\t\trunch <- true\n\t\t\t\/\/ close the watcher\n\t\t\twatcher.Close()\n\t\t\t\/\/ to clean things up: read events from the watcher until events chan is closed.\n\t\t\tgo func(events chan *fsnotify.FileEvent) {\n\t\t\t\tfor _ = range events {\n\n\t\t\t\t}\n\t\t\t}(watcher.Event)\n\t\t\t\/\/ create a new watcher\n\t\t\tlog.Println(\"rescanning\")\n\t\t\twatcher, err = getWatcher(buildpath)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/\/ we don't need the errors from the new watcher.\n\t\t\t\/\/ therfore we continiously discard them from the channel to avoid a deadlock.\n\t\t\tgo func(errors chan error) {\n\t\t\t\tfor _ = range errors {\n\n\t\t\t\t}\n\t\t\t}(watcher.Error)\n\t\t}\n\t}\n\treturn\n}\n\nfunc main() {\n\tif len(os.Args) < 2 {\n\t\tlog.Fatal(\"Usage: rerun <import path> [arg]*\")\n\t}\n\terr := rerun(os.Args[1], os.Args[2:])\n\tif err != nil {\n\t\tlog.Print(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\tlog \"github.com\/GameGophers\/nsq-logger\"\n\tnsq \"github.com\/bitly\/go-nsq\"\n\t\"github.com\/boltdb\/bolt\"\n\t\"gopkg.in\/vmihailenco\/msgpack.v2\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tDEFAULT_NSQLOOKUPD = \"127.0.0.1:4160\"\n\tENV_NSQLOOKUPD = \"NSQLOOKUPD_HOST\"\n\tTOPIC = \"REDOLOG\"\n\tCHANNEL = \"ARCH\"\n\tSERVICE = \"[ARCH]\"\n\tREDO_TIME_FORMAT = \"REDO-2006-01-02T15:04:05.RDO\"\n\tREDO_ROTATE_INTERVAL = 24 * time.Hour\n\tBOLTDB_BUCKET = \"REDOLOG\"\n\tDATA_DIRECTORY = \"\/data\"\n)\n\ntype Archiver struct {\n\tpending chan []byte\n\tstop chan bool\n}\n\nfunc (arch *Archiver) init() {\n\tarch.pending = make(chan []byte)\n\tarch.stop = make(chan bool)\n\tcfg := nsq.NewConfig()\n\tconsumer, err := nsq.NewConsumer(TOPIC, CHANNEL, cfg)\n\tif err != nil {\n\t\tlog.Critical(err)\n\t\tos.Exit(-1)\n\t}\n\n\t\/\/ message process\n\tconsumer.AddHandler(nsq.HandlerFunc(func(msg *nsq.Message) error {\n\t\treturn nil\n\t}))\n\n\t\/\/ read environtment variable\n\taddresses := []string{DEFAULT_NSQLOOKUPD}\n\tif env := os.Getenv(ENV_NSQLOOKUPD); env != \"\" {\n\t\taddresses = strings.Split(env, \";\")\n\t}\n\n\t\/\/ connect to nsqlookupd\n\tlog.Trace(\"connect to nsqlookupds ip:\", addresses)\n\tif err := consumer.ConnectToNSQLookupds(addresses); err != nil {\n\t\tlog.Critical(err)\n\t\treturn\n\t}\n\tlog.Info(\"nsqlookupd connected\")\n\n\tgo arch.archive_task()\n}\n\nfunc (arch *Archiver) archive_task() {\n\ttimer := time.After(REDO_ROTATE_INTERVAL)\n\tdb, err := bolt.Open(arch.new_redofile(), 0600, nil)\n\tif err != nil {\n\t\tlog.Critical(err)\n\t\tos.Exit(-1)\n\t}\n\tfor {\n\t\tselect {\n\t\tcase msg := <-arch.pending:\n\t\t\tvar record map[string]interface{}\n\t\t\terr := msgpack.Unmarshal(msg, &record)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdb.Update(func(tx *bolt.Tx) error {\n\t\t\t\tb := tx.Bucket([]byte(BOLTDB_BUCKET))\n\t\t\t\terr := b.Put([]byte(fmt.Sprint(record[\"TS\"])), msg)\n\t\t\t\treturn err\n\t\t\t})\n\t\tcase <-timer:\n\t\t\tdb.Close()\n\t\t\tdb, err = bolt.Open(arch.new_redofile(), 0600, nil)\n\t\t\tif err != nil {\n\t\t\t\tlog.Critical(err)\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\t\t\ttimer = time.After(REDO_ROTATE_INTERVAL)\n\t\t}\n\t}\n}\n\nfunc (arch *Archiver) new_redofile() string {\n\treturn DATA_DIRECTORY + \"\/\" + time.Now().Format(REDO_TIME_FORMAT)\n}\n<commit_msg>update<commit_after>package main\n\nimport (\n\t\"fmt\"\n\tlog \"github.com\/GameGophers\/nsq-logger\"\n\tnsq \"github.com\/bitly\/go-nsq\"\n\t\"github.com\/boltdb\/bolt\"\n\t\"gopkg.in\/vmihailenco\/msgpack.v2\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tDEFAULT_NSQLOOKUPD = \"127.0.0.1:4160\"\n\tENV_NSQLOOKUPD = \"NSQLOOKUPD_HOST\"\n\tTOPIC = \"REDOLOG\"\n\tCHANNEL = \"ARCH\"\n\tSERVICE = \"[ARCH]\"\n\tREDO_TIME_FORMAT = \"REDO-2006-01-02T15:04:05.RDO\"\n\tREDO_ROTATE_INTERVAL = 24 * time.Hour\n\tBOLTDB_BUCKET = \"REDOLOG\"\n\tDATA_DIRECTORY = \"\/data\"\n)\n\ntype Archiver struct {\n\tpending chan []byte\n\tstop chan bool\n}\n\nfunc (arch *Archiver) init() {\n\tarch.pending = make(chan []byte)\n\tarch.stop = make(chan bool)\n\tcfg := nsq.NewConfig()\n\tconsumer, err := nsq.NewConsumer(TOPIC, CHANNEL, cfg)\n\tif err != nil {\n\t\tlog.Critical(err)\n\t\tos.Exit(-1)\n\t}\n\n\t\/\/ message process\n\tconsumer.AddHandler(nsq.HandlerFunc(func(msg *nsq.Message) error {\n\t\tpending <- msg.Body\n\t\treturn nil\n\t}))\n\n\t\/\/ read environtment variable\n\taddresses := []string{DEFAULT_NSQLOOKUPD}\n\tif env := os.Getenv(ENV_NSQLOOKUPD); env != \"\" {\n\t\taddresses = strings.Split(env, \";\")\n\t}\n\n\t\/\/ connect to nsqlookupd\n\tlog.Trace(\"connect to nsqlookupds ip:\", addresses)\n\tif err := consumer.ConnectToNSQLookupds(addresses); err != nil {\n\t\tlog.Critical(err)\n\t\treturn\n\t}\n\tlog.Info(\"nsqlookupd connected\")\n\n\tgo arch.archive_task()\n}\n\nfunc (arch *Archiver) archive_task() {\n\ttimer := time.After(REDO_ROTATE_INTERVAL)\n\tdb, err := bolt.Open(arch.new_redofile(), 0600, nil)\n\tif err != nil {\n\t\tlog.Critical(err)\n\t\tos.Exit(-1)\n\t}\n\tfor {\n\t\tselect {\n\t\tcase msg := <-arch.pending:\n\t\t\tvar record map[string]interface{}\n\t\t\terr := msgpack.Unmarshal(msg, &record)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdb.Update(func(tx *bolt.Tx) error {\n\t\t\t\tb := tx.Bucket([]byte(BOLTDB_BUCKET))\n\t\t\t\terr := b.Put([]byte(fmt.Sprint(record[\"TS\"])), msg)\n\t\t\t\treturn err\n\t\t\t})\n\t\tcase <-timer:\n\t\t\tdb.Close()\n\t\t\tdb, err = bolt.Open(arch.new_redofile(), 0600, nil)\n\t\t\tif err != nil {\n\t\t\t\tlog.Critical(err)\n\t\t\t\tos.Exit(-1)\n\t\t\t}\n\t\t\ttimer = time.After(REDO_ROTATE_INTERVAL)\n\t\t}\n\t}\n}\n\nfunc (arch *Archiver) new_redofile() string {\n\treturn DATA_DIRECTORY + \"\/\" + time.Now().Format(REDO_TIME_FORMAT)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Netflix, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage chunked\n\nimport (\n\t\"bufio\"\n\t\"encoding\/binary\"\n\t\"io\"\n\n\t\"github.com\/netflix\/rend\/binprot\"\n\t\"github.com\/netflix\/rend\/common\"\n\t\"github.com\/netflix\/rend\/metrics\"\n)\n\n\/\/ TODO: replace sending new empty metadata on miss with emptyMeta\nvar emptyMeta = metadata{}\n\nfunc getAndTouchMetadata(rw *bufio.ReadWriter, key []byte, exptime uint32) ([]byte, metadata, error) {\n\tmetaKey := metaKey(key)\n\tif err := binprot.WriteGATCmd(rw, metaKey, exptime); err != nil {\n\t\treturn nil, metadata{}, err\n\t}\n\tmetadata, err := getMetadataCommon(rw)\n\treturn metaKey, metadata, err\n}\n\nfunc getMetadata(rw *bufio.ReadWriter, key []byte) ([]byte, metadata, error) {\n\tmetaKey := metaKey(key)\n\tif err := binprot.WriteGetCmd(rw, metaKey); err != nil {\n\t\treturn nil, metadata{}, err\n\t}\n\tmetadata, err := getMetadataCommon(rw)\n\treturn metaKey, metadata, err\n}\n\nfunc getMetadataCommon(rw *bufio.ReadWriter) (metadata, error) {\n\tif err := rw.Flush(); err != nil {\n\t\treturn metadata{}, err\n\t}\n\n\tresHeader, err := binprot.ReadResponseHeader(rw)\n\tif err != nil {\n\t\treturn metadata{}, err\n\t}\n\tdefer binprot.PutResponseHeader(resHeader)\n\n\terr = binprot.DecodeError(resHeader)\n\tif err != nil {\n\t\t\/\/ read in the message \"Not found\" after a miss\n\t\tn, ioerr := rw.Discard(int(resHeader.TotalBodyLength))\n\t\tmetrics.IncCounterBy(common.MetricBytesReadLocal, uint64(n))\n\t\tif ioerr != nil {\n\t\t\treturn metadata{}, ioerr\n\t\t}\n\t\treturn metadata{}, err\n\t}\n\n\tvar serverFlags uint32\n\tif err := binary.Read(rw, binary.BigEndian, &serverFlags); err != nil {\n\t\treturn metadata{}, err\n\t}\n\n\tvar metaData metadata\n\tif err := binary.Read(rw, binary.BigEndian, &metaData); err != nil {\n\t\treturn metadata{}, err\n\t}\n\n\tmetrics.IncCounterBy(common.MetricBytesReadLocal, uint64(metadataSize+4))\n\n\treturn metaData, nil\n}\n\nfunc simpleCmdLocal(rw *bufio.ReadWriter, flush bool) error {\n\tif flush {\n\t\tif err := rw.Flush(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tresHeader, err := binprot.ReadResponseHeader(rw)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer binprot.PutResponseHeader(resHeader)\n\n\tn, ioerr := rw.Discard(int(resHeader.TotalBodyLength))\n\tmetrics.IncCounterBy(common.MetricBytesReadLocal, uint64(n))\n\tif ioerr != nil {\n\t\treturn ioerr\n\t}\n\n\treturn binprot.DecodeError(resHeader)\n}\n\nfunc getLocalIntoBuf(rw *bufio.Reader, metaData metadata, tokenBuf, dataBuf []byte, chunkNum, totalDataLength int) (opcodeNoop bool, err error) {\n\tresHeader, err := binprot.ReadResponseHeader(rw)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer binprot.PutResponseHeader(resHeader)\n\n\t\/\/ it feels a bit dirty knowing about batch gets here, but it's the most logical place to put\n\t\/\/ a check for an opcode that signals the end of a batch get or GAT. This code is a bit too big\n\t\/\/ to copy-paste in multiple places.\n\tif resHeader.Opcode == binprot.OpcodeNoop {\n\t\treturn true, nil\n\t}\n\n\terr = binprot.DecodeError(resHeader)\n\tif err != nil {\n\t\t\/\/ read in the message \"Not found\" after a miss\n\t\tn, ioerr := rw.Discard(int(resHeader.TotalBodyLength))\n\t\tmetrics.IncCounterBy(common.MetricBytesReadLocal, uint64(n))\n\t\tif ioerr != nil {\n\t\t\treturn false, ioerr\n\t\t}\n\t\treturn false, err\n\t}\n\n\tvar serverFlags uint32\n\tif err := binary.Read(rw, binary.BigEndian, &serverFlags); err != nil {\n\t\treturn false, err\n\t}\n\tmetrics.IncCounterBy(common.MetricBytesReadLocal, 4)\n\n\t\/\/ Read in token if requested\n\tif tokenBuf != nil {\n\t\tn, err := io.ReadFull(rw, tokenBuf)\n\t\tmetrics.IncCounterBy(common.MetricBytesReadLocal, uint64(n))\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\t\/\/ indices for slicing, end exclusive\n\tstart, end := chunkSliceIndices(int(metaData.ChunkSize), chunkNum, int(metaData.Length))\n\t\/\/ read data directly into buf\n\tchunkBuf := dataBuf[start:end]\n\n\t\/\/ Read in value\n\tn, err := io.ReadFull(rw, chunkBuf)\n\tmetrics.IncCounterBy(common.MetricBytesReadLocal, uint64(n))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t\/\/ consume padding at end of chunk if needed\n\tif len(chunkBuf) < totalDataLength {\n\t\tn, ioerr := rw.Discard(totalDataLength - len(chunkBuf))\n\t\tmetrics.IncCounterBy(common.MetricBytesReadLocal, uint64(n))\n\t\tif ioerr != nil {\n\t\t\treturn false, ioerr\n\t\t}\n\t}\n\n\treturn false, nil\n}\n<commit_msg>Use a single empty metadata for all metadata errors and misses<commit_after>\/\/ Copyright 2015 Netflix, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage chunked\n\nimport (\n\t\"bufio\"\n\t\"encoding\/binary\"\n\t\"io\"\n\n\t\"github.com\/netflix\/rend\/binprot\"\n\t\"github.com\/netflix\/rend\/common\"\n\t\"github.com\/netflix\/rend\/metrics\"\n)\n\n\/\/ TODO: replace sending new empty metadata on miss with emptyMeta\nvar emptyMeta = metadata{}\n\nfunc getAndTouchMetadata(rw *bufio.ReadWriter, key []byte, exptime uint32) ([]byte, metadata, error) {\n\tmetaKey := metaKey(key)\n\tif err := binprot.WriteGATCmd(rw, metaKey, exptime); err != nil {\n\t\treturn nil, emptyMeta, err\n\t}\n\tmetadata, err := getMetadataCommon(rw)\n\treturn metaKey, metadata, err\n}\n\nfunc getMetadata(rw *bufio.ReadWriter, key []byte) ([]byte, metadata, error) {\n\tmetaKey := metaKey(key)\n\tif err := binprot.WriteGetCmd(rw, metaKey); err != nil {\n\t\treturn nil, emptyMeta, err\n\t}\n\tmetadata, err := getMetadataCommon(rw)\n\treturn metaKey, metadata, err\n}\n\nfunc getMetadataCommon(rw *bufio.ReadWriter) (metadata, error) {\n\tif err := rw.Flush(); err != nil {\n\t\treturn emptyMeta, err\n\t}\n\n\tresHeader, err := binprot.ReadResponseHeader(rw)\n\tif err != nil {\n\t\treturn emptyMeta, err\n\t}\n\tdefer binprot.PutResponseHeader(resHeader)\n\n\terr = binprot.DecodeError(resHeader)\n\tif err != nil {\n\t\t\/\/ read in the message \"Not found\" after a miss\n\t\tn, ioerr := rw.Discard(int(resHeader.TotalBodyLength))\n\t\tmetrics.IncCounterBy(common.MetricBytesReadLocal, uint64(n))\n\t\tif ioerr != nil {\n\t\t\treturn emptyMeta, ioerr\n\t\t}\n\t\treturn emptyMeta, err\n\t}\n\n\tvar serverFlags uint32\n\tif err := binary.Read(rw, binary.BigEndian, &serverFlags); err != nil {\n\t\treturn emptyMeta, err\n\t}\n\n\tvar metaData metadata\n\tif err := binary.Read(rw, binary.BigEndian, &metaData); err != nil {\n\t\treturn emptyMeta, err\n\t}\n\n\tmetrics.IncCounterBy(common.MetricBytesReadLocal, uint64(metadataSize+4))\n\n\treturn metaData, nil\n}\n\nfunc simpleCmdLocal(rw *bufio.ReadWriter, flush bool) error {\n\tif flush {\n\t\tif err := rw.Flush(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tresHeader, err := binprot.ReadResponseHeader(rw)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer binprot.PutResponseHeader(resHeader)\n\n\tn, ioerr := rw.Discard(int(resHeader.TotalBodyLength))\n\tmetrics.IncCounterBy(common.MetricBytesReadLocal, uint64(n))\n\tif ioerr != nil {\n\t\treturn ioerr\n\t}\n\n\treturn binprot.DecodeError(resHeader)\n}\n\nfunc getLocalIntoBuf(rw *bufio.Reader, metaData metadata, tokenBuf, dataBuf []byte, chunkNum, totalDataLength int) (opcodeNoop bool, err error) {\n\tresHeader, err := binprot.ReadResponseHeader(rw)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer binprot.PutResponseHeader(resHeader)\n\n\t\/\/ it feels a bit dirty knowing about batch gets here, but it's the most logical place to put\n\t\/\/ a check for an opcode that signals the end of a batch get or GAT. This code is a bit too big\n\t\/\/ to copy-paste in multiple places.\n\tif resHeader.Opcode == binprot.OpcodeNoop {\n\t\treturn true, nil\n\t}\n\n\terr = binprot.DecodeError(resHeader)\n\tif err != nil {\n\t\t\/\/ read in the message \"Not found\" after a miss\n\t\tn, ioerr := rw.Discard(int(resHeader.TotalBodyLength))\n\t\tmetrics.IncCounterBy(common.MetricBytesReadLocal, uint64(n))\n\t\tif ioerr != nil {\n\t\t\treturn false, ioerr\n\t\t}\n\t\treturn false, err\n\t}\n\n\tvar serverFlags uint32\n\tif err := binary.Read(rw, binary.BigEndian, &serverFlags); err != nil {\n\t\treturn false, err\n\t}\n\tmetrics.IncCounterBy(common.MetricBytesReadLocal, 4)\n\n\t\/\/ Read in token if requested\n\tif tokenBuf != nil {\n\t\tn, err := io.ReadFull(rw, tokenBuf)\n\t\tmetrics.IncCounterBy(common.MetricBytesReadLocal, uint64(n))\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\t\/\/ indices for slicing, end exclusive\n\tstart, end := chunkSliceIndices(int(metaData.ChunkSize), chunkNum, int(metaData.Length))\n\t\/\/ read data directly into buf\n\tchunkBuf := dataBuf[start:end]\n\n\t\/\/ Read in value\n\tn, err := io.ReadFull(rw, chunkBuf)\n\tmetrics.IncCounterBy(common.MetricBytesReadLocal, uint64(n))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t\/\/ consume padding at end of chunk if needed\n\tif len(chunkBuf) < totalDataLength {\n\t\tn, ioerr := rw.Discard(totalDataLength - len(chunkBuf))\n\t\tmetrics.IncCounterBy(common.MetricBytesReadLocal, uint64(n))\n\t\tif ioerr != nil {\n\t\t\treturn false, ioerr\n\t\t}\n\t}\n\n\treturn false, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package app\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/atlassian\/smith\"\n\t\"github.com\/atlassian\/smith\/pkg\/processor\"\n\t\"github.com\/atlassian\/smith\/pkg\/readychecker\"\n\t\"github.com\/atlassian\/smith\/pkg\/resources\"\n\n\tkerrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/fields\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/client-go\/dynamic\"\n\t\"k8s.io\/client-go\/informers\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\textensions \"k8s.io\/client-go\/pkg\/apis\/extensions\/v1beta1\"\n\t\"k8s.io\/client-go\/rest\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n)\n\ntype App struct {\n\tRestConfig *rest.Config\n}\n\nfunc (a *App) Run(ctx context.Context) error {\n\tclientset, err := kubernetes.NewForConfig(a.RestConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbundleScheme := resources.GetBundleScheme()\n\tbundleClient, err := resources.GetBundleTprClient(a.RestConfig, bundleScheme)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclients := dynamic.NewClientPool(a.RestConfig, nil, dynamic.LegacyAPIPathResolverFunc)\n\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\t\/\/ 1. Informers\n\n\tinformerFactory := informers.NewSharedInformerFactory(clientset, 1*time.Minute)\n\ttprInf := informerFactory.Extensions().V1beta1().ThirdPartyResources().Informer()\n\tdeploymentInf := informerFactory.Extensions().V1beta1().Deployments().Informer()\n\tingressInf := informerFactory.Extensions().V1beta1().Ingresses().Informer()\n\tserviceInf := informerFactory.Core().V1().Services().Informer()\n\n\tinformerFactory.Start(ctx.Done())\n\n\t\/\/ We must wait for tprInf to populate its cache to avoid reading from an empty cache\n\t\/\/ in Ready Checker.\n\tif !cache.WaitForCacheSync(ctx.Done(), tprInf.HasSynced) {\n\t\treturn errors.New(\"wait for TPR Informer was cancelled\")\n\t}\n\n\t\/\/ 2. Ready Checker\n\n\trc := &readychecker.ReadyChecker{\n\t\tStore: &tprStore{\n\t\t\tstore: tprInf.GetStore(),\n\t\t},\n\t}\n\n\t\/\/ 3. Processor\n\n\tbp := processor.New(ctx, bundleClient, clients, rc, bundleScheme)\n\tdefer bp.Join() \/\/ await termination\n\tdefer cancel() \/\/ cancel ctx to signal done to processor (and everything else)\n\n\t\/\/ 4. Ensure ThirdPartyResource TEMPLATE exists\n\terr = retryUntilSuccessOrDone(ctx, func() error {\n\t\treturn ensureResourceExists(clientset)\n\t}, func(e error) bool {\n\t\t\/\/ TODO be smarter about what is retried\n\t\tlog.Printf(\"Failed to create resource %s: %v\", smith.BundleResourceName, e)\n\t\treturn false\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ 5. Watch Bundles\n\tbundleInf, err := watchBundles(ctx, bundleClient, bundleScheme, bp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ We must wait for bundleInf to populate its cache to avoid reading from an empty cache\n\t\/\/ in case of resource-generated events.\n\tif !cache.WaitForCacheSync(ctx.Done(), bundleInf.HasSynced) {\n\t\treturn errors.New(\"wait for Bundle Informer was cancelled\")\n\t}\n\n\tsl := bundleStore{\n\t\tstore: bundleInf.GetStore(),\n\t\tscheme: bundleScheme,\n\t}\n\treh := &resourceEventHandler{\n\t\tprocessor: bp,\n\t\tname2bundle: sl.Get,\n\t}\n\n\t\/\/ 6. TODO watch supported built-in resource types for events.\n\n\t\/\/ 7. Watch Third Party Resources to add watches for supported ones\n\n\ttprInf.AddEventHandler(newTprEventHandler(ctx, reh, clients))\n\n\t\/\/ 8. Watch other kinds of resources\n\n\tdeploymentInf.AddEventHandler(reh)\n\tingressInf.AddEventHandler(reh)\n\tserviceInf.AddEventHandler(reh)\n\n\t<-ctx.Done()\n\treturn ctx.Err()\n}\n\nfunc ensureResourceExists(clientset kubernetes.Interface) error {\n\tlog.Printf(\"Creating ThirdPartyResource %s\", smith.BundleResourceName)\n\ttpr := &extensions.ThirdPartyResource{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: smith.BundleResourceName,\n\t\t},\n\t\tDescription: \"Smith resource manager\",\n\t\tVersions: []extensions.APIVersion{\n\t\t\t{Name: smith.BundleResourceVersion},\n\t\t},\n\t}\n\tres, err := clientset.ExtensionsV1beta1().ThirdPartyResources().Create(tpr)\n\tif err != nil {\n\t\tif !kerrors.IsAlreadyExists(err) {\n\t\t\treturn fmt.Errorf(\"failed to create %s ThirdPartyResource: %v\", smith.BundleResourceName, err)\n\t\t}\n\t\t\/\/ TODO handle conflicts and update properly\n\t\t\/\/log.Printf(\"ThirdPartyResource %s already exists, updating\", smith.BundleResourceName)\n\t\t\/\/_, err = clientset.ExtensionsV1beta1().ThirdPartyResources().Update(tpr)\n\t\t\/\/if err != nil {\n\t\t\/\/\treturn fmt.Errorf(\"failed to update %s ThirdPartyResource: %v\", smith.BundleResourceName, err)\n\t\t\/\/}\n\t} else {\n\t\tlog.Printf(\"ThirdPartyResource %s created: %+v\", smith.BundleResourceName, res)\n\t\t\/\/ TODO It takes a while for k8s to add a new rest endpoint. Polling?\n\t\ttime.Sleep(10 * time.Second)\n\t}\n\treturn nil\n}\n\nfunc watchBundles(ctx context.Context, bundleClient cache.Getter, bundleScheme *runtime.Scheme, processor Processor) (cache.SharedInformer, error) {\n\tbundleInf := cache.NewSharedInformer(\n\t\tcache.NewListWatchFromClient(bundleClient, smith.BundleResourcePath, metav1.NamespaceAll, fields.Everything()),\n\t\t&smith.Bundle{},\n\t\t1*time.Minute)\n\n\tbundleInf.AddEventHandler(&bundleEventHandler{\n\t\tprocessor: processor,\n\t\tscheme: bundleScheme,\n\t})\n\n\tgo bundleInf.Run(ctx.Done())\n\n\treturn bundleInf, nil\n}\n<commit_msg>Cleanup<commit_after>package app\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/atlassian\/smith\"\n\t\"github.com\/atlassian\/smith\/pkg\/processor\"\n\t\"github.com\/atlassian\/smith\/pkg\/readychecker\"\n\t\"github.com\/atlassian\/smith\/pkg\/resources\"\n\n\tkerrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/fields\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/client-go\/dynamic\"\n\t\"k8s.io\/client-go\/informers\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\textensions \"k8s.io\/client-go\/pkg\/apis\/extensions\/v1beta1\"\n\t\"k8s.io\/client-go\/rest\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n)\n\ntype App struct {\n\tRestConfig *rest.Config\n}\n\nfunc (a *App) Run(ctx context.Context) error {\n\tclientset, err := kubernetes.NewForConfig(a.RestConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbundleScheme := resources.GetBundleScheme()\n\tbundleClient, err := resources.GetBundleTprClient(a.RestConfig, bundleScheme)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclients := dynamic.NewClientPool(a.RestConfig, nil, dynamic.LegacyAPIPathResolverFunc)\n\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\t\/\/ 1. Informers\n\n\tinformerFactory := informers.NewSharedInformerFactory(clientset, 1*time.Minute)\n\ttprInf := informerFactory.Extensions().V1beta1().ThirdPartyResources().Informer()\n\tdeploymentInf := informerFactory.Extensions().V1beta1().Deployments().Informer()\n\tingressInf := informerFactory.Extensions().V1beta1().Ingresses().Informer()\n\tserviceInf := informerFactory.Core().V1().Services().Informer()\n\n\tinformerFactory.Start(ctx.Done())\n\n\t\/\/ We must wait for tprInf to populate its cache to avoid reading from an empty cache\n\t\/\/ in Ready Checker.\n\tif !cache.WaitForCacheSync(ctx.Done(), tprInf.HasSynced) {\n\t\treturn errors.New(\"wait for TPR Informer was cancelled\")\n\t}\n\n\t\/\/ 2. Ready Checker\n\n\trc := &readychecker.ReadyChecker{\n\t\tStore: &tprStore{\n\t\t\tstore: tprInf.GetStore(),\n\t\t},\n\t}\n\n\t\/\/ 3. Processor\n\n\tbp := processor.New(ctx, bundleClient, clients, rc, bundleScheme)\n\tdefer bp.Join() \/\/ await termination\n\tdefer cancel() \/\/ cancel ctx to signal done to processor (and everything else)\n\n\t\/\/ 4. Ensure ThirdPartyResource TEMPLATE exists\n\terr = retryUntilSuccessOrDone(ctx, func() error {\n\t\treturn ensureResourceExists(clientset)\n\t}, func(e error) bool {\n\t\t\/\/ TODO be smarter about what is retried\n\t\tlog.Printf(\"Failed to create resource %s: %v\", smith.BundleResourceName, e)\n\t\treturn false\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ 5. Watch Bundles\n\tbundleInf, err := watchBundles(ctx, bundleClient, bundleScheme, bp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ We must wait for bundleInf to populate its cache to avoid reading from an empty cache\n\t\/\/ in case of resource-generated events.\n\tif !cache.WaitForCacheSync(ctx.Done(), bundleInf.HasSynced) {\n\t\treturn errors.New(\"wait for Bundle Informer was cancelled\")\n\t}\n\n\tsl := bundleStore{\n\t\tstore: bundleInf.GetStore(),\n\t\tscheme: bundleScheme,\n\t}\n\treh := &resourceEventHandler{\n\t\tprocessor: bp,\n\t\tname2bundle: sl.Get,\n\t}\n\n\t\/\/ 6. Watch supported built-in resource types\n\n\tdeploymentInf.AddEventHandler(reh)\n\tingressInf.AddEventHandler(reh)\n\tserviceInf.AddEventHandler(reh)\n\n\t\/\/ 7. Watch Third Party Resources to add watches for supported ones\n\n\ttprInf.AddEventHandler(newTprEventHandler(ctx, reh, clients))\n\n\t<-ctx.Done()\n\treturn ctx.Err()\n}\n\nfunc ensureResourceExists(clientset kubernetes.Interface) error {\n\tlog.Printf(\"Creating ThirdPartyResource %s\", smith.BundleResourceName)\n\ttpr := &extensions.ThirdPartyResource{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: smith.BundleResourceName,\n\t\t},\n\t\tDescription: \"Smith resource manager\",\n\t\tVersions: []extensions.APIVersion{\n\t\t\t{Name: smith.BundleResourceVersion},\n\t\t},\n\t}\n\tres, err := clientset.ExtensionsV1beta1().ThirdPartyResources().Create(tpr)\n\tif err != nil {\n\t\tif !kerrors.IsAlreadyExists(err) {\n\t\t\treturn fmt.Errorf(\"failed to create %s ThirdPartyResource: %v\", smith.BundleResourceName, err)\n\t\t}\n\t\t\/\/ TODO handle conflicts and update properly\n\t\t\/\/log.Printf(\"ThirdPartyResource %s already exists, updating\", smith.BundleResourceName)\n\t\t\/\/_, err = clientset.ExtensionsV1beta1().ThirdPartyResources().Update(tpr)\n\t\t\/\/if err != nil {\n\t\t\/\/\treturn fmt.Errorf(\"failed to update %s ThirdPartyResource: %v\", smith.BundleResourceName, err)\n\t\t\/\/}\n\t} else {\n\t\tlog.Printf(\"ThirdPartyResource %s created: %+v\", smith.BundleResourceName, res)\n\t\t\/\/ TODO It takes a while for k8s to add a new rest endpoint. Polling?\n\t\ttime.Sleep(10 * time.Second)\n\t}\n\treturn nil\n}\n\nfunc watchBundles(ctx context.Context, bundleClient cache.Getter, bundleScheme *runtime.Scheme, processor Processor) (cache.SharedInformer, error) {\n\tbundleInf := cache.NewSharedInformer(\n\t\tcache.NewListWatchFromClient(bundleClient, smith.BundleResourcePath, metav1.NamespaceAll, fields.Everything()),\n\t\t&smith.Bundle{},\n\t\t1*time.Minute)\n\n\tbundleInf.AddEventHandler(&bundleEventHandler{\n\t\tprocessor: processor,\n\t\tscheme: bundleScheme,\n\t})\n\n\tgo bundleInf.Run(ctx.Done())\n\n\treturn bundleInf, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package git\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"time\"\n\n\t\"gopkg.in\/src-d\/go-git.v4\"\n\t\"gopkg.in\/src-d\/go-git.v4\/plumbing\"\n\t\"gopkg.in\/src-d\/go-git.v4\/plumbing\/object\"\n\t\"gopkg.in\/src-d\/go-git.v4\/plumbing\/transport\/http\"\n)\n\ntype GitRepo struct {\n\turl string\n\trepo *git.Repository\n\tworktree *git.Worktree\n\tmu sync.Mutex\n\n\tworkDir string\n\tuser string\n\ttoken string\n\tcommitName string\n\tcommitEmail string\n\n\tbranch string\n}\n\ntype Config struct {\n\tURL string\n\tWorkDir string\n\n\tUsername string\n\tToken string\n\n\tAuthorName string\n\tAuthorEmail string\n}\n\nfunc NewGitRepo(cfg Config) (*GitRepo, error) {\n\tu, err := url.Parse(cfg.URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tworkDir := filepath.Join(cfg.WorkDir, u.Path)\n\n\treturn &GitRepo{\n\t\tworkDir: workDir,\n\t\turl: cfg.URL,\n\t\tuser: cfg.Username,\n\t\ttoken: cfg.Token,\n\t\tcommitName: cfg.AuthorName,\n\t\tcommitEmail: cfg.AuthorEmail,\n\t\tbranch: \"master\",\n\t}, nil\n}\n\nfunc (r *GitRepo) auth() *http.BasicAuth {\n\treturn &http.BasicAuth{\n\t\tUsername: r.user,\n\t\tPassword: r.token,\n\t}\n}\n\nfunc (r *GitRepo) author() *object.Signature {\n\treturn &object.Signature{\n\t\tName: r.commitName,\n\t\tEmail: r.commitEmail,\n\t\tWhen: time.Now(),\n\t}\n}\n\nfunc (r *GitRepo) Clone(ctx context.Context) error {\n\trepo, err := git.PlainCloneContext(ctx, r.workDir, false, &git.CloneOptions{\n\t\tURL: r.url,\n\t\tAuth: r.auth(),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw, err := repo.Worktree()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.repo = repo\n\tr.worktree = w\n\n\treturn nil\n}\n\nfunc (r *GitRepo) Open(ctx context.Context) error {\n\trepo, err := git.PlainOpen(r.workDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw, err := repo.Worktree()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.repo = repo\n\tr.worktree = w\n\n\treturn nil\n}\n\nfunc (r *GitRepo) CloneOrOpen(ctx context.Context) error {\n\t_, err := os.Stat(r.workDir)\n\tswitch {\n\tcase os.IsNotExist(err):\n\t\treturn r.Clone(ctx)\n\tdefault:\n\t\terr := r.Open(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn r.Pull(ctx)\n\t}\n}\n\nfunc (r *GitRepo) Pull(ctx context.Context) error {\n\tif err := r.worktree.Checkout(&git.CheckoutOptions{\n\t\tBranch: plumbing.NewBranchReferenceName(r.branch),\n\t\tForce: true,\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\terr := r.worktree.PullContext(ctx, &git.PullOptions{\n\t\tAuth: r.auth(),\n\t\tReferenceName: plumbing.NewBranchReferenceName(r.branch),\n\t\tForce: true,\n\t})\n\tif err != nil && err != git.NoErrAlreadyUpToDate {\n\t\treturn fmt.Errorf(\"failed to pull: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (r *GitRepo) Path() string {\n\treturn r.workDir\n}\n\nfunc (r *GitRepo) Push(ctx context.Context) error {\n\tif err := r.repo.PushContext(ctx, &git.PushOptions{\n\t\tAuth: r.auth(),\n\t\tRemoteName: \"origin\",\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"failed to push %v: %v\", r.branch, err)\n\t}\n\n\treturn nil\n}\n\nfunc (r *GitRepo) Add(path string) error {\n\t_, err := r.worktree.Add(path)\n\treturn err\n}\n\nfunc (r *GitRepo) Commit(msg string) error {\n\t_, err := r.worktree.Commit(msg, &git.CommitOptions{\n\t\tAuthor: r.author(),\n\t})\n\treturn err\n}\n\nfunc (r *GitRepo) IsClean() bool {\n\tstatus, err := r.worktree.Status()\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn status.IsClean()\n}\n<commit_msg>Add Objects<commit_after>package git\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"time\"\n\n\t\"gopkg.in\/src-d\/go-git.v4\"\n\t\"gopkg.in\/src-d\/go-git.v4\/plumbing\"\n\t\"gopkg.in\/src-d\/go-git.v4\/plumbing\/object\"\n\t\"gopkg.in\/src-d\/go-git.v4\/plumbing\/transport\/http\"\n)\n\ntype GitRepo struct {\n\turl string\n\trepo *git.Repository\n\tworktree *git.Worktree\n\tmu sync.Mutex\n\n\tworkDir string\n\tuser string\n\ttoken string\n\tcommitName string\n\tcommitEmail string\n\n\tbranch string\n}\n\ntype Config struct {\n\tURL string\n\tWorkDir string\n\n\tUsername string\n\tToken string\n\n\tAuthorName string\n\tAuthorEmail string\n}\n\nfunc NewGitRepo(cfg Config) (*GitRepo, error) {\n\tu, err := url.Parse(cfg.URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tworkDir := filepath.Join(cfg.WorkDir, u.Path)\n\n\treturn &GitRepo{\n\t\tworkDir: workDir,\n\t\turl: cfg.URL,\n\t\tuser: cfg.Username,\n\t\ttoken: cfg.Token,\n\t\tcommitName: cfg.AuthorName,\n\t\tcommitEmail: cfg.AuthorEmail,\n\t\tbranch: \"master\",\n\t}, nil\n}\n\nfunc (r *GitRepo) auth() *http.BasicAuth {\n\treturn &http.BasicAuth{\n\t\tUsername: r.user,\n\t\tPassword: r.token,\n\t}\n}\n\nfunc (r *GitRepo) author() *object.Signature {\n\treturn &object.Signature{\n\t\tName: r.commitName,\n\t\tEmail: r.commitEmail,\n\t\tWhen: time.Now(),\n\t}\n}\n\nfunc (r *GitRepo) Clone(ctx context.Context) error {\n\trepo, err := git.PlainCloneContext(ctx, r.workDir, false, &git.CloneOptions{\n\t\tURL: r.url,\n\t\tAuth: r.auth(),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw, err := repo.Worktree()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.repo = repo\n\tr.worktree = w\n\n\treturn nil\n}\n\nfunc (r *GitRepo) Objects() (map[string]string, error) {\n\tm := make(map[string]string)\n\thead, err := r.repo.Head()\n\tif err != nil {\n\t\treturn m, err\n\t}\n\tcommit, err := r.repo.CommitObject(head.Hash())\n\tif err != nil {\n\t\treturn m, err\n\t}\n\ttree, err := commit.Tree()\n\tif err != nil {\n\t\treturn m, err\n\t}\n\tfor _, entry := range tree.Entries {\n\t\tcontent, _ := ioutil.ReadFile(filepath.Join(r.workDir, entry.Name))\n\t\tm[entry.Name] = string(content)\n\t}\n\treturn m, nil\n}\n\nfunc (r *GitRepo) Open(ctx context.Context) error {\n\trepo, err := git.PlainOpen(r.workDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw, err := repo.Worktree()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.repo = repo\n\tr.worktree = w\n\n\treturn nil\n}\n\nfunc (r *GitRepo) CloneOrOpen(ctx context.Context) error {\n\t_, err := os.Stat(r.workDir)\n\tswitch {\n\tcase os.IsNotExist(err):\n\t\treturn r.Clone(ctx)\n\tdefault:\n\t\terr := r.Open(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn r.Pull(ctx)\n\t}\n}\n\nfunc (r *GitRepo) Pull(ctx context.Context) error {\n\tif err := r.worktree.Checkout(&git.CheckoutOptions{\n\t\tBranch: plumbing.NewBranchReferenceName(r.branch),\n\t\tForce: true,\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\terr := r.worktree.PullContext(ctx, &git.PullOptions{\n\t\tAuth: r.auth(),\n\t\tReferenceName: plumbing.NewBranchReferenceName(r.branch),\n\t\tForce: true,\n\t})\n\tif err != nil && err != git.NoErrAlreadyUpToDate {\n\t\treturn fmt.Errorf(\"failed to pull: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (r *GitRepo) Path() string {\n\treturn r.workDir\n}\n\nfunc (r *GitRepo) Push(ctx context.Context) error {\n\tif err := r.repo.PushContext(ctx, &git.PushOptions{\n\t\tAuth: r.auth(),\n\t\tRemoteName: \"origin\",\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"failed to push %v: %v\", r.branch, err)\n\t}\n\n\treturn nil\n}\n\nfunc (r *GitRepo) Add(path string) error {\n\t_, err := r.worktree.Add(path)\n\treturn err\n}\n\nfunc (r *GitRepo) Commit(msg string) error {\n\t_, err := r.worktree.Commit(msg, &git.CommitOptions{\n\t\tAuthor: r.author(),\n\t})\n\treturn err\n}\n\nfunc (r *GitRepo) IsClean() bool {\n\tstatus, err := r.worktree.Status()\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn status.IsClean()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 the LinuxBoot Authors. All rights reserved\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage utk\n\n\/\/ Package utk is where the implementation of the utk command lives.\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"github.com\/linuxboot\/fiano\/pkg\/uefi\"\n\t\"github.com\/linuxboot\/fiano\/pkg\/visitors\"\n)\n\n\/\/ Run runs the utk command with the given arguments.\nfunc Run(args ...string) error {\n\tif len(args) == 0 {\n\t\treturn errors.New(\"at least one argument is required\")\n\t}\n\n\tv, err := visitors.ParseCLI(args[1:])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Load and parse the image.\n\tpath := args[0]\n\tf, err := os.Stat(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar parsedRoot uefi.Firmware\n\tif m := f.Mode(); m.IsDir() {\n\t\t\/\/ Call ParseDir\n\t\tpd := visitors.ParseDir{DirPath: path}\n\t\tif parsedRoot, err = pd.Parse(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ Assemble the tree from the bottom up\n\t\ta := visitors.Assemble{}\n\t\tif err = a.Run(parsedRoot); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t\/\/ Regular file\n\t\timage, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tparsedRoot, err = uefi.Parse(image)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Execute the instructions from the command line.\n\treturn visitors.ExecuteCLI(parsedRoot, v)\n}\n<commit_msg>fix-lint<commit_after>\/\/ Copyright 2018 the LinuxBoot Authors. All rights reserved\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package utk is where the implementation of the utk command lives.\npackage utk\n\nimport (\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"github.com\/linuxboot\/fiano\/pkg\/uefi\"\n\t\"github.com\/linuxboot\/fiano\/pkg\/visitors\"\n)\n\n\/\/ Run runs the utk command with the given arguments.\nfunc Run(args ...string) error {\n\tif len(args) == 0 {\n\t\treturn errors.New(\"at least one argument is required\")\n\t}\n\n\tv, err := visitors.ParseCLI(args[1:])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Load and parse the image.\n\tpath := args[0]\n\tf, err := os.Stat(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar parsedRoot uefi.Firmware\n\tif m := f.Mode(); m.IsDir() {\n\t\t\/\/ Call ParseDir\n\t\tpd := visitors.ParseDir{DirPath: path}\n\t\tif parsedRoot, err = pd.Parse(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ Assemble the tree from the bottom up\n\t\ta := visitors.Assemble{}\n\t\tif err = a.Run(parsedRoot); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\t\/\/ Regular file\n\t\timage, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tparsedRoot, err = uefi.Parse(image)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Execute the instructions from the command line.\n\treturn visitors.ExecuteCLI(parsedRoot, v)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 phcurtis fn Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage fn_test\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/phcurtis\/fn\"\n)\n\nvar pkgCfgDefWant = &fn.PkgCfgStruct{\n\tLogFlags: fn.LflagsDef,\n\tLogPrefix: fn.LogPrefixDef,\n\tLogTraceFlags: fn.TrFlagsDef,\n\tLogAlignFile: fn.LogAlignFileDef,\n\tLogAlignFunc: fn.LogAlignFuncDef,\n}\nvar pkgCfgDefWantstr = fmt.Sprintf(\"%+v\", pkgCfgDefWant)\nvar noteStdio = fmt.Sprintf(\"\\n Note:os.Stdout:%v \\n Note:os.Stderr:%v\", os.Stdout, os.Stderr)\n\nfunc TestPkgCfgDef(t *testing.T) {\n\t\/\/ verify PkgCfgDef returns expected values\n\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n\tgot, giowr := fn.PkgCfgDef()\n\tgotstr := fmt.Sprintf(\"%+v\", got)\n\twantstr := pkgCfgDefWantstr\n\tif gotstr != wantstr {\n\t\tt.Errorf(\"PkgCfgDef() incorrect:\\n got:%s \\nwant:%s\\n\", gotstr, wantstr)\n\t}\n\twiowr := fn.LogGetOutputDef()\n\tif giowr != wiowr {\n\t\tt.Errorf(\"PkgCfgDef() incorrect:\\n got-iowr:%s \\nwant-iowr:%s\\n\", giowr, wiowr)\n\t}\n\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n}\n\nfunc TestPkgCfg(t *testing.T) {\n\t\/\/ verify individual setter funcs alter what makes up a PkgCfg\n\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n\tfn.LogSetAlignFile(5)\n\tfn.LogSetAlignFunc(6)\n\tfn.LogSetFlags(0xffffffff)\n\tfn.LogSetPrefix(\"pRe\")\n\tfn.LogSetTraceFlags(0xdeadbeef) \/\/3735928559\n\tvar wiowr io.Writer = os.Stderr\n\tfn.LogSetOutput(wiowr)\n\tgot, giowr := fn.PkgCfg()\n\tgotstr := fmt.Sprintf(\"%+v\", got)\n\twantstr := pkgCfgDefWantstr\n\tif gotstr == wantstr {\n\t\tt.Errorf(\"PkgCfg() incorrect:\\n got:%s \\nwant:%s \\n\", gotstr, wantstr)\n\t}\n\tif giowr != wiowr {\n\t\tt.Errorf(\"PkgCfg() incorrect:\\n gotiowr:%x \\nwantowr:%x %s\\n\",\n\t\t\tgiowr, wiowr, noteStdio)\n\t}\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n}\n\nfunc TestSetPkgCfgDef(t *testing.T) {\n\t\/\/ verify rewriting the defaults matches what should be in the pkg defaults\n\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n\twiowr := fn.LogGetOutputDef()\n\tgot, giowr := fn.PkgCfg()\n\tgotstr := fmt.Sprintf(\"%+v\", got)\n\twantstr := pkgCfgDefWantstr\n\tif gotstr != wantstr {\n\t\tt.Errorf(\"SetPkgCfgDef() incorrect:\\n got:%s \\nwant:%s %s\\n\", gotstr, wantstr, noteStdio)\n\t}\n\tif giowr != wiowr {\n\t\tt.Errorf(\"PkgCfgDef() incorrect:\\n gotiowr:%x \\nwantiowr:%x %s\\n\", giowr, wiowr, noteStdio)\n\t}\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n}\n\nfunc Test_fetchedpkgconfig(t *testing.T) {\n\t\/\/ verify setter funcs mods match a corresponding fetched pkgCfg\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n\twiowr := io.Writer(os.Stderr)\n\tfn.LogSetOutput(wiowr)\n\tfn.LogSetFlags(0xffff)\n\tfn.LogSetPrefix(\"ZyxAbcd\")\n\tfn.LogSetTraceFlags(0xbeef) \/\/dec=48879\n\tfn.LogSetAlignFile(11)\n\tfn.LogSetAlignFunc(12)\n\tgot, _ := fn.PkgCfg()\n\twant := &fn.PkgCfgStruct{\n\t\tLogFlags: 0xffff,\n\t\tLogPrefix: \"ZyxAbcd\",\n\t\tLogTraceFlags: 0xbeef, \/\/dec=48879\n\t\tLogAlignFile: 11,\n\t\tLogAlignFunc: 12,\n\t}\n\tvar giowr io.Writer\n\tgot, giowr = fn.PkgCfg()\n\tgotstr := fmt.Sprintf(\"%+v\", got)\n\twantstr := fmt.Sprintf(\"%+v\", want)\n\tif gotstr != wantstr {\n\t\tt.Errorf(\"PkgCfgDef() incorrect:\\n got:%s \\nwant:%s \\n\", gotstr, wantstr)\n\t}\n\tif giowr != wiowr {\n\t\tt.Errorf(\"PkgCfgDef() incorrect:\\n gotiowr:%x \\nwantiowr:%x %s\\n\", giowr, wiowr, noteStdio)\n\t}\n\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n}\n\nfunc Test_pkgcfg_matches_setpkgcfg(t *testing.T) {\n\t\/\/ verify setting a pkgcfg matches returned values from PkgCfg\n\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n\twant, _ := fn.PkgCfgDef()\n\twiowr := fn.LogGetOutputDef()\n\twant.LogAlignFile = 8\n\tfn.SetPkgCfg(want, wiowr)\n\tgot, giowr := fn.PkgCfg()\n\tgotstr := fmt.Sprintf(\"%+v\", got)\n\twantstr := fmt.Sprintf(\"%+v\", want)\n\tif gotstr != wantstr {\n\t\tt.Fatalf(\"SetPkgCfg() incorrect:\\n got:%s \\nwant:%s \", gotstr, wantstr)\n\t}\n\tif giowr != wiowr {\n\t\tt.Fatalf(\"SetPkgCfg() incorrect:\\n gotiowr:%x \\nwantiowr:%x \", giowr, wiowr)\n\t}\n\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n}\n<commit_msg>removed unneccessary code<commit_after>\/\/ Copyright 2017 phcurtis fn Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage fn_test\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/phcurtis\/fn\"\n)\n\nvar pkgCfgDefWant = &fn.PkgCfgStruct{\n\tLogFlags: fn.LflagsDef,\n\tLogPrefix: fn.LogPrefixDef,\n\tLogTraceFlags: fn.TrFlagsDef,\n\tLogAlignFile: fn.LogAlignFileDef,\n\tLogAlignFunc: fn.LogAlignFuncDef,\n}\nvar pkgCfgDefWantstr = fmt.Sprintf(\"%+v\", pkgCfgDefWant)\nvar noteStdio = fmt.Sprintf(\"\\n Note:os.Stdout:%v \\n Note:os.Stderr:%v\", os.Stdout, os.Stderr)\n\nfunc TestPkgCfgDef(t *testing.T) {\n\t\/\/ verify PkgCfgDef returns expected values\n\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n\tgot, giowr := fn.PkgCfgDef()\n\tgotstr := fmt.Sprintf(\"%+v\", got)\n\twantstr := pkgCfgDefWantstr\n\tif gotstr != wantstr {\n\t\tt.Errorf(\"PkgCfgDef() incorrect:\\n got:%s \\nwant:%s\\n\", gotstr, wantstr)\n\t}\n\twiowr := fn.LogGetOutputDef()\n\tif giowr != wiowr {\n\t\tt.Errorf(\"PkgCfgDef() incorrect:\\n got-iowr:%s \\nwant-iowr:%s\\n\", giowr, wiowr)\n\t}\n\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n}\n\nfunc TestPkgCfg(t *testing.T) {\n\t\/\/ verify individual setter funcs alter what makes up a PkgCfg\n\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n\tfn.LogSetAlignFile(5)\n\tfn.LogSetAlignFunc(6)\n\tfn.LogSetFlags(0xffffffff)\n\tfn.LogSetPrefix(\"pRe\")\n\tfn.LogSetTraceFlags(0xdeadbeef) \/\/3735928559\n\tvar wiowr io.Writer = os.Stderr\n\tfn.LogSetOutput(wiowr)\n\tgot, giowr := fn.PkgCfg()\n\tgotstr := fmt.Sprintf(\"%+v\", got)\n\twantstr := pkgCfgDefWantstr\n\tif gotstr == wantstr {\n\t\tt.Errorf(\"PkgCfg() incorrect:\\n got:%s \\nwant:%s \\n\", gotstr, wantstr)\n\t}\n\tif giowr != wiowr {\n\t\tt.Errorf(\"PkgCfg() incorrect:\\n gotiowr:%x \\nwantowr:%x %s\\n\",\n\t\t\tgiowr, wiowr, noteStdio)\n\t}\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n}\n\nfunc TestSetPkgCfgDef(t *testing.T) {\n\t\/\/ verify rewriting the defaults matches what should be in the pkg defaults\n\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n\twiowr := fn.LogGetOutputDef()\n\tgot, giowr := fn.PkgCfg()\n\tgotstr := fmt.Sprintf(\"%+v\", got)\n\twantstr := pkgCfgDefWantstr\n\tif gotstr != wantstr {\n\t\tt.Errorf(\"SetPkgCfgDef() incorrect:\\n got:%s \\nwant:%s %s\\n\", gotstr, wantstr, noteStdio)\n\t}\n\tif giowr != wiowr {\n\t\tt.Errorf(\"PkgCfgDef() incorrect:\\n gotiowr:%x \\nwantiowr:%x %s\\n\", giowr, wiowr, noteStdio)\n\t}\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n}\n\nfunc Test_fetchedpkgconfig(t *testing.T) {\n\t\/\/ verify setter funcs mods match a corresponding fetched pkgCfg\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n\twiowr := io.Writer(os.Stderr)\n\tfn.LogSetOutput(wiowr)\n\tfn.LogSetFlags(0xffff)\n\tfn.LogSetPrefix(\"ZyxAbcd\")\n\tfn.LogSetTraceFlags(0xbeef)\n\tfn.LogSetAlignFile(11)\n\tfn.LogSetAlignFunc(12)\n\twant := &fn.PkgCfgStruct{\n\t\tLogFlags: 0xffff,\n\t\tLogPrefix: \"ZyxAbcd\",\n\t\tLogTraceFlags: 0xbeef, \/\/dec=48879\n\t\tLogAlignFile: 11,\n\t\tLogAlignFunc: 12,\n\t}\n\tgot, giowr := fn.PkgCfg()\n\tgotstr := fmt.Sprintf(\"%+v\", got)\n\twantstr := fmt.Sprintf(\"%+v\", want)\n\tif gotstr != wantstr {\n\t\tt.Errorf(\"PkgCfgDef() incorrect:\\n got:%s \\nwant:%s \\n\", gotstr, wantstr)\n\t}\n\tif giowr != wiowr {\n\t\tt.Errorf(\"PkgCfgDef() incorrect:\\n gotiowr:%x \\nwantiowr:%x %s\\n\", giowr, wiowr, noteStdio)\n\t}\n\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n}\n\nfunc Test_pkgcfg_matches_setpkgcfg(t *testing.T) {\n\t\/\/ verify setting a pkgcfg matches returned values from PkgCfg\n\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n\twant, _ := fn.PkgCfgDef()\n\twiowr := fn.LogGetOutputDef()\n\twant.LogAlignFile = 8\n\tfn.SetPkgCfg(want, wiowr)\n\tgot, giowr := fn.PkgCfg()\n\tgotstr := fmt.Sprintf(\"%+v\", got)\n\twantstr := fmt.Sprintf(\"%+v\", want)\n\tif gotstr != wantstr {\n\t\tt.Fatalf(\"SetPkgCfg() incorrect:\\n got:%s \\nwant:%s \", gotstr, wantstr)\n\t}\n\tif giowr != wiowr {\n\t\tt.Fatalf(\"SetPkgCfg() incorrect:\\n gotiowr:%x \\nwantiowr:%x \", giowr, wiowr)\n\t}\n\n\tfn.SetPkgCfgDef(true) \/\/ set pkg config to (what should be) a known default state\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/rmera\/gochem\"\n\t\"github.com\/rmera\/gochem\/v3\"\n\t\"github.com\/rmera\/scu\"\n)\n\n\/\/This program will align the best plane passing through a set of atoms in a molecule with the XY-plane.\n\/\/Usage:\nfunc main() {\n\tif len(os.Args) < 2 {\n\t\tfmt.Printf(\"Usage:\\n%s file.xyz [indexes.dat]\\nindexes.dat is a file containing one single line, with all the atoms defining the plane separated by spaces. If it is not given, all the atoms of the molecule will be taken to define the plane.\\n\", os.Args[0])\n\t\tos.Exit(1)\n\t}\n\tmol, err := chem.XYZFileRead(os.Args[1])\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tvar indexes []int\n\t\/\/if no file with indexes given, will just use all the atoms.\n\tif len(os.Args) < 3 {\n\t\tindexes = make([]int, mol.Len())\n\t\tfor k, v := range indexes {\n\t\t\tindexes[k] = v\n\t\t}\n\t} else {\n\t\tindexes, err = scu.IndexFileParse(os.Args[2])\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t}\n\tsome := v3.Zeros(len(indexes)) \/\/will contain the atoms selected to define the plane.\n\tsome.SomeVecs(mol.Coords[0], indexes)\n\t\/\/for most rotation things it is good to have the molecule centered on its mean.\n\tmol.Coords[0], _, _ = chem.MassCenter(mol.Coords[0], some, nil)\n\t\/\/As we changed the atomic positions, must extract the plane-defining atoms again.\n\tsome.SomeVecs(mol.Coords[0], indexes)\n\t\/\/The strategy is: Take the normal to the plane of the molecule (os molecular subset), and rotate it until it matches the Z-axis\n\t\/\/This will mean that the plane of the molecule will now match the XY-plane.\n\tbest, err := chem.BestPlane(some, nil)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tz, _ := v3.NewMatrix([]float64{0, 0, 1})\n\tzero, _ := v3.NewMatrix([]float64{0, 0, 0})\n\tfmt.Fprintln(os.Stderr, \"Best Plane\", best, z, indexes)\n\taxis := v3.Zeros(1)\n\taxis.Cross(best, z)\n\tfmt.Fprintln(os.Stderr, \"axis\", axis)\n\t\/\/The main part of the program, where the rotation actually happens. Note that we rotate the whole\n\t\/\/molecule, not just the planar subset, this is only used to calculate the rotation angle.\n\tmol.Coords[0], err = chem.RotateAbout(mol.Coords[0], zero, axis, chem.Angle(best, z))\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\t\/\/Now we write the rotated result.\n\tfinal, err := chem.XYZStringWrite(mol.Coords[0], mol)\n\tfmt.Print(final)\n\tfmt.Fprintln(os.Stderr, err)\n}\n<commit_msg>Fixed bug in plane.go<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/rmera\/gochem\"\n\t\"github.com\/rmera\/gochem\/v3\"\n\t\"github.com\/rmera\/scu\"\n)\n\n\/\/This program will align the best plane passing through a set of atoms in a molecule with the XY-plane.\n\/\/Usage:\nfunc main() {\n\tif len(os.Args) < 2 {\n\t\tfmt.Printf(\"Usage:\\n%s file.xyz [indexes.dat]\\nindexes.dat is a file containing one single line, with all the atoms defining the plane separated by spaces. If it is not given, all the atoms of the molecule will be taken to define the plane.\\n\", os.Args[0])\n\t\tos.Exit(1)\n\t}\n\tmol, err := chem.XYZFileRead(os.Args[1])\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tvar indexes []int\n\t\/\/if no file with indexes given, will just use all the atoms.\n\tif len(os.Args) < 3 {\n\t\tindexes = make([]int, mol.Len())\n\t\tfor k, _ := range indexes {\n\t\t\tindexes[k] = k\n\t\t}\n\t} else {\n\t\tindexes, err = scu.IndexFileParse(os.Args[2])\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\t}\n\tsome := v3.Zeros(len(indexes)) \/\/will contain the atoms selected to define the plane.\n\tsome.SomeVecs(mol.Coords[0], indexes)\n\t\/\/for most rotation things it is good to have the molecule centered on its mean.\n\tmol.Coords[0], _, _ = chem.MassCenter(mol.Coords[0], some, nil)\n\t\/\/As we changed the atomic positions, must extract the plane-defining atoms again.\n\tsome.SomeVecs(mol.Coords[0], indexes)\n\t\/\/The strategy is: Take the normal to the plane of the molecule (os molecular subset), and rotate it until it matches the Z-axis\n\t\/\/This will mean that the plane of the molecule will now match the XY-plane.\n\tbest, err := chem.BestPlane(some, nil)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tz, _ := v3.NewMatrix([]float64{0, 0, 1})\n\tzero, _ := v3.NewMatrix([]float64{0, 0, 0})\n\tfmt.Fprintln(os.Stderr, \"Best Plane\", best, z, indexes)\n\taxis := v3.Zeros(1)\n\taxis.Cross(best, z)\n\tfmt.Fprintln(os.Stderr, \"axis\", axis)\n\t\/\/The main part of the program, where the rotation actually happens. Note that we rotate the whole\n\t\/\/molecule, not just the planar subset, this is only used to calculate the rotation angle.\n\tmol.Coords[0], err = chem.RotateAbout(mol.Coords[0], zero, axis, chem.Angle(best, z))\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\t\/\/Now we write the rotated result.\n\tfinal, err := chem.XYZStringWrite(mol.Coords[0], mol)\n\tfmt.Print(final)\n\tfmt.Fprintln(os.Stderr, err)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nfunc indexHandler(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\n\tmsg := `\n\n\t\thttp:\/\/127.0.0.1:7777\/list-category-android\n\n\t\thttp:\/\/127.0.0.1:7777\/list-category-ios\n\n\t\thttp:\/\/127.0.0.1:7777\/category-android\/?p=GAME_ACTION\n\n\t\thttp:\/\/127.0.0.1:7777\/category-ios\/?p=GAMES_ACTION\n\n\t\thttp:\/\/127.0.0.1:7777\/storeid\/?a=com.google.android.apps.photos&i=293622097\n`\n\tfmt.Fprint(w, \"Welcome to Storemeta!\\n\", fmt.Sprintf(\"Version: %s\\n\\n\\n%s\", pVersion, msg))\n}\n\nfunc formatHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\/\/Content-Type: application\/json\n\tvar pAppsMeta AppsMeta\n\n\tvar q = r.URL.Query()\n\tvar p = strings.TrimSpace(q.Get(\"p\"))\n\tvar m = strings.ToUpper(strings.TrimSpace(ps.ByName(\"mode\")))\n\n\t\/\/re-init it here, eventhoug, its defined @ global.go\n\tpAppsData = make(chan *App)\n\tpAppList = []*App{}\n\tpStores = []*StoreApp{}\n\n\t\/\/not-found\n\tv, ok := Formatters[m]\n\tif !ok {\n\t\tif !strings.EqualFold(m, \"STOREID\") {\n\t\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\t\/\/store-ids here\n\t\tands := strings.Split(strings.TrimSpace(q.Get(\"a\")), \",\")\n\t\tioss := strings.Split(strings.TrimSpace(q.Get(\"i\")), \",\")\n\t\tqueryStoreIds(ands, ioss)\n\t\tif len(pStores) == 0 {\n\t\t\thttp.Error(w, \"StoreId is Missing \/ \"+http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/handle the storeids\n\t\thandler(pAppsMeta)\n\n\t\t\/\/show the list saved\n\t\tif len(pAppList) > 0 {\n\t\t\t\/\/json fmt\n\t\t\tjdata, _ := json.MarshalIndent(pAppList, \"\", \"\\t\")\n\t\t\t\/\/dont leave your friend behind :-)\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\t\tfmt.Fprint(w, string(jdata))\n\t\t}\n\t\treturn\n\t}\n\n\t\/\/reset\n\tif ok, _ := regexp.MatchString(\"(?i)^list-category-(ios|android)$\", m); ok {\n\t\tp = \"\"\n\t}\n\tresult := v.Format(pAppsMeta, v.Mode, p)\n\tfmt.Println(\"RAW-DATA: \", p)\n\t\/\/good\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tfmt.Fprint(w, string(result))\n}\n\nfunc queryStoreIds(androids, ioss []string) {\n\t\/\/store-ids here\n\tfor _, s := range androids {\n\t\tif len(s) > 0 {\n\t\t\tpStores = append(pStores, &StoreApp{OS: ANDROID, URL: pStoreURI[ANDROID][0] + s + pStoreURI[ANDROID][1], StoreID: s})\n\t\t}\n\t}\n\tfor _, s := range ioss {\n\t\tif len(s) > 0 {\n\t\t\tpStores = append(pStores, &StoreApp{OS: IOS, URL: pStoreURI[IOS][0] + s + pStoreURI[IOS][1], StoreID: s})\n\t\t}\n\t}\n}\n<commit_msg>. more fmt<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nfunc indexHandler(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\n\tmsg := `\n\n\t\thttp:\/\/127.0.0.1:7777\/list-category-android\n\n\t\thttp:\/\/127.0.0.1:7777\/list-category-ios\n\n\t\thttp:\/\/127.0.0.1:7777\/category-android\/?p=GAME_ACTION\n\n\t\thttp:\/\/127.0.0.1:7777\/category-ios\/?p=GAMES_ACTION\n\n\t\thttp:\/\/127.0.0.1:7777\/storeid\/?a=com.google.android.apps.photos&i=293622097\n`\n\tfmt.Fprint(w, \"Welcome to Storemeta!\\n\", fmt.Sprintf(\"Version: %s\\n\\n\\n%s\", pVersion, msg))\n}\n\nfunc formatHandler(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\/\/Content-Type: application\/json\n\tvar pAppsMeta AppsMeta\n\n\tvar q = r.URL.Query()\n\tvar p = strings.TrimSpace(q.Get(\"p\"))\n\tvar m = strings.ToUpper(strings.TrimSpace(ps.ByName(\"mode\")))\n\n\t\/\/re-init it here, eventhough, its defined @ global.go\n\tpAppsData = make(chan *App)\n\tpAppList = []*App{}\n\tpStores = []*StoreApp{}\n\t\/\/hdrset\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\t\/\/not-found\n\tv, ok := Formatters[m]\n\tif !ok {\n\t\tif !strings.EqualFold(m, \"STOREID\") {\n\t\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\t\/\/store-ids here\n\t\tands := strings.Split(strings.TrimSpace(q.Get(\"a\")), \",\")\n\t\tioss := strings.Split(strings.TrimSpace(q.Get(\"i\")), \",\")\n\t\tqueryStoreIds(ands, ioss)\n\t\tif len(pStores) == 0 {\n\t\t\thttp.Error(w, \"StoreId is Missing \/ \"+http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/handle the storeids\n\t\thandler(pAppsMeta)\n\n\t\t\/\/show the list saved\n\t\tif len(pAppList) > 0 {\n\t\t\t\/\/json fmt\n\t\t\tjdata, _ := json.MarshalIndent(pAppList, \"\", \"\\t\")\n\t\t\t\/\/dont leave your friend behind :-)\n\t\t\tfmt.Fprint(w, string(jdata))\n\t\t}\n\t\treturn\n\t}\n\n\t\/\/reset\n\tif ok, _ := regexp.MatchString(\"(?i)^list-category-(ios|android)$\", m); ok {\n\t\tp = \"\"\n\t}\n\tresult := v.Format(pAppsMeta, v.Mode, p)\n\tfmt.Println(\"RAW-DATA: \", p)\n\t\/\/good\n\tfmt.Fprint(w, string(result))\n}\n\nfunc queryStoreIds(androids, ioss []string) {\n\t\/\/store-ids here\n\tfor _, s := range androids {\n\t\tif len(s) > 0 {\n\t\t\tpStores = append(pStores, &StoreApp{OS: ANDROID, URL: pStoreURI[ANDROID][0] + s + pStoreURI[ANDROID][1], StoreID: s})\n\t\t}\n\t}\n\tfor _, s := range ioss {\n\t\tif len(s) > 0 {\n\t\t\tpStores = append(pStores, &StoreApp{OS: IOS, URL: pStoreURI[IOS][0] + s + pStoreURI[IOS][1], StoreID: s})\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package bpmon\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestStringToWeekday(t *testing.T) {\n\ttests := []struct {\n\t\tstr string\n\t\tday time.Weekday\n\t\terrExpected bool\n\t}{\n\t\t{str: \"monday\", day: time.Monday, errExpected: false},\n\t\t{str: \"Tuesday\", day: time.Tuesday, errExpected: false},\n\t\t{str: \"Wednesday\", day: time.Wednesday, errExpected: false},\n\t\t{str: \"Thursday\", day: time.Thursday, errExpected: false},\n\t\t{str: \"FRIDAY\", day: time.Friday, errExpected: false},\n\t\t{str: \"Saturday\", day: time.Saturday, errExpected: false},\n\t\t{str: \"Sunday\", day: time.Sunday, errExpected: false},\n\t\t{str: \"Casual-Friday\", errExpected: true},\n\t}\n\n\tfor _, test := range tests {\n\t\tday, err := toWeekday(test.str)\n\t\tif err == nil && test.errExpected == true {\n\t\t\tt.Errorf(\"Error expected for '%s' but test succeeded\", test.str)\n\t\t} else if err != nil && test.errExpected == false {\n\t\t\tt.Errorf(\"No error expected for '%s' but test failed: %s\", test.str, err.Error())\n\t\t} else if err == nil && test.errExpected == false {\n\t\t\tif day != test.day {\n\t\t\t\tt.Errorf(\"Result not as expected for '%s': Should be '%v', is '%v'\", test.str, test.day, day)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc ParseTime(str string) time.Time {\n\tformat := \"15:04:05.000\"\n\tt, err := time.Parse(format, str)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Time in test malformed, is '%s', must match '%s', error is: %s\", str, format, err.Error()))\n\t}\n\treturn t\n}\n\nfunc TestStringsToAvailabilityTime(t *testing.T) {\n\ttests := []struct {\n\t\tstr []string\n\t\tat AvailabilityTime\n\t\terrExpected bool\n\t}{\n\t\t{\n\t\t\tstr: []string{\"09:00:00-12:00:00\"},\n\t\t\tat: AvailabilityTime{\n\t\t\t\tTimeRanges: []TimeRange{\n\t\t\t\t\t{Start: ParseTime(\"09:00:00.000\"), End: ParseTime(\"12:00:00.000\")},\n\t\t\t\t},\n\t\t\t\tAllDay: false,\n\t\t\t},\n\t\t\terrExpected: false,\n\t\t},\n\t\t{\n\t\t\tstr: []string{},\n\t\t\terrExpected: true,\n\t\t},\n\t\t{\n\t\t\tstr: []string{\"12:00:00\"},\n\t\t\terrExpected: true,\n\t\t},\n\t\t\/\/{\n\t\t\/\/\tstr: []string{\"ALLDAY\", \"09:00:00-12:00:00\"},\n\t\t\/\/\tat: AvailabilityTime{\n\t\t\/\/\t\tAllDay: true,\n\t\t\/\/\t},\n\t\t\/\/\terrExpected: false,\n\t\t\/\/},\n\t}\n\n\tfor _, test := range tests {\n\t\tat, err := toAvailabilityTime(test.str)\n\t\tif err == nil && test.errExpected == true {\n\t\t\tt.Errorf(\"Error expected for '%s' but test succeeded\", test.str)\n\t\t} else if err != nil && test.errExpected == false {\n\t\t\tt.Errorf(\"No error expected for '%s' but test failed: %s\", test.str, err.Error())\n\t\t} else if err == nil && test.errExpected == false {\n\t\t\teq := reflect.DeepEqual(at, test.at)\n\t\t\tif !eq {\n\t\t\t\tt.Errorf(\"Results do not match for %v: '%v' vs. '%v'\", test.str, at, test.at)\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>added availability test<commit_after>package bpmon\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestStringToWeekday(t *testing.T) {\n\ttests := []struct {\n\t\tstr string\n\t\tday time.Weekday\n\t\terrExpected bool\n\t}{\n\t\t{str: \"monday\", day: time.Monday, errExpected: false},\n\t\t{str: \"Tuesday\", day: time.Tuesday, errExpected: false},\n\t\t{str: \"Wednesday\", day: time.Wednesday, errExpected: false},\n\t\t{str: \"Thursday\", day: time.Thursday, errExpected: false},\n\t\t{str: \"FRIDAY\", day: time.Friday, errExpected: false},\n\t\t{str: \"Saturday\", day: time.Saturday, errExpected: false},\n\t\t{str: \"Sunday\", day: time.Sunday, errExpected: false},\n\t\t{str: \"Casual-Friday\", errExpected: true},\n\t}\n\n\tfor _, test := range tests {\n\t\tday, err := toWeekday(test.str)\n\t\tif err == nil && test.errExpected == true {\n\t\t\tt.Errorf(\"Error expected for '%s' but test succeeded\", test.str)\n\t\t} else if err != nil && test.errExpected == false {\n\t\t\tt.Errorf(\"No error expected for '%s' but test failed: %s\", test.str, err.Error())\n\t\t} else if err == nil && test.errExpected == false {\n\t\t\tif day != test.day {\n\t\t\t\tt.Errorf(\"Result not as expected for '%s': Should be '%v', is '%v'\", test.str, test.day, day)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc ParseTime(str string) time.Time {\n\tformat := \"15:04:05.000\"\n\tt, err := time.Parse(format, str)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Time in test malformed, is '%s', must match '%s', error is: %s\", str, format, err.Error()))\n\t}\n\treturn t\n}\n\nfunc TestStringsToAvailabilityTime(t *testing.T) {\n\ttests := []struct {\n\t\tstr []string\n\t\tat AvailabilityTime\n\t\terrExpected bool\n\t}{\n\t\t{\n\t\t\tstr: []string{\"09:00:00-12:00:00\"},\n\t\t\tat: AvailabilityTime{\n\t\t\t\tTimeRanges: []TimeRange{\n\t\t\t\t\t{Start: ParseTime(\"09:00:00.000\"), End: ParseTime(\"12:00:00.000\")},\n\t\t\t\t},\n\t\t\t\tAllDay: false,\n\t\t\t},\n\t\t\terrExpected: false,\n\t\t},\n\t\t{\n\t\t\tstr: []string{},\n\t\t\terrExpected: true,\n\t\t},\n\t\t{\n\t\t\tstr: []string{\"12:00:00\"},\n\t\t\terrExpected: true,\n\t\t},\n\t\t{\n\t\t\tstr: []string{\"foo-bar\"},\n\t\t\terrExpected: true,\n\t\t},\n\t\t\/\/{\n\t\t\/\/\tstr: []string{\"ALLDAY\", \"09:00:00-12:00:00\"},\n\t\t\/\/\tat: AvailabilityTime{\n\t\t\/\/\t\tAllDay: true,\n\t\t\/\/\t},\n\t\t\/\/\terrExpected: false,\n\t\t\/\/},\n\t}\n\n\tfor _, test := range tests {\n\t\tat, err := toAvailabilityTime(test.str)\n\t\tif err == nil && test.errExpected == true {\n\t\t\tt.Errorf(\"Error expected for '%s' but test succeeded\", test.str)\n\t\t} else if err != nil && test.errExpected == false {\n\t\t\tt.Errorf(\"No error expected for '%s' but test failed: %s\", test.str, err.Error())\n\t\t} else if err == nil && test.errExpected == false {\n\t\t\teq := reflect.DeepEqual(at, test.at)\n\t\t\tif !eq {\n\t\t\t\tt.Errorf(\"Results do not match for %v: '%v' vs. '%v'\", test.str, at, test.at)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc ParseDate(str string) time.Time {\n\tformat := \"Mon 2006\/01\/02 15:04:05.000\"\n\tt, err := time.Parse(format, str)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Time in test malformed, is '%s', must match '%s', error is: %s\", str, format, err.Error()))\n\t}\n\treturn t\n}\n\nfunc TestContains(t *testing.T) {\n\ta := Availability{\n\t\ttime.Monday: AvailabilityTime{\n\t\t\tTimeRanges: []TimeRange{\n\t\t\t\t{Start: ParseTime(\"09:00:00.000\"), End: ParseTime(\"12:00:00.000\")},\n\t\t\t},\n\t\t\tAllDay: false,\n\t\t},\n\t\ttime.Friday: AvailabilityTime{\n\t\t\tAllDay: true,\n\t\t},\n\t}\n\n\ttests := []struct {\n\t\tinAvalilability bool\n\t\ttimestamp time.Time\n\t}{\n\t\t{\n\t\t\ttimestamp: ParseDate(\"Mon 2017\/03\/20 08:00:00.000\"),\n\t\t\tinAvalilability: false,\n\t\t},\n\t\t{\n\t\t\ttimestamp: ParseDate(\"Mon 2017\/03\/20 09:00:00.001\"),\n\t\t\tinAvalilability: true,\n\t\t},\n\t\t{\n\t\t\ttimestamp: ParseDate(\"Fri 2017\/03\/17 09:00:00.001\"),\n\t\t\tinAvalilability: true,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tcontained := a.Contains(test.timestamp)\n\t\tif !contained && test.inAvalilability {\n\t\t\tt.Errorf(\"Time %v is not in avalability but should\", test.timestamp)\n\t\t}\n\t\tif contained && !test.inAvalilability {\n\t\t\tt.Errorf(\"Time %v is in avalability but should not\", test.timestamp)\n\t\t}\n\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package controllers\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/nicomo\/abacaxi\/logger\"\n\t\"github.com\/nicomo\/abacaxi\/models\"\n\t\"github.com\/nicomo\/abacaxi\/session\"\n\t\"github.com\/nicomo\/abacaxi\/views\"\n)\n\ntype parseparams struct {\n\ttsname string\n\tfpath string\n\tfiletype string\n\tdelimiter rune\n\tcsvconf map[string]int\n}\n\n\/\/ UploadGetHandler manages upload of a source file\nfunc UploadGetHandler(w http.ResponseWriter, r *http.Request) {\n\t\/\/ our messages (errors, confirmation, etc) to the user & the template will be stored in this map\n\td := make(map[string]interface{})\n\n\t\/\/ Get session\n\tsess := session.Instance(r)\n\tif sess.Values[\"id\"] != nil {\n\t\td[\"IsLoggedIn\"] = true\n\t}\n\n\t\/\/ Get flash messages, if any.\n\tif flashes := sess.Flashes(); len(flashes) > 0 {\n\t\td[\"Flashes\"] = flashes\n\t}\n\tsess.Save(r, w)\n\n\tTSListing, _ := models.GetTargetServicesListing()\n\td[\"TSListing\"] = TSListing\n\tviews.RenderTmpl(w, \"upload\", d)\n\n}\n\n\/\/ UploadPostHandler receives source file, checks extension\n\/\/ then passes the file on to the appropriate controller\nfunc UploadPostHandler(w http.ResponseWriter, r *http.Request) {\n\n\t\/\/ Get session, to be used for feedback flash messages\n\tsess := session.Instance(r)\n\n\t\/\/ parsing multipart file\n\tr.ParseMultipartForm(32 << 20)\n\n\t\/\/ get the Target Service name and the file type\n\ttsname := r.PostFormValue(\"tsname\")\n\tfiletype := r.PostFormValue(\"filetype\")\n\n\t\/\/ get the file delimiter (for csv and kbart), defaulting to tab\n\tdelimiter := rune('\\t')\n\tif r.PostFormValue(\"delimiter\") == \"semicolon\" {\n\t\tdelimiter = ';'\n\t}\n\n\t\/\/ get the optional csv fields\n\tcsvconf, err := getCSVParams(r)\n\tif filetype == \"publishercsv\" && err != nil {\n\t\tlogger.Error.Printf(\"couldn't get csv params: %v\", err)\n\t}\n\n\t\/\/ upload the file\n\tfile, handler, err := r.FormFile(\"uploadfile\")\n\tif err != nil {\n\t\tlogger.Error.Println(err)\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\t\/\/ create dir if it doesn't exist\n\tpath := \"data\"\n\tErrPath := os.MkdirAll(\"data\", os.ModePerm)\n\tif ErrPath != nil {\n\t\tlogger.Error.Println(ErrPath)\n\t}\n\n\t\/\/ open newly created file\n\tfpath := path + \"\/\" + handler.Filename\n\tf, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE, 0666)\n\tif err != nil {\n\t\tlogger.Error.Println(err)\n\t\treturn\n\t}\n\tdefer f.Close()\n\n\t\/\/ copy uploaded file into new file\n\tio.Copy(f, file)\n\n\tpp := parseparams{\n\t\ttsname,\n\t\tfpath,\n\t\tfiletype,\n\t\tdelimiter,\n\t\tcsvconf,\n\t}\n\n\t\/\/ we have a file to parse\n\t\/\/ let's do that in a separate go routine\n\tgo parseFile(pp)\n\n\t\/\/ and redirect the user home with a flash message\n\tsess.AddFlash(\"Upload is running in the background, result will be in the reports\")\n\tsess.Save(r, w)\n\thttp.Redirect(w, r, \"\/\", http.StatusFound)\n}\n\nfunc parseFile(pp parseparams) {\n\tvar (\n\t\trecords []models.Record\n\t\treport models.Report\n\t\terr error\n\t)\n\n\tif pp.filetype == \"sfxxml\" {\n\t\trecords, err = xmlIO(pp, &report)\n\t\tif err != nil {\n\t\t\tlogger.Error.Println(err)\n\t\t\treport.Success = false\n\t\t\treport.Text = append(report.Text, fmt.Sprintf(\"Upload process couldn't complete: %v\", err))\n\t\t\treport.ReportCreate()\n\t\t\treturn\n\t\t}\n\t\treport.ReportType = models.UploadSfx\n\t} else if pp.filetype == \"publishercsv\" || pp.filetype == \"kbart\" {\n\t\trecords, err = fileIO(pp, &report)\n\t\tif err != nil {\n\t\t\tlogger.Error.Println(err)\n\t\t\treport.Success = false\n\t\t\treport.Text = append(report.Text, fmt.Sprintf(\"Upload process couldn't complete: %v\", err))\n\t\t\treport.ReportCreate()\n\t\t\treturn\n\t\t}\n\t\tif pp.filetype == \"publishercsv\" {\n\t\t\treport.ReportType = models.UploadCsv\n\t\t}\n\t\tif pp.filetype == \"kbart\" {\n\t\t\treport.ReportType = models.UploadKbart\n\t\t}\n\t} else {\n\t\t\/\/ manage case wrong file extension : message to the user\n\t\tlogger.Error.Println(\"unknown file type\")\n\t\treport.Success = false\n\t\treport.Text = append(report.Text, fmt.Sprintln(\"unknown file type\"))\n\t\treport.ReportCreate()\n\t\treturn\n\t}\n\n\t\/\/ save the records to DB\n\trecordsUpdated, recordsInserted := models.RecordsUpsert(records)\n\n\t\/\/ report\n\treport.Text = append(report.Text, fmt.Sprintf(\"Updated %d records \/ Inserted %d records\",\n\t\trecordsUpdated,\n\t\trecordsInserted))\n\treport.Success = true\n\n\t\/\/ save the report to DB\n\tif err := report.ReportCreate(); err != nil {\n\t\tlogger.Error.Printf(\"couldn't save the report to DB: %v\", err)\n\t}\n\n}\n<commit_msg>rename uploaded file with timestamp<commit_after>package controllers\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/nicomo\/abacaxi\/logger\"\n\t\"github.com\/nicomo\/abacaxi\/models\"\n\t\"github.com\/nicomo\/abacaxi\/session\"\n\t\"github.com\/nicomo\/abacaxi\/views\"\n)\n\ntype parseparams struct {\n\ttsname string\n\tfpath string\n\tfiletype string\n\tdelimiter rune\n\tcsvconf map[string]int\n}\n\n\/\/ UploadGetHandler manages upload of a source file\nfunc UploadGetHandler(w http.ResponseWriter, r *http.Request) {\n\t\/\/ our messages (errors, confirmation, etc) to the user & the template will be stored in this map\n\td := make(map[string]interface{})\n\n\t\/\/ Get session\n\tsess := session.Instance(r)\n\tif sess.Values[\"id\"] != nil {\n\t\td[\"IsLoggedIn\"] = true\n\t}\n\n\t\/\/ Get flash messages, if any.\n\tif flashes := sess.Flashes(); len(flashes) > 0 {\n\t\td[\"Flashes\"] = flashes\n\t}\n\tsess.Save(r, w)\n\n\tTSListing, _ := models.GetTargetServicesListing()\n\td[\"TSListing\"] = TSListing\n\tviews.RenderTmpl(w, \"upload\", d)\n\n}\n\n\/\/ UploadPostHandler receives source file, checks extension\n\/\/ then passes the file on to the appropriate controller\nfunc UploadPostHandler(w http.ResponseWriter, r *http.Request) {\n\n\t\/\/ Get session, to be used for feedback flash messages\n\tsess := session.Instance(r)\n\n\t\/\/ parsing multipart file\n\tr.ParseMultipartForm(32 << 20)\n\n\t\/\/ get the Target Service name and the file type\n\ttsname := r.PostFormValue(\"tsname\")\n\tfiletype := r.PostFormValue(\"filetype\")\n\n\t\/\/ get the file delimiter (for csv and kbart), defaulting to tab\n\tdelimiter := rune('\\t')\n\tif r.PostFormValue(\"delimiter\") == \"semicolon\" {\n\t\tdelimiter = ';'\n\t}\n\n\t\/\/ get the optional csv fields\n\tcsvconf, err := getCSVParams(r)\n\tif filetype == \"publishercsv\" && err != nil {\n\t\tlogger.Error.Printf(\"couldn't get csv params: %v\", err)\n\t}\n\n\t\/\/ upload the file\n\tfile, handler, err := r.FormFile(\"uploadfile\")\n\tif err != nil {\n\t\tlogger.Error.Println(err)\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\t\/\/ create dir if it doesn't exist\n\tpath := \"data\"\n\tErrPath := os.MkdirAll(\"data\", os.ModePerm)\n\tif ErrPath != nil {\n\t\tlogger.Error.Println(ErrPath)\n\t}\n\n\t\/\/ open newly created file\n\tfpath := path + \"\/\" + time.Now().Format(\"2006-01-02-15:04:05\") + \"-\" + handler.Filename\n\tf, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE, 0666)\n\tif err != nil {\n\t\tlogger.Error.Println(err)\n\t\treturn\n\t}\n\tdefer f.Close()\n\n\t\/\/ copy uploaded file into new file\n\tio.Copy(f, file)\n\n\tpp := parseparams{\n\t\ttsname,\n\t\tfpath,\n\t\tfiletype,\n\t\tdelimiter,\n\t\tcsvconf,\n\t}\n\n\t\/\/ we have a file to parse\n\t\/\/ let's do that in a separate go routine\n\tgo parseFile(pp)\n\n\t\/\/ and redirect the user home with a flash message\n\tsess.AddFlash(\"Upload is running in the background, result will be in the reports\")\n\tsess.Save(r, w)\n\thttp.Redirect(w, r, \"\/\", http.StatusFound)\n}\n\nfunc parseFile(pp parseparams) {\n\tvar (\n\t\trecords []models.Record\n\t\treport models.Report\n\t\terr error\n\t)\n\n\tif pp.filetype == \"sfxxml\" {\n\t\trecords, err = xmlIO(pp, &report)\n\t\tif err != nil {\n\t\t\tlogger.Error.Println(err)\n\t\t\treport.Success = false\n\t\t\treport.Text = append(report.Text, fmt.Sprintf(\"Upload process couldn't complete: %v\", err))\n\t\t\treport.ReportCreate()\n\t\t\treturn\n\t\t}\n\t\treport.ReportType = models.UploadSfx\n\t} else if pp.filetype == \"publishercsv\" || pp.filetype == \"kbart\" {\n\t\trecords, err = fileIO(pp, &report)\n\t\tif err != nil {\n\t\t\tlogger.Error.Println(err)\n\t\t\treport.Success = false\n\t\t\treport.Text = append(report.Text, fmt.Sprintf(\"Upload process couldn't complete: %v\", err))\n\t\t\treport.ReportCreate()\n\t\t\treturn\n\t\t}\n\t\tif pp.filetype == \"publishercsv\" {\n\t\t\treport.ReportType = models.UploadCsv\n\t\t}\n\t\tif pp.filetype == \"kbart\" {\n\t\t\treport.ReportType = models.UploadKbart\n\t\t}\n\t} else {\n\t\t\/\/ manage case wrong file extension : message to the user\n\t\tlogger.Error.Println(\"unknown file type\")\n\t\treport.Success = false\n\t\treport.Text = append(report.Text, fmt.Sprintln(\"unknown file type\"))\n\t\treport.ReportCreate()\n\t\treturn\n\t}\n\n\t\/\/ save the records to DB\n\trecordsUpdated, recordsInserted := models.RecordsUpsert(records)\n\n\t\/\/ report\n\treport.Text = append(report.Text, fmt.Sprintf(\"Updated %d records \/ Inserted %d records\",\n\t\trecordsUpdated,\n\t\trecordsInserted))\n\treport.Success = true\n\n\t\/\/ save the report to DB\n\tif err := report.ReportCreate(); err != nil {\n\t\tlogger.Error.Printf(\"couldn't save the report to DB: %v\", err)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Copyright (C) 2015 Stefan Luecke\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as published\n * by the Free Software Foundation, either version 3 of the License, or\n * (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\n * Authors: Stefan Luecke <glaxx@glaxx.net>\n *\/\n\npackage backend\n\nimport (\n\t\"github.com\/emicklei\/go-restful\"\n\t\"gopkg.in\/mgo.v2\"\n\t\"testing\"\n)\n\nfunc startTestDB() (*mgo.Session, error) {\n\ts, err := mgo.Dial(\"localhost\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tRegisterDatabase(s, \"lsmsd_test\", &Mailconfig{})\n\treturn s, nil\n}\n\nfunc flushAndCloseTestDB(m *mgo.Session, t testing.TB) {\n\terr := uCol.DropCollection()\n\tif err != nil && err.Error() != \"ns not found\" {\n\t\tt.Error(\"failed to clean up: \" + err.Error())\n\t}\n\terr = iCol.DropCollection()\n\tif err != nil && err.Error() != \"ns not found\" {\n\t\tt.Error(\"failed to clean up: \" + err.Error())\n\t}\n\terr = ihCol.DropCollection()\n\tif err != nil && err.Error() != \"ns not found\" {\n\t\tt.Error(\"failed to clean up: \" + err.Error())\n\t}\n\terr = pCol.DropCollection()\n\tif err != nil && err.Error() != \"ns not found\" {\n\t\tt.Error(\"failed to clean up: \" + err.Error())\n\t}\n\terr = phCol.DropCollection()\n\tif err != nil && err.Error() != \"ns not found\" {\n\t\tt.Error(\"failed to clean up: \" + err.Error())\n\t}\n\tidgen.StopIDGenerator()\n\tidgen.ResetCounter()\n\terr = m.DB(\"lsmsd_test\").DropDatabase()\n\tif err != nil {\n\t\tt.Error(\"failed to clean up: \" + err.Error())\n\t}\n\tm.Close()\n}\n\nfunc newTestContainer() *restful.Container {\n\tcont := restful.NewContainer()\n\tcont.Filter(restful.DefaultContainer.OPTIONSFilter)\n\tcont.Add(NewItemService())\n\tcont.Add(NewUserService())\n\tcont.Add(NewPolicyService())\n\treturn cont\n}\n<commit_msg>Removed obsolete test functions<commit_after><|endoftext|>"} {"text":"<commit_before>package libkbfs\n\nimport (\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/keybase\/kbfs\/kbfsblock\"\n\t\"github.com\/keybase\/kbfs\/kbfscrypto\"\n\t\"github.com\/keybase\/kbfs\/tlf\"\n\t\"github.com\/syndtr\/goleveldb\/leveldb\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tevictionConsiderationFactor uint = 3\n\tblockDbFilename string = \"diskCacheBlocks.leveldb\"\n\tlruDbFilename string = \"diskCacheLRU.leveldb\"\n)\n\ntype diskBlockCacheEntry struct {\n\tbuf []byte\n\tserverHalf kbfscrypto.BlockCryptKeyServerHalf\n}\n\ntype diskBlockCacheConfig interface {\n\tcodecGetter\n}\n\n\/\/ DiskBlockCacheStandard is the standard implementation for DiskBlockCache.\ntype DiskBlockCacheStandard struct {\n\tconfig diskBlockCacheConfig\n\tmaxBytes uint64\n\t\/\/ protects everything below\n\tlock sync.RWMutex\n\tisClosed bool\n\tblockDb *leveldb.DB\n\tlruDb *leveldb.DB\n}\n\nvar _ DiskBlockCache = (*DiskBlockCacheStandard)(nil)\n\nfunc newDiskBlockCacheStandard(config diskBlockCacheConfig, dirPath string,\n\tmaxBytes uint64) (*DiskBlockCacheStandard, error) {\n\tblockDbPath := filepath.Join(dirPath, blockDbFilename)\n\tblockDb, err := leveldb.OpenFile(blockDbPath, leveldbOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlruDbPath := filepath.Join(dirPath, lruDbFilename)\n\tlruDb, err := leveldb.OpenFile(lruDbPath, leveldbOptions)\n\tif err != nil {\n\t\tblockDb.Close()\n\t\treturn nil, err\n\t}\n\treturn &DiskBlockCacheStandard{\n\t\tconfig: config,\n\t\tmaxBytes: maxBytes,\n\t\tblockDb: blockDb,\n\t\tlruDb: lruDb,\n\t}, nil\n}\n\nfunc (cache *DiskBlockCacheStandard) updateLruLocked(tlfID tlf.ID,\n\tblockBytes []byte) error {\n\tkey := append(tlfID.Bytes(), blockBytes...)\n\tval, err := time.Now().MarshalBinary()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcache.lruDb.Put(key, val, nil)\n\treturn nil\n}\n\nfunc (cache *DiskBlockCacheStandard) decodeBlockCacheEntry(buf []byte) ([]byte,\n\tkbfscrypto.BlockCryptKeyServerHalf, error) {\n\tentry := diskBlockCacheEntry{}\n\terr := cache.config.Codec().Decode(buf, &entry)\n\tif err != nil {\n\t\treturn nil, kbfscrypto.BlockCryptKeyServerHalf{}, err\n\t}\n\treturn entry.buf, entry.serverHalf, nil\n}\n\nfunc (cache *DiskBlockCacheStandard) encodeBlockCacheEntry(buf []byte,\n\tserverHalf kbfscrypto.BlockCryptKeyServerHalf) ([]byte, error) {\n\tentry := diskBlockCacheEntry{\n\t\tbuf: buf,\n\t\tserverHalf: serverHalf,\n\t}\n\treturn cache.config.Codec().Encode(&entry)\n}\n\n\/\/ Get implements the DiskBlockCache interface for DiskBlockCacheStandard.\nfunc (cache *DiskBlockCacheStandard) Get(ctx context.Context, tlfID tlf.ID,\n\tblockID kbfsblock.ID) ([]byte, kbfscrypto.BlockCryptKeyServerHalf, error) {\n\tcache.lock.RLock()\n\tdefer cache.lock.RUnlock()\n\tif cache.isClosed {\n\t\treturn nil, kbfscrypto.BlockCryptKeyServerHalf{},\n\t\t\tDiskCacheClosedError{\"Get\"}\n\t}\n\tkey := blockID.Bytes()\n\tbuf, err := cache.blockDb.Get(key, nil)\n\tif err != nil {\n\t\treturn nil, kbfscrypto.BlockCryptKeyServerHalf{}, NoSuchBlockError{blockID}\n\t}\n\tcache.updateLruLocked(tlfID, key)\n\treturn cache.decodeBlockCacheEntry(buf)\n}\n\n\/\/ Put implements the DiskBlockCache interface for DiskBlockCacheStandard.\nfunc (cache *DiskBlockCacheStandard) Put(ctx context.Context, tlfID tlf.ID,\n\tblockID kbfsblock.ID, buf []byte,\n\tserverHalf kbfscrypto.BlockCryptKeyServerHalf) error {\n\tcache.lock.RLock()\n\tdefer cache.lock.RUnlock()\n\tif cache.isClosed {\n\t\treturn DiskCacheClosedError{\"Put\"}\n\t}\n\treturn nil\n}\n\n\/\/ Delete implements the DiskBlockCache interface for DiskBlockCacheStandard.\nfunc (cache *DiskBlockCacheStandard) Delete(ctx context.Context, tlfID tlf.ID,\n\tblockID kbfsblock.ID) error {\n\tcache.lock.RLock()\n\tdefer cache.lock.RUnlock()\n\tif cache.isClosed {\n\t\treturn DiskCacheClosedError{\"Delete\"}\n\t}\n\treturn nil\n}\n\n\/\/ Evict implements the DiskBlockCache interface for DiskBlockCacheStandard.\nfunc (cache *DiskBlockCacheStandard) Evict(ctx context.Context, tlfID tlf.ID,\n\tnumBlocks int) error {\n\tcache.lock.RLock()\n\tdefer cache.lock.RUnlock()\n\tif cache.isClosed {\n\t\treturn DiskCacheClosedError{\"Evict\"}\n\t}\n\t\/\/ Use kbfscrypto.MakeTemporaryID() to create a random hash ID. Then begin\n\t\/\/ an interator into cache.lruDb.Range(b, nil) and iterate from there to\n\t\/\/ get numBlocks * evictionConsiderationFactor block IDs. We sort the\n\t\/\/ resulting blocks by value (LRU time) and pick the minimum numBlocks. We\n\t\/\/ put those block IDs into a leveldb.Batch for cache.blockDb via\n\t\/\/ Batch.Delete(), then Write() that batch.\n\t\/\/ NOTE: It is important that we store LRU times using a monotonic clock\n\t\/\/ for this device. Use runtime.nanotime() for now.\n\treturn nil\n}\n\n\/\/ Shutdown implements the DiskBlockCache interface for DiskBlockCacheStandard.\nfunc (cache *DiskBlockCacheStandard) Shutdown() {\n\tcache.lock.Lock()\n\tdefer cache.lock.Unlock()\n\tif cache.isClosed {\n\t\treturn\n\t}\n\tcache.isClosed = true\n\tcache.blockDb.Close()\n\tcache.lruDb.Close()\n}\n<commit_msg>disk_block_cache: Implemented Get, Put, and Delete methods<commit_after>package libkbfs\n\nimport (\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/keybase\/kbfs\/kbfsblock\"\n\t\"github.com\/keybase\/kbfs\/kbfscrypto\"\n\t\"github.com\/keybase\/kbfs\/tlf\"\n\t\"github.com\/syndtr\/goleveldb\/leveldb\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tevictionConsiderationFactor uint = 3\n\tblockDbFilename string = \"diskCacheBlocks.leveldb\"\n\tlruDbFilename string = \"diskCacheLRU.leveldb\"\n)\n\ntype diskBlockCacheEntry struct {\n\tbuf []byte\n\tserverHalf kbfscrypto.BlockCryptKeyServerHalf\n}\n\ntype diskBlockCacheConfig interface {\n\tcodecGetter\n}\n\n\/\/ DiskBlockCacheStandard is the standard implementation for DiskBlockCache.\ntype DiskBlockCacheStandard struct {\n\tconfig diskBlockCacheConfig\n\tmaxBytes uint64\n\t\/\/ protects everything below\n\tlock sync.RWMutex\n\tisClosed bool\n\tblockDb *leveldb.DB\n\tlruDb *leveldb.DB\n}\n\nvar _ DiskBlockCache = (*DiskBlockCacheStandard)(nil)\n\nfunc newDiskBlockCacheStandard(config diskBlockCacheConfig, dirPath string,\n\tmaxBytes uint64) (*DiskBlockCacheStandard, error) {\n\tblockDbPath := filepath.Join(dirPath, blockDbFilename)\n\tblockDb, err := leveldb.OpenFile(blockDbPath, leveldbOptions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlruDbPath := filepath.Join(dirPath, lruDbFilename)\n\tlruDb, err := leveldb.OpenFile(lruDbPath, leveldbOptions)\n\tif err != nil {\n\t\tblockDb.Close()\n\t\treturn nil, err\n\t}\n\treturn &DiskBlockCacheStandard{\n\t\tconfig: config,\n\t\tmaxBytes: maxBytes,\n\t\tblockDb: blockDb,\n\t\tlruDb: lruDb,\n\t}, nil\n}\n\nfunc (cache *DiskBlockCacheStandard) updateLruLocked(tlfID tlf.ID,\n\tblockBytes []byte) error {\n\tkey := append(tlfID.Bytes(), blockBytes...)\n\tval, err := time.Now().MarshalBinary()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcache.lruDb.Put(key, val, nil)\n\treturn nil\n}\n\nfunc (cache *DiskBlockCacheStandard) decodeBlockCacheEntry(buf []byte) ([]byte,\n\tkbfscrypto.BlockCryptKeyServerHalf, error) {\n\tentry := diskBlockCacheEntry{}\n\terr := cache.config.Codec().Decode(buf, &entry)\n\tif err != nil {\n\t\treturn nil, kbfscrypto.BlockCryptKeyServerHalf{}, err\n\t}\n\treturn entry.buf, entry.serverHalf, nil\n}\n\nfunc (cache *DiskBlockCacheStandard) encodeBlockCacheEntry(buf []byte,\n\tserverHalf kbfscrypto.BlockCryptKeyServerHalf) ([]byte, error) {\n\tentry := diskBlockCacheEntry{\n\t\tbuf: buf,\n\t\tserverHalf: serverHalf,\n\t}\n\treturn cache.config.Codec().Encode(&entry)\n}\n\n\/\/ Get implements the DiskBlockCache interface for DiskBlockCacheStandard.\nfunc (cache *DiskBlockCacheStandard) Get(ctx context.Context, tlfID tlf.ID,\n\tblockID kbfsblock.ID) ([]byte, kbfscrypto.BlockCryptKeyServerHalf, error) {\n\tcache.lock.RLock()\n\tdefer cache.lock.RUnlock()\n\tif cache.isClosed {\n\t\treturn nil, kbfscrypto.BlockCryptKeyServerHalf{},\n\t\t\tDiskCacheClosedError{\"Get\"}\n\t}\n\tvar entry []byte\n\terr := runUnlessCanceled(ctx, func() error {\n\t\tblockBytes := blockID.Bytes()\n\t\tbuf, err := cache.blockDb.Get(blockBytes, nil)\n\t\tif err != nil {\n\t\t\treturn NoSuchBlockError{blockID}\n\t\t}\n\t\terr = cache.updateLruLocked(tlfID, blockBytes)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tentry = buf\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, kbfscrypto.BlockCryptKeyServerHalf{}, err\n\t}\n\tif entry == nil {\n\t\treturn nil, kbfscrypto.BlockCryptKeyServerHalf{}, NoSuchBlockError{blockID}\n\t}\n\treturn cache.decodeBlockCacheEntry(entry)\n}\n\n\/\/ Put implements the DiskBlockCache interface for DiskBlockCacheStandard.\nfunc (cache *DiskBlockCacheStandard) Put(ctx context.Context, tlfID tlf.ID,\n\tblockID kbfsblock.ID, buf []byte,\n\tserverHalf kbfscrypto.BlockCryptKeyServerHalf) error {\n\tcache.lock.RLock()\n\tdefer cache.lock.RUnlock()\n\tif cache.isClosed {\n\t\treturn DiskCacheClosedError{\"Put\"}\n\t}\n\tentry, err := cache.encodeBlockCacheEntry(buf, serverHalf)\n\tblockBytes := blockID.Bytes()\n\terr = cache.blockDb.Put(blockBytes, entry, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn cache.updateLruLocked(tlfID, blockBytes)\n}\n\n\/\/ Delete implements the DiskBlockCache interface for DiskBlockCacheStandard.\nfunc (cache *DiskBlockCacheStandard) Delete(ctx context.Context, tlfID tlf.ID,\n\tblockID kbfsblock.ID) error {\n\tcache.lock.RLock()\n\tdefer cache.lock.RUnlock()\n\tif cache.isClosed {\n\t\treturn DiskCacheClosedError{\"Delete\"}\n\t}\n\tblockBytes := blockID.Bytes()\n\terr := cache.blockDb.Delete(blockBytes, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlruKey := append(tlfID.Bytes(), blockBytes...)\n\treturn cache.lruDb.Delete(lruKey, nil)\n}\n\n\/\/ Evict implements the DiskBlockCache interface for DiskBlockCacheStandard.\nfunc (cache *DiskBlockCacheStandard) Evict(ctx context.Context, tlfID tlf.ID,\n\tnumBlocks int) error {\n\tcache.lock.RLock()\n\tdefer cache.lock.RUnlock()\n\tif cache.isClosed {\n\t\treturn DiskCacheClosedError{\"Evict\"}\n\t}\n\t\/\/ Use kbfscrypto.MakeTemporaryID() to create a random hash ID. Then begin\n\t\/\/ an interator into cache.lruDb.Range(b, nil) and iterate from there to\n\t\/\/ get numBlocks * evictionConsiderationFactor block IDs. We sort the\n\t\/\/ resulting blocks by value (LRU time) and pick the minimum numBlocks. We\n\t\/\/ put those block IDs into a leveldb.Batch for cache.blockDb via\n\t\/\/ Batch.Delete(), then Write() that batch.\n\t\/\/ NOTE: It is important that we store LRU times using a monotonic clock\n\t\/\/ for this device. Use runtime.nanotime() for now.\n\treturn nil\n}\n\n\/\/ Shutdown implements the DiskBlockCache interface for DiskBlockCacheStandard.\nfunc (cache *DiskBlockCacheStandard) Shutdown() {\n\tcache.lock.Lock()\n\tdefer cache.lock.Unlock()\n\tif cache.isClosed {\n\t\treturn\n\t}\n\tcache.isClosed = true\n\tcache.blockDb.Close()\n\tcache.lruDb.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package mock\n\nimport (\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/blang\/semver\"\n\t\"github.com\/justwatchcom\/gopass\/backend\/gpg\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ Mocker is a no-op GPG mock\ntype Mocker struct{}\n\n\/\/ New creates a new GPG mock\nfunc New() *Mocker {\n\treturn &Mocker{}\n}\n\n\/\/ ListPublicKeys does nothing\nfunc (m *Mocker) ListPublicKeys(context.Context) (gpg.KeyList, error) {\n\treturn gpg.KeyList{}, nil\n}\n\n\/\/ FindPublicKeys does nothing\nfunc (m *Mocker) FindPublicKeys(context.Context, ...string) (gpg.KeyList, error) {\n\treturn gpg.KeyList{}, nil\n}\n\n\/\/ ListPrivateKeys does nothing\nfunc (m *Mocker) ListPrivateKeys(context.Context) (gpg.KeyList, error) {\n\treturn gpg.KeyList{}, nil\n}\n\n\/\/ FindPrivateKeys does nothing\nfunc (m *Mocker) FindPrivateKeys(context.Context, ...string) (gpg.KeyList, error) {\n\treturn gpg.KeyList{}, nil\n}\n\n\/\/ GetRecipients does nothing\nfunc (m *Mocker) GetRecipients(context.Context, string) ([]string, error) {\n\treturn []string{}, nil\n}\n\n\/\/ Encrypt writes the input to disk unaltered\nfunc (m *Mocker) Encrypt(ctx context.Context, path string, content []byte, recipients []string) error {\n\tif err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to create dir '%s'\", path)\n\t}\n\treturn ioutil.WriteFile(path, content, 0600)\n}\n\n\/\/ Decrypt read the file from disk unaltered\nfunc (m *Mocker) Decrypt(ctx context.Context, path string) ([]byte, error) {\n\treturn ioutil.ReadFile(path)\n}\n\n\/\/ ExportPublicKey does nothing\nfunc (m *Mocker) ExportPublicKey(context.Context, string, string) error {\n\treturn nil\n}\n\n\/\/ ImportPublicKey does nothing\nfunc (m *Mocker) ImportPublicKey(context.Context, string) error {\n\treturn nil\n}\n\n\/\/ Version returns dummy version info\nfunc (m *Mocker) Version(context.Context) semver.Version {\n\treturn semver.Version{}\n}\n<commit_msg>Fix tests<commit_after>package mock\n\nimport (\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/blang\/semver\"\n\t\"github.com\/justwatchcom\/gopass\/backend\/gpg\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ Mocker is a no-op GPG mock\ntype Mocker struct{}\n\n\/\/ New creates a new GPG mock\nfunc New() *Mocker {\n\treturn &Mocker{}\n}\n\n\/\/ ListPublicKeys does nothing\nfunc (m *Mocker) ListPublicKeys(context.Context) (gpg.KeyList, error) {\n\treturn gpg.KeyList{}, nil\n}\n\n\/\/ FindPublicKeys does nothing\nfunc (m *Mocker) FindPublicKeys(context.Context, ...string) (gpg.KeyList, error) {\n\treturn gpg.KeyList{}, nil\n}\n\n\/\/ ListPrivateKeys does nothing\nfunc (m *Mocker) ListPrivateKeys(context.Context) (gpg.KeyList, error) {\n\treturn gpg.KeyList{}, nil\n}\n\n\/\/ FindPrivateKeys does nothing\nfunc (m *Mocker) FindPrivateKeys(context.Context, ...string) (gpg.KeyList, error) {\n\treturn gpg.KeyList{}, nil\n}\n\n\/\/ GetRecipients does nothing\nfunc (m *Mocker) GetRecipients(context.Context, string) ([]string, error) {\n\treturn []string{}, nil\n}\n\n\/\/ Encrypt writes the input to disk unaltered\nfunc (m *Mocker) Encrypt(ctx context.Context, path string, content []byte, recipients []string) error {\n\tif err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {\n\t\treturn errors.Wrapf(err, \"failed to create dir '%s'\", path)\n\t}\n\treturn ioutil.WriteFile(path, content, 0600)\n}\n\n\/\/ Decrypt read the file from disk unaltered\nfunc (m *Mocker) Decrypt(ctx context.Context, path string) ([]byte, error) {\n\treturn ioutil.ReadFile(path)\n}\n\n\/\/ ExportPublicKey does nothing\nfunc (m *Mocker) ExportPublicKey(context.Context, string, string) error {\n\treturn nil\n}\n\n\/\/ ImportPublicKey does nothing\nfunc (m *Mocker) ImportPublicKey(context.Context, string) error {\n\treturn nil\n}\n\n\/\/ Version returns dummy version info\nfunc (m *Mocker) Version(context.Context) semver.Version {\n\treturn semver.Version{}\n}\n\n\/\/ Binary always returns 'gpg'\nfunc (m *Mocker) Binary() string {\n\treturn \"gpg\"\n}\n<|endoftext|>"} {"text":"<commit_before>package java\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\t\"github.com\/anchore\/imgbom\/imgbom\/pkg\"\n\t\"github.com\/mitchellh\/mapstructure\"\n)\n\nconst pomPropertiesGlob = \"*pom.properties\"\n\nfunc parsePomProperties(path string, reader io.Reader) (*pkg.PomProperties, error) {\n\tvar props pkg.PomProperties\n\tpropMap := make(map[string]string)\n\tscanner := bufio.NewScanner(reader)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\n\t\t\/\/ ignore empty lines and comments\n\t\tif strings.TrimSpace(line) == \"\" || strings.HasPrefix(strings.TrimLeft(line, \" \"), \"#\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tidx := strings.Index(line, \"=\")\n\t\tif idx == -1 {\n\t\t\treturn nil, fmt.Errorf(\"unable to split pom.properties key-value pairs: %q\", line)\n\t\t}\n\n\t\tkey := strings.TrimSpace(line[0:idx])\n\t\tvalue := strings.TrimSpace(line[idx+1:])\n\t\tpropMap[key] = value\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, fmt.Errorf(\"unable read pom.properties: %w\", err)\n\t}\n\n\tif err := mapstructure.Decode(propMap, &props); err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to parse pom.properties: %w\", err)\n\t}\n\n\tprops.Path = path\n\n\treturn &props, nil\n}\n<commit_msg>Update imgbom\/cataloger\/java\/pom_properties.go<commit_after>package java\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\t\"github.com\/anchore\/imgbom\/imgbom\/pkg\"\n\t\"github.com\/mitchellh\/mapstructure\"\n)\n\nconst pomPropertiesGlob = \"*pom.properties\"\n\nfunc parsePomProperties(path string, reader io.Reader) (*pkg.PomProperties, error) {\n\tvar props pkg.PomProperties\n\tpropMap := make(map[string]string)\n\tscanner := bufio.NewScanner(reader)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\n\t\t\/\/ ignore empty lines and comments\n\t\tif strings.TrimSpace(line) == \"\" || strings.HasPrefix(strings.TrimLeft(line, \" \"), \"#\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tidx := strings.Index(line, \"=\")\n\t\tif idx == -1 {\n\t\t\treturn nil, fmt.Errorf(\"unable to split pom.properties key-value pairs: %q\", line)\n\t\t}\n\n\t\tkey := strings.TrimSpace(line[0:idx])\n\t\tvalue := strings.TrimSpace(line[idx+1:])\n\t\tpropMap[key] = value\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to read pom.properties: %w\", err)\n\t}\n\n\tif err := mapstructure.Decode(propMap, &props); err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to parse pom.properties: %w\", err)\n\t}\n\n\tprops.Path = path\n\n\treturn &props, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package backends\n\nimport (\n\t\"fmt\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/yunify\/metadata-proxy\/log\"\n\t\"github.com\/yunify\/metadata-proxy\/store\"\n\t\"math\/rand\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestClientSync(t *testing.T) {\n\tlog.SetLevel(\"debug\")\n\tbackendNodes := []string{\"etcd\"}\n\tprefix := fmt.Sprintf(\"\/prefix%v\", rand.Intn(1000))\n\n\tstopChan := make(chan bool)\n\tdefer func() {\n\t\tstopChan <- true\n\t}()\n\tfor _, backend := range backendNodes {\n\n\t\tnodes := GetDefaultBackends(backend)\n\n\t\tconfig := Config{\n\t\t\tBackend: backend,\n\t\t\tBackendNodes: nodes,\n\t\t\tPrefix: prefix,\n\t\t}\n\t\tstoreClient, err := New(config)\n\t\tassert.Nil(t, err)\n\n\t\tstoreClient.Delete(\"\/\")\n\t\t\/\/assert.Nil(t, err)\n\n\t\tmetastore := store.New()\n\t\tstoreClient.Sync(metastore, stopChan)\n\n\t\ttestData := FillTestData(storeClient)\n\t\ttime.Sleep(1000 * time.Millisecond)\n\t\tValidTestData(t, testData, metastore)\n\n\t\tRandomUpdate(testData, storeClient, 10)\n\t\ttime.Sleep(1000 * time.Millisecond)\n\t\tValidTestData(t, testData, metastore)\n\n\t\tdeletedKey := RandomDelete(testData, storeClient)\n\t\ttime.Sleep(1000 * time.Millisecond)\n\t\tValidTestData(t, testData, metastore)\n\n\t\tval, ok := metastore.Get(deletedKey)\n\t\tassert.False(t, ok)\n\t\tassert.Nil(t, val)\n\n\t\tstoreClient.Delete(\"\/\")\n\t}\n}\n\nfunc TestSelfMapping(t *testing.T) {\n\tlog.SetLevel(\"debug\")\n\n\tbackendNodes := []string{\"etcd\"}\n\tprefix := fmt.Sprintf(\"\/prefix%v\", rand.Intn(1000))\n\n\tstopChan := make(chan bool)\n\tdefer func() {\n\t\tstopChan <- true\n\t}()\n\tfor _, backend := range backendNodes {\n\n\t\tnodes := GetDefaultBackends(backend)\n\n\t\tconfig := Config{\n\t\t\tBackend: backend,\n\t\t\tBackendNodes: nodes,\n\t\t\tPrefix: prefix,\n\t\t}\n\t\tstoreClient, err := New(config)\n\t\tassert.Nil(t, err)\n\n\t\tmetastore := store.New()\n\t\tstoreClient.SyncSelfMapping(metastore, stopChan)\n\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tip := fmt.Sprintf(\"192.168.1.%v\", i)\n\t\t\tmapping := map[string]string{\n\t\t\t\t\"instance\": fmt.Sprintf(\"\/instances\/%v\", i),\n\t\t\t}\n\t\t\tstoreClient.RegisterSelfMapping(ip, mapping)\n\t\t}\n\t\ttime.Sleep(1000 * time.Millisecond)\n\t\tmeta, _ := metastore.Get(\"\/\")\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tip := fmt.Sprintf(\"192.168.1.%v\", i)\n\t\t\tval, ok := metastore.Get(ip)\n\t\t\tassert.True(t, ok)\n\t\t\tmapVal, mok := val.(map[string]interface{})\n\t\t\tassert.True(t, mok)\n\t\t\tpath := mapVal[\"instance\"]\n\t\t\tassert.Equal(t, path, fmt.Sprintf(\"\/instances\/%v\", i))\n\t\t}\n\t}\n}\n\nfunc FillTestData(storeClient StoreClient) map[string]string {\n\ttestData := make(map[string]string)\n\tfor i := 0; i < 10; i++ {\n\t\tfor j := 0; j < 10; j++ {\n\t\t\tkey := fmt.Sprintf(\"\/%v\/%v\", i, j)\n\t\t\tval := fmt.Sprintf(\"%v-%v\", i, j)\n\t\t\ttestData[key] = val\n\t\t}\n\t}\n\terr := storeClient.SetValues(testData)\n\tif err != nil {\n\t\tlog.Error(\"SetValues error\", err.Error())\n\t}\n\treturn testData\n}\n\nfunc RandomUpdate(testData map[string]string, storeClient StoreClient, times int) {\n\tlength := len(testData)\n\tkeys := make([]string, 0, length)\n\tfor k := range testData {\n\t\tkeys = append(keys, k)\n\t}\n\tfor i := 0; i < times; i++ {\n\t\tidx := rand.Intn(length)\n\t\tkey := keys[idx]\n\t\tval := testData[key]\n\t\tnewVal := fmt.Sprintf(\"%s-%v\", val, 0)\n\n\t\tstoreClient.SetValues(map[string]string{key: newVal})\n\t\ttestData[key] = newVal\n\t}\n}\n\nfunc RandomDelete(testData map[string]string, storeClient StoreClient) string {\n\tlength := len(testData)\n\tkeys := make([]string, 0, length)\n\tfor k := range testData {\n\t\tkeys = append(keys, k)\n\t}\n\tidx := rand.Intn(length)\n\tkey := keys[idx]\n\tstoreClient.Delete(key)\n\tdelete(testData, key)\n\treturn key\n}\n\nfunc ValidTestData(t *testing.T, testData map[string]string, metastore store.Store) {\n\tfor k, v := range testData {\n\t\tstoreVal, _ := metastore.Get(k)\n\t\tassert.Equal(t, v, storeVal)\n\t}\n}\n<commit_msg>clear unused var.<commit_after>package backends\n\nimport (\n\t\"fmt\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/yunify\/metadata-proxy\/log\"\n\t\"github.com\/yunify\/metadata-proxy\/store\"\n\t\"math\/rand\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestClientSync(t *testing.T) {\n\tlog.SetLevel(\"debug\")\n\tbackendNodes := []string{\"etcd\"}\n\tprefix := fmt.Sprintf(\"\/prefix%v\", rand.Intn(1000))\n\n\tstopChan := make(chan bool)\n\tdefer func() {\n\t\tstopChan <- true\n\t}()\n\tfor _, backend := range backendNodes {\n\n\t\tnodes := GetDefaultBackends(backend)\n\n\t\tconfig := Config{\n\t\t\tBackend: backend,\n\t\t\tBackendNodes: nodes,\n\t\t\tPrefix: prefix,\n\t\t}\n\t\tstoreClient, err := New(config)\n\t\tassert.Nil(t, err)\n\n\t\tstoreClient.Delete(\"\/\")\n\t\t\/\/assert.Nil(t, err)\n\n\t\tmetastore := store.New()\n\t\tstoreClient.Sync(metastore, stopChan)\n\n\t\ttestData := FillTestData(storeClient)\n\t\ttime.Sleep(1000 * time.Millisecond)\n\t\tValidTestData(t, testData, metastore)\n\n\t\tRandomUpdate(testData, storeClient, 10)\n\t\ttime.Sleep(1000 * time.Millisecond)\n\t\tValidTestData(t, testData, metastore)\n\n\t\tdeletedKey := RandomDelete(testData, storeClient)\n\t\ttime.Sleep(1000 * time.Millisecond)\n\t\tValidTestData(t, testData, metastore)\n\n\t\tval, ok := metastore.Get(deletedKey)\n\t\tassert.False(t, ok)\n\t\tassert.Nil(t, val)\n\n\t\tstoreClient.Delete(\"\/\")\n\t}\n}\n\nfunc TestSelfMapping(t *testing.T) {\n\tlog.SetLevel(\"debug\")\n\n\tbackendNodes := []string{\"etcd\"}\n\tprefix := fmt.Sprintf(\"\/prefix%v\", rand.Intn(1000))\n\n\tstopChan := make(chan bool)\n\tdefer func() {\n\t\tstopChan <- true\n\t}()\n\tfor _, backend := range backendNodes {\n\n\t\tnodes := GetDefaultBackends(backend)\n\n\t\tconfig := Config{\n\t\t\tBackend: backend,\n\t\t\tBackendNodes: nodes,\n\t\t\tPrefix: prefix,\n\t\t}\n\t\tstoreClient, err := New(config)\n\t\tassert.Nil(t, err)\n\n\t\tmetastore := store.New()\n\t\tstoreClient.SyncSelfMapping(metastore, stopChan)\n\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tip := fmt.Sprintf(\"192.168.1.%v\", i)\n\t\t\tmapping := map[string]string{\n\t\t\t\t\"instance\": fmt.Sprintf(\"\/instances\/%v\", i),\n\t\t\t}\n\t\t\tstoreClient.RegisterSelfMapping(ip, mapping)\n\t\t}\n\t\ttime.Sleep(1000 * time.Millisecond)\n\t\tfor i := 0; i < 10; i++ {\n\t\t\tip := fmt.Sprintf(\"192.168.1.%v\", i)\n\t\t\tval, ok := metastore.Get(ip)\n\t\t\tassert.True(t, ok)\n\t\t\tmapVal, mok := val.(map[string]interface{})\n\t\t\tassert.True(t, mok)\n\t\t\tpath := mapVal[\"instance\"]\n\t\t\tassert.Equal(t, path, fmt.Sprintf(\"\/instances\/%v\", i))\n\t\t}\n\t}\n}\n\nfunc FillTestData(storeClient StoreClient) map[string]string {\n\ttestData := make(map[string]string)\n\tfor i := 0; i < 10; i++ {\n\t\tfor j := 0; j < 10; j++ {\n\t\t\tkey := fmt.Sprintf(\"\/%v\/%v\", i, j)\n\t\t\tval := fmt.Sprintf(\"%v-%v\", i, j)\n\t\t\ttestData[key] = val\n\t\t}\n\t}\n\terr := storeClient.SetValues(testData)\n\tif err != nil {\n\t\tlog.Error(\"SetValues error\", err.Error())\n\t}\n\treturn testData\n}\n\nfunc RandomUpdate(testData map[string]string, storeClient StoreClient, times int) {\n\tlength := len(testData)\n\tkeys := make([]string, 0, length)\n\tfor k := range testData {\n\t\tkeys = append(keys, k)\n\t}\n\tfor i := 0; i < times; i++ {\n\t\tidx := rand.Intn(length)\n\t\tkey := keys[idx]\n\t\tval := testData[key]\n\t\tnewVal := fmt.Sprintf(\"%s-%v\", val, 0)\n\n\t\tstoreClient.SetValues(map[string]string{key: newVal})\n\t\ttestData[key] = newVal\n\t}\n}\n\nfunc RandomDelete(testData map[string]string, storeClient StoreClient) string {\n\tlength := len(testData)\n\tkeys := make([]string, 0, length)\n\tfor k := range testData {\n\t\tkeys = append(keys, k)\n\t}\n\tidx := rand.Intn(length)\n\tkey := keys[idx]\n\tstoreClient.Delete(key)\n\tdelete(testData, key)\n\treturn key\n}\n\nfunc ValidTestData(t *testing.T, testData map[string]string, metastore store.Store) {\n\tfor k, v := range testData {\n\t\tstoreVal, _ := metastore.Get(k)\n\t\tassert.Equal(t, v, storeVal)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\tcmds \"github.com\/ipfs\/go-ipfs\/commands\"\n\tcorerepo \"github.com\/ipfs\/go-ipfs\/core\/corerepo\"\n\tfsrepo \"github.com\/ipfs\/go-ipfs\/repo\/fsrepo\"\n\tu \"gx\/ipfs\/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1\/go-ipfs-util\"\n)\n\ntype RepoStat struct {\n\trepoPath string\n\trepoSize uint64 \/\/ size in bytes\n\tnumBlocks uint64\n}\n\nvar RepoCmd = &cmds.Command{\n\tHelptext: cmds.HelpText{\n\t\tTagline: \"Manipulate the IPFS repo.\",\n\t\tShortDescription: `\n'ipfs repo' is a plumbing command used to manipulate the repo.\n`,\n\t},\n\n\tSubcommands: map[string]*cmds.Command{\n\t\t\"gc\": repoGcCmd,\n\t\t\"stat\": repoStatCmd,\n\t},\n}\n\nvar repoGcCmd = &cmds.Command{\n\tHelptext: cmds.HelpText{\n\t\tTagline: \"Perform a garbage collection sweep on the repo.\",\n\t\tShortDescription: `\n'ipfs repo gc' is a plumbing command that will sweep the local\nset of stored objects and remove ones that are not pinned in\norder to reclaim hard disk space.\n`,\n\t},\n\n\tOptions: []cmds.Option{\n\t\tcmds.BoolOption(\"quiet\", \"q\", \"Write minimal output.\"),\n\t},\n\tRun: func(req cmds.Request, res cmds.Response) {\n\t\tn, err := req.InvocContext().GetNode()\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmds.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\tgcOutChan, err := corerepo.GarbageCollectAsync(n, req.Context())\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmds.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\toutChan := make(chan interface{})\n\t\tres.SetOutput((<-chan interface{})(outChan))\n\n\t\tgo func() {\n\t\t\tdefer close(outChan)\n\t\t\tfor k := range gcOutChan {\n\t\t\t\toutChan <- k\n\t\t\t}\n\t\t}()\n\t},\n\tType: corerepo.KeyRemoved{},\n\tMarshalers: cmds.MarshalerMap{\n\t\tcmds.Text: func(res cmds.Response) (io.Reader, error) {\n\t\t\toutChan, ok := res.Output().(<-chan interface{})\n\t\t\tif !ok {\n\t\t\t\treturn nil, u.ErrCast()\n\t\t\t}\n\n\t\t\tquiet, _, err := res.Request().Option(\"quiet\").Bool()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tmarshal := func(v interface{}) (io.Reader, error) {\n\t\t\t\tobj, ok := v.(*corerepo.KeyRemoved)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, u.ErrCast()\n\t\t\t\t}\n\n\t\t\t\tbuf := new(bytes.Buffer)\n\t\t\t\tif quiet {\n\t\t\t\t\tbuf = bytes.NewBufferString(string(obj.Key) + \"\\n\")\n\t\t\t\t} else {\n\t\t\t\t\tbuf = bytes.NewBufferString(fmt.Sprintf(\"removed %s\\n\", obj.Key))\n\t\t\t\t}\n\t\t\t\treturn buf, nil\n\t\t\t}\n\n\t\t\treturn &cmds.ChannelMarshaler{\n\t\t\t\tChannel: outChan,\n\t\t\t\tMarshaler: marshal,\n\t\t\t\tRes: res,\n\t\t\t}, nil\n\t\t},\n\t},\n}\n\nvar repoStatCmd = &cmds.Command{\n\tHelptext: cmds.HelpText{\n\t\tTagline: \"Print status of the local repo.\",\n\t\tShortDescription: ``,\n\t},\n\tRun: func(req cmds.Request, res cmds.Response) {\n\t\tctx := req.Context()\n\t\tn, err := req.InvocContext().GetNode()\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmds.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\tusage, err := n.Repo.GetStorageUsage()\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmds.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\tallKeys, err := n.Blockstore.AllKeysChan(ctx)\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmds.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\tcount := uint64(0)\n\t\tfor range allKeys {\n\t\t\tcount++\n\t\t}\n\n\t\tpath, err := fsrepo.BestKnownPath()\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmds.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\tout := &RepoStat{\n\t\t\trepoPath: path,\n\t\t\trepoSize: usage,\n\t\t\tnumBlocks: count,\n\t\t}\n\t\tres.SetOutput(out)\n\t},\n\tMarshalers: cmds.MarshalerMap{\n\t\tcmds.Text: func(res cmds.Response) (io.Reader, error) {\n\t\t\tstat, ok := res.Output().(*RepoStat)\n\t\t\tif !ok {\n\t\t\t\treturn nil, u.ErrCast()\n\t\t\t}\n\n\t\t\tout := fmt.Sprintf(\"Path: %s\\nSize: %d bytes\\n\"+\n\t\t\t\t\"Blocks: %d\\n\",\n\t\t\t\tstat.repoPath, stat.repoSize, stat.numBlocks)\n\t\t\treturn strings.NewReader(out), nil\n\t\t},\n\t},\n}\n<commit_msg>`repo stat`: add Type<commit_after>package commands\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\tcmds \"github.com\/ipfs\/go-ipfs\/commands\"\n\tcorerepo \"github.com\/ipfs\/go-ipfs\/core\/corerepo\"\n\tfsrepo \"github.com\/ipfs\/go-ipfs\/repo\/fsrepo\"\n\tu \"gx\/ipfs\/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1\/go-ipfs-util\"\n)\n\ntype RepoStat struct {\n\trepoPath string\n\trepoSize uint64 \/\/ size in bytes\n\tnumBlocks uint64\n}\n\nvar RepoCmd = &cmds.Command{\n\tHelptext: cmds.HelpText{\n\t\tTagline: \"Manipulate the IPFS repo.\",\n\t\tShortDescription: `\n'ipfs repo' is a plumbing command used to manipulate the repo.\n`,\n\t},\n\n\tSubcommands: map[string]*cmds.Command{\n\t\t\"gc\": repoGcCmd,\n\t\t\"stat\": repoStatCmd,\n\t},\n}\n\nvar repoGcCmd = &cmds.Command{\n\tHelptext: cmds.HelpText{\n\t\tTagline: \"Perform a garbage collection sweep on the repo.\",\n\t\tShortDescription: `\n'ipfs repo gc' is a plumbing command that will sweep the local\nset of stored objects and remove ones that are not pinned in\norder to reclaim hard disk space.\n`,\n\t},\n\n\tOptions: []cmds.Option{\n\t\tcmds.BoolOption(\"quiet\", \"q\", \"Write minimal output.\"),\n\t},\n\tRun: func(req cmds.Request, res cmds.Response) {\n\t\tn, err := req.InvocContext().GetNode()\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmds.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\tgcOutChan, err := corerepo.GarbageCollectAsync(n, req.Context())\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmds.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\toutChan := make(chan interface{})\n\t\tres.SetOutput((<-chan interface{})(outChan))\n\n\t\tgo func() {\n\t\t\tdefer close(outChan)\n\t\t\tfor k := range gcOutChan {\n\t\t\t\toutChan <- k\n\t\t\t}\n\t\t}()\n\t},\n\tType: corerepo.KeyRemoved{},\n\tMarshalers: cmds.MarshalerMap{\n\t\tcmds.Text: func(res cmds.Response) (io.Reader, error) {\n\t\t\toutChan, ok := res.Output().(<-chan interface{})\n\t\t\tif !ok {\n\t\t\t\treturn nil, u.ErrCast()\n\t\t\t}\n\n\t\t\tquiet, _, err := res.Request().Option(\"quiet\").Bool()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tmarshal := func(v interface{}) (io.Reader, error) {\n\t\t\t\tobj, ok := v.(*corerepo.KeyRemoved)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, u.ErrCast()\n\t\t\t\t}\n\n\t\t\t\tbuf := new(bytes.Buffer)\n\t\t\t\tif quiet {\n\t\t\t\t\tbuf = bytes.NewBufferString(string(obj.Key) + \"\\n\")\n\t\t\t\t} else {\n\t\t\t\t\tbuf = bytes.NewBufferString(fmt.Sprintf(\"removed %s\\n\", obj.Key))\n\t\t\t\t}\n\t\t\t\treturn buf, nil\n\t\t\t}\n\n\t\t\treturn &cmds.ChannelMarshaler{\n\t\t\t\tChannel: outChan,\n\t\t\t\tMarshaler: marshal,\n\t\t\t\tRes: res,\n\t\t\t}, nil\n\t\t},\n\t},\n}\n\nvar repoStatCmd = &cmds.Command{\n\tHelptext: cmds.HelpText{\n\t\tTagline: \"Print status of the local repo.\",\n\t\tShortDescription: ``,\n\t},\n\tRun: func(req cmds.Request, res cmds.Response) {\n\t\tctx := req.Context()\n\t\tn, err := req.InvocContext().GetNode()\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmds.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\tusage, err := n.Repo.GetStorageUsage()\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmds.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\tallKeys, err := n.Blockstore.AllKeysChan(ctx)\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmds.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\tcount := uint64(0)\n\t\tfor range allKeys {\n\t\t\tcount++\n\t\t}\n\n\t\tpath, err := fsrepo.BestKnownPath()\n\t\tif err != nil {\n\t\t\tres.SetError(err, cmds.ErrNormal)\n\t\t\treturn\n\t\t}\n\n\t\tres.SetOutput(&RepoStat{\n\t\t\trepoPath: path,\n\t\t\trepoSize: usage,\n\t\t\tnumBlocks: count,\n\t\t})\n\t},\n\tType: RepoStat{},\n\tMarshalers: cmds.MarshalerMap{\n\t\tcmds.Text: func(res cmds.Response) (io.Reader, error) {\n\t\t\tstat, ok := res.Output().(*RepoStat)\n\t\t\tif !ok {\n\t\t\t\treturn nil, u.ErrCast()\n\t\t\t}\n\n\t\t\tout := fmt.Sprintf(\n\t\t\t\t\"Path: %s\\nSize: %d bytes\\nBlocks: %d\\n\",\n\t\t\t\tstat.repoPath, stat.repoSize, stat.numBlocks)\n\n\t\t\treturn strings.NewReader(out), nil\n\t\t},\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 - 2014 Alex Palaistras. All rights reserved.\n\/\/ Use of this source code is governed by the MIT License, the\n\/\/ full text of which can be found in the LICENSE file.\n\n\/\/ Package config implements configuration file support for Sleepy.\n\/\/ The files are loaded using Load and parsed using the various parsing\n\/\/ methods.\npackage config\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com\/miguel-branco\/goconfig\"\n)\n\n\/\/ Config represents a parsed configuration file.\ntype Config map[string]map[string]interface{}\n\n\/\/ String returns a value of 'option' in 'section' as a string. If the\n\/\/ value cannot be found, or it cannot be converted into a string, an\n\/\/ empty string is returned along with an error.\nfunc (c *Config) String(section, option string) (string, error) {\n\tif exists, error := c.exists(section, option); !exists {\n\t\treturn \"\", error\n\t}\n\n\t\/\/ Return value as string, after conversion, if necessary.\n\tswitch value := (*c)[section][option].(type) {\n\tcase string:\n\t\treturn value, nil\n\tcase []byte:\n\t\treturn string(value), nil\n\tcase int:\n\t\treturn strconv.Itoa(value), nil\n\tcase bool:\n\t\treturn strconv.FormatBool(value), nil\n\tcase float64:\n\t\treturn strconv.FormatFloat(value, 'g', -1, 64), nil\n\tcase nil:\n\t\treturn \"\", nil\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"Couldn't convert type '%T' to string\", value)\n\t}\n\n\treturn \"\", fmt.Errorf(\"Unknown parse error\")\n}\n\n\/\/ S is like String, but never returns an error.\nfunc (c *Config) S(section, option string) string {\n\tv, _ := c.String(section, option)\n\treturn v\n}\n\n\/\/ Int returns a value of 'option' in 'section' as an integer. It returns\n\/\/ a zero (0) integer along with an error message on failure.\nfunc (c *Config) Int(section, option string) (int64, error) {\n\tif exists, error := c.exists(section, option); !exists {\n\t\treturn 0, error\n\t}\n\n\t\/\/ Return value as int, after conversion, if necessary.\n\tswitch value := (*c)[section][option].(type) {\n\tcase string:\n\t\tnum, error := strconv.ParseInt(value, 10, 64)\n\t\tif error != nil {\n\t\t\treturn 0, fmt.Errorf(\"Couldn't convert string '%s' to int\", value)\n\t\t}\n\n\t\treturn num, nil\n\tcase int:\n\t\treturn int64(value), nil\n\tcase bool:\n\t\tif value {\n\t\t\treturn 1, nil\n\t\t}\n\n\t\treturn 0, nil\n\tcase nil:\n\t\treturn 0, nil\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"Couldn't convert type '%T' to int\", value)\n\t}\n\n\treturn 0, fmt.Errorf(\"Unknown parse error\")\n}\n\n\/\/ I is like Int, but never returns an error.\nfunc (c *Config) I(section, option string) int64 {\n\tv, _ := c.Int(section, option)\n\treturn v\n}\n\n\/\/ Bool returns a value of 'option' in 'section' as an boolean. It returns\n\/\/ 'false' along with an error message on failure.\nfunc (c *Config) Bool(section, option string) (bool, error) {\n\tif exists, error := c.exists(section, option); !exists {\n\t\treturn false, error\n\t}\n\n\t\/\/ Return value as bool, after conversion, if necessary.\n\tswitch value := (*c)[section][option].(type) {\n\tcase string:\n\t\tstatus, error := strconv.ParseBool(value)\n\t\tif error != nil {\n\t\t\treturn false, fmt.Errorf(\"Couldn't convert string '%s' to bool\", value)\n\t\t}\n\n\t\treturn status, nil\n\tcase bool:\n\t\treturn value, nil\n\tcase nil:\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"Couldn't convert type '%T' to bool\", value)\n\t}\n\n\treturn false, fmt.Errorf(\"Unknown parse error\")\n}\n\n\/\/ B is like Bool, but never returns an error.\nfunc (c *Config) B(section, option string) bool {\n\tv, _ := c.Bool(section, option)\n\treturn v\n}\n\n\/\/ Float returns a value of 'option' in 'section' as an float64. It returns\n\/\/ 0.0 along with an error message on failure.\nfunc (c *Config) Float(section, option string) (float64, error) {\n\tif exists, error := c.exists(section, option); !exists {\n\t\treturn 0.0, error\n\t}\n\n\t\/\/ Return value as float, after conversion, if necessary.\n\tswitch value := (*c)[section][option].(type) {\n\tcase string:\n\t\tnum, error := strconv.ParseFloat(value, 64)\n\t\tif error != nil {\n\t\t\treturn 0.0, fmt.Errorf(\"Couldn't convert string '%s' to float\", value)\n\t\t}\n\n\t\treturn num, nil\n\tcase float64:\n\t\treturn value, nil\n\tcase nil:\n\t\treturn 0.0, nil\n\tdefault:\n\t\treturn 0.0, fmt.Errorf(\"Couldn't convert type '%T' to float\", value)\n\t}\n\n\treturn 0.0, fmt.Errorf(\"Unknown parse error\")\n}\n\n\/\/ F is like Float, but never returns an error.\nfunc (c *Config) F(section, option string) float64 {\n\tv, _ := c.Float(section, option)\n\treturn v\n}\n\n\/\/ Merge merges two or more configuration files, and returns a new,\n\/\/ combined *Config type. Identical sections and options are overwritten\n\/\/ according to the order of definition.\nfunc Merge(conf ...*Config) (*Config, error) {\n\tc := new(Config)\n\tdata := make(map[string]map[string]interface{})\n\n\tfor _, current := range conf {\n\t\tif current == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor key, value := range *current {\n\t\t\tdata[key] = make(map[string]interface{})\n\t\t\tdata[key] = value\n\t\t}\n\t}\n\n\t*c = data\n\n\treturn c, nil\n}\n\n\/\/ Load reads the configuration file located in 'conf' and returns\n\/\/ a new Config type. If the configuration file cannot be found,\n\/\/ the function returns nil and an error message.\nfunc Load(conf string) (*Config, error) {\n\tvar error error\n\n\tc := new(Config)\n\n\tif c, error = parse(conf); error != nil {\n\t\treturn nil, error\n\t}\n\n\treturn c, nil\n}\n\n\/\/ Checks if 'option' exists under 'section' in the Config file.\nfunc (c *Config) exists(section, option string) (bool, error) {\n\tif c == nil {\n\t\treturn false, fmt.Errorf(\"config is invalid\")\n\t}\n\n\tif _, ok := (*c)[section]; !ok {\n\t\treturn false, fmt.Errorf(\"section '%s' not found\", section)\n\t}\n\n\tif _, ok := (*c)[section][option]; !ok {\n\t\treturn false, fmt.Errorf(\"option '%s' not found\", option)\n\t}\n\n\treturn true, nil\n}\n\n\/\/ Parse parses the configuration file in 'conf' and returns the data\n\/\/ as values mapped to options, mapped to sections.\nfunc parse(conf string) (*map[string]map[string]interface{}, error) {\n\tc, err := goconfig.ReadConfigFile(conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := make(map[string]map[string]interface{})\n\n\tfor _, section := range c.GetSections() {\n\t\tdata[section] = make(map[string]interface{})\n\t\ts, _ := c.GetOptions(section)\n\n\t\tfor _, option := range s {\n\t\t\tv, err := c.GetString(section, option)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tdata[section][option] = v\n\t\t}\n\t}\n\n\treturn &data, nil\n}\n<commit_msg>Fix issues with incorrect assignment to Config type<commit_after>\/\/ Copyright 2012 - 2014 Alex Palaistras. All rights reserved.\n\/\/ Use of this source code is governed by the MIT License, the\n\/\/ full text of which can be found in the LICENSE file.\n\n\/\/ Package config implements configuration file support for Sleepy.\n\/\/ The files are loaded using Load and parsed using the various parsing\n\/\/ methods.\npackage config\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com\/miguel-branco\/goconfig\"\n)\n\n\/\/ Config represents a parsed configuration file.\ntype Config map[string]map[string]interface{}\n\n\/\/ String returns a value of 'option' in 'section' as a string. If the\n\/\/ value cannot be found, or it cannot be converted into a string, an\n\/\/ empty string is returned along with an error.\nfunc (c *Config) String(section, option string) (string, error) {\n\tif exists, error := c.exists(section, option); !exists {\n\t\treturn \"\", error\n\t}\n\n\t\/\/ Return value as string, after conversion, if necessary.\n\tswitch value := (*c)[section][option].(type) {\n\tcase string:\n\t\treturn value, nil\n\tcase []byte:\n\t\treturn string(value), nil\n\tcase int:\n\t\treturn strconv.Itoa(value), nil\n\tcase bool:\n\t\treturn strconv.FormatBool(value), nil\n\tcase float64:\n\t\treturn strconv.FormatFloat(value, 'g', -1, 64), nil\n\tcase nil:\n\t\treturn \"\", nil\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"Couldn't convert type '%T' to string\", value)\n\t}\n\n\treturn \"\", fmt.Errorf(\"Unknown parse error\")\n}\n\n\/\/ S is like String, but never returns an error.\nfunc (c *Config) S(section, option string) string {\n\tv, _ := c.String(section, option)\n\treturn v\n}\n\n\/\/ Int returns a value of 'option' in 'section' as an integer. It returns\n\/\/ a zero (0) integer along with an error message on failure.\nfunc (c *Config) Int(section, option string) (int64, error) {\n\tif exists, error := c.exists(section, option); !exists {\n\t\treturn 0, error\n\t}\n\n\t\/\/ Return value as int, after conversion, if necessary.\n\tswitch value := (*c)[section][option].(type) {\n\tcase string:\n\t\tnum, error := strconv.ParseInt(value, 10, 64)\n\t\tif error != nil {\n\t\t\treturn 0, fmt.Errorf(\"Couldn't convert string '%s' to int\", value)\n\t\t}\n\n\t\treturn num, nil\n\tcase int:\n\t\treturn int64(value), nil\n\tcase bool:\n\t\tif value {\n\t\t\treturn 1, nil\n\t\t}\n\n\t\treturn 0, nil\n\tcase nil:\n\t\treturn 0, nil\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"Couldn't convert type '%T' to int\", value)\n\t}\n\n\treturn 0, fmt.Errorf(\"Unknown parse error\")\n}\n\n\/\/ I is like Int, but never returns an error.\nfunc (c *Config) I(section, option string) int64 {\n\tv, _ := c.Int(section, option)\n\treturn v\n}\n\n\/\/ Bool returns a value of 'option' in 'section' as an boolean. It returns\n\/\/ 'false' along with an error message on failure.\nfunc (c *Config) Bool(section, option string) (bool, error) {\n\tif exists, error := c.exists(section, option); !exists {\n\t\treturn false, error\n\t}\n\n\t\/\/ Return value as bool, after conversion, if necessary.\n\tswitch value := (*c)[section][option].(type) {\n\tcase string:\n\t\tstatus, error := strconv.ParseBool(value)\n\t\tif error != nil {\n\t\t\treturn false, fmt.Errorf(\"Couldn't convert string '%s' to bool\", value)\n\t\t}\n\n\t\treturn status, nil\n\tcase bool:\n\t\treturn value, nil\n\tcase nil:\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, fmt.Errorf(\"Couldn't convert type '%T' to bool\", value)\n\t}\n\n\treturn false, fmt.Errorf(\"Unknown parse error\")\n}\n\n\/\/ B is like Bool, but never returns an error.\nfunc (c *Config) B(section, option string) bool {\n\tv, _ := c.Bool(section, option)\n\treturn v\n}\n\n\/\/ Float returns a value of 'option' in 'section' as an float64. It returns\n\/\/ 0.0 along with an error message on failure.\nfunc (c *Config) Float(section, option string) (float64, error) {\n\tif exists, error := c.exists(section, option); !exists {\n\t\treturn 0.0, error\n\t}\n\n\t\/\/ Return value as float, after conversion, if necessary.\n\tswitch value := (*c)[section][option].(type) {\n\tcase string:\n\t\tnum, error := strconv.ParseFloat(value, 64)\n\t\tif error != nil {\n\t\t\treturn 0.0, fmt.Errorf(\"Couldn't convert string '%s' to float\", value)\n\t\t}\n\n\t\treturn num, nil\n\tcase float64:\n\t\treturn value, nil\n\tcase nil:\n\t\treturn 0.0, nil\n\tdefault:\n\t\treturn 0.0, fmt.Errorf(\"Couldn't convert type '%T' to float\", value)\n\t}\n\n\treturn 0.0, fmt.Errorf(\"Unknown parse error\")\n}\n\n\/\/ F is like Float, but never returns an error.\nfunc (c *Config) F(section, option string) float64 {\n\tv, _ := c.Float(section, option)\n\treturn v\n}\n\n\/\/ Merge merges two or more configuration files, and returns a new,\n\/\/ combined *Config type. Identical sections and options are overwritten\n\/\/ according to the order of definition.\nfunc Merge(conf ...*Config) (*Config, error) {\n\tc := new(Config)\n\tdata := make(map[string]map[string]interface{})\n\n\tfor _, current := range conf {\n\t\tif current == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor key, value := range *current {\n\t\t\tdata[key] = make(map[string]interface{})\n\t\t\tdata[key] = value\n\t\t}\n\t}\n\n\t*c = data\n\n\treturn c, nil\n}\n\n\/\/ Load reads the configuration file located in 'conf' and returns\n\/\/ a new Config type. If the configuration file cannot be found,\n\/\/ the function returns nil and an error message.\nfunc Load(conf string) (*Config, error) {\n\tc := new(Config)\n\n\tdata, err := parse(conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t*c = data\n\n\treturn c, nil\n}\n\n\/\/ Checks if 'option' exists under 'section' in the Config file.\nfunc (c *Config) exists(section, option string) (bool, error) {\n\tif c == nil {\n\t\treturn false, fmt.Errorf(\"config is invalid\")\n\t}\n\n\tif _, ok := (*c)[section]; !ok {\n\t\treturn false, fmt.Errorf(\"section '%s' not found\", section)\n\t}\n\n\tif _, ok := (*c)[section][option]; !ok {\n\t\treturn false, fmt.Errorf(\"option '%s' not found\", option)\n\t}\n\n\treturn true, nil\n}\n\n\/\/ Parse parses the configuration file in 'conf' and returns the data\n\/\/ as values mapped to options, mapped to sections.\nfunc parse(conf string) (map[string]map[string]interface{}, error) {\n\tc, err := goconfig.ReadConfigFile(conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := make(map[string]map[string]interface{})\n\n\tfor _, section := range c.GetSections() {\n\t\tdata[section] = make(map[string]interface{})\n\t\ts, _ := c.GetOptions(section)\n\n\t\tfor _, option := range s {\n\t\t\tv, err := c.GetString(section, option)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tdata[section][option] = v\n\t\t}\n\t}\n\n\treturn data, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package backend\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/coreos\/go-etcd\/etcd\"\n)\n\nconst (\n\tKeySeperator = \"\/\"\n)\n\ntype EtcdBackend struct {\n\tnamespace, address string\n\tclient *etcd.Client\n}\n\nfunc New(namespace, machines []string) *EtcdBackend {\n\treturn &EtcdBackend{\n\t\tnamespace: namespace,\n\t\taddress: address,\n\t\tclient: etcd.NewClient(machines),\n\t}\n}\n\nfunc key(components ...string) string {\n\treturn strings.Join(components, KeySeperator)\n}\n\nfunc (e *EtcdBackend) keyGroup(group string) string {\n\treturn key(e.namespace, group)\n}\n\nfunc (e *EtcdBackend) keyVariable(group, variable string) string {\n\treturn key(e.namespace, group, variable)\n}\n\nfunc (e *EtcdBackend) GetVariable(group, variable string) ([]byte, error) {\n\tresponse, err := e.client.Get(e.keyVariable(group, variable), false, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn base64.StdEncoding.DecodeString(response.Node.Value)\n}\n\nfunc (e *EtcdBackend) setVariable(group, variable string, value []byte, ttl uint64) error {\n\tencodedValue := base64.StdEncoding.EncodeToString(value)\n\t_, err := e.client.Set(e.keyVariable(group, variable), encodedValue, ttl)\n\treturn err\n}\n\nfunc (e *EtcdBackend) SetVariable(group, variable string, value []byte) error {\n\treturn e.setVariable(group, variable, value, 0)\n}\n\nfunc (e *EtcdBackend) RemoveVariable(group, variable string) error {\n\t_, err := e.client.Delete(e.keyVariable(group, variable), false)\n\treturn err\n}\n\nfunc (e *EtcdBackend) GetGroup(group string) (map[string][]byte, error) {\n\tkey := e.keyGroup(group)\n\tresponse, err := e.client.Get(key, false, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprefix := fmt.Sprintf(\"\/%s\/\", key)\n\tgroupMap := make(map[string][]byte)\n\tfor _, node := range response.Node.Nodes {\n\t\tvalue, err := base64.StdEncoding.DecodeString(node.Value)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tgroupMap[strings.TrimPrefix(node.Key, prefix)] = value\n\t}\n\n\treturn groupMap, nil\n}\n\nfunc (e *EtcdBackend) RemoveGroup(group string) error {\n\t_, err := e.client.Delete(e.keyGroup(group), true)\n\treturn err\n}\n<commit_msg>address is never used<commit_after>package backend\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/coreos\/go-etcd\/etcd\"\n)\n\nconst (\n\tKeySeperator = \"\/\"\n)\n\ntype EtcdBackend struct {\n\tnamespace, address string\n\tclient *etcd.Client\n}\n\nfunc NewEtcdBackend(namespace, machines []string) *EtcdBackend {\n\treturn &EtcdBackend{\n\t\tnamespace: namespace,\n\t\tclient: etcd.NewClient(machines),\n\t}\n}\n\nfunc key(components ...string) string {\n\treturn strings.Join(components, KeySeperator)\n}\n\nfunc (e *EtcdBackend) keyGroup(group string) string {\n\treturn key(e.namespace, group)\n}\n\nfunc (e *EtcdBackend) keyVariable(group, variable string) string {\n\treturn key(e.namespace, group, variable)\n}\n\nfunc (e *EtcdBackend) GetVariable(group, variable string) ([]byte, error) {\n\tresponse, err := e.client.Get(e.keyVariable(group, variable), false, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn base64.StdEncoding.DecodeString(response.Node.Value)\n}\n\nfunc (e *EtcdBackend) setVariable(group, variable string, value []byte, ttl uint64) error {\n\tencodedValue := base64.StdEncoding.EncodeToString(value)\n\t_, err := e.client.Set(e.keyVariable(group, variable), encodedValue, ttl)\n\treturn err\n}\n\nfunc (e *EtcdBackend) SetVariable(group, variable string, value []byte) error {\n\treturn e.setVariable(group, variable, value, 0)\n}\n\nfunc (e *EtcdBackend) RemoveVariable(group, variable string) error {\n\t_, err := e.client.Delete(e.keyVariable(group, variable), false)\n\treturn err\n}\n\nfunc (e *EtcdBackend) GetGroup(group string) (map[string][]byte, error) {\n\tkey := e.keyGroup(group)\n\tresponse, err := e.client.Get(key, false, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprefix := fmt.Sprintf(\"\/%s\/\", key)\n\tgroupMap := make(map[string][]byte)\n\tfor _, node := range response.Node.Nodes {\n\t\tvalue, err := base64.StdEncoding.DecodeString(node.Value)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tgroupMap[strings.TrimPrefix(node.Key, prefix)] = value\n\t}\n\n\treturn groupMap, nil\n}\n\nfunc (e *EtcdBackend) RemoveGroup(group string) error {\n\t_, err := e.client.Delete(e.keyGroup(group), true)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\tr \"github.com\/dancannon\/gorethink\"\n\t\"log\"\n\t\"net\/http\"\n)\n\nfunc main() {\n\tsession, err := r.Connect(r.ConnectOpts{\n\t\tAddress: RethinkHost,\n\t\tDatabase: \"chatserver\",\n\t})\n\tif err != nil {\n\t\tlog.Panic(err.Error())\n\t}\n\trouter := NewRouter(session)\n\n\trouter.Handle(\"channel add\", addChannel)\n\trouter.Handle(\"channel subscribe\", subscribeChannel)\n\trouter.Handle(\"channel unsubscribe\", unsubscribeChannel)\n\n\t\/\/ User routes\n\t\/\/ router.Handle(\"user edit\", editUser)\n\t\/\/ router.Handle(\"user subscribe\", subscribeUser)\n\t\/\/ router.Handle(\"user unsubscribe\", unsubscribeIser)\n\n\t\/\/ Message routes\n\n\thttp.Handle(\"\/\", router)\n\thttp.ListenAndServe(BackendPort, nil)\n}\n<commit_msg>Running msg<commit_after>package main\n\nimport (\n\tr \"github.com\/dancannon\/gorethink\"\n\t\"log\"\n\t\"net\/http\"\n)\n\nfunc main() {\n\tsession, err := r.Connect(r.ConnectOpts{\n\t\tAddress: RethinkHost,\n\t\tDatabase: \"chatserver\",\n\t})\n\tif err != nil {\n\t\tlog.Panic(err.Error())\n\t}\n\trouter := NewRouter(session)\n\n\trouter.Handle(\"channel add\", addChannel)\n\trouter.Handle(\"channel subscribe\", subscribeChannel)\n\trouter.Handle(\"channel unsubscribe\", unsubscribeChannel)\n\n\t\/\/ User routes\n\t\/\/ router.Handle(\"user edit\", editUser)\n\t\/\/ router.Handle(\"user subscribe\", subscribeUser)\n\t\/\/ router.Handle(\"user unsubscribe\", unsubscribeIser)\n\n\t\/\/ Message routes\n\tprintln(\"Running...\")\n\thttp.Handle(\"\/\", router)\n\thttp.ListenAndServe(BackendPort, nil)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage backup_test\n\nimport (\n\t\"errors\"\n\t\"github.com\/jacobsa\/aws\/sdb\"\n\t\"github.com\/jacobsa\/aws\/sdb\/mock\"\n\t\"github.com\/jacobsa\/comeback\/backup\"\n\t\"github.com\/jacobsa\/comeback\/crypto\"\n\t\"github.com\/jacobsa\/comeback\/crypto\/mock\"\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t\"github.com\/jacobsa\/oglemock\"\n\t. \"github.com\/jacobsa\/ogletest\"\n\t\"testing\"\n)\n\nfunc TestRegistry(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype registryTest struct {\n\tcrypter mock_crypto.MockCrypter\n\tdomain mock_sdb.MockDomain\n}\n\nfunc (t *registryTest) SetUp(i *TestInfo) {\n\tt.crypter = mock_crypto.NewMockCrypter(i.MockController, \"crypter\")\n\tt.domain = mock_sdb.NewMockDomain(i.MockController, \"domain\")\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ NewRegistry\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype NewRegistryTest struct {\n\tregistryTest\n\n\tr backup.Registry\n\terr error\n}\n\nfunc init() { RegisterTestSuite(&NewRegistryTest{}) }\n\nfunc (t *NewRegistryTest) callConstructor() {\n\tt.r, t.err = backup.NewRegistry(t.crypter, t.domain)\n}\n\nfunc (t *NewRegistryTest) CallsGetAttributes() {\n\t\/\/ Domain\n\tExpectCall(t.domain, \"GetAttributes\")(\n\t\t\"comeback_marker\",\n\t\tfalse,\n\t\tElementsAre(\"encrypted_data\")).\n\t\tWillOnce(oglemock.Return(nil, errors.New(\"\")))\n\n\t\/\/ Call\n\tt.callConstructor()\n}\n\nfunc (t *NewRegistryTest) GetAttributesReturnsError() {\n\t\/\/ Domain\n\tExpectCall(t.domain, \"GetAttributes\")(Any(), Any(), Any()).\n\t\tWillOnce(oglemock.Return(nil, errors.New(\"taco\")))\n\n\t\/\/ Call\n\tt.callConstructor()\n\n\tExpectThat(t.err, Error(HasSubstr(\"GetAttributes\")))\n\tExpectThat(t.err, Error(HasSubstr(\"taco\")))\n}\n\nfunc (t *NewRegistryTest) CallsDecrypt() {\n\t\/\/ Domain\n\tattr := sdb.Attribute{Name: \"encrypted_data\", Value: \"taco\"}\n\tExpectCall(t.domain, \"GetAttributes\")(Any(), Any(), Any()).\n\t\tWillOnce(oglemock.Return([]sdb.Attribute{attr}, nil))\n\n\t\/\/ Crypter\n\tExpectCall(t.crypter, \"Decrypt\")(DeepEquals([]byte(\"taco\"))).\n\t\tWillOnce(oglemock.Return(nil, errors.New(\"\")))\n\n\t\/\/ Call\n\tt.callConstructor()\n}\n\nfunc (t *NewRegistryTest) DecryptReturnsGenericError() {\n\t\/\/ Domain\n\tattr := sdb.Attribute{Name: \"encrypted_data\"}\n\tExpectCall(t.domain, \"GetAttributes\")(Any(), Any(), Any()).\n\t\tWillOnce(oglemock.Return([]sdb.Attribute{attr}, nil))\n\n\t\/\/ Crypter\n\tExpectCall(t.crypter, \"Decrypt\")(Any()).\n\t\tWillOnce(oglemock.Return(nil, errors.New(\"taco\")))\n\n\t\/\/ Call\n\tt.callConstructor()\n\n\tExpectThat(t.err, Error(HasSubstr(\"Decrypt\")))\n\tExpectThat(t.err, Error(HasSubstr(\"taco\")))\n}\n\nfunc (t *NewRegistryTest) DecryptReturnsNotAuthenticError() {\n\t\/\/ Domain\n\tattr := sdb.Attribute{Name: \"encrypted_data\"}\n\tExpectCall(t.domain, \"GetAttributes\")(Any(), Any(), Any()).\n\t\tWillOnce(oglemock.Return([]sdb.Attribute{attr}, nil))\n\n\t\/\/ Crypter\n\tExpectCall(t.crypter, \"Decrypt\")(Any()).\n\t\tWillOnce(oglemock.Return(nil, &crypto.NotAuthenticError{}))\n\n\t\/\/ Call\n\tt.callConstructor()\n\n\t_, ok := t.err.(*backup.IncompatibleCrypterError)\n\tAssertTrue(ok, \"Error: %v\", t.err)\n\n\tExpectThat(t.err, Error(HasSubstr(\"crypter\")))\n\tExpectThat(t.err, Error(HasSubstr(\"incompatible\")))\n}\n\nfunc (t *NewRegistryTest) DecryptSucceeds() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *NewRegistryTest) CallsEncrypt() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *NewRegistryTest) EncryptReturnsError() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *NewRegistryTest) CallsPutAttributes() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *NewRegistryTest) PutAttributesReturnsError() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *NewRegistryTest) PutAttributesSucceeds() {\n\tExpectEq(\"TODO\", \"\")\n}\n<commit_msg>NewRegistryTest.DecryptSucceeds<commit_after>\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage backup_test\n\nimport (\n\t\"errors\"\n\t\"github.com\/jacobsa\/aws\/sdb\"\n\t\"github.com\/jacobsa\/aws\/sdb\/mock\"\n\t\"github.com\/jacobsa\/comeback\/backup\"\n\t\"github.com\/jacobsa\/comeback\/crypto\"\n\t\"github.com\/jacobsa\/comeback\/crypto\/mock\"\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t\"github.com\/jacobsa\/oglemock\"\n\t. \"github.com\/jacobsa\/ogletest\"\n\t\"testing\"\n)\n\nfunc TestRegistry(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype registryTest struct {\n\tcrypter mock_crypto.MockCrypter\n\tdomain mock_sdb.MockDomain\n}\n\nfunc (t *registryTest) SetUp(i *TestInfo) {\n\tt.crypter = mock_crypto.NewMockCrypter(i.MockController, \"crypter\")\n\tt.domain = mock_sdb.NewMockDomain(i.MockController, \"domain\")\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ NewRegistry\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype NewRegistryTest struct {\n\tregistryTest\n\n\tregistry backup.Registry\n\terr error\n}\n\nfunc init() { RegisterTestSuite(&NewRegistryTest{}) }\n\nfunc (t *NewRegistryTest) callConstructor() {\n\tt.registry, t.err = backup.NewRegistry(t.crypter, t.domain)\n}\n\nfunc (t *NewRegistryTest) CallsGetAttributes() {\n\t\/\/ Domain\n\tExpectCall(t.domain, \"GetAttributes\")(\n\t\t\"comeback_marker\",\n\t\tfalse,\n\t\tElementsAre(\"encrypted_data\")).\n\t\tWillOnce(oglemock.Return(nil, errors.New(\"\")))\n\n\t\/\/ Call\n\tt.callConstructor()\n}\n\nfunc (t *NewRegistryTest) GetAttributesReturnsError() {\n\t\/\/ Domain\n\tExpectCall(t.domain, \"GetAttributes\")(Any(), Any(), Any()).\n\t\tWillOnce(oglemock.Return(nil, errors.New(\"taco\")))\n\n\t\/\/ Call\n\tt.callConstructor()\n\n\tExpectThat(t.err, Error(HasSubstr(\"GetAttributes\")))\n\tExpectThat(t.err, Error(HasSubstr(\"taco\")))\n}\n\nfunc (t *NewRegistryTest) CallsDecrypt() {\n\t\/\/ Domain\n\tattr := sdb.Attribute{Name: \"encrypted_data\", Value: \"taco\"}\n\tExpectCall(t.domain, \"GetAttributes\")(Any(), Any(), Any()).\n\t\tWillOnce(oglemock.Return([]sdb.Attribute{attr}, nil))\n\n\t\/\/ Crypter\n\tExpectCall(t.crypter, \"Decrypt\")(DeepEquals([]byte(\"taco\"))).\n\t\tWillOnce(oglemock.Return(nil, errors.New(\"\")))\n\n\t\/\/ Call\n\tt.callConstructor()\n}\n\nfunc (t *NewRegistryTest) DecryptReturnsGenericError() {\n\t\/\/ Domain\n\tattr := sdb.Attribute{Name: \"encrypted_data\"}\n\tExpectCall(t.domain, \"GetAttributes\")(Any(), Any(), Any()).\n\t\tWillOnce(oglemock.Return([]sdb.Attribute{attr}, nil))\n\n\t\/\/ Crypter\n\tExpectCall(t.crypter, \"Decrypt\")(Any()).\n\t\tWillOnce(oglemock.Return(nil, errors.New(\"taco\")))\n\n\t\/\/ Call\n\tt.callConstructor()\n\n\tExpectThat(t.err, Error(HasSubstr(\"Decrypt\")))\n\tExpectThat(t.err, Error(HasSubstr(\"taco\")))\n}\n\nfunc (t *NewRegistryTest) DecryptReturnsNotAuthenticError() {\n\t\/\/ Domain\n\tattr := sdb.Attribute{Name: \"encrypted_data\"}\n\tExpectCall(t.domain, \"GetAttributes\")(Any(), Any(), Any()).\n\t\tWillOnce(oglemock.Return([]sdb.Attribute{attr}, nil))\n\n\t\/\/ Crypter\n\tExpectCall(t.crypter, \"Decrypt\")(Any()).\n\t\tWillOnce(oglemock.Return(nil, &crypto.NotAuthenticError{}))\n\n\t\/\/ Call\n\tt.callConstructor()\n\n\t_, ok := t.err.(*backup.IncompatibleCrypterError)\n\tAssertTrue(ok, \"Error: %v\", t.err)\n\n\tExpectThat(t.err, Error(HasSubstr(\"crypter\")))\n\tExpectThat(t.err, Error(HasSubstr(\"incompatible\")))\n}\n\nfunc (t *NewRegistryTest) DecryptSucceeds() {\n\t\/\/ Domain\n\tattr := sdb.Attribute{Name: \"encrypted_data\"}\n\tExpectCall(t.domain, \"GetAttributes\")(Any(), Any(), Any()).\n\t\tWillOnce(oglemock.Return([]sdb.Attribute{attr}, nil))\n\n\t\/\/ Crypter\n\tExpectCall(t.crypter, \"Decrypt\")(Any()).\n\t\tWillOnce(oglemock.Return([]byte{}, nil))\n\n\t\/\/ Call\n\tt.callConstructor()\n\n\tAssertEq(nil, t.err)\n\tExpectNe(nil, t.registry)\n}\n\nfunc (t *NewRegistryTest) CallsEncrypt() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *NewRegistryTest) EncryptReturnsError() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *NewRegistryTest) CallsPutAttributes() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *NewRegistryTest) PutAttributesReturnsError() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *NewRegistryTest) PutAttributesSucceeds() {\n\tExpectEq(\"TODO\", \"\")\n}\n<|endoftext|>"} {"text":"<commit_before>package udp_tracker\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"bitbucket.org\/anacrolix\/go.torrent\/tracker\"\n)\n\ntype Action int32\n\nconst (\n\tConnect Action = iota\n\tAnnounce\n\tScrape\n\tError\n)\n\ntype ConnectionRequest struct {\n\tConnectionId int64\n\tAction int32\n\tTransctionId int32\n}\n\ntype ConnectionResponse struct {\n\tConnectionId int64\n}\n\ntype ResponseHeader struct {\n\tAction Action\n\tTransactionId int32\n}\n\ntype RequestHeader struct {\n\tConnectionId int64\n\tAction Action\n\tTransactionId int32\n}\n\ntype AnnounceResponseHeader struct {\n\tInterval int32\n\tLeechers int32\n\tSeeders int32\n}\n\ntype Peer struct {\n\tIP [4]byte\n\tPort uint16\n}\n\nfunc init() {\n\ttracker.RegisterClientScheme(\"udp\", newClient)\n}\n\nfunc newClient(url *url.URL) tracker.Client {\n\treturn &client{\n\t\turl: url,\n\t}\n}\n\nfunc newTransactionId() int32 {\n\treturn int32(rand.Uint32())\n}\n\nfunc timeout(contiguousTimeouts int) (d time.Duration) {\n\tif contiguousTimeouts > 8 {\n\t\tcontiguousTimeouts = 8\n\t}\n\td = 15 * time.Second\n\tfor ; contiguousTimeouts > 0; contiguousTimeouts-- {\n\t\td *= 2\n\t}\n\treturn\n}\n\ntype client struct {\n\tcontiguousTimeouts int\n\tconnectionIdReceived time.Time\n\tconnectionId int64\n\tsocket net.Conn\n\turl *url.URL\n}\n\nfunc (c *client) String() string {\n\treturn c.url.String()\n}\n\nfunc (c *client) Announce(req *tracker.AnnounceRequest) (res tracker.AnnounceResponse, err error) {\n\tif !c.connected() {\n\t\terr = tracker.ErrNotConnected\n\t\treturn\n\t}\n\tb, err := c.request(Announce, req)\n\tif err != nil {\n\t\treturn\n\t}\n\tvar h AnnounceResponseHeader\n\terr = readBody(b, &h)\n\tif err != nil {\n\t\treturn\n\t}\n\tres.Interval = h.Interval\n\tres.Leechers = h.Leechers\n\tres.Seeders = h.Seeders\n\tfor {\n\t\tvar p Peer\n\t\terr = binary.Read(b, binary.BigEndian, &p)\n\t\tswitch err {\n\t\tcase nil:\n\t\tcase io.EOF:\n\t\t\terr = nil\n\t\t\tfallthrough\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t\tres.Peers = append(res.Peers, tracker.Peer{\n\t\t\tIP: p.IP[:],\n\t\t\tPort: int(p.Port),\n\t\t})\n\t}\n\treturn\n}\n\nfunc (c *client) write(h *RequestHeader, body interface{}) (err error) {\n\tbuf := &bytes.Buffer{}\n\terr = binary.Write(buf, binary.BigEndian, h)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif body != nil {\n\t\terr = binary.Write(buf, binary.BigEndian, body)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tn, err := c.socket.Write(buf.Bytes())\n\tif err != nil {\n\t\treturn\n\t}\n\tif n != buf.Len() {\n\t\tpanic(\"write should send all or error\")\n\t}\n\treturn\n}\n\nfunc (c *client) request(action Action, args interface{}) (responseBody *bytes.Reader, err error) {\n\ttid := newTransactionId()\n\terr = c.write(&RequestHeader{\n\t\tConnectionId: c.connectionId,\n\t\tAction: action,\n\t\tTransactionId: tid,\n\t}, args)\n\tif err != nil {\n\t\treturn\n\t}\n\tc.socket.SetDeadline(time.Now().Add(timeout(c.contiguousTimeouts)))\n\tb := make([]byte, 0x10000) \/\/ IP limits packet size to 64KB\n\tfor {\n\t\tvar n int\n\t\tn, err = c.socket.Read(b)\n\t\tif opE, ok := err.(*net.OpError); ok {\n\t\t\tif opE.Timeout() {\n\t\t\t\tc.contiguousTimeouts++\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tbuf := bytes.NewBuffer(b[:n])\n\t\tvar h ResponseHeader\n\t\terr = binary.Read(buf, binary.BigEndian, &h)\n\t\tswitch err {\n\t\tcase io.ErrUnexpectedEOF:\n\t\t\tcontinue\n\t\tcase nil:\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t\tif h.TransactionId != tid {\n\t\t\tcontinue\n\t\t}\n\t\tc.contiguousTimeouts = 0\n\t\tif h.Action == Error {\n\t\t\terr = errors.New(buf.String())\n\t\t}\n\t\tresponseBody = bytes.NewReader(buf.Bytes())\n\t\treturn\n\t}\n}\n\nfunc readBody(r *bytes.Reader, data ...interface{}) (err error) {\n\tfor _, datum := range data {\n\t\terr = binary.Read(r, binary.BigEndian, datum)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}\n\nfunc (c *client) connected() bool {\n\treturn !c.connectionIdReceived.IsZero() && time.Now().Before(c.connectionIdReceived.Add(time.Minute))\n}\n\nfunc (c *client) Connect() (err error) {\n\tif c.connected() {\n\t\treturn nil\n\t}\n\tc.connectionId = 0x41727101980\n\tif c.socket == nil {\n\t\tc.socket, err = net.Dial(\"udp\", c.url.Host)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tb, err := c.request(Connect, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\tvar res ConnectionResponse\n\terr = readBody(b, &res)\n\tif err != nil {\n\t\treturn\n\t}\n\tc.connectionId = res.ConnectionId\n\tc.connectionIdReceived = time.Now()\n\treturn\n}\n<commit_msg>udp_tracker: If a read deadline occurs, writes were failing with i\/o timeout<commit_after>package udp_tracker\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"bitbucket.org\/anacrolix\/go.torrent\/tracker\"\n)\n\ntype Action int32\n\nconst (\n\tConnect Action = iota\n\tAnnounce\n\tScrape\n\tError\n)\n\ntype ConnectionRequest struct {\n\tConnectionId int64\n\tAction int32\n\tTransctionId int32\n}\n\ntype ConnectionResponse struct {\n\tConnectionId int64\n}\n\ntype ResponseHeader struct {\n\tAction Action\n\tTransactionId int32\n}\n\ntype RequestHeader struct {\n\tConnectionId int64\n\tAction Action\n\tTransactionId int32\n}\n\ntype AnnounceResponseHeader struct {\n\tInterval int32\n\tLeechers int32\n\tSeeders int32\n}\n\ntype Peer struct {\n\tIP [4]byte\n\tPort uint16\n}\n\nfunc init() {\n\ttracker.RegisterClientScheme(\"udp\", newClient)\n}\n\nfunc newClient(url *url.URL) tracker.Client {\n\treturn &client{\n\t\turl: url,\n\t}\n}\n\nfunc newTransactionId() int32 {\n\treturn int32(rand.Uint32())\n}\n\nfunc timeout(contiguousTimeouts int) (d time.Duration) {\n\tif contiguousTimeouts > 8 {\n\t\tcontiguousTimeouts = 8\n\t}\n\td = 15 * time.Second\n\tfor ; contiguousTimeouts > 0; contiguousTimeouts-- {\n\t\td *= 2\n\t}\n\treturn\n}\n\ntype client struct {\n\tcontiguousTimeouts int\n\tconnectionIdReceived time.Time\n\tconnectionId int64\n\tsocket net.Conn\n\turl *url.URL\n}\n\nfunc (c *client) String() string {\n\treturn c.url.String()\n}\n\nfunc (c *client) Announce(req *tracker.AnnounceRequest) (res tracker.AnnounceResponse, err error) {\n\tif !c.connected() {\n\t\terr = tracker.ErrNotConnected\n\t\treturn\n\t}\n\tb, err := c.request(Announce, req)\n\tif err != nil {\n\t\treturn\n\t}\n\tvar h AnnounceResponseHeader\n\terr = readBody(b, &h)\n\tif err != nil {\n\t\treturn\n\t}\n\tres.Interval = h.Interval\n\tres.Leechers = h.Leechers\n\tres.Seeders = h.Seeders\n\tfor {\n\t\tvar p Peer\n\t\terr = binary.Read(b, binary.BigEndian, &p)\n\t\tswitch err {\n\t\tcase nil:\n\t\tcase io.EOF:\n\t\t\terr = nil\n\t\t\tfallthrough\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t\tres.Peers = append(res.Peers, tracker.Peer{\n\t\t\tIP: p.IP[:],\n\t\t\tPort: int(p.Port),\n\t\t})\n\t}\n\treturn\n}\n\nfunc (c *client) write(h *RequestHeader, body interface{}) (err error) {\n\tbuf := &bytes.Buffer{}\n\terr = binary.Write(buf, binary.BigEndian, h)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif body != nil {\n\t\terr = binary.Write(buf, binary.BigEndian, body)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tn, err := c.socket.Write(buf.Bytes())\n\tif err != nil {\n\t\treturn\n\t}\n\tif n != buf.Len() {\n\t\tpanic(\"write should send all or error\")\n\t}\n\treturn\n}\n\nfunc (c *client) request(action Action, args interface{}) (responseBody *bytes.Reader, err error) {\n\ttid := newTransactionId()\n\terr = c.write(&RequestHeader{\n\t\tConnectionId: c.connectionId,\n\t\tAction: action,\n\t\tTransactionId: tid,\n\t}, args)\n\tif err != nil {\n\t\treturn\n\t}\n\tc.socket.SetReadDeadline(time.Now().Add(timeout(c.contiguousTimeouts)))\n\tb := make([]byte, 0x10000) \/\/ IP limits packet size to 64KB\n\tfor {\n\t\tvar n int\n\t\tn, err = c.socket.Read(b)\n\t\tif opE, ok := err.(*net.OpError); ok {\n\t\t\tif opE.Timeout() {\n\t\t\t\tc.contiguousTimeouts++\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tbuf := bytes.NewBuffer(b[:n])\n\t\tvar h ResponseHeader\n\t\terr = binary.Read(buf, binary.BigEndian, &h)\n\t\tswitch err {\n\t\tcase io.ErrUnexpectedEOF:\n\t\t\tcontinue\n\t\tcase nil:\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t\tif h.TransactionId != tid {\n\t\t\tcontinue\n\t\t}\n\t\tc.contiguousTimeouts = 0\n\t\tif h.Action == Error {\n\t\t\terr = errors.New(buf.String())\n\t\t}\n\t\tresponseBody = bytes.NewReader(buf.Bytes())\n\t\treturn\n\t}\n}\n\nfunc readBody(r *bytes.Reader, data ...interface{}) (err error) {\n\tfor _, datum := range data {\n\t\terr = binary.Read(r, binary.BigEndian, datum)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}\n\nfunc (c *client) connected() bool {\n\treturn !c.connectionIdReceived.IsZero() && time.Now().Before(c.connectionIdReceived.Add(time.Minute))\n}\n\nfunc (c *client) Connect() (err error) {\n\tif c.connected() {\n\t\treturn nil\n\t}\n\tc.connectionId = 0x41727101980\n\tif c.socket == nil {\n\t\tc.socket, err = net.Dial(\"udp\", c.url.Host)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tb, err := c.request(Connect, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\tvar res ConnectionResponse\n\terr = readBody(b, &res)\n\tif err != nil {\n\t\treturn\n\t}\n\tc.connectionId = res.ConnectionId\n\tc.connectionIdReceived = time.Now()\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package cli contains helper functions related to flag parsing and logging.\npackage cli\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/coreos\/go-semver\/semver\"\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/jessevdk\/go-flags\"\n)\n\n\/\/ ParseFlags parses the app's flags and returns the parser, any extra arguments, and any error encountered.\n\/\/ It may exit if certain options are encountered (eg. --help).\nfunc ParseFlags(appname string, data interface{}, args []string) (*flags.Parser, []string, error) {\n\tparser := flags.NewNamedParser(path.Base(args[0]), flags.HelpFlag|flags.PassDoubleDash)\n\tparser.AddGroup(appname+\" options\", \"\", data)\n\textraArgs, err := parser.ParseArgs(args[1:])\n\tif err != nil {\n\t\tif err.(*flags.Error).Type == flags.ErrHelp {\n\t\t\twriteUsage(data)\n\t\t\tfmt.Printf(\"%s\\n\", err)\n\t\t\tos.Exit(0)\n\t\t} else if err.(*flags.Error).Type == flags.ErrUnknownFlag && strings.Contains(err.(*flags.Error).Message, \"`halp'\") {\n\t\t\tfmt.Printf(\"Hmmmmm, hows can I halp you?\\n\")\n\t\t\twriteUsage(data)\n\t\t\tparser.WriteHelp(os.Stderr)\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\treturn parser, extraArgs, err\n}\n\n\/\/ ParseFlagsOrDie, as the name suggests, parses the app's flags and dies if unsuccessful.\n\/\/ Also dies if any unexpected arguments are passed.\nfunc ParseFlagsOrDie(appname, version string, data interface{}) *flags.Parser {\n\treturn ParseFlagsFromArgsOrDie(appname, version, data, os.Args)\n}\n\n\/\/ ParseFlagsFromArgsOrDie is similar to ParseFlagsOrDie but allows control over the\n\/\/ flags passed.\nfunc ParseFlagsFromArgsOrDie(appname, version string, data interface{}, args []string) *flags.Parser {\n\tparser, extraArgs, err := ParseFlags(appname, data, args)\n\tif err != nil && err.(*flags.Error).Type == flags.ErrUnknownFlag && strings.Contains(err.(*flags.Error).Message, \"`version'\") {\n\t\tfmt.Printf(\"%s version %s\\n\", appname, version)\n\t\tos.Exit(0) \/\/ Ignore other errors if --version was passed.\n\t}\n\tif err != nil {\n\t\twriteUsage(data)\n\t\tparser.WriteHelp(os.Stderr)\n\t\tfmt.Printf(\"\\n%s\\n\", err)\n\t\tos.Exit(1)\n\t} else if len(extraArgs) > 0 {\n\t\twriteUsage(data)\n\t\tfmt.Printf(\"Unknown option %s\\n\", extraArgs)\n\t\tparser.WriteHelp(os.Stderr)\n\t\tos.Exit(1)\n\t}\n\treturn parser\n}\n\n\/\/ writeUsage prints any usage specified on the flag struct.\nfunc writeUsage(opts interface{}) {\n\tif s := getUsage(opts); s != \"\" {\n\t\tfmt.Println(s)\n\t\tfmt.Println(\"\") \/\/ extra blank line\n\t}\n}\n\n\/\/ getUsage extracts any usage specified on a flag struct.\n\/\/ It is set on a field named Usage, either by value or in a struct tag named usage.\nfunc getUsage(opts interface{}) string {\n\tif field := reflect.ValueOf(opts).Elem().FieldByName(\"Usage\"); field.IsValid() && field.String() != \"\" {\n\t\treturn strings.TrimSpace(field.String())\n\t}\n\tif field, present := reflect.TypeOf(opts).Elem().FieldByName(\"Usage\"); present {\n\t\treturn field.Tag.Get(\"usage\")\n\t}\n\treturn \"\"\n}\n\n\/\/ A ByteSize is used for flags that represent some quantity of bytes that can be\n\/\/ passed as human-readable quantities (eg. \"10G\").\ntype ByteSize uint64\n\n\/\/ UnmarshalFlag implements the flags.Unmarshaler interface.\nfunc (b *ByteSize) UnmarshalFlag(in string) error {\n\tb2, err := humanize.ParseBytes(in)\n\t*b = ByteSize(b2)\n\treturn err\n}\n\n\/\/ UnmarshalText implements the encoding.TextUnmarshaler interface\nfunc (b *ByteSize) UnmarshalText(text []byte) error {\n\treturn b.UnmarshalFlag(string(text))\n}\n\n\/\/ A Duration is used for flags that represent a time duration; it's just a wrapper\n\/\/ around time.Duration that implements the flags.Unmarshaler and\n\/\/ encoding.TextUnmarshaler interfaces.\ntype Duration time.Duration\n\n\/\/ UnmarshalFlag implements the flags.Unmarshaler interface.\nfunc (d *Duration) UnmarshalFlag(in string) error {\n\td2, err := time.ParseDuration(in)\n\t\/\/ For backwards compatibility, treat missing units as seconds.\n\tif err != nil {\n\t\tif d3, err := strconv.Atoi(in); err == nil {\n\t\t\t*d = Duration(time.Duration(d3) * time.Second)\n\t\t\treturn nil\n\t\t}\n\t}\n\t*d = Duration(d2)\n\treturn err\n}\n\n\/\/ UnmarshalText implements the encoding.TextUnmarshaler interface\nfunc (duration *Duration) UnmarshalText(text []byte) error {\n\treturn duration.UnmarshalFlag(string(text))\n}\n\n\/\/ A URL is used for flags or config fields that represent a URL.\n\/\/ It's just a string because it's more convenient that way; we haven't needed them as a net.URL so far.\ntype URL string\n\n\/\/ UnmarshalFlag implements the flags.Unmarshaler interface.\nfunc (u *URL) UnmarshalFlag(in string) error {\n\tif _, err := url.Parse(in); err != nil {\n\t\treturn err\n\t}\n\t*u = URL(in)\n\treturn nil\n}\n\n\/\/ UnmarshalText implements the encoding.TextUnmarshaler interface\nfunc (u *URL) UnmarshalText(text []byte) error {\n\treturn u.UnmarshalFlag(string(text))\n}\n\n\/\/ String implements the fmt.Stringer interface\nfunc (u *URL) String() string {\n\treturn string(*u)\n}\n\n\/\/ A Version is an extension to semver.Version extending it with the ability to\n\/\/ recognise >= prefixes.\ntype Version struct {\n\tsemver.Version\n\tIsGTE bool\n}\n\n\/\/ UnmarshalText implements the encoding.TextUnmarshaler interface\nfunc (v *Version) UnmarshalText(text []byte) error {\n\treturn v.UnmarshalFlag(string(text))\n}\n\n\/\/ UnmarshalFlag implements the flags.Unmarshaler interface.\nfunc (v *Version) UnmarshalFlag(in string) error {\n\tif strings.HasPrefix(in, \">=\") {\n\t\tv.IsGTE = true\n\t\tin = strings.TrimSpace(strings.TrimPrefix(in, \">=\"))\n\t}\n\treturn v.Set(in)\n}\n\n\/\/ String implements the fmt.Stringer interface\nfunc (v Version) String() string {\n\tif v.IsGTE {\n\t\treturn \">=\" + v.Version.String()\n\t}\n\treturn v.Version.String()\n}\n\n\/\/ VersionString returns just the version, without any preceding >=.\nfunc (v *Version) VersionString() string {\n\treturn v.Version.String()\n}\n\n\/\/ Semver converts a Version to a semver.Version\nfunc (v *Version) Semver() semver.Version {\n\treturn v.Version\n}\n<commit_msg>Fix a panic in flags unmarshalling<commit_after>\/\/ Package cli contains helper functions related to flag parsing and logging.\npackage cli\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/coreos\/go-semver\/semver\"\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/jessevdk\/go-flags\"\n)\n\n\/\/ ParseFlags parses the app's flags and returns the parser, any extra arguments, and any error encountered.\n\/\/ It may exit if certain options are encountered (eg. --help).\nfunc ParseFlags(appname string, data interface{}, args []string) (*flags.Parser, []string, error) {\n\tparser := flags.NewNamedParser(path.Base(args[0]), flags.HelpFlag|flags.PassDoubleDash)\n\tparser.AddGroup(appname+\" options\", \"\", data)\n\textraArgs, err := parser.ParseArgs(args[1:])\n\tif err != nil {\n\t\tif err.(*flags.Error).Type == flags.ErrHelp {\n\t\t\twriteUsage(data)\n\t\t\tfmt.Printf(\"%s\\n\", err)\n\t\t\tos.Exit(0)\n\t\t} else if err.(*flags.Error).Type == flags.ErrUnknownFlag && strings.Contains(err.(*flags.Error).Message, \"`halp'\") {\n\t\t\tfmt.Printf(\"Hmmmmm, hows can I halp you?\\n\")\n\t\t\twriteUsage(data)\n\t\t\tparser.WriteHelp(os.Stderr)\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\treturn parser, extraArgs, err\n}\n\n\/\/ ParseFlagsOrDie, as the name suggests, parses the app's flags and dies if unsuccessful.\n\/\/ Also dies if any unexpected arguments are passed.\nfunc ParseFlagsOrDie(appname, version string, data interface{}) *flags.Parser {\n\treturn ParseFlagsFromArgsOrDie(appname, version, data, os.Args)\n}\n\n\/\/ ParseFlagsFromArgsOrDie is similar to ParseFlagsOrDie but allows control over the\n\/\/ flags passed.\nfunc ParseFlagsFromArgsOrDie(appname, version string, data interface{}, args []string) *flags.Parser {\n\tparser, extraArgs, err := ParseFlags(appname, data, args)\n\tif err != nil && err.(*flags.Error).Type == flags.ErrUnknownFlag && strings.Contains(err.(*flags.Error).Message, \"`version'\") {\n\t\tfmt.Printf(\"%s version %s\\n\", appname, version)\n\t\tos.Exit(0) \/\/ Ignore other errors if --version was passed.\n\t}\n\tif err != nil {\n\t\twriteUsage(data)\n\t\tparser.WriteHelp(os.Stderr)\n\t\tfmt.Printf(\"\\n%s\\n\", err)\n\t\tos.Exit(1)\n\t} else if len(extraArgs) > 0 {\n\t\twriteUsage(data)\n\t\tfmt.Printf(\"Unknown option %s\\n\", extraArgs)\n\t\tparser.WriteHelp(os.Stderr)\n\t\tos.Exit(1)\n\t}\n\treturn parser\n}\n\n\/\/ writeUsage prints any usage specified on the flag struct.\nfunc writeUsage(opts interface{}) {\n\tif s := getUsage(opts); s != \"\" {\n\t\tfmt.Println(s)\n\t\tfmt.Println(\"\") \/\/ extra blank line\n\t}\n}\n\n\/\/ getUsage extracts any usage specified on a flag struct.\n\/\/ It is set on a field named Usage, either by value or in a struct tag named usage.\nfunc getUsage(opts interface{}) string {\n\tif field := reflect.ValueOf(opts).Elem().FieldByName(\"Usage\"); field.IsValid() && field.String() != \"\" {\n\t\treturn strings.TrimSpace(field.String())\n\t}\n\tif field, present := reflect.TypeOf(opts).Elem().FieldByName(\"Usage\"); present {\n\t\treturn field.Tag.Get(\"usage\")\n\t}\n\treturn \"\"\n}\n\n\/\/ A ByteSize is used for flags that represent some quantity of bytes that can be\n\/\/ passed as human-readable quantities (eg. \"10G\").\ntype ByteSize uint64\n\n\/\/ UnmarshalFlag implements the flags.Unmarshaler interface.\nfunc (b *ByteSize) UnmarshalFlag(in string) error {\n\tb2, err := humanize.ParseBytes(in)\n\t*b = ByteSize(b2)\n\treturn flagsError(err)\n}\n\n\/\/ UnmarshalText implements the encoding.TextUnmarshaler interface\nfunc (b *ByteSize) UnmarshalText(text []byte) error {\n\treturn b.UnmarshalFlag(string(text))\n}\n\n\/\/ A Duration is used for flags that represent a time duration; it's just a wrapper\n\/\/ around time.Duration that implements the flags.Unmarshaler and\n\/\/ encoding.TextUnmarshaler interfaces.\ntype Duration time.Duration\n\n\/\/ UnmarshalFlag implements the flags.Unmarshaler interface.\nfunc (d *Duration) UnmarshalFlag(in string) error {\n\td2, err := time.ParseDuration(in)\n\t\/\/ For backwards compatibility, treat missing units as seconds.\n\tif err != nil {\n\t\tif d3, err := strconv.Atoi(in); err == nil {\n\t\t\t*d = Duration(time.Duration(d3) * time.Second)\n\t\t\treturn nil\n\t\t}\n\t}\n\t*d = Duration(d2)\n\treturn flagsError(err)\n}\n\n\/\/ UnmarshalText implements the encoding.TextUnmarshaler interface\nfunc (duration *Duration) UnmarshalText(text []byte) error {\n\treturn duration.UnmarshalFlag(string(text))\n}\n\n\/\/ A URL is used for flags or config fields that represent a URL.\n\/\/ It's just a string because it's more convenient that way; we haven't needed them as a net.URL so far.\ntype URL string\n\n\/\/ UnmarshalFlag implements the flags.Unmarshaler interface.\nfunc (u *URL) UnmarshalFlag(in string) error {\n\tif _, err := url.Parse(in); err != nil {\n\t\treturn flagsError(err)\n\t}\n\t*u = URL(in)\n\treturn nil\n}\n\n\/\/ UnmarshalText implements the encoding.TextUnmarshaler interface\nfunc (u *URL) UnmarshalText(text []byte) error {\n\treturn u.UnmarshalFlag(string(text))\n}\n\n\/\/ String implements the fmt.Stringer interface\nfunc (u *URL) String() string {\n\treturn string(*u)\n}\n\n\/\/ A Version is an extension to semver.Version extending it with the ability to\n\/\/ recognise >= prefixes.\ntype Version struct {\n\tsemver.Version\n\tIsGTE bool\n}\n\n\/\/ UnmarshalText implements the encoding.TextUnmarshaler interface\nfunc (v *Version) UnmarshalText(text []byte) error {\n\treturn v.UnmarshalFlag(string(text))\n}\n\n\/\/ UnmarshalFlag implements the flags.Unmarshaler interface.\nfunc (v *Version) UnmarshalFlag(in string) error {\n\tif strings.HasPrefix(in, \">=\") {\n\t\tv.IsGTE = true\n\t\tin = strings.TrimSpace(strings.TrimPrefix(in, \">=\"))\n\t}\n\treturn v.Set(in)\n}\n\n\/\/ String implements the fmt.Stringer interface\nfunc (v Version) String() string {\n\tif v.IsGTE {\n\t\treturn \">=\" + v.Version.String()\n\t}\n\treturn v.Version.String()\n}\n\n\/\/ VersionString returns just the version, without any preceding >=.\nfunc (v *Version) VersionString() string {\n\treturn v.Version.String()\n}\n\n\/\/ Semver converts a Version to a semver.Version\nfunc (v *Version) Semver() semver.Version {\n\treturn v.Version\n}\n\n\/\/ flagsError converts an error to a flags.Error, which is required for flag parsing.\nfunc flagsError(err error) error {\n\tif err == nil {\n\t\treturn err\n\t}\n\treturn &flags.Error{Type: flags.ErrMarshal, Message: err.Error()}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n \"encoding\/base64\"\n \"errors\"\n \"flag\"\n \"log\"\n \"net\/http\"\n \"strings\"\n)\n\nvar (\n ErrInvalidAuth = errors.New(\"Invalid auth\")\n\n login = flag.String(\"login\", \"\", \"Define the login of the authentication\")\n password = flag.String(\"password\", \"\", \"Define the password of the authentication\")\n realm = flag.String(\"realm\", \"\", \"Define the message of the prompt of the authentication\")\n addr = flag.String(\"addr\", \"\", \"addr host:port\")\n)\n\ntype BasicAuth struct {\n Login string\n Password string\n Realm string\n}\n\nfunc NewBasicAuth(login, pass string) *BasicAuth {\n return &BasicAuth{Login: login, Password: pass}\n}\n\nfunc (a *BasicAuth) Authenticate(w http.ResponseWriter, r *http.Request) {\n w.Header().Set(\"WWW-Authenticate\", `Basic realm=\"`+a.Realm+`\"`)\n http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n}\n\nfunc (a *BasicAuth) BasicAuthHandler(h http.Handler) http.Handler {\n return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n if a.ValidAuth(r) != nil {\n a.Authenticate(w, r)\n }\n })\n}\n\nfunc (a *BasicAuth) ValidAuth(r *http.Request) error {\n s := strings.SplitN(r.Header.Get(\"Authorization\"), \" \", 2)\n if len(s) != 2 || s[0] != \"Basic\" {\n return ErrInvalidAuth\n }\n\n b, err := base64.StdEncoding.DecodeString(s[1])\n if err != nil {\n return err\n }\n\n pair := strings.SplitN(string(b), \":\", 2)\n if len(pair) != 2 {\n return ErrInvalidAuth\n }\n\n if a.Login == pair[0] && a.Password == pair[1] {\n return nil\n }\n\n return ErrInvalidAuth\n}\n\nfunc main() {\n flag.Parse()\n\n auth := NewBasicAuth(*login, *password)\n fs := http.FileServer(http.Dir(\"\/\"))\n handler := auth.BasicAuthHandler(fs)\n\n err := http.ListenAndServe(*addr, handler)\n if err != nil {\n log.Fatalf(err.Error())\n }\n}\n<commit_msg>Improving code<commit_after>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n \"encoding\/base64\"\n \"errors\"\n \"flag\"\n \"log\"\n \"net\/http\"\n \"strings\"\n)\n\nvar (\n ErrInvalidAuth = errors.New(\"Invalid auth\")\n\n login = flag.String(\"login\", \"\", \"Define the login of the authentication\")\n password = flag.String(\"password\", \"\", \"Define the password of the authentication\")\n realm = flag.String(\"realm\", \"\", \"Define the message of the prompt of the authentication\")\n addr = flag.String(\"addr\", \"\", \"addr host:port\")\n dir = flag.String(\"dir\", \".\", \"Define the dirname of file server\")\n)\n\ntype BasicAuth struct {\n Login string\n Password string\n Realm string\n}\n\nfunc NewBasicAuth(login, pass string) *BasicAuth {\n return &BasicAuth{Login: login, Password: pass}\n}\n\nfunc (a *BasicAuth) Authenticate(w http.ResponseWriter, r *http.Request) {\n w.Header().Set(\"WWW-Authenticate\", `Basic realm=\"`+a.Realm+`\"`)\n http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n}\n\nfunc (a *BasicAuth) BasicAuthHandler(h http.Handler) http.Handler {\n return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n if a.ValidAuth(r) != nil {\n a.Authenticate(w, r)\n } else {\n h.ServeHTTP(w, r)\n }\n })\n}\n\nfunc (a *BasicAuth) ValidAuth(r *http.Request) error {\n s := strings.SplitN(r.Header.Get(\"Authorization\"), \" \", 2)\n if len(s) != 2 || s[0] != \"Basic\" {\n return ErrInvalidAuth\n }\n\n b, err := base64.StdEncoding.DecodeString(s[1])\n if err != nil {\n return err\n }\n\n pair := strings.SplitN(string(b), \":\", 2)\n if len(pair) != 2 {\n return ErrInvalidAuth\n }\n\n if a.Login == pair[0] && a.Password == pair[1] {\n return nil\n }\n\n return ErrInvalidAuth\n}\n\nfunc main() {\n flag.Parse()\n\n auth := NewBasicAuth(*login, *password)\n fs := http.FileServer(http.Dir(*dir))\n handler := auth.BasicAuthHandler(fs)\n\n err := http.ListenAndServe(*addr, handler)\n if err != nil {\n log.Fatalf(err.Error())\n }\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package txfeed implements Chain Core's transaction feeds.\npackage txfeed\n\nimport (\n\t\"context\"\n\t\"database\/sql\"\n\n\t\"chain\/core\/query\/filter\"\n\t\"chain\/database\/pg\"\n\t\"chain\/errors\"\n)\n\nvar ErrDuplicateAlias = errors.New(\"duplicate feed alias\")\n\ntype Tracker struct {\n\tDB pg.DB\n}\n\ntype TxFeed struct {\n\tID string `json:\"id,omitempty\"`\n\tAlias *string `json:\"alias\"`\n\tFilter string `json:\"filter,omitempty\"`\n\tAfter string `json:\"after,omitempty\"`\n}\n\nfunc (t *Tracker) Create(ctx context.Context, alias, fil, after string, clientToken *string) (*TxFeed, error) {\n\t\/\/ Validate the filter.\n\t_, err := filter.Parse(fil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ptrAlias *string\n\tif alias != \"\" {\n\t\tptrAlias = &alias\n\t}\n\n\tfeed := &TxFeed{\n\t\tAlias: ptrAlias,\n\t\tFilter: fil,\n\t\tAfter: after,\n\t}\n\treturn insertTxFeed(ctx, t.DB, feed, clientToken)\n}\n\n\/\/ insertTxFeed adds the txfeed to the database. If the txfeed has a client token,\n\/\/ and there already exists a txfeed with that client token, insertTxFeed will\n\/\/ lookup and return the existing txfeed instead.\nfunc insertTxFeed(ctx context.Context, db pg.DB, feed *TxFeed, clientToken *string) (*TxFeed, error) {\n\tconst q = `\n\t\tINSERT INTO txfeeds (alias, filter, after, client_token)\n\t\tVALUES ($1, $2, $3, $4)\n\t\tON CONFLICT (client_token) DO NOTHING\n\t\tRETURNING id\n\t`\n\n\tvar alias sql.NullString\n\tif feed.Alias != nil {\n\t\talias = sql.NullString{Valid: true, String: *feed.Alias}\n\t}\n\n\terr := db.QueryRow(\n\t\tctx, q, alias, feed.Filter, feed.After,\n\t\tclientToken).Scan(&feed.ID)\n\n\tif pg.IsUniqueViolation(err) {\n\t\treturn nil, errors.WithDetail(ErrDuplicateAlias, \"a transaction feed with the provided alias already exists\")\n\t} else if err == sql.ErrNoRows && clientToken != nil {\n\t\t\/\/ There is already a txfeed with the provided client\n\t\t\/\/ token. We should return the existing txfeed\n\t\tfeed, err = txfeedByClientToken(ctx, db, *clientToken)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"retrieving existing txfeed\")\n\t\t}\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn feed, nil\n}\n\nfunc txfeedByClientToken(ctx context.Context, db pg.DB, clientToken string) (*TxFeed, error) {\n\tconst q = `\n\t\tSELECT id, alias, filter, after\n\t\tFROM txfeeds\n\t\tWHERE client_token=$1\n\t`\n\n\tvar (\n\t\tfeed TxFeed\n\t\talias sql.NullString\n\t)\n\terr := db.QueryRow(ctx, q, clientToken).Scan(&feed.ID, &alias, &feed.Filter, &feed.After)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif alias.Valid {\n\t\tfeed.Alias = &alias.String\n\t}\n\n\treturn &feed, nil\n}\n\nfunc (t *Tracker) Find(ctx context.Context, id, alias string) (*TxFeed, error) {\n\twhere := ` WHERE `\n\tif id != \"\" {\n\t\twhere += `id=$1`\n\t} else {\n\t\twhere += `alias=$1`\n\t\tid = alias\n\t}\n\n\tq := `\n\t\tSELECT id, alias, filter, after\n\t\tFROM txfeeds\n\t` + where\n\n\tvar (\n\t\tfeed TxFeed\n\t\tsqlAlias sql.NullString\n\t)\n\n\terr := t.DB.QueryRow(ctx, q, id).Scan(&feed.ID, &sqlAlias, &feed.Filter, &feed.After)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif sqlAlias.Valid {\n\t\tfeed.Alias = &sqlAlias.String\n\t}\n\n\treturn &feed, nil\n}\n\nfunc (t *Tracker) Delete(ctx context.Context, id, alias string) error {\n\twhere := ` WHERE `\n\tif id != \"\" {\n\t\twhere += `id=$1`\n\t} else {\n\t\twhere += `alias=$1`\n\t\tid = alias\n\t}\n\n\tq := `DELETE FROM txfeeds` + where\n\n\tres, err := t.DB.Exec(ctx, q, id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\taffected, err := res.RowsAffected()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif affected == 0 {\n\t\treturn errors.WithDetailf(pg.ErrUserInputNotFound, \"could not find and delete txfeed with id\/alias=%s\", id)\n\t}\n\n\treturn nil\n}\n\nfunc (t *Tracker) Update(ctx context.Context, id, alias, after, prev string) (*TxFeed, error) {\n\twhere := ` WHERE `\n\tif id != \"\" {\n\t\twhere += `id=$2`\n\t} else {\n\t\twhere += `alias=$2`\n\t\tid = alias\n\t}\n\n\tq := `\n\t\tUPDATE txfeeds SET after=$1\n\t` + where + ` AND after=$3`\n\n\tres, err := t.DB.Exec(ctx, q, after, id, prev)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taffected, err := res.RowsAffected()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif affected == 0 {\n\t\treturn nil, errors.WithDetailf(pg.ErrUserInputNotFound, \"could not find txfeed with id\/alias=%s and prev=%s\", id, prev)\n\t}\n\n\treturn &TxFeed{\n\t\tID: id,\n\t\tAlias: &alias,\n\t\tAfter: after,\n\t}, nil\n}\n<commit_msg>core\/txfeed: Use bytes.Buffer to construct txfeed queries<commit_after>\/\/ Package txfeed implements Chain Core's transaction feeds.\npackage txfeed\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"database\/sql\"\n\n\t\"chain\/core\/query\/filter\"\n\t\"chain\/database\/pg\"\n\t\"chain\/errors\"\n)\n\nvar ErrDuplicateAlias = errors.New(\"duplicate feed alias\")\n\ntype Tracker struct {\n\tDB pg.DB\n}\n\ntype TxFeed struct {\n\tID string `json:\"id,omitempty\"`\n\tAlias *string `json:\"alias\"`\n\tFilter string `json:\"filter,omitempty\"`\n\tAfter string `json:\"after,omitempty\"`\n}\n\nfunc (t *Tracker) Create(ctx context.Context, alias, fil, after string, clientToken *string) (*TxFeed, error) {\n\t\/\/ Validate the filter.\n\t_, err := filter.Parse(fil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ptrAlias *string\n\tif alias != \"\" {\n\t\tptrAlias = &alias\n\t}\n\n\tfeed := &TxFeed{\n\t\tAlias: ptrAlias,\n\t\tFilter: fil,\n\t\tAfter: after,\n\t}\n\treturn insertTxFeed(ctx, t.DB, feed, clientToken)\n}\n\n\/\/ insertTxFeed adds the txfeed to the database. If the txfeed has a client token,\n\/\/ and there already exists a txfeed with that client token, insertTxFeed will\n\/\/ lookup and return the existing txfeed instead.\nfunc insertTxFeed(ctx context.Context, db pg.DB, feed *TxFeed, clientToken *string) (*TxFeed, error) {\n\tconst q = `\n\t\tINSERT INTO txfeeds (alias, filter, after, client_token)\n\t\tVALUES ($1, $2, $3, $4)\n\t\tON CONFLICT (client_token) DO NOTHING\n\t\tRETURNING id\n\t`\n\n\tvar alias sql.NullString\n\tif feed.Alias != nil {\n\t\talias = sql.NullString{Valid: true, String: *feed.Alias}\n\t}\n\n\terr := db.QueryRow(\n\t\tctx, q, alias, feed.Filter, feed.After,\n\t\tclientToken).Scan(&feed.ID)\n\n\tif pg.IsUniqueViolation(err) {\n\t\treturn nil, errors.WithDetail(ErrDuplicateAlias, \"a transaction feed with the provided alias already exists\")\n\t} else if err == sql.ErrNoRows && clientToken != nil {\n\t\t\/\/ There is already a txfeed with the provided client\n\t\t\/\/ token. We should return the existing txfeed\n\t\tfeed, err = txfeedByClientToken(ctx, db, *clientToken)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"retrieving existing txfeed\")\n\t\t}\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn feed, nil\n}\n\nfunc txfeedByClientToken(ctx context.Context, db pg.DB, clientToken string) (*TxFeed, error) {\n\tconst q = `\n\t\tSELECT id, alias, filter, after\n\t\tFROM txfeeds\n\t\tWHERE client_token=$1\n\t`\n\n\tvar (\n\t\tfeed TxFeed\n\t\talias sql.NullString\n\t)\n\terr := db.QueryRow(ctx, q, clientToken).Scan(&feed.ID, &alias, &feed.Filter, &feed.After)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif alias.Valid {\n\t\tfeed.Alias = &alias.String\n\t}\n\n\treturn &feed, nil\n}\n\nfunc (t *Tracker) Find(ctx context.Context, id, alias string) (*TxFeed, error) {\n\tvar q bytes.Buffer\n\n\tq.WriteString(`\n\t\tSELECT id, alias, filter, after\n\t\tFROM txfeeds\n\t\tWHERE\n\t`)\n\n\tif id != \"\" {\n\t\tq.WriteString(`id=$1`)\n\t} else {\n\t\tq.WriteString(`alias=$1`)\n\t\tid = alias\n\t}\n\n\tvar (\n\t\tfeed TxFeed\n\t\tsqlAlias sql.NullString\n\t)\n\n\terr := t.DB.QueryRow(ctx, q.String(), id).Scan(&feed.ID, &sqlAlias, &feed.Filter, &feed.After)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif sqlAlias.Valid {\n\t\tfeed.Alias = &sqlAlias.String\n\t}\n\n\treturn &feed, nil\n}\n\nfunc (t *Tracker) Delete(ctx context.Context, id, alias string) error {\n\tvar q bytes.Buffer\n\n\tq.WriteString(`DELETE FROM txfeeds WHERE `)\n\n\tif id != \"\" {\n\t\tq.WriteString(`id=$1`)\n\t} else {\n\t\tq.WriteString(`alias=$1`)\n\t\tid = alias\n\t}\n\n\tres, err := t.DB.Exec(ctx, q.String(), id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\taffected, err := res.RowsAffected()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif affected == 0 {\n\t\treturn errors.WithDetailf(pg.ErrUserInputNotFound, \"could not find and delete txfeed with id\/alias=%s\", id)\n\t}\n\n\treturn nil\n}\n\nfunc (t *Tracker) Update(ctx context.Context, id, alias, after, prev string) (*TxFeed, error) {\n\tvar q bytes.Buffer\n\n\tq.WriteString(`UPDATE txfeeds SET after=$1 WHERE `)\n\n\tif id != \"\" {\n\t\tq.WriteString(`id=$2`)\n\t} else {\n\t\tq.WriteString(`alias=$2`)\n\t\tid = alias\n\t}\n\n\tq.WriteString(` AND after=$3`)\n\n\tres, err := t.DB.Exec(ctx, q.String(), after, id, prev)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taffected, err := res.RowsAffected()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif affected == 0 {\n\t\treturn nil, errors.WithDetailf(pg.ErrUserInputNotFound, \"could not find txfeed with id\/alias=%s and prev=%s\", id, prev)\n\t}\n\n\treturn &TxFeed{\n\t\tID: id,\n\t\tAlias: &alias,\n\t\tAfter: after,\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Microsoft. All rights reserved.\n\/\/ MIT License\n\n\/\/ +build windows\n\npackage network\n\nimport (\n\t\"encoding\/json\"\n\t\"net\"\n\t\"strings\"\n\n\t\"github.com\/Azure\/azure-container-networking\/log\"\n\t\"github.com\/Azure\/azure-container-networking\/network\/policy\"\n\t\"github.com\/Microsoft\/hcsshim\"\n)\n\n\/\/ HotAttachEndpoint is a wrapper of hcsshim's HotAttachEndpoint.\nfunc (endpoint *EndpointInfo) HotAttachEndpoint(containerID string) error {\n\treturn hcsshim.HotAttachEndpoint(containerID, endpoint.Id)\n}\n\n\/\/ ConstructEndpointID constructs endpoint name from netNsPath.\nfunc ConstructEndpointID(containerID string, netNsPath string, ifName string) (string, string) {\n\tinfraEpName, workloadEpName := \"\", \"\"\n\n\tif len(containerID) > 8 {\n\t\tcontainerID = containerID[:8]\n\t}\n\n\tif netNsPath != \"\" {\n\t\tsplits := strings.Split(netNsPath, \":\")\n\t\t\/\/ For workload containers, we extract its linking infrastructure container ID.\n\t\tif len(splits) == 2 {\n\t\t\tif len(splits[1]) > 8 {\n\t\t\t\tsplits[1] = splits[1][:8]\n\t\t\t}\n\t\t\tinfraEpName = splits[1] + \"-\" + ifName\n\t\t\tworkloadEpName = containerID + \"-\" + ifName\n\t\t} else {\n\t\t\t\/\/ For infrastructure containers, we just use its container ID.\n\t\t\tinfraEpName = containerID + \"-\" + ifName\n\t\t}\n\t}\n\n\treturn infraEpName, workloadEpName\n}\n\n\/\/ newEndpointImpl creates a new endpoint in the network.\nfunc (nw *network) newEndpointImpl(epInfo *EndpointInfo) (*endpoint, error) {\n\t\/\/ Get Infrastructure containerID. Handle ADD calls for workload container.\n\tinfraEpName, _ := ConstructEndpointID(epInfo.ContainerID, epInfo.NetNsPath, epInfo.IfName)\n\n\thnsEndpoint := &hcsshim.HNSEndpoint{\n\t\tName: infraEpName,\n\t\tVirtualNetwork: nw.HnsId,\n\t\tDNSSuffix: epInfo.DNS.Suffix,\n\t\tDNSServerList: strings.Join(epInfo.DNS.Servers, \",\"),\n\t\tPolicies: policy.SerializePolicies(policy.EndpointPolicy, epInfo.Policies),\n\t}\n\n\t\/\/ HNS currently supports only one IP address per endpoint.\n\tif epInfo.IPAddresses != nil {\n\t\thnsEndpoint.IPAddress = epInfo.IPAddresses[0].IP\n\t\tpl, _ := epInfo.IPAddresses[0].Mask.Size()\n\t\thnsEndpoint.PrefixLength = uint8(pl)\n\t}\n\n\t\/\/ Marshal the request.\n\tbuffer, err := json.Marshal(hnsEndpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thnsRequest := string(buffer)\n\n\t\/\/ Create the HNS endpoint.\n\tlog.Printf(\"[net] HNSEndpointRequest POST request:%+v\", hnsRequest)\n\thnsResponse, err := hcsshim.HNSEndpointRequest(\"POST\", \"\", hnsRequest)\n\tlog.Printf(\"[net] HNSEndpointRequest POST response:%+v err:%v.\", hnsResponse, err)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Attach the endpoint.\n\tlog.Printf(\"[net] Attaching endpoint %v to container %v.\", hnsResponse.Id, epInfo.ContainerID)\n\terr = hcsshim.HotAttachEndpoint(epInfo.ContainerID, hnsResponse.Id)\n\tif err != nil {\n\t\tlog.Printf(\"[net] Failed to attach endpoint: %v.\", err)\n\t}\n\n\t\/\/ Create the endpoint object.\n\tep := &endpoint{\n\t\tId: infraEpName,\n\t\tHnsId: hnsResponse.Id,\n\t\tSandboxKey: epInfo.ContainerID,\n\t\tIfName: epInfo.IfName,\n\t\tIPAddresses: epInfo.IPAddresses,\n\t\tGateways: []net.IP{net.ParseIP(hnsResponse.GatewayAddress)},\n\t\tDNS: epInfo.DNS,\n\t}\n\n\tfor _, route := range epInfo.Routes {\n\t\tep.Routes = append(ep.Routes, route)\n\t}\n\n\tep.MacAddress, _ = net.ParseMAC(hnsResponse.MacAddress)\n\n\treturn ep, nil\n}\n\n\/\/ deleteEndpointImpl deletes an existing endpoint from the network.\nfunc (nw *network) deleteEndpointImpl(ep *endpoint) error {\n\t\/\/ Delete the HNS endpoint.\n\tlog.Printf(\"[net] HNSEndpointRequest DELETE id:%v\", ep.HnsId)\n\thnsResponse, err := hcsshim.HNSEndpointRequest(\"DELETE\", ep.HnsId, \"\")\n\tlog.Printf(\"[net] HNSEndpointRequest DELETE response:%+v err:%v.\", hnsResponse, err)\n\n\treturn err\n}\n\n\/\/ getInfoImpl returns information about the endpoint.\nfunc (ep *endpoint) getInfoImpl(epInfo *EndpointInfo) {\n\tepInfo.Data[\"hnsid\"] = ep.HnsId\n}\n<commit_msg>handle empty string (#213)<commit_after>\/\/ Copyright 2017 Microsoft. All rights reserved.\n\/\/ MIT License\n\n\/\/ +build windows\n\npackage network\n\nimport (\n\t\"encoding\/json\"\n\t\"net\"\n\t\"strings\"\n\n\t\"github.com\/Azure\/azure-container-networking\/log\"\n\t\"github.com\/Azure\/azure-container-networking\/network\/policy\"\n\t\"github.com\/Microsoft\/hcsshim\"\n)\n\n\/\/ HotAttachEndpoint is a wrapper of hcsshim's HotAttachEndpoint.\nfunc (endpoint *EndpointInfo) HotAttachEndpoint(containerID string) error {\n\treturn hcsshim.HotAttachEndpoint(containerID, endpoint.Id)\n}\n\n\/\/ ConstructEndpointID constructs endpoint name from netNsPath.\nfunc ConstructEndpointID(containerID string, netNsPath string, ifName string) (string, string) {\n\tif len(containerID) > 8 {\n\t\tcontainerID = containerID[:8]\n\t}\n\n\tinfraEpName, workloadEpName := \"\", \"\"\n\n\tsplits := strings.Split(netNsPath, \":\")\n\tif len(splits) == 2 {\n\t\t\/\/ For workload containers, we extract its linking infrastructure container ID.\n\t\tif len(splits[1]) > 8 {\n\t\t\tsplits[1] = splits[1][:8]\n\t\t}\n\t\tinfraEpName = splits[1] + \"-\" + ifName\n\t\tworkloadEpName = containerID + \"-\" + ifName\n\t} else {\n\t\t\/\/ For infrastructure containers, we use its container ID directly.\n\t\tinfraEpName = containerID + \"-\" + ifName\n\t}\n\n\treturn infraEpName, workloadEpName\n}\n\n\/\/ newEndpointImpl creates a new endpoint in the network.\nfunc (nw *network) newEndpointImpl(epInfo *EndpointInfo) (*endpoint, error) {\n\t\/\/ Get Infrastructure containerID. Handle ADD calls for workload container.\n\tinfraEpName, _ := ConstructEndpointID(epInfo.ContainerID, epInfo.NetNsPath, epInfo.IfName)\n\n\thnsEndpoint := &hcsshim.HNSEndpoint{\n\t\tName: infraEpName,\n\t\tVirtualNetwork: nw.HnsId,\n\t\tDNSSuffix: epInfo.DNS.Suffix,\n\t\tDNSServerList: strings.Join(epInfo.DNS.Servers, \",\"),\n\t\tPolicies: policy.SerializePolicies(policy.EndpointPolicy, epInfo.Policies),\n\t}\n\n\t\/\/ HNS currently supports only one IP address per endpoint.\n\tif epInfo.IPAddresses != nil {\n\t\thnsEndpoint.IPAddress = epInfo.IPAddresses[0].IP\n\t\tpl, _ := epInfo.IPAddresses[0].Mask.Size()\n\t\thnsEndpoint.PrefixLength = uint8(pl)\n\t}\n\n\t\/\/ Marshal the request.\n\tbuffer, err := json.Marshal(hnsEndpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thnsRequest := string(buffer)\n\n\t\/\/ Create the HNS endpoint.\n\tlog.Printf(\"[net] HNSEndpointRequest POST request:%+v\", hnsRequest)\n\thnsResponse, err := hcsshim.HNSEndpointRequest(\"POST\", \"\", hnsRequest)\n\tlog.Printf(\"[net] HNSEndpointRequest POST response:%+v err:%v.\", hnsResponse, err)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Attach the endpoint.\n\tlog.Printf(\"[net] Attaching endpoint %v to container %v.\", hnsResponse.Id, epInfo.ContainerID)\n\terr = hcsshim.HotAttachEndpoint(epInfo.ContainerID, hnsResponse.Id)\n\tif err != nil {\n\t\tlog.Printf(\"[net] Failed to attach endpoint: %v.\", err)\n\t}\n\n\t\/\/ Create the endpoint object.\n\tep := &endpoint{\n\t\tId: infraEpName,\n\t\tHnsId: hnsResponse.Id,\n\t\tSandboxKey: epInfo.ContainerID,\n\t\tIfName: epInfo.IfName,\n\t\tIPAddresses: epInfo.IPAddresses,\n\t\tGateways: []net.IP{net.ParseIP(hnsResponse.GatewayAddress)},\n\t\tDNS: epInfo.DNS,\n\t}\n\n\tfor _, route := range epInfo.Routes {\n\t\tep.Routes = append(ep.Routes, route)\n\t}\n\n\tep.MacAddress, _ = net.ParseMAC(hnsResponse.MacAddress)\n\n\treturn ep, nil\n}\n\n\/\/ deleteEndpointImpl deletes an existing endpoint from the network.\nfunc (nw *network) deleteEndpointImpl(ep *endpoint) error {\n\t\/\/ Delete the HNS endpoint.\n\tlog.Printf(\"[net] HNSEndpointRequest DELETE id:%v\", ep.HnsId)\n\thnsResponse, err := hcsshim.HNSEndpointRequest(\"DELETE\", ep.HnsId, \"\")\n\tlog.Printf(\"[net] HNSEndpointRequest DELETE response:%+v err:%v.\", hnsResponse, err)\n\n\treturn err\n}\n\n\/\/ getInfoImpl returns information about the endpoint.\nfunc (ep *endpoint) getInfoImpl(epInfo *EndpointInfo) {\n\tepInfo.Data[\"hnsid\"] = ep.HnsId\n}\n<|endoftext|>"} {"text":"<commit_before>package s3vfs\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\tpathpkg \"path\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"code.google.com\/p\/go.tools\/godoc\/vfs\"\n\n\t\"github.com\/sourcegraph\/rwvfs\"\n\t\"github.com\/sqs\/s3\"\n\t\"github.com\/sqs\/s3\/s3util\"\n)\n\nvar DefaultS3Config = s3util.Config{\n\tKeys: &s3.Keys{\n\t\tAccessKey: os.Getenv(\"AWS_ACCESS_KEY_ID\"),\n\t\tSecretKey: os.Getenv(\"AWS_SECRET_KEY\"),\n\t},\n\tService: s3.DefaultService,\n}\n\n\/\/ S3 returns an implementation of FileSystem using the specified S3 bucket and\n\/\/ config. If config is nil, DefaultS3Config is used.\n\/\/\n\/\/ The bucket URL is the full URL to the bucket on Amazon S3, including the\n\/\/ bucket name and AWS region (e.g.,\n\/\/ https:\/\/s3-us-west-2.amazonaws.com\/mybucket).\nfunc S3(bucket *url.URL, config *s3util.Config) rwvfs.FileSystem {\n\tif config == nil {\n\t\tconfig = &DefaultS3Config\n\t}\n\treturn &S3FS{bucket, config}\n}\n\ntype S3FS struct {\n\tbucket *url.URL\n\tconfig *s3util.Config\n}\n\nfunc (fs *S3FS) String() string {\n\treturn fmt.Sprintf(\"S3 filesystem at %s\", fs.bucket)\n}\n\nfunc (fs *S3FS) url(path string) string {\n\tpath = pathpkg.Join(fs.bucket.Path, path)\n\treturn fs.bucket.ResolveReference(&url.URL{Path: path}).String()\n}\n\nfunc (fs *S3FS) Open(name string) (vfs.ReadSeekCloser, error) {\n\trdr, err := s3util.Open(fs.url(name), fs.config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb, err := ioutil.ReadAll(rdr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nopCloser{bytes.NewReader(b)}, nil\n}\n\nfunc (fs *S3FS) ReadDir(path string) ([]os.FileInfo, error) {\n\tdir, err := s3util.NewFile(fs.url(path), fs.config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfis, err := dir.Readdir(0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i, fi := range fis {\n\t\tfis[i] = &fileInfo{\n\t\t\tname: pathpkg.Base(fi.Name()),\n\t\t\tsize: fi.Size(),\n\t\t\tmode: fi.Mode(),\n\t\t\tmodTime: fi.ModTime(),\n\t\t\tsys: fi.Sys(),\n\t\t}\n\t}\n\treturn fis, nil\n}\n\nfunc (fs *S3FS) Lstat(name string) (os.FileInfo, error) {\n\tname = filepath.Clean(name)\n\n\tif name == \".\" {\n\t\treturn &fileInfo{\n\t\t\tname: \".\",\n\t\t\tsize: 0,\n\t\t\tmode: os.ModeDir,\n\t\t\tmodTime: time.Time{},\n\t\t}, nil\n\t}\n\n\tfis, err := fs.ReadDir(pathpkg.Dir(name))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, fi := range fis {\n\t\tif fi.Name() == pathpkg.Base(name) {\n\t\t\treturn fi, nil\n\t\t}\n\t}\n\treturn nil, os.ErrNotExist\n}\n\nfunc (fs *S3FS) Stat(name string) (os.FileInfo, error) {\n\treturn fs.Lstat(name)\n}\n\n\/\/ Create opens the file at path for writing, creating the file if it doesn't\n\/\/ exist and truncating it otherwise.\nfunc (fs *S3FS) Create(path string) (io.WriteCloser, error) {\n\treturn s3util.Create(fs.url(path), nil, fs.config)\n}\n\nfunc (fs *S3FS) Mkdir(name string) error {\n\t\/\/ S3 doesn't have directories.\n\treturn nil\n}\n\nfunc (fs *S3FS) Remove(name string) error {\n\trdr, err := s3util.Delete(fs.url(name), fs.config)\n\tif rdr != nil {\n\t\trdr.Close()\n\t}\n\treturn err\n}\n\ntype nopCloser struct {\n\tio.ReadSeeker\n}\n\nfunc (nc nopCloser) Close() error { return nil }\n\ntype fileInfo struct {\n\tname string\n\tsize int64\n\tmode os.FileMode\n\tmodTime time.Time\n\tsys interface{}\n}\n\nfunc (f *fileInfo) Name() string { return f.name }\nfunc (f *fileInfo) Size() int64 { return f.size }\nfunc (f *fileInfo) Mode() os.FileMode { return f.mode }\nfunc (f *fileInfo) ModTime() time.Time { return f.modTime }\nfunc (f *fileInfo) IsDir() bool { return f.mode&os.ModeDir != 0 }\nfunc (f *fileInfo) Sys() interface{} { return f.sys }\n<commit_msg>use new golang.org\/x\/... subrepo naming (see https:\/\/docs.google.com\/document\/d\/1VdB_ecg6wTBC-iRzAT-eCwvzj0rlRw7iL5KRdzGtlu4\/edit)<commit_after>package s3vfs\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\tpathpkg \"path\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"golang.org\/x\/tools\/godoc\/vfs\"\n\n\t\"github.com\/sourcegraph\/rwvfs\"\n\t\"github.com\/sqs\/s3\"\n\t\"github.com\/sqs\/s3\/s3util\"\n)\n\nvar DefaultS3Config = s3util.Config{\n\tKeys: &s3.Keys{\n\t\tAccessKey: os.Getenv(\"AWS_ACCESS_KEY_ID\"),\n\t\tSecretKey: os.Getenv(\"AWS_SECRET_KEY\"),\n\t},\n\tService: s3.DefaultService,\n}\n\n\/\/ S3 returns an implementation of FileSystem using the specified S3 bucket and\n\/\/ config. If config is nil, DefaultS3Config is used.\n\/\/\n\/\/ The bucket URL is the full URL to the bucket on Amazon S3, including the\n\/\/ bucket name and AWS region (e.g.,\n\/\/ https:\/\/s3-us-west-2.amazonaws.com\/mybucket).\nfunc S3(bucket *url.URL, config *s3util.Config) rwvfs.FileSystem {\n\tif config == nil {\n\t\tconfig = &DefaultS3Config\n\t}\n\treturn &S3FS{bucket, config}\n}\n\ntype S3FS struct {\n\tbucket *url.URL\n\tconfig *s3util.Config\n}\n\nfunc (fs *S3FS) String() string {\n\treturn fmt.Sprintf(\"S3 filesystem at %s\", fs.bucket)\n}\n\nfunc (fs *S3FS) url(path string) string {\n\tpath = pathpkg.Join(fs.bucket.Path, path)\n\treturn fs.bucket.ResolveReference(&url.URL{Path: path}).String()\n}\n\nfunc (fs *S3FS) Open(name string) (vfs.ReadSeekCloser, error) {\n\trdr, err := s3util.Open(fs.url(name), fs.config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb, err := ioutil.ReadAll(rdr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nopCloser{bytes.NewReader(b)}, nil\n}\n\nfunc (fs *S3FS) ReadDir(path string) ([]os.FileInfo, error) {\n\tdir, err := s3util.NewFile(fs.url(path), fs.config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfis, err := dir.Readdir(0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i, fi := range fis {\n\t\tfis[i] = &fileInfo{\n\t\t\tname: pathpkg.Base(fi.Name()),\n\t\t\tsize: fi.Size(),\n\t\t\tmode: fi.Mode(),\n\t\t\tmodTime: fi.ModTime(),\n\t\t\tsys: fi.Sys(),\n\t\t}\n\t}\n\treturn fis, nil\n}\n\nfunc (fs *S3FS) Lstat(name string) (os.FileInfo, error) {\n\tname = filepath.Clean(name)\n\n\tif name == \".\" {\n\t\treturn &fileInfo{\n\t\t\tname: \".\",\n\t\t\tsize: 0,\n\t\t\tmode: os.ModeDir,\n\t\t\tmodTime: time.Time{},\n\t\t}, nil\n\t}\n\n\tfis, err := fs.ReadDir(pathpkg.Dir(name))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, fi := range fis {\n\t\tif fi.Name() == pathpkg.Base(name) {\n\t\t\treturn fi, nil\n\t\t}\n\t}\n\treturn nil, os.ErrNotExist\n}\n\nfunc (fs *S3FS) Stat(name string) (os.FileInfo, error) {\n\treturn fs.Lstat(name)\n}\n\n\/\/ Create opens the file at path for writing, creating the file if it doesn't\n\/\/ exist and truncating it otherwise.\nfunc (fs *S3FS) Create(path string) (io.WriteCloser, error) {\n\treturn s3util.Create(fs.url(path), nil, fs.config)\n}\n\nfunc (fs *S3FS) Mkdir(name string) error {\n\t\/\/ S3 doesn't have directories.\n\treturn nil\n}\n\nfunc (fs *S3FS) Remove(name string) error {\n\trdr, err := s3util.Delete(fs.url(name), fs.config)\n\tif rdr != nil {\n\t\trdr.Close()\n\t}\n\treturn err\n}\n\ntype nopCloser struct {\n\tio.ReadSeeker\n}\n\nfunc (nc nopCloser) Close() error { return nil }\n\ntype fileInfo struct {\n\tname string\n\tsize int64\n\tmode os.FileMode\n\tmodTime time.Time\n\tsys interface{}\n}\n\nfunc (f *fileInfo) Name() string { return f.name }\nfunc (f *fileInfo) Size() int64 { return f.size }\nfunc (f *fileInfo) Mode() os.FileMode { return f.mode }\nfunc (f *fileInfo) ModTime() time.Time { return f.modTime }\nfunc (f *fileInfo) IsDir() bool { return f.mode&os.ModeDir != 0 }\nfunc (f *fileInfo) Sys() interface{} { return f.sys }\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage clustermesh\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com\/cilium\/cilium\/api\/v1\/models\"\n\t\"github.com\/cilium\/cilium\/pkg\/allocator\"\n\t\"github.com\/cilium\/cilium\/pkg\/controller\"\n\t\"github.com\/cilium\/cilium\/pkg\/ipcache\"\n\t\"github.com\/cilium\/cilium\/pkg\/kvstore\"\n\t\"github.com\/cilium\/cilium\/pkg\/kvstore\/store\"\n\t\"github.com\/cilium\/cilium\/pkg\/lock\"\n\tnodeStore \"github.com\/cilium\/cilium\/pkg\/node\/store\"\n\tserviceStore \"github.com\/cilium\/cilium\/pkg\/service\/store\"\n\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ remoteCluster represents another cluster other than the cluster the agent is\n\/\/ running in\ntype remoteCluster struct {\n\t\/\/ name is the name of the cluster\n\tname string\n\n\t\/\/ configPath is the path to the etcd configuration to be used to\n\t\/\/ connect to the etcd cluster of the remote cluster\n\tconfigPath string\n\n\t\/\/ changed receives an event when the remote cluster configuration has\n\t\/\/ changed and is closed when the configuration file was removed\n\tchanged chan bool\n\n\t\/\/ mesh is the cluster mesh this remote cluster belongs to\n\tmesh *ClusterMesh\n\n\tcontrollers *controller.Manager\n\n\t\/\/ remoteConnectionControllerName is the name of the backing controller\n\t\/\/ that maintains the remote connection\n\tremoteConnectionControllerName string\n\n\t\/\/ mutex protects the following variables\n\t\/\/ - backend\n\t\/\/ - store\n\t\/\/ - remoteNodes\n\t\/\/ - ipCacheWatcher\n\t\/\/ - remoteIdentityCache\n\tmutex lock.RWMutex\n\n\t\/\/ store is the shared store representing all nodes in the remote cluster\n\tremoteNodes *store.SharedStore\n\n\t\/\/ remoteServices is the shared store representing services in remote\n\t\/\/ clusters\n\tremoteServices *store.SharedStore\n\n\t\/\/ ipCacheWatcher is the watcher that notifies about IP<->identity\n\t\/\/ changes in the remote cluster\n\tipCacheWatcher *ipcache.IPIdentityWatcher\n\n\t\/\/ remoteIdentityCache is a locally cached copy of the identity\n\t\/\/ allocations in the remote cluster\n\tremoteIdentityCache *allocator.RemoteCache\n\n\t\/\/ backend is the kvstore backend being used\n\tbackend kvstore.BackendOperations\n\n\tswg *lock.StoppableWaitGroup\n}\n\nvar (\n\t\/\/ skipKvstoreConnection skips the etcd connection, used for testing\n\tskipKvstoreConnection bool\n)\n\nfunc (rc *remoteCluster) getLogger() *logrus.Entry {\n\tvar (\n\t\tstatus string\n\t\terr error\n\t)\n\n\tif rc.backend != nil {\n\t\tstatus, err = rc.backend.Status()\n\t}\n\n\treturn log.WithFields(logrus.Fields{\n\t\tfieldClusterName: rc.name,\n\t\tfieldConfig: rc.configPath,\n\t\tfieldKVStoreStatus: status,\n\t\tfieldKVStoreErr: err,\n\t})\n}\n\n\/\/ releaseOldConnection releases the etcd connection to a remote cluster. Must\n\/\/ be called with rc.mutex held for writing.\nfunc (rc *remoteCluster) releaseOldConnection() {\n\tif rc.ipCacheWatcher != nil {\n\t\trc.ipCacheWatcher.Close()\n\t\trc.ipCacheWatcher = nil\n\t}\n\n\tif rc.remoteNodes != nil {\n\t\trc.remoteNodes.Close(context.TODO())\n\t\trc.remoteNodes = nil\n\t}\n\tif rc.remoteIdentityCache != nil {\n\t\trc.remoteIdentityCache.Close()\n\t\trc.remoteIdentityCache = nil\n\t}\n\tif rc.remoteServices != nil {\n\t\trc.remoteServices.Close(context.TODO())\n\t\trc.remoteServices = nil\n\t}\n\tif rc.backend != nil {\n\t\trc.backend.Close()\n\t\trc.backend = nil\n\t}\n}\n\nfunc (rc *remoteCluster) restartRemoteConnection(allocator RemoteIdentityWatcher) {\n\trc.controllers.UpdateController(rc.remoteConnectionControllerName,\n\t\tcontroller.ControllerParams{\n\t\t\tDoFunc: func(ctx context.Context) error {\n\t\t\t\trc.mutex.Lock()\n\t\t\t\tif rc.backend != nil {\n\t\t\t\t\trc.releaseOldConnection()\n\t\t\t\t}\n\t\t\t\trc.mutex.Unlock()\n\n\t\t\t\tbackend, errChan := kvstore.NewClient(context.TODO(), kvstore.EtcdBackendName,\n\t\t\t\t\tmap[string]string{\n\t\t\t\t\t\tkvstore.EtcdOptionConfig: rc.configPath,\n\t\t\t\t\t},\n\t\t\t\t\t&kvstore.ExtraOptions{NoLockQuorumCheck: true})\n\n\t\t\t\t\/\/ Block until either an error is returned or\n\t\t\t\t\/\/ the channel is closed due to success of the\n\t\t\t\t\/\/ connection\n\t\t\t\trc.getLogger().Debugf(\"Waiting for connection to be established\")\n\t\t\t\terr, isErr := <-errChan\n\t\t\t\tif isErr {\n\t\t\t\t\tif backend != nil {\n\t\t\t\t\t\tbackend.Close()\n\t\t\t\t\t}\n\t\t\t\t\trc.getLogger().WithError(err).Warning(\"Unable to establish etcd connection to remote cluster\")\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\trc.getLogger().Info(\"Connection to remote cluster established\")\n\n\t\t\t\tremoteNodes, err := store.JoinSharedStore(store.Configuration{\n\t\t\t\t\tPrefix: path.Join(nodeStore.NodeStorePrefix, rc.name),\n\t\t\t\t\tKeyCreator: rc.mesh.conf.NodeKeyCreator,\n\t\t\t\t\tSynchronizationInterval: time.Minute,\n\t\t\t\t\tBackend: backend,\n\t\t\t\t\tObserver: rc.mesh.conf.NodeObserver(),\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tbackend.Close()\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tremoteServices, err := store.JoinSharedStore(store.Configuration{\n\t\t\t\t\tPrefix: path.Join(serviceStore.ServiceStorePrefix, rc.name),\n\t\t\t\t\tKeyCreator: func() store.Key {\n\t\t\t\t\t\tsvc := serviceStore.ClusterService{}\n\t\t\t\t\t\treturn &svc\n\t\t\t\t\t},\n\t\t\t\t\tSynchronizationInterval: time.Minute,\n\t\t\t\t\tBackend: backend,\n\t\t\t\t\tObserver: &remoteServiceObserver{\n\t\t\t\t\t\tremoteCluster: rc,\n\t\t\t\t\t\tswg: rc.swg,\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tremoteNodes.Close(context.TODO())\n\t\t\t\t\tbackend.Close()\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\trc.swg.Stop()\n\n\t\t\t\tremoteIdentityCache, err := allocator.WatchRemoteIdentities(backend)\n\t\t\t\tif err != nil {\n\t\t\t\t\tremoteServices.Close(context.TODO())\n\t\t\t\t\tremoteNodes.Close(context.TODO())\n\t\t\t\t\tbackend.Close()\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tipCacheWatcher := ipcache.NewIPIdentityWatcher(backend)\n\t\t\t\tgo ipCacheWatcher.Watch(ctx)\n\n\t\t\t\trc.mutex.Lock()\n\t\t\t\trc.remoteNodes = remoteNodes\n\t\t\t\trc.remoteServices = remoteServices\n\t\t\t\trc.backend = backend\n\t\t\t\trc.ipCacheWatcher = ipCacheWatcher\n\t\t\t\trc.remoteIdentityCache = remoteIdentityCache\n\t\t\t\trc.mutex.Unlock()\n\n\t\t\t\trc.getLogger().Info(\"Established connection to remote etcd\")\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\tStopFunc: func(ctx context.Context) error {\n\t\t\t\trc.mutex.Lock()\n\t\t\t\trc.releaseOldConnection()\n\t\t\t\trc.mutex.Unlock()\n\n\t\t\t\trc.getLogger().Info(\"All resources of remote cluster cleaned up\")\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t)\n}\n\nfunc (rc *remoteCluster) onInsert(allocator RemoteIdentityWatcher) {\n\trc.getLogger().Info(\"New remote cluster configuration\")\n\n\tif skipKvstoreConnection {\n\t\treturn\n\t}\n\n\trc.remoteConnectionControllerName = fmt.Sprintf(\"remote-etcd-%s\", rc.name)\n\trc.restartRemoteConnection(allocator)\n\n\tgo func() {\n\t\tfor {\n\t\t\tval := <-rc.changed\n\t\t\tif val {\n\t\t\t\trc.getLogger().Info(\"etcd configuration has changed, re-creating connection\")\n\t\t\t\trc.restartRemoteConnection(allocator)\n\t\t\t} else {\n\t\t\t\trc.getLogger().Info(\"Closing connection to remote etcd\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\t\/\/ terminate routine when remote cluster is removed\n\t\t\tcase _, ok := <-rc.changed:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t}\n\n\t\t\t\/\/ wait for backend to appear\n\t\t\trc.mutex.RLock()\n\t\t\tif rc.backend == nil {\n\t\t\t\trc.mutex.RUnlock()\n\t\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstatusCheckErrors := rc.backend.StatusCheckErrors()\n\t\t\trc.mutex.RUnlock()\n\n\t\t\terr, ok := <-statusCheckErrors\n\t\t\tif ok && err != nil {\n\t\t\t\trc.getLogger().WithError(err).Warning(\"Error observed on etcd connection, reconnecting etcd\")\n\t\t\t\trc.restartRemoteConnection(allocator)\n\t\t\t}\n\t\t}\n\t}()\n\n}\n\nfunc (rc *remoteCluster) onRemove() {\n\trc.controllers.RemoveAllAndWait()\n\tclose(rc.changed)\n\n\trc.getLogger().Info(\"Remote cluster disconnected\")\n}\n\nfunc (rc *remoteCluster) isReady() bool {\n\trc.mutex.RLock()\n\tdefer rc.mutex.RUnlock()\n\n\treturn rc.isReadyLocked()\n}\n\nfunc (rc *remoteCluster) isReadyLocked() bool {\n\treturn rc.backend != nil && rc.remoteNodes != nil && rc.ipCacheWatcher != nil\n}\n\nfunc (rc *remoteCluster) status() *models.RemoteCluster {\n\trc.mutex.RLock()\n\tdefer rc.mutex.RUnlock()\n\n\t\/\/ This can happen when the controller in restartRemoteConnection is waiting\n\t\/\/ for the first connection to succeed.\n\tvar backendStatus = \"Waiting for initial connection to be established\"\n\tif rc.backend != nil {\n\t\tvar backendError error\n\t\tbackendStatus, backendError = rc.backend.Status()\n\t\tif backendError != nil {\n\t\t\tbackendStatus = backendError.Error()\n\t\t}\n\t}\n\n\treturn &models.RemoteCluster{\n\t\tName: rc.name,\n\t\tReady: rc.isReadyLocked(),\n\t\tNumNodes: int64(rc.remoteNodes.NumEntries()),\n\t\tNumSharedServices: int64(rc.remoteServices.NumEntries()),\n\t\tNumIdentities: int64(rc.remoteIdentityCache.NumEntries()),\n\t\tStatus: backendStatus,\n\t}\n}\n<commit_msg>clustermesh: Release old connection asynchronously<commit_after>\/\/ Copyright 2018 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage clustermesh\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"path\"\n\t\"time\"\n\n\t\"github.com\/cilium\/cilium\/api\/v1\/models\"\n\t\"github.com\/cilium\/cilium\/pkg\/allocator\"\n\t\"github.com\/cilium\/cilium\/pkg\/controller\"\n\t\"github.com\/cilium\/cilium\/pkg\/ipcache\"\n\t\"github.com\/cilium\/cilium\/pkg\/kvstore\"\n\t\"github.com\/cilium\/cilium\/pkg\/kvstore\/store\"\n\t\"github.com\/cilium\/cilium\/pkg\/lock\"\n\tnodeStore \"github.com\/cilium\/cilium\/pkg\/node\/store\"\n\tserviceStore \"github.com\/cilium\/cilium\/pkg\/service\/store\"\n\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ remoteCluster represents another cluster other than the cluster the agent is\n\/\/ running in\ntype remoteCluster struct {\n\t\/\/ name is the name of the cluster\n\tname string\n\n\t\/\/ configPath is the path to the etcd configuration to be used to\n\t\/\/ connect to the etcd cluster of the remote cluster\n\tconfigPath string\n\n\t\/\/ changed receives an event when the remote cluster configuration has\n\t\/\/ changed and is closed when the configuration file was removed\n\tchanged chan bool\n\n\t\/\/ mesh is the cluster mesh this remote cluster belongs to\n\tmesh *ClusterMesh\n\n\tcontrollers *controller.Manager\n\n\t\/\/ remoteConnectionControllerName is the name of the backing controller\n\t\/\/ that maintains the remote connection\n\tremoteConnectionControllerName string\n\n\t\/\/ mutex protects the following variables\n\t\/\/ - backend\n\t\/\/ - store\n\t\/\/ - remoteNodes\n\t\/\/ - ipCacheWatcher\n\t\/\/ - remoteIdentityCache\n\tmutex lock.RWMutex\n\n\t\/\/ store is the shared store representing all nodes in the remote cluster\n\tremoteNodes *store.SharedStore\n\n\t\/\/ remoteServices is the shared store representing services in remote\n\t\/\/ clusters\n\tremoteServices *store.SharedStore\n\n\t\/\/ ipCacheWatcher is the watcher that notifies about IP<->identity\n\t\/\/ changes in the remote cluster\n\tipCacheWatcher *ipcache.IPIdentityWatcher\n\n\t\/\/ remoteIdentityCache is a locally cached copy of the identity\n\t\/\/ allocations in the remote cluster\n\tremoteIdentityCache *allocator.RemoteCache\n\n\t\/\/ backend is the kvstore backend being used\n\tbackend kvstore.BackendOperations\n\n\tswg *lock.StoppableWaitGroup\n}\n\nvar (\n\t\/\/ skipKvstoreConnection skips the etcd connection, used for testing\n\tskipKvstoreConnection bool\n)\n\nfunc (rc *remoteCluster) getLogger() *logrus.Entry {\n\tvar (\n\t\tstatus string\n\t\terr error\n\t)\n\n\tif rc.backend != nil {\n\t\tstatus, err = rc.backend.Status()\n\t}\n\n\treturn log.WithFields(logrus.Fields{\n\t\tfieldClusterName: rc.name,\n\t\tfieldConfig: rc.configPath,\n\t\tfieldKVStoreStatus: status,\n\t\tfieldKVStoreErr: err,\n\t})\n}\n\n\/\/ releaseOldConnection releases the etcd connection to a remote cluster\nfunc (rc *remoteCluster) releaseOldConnection() {\n\trc.mutex.Lock()\n\tipCacheWatcher := rc.ipCacheWatcher\n\trc.ipCacheWatcher = nil\n\n\tremoteNodes := rc.remoteNodes\n\trc.remoteNodes = nil\n\n\tremoteIdentityCache := rc.remoteIdentityCache\n\trc.remoteIdentityCache = nil\n\n\tremoteServices := rc.remoteServices\n\trc.remoteServices = nil\n\n\tbackend := rc.backend\n\trc.backend = nil\n\trc.mutex.Unlock()\n\n\t\/\/ Release resources asynchroneously in the background. Many of these\n\t\/\/ operations may time out if the connection was closed due to an error\n\t\/\/ condition.\n\tgo func() {\n\t\tif ipCacheWatcher != nil {\n\t\t\tipCacheWatcher.Close()\n\t\t}\n\t\tif remoteNodes != nil {\n\t\t\tremoteNodes.Close(context.TODO())\n\t\t}\n\t\tif remoteIdentityCache != nil {\n\t\t\tremoteIdentityCache.Close()\n\t\t}\n\t\tif remoteServices != nil {\n\t\t\tremoteServices.Close(context.TODO())\n\t\t}\n\t\tif backend != nil {\n\t\t\tbackend.Close()\n\t\t}\n\t}()\n}\n\nfunc (rc *remoteCluster) restartRemoteConnection(allocator RemoteIdentityWatcher) {\n\trc.controllers.UpdateController(rc.remoteConnectionControllerName,\n\t\tcontroller.ControllerParams{\n\t\t\tDoFunc: func(ctx context.Context) error {\n\t\t\t\trc.releaseOldConnection()\n\n\t\t\t\tbackend, errChan := kvstore.NewClient(context.TODO(), kvstore.EtcdBackendName,\n\t\t\t\t\tmap[string]string{\n\t\t\t\t\t\tkvstore.EtcdOptionConfig: rc.configPath,\n\t\t\t\t\t},\n\t\t\t\t\t&kvstore.ExtraOptions{NoLockQuorumCheck: true})\n\n\t\t\t\t\/\/ Block until either an error is returned or\n\t\t\t\t\/\/ the channel is closed due to success of the\n\t\t\t\t\/\/ connection\n\t\t\t\trc.getLogger().Debugf(\"Waiting for connection to be established\")\n\t\t\t\terr, isErr := <-errChan\n\t\t\t\tif isErr {\n\t\t\t\t\tif backend != nil {\n\t\t\t\t\t\tbackend.Close()\n\t\t\t\t\t}\n\t\t\t\t\trc.getLogger().WithError(err).Warning(\"Unable to establish etcd connection to remote cluster\")\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\trc.getLogger().Info(\"Connection to remote cluster established\")\n\n\t\t\t\tremoteNodes, err := store.JoinSharedStore(store.Configuration{\n\t\t\t\t\tPrefix: path.Join(nodeStore.NodeStorePrefix, rc.name),\n\t\t\t\t\tKeyCreator: rc.mesh.conf.NodeKeyCreator,\n\t\t\t\t\tSynchronizationInterval: time.Minute,\n\t\t\t\t\tBackend: backend,\n\t\t\t\t\tObserver: rc.mesh.conf.NodeObserver(),\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tbackend.Close()\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tremoteServices, err := store.JoinSharedStore(store.Configuration{\n\t\t\t\t\tPrefix: path.Join(serviceStore.ServiceStorePrefix, rc.name),\n\t\t\t\t\tKeyCreator: func() store.Key {\n\t\t\t\t\t\tsvc := serviceStore.ClusterService{}\n\t\t\t\t\t\treturn &svc\n\t\t\t\t\t},\n\t\t\t\t\tSynchronizationInterval: time.Minute,\n\t\t\t\t\tBackend: backend,\n\t\t\t\t\tObserver: &remoteServiceObserver{\n\t\t\t\t\t\tremoteCluster: rc,\n\t\t\t\t\t\tswg: rc.swg,\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tremoteNodes.Close(context.TODO())\n\t\t\t\t\tbackend.Close()\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\trc.swg.Stop()\n\n\t\t\t\tremoteIdentityCache, err := allocator.WatchRemoteIdentities(backend)\n\t\t\t\tif err != nil {\n\t\t\t\t\tremoteServices.Close(context.TODO())\n\t\t\t\t\tremoteNodes.Close(context.TODO())\n\t\t\t\t\tbackend.Close()\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tipCacheWatcher := ipcache.NewIPIdentityWatcher(backend)\n\t\t\t\tgo ipCacheWatcher.Watch(ctx)\n\n\t\t\t\trc.mutex.Lock()\n\t\t\t\trc.remoteNodes = remoteNodes\n\t\t\t\trc.remoteServices = remoteServices\n\t\t\t\trc.backend = backend\n\t\t\t\trc.ipCacheWatcher = ipCacheWatcher\n\t\t\t\trc.remoteIdentityCache = remoteIdentityCache\n\t\t\t\trc.mutex.Unlock()\n\n\t\t\t\trc.getLogger().Info(\"Established connection to remote etcd\")\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\tStopFunc: func(ctx context.Context) error {\n\t\t\t\trc.releaseOldConnection()\n\t\t\t\trc.getLogger().Info(\"All resources of remote cluster cleaned up\")\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t)\n}\n\nfunc (rc *remoteCluster) onInsert(allocator RemoteIdentityWatcher) {\n\trc.getLogger().Info(\"New remote cluster configuration\")\n\n\tif skipKvstoreConnection {\n\t\treturn\n\t}\n\n\trc.remoteConnectionControllerName = fmt.Sprintf(\"remote-etcd-%s\", rc.name)\n\trc.restartRemoteConnection(allocator)\n\n\tgo func() {\n\t\tfor {\n\t\t\tval := <-rc.changed\n\t\t\tif val {\n\t\t\t\trc.getLogger().Info(\"etcd configuration has changed, re-creating connection\")\n\t\t\t\trc.restartRemoteConnection(allocator)\n\t\t\t} else {\n\t\t\t\trc.getLogger().Info(\"Closing connection to remote etcd\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\t\/\/ terminate routine when remote cluster is removed\n\t\t\tcase _, ok := <-rc.changed:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t}\n\n\t\t\t\/\/ wait for backend to appear\n\t\t\trc.mutex.RLock()\n\t\t\tif rc.backend == nil {\n\t\t\t\trc.mutex.RUnlock()\n\t\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstatusCheckErrors := rc.backend.StatusCheckErrors()\n\t\t\trc.mutex.RUnlock()\n\n\t\t\terr, ok := <-statusCheckErrors\n\t\t\tif ok && err != nil {\n\t\t\t\trc.getLogger().WithError(err).Warning(\"Error observed on etcd connection, reconnecting etcd\")\n\t\t\t\trc.restartRemoteConnection(allocator)\n\t\t\t}\n\t\t}\n\t}()\n\n}\n\nfunc (rc *remoteCluster) onRemove() {\n\trc.controllers.RemoveAllAndWait()\n\tclose(rc.changed)\n\n\trc.getLogger().Info(\"Remote cluster disconnected\")\n}\n\nfunc (rc *remoteCluster) isReady() bool {\n\trc.mutex.RLock()\n\tdefer rc.mutex.RUnlock()\n\n\treturn rc.isReadyLocked()\n}\n\nfunc (rc *remoteCluster) isReadyLocked() bool {\n\treturn rc.backend != nil && rc.remoteNodes != nil && rc.ipCacheWatcher != nil\n}\n\nfunc (rc *remoteCluster) status() *models.RemoteCluster {\n\trc.mutex.RLock()\n\tdefer rc.mutex.RUnlock()\n\n\t\/\/ This can happen when the controller in restartRemoteConnection is waiting\n\t\/\/ for the first connection to succeed.\n\tvar backendStatus = \"Waiting for initial connection to be established\"\n\tif rc.backend != nil {\n\t\tvar backendError error\n\t\tbackendStatus, backendError = rc.backend.Status()\n\t\tif backendError != nil {\n\t\t\tbackendStatus = backendError.Error()\n\t\t}\n\t}\n\n\treturn &models.RemoteCluster{\n\t\tName: rc.name,\n\t\tReady: rc.isReadyLocked(),\n\t\tNumNodes: int64(rc.remoteNodes.NumEntries()),\n\t\tNumSharedServices: int64(rc.remoteServices.NumEntries()),\n\t\tNumIdentities: int64(rc.remoteIdentityCache.NumEntries()),\n\t\tStatus: backendStatus,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Nuclio Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage functionconfig\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/nuclio\/logger\"\n\t\"github.com\/nuclio\/zap\"\n\t\"github.com\/stretchr\/testify\/suite\"\n\t\"k8s.io\/api\/core\/v1\"\n)\n\ntype ReaderTestSuite struct {\n\tsuite.Suite\n\tlogger logger.Logger\n\treader *Reader\n}\n\nfunc (suite *ReaderTestSuite) SetupTest() {\n\tsuite.logger, _ = nucliozap.NewNuclioZapTest(\"test\")\n\tsuite.reader, _ = NewReader(suite.logger)\n}\n\nfunc (suite *ReaderTestSuite) TestPartitions() {\n\tconfigData := `\nmetadata:\n name: python handler\nspec:\n runtime: python\n handler: reverser:handler\n triggers:\n http:\n maxWorkers: 4\n kind: http\n franz:\n kind: \"kafka\"\n url: \"127.0.0.1:9092\"\n total_tasks: 2\n max_task_allocation: 3\n partitions:\n - id: \"0\"\n checkpoint: \"7\"\n - id: \"1\"\n attributes:\n topic: trial\n`\n\n\tconfig := Config{}\n\treader, err := NewReader(suite.logger)\n\tsuite.Require().NoError(err, \"Can't create reader\")\n\terr = reader.Read(strings.NewReader(configData), \"processor\", &config)\n\tsuite.Require().NoError(err, \"Can't reader configuration\")\n\n\ttrigger := config.Spec.Triggers[\"franz\"]\n\tsuite.Require().Equal(2, trigger.TotalTasks, \"Bad total_tasks\")\n\tsuite.Require().Equal(3, trigger.MaxTaskAllocation, \"Bad max_task_allocations\")\n\n\tsuite.Require().Equal(2, len(trigger.Partitions), \"Wrong number of partitions\")\n\tfor _, partition := range trigger.Partitions {\n\t\tswitch partition.ID {\n\t\tcase \"0\":\n\t\t\tsuite.Require().Equal(\"7\", *partition.Checkpoint, \"Bad checkpoint\")\n\t\tcase \"1\":\n\t\t\tsuite.Require().Nil(partition.Checkpoint)\n\t\tdefault:\n\t\t\tsuite.Require().Failf(\"Unknown partition ID - %s\", partition.ID)\n\t\t}\n\t}\n}\n\nfunc (suite *ReaderTestSuite) TestCodeEntryConfigCaseInsensitivity() {\n\tconfigData := `\nmetadata:\n name: code_entry_name\n namespace: code_entry_namespace\nspec:\n runtime: python3.6\n handler: code_entry_handler\n targetCpu: 13 # instead of targetCPU to test case insensitivity\n`\n\n\tconfig := Config{\n\t\tMeta: Meta{\n\t\t\tName: \"my_name\",\n\t\t\tNamespace: \"my_namespace\",\n\t\t},\n\t\tSpec: Spec{\n\t\t\tRuntime: \"python2.7\",\n\t\t\tHandler: \"my_handler\",\n\t\t},\n\t}\n\treader, err := NewReader(suite.logger)\n\tsuite.Require().NoError(err, \"Can't create reader\")\n\terr = reader.Read(strings.NewReader(configData), \"processor\", &config)\n\tsuite.Require().NoError(err, \"Can't reader configuration\")\n\n\tsuite.Require().Equal(13, config.Spec.TargetCPU, \"Bad target cpu\")\n}\n\nfunc (suite *ReaderTestSuite) TestCodeEntryConfigDontOverrideConfigValues() {\n\tconfigData := `\nmetadata:\n name: code_entry_name\n namespace: code_entry_namespace\n labels:\n label_key: label_val\nspec:\n runtime: python3.6\n handler: code_entry_handler\n targetCpu: 13\n build:\n commands:\n - pip install code_entry_package\n env:\n - name: env_var\n value: code_entry_env_val\n - name: code_entry_env_var\n value: code_entry_env_val_2\n`\n\n\tconfig := Config{\n\t\tMeta: Meta{\n\t\t\tName: \"my_name\",\n\t\t\tNamespace: \"my_namespace\",\n\t\t\tLabels: map[string]string{}, \/\/ empty map\n\t\t},\n\t\tSpec: Spec{\n\t\t\tRuntime: \"python2.7\",\n\t\t\tHandler: \"my_handler\",\n\t\t\tEnv: []v1.EnvVar{{Name: \"env_var\", Value: \"my_env_val\"}},\n\t\t\tTargetCPU: 51,\n\t\t},\n\t}\n\treader, err := NewReader(suite.logger)\n\tsuite.Require().NoError(err, \"Can't create reader\")\n\terr = reader.Read(strings.NewReader(configData), \"processor\", &config)\n\tsuite.Require().NoError(err, \"Can't reader configuration\")\n\n\tsuite.Require().Equal(\"my_name\", config.Meta.Name, \"Bad name\")\n\tsuite.Require().Equal(\"my_namespace\", config.Meta.Namespace, \"Bad namespace\")\n\n\texpectedEnvVariables := []v1.EnvVar{\n\t\t{Name: \"env_var\", Value: \"my_env_val\"},\n\t\t{Name: \"code_entry_env_var\", Value: \"code_entry_env_val_2\"},\n\t}\n\tsuite.Require().Equal(expectedEnvVariables, config.Spec.Env, \"Bad env vars\")\n\n\tsuite.Require().Equal(\"my_handler\", config.Spec.Handler, \"Bad handler\")\n\tsuite.Require().Equal(\"python2.7\", config.Spec.Runtime, \"Bad runtime\")\n\tsuite.Require().Equal([]string{\"pip install code_entry_package\"}, config.Spec.Build.Commands, \"Bad commands\")\n\tsuite.Require().Equal(map[string]string{\"label_key\": \"label_val\"}, config.Meta.Labels, \"Bad labels\")\n\tsuite.Require().Equal(51, config.Spec.TargetCPU, \"Bad target cpu\")\n}\n\nfunc (suite *ReaderTestSuite) TestToDeployOptions() {\n\tsuite.T().Skip(\"TODO\")\n\t\/\/\tflatConfigurationContents := `\n\t\/\/\n\t\/\/name: function-name\n\t\/\/namespace: function-namespace\n\t\/\/runtime: golang\n\t\/\/handler: some.module:handler\n\t\/\/triggers:\n\t\/\/\n\t\/\/ http:\n\t\/\/ maxWorkers: 4\n\t\/\/ kind: http\n\t\/\/\n\t\/\/ rmq:\n\t\/\/ kind: rabbit-mq\n\t\/\/ url: amqp:\/\/guest:guest@34.224.60.166:5672\n\t\/\/ attributes:\n\t\/\/ exchangeName: functions\n\t\/\/ queueName: functions\n\t\/\/\n\t\/\/dataBindings:\n\t\/\/ db0:\n\t\/\/ class: v3io\n\t\/\/ secret: something\n\t\/\/ url: http:\/\/192.168.51.240:8081\/1024\n\t\/\/\n\t\/\/build:\n\t\/\/ commands:\n\t\/\/ - command1\n\t\/\/ - command2\n\t\/\/ - command3\n\t\/\/ baseImage: someBaseImage\n\t\/\/`\n\n\t\/\/createFunctionOptions := platform.NewDeployOptions(nil)\n\t\/\/\n\t\/\/err := suite.reader.Read(bytes.NewBufferString(flatConfigurationContents), \"yaml\")\n\t\/\/suite.Require().NoError(err)\n\t\/\/\n\t\/\/err = suite.reader.ToDeployOptions(createFunctionOptions)\n\t\/\/suite.Require().NoError(err)\n\t\/\/\n\n\t\/\/ compare.CompareNoOrder(&createFunctionOptions, &createFunctionOptions)\n\t\/\/ TODO\n}\n\nfunc TestRegistryTestSuite(t *testing.T) {\n\tsuite.Run(t, new(ReaderTestSuite))\n}\n<commit_msg>Added external code entry type merger test for http trigger serviceType (#1973)<commit_after>\/*\nCopyright 2017 The Nuclio Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage functionconfig\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/ghodss\/yaml\"\n\t\"github.com\/nuclio\/logger\"\n\t\"github.com\/nuclio\/zap\"\n\t\"github.com\/stretchr\/testify\/suite\"\n\t\"k8s.io\/api\/core\/v1\"\n)\n\ntype ReaderTestSuite struct {\n\tsuite.Suite\n\tlogger logger.Logger\n\treader *Reader\n}\n\nfunc (suite *ReaderTestSuite) SetupTest() {\n\tsuite.logger, _ = nucliozap.NewNuclioZapTest(\"test\")\n\tsuite.reader, _ = NewReader(suite.logger)\n}\n\nfunc (suite *ReaderTestSuite) TestPartitions() {\n\tconfigData := `\nmetadata:\n name: python handler\nspec:\n runtime: python\n handler: reverser:handler\n triggers:\n http:\n maxWorkers: 4\n kind: http\n franz:\n kind: \"kafka\"\n url: \"127.0.0.1:9092\"\n total_tasks: 2\n max_task_allocation: 3\n partitions:\n - id: \"0\"\n checkpoint: \"7\"\n - id: \"1\"\n attributes:\n topic: trial\n`\n\n\tconfig := Config{}\n\treader, err := NewReader(suite.logger)\n\tsuite.Require().NoError(err, \"Can't create reader\")\n\terr = reader.Read(strings.NewReader(configData), \"processor\", &config)\n\tsuite.Require().NoError(err, \"Can't reader configuration\")\n\n\ttrigger := config.Spec.Triggers[\"franz\"]\n\tsuite.Require().Equal(2, trigger.TotalTasks, \"Bad total_tasks\")\n\tsuite.Require().Equal(3, trigger.MaxTaskAllocation, \"Bad max_task_allocations\")\n\n\tsuite.Require().Equal(2, len(trigger.Partitions), \"Wrong number of partitions\")\n\tfor _, partition := range trigger.Partitions {\n\t\tswitch partition.ID {\n\t\tcase \"0\":\n\t\t\tsuite.Require().Equal(\"7\", *partition.Checkpoint, \"Bad checkpoint\")\n\t\tcase \"1\":\n\t\t\tsuite.Require().Nil(partition.Checkpoint)\n\t\tdefault:\n\t\t\tsuite.Require().Failf(\"Unknown partition ID - %s\", partition.ID)\n\t\t}\n\t}\n}\n\nfunc (suite *ReaderTestSuite) TestCodeEntryConfigCaseInsensitivity() {\n\tconfigData := `\nmetadata:\n name: code_entry_name\n namespace: code_entry_namespace\nspec:\n runtime: python3.6\n handler: code_entry_handler\n targetCpu: 13 # instead of targetCPU to test case insensitivity\n`\n\n\tconfig := Config{\n\t\tMeta: Meta{\n\t\t\tName: \"my_name\",\n\t\t\tNamespace: \"my_namespace\",\n\t\t},\n\t\tSpec: Spec{\n\t\t\tRuntime: \"python2.7\",\n\t\t\tHandler: \"my_handler\",\n\t\t},\n\t}\n\treader, err := NewReader(suite.logger)\n\tsuite.Require().NoError(err, \"Can't create reader\")\n\terr = reader.Read(strings.NewReader(configData), \"processor\", &config)\n\tsuite.Require().NoError(err, \"Can't reader configuration\")\n\n\tsuite.Require().Equal(13, config.Spec.TargetCPU, \"Bad target cpu\")\n}\n\nfunc (suite *ReaderTestSuite) TestCodeEntryConfigDontOverrideConfigValues() {\n\tconfigData := `\nmetadata:\n name: code_entry_name\n namespace: code_entry_namespace\n labels:\n label_key: label_val\nspec:\n runtime: python3.6\n handler: code_entry_handler\n targetCpu: 13\n build:\n commands:\n - pip install code_entry_package\n env:\n - name: env_var\n value: code_entry_env_val\n - name: code_entry_env_var\n value: code_entry_env_val_2\n`\n\n\tconfig := Config{\n\t\tMeta: Meta{\n\t\t\tName: \"my_name\",\n\t\t\tNamespace: \"my_namespace\",\n\t\t\tLabels: map[string]string{}, \/\/ empty map\n\t\t},\n\t\tSpec: Spec{\n\t\t\tRuntime: \"python2.7\",\n\t\t\tHandler: \"my_handler\",\n\t\t\tEnv: []v1.EnvVar{{Name: \"env_var\", Value: \"my_env_val\"}},\n\t\t\tTargetCPU: 51,\n\t\t},\n\t}\n\treader, err := NewReader(suite.logger)\n\tsuite.Require().NoError(err, \"Can't create reader\")\n\terr = reader.Read(strings.NewReader(configData), \"processor\", &config)\n\tsuite.Require().NoError(err, \"Can't reader configuration\")\n\n\tsuite.Require().Equal(\"my_name\", config.Meta.Name, \"Bad name\")\n\tsuite.Require().Equal(\"my_namespace\", config.Meta.Namespace, \"Bad namespace\")\n\n\texpectedEnvVariables := []v1.EnvVar{\n\t\t{Name: \"env_var\", Value: \"my_env_val\"},\n\t\t{Name: \"code_entry_env_var\", Value: \"code_entry_env_val_2\"},\n\t}\n\tsuite.Require().Equal(expectedEnvVariables, config.Spec.Env, \"Bad env vars\")\n\n\tsuite.Require().Equal(\"my_handler\", config.Spec.Handler, \"Bad handler\")\n\tsuite.Require().Equal(\"python2.7\", config.Spec.Runtime, \"Bad runtime\")\n\tsuite.Require().Equal([]string{\"pip install code_entry_package\"}, config.Spec.Build.Commands, \"Bad commands\")\n\tsuite.Require().Equal(map[string]string{\"label_key\": \"label_val\"}, config.Meta.Labels, \"Bad labels\")\n\tsuite.Require().Equal(51, config.Spec.TargetCPU, \"Bad target cpu\")\n}\n\nfunc (suite *ReaderTestSuite) TestToDeployOptions() {\n\tsuite.T().Skip(\"TODO\")\n\t\/\/\tflatConfigurationContents := `\n\t\/\/\n\t\/\/name: function-name\n\t\/\/namespace: function-namespace\n\t\/\/runtime: golang\n\t\/\/handler: some.module:handler\n\t\/\/triggers:\n\t\/\/\n\t\/\/ http:\n\t\/\/ maxWorkers: 4\n\t\/\/ kind: http\n\t\/\/\n\t\/\/ rmq:\n\t\/\/ kind: rabbit-mq\n\t\/\/ url: amqp:\/\/guest:guest@34.224.60.166:5672\n\t\/\/ attributes:\n\t\/\/ exchangeName: functions\n\t\/\/ queueName: functions\n\t\/\/\n\t\/\/dataBindings:\n\t\/\/ db0:\n\t\/\/ class: v3io\n\t\/\/ secret: something\n\t\/\/ url: http:\/\/192.168.51.240:8081\/1024\n\t\/\/\n\t\/\/build:\n\t\/\/ commands:\n\t\/\/ - command1\n\t\/\/ - command2\n\t\/\/ - command3\n\t\/\/ baseImage: someBaseImage\n\t\/\/`\n\n\t\/\/createFunctionOptions := platform.NewDeployOptions(nil)\n\t\/\/\n\t\/\/err := suite.reader.Read(bytes.NewBufferString(flatConfigurationContents), \"yaml\")\n\t\/\/suite.Require().NoError(err)\n\t\/\/\n\t\/\/err = suite.reader.ToDeployOptions(createFunctionOptions)\n\t\/\/suite.Require().NoError(err)\n\t\/\/\n\n\t\/\/ compare.CompareNoOrder(&createFunctionOptions, &createFunctionOptions)\n\t\/\/ TODO\n}\n\nfunc (suite *ReaderTestSuite) TestCodeEntryConfigTriggerMerge() {\n\ttype TestTrigger struct {\n\t\tName string\n\t\tServiceType string\n\t}\n\n\ttestCases := []struct {\n\t\tname string\n\t\tconfigTrigger TestTrigger\n\t\tcodeEntryTrigger TestTrigger\n\t\texpectedConfigTrigger TestTrigger\n\t\texpectValidityError bool\n\t}{\n\t\t{\n\t\t\tname: \"bothNamesDefault\",\n\t\t\tconfigTrigger: TestTrigger{\n\t\t\t\tName: \"default-http\",\n\t\t\t\tServiceType: \"ClusterIP\",\n\t\t\t},\n\t\t\tcodeEntryTrigger: TestTrigger{\n\t\t\t\tName: \"default-http\",\n\t\t\t\tServiceType: \"NodePort\",\n\t\t\t},\n\t\t\texpectedConfigTrigger: TestTrigger{\n\t\t\t\tName: \"default-http\",\n\t\t\t\tServiceType: \"ClusterIP\",\n\t\t\t},\n\t\t\texpectValidityError: false,\n\t\t},\n\t\t{\n\t\t\tname: \"codeEntryDefaultNameConfigCustomName\",\n\t\t\tconfigTrigger: TestTrigger{\n\t\t\t\tName: \"my-trigger\",\n\t\t\t\tServiceType: \"ClusterIP\",\n\t\t\t},\n\t\t\tcodeEntryTrigger: TestTrigger{\n\t\t\t\tName: \"default-http\",\n\t\t\t\tServiceType: \"NodePort\",\n\t\t\t},\n\t\t\texpectedConfigTrigger: TestTrigger{\n\t\t\t\tName: \"my-trigger\",\n\t\t\t\tServiceType: \"ClusterIP\",\n\t\t\t},\n\t\t\texpectValidityError: false,\n\t\t},\n\t\t{\n\t\t\tname: \"codeEntryCustomNameConfigDefaultName\",\n\t\t\tconfigTrigger: TestTrigger{\n\t\t\t\tName: \"default-http\",\n\t\t\t\tServiceType: \"ClusterIP\",\n\t\t\t},\n\t\t\tcodeEntryTrigger: TestTrigger{\n\t\t\t\tName: \"my-trigger\",\n\t\t\t\tServiceType: \"NodePort\",\n\t\t\t},\n\t\t\texpectedConfigTrigger: TestTrigger{\n\t\t\t\tName: \"my-trigger\",\n\t\t\t\tServiceType: \"NodePort\",\n\t\t\t},\n\t\t\texpectValidityError: false,\n\t\t},\n\t\t{\n\t\t\tname: \"bothSameCustomNames\",\n\t\t\tconfigTrigger: TestTrigger{\n\t\t\t\tName: \"my-trigger\",\n\t\t\t\tServiceType: \"ClusterIP\",\n\t\t\t},\n\t\t\tcodeEntryTrigger: TestTrigger{\n\t\t\t\tName: \"my-trigger\",\n\t\t\t\tServiceType: \"NodePort\",\n\t\t\t},\n\t\t\texpectedConfigTrigger: TestTrigger{\n\t\t\t\tName: \"my-trigger\",\n\t\t\t\tServiceType: \"ClusterIP\",\n\t\t\t},\n\t\t\texpectValidityError: false,\n\t\t},\n\t\t{\n\t\t\tname: \"differentCustomNames\",\n\t\t\tconfigTrigger: TestTrigger{\n\t\t\t\tName: \"my-trigger\",\n\t\t\t\tServiceType: \"ClusterIP\",\n\t\t\t},\n\t\t\tcodeEntryTrigger: TestTrigger{\n\t\t\t\tName: \"not-my-trigger\",\n\t\t\t\tServiceType: \"NodePort\",\n\t\t\t},\n\t\t\texpectValidityError: true,\n\t\t},\n\t}\n\n\tfor _, testCase := range testCases {\n\t\tsuite.Run(testCase.name, func() {\n\t\t\tconfig := Config{\n\t\t\t\tMeta: Meta{\n\t\t\t\t\tName: \"my_name\",\n\t\t\t\t\tNamespace: \"my_namespace\",\n\t\t\t\t},\n\t\t\t\tSpec: Spec{\n\t\t\t\t\tRuntime: \"python3.7\",\n\t\t\t\t\tHandler: \"my_handler\",\n\t\t\t\t},\n\t\t\t}\n\t\t\tconfig.Spec.Triggers = map[string]Trigger{\n\t\t\t\ttestCase.codeEntryTrigger.Name: {\n\t\t\t\t\tName: testCase.codeEntryTrigger.Name,\n\t\t\t\t\tKind: \"http\",\n\t\t\t\t\tAttributes: map[string]interface{}{\"serviceType\": testCase.codeEntryTrigger.ServiceType},\n\t\t\t\t},\n\t\t\t}\n\t\t\tconfigData, err := yaml.Marshal(config)\n\t\t\tsuite.Require().NoError(err, \"Can't marshal config\")\n\n\t\t\tconfig.Spec.Triggers = map[string]Trigger{\n\t\t\t\ttestCase.configTrigger.Name: {\n\t\t\t\t\tName: testCase.configTrigger.Name,\n\t\t\t\t\tKind: \"http\",\n\t\t\t\t\tAttributes: map[string]interface{}{\"serviceType\": testCase.configTrigger.ServiceType},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\treader, err := NewReader(suite.logger)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = reader.Read(strings.NewReader(string(configData)), \"processor\", &config)\n\t\t\tsuite.Require().NoError(err)\n\n\t\t\terr = reader.validateConfigurationFileFunctionConfig(&config)\n\t\t\tif testCase.expectValidityError {\n\t\t\t\tsuite.Require().Error(err)\n\t\t\t} else {\n\t\t\t\tsuite.Require().NoError(err)\n\t\t\t\tsuite.Assert().Equal(1, len(config.Spec.Triggers))\n\t\t\t\tsuite.Assert().Equal(testCase.expectedConfigTrigger.Name,\n\t\t\t\t\tconfig.Spec.Triggers[testCase.expectedConfigTrigger.Name].Name)\n\t\t\t\tsuite.Assert().Equal(testCase.expectedConfigTrigger.ServiceType,\n\t\t\t\t\tconfig.Spec.Triggers[testCase.expectedConfigTrigger.Name].Attributes[\"serviceType\"])\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestRegistryTestSuite(t *testing.T) {\n\tsuite.Run(t, new(ReaderTestSuite))\n}\n<|endoftext|>"} {"text":"<commit_before>package cli\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"testing\"\n\n\t\"github.com\/spf13\/pflag\"\n\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n\tkcmd \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\"\n\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/util\/clientcmd\"\n)\n\n\/\/ MissingCommands is the list of commands we're already missing.\n\/\/ NEVER ADD TO THIS LIST\n\/\/ TODO kill this list\nvar MissingCommands = sets.NewString(\n\t\"namespace\", \"rolling-update\",\n\t\"cluster-info\", \"api-versions\",\n\t\"stop\",\n\n\t\/\/ are on admin commands\n\t\"cordon\",\n\t\"drain\",\n\t\"uncordon\",\n\t\"taint\",\n\t\"top\",\n\t\"certificate\",\n\n\t\/\/ TODO commands to assess\n\t\"apiversions\",\n\t\"clusterinfo\",\n\t\"resize\",\n\t\"rollingupdate\",\n\t\"run-container\",\n\t\"update\",\n)\n\n\/\/ WhitelistedCommands is the list of commands we're never going to have,\n\/\/ defend each one with a comment\nvar WhitelistedCommands = sets.NewString()\n\nfunc TestKubectlCompatibility(t *testing.T) {\n\tf := clientcmd.New(pflag.NewFlagSet(\"name\", pflag.ContinueOnError))\n\n\toc := NewCommandCLI(\"oc\", \"oc\", &bytes.Buffer{}, ioutil.Discard, ioutil.Discard)\n\tkubectl := kcmd.NewKubectlCommand(f, nil, ioutil.Discard, ioutil.Discard)\n\nkubectlLoop:\n\tfor _, kubecmd := range kubectl.Commands() {\n\t\tfor _, occmd := range oc.Commands() {\n\t\t\tif kubecmd.Name() == occmd.Name() {\n\t\t\t\tif MissingCommands.Has(kubecmd.Name()) {\n\t\t\t\t\tt.Errorf(\"%s was supposed to be missing\", kubecmd.Name())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif WhitelistedCommands.Has(kubecmd.Name()) {\n\t\t\t\t\tt.Errorf(\"%s was supposed to be whitelisted\", kubecmd.Name())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcontinue kubectlLoop\n\t\t\t}\n\t\t}\n\t\tif MissingCommands.Has(kubecmd.Name()) || WhitelistedCommands.Has(kubecmd.Name()) {\n\t\t\tcontinue\n\t\t}\n\n\t\tt.Errorf(\"missing %q,\", kubecmd.Name())\n\t}\n}\n\n\/\/ this only checks one level deep for nested commands, but it does ensure that we've gotten several\n\/\/ --validate flags. Based on that we can reasonably assume we got them in the kube commands since they\n\/\/ all share the same registration.\nfunc TestValidateDisabled(t *testing.T) {\n\tf := clientcmd.New(pflag.NewFlagSet(\"name\", pflag.ContinueOnError))\n\n\toc := NewCommandCLI(\"oc\", \"oc\", &bytes.Buffer{}, ioutil.Discard, ioutil.Discard)\n\tkubectl := kcmd.NewKubectlCommand(f, nil, ioutil.Discard, ioutil.Discard)\n\n\tfor _, kubecmd := range kubectl.Commands() {\n\t\tfor _, occmd := range oc.Commands() {\n\t\t\tif kubecmd.Name() == occmd.Name() {\n\t\t\t\tocValidateFlag := occmd.Flags().Lookup(\"validate\")\n\t\t\t\tif ocValidateFlag == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif ocValidateFlag.Value.String() != \"false\" {\n\t\t\t\t\tt.Errorf(\"%s --validate is not defaulting to false\", occmd.Name())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n}\n<commit_msg>boring: exclude new alpha kubectl command<commit_after>package cli\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"testing\"\n\n\t\"github.com\/spf13\/pflag\"\n\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n\tkcmd \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\"\n\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/util\/clientcmd\"\n)\n\n\/\/ MissingCommands is the list of commands we're already missing.\n\/\/ NEVER ADD TO THIS LIST\n\/\/ TODO kill this list\nvar MissingCommands = sets.NewString(\n\t\"namespace\", \"rolling-update\",\n\t\"cluster-info\", \"api-versions\",\n\t\"stop\",\n\n\t\/\/ are on admin commands\n\t\"cordon\",\n\t\"drain\",\n\t\"uncordon\",\n\t\"taint\",\n\t\"top\",\n\t\"certificate\",\n\n\t\/\/ TODO commands to assess\n\t\"apiversions\",\n\t\"clusterinfo\",\n\t\"resize\",\n\t\"rollingupdate\",\n\t\"run-container\",\n\t\"update\",\n\t\"alpha\",\n)\n\n\/\/ WhitelistedCommands is the list of commands we're never going to have,\n\/\/ defend each one with a comment\nvar WhitelistedCommands = sets.NewString()\n\nfunc TestKubectlCompatibility(t *testing.T) {\n\tf := clientcmd.New(pflag.NewFlagSet(\"name\", pflag.ContinueOnError))\n\n\toc := NewCommandCLI(\"oc\", \"oc\", &bytes.Buffer{}, ioutil.Discard, ioutil.Discard)\n\tkubectl := kcmd.NewKubectlCommand(f, nil, ioutil.Discard, ioutil.Discard)\n\nkubectlLoop:\n\tfor _, kubecmd := range kubectl.Commands() {\n\t\tfor _, occmd := range oc.Commands() {\n\t\t\tif kubecmd.Name() == occmd.Name() {\n\t\t\t\tif MissingCommands.Has(kubecmd.Name()) {\n\t\t\t\t\tt.Errorf(\"%s was supposed to be missing\", kubecmd.Name())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif WhitelistedCommands.Has(kubecmd.Name()) {\n\t\t\t\t\tt.Errorf(\"%s was supposed to be whitelisted\", kubecmd.Name())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcontinue kubectlLoop\n\t\t\t}\n\t\t}\n\t\tif MissingCommands.Has(kubecmd.Name()) || WhitelistedCommands.Has(kubecmd.Name()) {\n\t\t\tcontinue\n\t\t}\n\n\t\tt.Errorf(\"missing %q,\", kubecmd.Name())\n\t}\n}\n\n\/\/ this only checks one level deep for nested commands, but it does ensure that we've gotten several\n\/\/ --validate flags. Based on that we can reasonably assume we got them in the kube commands since they\n\/\/ all share the same registration.\nfunc TestValidateDisabled(t *testing.T) {\n\tf := clientcmd.New(pflag.NewFlagSet(\"name\", pflag.ContinueOnError))\n\n\toc := NewCommandCLI(\"oc\", \"oc\", &bytes.Buffer{}, ioutil.Discard, ioutil.Discard)\n\tkubectl := kcmd.NewKubectlCommand(f, nil, ioutil.Discard, ioutil.Discard)\n\n\tfor _, kubecmd := range kubectl.Commands() {\n\t\tfor _, occmd := range oc.Commands() {\n\t\t\tif kubecmd.Name() == occmd.Name() {\n\t\t\t\tocValidateFlag := occmd.Flags().Lookup(\"validate\")\n\t\t\t\tif ocValidateFlag == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif ocValidateFlag.Value.String() != \"false\" {\n\t\t\t\t\tt.Errorf(\"%s --validate is not defaulting to false\", occmd.Name())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package statusupdater implements interfaces that enable updating the status\n\/\/ of API objects.\npackage statusupdater\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/golang\/glog\"\n\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\tkcache \"k8s.io\/kubernetes\/pkg\/client\/cache\"\n\t\"k8s.io\/kubernetes\/pkg\/client\/clientset_generated\/internalclientset\"\n\t\"k8s.io\/kubernetes\/pkg\/controller\/volume\/attachdetach\/cache\"\n\t\"k8s.io\/kubernetes\/pkg\/conversion\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/strategicpatch\"\n)\n\n\/\/ NodeStatusUpdater defines a set of operations for updating the\n\/\/ VolumesAttached field in the Node Status.\ntype NodeStatusUpdater interface {\n\t\/\/ Gets a list of node statuses that should be updated from the actual state\n\t\/\/ of the world and updates them.\n\tUpdateNodeStatuses() error\n}\n\n\/\/ NewNodeStatusUpdater returns a new instance of NodeStatusUpdater.\nfunc NewNodeStatusUpdater(\n\tkubeClient internalclientset.Interface,\n\tnodeInformer kcache.SharedInformer,\n\tactualStateOfWorld cache.ActualStateOfWorld) NodeStatusUpdater {\n\treturn &nodeStatusUpdater{\n\t\tactualStateOfWorld: actualStateOfWorld,\n\t\tnodeInformer: nodeInformer,\n\t\tkubeClient: kubeClient,\n\t}\n}\n\ntype nodeStatusUpdater struct {\n\tkubeClient internalclientset.Interface\n\tnodeInformer kcache.SharedInformer\n\tactualStateOfWorld cache.ActualStateOfWorld\n}\n\nfunc (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {\n\t\/\/ TODO: investigate right behavior if nodeName is empty\n\t\/\/ kubernetes\/kubernetes\/issues\/37777\n\tnodesToUpdate := nsu.actualStateOfWorld.GetVolumesToReportAttached()\n\tfor nodeName, attachedVolumes := range nodesToUpdate {\n\t\tnodeObj, exists, err := nsu.nodeInformer.GetStore().GetByKey(string(nodeName))\n\t\tif nodeObj == nil || !exists || err != nil {\n\t\t\t\/\/ If node does not exist, its status cannot be updated, log error and\n\t\t\t\/\/ reset flag statusUpdateNeeded back to true to indicate this node status\n\t\t\t\/\/ needs to be udpated again\n\t\t\tglog.V(2).Infof(\n\t\t\t\t\"Could not update node status. Failed to find node %q in NodeInformer cache. %v\",\n\t\t\t\tnodeName,\n\t\t\t\terr)\n\t\t\tnsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName)\n\t\t\tcontinue\n\t\t}\n\n\t\tclonedNode, err := conversion.NewCloner().DeepCopy(nodeObj)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error cloning node %q: %v\",\n\t\t\t\tnodeName,\n\t\t\t\terr)\n\t\t}\n\n\t\tnode, ok := clonedNode.(*api.Node)\n\t\tif !ok || node == nil {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"failed to cast %q object %#v to Node\",\n\t\t\t\tnodeName,\n\t\t\t\tclonedNode)\n\t\t}\n\n\t\toldData, err := json.Marshal(node)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"failed to Marshal oldData for node %q. %v\",\n\t\t\t\tnodeName,\n\t\t\t\terr)\n\t\t}\n\n\t\tnode.Status.VolumesAttached = attachedVolumes\n\n\t\tnewData, err := json.Marshal(node)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"failed to Marshal newData for node %q. %v\",\n\t\t\t\tnodeName,\n\t\t\t\terr)\n\t\t}\n\n\t\tpatchBytes, err :=\n\t\t\tstrategicpatch.CreateStrategicMergePatch(oldData, newData, node)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"failed to CreateStrategicMergePatch for node %q. %v\",\n\t\t\t\tnodeName,\n\t\t\t\terr)\n\t\t}\n\n\t\t_, err = nsu.kubeClient.Core().Nodes().PatchStatus(string(nodeName), patchBytes)\n\t\tif err != nil {\n\t\t\t\/\/ If update node status fails, reset flag statusUpdateNeeded back to true\n\t\t\t\/\/ to indicate this node status needs to be udpated again\n\t\t\tnsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName)\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"failed to kubeClient.Core().Nodes().Patch for node %q. %v\",\n\t\t\t\tnodeName,\n\t\t\t\terr)\n\t\t}\n\t\tglog.V(2).Infof(\n\t\t\t\"Updating status for node %q succeeded. patchBytes: %q VolumesAttached: %v\",\n\t\t\tnodeName,\n\t\t\tstring(patchBytes),\n\t\t\tnode.Status.VolumesAttached)\n\n\t}\n\treturn nil\n}\n<commit_msg>fix typo<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package statusupdater implements interfaces that enable updating the status\n\/\/ of API objects.\npackage statusupdater\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/golang\/glog\"\n\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\tkcache \"k8s.io\/kubernetes\/pkg\/client\/cache\"\n\t\"k8s.io\/kubernetes\/pkg\/client\/clientset_generated\/internalclientset\"\n\t\"k8s.io\/kubernetes\/pkg\/controller\/volume\/attachdetach\/cache\"\n\t\"k8s.io\/kubernetes\/pkg\/conversion\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/strategicpatch\"\n)\n\n\/\/ NodeStatusUpdater defines a set of operations for updating the\n\/\/ VolumesAttached field in the Node Status.\ntype NodeStatusUpdater interface {\n\t\/\/ Gets a list of node statuses that should be updated from the actual state\n\t\/\/ of the world and updates them.\n\tUpdateNodeStatuses() error\n}\n\n\/\/ NewNodeStatusUpdater returns a new instance of NodeStatusUpdater.\nfunc NewNodeStatusUpdater(\n\tkubeClient internalclientset.Interface,\n\tnodeInformer kcache.SharedInformer,\n\tactualStateOfWorld cache.ActualStateOfWorld) NodeStatusUpdater {\n\treturn &nodeStatusUpdater{\n\t\tactualStateOfWorld: actualStateOfWorld,\n\t\tnodeInformer: nodeInformer,\n\t\tkubeClient: kubeClient,\n\t}\n}\n\ntype nodeStatusUpdater struct {\n\tkubeClient internalclientset.Interface\n\tnodeInformer kcache.SharedInformer\n\tactualStateOfWorld cache.ActualStateOfWorld\n}\n\nfunc (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {\n\t\/\/ TODO: investigate right behavior if nodeName is empty\n\t\/\/ kubernetes\/kubernetes\/issues\/37777\n\tnodesToUpdate := nsu.actualStateOfWorld.GetVolumesToReportAttached()\n\tfor nodeName, attachedVolumes := range nodesToUpdate {\n\t\tnodeObj, exists, err := nsu.nodeInformer.GetStore().GetByKey(string(nodeName))\n\t\tif nodeObj == nil || !exists || err != nil {\n\t\t\t\/\/ If node does not exist, its status cannot be updated, log error and\n\t\t\t\/\/ reset flag statusUpdateNeeded back to true to indicate this node status\n\t\t\t\/\/ needs to be updated again\n\t\t\tglog.V(2).Infof(\n\t\t\t\t\"Could not update node status. Failed to find node %q in NodeInformer cache. %v\",\n\t\t\t\tnodeName,\n\t\t\t\terr)\n\t\t\tnsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName)\n\t\t\tcontinue\n\t\t}\n\n\t\tclonedNode, err := conversion.NewCloner().DeepCopy(nodeObj)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error cloning node %q: %v\",\n\t\t\t\tnodeName,\n\t\t\t\terr)\n\t\t}\n\n\t\tnode, ok := clonedNode.(*api.Node)\n\t\tif !ok || node == nil {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"failed to cast %q object %#v to Node\",\n\t\t\t\tnodeName,\n\t\t\t\tclonedNode)\n\t\t}\n\n\t\toldData, err := json.Marshal(node)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"failed to Marshal oldData for node %q. %v\",\n\t\t\t\tnodeName,\n\t\t\t\terr)\n\t\t}\n\n\t\tnode.Status.VolumesAttached = attachedVolumes\n\n\t\tnewData, err := json.Marshal(node)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"failed to Marshal newData for node %q. %v\",\n\t\t\t\tnodeName,\n\t\t\t\terr)\n\t\t}\n\n\t\tpatchBytes, err :=\n\t\t\tstrategicpatch.CreateStrategicMergePatch(oldData, newData, node)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"failed to CreateStrategicMergePatch for node %q. %v\",\n\t\t\t\tnodeName,\n\t\t\t\terr)\n\t\t}\n\n\t\t_, err = nsu.kubeClient.Core().Nodes().PatchStatus(string(nodeName), patchBytes)\n\t\tif err != nil {\n\t\t\t\/\/ If update node status fails, reset flag statusUpdateNeeded back to true\n\t\t\t\/\/ to indicate this node status needs to be updated again\n\t\t\tnsu.actualStateOfWorld.SetNodeStatusUpdateNeeded(nodeName)\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"failed to kubeClient.Core().Nodes().Patch for node %q. %v\",\n\t\t\t\tnodeName,\n\t\t\t\terr)\n\t\t}\n\t\tglog.V(2).Infof(\n\t\t\t\"Updating status for node %q succeeded. patchBytes: %q VolumesAttached: %v\",\n\t\t\tnodeName,\n\t\t\tstring(patchBytes),\n\t\t\tnode.Status.VolumesAttached)\n\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Marc-Antoine Ruel. All rights reserved.\n\/\/ Use of this source code is governed under the Apache License, Version 2.0\n\/\/ that can be found in the LICENSE file.\n\n\/\/ Packages the static files in a .go file.\n\/\/go:generate go run ..\/package\/main.go -out static_files_gen.go ..\/..\/..\/web\n\n\/\/ dlibox drives the dlibox LED strip on a Raspberry Pi. It runs a web server\n\/\/ for remote control.\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"syscall\"\n\n\t\"github.com\/kardianos\/osext\"\n\t\"github.com\/maruel\/dlibox\/go\/anim1d\"\n\t\"github.com\/maruel\/dlibox\/go\/bw2d\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/buses\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/buses\/bcm283x\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/buses\/i2c\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/buses\/ir\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/buses\/spi\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/devices\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/devices\/apa102\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/devices\/ssd1306\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/fakes\/screen\"\n\t\"github.com\/maruel\/dlibox\/go\/psf\"\n\t\"github.com\/maruel\/interrupt\"\n)\n\nfunc initDisplay() (devices.Display, error) {\n\ti2cBus, err := i2c.Make(1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdisplay, err := ssd1306.MakeI2C(i2cBus, 128, 64, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tf12, err := psf.Load(\"Terminus12x6\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tf20, err := psf.Load(\"Terminus20x10\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ TODO(maruel): Leverage bme280 while at it but don't fail if not\n\t\/\/ connected.\n\timg, err := bw2d.Make(display.W, display.H)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tf20.Draw(img, 0, 0, bw2d.On, nil, \"dlibox!\")\n\tf12.Draw(img, 0, display.H-f12.H-1, bw2d.On, nil, \"is awesome\")\n\tif _, err = display.Write(img.Buf); err != nil {\n\t\treturn nil, err\n\t}\n\treturn display, nil\n}\n\nfunc initIR(painter *anim1d.Painter, config *IR) error {\n\tbus, err := ir.Make()\n\tif err != nil {\n\t\treturn err\n\t}\n\tgo func() {\n\t\tc := bus.Channel()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase msg, ok := <-c:\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif !msg.Repeat {\n\t\t\t\t\t\/\/ TODO(maruel): Locking.\n\t\t\t\t\tif pat := config.Mapping[msg.Key]; len(pat) != 0 {\n\t\t\t\t\t\tpainter.SetPattern(string(pat))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\treturn nil\n}\n\nfunc initPIR(painter *anim1d.Painter, config *PIR) error {\n\t\/\/ TODO(maruel): Cleaner.\n\tpin := bcm283x.GetPin(config.Pin)\n\tif pin.String() == \"INVALID\" {\n\t\treturn nil\n\t}\n\tif err := pin.In(buses.Down, buses.Rising); err != nil {\n\t\treturn err\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tpin.ReadEdge()\n\t\t\t\/\/ TODO(maruel): Locking.\n\t\t\tpainter.SetPattern(string(config.Pattern))\n\t\t}\n\t}()\n\treturn nil\n}\n\nfunc mainImpl() error {\n\tthisFile, err := osext.Executable()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcpuprofile := flag.String(\"cpuprofile\", \"\", \"dump CPU profile in file\")\n\tport := flag.Int(\"port\", 8010, \"http port to listen on\")\n\tverbose := flag.Bool(\"verbose\", false, \"enable log output\")\n\tfake := flag.Bool(\"fake\", false, \"use a terminal mock, useful to test without the hardware\")\n\tflag.Parse()\n\tif flag.NArg() != 0 {\n\t\treturn fmt.Errorf(\"unexpected argument: %s\", flag.Args())\n\t}\n\n\tif !*verbose {\n\t\tlog.SetOutput(ioutil.Discard)\n\t}\n\n\tinterrupt.HandleCtrlC()\n\tdefer interrupt.Set()\n\tchanSignal := make(chan os.Signal)\n\tgo func() {\n\t\t<-chanSignal\n\t\tinterrupt.Set()\n\t}()\n\tsignal.Notify(chanSignal, syscall.SIGTERM)\n\n\tvar properties []string\n\tif *cpuprofile != \"\" {\n\t\t\/\/ Run with cpuprofile, then use 'go tool pprof' to analyze it. See\n\t\t\/\/ http:\/\/blog.golang.org\/profiling-go-programs for more details.\n\t\tf, err := os.Create(*cpuprofile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpprof.StartCPUProfile(f)\n\t\tdefer pprof.StopCPUProfile()\n\t\tproperties = append(properties, \"profiled=1\")\n\t}\n\n\t\/\/ Config.\n\tconfig := ConfigMgr{}\n\tconfig.ResetDefault()\n\tif err := config.Load(); err != nil {\n\t\treturn err\n\t}\n\tdefer config.Close()\n\n\tb, err := json.MarshalIndent(config, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Config:\\n%s\", string(b))\n\n\tfps := 60\n\tif bcm283x.MaxSpeed < 900000 || runtime.NumCPU() < 4 {\n\t\t\/\/ Use 30Hz on slower devices because it is too slow.\n\t\tfps = 30\n\t}\n\n\t\/\/ Output (terminal with ANSI codes or APA102).\n\tvar leds devices.Display\n\tif *fake {\n\t\t\/\/ Hardcode to 100 characters when using a terminal output.\n\t\t\/\/ TODO(maruel): Query the terminal and use its width.\n\t\tleds = screen.Make(100)\n\t\tdefer os.Stdout.Write([]byte(\"\\033[0m\\n\"))\n\t\t\/\/ Use lower refresh rate too.\n\t\tfps = 30\n\t\tproperties = append(properties, \"fake=1\")\n\t} else {\n\t\tspiBus, err := spi.Make(0, 0, config.Settings.APA102.SPIspeed)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer spiBus.Close()\n\t\tif leds, err = apa102.Make(spiBus, config.Settings.APA102.NumberLights, 255, 6500); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tproperties = append(properties, fmt.Sprintf(\"APA102=%d\", config.Settings.APA102.NumberLights))\n\t}\n\n\t\/\/ Try to initialize the display.\n\tif _, err = initDisplay(); err != nil {\n\t\tlog.Printf(\"Display not connected\")\n\t}\n\n\t\/\/ Painter.\n\tp := anim1d.MakePainter(leds, fps)\n\tif err := config.Init(p); err != nil {\n\t\treturn err\n\t}\n\tstartWebServer(*port, p, &config.Config)\n\n\tif err = initIR(p, &config.Settings.IR); err != nil {\n\t\tlog.Printf(\"IR not connected\")\n\t}\n\n\tif err = initPIR(p, &config.Settings.PIR); err != nil {\n\t\tlog.Printf(\"PIR not connected\")\n\t}\n\n\t\/*\n\t\tservice, err := initmDNS(*port, properties)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer service.Close()\n\t*\/\n\n\treturn watchFile(thisFile)\n}\n\nfunc main() {\n\tif err := mainImpl(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"\\ndlibox: %s.\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n<commit_msg>Fail less often; survive lack of SPI<commit_after>\/\/ Copyright 2016 Marc-Antoine Ruel. All rights reserved.\n\/\/ Use of this source code is governed under the Apache License, Version 2.0\n\/\/ that can be found in the LICENSE file.\n\n\/\/ Packages the static files in a .go file.\n\/\/go:generate go run ..\/package\/main.go -out static_files_gen.go ..\/..\/..\/web\n\n\/\/ dlibox drives the dlibox LED strip on a Raspberry Pi. It runs a web server\n\/\/ for remote control.\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"image\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"syscall\"\n\n\t\"github.com\/kardianos\/osext\"\n\t\"github.com\/maruel\/dlibox\/go\/anim1d\"\n\t\"github.com\/maruel\/dlibox\/go\/bw2d\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/buses\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/buses\/bcm283x\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/buses\/i2c\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/buses\/ir\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/buses\/spi\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/devices\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/devices\/apa102\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/devices\/ssd1306\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/fakes\"\n\t\"github.com\/maruel\/dlibox\/go\/pio\/fakes\/screen\"\n\t\"github.com\/maruel\/dlibox\/go\/psf\"\n\t\"github.com\/maruel\/interrupt\"\n)\n\nfunc initDisplay() (devices.Display, error) {\n\ti2cBus, err := i2c.Make(1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdisplay, err := ssd1306.MakeI2C(i2cBus, 128, 64, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tf12, err := psf.Load(\"Terminus12x6\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tf20, err := psf.Load(\"Terminus20x10\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ TODO(maruel): Leverage bme280 while at it but don't fail if not\n\t\/\/ connected.\n\timg, err := bw2d.Make(display.W, display.H)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tf20.Draw(img, 0, 0, bw2d.On, nil, \"dlibox!\")\n\tf12.Draw(img, 0, display.H-f12.H-1, bw2d.On, nil, \"is awesome\")\n\tif _, err = display.Write(img.Buf); err != nil {\n\t\treturn nil, err\n\t}\n\treturn display, nil\n}\n\nfunc initIR(painter *anim1d.Painter, config *IR) error {\n\tbus, err := ir.Make()\n\tif err != nil {\n\t\treturn err\n\t}\n\tgo func() {\n\t\tc := bus.Channel()\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase msg, ok := <-c:\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif !msg.Repeat {\n\t\t\t\t\t\/\/ TODO(maruel): Locking.\n\t\t\t\t\tif pat := config.Mapping[msg.Key]; len(pat) != 0 {\n\t\t\t\t\t\tpainter.SetPattern(string(pat))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\treturn nil\n}\n\nfunc initPIR(painter *anim1d.Painter, config *PIR) error {\n\t\/\/ TODO(maruel): Cleaner.\n\tpin := bcm283x.GetPin(config.Pin)\n\tif pin.String() == \"INVALID\" {\n\t\treturn nil\n\t}\n\tif err := pin.In(buses.Down, buses.Rising); err != nil {\n\t\treturn err\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tpin.ReadEdge()\n\t\t\t\/\/ TODO(maruel): Locking.\n\t\t\tpainter.SetPattern(string(config.Pattern))\n\t\t}\n\t}()\n\treturn nil\n}\n\nfunc mainImpl() error {\n\tthisFile, err := osext.Executable()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcpuprofile := flag.String(\"cpuprofile\", \"\", \"dump CPU profile in file\")\n\tport := flag.Int(\"port\", 8010, \"http port to listen on\")\n\tverbose := flag.Bool(\"verbose\", false, \"enable log output\")\n\tfake := flag.Bool(\"fake\", false, \"use a terminal mock, useful to test without the hardware\")\n\tflag.Parse()\n\tif flag.NArg() != 0 {\n\t\treturn fmt.Errorf(\"unexpected argument: %s\", flag.Args())\n\t}\n\n\tif !*verbose {\n\t\tlog.SetOutput(ioutil.Discard)\n\t}\n\n\tinterrupt.HandleCtrlC()\n\tdefer interrupt.Set()\n\tchanSignal := make(chan os.Signal)\n\tgo func() {\n\t\t<-chanSignal\n\t\tinterrupt.Set()\n\t}()\n\tsignal.Notify(chanSignal, syscall.SIGTERM)\n\n\tvar properties []string\n\tif *cpuprofile != \"\" {\n\t\t\/\/ Run with cpuprofile, then use 'go tool pprof' to analyze it. See\n\t\t\/\/ http:\/\/blog.golang.org\/profiling-go-programs for more details.\n\t\tf, err := os.Create(*cpuprofile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpprof.StartCPUProfile(f)\n\t\tdefer pprof.StopCPUProfile()\n\t\tproperties = append(properties, \"profiled=1\")\n\t}\n\n\t\/\/ Config.\n\tconfig := ConfigMgr{}\n\tconfig.ResetDefault()\n\tif err := config.Load(); err != nil {\n\t\tlog.Printf(\"Loading config failed: %v\", err)\n\t}\n\tdefer config.Close()\n\n\tb, err := json.MarshalIndent(config, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Config:\\n%s\", string(b))\n\n\tfps := 60\n\tif bcm283x.MaxSpeed < 900000 || runtime.NumCPU() < 4 {\n\t\t\/\/ Use 30Hz on slower devices because it is too slow.\n\t\tfps = 30\n\t}\n\n\t\/\/ Output (terminal with ANSI codes or APA102).\n\tvar leds devices.Display\n\tif *fake {\n\t\t\/\/ Hardcode to 100 characters when using a terminal output.\n\t\t\/\/ TODO(maruel): Query the terminal and use its width.\n\t\tleds = screen.Make(100)\n\t\tdefer os.Stdout.Write([]byte(\"\\033[0m\\n\"))\n\t\t\/\/ Use lower refresh rate too.\n\t\tfps = 30\n\t\tproperties = append(properties, \"fake=1\")\n\t} else {\n\t\tspiBus, err := spi.Make(0, 0, config.Settings.APA102.SPIspeed)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"SPI failed: %v\", err)\n\t\t\tleds = &fakes.Display{image.NewNRGBA(image.Rect(0, 0, config.Settings.APA102.NumberLights, 1))}\n\t\t} else {\n\t\t\tdefer spiBus.Close()\n\t\t\tif leds, err = apa102.Make(spiBus, config.Settings.APA102.NumberLights, 255, 6500); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tproperties = append(properties, fmt.Sprintf(\"APA102=%d\", config.Settings.APA102.NumberLights))\n\t\t}\n\t}\n\n\t\/\/ Try to initialize the display.\n\tif _, err = initDisplay(); err != nil {\n\t\tlog.Printf(\"Display not connected\")\n\t}\n\n\t\/\/ Painter.\n\tp := anim1d.MakePainter(leds, fps)\n\tif err := config.Init(p); err != nil {\n\t\treturn err\n\t}\n\tstartWebServer(*port, p, &config.Config)\n\n\tif err = initIR(p, &config.Settings.IR); err != nil {\n\t\tlog.Printf(\"IR not connected: %v\", err)\n\t}\n\n\tif err = initPIR(p, &config.Settings.PIR); err != nil {\n\t\tlog.Printf(\"PIR not connected: %v\", err)\n\t}\n\n\t\/*\n\t\tservice, err := initmDNS(*port, properties)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer service.Close()\n\t*\/\n\n\treturn watchFile(thisFile)\n}\n\nfunc main() {\n\tif err := mainImpl(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"\\ndlibox: %s.\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package libkb\n\nimport (\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n\n\tkeybase1 \"github.com\/keybase\/client\/protocol\/go\"\n)\n\nconst (\n\tDevelServerURI = \"http:\/\/localhost:3000\"\n\tProductionServerURI = \"https:\/\/keybase.io\"\n)\n\nvar ServerURI = DevelServerURI\n\nconst (\n\tConfigFile = \"config.json\"\n\tSessionFile = \"session.json\"\n\tDBFile = \"keybase.leveldb\"\n\tSocketFile = \"keybased.sock\"\n\tPIDFile = \"keybased.pid\"\n\n\tSecretKeyringTemplate = \"secretkeys.%u.mpack\"\n\n\tAPIVersion = \"1.0\"\n\tAPIURIPathPrefix = \"\/_\/api\/\" + APIVersion\n\tDaemonPort = 40933\n\tGoClientID = \"keybase.io go client\"\n\tIdentifyAs = GoClientID + \" v\" + Version + \" \" + runtime.GOOS\n)\n\nvar UserAgent = \"Keybase-Go-CLI\/\" + Version + \" (\" + runtime.Version() + \" on \" + runtime.GOOS + \")\"\n\nconst (\n\tPermFile os.FileMode = 0600\n\tPermDir os.FileMode = 0700\n\tUmaskablePermFile os.FileMode = 0666\n)\n\nconst (\n\tUserCacheSize = 0x1000\n\tPGPFingerprintHexLen = 40\n\n\tProofCacheSize = 0x1000\n\tProofCacheLongDur = 6 * time.Hour\n\tProofCacheMediumDur = 30 * time.Minute\n\tProofCacheShortDur = 1 * time.Minute\n\n\tSigShortIDBytes = 27\n)\n\nvar MerkleProdKIDs = []string{\n\t\"010159baae6c7d43c66adf8fb7bb2b8b4cbe408c062cfc369e693ccb18f85631dbcd0a\",\n}\nvar MerkleTestKIDs = []string{\n\t\"0101be58b6c82db64f6ccabb05088db443c69f87d5d48857d709ed6f73948dabe67d0a\",\n}\n\nconst (\n\tKeybaseKIDV1 = 1 \/\/ Uses SHA-256\n\tKeybaseSignatureV1 = 1\n\n\tSigExpireIn = 24 * 60 * 60 * 365 * 10 \/\/ 10 years\n\tNaclEdDSAExpireIn = 24 * 60 * 60 * 365 * 3 \/\/ 3 years\n\tNaclDHExpireIn = 24 * 60 * 60 * 365 * 3 \/\/ 3 years\n\tAuthExpireIn = 24 * 60 * 60 * 365 \/\/ 1 year\n\tKeyExpireIn = 24 * 60 * 60 * 365 * 8 \/\/ 8 years\n\tSubkeyExpireIn = 24 * 60 * 60 * 365 * 4 \/\/ 4 years\n)\n\n\/\/ Status codes. This list should match keybase\/lib\/constants.iced.\nconst (\n\tSCOk = 0\n\tSCLoginRequired = 201\n\tSCBadSession = 202\n\tSCBadLoginPassword = 204\n\tSCNotFound = 205\n\tSCGeneric = 218\n\tSCAlreadyLoggedIn = 235\n\tSCCanceled = 237\n\tSCBadSignupUsernameTaken = 701\n\tSCKeyNotFound = 901\n\tSCKeyInUse = 907\n\tSCKeyBadGen = 913\n\tSCKeyNoSecret = 914\n\tSCKeyNoActive = 915\n\tSCBadTrackSession = 1301\n\tSCStreamExists = 1501\n\tSCStreamNotFound = 1502\n\tSCStreamWrongKind = 1503\n\tSCStreamEOF = 1504\n\tSCAPINetworkError = 1601\n\tSCTimeout = 1602\n\tSCProofError = 1701\n\tSCIdentificationExpired = 1702\n)\n\nconst (\n\tIDSuffixKID = 0x0a\n)\n\nconst (\n\tMerkleTreeNode = 1\n\tMerkleTreeLeaf = 2\n)\n\nconst (\n\tSibkeyType = \"sibkey\"\n\tSubkeyType = \"subkey\"\n\tEldestType = \"eldest\"\n)\n\nconst (\n\tSigTypeNone = 0\n\tSigTypeSelfSig = 1\n\tSigTypeRemoteProof = 2\n\tSigTypeTrack = 3\n\tSigTypeUntrack = 4\n\tSigTypeRevoke = 5\n\tSigTypeCryptocurrency = 6\n\tSigTypeAnnouncement = 7\n)\n\ntype KeyType int\n\nconst (\n\tKeyTypeNone KeyType = 0\n\tKeyTypeOpenPGPPublic = 1\n\tKeyTypeP3skbPrivate = 2\n\tKeyTypeKbNaclEddsa = 3\n\tKeyTypeKbNaclDH = 4\n\tKeyTypeKbNaclEddsaServerHalf = 5\n\tKeyTypeKbNaclDHServerHalf = 6\n)\n\nconst (\n\tDeviceStatusNone = 0\n\tDeviceStatusActive = 1\n\tDeviceStatusDefunct = 2\n)\n\n\/\/ these strings need to match the keys in\n\/\/ keybase\/lib_public\/public_constants.iced ->\n\/\/ public_constants.device.type\nconst (\n\tDeviceTypeDesktop = \"desktop\"\n\tDeviceTypeMobile = \"mobile\"\n\tDeviceTypeWeb = \"web\"\n\tDeviceTypePaper = \"backup\"\n)\n\nconst DownloadURL = \"https:\/\/keybase.io\/download\"\n\nvar PGPVersion = \"Keybase Go \" + Version + \" (\" + runtime.GOOS + \")\"\n\nvar PGPArmorHeaders = map[string]string{\n\t\"Version\": PGPVersion,\n\t\"Comment\": DownloadURL,\n}\n\nvar RemoteServiceTypes = map[string]keybase1.ProofType{\n\t\"keybase\": keybase1.ProofType_KEYBASE,\n\t\"twitter\": keybase1.ProofType_TWITTER,\n\t\"github\": keybase1.ProofType_GITHUB,\n\t\"reddit\": keybase1.ProofType_REDDIT,\n\t\"coinbase\": keybase1.ProofType_COINBASE,\n\t\"hackernews\": keybase1.ProofType_HACKERNEWS,\n\t\"https\": keybase1.ProofType_GENERIC_WEB_SITE,\n\t\"http\": keybase1.ProofType_GENERIC_WEB_SITE,\n\t\"dns\": keybase1.ProofType_DNS,\n\t\"rooter\": keybase1.ProofType_ROOTER,\n}\n\nvar RemoteServiceOrder = []keybase1.ProofType{\n\tkeybase1.ProofType_KEYBASE,\n\tkeybase1.ProofType_TWITTER,\n\tkeybase1.ProofType_GITHUB,\n\tkeybase1.ProofType_REDDIT,\n\tkeybase1.ProofType_COINBASE,\n\tkeybase1.ProofType_HACKERNEWS,\n\tkeybase1.ProofType_GENERIC_WEB_SITE,\n\tkeybase1.ProofType_ROOTER,\n}\n\nconst CanonicalHost = \"keybase.io\"\n\nconst (\n\tHTTPDefaultTimeout = 10 * time.Second\n)\n\n\/\/ Packet tags for OpenPGP and also Keybase packets\nconst (\n\tKeybasePacketV1 = 1\n\tTagP3skb = 513\n\tTagSignature = 514\n\tTagEncryption = 515\n)\n\nconst (\n\tKIDPGPBase AlgoType = 0x00\n\tKIDPGPRsa = 0x1\n\tKIDPGPElgamal = 0x10\n\tKIDPGPDsa = 0x11\n\tKIDPGPEcdh = 0x12\n\tKIDPGPEcdsa = 0x13\n\tKIDNaclEddsa = 0x20\n\tKIDNaclDH = 0x21\n)\n\n\/\/ OpenPGP hash IDs, taken from http:\/\/tools.ietf.org\/html\/rfc4880#section-9.4\nconst (\n\tHashPGPMd5 = 1\n\tHashPGPSha1 = 2\n\tHashPGPRipemd160 = 3\n\tHashPGPSha256 = 8\n\tHashPGPSha384 = 9\n\tHashPGPSha512 = 10\n\tHashPGPSha224 = 11\n)\n\nconst (\n\tSigKbEddsa = KIDNaclEddsa\n)\n\nconst (\n\tServerUpdateLag = time.Minute\n)\n\n\/\/ key_revocation_types\nconst (\n\tRevSimpleDelete = 0\n\tRevFull = 1\n\tRevDated = 2\n)\n\ntype KeyStatus int\n\nconst (\n\tKeyUncancelled KeyStatus = iota\n\tKeyRevoked\n\tKeyDeleted\n\tKeySuperseded\n)\n\ntype KeyRole int\n\nconst (\n\tDLGNone KeyRole = iota\n\tDLGSibkey\n\tDLGSubkey\n)\n\nconst (\n\tKexScryptCost = 32768\n\tKexScryptR = 8\n\tKexScryptP = 1\n\tKexScryptKeylen = 32\n\tKexSessionIDEntropy = 65 \/\/ kex doc specifies 65 bits of entropy\n)\n\nconst (\n\tPaperKeyScryptCost = 32768\n\tPaperKeyScryptR = 8\n\tPaperKeyScryptP = 1\n\tPaperKeyScryptKeylen = 128\n\tPaperKeyPhraseEntropy = 144\n\tPaperKeyVersion = 0\n)\n\nconst UserSummaryLimit = 500 \/\/ max number of user summaries in one request\n<commit_msg>Move all expirations (except auth) to 10 years. Also cleanup the constant so its clearer<commit_after>package libkb\n\nimport (\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n\n\tkeybase1 \"github.com\/keybase\/client\/protocol\/go\"\n)\n\nconst (\n\tDevelServerURI = \"http:\/\/localhost:3000\"\n\tProductionServerURI = \"https:\/\/keybase.io\"\n)\n\nvar ServerURI = DevelServerURI\n\nconst (\n\tConfigFile = \"config.json\"\n\tSessionFile = \"session.json\"\n\tDBFile = \"keybase.leveldb\"\n\tSocketFile = \"keybased.sock\"\n\tPIDFile = \"keybased.pid\"\n\n\tSecretKeyringTemplate = \"secretkeys.%u.mpack\"\n\n\tAPIVersion = \"1.0\"\n\tAPIURIPathPrefix = \"\/_\/api\/\" + APIVersion\n\tDaemonPort = 40933\n\tGoClientID = \"keybase.io go client\"\n\tIdentifyAs = GoClientID + \" v\" + Version + \" \" + runtime.GOOS\n)\n\nvar UserAgent = \"Keybase-Go-CLI\/\" + Version + \" (\" + runtime.Version() + \" on \" + runtime.GOOS + \")\"\n\nconst (\n\tPermFile os.FileMode = 0600\n\tPermDir os.FileMode = 0700\n\tUmaskablePermFile os.FileMode = 0666\n)\n\nconst (\n\tUserCacheSize = 0x1000\n\tPGPFingerprintHexLen = 40\n\n\tProofCacheSize = 0x1000\n\tProofCacheLongDur = 6 * time.Hour\n\tProofCacheMediumDur = 30 * time.Minute\n\tProofCacheShortDur = 1 * time.Minute\n\n\tSigShortIDBytes = 27\n)\n\nvar MerkleProdKIDs = []string{\n\t\"010159baae6c7d43c66adf8fb7bb2b8b4cbe408c062cfc369e693ccb18f85631dbcd0a\",\n}\nvar MerkleTestKIDs = []string{\n\t\"0101be58b6c82db64f6ccabb05088db443c69f87d5d48857d709ed6f73948dabe67d0a\",\n}\n\nconst (\n\tKeybaseKIDV1 = 1 \/\/ Uses SHA-256\n\tKeybaseSignatureV1 = 1\n\tOneYearInMinutes = 24 * 60 * 60 * 365\n\n\tSigExpireIn = OneYearInMinutes * 10 \/\/ 10 years\n\tNaclEdDSAExpireIn = OneYearInMinutes * 10 \/\/ 10 years\n\tNaclDHExpireIn = OneYearInMinutes * 10 \/\/ 10 years\n\tKeyExpireIn = OneYearInMinutes * 10 \/\/ 10 years\n\tSubkeyExpireIn = OneYearInMinutes * 10 \/\/ 10 years\n\tAuthExpireIn = OneYearInMinutes \/\/ 1 year\n)\n\n\/\/ Status codes. This list should match keybase\/lib\/constants.iced.\nconst (\n\tSCOk = 0\n\tSCLoginRequired = 201\n\tSCBadSession = 202\n\tSCBadLoginPassword = 204\n\tSCNotFound = 205\n\tSCGeneric = 218\n\tSCAlreadyLoggedIn = 235\n\tSCCanceled = 237\n\tSCBadSignupUsernameTaken = 701\n\tSCKeyNotFound = 901\n\tSCKeyInUse = 907\n\tSCKeyBadGen = 913\n\tSCKeyNoSecret = 914\n\tSCKeyNoActive = 915\n\tSCBadTrackSession = 1301\n\tSCStreamExists = 1501\n\tSCStreamNotFound = 1502\n\tSCStreamWrongKind = 1503\n\tSCStreamEOF = 1504\n\tSCAPINetworkError = 1601\n\tSCTimeout = 1602\n\tSCProofError = 1701\n\tSCIdentificationExpired = 1702\n)\n\nconst (\n\tIDSuffixKID = 0x0a\n)\n\nconst (\n\tMerkleTreeNode = 1\n\tMerkleTreeLeaf = 2\n)\n\nconst (\n\tSibkeyType = \"sibkey\"\n\tSubkeyType = \"subkey\"\n\tEldestType = \"eldest\"\n)\n\nconst (\n\tSigTypeNone = 0\n\tSigTypeSelfSig = 1\n\tSigTypeRemoteProof = 2\n\tSigTypeTrack = 3\n\tSigTypeUntrack = 4\n\tSigTypeRevoke = 5\n\tSigTypeCryptocurrency = 6\n\tSigTypeAnnouncement = 7\n)\n\ntype KeyType int\n\nconst (\n\tKeyTypeNone KeyType = 0\n\tKeyTypeOpenPGPPublic = 1\n\tKeyTypeP3skbPrivate = 2\n\tKeyTypeKbNaclEddsa = 3\n\tKeyTypeKbNaclDH = 4\n\tKeyTypeKbNaclEddsaServerHalf = 5\n\tKeyTypeKbNaclDHServerHalf = 6\n)\n\nconst (\n\tDeviceStatusNone = 0\n\tDeviceStatusActive = 1\n\tDeviceStatusDefunct = 2\n)\n\n\/\/ these strings need to match the keys in\n\/\/ keybase\/lib_public\/public_constants.iced ->\n\/\/ public_constants.device.type\nconst (\n\tDeviceTypeDesktop = \"desktop\"\n\tDeviceTypeMobile = \"mobile\"\n\tDeviceTypeWeb = \"web\"\n\tDeviceTypePaper = \"backup\"\n)\n\nconst DownloadURL = \"https:\/\/keybase.io\/download\"\n\nvar PGPVersion = \"Keybase Go \" + Version + \" (\" + runtime.GOOS + \")\"\n\nvar PGPArmorHeaders = map[string]string{\n\t\"Version\": PGPVersion,\n\t\"Comment\": DownloadURL,\n}\n\nvar RemoteServiceTypes = map[string]keybase1.ProofType{\n\t\"keybase\": keybase1.ProofType_KEYBASE,\n\t\"twitter\": keybase1.ProofType_TWITTER,\n\t\"github\": keybase1.ProofType_GITHUB,\n\t\"reddit\": keybase1.ProofType_REDDIT,\n\t\"coinbase\": keybase1.ProofType_COINBASE,\n\t\"hackernews\": keybase1.ProofType_HACKERNEWS,\n\t\"https\": keybase1.ProofType_GENERIC_WEB_SITE,\n\t\"http\": keybase1.ProofType_GENERIC_WEB_SITE,\n\t\"dns\": keybase1.ProofType_DNS,\n\t\"rooter\": keybase1.ProofType_ROOTER,\n}\n\nvar RemoteServiceOrder = []keybase1.ProofType{\n\tkeybase1.ProofType_KEYBASE,\n\tkeybase1.ProofType_TWITTER,\n\tkeybase1.ProofType_GITHUB,\n\tkeybase1.ProofType_REDDIT,\n\tkeybase1.ProofType_COINBASE,\n\tkeybase1.ProofType_HACKERNEWS,\n\tkeybase1.ProofType_GENERIC_WEB_SITE,\n\tkeybase1.ProofType_ROOTER,\n}\n\nconst CanonicalHost = \"keybase.io\"\n\nconst (\n\tHTTPDefaultTimeout = 10 * time.Second\n)\n\n\/\/ Packet tags for OpenPGP and also Keybase packets\nconst (\n\tKeybasePacketV1 = 1\n\tTagP3skb = 513\n\tTagSignature = 514\n\tTagEncryption = 515\n)\n\nconst (\n\tKIDPGPBase AlgoType = 0x00\n\tKIDPGPRsa = 0x1\n\tKIDPGPElgamal = 0x10\n\tKIDPGPDsa = 0x11\n\tKIDPGPEcdh = 0x12\n\tKIDPGPEcdsa = 0x13\n\tKIDNaclEddsa = 0x20\n\tKIDNaclDH = 0x21\n)\n\n\/\/ OpenPGP hash IDs, taken from http:\/\/tools.ietf.org\/html\/rfc4880#section-9.4\nconst (\n\tHashPGPMd5 = 1\n\tHashPGPSha1 = 2\n\tHashPGPRipemd160 = 3\n\tHashPGPSha256 = 8\n\tHashPGPSha384 = 9\n\tHashPGPSha512 = 10\n\tHashPGPSha224 = 11\n)\n\nconst (\n\tSigKbEddsa = KIDNaclEddsa\n)\n\nconst (\n\tServerUpdateLag = time.Minute\n)\n\n\/\/ key_revocation_types\nconst (\n\tRevSimpleDelete = 0\n\tRevFull = 1\n\tRevDated = 2\n)\n\ntype KeyStatus int\n\nconst (\n\tKeyUncancelled KeyStatus = iota\n\tKeyRevoked\n\tKeyDeleted\n\tKeySuperseded\n)\n\ntype KeyRole int\n\nconst (\n\tDLGNone KeyRole = iota\n\tDLGSibkey\n\tDLGSubkey\n)\n\nconst (\n\tKexScryptCost = 32768\n\tKexScryptR = 8\n\tKexScryptP = 1\n\tKexScryptKeylen = 32\n\tKexSessionIDEntropy = 65 \/\/ kex doc specifies 65 bits of entropy\n)\n\nconst (\n\tPaperKeyScryptCost = 32768\n\tPaperKeyScryptR = 8\n\tPaperKeyScryptP = 1\n\tPaperKeyScryptKeylen = 128\n\tPaperKeyPhraseEntropy = 144\n\tPaperKeyVersion = 0\n)\n\nconst UserSummaryLimit = 500 \/\/ max number of user summaries in one request\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage libkb\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\ntype cmpTest struct {\n\ta, b string\n\teq bool\n}\n\nvar nameCmpTest = []cmpTest{\n\t{a: \"UpperCase\", b: \"uppercase\", eq: true},\n\t{a: \" Space prefix\", b: \"Space prefix\", eq: true},\n\t{a: \"Space suffix \", b: \"Space suffix\", eq: true},\n\t{a: \"Space Inside\", b: \"SpaceInside\", eq: true},\n\t{a: \"work iPad\", b: \"work ipad\", eq: true},\n\t{a: \"my_ipad\", b: \"MY IPAD\", eq: true},\n\t{a: \"device a\", b: \"device b\", eq: false},\n\t{a: \"mike's computer\", b: \"mikes computer\", eq: true},\n\t{a: \"my+-'_device\", b: \"my device\", eq: true},\n}\n\nfunc TestNameCmp(t *testing.T) {\n\tfor _, test := range nameCmpTest {\n\t\teq := NameCmp(test.a, test.b)\n\t\tif eq != test.eq {\n\t\t\tt.Errorf(\"name compare %q == %q => %v, expected %v\", test.a, test.b, eq, test.eq)\n\t\t}\n\t}\n}\n\nfunc TestCombineErrors(t *testing.T) {\n\terr := CombineErrors(fmt.Errorf(\"error1\"), nil, fmt.Errorf(\"error3\"))\n\texpected := \"There were multiple errors: error1; error3\"\n\tif err.Error() != expected {\n\t\tt.Errorf(\"Wrong output for combine errors: %#v != %#v\", err.Error(), expected)\n\t}\n}\n\nfunc TestWhitespaceNormalize(t *testing.T) {\n\n\tdata := []struct {\n\t\tin, out string\n\t}{\n\t\t{\" ab cd ef gh \", \"ab cd ef gh\"},\n\t\t{\"a\\nb c\\nd\", \"a b c d\"},\n\t\t{\" a \", \"a\"},\n\t\t{\"\\na\\nb \", \"a b\"},\n\t\t{\n\t\t\t\" Verifying myself: I am pomf on Keybase.io. 8a6cewzit2o7zuLKGbDqQADhzfOlGerGuBpq\\n\/ https:\/\/keybase.io\/pomf\/sigs\/8a6cewzit2o7zuLKGbDqQADhzfOlGerGuBpq \",\n\t\t\t\"Verifying myself: I am pomf on Keybase.io. 8a6cewzit2o7zuLKGbDqQADhzfOlGerGuBpq \/ https:\/\/keybase.io\/pomf\/sigs\/8a6cewzit2o7zuLKGbDqQADhzfOlGerGuBpq\",\n\t\t},\n\t}\n\n\tfor i, p := range data {\n\t\tout := WhitespaceNormalize(p.in)\n\t\tif out != p.out {\n\t\t\tt.Errorf(\"Failed on test %d: %s != %s\", i, out, p.out)\n\t\t}\n\t}\n\n}\n\nfunc TestMakeByte24(t *testing.T) {\n\tvar x1 [24]byte\n\tvar x2 [31]byte\n\tvar x3 [33]byte\n\n\tx1[3] = 5\n\n\ty := MakeByte24(x1[:])\n\trequire.Equal(t, x1, y)\n\n\trequire.Panics(t, func() {\n\t\tMakeByte24(x2[:])\n\t})\n\n\trequire.Panics(t, func() {\n\t\tMakeByte24(x3[:])\n\t})\n}\n\nfunc TestMakeByte32(t *testing.T) {\n\tvar x1 [32]byte\n\tvar x2 [31]byte\n\tvar x3 [33]byte\n\n\tx1[3] = 5\n\n\ty := MakeByte32(x1[:])\n\trequire.Equal(t, x1, y)\n\n\trequire.Panics(t, func() {\n\t\tMakeByte32(x2[:])\n\t})\n\n\trequire.Panics(t, func() {\n\t\tMakeByte32(x3[:])\n\t})\n}\n\nfunc TestAppDataDir(t *testing.T) {\n\tdir, err := AppDataDir()\n\tif err != nil {\n\t\t\/\/ Non-Windows case.\n\t\trequire.True(t, strings.HasPrefix(err.Error(), \"unsupported\"))\n\t\treturn\n\t}\n\n\t\/\/ Windows case. AppDataDir should exist, at least on our test\n\t\/\/ machines.\n\trequire.NoError(t, err)\n\texists, err := FileExists(dir)\n\trequire.NoError(t, err)\n\trequire.True(t, exists)\n}\n\nfunc TestLocalDataDir(t *testing.T) {\n\tdir, err := LocalDataDir()\n\tif err != nil {\n\t\t\/\/ Non-Windows case.\n\t\trequire.True(t, strings.HasPrefix(err.Error(), \"unsupported\"))\n\t\treturn\n\t}\n\n\t\/\/ Windows case. LocalDataDir should exist, at least on our\n\t\/\/ test machines.\n\trequire.NoError(t, err)\n\texists, err := FileExists(dir)\n\trequire.NoError(t, err)\n\trequire.True(t, exists)\n}\n<commit_msg>test ForceWallClock (#11787)<commit_after>\/\/ Copyright 2015 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage libkb\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\ntype cmpTest struct {\n\ta, b string\n\teq bool\n}\n\nvar nameCmpTest = []cmpTest{\n\t{a: \"UpperCase\", b: \"uppercase\", eq: true},\n\t{a: \" Space prefix\", b: \"Space prefix\", eq: true},\n\t{a: \"Space suffix \", b: \"Space suffix\", eq: true},\n\t{a: \"Space Inside\", b: \"SpaceInside\", eq: true},\n\t{a: \"work iPad\", b: \"work ipad\", eq: true},\n\t{a: \"my_ipad\", b: \"MY IPAD\", eq: true},\n\t{a: \"device a\", b: \"device b\", eq: false},\n\t{a: \"mike's computer\", b: \"mikes computer\", eq: true},\n\t{a: \"my+-'_device\", b: \"my device\", eq: true},\n}\n\nfunc TestNameCmp(t *testing.T) {\n\tfor _, test := range nameCmpTest {\n\t\teq := NameCmp(test.a, test.b)\n\t\tif eq != test.eq {\n\t\t\tt.Errorf(\"name compare %q == %q => %v, expected %v\", test.a, test.b, eq, test.eq)\n\t\t}\n\t}\n}\n\nfunc TestCombineErrors(t *testing.T) {\n\terr := CombineErrors(fmt.Errorf(\"error1\"), nil, fmt.Errorf(\"error3\"))\n\texpected := \"There were multiple errors: error1; error3\"\n\tif err.Error() != expected {\n\t\tt.Errorf(\"Wrong output for combine errors: %#v != %#v\", err.Error(), expected)\n\t}\n}\n\nfunc TestWhitespaceNormalize(t *testing.T) {\n\n\tdata := []struct {\n\t\tin, out string\n\t}{\n\t\t{\" ab cd ef gh \", \"ab cd ef gh\"},\n\t\t{\"a\\nb c\\nd\", \"a b c d\"},\n\t\t{\" a \", \"a\"},\n\t\t{\"\\na\\nb \", \"a b\"},\n\t\t{\n\t\t\t\" Verifying myself: I am pomf on Keybase.io. 8a6cewzit2o7zuLKGbDqQADhzfOlGerGuBpq\\n\/ https:\/\/keybase.io\/pomf\/sigs\/8a6cewzit2o7zuLKGbDqQADhzfOlGerGuBpq \",\n\t\t\t\"Verifying myself: I am pomf on Keybase.io. 8a6cewzit2o7zuLKGbDqQADhzfOlGerGuBpq \/ https:\/\/keybase.io\/pomf\/sigs\/8a6cewzit2o7zuLKGbDqQADhzfOlGerGuBpq\",\n\t\t},\n\t}\n\n\tfor i, p := range data {\n\t\tout := WhitespaceNormalize(p.in)\n\t\tif out != p.out {\n\t\t\tt.Errorf(\"Failed on test %d: %s != %s\", i, out, p.out)\n\t\t}\n\t}\n\n}\n\nfunc TestMakeByte24(t *testing.T) {\n\tvar x1 [24]byte\n\tvar x2 [31]byte\n\tvar x3 [33]byte\n\n\tx1[3] = 5\n\n\ty := MakeByte24(x1[:])\n\trequire.Equal(t, x1, y)\n\n\trequire.Panics(t, func() {\n\t\tMakeByte24(x2[:])\n\t})\n\n\trequire.Panics(t, func() {\n\t\tMakeByte24(x3[:])\n\t})\n}\n\nfunc TestMakeByte32(t *testing.T) {\n\tvar x1 [32]byte\n\tvar x2 [31]byte\n\tvar x3 [33]byte\n\n\tx1[3] = 5\n\n\ty := MakeByte32(x1[:])\n\trequire.Equal(t, x1, y)\n\n\trequire.Panics(t, func() {\n\t\tMakeByte32(x2[:])\n\t})\n\n\trequire.Panics(t, func() {\n\t\tMakeByte32(x3[:])\n\t})\n}\n\nfunc TestAppDataDir(t *testing.T) {\n\tdir, err := AppDataDir()\n\tif err != nil {\n\t\t\/\/ Non-Windows case.\n\t\trequire.True(t, strings.HasPrefix(err.Error(), \"unsupported\"))\n\t\treturn\n\t}\n\n\t\/\/ Windows case. AppDataDir should exist, at least on our test\n\t\/\/ machines.\n\trequire.NoError(t, err)\n\texists, err := FileExists(dir)\n\trequire.NoError(t, err)\n\trequire.True(t, exists)\n}\n\nfunc TestLocalDataDir(t *testing.T) {\n\tdir, err := LocalDataDir()\n\tif err != nil {\n\t\t\/\/ Non-Windows case.\n\t\trequire.True(t, strings.HasPrefix(err.Error(), \"unsupported\"))\n\t\treturn\n\t}\n\n\t\/\/ Windows case. LocalDataDir should exist, at least on our\n\t\/\/ test machines.\n\trequire.NoError(t, err)\n\texists, err := FileExists(dir)\n\trequire.NoError(t, err)\n\trequire.True(t, exists)\n}\n\nfunc hasMonotonicClock(t time.Time) bool {\n\tre := regexp.MustCompile(\" m=[-+]([.0-9]+)$\")\n\treturn re.FindString(t.String()) != \"\"\n}\n\nfunc TestForceWallClock(t *testing.T) {\n\tn := time.Now()\n\trequire.True(t, hasMonotonicClock(n))\n\trequire.False(t, hasMonotonicClock(ForceWallClock(n)))\n}\n<|endoftext|>"} {"text":"<commit_before>package twitter\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"github.com\/dghubble\/go-twitter\/twitter\"\n\t\"github.com\/dghubble\/oauth1\"\n\t\"github.com\/g-hyoga\/kyuko\/go\/model\"\n)\n\nvar (\n\tT_CONSUMER_KEY = os.Getenv(\"T_CONSUMER_KEY\")\n\tT_CONSUMER_SECRET = os.Getenv(\"T_CONSUMER_SECRET\")\n\tT_ACCESS_TOKEN = os.Getenv(\"T_ACCESS_TOKEN\")\n\tT_ACCESS_TOKEN_SECRET = os.Getenv(\"T_ACCESS_TOKEN_SECRET\")\n\n\tI_CONSUMER_KEY = os.Getenv(\"I_CONSUMER_KEY\")\n\tI_CONSUMER_SECRET = os.Getenv(\"I_CONSUMER_SECRET\")\n\tI_ACCESS_TOKEN = os.Getenv(\"I_ACCESS_TOKEN\")\n\tI_ACCESS_TOKEN_SECRET = os.Getenv(\"I_ACCESS_TOKEN_SECRET\")\n)\n\nvar tClient twitter.Client\n\nfunc init() {\n\t\/\/京田辺\n\tconfig := oauth1.NewConfig(T_CONSUMER_KEY, T_CONSUMER_SECRET)\n\ttoken := oauth1.NewToken(T_ACCESS_TOKEN, T_ACCESS_TOKEN_SECRET)\n\thttpClient := config.Client(oauth1.NoContext, token)\n\ttClient = *twitter.NewClient(httpClient)\n}\n\n\/\/ create line of template\n\/\/ period:className(Instructor)\nfunc CreateLine(kyuko model.KyukoData) (string, error) {\n\tif kyuko.ClassName == \"\" || kyuko.Instructor == \"\" || kyuko.Period == 0 {\n\t\treturn \"\", errors.New(\"休講情報がないです\")\n\t}\n\n\tperiod := strconv.Itoa(kyuko.Period)\n\n\tline := period + \"限:\" + kyuko.ClassName + \"(\" + kyuko.Instructor + \")\\n\"\n\treturn line, nil\n}\n\n\/\/ tweet argment\nfunc Update(text string) error {\n\t_, _, err := tClient.Statuses.Update(text, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>tweetのテンプレート作成の途中(>weekdayを漢字に直す)<commit_after>package twitter\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"github.com\/dghubble\/go-twitter\/twitter\"\n\t\"github.com\/dghubble\/oauth1\"\n\t\"github.com\/g-hyoga\/kyuko\/go\/model\"\n)\n\nvar (\n\tT_CONSUMER_KEY = os.Getenv(\"T_CONSUMER_KEY\")\n\tT_CONSUMER_SECRET = os.Getenv(\"T_CONSUMER_SECRET\")\n\tT_ACCESS_TOKEN = os.Getenv(\"T_ACCESS_TOKEN\")\n\tT_ACCESS_TOKEN_SECRET = os.Getenv(\"T_ACCESS_TOKEN_SECRET\")\n\n\tI_CONSUMER_KEY = os.Getenv(\"I_CONSUMER_KEY\")\n\tI_CONSUMER_SECRET = os.Getenv(\"I_CONSUMER_SECRET\")\n\tI_ACCESS_TOKEN = os.Getenv(\"I_ACCESS_TOKEN\")\n\tI_ACCESS_TOKEN_SECRET = os.Getenv(\"I_ACCESS_TOKEN_SECRET\")\n)\n\nvar tClient twitter.Client\n\nfunc init() {\n\t\/\/京田辺\n\tconfig := oauth1.NewConfig(T_CONSUMER_KEY, T_CONSUMER_SECRET)\n\ttoken := oauth1.NewToken(T_ACCESS_TOKEN, T_ACCESS_TOKEN_SECRET)\n\thttpClient := config.Client(oauth1.NoContext, token)\n\ttClient = *twitter.NewClient(httpClient)\n}\n\n\/\/ create line of template\n\/\/ period:className(Instructor)\nfunc CreateLine(kyuko model.KyukoData) (string, error) {\n\tif kyuko.ClassName == \"\" || kyuko.Instructor == \"\" || kyuko.Period == 0 {\n\t\treturn \"\", errors.New(\"休講情報がないです\")\n\t}\n\n\tperiod := strconv.Itoa(kyuko.Period)\n\n\tline := period + \"限:\" + kyuko.ClassName + \"(\" + kyuko.Instructor + \")\\n\"\n\treturn line, nil\n}\n\n\/\/ create tweet template\n\/\/ exsample\n\/\/\n\/\/ hoge曜日の休講情報\n\/\/ period:className(Instructor)\n\/\/ period:className(Instructor)\n\/\/ ...\n\/\/\n\/\/ in 140 characters\nfunc CreateContent(kyuko []model.KyukoDara) ([]string, error) {\n\tvar tws []string\n\n\tvar lines []string\n\tfor _, v := range kyuko {\n\t\tline, err := CreateLine(v)\n\t\tif err != nil {\n\t\t\treturn tws, err\n\t\t}\n\t\tlines = append(lines, line)\n\t}\n\n\tweekday := kyuko[0].Weekday\n\tstringWeekday := time.Weekday(weekday)\n\ttw := \"曜日の休講情報\\n\"\n\tfor i, v := range lines {\n\n\t}\n}\n\n\/\/ tweet argment\nfunc Update(text string) error {\n\t_, _, err := tClient.Statuses.Update(text, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013, Google Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage zktopo\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/topo\"\n\t\"github.com\/youtube\/vitess\/go\/zk\"\n\t\"launchpad.net\/gozk\/zookeeper\"\n)\n\n\/*\nThis file contains the code to support the local agent process for zktopo.Server\n*\/\n\nfunc (zkts *Server) ValidateTabletActions(tabletAlias topo.TabletAlias) error {\n\tactionPath := TabletActionPathForAlias(tabletAlias)\n\n\t\/\/ Ensure that the action node is there. There is no conflict creating\n\t\/\/ this node.\n\t_, err := zkts.zconn.Create(actionPath, \"\", 0, zookeeper.WorldACL(zookeeper.PERM_ALL))\n\tif err != nil && !zookeeper.IsError(err, zookeeper.ZNODEEXISTS) {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (zkts *Server) CreateTabletPidNode(tabletAlias topo.TabletAlias, contents string, done chan struct{}) error {\n\tzkTabletPath := TabletPathForAlias(tabletAlias)\n\tpath := path.Join(zkTabletPath, \"pid\")\n\treturn zk.CreatePidNode(zkts.zconn, path, contents, done)\n}\n\nfunc (zkts *Server) ValidateTabletPidNode(tabletAlias topo.TabletAlias) error {\n\tzkTabletPath := TabletPathForAlias(tabletAlias)\n\tpath := path.Join(zkTabletPath, \"pid\")\n\t_, _, err := zkts.zconn.Get(path)\n\treturn err\n}\n\nfunc (zkts *Server) GetSubprocessFlags() []string {\n\treturn zk.GetZkSubprocessFlags()\n}\n\nfunc (zkts *Server) handleActionQueue(tabletAlias topo.TabletAlias, dispatchAction func(actionPath, data string) error) (<-chan zookeeper.Event, error) {\n\tzkActionPath := TabletActionPathForAlias(tabletAlias)\n\n\t\/\/ This read may seem a bit pedantic, but it makes it easier\n\t\/\/ for the system to trend towards consistency if an action\n\t\/\/ fails or somehow the action queue gets mangled by an errant\n\t\/\/ process.\n\tchildren, _, watch, err := zkts.zconn.ChildrenW(zkActionPath)\n\tif err != nil {\n\t\treturn watch, err\n\t}\n\tif len(children) > 0 {\n\t\tsort.Strings(children)\n\t\tfor _, child := range children {\n\t\t\tactionPath := zkActionPath + \"\/\" + child\n\t\t\tif _, err := strconv.ParseUint(child, 10, 64); err != nil {\n\t\t\t\t\/\/ This is handy if you want to restart a stuck queue.\n\t\t\t\t\/\/ FIXME(msolomon) could listen on the queue node for a change\n\t\t\t\t\/\/ generated by a \"touch\", but listening on two things is a bit\n\t\t\t\t\/\/ more complex.\n\t\t\t\tlog.Warningf(\"remove invalid event from action queue: %v\", child)\n\t\t\t\tzkts.zconn.Delete(actionPath, -1)\n\t\t\t}\n\n\t\t\tdata, _, err := zkts.zconn.Get(actionPath)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"cannot read action %v from zk: %v\", actionPath, err)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif err := dispatchAction(actionPath, data); err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn watch, nil\n}\n\nfunc (zkts *Server) ActionEventLoop(tabletAlias topo.TabletAlias, dispatchAction func(actionPath, data string) error, done chan struct{}) {\n\tfor {\n\t\t\/\/ Process any pending actions when we startup, before we start listening\n\t\t\/\/ for events.\n\t\twatch, err := zkts.handleActionQueue(tabletAlias, dispatchAction)\n\t\tif err != nil {\n\t\t\tlog.Warningf(\"action queue failed: %v\", err)\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ FIXME(msolomon) Add a skewing timer here to guarantee we wakeup\n\t\t\/\/ periodically even if events are missed?\n\t\tselect {\n\t\tcase event := <-watch:\n\t\t\tif !event.Ok() {\n\t\t\t\t\/\/ NOTE(msolomon) The zk meta conn will reconnect automatically, or\n\t\t\t\t\/\/ error out. At this point, there isn't much to do.\n\t\t\t\tlog.Warningf(\"zookeeper not OK: %v\", event)\n\t\t\t\ttime.Sleep(5 * time.Second)\n\t\t\t}\n\t\t\t\/\/ Otherwise, just handle the queue above.\n\t\tcase <-done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ actionPathToTabletAlias parses an actionPath back\n\/\/ zkActionPath is \/zk\/<cell>\/vt\/tablets\/<uid>\/action\/<number>\nfunc actionPathToTabletAlias(actionPath string) (topo.TabletAlias, error) {\n\tpathParts := strings.Split(actionPath, \"\/\")\n\tif len(pathParts) != 8 || pathParts[0] != \"\" || pathParts[1] != \"zk\" || pathParts[3] != \"vt\" || pathParts[4] != \"tablets\" || pathParts[6] != \"action\" {\n\t\treturn topo.TabletAlias{}, fmt.Errorf(\"invalid action path: %v\", actionPath)\n\t}\n\treturn topo.ParseTabletAliasString(pathParts[2] + \"-\" + pathParts[5])\n}\n\nfunc (zkts *Server) ReadTabletActionPath(actionPath string) (topo.TabletAlias, string, int64, error) {\n\ttabletAlias, err := actionPathToTabletAlias(actionPath)\n\tif err != nil {\n\t\treturn topo.TabletAlias{}, \"\", 0, err\n\t}\n\n\tdata, stat, err := zkts.zconn.Get(actionPath)\n\tif err != nil {\n\t\treturn topo.TabletAlias{}, \"\", 0, err\n\t}\n\n\treturn tabletAlias, data, int64(stat.Version()), nil\n}\n\nfunc (zkts *Server) UpdateTabletAction(actionPath, data string, version int64) error {\n\t_, err := zkts.zconn.Set(actionPath, data, int(version))\n\tif err != nil {\n\t\tif zookeeper.IsError(err, zookeeper.ZBADVERSION) {\n\t\t\terr = topo.ErrBadVersion\n\t\t}\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ StoreTabletActionResponse stores the data both in action and actionlog\nfunc (zkts *Server) StoreTabletActionResponse(actionPath, data string) error {\n\t_, err := zkts.zconn.Set(actionPath, data, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tactionLogPath := strings.Replace(actionPath, \"\/action\/\", \"\/actionlog\/\", 1)\n\t_, err = zk.CreateRecursive(zkts.zconn, actionLogPath, data, 0, zookeeper.WorldACL(zookeeper.PERM_ALL))\n\treturn err\n}\n\nfunc (zkts *Server) UnblockTabletAction(actionPath string) error {\n\treturn zkts.zconn.Delete(actionPath, -1)\n}\n<commit_msg>Small cleanup in this file.<commit_after>\/\/ Copyright 2013, Google Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage zktopo\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/topo\"\n\t\"github.com\/youtube\/vitess\/go\/zk\"\n\t\"launchpad.net\/gozk\/zookeeper\"\n)\n\n\/*\nThis file contains the code to support the local agent process for zktopo.Server\n*\/\n\nfunc (zkts *Server) ValidateTabletActions(tabletAlias topo.TabletAlias) error {\n\tactionPath := TabletActionPathForAlias(tabletAlias)\n\n\t\/\/ Ensure that the action node is there. There is no conflict creating\n\t\/\/ this node.\n\t_, err := zkts.zconn.Create(actionPath, \"\", 0, zookeeper.WorldACL(zookeeper.PERM_ALL))\n\tif err != nil && !zookeeper.IsError(err, zookeeper.ZNODEEXISTS) {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (zkts *Server) CreateTabletPidNode(tabletAlias topo.TabletAlias, contents string, done chan struct{}) error {\n\tzkTabletPath := TabletPathForAlias(tabletAlias)\n\tpath := path.Join(zkTabletPath, \"pid\")\n\treturn zk.CreatePidNode(zkts.zconn, path, contents, done)\n}\n\nfunc (zkts *Server) ValidateTabletPidNode(tabletAlias topo.TabletAlias) error {\n\tzkTabletPath := TabletPathForAlias(tabletAlias)\n\tpath := path.Join(zkTabletPath, \"pid\")\n\t_, _, err := zkts.zconn.Get(path)\n\treturn err\n}\n\nfunc (zkts *Server) GetSubprocessFlags() []string {\n\treturn zk.GetZkSubprocessFlags()\n}\n\n\/\/ handleActionQueue will set the watch on the action queue,\n\/\/ or return an error if it can't.\n\/\/ It will also process all pending actions, until it can't read one\n\/\/ or one fails. No error is returned for action failures.\nfunc (zkts *Server) handleActionQueue(tabletAlias topo.TabletAlias, dispatchAction func(actionPath, data string) error) (<-chan zookeeper.Event, error) {\n\tzkActionPath := TabletActionPathForAlias(tabletAlias)\n\n\t\/\/ This read may seem a bit pedantic, but it makes it easier\n\t\/\/ for the system to trend towards consistency if an action\n\t\/\/ fails or somehow the action queue gets mangled by an errant\n\t\/\/ process.\n\tchildren, _, watch, err := zkts.zconn.ChildrenW(zkActionPath)\n\tif err != nil {\n\t\treturn watch, err\n\t}\n\tif len(children) > 0 {\n\t\tsort.Strings(children)\n\t\tfor _, child := range children {\n\t\t\tactionPath := zkActionPath + \"\/\" + child\n\t\t\tif _, err := strconv.ParseUint(child, 10, 64); err != nil {\n\t\t\t\t\/\/ This is handy if you want to restart a stuck queue.\n\t\t\t\t\/\/ FIXME(msolomon) could listen on the queue node for a change\n\t\t\t\t\/\/ generated by a \"touch\", but listening on two things is a bit\n\t\t\t\t\/\/ more complex.\n\t\t\t\tlog.Warningf(\"remove invalid event from action queue: %v\", child)\n\t\t\t\tzkts.zconn.Delete(actionPath, -1)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdata, _, err := zkts.zconn.Get(actionPath)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"cannot read action %v from zk: %v\", actionPath, err)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif err := dispatchAction(actionPath, data); err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn watch, nil\n}\n\nfunc (zkts *Server) ActionEventLoop(tabletAlias topo.TabletAlias, dispatchAction func(actionPath, data string) error, done chan struct{}) {\n\tfor {\n\t\t\/\/ Process any pending actions when we startup, before\n\t\t\/\/ we start listening for events.\n\t\twatch, err := zkts.handleActionQueue(tabletAlias, dispatchAction)\n\t\tif err != nil {\n\t\t\tlog.Warningf(\"failed to set the watch on action queue, will try again in 5 seconds: %v\", err)\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\tselect {\n\t\tcase event := <-watch:\n\t\t\tif !event.Ok() {\n\t\t\t\t\/\/ NOTE(msolomon) The zk meta conn will\n\t\t\t\t\/\/ reconnect automatically, or error out.\n\t\t\t\t\/\/ At this point, there isn't much to do.\n\t\t\t\tlog.Warningf(\"zookeeper not OK: %v, will try again in 5 seconds\", event)\n\t\t\t\ttime.Sleep(5 * time.Second)\n\t\t\t}\n\t\t\t\/\/ Otherwise, just handle the queue above.\n\t\tcase <-done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ actionPathToTabletAlias parses an actionPath back\n\/\/ zkActionPath is \/zk\/<cell>\/vt\/tablets\/<uid>\/action\/<number>\nfunc actionPathToTabletAlias(actionPath string) (topo.TabletAlias, error) {\n\tpathParts := strings.Split(actionPath, \"\/\")\n\tif len(pathParts) != 8 || pathParts[0] != \"\" || pathParts[1] != \"zk\" || pathParts[3] != \"vt\" || pathParts[4] != \"tablets\" || pathParts[6] != \"action\" {\n\t\treturn topo.TabletAlias{}, fmt.Errorf(\"invalid action path: %v\", actionPath)\n\t}\n\treturn topo.ParseTabletAliasString(pathParts[2] + \"-\" + pathParts[5])\n}\n\nfunc (zkts *Server) ReadTabletActionPath(actionPath string) (topo.TabletAlias, string, int64, error) {\n\ttabletAlias, err := actionPathToTabletAlias(actionPath)\n\tif err != nil {\n\t\treturn topo.TabletAlias{}, \"\", 0, err\n\t}\n\n\tdata, stat, err := zkts.zconn.Get(actionPath)\n\tif err != nil {\n\t\treturn topo.TabletAlias{}, \"\", 0, err\n\t}\n\n\treturn tabletAlias, data, int64(stat.Version()), nil\n}\n\nfunc (zkts *Server) UpdateTabletAction(actionPath, data string, version int64) error {\n\t_, err := zkts.zconn.Set(actionPath, data, int(version))\n\tif err != nil {\n\t\tif zookeeper.IsError(err, zookeeper.ZBADVERSION) {\n\t\t\terr = topo.ErrBadVersion\n\t\t}\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ StoreTabletActionResponse stores the data both in action and actionlog\nfunc (zkts *Server) StoreTabletActionResponse(actionPath, data string) error {\n\t_, err := zkts.zconn.Set(actionPath, data, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tactionLogPath := strings.Replace(actionPath, \"\/action\/\", \"\/actionlog\/\", 1)\n\t_, err = zk.CreateRecursive(zkts.zconn, actionLogPath, data, 0, zookeeper.WorldACL(zookeeper.PERM_ALL))\n\treturn err\n}\n\nfunc (zkts *Server) UnblockTabletAction(actionPath string) error {\n\treturn zkts.zconn.Delete(actionPath, -1)\n}\n<|endoftext|>"} {"text":"<commit_before>package goConsulRoundRobin\n\nimport (\n\t\"sync\"\n\t\"time\"\n)\n\ntype serviceMap map[string]*serviceEndpoints\n\nvar services = make(serviceMap)\nvar requestLock = &sync.Mutex{}\n\n\/\/GetServiceEndpoint returns a healthy, round robbined service endpoint\nfunc GetServiceEndpoint(service string) (endpoint string, err error) {\n\t\/\/requestLock makes all requests synchronus as maps are not thread safe\n\trequestLock.Lock()\n\tdefer requestLock.Unlock()\n\n\t\/\/if new service request\n\tif _, present := services[service]; !present {\n\t\t\/\/make new service and return endpoint\n\t\terr = services.newService(service)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tendpoint = services[service].getAndInc()\n\t\treturn endpoint, nil\n\t}\n\n\t\/\/if timeout\n\tif services[service].timedOut() {\n\t\t\/\/refresh endpoints\n\t\terr := services[service].refresh()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\t\/\/return endpoint\n\tendpoint = services[service].getAndInc()\n\treturn endpoint, nil\n}\n\n\/\/possible problem that serviceMap has no *\nfunc (s serviceMap) newService(service string) error {\n\tendpoints, err := getHealthyEndpoints(service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts[service] = &serviceEndpoints{\n\t\tname: service,\n\t\tendpoints: endpoints,\n\t\tindex: 0,\n\t\ttimeout: time.After(consulRefreshRate),\n\t}\n\n\treturn nil\n}\n<commit_msg>added test mode<commit_after>package goConsulRoundRobin\n\nimport (\n\t\"sync\"\n\t\"time\"\n)\n\ntype serviceMap map[string]*serviceEndpoints\n\nvar services = make(serviceMap)\nvar requestLock = &sync.Mutex{}\n\n\/\/TestURL is setable in test scenarios (set CONSUL_IP=testMode) and is what will return on a GetServiceEndpoint call\nvar TestURL string\n\n\/\/GetServiceEndpoint returns a healthy, round robbined service endpoint\nfunc GetServiceEndpoint(service string) (endpoint string, err error) {\n\tif consulIP == \"testMode\" {\n\t\treturn TestURL, nil\n\t}\n\t\/\/requestLock makes all requests synchronus as maps are not thread safe\n\trequestLock.Lock()\n\tdefer requestLock.Unlock()\n\n\t\/\/if new service request\n\tif _, present := services[service]; !present {\n\t\t\/\/make new service and return endpoint\n\t\terr = services.newService(service)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tendpoint = services[service].getAndInc()\n\t\treturn endpoint, nil\n\t}\n\n\t\/\/if timeout\n\tif services[service].timedOut() {\n\t\t\/\/refresh endpoints\n\t\terr := services[service].refresh()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\t\/\/return endpoint\n\tendpoint = services[service].getAndInc()\n\treturn endpoint, nil\n}\n\n\/\/possible problem that serviceMap has no *\nfunc (s serviceMap) newService(service string) error {\n\tendpoints, err := getHealthyEndpoints(service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts[service] = &serviceEndpoints{\n\t\tname: service,\n\t\tendpoints: endpoints,\n\t\tindex: 0,\n\t\ttimeout: time.After(consulRefreshRate),\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package gocode\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n)\n\nconst TestDirectory = \"..\/_testing\"\n\ntype Test struct {\n\tName string\n\tFile []byte\n\tCursor int\n\tResult []string\n}\n\nfunc (t Test) Check(conf *Config) error {\n\tfn := filepath.Base(filepath.Dir(t.Name))\n\tcs := conf.Complete(t.File, t.Name, t.Cursor)\n\tif len(cs) != len(t.Result) {\n\t\treturn fmt.Errorf(\"count: expected %d got %d: %s\", len(t.Result), len(cs), fn)\n\t}\n\tfor i, c := range cs {\n\t\tr := t.Result[i]\n\t\tif c.String() != r {\n\t\t\treturn fmt.Errorf(\"candidate: expected '%s' got '%s': %s\", r, c, fn)\n\t\t}\n\t}\n\treturn nil\n}\n\nvar (\n\ttests []Test\n\tconf *Config\n)\n\nfunc init() {\n\tvar err error\n\tconf, err = newConfig()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\ttests, err = loadTests()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc TestInit(t *testing.T) {\n\tif _, err := newConfig(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := loadTests(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestGocode(t *testing.T) {\n\tfor _, test := range tests {\n\t\tif err := test.Check(conf); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestParallel_1(t *testing.T) {\n\tt.Parallel()\n\tfor _, test := range tests {\n\t\tif err := test.Check(conf); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestParallel_2(t *testing.T) {\n\tt.Parallel()\n\tfor _, test := range tests {\n\t\tif err := test.Check(conf); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestParallel_3(t *testing.T) {\n\tt.Parallel()\n\tfor _, test := range tests {\n\t\tif err := test.Check(conf); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestParallel_4(t *testing.T) {\n\tt.Parallel()\n\tconf.GOPATH = \"\" \/\/ Alter GOPATH\n\tfor _, test := range tests {\n\t\tif err := test.Check(conf); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc loadTests() ([]Test, error) {\n\tvar tests []Test\n\tlist, err := ioutil.ReadDir(TestDirectory)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, fi := range list {\n\t\tif fi.IsDir() {\n\t\t\ttest, err := newTest(filepath.Join(TestDirectory, fi.Name()))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ttests = append(tests, *test)\n\t\t}\n\t}\n\treturn tests, nil\n}\n\nfunc newTest(path string) (*Test, error) {\n\tlist, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt := Test{Cursor: -1}\n\tfor _, fi := range list {\n\t\tfn := fi.Name()\n\t\tswitch fn {\n\t\tcase \"test.go.in\":\n\t\t\tt.Name = filepath.Join(path, fn)\n\t\t\tt.File, err = ioutil.ReadFile(t.Name)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase \"out.expected\":\n\t\t\tt.Result, err = newResult(filepath.Join(path, fn))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tdefault:\n\t\t\tif strings.HasPrefix(fn, \"cursor\") {\n\t\t\t\tn := strings.IndexByte(fn, '.')\n\t\t\t\tif n == -1 {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error parsing cursor file: %s\", fn)\n\t\t\t\t}\n\t\t\t\tt.Cursor, err = strconv.Atoi(fn[n+1:])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif t.Cursor == -1 {\n\t\treturn nil, fmt.Errorf(\"no cursor file in directory: %s\", path)\n\t}\n\tif t.Name == \"\" {\n\t\treturn nil, fmt.Errorf(\"no test file in directory: %s\", path)\n\t}\n\tif t.File == nil {\n\t\treturn nil, fmt.Errorf(\"nil test file in directory: %s\", path)\n\t}\n\treturn &t, nil\n}\n\nfunc newResult(path string) ([]string, error) {\n\tb, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn := bytes.IndexByte(b, '\\n')\n\tif n == len(b)-1 {\n\t\treturn []string{}, nil\n\t}\n\tvar s []string\n\tfor _, b := range bytes.Split(b[n+1:], []byte{'\\n'}) {\n\t\tif len(b) > 1 {\n\t\t\ts = append(s, string(bytes.TrimSpace(b)))\n\t\t}\n\t}\n\treturn s, nil\n}\n\nfunc newConfig() (*Config, error) {\n\tc := Config{\n\t\tGOROOT: runtime.GOROOT(),\n\t\tGOPATH: os.Getenv(\"GOPATH\"),\n\t}\n\tif c.GOROOT == \"\" {\n\t\treturn nil, fmt.Errorf(\"GOROOT must be set\")\n\t}\n\tif c.GOPATH == \"\" {\n\t\treturn nil, fmt.Errorf(\"GOPATH must be set\")\n\t}\n\treturn &c, nil\n}\n\nfunc BenchmarkGocode(b *testing.B) {\n\tconf, err := newConfig()\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\ttests, err := loadTests()\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tt := tests[0]\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_ = conf.Complete(t.File, t.Name, t.Cursor)\n\t}\n}\n\nfunc benchTest(t Test) {\n\t_ = conf.Complete(t.File, t.Name, t.Cursor)\n}\n\nfunc Benchmark_01(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[1-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_02(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[2-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_03(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[3-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_04(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[4-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_05(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[5-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_06(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[6-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_07(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[7-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_08(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[8-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_09(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[9-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_10(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[10-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_11(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[11-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_12(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[12-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_13(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[13-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_14(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[14-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_15(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[15-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_16(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[16-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_17(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[17-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_18(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[18-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_19(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[19-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_20(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[20-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_21(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[21-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_22(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[22-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_23(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[23-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_24(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[24-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_25(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[25-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_26(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[26-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_27(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[27-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_28(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[28-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_29(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[29-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_30(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[30-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_31(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[31-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_32(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[32-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_33(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[33-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_34(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[34-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_35(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[35-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_36(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[36-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_37(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[37-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_38(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[38-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_39(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[39-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_40(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[40-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_41(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[41-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_42(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[42-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_43(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[43-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_44(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[44-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_45(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[45-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_46(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[46-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_47(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[47-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_48(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[48-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_49(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[49-1])\n\t\t}\n\t}\n}\n\nfunc Benchmark_50(b *testing.B) {\n\tif !testing.Short() {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tbenchTest(tests[50-1])\n\t\t}\n\t}\n}\n<commit_msg>update test<commit_after>package gocode\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n)\n\nconst TestDirectory = \"..\/_testing\"\n\nvar (\n\ttests []Test\n\tconf *Config\n)\n\nfunc init() {\n\tvar err error\n\tconf, err = newConfig()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\ttests, err = loadTests()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc TestInit(t *testing.T) {\n\tif _, err := newConfig(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := loadTests(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestGocode(t *testing.T) {\n\tfor _, test := range tests {\n\t\tif err := test.Check(conf); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestParallel_1(t *testing.T) {\n\tt.Parallel()\n\tfor _, test := range tests {\n\t\tif err := test.Check(conf); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestParallel_2(t *testing.T) {\n\tt.Parallel()\n\tfor _, test := range tests {\n\t\tif err := test.Check(conf); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestParallel_3(t *testing.T) {\n\tt.Parallel()\n\tfor _, test := range tests {\n\t\tif err := test.Check(conf); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestParallel_4(t *testing.T) {\n\tt.Parallel()\n\tconf.GOPATH = \"\" \/\/ Alter GOPATH\n\tfor _, test := range tests {\n\t\tif err := test.Check(conf); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkOne(b *testing.B) {\n\tt := tests[0]\n\tfor i := 0; i < b.N; i++ {\n\t\t_ = conf.Complete(t.File, t.Name, t.Cursor)\n\t}\n}\n\nfunc BenchmarkTen(b *testing.B) {\n\tif len(tests) < 10 {\n\t\tb.Fatal(\"Expected 10+ test cases\")\n\t}\n\ttt := tests[:10]\n\tfor i := 0; i < b.N; i++ {\n\t\tfor _, t := range tt {\n\t\t\t_ = conf.Complete(t.File, t.Name, t.Cursor)\n\t\t}\n\t}\n}\n\nfunc BenchmarkAll(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tfor _, t := range tests {\n\t\t\t_ = conf.Complete(t.File, t.Name, t.Cursor)\n\t\t}\n\t}\n}\n\ntype Test struct {\n\tName string\n\tFile []byte\n\tCursor int\n\tResult []string\n}\n\nfunc (t Test) Check(conf *Config) error {\n\tfn := filepath.Base(filepath.Dir(t.Name))\n\tcs := conf.Complete(t.File, t.Name, t.Cursor)\n\tif len(cs) != len(t.Result) {\n\t\treturn fmt.Errorf(\"count: expected %d got %d: %s\", len(t.Result), len(cs), fn)\n\t}\n\tfor i, c := range cs {\n\t\tr := t.Result[i]\n\t\tif c.String() != r {\n\t\t\treturn fmt.Errorf(\"candidate: expected '%s' got '%s': %s\", r, c, fn)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc loadTests() ([]Test, error) {\n\tvar tests []Test\n\tlist, err := ioutil.ReadDir(TestDirectory)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, fi := range list {\n\t\tif fi.IsDir() {\n\t\t\ttest, err := newTest(filepath.Join(TestDirectory, fi.Name()))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\ttests = append(tests, *test)\n\t\t}\n\t}\n\treturn tests, nil\n}\n\nfunc newTest(path string) (*Test, error) {\n\tlist, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt := Test{Cursor: -1}\n\tfor _, fi := range list {\n\t\tfn := fi.Name()\n\t\tswitch fn {\n\t\tcase \"test.go.in\":\n\t\t\tt.Name = filepath.Join(path, fn)\n\t\t\tt.File, err = ioutil.ReadFile(t.Name)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase \"out.expected\":\n\t\t\tt.Result, err = newResult(filepath.Join(path, fn))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tdefault:\n\t\t\tif strings.HasPrefix(fn, \"cursor\") {\n\t\t\t\tn := strings.IndexByte(fn, '.')\n\t\t\t\tif n == -1 {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error parsing cursor file: %s\", fn)\n\t\t\t\t}\n\t\t\t\tt.Cursor, err = strconv.Atoi(fn[n+1:])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif t.Cursor == -1 {\n\t\treturn nil, fmt.Errorf(\"no cursor file in directory: %s\", path)\n\t}\n\tif t.Name == \"\" {\n\t\treturn nil, fmt.Errorf(\"no test file in directory: %s\", path)\n\t}\n\tif t.File == nil {\n\t\treturn nil, fmt.Errorf(\"nil test file in directory: %s\", path)\n\t}\n\treturn &t, nil\n}\n\nfunc newResult(path string) ([]string, error) {\n\tb, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn := bytes.IndexByte(b, '\\n')\n\tif n == len(b)-1 {\n\t\treturn []string{}, nil\n\t}\n\tvar s []string\n\tfor _, b := range bytes.Split(b[n+1:], []byte{'\\n'}) {\n\t\tif len(b) > 1 {\n\t\t\ts = append(s, string(bytes.TrimSpace(b)))\n\t\t}\n\t}\n\treturn s, nil\n}\n\nfunc newConfig() (*Config, error) {\n\tc := Config{\n\t\tGOROOT: runtime.GOROOT(),\n\t\tGOPATH: os.Getenv(\"GOPATH\"),\n\t}\n\tif c.GOROOT == \"\" {\n\t\treturn nil, fmt.Errorf(\"GOROOT must be set\")\n\t}\n\tif c.GOPATH == \"\" {\n\t\treturn nil, fmt.Errorf(\"GOPATH must be set\")\n\t}\n\treturn &c, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"log\"\n\n\t\"github.com\/dolab\/logger\"\n)\n\nvar (\n\tstderr *logger.Logger\n\n\tenvTemplate = `#!\/usr\/bin\/env bash\n\n# adjust GOPATH\ncase \":$GOPATH:\" in\n *\":$(pwd):\"*) :;;\n *) GOPATH=$(pwd):$GOPATH;;\nesac\nexport GOPATH\n\n\n# adjust PATH\nwhile IFS=':' read -ra ADDR; do\n for i in \"${ADDR[@]}\"; do\n case \":$PATH:\" in\n *\":$i\/bin:\"*) :;;\n *) PATH=$i\/bin:$PATH\n esac\n done\ndone <<< \"$GOPATH\"\nexport PATH\n\n\n# mock development && test envs\nif [ ! -d \"$(pwd)\/src\/{{.Namespace}}\/{{.Application}}\" ];\nthen\n mkdir -p \"$(pwd)\/src\/{{.Namespace}}\"\n ln -s \"$(pwd)\/gogo\" \"$(pwd)\/src\/{{.Namespace}}\/{{.Application}}\"\nfi\n`\n\n\tmakefileTemplate = `all: gobuild gotest\n\ngodev:\n cd gogo && go run main.go\n\ngobuild: goclean goinstall\n\ngorebuild: goclean goreinstall\n\ngoclean:\n rm -rf bin\n rm -rf pkg\n\ngoinstall:\n cd gogo && go get -t -v .\/...\n\ngoreinstall:\n cd gogo && go get -t -a -v .\/...\n\ngotest:\n go test {{.Namespace}}\/{{.Application}}\/app\/controllers\n go test {{.Namespace}}\/{{.Application}}\/app\/middlewares\n go test {{.Namespace}}\/{{.Application}}\/app\/models\n\ngopackage:\n mkdir -p bin && go build -a -o bin\/{{.Application}} src\/{{.Namespace}}\/{{.Application}}\/main.go\n\ntravis: gobuild gotest\n`\n\n\tgitIgnoreTemplate = `# Compiled Object files, Static and Dynamic libs (Shared Objects)\n*.o\n*.a\n*.so\n\n# Folders\n_obj\n_test\nbin\npkg\nsrc\n\n# Architecture specific extensions\/prefixes\n*.[568vq]\n[568vq].out\n\n*.cgo1.go\n*.cgo2.c\n_cgo_defun.c\n_cgo_gotypes.go\n_cgo_export.*\n\n_testmain.go\n\n*.exe\n*.test\n*.prof\n\n# development & test config files\n*.development.json\n*.test.json\n`\n\n\tapplicationTemplate = []string{`package controllers\n\nimport (\n \"github.com\/dolab\/gogo\"\n\n \"{{.Namespace}}\/{{.Application}}\/app\/middlewares\"\n)\n\ntype Application struct {\n *gogo.AppServer\n}\n\nfunc New(runMode, srcPath string) *Application {\n appServer := gogo.New(runMode, srcPath)\n\n err := NewAppConfig(appServer.Config())\n if err != nil {\n panic(err.Error())\n }\n\n return &Application{appServer}\n}\n\n\/\/ Middlerwares implements gogo.Middlewarer\n\/\/ NOTE: DO NOT change the method name, its required by gogo!\nfunc (app *Application) Middlewares() {\n \/\/ apply your middlewares\n\n \/\/ panic recovery\n app.Use(middlewares.Recovery())\n}\n\n\/\/ Resources implements gogo.Resourcer\n\/\/ NOTE: DO NOT change the method name, its required by gogo!\nfunc (app *Application) Resources() {\n \/\/ register your resources\n \/\/ app.GET(\"\/\", handler)\n\n app.GET(\"\/@getting_start\/hello\", GettingStart.Hello)\n}\n\n\/\/ Run runs application after registering middelwares and resources\nfunc (app *Application) Run() {\n \/\/ register middlewares\n app.Middlewares()\n\n \/\/ register resources\n app.Resources()\n\n \/\/ run server\n app.AppServer.Run()\n}\n`, `package controllers\n\nimport (\n \"net\/http\/httptest\"\n \"os\"\n \"path\"\n \"testing\"\n\n \"github.com\/dolab\/httptesting\"\n)\n\nvar (\n testServer *httptest.Server\n testClient *httptesting.Client\n)\n\nfunc TestMain(m *testing.M) {\n var (\n runMode = \"test\"\n srcPath = path.Clean(\"..\/..\/\")\n )\n\n app := New(runMode, srcPath)\n app.Resources()\n\n testServer = httptest.NewServer(app)\n testClient = httptesting.New(testServer.URL, false)\n\n code := m.Run()\n\n testServer.Close()\n\n os.Exit(code)\n}\n`}\n\n\tconfigTemplate = []string{`package controllers\n\nimport (\n \"github.com\/dolab\/gogo\"\n)\n\nvar (\n Config *AppConfig\n)\n\n\/\/ Application configuration specs\ntype AppConfig struct {\n Domain string ` + \"`\" + `json:\"domain\"` + \"`\" + `\n GettingStart *GettingStartConfig ` + \"`\" + `json:\"getting_start\"` + \"`\" + `\n}\n\n\/\/ NewAppConfig apply application config from *gogo.AppConfig\nfunc NewAppConfig(config *gogo.AppConfig) error {\n return config.UnmarshalJSON(&Config)\n}\n\n\/\/ Sample application config for illustration\ntype GettingStartConfig struct {\n Greeting string ` + \"`\" + `json:\"greeting\"` + \"`\" + `\n}\n`, `package controllers\n\nimport (\n \"testing\"\n\n \"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc Test_AppConfig(t *testing.T) {\n assertion := assert.New(t)\n\n assertion.NotEmpty(Config.Domain)\n assertion.NotNil(Config.GettingStart)\n}\n`}\n\n\tgettingStartTemplate = []string{`package controllers\n\nimport (\n \"github.com\/dolab\/gogo\"\n)\n\nvar (\n GettingStart *_GettingStart\n)\n\ntype _GettingStart struct{}\n\n\/\/ @route GET \/@getting_start\/hello\nfunc (_ *_GettingStart) Hello(ctx *gogo.Context) {\n ctx.Logger.Warnf(\"Visiting domain is: %s\", Config.Domain)\n\n ctx.Text(Config.GettingStart.Greeting)\n}\n`, `package controllers\n\nimport (\n \"testing\"\n)\n\nfunc Test_ExampleHello(t *testing.T) {\n testClient.Get(t, \"\/@getting_start\/hello\")\n\n testClient.AssertOK()\n testClient.AssertContains(Config.GettingStart.Greeting)\n}\n`}\n\n\tmiddlewareTemplate = []string{`package middlewares\n\nimport (\n \"runtime\"\n \"strings\"\n\n \"github.com\/dolab\/gogo\"\n)\n\nfunc Recovery() gogo.Middleware {\n return func(ctx *gogo.Context) {\n defer func() {\n if panicErr := recover(); panicErr != nil {\n \/\/ where does panic occur? try max 20 depths\n pcs := make([]uintptr, 20)\n max := runtime.Callers(2, pcs)\n for i := 0; i < max; i++ {\n pcfunc := runtime.FuncForPC(pcs[i])\n if strings.HasPrefix(pcfunc.Name(), \"runtime.\") {\n continue\n }\n\n pcfile, pcline := pcfunc.FileLine(pcs[i])\n\n tmp := strings.SplitN(pcfile, \"\/src\/\", 2)\n if len(tmp) == 2 {\n pcfile = \"src\/\" + tmp[1]\n }\n ctx.Logger.Errorf(\"(%s:%d: %v)\", pcfile, pcline, panicErr)\n\n break\n }\n\n ctx.Abort()\n }\n }()\n\n ctx.Next()\n }\n}\n`, `package middlewares\n\nimport (\n \"testing\"\n\n \"github.com\/dolab\/gogo\"\n)\n\nfunc Test_Recovery(t *testing.T) {\n testApp.Use(Recovery())\n defer testApp.Clean()\n\n \/\/ register temp resource for testing\n testApp.GET(\"\/middlewares\/recovery\", func(ctx *gogo.Context) {\n panic(\"Recover testing\")\n })\n\n testClient.Get(t, \"\/middlewares\/recovery\", nil)\n testClient.AssertOK()\n}\n`, `package middlewares\n\nimport (\n \"net\/http\/httptest\"\n \"os\"\n \"path\"\n \"testing\"\n\n \"github.com\/dolab\/gogo\"\n \"github.com\/dolab\/httptesting\"\n)\n\nvar (\n testApp *gogo.AppServer\n testServer *httptest.Server\n testClient *httptesting.Client\n)\n\nfunc TestMain(m *testing.M) {\n var (\n runMode = \"test\"\n srcPath = path.Clean(\"..\/..\/\")\n )\n\n testApp = gogo.New(runMode, srcPath)\n testServer = httptest.NewServer(testApp)\n testClient = httptesting.New(testServer.URL, false)\n\n code := m.Run()\n\n testServer.Close()\n\n os.Exit(code)\n}\n`}\n\n\tjsonTemplate = `{\n \"name\": \"{{.Application}}\",\n \"mode\": \"test\",\n \"sections\": {\n \"development\": {\n \"server\": {\n \"addr\": \"localhost\",\n \"port\": 9090,\n \"ssl\": false,\n \"ssl_cert\": \"\/path\/to\/ssl\/cert\",\n \"ssl_key\": \"\/path\/to\/ssl\/key\",\n \"request_timeout\": 30,\n \"response_timeout\": 30,\n \"request_id\": \"X-Request-Id\"\n },\n \"logger\": {\n \"output\": \"stdout\",\n \"level\": \"debug\",\n \"filter_params\": [\"password\", \"password_confirmation\"]\n },\n \"domain\": \"https:\/\/example.com\",\n \"getting_start\": {\n \"greeting\": \"Hello, gogo!\"\n }\n },\n\n \"test\": {\n \"server\": {\n \"addr\": \"localhost\",\n \"port\": 9090,\n \"ssl\": false,\n \"ssl_cert\": \"\/path\/to\/ssl\/cert\",\n \"ssl_key\": \"\/path\/to\/ssl\/key\",\n \"request_timeout\": 30,\n \"response_timeout\": 30,\n \"request_id\": \"X-Request-Id\"\n },\n \"logger\": {\n \"output\": \"stdout\",\n \"level\": \"info\",\n \"filter_params\": [\"password\", \"password_confirmation\"]\n },\n \"domain\": \"https:\/\/example.com\",\n \"getting_start\": {\n \"greeting\": \"Hello, gogo!\"\n }\n },\n\n \"production\": {\n \"server\": {\n \"addr\": \"localhost\",\n \"port\": 9090,\n \"ssl\": true,\n \"ssl_cert\": \"\/path\/to\/ssl\/cert\",\n \"ssl_key\": \"\/path\/to\/ssl\/key\",\n \"request_timeout\": 30,\n \"response_timeout\": 30,\n \"request_id\": \"X-Request-Id\"\n },\n \"logger\": {\n \"output\": \"stdout\",\n \"level\": \"warn\",\n \"filter_params\": [\"password\", \"password_confirmation\"]\n }\n }\n }\n}\n`\n\n\tmainTemplate = `package main\n\nimport (\n \"flag\"\n \"os\"\n \"path\"\n\n \"github.com\/dolab\/gogo\"\n\n \"{{.Namespace}}\/{{.Application}}\/app\/controllers\"\n)\n\nvar (\n runMode string \/\/ app run mode, available values are [development|test|production], default to development\n srcPath string \/\/ app source path, e.g. \/home\/deploy\/websites\/helloapp\n)\n\nfunc main() {\n flag.StringVar(&runMode, \"runMode\", \"development\", \"{{.Application}} -runMode=[development|test|production]\")\n flag.StringVar(&srcPath, \"srcPath\", \"\", \"{{.Application}} -srcPath=\/path\/to\/source\")\n flag.Parse()\n\n \/\/ verify run mode\n if mode := gogo.RunMode(runMode); !mode.IsValid() {\n flag.PrintDefaults()\n return\n }\n\n \/\/ adjust src path\n if srcPath == \"\" {\n var err error\n\n srcPath, err = os.Getwd()\n if err != nil {\n panic(err)\n }\n } else {\n srcPath = path.Clean(srcPath)\n }\n\n controllers.New(runMode, srcPath).Run()\n}\n`\n)\n\ntype templateData struct {\n\tNamespace string\n\tApplication string\n}\n\nfunc init() {\n\tvar err error\n\n\t\/\/ setup logger\n\tstderr, err = logger.New(\"stderr\")\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\tstderr.SetLevelByName(\"info\")\n\tstderr.SetFlag(log.Lshortfile)\n}\n<commit_msg>fix env.sh template issue<commit_after>package commands\n\nimport (\n\t\"log\"\n\n\t\"github.com\/dolab\/logger\"\n)\n\nvar (\n\tstderr *logger.Logger\n\n\tenvTemplate = `#!\/usr\/bin\/env bash\n\n# adjust GOPATH\ncase \":$GOPATH:\" in\n *\":$(pwd):\"*) :;;\n *) GOPATH=$(pwd):$GOPATH;;\nesac\nexport GOPATH\n\n\n# adjust PATH\nwhile IFS=':' read -ra ADDR; do\n for i in \"${ADDR[@]}\"; do\n case \":$PATH:\" in\n *\":$i\/bin:\"*) :;;\n *) PATH=$i\/bin:$PATH\n esac\n done\ndone <<< \"$GOPATH\"\nexport PATH\n\n\n# mock development && test envs\nif [ ! -d \"$(pwd)\/src\/{{.Namespace}}\/{{.Application}}\" ];\nthen\n mkdir -p \"$(pwd)\/src\/{{.Namespace}}\"\n ln -s \"$(pwd)\/gogo\/\" \"$(pwd)\/src\/{{.Namespace}}\/{{.Application}}\"\nfi\n`\n\n\tmakefileTemplate = `all: gobuild gotest\n\ngodev:\n cd gogo && go run main.go\n\ngobuild: goclean goinstall\n\ngorebuild: goclean goreinstall\n\ngoclean:\n rm -rf bin\n rm -rf pkg\n\ngoinstall:\n cd gogo && go get -t -v .\/...\n\ngoreinstall:\n cd gogo && go get -t -a -v .\/...\n\ngotest:\n go test {{.Namespace}}\/{{.Application}}\/app\/controllers\n go test {{.Namespace}}\/{{.Application}}\/app\/middlewares\n # go test {{.Namespace}}\/{{.Application}}\/app\/models\n\ngopackage:\n mkdir -p bin && go build -a -o bin\/{{.Application}} src\/{{.Namespace}}\/{{.Application}}\/main.go\n\ntravis: gobuild gotest\n`\n\n\tgitIgnoreTemplate = `# Compiled Object files, Static and Dynamic libs (Shared Objects)\n*.o\n*.a\n*.so\n\n# Folders\n_obj\n_test\nbin\npkg\nsrc\n\n# Architecture specific extensions\/prefixes\n*.[568vq]\n[568vq].out\n\n*.cgo1.go\n*.cgo2.c\n_cgo_defun.c\n_cgo_gotypes.go\n_cgo_export.*\n\n_testmain.go\n\n*.exe\n*.test\n*.prof\n\n# development & test config files\n*.development.json\n*.test.json\n`\n\n\tapplicationTemplate = []string{`package controllers\n\nimport (\n \"github.com\/dolab\/gogo\"\n\n \"{{.Namespace}}\/{{.Application}}\/app\/middlewares\"\n)\n\ntype Application struct {\n *gogo.AppServer\n}\n\nfunc New(runMode, srcPath string) *Application {\n appServer := gogo.New(runMode, srcPath)\n\n err := NewAppConfig(appServer.Config())\n if err != nil {\n panic(err.Error())\n }\n\n return &Application{appServer}\n}\n\n\/\/ Middlerwares implements gogo.Middlewarer\n\/\/ NOTE: DO NOT change the method name, its required by gogo!\nfunc (app *Application) Middlewares() {\n \/\/ apply your middlewares\n\n \/\/ panic recovery\n app.Use(middlewares.Recovery())\n}\n\n\/\/ Resources implements gogo.Resourcer\n\/\/ NOTE: DO NOT change the method name, its required by gogo!\nfunc (app *Application) Resources() {\n \/\/ register your resources\n \/\/ app.GET(\"\/\", handler)\n\n app.GET(\"\/@getting_start\/hello\", GettingStart.Hello)\n}\n\n\/\/ Run runs application after registering middelwares and resources\nfunc (app *Application) Run() {\n \/\/ register middlewares\n app.Middlewares()\n\n \/\/ register resources\n app.Resources()\n\n \/\/ run server\n app.AppServer.Run()\n}\n`, `package controllers\n\nimport (\n \"net\/http\/httptest\"\n \"os\"\n \"path\"\n \"testing\"\n\n \"github.com\/dolab\/httptesting\"\n)\n\nvar (\n testServer *httptest.Server\n testClient *httptesting.Client\n)\n\nfunc TestMain(m *testing.M) {\n var (\n runMode = \"test\"\n srcPath = path.Clean(\"..\/..\/\")\n )\n\n app := New(runMode, srcPath)\n app.Resources()\n\n testServer = httptest.NewServer(app)\n testClient = httptesting.New(testServer.URL, false)\n\n code := m.Run()\n\n testServer.Close()\n\n os.Exit(code)\n}\n`}\n\n\tconfigTemplate = []string{`package controllers\n\nimport (\n \"github.com\/dolab\/gogo\"\n)\n\nvar (\n Config *AppConfig\n)\n\n\/\/ Application configuration specs\ntype AppConfig struct {\n Domain string ` + \"`\" + `json:\"domain\"` + \"`\" + `\n GettingStart *GettingStartConfig ` + \"`\" + `json:\"getting_start\"` + \"`\" + `\n}\n\n\/\/ NewAppConfig apply application config from *gogo.AppConfig\nfunc NewAppConfig(config *gogo.AppConfig) error {\n return config.UnmarshalJSON(&Config)\n}\n\n\/\/ Sample application config for illustration\ntype GettingStartConfig struct {\n Greeting string ` + \"`\" + `json:\"greeting\"` + \"`\" + `\n}\n`, `package controllers\n\nimport (\n \"testing\"\n\n \"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc Test_AppConfig(t *testing.T) {\n assertion := assert.New(t)\n\n assertion.NotEmpty(Config.Domain)\n assertion.NotNil(Config.GettingStart)\n}\n`}\n\n\tgettingStartTemplate = []string{`package controllers\n\nimport (\n \"github.com\/dolab\/gogo\"\n)\n\nvar (\n GettingStart *_GettingStart\n)\n\ntype _GettingStart struct{}\n\n\/\/ @route GET \/@getting_start\/hello\nfunc (_ *_GettingStart) Hello(ctx *gogo.Context) {\n ctx.Logger.Warnf(\"Visiting domain is: %s\", Config.Domain)\n\n ctx.Text(Config.GettingStart.Greeting)\n}\n`, `package controllers\n\nimport (\n \"testing\"\n)\n\nfunc Test_ExampleHello(t *testing.T) {\n testClient.Get(t, \"\/@getting_start\/hello\")\n\n testClient.AssertOK()\n testClient.AssertContains(Config.GettingStart.Greeting)\n}\n`}\n\n\tmiddlewareTemplate = []string{`package middlewares\n\nimport (\n \"runtime\"\n \"strings\"\n\n \"github.com\/dolab\/gogo\"\n)\n\nfunc Recovery() gogo.Middleware {\n return func(ctx *gogo.Context) {\n defer func() {\n if panicErr := recover(); panicErr != nil {\n \/\/ where does panic occur? try max 20 depths\n pcs := make([]uintptr, 20)\n max := runtime.Callers(2, pcs)\n for i := 0; i < max; i++ {\n pcfunc := runtime.FuncForPC(pcs[i])\n if strings.HasPrefix(pcfunc.Name(), \"runtime.\") {\n continue\n }\n\n pcfile, pcline := pcfunc.FileLine(pcs[i])\n\n tmp := strings.SplitN(pcfile, \"\/src\/\", 2)\n if len(tmp) == 2 {\n pcfile = \"src\/\" + tmp[1]\n }\n ctx.Logger.Errorf(\"(%s:%d: %v)\", pcfile, pcline, panicErr)\n\n break\n }\n\n ctx.Abort()\n }\n }()\n\n ctx.Next()\n }\n}\n`, `package middlewares\n\nimport (\n \"testing\"\n\n \"github.com\/dolab\/gogo\"\n)\n\nfunc Test_Recovery(t *testing.T) {\n testApp.Use(Recovery())\n defer testApp.Clean()\n\n \/\/ register temp resource for testing\n testApp.GET(\"\/middlewares\/recovery\", func(ctx *gogo.Context) {\n panic(\"Recover testing\")\n })\n\n testClient.Get(t, \"\/middlewares\/recovery\", nil)\n testClient.AssertOK()\n}\n`, `package middlewares\n\nimport (\n \"net\/http\/httptest\"\n \"os\"\n \"path\"\n \"testing\"\n\n \"github.com\/dolab\/gogo\"\n \"github.com\/dolab\/httptesting\"\n)\n\nvar (\n testApp *gogo.AppServer\n testServer *httptest.Server\n testClient *httptesting.Client\n)\n\nfunc TestMain(m *testing.M) {\n var (\n runMode = \"test\"\n srcPath = path.Clean(\"..\/..\/\")\n )\n\n testApp = gogo.New(runMode, srcPath)\n testServer = httptest.NewServer(testApp)\n testClient = httptesting.New(testServer.URL, false)\n\n code := m.Run()\n\n testServer.Close()\n\n os.Exit(code)\n}\n`}\n\n\tjsonTemplate = `{\n \"name\": \"{{.Application}}\",\n \"mode\": \"test\",\n \"sections\": {\n \"development\": {\n \"server\": {\n \"addr\": \"localhost\",\n \"port\": 9090,\n \"ssl\": false,\n \"ssl_cert\": \"\/path\/to\/ssl\/cert\",\n \"ssl_key\": \"\/path\/to\/ssl\/key\",\n \"request_timeout\": 30,\n \"response_timeout\": 30,\n \"request_id\": \"X-Request-Id\"\n },\n \"logger\": {\n \"output\": \"stdout\",\n \"level\": \"debug\",\n \"filter_params\": [\"password\", \"password_confirmation\"]\n },\n \"domain\": \"https:\/\/example.com\",\n \"getting_start\": {\n \"greeting\": \"Hello, gogo!\"\n }\n },\n\n \"test\": {\n \"server\": {\n \"addr\": \"localhost\",\n \"port\": 9090,\n \"ssl\": false,\n \"ssl_cert\": \"\/path\/to\/ssl\/cert\",\n \"ssl_key\": \"\/path\/to\/ssl\/key\",\n \"request_timeout\": 30,\n \"response_timeout\": 30,\n \"request_id\": \"X-Request-Id\"\n },\n \"logger\": {\n \"output\": \"stdout\",\n \"level\": \"info\",\n \"filter_params\": [\"password\", \"password_confirmation\"]\n },\n \"domain\": \"https:\/\/example.com\",\n \"getting_start\": {\n \"greeting\": \"Hello, gogo!\"\n }\n },\n\n \"production\": {\n \"server\": {\n \"addr\": \"localhost\",\n \"port\": 9090,\n \"ssl\": true,\n \"ssl_cert\": \"\/path\/to\/ssl\/cert\",\n \"ssl_key\": \"\/path\/to\/ssl\/key\",\n \"request_timeout\": 30,\n \"response_timeout\": 30,\n \"request_id\": \"X-Request-Id\"\n },\n \"logger\": {\n \"output\": \"stdout\",\n \"level\": \"warn\",\n \"filter_params\": [\"password\", \"password_confirmation\"]\n }\n }\n }\n}\n`\n\n\tmainTemplate = `package main\n\nimport (\n \"flag\"\n \"os\"\n \"path\"\n\n \"github.com\/dolab\/gogo\"\n\n \"{{.Namespace}}\/{{.Application}}\/app\/controllers\"\n)\n\nvar (\n runMode string \/\/ app run mode, available values are [development|test|production], default to development\n srcPath string \/\/ app source path, e.g. \/home\/deploy\/websites\/helloapp\n)\n\nfunc main() {\n flag.StringVar(&runMode, \"runMode\", \"development\", \"{{.Application}} -runMode=[development|test|production]\")\n flag.StringVar(&srcPath, \"srcPath\", \"\", \"{{.Application}} -srcPath=\/path\/to\/source\")\n flag.Parse()\n\n \/\/ verify run mode\n if mode := gogo.RunMode(runMode); !mode.IsValid() {\n flag.PrintDefaults()\n return\n }\n\n \/\/ adjust src path\n if srcPath == \"\" {\n var err error\n\n srcPath, err = os.Getwd()\n if err != nil {\n panic(err)\n }\n } else {\n srcPath = path.Clean(srcPath)\n }\n\n controllers.New(runMode, srcPath).Run()\n}\n`\n)\n\ntype templateData struct {\n\tNamespace string\n\tApplication string\n}\n\nfunc init() {\n\tvar err error\n\n\t\/\/ setup logger\n\tstderr, err = logger.New(\"stderr\")\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\n\tstderr.SetLevelByName(\"info\")\n\tstderr.SetFlag(log.Lshortfile)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015, Quentin RENARD. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage gozzle\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestExec(t *testing.T) {\n\t\/\/ Initialize\n\tn := 5\n\tf := \"test %d\"\n\th := \"Test\"\n\n\t\/\/ Create server\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\/\/ Loop\n\t\tfor i := 0; i < n; i++ {\n\t\t\t\/\/ Get formatted message\n\t\t\tm := fmt.Sprintf(f, i)\n\n\t\t\t\/\/ Valid header\n\t\t\tif r.Header.Get(h) == m {\n\t\t\t\t\/\/ Set header\n\t\t\t\tw.Header().Set(h, m)\n\n\t\t\t\t\/\/ Set body\n\t\t\t\tw.Write([]byte(m))\n\t\t\t}\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\t\/\/ Create request set\n\treqSet := NewRequestSet()\n\n\t\/\/ Loop\n\tfor i := 0; i < n; i++ {\n\t\t\/\/ Get formatted message\n\t\tm := fmt.Sprintf(f, i)\n\n\t\t\/\/ Create request\n\t\treq := NewRequest(m, MethodGet, server.URL)\n\t\treq.AddHeader(h, m)\n\t\treqSet.AddRequest(req)\n\t}\n\n\t\/\/ Create gozzle\n\tg := NewGozzle(0)\n\n\t\/\/ Execute requests\n\trespSet := g.Exec(reqSet)\n\n\t\/\/ Loop\n\tassert.Len(t, respSet.Names(), n)\n\tfor i := 0; i < n; i++ {\n\t\t\/\/ Get formatted message\n\t\tm := fmt.Sprintf(f, i)\n\n\t\t\/\/ Get response\n\t\tresp := respSet.GetResponse(m)\n\n\t\t\/\/ Assert\n\t\tassert.Len(t, resp.Errors(), 0)\n\t\tassert.Equal(t, m, resp.Header().Get(h))\n\t\tc, e := ioutil.ReadAll(resp.BodyReader())\n\t\tassert.NoError(t, e)\n\t\tassert.Equal(t, m, string(c))\n\t}\n}\n\nfunc TestExecRequestBeforeHandler(t *testing.T) {\n\t\/\/ Create server\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))\n\tdefer server.Close()\n\n\t\/\/ Create request set\n\treqSet := NewRequestSet()\n\n\t\/\/ Create request\n\treq := NewRequest(\"test\", MethodGet, server.URL)\n\treq.SetBeforeHandler(func(r Request) bool {\n\t\treturn false\n\t})\n\treqSet.AddRequest(req)\n\n\t\/\/ Create gozzle\n\tg := NewGozzle(0)\n\n\t\/\/ Execute requests\n\trespSet := g.Exec(reqSet)\n\n\t\/\/ Assert\n\tassert.Len(t, respSet.Names(), 0)\n}\n\nfunc TestExecRequestError(t *testing.T) {\n\t\/\/ Initialize\n\tc := 500\n\n\t\/\/ Create server\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(c)\n\t}))\n\tdefer server.Close()\n\n\t\/\/ Create request set\n\treqSet := NewRequestSet()\n\n\t\/\/ Create requests\n\treq1 := NewRequest(\"test1\", MethodGet, server.URL)\n\treqSet.AddRequest(req1)\n\treq2 := NewRequest(\"test2\", MethodGet, \"test\")\n\treqSet.AddRequest(req2)\n\n\t\/\/ Create gozzle\n\tg := NewGozzle(0)\n\n\t\/\/ Execute requests\n\trespSet := g.Exec(reqSet)\n\n\t\/\/ Assert\n\tassert.Len(t, respSet.Names(), 2)\n\tassert.Equal(t, c, respSet.GetResponse(\"test1\").StatusCode())\n\tassert.Len(t, respSet.GetResponse(\"test1\").Errors(), 1)\n\tassert.EqualError(t, respSet.GetResponse(\"test1\").Errors()[0], ErrInvalidStatusCode.Error())\n\tassert.Len(t, respSet.GetResponse(\"test2\").Errors(), 1)\n\tassert.EqualError(t, respSet.GetResponse(\"test2\").Errors()[0], \"Get test: unsupported protocol scheme \\\"\\\"\")\n}\n\nfunc TestQuery(t *testing.T) {\n\t\/\/ Initialize\n\tr1 := request{\n\t\tquery: map[string]string{\n\t\t\t\"a\": \"b\",\n\t\t\t\"ké@lù\": \"ùl@ék\",\n\t\t},\n\t}\n\tr2 := request{}\n\n\t\/\/ Assert\n\tassert.Contains(t, \"?a=b&k%C3%A9%40l%C3%B9=%C3%B9l%40%C3%A9k?k%C3%A9%40l%C3%B9=%C3%B9l%40%C3%A9k&a=b\", query(&r1))\n\tassert.Empty(t, query(&r2))\n}\n\nfunc TestBody(t *testing.T) {\n\t\/\/ Initialize\n\tr := request{\n\t\tbody: map[string]string{\n\t\t\t\"test\": \"message\",\n\t\t},\n\t\theaders: map[string]string{\n\t\t\t\"Content-Type\": \"application\/json\",\n\t\t},\n\t}\n\n\t\/\/ Get body reader\n\tb, e := body(&r)\n\tassert.NoError(t, e)\n\n\t\/\/ Read body\n\tc, e := ioutil.ReadAll(b)\n\tassert.NoError(t, e)\n\n\t\/\/ Assert\n\tassert.Equal(t, \"{\\\"test\\\":\\\"message\\\"}\", string(c))\n}\n\nfunc TestBodyEmpty(t *testing.T) {\n\tr := request{\n\t\tbody: interface{}(nil),\n\t}\n\n\t\/\/ Get body reader\n\tb, e := body(&r)\n\tassert.NoError(t, e)\n\n\t\/\/ Read body\n\tc, e := ioutil.ReadAll(b)\n\tassert.NoError(t, e)\n\n\t\/\/ Assert\n\tassert.Equal(t, \"\", string(c))\n}\n\nfunc TestHeaders(t *testing.T) {\n\t\/\/ Initialize\n\tk := \"Key\"\n\tv := \"Value\"\n\tr := NewRequest(\"test\", MethodGet, \"\/test\")\n\tr.AddHeader(k, v)\n\thr := http.Request{Header: http.Header{}}\n\n\t\/\/ Assert\n\tassert.Empty(t, hr.Header.Get(k))\n\n\t\/\/ Add headers\n\theaders(r, &hr)\n\n\t\/\/ Assert\n\tassert.Equal(t, v, hr.Header.Get(k))\n}\n<commit_msg>Added unit test for body reader<commit_after>\/\/ Copyright 2015, Quentin RENARD. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage gozzle\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"bytes\"\n)\n\nfunc TestExec(t *testing.T) {\n\t\/\/ Initialize\n\tn := 5\n\tf := \"test %d\"\n\th := \"Test\"\n\n\t\/\/ Create server\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\/\/ Loop\n\t\tfor i := 0; i < n; i++ {\n\t\t\t\/\/ Get formatted message\n\t\t\tm := fmt.Sprintf(f, i)\n\n\t\t\t\/\/ Valid header\n\t\t\tif r.Header.Get(h) == m {\n\t\t\t\t\/\/ Set header\n\t\t\t\tw.Header().Set(h, m)\n\n\t\t\t\t\/\/ Set body\n\t\t\t\tw.Write([]byte(m))\n\t\t\t}\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\t\/\/ Create request set\n\treqSet := NewRequestSet()\n\n\t\/\/ Loop\n\tfor i := 0; i < n; i++ {\n\t\t\/\/ Get formatted message\n\t\tm := fmt.Sprintf(f, i)\n\n\t\t\/\/ Create request\n\t\treq := NewRequest(m, MethodGet, server.URL)\n\t\treq.AddHeader(h, m)\n\t\treqSet.AddRequest(req)\n\t}\n\n\t\/\/ Create gozzle\n\tg := NewGozzle(0)\n\n\t\/\/ Execute requests\n\trespSet := g.Exec(reqSet)\n\n\t\/\/ Loop\n\tassert.Len(t, respSet.Names(), n)\n\tfor i := 0; i < n; i++ {\n\t\t\/\/ Get formatted message\n\t\tm := fmt.Sprintf(f, i)\n\n\t\t\/\/ Get response\n\t\tresp := respSet.GetResponse(m)\n\n\t\t\/\/ Assert\n\t\tassert.Len(t, resp.Errors(), 0)\n\t\tassert.Equal(t, m, resp.Header().Get(h))\n\t\tc, e := ioutil.ReadAll(resp.BodyReader())\n\t\tassert.NoError(t, e)\n\t\tassert.Equal(t, m, string(c))\n\t}\n}\n\nfunc TestExecRequestBeforeHandler(t *testing.T) {\n\t\/\/ Create server\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))\n\tdefer server.Close()\n\n\t\/\/ Create request set\n\treqSet := NewRequestSet()\n\n\t\/\/ Create request\n\treq := NewRequest(\"test\", MethodGet, server.URL)\n\treq.SetBeforeHandler(func(r Request) bool {\n\t\treturn false\n\t})\n\treqSet.AddRequest(req)\n\n\t\/\/ Create gozzle\n\tg := NewGozzle(0)\n\n\t\/\/ Execute requests\n\trespSet := g.Exec(reqSet)\n\n\t\/\/ Assert\n\tassert.Len(t, respSet.Names(), 0)\n}\n\nfunc TestExecRequestError(t *testing.T) {\n\t\/\/ Initialize\n\tc := 500\n\n\t\/\/ Create server\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(c)\n\t}))\n\tdefer server.Close()\n\n\t\/\/ Create request set\n\treqSet := NewRequestSet()\n\n\t\/\/ Create requests\n\treq1 := NewRequest(\"test1\", MethodGet, server.URL)\n\treqSet.AddRequest(req1)\n\treq2 := NewRequest(\"test2\", MethodGet, \"test\")\n\treqSet.AddRequest(req2)\n\n\t\/\/ Create gozzle\n\tg := NewGozzle(0)\n\n\t\/\/ Execute requests\n\trespSet := g.Exec(reqSet)\n\n\t\/\/ Assert\n\tassert.Len(t, respSet.Names(), 2)\n\tassert.Equal(t, c, respSet.GetResponse(\"test1\").StatusCode())\n\tassert.Len(t, respSet.GetResponse(\"test1\").Errors(), 1)\n\tassert.EqualError(t, respSet.GetResponse(\"test1\").Errors()[0], ErrInvalidStatusCode.Error())\n\tassert.Len(t, respSet.GetResponse(\"test2\").Errors(), 1)\n\tassert.EqualError(t, respSet.GetResponse(\"test2\").Errors()[0], \"Get test: unsupported protocol scheme \\\"\\\"\")\n}\n\nfunc TestQuery(t *testing.T) {\n\t\/\/ Initialize\n\tr1 := request{\n\t\tquery: map[string]string{\n\t\t\t\"a\": \"b\",\n\t\t\t\"ké@lù\": \"ùl@ék\",\n\t\t},\n\t}\n\tr2 := request{}\n\n\t\/\/ Assert\n\tassert.Contains(t, \"?a=b&k%C3%A9%40l%C3%B9=%C3%B9l%40%C3%A9k?k%C3%A9%40l%C3%B9=%C3%B9l%40%C3%A9k&a=b\", query(&r1))\n\tassert.Empty(t, query(&r2))\n}\n\nfunc TestBody(t *testing.T) {\n\t\/\/ Initialize\n\tr := request{\n\t\tbody: map[string]string{\n\t\t\t\"test\": \"message\",\n\t\t},\n\t\theaders: map[string]string{\n\t\t\t\"Content-Type\": \"application\/json\",\n\t\t},\n\t}\n\n\t\/\/ Get body reader\n\tb, e := body(&r)\n\tassert.NoError(t, e)\n\n\t\/\/ Read body\n\tc, e := ioutil.ReadAll(b)\n\tassert.NoError(t, e)\n\n\t\/\/ Assert\n\tassert.Equal(t, \"{\\\"test\\\":\\\"message\\\"}\", string(c))\n}\n\nfunc TestBodyEmpty(t *testing.T) {\n\tr := request{\n\t\tbody: interface{}(nil),\n\t}\n\n\t\/\/ Get body reader\n\tb, e := body(&r)\n\tassert.NoError(t, e)\n\n\t\/\/ Read body\n\tc, e := ioutil.ReadAll(b)\n\tassert.NoError(t, e)\n\n\t\/\/ Assert\n\tassert.Equal(t, \"\", string(c))\n}\n\nfunc TestBodyReader(t *testing.T) {\n\t\/\/ Initialize\n\tr := request{\n\t\tbody: map[string]string{\n\t\t\t\"test\": \"message\",\n\t\t},\n\t\tbodyReader: bytes.NewBuffer([]byte(\"{\\\"test\\\":\\\"message_reader\\\"}\")),\n\t\theaders: map[string]string{\n\t\t\t\"Content-Type\": \"application\/json\",\n\t\t},\n\t}\n\n\t\/\/ Get body reader\n\tb, e := body(&r)\n\tassert.NoError(t, e)\n\n\t\/\/ Read body\n\tc, e := ioutil.ReadAll(b)\n\tassert.NoError(t, e)\n\n\t\/\/ Assert\n\tassert.Equal(t, \"{\\\"test\\\":\\\"message_reader\\\"}\", string(c))\n}\n\nfunc TestHeaders(t *testing.T) {\n\t\/\/ Initialize\n\tk := \"Key\"\n\tv := \"Value\"\n\tr := NewRequest(\"test\", MethodGet, \"\/test\")\n\tr.AddHeader(k, v)\n\thr := http.Request{Header: http.Header{}}\n\n\t\/\/ Assert\n\tassert.Empty(t, hr.Header.Get(k))\n\n\t\/\/ Add headers\n\theaders(r, &hr)\n\n\t\/\/ Assert\n\tassert.Equal(t, v, hr.Header.Get(k))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage otelsarama\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\n\t\"go.opentelemetry.io\/otel\"\n\t\"go.opentelemetry.io\/otel\/trace\"\n)\n\nfunc TestNewConfig(t *testing.T) {\n\ttestCases := []struct {\n\t\tname string\n\t\topts []Option\n\t\texpected config\n\t}{\n\t\t{\n\t\t\tname: \"with provider\",\n\t\t\topts: []Option{\n\t\t\t\tWithTracerProvider(otel.GetTracerProvider()),\n\t\t\t},\n\t\t\texpected: config{\n\t\t\t\tTracerProvider: otel.GetTracerProvider(),\n\t\t\t\tTracer: otel.GetTracerProvider().Tracer(defaultTracerName, trace.WithInstrumentationVersion(SemVersion())),\n\t\t\t\tPropagators: otel.GetTextMapPropagator(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"with propagators\",\n\t\t\topts: []Option{\n\t\t\t\tWithPropagators(nil),\n\t\t\t},\n\t\t\texpected: config{\n\t\t\t\tTracerProvider: otel.GetTracerProvider(),\n\t\t\t\tTracer: otel.GetTracerProvider().Tracer(defaultTracerName, trace.WithInstrumentationVersion(SemVersion())),\n\t\t\t\tPropagators: otel.GetTextMapPropagator(),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tresult := newConfig(tc.opts...)\n\t\t\tassert.Equal(t, tc.expected, result)\n\t\t})\n\t}\n}\n<commit_msg>Actually test the passed options in otelsarama (#2270)<commit_after>\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage otelsarama\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\n\t\"go.opentelemetry.io\/otel\"\n\t\"go.opentelemetry.io\/otel\/propagation\"\n\t\"go.opentelemetry.io\/otel\/trace\"\n)\n\n\/\/ We need a fake tracer provider to ensure the one passed in options is the one used afterwards.\n\/\/ In order to avoid adding the SDK as a dependency, we use this mock.\ntype fakeTracerProvider struct{}\n\nfunc (fakeTracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {\n\treturn fakeTracer{\n\t\tname: name,\n\t}\n}\n\ntype fakeTracer struct {\n\tname string\n}\n\nfunc (fakeTracer) Start(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {\n\treturn ctx, nil\n}\n\nfunc TestNewConfig(t *testing.T) {\n\ttp := fakeTracerProvider{}\n\tprop := propagation.NewCompositeTextMapPropagator()\n\n\ttestCases := []struct {\n\t\tname string\n\t\topts []Option\n\t\texpected config\n\t}{\n\t\t{\n\t\t\tname: \"with provider\",\n\t\t\topts: []Option{\n\t\t\t\tWithTracerProvider(tp),\n\t\t\t},\n\t\t\texpected: config{\n\t\t\t\tTracerProvider: tp,\n\t\t\t\tTracer: tp.Tracer(defaultTracerName, trace.WithInstrumentationVersion(SemVersion())),\n\t\t\t\tPropagators: otel.GetTextMapPropagator(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"with empty provider\",\n\t\t\topts: []Option{\n\t\t\t\tWithTracerProvider(nil),\n\t\t\t},\n\t\t\texpected: config{\n\t\t\t\tTracerProvider: otel.GetTracerProvider(),\n\t\t\t\tTracer: otel.GetTracerProvider().Tracer(defaultTracerName, trace.WithInstrumentationVersion(SemVersion())),\n\t\t\t\tPropagators: otel.GetTextMapPropagator(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"with propagators\",\n\t\t\topts: []Option{\n\t\t\t\tWithPropagators(prop),\n\t\t\t},\n\t\t\texpected: config{\n\t\t\t\tTracerProvider: otel.GetTracerProvider(),\n\t\t\t\tTracer: otel.GetTracerProvider().Tracer(defaultTracerName, trace.WithInstrumentationVersion(SemVersion())),\n\t\t\t\tPropagators: prop,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"with empty propagators\",\n\t\t\topts: []Option{\n\t\t\t\tWithPropagators(nil),\n\t\t\t},\n\t\t\texpected: config{\n\t\t\t\tTracerProvider: otel.GetTracerProvider(),\n\t\t\t\tTracer: otel.GetTracerProvider().Tracer(defaultTracerName, trace.WithInstrumentationVersion(SemVersion())),\n\t\t\t\tPropagators: otel.GetTextMapPropagator(),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tresult := newConfig(tc.opts...)\n\t\t\tassert.Equal(t, tc.expected, result)\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Windows filesystem monitoring implementation based on\n\/\/ golang.org\/x\/exp\/winfsnotify\n\/\/ (specifically\n\/\/ https:\/\/github.com\/golang\/exp\/tree\/c84be7c6d1cd7b6a43fd7101daaf2dc35ded445f\/winfsnotify),\n\/\/ but modified to remove import path enforcement, increase\n\/\/ ReadDirectoryChangesW buffer size, support recursive watching, use more\n\/\/ idiomatic filesystem path joins, and remove test logging.\n\/\/\n\/\/ The original code license:\n\/\/\n\/\/ Copyright (c) 2009 The Go Authors. All rights reserved.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions are\n\/\/ met:\n\/\/\n\/\/ * Redistributions of source code must retain the above copyright\n\/\/ notice, this list of conditions and the following disclaimer.\n\/\/ * Redistributions in binary form must reproduce the above\n\/\/ copyright notice, this list of conditions and the following disclaimer\n\/\/ in the documentation and\/or other materials provided with the\n\/\/ distribution.\n\/\/ * Neither the name of Google Inc. nor the names of its\n\/\/ contributors may be used to endorse or promote products derived from\n\/\/ this software without specific prior written permission.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\/\/ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n\/\/ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n\/\/ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n\/\/ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n\/\/ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n\/\/ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n\/\/ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n\/\/ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n\/\/ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\/\/ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\/\/\n\/\/ The original license header inside the code itself:\n\/\/\n\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build windows\n\npackage winfsnotify\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\/atomic\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc expect(t *testing.T, eventstream <-chan *Event, name string, mask uint32) {\n\tselect {\n\tcase event := <-eventstream:\n\t\tif event == nil {\n\t\t\tt.Fatal(\"nil event received\")\n\t\t}\n\t\tif event.Name != name || event.Mask != mask {\n\t\t\tt.Fatal(\"did not receive expected event\")\n\t\t}\n\tcase <-time.After(1 * time.Second):\n\t\tt.Fatal(\"timed out waiting for event\")\n\t}\n}\n\nfunc TestNotifyEvents(t *testing.T) {\n\twatcher, err := NewWatcher()\n\tif err != nil {\n\t\tt.Fatalf(\"NewWatcher() failed: %s\", err)\n\t}\n\n\ttestDir := \"TestNotifyEvents.testdirectory\"\n\ttestFile := filepath.Join(testDir, \"TestNotifyEvents.testfile\")\n\ttestFile2 := testFile + \".new\"\n\tconst mask = FS_ALL_EVENTS & ^(FS_ATTRIB|FS_CLOSE) | FS_IGNORED\n\n\t\/\/ Add a watch for testDir\n\tos.RemoveAll(testDir)\n\tif err = os.Mkdir(testDir, 0777); err != nil {\n\t\tt.Fatalf(\"Failed to create test directory: %s\", err)\n\t}\n\tdefer os.RemoveAll(testDir)\n\terr = watcher.AddWatch(testDir, mask)\n\tif err != nil {\n\t\tt.Fatalf(\"Watcher.Watch() failed: %s\", err)\n\t}\n\n\t\/\/ Create a file\n\tfile, err := os.Create(testFile)\n\tif err != nil {\n\t\tt.Fatalf(\"creating test file failed: %s\", err)\n\t}\n\texpect(t, watcher.Event, testFile, FS_CREATE)\n\n\terr = watcher.AddWatch(testFile, mask)\n\tif err != nil {\n\t\tt.Fatalf(\"Watcher.Watch() failed: %s\", err)\n\t}\n\n\tif _, err = file.WriteString(\"hello, world\"); err != nil {\n\t\tt.Fatalf(\"failed to write to test file: %s\", err)\n\t}\n\tif err = file.Close(); err != nil {\n\t\tt.Fatalf(\"failed to close test file: %s\", err)\n\t}\n\texpect(t, watcher.Event, testFile, FS_MODIFY)\n\texpect(t, watcher.Event, testFile, FS_MODIFY)\n\n\tif err = os.Rename(testFile, testFile2); err != nil {\n\t\tt.Fatalf(\"failed to rename test file: %s\", err)\n\t}\n\texpect(t, watcher.Event, testFile, FS_MOVED_FROM)\n\texpect(t, watcher.Event, testFile2, FS_MOVED_TO)\n\texpect(t, watcher.Event, testFile, FS_MOVE_SELF)\n\n\tif err = os.RemoveAll(testDir); err != nil {\n\t\tt.Fatalf(\"failed to remove test directory: %s\", err)\n\t}\n\texpect(t, watcher.Event, testFile2, FS_DELETE_SELF)\n\texpect(t, watcher.Event, testFile2, FS_IGNORED)\n\texpect(t, watcher.Event, testFile2, FS_DELETE)\n\texpect(t, watcher.Event, testDir, FS_DELETE_SELF)\n\texpect(t, watcher.Event, testDir, FS_IGNORED)\n\n\tif err = watcher.Close(); err != nil {\n\t\tt.Fatalf(\"failed to close watcher: %s\", err)\n\t}\n\n\t\/\/ Check for errors\n\tif err := <-watcher.Error; err != nil {\n\t\tt.Fatalf(\"error received: %s\", err)\n\t}\n}\n\nfunc TestNotifyClose(t *testing.T) {\n\twatcher, _ := NewWatcher()\n\twatcher.Close()\n\n\tvar done int32\n\tgo func() {\n\t\twatcher.Close()\n\t\tatomic.StoreInt32(&done, 1)\n\t}()\n\n\ttime.Sleep(50 * time.Millisecond)\n\tif atomic.LoadInt32(&done) == 0 {\n\t\tt.Fatal(\"double Close() test failed: second Close() call didn't return\")\n\t}\n\n\terr = watcher.Watch(t.TempDir())\n\tif err == nil {\n\t\tt.Fatal(\"expected error on Watch() after Close(), got nil\")\n\t}\n}\n<commit_msg>Additional fix for f3dde29.<commit_after>\/\/ Windows filesystem monitoring implementation based on\n\/\/ golang.org\/x\/exp\/winfsnotify\n\/\/ (specifically\n\/\/ https:\/\/github.com\/golang\/exp\/tree\/c84be7c6d1cd7b6a43fd7101daaf2dc35ded445f\/winfsnotify),\n\/\/ but modified to remove import path enforcement, increase\n\/\/ ReadDirectoryChangesW buffer size, support recursive watching, use more\n\/\/ idiomatic filesystem path joins, and remove test logging.\n\/\/\n\/\/ The original code license:\n\/\/\n\/\/ Copyright (c) 2009 The Go Authors. All rights reserved.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions are\n\/\/ met:\n\/\/\n\/\/ * Redistributions of source code must retain the above copyright\n\/\/ notice, this list of conditions and the following disclaimer.\n\/\/ * Redistributions in binary form must reproduce the above\n\/\/ copyright notice, this list of conditions and the following disclaimer\n\/\/ in the documentation and\/or other materials provided with the\n\/\/ distribution.\n\/\/ * Neither the name of Google Inc. nor the names of its\n\/\/ contributors may be used to endorse or promote products derived from\n\/\/ this software without specific prior written permission.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\/\/ \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n\/\/ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n\/\/ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n\/\/ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n\/\/ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n\/\/ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n\/\/ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n\/\/ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n\/\/ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\/\/ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\/\/\n\/\/ The original license header inside the code itself:\n\/\/\n\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build windows\n\npackage winfsnotify\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\/atomic\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc expect(t *testing.T, eventstream <-chan *Event, name string, mask uint32) {\n\tselect {\n\tcase event := <-eventstream:\n\t\tif event == nil {\n\t\t\tt.Fatal(\"nil event received\")\n\t\t}\n\t\tif event.Name != name || event.Mask != mask {\n\t\t\tt.Fatal(\"did not receive expected event\")\n\t\t}\n\tcase <-time.After(1 * time.Second):\n\t\tt.Fatal(\"timed out waiting for event\")\n\t}\n}\n\nfunc TestNotifyEvents(t *testing.T) {\n\twatcher, err := NewWatcher()\n\tif err != nil {\n\t\tt.Fatalf(\"NewWatcher() failed: %s\", err)\n\t}\n\n\ttestDir := \"TestNotifyEvents.testdirectory\"\n\ttestFile := filepath.Join(testDir, \"TestNotifyEvents.testfile\")\n\ttestFile2 := testFile + \".new\"\n\tconst mask = FS_ALL_EVENTS & ^(FS_ATTRIB|FS_CLOSE) | FS_IGNORED\n\n\t\/\/ Add a watch for testDir\n\tos.RemoveAll(testDir)\n\tif err = os.Mkdir(testDir, 0777); err != nil {\n\t\tt.Fatalf(\"Failed to create test directory: %s\", err)\n\t}\n\tdefer os.RemoveAll(testDir)\n\terr = watcher.AddWatch(testDir, mask)\n\tif err != nil {\n\t\tt.Fatalf(\"Watcher.Watch() failed: %s\", err)\n\t}\n\n\t\/\/ Create a file\n\tfile, err := os.Create(testFile)\n\tif err != nil {\n\t\tt.Fatalf(\"creating test file failed: %s\", err)\n\t}\n\texpect(t, watcher.Event, testFile, FS_CREATE)\n\n\terr = watcher.AddWatch(testFile, mask)\n\tif err != nil {\n\t\tt.Fatalf(\"Watcher.Watch() failed: %s\", err)\n\t}\n\n\tif _, err = file.WriteString(\"hello, world\"); err != nil {\n\t\tt.Fatalf(\"failed to write to test file: %s\", err)\n\t}\n\tif err = file.Close(); err != nil {\n\t\tt.Fatalf(\"failed to close test file: %s\", err)\n\t}\n\texpect(t, watcher.Event, testFile, FS_MODIFY)\n\texpect(t, watcher.Event, testFile, FS_MODIFY)\n\n\tif err = os.Rename(testFile, testFile2); err != nil {\n\t\tt.Fatalf(\"failed to rename test file: %s\", err)\n\t}\n\texpect(t, watcher.Event, testFile, FS_MOVED_FROM)\n\texpect(t, watcher.Event, testFile2, FS_MOVED_TO)\n\texpect(t, watcher.Event, testFile, FS_MOVE_SELF)\n\n\tif err = os.RemoveAll(testDir); err != nil {\n\t\tt.Fatalf(\"failed to remove test directory: %s\", err)\n\t}\n\texpect(t, watcher.Event, testFile2, FS_DELETE_SELF)\n\texpect(t, watcher.Event, testFile2, FS_IGNORED)\n\texpect(t, watcher.Event, testFile2, FS_DELETE)\n\texpect(t, watcher.Event, testDir, FS_DELETE_SELF)\n\texpect(t, watcher.Event, testDir, FS_IGNORED)\n\n\tif err = watcher.Close(); err != nil {\n\t\tt.Fatalf(\"failed to close watcher: %s\", err)\n\t}\n\n\t\/\/ Check for errors\n\tif err := <-watcher.Error; err != nil {\n\t\tt.Fatalf(\"error received: %s\", err)\n\t}\n}\n\nfunc TestNotifyClose(t *testing.T) {\n\twatcher, _ := NewWatcher()\n\twatcher.Close()\n\n\tvar done int32\n\tgo func() {\n\t\twatcher.Close()\n\t\tatomic.StoreInt32(&done, 1)\n\t}()\n\n\ttime.Sleep(50 * time.Millisecond)\n\tif atomic.LoadInt32(&done) == 0 {\n\t\tt.Fatal(\"double Close() test failed: second Close() call didn't return\")\n\t}\n\n\terr := watcher.Watch(t.TempDir())\n\tif err == nil {\n\t\tt.Fatal(\"expected error on Watch() after Close(), got nil\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/LiamHaworth\/go-tproxy\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"sync\"\n)\n\nfunc main() {\n\tlog.Println(\"Starting GoLang TProxy example\")\n\n\tbindAddr := &net.TCPAddr{IP: net.ParseIP(\"0.0.0.0\"), Port: 8080}\n\tlog.Printf(\"Attempting to bind listener on: %s\", bindAddr.String())\n\n\tlistener, err := tproxy.ListenTCP(\"tcp\", bindAddr)\n\tif err != nil {\n\t\tlog.Fatalf(\"Encountered error while binding listener: %s\", err)\n\t\treturn\n\t}\n\n\tlog.Println(\"Listener bound successfully, now accepting connections\")\n\tfor {\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tif netErr, ok := err.(net.Error); ok && netErr.Temporary() {\n\t\t\t\tlog.Printf(\"Temporary error while accepting connection: %s\", netErr)\n\t\t\t}\n\n\t\t\tlog.Fatalf(\"Unrecoverable error while accepting connection: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tgo handleConn(conn)\n\t}\n}\n\nfunc handleConn(conn net.Conn) {\n\tlog.Printf(\"Accepting connection from %s with destination of %s\", conn.RemoteAddr().String(), conn.LocalAddr().String())\n\n\tremoteConn, err := conn.(*tproxy.Conn).DialOriginalDestination(false)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to connect to original destination [%s]: %s\", conn.LocalAddr().String(), err)\n\t} else {\n\t\tdefer remoteConn.Close()\n\t\tdefer conn.Close()\n\t}\n\n\tvar streamWait sync.WaitGroup\n\tstreamWait.Add(2)\n\n\tstreamConn := func(dst io.Writer, src io.Reader) {\n\t\tio.Copy(dst, src)\n\t\tstreamWait.Done()\n\t}\n\n\tgo streamConn(remoteConn, conn)\n\tgo streamConn(conn, remoteConn)\n\n\tstreamWait.Wait()\n}\n<commit_msg>update(example): Updated example code to use both TCP and UDP<commit_after>package main\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/LiamHaworth\/go-tproxy\"\n)\n\nvar (\n\t\/\/ tcpListener represents the TCP\n\t\/\/ listening socket that will receive\n\t\/\/ TCP connections from TProxy\n\ttcpListener net.Listener\n\n\t\/\/ udpListener represents tje UDP\n\t\/\/ listening socket that will receive\n\t\/\/ UDP packets from TProxy\n\tudpListener *net.UDPConn\n)\n\n\/\/ main will initialize the TProxy\n\/\/ handling application\nfunc main() {\n\tlog.Println(\"Starting GoLang TProxy example\")\n\tvar err error\n\n\tlog.Println(\"Binding TCP TProxy listener to 0.0.0.0:8080\")\n\ttcpListener, err = tproxy.ListenTCP(\"tcp\", &net.TCPAddr{IP: net.ParseIP(\"0.0.0.0\"), Port: 8080})\n\tif err != nil {\n\t\tlog.Fatalf(\"Encountered error while binding listener: %s\", err)\n\t\treturn\n\t}\n\n\tdefer tcpListener.Close()\n\tgo listenTCP()\n\n\tlog.Println(\"Binding UDP TProxy listener to 0.0.0.0:8080\")\n\tudpListener, err = tproxy.ListenUDP(\"udp\", &net.UDPAddr{IP: net.ParseIP(\"0.0.0.0\"), Port: 8080})\n\tif err != nil {\n\t\tlog.Fatalf(\"Encountered error while binding UDP listener: %s\", err)\n\t\treturn\n\t}\n\n\tdefer udpListener.Close()\n\tgo listenUDP()\n\n\tinterruptListener := make(chan os.Signal)\n\tsignal.Notify(interruptListener, os.Interrupt)\n\t<-interruptListener\n\n\tlog.Println(\"TProxy listener closing\")\n}\n\n\/\/ listenUDP runs in a routine to\n\/\/ accept UDP connections and hand them\n\/\/ off into their own routines for handling\nfunc listenUDP() {\n\tfor {\n\t\tbuff := make([]byte, 1024)\n\t\tn, srcAddr, dstAddr, err := tproxy.ReadFromUDP(udpListener, buff)\n\t\tif err != nil {\n\t\t\tif netErr, ok := err.(net.Error); ok && netErr.Temporary() {\n\t\t\t\tlog.Printf(\"Temporary error while reading data: %s\", netErr)\n\t\t\t}\n\n\t\t\tlog.Fatalf(\"Unrecoverable error while reading data: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Printf(\"Accepting UDP connection from %s with destination of %s\", srcAddr.String(), dstAddr.String())\n\t\tgo handleUDPConn(buff[:n], srcAddr, dstAddr)\n\t}\n}\n\n\/\/ listenTCP runs in a routine to\n\/\/ accept TCP connections and hand them\n\/\/ off into their own routines for handling\nfunc listenTCP() {\n\tfor {\n\t\tconn, err := tcpListener.Accept()\n\t\tif err != nil {\n\t\t\tif netErr, ok := err.(net.Error); ok && netErr.Temporary() {\n\t\t\t\tlog.Printf(\"Temporary error while accepting connection: %s\", netErr)\n\t\t\t}\n\n\t\t\tlog.Fatalf(\"Unrecoverable error while accepting connection: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tgo handleTCPConn(conn)\n\t}\n}\n\n\/\/ handleUDPConn will open a connection\n\/\/ to the original destination pretending\n\/\/ to be the client. It will when right\n\/\/ the received data to the remote host\n\/\/ and wait a few seconds for any possible\n\/\/ response data\nfunc handleUDPConn(data []byte, srcAddr, dstAddr *net.UDPAddr) {\n\tlog.Printf(\"Accepting UDP connection from %s with destination of %s\", srcAddr, dstAddr)\n\n\tlocalConn, err := tproxy.DialUDP(\"udp\", dstAddr, srcAddr)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to connect to original UDP source [%s]: %s\", srcAddr.String(), err)\n\t\treturn\n\t}\n\tdefer localConn.Close()\n\n\tremoteConn, err := tproxy.DialUDP(\"udp\", srcAddr, dstAddr)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to connect to original UDP destination [%s]: %s\", dstAddr.String(), err)\n\t\treturn\n\t}\n\tdefer remoteConn.Close()\n\n\tbytesWritten, err := remoteConn.Write(data)\n\tif err != nil {\n\t\tlog.Printf(\"Encountered error while writing to remote [%s]: %s\", remoteConn.RemoteAddr(), err)\n\t\treturn\n\t} else if bytesWritten < len(data) {\n\t\tlog.Printf(\"Not all bytes [%d < %d] in buffer written to remote [%s]\", bytesWritten, len(data), remoteConn.RemoteAddr())\n\t\treturn\n\t}\n\n\tdata = make([]byte, 1024)\n\tremoteConn.SetReadDeadline(time.Now().Add(2 * time.Second)) \/\/ Add deadline to ensure it doesn't block forever\n\tbytesRead, err := remoteConn.Read(data)\n\tif err != nil {\n\t\tif netErr, ok := err.(net.Error); ok && netErr.Timeout() {\n\t\t\treturn\n\t\t}\n\n\t\tlog.Printf(\"Encountered error while reading from remote [%s]: %s\", remoteConn.RemoteAddr(), err)\n\t\treturn\n\t}\n\n\tbytesWritten, err = localConn.Write(data)\n\tif err != nil {\n\t\tlog.Printf(\"Encountered error while writing to local [%s]: %s\", localConn.RemoteAddr(), err)\n\t\treturn\n\t} else if bytesWritten < bytesRead {\n\t\tlog.Printf(\"Not all bytes [%d < %d] in buffer written to locoal [%s]\", bytesWritten, len(data), remoteConn.RemoteAddr())\n\t\treturn\n\t}\n}\n\n\/\/ handleTCPConn will open a connection\n\/\/ to the original destination pretending\n\/\/ to be the client. From there it will setup\n\/\/ two routines to stream data between the\n\/\/ connections\nfunc handleTCPConn(conn net.Conn) {\n\tlog.Printf(\"Accepting TCP connection from %s with destination of %s\", conn.RemoteAddr().String(), conn.LocalAddr().String())\n\n\tremoteConn, err := conn.(*tproxy.Conn).DialOriginalDestination(false)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to connect to original destination [%s]: %s\", conn.LocalAddr().String(), err)\n\t} else {\n\t\tdefer remoteConn.Close()\n\t\tdefer conn.Close()\n\t}\n\n\tvar streamWait sync.WaitGroup\n\tstreamWait.Add(2)\n\n\tstreamConn := func(dst io.Writer, src io.Reader) {\n\t\tio.Copy(dst, src)\n\t\tstreamWait.Done()\n\t}\n\n\tgo streamConn(remoteConn, conn)\n\tgo streamConn(conn, remoteConn)\n\n\tstreamWait.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Chromium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style license that can be\n\/\/ found in the LICENSE file.\n\npackage swarming\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/luci\/luci-go\/client\/logdog\/annotee\"\n\t\"github.com\/luci\/luci-go\/common\/clock\"\n\t\"github.com\/luci\/luci-go\/common\/logdog\/types\"\n\tmiloProto \"github.com\/luci\/luci-go\/common\/proto\/milo\"\n\t\"github.com\/luci\/luci-go\/common\/transport\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/luci\/luci-go\/appengine\/cmd\/milo\/resp\"\n)\n\nfunc resolveServer(server string) string {\n\t\/\/ TODO(hinoka): configure this map in luci-config\n\tif server == \"\" || server == \"default\" || server == \"dev\" {\n\t\treturn \"chromium-swarm-dev.appspot.com\"\n\t} else if server == \"prod\" {\n\t\treturn \"chromium-swarm.appspot.com\"\n\t} else {\n\t\treturn server\n\t}\n}\n\n\/\/ swarmingIDs that beging with \"debug:\" wil redirect to json found in\n\/\/ \/testdata\/\nfunc getSwarmingLog(server string, swarmingID string, c context.Context) ([]byte, error) {\n\t\/\/ Fetch the debug file instead.\n\tif strings.HasPrefix(swarmingID, \"debug:\") {\n\t\tfilename := strings.Join(\n\t\t\t[]string{\"testdata\", swarmingID[6:]}, \"\/\")\n\t\tb, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn b, nil\n\t}\n\n\tswarmingURL := fmt.Sprintf(\n\t\t\"https:\/\/%s\/swarming\/api\/v1\/client\/task\/%s\/output\/0\",\n\t\tresolveServer(server), swarmingID)\n\tclient := transport.GetClient(c)\n\tresp, err := client.Get(swarmingURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"Failed to fetch %s, status code %d\", swarmingURL, resp.StatusCode)\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Decode the JSON and extract the actual log.\n\tsm := map[string]*string{}\n\tif err := json.Unmarshal(body, &sm); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Decode the data using annotee.\n\tif output, ok := sm[\"output\"]; ok {\n\t\treturn []byte(*output), nil\n\t}\n\treturn nil, fmt.Errorf(\"Swarming response did not contain output\\n%s\", body)\n}\n\n\/\/ TODO(hinoka): This should go in a more generic file, when milo has more\n\/\/ than one page.\nfunc getNavi(swarmingID string, URL string) *resp.Navigation {\n\tnavi := &resp.Navigation{}\n\tnavi.PageTitle = &resp.Link{\n\t\tLabel: swarmingID,\n\t\tURL: URL,\n\t}\n\tnavi.SiteTitle = &resp.Link{\n\t\tLabel: \"Milo\",\n\t\tURL: \"\/\",\n\t}\n\treturn navi\n}\n\n\/\/ Given a logdog\/milo step, translate it to a BuildComponent struct.\nfunc miloBuildStep(\n\turl string, anno *miloProto.Step, name string) *resp.BuildComponent {\n\tcomp := &resp.BuildComponent{}\n\tasc := anno.GetStepComponent()\n\tcomp.Label = asc.Name\n\tswitch asc.Status {\n\tcase miloProto.Status_RUNNING:\n\t\tcomp.Status = resp.Running\n\n\tcase miloProto.Status_SUCCESS:\n\t\tcomp.Status = resp.Success\n\n\tcase miloProto.Status_FAILURE:\n\t\tif anno.GetFailureDetails() != nil {\n\t\t\tswitch anno.GetFailureDetails().Type {\n\t\t\tcase miloProto.FailureDetails_INFRA:\n\t\t\t\tcomp.Status = resp.InfraFailure\n\n\t\t\tcase miloProto.FailureDetails_DM_DEPENDENCY_FAILED:\n\t\t\t\tcomp.Status = resp.DependencyFailure\n\n\t\t\tdefault:\n\t\t\t\tcomp.Status = resp.Failure\n\t\t\t}\n\t\t} else {\n\t\t\tcomp.Status = resp.Failure\n\t\t}\n\t\t\/\/ Missing the case of waiting on unfinished dependency...\n\tdefault:\n\t\tcomp.Status = resp.NotRun\n\t}\n\t\/\/ Sub link is for one link per log that isn't stdio.\n\tfor _, link := range asc.GetOtherLinks() {\n\t\tlds := link.GetLogdogStream()\n\t\tshortName := lds.Name[5 : len(lds.Name)-2]\n\t\tif strings.HasSuffix(lds.Name, \"annotations\") || strings.HasSuffix(lds.Name, \"stdio\") {\n\t\t\t\/\/ Skip the special ones.\n\t\t\tcontinue\n\t\t}\n\t\tnewLink := &resp.Link{\n\t\t\tLabel: shortName,\n\t\t\tURL: strings.Join([]string{url, lds.Name}, \"\/\"),\n\t\t}\n\t\tcomp.SubLink = append(comp.SubLink, newLink)\n\t}\n\n\t\/\/ Main link is a link to the stdio.\n\tcomp.MainLink = &resp.Link{\n\t\tLabel: \"stdio\",\n\t\tURL: strings.Join([]string{url, name, \"logs\", \"stdio\"}, \"\/\"),\n\t}\n\n\t\/\/ This should always be a step.\n\tcomp.Type = resp.Step\n\n\t\/\/ This should always be 0\n\tcomp.LevelsDeep = 0\n\n\t\/\/ Timeswamapts\n\tcomp.Started = asc.Started.Time().Format(time.RFC3339)\n\n\t\/\/ This should be the exact same thing.\n\tcomp.Text = asc.Text\n\n\treturn comp\n}\n\n\/\/ Takes a butler client and return a fully populated milo build.\nfunc buildFromClient(c context.Context, swarmingID string, url string, s *memoryClient) (*resp.MiloBuild, error) {\n\t\/\/ Build the basic page response.\n\tbuild := &resp.MiloBuild{}\n\tbuild.Navi = getNavi(swarmingID, url)\n\tbuild.CurrentTime = clock.Now(c).String()\n\n\t\/\/ Now Fetch the main annotation of the build.\n\tmainAnno := &miloProto.Step{}\n\tproto.Unmarshal(s.stream[\"annotations\"].dg, mainAnno)\n\n\t\/\/ Now fill in each of the step components.\n\t\/\/ TODO(hinoka): This is totes cachable.\n\tfor _, name := range mainAnno.SubstepLogdogNameBase {\n\t\tanno := &miloProto.Step{}\n\t\tfullname := strings.Join([]string{name, \"annotations\"}, \"\/\")\n\t\tproto.Unmarshal(s.stream[fullname].dg, anno)\n\t\tbuild.Components = append(build.Components, miloBuildStep(url, anno, name))\n\t}\n\n\t\/\/ Take care of properties\n\tpropGroup := &resp.PropertyGroup{\n\t\tGroupName: \"Main\",\n\t}\n\tfor _, prop := range mainAnno.GetStepComponent().Property {\n\t\tpropGroup.Property = append(propGroup.Property, &resp.Property{\n\t\t\tKey: prop.Name,\n\t\t\tValue: prop.Value,\n\t\t})\n\t}\n\tbuild.PropertyGroup = append(build.PropertyGroup, propGroup)\n\n\t\/\/ And we're done!\n\treturn build, nil\n}\n\n\/\/ Takes in an annotated log and returns a fully populated memory client.\nfunc clientFromAnnotatedLog(ctx context.Context, log []byte) (*memoryClient, error) {\n\tc := &memoryClient{}\n\tp := annotee.Processor{\n\t\tContext: ctx,\n\t\tClient: c,\n\t\tMetadataUpdateInterval: time.Hour * 24, \/\/ Neverrrrrr send incr updates.\n\t}\n\tis := annotee.Stream{\n\t\tReader: bytes.NewBuffer(log),\n\t\tName: types.StreamName(\"stdio\"),\n\t\tAnnotate: true,\n\t\tStripAnnotations: true,\n\t}\n\t\/\/ If this ever has more than one stream then memoryClient needs to become\n\t\/\/ goroutine safe\n\tif err := p.RunStreams([]*annotee.Stream{&is}); err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}\n\nfunc swarmingBuildImpl(c context.Context, URL string, server string, id string) (*resp.MiloBuild, error) {\n\t\/\/ Fetch the data from Swarming\n\tbody, err := getSwarmingLog(server, id, c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Decode the data using annotee.\n\tclient, err := clientFromAnnotatedLog(c, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buildFromClient(c, id, URL, client)\n}\n<commit_msg>Milo: Use the new swarming endpoint<commit_after>\/\/ Copyright 2015 The Chromium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style license that can be\n\/\/ found in the LICENSE file.\n\npackage swarming\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/luci\/luci-go\/client\/logdog\/annotee\"\n\t\"github.com\/luci\/luci-go\/common\/clock\"\n\t\"github.com\/luci\/luci-go\/common\/logdog\/types\"\n\tmiloProto \"github.com\/luci\/luci-go\/common\/proto\/milo\"\n\t\"github.com\/luci\/luci-go\/common\/transport\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/luci\/luci-go\/appengine\/cmd\/milo\/resp\"\n\t\"github.com\/luci\/luci-go\/appengine\/gaeauth\/client\"\n)\n\nfunc resolveServer(server string) string {\n\t\/\/ TODO(hinoka): configure this map in luci-config\n\tif server == \"\" || server == \"default\" || server == \"dev\" {\n\t\treturn \"chromium-swarm-dev.appspot.com\"\n\t} else if server == \"prod\" {\n\t\treturn \"chromium-swarm.appspot.com\"\n\t} else {\n\t\treturn server\n\t}\n}\n\n\/\/ swarmingIDs that beging with \"debug:\" wil redirect to json found in\n\/\/ \/testdata\/\nfunc getSwarmingLog(server string, swarmingID string, c context.Context) ([]byte, error) {\n\t\/\/ Fetch the debug file instead.\n\tif strings.HasPrefix(swarmingID, \"debug:\") {\n\t\tfilename := strings.Join(\n\t\t\t[]string{\"testdata\", swarmingID[6:]}, \"\/\")\n\t\tb, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn b, nil\n\t}\n\n\tswarmingURL := fmt.Sprintf(\n\t\t\"https:\/\/%s\/_ah\/api\/swarming\/v1\/task\/%s\/stdout\",\n\t\tresolveServer(server), swarmingID)\n\tclient := transport.GetClient(client.UseServiceAccountTransport(c,\n\t\t[]string{\"https:\/\/www.googleapis.com\/auth\/userinfo.email\"}, nil))\n\tresp, err := client.Get(swarmingURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"Failed to fetch %s, status code %d\", swarmingURL, resp.StatusCode)\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Decode the JSON and extract the actual log.\n\tsm := map[string]*string{}\n\tif err := json.Unmarshal(body, &sm); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Decode the data using annotee.\n\tif output, ok := sm[\"output\"]; ok {\n\t\treturn []byte(*output), nil\n\t}\n\treturn nil, fmt.Errorf(\"Swarming response did not contain output\\n%s\", body)\n}\n\n\/\/ TODO(hinoka): This should go in a more generic file, when milo has more\n\/\/ than one page.\nfunc getNavi(swarmingID string, URL string) *resp.Navigation {\n\tnavi := &resp.Navigation{}\n\tnavi.PageTitle = &resp.Link{\n\t\tLabel: swarmingID,\n\t\tURL: URL,\n\t}\n\tnavi.SiteTitle = &resp.Link{\n\t\tLabel: \"Milo\",\n\t\tURL: \"\/\",\n\t}\n\treturn navi\n}\n\n\/\/ Given a logdog\/milo step, translate it to a BuildComponent struct.\nfunc miloBuildStep(\n\turl string, anno *miloProto.Step, name string) *resp.BuildComponent {\n\tcomp := &resp.BuildComponent{}\n\tasc := anno.GetStepComponent()\n\tcomp.Label = asc.Name\n\tswitch asc.Status {\n\tcase miloProto.Status_RUNNING:\n\t\tcomp.Status = resp.Running\n\n\tcase miloProto.Status_SUCCESS:\n\t\tcomp.Status = resp.Success\n\n\tcase miloProto.Status_FAILURE:\n\t\tif anno.GetFailureDetails() != nil {\n\t\t\tswitch anno.GetFailureDetails().Type {\n\t\t\tcase miloProto.FailureDetails_INFRA:\n\t\t\t\tcomp.Status = resp.InfraFailure\n\n\t\t\tcase miloProto.FailureDetails_DM_DEPENDENCY_FAILED:\n\t\t\t\tcomp.Status = resp.DependencyFailure\n\n\t\t\tdefault:\n\t\t\t\tcomp.Status = resp.Failure\n\t\t\t}\n\t\t} else {\n\t\t\tcomp.Status = resp.Failure\n\t\t}\n\t\t\/\/ Missing the case of waiting on unfinished dependency...\n\tdefault:\n\t\tcomp.Status = resp.NotRun\n\t}\n\t\/\/ Sub link is for one link per log that isn't stdio.\n\tfor _, link := range asc.GetOtherLinks() {\n\t\tlds := link.GetLogdogStream()\n\t\tshortName := lds.Name[5 : len(lds.Name)-2]\n\t\tif strings.HasSuffix(lds.Name, \"annotations\") || strings.HasSuffix(lds.Name, \"stdio\") {\n\t\t\t\/\/ Skip the special ones.\n\t\t\tcontinue\n\t\t}\n\t\tnewLink := &resp.Link{\n\t\t\tLabel: shortName,\n\t\t\tURL: strings.Join([]string{url, lds.Name}, \"\/\"),\n\t\t}\n\t\tcomp.SubLink = append(comp.SubLink, newLink)\n\t}\n\n\t\/\/ Main link is a link to the stdio.\n\tcomp.MainLink = &resp.Link{\n\t\tLabel: \"stdio\",\n\t\tURL: strings.Join([]string{url, name, \"logs\", \"stdio\"}, \"\/\"),\n\t}\n\n\t\/\/ This should always be a step.\n\tcomp.Type = resp.Step\n\n\t\/\/ This should always be 0\n\tcomp.LevelsDeep = 0\n\n\t\/\/ Timeswamapts\n\tcomp.Started = asc.Started.Time().Format(time.RFC3339)\n\n\t\/\/ This should be the exact same thing.\n\tcomp.Text = asc.Text\n\n\treturn comp\n}\n\n\/\/ Takes a butler client and return a fully populated milo build.\nfunc buildFromClient(c context.Context, swarmingID string, url string, s *memoryClient) (*resp.MiloBuild, error) {\n\t\/\/ Build the basic page response.\n\tbuild := &resp.MiloBuild{}\n\tbuild.Navi = getNavi(swarmingID, url)\n\tbuild.CurrentTime = clock.Now(c).String()\n\n\t\/\/ Now Fetch the main annotation of the build.\n\tmainAnno := &miloProto.Step{}\n\tproto.Unmarshal(s.stream[\"annotations\"].dg, mainAnno)\n\n\t\/\/ Now fill in each of the step components.\n\t\/\/ TODO(hinoka): This is totes cachable.\n\tfor _, name := range mainAnno.SubstepLogdogNameBase {\n\t\tanno := &miloProto.Step{}\n\t\tfullname := strings.Join([]string{name, \"annotations\"}, \"\/\")\n\t\tproto.Unmarshal(s.stream[fullname].dg, anno)\n\t\tbuild.Components = append(build.Components, miloBuildStep(url, anno, name))\n\t}\n\n\t\/\/ Take care of properties\n\tpropGroup := &resp.PropertyGroup{\n\t\tGroupName: \"Main\",\n\t}\n\tfor _, prop := range mainAnno.GetStepComponent().Property {\n\t\tpropGroup.Property = append(propGroup.Property, &resp.Property{\n\t\t\tKey: prop.Name,\n\t\t\tValue: prop.Value,\n\t\t})\n\t}\n\tbuild.PropertyGroup = append(build.PropertyGroup, propGroup)\n\n\t\/\/ And we're done!\n\treturn build, nil\n}\n\n\/\/ Takes in an annotated log and returns a fully populated memory client.\nfunc clientFromAnnotatedLog(ctx context.Context, log []byte) (*memoryClient, error) {\n\tc := &memoryClient{}\n\tp := annotee.Processor{\n\t\tContext: ctx,\n\t\tClient: c,\n\t\tMetadataUpdateInterval: time.Hour * 24, \/\/ Neverrrrrr send incr updates.\n\t}\n\tis := annotee.Stream{\n\t\tReader: bytes.NewBuffer(log),\n\t\tName: types.StreamName(\"stdio\"),\n\t\tAnnotate: true,\n\t\tStripAnnotations: true,\n\t}\n\t\/\/ If this ever has more than one stream then memoryClient needs to become\n\t\/\/ goroutine safe\n\tif err := p.RunStreams([]*annotee.Stream{&is}); err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}\n\nfunc swarmingBuildImpl(c context.Context, URL string, server string, id string) (*resp.MiloBuild, error) {\n\t\/\/ Fetch the data from Swarming\n\tbody, err := getSwarmingLog(server, id, c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Decode the data using annotee.\n\tclient, err := clientFromAnnotatedLog(c, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buildFromClient(c, id, URL, client)\n}\n<|endoftext|>"} {"text":"<commit_before>package proxy_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\n\t\"github.com\/tendermint\/tendermint\/lite\"\n\t\"github.com\/tendermint\/tendermint\/lite\/proxy\"\n\t\"github.com\/tendermint\/tendermint\/types\"\n)\n\nvar (\n\tdeadBeefTxs = types.Txs{[]byte(\"DE\"), []byte(\"AD\"), []byte(\"BE\"), []byte(\"EF\")}\n\tdeadBeefRipEmd160Hash = deadBeefTxs.Hash()\n)\n\nfunc TestValidateBlock(t *testing.T) {\n\ttests := []struct {\n\t\tblock *types.Block\n\t\tcommit lite.Commit\n\t\twantErr string\n\t}{\n\t\t{\n\t\t\tblock: nil, wantErr: \"non-nil Block\",\n\t\t},\n\t\t{\n\t\t\tblock: &types.Block{}, wantErr: \"nil Header\",\n\t\t},\n\t\t{\n\t\t\tblock: &types.Block{Header: new(types.Header)},\n\t\t},\n\n\t\t\/\/ Start Header.Height mismatch test\n\t\t{\n\t\t\tblock: &types.Block{Header: &types.Header{Height: 10}},\n\t\t\tcommit: lite.Commit{Header: &types.Header{Height: 11}},\n\t\t\twantErr: \"don't match - 10 vs 11\",\n\t\t},\n\n\t\t{\n\t\t\tblock: &types.Block{Header: &types.Header{Height: 11}},\n\t\t\tcommit: lite.Commit{Header: &types.Header{Height: 11}},\n\t\t},\n\t\t\/\/ End Header.Height mismatch test\n\n\t\t\/\/ Start Header.Hash mismatch test\n\t\t{\n\t\t\tblock: &types.Block{\n\t\t\t\tHeader: &types.Header{\n\t\t\t\t\tHeight: 11,\n\t\t\t\t\tTime: time.Date(2018, 1, 1, 1, 1, 1, 1, time.UTC),\n\t\t\t\t\tValidatorsHash: []byte(\"Tendermint\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tcommit: lite.Commit{Header: &types.Header{Height: 11}},\n\t\t\twantErr: \"Headers don't match\",\n\t\t},\n\n\t\t{\n\t\t\tblock: &types.Block{\n\t\t\t\tHeader: &types.Header{\n\t\t\t\t\tHeight: 11,\n\t\t\t\t\tTime: time.Date(2018, 1, 1, 1, 1, 1, 1, time.UTC),\n\t\t\t\t\tValidatorsHash: []byte(\"Tendermint\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tcommit: lite.Commit{\n\t\t\t\tHeader: &types.Header{\n\t\t\t\t\tHeight: 11,\n\t\t\t\t\tTime: time.Date(2018, 1, 1, 1, 1, 1, 1, time.UTC),\n\t\t\t\t\tValidatorsHash: []byte(\"Tendermint\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\/\/ End Header.Hash mismatch test\n\n\t\t\/\/ Start Header.Data hash mismatch test\n\t\t{\n\t\t\tblock: &types.Block{\n\t\t\t\tHeader: &types.Header{Height: 11},\n\t\t\t\tData: &types.Data{Txs: []types.Tx{[]byte(\"0xDE\"), []byte(\"AD\")}},\n\t\t\t},\n\t\t\tcommit: lite.Commit{\n\t\t\t\tHeader: &types.Header{Height: 11},\n\t\t\t\tCommit: &types.Commit{BlockID: types.BlockID{Hash: []byte(\"0xDEADBEEF\")}},\n\t\t\t},\n\t\t\twantErr: \"Data hash doesn't match header\",\n\t\t},\n\t\t{\n\t\t\tblock: &types.Block{\n\t\t\t\tHeader: &types.Header{Height: 11, DataHash: deadBeefRipEmd160Hash},\n\t\t\t\tData: &types.Data{Txs: deadBeefTxs},\n\t\t\t},\n\t\t\tcommit: lite.Commit{\n\t\t\t\tHeader: &types.Header{Height: 11},\n\t\t\t\tCommit: &types.Commit{BlockID: types.BlockID{Hash: []byte(\"DEADBEEF\")}},\n\t\t\t},\n\t\t},\n\t\t\/\/ End Header.Data hash mismatch test\n\t}\n\n\tfor i, tt := range tests {\n\t\terr := proxy.ValidateBlock(tt.block, tt.commit)\n\t\tif tt.wantErr != \"\" {\n\t\t\tif err == nil {\n\t\t\t\tassert.FailNowf(t, \"Unexpectedly passed\", \"#%d\", i)\n\t\t\t} else {\n\t\t\t\tassert.Contains(t, err.Error(), tt.wantErr, \"#%d should contain the substring\\n\\n\", i)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tassert.Nil(t, err, \"#%d: expecting a nil error\", i)\n\t}\n}\n\nfunc TestValidateBlockMeta(t *testing.T) {\n\ttests := []struct {\n\t\tmeta *types.BlockMeta\n\t\tcommit lite.Commit\n\t\twantErr string\n\t}{\n\t\t{\n\t\t\tmeta: nil, wantErr: \"non-nil BlockMeta\",\n\t\t},\n\t\t{\n\t\t\tmeta: &types.BlockMeta{}, wantErr: \"non-nil Header\",\n\t\t},\n\t\t{\n\t\t\tmeta: &types.BlockMeta{Header: new(types.Header)},\n\t\t},\n\n\t\t\/\/ Start Header.Height mismatch test\n\t\t{\n\t\t\tmeta: &types.BlockMeta{Header: &types.Header{Height: 10}},\n\t\t\tcommit: lite.Commit{Header: &types.Header{Height: 11}},\n\t\t\twantErr: \"don't match - 10 vs 11\",\n\t\t},\n\n\t\t{\n\t\t\tmeta: &types.BlockMeta{Header: &types.Header{Height: 11}},\n\t\t\tcommit: lite.Commit{Header: &types.Header{Height: 11}},\n\t\t},\n\t\t\/\/ End Header.Height mismatch test\n\n\t\t\/\/ Start Headers don't match test\n\t\t{\n\t\t\tmeta: &types.BlockMeta{\n\t\t\t\tHeader: &types.Header{\n\t\t\t\t\tHeight: 11,\n\t\t\t\t\tTime: time.Date(2018, 1, 1, 1, 1, 1, 1, time.UTC),\n\t\t\t\t\tValidatorsHash: []byte(\"Tendermint\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tcommit: lite.Commit{Header: &types.Header{Height: 11}},\n\t\t\twantErr: \"Headers don't match\",\n\t\t},\n\n\t\t{\n\t\t\tmeta: &types.BlockMeta{\n\t\t\t\tHeader: &types.Header{\n\t\t\t\t\tHeight: 11,\n\t\t\t\t\tTime: time.Date(2018, 1, 1, 1, 1, 1, 1, time.UTC),\n\t\t\t\t\tValidatorsHash: []byte(\"Tendermint\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tcommit: lite.Commit{\n\t\t\t\tHeader: &types.Header{\n\t\t\t\t\tHeight: 11,\n\t\t\t\t\tTime: time.Date(2018, 1, 1, 1, 1, 1, 1, time.UTC),\n\t\t\t\t\tValidatorsHash: []byte(\"Tendermint\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tmeta: &types.BlockMeta{\n\t\t\t\tHeader: &types.Header{\n\t\t\t\t\tHeight: 11,\n\t\t\t\t\t\/\/ TODO: (@odeke-em) inquire why ValidatorsHash has to be non-blank\n\t\t\t\t\t\/\/ for the Header to be hashed. Perhaps this is a security hole because\n\t\t\t\t\t\/\/ an aggressor could perhaps pass in headers that don't have\n\t\t\t\t\t\/\/ ValidatorsHash set and we won't be able to validate blocks.\n\t\t\t\t\tValidatorsHash: []byte(\"lite-test\"),\n\t\t\t\t\t\/\/ TODO: (@odeke-em) file an issue with Tendermint to get them to update\n\t\t\t\t\t\/\/ to the latest go-wire, then no more need for this value fill to avoid\n\t\t\t\t\t\/\/ the time zero value of less than 1970.\n\t\t\t\t\tTime: time.Date(2018, 1, 1, 1, 1, 1, 1, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\tcommit: lite.Commit{\n\t\t\t\tHeader: &types.Header{Height: 11, DataHash: deadBeefRipEmd160Hash},\n\t\t\t},\n\t\t\twantErr: \"Headers don't match\",\n\t\t},\n\n\t\t{\n\t\t\tmeta: &types.BlockMeta{\n\t\t\t\tHeader: &types.Header{\n\t\t\t\t\tHeight: 11, DataHash: deadBeefRipEmd160Hash,\n\t\t\t\t\tValidatorsHash: []byte(\"Tendermint\"),\n\t\t\t\t\tTime: time.Date(2017, 1, 2, 1, 1, 1, 1, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\tcommit: lite.Commit{\n\t\t\t\tHeader: &types.Header{\n\t\t\t\t\tHeight: 11, DataHash: deadBeefRipEmd160Hash,\n\t\t\t\t\tValidatorsHash: []byte(\"Tendermint\"),\n\t\t\t\t\tTime: time.Date(2017, 1, 2, 2, 1, 1, 1, time.UTC),\n\t\t\t\t},\n\t\t\t\tCommit: &types.Commit{BlockID: types.BlockID{Hash: []byte(\"DEADBEEF\")}},\n\t\t\t},\n\t\t\twantErr: \"Headers don't match\",\n\t\t},\n\n\t\t{\n\t\t\tmeta: &types.BlockMeta{\n\t\t\t\tHeader: &types.Header{\n\t\t\t\t\tHeight: 11, DataHash: deadBeefRipEmd160Hash,\n\t\t\t\t\tValidatorsHash: []byte(\"Tendermint\"),\n\t\t\t\t\tTime: time.Date(2017, 1, 2, 1, 1, 1, 1, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\tcommit: lite.Commit{\n\t\t\t\tHeader: &types.Header{\n\t\t\t\t\tHeight: 11, DataHash: deadBeefRipEmd160Hash,\n\t\t\t\t\tValidatorsHash: []byte(\"Tendermint-x\"),\n\t\t\t\t\tTime: time.Date(2017, 1, 2, 1, 1, 1, 1, time.UTC),\n\t\t\t\t},\n\t\t\t\tCommit: &types.Commit{BlockID: types.BlockID{Hash: []byte(\"DEADBEEF\")}},\n\t\t\t},\n\t\t\twantErr: \"Headers don't match\",\n\t\t},\n\t\t\/\/ End Headers don't match test\n\t}\n\n\tfor i, tt := range tests {\n\t\terr := proxy.ValidateBlockMeta(tt.meta, tt.commit)\n\t\tif tt.wantErr != \"\" {\n\t\t\tif err == nil {\n\t\t\t\tassert.FailNowf(t, \"Unexpectedly passed\", \"#%d: wanted error %q\", i, tt.wantErr)\n\t\t\t} else {\n\t\t\t\tassert.Contains(t, err.Error(), tt.wantErr, \"#%d should contain the substring\\n\\n\", i)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tassert.Nil(t, err, \"#%d: expecting a nil error\", i)\n\t}\n}\n<commit_msg>lite\/proxy: consolidate some common test headers into a variable<commit_after>package proxy_test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\n\t\"github.com\/tendermint\/tendermint\/lite\"\n\t\"github.com\/tendermint\/tendermint\/lite\/proxy\"\n\t\"github.com\/tendermint\/tendermint\/types\"\n)\n\nvar (\n\tdeadBeefTxs = types.Txs{[]byte(\"DE\"), []byte(\"AD\"), []byte(\"BE\"), []byte(\"EF\")}\n\n\tdeadBeefRipEmd160Hash = deadBeefTxs.Hash()\n)\n\nvar hdrHeight11Tendermint = &types.Header{\n\tHeight: 11,\n\tTime: time.Date(2018, 1, 1, 1, 1, 1, 1, time.UTC),\n\tValidatorsHash: []byte(\"Tendermint\"),\n}\n\nfunc TestValidateBlock(t *testing.T) {\n\ttests := []struct {\n\t\tblock *types.Block\n\t\tcommit lite.Commit\n\t\twantErr string\n\t}{\n\t\t{\n\t\t\tblock: nil, wantErr: \"non-nil Block\",\n\t\t},\n\t\t{\n\t\t\tblock: &types.Block{}, wantErr: \"nil Header\",\n\t\t},\n\t\t{\n\t\t\tblock: &types.Block{Header: new(types.Header)},\n\t\t},\n\n\t\t\/\/ Start Header.Height mismatch test\n\t\t{\n\t\t\tblock: &types.Block{Header: &types.Header{Height: 10}},\n\t\t\tcommit: lite.Commit{Header: &types.Header{Height: 11}},\n\t\t\twantErr: \"don't match - 10 vs 11\",\n\t\t},\n\n\t\t{\n\t\t\tblock: &types.Block{Header: &types.Header{Height: 11}},\n\t\t\tcommit: lite.Commit{Header: &types.Header{Height: 11}},\n\t\t},\n\t\t\/\/ End Header.Height mismatch test\n\n\t\t\/\/ Start Header.Hash mismatch test\n\t\t{\n\t\t\tblock: &types.Block{Header: hdrHeight11Tendermint},\n\t\t\tcommit: lite.Commit{Header: &types.Header{Height: 11}},\n\t\t\twantErr: \"Headers don't match\",\n\t\t},\n\n\t\t{\n\t\t\tblock: &types.Block{Header: hdrHeight11Tendermint},\n\t\t\tcommit: lite.Commit{Header: hdrHeight11Tendermint},\n\t\t},\n\t\t\/\/ End Header.Hash mismatch test\n\n\t\t\/\/ Start Header.Data hash mismatch test\n\t\t{\n\t\t\tblock: &types.Block{\n\t\t\t\tHeader: &types.Header{Height: 11},\n\t\t\t\tData: &types.Data{Txs: []types.Tx{[]byte(\"0xDE\"), []byte(\"AD\")}},\n\t\t\t},\n\t\t\tcommit: lite.Commit{\n\t\t\t\tHeader: &types.Header{Height: 11},\n\t\t\t\tCommit: &types.Commit{BlockID: types.BlockID{Hash: []byte(\"0xDEADBEEF\")}},\n\t\t\t},\n\t\t\twantErr: \"Data hash doesn't match header\",\n\t\t},\n\t\t{\n\t\t\tblock: &types.Block{\n\t\t\t\tHeader: &types.Header{Height: 11, DataHash: deadBeefRipEmd160Hash},\n\t\t\t\tData: &types.Data{Txs: deadBeefTxs},\n\t\t\t},\n\t\t\tcommit: lite.Commit{\n\t\t\t\tHeader: &types.Header{Height: 11},\n\t\t\t\tCommit: &types.Commit{BlockID: types.BlockID{Hash: []byte(\"DEADBEEF\")}},\n\t\t\t},\n\t\t},\n\t\t\/\/ End Header.Data hash mismatch test\n\t}\n\n\tfor i, tt := range tests {\n\t\terr := proxy.ValidateBlock(tt.block, tt.commit)\n\t\tif tt.wantErr != \"\" {\n\t\t\tif err == nil {\n\t\t\t\tassert.FailNowf(t, \"Unexpectedly passed\", \"#%d\", i)\n\t\t\t} else {\n\t\t\t\tassert.Contains(t, err.Error(), tt.wantErr, \"#%d should contain the substring\\n\\n\", i)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tassert.Nil(t, err, \"#%d: expecting a nil error\", i)\n\t}\n}\n\nfunc TestValidateBlockMeta(t *testing.T) {\n\ttests := []struct {\n\t\tmeta *types.BlockMeta\n\t\tcommit lite.Commit\n\t\twantErr string\n\t}{\n\t\t{\n\t\t\tmeta: nil, wantErr: \"non-nil BlockMeta\",\n\t\t},\n\t\t{\n\t\t\tmeta: &types.BlockMeta{}, wantErr: \"non-nil Header\",\n\t\t},\n\t\t{\n\t\t\tmeta: &types.BlockMeta{Header: new(types.Header)},\n\t\t},\n\n\t\t\/\/ Start Header.Height mismatch test\n\t\t{\n\t\t\tmeta: &types.BlockMeta{Header: &types.Header{Height: 10}},\n\t\t\tcommit: lite.Commit{Header: &types.Header{Height: 11}},\n\t\t\twantErr: \"don't match - 10 vs 11\",\n\t\t},\n\n\t\t{\n\t\t\tmeta: &types.BlockMeta{Header: &types.Header{Height: 11}},\n\t\t\tcommit: lite.Commit{Header: &types.Header{Height: 11}},\n\t\t},\n\t\t\/\/ End Header.Height mismatch test\n\n\t\t\/\/ Start Headers don't match test\n\t\t{\n\t\t\tmeta: &types.BlockMeta{Header: hdrHeight11Tendermint},\n\t\t\tcommit: lite.Commit{Header: &types.Header{Height: 11}},\n\t\t\twantErr: \"Headers don't match\",\n\t\t},\n\n\t\t{\n\t\t\tmeta: &types.BlockMeta{Header: hdrHeight11Tendermint},\n\t\t\tcommit: lite.Commit{Header: hdrHeight11Tendermint},\n\t\t},\n\n\t\t{\n\t\t\tmeta: &types.BlockMeta{\n\t\t\t\tHeader: &types.Header{\n\t\t\t\t\tHeight: 11,\n\t\t\t\t\t\/\/ TODO: (@odeke-em) inquire why ValidatorsHash has to be non-blank\n\t\t\t\t\t\/\/ for the Header to be hashed. Perhaps this is a security hole because\n\t\t\t\t\t\/\/ an aggressor could perhaps pass in headers that don't have\n\t\t\t\t\t\/\/ ValidatorsHash set and we won't be able to validate blocks.\n\t\t\t\t\tValidatorsHash: []byte(\"lite-test\"),\n\t\t\t\t\t\/\/ TODO: (@odeke-em) file an issue with Tendermint to get them to update\n\t\t\t\t\t\/\/ to the latest go-wire, then no more need for this value fill to avoid\n\t\t\t\t\t\/\/ the time zero value of less than 1970.\n\t\t\t\t\tTime: time.Date(2018, 1, 1, 1, 1, 1, 1, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\tcommit: lite.Commit{\n\t\t\t\tHeader: &types.Header{Height: 11, DataHash: deadBeefRipEmd160Hash},\n\t\t\t},\n\t\t\twantErr: \"Headers don't match\",\n\t\t},\n\n\t\t{\n\t\t\tmeta: &types.BlockMeta{\n\t\t\t\tHeader: &types.Header{\n\t\t\t\t\tHeight: 11, DataHash: deadBeefRipEmd160Hash,\n\t\t\t\t\tValidatorsHash: []byte(\"Tendermint\"),\n\t\t\t\t\tTime: time.Date(2017, 1, 2, 1, 1, 1, 1, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\tcommit: lite.Commit{\n\t\t\t\tHeader: &types.Header{\n\t\t\t\t\tHeight: 11, DataHash: deadBeefRipEmd160Hash,\n\t\t\t\t\tValidatorsHash: []byte(\"Tendermint\"),\n\t\t\t\t\tTime: time.Date(2018, 1, 2, 1, 1, 1, 1, time.UTC),\n\t\t\t\t},\n\t\t\t\tCommit: &types.Commit{BlockID: types.BlockID{Hash: []byte(\"DEADBEEF\")}},\n\t\t\t},\n\t\t\twantErr: \"Headers don't match\",\n\t\t},\n\n\t\t{\n\t\t\tmeta: &types.BlockMeta{\n\t\t\t\tHeader: &types.Header{\n\t\t\t\t\tHeight: 11, DataHash: deadBeefRipEmd160Hash,\n\t\t\t\t\tValidatorsHash: []byte(\"Tendermint\"),\n\t\t\t\t\tTime: time.Date(2017, 1, 2, 1, 1, 1, 1, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\tcommit: lite.Commit{\n\t\t\t\tHeader: &types.Header{\n\t\t\t\t\tHeight: 11, DataHash: deadBeefRipEmd160Hash,\n\t\t\t\t\tValidatorsHash: []byte(\"Tendermint-x\"),\n\t\t\t\t\tTime: time.Date(2017, 1, 2, 1, 1, 1, 1, time.UTC),\n\t\t\t\t},\n\t\t\t\tCommit: &types.Commit{BlockID: types.BlockID{Hash: []byte(\"DEADBEEF\")}},\n\t\t\t},\n\t\t\twantErr: \"Headers don't match\",\n\t\t},\n\t\t\/\/ End Headers don't match test\n\t}\n\n\tfor i, tt := range tests {\n\t\terr := proxy.ValidateBlockMeta(tt.meta, tt.commit)\n\t\tif tt.wantErr != \"\" {\n\t\t\tif err == nil {\n\t\t\t\tassert.FailNowf(t, \"Unexpectedly passed\", \"#%d: wanted error %q\", i, tt.wantErr)\n\t\t\t} else {\n\t\t\t\tassert.Contains(t, err.Error(), tt.wantErr, \"#%d should contain the substring\\n\\n\", i)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tassert.Nil(t, err, \"#%d: expecting a nil error\", i)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/**\n * Copyright (C) 2011 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/kubernetes-incubator\/service-catalog\/pkg\/apis\/servicecatalog\"\n\t\"github.com\/snowdrop\/service-catalog-java-api\/generator\/pkg\/schemagen\"\n\t\"os\"\n)\n\n\/\/A Schema with the core types of the Service Catalog\ntype Schema struct {\n\tClusterServiceBroker servicecatalog.ClusterServiceBroker\n\tClusterServiceBrokerList servicecatalog.ClusterServiceBrokerList\n\tClusterServiceClass servicecatalog.ClusterServiceClass\n\tClusterServiceClassList servicecatalog.ClusterServiceClassList\n\tClusterServicePlan servicecatalog.ClusterServicePlan\n\tClusterServicePlanList servicecatalog.ClusterServicePlanList\n\tServiceInstance servicecatalog.ServiceInstance\n\tServiceInstanceList servicecatalog.ServiceInstanceList\n\tServiceBinding servicecatalog.ServiceBinding\n\tServiceBindingList servicecatalog.ServiceBindingList\n\tServiceBroker servicecatalog.ServiceBroker\n\tServiceBrokerList servicecatalog.ServiceBrokerList\n}\n\nfunc main() {\n\tpackages := []schemagen.PackageDescriptor{\n\t\t{\"github.com\/kubernetes-incubator\/service-catalog\/pkg\/apis\/servicecatalog\", \"\", \"me.snowdrop.servicecatalog.api.model\", \"servicecatalog_\"},\n\t}\n\n\ttypeMap := map[reflect.Type]reflect.Type{\n\t\treflect.TypeOf(time.Time{}): reflect.TypeOf(\"\"),\n\t\treflect.TypeOf(struct{}{}): reflect.TypeOf(\"\"),\n\t}\n\tschema, err := schemagen.GenerateSchema(reflect.TypeOf(Schema{}), packages, typeMap)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\targs := os.Args[1:]\n\tif len(args) < 1 || args[0] != \"validation\" {\n\t\tschema.Resources = nil\n\t}\n\n\tb, err := json.Marshal(&schema)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tresult := string(b)\n\tresult = strings.Replace(result, \"\\\"additionalProperty\\\":\", \"\\\"additionalProperties\\\":\", -1)\n\tvar out bytes.Buffer\n\terr = json.Indent(&out, []byte(result), \"\", \" \")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Println(out.String())\n}\n<commit_msg>chore: package reoder.<commit_after>\/**\n * Copyright (C) 2011 Red Hat, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/kubernetes-incubator\/service-catalog\/pkg\/apis\/servicecatalog\"\n\t\"github.com\/snowdrop\/service-catalog-java-api\/generator\/pkg\/schemagen\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/A Schema with the core types of the Service Catalog\ntype Schema struct {\n\tClusterServiceBroker servicecatalog.ClusterServiceBroker\n\tClusterServiceBrokerList servicecatalog.ClusterServiceBrokerList\n\tClusterServiceClass servicecatalog.ClusterServiceClass\n\tClusterServiceClassList servicecatalog.ClusterServiceClassList\n\tClusterServicePlan servicecatalog.ClusterServicePlan\n\tClusterServicePlanList servicecatalog.ClusterServicePlanList\n\tServiceInstance servicecatalog.ServiceInstance\n\tServiceInstanceList servicecatalog.ServiceInstanceList\n\tServiceBinding servicecatalog.ServiceBinding\n\tServiceBindingList servicecatalog.ServiceBindingList\n\tServiceBroker servicecatalog.ServiceBroker\n\tServiceBrokerList servicecatalog.ServiceBrokerList\n}\n\nfunc main() {\n\tpackages := []schemagen.PackageDescriptor{\n\t\t{\"github.com\/kubernetes-incubator\/service-catalog\/pkg\/apis\/servicecatalog\", \"\", \"me.snowdrop.servicecatalog.api.model\", \"servicecatalog_\"},\n\t}\n\n\ttypeMap := map[reflect.Type]reflect.Type{\n\t\treflect.TypeOf(time.Time{}): reflect.TypeOf(\"\"),\n\t\treflect.TypeOf(struct{}{}): reflect.TypeOf(\"\"),\n\t}\n\tschema, err := schemagen.GenerateSchema(reflect.TypeOf(Schema{}), packages, typeMap)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\targs := os.Args[1:]\n\tif len(args) < 1 || args[0] != \"validation\" {\n\t\tschema.Resources = nil\n\t}\n\n\tb, err := json.Marshal(&schema)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tresult := string(b)\n\tresult = strings.Replace(result, \"\\\"additionalProperty\\\":\", \"\\\"additionalProperties\\\":\", -1)\n\tvar out bytes.Buffer\n\terr = json.Indent(&out, []byte(result), \"\", \" \")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Println(out.String())\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"testing\"\n)\n\n\/\/ See issue docker\/docker#8141\nfunc TestPullImageWithAliases(t *testing.T) {\n\tdefer setupRegistry(t)()\n\n\trepoName := fmt.Sprintf(\"%v\/dockercli\/busybox\", privateRegistryURL)\n\tdefer deleteImages(repoName)\n\n\trepos := []string{}\n\tfor _, tag := range []string{\"recent\", \"fresh\"} {\n\t\trepos = append(repos, fmt.Sprintf(\"%v:%v\", repoName, tag))\n\t}\n\n\t\/\/ Tag and push the same image multiple times.\n\tfor _, repo := range repos {\n\t\tif out, _, err := runCommandWithOutput(exec.Command(dockerBinary, \"tag\", \"busybox\", repo)); err != nil {\n\t\t\tt.Fatalf(\"Failed to tag image %v: error %v, output %q\", repos, err, out)\n\t\t}\n\t\tif out, err := exec.Command(dockerBinary, \"push\", repo).CombinedOutput(); err != nil {\n\t\t\tt.Fatalf(\"Failed to push image %v: error %v, output %q\", err, string(out))\n\t\t}\n\t}\n\n\t\/\/ Clear local images store.\n\targs := append([]string{\"rmi\"}, repos...)\n\tif out, err := exec.Command(dockerBinary, args...).CombinedOutput(); err != nil {\n\t\tt.Fatalf(\"Failed to clean images: error %v, output %q\", err, string(out))\n\t}\n\n\t\/\/ Pull a single tag and verify it doesn't bring down all aliases.\n\tpullCmd := exec.Command(dockerBinary, \"pull\", repos[0])\n\tif out, _, err := runCommandWithOutput(pullCmd); err != nil {\n\t\tt.Fatalf(\"Failed to pull %v: error %v, output %q\", repoName, err, out)\n\t}\n\tdefer deleteImages(repos[0])\n\tif err := exec.Command(dockerBinary, \"inspect\", repos[0]).Run(); err != nil {\n\t\tt.Fatalf(\"Image %v was not pulled down\", repos[0])\n\t}\n\tfor _, repo := range repos[1:] {\n\t\tif err := exec.Command(dockerBinary, \"inspect\", repo).Run(); err == nil {\n\t\t\tt.Fatalf(\"Image %v shouldn't have been pulled down\", repo)\n\t\t}\n\t}\n\n\tlogDone(\"pull - image with aliases\")\n}\n\n\/\/ pulling an image from the central registry should work\nfunc TestPullImageFromCentralRegistry(t *testing.T) {\n\tpullCmd := exec.Command(dockerBinary, \"pull\", \"hello-world\")\n\tif out, _, err := runCommandWithOutput(pullCmd); err != nil {\n\t\tt.Fatalf(\"pulling the hello-world image from the registry has failed: %s, %v\", out, err)\n\t}\n\tlogDone(\"pull - pull hello-world\")\n}\n\n\/\/ pulling a non-existing image from the central registry should return a non-zero exit code\nfunc TestPullNonExistingImage(t *testing.T) {\n\tpullCmd := exec.Command(dockerBinary, \"pull\", \"fooblahblah1234\")\n\tif out, _, err := runCommandWithOutput(pullCmd); err == nil {\n\t\tt.Fatalf(\"expected non-zero exit status when pulling non-existing image: %s\", out)\n\t}\n\tlogDone(\"pull - pull fooblahblah1234 (non-existing image)\")\n}\n\n\/\/ pulling an image from the central registry using official names should work\n\/\/ ensure all pulls result in the same image\nfunc TestPullImageOfficialNames(t *testing.T) {\n\tnames := []string{\n\t\t\"docker.io\/hello-world\",\n\t\t\"index.docker.io\/hello-world\",\n\t\t\"library\/hello-world\",\n\t\t\"docker.io\/library\/hello-world\",\n\t\t\"index.docker.io\/library\/hello-world\",\n\t}\n\tfor _, name := range names {\n\t\tpullCmd := exec.Command(dockerBinary, \"pull\", name)\n\t\tout, exitCode, err := runCommandWithOutput(pullCmd)\n\t\tif err != nil || exitCode != 0 {\n\t\t\tt.Errorf(\"pulling the '%s' image from the registry has failed: %s\", name, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ ensure we don't have multiple image names.\n\t\timagesCmd := exec.Command(dockerBinary, \"images\")\n\t\tout, _, err = runCommandWithOutput(imagesCmd)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"listing images failed with errors: %v\", err)\n\t\t} else if strings.Contains(out, name) {\n\t\t\tt.Errorf(\"images should not have listed '%s'\", name)\n\t\t}\n\t}\n\tlogDone(\"pull - pull official names\")\n}\n<commit_msg>Add test for pull verified<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"testing\"\n)\n\n\/\/ See issue docker\/docker#8141\nfunc TestPullImageWithAliases(t *testing.T) {\n\tdefer setupRegistry(t)()\n\n\trepoName := fmt.Sprintf(\"%v\/dockercli\/busybox\", privateRegistryURL)\n\tdefer deleteImages(repoName)\n\n\trepos := []string{}\n\tfor _, tag := range []string{\"recent\", \"fresh\"} {\n\t\trepos = append(repos, fmt.Sprintf(\"%v:%v\", repoName, tag))\n\t}\n\n\t\/\/ Tag and push the same image multiple times.\n\tfor _, repo := range repos {\n\t\tif out, _, err := runCommandWithOutput(exec.Command(dockerBinary, \"tag\", \"busybox\", repo)); err != nil {\n\t\t\tt.Fatalf(\"Failed to tag image %v: error %v, output %q\", repos, err, out)\n\t\t}\n\t\tif out, err := exec.Command(dockerBinary, \"push\", repo).CombinedOutput(); err != nil {\n\t\t\tt.Fatalf(\"Failed to push image %v: error %v, output %q\", err, string(out))\n\t\t}\n\t}\n\n\t\/\/ Clear local images store.\n\targs := append([]string{\"rmi\"}, repos...)\n\tif out, err := exec.Command(dockerBinary, args...).CombinedOutput(); err != nil {\n\t\tt.Fatalf(\"Failed to clean images: error %v, output %q\", err, string(out))\n\t}\n\n\t\/\/ Pull a single tag and verify it doesn't bring down all aliases.\n\tpullCmd := exec.Command(dockerBinary, \"pull\", repos[0])\n\tif out, _, err := runCommandWithOutput(pullCmd); err != nil {\n\t\tt.Fatalf(\"Failed to pull %v: error %v, output %q\", repoName, err, out)\n\t}\n\tdefer deleteImages(repos[0])\n\tif err := exec.Command(dockerBinary, \"inspect\", repos[0]).Run(); err != nil {\n\t\tt.Fatalf(\"Image %v was not pulled down\", repos[0])\n\t}\n\tfor _, repo := range repos[1:] {\n\t\tif err := exec.Command(dockerBinary, \"inspect\", repo).Run(); err == nil {\n\t\t\tt.Fatalf(\"Image %v shouldn't have been pulled down\", repo)\n\t\t}\n\t}\n\n\tlogDone(\"pull - image with aliases\")\n}\n\n\/\/ pulling busybox should show verified message\nfunc TestPullVerified(t *testing.T) {\n\tdefer setupRegistry(t)()\n\n\trepo := fmt.Sprintf(\"%v\/dockercli\/busybox:verified\", privateRegistryURL)\n\tdefer deleteImages(repo)\n\n\t\/\/ tag the image\n\tif out, _, err := runCommandWithOutput(exec.Command(dockerBinary, \"tag\", \"busybox\", repo)); err != nil {\n\t\tt.Fatalf(\"Failed to tag image verifiedTest: error %v, output %q\", err, out)\n\t}\n\n\t\/\/ push it\n\tif out, err := exec.Command(dockerBinary, \"push\", repo).CombinedOutput(); err != nil {\n\t\tt.Fatalf(\"Failed to push image %v: error %v, output %q\", err, string(out))\n\t}\n\n\t\/\/ remove it locally\n\tif out, err := exec.Command(dockerBinary, \"rmi\", repo).CombinedOutput(); err != nil {\n\t\tt.Fatalf(\"Failed to clean images: error %v, output %q\", err, string(out))\n\t}\n\n\t\/\/ pull it\n\texpected := \"The image you are pulling has been verified\"\n\tpullCmd := exec.Command(dockerBinary, \"pull\", repo)\n\tif out, _, err := runCommandWithOutput(pullCmd); err != nil || !strings.Contains(out, expected) {\n\t\tt.Fatalf(\"pulling a verified image failed. expected: %s\\ngot: %s, %v\", expected, out, err)\n\t}\n\n\t\/\/ pull it again\n\tpullCmd = exec.Command(dockerBinary, \"pull\", repo)\n\tif out, _, err := runCommandWithOutput(pullCmd); err != nil || !strings.Contains(out, expected) {\n\t\tt.Fatalf(\"pulling a verified image failed. expected: %s\\ngot: %s, %v\", expected, out, err)\n\t}\n\n\tlogDone(\"pull - pull verified\")\n}\n\n\/\/ pulling an image from the central registry should work\nfunc TestPullImageFromCentralRegistry(t *testing.T) {\n\tpullCmd := exec.Command(dockerBinary, \"pull\", \"hello-world\")\n\tif out, _, err := runCommandWithOutput(pullCmd); err != nil {\n\t\tt.Fatalf(\"pulling the hello-world image from the registry has failed: %s, %v\", out, err)\n\t}\n\tlogDone(\"pull - pull hello-world\")\n}\n\n\/\/ pulling a non-existing image from the central registry should return a non-zero exit code\nfunc TestPullNonExistingImage(t *testing.T) {\n\tpullCmd := exec.Command(dockerBinary, \"pull\", \"fooblahblah1234\")\n\tif out, _, err := runCommandWithOutput(pullCmd); err == nil {\n\t\tt.Fatalf(\"expected non-zero exit status when pulling non-existing image: %s\", out)\n\t}\n\tlogDone(\"pull - pull fooblahblah1234 (non-existing image)\")\n}\n\n\/\/ pulling an image from the central registry using official names should work\n\/\/ ensure all pulls result in the same image\nfunc TestPullImageOfficialNames(t *testing.T) {\n\tnames := []string{\n\t\t\"docker.io\/hello-world\",\n\t\t\"index.docker.io\/hello-world\",\n\t\t\"library\/hello-world\",\n\t\t\"docker.io\/library\/hello-world\",\n\t\t\"index.docker.io\/library\/hello-world\",\n\t}\n\tfor _, name := range names {\n\t\tpullCmd := exec.Command(dockerBinary, \"pull\", name)\n\t\tout, exitCode, err := runCommandWithOutput(pullCmd)\n\t\tif err != nil || exitCode != 0 {\n\t\t\tt.Errorf(\"pulling the '%s' image from the registry has failed: %s\", name, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ ensure we don't have multiple image names.\n\t\timagesCmd := exec.Command(dockerBinary, \"images\")\n\t\tout, _, err = runCommandWithOutput(imagesCmd)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"listing images failed with errors: %v\", err)\n\t\t} else if strings.Contains(out, name) {\n\t\t\tt.Errorf(\"images should not have listed '%s'\", name)\n\t\t}\n\t}\n\tlogDone(\"pull - pull official names\")\n}\n<|endoftext|>"} {"text":"<commit_before>package bark\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\n\t\"github.com\/NYTimes\/gizmo\/auth\"\n\t\"github.com\/NYTimes\/gizmo\/auth\/gcp\"\n\t\"github.com\/NYTimes\/gizmo\/server\/kit\"\n\t\"github.com\/go-kit\/kit\/endpoint\"\n\thttptransport \"github.com\/go-kit\/kit\/transport\/http\"\n\t\"github.com\/kelseyhightower\/envconfig\"\n\t\"github.com\/pkg\/errors\"\n\t\"google.golang.org\/grpc\"\n)\n\ntype econfig struct {\n\tSlackKeys []string `envconfig:\"SLACK_KEYS\"`\n\n\tTwitterTokens []string `envconfig:\"TWITTER_TOKENS\"`\n\tTwitterSecrets []string `envconfig:\"TWITTER_SECRETS\"`\n\n\tAuth gcp.IdentityConfig `envconfig:\"AUTH\"`\n}\n\ntype service struct {\n\talertsOut []AlertBarker\n\n\teventsOut []EventBarker\n\n\tverifier *auth.Verifier\n}\n\nfunc NewService() (kit.Service, error) {\n\tctx := context.Background()\n\n\tvar cfg econfig\n\tenvconfig.MustProcess(\"\", &cfg)\n\n\tv, err := gcp.NewDefaultIdentityVerifier(ctx, cfg.Auth)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to init ID verifier\")\n\t}\n\n\tvar (\n\t\talerts []AlertBarker\n\t\tevents []EventBarker\n\t)\n\n\tfor _, key := range cfg.SlackKeys {\n\t\talerts = append(alerts, NewSlackAlertBarker(\n\t\t\tSlackConfig{Key: key, BotName: \"Newshound Alerts\"}))\n\t\tevents = append(events, NewSlackEventBarker(\n\t\t\tSlackConfig{Key: key, BotName: \"Newshound Alerts\"}))\n\t}\n\n\tif len(cfg.TwitterSecrets) != len(cfg.TwitterTokens) {\n\t\treturn nil, errors.Wrap(err, \"invalid twitter config. token counts mismatch\")\n\t}\n\n\tfor i, token := range cfg.TwitterTokens {\n\t\tsecret := cfg.TwitterSecrets[i]\n\t\talerts = append(alerts, NewTwitterAlertBarker(token, secret))\n\t\tevents = append(events, NewTwitterEventBarker(token, secret))\n\t}\n\n\treturn &service{\n\t\tverifier: v,\n\t\talertsOut: alerts,\n\t\teventsOut: events,\n\t}, nil\n}\n\nfunc (s *service) Middleware(e endpoint.Endpoint) endpoint.Endpoint {\n\treturn e\n}\n\nfunc (s *service) HTTPMiddleware(h http.Handler) http.Handler {\n\tif s.verifier == nil {\n\t\treturn h\n\t}\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tok, err := s.verifier.VerifyRequest(r)\n\t\tif err != nil || !ok {\n\t\t\tcode := http.StatusForbidden\n\t\t\thttp.Error(w, http.StatusText(code), code)\n\t\t\treturn\n\t\t}\n\n\t\th.ServeHTTP(w, r)\n\t})\n}\n\nfunc (s *service) HTTPOptions() []httptransport.ServerOption {\n\treturn nil\n}\n\nfunc (s *service) HTTPRouterOptions() []kit.RouterOption {\n\treturn nil\n}\n\nfunc (s *service) HTTPEndpoints() map[string]map[string]kit.HTTPEndpoint {\n\treturn map[string]map[string]kit.HTTPEndpoint{\n\t\t\"\/svc\/newshound\/v1\/bark\/alert\": {\n\t\t\t\"POST\": {\n\t\t\t\tDecoder: decodeAlert,\n\t\t\t\tEndpoint: s.postAlert,\n\t\t\t},\n\t\t},\n\t\t\"\/svc\/newshound\/v1\/bark\/event\": {\n\t\t\t\"POST\": {\n\t\t\t\tDecoder: decodeEvent,\n\t\t\t\tEndpoint: s.postEvent,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc (s *service) RPCMiddleware() grpc.UnaryServerInterceptor {\n\treturn nil\n}\n\nfunc (s *service) RPCServiceDesc() *grpc.ServiceDesc {\n\treturn nil\n}\n\nfunc (s *service) RPCOptions() []grpc.ServerOption {\n\treturn nil\n}\n\ntype psmessage struct {\n\tMessage psdata `json:\"message\"`\n}\ntype psdata struct {\n\tData []byte `json:\"data\"`\n}\n<commit_msg>debugging barkd<commit_after>package bark\n\nimport (\n\t\"context\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/NYTimes\/gizmo\/auth\"\n\t\"github.com\/NYTimes\/gizmo\/auth\/gcp\"\n\t\"github.com\/NYTimes\/gizmo\/server\/kit\"\n\t\"github.com\/go-kit\/kit\/endpoint\"\n\thttptransport \"github.com\/go-kit\/kit\/transport\/http\"\n\t\"github.com\/kelseyhightower\/envconfig\"\n\t\"github.com\/pkg\/errors\"\n\t\"google.golang.org\/grpc\"\n)\n\ntype econfig struct {\n\tSlackKeys []string `envconfig:\"SLACK_KEYS\"`\n\n\tTwitterTokens []string `envconfig:\"TWITTER_TOKENS\"`\n\tTwitterSecrets []string `envconfig:\"TWITTER_SECRETS\"`\n\n\tAuth gcp.IdentityConfig `envconfig:\"AUTH\"`\n}\n\ntype service struct {\n\talertsOut []AlertBarker\n\n\teventsOut []EventBarker\n\n\tverifier *auth.Verifier\n}\n\nfunc NewService() (kit.Service, error) {\n\tctx := context.Background()\n\n\tvar cfg econfig\n\tenvconfig.MustProcess(\"\", &cfg)\n\n\tv, err := gcp.NewDefaultIdentityVerifier(ctx, cfg.Auth)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"unable to init ID verifier\")\n\t}\n\n\tvar (\n\t\talerts []AlertBarker\n\t\tevents []EventBarker\n\t\tslackers, twitters int\n\t)\n\n\tfor _, key := range cfg.SlackKeys {\n\t\talerts = append(alerts, NewSlackAlertBarker(\n\t\t\tSlackConfig{Key: key, BotName: \"Newshound Alerts\"}))\n\t\tevents = append(events, NewSlackEventBarker(\n\t\t\tSlackConfig{Key: key, BotName: \"Newshound Alerts\"}))\n\t\tslackers++\n\t}\n\n\tif len(cfg.TwitterSecrets) != len(cfg.TwitterTokens) {\n\t\treturn nil, errors.Wrap(err, \"invalid twitter config. token counts mismatch\")\n\t}\n\n\tfor i, token := range cfg.TwitterTokens {\n\t\tsecret := cfg.TwitterSecrets[i]\n\t\talerts = append(alerts, NewTwitterAlertBarker(token, secret))\n\t\tevents = append(events, NewTwitterEventBarker(token, secret))\n\t\ttwitters++\n\t}\n\n\tlog.Printf(\"starting with %d slack accounts and %d twitter accounts\",\n\t\tslackers, twitters)\n\n\treturn &service{\n\t\tverifier: v,\n\t\talertsOut: alerts,\n\t\teventsOut: events,\n\t}, nil\n}\n\nfunc (s *service) Middleware(e endpoint.Endpoint) endpoint.Endpoint {\n\treturn e\n}\n\nfunc (s *service) HTTPMiddleware(h http.Handler) http.Handler {\n\tif s.verifier == nil {\n\t\treturn h\n\t}\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tok, err := s.verifier.VerifyRequest(r)\n\t\tif err != nil || !ok {\n\t\t\tcode := http.StatusForbidden\n\t\t\thttp.Error(w, http.StatusText(code), code)\n\t\t\treturn\n\t\t}\n\n\t\th.ServeHTTP(w, r)\n\t})\n}\n\nfunc (s *service) HTTPOptions() []httptransport.ServerOption {\n\treturn nil\n}\n\nfunc (s *service) HTTPRouterOptions() []kit.RouterOption {\n\treturn nil\n}\n\nfunc (s *service) HTTPEndpoints() map[string]map[string]kit.HTTPEndpoint {\n\treturn map[string]map[string]kit.HTTPEndpoint{\n\t\t\"\/svc\/newshound\/v1\/bark\/alert\": {\n\t\t\t\"POST\": {\n\t\t\t\tDecoder: decodeAlert,\n\t\t\t\tEndpoint: s.postAlert,\n\t\t\t},\n\t\t},\n\t\t\"\/svc\/newshound\/v1\/bark\/event\": {\n\t\t\t\"POST\": {\n\t\t\t\tDecoder: decodeEvent,\n\t\t\t\tEndpoint: s.postEvent,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc (s *service) RPCMiddleware() grpc.UnaryServerInterceptor {\n\treturn nil\n}\n\nfunc (s *service) RPCServiceDesc() *grpc.ServiceDesc {\n\treturn nil\n}\n\nfunc (s *service) RPCOptions() []grpc.ServerOption {\n\treturn nil\n}\n\ntype psmessage struct {\n\tMessage psdata `json:\"message\"`\n}\ntype psdata struct {\n\tData []byte `json:\"data\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package benchmarks\n\nimport (\n\t\"runtime\"\n\t\"time\"\n)\n\ntype benchmark struct {\n\trepeats int\n\truns int\n\tsignal chan interface{}\n\tf func(int)\n\tresult float64\n}\n\nfunc (b *benchmark) launch() {\n\tdefer func() {\n\t\tb.signal <- b\n\t}()\n\n\tfirst := true\n\tvar minDuration time.Duration\n\tfor i := 0; i < b.runs; i++ {\n\t\truntime.GC()\n\t\tstart := time.Now()\n\t\tb.f(b.repeats)\n\t\tduration := time.Now().Sub(start)\n\t\tif first {\n\t\t\tminDuration = duration\n\t\t} else {\n\t\t\tif minDuration > duration {\n\t\t\t\tminDuration = duration\n\t\t\t}\n\t\t}\n\t}\n\tb.result = minDuration.Seconds()\n}\n\nfunc runBenchmark(runs, repeats int, f func(int)) float64 {\n\tb := benchmark{\n\t\trepeats: repeats,\n\t\truns: runs,\n\t\tsignal: make(chan interface{}),\n\t\tf: f,\n\t}\n\n\tgo b.launch()\n\t<-b.signal\n\treturn b.result\n}\n<commit_msg>fix warnings<commit_after>package benchmarks\n\nimport (\n\t\"runtime\"\n\t\"time\"\n)\n\ntype benchmark struct {\n\tsignal chan interface{}\n\tf func(int)\n\tresult float64\n}\n\nfunc (b *benchmark) launch(runs, repeats int) {\n\tdefer func() {\n\t\tb.signal <- b\n\t}()\n\n\tfirst := true\n\tvar minDuration time.Duration\n\tfor i := 0; i < runs; i++ {\n\t\truntime.GC()\n\t\tstart := time.Now()\n\t\tb.f(repeats)\n\t\tduration := time.Now().Sub(start)\n\t\tif first {\n\t\t\tminDuration = duration\n\t\t} else {\n\t\t\tif minDuration > duration {\n\t\t\t\tminDuration = duration\n\t\t\t}\n\t\t}\n\t}\n\tb.result = minDuration.Seconds()\n}\n\nfunc runBenchmark(runs, repeats int, f func(int)) float64 {\n\tb := benchmark{\n\t\tsignal: make(chan interface{}),\n\t\tf: f,\n\t}\n\n\tgo b.launch(runs, repeats)\n\t<-b.signal\n\treturn b.result\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"fmt\"\n\ntype Number interface {\n\tint64 | float64\n}\n\/\/ SumNumbers sums the values of map m. Its supports both integers\n\/\/ and floats as map values.\nfunc SumNumbers[K comparable, V Number](m map[K]V) V {\n\tvar s V\n\tfor _, v := range m {\n\t\ts += v\n\t}\n\treturn s\n}\n\/\/ SumInts adds together the values of m.\nfunc SumInts(m map[string]int64) int64 {\n\tvar s int64\n\tfor _, v := range m {\n\t\ts += v\n\t}\n\treturn s\n}\n\n\/\/ SumFloats adds together the values of m.\nfunc SumFloats(m map[string]float64) float64 {\n\tvar s float64\n\tfor _, v := range m {\n\t\ts += v\n\t}\n\treturn s\n}\n\nfunc main() {\n\t\/\/ Initialize a map for the integer values\n\tints := map[string]int64{\n\t\t\"first\": 34,\n\t\t\"second\": 12,\n\t}\n\n\t\/\/ Initialize a map for the float values\n\tfloats := map[string]float64{\n\t\t\"first\": 35.98,\n\t\t\"second\": 26.99,\n\t}\n\n\tfmt.Printf(\"Non-Generic Sums: %v and %v\\n\",\n\t\tSumInts(ints),\n\t\tSumFloats(floats))\n\n\tfmt.Printf(\"Generic Sums: %v and %v\\n\",\n\t\tSumIntsOrFloats[string, int64](ints),\n\t\tSumIntsOrFloats[string, float64](floats))\n\n\tfmt.Printf(\"Generic Sums, type parameters inferred: %v and %v\\n\",\n\t\tSumIntsOrFloats(ints),\n\t\tSumIntsOrFloats(floats))\n\n\tfmt.Printf(\"Generic Sums with Constraint: %v and %v\\n\",\n\t\tSumNumbers(ints),\n\t\tSumNumbers(floats))\n\n}\n\n\/\/ SumIntsOrFloats sums the values of map m. It supports both int64 and float64\n\/\/ as types for map values.\nfunc SumIntsOrFloats[K comparable, V int64 | float64](m map[K]V) V {\n\tvar s V\n\tfor _, v := range m {\n\t\ts += v\n\t}\n\treturn s\n}\n<commit_msg>Modified Generics example<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"math\/rand\"\n)\n\ntype Number interface {\n\t~int | ~int32 | ~int64 | ~float64\n}\n\/\/ SumNumbers sums the values of map m. Its supports both integers\n\/\/ and floats as map values.\nfunc SumNumbers[K comparable, V Number](m map[K]V) V {\n\tvar s V\n\tfor _, v := range m {\n\t\ts += v\n\t}\n\treturn s\n}\n\/\/ SumInts adds together the values of m.\nfunc SumInts(m map[string]int64) int64 {\n\tvar s int64\n\tfor _, v := range m {\n\t\ts += v\n\t}\n\treturn s\n}\n\n\/\/ SumFloats adds together the values of m.\nfunc SumFloats(m map[string]float64) float64 {\n\tvar s float64\n\tfor _, v := range m {\n\t\ts += v\n\t}\n\treturn s\n}\n\nfunc main() {\n\t\/\/ Initialize a map for the integer values\n\tints := map[string]int64{\n\t\t\"first\": 34,\n\t\t\"second\": 12,\n\t}\n\n\t\/\/ Initialize a map for the float values\n\tfloats := map[string]float64{\n\t\t\"first\": 35.98,\n\t\t\"second\": 26.99,\n\t}\n\n\tfmt.Printf(\"Non-Generic Sums: %v and %v\\n\",\n\t\tSumInts(ints),\n\t\tSumFloats(floats))\n\n\tfmt.Printf(\"Generic Sums: %v and %v\\n\",\n\t\tSumIntsOrFloats[string, int64](ints),\n\t\tSumIntsOrFloats[string, float64](floats))\n\n\tfmt.Printf(\"Generic Sums, type parameters inferred: %v and %v\\n\",\n\t\tSumIntsOrFloats(ints),\n\t\tSumIntsOrFloats(floats))\n\n\tfmt.Printf(\"Generic Sums with Constraint: %v and %v\\n\",\n\t\tSumNumbers(ints),\n\t\tSumNumbers(floats))\n\n\tfmt.Printf(\"Max with integer: %v\\n\",Max([]num{10, 15, 4, 25, 16, 18, 2}))\n\tfmt.Printf(\"Max with float: %v\\n\",Max([]float64{6.2, 4.1, 6.2, 9.6, 8.2, 1.5, 4.7}))\n\tfmt.Printf(\"Contains with string: %v\\n\", Contains([]string{\"one\", \"two\", \"three\"},\"two\"))\n\tfmt.Printf(\"Contains with int: %v\\n\", Contains([]int{100, 200, 300},50))\n\n\trandomNumbers:=make([]int, 5, 5)\n\tfor i:=0;i<5; i++ {\n\t\trandomNumbers[i]=rand.Intn(25)\n\t}\n fibvalues:= MapSlice(randomNumbers,findFibonacci)\n fmt.Println(fibvalues)\n sqrs:= MapSlice(randomNumbers, func(num int) int {\n\t return num * num\n })\n\tfmt.Println(sqrs)\n\n}\n\ntype fibvalue struct {\n\tinput, value int\n}\nfunc findFibonacci(num int) fibvalue {\n\tinput := float64(num)\n\t\/\/ Fibonacci using Binet's formula\n\tPhi := (1 + math.Sqrt(5)) \/ 2\n\tphi := (1 - math.Sqrt(5)) \/ 2\n\tresult := (math.Pow(Phi, input) - math.Pow(phi, input)) \/ math.Sqrt(5)\n\treturn fibvalue {\n\t\tinput: num,\n\t\tvalue: int(result),\n\t}\n}\n\/\/ SumIntsOrFloats sums the values of map m. It supports both int64 and float64\n\/\/ as types for map values.\nfunc SumIntsOrFloats[K comparable, V int64 | float64](m map[K]V) V {\n\tvar s V\n\tfor _, v := range m {\n\t\ts += v\n\t}\n\treturn s\n}\n\ntype num int\nfunc Max[T Number](s []T) T {\n\tif len(s) == 0 {\n\t\tvar zero T\n\t\treturn zero\n\t}\n\tm := s[0]\n\tfor _, v := range s {\n\t\tif m < v {\n\t\t\tm = v\n\t\t}\n\t}\n\treturn m\n}\nfunc Contains[T comparable](elems []T, v T) bool {\n\tfor _, s := range elems {\n\t\tif v == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc MapSlice[T any, M any](a []T, f func(T) M) []M {\n\tn := make([]M, len(a))\n\tfor i, e := range a {\n\t\tn[i] = f(e)\n\t}\n\treturn n\n}<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 The Matrix.org Foundation C.I.C.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage deltas\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\n\t\"github.com\/lib\/pq\"\n\t\"github.com\/matrix-org\/dendrite\/internal\/sqlutil\"\n\t\"github.com\/matrix-org\/dendrite\/roomserver\/types\"\n\t\"github.com\/matrix-org\/util\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\ntype stateSnapshotData struct {\n\tStateSnapshotNID types.StateSnapshotNID\n\tRoomNID types.RoomNID\n}\n\ntype stateBlockData struct {\n\tstateSnapshotData\n\tStateBlockNID types.StateBlockNID\n\tEventNIDs types.EventNIDs\n}\n\nfunc LoadStateBlocksRefactor(m *sqlutil.Migrations) {\n\tm.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor)\n}\n\n\/\/ nolint:gocyclo\nfunc UpStateBlocksRefactor(tx *sql.Tx) error {\n\tlogrus.Warn(\"Performing state storage upgrade. Please wait, this may take some time!\")\n\tdefer logrus.Warn(\"State storage upgrade complete\")\n\n\tvar snapshotcount int\n\tvar maxsnapshotid int\n\tvar maxblockid int\n\tif err := tx.QueryRow(`SELECT COUNT(DISTINCT state_snapshot_nid) FROM roomserver_state_snapshots;`).Scan(&snapshotcount); err != nil {\n\t\treturn fmt.Errorf(\"tx.QueryRow.Scan (count snapshots): %w\", err)\n\t}\n\tif err := tx.QueryRow(`SELECT COALESCE(MAX(state_snapshot_nid),0) FROM roomserver_state_snapshots;`).Scan(&maxsnapshotid); err != nil {\n\t\treturn fmt.Errorf(\"tx.QueryRow.Scan (count snapshots): %w\", err)\n\t}\n\tif err := tx.QueryRow(`SELECT COALESCE(MAX(state_block_nid),0) FROM roomserver_state_block;`).Scan(&maxblockid); err != nil {\n\t\treturn fmt.Errorf(\"tx.QueryRow.Scan (count snapshots): %w\", err)\n\t}\n\tmaxsnapshotid++\n\tmaxblockid++\n\n\tif _, err := tx.Exec(`ALTER TABLE roomserver_state_block RENAME TO _roomserver_state_block;`); err != nil {\n\t\treturn fmt.Errorf(\"tx.Exec: %w\", err)\n\t}\n\tif _, err := tx.Exec(`ALTER TABLE roomserver_state_snapshots RENAME TO _roomserver_state_snapshots;`); err != nil {\n\t\treturn fmt.Errorf(\"tx.Exec: %w\", err)\n\t}\n\t\/\/ We create new sequences starting with the maximum state snapshot and block NIDs.\n\t\/\/ This means that all newly created snapshots and blocks by the migration will have\n\t\/\/ NIDs higher than these values, so that when we come to update the references to\n\t\/\/ these NIDs using UPDATE statements, we can guarantee we are only ever updating old\n\t\/\/ values and not accidentally overwriting new ones.\n\tif _, err := tx.Exec(fmt.Sprintf(`CREATE SEQUENCE roomserver_state_block_nid_sequence START WITH %d;`, maxblockid)); err != nil {\n\t\treturn fmt.Errorf(\"tx.Exec: %w\", err)\n\t}\n\tif _, err := tx.Exec(fmt.Sprintf(`CREATE SEQUENCE roomserver_state_snapshot_nid_sequence START WITH %d;`, maxsnapshotid)); err != nil {\n\t\treturn fmt.Errorf(\"tx.Exec: %w\", err)\n\t}\n\t_, err := tx.Exec(`\n\t\tCREATE TABLE IF NOT EXISTS roomserver_state_block (\n\t\t\tstate_block_nid bigint PRIMARY KEY DEFAULT nextval('roomserver_state_block_nid_sequence'),\n\t\t\tstate_block_hash BYTEA UNIQUE,\n\t\t\tevent_nids bigint[] NOT NULL\n\t\t);\n\t`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"tx.Exec (create blocks table): %w\", err)\n\t}\n\t_, err = tx.Exec(`\n\t\tCREATE TABLE IF NOT EXISTS roomserver_state_snapshots (\n\t\t\tstate_snapshot_nid bigint PRIMARY KEY DEFAULT nextval('roomserver_state_snapshot_nid_sequence'),\n\t\t\tstate_snapshot_hash BYTEA UNIQUE,\n\t\t\troom_nid bigint NOT NULL,\n\t\t\tstate_block_nids bigint[] NOT NULL\n\t\t);\n\t`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"tx.Exec (create snapshots table): %w\", err)\n\t}\n\tlogrus.Warn(\"New tables created...\")\n\n\tbatchsize := 100\n\tfor batchoffset := 0; batchoffset < snapshotcount; batchoffset += batchsize {\n\t\tvar snapshotrows *sql.Rows\n\t\tsnapshotrows, err = tx.Query(`\n\t\t\tSELECT\n\t\t\t\tstate_snapshot_nid,\n\t\t\t\troom_nid,\n\t\t\t\tstate_block_nid,\n\t\t\t\tARRAY_AGG(event_nid) AS event_nids\n\t\t\tFROM (\n\t\t\t\tSELECT\n\t\t\t\t\t_roomserver_state_snapshots.state_snapshot_nid,\n\t\t\t\t\t_roomserver_state_snapshots.room_nid,\n\t\t\t\t\t_roomserver_state_block.state_block_nid,\n\t\t\t\t\t_roomserver_state_block.event_nid\n\t\t\t\tFROM\n\t\t\t\t\t_roomserver_state_snapshots\n\t\t\t\t\tJOIN _roomserver_state_block ON _roomserver_state_block.state_block_nid = ANY (_roomserver_state_snapshots.state_block_nids)\n\t\t\t\tWHERE\n\t\t\t\t\t_roomserver_state_snapshots.state_snapshot_nid = ANY (\n\t\t\t\t\t\tSELECT\n\t\t\t\t\t\t\t_roomserver_state_snapshots.state_snapshot_nid\n\t\t\t\t\t\tFROM\n\t\t\t\t\t\t\t_roomserver_state_snapshots\n\t\t\t\t\t\tORDER BY _roomserver_state_snapshots.state_snapshot_nid ASC\n\t\t\t\t\t\tLIMIT $1 OFFSET $2\n\t\t\t\t\t)\n\t\t\t) AS _roomserver_state_block\n\t\t\tGROUP BY\n\t\t\t\tstate_snapshot_nid,\n\t\t\t\troom_nid,\n\t\t\t\tstate_block_nid;\n\t\t`, batchsize, batchoffset)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"tx.Query: %w\", err)\n\t\t}\n\n\t\tlogrus.Warnf(\"Rewriting snapshots %d-%d of %d...\", batchoffset, batchoffset+batchsize, snapshotcount)\n\t\tvar snapshots []stateBlockData\n\n\t\tfor snapshotrows.Next() {\n\t\t\tvar snapshot stateBlockData\n\t\t\tvar eventsarray pq.Int64Array\n\t\t\tif err = snapshotrows.Scan(&snapshot.StateSnapshotNID, &snapshot.RoomNID, &snapshot.StateBlockNID, &eventsarray); err != nil {\n\t\t\t\treturn fmt.Errorf(\"rows.Scan: %w\", err)\n\t\t\t}\n\t\t\tfor _, e := range eventsarray {\n\t\t\t\tsnapshot.EventNIDs = append(snapshot.EventNIDs, types.EventNID(e))\n\t\t\t}\n\t\t\tsnapshot.EventNIDs = snapshot.EventNIDs[:util.SortAndUnique(snapshot.EventNIDs)]\n\t\t\tsnapshots = append(snapshots, snapshot)\n\t\t}\n\n\t\tif err = snapshotrows.Close(); err != nil {\n\t\t\treturn fmt.Errorf(\"snapshots.Close: %w\", err)\n\t\t}\n\n\t\tnewsnapshots := map[stateSnapshotData]types.StateBlockNIDs{}\n\n\t\tfor _, snapshot := range snapshots {\n\t\t\tvar eventsarray pq.Int64Array\n\t\t\tfor _, e := range snapshot.EventNIDs {\n\t\t\t\teventsarray = append(eventsarray, int64(e))\n\t\t\t}\n\n\t\t\tvar blocknid types.StateBlockNID\n\t\t\terr = tx.QueryRow(`\n\t\t\t\tINSERT INTO roomserver_state_block (state_block_hash, event_nids)\n\t\t\t\t\tVALUES ($1, $2)\n\t\t\t\t\tON CONFLICT (state_block_hash) DO UPDATE SET event_nids=$2\n\t\t\t\t\tRETURNING state_block_nid\n\t\t\t`, snapshot.EventNIDs.Hash(), eventsarray).Scan(&blocknid)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"tx.QueryRow.Scan (insert new block with %d events): %w\", len(eventsarray), err)\n\t\t\t}\n\t\t\tindex := stateSnapshotData{snapshot.StateSnapshotNID, snapshot.RoomNID}\n\t\t\tnewsnapshots[index] = append(newsnapshots[index], blocknid)\n\t\t}\n\n\t\tfor snapshotdata, newblocks := range newsnapshots {\n\t\t\tvar newblocksarray pq.Int64Array\n\t\t\tfor _, b := range newblocks {\n\t\t\t\tnewblocksarray = append(newblocksarray, int64(b))\n\t\t\t}\n\n\t\t\tvar newNID types.StateSnapshotNID\n\t\t\terr = tx.QueryRow(`\n\t\t\t\tINSERT INTO roomserver_state_snapshots (state_snapshot_hash, room_nid, state_block_nids)\n\t\t\t\t\tVALUES ($1, $2, $3)\n\t\t\t\t\tON CONFLICT (state_snapshot_hash) DO UPDATE SET room_nid=$2\n\t\t\t\t\tRETURNING state_snapshot_nid\n\t\t\t`, newblocks.Hash(), snapshotdata.RoomNID, newblocksarray).Scan(&newNID)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"tx.QueryRow.Scan (insert new snapshot): %w\", err)\n\t\t\t}\n\n\t\t\tif _, err = tx.Exec(`UPDATE roomserver_events SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newNID, snapshotdata.StateSnapshotNID, maxsnapshotid); err != nil {\n\t\t\t\treturn fmt.Errorf(\"tx.Exec (update events): %w\", err)\n\t\t\t}\n\n\t\t\tif _, err = tx.Exec(`UPDATE roomserver_rooms SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newNID, snapshotdata.StateSnapshotNID, maxsnapshotid); err != nil {\n\t\t\t\treturn fmt.Errorf(\"tx.Exec (update rooms): %w\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ By this point we should have no more state_snapshot_nids below maxsnapshotid in either roomserver_rooms or roomserver_events\n\t\/\/ If we do, this is a problem if Dendrite tries to load the snapshot as it will not exist\n\t\/\/ in roomserver_state_snapshots\n\tvar count int64\n\tif err = tx.QueryRow(`SELECT COUNT(*) FROM roomserver_events WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid).Scan(&count); err != nil {\n\t\treturn fmt.Errorf(\"assertion query failed: %s\", err)\n\t}\n\tif count > 0 {\n\t\treturn fmt.Errorf(\"%d events exist in roomserver_events which have not been converted to a new state_snapshot_nid; this is a bug, please report\", count)\n\t}\n\tif err = tx.QueryRow(`SELECT COUNT(*) FROM roomserver_rooms WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid).Scan(&count); err != nil {\n\t\treturn fmt.Errorf(\"assertion query failed: %s\", err)\n\t}\n\tif count > 0 {\n\t\treturn fmt.Errorf(\"%d rooms exist in roomserver_rooms which have not been converted to a new state_snapshot_nid; this is a bug, please report\", count)\n\t}\n\n\tif _, err = tx.Exec(`\n\t\tDROP TABLE _roomserver_state_snapshots;\n\t\tDROP SEQUENCE roomserver_state_snapshot_nid_seq;\n\t`); err != nil {\n\t\treturn fmt.Errorf(\"tx.Exec (delete old snapshot table): %w\", err)\n\t}\n\tif _, err = tx.Exec(`\n\t\tDROP TABLE _roomserver_state_block;\n\t\tDROP SEQUENCE roomserver_state_block_nid_seq;\n\t`); err != nil {\n\t\treturn fmt.Errorf(\"tx.Exec (delete old block table): %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc DownStateBlocksRefactor(tx *sql.Tx) error {\n\tpanic(\"Downgrading state storage is not supported\")\n}\n<commit_msg>db migration: handle create events with no state blocks from v0.1.0 (#1904)<commit_after>\/\/ Copyright 2020 The Matrix.org Foundation C.I.C.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage deltas\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\n\t\"github.com\/lib\/pq\"\n\t\"github.com\/matrix-org\/dendrite\/internal\/sqlutil\"\n\t\"github.com\/matrix-org\/dendrite\/roomserver\/types\"\n\t\"github.com\/matrix-org\/util\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\ntype stateSnapshotData struct {\n\tStateSnapshotNID types.StateSnapshotNID\n\tRoomNID types.RoomNID\n}\n\ntype stateBlockData struct {\n\tstateSnapshotData\n\tStateBlockNID types.StateBlockNID\n\tEventNIDs types.EventNIDs\n}\n\nfunc LoadStateBlocksRefactor(m *sqlutil.Migrations) {\n\tm.AddMigration(UpStateBlocksRefactor, DownStateBlocksRefactor)\n}\n\n\/\/ nolint:gocyclo\nfunc UpStateBlocksRefactor(tx *sql.Tx) error {\n\tlogrus.Warn(\"Performing state storage upgrade. Please wait, this may take some time!\")\n\tdefer logrus.Warn(\"State storage upgrade complete\")\n\n\tvar snapshotcount int\n\tvar maxsnapshotid int\n\tvar maxblockid int\n\tif err := tx.QueryRow(`SELECT COUNT(DISTINCT state_snapshot_nid) FROM roomserver_state_snapshots;`).Scan(&snapshotcount); err != nil {\n\t\treturn fmt.Errorf(\"tx.QueryRow.Scan (count snapshots): %w\", err)\n\t}\n\tif err := tx.QueryRow(`SELECT COALESCE(MAX(state_snapshot_nid),0) FROM roomserver_state_snapshots;`).Scan(&maxsnapshotid); err != nil {\n\t\treturn fmt.Errorf(\"tx.QueryRow.Scan (count snapshots): %w\", err)\n\t}\n\tif err := tx.QueryRow(`SELECT COALESCE(MAX(state_block_nid),0) FROM roomserver_state_block;`).Scan(&maxblockid); err != nil {\n\t\treturn fmt.Errorf(\"tx.QueryRow.Scan (count snapshots): %w\", err)\n\t}\n\tmaxsnapshotid++\n\tmaxblockid++\n\n\tif _, err := tx.Exec(`ALTER TABLE roomserver_state_block RENAME TO _roomserver_state_block;`); err != nil {\n\t\treturn fmt.Errorf(\"tx.Exec: %w\", err)\n\t}\n\tif _, err := tx.Exec(`ALTER TABLE roomserver_state_snapshots RENAME TO _roomserver_state_snapshots;`); err != nil {\n\t\treturn fmt.Errorf(\"tx.Exec: %w\", err)\n\t}\n\t\/\/ We create new sequences starting with the maximum state snapshot and block NIDs.\n\t\/\/ This means that all newly created snapshots and blocks by the migration will have\n\t\/\/ NIDs higher than these values, so that when we come to update the references to\n\t\/\/ these NIDs using UPDATE statements, we can guarantee we are only ever updating old\n\t\/\/ values and not accidentally overwriting new ones.\n\tif _, err := tx.Exec(fmt.Sprintf(`CREATE SEQUENCE roomserver_state_block_nid_sequence START WITH %d;`, maxblockid)); err != nil {\n\t\treturn fmt.Errorf(\"tx.Exec: %w\", err)\n\t}\n\tif _, err := tx.Exec(fmt.Sprintf(`CREATE SEQUENCE roomserver_state_snapshot_nid_sequence START WITH %d;`, maxsnapshotid)); err != nil {\n\t\treturn fmt.Errorf(\"tx.Exec: %w\", err)\n\t}\n\t_, err := tx.Exec(`\n\t\tCREATE TABLE IF NOT EXISTS roomserver_state_block (\n\t\t\tstate_block_nid bigint PRIMARY KEY DEFAULT nextval('roomserver_state_block_nid_sequence'),\n\t\t\tstate_block_hash BYTEA UNIQUE,\n\t\t\tevent_nids bigint[] NOT NULL\n\t\t);\n\t`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"tx.Exec (create blocks table): %w\", err)\n\t}\n\t_, err = tx.Exec(`\n\t\tCREATE TABLE IF NOT EXISTS roomserver_state_snapshots (\n\t\t\tstate_snapshot_nid bigint PRIMARY KEY DEFAULT nextval('roomserver_state_snapshot_nid_sequence'),\n\t\t\tstate_snapshot_hash BYTEA UNIQUE,\n\t\t\troom_nid bigint NOT NULL,\n\t\t\tstate_block_nids bigint[] NOT NULL\n\t\t);\n\t`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"tx.Exec (create snapshots table): %w\", err)\n\t}\n\tlogrus.Warn(\"New tables created...\")\n\n\tbatchsize := 100\n\tfor batchoffset := 0; batchoffset < snapshotcount; batchoffset += batchsize {\n\t\tvar snapshotrows *sql.Rows\n\t\tsnapshotrows, err = tx.Query(`\n\t\t\tSELECT\n\t\t\t\tstate_snapshot_nid,\n\t\t\t\troom_nid,\n\t\t\t\tstate_block_nid,\n\t\t\t\tARRAY_AGG(event_nid) AS event_nids\n\t\t\tFROM (\n\t\t\t\tSELECT\n\t\t\t\t\t_roomserver_state_snapshots.state_snapshot_nid,\n\t\t\t\t\t_roomserver_state_snapshots.room_nid,\n\t\t\t\t\t_roomserver_state_block.state_block_nid,\n\t\t\t\t\t_roomserver_state_block.event_nid\n\t\t\t\tFROM\n\t\t\t\t\t_roomserver_state_snapshots\n\t\t\t\t\tLEFT JOIN _roomserver_state_block ON _roomserver_state_block.state_block_nid = ANY (_roomserver_state_snapshots.state_block_nids)\n\t\t\t\tWHERE\n\t\t\t\t\t_roomserver_state_snapshots.state_snapshot_nid = ANY (\n\t\t\t\t\t\tSELECT\n\t\t\t\t\t\t\t_roomserver_state_snapshots.state_snapshot_nid\n\t\t\t\t\t\tFROM\n\t\t\t\t\t\t\t_roomserver_state_snapshots\n\t\t\t\t\t\tORDER BY _roomserver_state_snapshots.state_snapshot_nid ASC\n\t\t\t\t\t\tLIMIT $1 OFFSET $2\n\t\t\t\t\t)\n\t\t\t) AS _roomserver_state_block\n\t\t\tGROUP BY\n\t\t\t\tstate_snapshot_nid,\n\t\t\t\troom_nid,\n\t\t\t\tstate_block_nid;\n\t\t`, batchsize, batchoffset)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"tx.Query: %w\", err)\n\t\t}\n\n\t\tlogrus.Warnf(\"Rewriting snapshots %d-%d of %d...\", batchoffset, batchoffset+batchsize, snapshotcount)\n\t\tvar snapshots []stateBlockData\n\n\t\tvar badCreateSnapshots []stateBlockData\n\t\tfor snapshotrows.Next() {\n\t\t\tvar snapshot stateBlockData\n\t\t\tvar eventsarray []sql.NullInt64\n\t\t\tvar nulStateBlockNID sql.NullInt64\n\t\t\tif err = snapshotrows.Scan(&snapshot.StateSnapshotNID, &snapshot.RoomNID, &nulStateBlockNID, pq.Array(&eventsarray)); err != nil {\n\t\t\t\treturn fmt.Errorf(\"rows.Scan: %w\", err)\n\t\t\t}\n\t\t\tif nulStateBlockNID.Valid {\n\t\t\t\tsnapshot.StateBlockNID = types.StateBlockNID(nulStateBlockNID.Int64)\n\t\t\t}\n\t\t\t\/\/ Dendrite v0.1.0 would not make a state block for the create event, resulting in [NULL] from the query above.\n\t\t\t\/\/ Remember the snapshot and we'll fill it in after we close this cursor as we can't have 2 queries running at the same time\n\t\t\tif len(eventsarray) == 1 && !eventsarray[0].Valid {\n\t\t\t\tbadCreateSnapshots = append(badCreateSnapshots, snapshot)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, e := range eventsarray {\n\t\t\t\tif e.Valid {\n\t\t\t\t\tsnapshot.EventNIDs = append(snapshot.EventNIDs, types.EventNID(e.Int64))\n\t\t\t\t}\n\t\t\t}\n\t\t\tsnapshot.EventNIDs = snapshot.EventNIDs[:util.SortAndUnique(snapshot.EventNIDs)]\n\t\t\tsnapshots = append(snapshots, snapshot)\n\t\t}\n\t\tif err = snapshotrows.Close(); err != nil {\n\t\t\treturn fmt.Errorf(\"snapshots.Close: %w\", err)\n\t\t}\n\t\t\/\/ fill in bad create snapshots\n\t\tfor _, s := range badCreateSnapshots {\n\t\t\tvar createEventNID types.EventNID\n\t\t\terr = tx.QueryRow(\n\t\t\t\t`SELECT event_nid FROM roomserver_events WHERE state_snapshot_nid = $1 AND event_type_nid = 1`, s.StateSnapshotNID,\n\t\t\t).Scan(&createEventNID)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"cannot xref null state block with snapshot %d: %s\", s.StateSnapshotNID, err)\n\t\t\t}\n\t\t\tif createEventNID == 0 {\n\t\t\t\treturn fmt.Errorf(\"cannot xref null state block with snapshot %d, no create event\", s.StateSnapshotNID)\n\t\t\t}\n\t\t\ts.EventNIDs = append(s.EventNIDs, createEventNID)\n\t\t\tsnapshots = append(snapshots, s)\n\t\t}\n\n\t\tnewsnapshots := map[stateSnapshotData]types.StateBlockNIDs{}\n\n\t\tfor _, snapshot := range snapshots {\n\t\t\tvar eventsarray pq.Int64Array\n\t\t\tfor _, e := range snapshot.EventNIDs {\n\t\t\t\teventsarray = append(eventsarray, int64(e))\n\t\t\t}\n\n\t\t\tvar blocknid types.StateBlockNID\n\t\t\terr = tx.QueryRow(`\n\t\t\t\tINSERT INTO roomserver_state_block (state_block_hash, event_nids)\n\t\t\t\t\tVALUES ($1, $2)\n\t\t\t\t\tON CONFLICT (state_block_hash) DO UPDATE SET event_nids=$2\n\t\t\t\t\tRETURNING state_block_nid\n\t\t\t`, snapshot.EventNIDs.Hash(), eventsarray).Scan(&blocknid)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"tx.QueryRow.Scan (insert new block with %d events): %w\", len(eventsarray), err)\n\t\t\t}\n\t\t\tindex := stateSnapshotData{snapshot.StateSnapshotNID, snapshot.RoomNID}\n\t\t\tnewsnapshots[index] = append(newsnapshots[index], blocknid)\n\t\t}\n\n\t\tfor snapshotdata, newblocks := range newsnapshots {\n\t\t\tvar newblocksarray pq.Int64Array\n\t\t\tfor _, b := range newblocks {\n\t\t\t\tnewblocksarray = append(newblocksarray, int64(b))\n\t\t\t}\n\n\t\t\tvar newNID types.StateSnapshotNID\n\t\t\terr = tx.QueryRow(`\n\t\t\t\tINSERT INTO roomserver_state_snapshots (state_snapshot_hash, room_nid, state_block_nids)\n\t\t\t\t\tVALUES ($1, $2, $3)\n\t\t\t\t\tON CONFLICT (state_snapshot_hash) DO UPDATE SET room_nid=$2\n\t\t\t\t\tRETURNING state_snapshot_nid\n\t\t\t`, newblocks.Hash(), snapshotdata.RoomNID, newblocksarray).Scan(&newNID)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"tx.QueryRow.Scan (insert new snapshot): %w\", err)\n\t\t\t}\n\n\t\t\tif _, err = tx.Exec(`UPDATE roomserver_events SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newNID, snapshotdata.StateSnapshotNID, maxsnapshotid); err != nil {\n\t\t\t\treturn fmt.Errorf(\"tx.Exec (update events): %w\", err)\n\t\t\t}\n\n\t\t\tif _, err = tx.Exec(`UPDATE roomserver_rooms SET state_snapshot_nid=$1 WHERE state_snapshot_nid=$2 AND state_snapshot_nid<$3`, newNID, snapshotdata.StateSnapshotNID, maxsnapshotid); err != nil {\n\t\t\t\treturn fmt.Errorf(\"tx.Exec (update rooms): %w\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ By this point we should have no more state_snapshot_nids below maxsnapshotid in either roomserver_rooms or roomserver_events\n\t\/\/ If we do, this is a problem if Dendrite tries to load the snapshot as it will not exist\n\t\/\/ in roomserver_state_snapshots\n\tvar count int64\n\tif err = tx.QueryRow(`SELECT COUNT(*) FROM roomserver_events WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid).Scan(&count); err != nil {\n\t\treturn fmt.Errorf(\"assertion query failed: %s\", err)\n\t}\n\tif count > 0 {\n\t\treturn fmt.Errorf(\"%d events exist in roomserver_events which have not been converted to a new state_snapshot_nid; this is a bug, please report\", count)\n\t}\n\tif err = tx.QueryRow(`SELECT COUNT(*) FROM roomserver_rooms WHERE state_snapshot_nid < $1 AND state_snapshot_nid != 0`, maxsnapshotid).Scan(&count); err != nil {\n\t\treturn fmt.Errorf(\"assertion query failed: %s\", err)\n\t}\n\tif count > 0 {\n\t\treturn fmt.Errorf(\"%d rooms exist in roomserver_rooms which have not been converted to a new state_snapshot_nid; this is a bug, please report\", count)\n\t}\n\n\tif _, err = tx.Exec(`\n\t\tDROP TABLE _roomserver_state_snapshots;\n\t\tDROP SEQUENCE roomserver_state_snapshot_nid_seq;\n\t`); err != nil {\n\t\treturn fmt.Errorf(\"tx.Exec (delete old snapshot table): %w\", err)\n\t}\n\tif _, err = tx.Exec(`\n\t\tDROP TABLE _roomserver_state_block;\n\t\tDROP SEQUENCE roomserver_state_block_nid_seq;\n\t`); err != nil {\n\t\treturn fmt.Errorf(\"tx.Exec (delete old block table): %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc DownStateBlocksRefactor(tx *sql.Tx) error {\n\tpanic(\"Downgrading state storage is not supported\")\n}\n<|endoftext|>"} {"text":"<commit_before>package defaults\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/goreleaser\/goreleaser\/internal\/testlib\"\n\t\"github.com\/goreleaser\/goreleaser\/pkg\/config\"\n\t\"github.com\/goreleaser\/goreleaser\/pkg\/context\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestDescription(t *testing.T) {\n\trequire.NotEmpty(t, Pipe{}.String())\n}\n\nfunc TestFillBasicData(t *testing.T) {\n\ttestlib.Mktmp(t)\n\ttestlib.GitInit(t)\n\ttestlib.GitRemoteAdd(t, \"git@github.com:goreleaser\/goreleaser.git\")\n\n\tctx := &context.Context{\n\t\tTokenType: context.TokenTypeGitHub,\n\t\tConfig: config.Project{},\n\t}\n\n\trequire.NoError(t, Pipe{}.Run(ctx))\n\trequire.Equal(t, \"goreleaser\", ctx.Config.Release.GitHub.Owner)\n\trequire.Equal(t, \"goreleaser\", ctx.Config.Release.GitHub.Name)\n\trequire.NotEmpty(t, ctx.Config.Builds)\n\trequire.Equal(t, \"goreleaser\", ctx.Config.Builds[0].Binary)\n\trequire.Equal(t, \".\", ctx.Config.Builds[0].Main)\n\trequire.Contains(t, ctx.Config.Builds[0].Goos, \"darwin\")\n\trequire.Contains(t, ctx.Config.Builds[0].Goos, \"linux\")\n\trequire.Contains(t, ctx.Config.Builds[0].Goarch, \"386\")\n\trequire.Contains(t, ctx.Config.Builds[0].Goarch, \"amd64\")\n\trequire.Equal(t, \"tar.gz\", ctx.Config.Archives[0].Format)\n\trequire.Empty(t, ctx.Config.Dockers)\n\trequire.Equal(t, \"https:\/\/github.com\", ctx.Config.GitHubURLs.Download)\n\trequire.NotEmpty(t, ctx.Config.Archives[0].NameTemplate)\n\trequire.NotEmpty(t, ctx.Config.Builds[0].Ldflags)\n\trequire.NotEmpty(t, ctx.Config.Archives[0].Files)\n\trequire.NotEmpty(t, ctx.Config.Dist)\n}\n\nfunc TestFillPartial(t *testing.T) {\n\ttestlib.Mktmp(t)\n\ttestlib.GitInit(t)\n\ttestlib.GitRemoteAdd(t, \"git@github.com:goreleaser\/goreleaser.git\")\n\n\tctx := &context.Context{\n\t\tConfig: config.Project{\n\t\t\tGitHubURLs: config.GitHubURLs{\n\t\t\t\tDownload: \"https:\/\/github.company.com\",\n\t\t\t},\n\t\t\tDist: \"disttt\",\n\t\t\tRelease: config.Release{\n\t\t\t\tGitHub: config.Repo{\n\t\t\t\t\tOwner: \"goreleaser\",\n\t\t\t\t\tName: \"test\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tArchives: []config.Archive{\n\t\t\t\t{\n\t\t\t\t\tFiles: []config.File{\n\t\t\t\t\t\t{Source: \"glob\/*\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuilds: []config.Build{\n\t\t\t\t{\n\t\t\t\t\tID: \"build1\",\n\t\t\t\t\tBinary: \"testreleaser\",\n\t\t\t\t},\n\t\t\t\t{Goos: []string{\"linux\"}},\n\t\t\t\t{\n\t\t\t\t\tID: \"build3\",\n\t\t\t\t\tBinary: \"another\",\n\t\t\t\t\tIgnore: []config.IgnoredBuild{\n\t\t\t\t\t\t{Goos: \"darwin\", Goarch: \"amd64\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tDockers: []config.Docker{\n\t\t\t\t{\n\t\t\t\t\tImageTemplates: []string{\"a\/b\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBrews: []config.Homebrew{\n\t\t\t\t{\n\t\t\t\t\tDescription: \"foo\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\trequire.NoError(t, Pipe{}.Run(ctx))\n\trequire.Len(t, ctx.Config.Archives[0].Files, 1)\n\trequire.Equal(t, `bin.install \"test\"`, ctx.Config.Brews[0].Install)\n\trequire.NotEmpty(t, ctx.Config.Dockers[0].Goos)\n\trequire.NotEmpty(t, ctx.Config.Dockers[0].Goarch)\n\trequire.NotEmpty(t, ctx.Config.Dockers[0].Dockerfile)\n\trequire.Empty(t, ctx.Config.Dockers[0].Goarm)\n\trequire.Equal(t, \"disttt\", ctx.Config.Dist)\n\trequire.NotEqual(t, \"https:\/\/github.com\", ctx.Config.GitHubURLs.Download)\n\n\tctx = &context.Context{\n\t\tTokenType: context.TokenTypeGitea,\n\n\t\tConfig: config.Project{\n\t\t\tGiteaURLs: config.GiteaURLs{\n\t\t\t\tAPI: \"https:\/\/gitea.com\/api\/v1\",\n\t\t\t},\n\t\t},\n\t}\n\trequire.NoError(t, Pipe{}.Run(ctx))\n\trequire.Equal(t, \"https:\/\/gitea.com\", ctx.Config.GiteaURLs.Download)\n}\n\nfunc TestGiteaTemplateDownloadURL(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tapiURL string\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"string_url\",\n\t\t\tapiURL: \"https:\/\/gitea.com\/api\/v1\",\n\t\t},\n\t\t{\n\t\t\tname: \"download_url_template\",\n\t\t\tapiURL: \"{{ .Env.GORELEASER_TEST_GITEA_URLS_API }}\",\n\t\t},\n\t\t{\n\t\t\tname: \"download_url_template_invalid_value\",\n\t\t\tapiURL: \"{{ .Env.GORELEASER_NOT_EXISTS }}\",\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"download_url_template_invalid\",\n\t\t\tapiURL: \"{{.dddddddddd\",\n\t\t\twantErr: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tctx := &context.Context{\n\t\t\tTokenType: context.TokenTypeGitea,\n\t\t\tEnv: context.Env{\n\t\t\t\t\"GORELEASER_TEST_GITEA_URLS_API\": \"https:\/\/gitea.com\/api\/v1\",\n\t\t\t},\n\t\t\tConfig: config.Project{\n\t\t\t\tGiteaURLs: config.GiteaURLs{\n\t\t\t\t\tAPI: tt.apiURL,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\terr := Pipe{}.Run(ctx)\n\t\tif tt.wantErr {\n\t\t\trequire.Error(t, err)\n\t\t\treturn\n\t\t}\n\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, \"https:\/\/gitea.com\", ctx.Config.GiteaURLs.Download)\n\t}\n}\n<commit_msg>test: fix test<commit_after>package defaults\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/goreleaser\/goreleaser\/internal\/testlib\"\n\t\"github.com\/goreleaser\/goreleaser\/pkg\/config\"\n\t\"github.com\/goreleaser\/goreleaser\/pkg\/context\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestDescription(t *testing.T) {\n\trequire.NotEmpty(t, Pipe{}.String())\n}\n\nfunc TestFillBasicData(t *testing.T) {\n\ttestlib.Mktmp(t)\n\ttestlib.GitInit(t)\n\ttestlib.GitRemoteAdd(t, \"git@github.com:goreleaser\/goreleaser.git\")\n\n\tctx := &context.Context{\n\t\tTokenType: context.TokenTypeGitHub,\n\t\tConfig: config.Project{},\n\t}\n\n\trequire.NoError(t, Pipe{}.Run(ctx))\n\trequire.Equal(t, \"goreleaser\", ctx.Config.Release.GitHub.Owner)\n\trequire.Equal(t, \"goreleaser\", ctx.Config.Release.GitHub.Name)\n\trequire.NotEmpty(t, ctx.Config.Builds)\n\trequire.Equal(t, \"goreleaser\", ctx.Config.Builds[0].Binary)\n\trequire.Equal(t, \".\", ctx.Config.Builds[0].Main)\n\trequire.Contains(t, ctx.Config.Builds[0].Goos, \"darwin\")\n\trequire.Contains(t, ctx.Config.Builds[0].Goos, \"linux\")\n\trequire.Contains(t, ctx.Config.Builds[0].Goarch, \"386\")\n\trequire.Contains(t, ctx.Config.Builds[0].Goarch, \"amd64\")\n\trequire.Equal(t, \"tar.gz\", ctx.Config.Archives[0].Format)\n\trequire.Empty(t, ctx.Config.Dockers)\n\trequire.Equal(t, \"https:\/\/github.com\", ctx.Config.GitHubURLs.Download)\n\trequire.NotEmpty(t, ctx.Config.Archives[0].NameTemplate)\n\trequire.NotEmpty(t, ctx.Config.Builds[0].Ldflags)\n\trequire.NotEmpty(t, ctx.Config.Archives[0].Files)\n\trequire.NotEmpty(t, ctx.Config.Dist)\n}\n\nfunc TestFillPartial(t *testing.T) {\n\ttestlib.Mktmp(t)\n\ttestlib.GitInit(t)\n\ttestlib.GitRemoteAdd(t, \"git@github.com:goreleaser\/goreleaser.git\")\n\n\tctx := &context.Context{\n\t\tConfig: config.Project{\n\t\t\tGitHubURLs: config.GitHubURLs{\n\t\t\t\tDownload: \"https:\/\/github.company.com\",\n\t\t\t},\n\t\t\tDist: \"disttt\",\n\t\t\tRelease: config.Release{\n\t\t\t\tGitHub: config.Repo{\n\t\t\t\t\tOwner: \"goreleaser\",\n\t\t\t\t\tName: \"test\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tArchives: []config.Archive{\n\t\t\t\t{\n\t\t\t\t\tFiles: []config.File{\n\t\t\t\t\t\t{Source: \"glob\/*\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuilds: []config.Build{\n\t\t\t\t{\n\t\t\t\t\tID: \"build1\",\n\t\t\t\t\tBinary: \"testreleaser\",\n\t\t\t\t},\n\t\t\t\t{Goos: []string{\"linux\"}},\n\t\t\t\t{\n\t\t\t\t\tID: \"build3\",\n\t\t\t\t\tBinary: \"another\",\n\t\t\t\t\tIgnore: []config.IgnoredBuild{\n\t\t\t\t\t\t{Goos: \"darwin\", Goarch: \"amd64\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tDockers: []config.Docker{\n\t\t\t\t{\n\t\t\t\t\tImageTemplates: []string{\"a\/b\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBrews: []config.Homebrew{\n\t\t\t\t{\n\t\t\t\t\tDescription: \"foo\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\trequire.NoError(t, Pipe{}.Run(ctx))\n\trequire.Len(t, ctx.Config.Archives[0].Files, 1)\n\trequire.NotEmpty(t, ctx.Config.Dockers[0].Goos)\n\trequire.NotEmpty(t, ctx.Config.Dockers[0].Goarch)\n\trequire.NotEmpty(t, ctx.Config.Dockers[0].Dockerfile)\n\trequire.Empty(t, ctx.Config.Dockers[0].Goarm)\n\trequire.Equal(t, \"disttt\", ctx.Config.Dist)\n\trequire.NotEqual(t, \"https:\/\/github.com\", ctx.Config.GitHubURLs.Download)\n\n\tctx = &context.Context{\n\t\tTokenType: context.TokenTypeGitea,\n\n\t\tConfig: config.Project{\n\t\t\tGiteaURLs: config.GiteaURLs{\n\t\t\t\tAPI: \"https:\/\/gitea.com\/api\/v1\",\n\t\t\t},\n\t\t},\n\t}\n\trequire.NoError(t, Pipe{}.Run(ctx))\n\trequire.Equal(t, \"https:\/\/gitea.com\", ctx.Config.GiteaURLs.Download)\n}\n\nfunc TestGiteaTemplateDownloadURL(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tapiURL string\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"string_url\",\n\t\t\tapiURL: \"https:\/\/gitea.com\/api\/v1\",\n\t\t},\n\t\t{\n\t\t\tname: \"download_url_template\",\n\t\t\tapiURL: \"{{ .Env.GORELEASER_TEST_GITEA_URLS_API }}\",\n\t\t},\n\t\t{\n\t\t\tname: \"download_url_template_invalid_value\",\n\t\t\tapiURL: \"{{ .Env.GORELEASER_NOT_EXISTS }}\",\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"download_url_template_invalid\",\n\t\t\tapiURL: \"{{.dddddddddd\",\n\t\t\twantErr: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tctx := &context.Context{\n\t\t\tTokenType: context.TokenTypeGitea,\n\t\t\tEnv: context.Env{\n\t\t\t\t\"GORELEASER_TEST_GITEA_URLS_API\": \"https:\/\/gitea.com\/api\/v1\",\n\t\t\t},\n\t\t\tConfig: config.Project{\n\t\t\t\tGiteaURLs: config.GiteaURLs{\n\t\t\t\t\tAPI: tt.apiURL,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\terr := Pipe{}.Run(ctx)\n\t\tif tt.wantErr {\n\t\t\trequire.Error(t, err)\n\t\t\treturn\n\t\t}\n\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, \"https:\/\/gitea.com\", ctx.Config.GiteaURLs.Download)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package interceptor\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/datawire\/teleproxy\/internal\/pkg\/nat\"\n\trt \"github.com\/datawire\/teleproxy\/internal\/pkg\/route\"\n\t\"log\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n)\n\ntype Interceptor struct {\n\tport string\n\ttables map[string]rt.Table\n\ttranslator *nat.Translator\n\tdomains sync.Map\n\twork chan func()\n\tdone chan empty\n}\n\ntype empty interface{}\n\nfunc NewInterceptor(name string) *Interceptor {\n\treturn &Interceptor{\n\t\ttables: make(map[string]rt.Table),\n\t\ttranslator: nat.NewTranslator(name),\n\t\twork: make(chan func()),\n\t\tdone: make(chan empty),\n\t}\n}\n\nfunc (i *Interceptor) Start() {\n\tgo func() {\n\t\tdefer close(i.done)\n\t\ti.translator.Enable()\n\t\tdefer i.translator.Disable()\n\t\tfor {\n\t\t\taction, ok := <-i.work\n\t\t\tif ok {\n\t\t\t\taction()\n\t\t\t} else {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (i *Interceptor) Stop() {\n\tclose(i.work)\n\t<-i.done\n}\n\nfunc (i *Interceptor) Resolve(name string) *rt.Route {\n\tvalue, ok := i.domains.Load(strings.ToLower(name))\n\tif ok {\n\t\treturn value.(*rt.Route)\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc (i *Interceptor) Destination(conn *net.TCPConn) (string, error) {\n\t_, host, err := i.translator.GetOriginalDst(conn)\n\treturn host, err\n}\n\nfunc (i *Interceptor) Render(table string) string {\n\tresult := make(chan string, 1)\n\ti.work <- func() {\n\t\tvar obj interface{}\n\n\t\tif table == \"\" {\n\t\t\tvar tables []rt.Table\n\t\t\tfor _, t := range i.tables {\n\t\t\t\ttables = append(tables, t)\n\t\t\t}\n\t\t\tobj = tables\n\t\t} else {\n\t\t\tvar ok bool\n\t\t\tobj, ok = i.tables[table]\n\t\t\tif !ok {\n\t\t\t\tresult <- \"\"\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tbytes, err := json.MarshalIndent(obj, \"\", \" \")\n\t\tif err != nil {\n\t\t\tresult <- err.Error()\n\t\t} else {\n\t\t\tresult <- string(bytes)\n\t\t}\n\t}\n\treturn <-result\n}\n\nfunc (i *Interceptor) Delete(table string) bool {\n\tresult := make(chan bool, 1)\n\ti.work <- func() {\n\t\tvar names []string\n\t\tif table == \"\" {\n\t\t\tfor name := range i.tables {\n\t\t\t\tnames = append(names, name)\n\t\t\t}\n\t\t} else if _, ok := i.tables[table]; ok {\n\t\t\tnames = []string{table}\n\t\t} else {\n\t\t\tresult <- false\n\t\t}\n\n\t\tfor _, name := range names {\n\t\t\tif name != \"bootstrap\" {\n\t\t\t\ti.update(rt.Table{Name: name})\n\t\t\t}\n\t\t}\n\n\t\tresult <- true\n\t}\n\treturn <-result\n}\n\nfunc (i *Interceptor) Update(table rt.Table) {\n\ti.work <- func() {\n\t\ti.update(table)\n\t}\n}\n\nfunc (i *Interceptor) update(table rt.Table) {\n\told, ok := i.tables[table.Name]\n\n\troutes := make(map[string]rt.Route)\n\tif ok {\n\t\tfor _, route := range old.Routes {\n\t\t\troutes[route.Name] = route\n\t\t}\n\t}\n\n\tfor _, route := range table.Routes {\n\t\texisting, ok := routes[route.Name]\n\t\tif ok && route != existing {\n\n\t\t\tswitch route.Proto {\n\t\t\tcase \"tcp\":\n\t\t\t\ti.translator.ClearTCP(existing.Ip)\n\t\t\tcase \"udp\":\n\t\t\t\ti.translator.ClearUDP(existing.Ip)\n\t\t\tdefault:\n\t\t\t\tlog.Printf(\"INT: unrecognized protocol: %v\", route)\n\t\t\t}\n\n\t\t}\n\n\t\tif !ok || route != existing {\n\n\t\t\tif route.Target != \"\" {\n\t\t\t\tswitch route.Proto {\n\t\t\t\tcase \"tcp\":\n\t\t\t\t\ti.translator.ForwardTCP(route.Ip, route.Target)\n\t\t\t\tcase \"udp\":\n\t\t\t\t\ti.translator.ForwardUDP(route.Ip, route.Target)\n\t\t\t\tdefault:\n\t\t\t\t\tlog.Printf(\"INT: unrecognized protocol: %v\", route)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif route.Name != \"\" {\n\t\t\t\tlog.Printf(\"INT: STORE %v->%v\", route.Domain(), route)\n\t\t\t\tcopy := route\n\t\t\t\ti.domains.Store(route.Domain(), ©)\n\t\t\t}\n\n\t\t}\n\n\t\tif ok {\n\t\t\t\/\/ remove the route from our map of\n\t\t\t\/\/ old routes so we don't end up\n\t\t\t\/\/ deleting it below\n\t\t\tdelete(routes, route.Name)\n\t\t}\n\t}\n\n\tfor _, route := range routes {\n\t\tlog.Printf(\"INT: CLEAR %v->%v\", route.Domain(), route)\n\t\ti.domains.Delete(route.Domain())\n\n\t\tswitch route.Proto {\n\t\tcase \"tcp\":\n\t\t\ti.translator.ClearTCP(route.Ip)\n\t\tcase \"udp\":\n\t\t\ti.translator.ClearUDP(route.Ip)\n\t\tdefault:\n\t\t\tlog.Printf(\"INT: unrecognized protocol: %v\", route)\n\t\t}\n\n\t}\n\n\tif table.Routes == nil || len(table.Routes) == 0 {\n\t\tdelete(i.tables, table.Name)\n\t} else {\n\t\ti.tables[table.Name] = table\n\t}\n}\n<commit_msg>Add DNS search path handling for the resolver<commit_after>package interceptor\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/datawire\/teleproxy\/internal\/pkg\/nat\"\n\trt \"github.com\/datawire\/teleproxy\/internal\/pkg\/route\"\n\t\"log\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\"\n)\n\ntype Interceptor struct {\n\tport string\n\ttables map[string]rt.Table\n\ttranslator *nat.Translator\n\tdomains sync.Map\n\twork chan func()\n\tdone chan empty\n\tsearch []string\n}\n\ntype empty interface{}\n\nfunc NewInterceptor(name string) *Interceptor {\n\treturn &Interceptor{\n\t\ttables: make(map[string]rt.Table),\n\t\ttranslator: nat.NewTranslator(name),\n\t\twork: make(chan func()),\n\t\tdone: make(chan empty),\n\t\tsearch: []string{\"\"},\n\t}\n}\n\nfunc (i *Interceptor) Start() {\n\tgo func() {\n\t\tdefer close(i.done)\n\t\ti.translator.Enable()\n\t\tdefer i.translator.Disable()\n\t\tfor {\n\t\t\taction, ok := <-i.work\n\t\t\tif ok {\n\t\t\t\taction()\n\t\t\t} else {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (i *Interceptor) Stop() {\n\tclose(i.work)\n\t<-i.done\n}\n\n\/\/ Resolve looks up the given query in the (FIXME: somewhere), trying\n\/\/ all the suffixes in the search path, and returns a Route on success\n\/\/ or nil on failure. This implementation does not count the number of\n\/\/ dots in the query.\nfunc (i *Interceptor) Resolve(query string) *rt.Route {\n\tif !strings.HasSuffix(query, \".\") {\n\t\tquery += \".\"\n\t}\n\tfor _, suffix := range i.GetSearchPath() {\n\t\tname := query + suffix\n\t\tvalue, ok := i.domains.Load(strings.ToLower(name))\n\t\tif ok {\n\t\t\treturn value.(*rt.Route)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (i *Interceptor) Destination(conn *net.TCPConn) (string, error) {\n\t_, host, err := i.translator.GetOriginalDst(conn)\n\treturn host, err\n}\n\nfunc (i *Interceptor) Render(table string) string {\n\tresult := make(chan string, 1)\n\ti.work <- func() {\n\t\tvar obj interface{}\n\n\t\tif table == \"\" {\n\t\t\tvar tables []rt.Table\n\t\t\tfor _, t := range i.tables {\n\t\t\t\ttables = append(tables, t)\n\t\t\t}\n\t\t\tobj = tables\n\t\t} else {\n\t\t\tvar ok bool\n\t\t\tobj, ok = i.tables[table]\n\t\t\tif !ok {\n\t\t\t\tresult <- \"\"\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tbytes, err := json.MarshalIndent(obj, \"\", \" \")\n\t\tif err != nil {\n\t\t\tresult <- err.Error()\n\t\t} else {\n\t\t\tresult <- string(bytes)\n\t\t}\n\t}\n\treturn <-result\n}\n\nfunc (i *Interceptor) Delete(table string) bool {\n\tresult := make(chan bool, 1)\n\ti.work <- func() {\n\t\tvar names []string\n\t\tif table == \"\" {\n\t\t\tfor name := range i.tables {\n\t\t\t\tnames = append(names, name)\n\t\t\t}\n\t\t} else if _, ok := i.tables[table]; ok {\n\t\t\tnames = []string{table}\n\t\t} else {\n\t\t\tresult <- false\n\t\t}\n\n\t\tfor _, name := range names {\n\t\t\tif name != \"bootstrap\" {\n\t\t\t\ti.update(rt.Table{Name: name})\n\t\t\t}\n\t\t}\n\n\t\tresult <- true\n\t}\n\treturn <-result\n}\n\nfunc (i *Interceptor) Update(table rt.Table) {\n\ti.work <- func() {\n\t\ti.update(table)\n\t}\n}\n\nfunc (i *Interceptor) update(table rt.Table) {\n\told, ok := i.tables[table.Name]\n\n\troutes := make(map[string]rt.Route)\n\tif ok {\n\t\tfor _, route := range old.Routes {\n\t\t\troutes[route.Name] = route\n\t\t}\n\t}\n\n\tfor _, route := range table.Routes {\n\t\texisting, ok := routes[route.Name]\n\t\tif ok && route != existing {\n\n\t\t\tswitch route.Proto {\n\t\t\tcase \"tcp\":\n\t\t\t\ti.translator.ClearTCP(existing.Ip)\n\t\t\tcase \"udp\":\n\t\t\t\ti.translator.ClearUDP(existing.Ip)\n\t\t\tdefault:\n\t\t\t\tlog.Printf(\"INT: unrecognized protocol: %v\", route)\n\t\t\t}\n\n\t\t}\n\n\t\tif !ok || route != existing {\n\n\t\t\tif route.Target != \"\" {\n\t\t\t\tswitch route.Proto {\n\t\t\t\tcase \"tcp\":\n\t\t\t\t\ti.translator.ForwardTCP(route.Ip, route.Target)\n\t\t\t\tcase \"udp\":\n\t\t\t\t\ti.translator.ForwardUDP(route.Ip, route.Target)\n\t\t\t\tdefault:\n\t\t\t\t\tlog.Printf(\"INT: unrecognized protocol: %v\", route)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif route.Name != \"\" {\n\t\t\t\tlog.Printf(\"INT: STORE %v->%v\", route.Domain(), route)\n\t\t\t\tcopy := route\n\t\t\t\ti.domains.Store(route.Domain(), ©)\n\t\t\t}\n\n\t\t}\n\n\t\tif ok {\n\t\t\t\/\/ remove the route from our map of\n\t\t\t\/\/ old routes so we don't end up\n\t\t\t\/\/ deleting it below\n\t\t\tdelete(routes, route.Name)\n\t\t}\n\t}\n\n\tfor _, route := range routes {\n\t\tlog.Printf(\"INT: CLEAR %v->%v\", route.Domain(), route)\n\t\ti.domains.Delete(route.Domain())\n\n\t\tswitch route.Proto {\n\t\tcase \"tcp\":\n\t\t\ti.translator.ClearTCP(route.Ip)\n\t\tcase \"udp\":\n\t\t\ti.translator.ClearUDP(route.Ip)\n\t\tdefault:\n\t\t\tlog.Printf(\"INT: unrecognized protocol: %v\", route)\n\t\t}\n\n\t}\n\n\tif table.Routes == nil || len(table.Routes) == 0 {\n\t\tdelete(i.tables, table.Name)\n\t} else {\n\t\ti.tables[table.Name] = table\n\t}\n}\n\n\/\/ SetSearchPath updates the DNS search path used by the resolver\nfunc (i *Interceptor) SetSearchPath(paths []string) {\n\ti.work <- func() {\n\t\ti.search = paths\n\t}\n}\n\n\/\/ GetSearchPath retrieves the current search path\nfunc (i *Interceptor) GetSearchPath() []string {\n\tresult := make(chan []string, 1)\n\ti.work <- func() {\n\t\tresult <- i.search\n\t}\n\treturn <-result\n}\n<|endoftext|>"} {"text":"<commit_before>package generatorControllers\r\n\r\nimport (\r\n\t. \"eaciit\/wfdemo-git\/library\/helper\"\r\n\t. \"eaciit\/wfdemo-git\/library\/models\"\r\n\t. \"eaciit\/wfdemo-git\/processapp\/summaryGenerator\/controllers\"\r\n\t\"eaciit\/wfdemo-git\/web\/helper\"\r\n\t_ \"fmt\"\r\n\t\"log\"\r\n\t\"os\"\r\n\t_ \"strings\"\r\n\t\"time\"\r\n\r\n\t\"strings\"\r\n\r\n\t\"github.com\/eaciit\/dbox\"\r\n\t_ \"github.com\/eaciit\/dbox\/dbc\/mongo\"\r\n\ttk \"github.com\/eaciit\/toolkit\"\r\n)\r\n\r\ntype GenScadaLast24 struct {\r\n\t*BaseController\r\n}\r\n\r\nfunc (d *GenScadaLast24) Generate(base *BaseController) {\r\n\tif base != nil {\r\n\t\td.BaseController = base\r\n\t\tctx, e := PrepareConnection()\r\n\t\tif e != nil {\r\n\t\t\tErrorHandler(e, \"Scada Summary\")\r\n\t\t\tos.Exit(0)\r\n\t\t}\r\n\r\n\t\td.BaseController.Ctx.DeleteMany(new(ScadaLastUpdate), dbox.And(dbox.Ne(\"_id\", \"\")))\r\n\r\n\t\tprojectList, _ := helper.GetProjectList()\r\n\r\n\t\tinprojectactive := func(str string) bool {\r\n\t\t\tfor _, v := range projectList {\r\n\t\t\t\tif v.Value == str {\r\n\t\t\t\t\treturn true\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\treturn false\r\n\t\t}\r\n\r\n\t\tmapbudget := map[string]float64{}\r\n\t\tcsrBudget, _ := ctx.NewQuery().From(new(ExpPValueModel).TableName()).\r\n\t\t\tCursor(nil)\r\n\r\n\t\tbudgets := make([]ExpPValueModel, 0)\r\n\t\t_ = csrBudget.Fetch(&budgets, 0, false)\r\n\t\tcsrBudget.Close()\r\n\r\n\t\tfor _, budget := range budgets {\r\n\t\t\tmapbudget[tk.Sprintf(\"%s_%d\", budget.ProjectName, budget.MonthNo)] = budget.P75NetGenMWH\r\n\t\t\tif inprojectactive(budget.ProjectName) {\r\n\t\t\t\tmapbudget[tk.Sprintf(\"fleet_%d\", budget.MonthNo)] = budget.P75NetGenMWH\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\tfor _, proj := range d.BaseController.ProjectList {\r\n\t\t\tprojectName := proj.Value\r\n\t\t\tturbineList := []TurbineOut{}\r\n\r\n\t\t\tif projectName != \"Fleet\" {\r\n\t\t\t\tturbineList, _ = helper.GetTurbineList([]interface{}{projectName})\r\n\t\t\t} else {\r\n\t\t\t\tturbineList, _ = helper.GetTurbineList(nil)\r\n\t\t\t}\r\n\r\n\t\t\ttotalTurbine := len(turbineList)\r\n\r\n\t\t\tfilter := dbox.Eq(\"available\", 1)\r\n\t\t\tif projectName != \"Fleet\" {\r\n\t\t\t\tfilter = dbox.And(dbox.Eq(\"projectname\", projectName), filter)\r\n\t\t\t}\r\n\r\n\t\t\t\/*for _, v := range filter {\r\n\t\t\t\tlog.Printf(\">> %#v \\n\", v)\r\n\t\t\t}*\/\r\n\r\n\t\t\tcsr, e := ctx.NewQuery().\r\n\t\t\t\tFrom(new(ScadaData).TableName()).\r\n\t\t\t\tWhere(filter).\r\n\t\t\t\tAggr(dbox.AggrMax, \"$timestamp\", \"timestamp\").\r\n\t\t\t\tAggr(dbox.AggrMax, \"$dateinfo.dateid\", \"dateid\").\r\n\t\t\t\tGroup(\"\").\r\n\t\t\t\tCursor(nil)\r\n\r\n\t\t\tif e != nil {\r\n\t\t\t\tlog.Printf(\"Error: %v \\n\", e.Error())\r\n\t\t\t} else {\r\n\t\t\t\tdatas := []tk.M{}\r\n\t\t\t\te = csr.Fetch(&datas, 0, false)\r\n\t\t\t\tcsr.Close()\r\n\r\n\t\t\t\ttk.Printf(\">> %#v \\n\", datas)\r\n\r\n\t\t\t\tif len(datas) > 0 {\r\n\t\t\t\t\tdateId := datas[0].Get(\"dateid\", time.Time{}).(time.Time).UTC()\r\n\t\t\t\t\tdtInfo := GetDateInfo(dateId)\r\n\t\t\t\t\tmaxTimeStamp := datas[0].Get(\"timestamp\", time.Time{}).(time.Time).UTC()\r\n\r\n\t\t\t\t\tvar budgetCurrMonthDaily float64\r\n\r\n\t\t\t\t\t_id := tk.Sprintf(\"%s_%d\", projectName, dateId.Month())\r\n\t\t\t\t\tif val, cond := mapbudget[_id]; cond {\r\n\t\t\t\t\t\tbudgetCurrMonths := val * 1000.0\r\n\t\t\t\t\t\tnoOfDay := float64(daysIn(dateId.Month(), dateId.Year()))\r\n\t\t\t\t\t\tbudgetCurrMonthDaily = tk.Div(budgetCurrMonths, noOfDay)\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\tmdl := new(ScadaLastUpdate).New()\r\n\r\n\t\t\t\t\tif projectName != \"Fleet\" {\r\n\t\t\t\t\t\tmdl.ID = \"SCADALASTUPDATE_\" + strings.ToUpper(projectName)\r\n\t\t\t\t\t\tmdl.ProjectName = projectName\r\n\t\t\t\t\t\tmdl.NoOfProjects = 1\r\n\t\t\t\t\t} else {\r\n\t\t\t\t\t\tmdl.ID = \"SCADALASTUPDATE_FLEET\"\r\n\t\t\t\t\t\tmdl.ProjectName = \"Fleet\"\r\n\t\t\t\t\t\tmdl.NoOfProjects = len(d.BaseController.ProjectList) - 1\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\tfor _, t := range turbineList {\r\n\t\t\t\t\t\tmdl.TotalMaxCapacity += t.Capacity\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\tmdl.TotalMaxCapacity = tk.ToFloat64(mdl.TotalMaxCapacity*1000.0, 2, tk.RoundingAuto)\r\n\t\t\t\t\tmdl.LastUpdate = maxTimeStamp\r\n\t\t\t\t\tmdl.DateInfo = dtInfo\r\n\t\t\t\t\tmdl.NoOfTurbines = totalTurbine\r\n\r\n\t\t\t\t\titems := make([]LastData24Hours, 0)\r\n\t\t\t\t\tcdatehour := dateId.UTC()\r\n\t\t\t\t\tfor i := 0; i < 24; i++ {\r\n\t\t\t\t\t\tcdatehour = cdatehour.Add(time.Duration(i) * time.Hour)\r\n\r\n\t\t\t\t\t\t\/\/ year := strconv.Itoa(dateId.Year())\r\n\t\t\t\t\t\t\/\/ month := dateId.Month().String()\r\n\t\t\t\t\t\t\/\/ day := strconv.Itoa(dateId.Day())\r\n\t\t\t\t\t\t\/\/ strTime := year + \"-\" + month + \"-\" + day + \" \" + strconv.Itoa(i) + \":00:00\"\r\n\t\t\t\t\t\t\/\/ timeHr, _ := time.Parse(\"2006-January-2 15:04:05\", strTime)\r\n\r\n\t\t\t\t\t\t\/\/ timeHrStart := timeHr.Add(-1 * time.Hour)\r\n\r\n\t\t\t\t\t\tfilterSub := []*dbox.Filter{}\r\n\t\t\t\t\t\tfilterSub = append(filterSub, dbox.Gt(\"timestamp\", cdatehour.Add(time.Hour*-1)))\r\n\t\t\t\t\t\tfilterSub = append(filterSub, dbox.Lte(\"timestamp\", cdatehour))\r\n\t\t\t\t\t\tfilterSub = append(filterSub, dbox.Eq(\"available\", 1))\r\n\r\n\t\t\t\t\t\tif projectName != \"Fleet\" {\r\n\t\t\t\t\t\t\tfilterSub = append(filterSub, dbox.Eq(\"projectname\", projectName))\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\tcsr, e = ctx.NewQuery().From(new(ScadaData).TableName()).\r\n\t\t\t\t\t\t\tWhere(dbox.And(filterSub...)).\r\n\t\t\t\t\t\t\tAggr(dbox.AggrSum, \"$power\", \"totalpower\").\r\n\t\t\t\t\t\t\tAggr(dbox.AggrSum, \"$powerlost\", \"totalpowerlost\").\r\n\t\t\t\t\t\t\tAggr(dbox.AggrSum, \"$energylost\", \"energylost\").\r\n\t\t\t\t\t\t\tAggr(dbox.AggrSum, \"$denpower\", \"denpower\").\r\n\t\t\t\t\t\t\tAggr(dbox.AggrSum, \"$oktime\", \"totaloktime\").\r\n\t\t\t\t\t\t\tAggr(dbox.AggrSum, \"$griddowntime\", \"totalgriddowntime\").\r\n\t\t\t\t\t\t\tAggr(dbox.AggrAvr, \"$windspeed\", \"avgwindspeed\").\r\n\t\t\t\t\t\t\tGroup(\"projectname\").\r\n\t\t\t\t\t\t\tCursor(nil)\r\n\t\t\t\t\t\tdefer csr.Close()\r\n\r\n\t\t\t\t\t\tscadas := []tk.M{}\r\n\t\t\t\t\t\te = csr.Fetch(&scadas, 0, false)\r\n\r\n\t\t\t\t\t\tvar last LastData24Hours\r\n\t\t\t\t\t\tif len(scadas) > 0 {\r\n\t\t\t\t\t\t\tdata := scadas[0]\r\n\t\t\t\t\t\t\ttrueAvail := 0.0\r\n\t\t\t\t\t\t\tgridAvail := 0.0\r\n\r\n\t\t\t\t\t\t\tipower := data[\"totalpower\"]\r\n\t\t\t\t\t\t\tpower := 0.0\r\n\t\t\t\t\t\t\tif ipower != nil {\r\n\t\t\t\t\t\t\t\tpower = ipower.(float64)\r\n\t\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\t\tipotentialpower := data[\"denpower\"]\r\n\t\t\t\t\t\t\tpotentialpower := 0.0\r\n\t\t\t\t\t\t\tif ipotentialpower != nil {\r\n\t\t\t\t\t\t\t\tpotentialpower = ipotentialpower.(float64)\r\n\t\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\t\tiwindspeed := data[\"avgwindspeed\"]\r\n\t\t\t\t\t\t\twindspeed := 0.0\r\n\t\t\t\t\t\t\tif iwindspeed != nil {\r\n\t\t\t\t\t\t\t\twindspeed = iwindspeed.(float64)\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\tlast.Hour = i\r\n\t\t\t\t\t\t\tlast.TimeHour = cdatehour\r\n\t\t\t\t\t\t\tlast.AvgWindSpeed = windspeed\r\n\t\t\t\t\t\t\tlast.PowerKw = power\r\n\t\t\t\t\t\t\tlast.EnergyKwh = power \/ 6\r\n\t\t\t\t\t\t\tlast.Potential = potentialpower\r\n\t\t\t\t\t\t\tlast.PotentialKwh = potentialpower \/ 6\r\n\t\t\t\t\t\t\tlast.TrueAvail = trueAvail\r\n\t\t\t\t\t\t\tlast.GridAvail = gridAvail\r\n\t\t\t\t\t\t} else {\r\n\t\t\t\t\t\t\tlast.Hour = i\r\n\t\t\t\t\t\t\tlast.TimeHour = cdatehour\r\n\t\t\t\t\t\t\tlast.AvgWindSpeed = 0.0\r\n\t\t\t\t\t\t\tlast.PowerKw = 0.0\r\n\t\t\t\t\t\t\tlast.EnergyKwh = 0.0\r\n\t\t\t\t\t\t\tlast.Potential = 0.0\r\n\t\t\t\t\t\t\tlast.PotentialKwh = 0.0\r\n\t\t\t\t\t\t\tlast.TrueAvail = 0.0\r\n\t\t\t\t\t\t\tlast.GridAvail = 0.0\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\titems = append(items, last)\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\tmatch := tk.M{}\r\n\r\n\t\t\t\t\tmatch.Set(\"dateinfo.monthid\", tk.M{}.Set(\"$eq\", dtInfo.MonthId)).Set(\"power\", tk.M{}.Set(\"$gte\", -200))\r\n\r\n\t\t\t\t\tif projectName != \"Fleet\" {\r\n\t\t\t\t\t\tmatch.Set(\"projectname\", projectName)\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\tpipe := []tk.M{tk.M{}.Set(\"$match\", match), tk.M{}.Set(\"$group\", tk.M{}.Set(\"_id\", \"$dateinfo.dateid\").Set(\"totalpower\", tk.M{}.Set(\"$sum\", \"$power\"))), tk.M{}.Set(\"$sort\", tk.M{}.Set(\"_id\", 1))}\r\n\r\n\t\t\t\t\tcsr, _ := ctx.NewQuery().\r\n\t\t\t\t\t\tCommand(\"pipe\", pipe).\r\n\t\t\t\t\t\tFrom(new(ScadaData).TableName()).\r\n\t\t\t\t\t\tCursor(nil)\r\n\t\t\t\t\tdefer csr.Close()\r\n\r\n\t\t\t\t\tscadas := []tk.M{}\r\n\t\t\t\t\te = csr.Fetch(&scadas, 0, false)\r\n\r\n\t\t\t\t\titem30s := make([]Last30Days, 0)\r\n\t\t\t\t\tdateData := dateId\r\n\t\t\t\t\tcummProd := 0.0\r\n\t\t\t\t\tcummBudget := 0.0\r\n\t\t\t\t\tfor _, data := range scadas {\r\n\t\t\t\t\t\tdateData = data[\"_id\"].(time.Time)\r\n\t\t\t\t\t\tvar last30 Last30Days\r\n\t\t\t\t\t\tlast30.DateId = dateData\r\n\t\t\t\t\t\tlast30.DayNo = dateData.Day()\r\n\r\n\t\t\t\t\t\tcurrProd := 0.0\r\n\t\t\t\t\t\tcurrBudget := budgetCurrMonthDaily \/\/ 565160.32\r\n\t\t\t\t\t\tif data != nil {\r\n\t\t\t\t\t\t\tipower := data[\"totalpower\"]\r\n\t\t\t\t\t\t\tpower := 0.0\r\n\t\t\t\t\t\t\tif ipower != nil {\r\n\t\t\t\t\t\t\t\tpower = ipower.(float64)\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\tcurrProd = power \/ 6\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tcummProd = cummProd + currProd\r\n\t\t\t\t\t\tcummBudget = cummBudget + currBudget\r\n\r\n\t\t\t\t\t\tlast30.CurrBudget = currBudget\r\n\t\t\t\t\t\tlast30.CurrProduction = currProd\r\n\t\t\t\t\t\tlast30.CumBudget = cummBudget \/ 1000000\r\n\t\t\t\t\t\tlast30.CumProduction = cummProd \/ 1000000\r\n\r\n\t\t\t\t\t\titem30s = append(item30s, last30)\r\n\r\n\t\t\t\t\t\tdateData = dateId.Add(-1)\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\tmdl.Productions = items\r\n\t\t\t\t\tmdl.CummulativeProductions = item30s\r\n\r\n\t\t\t\t\td.BaseController.Ctx.Insert(mdl)\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n}\r\n<commit_msg>filter available only<commit_after>package generatorControllers\r\n\r\nimport (\r\n\t. \"eaciit\/wfdemo-git\/library\/helper\"\r\n\t. \"eaciit\/wfdemo-git\/library\/models\"\r\n\t. \"eaciit\/wfdemo-git\/processapp\/summaryGenerator\/controllers\"\r\n\t\"eaciit\/wfdemo-git\/web\/helper\"\r\n\t_ \"fmt\"\r\n\t\"log\"\r\n\t\"os\"\r\n\t_ \"strings\"\r\n\t\"time\"\r\n\r\n\t\"strings\"\r\n\r\n\t\"github.com\/eaciit\/dbox\"\r\n\t_ \"github.com\/eaciit\/dbox\/dbc\/mongo\"\r\n\ttk \"github.com\/eaciit\/toolkit\"\r\n)\r\n\r\ntype GenScadaLast24 struct {\r\n\t*BaseController\r\n}\r\n\r\nfunc (d *GenScadaLast24) Generate(base *BaseController) {\r\n\tif base != nil {\r\n\t\td.BaseController = base\r\n\t\tctx, e := PrepareConnection()\r\n\t\tif e != nil {\r\n\t\t\tErrorHandler(e, \"Scada Summary\")\r\n\t\t\tos.Exit(0)\r\n\t\t}\r\n\r\n\t\td.BaseController.Ctx.DeleteMany(new(ScadaLastUpdate), dbox.And(dbox.Ne(\"_id\", \"\")))\r\n\r\n\t\tprojectList, _ := helper.GetProjectList()\r\n\r\n\t\tinprojectactive := func(str string) bool {\r\n\t\t\tfor _, v := range projectList {\r\n\t\t\t\tif v.Value == str {\r\n\t\t\t\t\treturn true\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t\treturn false\r\n\t\t}\r\n\r\n\t\tmapbudget := map[string]float64{}\r\n\t\tcsrBudget, _ := ctx.NewQuery().From(new(ExpPValueModel).TableName()).\r\n\t\t\tCursor(nil)\r\n\r\n\t\tbudgets := make([]ExpPValueModel, 0)\r\n\t\t_ = csrBudget.Fetch(&budgets, 0, false)\r\n\t\tcsrBudget.Close()\r\n\r\n\t\tfor _, budget := range budgets {\r\n\t\t\tmapbudget[tk.Sprintf(\"%s_%d\", budget.ProjectName, budget.MonthNo)] = budget.P75NetGenMWH\r\n\t\t\tif inprojectactive(budget.ProjectName) {\r\n\t\t\t\tmapbudget[tk.Sprintf(\"fleet_%d\", budget.MonthNo)] = budget.P75NetGenMWH\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\tfor _, proj := range d.BaseController.ProjectList {\r\n\t\t\tprojectName := proj.Value\r\n\t\t\tturbineList := []TurbineOut{}\r\n\r\n\t\t\tif projectName != \"Fleet\" {\r\n\t\t\t\tturbineList, _ = helper.GetTurbineList([]interface{}{projectName})\r\n\t\t\t} else {\r\n\t\t\t\tturbineList, _ = helper.GetTurbineList(nil)\r\n\t\t\t}\r\n\r\n\t\t\ttotalTurbine := len(turbineList)\r\n\r\n\t\t\tfilter := dbox.Eq(\"available\", 1)\r\n\t\t\tif projectName != \"Fleet\" {\r\n\t\t\t\tfilter = dbox.And(dbox.Eq(\"projectname\", projectName), filter)\r\n\t\t\t}\r\n\r\n\t\t\t\/*for _, v := range filter {\r\n\t\t\t\tlog.Printf(\">> %#v \\n\", v)\r\n\t\t\t}*\/\r\n\r\n\t\t\tcsr, e := ctx.NewQuery().\r\n\t\t\t\tFrom(new(ScadaData).TableName()).\r\n\t\t\t\tWhere(filter).\r\n\t\t\t\tAggr(dbox.AggrMax, \"$timestamp\", \"timestamp\").\r\n\t\t\t\tAggr(dbox.AggrMax, \"$dateinfo.dateid\", \"dateid\").\r\n\t\t\t\tGroup(\"\").\r\n\t\t\t\tCursor(nil)\r\n\r\n\t\t\tif e != nil {\r\n\t\t\t\tlog.Printf(\"Error: %v \\n\", e.Error())\r\n\t\t\t} else {\r\n\t\t\t\tdatas := []tk.M{}\r\n\t\t\t\te = csr.Fetch(&datas, 0, false)\r\n\t\t\t\tcsr.Close()\r\n\r\n\t\t\t\ttk.Printf(\">> %#v \\n\", datas)\r\n\r\n\t\t\t\tif len(datas) > 0 {\r\n\t\t\t\t\tdateId := datas[0].Get(\"dateid\", time.Time{}).(time.Time).UTC()\r\n\t\t\t\t\tdtInfo := GetDateInfo(dateId)\r\n\t\t\t\t\tmaxTimeStamp := datas[0].Get(\"timestamp\", time.Time{}).(time.Time).UTC()\r\n\r\n\t\t\t\t\tvar budgetCurrMonthDaily float64\r\n\r\n\t\t\t\t\t_id := tk.Sprintf(\"%s_%d\", projectName, dateId.Month())\r\n\t\t\t\t\tif val, cond := mapbudget[_id]; cond {\r\n\t\t\t\t\t\tbudgetCurrMonths := val * 1000.0\r\n\t\t\t\t\t\tnoOfDay := float64(daysIn(dateId.Month(), dateId.Year()))\r\n\t\t\t\t\t\tbudgetCurrMonthDaily = tk.Div(budgetCurrMonths, noOfDay)\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\tmdl := new(ScadaLastUpdate).New()\r\n\r\n\t\t\t\t\tif projectName != \"Fleet\" {\r\n\t\t\t\t\t\tmdl.ID = \"SCADALASTUPDATE_\" + strings.ToUpper(projectName)\r\n\t\t\t\t\t\tmdl.ProjectName = projectName\r\n\t\t\t\t\t\tmdl.NoOfProjects = 1\r\n\t\t\t\t\t} else {\r\n\t\t\t\t\t\tmdl.ID = \"SCADALASTUPDATE_FLEET\"\r\n\t\t\t\t\t\tmdl.ProjectName = \"Fleet\"\r\n\t\t\t\t\t\tmdl.NoOfProjects = len(d.BaseController.ProjectList) - 1\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\tfor _, t := range turbineList {\r\n\t\t\t\t\t\tmdl.TotalMaxCapacity += t.Capacity\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\tmdl.TotalMaxCapacity = tk.ToFloat64(mdl.TotalMaxCapacity*1000.0, 2, tk.RoundingAuto)\r\n\t\t\t\t\tmdl.LastUpdate = maxTimeStamp\r\n\t\t\t\t\tmdl.DateInfo = dtInfo\r\n\t\t\t\t\tmdl.NoOfTurbines = totalTurbine\r\n\r\n\t\t\t\t\titems := make([]LastData24Hours, 0)\r\n\t\t\t\t\tcdatehour := dateId.UTC()\r\n\t\t\t\t\tfor i := 0; i < 24; i++ {\r\n\t\t\t\t\t\tcdatehour = cdatehour.Add(time.Duration(i) * time.Hour)\r\n\r\n\t\t\t\t\t\t\/\/ year := strconv.Itoa(dateId.Year())\r\n\t\t\t\t\t\t\/\/ month := dateId.Month().String()\r\n\t\t\t\t\t\t\/\/ day := strconv.Itoa(dateId.Day())\r\n\t\t\t\t\t\t\/\/ strTime := year + \"-\" + month + \"-\" + day + \" \" + strconv.Itoa(i) + \":00:00\"\r\n\t\t\t\t\t\t\/\/ timeHr, _ := time.Parse(\"2006-January-2 15:04:05\", strTime)\r\n\r\n\t\t\t\t\t\t\/\/ timeHrStart := timeHr.Add(-1 * time.Hour)\r\n\r\n\t\t\t\t\t\tfilterSub := []*dbox.Filter{}\r\n\t\t\t\t\t\tfilterSub = append(filterSub, dbox.Gt(\"timestamp\", cdatehour.Add(time.Hour*-1)))\r\n\t\t\t\t\t\tfilterSub = append(filterSub, dbox.Lte(\"timestamp\", cdatehour))\r\n\t\t\t\t\t\tfilterSub = append(filterSub, dbox.Eq(\"available\", 1))\r\n\r\n\t\t\t\t\t\tif projectName != \"Fleet\" {\r\n\t\t\t\t\t\t\tfilterSub = append(filterSub, dbox.Eq(\"projectname\", projectName))\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\tcsr, e = ctx.NewQuery().From(new(ScadaData).TableName()).\r\n\t\t\t\t\t\t\tWhere(dbox.And(filterSub...)).\r\n\t\t\t\t\t\t\tAggr(dbox.AggrSum, \"$power\", \"totalpower\").\r\n\t\t\t\t\t\t\tAggr(dbox.AggrSum, \"$powerlost\", \"totalpowerlost\").\r\n\t\t\t\t\t\t\tAggr(dbox.AggrSum, \"$energylost\", \"energylost\").\r\n\t\t\t\t\t\t\tAggr(dbox.AggrSum, \"$denpower\", \"denpower\").\r\n\t\t\t\t\t\t\tAggr(dbox.AggrSum, \"$oktime\", \"totaloktime\").\r\n\t\t\t\t\t\t\tAggr(dbox.AggrSum, \"$griddowntime\", \"totalgriddowntime\").\r\n\t\t\t\t\t\t\tAggr(dbox.AggrAvr, \"$windspeed\", \"avgwindspeed\").\r\n\t\t\t\t\t\t\tGroup(\"projectname\").\r\n\t\t\t\t\t\t\tCursor(nil)\r\n\t\t\t\t\t\tdefer csr.Close()\r\n\r\n\t\t\t\t\t\tscadas := []tk.M{}\r\n\t\t\t\t\t\te = csr.Fetch(&scadas, 0, false)\r\n\r\n\t\t\t\t\t\tvar last LastData24Hours\r\n\t\t\t\t\t\tif len(scadas) > 0 {\r\n\t\t\t\t\t\t\tdata := scadas[0]\r\n\t\t\t\t\t\t\ttrueAvail := 0.0\r\n\t\t\t\t\t\t\tgridAvail := 0.0\r\n\r\n\t\t\t\t\t\t\tipower := data[\"totalpower\"]\r\n\t\t\t\t\t\t\tpower := 0.0\r\n\t\t\t\t\t\t\tif ipower != nil {\r\n\t\t\t\t\t\t\t\tpower = ipower.(float64)\r\n\t\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\t\tipotentialpower := data[\"denpower\"]\r\n\t\t\t\t\t\t\tpotentialpower := 0.0\r\n\t\t\t\t\t\t\tif ipotentialpower != nil {\r\n\t\t\t\t\t\t\t\tpotentialpower = ipotentialpower.(float64)\r\n\t\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\t\tiwindspeed := data[\"avgwindspeed\"]\r\n\t\t\t\t\t\t\twindspeed := 0.0\r\n\t\t\t\t\t\t\tif iwindspeed != nil {\r\n\t\t\t\t\t\t\t\twindspeed = iwindspeed.(float64)\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\tlast.Hour = i\r\n\t\t\t\t\t\t\tlast.TimeHour = cdatehour\r\n\t\t\t\t\t\t\tlast.AvgWindSpeed = windspeed\r\n\t\t\t\t\t\t\tlast.PowerKw = power\r\n\t\t\t\t\t\t\tlast.EnergyKwh = power \/ 6\r\n\t\t\t\t\t\t\tlast.Potential = potentialpower\r\n\t\t\t\t\t\t\tlast.PotentialKwh = potentialpower \/ 6\r\n\t\t\t\t\t\t\tlast.TrueAvail = trueAvail\r\n\t\t\t\t\t\t\tlast.GridAvail = gridAvail\r\n\t\t\t\t\t\t} else {\r\n\t\t\t\t\t\t\tlast.Hour = i\r\n\t\t\t\t\t\t\tlast.TimeHour = cdatehour\r\n\t\t\t\t\t\t\tlast.AvgWindSpeed = 0.0\r\n\t\t\t\t\t\t\tlast.PowerKw = 0.0\r\n\t\t\t\t\t\t\tlast.EnergyKwh = 0.0\r\n\t\t\t\t\t\t\tlast.Potential = 0.0\r\n\t\t\t\t\t\t\tlast.PotentialKwh = 0.0\r\n\t\t\t\t\t\t\tlast.TrueAvail = 0.0\r\n\t\t\t\t\t\t\tlast.GridAvail = 0.0\r\n\t\t\t\t\t\t}\r\n\r\n\t\t\t\t\t\titems = append(items, last)\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\tmatch := tk.M{}\r\n\r\n\t\t\t\t\tmatch.Set(\"dateinfo.monthid\", tk.M{}.Set(\"$eq\", dtInfo.MonthId)).Set(\"available\", tk.M{}.Set(\"$eq\", 1))\r\n\r\n\t\t\t\t\tif projectName != \"Fleet\" {\r\n\t\t\t\t\t\tmatch.Set(\"projectname\", projectName)\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\tpipe := []tk.M{tk.M{}.Set(\"$match\", match), tk.M{}.Set(\"$group\", tk.M{}.Set(\"_id\", \"$dateinfo.dateid\").Set(\"totalpower\", tk.M{}.Set(\"$sum\", \"$power\"))), tk.M{}.Set(\"$sort\", tk.M{}.Set(\"_id\", 1))}\r\n\r\n\t\t\t\t\tcsr, _ := ctx.NewQuery().\r\n\t\t\t\t\t\tCommand(\"pipe\", pipe).\r\n\t\t\t\t\t\tFrom(new(ScadaData).TableName()).\r\n\t\t\t\t\t\tCursor(nil)\r\n\t\t\t\t\tdefer csr.Close()\r\n\r\n\t\t\t\t\tscadas := []tk.M{}\r\n\t\t\t\t\te = csr.Fetch(&scadas, 0, false)\r\n\r\n\t\t\t\t\titem30s := make([]Last30Days, 0)\r\n\t\t\t\t\tdateData := dateId\r\n\t\t\t\t\tcummProd := 0.0\r\n\t\t\t\t\tcummBudget := 0.0\r\n\t\t\t\t\tfor _, data := range scadas {\r\n\t\t\t\t\t\tdateData = data[\"_id\"].(time.Time)\r\n\t\t\t\t\t\tvar last30 Last30Days\r\n\t\t\t\t\t\tlast30.DateId = dateData\r\n\t\t\t\t\t\tlast30.DayNo = dateData.Day()\r\n\r\n\t\t\t\t\t\tcurrProd := 0.0\r\n\t\t\t\t\t\tcurrBudget := budgetCurrMonthDaily \/\/ 565160.32\r\n\t\t\t\t\t\tif data != nil {\r\n\t\t\t\t\t\t\tipower := data[\"totalpower\"]\r\n\t\t\t\t\t\t\tpower := 0.0\r\n\t\t\t\t\t\t\tif ipower != nil {\r\n\t\t\t\t\t\t\t\tpower = ipower.(float64)\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\tcurrProd = power \/ 6\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tcummProd = cummProd + currProd\r\n\t\t\t\t\t\tcummBudget = cummBudget + currBudget\r\n\r\n\t\t\t\t\t\tlast30.CurrBudget = currBudget\r\n\t\t\t\t\t\tlast30.CurrProduction = currProd\r\n\t\t\t\t\t\tlast30.CumBudget = cummBudget \/ 1000000\r\n\t\t\t\t\t\tlast30.CumProduction = cummProd \/ 1000000\r\n\r\n\t\t\t\t\t\titem30s = append(item30s, last30)\r\n\r\n\t\t\t\t\t\tdateData = dateId.Add(-1)\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t\tmdl.Productions = items\r\n\t\t\t\t\tmdl.CummulativeProductions = item30s\r\n\r\n\t\t\t\t\td.BaseController.Ctx.Insert(mdl)\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n}\r\n<|endoftext|>"} {"text":"<commit_before>package gateway\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"encoding\/json\"\n\n\t\"github.com\/ReneGa\/tweetcount-microservices\/recorder\/domain\"\n)\n\n\/\/ Tweets is a gateway to a tweet producing service\ntype Tweets interface {\n\tTweets(query string) domain.Tweets\n}\n\n\/\/ HTTPTweets is the gateway to get tweets over http\ntype HTTPTweets struct {\n\tClient *http.Client\n\tURL string\n}\n\ntype decodeResult int\n\nconst (\n\tdecodeError decodeResult = iota\n\tdecodeStopped\n)\n\nfunc decodeResponse(res *http.Response, data chan domain.Tweet, stop chan bool) decodeResult {\n\tdefer res.Body.Close()\n\tvar tweet domain.Tweet\n\tjd := json.NewDecoder(res.Body)\n\tfor {\n\t\tselect {\n\t\tcase <-stop:\n\t\t\treturn decodeStopped\n\t\tdefault:\n\t\t\terr := jd.Decode(&tweet)\n\t\t\tif err != nil {\n\t\t\t\treturn decodeError\n\t\t\t}\n\t\t\tdata <- tweet\n\t\t}\n\t}\n}\n\n\/\/ Tweets return a stream of tweets for a given search query\nfunc (t *HTTPTweets) Tweets(query string) domain.Tweets {\n\turl := fmt.Sprintf(\"%s?q=%s\", t.URL, query)\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdata := make(chan domain.Tweet)\n\tstop := make(chan bool)\n\n\ttweets := domain.Tweets{\n\t\tData: data,\n\t\tStop: stop,\n\t}\n\n\tgo func() {\n\t\tdefer close(data)\n\t\treconnect := true\n\t\tfor reconnect {\n\t\t\tselect {\n\t\t\tcase <-stop:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\t\t\tres, err := t.Client.Do(req)\n\t\t\tif err == nil {\n\t\t\t\tdecodeResult := decodeResponse(res, data, stop)\n\t\t\t\treconnect = decodeResult == decodeError\n\t\t\t} else {\n\t\t\t\tlog.Println(\"Error: \", err)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn tweets\n}\n<commit_msg>Handle stop during tweet sending<commit_after>package gateway\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"encoding\/json\"\n\n\t\"github.com\/ReneGa\/tweetcount-microservices\/recorder\/domain\"\n)\n\n\/\/ Tweets is a gateway to a tweet producing service\ntype Tweets interface {\n\tTweets(query string) domain.Tweets\n}\n\n\/\/ HTTPTweets is the gateway to get tweets over http\ntype HTTPTweets struct {\n\tClient *http.Client\n\tURL string\n}\n\ntype decodeResult int\n\nconst (\n\tdecodeError decodeResult = iota\n\tdecodeStopped\n)\n\nfunc decodeResponse(res *http.Response, data chan domain.Tweet, stop chan bool) decodeResult {\n\tdefer res.Body.Close()\n\tjd := json.NewDecoder(res.Body)\n\tfor {\n\t\tselect {\n\t\tcase <-stop:\n\t\t\treturn decodeStopped\n\t\tdefault:\n\t\t\tvar tweet domain.Tweet\n\t\t\terr := jd.Decode(&tweet)\n\t\t\tif err != nil {\n\t\t\t\treturn decodeError\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase data <- tweet:\n\t\t\tcase <-stop:\n\t\t\t\treturn decodeStopped\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Tweets return a stream of tweets for a given search query\nfunc (t *HTTPTweets) Tweets(query string) domain.Tweets {\n\turl := fmt.Sprintf(\"%s?q=%s\", t.URL, query)\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdata := make(chan domain.Tweet)\n\tstop := make(chan bool)\n\n\ttweets := domain.Tweets{\n\t\tData: data,\n\t\tStop: stop,\n\t}\n\n\tgo func() {\n\t\tdefer close(data)\n\t\treconnect := true\n\t\tfor reconnect {\n\t\t\tselect {\n\t\t\tcase <-stop:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\t\t\tres, err := t.Client.Do(req)\n\t\t\tif err == nil {\n\t\t\t\tdecodeResult := decodeResponse(res, data, stop)\n\t\t\t\treconnect = decodeResult == decodeError\n\t\t\t} else {\n\t\t\t\tlog.Println(\"Error: \", err)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn tweets\n}\n<|endoftext|>"} {"text":"<commit_before>package bench\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"time\"\n\n\t\"sort\"\n\n\t\"github.com\/pilosa\/pilosa\/pilosactl\"\n)\n\nfunc NewImport(stdin io.Reader, stdout, stderr io.Writer) *Import {\n\treturn &Import{\n\t\tImportCommand: pilosactl.NewImportCommand(stdin, stdout, stderr),\n\t}\n}\n\n\/\/ Import sets bits with increasing profile id and bitmap id.\ntype Import struct {\n\tName string `json:\"name\"`\n\tBaseBitmapID int64 `json:\"base-bitmap-id\"`\n\tMaxBitmapID int64 `json:\"max-bitmap-id\"`\n\tBaseProfileID int64 `json:\"base-profile-id\"`\n\tMaxProfileID int64 `json:\"max-profile-id\"`\n\tRandomBitmapOrder bool `json:\"random-bitmap-order\"`\n\tMinBitsPerMap int64 `json:\"min-bits-per-map\"`\n\tMaxBitsPerMap int64 `json:\"max-bits-per-map\"`\n\tAgentControls string `json:\"agent-controls\"`\n\tSeed int64 `json:\"seed\"`\n\tnumbits int\n\n\t*pilosactl.ImportCommand\n}\n\nfunc (b *Import) Usage() string {\n\treturn `\nimport generates an import file and imports using pilosa's bulk import interface\n\nUsage: import [arguments]\n\nThe following arguments are available:\n\n\t-base-bitmap-id int\n\t\tbits being set will all be greater than this\n\n\t-maximum-bitmap-id int\n\t\tbits being set will all be less than this\n\n\t-base-profile-id int\n\t\tprofile id num to start from\n\n\t-max-profile-id int\n\t\tmaximum profile id to generate\n\n\t-random-bitmap-order\n\t\tif this option is set, the import file will not be sorted by bitmap id\n\n\t-min-bits-per-map int\n\t\tminimum number of bits set per bitmap\n\n\t-max-bits-per-map int\n\t\tmaximum number of bits set per bitmap\n\n\t-agent-controls string\n\t\tcan be 'height', 'width', or empty (TODO or square?)- increasing\n\t\tnumber of agents modulates bitmap id range, profile id range,\n\t\tor just sets more bits in the same range.\n\n\t-seed int\n\t\tseed for RNG\n\n\t-db string\n\t\tpilosa db to use\n\n\t-frame string\n\t\tframe to import into\n`[1:]\n}\n\nfunc (b *Import) ConsumeFlags(args []string) ([]string, error) {\n\tfs := flag.NewFlagSet(\"Import\", flag.ContinueOnError)\n\tfs.SetOutput(ioutil.Discard)\n\tfs.Int64Var(&b.BaseBitmapID, \"base-bitmap-id\", 0, \"\")\n\tfs.Int64Var(&b.MaxBitmapID, \"max-bitmap-id\", 1000, \"\")\n\tfs.Int64Var(&b.BaseProfileID, \"base-profile-id\", 0, \"\")\n\tfs.Int64Var(&b.MaxProfileID, \"max-profile-id\", 1000, \"\")\n\tfs.BoolVar(&b.RandomBitmapOrder, \"random-bitmap-order\", false, \"\")\n\tfs.Int64Var(&b.MinBitsPerMap, \"min-bits-per-map\", 0, \"\")\n\tfs.Int64Var(&b.MaxBitsPerMap, \"max-bits-per-map\", 10, \"\")\n\tfs.StringVar(&b.AgentControls, \"agent-controls\", \"\", \"\")\n\tfs.Int64Var(&b.Seed, \"seed\", 0, \"\")\n\tfs.StringVar(&b.Database, \"db\", \"benchdb\", \"\")\n\tfs.StringVar(&b.Frame, \"frame\", \"testframe\", \"\")\n\tfs.IntVar(&b.BufferSize, \"buffer-size\", 10000000, \"\")\n\n\tif err := fs.Parse(args); err != nil {\n\t\treturn nil, err\n\t}\n\treturn fs.Args(), nil\n}\n\nfunc (b *Import) Init(hosts []string, agentNum int) error {\n\tif len(hosts) == 0 {\n\t\treturn fmt.Errorf(\"Need at least one host\")\n\t}\n\tb.Name = \"import\"\n\tb.Host = hosts[0]\n\t\/\/ generate csv data\n\tbaseBitmapID, maxBitmapID, baseProfileID, maxProfileID := b.BaseBitmapID, b.MaxBitmapID, b.BaseProfileID, b.MaxProfileID\n\tswitch b.AgentControls {\n\tcase \"height\":\n\t\tnumBitmapIDs := (b.MaxBitmapID - b.BaseBitmapID)\n\t\tbaseBitmapID = b.BaseBitmapID + (numBitmapIDs * int64(agentNum))\n\t\tmaxBitmapID = baseBitmapID + numBitmapIDs\n\tcase \"width\":\n\t\tnumProfileIDs := (b.MaxProfileID - b.BaseProfileID)\n\t\tbaseProfileID = b.BaseProfileID + (numProfileIDs * int64(agentNum))\n\t\tmaxProfileID = baseProfileID + numProfileIDs\n\tcase \"\":\n\t\tbreak\n\tdefault:\n\t\treturn fmt.Errorf(\"agent-controls: '%v' is not supported\", b.AgentControls)\n\t}\n\tf, err := ioutil.TempFile(\"\", \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ set b.Paths)\n\tnum := GenerateImportCSV(f, baseBitmapID, maxBitmapID, baseProfileID, maxProfileID,\n\t\tb.MinBitsPerMap, b.MaxBitsPerMap, b.Seed+int64(agentNum), b.RandomBitmapOrder)\n\tb.numbits = num\n\t\/\/ set b.Paths\n\tf.Close()\n\tb.Paths = []string{f.Name()}\n\treturn nil\n}\n\n\/\/ Run runs the Import benchmark\nfunc (b *Import) Run(ctx context.Context, agentNum int) map[string]interface{} {\n\tresults := make(map[string]interface{})\n\tresults[\"numbits\"] = b.numbits\n\tresults[\"db\"] = b.Database\n\tstart := time.Now()\n\terr := b.ImportCommand.Run(ctx)\n\n\tif err != nil {\n\t\tresults[\"error\"] = err.Error()\n\t}\n\tresults[\"time\"] = time.Now().Sub(start)\n\treturn results\n}\n\ntype Int64Slice []int64\n\nfunc (s Int64Slice) Len() int { return len(s) }\nfunc (s Int64Slice) Less(i, j int) bool { return s[i] < s[j] }\nfunc (s Int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\n\nfunc GenerateImportCSV(w io.Writer, baseBitmapID, maxBitmapID, baseProfileID, maxProfileID, minBitsPerMap, maxBitsPerMap, seed int64, randomOrder bool) int {\n\tsrc := rand.NewSource(seed)\n\trng := rand.New(src)\n\n\tvar bitmapIDs []int\n\tif randomOrder {\n\t\tbitmapIDs = rng.Perm(int(maxBitmapID - baseBitmapID))\n\t}\n\tnumrows := 0\n\tprofileIDs := make(Int64Slice, maxBitsPerMap)\n\tfor i := baseBitmapID; i < maxBitmapID; i++ {\n\t\tvar bitmapID int64\n\t\tif randomOrder {\n\t\t\tbitmapID = int64(bitmapIDs[i-baseBitmapID])\n\t\t} else {\n\t\t\tbitmapID = int64(i)\n\t\t}\n\n\t\tnumBitsToSet := rng.Int63n(maxBitsPerMap-minBitsPerMap) + minBitsPerMap\n\t\tnumrows += int(numBitsToSet)\n\t\tfor j := int64(0); j < numBitsToSet; j++ {\n\t\t\tprofileIDs[j] = rng.Int63n(maxProfileID-baseProfileID) + baseProfileID\n\t\t}\n\t\tprofIDs := profileIDs[:numBitsToSet]\n\t\tif !randomOrder {\n\t\t\tsort.Sort(profIDs)\n\t\t}\n\t\tfor j := int64(0); j < numBitsToSet; j++ {\n\t\t\tfmt.Fprintf(w, \"%d,%d\\n\", bitmapID, profIDs[j])\n\t\t}\n\n\t}\n\treturn numrows\n}\n<commit_msg>fix usage typo maximum->max<commit_after>package bench\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"time\"\n\n\t\"sort\"\n\n\t\"github.com\/pilosa\/pilosa\/pilosactl\"\n)\n\nfunc NewImport(stdin io.Reader, stdout, stderr io.Writer) *Import {\n\treturn &Import{\n\t\tImportCommand: pilosactl.NewImportCommand(stdin, stdout, stderr),\n\t}\n}\n\n\/\/ Import sets bits with increasing profile id and bitmap id.\ntype Import struct {\n\tName string `json:\"name\"`\n\tBaseBitmapID int64 `json:\"base-bitmap-id\"`\n\tMaxBitmapID int64 `json:\"max-bitmap-id\"`\n\tBaseProfileID int64 `json:\"base-profile-id\"`\n\tMaxProfileID int64 `json:\"max-profile-id\"`\n\tRandomBitmapOrder bool `json:\"random-bitmap-order\"`\n\tMinBitsPerMap int64 `json:\"min-bits-per-map\"`\n\tMaxBitsPerMap int64 `json:\"max-bits-per-map\"`\n\tAgentControls string `json:\"agent-controls\"`\n\tSeed int64 `json:\"seed\"`\n\tnumbits int\n\n\t*pilosactl.ImportCommand\n}\n\nfunc (b *Import) Usage() string {\n\treturn `\nimport generates an import file and imports using pilosa's bulk import interface\n\nUsage: import [arguments]\n\nThe following arguments are available:\n\n\t-base-bitmap-id int\n\t\tbits being set will all be greater than this\n\n\t-max-bitmap-id int\n\t\tbits being set will all be less than this\n\n\t-base-profile-id int\n\t\tprofile id num to start from\n\n\t-max-profile-id int\n\t\tmaximum profile id to generate\n\n\t-random-bitmap-order\n\t\tif this option is set, the import file will not be sorted by bitmap id\n\n\t-min-bits-per-map int\n\t\tminimum number of bits set per bitmap\n\n\t-max-bits-per-map int\n\t\tmaximum number of bits set per bitmap\n\n\t-agent-controls string\n\t\tcan be 'height', 'width', or empty (TODO or square?)- increasing\n\t\tnumber of agents modulates bitmap id range, profile id range,\n\t\tor just sets more bits in the same range.\n\n\t-seed int\n\t\tseed for RNG\n\n\t-db string\n\t\tpilosa db to use\n\n\t-frame string\n\t\tframe to import into\n`[1:]\n}\n\nfunc (b *Import) ConsumeFlags(args []string) ([]string, error) {\n\tfs := flag.NewFlagSet(\"Import\", flag.ContinueOnError)\n\tfs.SetOutput(ioutil.Discard)\n\tfs.Int64Var(&b.BaseBitmapID, \"base-bitmap-id\", 0, \"\")\n\tfs.Int64Var(&b.MaxBitmapID, \"max-bitmap-id\", 1000, \"\")\n\tfs.Int64Var(&b.BaseProfileID, \"base-profile-id\", 0, \"\")\n\tfs.Int64Var(&b.MaxProfileID, \"max-profile-id\", 1000, \"\")\n\tfs.BoolVar(&b.RandomBitmapOrder, \"random-bitmap-order\", false, \"\")\n\tfs.Int64Var(&b.MinBitsPerMap, \"min-bits-per-map\", 0, \"\")\n\tfs.Int64Var(&b.MaxBitsPerMap, \"max-bits-per-map\", 10, \"\")\n\tfs.StringVar(&b.AgentControls, \"agent-controls\", \"\", \"\")\n\tfs.Int64Var(&b.Seed, \"seed\", 0, \"\")\n\tfs.StringVar(&b.Database, \"db\", \"benchdb\", \"\")\n\tfs.StringVar(&b.Frame, \"frame\", \"testframe\", \"\")\n\tfs.IntVar(&b.BufferSize, \"buffer-size\", 10000000, \"\")\n\n\tif err := fs.Parse(args); err != nil {\n\t\treturn nil, err\n\t}\n\treturn fs.Args(), nil\n}\n\nfunc (b *Import) Init(hosts []string, agentNum int) error {\n\tif len(hosts) == 0 {\n\t\treturn fmt.Errorf(\"Need at least one host\")\n\t}\n\tb.Name = \"import\"\n\tb.Host = hosts[0]\n\t\/\/ generate csv data\n\tbaseBitmapID, maxBitmapID, baseProfileID, maxProfileID := b.BaseBitmapID, b.MaxBitmapID, b.BaseProfileID, b.MaxProfileID\n\tswitch b.AgentControls {\n\tcase \"height\":\n\t\tnumBitmapIDs := (b.MaxBitmapID - b.BaseBitmapID)\n\t\tbaseBitmapID = b.BaseBitmapID + (numBitmapIDs * int64(agentNum))\n\t\tmaxBitmapID = baseBitmapID + numBitmapIDs\n\tcase \"width\":\n\t\tnumProfileIDs := (b.MaxProfileID - b.BaseProfileID)\n\t\tbaseProfileID = b.BaseProfileID + (numProfileIDs * int64(agentNum))\n\t\tmaxProfileID = baseProfileID + numProfileIDs\n\tcase \"\":\n\t\tbreak\n\tdefault:\n\t\treturn fmt.Errorf(\"agent-controls: '%v' is not supported\", b.AgentControls)\n\t}\n\tf, err := ioutil.TempFile(\"\", \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ set b.Paths)\n\tnum := GenerateImportCSV(f, baseBitmapID, maxBitmapID, baseProfileID, maxProfileID,\n\t\tb.MinBitsPerMap, b.MaxBitsPerMap, b.Seed+int64(agentNum), b.RandomBitmapOrder)\n\tb.numbits = num\n\t\/\/ set b.Paths\n\tf.Close()\n\tb.Paths = []string{f.Name()}\n\treturn nil\n}\n\n\/\/ Run runs the Import benchmark\nfunc (b *Import) Run(ctx context.Context, agentNum int) map[string]interface{} {\n\tresults := make(map[string]interface{})\n\tresults[\"numbits\"] = b.numbits\n\tresults[\"db\"] = b.Database\n\tstart := time.Now()\n\terr := b.ImportCommand.Run(ctx)\n\n\tif err != nil {\n\t\tresults[\"error\"] = err.Error()\n\t}\n\tresults[\"time\"] = time.Now().Sub(start)\n\treturn results\n}\n\ntype Int64Slice []int64\n\nfunc (s Int64Slice) Len() int { return len(s) }\nfunc (s Int64Slice) Less(i, j int) bool { return s[i] < s[j] }\nfunc (s Int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\n\nfunc GenerateImportCSV(w io.Writer, baseBitmapID, maxBitmapID, baseProfileID, maxProfileID, minBitsPerMap, maxBitsPerMap, seed int64, randomOrder bool) int {\n\tsrc := rand.NewSource(seed)\n\trng := rand.New(src)\n\n\tvar bitmapIDs []int\n\tif randomOrder {\n\t\tbitmapIDs = rng.Perm(int(maxBitmapID - baseBitmapID))\n\t}\n\tnumrows := 0\n\tprofileIDs := make(Int64Slice, maxBitsPerMap)\n\tfor i := baseBitmapID; i < maxBitmapID; i++ {\n\t\tvar bitmapID int64\n\t\tif randomOrder {\n\t\t\tbitmapID = int64(bitmapIDs[i-baseBitmapID])\n\t\t} else {\n\t\t\tbitmapID = int64(i)\n\t\t}\n\n\t\tnumBitsToSet := rng.Int63n(maxBitsPerMap-minBitsPerMap) + minBitsPerMap\n\t\tnumrows += int(numBitsToSet)\n\t\tfor j := int64(0); j < numBitsToSet; j++ {\n\t\t\tprofileIDs[j] = rng.Int63n(maxProfileID-baseProfileID) + baseProfileID\n\t\t}\n\t\tprofIDs := profileIDs[:numBitsToSet]\n\t\tif !randomOrder {\n\t\t\tsort.Sort(profIDs)\n\t\t}\n\t\tfor j := int64(0); j < numBitsToSet; j++ {\n\t\t\tfmt.Fprintf(w, \"%d,%d\\n\", bitmapID, profIDs[j])\n\t\t}\n\n\t}\n\treturn numrows\n}\n<|endoftext|>"} {"text":"<commit_before>package serve\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/go-redis\/redis\/v8\"\n\t\"github.com\/spf13\/pflag\"\n\t\"github.com\/tarampampam\/mikrotik-hosts-parser\/v4\/internal\/pkg\/env\"\n)\n\ntype flags struct {\n\tlisten struct {\n\t\tip string\n\t\tport uint16\n\t}\n\n\tresourcesDir string \/\/ can be empty\n\tconfigPath string\n\n\tcache struct {\n\t\tttl string\n\t\tengine string\n\t}\n\n\t\/\/ redisDSN allows to setup redis server using single string. Examples:\n\t\/\/\tredis:\/\/<user>:<password>@<host>:<port>\/<db_number>\n\t\/\/\tunix:\/\/<user>:<password>@<\/path\/to\/redis.sock>?db=<db_number>\n\tredisDSN string\n}\n\nfunc (f *flags) init(flagSet *pflag.FlagSet) {\n\texe, _ := os.Executable()\n\texe = path.Dir(exe)\n\n\tflagSet.StringVarP(\n\t\t&f.listen.ip,\n\t\t\"listen\",\n\t\t\"l\",\n\t\t\"0.0.0.0\",\n\t\tfmt.Sprintf(\"IP address to listen on [$%s]\", env.ListenAddr),\n\t)\n\tflagSet.Uint16VarP(\n\t\t&f.listen.port,\n\t\t\"port\",\n\t\t\"p\",\n\t\t8080, \/\/nolint:gomnd\n\t\tfmt.Sprintf(\"TCP port number [$%s]\", env.ListenPort),\n\t)\n\tflagSet.StringVarP(\n\t\t&f.resourcesDir,\n\t\t\"resources-dir\",\n\t\t\"r\",\n\t\tfilepath.Join(exe, \"web\"),\n\t\tfmt.Sprintf(\"path to the directory with public assets [$%s]\", env.ResourcesDir),\n\t)\n\tflagSet.StringVarP(\n\t\t&f.configPath,\n\t\t\"config\",\n\t\t\"c\",\n\t\tfilepath.Join(exe, \"configs\", \"config.yml\"),\n\t\tfmt.Sprintf(\"config file path [$%s]\", env.ConfigPath),\n\t)\n\tflagSet.StringVarP(\n\t\t&f.cache.engine,\n\t\t\"caching-engine\",\n\t\t\"\",\n\t\tcachingEngineMemory,\n\t\tfmt.Sprintf(\"caching engine (%s|%s) [$%s]\", cachingEngineMemory, cachingEngineRedis, env.CachingEngine),\n\t)\n\tflagSet.StringVarP(\n\t\t&f.cache.ttl,\n\t\t\"cache-ttl\",\n\t\t\"\",\n\t\t\"30m\",\n\t\tfmt.Sprintf(\"cache entries lifetime (examples: 50s, 1h30m) [$%s]\", env.CacheTTL),\n\t)\n\tflagSet.StringVarP(\n\t\t&f.redisDSN,\n\t\t\"redis-dsn\",\n\t\t\"\",\n\t\t\"redis:\/\/127.0.0.1:6379\/0\",\n\t\tfmt.Sprintf(\"redis server DSN (format: \\\"redis:\/\/<user>:<password>@<host>:<port>\/<db_number>\\\") [$%s]\", env.RedisDSN), \/\/nolint:lll\n\t)\n}\n\nfunc (f *flags) overrideUsingEnv() error {\n\tif envVar, exists := env.ListenAddr.Lookup(); exists {\n\t\tf.listen.ip = envVar\n\t}\n\n\tif envVar, exists := env.ListenPort.Lookup(); exists {\n\t\tif p, err := strconv.ParseUint(envVar, 10, 16); err == nil { \/\/nolint:gomnd\n\t\t\tf.listen.port = uint16(p)\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"wrong TCP port environment variable [%s] value\", envVar)\n\t\t}\n\t}\n\n\tif envVar, exists := env.ResourcesDir.Lookup(); exists {\n\t\tf.resourcesDir = envVar\n\t}\n\n\tif envVar, exists := env.ConfigPath.Lookup(); exists {\n\t\tf.configPath = envVar\n\t}\n\n\tif envVar, exists := env.CachingEngine.Lookup(); exists {\n\t\tf.cache.engine = envVar\n\t}\n\n\tif envVar, exists := env.CacheTTL.Lookup(); exists {\n\t\tf.cache.ttl = envVar\n\t}\n\n\tif envVar, exists := env.RedisDSN.Lookup(); exists {\n\t\tf.redisDSN = envVar\n\t}\n\n\treturn nil\n}\n\nfunc (f *flags) validate() error {\n\tif net.ParseIP(f.listen.ip) == nil {\n\t\treturn fmt.Errorf(\"wrong IP address [%s] for listening\", f.listen.ip)\n\t}\n\n\tif f.resourcesDir != \"\" {\n\t\tif info, err := os.Stat(f.resourcesDir); err != nil || !info.Mode().IsDir() {\n\t\t\treturn fmt.Errorf(\"wrong resources directory [%s] path\", f.resourcesDir)\n\t\t}\n\t}\n\n\tif info, err := os.Stat(f.configPath); err != nil || !info.Mode().IsRegular() {\n\t\treturn fmt.Errorf(\"config file [%s] was not found\", f.configPath)\n\t}\n\n\tswitch f.cache.engine {\n\tcase cachingEngineMemory:\n\tcase cachingEngineRedis:\n\t\tif _, err := redis.ParseURL(f.redisDSN); err != nil {\n\t\t\treturn fmt.Errorf(\"wrong redis DSN [%s]: %w\", f.redisDSN, err)\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported caching engine: %s\", f.cache.engine)\n\t}\n\n\tif _, err := time.ParseDuration(f.cache.ttl); err != nil {\n\t\treturn fmt.Errorf(\"wrong cache lifetime [%s] period\", f.cache.ttl)\n\t}\n\n\treturn nil\n}\n<commit_msg>wip: fix golangci-lint false-positive issues<commit_after>package serve\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/go-redis\/redis\/v8\"\n\t\"github.com\/spf13\/pflag\"\n\n\t\"github.com\/tarampampam\/mikrotik-hosts-parser\/v4\/internal\/pkg\/env\"\n)\n\ntype flags struct {\n\tlisten struct {\n\t\tip string\n\t\tport uint16\n\t}\n\n\tresourcesDir string \/\/ can be empty\n\tconfigPath string\n\n\tcache struct {\n\t\tttl string\n\t\tengine string\n\t}\n\n\t\/\/ redisDSN allows to setup redis server using single string. Examples:\n\t\/\/\tredis:\/\/<user>:<password>@<host>:<port>\/<db_number>\n\t\/\/\tunix:\/\/<user>:<password>@<\/path\/to\/redis.sock>?db=<db_number>\n\tredisDSN string\n}\n\nfunc (f *flags) init(flagSet *pflag.FlagSet) {\n\texe, _ := os.Executable()\n\texe = path.Dir(exe)\n\n\tflagSet.StringVarP(\n\t\t&f.listen.ip,\n\t\t\"listen\",\n\t\t\"l\",\n\t\t\"0.0.0.0\",\n\t\tfmt.Sprintf(\"IP address to listen on [$%s]\", env.ListenAddr),\n\t)\n\tflagSet.Uint16VarP(\n\t\t&f.listen.port,\n\t\t\"port\",\n\t\t\"p\",\n\t\t8080, \/\/nolint:gomnd\n\t\tfmt.Sprintf(\"TCP port number [$%s]\", env.ListenPort),\n\t)\n\tflagSet.StringVarP(\n\t\t&f.resourcesDir,\n\t\t\"resources-dir\",\n\t\t\"r\",\n\t\tfilepath.Join(exe, \"web\"),\n\t\tfmt.Sprintf(\"path to the directory with public assets [$%s]\", env.ResourcesDir),\n\t)\n\tflagSet.StringVarP(\n\t\t&f.configPath,\n\t\t\"config\",\n\t\t\"c\",\n\t\tfilepath.Join(exe, \"configs\", \"config.yml\"),\n\t\tfmt.Sprintf(\"config file path [$%s]\", env.ConfigPath),\n\t)\n\tflagSet.StringVarP(\n\t\t&f.cache.engine,\n\t\t\"caching-engine\",\n\t\t\"\",\n\t\tcachingEngineMemory,\n\t\tfmt.Sprintf(\"caching engine (%s|%s) [$%s]\", cachingEngineMemory, cachingEngineRedis, env.CachingEngine),\n\t)\n\tflagSet.StringVarP(\n\t\t&f.cache.ttl,\n\t\t\"cache-ttl\",\n\t\t\"\",\n\t\t\"30m\",\n\t\tfmt.Sprintf(\"cache entries lifetime (examples: 50s, 1h30m) [$%s]\", env.CacheTTL),\n\t)\n\tflagSet.StringVarP(\n\t\t&f.redisDSN,\n\t\t\"redis-dsn\",\n\t\t\"\",\n\t\t\"redis:\/\/127.0.0.1:6379\/0\",\n\t\tfmt.Sprintf(\"redis server DSN (format: \\\"redis:\/\/<user>:<password>@<host>:<port>\/<db_number>\\\") [$%s]\", env.RedisDSN), \/\/nolint:lll\n\t)\n}\n\nfunc (f *flags) overrideUsingEnv() error {\n\tif envVar, exists := env.ListenAddr.Lookup(); exists {\n\t\tf.listen.ip = envVar\n\t}\n\n\tif envVar, exists := env.ListenPort.Lookup(); exists {\n\t\tif p, err := strconv.ParseUint(envVar, 10, 16); err == nil {\n\t\t\tf.listen.port = uint16(p)\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"wrong TCP port environment variable [%s] value\", envVar)\n\t\t}\n\t}\n\n\tif envVar, exists := env.ResourcesDir.Lookup(); exists {\n\t\tf.resourcesDir = envVar\n\t}\n\n\tif envVar, exists := env.ConfigPath.Lookup(); exists {\n\t\tf.configPath = envVar\n\t}\n\n\tif envVar, exists := env.CachingEngine.Lookup(); exists {\n\t\tf.cache.engine = envVar\n\t}\n\n\tif envVar, exists := env.CacheTTL.Lookup(); exists {\n\t\tf.cache.ttl = envVar\n\t}\n\n\tif envVar, exists := env.RedisDSN.Lookup(); exists {\n\t\tf.redisDSN = envVar\n\t}\n\n\treturn nil\n}\n\nfunc (f *flags) validate() error {\n\tif net.ParseIP(f.listen.ip) == nil {\n\t\treturn fmt.Errorf(\"wrong IP address [%s] for listening\", f.listen.ip)\n\t}\n\n\tif f.resourcesDir != \"\" {\n\t\tif info, err := os.Stat(f.resourcesDir); err != nil || !info.Mode().IsDir() {\n\t\t\treturn fmt.Errorf(\"wrong resources directory [%s] path\", f.resourcesDir)\n\t\t}\n\t}\n\n\tif info, err := os.Stat(f.configPath); err != nil || !info.Mode().IsRegular() {\n\t\treturn fmt.Errorf(\"config file [%s] was not found\", f.configPath)\n\t}\n\n\tswitch f.cache.engine {\n\tcase cachingEngineMemory:\n\tcase cachingEngineRedis:\n\t\tif _, err := redis.ParseURL(f.redisDSN); err != nil {\n\t\t\treturn fmt.Errorf(\"wrong redis DSN [%s]: %w\", f.redisDSN, err)\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported caching engine: %s\", f.cache.engine)\n\t}\n\n\tif _, err := time.ParseDuration(f.cache.ttl); err != nil {\n\t\treturn fmt.Errorf(\"wrong cache lifetime [%s] period\", f.cache.ttl)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2018 Nippon Telegraph and Telephone Corporation.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage version\n\nimport \"fmt\"\n\nconst MAJOR uint = 3\nconst MINOR uint = 6\nconst PATCH uint = 0\n\nvar COMMIT string = \"\"\nvar IDENTIFIER string = \"\"\nvar METADATA string = \"\"\n\nfunc Version() string {\n\tvar suffix string = \"\"\n\tif len(IDENTIFIER) > 0 {\n\t\tsuffix = fmt.Sprintf(\"-%s\", IDENTIFIER)\n\t}\n\n\tif len(COMMIT) > 0 || len(METADATA) > 0 {\n\t\tsuffix = suffix + \"+\"\n\t}\n\n\tif len(COMMIT) > 0 {\n\t\tsuffix = fmt.Sprintf(\"%s\"+\"commit.%s\", suffix, COMMIT)\n\n\t}\n\n\tif len(METADATA) > 0 {\n\t\tif len(COMMIT) > 0 {\n\t\t\tsuffix = suffix + \".\"\n\t\t}\n\t\tsuffix = suffix + METADATA\n\t}\n\n\treturn fmt.Sprintf(\"%d.%d.%d%s\", MAJOR, MINOR, PATCH, suffix)\n}\n<commit_msg>v3.7.0<commit_after>\/\/ Copyright (C) 2018 Nippon Telegraph and Telephone Corporation.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage version\n\nimport \"fmt\"\n\nconst MAJOR uint = 3\nconst MINOR uint = 7\nconst PATCH uint = 0\n\nvar COMMIT string = \"\"\nvar IDENTIFIER string = \"\"\nvar METADATA string = \"\"\n\nfunc Version() string {\n\tvar suffix string = \"\"\n\tif len(IDENTIFIER) > 0 {\n\t\tsuffix = fmt.Sprintf(\"-%s\", IDENTIFIER)\n\t}\n\n\tif len(COMMIT) > 0 || len(METADATA) > 0 {\n\t\tsuffix = suffix + \"+\"\n\t}\n\n\tif len(COMMIT) > 0 {\n\t\tsuffix = fmt.Sprintf(\"%s\"+\"commit.%s\", suffix, COMMIT)\n\n\t}\n\n\tif len(METADATA) > 0 {\n\t\tif len(COMMIT) > 0 {\n\t\t\tsuffix = suffix + \".\"\n\t\t}\n\t\tsuffix = suffix + METADATA\n\t}\n\n\treturn fmt.Sprintf(\"%d.%d.%d%s\", MAJOR, MINOR, PATCH, suffix)\n}\n<|endoftext|>"} {"text":"<commit_before>package progress\n\nimport (\n\t\"os\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/restic\/restic\/internal\/debug\"\n)\n\n\/\/ A Func is a callback for a Counter.\n\/\/\n\/\/ The final argument is true if Counter.Done has been called,\n\/\/ which means that the current call will be the last.\ntype Func func(value uint64, runtime time.Duration, final bool)\n\n\/\/ A Counter tracks a running count and controls a goroutine that passes its\n\/\/ value periodically to a Func.\n\/\/\n\/\/ The Func is also called when SIGUSR1 (or SIGINFO, on BSD) is received.\ntype Counter struct {\n\treport Func\n\tstart time.Time\n\tstopped chan struct{} \/\/ Closed by run.\n\tstop chan struct{} \/\/ Close to stop run.\n\ttick *time.Ticker\n\tvalue uint64\n}\n\n\/\/ New starts a new Counter.\nfunc New(interval time.Duration, report Func) *Counter {\n\tsignals.Once.Do(func() {\n\t\tsignals.ch = make(chan os.Signal, 1)\n\t\tsetupSignals()\n\t})\n\n\tc := &Counter{\n\t\treport: report,\n\t\tstart: time.Now(),\n\t\tstopped: make(chan struct{}),\n\t\tstop: make(chan struct{}),\n\t\ttick: time.NewTicker(interval),\n\t}\n\n\tgo c.run()\n\treturn c\n}\n\n\/\/ Add v to the Counter. This method is concurrency-safe.\nfunc (c *Counter) Add(v uint64) {\n\tif c == nil {\n\t\treturn\n\t}\n\tatomic.AddUint64(&c.value, v)\n}\n\n\/\/ Done tells a Counter to stop and waits for it to report its final value.\nfunc (c *Counter) Done() {\n\tif c == nil {\n\t\treturn\n\t}\n\tc.tick.Stop()\n\tclose(c.stop)\n\t<-c.stopped \/\/ Wait for last progress report.\n\t*c = Counter{} \/\/ Prevent reuse.\n}\n\nfunc (c *Counter) get() uint64 { return atomic.LoadUint64(&c.value) }\n\nfunc (c *Counter) run() {\n\tdefer close(c.stopped)\n\tdefer func() {\n\t\t\/\/ Must be a func so that time.Since isn't called at defer time.\n\t\tc.report(c.get(), time.Since(c.start), true)\n\t}()\n\n\tfor {\n\t\tvar now time.Time\n\n\t\tselect {\n\t\tcase now = <-c.tick.C:\n\t\tcase sig := <-signals.ch:\n\t\t\tdebug.Log(\"Signal received: %v\\n\", sig)\n\t\t\tnow = time.Now()\n\t\tcase <-c.stop:\n\t\t\treturn\n\t\t}\n\n\t\tc.report(c.get(), now.Sub(c.start), false)\n\t}\n}\n\n\/\/ XXX The fact that signals is a single global variable means that only one\n\/\/ Counter receives each incoming signal.\nvar signals struct {\n\tch chan os.Signal\n\tsync.Once\n}\n<commit_msg>ui\/progress: Use mutex instead of atomic<commit_after>package progress\n\nimport (\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/restic\/restic\/internal\/debug\"\n)\n\n\/\/ A Func is a callback for a Counter.\n\/\/\n\/\/ The final argument is true if Counter.Done has been called,\n\/\/ which means that the current call will be the last.\ntype Func func(value uint64, runtime time.Duration, final bool)\n\n\/\/ A Counter tracks a running count and controls a goroutine that passes its\n\/\/ value periodically to a Func.\n\/\/\n\/\/ The Func is also called when SIGUSR1 (or SIGINFO, on BSD) is received.\ntype Counter struct {\n\treport Func\n\tstart time.Time\n\tstopped chan struct{} \/\/ Closed by run.\n\tstop chan struct{} \/\/ Close to stop run.\n\ttick *time.Ticker\n\n\tvalueMutex sync.Mutex\n\tvalue uint64\n}\n\n\/\/ New starts a new Counter.\nfunc New(interval time.Duration, report Func) *Counter {\n\tsignals.Once.Do(func() {\n\t\tsignals.ch = make(chan os.Signal, 1)\n\t\tsetupSignals()\n\t})\n\n\tc := &Counter{\n\t\treport: report,\n\t\tstart: time.Now(),\n\t\tstopped: make(chan struct{}),\n\t\tstop: make(chan struct{}),\n\t\ttick: time.NewTicker(interval),\n\t}\n\n\tgo c.run()\n\treturn c\n}\n\n\/\/ Add v to the Counter. This method is concurrency-safe.\nfunc (c *Counter) Add(v uint64) {\n\tif c == nil {\n\t\treturn\n\t}\n\n\tc.valueMutex.Lock()\n\tc.value += v\n\tc.valueMutex.Unlock()\n}\n\n\/\/ Done tells a Counter to stop and waits for it to report its final value.\nfunc (c *Counter) Done() {\n\tif c == nil {\n\t\treturn\n\t}\n\tc.tick.Stop()\n\tclose(c.stop)\n\t<-c.stopped \/\/ Wait for last progress report.\n\t*c = Counter{} \/\/ Prevent reuse.\n}\n\nfunc (c *Counter) get() uint64 {\n\tc.valueMutex.Lock()\n\tv := c.value\n\tc.valueMutex.Unlock()\n\n\treturn v\n}\n\nfunc (c *Counter) run() {\n\tdefer close(c.stopped)\n\tdefer func() {\n\t\t\/\/ Must be a func so that time.Since isn't called at defer time.\n\t\tc.report(c.get(), time.Since(c.start), true)\n\t}()\n\n\tfor {\n\t\tvar now time.Time\n\n\t\tselect {\n\t\tcase now = <-c.tick.C:\n\t\tcase sig := <-signals.ch:\n\t\t\tdebug.Log(\"Signal received: %v\\n\", sig)\n\t\t\tnow = time.Now()\n\t\tcase <-c.stop:\n\t\t\treturn\n\t\t}\n\n\t\tc.report(c.get(), now.Sub(c.start), false)\n\t}\n}\n\n\/\/ XXX The fact that signals is a single global variable means that only one\n\/\/ Counter receives each incoming signal.\nvar signals struct {\n\tch chan os.Signal\n\tsync.Once\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 Google LLC\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package oauth implements functions to diagnose the supported OAuth2 flows\n\/\/ (web and installed app flows) in a Google Ads API client library client\n\/\/ environment.\npackage oauth\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/googleads\/google-ads-doctor\/oauthdoctor\/diag\"\n\n\t\"golang.org\/x\/oauth2\"\n\t\"golang.org\/x\/oauth2\/google\"\n)\n\n\/\/ This is a list of error codes (not comprehensive) returned by Google OAuth2\n\/\/ endpoint based on Google Ads API scope.\nconst (\n\tAccessNotPermittedForManagerAccount = iota\n\tGoogleAdsAPIDisabled\n\tInvalidClientInfo\n\tInvalidRefreshToken\n\tInvalidCustomerID\n\tMissingDevToken\n\tUnauthenticated\n\tUnauthorized\n\tUnknownError\n\n\tGoogleAdsApiScope = \"https:\/\/www.googleapis.com\/auth\/adwords\"\n)\n\n\/\/ Config is a required configuration for diagnosing the OAuth2 flow based on\n\/\/ the client library configuration.\ntype Config struct {\n\tConfigFile diag.ConfigFile\n\tCustomerID string\n\tOAuthType string\n\tVerbose bool\n}\n\n\/\/ ConfigWriter allows replacement of key by a given value in a configuration.\ntype ConfigWriter interface {\n\tReplaceConfig(k, v string) string\n}\n\nvar (\n\tappVersion string\n\n\tstdinSanitizer = strings.NewReplacer(\"\\n\", \"\")\n\n\treadStdin = func() string {\n\t\treader := bufio.NewReader(os.Stdin)\n\t\tstr, err := reader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error reading input (%s) from command line: %s\", str, err)\n\t\t}\n\n\t\treturn strings.TrimSpace(stdinSanitizer.Replace(str))\n\t}\n)\n\n\/\/ SimulateOAuthFlow simulates the OAuth2 flows supported by the Google Ads API\n\/\/ client libraries.\nfunc (c *Config) SimulateOAuthFlow() {\n\tswitch c.OAuthType {\n\tcase diag.Web:\n\t\tc.simulateWebFlow()\n\tcase diag.InstalledApp:\n\t\tc.simulateAppFlow()\n\tcase diag.ServiceAccount:\n\t\tc.simulateServiceAccFlow()\n\t}\n}\n\n\/\/ decodeError checks the JSON response in the error and determines the error\n\/\/ code.\nfunc (c *Config) decodeError(err error) int32 {\n\terrstr := err.Error()\n\n\tif strings.Contains(errstr, \"invalid_client\") {\n\t\t\/\/ Client ID and\/or secret is invalid\n\t\treturn InvalidClientInfo\n\t}\n\tif strings.Contains(errstr, \"unauthorized_client\") {\n\t\t\/\/ The given refresh token may not be generated with the given client ID\n\t\t\/\/ and secret\n\t\treturn Unauthorized\n\t}\n\tif strings.Contains(errstr, \"invalid_grant\") {\n\t\t\/\/ Refresh token is not valid for any users\n\t\treturn InvalidRefreshToken\n\t}\n\tif strings.Contains(errstr, \"refresh token is not set\") {\n\t\treturn InvalidRefreshToken\n\t}\n\tif strings.Contains(errstr, \"USER_PERMISSION_DENIED\") {\n\t\t\/\/ User doesn't have permission to access Google Ads account\n\t\treturn InvalidRefreshToken\n\t}\n\tif strings.Contains(errstr, \"\\\"PERMISSION_DENIED\\\"\") {\n\t\treturn GoogleAdsAPIDisabled\n\t}\n\tif strings.Contains(errstr, \"UNAUTHENTICATED\") {\n\t\treturn Unauthenticated\n\t}\n\tif strings.Contains(errstr, \"CANNOT_BE_EXECUTED_BY_MANAGER_ACCOUNT\") {\n\t\t\/\/ Request cannot be executed by a manager account\n\t\treturn AccessNotPermittedForManagerAccount\n\t}\n\tif strings.Contains(errstr, \"DEVELOPER_TOKEN_PARAMETER_MISSING\") {\n\t\treturn MissingDevToken\n\t}\n\tif strings.Contains(errstr, \"INVALID_CUSTOMER_ID\") {\n\t\treturn InvalidCustomerID\n\t}\n\treturn UnknownError\n}\n\n\/\/ diagnose handles the error by guiding the user to take appropriate\n\/\/ actions to fix the OAuth2 error based on the error code.\nfunc (c *Config) diagnose(err error) {\n\t\/\/ Print the given message from JSON response if there's any\n\tvar parsedMsg map[string]interface{}\n\tif err := json.Unmarshal([]byte(err.Error()), &parsedMsg); err == nil {\n\t\terrMsg := parsedMsg[\"error\"].(map[string]interface{})[\"message\"]\n\t\tlog.Print(\"JSON response error: \" + errMsg.(string))\n\t}\n\n\tswitch c.decodeError(err) {\n\tcase AccessNotPermittedForManagerAccount:\n\t\tlog.Print(\"ERROR: Your credentials are not permitted to access to a manager account.\" +\n\t\t\t\"\\nPlease create your credentials with a Google Ads account with manager access.\")\n\tcase GoogleAdsAPIDisabled:\n\t\tlog.Print(\"Press <Enter> to continue after you enable Google Ads API\")\n\t\treadStdin()\n\tcase InvalidClientInfo:\n\t\tlog.Print(\"ERROR: Your client ID and\/or client secret may be invalid.\")\n\t\treplaceCloudCredentials(&c.ConfigFile)\n\tcase InvalidRefreshToken, Unauthorized:\n\t\tlog.Print(\"ERROR: Your refresh token may be invalid.\")\n\tcase MissingDevToken:\n\t\tlog.Print(\"ERROR: Your developer token is missing in the configuration file\")\n\t\treplaceDevToken(&c.ConfigFile)\n\tcase Unauthenticated:\n\t\tlog.Print(\"ERROR: The login email may not have access to the given account.\")\n\tcase InvalidCustomerID:\n\t\tlog.Print(\"ERROR: You customer ID is invalid.\")\n\tdefault:\n\t\tvar helperText string\n\t\tswitch c.ConfigFile.OAuthType {\n\t\tcase diag.ServiceAccount:\n\t\t\thelperText = \"Please verify the path of JSON key file and impersonate email (or delegated email).\"\n\t\tcase diag.Web:\n\t\t\thelperText = \"Please verify your developer token, client ID and client secret.\"\n\t\tcase diag.InstalledApp:\n\t\t\thelperText = \"Please verify your developer token, client ID, client secret and refresh token.\"\n\t\t}\n\t\tlog.Print(\"ERROR: Your credentials are invalid but we cannot determine the exact error. \" + helperText)\n\t}\n}\n\nvar (\n\tgetClientID = func() string {\n\t\tfmt.Print(\"New Client ID >> \")\n\t\treturn readStdin()\n\t}\n\n\tgetClientSecret = func() string {\n\t\tfmt.Print(\"New Client Secret >> \")\n\t\treturn readStdin()\n\t}\n)\n\n\/\/ replaceCloudCredentials prompts the user to create a new client ID and\n\/\/ secret and to then enter them at the prompt. The values entered will\n\/\/ replace the existing values in the client library configuration file.\nfunc replaceCloudCredentials(c ConfigWriter) {\n\tlog.Print(\"Follow this guide to setup your OAuth2 client ID and client secret: \" +\n\t\t\"https:\/\/developers.google.com\/adwords\/api\/docs\/guides\/first-api-call#set_up_oauth2_authentication\")\n\n\tclientID := getClientID()\n\tclientSecret := getClientSecret()\n\n\tc.ReplaceConfig(diag.ClientID, clientID)\n\tc.ReplaceConfig(diag.ClientSecret, clientSecret)\n}\n\n\/\/ replaceDevToken guides the user to retrieve their developer token and\n\/\/ enter it at the prompt. The entered value will replace the existing\n\/\/ developer token in the client library configuration file.\nvar replaceDevToken = func(c ConfigWriter) {\n\tlog.Print(\"Please follow this guide to retrieve your developer token: \" +\n\t\t\"https:\/\/developers.google.com\/adwords\/api\/docs\/guides\/signup#step-2\")\n\tlog.Print(\"Pleae enter a new Developer Token here and it will replace \" +\n\t\t\"the one in your client library configuration file\")\n\n\tfmt.Print(\"New Developer Token >> \")\n\tdevToken := readStdin()\n\n\tc.ReplaceConfig(diag.DevToken, devToken)\n}\n\n\/\/ replaceRefreshToken asks the user if they want to replace the refresh\n\/\/ token in the configuration file with the newly generated value.\nfunc replaceRefreshToken(c ConfigWriter, refreshToken string) {\n\tlog.Print(\"Would you like to replace your refresh token in the \" +\n\t\t\"client library config file with the new one generated?\")\n\n\tfmt.Print(\"Enter Y for Yes [Anything else is No] >> \")\n\tanswer := readStdin()\n\n\tif answer == \"Y\" {\n\t\tc.ReplaceConfig(diag.RefreshToken, refreshToken)\n\t} else {\n\t\tlog.Print(\"Refresh token is NOT replaced\")\n\t}\n}\n\nvar oauthEndpoint = google.Endpoint\n\n\/\/ oauth2Conf creates a corresponding OAuth2 config struct based on the\n\/\/ given configuration details. This is only applicable when a refresh token\n\/\/ is not given.\nfunc (c *Config) oauth2Conf(redirectURL string) *oauth2.Config {\n\treturn &oauth2.Config{\n\t\tClientID: c.ConfigFile.ConfigKeys.ClientID,\n\t\tClientSecret: c.ConfigFile.ClientSecret,\n\t\tRedirectURL: redirectURL,\n\t\tScopes: []string{GoogleAdsApiScope},\n\t\tEndpoint: oauthEndpoint,\n\t}\n}\n\n\/\/ Given the auth code returned after the authentication and authorization\n\/\/ step, oauth2Client creates a HTTP client with an authorized access token.\nfunc (c *Config) oauth2Client(code string) (*http.Client, string) {\n\tconf := c.oauth2Conf(InstalledAppRedirectURL)\n\t\/\/ Handle the exchange code to initiate a transport.\n\ttoken, err := conf.Exchange(oauth2.NoContext, code)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn conf.Client(oauth2.NoContext, token), token.RefreshToken\n}\n\nvar apiURL = \"https:\/\/googleads.googleapis.com\/v1\/customers\/\"\n\n\/\/ getAccount makes a HTTP request to Google Ads API customer account\n\/\/ endpoint and parses the JSON response.\nfunc (c *Config) getAccount(client *http.Client) (*bytes.Buffer, error) {\n\treq, err := http.NewRequest(\"GET\", apiURL+c.CustomerID, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"user-agent\", userAgent())\n\treq.Header.Set(\"developer-token\", c.ConfigFile.DevToken)\n\tif c.ConfigFile.LoginCustomerID != \"\" {\n\t\treq.Header.Set(\"login-customer-id\", c.ConfigFile.LoginCustomerID)\n\t}\n\n\tif c.Verbose {\n\t\tdump, err := httputil.DumpRequestOut(req, true)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error printing HTTP request: %s\", err)\n\t\t}\n\t\tlog.Printf(\"Making a HTTP Request to Google Ads API:\\n%v\\n\", c.sanitizeOutput(string(dump)))\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(resp.Body)\n\n\tvar jsonBody map[string]interface{}\n\tjson.Unmarshal(buf.Bytes(), &jsonBody)\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"A HTTP Status (%s) is returned while calling %s\", resp.Status, apiURL+c.CustomerID)\n\t}\n\n\tif jsonBody[\"error\"] != nil {\n\t\treturn nil, fmt.Errorf(jsonBody[\"error\"].(string))\n\t}\n\n\treturn buf, nil\n}\n\nfunc userAgent() string {\n\tua := \"google-ads-doctor\/\"\n\tif appVersion != \"\" {\n\t\tua += appVersion\n\t} else {\n\t\tua += \"source\"\n\t}\n\treturn ua\n}\n\nfunc (c *Config) sanitizeOutput(s string) string {\n\treturn strings.ReplaceAll(s, c.ConfigFile.DevToken, \"REDACTED\")\n}\n\n\/\/ ReadCustomerID retrieves the CID from stdin.\nfunc ReadCustomerID() string {\n\tfor {\n\t\tlog.Print(\"Please enter a Google Ads account ID:\")\n\t\tcustomerID := readStdin()\n\n\t\tif customerID != \"\" {\n\t\t\treturn strings.ReplaceAll(customerID, \"-\", \"\")\n\t\t}\n\t}\n}\n<commit_msg>Added documentation for userAgent()<commit_after>\/\/ Copyright 2019 Google LLC\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package oauth implements functions to diagnose the supported OAuth2 flows\n\/\/ (web and installed app flows) in a Google Ads API client library client\n\/\/ environment.\npackage oauth\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/googleads\/google-ads-doctor\/oauthdoctor\/diag\"\n\n\t\"golang.org\/x\/oauth2\"\n\t\"golang.org\/x\/oauth2\/google\"\n)\n\n\/\/ This is a list of error codes (not comprehensive) returned by Google OAuth2\n\/\/ endpoint based on Google Ads API scope.\nconst (\n\tAccessNotPermittedForManagerAccount = iota\n\tGoogleAdsAPIDisabled\n\tInvalidClientInfo\n\tInvalidRefreshToken\n\tInvalidCustomerID\n\tMissingDevToken\n\tUnauthenticated\n\tUnauthorized\n\tUnknownError\n\n\tGoogleAdsApiScope = \"https:\/\/www.googleapis.com\/auth\/adwords\"\n)\n\n\/\/ Config is a required configuration for diagnosing the OAuth2 flow based on\n\/\/ the client library configuration.\ntype Config struct {\n\tConfigFile diag.ConfigFile\n\tCustomerID string\n\tOAuthType string\n\tVerbose bool\n}\n\n\/\/ ConfigWriter allows replacement of key by a given value in a configuration.\ntype ConfigWriter interface {\n\tReplaceConfig(k, v string) string\n}\n\nvar (\n\tappVersion string\n\n\tstdinSanitizer = strings.NewReplacer(\"\\n\", \"\")\n\n\treadStdin = func() string {\n\t\treader := bufio.NewReader(os.Stdin)\n\t\tstr, err := reader.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error reading input (%s) from command line: %s\", str, err)\n\t\t}\n\n\t\treturn strings.TrimSpace(stdinSanitizer.Replace(str))\n\t}\n)\n\n\/\/ SimulateOAuthFlow simulates the OAuth2 flows supported by the Google Ads API\n\/\/ client libraries.\nfunc (c *Config) SimulateOAuthFlow() {\n\tswitch c.OAuthType {\n\tcase diag.Web:\n\t\tc.simulateWebFlow()\n\tcase diag.InstalledApp:\n\t\tc.simulateAppFlow()\n\tcase diag.ServiceAccount:\n\t\tc.simulateServiceAccFlow()\n\t}\n}\n\n\/\/ decodeError checks the JSON response in the error and determines the error\n\/\/ code.\nfunc (c *Config) decodeError(err error) int32 {\n\terrstr := err.Error()\n\n\tif strings.Contains(errstr, \"invalid_client\") {\n\t\t\/\/ Client ID and\/or secret is invalid\n\t\treturn InvalidClientInfo\n\t}\n\tif strings.Contains(errstr, \"unauthorized_client\") {\n\t\t\/\/ The given refresh token may not be generated with the given client ID\n\t\t\/\/ and secret\n\t\treturn Unauthorized\n\t}\n\tif strings.Contains(errstr, \"invalid_grant\") {\n\t\t\/\/ Refresh token is not valid for any users\n\t\treturn InvalidRefreshToken\n\t}\n\tif strings.Contains(errstr, \"refresh token is not set\") {\n\t\treturn InvalidRefreshToken\n\t}\n\tif strings.Contains(errstr, \"USER_PERMISSION_DENIED\") {\n\t\t\/\/ User doesn't have permission to access Google Ads account\n\t\treturn InvalidRefreshToken\n\t}\n\tif strings.Contains(errstr, \"\\\"PERMISSION_DENIED\\\"\") {\n\t\treturn GoogleAdsAPIDisabled\n\t}\n\tif strings.Contains(errstr, \"UNAUTHENTICATED\") {\n\t\treturn Unauthenticated\n\t}\n\tif strings.Contains(errstr, \"CANNOT_BE_EXECUTED_BY_MANAGER_ACCOUNT\") {\n\t\t\/\/ Request cannot be executed by a manager account\n\t\treturn AccessNotPermittedForManagerAccount\n\t}\n\tif strings.Contains(errstr, \"DEVELOPER_TOKEN_PARAMETER_MISSING\") {\n\t\treturn MissingDevToken\n\t}\n\tif strings.Contains(errstr, \"INVALID_CUSTOMER_ID\") {\n\t\treturn InvalidCustomerID\n\t}\n\treturn UnknownError\n}\n\n\/\/ diagnose handles the error by guiding the user to take appropriate\n\/\/ actions to fix the OAuth2 error based on the error code.\nfunc (c *Config) diagnose(err error) {\n\t\/\/ Print the given message from JSON response if there's any\n\tvar parsedMsg map[string]interface{}\n\tif err := json.Unmarshal([]byte(err.Error()), &parsedMsg); err == nil {\n\t\terrMsg := parsedMsg[\"error\"].(map[string]interface{})[\"message\"]\n\t\tlog.Print(\"JSON response error: \" + errMsg.(string))\n\t}\n\n\tswitch c.decodeError(err) {\n\tcase AccessNotPermittedForManagerAccount:\n\t\tlog.Print(\"ERROR: Your credentials are not permitted to access to a manager account.\" +\n\t\t\t\"\\nPlease create your credentials with a Google Ads account with manager access.\")\n\tcase GoogleAdsAPIDisabled:\n\t\tlog.Print(\"Press <Enter> to continue after you enable Google Ads API\")\n\t\treadStdin()\n\tcase InvalidClientInfo:\n\t\tlog.Print(\"ERROR: Your client ID and\/or client secret may be invalid.\")\n\t\treplaceCloudCredentials(&c.ConfigFile)\n\tcase InvalidRefreshToken, Unauthorized:\n\t\tlog.Print(\"ERROR: Your refresh token may be invalid.\")\n\tcase MissingDevToken:\n\t\tlog.Print(\"ERROR: Your developer token is missing in the configuration file\")\n\t\treplaceDevToken(&c.ConfigFile)\n\tcase Unauthenticated:\n\t\tlog.Print(\"ERROR: The login email may not have access to the given account.\")\n\tcase InvalidCustomerID:\n\t\tlog.Print(\"ERROR: You customer ID is invalid.\")\n\tdefault:\n\t\tvar helperText string\n\t\tswitch c.ConfigFile.OAuthType {\n\t\tcase diag.ServiceAccount:\n\t\t\thelperText = \"Please verify the path of JSON key file and impersonate email (or delegated email).\"\n\t\tcase diag.Web:\n\t\t\thelperText = \"Please verify your developer token, client ID and client secret.\"\n\t\tcase diag.InstalledApp:\n\t\t\thelperText = \"Please verify your developer token, client ID, client secret and refresh token.\"\n\t\t}\n\t\tlog.Print(\"ERROR: Your credentials are invalid but we cannot determine the exact error. \" + helperText)\n\t}\n}\n\nvar (\n\tgetClientID = func() string {\n\t\tfmt.Print(\"New Client ID >> \")\n\t\treturn readStdin()\n\t}\n\n\tgetClientSecret = func() string {\n\t\tfmt.Print(\"New Client Secret >> \")\n\t\treturn readStdin()\n\t}\n)\n\n\/\/ replaceCloudCredentials prompts the user to create a new client ID and\n\/\/ secret and to then enter them at the prompt. The values entered will\n\/\/ replace the existing values in the client library configuration file.\nfunc replaceCloudCredentials(c ConfigWriter) {\n\tlog.Print(\"Follow this guide to setup your OAuth2 client ID and client secret: \" +\n\t\t\"https:\/\/developers.google.com\/adwords\/api\/docs\/guides\/first-api-call#set_up_oauth2_authentication\")\n\n\tclientID := getClientID()\n\tclientSecret := getClientSecret()\n\n\tc.ReplaceConfig(diag.ClientID, clientID)\n\tc.ReplaceConfig(diag.ClientSecret, clientSecret)\n}\n\n\/\/ replaceDevToken guides the user to retrieve their developer token and\n\/\/ enter it at the prompt. The entered value will replace the existing\n\/\/ developer token in the client library configuration file.\nvar replaceDevToken = func(c ConfigWriter) {\n\tlog.Print(\"Please follow this guide to retrieve your developer token: \" +\n\t\t\"https:\/\/developers.google.com\/adwords\/api\/docs\/guides\/signup#step-2\")\n\tlog.Print(\"Pleae enter a new Developer Token here and it will replace \" +\n\t\t\"the one in your client library configuration file\")\n\n\tfmt.Print(\"New Developer Token >> \")\n\tdevToken := readStdin()\n\n\tc.ReplaceConfig(diag.DevToken, devToken)\n}\n\n\/\/ replaceRefreshToken asks the user if they want to replace the refresh\n\/\/ token in the configuration file with the newly generated value.\nfunc replaceRefreshToken(c ConfigWriter, refreshToken string) {\n\tlog.Print(\"Would you like to replace your refresh token in the \" +\n\t\t\"client library config file with the new one generated?\")\n\n\tfmt.Print(\"Enter Y for Yes [Anything else is No] >> \")\n\tanswer := readStdin()\n\n\tif answer == \"Y\" {\n\t\tc.ReplaceConfig(diag.RefreshToken, refreshToken)\n\t} else {\n\t\tlog.Print(\"Refresh token is NOT replaced\")\n\t}\n}\n\nvar oauthEndpoint = google.Endpoint\n\n\/\/ oauth2Conf creates a corresponding OAuth2 config struct based on the\n\/\/ given configuration details. This is only applicable when a refresh token\n\/\/ is not given.\nfunc (c *Config) oauth2Conf(redirectURL string) *oauth2.Config {\n\treturn &oauth2.Config{\n\t\tClientID: c.ConfigFile.ConfigKeys.ClientID,\n\t\tClientSecret: c.ConfigFile.ClientSecret,\n\t\tRedirectURL: redirectURL,\n\t\tScopes: []string{GoogleAdsApiScope},\n\t\tEndpoint: oauthEndpoint,\n\t}\n}\n\n\/\/ Given the auth code returned after the authentication and authorization\n\/\/ step, oauth2Client creates a HTTP client with an authorized access token.\nfunc (c *Config) oauth2Client(code string) (*http.Client, string) {\n\tconf := c.oauth2Conf(InstalledAppRedirectURL)\n\t\/\/ Handle the exchange code to initiate a transport.\n\ttoken, err := conf.Exchange(oauth2.NoContext, code)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn conf.Client(oauth2.NoContext, token), token.RefreshToken\n}\n\nvar apiURL = \"https:\/\/googleads.googleapis.com\/v1\/customers\/\"\n\n\/\/ getAccount makes a HTTP request to Google Ads API customer account\n\/\/ endpoint and parses the JSON response.\nfunc (c *Config) getAccount(client *http.Client) (*bytes.Buffer, error) {\n\treq, err := http.NewRequest(\"GET\", apiURL+c.CustomerID, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"user-agent\", userAgent())\n\treq.Header.Set(\"developer-token\", c.ConfigFile.DevToken)\n\tif c.ConfigFile.LoginCustomerID != \"\" {\n\t\treq.Header.Set(\"login-customer-id\", c.ConfigFile.LoginCustomerID)\n\t}\n\n\tif c.Verbose {\n\t\tdump, err := httputil.DumpRequestOut(req, true)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error printing HTTP request: %s\", err)\n\t\t}\n\t\tlog.Printf(\"Making a HTTP Request to Google Ads API:\\n%v\\n\", c.sanitizeOutput(string(dump)))\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(resp.Body)\n\n\tvar jsonBody map[string]interface{}\n\tjson.Unmarshal(buf.Bytes(), &jsonBody)\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"A HTTP Status (%s) is returned while calling %s\", resp.Status, apiURL+c.CustomerID)\n\t}\n\n\tif jsonBody[\"error\"] != nil {\n\t\treturn nil, fmt.Errorf(jsonBody[\"error\"].(string))\n\t}\n\n\treturn buf, nil\n}\n\n\/\/ userAgent returns a User-Agent HTTP header for this tool.\nfunc userAgent() string {\n\tua := \"google-ads-doctor\/\"\n\tif appVersion != \"\" {\n\t\tua += appVersion\n\t} else {\n\t\tua += \"source\"\n\t}\n\treturn ua\n}\n\nfunc (c *Config) sanitizeOutput(s string) string {\n\treturn strings.ReplaceAll(s, c.ConfigFile.DevToken, \"REDACTED\")\n}\n\n\/\/ ReadCustomerID retrieves the CID from stdin.\nfunc ReadCustomerID() string {\n\tfor {\n\t\tlog.Print(\"Please enter a Google Ads account ID:\")\n\t\tcustomerID := readStdin()\n\n\t\tif customerID != \"\" {\n\t\t\treturn strings.ReplaceAll(customerID, \"-\", \"\")\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage govmomi\n\nimport (\n\t\"github.com\/vmware\/govmomi\/vim25\/methods\"\n\t\"github.com\/vmware\/govmomi\/vim25\/types\"\n)\n\ntype GuestFileManager struct {\n\ttypes.ManagedObjectReference\n\n\tc *Client\n}\n\nfunc (m GuestFileManager) Reference() types.ManagedObjectReference {\n\treturn m.ManagedObjectReference\n}\n\nfunc (m GuestFileManager) ChangeFileAttributesInGuest(vm *VirtualMachine, auth types.BaseGuestAuthentication, guestFilePath string, fileAttributes types.BaseGuestFileAttributes) error {\n\treq := types.ChangeFileAttributesInGuest{\n\t\tThis: m.Reference(),\n\t\tVm: vm.Reference(),\n\t\tAuth: auth,\n\t\tGuestFilePath: guestFilePath,\n\t\tFileAttributes: fileAttributes,\n\t}\n\n\t_, err := methods.ChangeFileAttributesInGuest(m.c, &req)\n\treturn err\n}\n\nfunc (m GuestFileManager) CreateTemporaryDirectoryInGuest(vm *VirtualMachine, auth types.BaseGuestAuthentication, prefix, suffix string) (string, error) {\n\treq := types.CreateTemporaryDirectoryInGuest{\n\t\tThis: m.Reference(),\n\t\tVm: vm.Reference(),\n\t\tAuth: auth,\n\t\tPrefix: prefix,\n\t\tSuffix: suffix,\n\t}\n\n\tres, err := methods.CreateTemporaryDirectoryInGuest(m.c, &req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m GuestFileManager) CreateTemporaryFileInGuest(vm *VirtualMachine, auth types.BaseGuestAuthentication, prefix, suffix string) (string, error) {\n\treq := types.CreateTemporaryFileInGuest{\n\t\tThis: m.Reference(),\n\t\tVm: vm.Reference(),\n\t\tAuth: auth,\n\t\tPrefix: prefix,\n\t\tSuffix: suffix,\n\t}\n\n\tres, err := methods.CreateTemporaryFileInGuest(m.c, &req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m GuestFileManager) DeleteDirectoryInGuest(vm *VirtualMachine, auth types.BaseGuestAuthentication, directoryPath string, recursive bool) error {\n\treq := types.DeleteDirectoryInGuest{\n\t\tThis: m.Reference(),\n\t\tVm: vm.Reference(),\n\t\tAuth: auth,\n\t\tDirectoryPath: directoryPath,\n\t\tRecursive: recursive,\n\t}\n\n\t_, err := methods.DeleteDirectoryInGuest(m.c, &req)\n\treturn err\n}\n\nfunc (m GuestFileManager) DeleteFileInGuest(vm *VirtualMachine, auth types.BaseGuestAuthentication, filePath string, recursive bool) error {\n\treq := types.DeleteFileInGuest{\n\t\tThis: m.Reference(),\n\t\tVm: vm.Reference(),\n\t\tAuth: auth,\n\t\tFilePath: filePath,\n\t}\n\n\t_, err := methods.DeleteFileInGuest(m.c, &req)\n\treturn err\n}\n\nfunc (m GuestFileManager) InitiateFileTransferFromGuest(vm *VirtualMachine, auth types.BaseGuestAuthentication, guestFilePath string) (*types.FileTransferInformation, error) {\n\treq := types.InitiateFileTransferFromGuest{\n\t\tThis: m.Reference(),\n\t\tVm: vm.Reference(),\n\t\tAuth: auth,\n\t\tGuestFilePath: guestFilePath,\n\t}\n\n\tres, err := methods.InitiateFileTransferFromGuest(m.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n\nfunc (m GuestFileManager) InitiateFileTransferToGuest(vm *VirtualMachine, auth types.BaseGuestAuthentication, guestFilePath string, fileAttributes types.BaseGuestFileAttributes, fileSize int64, overwrite bool) (string, error) {\n\treq := types.InitiateFileTransferToGuest{\n\t\tThis: m.Reference(),\n\t\tVm: vm.Reference(),\n\t\tAuth: auth,\n\t\tGuestFilePath: guestFilePath,\n\t\tFileAttributes: fileAttributes,\n\t\tFileSize: fileSize,\n\t\tOverwrite: overwrite,\n\t}\n\n\tres, err := methods.InitiateFileTransferToGuest(m.c, &req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn res.Returnval, nil\n}\n<commit_msg>More GuestFileManager wrappers<commit_after>\/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage govmomi\n\nimport (\n\t\"github.com\/vmware\/govmomi\/vim25\/methods\"\n\t\"github.com\/vmware\/govmomi\/vim25\/types\"\n)\n\ntype GuestFileManager struct {\n\ttypes.ManagedObjectReference\n\n\tc *Client\n}\n\nfunc (m GuestFileManager) Reference() types.ManagedObjectReference {\n\treturn m.ManagedObjectReference\n}\n\nfunc (m GuestFileManager) ChangeFileAttributesInGuest(vm *VirtualMachine, auth types.BaseGuestAuthentication, guestFilePath string, fileAttributes types.BaseGuestFileAttributes) error {\n\treq := types.ChangeFileAttributesInGuest{\n\t\tThis: m.Reference(),\n\t\tVm: vm.Reference(),\n\t\tAuth: auth,\n\t\tGuestFilePath: guestFilePath,\n\t\tFileAttributes: fileAttributes,\n\t}\n\n\t_, err := methods.ChangeFileAttributesInGuest(m.c, &req)\n\treturn err\n}\n\nfunc (m GuestFileManager) CreateTemporaryDirectoryInGuest(vm *VirtualMachine, auth types.BaseGuestAuthentication, prefix, suffix string) (string, error) {\n\treq := types.CreateTemporaryDirectoryInGuest{\n\t\tThis: m.Reference(),\n\t\tVm: vm.Reference(),\n\t\tAuth: auth,\n\t\tPrefix: prefix,\n\t\tSuffix: suffix,\n\t}\n\n\tres, err := methods.CreateTemporaryDirectoryInGuest(m.c, &req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m GuestFileManager) CreateTemporaryFileInGuest(vm *VirtualMachine, auth types.BaseGuestAuthentication, prefix, suffix string) (string, error) {\n\treq := types.CreateTemporaryFileInGuest{\n\t\tThis: m.Reference(),\n\t\tVm: vm.Reference(),\n\t\tAuth: auth,\n\t\tPrefix: prefix,\n\t\tSuffix: suffix,\n\t}\n\n\tres, err := methods.CreateTemporaryFileInGuest(m.c, &req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m GuestFileManager) DeleteDirectoryInGuest(vm *VirtualMachine, auth types.BaseGuestAuthentication, directoryPath string, recursive bool) error {\n\treq := types.DeleteDirectoryInGuest{\n\t\tThis: m.Reference(),\n\t\tVm: vm.Reference(),\n\t\tAuth: auth,\n\t\tDirectoryPath: directoryPath,\n\t\tRecursive: recursive,\n\t}\n\n\t_, err := methods.DeleteDirectoryInGuest(m.c, &req)\n\treturn err\n}\n\nfunc (m GuestFileManager) DeleteFileInGuest(vm *VirtualMachine, auth types.BaseGuestAuthentication, filePath string, recursive bool) error {\n\treq := types.DeleteFileInGuest{\n\t\tThis: m.Reference(),\n\t\tVm: vm.Reference(),\n\t\tAuth: auth,\n\t\tFilePath: filePath,\n\t}\n\n\t_, err := methods.DeleteFileInGuest(m.c, &req)\n\treturn err\n}\n\nfunc (m GuestFileManager) InitiateFileTransferFromGuest(vm *VirtualMachine, auth types.BaseGuestAuthentication, guestFilePath string) (*types.FileTransferInformation, error) {\n\treq := types.InitiateFileTransferFromGuest{\n\t\tThis: m.Reference(),\n\t\tVm: vm.Reference(),\n\t\tAuth: auth,\n\t\tGuestFilePath: guestFilePath,\n\t}\n\n\tres, err := methods.InitiateFileTransferFromGuest(m.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n\nfunc (m GuestFileManager) InitiateFileTransferToGuest(vm *VirtualMachine, auth types.BaseGuestAuthentication, guestFilePath string, fileAttributes types.BaseGuestFileAttributes, fileSize int64, overwrite bool) (string, error) {\n\treq := types.InitiateFileTransferToGuest{\n\t\tThis: m.Reference(),\n\t\tVm: vm.Reference(),\n\t\tAuth: auth,\n\t\tGuestFilePath: guestFilePath,\n\t\tFileAttributes: fileAttributes,\n\t\tFileSize: fileSize,\n\t\tOverwrite: overwrite,\n\t}\n\n\tres, err := methods.InitiateFileTransferToGuest(m.c, &req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn res.Returnval, nil\n}\n\nfunc (m GuestFileManager) ListFilesInGuest(vm *VirtualMachine, auth types.BaseGuestAuthentication, filePath string, index int, maxResults int, matchPattern string) (*types.GuestListFileInfo, error) {\n\treq := types.ListFilesInGuest{\n\t\tThis: m.Reference(),\n\t\tVm: vm.Reference(),\n\t\tAuth: auth,\n\t\tFilePath: filePath,\n\t\tIndex: index,\n\t\tMaxResults: maxResults,\n\t\tMatchPattern: matchPattern,\n\t}\n\n\tres, err := methods.ListFilesInGuest(m.c, &req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &res.Returnval, nil\n}\n\nfunc (m GuestFileManager) MakeDirectoryInGuest(vm *VirtualMachine, auth types.BaseGuestAuthentication, directoryPath string, createParentDirectories bool) error {\n\treq := types.MakeDirectoryInGuest{\n\t\tThis: m.Reference(),\n\t\tVm: vm.Reference(),\n\t\tAuth: auth,\n\t\tDirectoryPath: directoryPath,\n\t\tCreateParentDirectories: createParentDirectories,\n\t}\n\n\t_, err := methods.MakeDirectoryInGuest(m.c, &req)\n\treturn err\n}\n\nfunc (m GuestFileManager) MoveDirectoryInGuest(vm *VirtualMachine, auth types.BaseGuestAuthentication, srcDirectoryPath string, dstDirectoryPath string) error {\n\treq := types.MoveDirectoryInGuest{\n\t\tThis: m.Reference(),\n\t\tVm: vm.Reference(),\n\t\tAuth: auth,\n\t\tSrcDirectoryPath: srcDirectoryPath,\n\t\tDstDirectoryPath: dstDirectoryPath,\n\t}\n\n\t_, err := methods.MoveDirectoryInGuest(m.c, &req)\n\treturn err\n}\n\nfunc (m GuestFileManager) MoveFileInGuest(vm *VirtualMachine, auth types.BaseGuestAuthentication, srcFilePath string, dstFilePath string, overwrite bool) error {\n\treq := types.MoveFileInGuest{\n\t\tThis: m.Reference(),\n\t\tVm: vm.Reference(),\n\t\tAuth: auth,\n\t\tSrcFilePath: srcFilePath,\n\t\tDstFilePath: dstFilePath,\n\t\tOverwrite: overwrite,\n\t}\n\n\t_, err := methods.MoveFileInGuest(m.c, &req)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Ebiten Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/go:build darwin && !ios\n\/\/ +build darwin,!ios\n\npackage metal\n\n\/\/ #cgo CFLAGS: -x objective-c\n\/\/ #cgo LDFLAGS: -framework Foundation\n\/\/\n\/\/ #import <Foundation\/Foundation.h>\n\/\/\n\/\/ static int getMacOSMajorVersion() {\n\/\/ NSOperatingSystemVersion version = [[NSProcessInfo processInfo] operatingSystemVersion];\n\/\/ return (int)version.majorVersion;\n\/\/ }\nimport \"C\"\n\nimport (\n\t\"github.com\/hajimehoshi\/ebiten\/v2\/internal\/graphicsdriver\/metal\/mtl\"\n\t\"github.com\/hajimehoshi\/ebiten\/v2\/internal\/graphicsdriver\/metal\/ns\"\n)\n\nvar macOSMajorVersion = int(C.getMacOSMajorVersion())\n\nfunc (v *view) setWindow(window uintptr) {\n\t\/\/ NSView can be updated e.g., fullscreen-state is switched.\n\tv.window = window\n\tv.windowChanged = true\n}\n\nfunc (v *view) setUIView(uiview uintptr) {\n\tpanic(\"metal: setUIView is not available on macOS\")\n}\n\nfunc (v *view) update() {\n\tif !v.windowChanged {\n\t\treturn\n\t}\n\n\tcocoaWindow := ns.NewWindow(v.window)\n\tcocoaWindow.ContentView().SetLayer(v.ml)\n\tcocoaWindow.ContentView().SetWantsLayer(true)\n\tv.windowChanged = false\n}\n\nfunc (v *view) usePresentsWithTransaction() bool {\n\t\/\/ On macOS 12 (or later), do not use presentsWithTransaction, or vsync doesn't work (#1885).\n\t\/\/ This works only for Metal. Unfortunately, there is not a good solution for OpenGL.\n\tif macOSMajorVersion >= 12 {\n\t\treturn false\n\t}\n\n\t\/\/ Disable presentsWithTransaction on the fullscreen mode (#1745).\n\treturn !v.vsyncDisabled\n}\n\nfunc (v *view) maximumDrawableCount() int {\n\t\/\/ When presentsWithTransaction is YES and triple buffering is enabled, nextDrawing returns immediately once every two times.\n\t\/\/ This makes FPS doubled. To avoid this, disable the triple buffering.\n\tif v.usePresentsWithTransaction() {\n\t\treturn 2\n\t}\n\treturn 3\n}\n\nconst (\n\tstorageMode = mtl.StorageModeManaged\n\tresourceStorageMode = mtl.ResourceStorageModeManaged\n)\n<commit_msg>Revert \"internal\/graphicsdriver\/metal: Bug fix: Vsync didn't work on macOS\"<commit_after>\/\/ Copyright 2019 The Ebiten Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/go:build darwin && !ios\n\/\/ +build darwin,!ios\n\npackage metal\n\nimport (\n\t\"github.com\/hajimehoshi\/ebiten\/v2\/internal\/graphicsdriver\/metal\/mtl\"\n\t\"github.com\/hajimehoshi\/ebiten\/v2\/internal\/graphicsdriver\/metal\/ns\"\n)\n\nfunc (v *view) setWindow(window uintptr) {\n\t\/\/ NSView can be updated e.g., fullscreen-state is switched.\n\tv.window = window\n\tv.windowChanged = true\n}\n\nfunc (v *view) setUIView(uiview uintptr) {\n\tpanic(\"metal: setUIView is not available on macOS\")\n}\n\nfunc (v *view) update() {\n\tif !v.windowChanged {\n\t\treturn\n\t}\n\n\tcocoaWindow := ns.NewWindow(v.window)\n\tcocoaWindow.ContentView().SetLayer(v.ml)\n\tcocoaWindow.ContentView().SetWantsLayer(true)\n\tv.windowChanged = false\n}\n\nfunc (v *view) usePresentsWithTransaction() bool {\n\t\/\/ Disable presentsWithTransaction on the fullscreen mode (#1745).\n\treturn !v.vsyncDisabled\n}\n\nfunc (v *view) maximumDrawableCount() int {\n\t\/\/ When presentsWithTransaction is YES and triple buffering is enabled, nextDrawing returns immediately once every two times.\n\t\/\/ This makes FPS doubled. To avoid this, disable the triple buffering.\n\tif v.usePresentsWithTransaction() {\n\t\treturn 2\n\t}\n\treturn 3\n}\n\nconst (\n\tstorageMode = mtl.StorageModeManaged\n\tresourceStorageMode = mtl.ResourceStorageModeManaged\n)\n<|endoftext|>"} {"text":"<commit_before>package provider\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"testing\"\n\n\t\"github.com\/google\/go-cmp\/cmp\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/terraform\"\n)\n\nfunc TestAccResourcePasswordBasic(t *testing.T) {\n\tresource.UnitTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviderFactories: testAccProviders,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: `resource \"random_password\" \"basic\" {\n\t\t\t\t\t\t\tlength = 12\n\t\t\t\t\t\t}`,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccResourceStringCheck(\"random_password.basic\", &customLens{\n\t\t\t\t\t\tcustomLen: 12,\n\t\t\t\t\t}),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceName: \"random_password.basic\",\n\t\t\t\t\/\/ Usage of ImportStateIdFunc is required as the value passed to the `terraform import` command needs\n\t\t\t\t\/\/ to be the password itself, as the password resource sets ID to \"none\" and \"result\" to the password\n\t\t\t\t\/\/ supplied during import.\n\t\t\t\tImportStateIdFunc: func(s *terraform.State) (string, error) {\n\t\t\t\t\tid := \"random_password.basic\"\n\t\t\t\t\trs, ok := s.RootModule().Resources[id]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn \"\", fmt.Errorf(\"not found: %s\", id)\n\t\t\t\t\t}\n\t\t\t\t\tif rs.Primary.ID == \"\" {\n\t\t\t\t\t\treturn \"\", fmt.Errorf(\"no ID is set\")\n\t\t\t\t\t}\n\n\t\t\t\t\treturn rs.Primary.Attributes[\"result\"], nil\n\t\t\t\t},\n\t\t\t\tImportState: true,\n\t\t\t\tImportStateVerify: true,\n\t\t\t\tImportStateVerifyIgnore: []string{\"bcrypt_hash\", \"length\", \"lower\", \"number\", \"special\", \"upper\", \"min_lower\", \"min_numeric\", \"min_special\", \"min_upper\", \"override_special\"},\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccResourcePasswordOverride(t *testing.T) {\n\tresource.UnitTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviderFactories: testAccProviders,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: `resource \"random_password\" \"override\" {\n\t\t\t\t\t\t\tlength = 4\n\t\t\t\t\t\t\toverride_special = \"!\"\n\t\t\t\t\t\t\tlower = false\n\t\t\t\t\t\t\tupper = false\n\t\t\t\t\t\t\tnumber = false\n\t\t\t\t\t\t}`,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccResourceStringCheck(\"random_password.override\", &customLens{\n\t\t\t\t\t\tcustomLen: 4,\n\t\t\t\t\t}),\n\t\t\t\t\tpatternMatch(\"random_password.override\", \"!!!!\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccResourcePasswordMin(t *testing.T) {\n\tresource.UnitTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviderFactories: testAccProviders,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: `resource \"random_password\" \"min\" {\n\t\t\t\t\t\t\tlength = 12\n\t\t\t\t\t\t\toverride_special = \"!#@\"\n\t\t\t\t\t\t\tmin_lower = 2\n\t\t\t\t\t\t\tmin_upper = 3\n\t\t\t\t\t\t\tmin_special = 1\n\t\t\t\t\t\t\tmin_numeric = 4\n\t\t\t\t\t\t}`,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccResourceStringCheck(\"random_password.min\", &customLens{\n\t\t\t\t\t\tcustomLen: 12,\n\t\t\t\t\t}),\n\t\t\t\t\tregexMatch(\"random_password.min\", regexp.MustCompile(`([a-z])`), 2),\n\t\t\t\t\tregexMatch(\"random_password.min\", regexp.MustCompile(`([A-Z])`), 3),\n\t\t\t\t\tregexMatch(\"random_password.min\", regexp.MustCompile(`([0-9])`), 4),\n\t\t\t\t\tregexMatch(\"random_password.min\", regexp.MustCompile(`([!#@])`), 1),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestResourcePasswordStateUpgradeV0(t *testing.T) {\n\tcases := []struct {\n\t\tname string\n\t\tstateV0 map[string]interface{}\n\t\tshouldError bool\n\t\terrMsg string\n\t\texpectedStateV1 map[string]interface{}\n\t}{\n\t\t{\n\t\t\tname: \"result is not string\",\n\t\t\tstateV0: map[string]interface{}{\"result\": 0},\n\t\t\tshouldError: true,\n\t\t\terrMsg: \"resource password state upgrade failed, result could not be asserted as string: int\",\n\t\t},\n\t\t{\n\t\t\tname: \"success\",\n\t\t\tstateV0: map[string]interface{}{\"result\": \"abc123\"},\n\t\t\tshouldError: false,\n\t\t\texpectedStateV1: map[string]interface{}{\"result\": \"abc123\", \"bcrypt_hash\": \"123\"},\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tt.Run(c.name, func(t *testing.T) {\n\t\t\tactualStateV1, err := resourcePasswordStateUpgradeV0(context.Background(), c.stateV0, nil)\n\n\t\t\tif c.shouldError {\n\t\t\t\tif !cmp.Equal(c.errMsg, err.Error()) {\n\t\t\t\t\tt.Errorf(\"expected: %q, got: %q\", c.errMsg, err)\n\t\t\t\t}\n\t\t\t\tif !cmp.Equal(c.expectedStateV1, actualStateV1) {\n\t\t\t\t\tt.Errorf(\"expected: %+v, got: %+v\", c.expectedStateV1, err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"err should be nil, actual: %v\", err)\n\t\t\t\t}\n\n\t\t\t\tfor k := range c.expectedStateV1 {\n\t\t\t\t\t_, ok := actualStateV1[k]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tt.Errorf(\"expected key: %s is missing from state\", k)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestResourcePasswordStateUpgradeV1(t *testing.T) {\n\tcases := []struct {\n\t\tname string\n\t\tstateV1 map[string]interface{}\n\t\tshouldError bool\n\t\terrMsg string\n\t\texpectedStateV2 map[string]interface{}\n\t}{\n\t\t{\n\t\t\tname: \"number is not bool\",\n\t\t\tstateV1: map[string]interface{}{\"number\": 0},\n\t\t\tshouldError: true,\n\t\t\terrMsg: \"resource password state upgrade failed, number could not be asserted as bool: int\",\n\t\t},\n\t\t{\n\t\t\tname: \"success\",\n\t\t\tstateV1: map[string]interface{}{\"number\": true},\n\t\t\tshouldError: false,\n\t\t\texpectedStateV2: map[string]interface{}{\"number\": true, \"numeric\": true},\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tt.Run(c.name, func(t *testing.T) {\n\t\t\tactualStateV2, err := resourcePasswordStateUpgradeV1(context.Background(), c.stateV1, nil)\n\n\t\t\tif c.shouldError {\n\t\t\t\tif !cmp.Equal(c.errMsg, err.Error()) {\n\t\t\t\t\tt.Errorf(\"expected: %q, got: %q\", c.errMsg, err)\n\t\t\t\t}\n\t\t\t\tif !cmp.Equal(c.expectedStateV2, actualStateV2) {\n\t\t\t\t\tt.Errorf(\"expected: %+v, got: %+v\", c.expectedStateV2, err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"err should be nil, actual: %v\", err)\n\t\t\t\t}\n\n\t\t\t\tfor k := range c.expectedStateV2 {\n\t\t\t\t\t_, ok := actualStateV2[k]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tt.Errorf(\"expected key: %s is missing from state\", k)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n<commit_msg>Adding test for updating number and numeric<commit_after>package provider\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"testing\"\n\n\t\"github.com\/google\/go-cmp\/cmp\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/terraform\"\n)\n\nfunc TestAccResourcePasswordBasic(t *testing.T) {\n\tresource.UnitTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviderFactories: testAccProviders,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: `resource \"random_password\" \"basic\" {\n\t\t\t\t\t\t\tlength = 12\n\t\t\t\t\t\t}`,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccResourceStringCheck(\"random_password.basic\", &customLens{\n\t\t\t\t\t\tcustomLen: 12,\n\t\t\t\t\t}),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceName: \"random_password.basic\",\n\t\t\t\t\/\/ Usage of ImportStateIdFunc is required as the value passed to the `terraform import` command needs\n\t\t\t\t\/\/ to be the password itself, as the password resource sets ID to \"none\" and \"result\" to the password\n\t\t\t\t\/\/ supplied during import.\n\t\t\t\tImportStateIdFunc: func(s *terraform.State) (string, error) {\n\t\t\t\t\tid := \"random_password.basic\"\n\t\t\t\t\trs, ok := s.RootModule().Resources[id]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn \"\", fmt.Errorf(\"not found: %s\", id)\n\t\t\t\t\t}\n\t\t\t\t\tif rs.Primary.ID == \"\" {\n\t\t\t\t\t\treturn \"\", fmt.Errorf(\"no ID is set\")\n\t\t\t\t\t}\n\n\t\t\t\t\treturn rs.Primary.Attributes[\"result\"], nil\n\t\t\t\t},\n\t\t\t\tImportState: true,\n\t\t\t\tImportStateVerify: true,\n\t\t\t\tImportStateVerifyIgnore: []string{\"bcrypt_hash\", \"length\", \"lower\", \"number\", \"special\", \"upper\", \"min_lower\", \"min_numeric\", \"min_special\", \"min_upper\", \"override_special\"},\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccResourcePasswordOverride(t *testing.T) {\n\tresource.UnitTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviderFactories: testAccProviders,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: `resource \"random_password\" \"override\" {\n\t\t\t\t\t\t\tlength = 4\n\t\t\t\t\t\t\toverride_special = \"!\"\n\t\t\t\t\t\t\tlower = false\n\t\t\t\t\t\t\tupper = false\n\t\t\t\t\t\t\tnumber = false\n\t\t\t\t\t\t}`,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccResourceStringCheck(\"random_password.override\", &customLens{\n\t\t\t\t\t\tcustomLen: 4,\n\t\t\t\t\t}),\n\t\t\t\t\tpatternMatch(\"random_password.override\", \"!!!!\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccResourcePasswordMin(t *testing.T) {\n\tresource.UnitTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviderFactories: testAccProviders,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: `resource \"random_password\" \"min\" {\n\t\t\t\t\t\t\tlength = 12\n\t\t\t\t\t\t\toverride_special = \"!#@\"\n\t\t\t\t\t\t\tmin_lower = 2\n\t\t\t\t\t\t\tmin_upper = 3\n\t\t\t\t\t\t\tmin_special = 1\n\t\t\t\t\t\t\tmin_numeric = 4\n\t\t\t\t\t\t}`,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccResourceStringCheck(\"random_password.min\", &customLens{\n\t\t\t\t\t\tcustomLen: 12,\n\t\t\t\t\t}),\n\t\t\t\t\tregexMatch(\"random_password.min\", regexp.MustCompile(`([a-z])`), 2),\n\t\t\t\t\tregexMatch(\"random_password.min\", regexp.MustCompile(`([A-Z])`), 3),\n\t\t\t\t\tregexMatch(\"random_password.min\", regexp.MustCompile(`([0-9])`), 4),\n\t\t\t\t\tregexMatch(\"random_password.min\", regexp.MustCompile(`([!#@])`), 1),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccResourcePassword_UpdateNumberAndNumeric(t *testing.T) {\n\tt.Parallel()\n\tresource.UnitTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviderFactories: testAccProviders,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: `resource \"random_password\" \"default\" {\n\t\t\t\t\t\t\tlength = 12\n\t\t\t\t\t\t}`,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\tresource.TestCheckResourceAttr(\"random_password.default\", \"number\", \"true\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"random_password.default\", \"numeric\", \"true\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: `resource \"random_password\" \"default\" {\n\t\t\t\t\t\t\tlength = 12\n\t\t\t\t\t\t\tnumber = false\n\t\t\t\t\t\t}`,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\tresource.TestCheckResourceAttr(\"random_password.default\", \"number\", \"false\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"random_password.default\", \"numeric\", \"false\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: `resource \"random_password\" \"default\" {\n\t\t\t\t\t\t\tlength = 12\n\t\t\t\t\t\t\tnumeric = true\n\t\t\t\t\t\t}`,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\tresource.TestCheckResourceAttr(\"random_password.default\", \"number\", \"true\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"random_password.default\", \"numeric\", \"true\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: `resource \"random_password\" \"default\" {\n\t\t\t\t\t\t\tlength = 12\n\t\t\t\t\t\t\tnumeric = false\n\t\t\t\t\t\t}`,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\tresource.TestCheckResourceAttr(\"random_password.default\", \"number\", \"false\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"random_password.default\", \"numeric\", \"false\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: `resource \"random_password\" \"default\" {\n\t\t\t\t\t\t\tlength = 12\n\t\t\t\t\t\t}`,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\tresource.TestCheckResourceAttr(\"random_password.default\", \"number\", \"true\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"random_password.default\", \"numeric\", \"true\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestResourcePasswordStateUpgradeV0(t *testing.T) {\n\tcases := []struct {\n\t\tname string\n\t\tstateV0 map[string]interface{}\n\t\tshouldError bool\n\t\terrMsg string\n\t\texpectedStateV1 map[string]interface{}\n\t}{\n\t\t{\n\t\t\tname: \"result is not string\",\n\t\t\tstateV0: map[string]interface{}{\"result\": 0},\n\t\t\tshouldError: true,\n\t\t\terrMsg: \"resource password state upgrade failed, result could not be asserted as string: int\",\n\t\t},\n\t\t{\n\t\t\tname: \"success\",\n\t\t\tstateV0: map[string]interface{}{\"result\": \"abc123\"},\n\t\t\tshouldError: false,\n\t\t\texpectedStateV1: map[string]interface{}{\"result\": \"abc123\", \"bcrypt_hash\": \"123\"},\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tt.Run(c.name, func(t *testing.T) {\n\t\t\tactualStateV1, err := resourcePasswordStateUpgradeV0(context.Background(), c.stateV0, nil)\n\n\t\t\tif c.shouldError {\n\t\t\t\tif !cmp.Equal(c.errMsg, err.Error()) {\n\t\t\t\t\tt.Errorf(\"expected: %q, got: %q\", c.errMsg, err)\n\t\t\t\t}\n\t\t\t\tif !cmp.Equal(c.expectedStateV1, actualStateV1) {\n\t\t\t\t\tt.Errorf(\"expected: %+v, got: %+v\", c.expectedStateV1, err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"err should be nil, actual: %v\", err)\n\t\t\t\t}\n\n\t\t\t\tfor k := range c.expectedStateV1 {\n\t\t\t\t\t_, ok := actualStateV1[k]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tt.Errorf(\"expected key: %s is missing from state\", k)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestResourcePasswordStateUpgradeV1(t *testing.T) {\n\tcases := []struct {\n\t\tname string\n\t\tstateV1 map[string]interface{}\n\t\tshouldError bool\n\t\terrMsg string\n\t\texpectedStateV2 map[string]interface{}\n\t}{\n\t\t{\n\t\t\tname: \"number is not bool\",\n\t\t\tstateV1: map[string]interface{}{\"number\": 0},\n\t\t\tshouldError: true,\n\t\t\terrMsg: \"resource password state upgrade failed, number could not be asserted as bool: int\",\n\t\t},\n\t\t{\n\t\t\tname: \"success\",\n\t\t\tstateV1: map[string]interface{}{\"number\": true},\n\t\t\tshouldError: false,\n\t\t\texpectedStateV2: map[string]interface{}{\"number\": true, \"numeric\": true},\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tt.Run(c.name, func(t *testing.T) {\n\t\t\tactualStateV2, err := resourcePasswordStateUpgradeV1(context.Background(), c.stateV1, nil)\n\n\t\t\tif c.shouldError {\n\t\t\t\tif !cmp.Equal(c.errMsg, err.Error()) {\n\t\t\t\t\tt.Errorf(\"expected: %q, got: %q\", c.errMsg, err)\n\t\t\t\t}\n\t\t\t\tif !cmp.Equal(c.expectedStateV2, actualStateV2) {\n\t\t\t\t\tt.Errorf(\"expected: %+v, got: %+v\", c.expectedStateV2, err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"err should be nil, actual: %v\", err)\n\t\t\t\t}\n\n\t\t\t\tfor k := range c.expectedStateV2 {\n\t\t\t\t\t_, ok := actualStateV2[k]\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tt.Errorf(\"expected key: %s is missing from state\", k)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package restclient\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\ntype gubleSender struct {\n\tEndpoint string\n\thttpClient *http.Client\n}\n\n\/\/ New returns a new Sender.\nfunc New(endpoint string) Sender {\n\treturn &gubleSender{\n\t\tEndpoint: endpoint,\n\t\thttpClient: &http.Client{},\n\t}\n}\n\nfunc (gs gubleSender) Check() bool {\n\trequest, err := http.NewRequest(http.MethodHead, gs.Endpoint, nil)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"error creating request url\")\n\t\treturn false\n\t}\n\tresponse, err := gs.httpClient.Do(request)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"error reaching guble server endpoint\")\n\t\treturn false\n\t}\n\tdefer response.Body.Close()\n\treturn response.StatusCode == http.StatusOK\n}\n\nfunc (gs gubleSender) Send(topic string, body []byte, userID string) error {\n\turl := fmt.Sprintf(\"%s%s?userId=%s\",\n\t\tstrings.TrimPrefix(gs.Endpoint, \"\/\"), topic, userID)\n\trequest, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(body))\n\tif err != nil {\n\t\treturn err\n\t}\n\tresponse, err := gs.httpClient.Do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode != http.StatusOK {\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"header\": response.Header,\n\t\t\t\"code\": response.StatusCode,\n\t\t\t\"status\": response.Status,\n\t\t}).Error(\"Guble response error\")\n\t\treturn fmt.Errorf(\"Error code returned from guble: %d\", response.StatusCode)\n\t}\n\treturn nil\n}\n<commit_msg>updating rest client to to insert \/ and use filter<commit_after>package restclient\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\ntype gubleSender struct {\n\tEndpoint string\n\thttpClient *http.Client\n}\n\n\/\/ New returns a new Sender.\nfunc New(endpoint string) Sender {\n\treturn &gubleSender{\n\t\tEndpoint: endpoint,\n\t\thttpClient: &http.Client{},\n\t}\n}\n\nfunc (gs gubleSender) Check() bool {\n\trequest, err := http.NewRequest(http.MethodHead, gs.Endpoint, nil)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"error creating request url\")\n\t\treturn false\n\t}\n\tresponse, err := gs.httpClient.Do(request)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"error reaching guble server endpoint\")\n\t\treturn false\n\t}\n\tdefer response.Body.Close()\n\treturn response.StatusCode == http.StatusOK\n}\n\nfunc (gs gubleSender) Send(topic string, body []byte, userID string) error {\n\turl := fmt.Sprintf(\"%s\/%s?userId=%s&filterUserID\",\n\t\tstrings.TrimPrefix(gs.Endpoint, \"\/\"), topic, userID)\n\trequest, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(body))\n\tif err != nil {\n\t\treturn err\n\t}\n\tresponse, err := gs.httpClient.Do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode != http.StatusOK {\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"header\": response.Header,\n\t\t\t\"code\": response.StatusCode,\n\t\t\t\"status\": response.Status,\n\t\t}).Error(\"Guble response error\")\n\t\treturn fmt.Errorf(\"Error code returned from guble: %d\", response.StatusCode)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package restclient\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\ntype gubleSender struct {\n\tEndpoint string\n\thttpClient *http.Client\n}\n\n\/\/ New returns a new Sender.\nfunc New(endpoint string) Sender {\n\treturn &gubleSender{\n\t\tEndpoint: endpoint,\n\t\thttpClient: &http.Client{},\n\t}\n}\n\nfunc (gs gubleSender) Check() bool {\n\trequest, err := http.NewRequest(http.MethodHead, gs.Endpoint, nil)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"error creating request url\")\n\t\treturn false\n\t}\n\tresponse, err := gs.httpClient.Do(request)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"error reaching guble server endpoint\")\n\t\treturn false\n\t}\n\treturn response.StatusCode == http.StatusOK\n}\n\nfunc (gs gubleSender) Send(topic string, body []byte, userID string) error {\n\turl := fmt.Sprintf(\"%s%s?userId=%s\",\n\t\tstrings.TrimPrefix(gs.Endpoint, \"\/\"), topic, userID)\n\trequest, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(body))\n\tif err != nil {\n\t\treturn err\n\t}\n\tresponse, err := gs.httpClient.Do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode != http.StatusOK {\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"header\": response.Header,\n\t\t\t\"code\": response.StatusCode,\n\t\t\t\"status\": response.Status,\n\t\t}).Error(\"Guble response error\")\n\t\treturn fmt.Errorf(\"Error code returned from guble: %d\", response.StatusCode)\n\t}\n\treturn nil\n}\n<commit_msg>adding Close() in restclient Check()<commit_after>package restclient\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\ntype gubleSender struct {\n\tEndpoint string\n\thttpClient *http.Client\n}\n\n\/\/ New returns a new Sender.\nfunc New(endpoint string) Sender {\n\treturn &gubleSender{\n\t\tEndpoint: endpoint,\n\t\thttpClient: &http.Client{},\n\t}\n}\n\nfunc (gs gubleSender) Check() bool {\n\trequest, err := http.NewRequest(http.MethodHead, gs.Endpoint, nil)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"error creating request url\")\n\t\treturn false\n\t}\n\tresponse, err := gs.httpClient.Do(request)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"error reaching guble server endpoint\")\n\t\treturn false\n\t}\n\tdefer response.Body.Close()\n\treturn response.StatusCode == http.StatusOK\n}\n\nfunc (gs gubleSender) Send(topic string, body []byte, userID string) error {\n\turl := fmt.Sprintf(\"%s%s?userId=%s\",\n\t\tstrings.TrimPrefix(gs.Endpoint, \"\/\"), topic, userID)\n\trequest, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(body))\n\tif err != nil {\n\t\treturn err\n\t}\n\tresponse, err := gs.httpClient.Do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\n\tif response.StatusCode != http.StatusOK {\n\t\tlogger.WithFields(log.Fields{\n\t\t\t\"header\": response.Header,\n\t\t\t\"code\": response.StatusCode,\n\t\t\t\"status\": response.Status,\n\t\t}).Error(\"Guble response error\")\n\t\treturn fmt.Errorf(\"Error code returned from guble: %d\", response.StatusCode)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Google Inc. All rights reserved.\n\/\/ Copyright 2016 the gousb Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ rawread attempts to read from the specified USB device.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/kylelemons\/gousb\/usb\"\n\t\"github.com\/kylelemons\/gousb\/usbid\"\n)\n\nvar (\n\tdevice = flag.String(\"device\", \"vend:prod\", \"Device to which to connect\")\n\tconfig = flag.Int(\"config\", 1, \"Endpoint to which to connect\")\n\tiface = flag.Int(\"interface\", 0, \"Endpoint to which to connect\")\n\tsetup = flag.Int(\"setup\", 0, \"Endpoint to which to connect\")\n\tendpoint = flag.Int(\"endpoint\", 1, \"Endpoint to which to connect\")\n\tdebug = flag.Int(\"debug\", 3, \"Debug level for libusb\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\t\/\/ Only one context should be needed for an application. It should always be closed.\n\tctx := usb.NewContext()\n\tdefer ctx.Close()\n\n\tctx.Debug(*debug)\n\n\tlog.Printf(\"Scanning for device %q...\", *device)\n\n\t\/\/ ListDevices is used to find the devices to open.\n\tdevs, err := ctx.ListDevices(func(desc *usb.Descriptor) bool {\n\t\tif fmt.Sprintf(\"%s:%s\", desc.Vendor, desc.Product) != *device {\n\t\t\treturn false\n\t\t}\n\n\t\t\/\/ The usbid package can be used to print out human readable information.\n\t\tfmt.Printf(\" Protocol: %s\\n\", usbid.Classify(desc))\n\n\t\t\/\/ The configurations can be examined from the Descriptor, though they can only\n\t\t\/\/ be set once the device is opened. All configuration references must be closed,\n\t\t\/\/ to free up the memory in libusb.\n\t\tfor _, cfg := range desc.Configs {\n\t\t\t\/\/ This loop just uses more of the built-in and usbid pretty printing to list\n\t\t\t\/\/ the USB devices.\n\t\t\tfmt.Printf(\" %s:\\n\", cfg)\n\t\t\tfor _, alt := range cfg.Interfaces {\n\t\t\t\tfmt.Printf(\" --------------\\n\")\n\t\t\t\tfor _, iface := range alt.Setups {\n\t\t\t\t\tfmt.Printf(\" %s\\n\", iface)\n\t\t\t\t\tfmt.Printf(\" %s\\n\", usbid.Classify(iface))\n\t\t\t\t\tfor _, end := range iface.Endpoints {\n\t\t\t\t\t\tfmt.Printf(\" %s\\n\", end)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\" --------------\\n\")\n\t\t}\n\n\t\treturn true\n\t})\n\n\t\/\/ All Devices returned from ListDevices must be closed.\n\tdefer func() {\n\t\tfor _, d := range devs {\n\t\t\td.Close()\n\t\t}\n\t}()\n\n\t\/\/ ListDevices can occaionally fail, so be sure to check its return value.\n\tif err != nil {\n\t\tlog.Fatalf(\"list: %s\", err)\n\t}\n\n\tif len(devs) == 0 {\n\t\tlog.Fatalf(\"no devices found\")\n\t}\n\n\tdev := devs[0]\n\n\tlog.Printf(\"Connecting to endpoint...\")\n\tlog.Printf(\"- %#v\", dev.Descriptor)\n\tep, err := dev.OpenEndpoint(uint8(*config), uint8(*iface), uint8(*setup), uint8(*endpoint)|uint8(usb.ENDPOINT_DIR_IN))\n\tif err != nil {\n\t\tlog.Fatalf(\"open: %s\", err)\n\t}\n\t_ = ep\n}\n<commit_msg>Change how device is addressed, add alternative --bus\/--addr flags. Print all diagnostics to the log (stderr). Capture the data from the specified endpoint and print it to stdout.<commit_after>\/\/ Copyright 2013 Google Inc. All rights reserved.\n\/\/ Copyright 2016 the gousb Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ rawread attempts to read from the specified USB device.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/kylelemons\/gousb\/usb\"\n\t\"github.com\/kylelemons\/gousb\/usbid\"\n)\n\nvar (\n\tvid = flag.Uint(\"vid\", 0, \"VID of the device to which to connect. Exclusive with bus\/addr flags.\")\n\tpid = flag.Uint(\"pid\", 0, \"PID of the device to which to connect. Exclusive with bus\/addr flags.\")\n\tbus = flag.Uint(\"bus\", 0, \"Bus number for the device to which to connect. Exclusive with vid\/pid flags.\")\n\taddr = flag.Uint(\"addr\", 0, \"Address of the device to which to connect. Exclusive with vid\/pid flags.\")\n\tconfig = flag.Uint(\"config\", 1, \"Endpoint to which to connect\")\n\tiface = flag.Uint(\"interface\", 0, \"Endpoint to which to connect\")\n\tsetup = flag.Uint(\"setup\", 0, \"Endpoint to which to connect\")\n\tendpoint = flag.Uint(\"endpoint\", 1, \"Endpoint to which to connect\")\n\tdebug = flag.Int(\"debug\", 3, \"Debug level for libusb\")\n\tsize = flag.Uint(\"read_size\", 1024, \"Maximum number of bytes of data to read. Collected will be printed to STDOUT.\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\t\/\/ Only one context should be needed for an application. It should always be closed.\n\tctx := usb.NewContext()\n\tdefer ctx.Close()\n\n\tctx.Debug(*debug)\n\n\tvar devName string\n\tswitch {\n\tcase *vid == 0 && *pid == 0 && *bus == 0 && *addr == 0:\n\t\tlog.Fatal(\"You need to specify the device, either through --vid\/--pid flags or through --bus\/--addr flags.\")\n\tcase (*vid > 0 || *pid > 0) && (*bus > 0 || *addr > 0):\n\t\tlog.Fatal(\"You can't use --vid\/--pid flags at the same time as --bus\/--addr.\")\n\tcase *vid > 0 || *pid > 0:\n\t\tdevName = fmt.Sprintf(\"VID:PID %04x:%04x\", *vid, *pid)\n\tdefault:\n\t\tdevName = fmt.Sprintf(\"bus:addr %d:%d\", *bus, *addr)\n\t}\n\n\tlog.Printf(\"Scanning for device %q...\", devName)\n\t\/\/ ListDevices is used to find the devices to open.\n\tdevs, err := ctx.ListDevices(func(desc *usb.Descriptor) bool {\n\t\tswitch {\n\t\tcase usb.ID(*vid) == desc.Vendor && usb.ID(*pid) == desc.Product:\n\t\t\treturn true\n\t\tcase uint8(*bus) == desc.Bus && uint8(*addr) == desc.Address:\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t})\n\t\/\/ All Devices returned from ListDevices must be closed.\n\tdefer func() {\n\t\tfor _, d := range devs {\n\t\t\td.Close()\n\t\t}\n\t}()\n\n\t\/\/ ListDevices can occasionally fail, so be sure to check its return value.\n\tif err != nil {\n\t\tlog.Printf(\"Warning: ListDevices: %s.\", err)\n\t}\n\tswitch {\n\tcase len(devs) == 0:\n\t\tlog.Fatal(\"No matching devices found.\")\n\tcase len(devs) > 1:\n\t\tlog.Printf(\"Warning: multiple devices found. Using bus %d, addr %d.\", devs[0].Bus, devs[0].Address)\n\t\tfor _, d := range devs[1:] {\n\t\t\td.Close()\n\t\t}\n\t\tdevs = devs[:1]\n\t}\n\tdev := devs[0]\n\n\t\/\/ The usbid package can be used to print out human readable information.\n\tlog.Printf(\" Protocol: %s\\n\", usbid.Classify(dev.Descriptor))\n\n\t\/\/ The configurations can be examined from the Descriptor, though they can only\n\t\/\/ be set once the device is opened. All configuration references must be closed,\n\t\/\/ to free up the memory in libusb.\n\tfor _, cfg := range dev.Configs {\n\t\t\/\/ This loop just uses more of the built-in and usbid pretty printing to list\n\t\t\/\/ the USB devices.\n\t\tlog.Printf(\" %s:\\n\", cfg)\n\t\tfor _, alt := range cfg.Interfaces {\n\t\t\tlog.Printf(\" --------------\\n\")\n\t\t\tfor _, iface := range alt.Setups {\n\t\t\t\tlog.Printf(\" %s\\n\", iface)\n\t\t\t\tlog.Printf(\" %s\\n\", usbid.Classify(iface))\n\t\t\t\tfor _, end := range iface.Endpoints {\n\t\t\t\t\tlog.Printf(\" %s\\n\", end)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tlog.Printf(\" --------------\\n\")\n\t}\n\n\tlog.Printf(\"Connecting to endpoint...\")\n\tep, err := dev.OpenEndpoint(uint8(*config), uint8(*iface), uint8(*setup), uint8(*endpoint)|uint8(usb.ENDPOINT_DIR_IN))\n\tif err != nil {\n\t\tlog.Fatalf(\"open: %s\", err)\n\t}\n\n\tbuf := make([]byte, *size)\n\tnum, err := ep.Read(buf)\n\tif err != nil {\n\t\tlog.Fatalf(\"Reading from device failed: %v\", err)\n\t}\n\tlog.Printf(\"Read %d bytes of data\", num)\n\tos.Stdout.Write(buf[:num])\n}\n<|endoftext|>"} {"text":"<commit_before>package kite\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/op\/go-logging\"\n\t\"koding\/newkite\/dnode\/rpc\"\n\t\"koding\/newkite\/protocol\"\n\t\"koding\/newkite\/utils\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"syscall\"\n\t\"time\"\n)\n\n\/\/ Debugging helper.\nfunc init() {\n\t\/\/ Print stacktrace on SIGUSR1.\n\tc := make(chan os.Signal)\n\tsignal.Notify(c, syscall.SIGUSR1)\n\tgo func() {\n\t\tfor {\n\t\t\ts := <-c\n\t\t\tfmt.Println(\"Got signal:\", s)\n\t\t\tbuf := make([]byte, 1<<16)\n\t\t\truntime.Stack(buf, true)\n\t\t\tfmt.Println(string(buf))\n\t\t\tfmt.Println(\"Number of goroutines:\", runtime.NumGoroutine())\n\t\t}\n\t}()\n}\n\n\/\/ Kite defines a single process that enables distributed service messaging\n\/\/ amongst the peers it is connected. A Kite process acts as a Client and as a\n\/\/ Server. That means it can receive request, process them, but it also can\n\/\/ make request to other kites. A Kite can be anything. It can be simple Image\n\/\/ processing kite (which would process data), it could be a Chat kite that\n\/\/ enables peer-to-peer chat. For examples we have FileSystem kite that expose\n\/\/ the file system to a client, which in order build the filetree.\ntype Kite struct {\n\tprotocol.Kite\n\n\t\/\/ KodingKey is used for authenticate to Kontrol.\n\tKodingKey string\n\n\t\/\/ Is this Kite Public or Private? Default is Private.\n\tVisibility protocol.Visibility\n\n\t\/\/ Points to the Kontrol instance if enabled\n\tKontrol *Kontrol\n\n\t\/\/ Wheter we want to connect to Kontrol on startup, true by default.\n\tKontrolEnabled bool\n\n\t\/\/ Wheter we want to register our Kite to Kontrol, true by default.\n\tRegisterToKontrol bool\n\n\t\/\/ method map for exported methods\n\thandlers map[string]HandlerFunc\n\n\t\/\/ Dnode rpc server\n\tserver *rpc.Server\n\n\t\/\/ Handlers to call when a Kite opens a connection to this Kite.\n\tonConnectHandlers []func(*RemoteKite)\n\n\t\/\/ Handlers to call when a client has disconnected.\n\tonDisconnectHandlers []func(*RemoteKite)\n\n\t\/\/ Contains different functions for authenticating user from request.\n\t\/\/ Keys are the authentication types (options.authentication.type).\n\tAuthenticators map[string]func(*Request) error\n\n\t\/\/ Used to signal if the kite is ready to start and make calls to\n\t\/\/ other kites.\n\tready chan bool\n\n\t\/\/ Prints logging messages to stderr and syslog.\n\tLog *logging.Logger\n}\n\n\/\/ New creates, initialize and then returns a new Kite instance. It accepts\n\/\/ a single options argument that is a config struct that needs to be filled\n\/\/ with several informations like Name, Port, IP and so on.\nfunc New(options *Options) *Kite {\n\tvar err error\n\tif options == nil {\n\t\toptions, err = ReadKiteOptions(\"manifest.json\")\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"error: could not read config file\", err)\n\t\t}\n\t}\n\n\toptions.validate() \/\/ exits if validating fails\n\n\thostname, _ := os.Hostname()\n\tkiteID := utils.GenerateUUID()\n\tkodingKey, err := utils.GetKodingKey()\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't find koding.key. Please run 'kd register'.\")\n\t}\n\n\tk := &Kite{\n\t\tKite: protocol.Kite{\n\t\t\tName: options.Kitename,\n\t\t\tUsername: options.Username,\n\t\t\tID: kiteID,\n\t\t\tVersion: options.Version,\n\t\t\tHostname: hostname,\n\t\t\tPort: options.Port,\n\t\t\tEnvironment: options.Environment,\n\t\t\tRegion: options.Region,\n\t\t\tVisibility: options.Visibility,\n\n\t\t\t\/\/ PublicIP will be set by Kontrol after registering if it is not set.\n\t\t\tPublicIP: options.PublicIP,\n\t\t},\n\t\tKodingKey: kodingKey,\n\t\tserver: rpc.NewServer(),\n\t\tKontrolEnabled: true,\n\t\tRegisterToKontrol: true,\n\t\tAuthenticators: make(map[string]func(*Request) error),\n\t\thandlers: make(map[string]HandlerFunc),\n\t\tready: make(chan bool),\n\t}\n\n\tk.Log = newLogger(k.Name, k.hasDebugFlag())\n\tk.Kontrol = k.NewKontrol(options.KontrolAddr)\n\n\t\/\/ Call registered handlers when a client has disconnected.\n\tk.server.OnDisconnect(func(c *rpc.Client) {\n\t\tif r, ok := c.Properties()[\"remoteKite\"]; ok {\n\t\t\t\/\/ Run OnDisconnect handlers.\n\t\t\tk.notifyRemoteKiteDisconnected(r.(*RemoteKite))\n\t\t}\n\t})\n\n\t\/\/ Every kite should be able to authenticate the user from token.\n\tk.Authenticators[\"token\"] = k.AuthenticateFromToken\n\t\/\/ A kite accepts requests from Kontrol.\n\tk.Authenticators[\"kodingKey\"] = k.AuthenticateFromKodingKey\n\n\t\/\/ Register our internal methods\n\tk.HandleFunc(\"systemInfo\", new(Status).Info)\n\tk.HandleFunc(\"heartbeat\", k.handleHeartbeat)\n\tk.HandleFunc(\"log\", k.handleLog)\n\n\treturn k\n}\n\n\/\/ Run is a blocking method. It runs the kite server and then accepts requests\n\/\/ asynchronously.\nfunc (k *Kite) Run() {\n\tk.Start()\n\tselect {}\n}\n\n\/\/ Start is like Run(), but does not wait for it to complete. It's nonblocking.\nfunc (k *Kite) Start() {\n\tk.parseVersionFlag()\n\n\tgo func() {\n\t\terr := k.listenAndServe()\n\t\tif err != nil {\n\t\t\tk.Log.Fatal(err)\n\t\t}\n\t}()\n\n\t<-k.ready \/\/ wait until we are ready\n}\n\nfunc (k *Kite) handleHeartbeat(r *Request) (interface{}, error) {\n\targs := r.Args.MustSliceOfLength(2)\n\tseconds := args[0].MustFloat64()\n\tping := args[1].MustFunction()\n\n\tgo func() {\n\t\tfor {\n\t\t\ttime.Sleep(time.Duration(seconds) * time.Second)\n\t\t\tif ping() != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil, nil\n}\n\n\/\/ handleLog prints a log message to stdout.\nfunc (k *Kite) handleLog(r *Request) (interface{}, error) {\n\tmsg := r.Args.MustString()\n\tk.Log.Info(fmt.Sprintf(\"%s: %s\", r.RemoteKite.Name, msg))\n\treturn nil, nil\n}\n\nfunc init() {\n\t\/\/ These logging related stuff needs to be called once because stupid\n\t\/\/ logging library uses global variables and resets the backends every time.\n\tlogging.SetFormatter(logging.MustStringFormatter(\"%{level:-8s} ▶ %{message}\"))\n\tstderrBackend := logging.NewLogBackend(os.Stderr, \"\", log.LstdFlags)\n\tstderrBackend.Color = true\n\tsyslogBackend, _ := logging.NewSyslogBackend(\"\")\n\tlogging.SetBackend(stderrBackend, syslogBackend)\n}\n\n\/\/ newLogger returns a new logger object for desired name and level.\nfunc newLogger(name string, debug bool) *logging.Logger {\n\tlogger := logging.MustGetLogger(name)\n\n\tlevel := logging.INFO\n\tif debug {\n\t\tlevel = logging.DEBUG\n\t}\n\n\tlogging.SetLevel(level, name)\n\treturn logger\n}\n\n\/\/ If the user wants to call flag.Parse() the flag must be defined in advance.\nvar _ = flag.Bool(\"version\", false, \"show version\")\nvar _ = flag.Bool(\"debug\", false, \"print debug logs\")\n\n\/\/ parseVersionFlag prints the version number of the kite and exits with 0\n\/\/ if \"-version\" flag is enabled.\n\/\/ We did not use the \"flag\" package because it causes trouble if the user\n\/\/ also calls \"flag.Parse()\" in his code. flag.Parse() can be called only once.\nfunc (k *Kite) parseVersionFlag() {\n\tfor _, flag := range os.Args {\n\t\tif flag == \"-version\" {\n\t\t\tfmt.Println(k.Version)\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n}\n\n\/\/ hasDebugFlag returns true if -debug flag is present in os.Args.\nfunc (k *Kite) hasDebugFlag() bool {\n\tfor _, flag := range os.Args {\n\t\tif flag == \"-debug\" {\n\t\t\treturn true\n\t\t}\n\t}\n\n\t\/\/ We can't use flags when running \"go test\" command.\n\t\/\/ This is another way to print debug logs.\n\tif os.Getenv(\"DEBUG\") != \"\" {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n\/\/ listenAndServe starts our rpc server with the given addr.\nfunc (k *Kite) listenAndServe() error {\n\tlistener, err := net.Listen(\"tcp4\", \":\"+k.Port)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tk.Log.Info(\"Listening: %s\", listener.Addr().String())\n\n\t\/\/ Port is known here if \"0\" is used as port number\n\t_, k.Port, _ = net.SplitHostPort(listener.Addr().String())\n\n\t\/\/ We must connect to Kontrol after starting to listen on port\n\tif k.KontrolEnabled {\n\t\tif k.RegisterToKontrol {\n\t\t\tk.Kontrol.OnConnect(k.registerToKontrol)\n\t\t}\n\n\t\tk.Kontrol.DialForever()\n\t}\n\n\tk.ready <- true \/\/ listener is ready, means we are ready too\n\treturn http.Serve(listener, k.server)\n}\n\nfunc (k *Kite) registerToKontrol() {\n\terr := k.Kontrol.Register()\n\tif err != nil {\n\t\tk.Log.Fatalf(\"Cannot register to Kontrol: %s\", err)\n\t}\n}\n\n\/\/ OnConnect registers a function to run when a Kite connects to this Kite.\nfunc (k *Kite) OnConnect(handler func(*RemoteKite)) {\n\tk.onConnectHandlers = append(k.onConnectHandlers, handler)\n}\n\n\/\/ OnDisconnect registers a function to run when a connected Kite is disconnected.\nfunc (k *Kite) OnDisconnect(handler func(*RemoteKite)) {\n\tk.onDisconnectHandlers = append(k.onDisconnectHandlers, handler)\n}\n\n\/\/ notifyRemoteKiteConnected runs the registered handlers with OnConnect().\nfunc (k *Kite) notifyRemoteKiteConnected(r *RemoteKite) {\n\tk.Log.Info(\"Client is connected to us: [%s %s]\", r.Name, r.Addr())\n\n\tfor _, handler := range k.onConnectHandlers {\n\t\tgo handler(r)\n\t}\n}\n\nfunc (k *Kite) notifyRemoteKiteDisconnected(r *RemoteKite) {\n\tk.Log.Info(\"Client has disconnected: [%s %s]\", r.Name, r.Addr())\n\n\tfor _, handler := range k.onDisconnectHandlers {\n\t\tgo handler(r)\n\t}\n}\n<commit_msg>kite: use all available cpus<commit_after>package kite\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/op\/go-logging\"\n\t\"koding\/newkite\/dnode\/rpc\"\n\t\"koding\/newkite\/protocol\"\n\t\"koding\/newkite\/utils\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"runtime\"\n\t\"syscall\"\n\t\"time\"\n)\n\nfunc init() {\n\t\/\/ Use all available CPUS.\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\t\/\/ Debugging helper: Prints stacktrace on SIGUSR1.\n\tc := make(chan os.Signal)\n\tsignal.Notify(c, syscall.SIGUSR1)\n\tgo func() {\n\t\tfor {\n\t\t\ts := <-c\n\t\t\tfmt.Println(\"Got signal:\", s)\n\t\t\tbuf := make([]byte, 1<<16)\n\t\t\truntime.Stack(buf, true)\n\t\t\tfmt.Println(string(buf))\n\t\t\tfmt.Println(\"Number of goroutines:\", runtime.NumGoroutine())\n\t\t}\n\t}()\n}\n\n\/\/ Kite defines a single process that enables distributed service messaging\n\/\/ amongst the peers it is connected. A Kite process acts as a Client and as a\n\/\/ Server. That means it can receive request, process them, but it also can\n\/\/ make request to other kites. A Kite can be anything. It can be simple Image\n\/\/ processing kite (which would process data), it could be a Chat kite that\n\/\/ enables peer-to-peer chat. For examples we have FileSystem kite that expose\n\/\/ the file system to a client, which in order build the filetree.\ntype Kite struct {\n\tprotocol.Kite\n\n\t\/\/ KodingKey is used for authenticate to Kontrol.\n\tKodingKey string\n\n\t\/\/ Is this Kite Public or Private? Default is Private.\n\tVisibility protocol.Visibility\n\n\t\/\/ Points to the Kontrol instance if enabled\n\tKontrol *Kontrol\n\n\t\/\/ Wheter we want to connect to Kontrol on startup, true by default.\n\tKontrolEnabled bool\n\n\t\/\/ Wheter we want to register our Kite to Kontrol, true by default.\n\tRegisterToKontrol bool\n\n\t\/\/ method map for exported methods\n\thandlers map[string]HandlerFunc\n\n\t\/\/ Dnode rpc server\n\tserver *rpc.Server\n\n\t\/\/ Handlers to call when a Kite opens a connection to this Kite.\n\tonConnectHandlers []func(*RemoteKite)\n\n\t\/\/ Handlers to call when a client has disconnected.\n\tonDisconnectHandlers []func(*RemoteKite)\n\n\t\/\/ Contains different functions for authenticating user from request.\n\t\/\/ Keys are the authentication types (options.authentication.type).\n\tAuthenticators map[string]func(*Request) error\n\n\t\/\/ Used to signal if the kite is ready to start and make calls to\n\t\/\/ other kites.\n\tready chan bool\n\n\t\/\/ Prints logging messages to stderr and syslog.\n\tLog *logging.Logger\n}\n\n\/\/ New creates, initialize and then returns a new Kite instance. It accepts\n\/\/ a single options argument that is a config struct that needs to be filled\n\/\/ with several informations like Name, Port, IP and so on.\nfunc New(options *Options) *Kite {\n\tvar err error\n\tif options == nil {\n\t\toptions, err = ReadKiteOptions(\"manifest.json\")\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"error: could not read config file\", err)\n\t\t}\n\t}\n\n\toptions.validate() \/\/ exits if validating fails\n\n\thostname, _ := os.Hostname()\n\tkiteID := utils.GenerateUUID()\n\tkodingKey, err := utils.GetKodingKey()\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't find koding.key. Please run 'kd register'.\")\n\t}\n\n\tk := &Kite{\n\t\tKite: protocol.Kite{\n\t\t\tName: options.Kitename,\n\t\t\tUsername: options.Username,\n\t\t\tID: kiteID,\n\t\t\tVersion: options.Version,\n\t\t\tHostname: hostname,\n\t\t\tPort: options.Port,\n\t\t\tEnvironment: options.Environment,\n\t\t\tRegion: options.Region,\n\t\t\tVisibility: options.Visibility,\n\n\t\t\t\/\/ PublicIP will be set by Kontrol after registering if it is not set.\n\t\t\tPublicIP: options.PublicIP,\n\t\t},\n\t\tKodingKey: kodingKey,\n\t\tserver: rpc.NewServer(),\n\t\tKontrolEnabled: true,\n\t\tRegisterToKontrol: true,\n\t\tAuthenticators: make(map[string]func(*Request) error),\n\t\thandlers: make(map[string]HandlerFunc),\n\t\tready: make(chan bool),\n\t}\n\n\tk.Log = newLogger(k.Name, k.hasDebugFlag())\n\tk.Kontrol = k.NewKontrol(options.KontrolAddr)\n\n\t\/\/ Call registered handlers when a client has disconnected.\n\tk.server.OnDisconnect(func(c *rpc.Client) {\n\t\tif r, ok := c.Properties()[\"remoteKite\"]; ok {\n\t\t\t\/\/ Run OnDisconnect handlers.\n\t\t\tk.notifyRemoteKiteDisconnected(r.(*RemoteKite))\n\t\t}\n\t})\n\n\t\/\/ Every kite should be able to authenticate the user from token.\n\tk.Authenticators[\"token\"] = k.AuthenticateFromToken\n\t\/\/ A kite accepts requests from Kontrol.\n\tk.Authenticators[\"kodingKey\"] = k.AuthenticateFromKodingKey\n\n\t\/\/ Register our internal methods\n\tk.HandleFunc(\"systemInfo\", new(Status).Info)\n\tk.HandleFunc(\"heartbeat\", k.handleHeartbeat)\n\tk.HandleFunc(\"log\", k.handleLog)\n\n\treturn k\n}\n\n\/\/ Run is a blocking method. It runs the kite server and then accepts requests\n\/\/ asynchronously.\nfunc (k *Kite) Run() {\n\tk.Start()\n\tselect {}\n}\n\n\/\/ Start is like Run(), but does not wait for it to complete. It's nonblocking.\nfunc (k *Kite) Start() {\n\tk.parseVersionFlag()\n\n\tgo func() {\n\t\terr := k.listenAndServe()\n\t\tif err != nil {\n\t\t\tk.Log.Fatal(err)\n\t\t}\n\t}()\n\n\t<-k.ready \/\/ wait until we are ready\n}\n\nfunc (k *Kite) handleHeartbeat(r *Request) (interface{}, error) {\n\targs := r.Args.MustSliceOfLength(2)\n\tseconds := args[0].MustFloat64()\n\tping := args[1].MustFunction()\n\n\tgo func() {\n\t\tfor {\n\t\t\ttime.Sleep(time.Duration(seconds) * time.Second)\n\t\t\tif ping() != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil, nil\n}\n\n\/\/ handleLog prints a log message to stdout.\nfunc (k *Kite) handleLog(r *Request) (interface{}, error) {\n\tmsg := r.Args.MustString()\n\tk.Log.Info(fmt.Sprintf(\"%s: %s\", r.RemoteKite.Name, msg))\n\treturn nil, nil\n}\n\nfunc init() {\n\t\/\/ These logging related stuff needs to be called once because stupid\n\t\/\/ logging library uses global variables and resets the backends every time.\n\tlogging.SetFormatter(logging.MustStringFormatter(\"%{level:-8s} ▶ %{message}\"))\n\tstderrBackend := logging.NewLogBackend(os.Stderr, \"\", log.LstdFlags)\n\tstderrBackend.Color = true\n\tsyslogBackend, _ := logging.NewSyslogBackend(\"\")\n\tlogging.SetBackend(stderrBackend, syslogBackend)\n}\n\n\/\/ newLogger returns a new logger object for desired name and level.\nfunc newLogger(name string, debug bool) *logging.Logger {\n\tlogger := logging.MustGetLogger(name)\n\n\tlevel := logging.INFO\n\tif debug {\n\t\tlevel = logging.DEBUG\n\t}\n\n\tlogging.SetLevel(level, name)\n\treturn logger\n}\n\n\/\/ If the user wants to call flag.Parse() the flag must be defined in advance.\nvar _ = flag.Bool(\"version\", false, \"show version\")\nvar _ = flag.Bool(\"debug\", false, \"print debug logs\")\n\n\/\/ parseVersionFlag prints the version number of the kite and exits with 0\n\/\/ if \"-version\" flag is enabled.\n\/\/ We did not use the \"flag\" package because it causes trouble if the user\n\/\/ also calls \"flag.Parse()\" in his code. flag.Parse() can be called only once.\nfunc (k *Kite) parseVersionFlag() {\n\tfor _, flag := range os.Args {\n\t\tif flag == \"-version\" {\n\t\t\tfmt.Println(k.Version)\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n}\n\n\/\/ hasDebugFlag returns true if -debug flag is present in os.Args.\nfunc (k *Kite) hasDebugFlag() bool {\n\tfor _, flag := range os.Args {\n\t\tif flag == \"-debug\" {\n\t\t\treturn true\n\t\t}\n\t}\n\n\t\/\/ We can't use flags when running \"go test\" command.\n\t\/\/ This is another way to print debug logs.\n\tif os.Getenv(\"DEBUG\") != \"\" {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n\/\/ listenAndServe starts our rpc server with the given addr.\nfunc (k *Kite) listenAndServe() error {\n\tlistener, err := net.Listen(\"tcp4\", \":\"+k.Port)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tk.Log.Info(\"Listening: %s\", listener.Addr().String())\n\n\t\/\/ Port is known here if \"0\" is used as port number\n\t_, k.Port, _ = net.SplitHostPort(listener.Addr().String())\n\n\t\/\/ We must connect to Kontrol after starting to listen on port\n\tif k.KontrolEnabled {\n\t\tif k.RegisterToKontrol {\n\t\t\tk.Kontrol.OnConnect(k.registerToKontrol)\n\t\t}\n\n\t\tk.Kontrol.DialForever()\n\t}\n\n\tk.ready <- true \/\/ listener is ready, means we are ready too\n\treturn http.Serve(listener, k.server)\n}\n\nfunc (k *Kite) registerToKontrol() {\n\terr := k.Kontrol.Register()\n\tif err != nil {\n\t\tk.Log.Fatalf(\"Cannot register to Kontrol: %s\", err)\n\t}\n}\n\n\/\/ OnConnect registers a function to run when a Kite connects to this Kite.\nfunc (k *Kite) OnConnect(handler func(*RemoteKite)) {\n\tk.onConnectHandlers = append(k.onConnectHandlers, handler)\n}\n\n\/\/ OnDisconnect registers a function to run when a connected Kite is disconnected.\nfunc (k *Kite) OnDisconnect(handler func(*RemoteKite)) {\n\tk.onDisconnectHandlers = append(k.onDisconnectHandlers, handler)\n}\n\n\/\/ notifyRemoteKiteConnected runs the registered handlers with OnConnect().\nfunc (k *Kite) notifyRemoteKiteConnected(r *RemoteKite) {\n\tk.Log.Info(\"Client is connected to us: [%s %s]\", r.Name, r.Addr())\n\n\tfor _, handler := range k.onConnectHandlers {\n\t\tgo handler(r)\n\t}\n}\n\nfunc (k *Kite) notifyRemoteKiteDisconnected(r *RemoteKite) {\n\tk.Log.Info(\"Client has disconnected: [%s %s]\", r.Name, r.Addr())\n\n\tfor _, handler := range k.onDisconnectHandlers {\n\t\tgo handler(r)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package models\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/koding\/bongo\"\n)\n\ntype Channel struct {\n\t\/\/ unique identifier of the channel\n\tId int64 `json:\"id\"`\n\n\t\/\/ Name of the channel\n\tName string `json:\"name\" sql:\"NOT NULL;TYPE:VARCHAR(200);\"`\n\n\t\/\/ Creator of the channel\n\tCreatorId int64 `json:\"creatorId\" sql:\"NOT NULL\"`\n\n\t\/\/ Name of the group which channel is belong to\n\tGroupName string `json:\"groupName\" sql:\"NOT NULL;TYPE:VARCHAR(200);\"`\n\n\t\/\/ Purpose of the channel\n\tPurpose string `json:\"purpose\"`\n\n\t\/\/ Secret key of the channel for event propagation purposes\n\t\/\/ we can put this key into another table?\n\tSecretKey string `json:\"secretKey\"`\n\n\t\/\/ Type of the channel\n\tTypeConstant string `json:\"typeConstant\" sql:\"NOT NULL;TYPE:VARCHAR(100);\"`\n\n\t\/\/ Privacy constant of the channel\n\tPrivacyConstant string `json:\"privacyConstant\" sql:\"NOT NULL;TYPE:VARCHAR(100);\"`\n\n\t\/\/ Creation date of the channel\n\tCreatedAt time.Time `json:\"createdAt\" sql:\"NOT NULL\"`\n\n\t\/\/ Modification date of the channel\n\tUpdatedAt time.Time `json:\"updatedAt\" sql:\"NOT NULL\"`\n}\n\n\/\/ to-do check for allowed channels\nconst (\n\t\/\/ TYPES\n\tChannel_TYPE_GROUP = \"group\"\n\tChannel_TYPE_TOPIC = \"topic\"\n\tChannel_TYPE_FOLLOWINGFEED = \"followingfeed\"\n\tChannel_TYPE_FOLLOWERS = \"followers\"\n\tChannel_TYPE_CHAT = \"chat\"\n\tChannel_TYPE_PINNED_ACTIVITY = \"pinnedActivity\"\n\t\/\/ Privacy\n\tChannel_TYPE_PUBLIC = \"public\"\n\tChannel_TYPE_PRIVATE = \"private\"\n\t\/\/ Koding Group Name\n\tChannel_KODING_NAME = \"koding\"\n)\n\nfunc NewChannel() *Channel {\n\treturn &Channel{\n\t\tName: \"koding\",\n\t\tCreatorId: 123,\n\t\tGroupName: Channel_KODING_NAME,\n\t\tPurpose: \"string\",\n\t\tSecretKey: \"string\",\n\t\tTypeConstant: \"default\",\n\t\tPrivacyConstant: Channel_TYPE_PRIVATE,\n\t}\n}\n\nfunc (c *Channel) BeforeCreate() {\n\tc.CreatedAt = time.Now()\n\tc.UpdatedAt = time.Now()\n}\n\nfunc (c *Channel) BeforeUpdate() {\n\tc.UpdatedAt = time.Now()\n}\n\nfunc (c *Channel) GetId() int64 {\n\treturn c.Id\n}\n\nfunc (c Channel) TableName() string {\n\treturn \"api.channel\"\n}\n\nfunc (c *Channel) Fetch() error {\n\treturn bongo.B.Fetch(c)\n}\n\nfunc (c *Channel) AfterCreate() {\n\tbongo.B.AfterCreate(c)\n}\n\nfunc (c *Channel) AfterUpdate() {\n\tbongo.B.AfterUpdate(c)\n}\n\nfunc (c *Channel) AfterDelete() {\n\tbongo.B.AfterDelete(c)\n}\n\nfunc (c *Channel) Update() error {\n\tif c.Name == \"\" || c.GroupName == \"\" {\n\t\treturn fmt.Errorf(\"Validation failed %s - %s\", c.Name, c.GroupName)\n\t}\n\n\treturn bongo.B.Update(c)\n}\n\nfunc (c *Channel) Create() error {\n\tif c.Name == \"\" || c.GroupName == \"\" || c.TypeConstant == \"\" {\n\t\treturn fmt.Errorf(\"Validation failed %s - %s -%s\", c.Name, c.GroupName, c.TypeConstant)\n\t}\n\n\t\/\/ golang returns -1 if item not in the string\n\tif strings.Index(c.Name, \" \") > -1 {\n\t\treturn fmt.Errorf(\"Channel name %q has empty space in it\", c.Name)\n\t}\n\n\tif c.TypeConstant == Channel_TYPE_GROUP \/* we can add more types here *\/ {\n\t\tselector := map[string]interface{}{\n\t\t\t\"group_name\": c.GroupName,\n\t\t\t\"type_constant\": c.TypeConstant,\n\t\t}\n\n\t\t\/\/ if err is nil\n\t\t\/\/ it means we already have that channel\n\t\terr := c.One(bongo.NewQS(selector))\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t\t\/\/ return fmt.Errorf(\"%s typed channel is already created before for %s group\", c.TypeConstant, c.GroupName)\n\t\t}\n\n\t\tif err != gorm.RecordNotFound {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\treturn bongo.B.Create(c)\n}\n\nfunc (c *Channel) Delete() error {\n\treturn bongo.B.Delete(c)\n}\n\nfunc (c *Channel) One(q *bongo.Query) error {\n\treturn bongo.B.One(c, c, q)\n}\n\nfunc (c *Channel) Some(data interface{}, q *bongo.Query) error {\n\treturn bongo.B.Some(c, data, q)\n}\n\nfunc (c *Channel) FetchByIds(ids []int64) ([]Channel, error) {\n\tvar channels []Channel\n\n\tif len(ids) == 0 {\n\t\treturn channels, nil\n\t}\n\n\tif err := bongo.B.FetchByIds(c, &channels, ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn channels, nil\n}\n\nfunc (c *Channel) AddParticipant(participantId int64) (*ChannelParticipant, error) {\n\tif c.Id == 0 {\n\t\treturn nil, errors.New(\"Channel Id is not set\")\n\t}\n\n\tcp := NewChannelParticipant()\n\tcp.ChannelId = c.Id\n\tcp.AccountId = participantId\n\n\terr := cp.FetchParticipant()\n\tif err != nil && err != gorm.RecordNotFound {\n\t\treturn nil, err\n\t}\n\n\t\/\/ if we have this record in DB\n\tif cp.Id != 0 {\n\t\t\/\/ if status is not active\n\t\tif cp.StatusConstant == ChannelParticipant_STATUS_ACTIVE {\n\t\t\treturn nil, errors.New(fmt.Sprintf(\"Account %d is already a participant of channel %d\", cp.AccountId, cp.ChannelId))\n\t\t}\n\t\tcp.StatusConstant = ChannelParticipant_STATUS_ACTIVE\n\t\tif err := cp.Update(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn cp, nil\n\t}\n\n\tcp.StatusConstant = ChannelParticipant_STATUS_ACTIVE\n\n\tif err := cp.Create(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cp, nil\n}\n\nfunc (c *Channel) RemoveParticipant(participantId int64) error {\n\tif c.Id == 0 {\n\t\treturn errors.New(\"Channel Id is not set\")\n\t}\n\n\tcp := NewChannelParticipant()\n\tcp.ChannelId = c.Id\n\tcp.AccountId = participantId\n\n\terr := cp.FetchParticipant()\n\t\/\/ if user is not in this channel, do nothing\n\tif err == gorm.RecordNotFound {\n\t\treturn nil\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cp.StatusConstant == ChannelParticipant_STATUS_LEFT {\n\t\treturn nil\n\t}\n\n\tcp.StatusConstant = ChannelParticipant_STATUS_LEFT\n\tif err := cp.Update(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *Channel) FetchParticipantIds() ([]int64, error) {\n\tvar participantIds []int64\n\n\tif c.Id == 0 {\n\t\treturn participantIds, errors.New(\"Channel Id is not set\")\n\t}\n\n\tquery := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"channel_id\": c.Id,\n\t\t\t\"status_constant\": ChannelParticipant_STATUS_ACTIVE,\n\t\t},\n\t\tPluck: \"account_id\",\n\t}\n\n\tcp := NewChannelParticipant()\n\terr := cp.Some(&participantIds, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn participantIds, nil\n}\n\nfunc (c *Channel) AddMessage(messageId int64) (*ChannelMessageList, error) {\n\tif c.Id == 0 {\n\t\treturn nil, errors.New(\"Channel Id is not set\")\n\t}\n\n\tcml := NewChannelMessageList()\n\n\tselector := map[string]interface{}{\n\t\t\"channel_id\": c.Id,\n\t\t\"message_id\": messageId,\n\t}\n\terr := cml.One(bongo.NewQS(selector))\n\tif err == nil {\n\t\treturn nil, errors.New(\"Message is already in the channel\")\n\t}\n\n\tif err != gorm.RecordNotFound {\n\t\treturn nil, err\n\t}\n\t\/\/ silence record not found err\n\n\tcml.ChannelId = c.Id\n\tcml.MessageId = messageId\n\n\tif err := cml.Create(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cml, nil\n}\n\nfunc (c *Channel) RemoveMessage(messageId int64) (*ChannelMessageList, error) {\n\tif c.Id == 0 {\n\t\treturn nil, errors.New(\"Channel Id is not set\")\n\t}\n\n\tcml := NewChannelMessageList()\n\tselector := map[string]interface{}{\n\t\t\"channel_id\": c.Id,\n\t\t\"message_id\": messageId,\n\t}\n\terr := cml.One(bongo.NewQS(selector))\n\t\/\/ one returns error when record not found case\n\t\/\/ but we dont care if it is not there tho\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := cml.Delete(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cml, nil\n}\n\nfunc (c *Channel) List(q *Query) ([]Channel, error) {\n\n\tif q.GroupName == \"\" {\n\t\treturn nil, fmt.Errorf(\"Query doesnt have any Group info %+v\", q)\n\t}\n\n\tvar channels []Channel\n\n\tquery := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"group_name\": q.GroupName,\n\t\t},\n\t}\n\n\tif q.Type != \"\" {\n\t\tquery.Selector[\"type_constant\"] = q.Type\n\t}\n\n\terr := bongo.B.Some(c, &channels, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn channels, nil\n}\n<commit_msg>Social: use channel some function<commit_after>package models\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/koding\/bongo\"\n)\n\ntype Channel struct {\n\t\/\/ unique identifier of the channel\n\tId int64 `json:\"id\"`\n\n\t\/\/ Name of the channel\n\tName string `json:\"name\" sql:\"NOT NULL;TYPE:VARCHAR(200);\"`\n\n\t\/\/ Creator of the channel\n\tCreatorId int64 `json:\"creatorId\" sql:\"NOT NULL\"`\n\n\t\/\/ Name of the group which channel is belong to\n\tGroupName string `json:\"groupName\" sql:\"NOT NULL;TYPE:VARCHAR(200);\"`\n\n\t\/\/ Purpose of the channel\n\tPurpose string `json:\"purpose\"`\n\n\t\/\/ Secret key of the channel for event propagation purposes\n\t\/\/ we can put this key into another table?\n\tSecretKey string `json:\"secretKey\"`\n\n\t\/\/ Type of the channel\n\tTypeConstant string `json:\"typeConstant\" sql:\"NOT NULL;TYPE:VARCHAR(100);\"`\n\n\t\/\/ Privacy constant of the channel\n\tPrivacyConstant string `json:\"privacyConstant\" sql:\"NOT NULL;TYPE:VARCHAR(100);\"`\n\n\t\/\/ Creation date of the channel\n\tCreatedAt time.Time `json:\"createdAt\" sql:\"NOT NULL\"`\n\n\t\/\/ Modification date of the channel\n\tUpdatedAt time.Time `json:\"updatedAt\" sql:\"NOT NULL\"`\n}\n\n\/\/ to-do check for allowed channels\nconst (\n\t\/\/ TYPES\n\tChannel_TYPE_GROUP = \"group\"\n\tChannel_TYPE_TOPIC = \"topic\"\n\tChannel_TYPE_FOLLOWINGFEED = \"followingfeed\"\n\tChannel_TYPE_FOLLOWERS = \"followers\"\n\tChannel_TYPE_CHAT = \"chat\"\n\tChannel_TYPE_PINNED_ACTIVITY = \"pinnedActivity\"\n\t\/\/ Privacy\n\tChannel_TYPE_PUBLIC = \"public\"\n\tChannel_TYPE_PRIVATE = \"private\"\n\t\/\/ Koding Group Name\n\tChannel_KODING_NAME = \"koding\"\n)\n\nfunc NewChannel() *Channel {\n\treturn &Channel{\n\t\tName: \"koding\",\n\t\tCreatorId: 123,\n\t\tGroupName: Channel_KODING_NAME,\n\t\tPurpose: \"string\",\n\t\tSecretKey: \"string\",\n\t\tTypeConstant: \"default\",\n\t\tPrivacyConstant: Channel_TYPE_PRIVATE,\n\t}\n}\n\nfunc (c *Channel) BeforeCreate() {\n\tc.CreatedAt = time.Now()\n\tc.UpdatedAt = time.Now()\n}\n\nfunc (c *Channel) BeforeUpdate() {\n\tc.UpdatedAt = time.Now()\n}\n\nfunc (c *Channel) GetId() int64 {\n\treturn c.Id\n}\n\nfunc (c Channel) TableName() string {\n\treturn \"api.channel\"\n}\n\nfunc (c *Channel) Fetch() error {\n\treturn bongo.B.Fetch(c)\n}\n\nfunc (c *Channel) AfterCreate() {\n\tbongo.B.AfterCreate(c)\n}\n\nfunc (c *Channel) AfterUpdate() {\n\tbongo.B.AfterUpdate(c)\n}\n\nfunc (c *Channel) AfterDelete() {\n\tbongo.B.AfterDelete(c)\n}\n\nfunc (c *Channel) Update() error {\n\tif c.Name == \"\" || c.GroupName == \"\" {\n\t\treturn fmt.Errorf(\"Validation failed %s - %s\", c.Name, c.GroupName)\n\t}\n\n\treturn bongo.B.Update(c)\n}\n\nfunc (c *Channel) Create() error {\n\tif c.Name == \"\" || c.GroupName == \"\" || c.TypeConstant == \"\" {\n\t\treturn fmt.Errorf(\"Validation failed %s - %s -%s\", c.Name, c.GroupName, c.TypeConstant)\n\t}\n\n\t\/\/ golang returns -1 if item not in the string\n\tif strings.Index(c.Name, \" \") > -1 {\n\t\treturn fmt.Errorf(\"Channel name %q has empty space in it\", c.Name)\n\t}\n\n\tif c.TypeConstant == Channel_TYPE_GROUP \/* we can add more types here *\/ {\n\t\tselector := map[string]interface{}{\n\t\t\t\"group_name\": c.GroupName,\n\t\t\t\"type_constant\": c.TypeConstant,\n\t\t}\n\n\t\t\/\/ if err is nil\n\t\t\/\/ it means we already have that channel\n\t\terr := c.One(bongo.NewQS(selector))\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t\t\/\/ return fmt.Errorf(\"%s typed channel is already created before for %s group\", c.TypeConstant, c.GroupName)\n\t\t}\n\n\t\tif err != gorm.RecordNotFound {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\treturn bongo.B.Create(c)\n}\n\nfunc (c *Channel) Delete() error {\n\treturn bongo.B.Delete(c)\n}\n\nfunc (c *Channel) One(q *bongo.Query) error {\n\treturn bongo.B.One(c, c, q)\n}\n\nfunc (c *Channel) Some(data interface{}, q *bongo.Query) error {\n\treturn bongo.B.Some(c, data, q)\n}\n\nfunc (c *Channel) FetchByIds(ids []int64) ([]Channel, error) {\n\tvar channels []Channel\n\n\tif len(ids) == 0 {\n\t\treturn channels, nil\n\t}\n\n\tif err := bongo.B.FetchByIds(c, &channels, ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn channels, nil\n}\n\nfunc (c *Channel) AddParticipant(participantId int64) (*ChannelParticipant, error) {\n\tif c.Id == 0 {\n\t\treturn nil, errors.New(\"Channel Id is not set\")\n\t}\n\n\tcp := NewChannelParticipant()\n\tcp.ChannelId = c.Id\n\tcp.AccountId = participantId\n\n\terr := cp.FetchParticipant()\n\tif err != nil && err != gorm.RecordNotFound {\n\t\treturn nil, err\n\t}\n\n\t\/\/ if we have this record in DB\n\tif cp.Id != 0 {\n\t\t\/\/ if status is not active\n\t\tif cp.StatusConstant == ChannelParticipant_STATUS_ACTIVE {\n\t\t\treturn nil, errors.New(fmt.Sprintf(\"Account %d is already a participant of channel %d\", cp.AccountId, cp.ChannelId))\n\t\t}\n\t\tcp.StatusConstant = ChannelParticipant_STATUS_ACTIVE\n\t\tif err := cp.Update(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn cp, nil\n\t}\n\n\tcp.StatusConstant = ChannelParticipant_STATUS_ACTIVE\n\n\tif err := cp.Create(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cp, nil\n}\n\nfunc (c *Channel) RemoveParticipant(participantId int64) error {\n\tif c.Id == 0 {\n\t\treturn errors.New(\"Channel Id is not set\")\n\t}\n\n\tcp := NewChannelParticipant()\n\tcp.ChannelId = c.Id\n\tcp.AccountId = participantId\n\n\terr := cp.FetchParticipant()\n\t\/\/ if user is not in this channel, do nothing\n\tif err == gorm.RecordNotFound {\n\t\treturn nil\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cp.StatusConstant == ChannelParticipant_STATUS_LEFT {\n\t\treturn nil\n\t}\n\n\tcp.StatusConstant = ChannelParticipant_STATUS_LEFT\n\tif err := cp.Update(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *Channel) FetchParticipantIds() ([]int64, error) {\n\tvar participantIds []int64\n\n\tif c.Id == 0 {\n\t\treturn participantIds, errors.New(\"Channel Id is not set\")\n\t}\n\n\tquery := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"channel_id\": c.Id,\n\t\t\t\"status_constant\": ChannelParticipant_STATUS_ACTIVE,\n\t\t},\n\t\tPluck: \"account_id\",\n\t}\n\n\tcp := NewChannelParticipant()\n\terr := cp.Some(&participantIds, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn participantIds, nil\n}\n\nfunc (c *Channel) AddMessage(messageId int64) (*ChannelMessageList, error) {\n\tif c.Id == 0 {\n\t\treturn nil, errors.New(\"Channel Id is not set\")\n\t}\n\n\tcml := NewChannelMessageList()\n\n\tselector := map[string]interface{}{\n\t\t\"channel_id\": c.Id,\n\t\t\"message_id\": messageId,\n\t}\n\terr := cml.One(bongo.NewQS(selector))\n\tif err == nil {\n\t\treturn nil, errors.New(\"Message is already in the channel\")\n\t}\n\n\tif err != gorm.RecordNotFound {\n\t\treturn nil, err\n\t}\n\t\/\/ silence record not found err\n\n\tcml.ChannelId = c.Id\n\tcml.MessageId = messageId\n\n\tif err := cml.Create(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cml, nil\n}\n\nfunc (c *Channel) RemoveMessage(messageId int64) (*ChannelMessageList, error) {\n\tif c.Id == 0 {\n\t\treturn nil, errors.New(\"Channel Id is not set\")\n\t}\n\n\tcml := NewChannelMessageList()\n\tselector := map[string]interface{}{\n\t\t\"channel_id\": c.Id,\n\t\t\"message_id\": messageId,\n\t}\n\terr := cml.One(bongo.NewQS(selector))\n\t\/\/ one returns error when record not found case\n\t\/\/ but we dont care if it is not there tho\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := cml.Delete(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cml, nil\n}\n\nfunc (c *Channel) List(q *Query) ([]Channel, error) {\n\n\tif q.GroupName == \"\" {\n\t\treturn nil, fmt.Errorf(\"Query doesnt have any Group info %+v\", q)\n\t}\n\n\tvar channels []Channel\n\n\tquery := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"group_name\": q.GroupName,\n\t\t},\n\t}\n\n\tif q.Type != \"\" {\n\t\tquery.Selector[\"type_constant\"] = q.Type\n\t}\n\n\terr := c.Some(&channels, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn channels, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package receiver\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\n\t\"go.uber.org\/zap\"\n)\n\n\/\/ UDP receive metrics from UDP messages\ntype UDP struct {\n\tBase\n\tconn *net.UDPConn\n\tparseChan chan *Buffer\n}\n\n\/\/ Addr returns binded socket address. For bind port 0 in tests\nfunc (rcv *UDP) Addr() net.Addr {\n\tif rcv.conn == nil {\n\t\treturn nil\n\t}\n\treturn rcv.conn.LocalAddr()\n}\n\nfunc (rcv *UDP) Stat(send func(metric string, value float64)) {\n\trcv.SendStat(send, \"metricsReceived\", \"errors\", \"incompleteReceived\", \"futureDropped\", \"pastDropped\")\n}\n\nfunc (rcv *UDP) receiveWorker(ctx context.Context) {\n\tdefer rcv.conn.Close()\n\n\tbuffer := GetBuffer()\n\nReceiveLoop:\n\tfor {\n\n\t\tn, peer, err := rcv.conn.ReadFromUDP(buffer.Body[:])\n\t\tif err != nil {\n\t\t\tif strings.Contains(err.Error(), \"use of closed network connection\") {\n\t\t\t\tbreak ReceiveLoop\n\t\t\t}\n\t\t\tatomic.AddUint64(&rcv.stat.errors, 1)\n\t\t\trcv.logger.Error(\"ReadFromUDP failed\", zap.Error(err), zap.String(\"peer\", peer.String()))\n\t\t\tcontinue ReceiveLoop\n\t\t}\n\n\t\tif n > 0 {\n\t\t\tchunkSize := bytes.LastIndexByte(buffer.Body[:n], '\\n') + 1\n\n\t\t\tif chunkSize < n {\n\t\t\t\t\/\/ @TODO: log and count incomplete with peer\n\t\t\t}\n\n\t\t\tif chunkSize > 0 {\n\t\t\t\tbuffer.Used = chunkSize\n\t\t\t\trcv.parseChan <- buffer\n\t\t\t\tbuffer = GetBuffer()\n\t\t\t}\n\n\t\t}\n\t}\n}\n\n\/\/ Listen bind port. Receive messages and send to out channel\nfunc (rcv *UDP) Listen(addr *net.UDPAddr) error {\n\treturn rcv.StartFunc(func() error {\n\t\tvar err error\n\n\t\trcv.conn, err = net.ListenUDP(\"udp\", addr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trcv.Go(func(ctx context.Context) {\n\t\t\t<-ctx.Done()\n\t\t\trcv.conn.Close()\n\t\t})\n\n\t\tfor i := 0; i < rcv.parseThreads; i++ {\n\t\t\trcv.Go(func(ctx context.Context) {\n\t\t\t\trcv.PlainParser(ctx, rcv.parseChan)\n\t\t\t})\n\t\t}\n\n\t\trcv.Go(rcv.receiveWorker)\n\n\t\treturn nil\n\t})\n}\n<commit_msg>missed current time in udp receiver<commit_after>package receiver\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"net\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"go.uber.org\/zap\"\n)\n\n\/\/ UDP receive metrics from UDP messages\ntype UDP struct {\n\tBase\n\tconn *net.UDPConn\n\tparseChan chan *Buffer\n}\n\n\/\/ Addr returns binded socket address. For bind port 0 in tests\nfunc (rcv *UDP) Addr() net.Addr {\n\tif rcv.conn == nil {\n\t\treturn nil\n\t}\n\treturn rcv.conn.LocalAddr()\n}\n\nfunc (rcv *UDP) Stat(send func(metric string, value float64)) {\n\trcv.SendStat(send, \"metricsReceived\", \"errors\", \"incompleteReceived\", \"futureDropped\", \"pastDropped\")\n}\n\nfunc (rcv *UDP) receiveWorker(ctx context.Context) {\n\tdefer rcv.conn.Close()\n\n\tbuffer := GetBuffer()\n\nReceiveLoop:\n\tfor {\n\n\t\tn, peer, err := rcv.conn.ReadFromUDP(buffer.Body[:])\n\t\tif err != nil {\n\t\t\tif strings.Contains(err.Error(), \"use of closed network connection\") {\n\t\t\t\tbreak ReceiveLoop\n\t\t\t}\n\t\t\tatomic.AddUint64(&rcv.stat.errors, 1)\n\t\t\trcv.logger.Error(\"ReadFromUDP failed\", zap.Error(err), zap.String(\"peer\", peer.String()))\n\t\t\tcontinue ReceiveLoop\n\t\t}\n\n\t\tif n > 0 {\n\t\t\tchunkSize := bytes.LastIndexByte(buffer.Body[:n], '\\n') + 1\n\n\t\t\tif chunkSize < n {\n\t\t\t\t\/\/ @TODO: log and count incomplete with peer\n\t\t\t}\n\n\t\t\tif chunkSize > 0 {\n\t\t\t\tbuffer.Used = chunkSize\n\t\t\t\tbuffer.Time = uint32(time.Now().Unix())\n\t\t\t\trcv.parseChan <- buffer\n\t\t\t\tbuffer = GetBuffer()\n\t\t\t}\n\n\t\t}\n\t}\n}\n\n\/\/ Listen bind port. Receive messages and send to out channel\nfunc (rcv *UDP) Listen(addr *net.UDPAddr) error {\n\treturn rcv.StartFunc(func() error {\n\t\tvar err error\n\n\t\trcv.conn, err = net.ListenUDP(\"udp\", addr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trcv.Go(func(ctx context.Context) {\n\t\t\t<-ctx.Done()\n\t\t\trcv.conn.Close()\n\t\t})\n\n\t\tfor i := 0; i < rcv.parseThreads; i++ {\n\t\t\trcv.Go(func(ctx context.Context) {\n\t\t\t\trcv.PlainParser(ctx, rcv.parseChan)\n\t\t\t})\n\t\t}\n\n\t\trcv.Go(rcv.receiveWorker)\n\n\t\treturn nil\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Licensed to the Apache Software Foundation (ASF) under one or more\n\/\/ contributor license agreements. See the NOTICE file distributed with\n\/\/ this work for additional information regarding copyright ownership.\n\/\/ The ASF licenses this file to You under the Apache License, Version 2.0\n\/\/ (the \"License\"); you may not use this file except in compliance with\n\/\/ the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage environment\n\nimport (\n\tplayground \"beam.apache.org\/playground\/backend\/internal\/api\/v1\"\n\t\"fmt\"\n\t\"io\/fs\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n)\n\nconst (\n\tjavaConfig = \"{\\n \\\"compile_cmd\\\": \\\"javac\\\",\\n \\\"run_cmd\\\": \\\"java\\\",\\n \\\"compile_args\\\": [\\\"-d\\\", \\\"bin\\\", \\\"-classpath\\\"],\\n \\\"run_args\\\": [\\\"-cp\\\", \\\"bin:\\\"]\\n}\"\n)\n\nfunc TestMain(m *testing.M) {\n\terr := setup()\n\tif err != nil {\n\t\tfmt.Errorf(\"error during test setup: %s\", err.Error())\n\t}\n\tdefer teardown()\n\tm.Run()\n}\n\nfunc setup() error {\n\terr := os.MkdirAll(configFolderName, fs.ModePerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfilePath := filepath.Join(configFolderName, defaultSdk.String()+jsonExt)\n\terr = os.WriteFile(filePath, []byte(javaConfig), 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc teardown() {\n\terr := os.RemoveAll(configFolderName)\n\tif err != nil {\n\t\tfmt.Errorf(\"error during test setup: %s\", err.Error())\n\t}\n}\n\nfunc setOsEnvs(envsToSet map[string]string) error {\n\tfor key, value := range envsToSet {\n\t\tif err := os.Setenv(key, value); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc TestNewEnvironment(t *testing.T) {\n\texecutorConfig := NewExecutorConfig(\"javac\", \"java\", []string{\"\"}, []string{\"\"})\n\ttests := []struct {\n\t\tname string\n\t\twant *Environment\n\t}{\n\t\t{name: \"create env service with default envs\", want: &Environment{\n\t\t\tNetworkEnvs: *NewNetworkEnvs(defaultIp, defaultPort),\n\t\t\tBeamSdkEnvs: *NewBeamEnvs(defaultSdk, executorConfig),\n\t\t\tApplicationEnvs: *NewApplicationEnvs(\"\/app\", &CacheEnvs{defaultCacheType, defaultCacheAddress, defaultCacheKeyExpirationTime}, defaultPipelineExecuteTimeout),\n\t\t}},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif got := NewEnvironment(\n\t\t\t\t*NewNetworkEnvs(defaultIp, defaultPort),\n\t\t\t\t*NewBeamEnvs(defaultSdk, executorConfig),\n\t\t\t\t*NewApplicationEnvs(\"\/app\", &CacheEnvs{defaultCacheType, defaultCacheAddress, defaultCacheKeyExpirationTime}, defaultPipelineExecuteTimeout)); !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"NewEnvironment() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_getSdkEnvsFromOsEnvs(t *testing.T) {\n\tjars := strings.Join([]string{defaultBeamSdkPath, defaultBeamRunner, defaultSLF4j}, \":\")\n\tworkingDir := \".\/\"\n\ttests := []struct {\n\t\tname string\n\t\twant *BeamEnvs\n\t\tenvsToSet map[string]string\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"not specified beam sdk key in os envs\",\n\t\t\twant: nil,\n\t\t\tenvsToSet: map[string]string{},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"default beam envs\",\n\t\t\twant: NewBeamEnvs(defaultSdk, NewExecutorConfig(\"javac\", \"java\", []string{\"-d\", \"bin\", \"-classpath\", defaultBeamSdkPath}, []string{\"-cp\", \"bin:\" + jars})),\n\t\t\tenvsToSet: map[string]string{beamSdkKey: \"SDK_JAVA\"},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"specific sdk key in os envs\",\n\t\t\twant: NewBeamEnvs(defaultSdk, NewExecutorConfig(\"javac\", \"java\", []string{\"-d\", \"bin\", \"-classpath\", defaultBeamSdkPath}, []string{\"-cp\", \"bin:\" + jars})),\n\t\t\tenvsToSet: map[string]string{beamSdkKey: \"SDK_JAVA\"},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"wrong sdk key in os envs\",\n\t\t\twant: nil,\n\t\t\tenvsToSet: map[string]string{beamSdkKey: \"SDK_J\"},\n\t\t\twantErr: true,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif err := setOsEnvs(tt.envsToSet); err != nil {\n\t\t\t\tt.Fatalf(\"couldn't setup os env\")\n\t\t\t}\n\t\t\tgot, err := ConfigureBeamEnvs(workingDir)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"getSdkEnvsFromOsEnvs() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"getSdkEnvsFromOsEnvs() got = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_getNetworkEnvsFromOsEnvs(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\twant *NetworkEnvs\n\t\tenvsToSet map[string]string\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"default values\",\n\t\t\twant: NewNetworkEnvs(defaultIp, defaultPort),\n\t\t},\n\t\t{\n\t\t\tname: \"values from os envs\",\n\t\t\twant: NewNetworkEnvs(\"12.12.12.21\", 1234),\n\t\t\tenvsToSet: map[string]string{serverIpKey: \"12.12.12.21\", serverPortKey: \"1234\"},\n\t\t},\n\t\t{\n\t\t\tname: \"not int port in os env, should be default\",\n\t\t\twant: nil,\n\t\t\tenvsToSet: map[string]string{serverIpKey: \"12.12.12.21\", serverPortKey: \"1a34\"},\n\t\t\twantErr: true,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif err := setOsEnvs(tt.envsToSet); err != nil {\n\t\t\t\tt.Fatalf(\"couldn't setup os env\")\n\t\t\t}\n\t\t\tgot, err := GetNetworkEnvsFromOsEnvs()\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"getNetworkEnvsFromOsEnvs() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"getNetworkEnvsFromOsEnvs() got = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_getApplicationEnvsFromOsEnvs(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\twant *ApplicationEnvs\n\t\twantErr bool\n\t\tenvsToSet map[string]string\n\t}{\n\t\t{name: \"working dir is provided\", want: NewApplicationEnvs(\"\/app\", &CacheEnvs{defaultCacheType, defaultCacheAddress, defaultCacheKeyExpirationTime}, defaultPipelineExecuteTimeout), wantErr: false, envsToSet: map[string]string{workingDirKey: \"\/app\"}},\n\t\t{name: \"working dir isn't provided\", want: nil, wantErr: true},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif err := setOsEnvs(tt.envsToSet); err != nil {\n\t\t\t\tt.Fatalf(\"couldn't setup os env\")\n\t\t\t}\n\t\t\tgot, err := GetApplicationEnvsFromOsEnvs()\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"getApplicationEnvsFromOsEnvs() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"getApplicationEnvsFromOsEnvs() got = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t\tos.Clearenv()\n\t\t})\n\t}\n}\n\nfunc Test_createExecutorConfig(t *testing.T) {\n\tjars := strings.Join([]string{defaultBeamSdkPath, defaultBeamRunner, defaultSLF4j}, \":\")\n\ttype args struct {\n\t\tapacheBeamSdk playground.Sdk\n\t\tconfigPath string\n\t}\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant *ExecutorConfig\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"create executor configuration from json file\",\n\t\t\targs: args{apacheBeamSdk: defaultSdk, configPath: filepath.Join(configFolderName, defaultSdk.String()+jsonExt)},\n\t\t\twant: NewExecutorConfig(\"javac\", \"java\", []string{\"-d\", \"bin\", \"-classpath\", defaultBeamSdkPath}, []string{\"-cp\", \"bin:\" + jars}),\n\t\t\twantErr: false,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, err := createExecutorConfig(tt.args.apacheBeamSdk, tt.args.configPath)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"createExecutorConfig() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"createExecutorConfig() got = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_getConfigFromJson(t *testing.T) {\n\ttype args struct {\n\t\tconfigPath string\n\t}\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant *ExecutorConfig\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"get object from json\",\n\t\t\targs: args{filepath.Join(configFolderName, defaultSdk.String()+jsonExt)},\n\t\t\twant: NewExecutorConfig(\"javac\", \"java\", []string{\"-d\", \"bin\", \"-classpath\"}, []string{\"-cp\", \"bin:\"}),\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"error if wrong json path\",\n\t\t\targs: args{filepath.Join(\"wrong_folder\", defaultSdk.String()+jsonExt)},\n\t\t\twant: nil,\n\t\t\twantErr: true,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, err := getConfigFromJson(tt.args.configPath)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"getConfigFromJson() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"getConfigFromJson() got = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n<commit_msg>Added cleanup os env before and after tests<commit_after>\/\/ Licensed to the Apache Software Foundation (ASF) under one or more\n\/\/ contributor license agreements. See the NOTICE file distributed with\n\/\/ this work for additional information regarding copyright ownership.\n\/\/ The ASF licenses this file to You under the Apache License, Version 2.0\n\/\/ (the \"License\"); you may not use this file except in compliance with\n\/\/ the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage environment\n\nimport (\n\tplayground \"beam.apache.org\/playground\/backend\/internal\/api\/v1\"\n\t\"fmt\"\n\t\"io\/fs\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n)\n\nconst (\n\tjavaConfig = \"{\\n \\\"compile_cmd\\\": \\\"javac\\\",\\n \\\"run_cmd\\\": \\\"java\\\",\\n \\\"compile_args\\\": [\\\"-d\\\", \\\"bin\\\", \\\"-classpath\\\"],\\n \\\"run_args\\\": [\\\"-cp\\\", \\\"bin:\\\"]\\n}\"\n)\n\nfunc TestMain(m *testing.M) {\n\terr := setup()\n\tif err != nil {\n\t\tfmt.Errorf(\"error during test setup: %s\", err.Error())\n\t}\n\tdefer teardown()\n\tm.Run()\n}\n\nfunc setup() error {\n\terr := os.MkdirAll(configFolderName, fs.ModePerm)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfilePath := filepath.Join(configFolderName, defaultSdk.String()+jsonExt)\n\terr = os.WriteFile(filePath, []byte(javaConfig), 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\tos.Clearenv()\n\treturn nil\n}\n\nfunc teardown() {\n\terr := os.RemoveAll(configFolderName)\n\tif err != nil {\n\t\tfmt.Errorf(\"error during test setup: %s\", err.Error())\n\t}\n}\n\nfunc setOsEnvs(envsToSet map[string]string) error {\n\tfor key, value := range envsToSet {\n\t\tif err := os.Setenv(key, value); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\treturn nil\n}\n\nfunc TestNewEnvironment(t *testing.T) {\n\texecutorConfig := NewExecutorConfig(\"javac\", \"java\", []string{\"\"}, []string{\"\"})\n\ttests := []struct {\n\t\tname string\n\t\twant *Environment\n\t}{\n\t\t{name: \"create env service with default envs\", want: &Environment{\n\t\t\tNetworkEnvs: *NewNetworkEnvs(defaultIp, defaultPort),\n\t\t\tBeamSdkEnvs: *NewBeamEnvs(defaultSdk, executorConfig),\n\t\t\tApplicationEnvs: *NewApplicationEnvs(\"\/app\", &CacheEnvs{defaultCacheType, defaultCacheAddress, defaultCacheKeyExpirationTime}, defaultPipelineExecuteTimeout),\n\t\t}},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif got := NewEnvironment(\n\t\t\t\t*NewNetworkEnvs(defaultIp, defaultPort),\n\t\t\t\t*NewBeamEnvs(defaultSdk, executorConfig),\n\t\t\t\t*NewApplicationEnvs(\"\/app\", &CacheEnvs{defaultCacheType, defaultCacheAddress, defaultCacheKeyExpirationTime}, defaultPipelineExecuteTimeout)); !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"NewEnvironment() = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_getSdkEnvsFromOsEnvs(t *testing.T) {\n\tjars := strings.Join([]string{defaultBeamSdkPath, defaultBeamRunner, defaultSLF4j}, \":\")\n\tworkingDir := \".\/\"\n\ttests := []struct {\n\t\tname string\n\t\twant *BeamEnvs\n\t\tenvsToSet map[string]string\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"not specified beam sdk key in os envs\",\n\t\t\twant: nil,\n\t\t\tenvsToSet: map[string]string{},\n\t\t\twantErr: true,\n\t\t},\n\t\t{\n\t\t\tname: \"default beam envs\",\n\t\t\twant: NewBeamEnvs(defaultSdk, NewExecutorConfig(\"javac\", \"java\", []string{\"-d\", \"bin\", \"-classpath\", defaultBeamSdkPath}, []string{\"-cp\", \"bin:\" + jars})),\n\t\t\tenvsToSet: map[string]string{beamSdkKey: \"SDK_JAVA\"},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"specific sdk key in os envs\",\n\t\t\twant: NewBeamEnvs(defaultSdk, NewExecutorConfig(\"javac\", \"java\", []string{\"-d\", \"bin\", \"-classpath\", defaultBeamSdkPath}, []string{\"-cp\", \"bin:\" + jars})),\n\t\t\tenvsToSet: map[string]string{beamSdkKey: \"SDK_JAVA\"},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"wrong sdk key in os envs\",\n\t\t\twant: nil,\n\t\t\tenvsToSet: map[string]string{beamSdkKey: \"SDK_J\"},\n\t\t\twantErr: true,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif err := setOsEnvs(tt.envsToSet); err != nil {\n\t\t\t\tt.Fatalf(\"couldn't setup os env\")\n\t\t\t}\n\t\t\tgot, err := ConfigureBeamEnvs(workingDir)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"getSdkEnvsFromOsEnvs() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"getSdkEnvsFromOsEnvs() got = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n\tos.Clearenv()\n}\n\nfunc Test_getNetworkEnvsFromOsEnvs(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\twant *NetworkEnvs\n\t\tenvsToSet map[string]string\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"default values\",\n\t\t\twant: NewNetworkEnvs(defaultIp, defaultPort),\n\t\t},\n\t\t{\n\t\t\tname: \"values from os envs\",\n\t\t\twant: NewNetworkEnvs(\"12.12.12.21\", 1234),\n\t\t\tenvsToSet: map[string]string{serverIpKey: \"12.12.12.21\", serverPortKey: \"1234\"},\n\t\t},\n\t\t{\n\t\t\tname: \"not int port in os env, should be default\",\n\t\t\twant: nil,\n\t\t\tenvsToSet: map[string]string{serverIpKey: \"12.12.12.21\", serverPortKey: \"1a34\"},\n\t\t\twantErr: true,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif err := setOsEnvs(tt.envsToSet); err != nil {\n\t\t\t\tt.Fatalf(\"couldn't setup os env\")\n\t\t\t}\n\t\t\tgot, err := GetNetworkEnvsFromOsEnvs()\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"getNetworkEnvsFromOsEnvs() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"getNetworkEnvsFromOsEnvs() got = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n\tos.Clearenv()\n}\n\nfunc Test_getApplicationEnvsFromOsEnvs(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\twant *ApplicationEnvs\n\t\twantErr bool\n\t\tenvsToSet map[string]string\n\t}{\n\t\t{name: \"working dir is provided\", want: NewApplicationEnvs(\"\/app\", &CacheEnvs{defaultCacheType, defaultCacheAddress, defaultCacheKeyExpirationTime}, defaultPipelineExecuteTimeout), wantErr: false, envsToSet: map[string]string{workingDirKey: \"\/app\"}},\n\t\t{name: \"working dir isn't provided\", want: nil, wantErr: true},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif err := setOsEnvs(tt.envsToSet); err != nil {\n\t\t\t\tt.Fatalf(\"couldn't setup os env\")\n\t\t\t}\n\t\t\tgot, err := GetApplicationEnvsFromOsEnvs()\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"getApplicationEnvsFromOsEnvs() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"getApplicationEnvsFromOsEnvs() got = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t\tos.Clearenv()\n\t\t})\n\t}\n\tos.Clearenv()\n}\n\nfunc Test_createExecutorConfig(t *testing.T) {\n\tjars := strings.Join([]string{defaultBeamSdkPath, defaultBeamRunner, defaultSLF4j}, \":\")\n\ttype args struct {\n\t\tapacheBeamSdk playground.Sdk\n\t\tconfigPath string\n\t}\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant *ExecutorConfig\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"create executor configuration from json file\",\n\t\t\targs: args{apacheBeamSdk: defaultSdk, configPath: filepath.Join(configFolderName, defaultSdk.String()+jsonExt)},\n\t\t\twant: NewExecutorConfig(\"javac\", \"java\", []string{\"-d\", \"bin\", \"-classpath\", defaultBeamSdkPath}, []string{\"-cp\", \"bin:\" + jars}),\n\t\t\twantErr: false,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, err := createExecutorConfig(tt.args.apacheBeamSdk, tt.args.configPath)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"createExecutorConfig() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"createExecutorConfig() got = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_getConfigFromJson(t *testing.T) {\n\ttype args struct {\n\t\tconfigPath string\n\t}\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant *ExecutorConfig\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"get object from json\",\n\t\t\targs: args{filepath.Join(configFolderName, defaultSdk.String()+jsonExt)},\n\t\t\twant: NewExecutorConfig(\"javac\", \"java\", []string{\"-d\", \"bin\", \"-classpath\"}, []string{\"-cp\", \"bin:\"}),\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"error if wrong json path\",\n\t\t\targs: args{filepath.Join(\"wrong_folder\", defaultSdk.String()+jsonExt)},\n\t\t\twant: nil,\n\t\t\twantErr: true,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot, err := getConfigFromJson(tt.args.configPath)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"getConfigFromJson() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\t\tt.Errorf(\"getConfigFromJson() got = %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package models\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/koding\/bongo\"\n)\n\ntype Channel struct {\n\t\/\/ unique identifier of the channel\n\tId int64 `json:\"id,string\"`\n\n\t\/\/ Name of the channel\n\tName string `json:\"name\" sql:\"NOT NULL;TYPE:VARCHAR(200);\"`\n\n\t\/\/ Creator of the channel\n\tCreatorId int64 `json:\"creatorId,string\" sql:\"NOT NULL\"`\n\n\t\/\/ Name of the group which channel is belong to\n\tGroupName string `json:\"groupName\" sql:\"NOT NULL;TYPE:VARCHAR(200);\"`\n\n\t\/\/ Purpose of the channel\n\tPurpose string `json:\"purpose\"`\n\n\t\/\/ Secret key of the channel for event propagation purposes\n\t\/\/ we can put this key into another table?\n\tSecretKey string `json:\"-\"`\n\n\t\/\/ Type of the channel\n\tTypeConstant string `json:\"typeConstant\" sql:\"NOT NULL;TYPE:VARCHAR(100);\"`\n\n\t\/\/ Privacy constant of the channel\n\tPrivacyConstant string `json:\"privacyConstant\" sql:\"NOT NULL;TYPE:VARCHAR(100);\"`\n\n\t\/\/ Creation date of the channel\n\tCreatedAt time.Time `json:\"createdAt\" sql:\"NOT NULL\"`\n\n\t\/\/ Modification date of the channel\n\tUpdatedAt time.Time `json:\"updatedAt\" sql:\"NOT NULL\"`\n\n\t\/\/ Deletion date of the channel\n\tDeletedAt time.Time `json:\"deletedAt\"`\n}\n\n\/\/ to-do check for allowed channels\nconst (\n\t\/\/ TYPES\n\tChannel_TYPE_GROUP = \"group\"\n\tChannel_TYPE_TOPIC = \"topic\"\n\tChannel_TYPE_FOLLOWINGFEED = \"followingfeed\"\n\tChannel_TYPE_FOLLOWERS = \"followers\"\n\tChannel_TYPE_CHAT = \"chat\"\n\tChannel_TYPE_PINNED_ACTIVITY = \"pinnedactivity\"\n\tChannel_TYPE_PRIVATE_MESSAGE = \"privatemessage\"\n\tChannel_TYPE_DEFAULT = \"default\"\n\t\/\/ Privacy\n\tChannel_PRIVACY_PUBLIC = \"public\"\n\tChannel_PRIVACY_PRIVATE = \"private\"\n\t\/\/ Koding Group Name\n\tChannel_KODING_NAME = \"koding\"\n)\n\nfunc NewChannel() *Channel {\n\treturn &Channel{\n\t\tName: \"Channel\" + RandomName(),\n\t\tCreatorId: 0,\n\t\tGroupName: Channel_KODING_NAME,\n\t\tPurpose: \"\",\n\t\tSecretKey: \"\",\n\t\tTypeConstant: Channel_TYPE_DEFAULT,\n\t\tPrivacyConstant: Channel_PRIVACY_PRIVATE,\n\t}\n}\n\nfunc NewPrivateMessageChannel(creatorId int64, groupName string) *Channel {\n\tc := NewChannel()\n\tc.GroupName = groupName\n\tc.CreatorId = creatorId\n\tc.Name = RandomName()\n\tc.TypeConstant = Channel_TYPE_PRIVATE_MESSAGE\n\tc.PrivacyConstant = Channel_PRIVACY_PRIVATE\n\tc.Purpose = \"\"\n\treturn c\n}\n\nfunc (c *Channel) BeforeCreate() {\n\tc.CreatedAt = time.Now().UTC()\n\tc.UpdatedAt = time.Now().UTC()\n\tc.DeletedAt = ZeroDate()\n}\n\nfunc (c *Channel) BeforeUpdate() {\n\tc.UpdatedAt = time.Now()\n}\n\nfunc (c Channel) GetId() int64 {\n\treturn c.Id\n}\n\nfunc (c Channel) TableName() string {\n\treturn \"api.channel\"\n}\n\nfunc (c *Channel) AfterCreate() {\n\tbongo.B.AfterCreate(c)\n}\n\nfunc (c *Channel) AfterUpdate() {\n\tbongo.B.AfterUpdate(c)\n}\n\nfunc (c Channel) AfterDelete() {\n\tbongo.B.AfterDelete(c)\n}\n\nfunc (c *Channel) Update() error {\n\tif c.Name == \"\" || c.GroupName == \"\" {\n\t\treturn fmt.Errorf(\"Validation failed %s - %s\", c.Name, c.GroupName)\n\t}\n\n\treturn bongo.B.Update(c)\n}\n\nfunc (c *Channel) Create() error {\n\tif c.Name == \"\" || c.GroupName == \"\" || c.TypeConstant == \"\" {\n\t\treturn fmt.Errorf(\"Validation failed %s - %s -%s\", c.Name, c.GroupName, c.TypeConstant)\n\t}\n\n\t\/\/ golang returns -1 if item not in the string\n\tif strings.Index(c.Name, \" \") > -1 {\n\t\treturn fmt.Errorf(\"Channel name %q has empty space in it\", c.Name)\n\t}\n\n\tif c.TypeConstant == Channel_TYPE_GROUP ||\n\t\tc.TypeConstant == Channel_TYPE_FOLLOWERS \/* we can add more types here *\/ {\n\n\t\tvar selector map[string]interface{}\n\t\tswitch c.TypeConstant {\n\t\tcase Channel_TYPE_GROUP:\n\t\t\tselector = map[string]interface{}{\n\t\t\t\t\"group_name\": c.GroupName,\n\t\t\t\t\"type_constant\": c.TypeConstant,\n\t\t\t}\n\t\tcase Channel_TYPE_FOLLOWERS:\n\t\t\tselector = map[string]interface{}{\n\t\t\t\t\"creator_id\": c.CreatorId,\n\t\t\t\t\"type_constant\": c.TypeConstant,\n\t\t\t}\n\t\t}\n\n\t\t\/\/ if err is nil\n\t\t\/\/ it means we already have that channel\n\t\terr := c.One(bongo.NewQS(selector))\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t\t\/\/ return fmt.Errorf(\"%s typed channel is already created before for %s group\", c.TypeConstant, c.GroupName)\n\t\t}\n\n\t\tif err != gorm.RecordNotFound {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\treturn bongo.B.Create(c)\n}\n\nfunc (c *Channel) Delete() error {\n\treturn bongo.B.Delete(c)\n}\n\nfunc (c *Channel) ById(id int64) error {\n\treturn bongo.B.ById(c, id)\n}\n\nfunc (c *Channel) One(q *bongo.Query) error {\n\treturn bongo.B.One(c, c, q)\n}\n\nfunc (c *Channel) Some(data interface{}, q *bongo.Query) error {\n\treturn bongo.B.Some(c, data, q)\n}\n\nfunc (c *Channel) FetchByIds(ids []int64) ([]Channel, error) {\n\tvar channels []Channel\n\n\tif len(ids) == 0 {\n\t\treturn channels, nil\n\t}\n\n\tif err := bongo.B.FetchByIds(c, &channels, ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn channels, nil\n}\n\nfunc (c *Channel) AddParticipant(participantId int64) (*ChannelParticipant, error) {\n\tif c.Id == 0 {\n\t\treturn nil, errors.New(\"Channel Id is not set\")\n\t}\n\n\tcp := NewChannelParticipant()\n\tcp.ChannelId = c.Id\n\tcp.AccountId = participantId\n\n\terr := cp.FetchParticipant()\n\tif err != nil && err != gorm.RecordNotFound {\n\t\treturn nil, err\n\t}\n\n\t\/\/ if we have this record in DB\n\tif cp.Id != 0 {\n\t\t\/\/ if status is not active\n\t\tif cp.StatusConstant == ChannelParticipant_STATUS_ACTIVE {\n\t\t\treturn nil, errors.New(fmt.Sprintf(\"Account %d is already a participant of channel %d\", cp.AccountId, cp.ChannelId))\n\t\t}\n\t\tcp.StatusConstant = ChannelParticipant_STATUS_ACTIVE\n\t\tif err := cp.Update(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn cp, nil\n\t}\n\n\tcp.StatusConstant = ChannelParticipant_STATUS_ACTIVE\n\n\tif err := cp.Create(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cp, nil\n}\n\nfunc (c *Channel) RemoveParticipant(participantId int64) error {\n\tif c.Id == 0 {\n\t\treturn errors.New(\"Channel Id is not set\")\n\t}\n\n\tcp := NewChannelParticipant()\n\tcp.ChannelId = c.Id\n\tcp.AccountId = participantId\n\n\terr := cp.FetchParticipant()\n\t\/\/ if user is not in this channel, do nothing\n\tif err == gorm.RecordNotFound {\n\t\treturn nil\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cp.StatusConstant == ChannelParticipant_STATUS_LEFT {\n\t\treturn nil\n\t}\n\n\tcp.StatusConstant = ChannelParticipant_STATUS_LEFT\n\tif err := cp.Update(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *Channel) FetchParticipantIds() ([]int64, error) {\n\tvar participantIds []int64\n\n\tif c.Id == 0 {\n\t\treturn participantIds, errors.New(\"Channel Id is not set\")\n\t}\n\n\tquery := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"channel_id\": c.Id,\n\t\t\t\"status_constant\": ChannelParticipant_STATUS_ACTIVE,\n\t\t},\n\t\tPluck: \"account_id\",\n\t}\n\n\tcp := NewChannelParticipant()\n\terr := cp.Some(&participantIds, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn participantIds, nil\n}\n\nfunc (c *Channel) AddMessage(messageId int64) (*ChannelMessageList, error) {\n\tcml, err := c.FetchMessageList(messageId)\n\tif err == nil {\n\t\treturn nil, errors.New(\"Message is already in the channel\")\n\t}\n\n\t\/\/ silence record not found err\n\tif err != gorm.RecordNotFound {\n\t\treturn nil, err\n\t}\n\n\tcml.ChannelId = c.Id\n\tcml.MessageId = messageId\n\n\tif err := cml.Create(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cml, nil\n}\n\nfunc (c *Channel) RemoveMessage(messageId int64) (*ChannelMessageList, error) {\n\tcml, err := c.FetchMessageList(messageId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := cml.Delete(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cml, nil\n}\n\nfunc (c *Channel) FetchMessageList(messageId int64) (*ChannelMessageList, error) {\n\tif c.Id == 0 {\n\t\treturn nil, errors.New(\"Channel Id is not set\")\n\t}\n\n\tcml := NewChannelMessageList()\n\tselector := map[string]interface{}{\n\t\t\"channel_id\": c.Id,\n\t\t\"message_id\": messageId,\n\t}\n\n\treturn cml, cml.One(bongo.NewQS(selector))\n}\n\nfunc (c *Channel) FetchChannelIdByNameAndGroupName(name, groupName string) (int64, error) {\n\tquery := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"name\": name,\n\t\t\t\"group_name\": groupName,\n\t\t},\n\t\tPagination: *bongo.NewPagination(1, 0),\n\t\tPluck: \"id\",\n\t}\n\tvar ids []int64\n\tif err := c.Some(&ids, query); err != nil {\n\t\treturn 0, err\n\t}\n\n\tif ids == nil {\n\t\treturn 0, gorm.RecordNotFound\n\t}\n\n\tif len(ids) == 0 {\n\t\treturn 0, gorm.RecordNotFound\n\t}\n\n\treturn ids[0], nil\n}\n\nfunc (c *Channel) Search(q *Query) ([]Channel, error) {\n\n\tif q.GroupName == \"\" {\n\t\treturn nil, fmt.Errorf(\"Query doesnt have any Group info %+v\", q)\n\t}\n\n\tvar channels []Channel\n\n\tquery := bongo.B.DB.Table(c.TableName()).Limit(q.Limit)\n\n\tquery = query.Where(\"type_constant = ?\", q.Type)\n\tquery = query.Where(\"privacy_constant = ?\", Channel_PRIVACY_PUBLIC)\n\tquery = query.Where(\"group_name = ?\", q.GroupName)\n\tquery = query.Where(\"name like ?\", q.Name+\"%\")\n\n\tif err := query.Find(&channels).Error; err != nil {\n\t\treturn nil, err\n\t}\n\n\tif channels == nil {\n\t\treturn make([]Channel, 0), nil\n\t}\n\n\treturn channels, nil\n}\n\nfunc (c *Channel) ByName(q *Query) (Channel, error) {\n\tfmt.Println(\"-------- FIX THIS PART ------\")\n\tfmt.Println(\"TODO - check permissions here\")\n\tfmt.Println(\"-------- FIX THIS PART ------\")\n\tvar channel Channel\n\n\tif q.GroupName == \"\" {\n\t\treturn channel, fmt.Errorf(\"Query doesnt have any Group info %+v\", q)\n\t}\n\n\tquery := bongo.B.DB.Table(c.TableName()).Limit(q.Limit)\n\n\tquery = query.Where(\"type_constant = ?\", q.Type)\n\tquery = query.Where(\"group_name = ?\", q.GroupName)\n\tquery = query.Where(\"name = ?\", q.Name)\n\n\treturn channel, query.Find(&channel).Error\n}\n\nfunc (c *Channel) List(q *Query) ([]Channel, error) {\n\n\tif q.GroupName == \"\" {\n\t\treturn nil, fmt.Errorf(\"Query doesnt have any Group info %+v\", q)\n\t}\n\n\tvar channels []Channel\n\n\tquery := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"group_name\": q.GroupName,\n\t\t},\n\t\tPagination: *bongo.NewPagination(q.Limit, q.Skip),\n\t}\n\n\tif q.Type != \"\" {\n\t\tquery.Selector[\"type_constant\"] = q.Type\n\t}\n\n\terr := c.Some(&channels, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif channels == nil {\n\t\treturn make([]Channel, 0), nil\n\t}\n\n\treturn channels, nil\n}\n\nfunc (c *Channel) FetchLastMessage() (*ChannelMessage, error) {\n\tif c.Id == 0 {\n\t\treturn nil, errors.New(\"Channel Id is not set\")\n\t}\n\n\tcml := NewChannelMessageList()\n\tquery := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"channel_id\": c.Id,\n\t\t},\n\t\tSort: map[string]string{\n\t\t\t\"added_at\": \"DESC\",\n\t\t},\n\t\tPagination: *bongo.NewPagination(1, 0),\n\t\tPluck: \"message_id\",\n\t}\n\n\tvar messageIds []int64\n\terr := cml.Some(&messageIds, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif messageIds == nil || len(messageIds) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tcm := NewChannelMessage()\n\tif err := cm.ById(messageIds[0]); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cm, nil\n}\n\nfunc (c *Channel) FetchPinnedActivityChannel(accountId int64, groupName string) error {\n\tquery := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"creator_id\": accountId,\n\t\t\t\"group_name\": groupName,\n\t\t\t\"type_constant\": Channel_TYPE_PINNED_ACTIVITY,\n\t\t},\n\t}\n\n\treturn c.One(query)\n}\n<commit_msg>Migration: CreateRaw method is added to channel to be used in migration<commit_after>package models\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/koding\/bongo\"\n)\n\ntype Channel struct {\n\t\/\/ unique identifier of the channel\n\tId int64 `json:\"id,string\"`\n\n\t\/\/ Name of the channel\n\tName string `json:\"name\" sql:\"NOT NULL;TYPE:VARCHAR(200);\"`\n\n\t\/\/ Creator of the channel\n\tCreatorId int64 `json:\"creatorId,string\" sql:\"NOT NULL\"`\n\n\t\/\/ Name of the group which channel is belong to\n\tGroupName string `json:\"groupName\" sql:\"NOT NULL;TYPE:VARCHAR(200);\"`\n\n\t\/\/ Purpose of the channel\n\tPurpose string `json:\"purpose\"`\n\n\t\/\/ Secret key of the channel for event propagation purposes\n\t\/\/ we can put this key into another table?\n\tSecretKey string `json:\"-\"`\n\n\t\/\/ Type of the channel\n\tTypeConstant string `json:\"typeConstant\" sql:\"NOT NULL;TYPE:VARCHAR(100);\"`\n\n\t\/\/ Privacy constant of the channel\n\tPrivacyConstant string `json:\"privacyConstant\" sql:\"NOT NULL;TYPE:VARCHAR(100);\"`\n\n\t\/\/ Creation date of the channel\n\tCreatedAt time.Time `json:\"createdAt\" sql:\"NOT NULL\"`\n\n\t\/\/ Modification date of the channel\n\tUpdatedAt time.Time `json:\"updatedAt\" sql:\"NOT NULL\"`\n\n\t\/\/ Deletion date of the channel\n\tDeletedAt time.Time `json:\"deletedAt\"`\n}\n\n\/\/ to-do check for allowed channels\nconst (\n\t\/\/ TYPES\n\tChannel_TYPE_GROUP = \"group\"\n\tChannel_TYPE_TOPIC = \"topic\"\n\tChannel_TYPE_FOLLOWINGFEED = \"followingfeed\"\n\tChannel_TYPE_FOLLOWERS = \"followers\"\n\tChannel_TYPE_CHAT = \"chat\"\n\tChannel_TYPE_PINNED_ACTIVITY = \"pinnedactivity\"\n\tChannel_TYPE_PRIVATE_MESSAGE = \"privatemessage\"\n\tChannel_TYPE_DEFAULT = \"default\"\n\t\/\/ Privacy\n\tChannel_PRIVACY_PUBLIC = \"public\"\n\tChannel_PRIVACY_PRIVATE = \"private\"\n\t\/\/ Koding Group Name\n\tChannel_KODING_NAME = \"koding\"\n)\n\nfunc NewChannel() *Channel {\n\treturn &Channel{\n\t\tName: \"Channel\" + RandomName(),\n\t\tCreatorId: 0,\n\t\tGroupName: Channel_KODING_NAME,\n\t\tPurpose: \"\",\n\t\tSecretKey: \"\",\n\t\tTypeConstant: Channel_TYPE_DEFAULT,\n\t\tPrivacyConstant: Channel_PRIVACY_PRIVATE,\n\t}\n}\n\nfunc NewPrivateMessageChannel(creatorId int64, groupName string) *Channel {\n\tc := NewChannel()\n\tc.GroupName = groupName\n\tc.CreatorId = creatorId\n\tc.Name = RandomName()\n\tc.TypeConstant = Channel_TYPE_PRIVATE_MESSAGE\n\tc.PrivacyConstant = Channel_PRIVACY_PRIVATE\n\tc.Purpose = \"\"\n\treturn c\n}\n\nfunc (c *Channel) BeforeCreate() {\n\tc.CreatedAt = time.Now().UTC()\n\tc.UpdatedAt = time.Now().UTC()\n\tc.DeletedAt = ZeroDate()\n}\n\nfunc (c *Channel) BeforeUpdate() {\n\tc.UpdatedAt = time.Now()\n}\n\nfunc (c Channel) GetId() int64 {\n\treturn c.Id\n}\n\nfunc (c Channel) TableName() string {\n\treturn \"api.channel\"\n}\n\nfunc (c *Channel) AfterCreate() {\n\tbongo.B.AfterCreate(c)\n}\n\nfunc (c *Channel) AfterUpdate() {\n\tbongo.B.AfterUpdate(c)\n}\n\nfunc (c Channel) AfterDelete() {\n\tbongo.B.AfterDelete(c)\n}\n\nfunc (c *Channel) Update() error {\n\tif c.Name == \"\" || c.GroupName == \"\" {\n\t\treturn fmt.Errorf(\"Validation failed %s - %s\", c.Name, c.GroupName)\n\t}\n\n\treturn bongo.B.Update(c)\n}\n\nfunc (c *Channel) Create() error {\n\tif c.Name == \"\" || c.GroupName == \"\" || c.TypeConstant == \"\" {\n\t\treturn fmt.Errorf(\"Validation failed %s - %s -%s\", c.Name, c.GroupName, c.TypeConstant)\n\t}\n\n\t\/\/ golang returns -1 if item not in the string\n\tif strings.Index(c.Name, \" \") > -1 {\n\t\treturn fmt.Errorf(\"Channel name %q has empty space in it\", c.Name)\n\t}\n\n\tif c.TypeConstant == Channel_TYPE_GROUP ||\n\t\tc.TypeConstant == Channel_TYPE_FOLLOWERS \/* we can add more types here *\/ {\n\n\t\tvar selector map[string]interface{}\n\t\tswitch c.TypeConstant {\n\t\tcase Channel_TYPE_GROUP:\n\t\t\tselector = map[string]interface{}{\n\t\t\t\t\"group_name\": c.GroupName,\n\t\t\t\t\"type_constant\": c.TypeConstant,\n\t\t\t}\n\t\tcase Channel_TYPE_FOLLOWERS:\n\t\t\tselector = map[string]interface{}{\n\t\t\t\t\"creator_id\": c.CreatorId,\n\t\t\t\t\"type_constant\": c.TypeConstant,\n\t\t\t}\n\t\t}\n\n\t\t\/\/ if err is nil\n\t\t\/\/ it means we already have that channel\n\t\terr := c.One(bongo.NewQS(selector))\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t\t\/\/ return fmt.Errorf(\"%s typed channel is already created before for %s group\", c.TypeConstant, c.GroupName)\n\t\t}\n\n\t\tif err != gorm.RecordNotFound {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\treturn bongo.B.Create(c)\n}\n\nfunc (c *Channel) CreateRaw() error {\n\tinsertSql := \"INSERT INTO \" +\n\t\tc.TableName() +\n\t\t` (\"name\",\"creator_id\",\"group_name\",\"purpose\",\"secret_key\",\"type_constant\",` +\n\t\t`\"privacy_constant\", \"created_at\", \"updated_at\", \"deleted_at\")` +\n\t\t\"VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10) \" +\n\t\t\"RETURNING ID\"\n\n\treturn bongo.B.DB.CommonDB().QueryRow(insertSql, c.Name, c.CreatorId,\n\t\tc.GroupName, c.Purpose, c.SecretKey, c.TypeConstant, c.PrivacyConstant,\n\t\tc.CreatedAt, c.UpdatedAt, c.DeletedAt).Scan(&c.Id)\n}\n\nfunc (c *Channel) Delete() error {\n\treturn bongo.B.Delete(c)\n}\n\nfunc (c *Channel) ById(id int64) error {\n\treturn bongo.B.ById(c, id)\n}\n\nfunc (c *Channel) One(q *bongo.Query) error {\n\treturn bongo.B.One(c, c, q)\n}\n\nfunc (c *Channel) Some(data interface{}, q *bongo.Query) error {\n\treturn bongo.B.Some(c, data, q)\n}\n\nfunc (c *Channel) FetchByIds(ids []int64) ([]Channel, error) {\n\tvar channels []Channel\n\n\tif len(ids) == 0 {\n\t\treturn channels, nil\n\t}\n\n\tif err := bongo.B.FetchByIds(c, &channels, ids); err != nil {\n\t\treturn nil, err\n\t}\n\treturn channels, nil\n}\n\nfunc (c *Channel) AddParticipant(participantId int64) (*ChannelParticipant, error) {\n\tif c.Id == 0 {\n\t\treturn nil, errors.New(\"Channel Id is not set\")\n\t}\n\n\tcp := NewChannelParticipant()\n\tcp.ChannelId = c.Id\n\tcp.AccountId = participantId\n\n\terr := cp.FetchParticipant()\n\tif err != nil && err != gorm.RecordNotFound {\n\t\treturn nil, err\n\t}\n\n\t\/\/ if we have this record in DB\n\tif cp.Id != 0 {\n\t\t\/\/ if status is not active\n\t\tif cp.StatusConstant == ChannelParticipant_STATUS_ACTIVE {\n\t\t\treturn nil, errors.New(fmt.Sprintf(\"Account %d is already a participant of channel %d\", cp.AccountId, cp.ChannelId))\n\t\t}\n\t\tcp.StatusConstant = ChannelParticipant_STATUS_ACTIVE\n\t\tif err := cp.Update(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn cp, nil\n\t}\n\n\tcp.StatusConstant = ChannelParticipant_STATUS_ACTIVE\n\n\tif err := cp.Create(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cp, nil\n}\n\nfunc (c *Channel) RemoveParticipant(participantId int64) error {\n\tif c.Id == 0 {\n\t\treturn errors.New(\"Channel Id is not set\")\n\t}\n\n\tcp := NewChannelParticipant()\n\tcp.ChannelId = c.Id\n\tcp.AccountId = participantId\n\n\terr := cp.FetchParticipant()\n\t\/\/ if user is not in this channel, do nothing\n\tif err == gorm.RecordNotFound {\n\t\treturn nil\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif cp.StatusConstant == ChannelParticipant_STATUS_LEFT {\n\t\treturn nil\n\t}\n\n\tcp.StatusConstant = ChannelParticipant_STATUS_LEFT\n\tif err := cp.Update(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *Channel) FetchParticipantIds() ([]int64, error) {\n\tvar participantIds []int64\n\n\tif c.Id == 0 {\n\t\treturn participantIds, errors.New(\"Channel Id is not set\")\n\t}\n\n\tquery := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"channel_id\": c.Id,\n\t\t\t\"status_constant\": ChannelParticipant_STATUS_ACTIVE,\n\t\t},\n\t\tPluck: \"account_id\",\n\t}\n\n\tcp := NewChannelParticipant()\n\terr := cp.Some(&participantIds, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn participantIds, nil\n}\n\nfunc (c *Channel) AddMessage(messageId int64) (*ChannelMessageList, error) {\n\tcml, err := c.FetchMessageList(messageId)\n\tif err == nil {\n\t\treturn nil, errors.New(\"Message is already in the channel\")\n\t}\n\n\t\/\/ silence record not found err\n\tif err != gorm.RecordNotFound {\n\t\treturn nil, err\n\t}\n\n\tcml.ChannelId = c.Id\n\tcml.MessageId = messageId\n\n\tif err := cml.Create(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cml, nil\n}\n\nfunc (c *Channel) RemoveMessage(messageId int64) (*ChannelMessageList, error) {\n\tcml, err := c.FetchMessageList(messageId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := cml.Delete(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cml, nil\n}\n\nfunc (c *Channel) FetchMessageList(messageId int64) (*ChannelMessageList, error) {\n\tif c.Id == 0 {\n\t\treturn nil, errors.New(\"Channel Id is not set\")\n\t}\n\n\tcml := NewChannelMessageList()\n\tselector := map[string]interface{}{\n\t\t\"channel_id\": c.Id,\n\t\t\"message_id\": messageId,\n\t}\n\n\treturn cml, cml.One(bongo.NewQS(selector))\n}\n\nfunc (c *Channel) FetchChannelIdByNameAndGroupName(name, groupName string) (int64, error) {\n\tquery := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"name\": name,\n\t\t\t\"group_name\": groupName,\n\t\t},\n\t\tPagination: *bongo.NewPagination(1, 0),\n\t\tPluck: \"id\",\n\t}\n\tvar ids []int64\n\tif err := c.Some(&ids, query); err != nil {\n\t\treturn 0, err\n\t}\n\n\tif ids == nil {\n\t\treturn 0, gorm.RecordNotFound\n\t}\n\n\tif len(ids) == 0 {\n\t\treturn 0, gorm.RecordNotFound\n\t}\n\n\treturn ids[0], nil\n}\n\nfunc (c *Channel) Search(q *Query) ([]Channel, error) {\n\n\tif q.GroupName == \"\" {\n\t\treturn nil, fmt.Errorf(\"Query doesnt have any Group info %+v\", q)\n\t}\n\n\tvar channels []Channel\n\n\tquery := bongo.B.DB.Table(c.TableName()).Limit(q.Limit)\n\n\tquery = query.Where(\"type_constant = ?\", q.Type)\n\tquery = query.Where(\"privacy_constant = ?\", Channel_PRIVACY_PUBLIC)\n\tquery = query.Where(\"group_name = ?\", q.GroupName)\n\tquery = query.Where(\"name like ?\", q.Name+\"%\")\n\n\tif err := query.Find(&channels).Error; err != nil {\n\t\treturn nil, err\n\t}\n\n\tif channels == nil {\n\t\treturn make([]Channel, 0), nil\n\t}\n\n\treturn channels, nil\n}\n\nfunc (c *Channel) ByName(q *Query) (Channel, error) {\n\tfmt.Println(\"-------- FIX THIS PART ------\")\n\tfmt.Println(\"TODO - check permissions here\")\n\tfmt.Println(\"-------- FIX THIS PART ------\")\n\tvar channel Channel\n\n\tif q.GroupName == \"\" {\n\t\treturn channel, fmt.Errorf(\"Query doesnt have any Group info %+v\", q)\n\t}\n\n\tquery := bongo.B.DB.Table(c.TableName()).Limit(q.Limit)\n\n\tquery = query.Where(\"type_constant = ?\", q.Type)\n\tquery = query.Where(\"group_name = ?\", q.GroupName)\n\tquery = query.Where(\"name = ?\", q.Name)\n\n\treturn channel, query.Find(&channel).Error\n}\n\nfunc (c *Channel) List(q *Query) ([]Channel, error) {\n\n\tif q.GroupName == \"\" {\n\t\treturn nil, fmt.Errorf(\"Query doesnt have any Group info %+v\", q)\n\t}\n\n\tvar channels []Channel\n\n\tquery := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"group_name\": q.GroupName,\n\t\t},\n\t\tPagination: *bongo.NewPagination(q.Limit, q.Skip),\n\t}\n\n\tif q.Type != \"\" {\n\t\tquery.Selector[\"type_constant\"] = q.Type\n\t}\n\n\terr := c.Some(&channels, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif channels == nil {\n\t\treturn make([]Channel, 0), nil\n\t}\n\n\treturn channels, nil\n}\n\nfunc (c *Channel) FetchLastMessage() (*ChannelMessage, error) {\n\tif c.Id == 0 {\n\t\treturn nil, errors.New(\"Channel Id is not set\")\n\t}\n\n\tcml := NewChannelMessageList()\n\tquery := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"channel_id\": c.Id,\n\t\t},\n\t\tSort: map[string]string{\n\t\t\t\"added_at\": \"DESC\",\n\t\t},\n\t\tPagination: *bongo.NewPagination(1, 0),\n\t\tPluck: \"message_id\",\n\t}\n\n\tvar messageIds []int64\n\terr := cml.Some(&messageIds, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif messageIds == nil || len(messageIds) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tcm := NewChannelMessage()\n\tif err := cm.ById(messageIds[0]); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cm, nil\n}\n\nfunc (c *Channel) FetchPinnedActivityChannel(accountId int64, groupName string) error {\n\tquery := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"creator_id\": accountId,\n\t\t\t\"group_name\": groupName,\n\t\t\t\"type_constant\": Channel_TYPE_PINNED_ACTIVITY,\n\t\t},\n\t}\n\n\treturn c.One(query)\n}\n<|endoftext|>"} {"text":"<commit_before>package weed_server\n\nimport (\n\t\"bytes\"\n\t\"code.google.com\/p\/weed-fs\/go\/glog\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/goraft\/raft\"\n\t\"github.com\/gorilla\/mux\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype RaftServer struct {\n\tpeers []string \/\/ initial peers to join with\n\traftServer raft.Server\n\tdataDir string\n\thttpAddr string\n\tversion string\n\trouter *mux.Router\n}\n\nfunc NewRaftServer(r *mux.Router, version string, peers []string, httpAddr string, dataDir string) *RaftServer {\n\ts := &RaftServer{\n\t\tversion: version,\n\t\tpeers: peers,\n\t\thttpAddr: httpAddr,\n\t\tdataDir: dataDir,\n\t\trouter: r,\n\t}\n\n\t\/\/raft.SetLogLevel(2)\n\n\tvar err error\n\ttransporter := raft.NewHTTPTransporter(\"\/raft\")\n\ts.raftServer, err = raft.NewServer(s.httpAddr, s.dataDir, transporter, nil, nil, \"\")\n\tif err != nil {\n\t\tglog.V(0).Infoln(err)\n\t\treturn nil\n\t}\n\ttransporter.Install(s.raftServer, s)\n\ts.raftServer.SetHeartbeatTimeout(1 * time.Second)\n\ts.raftServer.SetElectionTimeout(1500 * time.Millisecond)\n\ts.raftServer.Start()\n\n\ts.router.HandleFunc(\"\/raft\/join\", s.joinHandler).Methods(\"POST\")\n\n\t\/\/ Join to leader if specified.\n\tif len(s.peers) > 0 {\n\t\tglog.V(0).Infoln(\"Joining cluster:\", strings.Join(s.peers, \",\"))\n\n\t\tif !s.raftServer.IsLogEmpty() {\n\t\t\tglog.V(0).Infoln(\"Cannot join with an existing log\")\n\t\t}\n\n\t\tif err := s.Join(s.peers); err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tglog.V(0).Infoln(\"Joined cluster\")\n\n\t\t\/\/ Initialize the server by joining itself.\n\t} else if s.raftServer.IsLogEmpty() {\n\t\tglog.V(0).Infoln(\"Initializing new cluster\")\n\n\t\t_, err := s.raftServer.Do(&raft.DefaultJoinCommand{\n\t\t\tName: s.raftServer.Name(),\n\t\t\tConnectionString: \"http:\/\/\" + s.httpAddr,\n\t\t})\n\n\t\tif err != nil {\n\t\t\tglog.V(0).Infoln(err)\n\t\t\treturn nil\n\t\t}\n\n\t} else {\n\t\tglog.V(0).Infoln(\"Recovered from log\")\n\t}\n\n\treturn s\n}\n\nfunc (s *RaftServer) Leader() string {\n\tl := s.raftServer.Leader()\n\n\tif l == \"\" {\n\t\t\/\/ We are a single node cluster, we are the leader\n\t\treturn s.raftServer.Name()\n\t}\n\n\treturn l\n}\n\nfunc (s *RaftServer) Members() (members []string) {\n\tpeers := s.raftServer.Peers()\n\n\tfor _, p := range peers {\n\t\tmembers = append(members, strings.TrimPrefix(p.ConnectionString, \"http:\/\/\"))\n\t}\n\n\treturn\n}\n\n\/\/ Join joins an existing cluster.\nfunc (s *RaftServer) Join(peers []string) error {\n\tcommand := &raft.DefaultJoinCommand{\n\t\tName: s.raftServer.Name(),\n\t\tConnectionString: \"http:\/\/\" + s.httpAddr,\n\t}\n\n\tvar b bytes.Buffer\n\tjson.NewEncoder(&b).Encode(command)\n\n\tfor _, m := range peers {\n\t\tglog.V(0).Infoln(\"Attempting to connect to:\", m)\n\n\t\tresp, err := http.Post(fmt.Sprintf(\"http:\/\/%s\/raft\/join\", strings.TrimSpace(m)), \"application\/json\", &b)\n\t\tglog.V(0).Infoln(\"Post returned: \", err)\n\n\t\tif err != nil {\n\t\t\tif _, ok := err.(*url.Error); ok {\n\t\t\t\t\/\/ If we receive a network error try the next member\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\treturn err\n\t\t}\n\n\t\tresp.Body.Close()\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"Could not connect to any cluster peers\")\n}\n<commit_msg>Issue 62:\tLatest raft update breaks the build<commit_after>package weed_server\n\nimport (\n\t\"bytes\"\n\t\"code.google.com\/p\/weed-fs\/go\/glog\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/goraft\/raft\"\n\t\"github.com\/gorilla\/mux\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype RaftServer struct {\n\tpeers []string \/\/ initial peers to join with\n\traftServer raft.Server\n\tdataDir string\n\thttpAddr string\n\tversion string\n\trouter *mux.Router\n}\n\nfunc NewRaftServer(r *mux.Router, version string, peers []string, httpAddr string, dataDir string) *RaftServer {\n\ts := &RaftServer{\n\t\tversion: version,\n\t\tpeers: peers,\n\t\thttpAddr: httpAddr,\n\t\tdataDir: dataDir,\n\t\trouter: r,\n\t}\n\n\t\/\/raft.SetLogLevel(2)\n\n\tvar err error\n\ttransporter := raft.NewHTTPTransporter(\"\/raft\")\n\ts.raftServer, err = raft.NewServer(s.httpAddr, s.dataDir, transporter, nil, nil, \"\")\n\tif err != nil {\n\t\tglog.V(0).Infoln(err)\n\t\treturn nil\n\t}\n\ttransporter.Install(s.raftServer, s)\n\ts.raftServer.SetHeartbeatInterval(1 * time.Second)\n\ts.raftServer.SetElectionTimeout(1500 * time.Millisecond)\n\ts.raftServer.Start()\n\n\ts.router.HandleFunc(\"\/raft\/join\", s.joinHandler).Methods(\"POST\")\n\n\t\/\/ Join to leader if specified.\n\tif len(s.peers) > 0 {\n\t\tglog.V(0).Infoln(\"Joining cluster:\", strings.Join(s.peers, \",\"))\n\n\t\tif !s.raftServer.IsLogEmpty() {\n\t\t\tglog.V(0).Infoln(\"Cannot join with an existing log\")\n\t\t}\n\n\t\tif err := s.Join(s.peers); err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tglog.V(0).Infoln(\"Joined cluster\")\n\n\t\t\/\/ Initialize the server by joining itself.\n\t} else if s.raftServer.IsLogEmpty() {\n\t\tglog.V(0).Infoln(\"Initializing new cluster\")\n\n\t\t_, err := s.raftServer.Do(&raft.DefaultJoinCommand{\n\t\t\tName: s.raftServer.Name(),\n\t\t\tConnectionString: \"http:\/\/\" + s.httpAddr,\n\t\t})\n\n\t\tif err != nil {\n\t\t\tglog.V(0).Infoln(err)\n\t\t\treturn nil\n\t\t}\n\n\t} else {\n\t\tglog.V(0).Infoln(\"Recovered from log\")\n\t}\n\n\treturn s\n}\n\nfunc (s *RaftServer) Leader() string {\n\tl := s.raftServer.Leader()\n\n\tif l == \"\" {\n\t\t\/\/ We are a single node cluster, we are the leader\n\t\treturn s.raftServer.Name()\n\t}\n\n\treturn l\n}\n\nfunc (s *RaftServer) Members() (members []string) {\n\tpeers := s.raftServer.Peers()\n\n\tfor _, p := range peers {\n\t\tmembers = append(members, strings.TrimPrefix(p.ConnectionString, \"http:\/\/\"))\n\t}\n\n\treturn\n}\n\n\/\/ Join joins an existing cluster.\nfunc (s *RaftServer) Join(peers []string) error {\n\tcommand := &raft.DefaultJoinCommand{\n\t\tName: s.raftServer.Name(),\n\t\tConnectionString: \"http:\/\/\" + s.httpAddr,\n\t}\n\n\tvar b bytes.Buffer\n\tjson.NewEncoder(&b).Encode(command)\n\n\tfor _, m := range peers {\n\t\tglog.V(0).Infoln(\"Attempting to connect to:\", m)\n\n\t\tresp, err := http.Post(fmt.Sprintf(\"http:\/\/%s\/raft\/join\", strings.TrimSpace(m)), \"application\/json\", &b)\n\t\tglog.V(0).Infoln(\"Post returned: \", err)\n\n\t\tif err != nil {\n\t\t\tif _, ok := err.(*url.Error); ok {\n\t\t\t\t\/\/ If we receive a network error try the next member\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\treturn err\n\t\t}\n\n\t\tresp.Body.Close()\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"Could not connect to any cluster peers\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage client\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/keybase\/cli\"\n\t\"github.com\/keybase\/client\/go\/libcmdline\"\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\tkeybase1 \"github.com\/keybase\/client\/go\/protocol\"\n)\n\nconst spacesPerIndent = 4\n\nfunc indentSpace(level int) string {\n\treturn strings.Repeat(\" \", level*spacesPerIndent)\n}\n\ntype CmdDumpKeyfamily struct {\n\tlibkb.Contextified\n}\n\nfunc NewCmdDumpKeyfamily(cl *libcmdline.CommandLine, g *libkb.GlobalContext) cli.Command {\n\treturn cli.Command{\n\t\tName: \"dump-keyfamily\",\n\t\tFlags: []cli.Flag{},\n\t\tAction: func(c *cli.Context) {\n\t\t\tcl.ChooseCommand(&CmdDumpKeyfamily{Contextified: libkb.NewContextified(g)}, \"dump-keyfamily\", c)\n\t\t},\n\t}\n}\n\nfunc (v *CmdDumpKeyfamily) Run() (err error) {\n\tconfigCli, err := GetConfigClient(v.G())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcurrentStatus, err := configCli.GetCurrentStatus(context.TODO(), 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !currentStatus.LoggedIn {\n\t\treturn fmt.Errorf(\"Not logged in.\")\n\t}\n\tmyUID := currentStatus.User.Uid\n\n\tuserCli, err := GetUserClient(v.G())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tme, err := userCli.LoadUser(context.TODO(), keybase1.LoadUserArg{Uid: myUID})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpublicKeys, err := userCli.LoadPublicKeys(context.TODO(), keybase1.LoadPublicKeysArg{Uid: myUID})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdevCli, err := GetDeviceClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdevs, err := devCli.DeviceList(context.TODO(), 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tv.printExportedMe(me, publicKeys, devs)\n\treturn nil\n}\n\nfunc findSubkeys(parentID keybase1.KID, allKeys []keybase1.PublicKey) []keybase1.PublicKey {\n\tret := []keybase1.PublicKey{}\n\tfor _, key := range allKeys {\n\t\tif keybase1.KIDFromString(key.ParentID).Equal(parentID) {\n\t\t\tret = append(ret, key)\n\t\t}\n\t}\n\treturn ret\n}\n\nfunc (v *CmdDumpKeyfamily) printExportedMe(me keybase1.User, publicKeys []keybase1.PublicKey, devices []keybase1.Device) error {\n\tdui := v.G().UI.GetDumbOutputUI()\n\tif len(publicKeys) == 0 {\n\t\tdui.Printf(\"No public keys.\\n\")\n\t\treturn nil\n\t}\n\tdui.Printf(\"Public keys:\\n\")\n\t\/\/ Keep track of subkeys we print, so that if e.g. a subkey's parent is\n\t\/\/ nonexistent, we can notice that we skipped it.\n\tsubkeysShown := make(map[keybase1.KID]bool)\n\tfor _, key := range publicKeys {\n\t\tif !key.IsSibkey {\n\t\t\t\/\/ Subkeys will be printed under their respective sibkeys.\n\t\t\tcontinue\n\t\t}\n\t\tsubkeys := findSubkeys(key.KID, publicKeys)\n\t\terr := v.printKey(key, subkeys, 1)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, subkey := range subkeys {\n\t\t\tsubkeysShown[subkey.KID] = true\n\t\t}\n\t}\n\t\/\/ Print errors for any subkeys we failed to show.\n\tfor _, key := range publicKeys {\n\t\tif !key.IsSibkey && !subkeysShown[key.KID] {\n\t\t\tv.G().Log.Errorf(\"Dangling subkey: %s\", key.KID)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (v *CmdDumpKeyfamily) printKey(key keybase1.PublicKey, subkeys []keybase1.PublicKey, indent int) error {\n\tif key.KID == \"\" {\n\t\treturn fmt.Errorf(\"Found a key with an empty KID.\")\n\t}\n\teldestStr := \"\"\n\tif key.IsEldest {\n\t\teldestStr = \" (eldest)\"\n\t}\n\tdui := v.G().UI.GetDumbOutputUI()\n\tdui.Printf(\"%s%s%s\\n\", indentSpace(indent), key.KID, eldestStr)\n\tif key.PGPFingerprint != \"\" {\n\t\tdui.Printf(\"%sPGP Fingerprint: %s\\n\", indentSpace(indent+1), libkb.PGPFingerprintFromHexNoError(key.PGPFingerprint).ToQuads())\n\t\tdui.Printf(\"%sPGP Identities:\\n\", indentSpace(indent+1))\n\t\tfor _, identity := range key.PGPIdentities {\n\t\t\tcommentStr := \"\"\n\t\t\tif identity.Comment != \"\" {\n\t\t\t\tcommentStr = fmt.Sprintf(\" (%s)\", identity.Comment)\n\t\t\t}\n\t\t\temailStr := \"\"\n\t\t\tif identity.Email != \"\" {\n\t\t\t\temailStr = fmt.Sprintf(\" <%s>\", identity.Email)\n\t\t\t}\n\t\t\tdui.Printf(\"%s%s%s%s\\n\", indentSpace(indent+2), identity.Username, commentStr, emailStr)\n\t\t}\n\t}\n\tif key.DeviceID != \"\" || key.DeviceType != \"\" || key.DeviceDescription != \"\" {\n\t\tdui.Printf(\"%sDevice:\\n\", indentSpace(indent+1))\n\t\tif key.DeviceID != \"\" {\n\t\t\tdui.Printf(\"%sID: %s\\n\", indentSpace(indent+2), key.DeviceID)\n\t\t}\n\t\tif key.DeviceType != \"\" {\n\t\t\tdui.Printf(\"%sType: %s\\n\", indentSpace(indent+2), key.DeviceType)\n\t\t}\n\t\tif key.DeviceDescription != \"\" {\n\t\t\tdui.Printf(\"%sDescription: %s\\n\", indentSpace(indent+2), key.DeviceDescription)\n\t\t}\n\t}\n\tdui.Printf(\"%sCreated: %s\\n\", indentSpace(indent+1), keybase1.FromTime(key.CTime))\n\tdui.Printf(\"%sExpires: %s\\n\", indentSpace(indent+1), keybase1.FromTime(key.ETime))\n\n\tif len(subkeys) > 0 {\n\t\tdui.Printf(\"%sSubkeys:\\n\", indentSpace(indent+1))\n\t\tfor _, subkey := range subkeys {\n\t\t\tv.printKey(subkey, nil, indent+2)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (v *CmdDumpKeyfamily) ParseArgv(ctx *cli.Context) error {\n\treturn nil\n}\n\nfunc (v *CmdDumpKeyfamily) GetUsage() libkb.Usage {\n\treturn libkb.Usage{\n\t\tConfig: true,\n\t\tAPI: true,\n\t}\n}\n<commit_msg>add username parameter to dump-keyfamily<commit_after>\/\/ Copyright 2015 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\npackage client\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/keybase\/cli\"\n\t\"github.com\/keybase\/client\/go\/libcmdline\"\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\tkeybase1 \"github.com\/keybase\/client\/go\/protocol\"\n)\n\nconst spacesPerIndent = 4\n\nfunc indentSpace(level int) string {\n\treturn strings.Repeat(\" \", level*spacesPerIndent)\n}\n\ntype CmdDumpKeyfamily struct {\n\tlibkb.Contextified\n\tuser string\n}\n\nfunc (v *CmdDumpKeyfamily) ParseArgv(ctx *cli.Context) error {\n\tnargs := len(ctx.Args())\n\tif nargs > 1 {\n\t\treturn fmt.Errorf(\"dump-keyfamily only takes on argument, the user to lookup\")\n\t}\n\tif nargs == 1 {\n\t\tv.user = ctx.Args()[0]\n\t}\n\treturn nil\n}\n\nfunc NewCmdDumpKeyfamily(cl *libcmdline.CommandLine, g *libkb.GlobalContext) cli.Command {\n\treturn cli.Command{\n\t\tName: \"dump-keyfamily\",\n\t\tArgumentHelp: \"[username]\",\n\t\tUsage: \"Print out a user's current key family\",\n\t\tDescription: \"Print out a user's current key family. Don't specify a username to dump out your own keys.\",\n\t\tFlags: []cli.Flag{},\n\t\tAction: func(c *cli.Context) {\n\t\t\tcl.ChooseCommand(&CmdDumpKeyfamily{Contextified: libkb.NewContextified(g)}, \"dump-keyfamily\", c)\n\t\t},\n\t}\n}\n\nfunc (v *CmdDumpKeyfamily) Run() (err error) {\n\tconfigCli, err := GetConfigClient(v.G())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcurrentStatus, err := configCli.GetCurrentStatus(context.TODO(), 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !currentStatus.LoggedIn {\n\t\treturn fmt.Errorf(\"Not logged in.\")\n\t}\n\n\tvar UID keybase1.UID\n\tif v.user != \"\" {\n\t\tres := v.G().Resolver.Resolve(v.user)\n\t\tif res.GetError() != nil {\n\t\t\treturn fmt.Errorf(\"invalid user specified\")\n\t\t}\n\t\tUID = res.GetUID()\n\t} else {\n\t\tUID = currentStatus.User.Uid\n\t}\n\n\tuserCli, err := GetUserClient(v.G())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tuser, err := userCli.LoadUser(context.TODO(), keybase1.LoadUserArg{Uid: UID})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error loading user: %s\", err)\n\t}\n\n\tpublicKeys, err := userCli.LoadPublicKeys(context.TODO(), keybase1.LoadPublicKeysArg{Uid: UID})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error loading keys: %s\", err)\n\t}\n\n\tdevCli, err := GetDeviceClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdevs, err := devCli.DeviceList(context.TODO(), 0)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error loading device list: %s\", err)\n\t}\n\n\tv.printExportedUser(user, publicKeys, devs)\n\treturn nil\n}\n\nfunc findSubkeys(parentID keybase1.KID, allKeys []keybase1.PublicKey) []keybase1.PublicKey {\n\tret := []keybase1.PublicKey{}\n\tfor _, key := range allKeys {\n\t\tif keybase1.KIDFromString(key.ParentID).Equal(parentID) {\n\t\t\tret = append(ret, key)\n\t\t}\n\t}\n\treturn ret\n}\n\nfunc (v *CmdDumpKeyfamily) printExportedUser(user keybase1.User, publicKeys []keybase1.PublicKey,\n\tdevices []keybase1.Device) error {\n\n\tdui := v.G().UI.GetDumbOutputUI()\n\tif len(publicKeys) == 0 {\n\t\tdui.Printf(\"No public keys.\\n\")\n\t\treturn nil\n\t}\n\tdui.Printf(\"Public keys:\\n\")\n\t\/\/ Keep track of subkeys we print, so that if e.g. a subkey's parent is\n\t\/\/ nonexistent, we can notice that we skipped it.\n\tsubkeysShown := make(map[keybase1.KID]bool)\n\tfor _, key := range publicKeys {\n\t\tif !key.IsSibkey {\n\t\t\t\/\/ Subkeys will be printed under their respective sibkeys.\n\t\t\tcontinue\n\t\t}\n\t\tsubkeys := findSubkeys(key.KID, publicKeys)\n\t\terr := v.printKey(key, subkeys, 1)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, subkey := range subkeys {\n\t\t\tsubkeysShown[subkey.KID] = true\n\t\t}\n\t}\n\t\/\/ Print errors for any subkeys we failed to show.\n\tfor _, key := range publicKeys {\n\t\tif !key.IsSibkey && !subkeysShown[key.KID] {\n\t\t\tv.G().Log.Errorf(\"Dangling subkey: %s\", key.KID)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (v *CmdDumpKeyfamily) printKey(key keybase1.PublicKey, subkeys []keybase1.PublicKey, indent int) error {\n\tif key.KID == \"\" {\n\t\treturn fmt.Errorf(\"Found a key with an empty KID.\")\n\t}\n\teldestStr := \"\"\n\tif key.IsEldest {\n\t\teldestStr = \" (eldest)\"\n\t}\n\tdui := v.G().UI.GetDumbOutputUI()\n\tdui.Printf(\"%s%s%s\\n\", indentSpace(indent), key.KID, eldestStr)\n\tif key.PGPFingerprint != \"\" {\n\t\tdui.Printf(\"%sPGP Fingerprint: %s\\n\", indentSpace(indent+1), libkb.PGPFingerprintFromHexNoError(key.PGPFingerprint).ToQuads())\n\t\tdui.Printf(\"%sPGP Identities:\\n\", indentSpace(indent+1))\n\t\tfor _, identity := range key.PGPIdentities {\n\t\t\tcommentStr := \"\"\n\t\t\tif identity.Comment != \"\" {\n\t\t\t\tcommentStr = fmt.Sprintf(\" (%s)\", identity.Comment)\n\t\t\t}\n\t\t\temailStr := \"\"\n\t\t\tif identity.Email != \"\" {\n\t\t\t\temailStr = fmt.Sprintf(\" <%s>\", identity.Email)\n\t\t\t}\n\t\t\tdui.Printf(\"%s%s%s%s\\n\", indentSpace(indent+2), identity.Username, commentStr, emailStr)\n\t\t}\n\t}\n\tif key.DeviceID != \"\" || key.DeviceType != \"\" || key.DeviceDescription != \"\" {\n\t\tdui.Printf(\"%sDevice:\\n\", indentSpace(indent+1))\n\t\tif key.DeviceID != \"\" {\n\t\t\tdui.Printf(\"%sID: %s\\n\", indentSpace(indent+2), key.DeviceID)\n\t\t}\n\t\tif key.DeviceType != \"\" {\n\t\t\tdui.Printf(\"%sType: %s\\n\", indentSpace(indent+2), key.DeviceType)\n\t\t}\n\t\tif key.DeviceDescription != \"\" {\n\t\t\tdui.Printf(\"%sDescription: %s\\n\", indentSpace(indent+2), key.DeviceDescription)\n\t\t}\n\t}\n\tdui.Printf(\"%sCreated: %s\\n\", indentSpace(indent+1), keybase1.FromTime(key.CTime))\n\tdui.Printf(\"%sExpires: %s\\n\", indentSpace(indent+1), keybase1.FromTime(key.ETime))\n\n\tif len(subkeys) > 0 {\n\t\tdui.Printf(\"%sSubkeys:\\n\", indentSpace(indent+1))\n\t\tfor _, subkey := range subkeys {\n\t\t\tv.printKey(subkey, nil, indent+2)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (v *CmdDumpKeyfamily) GetUsage() libkb.Usage {\n\treturn libkb.Usage{\n\t\tConfig: true,\n\t\tAPI: true,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012, Google Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage tabletserver\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/youtube\/vitess\/go\/acl\"\n)\n\nvar (\n\tquerylogzHeader = []byte(`\n\t\t<tr>\n\t\t\t<th>Method<\/th>\n\t\t\t<th>Client<\/th>\n\t\t\t<th>User<\/th>\n\t\t\t<th>Start<\/th>\n\t\t\t<th>End<\/th>\n\t\t\t<th>Duration<\/th>\n\t\t\t<th>MySQL time<\/th>\n\t\t\t<th>Conn wait<\/th>\n\t\t\t<th>Plan<\/th>\n\t\t\t<th>SQL<\/th>\n\t\t\t<th>Queries<\/th>\n\t\t\t<th>Sources<\/th>\n\t\t\t<th>Response Size (Rows)<\/th>\n\t\t\t<th>Cache Hits<\/th>\n\t\t\t<th>Cache Misses<\/th>\n\t\t\t<th>Cache Absent<\/th>\n\t\t\t<th>Cache Invalidations<\/th>\n\t\t<\/tr>\n\t`)\n\tquerylogzFuncMap = template.FuncMap{\n\t\t\"stampMicro\": func(t time.Time) string { return t.Format(time.StampMicro) },\n\t\t\"cssWrappable\": wrappable,\n\t\t\"unquote\": func(s string) string { return strings.Trim(s, \"\\\"\") },\n\t}\n\tquerylogzTmpl = template.Must(template.New(\"example\").Funcs(querylogzFuncMap).Parse(`\n\t\t<tr class=\".ColorLevel\">\n\t\t\t<td>{{.Method}}<\/td>\n\t\t\t<td>{{.RemoteAddr}}<\/td>\n\t\t\t<td>{{.Username}}<\/td>\n\t\t\t<td>{{.StartTime | stampMicro}}<\/td>\n\t\t\t<td>{{.EndTime | stampMicro}}<\/td>\n\t\t\t<td>{{.TotalTime.Seconds}}<\/td>\n\t\t\t<td>{{.MysqlResponseTime.Seconds}}<\/td>\n\t\t\t<td>{{.WaitingForConnection.Seconds}}<\/td>\n\t\t\t<td>{{.PlanType}}<\/td>\n\t\t\t<td>{{.OriginalSql | unquote | cssWrappable}}<\/td>\n\t\t\t<td>{{.NumberOfQueries}}<\/td>\n\t\t\t<td>{{.FmtQuerySources}}<\/td>\n\t\t\t<td>{{.SizeOfResponse}}<\/td>\n\t\t\t<td>{{.CacheHits}}<\/td>\n\t\t\t<td>{{.CacheMisses}}<\/td>\n\t\t\t<td>{{.CacheAbsent}}<\/td>\n\t\t\t<td>{{.CacheInvalidations}}<\/td>\n\t\t<\/tr>\n\t`))\n)\n\nfunc init() {\n\thttp.HandleFunc(\"\/querylogz\", querylogzHandler)\n}\n\n\/\/ querylogzHandler serves a human readable snapshot of the\n\/\/ current query log.\nfunc querylogzHandler(w http.ResponseWriter, r *http.Request) {\n\tif err := acl.CheckAccessHTTP(r, acl.DEBUGGING); err != nil {\n\t\tacl.SendError(w, err)\n\t\treturn\n\t}\n\tch := SqlQueryLogger.Subscribe()\n\tdefer SqlQueryLogger.Unsubscribe(ch)\n\tstartHTMLTable(w)\n\tdefer endHTMLTable(w)\n\tw.Write(querylogzHeader)\n\n\tdeadline := time.After(10 * time.Second)\n\tfor i := 0; i < 300; i++ {\n\t\tselect {\n\t\tcase out := <-ch:\n\t\t\tstats, ok := out.(*SQLQueryStats)\n\t\t\tif !ok {\n\t\t\t\terr := fmt.Errorf(\"Unexpected value in %s: %#v (expecting value of type %T)\", TxLogger.Name, out, &SQLQueryStats{})\n\t\t\t\tio.WriteString(w, `<tr class=\"error\">`)\n\t\t\t\tio.WriteString(w, err.Error())\n\t\t\t\tio.WriteString(w, \"<\/tr>\")\n\t\t\t\tlog.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar level string\n\t\t\tif stats.TotalTime().Seconds() < 0.01 {\n\t\t\t\tlevel = \"low\"\n\t\t\t} else if stats.TotalTime().Seconds() < 0.1 {\n\t\t\t\tlevel = \"medium\"\n\t\t\t} else {\n\t\t\t\tlevel = \"high\"\n\t\t\t}\n\t\t\ttmplData := struct {\n\t\t\t\t*SQLQueryStats\n\t\t\t\tColorLevel string\n\t\t\t}{stats, level}\n\t\t\tquerylogzTmpl.Execute(w, tmplData)\n\t\tcase <-deadline:\n\t\t\treturn\n\t\t}\n\t}\n}\n<commit_msg>Add querylog txid to querylogz output.<commit_after>\/\/ Copyright 2012, Google Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage tabletserver\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/youtube\/vitess\/go\/acl\"\n)\n\nvar (\n\tquerylogzHeader = []byte(`\n\t\t<tr>\n\t\t\t<th>Method<\/th>\n\t\t\t<th>Client<\/th>\n\t\t\t<th>User<\/th>\n\t\t\t<th>Start<\/th>\n\t\t\t<th>End<\/th>\n\t\t\t<th>Duration<\/th>\n\t\t\t<th>MySQL time<\/th>\n\t\t\t<th>Conn wait<\/th>\n\t\t\t<th>Plan<\/th>\n\t\t\t<th>SQL<\/th>\n\t\t\t<th>Queries<\/th>\n\t\t\t<th>Sources<\/th>\n\t\t\t<th>Response Size (Rows)<\/th>\n\t\t\t<th>Cache Hits<\/th>\n\t\t\t<th>Cache Misses<\/th>\n\t\t\t<th>Cache Absent<\/th>\n\t\t\t<th>Cache Invalidations<\/th>\n\t\t\t<th>Transaction ID<\/th>\n\t\t<\/tr>\n\t`)\n\tquerylogzFuncMap = template.FuncMap{\n\t\t\"stampMicro\": func(t time.Time) string { return t.Format(time.StampMicro) },\n\t\t\"cssWrappable\": wrappable,\n\t\t\"unquote\": func(s string) string { return strings.Trim(s, \"\\\"\") },\n\t}\n\tquerylogzTmpl = template.Must(template.New(\"example\").Funcs(querylogzFuncMap).Parse(`\n\t\t<tr class=\".ColorLevel\">\n\t\t\t<td>{{.Method}}<\/td>\n\t\t\t<td>{{.RemoteAddr}}<\/td>\n\t\t\t<td>{{.Username}}<\/td>\n\t\t\t<td>{{.StartTime | stampMicro}}<\/td>\n\t\t\t<td>{{.EndTime | stampMicro}}<\/td>\n\t\t\t<td>{{.TotalTime.Seconds}}<\/td>\n\t\t\t<td>{{.MysqlResponseTime.Seconds}}<\/td>\n\t\t\t<td>{{.WaitingForConnection.Seconds}}<\/td>\n\t\t\t<td>{{.PlanType}}<\/td>\n\t\t\t<td>{{.OriginalSql | unquote | cssWrappable}}<\/td>\n\t\t\t<td>{{.NumberOfQueries}}<\/td>\n\t\t\t<td>{{.FmtQuerySources}}<\/td>\n\t\t\t<td>{{.SizeOfResponse}}<\/td>\n\t\t\t<td>{{.CacheHits}}<\/td>\n\t\t\t<td>{{.CacheMisses}}<\/td>\n\t\t\t<td>{{.CacheAbsent}}<\/td>\n\t\t\t<td>{{.CacheInvalidations}}<\/td>\n <td>{{.TransactionID}}<\/td>\n\t\t<\/tr>\n\t`))\n)\n\nfunc init() {\n\thttp.HandleFunc(\"\/querylogz\", querylogzHandler)\n}\n\n\/\/ querylogzHandler serves a human readable snapshot of the\n\/\/ current query log.\nfunc querylogzHandler(w http.ResponseWriter, r *http.Request) {\n\tif err := acl.CheckAccessHTTP(r, acl.DEBUGGING); err != nil {\n\t\tacl.SendError(w, err)\n\t\treturn\n\t}\n\tch := SqlQueryLogger.Subscribe()\n\tdefer SqlQueryLogger.Unsubscribe(ch)\n\tstartHTMLTable(w)\n\tdefer endHTMLTable(w)\n\tw.Write(querylogzHeader)\n\n\tdeadline := time.After(10 * time.Second)\n\tfor i := 0; i < 300; i++ {\n\t\tselect {\n\t\tcase out := <-ch:\n\t\t\tstats, ok := out.(*SQLQueryStats)\n\t\t\tif !ok {\n\t\t\t\terr := fmt.Errorf(\"Unexpected value in %s: %#v (expecting value of type %T)\", TxLogger.Name, out, &SQLQueryStats{})\n\t\t\t\tio.WriteString(w, `<tr class=\"error\">`)\n\t\t\t\tio.WriteString(w, err.Error())\n\t\t\t\tio.WriteString(w, \"<\/tr>\")\n\t\t\t\tlog.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tvar level string\n\t\t\tif stats.TotalTime().Seconds() < 0.01 {\n\t\t\t\tlevel = \"low\"\n\t\t\t} else if stats.TotalTime().Seconds() < 0.1 {\n\t\t\t\tlevel = \"medium\"\n\t\t\t} else {\n\t\t\t\tlevel = \"high\"\n\t\t\t}\n\t\t\ttmplData := struct {\n\t\t\t\t*SQLQueryStats\n\t\t\t\tColorLevel string\n\t\t\t}{stats, level}\n\t\t\tquerylogzTmpl.Execute(w, tmplData)\n\t\tcase <-deadline:\n\t\t\treturn\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package acceptance_test\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\tacceptance \"github.com\/cloudfoundry\/bosh-bootloader\/acceptance-tests\"\n\t\"github.com\/cloudfoundry\/bosh-bootloader\/storage\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n)\n\nvar _ = Describe(\"bbl latest-error\", func() {\n\tvar (\n\t\ttempDirectory string\n\t)\n\n\tBeforeEach(func() {\n\t\tacceptance.SkipUnless(\"latest-error\")\n\n\t\tvar err error\n\t\ttempDirectory, err = ioutil.TempDir(\"\", \"\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tstate := []byte(`{\n\t\t\t\"version\": 8,\n\t\t\t\"noDirector\": true,\n\t\t\t\"tfState\": \"some-tf-state\",\n\t\t\t\"latestTFOutput\": \"some terraform output\"\n\t\t}`)\n\t\terr = ioutil.WriteFile(filepath.Join(tempDirectory, storage.StateFileName), state, os.ModePerm)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t})\n\n\tIt(\"prints the terraform output from the last command\", func() {\n\t\targs := []string{\n\t\t\t\"--state-dir\", tempDirectory,\n\t\t\t\"latest-error\",\n\t\t}\n\n\t\tcmd := exec.Command(pathToBBL, args...)\n\t\tsession, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tEventually(session, 10*time.Second).Should(gexec.Exit(1))\n\n\t\tExpect(string(session.Out.Contents())).To(ContainSubstring(\"some terraform output\"))\n\t})\n})\n<commit_msg>Fix exit code in latest error test.<commit_after>package acceptance_test\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\tacceptance \"github.com\/cloudfoundry\/bosh-bootloader\/acceptance-tests\"\n\t\"github.com\/cloudfoundry\/bosh-bootloader\/storage\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n)\n\nvar _ = Describe(\"bbl latest-error\", func() {\n\tvar (\n\t\ttempDirectory string\n\t)\n\n\tBeforeEach(func() {\n\t\tacceptance.SkipUnless(\"latest-error\")\n\n\t\tvar err error\n\t\ttempDirectory, err = ioutil.TempDir(\"\", \"\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tstate := []byte(`{\n\t\t\t\"version\": 8,\n\t\t\t\"noDirector\": true,\n\t\t\t\"tfState\": \"some-tf-state\",\n\t\t\t\"latestTFOutput\": \"some terraform output\"\n\t\t}`)\n\t\terr = ioutil.WriteFile(filepath.Join(tempDirectory, storage.StateFileName), state, os.ModePerm)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t})\n\n\tIt(\"prints the terraform output from the last command\", func() {\n\t\targs := []string{\n\t\t\t\"--state-dir\", tempDirectory,\n\t\t\t\"latest-error\",\n\t\t}\n\n\t\tcmd := exec.Command(pathToBBL, args...)\n\t\tsession, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tEventually(session, 10*time.Second).Should(gexec.Exit(0))\n\n\t\tExpect(string(session.Out.Contents())).To(ContainSubstring(\"some terraform output\"))\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>\/*\n\naufs driver directory structure\n\n.\n├── layers \/\/ Metadata of layers\n│   ├── 1\n│   ├── 2\n│   └── 3\n├── diffs \/\/ Content of the layer\n│   ├── 1 \/\/ Contains layers that need to be mounted for the id\n│   ├── 2\n│   └── 3\n└── mnt \/\/ Mount points for the rw layers to be mounted\n ├── 1\n ├── 2\n └── 3\n\n*\/\n\npackage aufslimit\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/dotcloud\/docker\/archive\"\n\t\"github.com\/dotcloud\/docker\/graphdriver\"\n\t\"github.com\/dotcloud\/docker\/utils\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n)\n\nfunc init() {\n\tgraphdriver.Register(\"aufslimit\", Init)\n}\n\ntype Driver struct {\n\troot string\n}\n\n\/\/ New returns a new AUFS driver.\n\/\/ An error is returned if AUFS is not supported.\nfunc Init(root string) (graphdriver.Driver, error) {\n\t\/\/ Try to load the aufs kernel module\n\tif err := supportsAufs(); err != nil {\n\t\treturn nil, err\n\t}\n\tpaths := []string{\n\t\t\"mnt\",\n\t\t\"diff\",\n\t\t\"layers\",\n\t}\n\n\t\/\/ Create the root aufs driver dir and return\n\t\/\/ if it already exists\n\t\/\/ If not populate the dir structure\n\tif err := os.MkdirAll(root, 0755); err != nil {\n\t\tif os.IsExist(err) {\n\t\t\treturn &Driver{root}, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tfor _, p := range paths {\n\t\tif err := os.MkdirAll(path.Join(root, p), 0755); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn &Driver{root}, nil\n}\n\n\/\/ Return a nil error if the kernel supports aufs\n\/\/ We cannot modprobe because inside dind modprobe fails\n\/\/ to run\nfunc supportsAufs() error {\n\t\/\/ We can try to modprobe aufs first before looking at\n\t\/\/ proc\/filesystems for when aufs is supported\n\texec.Command(\"modprobe\", \"aufs\").Run()\n\n\tf, err := os.Open(\"\/proc\/filesystems\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\ts := bufio.NewScanner(f)\n\tfor s.Scan() {\n\t\tif strings.Contains(s.Text(), \"aufs\") {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"AUFS was not found in \/proc\/filesystems\")\n}\n\nfunc (a Driver) rootPath() string {\n\treturn a.root\n}\n\nfunc (Driver) String() string {\n\treturn \"aufslimit\"\n}\n\nfunc (a Driver) Status() [][2]string {\n\tids, _ := loadIds(path.Join(a.rootPath(), \"layers\"))\n\treturn [][2]string{\n\t\t{\"Root Dir\", a.rootPath()},\n\t\t{\"Dirs\", fmt.Sprintf(\"%d\", len(ids))},\n\t}\n}\n\n\/\/ Exists returns true if the given id is registered with\n\/\/ this driver\nfunc (a Driver) Exists(id string) bool {\n\tif _, err := os.Lstat(path.Join(a.rootPath(), \"layers\", id)); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/ Three folders are created for each id\n\/\/ mnt, layers, and diff\nfunc (a *Driver) Create(id, parent string) error {\n\tif err := a.createDirsFor(id); err != nil {\n\t\treturn err\n\t}\n\t\/\/ Write the layers metadata\n\tf, err := os.Create(path.Join(a.rootPath(), \"layers\", id))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tif parent != \"\" {\n\t\tids, err := getParentIds(a.rootPath(), parent)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif _, err := fmt.Fprintln(f, parent); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, i := range ids {\n\t\t\tif _, err := fmt.Fprintln(f, i); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *Driver) CreateWithQuota(id, parent string, quota int64) error {\n\tlog.Printf(\"We should limit this container to DiskQuota: %d\", quota)\n\treturn a.Create(id, parent)\n}\n\nfunc (a *Driver) createDirsFor(id string) error {\n\tpaths := []string{\n\t\t\"mnt\",\n\t\t\"diff\",\n\t}\n\n\tfor _, p := range paths {\n\t\tif err := os.MkdirAll(path.Join(a.rootPath(), p, id), 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Unmount and remove the dir information\nfunc (a *Driver) Remove(id string) error {\n\t\/\/ Make sure the dir is umounted first\n\tif err := a.unmount(id); err != nil {\n\t\treturn err\n\t}\n\ttmpDirs := []string{\n\t\t\"mnt\",\n\t\t\"diff\",\n\t}\n\n\t\/\/ Remove the dirs atomically\n\tfor _, p := range tmpDirs {\n\t\t\/\/ We need to use a temp dir in the same dir as the driver so Rename\n\t\t\/\/ does not fall back to the slow copy if \/tmp and the driver dir\n\t\t\/\/ are on different devices\n\t\ttmp := path.Join(a.rootPath(), \"tmp\", p, id)\n\t\tif err := os.MkdirAll(tmp, 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\t\trealPath := path.Join(a.rootPath(), p, id)\n\t\tif err := os.Rename(realPath, tmp); err != nil && !os.IsNotExist(err) {\n\t\t\treturn err\n\t\t}\n\t\tdefer os.RemoveAll(tmp)\n\t}\n\n\t\/\/ Remove the layers file for the id\n\tif err := os.Remove(path.Join(a.rootPath(), \"layers\", id)); err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Return the rootfs path for the id\n\/\/ This will mount the dir at it's given path\nfunc (a *Driver) Get(id string) (string, error) {\n\tids, err := getParentIds(a.rootPath(), id)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn \"\", err\n\t\t}\n\t\tids = []string{}\n\t}\n\n\t\/\/ If a dir does not have a parent ( no layers )do not try to mount\n\t\/\/ just return the diff path to the data\n\tout := path.Join(a.rootPath(), \"diff\", id)\n\tif len(ids) > 0 {\n\t\tout = path.Join(a.rootPath(), \"mnt\", id)\n\t\tif err := a.mount(id); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn out, nil\n}\n\n\/\/ Returns an archive of the contents for the id\nfunc (a *Driver) Diff(id string) (archive.Archive, error) {\n\treturn archive.TarFilter(path.Join(a.rootPath(), \"diff\", id), &archive.TarOptions{\n\t\tRecursive: true,\n\t\tCompression: archive.Uncompressed,\n\t})\n}\n\nfunc (a *Driver) ApplyDiff(id string, diff archive.Archive) error {\n\treturn archive.Untar(diff, path.Join(a.rootPath(), \"diff\", id), nil)\n}\n\n\/\/ Returns the size of the contents for the id\nfunc (a *Driver) DiffSize(id string) (int64, error) {\n\treturn utils.TreeSize(path.Join(a.rootPath(), \"diff\", id))\n}\n\nfunc (a *Driver) Changes(id string) ([]archive.Change, error) {\n\tlayers, err := a.getParentLayerPaths(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn archive.Changes(layers, path.Join(a.rootPath(), \"diff\", id))\n}\n\nfunc (a *Driver) getParentLayerPaths(id string) ([]string, error) {\n\tparentIds, err := getParentIds(a.rootPath(), id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(parentIds) == 0 {\n\t\treturn nil, fmt.Errorf(\"Dir %s does not have any parent layers\", id)\n\t}\n\tlayers := make([]string, len(parentIds))\n\n\t\/\/ Get the diff paths for all the parent ids\n\tfor i, p := range parentIds {\n\t\tlayers[i] = path.Join(a.rootPath(), \"diff\", p)\n\t}\n\treturn layers, nil\n}\n\nfunc (a *Driver) mount(id string) error {\n\t\/\/ If the id is mounted or we get an error return\n\tif mounted, err := a.mounted(id); err != nil || mounted {\n\t\treturn err\n\t}\n\n\tvar (\n\t\ttarget = path.Join(a.rootPath(), \"mnt\", id)\n\t\trw = path.Join(a.rootPath(), \"diff\", id)\n\t)\n\n\tlayers, err := a.getParentLayerPaths(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := a.aufsMount(layers, rw, target); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (a *Driver) unmount(id string) error {\n\tif mounted, err := a.mounted(id); err != nil || !mounted {\n\t\treturn err\n\t}\n\ttarget := path.Join(a.rootPath(), \"mnt\", id)\n\treturn Unmount(target)\n}\n\nfunc (a *Driver) mounted(id string) (bool, error) {\n\ttarget := path.Join(a.rootPath(), \"mnt\", id)\n\treturn Mounted(target)\n}\n\n\/\/ During cleanup aufs needs to unmount all mountpoints\nfunc (a *Driver) Cleanup() error {\n\tids, err := loadIds(path.Join(a.rootPath(), \"layers\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, id := range ids {\n\t\tif err := a.unmount(id); err != nil {\n\t\t\tutils.Errorf(\"Unmounting %s: %s\", utils.TruncateID(id), err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *Driver) aufsMount(ro []string, rw, target string) error {\n\trwBranch := fmt.Sprintf(\"%v=rw\", rw)\n\troBranches := \"\"\n\tfor _, layer := range ro {\n\t\troBranches += fmt.Sprintf(\"%v=ro+wh:\", layer)\n\t}\n\tbranches := fmt.Sprintf(\"br:%v:%v,xino=\/dev\/shm\/aufs.xino\", rwBranch, roBranches)\n\n\t\/\/if error, try to load aufs kernel module\n\tif err := mount(\"none\", target, \"aufs\", 0, branches); err != nil {\n\t\tlog.Printf(\"Kernel does not support AUFS, trying to load the AUFS module with modprobe...\")\n\t\tif err := exec.Command(\"modprobe\", \"aufs\").Run(); err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to load the AUFS module\")\n\t\t}\n\t\tlog.Printf(\"...module loaded.\")\n\t\tif err := mount(\"none\", target, \"aufs\", 0, branches); err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to mount using aufs %s\", err)\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>limit and unlimit containers<commit_after>\/*\n\naufs driver directory structure\n\n.\n├── layers \/\/ Metadata of layers\n│   ├── 1\n│   ├── 2\n│   └── 3\n├── diffs \/\/ Content of the layer\n│   ├── 1 \/\/ Contains layers that need to be mounted for the id\n│   ├── 2\n│   └── 3\n└── mnt \/\/ Mount points for the rw layers to be mounted\n ├── 1\n ├── 2\n └── 3\n\n*\/\n\npackage aufslimit\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/dotcloud\/docker\/archive\"\n\t\"github.com\/dotcloud\/docker\/graphdriver\"\n\t\"github.com\/dotcloud\/docker\/utils\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n\t\"strconv\"\n)\n\nconst containerQuotaPath string = \"containers_quota\"\nconst dev0 string = \"\/dev\/zero\"\nconst blkSize int64 = 4 \/\/ MB\nconst extension string = \"ext3\"\nconst driverPath string = \"diff\"\n\nfunc init() {\n\tgraphdriver.Register(\"aufslimit\", Init)\n}\n\ntype Driver struct {\n\troot string\n}\n\n\/\/ New returns a new AUFS driver.\n\/\/ An error is returned if AUFS is not supported.\nfunc Init(root string) (graphdriver.Driver, error) {\n\t\/\/ Try to load the aufs kernel module\n\tif err := supportsAufs(); err != nil {\n\t\treturn nil, err\n\t}\n\tpaths := []string{\n\t\t\"mnt\",\n\t\t\"diff\",\n\t\t\"layers\",\n\t}\n\n\t\/\/ Create the root aufs driver dir and return\n\t\/\/ if it already exists\n\t\/\/ If not populate the dir structure\n\tif err := os.MkdirAll(root, 0755); err != nil {\n\t\tif os.IsExist(err) {\n\t\t\treturn &Driver{root}, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tfor _, p := range paths {\n\t\tif err := os.MkdirAll(path.Join(root, p), 0755); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn &Driver{root}, nil\n}\n\n\/\/ Return a nil error if the kernel supports aufs\n\/\/ We cannot modprobe because inside dind modprobe fails\n\/\/ to run\nfunc supportsAufs() error {\n\t\/\/ We can try to modprobe aufs first before looking at\n\t\/\/ proc\/filesystems for when aufs is supported\n\texec.Command(\"modprobe\", \"aufs\").Run()\n\n\tf, err := os.Open(\"\/proc\/filesystems\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\ts := bufio.NewScanner(f)\n\tfor s.Scan() {\n\t\tif strings.Contains(s.Text(), \"aufs\") {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn fmt.Errorf(\"AUFS was not found in \/proc\/filesystems\")\n}\n\nfunc (a Driver) rootPath() string {\n\treturn a.root\n}\n\nfunc (Driver) String() string {\n\treturn \"aufslimit\"\n}\n\nfunc (a Driver) Status() [][2]string {\n\tids, _ := loadIds(path.Join(a.rootPath(), \"layers\"))\n\treturn [][2]string{\n\t\t{\"Root Dir\", a.rootPath()},\n\t\t{\"Dirs\", fmt.Sprintf(\"%d\", len(ids))},\n\t}\n}\n\n\/\/ Exists returns true if the given id is registered with\n\/\/ this driver\nfunc (a Driver) Exists(id string) bool {\n\tif _, err := os.Lstat(path.Join(a.rootPath(), \"layers\", id)); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/ Three folders are created for each id\n\/\/ mnt, layers, and diff\nfunc (a *Driver) Create(id, parent string) error {\n\tif err := a.createDirsFor(id); err != nil {\n\t\treturn err\n\t}\n\t\/\/ Write the layers metadata\n\tf, err := os.Create(path.Join(a.rootPath(), \"layers\", id))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tif parent != \"\" {\n\t\tids, err := getParentIds(a.rootPath(), parent)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif _, err := fmt.Fprintln(f, parent); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, i := range ids {\n\t\t\tif _, err := fmt.Fprintln(f, i); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *Driver) CreateWithQuota(id, parent string, quota int64) error {\n\tlog.Printf(\"Creating with quota %d\", quota)\n\tif err := a.Create(id, parent); err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Limiting container with quota %d\", quota)\n\tif err := a.limitContainer(id, quota); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (a *Driver) createDirsFor(id string) error {\n\tpaths := []string{\n\t\t\"mnt\",\n\t\t\"diff\",\n\t}\n\n\tfor _, p := range paths {\n\t\tif err := os.MkdirAll(path.Join(a.rootPath(), p, id), 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Unmount and remove the dir information\nfunc (a *Driver) Remove(id string) error {\n\t\/\/ Make sure the dir is umounted first\n\tif err := a.unmount(id); err != nil {\n\t\treturn err\n\t}\n\tif err := a.unLimitContainer(id); err != nil {\n\t\treturn err\n\t}\n\n\ttmpDirs := []string{\n\t\t\"mnt\",\n\t\t\"diff\",\n\t}\n\n\t\/\/ Remove the dirs atomically\n\tfor _, p := range tmpDirs {\n\t\t\/\/ We need to use a temp dir in the same dir as the driver so Rename\n\t\t\/\/ does not fall back to the slow copy if \/tmp and the driver dir\n\t\t\/\/ are on different devices\n\t\ttmp := path.Join(a.rootPath(), \"tmp\", p, id)\n\t\tif err := os.MkdirAll(tmp, 0755); err != nil {\n\t\t\treturn err\n\t\t}\n\t\trealPath := path.Join(a.rootPath(), p, id)\n\t\tif err := os.Rename(realPath, tmp); err != nil && !os.IsNotExist(err) {\n\t\t\treturn err\n\t\t}\n\t\tdefer os.RemoveAll(tmp)\n\t}\n\n\t\/\/ Remove the layers file for the id\n\tif err := os.Remove(path.Join(a.rootPath(), \"layers\", id)); err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Return the rootfs path for the id\n\/\/ This will mount the dir at it's given path\nfunc (a *Driver) Get(id string) (string, error) {\n\tids, err := getParentIds(a.rootPath(), id)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn \"\", err\n\t\t}\n\t\tids = []string{}\n\t}\n\n\t\/\/ If a dir does not have a parent ( no layers )do not try to mount\n\t\/\/ just return the diff path to the data\n\tout := path.Join(a.rootPath(), \"diff\", id)\n\tif len(ids) > 0 {\n\t\tout = path.Join(a.rootPath(), \"mnt\", id)\n\t\tif err := a.mount(id); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn out, nil\n}\n\n\/\/ Returns an archive of the contents for the id\nfunc (a *Driver) Diff(id string) (archive.Archive, error) {\n\treturn archive.TarFilter(path.Join(a.rootPath(), \"diff\", id), &archive.TarOptions{\n\t\tRecursive: true,\n\t\tCompression: archive.Uncompressed,\n\t})\n}\n\nfunc (a *Driver) ApplyDiff(id string, diff archive.Archive) error {\n\treturn archive.Untar(diff, path.Join(a.rootPath(), \"diff\", id), nil)\n}\n\n\/\/ Returns the size of the contents for the id\nfunc (a *Driver) DiffSize(id string) (int64, error) {\n\treturn utils.TreeSize(path.Join(a.rootPath(), \"diff\", id))\n}\n\nfunc (a *Driver) Changes(id string) ([]archive.Change, error) {\n\tlayers, err := a.getParentLayerPaths(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn archive.Changes(layers, path.Join(a.rootPath(), \"diff\", id))\n}\n\nfunc (a *Driver) getParentLayerPaths(id string) ([]string, error) {\n\tparentIds, err := getParentIds(a.rootPath(), id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(parentIds) == 0 {\n\t\treturn nil, fmt.Errorf(\"Dir %s does not have any parent layers\", id)\n\t}\n\tlayers := make([]string, len(parentIds))\n\n\t\/\/ Get the diff paths for all the parent ids\n\tfor i, p := range parentIds {\n\t\tlayers[i] = path.Join(a.rootPath(), \"diff\", p)\n\t}\n\treturn layers, nil\n}\n\nfunc (a *Driver) mount(id string) error {\n\t\/\/ If the id is mounted or we get an error return\n\tif mounted, err := a.mounted(id); err != nil || mounted {\n\t\treturn err\n\t}\n\n\tvar (\n\t\ttarget = path.Join(a.rootPath(), \"mnt\", id)\n\t\trw = path.Join(a.rootPath(), \"diff\", id)\n\t)\n\n\tlayers, err := a.getParentLayerPaths(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := a.aufsMount(layers, rw, target); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (a *Driver) unmount(id string) error {\n\tif mounted, err := a.mounted(id); err != nil || !mounted {\n\t\treturn err\n\t}\n\ttarget := path.Join(a.rootPath(), \"mnt\", id)\n\treturn Unmount(target)\n}\n\nfunc (a *Driver) mounted(id string) (bool, error) {\n\ttarget := path.Join(a.rootPath(), \"mnt\", id)\n\treturn Mounted(target)\n}\n\n\/\/ During cleanup aufs needs to unmount all mountpoints\nfunc (a *Driver) Cleanup() error {\n\tids, err := loadIds(path.Join(a.rootPath(), \"layers\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, id := range ids {\n\t\tif err := a.unmount(id); err != nil {\n\t\t\tutils.Errorf(\"Unmounting %s: %s\", utils.TruncateID(id), err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *Driver) aufsMount(ro []string, rw, target string) error {\n\trwBranch := fmt.Sprintf(\"%v=rw\", rw)\n\troBranches := \"\"\n\tfor _, layer := range ro {\n\t\troBranches += fmt.Sprintf(\"%v=ro+wh:\", layer)\n\t}\n\tbranches := fmt.Sprintf(\"br:%v:%v,xino=\/dev\/shm\/aufs.xino\", rwBranch, roBranches)\n\n\t\/\/if error, try to load aufs kernel module\n\tif err := mount(\"none\", target, \"aufs\", 0, branches); err != nil {\n\t\tlog.Printf(\"Kernel does not support AUFS, trying to load the AUFS module with modprobe...\")\n\t\tif err := exec.Command(\"modprobe\", \"aufs\").Run(); err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to load the AUFS module\")\n\t\t}\n\t\tlog.Printf(\"...module loaded.\")\n\t\tif err := mount(\"none\", target, \"aufs\", 0, branches); err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to mount using aufs %s\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *Driver) limitContainer(id string, quota int64) error {\n\n \/\/ Make sure container's quota dir exists\n if err := os.MkdirAll(path.Join(a.rootPath(), containerQuotaPath), 0755); err != nil {\n return err\n }\n\n containerQuotaFile := path.Join(a.rootPath(), containerQuotaPath, id) + \".\" + extension\n containerFilesystem := path.Join(a.rootPath(), driverPath, id)\n\n log.Printf(\"Executing dd...\")\n cmd := \"dd\"\n ifParam := \"if=\" + dev0 \n ofParam := \"of=\" + containerQuotaFile\n blkSizeBytes := blkSize * 1024 * 1024\n bsParam := \"bs=\" + strconv.FormatInt(blkSizeBytes, 10)\n blkNumber := quota \/ blkSize\n countParam := \"count=\" + strconv.FormatInt(blkNumber, 10)\n ddCmd := exec.Command(cmd, ifParam, ofParam, bsParam, countParam)\n err := ddCmd.Run()\n if err != nil {\n return err\n }\n\n log.Printf(\"Executing mkfs...\")\n cmd = \"\/sbin\/mkfs\"\n opt1 := \"-t\" \n opt2 := \"-q\"\n opt3 := \"-F\"\n mkfsCmd := exec.Command(cmd, opt1, extension, opt2, containerQuotaFile, opt3)\n err = mkfsCmd.Run()\n if err != nil {\n return err\n }\n\n log.Printf(\"Executing mount -o loop...\")\n cmd = \"mount\"\n opt1 = \"-o\" \n opt2 = \"loop,rw,usrquota,grpquota\"\n mountCmd := exec.Command(cmd, opt1, opt2, containerQuotaFile, containerFilesystem)\n err = mountCmd.Run()\n if err != nil {\n return err\n }\n return nil\n}\n\nfunc (a *Driver) unLimitContainer(id string) error {\n\n containerFilesystem := path.Join(a.rootPath(), driverPath, id)\n containerQuotaFile := path.Join(a.rootPath(), containerQuotaPath, id) + \".\" + extension\n\n log.Printf(\"Executing umount...\")\n cmd := \"umount\"\n opt1 := \"-l\" \n umountCmd := exec.Command(cmd, opt1, containerFilesystem)\n err := umountCmd.Run()\n if err != nil {\n \tlog.Printf(\"Error unmounting %s (probably a -init folder)\", id)\n return nil\n }\n\n log.Printf(\"Executing rm...\")\n cmd = \"rm\"\n opt1 = \"-f\" \n rmQuotaCmd := exec.Command(cmd, opt1, containerQuotaFile)\n err = rmQuotaCmd.Run()\n if err != nil {\n return err\n }\n return nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cache\n\nimport (\n\t\"errors\"\n\t\"sync\"\n\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n)\n\n\/\/ PopProcessFunc is passed to Pop() method of Queue interface.\n\/\/ It is supposed to process the accumulator popped from the queue.\ntype PopProcessFunc func(interface{}) error\n\n\/\/ ErrRequeue may be returned by a PopProcessFunc to safely requeue\n\/\/ the current item. The value of Err will be returned from Pop.\ntype ErrRequeue struct {\n\t\/\/ Err is returned by the Pop function\n\tErr error\n}\n\n\/\/ ErrFIFOClosed used when FIFO is closed\nvar ErrFIFOClosed = errors.New(\"DeltaFIFO: manipulating with closed queue\")\n\nfunc (e ErrRequeue) Error() string {\n\tif e.Err == nil {\n\t\treturn \"the popped item should be requeued without returning an error\"\n\t}\n\treturn e.Err.Error()\n}\n\n\/\/ Queue extends Store with a collection of Store keys to \"process\".\n\/\/ Every Add, Update, or Delete may put the object's key in that collection.\n\/\/ A Queue has a way to derive the corresponding key given an accumulator.\n\/\/ A Queue can be accessed concurrently from multiple goroutines.\n\/\/ A Queue can be \"closed\", after which Pop operations return an error.\ntype Queue interface {\n\tStore\n\n\t\/\/ Pop blocks until there is at least one key to process or the\n\t\/\/ Queue is closed. In the latter case Pop returns with an error.\n\t\/\/ In the former case Pop atomically picks one key to process,\n\t\/\/ removes that (key, accumulator) association from the Store, and\n\t\/\/ processes the accumulator. Pop returns the accumulator that\n\t\/\/ was processed and the result of processing. The PopProcessFunc\n\t\/\/ may return an ErrRequeue{inner} and in this case Pop will (a)\n\t\/\/ return that (key, accumulator) association to the Queue as part\n\t\/\/ of the atomic processing and (b) return the inner error from\n\t\/\/ Pop.\n\tPop(PopProcessFunc) (interface{}, error)\n\n\t\/\/ AddIfNotPresent puts the given accumulator into the Queue (in\n\t\/\/ association with the accumulator's key) if and only if that key\n\t\/\/ is not already associated with a non-empty accumulator.\n\tAddIfNotPresent(interface{}) error\n\n\t\/\/ HasSynced returns true if the first batch of keys have all been\n\t\/\/ popped. The first batch of keys are those of the first Replace\n\t\/\/ operation if that happened before any Add, Update, or Delete;\n\t\/\/ otherwise the first batch is empty.\n\tHasSynced() bool\n\n\t\/\/ Close the queue\n\tClose()\n}\n\n\/\/ Pop is helper function for popping from Queue.\n\/\/ WARNING: Do NOT use this function in non-test code to avoid races\n\/\/ unless you really really really really know what you are doing.\nfunc Pop(queue Queue) interface{} {\n\tvar result interface{}\n\tqueue.Pop(func(obj interface{}) error {\n\t\tresult = obj\n\t\treturn nil\n\t})\n\treturn result\n}\n\n\/\/ FIFO is a Queue in which (a) each accumulator is simply the most\n\/\/ recently provided object and (b) the collection of keys to process\n\/\/ is a FIFO. The accumulators all start out empty, and deleting an\n\/\/ object from its accumulator empties the accumulator. The Resync\n\/\/ operation is a no-op.\n\/\/\n\/\/ Thus: if multiple adds\/updates of a single object happen while that\n\/\/ object's key is in the queue before it has been processed then it\n\/\/ will only be processed once, and when it is processed the most\n\/\/ recent version will be processed. This can't be done with a channel\n\/\/\n\/\/ FIFO solves this use case:\n\/\/ * You want to process every object (exactly) once.\n\/\/ * You want to process the most recent version of the object when you process it.\n\/\/ * You do not want to process deleted objects, they should be removed from the queue.\n\/\/ * You do not want to periodically reprocess objects.\n\/\/ Compare with DeltaFIFO for other use cases.\ntype FIFO struct {\n\tlock sync.RWMutex\n\tcond sync.Cond\n\t\/\/ We depend on the property that every key in `items` is also in `queue`\n\titems map[string]interface{}\n\tqueue []string\n\n\t\/\/ populated is true if the first batch of items inserted by Replace() has been populated\n\t\/\/ or Delete\/Add\/Update was called first.\n\tpopulated bool\n\t\/\/ initialPopulationCount is the number of items inserted by the first call of Replace()\n\tinitialPopulationCount int\n\n\t\/\/ keyFunc is used to make the key used for queued item insertion and retrieval, and\n\t\/\/ should be deterministic.\n\tkeyFunc KeyFunc\n\n\t\/\/ Indication the queue is closed.\n\t\/\/ Used to indicate a queue is closed so a control loop can exit when a queue is empty.\n\t\/\/ Currently, not used to gate any of CRED operations.\n\tclosed bool\n\tclosedLock sync.Mutex\n}\n\nvar (\n\t_ = Queue(&FIFO{}) \/\/ FIFO is a Queue\n)\n\n\/\/ Close the queue.\nfunc (f *FIFO) Close() {\n\tf.closedLock.Lock()\n\tdefer f.closedLock.Unlock()\n\tf.closed = true\n\tf.cond.Broadcast()\n}\n\n\/\/ HasSynced returns true if an Add\/Update\/Delete\/AddIfNotPresent are called first,\n\/\/ or an Update called first but the first batch of items inserted by Replace() has been popped\nfunc (f *FIFO) HasSynced() bool {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\treturn f.populated && f.initialPopulationCount == 0\n}\n\n\/\/ Add inserts an item, and puts it in the queue. The item is only enqueued\n\/\/ if it doesn't already exist in the set.\nfunc (f *FIFO) Add(obj interface{}) error {\n\tid, err := f.keyFunc(obj)\n\tif err != nil {\n\t\treturn KeyError{obj, err}\n\t}\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\tf.populated = true\n\tif _, exists := f.items[id]; !exists {\n\t\tf.queue = append(f.queue, id)\n\t}\n\tf.items[id] = obj\n\tf.cond.Broadcast()\n\treturn nil\n}\n\n\/\/ AddIfNotPresent inserts an item, and puts it in the queue. If the item is already\n\/\/ present in the set, it is neither enqueued nor added to the set.\n\/\/\n\/\/ This is useful in a single producer\/consumer scenario so that the consumer can\n\/\/ safely retry items without contending with the producer and potentially enqueueing\n\/\/ stale items.\nfunc (f *FIFO) AddIfNotPresent(obj interface{}) error {\n\tid, err := f.keyFunc(obj)\n\tif err != nil {\n\t\treturn KeyError{obj, err}\n\t}\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\tf.addIfNotPresent(id, obj)\n\treturn nil\n}\n\n\/\/ addIfNotPresent assumes the fifo lock is already held and adds the provided\n\/\/ item to the queue under id if it does not already exist.\nfunc (f *FIFO) addIfNotPresent(id string, obj interface{}) {\n\tf.populated = true\n\tif _, exists := f.items[id]; exists {\n\t\treturn\n\t}\n\n\tf.queue = append(f.queue, id)\n\tf.items[id] = obj\n\tf.cond.Broadcast()\n}\n\n\/\/ Update is the same as Add in this implementation.\nfunc (f *FIFO) Update(obj interface{}) error {\n\treturn f.Add(obj)\n}\n\n\/\/ Delete removes an item. It doesn't add it to the queue, because\n\/\/ this implementation assumes the consumer only cares about the objects,\n\/\/ not the order in which they were created\/added.\nfunc (f *FIFO) Delete(obj interface{}) error {\n\tid, err := f.keyFunc(obj)\n\tif err != nil {\n\t\treturn KeyError{obj, err}\n\t}\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\tf.populated = true\n\tdelete(f.items, id)\n\treturn err\n}\n\n\/\/ List returns a list of all the items.\nfunc (f *FIFO) List() []interface{} {\n\tf.lock.RLock()\n\tdefer f.lock.RUnlock()\n\tlist := make([]interface{}, 0, len(f.items))\n\tfor _, item := range f.items {\n\t\tlist = append(list, item)\n\t}\n\treturn list\n}\n\n\/\/ ListKeys returns a list of all the keys of the objects currently\n\/\/ in the FIFO.\nfunc (f *FIFO) ListKeys() []string {\n\tf.lock.RLock()\n\tdefer f.lock.RUnlock()\n\tlist := make([]string, 0, len(f.items))\n\tfor key := range f.items {\n\t\tlist = append(list, key)\n\t}\n\treturn list\n}\n\n\/\/ Get returns the requested item, or sets exists=false.\nfunc (f *FIFO) Get(obj interface{}) (item interface{}, exists bool, err error) {\n\tkey, err := f.keyFunc(obj)\n\tif err != nil {\n\t\treturn nil, false, KeyError{obj, err}\n\t}\n\treturn f.GetByKey(key)\n}\n\n\/\/ GetByKey returns the requested item, or sets exists=false.\nfunc (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {\n\tf.lock.RLock()\n\tdefer f.lock.RUnlock()\n\titem, exists = f.items[key]\n\treturn item, exists, nil\n}\n\n\/\/ IsClosed checks if the queue is closed\nfunc (f *FIFO) IsClosed() bool {\n\tf.closedLock.Lock()\n\tdefer f.closedLock.Unlock()\n\tif f.closed {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ Pop waits until an item is ready and processes it. If multiple items are\n\/\/ ready, they are returned in the order in which they were added\/updated.\n\/\/ The item is removed from the queue (and the store) before it is processed,\n\/\/ so if you don't successfully process it, it should be added back with\n\/\/ AddIfNotPresent(). process function is called under lock, so it is safe\n\/\/ update data structures in it that need to be in sync with the queue.\nfunc (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\tfor {\n\t\tfor len(f.queue) == 0 {\n\t\t\t\/\/ When the queue is empty, invocation of Pop() is blocked until new item is enqueued.\n\t\t\t\/\/ When Close() is called, the f.closed is set and the condition is broadcasted.\n\t\t\t\/\/ Which causes this loop to continue and return from the Pop().\n\t\t\tif f.IsClosed() {\n\t\t\t\treturn nil, ErrFIFOClosed\n\t\t\t}\n\n\t\t\tf.cond.Wait()\n\t\t}\n\t\tid := f.queue[0]\n\t\tf.queue = f.queue[1:]\n\t\tif f.initialPopulationCount > 0 {\n\t\t\tf.initialPopulationCount--\n\t\t}\n\t\titem, ok := f.items[id]\n\t\tif !ok {\n\t\t\t\/\/ Item may have been deleted subsequently.\n\t\t\tcontinue\n\t\t}\n\t\tdelete(f.items, id)\n\t\terr := process(item)\n\t\tif e, ok := err.(ErrRequeue); ok {\n\t\t\tf.addIfNotPresent(id, item)\n\t\t\terr = e.Err\n\t\t}\n\t\treturn item, err\n\t}\n}\n\n\/\/ Replace will delete the contents of 'f', using instead the given map.\n\/\/ 'f' takes ownership of the map, you should not reference the map again\n\/\/ after calling this function. f's queue is reset, too; upon return, it\n\/\/ will contain the items in the map, in no particular order.\nfunc (f *FIFO) Replace(list []interface{}, resourceVersion string) error {\n\titems := make(map[string]interface{}, len(list))\n\tfor _, item := range list {\n\t\tkey, err := f.keyFunc(item)\n\t\tif err != nil {\n\t\t\treturn KeyError{item, err}\n\t\t}\n\t\titems[key] = item\n\t}\n\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\tif !f.populated {\n\t\tf.populated = true\n\t\tf.initialPopulationCount = len(items)\n\t}\n\n\tf.items = items\n\tf.queue = f.queue[:0]\n\tfor id := range items {\n\t\tf.queue = append(f.queue, id)\n\t}\n\tif len(f.queue) > 0 {\n\t\tf.cond.Broadcast()\n\t}\n\treturn nil\n}\n\n\/\/ Resync will ensure that every object in the Store has its key in the queue.\n\/\/ This should be a no-op, because that property is maintained by all operations.\nfunc (f *FIFO) Resync() error {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\tinQueue := sets.NewString()\n\tfor _, id := range f.queue {\n\t\tinQueue.Insert(id)\n\t}\n\tfor id := range f.items {\n\t\tif !inQueue.Has(id) {\n\t\t\tf.queue = append(f.queue, id)\n\t\t}\n\t}\n\tif len(f.queue) > 0 {\n\t\tf.cond.Broadcast()\n\t}\n\treturn nil\n}\n\n\/\/ NewFIFO returns a Store which can be used to queue up items to\n\/\/ process.\nfunc NewFIFO(keyFunc KeyFunc) *FIFO {\n\tf := &FIFO{\n\t\titems: map[string]interface{}{},\n\t\tqueue: []string{},\n\t\tkeyFunc: keyFunc,\n\t}\n\tf.cond.L = &f.lock\n\treturn f\n}\n<commit_msg>Polished up HasSync comment<commit_after>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cache\n\nimport (\n\t\"errors\"\n\t\"sync\"\n\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n)\n\n\/\/ PopProcessFunc is passed to Pop() method of Queue interface.\n\/\/ It is supposed to process the accumulator popped from the queue.\ntype PopProcessFunc func(interface{}) error\n\n\/\/ ErrRequeue may be returned by a PopProcessFunc to safely requeue\n\/\/ the current item. The value of Err will be returned from Pop.\ntype ErrRequeue struct {\n\t\/\/ Err is returned by the Pop function\n\tErr error\n}\n\n\/\/ ErrFIFOClosed used when FIFO is closed\nvar ErrFIFOClosed = errors.New(\"DeltaFIFO: manipulating with closed queue\")\n\nfunc (e ErrRequeue) Error() string {\n\tif e.Err == nil {\n\t\treturn \"the popped item should be requeued without returning an error\"\n\t}\n\treturn e.Err.Error()\n}\n\n\/\/ Queue extends Store with a collection of Store keys to \"process\".\n\/\/ Every Add, Update, or Delete may put the object's key in that collection.\n\/\/ A Queue has a way to derive the corresponding key given an accumulator.\n\/\/ A Queue can be accessed concurrently from multiple goroutines.\n\/\/ A Queue can be \"closed\", after which Pop operations return an error.\ntype Queue interface {\n\tStore\n\n\t\/\/ Pop blocks until there is at least one key to process or the\n\t\/\/ Queue is closed. In the latter case Pop returns with an error.\n\t\/\/ In the former case Pop atomically picks one key to process,\n\t\/\/ removes that (key, accumulator) association from the Store, and\n\t\/\/ processes the accumulator. Pop returns the accumulator that\n\t\/\/ was processed and the result of processing. The PopProcessFunc\n\t\/\/ may return an ErrRequeue{inner} and in this case Pop will (a)\n\t\/\/ return that (key, accumulator) association to the Queue as part\n\t\/\/ of the atomic processing and (b) return the inner error from\n\t\/\/ Pop.\n\tPop(PopProcessFunc) (interface{}, error)\n\n\t\/\/ AddIfNotPresent puts the given accumulator into the Queue (in\n\t\/\/ association with the accumulator's key) if and only if that key\n\t\/\/ is not already associated with a non-empty accumulator.\n\tAddIfNotPresent(interface{}) error\n\n\t\/\/ HasSynced returns true if the first batch of keys have all been\n\t\/\/ popped. The first batch of keys are those of the first Replace\n\t\/\/ operation if that happened before any Add, AddIfNotPresent,\n\t\/\/ Update, or Delete; otherwise the first batch is empty.\n\tHasSynced() bool\n\n\t\/\/ Close the queue\n\tClose()\n}\n\n\/\/ Pop is helper function for popping from Queue.\n\/\/ WARNING: Do NOT use this function in non-test code to avoid races\n\/\/ unless you really really really really know what you are doing.\nfunc Pop(queue Queue) interface{} {\n\tvar result interface{}\n\tqueue.Pop(func(obj interface{}) error {\n\t\tresult = obj\n\t\treturn nil\n\t})\n\treturn result\n}\n\n\/\/ FIFO is a Queue in which (a) each accumulator is simply the most\n\/\/ recently provided object and (b) the collection of keys to process\n\/\/ is a FIFO. The accumulators all start out empty, and deleting an\n\/\/ object from its accumulator empties the accumulator. The Resync\n\/\/ operation is a no-op.\n\/\/\n\/\/ Thus: if multiple adds\/updates of a single object happen while that\n\/\/ object's key is in the queue before it has been processed then it\n\/\/ will only be processed once, and when it is processed the most\n\/\/ recent version will be processed. This can't be done with a channel\n\/\/\n\/\/ FIFO solves this use case:\n\/\/ * You want to process every object (exactly) once.\n\/\/ * You want to process the most recent version of the object when you process it.\n\/\/ * You do not want to process deleted objects, they should be removed from the queue.\n\/\/ * You do not want to periodically reprocess objects.\n\/\/ Compare with DeltaFIFO for other use cases.\ntype FIFO struct {\n\tlock sync.RWMutex\n\tcond sync.Cond\n\t\/\/ We depend on the property that every key in `items` is also in `queue`\n\titems map[string]interface{}\n\tqueue []string\n\n\t\/\/ populated is true if the first batch of items inserted by Replace() has been populated\n\t\/\/ or Delete\/Add\/Update was called first.\n\tpopulated bool\n\t\/\/ initialPopulationCount is the number of items inserted by the first call of Replace()\n\tinitialPopulationCount int\n\n\t\/\/ keyFunc is used to make the key used for queued item insertion and retrieval, and\n\t\/\/ should be deterministic.\n\tkeyFunc KeyFunc\n\n\t\/\/ Indication the queue is closed.\n\t\/\/ Used to indicate a queue is closed so a control loop can exit when a queue is empty.\n\t\/\/ Currently, not used to gate any of CRED operations.\n\tclosed bool\n\tclosedLock sync.Mutex\n}\n\nvar (\n\t_ = Queue(&FIFO{}) \/\/ FIFO is a Queue\n)\n\n\/\/ Close the queue.\nfunc (f *FIFO) Close() {\n\tf.closedLock.Lock()\n\tdefer f.closedLock.Unlock()\n\tf.closed = true\n\tf.cond.Broadcast()\n}\n\n\/\/ HasSynced returns true if an Add\/Update\/Delete\/AddIfNotPresent are called first,\n\/\/ or the first batch of items inserted by Replace() has been popped.\nfunc (f *FIFO) HasSynced() bool {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\treturn f.populated && f.initialPopulationCount == 0\n}\n\n\/\/ Add inserts an item, and puts it in the queue. The item is only enqueued\n\/\/ if it doesn't already exist in the set.\nfunc (f *FIFO) Add(obj interface{}) error {\n\tid, err := f.keyFunc(obj)\n\tif err != nil {\n\t\treturn KeyError{obj, err}\n\t}\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\tf.populated = true\n\tif _, exists := f.items[id]; !exists {\n\t\tf.queue = append(f.queue, id)\n\t}\n\tf.items[id] = obj\n\tf.cond.Broadcast()\n\treturn nil\n}\n\n\/\/ AddIfNotPresent inserts an item, and puts it in the queue. If the item is already\n\/\/ present in the set, it is neither enqueued nor added to the set.\n\/\/\n\/\/ This is useful in a single producer\/consumer scenario so that the consumer can\n\/\/ safely retry items without contending with the producer and potentially enqueueing\n\/\/ stale items.\nfunc (f *FIFO) AddIfNotPresent(obj interface{}) error {\n\tid, err := f.keyFunc(obj)\n\tif err != nil {\n\t\treturn KeyError{obj, err}\n\t}\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\tf.addIfNotPresent(id, obj)\n\treturn nil\n}\n\n\/\/ addIfNotPresent assumes the fifo lock is already held and adds the provided\n\/\/ item to the queue under id if it does not already exist.\nfunc (f *FIFO) addIfNotPresent(id string, obj interface{}) {\n\tf.populated = true\n\tif _, exists := f.items[id]; exists {\n\t\treturn\n\t}\n\n\tf.queue = append(f.queue, id)\n\tf.items[id] = obj\n\tf.cond.Broadcast()\n}\n\n\/\/ Update is the same as Add in this implementation.\nfunc (f *FIFO) Update(obj interface{}) error {\n\treturn f.Add(obj)\n}\n\n\/\/ Delete removes an item. It doesn't add it to the queue, because\n\/\/ this implementation assumes the consumer only cares about the objects,\n\/\/ not the order in which they were created\/added.\nfunc (f *FIFO) Delete(obj interface{}) error {\n\tid, err := f.keyFunc(obj)\n\tif err != nil {\n\t\treturn KeyError{obj, err}\n\t}\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\tf.populated = true\n\tdelete(f.items, id)\n\treturn err\n}\n\n\/\/ List returns a list of all the items.\nfunc (f *FIFO) List() []interface{} {\n\tf.lock.RLock()\n\tdefer f.lock.RUnlock()\n\tlist := make([]interface{}, 0, len(f.items))\n\tfor _, item := range f.items {\n\t\tlist = append(list, item)\n\t}\n\treturn list\n}\n\n\/\/ ListKeys returns a list of all the keys of the objects currently\n\/\/ in the FIFO.\nfunc (f *FIFO) ListKeys() []string {\n\tf.lock.RLock()\n\tdefer f.lock.RUnlock()\n\tlist := make([]string, 0, len(f.items))\n\tfor key := range f.items {\n\t\tlist = append(list, key)\n\t}\n\treturn list\n}\n\n\/\/ Get returns the requested item, or sets exists=false.\nfunc (f *FIFO) Get(obj interface{}) (item interface{}, exists bool, err error) {\n\tkey, err := f.keyFunc(obj)\n\tif err != nil {\n\t\treturn nil, false, KeyError{obj, err}\n\t}\n\treturn f.GetByKey(key)\n}\n\n\/\/ GetByKey returns the requested item, or sets exists=false.\nfunc (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {\n\tf.lock.RLock()\n\tdefer f.lock.RUnlock()\n\titem, exists = f.items[key]\n\treturn item, exists, nil\n}\n\n\/\/ IsClosed checks if the queue is closed\nfunc (f *FIFO) IsClosed() bool {\n\tf.closedLock.Lock()\n\tdefer f.closedLock.Unlock()\n\tif f.closed {\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ Pop waits until an item is ready and processes it. If multiple items are\n\/\/ ready, they are returned in the order in which they were added\/updated.\n\/\/ The item is removed from the queue (and the store) before it is processed,\n\/\/ so if you don't successfully process it, it should be added back with\n\/\/ AddIfNotPresent(). process function is called under lock, so it is safe\n\/\/ update data structures in it that need to be in sync with the queue.\nfunc (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\tfor {\n\t\tfor len(f.queue) == 0 {\n\t\t\t\/\/ When the queue is empty, invocation of Pop() is blocked until new item is enqueued.\n\t\t\t\/\/ When Close() is called, the f.closed is set and the condition is broadcasted.\n\t\t\t\/\/ Which causes this loop to continue and return from the Pop().\n\t\t\tif f.IsClosed() {\n\t\t\t\treturn nil, ErrFIFOClosed\n\t\t\t}\n\n\t\t\tf.cond.Wait()\n\t\t}\n\t\tid := f.queue[0]\n\t\tf.queue = f.queue[1:]\n\t\tif f.initialPopulationCount > 0 {\n\t\t\tf.initialPopulationCount--\n\t\t}\n\t\titem, ok := f.items[id]\n\t\tif !ok {\n\t\t\t\/\/ Item may have been deleted subsequently.\n\t\t\tcontinue\n\t\t}\n\t\tdelete(f.items, id)\n\t\terr := process(item)\n\t\tif e, ok := err.(ErrRequeue); ok {\n\t\t\tf.addIfNotPresent(id, item)\n\t\t\terr = e.Err\n\t\t}\n\t\treturn item, err\n\t}\n}\n\n\/\/ Replace will delete the contents of 'f', using instead the given map.\n\/\/ 'f' takes ownership of the map, you should not reference the map again\n\/\/ after calling this function. f's queue is reset, too; upon return, it\n\/\/ will contain the items in the map, in no particular order.\nfunc (f *FIFO) Replace(list []interface{}, resourceVersion string) error {\n\titems := make(map[string]interface{}, len(list))\n\tfor _, item := range list {\n\t\tkey, err := f.keyFunc(item)\n\t\tif err != nil {\n\t\t\treturn KeyError{item, err}\n\t\t}\n\t\titems[key] = item\n\t}\n\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\tif !f.populated {\n\t\tf.populated = true\n\t\tf.initialPopulationCount = len(items)\n\t}\n\n\tf.items = items\n\tf.queue = f.queue[:0]\n\tfor id := range items {\n\t\tf.queue = append(f.queue, id)\n\t}\n\tif len(f.queue) > 0 {\n\t\tf.cond.Broadcast()\n\t}\n\treturn nil\n}\n\n\/\/ Resync will ensure that every object in the Store has its key in the queue.\n\/\/ This should be a no-op, because that property is maintained by all operations.\nfunc (f *FIFO) Resync() error {\n\tf.lock.Lock()\n\tdefer f.lock.Unlock()\n\n\tinQueue := sets.NewString()\n\tfor _, id := range f.queue {\n\t\tinQueue.Insert(id)\n\t}\n\tfor id := range f.items {\n\t\tif !inQueue.Has(id) {\n\t\t\tf.queue = append(f.queue, id)\n\t\t}\n\t}\n\tif len(f.queue) > 0 {\n\t\tf.cond.Broadcast()\n\t}\n\treturn nil\n}\n\n\/\/ NewFIFO returns a Store which can be used to queue up items to\n\/\/ process.\nfunc NewFIFO(keyFunc KeyFunc) *FIFO {\n\tf := &FIFO{\n\t\titems: map[string]interface{}{},\n\t\tqueue: []string{},\n\t\tkeyFunc: keyFunc,\n\t}\n\tf.cond.L = &f.lock\n\treturn f\n}\n<|endoftext|>"} {"text":"<commit_before>package localforeman\n\nimport (\n\t\"os\"\n\n\t\"polydawn.net\/repeatr\/def\"\n\t\"polydawn.net\/repeatr\/executor\"\n\t\"polydawn.net\/repeatr\/lib\/guid\"\n\t\"polydawn.net\/repeatr\/model\/cassandra\"\n\t\"polydawn.net\/repeatr\/model\/catalog\"\n\t\"polydawn.net\/repeatr\/model\/formula\"\n)\n\ntype Foreman struct {\n\t\/\/ configuration\n\n\tcassy cassandra.Cassandra\n\texecutor executor.Executor\n\n\t\/\/ work state\n\n\tchNewCatalog <-chan catalog.ID\n\tchOldCatalog <-chan catalog.ID\n\tcurrentPlans currentPlans\n}\n\nfunc (man *Foreman) work() {\n\tman.register()\n\tfor {\n\t\tman.pump()\n\t\tman.evoke()\n\t}\n}\n\n\/\/ runs once upon start, rigging up our event feeds\nfunc (man *Foreman) register() {\n\t\/\/ Register for catalog changes.\n\tchNewCatalog := make(chan catalog.ID, 100)\n\tman.cassy.ObserveCatalogs(chNewCatalog)\n\tman.chNewCatalog = chNewCatalog\n\n\t\/\/ Grab all current catalogs. Give em one due consideration.\n\t\/\/ Dump em into a channel so we can select freely between these and fresh updates.\n\t\/\/ If an update careens in for one of these, and we react to that first, that's\n\t\/\/ completely AOK: it'll end up nilled out when we reduce to stage2 formulas;\n\t\/\/ the whole thing is an \"at least once\" situation.\n\t\/\/ We operate on CatalogIDs here instead of the full struct for two reasons:\n\t\/\/ - it's cheaper, if you didn't already have the whole thing loaded\n\t\/\/ - it means when you get the memo, you go get the latest -- and this\n\t\/\/ absolves a race between old and updated catalogs in select.\n\toldCats := man.cassy.ListCatalogs()\n\toldCatalogChan := make(chan catalog.ID, len(oldCats))\n\tfor _, cat := range oldCats {\n\t\toldCatalogChan <- cat\n\t}\n\tman.chOldCatalog = oldCatalogChan\n\n\t\/\/ other misc init (can't be arsed to seperate since it's also an \"exactly once, at start\" thing)\n\tman.currentPlans.commissionIndex = make(map[formula.CommissionID]int)\n}\n\n\/\/ runs in a loop, accepting events, generating new formulas, and adding them to currentPlans.\nfunc (man *Foreman) pump() {\n\t\/\/ Select a new and interesting catalog.\n\tvar catID catalog.ID\n\tselect {\n\tcase catID = <-man.chNewCatalog: \/\/ Voom\n\tcase catID = <-man.chOldCatalog: \/\/ Voom\n\t}\n\n\t\/\/ 'Mark' phase: See what we can do with it.\n\tmarkedSet := man.cassy.SelectCommissionsByInputCatalog(catID)\n\n\t\/\/ 'Fill' phase.\n\tformulas := make([]*formula.Stage2, 0)\n\treasons := make(map[formula.CommissionID]*formula.Stage2)\n\tfor _, commish := range markedSet {\n\t\tformula := (*formula.Stage2)(&commish.Formula) \/\/ FIXME need clone func and sane mem owner defn\n\t\tfor iname, input := range formula.Inputs {\n\t\t\tcellID := catalog.ID(iname) \/\/ this may not always be true \/ this is the same type haze around pre-pin inputs showing again\n\t\t\tinput.Hash = string(man.cassy.Catalog(cellID).Latest().Hash) \/\/ this string cast is because def is currently Wrong\n\t\t}\n\t\tformulas = append(formulas, formula)\n\t\treasons[commish.ID] = formula\n\t}\n\n\t\/\/ 'Seenit' filter.\n\t\/\/ TODO\n\t\/\/ Compute Stage2 identifiers and index by that. If it's been seen before, forget it.\n\n\t\/\/ Commit phase: push the stage2 formula back to the knowledge base.\n\t\/\/ TODO\n\n\t\/\/ Planning phase: update our internal concept of what's up next.\n\tfor reason, formula := range reasons {\n\t\tman.currentPlans.push(formula, reason)\n\t}\n}\n\n\/*\n\tAn atom capturing the foreman's current best idea of what formulas\n\tit wants to evaluate next.\n\n\tThis is stateful because the foreman acknowledges info and produces\n\tnew plans at a different pace than it can execute their evaluation,\n\tand it may also decide to cancel some plans in response to new info.\n\t(Also, it's a checkpoint for use in testing.)\n*\/\ntype currentPlans struct {\n\t\/\/ flat list of what formulas we want to run next, in order.\n\tqueue []*formula.Stage2\n\n\t\/\/ map from cmid to queue index (so we can delete\/replace things if they're now out of date).\n\tcommissionIndex map[formula.CommissionID]int\n}\n\nfunc (p *currentPlans) push(f *formula.Stage2, reason formula.CommissionID) {\n\tif i, ok := p.commissionIndex[reason]; ok {\n\t\tp.queue[i] = f\n\t} else {\n\t\ti = len(p.queue)\n\t\tp.queue = append(p.queue, f)\n\t\tp.commissionIndex[reason] = i\n\t}\n}\n\nfunc (p *currentPlans) poll() *formula.Stage2 {\n\tl := len(p.queue)\n\tif l == 0 {\n\t\treturn nil\n\t}\n\tv := p.queue[0]\n\tp.queue = p.queue[1:]\n\treturn v\n}\n\nfunc (man *Foreman) evoke() {\n\t\/\/ Run.\n\tfor _, formula := range man.currentPlans.queue {\n\t\tjob := man.executor.Start(def.Formula(*formula), def.JobID(guid.New()), nil, os.Stderr)\n\t\tjob.Wait()\n\t}\n\t\/\/ TODO all sorts of other housekeeping on the queue\n\n\t\/\/ Commit phase: push the stage3 formulas back to storage.\n\t\/\/ TODO\n\t\/\/ If someone wants to react to these new run records by publishing\n\t\/\/ a new edition of a catalog, they can do that by asking\n\t\/\/ cassy to observe new run records like this one as they come in.\n}\n<commit_msg>It's really just plans. \"Current\" is in your head.<commit_after>package localforeman\n\nimport (\n\t\"os\"\n\n\t\"polydawn.net\/repeatr\/def\"\n\t\"polydawn.net\/repeatr\/executor\"\n\t\"polydawn.net\/repeatr\/lib\/guid\"\n\t\"polydawn.net\/repeatr\/model\/cassandra\"\n\t\"polydawn.net\/repeatr\/model\/catalog\"\n\t\"polydawn.net\/repeatr\/model\/formula\"\n)\n\ntype Foreman struct {\n\t\/\/ configuration\n\n\tcassy cassandra.Cassandra\n\texecutor executor.Executor\n\n\t\/\/ work state\n\n\tchNewCatalog <-chan catalog.ID\n\tchOldCatalog <-chan catalog.ID\n\tcurrentPlans plans\n}\n\nfunc (man *Foreman) work() {\n\tman.register()\n\tfor {\n\t\tman.pump()\n\t\tman.evoke()\n\t}\n}\n\n\/\/ runs once upon start, rigging up our event feeds\nfunc (man *Foreman) register() {\n\t\/\/ Register for catalog changes.\n\tchNewCatalog := make(chan catalog.ID, 100)\n\tman.cassy.ObserveCatalogs(chNewCatalog)\n\tman.chNewCatalog = chNewCatalog\n\n\t\/\/ Grab all current catalogs. Give em one due consideration.\n\t\/\/ Dump em into a channel so we can select freely between these and fresh updates.\n\t\/\/ If an update careens in for one of these, and we react to that first, that's\n\t\/\/ completely AOK: it'll end up nilled out when we reduce to stage2 formulas;\n\t\/\/ the whole thing is an \"at least once\" situation.\n\t\/\/ We operate on CatalogIDs here instead of the full struct for two reasons:\n\t\/\/ - it's cheaper, if you didn't already have the whole thing loaded\n\t\/\/ - it means when you get the memo, you go get the latest -- and this\n\t\/\/ absolves a race between old and updated catalogs in select.\n\toldCats := man.cassy.ListCatalogs()\n\toldCatalogChan := make(chan catalog.ID, len(oldCats))\n\tfor _, cat := range oldCats {\n\t\toldCatalogChan <- cat\n\t}\n\tman.chOldCatalog = oldCatalogChan\n\n\t\/\/ other misc init (can't be arsed to seperate since it's also an \"exactly once, at start\" thing)\n\tman.currentPlans.commissionIndex = make(map[formula.CommissionID]int)\n}\n\n\/\/ runs in a loop, accepting events, generating new formulas, and adding them to currentPlans.\nfunc (man *Foreman) pump() {\n\t\/\/ Select a new and interesting catalog.\n\tvar catID catalog.ID\n\tselect {\n\tcase catID = <-man.chNewCatalog: \/\/ Voom\n\tcase catID = <-man.chOldCatalog: \/\/ Voom\n\t}\n\n\t\/\/ 'Mark' phase: See what we can do with it.\n\tmarkedSet := man.cassy.SelectCommissionsByInputCatalog(catID)\n\n\t\/\/ 'Fill' phase.\n\tformulas := make([]*formula.Stage2, 0)\n\treasons := make(map[formula.CommissionID]*formula.Stage2)\n\tfor _, commish := range markedSet {\n\t\tformula := (*formula.Stage2)(&commish.Formula) \/\/ FIXME need clone func and sane mem owner defn\n\t\tfor iname, input := range formula.Inputs {\n\t\t\tcellID := catalog.ID(iname) \/\/ this may not always be true \/ this is the same type haze around pre-pin inputs showing again\n\t\t\tinput.Hash = string(man.cassy.Catalog(cellID).Latest().Hash) \/\/ this string cast is because def is currently Wrong\n\t\t}\n\t\tformulas = append(formulas, formula)\n\t\treasons[commish.ID] = formula\n\t}\n\n\t\/\/ 'Seenit' filter.\n\t\/\/ TODO\n\t\/\/ Compute Stage2 identifiers and index by that. If it's been seen before, forget it.\n\n\t\/\/ Commit phase: push the stage2 formula back to the knowledge base.\n\t\/\/ TODO\n\n\t\/\/ Planning phase: update our internal concept of what's up next.\n\tfor reason, formula := range reasons {\n\t\tman.currentPlans.push(formula, reason)\n\t}\n}\n\n\/*\n\tAn atom capturing the foreman's current best idea of what formulas\n\tit wants to evaluate next.\n\n\tThis is stateful because the foreman acknowledges info and produces\n\tnew plans at a different pace than it can execute their evaluation,\n\tand it may also decide to cancel some plans in response to new info.\n\t(Also, it's a checkpoint for use in testing.)\n*\/\ntype plans struct {\n\t\/\/ flat list of what formulas we want to run next, in order.\n\tqueue []*formula.Stage2\n\n\t\/\/ map from cmid to queue index (so we can delete\/replace things if they're now out of date).\n\tcommissionIndex map[formula.CommissionID]int\n}\n\nfunc (p *plans) push(f *formula.Stage2, reason formula.CommissionID) {\n\tif i, ok := p.commissionIndex[reason]; ok {\n\t\tp.queue[i] = f\n\t} else {\n\t\ti = len(p.queue)\n\t\tp.queue = append(p.queue, f)\n\t\tp.commissionIndex[reason] = i\n\t}\n}\n\nfunc (p *plans) poll() *formula.Stage2 {\n\tl := len(p.queue)\n\tif l == 0 {\n\t\treturn nil\n\t}\n\tv := p.queue[0]\n\tp.queue = p.queue[1:]\n\treturn v\n}\n\nfunc (man *Foreman) evoke() {\n\t\/\/ Run.\n\tfor _, formula := range man.currentPlans.queue {\n\t\tjob := man.executor.Start(def.Formula(*formula), def.JobID(guid.New()), nil, os.Stderr)\n\t\tjob.Wait()\n\t}\n\t\/\/ TODO all sorts of other housekeeping on the queue\n\n\t\/\/ Commit phase: push the stage3 formulas back to storage.\n\t\/\/ TODO\n\t\/\/ If someone wants to react to these new run records by publishing\n\t\/\/ a new edition of a catalog, they can do that by asking\n\t\/\/ cassy to observe new run records like this one as they come in.\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nPresent displays slide presentations and articles. It runs a web server that\npresents slide and article files from the current directory.\n\nIt may be run as a stand-alone command or an App Engine app.\nThe stand-alone version permits the execution of programs from within a\npresentation. The App Engine version does not provide this functionality.\n\nUsage of present:\n -base=\"\": base path for slide template and static resources\n -http=\"127.0.0.1:3999\": host:port to listen on\n\nYou may use the app.yaml file provided in the root of the go.talks repository\nto deploy present to App Engine:\n\tappcfg.py update -A your-app-id -V your-app-version \/path\/to\/go.talks\n\nInput files are named foo.extension, where \"extension\" defines the format of\nthe generated output. The supported formats are:\n\t.slide \/\/ HTML5 slide presentation\n\t.article \/\/ article format, such as a blog post\n\nThe present file format is documented by the present package:\nhttp:\/\/godoc.org\/code.google.com\/p\/go.tools\/godoc\/present\n*\/\npackage main\n<commit_msg>go.talk\/present: fix a link.<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nPresent displays slide presentations and articles. It runs a web server that\npresents slide and article files from the current directory.\n\nIt may be run as a stand-alone command or an App Engine app.\nThe stand-alone version permits the execution of programs from within a\npresentation. The App Engine version does not provide this functionality.\n\nUsage of present:\n -base=\"\": base path for slide template and static resources\n -http=\"127.0.0.1:3999\": host:port to listen on\n\nYou may use the app.yaml file provided in the root of the go.talks repository\nto deploy present to App Engine:\n\tappcfg.py update -A your-app-id -V your-app-version \/path\/to\/go.talks\n\nInput files are named foo.extension, where \"extension\" defines the format of\nthe generated output. The supported formats are:\n\t.slide \/\/ HTML5 slide presentation\n\t.article \/\/ article format, such as a blog post\n\nThe present file format is documented by the present package:\nhttp:\/\/godoc.org\/code.google.com\/p\/go.tools\/present\n*\/\npackage main\n<|endoftext|>"} {"text":"<commit_before>package chug_test\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"code.cloudfoundry.org\/lager\/chug\"\n\t\"github.com\/onsi\/gomega\/format\"\n\t\"github.com\/onsi\/gomega\/types\"\n)\n\nfunc MatchLogEntry(entry chug.LogEntry) types.GomegaMatcher {\n\treturn &logEntryMatcher{entry}\n}\n\ntype logEntryMatcher struct {\n\tentry chug.LogEntry\n}\n\nfunc (m *logEntryMatcher) Match(actual interface{}) (success bool, err error) {\n\tactualEntry, ok := actual.(chug.LogEntry)\n\tif !ok {\n\t\treturn false, fmt.Errorf(\"MatchLogEntry must be passed a chug.LogEntry. Got:\\n%s\", format.Object(actual, 1))\n\t}\n\n\treturn m.entry.LogLevel == actualEntry.LogLevel &&\n\t\tm.entry.Source == actualEntry.Source &&\n\t\tm.entry.Message == actualEntry.Message &&\n\t\tm.entry.Session == actualEntry.Session &&\n\t\treflect.DeepEqual(m.entry.Error, actualEntry.Error) &&\n\t\tm.entry.Trace == actualEntry.Trace &&\n\t\treflect.DeepEqual(m.entry.Data, actualEntry.Data), nil\n}\n\nfunc (m *logEntryMatcher) FailureMessage(actual interface{}) (message string) {\n\treturn format.Message(actual, \"to equal\", m.entry)\n}\n\nfunc (m *logEntryMatcher) NegatedFailureMessage(actual interface{}) (message string) {\n\treturn format.Message(actual, \"not to equal\", m.entry)\n}\n<commit_msg>Fix code which breaks compiler in match_log_entry_test<commit_after>package chug_test\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"code.cloudfoundry.org\/lager\/chug\"\n\t\"github.com\/onsi\/gomega\/format\"\n\t\"github.com\/onsi\/gomega\/types\"\n)\n\nfunc MatchLogEntry(entry chug.LogEntry) types.GomegaMatcher {\n\treturn &logEntryMatcher{entry}\n}\n\ntype logEntryMatcher struct {\n\tentry chug.LogEntry\n}\n\nfunc (m *logEntryMatcher) Match(actual interface{}) (success bool, err error) {\n\tactualEntry, ok := actual.(chug.LogEntry)\n\tif !ok {\n\t\treturn false, fmt.Errorf(\"MatchLogEntry must be passed a chug.LogEntry. Got:\\n%s\", format.Object(actual, 1))\n\t}\n\n\treturn reflect.DeepEqual(m.entry.Error, actualEntry.Error) &&\n\t\tm.entry.LogLevel == actualEntry.LogLevel &&\n\t\tm.entry.Source == actualEntry.Source &&\n\t\tm.entry.Message == actualEntry.Message &&\n\t\tm.entry.Session == actualEntry.Session &&\n\t\tm.entry.Trace == actualEntry.Trace &&\n\t\treflect.DeepEqual(m.entry.Data, actualEntry.Data), nil\n}\n\nfunc (m *logEntryMatcher) FailureMessage(actual interface{}) (message string) {\n\treturn format.Message(actual, \"to equal\", m.entry)\n}\n\nfunc (m *logEntryMatcher) NegatedFailureMessage(actual interface{}) (message string) {\n\treturn format.Message(actual, \"not to equal\", m.entry)\n}\n<|endoftext|>"} {"text":"<commit_before>package localca\n\nimport (\n\t\"encoding\/pem\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/cloudflare\/cfssl\/csr\"\n\t\"github.com\/cloudflare\/cfssl\/helpers\"\n\t\"github.com\/cloudflare\/cfssl\/initca\"\n)\n\nfunc tempName() (string, error) {\n\ttmpf, err := ioutil.TempFile(\"\", \"transport_cachedkp_\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tname := tmpf.Name()\n\ttmpf.Close()\n\treturn name, nil\n}\n\nfunc TestEncodePEM(t *testing.T) {\n\tp := &pem.Block{\n\t\tType: \"CERTIFICATE REQUEST\",\n\t\tBytes: []byte(`¯\\_(ツ)_\/¯`),\n\t}\n\tt.Logf(\"PEM:\\n%s\\n\\n\", string(pem.EncodeToMemory(p)))\n}\n\nfunc TestLoadSigner(t *testing.T) {\n\tlca := &CA{}\n\tcertPEM, csrPEM, keyPEM, err := initca.New(ExampleRequest())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = lca.CACertificate()\n\tif !errors.Is(err, errNotSetup) {\n\t\tt.Fatalf(\"expected an errNotSetup (%v), got: %v\", errNotSetup, err)\n\t}\n\n\t_, err = lca.SignCSR(csrPEM)\n\tif !errors.Is(err, errNotSetup) {\n\t\tt.Fatalf(\"expected an errNotSetup (%v), got: %v\", errNotSetup, err)\n\t}\n\n\tlca.KeyFile, err = tempName()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.Remove(lca.KeyFile)\n\n\tlca.CertFile, err = tempName()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.Remove(lca.CertFile)\n\n\terr = ioutil.WriteFile(lca.KeyFile, keyPEM, 0644)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = ioutil.WriteFile(lca.CertFile, certPEM, 0644)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = Load(lca, ExampleSigningConfig())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nvar testRequest = &csr.CertificateRequest{\n\tCN: \"Transport Test Identity\",\n\tKeyRequest: &csr.KeyRequest{\n\t\tA: \"ecdsa\",\n\t\tS: 256,\n\t},\n\tHosts: []string{\"127.0.0.1\"},\n}\n\nfunc TestNewSigner(t *testing.T) {\n\treq := ExampleRequest()\n\tlca, err := New(req, ExampleSigningConfig())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcsrPEM, _, err := csr.ParseRequest(testRequest)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcertPEM, err := lca.SignCSR(csrPEM)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = helpers.ParseCertificatePEM(certPEM)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcertPEM, err = lca.CACertificate()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcert, err := helpers.ParseCertificatePEM(certPEM)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif cert.Subject.CommonName != req.CN {\n\t\tt.Fatalf(\"common names don't match: '%s' != '%s'\", cert.Subject.CommonName, req.CN)\n\t}\n\n\tlca.Toggle()\n\t_, err = lca.SignCSR(csrPEM)\n\tif !errors.Is(err, errDisabled) {\n\t\tt.Fatalf(\"expected an errDisabled (%v), got: %v\", errDisabled, err)\n\t}\n\tlca.Toggle()\n\n\t_, err = lca.SignCSR(certPEM)\n\tif err == nil {\n\t\tt.Fatal(\"shouldn't be able to sign non-CSRs\")\n\t}\n\n\tp := &pem.Block{\n\t\tType: \"CERTIFICATE REQUEST\",\n\t\tBytes: []byte(`¯\\_(ツ)_\/¯`),\n\t}\n\tjunkCSR := pem.EncodeToMemory(p)\n\n\t_, err = lca.SignCSR(junkCSR)\n\tif err == nil {\n\t\tt.Fatal(\"signing a junk CSR should fail\")\n\t}\n\tt.Logf(\"error: %s\", err)\n}\n<commit_msg>transport\/ca\/localca: remove uses of deprecated io\/ioutil<commit_after>package localca\n\nimport (\n\t\"encoding\/pem\"\n\t\"errors\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/cloudflare\/cfssl\/csr\"\n\t\"github.com\/cloudflare\/cfssl\/helpers\"\n\t\"github.com\/cloudflare\/cfssl\/initca\"\n)\n\nfunc TestEncodePEM(t *testing.T) {\n\tp := &pem.Block{\n\t\tType: \"CERTIFICATE REQUEST\",\n\t\tBytes: []byte(`¯\\_(ツ)_\/¯`),\n\t}\n\tt.Logf(\"PEM:\\n%s\\n\\n\", string(pem.EncodeToMemory(p)))\n}\n\nfunc TestLoadSigner(t *testing.T) {\n\tlca := &CA{}\n\tcertPEM, csrPEM, keyPEM, err := initca.New(ExampleRequest())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = lca.CACertificate()\n\tif !errors.Is(err, errNotSetup) {\n\t\tt.Fatalf(\"expected an errNotSetup (%v), got: %v\", errNotSetup, err)\n\t}\n\n\t_, err = lca.SignCSR(csrPEM)\n\tif !errors.Is(err, errNotSetup) {\n\t\tt.Fatalf(\"expected an errNotSetup (%v), got: %v\", errNotSetup, err)\n\t}\n\n\ttmpDir := t.TempDir()\n\tlca.KeyFile = filepath.Join(tmpDir, \"KeyFile\")\n\tlca.CertFile = filepath.Join(tmpDir, \"CertFile\")\n\n\terr = os.WriteFile(lca.KeyFile, keyPEM, 0644)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = os.WriteFile(lca.CertFile, certPEM, 0644)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = Load(lca, ExampleSigningConfig())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nvar testRequest = &csr.CertificateRequest{\n\tCN: \"Transport Test Identity\",\n\tKeyRequest: &csr.KeyRequest{\n\t\tA: \"ecdsa\",\n\t\tS: 256,\n\t},\n\tHosts: []string{\"127.0.0.1\"},\n}\n\nfunc TestNewSigner(t *testing.T) {\n\treq := ExampleRequest()\n\tlca, err := New(req, ExampleSigningConfig())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcsrPEM, _, err := csr.ParseRequest(testRequest)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcertPEM, err := lca.SignCSR(csrPEM)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t_, err = helpers.ParseCertificatePEM(certPEM)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcertPEM, err = lca.CACertificate()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcert, err := helpers.ParseCertificatePEM(certPEM)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif cert.Subject.CommonName != req.CN {\n\t\tt.Fatalf(\"common names don't match: '%s' != '%s'\", cert.Subject.CommonName, req.CN)\n\t}\n\n\tlca.Toggle()\n\t_, err = lca.SignCSR(csrPEM)\n\tif !errors.Is(err, errDisabled) {\n\t\tt.Fatalf(\"expected an errDisabled (%v), got: %v\", errDisabled, err)\n\t}\n\tlca.Toggle()\n\n\t_, err = lca.SignCSR(certPEM)\n\tif err == nil {\n\t\tt.Fatal(\"shouldn't be able to sign non-CSRs\")\n\t}\n\n\tp := &pem.Block{\n\t\tType: \"CERTIFICATE REQUEST\",\n\t\tBytes: []byte(`¯\\_(ツ)_\/¯`),\n\t}\n\tjunkCSR := pem.EncodeToMemory(p)\n\n\t_, err = lca.SignCSR(junkCSR)\n\tif err == nil {\n\t\tt.Fatal(\"signing a junk CSR should fail\")\n\t}\n\tt.Logf(\"error: %s\", err)\n}\n<|endoftext|>"} {"text":"<commit_before>package editform\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/aerogo\/api\"\n\t\"github.com\/animenotifier\/arn\"\n\t\"github.com\/animenotifier\/notify.moe\/components\"\n\t\"github.com\/animenotifier\/notify.moe\/utils\"\n)\n\n\/\/ Render renders a generic editing UI for any kind of datatype that has an ID.\nfunc Render(obj interface{}, title string, user *arn.User) string {\n\tt := reflect.TypeOf(obj).Elem()\n\tv := reflect.ValueOf(obj).Elem()\n\tid := findMainID(t, v)\n\tlowerCaseTypeName := strings.ToLower(t.Name())\n\tendpoint := `\/api\/` + lowerCaseTypeName + `\/` + id.String()\n\n\tvar b bytes.Buffer\n\n\tb.WriteString(`<div class=\"widget-form\">`)\n\tb.WriteString(`<div class=\"widget\" data-api=\"` + endpoint + `\">`)\n\n\t\/\/ Title\n\tb.WriteString(`<h1 class=\"mountable\">`)\n\tb.WriteString(title)\n\tb.WriteString(`<\/h1>`)\n\n\t\/\/ Render the object with its fields\n\tRenderObject(&b, obj, \"\")\n\n\t\/\/ Additional buttons when logged in\n\tif user != nil {\n\t\tb.WriteString(`<div class=\"buttons\">`)\n\n\t\t\/\/ Publish button\n\t\t_, ok := t.FieldByName(\"IsDraft\")\n\n\t\tif ok {\n\t\t\tisDraft := v.FieldByName(\"IsDraft\").Interface().(bool)\n\n\t\t\tif isDraft {\n\t\t\t\tb.WriteString(`<div class=\"buttons\"><button class=\"mountable action\" data-action=\"publish\" data-trigger=\"click\">` + utils.Icon(\"share-alt\") + `Publish<\/button><\/div>`)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Delete button\n\t\t_, isDeletable := obj.(api.Deletable)\n\n\t\tif isDeletable && (user.Role == \"editor\" || user.Role == \"admin\") {\n\t\t\treturnPath := \"\"\n\n\t\t\tswitch lowerCaseTypeName {\n\t\t\tcase \"anime\":\n\t\t\t\treturnPath = \"\/explore\"\n\t\t\tcase \"company\":\n\t\t\t\treturnPath = \"\/companies\"\n\t\t\tdefault:\n\t\t\t\treturnPath = \"\/\" + lowerCaseTypeName + \"s\"\n\t\t\t}\n\n\t\t\tb.WriteString(`<button class=\"mountable action\" data-action=\"deleteObject\" data-trigger=\"click\" data-return-path=\"` + returnPath + `\" data-confirm-type=\"` + lowerCaseTypeName + `\">` + utils.Icon(\"trash\") + `Delete<\/button>`)\n\t\t}\n\n\t\tb.WriteString(`<\/div>`)\n\t}\n\n\tb.WriteString(\"<\/div>\")\n\tb.WriteString(\"<\/div>\")\n\n\treturn b.String()\n}\n\n\/\/ RenderObject renders the UI for the object into the bytes buffer and appends an ID prefix for all API requests.\n\/\/ The ID prefix should either be empty or end with a dot character.\nfunc RenderObject(b *bytes.Buffer, obj interface{}, idPrefix string) {\n\tt := reflect.TypeOf(obj)\n\tv := reflect.ValueOf(obj)\n\n\tif t.Kind() == reflect.Ptr {\n\t\tt = t.Elem()\n\t\tv = v.Elem()\n\t}\n\n\t\/\/ Fields\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tfield := t.Field(i)\n\t\tRenderField(b, &v, field, idPrefix)\n\t}\n}\n\n\/\/ RenderField ...\nfunc RenderField(b *bytes.Buffer, v *reflect.Value, field reflect.StructField, idPrefix string) {\n\tfieldValue := reflect.Indirect(v.FieldByName(field.Name))\n\n\t\/\/ Embedded fields\n\tif field.Anonymous {\n\t\tRenderObject(b, fieldValue.Interface(), idPrefix)\n\t\treturn\n\t}\n\n\tif field.Tag.Get(\"editable\") != \"true\" {\n\t\treturn\n\t}\n\n\tb.WriteString(\"<div class='mountable'>\")\n\tdefer b.WriteString(\"<\/div>\")\n\n\tfieldType := field.Type.String()\n\n\t\/\/ String\n\tif fieldType == \"string\" {\n\t\tidType := field.Tag.Get(\"idType\")\n\n\t\t\/\/ Try to infer the ID type by the field name\n\t\tif idType == \"\" {\n\t\t\tswitch field.Name {\n\t\t\tcase \"AnimeID\":\n\t\t\t\tidType = \"Anime\"\n\n\t\t\tcase \"CharacterID\":\n\t\t\t\tidType = \"Character\"\n\t\t\t}\n\t\t}\n\n\t\tshowPreview := idType != \"\" && fieldValue.String() != \"\"\n\n\t\tif showPreview {\n\t\t\tb.WriteString(\"<div class='widget-section-with-preview'>\")\n\t\t}\n\n\t\t\/\/ Input field\n\t\tif field.Tag.Get(\"datalist\") != \"\" {\n\t\t\tdataList := field.Tag.Get(\"datalist\")\n\t\t\tvalues := arn.DataLists[dataList]\n\t\t\tb.WriteString(components.InputSelection(idPrefix+field.Name, fieldValue.String(), field.Name, field.Tag.Get(\"tooltip\"), values))\n\t\t} else if field.Tag.Get(\"type\") == \"textarea\" {\n\t\t\tb.WriteString(components.InputTextArea(idPrefix+field.Name, fieldValue.String(), field.Name, field.Tag.Get(\"tooltip\")))\n\t\t} else if field.Tag.Get(\"type\") == \"upload\" {\n\t\t\tendpoint := field.Tag.Get(\"endpoint\")\n\t\t\tid := v.FieldByName(\"ID\").String()\n\t\t\tendpoint = strings.Replace(endpoint, \":id\", id, 1)\n\n\t\t\tb.WriteString(components.InputFileUpload(idPrefix+field.Name, field.Name, field.Tag.Get(\"filetype\"), endpoint))\n\t\t} else {\n\t\t\tb.WriteString(components.InputText(idPrefix+field.Name, fieldValue.String(), field.Name, field.Tag.Get(\"tooltip\")))\n\t\t}\n\n\t\tif showPreview {\n\t\t\tb.WriteString(\"<div class='widget-section-preview'>\")\n\t\t}\n\n\t\t\/\/ Preview\n\t\tswitch idType {\n\t\tcase \"Anime\":\n\t\t\tanimeID := fieldValue.String()\n\t\t\tanime, err := arn.GetAnime(animeID)\n\n\t\t\tif err == nil {\n\t\t\t\tb.WriteString(components.EditFormImagePreview(anime.Link(), anime.ImageLink(\"small\"), true))\n\t\t\t}\n\n\t\tcase \"Character\":\n\t\t\tcharacterID := fieldValue.String()\n\t\t\tcharacter, err := arn.GetCharacter(characterID)\n\n\t\t\tif err == nil {\n\t\t\t\tb.WriteString(components.EditFormImagePreview(character.Link(), character.ImageLink(\"medium\"), false))\n\t\t\t}\n\n\t\tcase \"\":\n\t\t\tbreak\n\n\t\tdefault:\n\t\t\tfmt.Println(\"Error: Unknown idType tag: \" + idType)\n\t\t}\n\n\t\t\/\/ Close preview tags\n\t\tif showPreview {\n\t\t\tb.WriteString(\"<\/div><\/div>\")\n\t\t}\n\n\t\treturn\n\t}\n\n\t\/\/ Int\n\tif fieldType == \"int\" {\n\t\tb.WriteString(components.InputNumber(idPrefix+field.Name, float64(fieldValue.Int()), field.Name, field.Tag.Get(\"tooltip\"), \"\", \"\", \"1\"))\n\t\treturn\n\t}\n\n\t\/\/ Float\n\tif fieldType == \"float64\" {\n\t\tb.WriteString(components.InputNumber(idPrefix+field.Name, fieldValue.Float(), field.Name, field.Tag.Get(\"tooltip\"), \"\", \"\", \"\"))\n\t\treturn\n\t}\n\n\t\/\/ Bool\n\tif fieldType == \"bool\" {\n\t\tif field.Name == \"IsDraft\" {\n\t\t\treturn\n\t\t}\n\n\t\tb.WriteString(components.InputBool(idPrefix+field.Name, fieldValue.Bool(), field.Name, field.Tag.Get(\"tooltip\")))\n\t\treturn\n\t}\n\n\t\/\/ Array of strings\n\tif fieldType == \"[]string\" {\n\t\tb.WriteString(components.InputTags(idPrefix+field.Name, fieldValue.Interface().([]string), field.Name, field.Tag.Get(\"tooltip\")))\n\t\treturn\n\t}\n\n\t\/\/ Any kind of array\n\tif strings.HasPrefix(fieldType, \"[]\") {\n\t\tb.WriteString(`<div class=\"widget-section\">`)\n\t\tb.WriteString(`<h3 class=\"widget-title\">`)\n\t\tb.WriteString(field.Name)\n\t\tb.WriteString(`<\/h3>`)\n\n\t\tfor sliceIndex := 0; sliceIndex < fieldValue.Len(); sliceIndex++ {\n\t\t\tb.WriteString(`<div class=\"widget-section\">`)\n\n\t\t\tb.WriteString(`<div class=\"widget-title\">`)\n\n\t\t\t\/\/ Title\n\t\t\tb.WriteString(strconv.Itoa(sliceIndex+1) + \". \" + field.Name)\n\t\t\tb.WriteString(`<div class=\"spacer\"><\/div>`)\n\n\t\t\t\/\/ Remove button\n\t\t\tb.WriteString(`<button class=\"action\" title=\"Delete this ` + field.Name + `\" data-action=\"arrayRemove\" data-trigger=\"click\" data-field=\"` + field.Name + `\" data-index=\"`)\n\t\t\tb.WriteString(strconv.Itoa(sliceIndex))\n\t\t\tb.WriteString(`\">` + utils.RawIcon(\"trash\") + `<\/button>`)\n\n\t\t\tb.WriteString(`<\/div>`)\n\n\t\t\tarrayObj := fieldValue.Index(sliceIndex).Interface()\n\t\t\tarrayIDPrefix := fmt.Sprintf(\"%s[%d].\", field.Name, sliceIndex)\n\t\t\tRenderObject(b, arrayObj, arrayIDPrefix)\n\n\t\t\t\/\/ Preview\n\t\t\t\/\/ elementValue := fieldValue.Index(sliceIndex)\n\t\t\t\/\/ RenderArrayElement(b, &elementValue)\n\t\t\tif fieldType == \"[]*arn.ExternalMedia\" {\n\t\t\t\tb.WriteString(components.ExternalMedia(fieldValue.Index(sliceIndex).Interface().(*arn.ExternalMedia)))\n\t\t\t}\n\n\t\t\tb.WriteString(`<\/div>`)\n\t\t}\n\n\t\tb.WriteString(`<div class=\"buttons\">`)\n\t\tb.WriteString(`<button class=\"action\" data-action=\"arrayAppend\" data-trigger=\"click\" data-field=\"` + field.Name + `\">` + utils.Icon(\"plus\") + `Add ` + field.Name + `<\/button>`)\n\t\tb.WriteString(`<\/div>`)\n\n\t\tb.WriteString(`<\/div>`)\n\t\treturn\n\t}\n\n\t\/\/ Any custom field type will be recursively rendered via another RenderObject call\n\tb.WriteString(`<div class=\"widget-section\">`)\n\tb.WriteString(`<h3 class=\"widget-title\">` + field.Name + `<\/h3>`)\n\n\t\/\/ Indent the fields\n\tb.WriteString(`<div class=\"indent\">`)\n\tRenderObject(b, fieldValue.Interface(), field.Name+\".\")\n\tb.WriteString(`<\/div>`)\n\n\tb.WriteString(`<\/div>`)\n}\n\n\/\/ findMainID finds the main ID of the object.\nfunc findMainID(t reflect.Type, v reflect.Value) reflect.Value {\n\tidField := v.FieldByName(\"ID\")\n\n\tif idField.IsValid() {\n\t\treturn reflect.Indirect(idField)\n\t}\n\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tfield := t.Field(i)\n\n\t\tif field.Tag.Get(\"mainID\") == \"true\" {\n\t\t\treturn reflect.Indirect(v.Field(i))\n\t\t}\n\t}\n\n\tpanic(\"Type \" + t.Name() + \" doesn't have a main ID!\")\n}\n<commit_msg>Made editform less complex<commit_after>package editform\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/aerogo\/api\"\n\t\"github.com\/animenotifier\/arn\"\n\t\"github.com\/animenotifier\/notify.moe\/components\"\n\t\"github.com\/animenotifier\/notify.moe\/utils\"\n)\n\n\/\/ Render renders a generic editing UI for any kind of datatype that has an ID.\nfunc Render(obj interface{}, title string, user *arn.User) string {\n\tt := reflect.TypeOf(obj).Elem()\n\tv := reflect.ValueOf(obj).Elem()\n\tid := findMainID(t, v)\n\tlowerCaseTypeName := strings.ToLower(t.Name())\n\tendpoint := `\/api\/` + lowerCaseTypeName + `\/` + id.String()\n\n\tvar b bytes.Buffer\n\n\tb.WriteString(`<div class=\"widget-form\">`)\n\tb.WriteString(`<div class=\"widget\" data-api=\"` + endpoint + `\">`)\n\n\t\/\/ Title\n\tb.WriteString(`<h1 class=\"mountable\">`)\n\tb.WriteString(title)\n\tb.WriteString(`<\/h1>`)\n\n\t\/\/ Render the object with its fields\n\tRenderObject(&b, obj, \"\")\n\n\t\/\/ Additional buttons when logged in\n\tif user != nil {\n\t\tb.WriteString(`<div class=\"buttons\">`)\n\n\t\t\/\/ Publish button\n\t\t_, ok := t.FieldByName(\"IsDraft\")\n\n\t\tif ok {\n\t\t\tisDraft := v.FieldByName(\"IsDraft\").Interface().(bool)\n\n\t\t\tif isDraft {\n\t\t\t\tb.WriteString(`<div class=\"buttons\"><button class=\"mountable action\" data-action=\"publish\" data-trigger=\"click\">` + utils.Icon(\"share-alt\") + `Publish<\/button><\/div>`)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Delete button\n\t\t_, isDeletable := obj.(api.Deletable)\n\n\t\tif isDeletable && (user.Role == \"editor\" || user.Role == \"admin\") {\n\t\t\treturnPath := \"\"\n\n\t\t\tswitch lowerCaseTypeName {\n\t\t\tcase \"anime\":\n\t\t\t\treturnPath = \"\/explore\"\n\t\t\tcase \"company\":\n\t\t\t\treturnPath = \"\/companies\"\n\t\t\tdefault:\n\t\t\t\treturnPath = \"\/\" + lowerCaseTypeName + \"s\"\n\t\t\t}\n\n\t\t\tb.WriteString(`<button class=\"mountable action\" data-action=\"deleteObject\" data-trigger=\"click\" data-return-path=\"` + returnPath + `\" data-confirm-type=\"` + lowerCaseTypeName + `\">` + utils.Icon(\"trash\") + `Delete<\/button>`)\n\t\t}\n\n\t\tb.WriteString(`<\/div>`)\n\t}\n\n\tb.WriteString(\"<\/div>\")\n\tb.WriteString(\"<\/div>\")\n\n\treturn b.String()\n}\n\n\/\/ RenderObject renders the UI for the object into the bytes buffer and appends an ID prefix for all API requests.\n\/\/ The ID prefix should either be empty or end with a dot character.\nfunc RenderObject(b *bytes.Buffer, obj interface{}, idPrefix string) {\n\tt := reflect.TypeOf(obj)\n\tv := reflect.ValueOf(obj)\n\n\tif t.Kind() == reflect.Ptr {\n\t\tt = t.Elem()\n\t\tv = v.Elem()\n\t}\n\n\t\/\/ Fields\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tfield := t.Field(i)\n\t\tRenderField(b, &v, field, idPrefix)\n\t}\n}\n\n\/\/ RenderField ...\nfunc RenderField(b *bytes.Buffer, v *reflect.Value, field reflect.StructField, idPrefix string) {\n\tfieldValue := reflect.Indirect(v.FieldByName(field.Name))\n\n\t\/\/ Embedded fields\n\tif field.Anonymous {\n\t\tRenderObject(b, fieldValue.Interface(), idPrefix)\n\t\treturn\n\t}\n\n\tif field.Tag.Get(\"editable\") != \"true\" {\n\t\treturn\n\t}\n\n\tb.WriteString(\"<div class='mountable'>\")\n\tdefer b.WriteString(\"<\/div>\")\n\n\tfieldType := field.Type.String()\n\n\t\/\/ String\n\tif fieldType == \"string\" {\n\t\trenderStringField(b, v, field, idPrefix, fieldValue)\n\t\treturn\n\t}\n\n\t\/\/ Int\n\tif fieldType == \"int\" {\n\t\tb.WriteString(components.InputNumber(idPrefix+field.Name, float64(fieldValue.Int()), field.Name, field.Tag.Get(\"tooltip\"), \"\", \"\", \"1\"))\n\t\treturn\n\t}\n\n\t\/\/ Float\n\tif fieldType == \"float64\" {\n\t\tb.WriteString(components.InputNumber(idPrefix+field.Name, fieldValue.Float(), field.Name, field.Tag.Get(\"tooltip\"), \"\", \"\", \"\"))\n\t\treturn\n\t}\n\n\t\/\/ Bool\n\tif fieldType == \"bool\" {\n\t\tif field.Name == \"IsDraft\" {\n\t\t\treturn\n\t\t}\n\n\t\tb.WriteString(components.InputBool(idPrefix+field.Name, fieldValue.Bool(), field.Name, field.Tag.Get(\"tooltip\")))\n\t\treturn\n\t}\n\n\t\/\/ Array of strings\n\tif fieldType == \"[]string\" {\n\t\tb.WriteString(components.InputTags(idPrefix+field.Name, fieldValue.Interface().([]string), field.Name, field.Tag.Get(\"tooltip\")))\n\t\treturn\n\t}\n\n\t\/\/ Any kind of array\n\tif strings.HasPrefix(fieldType, \"[]\") {\n\t\trenderSliceField(b, v, field, idPrefix, fieldType, fieldValue)\n\t\treturn\n\t}\n\n\t\/\/ Any custom field type will be recursively rendered via another RenderObject call\n\tb.WriteString(`<div class=\"widget-section\">`)\n\tb.WriteString(`<h3 class=\"widget-title\">` + field.Name + `<\/h3>`)\n\n\t\/\/ Indent the fields\n\tb.WriteString(`<div class=\"indent\">`)\n\tRenderObject(b, fieldValue.Interface(), field.Name+\".\")\n\tb.WriteString(`<\/div>`)\n\n\tb.WriteString(`<\/div>`)\n}\n\n\/\/ String field\nfunc renderStringField(b *bytes.Buffer, v *reflect.Value, field reflect.StructField, idPrefix string, fieldValue reflect.Value) {\n\tidType := field.Tag.Get(\"idType\")\n\n\t\/\/ Try to infer the ID type by the field name\n\tif idType == \"\" {\n\t\tswitch field.Name {\n\t\tcase \"AnimeID\":\n\t\t\tidType = \"Anime\"\n\n\t\tcase \"CharacterID\":\n\t\t\tidType = \"Character\"\n\t\t}\n\t}\n\n\tshowPreview := idType != \"\" && fieldValue.String() != \"\"\n\n\tif showPreview {\n\t\tb.WriteString(\"<div class='widget-section-with-preview'>\")\n\t}\n\n\t\/\/ Input field\n\tif field.Tag.Get(\"datalist\") != \"\" {\n\t\tdataList := field.Tag.Get(\"datalist\")\n\t\tvalues := arn.DataLists[dataList]\n\t\tb.WriteString(components.InputSelection(idPrefix+field.Name, fieldValue.String(), field.Name, field.Tag.Get(\"tooltip\"), values))\n\t} else if field.Tag.Get(\"type\") == \"textarea\" {\n\t\tb.WriteString(components.InputTextArea(idPrefix+field.Name, fieldValue.String(), field.Name, field.Tag.Get(\"tooltip\")))\n\t} else if field.Tag.Get(\"type\") == \"upload\" {\n\t\tendpoint := field.Tag.Get(\"endpoint\")\n\t\tid := v.FieldByName(\"ID\").String()\n\t\tendpoint = strings.Replace(endpoint, \":id\", id, 1)\n\n\t\tb.WriteString(components.InputFileUpload(idPrefix+field.Name, field.Name, field.Tag.Get(\"filetype\"), endpoint))\n\t} else {\n\t\tb.WriteString(components.InputText(idPrefix+field.Name, fieldValue.String(), field.Name, field.Tag.Get(\"tooltip\")))\n\t}\n\n\tif showPreview {\n\t\tb.WriteString(\"<div class='widget-section-preview'>\")\n\t}\n\n\t\/\/ Preview\n\tswitch idType {\n\tcase \"Anime\":\n\t\tanimeID := fieldValue.String()\n\t\tanime, err := arn.GetAnime(animeID)\n\n\t\tif err == nil {\n\t\t\tb.WriteString(components.EditFormImagePreview(anime.Link(), anime.ImageLink(\"small\"), true))\n\t\t}\n\n\tcase \"Character\":\n\t\tcharacterID := fieldValue.String()\n\t\tcharacter, err := arn.GetCharacter(characterID)\n\n\t\tif err == nil {\n\t\t\tb.WriteString(components.EditFormImagePreview(character.Link(), character.ImageLink(\"medium\"), false))\n\t\t}\n\n\tcase \"\":\n\t\tbreak\n\n\tdefault:\n\t\tfmt.Println(\"Error: Unknown idType tag: \" + idType)\n\t}\n\n\t\/\/ Close preview tags\n\tif showPreview {\n\t\tb.WriteString(\"<\/div><\/div>\")\n\t}\n}\n\n\/\/ Slice field\nfunc renderSliceField(b *bytes.Buffer, v *reflect.Value, field reflect.StructField, idPrefix string, fieldType string, fieldValue reflect.Value) {\n\tb.WriteString(`<div class=\"widget-section\">`)\n\tb.WriteString(`<h3 class=\"widget-title\">`)\n\tb.WriteString(field.Name)\n\tb.WriteString(`<\/h3>`)\n\n\tfor sliceIndex := 0; sliceIndex < fieldValue.Len(); sliceIndex++ {\n\t\tb.WriteString(`<div class=\"widget-section\">`)\n\n\t\tb.WriteString(`<div class=\"widget-title\">`)\n\n\t\t\/\/ Title\n\t\tb.WriteString(strconv.Itoa(sliceIndex+1) + \". \" + field.Name)\n\t\tb.WriteString(`<div class=\"spacer\"><\/div>`)\n\n\t\t\/\/ Remove button\n\t\tb.WriteString(`<button class=\"action\" title=\"Delete this ` + field.Name + `\" data-action=\"arrayRemove\" data-trigger=\"click\" data-field=\"` + field.Name + `\" data-index=\"`)\n\t\tb.WriteString(strconv.Itoa(sliceIndex))\n\t\tb.WriteString(`\">` + utils.RawIcon(\"trash\") + `<\/button>`)\n\n\t\tb.WriteString(`<\/div>`)\n\n\t\tarrayObj := fieldValue.Index(sliceIndex).Interface()\n\t\tarrayIDPrefix := fmt.Sprintf(\"%s[%d].\", field.Name, sliceIndex)\n\t\tRenderObject(b, arrayObj, arrayIDPrefix)\n\n\t\t\/\/ Preview\n\t\t\/\/ elementValue := fieldValue.Index(sliceIndex)\n\t\t\/\/ RenderArrayElement(b, &elementValue)\n\t\tif fieldType == \"[]*arn.ExternalMedia\" {\n\t\t\tb.WriteString(components.ExternalMedia(fieldValue.Index(sliceIndex).Interface().(*arn.ExternalMedia)))\n\t\t}\n\n\t\tb.WriteString(`<\/div>`)\n\t}\n\n\tb.WriteString(`<div class=\"buttons\">`)\n\tb.WriteString(`<button class=\"action\" data-action=\"arrayAppend\" data-trigger=\"click\" data-field=\"` + field.Name + `\">` + utils.Icon(\"plus\") + `Add ` + field.Name + `<\/button>`)\n\tb.WriteString(`<\/div>`)\n\n\tb.WriteString(`<\/div>`)\n}\n\n\/\/ findMainID finds the main ID of the object.\nfunc findMainID(t reflect.Type, v reflect.Value) reflect.Value {\n\tidField := v.FieldByName(\"ID\")\n\n\tif idField.IsValid() {\n\t\treturn reflect.Indirect(idField)\n\t}\n\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tfield := t.Field(i)\n\n\t\tif field.Tag.Get(\"mainID\") == \"true\" {\n\t\t\treturn reflect.Indirect(v.Field(i))\n\t\t}\n\t}\n\n\tpanic(\"Type \" + t.Name() + \" doesn't have a main ID!\")\n}\n<|endoftext|>"} {"text":"<commit_before>package brokers_test\n\nimport (\n\t\"testing\"\n\n\t\"errors\"\n\n\t\"github.com\/RichardKnop\/machinery\/v1\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/brokers\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/config\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/retry\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/sqs\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nvar (\n\ttestBroker brokers.Interface\n\ttestAWSSQSBroker *brokers.AWSSQSBroker\n\terrAWSSQSBroker *brokers.AWSSQSBroker\n\tcnf *config.Config\n\treceiveMessageOutput *sqs.ReceiveMessageOutput\n)\n\nfunc init() {\n\ttestAWSSQSBroker = brokers.TestAWSSQSBroker\n\terrAWSSQSBroker = brokers.ErrAWSSQSBroker\n\tcnf = brokers.TestConf\n\treceiveMessageOutput = brokers.ReceiveMessageOutput\n\ttestBroker = brokers.NewAWSSQSBroker(cnf)\n}\n\nfunc TestNewAWSSQSBroker(t *testing.T) {\n\tassert.IsType(t, testAWSSQSBroker, testBroker)\n}\n\nfunc TestPrivateFunc_continueReceivingMessages(t *testing.T) {\n\tqURL := testAWSSQSBroker.DefaultQueueURLForTest()\n\tdeliveries := make(chan *sqs.ReceiveMessageOutput)\n\tnextStep := make(chan int)\n\tgo func() {\n\t\tstopReceivingChan := testAWSSQSBroker.GetStopReceivingChanForTest()\n\t\tstopReceivingChan <- 1\n\t}()\n\n\tvar (\n\t\twhetherContinue bool\n\t\terr error\n\t)\n\tgo func() {\n\t\twhetherContinue, err = testAWSSQSBroker.ContinueReceivingMessagesForTest(qURL, deliveries)\n\t\tnextStep <- 1\n\t}()\n\tassert.False(t, whetherContinue)\n\tassert.Nil(t, err)\n\n\t<-nextStep\n\twhetherContinue, err = testAWSSQSBroker.ContinueReceivingMessagesForTest(qURL, deliveries)\n\tassert.True(t, whetherContinue)\n\tassert.Nil(t, err)\n\n\twhetherContinue, err = errAWSSQSBroker.ContinueReceivingMessagesForTest(qURL, deliveries)\n\tassert.True(t, whetherContinue)\n\tassert.NotNil(t, err)\n\n\toutputCopy := *receiveMessageOutput\n\treceiveMessageOutput.Messages = []*sqs.Message{}\n\twhetherContinue, err = testAWSSQSBroker.ContinueReceivingMessagesForTest(qURL, deliveries)\n\tassert.True(t, whetherContinue)\n\tassert.Nil(t, err)\n\t\/\/ recover original value\n\t*receiveMessageOutput = outputCopy\n\n}\n\nfunc TestPrivateFunc_consume(t *testing.T) {\n\tserver1, err := machinery.NewServer(cnf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\twk := server1.NewWorker(\"sms_worker\", 0)\n\tdeliveries := make(chan *sqs.ReceiveMessageOutput)\n\toutputCopy := *receiveMessageOutput\n\toutputCopy.Messages = []*sqs.Message{}\n\tgo func() { deliveries <- &outputCopy }()\n\n\t\/\/ an infinite loop will be executed only when there is no error\n\terr = testAWSSQSBroker.ConsumeForTest(deliveries, 0, wk)\n\tassert.NotNil(t, err)\n\n}\n\nfunc TestPrivateFunc_consumeOne(t *testing.T) {\n\tserver1, err := machinery.NewServer(cnf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\twk := server1.NewWorker(\"sms_worker\", 0)\n\terr = testAWSSQSBroker.ConsumeOneForTest(receiveMessageOutput, wk)\n\tassert.Nil(t, err)\n\n\toutputCopy := *receiveMessageOutput\n\toutputCopy.Messages = []*sqs.Message{}\n\terr = testAWSSQSBroker.ConsumeOneForTest(&outputCopy, wk)\n\tassert.NotNil(t, err)\n\n\toutputCopy.Messages = []*sqs.Message{\n\t\t{\n\t\t\tBody: aws.String(\"foo message\"),\n\t\t},\n\t}\n\terr = testAWSSQSBroker.ConsumeOneForTest(&outputCopy, wk)\n\tassert.NotNil(t, err)\n}\n\nfunc TestPrivateFunc_initializePool(t *testing.T) {\n\tconcurrency := 9\n\tpool := make(chan struct{}, concurrency)\n\ttestAWSSQSBroker.InitializePoolForTest(pool, concurrency)\n\tassert.Len(t, pool, concurrency)\n}\n\nfunc TestPrivateFunc_startConsuming(t *testing.T) {\n\tserver1, err := machinery.NewServer(cnf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twk := server1.NewWorker(\"sms_worker\", 0)\n\tretryFunc := testAWSSQSBroker.GetRetryFuncForTest()\n\tstopChan := testAWSSQSBroker.GetStopChanForTest()\n\tretryStopChan := testAWSSQSBroker.GetRetryStopChanForTest()\n\tassert.Nil(t, retryFunc)\n\ttestAWSSQSBroker.StartConsumingForTest(\"fooTag\", wk)\n\tassert.IsType(t, retryFunc, retry.Closure())\n\tassert.Equal(t, len(stopChan), 0)\n\tassert.Equal(t, len(retryStopChan), 0)\n}\n\nfunc TestPrivateFuncDefaultQueueURL(t *testing.T) {\n\tqURL := testAWSSQSBroker.DefaultQueueURLForTest()\n\n\tassert.EqualValues(t, *qURL, \"https:\/\/sqs.foo.amazonaws.com.cn\/test_queue\")\n}\n\nfunc TestPrivateFunc_stopReceiving(t *testing.T) {\n\tgo testAWSSQSBroker.StopReceivingForTest()\n\tstopReceivingChan := testAWSSQSBroker.GetStopReceivingChanForTest()\n\tassert.NotNil(t, <-stopReceivingChan)\n}\n\nfunc TestPrivateFunc_receiveMessage(t *testing.T) {\n\tqURL := testAWSSQSBroker.DefaultQueueURLForTest()\n\toutput, err := testAWSSQSBroker.ReceiveMessageForTest(qURL)\n\tassert.Nil(t, err)\n\tassert.Equal(t, receiveMessageOutput, output)\n}\n\nfunc TestPrivateFunc_consumeDeliveries(t *testing.T) {\n\tconcurrency := 0\n\tpool := make(chan struct{}, concurrency)\n\terrorsChan := make(chan error)\n\tdeliveries := make(chan *sqs.ReceiveMessageOutput)\n\tserver1, err := machinery.NewServer(cnf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\twk := server1.NewWorker(\"sms_worker\", 0)\n\tgo func() { deliveries <- receiveMessageOutput }()\n\twhetherContinue, err := testAWSSQSBroker.ConsumeDeliveriesForTest(deliveries, concurrency, wk, pool, errorsChan)\n\tassert.True(t, whetherContinue)\n\tassert.Nil(t, err)\n\n\tgo func() { errorsChan <- errors.New(\"foo error\") }()\n\twhetherContinue, err = testAWSSQSBroker.ConsumeDeliveriesForTest(deliveries, concurrency, wk, pool, errorsChan)\n\tassert.False(t, whetherContinue)\n\tassert.NotNil(t, err)\n\n\tgo func() { testAWSSQSBroker.GetStopChanForTest() <- 1 }()\n\twhetherContinue, err = testAWSSQSBroker.ConsumeDeliveriesForTest(deliveries, concurrency, wk, pool, errorsChan)\n\tassert.False(t, whetherContinue)\n\tassert.Nil(t, err)\n\n\toutputCopy := *receiveMessageOutput\n\toutputCopy.Messages = []*sqs.Message{}\n\tgo func() { deliveries <- &outputCopy }()\n\twhetherContinue, err = testAWSSQSBroker.ConsumeDeliveriesForTest(deliveries, concurrency, wk, pool, errorsChan)\n\te := <-errorsChan\n\tassert.True(t, whetherContinue)\n\tassert.NotNil(t, e)\n\tassert.Nil(t, err)\n\n\tgo func() { pool <- struct{}{} }()\n\tgo func() { deliveries <- receiveMessageOutput }()\n\twhetherContinue, err = testAWSSQSBroker.ConsumeDeliveriesForTest(deliveries, concurrency, wk, pool, errorsChan)\n\tp := <-pool\n\tassert.True(t, whetherContinue)\n\tassert.NotNil(t, p)\n\tassert.Nil(t, err)\n}\n\nfunc TestPrivateFunc_deleteOne(t *testing.T) {\n\terr := testAWSSQSBroker.DeleteOneForTest(receiveMessageOutput)\n\tassert.Nil(t, err)\n\n\terr = errAWSSQSBroker.DeleteOneForTest(receiveMessageOutput)\n\tassert.NotNil(t, err)\n}\n<commit_msg>add more control on channel flow<commit_after>package brokers_test\n\nimport (\n\t\"testing\"\n\n\t\"errors\"\n\n\t\"github.com\/RichardKnop\/machinery\/v1\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/brokers\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/config\"\n\t\"github.com\/RichardKnop\/machinery\/v1\/retry\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/sqs\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nvar (\n\ttestBroker brokers.Interface\n\ttestAWSSQSBroker *brokers.AWSSQSBroker\n\terrAWSSQSBroker *brokers.AWSSQSBroker\n\tcnf *config.Config\n\treceiveMessageOutput *sqs.ReceiveMessageOutput\n)\n\nfunc init() {\n\ttestAWSSQSBroker = brokers.TestAWSSQSBroker\n\terrAWSSQSBroker = brokers.ErrAWSSQSBroker\n\tcnf = brokers.TestConf\n\treceiveMessageOutput = brokers.ReceiveMessageOutput\n\ttestBroker = brokers.NewAWSSQSBroker(cnf)\n}\n\nfunc TestNewAWSSQSBroker(t *testing.T) {\n\tassert.IsType(t, testAWSSQSBroker, testBroker)\n}\n\nfunc TestPrivateFunc_continueReceivingMessages(t *testing.T) {\n\tqURL := testAWSSQSBroker.DefaultQueueURLForTest()\n\tdeliveries := make(chan *sqs.ReceiveMessageOutput)\n\tfirstStep := make(chan int)\n\tnextStep := make(chan int)\n\tgo func() {\n\t\tstopReceivingChan := testAWSSQSBroker.GetStopReceivingChanForTest()\n\t\tfirstStep <- 1\n\t\tstopReceivingChan <- 1\n\t}()\n\n\tvar (\n\t\twhetherContinue bool\n\t\terr error\n\t)\n\t<-firstStep\n\t\/\/ Test the case that a signal was received from stopReceivingChan\n\tgo func() {\n\t\twhetherContinue, err = testAWSSQSBroker.ContinueReceivingMessagesForTest(qURL, deliveries)\n\t\tnextStep <- 1\n\t}()\n\t<-nextStep\n\tassert.False(t, whetherContinue)\n\tassert.Nil(t, err)\n\n\t\/\/ Test the default condition\n\twhetherContinue, err = testAWSSQSBroker.ContinueReceivingMessagesForTest(qURL, deliveries)\n\tassert.True(t, whetherContinue)\n\tassert.Nil(t, err)\n\n\t\/\/ Test the error\n\twhetherContinue, err = errAWSSQSBroker.ContinueReceivingMessagesForTest(qURL, deliveries)\n\tassert.True(t, whetherContinue)\n\tassert.NotNil(t, err)\n\n\t\/\/ Test when there is no message\n\toutputCopy := *receiveMessageOutput\n\treceiveMessageOutput.Messages = []*sqs.Message{}\n\twhetherContinue, err = testAWSSQSBroker.ContinueReceivingMessagesForTest(qURL, deliveries)\n\tassert.True(t, whetherContinue)\n\tassert.Nil(t, err)\n\t\/\/ recover original value\n\t*receiveMessageOutput = outputCopy\n\n}\n\nfunc TestPrivateFunc_consume(t *testing.T) {\n\tserver1, err := machinery.NewServer(cnf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\twk := server1.NewWorker(\"sms_worker\", 0)\n\tdeliveries := make(chan *sqs.ReceiveMessageOutput)\n\toutputCopy := *receiveMessageOutput\n\toutputCopy.Messages = []*sqs.Message{}\n\tgo func() { deliveries <- &outputCopy }()\n\n\t\/\/ an infinite loop will be executed only when there is no error\n\terr = testAWSSQSBroker.ConsumeForTest(deliveries, 0, wk)\n\tassert.NotNil(t, err)\n\n}\n\nfunc TestPrivateFunc_consumeOne(t *testing.T) {\n\tserver1, err := machinery.NewServer(cnf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\twk := server1.NewWorker(\"sms_worker\", 0)\n\terr = testAWSSQSBroker.ConsumeOneForTest(receiveMessageOutput, wk)\n\tassert.Nil(t, err)\n\n\toutputCopy := *receiveMessageOutput\n\toutputCopy.Messages = []*sqs.Message{}\n\terr = testAWSSQSBroker.ConsumeOneForTest(&outputCopy, wk)\n\tassert.NotNil(t, err)\n\n\toutputCopy.Messages = []*sqs.Message{\n\t\t{\n\t\t\tBody: aws.String(\"foo message\"),\n\t\t},\n\t}\n\terr = testAWSSQSBroker.ConsumeOneForTest(&outputCopy, wk)\n\tassert.NotNil(t, err)\n}\n\nfunc TestPrivateFunc_initializePool(t *testing.T) {\n\tconcurrency := 9\n\tpool := make(chan struct{}, concurrency)\n\ttestAWSSQSBroker.InitializePoolForTest(pool, concurrency)\n\tassert.Len(t, pool, concurrency)\n}\n\nfunc TestPrivateFunc_startConsuming(t *testing.T) {\n\tserver1, err := machinery.NewServer(cnf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\twk := server1.NewWorker(\"sms_worker\", 0)\n\tretryFunc := testAWSSQSBroker.GetRetryFuncForTest()\n\tstopChan := testAWSSQSBroker.GetStopChanForTest()\n\tretryStopChan := testAWSSQSBroker.GetRetryStopChanForTest()\n\tassert.Nil(t, retryFunc)\n\ttestAWSSQSBroker.StartConsumingForTest(\"fooTag\", wk)\n\tassert.IsType(t, retryFunc, retry.Closure())\n\tassert.Equal(t, len(stopChan), 0)\n\tassert.Equal(t, len(retryStopChan), 0)\n}\n\nfunc TestPrivateFuncDefaultQueueURL(t *testing.T) {\n\tqURL := testAWSSQSBroker.DefaultQueueURLForTest()\n\n\tassert.EqualValues(t, *qURL, \"https:\/\/sqs.foo.amazonaws.com.cn\/test_queue\")\n}\n\nfunc TestPrivateFunc_stopReceiving(t *testing.T) {\n\tgo testAWSSQSBroker.StopReceivingForTest()\n\tstopReceivingChan := testAWSSQSBroker.GetStopReceivingChanForTest()\n\tassert.NotNil(t, <-stopReceivingChan)\n}\n\nfunc TestPrivateFunc_receiveMessage(t *testing.T) {\n\tqURL := testAWSSQSBroker.DefaultQueueURLForTest()\n\toutput, err := testAWSSQSBroker.ReceiveMessageForTest(qURL)\n\tassert.Nil(t, err)\n\tassert.Equal(t, receiveMessageOutput, output)\n}\n\nfunc TestPrivateFunc_consumeDeliveries(t *testing.T) {\n\tconcurrency := 0\n\tpool := make(chan struct{}, concurrency)\n\terrorsChan := make(chan error)\n\tdeliveries := make(chan *sqs.ReceiveMessageOutput)\n\tserver1, err := machinery.NewServer(cnf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\twk := server1.NewWorker(\"sms_worker\", 0)\n\tgo func() { deliveries <- receiveMessageOutput }()\n\twhetherContinue, err := testAWSSQSBroker.ConsumeDeliveriesForTest(deliveries, concurrency, wk, pool, errorsChan)\n\tassert.True(t, whetherContinue)\n\tassert.Nil(t, err)\n\n\tgo func() { errorsChan <- errors.New(\"foo error\") }()\n\twhetherContinue, err = testAWSSQSBroker.ConsumeDeliveriesForTest(deliveries, concurrency, wk, pool, errorsChan)\n\tassert.False(t, whetherContinue)\n\tassert.NotNil(t, err)\n\n\tgo func() { testAWSSQSBroker.GetStopChanForTest() <- 1 }()\n\twhetherContinue, err = testAWSSQSBroker.ConsumeDeliveriesForTest(deliveries, concurrency, wk, pool, errorsChan)\n\tassert.False(t, whetherContinue)\n\tassert.Nil(t, err)\n\n\toutputCopy := *receiveMessageOutput\n\toutputCopy.Messages = []*sqs.Message{}\n\tgo func() { deliveries <- &outputCopy }()\n\twhetherContinue, err = testAWSSQSBroker.ConsumeDeliveriesForTest(deliveries, concurrency, wk, pool, errorsChan)\n\te := <-errorsChan\n\tassert.True(t, whetherContinue)\n\tassert.NotNil(t, e)\n\tassert.Nil(t, err)\n\n\tgo func() { pool <- struct{}{} }()\n\tgo func() { deliveries <- receiveMessageOutput }()\n\twhetherContinue, err = testAWSSQSBroker.ConsumeDeliveriesForTest(deliveries, concurrency, wk, pool, errorsChan)\n\tp := <-pool\n\tassert.True(t, whetherContinue)\n\tassert.NotNil(t, p)\n\tassert.Nil(t, err)\n}\n\nfunc TestPrivateFunc_deleteOne(t *testing.T) {\n\terr := testAWSSQSBroker.DeleteOneForTest(receiveMessageOutput)\n\tassert.Nil(t, err)\n\n\terr = errAWSSQSBroker.DeleteOneForTest(receiveMessageOutput)\n\tassert.NotNil(t, err)\n}\n<|endoftext|>"} {"text":"<commit_before>package builder\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/docker\/cli\/cli\"\n\t\"github.com\/docker\/cli\/cli\/command\"\n\t\"github.com\/docker\/cli\/opts\"\n\t\"github.com\/docker\/docker\/api\/types\"\n\tunits \"github.com\/docker\/go-units\"\n\t\"github.com\/spf13\/cobra\"\n)\n\ntype pruneOptions struct {\n\tforce bool\n\tall bool\n\tfilter opts.FilterOpt\n\tkeepStorage opts.MemBytes\n}\n\n\/\/ NewPruneCommand returns a new cobra prune command for images\nfunc NewPruneCommand(dockerCli command.Cli) *cobra.Command {\n\toptions := pruneOptions{filter: opts.NewFilterOpt()}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"prune\",\n\t\tShort: \"Remove build cache\",\n\t\tArgs: cli.NoArgs,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tspaceReclaimed, output, err := runPrune(dockerCli, options)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif output != \"\" {\n\t\t\t\tfmt.Fprintln(dockerCli.Out(), output)\n\t\t\t}\n\t\t\tfmt.Fprintln(dockerCli.Out(), \"Total reclaimed space:\", units.HumanSize(float64(spaceReclaimed)))\n\t\t\treturn nil\n\t\t},\n\t\tAnnotations: map[string]string{\"version\": \"1.39\"},\n\t}\n\n\tflags := cmd.Flags()\n\tflags.BoolVarP(&options.force, \"force\", \"f\", false, \"Do not prompt for confirmation\")\n\tflags.BoolVarP(&options.all, \"all\", \"a\", false, \"Remove all unused images, not just dangling ones\")\n\tflags.Var(&options.filter, \"filter\", \"Provide filter values (e.g. 'max-age=24h')\")\n\tflags.Var(&options.keepStorage, \"keep-storage\", \"Amount of disk space to keep for cache\")\n\n\treturn cmd\n}\n\nconst (\n\tnormalWarning = `WARNING! This will remove all dangling build cache. Are you sure you want to continue?`\n\tallCacheWarning = `WARNING! This will remove all build cache. Are you sure you want to continue?`\n)\n\nfunc runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) {\n\tpruneFilters := options.filter.Value()\n\tpruneFilters = command.PruneFilters(dockerCli, pruneFilters)\n\n\twarning := normalWarning\n\tif options.all {\n\t\twarning = allCacheWarning\n\t}\n\tif !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) {\n\t\treturn 0, \"\", nil\n\t}\n\n\treport, err := dockerCli.Client().BuildCachePrune(context.Background(), types.BuildCachePruneOptions{\n\t\tAll: options.all,\n\t\tKeepStorage: options.keepStorage.Value(),\n\t\tFilters: pruneFilters,\n\t})\n\tif err != nil {\n\t\treturn 0, \"\", err\n\t}\n\n\tif len(report.CachesDeleted) > 0 {\n\t\tvar sb strings.Builder\n\t\tsb.WriteString(\"Deleted build cache objects:\\n\")\n\t\tfor _, id := range report.CachesDeleted {\n\t\t\tsb.WriteString(id)\n\t\t\tsb.WriteByte('\\n')\n\t\t}\n\t\toutput = sb.String()\n\t}\n\n\treturn report.SpaceReclaimed, output, nil\n}\n\n\/\/ CachePrune executes a prune command for build cache\nfunc CachePrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) {\n\treturn runPrune(dockerCli, pruneOptions{force: true, all: all, filter: filter})\n}\n<commit_msg>builder\/prune: rename max-age filter to unused-for in help output<commit_after>package builder\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/docker\/cli\/cli\"\n\t\"github.com\/docker\/cli\/cli\/command\"\n\t\"github.com\/docker\/cli\/opts\"\n\t\"github.com\/docker\/docker\/api\/types\"\n\tunits \"github.com\/docker\/go-units\"\n\t\"github.com\/spf13\/cobra\"\n)\n\ntype pruneOptions struct {\n\tforce bool\n\tall bool\n\tfilter opts.FilterOpt\n\tkeepStorage opts.MemBytes\n}\n\n\/\/ NewPruneCommand returns a new cobra prune command for images\nfunc NewPruneCommand(dockerCli command.Cli) *cobra.Command {\n\toptions := pruneOptions{filter: opts.NewFilterOpt()}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"prune\",\n\t\tShort: \"Remove build cache\",\n\t\tArgs: cli.NoArgs,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\tspaceReclaimed, output, err := runPrune(dockerCli, options)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif output != \"\" {\n\t\t\t\tfmt.Fprintln(dockerCli.Out(), output)\n\t\t\t}\n\t\t\tfmt.Fprintln(dockerCli.Out(), \"Total reclaimed space:\", units.HumanSize(float64(spaceReclaimed)))\n\t\t\treturn nil\n\t\t},\n\t\tAnnotations: map[string]string{\"version\": \"1.39\"},\n\t}\n\n\tflags := cmd.Flags()\n\tflags.BoolVarP(&options.force, \"force\", \"f\", false, \"Do not prompt for confirmation\")\n\tflags.BoolVarP(&options.all, \"all\", \"a\", false, \"Remove all unused images, not just dangling ones\")\n\tflags.Var(&options.filter, \"filter\", \"Provide filter values (e.g. 'unused-for=24h')\")\n\tflags.Var(&options.keepStorage, \"keep-storage\", \"Amount of disk space to keep for cache\")\n\n\treturn cmd\n}\n\nconst (\n\tnormalWarning = `WARNING! This will remove all dangling build cache. Are you sure you want to continue?`\n\tallCacheWarning = `WARNING! This will remove all build cache. Are you sure you want to continue?`\n)\n\nfunc runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) {\n\tpruneFilters := options.filter.Value()\n\tpruneFilters = command.PruneFilters(dockerCli, pruneFilters)\n\n\twarning := normalWarning\n\tif options.all {\n\t\twarning = allCacheWarning\n\t}\n\tif !options.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), warning) {\n\t\treturn 0, \"\", nil\n\t}\n\n\treport, err := dockerCli.Client().BuildCachePrune(context.Background(), types.BuildCachePruneOptions{\n\t\tAll: options.all,\n\t\tKeepStorage: options.keepStorage.Value(),\n\t\tFilters: pruneFilters,\n\t})\n\tif err != nil {\n\t\treturn 0, \"\", err\n\t}\n\n\tif len(report.CachesDeleted) > 0 {\n\t\tvar sb strings.Builder\n\t\tsb.WriteString(\"Deleted build cache objects:\\n\")\n\t\tfor _, id := range report.CachesDeleted {\n\t\t\tsb.WriteString(id)\n\t\t\tsb.WriteByte('\\n')\n\t\t}\n\t\toutput = sb.String()\n\t}\n\n\treturn report.SpaceReclaimed, output, nil\n}\n\n\/\/ CachePrune executes a prune command for build cache\nfunc CachePrune(dockerCli command.Cli, all bool, filter opts.FilterOpt) (uint64, string, error) {\n\treturn runPrune(dockerCli, pruneOptions{force: true, all: all, filter: filter})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/ This file is available under the Apache license.\n\npackage exporter\n\nimport (\n\t\"expvar\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com\/google\/mtail\/metrics\"\n)\n\nvar (\n\tstatsdHostPort = flag.String(\"statsd_hostport\", \"\",\n\t\t\"Host:port to statsd server to write metrics to.\")\n\n\tstatsdExportTotal = expvar.NewInt(\"statsd_export_total\")\n\tstatsdExportSuccess = expvar.NewInt(\"statsd_export_success\")\n)\n\nfunc metricToStatsd(hostname string, m *metrics.Metric, l *metrics.LabelSet) string {\n\t\/\/ TODO(jaq): handle units better, send timing as |ms\n\tm.RLock()\n\tdefer m.RUnlock()\n\treturn fmt.Sprintf(\"%s.%s:%d|c\",\n\t\tm.Program,\n\t\tformatLabels(m.Name, l.Labels, \".\", \".\"),\n\t\tl.Datum.Get())\n}\n<commit_msg>Add a todo for statsd gauges.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/ This file is available under the Apache license.\n\npackage exporter\n\nimport (\n\t\"expvar\"\n\t\"flag\"\n\t\"fmt\"\n\n\t\"github.com\/google\/mtail\/metrics\"\n)\n\nvar (\n\tstatsdHostPort = flag.String(\"statsd_hostport\", \"\",\n\t\t\"Host:port to statsd server to write metrics to.\")\n\n\tstatsdExportTotal = expvar.NewInt(\"statsd_export_total\")\n\tstatsdExportSuccess = expvar.NewInt(\"statsd_export_success\")\n)\n\nfunc metricToStatsd(hostname string, m *metrics.Metric, l *metrics.LabelSet) string {\n\t\/\/ TODO(jaq): handle units better, send timing as |ms\n\tm.RLock()\n\tdefer m.RUnlock()\n\t\/\/ TODO(jaq): handle gauge types\n\treturn fmt.Sprintf(\"%s.%s:%d|c\",\n\t\tm.Program,\n\t\tformatLabels(m.Name, l.Labels, \".\", \".\"),\n\t\tl.Datum.Get())\n}\n<|endoftext|>"} {"text":"<commit_before>package berlingo\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc do(ai AI, r io.Reader) (response *Response, response_json []byte, err error) {\n\n\tgame, err := NewGame(ai, r)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tgame.Do()\n\n\tresponse_json, err = game.Response.ToJson()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn response, response_json, nil\n}\n\n\/\/ Callback used to process an incoming HTTP request\nfunc serveHttpRequest(ai AI, w http.ResponseWriter, r *http.Request) {\n\n\tlog.Printf(\"HTTP: [%v] Processing %v %v\", r.RemoteAddr, r.Method, r.RequestURI)\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\n\tvar input io.Reader\n\tcontent_type := r.Header.Get(\"Content-Type\")\n\tswitch {\n\tcase r.Method == \"POST\" && content_type == \"application\/json\":\n\t\tinput = r.Body\n\tcase r.Method == \"POST\" && content_type == \"application\/x-www-form-urlencoded\":\n\t\t\/\/ Detect & work-around bug https:\/\/github.com\/thirdside\/berlin-ai\/issues\/4\n\t\tr.ParseForm()\n\t\tj := `{\n\t\t\t\t\"action\": \"` + r.Form[\"action\"][0] + `\",\n\t\t\t\t\"infos\": ` + r.Form[\"infos\"][0] + `,\n\t\t\t\t\"map\": ` + r.Form[\"map\"][0] + `,\n\t\t\t\t\"state\": ` + r.Form[\"state\"][0] + `\n\t\t\t}`\n\t\tinput = strings.NewReader(j)\n\tdefault:\n\t\tlog.Printf(\"HTTP: Replying with error\")\n\t\tw.Write([]byte(`{\"error\": \"Invalid request\"}`))\n\t\treturn\n\t}\n\n\t_, response_json, err := do(ai, input)\n\tif err != nil {\n\t\tlog.Printf(\"HTTP: Responding with error: %+v\\n\", err)\n\t\tw.Write([]byte(\"Error\"))\n\t} else {\n\t\tlog.Printf(\"HTTP: Responding with moves\\n\")\n\t\tw.Write(response_json)\n\t}\n\n}\n\n\/\/ ServeHttp serves the given AI over HTTP on the given port\nfunc ServeHttp(ai AI, port string) {\n\n\tlog.Println(\"Starting HTTP server on port\", port)\n\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tserveHttpRequest(ai, w, r)\n\t})\n\n\terr := http.ListenAndServe(\":\"+port, nil)\n\tif err != nil {\n\t\tlog.Println(\"HTTP Serving Error:\", err)\n\t}\n\n}\n\n\/\/ ServeHttp serves the given AI a single time\n\/\/ Request is read from the given filename\n\/\/ filename may be supplied as \"-\" to indicate STDIN\nfunc ServeFile(ai AI, filename string) {\n\n\tvar fh *os.File\n\tvar err error\n\n\tif filename == \"-\" {\n\t\tfh = os.Stdin\n\t} else {\n\t\tfh, err = os.Open(filename)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error opening\", filename, \": \", err)\n\t\t\treturn\n\t\t}\n\t\tdefer fh.Close()\n\t}\n\n\t_, response_json, err := do(ai, fh)\n\tif err != nil {\n\t\tlog.Println(\"Error processing request:\", err)\n\t\treturn\n\t}\n\tos.Stdout.Write(response_json)\n}\n\n\/\/ Serve will inspect the CLI arguments and automatically call either ServeHttp or ServeFile\nfunc Serve(ai AI) {\n\n\tport_or_filename := \"-\"\n\tif len(os.Args) >= 2 {\n\t\tport_or_filename = os.Args[1]\n\t}\n\n\t_, err := strconv.Atoi(port_or_filename)\n\tif err == nil {\n\t\tServeHttp(ai, port_or_filename)\n\t} else {\n\t\tServeFile(ai, port_or_filename)\n\t}\n}\n<commit_msg>Adding InitAppEngine<commit_after>package berlingo\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc do(ai AI, r io.Reader) (response *Response, response_json []byte, err error) {\n\n\tgame, err := NewGame(ai, r)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tgame.Do()\n\n\tresponse_json, err = game.Response.ToJson()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn response, response_json, nil\n}\n\n\/\/ Callback used to process an incoming HTTP request\nfunc serveHttpRequest(ai AI, w http.ResponseWriter, r *http.Request) {\n\n\tlog.Printf(\"HTTP: [%v] Processing %v %v\", r.RemoteAddr, r.Method, r.RequestURI)\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\n\tvar input io.Reader\n\tcontent_type := r.Header.Get(\"Content-Type\")\n\tswitch {\n\tcase r.Method == \"POST\" && content_type == \"application\/json\":\n\t\tinput = r.Body\n\tcase r.Method == \"POST\" && content_type == \"application\/x-www-form-urlencoded\":\n\t\t\/\/ Detect & work-around bug https:\/\/github.com\/thirdside\/berlin-ai\/issues\/4\n\t\tr.ParseForm()\n\t\tj := `{\n\t\t\t\t\"action\": \"` + r.Form[\"action\"][0] + `\",\n\t\t\t\t\"infos\": ` + r.Form[\"infos\"][0] + `,\n\t\t\t\t\"map\": ` + r.Form[\"map\"][0] + `,\n\t\t\t\t\"state\": ` + r.Form[\"state\"][0] + `\n\t\t\t}`\n\t\tinput = strings.NewReader(j)\n\tdefault:\n\t\tlog.Printf(\"HTTP: Replying with error\")\n\t\tw.Write([]byte(`{\"error\": \"Invalid request\"}`))\n\t\treturn\n\t}\n\n\t_, response_json, err := do(ai, input)\n\tif err != nil {\n\t\tlog.Printf(\"HTTP: Responding with error: %+v\\n\", err)\n\t\tw.Write([]byte(\"Error\"))\n\t} else {\n\t\tlog.Printf(\"HTTP: Responding with moves\\n\")\n\t\tw.Write(response_json)\n\t}\n\n}\n\nfunc InitAppEngine(ai AI) {\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tserveHttpRequest(ai, w, r)\n\t})\n}\n\n\/\/ ServeHttp serves the given AI over HTTP on the given port\nfunc ServeHttp(ai AI, port string) {\n\n\tlog.Println(\"Starting HTTP server on port\", port)\n\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tserveHttpRequest(ai, w, r)\n\t})\n\n\terr := http.ListenAndServe(\":\"+port, nil)\n\tif err != nil {\n\t\tlog.Println(\"HTTP Serving Error:\", err)\n\t}\n\n}\n\n\/\/ ServeHttp serves the given AI a single time\n\/\/ Request is read from the given filename\n\/\/ filename may be supplied as \"-\" to indicate STDIN\nfunc ServeFile(ai AI, filename string) {\n\n\tvar fh *os.File\n\tvar err error\n\n\tif filename == \"-\" {\n\t\tfh = os.Stdin\n\t} else {\n\t\tfh, err = os.Open(filename)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error opening\", filename, \": \", err)\n\t\t\treturn\n\t\t}\n\t\tdefer fh.Close()\n\t}\n\n\t_, response_json, err := do(ai, fh)\n\tif err != nil {\n\t\tlog.Println(\"Error processing request:\", err)\n\t\treturn\n\t}\n\tos.Stdout.Write(response_json)\n}\n\n\/\/ Serve will inspect the CLI arguments and automatically call either ServeHttp or ServeFile\nfunc Serve(ai AI) {\n\n\tport_or_filename := \"-\"\n\tif len(os.Args) >= 2 {\n\t\tport_or_filename = os.Args[1]\n\t}\n\n\t_, err := strconv.Atoi(port_or_filename)\n\tif err == nil {\n\t\tServeHttp(ai, port_or_filename)\n\t} else {\n\t\tServeFile(ai, port_or_filename)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package rfm69\n\nimport (\n\t\"bytes\"\n\t\"time\"\n\n\t\"github.com\/ecc1\/gpio\"\n\t\"github.com\/ecc1\/medtronic\/radio\"\n)\n\nconst (\n\tspiSpeed = 10000000 \/\/ Hz\n\tresetPin = 12 \/\/ Intel Edison GPIO for hardware reset\n\thwVersion = 0x0204\n)\n\ntype flavor struct{}\n\nfunc (hw flavor) Name() string {\n\treturn \"RFM69HCW\"\n}\n\nfunc (hw flavor) Speed() int {\n\treturn spiSpeed\n}\n\nfunc (hw flavor) ReadSingleAddress(addr byte) byte {\n\treturn addr\n}\n\nfunc (hw flavor) ReadBurstAddress(addr byte) byte {\n\treturn addr\n}\n\nfunc (hw flavor) WriteSingleAddress(addr byte) byte {\n\treturn SpiWriteMode | addr\n}\n\nfunc (hw flavor) WriteBurstAddress(addr byte) byte {\n\treturn SpiWriteMode | addr\n}\n\ntype Radio struct {\n\thw *radio.Hardware\n\tresetPin gpio.OutputPin\n\treceiveBuffer bytes.Buffer\n\tstats radio.Statistics\n\terr error\n}\n\nfunc Open() radio.Interface {\n\tr := &Radio{hw: radio.Open(flavor{})}\n\tv := r.Version()\n\tif r.Error() != nil {\n\t\treturn r\n\t}\n\tif v != hwVersion {\n\t\tr.hw.Close()\n\t\tr.SetError(radio.HardwareVersionError{Actual: v, Expected: hwVersion})\n\t\treturn r\n\t}\n\tr.resetPin, r.err = gpio.Output(resetPin, false)\n\tif r.Error() != nil {\n\t\tr.hw.Close()\n\t\treturn r\n\t}\n\treturn r\n}\n\nfunc (r *Radio) Version() uint16 {\n\tv := r.hw.ReadRegister(RegVersion)\n\treturn uint16(v>>4)<<8 | uint16(v&0xF)\n}\n\n\/\/ Reset module. See section 7.2.2 of data sheet.\nfunc (r *Radio) Reset() {\n\tif r.Error() != nil {\n\t\treturn\n\t}\n\tr.err = r.resetPin.Write(true)\n\tif r.Error() != nil {\n\t\tr.resetPin.Write(false)\n\t\treturn\n\t}\n\ttime.Sleep(100 * time.Microsecond)\n\tr.err = r.resetPin.Write(false)\n\tif r.Error() != nil {\n\t\treturn\n\t}\n\ttime.Sleep(5 * time.Millisecond)\n}\n\nfunc (r *Radio) Init(frequency uint32) {\n\tr.Reset()\n\tr.InitRF(frequency)\n}\n\nfunc (r *Radio) Statistics() radio.Statistics {\n\treturn r.stats\n}\n\nfunc (r *Radio) Error() error {\n\terr := r.hw.Error()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn r.err\n}\n\nfunc (r *Radio) SetError(err error) {\n\tr.hw.SetError(err)\n\tr.err = err\n}\n\nfunc (r *Radio) Hardware() *radio.Hardware {\n\treturn r.hw\n}\n<commit_msg>Lower SPI speed to avoid errors on Edison 3.5 kernel<commit_after>package rfm69\n\nimport (\n\t\"bytes\"\n\t\"time\"\n\n\t\"github.com\/ecc1\/gpio\"\n\t\"github.com\/ecc1\/medtronic\/radio\"\n)\n\nconst (\n\tspiSpeed = 6000000 \/\/ Hz\n\tresetPin = 12 \/\/ Intel Edison GPIO for hardware reset\n\thwVersion = 0x0204\n)\n\ntype flavor struct{}\n\nfunc (hw flavor) Name() string {\n\treturn \"RFM69HCW\"\n}\n\nfunc (hw flavor) Speed() int {\n\treturn spiSpeed\n}\n\nfunc (hw flavor) ReadSingleAddress(addr byte) byte {\n\treturn addr\n}\n\nfunc (hw flavor) ReadBurstAddress(addr byte) byte {\n\treturn addr\n}\n\nfunc (hw flavor) WriteSingleAddress(addr byte) byte {\n\treturn SpiWriteMode | addr\n}\n\nfunc (hw flavor) WriteBurstAddress(addr byte) byte {\n\treturn SpiWriteMode | addr\n}\n\ntype Radio struct {\n\thw *radio.Hardware\n\tresetPin gpio.OutputPin\n\treceiveBuffer bytes.Buffer\n\tstats radio.Statistics\n\terr error\n}\n\nfunc Open() radio.Interface {\n\tr := &Radio{hw: radio.Open(flavor{})}\n\tv := r.Version()\n\tif r.Error() != nil {\n\t\treturn r\n\t}\n\tif v != hwVersion {\n\t\tr.hw.Close()\n\t\tr.SetError(radio.HardwareVersionError{Actual: v, Expected: hwVersion})\n\t\treturn r\n\t}\n\tr.resetPin, r.err = gpio.Output(resetPin, false)\n\tif r.Error() != nil {\n\t\tr.hw.Close()\n\t\treturn r\n\t}\n\treturn r\n}\n\nfunc (r *Radio) Version() uint16 {\n\tv := r.hw.ReadRegister(RegVersion)\n\treturn uint16(v>>4)<<8 | uint16(v&0xF)\n}\n\n\/\/ Reset module. See section 7.2.2 of data sheet.\nfunc (r *Radio) Reset() {\n\tif r.Error() != nil {\n\t\treturn\n\t}\n\tr.err = r.resetPin.Write(true)\n\tif r.Error() != nil {\n\t\tr.resetPin.Write(false)\n\t\treturn\n\t}\n\ttime.Sleep(100 * time.Microsecond)\n\tr.err = r.resetPin.Write(false)\n\tif r.Error() != nil {\n\t\treturn\n\t}\n\ttime.Sleep(5 * time.Millisecond)\n}\n\nfunc (r *Radio) Init(frequency uint32) {\n\tr.Reset()\n\tr.InitRF(frequency)\n}\n\nfunc (r *Radio) Statistics() radio.Statistics {\n\treturn r.stats\n}\n\nfunc (r *Radio) Error() error {\n\terr := r.hw.Error()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn r.err\n}\n\nfunc (r *Radio) SetError(err error) {\n\tr.hw.SetError(err)\n\tr.err = err\n}\n\nfunc (r *Radio) Hardware() *radio.Hardware {\n\treturn r.hw\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/guardduty\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nfunc resourceAwsGuardDutyMember() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsGuardDutyMemberCreate,\n\t\tRead: resourceAwsGuardDutyMemberRead,\n\t\tDelete: resourceAwsGuardDutyMemberDelete,\n\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: schema.ImportStatePassthrough,\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"account_id\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validateAwsAccountId,\n\t\t\t},\n\t\t\t\"detector_id\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"email\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"invite\": {\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tRequired: false,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceAwsGuardDutyMemberCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).guarddutyconn\n\taccountID := d.Get(\"account_id\").(string)\n\tdetectorID := d.Get(\"detector_id\").(string)\n\n\tinput := guardduty.CreateMembersInput{\n\t\tAccountDetails: []*guardduty.AccountDetail{{\n\t\t\tAccountId: aws.String(accountID),\n\t\t\tEmail: aws.String(d.Get(\"email\").(string)),\n\t\t}},\n\t\tDetectorId: aws.String(detectorID),\n\t}\n\n\tlog.Printf(\"[DEBUG] Creating GuardDuty Member: %s\", input)\n\t_, err := conn.CreateMembers(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Creating GuardDuty Member failed: %s\", err.Error())\n\t}\n\td.SetId(fmt.Sprintf(\"%s:%s\", detectorID, accountID))\n\n\n\timi := &guardduty.InviteMembersInput{\n\t\tDetectorId: &detectorID,\n\t\tAccountIds: []*string{&accountID},\n\t\tMessage: aws.String(d.Get(\"message\").(string)),\n\t}\n\n\t_, err = conn.InviteMembers(imi)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Inviting GuardDuty Member failed: %s\", err.Error())\n\t}\n\n\treturn resourceAwsGuardDutyMemberRead(d, meta)\n}\n\nfunc resourceAwsGuardDutyMemberRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).guarddutyconn\n\n\taccountID, detectorID, err := decodeGuardDutyMemberID(d.Id())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinput := guardduty.GetMembersInput{\n\t\tAccountIds: []*string{aws.String(accountID)},\n\t\tDetectorId: aws.String(detectorID),\n\t}\n\n\tlog.Printf(\"[DEBUG] Reading GuardDuty Member: %s\", input)\n\tgmo, err := conn.GetMembers(&input)\n\tif err != nil {\n\t\tif isAWSErr(err, guardduty.ErrCodeBadRequestException, \"The request is rejected because the input detectorId is not owned by the current account.\") {\n\t\t\tlog.Printf(\"[WARN] GuardDuty detector %q not found, removing from state\", d.Id())\n\t\t\td.SetId(\"\")\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"Reading GuardDuty Member '%s' failed: %s\", d.Id(), err.Error())\n\t}\n\n\tif gmo.Members == nil || (len(gmo.Members) < 1) {\n\t\tlog.Printf(\"[WARN] GuardDuty Member %q not found, removing from state\", d.Id())\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\tmember := gmo.Members[0]\n\td.Set(\"account_id\", member.AccountId)\n\td.Set(\"detector_id\", detectorID)\n\td.Set(\"email\", member.Email)\n\td.Set(\"invite\", member.RelationshipStatus == aws.String(\"INVITED\"))\n\n\treturn nil\n}\n\nfunc resourceAwsGuardDutyMemberDelete(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).guarddutyconn\n\n\taccountID, detectorID, err := decodeGuardDutyMemberID(d.Id())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinput := guardduty.DeleteMembersInput{\n\t\tAccountIds: []*string{aws.String(accountID)},\n\t\tDetectorId: aws.String(detectorID),\n\t}\n\n\tlog.Printf(\"[DEBUG] Delete GuardDuty Member: %s\", input)\n\t_, err = conn.DeleteMembers(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Deleting GuardDuty Member '%s' failed: %s\", d.Id(), err.Error())\n\t}\n\treturn nil\n}\n\nfunc decodeGuardDutyMemberID(id string) (accountID, detectorID string, err error) {\n\tparts := strings.Split(id, \":\")\n\tif len(parts) != 2 {\n\t\terr = fmt.Errorf(\"GuardDuty Member ID must be of the form <Detector ID>:<Member AWS Account ID>, was provided: %s\", id)\n\t\treturn\n\t}\n\taccountID = parts[1]\n\tdetectorID = parts[0]\n\treturn\n}\n<commit_msg>Add lines to wait for email verification progress finishes<commit_after>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/guardduty\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"time\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n)\n\nfunc resourceAwsGuardDutyMember() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsGuardDutyMemberCreate,\n\t\tRead: resourceAwsGuardDutyMemberRead,\n\t\tDelete: resourceAwsGuardDutyMemberDelete,\n\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: schema.ImportStatePassthrough,\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"account_id\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validateAwsAccountId,\n\t\t\t},\n\t\t\t\"detector_id\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"email\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"relationshipStatus\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"invite\": {\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tDescription: \"Indicate whether to invite the account\",\n\t\t\t\tDefault: true,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"invitation_message\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t},\n\t\tTimeouts: &schema.ResourceTimeout{\n\t\t\tCreate: schema.DefaultTimeout(1 * time.Minute),\n\t\t},\n\t}\n}\n\nfunc resourceAwsGuardDutyMemberCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).guarddutyconn\n\taccountID := d.Get(\"account_id\").(string)\n\tdetectorID := d.Get(\"detector_id\").(string)\n\n\tinput := guardduty.CreateMembersInput{\n\t\tAccountDetails: []*guardduty.AccountDetail{{\n\t\t\tAccountId: aws.String(accountID),\n\t\t\tEmail: aws.String(d.Get(\"email\").(string)),\n\t\t}},\n\t\tDetectorId: aws.String(detectorID),\n\t}\n\n\tlog.Printf(\"[DEBUG] Creating GuardDuty Member: %s\", input)\n\t_, err := conn.CreateMembers(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Creating GuardDuty Member failed: %s\", err.Error())\n\t}\n\n\td.SetId(fmt.Sprintf(\"%s:%s\", detectorID, accountID))\n\n\tif !d.Get(\"invite\").(bool) {\n\t\treturn resourceAwsGuardDutyMemberRead(d, meta)\n\t}\n\n\timi := &guardduty.InviteMembersInput{\n\t\tDetectorId: &detectorID,\n\t\tAccountIds: []*string{&accountID},\n\t\tMessage: aws.String(d.Get(\"message\").(string)),\n\t}\n\n\t_, err = conn.InviteMembers(imi)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Inviting GuardDuty Member failed: %s\", err.Error())\n\t}\n\n\t\/\/ wait until e-mail verification finishes\n\treturn resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError {\n\t\tif d.Get(\"relationshipStatus\").(string) != \"EmailVerificationInProgress\" {\n\t\t\treturn nil\n\t\t}\n\n\t\tlog.Printf(\"[INFO] Email verification for %s is still in progress\", accountID)\n\t\treturn resource.NonRetryableError(resourceAwsGuardDutyMemberRead(d, meta))\n\t})\n}\n\nfunc resourceAwsGuardDutyMemberRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).guarddutyconn\n\n\taccountID, detectorID, err := decodeGuardDutyMemberID(d.Id())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinput := guardduty.GetMembersInput{\n\t\tAccountIds: []*string{aws.String(accountID)},\n\t\tDetectorId: aws.String(detectorID),\n\t}\n\n\tlog.Printf(\"[DEBUG] Reading GuardDuty Member: %s\", input)\n\tgmo, err := conn.GetMembers(&input)\n\tif err != nil {\n\t\tif isAWSErr(err, guardduty.ErrCodeBadRequestException, \"The request is rejected because the input detectorId is not owned by the current account.\") {\n\t\t\tlog.Printf(\"[WARN] GuardDuty detector %q not found, removing from state\", d.Id())\n\t\t\td.SetId(\"\")\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"Reading GuardDuty Member '%s' failed: %s\", d.Id(), err.Error())\n\t}\n\n\tif gmo.Members == nil || (len(gmo.Members) < 1) {\n\t\tlog.Printf(\"[WARN] GuardDuty Member %q not found, removing from state\", d.Id())\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\n\tmember := gmo.Members[0]\n\td.Set(\"account_id\", member.AccountId)\n\td.Set(\"detector_id\", detectorID)\n\td.Set(\"email\", member.Email)\n\td.Set(\"relationshipStatus\", member.RelationshipStatus)\n\n\treturn nil\n}\n\nfunc resourceAwsGuardDutyMemberDelete(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).guarddutyconn\n\n\taccountID, detectorID, err := decodeGuardDutyMemberID(d.Id())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinput := guardduty.DeleteMembersInput{\n\t\tAccountIds: []*string{aws.String(accountID)},\n\t\tDetectorId: aws.String(detectorID),\n\t}\n\n\tlog.Printf(\"[DEBUG] Delete GuardDuty Member: %s\", input)\n\t_, err = conn.DeleteMembers(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Deleting GuardDuty Member '%s' failed: %s\", d.Id(), err.Error())\n\t}\n\treturn nil\n}\n\nfunc decodeGuardDutyMemberID(id string) (accountID, detectorID string, err error) {\n\tparts := strings.Split(id, \":\")\n\tif len(parts) != 2 {\n\t\terr = fmt.Errorf(\"GuardDuty Member ID must be of the form <Detector ID>:<Member AWS Account ID>, was provided: %s\", id)\n\t\treturn\n\t}\n\taccountID = parts[1]\n\tdetectorID = parts[0]\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"context\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/mq\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/helper\/schema\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/helper\/validation\"\n\t\"github.com\/terraform-providers\/terraform-provider-aws\/aws\/internal\/keyvaluetags\"\n)\n\nfunc resourceAwsMqConfiguration() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsMqConfigurationCreate,\n\t\tRead: resourceAwsMqConfigurationRead,\n\t\tUpdate: resourceAwsMqConfigurationUpdate,\n\t\tDelete: resourceAwsMqConfigurationDelete,\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: schema.ImportStatePassthrough,\n\t\t},\n\t\tCustomizeDiff: func(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {\n\t\t\tif diff.HasChange(\"description\") {\n\t\t\t\treturn diff.SetNewComputed(\"latest_revision\")\n\t\t\t}\n\t\t\tif diff.HasChange(\"data\") {\n\t\t\t\to, n := diff.GetChange(\"data\")\n\t\t\t\tos := o.(string)\n\t\t\t\tns := n.(string)\n\t\t\t\tif !suppressXMLEquivalentConfig(\"data\", os, ns, nil) {\n\t\t\t\t\treturn diff.SetNewComputed(\"latest_revision\")\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"arn\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"data\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tDiffSuppressFunc: suppressXMLEquivalentConfig,\n\t\t\t},\n\t\t\t\"description\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"engine_type\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validation.StringInSlice([]string{\n\t\t\t\t\tmq.EngineTypeActivemq,\n\t\t\t\t}, true),\n\t\t\t},\n\t\t\t\"engine_version\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"latest_revision\": {\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"tags\": tagsSchema(),\n\t\t},\n\t}\n}\n\nfunc resourceAwsMqConfigurationCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).mqconn\n\n\tinput := mq.CreateConfigurationRequest{\n\t\tEngineType: aws.String(d.Get(\"engine_type\").(string)),\n\t\tEngineVersion: aws.String(d.Get(\"engine_version\").(string)),\n\t\tName: aws.String(d.Get(\"name\").(string)),\n\t}\n\n\tif v, ok := d.GetOk(\"tags\"); ok {\n\t\tinput.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().MqTags()\n\t}\n\n\tlog.Printf(\"[INFO] Creating MQ Configuration: %s\", input)\n\tout, err := conn.CreateConfiguration(&input)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.SetId(*out.Id)\n\td.Set(\"arn\", out.Arn)\n\n\treturn resourceAwsMqConfigurationUpdate(d, meta)\n}\n\nfunc resourceAwsMqConfigurationRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).mqconn\n\tignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig\n\n\tlog.Printf(\"[INFO] Reading MQ Configuration %s\", d.Id())\n\tout, err := conn.DescribeConfiguration(&mq.DescribeConfigurationInput{\n\t\tConfigurationId: aws.String(d.Id()),\n\t})\n\tif err != nil {\n\t\tif isAWSErr(err, mq.ErrCodeNotFoundException, \"\") {\n\t\t\tlog.Printf(\"[WARN] MQ Configuration %q not found, removing from state\", d.Id())\n\t\t\td.SetId(\"\")\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\td.Set(\"arn\", out.Arn)\n\td.Set(\"description\", out.LatestRevision.Description)\n\td.Set(\"engine_type\", out.EngineType)\n\td.Set(\"engine_version\", out.EngineVersion)\n\td.Set(\"name\", out.Name)\n\td.Set(\"latest_revision\", out.LatestRevision.Revision)\n\n\trOut, err := conn.DescribeConfigurationRevision(&mq.DescribeConfigurationRevisionInput{\n\t\tConfigurationId: aws.String(d.Id()),\n\t\tConfigurationRevision: aws.String(fmt.Sprintf(\"%d\", *out.LatestRevision.Revision)),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb, err := base64.StdEncoding.DecodeString(*rOut.Data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.Set(\"data\", string(b))\n\n\tif err := d.Set(\"tags\", keyvaluetags.MqKeyValueTags(out.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil {\n\t\treturn fmt.Errorf(\"error setting tags: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc resourceAwsMqConfigurationUpdate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).mqconn\n\n\trawData := d.Get(\"data\").(string)\n\tdata := base64.StdEncoding.EncodeToString([]byte(rawData))\n\n\tinput := mq.UpdateConfigurationRequest{\n\t\tConfigurationId: aws.String(d.Id()),\n\t\tData: aws.String(data),\n\t}\n\tif v, ok := d.GetOk(\"description\"); ok {\n\t\tinput.Description = aws.String(v.(string))\n\t}\n\n\tlog.Printf(\"[INFO] Updating MQ Configuration %s: %s\", d.Id(), input)\n\t_, err := conn.UpdateConfiguration(&input)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif d.HasChange(\"tags\") {\n\t\to, n := d.GetChange(\"tags\")\n\n\t\tif err := keyvaluetags.MqUpdateTags(conn, d.Get(\"arn\").(string), o, n); err != nil {\n\t\t\treturn fmt.Errorf(\"error updating MQ Broker (%s) tags: %s\", d.Get(\"arn\").(string), err)\n\t\t}\n\t}\n\n\treturn resourceAwsMqConfigurationRead(d, meta)\n}\n\nfunc resourceAwsMqConfigurationDelete(d *schema.ResourceData, meta interface{}) error {\n\t\/\/ TODO: Delete is not available in the API\n\n\treturn nil\n}\n\nfunc suppressXMLEquivalentConfig(k, old, new string, d *schema.ResourceData) bool {\n\tos, err := canonicalXML(old)\n\tif err != nil {\n\t\tlog.Printf(\"[ERR] Error getting cannonicalXML from state (%s): %s\", k, err)\n\t\treturn false\n\t}\n\tns, err := canonicalXML(new)\n\tif err != nil {\n\t\tlog.Printf(\"[ERR] Error getting cannonicalXML from config (%s): %s\", k, err)\n\t\treturn false\n\t}\n\n\treturn os == ns\n}\n<commit_msg>resource\/aws_mq_configuration: Don't update the configuration if only tags change. (#14850)<commit_after>package aws\n\nimport (\n\t\"context\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/mq\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/helper\/schema\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/helper\/validation\"\n\t\"github.com\/terraform-providers\/terraform-provider-aws\/aws\/internal\/keyvaluetags\"\n)\n\nfunc resourceAwsMqConfiguration() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsMqConfigurationCreate,\n\t\tRead: resourceAwsMqConfigurationRead,\n\t\tUpdate: resourceAwsMqConfigurationUpdate,\n\t\tDelete: resourceAwsMqConfigurationDelete,\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: schema.ImportStatePassthrough,\n\t\t},\n\t\tCustomizeDiff: func(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {\n\t\t\tif diff.HasChange(\"description\") {\n\t\t\t\treturn diff.SetNewComputed(\"latest_revision\")\n\t\t\t}\n\t\t\tif diff.HasChange(\"data\") {\n\t\t\t\to, n := diff.GetChange(\"data\")\n\t\t\t\tos := o.(string)\n\t\t\t\tns := n.(string)\n\t\t\t\tif !suppressXMLEquivalentConfig(\"data\", os, ns, nil) {\n\t\t\t\t\treturn diff.SetNewComputed(\"latest_revision\")\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"arn\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"data\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tDiffSuppressFunc: suppressXMLEquivalentConfig,\n\t\t\t},\n\t\t\t\"description\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"engine_type\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validation.StringInSlice([]string{\n\t\t\t\t\tmq.EngineTypeActivemq,\n\t\t\t\t}, true),\n\t\t\t},\n\t\t\t\"engine_version\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"latest_revision\": {\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"tags\": tagsSchema(),\n\t\t},\n\t}\n}\n\nfunc resourceAwsMqConfigurationCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).mqconn\n\n\tinput := mq.CreateConfigurationRequest{\n\t\tEngineType: aws.String(d.Get(\"engine_type\").(string)),\n\t\tEngineVersion: aws.String(d.Get(\"engine_version\").(string)),\n\t\tName: aws.String(d.Get(\"name\").(string)),\n\t}\n\n\tif v, ok := d.GetOk(\"tags\"); ok {\n\t\tinput.Tags = keyvaluetags.New(v.(map[string]interface{})).IgnoreAws().MqTags()\n\t}\n\n\tlog.Printf(\"[INFO] Creating MQ Configuration: %s\", input)\n\tout, err := conn.CreateConfiguration(&input)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.SetId(*out.Id)\n\td.Set(\"arn\", out.Arn)\n\n\treturn resourceAwsMqConfigurationUpdate(d, meta)\n}\n\nfunc resourceAwsMqConfigurationRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).mqconn\n\tignoreTagsConfig := meta.(*AWSClient).IgnoreTagsConfig\n\n\tlog.Printf(\"[INFO] Reading MQ Configuration %s\", d.Id())\n\tout, err := conn.DescribeConfiguration(&mq.DescribeConfigurationInput{\n\t\tConfigurationId: aws.String(d.Id()),\n\t})\n\tif err != nil {\n\t\tif isAWSErr(err, mq.ErrCodeNotFoundException, \"\") {\n\t\t\tlog.Printf(\"[WARN] MQ Configuration %q not found, removing from state\", d.Id())\n\t\t\td.SetId(\"\")\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\td.Set(\"arn\", out.Arn)\n\td.Set(\"description\", out.LatestRevision.Description)\n\td.Set(\"engine_type\", out.EngineType)\n\td.Set(\"engine_version\", out.EngineVersion)\n\td.Set(\"name\", out.Name)\n\td.Set(\"latest_revision\", out.LatestRevision.Revision)\n\n\trOut, err := conn.DescribeConfigurationRevision(&mq.DescribeConfigurationRevisionInput{\n\t\tConfigurationId: aws.String(d.Id()),\n\t\tConfigurationRevision: aws.String(fmt.Sprintf(\"%d\", *out.LatestRevision.Revision)),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb, err := base64.StdEncoding.DecodeString(*rOut.Data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\td.Set(\"data\", string(b))\n\n\tif err := d.Set(\"tags\", keyvaluetags.MqKeyValueTags(out.Tags).IgnoreAws().IgnoreConfig(ignoreTagsConfig).Map()); err != nil {\n\t\treturn fmt.Errorf(\"error setting tags: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc resourceAwsMqConfigurationUpdate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).mqconn\n\n\tif d.HasChanges(\"data\", \"description\") {\n\t\trawData := d.Get(\"data\").(string)\n\t\tdata := base64.StdEncoding.EncodeToString([]byte(rawData))\n\n\t\tinput := mq.UpdateConfigurationRequest{\n\t\t\tConfigurationId: aws.String(d.Id()),\n\t\t\tData: aws.String(data),\n\t\t}\n\t\tif v, ok := d.GetOk(\"description\"); ok {\n\t\t\tinput.Description = aws.String(v.(string))\n\t\t}\n\n\t\tlog.Printf(\"[INFO] Updating MQ Configuration %s: %s\", d.Id(), input)\n\t\t_, err := conn.UpdateConfiguration(&input)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif d.HasChange(\"tags\") {\n\t\to, n := d.GetChange(\"tags\")\n\n\t\tif err := keyvaluetags.MqUpdateTags(conn, d.Get(\"arn\").(string), o, n); err != nil {\n\t\t\treturn fmt.Errorf(\"error updating MQ Broker (%s) tags: %s\", d.Get(\"arn\").(string), err)\n\t\t}\n\t}\n\n\treturn resourceAwsMqConfigurationRead(d, meta)\n}\n\nfunc resourceAwsMqConfigurationDelete(d *schema.ResourceData, meta interface{}) error {\n\t\/\/ TODO: Delete is not available in the API\n\n\treturn nil\n}\n\nfunc suppressXMLEquivalentConfig(k, old, new string, d *schema.ResourceData) bool {\n\tos, err := canonicalXML(old)\n\tif err != nil {\n\t\tlog.Printf(\"[ERR] Error getting cannonicalXML from state (%s): %s\", k, err)\n\t\treturn false\n\t}\n\tns, err := canonicalXML(new)\n\tif err != nil {\n\t\tlog.Printf(\"[ERR] Error getting cannonicalXML from config (%s): %s\", k, err)\n\t\treturn false\n\t}\n\n\treturn os == ns\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage procfs\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/prometheus\/procfs\/internal\/util\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ Cgroup models one line from \/proc\/[pid]\/cgroup. Each Cgroup struct describes the the placement of a PID inside a\n\/\/ specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource\n\/\/ controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies\n\/\/ contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in\n\/\/ this hierarchy' (where==what path). By prefixing this path with the mount point of this hierarchy, you can locate\n\/\/ the relevant pseudo-files needed to read\/set the data for this PID in this hierarchy\n\/\/\n\/\/ Also see http:\/\/man7.org\/linux\/man-pages\/man7\/cgroups.7.html\ntype Cgroup struct {\n\t\/\/ HierarchyId for cgroups V2 is always 0. For cgroups v1 this is a unique\n\t\/\/ ID number that can be matched to a hierarchy ID found in \/proc\/cgroups\n\tHierarchyId int\n\t\/\/ Controllers using this hierarchy of processes. Controllers are also known as subsystems.\n\tControllers []string\n\t\/\/ Path of this control group, relative to the mount point of the various controllers\n\tPath string\n}\n\n\/\/ parseCgroupString parses each line of the \/proc\/[pid]\/cgroup file\nfunc parseCgroupString(cgroupStr string) (*Cgroup, error) {\n\tvar err error\n\n\tfields := strings.Split(cgroupStr, \":\")\n\tif len(fields) != 3 {\n\t\treturn nil, fmt.Errorf(\"incorrect number of fields (%d) in cgroup string: %s\", len(fields), cgroupStr)\n\t}\n\n\tcgroup := &Cgroup{\n\t\tPath: fields[2],\n\t\tControllers: nil,\n\t}\n\tcgroup.HierarchyId, err = strconv.Atoi(fields[0])\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse hierarchy ID\")\n\t}\n\tif fields[1] != \"\" {\n\t\tssNames := strings.Split(fields[1], \",\")\n\t\tcgroup.Controllers = append(cgroup.Controllers, ssNames...)\n\t}\n\treturn cgroup, nil\n}\n\n\/\/ parseCgroups reads each line of the \/proc\/[pid]\/cgroup file\nfunc parseCgroups(data []byte) ([]*Cgroup, error) {\n\tcgroups := []*Cgroup{}\n\tscanner := bufio.NewScanner(bytes.NewReader(data))\n\tfor scanner.Scan() {\n\t\tmountString := scanner.Text()\n\t\tparsedMounts, err := parseCgroupString(mountString)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcgroups = append(cgroups, parsedMounts)\n\t}\n\n\terr := scanner.Err()\n\treturn cgroups, err\n}\n\n\/\/ GetCgroups returns a Cgroup struct for all process control hierarchies running on this system\nfunc GetCgroups(pid int) ([]*Cgroup, error) {\n\tdata, err := util.ReadFileNoStat(fmt.Sprintf(\"\/proc\/%d\/cgroup\", pid))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parseCgroups(data)\n}\n<commit_msg>Improve cgroup documentation<commit_after>\/\/ Copyright 2019 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage procfs\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/prometheus\/procfs\/internal\/util\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ Cgroup models one line from \/proc\/[pid]\/cgroup. Each Cgroup struct describes the the placement of a PID inside a\n\/\/ specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource\n\/\/ controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies\n\/\/ contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in\n\/\/ this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of\n\/\/ *this specific* hierarchy, you can locate the relevant pseudo-files needed to read\/set the data for this PID\n\/\/ in this hierarchy\n\/\/\n\/\/ Also see http:\/\/man7.org\/linux\/man-pages\/man7\/cgroups.7.html\ntype Cgroup struct {\n\t\/\/ HierarchyId that can be matched to a named hierarchy using \/proc\/cgroups. Cgroups V2 only has one\n\t\/\/ hierarchy, so HierarchyId is always 0. For cgroups v1 this is a unique ID number\n\tHierarchyId int\n\t\/\/ Controllers using this hierarchy of processes. Controllers are also known as subsystems. For\n\t\/\/ Cgroups V2 this may be empty, as all active controllers use the same hierarchy\n\tControllers []string\n\t\/\/ Path of this control group, relative to the mount point of the cgroupfs representing this specific\n\t\/\/ hierarchy\n\tPath string\n}\n\n\/\/ parseCgroupString parses each line of the \/proc\/[pid]\/cgroup file\n\/\/ Line format is hierarchyID:[controller1,controller2]:path\nfunc parseCgroupString(cgroupStr string) (*Cgroup, error) {\n\tvar err error\n\n\tfields := strings.Split(cgroupStr, \":\")\n\tif len(fields) != 3 {\n\t\treturn nil, fmt.Errorf(\"incorrect number of fields (%d) in cgroup string: %s\", len(fields), cgroupStr)\n\t}\n\n\tcgroup := &Cgroup{\n\t\tPath: fields[2],\n\t\tControllers: nil,\n\t}\n\tcgroup.HierarchyId, err = strconv.Atoi(fields[0])\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse hierarchy ID\")\n\t}\n\tif fields[1] != \"\" {\n\t\tssNames := strings.Split(fields[1], \",\")\n\t\tcgroup.Controllers = append(cgroup.Controllers, ssNames...)\n\t}\n\treturn cgroup, nil\n}\n\n\/\/ parseCgroups reads each line of the \/proc\/[pid]\/cgroup file\nfunc parseCgroups(data []byte) ([]*Cgroup, error) {\n\tcgroups := []*Cgroup{}\n\tscanner := bufio.NewScanner(bytes.NewReader(data))\n\tfor scanner.Scan() {\n\t\tmountString := scanner.Text()\n\t\tparsedMounts, err := parseCgroupString(mountString)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcgroups = append(cgroups, parsedMounts)\n\t}\n\n\terr := scanner.Err()\n\treturn cgroups, err\n}\n\n\/\/ GetCgroups returns a Cgroup struct for all process control hierarchies running on this system\nfunc GetCgroups(pid int) ([]*Cgroup, error) {\n\tdata, err := util.ReadFileNoStat(fmt.Sprintf(\"\/proc\/%d\/cgroup\", pid))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parseCgroups(data)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar (\n\tskippedPaths = regexp.MustCompile(`Godeps|third_party|_gopath|_output|\\.git|cluster\/env.sh|vendor|test\/e2e\/generated\/bindata.go|site\/themes\/docsy`)\n\trootdir *string\n\tboilerplatedir *string\n)\n\nfunc init() {\n\tcwd, _ := os.Getwd()\n\tboilerplatedir = flag.String(\"boilerplate-dir\", cwd, \"Boilerplate directory for boilerplate files\")\n\tcwd = cwd + \"\/..\/..\/\"\n\trootdir = flag.String(\"rootdir\", filepath.Dir(cwd), \"Root directory to examine\")\n}\n\nfunc main() {\n\tflag.Parse()\n\trefs := getRefs(*boilerplatedir)\n\tif len(refs) == 0 {\n\t\tlog.Fatal(\"no references in \", *boilerplatedir)\n\t}\n\tfiles := getFileList(*rootdir, refs)\n\tfor _, file := range files {\n\t\tif !filePasses(file, refs[getFileExtension(file)]) {\n\t\t\tfmt.Println(file)\n\t\t}\n\t}\n\n}\n\nfunc getRefs(dir string) map[string][]byte {\n\trefs := make(map[string][]byte)\n\tfiles, _ := filepath.Glob(dir + \"\/*.txt\")\n\tfor _, filename := range files {\n\t\textension := strings.ToLower(strings.Split(filename, \".\")[1])\n\t\tdata, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tre := regexp.MustCompile(`\\r`)\n\t\trefs[extension] = re.ReplaceAll(data, nil)\n\t}\n\treturn refs\n}\n\nfunc filePasses(filename string, ref []byte) bool {\n\tvar re *regexp.Regexp\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tre = regexp.MustCompile(`\\r`)\n\tdata = re.ReplaceAll(data, nil)\n\n\textension := getFileExtension(filename)\n\n\t\/\/ remove build tags from the top of Go files\n\tif extension == \"go\" {\n\t\tre = regexp.MustCompile(`(?m)^(\/\/ \\+build.*\\n)+\\n`)\n\t\tdata = re.ReplaceAll(data, nil)\n\t}\n\n\t\/\/ remove shebang from the top of shell files\n\tif extension == \"sh\" {\n\t\tre = regexp.MustCompile(`(?m)^(#!.*\\n)\\n*`)\n\t\tdata = re.ReplaceAll(data, nil)\n\t}\n\n\t\/\/ if our test file is smaller than the reference it surely fails!\n\tif len(data) < len(ref) {\n\t\treturn false\n\t}\n\n\tdata = data[:len(ref)]\n\n\t\/\/ Search for \"Copyright YEAR\" which exists in the boilerplate, but shouldn't in the real thing\n\tre = regexp.MustCompile(`Copyright YEAR`)\n\tif re.Match(data) {\n\t\treturn false\n\t}\n\n\t\/\/ Replace all occurrences of the regex \"Copyright \\d{4}\" with \"Copyright YEAR\"\n\tre = regexp.MustCompile(`Copyright \\d{4}`)\n\tdata = re.ReplaceAll(data, []byte(`Copyright YEAR`))\n\n\treturn bytes.Equal(data, ref)\n}\n\n\/\/ get the file extensin or the filename if the file has no extension\nfunc getFileExtension(filename string) string {\n\tsplitted := strings.Split(filepath.Base(filename), \".\")\n\treturn strings.ToLower(splitted[len(splitted)-1])\n}\n\nfunc getFileList(rootDir string, extensions map[string][]byte) []string {\n\tvar outFiles []string\n\terr := filepath.Walk(rootDir, func(path string, info os.FileInfo, err error) error {\n\t\tif !info.IsDir() && !skippedPaths.MatchString(filepath.Dir(path)) {\n\t\t\tif extensions[strings.ToLower(getFileExtension(path))] != nil {\n\t\t\t\toutFiles = append(outFiles, path)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn outFiles\n}\n<commit_msg>Add comments to boilerplate.go<commit_after>\/*\nCopyright 2019 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar (\n\tskippedPaths = regexp.MustCompile(`Godeps|third_party|_gopath|_output|\\.git|cluster\/env.sh|vendor|test\/e2e\/generated\/bindata.go|site\/themes\/docsy`)\n\trootdir *string\n\tboilerplatedir *string\n)\n\nfunc init() {\n\tcwd, _ := os.Getwd()\n\tboilerplatedir = flag.String(\"boilerplate-dir\", cwd, \"Boilerplate directory for boilerplate files\")\n\tcwd = cwd + \"\/..\/..\/\"\n\trootdir = flag.String(\"rootdir\", filepath.Dir(cwd), \"Root directory to examine\")\n}\n\nfunc main() {\n\tflag.Parse()\n\trefs := getRefs(*boilerplatedir)\n\tif len(refs) == 0 {\n\t\tlog.Fatal(\"no references in \", *boilerplatedir)\n\t}\n\tfiles := getFileList(*rootdir, refs)\n\tfor _, file := range files {\n\t\tif !filePasses(file, refs[getFileExtension(file)]) {\n\t\t\tfmt.Println(file)\n\t\t}\n\t}\n\n}\n\n\/*\nThis function is to populate the refs variable with the\ndifferent boilerplate\/template for different extension.\n*\/\nfunc getRefs(dir string) map[string][]byte {\n\trefs := make(map[string][]byte)\n\tfiles, _ := filepath.Glob(dir + \"\/*.txt\")\n\tfor _, filename := range files {\n\t\textension := strings.ToLower(strings.Split(filename, \".\")[1])\n\t\tdata, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tre := regexp.MustCompile(`\\r`)\n\t\trefs[extension] = re.ReplaceAll(data, nil)\n\t}\n\treturn refs\n}\n\n\/*\nFunction to check whether the processed file\nis valid.\nReturning false means that the file does not the\nproper boilerplate template\n*\/\nfunc filePasses(filename string, ref []byte) bool {\n\tvar re *regexp.Regexp\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tre = regexp.MustCompile(`\\r`)\n\tdata = re.ReplaceAll(data, nil)\n\n\textension := getFileExtension(filename)\n\n\t\/\/ remove build tags from the top of Go files\n\tif extension == \"go\" {\n\t\tre = regexp.MustCompile(`(?m)^(\/\/ \\+build.*\\n)+\\n`)\n\t\tdata = re.ReplaceAll(data, nil)\n\t}\n\n\t\/\/ remove shebang from the top of shell files\n\tif extension == \"sh\" {\n\t\tre = regexp.MustCompile(`(?m)^(#!.*\\n)\\n*`)\n\t\tdata = re.ReplaceAll(data, nil)\n\t}\n\n\t\/\/ if our test file is smaller than the reference it surely fails!\n\tif len(data) < len(ref) {\n\t\treturn false\n\t}\n\n\tdata = data[:len(ref)]\n\n\t\/\/ Search for \"Copyright YEAR\" which exists in the boilerplate, but shouldn't in the real thing\n\tre = regexp.MustCompile(`Copyright YEAR`)\n\tif re.Match(data) {\n\t\treturn false\n\t}\n\n\t\/\/ Replace all occurrences of the regex \"Copyright \\d{4}\" with \"Copyright YEAR\"\n\tre = regexp.MustCompile(`Copyright \\d{4}`)\n\tdata = re.ReplaceAll(data, []byte(`Copyright YEAR`))\n\n\treturn bytes.Equal(data, ref)\n}\n\n\/**\nFunction to get the file extensin or the filename if the file has no extension\n*\/\nfunc getFileExtension(filename string) string {\n\tsplitted := strings.Split(filepath.Base(filename), \".\")\n\treturn strings.ToLower(splitted[len(splitted)-1])\n}\n\n\/**\nFunction to get all the files from the directory that heeds to be checked.\n*\/\nfunc getFileList(rootDir string, extensions map[string][]byte) []string {\n\tvar outFiles []string\n\terr := filepath.Walk(rootDir, func(path string, info os.FileInfo, err error) error {\n\t\tif !info.IsDir() && !skippedPaths.MatchString(filepath.Dir(path)) {\n\t\t\tif extensions[strings.ToLower(getFileExtension(path))] != nil {\n\t\t\t\toutFiles = append(outFiles, path)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn outFiles\n}\n<|endoftext|>"} {"text":"<commit_before>package handlers\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/ivan1993spb\/snake-server\/connections\"\n)\n\nconst URLRouteDeleteGameByID = \"\/games\/{id}\"\n\nconst MethodDeleteGame = http.MethodDelete\n\ntype responseDeleteGameHandler struct {\n\tID int `json:\"id\"`\n}\n\ntype deleteGameHandler struct {\n\tlogger logrus.FieldLogger\n\tgroupManager *connections.ConnectionGroupManager\n}\n\ntype ErrDeleteGameHandler string\n\nfunc (e ErrDeleteGameHandler) Error() string {\n\treturn \"delete game handler error: \" + string(e)\n}\n\nfunc NewDeleteGameHandler(logger logrus.FieldLogger, groupManager *connections.ConnectionGroupManager) http.Handler {\n\treturn &deleteGameHandler{\n\t\tlogger: logger,\n\t\tgroupManager: groupManager,\n\t}\n}\n\nfunc (h *deleteGameHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\n\tid, err := strconv.Atoi(vars[\"id\"])\n\tif err != nil {\n\t\th.logger.Error(ErrDeleteGameHandler(err.Error()))\n\t\thttp.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)\n\t\treturn\n\t}\n\n\th.logger.Infoln(\"group id to delete:\", id)\n\n\tgroup, err := h.groupManager.Get(id)\n\tif err != nil {\n\t\th.logger.Error(ErrDeleteGameHandler(err.Error()))\n\n\t\tswitch err {\n\t\tcase connections.ErrNotFoundGroup:\n\t\t\thttp.NotFound(w, r)\n\t\tdefault:\n\t\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\n\tif !group.IsEmpty() {\n\t\th.logger.Warn(ErrDeleteGameHandler(\"try to delete not empty group\"))\n\t\th.logger.Warnf(\"there is %d opened connections in group %d\", group.GetCount(), id)\n\t\thttp.Error(w, http.StatusText(http.StatusServiceUnavailable), http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\n\tif err := h.groupManager.Delete(group); err != nil {\n\t\th.logger.Error(ErrDeleteGameHandler(err.Error()))\n\n\t\tswitch err {\n\t\tcase connections.ErrDeleteNotFoundGroup:\n\t\t\thttp.NotFound(w, r)\n\t\tcase connections.ErrDeleteNotEmptyGroup:\n\t\t\thttp.Error(w, http.StatusText(http.StatusServiceUnavailable), http.StatusServiceUnavailable)\n\t\tdefault:\n\t\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\t}\n\t\treturn\n\t}\n\n\th.logger.Info(\"stop group\")\n\tgroup.Stop()\n\n\th.logger.Infoln(\"group deleted:\", id)\n\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\n\terr = json.NewEncoder(w).Encode(responseDeleteGameHandler{\n\t\tID: id,\n\t})\n\tif err != nil {\n\t\th.logger.Error(ErrDeleteGameHandler(err.Error()))\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n<commit_msg>Create JSON error response in deleteGameHandler<commit_after>package handlers\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/ivan1993spb\/snake-server\/connections\"\n)\n\nconst URLRouteDeleteGameByID = \"\/games\/{id}\"\n\nconst MethodDeleteGame = http.MethodDelete\n\ntype responseDeleteGameHandler struct {\n\tID int `json:\"id\"`\n}\n\ntype responseDeleteGameHandlerError struct {\n\tCode int `json:\"code\"`\n\tText string `json:\"text\"`\n\tID int `json:\"id\"`\n}\n\ntype deleteGameHandler struct {\n\tlogger logrus.FieldLogger\n\tgroupManager *connections.ConnectionGroupManager\n}\n\ntype ErrDeleteGameHandler string\n\nfunc (e ErrDeleteGameHandler) Error() string {\n\treturn \"delete game handler error: \" + string(e)\n}\n\nfunc NewDeleteGameHandler(logger logrus.FieldLogger, groupManager *connections.ConnectionGroupManager) http.Handler {\n\treturn &deleteGameHandler{\n\t\tlogger: logger,\n\t\tgroupManager: groupManager,\n\t}\n}\n\nfunc (h *deleteGameHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\n\tid, err := strconv.Atoi(vars[\"id\"])\n\tif err != nil {\n\t\th.logger.Error(ErrDeleteGameHandler(err.Error()))\n\t\th.writeResponseJSON(w, http.StatusBadRequest, &responseDeleteGameHandlerError{\n\t\t\tCode: http.StatusBadRequest,\n\t\t\tText: \"invalid game id\",\n\t\t\tID: id,\n\t\t})\n\t\treturn\n\t}\n\n\th.logger.Infoln(\"group id to delete:\", id)\n\n\tgroup, err := h.groupManager.Get(id)\n\tif err != nil {\n\t\th.logger.Error(ErrDeleteGameHandler(err.Error()))\n\n\t\tswitch err {\n\t\tcase connections.ErrNotFoundGroup:\n\t\t\th.writeResponseJSON(w, http.StatusNotFound, &responseDeleteGameHandlerError{\n\t\t\t\tCode: http.StatusNotFound,\n\t\t\t\tText: \"game not found\",\n\t\t\t\tID: id,\n\t\t\t})\n\t\tdefault:\n\t\t\th.writeResponseJSON(w, http.StatusInternalServerError, &responseDeleteGameHandlerError{\n\t\t\t\tCode: http.StatusInternalServerError,\n\t\t\t\tText: \"unknown error\",\n\t\t\t\tID: id,\n\t\t\t})\n\t\t}\n\t\treturn\n\t}\n\n\tif !group.IsEmpty() {\n\t\th.logger.Warn(ErrDeleteGameHandler(\"try to delete not empty group\"))\n\t\th.logger.Warnf(\"there is %d opened connections in group %d\", group.GetCount(), id)\n\t\th.writeResponseJSON(w, http.StatusServiceUnavailable, &responseDeleteGameHandlerError{\n\t\t\tCode: http.StatusServiceUnavailable,\n\t\t\tText: \"cannot delete not empty game\",\n\t\t\tID: id,\n\t\t})\n\t\treturn\n\t}\n\n\tif err := h.groupManager.Delete(group); err != nil {\n\t\th.logger.Error(ErrDeleteGameHandler(err.Error()))\n\n\t\tswitch err {\n\t\tcase connections.ErrDeleteNotFoundGroup:\n\t\t\th.writeResponseJSON(w, http.StatusNotFound, &responseDeleteGameHandlerError{\n\t\t\t\tCode: http.StatusNotFound,\n\t\t\t\tText: \"game not found\",\n\t\t\t\tID: id,\n\t\t\t})\n\t\tcase connections.ErrDeleteNotEmptyGroup:\n\t\t\th.writeResponseJSON(w, http.StatusServiceUnavailable, &responseDeleteGameHandlerError{\n\t\t\t\tCode: http.StatusServiceUnavailable,\n\t\t\t\tText: \"cannot delete not empty game\",\n\t\t\t\tID: id,\n\t\t\t})\n\t\tdefault:\n\t\t\th.writeResponseJSON(w, http.StatusInternalServerError, &responseDeleteGameHandlerError{\n\t\t\t\tCode: http.StatusInternalServerError,\n\t\t\t\tText: \"unknown error\",\n\t\t\t\tID: id,\n\t\t\t})\n\t\t}\n\t\treturn\n\t}\n\n\th.logger.Info(\"stop group\")\n\tgroup.Stop()\n\n\th.logger.Infoln(\"group deleted:\", id)\n\n\th.writeResponseJSON(w, http.StatusOK, responseDeleteGameHandler{\n\t\tID: id,\n\t})\n}\n\nfunc (h *deleteGameHandler) writeResponseJSON(w http.ResponseWriter, statusCode int, response interface{}) {\n\tw.WriteHeader(statusCode)\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\n\tif err := json.NewEncoder(w).Encode(response); err != nil {\n\t\th.logger.Error(ErrDeleteGameHandler(err.Error()))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package seccomp\n\nimport (\n\t\"fmt\"\n)\n\nconst (\n\tF_OK = 0x0\n\tR_OK = 0x4\n\tW_OK = 0x2\n\tX_OK = 0x1\n\tEFF_ONLY_OK = 0x08\n)\n\nfunc render_access(pid int, args RegisterArgs) (string, error) {\n\n\tflags := map[uint]string{\n\t\tR_OK: \"R_OK\",\n\t\tW_OK: \"W_OK\",\n\t\tX_OK: \"X_OK\",\n\t\tEFF_ONLY_OK: \"EFF_ONLY_OK\",\n\t}\n\n\tmode := args[1]\n\tpath, err := readStringArg(pid, uintptr(args[0]))\n\n\tif (err != nil) {\n\t\treturn \"\", err\t\n\t}\n\n\tfound := false\n\tvar flagstr string\n\n\tif mode == F_OK {\n\t\tflagstr = \"F_OK\"\n\t} else {\n\n\t\tfor flag := range flags {\n\t\t\tif (mode & uint64(flag)) == mode {\n\t\t\t\tif found == true {\n\t\t\t\t\tflagstr += \"|\"\n\t\t\t\t}\n\t\t\t\tflagstr += flags[flag]\n\t\t\t\tfound = true\n\t\t\t}\n\t\t}\n\n\t}\n\tcallrep := fmt.Sprintf(\"access(\\\"%s\\\", %s)\", path, flagstr)\n\n\treturn fmt.Sprintf(\"==============================================\\nseccomp hit on sandbox pid %v (%v) syscall %v (%v): \\n\\n%s\\nI ==============================================\\n\\n\", pid, getProcessCmdLine(pid), \"access\", 1, callrep), nil\n}\n<commit_msg>Function for rendering an invocation of access(2) in the Oz seccomp tracer.<commit_after>package seccomp\n\nimport (\n\t\"fmt\"\n\t\"syscall\"\n)\n\nconst (\n\tF_OK = 0x0\n\tR_OK = 0x4\n\tW_OK = 0x2\n\tX_OK = 0x1\n\tEFF_ONLY_OK = 0x08\n)\n\nfunc render_access(pid int, args RegisterArgs) (string, error) {\n\n\tflags := map[uint]string{\n\t\tR_OK: \"R_OK\",\n\t\tW_OK: \"W_OK\",\n\t\tX_OK: \"X_OK\",\n\t\tEFF_ONLY_OK: \"EFF_ONLY_OK\",\n\t}\n\n\tmode := args[1]\n\tpath, err := readStringArg(pid, uintptr(args[0]))\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tflagstr := \"\"\n\tif mode == F_OK {\n\t\tflagstr = \"F_OK\"\n\t} else {\n\t\tflagstr = renderFlags(flags, uint(mode))\n\t}\n\tcallrep := fmt.Sprintf(\"access(\\\"%s\\\", %s)\", path, flagstr)\n\n\treturn fmt.Sprintf(\"==============================================\\nseccomp hit on sandbox pid %v (%v) syscall %v (%v): \\n\\n%s\\nI ==============================================\\n\\n\", pid, getProcessCmdLine(pid), \"access\", syscall.SYS_ACCESS, callrep), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package autonat\n\nimport (\n\t\"time\"\n)\n\n\/\/ config holds configurable options for the autonat subsystem.\ntype config struct {\n\tgetAddressFunc GetAddrs\n\tbootDelay time.Duration\n\tretryInterval time.Duration\n\trefreshInterval time.Duration\n\trequestTimeout time.Duration\n}\n\nvar defaults = func(c *config) error {\n\tc.bootDelay = 15 * time.Second\n\tc.retryInterval = 90 * time.Second\n\tc.refreshInterval = 15 * time.Minute\n\tc.requestTimeout = 30 * time.Second\n\n\treturn nil\n}\n\n\/\/ WithAddresses allows overriding which Addresses the AutoNAT client beliieves\n\/\/ are \"its own\". Useful for testing, or for more exotic port-forwarding\n\/\/ scenarios where the host may be listening on different ports than it wants\n\/\/ to externally advertise or verify connectability on.\nfunc WithAddresses(addrFunc GetAddrs) Option {\n\treturn func(c *config) error {\n\t\tc.getAddressFunc = addrFunc\n\t\treturn nil\n\t}\n}\n\n\/\/ WithSchedule configures how agressively probes will be made to verify the\n\/\/ address of the host. retryInterval indicates how often probes should be made\n\/\/ when the host lacks confident about its address, while refresh interval\n\/\/ is the schedule of periodic probes when the host believes it knows its\n\/\/ steady-state reachability.\nfunc WithSchedule(retryInterval, refreshInterval time.Duration) Option {\n\treturn func(c *config) error {\n\t\tc.retryInterval = retryInterval\n\t\tc.refreshInterval = refreshInterval\n\t\treturn nil\n\t}\n}\n\n\/\/ WithoutStartupDelay removes the initial delay the NAT subsystem typically\n\/\/ uses as a buffer for ensuring that connectivity and guesses as to the hosts\n\/\/ local interfaces have settled down during startup.\nfunc WithoutStartupDelay() Option {\n\treturn func(c *config) error {\n\t\tc.bootDelay = 1\n\t\treturn nil\n\t}\n}\n<commit_msg>remove a foot-gun<commit_after>package autonat\n\nimport (\n\t\"errors\"\n\t\"time\"\n)\n\n\/\/ config holds configurable options for the autonat subsystem.\ntype config struct {\n\tgetAddressFunc GetAddrs\n\tbootDelay time.Duration\n\tretryInterval time.Duration\n\trefreshInterval time.Duration\n\trequestTimeout time.Duration\n}\n\nvar defaults = func(c *config) error {\n\tc.bootDelay = 15 * time.Second\n\tc.retryInterval = 90 * time.Second\n\tc.refreshInterval = 15 * time.Minute\n\tc.requestTimeout = 30 * time.Second\n\n\treturn nil\n}\n\n\/\/ WithAddresses allows overriding which Addresses the AutoNAT client beliieves\n\/\/ are \"its own\". Useful for testing, or for more exotic port-forwarding\n\/\/ scenarios where the host may be listening on different ports than it wants\n\/\/ to externally advertise or verify connectability on.\nfunc WithAddresses(addrFunc GetAddrs) Option {\n\treturn func(c *config) error {\n\t\tif addrFunc == nil {\n\t\t\treturn errors.New(\"invalid address function supplied\")\n\t\t}\n\t\tc.getAddressFunc = addrFunc\n\t\treturn nil\n\t}\n}\n\n\/\/ WithSchedule configures how agressively probes will be made to verify the\n\/\/ address of the host. retryInterval indicates how often probes should be made\n\/\/ when the host lacks confident about its address, while refresh interval\n\/\/ is the schedule of periodic probes when the host believes it knows its\n\/\/ steady-state reachability.\nfunc WithSchedule(retryInterval, refreshInterval time.Duration) Option {\n\treturn func(c *config) error {\n\t\tc.retryInterval = retryInterval\n\t\tc.refreshInterval = refreshInterval\n\t\treturn nil\n\t}\n}\n\n\/\/ WithoutStartupDelay removes the initial delay the NAT subsystem typically\n\/\/ uses as a buffer for ensuring that connectivity and guesses as to the hosts\n\/\/ local interfaces have settled down during startup.\nfunc WithoutStartupDelay() Option {\n\treturn func(c *config) error {\n\t\tc.bootDelay = 1\n\t\treturn nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package raftgorums\n\nimport (\n\t\"container\/list\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/go-kit\/kit\/metrics\"\n\t\"github.com\/relab\/raft\"\n\t\"github.com\/relab\/raft\/commonpb\"\n\tpb \"github.com\/relab\/raft\/raftgorums\/raftpb\"\n)\n\n\/\/ RequestVote implements gorums.RaftServer.\nfunc (r *Raft) RequestVote(ctx context.Context, req *pb.RequestVoteRequest) (*pb.RequestVoteResponse, error) {\n\treturn r.HandleRequestVoteRequest(req), nil\n}\n\n\/\/ AppendEntries implements gorums.RaftServer.\nfunc (r *Raft) AppendEntries(ctx context.Context, req *pb.AppendEntriesRequest) (*pb.AppendEntriesResponse, error) {\n\treturn r.HandleAppendEntriesRequest(req), nil\n}\n\n\/\/ InstallSnapshot implements gorums.RaftServer.\nfunc (r *Raft) InstallSnapshot(ctx context.Context, snapshot *commonpb.Snapshot) (*pb.InstallSnapshotResponse, error) {\n\treturn r.HandleInstallSnapshotRequest(snapshot), nil\n}\n\n\/\/ CatchMeUp implements gorums.RaftServer.\nfunc (r *Raft) CatchMeUp(ctx context.Context, req *pb.CatchMeUpRequest) (res *pb.Empty, err error) {\n\tres = &pb.Empty{}\n\tr.match[r.mem.getNodeID(req.FollowerID)] <- req.NextIndex\n\treturn\n}\n\n\/\/ HandleRequestVoteRequest must be called when receiving a RequestVoteRequest,\n\/\/ the return value must be delivered to the requester.\nfunc (r *Raft) HandleRequestVoteRequest(req *pb.RequestVoteRequest) *pb.RequestVoteResponse {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tif r.metricsEnabled {\n\t\ttimer := metrics.NewTimer(rmetrics.rvreq)\n\t\tdefer timer.ObserveDuration()\n\t}\n\n\tvar voteGranted bool\n\tdefer func() {\n\t\tr.logger.WithFields(logrus.Fields{\n\t\t\t\"currentterm\": r.currentTerm,\n\t\t\t\"requestterm\": req.Term,\n\t\t\t\"prevote\": req.PreVote,\n\t\t\t\"candidateid\": req.CandidateID,\n\t\t\t\"votegranted\": voteGranted,\n\t\t}).Infoln(\"Got vote request\")\n\t}()\n\n\t\/\/ #RV1 Reply false if term < currentTerm.\n\tif req.Term < r.currentTerm {\n\t\treturn &pb.RequestVoteResponse{Term: r.currentTerm}\n\t}\n\n\t\/\/ #A2 If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower.\n\tif req.Term > r.currentTerm && !req.PreVote {\n\t\tr.becomeFollower(req.Term)\n\t}\n\n\tvoted := r.votedFor != None\n\n\tif req.PreVote && (r.heardFromLeader || (voted && req.Term == r.currentTerm)) {\n\t\t\/\/ We don't grant pre-votes if we have recently heard from a\n\t\t\/\/ leader or already voted in the pre-term.\n\t\treturn &pb.RequestVoteResponse{Term: r.currentTerm}\n\t}\n\n\tlastIndex := r.storage.NextIndex() - 1\n\tlastLogTerm := r.logTerm(lastIndex)\n\n\t\/\/ We can grant a vote in the same term, as long as it's to the same\n\t\/\/ candidate. This is useful if the response was lost, and the candidate\n\t\/\/ sends another request.\n\talreadyVotedForCandidate := r.votedFor == req.CandidateID\n\n\t\/\/ If the logs have last entries with different terms, the log with the\n\t\/\/ later term is more up-to-date.\n\tlaterTerm := req.LastLogTerm > lastLogTerm\n\n\t\/\/ If the logs end with the same term, whichever log is longer is more\n\t\/\/ up-to-date.\n\tlongEnough := req.LastLogTerm == lastLogTerm && req.LastLogIndex >= lastIndex\n\n\t\/\/ We can only grant a vote if: we have not voted yet, we vote for the\n\t\/\/ same candidate again, or this is a pre-vote.\n\tcanGrantVote := !voted || alreadyVotedForCandidate || req.PreVote\n\n\t\/\/ #RV2 If votedFor is null or candidateId, and candidate's log is at\n\t\/\/ least as up-to-date as receiver's log, grant vote.\n\tvoteGranted = canGrantVote && (laterTerm || longEnough)\n\n\tif voteGranted {\n\t\tif req.PreVote {\n\t\t\treturn &pb.RequestVoteResponse{VoteGranted: true, Term: req.Term}\n\t\t}\n\n\t\tr.votedFor = req.CandidateID\n\t\tr.storage.Set(raft.KeyVotedFor, req.CandidateID)\n\n\t\t\/\/ #F2 If election timeout elapses without receiving\n\t\t\/\/ AppendEntries RPC from current leader or granting a vote to\n\t\t\/\/ candidate: convert to candidate. Here we are granting a vote\n\t\t\/\/ to a candidate so we reset the election timeout.\n\t\tr.resetElection = true\n\t\tr.resetBaseline = true\n\n\t\treturn &pb.RequestVoteResponse{VoteGranted: true, Term: r.currentTerm}\n\t}\n\n\t\/\/ #RV2 The candidate's log was not up-to-date\n\treturn &pb.RequestVoteResponse{Term: r.currentTerm}\n}\n\n\/\/ HandleAppendEntriesRequest must be called when receiving a\n\/\/ AppendEntriesRequest, the return value must be delivered to the requester.\nfunc (r *Raft) HandleAppendEntriesRequest(req *pb.AppendEntriesRequest) *pb.AppendEntriesResponse {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tif r.metricsEnabled {\n\t\ttimer := metrics.NewTimer(rmetrics.aereq)\n\t\tdefer timer.ObserveDuration()\n\t}\n\n\treqLogger := r.logger.WithFields(logrus.Fields{\n\t\t\"currentterm\": r.currentTerm,\n\t\t\"requestterm\": req.Term,\n\t\t\"leaderid\": req.LeaderID,\n\t\t\"prevlogindex\": req.PrevLogIndex,\n\t\t\"prevlogterm\": req.PrevLogTerm,\n\t\t\"commitindex\": req.CommitIndex,\n\t\t\"lenentries\": len(req.Entries),\n\t})\n\treqLogger.Infoln(\"Got AppendEntries\")\n\n\tlogLen := r.storage.NextIndex() - 1\n\n\tres := &pb.AppendEntriesResponse{\n\t\tTerm: r.currentTerm,\n\t\tMatchIndex: logLen,\n\t}\n\n\t\/\/ #AE1 Reply false if term < currentTerm.\n\tif req.Term < r.currentTerm {\n\t\treturn res\n\t}\n\n\tprevTerm := r.logTerm(req.PrevLogIndex)\n\n\t\/\/ An AppendEntries request is always successful for the first index. A\n\t\/\/ leader can only be elected leader if its log matches that of a\n\t\/\/ majority and our log is guaranteed to be at least 0 in length.\n\tfirstIndex := req.PrevLogIndex == 0\n\n\t\/\/ The index preceding the entries we are going to replicate must be in our log.\n\tgotPrevIndex := req.PrevLogIndex <= logLen\n\t\/\/ The term must match to satisfy the log matching property.\n\tsameTerm := req.PrevLogTerm == prevTerm\n\n\t\/\/ If the previous entry is in our log, then our log matches the leaders\n\t\/\/ up till and including the previous entry. And we can safely replicate\n\t\/\/ next new entries.\n\tgotPrevEntry := gotPrevIndex && sameTerm\n\n\tsuccess := firstIndex || gotPrevEntry\n\n\t\/\/ #A2 If RPC request or response contains term T > currentTerm: set\n\t\/\/ currentTerm = T, convert to follower. Transition to follower upon\n\t\/\/ receiving an AppendEntries call.\n\tif req.Term > r.currentTerm || r.state != Follower {\n\t\tr.becomeFollower(req.Term)\n\t\tres.Term = req.Term\n\t}\n\n\tif r.metricsEnabled {\n\t\trmetrics.leader.Set(float64(req.LeaderID))\n\t}\n\n\t\/\/ We acknowledge this server as the leader as it's has the highest term\n\t\/\/ we have seen, and there can only be one leader per term.\n\tr.leader = req.LeaderID\n\tr.heardFromLeader = true\n\tr.seenLeader = true\n\n\t\/\/ Don't timeout during catch up.\n\tif uint64(len(req.Entries)) > r.burst {\n\t\tr.resetElection = true\n\t}\n\n\tif !success {\n\t\tr.cureqout <- &catchUpReq{\n\t\t\tleaderID: req.LeaderID,\n\t\t\t\/\/ TODO term: req.Term, ?\n\t\t\tmatchIndex: res.MatchIndex,\n\t\t}\n\n\t\treturn res\n\t}\n\n\tvar toSave []*commonpb.Entry\n\tindex := req.PrevLogIndex\n\n\tfor _, entry := range req.Entries {\n\t\t\/\/ Increment first so we start at previous index + 1.\n\t\tindex++\n\n\t\t\/\/ If the terms don't match, our logs conflict at this index. On\n\t\t\/\/ the first conflict this will truncate the log to the lowest\n\t\t\/\/ common matching index. After that it will fill the log with\n\t\t\/\/ the new entries from the leader. This is because entry.Term\n\t\t\/\/ will always conflict with term 0, which will be returned for\n\t\t\/\/ indexes outside our log.\n\t\tif entry.Term != r.logTerm(index) {\n\t\t\tlogLen = r.storage.NextIndex() - 1\n\t\t\tfor logLen > index-1 {\n\t\t\t\t\/\/ If we are overwriting the latest\n\t\t\t\t\/\/ configuration, rollback to the committed one.\n\t\t\t\tif logLen == r.mem.getIndex() {\n\t\t\t\t\tr.mem.rollback()\n\t\t\t\t}\n\t\t\t\tr.storage.RemoveEntries(logLen, logLen)\n\t\t\t\tlogLen = r.storage.NextIndex() - 1\n\t\t\t}\n\t\t\ttoSave = append(toSave, entry)\n\t\t}\n\t}\n\n\tif len(toSave) > 0 {\n\t\tr.storage.StoreEntries(toSave)\n\t}\n\tlogLen = r.storage.NextIndex() - 1\n\n\tfor _, entry := range toSave {\n\t\tif entry.EntryType == commonpb.EntryReconf {\n\t\t\tvar reconf commonpb.ReconfRequest\n\t\t\terr := reconf.Unmarshal(entry.Data)\n\n\t\t\tif err != nil {\n\t\t\t\tpanic(\"could not unmarshal reconf\")\n\t\t\t}\n\n\t\t\tr.mem.setPending(&reconf)\n\t\t\tr.mem.set(entry.Index)\n\t\t}\n\t}\n\n\told := r.commitIndex\n\t\/\/ Commit index can not exceed the length of our log.\n\tr.commitIndex = min(req.CommitIndex, logLen)\n\n\tif r.metricsEnabled {\n\t\trmetrics.commitIndex.Set(float64(r.commitIndex))\n\t}\n\n\tif r.commitIndex > old {\n\t\tr.logger.WithFields(logrus.Fields{\n\t\t\t\"oldcommitindex\": old,\n\t\t\t\"commitindex\": r.commitIndex,\n\t\t}).Infoln(\"Set commit index\")\n\n\t\tr.newCommit(old)\n\t}\n\n\treqLogger.WithFields(logrus.Fields{\n\t\t\"lensaved\": len(toSave),\n\t\t\"lenlog\": logLen,\n\t\t\"success\": success,\n\t}).Infoln(\"Saved entries to stable storage\")\n\n\tres.Success = true\n\treturn res\n}\n\nfunc (r *Raft) HandleInstallSnapshotRequest(snapshot *commonpb.Snapshot) (res *pb.InstallSnapshotResponse) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tres = &pb.InstallSnapshotResponse{\n\t\tTerm: r.currentTerm,\n\t}\n\n\treturn\n}\n\n\/\/ HandleRequestVoteResponse must be invoked when receiving a\n\/\/ RequestVoteResponse.\nfunc (r *Raft) HandleRequestVoteResponse(response *pb.RequestVoteResponse) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tif r.metricsEnabled {\n\t\ttimer := metrics.NewTimer(rmetrics.rvres)\n\t\tdefer timer.ObserveDuration()\n\t}\n\n\tr.logger.WithFields(logrus.Fields{\n\t\t\"currentterm\": r.currentTerm,\n\t\t\"responseterm\": response.Term,\n\t\t\"votegranted\": response.VoteGranted,\n\t}).Infoln(\"Got vote response\")\n\n\tterm := r.currentTerm\n\n\tif r.preElection {\n\t\tterm++\n\t}\n\n\t\/\/ #A2 If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower.\n\tif response.Term > term {\n\t\tr.becomeFollower(response.Term)\n\n\t\treturn\n\t}\n\n\t\/\/ Ignore late response\n\tif response.Term < term {\n\t\treturn\n\t}\n\n\t\/\/ Cont. from startElection(). We have now received a response from Gorums.\n\n\t\/\/ #C5 If votes received from majority of server: become leader.\n\t\/\/ Make sure we have not stepped down while waiting for replies.\n\tif r.state == Candidate && response.VoteGranted {\n\t\tif r.preElection {\n\t\t\tr.preElection = false\n\t\t\tselect {\n\t\t\tcase r.startElectionNow <- struct{}{}:\n\t\t\tcase <-r.stop:\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ We have received at least a quorum of votes.\n\t\t\/\/ We are the leader for this term. See Raft Paper Figure 2 -> Rules for Servers -> Leaders.\n\n\t\tif r.metricsEnabled {\n\t\t\trmetrics.leader.Set(float64(r.id))\n\t\t}\n\n\t\tr.logger.WithFields(logrus.Fields{\n\t\t\t\"currentterm\": r.currentTerm,\n\t\t}).Infoln(\"Elected leader\")\n\n\t\tlogLen := r.storage.NextIndex() - 1\n\n\t\tr.state = Leader\n\t\tr.leader = r.id\n\t\tr.seenLeader = true\n\t\tr.heardFromLeader = true\n\t\tr.nextIndex = logLen + 1\n\t\tr.pending = list.New()\n\t\tr.pendingReads = nil\n\t\tr.mem.setStable(false)\n\n\t\t\/\/ Empty queue.\n\tEMPTYCH:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-r.queue:\n\t\t\t\t\/\/ TODO Respond with NotLeader.\n\t\t\tdefault:\n\t\t\t\t\/\/ Paper §8: We add a no-op, so that the leader\n\t\t\t\t\/\/ commits an entry from its own term. This\n\t\t\t\t\/\/ ensures that the leader knows which entries\n\t\t\t\t\/\/ are committed.\n\t\t\t\tpromise, _ := raft.NewPromiseEntry(&commonpb.Entry{\n\t\t\t\t\tEntryType: commonpb.EntryInternal,\n\t\t\t\t\tTerm: r.currentTerm,\n\t\t\t\t\tData: raft.NOOP,\n\t\t\t\t})\n\t\t\t\tr.queue <- promise\n\t\t\t\tbreak EMPTYCH\n\t\t\t}\n\t\t}\n\n\t\tselect {\n\t\tcase r.heartbeatNow <- struct{}{}:\n\t\tdefault:\n\t\t}\n\n\t\treturn\n\t}\n\n\tr.preElection = true\n\n\t\/\/ #C7 If election timeout elapses: start new election.\n\t\/\/ This will happened if we don't receive enough replies in time. Or we lose the election but don't see a higher term number.\n}\n\n\/\/ HandleAppendEntriesResponse must be invoked when receiving an\n\/\/ AppendEntriesResponse.\nfunc (r *Raft) HandleAppendEntriesResponse(response *pb.AppendEntriesQFResponse, maxIndex uint64) {\n\tr.mu.Lock()\n\tdefer func() {\n\t\tr.mu.Unlock()\n\t\tr.advanceCommitIndex()\n\t}()\n\tif r.metricsEnabled {\n\t\ttimer := metrics.NewTimer(rmetrics.aeres)\n\t\tdefer timer.ObserveDuration()\n\t}\n\n\t\/\/ #A2 If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower.\n\t\/\/ If we didn't get a response from a majority (excluding self) step down.\n\tif response.Term > r.currentTerm || response.Replies < uint64((len(r.mem.get().NodeIDs())+1)\/2) {\n\t\t\/\/ Become follower.\n\t\tselect {\n\t\tcase r.toggle <- struct{}{}:\n\t\t\tr.logger.Warnln(\"Leader stepping down\")\n\t\tcase <-r.stop:\n\t\t}\n\n\t\treturn\n\t}\n\n\t\/\/ Heartbeat to a majority.\n\tr.resetElection = true\n\n\t\/\/ Ignore late response\n\tif response.Term < r.currentTerm {\n\t\treturn\n\t}\n\n\tif response.Success {\n\t\tr.matchIndex = maxIndex\n\t\tr.nextIndex = r.matchIndex + 1\n\t\tr.logger.WithFields(logrus.Fields{\n\t\t\t\"matchindex\": r.matchIndex,\n\t\t\t\"nextindex\": r.nextIndex,\n\t\t}).Warnln(\"Setting matchindex\")\n\n\t\treturn\n\t}\n\n\t\/\/ If AppendEntries was not successful lower match index.\n\tr.nextIndex = max(1, min(r.nextIndex-r.burst, r.matchIndex+1))\n}\n\nfunc (r *Raft) HandleInstallSnapshotResponse(res *pb.InstallSnapshotResponse) bool {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif res.Term > r.currentTerm {\n\t\tr.becomeFollower(res.Term)\n\n\t\treturn false\n\t}\n\n\treturn true\n}\n<commit_msg>raftgorums\/incoming.go: Fix followers timing out<commit_after>package raftgorums\n\nimport (\n\t\"container\/list\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/go-kit\/kit\/metrics\"\n\t\"github.com\/relab\/raft\"\n\t\"github.com\/relab\/raft\/commonpb\"\n\tpb \"github.com\/relab\/raft\/raftgorums\/raftpb\"\n)\n\n\/\/ RequestVote implements gorums.RaftServer.\nfunc (r *Raft) RequestVote(ctx context.Context, req *pb.RequestVoteRequest) (*pb.RequestVoteResponse, error) {\n\treturn r.HandleRequestVoteRequest(req), nil\n}\n\n\/\/ AppendEntries implements gorums.RaftServer.\nfunc (r *Raft) AppendEntries(ctx context.Context, req *pb.AppendEntriesRequest) (*pb.AppendEntriesResponse, error) {\n\treturn r.HandleAppendEntriesRequest(req), nil\n}\n\n\/\/ InstallSnapshot implements gorums.RaftServer.\nfunc (r *Raft) InstallSnapshot(ctx context.Context, snapshot *commonpb.Snapshot) (*pb.InstallSnapshotResponse, error) {\n\treturn r.HandleInstallSnapshotRequest(snapshot), nil\n}\n\n\/\/ CatchMeUp implements gorums.RaftServer.\nfunc (r *Raft) CatchMeUp(ctx context.Context, req *pb.CatchMeUpRequest) (res *pb.Empty, err error) {\n\tres = &pb.Empty{}\n\tr.match[r.mem.getNodeID(req.FollowerID)] <- req.NextIndex\n\treturn\n}\n\n\/\/ HandleRequestVoteRequest must be called when receiving a RequestVoteRequest,\n\/\/ the return value must be delivered to the requester.\nfunc (r *Raft) HandleRequestVoteRequest(req *pb.RequestVoteRequest) *pb.RequestVoteResponse {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tif r.metricsEnabled {\n\t\ttimer := metrics.NewTimer(rmetrics.rvreq)\n\t\tdefer timer.ObserveDuration()\n\t}\n\n\tvar voteGranted bool\n\tdefer func() {\n\t\tr.logger.WithFields(logrus.Fields{\n\t\t\t\"currentterm\": r.currentTerm,\n\t\t\t\"requestterm\": req.Term,\n\t\t\t\"prevote\": req.PreVote,\n\t\t\t\"candidateid\": req.CandidateID,\n\t\t\t\"votegranted\": voteGranted,\n\t\t}).Infoln(\"Got vote request\")\n\t}()\n\n\t\/\/ #RV1 Reply false if term < currentTerm.\n\tif req.Term < r.currentTerm {\n\t\treturn &pb.RequestVoteResponse{Term: r.currentTerm}\n\t}\n\n\t\/\/ #A2 If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower.\n\tif req.Term > r.currentTerm && !req.PreVote {\n\t\tr.becomeFollower(req.Term)\n\t}\n\n\tvoted := r.votedFor != None\n\n\tif req.PreVote && (r.heardFromLeader || (voted && req.Term == r.currentTerm)) {\n\t\t\/\/ We don't grant pre-votes if we have recently heard from a\n\t\t\/\/ leader or already voted in the pre-term.\n\t\treturn &pb.RequestVoteResponse{Term: r.currentTerm}\n\t}\n\n\tlastIndex := r.storage.NextIndex() - 1\n\tlastLogTerm := r.logTerm(lastIndex)\n\n\t\/\/ We can grant a vote in the same term, as long as it's to the same\n\t\/\/ candidate. This is useful if the response was lost, and the candidate\n\t\/\/ sends another request.\n\talreadyVotedForCandidate := r.votedFor == req.CandidateID\n\n\t\/\/ If the logs have last entries with different terms, the log with the\n\t\/\/ later term is more up-to-date.\n\tlaterTerm := req.LastLogTerm > lastLogTerm\n\n\t\/\/ If the logs end with the same term, whichever log is longer is more\n\t\/\/ up-to-date.\n\tlongEnough := req.LastLogTerm == lastLogTerm && req.LastLogIndex >= lastIndex\n\n\t\/\/ We can only grant a vote if: we have not voted yet, we vote for the\n\t\/\/ same candidate again, or this is a pre-vote.\n\tcanGrantVote := !voted || alreadyVotedForCandidate || req.PreVote\n\n\t\/\/ #RV2 If votedFor is null or candidateId, and candidate's log is at\n\t\/\/ least as up-to-date as receiver's log, grant vote.\n\tvoteGranted = canGrantVote && (laterTerm || longEnough)\n\n\tif voteGranted {\n\t\tif req.PreVote {\n\t\t\treturn &pb.RequestVoteResponse{VoteGranted: true, Term: req.Term}\n\t\t}\n\n\t\tr.votedFor = req.CandidateID\n\t\tr.storage.Set(raft.KeyVotedFor, req.CandidateID)\n\n\t\t\/\/ #F2 If election timeout elapses without receiving\n\t\t\/\/ AppendEntries RPC from current leader or granting a vote to\n\t\t\/\/ candidate: convert to candidate. Here we are granting a vote\n\t\t\/\/ to a candidate so we reset the election timeout.\n\t\tr.resetElection = true\n\t\tr.resetBaseline = true\n\n\t\treturn &pb.RequestVoteResponse{VoteGranted: true, Term: r.currentTerm}\n\t}\n\n\t\/\/ #RV2 The candidate's log was not up-to-date\n\treturn &pb.RequestVoteResponse{Term: r.currentTerm}\n}\n\n\/\/ HandleAppendEntriesRequest must be called when receiving a\n\/\/ AppendEntriesRequest, the return value must be delivered to the requester.\nfunc (r *Raft) HandleAppendEntriesRequest(req *pb.AppendEntriesRequest) *pb.AppendEntriesResponse {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tif r.metricsEnabled {\n\t\ttimer := metrics.NewTimer(rmetrics.aereq)\n\t\tdefer timer.ObserveDuration()\n\t}\n\n\treqLogger := r.logger.WithFields(logrus.Fields{\n\t\t\"currentterm\": r.currentTerm,\n\t\t\"requestterm\": req.Term,\n\t\t\"leaderid\": req.LeaderID,\n\t\t\"prevlogindex\": req.PrevLogIndex,\n\t\t\"prevlogterm\": req.PrevLogTerm,\n\t\t\"commitindex\": req.CommitIndex,\n\t\t\"lenentries\": len(req.Entries),\n\t})\n\treqLogger.Infoln(\"Got AppendEntries\")\n\n\tlogLen := r.storage.NextIndex() - 1\n\n\tres := &pb.AppendEntriesResponse{\n\t\tTerm: r.currentTerm,\n\t\tMatchIndex: logLen,\n\t}\n\n\t\/\/ #AE1 Reply false if term < currentTerm.\n\tif req.Term < r.currentTerm {\n\t\treturn res\n\t}\n\n\tprevTerm := r.logTerm(req.PrevLogIndex)\n\n\t\/\/ An AppendEntries request is always successful for the first index. A\n\t\/\/ leader can only be elected leader if its log matches that of a\n\t\/\/ majority and our log is guaranteed to be at least 0 in length.\n\tfirstIndex := req.PrevLogIndex == 0\n\n\t\/\/ The index preceding the entries we are going to replicate must be in our log.\n\tgotPrevIndex := req.PrevLogIndex <= logLen\n\t\/\/ The term must match to satisfy the log matching property.\n\tsameTerm := req.PrevLogTerm == prevTerm\n\n\t\/\/ If the previous entry is in our log, then our log matches the leaders\n\t\/\/ up till and including the previous entry. And we can safely replicate\n\t\/\/ next new entries.\n\tgotPrevEntry := gotPrevIndex && sameTerm\n\n\tsuccess := firstIndex || gotPrevEntry\n\n\t\/\/ #A2 If RPC request or response contains term T > currentTerm: set\n\t\/\/ currentTerm = T, convert to follower. Transition to follower upon\n\t\/\/ receiving an AppendEntries call.\n\tif req.Term > r.currentTerm || r.state != Follower {\n\t\tr.becomeFollower(req.Term)\n\t\tres.Term = req.Term\n\t}\n\n\tif r.metricsEnabled {\n\t\trmetrics.leader.Set(float64(req.LeaderID))\n\t}\n\n\t\/\/ We acknowledge this server as the leader as it's has the highest term\n\t\/\/ we have seen, and there can only be one leader per term.\n\tr.leader = req.LeaderID\n\tr.heardFromLeader = true\n\tr.seenLeader = true\n\t\/\/ TODO Revisit heartbeat mechanism.\n\tr.resetElection = true\n\n\tif !success {\n\t\tr.cureqout <- &catchUpReq{\n\t\t\tleaderID: req.LeaderID,\n\t\t\t\/\/ TODO term: req.Term, ?\n\t\t\tmatchIndex: res.MatchIndex,\n\t\t}\n\n\t\treturn res\n\t}\n\n\tvar toSave []*commonpb.Entry\n\tindex := req.PrevLogIndex\n\n\tfor _, entry := range req.Entries {\n\t\t\/\/ Increment first so we start at previous index + 1.\n\t\tindex++\n\n\t\t\/\/ If the terms don't match, our logs conflict at this index. On\n\t\t\/\/ the first conflict this will truncate the log to the lowest\n\t\t\/\/ common matching index. After that it will fill the log with\n\t\t\/\/ the new entries from the leader. This is because entry.Term\n\t\t\/\/ will always conflict with term 0, which will be returned for\n\t\t\/\/ indexes outside our log.\n\t\tif entry.Term != r.logTerm(index) {\n\t\t\tlogLen = r.storage.NextIndex() - 1\n\t\t\tfor logLen > index-1 {\n\t\t\t\t\/\/ If we are overwriting the latest\n\t\t\t\t\/\/ configuration, rollback to the committed one.\n\t\t\t\tif logLen == r.mem.getIndex() {\n\t\t\t\t\tr.mem.rollback()\n\t\t\t\t}\n\t\t\t\tr.storage.RemoveEntries(logLen, logLen)\n\t\t\t\tlogLen = r.storage.NextIndex() - 1\n\t\t\t}\n\t\t\ttoSave = append(toSave, entry)\n\t\t}\n\t}\n\n\tif len(toSave) > 0 {\n\t\tr.storage.StoreEntries(toSave)\n\t}\n\tlogLen = r.storage.NextIndex() - 1\n\n\tfor _, entry := range toSave {\n\t\tif entry.EntryType == commonpb.EntryReconf {\n\t\t\tvar reconf commonpb.ReconfRequest\n\t\t\terr := reconf.Unmarshal(entry.Data)\n\n\t\t\tif err != nil {\n\t\t\t\tpanic(\"could not unmarshal reconf\")\n\t\t\t}\n\n\t\t\tr.mem.setPending(&reconf)\n\t\t\tr.mem.set(entry.Index)\n\t\t}\n\t}\n\n\told := r.commitIndex\n\t\/\/ Commit index can not exceed the length of our log.\n\tr.commitIndex = min(req.CommitIndex, logLen)\n\n\tif r.metricsEnabled {\n\t\trmetrics.commitIndex.Set(float64(r.commitIndex))\n\t}\n\n\tif r.commitIndex > old {\n\t\tr.logger.WithFields(logrus.Fields{\n\t\t\t\"oldcommitindex\": old,\n\t\t\t\"commitindex\": r.commitIndex,\n\t\t}).Infoln(\"Set commit index\")\n\n\t\tr.newCommit(old)\n\t}\n\n\treqLogger.WithFields(logrus.Fields{\n\t\t\"lensaved\": len(toSave),\n\t\t\"lenlog\": logLen,\n\t\t\"success\": success,\n\t}).Infoln(\"Saved entries to stable storage\")\n\n\tres.Success = true\n\treturn res\n}\n\nfunc (r *Raft) HandleInstallSnapshotRequest(snapshot *commonpb.Snapshot) (res *pb.InstallSnapshotResponse) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tres = &pb.InstallSnapshotResponse{\n\t\tTerm: r.currentTerm,\n\t}\n\n\treturn\n}\n\n\/\/ HandleRequestVoteResponse must be invoked when receiving a\n\/\/ RequestVoteResponse.\nfunc (r *Raft) HandleRequestVoteResponse(response *pb.RequestVoteResponse) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tif r.metricsEnabled {\n\t\ttimer := metrics.NewTimer(rmetrics.rvres)\n\t\tdefer timer.ObserveDuration()\n\t}\n\n\tr.logger.WithFields(logrus.Fields{\n\t\t\"currentterm\": r.currentTerm,\n\t\t\"responseterm\": response.Term,\n\t\t\"votegranted\": response.VoteGranted,\n\t}).Infoln(\"Got vote response\")\n\n\tterm := r.currentTerm\n\n\tif r.preElection {\n\t\tterm++\n\t}\n\n\t\/\/ #A2 If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower.\n\tif response.Term > term {\n\t\tr.becomeFollower(response.Term)\n\n\t\treturn\n\t}\n\n\t\/\/ Ignore late response\n\tif response.Term < term {\n\t\treturn\n\t}\n\n\t\/\/ Cont. from startElection(). We have now received a response from Gorums.\n\n\t\/\/ #C5 If votes received from majority of server: become leader.\n\t\/\/ Make sure we have not stepped down while waiting for replies.\n\tif r.state == Candidate && response.VoteGranted {\n\t\tif r.preElection {\n\t\t\tr.preElection = false\n\t\t\tselect {\n\t\t\tcase r.startElectionNow <- struct{}{}:\n\t\t\tcase <-r.stop:\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ We have received at least a quorum of votes.\n\t\t\/\/ We are the leader for this term. See Raft Paper Figure 2 -> Rules for Servers -> Leaders.\n\n\t\tif r.metricsEnabled {\n\t\t\trmetrics.leader.Set(float64(r.id))\n\t\t}\n\n\t\tr.logger.WithFields(logrus.Fields{\n\t\t\t\"currentterm\": r.currentTerm,\n\t\t}).Infoln(\"Elected leader\")\n\n\t\tlogLen := r.storage.NextIndex() - 1\n\n\t\tr.state = Leader\n\t\tr.leader = r.id\n\t\tr.seenLeader = true\n\t\tr.heardFromLeader = true\n\t\tr.nextIndex = logLen + 1\n\t\tr.pending = list.New()\n\t\tr.pendingReads = nil\n\t\tr.mem.setStable(false)\n\n\t\t\/\/ Empty queue.\n\tEMPTYCH:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-r.queue:\n\t\t\t\t\/\/ TODO Respond with NotLeader.\n\t\t\tdefault:\n\t\t\t\t\/\/ Paper §8: We add a no-op, so that the leader\n\t\t\t\t\/\/ commits an entry from its own term. This\n\t\t\t\t\/\/ ensures that the leader knows which entries\n\t\t\t\t\/\/ are committed.\n\t\t\t\tpromise, _ := raft.NewPromiseEntry(&commonpb.Entry{\n\t\t\t\t\tEntryType: commonpb.EntryInternal,\n\t\t\t\t\tTerm: r.currentTerm,\n\t\t\t\t\tData: raft.NOOP,\n\t\t\t\t})\n\t\t\t\tr.queue <- promise\n\t\t\t\tbreak EMPTYCH\n\t\t\t}\n\t\t}\n\n\t\tselect {\n\t\tcase r.heartbeatNow <- struct{}{}:\n\t\tdefault:\n\t\t}\n\n\t\treturn\n\t}\n\n\tr.preElection = true\n\n\t\/\/ #C7 If election timeout elapses: start new election.\n\t\/\/ This will happened if we don't receive enough replies in time. Or we lose the election but don't see a higher term number.\n}\n\n\/\/ HandleAppendEntriesResponse must be invoked when receiving an\n\/\/ AppendEntriesResponse.\nfunc (r *Raft) HandleAppendEntriesResponse(response *pb.AppendEntriesQFResponse, maxIndex uint64) {\n\tr.mu.Lock()\n\tdefer func() {\n\t\tr.mu.Unlock()\n\t\tr.advanceCommitIndex()\n\t}()\n\tif r.metricsEnabled {\n\t\ttimer := metrics.NewTimer(rmetrics.aeres)\n\t\tdefer timer.ObserveDuration()\n\t}\n\n\t\/\/ #A2 If RPC request or response contains term T > currentTerm: set currentTerm = T, convert to follower.\n\t\/\/ If we didn't get a response from a majority (excluding self) step down.\n\tif response.Term > r.currentTerm || response.Replies < uint64((len(r.mem.get().NodeIDs())+1)\/2) {\n\t\t\/\/ Become follower.\n\t\tselect {\n\t\tcase r.toggle <- struct{}{}:\n\t\t\tr.logger.Warnln(\"Leader stepping down\")\n\t\tcase <-r.stop:\n\t\t}\n\n\t\treturn\n\t}\n\n\t\/\/ Heartbeat to a majority.\n\tr.resetElection = true\n\n\t\/\/ Ignore late response\n\tif response.Term < r.currentTerm {\n\t\treturn\n\t}\n\n\tif response.Success {\n\t\tr.matchIndex = maxIndex\n\t\tr.nextIndex = r.matchIndex + 1\n\t\tr.logger.WithFields(logrus.Fields{\n\t\t\t\"matchindex\": r.matchIndex,\n\t\t\t\"nextindex\": r.nextIndex,\n\t\t}).Warnln(\"Setting matchindex\")\n\n\t\treturn\n\t}\n\n\t\/\/ If AppendEntries was not successful lower match index.\n\tr.nextIndex = max(1, min(r.nextIndex-r.burst, r.matchIndex+1))\n}\n\nfunc (r *Raft) HandleInstallSnapshotResponse(res *pb.InstallSnapshotResponse) bool {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif res.Term > r.currentTerm {\n\t\tr.becomeFollower(res.Term)\n\n\t\treturn false\n\t}\n\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage dashboard\n\n\/\/ This file handles operations on the CL entity kind.\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"appengine\"\n\t\"appengine\/datastore\"\n\t\"appengine\/taskqueue\"\n\t\"appengine\/urlfetch\"\n\t\"appengine\/user\"\n)\n\nfunc init() {\n\thttp.HandleFunc(\"\/assign\", handleAssign)\n\thttp.HandleFunc(\"\/update-cl\", handleUpdateCL)\n}\n\nconst codereviewBase = \"http:\/\/codereview.appspot.com\"\nconst gobotBase = \"http:\/\/research.swtch.com\/gobot_codereview\"\n\nvar clRegexp = regexp.MustCompile(`\\d+`)\n\n\/\/ CL represents a code review.\ntype CL struct {\n\tNumber string \/\/ e.g. \"5903061\"\n\tClosed bool\n\tOwner string \/\/ email address\n\n\tCreated, Modified time.Time\n\n\tDescription []byte `datastore:\",noindex\"`\n\tFirstLine string `datastore:\",noindex\"`\n\tLGTMs []string\n\tNotLGTMs []string\n\n\t\/\/ Mail information.\n\tSubject string `datastore:\",noindex\"`\n\tRecipients []string `datastore:\",noindex\"`\n\tLastMessageID string `datastore:\",noindex\"`\n\n\t\/\/ These are person IDs (e.g. \"rsc\"); they may be empty\n\tAuthor string\n\tReviewer string\n}\n\n\/\/ DisplayOwner returns the CL's owner, either as their email address\n\/\/ or the person ID if it's a reviewer. It is for display only.\nfunc (cl *CL) DisplayOwner() string {\n\tif p, ok := emailToPerson[cl.Owner]; ok {\n\t\treturn p\n\t}\n\treturn cl.Owner\n}\n\nfunc (cl *CL) FirstLineHTML() template.HTML {\n\ts := template.HTMLEscapeString(cl.FirstLine)\n\t\/\/ Embolden the package name.\n\tif i := strings.Index(s, \":\"); i >= 0 {\n\t\ts = \"<b>\" + s[:i] + \"<\/b>\" + s[i:]\n\t}\n\treturn template.HTML(s)\n}\n\nfunc formatEmails(e []string) template.HTML {\n\tx := make([]string, len(e))\n\tfor i, s := range e {\n\t\ts = template.HTMLEscapeString(s)\n\t\tif !strings.Contains(s, \"@\") {\n\t\t\ts = \"<b>\" + s + \"<\/b>\"\n\t\t}\n\t\ts = `<span class=\"email\">` + s + \"<\/span>\"\n\t\tx[i] = s\n\t}\n\treturn template.HTML(strings.Join(x, \", \"))\n}\n\nfunc (cl *CL) LGTMHTML() template.HTML {\n\treturn formatEmails(cl.LGTMs)\n}\n\nfunc (cl *CL) NotLGTMHTML() template.HTML {\n\treturn formatEmails(cl.NotLGTMs)\n}\n\nfunc (cl *CL) ModifiedAgo() string {\n\t\/\/ Just the first non-zero unit.\n\tunits := [...]struct {\n\t\tsuffix string\n\t\tunit time.Duration\n\t}{\n\t\t{\"d\", 24 * time.Hour},\n\t\t{\"h\", time.Hour},\n\t\t{\"m\", time.Minute},\n\t\t{\"s\", time.Second},\n\t}\n\td := time.Now().Sub(cl.Modified)\n\tfor _, u := range units {\n\t\tif d > u.unit {\n\t\t\treturn fmt.Sprintf(\"%d%s\", d\/u.unit, u.suffix)\n\t\t}\n\t}\n\treturn \"just now\"\n}\n\nfunc handleAssign(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\n\tif r.Method != \"POST\" {\n\t\thttp.Error(w, \"Bad method \"+r.Method, 400)\n\t\treturn\n\t}\n\n\tu := user.Current(c)\n\tif _, ok := emailToPerson[u.Email]; !ok {\n\t\thttp.Error(w, \"Not allowed\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tn, rev := r.FormValue(\"cl\"), r.FormValue(\"r\")\n\tif !clRegexp.MatchString(n) {\n\t\tc.Errorf(\"Bad CL %q\", n)\n\t\thttp.Error(w, \"Bad CL\", 400)\n\t\treturn\n\t}\n\tif _, ok := preferredEmail[rev]; !ok && rev != \"\" {\n\t\tc.Errorf(\"Unknown reviewer %q\", rev)\n\t\thttp.Error(w, \"Unknown reviewer\", 400)\n\t\treturn\n\t}\n\n\tkey := datastore.NewKey(c, \"CL\", n, 0, nil)\n\n\tif rev != \"\" {\n\t\t\/\/ Make sure the reviewer is listed in Rietveld as a reviewer.\n\t\turl := codereviewBase + \"\/\" + n + \"\/fields\"\n\t\tresp, err := urlfetch.Client(c).Get(url + \"?field=reviewers\")\n\t\tif err != nil {\n\t\t\tc.Errorf(\"Retrieving CL reviewer list failed: %v\", err)\n\t\t\thttp.Error(w, err.Error(), 500)\n\t\t\treturn\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tif resp.StatusCode != 200 {\n\t\t\tc.Errorf(\"Retrieving CL reviewer list failed: got HTTP response %d\", resp.StatusCode)\n\t\t\thttp.Error(w, \"Failed contacting Rietveld\", 500)\n\t\t\treturn\n\t\t}\n\n\t\tvar apiResp struct {\n\t\t\tReviewers []string `json:\"reviewers\"`\n\t\t}\n\t\tif err := json.NewDecoder(resp.Body).Decode(&apiResp); err != nil {\n\t\t\t\/\/ probably can't be retried\n\t\t\tmsg := fmt.Sprintf(\"Malformed JSON from %v: %v\", url, err)\n\t\t\tc.Errorf(\"%s\", msg)\n\t\t\thttp.Error(w, msg, 500)\n\t\t\treturn\n\t\t}\n\t\tfound := false\n\t\tfor _, r := range apiResp.Reviewers {\n\t\t\tif emailToPerson[r] == rev {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tc.Infof(\"Adding %v as a reviewer of CL %v\", rev, n)\n\n\t\t\turl := fmt.Sprintf(\"%s?cl=%s&r=%s\", gobotBase, n, rev)\n\t\t\tresp, err := urlfetch.Client(c).Get(url)\n\t\t\tif err != nil {\n\t\t\t\tc.Errorf(\"Gobot GET failed: %v\", err)\n\t\t\t\thttp.Error(w, err.Error(), 500)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tif resp.StatusCode != 200 {\n\t\t\t\tc.Errorf(\"Gobot GET failed: got HTTP response %d\", resp.StatusCode)\n\t\t\t\thttp.Error(w, \"Failed contacting Gobot\", 500)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tc.Infof(\"Gobot said %q\", resp.Status)\n\t\t}\n\t}\n\n\t\/\/ Update our own record.\n\terr := datastore.RunInTransaction(c, func(c appengine.Context) error {\n\t\tcl := new(CL)\n\t\terr := datastore.Get(c, key, cl)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcl.Reviewer = rev\n\t\t_, err = datastore.Put(c, key, cl)\n\t\treturn err\n\t}, nil)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Assignment failed: %v\", err)\n\t\tc.Errorf(\"%s\", msg)\n\t\thttp.Error(w, msg, 500)\n\t\treturn\n\t}\n\tc.Infof(\"Assigned CL %v to %v\", n, rev)\n}\n\nfunc UpdateCLLater(c appengine.Context, n string, delay time.Duration) {\n\tt := taskqueue.NewPOSTTask(\"\/update-cl\", url.Values{\n\t\t\"cl\": []string{n},\n\t})\n\tt.Delay = delay\n\tif _, err := taskqueue.Add(c, t, \"update-cl\"); err != nil {\n\t\tc.Errorf(\"Failed adding task: %v\", err)\n\t}\n}\n\nfunc handleUpdateCL(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\n\tn := r.FormValue(\"cl\")\n\tif !clRegexp.MatchString(n) {\n\t\tc.Errorf(\"Bad CL %q\", n)\n\t\thttp.Error(w, \"Bad CL\", 400)\n\t\treturn\n\t}\n\n\tif err := updateCL(c, n); err != nil {\n\t\tc.Errorf(\"Failed updating CL %v: %v\", n, err)\n\t\thttp.Error(w, \"Failed update\", 500)\n\t\treturn\n\t}\n\n\tio.WriteString(w, \"OK\")\n}\n\n\/\/ updateCL updates a single CL. If a retryable failure occurs, an error is returned.\nfunc updateCL(c appengine.Context, n string) error {\n\tc.Debugf(\"Updating CL %v\", n)\n\tkey := datastore.NewKey(c, \"CL\", n, 0, nil)\n\n\turl := codereviewBase + \"\/api\/\" + n + \"?messages=true\"\n\tresp, err := urlfetch.Client(c).Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\traw, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed reading HTTP body: %v\", err)\n\t}\n\n\t\/\/ Special case for abandoned CLs.\n\tif resp.StatusCode == 404 && bytes.Contains(raw, []byte(\"No issue exists with that id\")) {\n\t\t\/\/ Don't bother checking for errors. The CL might never have been saved, for instance.\n\t\tdatastore.Delete(c, key)\n\t\tc.Infof(\"Deleted abandoned CL %v\", n)\n\t\treturn nil\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"Update: got HTTP response %d\", resp.StatusCode)\n\t}\n\n\tvar apiResp struct {\n\t\tDescription string `json:\"description\"`\n\t\tReviewers []string `json:\"reviewers\"`\n\t\tCreated string `json:\"created\"`\n\t\tOwnerEmail string `json:\"owner_email\"`\n\t\tModified string `json:\"modified\"`\n\t\tClosed bool `json:\"closed\"`\n\t\tSubject string `json:\"subject\"`\n\t\tMessages []struct {\n\t\t\tText string `json:\"text\"`\n\t\t\tSender string `json:\"sender\"`\n\t\t\tRecipients []string `json:\"recipients\"`\n\t\t\tApproval bool `json:\"approval\"`\n\t\t} `json:\"messages\"`\n\t}\n\tif err := json.Unmarshal(raw, &apiResp); err != nil {\n\t\t\/\/ probably can't be retried\n\t\tc.Errorf(\"Malformed JSON from %v: %v\", url, err)\n\t\treturn nil\n\t}\n\t\/\/c.Infof(\"RAW: %+v\", apiResp)\n\n\tcl := &CL{\n\t\tNumber: n,\n\t\tClosed: apiResp.Closed,\n\t\tOwner: apiResp.OwnerEmail,\n\t\tDescription: []byte(apiResp.Description),\n\t\tFirstLine: apiResp.Description,\n\t\tSubject: apiResp.Subject,\n\t\tAuthor: emailToPerson[apiResp.OwnerEmail],\n\t}\n\tcl.Created, err = time.Parse(\"2006-01-02 15:04:05.000000\", apiResp.Created)\n\tif err != nil {\n\t\tc.Errorf(\"Bad creation time %q: %v\", apiResp.Created, err)\n\t}\n\tcl.Modified, err = time.Parse(\"2006-01-02 15:04:05.000000\", apiResp.Modified)\n\tif err != nil {\n\t\tc.Errorf(\"Bad modification time %q: %v\", apiResp.Modified, err)\n\t}\n\tif i := strings.Index(cl.FirstLine, \"\\n\"); i >= 0 {\n\t\tcl.FirstLine = cl.FirstLine[:i]\n\t}\n\t\/\/ Treat zero reviewers as a signal that the CL is completed.\n\t\/\/ This could be after the CL has been submitted, but before the CL author has synced,\n\t\/\/ but it could also be a CL manually edited to remove reviewers.\n\tif len(apiResp.Reviewers) == 0 {\n\t\tcl.Closed = true\n\t}\n\n\tlgtm := make(map[string]bool)\n\tnotLGTM := make(map[string]bool)\n\trcpt := make(map[string]bool)\n\tfor _, msg := range apiResp.Messages {\n\t\ts, rev := msg.Sender, false\n\t\tif p, ok := emailToPerson[s]; ok {\n\t\t\ts, rev = p, true\n\t\t}\n\n\t\t\/\/ CLs submitted by someone other than the CL owner do not immediately\n\t\t\/\/ transition to \"closed\". Let's simulate the intention by treating\n\t\t\/\/ messages starting with \"*** Submitted as \" from a reviewer as a\n\t\t\/\/ signal that the CL is now closed.\n\t\tif rev && strings.HasPrefix(msg.Text, \"*** Submitted as \") {\n\t\t\tcl.Closed = true\n\t\t}\n\n\t\tif msg.Approval {\n\t\t\tlgtm[s] = true\n\t\t\tdelete(notLGTM, s) \/\/ \"LGTM\" overrules previous \"NOT LGTM\"\n\t\t}\n\t\tif strings.Contains(msg.Text, \"NOT LGTM\") {\n\t\t\tnotLGTM[s] = true\n\t\t\tdelete(lgtm, s) \/\/ \"NOT LGTM\" overrules previous \"LGTM\"\n\t\t}\n\n\t\tfor _, r := range msg.Recipients {\n\t\t\trcpt[r] = true\n\t\t}\n\t}\n\tfor l := range lgtm {\n\t\tcl.LGTMs = append(cl.LGTMs, l)\n\t}\n\tfor l := range notLGTM {\n\t\tcl.NotLGTMs = append(cl.NotLGTMs, l)\n\t}\n\tfor r := range rcpt {\n\t\tcl.Recipients = append(cl.Recipients, r)\n\t}\n\tsort.Strings(cl.LGTMs)\n\tsort.Strings(cl.NotLGTMs)\n\tsort.Strings(cl.Recipients)\n\n\terr = datastore.RunInTransaction(c, func(c appengine.Context) error {\n\t\tocl := new(CL)\n\t\terr := datastore.Get(c, key, ocl)\n\t\tif err != nil && err != datastore.ErrNoSuchEntity {\n\t\t\treturn err\n\t\t} else if err == nil {\n\t\t\t\/\/ LastMessageID and Reviewer need preserving.\n\t\t\tcl.LastMessageID = ocl.LastMessageID\n\t\t\tcl.Reviewer = ocl.Reviewer\n\t\t}\n\t\t_, err = datastore.Put(c, key, cl)\n\t\treturn err\n\t}, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.Infof(\"Updated CL %v\", n)\n\treturn nil\n}\n<commit_msg>misc\/dashboard\/codereview: pass user information to gobot when assigning reviewer.<commit_after>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage dashboard\n\n\/\/ This file handles operations on the CL entity kind.\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"appengine\"\n\t\"appengine\/datastore\"\n\t\"appengine\/taskqueue\"\n\t\"appengine\/urlfetch\"\n\t\"appengine\/user\"\n)\n\nfunc init() {\n\thttp.HandleFunc(\"\/assign\", handleAssign)\n\thttp.HandleFunc(\"\/update-cl\", handleUpdateCL)\n}\n\nconst codereviewBase = \"http:\/\/codereview.appspot.com\"\nconst gobotBase = \"http:\/\/research.swtch.com\/gobot_codereview\"\n\nvar clRegexp = regexp.MustCompile(`\\d+`)\n\n\/\/ CL represents a code review.\ntype CL struct {\n\tNumber string \/\/ e.g. \"5903061\"\n\tClosed bool\n\tOwner string \/\/ email address\n\n\tCreated, Modified time.Time\n\n\tDescription []byte `datastore:\",noindex\"`\n\tFirstLine string `datastore:\",noindex\"`\n\tLGTMs []string\n\tNotLGTMs []string\n\n\t\/\/ Mail information.\n\tSubject string `datastore:\",noindex\"`\n\tRecipients []string `datastore:\",noindex\"`\n\tLastMessageID string `datastore:\",noindex\"`\n\n\t\/\/ These are person IDs (e.g. \"rsc\"); they may be empty\n\tAuthor string\n\tReviewer string\n}\n\n\/\/ DisplayOwner returns the CL's owner, either as their email address\n\/\/ or the person ID if it's a reviewer. It is for display only.\nfunc (cl *CL) DisplayOwner() string {\n\tif p, ok := emailToPerson[cl.Owner]; ok {\n\t\treturn p\n\t}\n\treturn cl.Owner\n}\n\nfunc (cl *CL) FirstLineHTML() template.HTML {\n\ts := template.HTMLEscapeString(cl.FirstLine)\n\t\/\/ Embolden the package name.\n\tif i := strings.Index(s, \":\"); i >= 0 {\n\t\ts = \"<b>\" + s[:i] + \"<\/b>\" + s[i:]\n\t}\n\treturn template.HTML(s)\n}\n\nfunc formatEmails(e []string) template.HTML {\n\tx := make([]string, len(e))\n\tfor i, s := range e {\n\t\ts = template.HTMLEscapeString(s)\n\t\tif !strings.Contains(s, \"@\") {\n\t\t\ts = \"<b>\" + s + \"<\/b>\"\n\t\t}\n\t\ts = `<span class=\"email\">` + s + \"<\/span>\"\n\t\tx[i] = s\n\t}\n\treturn template.HTML(strings.Join(x, \", \"))\n}\n\nfunc (cl *CL) LGTMHTML() template.HTML {\n\treturn formatEmails(cl.LGTMs)\n}\n\nfunc (cl *CL) NotLGTMHTML() template.HTML {\n\treturn formatEmails(cl.NotLGTMs)\n}\n\nfunc (cl *CL) ModifiedAgo() string {\n\t\/\/ Just the first non-zero unit.\n\tunits := [...]struct {\n\t\tsuffix string\n\t\tunit time.Duration\n\t}{\n\t\t{\"d\", 24 * time.Hour},\n\t\t{\"h\", time.Hour},\n\t\t{\"m\", time.Minute},\n\t\t{\"s\", time.Second},\n\t}\n\td := time.Now().Sub(cl.Modified)\n\tfor _, u := range units {\n\t\tif d > u.unit {\n\t\t\treturn fmt.Sprintf(\"%d%s\", d\/u.unit, u.suffix)\n\t\t}\n\t}\n\treturn \"just now\"\n}\n\nfunc handleAssign(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\n\tif r.Method != \"POST\" {\n\t\thttp.Error(w, \"Bad method \"+r.Method, 400)\n\t\treturn\n\t}\n\n\tu := user.Current(c)\n\tperson, ok := emailToPerson[u.Email]\n\tif !ok {\n\t\thttp.Error(w, \"Not allowed\", http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tn, rev := r.FormValue(\"cl\"), r.FormValue(\"r\")\n\tif !clRegexp.MatchString(n) {\n\t\tc.Errorf(\"Bad CL %q\", n)\n\t\thttp.Error(w, \"Bad CL\", 400)\n\t\treturn\n\t}\n\tif _, ok := preferredEmail[rev]; !ok && rev != \"\" {\n\t\tc.Errorf(\"Unknown reviewer %q\", rev)\n\t\thttp.Error(w, \"Unknown reviewer\", 400)\n\t\treturn\n\t}\n\n\tkey := datastore.NewKey(c, \"CL\", n, 0, nil)\n\n\tif rev != \"\" {\n\t\t\/\/ Make sure the reviewer is listed in Rietveld as a reviewer.\n\t\turl := codereviewBase + \"\/\" + n + \"\/fields\"\n\t\tresp, err := urlfetch.Client(c).Get(url + \"?field=reviewers\")\n\t\tif err != nil {\n\t\t\tc.Errorf(\"Retrieving CL reviewer list failed: %v\", err)\n\t\t\thttp.Error(w, err.Error(), 500)\n\t\t\treturn\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tif resp.StatusCode != 200 {\n\t\t\tc.Errorf(\"Retrieving CL reviewer list failed: got HTTP response %d\", resp.StatusCode)\n\t\t\thttp.Error(w, \"Failed contacting Rietveld\", 500)\n\t\t\treturn\n\t\t}\n\n\t\tvar apiResp struct {\n\t\t\tReviewers []string `json:\"reviewers\"`\n\t\t}\n\t\tif err := json.NewDecoder(resp.Body).Decode(&apiResp); err != nil {\n\t\t\t\/\/ probably can't be retried\n\t\t\tmsg := fmt.Sprintf(\"Malformed JSON from %v: %v\", url, err)\n\t\t\tc.Errorf(\"%s\", msg)\n\t\t\thttp.Error(w, msg, 500)\n\t\t\treturn\n\t\t}\n\t\tfound := false\n\t\tfor _, r := range apiResp.Reviewers {\n\t\t\tif emailToPerson[r] == rev {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tc.Infof(\"Adding %v as a reviewer of CL %v\", rev, n)\n\n\t\t\turl := fmt.Sprintf(\"%s?cl=%s&r=%s&obo=%s\", gobotBase, n, rev, person)\n\t\t\tresp, err := urlfetch.Client(c).Get(url)\n\t\t\tif err != nil {\n\t\t\t\tc.Errorf(\"Gobot GET failed: %v\", err)\n\t\t\t\thttp.Error(w, err.Error(), 500)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer resp.Body.Close()\n\t\t\tif resp.StatusCode != 200 {\n\t\t\t\tc.Errorf(\"Gobot GET failed: got HTTP response %d\", resp.StatusCode)\n\t\t\t\thttp.Error(w, \"Failed contacting Gobot\", 500)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tc.Infof(\"Gobot said %q\", resp.Status)\n\t\t}\n\t}\n\n\t\/\/ Update our own record.\n\terr := datastore.RunInTransaction(c, func(c appengine.Context) error {\n\t\tcl := new(CL)\n\t\terr := datastore.Get(c, key, cl)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcl.Reviewer = rev\n\t\t_, err = datastore.Put(c, key, cl)\n\t\treturn err\n\t}, nil)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Assignment failed: %v\", err)\n\t\tc.Errorf(\"%s\", msg)\n\t\thttp.Error(w, msg, 500)\n\t\treturn\n\t}\n\tc.Infof(\"Assigned CL %v to %v\", n, rev)\n}\n\nfunc UpdateCLLater(c appengine.Context, n string, delay time.Duration) {\n\tt := taskqueue.NewPOSTTask(\"\/update-cl\", url.Values{\n\t\t\"cl\": []string{n},\n\t})\n\tt.Delay = delay\n\tif _, err := taskqueue.Add(c, t, \"update-cl\"); err != nil {\n\t\tc.Errorf(\"Failed adding task: %v\", err)\n\t}\n}\n\nfunc handleUpdateCL(w http.ResponseWriter, r *http.Request) {\n\tc := appengine.NewContext(r)\n\n\tn := r.FormValue(\"cl\")\n\tif !clRegexp.MatchString(n) {\n\t\tc.Errorf(\"Bad CL %q\", n)\n\t\thttp.Error(w, \"Bad CL\", 400)\n\t\treturn\n\t}\n\n\tif err := updateCL(c, n); err != nil {\n\t\tc.Errorf(\"Failed updating CL %v: %v\", n, err)\n\t\thttp.Error(w, \"Failed update\", 500)\n\t\treturn\n\t}\n\n\tio.WriteString(w, \"OK\")\n}\n\n\/\/ updateCL updates a single CL. If a retryable failure occurs, an error is returned.\nfunc updateCL(c appengine.Context, n string) error {\n\tc.Debugf(\"Updating CL %v\", n)\n\tkey := datastore.NewKey(c, \"CL\", n, 0, nil)\n\n\turl := codereviewBase + \"\/api\/\" + n + \"?messages=true\"\n\tresp, err := urlfetch.Client(c).Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\traw, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed reading HTTP body: %v\", err)\n\t}\n\n\t\/\/ Special case for abandoned CLs.\n\tif resp.StatusCode == 404 && bytes.Contains(raw, []byte(\"No issue exists with that id\")) {\n\t\t\/\/ Don't bother checking for errors. The CL might never have been saved, for instance.\n\t\tdatastore.Delete(c, key)\n\t\tc.Infof(\"Deleted abandoned CL %v\", n)\n\t\treturn nil\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"Update: got HTTP response %d\", resp.StatusCode)\n\t}\n\n\tvar apiResp struct {\n\t\tDescription string `json:\"description\"`\n\t\tReviewers []string `json:\"reviewers\"`\n\t\tCreated string `json:\"created\"`\n\t\tOwnerEmail string `json:\"owner_email\"`\n\t\tModified string `json:\"modified\"`\n\t\tClosed bool `json:\"closed\"`\n\t\tSubject string `json:\"subject\"`\n\t\tMessages []struct {\n\t\t\tText string `json:\"text\"`\n\t\t\tSender string `json:\"sender\"`\n\t\t\tRecipients []string `json:\"recipients\"`\n\t\t\tApproval bool `json:\"approval\"`\n\t\t} `json:\"messages\"`\n\t}\n\tif err := json.Unmarshal(raw, &apiResp); err != nil {\n\t\t\/\/ probably can't be retried\n\t\tc.Errorf(\"Malformed JSON from %v: %v\", url, err)\n\t\treturn nil\n\t}\n\t\/\/c.Infof(\"RAW: %+v\", apiResp)\n\n\tcl := &CL{\n\t\tNumber: n,\n\t\tClosed: apiResp.Closed,\n\t\tOwner: apiResp.OwnerEmail,\n\t\tDescription: []byte(apiResp.Description),\n\t\tFirstLine: apiResp.Description,\n\t\tSubject: apiResp.Subject,\n\t\tAuthor: emailToPerson[apiResp.OwnerEmail],\n\t}\n\tcl.Created, err = time.Parse(\"2006-01-02 15:04:05.000000\", apiResp.Created)\n\tif err != nil {\n\t\tc.Errorf(\"Bad creation time %q: %v\", apiResp.Created, err)\n\t}\n\tcl.Modified, err = time.Parse(\"2006-01-02 15:04:05.000000\", apiResp.Modified)\n\tif err != nil {\n\t\tc.Errorf(\"Bad modification time %q: %v\", apiResp.Modified, err)\n\t}\n\tif i := strings.Index(cl.FirstLine, \"\\n\"); i >= 0 {\n\t\tcl.FirstLine = cl.FirstLine[:i]\n\t}\n\t\/\/ Treat zero reviewers as a signal that the CL is completed.\n\t\/\/ This could be after the CL has been submitted, but before the CL author has synced,\n\t\/\/ but it could also be a CL manually edited to remove reviewers.\n\tif len(apiResp.Reviewers) == 0 {\n\t\tcl.Closed = true\n\t}\n\n\tlgtm := make(map[string]bool)\n\tnotLGTM := make(map[string]bool)\n\trcpt := make(map[string]bool)\n\tfor _, msg := range apiResp.Messages {\n\t\ts, rev := msg.Sender, false\n\t\tif p, ok := emailToPerson[s]; ok {\n\t\t\ts, rev = p, true\n\t\t}\n\n\t\t\/\/ CLs submitted by someone other than the CL owner do not immediately\n\t\t\/\/ transition to \"closed\". Let's simulate the intention by treating\n\t\t\/\/ messages starting with \"*** Submitted as \" from a reviewer as a\n\t\t\/\/ signal that the CL is now closed.\n\t\tif rev && strings.HasPrefix(msg.Text, \"*** Submitted as \") {\n\t\t\tcl.Closed = true\n\t\t}\n\n\t\tif msg.Approval {\n\t\t\tlgtm[s] = true\n\t\t\tdelete(notLGTM, s) \/\/ \"LGTM\" overrules previous \"NOT LGTM\"\n\t\t}\n\t\tif strings.Contains(msg.Text, \"NOT LGTM\") {\n\t\t\tnotLGTM[s] = true\n\t\t\tdelete(lgtm, s) \/\/ \"NOT LGTM\" overrules previous \"LGTM\"\n\t\t}\n\n\t\tfor _, r := range msg.Recipients {\n\t\t\trcpt[r] = true\n\t\t}\n\t}\n\tfor l := range lgtm {\n\t\tcl.LGTMs = append(cl.LGTMs, l)\n\t}\n\tfor l := range notLGTM {\n\t\tcl.NotLGTMs = append(cl.NotLGTMs, l)\n\t}\n\tfor r := range rcpt {\n\t\tcl.Recipients = append(cl.Recipients, r)\n\t}\n\tsort.Strings(cl.LGTMs)\n\tsort.Strings(cl.NotLGTMs)\n\tsort.Strings(cl.Recipients)\n\n\terr = datastore.RunInTransaction(c, func(c appengine.Context) error {\n\t\tocl := new(CL)\n\t\terr := datastore.Get(c, key, ocl)\n\t\tif err != nil && err != datastore.ErrNoSuchEntity {\n\t\t\treturn err\n\t\t} else if err == nil {\n\t\t\t\/\/ LastMessageID and Reviewer need preserving.\n\t\t\tcl.LastMessageID = ocl.LastMessageID\n\t\t\tcl.Reviewer = ocl.Reviewer\n\t\t}\n\t\t_, err = datastore.Put(c, key, cl)\n\t\treturn err\n\t}, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.Infof(\"Updated CL %v\", n)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/pilu\/traffic\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/appengine\"\n\t\"google.golang.org\/appengine\/datastore\"\n\t\"google.golang.org\/appengine\/log\"\n\t\"google.golang.org\/appengine\/taskqueue\"\n\t\"google.golang.org\/appengine\/user\"\n)\n\ntype RootData struct {\n\tPosts *[]Entry\n\tIsAdmin bool\n\tPage int64\n\tPrev int64\n\tNext int64\n}\n\nconst perPage = 30\n\nfunc RootHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\tif r.Request.URL.Path == \"\/page\/0\" {\n\t\thttp.Redirect(w, r.Request, \"\/\", 301)\n\t}\n\n\tc := appengine.NewContext(r.Request)\n\tpg, err := strconv.ParseInt(r.Param(\"page\"), 10, 64)\n\tif err != nil {\n\t\tlog.Infof(c, \"Error parsing: %+v\", err)\n\t\tpg = 0\n\t}\n\n\tentries, err := Pagination(c, perPage, int(pg*perPage))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\tdata := &RootData{\n\t\tPosts: entries,\n\t\tIsAdmin: user.IsAdmin(c),\n\t\tPage: pg,\n\t\tNext: pg + 1,\n\t\tPrev: pg - 1,\n\t}\n\n\t\/\/ If there are no posts left, don't show next button.\n\tif len(*entries) == 0 {\n\t\tdata.Next = -1\n\t}\n\n\t\/\/ Get next page's posts so we don't show the next page if there is none.\n\tnxt_entr, err := Pagination(c, perPage, int((pg+1)*perPage))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\tif len(*nxt_entr) == 0 {\n\t\tdata.Next = -1\n\t}\n\n\tw.Render(\"index\", data)\n}\n\nfunc AboutHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\tw.Render(\"about\", nil)\n}\n\nfunc UnimplementedHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\thttp.Error(w, \"Sorry, I haven't implemented this yet\", 500)\n}\n\nfunc RedirectHomeHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\thttp.Redirect(w, r.Request, \"\/\", 302)\n}\n\nfunc CleanWorkHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\tc := appengine.NewContext(r.Request)\n\tq := datastore.NewQuery(\"Entry\")\n\tentries := new([]Entry)\n\t_, err := q.GetAll(c, entries)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tfor _, p := range *entries {\n\t\t\/\/ TODO: Figure out how to unset all public.\n\t\tif &p.Draft == nil {\n\t\t\tp.Draft = false\n\t\t}\n\t\tif len(p.Title) == 0 {\n\t\t\tp.Title = fmt.Sprintf(\"Untitled #%d\", p.Id)\n\t\t}\n\t\tp.Save(c)\n\t}\n}\n\ntype SiteMapData struct {\n\tPosts *[]Entry\n\tNewest time.Time\n}\n\n\/\/ http:\/\/www.sitemaps.org\/protocol.html\nfunc SitemapHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\tc := appengine.NewContext(r.Request)\n\tentries, err := AllPosts(c)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\tdata := &SiteMapData{\n\t\tPosts: entries,\n\t\tNewest: (*entries)[0].Modified,\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/xml; charset=utf-8\")\n\tw.Render(\"sitemap\", data)\n}\n\nfunc queueWork(c context.Context, uri string) error {\n\tr := &taskqueue.RetryOptions{\n\t\tRetryLimit: 1,\n\t}\n\n\tt := taskqueue.NewPOSTTask(uri, url.Values{})\n\tt.RetryOptions = r\n\t_, err := taskqueue.Add(c, t, \"tasks\")\n\n\tif err != nil {\n\t\tlog.Errorf(c, \"Error queueing %s: %v\", err.Error())\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ This queues lots of work every fifteen minutes.\nfunc WorkQueueHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\tc := appengine.NewContext(r.Request)\n\n\t\/\/ Build data for the Archive Page\n\terr := queueWork(c, \"\/archive\/work\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\t\/\/ Download all the links.\n\terr = queueWork(c, \"\/link\/work\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\t\/\/\/\/ Update the stats\n\t\/\/err = queueWork(c, \"\/stats\/work\")\n\t\/\/if err != nil {\n\t\/\/\thttp.Error(w, err.Error(), 500)\n\t\/\/\treturn\n\t\/\/}\n\n\t\/\/ Update the longform data.\n\terr = queueWork(c, \"\/longform\/work\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\t\/\/ Clean the database\n\terr = queueWork(c, \"\/clean\/work\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\t\/\/ Update the Search Index\n\terr = queueWork(c, \"\/search\/work\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tfmt.Fprint(w, \"success.\\n\")\n}\n\n\/\/ This queues lots of work every twelve hours.\nfunc LongWorkQueueHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\tc := appengine.NewContext(r.Request)\n\n\t\/\/ Build data for the Archive Page\n\terr := queueWork(c, \"\/link\/long-work\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tfmt.Fprint(w, \"success.\\n\")\n}\n<commit_msg>enable stats<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/pilu\/traffic\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/appengine\"\n\t\"google.golang.org\/appengine\/datastore\"\n\t\"google.golang.org\/appengine\/log\"\n\t\"google.golang.org\/appengine\/taskqueue\"\n\t\"google.golang.org\/appengine\/user\"\n)\n\ntype RootData struct {\n\tPosts *[]Entry\n\tIsAdmin bool\n\tPage int64\n\tPrev int64\n\tNext int64\n}\n\nconst perPage = 30\n\nfunc RootHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\tif r.Request.URL.Path == \"\/page\/0\" {\n\t\thttp.Redirect(w, r.Request, \"\/\", 301)\n\t}\n\n\tc := appengine.NewContext(r.Request)\n\tpg, err := strconv.ParseInt(r.Param(\"page\"), 10, 64)\n\tif err != nil {\n\t\tlog.Infof(c, \"Error parsing: %+v\", err)\n\t\tpg = 0\n\t}\n\n\tentries, err := Pagination(c, perPage, int(pg*perPage))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\tdata := &RootData{\n\t\tPosts: entries,\n\t\tIsAdmin: user.IsAdmin(c),\n\t\tPage: pg,\n\t\tNext: pg + 1,\n\t\tPrev: pg - 1,\n\t}\n\n\t\/\/ If there are no posts left, don't show next button.\n\tif len(*entries) == 0 {\n\t\tdata.Next = -1\n\t}\n\n\t\/\/ Get next page's posts so we don't show the next page if there is none.\n\tnxt_entr, err := Pagination(c, perPage, int((pg+1)*perPage))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\tif len(*nxt_entr) == 0 {\n\t\tdata.Next = -1\n\t}\n\n\tw.Render(\"index\", data)\n}\n\nfunc AboutHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\tw.Render(\"about\", nil)\n}\n\nfunc UnimplementedHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\thttp.Error(w, \"Sorry, I haven't implemented this yet\", 500)\n}\n\nfunc RedirectHomeHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\thttp.Redirect(w, r.Request, \"\/\", 302)\n}\n\nfunc CleanWorkHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\tc := appengine.NewContext(r.Request)\n\tq := datastore.NewQuery(\"Entry\")\n\tentries := new([]Entry)\n\t_, err := q.GetAll(c, entries)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tfor _, p := range *entries {\n\t\t\/\/ TODO: Figure out how to unset all public.\n\t\tif &p.Draft == nil {\n\t\t\tp.Draft = false\n\t\t}\n\t\tif len(p.Title) == 0 {\n\t\t\tp.Title = fmt.Sprintf(\"Untitled #%d\", p.Id)\n\t\t}\n\t\tp.Save(c)\n\t}\n}\n\ntype SiteMapData struct {\n\tPosts *[]Entry\n\tNewest time.Time\n}\n\n\/\/ http:\/\/www.sitemaps.org\/protocol.html\nfunc SitemapHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\tc := appengine.NewContext(r.Request)\n\tentries, err := AllPosts(c)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\tdata := &SiteMapData{\n\t\tPosts: entries,\n\t\tNewest: (*entries)[0].Modified,\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/xml; charset=utf-8\")\n\tw.Render(\"sitemap\", data)\n}\n\nfunc queueWork(c context.Context, uri string) error {\n\tr := &taskqueue.RetryOptions{\n\t\tRetryLimit: 1,\n\t}\n\n\tt := taskqueue.NewPOSTTask(uri, url.Values{})\n\tt.RetryOptions = r\n\t_, err := taskqueue.Add(c, t, \"tasks\")\n\n\tif err != nil {\n\t\tlog.Errorf(c, \"Error queueing %s: %v\", err.Error())\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ This queues lots of work every fifteen minutes.\nfunc WorkQueueHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\tc := appengine.NewContext(r.Request)\n\n\t\/\/ Build data for the Archive Page\n\terr := queueWork(c, \"\/archive\/work\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\t\/\/ Download all the links.\n\terr = queueWork(c, \"\/link\/work\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\t\/\/ Update the stats\n\terr = queueWork(c, \"\/stats\/work\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\t\/\/ Update the longform data.\n\terr = queueWork(c, \"\/longform\/work\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\t\/\/ Clean the database\n\terr = queueWork(c, \"\/clean\/work\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\t\/\/ Update the Search Index\n\terr = queueWork(c, \"\/search\/work\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tfmt.Fprint(w, \"success.\\n\")\n}\n\n\/\/ This queues lots of work every twelve hours.\nfunc LongWorkQueueHandler(w traffic.ResponseWriter, r *traffic.Request) {\n\tc := appengine.NewContext(r.Request)\n\n\t\/\/ Build data for the Archive Page\n\terr := queueWork(c, \"\/link\/long-work\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tfmt.Fprint(w, \"success.\\n\")\n}\n<|endoftext|>"} {"text":"<commit_before>package ros\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/denverdino\/aliyungo\/common\"\n\t\"github.com\/denverdino\/aliyungo\/util\"\n)\n\n\/\/https:\/\/help.aliyun.com\/document_detail\/28916.html?spm=5176.doc49066.6.588.d7Ntjs\ntype Resource struct {\n\tId string\n\tName string\n\tType string\n\tStatus string\n\tStatusReason string\n\tUpdated string\n\tPhysicalId string\n}\n\nfunc (client *Client) DescribeResources(stackId, stackName string) ([]*Resource, error) {\n\tresponse := make([]*Resource, 0)\n\terr := client.Invoke(\"\", http.MethodGet, fmt.Sprintf(\"\/stacks\/%s\/%s\/resources\", stackName, stackId), nil, nil, &response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}\n\n\/\/https:\/\/help.aliyun.com\/document_detail\/28917.html?spm=5176.doc28916.6.589.BUPJqx\nfunc (client *Client) DescribeResource(stackId, stackName, resourceName string) (*Resource, error) {\n\tresponse := &Resource{}\n\terr := client.Invoke(\"\", http.MethodGet, fmt.Sprintf(\"\/stacks\/%s\/%s\/resources\/%s\", stackName, stackId, resourceName), nil, nil, &response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}\n\n\/\/https:\/\/help.aliyun.com\/document_detail\/28918.html?spm=5176.doc28917.6.590.smknll\ntype SupportStatus string\n\nconst (\n\tSUPPORT_STATUS_UNKNOWN = \"UNKNOWN\"\n\tSUPPORT_STATUS_SUPPORTED = \"SUPPORTED\"\n\tSUPPORT_STATUS_DEPRECATED = \"DEPRECATED\"\n\tSUPPORT_STATUS_UNSUPPORTED = \"UNSUPPORTED\"\n\tSUPPORT_STATUS_HIDDEN = \"HIDDEN\"\n)\n\ntype DescribeResoureTypesRequest struct {\n\tSupportStatus SupportStatus\n}\n\ntype DescribeResoureTypesResponse struct {\n\tcommon.Response\n\tResourceTypes []string\n}\n\nfunc (client *Client) DescribeResoureTypes(supportStatus SupportStatus) (*DescribeResoureTypesResponse, error) {\n\tquery := util.ConvertToQueryValues(&DescribeResoureTypesRequest{\n\t\tSupportStatus: supportStatus,\n\t})\n\n\tresponse := &DescribeResoureTypesResponse{}\n\terr := client.Invoke(\"\", http.MethodGet, \"\/resource_types\", query, nil, response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}\n\n\/\/https:\/\/help.aliyun.com\/document_detail\/28919.html?spm=5176.doc28918.6.591.7QkDYC\ntype DescribeResoureTypeResponse struct {\n\tcommon.Response\n\tResourceType string\n\tAttributes interface{}\n\tSupportStatus interface{}\n\tProperties interface{}\n}\n\nfunc (client *Client) DescribeResoureType(typeName string) (*DescribeResoureTypeResponse, error) {\n\tresponse := &DescribeResoureTypeResponse{}\n\terr := client.Invoke(\"\", http.MethodGet, fmt.Sprintf(\"\/resource_types\/%s\", typeName), nil, nil, response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}\n\n\/\/https:\/\/help.aliyun.com\/document_detail\/28920.html?spm=5176.doc28919.6.592.IiEwar\ntype DescribeResoureTypeTemplateResponse struct {\n\tcommon.Response\n\tROSTemplateFormatVersion string\n\tParameters interface{}\n\tOutputs interface{}\n\tResources interface{}\n}\n\nfunc (client *Client) DescribeResoureTypeTemplate(typeName string) (*DescribeResoureTypeTemplateResponse, error) {\n\tresponse := &DescribeResoureTypeTemplateResponse{}\n\terr := client.Invoke(\"\", http.MethodGet, fmt.Sprintf(\"\/resource_types\/%s\/template\", typeName), nil, nil, response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}\n<commit_msg>ros DescribeResource support region id<commit_after>package ros\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/denverdino\/aliyungo\/common\"\n\t\"github.com\/denverdino\/aliyungo\/util\"\n)\n\n\/\/https:\/\/help.aliyun.com\/document_detail\/28916.html?spm=5176.doc49066.6.588.d7Ntjs\ntype Resource struct {\n\tId string\n\tName string\n\tType string\n\tStatus string\n\tStatusReason string\n\tUpdated string\n\tPhysicalId string\n}\n\nfunc (client *Client) DescribeResources(stackId, stackName string) ([]*Resource, error) {\n\tresponse := make([]*Resource, 0)\n\terr := client.Invoke(\"\", http.MethodGet, fmt.Sprintf(\"\/stacks\/%s\/%s\/resources\", stackName, stackId), nil, nil, &response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}\n\n\/\/https:\/\/help.aliyun.com\/document_detail\/28917.html?spm=5176.doc28916.6.589.BUPJqx\nfunc (client *Client) DescribeResource(stackId, stackName, resourceName string) (*Resource, error) {\n\tresponse := &Resource{}\n\terr := client.Invoke(\"\", http.MethodGet, fmt.Sprintf(\"\/stacks\/%s\/%s\/resources\/%s\", stackName, stackId, resourceName), nil, nil, &response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}\n\n\/\/https:\/\/help.aliyun.com\/document_detail\/28917.html?spm=5176.doc28916.6.589.BUPJqx\nfunc (client *Client) DescribeResourceByRegion(regionId common.Region, stackId, stackName, resourceName string) (*Resource, error) {\n\tresponse := &Resource{}\n\terr := client.Invoke(regionId, http.MethodGet, fmt.Sprintf(\"\/stacks\/%s\/%s\/resources\/%s\", stackName, stackId, resourceName), nil, nil, &response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}\n\n\/\/https:\/\/help.aliyun.com\/document_detail\/28918.html?spm=5176.doc28917.6.590.smknll\ntype SupportStatus string\n\nconst (\n\tSUPPORT_STATUS_UNKNOWN = \"UNKNOWN\"\n\tSUPPORT_STATUS_SUPPORTED = \"SUPPORTED\"\n\tSUPPORT_STATUS_DEPRECATED = \"DEPRECATED\"\n\tSUPPORT_STATUS_UNSUPPORTED = \"UNSUPPORTED\"\n\tSUPPORT_STATUS_HIDDEN = \"HIDDEN\"\n)\n\ntype DescribeResoureTypesRequest struct {\n\tSupportStatus SupportStatus\n}\n\ntype DescribeResoureTypesResponse struct {\n\tcommon.Response\n\tResourceTypes []string\n}\n\nfunc (client *Client) DescribeResoureTypes(supportStatus SupportStatus) (*DescribeResoureTypesResponse, error) {\n\tquery := util.ConvertToQueryValues(&DescribeResoureTypesRequest{\n\t\tSupportStatus: supportStatus,\n\t})\n\n\tresponse := &DescribeResoureTypesResponse{}\n\terr := client.Invoke(\"\", http.MethodGet, \"\/resource_types\", query, nil, response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}\n\n\/\/https:\/\/help.aliyun.com\/document_detail\/28919.html?spm=5176.doc28918.6.591.7QkDYC\ntype DescribeResoureTypeResponse struct {\n\tcommon.Response\n\tResourceType string\n\tAttributes interface{}\n\tSupportStatus interface{}\n\tProperties interface{}\n}\n\nfunc (client *Client) DescribeResoureType(typeName string) (*DescribeResoureTypeResponse, error) {\n\tresponse := &DescribeResoureTypeResponse{}\n\terr := client.Invoke(\"\", http.MethodGet, fmt.Sprintf(\"\/resource_types\/%s\", typeName), nil, nil, response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}\n\n\/\/https:\/\/help.aliyun.com\/document_detail\/28920.html?spm=5176.doc28919.6.592.IiEwar\ntype DescribeResoureTypeTemplateResponse struct {\n\tcommon.Response\n\tROSTemplateFormatVersion string\n\tParameters interface{}\n\tOutputs interface{}\n\tResources interface{}\n}\n\nfunc (client *Client) DescribeResoureTypeTemplate(typeName string) (*DescribeResoureTypeTemplateResponse, error) {\n\tresponse := &DescribeResoureTypeTemplateResponse{}\n\terr := client.Invoke(\"\", http.MethodGet, fmt.Sprintf(\"\/resource_types\/%s\/template\", typeName), nil, nil, response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn response, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package pxemgr\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/gorilla\/mux\"\n\t\"crypto\/x509\"\n\t\"crypto\/tls\"\n)\n\ntype EtcdNode struct {\n\tKey string `json:\"key\"`\n\tValue string `json:\"value,omitempty\"`\n\tNodes []*EtcdNode `json:\"nodes,omitempty\"`\n\tDir bool `json:\"dir,omitempty\"`\n}\n\ntype EtcdResponse struct {\n\tAction string `json:\"action\"`\n\tNode *EtcdNode `json:\"node,omitempty\"`\n}\n\ntype EtcdResponseError struct {\n\tErrorCode int `json:\"errorCode\"`\n\tMessage string `json:\"message\"`\n\tCause string `json:\"cause\"`\n}\n\nfunc (mgr *pxeManagerT) defineEtcdDiscoveryRoutes(etcdRouter *mux.Router) {\n\tetcdRouter.PathPrefix(\"\/new\").Methods(\"PUT\").HandlerFunc(mgr.etcdDiscoveryNewCluster)\n\n\ttokenRouter := etcdRouter.PathPrefix(\"\/{token:[a-f0-9]{32}}\").Subrouter()\n\ttokenRouter.PathPrefix(\"\/_config\/size\").Methods(\"GET\").HandlerFunc(mgr.etcdDiscoveryProxyHandler)\n\ttokenRouter.PathPrefix(\"\/_config\/size\").Methods(\"PUT\").HandlerFunc(mgr.etcdDiscoveryProxyHandler)\n\ttokenRouter.PathPrefix(\"\/{machine}\").Methods(\"PUT\").HandlerFunc(mgr.etcdDiscoveryProxyHandler)\n\ttokenRouter.PathPrefix(\"\/{machine}\").Methods(\"GET\").HandlerFunc(mgr.etcdDiscoveryProxyHandler)\n\ttokenRouter.PathPrefix(\"\/{machine}\").Methods(\"DELETE\").HandlerFunc(mgr.etcdDiscoveryProxyHandler)\n\ttokenRouter.Methods(\"GET\").HandlerFunc(mgr.etcdDiscoveryProxyHandler)\n\n\tetcdRouter.Methods(\"GET\").HandlerFunc(mgr.etcdDiscoveryHandler)\n}\n\nfunc (mgr *pxeManagerT) etcdDiscoveryHandler(w http.ResponseWriter, r *http.Request) {\n\thttp.Redirect(w, r,\n\t\t\"https:\/\/github.com\/giantswarm\/mayu\/blob\/master\/docs\/etcd-discovery.md\",\n\t\thttp.StatusMovedPermanently,\n\t)\n}\n\nfunc (mgr *pxeManagerT) etcdDiscoveryNewCluster(w http.ResponseWriter, r *http.Request) {\n\tvar err error\n\tsize := mgr.defaultEtcdQuorumSize\n\ts := r.FormValue(\"size\")\n\tif s != \"\" {\n\t\tsize, err = strconv.Atoi(s)\n\t\tif err != nil {\n\t\t\thttpError(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t}\n\n\ttoken, err := mgr.cluster.GenerateEtcdDiscoveryToken()\n\tif err != nil {\n\t\thttpError(w, fmt.Sprintf(\"Unable to generate token '%v'\", err), 400)\n\t\treturn\n\t}\n\n\terr = mgr.cluster.StoreEtcdDiscoveryToken(mgr.etcdEndpoint, mgr.etcdCAFile, token, size)\n\tif err != nil {\n\t\thttpError(w, fmt.Sprintf(\"Unable to store token in etcd '%v'\", err), 400)\n\t\treturn\n\t}\n\n\tglog.V(2).Infof(\"New cluster created '%s'\", token)\n\n\tfmt.Fprintf(w, \"%s\/%s\", mgr.etcdDiscoveryBaseURL(), token)\n}\n\nfunc (mgr *pxeManagerT) etcdDiscoveryBaseURL() string {\n\treturn fmt.Sprintf(\"%s\/etcd\", mgr.thisHost())\n}\n\nfunc (mgr *pxeManagerT) etcdDiscoveryProxyHandler(w http.ResponseWriter, r *http.Request) {\n\tresp, err := mgr.etcdDiscoveryProxyRequest(r)\n\tif err != nil {\n\t\thttpError(w, fmt.Sprintf(\"Error proxying request to etcd '%v'\", err), 500)\n\t}\n\n\tcopyHeader(w.Header(), resp.Header)\n\tw.WriteHeader(resp.StatusCode)\n\tio.Copy(w, resp.Body)\n}\n\nfunc (mgr *pxeManagerT) etcdDiscoveryProxyRequest(r *http.Request) (*http.Response, error) {\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar transport = http.DefaultTransport\n\n\tif strings.HasPrefix(mgr.etcdEndpoint, \"https\") && mgr.etcdCAFile != \"\" {\n\t\tcustomCA := x509.NewCertPool()\n\n\t\tpemData, err := ioutil.ReadFile(mgr.etcdCAFile)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(\"unable to read custom CA file: \"+err.Error())\n\t\t}\n\t\tcustomCA.AppendCertsFromPEM(pemData)\n\t\ttransport = &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{RootCAs:customCA},\n\t\t}\n\t}\n\n\tu, err := url.Parse(mgr.etcdEndpoint)\n\tif err != nil {\n\t\tnil, errors.New(\"invalid etcd-endpoint: \"+err.Error())\n\t}\n\tu.Path = path.Join(\"v2\", \"keys\", \"_etcd\", \"registry\", strings.TrimPrefix(r.URL.Path, \"\/etcd\"))\n\tu.RawQuery = r.URL.RawQuery\n\n\tfor i := 0; i <= 10; i++ {\n\n\t\tbuf := bytes.NewBuffer(body)\n\t\tglog.V(2).Infof(\"Body '%s'\", body)\n\n\t\toutreq, err := http.NewRequest(r.Method, u.String(), buf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcopyHeader(outreq.Header, r.Header)\n\n\t\tclient := http.Client{Transport:transport}\n\t\tresp, err := client.Do(outreq)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn resp, nil\n\t}\n\n\treturn nil, errors.New(\"All attempts at proxying to etcd failed\")\n}\n\n\/\/ copyHeader copies all of the headers from dst to src.\nfunc copyHeader(dst, src http.Header) {\n\tfor k, v := range src {\n\t\tfor _, q := range v {\n\t\t\tdst.Add(k, q)\n\t\t}\n\t}\n}\n<commit_msg>scheme condition simplified<commit_after>package pxemgr\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/gorilla\/mux\"\n\t\"crypto\/x509\"\n\t\"crypto\/tls\"\n)\n\ntype EtcdNode struct {\n\tKey string `json:\"key\"`\n\tValue string `json:\"value,omitempty\"`\n\tNodes []*EtcdNode `json:\"nodes,omitempty\"`\n\tDir bool `json:\"dir,omitempty\"`\n}\n\ntype EtcdResponse struct {\n\tAction string `json:\"action\"`\n\tNode *EtcdNode `json:\"node,omitempty\"`\n}\n\ntype EtcdResponseError struct {\n\tErrorCode int `json:\"errorCode\"`\n\tMessage string `json:\"message\"`\n\tCause string `json:\"cause\"`\n}\n\nfunc (mgr *pxeManagerT) defineEtcdDiscoveryRoutes(etcdRouter *mux.Router) {\n\tetcdRouter.PathPrefix(\"\/new\").Methods(\"PUT\").HandlerFunc(mgr.etcdDiscoveryNewCluster)\n\n\ttokenRouter := etcdRouter.PathPrefix(\"\/{token:[a-f0-9]{32}}\").Subrouter()\n\ttokenRouter.PathPrefix(\"\/_config\/size\").Methods(\"GET\").HandlerFunc(mgr.etcdDiscoveryProxyHandler)\n\ttokenRouter.PathPrefix(\"\/_config\/size\").Methods(\"PUT\").HandlerFunc(mgr.etcdDiscoveryProxyHandler)\n\ttokenRouter.PathPrefix(\"\/{machine}\").Methods(\"PUT\").HandlerFunc(mgr.etcdDiscoveryProxyHandler)\n\ttokenRouter.PathPrefix(\"\/{machine}\").Methods(\"GET\").HandlerFunc(mgr.etcdDiscoveryProxyHandler)\n\ttokenRouter.PathPrefix(\"\/{machine}\").Methods(\"DELETE\").HandlerFunc(mgr.etcdDiscoveryProxyHandler)\n\ttokenRouter.Methods(\"GET\").HandlerFunc(mgr.etcdDiscoveryProxyHandler)\n\n\tetcdRouter.Methods(\"GET\").HandlerFunc(mgr.etcdDiscoveryHandler)\n}\n\nfunc (mgr *pxeManagerT) etcdDiscoveryHandler(w http.ResponseWriter, r *http.Request) {\n\thttp.Redirect(w, r,\n\t\t\"https:\/\/github.com\/giantswarm\/mayu\/blob\/master\/docs\/etcd-discovery.md\",\n\t\thttp.StatusMovedPermanently,\n\t)\n}\n\nfunc (mgr *pxeManagerT) etcdDiscoveryNewCluster(w http.ResponseWriter, r *http.Request) {\n\tvar err error\n\tsize := mgr.defaultEtcdQuorumSize\n\ts := r.FormValue(\"size\")\n\tif s != \"\" {\n\t\tsize, err = strconv.Atoi(s)\n\t\tif err != nil {\n\t\t\thttpError(w, err.Error(), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t}\n\n\ttoken, err := mgr.cluster.GenerateEtcdDiscoveryToken()\n\tif err != nil {\n\t\thttpError(w, fmt.Sprintf(\"Unable to generate token '%v'\", err), 400)\n\t\treturn\n\t}\n\n\terr = mgr.cluster.StoreEtcdDiscoveryToken(mgr.etcdEndpoint, mgr.etcdCAFile, token, size)\n\tif err != nil {\n\t\thttpError(w, fmt.Sprintf(\"Unable to store token in etcd '%v'\", err), 400)\n\t\treturn\n\t}\n\n\tglog.V(2).Infof(\"New cluster created '%s'\", token)\n\n\tfmt.Fprintf(w, \"%s\/%s\", mgr.etcdDiscoveryBaseURL(), token)\n}\n\nfunc (mgr *pxeManagerT) etcdDiscoveryBaseURL() string {\n\treturn fmt.Sprintf(\"%s\/etcd\", mgr.thisHost())\n}\n\nfunc (mgr *pxeManagerT) etcdDiscoveryProxyHandler(w http.ResponseWriter, r *http.Request) {\n\tresp, err := mgr.etcdDiscoveryProxyRequest(r)\n\tif err != nil {\n\t\thttpError(w, fmt.Sprintf(\"Error proxying request to etcd '%v'\", err), 500)\n\t}\n\n\tcopyHeader(w.Header(), resp.Header)\n\tw.WriteHeader(resp.StatusCode)\n\tio.Copy(w, resp.Body)\n}\n\nfunc (mgr *pxeManagerT) etcdDiscoveryProxyRequest(r *http.Request) (*http.Response, error) {\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tu, err := url.Parse(mgr.etcdEndpoint)\n\tif err != nil {\n\t\tnil, errors.New(\"invalid etcd-endpoint: \"+err.Error())\n\t}\n\tu.Path = path.Join(\"v2\", \"keys\", \"_etcd\", \"registry\", strings.TrimPrefix(r.URL.Path, \"\/etcd\"))\n\tu.RawQuery = r.URL.RawQuery\n\tvar transport = http.DefaultTransport\n\n\tif u.Scheme == \"https\" && mgr.etcdCAFile != \"\" {\n\t\tcustomCA := x509.NewCertPool()\n\n\t\tpemData, err := ioutil.ReadFile(mgr.etcdCAFile)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(\"unable to read custom CA file: \"+err.Error())\n\t\t}\n\t\tcustomCA.AppendCertsFromPEM(pemData)\n\t\ttransport = &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{RootCAs:customCA},\n\t\t}\n\t}\n\n\tfor i := 0; i <= 10; i++ {\n\n\t\tbuf := bytes.NewBuffer(body)\n\t\tglog.V(2).Infof(\"Body '%s'\", body)\n\n\t\toutreq, err := http.NewRequest(r.Method, u.String(), buf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tcopyHeader(outreq.Header, r.Header)\n\n\t\tclient := http.Client{Transport:transport}\n\t\tresp, err := client.Do(outreq)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn resp, nil\n\t}\n\n\treturn nil, errors.New(\"All attempts at proxying to etcd failed\")\n}\n\n\/\/ copyHeader copies all of the headers from dst to src.\nfunc copyHeader(dst, src http.Header) {\n\tfor k, v := range src {\n\t\tfor _, q := range v {\n\t\t\tdst.Add(k, q)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/animenotifier\/arn\"\n\t\"github.com\/fatih\/color\"\n\n\t\"github.com\/aerogo\/crawler\"\n)\n\nconst (\n\t\/\/ The maximum age of files we accept until we force a refresh.\n\tmaxAge = 30 * 24 * time.Hour\n\tuserAgent = \"Mozilla\/5.0 (X11; Linux x86_64) AppleWebKit\/537.36 (KHTML, like Gecko) Chrome\/64.0.3282.166 Safari\/537.36\"\n)\n\nfunc main() {\n\tdefer arn.Node.Close()\n\n\t\/\/ Filter anime with MAL ID\n\tanimes := []*arn.Anime{}\n\n\tfor anime := range arn.StreamAnime() {\n\t\tmalID := anime.GetMapping(\"myanimelist\/anime\")\n\n\t\tif malID == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tanimes = append(animes, anime)\n\t}\n\n\tcolor.Yellow(\"Found %d anime\", len(animes))\n\n\t\/\/ Create crawler\n\tmalCrawler := crawler.New(\n\t\tmap[string]string{\n\t\t\t\"User-Agent\": userAgent,\n\t\t\t\"Accept-Encoding\": \"gzip\",\n\t\t},\n\t\t1500*time.Millisecond,\n\t\tlen(animes),\n\t)\n\n\t\/\/ Sort so that we download the most important ones first\n\tarn.SortAnimeByQuality(animes, \"\")\n\n\t\/\/ Queue up URLs\n\tcount := 0\n\n\tfor _, anime := range animes {\n\t\tmalID := anime.GetMapping(\"myanimelist\/anime\")\n\t\turl := \"https:\/\/myanimelist.net\/anime\/\" + malID\n\t\tfilePath := fmt.Sprintf(\"files\/anime-%s.html\", malID)\n\t\tfileInfo, err := os.Stat(filePath)\n\n\t\tif err == nil && time.Since(fileInfo.ModTime()) <= maxAge {\n\t\t\t\/\/ fmt.Println(color.YellowString(url), \"skip\")\n\t\t\tcontinue\n\t\t}\n\n\t\tmalCrawler.Queue(&crawler.Task{\n\t\t\tURL: url,\n\t\t\tDestination: filePath,\n\t\t})\n\n\t\tcount++\n\t}\n\n\tcolor.Yellow(\"Queued up %d links\", count)\n\tmalCrawler.Wait()\n}\n<commit_msg>Updated crawler settings<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/animenotifier\/arn\"\n\t\"github.com\/fatih\/color\"\n\n\t\"github.com\/aerogo\/crawler\"\n)\n\nconst (\n\t\/\/ The maximum age of files we accept until we force a refresh.\n\tmaxAge = 30 * 24 * time.Hour\n\tdelayBetweenRequests = 1000 * time.Millisecond\n\tuserAgent = \"Mozilla\/5.0 (X11; Linux x86_64) AppleWebKit\/537.36 (KHTML, like Gecko) Chrome\/64.0.3282.166 Safari\/537.36\"\n)\n\nfunc main() {\n\tdefer arn.Node.Close()\n\n\t\/\/ Filter anime with MAL ID\n\tanimes := []*arn.Anime{}\n\n\tfor anime := range arn.StreamAnime() {\n\t\tmalID := anime.GetMapping(\"myanimelist\/anime\")\n\n\t\tif malID == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tanimes = append(animes, anime)\n\t}\n\n\tcolor.Yellow(\"Found %d anime\", len(animes))\n\n\t\/\/ Create crawler\n\tmalCrawler := crawler.New(\n\t\tmap[string]string{\n\t\t\t\"User-Agent\": userAgent,\n\t\t\t\"Accept-Encoding\": \"gzip\",\n\t\t},\n\t\tdelayBetweenRequests,\n\t\tlen(animes),\n\t)\n\n\t\/\/ Sort so that we download the most important ones first\n\tarn.SortAnimeByQuality(animes, \"\")\n\n\t\/\/ Queue up URLs\n\tcount := 0\n\n\tfor _, anime := range animes {\n\t\tmalID := anime.GetMapping(\"myanimelist\/anime\")\n\t\turl := \"https:\/\/myanimelist.net\/anime\/\" + malID\n\t\tfilePath := fmt.Sprintf(\"files\/anime-%s.html\", malID)\n\t\tfileInfo, err := os.Stat(filePath)\n\n\t\tif err == nil && time.Since(fileInfo.ModTime()) <= maxAge {\n\t\t\t\/\/ fmt.Println(color.YellowString(url), \"skip\")\n\t\t\tcontinue\n\t\t}\n\n\t\tmalCrawler.Queue(&crawler.Task{\n\t\t\tURL: url,\n\t\t\tDestination: filePath,\n\t\t})\n\n\t\tcount++\n\t}\n\n\tcolor.Yellow(\"Queued up %d links\", count)\n\tmalCrawler.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>gofmt<commit_after><|endoftext|>"} {"text":"<commit_before>package daemon\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/pkg\/errors\"\n\tstdprometheus \"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/weaveworks\/common\/middleware\"\n\t\"github.com\/weaveworks\/flux\"\n\n\t\"github.com\/weaveworks\/flux\/api\"\n\ttransport \"github.com\/weaveworks\/flux\/http\"\n\t\"github.com\/weaveworks\/flux\/job\"\n\tfluxmetrics \"github.com\/weaveworks\/flux\/metrics\"\n\t\"github.com\/weaveworks\/flux\/policy\"\n\t\"github.com\/weaveworks\/flux\/update\"\n)\n\nvar (\n\trequestDuration = stdprometheus.NewHistogramVec(stdprometheus.HistogramOpts{\n\t\tNamespace: \"flux\",\n\t\tName: \"request_duration_seconds\",\n\t\tHelp: \"Time (in seconds) spent serving HTTP requests.\",\n\t\tBuckets: stdprometheus.DefBuckets,\n\t}, []string{fluxmetrics.LabelMethod, fluxmetrics.LabelRoute, \"status_code\", \"ws\"})\n)\n\n\/\/ An API server for the daemon\nfunc NewRouter() *mux.Router {\n\tr := transport.NewAPIRouter()\n\n\t\/\/ All old versions are deprecated in the daemon. Use an up to\n\t\/\/ date client!\n\ttransport.DeprecateVersions(r, \"v1\", \"v2\", \"v3\", \"v4\", \"v5\")\n\t\/\/ We assume every request that doesn't match a route is a client\n\t\/\/ calling an old or hitherto unsupported API.\n\tr.NewRoute().Name(\"NotFound\").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttransport.WriteError(w, r, http.StatusNotFound, transport.MakeAPINotFound(r.URL.Path))\n\t})\n\n\treturn r\n}\n\nfunc NewHandler(s api.Server, r *mux.Router) http.Handler {\n\thandle := HTTPServer{s}\n\tr.Get(\"JobStatus\").HandlerFunc(handle.JobStatus)\n\tr.Get(\"SyncStatus\").HandlerFunc(handle.SyncStatus)\n\tr.Get(\"UpdateManifests\").HandlerFunc(handle.UpdateManifests)\n\tr.Get(\"ListServices\").HandlerFunc(handle.ListServices)\n\tr.Get(\"ListImages\").HandlerFunc(handle.ListImages)\n\tr.Get(\"Export\").HandlerFunc(handle.Export)\n\tr.Get(\"GitRepoConfig\").HandlerFunc(handle.GitRepoConfig)\n\n\t\/\/ These handlers persist to support requests from older fluxctls. In general we\n\t\/\/ should avoid adding references to them so that they can eventually be removed.\n\tr.Get(\"UpdateImages\").HandlerFunc(handle.UpdateImages)\n\tr.Get(\"UpdatePolicies\").HandlerFunc(handle.UpdatePolicies)\n\tr.Get(\"GetPublicSSHKey\").HandlerFunc(handle.GetPublicSSHKey)\n\tr.Get(\"RegeneratePublicSSHKey\").HandlerFunc(handle.RegeneratePublicSSHKey)\n\n\treturn middleware.Instrument{\n\t\tRouteMatcher: r,\n\t\tDuration: requestDuration,\n\t}.Wrap(r)\n}\n\ntype HTTPServer struct {\n\tserver api.Server\n}\n\nfunc (s HTTPServer) JobStatus(w http.ResponseWriter, r *http.Request) {\n\tid := job.ID(mux.Vars(r)[\"id\"])\n\tstatus, err := s.server.JobStatus(r.Context(), id)\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\ttransport.JSONResponse(w, r, status)\n}\n\nfunc (s HTTPServer) SyncStatus(w http.ResponseWriter, r *http.Request) {\n\tref := mux.Vars(r)[\"ref\"]\n\tcommits, err := s.server.SyncStatus(r.Context(), ref)\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\ttransport.JSONResponse(w, r, commits)\n}\n\nfunc (s HTTPServer) ListImages(w http.ResponseWriter, r *http.Request) {\n\tservice := mux.Vars(r)[\"service\"]\n\tspec, err := update.ParseResourceSpec(service)\n\tif err != nil {\n\t\ttransport.WriteError(w, r, http.StatusBadRequest, errors.Wrapf(err, \"parsing service spec %q\", service))\n\t\treturn\n\t}\n\n\td, err := s.server.ListImages(r.Context(), spec)\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\ttransport.JSONResponse(w, r, d)\n}\n\nfunc (s HTTPServer) UpdateManifests(w http.ResponseWriter, r *http.Request) {\n\tvar spec update.Spec\n\tif err := json.NewDecoder(r.Body).Decode(&spec); err != nil {\n\t\ttransport.WriteError(w, r, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tjobID, err := s.server.UpdateManifests(r.Context(), spec)\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\ttransport.JSONResponse(w, r, jobID)\n}\n\nfunc (s HTTPServer) ListServices(w http.ResponseWriter, r *http.Request) {\n\tnamespace := mux.Vars(r)[\"namespace\"]\n\tres, err := s.server.ListServices(r.Context(), namespace)\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\ttransport.JSONResponse(w, r, res)\n}\n\nfunc (s HTTPServer) Export(w http.ResponseWriter, r *http.Request) {\n\tstatus, err := s.server.Export(r.Context())\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\n\ttransport.JSONResponse(w, r, status)\n}\n\nfunc (s HTTPServer) GitRepoConfig(w http.ResponseWriter, r *http.Request) {\n\tvar regenerate bool\n\tif err := json.NewDecoder(r.Body).Decode(®enerate); err != nil {\n\t\ttransport.WriteError(w, r, http.StatusBadRequest, err)\n\t}\n\tres, err := s.server.GitRepoConfig(r.Context(), regenerate)\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t}\n\ttransport.JSONResponse(w, r, res)\n}\n\n\/\/ --- handlers supporting deprecated requests\n\nfunc (s HTTPServer) UpdateImages(w http.ResponseWriter, r *http.Request) {\n\tvar (\n\t\tvars = mux.Vars(r)\n\t\timage = vars[\"image\"]\n\t\tkind = vars[\"kind\"]\n\t)\n\tif err := r.ParseForm(); err != nil {\n\t\ttransport.WriteError(w, r, http.StatusBadRequest, errors.Wrapf(err, \"parsing form\"))\n\t\treturn\n\t}\n\tvar serviceSpecs []update.ResourceSpec\n\tfor _, service := range r.Form[\"service\"] {\n\t\tserviceSpec, err := update.ParseResourceSpec(service)\n\t\tif err != nil {\n\t\t\ttransport.WriteError(w, r, http.StatusBadRequest, errors.Wrapf(err, \"parsing service spec %q\", service))\n\t\t\treturn\n\t\t}\n\t\tserviceSpecs = append(serviceSpecs, serviceSpec)\n\t}\n\timageSpec, err := update.ParseImageSpec(image)\n\tif err != nil {\n\t\ttransport.WriteError(w, r, http.StatusBadRequest, errors.Wrapf(err, \"parsing image spec %q\", image))\n\t\treturn\n\t}\n\treleaseKind, err := update.ParseReleaseKind(kind)\n\tif err != nil {\n\t\ttransport.WriteError(w, r, http.StatusBadRequest, errors.Wrapf(err, \"parsing release kind %q\", kind))\n\t\treturn\n\t}\n\n\tvar excludes []flux.ResourceID\n\tfor _, ex := range r.URL.Query()[\"exclude\"] {\n\t\ts, err := flux.ParseResourceID(ex)\n\t\tif err != nil {\n\t\t\ttransport.WriteError(w, r, http.StatusBadRequest, errors.Wrapf(err, \"parsing excluded service %q\", ex))\n\t\t\treturn\n\t\t}\n\t\texcludes = append(excludes, s)\n\t}\n\n\tspec := update.ReleaseSpec{\n\t\tServiceSpecs: serviceSpecs,\n\t\tImageSpec: imageSpec,\n\t\tKind: releaseKind,\n\t\tExcludes: excludes,\n\t}\n\tcause := update.Cause{\n\t\tUser: r.FormValue(\"user\"),\n\t\tMessage: r.FormValue(\"message\"),\n\t}\n\tresult, err := s.server.UpdateManifests(r.Context(), update.Spec{Type: update.Images, Cause: cause, Spec: spec})\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\ttransport.JSONResponse(w, r, result)\n}\n\nfunc (s HTTPServer) UpdatePolicies(w http.ResponseWriter, r *http.Request) {\n\tvar updates policy.Updates\n\tif err := json.NewDecoder(r.Body).Decode(&updates); err != nil {\n\t\ttransport.WriteError(w, r, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tcause := update.Cause{\n\t\tUser: r.FormValue(\"user\"),\n\t\tMessage: r.FormValue(\"message\"),\n\t}\n\n\tjobID, err := s.server.UpdateManifests(r.Context(), update.Spec{Type: update.Policy, Cause: cause, Spec: updates})\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\n\ttransport.JSONResponse(w, r, jobID)\n}\n\nfunc (s HTTPServer) GetPublicSSHKey(w http.ResponseWriter, r *http.Request) {\n\tres, err := s.server.GitRepoConfig(r.Context(), false)\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\ttransport.JSONResponse(w, r, res.PublicSSHKey)\n}\n\nfunc (s HTTPServer) RegeneratePublicSSHKey(w http.ResponseWriter, r *http.Request) {\n\t_, err := s.server.GitRepoConfig(r.Context(), true)\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusNoContent)\n\treturn\n}\n<commit_msg>Use route constants instead of magic strings in HTTP handler<commit_after>package daemon\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/pkg\/errors\"\n\tstdprometheus \"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/weaveworks\/common\/middleware\"\n\t\"github.com\/weaveworks\/flux\"\n\n\t\"github.com\/weaveworks\/flux\/api\"\n\ttransport \"github.com\/weaveworks\/flux\/http\"\n\t\"github.com\/weaveworks\/flux\/job\"\n\tfluxmetrics \"github.com\/weaveworks\/flux\/metrics\"\n\t\"github.com\/weaveworks\/flux\/policy\"\n\t\"github.com\/weaveworks\/flux\/update\"\n)\n\nvar (\n\trequestDuration = stdprometheus.NewHistogramVec(stdprometheus.HistogramOpts{\n\t\tNamespace: \"flux\",\n\t\tName: \"request_duration_seconds\",\n\t\tHelp: \"Time (in seconds) spent serving HTTP requests.\",\n\t\tBuckets: stdprometheus.DefBuckets,\n\t}, []string{fluxmetrics.LabelMethod, fluxmetrics.LabelRoute, \"status_code\", \"ws\"})\n)\n\n\/\/ An API server for the daemon\nfunc NewRouter() *mux.Router {\n\tr := transport.NewAPIRouter()\n\n\t\/\/ All old versions are deprecated in the daemon. Use an up to\n\t\/\/ date client!\n\ttransport.DeprecateVersions(r, \"v1\", \"v2\", \"v3\", \"v4\", \"v5\")\n\t\/\/ We assume every request that doesn't match a route is a client\n\t\/\/ calling an old or hitherto unsupported API.\n\tr.NewRoute().Name(\"NotFound\").HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttransport.WriteError(w, r, http.StatusNotFound, transport.MakeAPINotFound(r.URL.Path))\n\t})\n\n\treturn r\n}\n\nfunc NewHandler(s api.Server, r *mux.Router) http.Handler {\n\thandle := HTTPServer{s}\n\tr.Get(transport.ListServices).HandlerFunc(handle.ListServices)\n\tr.Get(transport.ListImages).HandlerFunc(handle.ListImages)\n\tr.Get(transport.UpdateManifests).HandlerFunc(handle.UpdateManifests)\n\tr.Get(transport.JobStatus).HandlerFunc(handle.JobStatus)\n\tr.Get(transport.SyncStatus).HandlerFunc(handle.SyncStatus)\n\tr.Get(transport.Export).HandlerFunc(handle.Export)\n\tr.Get(transport.GitRepoConfig).HandlerFunc(handle.GitRepoConfig)\n\n\t\/\/ These handlers persist to support requests from older fluxctls. In general we\n\t\/\/ should avoid adding references to them so that they can eventually be removed.\n\tr.Get(transport.UpdateImages).HandlerFunc(handle.UpdateImages)\n\tr.Get(transport.UpdatePolicies).HandlerFunc(handle.UpdatePolicies)\n\tr.Get(transport.GetPublicSSHKey).HandlerFunc(handle.GetPublicSSHKey)\n\tr.Get(transport.RegeneratePublicSSHKey).HandlerFunc(handle.RegeneratePublicSSHKey)\n\n\treturn middleware.Instrument{\n\t\tRouteMatcher: r,\n\t\tDuration: requestDuration,\n\t}.Wrap(r)\n}\n\ntype HTTPServer struct {\n\tserver api.Server\n}\n\nfunc (s HTTPServer) JobStatus(w http.ResponseWriter, r *http.Request) {\n\tid := job.ID(mux.Vars(r)[\"id\"])\n\tstatus, err := s.server.JobStatus(r.Context(), id)\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\ttransport.JSONResponse(w, r, status)\n}\n\nfunc (s HTTPServer) SyncStatus(w http.ResponseWriter, r *http.Request) {\n\tref := mux.Vars(r)[\"ref\"]\n\tcommits, err := s.server.SyncStatus(r.Context(), ref)\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\ttransport.JSONResponse(w, r, commits)\n}\n\nfunc (s HTTPServer) ListImages(w http.ResponseWriter, r *http.Request) {\n\tservice := mux.Vars(r)[\"service\"]\n\tspec, err := update.ParseResourceSpec(service)\n\tif err != nil {\n\t\ttransport.WriteError(w, r, http.StatusBadRequest, errors.Wrapf(err, \"parsing service spec %q\", service))\n\t\treturn\n\t}\n\n\td, err := s.server.ListImages(r.Context(), spec)\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\ttransport.JSONResponse(w, r, d)\n}\n\nfunc (s HTTPServer) UpdateManifests(w http.ResponseWriter, r *http.Request) {\n\tvar spec update.Spec\n\tif err := json.NewDecoder(r.Body).Decode(&spec); err != nil {\n\t\ttransport.WriteError(w, r, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tjobID, err := s.server.UpdateManifests(r.Context(), spec)\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\ttransport.JSONResponse(w, r, jobID)\n}\n\nfunc (s HTTPServer) ListServices(w http.ResponseWriter, r *http.Request) {\n\tnamespace := mux.Vars(r)[\"namespace\"]\n\tres, err := s.server.ListServices(r.Context(), namespace)\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\ttransport.JSONResponse(w, r, res)\n}\n\nfunc (s HTTPServer) Export(w http.ResponseWriter, r *http.Request) {\n\tstatus, err := s.server.Export(r.Context())\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\n\ttransport.JSONResponse(w, r, status)\n}\n\nfunc (s HTTPServer) GitRepoConfig(w http.ResponseWriter, r *http.Request) {\n\tvar regenerate bool\n\tif err := json.NewDecoder(r.Body).Decode(®enerate); err != nil {\n\t\ttransport.WriteError(w, r, http.StatusBadRequest, err)\n\t}\n\tres, err := s.server.GitRepoConfig(r.Context(), regenerate)\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t}\n\ttransport.JSONResponse(w, r, res)\n}\n\n\/\/ --- handlers supporting deprecated requests\n\nfunc (s HTTPServer) UpdateImages(w http.ResponseWriter, r *http.Request) {\n\tvar (\n\t\tvars = mux.Vars(r)\n\t\timage = vars[\"image\"]\n\t\tkind = vars[\"kind\"]\n\t)\n\tif err := r.ParseForm(); err != nil {\n\t\ttransport.WriteError(w, r, http.StatusBadRequest, errors.Wrapf(err, \"parsing form\"))\n\t\treturn\n\t}\n\tvar serviceSpecs []update.ResourceSpec\n\tfor _, service := range r.Form[\"service\"] {\n\t\tserviceSpec, err := update.ParseResourceSpec(service)\n\t\tif err != nil {\n\t\t\ttransport.WriteError(w, r, http.StatusBadRequest, errors.Wrapf(err, \"parsing service spec %q\", service))\n\t\t\treturn\n\t\t}\n\t\tserviceSpecs = append(serviceSpecs, serviceSpec)\n\t}\n\timageSpec, err := update.ParseImageSpec(image)\n\tif err != nil {\n\t\ttransport.WriteError(w, r, http.StatusBadRequest, errors.Wrapf(err, \"parsing image spec %q\", image))\n\t\treturn\n\t}\n\treleaseKind, err := update.ParseReleaseKind(kind)\n\tif err != nil {\n\t\ttransport.WriteError(w, r, http.StatusBadRequest, errors.Wrapf(err, \"parsing release kind %q\", kind))\n\t\treturn\n\t}\n\n\tvar excludes []flux.ResourceID\n\tfor _, ex := range r.URL.Query()[\"exclude\"] {\n\t\ts, err := flux.ParseResourceID(ex)\n\t\tif err != nil {\n\t\t\ttransport.WriteError(w, r, http.StatusBadRequest, errors.Wrapf(err, \"parsing excluded service %q\", ex))\n\t\t\treturn\n\t\t}\n\t\texcludes = append(excludes, s)\n\t}\n\n\tspec := update.ReleaseSpec{\n\t\tServiceSpecs: serviceSpecs,\n\t\tImageSpec: imageSpec,\n\t\tKind: releaseKind,\n\t\tExcludes: excludes,\n\t}\n\tcause := update.Cause{\n\t\tUser: r.FormValue(\"user\"),\n\t\tMessage: r.FormValue(\"message\"),\n\t}\n\tresult, err := s.server.UpdateManifests(r.Context(), update.Spec{Type: update.Images, Cause: cause, Spec: spec})\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\ttransport.JSONResponse(w, r, result)\n}\n\nfunc (s HTTPServer) UpdatePolicies(w http.ResponseWriter, r *http.Request) {\n\tvar updates policy.Updates\n\tif err := json.NewDecoder(r.Body).Decode(&updates); err != nil {\n\t\ttransport.WriteError(w, r, http.StatusBadRequest, err)\n\t\treturn\n\t}\n\n\tcause := update.Cause{\n\t\tUser: r.FormValue(\"user\"),\n\t\tMessage: r.FormValue(\"message\"),\n\t}\n\n\tjobID, err := s.server.UpdateManifests(r.Context(), update.Spec{Type: update.Policy, Cause: cause, Spec: updates})\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\n\ttransport.JSONResponse(w, r, jobID)\n}\n\nfunc (s HTTPServer) GetPublicSSHKey(w http.ResponseWriter, r *http.Request) {\n\tres, err := s.server.GitRepoConfig(r.Context(), false)\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\ttransport.JSONResponse(w, r, res.PublicSSHKey)\n}\n\nfunc (s HTTPServer) RegeneratePublicSSHKey(w http.ResponseWriter, r *http.Request) {\n\t_, err := s.server.GitRepoConfig(r.Context(), true)\n\tif err != nil {\n\t\ttransport.ErrorResponse(w, r, err)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusNoContent)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package template\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"github.com\/leekchan\/gtf\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"reflect\"\n\t\"strings\"\n\ttxttmpl \"text\/template\"\n\t\"time\"\n\t\"sort\"\n)\n\ntype Templating struct {\n\ttemplate *txttmpl.Template\n\tname string\n\tcontent string\n\tfunctions map[string]interface{}\n}\n\nconst EXT_CFG = \".cfg\"\n\nvar TemplateFunctions map[string]interface{}\n\nfunc NewTemplating(partials *txttmpl.Template, filePath, content string) (*Templating, error) {\n\tt := Templating{\n\t\tname: filePath,\n\t\tcontent: CleanupOfTemplate(content),\n\t\tfunctions: TemplateFunctions,\n\t}\n\tif partials == nil {\n\t\tpartials = txttmpl.New(t.name)\n\t}\n\n\ttmpl, err := partials.New(t.name).Funcs(t.functions).Funcs(map[string]interface{}(gtf.GtfFuncMap)).Parse(t.content)\n\tt.template = tmpl\n\treturn &t, err\n}\n\nfunc CleanupOfTemplate(content string) string {\n\tvar lines []string\n\tvar currentLine string\n\tscanner := bufio.NewScanner(strings.NewReader(string(content)))\n\tfor scanner.Scan() {\n\t\tpart := strings.TrimRight(scanner.Text(), \" \")\n\t\tleftTrim := strings.TrimLeft(part, \" \")\n\t\tif strings.HasPrefix(leftTrim, \"{{-\") {\n\t\t\tpart = \"{{\" + leftTrim[3:]\n\t\t}\n\t\tcurrentLine += part\n\t\tif strings.HasSuffix(currentLine, \"-}}\") {\n\t\t\tcurrentLine = currentLine[0:len(currentLine)-3] + \"}}\"\n\t\t} else {\n\t\t\tlines = append(lines, currentLine)\n\t\t\tcurrentLine = \"\"\n\t\t}\n\t}\n\tif currentLine != \"\" {\n\t\tlines = append(lines, currentLine)\n\t}\n\treturn strings.Join(lines, \"\\n\")\n}\n\nfunc (t *Templating) Execute(wr io.Writer, data interface{}) error {\n\treturn t.template.Execute(wr, data)\n}\n\nfunc (t *Templating) AddFunction(name string, fn interface{}) {\n\tt.functions[name] = fn\n}\n\nfunc (t *Templating) AddFunctions(fs map[string]interface{}) {\n\taddFuncs(t.functions, fs)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc ifOrDef(eif interface{}, yes interface{}, no interface{}) interface{} {\n\tif eif != nil {\n\t\treturn yes\n\t}\n\treturn no\n}\n\nfunc orDef(val interface{}, def interface{}) interface{} {\n\tif val != nil {\n\t\treturn val\n\t}\n\treturn def\n}\n\nfunc orDefs(val []interface{}, def interface{}) interface{} {\n\tif val != nil && len(val) != 0 {\n\t\treturn val\n\t}\n\treturn []interface{}{def}\n}\n\nfunc addFuncs(out, in map[string]interface{}) {\n\tfor name, fn := range in {\n\t\tout[name] = fn\n\t}\n}\n\nfunc UnmarshalJsonObject(data string) (map[string]interface{}, error) {\n\tvar ret map[string]interface{}\n\terr := json.Unmarshal([]byte(data), &ret)\n\treturn ret, err\n}\n\nfunc UnmarshalJsonArray(data string) ([]interface{}, error) {\n\tvar ret []interface{}\n\terr := json.Unmarshal([]byte(data), &ret)\n\treturn ret, err\n}\n\nfunc IsType(data interface{}, t string) bool {\n\tdataType := reflect.TypeOf(data)\n\tif dataType == nil {\n\t\treturn false\n\t}\n\tif dataType.String() == t {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc IsKind(data interface{}, t string) bool {\n\tdataType := reflect.TypeOf(data)\n\tif dataType == nil {\n\t\treturn false\n\t}\n\tif dataType.Kind().String() == t {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc IsMap(data interface{}) bool {\n\tdataType := reflect.TypeOf(data)\n\tif dataType == nil {\n\t\treturn false\n\t}\n\tif dataType.Kind() == reflect.Map {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc IsArray(data interface{}) bool {\n\tdataType := reflect.TypeOf(data)\n\tif dataType == nil {\n\t\treturn false\n\t}\n\tif dataType.Kind() == reflect.Array || dataType.Kind() == reflect.Slice {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc IsString(data interface{}) bool {\n\tdataType := reflect.TypeOf(data)\n\tif dataType == nil {\n\t\treturn false\n\t}\n\tif dataType.Kind() == reflect.String {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc IsMapFirst(data interface{},element interface{}) bool {\n\tswitch reflect.TypeOf(data).Kind() {\n\t\tcase reflect.Map :\n\t\t\t\tmapItem := reflect.ValueOf(data).MapKeys()\n\n\t\t\t var keys []string\n\t\t\t\tfor _,k := range mapItem {\n\t\t\t\t keys = append(keys,k.String())\n\t\t\t\t}\n\t\t\t\tsort.Strings(keys)\n\t\t\t\tmapItemType := keys[0]\n\t\t\t\treturn (mapItemType == element)\n\t}\n\treturn false\n}\n\nfunc IsMapLast(data interface{},element interface{}) bool {\n\tswitch reflect.TypeOf(data).Kind() {\n\t\tcase reflect.Map :\n\t\t\tmapItem := reflect.ValueOf(data).MapKeys()\n\t\t\tmapLen := reflect.ValueOf(data).Len()\n\t\t\tmapItemType := mapItem[mapLen - 1].String()\n\t\t\treturn (mapItemType == element)\n\t}\n\treturn false\n}\n\n\nfunc HowDeep(data interface{},element interface{}) int{\n \treturn HowDeepIsIt(data,element,0)\n}\n\nfunc HowDeepIsIt(data interface{},element interface{},deep int) int{\n\t\/\/elemType := reflect.TypeOf(element).Kind()\n\t\/\/ dataType := reflect.TypeOf(data).Kind()\n\tmapItem := reflect.ValueOf(data)\n\telemItem := reflect.ValueOf(element)\n\tswitch elemType {\n\t\tcase reflect.String:\n\t\t\/\/\tfmt.Println(\"1Bis: Type:\",elemType,\"Value\",elemItem,\"ValueData\",mapItem)\n\t\t\/\/ \tfmt.Println(\"Type:\",dataType,\"Value\",mapItem)\n\t\t\/\/ \tfor _, b := range reflect.ValueOf(data).MapKeys() {\n\t\t\/\/ \t\t\/\/fmt.Println(\"Reflect:\",elemItem , \"Value:\",b,\"MapValue\",mapItem.MapIndex(b),\"Equal\",reflect.DeepEqual(mapItem.MapIndex(b).Interface(), elemItem.Interface()))\n\t\t\/\/ \t\t\/\/fmt.Println(\"Reflect:\",IsMap(mapItem.MapIndex(b).Interface()),\"b\",mapItem.MapIndex(b).Interface())\n\t\t\/\/ \t\t\/\/ fmt.Println(\"Reflect:\",(elemItem.Interface()) ,mapItem.MapIndex(b).Interface())\n\t\t\/\/ \t\tif reflect.DeepEqual(mapItem.MapIndex(b).Interface(), elemItem.Interface()) {\n\t\t\/\/ \t\t\treturn deep + 1\n\t\t\/\/ \t\t}\n\t\t\/\/ \t}\n\t\tcase reflect.Map :\n\t\t\t\/\/fmt.Println(\"1: Key:\",elemType , \"Value:\",element ,\"Reflect\",elemItem)\n\t\t\t\/\/fmt.Println(\"Key:\",data , \"Value:\",dataType,\"Reflect\",mapItem)\n\t\t\tfor _, b := range reflect.ValueOf(data).MapKeys() {\n\t\t\t\t\/\/fmt.Println(\"Reflect:\",elemItem , \"Value:\",b,\"MapValue\",mapItem.MapIndex(b),\"Equal\",reflect.DeepEqual(mapItem.MapIndex(b).Interface(), elemItem.Interface()))\n\t\t\t\t\/\/fmt.Println(\"Reflect:\",IsMap(mapItem.MapIndex(b).Interface()),\"b\",mapItem.MapIndex(b).Interface())\n\t\t\t\t\/\/fmt.Println(\"2: Reflect:\",(elemItem.Interface()) ,mapItem.MapIndex(b).Interface())\n\t\t\t\tif reflect.DeepEqual(mapItem.MapIndex(b).Interface(), elemItem.Interface()) {\n\t\t\t\t\treturn deep + 1\n\t\t\t\t}\n\t\t\t\t\/\/ if IsMap(mapItem.MapIndex(b).Interface()) {\n\t\t\t\t\/\/ \tfmt.Println(\"3: IsMap:\",mapItem.MapIndex(b).Interface())\n\t\t\t\t\/\/ \tindex := HowDeepIsIt(mapItem.MapIndex(b).Interface(),element,deep + 1 )\n\t\t\t\t\/\/ \tif index == deep + 2 {\n\t\t\t\t\/\/ \t\tfmt.Println(\"4: Key:\",mapItem.MapIndex(b).Interface() ,\"Deepness\",index)\n\t\t\t\t\/\/ \t\treturn index\n\t\t\t\t\/\/ \t}\n\t\t\t\t\/\/ }\n\t\t\t}\n\t}\n\n\n\treturn deep\n}\n\n\nfunc add(x, y int) int {\n\treturn x + y\n}\n\nfunc mul(x, y int) int {\n\treturn x * y\n}\n\nfunc div(x, y int) int {\n\treturn x \/ y\n}\n\nfunc mod(x, y int) int {\n\treturn x % y\n}\n\nfunc sub(x, y int) int {\n\treturn x - y\n}\n\nfunc init() {\n\tTemplateFunctions = make(map[string]interface{})\n\tTemplateFunctions[\"base\"] = path.Base\n\tTemplateFunctions[\"split\"] = strings.Split\n\tTemplateFunctions[\"json\"] = UnmarshalJsonObject\n\tTemplateFunctions[\"jsonArray\"] = UnmarshalJsonArray\n\tTemplateFunctions[\"dir\"] = path.Dir\n\tTemplateFunctions[\"getenv\"] = os.Getenv\n\tTemplateFunctions[\"join\"] = strings.Join\n\tTemplateFunctions[\"datetime\"] = time.Now\n\tTemplateFunctions[\"toUpper\"] = strings.ToUpper\n\tTemplateFunctions[\"toLower\"] = strings.ToLower\n\tTemplateFunctions[\"contains\"] = strings.Contains\n\tTemplateFunctions[\"replace\"] = strings.Replace\n\tTemplateFunctions[\"repeat\"] = strings.Repeat\n\tTemplateFunctions[\"orDef\"] = orDef\n\tTemplateFunctions[\"orDefs\"] = orDefs\n\tTemplateFunctions[\"ifOrDef\"] = ifOrDef\n\tTemplateFunctions[\"isType\"] = IsType\n\tTemplateFunctions[\"isMap\"] = IsMap\n\tTemplateFunctions[\"isArray\"] = IsArray\n\tTemplateFunctions[\"isKind\"] = IsKind\n\tTemplateFunctions[\"isString\"] = IsString\n\tTemplateFunctions[\"IsMapFirst\"] = IsMapFirst\n\tTemplateFunctions[\"IsMapLast\"] = IsMapLast\n\tTemplateFunctions[\"HowDeep\"] = HowDeep\n\tTemplateFunctions[\"add\"] = add\n\tTemplateFunctions[\"mul\"] = mul\n\tTemplateFunctions[\"div\"] = div\n\tTemplateFunctions[\"sub\"] = sub\n\tTemplateFunctions[\"mod\"] = mod\n}\n<commit_msg>Add IsMapFirst and IsMapLast and HowDeep in templater<commit_after>package template\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"github.com\/leekchan\/gtf\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"reflect\"\n\t\"strings\"\n\ttxttmpl \"text\/template\"\n\t\"time\"\n\t\"sort\"\n)\n\ntype Templating struct {\n\ttemplate *txttmpl.Template\n\tname string\n\tcontent string\n\tfunctions map[string]interface{}\n}\n\nconst EXT_CFG = \".cfg\"\n\nvar TemplateFunctions map[string]interface{}\n\nfunc NewTemplating(partials *txttmpl.Template, filePath, content string) (*Templating, error) {\n\tt := Templating{\n\t\tname: filePath,\n\t\tcontent: CleanupOfTemplate(content),\n\t\tfunctions: TemplateFunctions,\n\t}\n\tif partials == nil {\n\t\tpartials = txttmpl.New(t.name)\n\t}\n\n\ttmpl, err := partials.New(t.name).Funcs(t.functions).Funcs(map[string]interface{}(gtf.GtfFuncMap)).Parse(t.content)\n\tt.template = tmpl\n\treturn &t, err\n}\n\nfunc CleanupOfTemplate(content string) string {\n\tvar lines []string\n\tvar currentLine string\n\tscanner := bufio.NewScanner(strings.NewReader(string(content)))\n\tfor scanner.Scan() {\n\t\tpart := strings.TrimRight(scanner.Text(), \" \")\n\t\tleftTrim := strings.TrimLeft(part, \" \")\n\t\tif strings.HasPrefix(leftTrim, \"{{-\") {\n\t\t\tpart = \"{{\" + leftTrim[3:]\n\t\t}\n\t\tcurrentLine += part\n\t\tif strings.HasSuffix(currentLine, \"-}}\") {\n\t\t\tcurrentLine = currentLine[0:len(currentLine)-3] + \"}}\"\n\t\t} else {\n\t\t\tlines = append(lines, currentLine)\n\t\t\tcurrentLine = \"\"\n\t\t}\n\t}\n\tif currentLine != \"\" {\n\t\tlines = append(lines, currentLine)\n\t}\n\treturn strings.Join(lines, \"\\n\")\n}\n\nfunc (t *Templating) Execute(wr io.Writer, data interface{}) error {\n\treturn t.template.Execute(wr, data)\n}\n\nfunc (t *Templating) AddFunction(name string, fn interface{}) {\n\tt.functions[name] = fn\n}\n\nfunc (t *Templating) AddFunctions(fs map[string]interface{}) {\n\taddFuncs(t.functions, fs)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc ifOrDef(eif interface{}, yes interface{}, no interface{}) interface{} {\n\tif eif != nil {\n\t\treturn yes\n\t}\n\treturn no\n}\n\nfunc orDef(val interface{}, def interface{}) interface{} {\n\tif val != nil {\n\t\treturn val\n\t}\n\treturn def\n}\n\nfunc orDefs(val []interface{}, def interface{}) interface{} {\n\tif val != nil && len(val) != 0 {\n\t\treturn val\n\t}\n\treturn []interface{}{def}\n}\n\nfunc addFuncs(out, in map[string]interface{}) {\n\tfor name, fn := range in {\n\t\tout[name] = fn\n\t}\n}\n\nfunc UnmarshalJsonObject(data string) (map[string]interface{}, error) {\n\tvar ret map[string]interface{}\n\terr := json.Unmarshal([]byte(data), &ret)\n\treturn ret, err\n}\n\nfunc UnmarshalJsonArray(data string) ([]interface{}, error) {\n\tvar ret []interface{}\n\terr := json.Unmarshal([]byte(data), &ret)\n\treturn ret, err\n}\n\nfunc IsType(data interface{}, t string) bool {\n\tdataType := reflect.TypeOf(data)\n\tif dataType == nil {\n\t\treturn false\n\t}\n\tif dataType.String() == t {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc IsKind(data interface{}, t string) bool {\n\tdataType := reflect.TypeOf(data)\n\tif dataType == nil {\n\t\treturn false\n\t}\n\tif dataType.Kind().String() == t {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc IsMap(data interface{}) bool {\n\tdataType := reflect.TypeOf(data)\n\tif dataType == nil {\n\t\treturn false\n\t}\n\tif dataType.Kind() == reflect.Map {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc IsArray(data interface{}) bool {\n\tdataType := reflect.TypeOf(data)\n\tif dataType == nil {\n\t\treturn false\n\t}\n\tif dataType.Kind() == reflect.Array || dataType.Kind() == reflect.Slice {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc IsString(data interface{}) bool {\n\tdataType := reflect.TypeOf(data)\n\tif dataType == nil {\n\t\treturn false\n\t}\n\tif dataType.Kind() == reflect.String {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc IsMapFirst(data interface{},element interface{}) bool {\n\tswitch reflect.TypeOf(data).Kind() {\n\t\tcase reflect.Map :\n\t\t\t\tmapItem := reflect.ValueOf(data).MapKeys()\n\n\t\t\t var keys []string\n\t\t\t\tfor _,k := range mapItem {\n\t\t\t\t keys = append(keys,k.String())\n\t\t\t\t}\n\t\t\t\tsort.Strings(keys)\n\t\t\t\tmapItemType := keys[0]\n\t\t\t\treturn (mapItemType == element)\n\t}\n\treturn false\n}\n\nfunc IsMapLast(data interface{},element interface{}) bool {\n\tswitch reflect.TypeOf(data).Kind() {\n\t\tcase reflect.Map :\n\t\t\tmapItem := reflect.ValueOf(data).MapKeys()\n\t\t\tmapLen := reflect.ValueOf(data).Len()\n\t\t\tmapItemType := mapItem[mapLen - 1].String()\n\t\t\treturn (mapItemType == element)\n\t}\n\treturn false\n}\n\n\nfunc HowDeep(data interface{},element interface{}) int{\n \treturn HowDeepIsIt(data,element,0)\n}\n\nfunc HowDeepIsIt(data interface{},element interface{},deep int) int{\n\t\/\/elemType := reflect.TypeOf(element).Kind()\n\t\/\/ dataType := reflect.TypeOf(data).Kind()\n\tmapItem := reflect.ValueOf(data)\n\telemItem := reflect.ValueOf(element)\n\tswitch elemType {\n\t\tcase reflect.String:\n\t\t\/\/\tfmt.Println(\"1Bis: Type:\",elemType,\"Value\",elemItem,\"ValueData\",mapItem)\n\t\t\/\/ \tfmt.Println(\"Type:\",dataType,\"Value\",mapItem)\n\t\t\/\/ \tfor _, b := range reflect.ValueOf(data).MapKeys() {cd ~g\n\t\t\/\/ \t\t\/\/fmt.Println(\"Reflect:\",elemItem , \"Value:\",b,\"MapValue\",mapItem.MapIndex(b),\"Equal\",reflect.DeepEqual(mapItem.MapIndex(b).Interface(), elemItem.Interface()))\n\t\t\/\/ \t\t\/\/fmt.Println(\"Reflect:\",IsMap(mapItem.MapIndex(b).Interface()),\"b\",mapItem.MapIndex(b).Interface())\n\t\t\/\/ \t\t\/\/ fmt.Println(\"Reflect:\",(elemItem.Interface()) ,mapItem.MapIndex(b).Interface())\n\t\t\/\/ \t\tif reflect.DeepEqual(mapItem.MapIndex(b).Interface(), elemItem.Interface()) {\n\t\t\/\/ \t\t\treturn deep + 1\n\t\t\/\/ \t\t}\n\t\t\/\/ \t}\n\t\tcase reflect.Map :\n\t\t\t\/\/fmt.Println(\"1: Key:\",elemType , \"Value:\",element ,\"Reflect\",elemItem)\n\t\t\t\/\/fmt.Println(\"Key:\",data , \"Value:\",dataType,\"Reflect\",mapItem)\n\t\t\tfor _, b := range reflect.ValueOf(data).MapKeys() {\n\t\t\t\t\/\/fmt.Println(\"Reflect:\",elemItem , \"Value:\",b,\"MapValue\",mapItem.MapIndex(b),\"Equal\",reflect.DeepEqual(mapItem.MapIndex(b).Interface(), elemItem.Interface()))\n\t\t\t\t\/\/fmt.Println(\"Reflect:\",IsMap(mapItem.MapIndex(b).Interface()),\"b\",mapItem.MapIndex(b).Interface())\n\t\t\t\t\/\/fmt.Println(\"2: Reflect:\",(elemItem.Interface()) ,mapItem.MapIndex(b).Interface())\n\t\t\t\tif reflect.DeepEqual(mapItem.MapIndex(b).Interface(), elemItem.Interface()) {\n\t\t\t\t\treturn deep + 1\n\t\t\t\t}\n\t\t\t\t\/\/ if IsMap(mapItem.MapIndex(b).Interface()) {\n\t\t\t\t\/\/ \tfmt.Println(\"3: IsMap:\",mapItem.MapIndex(b).Interface())\n\t\t\t\t\/\/ \tindex := HowDeepIsIt(mapItem.MapIndex(b).Interface(),element,deep + 1 )\n\t\t\t\t\/\/ \tif index == deep + 2 {\n\t\t\t\t\/\/ \t\tfmt.Println(\"4: Key:\",mapItem.MapIndex(b).Interface() ,\"Deepness\",index)\n\t\t\t\t\/\/ \t\treturn index\n\t\t\t\t\/\/ \t}\n\t\t\t\t\/\/ }\n\t\t\t}\n\t}\n\n\n\treturn deep\n}\n\n\nfunc add(x, y int) int {\n\treturn x + y\n}\n\nfunc mul(x, y int) int {\n\treturn x * y\n}\n\nfunc div(x, y int) int {\n\treturn x \/ y\n}\n\nfunc mod(x, y int) int {\n\treturn x % y\n}\n\nfunc sub(x, y int) int {\n\treturn x - y\n}\n\nfunc init() {\n\tTemplateFunctions = make(map[string]interface{})\n\tTemplateFunctions[\"base\"] = path.Base\n\tTemplateFunctions[\"split\"] = strings.Split\n\tTemplateFunctions[\"json\"] = UnmarshalJsonObject\n\tTemplateFunctions[\"jsonArray\"] = UnmarshalJsonArray\n\tTemplateFunctions[\"dir\"] = path.Dir\n\tTemplateFunctions[\"getenv\"] = os.Getenv\n\tTemplateFunctions[\"join\"] = strings.Join\n\tTemplateFunctions[\"datetime\"] = time.Now\n\tTemplateFunctions[\"toUpper\"] = strings.ToUpper\n\tTemplateFunctions[\"toLower\"] = strings.ToLower\n\tTemplateFunctions[\"contains\"] = strings.Contains\n\tTemplateFunctions[\"replace\"] = strings.Replace\n\tTemplateFunctions[\"repeat\"] = strings.Repeat\n\tTemplateFunctions[\"orDef\"] = orDef\n\tTemplateFunctions[\"orDefs\"] = orDefs\n\tTemplateFunctions[\"ifOrDef\"] = ifOrDef\n\tTemplateFunctions[\"isType\"] = IsType\n\tTemplateFunctions[\"isMap\"] = IsMap\n\tTemplateFunctions[\"isArray\"] = IsArray\n\tTemplateFunctions[\"isKind\"] = IsKind\n\tTemplateFunctions[\"isString\"] = IsString\n\tTemplateFunctions[\"IsMapFirst\"] = IsMapFirst\n\tTemplateFunctions[\"IsMapLast\"] = IsMapLast\n\tTemplateFunctions[\"HowDeep\"] = HowDeep\n\tTemplateFunctions[\"add\"] = add\n\tTemplateFunctions[\"mul\"] = mul\n\tTemplateFunctions[\"div\"] = div\n\tTemplateFunctions[\"sub\"] = sub\n\tTemplateFunctions[\"mod\"] = mod\n}\n<|endoftext|>"} {"text":"<commit_before>package btctimestamper\n\nimport (\n\t\"flag\"\n\n\t\"github.com\/stratumn\/sdk\/blockchain\/btc\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nvar (\n\tfee int64\n)\n\n\/\/ RegisterFlags registers the flags used by InitializeWithFlags.\nfunc RegisterFlags() {\n\tflag.Int64Var(&fee, \"fee\", DefaultFee, \"transaction fee (satoshis)\")\n\n}\n\n\/\/ InitializeWithFlags should be called after RegisterFlags and flag.Parse to initialize\n\/\/ a bcbatchfossilizer using flag values.\nfunc InitializeWithFlags(version, commit string, key string, unspentFinder btc.UnspentFinder, broadcaster btc.Broadcaster) *Timestamper {\n\tts, err := New(&Config{\n\t\tUnspentFinder: unspentFinder,\n\t\tBroadcaster: broadcaster,\n\t\tWIF: key,\n\t\tFee: fee,\n\t})\n\tif err != nil {\n\t\tlog.WithField(\"error\", err).Fatal(\"Failed to create Bitcoin timestamper\")\n\t}\n\treturn ts\n}\n<commit_msg>blockchain: fix formatting<commit_after>package btctimestamper\n\nimport (\n\t\"flag\"\n\n\t\"github.com\/stratumn\/sdk\/blockchain\/btc\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nvar (\n\tfee int64\n)\n\n\/\/ RegisterFlags registers the flags used by InitializeWithFlags.\nfunc RegisterFlags() {\n\tflag.Int64Var(&fee, \"fee\", DefaultFee, \"transaction fee (satoshis)\")\n}\n\n\/\/ InitializeWithFlags should be called after RegisterFlags and flag.Parse to initialize\n\/\/ a bcbatchfossilizer using flag values.\nfunc InitializeWithFlags(version, commit string, key string, unspentFinder btc.UnspentFinder, broadcaster btc.Broadcaster) *Timestamper {\n\tts, err := New(&Config{\n\t\tUnspentFinder: unspentFinder,\n\t\tBroadcaster: broadcaster,\n\t\tWIF: key,\n\t\tFee: fee,\n\t})\n\tif err != nil {\n\t\tlog.WithField(\"error\", err).Fatal(\"Failed to create Bitcoin timestamper\")\n\t}\n\treturn ts\n}\n<|endoftext|>"} {"text":"<commit_before>package rtnetlink\n\nimport \"syscall\"\n\ntype Family uint8\n\nconst (\n AF_UNSPEC Family = syscall.AF_UNSPEC\n AF_INET Family = syscall.AF_INET\n AF_INET6 Family = syscall.AF_INET6\n)\n\n<commit_msg>add Family\/String() method<commit_after>package rtnetlink\n\nimport \"syscall\"\n\ntype Family uint8\n\nconst (\n AF_UNSPEC Family = syscall.AF_UNSPEC\n AF_INET Family = syscall.AF_INET\n AF_INET6 Family = syscall.AF_INET6\n)\n\nfunc (self Family)String()(out string){\n switch self {\n default: out = \"Unknown\"\n case AF_UNSPEC: out = \"AF_UNSPEC\"\n case AF_INET: out = \"AF_INET\"\n case AF_INET6: out = \"AF_INET6\"\n }\n return\n}\n<|endoftext|>"} {"text":"<commit_before>package registry\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"strconv\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/docker\/registry\/v2\"\n\t\"github.com\/docker\/docker\/utils\"\n)\n\nconst DockerDigestHeader = \"Docker-Content-Digest\"\n\nfunc getV2Builder(e *Endpoint) *v2.URLBuilder {\n\tif e.URLBuilder == nil {\n\t\te.URLBuilder = v2.NewURLBuilder(e.URL)\n\t}\n\treturn e.URLBuilder\n}\n\nfunc (r *Session) V2RegistryEndpoint(index *IndexInfo) (ep *Endpoint, err error) {\n\t\/\/ TODO check if should use Mirror\n\tif index.Official {\n\t\tep, err = newEndpoint(REGISTRYSERVER, true)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\terr = validateEndpoint(ep)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t} else if r.indexEndpoint.String() == index.GetAuthConfigKey() {\n\t\tep = r.indexEndpoint\n\t} else {\n\t\tep, err = NewEndpoint(index)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tep.URLBuilder = v2.NewURLBuilder(ep.URL)\n\treturn\n}\n\n\/\/ GetV2Authorization gets the authorization needed to the given image\n\/\/ If readonly access is requested, then only the authorization may\n\/\/ only be used for Get operations.\nfunc (r *Session) GetV2Authorization(ep *Endpoint, imageName string, readOnly bool) (auth *RequestAuthorization, err error) {\n\tscopes := []string{\"pull\"}\n\tif !readOnly {\n\t\tscopes = append(scopes, \"push\")\n\t}\n\n\tlog.Debugf(\"Getting authorization for %s %s\", imageName, scopes)\n\treturn NewRequestAuthorization(r.GetAuthConfig(true), ep, \"repository\", imageName, scopes), nil\n}\n\n\/\/\n\/\/ 1) Check if TarSum of each layer exists \/v2\/\n\/\/ 1.a) if 200, continue\n\/\/ 1.b) if 300, then push the\n\/\/ 1.c) if anything else, err\n\/\/ 2) PUT the created\/signed manifest\n\/\/\nfunc (r *Session) GetV2ImageManifest(ep *Endpoint, imageName, tagName string, auth *RequestAuthorization) ([]byte, string, error) {\n\trouteURL, err := getV2Builder(ep).BuildManifestURL(imageName, tagName)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\tmethod := \"GET\"\n\tlog.Debugf(\"[registry] Calling %q %s\", method, routeURL)\n\n\treq, err := r.reqFactory.NewRequest(method, routeURL, nil)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\tif err := auth.Authorize(req); err != nil {\n\t\treturn nil, \"\", err\n\t}\n\tres, _, err := r.doRequest(req)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode != 200 {\n\t\tif res.StatusCode == 401 {\n\t\t\treturn nil, \"\", errLoginRequired\n\t\t} else if res.StatusCode == 404 {\n\t\t\treturn nil, \"\", ErrDoesNotExist\n\t\t}\n\t\treturn nil, \"\", utils.NewHTTPRequestError(fmt.Sprintf(\"Server error: %d trying to fetch for %s:%s\", res.StatusCode, imageName, tagName), res)\n\t}\n\n\tbuf, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"Error while reading the http response: %s\", err)\n\t}\n\treturn buf, res.Header.Get(DockerDigestHeader), nil\n}\n\n\/\/ - Succeeded to head image blob (already exists)\n\/\/ - Failed with no error (continue to Push the Blob)\n\/\/ - Failed with error\nfunc (r *Session) HeadV2ImageBlob(ep *Endpoint, imageName, sumType, sum string, auth *RequestAuthorization) (bool, error) {\n\trouteURL, err := getV2Builder(ep).BuildBlobURL(imageName, sumType+\":\"+sum)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tmethod := \"HEAD\"\n\tlog.Debugf(\"[registry] Calling %q %s\", method, routeURL)\n\n\treq, err := r.reqFactory.NewRequest(method, routeURL, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif err := auth.Authorize(req); err != nil {\n\t\treturn false, err\n\t}\n\tres, _, err := r.doRequest(req)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tres.Body.Close() \/\/ close early, since we're not needing a body on this call .. yet?\n\tswitch {\n\tcase res.StatusCode >= 200 && res.StatusCode < 400:\n\t\t\/\/ return something indicating no push needed\n\t\treturn true, nil\n\tcase res.StatusCode == 401:\n\t\treturn false, errLoginRequired\n\tcase res.StatusCode == 404:\n\t\t\/\/ return something indicating blob push needed\n\t\treturn false, nil\n\t}\n\n\treturn false, utils.NewHTTPRequestError(fmt.Sprintf(\"Server error: %d trying head request for %s - %s:%s\", res.StatusCode, imageName, sumType, sum), res)\n}\n\nfunc (r *Session) GetV2ImageBlob(ep *Endpoint, imageName, sumType, sum string, blobWrtr io.Writer, auth *RequestAuthorization) error {\n\trouteURL, err := getV2Builder(ep).BuildBlobURL(imageName, sumType+\":\"+sum)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmethod := \"GET\"\n\tlog.Debugf(\"[registry] Calling %q %s\", method, routeURL)\n\treq, err := r.reqFactory.NewRequest(method, routeURL, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := auth.Authorize(req); err != nil {\n\t\treturn err\n\t}\n\tres, _, err := r.doRequest(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode != 200 {\n\t\tif res.StatusCode == 401 {\n\t\t\treturn errLoginRequired\n\t\t}\n\t\treturn utils.NewHTTPRequestError(fmt.Sprintf(\"Server error: %d trying to pull %s blob\", res.StatusCode, imageName), res)\n\t}\n\n\t_, err = io.Copy(blobWrtr, res.Body)\n\treturn err\n}\n\nfunc (r *Session) GetV2ImageBlobReader(ep *Endpoint, imageName, sumType, sum string, auth *RequestAuthorization) (io.ReadCloser, int64, error) {\n\trouteURL, err := getV2Builder(ep).BuildBlobURL(imageName, sumType+\":\"+sum)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tmethod := \"GET\"\n\tlog.Debugf(\"[registry] Calling %q %s\", method, routeURL)\n\treq, err := r.reqFactory.NewRequest(method, routeURL, nil)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\tif err := auth.Authorize(req); err != nil {\n\t\treturn nil, 0, err\n\t}\n\tres, _, err := r.doRequest(req)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\tif res.StatusCode != 200 {\n\t\tif res.StatusCode == 401 {\n\t\t\treturn nil, 0, errLoginRequired\n\t\t}\n\t\treturn nil, 0, utils.NewHTTPRequestError(fmt.Sprintf(\"Server error: %d trying to pull %s blob - %s:%s\", res.StatusCode, imageName, sumType, sum), res)\n\t}\n\tlenStr := res.Header.Get(\"Content-Length\")\n\tl, err := strconv.ParseInt(lenStr, 10, 64)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn res.Body, l, err\n}\n\n\/\/ Push the image to the server for storage.\n\/\/ 'layer' is an uncompressed reader of the blob to be pushed.\n\/\/ The server will generate it's own checksum calculation.\nfunc (r *Session) PutV2ImageBlob(ep *Endpoint, imageName, sumType, sumStr string, blobRdr io.Reader, auth *RequestAuthorization) error {\n\trouteURL, err := getV2Builder(ep).BuildBlobUploadURL(imageName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Debugf(\"[registry] Calling %q %s\", \"POST\", routeURL)\n\treq, err := r.reqFactory.NewRequest(\"POST\", routeURL, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := auth.Authorize(req); err != nil {\n\t\treturn err\n\t}\n\tres, _, err := r.doRequest(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlocation := res.Header.Get(\"Location\")\n\n\tmethod := \"PUT\"\n\tlog.Debugf(\"[registry] Calling %q %s\", method, location)\n\treq, err = r.reqFactory.NewRequest(method, location, ioutil.NopCloser(blobRdr))\n\tif err != nil {\n\t\treturn err\n\t}\n\tqueryParams := req.URL.Query()\n\tqueryParams.Add(\"digest\", sumType+\":\"+sumStr)\n\treq.URL.RawQuery = queryParams.Encode()\n\tif err := auth.Authorize(req); err != nil {\n\t\treturn err\n\t}\n\tres, _, err = r.doRequest(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != 201 {\n\t\tif res.StatusCode == 401 {\n\t\t\treturn errLoginRequired\n\t\t}\n\t\terrBody, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Debugf(\"Unexpected response from server: %q %#v\", errBody, res.Header)\n\t\treturn utils.NewHTTPRequestError(fmt.Sprintf(\"Server error: %d trying to push %s blob - %s:%s\", res.StatusCode, imageName, sumType, sumStr), res)\n\t}\n\n\treturn nil\n}\n\n\/\/ Finally Push the (signed) manifest of the blobs we've just pushed\nfunc (r *Session) PutV2ImageManifest(ep *Endpoint, imageName, tagName string, manifestRdr io.Reader, auth *RequestAuthorization) (string, error) {\n\trouteURL, err := getV2Builder(ep).BuildManifestURL(imageName, tagName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tmethod := \"PUT\"\n\tlog.Debugf(\"[registry] Calling %q %s\", method, routeURL)\n\treq, err := r.reqFactory.NewRequest(method, routeURL, manifestRdr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif err := auth.Authorize(req); err != nil {\n\t\treturn \"\", err\n\t}\n\tres, _, err := r.doRequest(req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer res.Body.Close()\n\n\t\/\/ All 2xx and 3xx responses can be accepted for a put.\n\tif res.StatusCode >= 400 {\n\t\tif res.StatusCode == 401 {\n\t\t\treturn \"\", errLoginRequired\n\t\t}\n\t\terrBody, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tlog.Debugf(\"Unexpected response from server: %q %#v\", errBody, res.Header)\n\t\treturn \"\", utils.NewHTTPRequestError(fmt.Sprintf(\"Server error: %d trying to push %s:%s manifest\", res.StatusCode, imageName, tagName), res)\n\t}\n\n\treturn res.Header.Get(DockerDigestHeader), nil\n}\n\ntype remoteTags struct {\n\tname string\n\ttags []string\n}\n\n\/\/ Given a repository name, returns a json array of string tags\nfunc (r *Session) GetV2RemoteTags(ep *Endpoint, imageName string, auth *RequestAuthorization) ([]string, error) {\n\trouteURL, err := getV2Builder(ep).BuildTagsURL(imageName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmethod := \"GET\"\n\tlog.Debugf(\"[registry] Calling %q %s\", method, routeURL)\n\n\treq, err := r.reqFactory.NewRequest(method, routeURL, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := auth.Authorize(req); err != nil {\n\t\treturn nil, err\n\t}\n\tres, _, err := r.doRequest(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode != 200 {\n\t\tif res.StatusCode == 401 {\n\t\t\treturn nil, errLoginRequired\n\t\t} else if res.StatusCode == 404 {\n\t\t\treturn nil, ErrDoesNotExist\n\t\t}\n\t\treturn nil, utils.NewHTTPRequestError(fmt.Sprintf(\"Server error: %d trying to fetch for %s\", res.StatusCode, imageName), res)\n\t}\n\n\tdecoder := json.NewDecoder(res.Body)\n\tvar remote remoteTags\n\terr = decoder.Decode(&remote)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error while decoding the http response: %s\", err)\n\t}\n\treturn remote.tags, nil\n}\n<commit_msg>Separate init blob upload<commit_after>package registry\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/docker\/registry\/v2\"\n\t\"github.com\/docker\/docker\/utils\"\n)\n\nconst DockerDigestHeader = \"Docker-Content-Digest\"\n\nfunc getV2Builder(e *Endpoint) *v2.URLBuilder {\n\tif e.URLBuilder == nil {\n\t\te.URLBuilder = v2.NewURLBuilder(e.URL)\n\t}\n\treturn e.URLBuilder\n}\n\nfunc (r *Session) V2RegistryEndpoint(index *IndexInfo) (ep *Endpoint, err error) {\n\t\/\/ TODO check if should use Mirror\n\tif index.Official {\n\t\tep, err = newEndpoint(REGISTRYSERVER, true)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\terr = validateEndpoint(ep)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t} else if r.indexEndpoint.String() == index.GetAuthConfigKey() {\n\t\tep = r.indexEndpoint\n\t} else {\n\t\tep, err = NewEndpoint(index)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tep.URLBuilder = v2.NewURLBuilder(ep.URL)\n\treturn\n}\n\n\/\/ GetV2Authorization gets the authorization needed to the given image\n\/\/ If readonly access is requested, then only the authorization may\n\/\/ only be used for Get operations.\nfunc (r *Session) GetV2Authorization(ep *Endpoint, imageName string, readOnly bool) (auth *RequestAuthorization, err error) {\n\tscopes := []string{\"pull\"}\n\tif !readOnly {\n\t\tscopes = append(scopes, \"push\")\n\t}\n\n\tlog.Debugf(\"Getting authorization for %s %s\", imageName, scopes)\n\treturn NewRequestAuthorization(r.GetAuthConfig(true), ep, \"repository\", imageName, scopes), nil\n}\n\n\/\/\n\/\/ 1) Check if TarSum of each layer exists \/v2\/\n\/\/ 1.a) if 200, continue\n\/\/ 1.b) if 300, then push the\n\/\/ 1.c) if anything else, err\n\/\/ 2) PUT the created\/signed manifest\n\/\/\nfunc (r *Session) GetV2ImageManifest(ep *Endpoint, imageName, tagName string, auth *RequestAuthorization) ([]byte, string, error) {\n\trouteURL, err := getV2Builder(ep).BuildManifestURL(imageName, tagName)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\tmethod := \"GET\"\n\tlog.Debugf(\"[registry] Calling %q %s\", method, routeURL)\n\n\treq, err := r.reqFactory.NewRequest(method, routeURL, nil)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\tif err := auth.Authorize(req); err != nil {\n\t\treturn nil, \"\", err\n\t}\n\tres, _, err := r.doRequest(req)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode != 200 {\n\t\tif res.StatusCode == 401 {\n\t\t\treturn nil, \"\", errLoginRequired\n\t\t} else if res.StatusCode == 404 {\n\t\t\treturn nil, \"\", ErrDoesNotExist\n\t\t}\n\t\treturn nil, \"\", utils.NewHTTPRequestError(fmt.Sprintf(\"Server error: %d trying to fetch for %s:%s\", res.StatusCode, imageName, tagName), res)\n\t}\n\n\tbuf, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"Error while reading the http response: %s\", err)\n\t}\n\treturn buf, res.Header.Get(DockerDigestHeader), nil\n}\n\n\/\/ - Succeeded to head image blob (already exists)\n\/\/ - Failed with no error (continue to Push the Blob)\n\/\/ - Failed with error\nfunc (r *Session) HeadV2ImageBlob(ep *Endpoint, imageName, sumType, sum string, auth *RequestAuthorization) (bool, error) {\n\trouteURL, err := getV2Builder(ep).BuildBlobURL(imageName, sumType+\":\"+sum)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tmethod := \"HEAD\"\n\tlog.Debugf(\"[registry] Calling %q %s\", method, routeURL)\n\n\treq, err := r.reqFactory.NewRequest(method, routeURL, nil)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif err := auth.Authorize(req); err != nil {\n\t\treturn false, err\n\t}\n\tres, _, err := r.doRequest(req)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tres.Body.Close() \/\/ close early, since we're not needing a body on this call .. yet?\n\tswitch {\n\tcase res.StatusCode >= 200 && res.StatusCode < 400:\n\t\t\/\/ return something indicating no push needed\n\t\treturn true, nil\n\tcase res.StatusCode == 401:\n\t\treturn false, errLoginRequired\n\tcase res.StatusCode == 404:\n\t\t\/\/ return something indicating blob push needed\n\t\treturn false, nil\n\t}\n\n\treturn false, utils.NewHTTPRequestError(fmt.Sprintf(\"Server error: %d trying head request for %s - %s:%s\", res.StatusCode, imageName, sumType, sum), res)\n}\n\nfunc (r *Session) GetV2ImageBlob(ep *Endpoint, imageName, sumType, sum string, blobWrtr io.Writer, auth *RequestAuthorization) error {\n\trouteURL, err := getV2Builder(ep).BuildBlobURL(imageName, sumType+\":\"+sum)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmethod := \"GET\"\n\tlog.Debugf(\"[registry] Calling %q %s\", method, routeURL)\n\treq, err := r.reqFactory.NewRequest(method, routeURL, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := auth.Authorize(req); err != nil {\n\t\treturn err\n\t}\n\tres, _, err := r.doRequest(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode != 200 {\n\t\tif res.StatusCode == 401 {\n\t\t\treturn errLoginRequired\n\t\t}\n\t\treturn utils.NewHTTPRequestError(fmt.Sprintf(\"Server error: %d trying to pull %s blob\", res.StatusCode, imageName), res)\n\t}\n\n\t_, err = io.Copy(blobWrtr, res.Body)\n\treturn err\n}\n\nfunc (r *Session) GetV2ImageBlobReader(ep *Endpoint, imageName, sumType, sum string, auth *RequestAuthorization) (io.ReadCloser, int64, error) {\n\trouteURL, err := getV2Builder(ep).BuildBlobURL(imageName, sumType+\":\"+sum)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\tmethod := \"GET\"\n\tlog.Debugf(\"[registry] Calling %q %s\", method, routeURL)\n\treq, err := r.reqFactory.NewRequest(method, routeURL, nil)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\tif err := auth.Authorize(req); err != nil {\n\t\treturn nil, 0, err\n\t}\n\tres, _, err := r.doRequest(req)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\tif res.StatusCode != 200 {\n\t\tif res.StatusCode == 401 {\n\t\t\treturn nil, 0, errLoginRequired\n\t\t}\n\t\treturn nil, 0, utils.NewHTTPRequestError(fmt.Sprintf(\"Server error: %d trying to pull %s blob - %s:%s\", res.StatusCode, imageName, sumType, sum), res)\n\t}\n\tlenStr := res.Header.Get(\"Content-Length\")\n\tl, err := strconv.ParseInt(lenStr, 10, 64)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn res.Body, l, err\n}\n\n\/\/ Push the image to the server for storage.\n\/\/ 'layer' is an uncompressed reader of the blob to be pushed.\n\/\/ The server will generate it's own checksum calculation.\nfunc (r *Session) PutV2ImageBlob(ep *Endpoint, imageName, sumType, sumStr string, blobRdr io.Reader, auth *RequestAuthorization) error {\n\tlocation, err := r.initiateBlobUpload(ep, imageName, auth)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmethod := \"PUT\"\n\tlog.Debugf(\"[registry] Calling %q %s\", method, location)\n\treq, err := r.reqFactory.NewRequest(method, location, ioutil.NopCloser(blobRdr))\n\tif err != nil {\n\t\treturn err\n\t}\n\tqueryParams := req.URL.Query()\n\tqueryParams.Add(\"digest\", sumType+\":\"+sumStr)\n\treq.URL.RawQuery = queryParams.Encode()\n\tif err := auth.Authorize(req); err != nil {\n\t\treturn err\n\t}\n\tres, _, err := r.doRequest(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != 201 {\n\t\tif res.StatusCode == 401 {\n\t\t\treturn errLoginRequired\n\t\t}\n\t\terrBody, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Debugf(\"Unexpected response from server: %q %#v\", errBody, res.Header)\n\t\treturn utils.NewHTTPRequestError(fmt.Sprintf(\"Server error: %d trying to push %s blob - %s:%s\", res.StatusCode, imageName, sumType, sumStr), res)\n\t}\n\n\treturn nil\n}\n\n\/\/ initiateBlobUpload gets the blob upload location for the given image name.\nfunc (r *Session) initiateBlobUpload(ep *Endpoint, imageName string, auth *RequestAuthorization) (location string, err error) {\n\trouteURL, err := getV2Builder(ep).BuildBlobUploadURL(imageName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlog.Debugf(\"[registry] Calling %q %s\", \"POST\", routeURL)\n\treq, err := r.reqFactory.NewRequest(\"POST\", routeURL, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := auth.Authorize(req); err != nil {\n\t\treturn \"\", err\n\t}\n\tres, _, err := r.doRequest(req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif res.StatusCode != http.StatusAccepted {\n\t\tif res.StatusCode == http.StatusUnauthorized {\n\t\t\treturn \"\", errLoginRequired\n\t\t}\n\t\tif res.StatusCode == http.StatusNotFound {\n\t\t\treturn \"\", ErrDoesNotExist\n\t\t}\n\n\t\terrBody, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tlog.Debugf(\"Unexpected response from server: %q %#v\", errBody, res.Header)\n\t\treturn \"\", utils.NewHTTPRequestError(fmt.Sprintf(\"Server error: unexpected %d response status trying to initiate upload of %s\", res.StatusCode, imageName), res)\n\t}\n\n\tif location = res.Header.Get(\"Location\"); location == \"\" {\n\t\treturn \"\", fmt.Errorf(\"registry did not return a Location header for resumable blob upload for image %s\", imageName)\n\t}\n\n\treturn\n}\n\n\/\/ Finally Push the (signed) manifest of the blobs we've just pushed\nfunc (r *Session) PutV2ImageManifest(ep *Endpoint, imageName, tagName string, manifestRdr io.Reader, auth *RequestAuthorization) (string, error) {\n\trouteURL, err := getV2Builder(ep).BuildManifestURL(imageName, tagName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tmethod := \"PUT\"\n\tlog.Debugf(\"[registry] Calling %q %s\", method, routeURL)\n\treq, err := r.reqFactory.NewRequest(method, routeURL, manifestRdr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif err := auth.Authorize(req); err != nil {\n\t\treturn \"\", err\n\t}\n\tres, _, err := r.doRequest(req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer res.Body.Close()\n\n\t\/\/ All 2xx and 3xx responses can be accepted for a put.\n\tif res.StatusCode >= 400 {\n\t\tif res.StatusCode == 401 {\n\t\t\treturn \"\", errLoginRequired\n\t\t}\n\t\terrBody, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tlog.Debugf(\"Unexpected response from server: %q %#v\", errBody, res.Header)\n\t\treturn \"\", utils.NewHTTPRequestError(fmt.Sprintf(\"Server error: %d trying to push %s:%s manifest\", res.StatusCode, imageName, tagName), res)\n\t}\n\n\treturn res.Header.Get(DockerDigestHeader), nil\n}\n\ntype remoteTags struct {\n\tname string\n\ttags []string\n}\n\n\/\/ Given a repository name, returns a json array of string tags\nfunc (r *Session) GetV2RemoteTags(ep *Endpoint, imageName string, auth *RequestAuthorization) ([]string, error) {\n\trouteURL, err := getV2Builder(ep).BuildTagsURL(imageName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmethod := \"GET\"\n\tlog.Debugf(\"[registry] Calling %q %s\", method, routeURL)\n\n\treq, err := r.reqFactory.NewRequest(method, routeURL, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := auth.Authorize(req); err != nil {\n\t\treturn nil, err\n\t}\n\tres, _, err := r.doRequest(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode != 200 {\n\t\tif res.StatusCode == 401 {\n\t\t\treturn nil, errLoginRequired\n\t\t} else if res.StatusCode == 404 {\n\t\t\treturn nil, ErrDoesNotExist\n\t\t}\n\t\treturn nil, utils.NewHTTPRequestError(fmt.Sprintf(\"Server error: %d trying to fetch for %s\", res.StatusCode, imageName), res)\n\t}\n\n\tdecoder := json.NewDecoder(res.Body)\n\tvar remote remoteTags\n\terr = decoder.Decode(&remote)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error while decoding the http response: %s\", err)\n\t}\n\treturn remote.tags, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The go-python Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage bind\n\nimport (\n\t\"go\/token\"\n)\n\nconst (\n\t\/\/ FIXME(corona10): ffibuilder.cdef should be written this way.\n\t\/\/ ffi.cdef(\"\"\"\n\t\/\/ \/\/header exported from 'go tool cgo'\n\t\/\/ #include \"%[3]s.h\"\n\t\/\/ \"\"\")\n\t\/\/ discuss: https:\/\/github.com\/go-python\/gopy\/pull\/93#discussion_r119652220\n\tcffiPreamble = `%[1]s\nfrom __future__ import unicode_literals\nimport collections\nimport os\nimport sys\nimport cffi as _cffi_backend\n\n_PY3 = sys.version_info[0] == 3\n\nffi = _cffi_backend.FFI()\nffi.cdef(\"\"\"\ntypedef signed char GoInt8;\ntypedef unsigned char GoUint8;\ntypedef short GoInt16;\ntypedef unsigned short GoUint16;\ntypedef int GoInt32;\ntypedef unsigned int GoUint32;\ntypedef long long GoInt64;\ntypedef size_t GoUintptr;\ntypedef unsigned long long GoUint64;\ntypedef GoInt64 GoInt;\ntypedef GoUint64 GoUint;\ntypedef float GoFloat32;\ntypedef double GoFloat64;\ntypedef struct { const char *p; GoInt n; } GoString;\ntypedef void *GoMap;\ntypedef void *GoChan;\ntypedef struct { void *t; void *v; } GoInterface;\ntypedef struct { void *data; GoInt len; GoInt cap; } GoSlice;\ntypedef struct { GoFloat32 real; GoFloat32 imag; } GoComplex64;\ntypedef struct { GoFloat64 real; GoFloat64 imag; } GoComplex128;\n\nextern GoComplex64 _cgopy_GoComplex64(GoFloat32 p0, GoFloat32 p1);\nextern GoComplex128 _cgopy_GoComplex128(GoFloat64 p0, GoFloat64 p1);\nextern GoString _cgopy_GoString(char* p0);\nextern char* _cgopy_CString(GoString p0);\nextern void _cgopy_FreeCString(char* p0);\nextern GoUint8 _cgopy_ErrorIsNil(GoInterface p0);\nextern char* _cgopy_ErrorString(GoInterface p0);\nextern void cgopy_incref(void* p0);\nextern void cgopy_decref(void* p0);\n\nextern void cgo_pkg_%[2]s_init();\n\n`\n\tcffiHelperPreamble = `\"\"\")\n\n# python <--> cffi helper.\nclass _cffi_helper(object):\n\n here = os.path.dirname(os.path.abspath(__file__))\n lib = ffi.dlopen(os.path.join(here, \"_%[1]s%[2]s\"))\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_bool(o):\n return ffi.cast('_Bool', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_complex64(o):\n real = o.real\n imag = o.imag\n complex64 = _cffi_helper.lib._cgopy_GoComplex64(real, imag)\n return complex64\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_complex128(o):\n real = o.real\n imag = o.imag\n complex128 = _cffi_helper.lib._cgopy_GoComplex128(real, imag)\n return complex128\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_string(o):\n if (_PY3 and isinstance(o, str)) or (not _PY3 and isinstance(o, unicode)):\n o = o.encode('utf8')\n s = ffi.new(\"char[]\", o)\n return _cffi_helper.lib._cgopy_GoString(s)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_int(o):\n return ffi.cast('int', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_int8(o):\n return ffi.cast('int8_t', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_int16(o):\n return ffi.cast('int16_t', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_int32(o):\n return ffi.cast('int32_t', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_int64(o):\n return ffi.cast('int64_t', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_float32(o):\n return ffi.cast('float', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_float64(o):\n return ffi.cast('double', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_uint(o):\n return ffi.cast('uint', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_uint8(o):\n return ffi.cast('uint8_t', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_uint16(o):\n return ffi.cast('uint16_t', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_uint32(o):\n return ffi.cast('uint32_t', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_uint64(o):\n return ffi.cast('uint64_t', o)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_bool(c):\n return bool(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_complex64(c):\n return complex(c.real, c.imag)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_complex128(c):\n return complex(c.real, c.imag)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_string(c):\n s = _cffi_helper.lib._cgopy_CString(c)\n pystr = ffi.string(s)\n _cffi_helper.lib._cgopy_FreeCString(s)\n pystr = pystr.decode('utf8')\n return pystr\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_errstring(c):\n s = _cffi_helper.lib._cgopy_ErrorString(c)\n pystr = ffi.string(s)\n _cffi_helper.lib._cgopy_FreeCString(s)\n pystr = pystr.decode('utf8')\n return pystr\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_int(c):\n return int(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_int8(c):\n return int(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_int16(c):\n return int(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_int32(c):\n return int(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_int64(c):\n return int(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_float32(c):\n return float(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_float64(c):\n return float(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_uint(c):\n return int(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_uint8(c):\n return int(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_uint16(c):\n return int(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_uint32(c):\n return int(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_uint64(c):\n return int(c)\n\n`\n)\n\ntype cffiGen struct {\n\twrapper *printer\n\n\tfset *token.FileSet\n\tpkg *Package\n\terr ErrorList\n\n\tvm string \/\/ python interpreter\n\tlang int \/\/ c-python api version (2,3)\n}\n\nfunc (g *cffiGen) gen() error {\n\t\/\/ Write preamble for CFFI library wrapper.\n\tg.genCffiPreamble()\n\tg.genCffiCdef()\n\tg.genWrappedPy()\n\treturn nil\n}\n\nfunc (g *cffiGen) genCffiPreamble() {\n\tn := g.pkg.pkg.Name()\n\tpkgDoc := g.pkg.doc.Doc\n\tif pkgDoc != \"\" {\n\t\tg.wrapper.Printf(cffiPreamble, `\"\"\"`+pkgDoc+`\"\"\"`, n)\n\t} else {\n\t\tg.wrapper.Printf(cffiPreamble, \"\", n)\n\t}\n}\n\nfunc (g *cffiGen) genCffiCdef() {\n\n\t\/\/ first, process slices, arrays\n\t{\n\t\tnames := g.pkg.syms.names()\n\t\tfor _, n := range names {\n\t\t\tsym := g.pkg.syms.sym(n)\n\t\t\tif !sym.isType() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tg.genCdefType(sym)\n\t\t}\n\t}\n\n\t\/\/ Register struct type defintions\n\tfor _, s := range g.pkg.structs {\n\t\tg.genCdefStruct(s)\n\t}\n\n\tfor _, s := range g.pkg.structs {\n\t\tfor _, ctor := range s.ctors {\n\t\t\tg.genCdefFunc(ctor)\n\t\t}\n\t}\n\n\tfor _, s := range g.pkg.structs {\n\t\tfor _, m := range s.meths {\n\t\t\tg.genCdefMethod(m)\n\t\t}\n\n\t\ttyp := s.Struct()\n\t\tfor i := 0; i < typ.NumFields(); i++ {\n\t\t\tf := typ.Field(i)\n\t\t\tif !f.Exported() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tg.genCdefStructMemberGetter(s, i, f)\n\t\t\tg.genCdefStructMemberSetter(s, i, f)\n\t\t}\n\n\t\tg.genCdefStructTPStr(s)\n\t}\n\n\tfor _, f := range g.pkg.funcs {\n\t\tg.genCdefFunc(f)\n\t}\n\n\tfor _, c := range g.pkg.consts {\n\t\tg.genCdefConst(c)\n\t}\n\n\tfor _, v := range g.pkg.vars {\n\t\tg.genCdefVar(v)\n\t}\n}\n\nfunc (g *cffiGen) genWrappedPy() {\n\tn := g.pkg.pkg.Name()\n\tg.wrapper.Printf(cffiHelperPreamble, n, shlibExt)\n\tg.wrapper.Indent()\n\n\t\/\/ first, process slices, arrays\n\tnames := g.pkg.syms.names()\n\tfor _, n := range names {\n\t\tsym := g.pkg.syms.sym(n)\n\t\tif !sym.isType() {\n\t\t\tcontinue\n\t\t}\n\t\tif sym.isPointer() {\n\t\t\tcontinue\n\t\t}\n\t\tg.genTypeConverter(sym)\n\t}\n\n\tfor _, s := range g.pkg.structs {\n\t\tg.genStructConversion(s)\n\t}\n\tg.wrapper.Outdent()\n\n\t\/\/ After generating all of the stuff for the preamble (includes struct, interface.. etc)\n\t\/\/ then call a function which checks Cgo is successfully loaded and initialized.\n\tg.wrapper.Printf(\"# make sure Cgo is loaded and initialized\\n\")\n\tg.wrapper.Printf(\"_cffi_helper.lib.cgo_pkg_%[1]s_init()\\n\", n)\n\n\tfor _, n := range names {\n\t\tsym := g.pkg.syms.sym(n)\n\t\tif !sym.isType() {\n\t\t\tcontinue\n\t\t}\n\t\tg.genType(sym)\n\t}\n\n\tfor _, s := range g.pkg.structs {\n\t\tg.genStruct(s)\n\t}\n\n\tfor _, s := range g.pkg.structs {\n\t\tfor _, ctor := range s.ctors {\n\t\t\tg.genFunc(ctor)\n\t\t}\n\t}\n\n\tfor _, f := range g.pkg.funcs {\n\t\tg.genFunc(f)\n\t}\n\n\tfor _, c := range g.pkg.consts {\n\t\tg.genConst(c)\n\t}\n\n\tfor _, v := range g.pkg.vars {\n\t\tg.genVar(v)\n\t}\n}\n\nfunc (g *cffiGen) genConst(c Const) {\n\tg.genGetFunc(c.f)\n}\n\nfunc (g *cffiGen) genVar(v Var) {\n\tid := g.pkg.Name() + \"_\" + v.Name()\n\tget := \"returns \" + g.pkg.Name() + \".\" + v.Name()\n\tset := \"sets \" + g.pkg.Name() + \".\" + v.Name()\n\tif v.doc != \"\" {\n\t\t\/\/ if the Go variable had some documentation attached,\n\t\t\/\/ put it there as well.\n\t\tget += \"\\n\\n\" + v.doc\n\t\tset += \"\\n\\n\" + v.doc\n\t}\n\tdoc := v.doc\n\t{\n\t\tres := []*Var{newVar(g.pkg, v.GoType(), \"ret\", v.Name(), doc)}\n\t\tsig := newSignature(g.pkg, nil, nil, res)\n\t\tfget := Func{\n\t\t\tpkg: g.pkg,\n\t\t\tsig: sig,\n\t\t\ttyp: nil,\n\t\t\tname: v.Name(),\n\t\t\tid: id + \"_get\",\n\t\t\tdoc: get,\n\t\t\tret: v.GoType(),\n\t\t\terr: false,\n\t\t}\n\t\tg.genGetFunc(fget)\n\t}\n\t{\n\t\tparams := []*Var{newVar(g.pkg, v.GoType(), \"arg\", v.Name(), doc)}\n\t\tsig := newSignature(g.pkg, nil, params, nil)\n\t\tfset := Func{\n\t\t\tpkg: g.pkg,\n\t\t\tsig: sig,\n\t\t\ttyp: nil,\n\t\t\tname: v.Name(),\n\t\t\tid: id + \"_set\",\n\t\t\tdoc: set,\n\t\t\tret: nil,\n\t\t\terr: false,\n\t\t}\n\t\tg.genSetFunc(fset)\n\t}\n}\n\nfunc (g *cffiGen) genCdefConst(c Const) {\n\tg.genCdefFunc(c.f)\n}\n\nfunc (g *cffiGen) genCdefVar(v Var) {\n\tid := g.pkg.Name() + \"_\" + v.Name()\n\tdoc := v.doc\n\t{\n\t\tres := []*Var{newVar(g.pkg, v.GoType(), \"ret\", v.Name(), doc)}\n\t\tsig := newSignature(g.pkg, nil, nil, res)\n\t\tfget := Func{\n\t\t\tpkg: g.pkg,\n\t\t\tsig: sig,\n\t\t\ttyp: nil,\n\t\t\tname: v.Name(),\n\t\t\tid: id + \"_get\",\n\t\t\tdoc: \"returns \" + g.pkg.Name() + \".\" + v.Name(),\n\t\t\tret: v.GoType(),\n\t\t\terr: false,\n\t\t}\n\t\tg.genCdefFunc(fget)\n\t}\n\t{\n\t\tparams := []*Var{newVar(g.pkg, v.GoType(), \"arg\", v.Name(), doc)}\n\t\tsig := newSignature(g.pkg, nil, params, nil)\n\t\tfset := Func{\n\t\t\tpkg: g.pkg,\n\t\t\tsig: sig,\n\t\t\ttyp: nil,\n\t\t\tname: v.Name(),\n\t\t\tid: id + \"_set\",\n\t\t\tdoc: \"sets \" + g.pkg.Name() + \".\" + v.Name(),\n\t\t\tret: nil,\n\t\t\terr: false,\n\t\t}\n\t\tg.genCdefFunc(fset)\n\t}\n}\n<commit_msg>bind: use uintXX_t types<commit_after>\/\/ Copyright 2017 The go-python Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage bind\n\nimport (\n\t\"go\/token\"\n)\n\nconst (\n\t\/\/ FIXME(corona10): ffibuilder.cdef should be written this way.\n\t\/\/ ffi.cdef(\"\"\"\n\t\/\/ \/\/header exported from 'go tool cgo'\n\t\/\/ #include \"%[3]s.h\"\n\t\/\/ \"\"\")\n\t\/\/ discuss: https:\/\/github.com\/go-python\/gopy\/pull\/93#discussion_r119652220\n\tcffiPreamble = `%[1]s\nfrom __future__ import unicode_literals\nimport collections\nimport os\nimport sys\nimport cffi as _cffi_backend\n\n_PY3 = sys.version_info[0] == 3\n\nffi = _cffi_backend.FFI()\nffi.cdef(\"\"\"\ntypedef int8_t GoInt8;\ntypedef uint8_t GoUint8;\ntypedef int16_t GoInt16;\ntypedef uint16_t GoUint16;\ntypedef int32_t GoInt32;\ntypedef uint32_t GoUint32;\ntypedef int64_t GoInt64;\ntypedef uintptr_t GoUintptr;\ntypedef uint64_t GoUint64;\ntypedef GoInt64 GoInt;\ntypedef GoUint64 GoUint;\ntypedef float GoFloat32;\ntypedef double GoFloat64;\ntypedef struct { const char *p; GoInt n; } GoString;\ntypedef void *GoMap;\ntypedef void *GoChan;\ntypedef struct { void *t; void *v; } GoInterface;\ntypedef struct { void *data; GoInt len; GoInt cap; } GoSlice;\ntypedef struct { GoFloat32 real; GoFloat32 imag; } GoComplex64;\ntypedef struct { GoFloat64 real; GoFloat64 imag; } GoComplex128;\n\nextern GoComplex64 _cgopy_GoComplex64(GoFloat32 p0, GoFloat32 p1);\nextern GoComplex128 _cgopy_GoComplex128(GoFloat64 p0, GoFloat64 p1);\nextern GoString _cgopy_GoString(char* p0);\nextern char* _cgopy_CString(GoString p0);\nextern void _cgopy_FreeCString(char* p0);\nextern GoUint8 _cgopy_ErrorIsNil(GoInterface p0);\nextern char* _cgopy_ErrorString(GoInterface p0);\nextern void cgopy_incref(void* p0);\nextern void cgopy_decref(void* p0);\n\nextern void cgo_pkg_%[2]s_init();\n\n`\n\tcffiHelperPreamble = `\"\"\")\n\n# python <--> cffi helper.\nclass _cffi_helper(object):\n\n here = os.path.dirname(os.path.abspath(__file__))\n lib = ffi.dlopen(os.path.join(here, \"_%[1]s%[2]s\"))\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_bool(o):\n return ffi.cast('_Bool', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_complex64(o):\n real = o.real\n imag = o.imag\n complex64 = _cffi_helper.lib._cgopy_GoComplex64(real, imag)\n return complex64\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_complex128(o):\n real = o.real\n imag = o.imag\n complex128 = _cffi_helper.lib._cgopy_GoComplex128(real, imag)\n return complex128\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_string(o):\n if (_PY3 and isinstance(o, str)) or (not _PY3 and isinstance(o, unicode)):\n o = o.encode('utf8')\n s = ffi.new(\"char[]\", o)\n return _cffi_helper.lib._cgopy_GoString(s)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_int(o):\n return ffi.cast('int', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_int8(o):\n return ffi.cast('int8_t', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_int16(o):\n return ffi.cast('int16_t', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_int32(o):\n return ffi.cast('int32_t', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_int64(o):\n return ffi.cast('int64_t', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_float32(o):\n return ffi.cast('float', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_float64(o):\n return ffi.cast('double', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_uint(o):\n return ffi.cast('uint', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_uint8(o):\n return ffi.cast('uint8_t', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_uint16(o):\n return ffi.cast('uint16_t', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_uint32(o):\n return ffi.cast('uint32_t', o)\n\n @staticmethod\n def cffi_cgopy_cnv_py2c_uint64(o):\n return ffi.cast('uint64_t', o)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_bool(c):\n return bool(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_complex64(c):\n return complex(c.real, c.imag)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_complex128(c):\n return complex(c.real, c.imag)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_string(c):\n s = _cffi_helper.lib._cgopy_CString(c)\n pystr = ffi.string(s)\n _cffi_helper.lib._cgopy_FreeCString(s)\n pystr = pystr.decode('utf8')\n return pystr\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_errstring(c):\n s = _cffi_helper.lib._cgopy_ErrorString(c)\n pystr = ffi.string(s)\n _cffi_helper.lib._cgopy_FreeCString(s)\n pystr = pystr.decode('utf8')\n return pystr\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_int(c):\n return int(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_int8(c):\n return int(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_int16(c):\n return int(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_int32(c):\n return int(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_int64(c):\n return int(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_float32(c):\n return float(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_float64(c):\n return float(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_uint(c):\n return int(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_uint8(c):\n return int(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_uint16(c):\n return int(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_uint32(c):\n return int(c)\n\n @staticmethod\n def cffi_cgopy_cnv_c2py_uint64(c):\n return int(c)\n\n`\n)\n\ntype cffiGen struct {\n\twrapper *printer\n\n\tfset *token.FileSet\n\tpkg *Package\n\terr ErrorList\n\n\tvm string \/\/ python interpreter\n\tlang int \/\/ c-python api version (2,3)\n}\n\nfunc (g *cffiGen) gen() error {\n\t\/\/ Write preamble for CFFI library wrapper.\n\tg.genCffiPreamble()\n\tg.genCffiCdef()\n\tg.genWrappedPy()\n\treturn nil\n}\n\nfunc (g *cffiGen) genCffiPreamble() {\n\tn := g.pkg.pkg.Name()\n\tpkgDoc := g.pkg.doc.Doc\n\tif pkgDoc != \"\" {\n\t\tg.wrapper.Printf(cffiPreamble, `\"\"\"`+pkgDoc+`\"\"\"`, n)\n\t} else {\n\t\tg.wrapper.Printf(cffiPreamble, \"\", n)\n\t}\n}\n\nfunc (g *cffiGen) genCffiCdef() {\n\n\t\/\/ first, process slices, arrays\n\t{\n\t\tnames := g.pkg.syms.names()\n\t\tfor _, n := range names {\n\t\t\tsym := g.pkg.syms.sym(n)\n\t\t\tif !sym.isType() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tg.genCdefType(sym)\n\t\t}\n\t}\n\n\t\/\/ Register struct type defintions\n\tfor _, s := range g.pkg.structs {\n\t\tg.genCdefStruct(s)\n\t}\n\n\tfor _, s := range g.pkg.structs {\n\t\tfor _, ctor := range s.ctors {\n\t\t\tg.genCdefFunc(ctor)\n\t\t}\n\t}\n\n\tfor _, s := range g.pkg.structs {\n\t\tfor _, m := range s.meths {\n\t\t\tg.genCdefMethod(m)\n\t\t}\n\n\t\ttyp := s.Struct()\n\t\tfor i := 0; i < typ.NumFields(); i++ {\n\t\t\tf := typ.Field(i)\n\t\t\tif !f.Exported() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tg.genCdefStructMemberGetter(s, i, f)\n\t\t\tg.genCdefStructMemberSetter(s, i, f)\n\t\t}\n\n\t\tg.genCdefStructTPStr(s)\n\t}\n\n\tfor _, f := range g.pkg.funcs {\n\t\tg.genCdefFunc(f)\n\t}\n\n\tfor _, c := range g.pkg.consts {\n\t\tg.genCdefConst(c)\n\t}\n\n\tfor _, v := range g.pkg.vars {\n\t\tg.genCdefVar(v)\n\t}\n}\n\nfunc (g *cffiGen) genWrappedPy() {\n\tn := g.pkg.pkg.Name()\n\tg.wrapper.Printf(cffiHelperPreamble, n, shlibExt)\n\tg.wrapper.Indent()\n\n\t\/\/ first, process slices, arrays\n\tnames := g.pkg.syms.names()\n\tfor _, n := range names {\n\t\tsym := g.pkg.syms.sym(n)\n\t\tif !sym.isType() {\n\t\t\tcontinue\n\t\t}\n\t\tif sym.isPointer() {\n\t\t\tcontinue\n\t\t}\n\t\tg.genTypeConverter(sym)\n\t}\n\n\tfor _, s := range g.pkg.structs {\n\t\tg.genStructConversion(s)\n\t}\n\tg.wrapper.Outdent()\n\n\t\/\/ After generating all of the stuff for the preamble (includes struct, interface.. etc)\n\t\/\/ then call a function which checks Cgo is successfully loaded and initialized.\n\tg.wrapper.Printf(\"# make sure Cgo is loaded and initialized\\n\")\n\tg.wrapper.Printf(\"_cffi_helper.lib.cgo_pkg_%[1]s_init()\\n\", n)\n\n\tfor _, n := range names {\n\t\tsym := g.pkg.syms.sym(n)\n\t\tif !sym.isType() {\n\t\t\tcontinue\n\t\t}\n\t\tg.genType(sym)\n\t}\n\n\tfor _, s := range g.pkg.structs {\n\t\tg.genStruct(s)\n\t}\n\n\tfor _, s := range g.pkg.structs {\n\t\tfor _, ctor := range s.ctors {\n\t\t\tg.genFunc(ctor)\n\t\t}\n\t}\n\n\tfor _, f := range g.pkg.funcs {\n\t\tg.genFunc(f)\n\t}\n\n\tfor _, c := range g.pkg.consts {\n\t\tg.genConst(c)\n\t}\n\n\tfor _, v := range g.pkg.vars {\n\t\tg.genVar(v)\n\t}\n}\n\nfunc (g *cffiGen) genConst(c Const) {\n\tg.genGetFunc(c.f)\n}\n\nfunc (g *cffiGen) genVar(v Var) {\n\tid := g.pkg.Name() + \"_\" + v.Name()\n\tget := \"returns \" + g.pkg.Name() + \".\" + v.Name()\n\tset := \"sets \" + g.pkg.Name() + \".\" + v.Name()\n\tif v.doc != \"\" {\n\t\t\/\/ if the Go variable had some documentation attached,\n\t\t\/\/ put it there as well.\n\t\tget += \"\\n\\n\" + v.doc\n\t\tset += \"\\n\\n\" + v.doc\n\t}\n\tdoc := v.doc\n\t{\n\t\tres := []*Var{newVar(g.pkg, v.GoType(), \"ret\", v.Name(), doc)}\n\t\tsig := newSignature(g.pkg, nil, nil, res)\n\t\tfget := Func{\n\t\t\tpkg: g.pkg,\n\t\t\tsig: sig,\n\t\t\ttyp: nil,\n\t\t\tname: v.Name(),\n\t\t\tid: id + \"_get\",\n\t\t\tdoc: get,\n\t\t\tret: v.GoType(),\n\t\t\terr: false,\n\t\t}\n\t\tg.genGetFunc(fget)\n\t}\n\t{\n\t\tparams := []*Var{newVar(g.pkg, v.GoType(), \"arg\", v.Name(), doc)}\n\t\tsig := newSignature(g.pkg, nil, params, nil)\n\t\tfset := Func{\n\t\t\tpkg: g.pkg,\n\t\t\tsig: sig,\n\t\t\ttyp: nil,\n\t\t\tname: v.Name(),\n\t\t\tid: id + \"_set\",\n\t\t\tdoc: set,\n\t\t\tret: nil,\n\t\t\terr: false,\n\t\t}\n\t\tg.genSetFunc(fset)\n\t}\n}\n\nfunc (g *cffiGen) genCdefConst(c Const) {\n\tg.genCdefFunc(c.f)\n}\n\nfunc (g *cffiGen) genCdefVar(v Var) {\n\tid := g.pkg.Name() + \"_\" + v.Name()\n\tdoc := v.doc\n\t{\n\t\tres := []*Var{newVar(g.pkg, v.GoType(), \"ret\", v.Name(), doc)}\n\t\tsig := newSignature(g.pkg, nil, nil, res)\n\t\tfget := Func{\n\t\t\tpkg: g.pkg,\n\t\t\tsig: sig,\n\t\t\ttyp: nil,\n\t\t\tname: v.Name(),\n\t\t\tid: id + \"_get\",\n\t\t\tdoc: \"returns \" + g.pkg.Name() + \".\" + v.Name(),\n\t\t\tret: v.GoType(),\n\t\t\terr: false,\n\t\t}\n\t\tg.genCdefFunc(fget)\n\t}\n\t{\n\t\tparams := []*Var{newVar(g.pkg, v.GoType(), \"arg\", v.Name(), doc)}\n\t\tsig := newSignature(g.pkg, nil, params, nil)\n\t\tfset := Func{\n\t\t\tpkg: g.pkg,\n\t\t\tsig: sig,\n\t\t\ttyp: nil,\n\t\t\tname: v.Name(),\n\t\t\tid: id + \"_set\",\n\t\t\tdoc: \"sets \" + g.pkg.Name() + \".\" + v.Name(),\n\t\t\tret: nil,\n\t\t\terr: false,\n\t\t}\n\t\tg.genCdefFunc(fset)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package fasthttputil\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ NewPipeConns returns new bi-directonal connection pipe.\nfunc NewPipeConns() *PipeConns {\n\tch1 := acquirePipeChan()\n\tch2 := acquirePipeChan()\n\n\tpc := &PipeConns{}\n\tpc.c1.r = ch1\n\tpc.c1.w = ch2\n\tpc.c2.r = ch2\n\tpc.c2.w = ch1\n\tpc.c1.pc = pc\n\tpc.c2.pc = pc\n\treturn pc\n}\n\n\/\/ PipeConns provides bi-directional connection pipe,\n\/\/ which use in-process memory as a transport.\n\/\/\n\/\/ PipeConns must be created by calling NewPipeConns.\n\/\/\n\/\/ PipeConns have the following additional features comparing to connections\n\/\/ returned from net.Pipe():\n\/\/\n\/\/ * It is faster.\n\/\/ * It buffers Write calls, so there is no need to have concurrent goroutine\n\/\/ calling Read in order to unblock each Write call.\ntype PipeConns struct {\n\tc1 pipeConn\n\tc2 pipeConn\n}\n\n\/\/ Conn1 returns the first end of bi-directional pipe.\n\/\/\n\/\/ Data written to Conn1 may be read from Conn2.\n\/\/ Data written to Conn2 may be read from Conn1.\nfunc (pc *PipeConns) Conn1() net.Conn {\n\treturn &pc.c1\n}\n\n\/\/ Conn2 returns the second end of bi-directional pipe.\n\/\/\n\/\/ Data written to Conn2 may be read from Conn1.\n\/\/ Data written to Conn1 may be read from Conn2.\nfunc (pc *PipeConns) Conn2() net.Conn {\n\treturn &pc.c2\n}\n\nfunc (pc *PipeConns) release() {\n\tpc.c1.wlock.Lock()\n\tpc.c2.wlock.Lock()\n\tmustRelease := pc.c1.wclosed && pc.c2.wclosed\n\tpc.c1.wlock.Unlock()\n\tpc.c2.wlock.Unlock()\n\n\tif mustRelease {\n\t\tpc.c1.release()\n\t\tpc.c2.release()\n\t}\n}\n\ntype pipeConn struct {\n\tr *pipeChan\n\tw *pipeChan\n\tb *byteBuffer\n\tbb []byte\n\n\trlock sync.Mutex\n\trclosed bool\n\n\twlock sync.Mutex\n\twclosed bool\n\n\tpc *PipeConns\n}\n\nfunc (c *pipeConn) Write(p []byte) (int, error) {\n\tb := acquireByteBuffer()\n\tb.b = append(b.b[:0], p...)\n\n\tc.wlock.Lock()\n\tif c.wclosed {\n\t\tc.wlock.Unlock()\n\t\treleaseByteBuffer(b)\n\t\treturn 0, errConnectionClosed\n\t}\n\tc.w.ch <- b\n\tc.wlock.Unlock()\n\n\treturn len(p), nil\n}\n\nfunc (c *pipeConn) Read(p []byte) (int, error) {\n\tmayBlock := true\n\tnn := 0\n\tfor len(p) > 0 {\n\t\tn, err := c.read(p, mayBlock)\n\t\tnn += n\n\t\tif err != nil {\n\t\t\tif !mayBlock && err == errWouldBlock {\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\treturn nn, err\n\t\t}\n\t\tp = p[n:]\n\t\tmayBlock = false\n\t}\n\n\treturn nn, nil\n}\n\nfunc (c *pipeConn) read(p []byte, mayBlock bool) (int, error) {\n\tif len(c.bb) == 0 {\n\t\treleaseByteBuffer(c.b)\n\t\tc.b = nil\n\n\t\tc.rlock.Lock()\n\t\tif c.rclosed {\n\t\t\tc.rlock.Unlock()\n\t\t\treturn 0, io.EOF\n\t\t}\n\n\t\tif mayBlock {\n\t\t\tc.b = <-c.r.ch\n\t\t} else {\n\t\t\tselect {\n\t\t\tcase c.b = <-c.r.ch:\n\t\t\tdefault:\n\t\t\t\tc.rlock.Unlock()\n\t\t\t\treturn 0, errWouldBlock\n\t\t\t}\n\t\t}\n\n\t\tif c.b == nil {\n\t\t\tc.rclosed = true\n\t\t\tc.rlock.Unlock()\n\t\t\treturn 0, io.EOF\n\t\t}\n\t\tc.rlock.Unlock()\n\n\t\tc.bb = c.b.b\n\t}\n\tn := copy(p, c.bb)\n\tc.bb = c.bb[n:]\n\n\treturn n, nil\n}\n\nvar (\n\terrWouldBlock = errors.New(\"would block\")\n\terrConnectionClosed = errors.New(\"connection closed\")\n\terrNoDeadlines = errors.New(\"deadline not supported\")\n)\n\nfunc (c *pipeConn) Close() error {\n\tc.wlock.Lock()\n\tif c.wclosed {\n\t\tc.wlock.Unlock()\n\t\treturn errConnectionClosed\n\t}\n\n\tc.wclosed = true\n\tc.w.ch <- nil\n\tc.wlock.Unlock()\n\n\tc.pc.release()\n\treturn nil\n}\n\nfunc (c *pipeConn) release() {\n\tc.rlock.Lock()\n\n\treleaseByteBuffer(c.b)\n\tc.b = nil\n\tc.bb = nil\n\n\tif !c.rclosed {\n\t\tc.rclosed = true\n\t\tfor b := range c.r.ch {\n\t\t\treleaseByteBuffer(b)\n\t\t\tif b == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif c.r != nil {\n\t\treleasePipeChan(c.r)\n\t\tc.r = nil\n\t\tc.w = nil\n\t}\n\n\tc.rlock.Unlock()\n}\n\nfunc (c *pipeConn) LocalAddr() net.Addr {\n\treturn pipeAddr(0)\n}\n\nfunc (c *pipeConn) RemoteAddr() net.Addr {\n\treturn pipeAddr(0)\n}\n\nfunc (c *pipeConn) SetDeadline(t time.Time) error {\n\treturn errNoDeadlines\n}\n\nfunc (c *pipeConn) SetReadDeadline(t time.Time) error {\n\treturn c.SetDeadline(t)\n}\n\nfunc (c *pipeConn) SetWriteDeadline(t time.Time) error {\n\treturn c.SetDeadline(t)\n}\n\ntype pipeAddr int\n\nfunc (pipeAddr) Network() string {\n\treturn \"pipe\"\n}\n\nfunc (pipeAddr) String() string {\n\treturn \"pipe\"\n}\n\ntype byteBuffer struct {\n\tb []byte\n}\n\nfunc acquireByteBuffer() *byteBuffer {\n\treturn byteBufferPool.Get().(*byteBuffer)\n}\n\nfunc releaseByteBuffer(b *byteBuffer) {\n\tif b != nil {\n\t\tbyteBufferPool.Put(b)\n\t}\n}\n\nvar byteBufferPool = &sync.Pool{\n\tNew: func() interface{} {\n\t\treturn &byteBuffer{\n\t\t\tb: make([]byte, 1024),\n\t\t}\n\t},\n}\n\nfunc acquirePipeChan() *pipeChan {\n\tch := pipeChanPool.Get().(*pipeChan)\n\tif len(ch.ch) > 0 {\n\t\tpanic(\"BUG: non-empty pipeChan acquired\")\n\t}\n\treturn ch\n}\n\nfunc releasePipeChan(ch *pipeChan) {\n\tif len(ch.ch) > 0 {\n\t\tpanic(\"BUG: non-empty pipeChan released\")\n\t}\n\tpipeChanPool.Put(ch)\n}\n\nvar pipeChanPool = &sync.Pool{\n\tNew: func() interface{} {\n\t\treturn &pipeChan{\n\t\t\tch: make(chan *byteBuffer, 4),\n\t\t}\n\t},\n}\n\ntype pipeChan struct {\n\tch chan *byteBuffer\n}\n<commit_msg>typo fix<commit_after>package fasthttputil\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ NewPipeConns returns new bi-directonal connection pipe.\nfunc NewPipeConns() *PipeConns {\n\tch1 := acquirePipeChan()\n\tch2 := acquirePipeChan()\n\n\tpc := &PipeConns{}\n\tpc.c1.r = ch1\n\tpc.c1.w = ch2\n\tpc.c2.r = ch2\n\tpc.c2.w = ch1\n\tpc.c1.pc = pc\n\tpc.c2.pc = pc\n\treturn pc\n}\n\n\/\/ PipeConns provides bi-directional connection pipe,\n\/\/ which use in-process memory as a transport.\n\/\/\n\/\/ PipeConns must be created by calling NewPipeConns.\n\/\/\n\/\/ PipeConns has the following additional features comparing to connections\n\/\/ returned from net.Pipe():\n\/\/\n\/\/ * It is faster.\n\/\/ * It buffers Write calls, so there is no need to have concurrent goroutine\n\/\/ calling Read in order to unblock each Write call.\ntype PipeConns struct {\n\tc1 pipeConn\n\tc2 pipeConn\n}\n\n\/\/ Conn1 returns the first end of bi-directional pipe.\n\/\/\n\/\/ Data written to Conn1 may be read from Conn2.\n\/\/ Data written to Conn2 may be read from Conn1.\nfunc (pc *PipeConns) Conn1() net.Conn {\n\treturn &pc.c1\n}\n\n\/\/ Conn2 returns the second end of bi-directional pipe.\n\/\/\n\/\/ Data written to Conn2 may be read from Conn1.\n\/\/ Data written to Conn1 may be read from Conn2.\nfunc (pc *PipeConns) Conn2() net.Conn {\n\treturn &pc.c2\n}\n\nfunc (pc *PipeConns) release() {\n\tpc.c1.wlock.Lock()\n\tpc.c2.wlock.Lock()\n\tmustRelease := pc.c1.wclosed && pc.c2.wclosed\n\tpc.c1.wlock.Unlock()\n\tpc.c2.wlock.Unlock()\n\n\tif mustRelease {\n\t\tpc.c1.release()\n\t\tpc.c2.release()\n\t}\n}\n\ntype pipeConn struct {\n\tr *pipeChan\n\tw *pipeChan\n\tb *byteBuffer\n\tbb []byte\n\n\trlock sync.Mutex\n\trclosed bool\n\n\twlock sync.Mutex\n\twclosed bool\n\n\tpc *PipeConns\n}\n\nfunc (c *pipeConn) Write(p []byte) (int, error) {\n\tb := acquireByteBuffer()\n\tb.b = append(b.b[:0], p...)\n\n\tc.wlock.Lock()\n\tif c.wclosed {\n\t\tc.wlock.Unlock()\n\t\treleaseByteBuffer(b)\n\t\treturn 0, errConnectionClosed\n\t}\n\tc.w.ch <- b\n\tc.wlock.Unlock()\n\n\treturn len(p), nil\n}\n\nfunc (c *pipeConn) Read(p []byte) (int, error) {\n\tmayBlock := true\n\tnn := 0\n\tfor len(p) > 0 {\n\t\tn, err := c.read(p, mayBlock)\n\t\tnn += n\n\t\tif err != nil {\n\t\t\tif !mayBlock && err == errWouldBlock {\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\treturn nn, err\n\t\t}\n\t\tp = p[n:]\n\t\tmayBlock = false\n\t}\n\n\treturn nn, nil\n}\n\nfunc (c *pipeConn) read(p []byte, mayBlock bool) (int, error) {\n\tif len(c.bb) == 0 {\n\t\treleaseByteBuffer(c.b)\n\t\tc.b = nil\n\n\t\tc.rlock.Lock()\n\t\tif c.rclosed {\n\t\t\tc.rlock.Unlock()\n\t\t\treturn 0, io.EOF\n\t\t}\n\n\t\tif mayBlock {\n\t\t\tc.b = <-c.r.ch\n\t\t} else {\n\t\t\tselect {\n\t\t\tcase c.b = <-c.r.ch:\n\t\t\tdefault:\n\t\t\t\tc.rlock.Unlock()\n\t\t\t\treturn 0, errWouldBlock\n\t\t\t}\n\t\t}\n\n\t\tif c.b == nil {\n\t\t\tc.rclosed = true\n\t\t\tc.rlock.Unlock()\n\t\t\treturn 0, io.EOF\n\t\t}\n\t\tc.rlock.Unlock()\n\n\t\tc.bb = c.b.b\n\t}\n\tn := copy(p, c.bb)\n\tc.bb = c.bb[n:]\n\n\treturn n, nil\n}\n\nvar (\n\terrWouldBlock = errors.New(\"would block\")\n\terrConnectionClosed = errors.New(\"connection closed\")\n\terrNoDeadlines = errors.New(\"deadline not supported\")\n)\n\nfunc (c *pipeConn) Close() error {\n\tc.wlock.Lock()\n\tif c.wclosed {\n\t\tc.wlock.Unlock()\n\t\treturn errConnectionClosed\n\t}\n\n\tc.wclosed = true\n\tc.w.ch <- nil\n\tc.wlock.Unlock()\n\n\tc.pc.release()\n\treturn nil\n}\n\nfunc (c *pipeConn) release() {\n\tc.rlock.Lock()\n\n\treleaseByteBuffer(c.b)\n\tc.b = nil\n\tc.bb = nil\n\n\tif !c.rclosed {\n\t\tc.rclosed = true\n\t\tfor b := range c.r.ch {\n\t\t\treleaseByteBuffer(b)\n\t\t\tif b == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif c.r != nil {\n\t\treleasePipeChan(c.r)\n\t\tc.r = nil\n\t\tc.w = nil\n\t}\n\n\tc.rlock.Unlock()\n}\n\nfunc (c *pipeConn) LocalAddr() net.Addr {\n\treturn pipeAddr(0)\n}\n\nfunc (c *pipeConn) RemoteAddr() net.Addr {\n\treturn pipeAddr(0)\n}\n\nfunc (c *pipeConn) SetDeadline(t time.Time) error {\n\treturn errNoDeadlines\n}\n\nfunc (c *pipeConn) SetReadDeadline(t time.Time) error {\n\treturn c.SetDeadline(t)\n}\n\nfunc (c *pipeConn) SetWriteDeadline(t time.Time) error {\n\treturn c.SetDeadline(t)\n}\n\ntype pipeAddr int\n\nfunc (pipeAddr) Network() string {\n\treturn \"pipe\"\n}\n\nfunc (pipeAddr) String() string {\n\treturn \"pipe\"\n}\n\ntype byteBuffer struct {\n\tb []byte\n}\n\nfunc acquireByteBuffer() *byteBuffer {\n\treturn byteBufferPool.Get().(*byteBuffer)\n}\n\nfunc releaseByteBuffer(b *byteBuffer) {\n\tif b != nil {\n\t\tbyteBufferPool.Put(b)\n\t}\n}\n\nvar byteBufferPool = &sync.Pool{\n\tNew: func() interface{} {\n\t\treturn &byteBuffer{\n\t\t\tb: make([]byte, 1024),\n\t\t}\n\t},\n}\n\nfunc acquirePipeChan() *pipeChan {\n\tch := pipeChanPool.Get().(*pipeChan)\n\tif len(ch.ch) > 0 {\n\t\tpanic(\"BUG: non-empty pipeChan acquired\")\n\t}\n\treturn ch\n}\n\nfunc releasePipeChan(ch *pipeChan) {\n\tif len(ch.ch) > 0 {\n\t\tpanic(\"BUG: non-empty pipeChan released\")\n\t}\n\tpipeChanPool.Put(ch)\n}\n\nvar pipeChanPool = &sync.Pool{\n\tNew: func() interface{} {\n\t\treturn &pipeChan{\n\t\t\tch: make(chan *byteBuffer, 4),\n\t\t}\n\t},\n}\n\ntype pipeChan struct {\n\tch chan *byteBuffer\n}\n<|endoftext|>"} {"text":"<commit_before>package release\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/weaveworks\/fluxy\"\n)\n\n\/\/ InmemStore is an in-memory job store.\ntype InmemStore struct {\n\tmtx sync.RWMutex\n\tjobs map[flux.ReleaseID]flux.ReleaseJob\n\toldest time.Duration\n}\n\nvar _ flux.ReleaseJobStore = &InmemStore{}\n\n\/\/ NewInmemStore returns a usable in-mem job store.\nfunc NewInmemStore(oldest time.Duration) *InmemStore {\n\treturn &InmemStore{\n\t\tjobs: map[flux.ReleaseID]flux.ReleaseJob{},\n\t\toldest: oldest,\n\t}\n}\n\n\/\/ GetJob implements JobStore.\nfunc (s *InmemStore) GetJob(inst flux.InstanceID, id flux.ReleaseID) (flux.ReleaseJob, error) {\n\ts.mtx.RLock()\n\tdefer s.mtx.RUnlock()\n\tjob, ok := s.jobs[id]\n\tif !ok && job.Instance == inst {\n\t\treturn flux.ReleaseJob{}, flux.ErrNoSuchReleaseJob\n\t}\n\treturn job, nil\n}\n\n\/\/ PutJob implements JobStore.\nfunc (s *InmemStore) PutJob(inst flux.InstanceID, spec flux.ReleaseJobSpec) (flux.ReleaseID, error) {\n\ts.mtx.Lock()\n\tdefer s.mtx.Unlock()\n\n\tid := flux.NewReleaseID()\n\tfor _, exists := s.jobs[id]; exists; id = flux.NewReleaseID() {\n\t\t\/\/ in case of ID collision\n\t}\n\n\ts.jobs[id] = flux.ReleaseJob{\n\t\tInstance: inst,\n\t\tSpec: spec,\n\t\tID: id,\n\t\tSubmitted: time.Now().UTC(),\n\t}\n\treturn id, nil\n}\n\n\/\/ NextJob implements JobStore.\n\/\/ It returns immediately. If no job is available, ErrNoJobAvailable is returned.\nfunc (s *InmemStore) NextJob() (flux.ReleaseJob, error) {\n\ts.mtx.Lock()\n\tdefer s.mtx.Unlock()\n\n\tvar (\n\t\tcandidate flux.ReleaseJob\n\t\tearliest = time.Now().UTC()\n\t)\n\tfor _, job := range s.jobs {\n\t\tif job.Claimed.IsZero() && job.Submitted.Before(earliest) {\n\t\t\tcandidate = job\n\t\t}\n\t}\n\n\tif candidate.ID == \"\" {\n\t\treturn flux.ReleaseJob{}, flux.ErrNoReleaseJobAvailable\n\t}\n\n\tcandidate.Claimed = time.Now().UTC()\n\ts.jobs[candidate.ID] = candidate\n\treturn candidate, nil\n}\n\n\/\/ UpdateJob implements JobStore.\nfunc (s *InmemStore) UpdateJob(job flux.ReleaseJob) error {\n\ts.mtx.Lock()\n\tdefer s.mtx.Unlock()\n\ts.jobs[job.ID] = job\n\treturn nil\n}\n\nfunc (s *InmemStore) Heartbeat(id flux.ReleaseID) error {\n\ts.mtx.Lock()\n\tdefer s.mtx.Unlock()\n\tjob, ok := s.jobs[id]\n\tif !ok {\n\t\treturn flux.ErrNoSuchReleaseJob\n\t}\n\tjob.Heartbeat = time.Now().UTC()\n\ts.jobs[id] = job\n\treturn nil\n}\n\nfunc (s *InmemStore) GC() error {\n\ts.mtx.Lock()\n\tdefer s.mtx.Unlock()\n\tcutoff := time.Now().UTC().Add(-s.oldest)\n\tfor id, job := range s.jobs {\n\t\tif job.IsFinished() && job.Finished.Before(cutoff) {\n\t\t\tdelete(s.jobs, id)\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Remove release.InmemStore as it is dead code<commit_after><|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage utils\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"go.uber.org\/zap\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n)\n\nconst (\n\tBrokerConfigMapKey = \"bootstrapServers\"\n\tKafkaChannelSeparator = \".\"\n\n\t\/\/ DefaultNumPartitions defines the default number of partitions\n\tDefaultNumPartitions = 1\n\n\t\/\/ DefaultReplicationFactor defines the default number of replications\n\tDefaultReplicationFactor = 1\n\n\tknativeKafkaTopicPrefix = \"knative-messaging-kafka\"\n\n\tDefaultMaxIdleConns = 1000\n\tDefaultMaxIdleConnsPerHost = 100\n)\n\nvar (\n\tfirstKafkaConfigMapCall = true\n)\n\ntype KafkaConfig struct {\n\tBrokers []string\n\tMaxIdleConns int\n\tMaxIdleConnsPerHost int\n}\n\n\/\/ GetKafkaConfig returns the details of the Kafka cluster.\nfunc GetKafkaConfig(configMap map[string]string) (*KafkaConfig, error) {\n\tif len(configMap) == 0 {\n\t\treturn nil, fmt.Errorf(\"missing configuration\")\n\t}\n\n\tconfig := &KafkaConfig{}\n\n\tif brokers, ok := configMap[BrokerConfigMapKey]; ok {\n\t\tbootstrapServers := strings.Split(brokers, \",\")\n\t\tfor _, s := range bootstrapServers {\n\t\t\tif len(s) == 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"empty %s value in configuration\", BrokerConfigMapKey)\n\t\t\t}\n\t\t}\n\t\tconfig.Brokers = bootstrapServers\n\t} else {\n\t\treturn nil, fmt.Errorf(\"missing key %s in configuration\", BrokerConfigMapKey)\n\t}\n\n\tif maxConns, ok := configMap[\"maxIdleConns\"]; ok {\n\t\tmc, err := strconv.Atoi(maxConns)\n\t\tif err != nil {\n\t\t\tconfig.MaxIdleConns = DefaultMaxIdleConns\n\t\t}\n\t\tconfig.MaxIdleConns = mc\n\t} else {\n\t\tconfig.MaxIdleConns = DefaultMaxIdleConns\n\t}\n\tif maxConnsPerHost, ok := configMap[\"maxIdleConnsPerHost\"]; ok {\n\t\tmcph, err := strconv.Atoi(maxConnsPerHost)\n\t\tif err != nil {\n\t\t\tconfig.MaxIdleConnsPerHost = DefaultMaxIdleConnsPerHost\n\t\t}\n\t\tconfig.MaxIdleConnsPerHost = mcph\n\n\t} else {\n\t\tconfig.MaxIdleConnsPerHost = DefaultMaxIdleConnsPerHost\n\t}\n\n\treturn config, nil\n}\n\nfunc TopicName(separator, namespace, name string) string {\n\ttopic := []string{knativeKafkaTopicPrefix, namespace, name}\n\treturn strings.Join(topic, separator)\n}\n\n\/\/ We skip the first call into KafkaConfigMapObserver because it is not an indication\n\/\/ of change of the watched ConfigMap but the map's initial state. See the comment for\n\/\/ knative.dev\/pkg\/configmap\/watcher.Start()\nfunc KafkaConfigMapObserver(logger *zap.SugaredLogger) func(configMap *corev1.ConfigMap) {\n\treturn func(kafkaConfigMap *corev1.ConfigMap) {\n\t\tif firstKafkaConfigMapCall {\n\t\t\tfirstKafkaConfigMapCall = false\n\t\t} else {\n\t\t\tlogger.Info(\"Kafka broker configuration updated, restarting\")\n\t\t\tsyscall.Kill(syscall.Getpid(), syscall.SIGINT)\n\t\t}\n\t}\n}\n<commit_msg>Removed unused code (#1234)<commit_after>\/*\nCopyright 2019 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage utils\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\tBrokerConfigMapKey = \"bootstrapServers\"\n\tKafkaChannelSeparator = \".\"\n\n\t\/\/ DefaultNumPartitions defines the default number of partitions\n\tDefaultNumPartitions = 1\n\n\t\/\/ DefaultReplicationFactor defines the default number of replications\n\tDefaultReplicationFactor = 1\n\n\tknativeKafkaTopicPrefix = \"knative-messaging-kafka\"\n\n\tDefaultMaxIdleConns = 1000\n\tDefaultMaxIdleConnsPerHost = 100\n)\n\nvar (\n\tfirstKafkaConfigMapCall = true\n)\n\ntype KafkaConfig struct {\n\tBrokers []string\n\tMaxIdleConns int\n\tMaxIdleConnsPerHost int\n}\n\n\/\/ GetKafkaConfig returns the details of the Kafka cluster.\nfunc GetKafkaConfig(configMap map[string]string) (*KafkaConfig, error) {\n\tif len(configMap) == 0 {\n\t\treturn nil, fmt.Errorf(\"missing configuration\")\n\t}\n\n\tconfig := &KafkaConfig{}\n\n\tif brokers, ok := configMap[BrokerConfigMapKey]; ok {\n\t\tbootstrapServers := strings.Split(brokers, \",\")\n\t\tfor _, s := range bootstrapServers {\n\t\t\tif len(s) == 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"empty %s value in configuration\", BrokerConfigMapKey)\n\t\t\t}\n\t\t}\n\t\tconfig.Brokers = bootstrapServers\n\t} else {\n\t\treturn nil, fmt.Errorf(\"missing key %s in configuration\", BrokerConfigMapKey)\n\t}\n\n\tif maxConns, ok := configMap[\"maxIdleConns\"]; ok {\n\t\tmc, err := strconv.Atoi(maxConns)\n\t\tif err != nil {\n\t\t\tconfig.MaxIdleConns = DefaultMaxIdleConns\n\t\t}\n\t\tconfig.MaxIdleConns = mc\n\t} else {\n\t\tconfig.MaxIdleConns = DefaultMaxIdleConns\n\t}\n\tif maxConnsPerHost, ok := configMap[\"maxIdleConnsPerHost\"]; ok {\n\t\tmcph, err := strconv.Atoi(maxConnsPerHost)\n\t\tif err != nil {\n\t\t\tconfig.MaxIdleConnsPerHost = DefaultMaxIdleConnsPerHost\n\t\t}\n\t\tconfig.MaxIdleConnsPerHost = mcph\n\n\t} else {\n\t\tconfig.MaxIdleConnsPerHost = DefaultMaxIdleConnsPerHost\n\t}\n\n\treturn config, nil\n}\n\nfunc TopicName(separator, namespace, name string) string {\n\ttopic := []string{knativeKafkaTopicPrefix, namespace, name}\n\treturn strings.Join(topic, separator)\n}\n<|endoftext|>"} {"text":"<commit_before>package transit\n\nimport (\n\t\"context\"\n\t\"crypto\/elliptic\"\n\t\"crypto\/x509\"\n\t\"encoding\/base64\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"golang.org\/x\/crypto\/ed25519\"\n\n\t\"github.com\/fatih\/structs\"\n\t\"github.com\/hashicorp\/vault\/helper\/keysutil\"\n\t\"github.com\/hashicorp\/vault\/logical\"\n\t\"github.com\/hashicorp\/vault\/logical\/framework\"\n)\n\nfunc (b *backend) pathListKeys() *framework.Path {\n\treturn &framework.Path{\n\t\tPattern: \"keys\/?$\",\n\n\t\tCallbacks: map[logical.Operation]framework.OperationFunc{\n\t\t\tlogical.ListOperation: b.pathKeysList,\n\t\t},\n\n\t\tHelpSynopsis: pathPolicyHelpSyn,\n\t\tHelpDescription: pathPolicyHelpDesc,\n\t}\n}\n\nfunc (b *backend) pathKeys() *framework.Path {\n\treturn &framework.Path{\n\t\tPattern: \"keys\/\" + framework.GenericNameRegex(\"name\"),\n\t\tFields: map[string]*framework.FieldSchema{\n\t\t\t\"name\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeString,\n\t\t\t\tDescription: \"Name of the key\",\n\t\t\t},\n\n\t\t\t\"type\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeString,\n\t\t\t\tDefault: \"aes256-gcm96\",\n\t\t\t\tDescription: `\nThe type of key to create. Currently, \"aes256-gcm96\" (symmetric), \"ecdsa-p256\"\n(asymmetric), 'ed25519' (asymmetric), 'rsa-2048' (asymmetric), 'rsa-4096'\n(asymmetric) are supported. Defaults to \"aes256-gcm96\".\n`,\n\t\t\t},\n\n\t\t\t\"derived\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeBool,\n\t\t\t\tDescription: `Enables key derivation mode. This\nallows for per-transaction unique\nkeys for encryption operations.`,\n\t\t\t},\n\n\t\t\t\"convergent_encryption\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeBool,\n\t\t\t\tDescription: `Whether to support convergent encryption.\nThis is only supported when using a key with\nkey derivation enabled and will require all\nrequests to carry both a context and 96-bit\n(12-byte) nonce. The given nonce will be used\nin place of a randomly generated nonce. As a\nresult, when the same context and nonce are\nsupplied, the same ciphertext is generated. It\nis *very important* when using this mode that\nyou ensure that all nonces are unique for a\ngiven context. Failing to do so will severely\nimpact the ciphertext's security.`,\n\t\t\t},\n\n\t\t\t\"exportable\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeBool,\n\t\t\t\tDescription: `Enables keys to be exportable.\nThis allows for all the valid keys\nin the key ring to be exported.`,\n\t\t\t},\n\n\t\t\t\"allow_plaintext_backup\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeBool,\n\t\t\t\tDescription: `Enables taking a backup of the named\nkey in plaintext format. Once set,\nthis cannot be disabled.`,\n\t\t\t},\n\n\t\t\t\"context\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeString,\n\t\t\t\tDescription: `Base64 encoded context for key derivation.\nWhen reading a key with key derivation enabled,\nif the key type supports public keys, this will\nreturn the public key for the given context.`,\n\t\t\t},\n\t\t},\n\n\t\tCallbacks: map[logical.Operation]framework.OperationFunc{\n\t\t\tlogical.UpdateOperation: b.pathPolicyWrite,\n\t\t\tlogical.DeleteOperation: b.pathPolicyDelete,\n\t\t\tlogical.ReadOperation: b.pathPolicyRead,\n\t\t},\n\n\t\tHelpSynopsis: pathPolicyHelpSyn,\n\t\tHelpDescription: pathPolicyHelpDesc,\n\t}\n}\n\nfunc (b *backend) pathKeysList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {\n\tentries, err := req.Storage.List(ctx, \"policy\/\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn logical.ListResponse(entries), nil\n}\n\nfunc (b *backend) pathPolicyWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {\n\tname := d.Get(\"name\").(string)\n\tderived := d.Get(\"derived\").(bool)\n\tconvergent := d.Get(\"convergent_encryption\").(bool)\n\tkeyType := d.Get(\"type\").(string)\n\texportable := d.Get(\"exportable\").(bool)\n\tallowPlaintextBackup := d.Get(\"allow_plaintext_backup\").(bool)\n\n\tif !derived && convergent {\n\t\treturn logical.ErrorResponse(\"convergent encryption requires derivation to be enabled\"), nil\n\t}\n\n\tpolReq := keysutil.PolicyRequest{\n\t\tStorage: req.Storage,\n\t\tName: name,\n\t\tDerived: derived,\n\t\tConvergent: convergent,\n\t\tExportable: exportable,\n\t\tAllowPlaintextBackup: allowPlaintextBackup,\n\t}\n\tswitch keyType {\n\tcase \"aes256-gcm96\":\n\t\tpolReq.KeyType = keysutil.KeyType_AES256_GCM96\n\tcase \"ecdsa-p256\":\n\t\tpolReq.KeyType = keysutil.KeyType_ECDSA_P256\n\tcase \"ed25519\":\n\t\tpolReq.KeyType = keysutil.KeyType_ED25519\n\tcase \"rsa-2048\":\n\t\tpolReq.KeyType = keysutil.KeyType_RSA2048\n\tcase \"rsa-4096\":\n\t\tpolReq.KeyType = keysutil.KeyType_RSA4096\n\tdefault:\n\t\treturn logical.ErrorResponse(fmt.Sprintf(\"unknown key type %v\", keyType)), logical.ErrInvalidRequest\n\t}\n\n\tp, lock, upserted, err := b.lm.GetPolicyUpsert(ctx, polReq)\n\tif lock != nil {\n\t\tdefer lock.RUnlock()\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif p == nil {\n\t\treturn nil, fmt.Errorf(\"error generating key: returned policy was nil\")\n\t}\n\n\tresp := &logical.Response{}\n\tif !upserted {\n\t\tresp.AddWarning(fmt.Sprintf(\"key %s already existed\", name))\n\t}\n\n\treturn nil, nil\n}\n\n\/\/ Built-in helper type for returning asymmetric keys\ntype asymKey struct {\n\tName string `json:\"name\" structs:\"name\" mapstructure:\"name\"`\n\tPublicKey string `json:\"public_key\" structs:\"public_key\" mapstructure:\"public_key\"`\n\tCreationTime time.Time `json:\"creation_time\" structs:\"creation_time\" mapstructure:\"creation_time\"`\n}\n\nfunc (b *backend) pathPolicyRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {\n\tname := d.Get(\"name\").(string)\n\n\tp, lock, err := b.lm.GetPolicyShared(ctx, req.Storage, name)\n\tif lock != nil {\n\t\tdefer lock.RUnlock()\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif p == nil {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Return the response\n\tresp := &logical.Response{\n\t\tData: map[string]interface{}{\n\t\t\t\"name\": p.Name,\n\t\t\t\"type\": p.Type.String(),\n\t\t\t\"derived\": p.Derived,\n\t\t\t\"deletion_allowed\": p.DeletionAllowed,\n\t\t\t\"min_decryption_version\": p.MinDecryptionVersion,\n\t\t\t\"min_encryption_version\": p.MinEncryptionVersion,\n\t\t\t\"latest_version\": p.LatestVersion,\n\t\t\t\"exportable\": p.Exportable,\n\t\t\t\"allow_plaintext_backup\": p.AllowPlaintextBackup,\n\t\t\t\"supports_encryption\": p.Type.EncryptionSupported(),\n\t\t\t\"supports_decryption\": p.Type.DecryptionSupported(),\n\t\t\t\"supports_signing\": p.Type.SigningSupported(),\n\t\t\t\"supports_derivation\": p.Type.DerivationSupported(),\n\t\t\t\"backup_info\": p.BackupInfo,\n\t\t\t\"restore_info\": p.RestoreInfo,\n\t\t},\n\t}\n\n\tif p.Derived {\n\t\tswitch p.KDF {\n\t\tcase keysutil.Kdf_hmac_sha256_counter:\n\t\t\tresp.Data[\"kdf\"] = \"hmac-sha256-counter\"\n\t\t\tresp.Data[\"kdf_mode\"] = \"hmac-sha256-counter\"\n\t\tcase keysutil.Kdf_hkdf_sha256:\n\t\t\tresp.Data[\"kdf\"] = \"hkdf_sha256\"\n\t\t}\n\t\tresp.Data[\"convergent_encryption\"] = p.ConvergentEncryption\n\t\tif p.ConvergentEncryption {\n\t\t\tresp.Data[\"convergent_encryption_version\"] = p.ConvergentVersion\n\t\t}\n\t}\n\n\tcontextRaw := d.Get(\"context\").(string)\n\tvar context []byte\n\tif len(contextRaw) != 0 {\n\t\tcontext, err = base64.StdEncoding.DecodeString(contextRaw)\n\t\tif err != nil {\n\t\t\treturn logical.ErrorResponse(\"failed to base64-decode context\"), logical.ErrInvalidRequest\n\t\t}\n\t}\n\n\tswitch p.Type {\n\tcase keysutil.KeyType_AES256_GCM96:\n\t\tretKeys := map[string]int64{}\n\t\tfor k, v := range p.Keys {\n\t\t\tretKeys[k] = v.DeprecatedCreationTime\n\t\t}\n\t\tresp.Data[\"keys\"] = retKeys\n\n\tcase keysutil.KeyType_ECDSA_P256, keysutil.KeyType_ED25519, keysutil.KeyType_RSA2048, keysutil.KeyType_RSA4096:\n\t\tretKeys := map[string]map[string]interface{}{}\n\t\tfor k, v := range p.Keys {\n\t\t\tkey := asymKey{\n\t\t\t\tPublicKey: v.FormattedPublicKey,\n\t\t\t\tCreationTime: v.CreationTime,\n\t\t\t}\n\t\t\tif key.CreationTime.IsZero() {\n\t\t\t\tkey.CreationTime = time.Unix(v.DeprecatedCreationTime, 0)\n\t\t\t}\n\n\t\t\tswitch p.Type {\n\t\t\tcase keysutil.KeyType_ECDSA_P256:\n\t\t\t\tkey.Name = elliptic.P256().Params().Name\n\t\t\tcase keysutil.KeyType_ED25519:\n\t\t\t\tif p.Derived {\n\t\t\t\t\tif len(context) == 0 {\n\t\t\t\t\t\tkey.PublicKey = \"\"\n\t\t\t\t\t} else {\n\t\t\t\t\t\tver, err := strconv.Atoi(k)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn nil, fmt.Errorf(\"invalid version %q: %v\", k, err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tderived, err := p.DeriveKey(context, ver)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn nil, fmt.Errorf(\"failed to derive key to return public component\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tpubKey := ed25519.PrivateKey(derived).Public().(ed25519.PublicKey)\n\t\t\t\t\t\tkey.PublicKey = base64.StdEncoding.EncodeToString(pubKey)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tkey.Name = \"ed25519\"\n\t\t\tcase keysutil.KeyType_RSA2048, keysutil.KeyType_RSA4096:\n\t\t\t\tkey.Name = \"rsa-2048\"\n\t\t\t\tif p.Type == keysutil.KeyType_RSA4096 {\n\t\t\t\t\tkey.Name = \"rsa-4096\"\n\t\t\t\t}\n\n\t\t\t\t\/\/ Encode the RSA public key in PEM format to return over the\n\t\t\t\t\/\/ API\n\t\t\t\tderBytes, err := x509.MarshalPKIXPublicKey(v.RSAKey.Public())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error marshaling RSA public key: %v\", err)\n\t\t\t\t}\n\t\t\t\tpemBlock := &pem.Block{\n\t\t\t\t\tType: \"PUBLIC KEY\",\n\t\t\t\t\tBytes: derBytes,\n\t\t\t\t}\n\t\t\t\tpemBytes := pem.EncodeToMemory(pemBlock)\n\t\t\t\tif pemBytes == nil || len(pemBytes) == 0 {\n\t\t\t\t\treturn nil, fmt.Errorf(\"failed to PEM-encode RSA public key\")\n\t\t\t\t}\n\t\t\t\tkey.PublicKey = string(pemBytes)\n\t\t\t}\n\n\t\t\tretKeys[k] = structs.New(key).Map()\n\t\t}\n\t\tresp.Data[\"keys\"] = retKeys\n\t}\n\n\treturn resp, nil\n}\n\nfunc (b *backend) pathPolicyDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {\n\tname := d.Get(\"name\").(string)\n\n\t\/\/ Delete does its own locking\n\terr := b.lm.DeletePolicy(ctx, req.Storage, name)\n\tif err != nil {\n\t\treturn logical.ErrorResponse(fmt.Sprintf(\"error deleting policy %s: %s\", name, err)), err\n\t}\n\n\treturn nil, nil\n}\n\nconst pathPolicyHelpSyn = `Managed named encryption keys`\n\nconst pathPolicyHelpDesc = `\nThis path is used to manage the named keys that are available.\nDoing a write with no value against a new named key will create\nit using a randomly generated key.\n`\n<commit_msg>Fix auditing for transit keys with backup\/restore info (#3919)<commit_after>package transit\n\nimport (\n\t\"context\"\n\t\"crypto\/elliptic\"\n\t\"crypto\/x509\"\n\t\"encoding\/base64\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"golang.org\/x\/crypto\/ed25519\"\n\n\t\"github.com\/fatih\/structs\"\n\t\"github.com\/hashicorp\/vault\/helper\/keysutil\"\n\t\"github.com\/hashicorp\/vault\/logical\"\n\t\"github.com\/hashicorp\/vault\/logical\/framework\"\n)\n\nfunc (b *backend) pathListKeys() *framework.Path {\n\treturn &framework.Path{\n\t\tPattern: \"keys\/?$\",\n\n\t\tCallbacks: map[logical.Operation]framework.OperationFunc{\n\t\t\tlogical.ListOperation: b.pathKeysList,\n\t\t},\n\n\t\tHelpSynopsis: pathPolicyHelpSyn,\n\t\tHelpDescription: pathPolicyHelpDesc,\n\t}\n}\n\nfunc (b *backend) pathKeys() *framework.Path {\n\treturn &framework.Path{\n\t\tPattern: \"keys\/\" + framework.GenericNameRegex(\"name\"),\n\t\tFields: map[string]*framework.FieldSchema{\n\t\t\t\"name\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeString,\n\t\t\t\tDescription: \"Name of the key\",\n\t\t\t},\n\n\t\t\t\"type\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeString,\n\t\t\t\tDefault: \"aes256-gcm96\",\n\t\t\t\tDescription: `\nThe type of key to create. Currently, \"aes256-gcm96\" (symmetric), \"ecdsa-p256\"\n(asymmetric), 'ed25519' (asymmetric), 'rsa-2048' (asymmetric), 'rsa-4096'\n(asymmetric) are supported. Defaults to \"aes256-gcm96\".\n`,\n\t\t\t},\n\n\t\t\t\"derived\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeBool,\n\t\t\t\tDescription: `Enables key derivation mode. This\nallows for per-transaction unique\nkeys for encryption operations.`,\n\t\t\t},\n\n\t\t\t\"convergent_encryption\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeBool,\n\t\t\t\tDescription: `Whether to support convergent encryption.\nThis is only supported when using a key with\nkey derivation enabled and will require all\nrequests to carry both a context and 96-bit\n(12-byte) nonce. The given nonce will be used\nin place of a randomly generated nonce. As a\nresult, when the same context and nonce are\nsupplied, the same ciphertext is generated. It\nis *very important* when using this mode that\nyou ensure that all nonces are unique for a\ngiven context. Failing to do so will severely\nimpact the ciphertext's security.`,\n\t\t\t},\n\n\t\t\t\"exportable\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeBool,\n\t\t\t\tDescription: `Enables keys to be exportable.\nThis allows for all the valid keys\nin the key ring to be exported.`,\n\t\t\t},\n\n\t\t\t\"allow_plaintext_backup\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeBool,\n\t\t\t\tDescription: `Enables taking a backup of the named\nkey in plaintext format. Once set,\nthis cannot be disabled.`,\n\t\t\t},\n\n\t\t\t\"context\": &framework.FieldSchema{\n\t\t\t\tType: framework.TypeString,\n\t\t\t\tDescription: `Base64 encoded context for key derivation.\nWhen reading a key with key derivation enabled,\nif the key type supports public keys, this will\nreturn the public key for the given context.`,\n\t\t\t},\n\t\t},\n\n\t\tCallbacks: map[logical.Operation]framework.OperationFunc{\n\t\t\tlogical.UpdateOperation: b.pathPolicyWrite,\n\t\t\tlogical.DeleteOperation: b.pathPolicyDelete,\n\t\t\tlogical.ReadOperation: b.pathPolicyRead,\n\t\t},\n\n\t\tHelpSynopsis: pathPolicyHelpSyn,\n\t\tHelpDescription: pathPolicyHelpDesc,\n\t}\n}\n\nfunc (b *backend) pathKeysList(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {\n\tentries, err := req.Storage.List(ctx, \"policy\/\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn logical.ListResponse(entries), nil\n}\n\nfunc (b *backend) pathPolicyWrite(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {\n\tname := d.Get(\"name\").(string)\n\tderived := d.Get(\"derived\").(bool)\n\tconvergent := d.Get(\"convergent_encryption\").(bool)\n\tkeyType := d.Get(\"type\").(string)\n\texportable := d.Get(\"exportable\").(bool)\n\tallowPlaintextBackup := d.Get(\"allow_plaintext_backup\").(bool)\n\n\tif !derived && convergent {\n\t\treturn logical.ErrorResponse(\"convergent encryption requires derivation to be enabled\"), nil\n\t}\n\n\tpolReq := keysutil.PolicyRequest{\n\t\tStorage: req.Storage,\n\t\tName: name,\n\t\tDerived: derived,\n\t\tConvergent: convergent,\n\t\tExportable: exportable,\n\t\tAllowPlaintextBackup: allowPlaintextBackup,\n\t}\n\tswitch keyType {\n\tcase \"aes256-gcm96\":\n\t\tpolReq.KeyType = keysutil.KeyType_AES256_GCM96\n\tcase \"ecdsa-p256\":\n\t\tpolReq.KeyType = keysutil.KeyType_ECDSA_P256\n\tcase \"ed25519\":\n\t\tpolReq.KeyType = keysutil.KeyType_ED25519\n\tcase \"rsa-2048\":\n\t\tpolReq.KeyType = keysutil.KeyType_RSA2048\n\tcase \"rsa-4096\":\n\t\tpolReq.KeyType = keysutil.KeyType_RSA4096\n\tdefault:\n\t\treturn logical.ErrorResponse(fmt.Sprintf(\"unknown key type %v\", keyType)), logical.ErrInvalidRequest\n\t}\n\n\tp, lock, upserted, err := b.lm.GetPolicyUpsert(ctx, polReq)\n\tif lock != nil {\n\t\tdefer lock.RUnlock()\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif p == nil {\n\t\treturn nil, fmt.Errorf(\"error generating key: returned policy was nil\")\n\t}\n\n\tresp := &logical.Response{}\n\tif !upserted {\n\t\tresp.AddWarning(fmt.Sprintf(\"key %s already existed\", name))\n\t}\n\n\treturn nil, nil\n}\n\n\/\/ Built-in helper type for returning asymmetric keys\ntype asymKey struct {\n\tName string `json:\"name\" structs:\"name\" mapstructure:\"name\"`\n\tPublicKey string `json:\"public_key\" structs:\"public_key\" mapstructure:\"public_key\"`\n\tCreationTime time.Time `json:\"creation_time\" structs:\"creation_time\" mapstructure:\"creation_time\"`\n}\n\nfunc (b *backend) pathPolicyRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {\n\tname := d.Get(\"name\").(string)\n\n\tp, lock, err := b.lm.GetPolicyShared(ctx, req.Storage, name)\n\tif lock != nil {\n\t\tdefer lock.RUnlock()\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif p == nil {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Return the response\n\tresp := &logical.Response{\n\t\tData: map[string]interface{}{\n\t\t\t\"name\": p.Name,\n\t\t\t\"type\": p.Type.String(),\n\t\t\t\"derived\": p.Derived,\n\t\t\t\"deletion_allowed\": p.DeletionAllowed,\n\t\t\t\"min_decryption_version\": p.MinDecryptionVersion,\n\t\t\t\"min_encryption_version\": p.MinEncryptionVersion,\n\t\t\t\"latest_version\": p.LatestVersion,\n\t\t\t\"exportable\": p.Exportable,\n\t\t\t\"allow_plaintext_backup\": p.AllowPlaintextBackup,\n\t\t\t\"supports_encryption\": p.Type.EncryptionSupported(),\n\t\t\t\"supports_decryption\": p.Type.DecryptionSupported(),\n\t\t\t\"supports_signing\": p.Type.SigningSupported(),\n\t\t\t\"supports_derivation\": p.Type.DerivationSupported(),\n\t\t},\n\t}\n\n\tif p.BackupInfo != nil {\n\t\tresp.Data[\"backup_info\"] = map[string]interface{}{\n\t\t\t\"time\": p.BackupInfo.Time,\n\t\t\t\"version\": p.BackupInfo.Version,\n\t\t}\n\t}\n\tif p.RestoreInfo != nil {\n\t\tresp.Data[\"restore_info\"] = map[string]interface{}{\n\t\t\t\"time\": p.RestoreInfo.Time,\n\t\t\t\"version\": p.RestoreInfo.Version,\n\t\t}\n\t}\n\n\tif p.Derived {\n\t\tswitch p.KDF {\n\t\tcase keysutil.Kdf_hmac_sha256_counter:\n\t\t\tresp.Data[\"kdf\"] = \"hmac-sha256-counter\"\n\t\t\tresp.Data[\"kdf_mode\"] = \"hmac-sha256-counter\"\n\t\tcase keysutil.Kdf_hkdf_sha256:\n\t\t\tresp.Data[\"kdf\"] = \"hkdf_sha256\"\n\t\t}\n\t\tresp.Data[\"convergent_encryption\"] = p.ConvergentEncryption\n\t\tif p.ConvergentEncryption {\n\t\t\tresp.Data[\"convergent_encryption_version\"] = p.ConvergentVersion\n\t\t}\n\t}\n\n\tcontextRaw := d.Get(\"context\").(string)\n\tvar context []byte\n\tif len(contextRaw) != 0 {\n\t\tcontext, err = base64.StdEncoding.DecodeString(contextRaw)\n\t\tif err != nil {\n\t\t\treturn logical.ErrorResponse(\"failed to base64-decode context\"), logical.ErrInvalidRequest\n\t\t}\n\t}\n\n\tswitch p.Type {\n\tcase keysutil.KeyType_AES256_GCM96:\n\t\tretKeys := map[string]int64{}\n\t\tfor k, v := range p.Keys {\n\t\t\tretKeys[k] = v.DeprecatedCreationTime\n\t\t}\n\t\tresp.Data[\"keys\"] = retKeys\n\n\tcase keysutil.KeyType_ECDSA_P256, keysutil.KeyType_ED25519, keysutil.KeyType_RSA2048, keysutil.KeyType_RSA4096:\n\t\tretKeys := map[string]map[string]interface{}{}\n\t\tfor k, v := range p.Keys {\n\t\t\tkey := asymKey{\n\t\t\t\tPublicKey: v.FormattedPublicKey,\n\t\t\t\tCreationTime: v.CreationTime,\n\t\t\t}\n\t\t\tif key.CreationTime.IsZero() {\n\t\t\t\tkey.CreationTime = time.Unix(v.DeprecatedCreationTime, 0)\n\t\t\t}\n\n\t\t\tswitch p.Type {\n\t\t\tcase keysutil.KeyType_ECDSA_P256:\n\t\t\t\tkey.Name = elliptic.P256().Params().Name\n\t\t\tcase keysutil.KeyType_ED25519:\n\t\t\t\tif p.Derived {\n\t\t\t\t\tif len(context) == 0 {\n\t\t\t\t\t\tkey.PublicKey = \"\"\n\t\t\t\t\t} else {\n\t\t\t\t\t\tver, err := strconv.Atoi(k)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn nil, fmt.Errorf(\"invalid version %q: %v\", k, err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tderived, err := p.DeriveKey(context, ver)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn nil, fmt.Errorf(\"failed to derive key to return public component\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\tpubKey := ed25519.PrivateKey(derived).Public().(ed25519.PublicKey)\n\t\t\t\t\t\tkey.PublicKey = base64.StdEncoding.EncodeToString(pubKey)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tkey.Name = \"ed25519\"\n\t\t\tcase keysutil.KeyType_RSA2048, keysutil.KeyType_RSA4096:\n\t\t\t\tkey.Name = \"rsa-2048\"\n\t\t\t\tif p.Type == keysutil.KeyType_RSA4096 {\n\t\t\t\t\tkey.Name = \"rsa-4096\"\n\t\t\t\t}\n\n\t\t\t\t\/\/ Encode the RSA public key in PEM format to return over the\n\t\t\t\t\/\/ API\n\t\t\t\tderBytes, err := x509.MarshalPKIXPublicKey(v.RSAKey.Public())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, fmt.Errorf(\"error marshaling RSA public key: %v\", err)\n\t\t\t\t}\n\t\t\t\tpemBlock := &pem.Block{\n\t\t\t\t\tType: \"PUBLIC KEY\",\n\t\t\t\t\tBytes: derBytes,\n\t\t\t\t}\n\t\t\t\tpemBytes := pem.EncodeToMemory(pemBlock)\n\t\t\t\tif pemBytes == nil || len(pemBytes) == 0 {\n\t\t\t\t\treturn nil, fmt.Errorf(\"failed to PEM-encode RSA public key\")\n\t\t\t\t}\n\t\t\t\tkey.PublicKey = string(pemBytes)\n\t\t\t}\n\n\t\t\tretKeys[k] = structs.New(key).Map()\n\t\t}\n\t\tresp.Data[\"keys\"] = retKeys\n\t}\n\n\treturn resp, nil\n}\n\nfunc (b *backend) pathPolicyDelete(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {\n\tname := d.Get(\"name\").(string)\n\n\t\/\/ Delete does its own locking\n\terr := b.lm.DeletePolicy(ctx, req.Storage, name)\n\tif err != nil {\n\t\treturn logical.ErrorResponse(fmt.Sprintf(\"error deleting policy %s: %s\", name, err)), err\n\t}\n\n\treturn nil, nil\n}\n\nconst pathPolicyHelpSyn = `Managed named encryption keys`\n\nconst pathPolicyHelpDesc = `\nThis path is used to manage the named keys that are available.\nDoing a write with no value against a new named key will create\nit using a randomly generated key.\n`\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright 2016 Padduck, LLC\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n \thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage routing\n\nimport (\n\t\"github.com\/braintree\/manners\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/pufferpanel\/pufferd\/httphandlers\"\n\t\"github.com\/pufferpanel\/pufferd\/logging\"\n\t\"github.com\/pufferpanel\/pufferd\/programs\"\n\t\"github.com\/pufferpanel\/pufferd\/utils\"\n)\n\nfunc RegisterRoutes(e *gin.Engine) {\n\te.GET(\"\", func(c *gin.Context) {\n\t\tc.String(200, \"pufferd is running\")\n\t})\n\te.GET(\"\/\", func(c *gin.Context) {\n\t\tc.String(200, \"pufferd is running\")\n\t})\n\te.GET(\"\/templates\", GetTemplates)\n\te.GET(\"_shutdown\", httphandlers.OAuth2Handler, Shutdown)\n}\n\nfunc Shutdown(c *gin.Context) {\n\tif !hasScope(c, \"node.stop\") {\n\t\tc.AbortWithStatus(401)\n\t\treturn\n\t}\n\n\tfor _, element := range programs.GetAll() {\n\t\trunning := element.IsRunning()\n\t\tif running {\n\t\t\tlogging.Info(\"Stopping program \" + element.Id())\n\t\t\telement.Stop()\n\t\t}\n\t}\n\tmanners.Close()\n}\n\nfunc GetTemplates(c *gin.Context) {\n\tc.JSON(200, programs.GetPlugins())\n}\n\nfunc hasScope(gin *gin.Context, scope string) bool {\n\tscopes, _ := gin.Get(\"scopes\")\n\treturn utils.ContainsValue(scopes.([]string), scope)\n}\n<commit_msg>Gin considers \"\" and \/ to be same.... but does not treat them the same<commit_after>\/*\n Copyright 2016 Padduck, LLC\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n \thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage routing\n\nimport (\n\t\"github.com\/braintree\/manners\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/pufferpanel\/pufferd\/httphandlers\"\n\t\"github.com\/pufferpanel\/pufferd\/logging\"\n\t\"github.com\/pufferpanel\/pufferd\/programs\"\n\t\"github.com\/pufferpanel\/pufferd\/utils\"\n)\n\nfunc RegisterRoutes(e *gin.Engine) {\n\te.GET(\"\", func(c *gin.Context) {\n\t\tc.String(200, \"pufferd is running\")\n\t})\n\te.GET(\"\/templates\", GetTemplates)\n\te.GET(\"_shutdown\", httphandlers.OAuth2Handler, Shutdown)\n}\n\nfunc Shutdown(c *gin.Context) {\n\tif !hasScope(c, \"node.stop\") {\n\t\tc.AbortWithStatus(401)\n\t\treturn\n\t}\n\n\tfor _, element := range programs.GetAll() {\n\t\trunning := element.IsRunning()\n\t\tif running {\n\t\t\tlogging.Info(\"Stopping program \" + element.Id())\n\t\t\telement.Stop()\n\t\t}\n\t}\n\tmanners.Close()\n}\n\nfunc GetTemplates(c *gin.Context) {\n\tc.JSON(200, programs.GetPlugins())\n}\n\nfunc hasScope(gin *gin.Context, scope string) bool {\n\tscopes, _ := gin.Get(\"scopes\")\n\treturn utils.ContainsValue(scopes.([]string), scope)\n}\n<|endoftext|>"} {"text":"<commit_before>package db\n\nimport \"github.com\/pkg\/errors\"\n\nfunc (vdb *TemporaryEmail) LoadByUserIDAndConfirmationKey(tx *Tx, userID, confirmationKey string) error {\n\tstmt := getStmtBuf()\n\tdefer releaseStmtBuf(stmt)\n\n\tstmt.WriteString(`SELECT `)\n\tstmt.WriteString(TemporaryEmailStdSelectColumns)\n\tstmt.WriteString(` FROM `)\n\tstmt.WriteString(TemporaryEmailTable)\n\tstmt.WriteString(` WHERE user_id = ? AND confirmation_key = ?`)\n\n\trow := tx.QueryRow(stmt.String(), userID, confirmationKey)\n\tif err := vdb.Scan(row); err != nil {\n\t\treturn errors.Wrap(err, \"failed to execute query\")\n\t}\n\n\treturn nil\n}\n\nfunc (vdb *TemporaryEmail) Upsert(tx *Tx) error {\n\tstmt := getStmtBuf()\n\tdefer releaseStmtBuf(stmt)\n\n\tstmt.WriteString(`INSERT INTO `)\n\tstmt.WriteString(TemporaryEmailTable)\n\tstmt.WriteString(` (user_id, confirmation_key, email, expires_on) VALUES (?, ?, ?, ?) `)\n\tstmt.WriteString(` ON DUPLICATE KEY UPDATE confirmation_key = VALUES(confirmation_key), expires_on = VALUES(expires_on)`)\n\n\trow := tx.QueryRow(stmt.String(), vdb.UserID, vdb.ConfirmationKey, vdb.Email, vdb.ExpiresOn)\n\tif err := vdb.Scan(row); err != nil {\n\t\treturn errors.Wrap(err, \"failed to execute query\")\n\t}\n\n\treturn nil\n}\n<commit_msg>oops, this was an insert operation<commit_after>package db\n\nimport \"github.com\/pkg\/errors\"\n\nfunc (vdb *TemporaryEmail) LoadByUserIDAndConfirmationKey(tx *Tx, userID, confirmationKey string) error {\n\tstmt := getStmtBuf()\n\tdefer releaseStmtBuf(stmt)\n\n\tstmt.WriteString(`SELECT `)\n\tstmt.WriteString(TemporaryEmailStdSelectColumns)\n\tstmt.WriteString(` FROM `)\n\tstmt.WriteString(TemporaryEmailTable)\n\tstmt.WriteString(` WHERE user_id = ? AND confirmation_key = ?`)\n\n\trow := tx.QueryRow(stmt.String(), userID, confirmationKey)\n\tif err := vdb.Scan(row); err != nil {\n\t\treturn errors.Wrap(err, \"failed to execute query\")\n\t}\n\n\treturn nil\n}\n\nfunc (vdb *TemporaryEmail) Upsert(tx *Tx) error {\n\tstmt := getStmtBuf()\n\tdefer releaseStmtBuf(stmt)\n\n\tstmt.WriteString(`INSERT INTO `)\n\tstmt.WriteString(TemporaryEmailTable)\n\tstmt.WriteString(` (user_id, confirmation_key, email, expires_on) VALUES (?, ?, ?, ?) `)\n\tstmt.WriteString(` ON DUPLICATE KEY UPDATE confirmation_key = VALUES(confirmation_key), expires_on = VALUES(expires_on)`)\n\n\tresult, err := tx.Exec(stmt.String(), vdb.UserID, vdb.ConfirmationKey, vdb.Email, vdb.ExpiresOn)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlii, err := result.LastInsertId()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvdb.OID = lii\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The NorthShore Authors All rights reserved.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage blueprint\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/boltdb\/bolt\"\n\n\t\"github.com\/Mirantis\/northshore\/store\"\n\t\"github.com\/docker\/engine-api\/client\"\n\t\"github.com\/docker\/engine-api\/types\"\n\t\"github.com\/docker\/engine-api\/types\/container\"\n\t\"github.com\/docker\/go-connections\/nat\"\n\t\"github.com\/satori\/go.uuid\"\n\t\"github.com\/spf13\/viper\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ Stage represents a Blueprint Stage\ntype Stage struct {\n\t\/\/Docker image for bootstrap stage\n\tImage string `json:\"image\"`\n\tDescription string `json:\"description\"`\n\t\/\/Ports for exposing to host\n\tPorts []map[string]string `json:\"ports\"`\n\t\/\/Environment variables\n\tVariables map[string]string `json:\"variables\"`\n\t\/\/Provisioner type (docker\/...)\n\tProvisioner string `json:\"provisioner\"`\n\t\/\/ State is current Blueprint status\n\tState StageState `json:\"state\"`\n}\n\n\/\/ Blueprint represents a Blueprint\ntype Blueprint struct {\n\t\/\/API version for processing blueprint\n\tVersion string `json:\"version\"`\n\t\/\/Type of blueprint (pipeline\/application)\n\tType string `json:\"type\"`\n\tName string `json:\"name\"`\n\tStages map[string]Stage `json:\"stages\"`\n\t\/\/ State is current Blueprint status\n\tState State `json:\"state\"`\n\tID uuid.UUID `json:\"id\"`\n}\n\n\/\/ State represents a state of the Blueprint\ntype State string\n\n\/\/ StageState represents a state of the Stage\ntype StageState string\n\nconst (\n\t\/\/ StateNew is default state of the Blueprint\n\tStateNew State = \"new\"\n\t\/\/ StateProvision is the Blueprint status while provisioning\n\tStateProvision State = \"provision\"\n\t\/\/ StateActive is the Blueprint status when all Stages are up and ready\n\tStateActive State = \"active\"\n\t\/\/ StateInactive is the Blueprint status when some Stage is down\n\tStateInactive State = \"inactive\"\n)\n\nconst (\n\t\/\/ StageStateNew is default state of the Stage\n\tStageStateNew StageState = \"new\"\n\t\/\/ StageStateCreated indicates that container is created\n\tStageStateCreated StageState = \"created\"\n\t\/\/ StageStateRunning indicates that container is running\n\tStageStateRunning StageState = \"running\"\n\t\/\/ StageStatePaused indicates that container is paused\n\tStageStatePaused StageState = \"paused\"\n\t\/\/ StageStateStopped indicates that container is stopped\n\tStageStateStopped StageState = \"stopped\"\n\t\/\/ StageStateDeleted indicates that container is deleted\n\tStageStateDeleted StageState = \"deleted\"\n)\n\nconst (\n\t\/\/ DBBucketWatcher defines boltdb bucket for Watcher\n\tDBBucketWatcher = \"Northshore\"\n\n\t\/\/ DBKeyWatcher defines boltdb key for Watcher\n\tDBKeyWatcher = \"containers\"\n\n\t\/\/ DBBucket defines boltdb bucket for blueprints\n\tDBBucket = \"blueprints\"\n)\n\n\/\/ GetID implements `api2go.MarshalIdentifier` interface\nfunc (bp Blueprint) GetID() string {\n\treturn bp.ID.String()\n}\n\n\/\/ SetID implements `api2go.UnmarshalIdentifier` interface\nfunc (bp *Blueprint) SetID(id string) (err error) {\n\tbp.ID, err = uuid.FromString(id)\n\treturn\n}\n\n\/\/ Run creates and starts Docker containers\nfunc (bp *Blueprint) Run() {\n\tcli, err := client.NewEnvClient()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tids := []string{}\n\n\tfor name, stage := range bp.Stages {\n\t\tbindings := make(map[nat.Port][]nat.PortBinding)\n\t\tfor _, ports := range stage.Ports {\n\t\t\tport, _ := nat.NewPort(\"tcp\", ports[\"fromPort\"])\n\t\t\tbindings[port] = []nat.PortBinding{nat.PortBinding{HostIP: \"0.0.0.0\", HostPort: ports[\"toPort\"]}}\n\t\t}\n\n\t\thostConfig := container.HostConfig{\n\t\t\tPortBindings: bindings,\n\t\t}\n\n\t\tconfig := container.Config{\n\t\t\tImage: bp.Stages[name].Image,\n\t\t}\n\t\tlog.Printf(\"%s -> Config was built.\", name)\n\n\t\tr, err := cli.ContainerCreate(context.Background(), &config, &hostConfig, nil, name)\n\t\tif err != nil && strings.Contains(err.Error(), \"No such image\") {\n\t\t\tlog.Println(err)\n\t\t\tlog.Println(\"Start pulling process...\")\n\t\t\trc, e := cli.ImagePull(context.Background(), config.Image, types.ImagePullOptions{})\n\t\t\tif e != nil {\n\t\t\t\tlog.Println(e)\n\t\t\t}\n\t\t\t\/\/TODO: add pretty print of pulling process\n\t\t\t_, re := ioutil.ReadAll(rc)\n\t\t\tif re != nil {\n\t\t\t\tlog.Println(re)\n\t\t\t}\n\t\t\trc.Close()\n\t\t\tr, err = cli.ContainerCreate(context.Background(), &config, &hostConfig, nil, name)\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Printf(\"%s -> Container was created.\", name)\n\t\tids = append(ids, r.ID)\n\n\t\terr = cli.ContainerStart(\n\t\t\tcontext.Background(),\n\t\t\tr.ID,\n\t\t\ttypes.ContainerStartOptions{})\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Printf(\"%s -> Container was started.\", name)\n\t\tlog.Printf(\"%s -> Container ID %s\", name, r.ID)\n\t\tlog.Printf(\"%s -> Warnings: %s\", name, r.Warnings)\n\t}\n\tif len(ids) > 0 {\n\t\t\/\/Update list of containers in DB\n\t\t\/\/TODO add ability to add one container\n\t\tres := strings.Join(ids[:], \",\")\n\t\tstore.Save([]byte(DBBucketWatcher), []byte(DBKeyWatcher), res)\n\t}\n}\n\n\/\/ Delete deletes blueprint with containers\nfunc (bp *Blueprint) Delete() error {\n\t\/\/ TODO: stop and remove containers\n\tlog.Debugln(\"#blueprint,#DeleteBlueprint\")\n\n\treturn store.Delete([]byte(DBBucket), []byte(bp.ID.String()))\n}\n\n\/\/ Save stores blueprint in db\nfunc (bp *Blueprint) Save() error {\n\tbp.updateStates()\n\tzerouuid := uuid.UUID{}\n\tif bp.ID == zerouuid {\n\t\tbp.ID = uuid.NewV4()\n\t}\n\n\treturn store.Save([]byte(DBBucket), []byte(bp.ID.String()), bp)\n}\n\nfunc (bp *Blueprint) updateStates() {\n\t\/* Set stage as StageStateNew if unknown *\/\n\tfor i, s := range bp.Stages {\n\t\tswitch s.State {\n\t\tcase\n\t\t\tStageStateNew,\n\t\t\tStageStateCreated,\n\t\t\tStageStateRunning,\n\t\t\tStageStatePaused,\n\t\t\tStageStateStopped,\n\t\t\tStageStateDeleted:\n\t\t\tbreak\n\t\tdefault:\n\t\t\ts.State = StageStateNew\n\t\t\tbp.Stages[i] = s\n\t\t}\n\t}\n\n\tbpState := StateNew\n\n\tfor _, s := range bp.Stages {\n\t\tif s.State == StageStateRunning {\n\t\t\tbpState = StateActive\n\t\t\tbreak\n\t\t}\n\t}\nLookProvision:\n\tfor _, s := range bp.Stages {\n\t\tswitch s.State {\n\t\tcase\n\t\t\tStageStateNew:\n\t\t\tif bpState == StateActive {\n\t\t\t\tbpState = StateProvision\n\t\t\t\tbreak LookProvision\n\t\t\t}\n\t\tcase\n\t\t\tStageStateCreated:\n\t\t\tbpState = StateProvision\n\t\t\tbreak LookProvision\n\t\t}\n\t}\nLookInactive:\n\tfor _, s := range bp.Stages {\n\t\tswitch s.State {\n\t\tcase\n\t\t\tStageStateDeleted,\n\t\t\tStageStatePaused,\n\t\t\tStageStateStopped:\n\t\t\tbpState = StateInactive\n\t\t\tbreak LookInactive\n\t\t}\n\t}\n\n\tbp.State = bpState\n}\n\n\/\/ ParseFile parses and validates the incoming data\nfunc ParseFile(path string) (bp Blueprint, err error) {\n\tv := viper.New()\n\tv.SetConfigFile(path)\n\terr = v.ReadInConfig()\n\tif err != nil {\n\t\treturn bp, fmt.Errorf(\"Config not found. %s\", err)\n\t}\n\n\terr = v.Unmarshal(&bp)\n\tif err != nil {\n\t\treturn bp, fmt.Errorf(\"Unable to decode into struct, %v\", err)\n\t}\n\n\treturn bp, nil\n}\n\n\/\/ ParseBytes parses and validates the incoming data\nfunc ParseBytes(b []byte) (bp Blueprint, err error) {\n\tv := viper.New()\n\tv.SetConfigType(\"yaml\")\n\terr = v.ReadConfig(bytes.NewBuffer(b))\n\tif err != nil {\n\t\treturn bp, fmt.Errorf(\"Config not found. %s\", err)\n\t}\n\n\terr = v.Unmarshal(&bp)\n\tif err != nil {\n\t\treturn bp, fmt.Errorf(\"Unable to decode into struct, %v\", err)\n\t}\n\n\treturn bp, nil\n}\n\n\/\/ LoadAll loads stored blueprints\nfunc LoadAll() (items []Blueprint, err error) {\n\tdb := store.OpenDBBucket([]byte(DBBucket))\n\tdefer db.Close()\n\n\terr = db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(DBBucket))\n\t\titems = make([]Blueprint, 0, b.Stats().KeyN)\n\n\t\tlog.Debugln(\"#blueprint,#LoadAll\", b.Stats().KeyN, len(items))\n\n\t\tb.ForEach(func(k, v []byte) error {\n\t\t\titem := new(Blueprint)\n\t\t\tif err = json.Unmarshal(v, item); err != nil {\n\t\t\t\tlog.Errorln(\"#DB,#LoadBucket\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\titems = append(items, *item)\n\n\t\t\tlog.Debugln(\"#blueprint,#LoadAll,##\", string(k), len(items))\n\t\t\treturn nil\n\t\t})\n\n\t\treturn nil\n\t})\n\n\treturn items, err\n}\n\n\/\/ Blueprints represents Storable collection\ntype Blueprints []Blueprint\n\n\/\/ Bucket implements Storable interface\nfunc (items *Blueprints) Bucket() []byte {\n\treturn []byte(DBBucket)\n}\n\n\/\/ Next implements Storable interface\nfunc (items *Blueprints) Next() interface{} {\n\titem := new(Blueprint)\n\t*items = append(*items, *item)\n\tl := len(*items)\n\treturn &(*items)[l-1]\n}\n\n\/\/ Prepare implements Storable interface\nfunc (items *Blueprints) Prepare(len int) {\n\t*items = make([]Blueprint, 0, len)\n}\n<commit_msg>Add (*Blueprint)UnmarshalJSON to check the typed unmarshaling<commit_after>\/\/ Copyright 2016 The NorthShore Authors All rights reserved.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage blueprint\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/boltdb\/bolt\"\n\n\t\"github.com\/Mirantis\/northshore\/store\"\n\t\"github.com\/docker\/engine-api\/client\"\n\t\"github.com\/docker\/engine-api\/types\"\n\t\"github.com\/docker\/engine-api\/types\/container\"\n\t\"github.com\/docker\/go-connections\/nat\"\n\t\"github.com\/satori\/go.uuid\"\n\t\"github.com\/spf13\/viper\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ Stage represents a Blueprint Stage\ntype Stage struct {\n\t\/\/Docker image for bootstrap stage\n\tImage string `json:\"image\"`\n\tDescription string `json:\"description\"`\n\t\/\/Ports for exposing to host\n\tPorts []map[string]string `json:\"ports\"`\n\t\/\/Environment variables\n\tVariables map[string]string `json:\"variables\"`\n\t\/\/Provisioner type (docker\/...)\n\tProvisioner string `json:\"provisioner\"`\n\t\/\/ State is current Blueprint status\n\tState StageState `json:\"state\"`\n}\n\n\/\/ Blueprint represents a Blueprint\ntype Blueprint struct {\n\t\/\/API version for processing blueprint\n\tVersion string `json:\"version\"`\n\t\/\/Type of blueprint (pipeline\/application)\n\tType string `json:\"type\"`\n\tName string `json:\"name\"`\n\tStages map[string]Stage `json:\"stages\"`\n\t\/\/ State is current Blueprint status\n\tState State `json:\"state\"`\n\tID uuid.UUID `json:\"id\"`\n}\n\n\/\/ State represents a state of the Blueprint\ntype State string\n\n\/\/ StageState represents a state of the Stage\ntype StageState string\n\nconst (\n\t\/\/ StateNew is default state of the Blueprint\n\tStateNew State = \"new\"\n\t\/\/ StateProvision is the Blueprint status while provisioning\n\tStateProvision State = \"provision\"\n\t\/\/ StateActive is the Blueprint status when all Stages are up and ready\n\tStateActive State = \"active\"\n\t\/\/ StateInactive is the Blueprint status when some Stage is down\n\tStateInactive State = \"inactive\"\n)\n\nconst (\n\t\/\/ StageStateNew is default state of the Stage\n\tStageStateNew StageState = \"new\"\n\t\/\/ StageStateCreated indicates that container is created\n\tStageStateCreated StageState = \"created\"\n\t\/\/ StageStateRunning indicates that container is running\n\tStageStateRunning StageState = \"running\"\n\t\/\/ StageStatePaused indicates that container is paused\n\tStageStatePaused StageState = \"paused\"\n\t\/\/ StageStateStopped indicates that container is stopped\n\tStageStateStopped StageState = \"stopped\"\n\t\/\/ StageStateDeleted indicates that container is deleted\n\tStageStateDeleted StageState = \"deleted\"\n)\n\nconst (\n\t\/\/ DBBucketWatcher defines boltdb bucket for Watcher\n\tDBBucketWatcher = \"Northshore\"\n\n\t\/\/ DBKeyWatcher defines boltdb key for Watcher\n\tDBKeyWatcher = \"containers\"\n\n\t\/\/ DBBucket defines boltdb bucket for blueprints\n\tDBBucket = \"blueprints\"\n)\n\n\/\/ GetID implements `api2go.MarshalIdentifier` interface\nfunc (bp Blueprint) GetID() string {\n\treturn bp.ID.String()\n}\n\n\/\/ SetID implements `api2go.UnmarshalIdentifier` interface\nfunc (bp *Blueprint) SetID(id string) (err error) {\n\tbp.ID, err = uuid.FromString(id)\n\treturn\n}\n\n\/\/ Run creates and starts Docker containers\nfunc (bp *Blueprint) Run() {\n\tcli, err := client.NewEnvClient()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tids := []string{}\n\n\tfor name, stage := range bp.Stages {\n\t\tbindings := make(map[nat.Port][]nat.PortBinding)\n\t\tfor _, ports := range stage.Ports {\n\t\t\tport, _ := nat.NewPort(\"tcp\", ports[\"fromPort\"])\n\t\t\tbindings[port] = []nat.PortBinding{nat.PortBinding{HostIP: \"0.0.0.0\", HostPort: ports[\"toPort\"]}}\n\t\t}\n\n\t\thostConfig := container.HostConfig{\n\t\t\tPortBindings: bindings,\n\t\t}\n\n\t\tconfig := container.Config{\n\t\t\tImage: bp.Stages[name].Image,\n\t\t}\n\t\tlog.Printf(\"%s -> Config was built.\", name)\n\n\t\tr, err := cli.ContainerCreate(context.Background(), &config, &hostConfig, nil, name)\n\t\tif err != nil && strings.Contains(err.Error(), \"No such image\") {\n\t\t\tlog.Println(err)\n\t\t\tlog.Println(\"Start pulling process...\")\n\t\t\trc, e := cli.ImagePull(context.Background(), config.Image, types.ImagePullOptions{})\n\t\t\tif e != nil {\n\t\t\t\tlog.Println(e)\n\t\t\t}\n\t\t\t\/\/TODO: add pretty print of pulling process\n\t\t\t_, re := ioutil.ReadAll(rc)\n\t\t\tif re != nil {\n\t\t\t\tlog.Println(re)\n\t\t\t}\n\t\t\trc.Close()\n\t\t\tr, err = cli.ContainerCreate(context.Background(), &config, &hostConfig, nil, name)\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Printf(\"%s -> Container was created.\", name)\n\t\tids = append(ids, r.ID)\n\n\t\terr = cli.ContainerStart(\n\t\t\tcontext.Background(),\n\t\t\tr.ID,\n\t\t\ttypes.ContainerStartOptions{})\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Printf(\"%s -> Container was started.\", name)\n\t\tlog.Printf(\"%s -> Container ID %s\", name, r.ID)\n\t\tlog.Printf(\"%s -> Warnings: %s\", name, r.Warnings)\n\t}\n\tif len(ids) > 0 {\n\t\t\/\/Update list of containers in DB\n\t\t\/\/TODO add ability to add one container\n\t\tres := strings.Join(ids[:], \",\")\n\t\tstore.Save([]byte(DBBucketWatcher), []byte(DBKeyWatcher), res)\n\t}\n}\n\n\/\/ Delete deletes blueprint with containers\nfunc (bp *Blueprint) Delete() error {\n\t\/\/ TODO: stop and remove containers\n\tlog.Debugln(\"#blueprint,#DeleteBlueprint\")\n\n\treturn store.Delete([]byte(DBBucket), []byte(bp.ID.String()))\n}\n\n\/\/ Save stores blueprint in db\nfunc (bp *Blueprint) Save() error {\n\tbp.updateStates()\n\tzerouuid := uuid.UUID{}\n\tif bp.ID == zerouuid {\n\t\tbp.ID = uuid.NewV4()\n\t}\n\n\treturn store.Save([]byte(DBBucket), []byte(bp.ID.String()), bp)\n}\n\nfunc (bp *Blueprint) updateStates() {\n\t\/* Set stage as StageStateNew if unknown *\/\n\tfor i, s := range bp.Stages {\n\t\tswitch s.State {\n\t\tcase\n\t\t\tStageStateNew,\n\t\t\tStageStateCreated,\n\t\t\tStageStateRunning,\n\t\t\tStageStatePaused,\n\t\t\tStageStateStopped,\n\t\t\tStageStateDeleted:\n\t\t\tbreak\n\t\tdefault:\n\t\t\ts.State = StageStateNew\n\t\t\tbp.Stages[i] = s\n\t\t}\n\t}\n\n\tbpState := StateNew\n\n\tfor _, s := range bp.Stages {\n\t\tif s.State == StageStateRunning {\n\t\t\tbpState = StateActive\n\t\t\tbreak\n\t\t}\n\t}\nLookProvision:\n\tfor _, s := range bp.Stages {\n\t\tswitch s.State {\n\t\tcase\n\t\t\tStageStateNew:\n\t\t\tif bpState == StateActive {\n\t\t\t\tbpState = StateProvision\n\t\t\t\tbreak LookProvision\n\t\t\t}\n\t\tcase\n\t\t\tStageStateCreated:\n\t\t\tbpState = StateProvision\n\t\t\tbreak LookProvision\n\t\t}\n\t}\nLookInactive:\n\tfor _, s := range bp.Stages {\n\t\tswitch s.State {\n\t\tcase\n\t\t\tStageStateDeleted,\n\t\t\tStageStatePaused,\n\t\t\tStageStateStopped:\n\t\t\tbpState = StateInactive\n\t\t\tbreak LookInactive\n\t\t}\n\t}\n\n\tbp.State = bpState\n}\n\n\/\/ ParseFile parses and validates the incoming data\nfunc ParseFile(path string) (bp Blueprint, err error) {\n\tv := viper.New()\n\tv.SetConfigFile(path)\n\terr = v.ReadInConfig()\n\tif err != nil {\n\t\treturn bp, fmt.Errorf(\"Config not found. %s\", err)\n\t}\n\n\terr = v.Unmarshal(&bp)\n\tif err != nil {\n\t\treturn bp, fmt.Errorf(\"Unable to decode into struct, %v\", err)\n\t}\n\n\treturn bp, nil\n}\n\n\/\/ ParseBytes parses and validates the incoming data\nfunc ParseBytes(b []byte) (bp Blueprint, err error) {\n\tv := viper.New()\n\tv.SetConfigType(\"yaml\")\n\terr = v.ReadConfig(bytes.NewBuffer(b))\n\tif err != nil {\n\t\treturn bp, fmt.Errorf(\"Config not found. %s\", err)\n\t}\n\n\terr = v.Unmarshal(&bp)\n\tif err != nil {\n\t\treturn bp, fmt.Errorf(\"Unable to decode into struct, %v\", err)\n\t}\n\n\treturn bp, nil\n}\n\n\/\/ LoadAll loads stored blueprints\nfunc LoadAll() (items []Blueprint, err error) {\n\tdb := store.OpenDBBucket([]byte(DBBucket))\n\tdefer db.Close()\n\n\terr = db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(DBBucket))\n\t\titems = make([]Blueprint, 0, b.Stats().KeyN)\n\n\t\tlog.Debugln(\"#blueprint,#LoadAll\", b.Stats().KeyN, len(items))\n\n\t\tb.ForEach(func(k, v []byte) error {\n\t\t\titem := new(Blueprint)\n\t\t\tif err = json.Unmarshal(v, item); err != nil {\n\t\t\t\tlog.Errorln(\"#DB,#LoadBucket\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\titems = append(items, *item)\n\n\t\t\tlog.Debugln(\"#blueprint,#LoadAll,##\", string(k), len(items))\n\t\t\treturn nil\n\t\t})\n\n\t\treturn nil\n\t})\n\n\treturn items, err\n}\n\n\/\/ Blueprints represents Storable collection\ntype Blueprints []Blueprint\n\n\/\/ Bucket implements Storable interface\nfunc (items *Blueprints) Bucket() []byte {\n\treturn []byte(DBBucket)\n}\n\n\/\/ Next implements Storable interface\nfunc (items *Blueprints) Next() interface{} {\n\titem := new(Blueprint)\n\t*items = append(*items, *item)\n\tl := len(*items)\n\treturn &(*items)[l-1]\n}\n\n\/\/ Prepare implements Storable interface\nfunc (items *Blueprints) Prepare(len int) {\n\t*items = make([]Blueprint, 0, len)\n}\n\n\/\/ Used to avoid recursion in UnmarshalJSON below\n\/\/ Note at http:\/\/attilaolah.eu\/2013\/11\/29\/json-decoding-in-go\/\ntype blueprint Blueprint\n\n\/\/ UnmarshalJSON implements Unmarshaller interface\nfunc (bp *Blueprint) UnmarshalJSON(b []byte) (err error) {\n\tlog.Debugln(\"#Blueprint,#UnmarshalJSON\")\n\t\/\/ return json.Unmarshal(b, bp) \/\/ stack owerflow\n\n\tbuf := blueprint{}\n\tif err = json.Unmarshal(b, &buf); err == nil {\n\t\t*bp = Blueprint(buf)\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package proxy\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\n\t\"github.com\/yryz\/httpproxy\/config\"\n\n\tss \"github.com\/shadowsocks\/shadowsocks-go\/shadowsocks\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\ntype ProxyServer struct {\n\tssServer string\n\tssCipher *ss.Cipher\n}\n\nfunc NewProxyServer() *http.Server {\n\tp := &ProxyServer{\n\t\tssServer: config.Conf.SsServer,\n\t}\n\n\tvar err error\n\tp.ssCipher, err = ss.NewCipher(config.Conf.SsCipher, config.Conf.SsPassword)\n\tif err != nil {\n\t\tpanic(\"init cipher error: \" + err.Error())\n\t}\n\n\treturn &http.Server{\n\t\tAddr: config.Conf.Listen,\n\t\tHandler: p,\n\t}\n}\n\nfunc (p *ProxyServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\thttp.Error(w, fmt.Sprint(err), http.StatusInternalServerError)\n\t\t\tlog.Debugf(\"panic: %v\\n\", err)\n\t\t}\n\t}()\n\n\tif r.Method == \"CONNECT\" {\n\t\tp.HandleConnect(w, r)\n\t} else {\n\t\tp.HandleHttp(w, r)\n\t}\n}\n\n\/\/ 处理HTTPS、HTTP2代理请求\nfunc (p *ProxyServer) HandleConnect(w http.ResponseWriter, r *http.Request) {\n\tlog.Infof(\"%s %s\", r.Method, r.Host)\n\n\thj, _ := w.(http.Hijacker)\n\tconn, _, err := hj.Hijack()\n\tif err != nil {\n\t\tlog.Error(err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tssConn, err := ss.Dial(r.URL.Host, p.ssServer, p.ssCipher.Copy())\n\tif err != nil {\n\t\tlog.Error(\"ss dial: \", err)\n\t\thttp.Error(w, err.Error(), http.StatusBadGateway)\n\t\treturn\n\t}\n\n\tconn.Write([]byte(\"HTTP\/1.1 200 Connection Established\\r\\n\\r\\n\"))\n\n\tgo ss.PipeThenClose(conn, ssConn, nil)\n\tss.PipeThenClose(ssConn, conn, nil)\n}\n\n\/\/ 处理HTTP代理请求\nfunc (p *ProxyServer) HandleHttp(w http.ResponseWriter, r *http.Request) {\n\tlog.Infof(\"%s %s\", r.Method, r.URL)\n\n\t\/\/ ss proxy\n\ttr := http.Transport{\n\t\tDial: func(network, addr string) (net.Conn, error) {\n\t\t\tlog.Infof(\"dial ss %v\/%v\", addr, network)\n\t\t\treturn ss.Dial(addr, p.ssServer, p.ssCipher.Copy())\n\t\t},\n\t}\n\n\t\/\/ transport\n\tresp, err := tr.RoundTrip(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\tlog.Error(\"request error: \", err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\t\/\/ copy headers\n\tfor k, values := range resp.Header {\n\t\tfor _, v := range values {\n\t\t\tw.Header().Add(k, v)\n\t\t}\n\t}\n\n\t\/\/ copy body\n\tn, err := io.Copy(w, resp.Body)\n\tif err != nil && err != io.EOF {\n\t\tlog.Errorf(\"copy response body error: %v\", err)\n\t}\n\n\tlog.Infof(\"copied %v bytes from %v.\", n, r.Host)\n}\n<commit_msg>修复HTTP 301 跳转HTTPS问题<commit_after>package proxy\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\n\t\"github.com\/yryz\/httpproxy\/config\"\n\n\tss \"github.com\/shadowsocks\/shadowsocks-go\/shadowsocks\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\ntype ProxyServer struct {\n\tssServer string\n\tssCipher *ss.Cipher\n}\n\nfunc NewProxyServer() *http.Server {\n\tp := &ProxyServer{\n\t\tssServer: config.Conf.SsServer,\n\t}\n\n\tvar err error\n\tp.ssCipher, err = ss.NewCipher(config.Conf.SsCipher, config.Conf.SsPassword)\n\tif err != nil {\n\t\tpanic(\"init cipher error: \" + err.Error())\n\t}\n\n\treturn &http.Server{\n\t\tAddr: config.Conf.Listen,\n\t\tHandler: p,\n\t}\n}\n\nfunc (p *ProxyServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\thttp.Error(w, fmt.Sprint(err), http.StatusInternalServerError)\n\t\t\tlog.Debugf(\"panic: %v\\n\", err)\n\t\t}\n\t}()\n\n\tif r.Method == \"CONNECT\" {\n\t\tp.HandleConnect(w, r)\n\t} else {\n\t\tp.HandleHttp(w, r)\n\t}\n}\n\n\/\/ 处理HTTPS、HTTP2代理请求\nfunc (p *ProxyServer) HandleConnect(w http.ResponseWriter, r *http.Request) {\n\tlog.Infof(\"%s %s\", r.Method, r.Host)\n\n\thj, _ := w.(http.Hijacker)\n\tconn, _, err := hj.Hijack()\n\tif err != nil {\n\t\tlog.Error(err)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tssConn, err := ss.Dial(r.URL.Host, p.ssServer, p.ssCipher.Copy())\n\tif err != nil {\n\t\tlog.Error(\"ss dial: \", err)\n\t\thttp.Error(w, err.Error(), http.StatusBadGateway)\n\t\treturn\n\t}\n\n\tconn.Write([]byte(\"HTTP\/1.1 200 Connection Established\\r\\n\\r\\n\"))\n\n\tgo ss.PipeThenClose(conn, ssConn, nil)\n\tss.PipeThenClose(ssConn, conn, nil)\n}\n\n\/\/ 处理HTTP代理请求\nfunc (p *ProxyServer) HandleHttp(w http.ResponseWriter, r *http.Request) {\n\tlog.Infof(\"%s %s\", r.Method, r.URL)\n\n\t\/\/ ss proxy\n\ttr := http.Transport{\n\t\tDial: func(network, addr string) (net.Conn, error) {\n\t\t\tlog.Infof(\"dial ss %v\/%v\", addr, network)\n\t\t\treturn ss.Dial(addr, p.ssServer, p.ssCipher.Copy())\n\t\t},\n\t}\n\n\t\/\/ transport\n\tresp, err := tr.RoundTrip(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\tlog.Error(\"request error: \", err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\t\/\/ copy headers\n\tfor k, values := range resp.Header {\n\t\tfor _, v := range values {\n\t\t\tw.Header().Add(k, v)\n\t\t}\n\t}\n\tw.WriteHeader(resp.StatusCode)\n\n\t\/\/ copy body\n\tn, err := io.Copy(w, resp.Body)\n\tif err != nil && err != io.EOF {\n\t\tlog.Errorf(\"copy response body error: %v\", err)\n\t}\n\n\tlog.Infof(\"copied %v bytes from %v.\", n, r.Host)\n}\n<|endoftext|>"} {"text":"<commit_before>package app_config\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"github.com\/watermint\/toolbox\/essentials\/log\/es_log\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\nconst (\n\tConfigFileName = \"config.json\"\n)\n\nvar (\n\tErrorValueNotFound = errors.New(\"value not found\")\n)\n\ntype Config interface {\n\tGet(key string) (v interface{}, err error)\n\tPut(key string, v interface{}) (err error)\n\tList() (settings map[string]interface{}, err error)\n}\n\nfunc NewConfig(path string) Config {\n\treturn &configImpl{\n\t\tpath: path,\n\t}\n}\n\ntype configImpl struct {\n\tpath string\n}\n\nfunc (z *configImpl) load() (values map[string]interface{}, err error) {\n\tvalues = make(map[string]interface{})\n\tl := es_log.Default()\n\tp := filepath.Join(z.path, ConfigFileName)\n\n\t_, err = os.Lstat(p)\n\tif err != nil {\n\t\tl.Debug(\"No file information; skip loading\", es_log.Error(err))\n\t\treturn values, nil\n\t}\n\n\tl.Debug(\"load config\", es_log.String(\"path\", p))\n\tb, err := ioutil.ReadFile(p)\n\tif err != nil {\n\t\tl.Debug(\"Unable to read config\", es_log.Error(err))\n\t\treturn\n\t}\n\tif err := json.Unmarshal(b, &values); err != nil {\n\t\tl.Debug(\"unable to unmarshal\", es_log.Error(err))\n\t\treturn values, err\n\t}\n\treturn\n}\n\nfunc (z *configImpl) save(key string, v interface{}) (err error) {\n\tl := es_log.Default()\n\tp := filepath.Join(z.path, ConfigFileName)\n\tl.Debug(\"load config\", es_log.String(\"path\", p))\n\tvalues, err := z.load()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvalues[key] = v\n\n\tb, err := json.Marshal(values)\n\tif err != nil {\n\t\tl.Debug(\"Unable to marshal\", es_log.Error(err))\n\t\treturn err\n\t}\n\tif err := ioutil.WriteFile(p, b, 0644); err != nil {\n\t\tl.Debug(\"Unable to write config\", es_log.Error(err))\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (z *configImpl) Get(key string) (v interface{}, err error) {\n\tif values, err := z.load(); err != nil {\n\t\treturn nil, err\n\t} else if v, ok := values[key]; ok {\n\t\treturn v, nil\n\t} else {\n\t\treturn nil, ErrorValueNotFound\n\t}\n}\n\nfunc (z *configImpl) Put(key string, v interface{}) (err error) {\n\treturn z.save(key, v)\n}\n\nfunc (z *configImpl) List() (settings map[string]interface{}, err error) {\n\treturn z.load()\n}\n<commit_msg>#360 : always load\/save on get\/put<commit_after>package app_config\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"github.com\/watermint\/toolbox\/essentials\/log\/es_log\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\nconst (\n\tConfigFileName = \"config.json\"\n)\n\nvar (\n\tErrorValueNotFound = errors.New(\"value not found\")\n)\n\ntype Config interface {\n\tGet(key string) (v interface{}, err error)\n\tPut(key string, v interface{}) (err error)\n\tList() (settings map[string]interface{}, err error)\n}\n\nfunc NewConfig(path string) Config {\n\treturn &configImpl{\n\t\tpath: path,\n\t}\n}\n\ntype configImpl struct {\n\tpath string\n}\n\nfunc (z configImpl) load() (values map[string]interface{}, err error) {\n\tvalues = make(map[string]interface{})\n\tl := es_log.Default()\n\tp := filepath.Join(z.path, ConfigFileName)\n\n\t_, err = os.Lstat(p)\n\tif err != nil {\n\t\tl.Debug(\"No file information; skip loading\", es_log.Error(err))\n\t\treturn values, nil\n\t}\n\n\tl.Debug(\"load config\", es_log.String(\"path\", p))\n\tb, err := ioutil.ReadFile(p)\n\tif err != nil {\n\t\tl.Debug(\"Unable to read config\", es_log.Error(err))\n\t\treturn\n\t}\n\tif err := json.Unmarshal(b, &values); err != nil {\n\t\tl.Debug(\"unable to unmarshal\", es_log.Error(err))\n\t\treturn values, err\n\t}\n\treturn\n}\n\nfunc (z configImpl) save(key string, v interface{}) (err error) {\n\tl := es_log.Default()\n\tp := filepath.Join(z.path, ConfigFileName)\n\tl.Debug(\"load config\", es_log.String(\"path\", p))\n\tvalues, err := z.load()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvalues[key] = v\n\n\tb, err := json.Marshal(values)\n\tif err != nil {\n\t\tl.Debug(\"Unable to marshal\", es_log.Error(err))\n\t\treturn err\n\t}\n\tif err := ioutil.WriteFile(p, b, 0644); err != nil {\n\t\tl.Debug(\"Unable to write config\", es_log.Error(err))\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (z configImpl) Get(key string) (v interface{}, err error) {\n\tif values, err := z.load(); err != nil {\n\t\treturn nil, err\n\t} else if v, ok := values[key]; ok {\n\t\treturn v, nil\n\t} else {\n\t\treturn nil, ErrorValueNotFound\n\t}\n}\n\nfunc (z configImpl) Put(key string, v interface{}) (err error) {\n\treturn z.save(key, v)\n}\n\nfunc (z configImpl) List() (settings map[string]interface{}, err error) {\n\treturn z.load()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\/\/ Original source: github.com\/micro\/go-micro\/v3\/selector\/random\/random.go\n\npackage random\n\nimport (\n\t\"math\/rand\"\n\n\t\"github.com\/micro\/micro\/v3\/internal\/selector\"\n)\n\ntype random struct{}\n\nfunc (r *random) Select(routes []string, opts ...selector.SelectOption) (selector.Next, error) {\n\t\/\/ we can't select from an empty pool of routes\n\tif len(routes) == 0 {\n\t\treturn nil, selector.ErrNoneAvailable\n\t}\n\n\t\/\/ return the next func\n\treturn func() string {\n\t\t\/\/ if there is only one route provided we'll select it\n\t\tif len(routes) == 1 {\n\t\t\treturn routes[0]\n\t\t}\n\n\t\t\/\/ select a random route from the slice\n\t\treturn routes[rand.Intn(len(routes)-1)]\n\t}, nil\n}\n\nfunc (r *random) Record(addr string, err error) error {\n\treturn nil\n}\n\nfunc (r *random) Reset() error {\n\treturn nil\n}\n\nfunc (r *random) String() string {\n\treturn \"random\"\n}\n\n\/\/ NewSelector returns a random selector\nfunc NewSelector(opts ...selector.Option) selector.Selector {\n\treturn new(random)\n}\n<commit_msg>fix random selector to choose from full range of nodes (#1613)<commit_after>\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\/\/ Original source: github.com\/micro\/go-micro\/v3\/selector\/random\/random.go\n\npackage random\n\nimport (\n\t\"math\/rand\"\n\n\t\"github.com\/micro\/micro\/v3\/internal\/selector\"\n)\n\ntype random struct{}\n\nfunc (r *random) Select(routes []string, opts ...selector.SelectOption) (selector.Next, error) {\n\t\/\/ we can't select from an empty pool of routes\n\tif len(routes) == 0 {\n\t\treturn nil, selector.ErrNoneAvailable\n\t}\n\n\t\/\/ return the next func\n\treturn func() string {\n\t\t\/\/ if there is only one route provided we'll select it\n\t\tif len(routes) == 1 {\n\t\t\treturn routes[0]\n\t\t}\n\n\t\t\/\/ select a random route from the slice\n\t\treturn routes[rand.Intn(len(routes))]\n\t}, nil\n}\n\nfunc (r *random) Record(addr string, err error) error {\n\treturn nil\n}\n\nfunc (r *random) Reset() error {\n\treturn nil\n}\n\nfunc (r *random) String() string {\n\treturn \"random\"\n}\n\n\/\/ NewSelector returns a random selector\nfunc NewSelector(opts ...selector.Option) selector.Selector {\n\treturn new(random)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage media\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"barista.run\/bar\"\n\tdbusWatcher \"barista.run\/base\/watchers\/dbus\"\n\t\"barista.run\/outputs\"\n\ttestBar \"barista.run\/testing\/bar\"\n\t\"barista.run\/timing\"\n\n\t\"github.com\/godbus\/dbus\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc init() {\n\tbusType = dbusWatcher.Test\n}\n\ntype methodCall struct {\n\tname string\n\targ interface{}\n}\n\nfunc TestMedia(t *testing.T) {\n\ttestBar.New(t)\n\tbus := dbusWatcher.SetupTestBus()\n\tsrv := bus.RegisterService(\"org.mpris.MediaPlayer2.testplayer\")\n\tobj := srv.Object(\"\/org\/mpris\/MediaPlayer2\", \"org.mpris.MediaPlayer2.Player\")\n\tobj.SetProperties(map[string]interface{}{\n\t\t\"Position\": 180 * 1000 * 1000,\n\t\t\"PlaybackStatus\": \"Playing\",\n\t\t\"Rate\": 1.0,\n\t\t\"Metadata\": map[string]dbus.Variant{\n\t\t\t\"xesam:title\": dbus.MakeVariant(\"Title\"),\n\t\t\t\"xesam:artist\": dbus.MakeVariant([]string{\"Artist1\", \"Artist2\"}),\n\t\t},\n\t}, dbusWatcher.SignalTypeNone)\n\tcalls := make(chan methodCall, 10)\n\n\tpl := New(\"testplayer\")\n\ttestBar.Run(pl)\n\ttestBar.NextOutput(\"on start\").AssertText([]string{\"3m0s: Title\"})\n\n\tobj.SetProperty(\"PlaybackStatus\", \"Paused\", dbusWatcher.SignalTypeChanged)\n\ttestBar.NextOutput(\"on props change\").AssertText([]string{\"Title\"})\n\n\tobj.SetProperty(\"Metadata\", map[string]dbus.Variant{\n\t\t\"xesam:title\": dbus.MakeVariant(\"foo\"),\n\t}, dbusWatcher.SignalTypeChanged)\n\ttestBar.NextOutput(\"on title change\").AssertText([]string{\"foo\"})\n\n\tsrv1 := bus.RegisterService()\n\tobj = srv1.Object(\"\/org\/mpris\/MediaPlayer2\", \"org.mpris.MediaPlayer2.Player\")\n\tobj.SetProperty(\"PlaybackStatus\", \"Paused\", dbusWatcher.SignalTypeNone)\n\tobj.OnElse(func(method string, args ...interface{}) ([]interface{}, error) {\n\t\tc := methodCall{name: method}\n\t\tif len(args) > 0 {\n\t\t\tc.arg = args[0]\n\t\t}\n\t\tcalls <- c\n\t\treturn nil, nil\n\t})\n\ttestBar.AssertNoOutput(\"On unrelated service changes\")\n\n\tsrv.Unregister()\n\ttestBar.NextOutput(\"on service shutdown\").AssertEmpty()\n\n\tsrv1.AddName(\"org.mpris.MediaPlayer2.testplayer\")\n\ttestBar.NextOutput(\"on service move\").AssertText([]string{\"\"},\n\t\t\"Does not show stale title\")\n\n\tobj.SetProperties(map[string]interface{}{\n\t\t\"PlaybackStatus\": \"Paused\",\n\t\t\"Shuffle\": true,\n\t\t\"Rate\": 1.0,\n\t\t\"Metadata\": map[string]dbus.Variant{\n\t\t\t\"xesam:title\": dbus.MakeVariant(\"Song\"),\n\t\t\t\"mpris:trackid\": dbus.MakeVariant(\"a\"),\n\t\t\t\"mpris:ArtURL\": dbus.MakeVariant(\"file:\/\/\/tmp\/art.webp\"),\n\t\t},\n\t}, dbusWatcher.SignalTypeInvalidated)\n\tout := testBar.NextOutput(\"on properties change\")\n\tout.AssertText([]string{\"Song\"}, \"Still paused\")\n\n\tout.At(0).Click(bar.Event{Button: bar.ButtonLeft})\n\trequire.Equal(t,\n\t\tmethodCall{\"org.mpris.MediaPlayer2.Player.PlayPause\", nil},\n\t\t<-calls, \"On left click\")\n\n\tobj.SetProperty(\"PlaybackStatus\", \"Playing\", dbusWatcher.SignalTypeChanged)\n\tout = testBar.NextOutput(\"on playstate change\")\n\tout.AssertText([]string{\"0s: Song\"})\n\n\tout.At(0).Click(bar.Event{Button: bar.ScrollLeft})\n\tout.At(0).Click(bar.Event{Button: bar.ScrollRight})\n\trequire.Equal(t,\n\t\tmethodCall{\"org.mpris.MediaPlayer2.Player.Seek\", int64(-1000 * 1000)},\n\t\t<-calls, \"On scroll left\")\n\tselect {\n\tcase <-calls:\n\t\trequire.Fail(t, \"Unexpected method call\",\n\t\t\t\"Rate limiter should not allow second call\")\n\tcase <-time.After(50 * time.Millisecond):\n\t}\n\n\tobj.Emit(\"Seeked\", 99*1000*1000)\n\tout = testBar.NextOutput(\"on seek\")\n\tout.AssertText([]string{\"1m39s: Song\"})\n\n\tout.At(0).Click(bar.Event{Button: bar.ButtonBack})\n\trequire.Equal(t,\n\t\tmethodCall{\"org.mpris.MediaPlayer2.Player.Previous\", nil},\n\t\t<-calls, \"On back click\")\n\n\tobj.SetProperty(\"Metadata\", map[string]dbus.Variant{\n\t\t\"xesam:title\": dbus.MakeVariant(\"Title\"),\n\t\t\"xesam:artist\": dbus.MakeVariant([]string{\"Artist1\", \"Artist2\"}),\n\t\t\"xesam:album\": dbus.MakeVariant(\"Album\"),\n\t\t\"mpris:trackid\": dbus.MakeVariant(\"2\"),\n\t}, dbusWatcher.SignalTypeInvalidated)\n\ttestBar.NextOutput(\"on metadata update\").AssertText([]string{\"0s: Title\"})\n\n\tvar lastInfo Info\n\tpl.RepeatingOutput(func(i Info) bar.Output {\n\t\tlastInfo = i\n\t\treturn outputs.Textf(\"[%s, %v] %s - %s\",\n\t\t\ti.PlaybackStatus, i.TruncatedPosition(\"k\"), i.Title, i.Artist)\n\t})\n\tout = testBar.NextOutput(\"on output format change\")\n\tout.AssertText([]string{\"[Playing, 0s] Title - Artist1\"})\n\n\tout.At(0).Click(bar.Event{Button: bar.ButtonForward})\n\trequire.Equal(t,\n\t\tmethodCall{\"org.mpris.MediaPlayer2.Player.Next\", nil},\n\t\t<-calls, \"On click with custom output\")\n\n\tobj.SetProperty(\"Metadata\", map[string]dbus.Variant{\n\t\t\"xesam:title\": dbus.MakeVariant(\"Song\"),\n\t\t\"mpris:trackid\": dbus.MakeVariant(\"3\"),\n\t\t\"xesam:albumArtist\": dbus.MakeVariant([]string{\"Person1\", \"Person2\"}),\n\t\t\"mpris:length\": dbus.MakeVariant(180 * 1000 * 1000),\n\t}, dbusWatcher.SignalTypeInvalidated)\n\tout = testBar.NextOutput(\"on metadata update\")\n\tout.AssertText([]string{\"[Playing, 0s] Song - \"})\n\n\ttiming.AdvanceBy(time.Second)\n\tout = testBar.NextOutput(\"on time passing\")\n\tout.AssertText([]string{\"[Playing, 1s] Song - \"})\n\n\tobj.SetProperty(\"PlaybackStatus\", \"Paused\", dbusWatcher.SignalTypeChanged)\n\tout = testBar.NextOutput(\"on pause\")\n\tout.AssertText([]string{\"[Paused, 1s] Song - \"})\n\n\ttiming.AdvanceBy(time.Second)\n\ttestBar.AssertNoOutput(\"on time passing, but paused\")\n\n\tout.At(0).Click(bar.Event{Button: bar.ScrollDown})\n\trequire.Equal(t,\n\t\tmethodCall{\"org.mpris.MediaPlayer2.Player.Seek\", int64(1000 * 1000)},\n\t\t<-calls, \"On scroll with custom output\")\n\n\tobj.Emit(\"Seeked\", 2*1000*1000)\n\ttestBar.NextOutput(\"on seek\").AssertText([]string{\"[Paused, 2s] Song - \"})\n\n\tobj.SetProperty(\"PlaybackStatus\", \"Stopped\", dbusWatcher.SignalTypeChanged)\n\ttestBar.NextOutput(\"on stop\").AssertText([]string{\"[Stopped, 0s] Song - \"})\n\n\tobj.SetProperty(\"PlaybackStatus\", \"Playing\", dbusWatcher.SignalTypeChanged)\n\ttestBar.NextOutput(\"on play\").AssertText([]string{\"[Playing, 0s] Song - \"})\n\n\tlastInfo.Stop()\n\trequire.Equal(t,\n\t\tmethodCall{\"org.mpris.MediaPlayer2.Player.Stop\", nil},\n\t\t<-calls, \"Info.Stop()\")\n\tlastInfo.Play()\n\trequire.Equal(t,\n\t\tmethodCall{\"org.mpris.MediaPlayer2.Player.Play\", nil},\n\t\t<-calls, \"Info.Stop()\")\n\tlastInfo.Pause()\n\trequire.Equal(t,\n\t\tmethodCall{\"org.mpris.MediaPlayer2.Player.Pause\", nil},\n\t\t<-calls, \"Info.Stop()\")\n\n\trequire.True(t, lastInfo.Connected(), \"Playing is Connected()\")\n\trequire.True(t, lastInfo.Playing(), \"Playing == Playing()\")\n\trequire.False(t, lastInfo.Paused(), \"Playing != Paused()\")\n\trequire.False(t, lastInfo.Stopped(), \"Playing != Stopped()\")\n}\n\nfunc TestDbusLongAndFloats(t *testing.T) {\n\tfor _, tc := range []struct {\n\t\tval interface{}\n\t\tasDouble float64\n\t\tasLong int64\n\t}{\n\t\t{1.1, 1.1, 1},\n\t\t{2, 2.0, 2},\n\t\t{float32(3.3), 3.3, 3},\n\t\t{dbus.MakeVariant(4.4), 4.4, 4},\n\t\t{uint(5), 5.0, 5},\n\t\t{int64(6), 6.0, 6},\n\t\t{int32(7), 7.0, 7},\n\t\t{uint8(8), 8.0, 8},\n\t\t{dbus.MakeVariant(uint(9)), 9.0, 9},\n\t\t{dbus.MakeVariant(int64(10)), 10.0, 10},\n\t\t{dbus.MakeVariant(int32(11)), 11.0, 11},\n\t\t{dbus.MakeVariant(uint8(12)), 12.0, 12},\n\t\t{\"foo\", 0.0, 0},\n\t\t{dbus.MakeVariant(\"baz\"), 0.0, 0},\n\t\t{\"13.34\", 0.0, 0},\n\t\t{dbus.MakeVariant(\"14.45\"), 0.0, 0},\n\t\t{dbus.MakeVariant([]float64{15.8}), 0.0, 0},\n\t} {\n\t\trequire.InDelta(t, tc.asDouble, getDouble(tc.val), 0.001,\n\t\t\t\"getDouble(%v) == %v\", tc.val, tc.asDouble)\n\t\trequire.Equal(t, tc.asLong, getLong(tc.val),\n\t\t\t\"getLong(%v) == %v\", tc.val, tc.asLong)\n\t}\n}\n<commit_msg>Allow -count > 1 for modules\/media test<commit_after>\/\/ Copyright 2018 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage media\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"barista.run\/bar\"\n\tdbusWatcher \"barista.run\/base\/watchers\/dbus\"\n\t\"barista.run\/outputs\"\n\ttestBar \"barista.run\/testing\/bar\"\n\t\"barista.run\/timing\"\n\t\"golang.org\/x\/time\/rate\"\n\n\t\"github.com\/godbus\/dbus\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc init() {\n\tbusType = dbusWatcher.Test\n}\n\ntype methodCall struct {\n\tname string\n\targ interface{}\n}\n\nfunc TestMedia(t *testing.T) {\n\t\/\/ To allow -count >1 to work.\n\tseekLimiter = rate.NewLimiter(rate.Every(50*time.Millisecond), 1)\n\n\ttestBar.New(t)\n\tbus := dbusWatcher.SetupTestBus()\n\tsrv := bus.RegisterService(\"org.mpris.MediaPlayer2.testplayer\")\n\tobj := srv.Object(\"\/org\/mpris\/MediaPlayer2\", \"org.mpris.MediaPlayer2.Player\")\n\tobj.SetProperties(map[string]interface{}{\n\t\t\"Position\": 180 * 1000 * 1000,\n\t\t\"PlaybackStatus\": \"Playing\",\n\t\t\"Rate\": 1.0,\n\t\t\"Metadata\": map[string]dbus.Variant{\n\t\t\t\"xesam:title\": dbus.MakeVariant(\"Title\"),\n\t\t\t\"xesam:artist\": dbus.MakeVariant([]string{\"Artist1\", \"Artist2\"}),\n\t\t},\n\t}, dbusWatcher.SignalTypeNone)\n\tcalls := make(chan methodCall, 10)\n\n\tpl := New(\"testplayer\")\n\ttestBar.Run(pl)\n\ttestBar.NextOutput(\"on start\").AssertText([]string{\"3m0s: Title\"})\n\n\tobj.SetProperty(\"PlaybackStatus\", \"Paused\", dbusWatcher.SignalTypeChanged)\n\ttestBar.NextOutput(\"on props change\").AssertText([]string{\"Title\"})\n\n\tobj.SetProperty(\"Metadata\", map[string]dbus.Variant{\n\t\t\"xesam:title\": dbus.MakeVariant(\"foo\"),\n\t}, dbusWatcher.SignalTypeChanged)\n\ttestBar.NextOutput(\"on title change\").AssertText([]string{\"foo\"})\n\n\tsrv1 := bus.RegisterService()\n\tobj = srv1.Object(\"\/org\/mpris\/MediaPlayer2\", \"org.mpris.MediaPlayer2.Player\")\n\tobj.SetProperty(\"PlaybackStatus\", \"Paused\", dbusWatcher.SignalTypeNone)\n\tobj.OnElse(func(method string, args ...interface{}) ([]interface{}, error) {\n\t\tc := methodCall{name: method}\n\t\tif len(args) > 0 {\n\t\t\tc.arg = args[0]\n\t\t}\n\t\tcalls <- c\n\t\treturn nil, nil\n\t})\n\ttestBar.AssertNoOutput(\"On unrelated service changes\")\n\n\tsrv.Unregister()\n\ttestBar.NextOutput(\"on service shutdown\").AssertEmpty()\n\n\tsrv1.AddName(\"org.mpris.MediaPlayer2.testplayer\")\n\ttestBar.NextOutput(\"on service move\").AssertText([]string{\"\"},\n\t\t\"Does not show stale title\")\n\n\tobj.SetProperties(map[string]interface{}{\n\t\t\"PlaybackStatus\": \"Paused\",\n\t\t\"Shuffle\": true,\n\t\t\"Rate\": 1.0,\n\t\t\"Metadata\": map[string]dbus.Variant{\n\t\t\t\"xesam:title\": dbus.MakeVariant(\"Song\"),\n\t\t\t\"mpris:trackid\": dbus.MakeVariant(\"a\"),\n\t\t\t\"mpris:ArtURL\": dbus.MakeVariant(\"file:\/\/\/tmp\/art.webp\"),\n\t\t},\n\t}, dbusWatcher.SignalTypeInvalidated)\n\tout := testBar.NextOutput(\"on properties change\")\n\tout.AssertText([]string{\"Song\"}, \"Still paused\")\n\n\tout.At(0).Click(bar.Event{Button: bar.ButtonLeft})\n\trequire.Equal(t,\n\t\tmethodCall{\"org.mpris.MediaPlayer2.Player.PlayPause\", nil},\n\t\t<-calls, \"On left click\")\n\n\tobj.SetProperty(\"PlaybackStatus\", \"Playing\", dbusWatcher.SignalTypeChanged)\n\tout = testBar.NextOutput(\"on playstate change\")\n\tout.AssertText([]string{\"0s: Song\"})\n\n\tout.At(0).Click(bar.Event{Button: bar.ScrollLeft})\n\tout.At(0).Click(bar.Event{Button: bar.ScrollRight})\n\trequire.Equal(t,\n\t\tmethodCall{\"org.mpris.MediaPlayer2.Player.Seek\", int64(-1000 * 1000)},\n\t\t<-calls, \"On scroll left\")\n\tselect {\n\tcase <-calls:\n\t\trequire.Fail(t, \"Unexpected method call\",\n\t\t\t\"Rate limiter should not allow second call\")\n\tcase <-time.After(50 * time.Millisecond):\n\t}\n\n\tobj.Emit(\"Seeked\", 99*1000*1000)\n\tout = testBar.NextOutput(\"on seek\")\n\tout.AssertText([]string{\"1m39s: Song\"})\n\n\tout.At(0).Click(bar.Event{Button: bar.ButtonBack})\n\trequire.Equal(t,\n\t\tmethodCall{\"org.mpris.MediaPlayer2.Player.Previous\", nil},\n\t\t<-calls, \"On back click\")\n\n\tobj.SetProperty(\"Metadata\", map[string]dbus.Variant{\n\t\t\"xesam:title\": dbus.MakeVariant(\"Title\"),\n\t\t\"xesam:artist\": dbus.MakeVariant([]string{\"Artist1\", \"Artist2\"}),\n\t\t\"xesam:album\": dbus.MakeVariant(\"Album\"),\n\t\t\"mpris:trackid\": dbus.MakeVariant(\"2\"),\n\t}, dbusWatcher.SignalTypeInvalidated)\n\ttestBar.NextOutput(\"on metadata update\").AssertText([]string{\"0s: Title\"})\n\n\tvar lastInfo Info\n\tpl.RepeatingOutput(func(i Info) bar.Output {\n\t\tlastInfo = i\n\t\treturn outputs.Textf(\"[%s, %v] %s - %s\",\n\t\t\ti.PlaybackStatus, i.TruncatedPosition(\"k\"), i.Title, i.Artist)\n\t})\n\tout = testBar.NextOutput(\"on output format change\")\n\tout.AssertText([]string{\"[Playing, 0s] Title - Artist1\"})\n\n\tout.At(0).Click(bar.Event{Button: bar.ButtonForward})\n\trequire.Equal(t,\n\t\tmethodCall{\"org.mpris.MediaPlayer2.Player.Next\", nil},\n\t\t<-calls, \"On click with custom output\")\n\n\tobj.SetProperty(\"Metadata\", map[string]dbus.Variant{\n\t\t\"xesam:title\": dbus.MakeVariant(\"Song\"),\n\t\t\"mpris:trackid\": dbus.MakeVariant(\"3\"),\n\t\t\"xesam:albumArtist\": dbus.MakeVariant([]string{\"Person1\", \"Person2\"}),\n\t\t\"mpris:length\": dbus.MakeVariant(180 * 1000 * 1000),\n\t}, dbusWatcher.SignalTypeInvalidated)\n\tout = testBar.NextOutput(\"on metadata update\")\n\tout.AssertText([]string{\"[Playing, 0s] Song - \"})\n\n\ttiming.AdvanceBy(time.Second)\n\tout = testBar.NextOutput(\"on time passing\")\n\tout.AssertText([]string{\"[Playing, 1s] Song - \"})\n\n\tobj.SetProperty(\"PlaybackStatus\", \"Paused\", dbusWatcher.SignalTypeChanged)\n\tout = testBar.NextOutput(\"on pause\")\n\tout.AssertText([]string{\"[Paused, 1s] Song - \"})\n\n\ttiming.AdvanceBy(time.Second)\n\ttestBar.AssertNoOutput(\"on time passing, but paused\")\n\n\tout.At(0).Click(bar.Event{Button: bar.ScrollDown})\n\trequire.Equal(t,\n\t\tmethodCall{\"org.mpris.MediaPlayer2.Player.Seek\", int64(1000 * 1000)},\n\t\t<-calls, \"On scroll with custom output\")\n\n\tobj.Emit(\"Seeked\", 2*1000*1000)\n\ttestBar.NextOutput(\"on seek\").AssertText([]string{\"[Paused, 2s] Song - \"})\n\n\tobj.SetProperty(\"PlaybackStatus\", \"Stopped\", dbusWatcher.SignalTypeChanged)\n\ttestBar.NextOutput(\"on stop\").AssertText([]string{\"[Stopped, 0s] Song - \"})\n\n\tobj.SetProperty(\"PlaybackStatus\", \"Playing\", dbusWatcher.SignalTypeChanged)\n\ttestBar.NextOutput(\"on play\").AssertText([]string{\"[Playing, 0s] Song - \"})\n\n\tlastInfo.Stop()\n\trequire.Equal(t,\n\t\tmethodCall{\"org.mpris.MediaPlayer2.Player.Stop\", nil},\n\t\t<-calls, \"Info.Stop()\")\n\tlastInfo.Play()\n\trequire.Equal(t,\n\t\tmethodCall{\"org.mpris.MediaPlayer2.Player.Play\", nil},\n\t\t<-calls, \"Info.Stop()\")\n\tlastInfo.Pause()\n\trequire.Equal(t,\n\t\tmethodCall{\"org.mpris.MediaPlayer2.Player.Pause\", nil},\n\t\t<-calls, \"Info.Stop()\")\n\n\trequire.True(t, lastInfo.Connected(), \"Playing is Connected()\")\n\trequire.True(t, lastInfo.Playing(), \"Playing == Playing()\")\n\trequire.False(t, lastInfo.Paused(), \"Playing != Paused()\")\n\trequire.False(t, lastInfo.Stopped(), \"Playing != Stopped()\")\n}\n\nfunc TestDbusLongAndFloats(t *testing.T) {\n\tfor _, tc := range []struct {\n\t\tval interface{}\n\t\tasDouble float64\n\t\tasLong int64\n\t}{\n\t\t{1.1, 1.1, 1},\n\t\t{2, 2.0, 2},\n\t\t{float32(3.3), 3.3, 3},\n\t\t{dbus.MakeVariant(4.4), 4.4, 4},\n\t\t{uint(5), 5.0, 5},\n\t\t{int64(6), 6.0, 6},\n\t\t{int32(7), 7.0, 7},\n\t\t{uint8(8), 8.0, 8},\n\t\t{dbus.MakeVariant(uint(9)), 9.0, 9},\n\t\t{dbus.MakeVariant(int64(10)), 10.0, 10},\n\t\t{dbus.MakeVariant(int32(11)), 11.0, 11},\n\t\t{dbus.MakeVariant(uint8(12)), 12.0, 12},\n\t\t{\"foo\", 0.0, 0},\n\t\t{dbus.MakeVariant(\"baz\"), 0.0, 0},\n\t\t{\"13.34\", 0.0, 0},\n\t\t{dbus.MakeVariant(\"14.45\"), 0.0, 0},\n\t\t{dbus.MakeVariant([]float64{15.8}), 0.0, 0},\n\t} {\n\t\trequire.InDelta(t, tc.asDouble, getDouble(tc.val), 0.001,\n\t\t\t\"getDouble(%v) == %v\", tc.val, tc.asDouble)\n\t\trequire.Equal(t, tc.asLong, getLong(tc.val),\n\t\t\t\"getLong(%v) == %v\", tc.val, tc.asLong)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/spf13\/cobra\"\n\t\"k8s.io\/kops\/cmd\/kops\/util\"\n\tapi \"k8s.io\/kops\/pkg\/apis\/kops\"\n\t\"k8s.io\/kops\/pkg\/validation\"\n\t\"k8s.io\/kops\/util\/pkg\/tables\"\n\tk8sapi \"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/v1\"\n\tk8s_clientset \"k8s.io\/kubernetes\/pkg\/client\/clientset_generated\/clientset\"\n\t\"k8s.io\/kubernetes\/pkg\/client\/unversioned\/clientcmd\"\n)\n\ntype ValidateClusterOptions struct {\n\t\/\/ No options yet\n}\n\nfunc NewCmdValidateCluster(f *util.Factory, out io.Writer) *cobra.Command {\n\toptions := &ValidateClusterOptions{}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"cluster\",\n\t\t\/\/Aliases: []string{\"cluster\"},\n\t\tShort: \"Validate cluster\",\n\t\tLong: `Validate a kubernetes cluster`,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\terr := RunValidateCluster(f, cmd, args, os.Stdout, options)\n\t\t\tif err != nil {\n\t\t\t\texitWithError(err)\n\t\t\t}\n\t\t},\n\t}\n\n\treturn cmd\n}\n\nfunc RunValidateCluster(f *util.Factory, cmd *cobra.Command, args []string, out io.Writer, options *ValidateClusterOptions) error {\n\terr := rootCommand.ProcessArgs(args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcluster, err := rootCommand.Cluster()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclientSet, err := f.Clientset()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlist, err := clientSet.InstanceGroups(cluster.ObjectMeta.Name).List(k8sapi.ListOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot get InstanceGroups for %q: %v\", cluster.ObjectMeta.Name, err)\n\t}\n\n\tfmt.Fprintf(out, \"Validating cluster %v\\n\\n\", cluster.ObjectMeta.Name)\n\n\tvar instanceGroups []api.InstanceGroup\n\tfor _, ig := range list.Items {\n\t\tinstanceGroups = append(instanceGroups, ig)\n\t\tglog.V(2).Infof(\"instance group: %#v\\n\\n\", ig.Spec)\n\t}\n\n\tif len(instanceGroups) == 0 {\n\t\treturn fmt.Errorf(\"no InstanceGroup objects found\\n\")\n\t}\n\n\t\/\/ TODO: Refactor into util.Factory\n\tcontextName := cluster.ObjectMeta.Name\n\tconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(\n\t\tclientcmd.NewDefaultClientConfigLoadingRules(),\n\t\t&clientcmd.ConfigOverrides{CurrentContext: contextName}).ClientConfig()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Cannot load kubecfg settings for %q: %v\\n\", contextName, err)\n\t}\n\n\tk8sClient, err := k8s_clientset.NewForConfig(config)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Cannot build kube api client for %q: %v\\n\", contextName, err)\n\t}\n\n\tvalidationCluster, validationFailed := validation.ValidateCluster(cluster.ObjectMeta.Name, list, k8sClient)\n\n\tif validationCluster == nil || validationCluster.NodeList == nil || validationCluster.NodeList.Items == nil {\n\t\t\/\/ validationFailed error is already formatted\n\t\treturn validationFailed\n\t}\n\n\tt := &tables.Table{}\n\tt.AddColumn(\"NAME\", func(c api.InstanceGroup) string {\n\t\treturn c.ObjectMeta.Name\n\t})\n\tt.AddColumn(\"ROLE\", func(c api.InstanceGroup) string {\n\t\treturn string(c.Spec.Role)\n\t})\n\tt.AddColumn(\"MACHINETYPE\", func(c api.InstanceGroup) string {\n\t\treturn c.Spec.MachineType\n\t})\n\tt.AddColumn(\"SUBNETS\", func(c api.InstanceGroup) string {\n\t\treturn strings.Join(c.Spec.Subnets, \",\")\n\t})\n\tt.AddColumn(\"MIN\", func(c api.InstanceGroup) string {\n\t\treturn int32PointerToString(c.Spec.MinSize)\n\t})\n\tt.AddColumn(\"MAX\", func(c api.InstanceGroup) string {\n\t\treturn int32PointerToString(c.Spec.MaxSize)\n\t})\n\n\tfmt.Fprintln(out, \"INSTANCE GROUPS\")\n\terr = t.Render(instanceGroups, out, \"NAME\", \"ROLE\", \"MACHINETYPE\", \"MIN\", \"MAX\", \"SUBNETS\")\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot render nodes for %q: %v\", cluster.ObjectMeta.Name, err)\n\t}\n\n\tt = &tables.Table{}\n\n\tt.AddColumn(\"NAME\", func(n v1.Node) string {\n\t\treturn n.Name\n\t})\n\n\tt.AddColumn(\"READY\", func(n v1.Node) v1.ConditionStatus {\n\t\treturn validation.GetNodeConditionStatus(&n)\n\t})\n\n\tt.AddColumn(\"ROLE\", func(n v1.Node) string {\n\t\t\/\/ TODO: Maybe print the instance group role instead?\n\t\t\/\/ TODO: Maybe include the instance group name?\n\t\trole := \"node\"\n\t\tif val, ok := n.ObjectMeta.Labels[api.RoleLabelName]; ok {\n\t\t\trole = val\n\t\t}\n\t\treturn role\n\t})\n\n\tfmt.Fprintln(out, \"\\nNODE STATUS\")\n\terr = t.Render(validationCluster.NodeList.Items, out, \"NAME\", \"ROLE\", \"READY\")\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot render nodes for %q: %v\", cluster.ObjectMeta.Name, err)\n\t}\n\n\tif validationFailed == nil {\n\t\tfmt.Fprintf(out, \"\\nYour cluster %s is ready\\n\", cluster.ObjectMeta.Name)\n\t\treturn nil\n\t} else {\n\t\t\/\/ do we need to print which instance group is not ready?\n\t\t\/\/ nodes are going to be a pain\n\t\tfmt.Fprint(out, \"\\nValidation Failed\\n\")\n\t\tfmt.Fprintf(out, \"Ready Master(s) %d out of %d.\\n\", len(validationCluster.MastersNotReadyArray), validationCluster.MastersCount)\n\t\tfmt.Fprintf(out, \"Ready Node(s) %d out of %d.\\n\", len(validationCluster.NodesNotReadyArray), validationCluster.NodesCount)\n\t\treturn fmt.Errorf(\"Your cluster %s is NOT ready.\\n\", cluster.ObjectMeta.Name)\n\t}\n}\n<commit_msg>Correct variables name<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/spf13\/cobra\"\n\t\"k8s.io\/kops\/cmd\/kops\/util\"\n\tapi \"k8s.io\/kops\/pkg\/apis\/kops\"\n\t\"k8s.io\/kops\/pkg\/validation\"\n\t\"k8s.io\/kops\/util\/pkg\/tables\"\n\tk8sapi \"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/v1\"\n\tk8s_clientset \"k8s.io\/kubernetes\/pkg\/client\/clientset_generated\/clientset\"\n\t\"k8s.io\/kubernetes\/pkg\/client\/unversioned\/clientcmd\"\n)\n\ntype ValidateClusterOptions struct {\n\t\/\/ No options yet\n}\n\nfunc NewCmdValidateCluster(f *util.Factory, out io.Writer) *cobra.Command {\n\toptions := &ValidateClusterOptions{}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"cluster\",\n\t\t\/\/Aliases: []string{\"cluster\"},\n\t\tShort: \"Validate cluster\",\n\t\tLong: `Validate a kubernetes cluster`,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\terr := RunValidateCluster(f, cmd, args, os.Stdout, options)\n\t\t\tif err != nil {\n\t\t\t\texitWithError(err)\n\t\t\t}\n\t\t},\n\t}\n\n\treturn cmd\n}\n\nfunc RunValidateCluster(f *util.Factory, cmd *cobra.Command, args []string, out io.Writer, options *ValidateClusterOptions) error {\n\terr := rootCommand.ProcessArgs(args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcluster, err := rootCommand.Cluster()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclientSet, err := f.Clientset()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlist, err := clientSet.InstanceGroups(cluster.ObjectMeta.Name).List(k8sapi.ListOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot get InstanceGroups for %q: %v\", cluster.ObjectMeta.Name, err)\n\t}\n\n\tfmt.Fprintf(out, \"Validating cluster %v\\n\\n\", cluster.ObjectMeta.Name)\n\n\tvar instanceGroups []api.InstanceGroup\n\tfor _, ig := range list.Items {\n\t\tinstanceGroups = append(instanceGroups, ig)\n\t\tglog.V(2).Infof(\"instance group: %#v\\n\\n\", ig.Spec)\n\t}\n\n\tif len(instanceGroups) == 0 {\n\t\treturn fmt.Errorf(\"no InstanceGroup objects found\\n\")\n\t}\n\n\t\/\/ TODO: Refactor into util.Factory\n\tcontextName := cluster.ObjectMeta.Name\n\tconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(\n\t\tclientcmd.NewDefaultClientConfigLoadingRules(),\n\t\t&clientcmd.ConfigOverrides{CurrentContext: contextName}).ClientConfig()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Cannot load kubecfg settings for %q: %v\\n\", contextName, err)\n\t}\n\n\tk8sClient, err := k8s_clientset.NewForConfig(config)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Cannot build kube api client for %q: %v\\n\", contextName, err)\n\t}\n\n\tvalidationCluster, validationFailed := validation.ValidateCluster(cluster.ObjectMeta.Name, list, k8sClient)\n\n\tif validationCluster == nil || validationCluster.NodeList == nil || validationCluster.NodeList.Items == nil {\n\t\t\/\/ validationFailed error is already formatted\n\t\treturn validationFailed\n\t}\n\n\tt := &tables.Table{}\n\tt.AddColumn(\"NAME\", func(c api.InstanceGroup) string {\n\t\treturn c.ObjectMeta.Name\n\t})\n\tt.AddColumn(\"ROLE\", func(c api.InstanceGroup) string {\n\t\treturn string(c.Spec.Role)\n\t})\n\tt.AddColumn(\"MACHINETYPE\", func(c api.InstanceGroup) string {\n\t\treturn c.Spec.MachineType\n\t})\n\tt.AddColumn(\"SUBNETS\", func(c api.InstanceGroup) string {\n\t\treturn strings.Join(c.Spec.Subnets, \",\")\n\t})\n\tt.AddColumn(\"MIN\", func(c api.InstanceGroup) string {\n\t\treturn int32PointerToString(c.Spec.MinSize)\n\t})\n\tt.AddColumn(\"MAX\", func(c api.InstanceGroup) string {\n\t\treturn int32PointerToString(c.Spec.MaxSize)\n\t})\n\n\tfmt.Fprintln(out, \"INSTANCE GROUPS\")\n\terr = t.Render(instanceGroups, out, \"NAME\", \"ROLE\", \"MACHINETYPE\", \"MIN\", \"MAX\", \"SUBNETS\")\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot render nodes for %q: %v\", cluster.ObjectMeta.Name, err)\n\t}\n\n\tt = &tables.Table{}\n\n\tt.AddColumn(\"NAME\", func(n v1.Node) string {\n\t\treturn n.Name\n\t})\n\n\tt.AddColumn(\"READY\", func(n v1.Node) v1.ConditionStatus {\n\t\treturn validation.GetNodeConditionStatus(&n)\n\t})\n\n\tt.AddColumn(\"ROLE\", func(n v1.Node) string {\n\t\t\/\/ TODO: Maybe print the instance group role instead?\n\t\t\/\/ TODO: Maybe include the instance group name?\n\t\trole := \"node\"\n\t\tif val, ok := n.ObjectMeta.Labels[api.RoleLabelName]; ok {\n\t\t\trole = val\n\t\t}\n\t\treturn role\n\t})\n\n\tfmt.Fprintln(out, \"\\nNODE STATUS\")\n\terr = t.Render(validationCluster.NodeList.Items, out, \"NAME\", \"ROLE\", \"READY\")\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot render nodes for %q: %v\", cluster.ObjectMeta.Name, err)\n\t}\n\n\tif validationFailed == nil {\n\t\tfmt.Fprintf(out, \"\\nYour cluster %s is ready\\n\", cluster.ObjectMeta.Name)\n\t\treturn nil\n\t} else {\n\t\t\/\/ do we need to print which instance group is not ready?\n\t\t\/\/ nodes are going to be a pain\n\t\tfmt.Fprint(out, \"\\nValidation Failed\\n\")\n\t\tfmt.Fprintf(out, \"Ready Master(s) %d out of %d.\\n\", len(validationCluster.MastersReadyArray), validationCluster.MastersCount)\n\t\tfmt.Fprintf(out, \"Ready Node(s) %d out of %d.\\n\", len(validationCluster.NodesReadyArray), validationCluster.NodesCount)\n\t\treturn fmt.Errorf(\"Your cluster %s is NOT ready.\\n\", cluster.ObjectMeta.Name)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"github.com\/mingzhi\/gomath\/stat\/correlation\"\n\t\"github.com\/mingzhi\/gomath\/stat\/desc\"\n\t\"github.com\/mingzhi\/gsl-cgo\/randist\"\n\t. \"github.com\/mingzhi\/popsimu\/cmd\"\n\t\"github.com\/mingzhi\/popsimu\/pop\"\n\t\"github.com\/mingzhi\/seqcor\/calculator\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n)\n\nvar (\n\tncpu int\n\tsampleSize int\n\tinput string\n\toutput string\n)\n\nfunc init() {\n\tconst (\n\t\tdefaultMaxl = 200\n\t)\n\tdefaultNCPU := runtime.NumCPU()\n\n\tflag.IntVar(&ncpu, \"ncpu\", defaultNCPU, \"ncpu\")\n\tflag.IntVar(&sampleSize, \"sample\", 1000, \"sample size of lineages\")\n\tflag.Parse()\n\tinput = flag.Arg(0)\n\toutput = flag.Arg(1)\n\n\truntime.GOMAXPROCS(ncpu)\n}\n\nfunc main() {\n\tconfigChan := read(input)\n\tconfigMap := make(map[int][]pop.Config)\n\tfor cfg := range configChan {\n\t\tconfigMap[cfg.Length] = append(configMap[cfg.Length], cfg)\n\t}\n\n\tvar results []Result\n\tfor seqLen, cfgs := range configMap {\n\t\tcfgChan := make(chan pop.Config)\n\t\tgo func() {\n\t\t\tdefer close(cfgChan)\n\t\t\tfor _, cfg := range cfgs {\n\t\t\t\tcfgChan <- cfg\n\t\t\t}\n\t\t}()\n\t\tres := run(cfgChan, seqLen)\n\t\tresults = append(results, res...)\n\t}\n\n\twrite(output, results)\n}\n\ntype popConfig struct {\n\tp *pop.Pop\n\tc pop.Config\n}\n\nfunc run(configChan chan pop.Config, seqLen int) []Result {\n\tsimResChan := batchSimu(configChan)\n\tcalcChan := calc(simResChan, seqLen)\n\tresults := collect(calcChan)\n\treturn results\n}\n\nfunc batchSimu(configChan chan pop.Config) (resChan chan popConfig) {\n\tnumWorker := runtime.GOMAXPROCS(0)\n\tresChan = make(chan popConfig, numWorker)\n\tdone := make(chan bool)\n\tsimulator := func() {\n\t\tdefer send(done)\n\t\tfor c := range configChan {\n\t\t\tp := simu(c)\n\t\t\tpc := popConfig{p: p, c: c}\n\t\t\tresChan <- pc\n\t\t}\n\t}\n\n\tfor i := 0; i < numWorker; i++ {\n\t\tgo simulator()\n\t}\n\n\tgo func() {\n\t\tdefer close(resChan)\n\t\twait(done, numWorker)\n\t}()\n\n\treturn resChan\n}\n\ntype calculators struct {\n\tks *calculator.Ks\n\tct *calculator.AutoCovFFTW\n\tt2, t3, t4 []float64\n}\n\nfunc (c *calculators) Increment(xs []float64) {\n\tfor i := 0; i < len(xs); i++ {\n\t\tc.ks.Increment(xs[i])\n\t}\n\tc.ct.Increment(xs)\n}\n\nfunc (c *calculators) Append(c2 *calculators) {\n\tc.ks.Append(c2.ks)\n\tc.ct.Append(c2.ct)\n\tc.t2 = append(c.t2, c2.t2...)\n\tc.t3 = append(c.t3, c2.t3...)\n\tc.t4 = append(c.t4, c2.t4...)\n}\n\ntype calcConfig struct {\n\tcfg pop.Config\n\tc *calculators\n}\n\nfunc calc(simResChan chan popConfig, seqLen int) chan calcConfig {\n\tnumWorker := runtime.GOMAXPROCS(0)\n\tcircular := true\n\tdft := correlation.NewFFTW(seqLen, circular)\n\tdone := make(chan bool)\n\n\tcalcChan := make(chan calcConfig, numWorker)\n\tworker := func() {\n\t\tdefer send(done)\n\t\tfor res := range simResChan {\n\t\t\tcc := calcConfig{}\n\t\t\tcc.cfg = res.c\n\n\t\t\tsequences := [][]byte{}\n\t\t\tfor _, g := range res.p.Genomes {\n\t\t\t\tsequences = append(sequences, g.Seq())\n\t\t\t}\n\t\t\tks := calculator.CalcKs(sequences)\n\t\t\tct := calculator.CalcCtFFTW(sequences, &dft)\n\t\t\tt2 := pop.CalcT2(res.p, sampleSize)\n\t\t\tt3 := pop.CalcT3(res.p, sampleSize)\n\t\t\tt4 := pop.CalcT4(res.p, sampleSize)\n\n\t\t\tcc.c = &calculators{}\n\t\t\tcc.c.ks = ks\n\t\t\tcc.c.ct = ct\n\t\t\tcc.c.t2 = t2\n\t\t\tcc.c.t3 = t3\n\t\t\tcc.c.t4 = t4\n\n\t\t\tcalcChan <- cc\n\t\t}\n\t}\n\n\tfor i := 0; i < numWorker; i++ {\n\t\tgo worker()\n\t}\n\n\tgo func() {\n\t\tdefer dft.Close()\n\t\tdefer close(calcChan)\n\t\twait(done, numWorker)\n\t}()\n\n\treturn calcChan\n}\n\nfunc collect(calcChan chan calcConfig) []Result {\n\tm := make(map[pop.Config]*calculators)\n\tvarm := make(map[pop.Config]*desc.Variance)\n\tfor cc := range calcChan {\n\t\tc, found := m[cc.cfg]\n\t\tv, _ := varm[cc.cfg]\n\t\tif !found {\n\t\t\tc = cc.c\n\t\t\tv = desc.NewVariance()\n\t\t} else {\n\t\t\tc.Append(cc.c)\n\t\t}\n\n\t\tv.Increment(c.ks.Mean.GetResult())\n\n\t\tm[cc.cfg] = c\n\t\tvarm[cc.cfg] = v\n\t}\n\n\tvar results []Result\n\tfor cfg, c := range m {\n\t\tres := Result{}\n\t\tres.Config = cfg\n\t\tres.C = createCovResult(c)\n\t\tres.C.KsVar = varm[cfg].GetResult()\n\t\tres.T2 = c.t2\n\t\tres.T3 = c.t3\n\t\tres.T4 = c.t4\n\t\tresults = append(results, res)\n\t}\n\n\treturn results\n}\n\nfunc newPop(c pop.Config, src rand.Source) *pop.Pop {\n\tp := pop.New()\n\tr := rand.New(src)\n\tg := pop.NewRandomPopGenerator(r, c.Size, c.Length, []byte(c.Alphabet))\n\tg.Operate(p)\n\treturn p\n}\n\nfunc generateEvents(p *pop.Pop, sampler pop.Sampler, mutateEvents []*pop.Event, numGen int) chan *pop.Event {\n\tc := make(chan *pop.Event)\n\n\tgo func() {\n\t\tdefer close(c)\n\t\tmutateRate := 0.0\n\t\tfor _, e := range mutateEvents {\n\t\t\tmutateRate += e.Rate\n\t\t}\n\n\t\trng := randist.NewRNG(randist.MT19937_1999)\n\t\tdefer rng.Free()\n\n\t\tfor i := 0; i < numGen; i++ {\n\t\t\t\/\/ reproduction.\n\t\t\tsamplerEvent := &pop.Event{\n\t\t\t\tOps: sampler,\n\t\t\t\tPop: p,\n\t\t\t}\n\n\t\t\tsampler.Start()\n\t\t\tgo func() { c <- samplerEvent }()\n\t\t\tsampler.Wait()\n\n\t\t\tt := sampler.Time(p)\n\t\t\tnum := randist.PoissonRandomInt(rng, mutateRate*t*float64(p.Size()))\n\t\t\tfor j := 0; j < num; j++ {\n\t\t\t\tc <- pop.Emit(mutateEvents)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn c\n}\n\nfunc simu(c pop.Config) *pop.Pop {\n\tseed := time.Now().UnixNano()\n\tsrc := rand.NewSource(seed)\n\n\tp := newPop(c, src)\n\tr := rand.New(src)\n\n\trng := randist.NewRNG(randist.MT19937_1999)\n\n\tvar sampler pop.Sampler\n\tswitch c.SampleMethod {\n\tcase \"WrightFisher\":\n\t\tsampler = pop.NewWrightFisherSampler(rng)\n\tcase \"LinearSelection\":\n\t\tsampler = pop.NewLinearSelectionSampler(rng)\n\tdefault:\n\t\tsampler = pop.NewMoranSampler(rng)\n\t}\n\n\tmutationEvent := &pop.Event{\n\t\tOps: pop.NewSimpleMutator(r, []byte(c.Alphabet)),\n\t\tPop: p,\n\t\tRate: c.Mutation.Rate * float64(c.Length),\n\t}\n\n\tfMutator := pop.NewFitnessMutator(c.Mutation.Beneficial.S, 0, rng, pop.MutateStep)\n\tbeneficialMutationEvent := &pop.Event{\n\t\tOps: fMutator,\n\t\tPop: p,\n\t\tRate: c.Mutation.Beneficial.Rate * float64(c.Length),\n\t}\n\n\tlambda := 1.0 \/ float64(c.Transfer.In.Fragment)\n\tfragGenerator := pop.NewExpFrag(lambda, src)\n\ttransferEvent := &pop.Event{\n\t\tOps: pop.NewSimpleTransfer(fragGenerator, r),\n\t\tPop: p,\n\t\tRate: c.Transfer.In.Rate * float64(c.Length),\n\t}\n\n\totherEvents := []*pop.Event{mutationEvent, transferEvent, beneficialMutationEvent}\n\teventChan := generateEvents(p, sampler, otherEvents, c.NumGen)\n\n\tpop.Evolve(eventChan)\n\treturn p\n}\n\nfunc send(done chan bool) {\n\tdone <- true\n}\n\nfunc wait(done chan bool, numWorker int) {\n\tfor i := 0; i < numWorker; i++ {\n\t\t<-done\n\t}\n}\n\nfunc read(filename string) (configChan chan pop.Config) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer f.Close()\n\n\tconfigs := readConfigs(f)\n\tconfigChan = make(chan pop.Config)\n\n\tmakeConfigChan := func() {\n\t\tdefer close(configChan)\n\t\tfor _, c := range configs {\n\t\t\tconfigChan <- c\n\t\t}\n\t}\n\n\tgo makeConfigChan()\n\n\treturn\n}\n\nfunc readConfigs(r io.Reader) (configs []pop.Config) {\n\td := json.NewDecoder(r)\n\terr := d.Decode(&configs)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn\n}\n\nfunc createCovResult(c *calculators) CovResult {\n\tvar cr CovResult\n\tcr.Ks = c.ks.Mean.GetResult()\n\tfor i := 0; i < c.ct.N; i++ {\n\t\tcr.Ct = append(cr.Ct, c.ct.GetResult(i))\n\t}\n\treturn cr\n}\n\nfunc write(filename string, results []Result) {\n\tw, err := os.Create(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer w.Close()\n\n\tencoder := json.NewEncoder(w)\n\tif err := encoder.Encode(results); err != nil {\n\t\tpanic(err)\n\t}\n}\n<commit_msg>seeding<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"github.com\/mingzhi\/gomath\/stat\/correlation\"\n\t\"github.com\/mingzhi\/gomath\/stat\/desc\"\n\t\"github.com\/mingzhi\/gsl-cgo\/randist\"\n\t. \"github.com\/mingzhi\/popsimu\/cmd\"\n\t\"github.com\/mingzhi\/popsimu\/pop\"\n\t\"github.com\/mingzhi\/seqcor\/calculator\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n)\n\nvar (\n\tncpu int\n\tsampleSize int\n\tinput string\n\toutput string\n)\n\nfunc init() {\n\tconst (\n\t\tdefaultMaxl = 200\n\t)\n\tdefaultNCPU := runtime.NumCPU()\n\n\tflag.IntVar(&ncpu, \"ncpu\", defaultNCPU, \"ncpu\")\n\tflag.IntVar(&sampleSize, \"sample\", 1000, \"sample size of lineages\")\n\tflag.Parse()\n\tinput = flag.Arg(0)\n\toutput = flag.Arg(1)\n\n\truntime.GOMAXPROCS(ncpu)\n}\n\nfunc main() {\n\tconfigChan := read(input)\n\tconfigMap := make(map[int][]pop.Config)\n\tfor cfg := range configChan {\n\t\tconfigMap[cfg.Length] = append(configMap[cfg.Length], cfg)\n\t}\n\n\tvar results []Result\n\tfor seqLen, cfgs := range configMap {\n\t\tcfgChan := make(chan pop.Config)\n\t\tgo func() {\n\t\t\tdefer close(cfgChan)\n\t\t\tfor _, cfg := range cfgs {\n\t\t\t\tcfgChan <- cfg\n\t\t\t}\n\t\t}()\n\t\tres := run(cfgChan, seqLen)\n\t\tresults = append(results, res...)\n\t}\n\n\twrite(output, results)\n}\n\ntype popConfig struct {\n\tp *pop.Pop\n\tc pop.Config\n}\n\nfunc run(configChan chan pop.Config, seqLen int) []Result {\n\tsimResChan := batchSimu(configChan)\n\tcalcChan := calc(simResChan, seqLen)\n\tresults := collect(calcChan)\n\treturn results\n}\n\nfunc batchSimu(configChan chan pop.Config) (resChan chan popConfig) {\n\tnumWorker := runtime.GOMAXPROCS(0)\n\tresChan = make(chan popConfig, numWorker)\n\tdone := make(chan bool)\n\tsimulator := func() {\n\t\tdefer send(done)\n\t\tfor c := range configChan {\n\t\t\tp := simu(c)\n\t\t\tpc := popConfig{p: p, c: c}\n\t\t\tresChan <- pc\n\t\t}\n\t}\n\n\tfor i := 0; i < numWorker; i++ {\n\t\tgo simulator()\n\t}\n\n\tgo func() {\n\t\tdefer close(resChan)\n\t\twait(done, numWorker)\n\t}()\n\n\treturn resChan\n}\n\ntype calculators struct {\n\tks *calculator.Ks\n\tct *calculator.AutoCovFFTW\n\tt2, t3, t4 []float64\n}\n\nfunc (c *calculators) Increment(xs []float64) {\n\tfor i := 0; i < len(xs); i++ {\n\t\tc.ks.Increment(xs[i])\n\t}\n\tc.ct.Increment(xs)\n}\n\nfunc (c *calculators) Append(c2 *calculators) {\n\tc.ks.Append(c2.ks)\n\tc.ct.Append(c2.ct)\n\tc.t2 = append(c.t2, c2.t2...)\n\tc.t3 = append(c.t3, c2.t3...)\n\tc.t4 = append(c.t4, c2.t4...)\n}\n\ntype calcConfig struct {\n\tcfg pop.Config\n\tc *calculators\n}\n\nfunc calc(simResChan chan popConfig, seqLen int) chan calcConfig {\n\tnumWorker := runtime.GOMAXPROCS(0)\n\tcircular := true\n\tdft := correlation.NewFFTW(seqLen, circular)\n\tdone := make(chan bool)\n\n\tcalcChan := make(chan calcConfig, numWorker)\n\tworker := func() {\n\t\tdefer send(done)\n\t\tfor res := range simResChan {\n\t\t\tcc := calcConfig{}\n\t\t\tcc.cfg = res.c\n\n\t\t\tsequences := [][]byte{}\n\t\t\tfor _, g := range res.p.Genomes {\n\t\t\t\tsequences = append(sequences, g.Seq())\n\t\t\t}\n\t\t\tks := calculator.CalcKs(sequences)\n\t\t\tct := calculator.CalcCtFFTW(sequences, &dft)\n\t\t\tt2 := pop.CalcT2(res.p, sampleSize)\n\t\t\tt3 := pop.CalcT3(res.p, sampleSize)\n\t\t\tt4 := pop.CalcT4(res.p, sampleSize)\n\n\t\t\tcc.c = &calculators{}\n\t\t\tcc.c.ks = ks\n\t\t\tcc.c.ct = ct\n\t\t\tcc.c.t2 = t2\n\t\t\tcc.c.t3 = t3\n\t\t\tcc.c.t4 = t4\n\n\t\t\tcalcChan <- cc\n\t\t}\n\t}\n\n\tfor i := 0; i < numWorker; i++ {\n\t\tgo worker()\n\t}\n\n\tgo func() {\n\t\tdefer dft.Close()\n\t\tdefer close(calcChan)\n\t\twait(done, numWorker)\n\t}()\n\n\treturn calcChan\n}\n\nfunc collect(calcChan chan calcConfig) []Result {\n\tm := make(map[pop.Config]*calculators)\n\tvarm := make(map[pop.Config]*desc.Variance)\n\tfor cc := range calcChan {\n\t\tc, found := m[cc.cfg]\n\t\tv, _ := varm[cc.cfg]\n\t\tif !found {\n\t\t\tc = cc.c\n\t\t\tv = desc.NewVariance()\n\t\t} else {\n\t\t\tc.Append(cc.c)\n\t\t}\n\n\t\tv.Increment(c.ks.Mean.GetResult())\n\n\t\tm[cc.cfg] = c\n\t\tvarm[cc.cfg] = v\n\t}\n\n\tvar results []Result\n\tfor cfg, c := range m {\n\t\tres := Result{}\n\t\tres.Config = cfg\n\t\tres.C = createCovResult(c)\n\t\tres.C.KsVar = varm[cfg].GetResult()\n\t\tres.T2 = c.t2\n\t\tres.T3 = c.t3\n\t\tres.T4 = c.t4\n\t\tresults = append(results, res)\n\t}\n\n\treturn results\n}\n\nfunc newPop(c pop.Config, src rand.Source) *pop.Pop {\n\tp := pop.New()\n\tr := rand.New(src)\n\tg := pop.NewRandomPopGenerator(r, c.Size, c.Length, []byte(c.Alphabet))\n\tg.Operate(p)\n\treturn p\n}\n\nfunc generateEvents(p *pop.Pop, sampler pop.Sampler, mutateEvents []*pop.Event, numGen int, rng *randist.RNG) chan *pop.Event {\n\tc := make(chan *pop.Event)\n\n\tgo func() {\n\t\tdefer close(c)\n\t\tmutateRate := 0.0\n\t\tfor _, e := range mutateEvents {\n\t\t\tmutateRate += e.Rate\n\t\t}\n\n\t\tfor i := 0; i < numGen; i++ {\n\t\t\t\/\/ reproduction.\n\t\t\tsamplerEvent := &pop.Event{\n\t\t\t\tOps: sampler,\n\t\t\t\tPop: p,\n\t\t\t}\n\n\t\t\tsampler.Start()\n\t\t\tgo func() { c <- samplerEvent }()\n\t\t\tsampler.Wait()\n\n\t\t\tt := sampler.Time(p)\n\t\t\tnum := randist.PoissonRandomInt(rng, mutateRate*t*float64(p.Size()))\n\t\t\tfor j := 0; j < num; j++ {\n\t\t\t\tc <- pop.Emit(mutateEvents)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn c\n}\n\nfunc simu(c pop.Config) *pop.Pop {\n\tseed := time.Now().UnixNano()\n\tsrc := rand.NewSource(seed)\n\n\tp := newPop(c, src)\n\tr := rand.New(src)\n\n\trng := randist.NewRNG(randist.MT19937_1999)\n\tdefer rng.Free()\n\trng.Seed(time.Now().UnixNano())\n\n\tvar sampler pop.Sampler\n\tswitch c.SampleMethod {\n\tcase \"WrightFisher\":\n\t\tsampler = pop.NewWrightFisherSampler(rng)\n\tcase \"LinearSelection\":\n\t\tsampler = pop.NewLinearSelectionSampler(rng)\n\tdefault:\n\t\tsampler = pop.NewMoranSampler(rng)\n\t}\n\n\tmutationEvent := &pop.Event{\n\t\tOps: pop.NewSimpleMutator(r, []byte(c.Alphabet)),\n\t\tPop: p,\n\t\tRate: c.Mutation.Rate * float64(c.Length),\n\t}\n\n\tfMutator := pop.NewFitnessMutator(c.Mutation.Beneficial.S, 0, rng, pop.MutateStep)\n\tbeneficialMutationEvent := &pop.Event{\n\t\tOps: fMutator,\n\t\tPop: p,\n\t\tRate: c.Mutation.Beneficial.Rate * float64(c.Length),\n\t}\n\n\tlambda := 1.0 \/ float64(c.Transfer.In.Fragment)\n\tfragGenerator := pop.NewExpFrag(lambda, src)\n\ttransferEvent := &pop.Event{\n\t\tOps: pop.NewSimpleTransfer(fragGenerator, r),\n\t\tPop: p,\n\t\tRate: c.Transfer.In.Rate * float64(c.Length),\n\t}\n\n\totherEvents := []*pop.Event{mutationEvent, transferEvent, beneficialMutationEvent}\n\teventChan := generateEvents(p, sampler, otherEvents, c.NumGen, rng)\n\n\tpop.Evolve(eventChan)\n\treturn p\n}\n\nfunc send(done chan bool) {\n\tdone <- true\n}\n\nfunc wait(done chan bool, numWorker int) {\n\tfor i := 0; i < numWorker; i++ {\n\t\t<-done\n\t}\n}\n\nfunc read(filename string) (configChan chan pop.Config) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer f.Close()\n\n\tconfigs := readConfigs(f)\n\tconfigChan = make(chan pop.Config)\n\n\tmakeConfigChan := func() {\n\t\tdefer close(configChan)\n\t\tfor _, c := range configs {\n\t\t\tconfigChan <- c\n\t\t}\n\t}\n\n\tgo makeConfigChan()\n\n\treturn\n}\n\nfunc readConfigs(r io.Reader) (configs []pop.Config) {\n\td := json.NewDecoder(r)\n\terr := d.Decode(&configs)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn\n}\n\nfunc createCovResult(c *calculators) CovResult {\n\tvar cr CovResult\n\tcr.Ks = c.ks.Mean.GetResult()\n\tfor i := 0; i < c.ct.N; i++ {\n\t\tcr.Ct = append(cr.Ct, c.ct.GetResult(i))\n\t}\n\treturn cr\n}\n\nfunc write(filename string, results []Result) {\n\tw, err := os.Create(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer w.Close()\n\n\tencoder := json.NewEncoder(w)\n\tif err := encoder.Encode(results); err != nil {\n\t\tpanic(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/sha256\"\n\t\"crypto\/x509\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\tstdlog \"log\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/go-kit\/kit\/log\"\n\t\"github.com\/go-kit\/kit\/log\/level\"\n\tscepclient \"github.com\/micromdm\/scep\/client\"\n\t\"github.com\/micromdm\/scep\/scep\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ version info\nvar (\n\tversion = \"unknown\"\n)\n\ntype runCfg struct {\n\tdir string\n\tcsrPath string\n\tkeyPath string\n\tkeyBits int\n\tselfSignPath string\n\tcertPath string\n\tcn string\n\torg string\n\tou string\n\tlocality string\n\tprovince string\n\tcountry string\n\tchallenge string\n\tserverURL string\n\tcaSHA256 string\n\tdebug bool\n\tlogfmt string\n\tcaCertMsg string\n}\n\nfunc run(cfg runCfg) error {\n\tctx := context.Background()\n\tvar logger log.Logger\n\t{\n\t\tif strings.ToLower(cfg.logfmt) == \"json\" {\n\t\t\tlogger = log.NewJSONLogger(os.Stderr)\n\t\t} else {\n\t\t\tlogger = log.NewLogfmtLogger(os.Stderr)\n\t\t}\n\t\tstdlog.SetOutput(log.NewStdlibAdapter(logger))\n\t\tlogger = log.With(logger, \"ts\", log.DefaultTimestampUTC)\n\t\tif !cfg.debug {\n\t\t\tlogger = level.NewFilter(logger, level.AllowInfo())\n\t\t}\n\t}\n\tlginfo := level.Info(logger)\n\n\tclient, err := scepclient.New(cfg.serverURL, logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkey, err := loadOrMakeKey(cfg.keyPath, cfg.keyBits)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\topts := &csrOptions{\n\t\tcn: cfg.cn,\n\t\torg: cfg.org,\n\t\tcountry: strings.ToUpper(cfg.country),\n\t\tou: cfg.ou,\n\t\tlocality: cfg.locality,\n\t\tprovince: cfg.province,\n\t\tchallenge: cfg.challenge,\n\t\tkey: key,\n\t}\n\n\tcsr, err := loadOrMakeCSR(cfg.csrPath, opts)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tvar self *x509.Certificate\n\tcert, err := loadPEMCertFromFile(cfg.certPath)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn err\n\t\t}\n\t\ts, err := loadOrSign(cfg.selfSignPath, key, csr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tself = s\n\t}\n\n\tresp, certNum, err := client.GetCACert(ctx, cfg.caCertMsg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar certs []*x509.Certificate\n\t{\n\t\tif certNum > 1 {\n\t\t\tcerts, err = scep.CACerts(resp)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif len(certs) < 1 {\n\t\t\t\treturn fmt.Errorf(\"no certificates returned\")\n\t\t\t}\n\t\t} else {\n\t\t\tcerts, err = x509.ParseCertificates(resp)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tvar signerCert *x509.Certificate\n\t{\n\t\tif cert != nil {\n\t\t\tsignerCert = cert\n\t\t} else {\n\t\t\tsignerCert = self\n\t\t}\n\t}\n\n\tvar msgType scep.MessageType\n\t{\n\t\t\/\/ TODO validate CA and set UpdateReq if needed\n\t\tif cert != nil {\n\t\t\tmsgType = scep.RenewalReq\n\t\t} else {\n\t\t\tmsgType = scep.PKCSReq\n\t\t}\n\t}\n\n\tvar recipients []*x509.Certificate\n\tif cfg.caSHA256 == \"\" {\n\t\trecipients = certs\n\t} else {\n\t\tr, err := findRecipients(cfg.caSHA256, certs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trecipients = r\n\t}\n\n\ttmpl := &scep.PKIMessage{\n\t\tMessageType: msgType,\n\t\tRecipients: recipients,\n\t\tSignerKey: key,\n\t\tSignerCert: signerCert,\n\t}\n\n\tif cfg.challenge != \"\" && msgType == scep.PKCSReq {\n\t\ttmpl.CSRReqMessage = &scep.CSRReqMessage{\n\t\t\tChallengePassword: cfg.challenge,\n\t\t}\n\t}\n\n\tmsg, err := scep.NewCSRRequest(csr, tmpl, scep.WithLogger(logger))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"creating csr pkiMessage\")\n\t}\n\n\tvar respMsg *scep.PKIMessage\n\n\tfor {\n\t\t\/\/ loop in case we get a PENDING response which requires\n\t\t\/\/ a manual approval.\n\n\t\trespBytes, err := client.PKIOperation(ctx, msg.Raw)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"PKIOperation for %s\", msgType)\n\t\t}\n\n\t\trespMsg, err = scep.ParsePKIMessage(respBytes, scep.WithLogger(logger), scep.WithCACerts(recipients))\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing pkiMessage response %s\", msgType)\n\t\t}\n\n\t\tswitch respMsg.PKIStatus {\n\t\tcase scep.FAILURE:\n\t\t\treturn errors.Errorf(\"%s request failed, failInfo: %s\", msgType, respMsg.FailInfo)\n\t\tcase scep.PENDING:\n\t\t\tlginfo.Log(\"pkiStatus\", \"PENDING\", \"msg\", \"sleeping for 30 seconds, then trying again.\")\n\t\t\ttime.Sleep(30 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tlginfo.Log(\"pkiStatus\", \"SUCCESS\", \"msg\", \"server returned a certificate.\")\n\t\tbreak \/\/ on scep.SUCCESS\n\t}\n\n\tif err := respMsg.DecryptPKIEnvelope(signerCert, key); err != nil {\n\t\treturn errors.Wrapf(err, \"decrypt pkiEnvelope, msgType: %s, status %s\", msgType, respMsg.PKIStatus)\n\t}\n\n\trespCert := respMsg.CertRepMessage.Certificate\n\tif err := ioutil.WriteFile(cfg.certPath, pemCert(respCert.Raw), 0666); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ remove self signer if used\n\tif self != nil {\n\t\tif err := os.Remove(cfg.selfSignPath); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Determine the correct recipient based on the fingerprint.\n\/\/ In case of NDES that is the last certificate in the chain, not the RA cert.\n\/\/ Note: this function assumes that the input certs are sorted as a valid chain.\n\/\/ Return a full chain starting with the cert that matches the fingerprint.\nfunc findRecipients(fingerprint string, certs []*x509.Certificate) ([]*x509.Certificate, error) {\n\tfingerprint = strings.Join(strings.Split(fingerprint, \" \"), \"\")\n\tfingerprint = strings.ToLower(fingerprint)\n\tfor i, cert := range certs {\n\t\tsum := fmt.Sprintf(\"%x\", sha256.Sum256(cert.Raw))\n\t\tif sum == fingerprint {\n\t\t\treturn certs[i-1:], nil\n\t\t}\n\t}\n\treturn nil, errors.Errorf(\"could not find cert for sha256 fingerprint: %s\", fingerprint)\n}\n\nfunc validateFlags(keyPath, serverURL string) error {\n\tif keyPath == \"\" {\n\t\treturn errors.New(\"must specify private key path\")\n\t}\n\tif serverURL == \"\" {\n\t\treturn errors.New(\"must specify server-url flag parameter\")\n\t}\n\t_, err := url.Parse(serverURL)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid server-url flag parameter %s\", err)\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tvar (\n\t\tflVersion = flag.Bool(\"version\", false, \"prints version information\")\n\t\tflServerURL = flag.String(\"server-url\", \"\", \"SCEP server url\")\n\t\tflChallengePassword = flag.String(\"challenge\", \"\", \"enforce a challenge password\")\n\t\tflPKeyPath = flag.String(\"private-key\", \"\", \"private key path, if there is no key, scepclient will create one\")\n\t\tflCertPath = flag.String(\"certificate\", \"\", \"certificate path, if there is no key, scepclient will create one\")\n\t\tflKeySize = flag.Int(\"keySize\", 2048, \"rsa key size\")\n\t\tflOrg = flag.String(\"organization\", \"scep-client\", \"organization for cert\")\n\t\tflCName = flag.String(\"cn\", \"scepclient\", \"common name for certificate\")\n\t\tflOU = flag.String(\"ou\", \"MDM\", \"organizational unit for certificate\")\n\t\tflLoc = flag.String(\"locality\", \"\", \"locality for certificate\")\n\t\tflProvince = flag.String(\"province\", \"\", \"province for certificate\")\n\t\tflCountry = flag.String(\"country\", \"US\", \"country code in certificate\")\n\t\tflCACertMessage = flag.String(\"cacert-message\", \"\", \"message sent with GetCACert operation\")\n\n\t\t\/\/ in case of multiple certificate authorities, we need to figure out who the recipient of the encrypted\n\t\t\/\/ data is.\n\t\tflCAFingerprint = flag.String(\"ca-fingerprint\", \"\", \"SHA-256 digest of CA certificate for NDES server. Note: Changed from MD5.\")\n\n\t\tflDebugLogging = flag.Bool(\"debug\", false, \"enable debug logging\")\n\t\tflLogJSON = flag.Bool(\"log-json\", false, \"use JSON for log output\")\n\t)\n\tflag.Parse()\n\n\t\/\/ print version information\n\tif *flVersion {\n\t\tfmt.Println(version)\n\t\tos.Exit(0)\n\t}\n\n\tif err := validateFlags(*flPKeyPath, *flServerURL); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tdir := filepath.Dir(*flPKeyPath)\n\tcsrPath := dir + \"\/csr.pem\"\n\tselfSignPath := dir + \"\/self.pem\"\n\tif *flCertPath == \"\" {\n\t\t*flCertPath = dir + \"\/client.pem\"\n\t}\n\tvar logfmt string\n\tif *flLogJSON {\n\t\tlogfmt = \"json\"\n\t}\n\n\tcfg := runCfg{\n\t\tdir: dir,\n\t\tcsrPath: csrPath,\n\t\tkeyPath: *flPKeyPath,\n\t\tkeyBits: *flKeySize,\n\t\tselfSignPath: selfSignPath,\n\t\tcertPath: *flCertPath,\n\t\tcn: *flCName,\n\t\torg: *flOrg,\n\t\tcountry: *flCountry,\n\t\tlocality: *flLoc,\n\t\tou: *flOU,\n\t\tprovince: *flProvince,\n\t\tchallenge: *flChallengePassword,\n\t\tserverURL: *flServerURL,\n\t\tcaSHA256: *flCAFingerprint,\n\t\tdebug: *flDebugLogging,\n\t\tlogfmt: logfmt,\n\t\tcaCertMsg: *flCACertMessage,\n\t}\n\n\tif err := run(cfg); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}\n<commit_msg>Debug log CA certs and hashes in scepclient (#157)<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/sha256\"\n\t\"crypto\/x509\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\tstdlog \"log\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/go-kit\/kit\/log\"\n\t\"github.com\/go-kit\/kit\/log\/level\"\n\tscepclient \"github.com\/micromdm\/scep\/client\"\n\t\"github.com\/micromdm\/scep\/scep\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ version info\nvar (\n\tversion = \"unknown\"\n)\n\ntype runCfg struct {\n\tdir string\n\tcsrPath string\n\tkeyPath string\n\tkeyBits int\n\tselfSignPath string\n\tcertPath string\n\tcn string\n\torg string\n\tou string\n\tlocality string\n\tprovince string\n\tcountry string\n\tchallenge string\n\tserverURL string\n\tcaSHA256 string\n\tdebug bool\n\tlogfmt string\n\tcaCertMsg string\n}\n\nfunc run(cfg runCfg) error {\n\tctx := context.Background()\n\tvar logger log.Logger\n\t{\n\t\tif strings.ToLower(cfg.logfmt) == \"json\" {\n\t\t\tlogger = log.NewJSONLogger(os.Stderr)\n\t\t} else {\n\t\t\tlogger = log.NewLogfmtLogger(os.Stderr)\n\t\t}\n\t\tstdlog.SetOutput(log.NewStdlibAdapter(logger))\n\t\tlogger = log.With(logger, \"ts\", log.DefaultTimestampUTC)\n\t\tif !cfg.debug {\n\t\t\tlogger = level.NewFilter(logger, level.AllowInfo())\n\t\t}\n\t}\n\tlginfo := level.Info(logger)\n\n\tclient, err := scepclient.New(cfg.serverURL, logger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkey, err := loadOrMakeKey(cfg.keyPath, cfg.keyBits)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\topts := &csrOptions{\n\t\tcn: cfg.cn,\n\t\torg: cfg.org,\n\t\tcountry: strings.ToUpper(cfg.country),\n\t\tou: cfg.ou,\n\t\tlocality: cfg.locality,\n\t\tprovince: cfg.province,\n\t\tchallenge: cfg.challenge,\n\t\tkey: key,\n\t}\n\n\tcsr, err := loadOrMakeCSR(cfg.csrPath, opts)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tvar self *x509.Certificate\n\tcert, err := loadPEMCertFromFile(cfg.certPath)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn err\n\t\t}\n\t\ts, err := loadOrSign(cfg.selfSignPath, key, csr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tself = s\n\t}\n\n\tresp, certNum, err := client.GetCACert(ctx, cfg.caCertMsg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar certs []*x509.Certificate\n\t{\n\t\tif certNum > 1 {\n\t\t\tcerts, err = scep.CACerts(resp)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif len(certs) < 1 {\n\t\t\t\treturn fmt.Errorf(\"no certificates returned\")\n\t\t\t}\n\t\t} else {\n\t\t\tcerts, err = x509.ParseCertificates(resp)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif cfg.debug {\n\t\tlogCerts(level.Debug(logger), certs)\n\t}\n\n\tvar signerCert *x509.Certificate\n\t{\n\t\tif cert != nil {\n\t\t\tsignerCert = cert\n\t\t} else {\n\t\t\tsignerCert = self\n\t\t}\n\t}\n\n\tvar msgType scep.MessageType\n\t{\n\t\t\/\/ TODO validate CA and set UpdateReq if needed\n\t\tif cert != nil {\n\t\t\tmsgType = scep.RenewalReq\n\t\t} else {\n\t\t\tmsgType = scep.PKCSReq\n\t\t}\n\t}\n\n\tvar recipients []*x509.Certificate\n\tif cfg.caSHA256 == \"\" {\n\t\trecipients = certs\n\t} else {\n\t\tr, err := findRecipients(cfg.caSHA256, certs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trecipients = r\n\t}\n\n\ttmpl := &scep.PKIMessage{\n\t\tMessageType: msgType,\n\t\tRecipients: recipients,\n\t\tSignerKey: key,\n\t\tSignerCert: signerCert,\n\t}\n\n\tif cfg.challenge != \"\" && msgType == scep.PKCSReq {\n\t\ttmpl.CSRReqMessage = &scep.CSRReqMessage{\n\t\t\tChallengePassword: cfg.challenge,\n\t\t}\n\t}\n\n\tmsg, err := scep.NewCSRRequest(csr, tmpl, scep.WithLogger(logger))\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"creating csr pkiMessage\")\n\t}\n\n\tvar respMsg *scep.PKIMessage\n\n\tfor {\n\t\t\/\/ loop in case we get a PENDING response which requires\n\t\t\/\/ a manual approval.\n\n\t\trespBytes, err := client.PKIOperation(ctx, msg.Raw)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"PKIOperation for %s\", msgType)\n\t\t}\n\n\t\trespMsg, err = scep.ParsePKIMessage(respBytes, scep.WithLogger(logger), scep.WithCACerts(recipients))\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing pkiMessage response %s\", msgType)\n\t\t}\n\n\t\tswitch respMsg.PKIStatus {\n\t\tcase scep.FAILURE:\n\t\t\treturn errors.Errorf(\"%s request failed, failInfo: %s\", msgType, respMsg.FailInfo)\n\t\tcase scep.PENDING:\n\t\t\tlginfo.Log(\"pkiStatus\", \"PENDING\", \"msg\", \"sleeping for 30 seconds, then trying again.\")\n\t\t\ttime.Sleep(30 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tlginfo.Log(\"pkiStatus\", \"SUCCESS\", \"msg\", \"server returned a certificate.\")\n\t\tbreak \/\/ on scep.SUCCESS\n\t}\n\n\tif err := respMsg.DecryptPKIEnvelope(signerCert, key); err != nil {\n\t\treturn errors.Wrapf(err, \"decrypt pkiEnvelope, msgType: %s, status %s\", msgType, respMsg.PKIStatus)\n\t}\n\n\trespCert := respMsg.CertRepMessage.Certificate\n\tif err := ioutil.WriteFile(cfg.certPath, pemCert(respCert.Raw), 0666); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ remove self signer if used\n\tif self != nil {\n\t\tif err := os.Remove(cfg.selfSignPath); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ logCerts logs the count, number, RDN, and SHA-256 of certs to logger\nfunc logCerts(logger log.Logger, certs []*x509.Certificate) {\n\tlogger.Log(\"msg\", \"cacertlist\", \"count\", len(certs))\n\tfor i, cert := range certs {\n\t\tlogger.Log(\n\t\t\t\"msg\", \"cacertlist\",\n\t\t\t\"number\", i,\n\t\t\t\"rdn\", cert.Subject.ToRDNSequence().String(),\n\t\t\t\"sha256\", fmt.Sprintf(\"%x\", sha256.Sum256(cert.Raw)),\n\t\t)\n\t}\n}\n\n\/\/ Determine the correct recipient based on the fingerprint.\n\/\/ In case of NDES that is the last certificate in the chain, not the RA cert.\n\/\/ Return a full chain starting with the cert that matches the fingerprint.\nfunc findRecipients(fingerprint string, certs []*x509.Certificate) ([]*x509.Certificate, error) {\n\tfingerprint = strings.Join(strings.Split(fingerprint, \" \"), \"\")\n\tfingerprint = strings.ToLower(fingerprint)\n\tfor i, cert := range certs {\n\t\tsum := fmt.Sprintf(\"%x\", sha256.Sum256(cert.Raw))\n\t\tif sum == fingerprint {\n\t\t\treturn certs[i-1:], nil\n\t\t}\n\t}\n\treturn nil, errors.Errorf(\"could not find cert for sha256 fingerprint: %s\", fingerprint)\n}\n\nfunc validateFlags(keyPath, serverURL string) error {\n\tif keyPath == \"\" {\n\t\treturn errors.New(\"must specify private key path\")\n\t}\n\tif serverURL == \"\" {\n\t\treturn errors.New(\"must specify server-url flag parameter\")\n\t}\n\t_, err := url.Parse(serverURL)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid server-url flag parameter %s\", err)\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tvar (\n\t\tflVersion = flag.Bool(\"version\", false, \"prints version information\")\n\t\tflServerURL = flag.String(\"server-url\", \"\", \"SCEP server url\")\n\t\tflChallengePassword = flag.String(\"challenge\", \"\", \"enforce a challenge password\")\n\t\tflPKeyPath = flag.String(\"private-key\", \"\", \"private key path, if there is no key, scepclient will create one\")\n\t\tflCertPath = flag.String(\"certificate\", \"\", \"certificate path, if there is no key, scepclient will create one\")\n\t\tflKeySize = flag.Int(\"keySize\", 2048, \"rsa key size\")\n\t\tflOrg = flag.String(\"organization\", \"scep-client\", \"organization for cert\")\n\t\tflCName = flag.String(\"cn\", \"scepclient\", \"common name for certificate\")\n\t\tflOU = flag.String(\"ou\", \"MDM\", \"organizational unit for certificate\")\n\t\tflLoc = flag.String(\"locality\", \"\", \"locality for certificate\")\n\t\tflProvince = flag.String(\"province\", \"\", \"province for certificate\")\n\t\tflCountry = flag.String(\"country\", \"US\", \"country code in certificate\")\n\t\tflCACertMessage = flag.String(\"cacert-message\", \"\", \"message sent with GetCACert operation\")\n\n\t\t\/\/ in case of multiple certificate authorities, we need to figure out who the recipient of the encrypted\n\t\t\/\/ data is.\n\t\tflCAFingerprint = flag.String(\"ca-fingerprint\", \"\", \"SHA-256 digest of CA certificate for NDES server. Note: Changed from MD5.\")\n\n\t\tflDebugLogging = flag.Bool(\"debug\", false, \"enable debug logging\")\n\t\tflLogJSON = flag.Bool(\"log-json\", false, \"use JSON for log output\")\n\t)\n\tflag.Parse()\n\n\t\/\/ print version information\n\tif *flVersion {\n\t\tfmt.Println(version)\n\t\tos.Exit(0)\n\t}\n\n\tif err := validateFlags(*flPKeyPath, *flServerURL); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tdir := filepath.Dir(*flPKeyPath)\n\tcsrPath := dir + \"\/csr.pem\"\n\tselfSignPath := dir + \"\/self.pem\"\n\tif *flCertPath == \"\" {\n\t\t*flCertPath = dir + \"\/client.pem\"\n\t}\n\tvar logfmt string\n\tif *flLogJSON {\n\t\tlogfmt = \"json\"\n\t}\n\n\tcfg := runCfg{\n\t\tdir: dir,\n\t\tcsrPath: csrPath,\n\t\tkeyPath: *flPKeyPath,\n\t\tkeyBits: *flKeySize,\n\t\tselfSignPath: selfSignPath,\n\t\tcertPath: *flCertPath,\n\t\tcn: *flCName,\n\t\torg: *flOrg,\n\t\tcountry: *flCountry,\n\t\tlocality: *flLoc,\n\t\tou: *flOU,\n\t\tprovince: *flProvince,\n\t\tchallenge: *flChallengePassword,\n\t\tserverURL: *flServerURL,\n\t\tcaSHA256: *flCAFingerprint,\n\t\tdebug: *flDebugLogging,\n\t\tlogfmt: logfmt,\n\t\tcaCertMsg: *flCACertMessage,\n\t}\n\n\tif err := run(cfg); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main_test\n\nimport (\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/bbs\/events\"\n\t\"code.cloudfoundry.org\/bbs\/models\"\n\t\"code.cloudfoundry.org\/clock\"\n\t\"code.cloudfoundry.org\/localip\"\n\t\"code.cloudfoundry.org\/locket\"\n\tlocketconfig \"code.cloudfoundry.org\/locket\/cmd\/locket\/config\"\n\tlocketrunner \"code.cloudfoundry.org\/locket\/cmd\/locket\/testrunner\"\n\t\"code.cloudfoundry.org\/locket\/lock\"\n\tlocketmodels \"code.cloudfoundry.org\/locket\/models\"\n\t\"code.cloudfoundry.org\/runtimeschema\/cc_messages\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/ginkgomon\"\n)\n\nconst watcherLockName = \"tps_watcher_lock\"\n\nvar _ = Describe(\"TPS\", func() {\n\tvar (\n\t\tdomain string\n\t\tlocketRunner ifrit.Runner\n\t\tlocketProcess ifrit.Process\n\t\tlocketAddress string\n\t)\n\n\tBeforeEach(func() {\n\t\tlocketPort, err := localip.LocalPort()\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tdbName := fmt.Sprintf(\"locket_%d\", GinkgoParallelNode())\n\t\tconnectionString := \"postgres:\/\/locket:locket_pw@localhost\"\n\t\tdb, err := sql.Open(\"postgres\", connectionString+\"?sslmode=disable\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(db.Ping()).NotTo(HaveOccurred())\n\n\t\t_, err = db.Exec(fmt.Sprintf(\"DROP DATABASE IF EXISTS %s\", dbName))\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t_, err = db.Exec(fmt.Sprintf(\"CREATE DATABASE %s\", dbName))\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tlocketBinName := \"locket\"\n\t\tlocketAddress = fmt.Sprintf(\"localhost:%d\", locketPort)\n\t\tlocketRunner = locketrunner.NewLocketRunner(locketBinName, func(cfg *locketconfig.LocketConfig) {\n\t\t\tcfg.DatabaseConnectionString = connectionString + \"\/\" + dbName\n\t\t\tcfg.DatabaseDriver = \"postgres\"\n\t\t\tcfg.ListenAddress = locketAddress\n\t\t})\n\t\tlocketProcess = ginkgomon.Invoke(locketRunner)\n\n\t\twatcherConfig.ClientLocketConfig = locketrunner.ClientLocketConfig()\n\t\twatcherConfig.ClientLocketConfig.LocketAddress = locketAddress\n\n\t\tfakeBBS.AllowUnhandledRequests = true\n\n\t\tdomain = cc_messages.AppLRPDomain\n\t})\n\n\tAfterEach(func() {\n\t\tginkgomon.Interrupt(watcher, 5*time.Second)\n\t\tginkgomon.Interrupt(locketProcess, 5*time.Second)\n\n\t\tif watcher != nil {\n\t\t\twatcher.Signal(os.Kill)\n\t\t\tEventually(watcher.Wait()).Should(Receive())\n\t\t}\n\t})\n\n\tDescribe(\"Crashed Apps\", func() {\n\t\tvar (\n\t\t\tready chan struct{}\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tready = make(chan struct{})\n\t\t\tfakeCC.RouteToHandler(\"POST\", \"\/internal\/v4\/apps\/some-process-guid\/crashed\", func(res http.ResponseWriter, req *http.Request) {\n\t\t\t\tvar appCrashed cc_messages.AppCrashedRequest\n\n\t\t\t\tbytes, err := ioutil.ReadAll(req.Body)\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\treq.Body.Close()\n\n\t\t\t\terr = json.Unmarshal(bytes, &appCrashed)\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\tExpect(appCrashed.CrashTimestamp).NotTo(BeZero())\n\t\t\t\tappCrashed.CrashTimestamp = 0\n\n\t\t\t\tExpect(appCrashed).To(Equal(cc_messages.AppCrashedRequest{\n\t\t\t\t\tInstance: \"some-instance-guid-1\",\n\t\t\t\t\tIndex: 1,\n\t\t\t\t\tCellID: \"cell-id\",\n\t\t\t\t\tReason: \"CRASHED\",\n\t\t\t\t\tExitDescription: \"out of memory\",\n\t\t\t\t\tCrashCount: 1,\n\t\t\t\t}))\n\n\t\t\t\tclose(ready)\n\t\t\t})\n\n\t\t\tlrpKey := models.NewActualLRPKey(\"some-process-guid\", 1, domain)\n\t\t\tinstanceKey := models.NewActualLRPInstanceKey(\"some-instance-guid-1\", \"cell-id\")\n\t\t\tnetInfo := models.NewActualLRPNetInfo(\"1.2.3.4\", \"5.6.7.8\", models.ActualLRPNetInfo_PreferredAddressHost, models.NewPortMapping(65100, 8080))\n\t\t\tbeforeActualLRP := *models.NewRunningActualLRP(lrpKey, instanceKey, netInfo, 0)\n\t\t\tafterActualLRP := beforeActualLRP\n\t\t\tafterActualLRP.State = models.ActualLRPStateCrashed\n\t\t\tafterActualLRP.Since = int64(1)\n\t\t\tafterActualLRP.CrashCount = 1\n\t\t\tafterActualLRP.CrashReason = \"out of memory\"\n\n\t\t\tfakeBBS.RouteToHandler(\"POST\", \"\/v1\/events\/lrp_instances.r1\",\n\t\t\t\tfunc(w http.ResponseWriter, _ *http.Request) {\n\t\t\t\t\tw.Header().Add(\"Content-Type\", \"text\/event-stream; charset=utf-8\")\n\t\t\t\t\tw.Header().Add(\"Cache-Control\", \"no-cache, no-store, must-revalidate\")\n\t\t\t\t\tw.Header().Add(\"Connection\", \"keep-alive\")\n\n\t\t\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\t\t\t\tflusher := w.(http.Flusher)\n\t\t\t\t\tflusher.Flush()\n\t\t\t\t\tcloseNotifier := w.(http.CloseNotifier).CloseNotify()\n\t\t\t\t\tevent := models.NewActualLRPCrashedEvent(&beforeActualLRP, &afterActualLRP)\n\n\t\t\t\t\tsseEvent, err := events.NewEventFromModelEvent(0, event)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\t\terr = sseEvent.Write(w)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\t\tflusher.Flush()\n\n\t\t\t\t\t<-closeNotifier\n\t\t\t\t},\n\t\t\t)\n\t\t})\n\n\t\tIt(\"POSTs to the CC that the application has crashed\", func() {\n\t\t\tEventually(ready, 5*time.Second).Should(BeClosed())\n\t\t})\n\t})\n\n\tDescribe(\"SqlLock\", func() {\n\t\tContext(\"with invalid configuration\", func() {\n\t\t\tContext(\"and the locket address is not configured\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\twatcherConfig.LocketAddress = \"\"\n\t\t\t\t\tdisableStartCheck = true\n\t\t\t\t})\n\n\t\t\t\tIt(\"exits with an error\", func() {\n\t\t\t\t\tEventually(runner).Should(gexec.Exit(2))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tContext(\"with valid configuration\", func() {\n\t\t\tIt(\"acquires the lock in locket and becomes active\", func() {\n\t\t\t\tEventually(runner.Buffer, 5*time.Second).Should(gbytes.Say(\"tps-watcher.started\"))\n\t\t\t})\n\n\t\t\tContext(\"and the locking server becomes unreachable after grabbing the lock\", func() {\n\t\t\t\tJustBeforeEach(func() {\n\t\t\t\t\tEventually(runner.Buffer, 5*time.Second).Should(gbytes.Say(\"tps-watcher.started\"))\n\n\t\t\t\t\tginkgomon.Interrupt(locketProcess, 5*time.Second)\n\t\t\t\t})\n\n\t\t\t\tIt(\"exits after the TTL expires\", func() {\n\t\t\t\t\tEventually(runner, 17*time.Second).Should(gexec.Exit(1))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when the lock is not available\", func() {\n\t\t\t\tvar competingProcess ifrit.Process\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tlocketClient, err := locket.NewClient(logger, watcherConfig.ClientLocketConfig)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\t\tlockIdentifier := &locketmodels.Resource{\n\t\t\t\t\t\tKey: \"tps_watcher\",\n\t\t\t\t\t\tOwner: \"Your worst enemy.\",\n\t\t\t\t\t\tValue: \"Something\",\n\t\t\t\t\t\tTypeCode: locketmodels.LOCK,\n\t\t\t\t\t}\n\n\t\t\t\t\tclock := clock.NewClock()\n\t\t\t\t\tcompetingRunner := lock.NewLockRunner(logger, locketClient, lockIdentifier, 5, clock, locket.RetryInterval)\n\t\t\t\t\tcompetingProcess = ginkgomon.Invoke(competingRunner)\n\n\t\t\t\t\tdisableStartCheck = true\n\t\t\t\t})\n\n\t\t\t\tAfterEach(func() {\n\t\t\t\t\tginkgomon.Interrupt(competingProcess, 5*time.Second)\n\t\t\t\t})\n\n\t\t\t\tIt(\"does not become active\", func() {\n\t\t\t\t\tConsistently(runner.Buffer, 5*time.Second).ShouldNot(gbytes.Say(\"tps-watcher.started\"))\n\t\t\t\t})\n\n\t\t\t\tContext(\"and the lock becomes available\", func() {\n\t\t\t\t\tJustBeforeEach(func() {\n\t\t\t\t\t\tConsistently(runner.Buffer, 5*time.Second).ShouldNot(gbytes.Say(\"tps-watcher.started\"))\n\n\t\t\t\t\t\tginkgomon.Interrupt(competingProcess, 5*time.Second)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"grabs the lock and becomes active\", func() {\n\t\t\t\t\t\tEventually(runner.Buffer, 5*time.Second).Should(gbytes.Say(\"tps-watcher.started\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<commit_msg>Revert \"Update test to use instance events streaming route\"<commit_after>package main_test\n\nimport (\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/bbs\/events\"\n\t\"code.cloudfoundry.org\/bbs\/models\"\n\t\"code.cloudfoundry.org\/clock\"\n\t\"code.cloudfoundry.org\/localip\"\n\t\"code.cloudfoundry.org\/locket\"\n\tlocketconfig \"code.cloudfoundry.org\/locket\/cmd\/locket\/config\"\n\tlocketrunner \"code.cloudfoundry.org\/locket\/cmd\/locket\/testrunner\"\n\t\"code.cloudfoundry.org\/locket\/lock\"\n\tlocketmodels \"code.cloudfoundry.org\/locket\/models\"\n\t\"code.cloudfoundry.org\/runtimeschema\/cc_messages\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/ginkgomon\"\n)\n\nconst watcherLockName = \"tps_watcher_lock\"\n\nvar _ = Describe(\"TPS\", func() {\n\tvar (\n\t\tdomain string\n\t\tlocketRunner ifrit.Runner\n\t\tlocketProcess ifrit.Process\n\t\tlocketAddress string\n\t)\n\n\tBeforeEach(func() {\n\t\tlocketPort, err := localip.LocalPort()\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tdbName := fmt.Sprintf(\"locket_%d\", GinkgoParallelNode())\n\t\tconnectionString := \"postgres:\/\/locket:locket_pw@localhost\"\n\t\tdb, err := sql.Open(\"postgres\", connectionString+\"?sslmode=disable\")\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tExpect(db.Ping()).NotTo(HaveOccurred())\n\n\t\t_, err = db.Exec(fmt.Sprintf(\"DROP DATABASE IF EXISTS %s\", dbName))\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t_, err = db.Exec(fmt.Sprintf(\"CREATE DATABASE %s\", dbName))\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tlocketBinName := \"locket\"\n\t\tlocketAddress = fmt.Sprintf(\"localhost:%d\", locketPort)\n\t\tlocketRunner = locketrunner.NewLocketRunner(locketBinName, func(cfg *locketconfig.LocketConfig) {\n\t\t\tcfg.DatabaseConnectionString = connectionString + \"\/\" + dbName\n\t\t\tcfg.DatabaseDriver = \"postgres\"\n\t\t\tcfg.ListenAddress = locketAddress\n\t\t})\n\t\tlocketProcess = ginkgomon.Invoke(locketRunner)\n\n\t\twatcherConfig.ClientLocketConfig = locketrunner.ClientLocketConfig()\n\t\twatcherConfig.ClientLocketConfig.LocketAddress = locketAddress\n\n\t\tfakeBBS.AllowUnhandledRequests = true\n\n\t\tdomain = cc_messages.AppLRPDomain\n\t})\n\n\tAfterEach(func() {\n\t\tginkgomon.Interrupt(watcher, 5*time.Second)\n\t\tginkgomon.Interrupt(locketProcess, 5*time.Second)\n\n\t\tif watcher != nil {\n\t\t\twatcher.Signal(os.Kill)\n\t\t\tEventually(watcher.Wait()).Should(Receive())\n\t\t}\n\t})\n\n\tDescribe(\"Crashed Apps\", func() {\n\t\tvar (\n\t\t\tready chan struct{}\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tready = make(chan struct{})\n\t\t\tfakeCC.RouteToHandler(\"POST\", \"\/internal\/v4\/apps\/some-process-guid\/crashed\", func(res http.ResponseWriter, req *http.Request) {\n\t\t\t\tvar appCrashed cc_messages.AppCrashedRequest\n\n\t\t\t\tbytes, err := ioutil.ReadAll(req.Body)\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\treq.Body.Close()\n\n\t\t\t\terr = json.Unmarshal(bytes, &appCrashed)\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\tExpect(appCrashed.CrashTimestamp).NotTo(BeZero())\n\t\t\t\tappCrashed.CrashTimestamp = 0\n\n\t\t\t\tExpect(appCrashed).To(Equal(cc_messages.AppCrashedRequest{\n\t\t\t\t\tInstance: \"some-instance-guid-1\",\n\t\t\t\t\tIndex: 1,\n\t\t\t\t\tCellID: \"cell-id\",\n\t\t\t\t\tReason: \"CRASHED\",\n\t\t\t\t\tExitDescription: \"out of memory\",\n\t\t\t\t\tCrashCount: 1,\n\t\t\t\t}))\n\n\t\t\t\tclose(ready)\n\t\t\t})\n\n\t\t\tlrpKey := models.NewActualLRPKey(\"some-process-guid\", 1, domain)\n\t\t\tinstanceKey := models.NewActualLRPInstanceKey(\"some-instance-guid-1\", \"cell-id\")\n\t\t\tnetInfo := models.NewActualLRPNetInfo(\"1.2.3.4\", \"5.6.7.8\", models.ActualLRPNetInfo_PreferredAddressHost, models.NewPortMapping(65100, 8080))\n\t\t\tbeforeActualLRP := *models.NewRunningActualLRP(lrpKey, instanceKey, netInfo, 0)\n\t\t\tafterActualLRP := beforeActualLRP\n\t\t\tafterActualLRP.State = models.ActualLRPStateCrashed\n\t\t\tafterActualLRP.Since = int64(1)\n\t\t\tafterActualLRP.CrashCount = 1\n\t\t\tafterActualLRP.CrashReason = \"out of memory\"\n\n\t\t\tfakeBBS.RouteToHandler(\"GET\", \"\/v1\/events.r1\",\n\t\t\t\tfunc(w http.ResponseWriter, _ *http.Request) {\n\t\t\t\t\tw.Header().Add(\"Content-Type\", \"text\/event-stream; charset=utf-8\")\n\t\t\t\t\tw.Header().Add(\"Cache-Control\", \"no-cache, no-store, must-revalidate\")\n\t\t\t\t\tw.Header().Add(\"Connection\", \"keep-alive\")\n\n\t\t\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\t\t\t\tflusher := w.(http.Flusher)\n\t\t\t\t\tflusher.Flush()\n\t\t\t\t\tcloseNotifier := w.(http.CloseNotifier).CloseNotify()\n\t\t\t\t\tevent := models.NewActualLRPCrashedEvent(&beforeActualLRP, &afterActualLRP)\n\n\t\t\t\t\tsseEvent, err := events.NewEventFromModelEvent(0, event)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\t\terr = sseEvent.Write(w)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\t\tflusher.Flush()\n\n\t\t\t\t\t<-closeNotifier\n\t\t\t\t},\n\t\t\t)\n\t\t})\n\n\t\tIt(\"POSTs to the CC that the application has crashed\", func() {\n\t\t\tEventually(ready, 5*time.Second).Should(BeClosed())\n\t\t})\n\t})\n\n\tDescribe(\"SqlLock\", func() {\n\t\tContext(\"with invalid configuration\", func() {\n\t\t\tContext(\"and the locket address is not configured\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\twatcherConfig.LocketAddress = \"\"\n\t\t\t\t\tdisableStartCheck = true\n\t\t\t\t})\n\n\t\t\t\tIt(\"exits with an error\", func() {\n\t\t\t\t\tEventually(runner).Should(gexec.Exit(2))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tContext(\"with valid configuration\", func() {\n\t\t\tIt(\"acquires the lock in locket and becomes active\", func() {\n\t\t\t\tEventually(runner.Buffer, 5*time.Second).Should(gbytes.Say(\"tps-watcher.started\"))\n\t\t\t})\n\n\t\t\tContext(\"and the locking server becomes unreachable after grabbing the lock\", func() {\n\t\t\t\tJustBeforeEach(func() {\n\t\t\t\t\tEventually(runner.Buffer, 5*time.Second).Should(gbytes.Say(\"tps-watcher.started\"))\n\n\t\t\t\t\tginkgomon.Interrupt(locketProcess, 5*time.Second)\n\t\t\t\t})\n\n\t\t\t\tIt(\"exits after the TTL expires\", func() {\n\t\t\t\t\tEventually(runner, 17*time.Second).Should(gexec.Exit(1))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when the lock is not available\", func() {\n\t\t\t\tvar competingProcess ifrit.Process\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tlocketClient, err := locket.NewClient(logger, watcherConfig.ClientLocketConfig)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\t\tlockIdentifier := &locketmodels.Resource{\n\t\t\t\t\t\tKey: \"tps_watcher\",\n\t\t\t\t\t\tOwner: \"Your worst enemy.\",\n\t\t\t\t\t\tValue: \"Something\",\n\t\t\t\t\t\tTypeCode: locketmodels.LOCK,\n\t\t\t\t\t}\n\n\t\t\t\t\tclock := clock.NewClock()\n\t\t\t\t\tcompetingRunner := lock.NewLockRunner(logger, locketClient, lockIdentifier, 5, clock, locket.RetryInterval)\n\t\t\t\t\tcompetingProcess = ginkgomon.Invoke(competingRunner)\n\n\t\t\t\t\tdisableStartCheck = true\n\t\t\t\t})\n\n\t\t\t\tAfterEach(func() {\n\t\t\t\t\tginkgomon.Interrupt(competingProcess, 5*time.Second)\n\t\t\t\t})\n\n\t\t\t\tIt(\"does not become active\", func() {\n\t\t\t\t\tConsistently(runner.Buffer, 5*time.Second).ShouldNot(gbytes.Say(\"tps-watcher.started\"))\n\t\t\t\t})\n\n\t\t\t\tContext(\"and the lock becomes available\", func() {\n\t\t\t\t\tJustBeforeEach(func() {\n\t\t\t\t\t\tConsistently(runner.Buffer, 5*time.Second).ShouldNot(gbytes.Say(\"tps-watcher.started\"))\n\n\t\t\t\t\t\tginkgomon.Interrupt(competingProcess, 5*time.Second)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"grabs the lock and becomes active\", func() {\n\t\t\t\t\t\tEventually(runner.Buffer, 5*time.Second).Should(gbytes.Say(\"tps-watcher.started\"))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/lambda\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nvar LambdaFunctionRegexp = `^(arn:[\\w-]+:lambda:)?([a-z]{2}-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?$`\n\nfunc resourceAwsLambdaPermission() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsLambdaPermissionCreate,\n\t\tRead: resourceAwsLambdaPermissionRead,\n\t\tDelete: resourceAwsLambdaPermissionDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"action\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validateLambdaPermissionAction,\n\t\t\t},\n\t\t\t\"function_name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validateLambdaFunctionName,\n\t\t\t},\n\t\t\t\"principal\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"qualifier\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validateLambdaQualifier,\n\t\t\t},\n\t\t\t\"source_account\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validateAwsAccountId,\n\t\t\t},\n\t\t\t\"source_arn\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validateArn,\n\t\t\t},\n\t\t\t\"statement_id\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validatePolicyStatementId,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceAwsLambdaPermissionCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).lambdaconn\n\n\tfunctionName := d.Get(\"function_name\").(string)\n\n\t\/\/ There is a bug in the API (reported and acknowledged by AWS)\n\t\/\/ which causes some permissions to be ignored when API calls are sent in parallel\n\t\/\/ We work around this bug via mutex\n\tawsMutexKV.Lock(functionName)\n\tdefer awsMutexKV.Unlock(functionName)\n\n\tinput := lambda.AddPermissionInput{\n\t\tAction: aws.String(d.Get(\"action\").(string)),\n\t\tFunctionName: aws.String(functionName),\n\t\tPrincipal: aws.String(d.Get(\"principal\").(string)),\n\t\tStatementId: aws.String(d.Get(\"statement_id\").(string)),\n\t}\n\n\tif v, ok := d.GetOk(\"qualifier\"); ok {\n\t\tinput.Qualifier = aws.String(v.(string))\n\t}\n\tif v, ok := d.GetOk(\"source_account\"); ok {\n\t\tinput.SourceAccount = aws.String(v.(string))\n\t}\n\tif v, ok := d.GetOk(\"source_arn\"); ok {\n\t\tinput.SourceArn = aws.String(v.(string))\n\t}\n\n\tlog.Printf(\"[DEBUG] Adding new Lambda permission: %s\", input)\n\tvar out *lambda.AddPermissionOutput\n\terr := resource.Retry(1*time.Minute, func() *resource.RetryError {\n\t\tvar err error\n\t\tout, err = conn.AddPermission(&input)\n\n\t\tif err != nil {\n\t\t\tif awsErr, ok := err.(awserr.Error); ok {\n\t\t\t\t\/\/ IAM is eventually consistent :\/\n\t\t\t\tif awsErr.Code() == \"ResourceConflictException\" {\n\t\t\t\t\treturn resource.RetryableError(\n\t\t\t\t\t\tfmt.Errorf(\"[WARN] Error adding new Lambda Permission for %s, retrying: %s\",\n\t\t\t\t\t\t\t*input.FunctionName, err))\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn resource.NonRetryableError(err)\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif out != nil && out.Statement != nil {\n\t\tlog.Printf(\"[DEBUG] Created new Lambda permission: %s\", *out.Statement)\n\t} else {\n\t\tlog.Printf(\"[DEBUG] Created new Lambda permission, but no Statement was included\")\n\t}\n\n\td.SetId(d.Get(\"statement_id\").(string))\n\n\terr = resource.Retry(5*time.Minute, func() *resource.RetryError {\n\t\t\/\/ IAM is eventually cosistent :\/\n\t\terr := resourceAwsLambdaPermissionRead(d, meta)\n\t\tif err != nil {\n\t\t\tif strings.HasPrefix(err.Error(), \"Error reading Lambda policy: ResourceNotFoundException\") {\n\t\t\t\treturn resource.RetryableError(\n\t\t\t\t\tfmt.Errorf(\"[WARN] Error reading newly created Lambda Permission for %s, retrying: %s\",\n\t\t\t\t\t\t*input.FunctionName, err))\n\t\t\t}\n\t\t\tif strings.HasPrefix(err.Error(), \"Failed to find statement \\\"\"+d.Id()) {\n\t\t\t\treturn resource.RetryableError(\n\t\t\t\t\tfmt.Errorf(\"[WARN] Error reading newly created Lambda Permission statement for %s, retrying: %s\",\n\t\t\t\t\t\t*input.FunctionName, err))\n\t\t\t}\n\n\t\t\tlog.Printf(\"[ERROR] An actual error occurred when expecting Lambda policy to be there: %s\", err)\n\t\t\treturn resource.NonRetryableError(err)\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn err\n}\n\nfunc resourceAwsLambdaPermissionRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).lambdaconn\n\n\tinput := lambda.GetPolicyInput{\n\t\tFunctionName: aws.String(d.Get(\"function_name\").(string)),\n\t}\n\tif v, ok := d.GetOk(\"qualifier\"); ok {\n\t\tinput.Qualifier = aws.String(v.(string))\n\t}\n\n\tlog.Printf(\"[DEBUG] Looking for Lambda permission: %s\", input)\n\tvar out *lambda.GetPolicyOutput\n\tvar statement *LambdaPolicyStatement\n\terr := resource.Retry(1*time.Minute, func() *resource.RetryError {\n\t\t\/\/ IAM is eventually cosistent :\/\n\t\tvar err error\n\t\tout, err = conn.GetPolicy(&input)\n\t\tif err != nil {\n\t\t\tif awsErr, ok := err.(awserr.Error); ok {\n\t\t\t\tif awsErr.Code() == \"ResourceNotFoundException\" {\n\t\t\t\t\treturn resource.RetryableError(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn resource.NonRetryableError(err)\n\t\t}\n\n\t\tpolicyInBytes := []byte(*out.Policy)\n\t\tpolicy := LambdaPolicy{}\n\t\terr = json.Unmarshal(policyInBytes, &policy)\n\t\tif err != nil {\n\t\t\treturn resource.NonRetryableError(err)\n\t\t}\n\n\t\tstatement, err = findLambdaPolicyStatementById(&policy, d.Id())\n\t\treturn resource.RetryableError(err)\n\t})\n\n\tif err != nil {\n\t\t\/\/ Missing whole policy or Lambda function (API error)\n\t\tif awsErr, ok := err.(awserr.Error); ok {\n\t\t\tif awsErr.Code() == \"ResourceNotFoundException\" {\n\t\t\t\tlog.Printf(\"[WARN] No Lambda Permission Policy found: %v\", input)\n\t\t\t\td.SetId(\"\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Missing permission inside valid policy\n\t\tif nfErr, ok := err.(*resource.NotFoundError); ok {\n\t\t\tlog.Printf(\"[WARN] %s\", nfErr)\n\t\t\td.SetId(\"\")\n\t\t\treturn nil\n\t\t}\n\n\t\treturn err\n\t}\n\n\tqualifier, err := getQualifierFromLambdaAliasOrVersionArn(statement.Resource)\n\tif err == nil {\n\t\td.Set(\"qualifier\", qualifier)\n\t}\n\n\t\/\/ Save Lambda function name in the same format\n\tif strings.HasPrefix(d.Get(\"function_name\").(string), \"arn:\"+meta.(*AWSClient).partition+\":lambda:\") {\n\t\t\/\/ Strip qualifier off\n\t\ttrimmedArn := strings.TrimSuffix(statement.Resource, \":\"+qualifier)\n\t\td.Set(\"function_name\", trimmedArn)\n\t} else {\n\t\tfunctionName, err := getFunctionNameFromLambdaArn(statement.Resource)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.Set(\"function_name\", functionName)\n\t}\n\n\td.Set(\"action\", statement.Action)\n\td.Set(\"principal\", statement.Principal[\"Service\"])\n\n\tif stringEquals, ok := statement.Condition[\"StringEquals\"]; ok {\n\t\td.Set(\"source_account\", stringEquals[\"AWS:SourceAccount\"])\n\t}\n\n\tif arnLike, ok := statement.Condition[\"ArnLike\"]; ok {\n\t\td.Set(\"source_arn\", arnLike[\"AWS:SourceArn\"])\n\t}\n\n\treturn nil\n}\n\nfunc resourceAwsLambdaPermissionDelete(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).lambdaconn\n\n\tfunctionName := d.Get(\"function_name\").(string)\n\n\t\/\/ There is a bug in the API (reported and acknowledged by AWS)\n\t\/\/ which causes some permissions to be ignored when API calls are sent in parallel\n\t\/\/ We work around this bug via mutex\n\tawsMutexKV.Lock(functionName)\n\tdefer awsMutexKV.Unlock(functionName)\n\n\tinput := lambda.RemovePermissionInput{\n\t\tFunctionName: aws.String(functionName),\n\t\tStatementId: aws.String(d.Id()),\n\t}\n\n\tif v, ok := d.GetOk(\"qualifier\"); ok {\n\t\tinput.Qualifier = aws.String(v.(string))\n\t}\n\n\tlog.Printf(\"[DEBUG] Removing Lambda permission: %s\", input)\n\t_, err := conn.RemovePermission(&input)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = resource.Retry(5*time.Minute, func() *resource.RetryError {\n\t\tlog.Printf(\"[DEBUG] Checking if Lambda permission %q is deleted\", d.Id())\n\n\t\tparams := &lambda.GetPolicyInput{\n\t\t\tFunctionName: aws.String(d.Get(\"function_name\").(string)),\n\t\t}\n\t\tif v, ok := d.GetOk(\"qualifier\"); ok {\n\t\t\tparams.Qualifier = aws.String(v.(string))\n\t\t}\n\n\t\tlog.Printf(\"[DEBUG] Looking for Lambda permission: %s\", *params)\n\t\tresp, err := conn.GetPolicy(params)\n\t\tif err != nil {\n\t\t\tif awsErr, ok := err.(awserr.Error); ok {\n\t\t\t\tif awsErr.Code() == \"ResourceNotFoundException\" {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn resource.NonRetryableError(err)\n\t\t}\n\n\t\tif resp.Policy == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tpolicyInBytes := []byte(*resp.Policy)\n\t\tpolicy := LambdaPolicy{}\n\t\terr = json.Unmarshal(policyInBytes, &policy)\n\t\tif err != nil {\n\t\t\treturn resource.RetryableError(\n\t\t\t\tfmt.Errorf(\"Error unmarshalling Lambda policy: %s\", err))\n\t\t}\n\n\t\t_, err = findLambdaPolicyStatementById(&policy, d.Id())\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tlog.Printf(\"[DEBUG] No error when checking if Lambda permission %s is deleted\", d.Id())\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed removing Lambda permission: %s\", err)\n\t}\n\n\tlog.Printf(\"[DEBUG] Lambda permission with ID %q removed\", d.Id())\n\td.SetId(\"\")\n\n\treturn nil\n}\n\nfunc findLambdaPolicyStatementById(policy *LambdaPolicy, id string) (\n\t*LambdaPolicyStatement, error) {\n\n\tlog.Printf(\"[DEBUG] Received %d statements in Lambda policy: %s\", len(policy.Statement), policy.Statement)\n\tfor _, statement := range policy.Statement {\n\t\tif statement.Sid == id {\n\t\t\treturn &statement, nil\n\t\t}\n\t}\n\n\treturn nil, &resource.NotFoundError{\n\t\tLastRequest: id,\n\t\tLastResponse: policy,\n\t\tMessage: fmt.Sprintf(\"Failed to find statement %q in Lambda policy:\\n%s\", id, policy.Statement),\n\t}\n}\n\nfunc getQualifierFromLambdaAliasOrVersionArn(arn string) (string, error) {\n\tmatches := regexp.MustCompile(LambdaFunctionRegexp).FindStringSubmatch(arn)\n\tif len(matches) < 8 || matches[7] == \"\" {\n\t\treturn \"\", fmt.Errorf(\"Invalid ARN or otherwise unable to get qualifier from ARN (%q)\",\n\t\t\tarn)\n\t}\n\n\treturn matches[7], nil\n}\n\nfunc getFunctionNameFromLambdaArn(arn string) (string, error) {\n\tmatches := regexp.MustCompile(LambdaFunctionRegexp).FindStringSubmatch(arn)\n\tif len(matches) < 6 || matches[5] == \"\" {\n\t\treturn \"\", fmt.Errorf(\"Invalid ARN or otherwise unable to get qualifier from ARN (%q)\",\n\t\t\tarn)\n\t}\n\treturn matches[5], nil\n}\n\ntype LambdaPolicy struct {\n\tVersion string\n\tStatement []LambdaPolicyStatement\n\tId string\n}\n\ntype LambdaPolicyStatement struct {\n\tCondition map[string]map[string]string\n\tAction string\n\tResource string\n\tEffect string\n\tPrincipal map[string]string\n\tSid string\n}\n<commit_msg>provider\/aws: Set the qualifier to an empty string if the parsing fails, to attempt to detect drift<commit_after>package aws\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/lambda\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nvar LambdaFunctionRegexp = `^(arn:[\\w-]+:lambda:)?([a-z]{2}-[a-z]+-\\d{1}:)?(\\d{12}:)?(function:)?([a-zA-Z0-9-_]+)(:(\\$LATEST|[a-zA-Z0-9-_]+))?$`\n\nfunc resourceAwsLambdaPermission() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsLambdaPermissionCreate,\n\t\tRead: resourceAwsLambdaPermissionRead,\n\t\tDelete: resourceAwsLambdaPermissionDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"action\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validateLambdaPermissionAction,\n\t\t\t},\n\t\t\t\"function_name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validateLambdaFunctionName,\n\t\t\t},\n\t\t\t\"principal\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"qualifier\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validateLambdaQualifier,\n\t\t\t},\n\t\t\t\"source_account\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validateAwsAccountId,\n\t\t\t},\n\t\t\t\"source_arn\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validateArn,\n\t\t\t},\n\t\t\t\"statement_id\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validatePolicyStatementId,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceAwsLambdaPermissionCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).lambdaconn\n\n\tfunctionName := d.Get(\"function_name\").(string)\n\n\t\/\/ There is a bug in the API (reported and acknowledged by AWS)\n\t\/\/ which causes some permissions to be ignored when API calls are sent in parallel\n\t\/\/ We work around this bug via mutex\n\tawsMutexKV.Lock(functionName)\n\tdefer awsMutexKV.Unlock(functionName)\n\n\tinput := lambda.AddPermissionInput{\n\t\tAction: aws.String(d.Get(\"action\").(string)),\n\t\tFunctionName: aws.String(functionName),\n\t\tPrincipal: aws.String(d.Get(\"principal\").(string)),\n\t\tStatementId: aws.String(d.Get(\"statement_id\").(string)),\n\t}\n\n\tif v, ok := d.GetOk(\"qualifier\"); ok {\n\t\tinput.Qualifier = aws.String(v.(string))\n\t}\n\tif v, ok := d.GetOk(\"source_account\"); ok {\n\t\tinput.SourceAccount = aws.String(v.(string))\n\t}\n\tif v, ok := d.GetOk(\"source_arn\"); ok {\n\t\tinput.SourceArn = aws.String(v.(string))\n\t}\n\n\tlog.Printf(\"[DEBUG] Adding new Lambda permission: %s\", input)\n\tvar out *lambda.AddPermissionOutput\n\terr := resource.Retry(1*time.Minute, func() *resource.RetryError {\n\t\tvar err error\n\t\tout, err = conn.AddPermission(&input)\n\n\t\tif err != nil {\n\t\t\tif awsErr, ok := err.(awserr.Error); ok {\n\t\t\t\t\/\/ IAM is eventually consistent :\/\n\t\t\t\tif awsErr.Code() == \"ResourceConflictException\" {\n\t\t\t\t\treturn resource.RetryableError(\n\t\t\t\t\t\tfmt.Errorf(\"[WARN] Error adding new Lambda Permission for %s, retrying: %s\",\n\t\t\t\t\t\t\t*input.FunctionName, err))\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn resource.NonRetryableError(err)\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif out != nil && out.Statement != nil {\n\t\tlog.Printf(\"[DEBUG] Created new Lambda permission: %s\", *out.Statement)\n\t} else {\n\t\tlog.Printf(\"[DEBUG] Created new Lambda permission, but no Statement was included\")\n\t}\n\n\td.SetId(d.Get(\"statement_id\").(string))\n\n\terr = resource.Retry(5*time.Minute, func() *resource.RetryError {\n\t\t\/\/ IAM is eventually cosistent :\/\n\t\terr := resourceAwsLambdaPermissionRead(d, meta)\n\t\tif err != nil {\n\t\t\tif strings.HasPrefix(err.Error(), \"Error reading Lambda policy: ResourceNotFoundException\") {\n\t\t\t\treturn resource.RetryableError(\n\t\t\t\t\tfmt.Errorf(\"[WARN] Error reading newly created Lambda Permission for %s, retrying: %s\",\n\t\t\t\t\t\t*input.FunctionName, err))\n\t\t\t}\n\t\t\tif strings.HasPrefix(err.Error(), \"Failed to find statement \\\"\"+d.Id()) {\n\t\t\t\treturn resource.RetryableError(\n\t\t\t\t\tfmt.Errorf(\"[WARN] Error reading newly created Lambda Permission statement for %s, retrying: %s\",\n\t\t\t\t\t\t*input.FunctionName, err))\n\t\t\t}\n\n\t\t\tlog.Printf(\"[ERROR] An actual error occurred when expecting Lambda policy to be there: %s\", err)\n\t\t\treturn resource.NonRetryableError(err)\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn err\n}\n\nfunc resourceAwsLambdaPermissionRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).lambdaconn\n\n\tinput := lambda.GetPolicyInput{\n\t\tFunctionName: aws.String(d.Get(\"function_name\").(string)),\n\t}\n\tif v, ok := d.GetOk(\"qualifier\"); ok {\n\t\tinput.Qualifier = aws.String(v.(string))\n\t}\n\n\tlog.Printf(\"[DEBUG] Looking for Lambda permission: %s\", input)\n\tvar out *lambda.GetPolicyOutput\n\tvar statement *LambdaPolicyStatement\n\terr := resource.Retry(1*time.Minute, func() *resource.RetryError {\n\t\t\/\/ IAM is eventually cosistent :\/\n\t\tvar err error\n\t\tout, err = conn.GetPolicy(&input)\n\t\tif err != nil {\n\t\t\tif awsErr, ok := err.(awserr.Error); ok {\n\t\t\t\tif awsErr.Code() == \"ResourceNotFoundException\" {\n\t\t\t\t\treturn resource.RetryableError(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn resource.NonRetryableError(err)\n\t\t}\n\n\t\tpolicyInBytes := []byte(*out.Policy)\n\t\tpolicy := LambdaPolicy{}\n\t\terr = json.Unmarshal(policyInBytes, &policy)\n\t\tif err != nil {\n\t\t\treturn resource.NonRetryableError(err)\n\t\t}\n\n\t\tstatement, err = findLambdaPolicyStatementById(&policy, d.Id())\n\t\treturn resource.RetryableError(err)\n\t})\n\n\tif err != nil {\n\t\t\/\/ Missing whole policy or Lambda function (API error)\n\t\tif awsErr, ok := err.(awserr.Error); ok {\n\t\t\tif awsErr.Code() == \"ResourceNotFoundException\" {\n\t\t\t\tlog.Printf(\"[WARN] No Lambda Permission Policy found: %v\", input)\n\t\t\t\td.SetId(\"\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Missing permission inside valid policy\n\t\tif nfErr, ok := err.(*resource.NotFoundError); ok {\n\t\t\tlog.Printf(\"[WARN] %s\", nfErr)\n\t\t\td.SetId(\"\")\n\t\t\treturn nil\n\t\t}\n\n\t\treturn err\n\t}\n\n\tqualifier, err := getQualifierFromLambdaAliasOrVersionArn(statement.Resource)\n\tif err != nil {\n\t\tlog.Printf(\"[ERR] Error getting Lambda Qualifier: %s\", err)\n\t}\n\td.Set(\"qualifier\", qualifier)\n\n\t\/\/ Save Lambda function name in the same format\n\tif strings.HasPrefix(d.Get(\"function_name\").(string), \"arn:\"+meta.(*AWSClient).partition+\":lambda:\") {\n\t\t\/\/ Strip qualifier off\n\t\ttrimmedArn := strings.TrimSuffix(statement.Resource, \":\"+qualifier)\n\t\td.Set(\"function_name\", trimmedArn)\n\t} else {\n\t\tfunctionName, err := getFunctionNameFromLambdaArn(statement.Resource)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\td.Set(\"function_name\", functionName)\n\t}\n\n\td.Set(\"action\", statement.Action)\n\td.Set(\"principal\", statement.Principal[\"Service\"])\n\n\tif stringEquals, ok := statement.Condition[\"StringEquals\"]; ok {\n\t\td.Set(\"source_account\", stringEquals[\"AWS:SourceAccount\"])\n\t}\n\n\tif arnLike, ok := statement.Condition[\"ArnLike\"]; ok {\n\t\td.Set(\"source_arn\", arnLike[\"AWS:SourceArn\"])\n\t}\n\n\treturn nil\n}\n\nfunc resourceAwsLambdaPermissionDelete(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).lambdaconn\n\n\tfunctionName := d.Get(\"function_name\").(string)\n\n\t\/\/ There is a bug in the API (reported and acknowledged by AWS)\n\t\/\/ which causes some permissions to be ignored when API calls are sent in parallel\n\t\/\/ We work around this bug via mutex\n\tawsMutexKV.Lock(functionName)\n\tdefer awsMutexKV.Unlock(functionName)\n\n\tinput := lambda.RemovePermissionInput{\n\t\tFunctionName: aws.String(functionName),\n\t\tStatementId: aws.String(d.Id()),\n\t}\n\n\tif v, ok := d.GetOk(\"qualifier\"); ok {\n\t\tinput.Qualifier = aws.String(v.(string))\n\t}\n\n\tlog.Printf(\"[DEBUG] Removing Lambda permission: %s\", input)\n\t_, err := conn.RemovePermission(&input)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = resource.Retry(5*time.Minute, func() *resource.RetryError {\n\t\tlog.Printf(\"[DEBUG] Checking if Lambda permission %q is deleted\", d.Id())\n\n\t\tparams := &lambda.GetPolicyInput{\n\t\t\tFunctionName: aws.String(d.Get(\"function_name\").(string)),\n\t\t}\n\t\tif v, ok := d.GetOk(\"qualifier\"); ok {\n\t\t\tparams.Qualifier = aws.String(v.(string))\n\t\t}\n\n\t\tlog.Printf(\"[DEBUG] Looking for Lambda permission: %s\", *params)\n\t\tresp, err := conn.GetPolicy(params)\n\t\tif err != nil {\n\t\t\tif awsErr, ok := err.(awserr.Error); ok {\n\t\t\t\tif awsErr.Code() == \"ResourceNotFoundException\" {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn resource.NonRetryableError(err)\n\t\t}\n\n\t\tif resp.Policy == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tpolicyInBytes := []byte(*resp.Policy)\n\t\tpolicy := LambdaPolicy{}\n\t\terr = json.Unmarshal(policyInBytes, &policy)\n\t\tif err != nil {\n\t\t\treturn resource.RetryableError(\n\t\t\t\tfmt.Errorf(\"Error unmarshalling Lambda policy: %s\", err))\n\t\t}\n\n\t\t_, err = findLambdaPolicyStatementById(&policy, d.Id())\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tlog.Printf(\"[DEBUG] No error when checking if Lambda permission %s is deleted\", d.Id())\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed removing Lambda permission: %s\", err)\n\t}\n\n\tlog.Printf(\"[DEBUG] Lambda permission with ID %q removed\", d.Id())\n\td.SetId(\"\")\n\n\treturn nil\n}\n\nfunc findLambdaPolicyStatementById(policy *LambdaPolicy, id string) (\n\t*LambdaPolicyStatement, error) {\n\n\tlog.Printf(\"[DEBUG] Received %d statements in Lambda policy: %s\", len(policy.Statement), policy.Statement)\n\tfor _, statement := range policy.Statement {\n\t\tif statement.Sid == id {\n\t\t\treturn &statement, nil\n\t\t}\n\t}\n\n\treturn nil, &resource.NotFoundError{\n\t\tLastRequest: id,\n\t\tLastResponse: policy,\n\t\tMessage: fmt.Sprintf(\"Failed to find statement %q in Lambda policy:\\n%s\", id, policy.Statement),\n\t}\n}\n\nfunc getQualifierFromLambdaAliasOrVersionArn(arn string) (string, error) {\n\tmatches := regexp.MustCompile(LambdaFunctionRegexp).FindStringSubmatch(arn)\n\tif len(matches) < 8 || matches[7] == \"\" {\n\t\treturn \"\", fmt.Errorf(\"Invalid ARN or otherwise unable to get qualifier from ARN (%q)\",\n\t\t\tarn)\n\t}\n\n\treturn matches[7], nil\n}\n\nfunc getFunctionNameFromLambdaArn(arn string) (string, error) {\n\tmatches := regexp.MustCompile(LambdaFunctionRegexp).FindStringSubmatch(arn)\n\tif len(matches) < 6 || matches[5] == \"\" {\n\t\treturn \"\", fmt.Errorf(\"Invalid ARN or otherwise unable to get qualifier from ARN (%q)\",\n\t\t\tarn)\n\t}\n\treturn matches[5], nil\n}\n\ntype LambdaPolicy struct {\n\tVersion string\n\tStatement []LambdaPolicyStatement\n\tId string\n}\n\ntype LambdaPolicyStatement struct {\n\tCondition map[string]map[string]string\n\tAction string\n\tResource string\n\tEffect string\n\tPrincipal map[string]string\n\tSid string\n}\n<|endoftext|>"} {"text":"<commit_before>package mqplan\n\nimport (\n\t\"fmt\"\n\t\"meqa\/mqswag\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/go-openapi\/spec\"\n)\n\nfunc CreateTestFromOp(opNode *mqswag.DAGNode, testId int) *Test {\n\top := opNode.Data.((*spec.Operation))\n\tt := &Test{}\n\tt.Name = fmt.Sprintf(\"%s_%d\", op.ID, testId)\n\tt.Path = opNode.GetName()\n\tt.Method = opNode.GetMethod()\n\n\treturn t\n}\n\nfunc OperationIsDelete(node *mqswag.DAGNode) bool {\n\top, ok := node.Data.(*spec.Operation)\n\tif ok && op != nil {\n\t\ttag := mqswag.GetMeqaTag(op.Description)\n\t\tif (tag != nil && tag.Operation == mqswag.MethodDelete) || (tag == nil && node.GetMethod() == mqswag.MethodDelete) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ GenerateTestsForObject for the obj that we traversed to from create. Add the test cases\n\/\/ generated to plan.\nfunc GenerateTestsForObject(create *mqswag.DAGNode, obj *mqswag.DAGNode, plan *TestPlan) error {\n\tif obj.GetType() != mqswag.TypeDef {\n\t\treturn nil\n\t}\n\tif create.GetType() != mqswag.TypeOp {\n\t\treturn nil\n\t}\n\tcreatePath := create.GetName()\n\tobjName := obj.GetName()\n\n\t\/\/ A loop where we go through all the child operations\n\ttestId := 1\n\ttestCase := CreateTestCase(fmt.Sprintf(\"%s -- %s -- all\", createPath, objName), nil, plan)\n\ttestCase.Tests = append(testCase.Tests, CreateTestFromOp(create, testId))\n\tfor _, child := range obj.Children {\n\t\tif child.GetType() != mqswag.TypeOp {\n\t\t\tcontinue\n\t\t}\n\t\ttestId++\n\t\ttestCase.Tests = append(testCase.Tests, CreateTestFromOp(child, testId))\n\t\tif OperationIsDelete(child) {\n\t\t\ttestId++\n\t\t\ttestCase.Tests = append(testCase.Tests, CreateTestFromOp(create, testId))\n\t\t}\n\t}\n\tif len(testCase.Tests) > 0 {\n\t\tplan.Add(testCase)\n\t}\n\n\t\/\/ a loop where we pick random operations and pair it with the create operation.\n\t\/\/ This would generate a few objects.\n\t\/* disable random stuff during development\n\ttestId = 0\n\ttestCase = &TestCase{nil, fmt.Sprintf(\"%s -- %s -- random\", createPath, objName)}\n\tfor i := 0; i < 2*len(obj.Children); i++ {\n\t\tj := rand.Intn(len(obj.Children))\n\t\tchild := obj.Children[j]\n\t\tif child.GetType() != mqswag.TypeOp {\n\t\t\tmqutil.Logger.Printf(\"unexpected: (%s) has a child (%s) that's not an operation\", obj.Name, child.Name)\n\t\t\tcontinue\n\t\t}\n\t\ttestId++\n\t\ttestCase.Tests = append(testCase.Tests, CreateTestFromOp(create, testId))\n\t\ttestId++\n\t\ttestCase.Tests = append(testCase.Tests, CreateTestFromOp(child, testId))\n\t}\n\tif len(testCase.Tests) > 0 {\n\t\tplan.Add(testCase)\n\t}\n\t*\/\n\n\treturn nil\n}\n\nfunc GenerateTestPlan(swagger *mqswag.Swagger, dag *mqswag.DAG) (*TestPlan, error) {\n\ttestPlan := &TestPlan{}\n\ttestPlan.Init(swagger, nil)\n\n\tgenFunc := func(previous *mqswag.DAGNode, current *mqswag.DAGNode) error {\n\t\tif current.GetType() != mqswag.TypeOp {\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ Exercise the function by itself.\n\t\ttestCase := CreateTestCase(current.GetName()+\" \"+current.GetMethod(), nil, testPlan)\n\t\ttestCase.Tests = append(testCase.Tests, CreateTestFromOp(current, 1))\n\t\ttestPlan.Add(testCase)\n\n\t\t\/\/ When iterating by weight previous is always nil.\n\t\tfor _, c := range current.Children {\n\t\t\terr := GenerateTestsForObject(current, c, testPlan)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\terr := dag.IterateByWeight(genFunc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn testPlan, nil\n}\n\n\/\/ All the operations have the same path. We generate one test case, with the\n\/\/ tests of ascending weight and priority among the operations\nfunc GeneratePathTestCase(operations mqswag.NodeList, plan *TestPlan) {\n\tif len(operations) == 0 {\n\t\treturn\n\t}\n\n\tpathName := operations[0].GetName()\n\tsort.Sort(operations)\n\ttestId := 0\n\ttestCase := CreateTestCase(fmt.Sprintf(\"%s\", pathName), nil, plan)\n\tfor _, o := range operations {\n\t\ttestId++\n\t\ttestCase.Tests = append(testCase.Tests, CreateTestFromOp(o, testId))\n\t}\n\tif len(testCase.Tests) > 0 {\n\t\tplan.Add(testCase)\n\t}\n}\n\ntype PathWeight struct {\n\tpath string\n\tweight int\n}\n\ntype PathWeightList []PathWeight\n\nfunc (n PathWeightList) Len() int {\n\treturn len(n)\n}\n\nfunc (n PathWeightList) Swap(i, j int) {\n\tn[i], n[j] = n[j], n[i]\n}\n\nfunc (n PathWeightList) Less(i, j int) bool {\n\treturn n[i].weight < n[j].weight || (n[i].weight == n[j].weight && n[i].path < n[j].path)\n}\n\n\/\/ Go through all the paths in swagger, and generate the tests for all the operations under\n\/\/ the path.\nfunc GeneratePathTestPlan(swagger *mqswag.Swagger, dag *mqswag.DAG) (*TestPlan, error) {\n\ttestPlan := &TestPlan{}\n\ttestPlan.Init(swagger, nil)\n\n\tpathMap := make(map[string]mqswag.NodeList)\n\tpathWeight := make(map[string]int)\n\n\taddFunc := func(previous *mqswag.DAGNode, current *mqswag.DAGNode) error {\n\t\tif current.GetType() != mqswag.TypeOp {\n\t\t\treturn nil\n\t\t}\n\t\tname := current.GetName()\n\n\t\t\/\/ if the last path element is a {..} path param we remove it. Also remove the ending \"\/\"\n\t\t\/\/ because it has no effect.\n\t\tnameArray := strings.Split(name, \"\/\")\n\t\tif len(nameArray) > 0 && len(nameArray[len(nameArray)-1]) == 0 {\n\t\t\tnameArray = nameArray[:len(nameArray)-1]\n\t\t}\n\t\tif len(nameArray) > 0 {\n\t\t\tif last := nameArray[len(nameArray)-1]; len(last) > 0 && last[0] == '{' && last[len(last)-1] == '}' {\n\t\t\t\tnameArray = nameArray[:len(nameArray)-1]\n\t\t\t}\n\t\t}\n\t\tname = strings.Join(nameArray, \"\/\")\n\n\t\tpathMap[name] = append(pathMap[name], current)\n\n\t\tcurrentWeight := current.Weight*mqswag.DAGDepth + current.Priority\n\t\tif pathWeight[name] <= currentWeight {\n\t\t\tpathWeight[name] = currentWeight\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tdag.IterateByWeight(addFunc)\n\n\tvar pathWeightList PathWeightList\n\t\/\/ Sort the path by weight\n\tfor k, v := range pathWeight {\n\t\tp := PathWeight{k, v}\n\t\tpathWeightList = append(pathWeightList, p)\n\t}\n\tsort.Sort(pathWeightList)\n\n\tfor _, p := range pathWeightList {\n\t\tGeneratePathTestCase(pathMap[p.path], testPlan)\n\t}\n\treturn testPlan, nil\n}\n<commit_msg>Minor change to the test name.<commit_after>package mqplan\n\nimport (\n\t\"fmt\"\n\t\"meqa\/mqswag\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/go-openapi\/spec\"\n)\n\n\/\/ Given a path name, retrieve the last entry that is not a path param.\nfunc GetLastPathElement(name string) string {\n\tnameArray := strings.Split(name, \"\/\")\n\tfor i := len(nameArray) - 1; i >= 0; i-- {\n\t\tif len(nameArray[i]) > 0 && nameArray[i][0] != '{' {\n\t\t\treturn nameArray[i]\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc CreateTestFromOp(opNode *mqswag.DAGNode, testId int) *Test {\n\top := opNode.Data.((*spec.Operation))\n\tt := &Test{}\n\tt.Path = opNode.GetName()\n\tt.Method = opNode.GetMethod()\n\topId := op.ID\n\tif len(opId) == 0 {\n\t\topId = GetLastPathElement(t.Path)\n\t}\n\tt.Name = fmt.Sprintf(\"%s_%s_%d\", t.Method, opId, testId)\n\n\treturn t\n}\n\nfunc OperationIsDelete(node *mqswag.DAGNode) bool {\n\top, ok := node.Data.(*spec.Operation)\n\tif ok && op != nil {\n\t\ttag := mqswag.GetMeqaTag(op.Description)\n\t\tif (tag != nil && tag.Operation == mqswag.MethodDelete) || (tag == nil && node.GetMethod() == mqswag.MethodDelete) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ GenerateTestsForObject for the obj that we traversed to from create. Add the test cases\n\/\/ generated to plan.\nfunc GenerateTestsForObject(create *mqswag.DAGNode, obj *mqswag.DAGNode, plan *TestPlan) error {\n\tif obj.GetType() != mqswag.TypeDef {\n\t\treturn nil\n\t}\n\tif create.GetType() != mqswag.TypeOp {\n\t\treturn nil\n\t}\n\tcreatePath := create.GetName()\n\tobjName := obj.GetName()\n\n\t\/\/ A loop where we go through all the child operations\n\ttestId := 1\n\ttestCase := CreateTestCase(fmt.Sprintf(\"%s -- %s -- all\", createPath, objName), nil, plan)\n\ttestCase.Tests = append(testCase.Tests, CreateTestFromOp(create, testId))\n\tfor _, child := range obj.Children {\n\t\tif child.GetType() != mqswag.TypeOp {\n\t\t\tcontinue\n\t\t}\n\t\ttestId++\n\t\ttestCase.Tests = append(testCase.Tests, CreateTestFromOp(child, testId))\n\t\tif OperationIsDelete(child) {\n\t\t\ttestId++\n\t\t\ttestCase.Tests = append(testCase.Tests, CreateTestFromOp(create, testId))\n\t\t}\n\t}\n\tif len(testCase.Tests) > 0 {\n\t\tplan.Add(testCase)\n\t}\n\n\t\/\/ a loop where we pick random operations and pair it with the create operation.\n\t\/\/ This would generate a few objects.\n\t\/* disable random stuff during development\n\ttestId = 0\n\ttestCase = &TestCase{nil, fmt.Sprintf(\"%s -- %s -- random\", createPath, objName)}\n\tfor i := 0; i < 2*len(obj.Children); i++ {\n\t\tj := rand.Intn(len(obj.Children))\n\t\tchild := obj.Children[j]\n\t\tif child.GetType() != mqswag.TypeOp {\n\t\t\tmqutil.Logger.Printf(\"unexpected: (%s) has a child (%s) that's not an operation\", obj.Name, child.Name)\n\t\t\tcontinue\n\t\t}\n\t\ttestId++\n\t\ttestCase.Tests = append(testCase.Tests, CreateTestFromOp(create, testId))\n\t\ttestId++\n\t\ttestCase.Tests = append(testCase.Tests, CreateTestFromOp(child, testId))\n\t}\n\tif len(testCase.Tests) > 0 {\n\t\tplan.Add(testCase)\n\t}\n\t*\/\n\n\treturn nil\n}\n\nfunc GenerateTestPlan(swagger *mqswag.Swagger, dag *mqswag.DAG) (*TestPlan, error) {\n\ttestPlan := &TestPlan{}\n\ttestPlan.Init(swagger, nil)\n\n\tgenFunc := func(previous *mqswag.DAGNode, current *mqswag.DAGNode) error {\n\t\tif current.GetType() != mqswag.TypeOp {\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ Exercise the function by itself.\n\t\ttestCase := CreateTestCase(current.GetName()+\" \"+current.GetMethod(), nil, testPlan)\n\t\ttestCase.Tests = append(testCase.Tests, CreateTestFromOp(current, 1))\n\t\ttestPlan.Add(testCase)\n\n\t\t\/\/ When iterating by weight previous is always nil.\n\t\tfor _, c := range current.Children {\n\t\t\terr := GenerateTestsForObject(current, c, testPlan)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\terr := dag.IterateByWeight(genFunc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn testPlan, nil\n}\n\n\/\/ All the operations have the same path. We generate one test case, with the\n\/\/ tests of ascending weight and priority among the operations\nfunc GeneratePathTestCase(operations mqswag.NodeList, plan *TestPlan) {\n\tif len(operations) == 0 {\n\t\treturn\n\t}\n\n\tpathName := operations[0].GetName()\n\tsort.Sort(operations)\n\ttestId := 0\n\ttestCase := CreateTestCase(fmt.Sprintf(\"%s\", pathName), nil, plan)\n\tfor _, o := range operations {\n\t\ttestId++\n\t\ttestCase.Tests = append(testCase.Tests, CreateTestFromOp(o, testId))\n\t}\n\tif len(testCase.Tests) > 0 {\n\t\tplan.Add(testCase)\n\t}\n}\n\ntype PathWeight struct {\n\tpath string\n\tweight int\n}\n\ntype PathWeightList []PathWeight\n\nfunc (n PathWeightList) Len() int {\n\treturn len(n)\n}\n\nfunc (n PathWeightList) Swap(i, j int) {\n\tn[i], n[j] = n[j], n[i]\n}\n\nfunc (n PathWeightList) Less(i, j int) bool {\n\treturn n[i].weight < n[j].weight || (n[i].weight == n[j].weight && n[i].path < n[j].path)\n}\n\n\/\/ Go through all the paths in swagger, and generate the tests for all the operations under\n\/\/ the path.\nfunc GeneratePathTestPlan(swagger *mqswag.Swagger, dag *mqswag.DAG) (*TestPlan, error) {\n\ttestPlan := &TestPlan{}\n\ttestPlan.Init(swagger, nil)\n\n\tpathMap := make(map[string]mqswag.NodeList)\n\tpathWeight := make(map[string]int)\n\n\taddFunc := func(previous *mqswag.DAGNode, current *mqswag.DAGNode) error {\n\t\tif current.GetType() != mqswag.TypeOp {\n\t\t\treturn nil\n\t\t}\n\t\tname := current.GetName()\n\n\t\t\/\/ if the last path element is a {..} path param we remove it. Also remove the ending \"\/\"\n\t\t\/\/ because it has no effect.\n\t\tnameArray := strings.Split(name, \"\/\")\n\t\tif len(nameArray) > 0 && len(nameArray[len(nameArray)-1]) == 0 {\n\t\t\tnameArray = nameArray[:len(nameArray)-1]\n\t\t}\n\t\tif len(nameArray) > 0 {\n\t\t\tif last := nameArray[len(nameArray)-1]; len(last) > 0 && last[0] == '{' && last[len(last)-1] == '}' {\n\t\t\t\tnameArray = nameArray[:len(nameArray)-1]\n\t\t\t}\n\t\t}\n\t\tname = strings.Join(nameArray, \"\/\")\n\n\t\tpathMap[name] = append(pathMap[name], current)\n\n\t\tcurrentWeight := current.Weight*mqswag.DAGDepth + current.Priority\n\t\tif pathWeight[name] <= currentWeight {\n\t\t\tpathWeight[name] = currentWeight\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tdag.IterateByWeight(addFunc)\n\n\tvar pathWeightList PathWeightList\n\t\/\/ Sort the path by weight\n\tfor k, v := range pathWeight {\n\t\tp := PathWeight{k, v}\n\t\tpathWeightList = append(pathWeightList, p)\n\t}\n\tsort.Sort(pathWeightList)\n\n\tfor _, p := range pathWeightList {\n\t\tGeneratePathTestCase(pathMap[p.path], testPlan)\n\t}\n\treturn testPlan, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"github.com\/github\/git-lfs\/git\"\n\t\"github.com\/github\/git-lfs\/lfs\"\n\t\"github.com\/github\/git-lfs\/vendor\/_nuts\/github.com\/spf13\/cobra\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"sync\"\n)\n\nvar (\n\tcheckoutCmd = &cobra.Command{\n\t\tUse: \"checkout\",\n\t\tShort: \"Checks out LFS files into the working copy\",\n\t\tRun: checkoutCommand,\n\t}\n)\n\nfunc checkoutCommand(cmd *cobra.Command, args []string) {\n\tref, err := git.CurrentRef()\n\tif err != nil {\n\t\tPanic(err, \"Could not checkout\")\n\t}\n\n\tpointers, err := lfs.ScanRefs(ref, \"\", nil)\n\tif err != nil {\n\t\tPanic(err, \"Could not scan for Git LFS files\")\n\t}\n\n\tvar wait sync.WaitGroup\n\twait.Add(1)\n\n\tc := make(chan *lfs.WrappedPointer)\n\n\tcheckoutWithChan(c, &wait)\n\tfor _, pointer := range pointers {\n\t\tc <- pointer\n\t}\n\tclose(c)\n\twait.Wait()\n}\n\nfunc init() {\n\tRootCmd.AddCommand(checkoutCmd)\n}\n\n\/\/ Populate the working copy with the real content of objects where the file is\n\/\/ either missing, or contains a matching pointer placeholder, from a list of pointers.\n\/\/ If the file exists but has other content it is left alone\n\/\/ returns immediately but a goroutine listens on the in channel for objects\n\/\/ calls wait.Done() when the final item after the channel is closed is done\nfunc checkoutWithChan(in <-chan *lfs.WrappedPointer, wait *sync.WaitGroup) {\n\tgo func() {\n\t\t\/\/ Fire up the update-index command\n\t\tcmd := exec.Command(\"git\", \"update-index\", \"-q\", \"--refresh\", \"--stdin\")\n\t\tupdateIdxStdin, err := cmd.StdinPipe()\n\t\tif err != nil {\n\t\t\tPanic(err, \"Could not update the index\")\n\t\t}\n\n\t\tif err := cmd.Start(); err != nil {\n\t\t\tPanic(err, \"Could not update the index\")\n\t\t}\n\n\t\t\/\/ As files come in, write them to the wd and update the index\n\t\tfor pointer := range in {\n\n\t\t\t\/\/ Check the content - either missing or still this pointer (not exist is ok)\n\t\t\tfilepointer, err := lfs.DecodePointerFromFile(pointer.Name)\n\t\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\t\tif err == lfs.NotAPointerError {\n\t\t\t\t\t\/\/ File has non-pointer content, leave it alone\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tPanic(err, \"Problem accessing %v\", pointer.Name)\n\t\t\t}\n\t\t\tif filepointer != nil && filepointer.Oid != pointer.Oid {\n\t\t\t\t\/\/ User has probably manually reset a file to another commit\n\t\t\t\t\/\/ while leaving it a pointer; don't mess with this\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ OK now we can (over)write the file content\n\t\t\tfile, err := os.Create(pointer.Name)\n\t\t\tif err != nil {\n\t\t\t\tPanic(err, \"Could not create working directory file\")\n\t\t\t}\n\n\t\t\tif err := lfs.PointerSmudge(file, pointer.Pointer, pointer.Name, nil); err != nil {\n\t\t\t\tPanic(err, \"Could not write working directory file\")\n\t\t\t}\n\t\t\tfile.Close()\n\n\t\t\tupdateIdxStdin.Write([]byte(pointer.Name + \"\\n\"))\n\t\t}\n\n\t\tupdateIdxStdin.Close()\n\t\tif err := cmd.Wait(); err != nil {\n\t\t\tPanic(err, \"Error updating the git index\")\n\t\t}\n\t\twait.Done()\n\t}()\n\n}\n<commit_msg>Defer the wait.Done() for greater resilience<commit_after>package commands\n\nimport (\n\t\"github.com\/github\/git-lfs\/git\"\n\t\"github.com\/github\/git-lfs\/lfs\"\n\t\"github.com\/github\/git-lfs\/vendor\/_nuts\/github.com\/spf13\/cobra\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"sync\"\n)\n\nvar (\n\tcheckoutCmd = &cobra.Command{\n\t\tUse: \"checkout\",\n\t\tShort: \"Checks out LFS files into the working copy\",\n\t\tRun: checkoutCommand,\n\t}\n)\n\nfunc checkoutCommand(cmd *cobra.Command, args []string) {\n\tref, err := git.CurrentRef()\n\tif err != nil {\n\t\tPanic(err, \"Could not checkout\")\n\t}\n\n\tpointers, err := lfs.ScanRefs(ref, \"\", nil)\n\tif err != nil {\n\t\tPanic(err, \"Could not scan for Git LFS files\")\n\t}\n\n\tvar wait sync.WaitGroup\n\twait.Add(1)\n\n\tc := make(chan *lfs.WrappedPointer)\n\n\tcheckoutWithChan(c, &wait)\n\tfor _, pointer := range pointers {\n\t\tc <- pointer\n\t}\n\tclose(c)\n\twait.Wait()\n}\n\nfunc init() {\n\tRootCmd.AddCommand(checkoutCmd)\n}\n\n\/\/ Populate the working copy with the real content of objects where the file is\n\/\/ either missing, or contains a matching pointer placeholder, from a list of pointers.\n\/\/ If the file exists but has other content it is left alone\n\/\/ returns immediately but a goroutine listens on the in channel for objects\n\/\/ calls wait.Done() when the final item after the channel is closed is done\nfunc checkoutWithChan(in <-chan *lfs.WrappedPointer, wait *sync.WaitGroup) {\n\tgo func() {\n\t\tdefer wait.Done()\n\t\t\/\/ Fire up the update-index command\n\t\tcmd := exec.Command(\"git\", \"update-index\", \"-q\", \"--refresh\", \"--stdin\")\n\t\tupdateIdxStdin, err := cmd.StdinPipe()\n\t\tif err != nil {\n\t\t\tPanic(err, \"Could not update the index\")\n\t\t}\n\n\t\tif err := cmd.Start(); err != nil {\n\t\t\tPanic(err, \"Could not update the index\")\n\t\t}\n\n\t\t\/\/ As files come in, write them to the wd and update the index\n\t\tfor pointer := range in {\n\n\t\t\t\/\/ Check the content - either missing or still this pointer (not exist is ok)\n\t\t\tfilepointer, err := lfs.DecodePointerFromFile(pointer.Name)\n\t\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\t\tif err == lfs.NotAPointerError {\n\t\t\t\t\t\/\/ File has non-pointer content, leave it alone\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tPanic(err, \"Problem accessing %v\", pointer.Name)\n\t\t\t}\n\t\t\tif filepointer != nil && filepointer.Oid != pointer.Oid {\n\t\t\t\t\/\/ User has probably manually reset a file to another commit\n\t\t\t\t\/\/ while leaving it a pointer; don't mess with this\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ OK now we can (over)write the file content\n\t\t\tfile, err := os.Create(pointer.Name)\n\t\t\tif err != nil {\n\t\t\t\tPanic(err, \"Could not create working directory file\")\n\t\t\t}\n\n\t\t\tif err := lfs.PointerSmudge(file, pointer.Pointer, pointer.Name, nil); err != nil {\n\t\t\t\tPanic(err, \"Could not write working directory file\")\n\t\t\t}\n\t\t\tfile.Close()\n\n\t\t\tupdateIdxStdin.Write([]byte(pointer.Name + \"\\n\"))\n\t\t}\n\n\t\tupdateIdxStdin.Close()\n\t\tif err := cmd.Wait(); err != nil {\n\t\t\tPanic(err, \"Error updating the git index\")\n\t\t}\n\t}()\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ \thttps:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage safehttp\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n)\n\n\/\/ The HTTP request methods defined by RFC.\nconst (\n\tMethodConnect = \"CONNECT\" \/\/ RFC 7231, 4.3.6\n\tMethodDelete = \"DELETE\" \/\/ RFC 7231, 4.3.5\n\tMethodGet = \"GET\" \/\/ RFC 7231, 4.3.1\n\tMethodHead = \"HEAD\" \/\/ RFC 7231, 4.3.2\n\tMethodOptions = \"OPTIONS\" \/\/ RFC 7231, 4.3.7\n\tMethodPatch = \"PATCH\" \/\/ RFC 5789\n\tMethodPost = \"POST\" \/\/ RFC 7231, 4.3.3\n\tMethodPut = \"PUT\" \/\/ RFC 7231, 4.3.4\n\tMethodTrace = \"TRACE\" \/\/ RFC 7231, 4.3.8\n)\n\n\/\/ ServeMux is an HTTP request multiplexer. It matches the URL of each incoming\n\/\/ request against a list of registered patterns and calls the handler for\n\/\/ the pattern that most closely matches the URL.\n\/\/\n\/\/ Patterns names are fixed, rooted paths, like \"\/favicon.ico\", or rooted\n\/\/ subtrees like \"\/images\/\" (note the trailing slash). Longer patterns take\n\/\/ precedence over shorter ones, so that if there are handlers registered for\n\/\/ both \"\/images\/\" and \"\/images\/thumbnails\/\", the latter handler will be called\n\/\/ for paths beginning \"\/images\/thumbnails\/\" and the former will receive\n\/\/ requests for any other paths in the \"\/images\/\" subtree.\n\/\/\n\/\/ Note that since a pattern ending in a slash names a rooted subtree, the\n\/\/ pattern \"\/\" matches all paths not matched by other registered patterns,\n\/\/ not just the URL with Path == \"\/\".\n\/\/\n\/\/ If a subtree has been registered and a request is received naming the subtree\n\/\/ root without its trailing slash, ServeMux redirects that request to\n\/\/ the subtree root (adding the trailing slash). This behavior can be overridden\n\/\/ with a separate registration for the path without the trailing slash. For\n\/\/ example, registering \"\/images\/\" causes ServeMux to redirect a request for\n\/\/ \"\/images\" to \"\/images\/\", unless \"\/images\" has been registered separately.\n\/\/\n\/\/ Patterns may optionally begin with a host name, restricting matches to URLs\n\/\/ on that host only. Host-specific patterns take precedence over general\n\/\/ patterns, so that a handler might register for the two patterns \"\/codesearch\"\n\/\/ and \"codesearch.google.com\/\" without also taking over requests for\n\/\/ \"http:\/\/www.google.com\/\".\n\/\/\n\/\/ ServeMux also takes care of sanitizing the URL request path and the Host\n\/\/ header, stripping the port number and redirecting any request containing . or\n\/\/ .. elements or repeated slashes to an equivalent, cleaner URL.\n\/\/\n\/\/ Multiple handlers can be registered for a single pattern, as long as they\n\/\/ handle different HTTP methods.\ntype ServeMux struct {\n\tmux *http.ServeMux\n\thandlers map[string]*registeredHandler\n\n\tdispatcher Dispatcher\n\tinterceptors []Interceptor\n\tmethodNotAllowedHandler handlerRegistration\n}\n\n\/\/ ServeHTTP dispatches the request to the handler whose method matches the\n\/\/ incoming request and whose pattern most closely matches the request URL.\n\/\/\n\/\/ For each incoming request:\n\/\/ - [Before Phase] Interceptor.Before methods are called for every installed\n\/\/ interceptor, until an interceptor writes to a ResponseWriter (including\n\/\/ errors) or panics,\n\/\/ - the handler is called after a [Before Phase] if no writes or panics occured,\n\/\/ - the handler triggers the [Commit Phase] by writing to the ResponseWriter,\n\/\/ - [Commit Phase] Interceptor.Commit methods run for every interceptor whose\n\/\/ Before method was called,\n\/\/ - [Dispatcher Phase] after the [Commit Phase], the Dispatcher's appropriate\n\/\/ write method is called; the Dispatcher is responsible for determining whether\n\/\/ the response is indeed safe and writing it,\n\/\/ - if the handler attempts to write more than once, it is treated as an\n\/\/ unrecoverable error; the request processing ends abrubptly with a panic and\n\/\/ nothing else happens (note: this will change as soon as [After Phase] is\n\/\/ introduced)\n\/\/\n\/\/ Interceptors should NOT rely on the order they're run.\nfunc (m *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tm.mux.ServeHTTP(w, r)\n}\n\nfunc (m *ServeMux) Handle(pattern string, method string, h Handler, cfgs ...InterceptorConfig) {\n\tmethodNotAllowed := handlerConfig{\n\t\tDispatcher: m.dispatcher,\n\t\tHandler: m.methodNotAllowedHandler.handler,\n\t\tInterceptors: configureInterceptors(m.interceptors, m.methodNotAllowedHandler.cfgs),\n\t}\n\n\tif m.handlers[pattern] == nil {\n\t\tm.handlers[pattern] = ®isteredHandler{\n\t\t\tpattern: pattern,\n\t\t\tmethodNotAllowed: methodNotAllowed,\n\t\t\tmethods: make(map[string]handlerConfig),\n\t\t}\n\t\tm.mux.Handle(pattern, m.handlers[pattern])\n\t}\n\tm.handlers[pattern].handleMethod(method,\n\t\thandlerConfig{\n\t\t\tDispatcher: m.dispatcher,\n\t\t\tHandler: h,\n\t\t\tInterceptors: configureInterceptors(m.interceptors, cfgs),\n\t\t})\n}\n\n\/\/ ServeMuxConfig is a builder for ServeMux.\ntype ServeMuxConfig struct {\n\tdispatcher Dispatcher\n\thandlers []struct {\n\t\tpattern, method string\n\t\th Handler\n\t\tcfgs []InterceptorConfig\n\t}\n\tinterceptors []Interceptor\n\tmethodNotAllowedHandler handlerRegistration\n}\n\n\/\/ NewServeMuxConfig crates a ServeMuxConfig with the provided Dispatcher. If\n\/\/ the provided Dispatcher is nil, the DefaultDispatcher is used.\nfunc NewServeMuxConfig(disp Dispatcher) *ServeMuxConfig {\n\tif disp == nil {\n\t\tdisp = &DefaultDispatcher{}\n\t}\n\treturn &ServeMuxConfig{\n\t\tdispatcher: disp,\n\t\tmethodNotAllowedHandler: handlerRegistration{\n\t\t\thandler: HandlerFunc(defaultMethotNotAllowed),\n\t\t},\n\t}\n}\n\ntype handlerRegistration struct {\n\tpattern string\n\tmethod string\n\thandler Handler\n\tcfgs []InterceptorConfig\n}\n\n\/\/ Handle registers a handler for the given pattern and method. If a handler is\n\/\/ registered twice for the same pattern and method, Build will panic.\n\/\/\n\/\/ InterceptorConfigs can be passed in order to modify the behavior of the\n\/\/ interceptors on a registered handler. Passing an InterceptorConfig whose\n\/\/ corresponding Interceptor was not installed will produce no effect. If\n\/\/ multiple configurations are passed for the same Interceptor, Mux will panic.\nfunc (s *ServeMuxConfig) Handle(pattern string, method string, h Handler, cfgs ...InterceptorConfig) {\n\ts.handlers = append(s.handlers, struct {\n\t\tpattern string\n\t\tmethod string\n\t\th Handler\n\t\tcfgs []InterceptorConfig\n\t}{\n\t\tpattern: pattern,\n\t\tmethod: method,\n\t\th: h,\n\t\tcfgs: cfgs,\n\t})\n}\n\n\/\/ HandleMethodNotAllowed registers a handler that runs when a given method is\n\/\/ not allowed for a registered path.\nfunc (s *ServeMuxConfig) HandleMethodNotAllowed(h Handler, cfgs ...InterceptorConfig) {\n\ts.methodNotAllowedHandler = handlerRegistration{\n\t\thandler: h,\n\t\tcfgs: cfgs,\n\t}\n}\n\nfunc defaultMethotNotAllowed(w ResponseWriter, req *IncomingRequest) Result {\n\treturn w.WriteError(StatusMethodNotAllowed)\n}\n\n\/\/ Intercept installs the given interceptors.\n\/\/\n\/\/ Interceptors order is respected and interceptors are always run in the\n\/\/ order they've been installed.\n\/\/\n\/\/ Calling Intercept multiple times is valid. Interceptors that are added last\n\/\/ will run last.\nfunc (s *ServeMuxConfig) Intercept(is ...Interceptor) {\n\ts.interceptors = append(s.interceptors, is...)\n}\n\n\/\/ Mux returns the ServeMux with a copy of the current configuration.\nfunc (s *ServeMuxConfig) Mux() *ServeMux {\n\tfreezeLocalDev = true\n\tif IsLocalDev() {\n\t\tlog.Println(\"Warning: creating safehttp.Mux in dev mode. This configuration is not valid for production use\")\n\t}\n\n\tif s.dispatcher == nil {\n\t\tpanic(\"Use NewServeMuxConfig instead of creating ServeMuxConfig using a composite literal.\")\n\t}\n\n\tm := &ServeMux{\n\t\tmux: http.NewServeMux(),\n\t\thandlers: make(map[string]*registeredHandler),\n\t\tdispatcher: s.dispatcher,\n\t\tinterceptors: s.interceptors,\n\t\tmethodNotAllowedHandler: s.methodNotAllowedHandler,\n\t}\n\n\tfor _, handler := range s.handlers {\n\t\tm.Handle(handler.pattern, handler.method, handler.h, handler.cfgs...)\n\t}\n\treturn m\n}\n\n\/\/ Clone creates a copy of the current config.\n\/\/ This can be used to create several instances of Mux that share the same set of\n\/\/ plugins and some common handlers.\nfunc (s *ServeMuxConfig) Clone() *ServeMuxConfig {\n\tc := &ServeMuxConfig{\n\t\tdispatcher: s.dispatcher,\n\t\tinterceptors: make([]Interceptor, len(s.interceptors)),\n\t\tmethodNotAllowedHandler: s.methodNotAllowedHandler,\n\t}\n\tcopy(c.handlers, s.handlers)\n\tcopy(c.interceptors, s.interceptors)\n\treturn c\n}\n\ntype registeredHandler struct {\n\tpattern string\n\tmethods map[string]handlerConfig\n\tmethodNotAllowed handlerConfig\n}\n\nfunc (rh *registeredHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tcfg, ok := rh.methods[r.Method]\n\tif !ok {\n\t\tcfg = rh.methodNotAllowed\n\t}\n\tprocessRequest(cfg, w, r)\n}\n\nfunc (rh *registeredHandler) handleMethod(method string, cfg handlerConfig) {\n\tif _, exists := rh.methods[method]; exists {\n\t\tpanic(fmt.Sprintf(\"double registration of (pattern = %q, method = %q)\", rh.pattern, method))\n\t}\n\trh.methods[method] = cfg\n}\n\nfunc configureInterceptors(interceptors []Interceptor, cfgs []InterceptorConfig) []configuredInterceptor {\n\tvar its []configuredInterceptor\n\tfor _, it := range interceptors {\n\t\tvar matches []InterceptorConfig\n\t\tfor _, c := range cfgs {\n\t\t\tif it.Match(c) {\n\t\t\t\tmatches = append(matches, c)\n\t\t\t}\n\t\t}\n\n\t\tif len(matches) > 1 {\n\t\t\tmsg := fmt.Sprintf(\"multiple configurations specified for interceptor %T: \", it)\n\t\t\tfor _, match := range matches {\n\t\t\t\tmsg += fmt.Sprintf(\"%#v\", match)\n\t\t\t}\n\t\t\tpanic(msg)\n\t\t}\n\n\t\tvar cfg InterceptorConfig\n\t\tif len(matches) == 1 {\n\t\t\tcfg = matches[0]\n\t\t}\n\t\tits = append(its, configuredInterceptor{interceptor: it, config: cfg})\n\t}\n\treturn its\n}\n<commit_msg>removed unused fields from handlerRegistration (the whole struct will likely go away soon)<commit_after>\/\/ Copyright 2020 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ \thttps:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage safehttp\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n)\n\n\/\/ The HTTP request methods defined by RFC.\nconst (\n\tMethodConnect = \"CONNECT\" \/\/ RFC 7231, 4.3.6\n\tMethodDelete = \"DELETE\" \/\/ RFC 7231, 4.3.5\n\tMethodGet = \"GET\" \/\/ RFC 7231, 4.3.1\n\tMethodHead = \"HEAD\" \/\/ RFC 7231, 4.3.2\n\tMethodOptions = \"OPTIONS\" \/\/ RFC 7231, 4.3.7\n\tMethodPatch = \"PATCH\" \/\/ RFC 5789\n\tMethodPost = \"POST\" \/\/ RFC 7231, 4.3.3\n\tMethodPut = \"PUT\" \/\/ RFC 7231, 4.3.4\n\tMethodTrace = \"TRACE\" \/\/ RFC 7231, 4.3.8\n)\n\n\/\/ ServeMux is an HTTP request multiplexer. It matches the URL of each incoming\n\/\/ request against a list of registered patterns and calls the handler for\n\/\/ the pattern that most closely matches the URL.\n\/\/\n\/\/ Patterns names are fixed, rooted paths, like \"\/favicon.ico\", or rooted\n\/\/ subtrees like \"\/images\/\" (note the trailing slash). Longer patterns take\n\/\/ precedence over shorter ones, so that if there are handlers registered for\n\/\/ both \"\/images\/\" and \"\/images\/thumbnails\/\", the latter handler will be called\n\/\/ for paths beginning \"\/images\/thumbnails\/\" and the former will receive\n\/\/ requests for any other paths in the \"\/images\/\" subtree.\n\/\/\n\/\/ Note that since a pattern ending in a slash names a rooted subtree, the\n\/\/ pattern \"\/\" matches all paths not matched by other registered patterns,\n\/\/ not just the URL with Path == \"\/\".\n\/\/\n\/\/ If a subtree has been registered and a request is received naming the subtree\n\/\/ root without its trailing slash, ServeMux redirects that request to\n\/\/ the subtree root (adding the trailing slash). This behavior can be overridden\n\/\/ with a separate registration for the path without the trailing slash. For\n\/\/ example, registering \"\/images\/\" causes ServeMux to redirect a request for\n\/\/ \"\/images\" to \"\/images\/\", unless \"\/images\" has been registered separately.\n\/\/\n\/\/ Patterns may optionally begin with a host name, restricting matches to URLs\n\/\/ on that host only. Host-specific patterns take precedence over general\n\/\/ patterns, so that a handler might register for the two patterns \"\/codesearch\"\n\/\/ and \"codesearch.google.com\/\" without also taking over requests for\n\/\/ \"http:\/\/www.google.com\/\".\n\/\/\n\/\/ ServeMux also takes care of sanitizing the URL request path and the Host\n\/\/ header, stripping the port number and redirecting any request containing . or\n\/\/ .. elements or repeated slashes to an equivalent, cleaner URL.\n\/\/\n\/\/ Multiple handlers can be registered for a single pattern, as long as they\n\/\/ handle different HTTP methods.\ntype ServeMux struct {\n\tmux *http.ServeMux\n\thandlers map[string]*registeredHandler\n\n\tdispatcher Dispatcher\n\tinterceptors []Interceptor\n\tmethodNotAllowedHandler handlerRegistration\n}\n\n\/\/ ServeHTTP dispatches the request to the handler whose method matches the\n\/\/ incoming request and whose pattern most closely matches the request URL.\n\/\/\n\/\/ For each incoming request:\n\/\/ - [Before Phase] Interceptor.Before methods are called for every installed\n\/\/ interceptor, until an interceptor writes to a ResponseWriter (including\n\/\/ errors) or panics,\n\/\/ - the handler is called after a [Before Phase] if no writes or panics occured,\n\/\/ - the handler triggers the [Commit Phase] by writing to the ResponseWriter,\n\/\/ - [Commit Phase] Interceptor.Commit methods run for every interceptor whose\n\/\/ Before method was called,\n\/\/ - [Dispatcher Phase] after the [Commit Phase], the Dispatcher's appropriate\n\/\/ write method is called; the Dispatcher is responsible for determining whether\n\/\/ the response is indeed safe and writing it,\n\/\/ - if the handler attempts to write more than once, it is treated as an\n\/\/ unrecoverable error; the request processing ends abrubptly with a panic and\n\/\/ nothing else happens (note: this will change as soon as [After Phase] is\n\/\/ introduced)\n\/\/\n\/\/ Interceptors should NOT rely on the order they're run.\nfunc (m *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tm.mux.ServeHTTP(w, r)\n}\n\n\/\/ Handle registers a handler for the given pattern and method. If a handler is\n\/\/ registered twice for the same pattern and method, Build will panic.\n\/\/\n\/\/ InterceptorConfigs can be passed in order to modify the behavior of the\n\/\/ interceptors on a registered handler. Passing an InterceptorConfig whose\n\/\/ corresponding Interceptor was not installed will produce no effect. If\n\/\/ multiple configurations are passed for the same Interceptor, Mux will panic.\nfunc (m *ServeMux) Handle(pattern string, method string, h Handler, cfgs ...InterceptorConfig) {\n\tmethodNotAllowed := handlerConfig{\n\t\tDispatcher: m.dispatcher,\n\t\tHandler: m.methodNotAllowedHandler.handler,\n\t\tInterceptors: configureInterceptors(m.interceptors, m.methodNotAllowedHandler.cfgs),\n\t}\n\n\tif m.handlers[pattern] == nil {\n\t\tm.handlers[pattern] = ®isteredHandler{\n\t\t\tpattern: pattern,\n\t\t\tmethodNotAllowed: methodNotAllowed,\n\t\t\tmethods: make(map[string]handlerConfig),\n\t\t}\n\t\tm.mux.Handle(pattern, m.handlers[pattern])\n\t}\n\tm.handlers[pattern].handleMethod(method,\n\t\thandlerConfig{\n\t\t\tDispatcher: m.dispatcher,\n\t\t\tHandler: h,\n\t\t\tInterceptors: configureInterceptors(m.interceptors, cfgs),\n\t\t})\n}\n\n\/\/ ServeMuxConfig is a builder for ServeMux.\ntype ServeMuxConfig struct {\n\tdispatcher Dispatcher\n\thandlers []struct {\n\t\tpattern, method string\n\t\th Handler\n\t\tcfgs []InterceptorConfig\n\t}\n\tinterceptors []Interceptor\n\tmethodNotAllowedHandler handlerRegistration\n}\n\n\/\/ NewServeMuxConfig crates a ServeMuxConfig with the provided Dispatcher. If\n\/\/ the provided Dispatcher is nil, the DefaultDispatcher is used.\nfunc NewServeMuxConfig(disp Dispatcher) *ServeMuxConfig {\n\tif disp == nil {\n\t\tdisp = &DefaultDispatcher{}\n\t}\n\treturn &ServeMuxConfig{\n\t\tdispatcher: disp,\n\t\tmethodNotAllowedHandler: handlerRegistration{\n\t\t\thandler: HandlerFunc(defaultMethotNotAllowed),\n\t\t},\n\t}\n}\n\ntype handlerRegistration struct {\n\thandler Handler\n\tcfgs []InterceptorConfig\n}\n\nfunc (s *ServeMuxConfig) Handle(pattern string, method string, h Handler, cfgs ...InterceptorConfig) {\n\ts.handlers = append(s.handlers, struct {\n\t\tpattern string\n\t\tmethod string\n\t\th Handler\n\t\tcfgs []InterceptorConfig\n\t}{\n\t\tpattern: pattern,\n\t\tmethod: method,\n\t\th: h,\n\t\tcfgs: cfgs,\n\t})\n}\n\n\/\/ HandleMethodNotAllowed registers a handler that runs when a given method is\n\/\/ not allowed for a registered path.\nfunc (s *ServeMuxConfig) HandleMethodNotAllowed(h Handler, cfgs ...InterceptorConfig) {\n\ts.methodNotAllowedHandler = handlerRegistration{\n\t\thandler: h,\n\t\tcfgs: cfgs,\n\t}\n}\n\nfunc defaultMethotNotAllowed(w ResponseWriter, req *IncomingRequest) Result {\n\treturn w.WriteError(StatusMethodNotAllowed)\n}\n\n\/\/ Intercept installs the given interceptors.\n\/\/\n\/\/ Interceptors order is respected and interceptors are always run in the\n\/\/ order they've been installed.\n\/\/\n\/\/ Calling Intercept multiple times is valid. Interceptors that are added last\n\/\/ will run last.\nfunc (s *ServeMuxConfig) Intercept(is ...Interceptor) {\n\ts.interceptors = append(s.interceptors, is...)\n}\n\n\/\/ Mux returns the ServeMux with a copy of the current configuration.\nfunc (s *ServeMuxConfig) Mux() *ServeMux {\n\tfreezeLocalDev = true\n\tif IsLocalDev() {\n\t\tlog.Println(\"Warning: creating safehttp.Mux in dev mode. This configuration is not valid for production use\")\n\t}\n\n\tif s.dispatcher == nil {\n\t\tpanic(\"Use NewServeMuxConfig instead of creating ServeMuxConfig using a composite literal.\")\n\t}\n\n\tm := &ServeMux{\n\t\tmux: http.NewServeMux(),\n\t\thandlers: make(map[string]*registeredHandler),\n\t\tdispatcher: s.dispatcher,\n\t\tinterceptors: s.interceptors,\n\t\tmethodNotAllowedHandler: s.methodNotAllowedHandler,\n\t}\n\n\tfor _, handler := range s.handlers {\n\t\tm.Handle(handler.pattern, handler.method, handler.h, handler.cfgs...)\n\t}\n\treturn m\n}\n\n\/\/ Clone creates a copy of the current config.\n\/\/ This can be used to create several instances of Mux that share the same set of\n\/\/ plugins and some common handlers.\nfunc (s *ServeMuxConfig) Clone() *ServeMuxConfig {\n\tc := &ServeMuxConfig{\n\t\tdispatcher: s.dispatcher,\n\t\tinterceptors: make([]Interceptor, len(s.interceptors)),\n\t\tmethodNotAllowedHandler: s.methodNotAllowedHandler,\n\t}\n\tcopy(c.handlers, s.handlers)\n\tcopy(c.interceptors, s.interceptors)\n\treturn c\n}\n\ntype registeredHandler struct {\n\tpattern string\n\tmethods map[string]handlerConfig\n\tmethodNotAllowed handlerConfig\n}\n\nfunc (rh *registeredHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tcfg, ok := rh.methods[r.Method]\n\tif !ok {\n\t\tcfg = rh.methodNotAllowed\n\t}\n\tprocessRequest(cfg, w, r)\n}\n\nfunc (rh *registeredHandler) handleMethod(method string, cfg handlerConfig) {\n\tif _, exists := rh.methods[method]; exists {\n\t\tpanic(fmt.Sprintf(\"double registration of (pattern = %q, method = %q)\", rh.pattern, method))\n\t}\n\trh.methods[method] = cfg\n}\n\nfunc configureInterceptors(interceptors []Interceptor, cfgs []InterceptorConfig) []configuredInterceptor {\n\tvar its []configuredInterceptor\n\tfor _, it := range interceptors {\n\t\tvar matches []InterceptorConfig\n\t\tfor _, c := range cfgs {\n\t\t\tif it.Match(c) {\n\t\t\t\tmatches = append(matches, c)\n\t\t\t}\n\t\t}\n\n\t\tif len(matches) > 1 {\n\t\t\tmsg := fmt.Sprintf(\"multiple configurations specified for interceptor %T: \", it)\n\t\t\tfor _, match := range matches {\n\t\t\t\tmsg += fmt.Sprintf(\"%#v\", match)\n\t\t\t}\n\t\t\tpanic(msg)\n\t\t}\n\n\t\tvar cfg InterceptorConfig\n\t\tif len(matches) == 1 {\n\t\t\tcfg = matches[0]\n\t\t}\n\t\tits = append(its, configuredInterceptor{interceptor: it, config: cfg})\n\t}\n\treturn its\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package instagram provides a minimialist instagram API wrapper.\npackage instagram\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\/\/ \"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\nvar (\n\tbaseUrl = \"https:\/\/api.instagram.com\/v1\"\n)\n\ntype Api struct {\n\tClientId string\n\tAccessToken string\n}\n\n\/\/ Create an API with either a ClientId OR an accessToken. Only one is required. Access tokens are preferred because they keep rate limiting down.\nfunc New(clientId string, accessToken string) *Api {\n\tif clientId == \"\" && accessToken == \"\" {\n\t\tpanic(\"ClientId or AccessToken must be given to create an Api\")\n\t}\n\n\treturn &Api{\n\t\tClientId: clientId,\n\t\tAccessToken: accessToken,\n\t}\n}\n\n\/\/ -- Implementation of request --\n\nfunc buildGetRequest(urlStr string, params url.Values) (*http.Request, error) {\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If we are getting, then we can't merge query params\n\tif params != nil {\n\t\tif u.RawQuery != \"\" {\n\t\t\treturn nil, fmt.Errorf(\"Cannot merge query params in urlStr and params\")\n\t\t}\n\t\tu.RawQuery = params.Encode()\n\t}\n\n\treturn http.NewRequest(\"GET\", u.String(), nil)\n}\n\nfunc (api *Api) extendParams(p url.Values) url.Values {\n\tif p == nil {\n\t\tp = url.Values{}\n\t}\n\tif api.AccessToken != \"\" {\n\t\tp.Set(\"access_token\", api.AccessToken)\n\t} else {\n\t\tp.Set(\"client_id\", api.ClientId)\n\t}\n\treturn p\n}\n\nfunc (api *Api) get(path string, params url.Values, r interface{}) error {\n\tparams = api.extendParams(params)\n\treq, err := buildGetRequest(urlify(path), params)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn api.do(req, r)\n}\n\nfunc (api *Api) do(req *http.Request, r interface{}) error {\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn apiError(resp)\n\t}\n\n\treturn decodeResponse(resp.Body, r)\n}\n\nfunc decodeResponse(body io.Reader, to interface{}) error {\n\t\/\/b, _ := ioutil.ReadAll(body)\n\t\/\/fmt.Println(\"Body:\",string(b))\n\t\/\/ err := json.Unmarshal(b, to)\n\terr := json.NewDecoder(body).Decode(to)\n\t\/\/fmt.Println(\"Body: \" + string(body))\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"instagram: error decoding body; %s\", err.Error())\n\t}\n\treturn nil\n}\n\nfunc apiError(resp *http.Response) error {\n\tm := new(MetaResponse)\n\tif err := decodeResponse(resp.Body, m); err != nil {\n\t\treturn err\n\t}\n\n\tvar err MetaError\n\tif m.Meta != nil {\n\t\terr = MetaError(*m.Meta)\n\t} else {\n\t\terr = MetaError(Meta{Code: resp.StatusCode, ErrorMessage: resp.Status})\n\t}\n\treturn &err\n}\n\nfunc urlify(path string) string {\n\treturn baseUrl + path\n}\n\ntype MetaError Meta\n\nfunc (m *MetaError) Error() string {\n\treturn fmt.Sprintf(\"Error making api call: Code %d %s %s\", m.Code, m.ErrorType, m.ErrorMessage)\n}\n\nfunc ensureParams(v url.Values) url.Values {\n\tif v == nil {\n\t\treturn url.Values{}\n\t}\n\treturn v\n}\n<commit_msg>eduted godeps debug<commit_after>\/\/ Package instagram provides a minimialist instagram API wrapper.\npackage instagram\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\/\/ \"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\nvar (\n\tbaseUrl = \"https:\/\/api.instagram.com\/v1\"\n)\n\ntype Api struct {\n\tClientId string\n\tAccessToken string\n}\n\n\/\/ Create an API with either a ClientId OR an accessToken. Only one is required. Access tokens are preferred because they keep rate limiting down.\nfunc New(clientId string, accessToken string) *Api {\n\tif clientId == \"\" && accessToken == \"\" {\n\t\tpanic(\"ClientId or AccessToken must be given to create an Api\")\n\t}\n\n\treturn &Api{\n\t\tClientId: clientId,\n\t\tAccessToken: accessToken,\n\t}\n}\n\n\/\/ -- Implementation of request --\n\nfunc buildGetRequest(urlStr string, params url.Values) (*http.Request, error) {\n\tu, err := url.Parse(urlStr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfmt.Println(\"params:\",params)\n\tfmt.Println(\"uRawQuery:\",u.RawQuery)\n\tfmt.Println(\"urlStr:\",urlStr)\n\t\/\/ If we are getting, then we can't merge query params\n\tif params != nil {\n\t\tif u.RawQuery != \"\" {\n\t\t\treturn nil, fmt.Errorf(\"Cannot merge query params in urlStr and params\")\n\t\t}\n\t\tu.RawQuery = params.Encode()\n\t}\n\n\treturn http.NewRequest(\"GET\", u.String(), nil)\n}\n\nfunc (api *Api) extendParams(p url.Values) url.Values {\n\tif p == nil {\n\t\tp = url.Values{}\n\t}\n\tif api.AccessToken != \"\" {\n\t\tp.Set(\"access_token\", api.AccessToken)\n\t} else {\n\t\tp.Set(\"client_id\", api.ClientId)\n\t}\n\treturn p\n}\n\nfunc (api *Api) get(path string, params url.Values, r interface{}) error {\n\tparams = api.extendParams(params)\n\treq, err := buildGetRequest(urlify(path), params)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn api.do(req, r)\n}\n\nfunc (api *Api) do(req *http.Request, r interface{}) error {\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn apiError(resp)\n\t}\n\n\treturn decodeResponse(resp.Body, r)\n}\n\nfunc decodeResponse(body io.Reader, to interface{}) error {\n\t\/\/b, _ := ioutil.ReadAll(body)\n\t\/\/fmt.Println(\"Body:\",string(b))\n\t\/\/ err := json.Unmarshal(b, to)\n\terr := json.NewDecoder(body).Decode(to)\n\t\/\/fmt.Println(\"Body: \" + string(body))\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"instagram: error decoding body; %s\", err.Error())\n\t}\n\treturn nil\n}\n\nfunc apiError(resp *http.Response) error {\n\tm := new(MetaResponse)\n\tif err := decodeResponse(resp.Body, m); err != nil {\n\t\treturn err\n\t}\n\n\tvar err MetaError\n\tif m.Meta != nil {\n\t\terr = MetaError(*m.Meta)\n\t} else {\n\t\terr = MetaError(Meta{Code: resp.StatusCode, ErrorMessage: resp.Status})\n\t}\n\treturn &err\n}\n\nfunc urlify(path string) string {\n\treturn baseUrl + path\n}\n\ntype MetaError Meta\n\nfunc (m *MetaError) Error() string {\n\treturn fmt.Sprintf(\"Error making api call: Code %d %s %s\", m.Code, m.ErrorType, m.ErrorMessage)\n}\n\nfunc ensureParams(v url.Values) url.Values {\n\tif v == nil {\n\t\treturn url.Values{}\n\t}\n\treturn v\n}\n<|endoftext|>"} {"text":"<commit_before>package bsdiff\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\n\thumanize \"github.com\/dustin\/go-humanize\"\n)\n\n\/\/ ErrCorrupt indicates that a patch is corrupted, most often that it would produce a longer file\n\/\/ than specified\nvar ErrCorrupt = errors.New(\"corrupt patch\")\n\n\/\/ ReadMessageFunc should read the passed protobuf and relay any errors.\n\/\/ See the `wire` package for an example implementation.\ntype ReadMessageFunc func(msg interface{}) error\n\n\/\/ BSPatch applies patch to old, according to the bspatch algorithm,\n\/\/ and writes the result to new.\nfunc BSPatch(old io.Reader, new io.Writer, newSize int64, readMessage ReadMessageFunc) error {\n\t\/\/ TODO: still debating whether we should take an io.ReadSeeker instead... probably?\n\t\/\/ the consumer can still do `ReadAll` themselves and pass a bytes.NewBuffer()\n\tobuf, err := ioutil.ReadAll(old)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ TODO: write directly to new instead of using a buffer\n\tnbuf := make([]byte, newSize)\n\n\tvar oldpos, newpos int64\n\n\tctrl := &Control{}\n\n\tfor {\n\t\tctrl.Reset()\n\n\t\terr = readMessage(ctrl)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ctrl.Eof {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Sanity-check\n\t\tif newpos+int64(len(ctrl.Add)) > newSize {\n\t\t\treturn ErrCorrupt\n\t\t}\n\n\t\t\/\/ Add old data to diff string\n\t\tfor i := int64(0); i < int64(len(ctrl.Add)); i++ {\n\t\t\tnbuf[newpos+i] = ctrl.Add[i] + obuf[oldpos+i]\n\t\t}\n\n\t\t\/\/ Adjust pointers\n\t\tnewpos += int64(len(ctrl.Add))\n\t\toldpos += int64(len(ctrl.Add))\n\n\t\t\/\/ Sanity-check\n\t\tif newpos+int64(len(ctrl.Copy)) > newSize {\n\t\t\treturn ErrCorrupt\n\t\t}\n\n\t\t\/\/ Read extra string\n\t\tcopy(nbuf[newpos:], ctrl.Copy)\n\n\t\t\/\/ Adjust pointers\n\t\tnewpos += int64(len(ctrl.Copy))\n\t\toldpos += ctrl.Seek\n\t}\n\n\tif newpos != newSize {\n\t\treturn fmt.Errorf(\"bsdiff: expected new file to be %d, was %d (%s difference)\", newSize, newpos, humanize.IBytes(uint64(newSize-newpos)))\n\t}\n\n\t\/\/ Write the new file\n\t_, err = new.Write(nbuf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>BSPatch => Patch<commit_after>package bsdiff\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\n\thumanize \"github.com\/dustin\/go-humanize\"\n)\n\n\/\/ ErrCorrupt indicates that a patch is corrupted, most often that it would produce a longer file\n\/\/ than specified\nvar ErrCorrupt = errors.New(\"corrupt patch\")\n\n\/\/ ReadMessageFunc should read the passed protobuf and relay any errors.\n\/\/ See the `wire` package for an example implementation.\ntype ReadMessageFunc func(msg interface{}) error\n\n\/\/ Patch applies patch to old, according to the bspatch algorithm,\n\/\/ and writes the result to new.\nfunc Patch(old io.Reader, new io.Writer, newSize int64, readMessage ReadMessageFunc) error {\n\t\/\/ TODO: still debating whether we should take an io.ReadSeeker instead... probably?\n\t\/\/ the consumer can still do `ReadAll` themselves and pass a bytes.NewBuffer()\n\tobuf, err := ioutil.ReadAll(old)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ TODO: write directly to new instead of using a buffer\n\tnbuf := make([]byte, newSize)\n\n\tvar oldpos, newpos int64\n\n\tctrl := &Control{}\n\n\tfor {\n\t\tctrl.Reset()\n\n\t\terr = readMessage(ctrl)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ctrl.Eof {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Sanity-check\n\t\tif newpos+int64(len(ctrl.Add)) > newSize {\n\t\t\treturn ErrCorrupt\n\t\t}\n\n\t\t\/\/ Add old data to diff string\n\t\tfor i := int64(0); i < int64(len(ctrl.Add)); i++ {\n\t\t\tnbuf[newpos+i] = ctrl.Add[i] + obuf[oldpos+i]\n\t\t}\n\n\t\t\/\/ Adjust pointers\n\t\tnewpos += int64(len(ctrl.Add))\n\t\toldpos += int64(len(ctrl.Add))\n\n\t\t\/\/ Sanity-check\n\t\tif newpos+int64(len(ctrl.Copy)) > newSize {\n\t\t\treturn ErrCorrupt\n\t\t}\n\n\t\t\/\/ Read extra string\n\t\tcopy(nbuf[newpos:], ctrl.Copy)\n\n\t\t\/\/ Adjust pointers\n\t\tnewpos += int64(len(ctrl.Copy))\n\t\toldpos += ctrl.Seek\n\t}\n\n\tif newpos != newSize {\n\t\treturn fmt.Errorf(\"bsdiff: expected new file to be %d, was %d (%s difference)\", newSize, newpos, humanize.IBytes(uint64(newSize-newpos)))\n\t}\n\n\t\/\/ Write the new file\n\t_, err = new.Write(nbuf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Google Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage version\n\nvar (\n\t\/\/ Major is the current major version of master branch.\n\tMajor = 0\n\t\/\/ Minor is the current minor version of master branch.\n\tMinor = 4\n\t\/\/ Patch is the curernt patched version of the master branch.\n\tPatch = 1\n\t\/\/ Release is the current release level of the master branch. Valid values\n\t\/\/ are dev (developement unreleased), rcX (release candidate with current\n\t\/\/ iteration), stable (indicates a final released version).\n\tRelease = \"dev\"\n)\n<commit_msg>Release 0.4.1 stable preparation<commit_after>\/\/ Copyright 2016 Google Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage version\n\nvar (\n\t\/\/ Major is the current major version of master branch.\n\tMajor = 0\n\t\/\/ Minor is the current minor version of master branch.\n\tMinor = 4\n\t\/\/ Patch is the curernt patched version of the master branch.\n\tPatch = 1\n\t\/\/ Release is the current release level of the master branch. Valid values\n\t\/\/ are dev (developement unreleased), rcX (release candidate with current\n\t\/\/ iteration), stable (indicates a final released version).\n\tRelease = \"stable\"\n)\n<|endoftext|>"} {"text":"<commit_before>package integration_test\n\nimport (\n\t\"path\/filepath\"\n\n\t\"github.com\/cloudfoundry\/libbuildpack\/cutlass\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"JRuby App\", func() {\n\tvar app *cutlass.App\n\n\tAfterEach(func() { app = DestroyApp(app) })\n\n\tContext(\"without start command\", func() {\n\t\tBeforeEach(func() {\n\t\t\tapp = cutlass.New(filepath.Join(bpDir, \"fixtures\", \"sinatra_jruby\"))\n\t\t\tapp.Memory = \"512M\"\n\t\t})\n\n\t\tIt(\"\", func() {\n\t\t\tPushAppAndConfirm(app)\n\n\t\t\tBy(\"the buildpack logged it installed a specific version of JRuby\", func() {\n\t\t\t\tExpect(app.Stdout.String()).To(ContainSubstring(\"Installing openjdk\"))\n\t\t\t\tExpect(app.Stdout.String()).To(MatchRegexp(\"ruby-2.3.\\\\d+-jruby-9.\\\\d+.\\\\d+.0\"))\n\t\t\t\tExpect(app.GetBody(\"\/ruby\")).To(MatchRegexp(\"jruby 2.3.\\\\d+\"))\n\t\t\t})\n\n\t\t\tBy(\"the OpenJDK runs properly\", func() {\n\t\t\t\tExpect(app.Stdout.String()).ToNot(ContainSubstring(\"OpenJDK 64-Bit Server VM warning\"))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"a cached buildpack\", func() {\n\t\t\tBeforeEach(SkipUnlessCached)\n\n\t\t\tAssertNoInternetTraffic(\"sinatra_jruby\")\n\t\t})\n\t})\n\tContext(\"with a jruby start command\", func() {\n\t\tBeforeEach(func() {\n\t\t\tapp = cutlass.New(filepath.Join(bpDir, \"fixtures\", \"jruby_start_command\"))\n\t\t\tapp.Memory = \"512M\"\n\t\t})\n\n\t\tIt(\"stages and runs successfully\", func() {\n\t\t\tPushAppAndConfirm(app)\n\t\t})\n\t})\n})\n<commit_msg>Build assertions from fixture Gemfile for sinatra_jruby integration test [#157851009]<commit_after>package integration_test\n\nimport (\n\t\"fmt\"\n\t\"path\/filepath\"\n\n\t\"github.com\/cloudfoundry\/libbuildpack\/cutlass\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"regexp\"\n\t\"io\/ioutil\"\n)\n\nvar _ = Describe(\"JRuby App\", func() {\n\tvar app *cutlass.App\n\n\tAfterEach(func() { app = DestroyApp(app) })\n\n\tContext(\"without start command\", func() {\n\t\tvar dir, rubyVersion, jrubyVersion string\n\n\t\tBeforeEach(func() {\n\t\t\tdir = filepath.Join(bpDir, \"fixtures\", \"sinatra_jruby\")\n\t\t\tdata, err:= ioutil.ReadFile(filepath.Join(dir, \"Gemfile\"))\n\t\t\tExpect(err).To(BeNil())\n\t\t\tre := regexp.MustCompile(`ruby '(\\d+.\\d+.\\d+)', :engine => 'jruby', :engine_version => '(\\d+.\\d+.\\d+.\\d+)'`)\n\t\t\tmatches := re.FindStringSubmatch(string(data))\n\t\t\trubyVersion = matches[1]\n\t\t\tjrubyVersion = matches[2]\n\n\t\t\tapp = cutlass.New(dir)\n\t\t\tapp.Memory = \"512M\"\n\t\t})\n\n\t\tIt(\"installs the correct version of JRuby\", func() {\n\t\t\tPushAppAndConfirm(app)\n\n\t\t\tBy(\"the buildpack logged it installed a specific version of JRuby\", func() {\n\t\t\t\tExpect(app.Stdout.String()).To(ContainSubstring(\"Installing openjdk\"))\n\t\t\t\tExpect(app.Stdout.String()).To(ContainSubstring(fmt.Sprintf(\"Installing jruby ruby-%s-jruby-%s\", rubyVersion, jrubyVersion)))\n\t\t\t\tExpect(app.GetBody(\"\/ruby\")).To(ContainSubstring(fmt.Sprintf(\"jruby %s\", rubyVersion)))\n\t\t\t})\n\n\t\t\tBy(\"the OpenJDK runs properly\", func() {\n\t\t\t\tExpect(app.Stdout.String()).ToNot(ContainSubstring(\"OpenJDK 64-Bit Server VM warning\"))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"a cached buildpack\", func() {\n\t\t\tBeforeEach(SkipUnlessCached)\n\n\t\t\tAssertNoInternetTraffic(\"sinatra_jruby\")\n\t\t})\n\t})\n\tContext(\"with a jruby start command\", func() {\n\t\tBeforeEach(func() {\n\t\t\tapp = cutlass.New(filepath.Join(bpDir, \"fixtures\", \"jruby_start_command\"))\n\t\t\tapp.Memory = \"512M\"\n\t\t})\n\n\t\tIt(\"stages and runs successfully\", func() {\n\t\t\tPushAppAndConfirm(app)\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage downloader\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"go.chromium.org\/luci\/common\/data\/caching\/cache\"\n\t\"go.chromium.org\/luci\/common\/data\/stringset\"\n\t\"go.chromium.org\/luci\/common\/isolated\"\n\t\"go.chromium.org\/luci\/common\/isolatedclient\"\n\t\"go.chromium.org\/luci\/common\/isolatedclient\/isolatedfake\"\n\t\"go.chromium.org\/luci\/common\/testing\/testfs\"\n\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n)\n\nfunc TestNormalizePathSeparator(t *testing.T) {\n\tt.Parallel()\n\n\tConvey(\"Check path normalization\", t, func() {\n\t\tConvey(\"posix path\", func() {\n\t\t\tSo(normalizePathSeparator(\"a\/b\"), ShouldEqual, filepath.Join(\"a\", \"b\"))\n\t\t})\n\n\t\tConvey(\"windows path\", func() {\n\t\t\tSo(normalizePathSeparator(`a\\b`), ShouldEqual, filepath.Join(\"a\", \"b\"))\n\t\t})\n\t})\n}\n\nfunc TestDownloaderFetchIsolated(t *testing.T) {\n\tt.Parallel()\n\tctx := context.Background()\n\n\tdata1 := []byte(\"hello world!\")\n\tdata2 := []byte(\"wat\")\n\ttardata := genTar(t)\n\n\tnamespace := isolatedclient.DefaultNamespace\n\th := isolated.GetHash(namespace)\n\tserver := isolatedfake.New()\n\tdata1hash := server.Inject(namespace, data1)\n\tdata2hash := server.Inject(namespace, data2)\n\ttardatahash := server.Inject(namespace, tardata)\n\ttardataname := fmt.Sprintf(\"%s.tar\", tardatahash)\n\n\tonePath := filepath.Join(\"foo\", \"one.txt\")\n\ttwoPath := filepath.Join(\"foo\", \"two.txt\")\n\tposixPath := \"posix\/path\"\n\twinPath := `win\\path`\n\tisolated1 := isolated.New(h)\n\tisolated1.Files = map[string]isolated.File{\n\t\tonePath: isolated.BasicFile(data1hash, 0664, int64(len(data1))),\n\t\ttwoPath: isolated.BasicFile(data2hash, 0764, int64(len(data2))),\n\t\tposixPath: isolated.BasicFile(data1hash, 0664, int64(len(data1))),\n\t\twinPath: isolated.BasicFile(data2hash, 0664, int64(len(data2))),\n\t\ttardataname: isolated.TarFile(tardatahash, int64(len(tardata))),\n\t}\n\tisolated1bytes, _ := json.Marshal(&isolated1)\n\tisolated1hash := server.Inject(namespace, isolated1bytes)\n\n\tlolPath := filepath.Join(\"bar\", \"lol.txt\")\n\toloPath := filepath.Join(\"foo\", \"boz\", \"olo.txt\")\n\tisolated2 := isolated.New(h)\n\tisolated2.Files = map[string]isolated.File{\n\t\tlolPath: isolated.BasicFile(data1hash, 0664, int64(len(data1))),\n\t\toloPath: isolated.BasicFile(data2hash, 0664, int64(len(data2))),\n\t}\n\tisolatedFiles := stringset.NewFromSlice([]string{\n\t\ttardataname,\n\t\tonePath,\n\t\ttwoPath,\n\t\tnormalizePathSeparator(posixPath),\n\t\tnormalizePathSeparator(winPath),\n\t\tlolPath,\n\t\toloPath,\n\t\t\/\/ In tardata\n\t\t\"file1\",\n\t\t\"file2\",\n\t\tfilepath.Join(\"tar\", \"posix\", \"path\"),\n\t\tfilepath.Join(\"tar\", \"win\", \"path\"),\n\t}...)\n\tblahPath := \"blah.txt\"\n\n\t\/\/ Symlinks not supported on Windows.\n\tif runtime.GOOS != \"windows\" {\n\t\tisolated2.Files[blahPath] = isolated.SymLink(oloPath)\n\t\tisolatedFiles.Add(blahPath)\n\t}\n\tisolated2.Includes = isolated.HexDigests{isolated1hash}\n\tisolated2bytes, _ := json.Marshal(&isolated2)\n\tisolated2hash := server.Inject(namespace, isolated2bytes)\n\n\tts := httptest.NewServer(server)\n\tdefer ts.Close()\n\tclient := isolatedclient.NewClient(ts.URL, isolatedclient.WithNamespace(namespace))\n\n\tConvey(`A downloader should be able to download the isolated.`, t, func() {\n\t\ttmpDir, err := ioutil.TempDir(\"\", \"isolated\")\n\t\tSo(err, ShouldBeNil)\n\t\tdefer func() {\n\t\t\tSo(os.RemoveAll(tmpDir), ShouldBeNil)\n\t\t}()\n\n\t\tmu := sync.Mutex{}\n\t\tvar files []string\n\t\td := New(ctx, client, isolated2hash, tmpDir, &Options{\n\t\t\tFileCallback: func(name string, _ *isolated.File) {\n\t\t\t\tmu.Lock()\n\t\t\t\tfiles = append(files, name)\n\t\t\t\tmu.Unlock()\n\t\t\t},\n\t\t})\n\t\tSo(d.Wait(), ShouldBeNil)\n\t\tSo(stringset.NewFromSlice(files...), ShouldResemble, isolatedFiles)\n\n\t\tb, err := ioutil.ReadFile(filepath.Join(tmpDir, onePath))\n\t\tSo(err, ShouldBeNil)\n\t\tSo(b, ShouldResemble, data1)\n\n\t\tfi, err := os.Stat(filepath.Join(tmpDir, onePath))\n\t\tSo(err, ShouldBeNil)\n\t\t\/\/ to ignore effect of umask, only check executable bit.\n\t\tSo(fi.Mode()&0111, ShouldEqual, 0)\n\n\t\tb, err = ioutil.ReadFile(filepath.Join(tmpDir, twoPath))\n\t\tSo(err, ShouldBeNil)\n\t\tSo(b, ShouldResemble, data2)\n\n\t\tfi, err = os.Stat(filepath.Join(tmpDir, twoPath))\n\t\tSo(err, ShouldBeNil)\n\t\t\/\/ to ignore effect of umask, only check executable bit.\n\t\tSo(fi.Mode()&0011, ShouldEqual, 0)\n\n\t\tb, err = ioutil.ReadFile(filepath.Join(tmpDir, lolPath))\n\t\tSo(err, ShouldBeNil)\n\t\tSo(b, ShouldResemble, data1)\n\n\t\tb, err = ioutil.ReadFile(filepath.Join(tmpDir, oloPath))\n\t\tSo(err, ShouldBeNil)\n\t\tSo(b, ShouldResemble, data2)\n\n\t\t\/\/ Check files in tar archive\n\t\t_, err = os.Stat(filepath.Join(tmpDir, \"file1\"))\n\t\tSo(err, ShouldBeNil)\n\n\t\t_, err = os.Stat(filepath.Join(tmpDir, \"file2\"))\n\t\tSo(err, ShouldBeNil)\n\n\t\t_, err = os.Stat(filepath.Join(tmpDir, \"tar\", \"posix\", \"path\"))\n\t\tSo(err, ShouldBeNil)\n\n\t\t_, err = os.Stat(filepath.Join(tmpDir, \"tar\", \"win\", \"path\"))\n\t\tSo(err, ShouldBeNil)\n\n\t\t_, err = ioutil.ReadFile(filepath.Join(tmpDir, tardataname))\n\t\tSo(os.IsNotExist(err), ShouldBeTrue)\n\n\t\tif runtime.GOOS != \"windows\" {\n\t\t\tl, err := os.Readlink(filepath.Join(tmpDir, blahPath))\n\t\t\tSo(err, ShouldBeNil)\n\t\t\tSo(l, ShouldResemble, oloPath)\n\t\t}\n\t})\n}\n\n\/\/ genTar returns a valid tar file.\nfunc genTar(t *testing.T) []byte {\n\tb := bytes.Buffer{}\n\ttw := tar.NewWriter(&b)\n\td := []byte(\"hello file1\")\n\tif err := tw.WriteHeader(&tar.Header{Name: \"file1\", Mode: 0644, Typeflag: tar.TypeReg, Size: int64(len(d))}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := tw.Write(d); err != nil {\n\t\tt.Fatal(err)\n\t}\n\td = []byte(strings.Repeat(\"hello file2\", 100))\n\tif err := tw.WriteHeader(&tar.Header{Name: \"file2\", Mode: 0644, Typeflag: tar.TypeReg, Size: int64(len(d))}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := tw.Write(d); err != nil {\n\t\tt.Fatal(err)\n\t}\n\td = []byte(\"posixpath\")\n\tif err := tw.WriteHeader(&tar.Header{Name: \"tar\/posix\/path\", Mode: 0644, Typeflag: tar.TypeReg, Size: int64(len(d))}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := tw.Write(d); err != nil {\n\t\tt.Fatal(err)\n\t}\n\td = []byte(\"winpath\")\n\tif err := tw.WriteHeader(&tar.Header{Name: `tar\\win\\path`, Mode: 0644, Typeflag: tar.TypeReg, Size: int64(len(d))}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := tw.Write(d); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := tw.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn b.Bytes()\n}\n\nfunc TestDownloaderWithCache(t *testing.T) {\n\tt.Parallel()\n\tConvey(`TestDownloaderWithCache`, t, testfs.MustWithTempDir(t, \"\", func(tmpDir string) {\n\t\tctx := context.Background()\n\n\t\tmiss := []byte(\"cache miss\")\n\t\thit := []byte(\"cache hit\")\n\t\tnamespace := isolatedclient.DefaultNamespace\n\t\th := isolated.GetHash(namespace)\n\t\tserver := isolatedfake.New()\n\t\tmisshash := server.Inject(namespace, miss)\n\t\thithash := isolated.HashBytes(isolated.GetHash(namespace), hit)\n\n\t\tmissPath := filepath.Join(\"foo\", \"miss.txt\")\n\t\thitPath := filepath.Join(\"foo\", \"hit.txt\")\n\t\tisolated1 := isolated.New(h)\n\n\t\tisolated1.Files = map[string]isolated.File{\n\t\t\tmissPath: isolated.BasicFile(misshash, 0664, int64(len(miss))),\n\t\t\thitPath: isolated.BasicFile(hithash, 0664, int64(len(hit))),\n\t\t}\n\t\tisolated1bytes, _ := json.Marshal(&isolated1)\n\t\tisolated1hash := server.Inject(namespace, isolated1bytes)\n\n\t\tisolatedFiles := stringset.NewFromSlice([]string{\n\t\t\tmissPath,\n\t\t\thitPath,\n\t\t}...)\n\n\t\tts := httptest.NewServer(server)\n\t\tdefer ts.Close()\n\t\tclient := isolatedclient.NewClient(ts.URL, isolatedclient.WithNamespace(namespace))\n\n\t\tmu := sync.Mutex{}\n\t\tvar files []string\n\t\tpolicy := cache.Policies{\n\t\t\tMaxSize: 1024,\n\t\t\tMaxItems: 1024,\n\t\t}\n\t\tvar cacheObj cache.Cache\n\t\tConvey(\"memcache\", func() {\n\t\t\tcacheObj = cache.NewMemory(policy, namespace)\n\t\t})\n\n\t\tConvey(\"diskcache\", func() {\n\t\t\tcacheDir := filepath.Join(tmpDir, \"cache\")\n\t\t\tSo(os.MkdirAll(cacheDir, os.ModePerm), ShouldBeNil)\n\t\t\tvar err error\n\t\t\tcacheObj, err = cache.NewDisk(policy, cacheDir, namespace)\n\t\t\tSo(err, ShouldBeNil)\n\t\t})\n\n\t\tSo(cacheObj.Add(hithash, bytes.NewReader(hit)), ShouldBeNil)\n\n\t\td := New(ctx, client, isolated1hash, tmpDir, &Options{\n\t\t\tFileCallback: func(name string, _ *isolated.File) {\n\t\t\t\tmu.Lock()\n\t\t\t\tfiles = append(files, name)\n\t\t\t\tmu.Unlock()\n\t\t\t},\n\t\t\tCache: cacheObj,\n\t\t})\n\t\tSo(d.Wait(), ShouldBeNil)\n\t\tSo(stringset.NewFromSlice(files...), ShouldResemble, isolatedFiles)\n\n\t\tSo(cacheObj.Touch(misshash), ShouldBeTrue)\n\t}))\n}\n\nfunc TestFetchAndMap(t *testing.T) {\n\tt.Parallel()\n\tConvey(`TestFetchAndMap`, t, func() {\n\t\tctx := context.Background()\n\t\ttmpDir, err := ioutil.TempDir(\"\", \"isolated\")\n\t\tSo(err, ShouldBeNil)\n\t\tdefer func() {\n\t\t\tSo(os.RemoveAll(tmpDir), ShouldBeNil)\n\t\t}()\n\n\t\tdata1 := []byte(\"hello world!\")\n\n\t\tnamespace := isolatedclient.DefaultNamespace\n\t\th := isolated.GetHash(namespace)\n\t\tserver := isolatedfake.New()\n\t\tdata1hash := server.Inject(namespace, data1)\n\n\t\tonePath := filepath.Join(\"foo\", \"one.txt\")\n\t\tonePathFile := isolated.BasicFile(data1hash, 0664, int64(len(data1)))\n\t\tisolated1 := isolated.New(h)\n\n\t\ttardata := genTar(t)\n\t\ttarhash := server.Inject(namespace, tardata)\n\n\t\tisolated1.Files = map[string]isolated.File{\n\t\t\tonePath: onePathFile,\n\t\t\t\"tar.tar\": isolated.TarFile(tarhash, int64(len(tardata))),\n\t\t}\n\t\tisolated1bytes, _ := json.Marshal(&isolated1)\n\t\tisolated1hash := server.Inject(namespace, isolated1bytes)\n\n\t\tts := httptest.NewServer(server)\n\t\tdefer ts.Close()\n\t\tclient := isolatedclient.NewClient(ts.URL, isolatedclient.WithNamespace(namespace))\n\n\t\tpolicy := cache.Policies{\n\t\t\tMaxSize: 1024 * 1024,\n\t\t\tMaxItems: 1024,\n\t\t}\n\t\tmemcache := cache.NewMemory(policy, namespace)\n\n\t\tisomap, stats, err := FetchAndMap(ctx, isolated1hash, client, memcache, tmpDir)\n\t\tSo(err, ShouldBeNil)\n\t\tSo(isomap, ShouldResemble, &isolated.Isolated{\n\t\t\tAlgo: \"sha-1\",\n\t\t\tVersion: \"1.4\",\n\t\t})\n\n\t\tSo(stats.Duration, ShouldBeGreaterThan, 0)\n\t\tSo(stats.ItemsCold, ShouldResemble, []byte{120, 156, 226, 249, 162, 15, 8, 0, 0, 255, 255, 2, 62, 1, 48})\n\t\tSo(stats.ItemsHot, ShouldResemble, []byte(nil))\n\n\t\tbuf, err := ioutil.ReadFile(filepath.Join(tmpDir, onePath))\n\t\tSo(err, ShouldBeNil)\n\t\tSo(buf, ShouldResemble, data1)\n\n\t\tSo(memcache.Touch(tarhash), ShouldBeTrue)\n\t})\n}\n<commit_msg>downloader: remove cache.NewMemory usage<commit_after>\/\/ Copyright 2017 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage downloader\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"go.chromium.org\/luci\/common\/data\/caching\/cache\"\n\t\"go.chromium.org\/luci\/common\/data\/stringset\"\n\t\"go.chromium.org\/luci\/common\/isolated\"\n\t\"go.chromium.org\/luci\/common\/isolatedclient\"\n\t\"go.chromium.org\/luci\/common\/isolatedclient\/isolatedfake\"\n\t\"go.chromium.org\/luci\/common\/testing\/testfs\"\n\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n)\n\nfunc TestNormalizePathSeparator(t *testing.T) {\n\tt.Parallel()\n\n\tConvey(\"Check path normalization\", t, func() {\n\t\tConvey(\"posix path\", func() {\n\t\t\tSo(normalizePathSeparator(\"a\/b\"), ShouldEqual, filepath.Join(\"a\", \"b\"))\n\t\t})\n\n\t\tConvey(\"windows path\", func() {\n\t\t\tSo(normalizePathSeparator(`a\\b`), ShouldEqual, filepath.Join(\"a\", \"b\"))\n\t\t})\n\t})\n}\n\nfunc TestDownloaderFetchIsolated(t *testing.T) {\n\tt.Parallel()\n\tctx := context.Background()\n\n\tdata1 := []byte(\"hello world!\")\n\tdata2 := []byte(\"wat\")\n\ttardata := genTar(t)\n\n\tnamespace := isolatedclient.DefaultNamespace\n\th := isolated.GetHash(namespace)\n\tserver := isolatedfake.New()\n\tdata1hash := server.Inject(namespace, data1)\n\tdata2hash := server.Inject(namespace, data2)\n\ttardatahash := server.Inject(namespace, tardata)\n\ttardataname := fmt.Sprintf(\"%s.tar\", tardatahash)\n\n\tonePath := filepath.Join(\"foo\", \"one.txt\")\n\ttwoPath := filepath.Join(\"foo\", \"two.txt\")\n\tposixPath := \"posix\/path\"\n\twinPath := `win\\path`\n\tisolated1 := isolated.New(h)\n\tisolated1.Files = map[string]isolated.File{\n\t\tonePath: isolated.BasicFile(data1hash, 0664, int64(len(data1))),\n\t\ttwoPath: isolated.BasicFile(data2hash, 0764, int64(len(data2))),\n\t\tposixPath: isolated.BasicFile(data1hash, 0664, int64(len(data1))),\n\t\twinPath: isolated.BasicFile(data2hash, 0664, int64(len(data2))),\n\t\ttardataname: isolated.TarFile(tardatahash, int64(len(tardata))),\n\t}\n\tisolated1bytes, _ := json.Marshal(&isolated1)\n\tisolated1hash := server.Inject(namespace, isolated1bytes)\n\n\tlolPath := filepath.Join(\"bar\", \"lol.txt\")\n\toloPath := filepath.Join(\"foo\", \"boz\", \"olo.txt\")\n\tisolated2 := isolated.New(h)\n\tisolated2.Files = map[string]isolated.File{\n\t\tlolPath: isolated.BasicFile(data1hash, 0664, int64(len(data1))),\n\t\toloPath: isolated.BasicFile(data2hash, 0664, int64(len(data2))),\n\t}\n\tisolatedFiles := stringset.NewFromSlice([]string{\n\t\ttardataname,\n\t\tonePath,\n\t\ttwoPath,\n\t\tnormalizePathSeparator(posixPath),\n\t\tnormalizePathSeparator(winPath),\n\t\tlolPath,\n\t\toloPath,\n\t\t\/\/ In tardata\n\t\t\"file1\",\n\t\t\"file2\",\n\t\tfilepath.Join(\"tar\", \"posix\", \"path\"),\n\t\tfilepath.Join(\"tar\", \"win\", \"path\"),\n\t}...)\n\tblahPath := \"blah.txt\"\n\n\t\/\/ Symlinks not supported on Windows.\n\tif runtime.GOOS != \"windows\" {\n\t\tisolated2.Files[blahPath] = isolated.SymLink(oloPath)\n\t\tisolatedFiles.Add(blahPath)\n\t}\n\tisolated2.Includes = isolated.HexDigests{isolated1hash}\n\tisolated2bytes, _ := json.Marshal(&isolated2)\n\tisolated2hash := server.Inject(namespace, isolated2bytes)\n\n\tts := httptest.NewServer(server)\n\tdefer ts.Close()\n\tclient := isolatedclient.NewClient(ts.URL, isolatedclient.WithNamespace(namespace))\n\n\tConvey(`A downloader should be able to download the isolated.`, t, func() {\n\t\ttmpDir, err := ioutil.TempDir(\"\", \"isolated\")\n\t\tSo(err, ShouldBeNil)\n\t\tdefer func() {\n\t\t\tSo(os.RemoveAll(tmpDir), ShouldBeNil)\n\t\t}()\n\n\t\tmu := sync.Mutex{}\n\t\tvar files []string\n\t\td := New(ctx, client, isolated2hash, tmpDir, &Options{\n\t\t\tFileCallback: func(name string, _ *isolated.File) {\n\t\t\t\tmu.Lock()\n\t\t\t\tfiles = append(files, name)\n\t\t\t\tmu.Unlock()\n\t\t\t},\n\t\t})\n\t\tSo(d.Wait(), ShouldBeNil)\n\t\tSo(stringset.NewFromSlice(files...), ShouldResemble, isolatedFiles)\n\n\t\tb, err := ioutil.ReadFile(filepath.Join(tmpDir, onePath))\n\t\tSo(err, ShouldBeNil)\n\t\tSo(b, ShouldResemble, data1)\n\n\t\tfi, err := os.Stat(filepath.Join(tmpDir, onePath))\n\t\tSo(err, ShouldBeNil)\n\t\t\/\/ to ignore effect of umask, only check executable bit.\n\t\tSo(fi.Mode()&0111, ShouldEqual, 0)\n\n\t\tb, err = ioutil.ReadFile(filepath.Join(tmpDir, twoPath))\n\t\tSo(err, ShouldBeNil)\n\t\tSo(b, ShouldResemble, data2)\n\n\t\tfi, err = os.Stat(filepath.Join(tmpDir, twoPath))\n\t\tSo(err, ShouldBeNil)\n\t\t\/\/ to ignore effect of umask, only check executable bit.\n\t\tSo(fi.Mode()&0011, ShouldEqual, 0)\n\n\t\tb, err = ioutil.ReadFile(filepath.Join(tmpDir, lolPath))\n\t\tSo(err, ShouldBeNil)\n\t\tSo(b, ShouldResemble, data1)\n\n\t\tb, err = ioutil.ReadFile(filepath.Join(tmpDir, oloPath))\n\t\tSo(err, ShouldBeNil)\n\t\tSo(b, ShouldResemble, data2)\n\n\t\t\/\/ Check files in tar archive\n\t\t_, err = os.Stat(filepath.Join(tmpDir, \"file1\"))\n\t\tSo(err, ShouldBeNil)\n\n\t\t_, err = os.Stat(filepath.Join(tmpDir, \"file2\"))\n\t\tSo(err, ShouldBeNil)\n\n\t\t_, err = os.Stat(filepath.Join(tmpDir, \"tar\", \"posix\", \"path\"))\n\t\tSo(err, ShouldBeNil)\n\n\t\t_, err = os.Stat(filepath.Join(tmpDir, \"tar\", \"win\", \"path\"))\n\t\tSo(err, ShouldBeNil)\n\n\t\t_, err = ioutil.ReadFile(filepath.Join(tmpDir, tardataname))\n\t\tSo(os.IsNotExist(err), ShouldBeTrue)\n\n\t\tif runtime.GOOS != \"windows\" {\n\t\t\tl, err := os.Readlink(filepath.Join(tmpDir, blahPath))\n\t\t\tSo(err, ShouldBeNil)\n\t\t\tSo(l, ShouldResemble, oloPath)\n\t\t}\n\t})\n}\n\n\/\/ genTar returns a valid tar file.\nfunc genTar(t *testing.T) []byte {\n\tb := bytes.Buffer{}\n\ttw := tar.NewWriter(&b)\n\td := []byte(\"hello file1\")\n\tif err := tw.WriteHeader(&tar.Header{Name: \"file1\", Mode: 0644, Typeflag: tar.TypeReg, Size: int64(len(d))}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := tw.Write(d); err != nil {\n\t\tt.Fatal(err)\n\t}\n\td = []byte(strings.Repeat(\"hello file2\", 100))\n\tif err := tw.WriteHeader(&tar.Header{Name: \"file2\", Mode: 0644, Typeflag: tar.TypeReg, Size: int64(len(d))}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := tw.Write(d); err != nil {\n\t\tt.Fatal(err)\n\t}\n\td = []byte(\"posixpath\")\n\tif err := tw.WriteHeader(&tar.Header{Name: \"tar\/posix\/path\", Mode: 0644, Typeflag: tar.TypeReg, Size: int64(len(d))}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := tw.Write(d); err != nil {\n\t\tt.Fatal(err)\n\t}\n\td = []byte(\"winpath\")\n\tif err := tw.WriteHeader(&tar.Header{Name: `tar\\win\\path`, Mode: 0644, Typeflag: tar.TypeReg, Size: int64(len(d))}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := tw.Write(d); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := tw.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn b.Bytes()\n}\n\nfunc TestDownloaderWithCache(t *testing.T) {\n\tt.Parallel()\n\tConvey(`TestDownloaderWithCache`, t, testfs.MustWithTempDir(t, \"\", func(tmpDir string) {\n\t\tctx := context.Background()\n\n\t\tmiss := []byte(\"cache miss\")\n\t\thit := []byte(\"cache hit\")\n\t\tnamespace := isolatedclient.DefaultNamespace\n\t\th := isolated.GetHash(namespace)\n\t\tserver := isolatedfake.New()\n\t\tmisshash := server.Inject(namespace, miss)\n\t\thithash := isolated.HashBytes(isolated.GetHash(namespace), hit)\n\n\t\tmissPath := filepath.Join(\"foo\", \"miss.txt\")\n\t\thitPath := filepath.Join(\"foo\", \"hit.txt\")\n\t\tisolated1 := isolated.New(h)\n\n\t\tisolated1.Files = map[string]isolated.File{\n\t\t\tmissPath: isolated.BasicFile(misshash, 0664, int64(len(miss))),\n\t\t\thitPath: isolated.BasicFile(hithash, 0664, int64(len(hit))),\n\t\t}\n\t\tisolated1bytes, _ := json.Marshal(&isolated1)\n\t\tisolated1hash := server.Inject(namespace, isolated1bytes)\n\n\t\tisolatedFiles := stringset.NewFromSlice([]string{\n\t\t\tmissPath,\n\t\t\thitPath,\n\t\t}...)\n\n\t\tts := httptest.NewServer(server)\n\t\tdefer ts.Close()\n\t\tclient := isolatedclient.NewClient(ts.URL, isolatedclient.WithNamespace(namespace))\n\n\t\tmu := sync.Mutex{}\n\t\tvar files []string\n\t\tpolicy := cache.Policies{\n\t\t\tMaxSize: 1024,\n\t\t\tMaxItems: 1024,\n\t\t}\n\t\tvar cacheObj cache.Cache\n\t\tConvey(\"diskcache\", func() {\n\t\t\tcacheDir := filepath.Join(tmpDir, \"cache\")\n\t\t\tSo(os.MkdirAll(cacheDir, os.ModePerm), ShouldBeNil)\n\t\t\tvar err error\n\t\t\tcacheObj, err = cache.NewDisk(policy, cacheDir, namespace)\n\t\t\tSo(err, ShouldBeNil)\n\t\t})\n\n\t\tSo(cacheObj.Add(hithash, bytes.NewReader(hit)), ShouldBeNil)\n\n\t\td := New(ctx, client, isolated1hash, tmpDir, &Options{\n\t\t\tFileCallback: func(name string, _ *isolated.File) {\n\t\t\t\tmu.Lock()\n\t\t\t\tfiles = append(files, name)\n\t\t\t\tmu.Unlock()\n\t\t\t},\n\t\t\tCache: cacheObj,\n\t\t})\n\t\tSo(d.Wait(), ShouldBeNil)\n\t\tSo(stringset.NewFromSlice(files...), ShouldResemble, isolatedFiles)\n\n\t\tSo(cacheObj.Touch(misshash), ShouldBeTrue)\n\t}))\n}\n\nfunc TestFetchAndMap(t *testing.T) {\n\tt.Parallel()\n\n\tConvey(`TestFetchAndMap`, t, testfs.MustWithTempDir(t, \"\", func(tmpdir string) {\n\t\tctx := context.Background()\n\t\tisolatedDir := filepath.Join(tmpdir, \"isolated\")\n\t\tSo(os.Mkdir(isolatedDir, 0o700), ShouldBeNil)\n\n\t\tdata1 := []byte(\"hello world!\")\n\n\t\tnamespace := isolatedclient.DefaultNamespace\n\t\th := isolated.GetHash(namespace)\n\t\tserver := isolatedfake.New()\n\t\tdata1hash := server.Inject(namespace, data1)\n\n\t\tonePath := filepath.Join(\"foo\", \"one.txt\")\n\t\tonePathFile := isolated.BasicFile(data1hash, 0664, int64(len(data1)))\n\t\tisolated1 := isolated.New(h)\n\n\t\ttardata := genTar(t)\n\t\ttarhash := server.Inject(namespace, tardata)\n\n\t\tisolated1.Files = map[string]isolated.File{\n\t\t\tonePath: onePathFile,\n\t\t\t\"tar.tar\": isolated.TarFile(tarhash, int64(len(tardata))),\n\t\t}\n\t\tisolated1bytes, _ := json.Marshal(&isolated1)\n\t\tisolated1hash := server.Inject(namespace, isolated1bytes)\n\n\t\tts := httptest.NewServer(server)\n\t\tdefer ts.Close()\n\t\tclient := isolatedclient.NewClient(ts.URL, isolatedclient.WithNamespace(namespace))\n\n\t\tpolicy := cache.Policies{\n\t\t\tMaxSize: 1024 * 1024,\n\t\t\tMaxItems: 1024,\n\t\t}\n\t\tdiskcache, err := cache.NewDisk(policy, filepath.Join(tmpdir, \"cache\"), namespace)\n\t\tSo(err, ShouldBeNil)\n\t\tdefer func() {\n\t\t\tSo(diskcache.Close(), ShouldBeNil)\n\t\t}()\n\n\t\tisomap, stats, err := FetchAndMap(ctx, isolated1hash, client, diskcache, isolatedDir)\n\t\tSo(err, ShouldBeNil)\n\t\tSo(isomap, ShouldResemble, &isolated.Isolated{\n\t\t\tAlgo: \"sha-1\",\n\t\t\tVersion: \"1.4\",\n\t\t})\n\n\t\tSo(stats.Duration, ShouldBeGreaterThan, 0)\n\t\tSo(stats.ItemsCold, ShouldResemble, []byte{120, 156, 226, 249, 162, 15, 8, 0, 0, 255, 255, 2, 62, 1, 48})\n\t\tSo(stats.ItemsHot, ShouldResemble, []byte(nil))\n\n\t\tbuf, err := ioutil.ReadFile(filepath.Join(isolatedDir, onePath))\n\t\tSo(err, ShouldBeNil)\n\t\tSo(buf, ShouldResemble, data1)\n\n\t\tSo(diskcache.Touch(tarhash), ShouldBeTrue)\n\t}))\n}\n<|endoftext|>"} {"text":"<commit_before>package genetics\n\ntype Population struct {\n\t\/\/ Species in the Population. Note that the species should comprise all the genomes\n\tspecies []*Species\n\t\/\/ The highest species number\n\tlastSpecies int\n\n}\n<commit_msg>Added additional fields<commit_after>package genetics\n\ntype Population struct {\n\t\/\/ Species in the Population. Note that the species should comprise all the genomes\n\tSpecies []*Species\n\t\/\/ The highest species number\n\tLastSpecies int\n\t\/\/ For holding the genetic innovations of the newest generation\n\tInnovations []*Innovation\n\n\n\t\/\/ The current innovation number for population\n\tcurrInnovNum int64\n}\n\n\/\/ Returns current innovation number and increment innovations number counter after that\nfunc (p *Population) getInnovationNumberAndIncrement() int64 {\n\tinn_num := p.currInnovNum\n\tp.currInnovNum++\n\treturn inn_num\n}\n<|endoftext|>"} {"text":"<commit_before>package workflow\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\n\t\"github.com\/openshift\/installer\/installer\/pkg\/config\"\n\tconfiggenerator \"github.com\/openshift\/installer\/installer\/pkg\/config-generator\"\n)\n\nconst (\n\tassetsStep = \"assets\"\n\tbinaryPrefix = \"installer\"\n\tbootstrapOff = \"-var=tectonic_bootstrap=false\"\n\tbootstrapOn = \"-var=tectonic_bootstrap=true\"\n\tconfigFileName = \"config.yaml\"\n\tetcdStep = \"etcd\"\n\tinternalFileName = \"internal.yaml\"\n\tjoinWorkersStep = \"joining_workers\"\n\tmastersStep = \"masters\"\n\tnewTLSStep = \"newtls\"\n\tstepsBaseDir = \"steps\"\n\ttlsStep = \"tls\"\n\ttncDNSStep = \"tnc_dns\"\n\ttopologyStep = \"topology\"\n)\n\nfunc copyFile(fromFilePath, toFilePath string) error {\n\tfrom, err := os.Open(fromFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer from.Close()\n\n\tto, err := os.OpenFile(toFilePath, os.O_RDWR|os.O_CREATE, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer to.Close()\n\n\t_, err = io.Copy(to, from)\n\treturn err\n}\n\n\/\/ returns the directory containing templates for a given step. If platform is\n\/\/ specified, it looks for a subdirectory with platform first, falling back if\n\/\/ there are no platform-specific templates for that step\nfunc findStepTemplates(stepName string, platform config.Platform) (string, error) {\n\tbase, err := baseLocation()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error looking up step %s templates: %v\", stepName, err)\n\t}\n\tfor _, path := range []string{\n\t\tfilepath.Join(base, stepsBaseDir, stepName, platformPath(platform)),\n\t\tfilepath.Join(base, stepsBaseDir, stepName)} {\n\n\t\tstat, err := os.Stat(path)\n\t\tif err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn \"\", fmt.Errorf(\"invalid path for '%s' templates: %s\", base, err)\n\t\t}\n\t\tif !stat.IsDir() {\n\t\t\treturn \"\", fmt.Errorf(\"invalid path for '%s' templates\", base)\n\t\t}\n\t\treturn path, nil\n\t}\n\treturn \"\", os.ErrNotExist\n}\n\nfunc platformPath(platform config.Platform) string {\n\tswitch platform {\n\tcase config.PlatformLibvirt:\n\t\treturn \"libvirt\"\n\tcase config.PlatformAWS:\n\t\treturn \"aws\"\n\t}\n\tpanic(\"invalid platform\")\n}\n\nfunc generateClusterConfigMaps(m *metadata) error {\n\tclusterGeneratedPath := filepath.Join(m.clusterDir, generatedPath)\n\tif err := os.MkdirAll(clusterGeneratedPath, os.ModeDir|0755); err != nil {\n\t\treturn fmt.Errorf(\"Failed to create cluster generated directory at %s\", clusterGeneratedPath)\n\t}\n\n\tconfigGenerator := configgenerator.New(m.cluster)\n\n\tkcoConfig, err := configGenerator.CoreConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkcoConfigFilePath := filepath.Join(clusterGeneratedPath, kcoConfigFileName)\n\tif err := writeFile(kcoConfigFilePath, kcoConfig); err != nil {\n\t\treturn err\n\t}\n\n\ttncoConfig, err := configGenerator.TncoConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttncoConfigFilePath := filepath.Join(clusterGeneratedPath, tncoConfigFileName)\n\tif err := writeFile(tncoConfigFilePath, tncoConfig); err != nil {\n\t\treturn err\n\t}\n\n\tkubeSystem, err := configGenerator.KubeSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkubePath := filepath.Join(m.clusterDir, kubeSystemPath)\n\tif err := os.MkdirAll(kubePath, os.ModeDir|0755); err != nil {\n\t\treturn fmt.Errorf(\"Failed to create manifests directory at %s\", kubePath)\n\t}\n\n\tkubeSystemConfigFilePath := filepath.Join(kubePath, kubeSystemFileName)\n\tif err := writeFile(kubeSystemConfigFilePath, kubeSystem); err != nil {\n\t\treturn err\n\t}\n\n\ttectonicSystem, err := configGenerator.TectonicSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttectonicPath := filepath.Join(m.clusterDir, tectonicSystemPath)\n\tif err := os.MkdirAll(tectonicPath, os.ModeDir|0755); err != nil {\n\t\treturn fmt.Errorf(\"Failed to create tectonic directory at %s\", tectonicPath)\n\t}\n\n\ttectonicSystemConfigFilePath := filepath.Join(tectonicPath, tectonicSystemFileName)\n\treturn writeFile(tectonicSystemConfigFilePath, tectonicSystem)\n}\n\nfunc readClusterConfig(configFilePath string, internalFilePath string) (*config.Cluster, error) {\n\tcfg, err := config.ParseConfigFile(configFilePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s is not a valid config file: %s\", configFilePath, err)\n\t}\n\n\tif internalFilePath != \"\" {\n\t\tinternal, err := config.ParseInternalFile(internalFilePath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"%s is not a valid internal file: %s\", internalFilePath, err)\n\t\t}\n\t\tcfg.Internal = *internal\n\t}\n\n\treturn cfg, nil\n}\n\nfunc readClusterConfigStep(m *metadata) error {\n\tif m.clusterDir == \"\" {\n\t\treturn errors.New(\"no cluster dir given for reading config\")\n\t}\n\tconfigFilePath := filepath.Join(m.clusterDir, configFileName)\n\tinternalFilePath := filepath.Join(m.clusterDir, internalFileName)\n\n\tcluster, err := readClusterConfig(configFilePath, internalFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := cluster.ValidateAndLog(); err != nil {\n\t\treturn err\n\t}\n\n\tm.cluster = *cluster\n\n\treturn nil\n}\n\nfunc writeFile(path, content string) error {\n\tf, err := os.Create(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tw := bufio.NewWriter(f)\n\tif _, err := fmt.Fprintln(w, content); err != nil {\n\t\treturn err\n\t}\n\tw.Flush()\n\n\treturn nil\n}\n\nfunc baseLocation() (string, error) {\n\tex, err := os.Executable()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"undetermined location of own executable: %s\", err)\n\t}\n\tex = path.Dir(ex)\n\tif path.Base(ex) != binaryPrefix {\n\t\treturn \"\", fmt.Errorf(\"%s executable in unknown location: %s\", path.Base(ex), err)\n\t}\n\treturn path.Dir(ex), nil\n}\n\nfunc clusterIsBootstrapped(stateDir string) bool {\n\treturn hasStateFile(stateDir, topologyStep) &&\n\t\thasStateFile(stateDir, mastersStep) &&\n\t\thasStateFile(stateDir, tncDNSStep)\n}\n\nfunc createTNCCNAME(m *metadata) error {\n\treturn runInstallStep(m, tncDNSStep, []string{bootstrapOn}...)\n}\n\nfunc createTNCARecord(m *metadata) error {\n\treturn runInstallStep(m, tncDNSStep, []string{bootstrapOff}...)\n}\n\nfunc destroyTNCDNS(m *metadata) error {\n\treturn runDestroyStep(m, tncDNSStep, []string{bootstrapOff}...)\n}\n<commit_msg>installer\/pkg\/workflow\/utils: Simplify findStepTemplates path logic<commit_after>package workflow\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\n\t\"github.com\/openshift\/installer\/installer\/pkg\/config\"\n\tconfiggenerator \"github.com\/openshift\/installer\/installer\/pkg\/config-generator\"\n)\n\nconst (\n\tassetsStep = \"assets\"\n\tbinaryPrefix = \"installer\"\n\tbootstrapOff = \"-var=tectonic_bootstrap=false\"\n\tbootstrapOn = \"-var=tectonic_bootstrap=true\"\n\tconfigFileName = \"config.yaml\"\n\tetcdStep = \"etcd\"\n\tinternalFileName = \"internal.yaml\"\n\tjoinWorkersStep = \"joining_workers\"\n\tmastersStep = \"masters\"\n\tnewTLSStep = \"newtls\"\n\tstepsBaseDir = \"steps\"\n\ttlsStep = \"tls\"\n\ttncDNSStep = \"tnc_dns\"\n\ttopologyStep = \"topology\"\n)\n\nfunc copyFile(fromFilePath, toFilePath string) error {\n\tfrom, err := os.Open(fromFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer from.Close()\n\n\tto, err := os.OpenFile(toFilePath, os.O_RDWR|os.O_CREATE, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer to.Close()\n\n\t_, err = io.Copy(to, from)\n\treturn err\n}\n\n\/\/ returns the directory containing templates for a given step. If platform is\n\/\/ specified, it looks for a subdirectory with platform first, falling back if\n\/\/ there are no platform-specific templates for that step\nfunc findStepTemplates(stepName string, platform config.Platform) (string, error) {\n\tbase, err := baseLocation()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error looking up step %s templates: %v\", stepName, err)\n\t}\n\tstepDir := filepath.Join(base, stepsBaseDir, stepName)\n\tfor _, path := range []string{\n\t\tfilepath.Join(stepDir, string(platform)),\n\t\tstepDir} {\n\n\t\tstat, err := os.Stat(path)\n\t\tif err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn \"\", fmt.Errorf(\"invalid path for '%s' templates: %s\", base, err)\n\t\t}\n\t\tif !stat.IsDir() {\n\t\t\treturn \"\", fmt.Errorf(\"invalid path for '%s' templates\", base)\n\t\t}\n\t\treturn path, nil\n\t}\n\treturn \"\", os.ErrNotExist\n}\n\nfunc generateClusterConfigMaps(m *metadata) error {\n\tclusterGeneratedPath := filepath.Join(m.clusterDir, generatedPath)\n\tif err := os.MkdirAll(clusterGeneratedPath, os.ModeDir|0755); err != nil {\n\t\treturn fmt.Errorf(\"Failed to create cluster generated directory at %s\", clusterGeneratedPath)\n\t}\n\n\tconfigGenerator := configgenerator.New(m.cluster)\n\n\tkcoConfig, err := configGenerator.CoreConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkcoConfigFilePath := filepath.Join(clusterGeneratedPath, kcoConfigFileName)\n\tif err := writeFile(kcoConfigFilePath, kcoConfig); err != nil {\n\t\treturn err\n\t}\n\n\ttncoConfig, err := configGenerator.TncoConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttncoConfigFilePath := filepath.Join(clusterGeneratedPath, tncoConfigFileName)\n\tif err := writeFile(tncoConfigFilePath, tncoConfig); err != nil {\n\t\treturn err\n\t}\n\n\tkubeSystem, err := configGenerator.KubeSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tkubePath := filepath.Join(m.clusterDir, kubeSystemPath)\n\tif err := os.MkdirAll(kubePath, os.ModeDir|0755); err != nil {\n\t\treturn fmt.Errorf(\"Failed to create manifests directory at %s\", kubePath)\n\t}\n\n\tkubeSystemConfigFilePath := filepath.Join(kubePath, kubeSystemFileName)\n\tif err := writeFile(kubeSystemConfigFilePath, kubeSystem); err != nil {\n\t\treturn err\n\t}\n\n\ttectonicSystem, err := configGenerator.TectonicSystem()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttectonicPath := filepath.Join(m.clusterDir, tectonicSystemPath)\n\tif err := os.MkdirAll(tectonicPath, os.ModeDir|0755); err != nil {\n\t\treturn fmt.Errorf(\"Failed to create tectonic directory at %s\", tectonicPath)\n\t}\n\n\ttectonicSystemConfigFilePath := filepath.Join(tectonicPath, tectonicSystemFileName)\n\treturn writeFile(tectonicSystemConfigFilePath, tectonicSystem)\n}\n\nfunc readClusterConfig(configFilePath string, internalFilePath string) (*config.Cluster, error) {\n\tcfg, err := config.ParseConfigFile(configFilePath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%s is not a valid config file: %s\", configFilePath, err)\n\t}\n\n\tif internalFilePath != \"\" {\n\t\tinternal, err := config.ParseInternalFile(internalFilePath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"%s is not a valid internal file: %s\", internalFilePath, err)\n\t\t}\n\t\tcfg.Internal = *internal\n\t}\n\n\treturn cfg, nil\n}\n\nfunc readClusterConfigStep(m *metadata) error {\n\tif m.clusterDir == \"\" {\n\t\treturn errors.New(\"no cluster dir given for reading config\")\n\t}\n\tconfigFilePath := filepath.Join(m.clusterDir, configFileName)\n\tinternalFilePath := filepath.Join(m.clusterDir, internalFileName)\n\n\tcluster, err := readClusterConfig(configFilePath, internalFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := cluster.ValidateAndLog(); err != nil {\n\t\treturn err\n\t}\n\n\tm.cluster = *cluster\n\n\treturn nil\n}\n\nfunc writeFile(path, content string) error {\n\tf, err := os.Create(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tw := bufio.NewWriter(f)\n\tif _, err := fmt.Fprintln(w, content); err != nil {\n\t\treturn err\n\t}\n\tw.Flush()\n\n\treturn nil\n}\n\nfunc baseLocation() (string, error) {\n\tex, err := os.Executable()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"undetermined location of own executable: %s\", err)\n\t}\n\tex = path.Dir(ex)\n\tif path.Base(ex) != binaryPrefix {\n\t\treturn \"\", fmt.Errorf(\"%s executable in unknown location: %s\", path.Base(ex), err)\n\t}\n\treturn path.Dir(ex), nil\n}\n\nfunc clusterIsBootstrapped(stateDir string) bool {\n\treturn hasStateFile(stateDir, topologyStep) &&\n\t\thasStateFile(stateDir, mastersStep) &&\n\t\thasStateFile(stateDir, tncDNSStep)\n}\n\nfunc createTNCCNAME(m *metadata) error {\n\treturn runInstallStep(m, tncDNSStep, []string{bootstrapOn}...)\n}\n\nfunc createTNCARecord(m *metadata) error {\n\treturn runInstallStep(m, tncDNSStep, []string{bootstrapOff}...)\n}\n\nfunc destroyTNCDNS(m *metadata) error {\n\treturn runDestroyStep(m, tncDNSStep, []string{bootstrapOff}...)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 Google Inc. All Rights Reserved.\n\/\/ This file is available under the Apache license.\n\npackage metrics\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"reflect\"\n\t\"sync\"\n\t\"testing\"\n\t\"testing\/quick\"\n\t\"time\"\n\n\t\"github.com\/google\/mtail\/internal\/metrics\/datum\"\n\t\"github.com\/google\/mtail\/internal\/testutil\"\n)\n\nfunc TestKindType(t *testing.T) {\n\tv := Kind(0)\n\tif s := v.String(); s != \"Unknown\" {\n\t\tt.Errorf(\"Kind.String() returned %q not Unknown\", s)\n\t}\n\tv = Counter\n\tif s := v.String(); s != \"Counter\" {\n\t\tt.Errorf(\"Kind.String() returned %q not Counter\", s)\n\t}\n\tv = Gauge\n\tif s := v.String(); s != \"Gauge\" {\n\t\tt.Errorf(\"Kind.String() returned %q not Gauge\", s)\n\t}\n\tv = Timer\n\tif s := v.String(); s != \"Timer\" {\n\t\tt.Errorf(\"Kind.String() returned %q not Timer\", s)\n\t}\n}\n\nfunc TestScalarMetric(t *testing.T) {\n\tv := NewMetric(\"test\", \"prog\", Counter, Int)\n\td, err := v.GetDatum()\n\tif err != nil {\n\t\tt.Errorf(\"no datum: %s\", err)\n\t}\n\tdatum.IncIntBy(d, 1, time.Now().UTC())\n\tlv := v.FindLabelValueOrNil([]string{})\n\tif lv == nil {\n\t\tt.Errorf(\"couldn't find labelvalue\")\n\t}\n\tnewD := lv.Value\n\tif newD == nil {\n\t\tt.Errorf(\"new_d is nil\")\n\t}\n\tif newD.ValueString() != \"1\" {\n\t\tt.Errorf(\"value not 1\")\n\t}\n\td2, err := v.GetDatum(\"a\", \"b\")\n\tif err == nil {\n\t\tt.Errorf(\"datum with keys sohuld have returned no value, got %v\", d2)\n\t}\n}\n\nfunc TestDimensionedMetric(t *testing.T) {\n\tv := NewMetric(\"test\", \"prog\", Counter, Int, \"foo\")\n\td, _ := v.GetDatum(\"a\")\n\tdatum.IncIntBy(d, 1, time.Now().UTC())\n\tif v.FindLabelValueOrNil([]string{\"a\"}).Value.ValueString() != \"1\" {\n\t\tt.Errorf(\"fail\")\n\t}\n\n\tv = NewMetric(\"test\", \"prog\", Counter, Int, \"foo\", \"bar\")\n\td, _ = v.GetDatum(\"a\", \"b\")\n\tdatum.IncIntBy(d, 1, time.Now().UTC())\n\tif v.FindLabelValueOrNil([]string{\"a\", \"b\"}).Value.ValueString() != \"1\" {\n\t\tt.Errorf(\"fail\")\n\t}\n\n\tv = NewMetric(\"test\", \"prog\", Counter, Int, \"foo\", \"bar\", \"quux\")\n\td, _ = v.GetDatum(\"a\", \"b\", \"c\")\n\tdatum.IncIntBy(d, 1, time.Now().UTC())\n\tif v.FindLabelValueOrNil([]string{\"a\", \"b\", \"c\"}).Value.ValueString() != \"1\" {\n\t\tt.Errorf(\"fail\")\n\t}\n}\n\nvar labelSetTests = []struct {\n\tvalues []string\n\texpectedLabels map[string]string\n}{\n\t{\n\t\t[]string{\"a\", \"b\", \"c\"},\n\t\tmap[string]string{\"foo\": \"a\", \"bar\": \"b\", \"quux\": \"c\"},\n\t},\n\t{\n\t\t[]string{\"a\", \"b\", \"d\"},\n\t\tmap[string]string{\"foo\": \"a\", \"bar\": \"b\", \"quux\": \"d\"},\n\t},\n}\n\nfunc TestEmitLabelSet(t *testing.T) {\n\tts := time.Now().UTC()\n\tfor _, tc := range labelSetTests {\n\t\ttc := tc\n\t\tt.Run(fmt.Sprintf(\"%v\", tc.values), func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tm := NewMetric(\"test\", \"prog\", Gauge, Int, \"foo\", \"bar\", \"quux\")\n\t\t\td, _ := m.GetDatum(tc.values...)\n\t\t\tdatum.SetInt(d, 37, ts)\n\n\t\t\tc := make(chan *LabelSet)\n\n\t\t\tgo m.EmitLabelSets(c)\n\n\t\t\tls := <-c\n\n\t\t\tdiff := testutil.Diff(tc.expectedLabels, ls.Labels)\n\t\t\tif diff != \"\" {\n\t\t\t\tt.Error(diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestFindLabelValueOrNil(t *testing.T) {\n\tm0 := NewMetric(\"foo\", \"prog\", Counter, Int)\n\tif r0 := m0.FindLabelValueOrNil([]string{}); r0 != nil {\n\t\tt.Errorf(\"m0 should be nil: %v\", r0)\n\t}\n\td, err := m0.GetDatum()\n\tif err != nil {\n\t\tt.Errorf(\"Bad datum %v: %v\\n\", d, err)\n\t}\n\tif r1 := m0.FindLabelValueOrNil([]string{}); r1 == nil {\n\t\tt.Errorf(\"m0 should not be nil: %v\", r1)\n\t}\n\tm1 := NewMetric(\"bar\", \"prog\", Counter, Int, \"a\")\n\td1, err1 := m1.GetDatum(\"1\")\n\tif err1 != nil {\n\t\tt.Errorf(\"err1 %v: %v\\n\", d1, err1)\n\t}\n\tif r2 := m1.FindLabelValueOrNil([]string{\"0\"}); r2 != nil {\n\t\tt.Errorf(\"r2 should be nil\")\n\t}\n\tif r3 := m1.FindLabelValueOrNil([]string{\"1\"}); r3 == nil {\n\t\tt.Errorf(\"r3 should be non nil\")\n\t}\n}\n\nfunc timeGenerator(rand *rand.Rand) time.Time {\n\tmonths := []time.Month{\n\t\ttime.January, time.February, time.March,\n\t\ttime.April, time.May, time.June,\n\t\ttime.July, time.August, time.September,\n\t\ttime.October, time.November, time.December,\n\t}\n\n\treturn time.Date(\n\t\trand.Intn(9999),\n\t\tmonths[rand.Intn(len(months))],\n\t\trand.Intn(31),\n\t\trand.Intn(24),\n\t\trand.Intn(60),\n\t\trand.Intn(60),\n\t\tint(rand.Int31()),\n\t\ttime.UTC,\n\t)\n}\n\nfunc TestMetricJSONRoundTrip(t *testing.T) {\n\trand := rand.New(rand.NewSource(0))\n\tf := func(name, prog string, kind Kind, keys []string, val, ti, tns int64) bool {\n\t\tm := NewMetric(name, prog, kind, Int, keys...)\n\t\tlabels := make([]string, 0)\n\t\tfor range keys {\n\t\t\tif l, ok := quick.Value(reflect.TypeOf(name), rand); ok {\n\t\t\t\tlabels = append(labels, l.String())\n\t\t\t} else {\n\t\t\t\tt.Errorf(\"failed to create value for labels\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\td, _ := m.GetDatum(labels...)\n\t\tdatum.SetInt(d, val, timeGenerator(rand))\n\n\t\tj, e := json.Marshal(m)\n\t\tif e != nil {\n\t\t\tt.Errorf(\"json.Marshal failed: %s\\n\", e)\n\t\t\treturn false\n\t\t}\n\n\t\tr := newMetric(0)\n\t\te = json.Unmarshal(j, &r)\n\t\tif e != nil {\n\t\t\tt.Errorf(\"json.Unmarshal failed: %s\\n\", e)\n\t\t\treturn false\n\t\t}\n\n\t\tif diff := testutil.Diff(m, r, testutil.IgnoreUnexported(sync.RWMutex{})); diff != \"\" {\n\t\t\tt.Errorf(\"Round trip wasn't stable:\\n%s\", diff)\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\tq := quick.Config{MaxCount: 100000}\n\tif testing.Short() {\n\t\tq.MaxCount = 1000\n\t}\n\tif err := quick.Check(f, nil); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestTimer(t *testing.T) {\n\tm := NewMetric(\"test\", \"prog\", Timer, Int)\n\tn := NewMetric(\"test\", \"prog\", Timer, Int)\n\tdiff := testutil.Diff(m, n, testutil.IgnoreUnexported(sync.RWMutex{}))\n\tif diff != \"\" {\n\t\tt.Errorf(\"Identical metrics not the same:\\n%s\", diff)\n\t}\n\td, _ := m.GetDatum()\n\tdatum.IncIntBy(d, 1, time.Now().UTC())\n\tlv := m.FindLabelValueOrNil([]string{})\n\tif lv == nil {\n\t\tt.Errorf(\"couldn't find labelvalue\")\n\t}\n\tnewD := lv.Value\n\tif newD == nil {\n\t\tt.Errorf(\"new_d is nil\")\n\t}\n\tif newD.ValueString() != \"1\" {\n\t\tt.Errorf(\"value not 1\")\n\t}\n}\n\nfunc TestRemoveMetricLabelValue(t *testing.T) {\n\tm := NewMetric(\"test\", \"prog\", Counter, Int, \"a\", \"b\", \"c\")\n\t_, e := m.GetDatum(\"a\", \"a\", \"a\")\n\tif e != nil {\n\t\tt.Errorf(\"Getdatum failed: %s\", e)\n\t}\n\tlv := m.FindLabelValueOrNil([]string{\"a\", \"a\", \"a\"})\n\tif lv == nil {\n\t\tt.Errorf(\"coidln't find labelvalue\")\n\t}\n\te = m.RemoveDatum(\"a\", \"a\", \"a\")\n\tif e != nil {\n\t\tt.Errorf(\"couldn't remove datum: %s\", e)\n\t}\n\tlv = m.FindLabelValueOrNil([]string{\"a\", \"a\", \"a\"})\n\tif lv != nil {\n\t\tt.Errorf(\"label value still exists\")\n\t}\n}\n<commit_msg>Fix quick.Check config in metric_test.<commit_after>\/\/ Copyright 2011 Google Inc. All Rights Reserved.\n\/\/ This file is available under the Apache license.\n\npackage metrics\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"reflect\"\n\t\"sync\"\n\t\"testing\"\n\t\"testing\/quick\"\n\t\"time\"\n\n\t\"github.com\/google\/mtail\/internal\/metrics\/datum\"\n\t\"github.com\/google\/mtail\/internal\/testutil\"\n)\n\nfunc TestKindType(t *testing.T) {\n\tv := Kind(0)\n\tif s := v.String(); s != \"Unknown\" {\n\t\tt.Errorf(\"Kind.String() returned %q not Unknown\", s)\n\t}\n\tv = Counter\n\tif s := v.String(); s != \"Counter\" {\n\t\tt.Errorf(\"Kind.String() returned %q not Counter\", s)\n\t}\n\tv = Gauge\n\tif s := v.String(); s != \"Gauge\" {\n\t\tt.Errorf(\"Kind.String() returned %q not Gauge\", s)\n\t}\n\tv = Timer\n\tif s := v.String(); s != \"Timer\" {\n\t\tt.Errorf(\"Kind.String() returned %q not Timer\", s)\n\t}\n}\n\nfunc TestScalarMetric(t *testing.T) {\n\tv := NewMetric(\"test\", \"prog\", Counter, Int)\n\td, err := v.GetDatum()\n\tif err != nil {\n\t\tt.Errorf(\"no datum: %s\", err)\n\t}\n\tdatum.IncIntBy(d, 1, time.Now().UTC())\n\tlv := v.FindLabelValueOrNil([]string{})\n\tif lv == nil {\n\t\tt.Errorf(\"couldn't find labelvalue\")\n\t}\n\tnewD := lv.Value\n\tif newD == nil {\n\t\tt.Errorf(\"new_d is nil\")\n\t}\n\tif newD.ValueString() != \"1\" {\n\t\tt.Errorf(\"value not 1\")\n\t}\n\td2, err := v.GetDatum(\"a\", \"b\")\n\tif err == nil {\n\t\tt.Errorf(\"datum with keys sohuld have returned no value, got %v\", d2)\n\t}\n}\n\nfunc TestDimensionedMetric(t *testing.T) {\n\tv := NewMetric(\"test\", \"prog\", Counter, Int, \"foo\")\n\td, _ := v.GetDatum(\"a\")\n\tdatum.IncIntBy(d, 1, time.Now().UTC())\n\tif v.FindLabelValueOrNil([]string{\"a\"}).Value.ValueString() != \"1\" {\n\t\tt.Errorf(\"fail\")\n\t}\n\n\tv = NewMetric(\"test\", \"prog\", Counter, Int, \"foo\", \"bar\")\n\td, _ = v.GetDatum(\"a\", \"b\")\n\tdatum.IncIntBy(d, 1, time.Now().UTC())\n\tif v.FindLabelValueOrNil([]string{\"a\", \"b\"}).Value.ValueString() != \"1\" {\n\t\tt.Errorf(\"fail\")\n\t}\n\n\tv = NewMetric(\"test\", \"prog\", Counter, Int, \"foo\", \"bar\", \"quux\")\n\td, _ = v.GetDatum(\"a\", \"b\", \"c\")\n\tdatum.IncIntBy(d, 1, time.Now().UTC())\n\tif v.FindLabelValueOrNil([]string{\"a\", \"b\", \"c\"}).Value.ValueString() != \"1\" {\n\t\tt.Errorf(\"fail\")\n\t}\n}\n\nvar labelSetTests = []struct {\n\tvalues []string\n\texpectedLabels map[string]string\n}{\n\t{\n\t\t[]string{\"a\", \"b\", \"c\"},\n\t\tmap[string]string{\"foo\": \"a\", \"bar\": \"b\", \"quux\": \"c\"},\n\t},\n\t{\n\t\t[]string{\"a\", \"b\", \"d\"},\n\t\tmap[string]string{\"foo\": \"a\", \"bar\": \"b\", \"quux\": \"d\"},\n\t},\n}\n\nfunc TestEmitLabelSet(t *testing.T) {\n\tts := time.Now().UTC()\n\tfor _, tc := range labelSetTests {\n\t\ttc := tc\n\t\tt.Run(fmt.Sprintf(\"%v\", tc.values), func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tm := NewMetric(\"test\", \"prog\", Gauge, Int, \"foo\", \"bar\", \"quux\")\n\t\t\td, _ := m.GetDatum(tc.values...)\n\t\t\tdatum.SetInt(d, 37, ts)\n\n\t\t\tc := make(chan *LabelSet)\n\n\t\t\tgo m.EmitLabelSets(c)\n\n\t\t\tls := <-c\n\n\t\t\tdiff := testutil.Diff(tc.expectedLabels, ls.Labels)\n\t\t\tif diff != \"\" {\n\t\t\t\tt.Error(diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestFindLabelValueOrNil(t *testing.T) {\n\tm0 := NewMetric(\"foo\", \"prog\", Counter, Int)\n\tif r0 := m0.FindLabelValueOrNil([]string{}); r0 != nil {\n\t\tt.Errorf(\"m0 should be nil: %v\", r0)\n\t}\n\td, err := m0.GetDatum()\n\tif err != nil {\n\t\tt.Errorf(\"Bad datum %v: %v\\n\", d, err)\n\t}\n\tif r1 := m0.FindLabelValueOrNil([]string{}); r1 == nil {\n\t\tt.Errorf(\"m0 should not be nil: %v\", r1)\n\t}\n\tm1 := NewMetric(\"bar\", \"prog\", Counter, Int, \"a\")\n\td1, err1 := m1.GetDatum(\"1\")\n\tif err1 != nil {\n\t\tt.Errorf(\"err1 %v: %v\\n\", d1, err1)\n\t}\n\tif r2 := m1.FindLabelValueOrNil([]string{\"0\"}); r2 != nil {\n\t\tt.Errorf(\"r2 should be nil\")\n\t}\n\tif r3 := m1.FindLabelValueOrNil([]string{\"1\"}); r3 == nil {\n\t\tt.Errorf(\"r3 should be non nil\")\n\t}\n}\n\nfunc timeGenerator(rand *rand.Rand) time.Time {\n\tmonths := []time.Month{\n\t\ttime.January, time.February, time.March,\n\t\ttime.April, time.May, time.June,\n\t\ttime.July, time.August, time.September,\n\t\ttime.October, time.November, time.December,\n\t}\n\n\treturn time.Date(\n\t\trand.Intn(9999),\n\t\tmonths[rand.Intn(len(months))],\n\t\trand.Intn(31),\n\t\trand.Intn(24),\n\t\trand.Intn(60),\n\t\trand.Intn(60),\n\t\tint(rand.Int31()),\n\t\ttime.UTC,\n\t)\n}\n\nfunc TestMetricJSONRoundTrip(t *testing.T) {\n\trand := rand.New(rand.NewSource(0))\n\tf := func(name, prog string, kind Kind, keys []string, val, ti, tns int64) bool {\n\t\tm := NewMetric(name, prog, kind, Int, keys...)\n\t\tlabels := make([]string, 0)\n\t\tfor range keys {\n\t\t\tif l, ok := quick.Value(reflect.TypeOf(name), rand); ok {\n\t\t\t\tlabels = append(labels, l.String())\n\t\t\t} else {\n\t\t\t\tt.Errorf(\"failed to create value for labels\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\td, _ := m.GetDatum(labels...)\n\t\tdatum.SetInt(d, val, timeGenerator(rand))\n\n\t\tj, e := json.Marshal(m)\n\t\tif e != nil {\n\t\t\tt.Errorf(\"json.Marshal failed: %s\\n\", e)\n\t\t\treturn false\n\t\t}\n\n\t\tr := newMetric(0)\n\t\te = json.Unmarshal(j, &r)\n\t\tif e != nil {\n\t\t\tt.Errorf(\"json.Unmarshal failed: %s\\n\", e)\n\t\t\treturn false\n\t\t}\n\n\t\tif diff := testutil.Diff(m, r, testutil.IgnoreUnexported(sync.RWMutex{})); diff != \"\" {\n\t\t\tt.Errorf(\"Round trip wasn't stable:\\n%s\", diff)\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\tq := &quick.Config{MaxCount: 100000}\n\tif testing.Short() {\n\t\tq.MaxCountScale = 0.01\n\t}\n\tif err := quick.Check(f, q); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestTimer(t *testing.T) {\n\tm := NewMetric(\"test\", \"prog\", Timer, Int)\n\tn := NewMetric(\"test\", \"prog\", Timer, Int)\n\tdiff := testutil.Diff(m, n, testutil.IgnoreUnexported(sync.RWMutex{}))\n\tif diff != \"\" {\n\t\tt.Errorf(\"Identical metrics not the same:\\n%s\", diff)\n\t}\n\td, _ := m.GetDatum()\n\tdatum.IncIntBy(d, 1, time.Now().UTC())\n\tlv := m.FindLabelValueOrNil([]string{})\n\tif lv == nil {\n\t\tt.Errorf(\"couldn't find labelvalue\")\n\t}\n\tnewD := lv.Value\n\tif newD == nil {\n\t\tt.Errorf(\"new_d is nil\")\n\t}\n\tif newD.ValueString() != \"1\" {\n\t\tt.Errorf(\"value not 1\")\n\t}\n}\n\nfunc TestRemoveMetricLabelValue(t *testing.T) {\n\tm := NewMetric(\"test\", \"prog\", Counter, Int, \"a\", \"b\", \"c\")\n\t_, e := m.GetDatum(\"a\", \"a\", \"a\")\n\tif e != nil {\n\t\tt.Errorf(\"Getdatum failed: %s\", e)\n\t}\n\tlv := m.FindLabelValueOrNil([]string{\"a\", \"a\", \"a\"})\n\tif lv == nil {\n\t\tt.Errorf(\"coidln't find labelvalue\")\n\t}\n\te = m.RemoveDatum(\"a\", \"a\", \"a\")\n\tif e != nil {\n\t\tt.Errorf(\"couldn't remove datum: %s\", e)\n\t}\n\tlv = m.FindLabelValueOrNil([]string{\"a\", \"a\", \"a\"})\n\tif lv != nil {\n\t\tt.Errorf(\"label value still exists\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package nametransform encrypts and decrypts filenames.\npackage nametransform\n\nimport (\n\t\"bytes\"\n\t\"crypto\/aes\"\n\t\"encoding\/base64\"\n\t\"path\/filepath\"\n\t\"syscall\"\n\n\t\"github.com\/rfjakob\/eme\"\n\n\t\"github.com\/rfjakob\/gocryptfs\/internal\/tlog\"\n)\n\nconst (\n\t\/\/ Like ext4, we allow at most 255 bytes for a file name.\n\tNameMax = 255\n)\n\n\/\/ NameTransformer is an interface used to transform filenames.\ntype NameTransformer interface {\n\tDecryptName(cipherName string, iv []byte) (string, error)\n\tEncryptName(plainName string, iv []byte) string\n\tEncryptAndHashName(name string, iv []byte) (string, error)\n\tHashLongName(name string) string\n\tWriteLongNameAt(dirfd int, hashName string, plainName string) error\n\tB64EncodeToString(src []byte) string\n\tB64DecodeString(s string) ([]byte, error)\n}\n\n\/\/ NameTransform is used to transform filenames.\ntype NameTransform struct {\n\temeCipher *eme.EMECipher\n\tlongNames bool\n\t\/\/ B64 = either base64.URLEncoding or base64.RawURLEncoding, depending\n\t\/\/ on the Raw64 feature flag\n\tB64 *base64.Encoding\n\t\/\/ Patterns to bypass decryption\n\tBadnamePatterns []string\n}\n\n\/\/ New returns a new NameTransform instance.\nfunc New(e *eme.EMECipher, longNames bool, raw64 bool) *NameTransform {\n\tb64 := base64.URLEncoding\n\tif raw64 {\n\t\tb64 = base64.RawURLEncoding\n\t}\n\treturn &NameTransform{\n\t\temeCipher: e,\n\t\tlongNames: longNames,\n\t\tB64: b64,\n\t}\n}\n\n\/\/ DecryptName calls decryptName to try and decrypt a base64-encoded encrypted\n\/\/ filename \"cipherName\", and failing that checks if it can be bypassed\nfunc (n *NameTransform) DecryptName(cipherName string, iv []byte) (string, error) {\n\tres, err := n.decryptName(cipherName, iv)\n\tif err != nil {\n\t\tfor _, pattern := range n.BadnamePatterns {\n\t\t\tmatch, err := filepath.Match(pattern, cipherName)\n\t\t\tif err == nil && match { \/\/ Pattern should have been validated already\n\t\t\t\t\/\/find longest decryptable substring\n\t\t\t\tfor charpos := len(cipherName) - 1; charpos > 0; charpos-- {\n\t\t\t\t\tres, err = n.decryptName(cipherName[:charpos], iv)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\treturn res + cipherName[charpos:] + \" GOCRYPTFS_BAD_NAME\", nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn cipherName + \" GOCRYPTFS_BAD_NAME\", nil\n\t\t\t}\n\t\t}\n\t}\n\treturn res, err\n}\n\n\/\/ decryptName decrypts a base64-encoded encrypted filename \"cipherName\" using the\n\/\/ initialization vector \"iv\".\nfunc (n *NameTransform) decryptName(cipherName string, iv []byte) (string, error) {\n\tbin, err := n.B64.DecodeString(cipherName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(bin) == 0 {\n\t\ttlog.Warn.Printf(\"DecryptName: empty input\")\n\t\treturn \"\", syscall.EBADMSG\n\t}\n\tif len(bin)%aes.BlockSize != 0 {\n\t\ttlog.Debug.Printf(\"DecryptName %q: decoded length %d is not a multiple of 16\", cipherName, len(bin))\n\t\treturn \"\", syscall.EBADMSG\n\t}\n\tbin = n.emeCipher.Decrypt(iv, bin)\n\tbin, err = unPad16(bin)\n\tif err != nil {\n\t\ttlog.Debug.Printf(\"DecryptName: unPad16 error detail: %v\", err)\n\t\t\/\/ unPad16 returns detailed errors including the position of the\n\t\t\/\/ incorrect bytes. Kill the padding oracle by lumping everything into\n\t\t\/\/ a generic error.\n\t\treturn \"\", syscall.EBADMSG\n\t}\n\t\/\/ A name can never contain a null byte or \"\/\". Make sure we never return those\n\t\/\/ to the kernel, even when we read a corrupted (or fuzzed) filesystem.\n\tif bytes.Contains(bin, []byte{0}) || bytes.Contains(bin, []byte(\"\/\")) {\n\t\treturn \"\", syscall.EBADMSG\n\t}\n\t\/\/ The name should never be \".\" or \"..\".\n\tif bytes.Equal(bin, []byte(\".\")) || bytes.Equal(bin, []byte(\"..\")) {\n\t\treturn \"\", syscall.EBADMSG\n\t}\n\tplain := string(bin)\n\treturn plain, err\n}\n\n\/\/ EncryptName encrypts \"plainName\", returns a base64-encoded \"cipherName64\",\n\/\/ encrypted using EME (https:\/\/github.com\/rfjakob\/eme).\n\/\/\n\/\/ This function is exported because in some cases, fusefrontend needs access\n\/\/ to the full (not hashed) name if longname is used.\nfunc (n *NameTransform) EncryptName(plainName string, iv []byte) (cipherName64 string) {\n\tbin := []byte(plainName)\n\tbin = pad16(bin)\n\tbin = n.emeCipher.Encrypt(iv, bin)\n\tcipherName64 = n.B64.EncodeToString(bin)\n\treturn cipherName64\n}\n\n\/\/ B64EncodeToString returns a Base64-encoded string\nfunc (n *NameTransform) B64EncodeToString(src []byte) string {\n\treturn n.B64.EncodeToString(src)\n}\n\n\/\/ B64DecodeString decodes a Base64-encoded string\nfunc (n *NameTransform) B64DecodeString(s string) ([]byte, error) {\n\treturn n.B64.DecodeString(s)\n}\n<commit_msg>badname: stop trying to decrypt at length 22<commit_after>\/\/ Package nametransform encrypts and decrypts filenames.\npackage nametransform\n\nimport (\n\t\"bytes\"\n\t\"crypto\/aes\"\n\t\"encoding\/base64\"\n\t\"path\/filepath\"\n\t\"syscall\"\n\n\t\"github.com\/rfjakob\/eme\"\n\n\t\"github.com\/rfjakob\/gocryptfs\/internal\/tlog\"\n)\n\nconst (\n\t\/\/ Like ext4, we allow at most 255 bytes for a file name.\n\tNameMax = 255\n)\n\n\/\/ NameTransformer is an interface used to transform filenames.\ntype NameTransformer interface {\n\tDecryptName(cipherName string, iv []byte) (string, error)\n\tEncryptName(plainName string, iv []byte) string\n\tEncryptAndHashName(name string, iv []byte) (string, error)\n\tHashLongName(name string) string\n\tWriteLongNameAt(dirfd int, hashName string, plainName string) error\n\tB64EncodeToString(src []byte) string\n\tB64DecodeString(s string) ([]byte, error)\n}\n\n\/\/ NameTransform is used to transform filenames.\ntype NameTransform struct {\n\temeCipher *eme.EMECipher\n\tlongNames bool\n\t\/\/ B64 = either base64.URLEncoding or base64.RawURLEncoding, depending\n\t\/\/ on the Raw64 feature flag\n\tB64 *base64.Encoding\n\t\/\/ Patterns to bypass decryption\n\tBadnamePatterns []string\n}\n\n\/\/ New returns a new NameTransform instance.\nfunc New(e *eme.EMECipher, longNames bool, raw64 bool) *NameTransform {\n\tb64 := base64.URLEncoding\n\tif raw64 {\n\t\tb64 = base64.RawURLEncoding\n\t}\n\treturn &NameTransform{\n\t\temeCipher: e,\n\t\tlongNames: longNames,\n\t\tB64: b64,\n\t}\n}\n\n\/\/ DecryptName calls decryptName to try and decrypt a base64-encoded encrypted\n\/\/ filename \"cipherName\", and failing that checks if it can be bypassed\nfunc (n *NameTransform) DecryptName(cipherName string, iv []byte) (string, error) {\n\tres, err := n.decryptName(cipherName, iv)\n\tif err != nil {\n\t\tfor _, pattern := range n.BadnamePatterns {\n\t\t\tmatch, err := filepath.Match(pattern, cipherName)\n\t\t\tif err == nil && match { \/\/ Pattern should have been validated already\n\t\t\t\t\/\/ Find longest decryptable substring\n\t\t\t\t\/\/ At least 16 bytes due to AES --> at least 22 characters in base64\n\t\t\t\tnameMin := n.B64.EncodedLen(aes.BlockSize)\n\t\t\t\tfor charpos := len(cipherName) - 1; charpos >= nameMin; charpos-- {\n\t\t\t\t\tres, err = n.decryptName(cipherName[:charpos], iv)\n\t\t\t\t\tif err == nil {\n\t\t\t\t\t\treturn res + cipherName[charpos:] + \" GOCRYPTFS_BAD_NAME\", nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn cipherName + \" GOCRYPTFS_BAD_NAME\", nil\n\t\t\t}\n\t\t}\n\t}\n\treturn res, err\n}\n\n\/\/ decryptName decrypts a base64-encoded encrypted filename \"cipherName\" using the\n\/\/ initialization vector \"iv\".\nfunc (n *NameTransform) decryptName(cipherName string, iv []byte) (string, error) {\n\tbin, err := n.B64.DecodeString(cipherName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(bin) == 0 {\n\t\ttlog.Warn.Printf(\"DecryptName: empty input\")\n\t\treturn \"\", syscall.EBADMSG\n\t}\n\tif len(bin)%aes.BlockSize != 0 {\n\t\ttlog.Debug.Printf(\"DecryptName %q: decoded length %d is not a multiple of 16\", cipherName, len(bin))\n\t\treturn \"\", syscall.EBADMSG\n\t}\n\tbin = n.emeCipher.Decrypt(iv, bin)\n\tbin, err = unPad16(bin)\n\tif err != nil {\n\t\ttlog.Debug.Printf(\"DecryptName: unPad16 error detail: %v\", err)\n\t\t\/\/ unPad16 returns detailed errors including the position of the\n\t\t\/\/ incorrect bytes. Kill the padding oracle by lumping everything into\n\t\t\/\/ a generic error.\n\t\treturn \"\", syscall.EBADMSG\n\t}\n\t\/\/ A name can never contain a null byte or \"\/\". Make sure we never return those\n\t\/\/ to the kernel, even when we read a corrupted (or fuzzed) filesystem.\n\tif bytes.Contains(bin, []byte{0}) || bytes.Contains(bin, []byte(\"\/\")) {\n\t\treturn \"\", syscall.EBADMSG\n\t}\n\t\/\/ The name should never be \".\" or \"..\".\n\tif bytes.Equal(bin, []byte(\".\")) || bytes.Equal(bin, []byte(\"..\")) {\n\t\treturn \"\", syscall.EBADMSG\n\t}\n\tplain := string(bin)\n\treturn plain, err\n}\n\n\/\/ EncryptName encrypts \"plainName\", returns a base64-encoded \"cipherName64\",\n\/\/ encrypted using EME (https:\/\/github.com\/rfjakob\/eme).\n\/\/\n\/\/ This function is exported because in some cases, fusefrontend needs access\n\/\/ to the full (not hashed) name if longname is used.\nfunc (n *NameTransform) EncryptName(plainName string, iv []byte) (cipherName64 string) {\n\tbin := []byte(plainName)\n\tbin = pad16(bin)\n\tbin = n.emeCipher.Encrypt(iv, bin)\n\tcipherName64 = n.B64.EncodeToString(bin)\n\treturn cipherName64\n}\n\n\/\/ B64EncodeToString returns a Base64-encoded string\nfunc (n *NameTransform) B64EncodeToString(src []byte) string {\n\treturn n.B64.EncodeToString(src)\n}\n\n\/\/ B64DecodeString decodes a Base64-encoded string\nfunc (n *NameTransform) B64DecodeString(s string) ([]byte, error) {\n\treturn n.B64.DecodeString(s)\n}\n<|endoftext|>"} {"text":"<commit_before>package edgectl\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/datawire\/ambassador\/pkg\/k8s\"\n\t\"github.com\/datawire\/ambassador\/pkg\/supervisor\"\n)\n\nvar simpleTransport = &http.Transport{\n\t\/\/ #nosec G402\n\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\tProxy: nil,\n\tDialContext: (&net.Dialer{\n\t\tTimeout: 10 * time.Second,\n\t\tKeepAlive: 1 * time.Second,\n\t\tDualStack: true,\n\t}).DialContext,\n\tDisableKeepAlives: true,\n}\n\nvar hClient = &http.Client{\n\tTransport: simpleTransport,\n\tTimeout: 15 * time.Second,\n}\n\n\/\/ Connect the daemon to a cluster\nfunc (d *Daemon) Connect(\n\tp *supervisor.Process, out *Emitter, rai *RunAsInfo,\n\tcontext, namespace, managerNs string, kargs []string,\n\tinstallID string, isCI bool,\n) error {\n\t\/\/ Sanity checks\n\tif d.cluster != nil {\n\t\tout.Println(\"Already connected\")\n\t\tout.Send(\"connect\", \"Already connected\")\n\t\treturn nil\n\t}\n\tif d.bridge != nil {\n\t\tout.Println(\"Not ready: Trying to disconnect\")\n\t\tout.Send(\"connect\", \"Not ready: Trying to disconnect\")\n\t\treturn nil\n\t}\n\tif d.network == nil {\n\t\tout.Println(\"Not ready: Network overrides are paused (use \\\"edgectl resume\\\")\")\n\t\tout.Send(\"connect\", \"Not ready: Paused\")\n\t\treturn nil\n\t}\n\tif !d.network.IsOkay() {\n\t\tout.Println(\"Not ready: Establishing network overrides\")\n\t\tout.Send(\"connect\", \"Not ready: Establishing network overrides\")\n\t\treturn nil\n\t}\n\n\tout.Printf(\"Connecting to traffic manager in namespace %s...\\n\", managerNs)\n\tout.Send(\"connect\", \"Connecting...\")\n\tcluster, err := TrackKCluster(p, rai, context, namespace, kargs)\n\tif err != nil {\n\t\tout.Println(err.Error())\n\t\tout.Send(\"failed\", err.Error())\n\t\tout.SendExit(1)\n\t\treturn nil\n\t}\n\td.cluster = cluster\n\n\tpreviewHost, err := getClusterPreviewHostname(p, cluster)\n\tif err != nil {\n\t\tp.Logf(\"get preview URL hostname: %+v\", err)\n\t\tpreviewHost = \"\"\n\t}\n\n\tbridge, err := CheckedRetryingCommand(\n\t\tp,\n\t\t\"bridge\",\n\t\t[]string{GetExe(), \"teleproxy\", \"bridge\", cluster.context, cluster.namespace},\n\t\trai,\n\t\tcheckBridge,\n\t\t15*time.Second,\n\t)\n\tif err != nil {\n\t\tout.Println(err.Error())\n\t\tout.Send(\"failed\", err.Error())\n\t\tout.SendExit(1)\n\t\td.cluster.Close()\n\t\td.cluster = nil\n\t\treturn nil\n\t}\n\td.bridge = bridge\n\td.cluster.SetBridgeCheck(d.bridge.IsOkay)\n\n\tout.Printf(\n\t\t\"Connected to context %s (%s)\\n\", d.cluster.Context(), d.cluster.Server(),\n\t)\n\tout.Send(\"cluster.context\", d.cluster.Context())\n\tout.Send(\"cluster.server\", d.cluster.Server())\n\n\ttmgr, err := NewTrafficManager(p, d.cluster, managerNs, installID, isCI)\n\tif err != nil {\n\t\tout.Println()\n\t\tout.Println(\"Unable to connect to the traffic manager in your cluster.\")\n\t\tout.Println(\"The intercept feature will not be available.\")\n\t\tout.Println(\"Error was:\", err)\n\t\t\/\/ out.Println(\"Use <some command> to set up the traffic manager.\") \/\/ FIXME\n\t\tout.Send(\"intercept\", false)\n\t} else {\n\t\ttmgr.previewHost = previewHost\n\t\td.trafficMgr = tmgr\n\t\tout.Send(\"intercept\", true)\n\t}\n\treturn nil\n}\n\n\/\/ Disconnect from the connected cluster\nfunc (d *Daemon) Disconnect(p *supervisor.Process, out *Emitter) error {\n\t\/\/ Sanity checks\n\tif d.cluster == nil {\n\t\tout.Println(\"Not connected (use 'edgectl connect' to connect to your cluster)\")\n\t\tout.Send(\"disconnect\", \"Not connected\")\n\t\treturn nil\n\t}\n\n\t_ = d.ClearIntercepts(p)\n\tif d.bridge != nil {\n\t\td.cluster.SetBridgeCheck(nil) \/\/ Stop depending on this bridge\n\t\t_ = d.bridge.Close()\n\t\td.bridge = nil\n\t}\n\tif d.trafficMgr != nil {\n\t\t_ = d.trafficMgr.Close()\n\t\td.trafficMgr = nil\n\t}\n\terr := d.cluster.Close()\n\td.cluster = nil\n\n\tout.Println(\"Disconnected\")\n\tout.Send(\"disconnect\", \"Disconnected\")\n\treturn err\n}\n\n\/\/ getClusterPreviewHostname returns the hostname of the first Host resource it\n\/\/ finds that has Preview URLs enabled with a supported URL type.\nfunc getClusterPreviewHostname(p *supervisor.Process, cluster *KCluster) (hostname string, err error) {\n\tp.Log(\"Looking for a Host with Preview URLs enabled\")\n\n\t\/\/ kubectl get hosts, in all namespaces or in this namespace\n\tvar outBytes []byte\n\toutBytes, err = func() ([]byte, error) {\n\t\tclusterCmd := cluster.GetKubectlCmdNoNamespace(p, \"get\", \"host\", \"-o\", \"yaml\", \"--all-namespaces\")\n\t\tif outBytes, err := clusterCmd.CombinedOutput(); err == nil {\n\t\t\treturn outBytes, nil\n\t\t}\n\n\t\tnsCmd := cluster.GetKubectlCmd(p, \"get\", \"host\", \"-o\", \"yaml\")\n\t\tif outBytes, err := nsCmd.CombinedOutput(); err == nil {\n\t\t\treturn outBytes, nil\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t}()\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Parse the output\n\thostLists, kerr := k8s.ParseResources(\"get hosts\", string(outBytes))\n\tif kerr != nil {\n\t\terr = kerr\n\t\treturn\n\t}\n\tif len(hostLists) != 1 {\n\t\terr = errors.Errorf(\"weird result with length %d\", len(hostLists))\n\t\treturn\n\t}\n\n\t\/\/ Grab the \"items\" slice, as the result should be a list of Host resources\n\thostItems := k8s.Map(hostLists[0]).GetMaps(\"items\")\n\tp.Logf(\"Found %d Host resources\", len(hostItems))\n\n\t\/\/ Loop over Hosts looking for a Preview URL hostname\n\tfor _, hostItem := range hostItems {\n\t\thost := k8s.Resource(hostItem)\n\t\tlogEntry := fmt.Sprintf(\"- Host %s \/ %s: %%s\", host.Namespace(), host.Name())\n\n\t\tpreviewUrlSpec := host.Spec().GetMap(\"previewUrl\")\n\t\tif len(previewUrlSpec) == 0 {\n\t\t\tp.Logf(logEntry, \"no preview URL config\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif enabled, ok := previewUrlSpec[\"enabled\"].(bool); !ok || !enabled {\n\t\t\tp.Logf(logEntry, \"preview URL not enabled\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif pType, ok := previewUrlSpec[\"type\"].(string); !ok || pType != \"Path\" {\n\t\t\tp.Logf(logEntry+\": %#v\", \"unsupported preview URL type\", previewUrlSpec[\"type\"])\n\t\t\tcontinue\n\t\t}\n\n\t\tif hostname = host.Spec().GetString(\"hostname\"); hostname == \"\" {\n\t\t\tp.Logf(logEntry, \"empty hostname???\")\n\t\t\tcontinue\n\t\t}\n\n\t\tp.Logf(logEntry+\": %q\", \"SUCCESS! Hostname is\", hostname)\n\t\treturn\n\t}\n\n\tp.Logf(\"No appropriate Host resource found.\")\n\treturn\n}\n\n\/\/ checkBridge checks the status of teleproxy bridge by doing the equivalent of\n\/\/ curl -k https:\/\/kubernetes\/api\/.\nfunc checkBridge(p *supervisor.Process) error {\n\tres, err := hClient.Get(\"https:\/\/kubernetes.default\/api\/\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"get\")\n\t}\n\t_, err = ioutil.ReadAll(res.Body)\n\tres.Body.Close()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"read body\")\n\t}\n\treturn nil\n}\n\n\/\/ TrafficManager is a handle to access the Traffic Manager in a\n\/\/ cluster.\ntype TrafficManager struct {\n\tcrc Resource\n\tapiPort int\n\tsshPort int\n\tnamespace string\n\tinterceptables []string\n\ttotalClusCepts int\n\tsnapshotSent bool\n\tinstallID string \/\/ edgectl's install ID\n\tconnectCI bool \/\/ whether --ci was passed to connect\n\tapiErr error \/\/ holds the latest traffic-manager API error\n\tlicenseInfo string \/\/ license information from traffic-manager\n\tpreviewHost string \/\/ hostname to use for preview URLs, if enabled\n}\n\n\/\/ NewTrafficManager returns a TrafficManager resource for the given\n\/\/ cluster if it has a Traffic Manager service.\nfunc NewTrafficManager(p *supervisor.Process, cluster *KCluster, managerNs string, installID string, isCI bool) (*TrafficManager, error) {\n\tcmd := cluster.GetKubectlCmd(p, \"get\", \"-n\", managerNs, \"svc\/telepresence-proxy\", \"deploy\/telepresence-proxy\")\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"kubectl get svc\/deploy telepresency-proxy\")\n\t}\n\n\tapiPort, err := GetFreePort()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get free port for API\")\n\t}\n\tsshPort, err := GetFreePort()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get free port for ssh\")\n\t}\n\tkpfArgStr := fmt.Sprintf(\"port-forward -n %s svc\/telepresence-proxy %d:8022 %d:8081\", managerNs, sshPort, apiPort)\n\tkpfArgs := cluster.GetKubectlArgs(strings.Fields(kpfArgStr)...)\n\ttm := &TrafficManager{\n\t\tapiPort: apiPort,\n\t\tsshPort: sshPort,\n\t\tnamespace: managerNs,\n\t\tinstallID: installID,\n\t\tconnectCI: isCI,\n\t}\n\n\tpf, err := CheckedRetryingCommand(p, \"traffic-kpf\", kpfArgs, cluster.RAI(), tm.check, 15*time.Second)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttm.crc = pf\n\treturn tm, nil\n}\n\nfunc (tm *TrafficManager) check(p *supervisor.Process) error {\n\tbody, code, err := tm.request(\"GET\", \"state\", []byte{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif code != http.StatusOK {\n\t\ttm.apiErr = fmt.Errorf(\"%v: %v\", code, body)\n\t\treturn tm.apiErr\n\t}\n\ttm.apiErr = nil\n\n\tvar state map[string]interface{}\n\tif err := json.Unmarshal([]byte(body), &state); err != nil {\n\t\tp.Logf(\"check: bad JSON from tm: %v\", err)\n\t\tp.Logf(\"check: JSON data is: %q\", body)\n\t\treturn err\n\t}\n\tif licenseInfo, ok := state[\"LicenseInfo\"]; ok {\n\t\ttm.licenseInfo = licenseInfo.(string)\n\t}\n\tdeployments, ok := state[\"Deployments\"].(map[string]interface{})\n\tif !ok {\n\t\tp.Log(\"check: failed to get deployment info\")\n\t\tp.Logf(\"check: JSON data is: %q\", body)\n\t}\n\ttm.interceptables = make([]string, len(deployments))\n\ttm.totalClusCepts = 0\n\tidx := 0\n\tfor deployment := range deployments {\n\t\ttm.interceptables[idx] = deployment\n\t\tidx++\n\t\tinfo, ok := deployments[deployment].(map[string]interface{})\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tcepts, ok := info[\"Intercepts\"].([]interface{})\n\t\tif ok {\n\t\t\ttm.totalClusCepts += len(cepts)\n\t\t}\n\t}\n\n\tif !tm.snapshotSent {\n\t\tp.Log(\"trying to send snapshot\")\n\t\ttm.snapshotSent = true \/\/ don't try again, even if this fails\n\t\tbody, code, err := tm.request(\"GET\", \"snapshot\", []byte{})\n\t\tif err != nil || code != 200 {\n\t\t\tp.Logf(\"snapshot request failed: %v\", err)\n\t\t\treturn nil\n\t\t}\n\t\tresp, err := hClient.Post(\"http:\/\/teleproxy\/api\/tables\/\", \"application\/json\", strings.NewReader(body))\n\t\tif err != nil {\n\t\t\tp.Logf(\"snapshot post failed: %v\", err)\n\t\t\treturn nil\n\t\t}\n\t\t_, _ = ioutil.ReadAll(resp.Body)\n\t\tresp.Body.Close()\n\t\tp.Log(\"snapshot sent!\")\n\t}\n\n\treturn nil\n}\n\nfunc (tm *TrafficManager) request(method, path string, data []byte) (result string, code int, err error) {\n\turl := fmt.Sprintf(\"http:\/\/127.0.0.1:%d\/%s\", tm.apiPort, path)\n\treq, err := http.NewRequest(method, url, bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn\n\t}\n\n\treq.Header.Set(\"edgectl-install-id\", tm.installID)\n\treq.Header.Set(\"edgectl-connect-ci\", strconv.FormatBool(tm.connectCI))\n\n\tresp, err := hClient.Do(req)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"get\")\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tcode = resp.StatusCode\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"read body\")\n\t\treturn\n\t}\n\tresult = string(body)\n\treturn\n}\n\n\/\/ Name implements Resource\nfunc (tm *TrafficManager) Name() string {\n\treturn \"trafficMgr\"\n}\n\n\/\/ IsOkay implements Resource\nfunc (tm *TrafficManager) IsOkay() bool {\n\treturn tm.crc.IsOkay()\n}\n\n\/\/ Close implements Resource\nfunc (tm *TrafficManager) Close() error {\n\treturn tm.crc.Close()\n}\n<commit_msg>bridge check monitor must use something that resolves in the same namespace as the user, in case of limited roles<commit_after>package edgectl\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/datawire\/ambassador\/pkg\/k8s\"\n\t\"github.com\/datawire\/ambassador\/pkg\/supervisor\"\n)\n\nvar simpleTransport = &http.Transport{\n\t\/\/ #nosec G402\n\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\tProxy: nil,\n\tDialContext: (&net.Dialer{\n\t\tTimeout: 10 * time.Second,\n\t\tKeepAlive: 1 * time.Second,\n\t\tDualStack: true,\n\t}).DialContext,\n\tDisableKeepAlives: true,\n}\n\nvar hClient = &http.Client{\n\tTransport: simpleTransport,\n\tTimeout: 15 * time.Second,\n}\n\n\/\/ Connect the daemon to a cluster\nfunc (d *Daemon) Connect(\n\tp *supervisor.Process, out *Emitter, rai *RunAsInfo,\n\tcontext, namespace, managerNs string, kargs []string,\n\tinstallID string, isCI bool,\n) error {\n\t\/\/ Sanity checks\n\tif d.cluster != nil {\n\t\tout.Println(\"Already connected\")\n\t\tout.Send(\"connect\", \"Already connected\")\n\t\treturn nil\n\t}\n\tif d.bridge != nil {\n\t\tout.Println(\"Not ready: Trying to disconnect\")\n\t\tout.Send(\"connect\", \"Not ready: Trying to disconnect\")\n\t\treturn nil\n\t}\n\tif d.network == nil {\n\t\tout.Println(\"Not ready: Network overrides are paused (use \\\"edgectl resume\\\")\")\n\t\tout.Send(\"connect\", \"Not ready: Paused\")\n\t\treturn nil\n\t}\n\tif !d.network.IsOkay() {\n\t\tout.Println(\"Not ready: Establishing network overrides\")\n\t\tout.Send(\"connect\", \"Not ready: Establishing network overrides\")\n\t\treturn nil\n\t}\n\n\tout.Printf(\"Connecting to traffic manager in namespace %s...\\n\", managerNs)\n\tout.Send(\"connect\", \"Connecting...\")\n\tcluster, err := TrackKCluster(p, rai, context, namespace, kargs)\n\tif err != nil {\n\t\tout.Println(err.Error())\n\t\tout.Send(\"failed\", err.Error())\n\t\tout.SendExit(1)\n\t\treturn nil\n\t}\n\td.cluster = cluster\n\n\tpreviewHost, err := getClusterPreviewHostname(p, cluster)\n\tif err != nil {\n\t\tp.Logf(\"get preview URL hostname: %+v\", err)\n\t\tpreviewHost = \"\"\n\t}\n\n\tbridge, err := CheckedRetryingCommand(\n\t\tp,\n\t\t\"bridge\",\n\t\t[]string{GetExe(), \"teleproxy\", \"bridge\", cluster.context, cluster.namespace},\n\t\trai,\n\t\tcheckBridge,\n\t\t15*time.Second,\n\t)\n\tif err != nil {\n\t\tout.Println(err.Error())\n\t\tout.Send(\"failed\", err.Error())\n\t\tout.SendExit(1)\n\t\td.cluster.Close()\n\t\td.cluster = nil\n\t\treturn nil\n\t}\n\td.bridge = bridge\n\td.cluster.SetBridgeCheck(d.bridge.IsOkay)\n\n\tout.Printf(\n\t\t\"Connected to context %s (%s)\\n\", d.cluster.Context(), d.cluster.Server(),\n\t)\n\tout.Send(\"cluster.context\", d.cluster.Context())\n\tout.Send(\"cluster.server\", d.cluster.Server())\n\n\ttmgr, err := NewTrafficManager(p, d.cluster, managerNs, installID, isCI)\n\tif err != nil {\n\t\tout.Println()\n\t\tout.Println(\"Unable to connect to the traffic manager in your cluster.\")\n\t\tout.Println(\"The intercept feature will not be available.\")\n\t\tout.Println(\"Error was:\", err)\n\t\t\/\/ out.Println(\"Use <some command> to set up the traffic manager.\") \/\/ FIXME\n\t\tout.Send(\"intercept\", false)\n\t} else {\n\t\ttmgr.previewHost = previewHost\n\t\td.trafficMgr = tmgr\n\t\tout.Send(\"intercept\", true)\n\t}\n\treturn nil\n}\n\n\/\/ Disconnect from the connected cluster\nfunc (d *Daemon) Disconnect(p *supervisor.Process, out *Emitter) error {\n\t\/\/ Sanity checks\n\tif d.cluster == nil {\n\t\tout.Println(\"Not connected (use 'edgectl connect' to connect to your cluster)\")\n\t\tout.Send(\"disconnect\", \"Not connected\")\n\t\treturn nil\n\t}\n\n\t_ = d.ClearIntercepts(p)\n\tif d.bridge != nil {\n\t\td.cluster.SetBridgeCheck(nil) \/\/ Stop depending on this bridge\n\t\t_ = d.bridge.Close()\n\t\td.bridge = nil\n\t}\n\tif d.trafficMgr != nil {\n\t\t_ = d.trafficMgr.Close()\n\t\td.trafficMgr = nil\n\t}\n\terr := d.cluster.Close()\n\td.cluster = nil\n\n\tout.Println(\"Disconnected\")\n\tout.Send(\"disconnect\", \"Disconnected\")\n\treturn err\n}\n\n\/\/ getClusterPreviewHostname returns the hostname of the first Host resource it\n\/\/ finds that has Preview URLs enabled with a supported URL type.\nfunc getClusterPreviewHostname(p *supervisor.Process, cluster *KCluster) (hostname string, err error) {\n\tp.Log(\"Looking for a Host with Preview URLs enabled\")\n\n\t\/\/ kubectl get hosts, in all namespaces or in this namespace\n\tvar outBytes []byte\n\toutBytes, err = func() ([]byte, error) {\n\t\tclusterCmd := cluster.GetKubectlCmdNoNamespace(p, \"get\", \"host\", \"-o\", \"yaml\", \"--all-namespaces\")\n\t\tif outBytes, err := clusterCmd.CombinedOutput(); err == nil {\n\t\t\treturn outBytes, nil\n\t\t}\n\n\t\tnsCmd := cluster.GetKubectlCmd(p, \"get\", \"host\", \"-o\", \"yaml\")\n\t\tif outBytes, err := nsCmd.CombinedOutput(); err == nil {\n\t\t\treturn outBytes, nil\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t}()\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Parse the output\n\thostLists, kerr := k8s.ParseResources(\"get hosts\", string(outBytes))\n\tif kerr != nil {\n\t\terr = kerr\n\t\treturn\n\t}\n\tif len(hostLists) != 1 {\n\t\terr = errors.Errorf(\"weird result with length %d\", len(hostLists))\n\t\treturn\n\t}\n\n\t\/\/ Grab the \"items\" slice, as the result should be a list of Host resources\n\thostItems := k8s.Map(hostLists[0]).GetMaps(\"items\")\n\tp.Logf(\"Found %d Host resources\", len(hostItems))\n\n\t\/\/ Loop over Hosts looking for a Preview URL hostname\n\tfor _, hostItem := range hostItems {\n\t\thost := k8s.Resource(hostItem)\n\t\tlogEntry := fmt.Sprintf(\"- Host %s \/ %s: %%s\", host.Namespace(), host.Name())\n\n\t\tpreviewUrlSpec := host.Spec().GetMap(\"previewUrl\")\n\t\tif len(previewUrlSpec) == 0 {\n\t\t\tp.Logf(logEntry, \"no preview URL config\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif enabled, ok := previewUrlSpec[\"enabled\"].(bool); !ok || !enabled {\n\t\t\tp.Logf(logEntry, \"preview URL not enabled\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif pType, ok := previewUrlSpec[\"type\"].(string); !ok || pType != \"Path\" {\n\t\t\tp.Logf(logEntry+\": %#v\", \"unsupported preview URL type\", previewUrlSpec[\"type\"])\n\t\t\tcontinue\n\t\t}\n\n\t\tif hostname = host.Spec().GetString(\"hostname\"); hostname == \"\" {\n\t\t\tp.Logf(logEntry, \"empty hostname???\")\n\t\t\tcontinue\n\t\t}\n\n\t\tp.Logf(logEntry+\": %q\", \"SUCCESS! Hostname is\", hostname)\n\t\treturn\n\t}\n\n\tp.Logf(\"No appropriate Host resource found.\")\n\treturn\n}\n\n\/\/ checkBridge checks the status of teleproxy bridge by doing the equivalent of\n\/\/ curl http:\/\/teleproxy. Note there is no namespace specified, as we are\n\/\/ checking for bridge status in the current namespace. We only care the service\n\/\/ responds, no matter what the response may be.\nfunc checkBridge(p *supervisor.Process) error {\n\tres, err := hClient.Get(\"http:\/\/teleproxy\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"get\")\n\t}\n\t_, err = ioutil.ReadAll(res.Body)\n\tres.Body.Close()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"read body\")\n\t}\n\treturn nil\n}\n\n\/\/ TrafficManager is a handle to access the Traffic Manager in a\n\/\/ cluster.\ntype TrafficManager struct {\n\tcrc Resource\n\tapiPort int\n\tsshPort int\n\tnamespace string\n\tinterceptables []string\n\ttotalClusCepts int\n\tsnapshotSent bool\n\tinstallID string \/\/ edgectl's install ID\n\tconnectCI bool \/\/ whether --ci was passed to connect\n\tapiErr error \/\/ holds the latest traffic-manager API error\n\tlicenseInfo string \/\/ license information from traffic-manager\n\tpreviewHost string \/\/ hostname to use for preview URLs, if enabled\n}\n\n\/\/ NewTrafficManager returns a TrafficManager resource for the given\n\/\/ cluster if it has a Traffic Manager service.\nfunc NewTrafficManager(p *supervisor.Process, cluster *KCluster, managerNs string, installID string, isCI bool) (*TrafficManager, error) {\n\tcmd := cluster.GetKubectlCmd(p, \"get\", \"-n\", managerNs, \"svc\/telepresence-proxy\", \"deploy\/telepresence-proxy\")\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"kubectl get svc\/deploy telepresency-proxy\")\n\t}\n\n\tapiPort, err := GetFreePort()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get free port for API\")\n\t}\n\tsshPort, err := GetFreePort()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get free port for ssh\")\n\t}\n\tkpfArgStr := fmt.Sprintf(\"port-forward -n %s svc\/telepresence-proxy %d:8022 %d:8081\", managerNs, sshPort, apiPort)\n\tkpfArgs := cluster.GetKubectlArgs(strings.Fields(kpfArgStr)...)\n\ttm := &TrafficManager{\n\t\tapiPort: apiPort,\n\t\tsshPort: sshPort,\n\t\tnamespace: managerNs,\n\t\tinstallID: installID,\n\t\tconnectCI: isCI,\n\t}\n\n\tpf, err := CheckedRetryingCommand(p, \"traffic-kpf\", kpfArgs, cluster.RAI(), tm.check, 15*time.Second)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttm.crc = pf\n\treturn tm, nil\n}\n\nfunc (tm *TrafficManager) check(p *supervisor.Process) error {\n\tbody, code, err := tm.request(\"GET\", \"state\", []byte{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif code != http.StatusOK {\n\t\ttm.apiErr = fmt.Errorf(\"%v: %v\", code, body)\n\t\treturn tm.apiErr\n\t}\n\ttm.apiErr = nil\n\n\tvar state map[string]interface{}\n\tif err := json.Unmarshal([]byte(body), &state); err != nil {\n\t\tp.Logf(\"check: bad JSON from tm: %v\", err)\n\t\tp.Logf(\"check: JSON data is: %q\", body)\n\t\treturn err\n\t}\n\tif licenseInfo, ok := state[\"LicenseInfo\"]; ok {\n\t\ttm.licenseInfo = licenseInfo.(string)\n\t}\n\tdeployments, ok := state[\"Deployments\"].(map[string]interface{})\n\tif !ok {\n\t\tp.Log(\"check: failed to get deployment info\")\n\t\tp.Logf(\"check: JSON data is: %q\", body)\n\t}\n\ttm.interceptables = make([]string, len(deployments))\n\ttm.totalClusCepts = 0\n\tidx := 0\n\tfor deployment := range deployments {\n\t\ttm.interceptables[idx] = deployment\n\t\tidx++\n\t\tinfo, ok := deployments[deployment].(map[string]interface{})\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tcepts, ok := info[\"Intercepts\"].([]interface{})\n\t\tif ok {\n\t\t\ttm.totalClusCepts += len(cepts)\n\t\t}\n\t}\n\n\tif !tm.snapshotSent {\n\t\tp.Log(\"trying to send snapshot\")\n\t\ttm.snapshotSent = true \/\/ don't try again, even if this fails\n\t\tbody, code, err := tm.request(\"GET\", \"snapshot\", []byte{})\n\t\tif err != nil || code != 200 {\n\t\t\tp.Logf(\"snapshot request failed: %v\", err)\n\t\t\treturn nil\n\t\t}\n\t\tresp, err := hClient.Post(\"http:\/\/teleproxy\/api\/tables\/\", \"application\/json\", strings.NewReader(body))\n\t\tif err != nil {\n\t\t\tp.Logf(\"snapshot post failed: %v\", err)\n\t\t\treturn nil\n\t\t}\n\t\t_, _ = ioutil.ReadAll(resp.Body)\n\t\tresp.Body.Close()\n\t\tp.Log(\"snapshot sent!\")\n\t}\n\n\treturn nil\n}\n\nfunc (tm *TrafficManager) request(method, path string, data []byte) (result string, code int, err error) {\n\turl := fmt.Sprintf(\"http:\/\/127.0.0.1:%d\/%s\", tm.apiPort, path)\n\treq, err := http.NewRequest(method, url, bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn\n\t}\n\n\treq.Header.Set(\"edgectl-install-id\", tm.installID)\n\treq.Header.Set(\"edgectl-connect-ci\", strconv.FormatBool(tm.connectCI))\n\n\tresp, err := hClient.Do(req)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"get\")\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tcode = resp.StatusCode\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"read body\")\n\t\treturn\n\t}\n\tresult = string(body)\n\treturn\n}\n\n\/\/ Name implements Resource\nfunc (tm *TrafficManager) Name() string {\n\treturn \"trafficMgr\"\n}\n\n\/\/ IsOkay implements Resource\nfunc (tm *TrafficManager) IsOkay() bool {\n\treturn tm.crc.IsOkay()\n}\n\n\/\/ Close implements Resource\nfunc (tm *TrafficManager) Close() error {\n\treturn tm.crc.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2018 Nippon Telegraph and Telephone Corporation.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage version\n\nimport \"fmt\"\n\nconst MAJOR uint = 2\nconst MINOR uint = 33\nconst PATCH uint = 0\n\nvar COMMIT string = \"\"\nvar IDENTIFIER string = \"\"\nvar METADATA string = \"\"\n\nfunc Version() string {\n\tvar suffix string = \"\"\n\tif len(IDENTIFIER) > 0 {\n\t\tsuffix = fmt.Sprintf(\"-%s\", IDENTIFIER)\n\t}\n\n\tif len(COMMIT) > 0 || len(METADATA) > 0 {\n\t\tsuffix = suffix + \"+\"\n\t}\n\n\tif len(COMMIT) > 0 {\n\t\tsuffix = fmt.Sprintf(\"%s\"+\"commit.%s\", suffix, COMMIT)\n\n\t}\n\n\tif len(METADATA) > 0 {\n\t\tif len(COMMIT) > 0 {\n\t\t\tsuffix = suffix + \".\"\n\t\t}\n\t\tsuffix = suffix + METADATA\n\t}\n\n\treturn fmt.Sprintf(\"%d.%d.%d%s\", MAJOR, MINOR, PATCH, suffix)\n}\n<commit_msg>v2.34.0<commit_after>\/\/ Copyright (C) 2018 Nippon Telegraph and Telephone Corporation.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage version\n\nimport \"fmt\"\n\nconst MAJOR uint = 2\nconst MINOR uint = 34\nconst PATCH uint = 0\n\nvar COMMIT string = \"\"\nvar IDENTIFIER string = \"\"\nvar METADATA string = \"\"\n\nfunc Version() string {\n\tvar suffix string = \"\"\n\tif len(IDENTIFIER) > 0 {\n\t\tsuffix = fmt.Sprintf(\"-%s\", IDENTIFIER)\n\t}\n\n\tif len(COMMIT) > 0 || len(METADATA) > 0 {\n\t\tsuffix = suffix + \"+\"\n\t}\n\n\tif len(COMMIT) > 0 {\n\t\tsuffix = fmt.Sprintf(\"%s\"+\"commit.%s\", suffix, COMMIT)\n\n\t}\n\n\tif len(METADATA) > 0 {\n\t\tif len(COMMIT) > 0 {\n\t\t\tsuffix = suffix + \".\"\n\t\t}\n\t\tsuffix = suffix + METADATA\n\t}\n\n\treturn fmt.Sprintf(\"%d.%d.%d%s\", MAJOR, MINOR, PATCH, suffix)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage master\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\tkubeadmapi \"k8s.io\/kubernetes\/cmd\/kubeadm\/app\/apis\/kubeadm\"\n\t\"k8s.io\/kubernetes\/cmd\/kubeadm\/app\/images\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/v1\"\n\text \"k8s.io\/kubernetes\/pkg\/apis\/extensions\/v1beta1\"\n\t\"k8s.io\/kubernetes\/pkg\/client\/clientset_generated\/clientset\"\n)\n\nfunc CreateSelfHostedControlPlane(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset) error {\n\tvolumes := []v1.Volume{k8sVolume(cfg)}\n\tvolumeMounts := []v1.VolumeMount{k8sVolumeMount()}\n\tif isCertsVolumeMountNeeded() {\n\t\tvolumes = append(volumes, certsVolume(cfg))\n\t\tvolumeMounts = append(volumeMounts, certsVolumeMount())\n\t}\n\n\tif isPkiVolumeMountNeeded() {\n\t\tvolumes = append(volumes, pkiVolume(cfg))\n\t\tvolumeMounts = append(volumeMounts, pkiVolumeMount())\n\t}\n\n\t\/\/ Need lock for self-hosted\n\tvolumes = append(volumes, flockVolume())\n\tvolumeMounts = append(volumeMounts, flockVolumeMount())\n\n\tif err := launchSelfHostedAPIServer(cfg, client, volumes, volumeMounts); err != nil {\n\t\treturn err\n\t}\n\n\tif err := launchSelfHostedScheduler(cfg, client, volumes, volumeMounts); err != nil {\n\t\treturn err\n\t}\n\n\tif err := launchSelfHostedControllerManager(cfg, client, volumes, volumeMounts); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc launchSelfHostedAPIServer(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {\n\tstart := time.Now()\n\n\tapiServer := getAPIServerDS(cfg, volumes, volumeMounts)\n\tif _, err := client.Extensions().DaemonSets(metav1.NamespaceSystem).Create(&apiServer); err != nil {\n\t\treturn fmt.Errorf(\"failed to create self-hosted %q daemon set [%v]\", kubeAPIServer, err)\n\t}\n\n\twait.PollInfinite(apiCallRetryInterval, func() (bool, error) {\n\t\t\/\/ TODO: This might be pointless, checking the pods is probably enough.\n\t\t\/\/ It does however get us a count of how many there should be which may be useful\n\t\t\/\/ with HA.\n\t\tapiDS, err := client.DaemonSets(metav1.NamespaceSystem).Get(\"self-hosted-\"+kubeAPIServer,\n\t\t\tmetav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[self-hosted] error getting apiserver DaemonSet:\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\tfmt.Printf(\"[self-hosted] %s DaemonSet current=%d, desired=%d\\n\",\n\t\t\tkubeAPIServer,\n\t\t\tapiDS.Status.CurrentNumberScheduled,\n\t\t\tapiDS.Status.DesiredNumberScheduled)\n\n\t\tif apiDS.Status.CurrentNumberScheduled != apiDS.Status.DesiredNumberScheduled {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn true, nil\n\t})\n\n\t\/\/ Wait for self-hosted API server to take ownership\n\twaitForPodsWithLabel(client, \"self-hosted-\"+kubeAPIServer, true)\n\n\t\/\/ Remove temporary API server\n\tapiServerStaticManifestPath := buildStaticManifestFilepath(kubeAPIServer)\n\tif err := os.RemoveAll(apiServerStaticManifestPath); err != nil {\n\t\treturn fmt.Errorf(\"unable to delete temporary API server manifest [%v]\", err)\n\t}\n\n\tWaitForAPI(client)\n\n\tfmt.Printf(\"[self-hosted] self-hosted kube-apiserver ready after %f seconds\\n\", time.Since(start).Seconds())\n\treturn nil\n}\n\nfunc launchSelfHostedControllerManager(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {\n\tstart := time.Now()\n\n\tctrlMgr := getControllerManagerDeployment(cfg, volumes, volumeMounts)\n\tif _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(&ctrlMgr); err != nil {\n\t\treturn fmt.Errorf(\"failed to create self-hosted %q deployment [%v]\", kubeControllerManager, err)\n\t}\n\n\twaitForPodsWithLabel(client, \"self-hosted-\"+kubeControllerManager, false)\n\n\tctrlMgrStaticManifestPath := buildStaticManifestFilepath(kubeControllerManager)\n\tif err := os.RemoveAll(ctrlMgrStaticManifestPath); err != nil {\n\t\treturn fmt.Errorf(\"unable to delete temporary controller manager manifest [%v]\", err)\n\t}\n\n\tfmt.Printf(\"[self-hosted] self-hosted kube-controller-manager ready after %f seconds\\n\", time.Since(start).Seconds())\n\treturn nil\n\n}\n\nfunc launchSelfHostedScheduler(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {\n\tstart := time.Now()\n\tscheduler := getSchedulerDeployment(cfg)\n\tif _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(&scheduler); err != nil {\n\t\treturn fmt.Errorf(\"failed to create self-hosted %q deployment [%v]\", kubeScheduler, err)\n\t}\n\n\twaitForPodsWithLabel(client, \"self-hosted-\"+kubeScheduler, false)\n\n\tschedulerStaticManifestPath := buildStaticManifestFilepath(kubeScheduler)\n\tif err := os.RemoveAll(schedulerStaticManifestPath); err != nil {\n\t\treturn fmt.Errorf(\"unable to delete temporary scheduler manifest [%v]\", err)\n\t}\n\n\tfmt.Printf(\"[self-hosted] self-hosted kube-scheduler ready after %f seconds\\n\", time.Since(start).Seconds())\n\treturn nil\n}\n\n\/\/ waitForPodsWithLabel will lookup pods with the given label and wait until they are all\n\/\/ reporting status as running.\nfunc waitForPodsWithLabel(client *clientset.Clientset, appLabel string, mustBeRunning bool) {\n\twait.PollInfinite(apiCallRetryInterval, func() (bool, error) {\n\t\t\/\/ TODO: Do we need a stronger label link than this?\n\t\tlistOpts := metav1.ListOptions{LabelSelector: fmt.Sprintf(\"k8s-app=%s\", appLabel)}\n\t\tapiPods, err := client.Pods(metav1.NamespaceSystem).List(listOpts)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"[self-hosted] error getting %s pods [%v]\\n\", appLabel, err)\n\t\t\treturn false, nil\n\t\t}\n\t\tfmt.Printf(\"[self-hosted] Found %d %s pods\\n\", len(apiPods.Items), appLabel)\n\n\t\t\/\/ TODO: HA\n\t\tif int32(len(apiPods.Items)) != 1 {\n\t\t\treturn false, nil\n\t\t}\n\t\tfor _, pod := range apiPods.Items {\n\t\t\tfmt.Printf(\"[self-hosted] Pod %s status: %s\\n\", pod.Name, pod.Status.Phase)\n\t\t\tif mustBeRunning && pod.Status.Phase != \"Running\" {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\n\t\treturn true, nil\n\t})\n}\n\n\/\/ Sources from bootkube templates.go\nfunc getAPIServerDS(cfg *kubeadmapi.MasterConfiguration, volumes []v1.Volume, volumeMounts []v1.VolumeMount) ext.DaemonSet {\n\tds := ext.DaemonSet{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"extensions\/v1beta1\",\n\t\t\tKind: \"DaemonSet\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"self-hosted-\" + kubeAPIServer,\n\t\t\tNamespace: \"kube-system\",\n\t\t\tLabels: map[string]string{\"k8s-app\": \"self-hosted-\" + kubeAPIServer},\n\t\t},\n\t\tSpec: ext.DaemonSetSpec{\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"k8s-app\": \"self-hosted-\" + kubeAPIServer,\n\t\t\t\t\t\t\"component\": kubeAPIServer,\n\t\t\t\t\t\t\"tier\": \"control-plane\",\n\t\t\t\t\t},\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.TolerationsAnnotationKey: getMasterToleration(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tNodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},\n\t\t\t\t\tHostNetwork: true,\n\t\t\t\t\tVolumes: volumes,\n\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"self-hosted-\" + kubeAPIServer,\n\t\t\t\t\t\t\tImage: images.GetCoreImage(images.KubeAPIServerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),\n\t\t\t\t\t\t\tCommand: getAPIServerCommand(cfg, true),\n\t\t\t\t\t\t\tEnv: getSelfHostedAPIServerEnv(),\n\t\t\t\t\t\t\tVolumeMounts: volumeMounts,\n\t\t\t\t\t\t\tLivenessProbe: componentProbe(8080, \"\/healthz\"),\n\t\t\t\t\t\t\tResources: componentResources(\"250m\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\treturn ds\n}\n\nfunc getControllerManagerDeployment(cfg *kubeadmapi.MasterConfiguration, volumes []v1.Volume, volumeMounts []v1.VolumeMount) ext.Deployment {\n\td := ext.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"extensions\/v1beta1\",\n\t\t\tKind: \"Deployment\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"self-hosted-\" + kubeControllerManager,\n\t\t\tNamespace: \"kube-system\",\n\t\t\tLabels: map[string]string{\"k8s-app\": \"self-hosted-\" + kubeControllerManager},\n\t\t},\n\t\tSpec: ext.DeploymentSpec{\n\t\t\t\/\/ TODO bootkube uses 2 replicas\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"k8s-app\": \"self-hosted-\" + kubeControllerManager,\n\t\t\t\t\t\t\"component\": kubeControllerManager,\n\t\t\t\t\t\t\"tier\": \"control-plane\",\n\t\t\t\t\t},\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.TolerationsAnnotationKey: getMasterToleration(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tNodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},\n\t\t\t\t\tHostNetwork: true,\n\t\t\t\t\tVolumes: volumes,\n\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"self-hosted-\" + kubeControllerManager,\n\t\t\t\t\t\t\tImage: images.GetCoreImage(images.KubeControllerManagerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),\n\t\t\t\t\t\t\tCommand: getControllerManagerCommand(cfg, true),\n\t\t\t\t\t\t\tVolumeMounts: volumeMounts,\n\t\t\t\t\t\t\tLivenessProbe: componentProbe(10252, \"\/healthz\"),\n\t\t\t\t\t\t\tResources: componentResources(\"200m\"),\n\t\t\t\t\t\t\tEnv: getProxyEnvVars(),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tDNSPolicy: v1.DNSDefault,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\treturn d\n}\n\nfunc getSchedulerDeployment(cfg *kubeadmapi.MasterConfiguration) ext.Deployment {\n\td := ext.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"extensions\/v1beta1\",\n\t\t\tKind: \"Deployment\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"self-hosted-\" + kubeScheduler,\n\t\t\tNamespace: \"kube-system\",\n\t\t\tLabels: map[string]string{\"k8s-app\": \"self-hosted-\" + kubeScheduler},\n\t\t},\n\t\tSpec: ext.DeploymentSpec{\n\t\t\t\/\/ TODO bootkube uses 2 replicas\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"k8s-app\": \"self-hosted-\" + kubeScheduler,\n\t\t\t\t\t\t\"component\": kubeScheduler,\n\t\t\t\t\t\t\"tier\": \"control-plane\",\n\t\t\t\t\t},\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.TolerationsAnnotationKey: getMasterToleration(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tNodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},\n\t\t\t\t\tHostNetwork: true,\n\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"self-hosted-\" + kubeScheduler,\n\t\t\t\t\t\t\tImage: images.GetCoreImage(images.KubeSchedulerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),\n\t\t\t\t\t\t\tCommand: getSchedulerCommand(cfg, true),\n\t\t\t\t\t\t\tLivenessProbe: componentProbe(10251, \"\/healthz\"),\n\t\t\t\t\t\t\tResources: componentResources(\"100m\"),\n\t\t\t\t\t\t\tEnv: getProxyEnvVars(),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\treturn d\n}\n\nfunc buildStaticManifestFilepath(name string) string {\n\treturn path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, \"manifests\", name+\".json\")\n}\n\nfunc getMasterToleration() string {\n\t\/\/ Tolerate the master taint we add to our master nodes, as this can and should\n\t\/\/ run there.\n\t\/\/ TODO: Duplicated above\n\tmasterToleration, _ := json.Marshal([]v1.Toleration{{\n\t\tKey: \"dedicated\",\n\t\tValue: \"master\",\n\t\tOperator: v1.TolerationOpEqual,\n\t\tEffect: v1.TaintEffectNoSchedule,\n\t}})\n\treturn string(masterToleration)\n}\n<commit_msg>Wait for the self-hosted control plane during kubeadm init.<commit_after>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage master\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\tkubeadmapi \"k8s.io\/kubernetes\/cmd\/kubeadm\/app\/apis\/kubeadm\"\n\t\"k8s.io\/kubernetes\/cmd\/kubeadm\/app\/images\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/v1\"\n\text \"k8s.io\/kubernetes\/pkg\/apis\/extensions\/v1beta1\"\n\t\"k8s.io\/kubernetes\/pkg\/client\/clientset_generated\/clientset\"\n)\n\nfunc CreateSelfHostedControlPlane(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset) error {\n\tvolumes := []v1.Volume{k8sVolume(cfg)}\n\tvolumeMounts := []v1.VolumeMount{k8sVolumeMount()}\n\tif isCertsVolumeMountNeeded() {\n\t\tvolumes = append(volumes, certsVolume(cfg))\n\t\tvolumeMounts = append(volumeMounts, certsVolumeMount())\n\t}\n\n\tif isPkiVolumeMountNeeded() {\n\t\tvolumes = append(volumes, pkiVolume(cfg))\n\t\tvolumeMounts = append(volumeMounts, pkiVolumeMount())\n\t}\n\n\t\/\/ Need lock for self-hosted\n\tvolumes = append(volumes, flockVolume())\n\tvolumeMounts = append(volumeMounts, flockVolumeMount())\n\n\tif err := launchSelfHostedAPIServer(cfg, client, volumes, volumeMounts); err != nil {\n\t\treturn err\n\t}\n\n\tif err := launchSelfHostedScheduler(cfg, client, volumes, volumeMounts); err != nil {\n\t\treturn err\n\t}\n\n\tif err := launchSelfHostedControllerManager(cfg, client, volumes, volumeMounts); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc launchSelfHostedAPIServer(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {\n\tstart := time.Now()\n\n\tapiServer := getAPIServerDS(cfg, volumes, volumeMounts)\n\tif _, err := client.Extensions().DaemonSets(metav1.NamespaceSystem).Create(&apiServer); err != nil {\n\t\treturn fmt.Errorf(\"failed to create self-hosted %q daemon set [%v]\", kubeAPIServer, err)\n\t}\n\n\twait.PollInfinite(apiCallRetryInterval, func() (bool, error) {\n\t\t\/\/ TODO: This might be pointless, checking the pods is probably enough.\n\t\t\/\/ It does however get us a count of how many there should be which may be useful\n\t\t\/\/ with HA.\n\t\tapiDS, err := client.DaemonSets(metav1.NamespaceSystem).Get(\"self-hosted-\"+kubeAPIServer,\n\t\t\tmetav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tfmt.Println(\"[self-hosted] error getting apiserver DaemonSet:\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\tfmt.Printf(\"[self-hosted] %s DaemonSet current=%d, desired=%d\\n\",\n\t\t\tkubeAPIServer,\n\t\t\tapiDS.Status.CurrentNumberScheduled,\n\t\t\tapiDS.Status.DesiredNumberScheduled)\n\n\t\tif apiDS.Status.CurrentNumberScheduled != apiDS.Status.DesiredNumberScheduled {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn true, nil\n\t})\n\n\t\/\/ Wait for self-hosted API server to take ownership\n\twaitForPodsWithLabel(client, \"self-hosted-\"+kubeAPIServer, true)\n\n\t\/\/ Remove temporary API server\n\tapiServerStaticManifestPath := buildStaticManifestFilepath(kubeAPIServer)\n\tif err := os.RemoveAll(apiServerStaticManifestPath); err != nil {\n\t\treturn fmt.Errorf(\"unable to delete temporary API server manifest [%v]\", err)\n\t}\n\n\tWaitForAPI(client)\n\n\tfmt.Printf(\"[self-hosted] self-hosted kube-apiserver ready after %f seconds\\n\", time.Since(start).Seconds())\n\treturn nil\n}\n\nfunc launchSelfHostedControllerManager(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {\n\tstart := time.Now()\n\n\tctrlMgr := getControllerManagerDeployment(cfg, volumes, volumeMounts)\n\tif _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(&ctrlMgr); err != nil {\n\t\treturn fmt.Errorf(\"failed to create self-hosted %q deployment [%v]\", kubeControllerManager, err)\n\t}\n\n\twaitForPodsWithLabel(client, \"self-hosted-\"+kubeControllerManager, true)\n\n\tctrlMgrStaticManifestPath := buildStaticManifestFilepath(kubeControllerManager)\n\tif err := os.RemoveAll(ctrlMgrStaticManifestPath); err != nil {\n\t\treturn fmt.Errorf(\"unable to delete temporary controller manager manifest [%v]\", err)\n\t}\n\n\tfmt.Printf(\"[self-hosted] self-hosted kube-controller-manager ready after %f seconds\\n\", time.Since(start).Seconds())\n\treturn nil\n\n}\n\nfunc launchSelfHostedScheduler(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {\n\tstart := time.Now()\n\tscheduler := getSchedulerDeployment(cfg)\n\tif _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(&scheduler); err != nil {\n\t\treturn fmt.Errorf(\"failed to create self-hosted %q deployment [%v]\", kubeScheduler, err)\n\t}\n\n\twaitForPodsWithLabel(client, \"self-hosted-\"+kubeScheduler, true)\n\n\tschedulerStaticManifestPath := buildStaticManifestFilepath(kubeScheduler)\n\tif err := os.RemoveAll(schedulerStaticManifestPath); err != nil {\n\t\treturn fmt.Errorf(\"unable to delete temporary scheduler manifest [%v]\", err)\n\t}\n\n\tfmt.Printf(\"[self-hosted] self-hosted kube-scheduler ready after %f seconds\\n\", time.Since(start).Seconds())\n\treturn nil\n}\n\n\/\/ waitForPodsWithLabel will lookup pods with the given label and wait until they are all\n\/\/ reporting status as running.\nfunc waitForPodsWithLabel(client *clientset.Clientset, appLabel string, mustBeRunning bool) {\n\twait.PollInfinite(apiCallRetryInterval, func() (bool, error) {\n\t\t\/\/ TODO: Do we need a stronger label link than this?\n\t\tlistOpts := metav1.ListOptions{LabelSelector: fmt.Sprintf(\"k8s-app=%s\", appLabel)}\n\t\tapiPods, err := client.Pods(metav1.NamespaceSystem).List(listOpts)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"[self-hosted] error getting %s pods [%v]\\n\", appLabel, err)\n\t\t\treturn false, nil\n\t\t}\n\t\tfmt.Printf(\"[self-hosted] Found %d %s pods\\n\", len(apiPods.Items), appLabel)\n\n\t\t\/\/ TODO: HA\n\t\tif int32(len(apiPods.Items)) != 1 {\n\t\t\treturn false, nil\n\t\t}\n\t\tfor _, pod := range apiPods.Items {\n\t\t\tfmt.Printf(\"[self-hosted] Pod %s status: %s\\n\", pod.Name, pod.Status.Phase)\n\t\t\tif mustBeRunning && pod.Status.Phase != \"Running\" {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t}\n\n\t\treturn true, nil\n\t})\n}\n\n\/\/ Sources from bootkube templates.go\nfunc getAPIServerDS(cfg *kubeadmapi.MasterConfiguration, volumes []v1.Volume, volumeMounts []v1.VolumeMount) ext.DaemonSet {\n\tds := ext.DaemonSet{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"extensions\/v1beta1\",\n\t\t\tKind: \"DaemonSet\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"self-hosted-\" + kubeAPIServer,\n\t\t\tNamespace: \"kube-system\",\n\t\t\tLabels: map[string]string{\"k8s-app\": \"self-hosted-\" + kubeAPIServer},\n\t\t},\n\t\tSpec: ext.DaemonSetSpec{\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"k8s-app\": \"self-hosted-\" + kubeAPIServer,\n\t\t\t\t\t\t\"component\": kubeAPIServer,\n\t\t\t\t\t\t\"tier\": \"control-plane\",\n\t\t\t\t\t},\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.TolerationsAnnotationKey: getMasterToleration(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tNodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},\n\t\t\t\t\tHostNetwork: true,\n\t\t\t\t\tVolumes: volumes,\n\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"self-hosted-\" + kubeAPIServer,\n\t\t\t\t\t\t\tImage: images.GetCoreImage(images.KubeAPIServerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),\n\t\t\t\t\t\t\tCommand: getAPIServerCommand(cfg, true),\n\t\t\t\t\t\t\tEnv: getSelfHostedAPIServerEnv(),\n\t\t\t\t\t\t\tVolumeMounts: volumeMounts,\n\t\t\t\t\t\t\tLivenessProbe: componentProbe(8080, \"\/healthz\"),\n\t\t\t\t\t\t\tResources: componentResources(\"250m\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\treturn ds\n}\n\nfunc getControllerManagerDeployment(cfg *kubeadmapi.MasterConfiguration, volumes []v1.Volume, volumeMounts []v1.VolumeMount) ext.Deployment {\n\td := ext.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"extensions\/v1beta1\",\n\t\t\tKind: \"Deployment\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"self-hosted-\" + kubeControllerManager,\n\t\t\tNamespace: \"kube-system\",\n\t\t\tLabels: map[string]string{\"k8s-app\": \"self-hosted-\" + kubeControllerManager},\n\t\t},\n\t\tSpec: ext.DeploymentSpec{\n\t\t\t\/\/ TODO bootkube uses 2 replicas\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"k8s-app\": \"self-hosted-\" + kubeControllerManager,\n\t\t\t\t\t\t\"component\": kubeControllerManager,\n\t\t\t\t\t\t\"tier\": \"control-plane\",\n\t\t\t\t\t},\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.TolerationsAnnotationKey: getMasterToleration(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tNodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},\n\t\t\t\t\tHostNetwork: true,\n\t\t\t\t\tVolumes: volumes,\n\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"self-hosted-\" + kubeControllerManager,\n\t\t\t\t\t\t\tImage: images.GetCoreImage(images.KubeControllerManagerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),\n\t\t\t\t\t\t\tCommand: getControllerManagerCommand(cfg, true),\n\t\t\t\t\t\t\tVolumeMounts: volumeMounts,\n\t\t\t\t\t\t\tLivenessProbe: componentProbe(10252, \"\/healthz\"),\n\t\t\t\t\t\t\tResources: componentResources(\"200m\"),\n\t\t\t\t\t\t\tEnv: getProxyEnvVars(),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tDNSPolicy: v1.DNSDefault,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\treturn d\n}\n\nfunc getSchedulerDeployment(cfg *kubeadmapi.MasterConfiguration) ext.Deployment {\n\td := ext.Deployment{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: \"extensions\/v1beta1\",\n\t\t\tKind: \"Deployment\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"self-hosted-\" + kubeScheduler,\n\t\t\tNamespace: \"kube-system\",\n\t\t\tLabels: map[string]string{\"k8s-app\": \"self-hosted-\" + kubeScheduler},\n\t\t},\n\t\tSpec: ext.DeploymentSpec{\n\t\t\t\/\/ TODO bootkube uses 2 replicas\n\t\t\tTemplate: v1.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\"k8s-app\": \"self-hosted-\" + kubeScheduler,\n\t\t\t\t\t\t\"component\": kubeScheduler,\n\t\t\t\t\t\t\"tier\": \"control-plane\",\n\t\t\t\t\t},\n\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\tv1.TolerationsAnnotationKey: getMasterToleration(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: v1.PodSpec{\n\t\t\t\t\tNodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},\n\t\t\t\t\tHostNetwork: true,\n\t\t\t\t\tContainers: []v1.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"self-hosted-\" + kubeScheduler,\n\t\t\t\t\t\t\tImage: images.GetCoreImage(images.KubeSchedulerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),\n\t\t\t\t\t\t\tCommand: getSchedulerCommand(cfg, true),\n\t\t\t\t\t\t\tLivenessProbe: componentProbe(10251, \"\/healthz\"),\n\t\t\t\t\t\t\tResources: componentResources(\"100m\"),\n\t\t\t\t\t\t\tEnv: getProxyEnvVars(),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\treturn d\n}\n\nfunc buildStaticManifestFilepath(name string) string {\n\treturn path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, \"manifests\", name+\".json\")\n}\n\nfunc getMasterToleration() string {\n\t\/\/ Tolerate the master taint we add to our master nodes, as this can and should\n\t\/\/ run there.\n\t\/\/ TODO: Duplicated above\n\tmasterToleration, _ := json.Marshal([]v1.Toleration{{\n\t\tKey: \"dedicated\",\n\t\tValue: \"master\",\n\t\tOperator: v1.TolerationOpEqual,\n\t\tEffect: v1.TaintEffectNoSchedule,\n\t}})\n\treturn string(masterToleration)\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\n\/\/ Version is the current version of the buffalo binary\nvar Version = \"0.7.0.pre\"\n<commit_msg>version bump to 0.7.0<commit_after>package cmd\n\n\/\/ Version is the current version of the buffalo binary\nvar Version = \"0.7.0\"\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ Version is the current version of the buffalo binary\nconst Version = \"0.8.1\"\n\nfunc init() {\n\tRootCmd.AddCommand(versionCmd)\n}\n\nvar versionCmd = &cobra.Command{\n\tUse: \"version\",\n\tShort: \"Print the version number of buffalo\",\n\tLong: `All software has versions. This is buffalo's.`,\n\tRun: func(c *cobra.Command, args []string) {\n\t\tfmt.Printf(\"Buffalo version is: %s\\n\", Version)\n\t},\n\t\/\/ needed to override the root level pre-run func\n\tPersistentPreRun: func(c *cobra.Command, args []string) {\n\t},\n}\n<commit_msg>set development version 0.8.2.dev<commit_after>package cmd\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ Version is the current version of the buffalo binary\nconst Version = \"0.8.2.dev\"\n\nfunc init() {\n\tRootCmd.AddCommand(versionCmd)\n}\n\nvar versionCmd = &cobra.Command{\n\tUse: \"version\",\n\tShort: \"Print the version number of buffalo\",\n\tLong: `All software has versions. This is buffalo's.`,\n\tRun: func(c *cobra.Command, args []string) {\n\t\tfmt.Printf(\"Buffalo version is: %s\\n\", Version)\n\t},\n\t\/\/ needed to override the root level pre-run func\n\tPersistentPreRun: func(c *cobra.Command, args []string) {\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ Version is the current version of the buffalo binary\nconst Version = \"development\"\n\nfunc init() {\n\tdecorate(\"version\", versionCmd)\n\tRootCmd.AddCommand(versionCmd)\n}\n\nvar versionCmd = &cobra.Command{\n\tUse: \"version\",\n\tShort: \"Print the version number of buffalo\",\n\tLong: `All software has versions. This is buffalo's.`,\n\tRun: func(c *cobra.Command, args []string) {\n\t\tfmt.Printf(\"Buffalo version is: %s\\n\", Version)\n\t},\n\t\/\/ needed to override the root level pre-run func\n\tPersistentPreRunE: func(c *cobra.Command, args []string) error {\n\t\treturn nil\n\t},\n}\n<commit_msg>version bump<commit_after>package cmd\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ Version is the current version of the buffalo binary\nconst Version = \"v0.9.2\"\n\nfunc init() {\n\tdecorate(\"version\", versionCmd)\n\tRootCmd.AddCommand(versionCmd)\n}\n\nvar versionCmd = &cobra.Command{\n\tUse: \"version\",\n\tShort: \"Print the version number of buffalo\",\n\tLong: `All software has versions. This is buffalo's.`,\n\tRun: func(c *cobra.Command, args []string) {\n\t\tfmt.Printf(\"Buffalo version is: %s\\n\", Version)\n\t},\n\t\/\/ needed to override the root level pre-run func\n\tPersistentPreRunE: func(c *cobra.Command, args []string) error {\n\t\treturn nil\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Replace control characters in formatted output to avoid mangling<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2017 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage resolve\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"sort\"\n\n\t\"github.com\/google\/gapid\/core\/data\/id\"\n\t\"github.com\/google\/gapid\/gapis\/api\"\n\t\"github.com\/google\/gapid\/gapis\/capture\"\n\t\"github.com\/google\/gapid\/gapis\/database\"\n\t\"github.com\/google\/gapid\/gapis\/extensions\"\n\t\"github.com\/google\/gapid\/gapis\/messages\"\n\t\"github.com\/google\/gapid\/gapis\/service\"\n\t\"github.com\/google\/gapid\/gapis\/service\/path\"\n)\n\n\/\/ Contexts resolves the list of contexts belonging to a capture.\nfunc Contexts(ctx context.Context, p *path.Contexts) ([]*api.ContextInfo, error) {\n\tobj, err := database.Build(ctx, &ContextListResolvable{p.Capture})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn obj.([]*api.ContextInfo), nil\n}\n\n\/\/ ContextsByID resolves the list of contexts belonging to a capture.\nfunc ContextsByID(ctx context.Context, p *path.Contexts) (map[api.ContextID]*api.ContextInfo, error) {\n\tctxs, err := Contexts(ctx, p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tout := map[api.ContextID]*api.ContextInfo{}\n\tfor _, c := range ctxs {\n\t\tout[c.ID] = c\n\t}\n\treturn out, nil\n}\n\n\/\/ Context resolves the single context.\nfunc Context(ctx context.Context, p *path.Context) (*api.ContextInfo, error) {\n\tcontexts, err := Contexts(ctx, p.Capture.Contexts())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tid := api.ContextID(p.Id.ID())\n\tfor _, c := range contexts {\n\t\tif c.ID == id {\n\t\t\treturn c, nil\n\t\t}\n\t}\n\treturn nil, &service.ErrInvalidPath{\n\t\tReason: messages.ErrContextDoesNotExist(p.Id),\n\t\tPath: p.Path(),\n\t}\n}\n\n\/\/ Importance is the interface implemeneted by commands that provide an\n\/\/ \"importance score\". This value is used to prioritize contexts.\ntype Importance interface {\n\tImportance() int\n}\n\n\/\/ Named is the interface implemented by context that have a name.\ntype Named interface {\n\tName() string\n}\n\n\/\/ Resolve implements the database.Resolver interface.\nfunc (r *ContextListResolvable) Resolve(ctx context.Context) (interface{}, error) {\n\tctx = capture.Put(ctx, r.Capture)\n\n\tc, err := capture.Resolve(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttype ctxInfo struct {\n\t\tctx api.Context\n\t\tcnts map[reflect.Type]int\n\t\tpri int\n\t}\n\n\tseen := map[api.ContextID]int{}\n\tcontexts := []*ctxInfo{}\n\n\ts := c.NewState()\n\terr = api.ForeachCmd(ctx, c.Commands, func(ctx context.Context, i api.CmdID, cmd api.Cmd) error {\n\t\tcmd.Mutate(ctx, i, s, nil)\n\n\t\tapi := cmd.API()\n\t\tif api == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tcontext := api.Context(s, cmd.Thread())\n\t\tif context == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tid := context.ID()\n\t\tidx, ok := seen[id]\n\t\tif !ok {\n\t\t\tidx = len(contexts)\n\t\t\tseen[id] = idx\n\t\t\tcontexts = append(contexts, &ctxInfo{\n\t\t\t\tctx: context,\n\t\t\t\tcnts: map[reflect.Type]int{},\n\t\t\t})\n\t\t}\n\n\t\tc := contexts[idx]\n\t\tcmdTy := reflect.TypeOf(cmd)\n\t\tc.cnts[cmdTy] = c.cnts[cmdTy] + 1\n\t\tif i, ok := cmd.(Importance); ok {\n\t\t\tc.pri += i.Importance()\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsort.Slice(contexts, func(i, j int) bool {\n\t\treturn contexts[i].pri < contexts[j].pri\n\t})\n\n\tout := make([]*api.ContextInfo, len(contexts))\n\tfor i, c := range contexts {\n\t\tname := fmt.Sprintf(\"Context %v\", i)\n\t\tif n, ok := c.ctx.(Named); ok {\n\t\t\tname = n.Name()\n\t\t}\n\t\tout[i] = &api.ContextInfo{\n\t\t\tPath: r.Capture.Context(id.ID(c.ctx.ID())),\n\t\t\tID: c.ctx.ID(),\n\t\t\tAPI: c.ctx.API().ID(),\n\t\t\tNumCommandsByType: c.cnts,\n\t\t\tName: name,\n\t\t\tPriority: i,\n\t\t\tUserData: map[interface{}]interface{}{},\n\t\t}\n\t}\n\n\tfor _, e := range extensions.Get() {\n\t\tif e.AdjustContexts != nil {\n\t\t\te.AdjustContexts(ctx, out)\n\t\t}\n\t}\n\n\treturn out, nil\n}\n<commit_msg>Invert the context priorioty ordering.<commit_after>\/\/ Copyright (C) 2017 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage resolve\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"sort\"\n\n\t\"github.com\/google\/gapid\/core\/data\/id\"\n\t\"github.com\/google\/gapid\/gapis\/api\"\n\t\"github.com\/google\/gapid\/gapis\/capture\"\n\t\"github.com\/google\/gapid\/gapis\/database\"\n\t\"github.com\/google\/gapid\/gapis\/extensions\"\n\t\"github.com\/google\/gapid\/gapis\/messages\"\n\t\"github.com\/google\/gapid\/gapis\/service\"\n\t\"github.com\/google\/gapid\/gapis\/service\/path\"\n)\n\n\/\/ Contexts resolves the list of contexts belonging to a capture.\nfunc Contexts(ctx context.Context, p *path.Contexts) ([]*api.ContextInfo, error) {\n\tobj, err := database.Build(ctx, &ContextListResolvable{p.Capture})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn obj.([]*api.ContextInfo), nil\n}\n\n\/\/ ContextsByID resolves the list of contexts belonging to a capture.\nfunc ContextsByID(ctx context.Context, p *path.Contexts) (map[api.ContextID]*api.ContextInfo, error) {\n\tctxs, err := Contexts(ctx, p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tout := map[api.ContextID]*api.ContextInfo{}\n\tfor _, c := range ctxs {\n\t\tout[c.ID] = c\n\t}\n\treturn out, nil\n}\n\n\/\/ Context resolves the single context.\nfunc Context(ctx context.Context, p *path.Context) (*api.ContextInfo, error) {\n\tcontexts, err := Contexts(ctx, p.Capture.Contexts())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tid := api.ContextID(p.Id.ID())\n\tfor _, c := range contexts {\n\t\tif c.ID == id {\n\t\t\treturn c, nil\n\t\t}\n\t}\n\treturn nil, &service.ErrInvalidPath{\n\t\tReason: messages.ErrContextDoesNotExist(p.Id),\n\t\tPath: p.Path(),\n\t}\n}\n\n\/\/ Importance is the interface implemeneted by commands that provide an\n\/\/ \"importance score\". This value is used to prioritize contexts.\ntype Importance interface {\n\tImportance() int\n}\n\n\/\/ Named is the interface implemented by context that have a name.\ntype Named interface {\n\tName() string\n}\n\n\/\/ Resolve implements the database.Resolver interface.\nfunc (r *ContextListResolvable) Resolve(ctx context.Context) (interface{}, error) {\n\tctx = capture.Put(ctx, r.Capture)\n\n\tc, err := capture.Resolve(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttype ctxInfo struct {\n\t\tctx api.Context\n\t\tcnts map[reflect.Type]int\n\t\tpri int\n\t}\n\n\tseen := map[api.ContextID]int{}\n\tcontexts := []*ctxInfo{}\n\n\ts := c.NewState()\n\terr = api.ForeachCmd(ctx, c.Commands, func(ctx context.Context, i api.CmdID, cmd api.Cmd) error {\n\t\tcmd.Mutate(ctx, i, s, nil)\n\n\t\tapi := cmd.API()\n\t\tif api == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tcontext := api.Context(s, cmd.Thread())\n\t\tif context == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tid := context.ID()\n\t\tidx, ok := seen[id]\n\t\tif !ok {\n\t\t\tidx = len(contexts)\n\t\t\tseen[id] = idx\n\t\t\tcontexts = append(contexts, &ctxInfo{\n\t\t\t\tctx: context,\n\t\t\t\tcnts: map[reflect.Type]int{},\n\t\t\t})\n\t\t}\n\n\t\tc := contexts[idx]\n\t\tcmdTy := reflect.TypeOf(cmd)\n\t\tc.cnts[cmdTy] = c.cnts[cmdTy] + 1\n\t\tif i, ok := cmd.(Importance); ok {\n\t\t\tc.pri += i.Importance()\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsort.Slice(contexts, func(i, j int) bool {\n\t\treturn contexts[i].pri > contexts[j].pri\n\t})\n\n\tout := make([]*api.ContextInfo, len(contexts))\n\tfor i, c := range contexts {\n\t\tname := fmt.Sprintf(\"Context %v\", i)\n\t\tif n, ok := c.ctx.(Named); ok {\n\t\t\tname = n.Name()\n\t\t}\n\t\tout[i] = &api.ContextInfo{\n\t\t\tPath: r.Capture.Context(id.ID(c.ctx.ID())),\n\t\t\tID: c.ctx.ID(),\n\t\t\tAPI: c.ctx.API().ID(),\n\t\t\tNumCommandsByType: c.cnts,\n\t\t\tName: name,\n\t\t\tPriority: len(contexts)-i,\n\t\t\tUserData: map[interface{}]interface{}{},\n\t\t}\n\t}\n\n\tfor _, e := range extensions.Get() {\n\t\tif e.AdjustContexts != nil {\n\t\t\te.AdjustContexts(ctx, out)\n\t\t}\n\t}\n\n\treturn out, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage migration_test\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/juju\/names\"\n\tjc \"github.com\/juju\/testing\/checkers\"\n\t\"github.com\/juju\/utils\"\n\t\"github.com\/juju\/version\"\n\tgc \"gopkg.in\/check.v1\"\n\t\"gopkg.in\/juju\/charm.v6-unstable\"\n\n\t\"github.com\/juju\/juju\/cmd\/modelcmd\"\n\t\"github.com\/juju\/juju\/core\/description\"\n\t\"github.com\/juju\/juju\/environs\"\n\t\"github.com\/juju\/juju\/jujuclient\/jujuclienttesting\"\n\t\"github.com\/juju\/juju\/migration\"\n\t\"github.com\/juju\/juju\/provider\/dummy\"\n\t_ \"github.com\/juju\/juju\/provider\/dummy\"\n\t\"github.com\/juju\/juju\/state\"\n\tstatetesting \"github.com\/juju\/juju\/state\/testing\"\n\t\"github.com\/juju\/juju\/testing\"\n\t\"github.com\/juju\/juju\/tools\"\n)\n\ntype ImportSuite struct {\n\tstatetesting.StateSuite\n}\n\nvar _ = gc.Suite(&ImportSuite{})\n\nfunc (s *ImportSuite) SetUpTest(c *gc.C) {\n\t\/\/ Specify the config to use for the controller model before calling\n\t\/\/ SetUpTest of the StateSuite, otherwise we get testing.ModelConfig(c).\n\t\/\/ The default provider type specified in the testing.ModelConfig function\n\t\/\/ is one that isn't registered as a valid provider. For our tests here we\n\t\/\/ need a real registered provider, so we use the dummy provider.\n\t\/\/ NOTE: make a better test provider.\n\tenv, err := environs.Prepare(\n\t\tmodelcmd.BootstrapContext(testing.Context(c)),\n\t\tjujuclienttesting.NewMemStore(),\n\t\tenvirons.PrepareParams{\n\t\t\tControllerName: \"dummycontroller\",\n\t\t\tBaseConfig: dummy.SampleConfig(),\n\t\t\tCloudName: \"dummy\",\n\t\t},\n\t)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\ts.InitialConfig = testing.CustomModelConfig(c, env.Config().AllAttrs())\n\ts.StateSuite.SetUpTest(c)\n}\n\nfunc (s *ImportSuite) TestBadBytes(c *gc.C) {\n\tbytes := []byte(\"not a model\")\n\tmodel, st, err := migration.ImportModel(s.State, bytes)\n\tc.Check(st, gc.IsNil)\n\tc.Check(model, gc.IsNil)\n\tc.Assert(err, gc.ErrorMatches, \"yaml: unmarshal errors:\\n.*\")\n}\n\nfunc (s *ImportSuite) TestImportModel(c *gc.C) {\n\tmodel, err := s.State.Export()\n\tc.Check(err, jc.ErrorIsNil)\n\n\tcontrollerConfig, err := s.State.ModelConfig()\n\tc.Check(err, jc.ErrorIsNil)\n\n\t\/\/ Update the config values in the exported model for different values for\n\t\/\/ \"state-port\", \"api-port\", and \"ca-cert\". Also give the model a new UUID\n\t\/\/ and name so we can import it nicely.\n\tmodel.UpdateConfig(map[string]interface{}{\n\t\t\"name\": \"new-model\",\n\t\t\"uuid\": utils.MustNewUUID().String(),\n\t\t\"state-port\": 12345,\n\t\t\"api-port\": 54321,\n\t\t\"ca-cert\": \"not really a cert\",\n\t})\n\n\tbytes, err := description.Serialize(model)\n\tc.Check(err, jc.ErrorIsNil)\n\n\tdbModel, dbState, err := migration.ImportModel(s.State, bytes)\n\tc.Check(err, jc.ErrorIsNil)\n\tdefer dbState.Close()\n\n\tdbConfig, err := dbModel.Config()\n\tc.Assert(err, jc.ErrorIsNil)\n\tattrs := dbConfig.AllAttrs()\n\tc.Assert(attrs[\"state-port\"], gc.Equals, controllerConfig.StatePort())\n\tc.Assert(attrs[\"api-port\"], gc.Equals, controllerConfig.APIPort())\n\tcacert, ok := controllerConfig.CACert()\n\tc.Assert(ok, jc.IsTrue)\n\tc.Assert(attrs[\"ca-cert\"], gc.Equals, cacert)\n\tc.Assert(attrs[\"controller-uuid\"], gc.Equals, controllerConfig.UUID())\n}\n\nfunc (s *ImportSuite) TestBinariesMigration(c *gc.C) {\n\tdownloader := &fakeDownloader{}\n\tuploader := &fakeUploader{\n\t\tcharms: make(map[string]string),\n\t\ttools: make(map[version.Binary]string),\n\t}\n\n\ttoolsMap := map[version.Binary]string{\n\t\tversion.MustParseBinary(\"2.1.0-trusty-amd64\"): \"\/tools\/0\",\n\t\tversion.MustParseBinary(\"2.0.0-xenial-amd64\"): \"\/tools\/1\",\n\t}\n\tconfig := migration.UploadBinariesConfig{\n\t\tCharms: []string{\"local:trusty\/magic\", \"cs:trusty\/postgresql-42\"},\n\t\tCharmDownloader: downloader,\n\t\tCharmUploader: uploader,\n\t\tTools: toolsMap,\n\t\tToolsDownloader: downloader,\n\t\tToolsUploader: uploader,\n\t}\n\terr := migration.UploadBinaries(config)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tc.Assert(downloader.charms, jc.DeepEquals, []string{\n\t\t\"local:trusty\/magic\",\n\t\t\"cs:trusty\/postgresql-42\",\n\t})\n\tc.Assert(uploader.charms, jc.DeepEquals, map[string]string{\n\t\t\"local:trusty\/magic\": \"local:trusty\/magic content\",\n\t\t\"cs:trusty\/postgresql-42\": \"cs:trusty\/postgresql-42 content\",\n\t})\n\tc.Assert(downloader.uris, jc.SameContents, []string{\n\t\t\"\/tools\/0\",\n\t\t\"\/tools\/1\",\n\t})\n\tc.Assert(uploader.tools, jc.DeepEquals, toolsMap)\n}\n\ntype fakeDownloader struct {\n\tcharms []string\n\turis []string\n}\n\nfunc (d *fakeDownloader) OpenCharm(curl *charm.URL) (io.ReadCloser, error) {\n\turlStr := curl.String()\n\td.charms = append(d.charms, urlStr)\n\t\/\/ Return the charm URL string as the fake charm content\n\treturn ioutil.NopCloser(bytes.NewReader([]byte(urlStr + \" content\"))), nil\n}\n\nfunc (d *fakeDownloader) OpenURI(uri string, query url.Values) (io.ReadCloser, error) {\n\tif query != nil {\n\t\tpanic(\"query should be empty\")\n\t}\n\td.uris = append(d.uris, uri)\n\t\/\/ Return the URI string as fake content\n\treturn ioutil.NopCloser(bytes.NewReader([]byte(uri))), nil\n}\n\ntype fakeUploader struct {\n\ttools map[version.Binary]string\n\tcharms map[string]string\n}\n\nfunc (f *fakeUploader) UploadTools(r io.ReadSeeker, v version.Binary, _ ...string) (tools.List, error) {\n\tdata, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tf.tools[v] = string(data)\n\treturn tools.List{&tools.Tools{Version: v}}, nil\n}\n\nfunc (f *fakeUploader) UploadCharm(u *charm.URL, r io.ReadSeeker) (*charm.URL, error) {\n\tdata, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tf.charms[u.String()] = string(data)\n\treturn u, nil\n}\n\ntype ExportSuite struct {\n\tstatetesting.StateSuite\n}\n\nvar _ = gc.Suite(&ExportSuite{})\n\nfunc (s *ExportSuite) TestExportModel(c *gc.C) {\n\tbytes, err := migration.ExportModel(s.State)\n\tc.Assert(err, jc.ErrorIsNil)\n\t\/\/ The bytes must be a valid model.\n\t_, err = description.Deserialize(bytes)\n\tc.Assert(err, jc.ErrorIsNil)\n}\n\ntype PrecheckSuite struct {\n\ttesting.BaseSuite\n}\n\nvar _ = gc.Suite(&PrecheckSuite{})\n\n\/\/ Assert that *state.State implements the PrecheckBackend\nvar _ migration.PrecheckBackend = (*state.State)(nil)\n\nfunc (*PrecheckSuite) TestPrecheckCleanups(c *gc.C) {\n\tbackend := &fakePrecheckBackend{}\n\terr := migration.Precheck(backend)\n\tc.Assert(err, jc.ErrorIsNil)\n}\n\nfunc (*PrecheckSuite) TestPrecheckCleanupsError(c *gc.C) {\n\tbackend := &fakePrecheckBackend{\n\t\tcleanupError: errors.New(\"boom\"),\n\t}\n\terr := migration.Precheck(backend)\n\tc.Assert(err, gc.ErrorMatches, \"precheck cleanups: boom\")\n}\n\nfunc (*PrecheckSuite) TestPrecheckCleanupsNeeded(c *gc.C) {\n\tbackend := &fakePrecheckBackend{\n\t\tcleanupNeeded: true,\n\t}\n\terr := migration.Precheck(backend)\n\tc.Assert(err, gc.ErrorMatches, \"precheck failed: cleanup needed\")\n}\n\ntype fakePrecheckBackend struct {\n\tcleanupNeeded bool\n\tcleanupError error\n}\n\nfunc (f *fakePrecheckBackend) NeedsCleanup() (bool, error) {\n\treturn f.cleanupNeeded, f.cleanupError\n}\n\ntype InternalSuite struct {\n\ttesting.BaseSuite\n}\n\nvar _ = gc.Suite(&InternalSuite{})\n\nfunc (s *InternalSuite) TestControllerValues(c *gc.C) {\n\tconfig := testing.ModelConfig(c)\n\tfields := migration.ControllerValues(config)\n\tc.Assert(fields, jc.DeepEquals, map[string]interface{}{\n\t\t\"controller-uuid\": \"deadbeef-0bad-400d-8000-4b1d0d06f00d\",\n\t\t\"state-port\": 19034,\n\t\t\"api-port\": 17777,\n\t\t\"ca-cert\": testing.CACert,\n\t})\n}\n\nfunc (s *InternalSuite) TestUpdateConfigFromProvider(c *gc.C) {\n\tcontrollerConfig := testing.ModelConfig(c)\n\tconfigAttrs := testing.FakeConfig()\n\tconfigAttrs[\"type\"] = \"dummy\"\n\t\/\/ Fake the \"state-id\" so the provider thinks it is prepared already.\n\tconfigAttrs[\"state-id\"] = \"42\"\n\t\/\/ We need to specify a valid provider type, so we use dummy.\n\t\/\/ The dummy provider grabs the UUID from the controller config\n\t\/\/ and returns it in the map with the key \"controller-uuid\", similar\n\t\/\/ to what the azure provider will need to do.\n\tmodel := description.NewModel(description.ModelArgs{\n\t\tOwner: names.NewUserTag(\"test-admin\"),\n\t\tConfig: configAttrs,\n\t})\n\n\terr := migration.UpdateConfigFromProvider(model, controllerConfig)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tmodelConfig := model.Config()\n\tc.Assert(modelConfig[\"controller-uuid\"], gc.Equals, controllerConfig.UUID())\n}\n<commit_msg>migration: Add missing UploadBinariesConfig validation tests<commit_after>\/\/ Copyright 2016 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage migration_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/juju\/names\"\n\tjc \"github.com\/juju\/testing\/checkers\"\n\t\"github.com\/juju\/utils\"\n\t\"github.com\/juju\/version\"\n\tgc \"gopkg.in\/check.v1\"\n\t\"gopkg.in\/juju\/charm.v6-unstable\"\n\n\t\"github.com\/juju\/juju\/cmd\/modelcmd\"\n\t\"github.com\/juju\/juju\/core\/description\"\n\t\"github.com\/juju\/juju\/environs\"\n\t\"github.com\/juju\/juju\/jujuclient\/jujuclienttesting\"\n\t\"github.com\/juju\/juju\/migration\"\n\t\"github.com\/juju\/juju\/provider\/dummy\"\n\t_ \"github.com\/juju\/juju\/provider\/dummy\"\n\t\"github.com\/juju\/juju\/state\"\n\tstatetesting \"github.com\/juju\/juju\/state\/testing\"\n\t\"github.com\/juju\/juju\/testing\"\n\t\"github.com\/juju\/juju\/tools\"\n)\n\ntype ImportSuite struct {\n\tstatetesting.StateSuite\n}\n\nvar _ = gc.Suite(&ImportSuite{})\n\nfunc (s *ImportSuite) SetUpTest(c *gc.C) {\n\t\/\/ Specify the config to use for the controller model before calling\n\t\/\/ SetUpTest of the StateSuite, otherwise we get testing.ModelConfig(c).\n\t\/\/ The default provider type specified in the testing.ModelConfig function\n\t\/\/ is one that isn't registered as a valid provider. For our tests here we\n\t\/\/ need a real registered provider, so we use the dummy provider.\n\t\/\/ NOTE: make a better test provider.\n\tenv, err := environs.Prepare(\n\t\tmodelcmd.BootstrapContext(testing.Context(c)),\n\t\tjujuclienttesting.NewMemStore(),\n\t\tenvirons.PrepareParams{\n\t\t\tControllerName: \"dummycontroller\",\n\t\t\tBaseConfig: dummy.SampleConfig(),\n\t\t\tCloudName: \"dummy\",\n\t\t},\n\t)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\ts.InitialConfig = testing.CustomModelConfig(c, env.Config().AllAttrs())\n\ts.StateSuite.SetUpTest(c)\n}\n\nfunc (s *ImportSuite) TestBadBytes(c *gc.C) {\n\tbytes := []byte(\"not a model\")\n\tmodel, st, err := migration.ImportModel(s.State, bytes)\n\tc.Check(st, gc.IsNil)\n\tc.Check(model, gc.IsNil)\n\tc.Assert(err, gc.ErrorMatches, \"yaml: unmarshal errors:\\n.*\")\n}\n\nfunc (s *ImportSuite) TestImportModel(c *gc.C) {\n\tmodel, err := s.State.Export()\n\tc.Check(err, jc.ErrorIsNil)\n\n\tcontrollerConfig, err := s.State.ModelConfig()\n\tc.Check(err, jc.ErrorIsNil)\n\n\t\/\/ Update the config values in the exported model for different values for\n\t\/\/ \"state-port\", \"api-port\", and \"ca-cert\". Also give the model a new UUID\n\t\/\/ and name so we can import it nicely.\n\tmodel.UpdateConfig(map[string]interface{}{\n\t\t\"name\": \"new-model\",\n\t\t\"uuid\": utils.MustNewUUID().String(),\n\t\t\"state-port\": 12345,\n\t\t\"api-port\": 54321,\n\t\t\"ca-cert\": \"not really a cert\",\n\t})\n\n\tbytes, err := description.Serialize(model)\n\tc.Check(err, jc.ErrorIsNil)\n\n\tdbModel, dbState, err := migration.ImportModel(s.State, bytes)\n\tc.Check(err, jc.ErrorIsNil)\n\tdefer dbState.Close()\n\n\tdbConfig, err := dbModel.Config()\n\tc.Assert(err, jc.ErrorIsNil)\n\tattrs := dbConfig.AllAttrs()\n\tc.Assert(attrs[\"state-port\"], gc.Equals, controllerConfig.StatePort())\n\tc.Assert(attrs[\"api-port\"], gc.Equals, controllerConfig.APIPort())\n\tcacert, ok := controllerConfig.CACert()\n\tc.Assert(ok, jc.IsTrue)\n\tc.Assert(attrs[\"ca-cert\"], gc.Equals, cacert)\n\tc.Assert(attrs[\"controller-uuid\"], gc.Equals, controllerConfig.UUID())\n}\n\nfunc (s *ImportSuite) TestUploadBinariesConfigValidate(c *gc.C) {\n\ttype T migration.UploadBinariesConfig \/\/ alias for brevity\n\n\tcheck := func(modify func(*T), missing string) {\n\t\tconfig := T{\n\t\t\tCharmDownloader: struct{ migration.CharmDownloader }{},\n\t\t\tCharmUploader: struct{ migration.CharmUploader }{},\n\t\t\tToolsDownloader: struct{ migration.ToolsDownloader }{},\n\t\t\tToolsUploader: struct{ migration.ToolsUploader }{},\n\t\t}\n\t\tmodify(&config)\n\t\trealConfig := migration.UploadBinariesConfig(config)\n\t\tc.Check(realConfig.Validate(), gc.ErrorMatches, fmt.Sprintf(\"missing %s not valid\", missing))\n\t}\n\n\tcheck(func(c *T) { c.CharmDownloader = nil }, \"CharmDownloader\")\n\tcheck(func(c *T) { c.CharmUploader = nil }, \"CharmUploader\")\n\tcheck(func(c *T) { c.ToolsDownloader = nil }, \"ToolsDownloader\")\n\tcheck(func(c *T) { c.ToolsUploader = nil }, \"ToolsUploader\")\n}\n\nfunc (s *ImportSuite) TestBinariesMigration(c *gc.C) {\n\tdownloader := &fakeDownloader{}\n\tuploader := &fakeUploader{\n\t\tcharms: make(map[string]string),\n\t\ttools: make(map[version.Binary]string),\n\t}\n\n\ttoolsMap := map[version.Binary]string{\n\t\tversion.MustParseBinary(\"2.1.0-trusty-amd64\"): \"\/tools\/0\",\n\t\tversion.MustParseBinary(\"2.0.0-xenial-amd64\"): \"\/tools\/1\",\n\t}\n\tconfig := migration.UploadBinariesConfig{\n\t\tCharms: []string{\"local:trusty\/magic\", \"cs:trusty\/postgresql-42\"},\n\t\tCharmDownloader: downloader,\n\t\tCharmUploader: uploader,\n\t\tTools: toolsMap,\n\t\tToolsDownloader: downloader,\n\t\tToolsUploader: uploader,\n\t}\n\terr := migration.UploadBinaries(config)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tc.Assert(downloader.charms, jc.DeepEquals, []string{\n\t\t\"local:trusty\/magic\",\n\t\t\"cs:trusty\/postgresql-42\",\n\t})\n\tc.Assert(uploader.charms, jc.DeepEquals, map[string]string{\n\t\t\"local:trusty\/magic\": \"local:trusty\/magic content\",\n\t\t\"cs:trusty\/postgresql-42\": \"cs:trusty\/postgresql-42 content\",\n\t})\n\tc.Assert(downloader.uris, jc.SameContents, []string{\n\t\t\"\/tools\/0\",\n\t\t\"\/tools\/1\",\n\t})\n\tc.Assert(uploader.tools, jc.DeepEquals, toolsMap)\n}\n\ntype fakeDownloader struct {\n\tcharms []string\n\turis []string\n}\n\nfunc (d *fakeDownloader) OpenCharm(curl *charm.URL) (io.ReadCloser, error) {\n\turlStr := curl.String()\n\td.charms = append(d.charms, urlStr)\n\t\/\/ Return the charm URL string as the fake charm content\n\treturn ioutil.NopCloser(bytes.NewReader([]byte(urlStr + \" content\"))), nil\n}\n\nfunc (d *fakeDownloader) OpenURI(uri string, query url.Values) (io.ReadCloser, error) {\n\tif query != nil {\n\t\tpanic(\"query should be empty\")\n\t}\n\td.uris = append(d.uris, uri)\n\t\/\/ Return the URI string as fake content\n\treturn ioutil.NopCloser(bytes.NewReader([]byte(uri))), nil\n}\n\ntype fakeUploader struct {\n\ttools map[version.Binary]string\n\tcharms map[string]string\n}\n\nfunc (f *fakeUploader) UploadTools(r io.ReadSeeker, v version.Binary, _ ...string) (tools.List, error) {\n\tdata, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tf.tools[v] = string(data)\n\treturn tools.List{&tools.Tools{Version: v}}, nil\n}\n\nfunc (f *fakeUploader) UploadCharm(u *charm.URL, r io.ReadSeeker) (*charm.URL, error) {\n\tdata, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\tf.charms[u.String()] = string(data)\n\treturn u, nil\n}\n\ntype ExportSuite struct {\n\tstatetesting.StateSuite\n}\n\nvar _ = gc.Suite(&ExportSuite{})\n\nfunc (s *ExportSuite) TestExportModel(c *gc.C) {\n\tbytes, err := migration.ExportModel(s.State)\n\tc.Assert(err, jc.ErrorIsNil)\n\t\/\/ The bytes must be a valid model.\n\t_, err = description.Deserialize(bytes)\n\tc.Assert(err, jc.ErrorIsNil)\n}\n\ntype PrecheckSuite struct {\n\ttesting.BaseSuite\n}\n\nvar _ = gc.Suite(&PrecheckSuite{})\n\n\/\/ Assert that *state.State implements the PrecheckBackend\nvar _ migration.PrecheckBackend = (*state.State)(nil)\n\nfunc (*PrecheckSuite) TestPrecheckCleanups(c *gc.C) {\n\tbackend := &fakePrecheckBackend{}\n\terr := migration.Precheck(backend)\n\tc.Assert(err, jc.ErrorIsNil)\n}\n\nfunc (*PrecheckSuite) TestPrecheckCleanupsError(c *gc.C) {\n\tbackend := &fakePrecheckBackend{\n\t\tcleanupError: errors.New(\"boom\"),\n\t}\n\terr := migration.Precheck(backend)\n\tc.Assert(err, gc.ErrorMatches, \"precheck cleanups: boom\")\n}\n\nfunc (*PrecheckSuite) TestPrecheckCleanupsNeeded(c *gc.C) {\n\tbackend := &fakePrecheckBackend{\n\t\tcleanupNeeded: true,\n\t}\n\terr := migration.Precheck(backend)\n\tc.Assert(err, gc.ErrorMatches, \"precheck failed: cleanup needed\")\n}\n\ntype fakePrecheckBackend struct {\n\tcleanupNeeded bool\n\tcleanupError error\n}\n\nfunc (f *fakePrecheckBackend) NeedsCleanup() (bool, error) {\n\treturn f.cleanupNeeded, f.cleanupError\n}\n\ntype InternalSuite struct {\n\ttesting.BaseSuite\n}\n\nvar _ = gc.Suite(&InternalSuite{})\n\nfunc (s *InternalSuite) TestControllerValues(c *gc.C) {\n\tconfig := testing.ModelConfig(c)\n\tfields := migration.ControllerValues(config)\n\tc.Assert(fields, jc.DeepEquals, map[string]interface{}{\n\t\t\"controller-uuid\": \"deadbeef-0bad-400d-8000-4b1d0d06f00d\",\n\t\t\"state-port\": 19034,\n\t\t\"api-port\": 17777,\n\t\t\"ca-cert\": testing.CACert,\n\t})\n}\n\nfunc (s *InternalSuite) TestUpdateConfigFromProvider(c *gc.C) {\n\tcontrollerConfig := testing.ModelConfig(c)\n\tconfigAttrs := testing.FakeConfig()\n\tconfigAttrs[\"type\"] = \"dummy\"\n\t\/\/ Fake the \"state-id\" so the provider thinks it is prepared already.\n\tconfigAttrs[\"state-id\"] = \"42\"\n\t\/\/ We need to specify a valid provider type, so we use dummy.\n\t\/\/ The dummy provider grabs the UUID from the controller config\n\t\/\/ and returns it in the map with the key \"controller-uuid\", similar\n\t\/\/ to what the azure provider will need to do.\n\tmodel := description.NewModel(description.ModelArgs{\n\t\tOwner: names.NewUserTag(\"test-admin\"),\n\t\tConfig: configAttrs,\n\t})\n\n\terr := migration.UpdateConfigFromProvider(model, controllerConfig)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tmodelConfig := model.Config()\n\tc.Assert(modelConfig[\"controller-uuid\"], gc.Equals, controllerConfig.UUID())\n}\n<|endoftext|>"} {"text":"<commit_before>package libvirt\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\/\/\"gopkg.in\/alexzorin\/libvirt-go.v2\"\n\tlibvirt \"github.com\/dmacvicar\/libvirt-go\"\n)\n\nfunc TestAccLibvirtDomain_Basic(t *testing.T) {\n\tvar domain libvirt.VirDomain\n\tvar config = fmt.Sprintf(`\n resource \"libvirt_domain\" \"acceptance-test-domain-1\" {\n name = \"terraform-test\"\n }`)\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckLibvirtDomainDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: config,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckLibvirtDomainExists(\"libvirt_domain.acceptance-test-domain-1\", &domain),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"libvirt_domain.acceptance-test-domain-1\", \"name\", \"terraform-test\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"libvirt_domain.acceptance-test-domain-1\", \"memory\", \"512\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"libvirt_domain.acceptance-test-domain-1\", \"vcpu\", \"1\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccLibvirtDomain_Detailed(t *testing.T) {\n\tvar domain libvirt.VirDomain\n\tvar config = fmt.Sprintf(`\n resource \"libvirt_domain\" \"acceptance-test-domain-2\" {\n name = \"terraform-test\"\n memory = 384\n vcpu = 2\n }`)\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckLibvirtDomainDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: config,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckLibvirtDomainExists(\"libvirt_domain.acceptance-test-domain-2\", &domain),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"libvirt_domain.acceptance-test-domain-2\", \"name\", \"terraform-test\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"libvirt_domain.acceptance-test-domain-2\", \"memory\", \"384\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"libvirt_domain.acceptance-test-domain-2\", \"vcpu\", \"2\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccLibvirtDomain_Volume(t *testing.T) {\n\tvar domain libvirt.VirDomain\n\tvar volume libvirt.VirStorageVol\n\n\tvar configVolAttached = fmt.Sprintf(`\n resource \"libvirt_volume\" \"acceptance-test-volume\" {\n name = \"terraform-test\"\n }\n\n resource \"libvirt_domain\" \"acceptance-test-domain\" {\n name = \"terraform-test\"\n disk {\n volume_id = \"${libvirt_volume.acceptance-test-volume.id}\"\n }\n }`)\n\n\tvar configVolDettached = fmt.Sprintf(`\n resource \"libvirt_domain\" \"acceptance-test-domain\" {\n name = \"terraform-test\"\n }`)\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckLibvirtDomainDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: configVolAttached,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckLibvirtDomainExists(\"libvirt_domain.acceptance-test-domain\", &domain),\n\t\t\t\t\ttestAccCheckLibvirtVolumeExists(\"libvirt_volume.acceptance-test-volume\", &volume),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: configVolDettached,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckLibvirtDomainExists(\"libvirt_domain.acceptance-test-domain\", &domain),\n\t\t\t\t\ttestAccCheckLibvirtVolumeDoesNotExists(\"libvirt_volume.acceptance-test-volume\", &volume),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccLibvirtDomain_VolumeTwoDisks(t *testing.T) {\n\tvar domain libvirt.VirDomain\n\tvar volume libvirt.VirStorageVol\n\n\tvar configVolAttached = fmt.Sprintf(`\n resource \"libvirt_volume\" \"acceptance-test-volume1\" {\n name = \"terraform-test-vol1\"\n }\n\n resource \"libvirt_volume\" \"acceptance-test-volume2\" {\n name = \"terraform-test-vol2\"\n }\n\n resource \"libvirt_domain\" \"acceptance-test-domain\" {\n name = \"terraform-test-domain\"\n disk {\n volume_id = \"${libvirt_volume.acceptance-test-volume1.id}\"\n }\n\n disk {\n volume_id = \"${libvirt_volume.acceptance-test-volume2.id}\"\n }\n }`)\n\n\tvar configVolDettached = fmt.Sprintf(`\n resource \"libvirt_domain\" \"acceptance-test-domain\" {\n name = \"terraform-test-domain\"\n }`)\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckLibvirtDomainDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: configVolAttached,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckLibvirtDomainExists(\"libvirt_domain.acceptance-test-domain\", &domain),\n\t\t\t\t\ttestAccCheckLibvirtVolumeExists(\"libvirt_volume.acceptance-test-volume1\", &volume),\n\t\t\t\t\ttestAccCheckLibvirtVolumeExists(\"libvirt_volume.acceptance-test-volume2\", &volume),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: configVolDettached,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckLibvirtDomainExists(\"libvirt_domain.acceptance-test-domain\", &domain),\n\t\t\t\t\ttestAccCheckLibvirtVolumeDoesNotExists(\"libvirt_volume.acceptance-test-volume1\", &volume),\n\t\t\t\t\ttestAccCheckLibvirtVolumeDoesNotExists(\"libvirt_volume.acceptance-test-volume2\", &volume),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccLibvirtDomain_NetworkInterface(t *testing.T) {\n\tvar domain libvirt.VirDomain\n\n\tvar config = fmt.Sprintf(`\n resource \"libvirt_volume\" \"acceptance-test-volume\" {\n name = \"terraform-test\"\n }\n\n resource \"libvirt_domain\" \"acceptance-test-domain\" {\n name = \"terraform-test\"\n network_interface = {\n network_name = \"default\"\n }\n network_interface = {\n mac = \"52:54:00:a9:f5:17\"\n }\n disk {\n volume_id = \"${libvirt_volume.acceptance-test-volume.id}\"\n }\n }`)\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckLibvirtDomainDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: config,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckLibvirtDomainExists(\"libvirt_domain.acceptance-test-domain\", &domain),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"libvirt_domain.acceptance-test-domain\", \"network_interface.0.network_name\", \"default\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"libvirt_domain.acceptance-test-domain\", \"network_interface.1.mac\", \"52:54:00:a9:f5:17\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc testAccCheckLibvirtDomainDestroy(s *terraform.State) error {\n\tvirtConn := testAccProvider.Meta().(*Client).libvirt\n\n\tfor _, rs := range s.RootModule().Resources {\n\t\tif rs.Type != \"libvirt_domain\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Try to find the server\n\t\t_, err := virtConn.LookupByUUIDString(rs.Primary.ID)\n\t\tif err == nil {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"Error waiting for domain (%s) to be destroyed: %s\",\n\t\t\t\trs.Primary.ID, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc testAccCheckLibvirtDomainExists(n string, domain *libvirt.VirDomain) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No libvirt domain ID is set\")\n\t\t}\n\n\t\tvirConn := testAccProvider.Meta().(*Client).libvirt\n\n\t\tretrieveDomain, err := virConn.LookupByUUIDString(rs.Primary.ID)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.Printf(\"The ID is %s\", rs.Primary.ID)\n\n\t\trealId, err := retrieveDomain.GetUUIDString()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif realId != rs.Primary.ID {\n\t\t\treturn fmt.Errorf(\"Libvirt domain not found\")\n\t\t}\n\n\t\t*domain = retrieveDomain\n\n\t\treturn nil\n\t}\n}\n<commit_msg>Domain acceptance tests: bridge is mandatory with mac, add it<commit_after>package libvirt\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\/\/\"gopkg.in\/alexzorin\/libvirt-go.v2\"\n\tlibvirt \"github.com\/dmacvicar\/libvirt-go\"\n)\n\nfunc TestAccLibvirtDomain_Basic(t *testing.T) {\n\tvar domain libvirt.VirDomain\n\tvar config = fmt.Sprintf(`\n resource \"libvirt_domain\" \"acceptance-test-domain-1\" {\n name = \"terraform-test\"\n }`)\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckLibvirtDomainDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: config,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckLibvirtDomainExists(\"libvirt_domain.acceptance-test-domain-1\", &domain),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"libvirt_domain.acceptance-test-domain-1\", \"name\", \"terraform-test\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"libvirt_domain.acceptance-test-domain-1\", \"memory\", \"512\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"libvirt_domain.acceptance-test-domain-1\", \"vcpu\", \"1\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccLibvirtDomain_Detailed(t *testing.T) {\n\tvar domain libvirt.VirDomain\n\tvar config = fmt.Sprintf(`\n resource \"libvirt_domain\" \"acceptance-test-domain-2\" {\n name = \"terraform-test\"\n memory = 384\n vcpu = 2\n }`)\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckLibvirtDomainDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: config,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckLibvirtDomainExists(\"libvirt_domain.acceptance-test-domain-2\", &domain),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"libvirt_domain.acceptance-test-domain-2\", \"name\", \"terraform-test\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"libvirt_domain.acceptance-test-domain-2\", \"memory\", \"384\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"libvirt_domain.acceptance-test-domain-2\", \"vcpu\", \"2\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccLibvirtDomain_Volume(t *testing.T) {\n\tvar domain libvirt.VirDomain\n\tvar volume libvirt.VirStorageVol\n\n\tvar configVolAttached = fmt.Sprintf(`\n resource \"libvirt_volume\" \"acceptance-test-volume\" {\n name = \"terraform-test\"\n }\n\n resource \"libvirt_domain\" \"acceptance-test-domain\" {\n name = \"terraform-test\"\n disk {\n volume_id = \"${libvirt_volume.acceptance-test-volume.id}\"\n }\n }`)\n\n\tvar configVolDettached = fmt.Sprintf(`\n resource \"libvirt_domain\" \"acceptance-test-domain\" {\n name = \"terraform-test\"\n }`)\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckLibvirtDomainDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: configVolAttached,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckLibvirtDomainExists(\"libvirt_domain.acceptance-test-domain\", &domain),\n\t\t\t\t\ttestAccCheckLibvirtVolumeExists(\"libvirt_volume.acceptance-test-volume\", &volume),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: configVolDettached,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckLibvirtDomainExists(\"libvirt_domain.acceptance-test-domain\", &domain),\n\t\t\t\t\ttestAccCheckLibvirtVolumeDoesNotExists(\"libvirt_volume.acceptance-test-volume\", &volume),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccLibvirtDomain_VolumeTwoDisks(t *testing.T) {\n\tvar domain libvirt.VirDomain\n\tvar volume libvirt.VirStorageVol\n\n\tvar configVolAttached = fmt.Sprintf(`\n resource \"libvirt_volume\" \"acceptance-test-volume1\" {\n name = \"terraform-test-vol1\"\n }\n\n resource \"libvirt_volume\" \"acceptance-test-volume2\" {\n name = \"terraform-test-vol2\"\n }\n\n resource \"libvirt_domain\" \"acceptance-test-domain\" {\n name = \"terraform-test-domain\"\n disk {\n volume_id = \"${libvirt_volume.acceptance-test-volume1.id}\"\n }\n\n disk {\n volume_id = \"${libvirt_volume.acceptance-test-volume2.id}\"\n }\n }`)\n\n\tvar configVolDettached = fmt.Sprintf(`\n resource \"libvirt_domain\" \"acceptance-test-domain\" {\n name = \"terraform-test-domain\"\n }`)\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckLibvirtDomainDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: configVolAttached,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckLibvirtDomainExists(\"libvirt_domain.acceptance-test-domain\", &domain),\n\t\t\t\t\ttestAccCheckLibvirtVolumeExists(\"libvirt_volume.acceptance-test-volume1\", &volume),\n\t\t\t\t\ttestAccCheckLibvirtVolumeExists(\"libvirt_volume.acceptance-test-volume2\", &volume),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: configVolDettached,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckLibvirtDomainExists(\"libvirt_domain.acceptance-test-domain\", &domain),\n\t\t\t\t\ttestAccCheckLibvirtVolumeDoesNotExists(\"libvirt_volume.acceptance-test-volume1\", &volume),\n\t\t\t\t\ttestAccCheckLibvirtVolumeDoesNotExists(\"libvirt_volume.acceptance-test-volume2\", &volume),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccLibvirtDomain_NetworkInterface(t *testing.T) {\n\tvar domain libvirt.VirDomain\n\n\tvar config = fmt.Sprintf(`\n resource \"libvirt_volume\" \"acceptance-test-volume\" {\n name = \"terraform-test\"\n }\n\n resource \"libvirt_domain\" \"acceptance-test-domain\" {\n name = \"terraform-test\"\n network_interface = {\n network_name = \"default\"\n }\n network_interface = {\n bridge = \"br0\"\n mac = \"52:54:00:a9:f5:17\"\n }\n disk {\n volume_id = \"${libvirt_volume.acceptance-test-volume.id}\"\n }\n }`)\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckLibvirtDomainDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: config,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckLibvirtDomainExists(\"libvirt_domain.acceptance-test-domain\", &domain),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"libvirt_domain.acceptance-test-domain\", \"network_interface.0.network_name\", \"default\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"libvirt_domain.acceptance-test-domain\", \"network_interface.1.mac\", \"52:54:00:a9:f5:17\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc testAccCheckLibvirtDomainDestroy(s *terraform.State) error {\n\tvirtConn := testAccProvider.Meta().(*Client).libvirt\n\n\tfor _, rs := range s.RootModule().Resources {\n\t\tif rs.Type != \"libvirt_domain\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Try to find the server\n\t\t_, err := virtConn.LookupByUUIDString(rs.Primary.ID)\n\t\tif err == nil {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"Error waiting for domain (%s) to be destroyed: %s\",\n\t\t\t\trs.Primary.ID, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc testAccCheckLibvirtDomainExists(n string, domain *libvirt.VirDomain) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No libvirt domain ID is set\")\n\t\t}\n\n\t\tvirConn := testAccProvider.Meta().(*Client).libvirt\n\n\t\tretrieveDomain, err := virConn.LookupByUUIDString(rs.Primary.ID)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.Printf(\"The ID is %s\", rs.Primary.ID)\n\n\t\trealId, err := retrieveDomain.GetUUIDString()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif realId != rs.Primary.ID {\n\t\t\treturn fmt.Errorf(\"Libvirt domain not found\")\n\t\t}\n\n\t\t*domain = retrieveDomain\n\n\t\treturn nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package alertsv2\n\nimport \"net\/url\"\n\ntype ExecuteCustomActionRequest struct {\n\t*Identifier\n\tActionName string `json:\"-\"`\n\tUser string `json:\"user,omitempty\"`\n\tSource string `json:\"source,omitempty\"`\n\tNote string `json:\"note,omitempty\"`\n\tApiKey string `json:\"-\"`\n}\n\nfunc (r *ExecuteCustomActionRequest) GenerateUrl() (string, url.Values, error) {\n\tpath, params, err := r.Identifier.GenerateUrl()\n\treturn path + \"\/actions\/\" + r.ActionName, params, err;\n}\n\nfunc (r *ExecuteCustomActionRequest) GetApiKey() string {\n\treturn r.ApiKey\n}\n<commit_msg>Add validation for execute custom action<commit_after>package alertsv2\n\nimport (\n\t\"net\/url\"\n\t\"errors\"\n)\n\ntype ExecuteCustomActionRequest struct {\n\t*Identifier\n\tActionName string `json:\"-\"`\n\tUser string `json:\"user,omitempty\"`\n\tSource string `json:\"source,omitempty\"`\n\tNote string `json:\"note,omitempty\"`\n\tApiKey string `json:\"-\"`\n}\n\nfunc (r *ExecuteCustomActionRequest) GenerateUrl() (string, url.Values, error) {\n\tpath, params, err := r.Identifier.GenerateUrl()\n\tif r.ActionName == \"\" {\n\t\treturn \"\", nil, errors.New(\"ActionName should be provided\")\n\t}\n\treturn path + \"\/actions\/\" + r.ActionName, params, err;\n}\n\nfunc (r *ExecuteCustomActionRequest) GetApiKey() string {\n\treturn r.ApiKey\n}\n<|endoftext|>"} {"text":"<commit_before>package apns\n\nimport (\n\t\"crypto\/tls\"\n\t\"github.com\/sideshow\/apns2\"\n\t\"github.com\/sideshow\/apns2\/certificate\"\n\t\"golang.org\/x\/net\/http2\"\n\t\"net\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\t\/\/see https:\/\/github.com\/sideshow\/apns2\/issues\/24 and https:\/\/github.com\/sideshow\/apns2\/issues\/20\n\ttlsDialTimeout = 20 * time.Second\n\thttpClientTimeout = 30 * time.Second\n)\n\ntype Pusher interface {\n\tPush(*apns2.Notification) (*apns2.Response, error)\n}\n\ntype closable interface {\n\tCloseTLS()\n}\n\nfunc newPusher(c Config) (Pusher, error) {\n\tvar (\n\t\tcert tls.Certificate\n\t\terrCert error\n\t)\n\tif c.CertificateFileName != nil && *c.CertificateFileName != \"\" {\n\t\tcert, errCert = certificate.FromP12File(*c.CertificateFileName, *c.CertificatePassword)\n\t} else {\n\t\tcert, errCert = certificate.FromP12Bytes(*c.CertificateBytes, *c.CertificatePassword)\n\t}\n\tif errCert != nil {\n\t\treturn nil, errCert\n\t}\n\n\tvar clientFactory func(certificate tls.Certificate) *apns2Client\n\tif *c.Production {\n\t\tclientFactory = newProductionClient\n\t} else {\n\t\tclientFactory = newDevelopmentClient\n\t}\n\n\tapns2.TLSDialTimeout = tlsDialTimeout\n\tapns2.HTTPClientTimeout = httpClientTimeout\n\n\treturn clientFactory(cert), nil\n}\n\nfunc newProductionClient(certificate tls.Certificate) *apns2Client {\n\tlogger.Info(\"APNS Pusher in Production mode\")\n\tc := newApns2Client(certificate)\n\tc.Production()\n\treturn c\n}\n\nfunc newDevelopmentClient(certificate tls.Certificate) *apns2Client {\n\tlogger.Info(\"APNS Pusher in Development mode\")\n\tc := newApns2Client(certificate)\n\tc.Development()\n\treturn c\n}\n\ntype apns2Client struct {\n\t*apns2.Client\n\n\ttlsConn net.Conn\n\tmu sync.Mutex\n}\n\nfunc newApns2Client(certificate tls.Certificate) *apns2Client {\n\tc := &apns2Client{}\n\n\ttlsConfig := &tls.Config{\n\t\tCertificates: []tls.Certificate{certificate},\n\t}\n\tif len(certificate.Certificate) > 0 {\n\t\ttlsConfig.BuildNameToCertificate()\n\t}\n\ttransport := &http2.Transport{\n\t\tTLSClientConfig: tlsConfig,\n\t\tDialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {\n\t\t\tconn, err := tls.DialWithDialer(&net.Dialer{Timeout: tlsDialTimeout, KeepAlive: 2 * time.Second}, network, addr, cfg)\n\n\t\t\tc.mu.Lock()\n\t\t\tdefer c.mu.Unlock()\n\t\t\tif err == nil {\n\t\t\t\tc.tlsConn = conn\n\t\t\t} else {\n\t\t\t\tc.tlsConn = nil\n\t\t\t}\n\t\t\treturn conn, err\n\t\t},\n\t}\n\tclient := &apns2.Client{\n\t\tHTTPClient: &http.Client{\n\t\t\tTransport: transport,\n\t\t\tTimeout: httpClientTimeout,\n\t\t},\n\t\tCertificate: certificate,\n\t\tHost: apns2.DefaultHost,\n\t}\n\tc.Client = client\n\treturn c\n}\n\n\/\/ interface closable used used by apns_sender\nfunc (c *apns2Client) CloseTLS() {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif c.tlsConn != nil {\n\t\tc.tlsConn.Close()\n\t\tc.tlsConn = nil\n\t}\n}\n<commit_msg>more logs in apns pusher<commit_after>package apns\n\nimport (\n\t\"crypto\/tls\"\n\t\"github.com\/sideshow\/apns2\"\n\t\"github.com\/sideshow\/apns2\/certificate\"\n\t\"golang.org\/x\/net\/http2\"\n\t\"net\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\t\/\/see https:\/\/github.com\/sideshow\/apns2\/issues\/24 and https:\/\/github.com\/sideshow\/apns2\/issues\/20\n\ttlsDialTimeout = 20 * time.Second\n\thttpClientTimeout = 30 * time.Second\n)\n\ntype Pusher interface {\n\tPush(*apns2.Notification) (*apns2.Response, error)\n}\n\ntype closable interface {\n\tCloseTLS()\n}\n\nfunc newPusher(c Config) (Pusher, error) {\n\tlogger.Info(\"creating new apns pusher\")\n\n\tvar (\n\t\tcert tls.Certificate\n\t\terrCert error\n\t)\n\tif c.CertificateFileName != nil && *c.CertificateFileName != \"\" {\n\t\tcert, errCert = certificate.FromP12File(*c.CertificateFileName, *c.CertificatePassword)\n\t} else {\n\t\tcert, errCert = certificate.FromP12Bytes(*c.CertificateBytes, *c.CertificatePassword)\n\t}\n\tif errCert != nil {\n\t\treturn nil, errCert\n\t}\n\n\tvar clientFactory func(certificate tls.Certificate) *apns2Client\n\tif *c.Production {\n\t\tclientFactory = newProductionClient\n\t} else {\n\t\tclientFactory = newDevelopmentClient\n\t}\n\n\tapns2.TLSDialTimeout = tlsDialTimeout\n\tapns2.HTTPClientTimeout = httpClientTimeout\n\n\tlogger.Info(\"created new apns pusher\")\n\n\treturn clientFactory(cert), nil\n}\n\nfunc newProductionClient(certificate tls.Certificate) *apns2Client {\n\tlogger.Info(\"APNS Pusher in Production mode\")\n\tc := newApns2Client(certificate)\n\tc.Production()\n\treturn c\n}\n\nfunc newDevelopmentClient(certificate tls.Certificate) *apns2Client {\n\tlogger.Info(\"APNS Pusher in Development mode\")\n\tc := newApns2Client(certificate)\n\tc.Development()\n\treturn c\n}\n\ntype apns2Client struct {\n\t*apns2.Client\n\n\ttlsConn net.Conn\n\tmu sync.Mutex\n}\n\nfunc newApns2Client(certificate tls.Certificate) *apns2Client {\n\tlogger.Info(\"creating new apns2client\")\n\n\tc := &apns2Client{}\n\n\ttlsConfig := &tls.Config{\n\t\tCertificates: []tls.Certificate{certificate},\n\t}\n\tif len(certificate.Certificate) > 0 {\n\t\ttlsConfig.BuildNameToCertificate()\n\t}\n\ttransport := &http2.Transport{\n\t\tTLSClientConfig: tlsConfig,\n\t\tDialTLS: func(network, addr string, cfg *tls.Config) (net.Conn, error) {\n\t\t\tconn, err := tls.DialWithDialer(&net.Dialer{Timeout: tlsDialTimeout, KeepAlive: 2 * time.Second}, network, addr, cfg)\n\n\t\t\tc.mu.Lock()\n\t\t\tdefer c.mu.Unlock()\n\t\t\tif err == nil {\n\t\t\t\tc.tlsConn = conn\n\t\t\t} else {\n\t\t\t\tc.tlsConn = nil\n\t\t\t}\n\t\t\treturn conn, err\n\t\t},\n\t}\n\tclient := &apns2.Client{\n\t\tHTTPClient: &http.Client{\n\t\t\tTransport: transport,\n\t\t\tTimeout: httpClientTimeout,\n\t\t},\n\t\tCertificate: certificate,\n\t\tHost: apns2.DefaultHost,\n\t}\n\tc.Client = client\n\tlogger.Info(\"created new apns2client\")\n\treturn c\n}\n\n\/\/ interface closable used used by apns_sender\nfunc (c *apns2Client) CloseTLS() {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif c.tlsConn != nil {\n\t\tlogger.Info(\"Trying to close TLS connection\")\n\t\tc.tlsConn.Close()\n\t\tlogger.Info(\"Closed TLS connection\")\n\t\tc.tlsConn = nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/go-martini\/martini\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/martini-contrib\/sessions\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst CLEAR string = \"\\033[H\\033[2J\"\nconst RESET string = \"\\0033\\0143\"\n\nconst MAX_CONSOLE int = 10000\n\nconst (\n\tWSTerm = 1\n\tWSClick = 2\n)\n\ntype Config struct {\n\tSecret string\n\tAddress string\n}\n\ntype LockingWebsockets struct {\n\tsync.RWMutex\n\tbyId map[int64]*websocket.Conn\n\tconsoleToId map[int64][]int64\n\tcurrentId int64\n}\n\nvar consoleBuffers map[int64]string\n\nvar websockets *LockingWebsockets\n\nfunc (c *LockingWebsockets) deleteWebsocket(id int64) {\n\tc.Lock()\n\tdefer c.Unlock()\n\tdelete(c.byId, id)\n}\n\nfunc (c *LockingWebsockets) addWebsocket(ws *websocket.Conn) int64 {\n\tc.Lock()\n\tdefer c.Unlock()\n\tc.currentId += 1\n\tretid := c.currentId\n\tc.byId[retid] = ws\n\treturn retid\n}\n\nfunc main() {\n\tconsoleBuffers = make(map[int64]string)\n\twebsockets = &LockingWebsockets{\n\t\tbyId: make(map[int64]*websocket.Conn),\n\t\tconsoleToId: make(map[int64][]int64),\n\t\tcurrentId: 0,\n\t}\n\tconsoleReadChannel = make(chan ConsoleChunk)\n\tgo consoleDispatch()\n\tvzcontrol := ConnectVZControl()\n\tdefer vzcontrol.Close()\n\n\tfile, _ := os.Open(\"game_server.cfg\")\n\tdecoder := json.NewDecoder(file)\n\tconfig := Config{}\n\tdecoder.Decode(&config)\n\n\tm := martini.Classic()\n\tstore := sessions.NewCookieStore([]byte(config.Secret))\n\tm.Use(sessions.Sessions(\"session\", store))\n\n\tgenerating := false\n\tgr := NewGraph()\n\tm.Get(\"\/reset\/:secret\", func(w http.ResponseWriter, r *http.Request, params martini.Params, session sessions.Session) string {\n\t\tif params[\"secret\"] != config.Secret {\n\t\t\treturn \"\"\n\t\t}\n\t\terr := vzcontrol.Reset()\n\t\tif err != nil {\n\t\t\treturn err.Error()\n\t\t}\n\t\tgenerating = false\n\t\tgr = NewGraph()\n\t\treturn \"Done\"\n\t})\n\n\tm.Get(\"\/gen\", func(w http.ResponseWriter, r *http.Request, session sessions.Session) string {\n\t\tif generating {\n\t\t\treturn \"Already generating\"\n\t\t}\n\t\tgenerating = true\n\t\tmaxNodes := 5\n\t\tmaxEdges := 5\n\t\tstartNodeId := 100\n\n\t\tstartNode := Node{Id: NodeId(startNodeId)}\n\t\tgr.AddNode(startNode)\n\t\terr := vzcontrol.ContainerCreate(int64(startNode.Id))\n\t\terr = vzcontrol.ConsoleStart(int64(startNode.Id))\n\n\t\tnodes := make([]Node, 0)\n\t\tnodes = append(nodes, startNode)\n\n\t\tsteps := 1\n\t\tfor len(nodes) != 0 && steps < maxNodes {\n\t\t\tnode, nodes := nodes[len(nodes)-1], nodes[:len(nodes)-1]\n\n\t\t\tnumEdges := random(1, maxEdges)\n\t\t\tfor i := 1; i <= numEdges; i++ {\n\t\t\t\ttargetNode := Node{Id: NodeId(i*steps + startNodeId)}\n\t\t\t\tif gr.AddNode(targetNode) {\n\t\t\t\t\terr = vzcontrol.ContainerCreate(int64(targetNode.Id))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Sprintf(\"Container Create: %d, %d, %d\\n%s\", targetNode.Id, i*steps, numEdges, err.Error())\n\t\t\t\t\t}\n\t\t\t\t\terr = vzcontrol.ConsoleStart(int64(targetNode.Id))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Sprintf(\"Console Start: %d\\n%s\", targetNode.Id, err.Error())\n\t\t\t\t\t}\n\t\t\t\t\tnodes = append(nodes, targetNode)\n\t\t\t\t\tedgeid := int64(i * steps)\n\t\t\t\t\tif gr.AddEdge(Edge{Id: EdgeId(edgeid), Head: node.Id, Tail: targetNode.Id}) {\n\t\t\t\t\t\terr = vzcontrol.NetworkCreate(edgeid)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Sprintf(\"Network Create: %d\\n%s\", edgeid, err.Error())\n\t\t\t\t\t\t}\n\t\t\t\t\t\terr = vzcontrol.NetworkAdd(int64(node.Id), edgeid)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Sprintf(\"Network Add Node: %d, %d\\n%s\", node.Id, edgeid, err.Error())\n\t\t\t\t\t\t}\n\t\t\t\t\t\terr = vzcontrol.NetworkAdd(int64(targetNode.Id), edgeid)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Sprintf(\"Network Add Target: %d, %d\\n%s\", targetNode.Id, edgeid, err.Error())\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\t\t\tsteps += 1\n\t\t}\n\t\treturn gr.String()\n\t})\n\n\tm.Get(\"\/graph\", func(w http.ResponseWriter, r *http.Request, session sessions.Session) string {\n\t\toutput, err := json.Marshal(gr)\n\t\tif err != nil {\n\t\t\treturn err.Error()\n\t\t}\n\t\treturn string(output)\n\t})\n\n\tm.Get(\"\/ws\", func(w http.ResponseWriter, r *http.Request, session sessions.Session) {\n\t\tvar currentVm int64 = -1\n\t\tws, err := websocket.Upgrade(w, r, nil, 1024, 1024)\n\t\tif _, ok := err.(websocket.HandshakeError); ok {\n\t\t\thttp.Error(w, \"Not a websocket handshake\", 400)\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else if err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\t\tdefer ws.Close()\n\t\twebsocketId := websockets.addWebsocket(ws)\n\t\tdefer websockets.deleteWebsocket(websocketId)\n\t\tws.WriteMessage(websocket.TextMessage, []byte(\"Welcome to ginux!\\r\\n\"))\n\t\tfor {\n\t\t\t_, message, err := ws.ReadMessage()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tmsgType := message[0]\n\t\t\t\tmsgData := message[1:len(message)]\n\t\t\t\tswitch msgType {\n\t\t\t\tcase WSTerm:\n\t\t\t\t\tif currentVm != -1 {\n\t\t\t\t\t\tvzcontrol.ConsoleWrite(currentVm, msgData)\n\t\t\t\t\t}\n\t\t\t\tcase WSClick:\n\t\t\t\t\tprevVm := currentVm\n\t\t\t\t\ttmp, _ := strconv.Atoi(string(msgData))\n\t\t\t\t\tcurrentVm = int64(tmp)\n\t\t\t\t\twebsockets.Lock()\n\t\t\t\t\tif prevVm != -1 {\n\t\t\t\t\t\tfor index, wsId := range websockets.consoleToId[prevVm] {\n\t\t\t\t\t\t\tif wsId == websocketId {\n\t\t\t\t\t\t\t\twebsockets.consoleToId[prevVm] = append(websockets.consoleToId[prevVm][:index], websockets.consoleToId[prevVm][index+1:]...)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\twebsockets.consoleToId[currentVm] = append(websockets.consoleToId[currentVm], websocketId)\n\t\t\t\t\twebsockets.Unlock()\n\t\t\t\t\tws.WriteMessage(websocket.TextMessage, []byte(RESET))\n\t\t\t\t\t\/\/ws.WriteMessage(websocket.TextMessage, []byte(fmt.Sprintf(\"Selected Container %d\\r\\n\", currentVm)))\n\t\t\t\t\tws.WriteMessage(websocket.TextMessage, []byte(consoleBuffers[currentVm]))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\tlog.Println(\"Game Server started on\", config.Address)\n\tlog.Fatal(http.ListenAndServe(config.Address, m))\n}\n\nfunc random(min, max int) int {\n\trand.Seed(time.Now().Unix())\n\treturn rand.Intn(max-min) + min\n}\n\nfunc consoleDispatch() {\n\tfor chunk := range consoleReadChannel {\n\t\tif len(consoleBuffers[chunk.Id]) > MAX_CONSOLE { \n\t\t\tconsoleBuffers[chunk.Id] = consoleBuffers[chunk.Id][len(string(chunk.Data)):] + string(chunk.Data)\n\t\t} else {\n\t\t\tconsoleBuffers[chunk.Id] += string(chunk.Data)\n\t\t}\n\t\twebsockets.RLock()\n\t\tfor _, wsId := range websockets.consoleToId[chunk.Id] {\n\t\t\tif socket, ok := websockets.byId[wsId]; ok {\n\t\t\t\tsocket.WriteMessage(websocket.TextMessage, chunk.Data)\n\t\t\t}\n\t\t}\n\t\twebsockets.RUnlock()\n\t}\n}\n<commit_msg>Changed reset command<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/go-martini\/martini\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/martini-contrib\/sessions\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst CLEAR string = \"\\033[H\\033[2J\"\nconst RESET string = \"\\033[!p\\033[?3;4l\\033[4l\\033>\"\n\nconst MAX_CONSOLE int = 10000\n\nconst (\n\tWSTerm = 1\n\tWSClick = 2\n)\n\ntype Config struct {\n\tSecret string\n\tAddress string\n}\n\ntype LockingWebsockets struct {\n\tsync.RWMutex\n\tbyId map[int64]*websocket.Conn\n\tconsoleToId map[int64][]int64\n\tcurrentId int64\n}\n\nvar consoleBuffers map[int64]string\n\nvar websockets *LockingWebsockets\n\nfunc (c *LockingWebsockets) deleteWebsocket(id int64) {\n\tc.Lock()\n\tdefer c.Unlock()\n\tdelete(c.byId, id)\n}\n\nfunc (c *LockingWebsockets) addWebsocket(ws *websocket.Conn) int64 {\n\tc.Lock()\n\tdefer c.Unlock()\n\tc.currentId += 1\n\tretid := c.currentId\n\tc.byId[retid] = ws\n\treturn retid\n}\n\nfunc main() {\n\tconsoleBuffers = make(map[int64]string)\n\twebsockets = &LockingWebsockets{\n\t\tbyId: make(map[int64]*websocket.Conn),\n\t\tconsoleToId: make(map[int64][]int64),\n\t\tcurrentId: 0,\n\t}\n\tconsoleReadChannel = make(chan ConsoleChunk)\n\tgo consoleDispatch()\n\tvzcontrol := ConnectVZControl()\n\tdefer vzcontrol.Close()\n\n\tfile, _ := os.Open(\"game_server.cfg\")\n\tdecoder := json.NewDecoder(file)\n\tconfig := Config{}\n\tdecoder.Decode(&config)\n\n\tm := martini.Classic()\n\tstore := sessions.NewCookieStore([]byte(config.Secret))\n\tm.Use(sessions.Sessions(\"session\", store))\n\n\tgenerating := false\n\tgr := NewGraph()\n\tm.Get(\"\/reset\/:secret\", func(w http.ResponseWriter, r *http.Request, params martini.Params, session sessions.Session) string {\n\t\tif params[\"secret\"] != config.Secret {\n\t\t\treturn \"\"\n\t\t}\n\t\terr := vzcontrol.Reset()\n\t\tif err != nil {\n\t\t\treturn err.Error()\n\t\t}\n\t\tgenerating = false\n\t\tgr = NewGraph()\n\t\treturn \"Done\"\n\t})\n\n\tm.Get(\"\/gen\", func(w http.ResponseWriter, r *http.Request, session sessions.Session) string {\n\t\tif generating {\n\t\t\treturn \"Already generating\"\n\t\t}\n\t\tgenerating = true\n\t\tmaxNodes := 5\n\t\tmaxEdges := 5\n\t\tstartNodeId := 100\n\n\t\tstartNode := Node{Id: NodeId(startNodeId)}\n\t\tgr.AddNode(startNode)\n\t\terr := vzcontrol.ContainerCreate(int64(startNode.Id))\n\t\terr = vzcontrol.ConsoleStart(int64(startNode.Id))\n\n\t\tnodes := make([]Node, 0)\n\t\tnodes = append(nodes, startNode)\n\n\t\tsteps := 1\n\t\tfor len(nodes) != 0 && steps < maxNodes {\n\t\t\tnode, nodes := nodes[len(nodes)-1], nodes[:len(nodes)-1]\n\n\t\t\tnumEdges := random(1, maxEdges)\n\t\t\tfor i := 1; i <= numEdges; i++ {\n\t\t\t\ttargetNode := Node{Id: NodeId(i*steps + startNodeId)}\n\t\t\t\tif gr.AddNode(targetNode) {\n\t\t\t\t\terr = vzcontrol.ContainerCreate(int64(targetNode.Id))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Sprintf(\"Container Create: %d, %d, %d\\n%s\", targetNode.Id, i*steps, numEdges, err.Error())\n\t\t\t\t\t}\n\t\t\t\t\terr = vzcontrol.ConsoleStart(int64(targetNode.Id))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn fmt.Sprintf(\"Console Start: %d\\n%s\", targetNode.Id, err.Error())\n\t\t\t\t\t}\n\t\t\t\t\tnodes = append(nodes, targetNode)\n\t\t\t\t\tedgeid := int64(i * steps)\n\t\t\t\t\tif gr.AddEdge(Edge{Id: EdgeId(edgeid), Head: node.Id, Tail: targetNode.Id}) {\n\t\t\t\t\t\terr = vzcontrol.NetworkCreate(edgeid)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Sprintf(\"Network Create: %d\\n%s\", edgeid, err.Error())\n\t\t\t\t\t\t}\n\t\t\t\t\t\terr = vzcontrol.NetworkAdd(int64(node.Id), edgeid)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Sprintf(\"Network Add Node: %d, %d\\n%s\", node.Id, edgeid, err.Error())\n\t\t\t\t\t\t}\n\t\t\t\t\t\terr = vzcontrol.NetworkAdd(int64(targetNode.Id), edgeid)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Sprintf(\"Network Add Target: %d, %d\\n%s\", targetNode.Id, edgeid, err.Error())\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\t\t\tsteps += 1\n\t\t}\n\t\treturn gr.String()\n\t})\n\n\tm.Get(\"\/graph\", func(w http.ResponseWriter, r *http.Request, session sessions.Session) string {\n\t\toutput, err := json.Marshal(gr)\n\t\tif err != nil {\n\t\t\treturn err.Error()\n\t\t}\n\t\treturn string(output)\n\t})\n\n\tm.Get(\"\/ws\", func(w http.ResponseWriter, r *http.Request, session sessions.Session) {\n\t\tvar currentVm int64 = -1\n\t\tws, err := websocket.Upgrade(w, r, nil, 1024, 1024)\n\t\tif _, ok := err.(websocket.HandshakeError); ok {\n\t\t\thttp.Error(w, \"Not a websocket handshake\", 400)\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t} else if err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\t\tdefer ws.Close()\n\t\twebsocketId := websockets.addWebsocket(ws)\n\t\tdefer websockets.deleteWebsocket(websocketId)\n\t\tws.WriteMessage(websocket.TextMessage, []byte(\"Welcome to ginux!\\r\\n\"))\n\t\tfor {\n\t\t\t_, message, err := ws.ReadMessage()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tmsgType := message[0]\n\t\t\t\tmsgData := message[1:len(message)]\n\t\t\t\tswitch msgType {\n\t\t\t\tcase WSTerm:\n\t\t\t\t\tif currentVm != -1 {\n\t\t\t\t\t\tvzcontrol.ConsoleWrite(currentVm, msgData)\n\t\t\t\t\t}\n\t\t\t\tcase WSClick:\n\t\t\t\t\tprevVm := currentVm\n\t\t\t\t\ttmp, _ := strconv.Atoi(string(msgData))\n\t\t\t\t\tcurrentVm = int64(tmp)\n\t\t\t\t\twebsockets.Lock()\n\t\t\t\t\tif prevVm != -1 {\n\t\t\t\t\t\tfor index, wsId := range websockets.consoleToId[prevVm] {\n\t\t\t\t\t\t\tif wsId == websocketId {\n\t\t\t\t\t\t\t\twebsockets.consoleToId[prevVm] = append(websockets.consoleToId[prevVm][:index], websockets.consoleToId[prevVm][index+1:]...)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\twebsockets.consoleToId[currentVm] = append(websockets.consoleToId[currentVm], websocketId)\n\t\t\t\t\twebsockets.Unlock()\n\t\t\t\t\tws.WriteMessage(websocket.TextMessage, []byte(RESET))\n\t\t\t\t\t\/\/ws.WriteMessage(websocket.TextMessage, []byte(fmt.Sprintf(\"Selected Container %d\\r\\n\", currentVm)))\n\t\t\t\t\tws.WriteMessage(websocket.TextMessage, []byte(consoleBuffers[currentVm]))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\tlog.Println(\"Game Server started on\", config.Address)\n\tlog.Fatal(http.ListenAndServe(config.Address, m))\n}\n\nfunc random(min, max int) int {\n\trand.Seed(time.Now().Unix())\n\treturn rand.Intn(max-min) + min\n}\n\nfunc consoleDispatch() {\n\tfor chunk := range consoleReadChannel {\n\t\tif len(consoleBuffers[chunk.Id]) > MAX_CONSOLE { \n\t\t\tconsoleBuffers[chunk.Id] = consoleBuffers[chunk.Id][len(string(chunk.Data)):] + string(chunk.Data)\n\t\t} else {\n\t\t\tconsoleBuffers[chunk.Id] += string(chunk.Data)\n\t\t}\n\t\twebsockets.RLock()\n\t\tfor _, wsId := range websockets.consoleToId[chunk.Id] {\n\t\t\tif socket, ok := websockets.byId[wsId]; ok {\n\t\t\t\tsocket.WriteMessage(websocket.TextMessage, chunk.Data)\n\t\t\t}\n\t\t}\n\t\twebsockets.RUnlock()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package qstat provides an interface to GridEngine's job and queue status facilities\npackage qstat\n\nimport (\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"github.com\/kisielk\/gorge\/util\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n)\n\n\/\/ Resource represents a GridEngine resource request\n\/\/ See man 5 sge_complex for a more detailed description of the fields\ntype Resource struct {\n\tName string `xml:\"CE_name\"` \/\/ The name of the complex resource\n\tValType int `xml:\"CE_valtype\"` \/\/ The type of value\n\tStringVal string `xml:\"CE_stringval\"` \/\/ The value as a string\n\tDoubleVal float64 `xml:\"CE_doubleval\"` \/\/ The value as a double\n\tRelOp int `xml:\"CE_relop\"` \/\/ The relation operator used to compare the value\n\tConsumable bool `xml:\"CE_consumable\"` \/\/ True if the resource is a consumable resourece\n\tDominant bool `xml:\"CE_dominant\"` \/\/ ?\n\tPJDoubleVal float64 `xml:\"CE_pj_doubleval\"` \/\/ ?\n\tPJDominant bool `xml:\"CE_pj_dominant\"` \/\/ ?\n\tRequestable bool `xml:\"CE_requestable\"` \/\/ True if the resource is a requestable resource\n\tTagged bool `xml:\"CE_tagged\"` \/\/ ?\n}\n\n\/\/ MailAddress represents an email address\ntype MailAddress struct {\n\tUser string `xml:\"MR_user\"`\n\tHost string `xml:\"MR_host\"`\n}\n\n\/\/ String implements the Stringer interface\nfunc (a MailAddress) String() string {\n\treturn a.User + \"@\" + a.Host\n}\n\n\/\/ EnvVar represents a job environment variable\ntype EnvVar struct {\n\tVariable string `xml:\"VA_variable\"` \/\/ The name of the variable\n\tValue string `xml:\"VA_value\"` \/\/ The value of the variable\n}\n\ntype PathList struct {\n\tPath string `xml:\"PN_path\"`\n\tHost string `xml:\"PN_host\"`\n\tFileHost string `xml:\"PN_file_host\"`\n\tFileStaging bool `xml:\"PN_file_staging\"`\n}\n\n\/\/ TaskIDRange represents a range of job array task identifiers\ntype TaskIDRange struct {\n\tMin int `xml:\"RN_min\"` \/\/ The minimum task ID\n\tMax int `xml:\"RN_max\"` \/\/ The maximum task ID\n\tStep int `xml:\"RN_step\"` \/\/ The ID step size between tasks\n}\n\ntype JATMessage struct {\n\tType int `xml:\"QIM_type\"`\n\tMessage string `xml:\"QIM_message\"`\n}\n\ntype Messages struct {\n\tMessages []SMEMessage `xml:\"SME_message_list>element\"`\n\tGlobalMessages []SMEMessage `xml:\"SME_global_message_list>element\"`\n}\n\ntype SMEMessage struct {\n\tJobNumbers []int `xml:\"MES_job_number_list>ulong_sublist>ULNG_value\"`\n\tNumber int `xml:\"MES_message_number\"`\n\tMessage string `xml:\"MES_message\"`\n}\n\ntype Task struct {\n\tStatus int `xml:\"JAT_status\"`\n\tTaskNumber int `xml:\"JAT_task_number\"`\n\tMessageList JATMessage `xml:\"JAT_message_list>ulong_sublist\"`\n}\n\ntype JobInfo struct {\n\tJobNumber int `xml:\"JB_job_number\"`\n\tAdvanceReservation int `xml:\"JB_ar\"`\n\tExecFile string `xml:\"JB_exec_file\"`\n\tSubmissionTime int `xml:\"JB_submission_time\"`\n\tOwner string `xml:\"JB_owner\"`\n\tUid int `xml:\"JB_uid\"`\n\tGroup string `xml:\"JB_group\"`\n\tGid int `xml:\"JB_gid\"`\n\tAccount string `xml:\"JB_account\"`\n\tMergeStdErr bool `xml:\"JB_merge_stderr\"`\n\tMailList []MailAddress `xml:\"JB_mail_list>element\"`\n\tProject string `xml:\"JB_project\"`\n\tNotify bool `xml:\"JB_notify\"`\n\tJobName string `xml:\"JB_job_name\"`\n\tStdoutPathList []PathList `xml:\"JB_stdout_path_list>path_list\"`\n\tAltStdoutPathList []PathList `xml:\"JB_stdout_path_list>stdout_path_list\"` \/\/ Alternate stdout path list\n\tJobShare int `xml:\"JB_jobshare\"`\n\tHardResourceList []Resource `xml:\"JB_hard_resource_list>qstat_l_requests\"`\n\tEnvList []EnvVar `xml:\"JB_env_list>job_sublist\"`\n\tJobArgs []string `xml:\"JB_job_args>element>ST_name\"`\n\tScriptFile string `xml:\"JB_script_file\"`\n\tJobArrayTasks []Task `xml:\"JB_ja_tasks>ulong_sublist\"`\n\tCwd string `xml:\"JB_cwd\"`\n\tStderrPathList []PathList `xml:\"JB_stderr_path_list>path_list\"`\n\tAltStderrPathList []PathList `xml:\"JB_stderr_path_list>stderr_path_list\"` \/\/ Alternate stderr path list\n\tJIDRequestList []int `xml:\"JB_jid_request_list>element>JRE_job_name\"`\n\tJIDSuccessorList []int `xml:\"JB_jid_successor_list>ulong_sublist>JRE_job_number\"`\n\tDeadline bool `xml:\"JB_deadline\"`\n\tExecutionTime int `xml:\"JB_execution_time\"`\n\tCheckpointAttr int `xml:\"JB_checkpoint_attr\"`\n\tCheckpointInterval int `xml:\"JB_checkpoint_interval\"`\n\tReserve bool `xml:\"JB_reserve\"`\n\tMailOptions int `xml:\"JB_mail_options\"`\n\tPriority int `xml:\"JB_priority\"`\n\tRestart int `xml:\"JB_restart\"`\n\tVerify bool `xml:\"JB_verify\"`\n\tScriptSize int `xml:\"JB_script_size\"`\n\tVerifySuitableQueues bool `xml:\"JB_verify_suitable_queues\"`\n\tSoftWallClockGMT int `xml:\"JB_soft_wallclock_gmt\"`\n\tHardWallClockGMT int `xml:\"JB_hard_wallclock_gmt\"`\n\tOverrideTickets int `xml:\"JB_override_tickets\"`\n\tVersion int `xml:\"JB_version\"`\n\tJobArray TaskIDRange `xml:\"JB_ja_structure>task_id_range\"`\n\tType int `xml:\"JB_type\"`\n}\n\n\/\/ Type DetailedJobInfo represents the job information returned by qstat -j\ntype DetailedJobInfo struct {\n\tJobs []JobInfo `xml:\"djob_info>element\"`\n\tMessages Messages `xml:\"messages>element\"`\n}\n\ntype QueueJob struct {\n\tJobNumber int `xml:\"JB_job_number\"`\n\tNormalizedPriority float32 `xml:\"JAT_prio\"`\n\tPOSIXPriority int `xml:\"JB_priority\"`\n\tName string `xml:\"JB_name\"`\n\tOwner string `xml:\"JB_owner\"`\n\tProject string `xml:\"JB_project\"`\n\tDepartment string `xml:\"JB_department\"`\n\tState string `xml:\"state\"`\n\tStartTime string `xml:\"JAT_start_time\"`\n\tCpuUsage float64 `xml:\"cpu_usage\"`\n\tMemUsage float64 `xml:\"mem_usage\"`\n\tIOUsage float64 `xml:\"io_usage\"`\n\tTickets int `xml:\"tickets\"`\n\tOverrideTickets int `xml:\"otickets\"`\n\tFairshareTickets int `xml:\"ftickets\"`\n\tShareTreeTickets int `xml:\"stickets\"`\n\tQueueName string `xml:\"queue_name\"`\n\tSlots int `xml:\"slots\"`\n\tTasks string `xml:\"tasks\"`\n}\n\n\/\/ DeletionState returns true if the job is in the (d)eletion state\nfunc (j QueueJob) DeletionState() bool {\n\treturn strings.Contains(j.State, \"d\")\n}\n\n\/\/ ErrorState returns true if the job is in the (E)rror state\nfunc (j QueueJob) ErrorState() bool {\n\treturn strings.Contains(j.State, \"E\")\n}\n\n\/\/ HoldState returns true if the job is in the (h)old state\nfunc (j QueueJob) HoldState() bool {\n\treturn strings.Contains(j.State, \"h\")\n}\n\n\/\/ RunningState returns true if the job is in the (r)unning state\nfunc (j QueueJob) RunningState() bool {\n\treturn strings.Contains(j.State, \"r\")\n}\n\n\/\/ RestartedState returns true if the job is in the (R)estarted state\nfunc (j QueueJob) RestartedState() bool {\n\treturn strings.Contains(j.State, \"R\")\n}\n\n\/\/ SuspendedState returns true if the job is in the (s)uspended state\nfunc (j QueueJob) SuspendedState() bool {\n\treturn strings.Contains(j.State, \"s\")\n}\n\n\/\/ QueueSuspendedState returns true if the job is in the queue (S)uspended state\nfunc (j QueueJob) QueueSuspendedState() bool {\n\treturn strings.Contains(j.State, \"S\")\n}\n\n\/\/ TransferringState returns true if the job is in the (t)ransferring state\nfunc (j QueueJob) TransferringState() bool {\n\treturn strings.Contains(j.State, \"t\")\n}\n\n\/\/ ThresholdState returns true if the job is in the (T)hreshold state\nfunc (j QueueJob) ThresholdState() bool {\n\treturn strings.Contains(j.State, \"T\")\n}\n\n\/\/ WaitingState returns true if the job is in the (w)aiting state\nfunc (j QueueJob) WaitingState() bool {\n\treturn strings.Contains(j.State, \"w\")\n}\n\n\/\/ QueuedState returns true if the job is in the (q)ueued state\nfunc (j QueueJob) QueuedState() bool {\n\treturn strings.Contains(j.State, \"q\")\n}\n\ntype Queue struct {\n\tName string `xml:\"name\"`\n\tQType string `xml:\"qtype\"`\n\tSlotsUsed int `xml:\"slots_used\"`\n\tSlotsReserved int `xml:\"slots_resv\"`\n\tSlotsTotal int `xml:\"slots_total\"`\n\tArch string `xml:\"arch\"`\n\tJoblist []QueueJob `xml:\"job_list\"`\n}\n\ntype QueueInfo struct {\n\tQueuedJobs []QueueJob `xml:\"queue_info>job_list\"` \/\/ A list of jobs currently assigned to queues, eg: executing\n\tPendingJobs []QueueJob `xml:\"job_info>job_list\"` \/\/ A list of jobs that are not yet executing in any queue\n}\n\n\/\/ Function absPaths converts the paths of a list of PathList structs in to absolute paths of root if they are not already absolute.\nfunc absPaths(root string, ps []PathList) []PathList {\n\tvar paths []PathList\n\tfor _, p := range ps {\n\t\tif !path.IsAbs(p.Path) {\n\t\t\tp.Path = path.Join(root, p.Path)\n\t\t}\n\t\tpaths = append(paths, p)\n\t}\n\treturn paths\n}\n\nfunc (i *JobInfo) StdoutPaths() []PathList {\n\tvar paths []PathList\n\tpaths = append(paths, absPaths(i.Cwd, i.StdoutPathList)...)\n\tpaths = append(paths, absPaths(i.Cwd, i.AltStdoutPathList)...)\n\treturn paths\n}\n\nfunc (i *JobInfo) StderrPaths() []PathList {\n\tvar paths []PathList\n\tif !i.MergeStdErr {\n\t\tpaths = append(paths, absPaths(i.Cwd, i.StderrPathList)...)\n\t\tpaths = append(paths, absPaths(i.Cwd, i.AltStderrPathList)...)\n\t}\n\treturn paths\n}\n\nfunc (i *JobInfo) Command() string {\n\treturn i.ScriptFile + \" \" + strings.Join(i.JobArgs, \" \")\n}\n\n\/\/ GetDetailedJobInfo returns a DetailedJobInfo structure contianing all jobs matching the provided pattern.\n\/\/ The pattern should match the type wc_job_list as defined in man 1 sge_types\nfunc GetDetailedJobInfo(pattern string) (q *DetailedJobInfo, err error) {\n\tcmd := exec.Command(\"qstat\", \"-j\", pattern, \"-xml\")\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = cmd.Start(); err != nil {\n\t\treturn nil, err\n\t}\n\n\td := xml.NewDecoder(util.NewValidUTF8Reader(stdout))\n\td.Strict = false\n\tif err = d.Decode(&q); err != nil {\n\t\t\/\/ Qstat just produces unparseable XML instead of doing real error reporting. Hurrah.\n\t\tif err.Error() == \"XML syntax error on line 3: expected element name after <\" {\n\t\t\treturn nil, errors.New(\"Unknown job: \" + pattern)\n\t\t}\n\t}\n\n\tif err = cmd.Wait(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn q, nil\n}\n\n\/\/ GetQueueInfo returns a QueueInfo reflecting the current state of the GridEngine queue.\n\/\/ The argument u can be used to limit the results to a particular user.\n\/\/ If u is the string \"*\" then results are returned for all users.\n\/\/ If u is the empty string then results are returned for the current user.\nfunc GetQueueInfo(u string) (q *QueueInfo, err error) {\n\targs := []string{\"-xml\", \"-pri\", \"-ext\"}\n\n\tif u != \"\" {\n\t\targs = append(args, \"-u\", \"*\")\n\t}\n\n\tcmd := exec.Command(\"qstat\", args...)\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = cmd.Start(); err != nil {\n\t\treturn nil, err\n\t}\n\td := xml.NewDecoder(util.NewValidUTF8Reader(stdout))\n\td.Strict = false\n\n\tif err = d.Decode(&q); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = cmd.Wait(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn q, nil\n}\n<commit_msg>Correctly restrict GetQueueInfo to the supplied user<commit_after>\/\/ Package qstat provides an interface to GridEngine's job and queue status facilities\npackage qstat\n\nimport (\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"github.com\/kisielk\/gorge\/util\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n)\n\n\/\/ Resource represents a GridEngine resource request\n\/\/ See man 5 sge_complex for a more detailed description of the fields\ntype Resource struct {\n\tName string `xml:\"CE_name\"` \/\/ The name of the complex resource\n\tValType int `xml:\"CE_valtype\"` \/\/ The type of value\n\tStringVal string `xml:\"CE_stringval\"` \/\/ The value as a string\n\tDoubleVal float64 `xml:\"CE_doubleval\"` \/\/ The value as a double\n\tRelOp int `xml:\"CE_relop\"` \/\/ The relation operator used to compare the value\n\tConsumable bool `xml:\"CE_consumable\"` \/\/ True if the resource is a consumable resourece\n\tDominant bool `xml:\"CE_dominant\"` \/\/ ?\n\tPJDoubleVal float64 `xml:\"CE_pj_doubleval\"` \/\/ ?\n\tPJDominant bool `xml:\"CE_pj_dominant\"` \/\/ ?\n\tRequestable bool `xml:\"CE_requestable\"` \/\/ True if the resource is a requestable resource\n\tTagged bool `xml:\"CE_tagged\"` \/\/ ?\n}\n\n\/\/ MailAddress represents an email address\ntype MailAddress struct {\n\tUser string `xml:\"MR_user\"`\n\tHost string `xml:\"MR_host\"`\n}\n\n\/\/ String implements the Stringer interface\nfunc (a MailAddress) String() string {\n\treturn a.User + \"@\" + a.Host\n}\n\n\/\/ EnvVar represents a job environment variable\ntype EnvVar struct {\n\tVariable string `xml:\"VA_variable\"` \/\/ The name of the variable\n\tValue string `xml:\"VA_value\"` \/\/ The value of the variable\n}\n\ntype PathList struct {\n\tPath string `xml:\"PN_path\"`\n\tHost string `xml:\"PN_host\"`\n\tFileHost string `xml:\"PN_file_host\"`\n\tFileStaging bool `xml:\"PN_file_staging\"`\n}\n\n\/\/ TaskIDRange represents a range of job array task identifiers\ntype TaskIDRange struct {\n\tMin int `xml:\"RN_min\"` \/\/ The minimum task ID\n\tMax int `xml:\"RN_max\"` \/\/ The maximum task ID\n\tStep int `xml:\"RN_step\"` \/\/ The ID step size between tasks\n}\n\ntype JATMessage struct {\n\tType int `xml:\"QIM_type\"`\n\tMessage string `xml:\"QIM_message\"`\n}\n\ntype Messages struct {\n\tMessages []SMEMessage `xml:\"SME_message_list>element\"`\n\tGlobalMessages []SMEMessage `xml:\"SME_global_message_list>element\"`\n}\n\ntype SMEMessage struct {\n\tJobNumbers []int `xml:\"MES_job_number_list>ulong_sublist>ULNG_value\"`\n\tNumber int `xml:\"MES_message_number\"`\n\tMessage string `xml:\"MES_message\"`\n}\n\ntype Task struct {\n\tStatus int `xml:\"JAT_status\"`\n\tTaskNumber int `xml:\"JAT_task_number\"`\n\tMessageList JATMessage `xml:\"JAT_message_list>ulong_sublist\"`\n}\n\ntype JobInfo struct {\n\tJobNumber int `xml:\"JB_job_number\"`\n\tAdvanceReservation int `xml:\"JB_ar\"`\n\tExecFile string `xml:\"JB_exec_file\"`\n\tSubmissionTime int `xml:\"JB_submission_time\"`\n\tOwner string `xml:\"JB_owner\"`\n\tUid int `xml:\"JB_uid\"`\n\tGroup string `xml:\"JB_group\"`\n\tGid int `xml:\"JB_gid\"`\n\tAccount string `xml:\"JB_account\"`\n\tMergeStdErr bool `xml:\"JB_merge_stderr\"`\n\tMailList []MailAddress `xml:\"JB_mail_list>element\"`\n\tProject string `xml:\"JB_project\"`\n\tNotify bool `xml:\"JB_notify\"`\n\tJobName string `xml:\"JB_job_name\"`\n\tStdoutPathList []PathList `xml:\"JB_stdout_path_list>path_list\"`\n\tAltStdoutPathList []PathList `xml:\"JB_stdout_path_list>stdout_path_list\"` \/\/ Alternate stdout path list\n\tJobShare int `xml:\"JB_jobshare\"`\n\tHardResourceList []Resource `xml:\"JB_hard_resource_list>qstat_l_requests\"`\n\tEnvList []EnvVar `xml:\"JB_env_list>job_sublist\"`\n\tJobArgs []string `xml:\"JB_job_args>element>ST_name\"`\n\tScriptFile string `xml:\"JB_script_file\"`\n\tJobArrayTasks []Task `xml:\"JB_ja_tasks>ulong_sublist\"`\n\tCwd string `xml:\"JB_cwd\"`\n\tStderrPathList []PathList `xml:\"JB_stderr_path_list>path_list\"`\n\tAltStderrPathList []PathList `xml:\"JB_stderr_path_list>stderr_path_list\"` \/\/ Alternate stderr path list\n\tJIDRequestList []int `xml:\"JB_jid_request_list>element>JRE_job_name\"`\n\tJIDSuccessorList []int `xml:\"JB_jid_successor_list>ulong_sublist>JRE_job_number\"`\n\tDeadline bool `xml:\"JB_deadline\"`\n\tExecutionTime int `xml:\"JB_execution_time\"`\n\tCheckpointAttr int `xml:\"JB_checkpoint_attr\"`\n\tCheckpointInterval int `xml:\"JB_checkpoint_interval\"`\n\tReserve bool `xml:\"JB_reserve\"`\n\tMailOptions int `xml:\"JB_mail_options\"`\n\tPriority int `xml:\"JB_priority\"`\n\tRestart int `xml:\"JB_restart\"`\n\tVerify bool `xml:\"JB_verify\"`\n\tScriptSize int `xml:\"JB_script_size\"`\n\tVerifySuitableQueues bool `xml:\"JB_verify_suitable_queues\"`\n\tSoftWallClockGMT int `xml:\"JB_soft_wallclock_gmt\"`\n\tHardWallClockGMT int `xml:\"JB_hard_wallclock_gmt\"`\n\tOverrideTickets int `xml:\"JB_override_tickets\"`\n\tVersion int `xml:\"JB_version\"`\n\tJobArray TaskIDRange `xml:\"JB_ja_structure>task_id_range\"`\n\tType int `xml:\"JB_type\"`\n}\n\n\/\/ Type DetailedJobInfo represents the job information returned by qstat -j\ntype DetailedJobInfo struct {\n\tJobs []JobInfo `xml:\"djob_info>element\"`\n\tMessages Messages `xml:\"messages>element\"`\n}\n\ntype QueueJob struct {\n\tJobNumber int `xml:\"JB_job_number\"`\n\tNormalizedPriority float32 `xml:\"JAT_prio\"`\n\tPOSIXPriority int `xml:\"JB_priority\"`\n\tName string `xml:\"JB_name\"`\n\tOwner string `xml:\"JB_owner\"`\n\tProject string `xml:\"JB_project\"`\n\tDepartment string `xml:\"JB_department\"`\n\tState string `xml:\"state\"`\n\tStartTime string `xml:\"JAT_start_time\"`\n\tCpuUsage float64 `xml:\"cpu_usage\"`\n\tMemUsage float64 `xml:\"mem_usage\"`\n\tIOUsage float64 `xml:\"io_usage\"`\n\tTickets int `xml:\"tickets\"`\n\tOverrideTickets int `xml:\"otickets\"`\n\tFairshareTickets int `xml:\"ftickets\"`\n\tShareTreeTickets int `xml:\"stickets\"`\n\tQueueName string `xml:\"queue_name\"`\n\tSlots int `xml:\"slots\"`\n\tTasks string `xml:\"tasks\"`\n}\n\n\/\/ DeletionState returns true if the job is in the (d)eletion state\nfunc (j QueueJob) DeletionState() bool {\n\treturn strings.Contains(j.State, \"d\")\n}\n\n\/\/ ErrorState returns true if the job is in the (E)rror state\nfunc (j QueueJob) ErrorState() bool {\n\treturn strings.Contains(j.State, \"E\")\n}\n\n\/\/ HoldState returns true if the job is in the (h)old state\nfunc (j QueueJob) HoldState() bool {\n\treturn strings.Contains(j.State, \"h\")\n}\n\n\/\/ RunningState returns true if the job is in the (r)unning state\nfunc (j QueueJob) RunningState() bool {\n\treturn strings.Contains(j.State, \"r\")\n}\n\n\/\/ RestartedState returns true if the job is in the (R)estarted state\nfunc (j QueueJob) RestartedState() bool {\n\treturn strings.Contains(j.State, \"R\")\n}\n\n\/\/ SuspendedState returns true if the job is in the (s)uspended state\nfunc (j QueueJob) SuspendedState() bool {\n\treturn strings.Contains(j.State, \"s\")\n}\n\n\/\/ QueueSuspendedState returns true if the job is in the queue (S)uspended state\nfunc (j QueueJob) QueueSuspendedState() bool {\n\treturn strings.Contains(j.State, \"S\")\n}\n\n\/\/ TransferringState returns true if the job is in the (t)ransferring state\nfunc (j QueueJob) TransferringState() bool {\n\treturn strings.Contains(j.State, \"t\")\n}\n\n\/\/ ThresholdState returns true if the job is in the (T)hreshold state\nfunc (j QueueJob) ThresholdState() bool {\n\treturn strings.Contains(j.State, \"T\")\n}\n\n\/\/ WaitingState returns true if the job is in the (w)aiting state\nfunc (j QueueJob) WaitingState() bool {\n\treturn strings.Contains(j.State, \"w\")\n}\n\n\/\/ QueuedState returns true if the job is in the (q)ueued state\nfunc (j QueueJob) QueuedState() bool {\n\treturn strings.Contains(j.State, \"q\")\n}\n\ntype Queue struct {\n\tName string `xml:\"name\"`\n\tQType string `xml:\"qtype\"`\n\tSlotsUsed int `xml:\"slots_used\"`\n\tSlotsReserved int `xml:\"slots_resv\"`\n\tSlotsTotal int `xml:\"slots_total\"`\n\tArch string `xml:\"arch\"`\n\tJoblist []QueueJob `xml:\"job_list\"`\n}\n\ntype QueueInfo struct {\n\tQueuedJobs []QueueJob `xml:\"queue_info>job_list\"` \/\/ A list of jobs currently assigned to queues, eg: executing\n\tPendingJobs []QueueJob `xml:\"job_info>job_list\"` \/\/ A list of jobs that are not yet executing in any queue\n}\n\n\/\/ Function absPaths converts the paths of a list of PathList structs in to absolute paths of root if they are not already absolute.\nfunc absPaths(root string, ps []PathList) []PathList {\n\tvar paths []PathList\n\tfor _, p := range ps {\n\t\tif !path.IsAbs(p.Path) {\n\t\t\tp.Path = path.Join(root, p.Path)\n\t\t}\n\t\tpaths = append(paths, p)\n\t}\n\treturn paths\n}\n\nfunc (i *JobInfo) StdoutPaths() []PathList {\n\tvar paths []PathList\n\tpaths = append(paths, absPaths(i.Cwd, i.StdoutPathList)...)\n\tpaths = append(paths, absPaths(i.Cwd, i.AltStdoutPathList)...)\n\treturn paths\n}\n\nfunc (i *JobInfo) StderrPaths() []PathList {\n\tvar paths []PathList\n\tif !i.MergeStdErr {\n\t\tpaths = append(paths, absPaths(i.Cwd, i.StderrPathList)...)\n\t\tpaths = append(paths, absPaths(i.Cwd, i.AltStderrPathList)...)\n\t}\n\treturn paths\n}\n\nfunc (i *JobInfo) Command() string {\n\treturn i.ScriptFile + \" \" + strings.Join(i.JobArgs, \" \")\n}\n\n\/\/ GetDetailedJobInfo returns a DetailedJobInfo structure contianing all jobs matching the provided pattern.\n\/\/ The pattern should match the type wc_job_list as defined in man 1 sge_types\nfunc GetDetailedJobInfo(pattern string) (q *DetailedJobInfo, err error) {\n\tcmd := exec.Command(\"qstat\", \"-j\", pattern, \"-xml\")\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = cmd.Start(); err != nil {\n\t\treturn nil, err\n\t}\n\n\td := xml.NewDecoder(util.NewValidUTF8Reader(stdout))\n\td.Strict = false\n\tif err = d.Decode(&q); err != nil {\n\t\t\/\/ Qstat just produces unparseable XML instead of doing real error reporting. Hurrah.\n\t\tif err.Error() == \"XML syntax error on line 3: expected element name after <\" {\n\t\t\treturn nil, errors.New(\"Unknown job: \" + pattern)\n\t\t}\n\t}\n\n\tif err = cmd.Wait(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn q, nil\n}\n\n\/\/ GetQueueInfo returns a QueueInfo reflecting the current state of the GridEngine queue.\n\/\/ The argument u can be used to limit the results to a particular user.\n\/\/ If u is the string \"*\" then results are returned for all users.\n\/\/ If u is the empty string then results are returned for the current user.\nfunc GetQueueInfo(u string) (q *QueueInfo, err error) {\n\targs := []string{\"-xml\", \"-pri\", \"-ext\"}\n\n\tif u != \"\" {\n\t\targs = append(args, \"-u\", u)\n\t}\n\n\tcmd := exec.Command(\"qstat\", args...)\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = cmd.Start(); err != nil {\n\t\treturn nil, err\n\t}\n\td := xml.NewDecoder(util.NewValidUTF8Reader(stdout))\n\td.Strict = false\n\n\tif err = d.Decode(&q); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = cmd.Wait(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn q, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package weavedns\n\nimport (\n\t\"github.com\/miekg\/dns\"\n\t\"log\"\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar (\n\ttest_addr1 = \"10.0.2.1\/24\"\n)\n\nfunc sendQuery(name string, querytype uint16) error {\n\tm := new(dns.Msg)\n\tm.SetQuestion(name, querytype)\n\tm.RecursionDesired = false\n\tbuf, err := m.Pack()\n\tif err != nil {\n\t\treturn err\n\t}\n\tconn, err := net.ListenUDP(\"udp4\", &net.UDPAddr{IP: net.IPv4zero, Port: 0})\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = conn.WriteTo(buf, ipv4Addr)\n\treturn err\n}\n\nfunc TestServerSimpleQuery(t *testing.T) {\n\tlog.Println(\"TestServerSimpleQuery starting\")\n\tvar zone = new(ZoneDb)\n\tdocker_ip := net.ParseIP(\"9.8.7.6\")\n\tweave_ip, subnet, _ := net.ParseCIDR(test_addr1)\n\tzone.AddRecord(\"test.weave.\", docker_ip, weave_ip, subnet)\n\n\tmdnsServer, err := NewMDNSServer(zone)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = mdnsServer.Start(nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar received_addr net.IP\n\n\t\/\/ Implement a minimal listener for responses\n\tmulticast, err := LinkLocalMulticastListener(nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\thandleMDNS := func(w dns.ResponseWriter, r *dns.Msg) {\n\t\t\/\/ Only handle responses here\n\t\tif len(r.Answer) > 0 {\n\t\t\tfor _, answer := range r.Answer {\n\t\t\t\tswitch rr := answer.(type) {\n\t\t\t\tcase *dns.A:\n\t\t\t\t\treceived_addr = rr.A\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tserver := &dns.Server{Listener: nil, PacketConn: multicast, Handler: dns.HandlerFunc(handleMDNS)}\n\tgo server.ActivateAndServe()\n\n\tsendQuery(\"test.weave.\", dns.TypeA)\n\n\ttime.Sleep(time.Second)\n\n\tif !received_addr.Equal(weave_ip) {\n\t\tt.Log(\"Unexpected result for test.weave\", received_addr)\n\t\tt.Fail()\n\t}\n}\n<commit_msg>Shut down listener when test finished<commit_after>package weavedns\n\nimport (\n\t\"github.com\/miekg\/dns\"\n\t\"log\"\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar (\n\ttest_addr1 = \"10.0.2.1\/24\"\n)\n\nfunc sendQuery(name string, querytype uint16) error {\n\tm := new(dns.Msg)\n\tm.SetQuestion(name, querytype)\n\tm.RecursionDesired = false\n\tbuf, err := m.Pack()\n\tif err != nil {\n\t\treturn err\n\t}\n\tconn, err := net.ListenUDP(\"udp4\", &net.UDPAddr{IP: net.IPv4zero, Port: 0})\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = conn.WriteTo(buf, ipv4Addr)\n\treturn err\n}\n\nfunc TestServerSimpleQuery(t *testing.T) {\n\tlog.Println(\"TestServerSimpleQuery starting\")\n\tvar zone = new(ZoneDb)\n\tdocker_ip := net.ParseIP(\"9.8.7.6\")\n\tweave_ip, subnet, _ := net.ParseCIDR(test_addr1)\n\tzone.AddRecord(\"test.weave.\", docker_ip, weave_ip, subnet)\n\n\tmdnsServer, err := NewMDNSServer(zone)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = mdnsServer.Start(nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar received_addr net.IP\n\n\t\/\/ Implement a minimal listener for responses\n\tmulticast, err := LinkLocalMulticastListener(nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\thandleMDNS := func(w dns.ResponseWriter, r *dns.Msg) {\n\t\t\/\/ Only handle responses here\n\t\tif len(r.Answer) > 0 {\n\t\t\tfor _, answer := range r.Answer {\n\t\t\t\tswitch rr := answer.(type) {\n\t\t\t\tcase *dns.A:\n\t\t\t\t\treceived_addr = rr.A\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tserver := &dns.Server{Listener: nil, PacketConn: multicast, Handler: dns.HandlerFunc(handleMDNS)}\n\tgo server.ActivateAndServe()\n\tdefer server.Shutdown()\n\n\tsendQuery(\"test.weave.\", dns.TypeA)\n\n\ttime.Sleep(time.Second)\n\n\tif !received_addr.Equal(weave_ip) {\n\t\tt.Log(\"Unexpected result for test.weave\", received_addr)\n\t\tt.Fail()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package webhook\n\nimport (\n\t\"encoding\/json\"\n\t\"time\"\n)\n\ntype oldW struct {\n\t\/\/ Configuration for message delivery\n\tConfig struct {\n\t\t\/\/ The URL to deliver messages to.\n\t\tURL string `json:\"url\"`\n\n\t\t\/\/ The content-type to set the messages to (unless specified by WRP).\n\t\tContentType string `json:\"content_type\"`\n\n\t\t\/\/ The secret to use for the SHA1 HMAC.\n\t\t\/\/ Optional, set to \"\" to disable behavior.\n\t\tSecret string `json:\"secret,omitempty\"`\n\t} `json:\"config\"`\n\n\t\/\/ The list of regular expressions to match event type against.\n\tEvents []string `json:\"events\"`\n\n\t\/\/ Matcher type contains values to match against the metadata.\n\tMatcher struct {\n\t\t\/\/ The list of regular expressions to match device id type against.\n\t\tDeviceId []string `json:\"device_id\"`\n\t} `json:\"matcher,omitempty\"`\n\n\t\/\/ The specified duration for this hook to live\n\tDuration int64 `json:\"duration\"`\n\n\t\/\/ The absolute time when this hook is to be disabled\n\tUntil int64 `json:\"until\"`\n\n\t\/\/ The address that performed the registration\n\tAddress string `json:\"registered_from_address\"`\n}\n\nfunc convertOldHooksToNewHooks(body []byte) (hooks []W, err error) {\n\tvar oldHooks []oldW\n\terr = json.Unmarshal(body, &oldHooks)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, oldHook range oldHooks {\n\t\tvar tempHook W\n\t\ttempHook.Config.URL = oldHook.Config.URL\n\t\ttempHook.Config.ContentType = oldHook.Config.ContentType\n\t\ttempHook.Config.Secret = oldHook.Config.Secret\n\t\ttempHook.Events = oldHook.Events\n\t\ttempHook.Matcher = oldHook.Matcher\n\t\ttempHook.Duration = time.Duration(oldHook.Duration) * time.Second\n\t\ttempHook.Until = time.Unix(oldHook.Until, 0)\n\t\ttempHook.Address = oldHook.Address\n\t\t\n\t\thooks = append(hooks, tempHook)\n\t}\n\t\n\treturn\n}\n<commit_msg>fix syntax error<commit_after>package webhook\n\nimport (\n\t\"encoding\/json\"\n\t\"time\"\n)\n\ntype oldW struct {\n\t\/\/ Configuration for message delivery\n\tConfig struct {\n\t\t\/\/ The URL to deliver messages to.\n\t\tURL string `json:\"url\"`\n\n\t\t\/\/ The content-type to set the messages to (unless specified by WRP).\n\t\tContentType string `json:\"content_type\"`\n\n\t\t\/\/ The secret to use for the SHA1 HMAC.\n\t\t\/\/ Optional, set to \"\" to disable behavior.\n\t\tSecret string `json:\"secret,omitempty\"`\n\t} `json:\"config\"`\n\n\t\/\/ The list of regular expressions to match event type against.\n\tEvents []string `json:\"events\"`\n\n\t\/\/ Matcher type contains values to match against the metadata.\n\tMatcher struct {\n\t\t\/\/ The list of regular expressions to match device id type against.\n\t\tDeviceId []string `json:\"device_id\"`\n\t} `json:\"matcher,omitempty\"`\n\n\t\/\/ The specified duration for this hook to live\n\tDuration int64 `json:\"duration\"`\n\n\t\/\/ The absolute time when this hook is to be disabled\n\tUntil int64 `json:\"until\"`\n\n\t\/\/ The address that performed the registration\n\tAddress string `json:\"registered_from_address\"`\n}\n\nfunc convertOldHooksToNewHooks(body []byte) (hooks []W, err error) {\n\tvar oldHooks []oldW\n\terr = json.Unmarshal(body, &oldHooks)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, oldHook := range oldHooks {\n\t\tvar tempHook W\n\t\ttempHook.Config.URL = oldHook.Config.URL\n\t\ttempHook.Config.ContentType = oldHook.Config.ContentType\n\t\ttempHook.Config.Secret = oldHook.Config.Secret\n\t\ttempHook.Events = oldHook.Events\n\t\ttempHook.Matcher = oldHook.Matcher\n\t\ttempHook.Duration = time.Duration(oldHook.Duration) * time.Second\n\t\ttempHook.Until = time.Unix(oldHook.Until, 0)\n\t\ttempHook.Address = oldHook.Address\n\t\t\n\t\thooks = append(hooks, tempHook)\n\t}\n\t\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package queue implements a boldDB backed queue for MDM Commands.\npackage queue\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/groob\/plist\"\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/micromdm\/mdm\"\n\t\"github.com\/micromdm\/micromdm\/command\"\n\t\"github.com\/micromdm\/micromdm\/pubsub\"\n)\n\nconst (\n\tDeviceCommandBucket = \"mdm.DeviceCommands\"\n)\n\ntype Store struct {\n\t*bolt.DB\n}\n\nfunc (db *Store) Next(ctx context.Context, resp mdm.Response) (*Command, error) {\n\tdc, err := db.DeviceCommand(resp.UDID)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"get device command from queue, udid: %s\", resp.UDID)\n\t}\n\n\tvar cmd *Command\n\tswitch resp.Status {\n\tcase \"NotNow\":\n\t\t\/\/ move down, send next\n\t\tx, a := cut(dc.Commands, resp.CommandUUID)\n\t\tdc.Commands = a\n\t\tif x == nil {\n\t\t\tbreak\n\t\t}\n\t\tdc.Commands = append(dc.Commands, *x)\n\n\tcase \"Acknowledged\":\n\t\t\/\/ move to completed, send next\n\t\tx, a := cut(dc.Commands, resp.CommandUUID)\n\t\tdc.Commands = a\n\t\tif x == nil {\n\t\t\tbreak\n\t\t}\n\t\tdc.Completed = append(dc.Completed, *x)\n\tcase \"Error\":\n\t\t\/\/ move to failed, send next\n\t\tx, a := cut(dc.Commands, resp.CommandUUID)\n\t\tdc.Commands = a\n\t\tif x == nil { \/\/ must've already bin ackd\n\t\t\tbreak\n\t\t}\n\t\tdc.Failed = append(dc.Failed, *x)\n\n\tcase \"CommandFormatError\":\n\t\t\/\/ move to failed\n\t\tx, a := cut(dc.Commands, resp.CommandUUID)\n\t\tdc.Commands = a\n\t\tif x == nil {\n\t\t\tbreak\n\t\t}\n\t\tdc.Failed = append(dc.Failed, *x)\n\n\tcase \"Idle\":\n\t\t\/\/ will send next command below\n\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown response status: %s\", resp.Status)\n\t}\n\n\t\/\/ pop the first command from the queue and add it to the end.\n\tcmd, dc.Commands = popFirst(dc.Commands)\n\tif cmd != nil {\n\t\tdc.Commands = append(dc.Commands, *cmd)\n\t}\n\n\tif cmd.UUID == resp.CommandUUID && resp.Status == \"NotNow\" {\n\t\t\/\/ This command was just handled by NotNow, ignore.\n\t\tcmd = nil\n\t}\n\n\tif err := db.Save(dc); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cmd, nil\n}\n\nfunc popFirst(all []Command) (*Command, []Command) {\n\tif len(all) == 0 {\n\t\treturn nil, all\n\t}\n\tfirst := all[0]\n\tall = append(all[:0], all[1:]...)\n\treturn &first, all\n}\n\nfunc cut(all []Command, uuid string) (*Command, []Command) {\n\tfor i, cmd := range all {\n\t\tif cmd.UUID == uuid {\n\t\t\tall = append(all[:i], all[i+1:]...)\n\t\t\treturn &cmd, all\n\t\t}\n\t}\n\treturn nil, all\n}\n\nfunc NewQueue(db *bolt.DB, sub pubsub.Subscriber) (*Store, error) {\n\terr := db.Update(func(tx *bolt.Tx) error {\n\t\t_, err := tx.CreateBucketIfNotExists([]byte(DeviceCommandBucket))\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"creating %s bucket\", DeviceCommandBucket)\n\t}\n\tdatastore := &Store{DB: db}\n\tif err := datastore.pollCommands(sub); err != nil {\n\t\treturn nil, err\n\t}\n\treturn datastore, nil\n}\n\nfunc (db *Store) Save(cmd *DeviceCommand) error {\n\ttx, err := db.DB.Begin(true)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"begin transaction\")\n\t}\n\tbkt := tx.Bucket([]byte(DeviceCommandBucket))\n\tif bkt == nil {\n\t\treturn fmt.Errorf(\"bucket %q not found!\", DeviceCommandBucket)\n\t}\n\tdevproto, err := MarshalDeviceCommand(cmd)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"marshalling DeviceCommand\")\n\t}\n\tkey := []byte(cmd.DeviceUDID)\n\tif err := bkt.Put(key, devproto); err != nil {\n\t\treturn errors.Wrap(err, \"put DeviceCommand to boltdb\")\n\t}\n\treturn tx.Commit()\n}\n\nfunc (db *Store) DeviceCommand(udid string) (*DeviceCommand, error) {\n\tvar dev DeviceCommand\n\terr := db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(DeviceCommandBucket))\n\t\tv := b.Get([]byte(udid))\n\t\tif v == nil {\n\t\t\treturn ¬Found{\"DeviceCommand\", fmt.Sprintf(\"udid %s\", udid)}\n\t\t}\n\t\treturn UnmarshalDeviceCommand(v, &dev)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &dev, nil\n}\n\ntype notFound struct {\n\tResourceType string\n\tMessage string\n}\n\nfunc (e *notFound) Error() string {\n\treturn fmt.Sprintf(\"not found: %s %s\", e.ResourceType, e.Message)\n}\n\nfunc (db *Store) pollCommands(sub pubsub.Subscriber) error {\n\tcommandEvents, err := sub.Subscribe(\"command-queue\", command.CommandTopic)\n\tif err != nil {\n\t\treturn errors.Wrapf(err,\n\t\t\t\"subscribing push to %s topic\", command.CommandTopic)\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event := <-commandEvents:\n\t\t\t\tvar ev command.Event\n\t\t\t\tif err := command.UnmarshalEvent(event.Message, &ev); err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tcmd := new(DeviceCommand)\n\t\t\t\tcmd.DeviceUDID = ev.DeviceUDID\n\t\t\t\tbyUDID, err := db.DeviceCommand(ev.DeviceUDID)\n\t\t\t\tif err == nil && byUDID != nil {\n\t\t\t\t\tcmd = byUDID\n\t\t\t\t}\n\t\t\t\tnewPayload, err := plist.Marshal(&ev.Payload)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tnewCmd := Command{\n\t\t\t\t\tUUID: ev.Payload.CommandUUID,\n\t\t\t\t\tPayload: newPayload,\n\t\t\t\t}\n\t\t\t\tcmd.Commands = append(cmd.Commands, newCmd)\n\t\t\t\tif err := db.Save(cmd); err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"queued event for device: %s\\n\", ev.DeviceUDID)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc isNotFound(err error) bool {\n\tif _, ok := err.(*notFound); ok {\n\t\treturn true\n\t}\n\treturn false\n}\n<commit_msg>Fix panic where popFirst() can return nil (but we were immediately using it)<commit_after>\/\/ Package queue implements a boldDB backed queue for MDM Commands.\npackage queue\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com\/boltdb\/bolt\"\n\t\"github.com\/groob\/plist\"\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/micromdm\/mdm\"\n\t\"github.com\/micromdm\/micromdm\/command\"\n\t\"github.com\/micromdm\/micromdm\/pubsub\"\n)\n\nconst (\n\tDeviceCommandBucket = \"mdm.DeviceCommands\"\n)\n\ntype Store struct {\n\t*bolt.DB\n}\n\nfunc (db *Store) Next(ctx context.Context, resp mdm.Response) (*Command, error) {\n\tdc, err := db.DeviceCommand(resp.UDID)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"get device command from queue, udid: %s\", resp.UDID)\n\t}\n\n\tvar cmd *Command\n\tswitch resp.Status {\n\tcase \"NotNow\":\n\t\t\/\/ move down, send next\n\t\tx, a := cut(dc.Commands, resp.CommandUUID)\n\t\tdc.Commands = a\n\t\tif x == nil {\n\t\t\tbreak\n\t\t}\n\t\tdc.Commands = append(dc.Commands, *x)\n\n\tcase \"Acknowledged\":\n\t\t\/\/ move to completed, send next\n\t\tx, a := cut(dc.Commands, resp.CommandUUID)\n\t\tdc.Commands = a\n\t\tif x == nil {\n\t\t\tbreak\n\t\t}\n\t\tdc.Completed = append(dc.Completed, *x)\n\tcase \"Error\":\n\t\t\/\/ move to failed, send next\n\t\tx, a := cut(dc.Commands, resp.CommandUUID)\n\t\tdc.Commands = a\n\t\tif x == nil { \/\/ must've already bin ackd\n\t\t\tbreak\n\t\t}\n\t\tdc.Failed = append(dc.Failed, *x)\n\n\tcase \"CommandFormatError\":\n\t\t\/\/ move to failed\n\t\tx, a := cut(dc.Commands, resp.CommandUUID)\n\t\tdc.Commands = a\n\t\tif x == nil {\n\t\t\tbreak\n\t\t}\n\t\tdc.Failed = append(dc.Failed, *x)\n\n\tcase \"Idle\":\n\t\t\/\/ will send next command below\n\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown response status: %s\", resp.Status)\n\t}\n\n\t\/\/ pop the first command from the queue and add it to the end.\n\tcmd, dc.Commands = popFirst(dc.Commands)\n\tif cmd != nil {\n\t\tdc.Commands = append(dc.Commands, *cmd)\n\n\t\tif cmd.UUID == resp.CommandUUID && resp.Status == \"NotNow\" {\n\t\t\t\/\/ This command was just handled by NotNow, ignore.\n\t\t\tcmd = nil\n\t\t}\n\t}\n\n\tif err := db.Save(dc); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cmd, nil\n}\n\nfunc popFirst(all []Command) (*Command, []Command) {\n\tif len(all) == 0 {\n\t\treturn nil, all\n\t}\n\tfirst := all[0]\n\tall = append(all[:0], all[1:]...)\n\treturn &first, all\n}\n\nfunc cut(all []Command, uuid string) (*Command, []Command) {\n\tfor i, cmd := range all {\n\t\tif cmd.UUID == uuid {\n\t\t\tall = append(all[:i], all[i+1:]...)\n\t\t\treturn &cmd, all\n\t\t}\n\t}\n\treturn nil, all\n}\n\nfunc NewQueue(db *bolt.DB, sub pubsub.Subscriber) (*Store, error) {\n\terr := db.Update(func(tx *bolt.Tx) error {\n\t\t_, err := tx.CreateBucketIfNotExists([]byte(DeviceCommandBucket))\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"creating %s bucket\", DeviceCommandBucket)\n\t}\n\tdatastore := &Store{DB: db}\n\tif err := datastore.pollCommands(sub); err != nil {\n\t\treturn nil, err\n\t}\n\treturn datastore, nil\n}\n\nfunc (db *Store) Save(cmd *DeviceCommand) error {\n\ttx, err := db.DB.Begin(true)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"begin transaction\")\n\t}\n\tbkt := tx.Bucket([]byte(DeviceCommandBucket))\n\tif bkt == nil {\n\t\treturn fmt.Errorf(\"bucket %q not found!\", DeviceCommandBucket)\n\t}\n\tdevproto, err := MarshalDeviceCommand(cmd)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"marshalling DeviceCommand\")\n\t}\n\tkey := []byte(cmd.DeviceUDID)\n\tif err := bkt.Put(key, devproto); err != nil {\n\t\treturn errors.Wrap(err, \"put DeviceCommand to boltdb\")\n\t}\n\treturn tx.Commit()\n}\n\nfunc (db *Store) DeviceCommand(udid string) (*DeviceCommand, error) {\n\tvar dev DeviceCommand\n\terr := db.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(DeviceCommandBucket))\n\t\tv := b.Get([]byte(udid))\n\t\tif v == nil {\n\t\t\treturn ¬Found{\"DeviceCommand\", fmt.Sprintf(\"udid %s\", udid)}\n\t\t}\n\t\treturn UnmarshalDeviceCommand(v, &dev)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &dev, nil\n}\n\ntype notFound struct {\n\tResourceType string\n\tMessage string\n}\n\nfunc (e *notFound) Error() string {\n\treturn fmt.Sprintf(\"not found: %s %s\", e.ResourceType, e.Message)\n}\n\nfunc (db *Store) pollCommands(sub pubsub.Subscriber) error {\n\tcommandEvents, err := sub.Subscribe(\"command-queue\", command.CommandTopic)\n\tif err != nil {\n\t\treturn errors.Wrapf(err,\n\t\t\t\"subscribing push to %s topic\", command.CommandTopic)\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase event := <-commandEvents:\n\t\t\t\tvar ev command.Event\n\t\t\t\tif err := command.UnmarshalEvent(event.Message, &ev); err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tcmd := new(DeviceCommand)\n\t\t\t\tcmd.DeviceUDID = ev.DeviceUDID\n\t\t\t\tbyUDID, err := db.DeviceCommand(ev.DeviceUDID)\n\t\t\t\tif err == nil && byUDID != nil {\n\t\t\t\t\tcmd = byUDID\n\t\t\t\t}\n\t\t\t\tnewPayload, err := plist.Marshal(&ev.Payload)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tnewCmd := Command{\n\t\t\t\t\tUUID: ev.Payload.CommandUUID,\n\t\t\t\t\tPayload: newPayload,\n\t\t\t\t}\n\t\t\t\tcmd.Commands = append(cmd.Commands, newCmd)\n\t\t\t\tif err := db.Save(cmd); err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"queued event for device: %s\\n\", ev.DeviceUDID)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nil\n}\n\nfunc isNotFound(err error) bool {\n\tif _, ok := err.(*notFound); ok {\n\t\treturn true\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage test\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"time\"\n\n\t\"v.io\/jiri\/collect\"\n\t\"v.io\/jiri\/project\"\n\t\"v.io\/jiri\/retry\"\n\t\"v.io\/jiri\/tool\"\n\t\"v.io\/x\/devtools\/internal\/test\"\n\t\"v.io\/x\/devtools\/internal\/xunit\"\n)\n\n\/\/ generateXUnitTestSuite generates an xUnit test suite that\n\/\/ encapsulates the given input.\nfunc generateXUnitTestSuite(ctx *tool.Context, failure *xunit.Failure, pkg string, duration time.Duration) *xunit.TestSuite {\n\t\/\/ Generate an xUnit test suite describing the result.\n\ts := xunit.TestSuite{Name: pkg}\n\tc := xunit.TestCase{\n\t\tClassname: pkg,\n\t\tName: \"Test\",\n\t\tTime: fmt.Sprintf(\"%.2f\", duration.Seconds()),\n\t}\n\tif failure != nil {\n\t\tfmt.Fprintf(ctx.Stdout(), \"%s ... failed\\n%v\\n\", pkg, failure.Data)\n\t\tc.Failures = append(c.Failures, *failure)\n\t\ts.Failures++\n\t} else {\n\t\tfmt.Fprintf(ctx.Stdout(), \"%s ... ok\\n\", pkg)\n\t}\n\ts.Tests++\n\ts.Cases = append(s.Cases, c)\n\treturn &s\n}\n\n\/\/ testSingleProdService test the given production service.\nfunc testSingleProdService(ctx *tool.Context, vroot, principalDir string, service prodService) *xunit.TestSuite {\n\tbin := filepath.Join(vroot, \"release\", \"go\", \"bin\", \"vrpc\")\n\tvar out bytes.Buffer\n\topts := ctx.Run().Opts()\n\topts.Stdout = &out\n\topts.Stderr = &out\n\tstart := time.Now()\n\targs := []string{}\n\tif principalDir != \"\" {\n\t\targs = append(args, \"--v23.credentials\", principalDir)\n\t}\n\targs = append(args, \"signature\", \"--show-reserved\")\n\tif principalDir == \"\" {\n\t\targs = append(args, \"--insecure\")\n\t}\n\targs = append(args, service.objectName)\n\tif err := ctx.Run().TimedCommandWithOpts(test.DefaultTimeout, opts, bin, args...); err != nil {\n\t\treturn generateXUnitTestSuite(ctx, &xunit.Failure{Message: \"vrpc\", Data: out.String()}, service.name, time.Now().Sub(start))\n\t}\n\tif !service.regexp.Match(out.Bytes()) {\n\t\tfmt.Fprintf(ctx.Stderr(), \"couldn't match regexp %q in output:\\n%v\\n\", service.regexp, out.String())\n\t\treturn generateXUnitTestSuite(ctx, &xunit.Failure{Message: \"vrpc\", Data: \"mismatching signature\"}, service.name, time.Now().Sub(start))\n\t}\n\treturn generateXUnitTestSuite(ctx, nil, service.name, time.Now().Sub(start))\n}\n\ntype prodService struct {\n\tname string \/\/ Name to use for the test description\n\tobjectName string \/\/ Object name of the service to connect to\n\tregexp *regexp.Regexp \/\/ Regexp that should match the signature output\n}\n\n\/\/ vanadiumProdServicesTest runs a test of vanadium production services.\nfunc vanadiumProdServicesTest(ctx *tool.Context, testName string, opts ...Opt) (_ *test.Result, e error) {\n\t\/\/ Initialize the test.\n\t\/\/ Need the new-stype base profile since many web tests will build\n\t\/\/ go apps that need it.\n\tcleanup, err := initTest(ctx, testName, []string{\"base\"})\n\tif err != nil {\n\t\treturn nil, internalTestError{err, \"Init\"}\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\tvroot, err := project.JiriRoot()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Install the vrpc tool.\n\tif err := ctx.Run().Command(\"jiri\", \"go\", \"install\", \"v.io\/x\/ref\/cmd\/vrpc\"); err != nil {\n\t\treturn nil, internalTestError{err, \"Install VRPC\"}\n\t}\n\t\/\/ Install the principal tool.\n\tif err := ctx.Run().Command(\"jiri\", \"go\", \"install\", \"v.io\/x\/ref\/cmd\/principal\"); err != nil {\n\t\treturn nil, internalTestError{err, \"Install Principal\"}\n\t}\n\ttmpdir, err := ctx.Run().TempDir(\"\", \"prod-services-test\")\n\tif err != nil {\n\t\treturn nil, internalTestError{err, \"Create temporary directory\"}\n\t}\n\tdefer collect.Error(func() error { return ctx.Run().RemoveAll(tmpdir) }, &e)\n\n\tblessingRoot, namespaceRoot := getServiceOpts(opts)\n\tallPassed, suites := true, []xunit.TestSuite{}\n\n\t\/\/ Fetch the \"root\" blessing that all services are blessed by.\n\tsuite, pubkey, blessingNames := testIdentityProviderHTTP(ctx, blessingRoot)\n\tsuites = append(suites, *suite)\n\n\tif suite.Failures == 0 {\n\t\t\/\/ Setup a principal that will be used by testAllProdServices and will\n\t\t\/\/ recognize the blessings of the prod services.\n\t\tprincipalDir, err := setupPrincipal(ctx, vroot, tmpdir, pubkey, blessingNames)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, suite := range testAllProdServices(ctx, vroot, principalDir, namespaceRoot) {\n\t\t\tallPassed = allPassed && (suite.Failures == 0)\n\t\t\tsuites = append(suites, *suite)\n\t\t}\n\t}\n\n\t\/\/ Create the xUnit report.\n\tif err := xunit.CreateReport(ctx, testName, suites); err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, suite := range suites {\n\t\tif suite.Failures > 0 {\n\t\t\t\/\/ At least one test failed:\n\t\t\treturn &test.Result{Status: test.Failed}, nil\n\t\t}\n\t}\n\treturn &test.Result{Status: test.Passed}, nil\n}\n\nfunc testAllProdServices(ctx *tool.Context, vroot, principalDir, namespaceRoot string) []*xunit.TestSuite {\n\tservices := []prodService{\n\t\tprodService{\n\t\t\tname: \"mounttable\",\n\t\t\tobjectName: namespaceRoot,\n\t\t\tregexp: regexp.MustCompile(`MountTable[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tname: \"application repository\",\n\t\t\tobjectName: namespaceRoot + \"\/applications\",\n\t\t\tregexp: regexp.MustCompile(`Application[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tname: \"binary repository\",\n\t\t\tobjectName: namespaceRoot + \"\/binaries\",\n\t\t\tregexp: regexp.MustCompile(`Binary[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tname: \"macaroon service\",\n\t\t\tobjectName: namespaceRoot + \"\/identity\/dev.v.io\/u\/macaroon\",\n\t\t\tregexp: regexp.MustCompile(`MacaroonBlesser[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tname: \"google identity service\",\n\t\t\tobjectName: namespaceRoot + \"\/identity\/dev.v.io\/u\/google\",\n\t\t\tregexp: regexp.MustCompile(`OAuthBlesser[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tobjectName: namespaceRoot + \"\/identity\/dev.v.io\/u\/discharger\",\n\t\t\tname: \"binary discharger\",\n\t\t\tregexp: regexp.MustCompile(`Discharger[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tobjectName: namespaceRoot + \"\/proxy-mon\/__debug\",\n\t\t\tname: \"proxy service\",\n\t\t\t\/\/ We just check that the returned signature has the __Reserved interface since\n\t\t\t\/\/ proxy-mon doesn't implement any other services.\n\t\t\tregexp: regexp.MustCompile(`__Reserved[[:space:]]+interface`),\n\t\t},\n\t}\n\n\tvar suites []*xunit.TestSuite\n\tfor _, service := range services {\n\t\tsuites = append(suites, testSingleProdService(ctx, vroot, principalDir, service))\n\t}\n\treturn suites\n}\n\n\/\/ testIdentityProviderHTTP tests that the identity provider's HTTP server is\n\/\/ up and running and also fetches the set of blessing names that the provider\n\/\/ claims to be authoritative on and the public key (encoded) used by that\n\/\/ identity provider to sign certificates for blessings.\n\/\/\n\/\/ PARANOIA ALERT:\n\/\/ This function is subject to man-in-the-middle attacks because it does not\n\/\/ verify the TLS certificates presented by the server. This does open the\n\/\/ door for an attack where a parallel universe of services could be setup\n\/\/ and fool this production services test into thinking all services are\n\/\/ up and running when they may not be.\n\/\/\n\/\/ The attacker in this case will have to be able to mess with the routing\n\/\/ tables on the machine running this test, or the network routes of routers\n\/\/ used by the machine, or mess up DNS entries.\nfunc testIdentityProviderHTTP(ctx *tool.Context, blessingRoot string) (suite *xunit.TestSuite, publickey string, blessingNames []string) {\n\turl := fmt.Sprintf(\"https:\/\/%s\/auth\/blessing-root\", blessingRoot)\n\tvar response struct {\n\t\tNames []string `json:\"names\"`\n\t\tPublicKey string `json:\"publicKey\"`\n\t}\n\tvar resp *http.Response\n\tvar err error\n\tvar start time.Time\n\tfn := func() error {\n\t\tstart = time.Now()\n\t\tresp, err = http.Get(url)\n\t\treturn err\n\t}\n\tif err = retry.Function(ctx, fn); err == nil {\n\t\tdefer resp.Body.Close()\n\t\terr = json.NewDecoder(resp.Body).Decode(&response)\n\t}\n\tvar failure *xunit.Failure\n\tif err != nil {\n\t\tfailure = &xunit.Failure{Message: \"identityd HTTP\", Data: err.Error()}\n\t}\n\treturn generateXUnitTestSuite(ctx, failure, url, time.Now().Sub(start)), response.PublicKey, response.Names\n}\n\nfunc setupPrincipal(ctx *tool.Context, vroot, tmpdir, pubkey string, blessingNames []string) (string, error) {\n\tdir := filepath.Join(tmpdir, \"credentials\")\n\tbin := filepath.Join(vroot, \"release\", \"go\", \"bin\", \"principal\")\n\tif err := ctx.Run().TimedCommand(test.DefaultTimeout, bin, \"create\", dir, \"prod-services-tester\"); err != nil {\n\t\tfmt.Fprintf(ctx.Stderr(), \"principal create failed: %v\\n\", err)\n\t\treturn \"\", err\n\t}\n\tfor _, name := range blessingNames {\n\t\tif err := ctx.Run().TimedCommand(test.DefaultTimeout, bin, \"--v23.credentials\", dir, \"recognize\", name, pubkey); err != nil {\n\t\t\tfmt.Fprintf(ctx.Stderr(), \"principal recognize %v %v failed: %v\\n\", name, pubkey, err)\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn dir, nil\n}\n\n\/\/ getServiceOpts extracts blessing root and namespace root from the\n\/\/ given Opts.\nfunc getServiceOpts(opts []Opt) (string, string) {\n\tblessingRoot := \"dev.v.io\"\n\tnamespaceRoot := \"\/ns.dev.v.io:8101\"\n\tfor _, opt := range opts {\n\t\tswitch v := opt.(type) {\n\t\tcase BlessingsRootOpt:\n\t\t\tblessingRoot = string(v)\n\t\tcase NamespaceRootOpt:\n\t\t\tnamespaceRoot = string(v)\n\t\t}\n\t}\n\treturn blessingRoot, namespaceRoot\n}\n<commit_msg>jiri-test\/internal\/test\/prod.go: print more info about failed commands<commit_after>\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage test\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"time\"\n\n\t\"v.io\/jiri\/collect\"\n\t\"v.io\/jiri\/project\"\n\t\"v.io\/jiri\/retry\"\n\t\"v.io\/jiri\/tool\"\n\t\"v.io\/x\/devtools\/internal\/test\"\n\t\"v.io\/x\/devtools\/internal\/xunit\"\n)\n\n\/\/ generateXUnitTestSuite generates an xUnit test suite that\n\/\/ encapsulates the given input.\nfunc generateXUnitTestSuite(ctx *tool.Context, failure *xunit.Failure, pkg string, duration time.Duration) *xunit.TestSuite {\n\t\/\/ Generate an xUnit test suite describing the result.\n\ts := xunit.TestSuite{Name: pkg}\n\tc := xunit.TestCase{\n\t\tClassname: pkg,\n\t\tName: \"Test\",\n\t\tTime: fmt.Sprintf(\"%.2f\", duration.Seconds()),\n\t}\n\tif failure != nil {\n\t\tfmt.Fprintf(ctx.Stdout(), \"%s ... failed\\n%v\\n\", pkg, failure.Data)\n\t\tc.Failures = append(c.Failures, *failure)\n\t\ts.Failures++\n\t} else {\n\t\tfmt.Fprintf(ctx.Stdout(), \"%s ... ok\\n\", pkg)\n\t}\n\ts.Tests++\n\ts.Cases = append(s.Cases, c)\n\treturn &s\n}\n\n\/\/ testSingleProdService test the given production service.\nfunc testSingleProdService(ctx *tool.Context, vroot, principalDir string, service prodService) *xunit.TestSuite {\n\tbin := filepath.Join(vroot, \"release\", \"go\", \"bin\", \"vrpc\")\n\tvar out bytes.Buffer\n\topts := ctx.Run().Opts()\n\topts.Verbose = true\n\topts.Stdout = &out\n\topts.Stderr = &out\n\tstart := time.Now()\n\targs := []string{}\n\tif principalDir != \"\" {\n\t\targs = append(args, \"--v23.credentials\", principalDir)\n\t}\n\targs = append(args, \"signature\", \"--show-reserved\")\n\tif principalDir == \"\" {\n\t\targs = append(args, \"--insecure\")\n\t}\n\targs = append(args, service.objectName)\n\tif err := ctx.Run().TimedCommandWithOpts(test.DefaultTimeout, opts, bin, args...); err != nil {\n\t\tfmt.Fprintf(ctx.Stderr(), \"Failed running %q: %v. Output:\\n%v\\n\", append([]string{bin}, args...), err, out.String())\n\t\treturn generateXUnitTestSuite(ctx, &xunit.Failure{Message: \"vrpc\", Data: out.String()}, service.name, time.Now().Sub(start))\n\t}\n\tif !service.regexp.Match(out.Bytes()) {\n\t\tfmt.Fprintf(ctx.Stderr(), \"couldn't match regexp %q in output:\\n%v\\n\", service.regexp, out.String())\n\t\treturn generateXUnitTestSuite(ctx, &xunit.Failure{Message: \"vrpc\", Data: \"mismatching signature\"}, service.name, time.Now().Sub(start))\n\t}\n\treturn generateXUnitTestSuite(ctx, nil, service.name, time.Now().Sub(start))\n}\n\ntype prodService struct {\n\tname string \/\/ Name to use for the test description\n\tobjectName string \/\/ Object name of the service to connect to\n\tregexp *regexp.Regexp \/\/ Regexp that should match the signature output\n}\n\n\/\/ vanadiumProdServicesTest runs a test of vanadium production services.\nfunc vanadiumProdServicesTest(ctx *tool.Context, testName string, opts ...Opt) (_ *test.Result, e error) {\n\t\/\/ Initialize the test.\n\t\/\/ Need the new-stype base profile since many web tests will build\n\t\/\/ go apps that need it.\n\tcleanup, err := initTest(ctx, testName, []string{\"base\"})\n\tif err != nil {\n\t\treturn nil, internalTestError{err, \"Init\"}\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\tvroot, err := project.JiriRoot()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Install the vrpc tool.\n\tif err := ctx.Run().Command(\"jiri\", \"go\", \"install\", \"v.io\/x\/ref\/cmd\/vrpc\"); err != nil {\n\t\treturn nil, internalTestError{err, \"Install VRPC\"}\n\t}\n\t\/\/ Install the principal tool.\n\tif err := ctx.Run().Command(\"jiri\", \"go\", \"install\", \"v.io\/x\/ref\/cmd\/principal\"); err != nil {\n\t\treturn nil, internalTestError{err, \"Install Principal\"}\n\t}\n\ttmpdir, err := ctx.Run().TempDir(\"\", \"prod-services-test\")\n\tif err != nil {\n\t\treturn nil, internalTestError{err, \"Create temporary directory\"}\n\t}\n\tdefer collect.Error(func() error { return ctx.Run().RemoveAll(tmpdir) }, &e)\n\n\tblessingRoot, namespaceRoot := getServiceOpts(opts)\n\tallPassed, suites := true, []xunit.TestSuite{}\n\n\t\/\/ Fetch the \"root\" blessing that all services are blessed by.\n\tsuite, pubkey, blessingNames := testIdentityProviderHTTP(ctx, blessingRoot)\n\tsuites = append(suites, *suite)\n\n\tif suite.Failures == 0 {\n\t\t\/\/ Setup a principal that will be used by testAllProdServices and will\n\t\t\/\/ recognize the blessings of the prod services.\n\t\tprincipalDir, err := setupPrincipal(ctx, vroot, tmpdir, pubkey, blessingNames)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, suite := range testAllProdServices(ctx, vroot, principalDir, namespaceRoot) {\n\t\t\tallPassed = allPassed && (suite.Failures == 0)\n\t\t\tsuites = append(suites, *suite)\n\t\t}\n\t}\n\n\t\/\/ Create the xUnit report.\n\tif err := xunit.CreateReport(ctx, testName, suites); err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, suite := range suites {\n\t\tif suite.Failures > 0 {\n\t\t\t\/\/ At least one test failed:\n\t\t\treturn &test.Result{Status: test.Failed}, nil\n\t\t}\n\t}\n\treturn &test.Result{Status: test.Passed}, nil\n}\n\nfunc testAllProdServices(ctx *tool.Context, vroot, principalDir, namespaceRoot string) []*xunit.TestSuite {\n\tservices := []prodService{\n\t\tprodService{\n\t\t\tname: \"mounttable\",\n\t\t\tobjectName: namespaceRoot,\n\t\t\tregexp: regexp.MustCompile(`MountTable[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tname: \"application repository\",\n\t\t\tobjectName: namespaceRoot + \"\/applications\",\n\t\t\tregexp: regexp.MustCompile(`Application[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tname: \"binary repository\",\n\t\t\tobjectName: namespaceRoot + \"\/binaries\",\n\t\t\tregexp: regexp.MustCompile(`Binary[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tname: \"macaroon service\",\n\t\t\tobjectName: namespaceRoot + \"\/identity\/dev.v.io\/u\/macaroon\",\n\t\t\tregexp: regexp.MustCompile(`MacaroonBlesser[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tname: \"google identity service\",\n\t\t\tobjectName: namespaceRoot + \"\/identity\/dev.v.io\/u\/google\",\n\t\t\tregexp: regexp.MustCompile(`OAuthBlesser[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tobjectName: namespaceRoot + \"\/identity\/dev.v.io\/u\/discharger\",\n\t\t\tname: \"binary discharger\",\n\t\t\tregexp: regexp.MustCompile(`Discharger[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tobjectName: namespaceRoot + \"\/proxy-mon\/__debug\",\n\t\t\tname: \"proxy service\",\n\t\t\t\/\/ We just check that the returned signature has the __Reserved interface since\n\t\t\t\/\/ proxy-mon doesn't implement any other services.\n\t\t\tregexp: regexp.MustCompile(`__Reserved[[:space:]]+interface`),\n\t\t},\n\t}\n\n\tvar suites []*xunit.TestSuite\n\tfor _, service := range services {\n\t\tsuites = append(suites, testSingleProdService(ctx, vroot, principalDir, service))\n\t}\n\treturn suites\n}\n\n\/\/ testIdentityProviderHTTP tests that the identity provider's HTTP server is\n\/\/ up and running and also fetches the set of blessing names that the provider\n\/\/ claims to be authoritative on and the public key (encoded) used by that\n\/\/ identity provider to sign certificates for blessings.\n\/\/\n\/\/ PARANOIA ALERT:\n\/\/ This function is subject to man-in-the-middle attacks because it does not\n\/\/ verify the TLS certificates presented by the server. This does open the\n\/\/ door for an attack where a parallel universe of services could be setup\n\/\/ and fool this production services test into thinking all services are\n\/\/ up and running when they may not be.\n\/\/\n\/\/ The attacker in this case will have to be able to mess with the routing\n\/\/ tables on the machine running this test, or the network routes of routers\n\/\/ used by the machine, or mess up DNS entries.\nfunc testIdentityProviderHTTP(ctx *tool.Context, blessingRoot string) (suite *xunit.TestSuite, publickey string, blessingNames []string) {\n\turl := fmt.Sprintf(\"https:\/\/%s\/auth\/blessing-root\", blessingRoot)\n\tvar response struct {\n\t\tNames []string `json:\"names\"`\n\t\tPublicKey string `json:\"publicKey\"`\n\t}\n\tvar resp *http.Response\n\tvar err error\n\tvar start time.Time\n\tfn := func() error {\n\t\tstart = time.Now()\n\t\tresp, err = http.Get(url)\n\t\treturn err\n\t}\n\tif err = retry.Function(ctx, fn); err == nil {\n\t\tdefer resp.Body.Close()\n\t\terr = json.NewDecoder(resp.Body).Decode(&response)\n\t}\n\tvar failure *xunit.Failure\n\tif err != nil {\n\t\tfailure = &xunit.Failure{Message: \"identityd HTTP\", Data: err.Error()}\n\t}\n\treturn generateXUnitTestSuite(ctx, failure, url, time.Now().Sub(start)), response.PublicKey, response.Names\n}\n\nfunc setupPrincipal(ctx *tool.Context, vroot, tmpdir, pubkey string, blessingNames []string) (string, error) {\n\tdir := filepath.Join(tmpdir, \"credentials\")\n\tbin := filepath.Join(vroot, \"release\", \"go\", \"bin\", \"principal\")\n\tif err := ctx.Run().TimedCommand(test.DefaultTimeout, bin, \"create\", dir, \"prod-services-tester\"); err != nil {\n\t\tfmt.Fprintf(ctx.Stderr(), \"principal create failed: %v\\n\", err)\n\t\treturn \"\", err\n\t}\n\tfor _, name := range blessingNames {\n\t\tif err := ctx.Run().TimedCommand(test.DefaultTimeout, bin, \"--v23.credentials\", dir, \"recognize\", name, pubkey); err != nil {\n\t\t\tfmt.Fprintf(ctx.Stderr(), \"principal recognize %v %v failed: %v\\n\", name, pubkey, err)\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn dir, nil\n}\n\n\/\/ getServiceOpts extracts blessing root and namespace root from the\n\/\/ given Opts.\nfunc getServiceOpts(opts []Opt) (string, string) {\n\tblessingRoot := \"dev.v.io\"\n\tnamespaceRoot := \"\/ns.dev.v.io:8101\"\n\tfor _, opt := range opts {\n\t\tswitch v := opt.(type) {\n\t\tcase BlessingsRootOpt:\n\t\t\tblessingRoot = string(v)\n\t\tcase NamespaceRootOpt:\n\t\t\tnamespaceRoot = string(v)\n\t\t}\n\t}\n\treturn blessingRoot, namespaceRoot\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Copyright (c) 2014 Marco Peereboom <marco@peereboom.us>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n *\/\n\npackage queueb\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"testing\"\n)\n\nconst (\n\tAudio = \"audio\"\n\tNetwork = \"network\"\n)\n\nvar (\n\tq *Queueb\n)\n\nfunc TestQueueb(t *testing.T) {\n\tvar err error\n\tq, err = New(\"myqueueb\", 10)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\terr = q.Register(\"subsystem\", 10)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\t\/\/ should fail\n\terr = q.Register(\"subsystem\", 10)\n\tif err == nil {\n\t\tt.Error(\"duplicate QueuebChannelPair not detected\")\n\t\treturn\n\t}\n\n\tq.Unregister(\"subsystem\")\n\tif q.Len() != 0 {\n\t\tt.Error(\"invalid QueuebChannelPair count\")\n\t\treturn\n\t}\n\n\t\/\/ should fail\n\tq.Unregister(\"subsystem\")\n\tif err == nil {\n\t\tt.Error(\"QueuebChannelPair should not exist\")\n\t\treturn\n\t}\n}\n\nfunc TestQueuebMessage(t *testing.T) {\n\terr := q.Register(Audio, 10)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\terr = q.Register(Network, 20)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\terr = q.Send(Audio, []string{Network}, \"Hello world!\")\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tm, err := q.Receive(Network)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif m.Error() != nil {\n\t\tt.Error(\"unexpected error message\")\n\t\treturn\n\t}\n\n\tif m.Message.(string) != \"Hello world!\" {\n\t\tt.Error(\"invalid message\")\n\t\treturn\n\t}\n\n\t\/\/ make sure we don't crash\n\tq.Unregister(Audio)\n\terr = q.Send(Audio, []string{Network}, \"Hello world!\")\n\t\/\/ should fail\n\tif err == nil {\n\t\tt.Error(\"queue should have been deleted\")\n\t\treturn\n\t}\n\t_, err = q.Receive(Audio)\n\t\/\/ should fail\n\tif err == nil {\n\t\tt.Error(\"receive should have failed\")\n\t\treturn\n\t}\n\n\terr = q.Send(Network, []string{Audio}, \"Hello world!\")\n\tif err != nil {\n\t\tt.Error(\"source queue should exist\")\n\t\treturn\n\t}\n\tm, err = q.Receive(Network)\n\t\/\/ get error\n\tif err != nil {\n\t\tt.Error(\"receive error failed\")\n\t\treturn\n\t}\n\n\t\/\/ should fail\n\tif m.Error() == nil {\n\t\tt.Error(\"not an error message\")\n\t\treturn\n\t}\n\n\tmm, ok := m.Message.(*QueuebMessageError)\n\tif !ok {\n\t\tt.Error(\"could not type assert m\")\n\t\treturn\n\t}\n\tt.Log(mm.Error)\n}\n\nfunc TestQueuebPrioQueueSamePrio(t *testing.T) {\n\terr := q.Register(Audio, 10)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tfor i := 0; i < 15; i++ {\n\t\terr := q.Send(Audio, []string{Network}, fmt.Sprintf(\"%v\", i))\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\tfor i := 0; i < 15; i++ {\n\t\tm, err := q.Receive(Network)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif m.Error() != nil {\n\t\t\tt.Error(\"unexpected error message\")\n\t\t\treturn\n\t\t}\n\t\tt.Log(\"got %v expected %v\", m.Message, fmt.Sprintf(\"%v\", i))\n\t\tif m.Message != fmt.Sprintf(\"%v\", i) {\n\t\t\tt.Error(\"out of order\", m.Message, fmt.Sprintf(\"%v\", i))\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc TestQueuebPrioQueueDifferentPrio(t *testing.T) {\n\tfor i := 0; i < 15; i++ {\n\t\terr := q.Send(Audio, []string{Network}, fmt.Sprintf(\"%v\", i))\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\tfor i := 15; i < 30; i++ {\n\t\terr := q.Send(Network, []string{Audio}, fmt.Sprintf(\"%v\", i))\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor i := 0; i < 15; i++ {\n\t\t\tm, err := q.Receive(Network)\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif m.Error() != nil {\n\t\t\t\tt.Error(\"unexpected error message\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Log(\"got %v expected %v\", m.Message, fmt.Sprintf(\"%v\", i))\n\t\t\tif m.Message != fmt.Sprintf(\"%v\", i) {\n\t\t\t\tt.Error(\"out of order\", m.Message, fmt.Sprintf(\"%v\", i))\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor i := 15; i < 30; i++ {\n\t\t\tm, err := q.Receive(Audio)\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif m.Error() != nil {\n\t\t\t\tt.Error(\"unexpected error message\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Log(\"got %v expected %v\", m.Message, fmt.Sprintf(\"%v\", i))\n\t\t\tif m.Message != fmt.Sprintf(\"%v\", i) {\n\t\t\t\tt.Error(\"out of order\", m.Message, fmt.Sprintf(\"%v\", i))\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\twg.Wait()\n}\n<commit_msg>add benchmark to meassure roundtrip<commit_after>\/*\n * Copyright (c) 2014 Marco Peereboom <marco@peereboom.us>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n *\/\n\npackage queueb\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst (\n\tAudio = \"audio\"\n\tNetwork = \"network\"\n)\n\nvar (\n\tq *Queueb\n)\n\nfunc TestQueueb(t *testing.T) {\n\tvar err error\n\tq, err = New(\"myqueueb\", 10)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\terr = q.Register(\"subsystem\", 10)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\t\/\/ should fail\n\terr = q.Register(\"subsystem\", 10)\n\tif err == nil {\n\t\tt.Error(\"duplicate QueuebChannelPair not detected\")\n\t\treturn\n\t}\n\n\tq.Unregister(\"subsystem\")\n\tif q.Len() != 0 {\n\t\tt.Error(\"invalid QueuebChannelPair count\")\n\t\treturn\n\t}\n\n\t\/\/ should fail\n\tq.Unregister(\"subsystem\")\n\tif err == nil {\n\t\tt.Error(\"QueuebChannelPair should not exist\")\n\t\treturn\n\t}\n}\n\nfunc TestQueuebMessage(t *testing.T) {\n\terr := q.Register(Audio, 10)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\terr = q.Register(Network, 20)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\terr = q.Send(Audio, []string{Network}, \"Hello world!\")\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tm, err := q.Receive(Network)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tif m.Error() != nil {\n\t\tt.Error(\"unexpected error message\")\n\t\treturn\n\t}\n\n\tif m.Message.(string) != \"Hello world!\" {\n\t\tt.Error(\"invalid message\")\n\t\treturn\n\t}\n\n\t\/\/ make sure we don't crash\n\tq.Unregister(Audio)\n\terr = q.Send(Audio, []string{Network}, \"Hello world!\")\n\t\/\/ should fail\n\tif err == nil {\n\t\tt.Error(\"queue should have been deleted\")\n\t\treturn\n\t}\n\t_, err = q.Receive(Audio)\n\t\/\/ should fail\n\tif err == nil {\n\t\tt.Error(\"receive should have failed\")\n\t\treturn\n\t}\n\n\terr = q.Send(Network, []string{Audio}, \"Hello world!\")\n\tif err != nil {\n\t\tt.Error(\"source queue should exist\")\n\t\treturn\n\t}\n\tm, err = q.Receive(Network)\n\t\/\/ get error\n\tif err != nil {\n\t\tt.Error(\"receive error failed\")\n\t\treturn\n\t}\n\n\t\/\/ should fail\n\tif m.Error() == nil {\n\t\tt.Error(\"not an error message\")\n\t\treturn\n\t}\n\n\tmm, ok := m.Message.(*QueuebMessageError)\n\tif !ok {\n\t\tt.Error(\"could not type assert m\")\n\t\treturn\n\t}\n\tt.Log(mm.Error)\n}\n\nfunc TestQueuebPrioQueueSamePrio(t *testing.T) {\n\terr := q.Register(Audio, 10)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\tfor i := 0; i < 15; i++ {\n\t\terr := q.Send(Audio, []string{Network}, fmt.Sprintf(\"%v\", i))\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\tfor i := 0; i < 15; i++ {\n\t\tm, err := q.Receive(Network)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t\tif m.Error() != nil {\n\t\t\tt.Error(\"unexpected error message\")\n\t\t\treturn\n\t\t}\n\t\tt.Log(\"got %v expected %v\", m.Message, fmt.Sprintf(\"%v\", i))\n\t\tif m.Message != fmt.Sprintf(\"%v\", i) {\n\t\t\tt.Error(\"out of order\", m.Message, fmt.Sprintf(\"%v\", i))\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc TestQueuebPrioQueueDifferentPrio(t *testing.T) {\n\tfor i := 0; i < 15; i++ {\n\t\terr := q.Send(Audio, []string{Network}, fmt.Sprintf(\"%v\", i))\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\tfor i := 15; i < 30; i++ {\n\t\terr := q.Send(Network, []string{Audio}, fmt.Sprintf(\"%v\", i))\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor i := 0; i < 15; i++ {\n\t\t\tm, err := q.Receive(Network)\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif m.Error() != nil {\n\t\t\t\tt.Error(\"unexpected error message\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Log(\"got %v expected %v\", m.Message, fmt.Sprintf(\"%v\", i))\n\t\t\tif m.Message != fmt.Sprintf(\"%v\", i) {\n\t\t\t\tt.Error(\"out of order\", m.Message, fmt.Sprintf(\"%v\", i))\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor i := 15; i < 30; i++ {\n\t\t\tm, err := q.Receive(Audio)\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif m.Error() != nil {\n\t\t\t\tt.Error(\"unexpected error message\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Log(\"got %v expected %v\", m.Message, fmt.Sprintf(\"%v\", i))\n\t\t\tif m.Message != fmt.Sprintf(\"%v\", i) {\n\t\t\t\tt.Error(\"out of order\", m.Message, fmt.Sprintf(\"%v\", i))\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\twg.Wait()\n}\n\nfunc BenchmarkRoundTrip(b *testing.B) {\n\tcount := 10000\n\tstart := time.Now()\n\tfor i := 0; i < count; i++ {\n\t\terr := q.Send(Audio, []string{Network}, \"a\")\n\t\tif err != nil {\n\t\t\tb.Error(err)\n\t\t\treturn\n\t\t}\n\t\t_, err = q.Receive(Network)\n\t\tif err != nil {\n\t\t\tb.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n\tend := time.Now()\n\tb.Logf(\"Roundtrip: %v\", end.Sub(start)\/10000)\n}\n<|endoftext|>"} {"text":"<commit_before>package indicators\n\nimport (\n\t\"errors\"\n\t\"github.com\/thetruetrade\/gotrade\"\n)\n\n\/\/ A Minus Directional Indicator (MinusDi), no storage, for use in other indicators\ntype MinusDiWithoutStorage struct {\n\t*baseIndicator\n\t*baseFloatBounds\n\n\t\/\/ private variables\n\tvalueAvailableAction ValueAvailableActionFloat\n\tperiodCounter int\n\tpreviousHigh float64\n\tpreviousLow float64\n\tpreviousMinusDM float64\n\tpreviousTrueRange float64\n\tcurrentTrueRange float64\n\ttrueRange *TrueRange\n\ttimePeriod int\n}\n\n\/\/ NewMinusDiWithoutStorage creates a Minus Directional Indicator (MinusDi) without storage\nfunc NewMinusDiWithoutStorage(timePeriod int, valueAvailableAction ValueAvailableActionFloat) (indicator *MinusDiWithoutStorage, err error) {\n\n\t\/\/ an indicator without storage MUST have a value available action\n\tif valueAvailableAction == nil {\n\t\treturn nil, ErrValueAvailableActionIsNil\n\t}\n\n\t\/\/ the minimum timeperiod for this indicator is 1\n\tif timePeriod < 1 {\n\t\treturn nil, errors.New(\"timePeriod is less than the minimum (1)\")\n\t}\n\n\t\/\/ check the maximum timeperiod\n\tif timePeriod > MaximumLookbackPeriod {\n\t\treturn nil, errors.New(\"timePeriod is greater than the maximum (100000)\")\n\t}\n\n\tlookback := 1\n\tif timePeriod > 1 {\n\t\tlookback = timePeriod\n\t}\n\tind := MinusDiWithoutStorage{\n\t\tbaseIndicator: newBaseIndicator(lookback),\n\t\tbaseFloatBounds: newBaseFloatBounds(),\n\t\tperiodCounter: -1,\n\t\tpreviousMinusDM: 0.0,\n\t\tpreviousTrueRange: 0.0,\n\t\tcurrentTrueRange: 0.0,\n\t\tvalueAvailableAction: valueAvailableAction,\n\t\ttimePeriod: timePeriod,\n\t}\n\n\tind.trueRange, err = NewTrueRange()\n\n\tind.trueRange.valueAvailableAction = func(dataItem float64, streamBarIndex int) {\n\t\tind.currentTrueRange = dataItem\n\t}\n\n\treturn &ind, nil\n}\n\n\/\/ A Minus Directional Indicator (MinusDi)\ntype MinusDi struct {\n\t*MinusDiWithoutStorage\n\n\t\/\/ public variables\n\tData []float64\n}\n\n\/\/ NewMinusDi creates a Minus Directional Indicator (MinusDi) for online usage\nfunc NewMinusDi(timePeriod int) (indicator *MinusDi, err error) {\n\tind := MinusDi{}\n\tind.MinusDiWithoutStorage, err = NewMinusDiWithoutStorage(timePeriod, func(dataItem float64, streamBarIndex int) {\n\t\tind.Data = append(ind.Data, dataItem)\n\t})\n\n\treturn &ind, err\n}\n\n\/\/ NewDefaultMinusDi creates a Minus Directional Indicator (MinusDi) for online usage with default parameters\n\/\/\t- timePeriod: 14\nfunc NewDefaultMinusDi() (indicator *MinusDi, err error) {\n\ttimePeriod := 14\n\treturn NewMinusDi(timePeriod)\n}\n\n\/\/ NewMinusDiWithSrcLen creates a Minus Directional Indicator (MinusDi) for offline usage\nfunc NewMinusDiWithSrcLen(sourceLength uint, timePeriod int) (indicator *MinusDi, err error) {\n\tind, err := NewMinusDi(timePeriod)\n\n\t\/\/ only initialise the storage if there is enough source data to require it\n\tif sourceLength-uint(ind.GetLookbackPeriod()) > 1 {\n\t\tind.Data = make([]float64, 0, sourceLength-uint(ind.GetLookbackPeriod()))\n\t}\n\n\treturn ind, err\n}\n\n\/\/ NewDefaultMinusDiWithSrcLen creates a Minus Directional Indicator (MinusDi) for offline usage with default parameters\nfunc NewDefaultMinusDiWithSrcLen(sourceLength uint) (indicator *MinusDi, err error) {\n\tind, err := NewDefaultMinusDi()\n\n\t\/\/ only initialise the storage if there is enough source data to require it\n\tif sourceLength-uint(ind.GetLookbackPeriod()) > 1 {\n\t\tind.Data = make([]float64, 0, sourceLength-uint(ind.GetLookbackPeriod()))\n\t}\n\n\treturn ind, err\n}\n\n\/\/ NewMinusDiForStream creates a Minus Directional Indicator (MinusDi) for online usage with a source data stream\nfunc NewMinusDiForStream(priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int) (indicator *MinusDi, err error) {\n\tind, err := NewMinusDi(timePeriod)\n\tpriceStream.AddTickSubscription(ind)\n\treturn ind, err\n}\n\n\/\/ NewDefaultMinusDiForStream creates a Minus Directional Indicator (MinusDi) for online usage with a source data stream\nfunc NewDefaultMinusDiForStream(priceStream gotrade.DOHLCVStreamSubscriber) (indicator *MinusDi, err error) {\n\tind, err := NewDefaultMinusDi()\n\tpriceStream.AddTickSubscription(ind)\n\treturn ind, err\n}\n\n\/\/ NewMinusDiForStreamWithSrcLen creates a Minus Directional Indicator (MinusDi) for offline usage with a source data stream\nfunc NewMinusDiForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int) (indicator *MinusDi, err error) {\n\tind, err := NewMinusDiWithSrcLen(sourceLength, timePeriod)\n\tpriceStream.AddTickSubscription(ind)\n\treturn ind, err\n}\n\n\/\/ NewDefaultMinusDiForStreamWithSrcLen creates a Minus Directional Indicator (MinusDi) for offline usage with a source data stream\nfunc NewDefaultMinusDiForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber) (indicator *MinusDi, err error) {\n\tind, err := NewDefaultMinusDiWithSrcLen(sourceLength)\n\tpriceStream.AddTickSubscription(ind)\n\treturn ind, err\n}\n\n\/\/ ReceiveDOHLCVTick consumes a source data DOHLCV price tick\nfunc (ind *MinusDiWithoutStorage) ReceiveDOHLCVTick(tickData gotrade.DOHLCV, streamBarIndex int) {\n\n\t\/\/ forward to the true range indicator first using previous data\n\tind.trueRange.ReceiveDOHLCVTick(tickData, streamBarIndex)\n\n\tind.periodCounter += 1\n\thigh := tickData.H()\n\tlow := tickData.L()\n\tdiffP := high - ind.previousHigh\n\tdiffM := ind.previousLow - low\n\n\tif ind.lookbackPeriod == 1 {\n\t\tif ind.periodCounter > 0 {\n\n\t\t\t\/\/ forward to the true range indicator first using previous data\n\t\t\tind.trueRange.ReceiveDOHLCVTick(tickData, streamBarIndex)\n\n\t\t\tvar result float64\n\t\t\tif (diffM > 0) && (diffP < diffM) && ind.currentTrueRange != 0.0 {\n\t\t\t\tresult = diffM \/ ind.currentTrueRange\n\t\t\t} else {\n\t\t\t\tresult = 0\n\t\t\t}\n\n\t\t\t\/\/ increment the number of results this indicator can be expected to return\n\t\t\tind.dataLength += 1\n\n\t\t\tif ind.validFromBar == -1 {\n\t\t\t\t\/\/ set the streamBarIndex from which this indicator returns valid results\n\t\t\t\tind.validFromBar = streamBarIndex\n\t\t\t}\n\n\t\t\t\/\/ update the maximum result value\n\t\t\tif result > ind.maxValue {\n\t\t\t\tind.maxValue = result\n\t\t\t}\n\n\t\t\t\/\/ update the minimum result value\n\t\t\tif result < ind.minValue {\n\t\t\t\tind.minValue = result\n\t\t\t}\n\n\t\t\t\/\/ notify of a new result value though the value available action\n\t\t\tind.valueAvailableAction(result, streamBarIndex)\n\t\t}\n\t} else {\n\t\tif ind.periodCounter > 0 {\n\t\t\tif ind.periodCounter < ind.timePeriod {\n\t\t\t\tif (diffM > 0) && (diffP < diffM) {\n\t\t\t\t\tind.previousMinusDM += diffM\n\t\t\t\t}\n\t\t\t\tind.previousTrueRange += ind.currentTrueRange\n\t\t\t} else {\n\t\t\t\tvar result float64\n\t\t\t\tind.previousTrueRange = ind.previousTrueRange - (ind.previousTrueRange \/ float64(ind.timePeriod)) + ind.currentTrueRange\n\t\t\t\tif (diffM > 0) && (diffP < diffM) {\n\t\t\t\t\tind.previousMinusDM = ind.previousMinusDM - (ind.previousMinusDM \/ float64(ind.timePeriod)) + diffM\n\t\t\t\t} else {\n\t\t\t\t\tind.previousMinusDM = ind.previousMinusDM - (ind.previousMinusDM \/ float64(ind.timePeriod))\n\t\t\t\t}\n\n\t\t\t\tif ind.previousTrueRange != 0.0 {\n\t\t\t\t\tresult = float64(100.0) * ind.previousMinusDM \/ ind.previousTrueRange\n\t\t\t\t} else {\n\t\t\t\t\tresult = 0.0\n\t\t\t\t}\n\n\t\t\t\t\/\/ increment the number of results this indicator can be expected to return\n\t\t\t\tind.dataLength += 1\n\n\t\t\t\tif ind.validFromBar == -1 {\n\t\t\t\t\t\/\/ set the streamBarIndex from which this indicator returns valid results\n\t\t\t\t\tind.validFromBar = streamBarIndex\n\t\t\t\t}\n\n\t\t\t\t\/\/ update the maximum result value\n\t\t\t\tif result > ind.maxValue {\n\t\t\t\t\tind.maxValue = result\n\t\t\t\t}\n\n\t\t\t\t\/\/ update the minimum result value\n\t\t\t\tif result < ind.minValue {\n\t\t\t\t\tind.minValue = result\n\t\t\t\t}\n\n\t\t\t\t\/\/ notify of a new result value though the value available action\n\t\t\t\tind.valueAvailableAction(result, streamBarIndex)\n\t\t\t}\n\t\t}\n\t}\n\n\tind.previousHigh = high\n\tind.previousLow = low\n}\n<commit_msg>#76 Remove duplication - minusdi<commit_after>package indicators\n\nimport (\n\t\"errors\"\n\t\"github.com\/thetruetrade\/gotrade\"\n)\n\n\/\/ A Minus Directional Indicator (MinusDi), no storage, for use in other indicators\ntype MinusDiWithoutStorage struct {\n\t*baseIndicatorWithFloatBounds\n\n\t\/\/ private variables\n\tperiodCounter int\n\tpreviousHigh float64\n\tpreviousLow float64\n\tpreviousMinusDM float64\n\tpreviousTrueRange float64\n\tcurrentTrueRange float64\n\ttrueRange *TrueRange\n\ttimePeriod int\n}\n\n\/\/ NewMinusDiWithoutStorage creates a Minus Directional Indicator (MinusDi) without storage\nfunc NewMinusDiWithoutStorage(timePeriod int, valueAvailableAction ValueAvailableActionFloat) (indicator *MinusDiWithoutStorage, err error) {\n\n\t\/\/ an indicator without storage MUST have a value available action\n\tif valueAvailableAction == nil {\n\t\treturn nil, ErrValueAvailableActionIsNil\n\t}\n\n\t\/\/ the minimum timeperiod for this indicator is 1\n\tif timePeriod < 1 {\n\t\treturn nil, errors.New(\"timePeriod is less than the minimum (1)\")\n\t}\n\n\t\/\/ check the maximum timeperiod\n\tif timePeriod > MaximumLookbackPeriod {\n\t\treturn nil, errors.New(\"timePeriod is greater than the maximum (100000)\")\n\t}\n\n\tlookback := 1\n\tif timePeriod > 1 {\n\t\tlookback = timePeriod\n\t}\n\tind := MinusDiWithoutStorage{\n\t\tbaseIndicatorWithFloatBounds: newBaseIndicatorWithFloatBounds(lookback, valueAvailableAction),\n\t\tperiodCounter: -1,\n\t\tpreviousMinusDM: 0.0,\n\t\tpreviousTrueRange: 0.0,\n\t\tcurrentTrueRange: 0.0,\n\t\ttimePeriod: timePeriod,\n\t}\n\n\tind.trueRange, err = NewTrueRange()\n\n\tind.trueRange.valueAvailableAction = func(dataItem float64, streamBarIndex int) {\n\t\tind.currentTrueRange = dataItem\n\t}\n\n\treturn &ind, nil\n}\n\n\/\/ A Minus Directional Indicator (MinusDi)\ntype MinusDi struct {\n\t*MinusDiWithoutStorage\n\n\t\/\/ public variables\n\tData []float64\n}\n\n\/\/ NewMinusDi creates a Minus Directional Indicator (MinusDi) for online usage\nfunc NewMinusDi(timePeriod int) (indicator *MinusDi, err error) {\n\tind := MinusDi{}\n\tind.MinusDiWithoutStorage, err = NewMinusDiWithoutStorage(timePeriod, func(dataItem float64, streamBarIndex int) {\n\t\tind.Data = append(ind.Data, dataItem)\n\t})\n\n\treturn &ind, err\n}\n\n\/\/ NewDefaultMinusDi creates a Minus Directional Indicator (MinusDi) for online usage with default parameters\n\/\/\t- timePeriod: 14\nfunc NewDefaultMinusDi() (indicator *MinusDi, err error) {\n\ttimePeriod := 14\n\treturn NewMinusDi(timePeriod)\n}\n\n\/\/ NewMinusDiWithSrcLen creates a Minus Directional Indicator (MinusDi) for offline usage\nfunc NewMinusDiWithSrcLen(sourceLength uint, timePeriod int) (indicator *MinusDi, err error) {\n\tind, err := NewMinusDi(timePeriod)\n\n\t\/\/ only initialise the storage if there is enough source data to require it\n\tif sourceLength-uint(ind.GetLookbackPeriod()) > 1 {\n\t\tind.Data = make([]float64, 0, sourceLength-uint(ind.GetLookbackPeriod()))\n\t}\n\n\treturn ind, err\n}\n\n\/\/ NewDefaultMinusDiWithSrcLen creates a Minus Directional Indicator (MinusDi) for offline usage with default parameters\nfunc NewDefaultMinusDiWithSrcLen(sourceLength uint) (indicator *MinusDi, err error) {\n\tind, err := NewDefaultMinusDi()\n\n\t\/\/ only initialise the storage if there is enough source data to require it\n\tif sourceLength-uint(ind.GetLookbackPeriod()) > 1 {\n\t\tind.Data = make([]float64, 0, sourceLength-uint(ind.GetLookbackPeriod()))\n\t}\n\n\treturn ind, err\n}\n\n\/\/ NewMinusDiForStream creates a Minus Directional Indicator (MinusDi) for online usage with a source data stream\nfunc NewMinusDiForStream(priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int) (indicator *MinusDi, err error) {\n\tind, err := NewMinusDi(timePeriod)\n\tpriceStream.AddTickSubscription(ind)\n\treturn ind, err\n}\n\n\/\/ NewDefaultMinusDiForStream creates a Minus Directional Indicator (MinusDi) for online usage with a source data stream\nfunc NewDefaultMinusDiForStream(priceStream gotrade.DOHLCVStreamSubscriber) (indicator *MinusDi, err error) {\n\tind, err := NewDefaultMinusDi()\n\tpriceStream.AddTickSubscription(ind)\n\treturn ind, err\n}\n\n\/\/ NewMinusDiForStreamWithSrcLen creates a Minus Directional Indicator (MinusDi) for offline usage with a source data stream\nfunc NewMinusDiForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber, timePeriod int) (indicator *MinusDi, err error) {\n\tind, err := NewMinusDiWithSrcLen(sourceLength, timePeriod)\n\tpriceStream.AddTickSubscription(ind)\n\treturn ind, err\n}\n\n\/\/ NewDefaultMinusDiForStreamWithSrcLen creates a Minus Directional Indicator (MinusDi) for offline usage with a source data stream\nfunc NewDefaultMinusDiForStreamWithSrcLen(sourceLength uint, priceStream gotrade.DOHLCVStreamSubscriber) (indicator *MinusDi, err error) {\n\tind, err := NewDefaultMinusDiWithSrcLen(sourceLength)\n\tpriceStream.AddTickSubscription(ind)\n\treturn ind, err\n}\n\n\/\/ ReceiveDOHLCVTick consumes a source data DOHLCV price tick\nfunc (ind *MinusDiWithoutStorage) ReceiveDOHLCVTick(tickData gotrade.DOHLCV, streamBarIndex int) {\n\n\t\/\/ forward to the true range indicator first using previous data\n\tind.trueRange.ReceiveDOHLCVTick(tickData, streamBarIndex)\n\n\tind.periodCounter += 1\n\thigh := tickData.H()\n\tlow := tickData.L()\n\tdiffP := high - ind.previousHigh\n\tdiffM := ind.previousLow - low\n\n\tif ind.lookbackPeriod == 1 {\n\t\tif ind.periodCounter > 0 {\n\n\t\t\t\/\/ forward to the true range indicator first using previous data\n\t\t\tind.trueRange.ReceiveDOHLCVTick(tickData, streamBarIndex)\n\n\t\t\tvar result float64\n\t\t\tif (diffM > 0) && (diffP < diffM) && ind.currentTrueRange != 0.0 {\n\t\t\t\tresult = diffM \/ ind.currentTrueRange\n\t\t\t} else {\n\t\t\t\tresult = 0\n\t\t\t}\n\n\t\t\tind.UpdateIndicatorWithNewValue(result, streamBarIndex)\n\t\t}\n\t} else {\n\t\tif ind.periodCounter > 0 {\n\t\t\tif ind.periodCounter < ind.timePeriod {\n\t\t\t\tif (diffM > 0) && (diffP < diffM) {\n\t\t\t\t\tind.previousMinusDM += diffM\n\t\t\t\t}\n\t\t\t\tind.previousTrueRange += ind.currentTrueRange\n\t\t\t} else {\n\t\t\t\tvar result float64\n\t\t\t\tind.previousTrueRange = ind.previousTrueRange - (ind.previousTrueRange \/ float64(ind.timePeriod)) + ind.currentTrueRange\n\t\t\t\tif (diffM > 0) && (diffP < diffM) {\n\t\t\t\t\tind.previousMinusDM = ind.previousMinusDM - (ind.previousMinusDM \/ float64(ind.timePeriod)) + diffM\n\t\t\t\t} else {\n\t\t\t\t\tind.previousMinusDM = ind.previousMinusDM - (ind.previousMinusDM \/ float64(ind.timePeriod))\n\t\t\t\t}\n\n\t\t\t\tif ind.previousTrueRange != 0.0 {\n\t\t\t\t\tresult = float64(100.0) * ind.previousMinusDM \/ ind.previousTrueRange\n\t\t\t\t} else {\n\t\t\t\t\tresult = 0.0\n\t\t\t\t}\n\n\t\t\t\tind.UpdateIndicatorWithNewValue(result, streamBarIndex)\n\t\t\t}\n\t\t}\n\t}\n\n\tind.previousHigh = high\n\tind.previousLow = low\n}\n<|endoftext|>"} {"text":"<commit_before>package internal\n\nimport (\n\t\"reflect\"\n\t\"unsafe\"\n)\n\nconst (\n\tsizeOfUint64 = 8\n)\n\n\/\/ Hash is a means of converting indices into strings.\ntype Hash struct {\n\tbytes []byte\n\theader *reflect.SliceHeader\n}\n\n\/\/ NewHash creates a hash.\nfunc NewHash(ni uint) *Hash {\n\thash := &Hash{bytes: make([]byte, 0)}\n\thash.header = (*reflect.SliceHeader)(unsafe.Pointer(&hash.bytes))\n\thash.header.Cap = int(ni * sizeOfUint64)\n\thash.header.Len = hash.header.Cap\n\treturn hash\n}\n\n\/\/ Key converts an index into a string.\nfunc (self *Hash) Key(index []uint64) string {\n\tself.header.Data = uintptr(((*reflect.SliceHeader)(unsafe.Pointer(&index))).Data)\n\tkey := string(self.bytes)\n\tself.header.Data = 0\n\treturn key\n}\n<commit_msg>a\/internal: fix the description of Hash<commit_after>package internal\n\nimport (\n\t\"reflect\"\n\t\"unsafe\"\n)\n\nconst (\n\tsizeOfUint64 = 8\n)\n\n\/\/ Hash is a means of creating hash keys from indices.\ntype Hash struct {\n\tbytes []byte\n\theader *reflect.SliceHeader\n}\n\n\/\/ NewHash creates a hash.\nfunc NewHash(ni uint) *Hash {\n\thash := &Hash{bytes: make([]byte, 0)}\n\thash.header = (*reflect.SliceHeader)(unsafe.Pointer(&hash.bytes))\n\thash.header.Cap = int(ni * sizeOfUint64)\n\thash.header.Len = hash.header.Cap\n\treturn hash\n}\n\n\/\/ Key creates a hash key from an index.\nfunc (self *Hash) Key(index []uint64) string {\n\tself.header.Data = uintptr(((*reflect.SliceHeader)(unsafe.Pointer(&index))).Data)\n\tkey := string(self.bytes)\n\tself.header.Data = 0\n\treturn key\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>first commit<commit_after><|endoftext|>"} {"text":"<commit_before>package session\n\nimport (\n\t\"crypto\/rand\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/13pinj\/todoapp\/Godeps\/_workspace\/src\/github.com\/gin-gonic\/gin\"\n\t\"github.com\/13pinj\/todoapp\/models\"\n)\n\ntype dbSession struct {\n\tSID string `gorm:\"primary_key\"`\n\tCreatedAt time.Time\n\tIntMap map[string]int `sql:\"-\"`\n\tEncodedMap string\n}\n\nfunc (s *dbSession) ID() string {\n\treturn s.SID\n}\n\nfunc (s *dbSession) GetInt(key string) int {\n\treturn s.IntMap[key]\n}\n\nfunc (s *dbSession) SetInt(key string, val int) {\n\tif key == \"\" {\n\t\treturn\n\t}\n\ts.IntMap[key] = val\n\ts.encode()\n\tmodels.DB.Save(s)\n}\n\nfunc (s *dbSession) encode() {\n\tval := url.Values{}\n\tfor k, v := range s.IntMap {\n\t\tvs := strconv.Itoa(v)\n\t\tval.Set(k, vs)\n\t}\n\ts.EncodedMap = val.Encode()\n}\n\nfunc (s *dbSession) decode() {\n\ts.IntMap = make(map[string]int)\n\tval, err := url.ParseQuery(s.EncodedMap)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor k := range val {\n\t\tv, err := strconv.Atoi(val.Get(k))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\ts.IntMap[k] = v\n\t}\n}\n\nfunc dbHasSession(c *gin.Context) (string, bool) {\n\tst, err := c.Request.Cookie(\"SessID\")\n\tif err != nil {\n\t\treturn \"\", false\n\t}\n\treturn st.Value, true\n}\n\nfunc dbSessionInit(c *gin.Context) *dbSession {\n\tsession := dbSession{}\n\n\thash := make([]byte, 6)\n\trand.Read(hash)\n\n\tsession.SID = fmt.Sprintf(\"%x\", hash)\n\tsession.IntMap = make(map[string]int)\n\n\tmodels.DB.Create(&session)\n\n\thttp.SetCookie(c.Writer, &http.Cookie{\n\t\tName: \"SessID\",\n\t\tValue: session.SID,\n\t\tPath: \"\/\",\n\t})\n\treturn &session\n}\n\nfunc dbFromContext(c *gin.Context) *dbSession {\n\tiid, exists := c.Get(\"session_id\")\n\tif exists {\n\t\tid := iid.(string)\n\t\ts := &dbSession{}\n\t\terr := models.DB.Where(\"s_id = ?\", id).Find(s).Error\n\t\tif err == nil {\n\t\t\ts.decode()\n\t\t\treturn s\n\t\t}\n\t}\n\tkey, ok := dbHasSession(c)\n\tif ok {\n\t\ts := &dbSession{}\n\t\terr := models.DB.Where(\"s_id = ?\", key).Find(s).Error\n\t\tif err == nil {\n\t\t\ts.decode()\n\t\t\tc.Set(\"session_id\", key)\n\t\t\treturn s\n\t\t}\n\t}\n\tses := dbSessionInit(c)\n\tc.Set(\"session_id\", ses.ID())\n\treturn ses\n}\n\nfunc init() {\n\tmodels.DB.AutoMigrate(&dbSession{})\n}\n<commit_msg>Make sessions live longer<commit_after>package session\n\nimport (\n\t\"crypto\/rand\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/13pinj\/todoapp\/Godeps\/_workspace\/src\/github.com\/gin-gonic\/gin\"\n\t\"github.com\/13pinj\/todoapp\/models\"\n)\n\ntype dbSession struct {\n\tSID string `gorm:\"primary_key\"`\n\tCreatedAt time.Time\n\tIntMap map[string]int `sql:\"-\"`\n\tEncodedMap string\n}\n\nfunc (s *dbSession) ID() string {\n\treturn s.SID\n}\n\nfunc (s *dbSession) GetInt(key string) int {\n\treturn s.IntMap[key]\n}\n\nfunc (s *dbSession) SetInt(key string, val int) {\n\tif key == \"\" {\n\t\treturn\n\t}\n\ts.IntMap[key] = val\n\ts.encode()\n\tmodels.DB.Save(s)\n}\n\nfunc (s *dbSession) encode() {\n\tval := url.Values{}\n\tfor k, v := range s.IntMap {\n\t\tvs := strconv.Itoa(v)\n\t\tval.Set(k, vs)\n\t}\n\ts.EncodedMap = val.Encode()\n}\n\nfunc (s *dbSession) decode() {\n\ts.IntMap = make(map[string]int)\n\tval, err := url.ParseQuery(s.EncodedMap)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor k := range val {\n\t\tv, err := strconv.Atoi(val.Get(k))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\ts.IntMap[k] = v\n\t}\n}\n\nfunc dbHasSession(c *gin.Context) (string, bool) {\n\tst, err := c.Request.Cookie(\"SessID\")\n\tif err != nil {\n\t\treturn \"\", false\n\t}\n\treturn st.Value, true\n}\n\nfunc dbSessionInit(c *gin.Context) *dbSession {\n\tsession := dbSession{}\n\n\thash := make([]byte, 6)\n\trand.Read(hash)\n\n\tsession.SID = fmt.Sprintf(\"%x\", hash)\n\tsession.IntMap = make(map[string]int)\n\n\tmodels.DB.Create(&session)\n\n\thttp.SetCookie(c.Writer, &http.Cookie{\n\t\tName: \"SessID\",\n\t\tValue: session.SID,\n\t\tPath: \"\/\",\n\t\tExpires: time.Now().AddDate(10, 0, 0),\n\t})\n\treturn &session\n}\n\nfunc dbFromContext(c *gin.Context) *dbSession {\n\tiid, exists := c.Get(\"session_id\")\n\tif exists {\n\t\tid := iid.(string)\n\t\ts := &dbSession{}\n\t\terr := models.DB.Where(\"s_id = ?\", id).Find(s).Error\n\t\tif err == nil {\n\t\t\ts.decode()\n\t\t\treturn s\n\t\t}\n\t}\n\tkey, ok := dbHasSession(c)\n\tif ok {\n\t\ts := &dbSession{}\n\t\terr := models.DB.Where(\"s_id = ?\", key).Find(s).Error\n\t\tif err == nil {\n\t\t\ts.decode()\n\t\t\tc.Set(\"session_id\", key)\n\t\t\treturn s\n\t\t}\n\t}\n\tses := dbSessionInit(c)\n\tc.Set(\"session_id\", ses.ID())\n\treturn ses\n}\n\nfunc init() {\n\tmodels.DB.AutoMigrate(&dbSession{})\n}\n<|endoftext|>"} {"text":"<commit_before>package filesys\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/operation\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/filer_pb\"\n\t\"sync\"\n)\n\ntype ContinuousDirtyPages struct {\n\thasData bool\n\tOffset int64\n\tSize int64\n\tData []byte\n\tf *File\n\tlock sync.Mutex\n}\n\nfunc newDirtyPages(file *File) *ContinuousDirtyPages {\n\treturn &ContinuousDirtyPages{\n\t\tData: make([]byte, file.wfs.option.ChunkSizeLimit),\n\t\tf: file,\n\t}\n}\n\nfunc (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {\n\n\tpages.lock.Lock()\n\tdefer pages.lock.Unlock()\n\n\tvar chunk *filer_pb.FileChunk\n\n\tif len(data) > len(pages.Data) {\n\t\t\/\/ this is more than what buffer can hold.\n\n\t\t\/\/ flush existing\n\t\tif chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {\n\t\t\tif chunk != nil {\n\t\t\t\tglog.V(4).Infof(\"%s\/%s flush existing [%d,%d)\", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))\n\t\t\t\tchunks = append(chunks, chunk)\n\t\t\t}\n\t\t} else {\n\t\t\tglog.V(0).Infof(\"%s\/%s failed to flush1 [%d,%d): %v\", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)\n\t\t\treturn\n\t\t}\n\t\tpages.Size = 0\n\t\tpages.Offset = 0\n\n\t\t\/\/ flush the big page\n\t\tif chunk, err = pages.saveToStorage(ctx, data, offset); err == nil {\n\t\t\tif chunk != nil {\n\t\t\t\tglog.V(4).Infof(\"%s\/%s flush big request [%d,%d)\", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))\n\t\t\t\tchunks = append(chunks, chunk)\n\t\t\t}\n\t\t} else {\n\t\t\tglog.V(0).Infof(\"%s\/%s failed to flush2 [%d,%d): %v\", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)\n\t\t\treturn\n\t\t}\n\n\t\treturn\n\t}\n\n\tif offset < pages.Offset || offset >= pages.Offset+int64(len(pages.Data)) ||\n\t\tpages.Offset+int64(len(pages.Data)) < offset+int64(len(data)) {\n\t\t\/\/ if the data is out of range,\n\t\t\/\/ or buffer is full if adding new data,\n\t\t\/\/ flush current buffer and add new data\n\n\t\t\/\/ println(\"offset\", offset, \"size\", len(data), \"existing offset\", pages.Offset, \"size\", pages.Size)\n\n\t\tif chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {\n\t\t\tif chunk != nil {\n\t\t\t\tglog.V(4).Infof(\"%s\/%s add save [%d,%d)\", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))\n\t\t\t\tchunks = append(chunks, chunk)\n\t\t\t}\n\t\t} else {\n\t\t\tglog.V(0).Infof(\"%s\/%s add save [%d,%d): %v\", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)\n\t\t\treturn\n\t\t}\n\t\tpages.Offset = offset\n\t\tcopy(pages.Data, data)\n\t\tpages.Size = int64(len(data))\n\t\treturn\n\t}\n\n\tif offset != pages.Offset+pages.Size {\n\t\t\/\/ when this happens, debug shows the data overlapping with existing data is empty\n\t\t\/\/ the data is not just append\n\t\tcopy(pages.Data[pages.Size:], data[pages.Offset+pages.Size-offset:])\n\t} else {\n\t\tcopy(pages.Data[offset-pages.Offset:], data)\n\t}\n\tpages.Size = max(pages.Size, offset+int64(len(data))-pages.Offset)\n\n\treturn\n}\n\nfunc (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunk *filer_pb.FileChunk, err error) {\n\n\tpages.lock.Lock()\n\tdefer pages.lock.Unlock()\n\n\tif pages.Size == 0 {\n\t\treturn nil, nil\n\t}\n\n\tif chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {\n\t\tpages.Size = 0\n\t\tpages.Offset = 0\n\t\tif chunk != nil {\n\t\t\tglog.V(4).Infof(\"%s\/%s flush [%d,%d)\", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))\n\t\t}\n\t}\n\treturn\n}\n\nfunc (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Context) (*filer_pb.FileChunk, error) {\n\n\tif pages.Size == 0 {\n\t\treturn nil, nil\n\t}\n\n\treturn pages.saveToStorage(ctx, pages.Data[:pages.Size], pages.Offset)\n}\n\nfunc (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte, offset int64) (*filer_pb.FileChunk, error) {\n\n\tvar fileId, host string\n\n\tif err := pages.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {\n\n\t\trequest := &filer_pb.AssignVolumeRequest{\n\t\t\tCount: 1,\n\t\t\tReplication: pages.f.wfs.option.Replication,\n\t\t\tCollection: pages.f.wfs.option.Collection,\n\t\t\tTtlSec: pages.f.wfs.option.TtlSec,\n\t\t\tDataCenter: pages.f.wfs.option.DataCenter,\n\t\t}\n\n\t\tresp, err := client.AssignVolume(ctx, request)\n\t\tif err != nil {\n\t\t\tglog.V(0).Infof(\"assign volume failure %v: %v\", request, err)\n\t\t\treturn err\n\t\t}\n\n\t\tfileId, host = resp.FileId, resp.Url\n\n\t\treturn nil\n\t}); err != nil {\n\t\treturn nil, fmt.Errorf(\"filerGrpcAddress assign volume: %v\", err)\n\t}\n\n\tfileUrl := fmt.Sprintf(\"http:\/\/%s\/%s\", host, fileId)\n\tbufReader := bytes.NewReader(buf)\n\tuploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, \"application\/octet-stream\", nil, \"\")\n\tif err != nil {\n\t\tglog.V(0).Infof(\"upload data %v to %s: %v\", pages.f.Name, fileUrl, err)\n\t\treturn nil, fmt.Errorf(\"upload data: %v\", err)\n\t}\n\tif uploadResult.Error != \"\" {\n\t\tglog.V(0).Infof(\"upload failure %v to %s: %v\", pages.f.Name, fileUrl, err)\n\t\treturn nil, fmt.Errorf(\"upload result: %v\", uploadResult.Error)\n\t}\n\n\treturn &filer_pb.FileChunk{\n\t\tFileId: fileId,\n\t\tOffset: offset,\n\t\tSize: uint64(len(buf)),\n\t\tMtime: time.Now().UnixNano(),\n\t}, nil\n\n}\n\nfunc max(x, y int64) int64 {\n\tif x > y {\n\t\treturn x\n\t}\n\treturn y\n}\n<commit_msg>simplifying logic to avoid handling non continuous writes<commit_after>package filesys\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/operation\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/filer_pb\"\n\t\"sync\"\n)\n\ntype ContinuousDirtyPages struct {\n\thasData bool\n\tOffset int64\n\tSize int64\n\tData []byte\n\tf *File\n\tlock sync.Mutex\n}\n\nfunc newDirtyPages(file *File) *ContinuousDirtyPages {\n\treturn &ContinuousDirtyPages{\n\t\tData: make([]byte, file.wfs.option.ChunkSizeLimit),\n\t\tf: file,\n\t}\n}\n\nfunc (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {\n\n\tpages.lock.Lock()\n\tdefer pages.lock.Unlock()\n\n\tvar chunk *filer_pb.FileChunk\n\n\tif len(data) > len(pages.Data) {\n\t\t\/\/ this is more than what buffer can hold.\n\t\treturn pages.flushAndSave(ctx, offset, data)\n\t}\n\n\tif offset < pages.Offset || offset >= pages.Offset+int64(len(pages.Data)) ||\n\t\tpages.Offset+int64(len(pages.Data)) < offset+int64(len(data)) {\n\t\t\/\/ if the data is out of range,\n\t\t\/\/ or buffer is full if adding new data,\n\t\t\/\/ flush current buffer and add new data\n\n\t\t\/\/ println(\"offset\", offset, \"size\", len(data), \"existing offset\", pages.Offset, \"size\", pages.Size)\n\n\t\tif chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {\n\t\t\tif chunk != nil {\n\t\t\t\tglog.V(4).Infof(\"%s\/%s add save [%d,%d)\", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))\n\t\t\t\tchunks = append(chunks, chunk)\n\t\t\t}\n\t\t} else {\n\t\t\tglog.V(0).Infof(\"%s\/%s add save [%d,%d): %v\", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)\n\t\t\treturn\n\t\t}\n\t\tpages.Offset = offset\n\t\tcopy(pages.Data, data)\n\t\tpages.Size = int64(len(data))\n\t\treturn\n\t}\n\n\tif offset != pages.Offset+pages.Size {\n\t\t\/\/ when this happens, debug shows the data overlapping with existing data is empty\n\t\t\/\/ the data is not just append\n\t\treturn pages.flushAndSave(ctx, offset, data)\n\t}\n\n\tcopy(pages.Data[offset-pages.Offset:], data)\n\tpages.Size = max(pages.Size, offset+int64(len(data))-pages.Offset)\n\n\treturn\n}\n\nfunc (pages *ContinuousDirtyPages) flushAndSave(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {\n\n\tvar chunk *filer_pb.FileChunk\n\n\t\/\/ flush existing\n\tif chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {\n\t\tif chunk != nil {\n\t\t\tglog.V(4).Infof(\"%s\/%s flush existing [%d,%d)\", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))\n\t\t\tchunks = append(chunks, chunk)\n\t\t}\n\t} else {\n\t\tglog.V(0).Infof(\"%s\/%s failed to flush1 [%d,%d): %v\", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)\n\t\treturn\n\t}\n\tpages.Size = 0\n\tpages.Offset = 0\n\n\t\/\/ flush the new page\n\tif chunk, err = pages.saveToStorage(ctx, data, offset); err == nil {\n\t\tif chunk != nil {\n\t\t\tglog.V(4).Infof(\"%s\/%s flush big request [%d,%d)\", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))\n\t\t\tchunks = append(chunks, chunk)\n\t\t}\n\t} else {\n\t\tglog.V(0).Infof(\"%s\/%s failed to flush2 [%d,%d): %v\", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size), err)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (pages *ContinuousDirtyPages) FlushToStorage(ctx context.Context) (chunk *filer_pb.FileChunk, err error) {\n\n\tpages.lock.Lock()\n\tdefer pages.lock.Unlock()\n\n\tif pages.Size == 0 {\n\t\treturn nil, nil\n\t}\n\n\tif chunk, err = pages.saveExistingPagesToStorage(ctx); err == nil {\n\t\tpages.Size = 0\n\t\tpages.Offset = 0\n\t\tif chunk != nil {\n\t\t\tglog.V(4).Infof(\"%s\/%s flush [%d,%d)\", pages.f.dir.Path, pages.f.Name, chunk.Offset, chunk.Offset+int64(chunk.Size))\n\t\t}\n\t}\n\treturn\n}\n\nfunc (pages *ContinuousDirtyPages) saveExistingPagesToStorage(ctx context.Context) (*filer_pb.FileChunk, error) {\n\n\tif pages.Size == 0 {\n\t\treturn nil, nil\n\t}\n\n\treturn pages.saveToStorage(ctx, pages.Data[:pages.Size], pages.Offset)\n}\n\nfunc (pages *ContinuousDirtyPages) saveToStorage(ctx context.Context, buf []byte, offset int64) (*filer_pb.FileChunk, error) {\n\n\tvar fileId, host string\n\n\tif err := pages.f.wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {\n\n\t\trequest := &filer_pb.AssignVolumeRequest{\n\t\t\tCount: 1,\n\t\t\tReplication: pages.f.wfs.option.Replication,\n\t\t\tCollection: pages.f.wfs.option.Collection,\n\t\t\tTtlSec: pages.f.wfs.option.TtlSec,\n\t\t\tDataCenter: pages.f.wfs.option.DataCenter,\n\t\t}\n\n\t\tresp, err := client.AssignVolume(ctx, request)\n\t\tif err != nil {\n\t\t\tglog.V(0).Infof(\"assign volume failure %v: %v\", request, err)\n\t\t\treturn err\n\t\t}\n\n\t\tfileId, host = resp.FileId, resp.Url\n\n\t\treturn nil\n\t}); err != nil {\n\t\treturn nil, fmt.Errorf(\"filerGrpcAddress assign volume: %v\", err)\n\t}\n\n\tfileUrl := fmt.Sprintf(\"http:\/\/%s\/%s\", host, fileId)\n\tbufReader := bytes.NewReader(buf)\n\tuploadResult, err := operation.Upload(fileUrl, pages.f.Name, bufReader, false, \"application\/octet-stream\", nil, \"\")\n\tif err != nil {\n\t\tglog.V(0).Infof(\"upload data %v to %s: %v\", pages.f.Name, fileUrl, err)\n\t\treturn nil, fmt.Errorf(\"upload data: %v\", err)\n\t}\n\tif uploadResult.Error != \"\" {\n\t\tglog.V(0).Infof(\"upload failure %v to %s: %v\", pages.f.Name, fileUrl, err)\n\t\treturn nil, fmt.Errorf(\"upload result: %v\", uploadResult.Error)\n\t}\n\n\treturn &filer_pb.FileChunk{\n\t\tFileId: fileId,\n\t\tOffset: offset,\n\t\tSize: uint64(len(buf)),\n\t\tMtime: time.Now().UnixNano(),\n\t}, nil\n\n}\n\nfunc max(x, y int64) int64 {\n\tif x > y {\n\t\treturn x\n\t}\n\treturn y\n}\n<|endoftext|>"} {"text":"<commit_before>package s3api\n\nimport (\n\t\"fmt\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/filer\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/filer_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/s3_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/s3api\/s3_constants\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/s3api\/s3err\"\n\t\"github.com\/gorilla\/mux\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"sync\/atomic\"\n)\n\ntype CircuitBreaker struct {\n\tsync.RWMutex\n\tEnabled bool\n\tcounters map[string]*int64\n\tlimitations map[string]int64\n}\n\nfunc NewCircuitBreaker(option *S3ApiServerOption) *CircuitBreaker {\n\tcb := &CircuitBreaker{\n\t\tcounters: make(map[string]*int64),\n\t\tlimitations: make(map[string]int64),\n\t}\n\n\terr := pb.WithFilerClient(false, option.Filer, option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {\n\t\tcontent, err := filer.ReadInsideFiler(client, s3_constants.CircuitBreakerConfigDir, s3_constants.CircuitBreakerConfigFile)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"read S3 circuit breaker config: %v\", err)\n\t\t}\n\t\treturn cb.LoadS3ApiConfigurationFromBytes(content)\n\t})\n\n\tif err != nil {\n\t\tglog.Warningf(\"fail to load config: %v\", err)\n\t}\n\n\treturn cb\n}\n\nfunc (cb *CircuitBreaker) LoadS3ApiConfigurationFromBytes(content []byte) error {\n\tcbCfg := &s3_pb.S3CircuitBreakerConfig{}\n\tif err := filer.ParseS3ConfigurationFromBytes(content, cbCfg); err != nil {\n\t\tglog.Warningf(\"unmarshal error: %v\", err)\n\t\treturn fmt.Errorf(\"unmarshal error: %v\", err)\n\t}\n\tif err := cb.loadCircuitBreakerConfig(cbCfg); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cb *CircuitBreaker) loadCircuitBreakerConfig(cfg *s3_pb.S3CircuitBreakerConfig) error {\n\n\t\/\/global\n\tglobalEnabled := false\n\tglobalOptions := cfg.Global\n\tlimitations := make(map[string]int64)\n\tif globalOptions != nil && globalOptions.Enabled && len(globalOptions.Actions) > 0 {\n\t\tglobalEnabled = globalOptions.Enabled\n\t\tfor action, limit := range globalOptions.Actions {\n\t\t\tlimitations[action] = limit\n\t\t}\n\t}\n\tcb.Enabled = globalEnabled\n\n\t\/\/buckets\n\tfor bucket, cbOptions := range cfg.Buckets {\n\t\tif cbOptions.Enabled {\n\t\t\tfor action, limit := range cbOptions.Actions {\n\t\t\t\tlimitations[s3_constants.Concat(bucket, action)] = limit\n\t\t\t}\n\t\t}\n\t}\n\n\tcb.limitations = limitations\n\treturn nil\n}\n\nfunc (cb *CircuitBreaker) Limit(f func(w http.ResponseWriter, r *http.Request), action string) (http.HandlerFunc, Action) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif !cb.Enabled {\n\t\t\tf(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tvars := mux.Vars(r)\n\t\tbucket := vars[\"bucket\"]\n\n\t\trollback, errCode := cb.limit(r, bucket, action)\n\t\tdefer func() {\n\t\t\tfor _, rf := range rollback {\n\t\t\t\trf()\n\t\t\t}\n\t\t}()\n\n\t\tif errCode == s3err.ErrNone {\n\t\t\tf(w, r)\n\t\t\treturn\n\t\t}\n\t\ts3err.WriteErrorResponse(w, r, errCode)\n\t}, Action(action)\n}\n\nfunc (cb *CircuitBreaker) limit(r *http.Request, bucket string, action string) (rollback []func(), errCode s3err.ErrorCode) {\n\n\t\/\/bucket simultaneous request count\n\tbucketCountRollBack, errCode := cb.loadCounterAndCompare(s3_constants.Concat(bucket, action, s3_constants.LimitTypeCount), 1, s3err.ErrTooManyRequest)\n\tif bucketCountRollBack != nil {\n\t\trollback = append(rollback, bucketCountRollBack)\n\t}\n\tif errCode != s3err.ErrNone {\n\t\treturn\n\t}\n\n\t\/\/bucket simultaneous request content bytes\n\tbucketContentLengthRollBack, errCode := cb.loadCounterAndCompare(s3_constants.Concat(bucket, action, s3_constants.LimitTypeBytes), r.ContentLength, s3err.ErrRequestBytesExceed)\n\tif bucketContentLengthRollBack != nil {\n\t\trollback = append(rollback, bucketContentLengthRollBack)\n\t}\n\tif errCode != s3err.ErrNone {\n\t\treturn\n\t}\n\n\t\/\/global simultaneous request count\n\tglobalCountRollBack, errCode := cb.loadCounterAndCompare(s3_constants.Concat(action, s3_constants.LimitTypeCount), 1, s3err.ErrTooManyRequest)\n\tif globalCountRollBack != nil {\n\t\trollback = append(rollback, globalCountRollBack)\n\t}\n\tif errCode != s3err.ErrNone {\n\t\treturn\n\t}\n\n\t\/\/global simultaneous request content bytes\n\tglobalContentLengthRollBack, errCode := cb.loadCounterAndCompare(s3_constants.Concat(action, s3_constants.LimitTypeBytes), r.ContentLength, s3err.ErrRequestBytesExceed)\n\tif globalContentLengthRollBack != nil {\n\t\trollback = append(rollback, globalContentLengthRollBack)\n\t}\n\tif errCode != s3err.ErrNone {\n\t\treturn\n\t}\n\treturn\n}\n\nfunc (cb *CircuitBreaker) loadCounterAndCompare(key string, inc int64, errCode s3err.ErrorCode) (f func(), e s3err.ErrorCode) {\n\te = s3err.ErrNone\n\tif max, ok := cb.limitations[key]; ok {\n\t\tcb.RLock()\n\t\tcounter, exists := cb.counters[key]\n\t\tcb.RUnlock()\n\n\t\tif !exists {\n\t\t\tcb.Lock()\n\t\t\tcounter, exists = cb.counters[key]\n\t\t\tif !exists {\n\t\t\t\tvar newCounter int64\n\t\t\t\tcounter = &newCounter\n\t\t\t\tcb.counters[key] = counter\n\t\t\t}\n\t\t\tcb.Unlock()\n\t\t}\n\t\tcurrent := atomic.LoadInt64(counter)\n\t\tif current+inc > max {\n\t\t\te = errCode\n\t\t\treturn\n\t\t} else {\n\t\t\tcurrent := atomic.AddInt64(counter, inc)\n\t\t\tf = func() {\n\t\t\t\tatomic.AddInt64(counter, -inc)\n\t\t\t}\n\t\t\tif current > max {\n\t\t\t\te = errCode\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n<commit_msg>adjust log message<commit_after>package s3api\n\nimport (\n\t\"fmt\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/filer\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/filer_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/s3_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/s3api\/s3_constants\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/s3api\/s3err\"\n\t\"github.com\/gorilla\/mux\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"sync\/atomic\"\n)\n\ntype CircuitBreaker struct {\n\tsync.RWMutex\n\tEnabled bool\n\tcounters map[string]*int64\n\tlimitations map[string]int64\n}\n\nfunc NewCircuitBreaker(option *S3ApiServerOption) *CircuitBreaker {\n\tcb := &CircuitBreaker{\n\t\tcounters: make(map[string]*int64),\n\t\tlimitations: make(map[string]int64),\n\t}\n\n\terr := pb.WithFilerClient(false, option.Filer, option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {\n\t\tcontent, err := filer.ReadInsideFiler(client, s3_constants.CircuitBreakerConfigDir, s3_constants.CircuitBreakerConfigFile)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"read S3 circuit breaker config: %v\", err)\n\t\t}\n\t\treturn cb.LoadS3ApiConfigurationFromBytes(content)\n\t})\n\n\tif err != nil {\n\t\tglog.Infof(\"s3 circuit breaker not configured: %v\", err)\n\t}\n\n\treturn cb\n}\n\nfunc (cb *CircuitBreaker) LoadS3ApiConfigurationFromBytes(content []byte) error {\n\tcbCfg := &s3_pb.S3CircuitBreakerConfig{}\n\tif err := filer.ParseS3ConfigurationFromBytes(content, cbCfg); err != nil {\n\t\tglog.Warningf(\"unmarshal error: %v\", err)\n\t\treturn fmt.Errorf(\"unmarshal error: %v\", err)\n\t}\n\tif err := cb.loadCircuitBreakerConfig(cbCfg); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (cb *CircuitBreaker) loadCircuitBreakerConfig(cfg *s3_pb.S3CircuitBreakerConfig) error {\n\n\t\/\/global\n\tglobalEnabled := false\n\tglobalOptions := cfg.Global\n\tlimitations := make(map[string]int64)\n\tif globalOptions != nil && globalOptions.Enabled && len(globalOptions.Actions) > 0 {\n\t\tglobalEnabled = globalOptions.Enabled\n\t\tfor action, limit := range globalOptions.Actions {\n\t\t\tlimitations[action] = limit\n\t\t}\n\t}\n\tcb.Enabled = globalEnabled\n\n\t\/\/buckets\n\tfor bucket, cbOptions := range cfg.Buckets {\n\t\tif cbOptions.Enabled {\n\t\t\tfor action, limit := range cbOptions.Actions {\n\t\t\t\tlimitations[s3_constants.Concat(bucket, action)] = limit\n\t\t\t}\n\t\t}\n\t}\n\n\tcb.limitations = limitations\n\treturn nil\n}\n\nfunc (cb *CircuitBreaker) Limit(f func(w http.ResponseWriter, r *http.Request), action string) (http.HandlerFunc, Action) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif !cb.Enabled {\n\t\t\tf(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tvars := mux.Vars(r)\n\t\tbucket := vars[\"bucket\"]\n\n\t\trollback, errCode := cb.limit(r, bucket, action)\n\t\tdefer func() {\n\t\t\tfor _, rf := range rollback {\n\t\t\t\trf()\n\t\t\t}\n\t\t}()\n\n\t\tif errCode == s3err.ErrNone {\n\t\t\tf(w, r)\n\t\t\treturn\n\t\t}\n\t\ts3err.WriteErrorResponse(w, r, errCode)\n\t}, Action(action)\n}\n\nfunc (cb *CircuitBreaker) limit(r *http.Request, bucket string, action string) (rollback []func(), errCode s3err.ErrorCode) {\n\n\t\/\/bucket simultaneous request count\n\tbucketCountRollBack, errCode := cb.loadCounterAndCompare(s3_constants.Concat(bucket, action, s3_constants.LimitTypeCount), 1, s3err.ErrTooManyRequest)\n\tif bucketCountRollBack != nil {\n\t\trollback = append(rollback, bucketCountRollBack)\n\t}\n\tif errCode != s3err.ErrNone {\n\t\treturn\n\t}\n\n\t\/\/bucket simultaneous request content bytes\n\tbucketContentLengthRollBack, errCode := cb.loadCounterAndCompare(s3_constants.Concat(bucket, action, s3_constants.LimitTypeBytes), r.ContentLength, s3err.ErrRequestBytesExceed)\n\tif bucketContentLengthRollBack != nil {\n\t\trollback = append(rollback, bucketContentLengthRollBack)\n\t}\n\tif errCode != s3err.ErrNone {\n\t\treturn\n\t}\n\n\t\/\/global simultaneous request count\n\tglobalCountRollBack, errCode := cb.loadCounterAndCompare(s3_constants.Concat(action, s3_constants.LimitTypeCount), 1, s3err.ErrTooManyRequest)\n\tif globalCountRollBack != nil {\n\t\trollback = append(rollback, globalCountRollBack)\n\t}\n\tif errCode != s3err.ErrNone {\n\t\treturn\n\t}\n\n\t\/\/global simultaneous request content bytes\n\tglobalContentLengthRollBack, errCode := cb.loadCounterAndCompare(s3_constants.Concat(action, s3_constants.LimitTypeBytes), r.ContentLength, s3err.ErrRequestBytesExceed)\n\tif globalContentLengthRollBack != nil {\n\t\trollback = append(rollback, globalContentLengthRollBack)\n\t}\n\tif errCode != s3err.ErrNone {\n\t\treturn\n\t}\n\treturn\n}\n\nfunc (cb *CircuitBreaker) loadCounterAndCompare(key string, inc int64, errCode s3err.ErrorCode) (f func(), e s3err.ErrorCode) {\n\te = s3err.ErrNone\n\tif max, ok := cb.limitations[key]; ok {\n\t\tcb.RLock()\n\t\tcounter, exists := cb.counters[key]\n\t\tcb.RUnlock()\n\n\t\tif !exists {\n\t\t\tcb.Lock()\n\t\t\tcounter, exists = cb.counters[key]\n\t\t\tif !exists {\n\t\t\t\tvar newCounter int64\n\t\t\t\tcounter = &newCounter\n\t\t\t\tcb.counters[key] = counter\n\t\t\t}\n\t\t\tcb.Unlock()\n\t\t}\n\t\tcurrent := atomic.LoadInt64(counter)\n\t\tif current+inc > max {\n\t\t\te = errCode\n\t\t\treturn\n\t\t} else {\n\t\t\tcurrent := atomic.AddInt64(counter, inc)\n\t\t\tf = func() {\n\t\t\t\tatomic.AddInt64(counter, -inc)\n\t\t\t}\n\t\t\tif current > max {\n\t\t\t\te = errCode\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package gcvgo\n\nimport (\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n)\n\n\/\/ NewClient creates a new API client with the provided credentials\nfunc NewClient(credentials Credentials) (Client, error) {\n\n\treturn Client{\n\t\thttpClient: &http.Client{},\n\t\tCredentials: credentials,\n\t}, nil\n}\n\nfunc (reqs *Requests) Add(request Request) {\n\t*reqs = append(*reqs, request)\n}\n\nfunc (request *Request) AddImageFromBase64(base64Content string) {\n\trequest.Image.Content = base64Content\n}\n\nfunc (features *Features) Add(featureType FeatureType, MaxResults int64) {\n\t*features = append(*features, Feature{\n\t\tType: featureType,\n\t\tMaxResults: MaxResults,\n\t})\n}\n\nfunc (client *Client) Do(reqs Requests) ([]Response, error) {\n\tpayload := struct {\n\t\tRequests Requests `json:\"requests\"`\n\t}{\n\t\tRequests: reqs,\n\t}\n\tJSONPayload, err := json.Marshal(payload)\n\tif err != nil {\n\t\treturn []Response{}, err\n\t}\n\tfmt.Println(string(JSONPayload))\n\n\tresponse, _, err := client.fetchAndReturnPage(JSONPayload)\n\tif err != nil {\n\t\treturn []Response{}, err\n\t}\n\n\tfmt.Println(string(response))\n\n\tvar result struct {\n\t\tResponses []Response `json:\"responses\"`\n\t}\n\terr = json.Unmarshal(response, &result)\n\tif err != nil {\n\t\treturn []Response{}, err\n\t}\n\n\treturn result.Responses, nil\n}\n\nfunc (client *Client) fetchAndReturnPage(body []byte) ([]byte, http.Header, error) {\n\n\tdomain := fmt.Sprintf(\"https:\/\/vision.googleapis.com\/v1\/images:annotate?key=%s\", client.Credentials.APIkey)\n\trequestURL, err := url.Parse(domain)\n\tif err != nil {\n\t\treturn []byte(\"\"), http.Header{}, err\n\t}\n\trequestURL.Path = \"\/v1\/images:annotate\"\n\n\tfmt.Println(requestURL)\n\n\trequest, err := http.NewRequest(\"POST\", requestURL.String(), bytes.NewBuffer(body))\n\tif err != nil {\n\t\treturn []byte(\"\"), http.Header{}, fmt.Errorf(\"Failed to get the URL %s: %s\", requestURL, err)\n\t}\n\trequest.Header.Add(\"Content-Length\", strconv.Itoa(len(body)))\n\n\trequest.Header.Add(\"Connection\", \"Keep-Alive\")\n\trequest.Header.Add(\"Accept-Encoding\", \"gzip, deflate\")\n\trequest.Header.Add(\"Content-Type\", \"application\/json\")\n\n\tresponse, err := client.httpClient.Do(request)\n\tif err != nil {\n\t\treturn []byte(\"\"), http.Header{}, fmt.Errorf(\"Failed to get the URL %s: %s\", requestURL, err)\n\t}\n\tdefer response.Body.Close()\n\n\tvar responseReader io.ReadCloser\n\tswitch response.Header.Get(\"Content-Encoding\") {\n\tcase \"gzip\":\n\t\tdecompressedBodyReader, err := gzip.NewReader(response.Body)\n\t\tif err != nil {\n\t\t\treturn []byte(\"\"), http.Header{}, err\n\t\t}\n\t\tresponseReader = decompressedBodyReader\n\t\tdefer responseReader.Close()\n\tdefault:\n\t\tresponseReader = response.Body\n\t}\n\n\tresponseBody, err := ioutil.ReadAll(responseReader)\n\tif err != nil {\n\t\treturn []byte(\"\"), http.Header{}, err\n\t}\n\n\tif response.StatusCode > 299 || response.StatusCode < 199 {\n\t\tvar apiError Status\n\t\terr = json.Unmarshal(responseBody, &apiError)\n\t\tif err != nil {\n\t\t\treturn []byte(\"\"), http.Header{}, nil\n\t\t}\n\t\tfmt.Println(response.StatusCode)\n\t\tfmt.Println(string(responseBody))\n\t\treturn responseBody, response.Header, fmt.Errorf(\"%s\", responseBody)\n\t}\n\n\treturn responseBody, response.Header, nil\n}\n<commit_msg>Comment debug prints<commit_after>package gcvgo\n\nimport (\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n)\n\n\/\/ NewClient creates a new API client with the provided credentials\nfunc NewClient(credentials Credentials) (Client, error) {\n\n\treturn Client{\n\t\thttpClient: &http.Client{},\n\t\tCredentials: credentials,\n\t}, nil\n}\n\nfunc (reqs *Requests) Add(request Request) {\n\t*reqs = append(*reqs, request)\n}\n\nfunc (request *Request) AddImageFromBase64(base64Content string) {\n\trequest.Image.Content = base64Content\n}\n\nfunc (features *Features) Add(featureType FeatureType, MaxResults int64) {\n\t*features = append(*features, Feature{\n\t\tType: featureType,\n\t\tMaxResults: MaxResults,\n\t})\n}\n\nfunc (client *Client) Do(reqs Requests) ([]Response, error) {\n\tpayload := struct {\n\t\tRequests Requests `json:\"requests\"`\n\t}{\n\t\tRequests: reqs,\n\t}\n\tJSONPayload, err := json.Marshal(payload)\n\tif err != nil {\n\t\treturn []Response{}, err\n\t}\n\t\/\/fmt.Println(string(JSONPayload))\n\n\tresponse, _, err := client.fetchAndReturnPage(JSONPayload)\n\tif err != nil {\n\t\treturn []Response{}, err\n\t}\n\n\t\/\/fmt.Println(string(response))\n\n\tvar result struct {\n\t\tResponses []Response `json:\"responses\"`\n\t}\n\terr = json.Unmarshal(response, &result)\n\tif err != nil {\n\t\treturn []Response{}, err\n\t}\n\n\treturn result.Responses, nil\n}\n\nfunc (client *Client) fetchAndReturnPage(body []byte) ([]byte, http.Header, error) {\n\n\tdomain := fmt.Sprintf(\"https:\/\/vision.googleapis.com\/v1\/images:annotate?key=%s\", client.Credentials.APIkey)\n\trequestURL, err := url.Parse(domain)\n\tif err != nil {\n\t\treturn []byte(\"\"), http.Header{}, err\n\t}\n\trequestURL.Path = \"\/v1\/images:annotate\"\n\n\t\/\/fmt.Println(requestURL)\n\n\trequest, err := http.NewRequest(\"POST\", requestURL.String(), bytes.NewBuffer(body))\n\tif err != nil {\n\t\treturn []byte(\"\"), http.Header{}, fmt.Errorf(\"Failed to get the URL %s: %s\", requestURL, err)\n\t}\n\trequest.Header.Add(\"Content-Length\", strconv.Itoa(len(body)))\n\n\trequest.Header.Add(\"Connection\", \"Keep-Alive\")\n\trequest.Header.Add(\"Accept-Encoding\", \"gzip, deflate\")\n\trequest.Header.Add(\"Content-Type\", \"application\/json\")\n\n\tresponse, err := client.httpClient.Do(request)\n\tif err != nil {\n\t\treturn []byte(\"\"), http.Header{}, fmt.Errorf(\"Failed to get the URL %s: %s\", requestURL, err)\n\t}\n\tdefer response.Body.Close()\n\n\tvar responseReader io.ReadCloser\n\tswitch response.Header.Get(\"Content-Encoding\") {\n\tcase \"gzip\":\n\t\tdecompressedBodyReader, err := gzip.NewReader(response.Body)\n\t\tif err != nil {\n\t\t\treturn []byte(\"\"), http.Header{}, err\n\t\t}\n\t\tresponseReader = decompressedBodyReader\n\t\tdefer responseReader.Close()\n\tdefault:\n\t\tresponseReader = response.Body\n\t}\n\n\tresponseBody, err := ioutil.ReadAll(responseReader)\n\tif err != nil {\n\t\treturn []byte(\"\"), http.Header{}, err\n\t}\n\n\tif response.StatusCode > 299 || response.StatusCode < 199 {\n\t\tvar apiError Status\n\t\terr = json.Unmarshal(responseBody, &apiError)\n\t\tif err != nil {\n\t\t\treturn []byte(\"\"), http.Header{}, nil\n\t\t}\n\t\tfmt.Println(response.StatusCode)\n\t\tfmt.Println(string(responseBody))\n\t\treturn responseBody, response.Header, fmt.Errorf(\"%s\", responseBody)\n\t}\n\n\treturn responseBody, response.Header, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build !novault\n\n\/*\n * Copyright (c) 2013-2016, Jeremy Bingham (<jeremy@goiardi.gl>)\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage secret\n\n\/\/ Functions for using hashicorp vault (https:\/\/www.vaultproject.io\/) to store\n\/\/ secrets in goiardi.\n\nimport (\n\t\"crypto\/rsa\"\n\t\"crypto\/x509\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"github.com\/ctdk\/goiardi\/config\"\n\tvault \"github.com\/hashicorp\/vault\/api\"\n\t\"github.com\/tideland\/golib\/logger\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ make this a pool later?\n\ntype vaultSecretStore struct {\n\tm sync.RWMutex\n\tsecrets map[string]*secretVal\n\t*vault.Client\n}\n\nconst MaxStaleAgeSeconds = 3600 \/\/ configurable later, but make it an hour for\n\/\/ now\nconst StaleTryAgainSeconds = 60 \/\/ try stale values again in a minute\n\ntype secretVal struct {\n\tpath string\n\tsecretType string\n\tcreated time.Time\n\trenewable bool\n\tttl time.Duration\n\texpires time.Time\n\tstale bool\n\tstaleTryAgain time.Time\n\tstaleTime time.Time\n\tvalue interface{}\n}\n\ntype secretConvert func(interface{}) (interface{}, error)\n\nfunc configureVault() (*vaultSecretStore, error) {\n\tconf := vault.DefaultConfig()\n\tif err := conf.ReadEnvironment(); err != nil {\n\t\treturn nil, err\n\t}\n\tif config.Config.VaultAddr != \"\" {\n\t\tconf.Address = config.Config.VaultAddr\n\t}\n\tc, err := vault.NewClient(conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar m sync.RWMutex\n\tsecrets := make(map[string]*secretVal)\n\tv := &vaultSecretStore{m, secrets, c}\n\treturn v, nil\n}\n\nfunc (v *vaultSecretStore) getSecret(path string, secretType string) (interface{}, error) {\n\tif v.secrets[path] == nil {\n\t\tlogger.Debugf(\"secret (%s) for %s is nil, fetching from vault\", secretType, path)\n\t\ts, err := v.getSecretPath(path, secretType)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tv.secrets[path] = s\n\t} else {\n\t\tlogger.Debugf(\"using cached secret for %s\", path)\n\t}\n\treturn v.secretValue(v.secrets[path])\n}\n\nfunc (v *vaultSecretStore) getSecretPath(path string, secretType string) (*secretVal, error) {\n\tt := time.Now()\n\ts, err := v.Logical().Read(path)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to read %s (%s) from vault: %s\", path, secretType, err.Error())\n\t\treturn nil, err\n\t}\n\tif s == nil {\n\t\terr := fmt.Errorf(\"No secret returned from vault for %s (%s)\", path, secretType)\n\t\treturn nil, err\n\t}\n\tp := s.Data[secretType]\n\tif p == nil {\n\t\terr := fmt.Errorf(\"no data for %s (%s) from vault\", path, secretType)\n\t\treturn nil, err\n\t}\n\tp, err = convertors(secretType)(p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsVal := newSecretVal(path, secretType, p, t, s)\n\treturn sVal, nil\n}\n\nfunc (v *vaultSecretStore) setSecret(path string, secretType string, value interface{}) error {\n\tlogger.Debugf(\"setting public key for %s (%s)\", path, secretType)\n\tt := time.Now()\n\t_, err := v.Logical().Write(path, map[string]interface{}{\n\t\tsecretType: value,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\ts, err := v.Logical().Read(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error re-reading secret from vault after setting: %s\", err.Error())\n\t}\n\tsVal := newSecretVal(path, secretType, value, t, s)\n\tv.secrets[path] = sVal\n\treturn nil\n}\n\nfunc (v *vaultSecretStore) deleteSecret(path string) error {\n\tdelete(v.secrets, path)\n\t_, err := v.Logical().Delete(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (v *vaultSecretStore) getPublicKey(c ActorKeyer) (string, error) {\n\tv.m.RLock()\n\tdefer v.m.RUnlock()\n\tpath := makePubKeyPath(c)\n\ts, err := v.getSecret(path, \"pubKey\")\n\tswitch s := s.(type) {\n\tcase string:\n\t\treturn s, err\n\tcase []byte:\n\t\treturn string(s), err\n\tcase nil:\n\t\treturn \"\", err\n\tdefault:\n\t\tvar errStr string\n\t\tif err != nil {\n\t\t\terrStr = err.Error()\n\t\t}\n\t\terr := fmt.Errorf(\"The type was wrong fetching the public key from vault: %T -- error, if any: %s\", s, errStr)\n\t\treturn \"\", err\n\t}\n}\n\nfunc (v *vaultSecretStore) setPublicKey(c ActorKeyer, pubKey string) error {\n\tv.m.Lock()\n\tdefer v.m.Unlock()\n\tpath := makePubKeyPath(c)\n\treturn v.setSecret(path, \"pubKey\", pubKey)\n}\n\nfunc (v *vaultSecretStore) deletePublicKey(c ActorKeyer) error {\n\tv.m.Lock()\n\tdefer v.m.Unlock()\n\tpath := makePubKeyPath(c)\n\treturn v.deleteSecret(path)\n}\n\nfunc makePubKeyPath(c ActorKeyer) string {\n\treturn fmt.Sprintf(\"keys\/%s\/%s\", c.URLType(), c.GetName())\n}\n\nfunc makeHashPath(c ActorKeyer) string {\n\t\/\/ strictly speaking only users actually have passwords, but in case\n\t\/\/ something else ever comes up, make the path a little longer.\n\treturn fmt.Sprintf(\"keys\/passwd\/%s\/%s\", c.URLType(), c.GetName())\n}\n\nfunc newSecretVal(path string, secretType string, value interface{}, t time.Time, s *vault.Secret) *secretVal {\n\tsVal := new(secretVal)\n\tsVal.path = path\n\tsVal.secretType = secretType\n\tsVal.created = t\n\tsVal.renewable = s.Renewable\n\tsVal.ttl = time.Duration(s.LeaseDuration) * time.Second\n\tsVal.expires = t.Add(sVal.ttl)\n\tsVal.value = value\n\treturn sVal\n}\n\nfunc (s *secretVal) isExpired() bool {\n\tif s.ttl == 0 {\n\t\treturn false\n\t}\n\treturn time.Now().After(s.expires)\n}\n\nfunc (v *vaultSecretStore) secretValue(s *secretVal) (interface{}, error) {\n\tif s.isExpired() {\n\t\tlogger.Debugf(\"trying to renew secret for %s\", s.path)\n\t\ts2, err := v.getSecretPath(s.path, s.secretType)\n\t\tif !s.stale {\n\t\t\tif err != nil {\n\t\t\t\tlogger.Debugf(\"error trying to renew the secret for %s: %s -- marking as stale\", s.path, err.Error())\n\t\t\t\ts.stale = true\n\t\t\t\ts.staleTime = time.Now().Add(MaxStaleAgeSeconds * time.Second)\n\t\t\t\ts.staleTryAgain = time.Now().Add(StaleTryAgainSeconds * time.Second)\n\t\t\t} else {\n\t\t\t\tlogger.Debugf(\"successfully renewed secret for %s\", s.path)\n\t\t\t\ts = s2\n\t\t\t}\n\t\t} else if time.Now().After(s.staleTime) {\n\t\t\tif err != nil {\n\t\t\t\terr := fmt.Errorf(\"Couldn't renew the secret for %s before %d seconds ran out, giving up\", s.path, MaxStaleAgeSeconds)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tlogger.Debugf(\"successfully renewed secret for %s beforegiving up due to staleness\", s.path)\n\t\t\ts = s2\n\t\t} else if time.Now().After(s.staleTryAgain) {\n\t\t\tif err != nil {\n\t\t\t\tlogger.Debugf(\"error trying to renew the secret for %s: %s -- will renew again in %d seconds\", s.path, err.Error(), StaleTryAgainSeconds)\n\t\t\t\ts.staleTryAgain = time.Now().Add(StaleTryAgainSeconds)\n\t\t\t} else {\n\t\t\t\tlogger.Debugf(\"successfully renewed secret after being stale\")\n\t\t\t\ts = s2\n\t\t\t}\n\t\t}\n\t}\n\treturn s.value, nil\n}\n\nfunc (v *vaultSecretStore) valueStr(s *secretVal) (string, error) {\n\tval, err := v.secretValue(s)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvalStr, ok := val.(string)\n\tif !ok {\n\t\terr := fmt.Errorf(\"value for %s was not a string, but a %T!\", s.path, val)\n\t\treturn \"\", err\n\t}\n\treturn valStr, nil\n}\n\n\/\/ shovey signing key\n\nfunc (v *vaultSecretStore) getSigningKey(path string) (*rsa.PrivateKey, error) {\n\tv.m.RLock()\n\tdefer v.m.RUnlock()\n\ts, err := v.getSecret(path, \"RSAKey\")\n\tswitch s := s.(type) {\n\tcase *rsa.PrivateKey:\n\t\treturn s, err\n\tdefault:\n\t\tvar errStr string\n\t\tif err != nil {\n\t\t\terrStr = err.Error()\n\t\t}\n\t\tkerr := fmt.Errorf(\"RSA private key for shovey was not returned. An object of type %T was. Error, if any: %s\", s, errStr)\n\t\treturn nil, kerr\n\t}\n}\n\n\/\/ user passwd hash methods\n\nfunc (v *vaultSecretStore) setPasswdHash(c ActorKeyer, pwhash string) error {\n\tv.m.Lock()\n\tdefer v.m.Unlock()\n\tpath := makeHashPath(c)\n\treturn v.setSecret(path, \"passwd\", pwhash)\n}\n\nfunc (v *vaultSecretStore) getPasswdHash(c ActorKeyer) (string, error) {\n\tv.m.RLock()\n\tdefer v.m.RUnlock()\n\tpath := makeHashPath(c)\n\ts, err := v.getSecret(path, \"passwd\")\n\tswitch s := s.(type) {\n\tcase string:\n\t\treturn s, err\n\tcase []byte:\n\t\treturn string(s), err\n\tcase nil:\n\t\treturn \"\", err\n\tdefault:\n\t\tvar errStr string\n\t\tif err != nil {\n\t\t\terrStr = err.Error()\n\t\t}\n\t\terr := fmt.Errorf(\"The type was wrong fetching the passwd hash from vault: %T -- error, if any: %s\", s, errStr)\n\t\treturn \"\", err\n\t}\n}\n\nfunc (v *vaultSecretStore) deletePasswdHash(c ActorKeyer) error {\n\tv.m.Lock()\n\tdefer v.m.Unlock()\n\tpath := makeHashPath(c)\n\treturn v.deleteSecret(path)\n}\n\n\/\/ funcs to process secrets after fetching them from vault\n\nfunc secretPassThrough(i interface{}) (interface{}, error) {\n\treturn i, nil\n}\n\nfunc secretRSAKey(i interface{}) (interface{}, error) {\n\tp, ok := i.(string)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"not an RSA private key in string form\")\n\t}\n\tpBlock, _ := pem.Decode([]byte(p))\n\tif pBlock == nil {\n\t\treturn nil, fmt.Errorf(\"invalid block size for private key for shovey from vault\")\n\t}\n\tpk, err := x509.ParsePKCS1PrivateKey(pBlock.Bytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pk, nil\n}\n\nfunc convertors(secretType string) secretConvert {\n\tswitch secretType {\n\tcase \"RSAKey\":\n\t\treturn secretRSAKey\n\tdefault:\n\t\treturn secretPassThrough\n\t}\n}\n<commit_msg>fix go vet error in secrets processing<commit_after>\/\/ +build !novault\n\n\/*\n * Copyright (c) 2013-2016, Jeremy Bingham (<jeremy@goiardi.gl>)\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage secret\n\n\/\/ Functions for using hashicorp vault (https:\/\/www.vaultproject.io\/) to store\n\/\/ secrets in goiardi.\n\nimport (\n\t\"crypto\/rsa\"\n\t\"crypto\/x509\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"github.com\/ctdk\/goiardi\/config\"\n\tvault \"github.com\/hashicorp\/vault\/api\"\n\t\"github.com\/tideland\/golib\/logger\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ make this a pool later?\n\ntype vaultSecretStore struct {\n\tm sync.RWMutex\n\tsecrets map[string]*secretVal\n\t*vault.Client\n}\n\nconst MaxStaleAgeSeconds = 3600 \/\/ configurable later, but make it an hour for\n\/\/ now\nconst StaleTryAgainSeconds = 60 \/\/ try stale values again in a minute\n\ntype secretVal struct {\n\tpath string\n\tsecretType string\n\tcreated time.Time\n\trenewable bool\n\tttl time.Duration\n\texpires time.Time\n\tstale bool\n\tstaleTryAgain time.Time\n\tstaleTime time.Time\n\tvalue interface{}\n}\n\ntype secretConvert func(interface{}) (interface{}, error)\n\nfunc configureVault() (*vaultSecretStore, error) {\n\tconf := vault.DefaultConfig()\n\tif err := conf.ReadEnvironment(); err != nil {\n\t\treturn nil, err\n\t}\n\tif config.Config.VaultAddr != \"\" {\n\t\tconf.Address = config.Config.VaultAddr\n\t}\n\tc, err := vault.NewClient(conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsecrets := make(map[string]*secretVal)\n\tv := &vaultSecretStore{secrets: secrets, Client: c}\n\treturn v, nil\n}\n\nfunc (v *vaultSecretStore) getSecret(path string, secretType string) (interface{}, error) {\n\tif v.secrets[path] == nil {\n\t\tlogger.Debugf(\"secret (%s) for %s is nil, fetching from vault\", secretType, path)\n\t\ts, err := v.getSecretPath(path, secretType)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tv.secrets[path] = s\n\t} else {\n\t\tlogger.Debugf(\"using cached secret for %s\", path)\n\t}\n\treturn v.secretValue(v.secrets[path])\n}\n\nfunc (v *vaultSecretStore) getSecretPath(path string, secretType string) (*secretVal, error) {\n\tt := time.Now()\n\ts, err := v.Logical().Read(path)\n\tif err != nil {\n\t\terr := fmt.Errorf(\"Failed to read %s (%s) from vault: %s\", path, secretType, err.Error())\n\t\treturn nil, err\n\t}\n\tif s == nil {\n\t\terr := fmt.Errorf(\"No secret returned from vault for %s (%s)\", path, secretType)\n\t\treturn nil, err\n\t}\n\tp := s.Data[secretType]\n\tif p == nil {\n\t\terr := fmt.Errorf(\"no data for %s (%s) from vault\", path, secretType)\n\t\treturn nil, err\n\t}\n\tp, err = convertors(secretType)(p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsVal := newSecretVal(path, secretType, p, t, s)\n\treturn sVal, nil\n}\n\nfunc (v *vaultSecretStore) setSecret(path string, secretType string, value interface{}) error {\n\tlogger.Debugf(\"setting public key for %s (%s)\", path, secretType)\n\tt := time.Now()\n\t_, err := v.Logical().Write(path, map[string]interface{}{\n\t\tsecretType: value,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\ts, err := v.Logical().Read(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error re-reading secret from vault after setting: %s\", err.Error())\n\t}\n\tsVal := newSecretVal(path, secretType, value, t, s)\n\tv.secrets[path] = sVal\n\treturn nil\n}\n\nfunc (v *vaultSecretStore) deleteSecret(path string) error {\n\tdelete(v.secrets, path)\n\t_, err := v.Logical().Delete(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (v *vaultSecretStore) getPublicKey(c ActorKeyer) (string, error) {\n\tv.m.RLock()\n\tdefer v.m.RUnlock()\n\tpath := makePubKeyPath(c)\n\ts, err := v.getSecret(path, \"pubKey\")\n\tswitch s := s.(type) {\n\tcase string:\n\t\treturn s, err\n\tcase []byte:\n\t\treturn string(s), err\n\tcase nil:\n\t\treturn \"\", err\n\tdefault:\n\t\tvar errStr string\n\t\tif err != nil {\n\t\t\terrStr = err.Error()\n\t\t}\n\t\terr := fmt.Errorf(\"The type was wrong fetching the public key from vault: %T -- error, if any: %s\", s, errStr)\n\t\treturn \"\", err\n\t}\n}\n\nfunc (v *vaultSecretStore) setPublicKey(c ActorKeyer, pubKey string) error {\n\tv.m.Lock()\n\tdefer v.m.Unlock()\n\tpath := makePubKeyPath(c)\n\treturn v.setSecret(path, \"pubKey\", pubKey)\n}\n\nfunc (v *vaultSecretStore) deletePublicKey(c ActorKeyer) error {\n\tv.m.Lock()\n\tdefer v.m.Unlock()\n\tpath := makePubKeyPath(c)\n\treturn v.deleteSecret(path)\n}\n\nfunc makePubKeyPath(c ActorKeyer) string {\n\treturn fmt.Sprintf(\"keys\/%s\/%s\", c.URLType(), c.GetName())\n}\n\nfunc makeHashPath(c ActorKeyer) string {\n\t\/\/ strictly speaking only users actually have passwords, but in case\n\t\/\/ something else ever comes up, make the path a little longer.\n\treturn fmt.Sprintf(\"keys\/passwd\/%s\/%s\", c.URLType(), c.GetName())\n}\n\nfunc newSecretVal(path string, secretType string, value interface{}, t time.Time, s *vault.Secret) *secretVal {\n\tsVal := new(secretVal)\n\tsVal.path = path\n\tsVal.secretType = secretType\n\tsVal.created = t\n\tsVal.renewable = s.Renewable\n\tsVal.ttl = time.Duration(s.LeaseDuration) * time.Second\n\tsVal.expires = t.Add(sVal.ttl)\n\tsVal.value = value\n\treturn sVal\n}\n\nfunc (s *secretVal) isExpired() bool {\n\tif s.ttl == 0 {\n\t\treturn false\n\t}\n\treturn time.Now().After(s.expires)\n}\n\nfunc (v *vaultSecretStore) secretValue(s *secretVal) (interface{}, error) {\n\tif s.isExpired() {\n\t\tlogger.Debugf(\"trying to renew secret for %s\", s.path)\n\t\ts2, err := v.getSecretPath(s.path, s.secretType)\n\t\tif !s.stale {\n\t\t\tif err != nil {\n\t\t\t\tlogger.Debugf(\"error trying to renew the secret for %s: %s -- marking as stale\", s.path, err.Error())\n\t\t\t\ts.stale = true\n\t\t\t\ts.staleTime = time.Now().Add(MaxStaleAgeSeconds * time.Second)\n\t\t\t\ts.staleTryAgain = time.Now().Add(StaleTryAgainSeconds * time.Second)\n\t\t\t} else {\n\t\t\t\tlogger.Debugf(\"successfully renewed secret for %s\", s.path)\n\t\t\t\ts = s2\n\t\t\t}\n\t\t} else if time.Now().After(s.staleTime) {\n\t\t\tif err != nil {\n\t\t\t\terr := fmt.Errorf(\"Couldn't renew the secret for %s before %d seconds ran out, giving up\", s.path, MaxStaleAgeSeconds)\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tlogger.Debugf(\"successfully renewed secret for %s beforegiving up due to staleness\", s.path)\n\t\t\ts = s2\n\t\t} else if time.Now().After(s.staleTryAgain) {\n\t\t\tif err != nil {\n\t\t\t\tlogger.Debugf(\"error trying to renew the secret for %s: %s -- will renew again in %d seconds\", s.path, err.Error(), StaleTryAgainSeconds)\n\t\t\t\ts.staleTryAgain = time.Now().Add(StaleTryAgainSeconds)\n\t\t\t} else {\n\t\t\t\tlogger.Debugf(\"successfully renewed secret after being stale\")\n\t\t\t\ts = s2\n\t\t\t}\n\t\t}\n\t}\n\treturn s.value, nil\n}\n\nfunc (v *vaultSecretStore) valueStr(s *secretVal) (string, error) {\n\tval, err := v.secretValue(s)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvalStr, ok := val.(string)\n\tif !ok {\n\t\terr := fmt.Errorf(\"value for %s was not a string, but a %T!\", s.path, val)\n\t\treturn \"\", err\n\t}\n\treturn valStr, nil\n}\n\n\/\/ shovey signing key\n\nfunc (v *vaultSecretStore) getSigningKey(path string) (*rsa.PrivateKey, error) {\n\tv.m.RLock()\n\tdefer v.m.RUnlock()\n\ts, err := v.getSecret(path, \"RSAKey\")\n\tswitch s := s.(type) {\n\tcase *rsa.PrivateKey:\n\t\treturn s, err\n\tdefault:\n\t\tvar errStr string\n\t\tif err != nil {\n\t\t\terrStr = err.Error()\n\t\t}\n\t\tkerr := fmt.Errorf(\"RSA private key for shovey was not returned. An object of type %T was. Error, if any: %s\", s, errStr)\n\t\treturn nil, kerr\n\t}\n}\n\n\/\/ user passwd hash methods\n\nfunc (v *vaultSecretStore) setPasswdHash(c ActorKeyer, pwhash string) error {\n\tv.m.Lock()\n\tdefer v.m.Unlock()\n\tpath := makeHashPath(c)\n\treturn v.setSecret(path, \"passwd\", pwhash)\n}\n\nfunc (v *vaultSecretStore) getPasswdHash(c ActorKeyer) (string, error) {\n\tv.m.RLock()\n\tdefer v.m.RUnlock()\n\tpath := makeHashPath(c)\n\ts, err := v.getSecret(path, \"passwd\")\n\tswitch s := s.(type) {\n\tcase string:\n\t\treturn s, err\n\tcase []byte:\n\t\treturn string(s), err\n\tcase nil:\n\t\treturn \"\", err\n\tdefault:\n\t\tvar errStr string\n\t\tif err != nil {\n\t\t\terrStr = err.Error()\n\t\t}\n\t\terr := fmt.Errorf(\"The type was wrong fetching the passwd hash from vault: %T -- error, if any: %s\", s, errStr)\n\t\treturn \"\", err\n\t}\n}\n\nfunc (v *vaultSecretStore) deletePasswdHash(c ActorKeyer) error {\n\tv.m.Lock()\n\tdefer v.m.Unlock()\n\tpath := makeHashPath(c)\n\treturn v.deleteSecret(path)\n}\n\n\/\/ funcs to process secrets after fetching them from vault\n\nfunc secretPassThrough(i interface{}) (interface{}, error) {\n\treturn i, nil\n}\n\nfunc secretRSAKey(i interface{}) (interface{}, error) {\n\tp, ok := i.(string)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"not an RSA private key in string form\")\n\t}\n\tpBlock, _ := pem.Decode([]byte(p))\n\tif pBlock == nil {\n\t\treturn nil, fmt.Errorf(\"invalid block size for private key for shovey from vault\")\n\t}\n\tpk, err := x509.ParsePKCS1PrivateKey(pBlock.Bytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pk, nil\n}\n\nfunc convertors(secretType string) secretConvert {\n\tswitch secretType {\n\tcase \"RSAKey\":\n\t\treturn secretRSAKey\n\tdefault:\n\t\treturn secretPassThrough\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package godbg\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"regexp\"\n\t\"runtime\/debug\"\n\t\"strings\"\n)\n\nvar rxDbgLine, _ = regexp.Compile(`^.*[Vv]on[Cc](?:\/prog\/git)?\/senvgo\/main.go:(\\d+)\\s`)\nvar rxDbgFnct, _ = regexp.Compile(`^\\s+(?:com\/VonC\/senvgo)?(?:\\.\\(([^\\)]+)\\))?\\.?([^:]+)`)\n\n\/\/ http:\/\/stackoverflow.com\/a\/23554672\/6309 https:\/\/vividcortex.com\/blog\/2013\/12\/03\/go-idiom-package-and-object\/\n\/\/ you design a type with methods as usual, and then you also place matching functions at the package level itself.\n\/\/ These functions simply delegate to a default instance of the type that’s a private package-level variable, created in an init() function.\n\n\/\/ Pdbg allows to print debug message with indent and function name added\ntype Pdbg struct {\n\tbout *bytes.Buffer\n\tberr *bytes.Buffer\n\tsout *bufio.Writer\n\tserr *bufio.Writer\n}\n\n\/\/ Out returns a writer for normal messages.\n\/\/ By default, os.StdOut\nfunc Out() io.Writer {\n\treturn pdbg.Out()\n}\n\n\/\/ Out returns a writer for normal messages for a given pdbg instance.\n\/\/ By default, os.StdOut\nfunc (pdbg *Pdbg) Out() io.Writer {\n\tif pdbg.sout == nil {\n\t\treturn os.Stdout\n\t}\n\treturn pdbg.sout\n}\n\n\/\/ Err returns a writer for error messages.\n\/\/ By default, os.StdErr\nfunc Err() io.Writer {\n\treturn pdbg.Err()\n}\n\n\/\/ Err returns a writer for error messages for a given pdbg instance.\n\/\/ By default, os.StdErr\nfunc (pdbg *Pdbg) Err() io.Writer {\n\tif pdbg.serr == nil {\n\t\treturn os.Stderr\n\t}\n\treturn pdbg.serr\n}\n\n\/\/ global pdbg used for printing\nvar pdbg = &Pdbg{}\n\n\/\/ Option set an option for a Pdbg\n\/\/ http:\/\/dave.cheney.net\/2014\/10\/17\/functional-options-for-friendly-apis\ntype Option func(*Pdbg)\n\n\/\/ SetBuffers is an option for replacing stdout and stderr by\n\/\/ bytes buffers (in a bufio.Writer).\n\/\/ If apdbg is nil, set for the global pdbg instance\nfunc SetBuffers(apdbg *Pdbg) {\n\tif apdbg == nil {\n\t\tapdbg = pdbg\n\t}\n\tapdbg.bout = bytes.NewBuffer(nil)\n\tapdbg.sout = bufio.NewWriter(apdbg.bout)\n\tapdbg.berr = bytes.NewBuffer(nil)\n\tapdbg.serr = bufio.NewWriter(apdbg.berr)\n}\n\n\/\/ NewPdbg creates a PDbg instance, with options\nfunc NewPdbg(options ...Option) *Pdbg {\n\tnewpdbg := &Pdbg{}\n\tfor _, option := range options {\n\t\toption(newpdbg)\n\t}\n\treturn newpdbg\n}\n\n\/\/ ResetIOs reset the out and err buffer\n\/\/ (unless they were the default stdout and stderr,\n\/\/ in which case it does nothing)\nfunc (pdbg *Pdbg) ResetIOs() {\n\tif pdbg.sout != nil {\n\t\tpdbg.bout = bytes.NewBuffer(nil)\n\t\tpdbg.sout.Reset(pdbg.bout)\n\t\tpdbg.berr = bytes.NewBuffer(nil)\n\t\tpdbg.serr.Reset(pdbg.berr)\n\t}\n}\n\n\/\/ FlushIOs flushes the sout and serr bufio.Writer\nfunc (pdbg *Pdbg) FlushIOs() {\n\tpdbg.sout.Flush()\n\tpdbg.serr.Flush()\n}\n\nfunc pdbgInc(scanner *bufio.Scanner, line string) string {\n\tm := rxDbgLine.FindSubmatchIndex([]byte(line))\n\tif len(m) == 0 {\n\t\treturn \"\"\n\t}\n\tdbgLine := line[m[2]:m[3]]\n\t\/\/ fmt.Printf(\"line '%v', m '%+v'\\n\", line, m)\n\tscanner.Scan()\n\tline = scanner.Text()\n\tmf := rxDbgFnct.FindSubmatchIndex([]byte(line))\n\t\/\/ fmt.Printf(\"lineF '%v', mf '%+v'\\n\", line, mf)\n\tif len(mf) == 0 {\n\t\treturn \"\"\n\t}\n\tdbgFnct := \"\"\n\tif mf[2] > -1 {\n\t\tdbgFnct = line[mf[2]:mf[3]]\n\t}\n\tif dbgFnct != \"\" {\n\t\tdbgFnct = dbgFnct + \".\"\n\t}\n\tdbgFnct = dbgFnct + line[mf[4]:mf[5]]\n\n\treturn dbgFnct + \":\" + dbgLine\n}\n\nfunc pdbgExcluded(dbg string) bool {\n\tif strings.Contains(dbg, \"ReadConfig:\") {\n\t\treturn false\n\t}\n\treturn false\n}\n\n\/\/ Pdbgf uses global Pdbg variable for printing strings, with indent and function name\nfunc Pdbgf(format string, args ...interface{}) string {\n\tmsg := fmt.Sprintf(format+\"\\n\", args...)\n\tmsg = strings.TrimSpace(msg)\n\tbstack := bytes.NewBuffer(debug.Stack())\n\t\/\/ fmt.Printf(\"%+v\", bstack)\n\n\tscanner := bufio.NewScanner(bstack)\n\tpmsg := \"\"\n\tdepth := 0\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tif strings.Contains(line, \"smartystreets\") {\n\t\t\tbreak\n\t\t}\n\t\tm := rxDbgLine.FindSubmatchIndex([]byte(line))\n\t\tif len(m) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif depth > 0 && depth < 4 {\n\t\t\tdbg := pdbgInc(scanner, line)\n\t\t\tif dbg == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif depth == 1 {\n\t\t\t\tif pdbgExcluded(dbg) {\n\t\t\t\t\treturn \"\"\n\t\t\t\t}\n\t\t\t\tpmsg = \"[\" + dbg + \"]\"\n\t\t\t} else {\n\t\t\t\tpmsg = pmsg + \" (\" + dbg + \")\"\n\t\t\t}\n\t\t}\n\t\tdepth = depth + 1\n\t}\n\tspaces := \"\"\n\tif depth >= 2 {\n\t\tspaces = strings.Repeat(\" \", depth-2)\n\t}\n\tres := pmsg\n\tpmsg = spaces + pmsg\n\tmsg = pmsg + \"\\n\" + spaces + \" \" + msg + \"\\n\"\n\t\/\/ fmt.Printf(\"MSG '%v'\\n\", msg)\n\tfmt.Fprint(os.Stderr, fmt.Sprint(msg))\n\treturn res\n}\n<commit_msg>Removes FlushIOs, will be done in String() methods<commit_after>package godbg\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"regexp\"\n\t\"runtime\/debug\"\n\t\"strings\"\n)\n\nvar rxDbgLine, _ = regexp.Compile(`^.*[Vv]on[Cc](?:\/prog\/git)?\/senvgo\/main.go:(\\d+)\\s`)\nvar rxDbgFnct, _ = regexp.Compile(`^\\s+(?:com\/VonC\/senvgo)?(?:\\.\\(([^\\)]+)\\))?\\.?([^:]+)`)\n\n\/\/ http:\/\/stackoverflow.com\/a\/23554672\/6309 https:\/\/vividcortex.com\/blog\/2013\/12\/03\/go-idiom-package-and-object\/\n\/\/ you design a type with methods as usual, and then you also place matching functions at the package level itself.\n\/\/ These functions simply delegate to a default instance of the type that’s a private package-level variable, created in an init() function.\n\n\/\/ Pdbg allows to print debug message with indent and function name added\ntype Pdbg struct {\n\tbout *bytes.Buffer\n\tberr *bytes.Buffer\n\tsout *bufio.Writer\n\tserr *bufio.Writer\n}\n\n\/\/ Out returns a writer for normal messages.\n\/\/ By default, os.StdOut\nfunc Out() io.Writer {\n\treturn pdbg.Out()\n}\n\n\/\/ Out returns a writer for normal messages for a given pdbg instance.\n\/\/ By default, os.StdOut\nfunc (pdbg *Pdbg) Out() io.Writer {\n\tif pdbg.sout == nil {\n\t\treturn os.Stdout\n\t}\n\treturn pdbg.sout\n}\n\n\/\/ Err returns a writer for error messages.\n\/\/ By default, os.StdErr\nfunc Err() io.Writer {\n\treturn pdbg.Err()\n}\n\n\/\/ Err returns a writer for error messages for a given pdbg instance.\n\/\/ By default, os.StdErr\nfunc (pdbg *Pdbg) Err() io.Writer {\n\tif pdbg.serr == nil {\n\t\treturn os.Stderr\n\t}\n\treturn pdbg.serr\n}\n\n\/\/ global pdbg used for printing\nvar pdbg = &Pdbg{}\n\n\/\/ Option set an option for a Pdbg\n\/\/ http:\/\/dave.cheney.net\/2014\/10\/17\/functional-options-for-friendly-apis\ntype Option func(*Pdbg)\n\n\/\/ SetBuffers is an option for replacing stdout and stderr by\n\/\/ bytes buffers (in a bufio.Writer).\n\/\/ If apdbg is nil, set for the global pdbg instance\nfunc SetBuffers(apdbg *Pdbg) {\n\tif apdbg == nil {\n\t\tapdbg = pdbg\n\t}\n\tapdbg.bout = bytes.NewBuffer(nil)\n\tapdbg.sout = bufio.NewWriter(apdbg.bout)\n\tapdbg.berr = bytes.NewBuffer(nil)\n\tapdbg.serr = bufio.NewWriter(apdbg.berr)\n}\n\n\/\/ NewPdbg creates a PDbg instance, with options\nfunc NewPdbg(options ...Option) *Pdbg {\n\tnewpdbg := &Pdbg{}\n\tfor _, option := range options {\n\t\toption(newpdbg)\n\t}\n\treturn newpdbg\n}\n\n\/\/ ResetIOs reset the out and err buffer\n\/\/ (unless they were the default stdout and stderr,\n\/\/ in which case it does nothing)\nfunc (pdbg *Pdbg) ResetIOs() {\n\tif pdbg.sout != nil {\n\t\tpdbg.bout = bytes.NewBuffer(nil)\n\t\tpdbg.sout.Reset(pdbg.bout)\n\t\tpdbg.berr = bytes.NewBuffer(nil)\n\t\tpdbg.serr.Reset(pdbg.berr)\n\t}\n}\n\nfunc pdbgInc(scanner *bufio.Scanner, line string) string {\n\tm := rxDbgLine.FindSubmatchIndex([]byte(line))\n\tif len(m) == 0 {\n\t\treturn \"\"\n\t}\n\tdbgLine := line[m[2]:m[3]]\n\t\/\/ fmt.Printf(\"line '%v', m '%+v'\\n\", line, m)\n\tscanner.Scan()\n\tline = scanner.Text()\n\tmf := rxDbgFnct.FindSubmatchIndex([]byte(line))\n\t\/\/ fmt.Printf(\"lineF '%v', mf '%+v'\\n\", line, mf)\n\tif len(mf) == 0 {\n\t\treturn \"\"\n\t}\n\tdbgFnct := \"\"\n\tif mf[2] > -1 {\n\t\tdbgFnct = line[mf[2]:mf[3]]\n\t}\n\tif dbgFnct != \"\" {\n\t\tdbgFnct = dbgFnct + \".\"\n\t}\n\tdbgFnct = dbgFnct + line[mf[4]:mf[5]]\n\n\treturn dbgFnct + \":\" + dbgLine\n}\n\nfunc pdbgExcluded(dbg string) bool {\n\tif strings.Contains(dbg, \"ReadConfig:\") {\n\t\treturn false\n\t}\n\treturn false\n}\n\n\/\/ Pdbgf uses global Pdbg variable for printing strings, with indent and function name\nfunc Pdbgf(format string, args ...interface{}) string {\n\tmsg := fmt.Sprintf(format+\"\\n\", args...)\n\tmsg = strings.TrimSpace(msg)\n\tbstack := bytes.NewBuffer(debug.Stack())\n\t\/\/ fmt.Printf(\"%+v\", bstack)\n\n\tscanner := bufio.NewScanner(bstack)\n\tpmsg := \"\"\n\tdepth := 0\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tif strings.Contains(line, \"smartystreets\") {\n\t\t\tbreak\n\t\t}\n\t\tm := rxDbgLine.FindSubmatchIndex([]byte(line))\n\t\tif len(m) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif depth > 0 && depth < 4 {\n\t\t\tdbg := pdbgInc(scanner, line)\n\t\t\tif dbg == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif depth == 1 {\n\t\t\t\tif pdbgExcluded(dbg) {\n\t\t\t\t\treturn \"\"\n\t\t\t\t}\n\t\t\t\tpmsg = \"[\" + dbg + \"]\"\n\t\t\t} else {\n\t\t\t\tpmsg = pmsg + \" (\" + dbg + \")\"\n\t\t\t}\n\t\t}\n\t\tdepth = depth + 1\n\t}\n\tspaces := \"\"\n\tif depth >= 2 {\n\t\tspaces = strings.Repeat(\" \", depth-2)\n\t}\n\tres := pmsg\n\tpmsg = spaces + pmsg\n\tmsg = pmsg + \"\\n\" + spaces + \" \" + msg + \"\\n\"\n\t\/\/ fmt.Printf(\"MSG '%v'\\n\", msg)\n\tfmt.Fprint(os.Stderr, fmt.Sprint(msg))\n\treturn res\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package gorse holds functions common to the different tools making up the\n\/\/ project.\npackage gorse\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"time\"\n)\n\n\/\/ ReadState holds an item's state (rss_item_state table, read_state type).\ntype ReadState int\n\nconst (\n\t\/\/ Unread means the item is not yet read.\n\tUnread ReadState = iota\n\t\/\/ Read means the item was read.\n\tRead\n\t\/\/ ReadLater means to save the item to read later.\n\tReadLater\n)\n\n\/\/ DBItem represents an item in the database.\ntype DBItem struct {\n\tID int64\n\tTitle string\n\tDescription string\n\tLink string\n\tRSSFeedID int64\n\tPublicationDate time.Time\n\tGUID *string\n}\n\n\/\/ ErrItemNotFound means the item was not found in the database.\nvar ErrItemNotFound = fmt.Errorf(\"item not found\")\n\n\/\/ DBSetItemReadState sets the item's read state for the user.\nfunc DBSetItemReadState(db *sql.DB, id int64, userID int,\n\tstate ReadState) error {\n\t\/\/ Upsert.\n\tquery := `\nINSERT INTO rss_item_state\n(user_id, item_id, state)\nVALUES($1, $2, $3)\nON CONFLICT (user_id, item_id) DO UPDATE\nSET state = $4\n`\n\t_, err := db.Exec(query, userID, id, state.String(), state.String())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to set read state on item: %d: %s\", id, err)\n\t}\n\n\treturn nil\n}\n\n\/\/ Turn read state into the enumerated type in the database (read_state).\nfunc (s ReadState) String() string {\n\tif s == Unread {\n\t\treturn \"unread\"\n\t}\n\tif s == Read {\n\t\treturn \"read\"\n\t}\n\treturn \"read-later\"\n}\n\n\/\/ FindItemByLink retrieves an item's information from the database by feed and\n\/\/ link. Link is unique per feed.\nfunc FindItemByLink(db *sql.DB, feedID int64, link string) (*DBItem, error) {\n\tquery := `\nSELECT\nid, title, description, link, rss_feed_id, publication_date, guid\nFROM rss_item\nWHERE rss_feed_id = $1 AND\nlink = $2\n`\n\n\trows, err := db.Query(query, feedID, link)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor rows.Next() {\n\t\titem := &DBItem{}\n\n\t\tif err := rows.Scan(&item.ID, &item.Title, &item.Description, &item.Link,\n\t\t\t&item.RSSFeedID, &item.PublicationDate, &item.GUID); err != nil {\n\t\t\t_ = rows.Close()\n\t\t\treturn nil, fmt.Errorf(\"failed to scan row: %s\", err)\n\t\t}\n\n\t\tif err := rows.Close(); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error closing rows: %s\", err)\n\t\t}\n\n\t\treturn item, nil\n\t}\n\n\tif err := rows.Err(); err != nil {\n\t\treturn nil, fmt.Errorf(\"failure fetching rows: %s\", err)\n\t}\n\n\treturn nil, ErrItemNotFound\n}\n<commit_msg>Do not loop when we expect one row<commit_after>\/\/ Package gorse holds functions common to the different tools making up the\n\/\/ project.\npackage gorse\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"time\"\n)\n\n\/\/ ReadState holds an item's state (rss_item_state table, read_state type).\ntype ReadState int\n\nconst (\n\t\/\/ Unread means the item is not yet read.\n\tUnread ReadState = iota\n\t\/\/ Read means the item was read.\n\tRead\n\t\/\/ ReadLater means to save the item to read later.\n\tReadLater\n)\n\n\/\/ DBItem represents an item in the database.\ntype DBItem struct {\n\tID int64\n\tTitle string\n\tDescription string\n\tLink string\n\tRSSFeedID int64\n\tPublicationDate time.Time\n\tGUID *string\n}\n\n\/\/ DBSetItemReadState sets the item's read state for the user.\nfunc DBSetItemReadState(db *sql.DB, id int64, userID int,\n\tstate ReadState) error {\n\t\/\/ Upsert.\n\tquery := `\nINSERT INTO rss_item_state\n(user_id, item_id, state)\nVALUES($1, $2, $3)\nON CONFLICT (user_id, item_id) DO UPDATE\nSET state = $4\n`\n\t_, err := db.Exec(query, userID, id, state.String(), state.String())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to set read state on item: %d: %s\", id, err)\n\t}\n\n\treturn nil\n}\n\n\/\/ Turn read state into the enumerated type in the database (read_state).\nfunc (s ReadState) String() string {\n\tif s == Unread {\n\t\treturn \"unread\"\n\t}\n\tif s == Read {\n\t\treturn \"read\"\n\t}\n\treturn \"read-later\"\n}\n\n\/\/ FindItemByLink retrieves an item's information from the database by feed and\n\/\/ link. Link is unique per feed.\nfunc FindItemByLink(db *sql.DB, feedID int64, link string) (*DBItem, error) {\n\tquery := `\nSELECT\nid, title, description, link, rss_feed_id, publication_date, guid\nFROM rss_item\nWHERE rss_feed_id = $1 AND\nlink = $2\n`\n\n\trow := db.QueryRow(query, feedID, link)\n\titem := &DBItem{}\n\tif err := row.Scan(\n\t\t&item.ID,\n\t\t&item.Title,\n\t\t&item.Description,\n\t\t&item.Link,\n\t\t&item.RSSFeedID,\n\t\t&item.PublicationDate,\n\t\t&item.GUID,\n\t); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to scan row: %s\", err)\n\t}\n\n\treturn item, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package Gotem\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n)\n\ntype Point struct {\n\tx, y int\n}\n\nvar (\n\t\/\/ lazyDll acts as a sort of singleton.\n\tlazyDll *syscall.LazyDLL\n)\n\n\n\nfunc MoveMouseTo(p Point) {\n\tif lazyDll == nil {\n\t\tlazyDll = syscall.NewLazyDLL(\"user32.dll\")\n\t}\n\tproc := lazyDll.NewProc(\"SetPhysicalCursorPos\")\n\tproc.Call(uintptr(p.x),uintptr(p.y))\n}\n\nfunc GetMousePosition() Point{\n\ttype mosPoint struct {\n\t\tx, y int32\n\t}\n\tif lazyDll == nil {\n\t\tlazyDll = syscall.NewLazyDLL(\"user32.dll\")\n\t}\n\tvar ran mosPoint\n\tproc := lazyDll.NewProc(\"GetPhysicalCursorPos\")\n\tproc.Call(uintptr(unsafe.Pointer(&ran)))\n\treturn Point{int(ran.x),int(ran.y)}\n\n}\n\nfunc PressKey(key uintptr, isUp bool) {\n\tif lazyDll == nil {\n\t\tlazyDll = syscall.NewLazyDLL(\"user32.dll\")\n\t}\n\t\/\/ The reason I am using keybd_event instead of SendInput is because I have no idea how to create\n\t\/\/ the Input structure that is needed to be passed in using Go since it uses a union.\n\tproc := lazyDll.NewProc(\"keybd_event\")\n\tvar dwFlags uintptr\n\tif isUp {\n\t\tdwFlags = 0x02\n\t} else {\n\t\tdwFlags = 0x0\n\t}\n\tproc.Call(key, 0, dwFlags, 0)\n}\n\n<commit_msg>Add main function<commit_after>package Gotem\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n)\n\ntype Point struct {\n\tx, y int\n}\n\nvar (\n\t\/\/ lazyDll acts as a sort of singleton.\n\tlazyDll *syscall.LazyDLL\n)\n\n\n\nfunc MoveMouseTo(p Point) {\n\tif lazyDll == nil {\n\t\tlazyDll = syscall.NewLazyDLL(\"user32.dll\")\n\t}\n\tproc := lazyDll.NewProc(\"SetPhysicalCursorPos\")\n\tproc.Call(uintptr(p.x),uintptr(p.y))\n}\n\nfunc GetMousePosition() Point{\n\ttype mosPoint struct {\n\t\tx, y int32\n\t}\n\tif lazyDll == nil {\n\t\tlazyDll = syscall.NewLazyDLL(\"user32.dll\")\n\t}\n\tvar ran mosPoint\n\tproc := lazyDll.NewProc(\"GetPhysicalCursorPos\")\n\tproc.Call(uintptr(unsafe.Pointer(&ran)))\n\treturn Point{int(ran.x),int(ran.y)}\n\n}\n\nfunc PressKey(key uintptr, isUp bool) {\n\tif lazyDll == nil {\n\t\tlazyDll = syscall.NewLazyDLL(\"user32.dll\")\n\t}\n\t\/\/ The reason I am using keybd_event instead of SendInput is because I have no idea how to create\n\t\/\/ the Input structure that is needed to be passed in using Go since it uses a union.\n\tproc := lazyDll.NewProc(\"keybd_event\")\n\tvar dwFlags uintptr\n\tif isUp {\n\t\tdwFlags = 0x02\n\t} else {\n\t\tdwFlags = 0x0\n\t}\n\tproc.Call(key, 0, dwFlags, 0)\n}\n\n\/\/ for some reason I need a main even if it's a library?\nfunc main() {}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package react returns a random reaction parsed from a json file, see example json file for structure\npackage react\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\n\t\"github.com\/nboughton\/config\/parser\"\n\t\"github.com\/nboughton\/utils\"\n\t\"golang.org\/x\/exp\/inotify\"\n)\n\n\/\/ Item is the match text and possible responses for a reacion\ntype Item struct {\n\tText string\n\tResp []string\n}\n\n\/\/ React contains reactions from a reactions.json file\ntype React struct {\n\tItems []Item\n}\n\nvar (\n\t\/\/ Reactions is the struct that the reactions.json file is parsed into\n\tReactions React\n\treactRegex *regexp.Regexp\n\treactFile *string\n)\n\nfunc init() {\n\treactFile = flag.String(\"react\", \"reactions.json\", \"Path to reactions file\")\n\tflag.Parse()\n\n\treadFile()\n\tgenRegex()\n\n\twatcher, err := inotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Panicln(err)\n\t}\n\n\terr = watcher.Watch(*reactFile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase ev := <-watcher.Event:\n\t\t\t\t\/\/ if modified reload\n\t\t\t\tif ev.Mask == inotify.IN_MODIFY {\n\t\t\t\t\treadFile()\n\t\t\t\t\tgenRegex()\n\t\t\t\t}\n\n\t\t\tcase err := <-watcher.Error:\n\t\t\t\tlog.Println(\"error:\", err)\n\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ Match tests to see if a string matches a listed string\nfunc Match(s string) bool {\n\treturn reactRegex.MatchString(s)\n}\n\n\/\/ Respond returns a random response from the first reaction to match the generated regex\nfunc Respond(s string) (string, error) {\n\tstr := reactRegex.FindAllString(s, 1)[0]\n\tfor _, r := range Reactions.Items {\n\t\tif r.Text == str {\n\t\t\treturn utils.RandS(r.Resp), nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"No response found\")\n}\n\nfunc readFile() {\n\terr := parser.NewParser(*reactFile).Scan(&Reactions)\n\tif err != nil {\n\t\tlog.Panicln(err)\n\t}\n}\n\nfunc genRegex() {\n\ts := \"(\"\n\tfor idx, i := range Reactions.Items {\n\t\ts += regexp.QuoteMeta(i.Text)\n\t\tif idx < len(Reactions.Items)-1 {\n\t\t\ts += \"|\"\n\t\t}\n\t}\n\ts += \")\"\n\n\treactRegex = regexp.MustCompile(s)\n}\n<commit_msg>fixed modify reload<commit_after>\/\/ Package react returns a random reaction parsed from a json file, see example json file for structure\npackage react\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\n\t\"github.com\/nboughton\/config\/parser\"\n\t\"github.com\/nboughton\/utils\"\n\t\"golang.org\/x\/exp\/inotify\"\n)\n\n\/\/ Item is the match text and possible responses for a reacion\ntype Item struct {\n\tText string\n\tResp []string\n}\n\n\/\/ React contains reactions from a reactions.json file\ntype React struct {\n\tItems []Item\n}\n\nvar (\n\t\/\/ Reactions is the struct that the reactions.json file is parsed into\n\tReactions React\n\treactRegex *regexp.Regexp\n\treactFile *string\n)\n\nfunc init() {\n\treactFile = flag.String(\"react\", \"reactions.json\", \"Path to reactions file\")\n\tflag.Parse()\n\n\treadFile()\n\tgenRegex()\n\n\twatcher, err := inotify.NewWatcher()\n\tif err != nil {\n\t\tlog.Panicln(err)\n\t}\n\n\terr = watcher.Watch(*reactFile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase ev := <-watcher.Event:\n\t\t\t\t\/\/ if modified reload\n\t\t\t\tif ev.Mask == inotify.IN_CLOSE_WRITE {\n\t\t\t\t\treadFile()\n\t\t\t\t\tgenRegex()\n\t\t\t\t}\n\n\t\t\tcase err := <-watcher.Error:\n\t\t\t\tlog.Println(\"error:\", err)\n\n\t\t\t}\n\t\t}\n\t}()\n}\n\n\/\/ Match tests to see if a string matches a listed string\nfunc Match(s string) bool {\n\treturn reactRegex.MatchString(s)\n}\n\n\/\/ Respond returns a random response from the first reaction to match the generated regex\nfunc Respond(s string) (string, error) {\n\tstr := reactRegex.FindAllString(s, 1)[0]\n\tfor _, r := range Reactions.Items {\n\t\tif r.Text == str {\n\t\t\treturn utils.RandS(r.Resp), nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"No response found\")\n}\n\nfunc readFile() {\n\terr := parser.NewParser(*reactFile).Scan(&Reactions)\n\tif err != nil {\n\t\tlog.Panicln(err)\n\t}\n}\n\nfunc genRegex() {\n\ts := \"(\"\n\tfor idx, i := range Reactions.Items {\n\t\ts += regexp.QuoteMeta(i.Text)\n\t\tif idx < len(Reactions.Items)-1 {\n\t\t\ts += \"|\"\n\t\t}\n\t}\n\ts += \")\"\n\n\treactRegex = regexp.MustCompile(s)\n}\n<|endoftext|>"} {"text":"<commit_before>package fit_test\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/tormoder\/fit\"\n)\n\nvar (\n\tupdate = flag.Bool(\"update\", false, \"update .golden output and table for decode test files if their fingerprint differs\")\n\tfupdate = flag.Bool(\"fupdate\", false, \"force regeneration of decode test files table\")\n\tfdecode = flag.Bool(\"fdecode\", false, \"force decode golden part of decode test irregardless of Go version\")\n)\n\nvar (\n\tactivitySmallMu sync.Mutex\n\tactivitySmallOnce sync.Once\n\tactivitySmallData []byte\n)\n\nfunc activitySmall() []byte {\n\tactivitySmallMu.Lock()\n\tdefer activitySmallMu.Unlock()\n\tactivitySmallOnce.Do(func() {\n\t\tasd, err := ioutil.ReadFile(activitySmallPath)\n\t\tif err != nil {\n\t\t\terrDesc := fmt.Sprintf(\"parseActivitySmallData failed: %v\", err)\n\t\t\tpanic(errDesc)\n\t\t}\n\t\tactivitySmallData = asd\n\t})\n\treturn activitySmallData\n}\n\nvar (\n\tactivitySmallPath = filepath.Join(tdfolder, \"me\", \"activity-small-fenix2-run.fit\")\n\tactivityLargePath = filepath.Join(tdfolder, \"me\", \"activity-large-fenxi2-multisport.fit\")\n\tactivityComponentsPath = filepath.Join(tdfolder, \"dcrainmaker\", \"Edge810-Vector-2013-08-16-15-35-10.fit\")\n\tmonitoringPath = filepath.Join(tdfolder, \"fitsdk\", \"MonitoringFile.fit\")\n)\n\nconst (\n\tgoldenSuffix = \".golden\"\n\tcurrentSuffix = \".current\"\n\tgzSuffix = \".gz\"\n\ttdfolder = \"testdata\"\n)\n\nfunc TestMain(m *testing.M) {\n\tflag.Parse()\n\tos.Exit(m.Run())\n}\n\nfunc TestDecode(t *testing.T) {\n\tconst goMajorVersionForDecodeGolden = \"go1.8\"\n\ttestDecodeGolden := true\n\tgoVersion := runtime.Version()\n\tgoVersionOK := strings.HasPrefix(goVersion, goMajorVersionForDecodeGolden)\n\tswitch {\n\tcase !goVersionOK && !*fdecode:\n\t\ttestDecodeGolden = false\n\t\tt.Logf(\n\t\t\t\"skipping golden decode part of test due to Go version (enabled for %s.x, have %q)\",\n\t\t\tgoMajorVersionForDecodeGolden,\n\t\t\tgoVersion,\n\t\t)\n\tcase !goVersionOK && *fdecode:\n\t\tt.Logf(\n\t\t\t\"override: performing golden decode part of test for Go version %q (default only for %s.x)\",\n\t\t\tgoVersion,\n\t\t\tgoMajorVersionForDecodeGolden,\n\t\t)\n\tdefault:\n\t}\n\n\tregenTestTable := struct {\n\t\tsync.Mutex \/\/ Protects val and decodeTestFiles slice in reader_util_test.go.\n\t\tval bool\n\t}{}\n\n\tt.Run(\"Group\", func(t *testing.T) {\n\t\tfor i, file := range decodeTestFiles {\n\t\t\ti, file := i, file \/\/ Capture range variables.\n\t\t\tt.Run(fmt.Sprintf(\"%s\/%s\", file.folder, file.name), func(t *testing.T) {\n\t\t\t\tt.Parallel()\n\t\t\t\tfpath := filepath.Join(tdfolder, file.folder, file.name)\n\t\t\t\tdata, err := ioutil.ReadFile(fpath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"reading file failed: %v\", err)\n\t\t\t\t}\n\t\t\t\tfitFile, err := fit.Decode(bytes.NewReader(data), file.dopts.opts()...)\n\t\t\t\tif !file.wantErr && err != nil {\n\t\t\t\t\tt.Fatalf(\"got error, want none; error is: %v\", err)\n\t\t\t\t}\n\t\t\t\tif file.wantErr && err == nil {\n\t\t\t\t\tt.Fatalf(\"got no error, want error\")\n\t\t\t\t}\n\t\t\t\tif !testDecodeGolden {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif file.fingerprint == 0 {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tfp := fitFingerprint(fitFile)\n\t\t\t\tif fp == file.fingerprint {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tt.Errorf(\"fit file fingerprint differs: got: %d, want: %d\", fp, file.fingerprint)\n\t\t\t\tif !*update {\n\t\t\t\t\tfpath = fpath + currentSuffix\n\t\t\t\t} else {\n\t\t\t\t\tfpath = fpath + goldenSuffix\n\t\t\t\t}\n\t\t\t\tif file.compress {\n\t\t\t\t\tfpath = fpath + gzSuffix\n\t\t\t\t}\n\t\t\t\terr = fitUtterDump(fitFile, fpath, file.compress)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"error writing output: %v\", err)\n\t\t\t\t}\n\t\t\t\tif !*update {\n\t\t\t\t\tt.Logf(\"current output written to: %s\", fpath)\n\t\t\t\t\tt.Logf(\"use a diff tool to compare (e.g. zdiff if compressed)\")\n\t\t\t\t} else {\n\t\t\t\t\tregenTestTable.Lock()\n\t\t\t\t\tregenTestTable.val = true\n\t\t\t\t\tdecodeTestFiles[i].fingerprint = fp\n\t\t\t\t\tregenTestTable.Unlock()\n\t\t\t\t\tt.Logf(\"%q has been updated\", fpath)\n\t\t\t\t\tt.Logf(\"new fingerprint is: %d, update test case in reader_test.go\", fp)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tif regenTestTable.val || *fupdate {\n\t\tt.Logf(\"regenerating table for decode test files...\")\n\t\terr := regenerateDecodeTestTable()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"error regenerating table for decode test files: %v\", err)\n\t\t}\n\t}\n}\n\nfunc TestDecodeChained(t *testing.T) {\n\tchainedTestFiles := []struct {\n\t\tfpath string\n\t\tdfiles int\n\t\twantErr bool\n\t\tdesc string\n\t}{\n\t\t{\n\t\t\tfilepath.Join(tdfolder, \"fitsdk\", \"Activity.fit\"),\n\t\t\t1,\n\t\t\tfalse,\n\t\t\t\"single valid fit file\",\n\t\t},\n\t\t{\n\t\t\tfilepath.Join(tdfolder, \"chained\", \"activity-settings.fit\"),\n\t\t\t2,\n\t\t\tfalse,\n\t\t\t\"two valid chained fit files\",\n\t\t},\n\t\t{\n\t\t\tfilepath.Join(tdfolder, \"chained\", \"activity-activity-filecrc.fit\"),\n\t\t\t2,\n\t\t\ttrue,\n\t\t\t\"one valid fit file + one fit file with wrong crc\",\n\t\t},\n\t\t{\n\t\t\tfilepath.Join(tdfolder, \"chained\", \"activity-settings-corruptheader.fit\"),\n\t\t\t1,\n\t\t\ttrue,\n\t\t\t\"one valid fit file + one fit file with corrupt header\",\n\t\t},\n\t\t{\n\t\t\tfilepath.Join(tdfolder, \"chained\", \"activity-settings-nodata.fit\"),\n\t\t\t2,\n\t\t\ttrue,\n\t\t\t\"one valid fit file + one fit file with ok header but no data\",\n\t\t},\n\t}\n\n\tfor _, ctf := range chainedTestFiles {\n\t\tctf := ctf\n\t\tt.Run(ctf.fpath, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tdata, err := ioutil.ReadFile(ctf.fpath)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"reading file data failed: %v\", err)\n\t\t\t}\n\t\t\tfitFiles, err := fit.DecodeChained(bytes.NewReader(data))\n\t\t\tif !ctf.wantErr && err != nil {\n\t\t\t\tt.Fatalf(\"got error, want none; error is: %v\", err)\n\t\t\t}\n\t\t\tif ctf.wantErr && err == nil {\n\t\t\t\tt.Fatalf(\"got no error, want error\")\n\t\t\t}\n\t\t\tif len(fitFiles) != ctf.dfiles {\n\t\t\t\tt.Fatalf(\"got %d decoded fit file(s), want %d\", len(fitFiles), ctf.dfiles)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCheckIntegrity(t *testing.T) {\n\tt.Run(\"ActivitySmall\", func(t *testing.T) {\n\t\terr := fit.CheckIntegrity(bytes.NewReader(activitySmall()), false)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%q: failed: %v\", activitySmallPath, err)\n\t\t}\n\t})\n\tt.Run(\"ActivitySDK\", func(t *testing.T) {\n\t\tfpath := filepath.Join(tdfolder, \"fitsdk\", \"Activity.fit\")\n\t\tdata, err := ioutil.ReadFile(fpath)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"reading %q failed: %v\", fpath, err)\n\t\t}\n\t\terr = fit.CheckIntegrity(bytes.NewReader(data), false)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%q: failed: %v\", fpath, err)\n\t\t}\n\t})\n}\n\nfunc TestDecodeHeader(t *testing.T) {\n\twantHeader := fit.Header{\n\t\tSize: 0xe,\n\t\tProtocolVersion: 0x10,\n\t\tProfileVersion: 0x457,\n\t\tDataSize: 0x1dbdf,\n\t\tDataType: [4]uint8{0x2e, 0x46, 0x49, 0x54},\n\t\tCRC: 0x1ec4,\n\t}\n\tgotHeader, err := fit.DecodeHeader(bytes.NewReader(activitySmall()))\n\tif err != nil {\n\t\tt.Errorf(\"%q: failed: %v\", activitySmallPath, err)\n\t}\n\tif gotHeader != wantHeader {\n\t\tt.Errorf(\"got header:\\n%#v\\nwant header: %#v\", gotHeader, wantHeader)\n\t}\n}\n\nfunc TestDecodeHeaderAndFileID(t *testing.T) {\n\twantHeader := fit.Header{\n\t\tSize: 0xe,\n\t\tProtocolVersion: 0x10,\n\t\tProfileVersion: 0x457,\n\t\tDataSize: 0x1dbdf,\n\t\tDataType: [4]uint8{0x2e, 0x46, 0x49, 0x54},\n\t\tCRC: 0x1ec4,\n\t}\n\ttc := time.Unix(1439652761, 0)\n\ttc = tc.UTC()\n\twantFileId := fit.FileIdMsg{\n\t\tType: 0x4,\n\t\tManufacturer: 0x1,\n\t\tProduct: 0x7af,\n\t\tSerialNumber: 0xe762d9cf,\n\t\tNumber: 0xffff,\n\t\tTimeCreated: tc,\n\t\tProductName: \"\",\n\t}\n\n\tgotHeader, gotFileId, err := fit.DecodeHeaderAndFileID(bytes.NewReader(activitySmall()))\n\tif err != nil {\n\t\tt.Errorf(\"%q: failed: %v\", activitySmallPath, err)\n\t}\n\tif gotHeader != wantHeader {\n\t\tt.Errorf(\"%q:\\ngot header:\\n%#v\\nwant header:\\n%#v\", activitySmallPath, gotHeader, wantHeader)\n\t}\n\tif gotFileId != wantFileId {\n\t\tt.Errorf(\"%q:\\ngot FileIdMsg:\\n%v\\nwant FileIdMsg:\\n%v\", activitySmallPath, gotFileId, wantFileId)\n\t}\n}\n\nfunc BenchmarkDecode(b *testing.B) {\n\tfiles := []struct {\n\t\tdesc, path string\n\t}{\n\t\t{\"ActivitySmall\", activitySmallPath},\n\t\t{\"ActivityLarge\", activityLargePath},\n\t\t{\"ActivityWithComponents\", activityComponentsPath},\n\t\t{\"MonitoringFile\", monitoringPath},\n\t}\n\tfor _, file := range files {\n\t\tb.Run(file.desc, func(b *testing.B) {\n\t\t\tdata, err := ioutil.ReadFile(file.path)\n\t\t\tif err != nil {\n\t\t\t\tb.Fatalf(\"%q: error reading file: %v\", file.path, err)\n\t\t\t}\n\t\t\tb.ReportAllocs()\n\t\t\tb.SetBytes(int64(len(data)))\n\t\t\tb.ResetTimer()\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t_, err := fit.Decode(bytes.NewReader(data))\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Fatalf(\"%q: error decoding file: %v\", file.path, err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc BenchmarkDecodeActivityLargeParallel(b *testing.B) {\n\tdata, err := ioutil.ReadFile(activityLargePath)\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tb.ReportAllocs()\n\tb.SetBytes(int64(len(data)))\n\tb.ResetTimer()\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\t_, err := fit.Decode(bytes.NewReader(data))\n\t\t\tif err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc BenchmarkDecodeHeader(b *testing.B) {\n\tdata := activitySmall()\n\tb.ReportAllocs()\n\tb.SetBytes(int64(len(data)))\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err := fit.DecodeHeader(bytes.NewReader(data))\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"%q: error decoding header: %v\", activitySmallPath, err)\n\t\t}\n\t}\n\n}\n\nfunc BenchmarkDecodeHeaderAndFileID(b *testing.B) {\n\tdata := activitySmall()\n\tb.ReportAllocs()\n\tb.SetBytes(int64(len(data)))\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, _, err := fit.DecodeHeaderAndFileID(bytes.NewReader(data))\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"%q: error decoding header\/fileid: %v\", activitySmallPath, err)\n\t\t}\n\t}\n}\n<commit_msg>reader_test: update Go version (1.9) used for golden decode tests<commit_after>package fit_test\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/tormoder\/fit\"\n)\n\nvar (\n\tupdate = flag.Bool(\"update\", false, \"update .golden output and table for decode test files if their fingerprint differs\")\n\tfupdate = flag.Bool(\"fupdate\", false, \"force regeneration of decode test files table\")\n\tfdecode = flag.Bool(\"fdecode\", false, \"force decode golden part of decode test irregardless of Go version\")\n)\n\nvar (\n\tactivitySmallMu sync.Mutex\n\tactivitySmallOnce sync.Once\n\tactivitySmallData []byte\n)\n\nfunc activitySmall() []byte {\n\tactivitySmallMu.Lock()\n\tdefer activitySmallMu.Unlock()\n\tactivitySmallOnce.Do(func() {\n\t\tasd, err := ioutil.ReadFile(activitySmallPath)\n\t\tif err != nil {\n\t\t\terrDesc := fmt.Sprintf(\"parseActivitySmallData failed: %v\", err)\n\t\t\tpanic(errDesc)\n\t\t}\n\t\tactivitySmallData = asd\n\t})\n\treturn activitySmallData\n}\n\nvar (\n\tactivitySmallPath = filepath.Join(tdfolder, \"me\", \"activity-small-fenix2-run.fit\")\n\tactivityLargePath = filepath.Join(tdfolder, \"me\", \"activity-large-fenxi2-multisport.fit\")\n\tactivityComponentsPath = filepath.Join(tdfolder, \"dcrainmaker\", \"Edge810-Vector-2013-08-16-15-35-10.fit\")\n\tmonitoringPath = filepath.Join(tdfolder, \"fitsdk\", \"MonitoringFile.fit\")\n)\n\nconst (\n\tgoldenSuffix = \".golden\"\n\tcurrentSuffix = \".current\"\n\tgzSuffix = \".gz\"\n\ttdfolder = \"testdata\"\n)\n\nfunc TestMain(m *testing.M) {\n\tflag.Parse()\n\tos.Exit(m.Run())\n}\n\nfunc TestDecode(t *testing.T) {\n\tconst goMajorVersionForDecodeGolden = \"go1.9\"\n\ttestDecodeGolden := true\n\tgoVersion := runtime.Version()\n\tgoVersionOK := strings.HasPrefix(goVersion, goMajorVersionForDecodeGolden)\n\tswitch {\n\tcase !goVersionOK && !*fdecode:\n\t\ttestDecodeGolden = false\n\t\tt.Logf(\n\t\t\t\"skipping golden decode part of test due to Go version (enabled for %s.x, have %q)\",\n\t\t\tgoMajorVersionForDecodeGolden,\n\t\t\tgoVersion,\n\t\t)\n\tcase !goVersionOK && *fdecode:\n\t\tt.Logf(\n\t\t\t\"override: performing golden decode part of test for Go version %q (default only for %s.x)\",\n\t\t\tgoVersion,\n\t\t\tgoMajorVersionForDecodeGolden,\n\t\t)\n\tdefault:\n\t}\n\n\tregenTestTable := struct {\n\t\tsync.Mutex \/\/ Protects val and decodeTestFiles slice in reader_util_test.go.\n\t\tval bool\n\t}{}\n\n\tt.Run(\"Group\", func(t *testing.T) {\n\t\tfor i, file := range decodeTestFiles {\n\t\t\ti, file := i, file \/\/ Capture range variables.\n\t\t\tt.Run(fmt.Sprintf(\"%s\/%s\", file.folder, file.name), func(t *testing.T) {\n\t\t\t\tt.Parallel()\n\t\t\t\tfpath := filepath.Join(tdfolder, file.folder, file.name)\n\t\t\t\tdata, err := ioutil.ReadFile(fpath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"reading file failed: %v\", err)\n\t\t\t\t}\n\t\t\t\tfitFile, err := fit.Decode(bytes.NewReader(data), file.dopts.opts()...)\n\t\t\t\tif !file.wantErr && err != nil {\n\t\t\t\t\tt.Fatalf(\"got error, want none; error is: %v\", err)\n\t\t\t\t}\n\t\t\t\tif file.wantErr && err == nil {\n\t\t\t\t\tt.Fatalf(\"got no error, want error\")\n\t\t\t\t}\n\t\t\t\tif !testDecodeGolden {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif file.fingerprint == 0 {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tfp := fitFingerprint(fitFile)\n\t\t\t\tif fp == file.fingerprint {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tt.Errorf(\"fit file fingerprint differs: got: %d, want: %d\", fp, file.fingerprint)\n\t\t\t\tif !*update {\n\t\t\t\t\tfpath = fpath + currentSuffix\n\t\t\t\t} else {\n\t\t\t\t\tfpath = fpath + goldenSuffix\n\t\t\t\t}\n\t\t\t\tif file.compress {\n\t\t\t\t\tfpath = fpath + gzSuffix\n\t\t\t\t}\n\t\t\t\terr = fitUtterDump(fitFile, fpath, file.compress)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"error writing output: %v\", err)\n\t\t\t\t}\n\t\t\t\tif !*update {\n\t\t\t\t\tt.Logf(\"current output written to: %s\", fpath)\n\t\t\t\t\tt.Logf(\"use a diff tool to compare (e.g. zdiff if compressed)\")\n\t\t\t\t} else {\n\t\t\t\t\tregenTestTable.Lock()\n\t\t\t\t\tregenTestTable.val = true\n\t\t\t\t\tdecodeTestFiles[i].fingerprint = fp\n\t\t\t\t\tregenTestTable.Unlock()\n\t\t\t\t\tt.Logf(\"%q has been updated\", fpath)\n\t\t\t\t\tt.Logf(\"new fingerprint is: %d, update test case in reader_test.go\", fp)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tif regenTestTable.val || *fupdate {\n\t\tt.Logf(\"regenerating table for decode test files...\")\n\t\terr := regenerateDecodeTestTable()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"error regenerating table for decode test files: %v\", err)\n\t\t}\n\t}\n}\n\nfunc TestDecodeChained(t *testing.T) {\n\tchainedTestFiles := []struct {\n\t\tfpath string\n\t\tdfiles int\n\t\twantErr bool\n\t\tdesc string\n\t}{\n\t\t{\n\t\t\tfilepath.Join(tdfolder, \"fitsdk\", \"Activity.fit\"),\n\t\t\t1,\n\t\t\tfalse,\n\t\t\t\"single valid fit file\",\n\t\t},\n\t\t{\n\t\t\tfilepath.Join(tdfolder, \"chained\", \"activity-settings.fit\"),\n\t\t\t2,\n\t\t\tfalse,\n\t\t\t\"two valid chained fit files\",\n\t\t},\n\t\t{\n\t\t\tfilepath.Join(tdfolder, \"chained\", \"activity-activity-filecrc.fit\"),\n\t\t\t2,\n\t\t\ttrue,\n\t\t\t\"one valid fit file + one fit file with wrong crc\",\n\t\t},\n\t\t{\n\t\t\tfilepath.Join(tdfolder, \"chained\", \"activity-settings-corruptheader.fit\"),\n\t\t\t1,\n\t\t\ttrue,\n\t\t\t\"one valid fit file + one fit file with corrupt header\",\n\t\t},\n\t\t{\n\t\t\tfilepath.Join(tdfolder, \"chained\", \"activity-settings-nodata.fit\"),\n\t\t\t2,\n\t\t\ttrue,\n\t\t\t\"one valid fit file + one fit file with ok header but no data\",\n\t\t},\n\t}\n\n\tfor _, ctf := range chainedTestFiles {\n\t\tctf := ctf\n\t\tt.Run(ctf.fpath, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tdata, err := ioutil.ReadFile(ctf.fpath)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"reading file data failed: %v\", err)\n\t\t\t}\n\t\t\tfitFiles, err := fit.DecodeChained(bytes.NewReader(data))\n\t\t\tif !ctf.wantErr && err != nil {\n\t\t\t\tt.Fatalf(\"got error, want none; error is: %v\", err)\n\t\t\t}\n\t\t\tif ctf.wantErr && err == nil {\n\t\t\t\tt.Fatalf(\"got no error, want error\")\n\t\t\t}\n\t\t\tif len(fitFiles) != ctf.dfiles {\n\t\t\t\tt.Fatalf(\"got %d decoded fit file(s), want %d\", len(fitFiles), ctf.dfiles)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCheckIntegrity(t *testing.T) {\n\tt.Run(\"ActivitySmall\", func(t *testing.T) {\n\t\terr := fit.CheckIntegrity(bytes.NewReader(activitySmall()), false)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%q: failed: %v\", activitySmallPath, err)\n\t\t}\n\t})\n\tt.Run(\"ActivitySDK\", func(t *testing.T) {\n\t\tfpath := filepath.Join(tdfolder, \"fitsdk\", \"Activity.fit\")\n\t\tdata, err := ioutil.ReadFile(fpath)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"reading %q failed: %v\", fpath, err)\n\t\t}\n\t\terr = fit.CheckIntegrity(bytes.NewReader(data), false)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%q: failed: %v\", fpath, err)\n\t\t}\n\t})\n}\n\nfunc TestDecodeHeader(t *testing.T) {\n\twantHeader := fit.Header{\n\t\tSize: 0xe,\n\t\tProtocolVersion: 0x10,\n\t\tProfileVersion: 0x457,\n\t\tDataSize: 0x1dbdf,\n\t\tDataType: [4]uint8{0x2e, 0x46, 0x49, 0x54},\n\t\tCRC: 0x1ec4,\n\t}\n\tgotHeader, err := fit.DecodeHeader(bytes.NewReader(activitySmall()))\n\tif err != nil {\n\t\tt.Errorf(\"%q: failed: %v\", activitySmallPath, err)\n\t}\n\tif gotHeader != wantHeader {\n\t\tt.Errorf(\"got header:\\n%#v\\nwant header: %#v\", gotHeader, wantHeader)\n\t}\n}\n\nfunc TestDecodeHeaderAndFileID(t *testing.T) {\n\twantHeader := fit.Header{\n\t\tSize: 0xe,\n\t\tProtocolVersion: 0x10,\n\t\tProfileVersion: 0x457,\n\t\tDataSize: 0x1dbdf,\n\t\tDataType: [4]uint8{0x2e, 0x46, 0x49, 0x54},\n\t\tCRC: 0x1ec4,\n\t}\n\ttc := time.Unix(1439652761, 0)\n\ttc = tc.UTC()\n\twantFileId := fit.FileIdMsg{\n\t\tType: 0x4,\n\t\tManufacturer: 0x1,\n\t\tProduct: 0x7af,\n\t\tSerialNumber: 0xe762d9cf,\n\t\tNumber: 0xffff,\n\t\tTimeCreated: tc,\n\t\tProductName: \"\",\n\t}\n\n\tgotHeader, gotFileId, err := fit.DecodeHeaderAndFileID(bytes.NewReader(activitySmall()))\n\tif err != nil {\n\t\tt.Errorf(\"%q: failed: %v\", activitySmallPath, err)\n\t}\n\tif gotHeader != wantHeader {\n\t\tt.Errorf(\"%q:\\ngot header:\\n%#v\\nwant header:\\n%#v\", activitySmallPath, gotHeader, wantHeader)\n\t}\n\tif gotFileId != wantFileId {\n\t\tt.Errorf(\"%q:\\ngot FileIdMsg:\\n%v\\nwant FileIdMsg:\\n%v\", activitySmallPath, gotFileId, wantFileId)\n\t}\n}\n\nfunc BenchmarkDecode(b *testing.B) {\n\tfiles := []struct {\n\t\tdesc, path string\n\t}{\n\t\t{\"ActivitySmall\", activitySmallPath},\n\t\t{\"ActivityLarge\", activityLargePath},\n\t\t{\"ActivityWithComponents\", activityComponentsPath},\n\t\t{\"MonitoringFile\", monitoringPath},\n\t}\n\tfor _, file := range files {\n\t\tb.Run(file.desc, func(b *testing.B) {\n\t\t\tdata, err := ioutil.ReadFile(file.path)\n\t\t\tif err != nil {\n\t\t\t\tb.Fatalf(\"%q: error reading file: %v\", file.path, err)\n\t\t\t}\n\t\t\tb.ReportAllocs()\n\t\t\tb.SetBytes(int64(len(data)))\n\t\t\tb.ResetTimer()\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\t_, err := fit.Decode(bytes.NewReader(data))\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Fatalf(\"%q: error decoding file: %v\", file.path, err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc BenchmarkDecodeActivityLargeParallel(b *testing.B) {\n\tdata, err := ioutil.ReadFile(activityLargePath)\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tb.ReportAllocs()\n\tb.SetBytes(int64(len(data)))\n\tb.ResetTimer()\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\t_, err := fit.Decode(bytes.NewReader(data))\n\t\t\tif err != nil {\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc BenchmarkDecodeHeader(b *testing.B) {\n\tdata := activitySmall()\n\tb.ReportAllocs()\n\tb.SetBytes(int64(len(data)))\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err := fit.DecodeHeader(bytes.NewReader(data))\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"%q: error decoding header: %v\", activitySmallPath, err)\n\t\t}\n\t}\n\n}\n\nfunc BenchmarkDecodeHeaderAndFileID(b *testing.B) {\n\tdata := activitySmall()\n\tb.ReportAllocs()\n\tb.SetBytes(int64(len(data)))\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, _, err := fit.DecodeHeaderAndFileID(bytes.NewReader(data))\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"%q: error decoding header\/fileid: %v\", activitySmallPath, err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package swarm\n\nimport \"time\"\n\n\/\/ Service represents a service.\ntype Service struct {\n\tID string\n\tMeta\n\tSpec ServiceSpec `json:\",omitempty\"`\n\tPreviousSpec *ServiceSpec `json:\",omitempty\"`\n\tEndpoint Endpoint `json:\",omitempty\"`\n\tUpdateStatus UpdateStatus `json:\",omitempty\"`\n}\n\n\/\/ ServiceSpec represents the spec of a service.\ntype ServiceSpec struct {\n\tAnnotations\n\n\t\/\/ TaskTemplate defines how the service should construct new tasks when\n\t\/\/ orchestrating this service.\n\tTaskTemplate TaskSpec `json:\",omitempty\"`\n\tMode ServiceMode `json:\",omitempty\"`\n\tUpdateConfig *UpdateConfig `json:\",omitempty\"`\n\n\t\/\/ Networks field in ServiceSpec is deprecated. The\n\t\/\/ same field in TaskSpec should be used instead.\n\t\/\/ This field will be removed in a future release.\n\tNetworks []NetworkAttachmentConfig `json:\",omitempty\"`\n\tEndpointSpec *EndpointSpec `json:\",omitempty\"`\n}\n\n\/\/ ServiceMode represents the mode of a service.\ntype ServiceMode struct {\n\tReplicated *ReplicatedService `json:\",omitempty\"`\n\tGlobal *GlobalService `json:\",omitempty\"`\n}\n\n\/\/ UpdateState is the state of a service update.\ntype UpdateState string\n\nconst (\n\t\/\/ UpdateStateUpdating is the updating state.\n\tUpdateStateUpdating UpdateState = \"updating\"\n\t\/\/ UpdateStatePaused is the paused state.\n\tUpdateStatePaused UpdateState = \"paused\"\n\t\/\/ UpdateStateCompleted is the completed state.\n\tUpdateStateCompleted UpdateState = \"completed\"\n)\n\n\/\/ UpdateStatus reports the status of a service update.\ntype UpdateStatus struct {\n\tState UpdateState `json:\",omitempty\"`\n\tStartedAt time.Time `json:\",omitempty\"`\n\tCompletedAt time.Time `json:\",omitempty\"`\n\tMessage string `json:\",omitempty\"`\n}\n\n\/\/ ReplicatedService is a kind of ServiceMode.\ntype ReplicatedService struct {\n\tReplicas *uint64 `json:\",omitempty\"`\n}\n\n\/\/ GlobalService is a kind of ServiceMode.\ntype GlobalService struct{}\n\nconst (\n\t\/\/ UpdateFailureActionPause PAUSE\n\tUpdateFailureActionPause = \"pause\"\n\t\/\/ UpdateFailureActionContinue CONTINUE\n\tUpdateFailureActionContinue = \"continue\"\n)\n\n\/\/ UpdateConfig represents the update configuration.\ntype UpdateConfig struct {\n\t\/\/ Maximum number of tasks to be updated in one iteration.\n\t\/\/ 0 means unlimited parallelism.\n\tParallelism uint64 `json:\",omitempty\"`\n\n\t\/\/ Amount of time between updates.\n\tDelay time.Duration `json:\",omitempty\"`\n\n\t\/\/ FailureAction is the action to take when an update failures.\n\tFailureAction string `json:\",omitempty\"`\n\n\t\/\/ Monitor indicates how long to monitor a task for failure after it is\n\t\/\/ created. If the task fails by ending up in one of the states\n\t\/\/ REJECTED, COMPLETED, or FAILED, within Monitor from its creation,\n\t\/\/ this counts as a failure. If it fails after Monitor, it does not\n\t\/\/ count as a failure. If Monitor is unspecified, a default value will\n\t\/\/ be used.\n\tMonitor time.Duration `json:\",omitempty\"`\n\n\t\/\/ MaxFailureRatio is the fraction of tasks that may fail during\n\t\/\/ an update before the failure action is invoked. Any task created by\n\t\/\/ the current update which ends up in one of the states REJECTED,\n\t\/\/ COMPLETED or FAILED within Monitor from its creation counts as a\n\t\/\/ failure. The number of failures is divided by the number of tasks\n\t\/\/ being updated, and if this fraction is greater than\n\t\/\/ MaxFailureRatio, the failure action is invoked.\n\t\/\/\n\t\/\/ If the failure action is CONTINUE, there is no effect.\n\t\/\/ If the failure action is PAUSE, no more tasks will be updated until\n\t\/\/ another update is started.\n\tMaxFailureRatio float32\n}\n<commit_msg>api: Remove omitempty tag on Parallelism<commit_after>package swarm\n\nimport \"time\"\n\n\/\/ Service represents a service.\ntype Service struct {\n\tID string\n\tMeta\n\tSpec ServiceSpec `json:\",omitempty\"`\n\tPreviousSpec *ServiceSpec `json:\",omitempty\"`\n\tEndpoint Endpoint `json:\",omitempty\"`\n\tUpdateStatus UpdateStatus `json:\",omitempty\"`\n}\n\n\/\/ ServiceSpec represents the spec of a service.\ntype ServiceSpec struct {\n\tAnnotations\n\n\t\/\/ TaskTemplate defines how the service should construct new tasks when\n\t\/\/ orchestrating this service.\n\tTaskTemplate TaskSpec `json:\",omitempty\"`\n\tMode ServiceMode `json:\",omitempty\"`\n\tUpdateConfig *UpdateConfig `json:\",omitempty\"`\n\n\t\/\/ Networks field in ServiceSpec is deprecated. The\n\t\/\/ same field in TaskSpec should be used instead.\n\t\/\/ This field will be removed in a future release.\n\tNetworks []NetworkAttachmentConfig `json:\",omitempty\"`\n\tEndpointSpec *EndpointSpec `json:\",omitempty\"`\n}\n\n\/\/ ServiceMode represents the mode of a service.\ntype ServiceMode struct {\n\tReplicated *ReplicatedService `json:\",omitempty\"`\n\tGlobal *GlobalService `json:\",omitempty\"`\n}\n\n\/\/ UpdateState is the state of a service update.\ntype UpdateState string\n\nconst (\n\t\/\/ UpdateStateUpdating is the updating state.\n\tUpdateStateUpdating UpdateState = \"updating\"\n\t\/\/ UpdateStatePaused is the paused state.\n\tUpdateStatePaused UpdateState = \"paused\"\n\t\/\/ UpdateStateCompleted is the completed state.\n\tUpdateStateCompleted UpdateState = \"completed\"\n)\n\n\/\/ UpdateStatus reports the status of a service update.\ntype UpdateStatus struct {\n\tState UpdateState `json:\",omitempty\"`\n\tStartedAt time.Time `json:\",omitempty\"`\n\tCompletedAt time.Time `json:\",omitempty\"`\n\tMessage string `json:\",omitempty\"`\n}\n\n\/\/ ReplicatedService is a kind of ServiceMode.\ntype ReplicatedService struct {\n\tReplicas *uint64 `json:\",omitempty\"`\n}\n\n\/\/ GlobalService is a kind of ServiceMode.\ntype GlobalService struct{}\n\nconst (\n\t\/\/ UpdateFailureActionPause PAUSE\n\tUpdateFailureActionPause = \"pause\"\n\t\/\/ UpdateFailureActionContinue CONTINUE\n\tUpdateFailureActionContinue = \"continue\"\n)\n\n\/\/ UpdateConfig represents the update configuration.\ntype UpdateConfig struct {\n\t\/\/ Maximum number of tasks to be updated in one iteration.\n\t\/\/ 0 means unlimited parallelism.\n\tParallelism uint64\n\n\t\/\/ Amount of time between updates.\n\tDelay time.Duration `json:\",omitempty\"`\n\n\t\/\/ FailureAction is the action to take when an update failures.\n\tFailureAction string `json:\",omitempty\"`\n\n\t\/\/ Monitor indicates how long to monitor a task for failure after it is\n\t\/\/ created. If the task fails by ending up in one of the states\n\t\/\/ REJECTED, COMPLETED, or FAILED, within Monitor from its creation,\n\t\/\/ this counts as a failure. If it fails after Monitor, it does not\n\t\/\/ count as a failure. If Monitor is unspecified, a default value will\n\t\/\/ be used.\n\tMonitor time.Duration `json:\",omitempty\"`\n\n\t\/\/ MaxFailureRatio is the fraction of tasks that may fail during\n\t\/\/ an update before the failure action is invoked. Any task created by\n\t\/\/ the current update which ends up in one of the states REJECTED,\n\t\/\/ COMPLETED or FAILED within Monitor from its creation counts as a\n\t\/\/ failure. The number of failures is divided by the number of tasks\n\t\/\/ being updated, and if this fraction is greater than\n\t\/\/ MaxFailureRatio, the failure action is invoked.\n\t\/\/\n\t\/\/ If the failure action is CONTINUE, there is no effect.\n\t\/\/ If the failure action is PAUSE, no more tasks will be updated until\n\t\/\/ another update is started.\n\tMaxFailureRatio float32\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Canonical Ltd.\n\/\/ Copyright 2016 Cloudbase Solutions\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage common\n\nimport (\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/juju\/names\"\n\n\t\"github.com\/juju\/juju\/apiserver\/params\"\n\t\"github.com\/juju\/juju\/state\"\n\t\"github.com\/juju\/juju\/state\/watcher\"\n)\n\n\/\/ ParamsActionExecutionResultsToStateActionResults does exactly what\n\/\/ the name implies.\nfunc ParamsActionExecutionResultsToStateActionResults(arg params.ActionExecutionResult) (state.ActionResults, error) {\n\tvar status state.ActionStatus\n\tswitch arg.Status {\n\tcase params.ActionCancelled:\n\t\tstatus = state.ActionCancelled\n\tcase params.ActionCompleted:\n\t\tstatus = state.ActionCompleted\n\tcase params.ActionFailed:\n\t\tstatus = state.ActionFailed\n\tcase params.ActionPending:\n\t\tstatus = state.ActionPending\n\tdefault:\n\t\treturn state.ActionResults{}, errors.Errorf(\"unrecognized action status '%s'\", arg.Status)\n\t}\n\treturn state.ActionResults{\n\t\tStatus: status,\n\t\tResults: arg.Results,\n\t\tMessage: arg.Message,\n\t}, nil\n}\n\n\/\/ TagToActionReceiver takes a tag string and tries to convert it to an\n\/\/ ActionReceiver. It needs a findEntity function passed in that can search for the tags in state.\nfunc TagToActionReceiverFn(findEntity func(names.Tag) (state.Entity, error)) func(tag string) (state.ActionReceiver, error) {\n\treturn func(tag string) (state.ActionReceiver, error) {\n\t\treceiverTag, err := names.ParseTag(tag)\n\t\tif err != nil {\n\t\t\treturn nil, ErrBadId\n\t\t}\n\t\tentity, err := findEntity(receiverTag)\n\t\tif err != nil {\n\t\t\treturn nil, ErrBadId\n\t\t}\n\t\treceiver, ok := entity.(state.ActionReceiver)\n\t\tif !ok {\n\t\t\treturn nil, ErrBadId\n\t\t}\n\t\treturn receiver, nil\n\t}\n}\n\n\/\/ AuthAndActionFromTagFn takes in an authorizer function and a function that can fetch action by tags from state\n\/\/ and returns a function that can fetch an action from state by id and check the authorization.\nfunc AuthAndActionFromTagFn(canAccess AuthFunc, getActionByTag func(names.ActionTag) (state.Action, error)) func(string) (state.Action, error) {\n\treturn func(tag string) (state.Action, error) {\n\t\tactionTag, err := names.ParseActionTag(tag)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\taction, err := getActionByTag(actionTag)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\treceiverTag, err := names.ActionReceiverTag(action.Receiver())\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tif !canAccess(receiverTag) {\n\t\t\treturn nil, ErrPerm\n\t\t}\n\t\treturn action, nil\n\t}\n}\n\n\/\/ BeginActions is a helper function currently used by the uniter and by machineactions\n\/\/ It needs an actionFn that can fetch an action from state using it's id.\n\/\/ It is usually created by AuthAndActionFromTagFn\nfunc BeginActions(args params.Entities, actionFn func(string) (state.Action, error)) params.ErrorResults {\n\tresults := params.ErrorResults{Results: make([]params.ErrorResult, len(args.Entities))}\n\n\tfor i, arg := range args.Entities {\n\t\taction, err := actionFn(arg.Tag)\n\t\tif err != nil {\n\t\t\tresults.Results[i].Error = ServerError(err)\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err = action.Begin()\n\t\tif err != nil {\n\t\t\tresults.Results[i].Error = ServerError(err)\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn results\n}\n\n\/\/ FinishActions is a helper function currently used by the uniter and by machineactions\n\/\/ It needs an actionFn that can fetch an action from state using it's id.\n\/\/ It is usually created by AuthAndActionFromTagFn\nfunc FinishActions(args params.ActionExecutionResults, actionFn func(string) (state.Action, error)) params.ErrorResults {\n\tresults := params.ErrorResults{Results: make([]params.ErrorResult, len(args.Results))}\n\n\tfor i, arg := range args.Results {\n\t\taction, err := actionFn(arg.ActionTag)\n\t\tif err != nil {\n\t\t\tresults.Results[i].Error = ServerError(err)\n\t\t\tcontinue\n\t\t}\n\t\tactionResults, err := ParamsActionExecutionResultsToStateActionResults(arg)\n\t\tif err != nil {\n\t\t\tresults.Results[i].Error = ServerError(err)\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err = action.Finish(actionResults)\n\t\tif err != nil {\n\t\t\tresults.Results[i].Error = ServerError(err)\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn results\n}\n\n\/\/ Actions is a helper function currently used by the uniter and by machineactions\n\/\/ It needs an actionFn that can fetch an action from state using it's id.\n\/\/ It is usually created by AuthAndActionFromTagFn\nfunc Actions(args params.Entities, actionFn func(string) (state.Action, error)) params.ActionResults {\n\tresults := params.ActionResults{\n\t\tResults: make([]params.ActionResult, len(args.Entities)),\n\t}\n\n\tfor i, arg := range args.Entities {\n\t\taction, err := actionFn(arg.Tag)\n\t\tif err != nil {\n\t\t\tresults.Results[i].Error = ServerError(err)\n\t\t\tcontinue\n\t\t}\n\t\tif action.Status() != state.ActionPending {\n\t\t\tresults.Results[i].Error = ServerError(ErrActionNotAvailable)\n\t\t\tcontinue\n\t\t}\n\t\tresults.Results[i].Action = ¶ms.Action{\n\t\t\tName: action.Name(),\n\t\t\tParameters: action.Parameters(),\n\t\t}\n\t}\n\n\treturn results\n}\n\n\/\/ WatchOneActionReceiverNotifications is a helper function currently used by the uniter and by machineactions\n\/\/ to create a watcher for one receiver. It needs a tagToActionReceiver function and a registerFunc to register\n\/\/ resources.\nfunc WatchOneActionReceiverNotifications(tagToActionReceiver func(tag string) (state.ActionReceiver, error), registerFunc func(r Resource) string) func(names.Tag) (params.StringsWatchResult, error) {\n\treturn func(tag names.Tag) (params.StringsWatchResult, error) {\n\t\tnothing := params.StringsWatchResult{}\n\t\treceiver, err := tagToActionReceiver(tag.String())\n\t\tif err != nil {\n\t\t\treturn nothing, err\n\t\t}\n\t\twatch := receiver.WatchActionNotifications()\n\n\t\tif changes, ok := <-watch.Changes(); ok {\n\t\t\treturn params.StringsWatchResult{\n\t\t\t\tStringsWatcherId: registerFunc(watch),\n\t\t\t\tChanges: changes,\n\t\t\t}, nil\n\t\t}\n\t\treturn nothing, watcher.EnsureErr(watch)\n\t}\n}\n\n\/\/ WatchActionNotifications is a helper function currently used by the uniter and by machineactions\n\/\/ to create watchers. The canAccess function is passed in by the respective caller to provide authorization.\n\/\/ watchOne is usually a function created by WatchOneActionReceiverNotifications\nfunc WatchActionNotifications(args params.Entities, canAccess AuthFunc, watchOne func(names.Tag) (params.StringsWatchResult, error)) params.StringsWatchResults {\n\tresult := params.StringsWatchResults{\n\t\tResults: make([]params.StringsWatchResult, len(args.Entities)),\n\t}\n\n\tfor i, entity := range args.Entities {\n\t\ttag, err := names.ActionReceiverFromTag(entity.Tag)\n\t\tif err != nil {\n\t\t\tresult.Results[i].Error = ServerError(err)\n\t\t\tcontinue\n\t\t}\n\t\terr = ErrPerm\n\t\tif canAccess(tag) {\n\t\t\tresult.Results[i], err = watchOne(tag)\n\t\t}\n\t\tresult.Results[i].Error = ServerError(err)\n\t}\n\n\treturn result\n}\n\n\/\/ GetActionsFn declares the function type that returns a slice of\n\/\/ state.Action and error, used to curry specific list functions.\ntype GetActionsFn func() ([]state.Action, error)\n\n\/\/ ConvertActions takes a generic getActionsFn to obtain a slice\n\/\/ of state.Action and then converts them to the API slice of\n\/\/ params.ActionResult.\nfunc ConvertActions(ar state.ActionReceiver, fn GetActionsFn) ([]params.ActionResult, error) {\n\titems := []params.ActionResult{}\n\tactions, err := fn()\n\tif err != nil {\n\t\treturn items, err\n\t}\n\tfor _, action := range actions {\n\t\tif action == nil {\n\t\t\tcontinue\n\t\t}\n\t\titems = append(items, MakeActionResult(ar.Tag(), action))\n\t}\n\treturn items, nil\n}\n\n\/\/ MakeActionResult does the actual type conversion from state.Action\n\/\/ to params.ActionResult.\nfunc MakeActionResult(actionReceiverTag names.Tag, action state.Action) params.ActionResult {\n\toutput, message := action.Results()\n\treturn params.ActionResult{\n\t\tAction: ¶ms.Action{\n\t\t\tReceiver: actionReceiverTag.String(),\n\t\t\tTag: action.ActionTag().String(),\n\t\t\tName: action.Name(),\n\t\t\tParameters: action.Parameters(),\n\t\t},\n\t\tStatus: string(action.Status()),\n\t\tMessage: message,\n\t\tOutput: output,\n\t\tEnqueued: action.Enqueued(),\n\t\tStarted: action.Started(),\n\t\tCompleted: action.Completed(),\n\t}\n}\n<commit_msg>apiserver\/common\/actions: clarified comments<commit_after>\/\/ Copyright 2016 Canonical Ltd.\n\/\/ Copyright 2016 Cloudbase Solutions\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage common\n\nimport (\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/juju\/names\"\n\n\t\"github.com\/juju\/juju\/apiserver\/params\"\n\t\"github.com\/juju\/juju\/state\"\n\t\"github.com\/juju\/juju\/state\/watcher\"\n)\n\n\/\/ ParamsActionExecutionResultsToStateActionResults does exactly what\n\/\/ the name implies.\nfunc ParamsActionExecutionResultsToStateActionResults(arg params.ActionExecutionResult) (state.ActionResults, error) {\n\tvar status state.ActionStatus\n\tswitch arg.Status {\n\tcase params.ActionCancelled:\n\t\tstatus = state.ActionCancelled\n\tcase params.ActionCompleted:\n\t\tstatus = state.ActionCompleted\n\tcase params.ActionFailed:\n\t\tstatus = state.ActionFailed\n\tcase params.ActionPending:\n\t\tstatus = state.ActionPending\n\tdefault:\n\t\treturn state.ActionResults{}, errors.Errorf(\"unrecognized action status '%s'\", arg.Status)\n\t}\n\treturn state.ActionResults{\n\t\tStatus: status,\n\t\tResults: arg.Results,\n\t\tMessage: arg.Message,\n\t}, nil\n}\n\n\/\/ TagToActionReceiver takes a tag string and tries to convert it to an\n\/\/ ActionReceiver. It needs a findEntity function passed in that can search for the tags in state.\nfunc TagToActionReceiverFn(findEntity func(names.Tag) (state.Entity, error)) func(tag string) (state.ActionReceiver, error) {\n\treturn func(tag string) (state.ActionReceiver, error) {\n\t\treceiverTag, err := names.ParseTag(tag)\n\t\tif err != nil {\n\t\t\treturn nil, ErrBadId\n\t\t}\n\t\tentity, err := findEntity(receiverTag)\n\t\tif err != nil {\n\t\t\treturn nil, ErrBadId\n\t\t}\n\t\treceiver, ok := entity.(state.ActionReceiver)\n\t\tif !ok {\n\t\t\treturn nil, ErrBadId\n\t\t}\n\t\treturn receiver, nil\n\t}\n}\n\n\/\/ AuthAndActionFromTagFn takes in an authorizer function and a function that can fetch action by tags from state\n\/\/ and returns a function that can fetch an action from state by id and check the authorization.\nfunc AuthAndActionFromTagFn(canAccess AuthFunc, getActionByTag func(names.ActionTag) (state.Action, error)) func(string) (state.Action, error) {\n\treturn func(tag string) (state.Action, error) {\n\t\tactionTag, err := names.ParseActionTag(tag)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\taction, err := getActionByTag(actionTag)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\treceiverTag, err := names.ActionReceiverTag(action.Receiver())\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tif !canAccess(receiverTag) {\n\t\t\treturn nil, ErrPerm\n\t\t}\n\t\treturn action, nil\n\t}\n}\n\n\/\/ BeginActions calls begin on every action passed in through args.\n\/\/ It's a helper function currently used by the uniter and by machineactions\n\/\/ It needs an actionFn that can fetch an action from state using it's id, that's usually created by AuthAndActionFromTagFn\nfunc BeginActions(args params.Entities, actionFn func(string) (state.Action, error)) params.ErrorResults {\n\tresults := params.ErrorResults{Results: make([]params.ErrorResult, len(args.Entities))}\n\n\tfor i, arg := range args.Entities {\n\t\taction, err := actionFn(arg.Tag)\n\t\tif err != nil {\n\t\t\tresults.Results[i].Error = ServerError(err)\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err = action.Begin()\n\t\tif err != nil {\n\t\t\tresults.Results[i].Error = ServerError(err)\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn results\n}\n\n\/\/ FinishActions saves the result of a completed Action.\n\/\/ It's a helper function currently used by the uniter and by machineactions\n\/\/ It needs an actionFn that can fetch an action from state using it's id that's usually created by AuthAndActionFromTagFn\nfunc FinishActions(args params.ActionExecutionResults, actionFn func(string) (state.Action, error)) params.ErrorResults {\n\tresults := params.ErrorResults{Results: make([]params.ErrorResult, len(args.Results))}\n\n\tfor i, arg := range args.Results {\n\t\taction, err := actionFn(arg.ActionTag)\n\t\tif err != nil {\n\t\t\tresults.Results[i].Error = ServerError(err)\n\t\t\tcontinue\n\t\t}\n\t\tactionResults, err := ParamsActionExecutionResultsToStateActionResults(arg)\n\t\tif err != nil {\n\t\t\tresults.Results[i].Error = ServerError(err)\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err = action.Finish(actionResults)\n\t\tif err != nil {\n\t\t\tresults.Results[i].Error = ServerError(err)\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn results\n}\n\n\/\/ Actions returns the Actions by Tags passed in and ensures that the receiver asking for\n\/\/ them is the same one that has the action.\n\/\/ It's a helper function currently used by the uniter and by machineactions.\n\/\/ It needs an actionFn that can fetch an action from state using it's id that's usually created by AuthAndActionFromTagFn\nfunc Actions(args params.Entities, actionFn func(string) (state.Action, error)) params.ActionResults {\n\tresults := params.ActionResults{\n\t\tResults: make([]params.ActionResult, len(args.Entities)),\n\t}\n\n\tfor i, arg := range args.Entities {\n\t\taction, err := actionFn(arg.Tag)\n\t\tif err != nil {\n\t\t\tresults.Results[i].Error = ServerError(err)\n\t\t\tcontinue\n\t\t}\n\t\tif action.Status() != state.ActionPending {\n\t\t\tresults.Results[i].Error = ServerError(ErrActionNotAvailable)\n\t\t\tcontinue\n\t\t}\n\t\tresults.Results[i].Action = ¶ms.Action{\n\t\t\tName: action.Name(),\n\t\t\tParameters: action.Parameters(),\n\t\t}\n\t}\n\n\treturn results\n}\n\n\/\/ WatchOneActionReceiverNotifications to create a watcher for one receiver.\n\/\/ It needs a tagToActionReceiver function and a registerFunc to register\n\/\/ resources.\n\/\/ It's a helper function currently used by the uniter and by machineactions\nfunc WatchOneActionReceiverNotifications(tagToActionReceiver func(tag string) (state.ActionReceiver, error), registerFunc func(r Resource) string) func(names.Tag) (params.StringsWatchResult, error) {\n\treturn func(tag names.Tag) (params.StringsWatchResult, error) {\n\t\tnothing := params.StringsWatchResult{}\n\t\treceiver, err := tagToActionReceiver(tag.String())\n\t\tif err != nil {\n\t\t\treturn nothing, err\n\t\t}\n\t\twatch := receiver.WatchActionNotifications()\n\n\t\tif changes, ok := <-watch.Changes(); ok {\n\t\t\treturn params.StringsWatchResult{\n\t\t\t\tStringsWatcherId: registerFunc(watch),\n\t\t\t\tChanges: changes,\n\t\t\t}, nil\n\t\t}\n\t\treturn nothing, watcher.EnsureErr(watch)\n\t}\n}\n\n\/\/ WatchActionNotifications returns a StringsWatcher for observing incoming actions towards an actionreceiver.\n\/\/ It's a helper function currently used by the uniter and by machineactions\n\/\/ canAccess is passed in by the respective caller to provide authorization.\n\/\/ watchOne is usually a function created by WatchOneActionReceiverNotifications\nfunc WatchActionNotifications(args params.Entities, canAccess AuthFunc, watchOne func(names.Tag) (params.StringsWatchResult, error)) params.StringsWatchResults {\n\tresult := params.StringsWatchResults{\n\t\tResults: make([]params.StringsWatchResult, len(args.Entities)),\n\t}\n\n\tfor i, entity := range args.Entities {\n\t\ttag, err := names.ActionReceiverFromTag(entity.Tag)\n\t\tif err != nil {\n\t\t\tresult.Results[i].Error = ServerError(err)\n\t\t\tcontinue\n\t\t}\n\t\terr = ErrPerm\n\t\tif canAccess(tag) {\n\t\t\tresult.Results[i], err = watchOne(tag)\n\t\t}\n\t\tresult.Results[i].Error = ServerError(err)\n\t}\n\n\treturn result\n}\n\n\/\/ GetActionsFn declares the function type that returns a slice of\n\/\/ state.Action and error, used to curry specific list functions.\ntype GetActionsFn func() ([]state.Action, error)\n\n\/\/ ConvertActions takes a generic getActionsFn to obtain a slice\n\/\/ of state.Action and then converts them to the API slice of\n\/\/ params.ActionResult.\nfunc ConvertActions(ar state.ActionReceiver, fn GetActionsFn) ([]params.ActionResult, error) {\n\titems := []params.ActionResult{}\n\tactions, err := fn()\n\tif err != nil {\n\t\treturn items, err\n\t}\n\tfor _, action := range actions {\n\t\tif action == nil {\n\t\t\tcontinue\n\t\t}\n\t\titems = append(items, MakeActionResult(ar.Tag(), action))\n\t}\n\treturn items, nil\n}\n\n\/\/ MakeActionResult does the actual type conversion from state.Action\n\/\/ to params.ActionResult.\nfunc MakeActionResult(actionReceiverTag names.Tag, action state.Action) params.ActionResult {\n\toutput, message := action.Results()\n\treturn params.ActionResult{\n\t\tAction: ¶ms.Action{\n\t\t\tReceiver: actionReceiverTag.String(),\n\t\t\tTag: action.ActionTag().String(),\n\t\t\tName: action.Name(),\n\t\t\tParameters: action.Parameters(),\n\t\t},\n\t\tStatus: string(action.Status()),\n\t\tMessage: message,\n\t\tOutput: output,\n\t\tEnqueued: action.Enqueued(),\n\t\tStarted: action.Started(),\n\t\tCompleted: action.Completed(),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package node\n\nimport (\n\t\"github.com\/rancher\/norman\/store\/transform\"\n\t\"github.com\/rancher\/norman\/types\"\n\t\"github.com\/rancher\/norman\/types\/convert\"\n\t\"github.com\/rancher\/norman\/types\/values\"\n\t\"github.com\/rancher\/rancher\/pkg\/api\/store\/workload\"\n)\n\ntype nodeStore struct {\n\ttypes.Store\n}\n\nfunc SetupStore(schema *types.Schema) {\n\tschema.Store = &transform.Store{\n\t\tStore: nodeStore{\n\t\t\tStore: schema.Store,\n\t\t},\n\t\tTransformer: func(apiContext *types.APIContext, schema *types.Schema, data map[string]interface{}, opt *types.QueryOptions) (map[string]interface{}, error) {\n\t\t\tworkload.SetPublicEnpointsFields(data)\n\t\t\tsetState(data)\n\t\t\treturn data, nil\n\t\t},\n\t}\n}\n\nfunc (n nodeStore) List(apiContext *types.APIContext, schema *types.Schema, opt *types.QueryOptions) ([]map[string]interface{}, error) {\n\tsetName := false\n\tfor _, cond := range opt.Conditions {\n\t\tif cond.Field == \"name\" && cond.ToCondition().Modifier == types.ModifierEQ {\n\t\t\tsetName = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tdatas, err := n.Store.List(apiContext, schema, opt)\n\tif err != nil || !setName {\n\t\treturn datas, err\n\t}\n\n\tfor _, data := range datas {\n\t\tif !convert.IsAPIObjectEmpty(data[\"name\"]) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif !convert.IsAPIObjectEmpty(data[\"nodeName\"]) {\n\t\t\tdata[\"name\"] = data[\"nodeName\"]\n\t\t\tcontinue\n\t\t}\n\n\t\tif !convert.IsAPIObjectEmpty(data[\"requestedHostname\"]) {\n\t\t\tdata[\"name\"] = data[\"requestedHostname\"]\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn datas, err\n}\n\nfunc (n nodeStore) Update(apiContext *types.APIContext, schema *types.Schema, data map[string]interface{}, id string) (map[string]interface{}, error) {\n\tformat(data)\n\treturn n.Store.Update(apiContext, schema, data, id)\n}\n\nfunc format(data map[string]interface{}) {\n\tdata[\"desiredNodeLabels\"] = data[\"labels\"]\n\tdata[\"desiredNodeAnnotations\"] = data[\"annotations\"]\n}\n\nfunc setState(data map[string]interface{}) {\n\tif data[\"state\"] == \"draining\" {\n\t\treturn\n\t}\n\tif convert.ToBool(values.GetValueN(data, \"unschedulable\")) {\n\t\tconditions, _ := values.GetSlice(data, \"conditions\")\n\t\tfor _, condition := range conditions {\n\t\t\tcondType := values.GetValueN(condition, \"type\")\n\t\t\tif convert.ToString(condType) == \"Drained\" &&\n\t\t\t\tconvert.ToString(values.GetValueN(condition, \"status\")) == \"True\" {\n\t\t\t\tdata[\"state\"] = \"drained\"\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tdata[\"state\"] = \"cordoned\"\n\t}\n}\n<commit_msg>validate node hostname when node is getting created or updated<commit_after>package node\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/rancher\/norman\/httperror\"\n\t\"github.com\/rancher\/norman\/store\/transform\"\n\t\"github.com\/rancher\/norman\/types\"\n\t\"github.com\/rancher\/norman\/types\/convert\"\n\t\"github.com\/rancher\/norman\/types\/values\"\n\t\"github.com\/rancher\/rancher\/pkg\/api\/store\/workload\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/validation\"\n)\n\ntype nodeStore struct {\n\ttypes.Store\n}\n\nfunc SetupStore(schema *types.Schema) {\n\tschema.Store = &transform.Store{\n\t\tStore: nodeStore{\n\t\t\tStore: schema.Store,\n\t\t},\n\t\tTransformer: func(apiContext *types.APIContext, schema *types.Schema, data map[string]interface{}, opt *types.QueryOptions) (map[string]interface{}, error) {\n\t\t\tworkload.SetPublicEnpointsFields(data)\n\t\t\tsetState(data)\n\t\t\treturn data, nil\n\t\t},\n\t}\n}\n\nfunc (n nodeStore) List(apiContext *types.APIContext, schema *types.Schema, opt *types.QueryOptions) ([]map[string]interface{}, error) {\n\tsetName := false\n\tfor _, cond := range opt.Conditions {\n\t\tif cond.Field == \"name\" && cond.ToCondition().Modifier == types.ModifierEQ {\n\t\t\tsetName = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tdatas, err := n.Store.List(apiContext, schema, opt)\n\tif err != nil || !setName {\n\t\treturn datas, err\n\t}\n\n\tfor _, data := range datas {\n\t\tif !convert.IsAPIObjectEmpty(data[\"name\"]) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif !convert.IsAPIObjectEmpty(data[\"nodeName\"]) {\n\t\t\tdata[\"name\"] = data[\"nodeName\"]\n\t\t\tcontinue\n\t\t}\n\n\t\tif !convert.IsAPIObjectEmpty(data[\"requestedHostname\"]) {\n\t\t\tdata[\"name\"] = data[\"requestedHostname\"]\n\t\t\tcontinue\n\t\t}\n\t}\n\n\treturn datas, err\n}\n\nfunc (n nodeStore) Create(apiContext *types.APIContext, schema *types.Schema, data map[string]interface{}) (map[string]interface{}, error) {\n\tformat(data)\n\tnodePoolID := n.getNodePoolID(apiContext, schema, data, \"\")\n\tif nodePoolID != \"\" {\n\t\tif err := n.validateHostname(schema, data); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn n.Store.Create(apiContext, schema, data)\n}\n\nfunc (n nodeStore) Update(apiContext *types.APIContext, schema *types.Schema, data map[string]interface{}, id string) (map[string]interface{}, error) {\n\tformat(data)\n\tnodePoolID := n.getNodePoolID(apiContext, schema, data, id)\n\tif nodePoolID != \"\" {\n\t\tif err := n.validateHostname(schema, data); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn n.Store.Update(apiContext, schema, data, id)\n}\n\nfunc format(data map[string]interface{}) {\n\tdata[\"desiredNodeLabels\"] = data[\"labels\"]\n\tdata[\"desiredNodeAnnotations\"] = data[\"annotations\"]\n}\n\nfunc setState(data map[string]interface{}) {\n\tif data[\"state\"] == \"draining\" {\n\t\treturn\n\t}\n\tif convert.ToBool(values.GetValueN(data, \"unschedulable\")) {\n\t\tconditions, _ := values.GetSlice(data, \"conditions\")\n\t\tfor _, condition := range conditions {\n\t\t\tcondType := values.GetValueN(condition, \"type\")\n\t\t\tif convert.ToString(condType) == \"Drained\" &&\n\t\t\t\tconvert.ToString(values.GetValueN(condition, \"status\")) == \"True\" {\n\t\t\t\tdata[\"state\"] = \"drained\"\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tdata[\"state\"] = \"cordoned\"\n\t}\n}\n\nfunc (n nodeStore) getNodePoolID(apiContext *types.APIContext, schema *types.Schema, data map[string]interface{}, id string) string {\n\t_, ok := data[\"nodePoolId\"]\n\tif ok {\n\t\treturn data[\"nodePoolId\"].(string)\n\t}\n\tif id != \"\" {\n\t\texistingNode, err := n.ByID(apiContext, schema, id)\n\t\tif err != nil {\n\t\t\treturn \"\"\n\t\t}\n\t\t_, ok := existingNode[\"nodePoolId\"].(string)\n\t\tif ok {\n\t\t\treturn existingNode[\"nodePoolId\"].(string)\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc (n nodeStore) validateHostname(schema *types.Schema, data map[string]interface{}) error {\n\thostName := data[\"name\"]\n\tif hostName != nil {\n\t\terrs := validation.IsDNS1123Label(hostName.(string))\n\t\tif len(errs) != 0 {\n\t\t\treturn httperror.NewAPIError(httperror.InvalidFormat, fmt.Sprintf(\"invalid value %s: %s\", hostName.(string),\n\t\t\t\tstrings.Join(errs, \",\")))\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 Authors of Cilium\n\/\/ Copyright 2017 Lyft, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage eni\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/aws\/types\"\n\t\"github.com\/cilium\/cilium\/pkg\/k8s\/apis\/cilium.io\/v2\"\n\t\"github.com\/cilium\/cilium\/pkg\/lock\"\n\t\"github.com\/cilium\/cilium\/pkg\/trigger\"\n\n\t\"github.com\/sirupsen\/logrus\"\n)\n\ntype k8sAPI interface {\n\tUpdate(origResource, newResource *v2.CiliumNode) (*v2.CiliumNode, error)\n\tUpdateStatus(origResource, newResource *v2.CiliumNode) (*v2.CiliumNode, error)\n}\n\ntype nodeManagerAPI interface {\n\tGetENI(instanceID string, index int) *v2.ENI\n\tGetENIs(instanceID string) []*v2.ENI\n\tGetSubnet(subnetID string) *types.Subnet\n\tFindSubnetByTags(vpcID, availabilityZone string, required types.Tags) *types.Subnet\n\tResync()\n}\n\ntype ec2API interface {\n\tCreateNetworkInterface(toAllocate int64, subnetID, desc string, groups []string) (string, error)\n\tDeleteNetworkInterface(eniID string) error\n\tAttachNetworkInterface(index int64, instanceID, eniID string) (string, error)\n\tModifyNetworkInterface(eniID, attachmentID string, deleteOnTermination bool) error\n\tAssignPrivateIpAddresses(eniID string, addresses int64) error\n}\n\ntype metricsAPI interface {\n\tIncENIAllocationAttempt(status, subnetID string)\n\tAddIPAllocation(subnetID string, allocated int64)\n\tSetAllocatedIPs(typ string, allocated int)\n\tSetAvailableENIs(available int)\n\tSetNodesAtCapacity(nodes int)\n\tIncResyncCount()\n}\n\n\/\/ nodeMap is a mapping of node names to ENI nodes\ntype nodeMap map[string]*Node\n\n\/\/ NodeManager manages all nodes with ENIs\ntype NodeManager struct {\n\tmutex lock.RWMutex\n\tnodes nodeMap\n\tinstancesAPI nodeManagerAPI\n\tec2API ec2API\n\tk8sAPI k8sAPI\n\tmetricsAPI metricsAPI\n\tresyncTrigger *trigger.Trigger\n\tdeficitResolver *trigger.Trigger\n}\n\n\/\/ NewNodeManager returns a new NodeManager\nfunc NewNodeManager(instancesAPI nodeManagerAPI, ec2API ec2API, k8sAPI k8sAPI, metrics metricsAPI) (*NodeManager, error) {\n\tmngr := &NodeManager{\n\t\tnodes: nodeMap{},\n\t\tinstancesAPI: instancesAPI,\n\t\tec2API: ec2API,\n\t\tk8sAPI: k8sAPI,\n\t\tmetricsAPI: metrics,\n\t}\n\n\tdeficitResolver, err := trigger.NewTrigger(trigger.Parameters{\n\t\tName: \"eni-node-manager-deficit-resolver\",\n\t\tMinInterval: time.Second,\n\t\tTriggerFunc: func(reasons []string) {\n\t\t\tfor _, name := range reasons {\n\t\t\t\tif node := mngr.Get(name); node != nil {\n\t\t\t\t\tif err := node.ResolveIPDeficit(); err != nil {\n\t\t\t\t\t\tnode.logger().WithError(err).Warning(\"Unable to resolve IP deficit of node\")\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tlog.WithField(fieldName, name).Warning(\"Node has disappeared while allocation request was queued\")\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to initialize deficit resolver trigger: %s\", err)\n\t}\n\n\tresyncTrigger, err := trigger.NewTrigger(trigger.Parameters{\n\t\tName: \"eni-node-manager-resync\",\n\t\tMinInterval: time.Second,\n\t\tTriggerFunc: func(reasons []string) {\n\t\t\tinstancesAPI.Resync()\n\t\t\tmngr.Resync()\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to initialize resync trigger: %s\", err)\n\t}\n\n\tmngr.resyncTrigger = resyncTrigger\n\tmngr.deficitResolver = deficitResolver\n\n\treturn mngr, nil\n}\n\n\/\/ GetNames returns the list of all node names\nfunc (n *NodeManager) GetNames() (allNodeNames []string) {\n\tn.mutex.RLock()\n\tdefer n.mutex.RUnlock()\n\n\tallNodeNames = make([]string, 0, len(n.nodes))\n\n\tfor name := range n.nodes {\n\t\tallNodeNames = append(allNodeNames, name)\n\t}\n\n\treturn\n}\n\n\/\/ Update is called whenever a CiliumNode resource has been updated in the\n\/\/ Kubernetes apiserver\nfunc (n *NodeManager) Update(resource *v2.CiliumNode) bool {\n\tn.mutex.Lock()\n\tnode, ok := n.nodes[resource.Name]\n\tif !ok {\n\t\tnode = &Node{\n\t\t\tname: resource.Name,\n\t\t\tmanager: n,\n\t\t}\n\t\tn.nodes[node.name] = node\n\n\t\tlog.WithField(fieldName, resource.Name).Info(\"Discovered new CiliumNode custom resource\")\n\t}\n\tn.mutex.Unlock()\n\n\treturn node.updatedResource(resource)\n}\n\n\/\/ Delete is called after a CiliumNode resource has been deleted via the\n\/\/ Kubernetes apiserver\nfunc (n *NodeManager) Delete(nodeName string) {\n\tn.mutex.Lock()\n\tdelete(n.nodes, nodeName)\n\tn.mutex.Unlock()\n}\n\n\/\/ Get returns the node with the given name\nfunc (n *NodeManager) Get(nodeName string) *Node {\n\tn.mutex.RLock()\n\tnode := n.nodes[nodeName]\n\tn.mutex.RUnlock()\n\treturn node\n}\n\n\/\/ GetNodesByNeededAddresses returns all nodes that require addresses to be\n\/\/ allocated, sorted by the number of addresses needed in descending order\nfunc (n *NodeManager) GetNodesByNeededAddresses() []*Node {\n\tn.mutex.RLock()\n\tdefer n.mutex.RUnlock()\n\n\tlist := make([]*Node, len(n.nodes))\n\tindex := 0\n\tfor _, node := range n.nodes {\n\t\tlist[index] = node\n\t\tindex++\n\t}\n\n\tsort.Slice(list, func(i, j int) bool {\n\t\treturn list[i].getNeededAddresses() > list[j].getNeededAddresses()\n\t})\n\n\treturn list\n}\n\n\/\/ Resync will attend all nodes and resolves IP deficits. The order of\n\/\/ attendance is defined by the number of IPs needed to reach the configured\n\/\/ watermarks. Any updates to the node resource are synchronized to the\n\/\/ Kubernetes apiserver.\nfunc (n *NodeManager) Resync() {\n\tvar totalUsed, totalAvailable, totalNeeded, remainingInterfaces, nodesAtCapacity int\n\n\tfor _, node := range n.GetNodesByNeededAddresses() {\n\t\tnode.mutex.Lock()\n\t\t\/\/ Resync() is always called after resync of the instance data,\n\t\t\/\/ mark node as resynced\n\t\tnode.resyncNeeded = false\n\t\tallocationNeeded := node.recalculateLocked()\n\t\tnode.loggerLocked().WithFields(logrus.Fields{\n\t\t\tfieldName: node.name,\n\t\t\t\"available\": node.stats.availableIPs,\n\t\t\t\"used\": node.stats.usedIPs,\n\t\t}).Debug(\"Recalculated allocation requirements\")\n\t\ttotalUsed += node.stats.usedIPs\n\t\ttotalAvailable += node.stats.availableIPs - node.stats.usedIPs\n\t\ttotalNeeded += node.stats.neededIPs\n\t\tremainingInterfaces += node.stats.remainingInterfaces\n\n\t\tif remainingInterfaces == 0 && totalAvailable == 0 {\n\t\t\tnodesAtCapacity++\n\t\t}\n\t\tif allocationNeeded {\n\t\t\tn.deficitResolver.TriggerWithReason(node.name)\n\t\t}\n\t\tnode.mutex.Unlock()\n\n\t\tnode.SyncToAPIServer()\n\t}\n\n\tn.metricsAPI.SetAllocatedIPs(\"used\", totalUsed)\n\tn.metricsAPI.SetAllocatedIPs(\"available\", totalAvailable)\n\tn.metricsAPI.SetAllocatedIPs(\"needed\", totalNeeded)\n\tn.metricsAPI.SetAvailableENIs(remainingInterfaces)\n\tn.metricsAPI.SetNodesAtCapacity(nodesAtCapacity)\n}\n<commit_msg>eni: Only attempt deficit resolution if ENIs are available<commit_after>\/\/ Copyright 2019 Authors of Cilium\n\/\/ Copyright 2017 Lyft, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage eni\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/aws\/types\"\n\t\"github.com\/cilium\/cilium\/pkg\/k8s\/apis\/cilium.io\/v2\"\n\t\"github.com\/cilium\/cilium\/pkg\/lock\"\n\t\"github.com\/cilium\/cilium\/pkg\/trigger\"\n\n\t\"github.com\/sirupsen\/logrus\"\n)\n\ntype k8sAPI interface {\n\tUpdate(origResource, newResource *v2.CiliumNode) (*v2.CiliumNode, error)\n\tUpdateStatus(origResource, newResource *v2.CiliumNode) (*v2.CiliumNode, error)\n}\n\ntype nodeManagerAPI interface {\n\tGetENI(instanceID string, index int) *v2.ENI\n\tGetENIs(instanceID string) []*v2.ENI\n\tGetSubnet(subnetID string) *types.Subnet\n\tFindSubnetByTags(vpcID, availabilityZone string, required types.Tags) *types.Subnet\n\tResync()\n}\n\ntype ec2API interface {\n\tCreateNetworkInterface(toAllocate int64, subnetID, desc string, groups []string) (string, error)\n\tDeleteNetworkInterface(eniID string) error\n\tAttachNetworkInterface(index int64, instanceID, eniID string) (string, error)\n\tModifyNetworkInterface(eniID, attachmentID string, deleteOnTermination bool) error\n\tAssignPrivateIpAddresses(eniID string, addresses int64) error\n}\n\ntype metricsAPI interface {\n\tIncENIAllocationAttempt(status, subnetID string)\n\tAddIPAllocation(subnetID string, allocated int64)\n\tSetAllocatedIPs(typ string, allocated int)\n\tSetAvailableENIs(available int)\n\tSetNodesAtCapacity(nodes int)\n\tIncResyncCount()\n}\n\n\/\/ nodeMap is a mapping of node names to ENI nodes\ntype nodeMap map[string]*Node\n\n\/\/ NodeManager manages all nodes with ENIs\ntype NodeManager struct {\n\tmutex lock.RWMutex\n\tnodes nodeMap\n\tinstancesAPI nodeManagerAPI\n\tec2API ec2API\n\tk8sAPI k8sAPI\n\tmetricsAPI metricsAPI\n\tresyncTrigger *trigger.Trigger\n\tdeficitResolver *trigger.Trigger\n}\n\n\/\/ NewNodeManager returns a new NodeManager\nfunc NewNodeManager(instancesAPI nodeManagerAPI, ec2API ec2API, k8sAPI k8sAPI, metrics metricsAPI) (*NodeManager, error) {\n\tmngr := &NodeManager{\n\t\tnodes: nodeMap{},\n\t\tinstancesAPI: instancesAPI,\n\t\tec2API: ec2API,\n\t\tk8sAPI: k8sAPI,\n\t\tmetricsAPI: metrics,\n\t}\n\n\tdeficitResolver, err := trigger.NewTrigger(trigger.Parameters{\n\t\tName: \"eni-node-manager-deficit-resolver\",\n\t\tMinInterval: time.Second,\n\t\tTriggerFunc: func(reasons []string) {\n\t\t\tfor _, name := range reasons {\n\t\t\t\tif node := mngr.Get(name); node != nil {\n\t\t\t\t\tif err := node.ResolveIPDeficit(); err != nil {\n\t\t\t\t\t\tnode.logger().WithError(err).Warning(\"Unable to resolve IP deficit of node\")\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tlog.WithField(fieldName, name).Warning(\"Node has disappeared while allocation request was queued\")\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to initialize deficit resolver trigger: %s\", err)\n\t}\n\n\tresyncTrigger, err := trigger.NewTrigger(trigger.Parameters{\n\t\tName: \"eni-node-manager-resync\",\n\t\tMinInterval: time.Second,\n\t\tTriggerFunc: func(reasons []string) {\n\t\t\tinstancesAPI.Resync()\n\t\t\tmngr.Resync()\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to initialize resync trigger: %s\", err)\n\t}\n\n\tmngr.resyncTrigger = resyncTrigger\n\tmngr.deficitResolver = deficitResolver\n\n\treturn mngr, nil\n}\n\n\/\/ GetNames returns the list of all node names\nfunc (n *NodeManager) GetNames() (allNodeNames []string) {\n\tn.mutex.RLock()\n\tdefer n.mutex.RUnlock()\n\n\tallNodeNames = make([]string, 0, len(n.nodes))\n\n\tfor name := range n.nodes {\n\t\tallNodeNames = append(allNodeNames, name)\n\t}\n\n\treturn\n}\n\n\/\/ Update is called whenever a CiliumNode resource has been updated in the\n\/\/ Kubernetes apiserver\nfunc (n *NodeManager) Update(resource *v2.CiliumNode) bool {\n\tn.mutex.Lock()\n\tnode, ok := n.nodes[resource.Name]\n\tif !ok {\n\t\tnode = &Node{\n\t\t\tname: resource.Name,\n\t\t\tmanager: n,\n\t\t}\n\t\tn.nodes[node.name] = node\n\n\t\tlog.WithField(fieldName, resource.Name).Info(\"Discovered new CiliumNode custom resource\")\n\t}\n\tn.mutex.Unlock()\n\n\treturn node.updatedResource(resource)\n}\n\n\/\/ Delete is called after a CiliumNode resource has been deleted via the\n\/\/ Kubernetes apiserver\nfunc (n *NodeManager) Delete(nodeName string) {\n\tn.mutex.Lock()\n\tdelete(n.nodes, nodeName)\n\tn.mutex.Unlock()\n}\n\n\/\/ Get returns the node with the given name\nfunc (n *NodeManager) Get(nodeName string) *Node {\n\tn.mutex.RLock()\n\tnode := n.nodes[nodeName]\n\tn.mutex.RUnlock()\n\treturn node\n}\n\n\/\/ GetNodesByNeededAddresses returns all nodes that require addresses to be\n\/\/ allocated, sorted by the number of addresses needed in descending order\nfunc (n *NodeManager) GetNodesByNeededAddresses() []*Node {\n\tn.mutex.RLock()\n\tdefer n.mutex.RUnlock()\n\n\tlist := make([]*Node, len(n.nodes))\n\tindex := 0\n\tfor _, node := range n.nodes {\n\t\tlist[index] = node\n\t\tindex++\n\t}\n\n\tsort.Slice(list, func(i, j int) bool {\n\t\treturn list[i].getNeededAddresses() > list[j].getNeededAddresses()\n\t})\n\n\treturn list\n}\n\n\/\/ Resync will attend all nodes and resolves IP deficits. The order of\n\/\/ attendance is defined by the number of IPs needed to reach the configured\n\/\/ watermarks. Any updates to the node resource are synchronized to the\n\/\/ Kubernetes apiserver.\nfunc (n *NodeManager) Resync() {\n\tvar totalUsed, totalAvailable, totalNeeded, remainingInterfaces, nodesAtCapacity int\n\n\tfor _, node := range n.GetNodesByNeededAddresses() {\n\t\tnode.mutex.Lock()\n\t\t\/\/ Resync() is always called after resync of the instance data,\n\t\t\/\/ mark node as resynced\n\t\tnode.resyncNeeded = false\n\t\tallocationNeeded := node.recalculateLocked()\n\t\tnode.loggerLocked().WithFields(logrus.Fields{\n\t\t\tfieldName: node.name,\n\t\t\t\"available\": node.stats.availableIPs,\n\t\t\t\"used\": node.stats.usedIPs,\n\t\t}).Debug(\"Recalculated allocation requirements\")\n\t\ttotalUsed += node.stats.usedIPs\n\t\ttotalAvailable += node.stats.availableIPs - node.stats.usedIPs\n\t\ttotalNeeded += node.stats.neededIPs\n\t\tremainingInterfaces += node.stats.remainingInterfaces\n\n\t\tif remainingInterfaces == 0 && totalAvailable == 0 {\n\t\t\tnodesAtCapacity++\n\t\t}\n\t\tif allocationNeeded && node.stats.remainingInterfaces > 0 {\n\t\t\tn.deficitResolver.TriggerWithReason(node.name)\n\t\t}\n\t\tnode.mutex.Unlock()\n\n\t\tnode.SyncToAPIServer()\n\t}\n\n\tn.metricsAPI.SetAllocatedIPs(\"used\", totalUsed)\n\tn.metricsAPI.SetAllocatedIPs(\"available\", totalAvailable)\n\tn.metricsAPI.SetAllocatedIPs(\"needed\", totalNeeded)\n\tn.metricsAPI.SetAvailableENIs(remainingInterfaces)\n\tn.metricsAPI.SetNodesAtCapacity(nodesAtCapacity)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Berglas Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage berglas\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\tuuid \"github.com\/satori\/go.uuid\"\n)\n\nfunc TestGsecretsIntegration(t *testing.T) {\n\tt.Parallel()\n\n\tif testing.Short() {\n\t\tt.Skip(\"skipping integration test (short)\")\n\t}\n\n\tctx := context.Background()\n\n\tbucket := os.Getenv(\"GOOGLE_CLOUD_BUCKET\")\n\tif bucket == \"\" {\n\t\tt.Fatal(\"missing GOOGLE_CLOUD_BUCKET\")\n\t}\n\n\tkey := os.Getenv(\"GOOGLE_CLOUD_KMS_KEY\")\n\tif key == \"\" {\n\t\tt.Fatal(\"missing GOOGLE_CLOUD_KMS_KEY\")\n\t}\n\n\tsa := os.Getenv(\"GOOGLE_CLOUD_SERVICE_ACCOUNT\")\n\tif sa == \"\" {\n\t\tt.Fatal(\"missing GOOGLE_CLOUD_SERVICE_ACCOUNT\")\n\t}\n\tsa = fmt.Sprintf(\"serviceAccount:%s\", sa)\n\n\tobject, object2 := testUUID(t), testUUID(t)\n\tif len(object) < 3 || len(object2) < 3 {\n\t\tt.Fatal(\"bad uuid created\")\n\t}\n\t\/\/ ensure non-matching prefix\n\tfor i := 0; i < 10 && object[:3] == object2[:3]; i++ {\n\t\tobject2 = testUUID(t)\n\t}\n\tif object[:3] == object2[:3] {\n\t\tt.Fatal(\"unable to generate non-prefix matching uuids\")\n\t}\n\n\tc, err := New(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\toriginal := []byte(\"original text\")\n\tvar secret *Secret\n\n\tif secret, err = c.Create(ctx, &CreateRequest{\n\t\tBucket: bucket,\n\t\tObject: object,\n\t\tKey: key,\n\t\tPlaintext: original,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err = c.Create(ctx, &CreateRequest{\n\t\tBucket: bucket,\n\t\tObject: object2,\n\t\tKey: key,\n\t\tPlaintext: original,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tsecrets, err := c.List(ctx, &ListRequest{\n\t\tBucket: bucket,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !testStringInclude(secrets.Secrets, object, secret.Generation) {\n\t\tt.Errorf(\"expected %#v to include %q\", secrets, object)\n\t}\n\tif !testStringInclude(secrets.Secrets, object2, 0) {\n\t\tt.Errorf(\"expected %#v to include %q\", secrets, object2)\n\t}\n\n\tsecrets, err = c.List(ctx, &ListRequest{\n\t\tBucket: bucket,\n\t\tPrefix: object[:3],\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !testStringInclude(secrets.Secrets, object, secret.Generation) {\n\t\tt.Errorf(\"expected %#v to include %q\", secrets, object)\n\t}\n\tif testStringInclude(secrets.Secrets, object2, secret.Generation) {\n\t\tt.Errorf(\"expected %#v to not include %q\", secrets, object)\n\t}\n\n\tupdated := []byte(\"updated text\")\n\n\tvar updatedSecret *Secret\n\tif updatedSecret, err = c.Create(ctx, &CreateRequest{\n\t\tBucket: bucket,\n\t\tObject: object,\n\t\tKey: key,\n\t\tPlaintext: updated,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tsecrets, err = c.List(ctx, &ListRequest{\n\t\tBucket: bucket,\n\t\tPrefix: object,\n\t\tGenerations: true,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !testStringInclude(secrets.Secrets, object, updatedSecret.Generation) {\n\t\tt.Errorf(\"expected %#v to include %q\", secrets, object)\n\t}\n\tif !testStringInclude(secrets.Secrets, object, secret.Generation) {\n\t\tt.Errorf(\"expected %#v to include %q\", secrets, object)\n\t}\n\n\tplaintext, err := c.Access(ctx, &AccessRequest{\n\t\tBucket: bucket,\n\t\tObject: object,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(plaintext, updated) {\n\t\tt.Errorf(\"expected %q to be %q\", plaintext, updated)\n\t}\n\n\tplaintext, err = c.Access(ctx, &AccessRequest{\n\t\tBucket: bucket,\n\t\tObject: object,\n\t\tGeneration: secret.Generation,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(plaintext, original) {\n\t\tt.Errorf(\"expected %q to be %q\", plaintext, original)\n\t}\n\n\tif err := c.Grant(ctx, &GrantRequest{\n\t\tBucket: bucket,\n\t\tObject: object,\n\t\tMembers: []string{sa},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := c.Revoke(ctx, &RevokeRequest{\n\t\tBucket: bucket,\n\t\tObject: object,\n\t\tMembers: []string{sa},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := c.Delete(ctx, &DeleteRequest{\n\t\tBucket: bucket,\n\t\tObject: object,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc testStringInclude(l []*Secret, n string, g int64) bool {\n\tfor _, v := range l {\n\t\tif n == v.Name && (g == 0 || g == v.Generation) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc testUUID(tb testing.TB) string {\n\ttb.Helper()\n\n\tu, err := uuid.NewV4()\n\tif err != nil {\n\t\ttb.Fatal(err)\n\t}\n\treturn u.String()\n}\n\nfunc TestKMSKeyTrimVersion(t *testing.T) {\n\tt.Parallel()\n\n\tcases := []struct {\n\t\tname string\n\t\ti string\n\t\to string\n\t}{\n\t\t{\n\t\t\t\"malformed\",\n\t\t\t\"foo\",\n\t\t\t\"foo\",\n\t\t},\n\t\t{\n\t\t\t\"no_version\",\n\t\t\t\"projects\/p\/locations\/l\/keyRings\/kr\/cryptoKeys\/ck\",\n\t\t\t\"projects\/p\/locations\/l\/keyRings\/kr\/cryptoKeys\/ck\",\n\t\t},\n\t\t{\n\t\t\t\"version\",\n\t\t\t\"projects\/p\/locations\/l\/keyRings\/kr\/cryptoKeys\/ck\/cryptoKeyVersions\/1\",\n\t\t\t\"projects\/p\/locations\/l\/keyRings\/kr\/cryptoKeys\/ck\",\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\ttc := tc\n\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tif act, exp := kmsKeyTrimVersion(tc.i), tc.o; act != exp {\n\t\t\t\tt.Errorf(\"expected %q to be %q\", act, exp)\n\t\t\t}\n\t\t})\n\t}\n}\n<commit_msg>Delete object2 too<commit_after>\/\/ Copyright 2019 The Berglas Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage berglas\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\tuuid \"github.com\/satori\/go.uuid\"\n)\n\nfunc TestGsecretsIntegration(t *testing.T) {\n\tt.Parallel()\n\n\tif testing.Short() {\n\t\tt.Skip(\"skipping integration test (short)\")\n\t}\n\n\tctx := context.Background()\n\n\tbucket := os.Getenv(\"GOOGLE_CLOUD_BUCKET\")\n\tif bucket == \"\" {\n\t\tt.Fatal(\"missing GOOGLE_CLOUD_BUCKET\")\n\t}\n\n\tkey := os.Getenv(\"GOOGLE_CLOUD_KMS_KEY\")\n\tif key == \"\" {\n\t\tt.Fatal(\"missing GOOGLE_CLOUD_KMS_KEY\")\n\t}\n\n\tsa := os.Getenv(\"GOOGLE_CLOUD_SERVICE_ACCOUNT\")\n\tif sa == \"\" {\n\t\tt.Fatal(\"missing GOOGLE_CLOUD_SERVICE_ACCOUNT\")\n\t}\n\tsa = fmt.Sprintf(\"serviceAccount:%s\", sa)\n\n\tobject, object2 := testUUID(t), testUUID(t)\n\tif len(object) < 3 || len(object2) < 3 {\n\t\tt.Fatal(\"bad uuid created\")\n\t}\n\t\/\/ ensure non-matching prefix\n\tfor i := 0; i < 10 && object[:3] == object2[:3]; i++ {\n\t\tobject2 = testUUID(t)\n\t}\n\tif object[:3] == object2[:3] {\n\t\tt.Fatal(\"unable to generate non-prefix matching uuids\")\n\t}\n\n\tc, err := New(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\toriginal := []byte(\"original text\")\n\tvar secret *Secret\n\n\tif secret, err = c.Create(ctx, &CreateRequest{\n\t\tBucket: bucket,\n\t\tObject: object,\n\t\tKey: key,\n\t\tPlaintext: original,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err = c.Create(ctx, &CreateRequest{\n\t\tBucket: bucket,\n\t\tObject: object2,\n\t\tKey: key,\n\t\tPlaintext: original,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tsecrets, err := c.List(ctx, &ListRequest{\n\t\tBucket: bucket,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !testStringInclude(secrets.Secrets, object, secret.Generation) {\n\t\tt.Errorf(\"expected %#v to include %q\", secrets, object)\n\t}\n\tif !testStringInclude(secrets.Secrets, object2, 0) {\n\t\tt.Errorf(\"expected %#v to include %q\", secrets, object2)\n\t}\n\n\tsecrets, err = c.List(ctx, &ListRequest{\n\t\tBucket: bucket,\n\t\tPrefix: object[:3],\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !testStringInclude(secrets.Secrets, object, secret.Generation) {\n\t\tt.Errorf(\"expected %#v to include %q\", secrets, object)\n\t}\n\tif testStringInclude(secrets.Secrets, object2, secret.Generation) {\n\t\tt.Errorf(\"expected %#v to not include %q\", secrets, object)\n\t}\n\n\tupdated := []byte(\"updated text\")\n\n\tvar updatedSecret *Secret\n\tif updatedSecret, err = c.Create(ctx, &CreateRequest{\n\t\tBucket: bucket,\n\t\tObject: object,\n\t\tKey: key,\n\t\tPlaintext: updated,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tsecrets, err = c.List(ctx, &ListRequest{\n\t\tBucket: bucket,\n\t\tPrefix: object,\n\t\tGenerations: true,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !testStringInclude(secrets.Secrets, object, updatedSecret.Generation) {\n\t\tt.Errorf(\"expected %#v to include %q\", secrets, object)\n\t}\n\tif !testStringInclude(secrets.Secrets, object, secret.Generation) {\n\t\tt.Errorf(\"expected %#v to include %q\", secrets, object)\n\t}\n\n\tplaintext, err := c.Access(ctx, &AccessRequest{\n\t\tBucket: bucket,\n\t\tObject: object,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(plaintext, updated) {\n\t\tt.Errorf(\"expected %q to be %q\", plaintext, updated)\n\t}\n\n\tplaintext, err = c.Access(ctx, &AccessRequest{\n\t\tBucket: bucket,\n\t\tObject: object,\n\t\tGeneration: secret.Generation,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(plaintext, original) {\n\t\tt.Errorf(\"expected %q to be %q\", plaintext, original)\n\t}\n\n\tif err := c.Grant(ctx, &GrantRequest{\n\t\tBucket: bucket,\n\t\tObject: object,\n\t\tMembers: []string{sa},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := c.Revoke(ctx, &RevokeRequest{\n\t\tBucket: bucket,\n\t\tObject: object,\n\t\tMembers: []string{sa},\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := c.Delete(ctx, &DeleteRequest{\n\t\tBucket: bucket,\n\t\tObject: object,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := c.Delete(ctx, &DeleteRequest{\n\t\tBucket: bucket,\n\t\tObject: object2,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc testStringInclude(l []*Secret, n string, g int64) bool {\n\tfor _, v := range l {\n\t\tif n == v.Name && (g == 0 || g == v.Generation) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc testUUID(tb testing.TB) string {\n\ttb.Helper()\n\n\tu, err := uuid.NewV4()\n\tif err != nil {\n\t\ttb.Fatal(err)\n\t}\n\treturn u.String()\n}\n\nfunc TestKMSKeyTrimVersion(t *testing.T) {\n\tt.Parallel()\n\n\tcases := []struct {\n\t\tname string\n\t\ti string\n\t\to string\n\t}{\n\t\t{\n\t\t\t\"malformed\",\n\t\t\t\"foo\",\n\t\t\t\"foo\",\n\t\t},\n\t\t{\n\t\t\t\"no_version\",\n\t\t\t\"projects\/p\/locations\/l\/keyRings\/kr\/cryptoKeys\/ck\",\n\t\t\t\"projects\/p\/locations\/l\/keyRings\/kr\/cryptoKeys\/ck\",\n\t\t},\n\t\t{\n\t\t\t\"version\",\n\t\t\t\"projects\/p\/locations\/l\/keyRings\/kr\/cryptoKeys\/ck\/cryptoKeyVersions\/1\",\n\t\t\t\"projects\/p\/locations\/l\/keyRings\/kr\/cryptoKeys\/ck\",\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\ttc := tc\n\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tif act, exp := kmsKeyTrimVersion(tc.i), tc.o; act != exp {\n\t\t\t\tt.Errorf(\"expected %q to be %q\", act, exp)\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017-2018 the u-root Authors. All rights reserved\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package netboot provides a one-stop shop for netboot parsing needs.\n\/\/\n\/\/ netboot can take a URL from a DHCP lease and try to detect iPXE scripts and\n\/\/ PXE scripts.\n\/\/\n\/\/ TODO: detect multiboot and Linux kernels without configuration (URL points\n\/\/ to a single kernel file).\n\/\/\n\/\/ TODO: detect iSCSI root paths.\npackage netboot\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/url\"\n\t\"path\"\n\n\t\"github.com\/u-root\/u-root\/pkg\/boot\"\n\t\"github.com\/u-root\/u-root\/pkg\/boot\/netboot\/ipxe\"\n\t\"github.com\/u-root\/u-root\/pkg\/boot\/netboot\/pxe\"\n\t\"github.com\/u-root\/u-root\/pkg\/curl\"\n\t\"github.com\/u-root\/u-root\/pkg\/dhclient\"\n)\n\n\/\/ BootImage figures out the image to boot from the given DHCP lease.\n\/\/\n\/\/ Tries, in order:\n\/\/\n\/\/ - to detect an iPXE script beginning with #!ipxe,\n\/\/\n\/\/ - to detect a pxelinux.0, in which case we will ignore the pxelinux and try\n\/\/ to parse pxelinux.cfg\/<files>.\n\/\/\n\/\/ TODO: detect straight up multiboot and bzImage Linux kernel files rather\n\/\/ than just configuration scripts.\nfunc BootImage(s curl.Schemes, lease dhclient.Lease) (*boot.LinuxImage, error) {\n\turi, err := lease.Boot()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Printf(\"Boot URI: %s\", uri)\n\n\t\/\/ IP only makes sense for v4 anyway, because the PXE probing of files\n\t\/\/ uses a MAC address and an IPv4 address to look at files.\n\tvar ip net.IP\n\tif p4, ok := lease.(*dhclient.Packet4); ok {\n\t\tip = p4.Lease().IP\n\t}\n\treturn getBootImage(s, uri, lease.Link().Attrs().HardwareAddr, ip)\n}\n\n\/\/ getBootImage attempts to parse the file at uri as an ipxe config and returns\n\/\/ the ipxe boot image. Otherwise falls back to pxe and uses the uri directory,\n\/\/ ip, and mac address to search for pxe configs.\nfunc getBootImage(schemes curl.Schemes, uri *url.URL, mac net.HardwareAddr, ip net.IP) (*boot.LinuxImage, error) {\n\t\/\/ Attempt to read the given boot path as an ipxe config file.\n\tipc, err := ipxe.ParseConfigWithSchemes(uri, schemes)\n\tif err == nil {\n\t\treturn ipc, nil\n\t}\n\tlog.Printf(\"Falling back to pxe boot: %v\", err)\n\n\t\/\/ Fallback to pxe boot.\n\twd := &url.URL{\n\t\tScheme: uri.Scheme,\n\t\tHost: uri.Host,\n\t\tPath: path.Dir(uri.Path),\n\t}\n\tpc, err := pxe.ParseConfigWithSchemes(wd, mac, ip, schemes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse pxelinux config: %v\", err)\n\t}\n\n\tlabel := pc.Entries[pc.DefaultEntry]\n\treturn label, nil\n}\n<commit_msg>pkg\/boot\/netboot: Handle the error when a boot target is not found.<commit_after>\/\/ Copyright 2017-2018 the u-root Authors. All rights reserved\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package netboot provides a one-stop shop for netboot parsing needs.\n\/\/\n\/\/ netboot can take a URL from a DHCP lease and try to detect iPXE scripts and\n\/\/ PXE scripts.\n\/\/\n\/\/ TODO: detect multiboot and Linux kernels without configuration (URL points\n\/\/ to a single kernel file).\n\/\/\n\/\/ TODO: detect iSCSI root paths.\npackage netboot\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/url\"\n\t\"path\"\n\n\t\"github.com\/u-root\/u-root\/pkg\/boot\"\n\t\"github.com\/u-root\/u-root\/pkg\/boot\/netboot\/ipxe\"\n\t\"github.com\/u-root\/u-root\/pkg\/boot\/netboot\/pxe\"\n\t\"github.com\/u-root\/u-root\/pkg\/curl\"\n\t\"github.com\/u-root\/u-root\/pkg\/dhclient\"\n)\n\n\/\/ BootImage figures out the image to boot from the given DHCP lease.\n\/\/\n\/\/ Tries, in order:\n\/\/\n\/\/ - to detect an iPXE script beginning with #!ipxe,\n\/\/\n\/\/ - to detect a pxelinux.0, in which case we will ignore the pxelinux and try\n\/\/ to parse pxelinux.cfg\/<files>.\n\/\/\n\/\/ TODO: detect straight up multiboot and bzImage Linux kernel files rather\n\/\/ than just configuration scripts.\nfunc BootImage(s curl.Schemes, lease dhclient.Lease) (*boot.LinuxImage, error) {\n\turi, err := lease.Boot()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlog.Printf(\"Boot URI: %s\", uri)\n\n\t\/\/ IP only makes sense for v4 anyway, because the PXE probing of files\n\t\/\/ uses a MAC address and an IPv4 address to look at files.\n\tvar ip net.IP\n\tif p4, ok := lease.(*dhclient.Packet4); ok {\n\t\tip = p4.Lease().IP\n\t}\n\treturn getBootImage(s, uri, lease.Link().Attrs().HardwareAddr, ip)\n}\n\n\/\/ getBootImage attempts to parse the file at uri as an ipxe config and returns\n\/\/ the ipxe boot image. Otherwise falls back to pxe and uses the uri directory,\n\/\/ ip, and mac address to search for pxe configs.\nfunc getBootImage(schemes curl.Schemes, uri *url.URL, mac net.HardwareAddr, ip net.IP) (*boot.LinuxImage, error) {\n\t\/\/ Attempt to read the given boot path as an ipxe config file.\n\tipc, err := ipxe.ParseConfigWithSchemes(uri, schemes)\n\tif err == nil {\n\t\treturn ipc, nil\n\t}\n\tlog.Printf(\"Falling back to pxe boot: %v\", err)\n\n\t\/\/ Fallback to pxe boot.\n\twd := &url.URL{\n\t\tScheme: uri.Scheme,\n\t\tHost: uri.Host,\n\t\tPath: path.Dir(uri.Path),\n\t}\n\tpc, err := pxe.ParseConfigWithSchemes(wd, mac, ip, schemes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse pxelinux config: %v\", err)\n\t}\n\n\tlabel, ok := pc.Entries[pc.DefaultEntry]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Could not find %q from entries %v\", pc.DefaultEntry, pc.Entries)\n\t}\n\treturn label, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package ocb2 implements the version 2 of the OCB authenticated-encryption algorithm.\r\n\/\/ OCB2 is specified in http:\/\/www.cs.ucdavis.edu\/~rogaway\/papers\/draft-krovetz-ocb-00.txt.\r\n\/\/\r\n\/\/ It should be noted that OCB's author, Phil Rogaway <rogaway@cs.ucdavis.edu>, holds\r\n\/\/ several US patents on the algorithm. This should be considered before using this code\r\n\/\/ in your own projects. See OCB's FAQ for more info:\r\n\/\/ http:\/\/www.cs.ucdavis.edu\/~rogaway\/ocb\/ocb-faq.htm#patent:phil\r\n\/\/\r\n\/\/ The Mumble Project has a license to use OCB mode in its BSD licensed code on a royalty\r\n\/\/ free basis.\r\npackage ocb2\r\n\r\nimport (\r\n\t\"crypto\/aes\"\r\n\t\"crypto\/cipher\"\r\n)\r\n\r\nconst (\r\n\t\/\/ TagSize specifies the length in bytes of a full OCB2 tag.\r\n\t\/\/ As per the specification, applications may truncate their\r\n\t\/\/ tags to a given length, but advocates that typical applications\r\n\t\/\/ should use a tag length of at least 8 bytes (64 bits).\r\n\tTagSize = aes.BlockSize\r\n\t\/\/ NonceSize specifies the length in bytes of an OCB2 nonce.\r\n\tNonceSize = aes.BlockSize\r\n)\r\n\r\n\/\/ zeros fills block with zero bytes.\r\nfunc zeros(block []byte) {\r\n\tfor i := range block {\r\n\t\tblock[i] = 0\r\n\t}\r\n}\r\n\r\n\/\/ xor outputs the bitwise exclusive-or of a and b to dst.\r\nfunc xor(dst []byte, a []byte, b []byte) {\r\n\tfor i := 0; i < aes.BlockSize; i++ {\r\n\t\tdst[i] = a[i] ^ b[i]\r\n\t}\r\n}\r\n\r\n\/\/ times2 performs the times2 operation, defined as:\r\n\/\/\r\n\/\/ times2(S)\r\n\/\/ S << 1 if S[1] = 0, and (S << 1) xor const(bitlength(S)) if S[1] = 1.\r\n\/\/\r\n\/\/ where const(n) is defined as\r\n\/\/\r\n\/\/ const(n)\r\n\/\/ The lexicographically first n-bit string C among all\r\n\/\/ strings that have a minimal possible number of \"1\"\r\n\/\/ bits and which name a polynomial x^n + C[1] *\r\n\/\/ x^{n-1} + ... + C[n-1] * x^1 + C[n] * x^0 that is\r\n\/\/ irreducible over the field with two elements. In\r\n\/\/ particular, const(128) = num2str(135, 128). For\r\n\/\/ other values of n, refer to a standard table of\r\n\/\/ irreducible polynomials [G. Seroussi,\r\n\/\/ \"Table of low-weight binary irreducible polynomials\",\r\n\/\/ HP Labs Technical Report HPL-98-135, 1998.].\r\n\/\/\r\n\/\/ and num2str(x, n) is defined as\r\n\/\/\r\n\/\/ num2str(x, n)\r\n\/\/ The n-bit binary representation of the integer x.\r\n\/\/ More formally, the n-bit string S where x = S[1] *\r\n\/\/ 2^{n-1} + S[2] * 2^{n-2} + ... + S[n] * 2^{0}. Only\r\n\/\/ used when 0 <= x < 2^n.\r\n\/\/\r\n\/\/ For our 128-bit block size implementation, this means that\r\n\/\/ the xor with const(bitlength(S)) if S[1] = 1 is implemented\r\n\/\/ by simply xor'ing the last byte with the number 135 when\r\n\/\/ S[1] = 1.\r\nfunc times2(block []byte) {\r\n\tcarry := (block[0] >> 7) & 0x1\r\n\tfor i := 0; i < aes.BlockSize-1; i++ {\r\n\t\tblock[i] = (block[i] << 1) | ((block[i+1] >> 7) & 0x1)\r\n\t}\r\n\tblock[aes.BlockSize-1] = (block[aes.BlockSize-1] << 1) ^ (carry * 135)\r\n}\r\n\r\n\/\/ times3 performs the times3 operation, defined as:\r\n\/\/\r\n\/\/ times3(S)\r\n\/\/ times2(S) xor S\r\nfunc times3(block []byte) {\r\n\tcarry := (block[0] >> 7) & 0x1\r\n\tfor i := 0; i < aes.BlockSize-1; i++ {\r\n\t\tblock[i] ^= (block[i] << 1) | ((block[i+1] >> 7) & 0x1)\r\n\t}\r\n\tblock[aes.BlockSize-1] ^= ((block[aes.BlockSize-1] << 1) ^ (carry * 135))\r\n}\r\n\r\n\/\/ Encrypt encrypts the plaintext src and outputs the corresponding ciphertext into dst.\r\n\/\/ Besides outputting a ciphertext into dst, Encrypt also outputs an authentication tag\r\n\/\/ of ocb2.TagSize bytes into tag, which should be used to verify the authenticity of the\r\n\/\/ message on the receiving side.\r\n\/\/\r\n\/\/ To ensure both authenticity and secrecy of messages, each invocation to this function must\r\n\/\/ be given an unique nonce of ocb2.NonceSize bytes. The nonce need not be secret (it can be\r\n\/\/ a counter), but it needs to be unique.\r\nfunc Encrypt(cipher cipher.Block, dst []byte, src []byte, nonce []byte, tag []byte) {\r\n\tvar delta [aes.BlockSize]byte\r\n\tvar checksum [aes.BlockSize]byte\r\n\tvar tmp [aes.BlockSize]byte\r\n\tvar pad [aes.BlockSize]byte\r\n\toff := 0\r\n\r\n\tcipher.Encrypt(delta[0:], nonce[0:])\r\n\tzeros(checksum[0:])\r\n\r\n\tremain := len(src)\r\n\tfor remain > aes.BlockSize {\r\n\t\ttimes2(delta[0:])\r\n\t\txor(tmp[0:], delta[0:], src[off:off+aes.BlockSize])\r\n\t\tcipher.Encrypt(tmp[0:], tmp[0:])\r\n\t\txor(dst[off:off+aes.BlockSize], delta[0:], tmp[0:])\r\n\t\txor(checksum[0:], checksum[0:], src[off:off+aes.BlockSize])\r\n\t\tremain -= aes.BlockSize\r\n\t\toff += aes.BlockSize\r\n\t}\r\n\r\n\ttimes2(delta[0:])\r\n\tzeros(tmp[0:])\r\n\tnum := remain * 8\r\n\ttmp[aes.BlockSize-2] = uint8((uint32(num) >> 8) & 0xff)\r\n\ttmp[aes.BlockSize-1] = uint8(num & 0xff)\r\n\txor(tmp[0:], tmp[0:], delta[0:])\r\n\tcipher.Encrypt(pad[0:], tmp[0:])\r\n\tcopied := copy(tmp[0:], src[off:])\r\n\tif copied != remain {\r\n\t\tpanic(\"ocb2: copy failed\")\r\n\t}\r\n\tif copy(tmp[copied:], pad[copied:]) != (aes.BlockSize - remain) {\r\n\t\tpanic(\"ocb2: copy failed\")\r\n\t}\r\n\txor(checksum[0:], checksum[0:], tmp[0:])\r\n\txor(tmp[0:], pad[0:], tmp[0:])\r\n\tif copy(dst[off:], tmp[0:]) != remain {\r\n\t\tpanic(\"ocb2: copy failed\")\r\n\t}\r\n\r\n\ttimes3(delta[0:])\r\n\txor(tmp[0:], delta[0:], checksum[0:])\r\n\tcipher.Encrypt(tag[0:], tmp[0:])\r\n}\r\n\r\n\/\/ Decrypt takes a ciphertext and a nonce as its input and outputs a decrypted plaintext\r\n\/\/ and corresponding authentication tag.\r\n\/\/\r\n\/\/ Before using the decrpyted plaintext, the application\r\n\/\/ should verify that the computed authentication tag matches the tag that was produced when\r\n\/\/ encrypting the message (taking into consideration that OCB tags are allowed to be truncated\r\n\/\/ to a length less than ocb.TagSize).\r\nfunc Decrypt(cipher cipher.Block, plain []byte, encrypted []byte, nonce []byte, tag []byte) {\r\n\tvar checksum [aes.BlockSize]byte\r\n\tvar delta [aes.BlockSize]byte\r\n\tvar tmp [aes.BlockSize]byte\r\n\tvar pad [aes.BlockSize]byte\r\n\toff := 0\r\n\r\n\tcipher.Encrypt(delta[0:], nonce[0:])\r\n\tzeros(checksum[0:])\r\n\r\n\tremain := len(encrypted)\r\n\tfor remain > aes.BlockSize {\r\n\t\ttimes2(delta[0:])\r\n\t\txor(tmp[0:], delta[0:], encrypted[off:off+aes.BlockSize])\r\n\t\tcipher.Decrypt(tmp[0:], tmp[0:])\r\n\t\txor(plain[off:off+aes.BlockSize], delta[0:], tmp[0:])\r\n\t\txor(checksum[0:], checksum[0:], plain[off:off+aes.BlockSize])\r\n\t\toff += aes.BlockSize\r\n\t\tremain -= aes.BlockSize\r\n\t}\r\n\r\n\ttimes2(delta[0:])\r\n\tzeros(tmp[0:])\r\n\tnum := remain * 8\r\n\ttmp[aes.BlockSize-2] = uint8((uint32(num) >> 8) & 0xff)\r\n\ttmp[aes.BlockSize-1] = uint8(num & 0xff)\r\n\txor(tmp[0:], tmp[0:], delta[0:])\r\n\tcipher.Encrypt(pad[0:], tmp[0:])\r\n\tzeros(tmp[0:])\r\n\tcopied := copy(tmp[0:remain], encrypted[off:off+remain])\r\n\tif copied != remain {\r\n\t\tpanic(\"ocb2: copy failed\")\r\n\t}\r\n\txor(tmp[0:], tmp[0:], pad[0:])\r\n\txor(checksum[0:], checksum[0:], tmp[0:])\r\n\tcopied = copy(plain[off:off+remain], tmp[0:remain])\r\n\tif copied != remain {\r\n\t\tpanic(\"ocb2: copy failed\")\r\n\t}\r\n\r\n\ttimes3(delta[0:])\r\n\txor(tmp[0:], delta[0:], checksum[0:])\r\n\tcipher.Encrypt(tag[0:], tmp[0:])\r\n}<commit_msg>pkg\/cryptstate\/ocb2: stricter cipher\/tag\/nonce checking.<commit_after>\/\/ Package ocb2 implements the version 2 of the OCB authenticated-encryption algorithm.\r\n\/\/ OCB2 is specified in http:\/\/www.cs.ucdavis.edu\/~rogaway\/papers\/draft-krovetz-ocb-00.txt.\r\n\/\/\r\n\/\/ Note that this implementation is limited to block ciphers with a block size of 128 bits.\r\n\/\/\r\n\/\/ It should also be noted that OCB's author, Phil Rogaway <rogaway@cs.ucdavis.edu>, holds\r\n\/\/ several US patents on the algorithm. This should be considered before using this code\r\n\/\/ in your own projects. See OCB's FAQ for more info:\r\n\/\/ http:\/\/www.cs.ucdavis.edu\/~rogaway\/ocb\/ocb-faq.htm#patent:phil\r\n\/\/\r\n\/\/ The Mumble Project has a license to use OCB mode in its BSD licensed code on a royalty\r\n\/\/ free basis.\r\npackage ocb2\r\n\r\nimport \"crypto\/cipher\"\r\n\r\nconst (\r\n\t\/\/ BlockSize defines the block size that this particular implementation\r\n\t\/\/ of OCB2 is made to work on.\r\n\tBlockSize = 16\r\n\t\/\/ TagSize specifies the length in bytes of a full OCB2 tag.\r\n\t\/\/ As per the specification, applications may truncate their\r\n\t\/\/ tags to a given length, but advocates that typical applications\r\n\t\/\/ should use a tag length of at least 8 bytes (64 bits).\r\n\tTagSize = BlockSize\r\n\t\/\/ NonceSize specifies the length in bytes of an OCB2 nonce.\r\n\tNonceSize = BlockSize\r\n)\r\n\r\n\/\/ zeros fills block with zero bytes.\r\nfunc zeros(block []byte) {\r\n\tfor i := range block {\r\n\t\tblock[i] = 0\r\n\t}\r\n}\r\n\r\n\/\/ xor outputs the bitwise exclusive-or of a and b to dst.\r\nfunc xor(dst []byte, a []byte, b []byte) {\r\n\tfor i := 0; i < BlockSize; i++ {\r\n\t\tdst[i] = a[i] ^ b[i]\r\n\t}\r\n}\r\n\r\n\/\/ times2 performs the times2 operation, defined as:\r\n\/\/\r\n\/\/ times2(S)\r\n\/\/ S << 1 if S[1] = 0, and (S << 1) xor const(bitlength(S)) if S[1] = 1.\r\n\/\/\r\n\/\/ where const(n) is defined as\r\n\/\/\r\n\/\/ const(n)\r\n\/\/ The lexicographically first n-bit string C among all\r\n\/\/ strings that have a minimal possible number of \"1\"\r\n\/\/ bits and which name a polynomial x^n + C[1] *\r\n\/\/ x^{n-1} + ... + C[n-1] * x^1 + C[n] * x^0 that is\r\n\/\/ irreducible over the field with two elements. In\r\n\/\/ particular, const(128) = num2str(135, 128). For\r\n\/\/ other values of n, refer to a standard table of\r\n\/\/ irreducible polynomials [G. Seroussi,\r\n\/\/ \"Table of low-weight binary irreducible polynomials\",\r\n\/\/ HP Labs Technical Report HPL-98-135, 1998.].\r\n\/\/\r\n\/\/ and num2str(x, n) is defined as\r\n\/\/\r\n\/\/ num2str(x, n)\r\n\/\/ The n-bit binary representation of the integer x.\r\n\/\/ More formally, the n-bit string S where x = S[1] *\r\n\/\/ 2^{n-1} + S[2] * 2^{n-2} + ... + S[n] * 2^{0}. Only\r\n\/\/ used when 0 <= x < 2^n.\r\n\/\/\r\n\/\/ For our 128-bit block size implementation, this means that\r\n\/\/ the xor with const(bitlength(S)) if S[1] = 1 is implemented\r\n\/\/ by simply xor'ing the last byte with the number 135 when\r\n\/\/ S[1] = 1.\r\nfunc times2(block []byte) {\r\n\tcarry := (block[0] >> 7) & 0x1\r\n\tfor i := 0; i < BlockSize-1; i++ {\r\n\t\tblock[i] = (block[i] << 1) | ((block[i+1] >> 7) & 0x1)\r\n\t}\r\n\tblock[BlockSize-1] = (block[BlockSize-1] << 1) ^ (carry * 135)\r\n}\r\n\r\n\/\/ times3 performs the times3 operation, defined as:\r\n\/\/\r\n\/\/ times3(S)\r\n\/\/ times2(S) xor S\r\nfunc times3(block []byte) {\r\n\tcarry := (block[0] >> 7) & 0x1\r\n\tfor i := 0; i < BlockSize-1; i++ {\r\n\t\tblock[i] ^= (block[i] << 1) | ((block[i+1] >> 7) & 0x1)\r\n\t}\r\n\tblock[BlockSize-1] ^= ((block[BlockSize-1] << 1) ^ (carry * 135))\r\n}\r\n\r\n\/\/ Encrypt encrypts the plaintext src and outputs the corresponding ciphertext into dst.\r\n\/\/ Besides outputting a ciphertext into dst, Encrypt also outputs an authentication tag\r\n\/\/ of ocb2.TagSize bytes into tag, which should be used to verify the authenticity of the\r\n\/\/ message on the receiving side.\r\n\/\/\r\n\/\/ To ensure both authenticity and secrecy of messages, each invocation to this function must\r\n\/\/ be given an unique nonce of ocb2.NonceSize bytes. The nonce need not be secret (it can be\r\n\/\/ a counter), but it needs to be unique.\r\n\/\/\r\n\/\/ The block cipher used in function must work on a block size equal to ocb2.BlockSize.\r\n\/\/ The tag slice used in this function must have a length equal to ocb2.TagSize.\r\n\/\/ The nonce slice used in this function must have a length equal to ocb2.NonceSize.\r\n\/\/ If any of the above are violated, Encrypt will panic.\r\nfunc Encrypt(cipher cipher.Block, dst []byte, src []byte, nonce []byte, tag []byte) {\r\n\tif cipher.BlockSize() != BlockSize {\r\n\t\tpanic(\"ocb2: cipher blocksize is not equal to ocb2.BlockSize\")\r\n\t}\r\n\tif len(nonce) != NonceSize {\r\n\t\tpanic(\"ocb2: nonce length is not equal to ocb2.NonceSize\")\r\n\t}\r\n\tif len(tag) != TagSize {\r\n\t\tpanic(\"ocb2: tag length is not equal to ocb2.TagSize\")\r\n\t}\r\n\r\n\tvar delta [BlockSize]byte\r\n\tvar checksum [BlockSize]byte\r\n\tvar tmp [BlockSize]byte\r\n\tvar pad [BlockSize]byte\r\n\toff := 0\r\n\r\n\tcipher.Encrypt(delta[0:], nonce[0:])\r\n\tzeros(checksum[0:])\r\n\r\n\tremain := len(src)\r\n\tfor remain > BlockSize {\r\n\t\ttimes2(delta[0:])\r\n\t\txor(tmp[0:], delta[0:], src[off:off+BlockSize])\r\n\t\tcipher.Encrypt(tmp[0:], tmp[0:])\r\n\t\txor(dst[off:off+BlockSize], delta[0:], tmp[0:])\r\n\t\txor(checksum[0:], checksum[0:], src[off:off+BlockSize])\r\n\t\tremain -= BlockSize\r\n\t\toff += BlockSize\r\n\t}\r\n\r\n\ttimes2(delta[0:])\r\n\tzeros(tmp[0:])\r\n\tnum := remain * 8\r\n\ttmp[BlockSize-2] = uint8((uint32(num) >> 8) & 0xff)\r\n\ttmp[BlockSize-1] = uint8(num & 0xff)\r\n\txor(tmp[0:], tmp[0:], delta[0:])\r\n\tcipher.Encrypt(pad[0:], tmp[0:])\r\n\tcopied := copy(tmp[0:], src[off:])\r\n\tif copied != remain {\r\n\t\tpanic(\"ocb2: copy failed\")\r\n\t}\r\n\tif copy(tmp[copied:], pad[copied:]) != (BlockSize - remain) {\r\n\t\tpanic(\"ocb2: copy failed\")\r\n\t}\r\n\txor(checksum[0:], checksum[0:], tmp[0:])\r\n\txor(tmp[0:], pad[0:], tmp[0:])\r\n\tif copy(dst[off:], tmp[0:]) != remain {\r\n\t\tpanic(\"ocb2: copy failed\")\r\n\t}\r\n\r\n\ttimes3(delta[0:])\r\n\txor(tmp[0:], delta[0:], checksum[0:])\r\n\tcipher.Encrypt(tag[0:], tmp[0:])\r\n}\r\n\r\n\/\/ Decrypt takes a ciphertext and a nonce as its input and outputs a decrypted plaintext\r\n\/\/ and corresponding authentication tag.\r\n\/\/\r\n\/\/ Before using the decrpyted plaintext, the application\r\n\/\/ should verify that the computed authentication tag matches the tag that was produced when\r\n\/\/ encrypting the message (taking into consideration that OCB tags are allowed to be truncated\r\n\/\/ to a length less than ocb.TagSize).\r\n\/\/\r\n\/\/ The block cipher used in function must work on a block size equal to ocb2.BlockSize.\r\n\/\/ The tag slice used in this function must have a length equal to ocb2.TagSize.\r\n\/\/ The nonce slice used in this function must have a length equal to ocb2.NonceSize.\r\n\/\/ If any of the above are violated, Encrypt will panic.\r\nfunc Decrypt(cipher cipher.Block, plain []byte, encrypted []byte, nonce []byte, tag []byte) {\r\n\tif cipher.BlockSize() != BlockSize {\r\n\t\tpanic(\"ocb2: cipher blocksize is not equal to ocb2.BlockSize\")\r\n\t}\r\n\tif len(nonce) != NonceSize {\r\n\t\tpanic(\"ocb2: nonce length is not equal to ocb2.NonceSize\")\r\n\t}\r\n\tif len(tag) != TagSize {\r\n\t\tpanic(\"ocb2: tag length is not equal to ocb2.TagSize\")\r\n\t}\r\n\r\n\tvar checksum [BlockSize]byte\r\n\tvar delta [BlockSize]byte\r\n\tvar tmp [BlockSize]byte\r\n\tvar pad [BlockSize]byte\r\n\toff := 0\r\n\r\n\tcipher.Encrypt(delta[0:], nonce[0:])\r\n\tzeros(checksum[0:])\r\n\r\n\tremain := len(encrypted)\r\n\tfor remain > BlockSize {\r\n\t\ttimes2(delta[0:])\r\n\t\txor(tmp[0:], delta[0:], encrypted[off:off+BlockSize])\r\n\t\tcipher.Decrypt(tmp[0:], tmp[0:])\r\n\t\txor(plain[off:off+BlockSize], delta[0:], tmp[0:])\r\n\t\txor(checksum[0:], checksum[0:], plain[off:off+BlockSize])\r\n\t\toff += BlockSize\r\n\t\tremain -= BlockSize\r\n\t}\r\n\r\n\ttimes2(delta[0:])\r\n\tzeros(tmp[0:])\r\n\tnum := remain * 8\r\n\ttmp[BlockSize-2] = uint8((uint32(num) >> 8) & 0xff)\r\n\ttmp[BlockSize-1] = uint8(num & 0xff)\r\n\txor(tmp[0:], tmp[0:], delta[0:])\r\n\tcipher.Encrypt(pad[0:], tmp[0:])\r\n\tzeros(tmp[0:])\r\n\tcopied := copy(tmp[0:remain], encrypted[off:off+remain])\r\n\tif copied != remain {\r\n\t\tpanic(\"ocb2: copy failed\")\r\n\t}\r\n\txor(tmp[0:], tmp[0:], pad[0:])\r\n\txor(checksum[0:], checksum[0:], tmp[0:])\r\n\tcopied = copy(plain[off:off+remain], tmp[0:remain])\r\n\tif copied != remain {\r\n\t\tpanic(\"ocb2: copy failed\")\r\n\t}\r\n\r\n\ttimes3(delta[0:])\r\n\txor(tmp[0:], delta[0:], checksum[0:])\r\n\tcipher.Encrypt(tag[0:], tmp[0:])\r\n}<|endoftext|>"} {"text":"<commit_before>package handler\n\nimport (\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\n\t\"github.com\/gorilla\/mux\"\n\tcoreHttp \"github.com\/skygeario\/skygear-server\/pkg\/core\/http\"\n)\n\n\/\/ NewGearHandler takes an incoming request and sends it to coresponding\n\/\/ gear server\nfunc NewGearHandler(restPathIdentifier string) http.HandlerFunc {\n\tproxy := newGearReverseProxy()\n\treturn rewriteHandler(proxy, restPathIdentifier)\n}\n\nfunc newGearReverseProxy() *httputil.ReverseProxy {\n\tdirector := func(req *http.Request) {\n\t\tpath := req.URL.Path\n\t\tquery := req.URL.RawQuery\n\t\tfragment := req.URL.Fragment\n\t\tvar err error\n\t\tu, err := url.Parse(req.Header.Get(coreHttp.HeaderGearEndpoint))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treq.URL = u\n\t\treq.URL.Path = path\n\t\treq.URL.RawQuery = query\n\t\treq.URL.Fragment = fragment\n\t}\n\treturn &httputil.ReverseProxy{Director: director}\n}\n\nfunc rewriteHandler(p *httputil.ReverseProxy, restPathIdentifier string) func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tr.URL.Path = \"\/\" + mux.Vars(r)[restPathIdentifier]\n\t\tp.ServeHTTP(w, r)\n\t}\n}\n<commit_msg>Remove CORS headers from upstream response<commit_after>package handler\n\nimport (\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\n\t\"github.com\/gorilla\/mux\"\n\tcoreHttp \"github.com\/skygeario\/skygear-server\/pkg\/core\/http\"\n)\n\n\/\/ NewGearHandler takes an incoming request and sends it to coresponding\n\/\/ gear server\nfunc NewGearHandler(restPathIdentifier string) http.HandlerFunc {\n\tproxy := newGearReverseProxy()\n\treturn rewriteHandler(proxy, restPathIdentifier)\n}\n\nfunc newGearReverseProxy() *httputil.ReverseProxy {\n\tdirector := func(req *http.Request) {\n\t\tpath := req.URL.Path\n\t\tquery := req.URL.RawQuery\n\t\tfragment := req.URL.Fragment\n\t\tvar err error\n\t\tu, err := url.Parse(req.Header.Get(coreHttp.HeaderGearEndpoint))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treq.URL = u\n\t\treq.URL.Path = path\n\t\treq.URL.RawQuery = query\n\t\treq.URL.Fragment = fragment\n\t}\n\tmodifyResponse := func(r *http.Response) error {\n\t\t\/\/ Remove CORS headers because they are managed by this gateway.\n\t\t\/\/ Auth gear in standalone mode mounts CORS middleware.\n\t\t\/\/ If we do not remove CORS headers, then the headers will duplicate.\n\t\tr.Header.Del(\"Access-Control-Allow-Origin\")\n\t\tr.Header.Del(\"Access-Control-Allow-Credentials\")\n\t\tr.Header.Del(\"Access-Control-Allow-Methods\")\n\t\tr.Header.Del(\"Access-Control-Allow-Headers\")\n\t\treturn nil\n\t}\n\n\treturn &httputil.ReverseProxy{Director: director, ModifyResponse: modifyResponse}\n}\n\nfunc rewriteHandler(p *httputil.ReverseProxy, restPathIdentifier string) func(http.ResponseWriter, *http.Request) {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tr.URL.Path = \"\/\" + mux.Vars(r)[restPathIdentifier]\n\t\tp.ServeHTTP(w, r)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2014 Constantin Schomburg <me@cschomburg.com>\n\/\/\n\/\/ Use of this source code is governed by an MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage natural\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com\/xconstruct\/stark\/proto\"\n)\n\ntype simple struct {\n\ttext string\n\tok bool\n\texpected proto.Message\n\texpectedPayload map[string]interface{}\n}\n\nfunc TestParseSimple(t *testing.T) {\n\ttests := []simple{\n\t\tsimple{\".ping\", true, proto.Message{\n\t\t\tAction: \"ping\",\n\t\t}, nil},\n\n\t\tsimple{\".ping device=me\", true, proto.Message{\n\t\t\tAction: \"ping\",\n\t\t\tDestination: \"me\",\n\t\t}, nil},\n\n\t\tsimple{\".ping some text\", true, proto.Message{\n\t\t\tAction: \"ping\",\n\t\t\tText: \"some text\",\n\t\t}, nil},\n\n\t\tsimple{\"!ping some text\", true, proto.Message{\n\t\t\tAction: \"ping\",\n\t\t\tText: \"some text\",\n\t\t}, nil},\n\n\t\tsimple{\".ping with some device=me host=another things\", true, proto.Message{\n\t\t\tAction: \"ping\",\n\t\t\tDestination: \"me\",\n\t\t\tText: \"with some things\",\n\t\t}, map[string]interface{}{\n\t\t\t\"host\": \"another\",\n\t\t}},\n\n\t\tsimple{`.ping with \"some device=me\" host=\"another things\" this`, true, proto.Message{\n\t\t\tAction: \"ping\",\n\t\t\tText: `with some device=me this`,\n\t\t}, map[string]interface{}{\n\t\t\t\"host\": \"another things\",\n\t\t}},\n\n\t\tsimple{\"ping no\", false, proto.Message{}, nil},\n\t\tsimple{\"\", false, proto.Message{}, nil},\n\t}\n\n\tfor _, test := range tests {\n\t\tmsg, ok := ParseSimple(test.text)\n\t\tif ok != test.ok {\n\t\t\tt.Errorf(\"'%s' should parse? exp: %v, got: %v\", test.text, test.ok, ok)\n\t\t} else if ok {\n\t\t\tvar payload map[string]interface{}\n\t\t\tmsg.DecodePayload(&payload)\n\t\t\tmsg.Payload = nil\n\t\t\tif !reflect.DeepEqual(msg, test.expected) {\n\t\t\t\tt.Errorf(\"decoded message differs\\nexp '%v'\\ngot '%v'\", test.expected, msg)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(payload, test.expectedPayload) {\n\t\t\t\tt.Errorf(\"decoded payload differs\\nexp '%v'\\ngot '%v'\", test.expectedPayload, payload)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestFormatMessage(t *testing.T) {\n\tmsg := proto.Message{\n\t\tText: \"Hello, the time is 2015-03-14T18:48:10+02:00.\",\n\t}\n\tFormatMessage(&msg)\n\texp := \"Hello, the time is Sat, 14 Mar 15 at 18:48.\"\n\tif msg.Text != exp {\n\t\tt.Error(\"Unexpected format: \", msg.Text)\n\t}\n}\n<commit_msg>Natural: Fix test after payload changes.<commit_after>\/\/ Copyright (C) 2014 Constantin Schomburg <me@cschomburg.com>\n\/\/\n\/\/ Use of this source code is governed by an MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage natural\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com\/xconstruct\/stark\/proto\"\n)\n\ntype simple struct {\n\ttext string\n\tok bool\n\texpected proto.Message\n\texpectedPayload map[string]interface{}\n}\n\nfunc TestParseSimple(t *testing.T) {\n\ttests := []simple{\n\t\tsimple{\".ping\", true, proto.Message{\n\t\t\tAction: \"ping\",\n\t\t}, nil},\n\n\t\tsimple{\".ping device=me\", true, proto.Message{\n\t\t\tAction: \"ping\",\n\t\t\tDestination: \"me\",\n\t\t}, nil},\n\n\t\tsimple{\".ping some text\", true, proto.Message{\n\t\t\tAction: \"ping\",\n\t\t\tText: \"some text\",\n\t\t}, nil},\n\n\t\tsimple{\"!ping some text\", true, proto.Message{\n\t\t\tAction: \"ping\",\n\t\t\tText: \"some text\",\n\t\t}, nil},\n\n\t\tsimple{\".ping with some device=me host=another things\", true, proto.Message{\n\t\t\tAction: \"ping\",\n\t\t\tDestination: \"me\",\n\t\t\tText: \"with some things\",\n\t\t}, map[string]interface{}{\n\t\t\t\"host\": \"another\",\n\t\t}},\n\n\t\tsimple{`.ping with \"some device=me\" host=\"another things\" this`, true, proto.Message{\n\t\t\tAction: \"ping\",\n\t\t\tText: `with some device=me this`,\n\t\t}, map[string]interface{}{\n\t\t\t\"host\": \"another things\",\n\t\t}},\n\n\t\tsimple{\"ping no\", false, proto.Message{}, nil},\n\t\tsimple{\"\", false, proto.Message{}, nil},\n\t}\n\n\tfor _, test := range tests {\n\t\tmsg, ok := ParseSimple(test.text)\n\t\tif ok != test.ok {\n\t\t\tt.Errorf(\"'%s' should parse? exp: %v, got: %v\", test.text, test.ok, ok)\n\t\t} else if ok {\n\t\t\tvar payload map[string]interface{}\n\t\t\tmsg.DecodePayload(&payload)\n\t\t\tmsg.Payload.Raw = nil\n\t\t\tif !reflect.DeepEqual(msg, test.expected) {\n\t\t\t\tt.Errorf(\"decoded message differs\\nexp '%v'\\ngot '%v'\", test.expected, msg)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(payload, test.expectedPayload) {\n\t\t\t\tt.Errorf(\"decoded payload differs\\nexp '%v'\\ngot '%v'\", test.expectedPayload, payload)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestFormatMessage(t *testing.T) {\n\tmsg := proto.Message{\n\t\tText: \"Hello, the time is 2015-03-14T18:48:10+02:00.\",\n\t}\n\tFormatMessage(&msg)\n\texp := \"Hello, the time is Sat, 14 Mar 15 at 18:48.\"\n\tif msg.Text != exp {\n\t\tt.Error(\"Unexpected format: \", msg.Text)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package restapi\n\nimport (\n\t\"crypto\/tls\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/go-openapi\/swag\"\n\tflags \"github.com\/jessevdk\/go-flags\"\n\tgraceful \"github.com\/tylerb\/graceful\"\n\n\t\"github.com\/radanalyticsio\/oshinko-cli\/rest\/restapi\/operations\"\n)\n\nconst (\n\tschemeHTTP = \"http\"\n\tschemeHTTPS = \"https\"\n\tschemeUnix = \"unix\"\n)\n\nvar defaultSchemes []string\n\nfunc init() {\n\tdefaultSchemes = []string{\n\t\tschemeHTTP,\n\t\tschemeHTTPS,\n\t}\n}\n\n\/\/ NewServer creates a new api oshinko rest server but does not configure it\nfunc NewServer(api *operations.OshinkoRestAPI) *Server {\n\ts := new(Server)\n\ts.api = api\n\treturn s\n}\n\n\/\/ ConfigureAPI configures the API and handlers. Needs to be called before Serve\nfunc (s *Server) ConfigureAPI() {\n\tif s.api != nil {\n\t\ts.handler = configureAPI(s.api)\n\t}\n}\n\n\/\/ ConfigureFlags configures the additional flags defined by the handlers. Needs to be called before the parser.Parse\nfunc (s *Server) ConfigureFlags() {\n\tif s.api != nil {\n\t\tconfigureFlags(s.api)\n\t}\n}\n\n\/\/ Server for the oshinko rest API\ntype Server struct {\n\tEnabledListeners []string `long:\"scheme\" description:\"the listeners to enable, this can be repeated and defaults to the schemes in the swagger spec\"`\n\n\tSocketPath flags.Filename `long:\"socket-path\" description:\"the unix socket to listen on\" default:\"\/var\/run\/oshinko-rest.sock\"`\n\tdomainSocketL net.Listener\n\n\tHost string `long:\"host\" description:\"the IP to listen on\" default:\"localhost\" env:\"HOST\"`\n\tPort int `long:\"port\" description:\"the port to listen on for insecure connections, defaults to a random value\" env:\"PORT\"`\n\thttpServerL net.Listener\n\n\tTLSHost string `long:\"tls-host\" description:\"the IP to listen on for tls, when not specified it's the same as --host\" env:\"TLS_HOST\"`\n\tTLSPort int `long:\"tls-port\" description:\"the port to listen on for secure connections, defaults to a random value\" env:\"TLS_PORT\"`\n\tTLSCertificate flags.Filename `long:\"tls-certificate\" description:\"the certificate to use for secure connections\" env:\"TLS_CERTIFICATE\"`\n\tTLSCertificateKey flags.Filename `long:\"tls-key\" description:\"the private key to use for secure conections\" env:\"TLS_PRIVATE_KEY\"`\n\thttpsServerL net.Listener\n\n\tapi *operations.OshinkoRestAPI\n\thandler http.Handler\n\thasListeners bool\n}\n\n\/\/ Logf logs message either via defined user logger or via system one if no user logger is defined.\nfunc (s *Server) Logf(f string, args ...interface{}) {\n\tif s.api != nil && s.api.Logger != nil {\n\t\ts.api.Logger(f, args...)\n\t} else {\n\t\tlog.Printf(f, args...)\n\t}\n}\n\n\/\/ Fatalf logs message either via defined user logger or via system one if no user logger is defined.\n\/\/ Exits with non-zero status after printing\nfunc (s *Server) Fatalf(f string, args ...interface{}) {\n\tif s.api != nil && s.api.Logger != nil {\n\t\ts.api.Logger(f, args...)\n\t\tos.Exit(1)\n\t} else {\n\t\tlog.Fatalf(f, args...)\n\t}\n}\n\n\/\/ SetAPI configures the server with the specified API. Needs to be called before Serve\nfunc (s *Server) SetAPI(api *operations.OshinkoRestAPI) {\n\tif api == nil {\n\t\ts.api = nil\n\t\ts.handler = nil\n\t\treturn\n\t}\n\n\ts.api = api\n\ts.api.Logger = log.Printf\n\ts.handler = configureAPI(api)\n}\n\nfunc (s *Server) hasScheme(scheme string) bool {\n\tschemes := s.EnabledListeners\n\tif len(schemes) == 0 {\n\t\tschemes = defaultSchemes\n\t}\n\n\tfor _, v := range schemes {\n\t\tif v == scheme {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Serve the api\nfunc (s *Server) Serve() (err error) {\n\tif !s.hasListeners {\n\t\tif err := s.Listen(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar wg sync.WaitGroup\n\n\tif s.hasScheme(schemeUnix) {\n\t\tdomainSocket := &graceful.Server{Server: new(http.Server)}\n\t\tdomainSocket.Handler = s.handler\n\n\t\twg.Add(1)\n\t\ts.Logf(\"Serving oshinko rest at unix:\/\/%s\", s.SocketPath)\n\t\tgo func(l net.Listener) {\n\t\t\tdefer wg.Done()\n\t\t\tif err := domainSocket.Serve(l); err != nil {\n\t\t\t\ts.Fatalf(\"%v\", err)\n\t\t\t}\n\t\t\ts.Logf(\"Stopped serving oshinko rest at unix:\/\/%s\", s.SocketPath)\n\t\t}(s.domainSocketL)\n\t}\n\n\tif s.hasScheme(schemeHTTP) {\n\t\thttpServer := &graceful.Server{Server: new(http.Server)}\n\t\thttpServer.SetKeepAlivesEnabled(true)\n\t\thttpServer.TCPKeepAlive = 3 * time.Minute\n\t\thttpServer.Handler = s.handler\n\n\t\twg.Add(1)\n\t\ts.Logf(\"Serving oshinko rest at http:\/\/%s\", s.httpServerL.Addr())\n\t\tgo func(l net.Listener) {\n\t\t\tdefer wg.Done()\n\t\t\tif err := httpServer.Serve(l); err != nil {\n\t\t\t\ts.Fatalf(\"%v\", err)\n\t\t\t}\n\t\t\ts.Logf(\"Stopped serving oshinko rest at http:\/\/%s\", l.Addr())\n\t\t}(s.httpServerL)\n\t}\n\n\tif s.hasScheme(schemeHTTPS) {\n\t\thttpsServer := &graceful.Server{Server: new(http.Server)}\n\t\thttpsServer.SetKeepAlivesEnabled(true)\n\t\thttpsServer.TCPKeepAlive = 3 * time.Minute\n\t\thttpsServer.Handler = s.handler\n\n\t\thttpsServer.TLSConfig = new(tls.Config)\n\t\thttpsServer.TLSConfig.NextProtos = []string{\"http\/1.1\"}\n\t\t\/\/ https:\/\/www.owasp.org\/index.php\/Transport_Layer_Protection_Cheat_Sheet#Rule_-_Only_Support_Strong_Protocols\n\t\thttpsServer.TLSConfig.MinVersion = tls.VersionTLS12\n\t\thttpsServer.TLSConfig.Certificates = make([]tls.Certificate, 1)\n\t\thttpsServer.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(string(s.TLSCertificate), string(s.TLSCertificateKey))\n\n\t\tconfigureTLS(httpsServer.TLSConfig)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\twg.Add(1)\n\t\ts.Logf(\"Serving oshinko rest at https:\/\/%s\", s.httpsServerL.Addr())\n\t\tgo func(l net.Listener) {\n\t\t\tdefer wg.Done()\n\t\t\tif err := httpsServer.Serve(l); err != nil {\n\t\t\t\ts.Fatalf(\"%v\", err)\n\t\t\t}\n\t\t\ts.Logf(\"Stopped serving oshinko rest at https:\/\/%s\", l.Addr())\n\t\t}(tls.NewListener(s.httpsServerL, httpsServer.TLSConfig))\n\t}\n\n\twg.Wait()\n\treturn nil\n}\n\n\/\/ Listen creates the listeners for the server\nfunc (s *Server) Listen() error {\n\tif s.hasListeners { \/\/ already done this\n\t\treturn nil\n\t}\n\n\tif s.hasScheme(schemeHTTPS) { \/\/ exit early on missing params\n\t\tif s.TLSCertificate == \"\" {\n\t\t\tif s.TLSCertificateKey == \"\" {\n\t\t\t\ts.Fatalf(\"the required flags `--tls-certificate` and `--tls-key` were not specified\")\n\t\t\t}\n\t\t\ts.Fatalf(\"the required flag `--tls-certificate` was not specified\")\n\t\t}\n\t\tif s.TLSCertificateKey == \"\" {\n\t\t\ts.Fatalf(\"the required flag `--tls-key` was not specified\")\n\t\t}\n\n\t\t\/\/ Use http host if https host wasn't defined\n\t\tif s.TLSHost == \"\" {\n\t\t\ts.TLSHost = s.Host\n\t\t}\n\t}\n\n\tif s.hasScheme(schemeUnix) {\n\t\tdomSockListener, err := net.Listen(\"unix\", string(s.SocketPath))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.domainSocketL = domSockListener\n\t}\n\n\tif s.hasScheme(schemeHTTP) {\n\t\tlistener, err := net.Listen(\"tcp\", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\th, p, err := swag.SplitHostPort(listener.Addr().String())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.Host = h\n\t\ts.Port = p\n\t\ts.httpServerL = listener\n\t}\n\n\tif s.hasScheme(schemeHTTPS) {\n\t\ttlsListener, err := net.Listen(\"tcp\", net.JoinHostPort(s.TLSHost, strconv.Itoa(s.TLSPort)))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tsh, sp, err := swag.SplitHostPort(tlsListener.Addr().String())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.TLSHost = sh\n\t\ts.TLSPort = sp\n\t\ts.httpsServerL = tlsListener\n\t}\n\n\ts.hasListeners = true\n\treturn nil\n}\n\n\/\/ Shutdown server and clean up resources\nfunc (s *Server) Shutdown() error {\n\ts.api.ServerShutdown()\n\treturn nil\n}\n\n\/\/ GetHandler returns a handler useful for testing\nfunc (s *Server) GetHandler() http.Handler {\n\treturn s.handler\n}\n<commit_msg>Restore --info flag<commit_after>package restapi\n\nimport (\n\t\"crypto\/tls\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/go-openapi\/swag\"\n\tflags \"github.com\/jessevdk\/go-flags\"\n\tgraceful \"github.com\/tylerb\/graceful\"\n\n\toshinkoFlags \"github.com\/radanalyticsio\/oshinko-cli\/rest\/helpers\/flags\"\n\t\"github.com\/radanalyticsio\/oshinko-cli\/rest\/helpers\/info\"\n\t\"github.com\/radanalyticsio\/oshinko-cli\/rest\/restapi\/operations\"\n\t\"github.com\/radanalyticsio\/oshinko-cli\/rest\/version\"\n)\n\nconst (\n\tschemeHTTP = \"http\"\n\tschemeHTTPS = \"https\"\n\tschemeUnix = \"unix\"\n)\n\nvar defaultSchemes []string\n\nfunc init() {\n\tdefaultSchemes = []string{\n\t\tschemeHTTP,\n\t\tschemeHTTPS,\n\t}\n}\n\n\/\/ NewServer creates a new api oshinko rest server but does not configure it\nfunc NewServer(api *operations.OshinkoRestAPI) *Server {\n\ts := new(Server)\n\ts.api = api\n\treturn s\n}\n\n\/\/ ConfigureAPI configures the API and handlers. Needs to be called before Serve\nfunc (s *Server) ConfigureAPI() {\n\tif s.api != nil {\n\t\ts.handler = configureAPI(s.api)\n\t}\n}\n\n\/\/ ConfigureFlags configures the additional flags defined by the handlers. Needs to be called before the parser.Parse\nfunc (s *Server) ConfigureFlags() {\n\tif s.api != nil {\n\t\tconfigureFlags(s.api)\n\t}\n}\n\n\/\/ Server for the oshinko rest API\ntype Server struct {\n\tEnabledListeners []string `long:\"scheme\" description:\"the listeners to enable, this can be repeated and defaults to the schemes in the swagger spec\"`\n\n\tSocketPath flags.Filename `long:\"socket-path\" description:\"the unix socket to listen on\" default:\"\/var\/run\/oshinko-rest.sock\"`\n\tdomainSocketL net.Listener\n\n\tHost string `long:\"host\" description:\"the IP to listen on\" default:\"localhost\" env:\"HOST\"`\n\tPort int `long:\"port\" description:\"the port to listen on for insecure connections, defaults to a random value\" env:\"PORT\"`\n\thttpServerL net.Listener\n\n\tTLSHost string `long:\"tls-host\" description:\"the IP to listen on for tls, when not specified it's the same as --host\" env:\"TLS_HOST\"`\n\tTLSPort int `long:\"tls-port\" description:\"the port to listen on for secure connections, defaults to a random value\" env:\"TLS_PORT\"`\n\tTLSCertificate flags.Filename `long:\"tls-certificate\" description:\"the certificate to use for secure connections\" env:\"TLS_CERTIFICATE\"`\n\tTLSCertificateKey flags.Filename `long:\"tls-key\" description:\"the private key to use for secure conections\" env:\"TLS_PRIVATE_KEY\"`\n\thttpsServerL net.Listener\n\n\tapi *operations.OshinkoRestAPI\n\thandler http.Handler\n\thasListeners bool\n}\n\n\/\/ Logf logs message either via defined user logger or via system one if no user logger is defined.\nfunc (s *Server) Logf(f string, args ...interface{}) {\n\tif s.api != nil && s.api.Logger != nil {\n\t\ts.api.Logger(f, args...)\n\t} else {\n\t\tlog.Printf(f, args...)\n\t}\n}\n\n\/\/ Fatalf logs message either via defined user logger or via system one if no user logger is defined.\n\/\/ Exits with non-zero status after printing\nfunc (s *Server) Fatalf(f string, args ...interface{}) {\n\tif s.api != nil && s.api.Logger != nil {\n\t\ts.api.Logger(f, args...)\n\t\tos.Exit(1)\n\t} else {\n\t\tlog.Fatalf(f, args...)\n\t}\n}\n\n\/\/ SetAPI configures the server with the specified API. Needs to be called before Serve\nfunc (s *Server) SetAPI(api *operations.OshinkoRestAPI) {\n\tif api == nil {\n\t\ts.api = nil\n\t\ts.handler = nil\n\t\treturn\n\t}\n\n\ts.api = api\n\ts.api.Logger = log.Printf\n\ts.handler = configureAPI(api)\n}\n\nfunc (s *Server) hasScheme(scheme string) bool {\n\tschemes := s.EnabledListeners\n\tif len(schemes) == 0 {\n\t\tschemes = defaultSchemes\n\t}\n\n\tfor _, v := range schemes {\n\t\tif v == scheme {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Serve the api\nfunc (s *Server) Serve() (err error) {\n\n\tif oshinkoFlags.InfoEnabled() {\n\t\timg := info.GetSparkImage()\n\t\tlog.Println(version.GetAppName() + \" \" + version.GetVersion())\n\t\tlog.Println(\"Default cluster image: \" + img)\n\t\tos.Exit(0)\n\t}\n\n\tif !s.hasListeners {\n\t\tif err := s.Listen(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar wg sync.WaitGroup\n\n\tif s.hasScheme(schemeUnix) {\n\t\tdomainSocket := &graceful.Server{Server: new(http.Server)}\n\t\tdomainSocket.Handler = s.handler\n\n\t\twg.Add(1)\n\t\ts.Logf(\"Serving oshinko rest at unix:\/\/%s\", s.SocketPath)\n\t\tgo func(l net.Listener) {\n\t\t\tdefer wg.Done()\n\t\t\tif err := domainSocket.Serve(l); err != nil {\n\t\t\t\ts.Fatalf(\"%v\", err)\n\t\t\t}\n\t\t\ts.Logf(\"Stopped serving oshinko rest at unix:\/\/%s\", s.SocketPath)\n\t\t}(s.domainSocketL)\n\t}\n\n\tif s.hasScheme(schemeHTTP) {\n\t\thttpServer := &graceful.Server{Server: new(http.Server)}\n\t\thttpServer.SetKeepAlivesEnabled(true)\n\t\thttpServer.TCPKeepAlive = 3 * time.Minute\n\t\thttpServer.Handler = s.handler\n\n\t\twg.Add(1)\n\t\ts.Logf(\"Serving oshinko rest at http:\/\/%s\", s.httpServerL.Addr())\n\t\tgo func(l net.Listener) {\n\t\t\tdefer wg.Done()\n\t\t\tif err := httpServer.Serve(l); err != nil {\n\t\t\t\ts.Fatalf(\"%v\", err)\n\t\t\t}\n\t\t\ts.Logf(\"Stopped serving oshinko rest at http:\/\/%s\", l.Addr())\n\t\t}(s.httpServerL)\n\t}\n\n\tif s.hasScheme(schemeHTTPS) {\n\t\thttpsServer := &graceful.Server{Server: new(http.Server)}\n\t\thttpsServer.SetKeepAlivesEnabled(true)\n\t\thttpsServer.TCPKeepAlive = 3 * time.Minute\n\t\thttpsServer.Handler = s.handler\n\n\t\thttpsServer.TLSConfig = new(tls.Config)\n\t\thttpsServer.TLSConfig.NextProtos = []string{\"http\/1.1\"}\n\t\t\/\/ https:\/\/www.owasp.org\/index.php\/Transport_Layer_Protection_Cheat_Sheet#Rule_-_Only_Support_Strong_Protocols\n\t\thttpsServer.TLSConfig.MinVersion = tls.VersionTLS12\n\t\thttpsServer.TLSConfig.Certificates = make([]tls.Certificate, 1)\n\t\thttpsServer.TLSConfig.Certificates[0], err = tls.LoadX509KeyPair(string(s.TLSCertificate), string(s.TLSCertificateKey))\n\n\t\tconfigureTLS(httpsServer.TLSConfig)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\twg.Add(1)\n\t\ts.Logf(\"Serving oshinko rest at https:\/\/%s\", s.httpsServerL.Addr())\n\t\tgo func(l net.Listener) {\n\t\t\tdefer wg.Done()\n\t\t\tif err := httpsServer.Serve(l); err != nil {\n\t\t\t\ts.Fatalf(\"%v\", err)\n\t\t\t}\n\t\t\ts.Logf(\"Stopped serving oshinko rest at https:\/\/%s\", l.Addr())\n\t\t}(tls.NewListener(s.httpsServerL, httpsServer.TLSConfig))\n\t}\n\n\twg.Wait()\n\treturn nil\n}\n\n\/\/ Listen creates the listeners for the server\nfunc (s *Server) Listen() error {\n\tif s.hasListeners { \/\/ already done this\n\t\treturn nil\n\t}\n\n\tif s.hasScheme(schemeHTTPS) { \/\/ exit early on missing params\n\t\tif s.TLSCertificate == \"\" {\n\t\t\tif s.TLSCertificateKey == \"\" {\n\t\t\t\ts.Fatalf(\"the required flags `--tls-certificate` and `--tls-key` were not specified\")\n\t\t\t}\n\t\t\ts.Fatalf(\"the required flag `--tls-certificate` was not specified\")\n\t\t}\n\t\tif s.TLSCertificateKey == \"\" {\n\t\t\ts.Fatalf(\"the required flag `--tls-key` was not specified\")\n\t\t}\n\n\t\t\/\/ Use http host if https host wasn't defined\n\t\tif s.TLSHost == \"\" {\n\t\t\ts.TLSHost = s.Host\n\t\t}\n\t}\n\n\tif s.hasScheme(schemeUnix) {\n\t\tdomSockListener, err := net.Listen(\"unix\", string(s.SocketPath))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.domainSocketL = domSockListener\n\t}\n\n\tif s.hasScheme(schemeHTTP) {\n\t\tlistener, err := net.Listen(\"tcp\", net.JoinHostPort(s.Host, strconv.Itoa(s.Port)))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\th, p, err := swag.SplitHostPort(listener.Addr().String())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.Host = h\n\t\ts.Port = p\n\t\ts.httpServerL = listener\n\t}\n\n\tif s.hasScheme(schemeHTTPS) {\n\t\ttlsListener, err := net.Listen(\"tcp\", net.JoinHostPort(s.TLSHost, strconv.Itoa(s.TLSPort)))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tsh, sp, err := swag.SplitHostPort(tlsListener.Addr().String())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.TLSHost = sh\n\t\ts.TLSPort = sp\n\t\ts.httpsServerL = tlsListener\n\t}\n\n\ts.hasListeners = true\n\treturn nil\n}\n\n\/\/ Shutdown server and clean up resources\nfunc (s *Server) Shutdown() error {\n\ts.api.ServerShutdown()\n\treturn nil\n}\n\n\/\/ GetHandler returns a handler useful for testing\nfunc (s *Server) GetHandler() http.Handler {\n\treturn s.handler\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Nging is a toolbox for webmasters\n Copyright (C) 2018-present Wenhui Shen <swh@admpub.com>\n\n This program is free software: you can redistribute it and\/or modify\n it under the terms of the GNU Affero General Public License as published\n by the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU Affero General Public License for more details.\n\n You should have received a copy of the GNU Affero General Public License\n along with this program. If not, see <https:\/\/www.gnu.org\/licenses\/>.\n*\/\n\npackage backend\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/webx-top\/com\"\n\t\"github.com\/webx-top\/echo\"\n\t\"github.com\/webx-top\/echo\/handler\/pprof\"\n\t\"github.com\/webx-top\/echo\/middleware\"\n\t\"github.com\/webx-top\/echo\/middleware\/language\"\n\t\"github.com\/webx-top\/echo\/middleware\/render\"\n\t\"github.com\/webx-top\/echo\/middleware\/render\/driver\"\n\t\"github.com\/webx-top\/echo\/middleware\/session\"\n\t\"github.com\/webx-top\/echo\/subdomains\"\n\n\t\"github.com\/admpub\/events\"\n\t\"github.com\/admpub\/events\/emitter\"\n\t\"github.com\/admpub\/log\"\n\t\"github.com\/admpub\/nging\/application\/cmd\/event\"\n\t\"github.com\/admpub\/nging\/application\/handler\"\n\t\"github.com\/admpub\/nging\/application\/library\/config\"\n\tngingMW \"github.com\/admpub\/nging\/application\/middleware\"\n)\n\nconst (\n\tDefaultTemplateDir = `.\/template\/backend`\n\tDefaultAssetsDir = `.\/public\/assets`\n\tDefaultAssetsURLPath = `\/public\/assets\/backend`\n)\n\nvar (\n\tTemplateDir = DefaultTemplateDir \/\/模板文件夹\n\tAssetsDir = DefaultAssetsDir \/\/素材文件夹\n\tAssetsURLPath = DefaultAssetsURLPath\n\tDefaultAvatarURL = AssetsURLPath + `\/images\/user_128.png`\n\tRendererDo = func(driver.Driver) {}\n\tParseStrings = map[string]string{}\n\tParseStringFuncs = map[string]func() string{}\n\tSkippedGzipPaths = map[string]bool{}\n\tGzipSkipper = func(skippedPaths map[string]bool) func(c echo.Context) bool {\n\t\treturn func(c echo.Context) bool {\n\t\t\tupath := c.Request().URL().Path()\n\t\t\tskipped, _ := skippedPaths[upath]\n\t\t\treturn skipped\n\t\t}\n\t}\n\tDefaultLocalHostNames = []string{\n\t\t`127.0.0.1`, `localhost`,\n\t}\n)\n\nfunc MakeSubdomains(domain string, appends []string) string {\n\tdomain, _ = com.SplitHost(domain)\n\tport := fmt.Sprintf(\"%d\", config.DefaultCLIConfig.Port)\n\tnewDomain := domain + `,` + domain + `:` + port\n\tfor _, hostName := range appends {\n\t\tif hostName == domain {\n\t\t\tcontinue\n\t\t}\n\t\tnewDomain += `,` + hostName + `:` + port\n\t}\n\treturn newDomain\n}\n\nfunc init() {\n\techo.Set(`BackendPrefix`, handler.BackendPrefix)\n\techo.Set(`GlobalPrefix`, handler.GlobalPrefix)\n\tevent.OnStart(0, func() {\n\t\thandler.GlobalPrefix = echo.String(`GlobalPrefix`)\n\t\thandler.BackendPrefix = echo.String(`BackendPrefix`)\n\t\thandler.FrontendPrefix = echo.String(`FrontendPrefix`)\n\t\tngingMW.DefaultAvatarURL = DefaultAssetsURLPath\n\t\te := handler.Echo()\n\t\te.SetPrefix(handler.GlobalPrefix)\n\t\thandler.SetRootGroup(handler.BackendPrefix)\n\t\tsubdomains.Default.Default = `backend`\n\t\tdomainName := subdomains.Default.Default\n\t\tbackendDomain := config.DefaultCLIConfig.BackendDomain\n\t\tif len(backendDomain) > 0 {\n\t\t\tdomainName += `@` + MakeSubdomains(backendDomain, DefaultLocalHostNames)\n\t\t}\n\t\tsubdomains.Default.Add(domainName, e)\n\n\t\te.Use(middleware.Log(), middleware.Recover())\n\t\tskippedGzipPaths := map[string]bool{\n\t\t\te.Prefix() + `\/server\/cmdSend\/info`: true,\n\t\t\te.Prefix() + `\/download\/progress\/info`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/allocs`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/block`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/cmdline`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/goroutine`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/heap`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/mutex`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/profile`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/threadcreate`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/trace`: true,\n\t\t}\n\t\tfor k, v := range skippedGzipPaths {\n\t\t\tSkippedGzipPaths[k] = v\n\t\t}\n\t\te.Use(middleware.Gzip(&middleware.GzipConfig{\n\t\t\tSkipper: GzipSkipper(SkippedGzipPaths),\n\t\t}))\n\t\te.Use(func(h echo.Handler) echo.HandlerFunc {\n\t\t\treturn func(c echo.Context) error {\n\t\t\t\tc.Response().Header().Set(`Server`, event.SofewareName+`\/`+config.Version.Number)\n\t\t\t\treturn h.Handle(c)\n\t\t\t}\n\t\t})\n\n\t\t\/\/ 注册静态资源文件(网站素材文件)\n\t\te.Use(event.StaticMW) \/\/打包的静态资源\n\t\t\/\/ 上传文件资源(改到manager中用File函数实现)\n\t\t\/\/ e.Use(middleware.Static(&middleware.StaticOptions{\n\t\t\/\/ \tRoot: helper.UploadDir,\n\t\t\/\/ \tPath: helper.UploadURLPath,\n\t\t\/\/ }))\n\n\t\t\/\/ 启用session\n\t\te.Use(session.Middleware(config.SessionOptions))\n\t\t\/\/ 启用多语言支持\n\t\tconfig.DefaultConfig.Language.SetFSFunc(event.LangFSFunc)\n\t\te.Use(language.New(&config.DefaultConfig.Language).Middleware())\n\n\t\t\/\/ 启用Validation\n\t\te.Use(middleware.Validate(echo.NewValidation))\n\n\t\t\/\/ 事物支持\n\t\te.Use(ngingMW.Tansaction())\n\t\t\/\/ 注册模板引擎\n\t\trenderOptions := &render.Config{\n\t\t\tTmplDir: TemplateDir,\n\t\t\tEngine: `standard`,\n\t\t\tParseStrings: map[string]string{\n\t\t\t\t`__ASSETS__`: AssetsURLPath,\n\t\t\t\t`__TMPL__`: TemplateDir,\n\t\t\t},\n\t\t\tParseStringFuncs: map[string]func() string{\n\t\t\t\t`__BACKEND__`: func() string { return subdomains.Default.URL(handler.BackendPrefix, `backend`) },\n\t\t\t\t`__FRONTEND__`: func() string { return subdomains.Default.URL(handler.FrontendPrefix, `frontend`) },\n\t\t\t},\n\t\t\tDefaultHTTPErrorCode: http.StatusOK,\n\t\t\tReload: true,\n\t\t\tErrorPages: config.DefaultConfig.Sys.ErrorPages,\n\t\t}\n\t\tif ParseStrings != nil {\n\t\t\tfor key, val := range ParseStrings {\n\t\t\t\trenderOptions.ParseStrings[key] = val\n\t\t\t}\n\t\t}\n\t\tif ParseStringFuncs != nil {\n\t\t\tfor key, val := range ParseStringFuncs {\n\t\t\t\trenderOptions.ParseStringFuncs[key] = val\n\t\t\t}\n\t\t}\n\t\tif RendererDo != nil {\n\t\t\trenderOptions.AddRendererDo(RendererDo)\n\t\t}\n\t\trenderOptions.AddFuncSetter(ngingMW.ErrorPageFunc)\n\t\trenderOptions.ApplyTo(e, event.BackendTmplMgr)\n\t\t\/\/RendererDo(renderOptions.Renderer())\n\t\temitter.DefaultCondEmitter.On(`clearCache`, events.Callback(func(_ events.Event) error {\n\t\t\tlog.Debug(`clear: Backend Template Object Cache`)\n\t\t\trenderOptions.Renderer().ClearCache()\n\t\t\treturn nil\n\t\t}))\n\t\te.Get(`\/favicon.ico`, event.FaviconHandler)\n\t\tif event.Develop {\n\t\t\tpprof.Wrap(e)\n\t\t}\n\t\tInitialize()\n\t})\n}\n<commit_msg>improved<commit_after>\/*\n Nging is a toolbox for webmasters\n Copyright (C) 2018-present Wenhui Shen <swh@admpub.com>\n\n This program is free software: you can redistribute it and\/or modify\n it under the terms of the GNU Affero General Public License as published\n by the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU Affero General Public License for more details.\n\n You should have received a copy of the GNU Affero General Public License\n along with this program. If not, see <https:\/\/www.gnu.org\/licenses\/>.\n*\/\n\npackage backend\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/webx-top\/com\"\n\t\"github.com\/webx-top\/echo\"\n\t\"github.com\/webx-top\/echo\/handler\/pprof\"\n\t\"github.com\/webx-top\/echo\/middleware\"\n\t\"github.com\/webx-top\/echo\/middleware\/language\"\n\t\"github.com\/webx-top\/echo\/middleware\/render\"\n\t\"github.com\/webx-top\/echo\/middleware\/render\/driver\"\n\t\"github.com\/webx-top\/echo\/middleware\/session\"\n\t\"github.com\/webx-top\/echo\/subdomains\"\n\n\t\"github.com\/admpub\/events\"\n\t\"github.com\/admpub\/events\/emitter\"\n\t\"github.com\/admpub\/log\"\n\t\"github.com\/admpub\/nging\/application\/cmd\/event\"\n\t\"github.com\/admpub\/nging\/application\/handler\"\n\t\"github.com\/admpub\/nging\/application\/library\/config\"\n\tngingMW \"github.com\/admpub\/nging\/application\/middleware\"\n)\n\nconst (\n\tDefaultTemplateDir = `.\/template\/backend`\n\tDefaultAssetsDir = `.\/public\/assets`\n\tDefaultAssetsURLPath = `\/public\/assets\/backend`\n)\n\nvar (\n\tTemplateDir = DefaultTemplateDir \/\/模板文件夹\n\tAssetsDir = DefaultAssetsDir \/\/素材文件夹\n\tAssetsURLPath = DefaultAssetsURLPath\n\tDefaultAvatarURL = AssetsURLPath + `\/images\/user_128.png`\n\tRendererDo = func(driver.Driver) {}\n\tParseStrings = map[string]string{}\n\tParseStringFuncs = map[string]func() string{}\n\tSkippedGzipPaths = map[string]bool{}\n\tGzipSkipper = func(skippedPaths map[string]bool) func(c echo.Context) bool {\n\t\treturn func(c echo.Context) bool {\n\t\t\tupath := c.Request().URL().Path()\n\t\t\tskipped, _ := skippedPaths[upath]\n\t\t\treturn skipped\n\t\t}\n\t}\n\tDefaultLocalHostNames = []string{\n\t\t`127.0.0.1`, `localhost`,\n\t}\n)\n\nfunc MakeSubdomains(domain string, appends []string) string {\n\tvar prefix string\n\tif pos := strings.Index(domain, `:\/\/`); pos > 0 {\n\t\tpos += 3\n\t\tprefix = domain[:pos]\n\t\tif pos+1 <= len(domain) {\n\t\t\tdomain = domain[pos+1:]\n\t\t} else {\n\t\t\tdomain = ``\n\t\t}\n\t}\n\tdomain, _ = com.SplitHost(domain)\n\tport := fmt.Sprintf(\"%d\", config.DefaultCLIConfig.Port)\n\tnewDomain := prefix + domain + `,` + domain + `:` + port\n\tfor _, hostName := range appends {\n\t\tif hostName == domain {\n\t\t\tcontinue\n\t\t}\n\t\tnewDomain += `,` + hostName + `:` + port\n\t}\n\treturn newDomain\n}\n\nfunc init() {\n\techo.Set(`BackendPrefix`, handler.BackendPrefix)\n\techo.Set(`GlobalPrefix`, handler.GlobalPrefix)\n\tevent.OnStart(0, func() {\n\t\thandler.GlobalPrefix = echo.String(`GlobalPrefix`)\n\t\thandler.BackendPrefix = echo.String(`BackendPrefix`)\n\t\thandler.FrontendPrefix = echo.String(`FrontendPrefix`)\n\t\tngingMW.DefaultAvatarURL = DefaultAssetsURLPath\n\t\te := handler.Echo()\n\t\te.SetPrefix(handler.GlobalPrefix)\n\t\thandler.SetRootGroup(handler.BackendPrefix)\n\t\tsubdomains.Default.Default = `backend`\n\t\tdomainName := subdomains.Default.Default\n\t\tbackendDomain := config.DefaultCLIConfig.BackendDomain\n\t\tif len(backendDomain) > 0 {\n\t\t\tdomainName += `@` + MakeSubdomains(backendDomain, DefaultLocalHostNames)\n\t\t}\n\t\tsubdomains.Default.Add(domainName, e)\n\n\t\te.Use(middleware.Log(), middleware.Recover())\n\t\tskippedGzipPaths := map[string]bool{\n\t\t\te.Prefix() + `\/server\/cmdSend\/info`: true,\n\t\t\te.Prefix() + `\/download\/progress\/info`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/allocs`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/block`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/cmdline`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/goroutine`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/heap`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/mutex`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/profile`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/threadcreate`: true,\n\t\t\te.Prefix() + `\/debug\/pprof\/trace`: true,\n\t\t}\n\t\tfor k, v := range skippedGzipPaths {\n\t\t\tSkippedGzipPaths[k] = v\n\t\t}\n\t\te.Use(middleware.Gzip(&middleware.GzipConfig{\n\t\t\tSkipper: GzipSkipper(SkippedGzipPaths),\n\t\t}))\n\t\te.Use(func(h echo.Handler) echo.HandlerFunc {\n\t\t\treturn func(c echo.Context) error {\n\t\t\t\tc.Response().Header().Set(`Server`, event.SofewareName+`\/`+config.Version.Number)\n\t\t\t\treturn h.Handle(c)\n\t\t\t}\n\t\t})\n\n\t\t\/\/ 注册静态资源文件(网站素材文件)\n\t\te.Use(event.StaticMW) \/\/打包的静态资源\n\t\t\/\/ 上传文件资源(改到manager中用File函数实现)\n\t\t\/\/ e.Use(middleware.Static(&middleware.StaticOptions{\n\t\t\/\/ \tRoot: helper.UploadDir,\n\t\t\/\/ \tPath: helper.UploadURLPath,\n\t\t\/\/ }))\n\n\t\t\/\/ 启用session\n\t\te.Use(session.Middleware(config.SessionOptions))\n\t\t\/\/ 启用多语言支持\n\t\tconfig.DefaultConfig.Language.SetFSFunc(event.LangFSFunc)\n\t\te.Use(language.New(&config.DefaultConfig.Language).Middleware())\n\n\t\t\/\/ 启用Validation\n\t\te.Use(middleware.Validate(echo.NewValidation))\n\n\t\t\/\/ 事物支持\n\t\te.Use(ngingMW.Tansaction())\n\t\t\/\/ 注册模板引擎\n\t\trenderOptions := &render.Config{\n\t\t\tTmplDir: TemplateDir,\n\t\t\tEngine: `standard`,\n\t\t\tParseStrings: map[string]string{\n\t\t\t\t`__ASSETS__`: AssetsURLPath,\n\t\t\t\t`__TMPL__`: TemplateDir,\n\t\t\t},\n\t\t\tParseStringFuncs: map[string]func() string{\n\t\t\t\t`__BACKEND__`: func() string { return subdomains.Default.URL(handler.BackendPrefix, `backend`) },\n\t\t\t\t`__FRONTEND__`: func() string { return subdomains.Default.URL(handler.FrontendPrefix, `frontend`) },\n\t\t\t},\n\t\t\tDefaultHTTPErrorCode: http.StatusOK,\n\t\t\tReload: true,\n\t\t\tErrorPages: config.DefaultConfig.Sys.ErrorPages,\n\t\t}\n\t\tif ParseStrings != nil {\n\t\t\tfor key, val := range ParseStrings {\n\t\t\t\trenderOptions.ParseStrings[key] = val\n\t\t\t}\n\t\t}\n\t\tif ParseStringFuncs != nil {\n\t\t\tfor key, val := range ParseStringFuncs {\n\t\t\t\trenderOptions.ParseStringFuncs[key] = val\n\t\t\t}\n\t\t}\n\t\tif RendererDo != nil {\n\t\t\trenderOptions.AddRendererDo(RendererDo)\n\t\t}\n\t\trenderOptions.AddFuncSetter(ngingMW.ErrorPageFunc)\n\t\trenderOptions.ApplyTo(e, event.BackendTmplMgr)\n\t\t\/\/RendererDo(renderOptions.Renderer())\n\t\temitter.DefaultCondEmitter.On(`clearCache`, events.Callback(func(_ events.Event) error {\n\t\t\tlog.Debug(`clear: Backend Template Object Cache`)\n\t\t\trenderOptions.Renderer().ClearCache()\n\t\t\treturn nil\n\t\t}))\n\t\te.Get(`\/favicon.ico`, event.FaviconHandler)\n\t\tif event.Develop {\n\t\t\tpprof.Wrap(e)\n\t\t}\n\t\tInitialize()\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package writesplitter\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"testing\"\n\t\"io\"\n)\n\ntype mockF struct {\n\t*bytes.Buffer\n}\n\nfunc (m *mockF) Close() error {\n\tm.Reset()\n\treturn nil\n}\n\nfunc TestWriteNoSplit(t *testing.T) {\n\tcreateFile = func(name string) (io.WriteCloser, error) {\n\t\treturn &mockF{&bytes.Buffer{}}, nil\n\t}\n\tws := LineSplitter(5, \"\")\n\n\tmockD := bytes.NewBufferString(`Lorem ipsum dolor sit amet consectetur adipiscing elit\nCras in lacinia eros Aliquam aliquet sapien a\nUt mauris orci varius et cursus sed blandit\nMauris iaculis ac magna non tincidunt In rhoncus\nPellentesque quis erat quis ex aliquam porttitor Vestibulum\nPellentesque nec mollis nibh interdum eleifend nisl Donec\nid commodo urna sed tempus mi Vestibulum facilisis\nimperdiet dolor sed sollicitudin Proin in lectus sed`)\n\n\texpected := mockD.Len() - 7 \/\/ we do *not* expect the newlines\n\ttotal := 0\n\n\tscanner := bufio.NewScanner(mockD)\n\tfor scanner.Scan() {\n\n\t\tn, _ := ws.Write(scanner.Bytes())\n\t\ttotal += n\n\n\t\tif err := scanner.Err(); err != nil {\n\t\t\tt.Error(\"scanner error\", err)\n\t\t}\n\t}\n\n\tif expected != total {\n\t\tt.Error(\"len() mismatch: expected\", expected, \"actual\", total)\n\t}\n\n}\n\nfunc TestWriteSplit(t *testing.T) {\n\tvar b bytes.Buffer \/\/ pass in the buffer to allow for inspection\n\n\tcreateFile = func(name string) (io.WriteCloser, error) {\n\t\treturn &mockF{&b}, nil\n\t}\n\n\tws := ByteSplitter(255, \"\")\n\n\tmockD := bytes.NewBufferString(`Lorem ipsum dolor sit amet consectetur adipiscing elit\nCras in lacinia eros Aliquam aliquet sapien a\nUt mauris orci varius et cursus sed blandit\nMauris iaculis ac magna non tincidunt In rhoncus\nPellentesque quis erat quis ex aliquam porttitor Vestibulum\nPellentesque nec mollis nibh interdum eleifend nisl Donec\nid commodo urna sed tempus mi Vestibulum facilisis\nimperdiet dolor sed sollicitudin Proin in lectus sed`)\n\n\texpected := 102 \/\/ only the last three lines less two newlines (\\n)\n\ttotal := 0\n\n\tscanner := bufio.NewScanner(mockD)\n\tfor scanner.Scan() {\n\n\t\tn, _ := ws.Write(scanner.Bytes())\n\t\ttotal += n\n\n\t\tif err := scanner.Err(); err != nil {\n\t\t\tt.Error(\"scanner error\", err)\n\t\t}\n\t}\n\n\tif expected != b.Len() {\n\t\tt.Error(\"len() mismatch: expected\", expected, \"actual\", b.Len())\n\t}\n\n}\n\nfunc TestErrorOnCreate(t *testing.T) {\n\tcreateFile = func(name string) (io.WriteCloser, error) {\n\t\treturn nil, errors.New(\"This is an error\")\n\t}\n\tws := LineSplitter(5, \"\")\n\n\tmockD := bytes.NewBufferString(`Lorem ipsum dolor sit amet consectetur adipiscing elit\nCras in lacinia eros Aliquam aliquet sapien a\nUt mauris orci varius et cursus sed blandit\nMauris iaculis ac magna non tincidunt In rhoncus\nPellentesque quis erat quis ex aliquam porttitor Vestibulum\nPellentesque nec mollis nibh interdum eleifend nisl Donec\nid commodo urna sed tempus mi Vestibulum facilisis\nimperdiet dolor sed sollicitudin Proin in lectus sed`)\n\n\texpected := mockD.Len() - 7 \/\/ we do *not* expect the newlines\n\ttotal := 0\n\n\tvar n int\n\tvar err error\n\n\tscanner := bufio.NewScanner(mockD)\n\tfor scanner.Scan() {\n\n\t\tn, err = ws.Write(scanner.Bytes())\n\t\tif err == nil {\n\t\t\tt.Fail()\n\t\t}\n\t\ttotal += n\n\n\t\tif err = scanner.Err(); err != nil {\n\t\t\tt.Error(\"scanner error\", err)\n\t\t}\n\t}\n\n\tif expected == total {\n\t\tt.Error(\"len() mismatch: expected\", expected, \"actual\", total)\n\t}\n\n}\n<commit_msg>test was flawed<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Go Cloud Development Kit Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage azurekeyvault\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"log\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/Azure\/azure-sdk-for-go\/services\/keyvault\/v7.0\/keyvault\"\n\t\"github.com\/Azure\/go-autorest\/autorest\"\n\t\"github.com\/Azure\/go-autorest\/autorest\/azure\"\n\t\"gocloud.dev\/internal\/testing\/setup\"\n\t\"gocloud.dev\/secrets\"\n\t\"gocloud.dev\/secrets\/driver\"\n\t\"gocloud.dev\/secrets\/drivertest\"\n)\n\n\/\/ Prerequisites for --record mode\n\/\/\n\/\/ 1. Sign-in to your Azure Subscription at http:\/\/portal.azure.com.\n\/\/\n\/\/ 2. Create a KeyVault, see https:\/\/docs.microsoft.com\/en-us\/azure\/key-vault\/quick-create-portal.\n\/\/\n\/\/ 3. Choose an authentication model. This test uses Service Principal, see https:\/\/docs.microsoft.com\/en-us\/rest\/api\/azure\/index#register-your-client-application-with-azure-ad.\n\/\/ For documentation on acceptable auth models, see https:\/\/docs.microsoft.com\/en-us\/azure\/key-vault\/key-vault-whatis.\n\/\/\n\/\/ 4. Set your environment variables depending on the auth model selection. Modify helper initEnv() as needed.\n\/\/ For Service Principal, please set the following, see https:\/\/docs.microsoft.com\/en-us\/go\/azure\/azure-sdk-go-authorization.\n\/\/ - AZURE_TENANT_ID (The ID for the Active Directory tenant that the service principal belongs to.)\n\/\/ - AZURE_CLIENT_ID (The name or ID of the service principal.)\n\/\/ - AZURE_CLIENT_SECRET (The secret associated with the service principal.)\n\/\/ - AZURE_ENVIRONMENT\n\/\/ - AZURE_AD_RESOURCE to https:\/\/vault.azure.net\n\/\/\n\/\/ 5. Create\/Import a Key. This can be done in the Azure Portal or by code.\n\/\/\n\/\/ 6. Update constants below to match your Azure KeyVault settings.\n\nconst (\n\tkeyVaultName = \"go-cdk\"\n\tkeyID1 = \"test1\"\n\tkeyID2 = \"test2\"\n\t\/\/ Important: an empty key version will default to 'Current Version' in Azure Key Vault.\n\t\/\/ See link below for more information on versioning\n\t\/\/ https:\/\/docs.microsoft.com\/en-us\/azure\/key-vault\/about-keys-secrets-and-certificates\n\tkeyVersion = \"\"\n\talgorithm = string(keyvault.RSAOAEP256)\n)\n\ntype harness struct {\n\tclient *keyvault.BaseClient\n\tclose func()\n}\n\nfunc (h *harness) MakeDriver(ctx context.Context) (driver.Keeper, driver.Keeper, error) {\n\tkeeper1 := keeper{\n\t\tclient: h.client,\n\t\tkeyVaultName: keyVaultName,\n\t\tkeyName: keyID1,\n\t\tkeyVersion: keyVersion,\n\t\toptions: &KeeperOptions{Algorithm: algorithm},\n\t}\n\n\tkeeper2 := keeper{\n\t\tclient: h.client,\n\t\tkeyVaultName: keyVaultName,\n\t\tkeyName: keyID2,\n\t\tkeyVersion: keyVersion,\n\t\toptions: &KeeperOptions{Algorithm: algorithm},\n\t}\n\n\treturn &keeper1, &keeper2, nil\n}\n\nfunc (h *harness) Close() {\n\th.close()\n}\n\nfunc newHarness(ctx context.Context, t *testing.T) (drivertest.Harness, error) {\n\t\/\/ Use initEnv to setup your environment variables.\n\tif *setup.Record {\n\t\tinitEnv()\n\t}\n\n\tdone, sender := setup.NewAzureKeyVaultTestClient(ctx, t)\n\tclient, err := Dial()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient.Sender = sender\n\n\t\/\/ Use a null authorizer for replay mode.\n\tif !*setup.Record {\n\t\tna := &autorest.NullAuthorizer{}\n\t\tclient.Authorizer = na\n\t}\n\n\treturn &harness{\n\t\tclient: client,\n\t\tclose: done,\n\t}, nil\n}\n\nfunc initEnv() {\n\tenv, err := azure.EnvironmentFromName(\"AZUREPUBLICCLOUD\")\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\t\/\/ For Client Credentials authorization, set AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET\n\t\/\/ For Client Certificate and Azure Managed Service Identity, see doc below for help\n\t\/\/ https:\/\/github.com\/Azure\/azure-sdk-for-go\n\n\tif os.Getenv(\"AZURE_TENANT_ID\") == \"\" ||\n\t\tos.Getenv(\"AZURE_CLIENT_ID\") == \"\" ||\n\t\tos.Getenv(\"AZURE_CLIENT_SECRET\") == \"\" {\n\t\tlog.Fatal(\"Missing environment for recording tests, set AZURE_TENANT_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET\")\n\t}\n\n\tos.Setenv(\"AZURE_ENVIRONMENT\", env.Name)\n\n\tvaultEndpoint := strings.TrimSuffix(env.KeyVaultEndpoint, \"\/\")\n\tos.Setenv(\"AZURE_AD_RESOURCE\", vaultEndpoint)\n}\n\nfunc TestConformance(t *testing.T) {\n\tdrivertest.RunConformanceTests(t, newHarness, []drivertest.AsTest{verifyAs{}})\n}\n\ntype verifyAs struct{}\n\nfunc (v verifyAs) Name() string {\n\treturn \"verify As function\"\n}\n\nfunc (v verifyAs) ErrorCheck(k *secrets.Keeper, err error) error {\n\tvar e autorest.DetailedError\n\tif !k.ErrorAs(err, &e) {\n\t\treturn errors.New(\"Keeper.ErrorAs failed\")\n\t}\n\treturn nil\n}\n\n\/\/ Key Vault-specific tests.\n\nfunc TestNoConnectionError(t *testing.T) {\n\tclient := keyvault.NewWithoutDefaults()\n\tk, err := NewKeeper(&client, keyVaultName, keyID1, keyVersion, &KeeperOptions{Algorithm: algorithm})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := k.Encrypt(context.Background(), []byte(\"secrets\")); err == nil {\n\t\tt.Error(\"Encrypt: got nil, want no connection error\")\n\t}\n}\n\nfunc TestAlgorithmNotProvided(t *testing.T) {\n\tclient := keyvault.NewWithoutDefaults()\n\tif _, err := NewKeeper(&client, keyVaultName, keyID1, keyVersion, nil); err == nil {\n\t\tt.Error(\"NewKeeper with no algorithm: got nil, want no algorithm error\")\n\t}\n}\n\nfunc TestKeyInfoFromURL(t *testing.T) {\n\ttests := []struct {\n\t\tURL string\n\t\tWantErr bool\n\t\tWantVault string\n\t\tWantKey string\n\t\tWantVersion string\n\t}{\n\t\t{\"azurekeyvault:\/\/vault1\/key1\/version1\", false, \"vault1\", \"key1\", \"version1\"},\n\t\t{\"azurekeyvault:\/\/vault2\/key2\/version2\", false, \"vault2\", \"key2\", \"version2\"},\n\t\t{\"azurekeyvault:\/\/vault3\/key3\", false, \"vault3\", \"key3\", \"\"},\n\t\t{\"azurekeyvault:\/\/vault\/key\/version\/extra\", true, \"\", \"\", \"\"},\n\t\t{\"azurekeyvault:\/\/vault\", true, \"\", \"\", \"\"},\n\t}\n\tfor _, test := range tests {\n\t\tu, err := url.Parse(test.URL)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tgotVault, gotKey, gotVersion, gotErr := keyInfoFromURL(u)\n\t\tif (gotErr != nil) != test.WantErr {\n\t\t\tt.Errorf(\"%s: got error %v, want error %v\", test.URL, gotErr, test.WantErr)\n\t\t}\n\t\tif gotErr != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif gotVault != test.WantVault {\n\t\t\tt.Errorf(\"%s: got vault %q want %q\", test.URL, gotVault, test.WantVault)\n\t\t}\n\t\tif gotKey != test.WantKey {\n\t\t\tt.Errorf(\"%s: got key %q want %q\", test.URL, gotKey, test.WantKey)\n\t\t}\n\t\tif gotVersion != test.WantVersion {\n\t\t\tt.Errorf(\"%s: got version %q want %q\", test.URL, gotVersion, test.WantVersion)\n\t\t}\n\t}\n}\n\nfunc TestOpenKeeper(t *testing.T) {\n\ttests := []struct {\n\t\tURL string\n\t\tWantErr bool\n\t}{\n\t\t\/\/ Missing algorithm query param.\n\t\t{\"azurekeyvault:\/\/mykeyvault\/mykey\/myversion\", true},\n\t\t\/\/ Invalid query parameter.\n\t\t{\"azurekeyvault:\/\/mykeyvault\/mykey\/myversion?algorithm=RSA-OAEP-256¶m=value\", true},\n\t\t\/\/ Empty host.\n\t\t{\"azurekeyvault:\/\/\/mykey\/myversion?algorithm=RSA-OAEP-256\", true},\n\t\t\/\/ Path has 1 elements (no version) -> OK.\n\t\t{\"azurekeyvault:\/\/mykeyvault\/mykey?algorithm=RSA-OAEP-256\", false},\n\t\t\/\/ Path has > 2 elements.\n\t\t{\"azurekeyvault:\/\/mykeyvault\/mykey\/myversion\/extra?algorithm=RSA-OAEP-256\", true},\n\t\t\/\/ Path has empty first element.\n\t\t{\"azurekeyvault:\/\/mykeyvault\/\/myversion?algorithm=RSA-OAEP-256\", true},\n\t\t\/\/ OK.\n\t\t{\"azurekeyvault:\/\/mykeyvault\/mykey\/myversion?algorithm=RSA-OAEP-256\", false},\n\t}\n\n\tctx := context.Background()\n\tfor _, test := range tests {\n\t\t_, err := secrets.OpenKeeper(ctx, test.URL)\n\t\tif (err != nil) != test.WantErr {\n\t\t\tt.Errorf(\"%s: got error %v, want error %v\", test.URL, err, test.WantErr)\n\t\t}\n\t}\n}\n<commit_msg>secrets\/azurekeyvault: improve docs for how to run -record tests (#1564)<commit_after>\/\/ Copyright 2019 The Go Cloud Development Kit Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage azurekeyvault\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"log\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/Azure\/azure-sdk-for-go\/services\/keyvault\/v7.0\/keyvault\"\n\t\"github.com\/Azure\/go-autorest\/autorest\"\n\t\"github.com\/Azure\/go-autorest\/autorest\/azure\"\n\t\"gocloud.dev\/internal\/testing\/setup\"\n\t\"gocloud.dev\/secrets\"\n\t\"gocloud.dev\/secrets\/driver\"\n\t\"gocloud.dev\/secrets\/drivertest\"\n)\n\n\/\/ Prerequisites for --record mode\n\/\/\n\/\/ 1. Sign-in to your Azure Subscription at http:\/\/portal.azure.com.\n\/\/\n\/\/ 2. Create a KeyVault, see https:\/\/docs.microsoft.com\/en-us\/azure\/key-vault\/quick-create-portal.\n\/\/\n\/\/ 3. Choose an authentication model. This test uses Service Principal, see https:\/\/docs.microsoft.com\/en-us\/rest\/api\/azure\/index#register-your-client-application-with-azure-ad.\n\/\/ For documentation on acceptable auth models, see https:\/\/docs.microsoft.com\/en-us\/azure\/key-vault\/key-vault-whatis.\n\/\/\n\/\/ 4. Set your environment variables depending on the auth model selection. Modify helper initEnv() as needed.\n\/\/ For Service Principal, please set the following, see https:\/\/docs.microsoft.com\/en-us\/go\/azure\/azure-sdk-go-authorization.\n\/\/\n\/\/ - AZURE_TENANT_ID: Go to \"Azure Active Directory\", then \"Properties\". The\n\/\/ \"Directory ID\" property is your AZURE_TENANT_ID.\n\/\/ - AZURE_CLIENT_ID: Go to \"Azure Active Directory\", then \"App Registrations\",\n\/\/ then \"View all applications\". The \"Application ID\" column shows your\n\/\/ AZURE_CLIENT_ID.\n\/\/ - AZURE_CLIENT_SECRET: Click on the application from the previous step,\n\/\/ then \"Settings\" and then \"Keys\". Create a key and use it as your\n\/\/ AZURE_CLIENT_SECRET. Make sure to save the value as it's hidden after\n\/\/ the initial creation.\n\/\/ - AZURE_ENVIRONMENT: (optional).\n\/\/ - AZURE_AD_RESOURCE: (optional).\n\/\/\n\/\/ 5. Create\/Import a Key. This can be done in the Azure Portal under \"Key vaults\".\n\/\/\n\/\/ 6. Update constants below to match your Azure KeyVault settings.\n\nconst (\n\tkeyVaultName = \"go-cdk\"\n\tkeyID1 = \"test1\"\n\tkeyID2 = \"test2\"\n\t\/\/ Important: an empty key version will default to 'Current Version' in Azure Key Vault.\n\t\/\/ See link below for more information on versioning\n\t\/\/ https:\/\/docs.microsoft.com\/en-us\/azure\/key-vault\/about-keys-secrets-and-certificates\n\tkeyVersion = \"\"\n\talgorithm = string(keyvault.RSAOAEP256)\n)\n\ntype harness struct {\n\tclient *keyvault.BaseClient\n\tclose func()\n}\n\nfunc (h *harness) MakeDriver(ctx context.Context) (driver.Keeper, driver.Keeper, error) {\n\tkeeper1 := keeper{\n\t\tclient: h.client,\n\t\tkeyVaultName: keyVaultName,\n\t\tkeyName: keyID1,\n\t\tkeyVersion: keyVersion,\n\t\toptions: &KeeperOptions{Algorithm: algorithm},\n\t}\n\n\tkeeper2 := keeper{\n\t\tclient: h.client,\n\t\tkeyVaultName: keyVaultName,\n\t\tkeyName: keyID2,\n\t\tkeyVersion: keyVersion,\n\t\toptions: &KeeperOptions{Algorithm: algorithm},\n\t}\n\n\treturn &keeper1, &keeper2, nil\n}\n\nfunc (h *harness) Close() {\n\th.close()\n}\n\nfunc newHarness(ctx context.Context, t *testing.T) (drivertest.Harness, error) {\n\t\/\/ Use initEnv to setup your environment variables.\n\tif *setup.Record {\n\t\tinitEnv()\n\t}\n\n\tdone, sender := setup.NewAzureKeyVaultTestClient(ctx, t)\n\tclient, err := Dial()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient.Sender = sender\n\n\t\/\/ Use a null authorizer for replay mode.\n\tif !*setup.Record {\n\t\tna := &autorest.NullAuthorizer{}\n\t\tclient.Authorizer = na\n\t}\n\n\treturn &harness{\n\t\tclient: client,\n\t\tclose: done,\n\t}, nil\n}\n\nfunc initEnv() {\n\tenv, err := azure.EnvironmentFromName(\"AZUREPUBLICCLOUD\")\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\t\/\/ For Client Credentials authorization, set AZURE_TENANT_ID, AZURE_CLIENT_ID, AZURE_CLIENT_SECRET\n\t\/\/ For Client Certificate and Azure Managed Service Identity, see doc below for help\n\t\/\/ https:\/\/github.com\/Azure\/azure-sdk-for-go\n\n\tif os.Getenv(\"AZURE_TENANT_ID\") == \"\" ||\n\t\tos.Getenv(\"AZURE_CLIENT_ID\") == \"\" ||\n\t\tos.Getenv(\"AZURE_CLIENT_SECRET\") == \"\" {\n\t\tlog.Fatal(\"Missing environment for recording tests, set AZURE_TENANT_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET\")\n\t}\n\n\tos.Setenv(\"AZURE_ENVIRONMENT\", env.Name)\n\n\tvaultEndpoint := strings.TrimSuffix(env.KeyVaultEndpoint, \"\/\")\n\tos.Setenv(\"AZURE_AD_RESOURCE\", vaultEndpoint)\n}\n\nfunc TestConformance(t *testing.T) {\n\tdrivertest.RunConformanceTests(t, newHarness, []drivertest.AsTest{verifyAs{}})\n}\n\ntype verifyAs struct{}\n\nfunc (v verifyAs) Name() string {\n\treturn \"verify As function\"\n}\n\nfunc (v verifyAs) ErrorCheck(k *secrets.Keeper, err error) error {\n\tvar e autorest.DetailedError\n\tif !k.ErrorAs(err, &e) {\n\t\treturn errors.New(\"Keeper.ErrorAs failed\")\n\t}\n\treturn nil\n}\n\n\/\/ Key Vault-specific tests.\n\nfunc TestNoConnectionError(t *testing.T) {\n\tclient := keyvault.NewWithoutDefaults()\n\tk, err := NewKeeper(&client, keyVaultName, keyID1, keyVersion, &KeeperOptions{Algorithm: algorithm})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := k.Encrypt(context.Background(), []byte(\"secrets\")); err == nil {\n\t\tt.Error(\"Encrypt: got nil, want no connection error\")\n\t}\n}\n\nfunc TestAlgorithmNotProvided(t *testing.T) {\n\tclient := keyvault.NewWithoutDefaults()\n\tif _, err := NewKeeper(&client, keyVaultName, keyID1, keyVersion, nil); err == nil {\n\t\tt.Error(\"NewKeeper with no algorithm: got nil, want no algorithm error\")\n\t}\n}\n\nfunc TestKeyInfoFromURL(t *testing.T) {\n\ttests := []struct {\n\t\tURL string\n\t\tWantErr bool\n\t\tWantVault string\n\t\tWantKey string\n\t\tWantVersion string\n\t}{\n\t\t{\"azurekeyvault:\/\/vault1\/key1\/version1\", false, \"vault1\", \"key1\", \"version1\"},\n\t\t{\"azurekeyvault:\/\/vault2\/key2\/version2\", false, \"vault2\", \"key2\", \"version2\"},\n\t\t{\"azurekeyvault:\/\/vault3\/key3\", false, \"vault3\", \"key3\", \"\"},\n\t\t{\"azurekeyvault:\/\/vault\/key\/version\/extra\", true, \"\", \"\", \"\"},\n\t\t{\"azurekeyvault:\/\/vault\", true, \"\", \"\", \"\"},\n\t}\n\tfor _, test := range tests {\n\t\tu, err := url.Parse(test.URL)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tgotVault, gotKey, gotVersion, gotErr := keyInfoFromURL(u)\n\t\tif (gotErr != nil) != test.WantErr {\n\t\t\tt.Errorf(\"%s: got error %v, want error %v\", test.URL, gotErr, test.WantErr)\n\t\t}\n\t\tif gotErr != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif gotVault != test.WantVault {\n\t\t\tt.Errorf(\"%s: got vault %q want %q\", test.URL, gotVault, test.WantVault)\n\t\t}\n\t\tif gotKey != test.WantKey {\n\t\t\tt.Errorf(\"%s: got key %q want %q\", test.URL, gotKey, test.WantKey)\n\t\t}\n\t\tif gotVersion != test.WantVersion {\n\t\t\tt.Errorf(\"%s: got version %q want %q\", test.URL, gotVersion, test.WantVersion)\n\t\t}\n\t}\n}\n\nfunc TestOpenKeeper(t *testing.T) {\n\ttests := []struct {\n\t\tURL string\n\t\tWantErr bool\n\t}{\n\t\t\/\/ Missing algorithm query param.\n\t\t{\"azurekeyvault:\/\/mykeyvault\/mykey\/myversion\", true},\n\t\t\/\/ Invalid query parameter.\n\t\t{\"azurekeyvault:\/\/mykeyvault\/mykey\/myversion?algorithm=RSA-OAEP-256¶m=value\", true},\n\t\t\/\/ Empty host.\n\t\t{\"azurekeyvault:\/\/\/mykey\/myversion?algorithm=RSA-OAEP-256\", true},\n\t\t\/\/ Path has 1 elements (no version) -> OK.\n\t\t{\"azurekeyvault:\/\/mykeyvault\/mykey?algorithm=RSA-OAEP-256\", false},\n\t\t\/\/ Path has > 2 elements.\n\t\t{\"azurekeyvault:\/\/mykeyvault\/mykey\/myversion\/extra?algorithm=RSA-OAEP-256\", true},\n\t\t\/\/ Path has empty first element.\n\t\t{\"azurekeyvault:\/\/mykeyvault\/\/myversion?algorithm=RSA-OAEP-256\", true},\n\t\t\/\/ OK.\n\t\t{\"azurekeyvault:\/\/mykeyvault\/mykey\/myversion?algorithm=RSA-OAEP-256\", false},\n\t}\n\n\tctx := context.Background()\n\tfor _, test := range tests {\n\t\t_, err := secrets.OpenKeeper(ctx, test.URL)\n\t\tif (err != nil) != test.WantErr {\n\t\t\tt.Errorf(\"%s: got error %v, want error %v\", test.URL, err, test.WantErr)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Added memory object types.<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Windows font locator implementation\n\npackage seqdiagram\n\nimport (\n \"os\"\n \"path\/filepath\"\n)\n\n\n\/\/ The font directory\n\/\/ TODO: Must not hard code to C:\\Windows\\Font\n\nvar winFontDirectory string = \"C:\\\\Windows\\\\Fonts\"\n\n\/\/ Desirable fonts\nvar ttfFonts = []string {\n \"calibri.ttf\",\n \"verdana.ttf\",\n \"arial.ttf\",\n}\n\n\/\/ Returns the first font found given the directory containing the true\n\/\/ type fonts.\nfunc locateWinTTFFont(ttfDir string) string {\n fonts := make([]string, 0)\n\n for _, fontName := range ttfFonts {\n path := filepath.Join(ttfDir, fontName)\n if stat, _ := os.Stat(path) ; (stat != nil) {\n fonts = append(fonts, path)\n }\n }\n return fonts\n}\n\n\/\/ Locates an appropriate font on Window\nfunc LocateFont() []string {\n return locateWinTTFFont(winFontDirectory)\n}\n<commit_msg>Fixed type error<commit_after>\/\/ Windows font locator implementation\n\npackage seqdiagram\n\nimport (\n \"os\"\n \"path\/filepath\"\n)\n\n\n\/\/ The font directory\n\/\/ TODO: Must not hard code to C:\\Windows\\Font\n\nvar winFontDirectory string = \"C:\\\\Windows\\\\Fonts\"\n\n\/\/ Desirable fonts\nvar ttfFonts = []string {\n \"calibri.ttf\",\n \"verdana.ttf\",\n \"arial.ttf\",\n}\n\n\/\/ Returns the first font found given the directory containing the true\n\/\/ type fonts.\nfunc locateWinTTFFont(ttfDir string) []string {\n fonts := make([]string, 0)\n\n for _, fontName := range ttfFonts {\n path := filepath.Join(ttfDir, fontName)\n if stat, _ := os.Stat(path) ; (stat != nil) {\n fonts = append(fonts, path)\n }\n }\n return fonts\n}\n\n\/\/ Locates an appropriate font on Window\nfunc LocateFont() []string {\n return locateWinTTFFont(winFontDirectory)\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/nikhan\/go-fetch\"\n\t\"github.com\/nytlabs\/st-core\/core\"\n\t\"github.com\/thejerf\/suture\"\n)\n\ntype Position struct {\n\tX float64 `json:\"x\"`\n\tY float64 `json:\"y\"`\n}\n\ntype ProtoBlock struct {\n\tLabel string `json:\"label\"`\n\tParent int `json:\"group\"`\n\tType string `json:\"type\"`\n\tPosition Position `json:\"position\"`\n}\n\ntype BlockLedger struct {\n\tLabel string `json:\"label\"`\n\tType string `json:\"type\"`\n\tId int `json:\"id\"`\n\tBlock *core.Block `json:\"-\"`\n\tParent *Group `json:\"-\"`\n\tToken suture.ServiceToken `json:\"-\"`\n\tComposition int `json:\"composition,omitempty\"`\n\tInputs []BlockLedgerInput `json:\"inputs\"`\n\tOutputs []core.Output `json:\"outputs\"`\n\tPosition Position `json:\"position\"`\n}\n\nfunc (bl *BlockLedger) GetID() int {\n\treturn bl.Id\n}\n\nfunc (bl *BlockLedger) GetParent() *Group {\n\treturn bl.Parent\n}\n\nfunc (bl *BlockLedger) SetParent(group *Group) {\n\tbl.Parent = group\n}\n\ntype BlockLedgerInput struct {\n\tName string `json:\"name,omitempty\"`\n\tType string `json:\"type\"`\n\tValue interface{} `json:\"value\"`\n\tC chan core.Message `json:\"-\"`\n}\n\nfunc (s *Server) ListBlocks() []BlockLedger {\n\tblocks := []BlockLedger{}\n\tfor _, b := range s.blocks {\n\t\tblocks = append(blocks, *b)\n\t}\n\treturn blocks\n}\n\nfunc (s *Server) BlockIndexHandler(w http.ResponseWriter, r *http.Request) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n\tif err := json.NewEncoder(w).Encode(s.ListBlocks()); err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc (s *Server) BlockHandler(w http.ResponseWriter, r *http.Request) {\n}\nfunc (s *Server) BlockModifyPositionHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tids, ok := vars[\"id\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"no ID supplied\"})\n\t\treturn\n\t}\n\n\tid, err := strconv.Atoi(ids)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{err.Error()})\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"could not read request body\"})\n\t\treturn\n\t}\n\n\tvar p Position\n\terr = json.Unmarshal(body, &p)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"could not read JSON\"})\n\t\treturn\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tb, ok := s.blocks[id]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"could not find block\"})\n\t\treturn\n\t}\n\n\tb.Position = p\n\n\tupdate := struct {\n\t\tPosition\n\t\tId int\n\t}{\n\t\tp,\n\t\tid,\n\t}\n\n\ts.websocketBroadcast(Update{Action: UPDATE, Type: BLOCK, Data: update})\n\tw.WriteHeader(http.StatusNoContent)\n}\n\nfunc (s *Server) CreateBlock(p ProtoBlock) (*BlockLedger, error) {\n\tblockSpec, ok := s.library[p.Type]\n\tif !ok {\n\t\treturn nil, errors.New(\"spec not found\")\n\t}\n\n\tblock := core.NewBlock(blockSpec)\n\n\tm := &BlockLedger{\n\t\tLabel: p.Label,\n\t\tPosition: p.Position,\n\t\tType: p.Type,\n\t\tBlock: block,\n\t\tId: s.GetNextID(),\n\t}\n\n\tis := m.Block.GetInputs()\n\n\t\/\/ may want to move this into actual block someday\n\tinputs := make([]BlockLedgerInput, len(is), len(is))\n\tfor i, v := range is {\n\t\tif q, ok := v.Value.(*fetch.Query); ok {\n\t\t\tinputs[i] = BlockLedgerInput{\n\t\t\t\tName: v.Name,\n\t\t\t\tType: \"fetch\",\n\t\t\t\tValue: q.String(),\n\t\t\t\tC: v.C,\n\t\t\t}\n\t\t} else {\n\t\t\tinputs[i] = BlockLedgerInput{\n\t\t\t\tName: v.Name,\n\t\t\t\tType: \"const\",\n\t\t\t\tValue: v.Value,\n\t\t\t\tC: v.C,\n\t\t\t}\n\t\t}\n\t}\n\n\tif _, ok := s.groups[p.Parent]; !ok {\n\t\treturn nil, errors.New(\"invalid group, could not create block\")\n\t}\n\n\tm.Token = s.supervisor.Add(block)\n\tm.Inputs = inputs\n\tm.Outputs = block.GetOutputs()\n\ts.blocks[m.Id] = m\n\n\ts.websocketBroadcast(Update{Action: CREATE, Type: BLOCK, Data: m})\n\n\terr := s.AddChildToGroup(p.Parent, m)\n\tif err != nil {\n\t\treturn nil, err\n\n\t}\n\n\treturn m, nil\n}\n\n\/\/ CreateBlockHandler responds to a POST request to instantiate a new block and add it to the Server.\nfunc (s *Server) BlockCreateHandler(w http.ResponseWriter, r *http.Request) {\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"could not read request body\"})\n\t\treturn\n\t}\n\n\tvar m ProtoBlock\n\terr = json.Unmarshal(body, &m)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"no ID supplied\"})\n\t\treturn\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tb, err := s.CreateBlock(m)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{err.Error()})\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\twriteJSON(w, b)\n}\n\nfunc (s *Server) BlockModifyRouteHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tids, ok := vars[\"id\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"no ID supplied\"})\n\t\treturn\n\t}\n\n\tid, err := strconv.Atoi(ids)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{err.Error()})\n\t\treturn\n\t}\n\n\troutes, ok := vars[\"index\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"no route index supplied\"})\n\t\treturn\n\t}\n\n\troute, err := strconv.Atoi(routes)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{err.Error()})\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"could not read request body\"})\n\t\treturn\n\t}\n\n\tvar v BlockLedgerInput\n\terr = json.Unmarshal(body, &v)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"could not unmarshal value\"})\n\t\treturn\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\terr = s.ModifyBlockRoute(id, route, v)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{err.Error()})\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusNoContent)\n}\n\nfunc (s *Server) ModifyBlockRoute(id int, route int, v BlockLedgerInput) error {\n\tb, ok := s.blocks[id]\n\tif !ok {\n\t\treturn errors.New(\"could not find block\")\n\t}\n\n\t\/\/ again maybe this type should be native to block under core.\n\tvar m interface{}\n\tswitch v.Type {\n\tcase \"fetch\":\n\t\tqueryString, ok := v.Value.(string)\n\t\tif !ok {\n\t\t\treturn errors.New(\"fetch is not string\")\n\t\t}\n\n\t\tfo, err := fetch.Parse(queryString)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tm = fo\n\tcase \"const\":\n\t\tm = v.Value\n\tdefault:\n\t\treturn errors.New(\"no value or query specified\")\n\t}\n\n\terr := b.Block.SetInput(core.RouteIndex(route), m)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.blocks[id].Inputs[route].Type = v.Type\n\ts.blocks[id].Inputs[route].Value = m\n\n\tupdate := struct {\n\t\tBlockLedgerInput\n\t\tId int `json:\"id\"`\n\t\tinput int `json:\"input\"`\n\t}{\n\t\tv, id, route,\n\t}\n\n\ts.websocketBroadcast(Update{Action: UPDATE, Type: BLOCK, Data: update})\n\treturn nil\n}\n\nfunc (s *Server) BlockModifyNameHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tids, ok := vars[\"id\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"no ID supplied\"})\n\t\treturn\n\t}\n\n\tid, err := strconv.Atoi(ids)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{err.Error()})\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"could not read request body\"})\n\t\treturn\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\t_, ok = s.blocks[id]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"block not found\"})\n\t\treturn\n\t}\n\n\tvar label string\n\terr = json.Unmarshal(body, &label)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"could not unmarshal value\"})\n\t\treturn\n\t}\n\n\ts.blocks[id].Label = label\n\n\tupdate := struct {\n\t\tId int `json:\"id\"`\n\t\tLabel string `json:\"label\"`\n\t}{\n\t\tid, label,\n\t}\n\n\ts.websocketBroadcast(Update{Action: UPDATE, Type: BLOCK, Data: update})\n\tw.WriteHeader(http.StatusNoContent)\n}\n\nfunc (s *Server) DeleteBlock(id int) error {\n\tb, ok := s.blocks[id]\n\tif !ok {\n\t\treturn errors.New(\"block not found\")\n\t}\n\n\tdeleteSet := make(map[int]struct{})\n\n\t\/\/ build a set of connections that we may need to delete\n\t\/\/ we need to panic here because if any error is thrown we are in huge trouble\n\t\/\/ any panic indicates that our server connection ledger is no longer true\n\tfor _, c := range s.connections {\n\t\tif c.Target.Id == id || c.Source.Id == id {\n\t\t\tdeleteSet[c.Id] = struct{}{}\n\t\t}\n\t}\n\n\t\/\/ delete the connections that involve this block\n\tfor k, _ := range deleteSet {\n\t\ts.DeleteConnection(k)\n\t}\n\n\t\/\/ remove from group\n\ts.DetachChild(b)\n\n\t\/\/ stop and delete the block\n\ts.supervisor.Remove(b.Token)\n\ts.websocketBroadcast(Update{Action: DELETE, Type: BLOCK, Data: s.blocks[id]})\n\tdelete(s.blocks, id)\n\treturn nil\n}\n\nfunc (s *Server) BlockDeleteHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tids, ok := vars[\"id\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"no ID supplied\"})\n\t\treturn\n\t}\n\n\tid, err := strconv.Atoi(ids)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{err.Error()})\n\t\treturn\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\terr = s.DeleteBlock(id)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{err.Error()})\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusNoContent)\n\n}\n<commit_msg>adding GET \/block\/{id}<commit_after>package server\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/nikhan\/go-fetch\"\n\t\"github.com\/nytlabs\/st-core\/core\"\n\t\"github.com\/thejerf\/suture\"\n)\n\ntype Position struct {\n\tX float64 `json:\"x\"`\n\tY float64 `json:\"y\"`\n}\n\ntype ProtoBlock struct {\n\tLabel string `json:\"label\"`\n\tParent int `json:\"group\"`\n\tType string `json:\"type\"`\n\tPosition Position `json:\"position\"`\n}\n\ntype BlockLedger struct {\n\tLabel string `json:\"label\"`\n\tType string `json:\"type\"`\n\tId int `json:\"id\"`\n\tBlock *core.Block `json:\"-\"`\n\tParent *Group `json:\"-\"`\n\tToken suture.ServiceToken `json:\"-\"`\n\tComposition int `json:\"composition,omitempty\"`\n\tInputs []BlockLedgerInput `json:\"inputs\"`\n\tOutputs []core.Output `json:\"outputs\"`\n\tPosition Position `json:\"position\"`\n}\n\nfunc (bl *BlockLedger) GetID() int {\n\treturn bl.Id\n}\n\nfunc (bl *BlockLedger) GetParent() *Group {\n\treturn bl.Parent\n}\n\nfunc (bl *BlockLedger) SetParent(group *Group) {\n\tbl.Parent = group\n}\n\ntype BlockLedgerInput struct {\n\tName string `json:\"name,omitempty\"`\n\tType string `json:\"type\"`\n\tValue interface{} `json:\"value\"`\n\tC chan core.Message `json:\"-\"`\n}\n\nfunc (s *Server) ListBlocks() []BlockLedger {\n\tblocks := []BlockLedger{}\n\tfor _, b := range s.blocks {\n\t\tblocks = append(blocks, *b)\n\t}\n\treturn blocks\n}\n\nfunc (s *Server) BlockIndexHandler(w http.ResponseWriter, r *http.Request) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n\tif err := json.NewEncoder(w).Encode(s.ListBlocks()); err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc (s *Server) BlockHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tids, ok := vars[\"id\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"no ID supplied\"})\n\t\treturn\n\t}\n\n\tid, err := strconv.Atoi(ids)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{err.Error()})\n\t\treturn\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tb, ok := s.blocks[id]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"could not find block\"})\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\twriteJSON(w, b)\n\treturn\n}\n\nfunc (s *Server) BlockModifyPositionHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tids, ok := vars[\"id\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"no ID supplied\"})\n\t\treturn\n\t}\n\n\tid, err := strconv.Atoi(ids)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{err.Error()})\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"could not read request body\"})\n\t\treturn\n\t}\n\n\tvar p Position\n\terr = json.Unmarshal(body, &p)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"could not read JSON\"})\n\t\treturn\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tb, ok := s.blocks[id]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"could not find block\"})\n\t\treturn\n\t}\n\n\tb.Position = p\n\n\tupdate := struct {\n\t\tPosition\n\t\tId int\n\t}{\n\t\tp,\n\t\tid,\n\t}\n\n\ts.websocketBroadcast(Update{Action: UPDATE, Type: BLOCK, Data: update})\n\tw.WriteHeader(http.StatusNoContent)\n}\n\nfunc (s *Server) CreateBlock(p ProtoBlock) (*BlockLedger, error) {\n\tblockSpec, ok := s.library[p.Type]\n\tif !ok {\n\t\treturn nil, errors.New(\"spec not found\")\n\t}\n\n\tblock := core.NewBlock(blockSpec)\n\n\tm := &BlockLedger{\n\t\tLabel: p.Label,\n\t\tPosition: p.Position,\n\t\tType: p.Type,\n\t\tBlock: block,\n\t\tId: s.GetNextID(),\n\t}\n\n\tis := m.Block.GetInputs()\n\n\t\/\/ may want to move this into actual block someday\n\tinputs := make([]BlockLedgerInput, len(is), len(is))\n\tfor i, v := range is {\n\t\tif q, ok := v.Value.(*fetch.Query); ok {\n\t\t\tinputs[i] = BlockLedgerInput{\n\t\t\t\tName: v.Name,\n\t\t\t\tType: \"fetch\",\n\t\t\t\tValue: q.String(),\n\t\t\t\tC: v.C,\n\t\t\t}\n\t\t} else {\n\t\t\tinputs[i] = BlockLedgerInput{\n\t\t\t\tName: v.Name,\n\t\t\t\tType: \"const\",\n\t\t\t\tValue: v.Value,\n\t\t\t\tC: v.C,\n\t\t\t}\n\t\t}\n\t}\n\n\tif _, ok := s.groups[p.Parent]; !ok {\n\t\treturn nil, errors.New(\"invalid group, could not create block\")\n\t}\n\n\tm.Token = s.supervisor.Add(block)\n\tm.Inputs = inputs\n\tm.Outputs = block.GetOutputs()\n\ts.blocks[m.Id] = m\n\n\ts.websocketBroadcast(Update{Action: CREATE, Type: BLOCK, Data: m})\n\n\terr := s.AddChildToGroup(p.Parent, m)\n\tif err != nil {\n\t\treturn nil, err\n\n\t}\n\n\treturn m, nil\n}\n\n\/\/ CreateBlockHandler responds to a POST request to instantiate a new block and add it to the Server.\nfunc (s *Server) BlockCreateHandler(w http.ResponseWriter, r *http.Request) {\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"could not read request body\"})\n\t\treturn\n\t}\n\n\tvar m ProtoBlock\n\terr = json.Unmarshal(body, &m)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"no ID supplied\"})\n\t\treturn\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tb, err := s.CreateBlock(m)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{err.Error()})\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\twriteJSON(w, b)\n}\n\nfunc (s *Server) BlockModifyRouteHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tids, ok := vars[\"id\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"no ID supplied\"})\n\t\treturn\n\t}\n\n\tid, err := strconv.Atoi(ids)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{err.Error()})\n\t\treturn\n\t}\n\n\troutes, ok := vars[\"index\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"no route index supplied\"})\n\t\treturn\n\t}\n\n\troute, err := strconv.Atoi(routes)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{err.Error()})\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"could not read request body\"})\n\t\treturn\n\t}\n\n\tvar v BlockLedgerInput\n\terr = json.Unmarshal(body, &v)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"could not unmarshal value\"})\n\t\treturn\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\terr = s.ModifyBlockRoute(id, route, v)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{err.Error()})\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusNoContent)\n}\n\nfunc (s *Server) ModifyBlockRoute(id int, route int, v BlockLedgerInput) error {\n\tb, ok := s.blocks[id]\n\tif !ok {\n\t\treturn errors.New(\"could not find block\")\n\t}\n\n\t\/\/ again maybe this type should be native to block under core.\n\tvar m interface{}\n\tswitch v.Type {\n\tcase \"fetch\":\n\t\tqueryString, ok := v.Value.(string)\n\t\tif !ok {\n\t\t\treturn errors.New(\"fetch is not string\")\n\t\t}\n\n\t\tfo, err := fetch.Parse(queryString)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tm = fo\n\tcase \"const\":\n\t\tm = v.Value\n\tdefault:\n\t\treturn errors.New(\"no value or query specified\")\n\t}\n\n\terr := b.Block.SetInput(core.RouteIndex(route), m)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.blocks[id].Inputs[route].Type = v.Type\n\ts.blocks[id].Inputs[route].Value = m\n\n\tupdate := struct {\n\t\tBlockLedgerInput\n\t\tId int `json:\"id\"`\n\t\tinput int `json:\"input\"`\n\t}{\n\t\tv, id, route,\n\t}\n\n\ts.websocketBroadcast(Update{Action: UPDATE, Type: BLOCK, Data: update})\n\treturn nil\n}\n\nfunc (s *Server) BlockModifyNameHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tids, ok := vars[\"id\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"no ID supplied\"})\n\t\treturn\n\t}\n\n\tid, err := strconv.Atoi(ids)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{err.Error()})\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"could not read request body\"})\n\t\treturn\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\t_, ok = s.blocks[id]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"block not found\"})\n\t\treturn\n\t}\n\n\tvar label string\n\terr = json.Unmarshal(body, &label)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"could not unmarshal value\"})\n\t\treturn\n\t}\n\n\ts.blocks[id].Label = label\n\n\tupdate := struct {\n\t\tId int `json:\"id\"`\n\t\tLabel string `json:\"label\"`\n\t}{\n\t\tid, label,\n\t}\n\n\ts.websocketBroadcast(Update{Action: UPDATE, Type: BLOCK, Data: update})\n\tw.WriteHeader(http.StatusNoContent)\n}\n\nfunc (s *Server) DeleteBlock(id int) error {\n\tb, ok := s.blocks[id]\n\tif !ok {\n\t\treturn errors.New(\"block not found\")\n\t}\n\n\tdeleteSet := make(map[int]struct{})\n\n\t\/\/ build a set of connections that we may need to delete\n\t\/\/ we need to panic here because if any error is thrown we are in huge trouble\n\t\/\/ any panic indicates that our server connection ledger is no longer true\n\tfor _, c := range s.connections {\n\t\tif c.Target.Id == id || c.Source.Id == id {\n\t\t\tdeleteSet[c.Id] = struct{}{}\n\t\t}\n\t}\n\n\t\/\/ delete the connections that involve this block\n\tfor k, _ := range deleteSet {\n\t\ts.DeleteConnection(k)\n\t}\n\n\t\/\/ remove from group\n\ts.DetachChild(b)\n\n\t\/\/ stop and delete the block\n\ts.supervisor.Remove(b.Token)\n\ts.websocketBroadcast(Update{Action: DELETE, Type: BLOCK, Data: s.blocks[id]})\n\tdelete(s.blocks, id)\n\treturn nil\n}\n\nfunc (s *Server) BlockDeleteHandler(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tids, ok := vars[\"id\"]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{\"no ID supplied\"})\n\t\treturn\n\t}\n\n\tid, err := strconv.Atoi(ids)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{err.Error()})\n\t\treturn\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\n\terr = s.DeleteBlock(id)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\twriteJSON(w, Error{err.Error()})\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusNoContent)\n\n}\n<|endoftext|>"} {"text":"<commit_before>package fromspring\n\nimport (\n\t\"testing\"\n)\n\nvar parseLineTests = []struct {\n\tv string\n\toasName string\n\toasType string\n\toasFormat string\n\toasDefault interface{}\n\texplicitCustomTypes []string\n}{\n\t{\"private Boolean myPropBoolean;\", \"myPropBoolean\", \"boolean\", \"\", nil, []string{}},\n\t{\"private DateTime myPropDateTime;\", \"myPropDateTime\", \"string\", \"date-time\", nil, []string{}},\n\t{\"private Integer myPropInteger = 1;\", \"myPropInteger\", \"integer\", \"\", 1, []string{}},\n\t{\"private Long myPropLong = 1;\", \"myPropLong\", \"integer\", \"int64\", 1, []string{}},\n\t{\"private String myPropString;\", \"myPropString\", \"string\", \"\", nil, []string{}},\n\t{\"private String myPropString = \\\"\\\";\", \"myPropString\", \"string\", \"\", \"\", []string{}},\n\t{\"private String myPropString = \\\"AVAILABLE\\\";\", \"myPropString\", \"string\", \"\", \"AVAILABLE\", []string{}},\n\t{\"private List<Integer> myPropStrings = new ArrayList<>();\", \"myPropStrings\", \"array\", \"\", nil, []string{}},\n\t{\"private List<String> myPropStrings = new ArrayList<>();\", \"myPropStrings\", \"array\", \"\", nil, []string{}},\n\t{\"private List<Integer> myPropStrings;\", \"myPropStrings\", \"array\", \"\", nil, []string{}},\n\t{\"private List<String> myPropStrings;\", \"myPropStrings\", \"array\", \"\", nil, []string{}},\n}\n\nfunc TestParseLine(t *testing.T) {\n\tfor _, tt := range parseLineTests {\n\t\tname, schemaRef, err := ParseSpringLineToSchemaRef(tt.v, tt.explicitCustomTypes)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"fromspring.ParseSpringLineToSchema() [%v]\", err)\n\t\t}\n\t\tschema := schemaRef.Value\n\t\tif tt.oasName != name || tt.oasType != schema.Type || tt.oasFormat != schema.Format {\n\t\t\tt.Errorf(`fromspring.ParseSpringLineToSchema(\"%s\") MISMATCH W[%v]G[%v] [%v][%v] [%v][%v]`, tt.v, tt.oasName, name, tt.oasType, schema.Type, tt.oasFormat, schema.Format)\n\t\t}\n\t\t\/\/fmtutil.PrintJSON(schema)\n\t}\n}\n\n\/*\nconst CampaignLeadSearchCriteriaSimple = `private List<Integer> leadIds = new ArrayList<>();\n\tprivate List<Integer> listIds = new ArrayList<>();\n\tprivate List<String> externIds = new ArrayList<>();\n\tprivate List<String> physicalStates;\n\tprivate List<String> agentDispositions;\n\tprivate List<String> leadPhoneNumbers = new ArrayList<>();\n\tprivate boolean orphanedLeadsOnly;\n\tprivate String callerId;\n\tprivate String leadPhoneNum;\n\tprivate List<Integer> campaignIds = new ArrayList<>();\n\tprivate String firstName;\n\tprivate String lastName;\n\tprivate String address1;\n\tprivate String address2;\n\tprivate String city;\n\tprivate String zip;\n\tprivate String emailAddress;\n\tprivate String auxData1;\n\tprivate String auxData2;\n\tprivate String auxData3;\n\tprivate String auxData4;\n\tprivate String auxData5;\n\tprivate Integer pendingAgentId;\n\tprivate Integer agentId;`\n*\/\n<commit_msg>update for Java date<commit_after>package fromspring\n\nimport (\n\t\"testing\"\n)\n\nvar parseLineTests = []struct {\n\tv string\n\toasName string\n\toasType string\n\toasFormat string\n\toasDefault interface{}\n\texplicitCustomTypes []string\n}{\n\t{\"private Boolean myPropBoolean;\", \"myPropBoolean\", \"boolean\", \"\", nil, []string{}},\n\t{\"private DateTime myPropDateTime;\", \"myPropDateTime\", \"string\", \"\", nil, []string{}},\n\t{\"private Integer myPropInteger = 1;\", \"myPropInteger\", \"integer\", \"\", 1, []string{}},\n\t{\"private Long myPropLong = 1;\", \"myPropLong\", \"integer\", \"int64\", 1, []string{}},\n\t{\"private String myPropString;\", \"myPropString\", \"string\", \"\", nil, []string{}},\n\t{\"private String myPropString = \\\"\\\";\", \"myPropString\", \"string\", \"\", \"\", []string{}},\n\t{\"private String myPropString = \\\"AVAILABLE\\\";\", \"myPropString\", \"string\", \"\", \"AVAILABLE\", []string{}},\n\t{\"private List<Integer> myPropStrings = new ArrayList<>();\", \"myPropStrings\", \"array\", \"\", nil, []string{}},\n\t{\"private List<String> myPropStrings = new ArrayList<>();\", \"myPropStrings\", \"array\", \"\", nil, []string{}},\n\t{\"private List<Integer> myPropStrings;\", \"myPropStrings\", \"array\", \"\", nil, []string{}},\n\t{\"private List<String> myPropStrings;\", \"myPropStrings\", \"array\", \"\", nil, []string{}},\n}\n\nfunc TestParseLine(t *testing.T) {\n\tfor _, tt := range parseLineTests {\n\t\tname, schemaRef, err := ParseSpringLineToSchemaRef(tt.v, tt.explicitCustomTypes)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"fromspring.ParseSpringLineToSchema() [%v]\", err)\n\t\t}\n\t\tschema := schemaRef.Value\n\t\tif tt.oasName != name || tt.oasType != schema.Type || tt.oasFormat != schema.Format {\n\t\t\tt.Errorf(`fromspring.ParseSpringLineToSchema(\"%s\") MISMATCH W[%v]G[%v] [%v][%v] [%v][%v]`, tt.v, tt.oasName, name, tt.oasType, schema.Type, tt.oasFormat, schema.Format)\n\t\t}\n\t\t\/\/fmtutil.PrintJSON(schema)\n\t}\n}\n\n\/*\nconst CampaignLeadSearchCriteriaSimple = `private List<Integer> leadIds = new ArrayList<>();\n\tprivate List<Integer> listIds = new ArrayList<>();\n\tprivate List<String> externIds = new ArrayList<>();\n\tprivate List<String> physicalStates;\n\tprivate List<String> agentDispositions;\n\tprivate List<String> leadPhoneNumbers = new ArrayList<>();\n\tprivate boolean orphanedLeadsOnly;\n\tprivate String callerId;\n\tprivate String leadPhoneNum;\n\tprivate List<Integer> campaignIds = new ArrayList<>();\n\tprivate String firstName;\n\tprivate String lastName;\n\tprivate String address1;\n\tprivate String address2;\n\tprivate String city;\n\tprivate String zip;\n\tprivate String emailAddress;\n\tprivate String auxData1;\n\tprivate String auxData2;\n\tprivate String auxData3;\n\tprivate String auxData4;\n\tprivate String auxData5;\n\tprivate Integer pendingAgentId;\n\tprivate Integer agentId;`\n*\/\n<|endoftext|>"} {"text":"<commit_before>package wallet\n\nimport (\n\t\"bytes\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/NebulousLabs\/Sia\/build\"\n\t\"github.com\/NebulousLabs\/Sia\/crypto\"\n\t\"github.com\/NebulousLabs\/Sia\/modules\"\n\t\"github.com\/NebulousLabs\/bolt\"\n)\n\n\/\/ resetChangeID clears the wallet's ConsensusChangeID. When Unlock is called,\n\/\/ the wallet will rescan from the genesis block.\nfunc resetChangeID(w *Wallet) {\n\terr := w.db.Update(func(tx *bolt.Tx) error {\n\t\treturn dbPutConsensusChangeID(tx, modules.ConsensusChangeBeginning)\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ TestPrimarySeed checks that the correct seed is returned when calling\n\/\/ PrimarySeed.\nfunc TestPrimarySeed(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\t\/\/ Start with a blank wallet tester.\n\twt, err := createBlankWalletTester(\"TestPrimarySeed\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer wt.closeWt()\n\n\t\/\/ Create a seed and unlock the wallet.\n\tseed, err := wt.wallet.Encrypt(crypto.TwofishKey{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = wt.wallet.Unlock(crypto.TwofishKey(crypto.HashObject(seed)))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Try getting an address, see that the seed advances correctly.\n\tprimarySeed, progress, err := wt.wallet.PrimarySeed()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(primarySeed[:], seed[:]) {\n\t\tt.Error(\"PrimarySeed is returning a value inconsitent with the seed returned by Encrypt\")\n\t}\n\tif progress != 0 {\n\t\tt.Error(\"primary seed is returning the wrong progress\")\n\t}\n\t_, err = wt.wallet.NextAddress()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t_, progress, err = wt.wallet.PrimarySeed()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif progress != 1 {\n\t\tt.Error(\"primary seed is returning the wrong progress\")\n\t}\n\n\t\/\/ Lock then unlock the wallet and check the responses.\n\terr = wt.wallet.Lock()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t_, _, err = wt.wallet.PrimarySeed()\n\tif err != modules.ErrLockedWallet {\n\t\tt.Error(\"unexpected err:\", err)\n\t}\n\terr = wt.wallet.Unlock(crypto.TwofishKey(crypto.HashObject(seed)))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tprimarySeed, progress, err = wt.wallet.PrimarySeed()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(primarySeed[:], seed[:]) {\n\t\tt.Error(\"PrimarySeed is returning a value inconsitent with the seed returned by Encrypt\")\n\t}\n\tif progress != 1 {\n\t\tt.Error(\"progress reporting an unexpected value\")\n\t}\n}\n\n\/\/ TestLoadSeed checks that a seed can be successfully recovered from a wallet,\n\/\/ and then remain available on subsequent loads of the wallet.\nfunc TestLoadSeed(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\twt, err := createWalletTester(\"TestLoadSeed\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer wt.closeWt()\n\tseed, _, err := wt.wallet.PrimarySeed()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tallSeeds, err := wt.wallet.AllSeeds()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(allSeeds) != 1 {\n\t\tt.Fatal(\"AllSeeds should be returning the primary seed.\")\n\t} else if allSeeds[0] != seed {\n\t\tt.Fatal(\"AllSeeds returned the wrong seed\")\n\t}\n\twt.wallet.Close()\n\n\tdir := filepath.Join(build.TempDir(modules.WalletDir, \"TestLoadSeed - 0\"), modules.WalletDir)\n\tw, err := New(wt.cs, wt.tpool, dir)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tnewSeed, err := w.Encrypt(crypto.TwofishKey{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = w.Unlock(crypto.TwofishKey(crypto.HashObject(newSeed)))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Balance of wallet should be 0.\n\tsiacoinBal, _, _ := w.ConfirmedBalance()\n\tif !siacoinBal.Equals64(0) {\n\t\tt.Error(\"fresh wallet should not have a balance\")\n\t}\n\terr = w.LoadSeed(crypto.TwofishKey(crypto.HashObject(newSeed)), seed)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tallSeeds, err = w.AllSeeds()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(allSeeds) != 2 {\n\t\tt.Error(\"AllSeeds should be returning the primary seed with the recovery seed.\")\n\t}\n\tif allSeeds[0] != newSeed {\n\t\tt.Error(\"AllSeeds returned the wrong seed\")\n\t}\n\tif !bytes.Equal(allSeeds[1][:], seed[:]) {\n\t\tt.Error(\"AllSeeds returned the wrong seed\")\n\t}\n\tw.Close()\n\n\t\/\/ Rather than worry about a rescan, which isn't implemented and has\n\t\/\/ synchronization difficulties, just load a new wallet from the same\n\t\/\/ settings file - the same effect is achieved without the difficulties.\n\t\/\/\n\t\/\/ TODO: when proper seed loading is implemented, just check the balance\n\t\/\/ of w directly.\n\tw2, err := New(wt.cs, wt.tpool, dir)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ reset the ccID so that the wallet does a full rescan\n\tresetChangeID(w2)\n\terr = w2.Unlock(crypto.TwofishKey(crypto.HashObject(newSeed)))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsiacoinBal2, _, _ := w2.ConfirmedBalance()\n\tif siacoinBal2.Cmp64(0) <= 0 {\n\t\tt.Error(\"wallet failed to load a seed with money in it\")\n\t}\n\tallSeeds, err = w2.AllSeeds()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(allSeeds) != 2 {\n\t\tt.Error(\"AllSeeds should be returning the primary seed with the recovery seed.\")\n\t}\n\tif !bytes.Equal(allSeeds[0][:], newSeed[:]) {\n\t\tt.Error(\"AllSeeds returned the wrong seed\")\n\t}\n\tif !bytes.Equal(allSeeds[1][:], seed[:]) {\n\t\tt.Error(\"AllSeeds returned the wrong seed\")\n\t}\n\tw2.Close()\n}\n\n\/\/ TestSweepSeed tests that sweeping a seed results in a transfer of its\n\/\/ outputs to the wallet.\nfunc TestSweepSeed(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\t\/\/ create a wallet with some money\n\twt, err := createWalletTester(\"TestSweepSeed0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer wt.closeWt()\n\tseed, _, err := wt.wallet.PrimarySeed()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ send money to ourselves, so that we sweep a real output (instead of\n\t\/\/ just a miner payout)\n\tuc, err := wt.wallet.NextAddress()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t_, err = wt.wallet.SendSiacoins(types.SiacoinPrecision, uc.UnlockHash())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\twt.miner.AddBlock()\n\n\t\/\/ create a blank wallet\n\tdir := filepath.Join(build.TempDir(modules.WalletDir, \"TestSweepSeed1\"), modules.WalletDir)\n\tw, err := New(wt.cs, wt.tpool, dir)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tnewSeed, err := w.Encrypt(crypto.TwofishKey{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = w.Unlock(crypto.TwofishKey(crypto.HashObject(newSeed)))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ starting balance should be 0.\n\tsiacoinBal, _, _ := w.ConfirmedBalance()\n\tif !siacoinBal.IsZero() {\n\t\tt.Error(\"fresh wallet should not have a balance\")\n\t}\n\n\t\/\/ sweep the seed of the first wallet into the second\n\tswept, err := w.SweepSeed(seed)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ new wallet should have exactly 'swept' coins\n\t_, incoming := w.UnconfirmedBalance()\n\tif incoming.Cmp(swept) != 0 {\n\t\tt.Fatalf(\"wallet should have correct balance after sweeping seed: wanted %v, got %v\", swept, incoming)\n\t}\n}\n\n\/\/ TestGenerateKeys tests that the generateKeys function correctly generates a\n\/\/ key for every index specified.\nfunc TestGenerateKeys(t *testing.T) {\n\tfor i, k := range generateKeys(modules.Seed{}, 1000, 4000) {\n\t\tif len(k.UnlockConditions.PublicKeys) == 0 {\n\t\t\tt.Errorf(\"index %v was skipped\", i)\n\t\t}\n\t}\n}\n<commit_msg>fix TestPrimarySeed<commit_after>package wallet\n\nimport (\n\t\"bytes\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/NebulousLabs\/Sia\/build\"\n\t\"github.com\/NebulousLabs\/Sia\/crypto\"\n\t\"github.com\/NebulousLabs\/Sia\/modules\"\n\t\"github.com\/NebulousLabs\/bolt\"\n)\n\n\/\/ resetChangeID clears the wallet's ConsensusChangeID. When Unlock is called,\n\/\/ the wallet will rescan from the genesis block.\nfunc resetChangeID(w *Wallet) {\n\terr := w.db.Update(func(tx *bolt.Tx) error {\n\t\treturn dbPutConsensusChangeID(tx, modules.ConsensusChangeBeginning)\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ TestPrimarySeed checks that the correct seed is returned when calling\n\/\/ PrimarySeed.\nfunc TestPrimarySeed(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\t\/\/ Start with a blank wallet tester.\n\twt, err := createBlankWalletTester(\"TestPrimarySeed\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer wt.closeWt()\n\n\t\/\/ Create a seed and unlock the wallet.\n\tseed, err := wt.wallet.Encrypt(crypto.TwofishKey{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = wt.wallet.Unlock(crypto.TwofishKey(crypto.HashObject(seed)))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Try getting an address, see that the seed advances correctly.\n\tprimarySeed, remaining, err := wt.wallet.PrimarySeed()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(primarySeed[:], seed[:]) {\n\t\tt.Error(\"PrimarySeed is returning a value inconsitent with the seed returned by Encrypt\")\n\t}\n\tif remaining != maxScanKeys {\n\t\tt.Error(\"primary seed is returning the wrong number of remaining addresses\")\n\t}\n\t_, err = wt.wallet.NextAddress()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t_, remaining, err = wt.wallet.PrimarySeed()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif remaining != maxScanKeys-1 {\n\t\tt.Error(\"primary seed is returning the wrong number of remaining addresses\")\n\t}\n\n\t\/\/ Lock then unlock the wallet and check the responses.\n\terr = wt.wallet.Lock()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t_, _, err = wt.wallet.PrimarySeed()\n\tif err != modules.ErrLockedWallet {\n\t\tt.Error(\"unexpected err:\", err)\n\t}\n\terr = wt.wallet.Unlock(crypto.TwofishKey(crypto.HashObject(seed)))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tprimarySeed, remaining, err = wt.wallet.PrimarySeed()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !bytes.Equal(primarySeed[:], seed[:]) {\n\t\tt.Error(\"PrimarySeed is returning a value inconsitent with the seed returned by Encrypt\")\n\t}\n\tif remaining != maxScanKeys-1 {\n\t\tt.Error(\"primary seed is returning the wrong number of remaining addresses\")\n\t}\n}\n\n\/\/ TestLoadSeed checks that a seed can be successfully recovered from a wallet,\n\/\/ and then remain available on subsequent loads of the wallet.\nfunc TestLoadSeed(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\twt, err := createWalletTester(\"TestLoadSeed\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer wt.closeWt()\n\tseed, _, err := wt.wallet.PrimarySeed()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tallSeeds, err := wt.wallet.AllSeeds()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(allSeeds) != 1 {\n\t\tt.Fatal(\"AllSeeds should be returning the primary seed.\")\n\t} else if allSeeds[0] != seed {\n\t\tt.Fatal(\"AllSeeds returned the wrong seed\")\n\t}\n\twt.wallet.Close()\n\n\tdir := filepath.Join(build.TempDir(modules.WalletDir, \"TestLoadSeed - 0\"), modules.WalletDir)\n\tw, err := New(wt.cs, wt.tpool, dir)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tnewSeed, err := w.Encrypt(crypto.TwofishKey{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = w.Unlock(crypto.TwofishKey(crypto.HashObject(newSeed)))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Balance of wallet should be 0.\n\tsiacoinBal, _, _ := w.ConfirmedBalance()\n\tif !siacoinBal.Equals64(0) {\n\t\tt.Error(\"fresh wallet should not have a balance\")\n\t}\n\terr = w.LoadSeed(crypto.TwofishKey(crypto.HashObject(newSeed)), seed)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tallSeeds, err = w.AllSeeds()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(allSeeds) != 2 {\n\t\tt.Error(\"AllSeeds should be returning the primary seed with the recovery seed.\")\n\t}\n\tif allSeeds[0] != newSeed {\n\t\tt.Error(\"AllSeeds returned the wrong seed\")\n\t}\n\tif !bytes.Equal(allSeeds[1][:], seed[:]) {\n\t\tt.Error(\"AllSeeds returned the wrong seed\")\n\t}\n\tw.Close()\n\n\t\/\/ Rather than worry about a rescan, which isn't implemented and has\n\t\/\/ synchronization difficulties, just load a new wallet from the same\n\t\/\/ settings file - the same effect is achieved without the difficulties.\n\t\/\/\n\t\/\/ TODO: when proper seed loading is implemented, just check the balance\n\t\/\/ of w directly.\n\tw2, err := New(wt.cs, wt.tpool, dir)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ reset the ccID so that the wallet does a full rescan\n\tresetChangeID(w2)\n\terr = w2.Unlock(crypto.TwofishKey(crypto.HashObject(newSeed)))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsiacoinBal2, _, _ := w2.ConfirmedBalance()\n\tif siacoinBal2.Cmp64(0) <= 0 {\n\t\tt.Error(\"wallet failed to load a seed with money in it\")\n\t}\n\tallSeeds, err = w2.AllSeeds()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(allSeeds) != 2 {\n\t\tt.Error(\"AllSeeds should be returning the primary seed with the recovery seed.\")\n\t}\n\tif !bytes.Equal(allSeeds[0][:], newSeed[:]) {\n\t\tt.Error(\"AllSeeds returned the wrong seed\")\n\t}\n\tif !bytes.Equal(allSeeds[1][:], seed[:]) {\n\t\tt.Error(\"AllSeeds returned the wrong seed\")\n\t}\n\tw2.Close()\n}\n\n\/\/ TestSweepSeed tests that sweeping a seed results in a transfer of its\n\/\/ outputs to the wallet.\nfunc TestSweepSeed(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\t\/\/ create a wallet with some money\n\twt, err := createWalletTester(\"TestSweepSeed0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer wt.closeWt()\n\tseed, _, err := wt.wallet.PrimarySeed()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ send money to ourselves, so that we sweep a real output (instead of\n\t\/\/ just a miner payout)\n\tuc, err := wt.wallet.NextAddress()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t_, err = wt.wallet.SendSiacoins(types.SiacoinPrecision, uc.UnlockHash())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\twt.miner.AddBlock()\n\n\t\/\/ create a blank wallet\n\tdir := filepath.Join(build.TempDir(modules.WalletDir, \"TestSweepSeed1\"), modules.WalletDir)\n\tw, err := New(wt.cs, wt.tpool, dir)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tnewSeed, err := w.Encrypt(crypto.TwofishKey{})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = w.Unlock(crypto.TwofishKey(crypto.HashObject(newSeed)))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ starting balance should be 0.\n\tsiacoinBal, _, _ := w.ConfirmedBalance()\n\tif !siacoinBal.IsZero() {\n\t\tt.Error(\"fresh wallet should not have a balance\")\n\t}\n\n\t\/\/ sweep the seed of the first wallet into the second\n\tswept, err := w.SweepSeed(seed)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ new wallet should have exactly 'swept' coins\n\t_, incoming := w.UnconfirmedBalance()\n\tif incoming.Cmp(swept) != 0 {\n\t\tt.Fatalf(\"wallet should have correct balance after sweeping seed: wanted %v, got %v\", swept, incoming)\n\t}\n}\n\n\/\/ TestGenerateKeys tests that the generateKeys function correctly generates a\n\/\/ key for every index specified.\nfunc TestGenerateKeys(t *testing.T) {\n\tfor i, k := range generateKeys(modules.Seed{}, 1000, 4000) {\n\t\tif len(k.UnlockConditions.PublicKeys) == 0 {\n\t\t\tt.Errorf(\"index %v was skipped\", i)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package kuzzle_test\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/kuzzleio\/sdk-go\/internal\"\n\t\"github.com\/kuzzleio\/sdk-go\/kuzzle\"\n\t\"github.com\/kuzzleio\/sdk-go\/types\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"log\"\n\t\"testing\"\n)\n\nfunc TestListCollectionsIndexNull(t *testing.T) {\n\tk, _ := kuzzle.NewKuzzle(&internal.MockedConnection{}, nil)\n\t_, err := k.ListCollections(\"\", nil)\n\tassert.NotNil(t, err)\n}\n\nfunc TestListCollectionsQueryError(t *testing.T) {\n\tc := &internal.MockedConnection{\n\t\tMockSend: func(query []byte, options types.QueryOptions) types.KuzzleResponse {\n\t\t\trequest := types.KuzzleRequest{}\n\t\t\tjson.Unmarshal(query, &request)\n\t\t\tassert.Equal(t, \"collection\", request.Controller)\n\t\t\tassert.Equal(t, \"index\", request.Index)\n\t\t\tassert.Equal(t, \"list\", request.Action)\n\t\t\treturn types.KuzzleResponse{Error: types.MessageError{Message: \"error\"}}\n\t\t},\n\t}\n\tk, _ := kuzzle.NewKuzzle(c, nil)\n\t_, err := k.ListCollections(\"index\", nil)\n\tassert.NotNil(t, err)\n}\n\nfunc TestListCollections(t *testing.T) {\n\tc := &internal.MockedConnection{\n\t\tMockSend: func(query []byte, options types.QueryOptions) types.KuzzleResponse {\n\t\t\trequest := types.KuzzleRequest{}\n\t\t\tjson.Unmarshal(query, &request)\n\t\t\tassert.Equal(t, \"collection\", request.Controller)\n\t\t\tassert.Equal(t, \"list\", request.Action)\n\n\t\t\ttype collections struct {\n\t\t\t\tCollections []types.CollectionsList `json:\"collections\"`\n\t\t\t}\n\n\t\t\tlist := make([]types.CollectionsList, 0)\n\t\t\tlist = append(list, types.CollectionsList{Name: \"collection1\", Type: \"stored\"})\n\t\t\tlist = append(list, types.CollectionsList{Name: \"collection2\", Type: \"stored\"})\n\n\t\t\tc := collections{\n\t\t\t\tCollections: list,\n\t\t\t}\n\n\t\t\th, err := json.Marshal(c)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\treturn types.KuzzleResponse{Result: h}\n\t\t},\n\t}\n\tk, _ := kuzzle.NewKuzzle(c, nil)\n\n\tres, _ := k.ListCollections(\"index\", nil)\n\n\tassert.Equal(t, \"collection1\", res[0].Name)\n\tassert.Equal(t, \"collection2\", res[1].Name)\n\tassert.Equal(t, \"stored\", res[0].Type)\n\tassert.Equal(t, \"stored\", res[1].Type)\n}\n\nfunc TestListCollectionsWiithOptions(t *testing.T) {\n\tc := &internal.MockedConnection{\n\t\tMockSend: func(query []byte, options types.QueryOptions) types.KuzzleResponse {\n\t\t\trequest := types.KuzzleRequest{}\n\t\t\tjson.Unmarshal(query, &request)\n\t\t\tassert.Equal(t, \"collection\", request.Controller)\n\t\t\tassert.Equal(t, \"list\", request.Action)\n\n\t\t\ttype collections struct {\n\t\t\t\tCollections []types.CollectionsList `json:\"collections\"`\n\t\t\t}\n\n\t\t\tlist := make([]types.CollectionsList, 0)\n\t\t\tlist = append(list, types.CollectionsList{Name: \"collection1\", Type: \"stored\"})\n\t\t\tlist = append(list, types.CollectionsList{Name: \"collection2\", Type: \"stored\"})\n\n\t\t\tc := collections{\n\t\t\t\tCollections: list,\n\t\t\t}\n\n\t\t\th, err := json.Marshal(c)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\treturn types.KuzzleResponse{Result: h}\n\t\t},\n\t}\n\tk, _ := kuzzle.NewKuzzle(c, nil)\n\n\tqo := types.NewQueryOptions()\n\tqo.SetType(\"stored\")\n\n\tres, _ := k.ListCollections(\"index\", qo)\n\n\tassert.Equal(t, \"collection1\", res[0].Name)\n\tassert.Equal(t, \"collection2\", res[1].Name)\n\tassert.Equal(t, \"stored\", res[0].Type)\n\tassert.Equal(t, \"stored\", res[1].Type)\n}\n<commit_msg>Fix typo<commit_after>package kuzzle_test\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/kuzzleio\/sdk-go\/internal\"\n\t\"github.com\/kuzzleio\/sdk-go\/kuzzle\"\n\t\"github.com\/kuzzleio\/sdk-go\/types\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"log\"\n\t\"testing\"\n)\n\nfunc TestListCollectionsIndexNull(t *testing.T) {\n\tk, _ := kuzzle.NewKuzzle(&internal.MockedConnection{}, nil)\n\t_, err := k.ListCollections(\"\", nil)\n\tassert.NotNil(t, err)\n}\n\nfunc TestListCollectionsQueryError(t *testing.T) {\n\tc := &internal.MockedConnection{\n\t\tMockSend: func(query []byte, options types.QueryOptions) types.KuzzleResponse {\n\t\t\trequest := types.KuzzleRequest{}\n\t\t\tjson.Unmarshal(query, &request)\n\t\t\tassert.Equal(t, \"collection\", request.Controller)\n\t\t\tassert.Equal(t, \"index\", request.Index)\n\t\t\tassert.Equal(t, \"list\", request.Action)\n\t\t\treturn types.KuzzleResponse{Error: types.MessageError{Message: \"error\"}}\n\t\t},\n\t}\n\tk, _ := kuzzle.NewKuzzle(c, nil)\n\t_, err := k.ListCollections(\"index\", nil)\n\tassert.NotNil(t, err)\n}\n\nfunc TestListCollections(t *testing.T) {\n\tc := &internal.MockedConnection{\n\t\tMockSend: func(query []byte, options types.QueryOptions) types.KuzzleResponse {\n\t\t\trequest := types.KuzzleRequest{}\n\t\t\tjson.Unmarshal(query, &request)\n\t\t\tassert.Equal(t, \"collection\", request.Controller)\n\t\t\tassert.Equal(t, \"list\", request.Action)\n\n\t\t\ttype collections struct {\n\t\t\t\tCollections []types.CollectionsList `json:\"collections\"`\n\t\t\t}\n\n\t\t\tlist := make([]types.CollectionsList, 0)\n\t\t\tlist = append(list, types.CollectionsList{Name: \"collection1\", Type: \"stored\"})\n\t\t\tlist = append(list, types.CollectionsList{Name: \"collection2\", Type: \"stored\"})\n\n\t\t\tc := collections{\n\t\t\t\tCollections: list,\n\t\t\t}\n\n\t\t\th, err := json.Marshal(c)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\treturn types.KuzzleResponse{Result: h}\n\t\t},\n\t}\n\tk, _ := kuzzle.NewKuzzle(c, nil)\n\n\tres, _ := k.ListCollections(\"index\", nil)\n\n\tassert.Equal(t, \"collection1\", res[0].Name)\n\tassert.Equal(t, \"collection2\", res[1].Name)\n\tassert.Equal(t, \"stored\", res[0].Type)\n\tassert.Equal(t, \"stored\", res[1].Type)\n}\n\nfunc TestListCollectionsWithOptions(t *testing.T) {\n\tc := &internal.MockedConnection{\n\t\tMockSend: func(query []byte, options types.QueryOptions) types.KuzzleResponse {\n\t\t\trequest := types.KuzzleRequest{}\n\t\t\tjson.Unmarshal(query, &request)\n\t\t\tassert.Equal(t, \"collection\", request.Controller)\n\t\t\tassert.Equal(t, \"list\", request.Action)\n\n\t\t\ttype collections struct {\n\t\t\t\tCollections []types.CollectionsList `json:\"collections\"`\n\t\t\t}\n\n\t\t\tlist := make([]types.CollectionsList, 0)\n\t\t\tlist = append(list, types.CollectionsList{Name: \"collection1\", Type: \"stored\"})\n\t\t\tlist = append(list, types.CollectionsList{Name: \"collection2\", Type: \"stored\"})\n\n\t\t\tc := collections{\n\t\t\t\tCollections: list,\n\t\t\t}\n\n\t\t\th, err := json.Marshal(c)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\treturn types.KuzzleResponse{Result: h}\n\t\t},\n\t}\n\tk, _ := kuzzle.NewKuzzle(c, nil)\n\n\tqo := types.NewQueryOptions()\n\tqo.SetType(\"stored\")\n\n\tres, _ := k.ListCollections(\"index\", qo)\n\n\tassert.Equal(t, \"collection1\", res[0].Name)\n\tassert.Equal(t, \"collection2\", res[1].Name)\n\tassert.Equal(t, \"stored\", res[0].Type)\n\tassert.Equal(t, \"stored\", res[1].Type)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n)\n\nfunc main() {\n\tfmt.Println(path.Base(os.Args[0]))\n}\n<commit_msg>monorepo\/cmd\/indexd: fix command<commit_after>package main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/dvrkps\/dojo\/monorepo\/internal\/website\"\n)\n\nfunc main() {\n\tws, err := website.All()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tfor _, w := range ws {\n\t\tr, err := w.Index(w)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"index: %v\\n\", err)\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Printf(\"%v: %v\\n\", w.Key, r)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package mcstore\n\nimport (\n\t\"time\"\n\n\t\"net\/http\/httptest\"\n\n\t\"github.com\/emicklei\/go-restful\"\n\t\"github.com\/materials-commons\/config\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/app\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/app\/flow\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/db\/dai\"\n\t\"github.com\/materials-commons\/mcstore\/testdb\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"ServerApi\", func() {\n\tvar (\n\t\tapi *ServerAPI\n\t\tserver *httptest.Server\n\t\tcontainer *restful.Container\n\t\trr *httptest.ResponseRecorder\n\t\tuploads dai.Uploads\n\t\tuploadRequest CreateUploadRequest\n\t)\n\n\tBeforeEach(func() {\n\t\tcontainer = NewServicesContainerForTest()\n\t\tserver = httptest.NewServer(container)\n\t\trr = httptest.NewRecorder()\n\t\tconfig.Set(\"mcurl\", server.URL)\n\t\tconfig.Set(\"apikey\", \"test\")\n\t\tuploads = dai.NewRUploads(testdb.RSession())\n\t\tapi = NewServerAPI()\n\t\tuploadRequest = CreateUploadRequest{\n\t\t\tProjectID: \"test\",\n\t\t\tDirectoryID: \"test\",\n\t\t\tDirectoryPath: \"test\/test\",\n\t\t\tFileName: \"testreq.txt\",\n\t\t\tFileSize: 4,\n\t\t\tChunkSize: 2,\n\t\t\tFileMTime: time.Now().Format(time.RFC1123),\n\t\t\tChecksum: \"abc123\",\n\t\t}\n\t})\n\n\tAfterEach(func() {\n\t\tserver.Close()\n\t})\n\n\tDescribe(\"CreateUploadRequest\", func() {\n\t\tvar resp *CreateUploadResponse\n\t\tvar err error\n\n\t\tAfterEach(func() {\n\t\t\tif resp != nil {\n\t\t\t\tuploads.Delete(resp.RequestID)\n\t\t\t}\n\t\t})\n\n\t\tIt(\"Should create an upload request\", func() {\n\t\t\tresp, err = api.CreateUploadRequest(uploadRequest)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(resp.RequestID).NotTo(Equal(\"\"))\n\t\t\tExpect(resp.StartingBlock).To(BeNumerically(\"==\", 1))\n\t\t})\n\n\t\tIt(\"Should return the same id for a duplicate upload request\", func() {\n\t\t\tresp, err = api.CreateUploadRequest(uploadRequest)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(resp.RequestID).NotTo(Equal(\"\"))\n\t\t\tExpect(resp.StartingBlock).To(BeNumerically(\"==\", 1))\n\n\t\t\tresp2, err := api.CreateUploadRequest(uploadRequest)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(resp2.RequestID).To(Equal(resp.RequestID))\n\t\t\tExpect(resp.StartingBlock).To(BeNumerically(\"==\", 1))\n\t\t})\n\t})\n\n\tDescribe(\"SendFlowData\", func() {\n\t\tvar flowReq flow.Request\n\t\tvar resp *CreateUploadResponse\n\t\tvar err error\n\n\t\tBeforeEach(func() {\n\t\t\tflowReq = flow.Request{\n\t\t\t\tFlowChunkNumber: 1,\n\t\t\t\tFlowTotalChunks: 2,\n\t\t\t\tFlowChunkSize: 2,\n\t\t\t\tFlowTotalSize: 4,\n\t\t\t\tFlowFileName: \"testreq.txt\",\n\t\t\t\tFlowRelativePath: \"test\/testreq.txt\",\n\t\t\t\tProjectID: \"test\",\n\t\t\t\tDirectoryID: \"test\",\n\t\t\t}\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tif resp != nil {\n\t\t\t\tuploads.Delete(resp.RequestID)\n\t\t\t}\n\t\t})\n\n\t\tIt(\"Should fail on an invalid request id\", func() {\n\t\t\tflowReq.FlowIdentifier = \"i-dont-exist\"\n\t\t\tcresp, err := api.SendFlowData(&flowReq)\n\t\t\tExpect(err).To(Equal(app.ErrInvalid))\n\t\t\tExpect(cresp).To(BeNil())\n\t\t})\n\n\t\tIt(\"Should Send the data an increment and increment starting block\", func() {\n\t\t\tresp, err = api.CreateUploadRequest(uploadRequest)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tflowReq.FlowIdentifier = resp.RequestID\n\t\t\tcresp, err := api.SendFlowData(&flowReq)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(cresp.Done).To(BeFalse())\n\n\t\t\tresp2, err := api.CreateUploadRequest(uploadRequest)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(resp2.RequestID).To(Equal(resp.RequestID))\n\t\t\tExpect(resp2.StartingBlock).To(BeNumerically(\"==\", 2))\n\t\t})\n\t})\n\n\tDescribe(\"ListUploadRequests\", func() {\n\t\tvar resp *CreateUploadResponse\n\n\t\tAfterEach(func() {\n\t\t\tif resp != nil {\n\t\t\t\tuploads.Delete(resp.RequestID)\n\t\t\t}\n\t\t})\n\n\t\tIt(\"Should return an empty list when there are no upload requests\", func() {\n\t\t\tuploads, err := api.ListUploadRequests(\"test\")\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(uploads).To(HaveLen(0))\n\t\t})\n\n\t\tIt(\"Should return a list with one request when a single upload request has been created\", func() {\n\t\t\tvar err error\n\t\t\tresp, err = api.CreateUploadRequest(uploadRequest)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tuploads, err := api.ListUploadRequests(\"test\")\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(uploads).To(HaveLen(1))\n\t\t})\n\t})\n\n\tDescribe(\"DeleteUploadRequest\", func() {\n\t\tvar resp *CreateUploadResponse\n\n\t\tAfterEach(func() {\n\t\t\tif resp != nil {\n\t\t\t\tuploads.Delete(resp.RequestID)\n\t\t\t}\n\t\t})\n\n\t\tIt(\"Should return an error if upload request doesn't exist\", func() {\n\t\t\terr := api.DeleteUploadRequest(\"does-not-exist\")\n\t\t\tExpect(err).NotTo(BeNil())\n\t\t})\n\n\t\tIt(\"Should return an error if user doesn't have permission\", func() {\n\t\t\tvar err error\n\t\t\tresp, err = api.CreateUploadRequest(uploadRequest)\n\t\t\tExpect(err).To(BeNil())\n\n\t\t\t\/\/ Change to a user who doesn't have permission\n\t\t\tconfig.Set(\"apikey\", \"test2\")\n\n\t\t\terr = api.DeleteUploadRequest(resp.RequestID)\n\t\t\tExpect(err).NotTo(BeNil())\n\t\t})\n\n\t\tIt(\"Should succeed if request exists and user has permission\", func() {\n\t\t\tvar err error\n\t\t\tresp, err = api.CreateUploadRequest(uploadRequest)\n\t\t\tExpect(err).To(BeNil())\n\t\t\terr = api.DeleteUploadRequest(resp.RequestID)\n\t\t\tExpect(err).To(BeNil())\n\t\t})\n\t})\n})\n<commit_msg>Add tests for GetDirectory api call.<commit_after>package mcstore\n\nimport (\n\t\"time\"\n\n\t\"net\/http\/httptest\"\n\n\t\"fmt\"\n\n\t\"github.com\/emicklei\/go-restful\"\n\t\"github.com\/materials-commons\/config\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/app\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/app\/flow\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/db\/dai\"\n\t\"github.com\/materials-commons\/mcstore\/testdb\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = fmt.Println\n\nvar _ = Describe(\"ServerApi\", func() {\n\tvar (\n\t\tapi *ServerAPI\n\t\tserver *httptest.Server\n\t\tcontainer *restful.Container\n\t\trr *httptest.ResponseRecorder\n\t\tuploads dai.Uploads\n\t\tuploadRequest CreateUploadRequest\n\t)\n\n\tBeforeEach(func() {\n\t\tcontainer = NewServicesContainerForTest()\n\t\tserver = httptest.NewServer(container)\n\t\trr = httptest.NewRecorder()\n\t\tconfig.Set(\"mcurl\", server.URL)\n\t\tconfig.Set(\"apikey\", \"test\")\n\t\tuploads = dai.NewRUploads(testdb.RSession())\n\t\tapi = NewServerAPI()\n\t\tuploadRequest = CreateUploadRequest{\n\t\t\tProjectID: \"test\",\n\t\t\tDirectoryID: \"test\",\n\t\t\tDirectoryPath: \"test\/test\",\n\t\t\tFileName: \"testreq.txt\",\n\t\t\tFileSize: 4,\n\t\t\tChunkSize: 2,\n\t\t\tFileMTime: time.Now().Format(time.RFC1123),\n\t\t\tChecksum: \"abc123\",\n\t\t}\n\t})\n\n\tAfterEach(func() {\n\t\tserver.Close()\n\t})\n\n\tDescribe(\"CreateUploadRequest\", func() {\n\t\tvar resp *CreateUploadResponse\n\t\tvar err error\n\n\t\tAfterEach(func() {\n\t\t\tif resp != nil {\n\t\t\t\tuploads.Delete(resp.RequestID)\n\t\t\t}\n\t\t})\n\n\t\tIt(\"Should create an upload request\", func() {\n\t\t\tresp, err = api.CreateUploadRequest(uploadRequest)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(resp.RequestID).NotTo(Equal(\"\"))\n\t\t\tExpect(resp.StartingBlock).To(BeNumerically(\"==\", 1))\n\t\t})\n\n\t\tIt(\"Should return the same id for a duplicate upload request\", func() {\n\t\t\tresp, err = api.CreateUploadRequest(uploadRequest)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(resp.RequestID).NotTo(Equal(\"\"))\n\t\t\tExpect(resp.StartingBlock).To(BeNumerically(\"==\", 1))\n\n\t\t\tresp2, err := api.CreateUploadRequest(uploadRequest)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(resp2.RequestID).To(Equal(resp.RequestID))\n\t\t\tExpect(resp.StartingBlock).To(BeNumerically(\"==\", 1))\n\t\t})\n\t})\n\n\tDescribe(\"SendFlowData\", func() {\n\t\tvar flowReq flow.Request\n\t\tvar resp *CreateUploadResponse\n\t\tvar err error\n\n\t\tBeforeEach(func() {\n\t\t\tflowReq = flow.Request{\n\t\t\t\tFlowChunkNumber: 1,\n\t\t\t\tFlowTotalChunks: 2,\n\t\t\t\tFlowChunkSize: 2,\n\t\t\t\tFlowTotalSize: 4,\n\t\t\t\tFlowFileName: \"testreq.txt\",\n\t\t\t\tFlowRelativePath: \"test\/testreq.txt\",\n\t\t\t\tProjectID: \"test\",\n\t\t\t\tDirectoryID: \"test\",\n\t\t\t}\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tif resp != nil {\n\t\t\t\tuploads.Delete(resp.RequestID)\n\t\t\t}\n\t\t})\n\n\t\tIt(\"Should fail on an invalid request id\", func() {\n\t\t\tflowReq.FlowIdentifier = \"i-dont-exist\"\n\t\t\tcresp, err := api.SendFlowData(&flowReq)\n\t\t\tExpect(err).To(Equal(app.ErrInvalid))\n\t\t\tExpect(cresp).To(BeNil())\n\t\t})\n\n\t\tIt(\"Should Send the data an increment and increment starting block\", func() {\n\t\t\tresp, err = api.CreateUploadRequest(uploadRequest)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tflowReq.FlowIdentifier = resp.RequestID\n\t\t\tcresp, err := api.SendFlowData(&flowReq)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(cresp.Done).To(BeFalse())\n\n\t\t\tresp2, err := api.CreateUploadRequest(uploadRequest)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(resp2.RequestID).To(Equal(resp.RequestID))\n\t\t\tExpect(resp2.StartingBlock).To(BeNumerically(\"==\", 2))\n\t\t})\n\t})\n\n\tDescribe(\"ListUploadRequests\", func() {\n\t\tvar resp *CreateUploadResponse\n\n\t\tAfterEach(func() {\n\t\t\tif resp != nil {\n\t\t\t\tuploads.Delete(resp.RequestID)\n\t\t\t}\n\t\t})\n\n\t\tIt(\"Should return an empty list when there are no upload requests\", func() {\n\t\t\tuploads, err := api.ListUploadRequests(\"test\")\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(uploads).To(HaveLen(0))\n\t\t})\n\n\t\tIt(\"Should return a list with one request when a single upload request has been created\", func() {\n\t\t\tvar err error\n\t\t\tresp, err = api.CreateUploadRequest(uploadRequest)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tuploads, err := api.ListUploadRequests(\"test\")\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(uploads).To(HaveLen(1))\n\t\t})\n\t})\n\n\tDescribe(\"DeleteUploadRequest\", func() {\n\t\tvar resp *CreateUploadResponse\n\n\t\tAfterEach(func() {\n\t\t\tif resp != nil {\n\t\t\t\tuploads.Delete(resp.RequestID)\n\t\t\t}\n\t\t})\n\n\t\tIt(\"Should return an error if upload request doesn't exist\", func() {\n\t\t\terr := api.DeleteUploadRequest(\"does-not-exist\")\n\t\t\tExpect(err).NotTo(BeNil())\n\t\t})\n\n\t\tIt(\"Should return an error if user doesn't have permission\", func() {\n\t\t\tvar err error\n\t\t\tresp, err = api.CreateUploadRequest(uploadRequest)\n\t\t\tExpect(err).To(BeNil())\n\n\t\t\t\/\/ Change to a user who doesn't have permission\n\t\t\tconfig.Set(\"apikey\", \"test2\")\n\n\t\t\terr = api.DeleteUploadRequest(resp.RequestID)\n\t\t\tExpect(err).NotTo(BeNil())\n\t\t})\n\n\t\tIt(\"Should succeed if request exists and user has permission\", func() {\n\t\t\tvar err error\n\t\t\tresp, err = api.CreateUploadRequest(uploadRequest)\n\t\t\tExpect(err).To(BeNil())\n\t\t\terr = api.DeleteUploadRequest(resp.RequestID)\n\t\t\tExpect(err).To(BeNil())\n\t\t})\n\t})\n\n\tDescribe(\"GetDirectory\", func() {\n\t\tvar (\n\t\t\tdirs dai.Dirs = dai.NewRDirs(testdb.RSession())\n\t\t\tdirID string\n\t\t\tdirRequest DirectoryRequest\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tdirID = \"\"\n\t\t\tdirRequest = DirectoryRequest{\n\t\t\t\tProjectName: \"test\",\n\t\t\t\tProjectID: \"test\",\n\t\t\t\tPath: \"\/tmp\/test\/abc\",\n\t\t\t}\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tif dirID != \"\" {\n\t\t\t\tdirs.Delete(dirID)\n\t\t\t}\n\t\t})\n\n\t\tIt(\"Should fail if directory doesn't include the project name\", func() {\n\t\t\tvar err error\n\t\t\tdirRequest.Path = \"\/tmp\/test2\/abc\"\n\t\t\tdirID, err := api.GetDirectory(dirRequest)\n\t\t\tExpect(err).To(Equal(app.ErrInvalid))\n\t\t\tExpect(dirID).To(Equal(\"\"))\n\t\t})\n\n\t\tIt(\"Should retrieve an existing directory\", func() {\n\t\t\tdirRequest.Path = \"\/tmp\/test\"\n\t\t\tdirid, err := api.GetDirectory(dirRequest)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(dirid).To(Equal(\"test\"))\n\t\t})\n\n\t\tIt(\"Should create a new directory when it doesn't exist\", func() {\n\t\t\tvar err error\n\t\t\tdirID, err = api.GetDirectory(dirRequest)\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(dirID).To(ContainSubstring(\"-\"))\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package buildkite\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\n\/\/ PipelinesService handles communication with the pipeline related\n\/\/ methods of the buildkite API.\n\/\/\n\/\/ buildkite API docs: https:\/\/buildkite.com\/docs\/api\/pipelines\ntype PipelinesService struct {\n\tclient *Client\n}\n\n\/\/ CreatePipeline - Create a Pipeline.\ntype CreatePipeline struct {\n\tName string `json:\"name\" yaml:\"name\"`\n\tRepository string `json:\"repository\" yaml:\"repository\"`\n\n\t\/\/ Either configuration needs to be specified as a yaml string or steps.\n\tConfiguration string `json:\"configuration,omitempty\" yaml:\"configuration,omitempty\"`\n\tSteps []Step `json:\"steps,omitempty\" yaml:\"steps,omitempty\"`\n\n\t\/\/ Optional fields\n\tDefaultBranch string `json:\"default_branch,omitempty\" yaml:\"default_branch,omitempty\"`\n\tDescription string `json:\"description,omitempty\" yaml:\"description,omitempty\"`\n\tEnv map[string]string `json:\"env,omitempty\" yaml:\"env,omitempty\"`\n\tProviderSettings ProviderSettings `json:\"provider_settings,omitempty\" yaml:\"provider_settings,omitempty\"`\n\tBranchConfiguration string `json:\"branch_configuration,omitempty\" yaml:\"branch_configuration,omitempty\"`\n\tSkipQueuedBranchBuilds bool `json:\"skip_queued_branch_builds,omitempty\" yaml:\"skip_queued_branch_builds,omitempty\"`\n\tSkipQueuedBranchBuildsFilter string `json:\"skip_queued_branch_builds_filter,omitempty\" yaml:\"skip_queued_branch_builds_filter,omitempty\"`\n\tCancelRunningBranchBuilds bool `json:\"cancel_running_branch_builds,omitempty\" yaml:\"cancel_running_branch_builds,omitempty\"`\n\tCancelRunningBranchBuildsFilter string `json:\"cancel_running_branch_builds_filter,omitempty\" yaml:\"cancel_running_branch_builds_filter,omitempty\"`\n\tTeamUuids []string `json:\"team_uuids,omitempty\" yaml:\"team_uuids,omitempty\"`\n\tClusterID string `json:\"cluster_id,omitempty\" yaml:\"cluster_id,omitempty\"`\n}\n\n\/\/ Pipeline represents a buildkite pipeline.\ntype Pipeline struct {\n\tID *string `json:\"id,omitempty\" yaml:\"id,omitempty\"`\n\tURL *string `json:\"url,omitempty\" yaml:\"url,omitempty\"`\n\tWebURL *string `json:\"web_url,omitempty\" yaml:\"web_url,omitempty\"`\n\tName *string `json:\"name,omitempty\" yaml:\"name,omitempty\"`\n\tSlug *string `json:\"slug,omitempty\" yaml:\"slug,omitempty\"`\n\tRepository *string `json:\"repository,omitempty\" yaml:\"repository,omitempty\"`\n\tBuildsURL *string `json:\"builds_url,omitempty\" yaml:\"builds_url,omitempty\"`\n\tBadgeURL *string `json:\"badge_url,omitempty\" yaml:\"badge_url,omitempty\"`\n\tCreatedAt *Timestamp `json:\"created_at,omitempty\" yaml:\"created_at,omitempty\"`\n\tArchivedAt *Timestamp `json:\"archived_at,omitempty\" yaml:\"archived_at,omitempty\"`\n\tDefaultBranch *string `json:\"default_branch,omitempty\" yaml:\"default_branch,omitempty\"`\n\tDescription *string `json:\"description,omitempty\" yaml:\"description,omitempty\"`\n\tBranchConfiguration *string `json:\"branch_configuration,omitempty\" yaml:\"branch_configuration,omitempty\"`\n\tSkipQueuedBranchBuilds *bool `json:\"skip_queued_branch_builds,omitempty\" yaml:\"skip_queued_branch_builds,omitempty\"`\n\tSkipQueuedBranchBuildsFilter *string `json:\"skip_queued_branch_builds_filter,omitempty\" yaml:\"skip_queued_branch_builds_filter,omitempty\"`\n\tCancelRunningBranchBuilds *bool `json:\"cancel_running_branch_builds,omitempty\" yaml:\"cancel_running_branch_builds,omitempty\"`\n\tCancelRunningBranchBuildsFilter *string `json:\"cancel_running_branch_builds_filter,omitempty\" yaml:\"cancel_running_branch_builds_filter,omitempty\"`\n\tClusterID *string `json:\"cluster_id,omitempty\" yaml:\"cluster_id,omitempty\"`\n\tVisibility *string `json:\"visibility,omitempty\" yaml:\"visibility,omitempty\"`\n\n\tScheduledBuildsCount *int `json:\"scheduled_builds_count,omitempty\" yaml:\"scheduled_builds_count,omitempty\"`\n\tRunningBuildsCount *int `json:\"running_builds_count,omitempty\" yaml:\"running_builds_count,omitempty\"`\n\tScheduledJobsCount *int `json:\"scheduled_jobs_count,omitempty\" yaml:\"scheduled_jobs_count,omitempty\"`\n\tRunningJobsCount *int `json:\"running_jobs_count,omitempty\" yaml:\"running_jobs_count,omitempty\"`\n\tWaitingJobsCount *int `json:\"waiting_jobs_count,omitempty\" yaml:\"waiting_jobs_count,omitempty\"`\n\n\t\/\/ the provider of sources\n\tProvider *Provider `json:\"provider,omitempty\" yaml:\"provider,omitempty\"`\n\n\t\/\/ build steps\n\tSteps []*Step `json:\"steps,omitempty\" yaml:\"steps,omitempty\"`\n\tConfiguration string `json:\"configuration,omitempty\" yaml:\"configuration,omitempty\"`\n\tEnv map[string]string `json:\"env,omitempty\" yaml:\"env,omitempty\"`\n}\n\n\/\/ Step represents a build step in buildkites build pipeline\ntype Step struct {\n\tType *string `json:\"type,omitempty\" yaml:\"type,omitempty\"`\n\tName *string `json:\"name,omitempty\" yaml:\"name,omitempty\"`\n\tCommand *string `json:\"command,omitempty\" yaml:\"command,omitempty\"`\n\tArtifactPaths *string `json:\"artifact_paths,omitempty\" yaml:\"artifact_paths,omitempty\"`\n\tBranchConfiguration *string `json:\"branch_configuration,omitempty\" yaml:\"branch_configuration,omitempty\"`\n\tEnv map[string]string `json:\"env,omitempty\" yaml:\"env,omitempty\"`\n\tTimeoutInMinutes *int `json:\"timeout_in_minutes,omitempty\" yaml:\"timeout_in_minutes,omitempty\"`\n\tAgentQueryRules []string `json:\"agent_query_rules,omitempty\" yaml:\"agent_query_rules,omitempty\"`\n}\n\n\/\/ PipelineListOptions specifies the optional parameters to the\n\/\/ PipelinesService.List method.\ntype PipelineListOptions struct {\n\tListOptions\n}\n\n\/\/ Create - Creates a pipeline for a given organisation.\n\/\/\n\/\/ buildkite API docs: https:\/\/buildkite.com\/docs\/rest-api\/pipelines#create-a-pipeline\nfunc (ps *PipelinesService) Create(org string, p *CreatePipeline) (*Pipeline, *Response, error) {\n\tu := fmt.Sprintf(\"v2\/organizations\/%s\/pipelines\", org)\n\n\treq, err := ps.client.NewRequest(\"POST\", u, p)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tpipeline := new(Pipeline)\n\tresp, err := ps.client.Do(req, pipeline)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn pipeline, resp, err\n}\n\n\/\/ Get fetches a pipeline.\n\/\/\n\/\/ buildkite API docs: https:\/\/buildkite.com\/docs\/rest-api\/pipelines#get-a-pipeline\nfunc (ps *PipelinesService) Get(org string, slug string) (*Pipeline, *Response, error) {\n\n\tu := fmt.Sprintf(\"v2\/organizations\/%s\/pipelines\/%s\", org, slug)\n\n\treq, err := ps.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tpipeline := new(Pipeline)\n\tresp, err := ps.client.Do(req, pipeline)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn pipeline, resp, err\n}\n\n\/\/ List the pipelines for a given organisation.\n\/\/\n\/\/ buildkite API docs: https:\/\/buildkite.com\/docs\/api\/pipelines#list-pipelines\nfunc (ps *PipelinesService) List(org string, opt *PipelineListOptions) ([]Pipeline, *Response, error) {\n\tvar u string\n\n\tu = fmt.Sprintf(\"v2\/organizations\/%s\/pipelines\", org)\n\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := ps.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tpipelines := new([]Pipeline)\n\tresp, err := ps.client.Do(req, pipelines)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn *pipelines, resp, err\n}\n\n\/\/ Delete a pipeline.\n\/\/\n\/\/ buildkite API docs: https:\/\/buildkite.com\/docs\/rest-api\/pipelines#delete-a-pipeline\nfunc (ps *PipelinesService) Delete(org string, slug string) (*Response, error) {\n\n\tu := fmt.Sprintf(\"v2\/organizations\/%s\/pipelines\/%s\", org, slug)\n\n\treq, err := ps.client.NewRequest(\"DELETE\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps.client.Do(req, nil)\n}\n\n\/\/ Update - Updates a pipeline.\n\/\/\n\/\/ buildkite API docs: https:\/\/buildkite.com\/docs\/rest-api\/pipelines#update-a-pipeline\nfunc (ps *PipelinesService) Update(org string, p *Pipeline) (*Response, error) {\n\tif p == nil {\n\t\treturn nil, errors.New(\"pipeline must not be nil\")\n\t}\n\n\tu := fmt.Sprintf(\"v2\/organizations\/%s\/pipelines\/%s\", org, *p.Slug)\n\n\treq, err := ps.client.NewRequest(\"PATCH\", u, p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := ps.client.Do(req, p)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\n\treturn resp, err\n}\n\n\/\/ AddWebhook - Adds webhook in github for pipeline.\n\/\/\n\/\/ buildkite API docs: https:\/\/buildkite.com\/docs\/apis\/rest-api\/pipelines#add-a-webhook\nfunc (ps *PipelinesService) AddWebhook(org string, slug string) (*Response, error) {\n\n\tu := fmt.Sprintf(\"v2\/organizations\/%s\/pipelines\/%s\/webhook\", org, slug)\n\n\treq, err := ps.client.NewRequest(\"POST\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps.client.Do(req, nil)\n}\n\n\/\/ Archive - Archives a pipeline.\n\/\/\n\/\/ buildkite API docs: https:\/\/buildkite.com\/docs\/apis\/rest-api\/pipelines#archive-a-pipeline\nfunc (ps *PipelinesService) Archive(org string, slug string) (*Response, error) {\n\n\tu := fmt.Sprintf(\"v2\/organizations\/%s\/pipelines\/%s\/archive\", org, slug)\n\n\treq, err := ps.client.NewRequest(\"POST\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps.client.Do(req, nil)\n}\n\n\/\/ Unarchive - Unarchive a pipeline.\n\/\/\n\/\/ buildkite API docs: https:\/\/buildkite.com\/docs\/apis\/rest-api\/pipelines#unarchive-a-pipeline\nfunc (ps *PipelinesService) Unarchive(org string, slug string) (*Response, error) {\n\n\tu := fmt.Sprintf(\"v2\/organizations\/%s\/pipelines\/%s\/unarchive\", org, slug)\n\n\treq, err := ps.client.NewRequest(\"POST\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps.client.Do(req, nil)\n}\n<commit_msg>revert pipelines<commit_after>package buildkite\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\n\/\/ PipelinesService handles communication with the pipeline related\n\/\/ methods of the buildkite API.\n\/\/\n\/\/ buildkite API docs: https:\/\/buildkite.com\/docs\/api\/pipelines\ntype PipelinesService struct {\n\tclient *Client\n}\n\n\/\/ CreatePipeline - Create a Pipeline.\ntype CreatePipeline struct {\n\tName string `json:\"name\" yaml:\"name\"`\n\tRepository string `json:\"repository\" yaml:\"repository\"`\n\n\t\/\/ Either configuration needs to be specified as a yaml string or steps.\n\tConfiguration string `json:\"configuration,omitempty\" yaml:\"configuration,omitempty\"`\n\tSteps []Step `json:\"steps,omitempty\" yaml:\"steps,omitempty\"`\n\n\t\/\/ Optional fields\n\tDefaultBranch string `json:\"default_branch,omitempty\" yaml:\"default_branch,omitempty\"`\n\tDescription string `json:\"description,omitempty\" yaml:\"description,omitempty\"`\n\tEnv map[string]string `json:\"env,omitempty\" yaml:\"env,omitempty\"`\n\tProviderSettings ProviderSettings `json:\"provider_settings,omitempty\" yaml:\"provider_settings,omitempty\"`\n\tBranchConfiguration string `json:\"branch_configuration,omitempty\" yaml:\"branch_configuration,omitempty\"`\n\tSkipQueuedBranchBuilds bool `json:\"skip_queued_branch_builds,omitempty\" yaml:\"skip_queued_branch_builds,omitempty\"`\n\tSkipQueuedBranchBuildsFilter string `json:\"skip_queued_branch_builds_filter,omitempty\" yaml:\"skip_queued_branch_builds_filter,omitempty\"`\n\tCancelRunningBranchBuilds bool `json:\"cancel_running_branch_builds,omitempty\" yaml:\"cancel_running_branch_builds,omitempty\"`\n\tCancelRunningBranchBuildsFilter string `json:\"cancel_running_branch_builds_filter,omitempty\" yaml:\"cancel_running_branch_builds_filter,omitempty\"`\n\tTeamUuids []string `json:\"team_uuids,omitempty\" yaml:\"team_uuids,omitempty\"`\n\tClusterID string `json:\"cluster_id,omitempty\" yaml:\"cluster_id,omitempty\"`\n}\n\n\/\/ Pipeline represents a buildkite pipeline.\ntype Pipeline struct {\n\tID *string `json:\"id,omitempty\" yaml:\"id,omitempty\"`\n\tURL *string `json:\"url,omitempty\" yaml:\"url,omitempty\"`\n\tWebURL *string `json:\"web_url,omitempty\" yaml:\"web_url,omitempty\"`\n\tName *string `json:\"name,omitempty\" yaml:\"name,omitempty\"`\n\tSlug *string `json:\"slug,omitempty\" yaml:\"slug,omitempty\"`\n\tRepository *string `json:\"repository,omitempty\" yaml:\"repository,omitempty\"`\n\tBuildsURL *string `json:\"builds_url,omitempty\" yaml:\"builds_url,omitempty\"`\n\tBadgeURL *string `json:\"badge_url,omitempty\" yaml:\"badge_url,omitempty\"`\n\tCreatedAt *Timestamp `json:\"created_at,omitempty\" yaml:\"created_at,omitempty\"`\n\tArchivedAt *Timestamp `json:\"archived_at,omitempty\" yaml:\"archived_at,omitempty\"`\n\tDefaultBranch *string `json:\"default_branch,omitempty\" yaml:\"default_branch,omitempty\"`\n\tDescription *string `json:\"description,omitempty\" yaml:\"description,omitempty\"`\n\tBranchConfiguration *string `json:\"branch_configuration,omitempty\" yaml:\"branch_configuration,omitempty\"`\n\tSkipQueuedBranchBuilds *bool `json:\"skip_queued_branch_builds,omitempty\" yaml:\"skip_queued_branch_builds,omitempty\"`\n\tSkipQueuedBranchBuildsFilter *string `json:\"skip_queued_branch_builds_filter,omitempty\" yaml:\"skip_queued_branch_builds_filter,omitempty\"`\n\tCancelRunningBranchBuilds *bool `json:\"cancel_running_branch_builds,omitempty\" yaml:\"cancel_running_branch_builds,omitempty\"`\n\tCancelRunningBranchBuildsFilter *string `json:\"cancel_running_branch_builds_filter,omitempty\" yaml:\"cancel_running_branch_builds_filter,omitempty\"`\n\tClusterID *string `json:\"cluster_id,omitempty\" yaml:\"cluster_id,omitempty\"`\n\n\tScheduledBuildsCount *int `json:\"scheduled_builds_count,omitempty\" yaml:\"scheduled_builds_count,omitempty\"`\n\tRunningBuildsCount *int `json:\"running_builds_count,omitempty\" yaml:\"running_builds_count,omitempty\"`\n\tScheduledJobsCount *int `json:\"scheduled_jobs_count,omitempty\" yaml:\"scheduled_jobs_count,omitempty\"`\n\tRunningJobsCount *int `json:\"running_jobs_count,omitempty\" yaml:\"running_jobs_count,omitempty\"`\n\tWaitingJobsCount *int `json:\"waiting_jobs_count,omitempty\" yaml:\"waiting_jobs_count,omitempty\"`\n\n\t\/\/ the provider of sources\n\tProvider *Provider `json:\"provider,omitempty\" yaml:\"provider,omitempty\"`\n\n\t\/\/ build steps\n\tSteps []*Step `json:\"steps,omitempty\" yaml:\"steps,omitempty\"`\n\tConfiguration string `json:\"configuration,omitempty\" yaml:\"configuration,omitempty\"`\n\tEnv map[string]interface{} `json:\"env,omitempty\" yaml:\"env,omitempty\"`\n}\n\n\/\/ Step represents a build step in buildkites build pipeline\ntype Step struct {\n\tType *string `json:\"type,omitempty\" yaml:\"type,omitempty\"`\n\tName *string `json:\"name,omitempty\" yaml:\"name,omitempty\"`\n\tCommand *string `json:\"command,omitempty\" yaml:\"command,omitempty\"`\n\tArtifactPaths *string `json:\"artifact_paths,omitempty\" yaml:\"artifact_paths,omitempty\"`\n\tBranchConfiguration *string `json:\"branch_configuration,omitempty\" yaml:\"branch_configuration,omitempty\"`\n\tEnv map[string]string `json:\"env,omitempty\" yaml:\"env,omitempty\"`\n\tTimeoutInMinutes *int `json:\"timeout_in_minutes,omitempty\" yaml:\"timeout_in_minutes,omitempty\"`\n\tAgentQueryRules []string `json:\"agent_query_rules,omitempty\" yaml:\"agent_query_rules,omitempty\"`\n}\n\n\/\/ PipelineListOptions specifies the optional parameters to the\n\/\/ PipelinesService.List method.\ntype PipelineListOptions struct {\n\tListOptions\n}\n\n\/\/ Create - Creates a pipeline for a given organisation.\n\/\/\n\/\/ buildkite API docs: https:\/\/buildkite.com\/docs\/rest-api\/pipelines#create-a-pipeline\nfunc (ps *PipelinesService) Create(org string, p *CreatePipeline) (*Pipeline, *Response, error) {\n\tu := fmt.Sprintf(\"v2\/organizations\/%s\/pipelines\", org)\n\n\treq, err := ps.client.NewRequest(\"POST\", u, p)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tpipeline := new(Pipeline)\n\tresp, err := ps.client.Do(req, pipeline)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn pipeline, resp, err\n}\n\n\/\/ Get fetches a pipeline.\n\/\/\n\/\/ buildkite API docs: https:\/\/buildkite.com\/docs\/rest-api\/pipelines#get-a-pipeline\nfunc (ps *PipelinesService) Get(org string, slug string) (*Pipeline, *Response, error) {\n\n\tu := fmt.Sprintf(\"v2\/organizations\/%s\/pipelines\/%s\", org, slug)\n\n\treq, err := ps.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tpipeline := new(Pipeline)\n\tresp, err := ps.client.Do(req, pipeline)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn pipeline, resp, err\n}\n\n\/\/ List the pipelines for a given organisation.\n\/\/\n\/\/ buildkite API docs: https:\/\/buildkite.com\/docs\/api\/pipelines#list-pipelines\nfunc (ps *PipelinesService) List(org string, opt *PipelineListOptions) ([]Pipeline, *Response, error) {\n\tvar u string\n\n\tu = fmt.Sprintf(\"v2\/organizations\/%s\/pipelines\", org)\n\n\tu, err := addOptions(u, opt)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := ps.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tpipelines := new([]Pipeline)\n\tresp, err := ps.client.Do(req, pipelines)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\n\treturn *pipelines, resp, err\n}\n\n\/\/ Delete a pipeline.\n\/\/\n\/\/ buildkite API docs: https:\/\/buildkite.com\/docs\/rest-api\/pipelines#delete-a-pipeline\nfunc (ps *PipelinesService) Delete(org string, slug string) (*Response, error) {\n\n\tu := fmt.Sprintf(\"v2\/organizations\/%s\/pipelines\/%s\", org, slug)\n\n\treq, err := ps.client.NewRequest(\"DELETE\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps.client.Do(req, nil)\n}\n\n\/\/ Update - Updates a pipeline.\n\/\/\n\/\/ buildkite API docs: https:\/\/buildkite.com\/docs\/rest-api\/pipelines#update-a-pipeline\nfunc (ps *PipelinesService) Update(org string, p *Pipeline) (*Response, error) {\n\tif p == nil {\n\t\treturn nil, errors.New(\"pipeline must not be nil\")\n\t}\n\n\tu := fmt.Sprintf(\"v2\/organizations\/%s\/pipelines\/%s\", org, *p.Slug)\n\n\treq, err := ps.client.NewRequest(\"PATCH\", u, p)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := ps.client.Do(req, p)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\n\treturn resp, err\n}\n\n\/\/ AddWebhook - Adds webhook in github for pipeline.\n\/\/\n\/\/ buildkite API docs: https:\/\/buildkite.com\/docs\/apis\/rest-api\/pipelines#add-a-webhook\nfunc (ps *PipelinesService) AddWebhook(org string, slug string) (*Response, error) {\n\n\tu := fmt.Sprintf(\"v2\/organizations\/%s\/pipelines\/%s\/webhook\", org, slug)\n\n\treq, err := ps.client.NewRequest(\"POST\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps.client.Do(req, nil)\n}\n\n\/\/ Archive - Archives a pipeline.\n\/\/\n\/\/ buildkite API docs: https:\/\/buildkite.com\/docs\/apis\/rest-api\/pipelines#archive-a-pipeline\nfunc (ps *PipelinesService) Archive(org string, slug string) (*Response, error) {\n\n\tu := fmt.Sprintf(\"v2\/organizations\/%s\/pipelines\/%s\/archive\", org, slug)\n\n\treq, err := ps.client.NewRequest(\"POST\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps.client.Do(req, nil)\n}\n\n\/\/ Unarchive - Unarchive a pipeline.\n\/\/\n\/\/ buildkite API docs: https:\/\/buildkite.com\/docs\/apis\/rest-api\/pipelines#unarchive-a-pipeline\nfunc (ps *PipelinesService) Unarchive(org string, slug string) (*Response, error) {\n\n\tu := fmt.Sprintf(\"v2\/organizations\/%s\/pipelines\/%s\/unarchive\", org, slug)\n\n\treq, err := ps.client.NewRequest(\"POST\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps.client.Do(req, nil)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2021 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package bitlocker provides functionality for managing Bitlocker.\npackage bitlocker\n\nimport (\n\t\"github.com\/google\/logger\"\n\t\"github.com\/iamacarpet\/go-win64api\"\n)\n\nvar (\n\t\/\/ Test Helpers\n\tfuncBackup = winapi.BackupBitLockerRecoveryKeys\n\tfuncRecoveryInfo = winapi.GetBitLockerRecoveryInfo\n)\n\n\/\/ BackupToAD backs up Bitlocker recovery keys to Active Directory.\nfunc BackupToAD() error {\n\tinfos, err := funcRecoveryInfo()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvolIDs := []string{}\n\tfor _, i := range infos {\n\t\tif i.ConversionStatus != 1 {\n\t\t\tlogger.Warningf(\"Skipping volume %s due to conversion status (%d).\", i.DriveLetter, i.ConversionStatus)\n\t\t\tcontinue\n\t\t}\n\t\tlogger.Infof(\"Backing up Bitlocker recovery password for drive %q.\", i.DriveLetter)\n\t\tvolIDs = append(volIDs, i.PersistentVolumeID)\n\t}\n\treturn funcBackup(volIDs)\n}\n<commit_msg>Add bitlocker.EncryptWithTPM to Glazier.<commit_after>\/\/ Copyright 2021 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package bitlocker provides functionality for managing Bitlocker.\npackage bitlocker\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/google\/logger\"\n\t\"github.com\/go-ole\/go-ole\"\n\t\"github.com\/go-ole\/go-ole\/oleutil\"\n\t\"github.com\/iamacarpet\/go-win64api\"\n)\n\nvar (\n\t\/\/ Test Helpers\n\tfuncBackup = winapi.BackupBitLockerRecoveryKeys\n\tfuncRecoveryInfo = winapi.GetBitLockerRecoveryInfo\n)\n\n\/\/ BackupToAD backs up Bitlocker recovery keys to Active Directory.\nfunc BackupToAD() error {\n\tinfos, err := funcRecoveryInfo()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvolIDs := []string{}\n\tfor _, i := range infos {\n\t\tif i.ConversionStatus != 1 {\n\t\t\tlogger.Warningf(\"Skipping volume %s due to conversion status (%d).\", i.DriveLetter, i.ConversionStatus)\n\t\t\tcontinue\n\t\t}\n\t\tlogger.Infof(\"Backing up Bitlocker recovery password for drive %q.\", i.DriveLetter)\n\t\tvolIDs = append(volIDs, i.PersistentVolumeID)\n\t}\n\treturn funcBackup(volIDs)\n}\n\ntype wmi struct {\n\tintf *ole.IDispatch\n\tsvc *ole.IDispatch\n}\n\nfunc (w *wmi) connect() error {\n\tunknown, err := oleutil.CreateObject(\"WbemScripting.SWbemLocator\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to create initial object, %w\", err)\n\t}\n\tdefer unknown.Release()\n\tw.intf, err = unknown.QueryInterface(ole.IID_IDispatch)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to create initial object, %w\", err)\n\t}\n\tserviceRaw, err := oleutil.CallMethod(w.intf, \"ConnectServer\", nil, `\\\\.\\ROOT\\CIMV2\\Security\\MicrosoftVolumeEncryption`)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"permission denied: %w\", err)\n\t}\n\tw.svc = serviceRaw.ToIDispatch()\n\treturn nil\n}\n\nfunc (w *wmi) close() {\n\tw.svc.Release()\n\tw.intf.Release()\n}\n\nconst (\n\t\/\/ Encryption Methods\n\t\/\/ https:\/\/docs.microsoft.com\/en-us\/windows\/win32\/secprov\/getencryptionmethod-win32-encryptablevolume\n\tNone int32 = iota\n\tAES128WithDiffuser\n\tAES256WithDiffuser\n\tAES128\n\tAES256\n\tHardwareEncryption\n\tXtsAES128\n\tXtsAES256\n\n\t\/\/ Encryption Flags\n\t\/\/ https:\/\/docs.microsoft.com\/en-us\/windows\/win32\/secprov\/encrypt-win32-encryptablevolume\n\tEncryptDataOnly int32 = 0x00000001\n\tEncryptDemandWipe int32 = 0x00000002\n\tEncryptSynchronous int32 = 0x00010000\n\n\t\/\/ Error Codes\n\tFVE_E_BOOTABLE_CDDVD int32 = -2144272336\n\tFVE_E_PROTECTOR_EXISTS int32 = -2144272335\n)\n\nfunc encryptErrHandler(val int32) error {\n\tswitch val {\n\tcase FVE_E_BOOTABLE_CDDVD:\n\t\treturn fmt.Errorf(\"BitLocker Drive Encryption detected bootable media (CD or DVD) in the computer. \" +\n\t\t\t\"Remove the media and restart the computer before configuring BitLocker.\")\n\tcase FVE_E_PROTECTOR_EXISTS:\n\t\treturn fmt.Errorf(\"key protector cannot be added; only one key protector of this type is allowed for this drive\")\n\tdefault:\n\t\treturn fmt.Errorf(\"error code returned during encryption: %d\", val)\n\t}\n}\n\n\/\/ EncryptWithTPM encrypts the drive with Bitlocker using TPM key protection.\n\/\/\n\/\/ Example: bitlocker.EncryptWithTPM(\"c:\", bitlocker.XtsAES256, bitlocker.EncryptDataOnly)\nfunc EncryptWithTPM(driveLetter string, method int32, flags int32) error {\n\tole.CoInitialize(0)\n\tdefer ole.CoUninitialize()\n\tw := &wmi{}\n\tif err := w.connect(); err != nil {\n\t\treturn fmt.Errorf(\"wmi.Connect: %w\", err)\n\t}\n\tdefer w.close()\n\traw, err := oleutil.CallMethod(w.svc, \"ExecQuery\",\n\t\t\"SELECT * FROM Win32_EncryptableVolume WHERE DriveLetter = '\"+driveLetter+\"'\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"ExecQuery: %w\", err)\n\t}\n\tresult := raw.ToIDispatch()\n\tdefer result.Release()\n\n\titemRaw, err := oleutil.CallMethod(result, \"ItemIndex\", 0)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to fetch result row while processing BitLocker info: %w\", err)\n\t}\n\titem := itemRaw.ToIDispatch()\n\tdefer item.Release()\n\n\t\/\/ https:\/\/docs.microsoft.com\/en-us\/windows\/win32\/secprov\/protectkeywithtpm-win32-encryptablevolume\n\tvar volumeKeyProtectorID ole.VARIANT\n\tole.VariantInit(&volumeKeyProtectorID)\n\tresultRaw, err := oleutil.CallMethod(item, \"ProtectKeyWithTPM\", nil, nil, &volumeKeyProtectorID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error calling ProtectKeyWithTPM(%s): %w\", driveLetter, err)\n\t} else if val, ok := resultRaw.Value().(int32); val != 0 || !ok {\n\t\treturn encryptErrHandler(val)\n\t}\n\n\tresultRaw, err = oleutil.CallMethod(item, \"Encrypt\", method, flags)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error calling Encrypt(%s): %w\", driveLetter, err)\n\t} else if val, ok := resultRaw.Value().(int32); val != 0 || !ok {\n\t\treturn encryptErrHandler(val)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package blackjack\n\nimport \"log\"\n\n\/\/ ParseCard returns the integer value of a card following blackjack ruleset.\nfunc ParseCard(card string) int {\n\tswitch card {\n\tcase \"one\":\n\t\treturn 1\n\tcase \"two\":\n\t\treturn 2\n\tcase \"three\":\n\t\treturn 3\n\tcase \"four\":\n\t\treturn 4\n\tcase \"five\":\n\t\treturn 5\n\tcase \"six\":\n\t\treturn 6\n\tcase \"seven\":\n\t\treturn 7\n\tcase \"eight\":\n\t\treturn 8\n\tcase \"nine\":\n\t\treturn 9\n\tcase \"ten\", \"jack\", \"queen\", \"king\":\n\t\treturn 10\n\tcase \"ace\":\n\t\treturn 11\n\tdefault:\n\t\tlog.Printf(\"card %s is not a valid card\", card)\n\t\treturn 0\n\t}\n}\n\n\/\/ IsBlackjack returns true if the player has a blackjack, false otherwise.\nfunc IsBlackjack(card1, card2 string) bool {\n\treturn ParseCard(card1)+ParseCard(card2) == 21\n}\n\n\/\/ LargeHand implements the decision tree for hand scores larger than 20 points.\nfunc LargeHand(isBlackjack bool, dealerScore int) string {\n\tpanic(\"Please implement the LargeHand function\")\n}\n\n\/\/ SmallHand implements the decision tree for hand scores with less than 21 points.\nfunc SmallHand(handScore, dealerScore int) string {\n\tpanic(\"Please implement the SmallHand function\")\n}\n\n\/\/ FirstTurn returns the semi-optimal decision for the first turn, given the cards of the player and the dealer.\n\/\/ This function is already implemented and does not need to be edited. It pulls the other functions together in a\n\/\/ complete decision tree for the first turn.\nfunc FirstTurn(card1, card2, dealerCard string) string {\n\thandScore := ParseCard(card1) + ParseCard(card2)\n\tdealerScore := ParseCard(dealerCard)\n\n\tif 20 < handScore {\n\t\treturn LargeHand(IsBlackjack(card1, card2), dealerScore)\n\t}\n\treturn SmallHand(handScore, dealerScore)\n}\n<commit_msg>Implement large hand<commit_after>package blackjack\n\nimport \"log\"\n\n\/\/ ParseCard returns the integer value of a card following blackjack ruleset.\nfunc ParseCard(card string) int {\n\tswitch card {\n\tcase \"one\":\n\t\treturn 1\n\tcase \"two\":\n\t\treturn 2\n\tcase \"three\":\n\t\treturn 3\n\tcase \"four\":\n\t\treturn 4\n\tcase \"five\":\n\t\treturn 5\n\tcase \"six\":\n\t\treturn 6\n\tcase \"seven\":\n\t\treturn 7\n\tcase \"eight\":\n\t\treturn 8\n\tcase \"nine\":\n\t\treturn 9\n\tcase \"ten\", \"jack\", \"queen\", \"king\":\n\t\treturn 10\n\tcase \"ace\":\n\t\treturn 11\n\tdefault:\n\t\tlog.Printf(\"card %s is not a valid card\", card)\n\t\treturn 0\n\t}\n}\n\n\/\/ IsBlackjack returns true if the player has a blackjack, false otherwise.\nfunc IsBlackjack(card1, card2 string) bool {\n\treturn ParseCard(card1)+ParseCard(card2) == 21\n}\n\n\/\/ LargeHand implements the decision tree for hand scores larger than 20 points.\nfunc LargeHand(isBlackjack bool, dealerScore int) string {\n\tif isBlackjack && dealerScore < 10 {\n\t\treturn \"W\"\n\t} else if isBlackjack {\n\t\treturn \"S\"\n\t} else {\n\t\treturn \"P\"\n\t}\n}\n\n\/\/ SmallHand implements the decision tree for hand scores with less than 21 points.\nfunc SmallHand(handScore, dealerScore int) string {\n\tpanic(\"Please implement the SmallHand function\")\n}\n\n\/\/ FirstTurn returns the semi-optimal decision for the first turn, given the cards of the player and the dealer.\n\/\/ This function is already implemented and does not need to be edited. It pulls the other functions together in a\n\/\/ complete decision tree for the first turn.\nfunc FirstTurn(card1, card2, dealerCard string) string {\n\thandScore := ParseCard(card1) + ParseCard(card2)\n\tdealerScore := ParseCard(dealerCard)\n\n\tif 20 < handScore {\n\t\treturn LargeHand(IsBlackjack(card1, card2), dealerScore)\n\t}\n\treturn SmallHand(handScore, dealerScore)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\n\nPackage callgraph defines the call graph and various algorithms\nand utilities to operate on it.\n\nA call graph is a labelled directed graph whose nodes represent\nfunctions and whose edge labels represent syntactic function call\nsites. The presence of a labelled edge (caller, site, callee)\nindicates that caller may call callee at the specified call site.\n\nA call graph is a multigraph: it may contain multiple edges (caller,\n*, callee) connecting the same pair of nodes, so long as the edges\ndiffer by label; this occurs when one function calls another function\nfrom multiple call sites. Also, it may contain multiple edges\n(caller, site, *) that differ only by callee; this indicates a\npolymorphic call.\n\nA SOUND call graph is one that overapproximates the dynamic calling\nbehaviors of the program in all possible executions. One call graph\nis more PRECISE than another if it is a smaller overapproximation of\nthe dynamic behavior.\n\nAll call graphs have a synthetic root node which is responsible for\ncalling main() and init().\n\nCalls to built-in functions (e.g. panic, println) are not represented\nin the call graph; they are treated like built-in operators of the\nlanguage.\n\n*\/\npackage callgraph \/\/ import \"golang.org\/x\/tools\/go\/callgraph\"\n\n\/\/ TODO(adonovan): add a function to eliminate wrappers from the\n\/\/ callgraph, preserving topology.\n\/\/ More generally, we could eliminate \"uninteresting\" nodes such as\n\/\/ nodes from packages we don't care about.\n\nimport (\n\t\"fmt\"\n\t\"go\/token\"\n\n\t\"golang.org\/x\/tools\/go\/ssa\"\n)\n\n\/\/ A Graph represents a call graph.\n\/\/\n\/\/ A graph may contain nodes that are not reachable from the root.\n\/\/ If the call graph is sound, such nodes indicate unreachable\n\/\/ functions.\n\/\/\ntype Graph struct {\n\tRoot *Node \/\/ the distinguished root node\n\tNodes map[*ssa.Function]*Node \/\/ all nodes by function\n}\n\n\/\/ New returns a new Graph with the specified root node.\nfunc New(root *ssa.Function) *Graph {\n\tg := &Graph{Nodes: make(map[*ssa.Function]*Node)}\n\tg.Root = g.CreateNode(root)\n\treturn g\n}\n\n\/\/ CreateNode returns the Node for fn, creating it if not present.\nfunc (g *Graph) CreateNode(fn *ssa.Function) *Node {\n\tn, ok := g.Nodes[fn]\n\tif !ok {\n\t\tn = &Node{Func: fn, ID: len(g.Nodes)}\n\t\tg.Nodes[fn] = n\n\t}\n\treturn n\n}\n\n\/\/ A Node represents a node in a call graph.\ntype Node struct {\n\tFunc *ssa.Function \/\/ the function this node represents\n\tID int \/\/ 0-based sequence number\n\tIn []*Edge \/\/ unordered set of incoming call edges (n.In[*].Callee == n)\n\tOut []*Edge \/\/ unordered set of outgoing call edges (n.Out[*].Caller == n)\n}\n\nfunc (n *Node) String() string {\n\treturn fmt.Sprintf(\"n%d:%s\", n.ID, n.Func)\n}\n\n\/\/ A Edge represents an edge in the call graph.\n\/\/\n\/\/ Site is nil for edges originating in synthetic or intrinsic\n\/\/ functions, e.g. reflect.Call or the root of the call graph.\ntype Edge struct {\n\tCaller *Node\n\tSite ssa.CallInstruction\n\tCallee *Node\n}\n\nfunc (e Edge) String() string {\n\treturn fmt.Sprintf(\"%s --> %s\", e.Caller, e.Callee)\n}\n\nfunc (e Edge) Description() string {\n\tvar prefix string\n\tswitch e.Site.(type) {\n\tcase nil:\n\t\treturn \"synthetic call\"\n\tcase *ssa.Go:\n\t\tprefix = \"concurrent \"\n\tcase *ssa.Defer:\n\t\tprefix = \"deferred \"\n\t}\n\treturn prefix + e.Site.Common().Description()\n}\n\nfunc (e Edge) Pos() token.Pos {\n\tif e.Site == nil {\n\t\treturn token.NoPos\n\t}\n\treturn e.Site.Pos()\n}\n\n\/\/ AddEdge adds the edge (caller, site, callee) to the call graph.\n\/\/ Elimination of duplicate edges is the caller's responsibility.\nfunc AddEdge(caller *Node, site ssa.CallInstruction, callee *Node) {\n\te := &Edge{caller, site, callee}\n\tcallee.In = append(callee.In, e)\n\tcaller.Out = append(caller.Out, e)\n}\n<commit_msg>go\/callgraph: change reflect.Call to reflect.Value.Call in comment<commit_after>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\n\nPackage callgraph defines the call graph and various algorithms\nand utilities to operate on it.\n\nA call graph is a labelled directed graph whose nodes represent\nfunctions and whose edge labels represent syntactic function call\nsites. The presence of a labelled edge (caller, site, callee)\nindicates that caller may call callee at the specified call site.\n\nA call graph is a multigraph: it may contain multiple edges (caller,\n*, callee) connecting the same pair of nodes, so long as the edges\ndiffer by label; this occurs when one function calls another function\nfrom multiple call sites. Also, it may contain multiple edges\n(caller, site, *) that differ only by callee; this indicates a\npolymorphic call.\n\nA SOUND call graph is one that overapproximates the dynamic calling\nbehaviors of the program in all possible executions. One call graph\nis more PRECISE than another if it is a smaller overapproximation of\nthe dynamic behavior.\n\nAll call graphs have a synthetic root node which is responsible for\ncalling main() and init().\n\nCalls to built-in functions (e.g. panic, println) are not represented\nin the call graph; they are treated like built-in operators of the\nlanguage.\n\n*\/\npackage callgraph \/\/ import \"golang.org\/x\/tools\/go\/callgraph\"\n\n\/\/ TODO(adonovan): add a function to eliminate wrappers from the\n\/\/ callgraph, preserving topology.\n\/\/ More generally, we could eliminate \"uninteresting\" nodes such as\n\/\/ nodes from packages we don't care about.\n\nimport (\n\t\"fmt\"\n\t\"go\/token\"\n\n\t\"golang.org\/x\/tools\/go\/ssa\"\n)\n\n\/\/ A Graph represents a call graph.\n\/\/\n\/\/ A graph may contain nodes that are not reachable from the root.\n\/\/ If the call graph is sound, such nodes indicate unreachable\n\/\/ functions.\n\/\/\ntype Graph struct {\n\tRoot *Node \/\/ the distinguished root node\n\tNodes map[*ssa.Function]*Node \/\/ all nodes by function\n}\n\n\/\/ New returns a new Graph with the specified root node.\nfunc New(root *ssa.Function) *Graph {\n\tg := &Graph{Nodes: make(map[*ssa.Function]*Node)}\n\tg.Root = g.CreateNode(root)\n\treturn g\n}\n\n\/\/ CreateNode returns the Node for fn, creating it if not present.\nfunc (g *Graph) CreateNode(fn *ssa.Function) *Node {\n\tn, ok := g.Nodes[fn]\n\tif !ok {\n\t\tn = &Node{Func: fn, ID: len(g.Nodes)}\n\t\tg.Nodes[fn] = n\n\t}\n\treturn n\n}\n\n\/\/ A Node represents a node in a call graph.\ntype Node struct {\n\tFunc *ssa.Function \/\/ the function this node represents\n\tID int \/\/ 0-based sequence number\n\tIn []*Edge \/\/ unordered set of incoming call edges (n.In[*].Callee == n)\n\tOut []*Edge \/\/ unordered set of outgoing call edges (n.Out[*].Caller == n)\n}\n\nfunc (n *Node) String() string {\n\treturn fmt.Sprintf(\"n%d:%s\", n.ID, n.Func)\n}\n\n\/\/ A Edge represents an edge in the call graph.\n\/\/\n\/\/ Site is nil for edges originating in synthetic or intrinsic\n\/\/ functions, e.g. reflect.Value.Call or the root of the call graph.\ntype Edge struct {\n\tCaller *Node\n\tSite ssa.CallInstruction\n\tCallee *Node\n}\n\nfunc (e Edge) String() string {\n\treturn fmt.Sprintf(\"%s --> %s\", e.Caller, e.Callee)\n}\n\nfunc (e Edge) Description() string {\n\tvar prefix string\n\tswitch e.Site.(type) {\n\tcase nil:\n\t\treturn \"synthetic call\"\n\tcase *ssa.Go:\n\t\tprefix = \"concurrent \"\n\tcase *ssa.Defer:\n\t\tprefix = \"deferred \"\n\t}\n\treturn prefix + e.Site.Common().Description()\n}\n\nfunc (e Edge) Pos() token.Pos {\n\tif e.Site == nil {\n\t\treturn token.NoPos\n\t}\n\treturn e.Site.Pos()\n}\n\n\/\/ AddEdge adds the edge (caller, site, callee) to the call graph.\n\/\/ Elimination of duplicate edges is the caller's responsibility.\nfunc AddEdge(caller *Node, site ssa.CallInstruction, callee *Node) {\n\te := &Edge{caller, site, callee}\n\tcallee.In = append(callee.In, e)\n\tcaller.Out = append(caller.Out, e)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris\n\npackage client\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"golang.org\/x\/crypto\/ssh\/terminal\"\n)\n\ntype TerminalEngine struct {\n\ttty *os.File\n\tfd int\n\toldTerminal *terminal.State\n\tterminal *terminal.Terminal\n\tstarted bool\n\twidth, height int\n}\n\nfunc (t *TerminalEngine) Init() error {\n\treturn nil\n}\n\nfunc NewTerminalEngine() *TerminalEngine {\n\treturn &TerminalEngine{fd: -1}\n}\n\nvar globalIsStarted = false\n\nfunc (t *TerminalEngine) GetSize() (int, int) {\n\tif err := t.Startup(); err != nil {\n\t\treturn 0, 0\n\t}\n\treturn t.width, t.height\n}\n\nfunc (t *TerminalEngine) Startup() error {\n\n\tif t.started {\n\t\treturn nil\n\t}\n\n\tt.started = true\n\n\tif globalIsStarted {\n\t\treturn fmt.Errorf(\"Can only instantiate one terminal wrapper per proc\")\n\t}\n\n\tglobalIsStarted = true\n\n\tG.Log.Debug(\"+ Opening up \/dev\/tty terminal on Linux and OSX\")\n\tfile, err := os.OpenFile(\"\/dev\/tty\", os.O_RDWR, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\tt.tty = file\n\tt.fd = int(t.tty.Fd())\n\tt.width, t.height, err = terminal.GetSize(t.fd)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tt.oldTerminal, err = terminal.MakeRaw(t.fd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tG.Log.Debug(\"| switched to raw console for tty\")\n\tif t.terminal = terminal.NewTerminal(file, \"\"); t.terminal == nil {\n\t\treturn fmt.Errorf(\"failed to open terminal\")\n\t}\n\tt.terminal.AutoCompleteCallback = t.autoComp\n\n\tif err = t.terminal.SetSize(t.width, t.height); err != nil {\n\t\treturn err\n\t}\n\n\tG.Log.Debug(\"- Done opening \/dev\/tty\")\n\treturn nil\n}\n\nfunc (t *TerminalEngine) Shutdown() error {\n\tif t.oldTerminal != nil {\n\t\tG.Log.Debug(\"Restoring terminal settings\")\n\n\t\t\/\/ XXX bug in ssh\/terminal. On success, we were getting an error\n\t\t\/\/ \"errno 0\"; so let's ignore it for now.\n\t\tterminal.Restore(t.fd, t.oldTerminal)\n\t}\n\treturn nil\n}\n\nfunc (t *TerminalEngine) PromptPassword(prompt string) (string, error) {\n\tif err := t.Startup(); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn t.terminal.ReadPassword(prompt)\n}\n\nfunc (t *TerminalEngine) Write(s string) error {\n\tif err := t.Startup(); err != nil {\n\t\treturn err\n\t}\n\t_, err := t.terminal.Write([]byte(s))\n\treturn err\n}\n\nfunc (t *TerminalEngine) Prompt(prompt string) (string, error) {\n\tif err := t.Startup(); err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(prompt) >= 0 {\n\t\tt.Write(prompt)\n\t}\n\treturn t.terminal.ReadLine()\n}\n\nfunc (t *TerminalEngine) autoComp(line string, pos int, key rune) (newLine string, newPos int, ok bool) {\n\t\/\/ this is a hack to handle ctrl-c\n\tif key == 3 {\n\t\tG.Shutdown()\n\t\tG.Log.Error(\"interrupted\")\n\t\tos.Exit(3)\n\t}\n\treturn line, pos, false\n}\n<commit_msg>removed old terminal file (minterm replaces it)<commit_after><|endoftext|>"} {"text":"<commit_before>package promstats\n\nimport (\n\t\"expvar\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/youtube\/vitess\/go\/stats\"\n)\n\n\/\/ NewCollector returns a prometheus.Collector for a given stats var.\n\/\/ It supports all stats var types except String, StringFunc and Rates.\n\/\/ The returned collector still needs to be registered with prometheus registry.\nfunc NewCollector(opts prometheus.Opts, v expvar.Var) prometheus.Collector {\n\tswitch st := v.(type) {\n\tcase *stats.Int:\n\t\treturn prometheus.NewGaugeFunc(prometheus.GaugeOpts(opts), func() float64 {\n\t\t\treturn float64(st.Get())\n\t\t})\n\tcase stats.IntFunc:\n\t\treturn prometheus.NewGaugeFunc(prometheus.GaugeOpts(opts), func() float64 {\n\t\t\treturn float64(st())\n\t\t})\n\tcase *stats.Duration:\n\t\treturn prometheus.NewGaugeFunc(prometheus.GaugeOpts(opts), func() float64 {\n\t\t\treturn st.Get().Seconds()\n\t\t})\n\tcase stats.DurationFunc:\n\t\treturn prometheus.NewGaugeFunc(prometheus.GaugeOpts(opts), func() float64 {\n\t\t\treturn st().Seconds()\n\t\t})\n\tcase *stats.Float:\n\t\treturn prometheus.NewGaugeFunc(prometheus.GaugeOpts(opts), st.Get)\n\tcase stats.FloatFunc:\n\t\treturn prometheus.NewGaugeFunc(prometheus.GaugeOpts(opts), st)\n\tcase *stats.Counters:\n\t\treturn newCountersCollector(opts, st, \"tag\")\n\tcase stats.CountersFunc:\n\t\treturn newCountersCollector(opts, st, \"tag\")\n\tcase *stats.MultiCounters:\n\t\treturn newCountersCollector(opts, st, st.Labels()...)\n\tcase *stats.MultiCountersFunc:\n\t\treturn newCountersCollector(opts, st, st.Labels()...)\n\tcase *stats.Histogram:\n\t\treturn newHistogramCollector(opts, st)\n\tcase *stats.Timings:\n\t\treturn newTimingsCollector(opts, st, \"category\")\n\tcase *stats.MultiTimings:\n\t\treturn newTimingsCollector(opts, &st.Timings, st.Labels()...)\n\tcase *stats.String:\n\t\t\/\/ prometheus can't collect string values\n\t\treturn nil\n\tcase stats.StringFunc:\n\t\t\/\/ prometheus can't collect string values\n\t\treturn nil\n\tcase *stats.Rates:\n\t\t\/\/ Ignore these, because monitoring tools will calculate\n\t\t\/\/ rates for us.\n\t\treturn nil\n\tdefault:\n\t\tglog.Warningf(\"Unsupported type for %s: %T\", opts.Name, v)\n\t\treturn nil\n\t}\n}\n\ntype countersCollector struct {\n\tdesc *prometheus.Desc\n\tc stats.CountTracker\n\tnLabels int\n}\n\nfunc newCountersCollector(opts prometheus.Opts, c stats.CountTracker, labels ...string) prometheus.Collector {\n\tdesc := prometheus.NewDesc(\n\t\tprometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tlabels,\n\t\topts.ConstLabels,\n\t)\n\treturn countersCollector{\n\t\tdesc: desc,\n\t\tc: c,\n\t\tnLabels: len(labels),\n\t}\n}\n\nfunc (c countersCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.desc\n}\n\nvar replacer = strings.NewReplacer(`\\\\`, `\\`, `\\.`, `.`, `.`, \"\\000\")\n\nfunc split(key string) []string {\n\treturn strings.Split(replacer.Replace(key), \"\\000\")\n}\n\nfunc (c countersCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor k, n := range c.c.Counts() {\n\t\tif c.nLabels > 1 {\n\t\t\tlabels := split(k)\n\t\t\tif len(labels) != c.nLabels {\n\t\t\t\terr := fmt.Errorf(\"wrong number of labels in MultiCounters key: %d != %d (key=%q)\", len(labels), c.nLabels, k)\n\t\t\t\tch <- prometheus.NewInvalidMetric(c.desc, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(n), labels...)\n\t\t\tcontinue\n\t\t}\n\t\tch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(n), k)\n\t}\n}\n\nfunc newHistogram(opts prometheus.Opts, cutoffs []int64) (prometheus.Histogram, func(int64)) {\n\tbuckets := make([]float64, len(cutoffs))\n\tfor i := range cutoffs {\n\t\tbuckets[i] = float64(cutoffs[i])\n\t}\n\thOpts := prometheus.HistogramOpts{\n\t\tNamespace: opts.Namespace,\n\t\tSubsystem: opts.Subsystem,\n\t\tName: opts.Name,\n\t\tHelp: opts.Help,\n\t\tConstLabels: opts.ConstLabels,\n\t\tBuckets: buckets,\n\t}\n\tm := prometheus.NewHistogram(hOpts)\n\treturn m, func(n int64) {\n\t\tm.Observe(float64(n))\n\t}\n}\n\ntype histogramCollector struct {\n\tdesc *prometheus.Desc\n\th *stats.Histogram\n}\n\nfunc newHistogramCollector(opts prometheus.Opts, h *stats.Histogram) histogramCollector {\n\tdesc := prometheus.NewDesc(\n\t\tprometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tnil,\n\t\topts.ConstLabels,\n\t)\n\treturn histogramCollector{\n\t\tdesc: desc,\n\t\th: h,\n\t}\n}\n\nfunc histogramMetric(desc *prometheus.Desc, h *stats.Histogram, scale float64, labels ...string) prometheus.Metric {\n\tcount := uint64(0)\n\tsum := float64(h.Total()) * scale\n\tcutoffs := h.Cutoffs()\n\tstatBuckets := h.Buckets()\n\tpromBuckets := make(map[float64]uint64, len(cutoffs))\n\tfor i, cutoff := range cutoffs {\n\t\tupperBound := float64(cutoff) * scale\n\t\tcount += uint64(statBuckets[i])\n\t\tpromBuckets[upperBound] = count\n\t}\n\tcount += uint64(statBuckets[len(statBuckets)-1])\n\treturn prometheus.MustNewConstHistogram(desc, count, sum, promBuckets, labels...)\n}\n\nfunc (h histogramCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- h.desc\n}\n\nfunc (h histogramCollector) Collect(ch chan<- prometheus.Metric) {\n\tch <- histogramMetric(h.desc, h.h, 1)\n}\n\ntype timingsCollector struct {\n\tdesc *prometheus.Desc\n\tt *stats.Timings\n\tnLabels int\n}\n\nfunc newTimingsCollector(opts prometheus.Opts, t *stats.Timings, labels ...string) prometheus.Collector {\n\tdesc := prometheus.NewDesc(\n\t\tprometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tlabels,\n\t\topts.ConstLabels,\n\t)\n\treturn timingsCollector{\n\t\tdesc: desc,\n\t\tt: t,\n\t\tnLabels: len(labels),\n\t}\n}\n\nfunc (c timingsCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.desc\n}\n\nfunc (c timingsCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor k, h := range c.t.Histograms() {\n\t\tif c.nLabels > 1 {\n\t\t\tlabels := split(k)\n\t\t\tif len(labels) != c.nLabels {\n\t\t\t\terr := fmt.Errorf(\"wrong number of labels in MultiTimings key: %d != %d (key=%q)\", len(labels), c.nLabels, k)\n\t\t\t\tch <- prometheus.NewInvalidMetric(c.desc, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tch <- histogramMetric(c.desc, h, 1\/float64(time.Second), labels...)\n\t\t\tcontinue\n\t\t}\n\t\tch <- histogramMetric(c.desc, h, 1\/float64(time.Second), k)\n\t}\n}\n<commit_msg>Add package doc to promstats<commit_after>\/*\nPackage promstats contains adapters to publish stats variables to prometheus (http:\/\/prometheus.io)\n*\/\npackage promstats\n\nimport (\n\t\"expvar\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/youtube\/vitess\/go\/stats\"\n)\n\n\/\/ NewCollector returns a prometheus.Collector for a given stats var.\n\/\/ It supports all stats var types except String, StringFunc and Rates.\n\/\/ The returned collector still needs to be registered with prometheus registry.\nfunc NewCollector(opts prometheus.Opts, v expvar.Var) prometheus.Collector {\n\tswitch st := v.(type) {\n\tcase *stats.Int:\n\t\treturn prometheus.NewGaugeFunc(prometheus.GaugeOpts(opts), func() float64 {\n\t\t\treturn float64(st.Get())\n\t\t})\n\tcase stats.IntFunc:\n\t\treturn prometheus.NewGaugeFunc(prometheus.GaugeOpts(opts), func() float64 {\n\t\t\treturn float64(st())\n\t\t})\n\tcase *stats.Duration:\n\t\treturn prometheus.NewGaugeFunc(prometheus.GaugeOpts(opts), func() float64 {\n\t\t\treturn st.Get().Seconds()\n\t\t})\n\tcase stats.DurationFunc:\n\t\treturn prometheus.NewGaugeFunc(prometheus.GaugeOpts(opts), func() float64 {\n\t\t\treturn st().Seconds()\n\t\t})\n\tcase *stats.Float:\n\t\treturn prometheus.NewGaugeFunc(prometheus.GaugeOpts(opts), st.Get)\n\tcase stats.FloatFunc:\n\t\treturn prometheus.NewGaugeFunc(prometheus.GaugeOpts(opts), st)\n\tcase *stats.Counters:\n\t\treturn newCountersCollector(opts, st, \"tag\")\n\tcase stats.CountersFunc:\n\t\treturn newCountersCollector(opts, st, \"tag\")\n\tcase *stats.MultiCounters:\n\t\treturn newCountersCollector(opts, st, st.Labels()...)\n\tcase *stats.MultiCountersFunc:\n\t\treturn newCountersCollector(opts, st, st.Labels()...)\n\tcase *stats.Histogram:\n\t\treturn newHistogramCollector(opts, st)\n\tcase *stats.Timings:\n\t\treturn newTimingsCollector(opts, st, \"category\")\n\tcase *stats.MultiTimings:\n\t\treturn newTimingsCollector(opts, &st.Timings, st.Labels()...)\n\tcase *stats.String:\n\t\t\/\/ prometheus can't collect string values\n\t\treturn nil\n\tcase stats.StringFunc:\n\t\t\/\/ prometheus can't collect string values\n\t\treturn nil\n\tcase *stats.Rates:\n\t\t\/\/ Ignore these, because monitoring tools will calculate\n\t\t\/\/ rates for us.\n\t\treturn nil\n\tdefault:\n\t\tglog.Warningf(\"Unsupported type for %s: %T\", opts.Name, v)\n\t\treturn nil\n\t}\n}\n\ntype countersCollector struct {\n\tdesc *prometheus.Desc\n\tc stats.CountTracker\n\tnLabels int\n}\n\nfunc newCountersCollector(opts prometheus.Opts, c stats.CountTracker, labels ...string) prometheus.Collector {\n\tdesc := prometheus.NewDesc(\n\t\tprometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tlabels,\n\t\topts.ConstLabels,\n\t)\n\treturn countersCollector{\n\t\tdesc: desc,\n\t\tc: c,\n\t\tnLabels: len(labels),\n\t}\n}\n\nfunc (c countersCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.desc\n}\n\nvar replacer = strings.NewReplacer(`\\\\`, `\\`, `\\.`, `.`, `.`, \"\\000\")\n\nfunc split(key string) []string {\n\treturn strings.Split(replacer.Replace(key), \"\\000\")\n}\n\nfunc (c countersCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor k, n := range c.c.Counts() {\n\t\tif c.nLabels > 1 {\n\t\t\tlabels := split(k)\n\t\t\tif len(labels) != c.nLabels {\n\t\t\t\terr := fmt.Errorf(\"wrong number of labels in MultiCounters key: %d != %d (key=%q)\", len(labels), c.nLabels, k)\n\t\t\t\tch <- prometheus.NewInvalidMetric(c.desc, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(n), labels...)\n\t\t\tcontinue\n\t\t}\n\t\tch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, float64(n), k)\n\t}\n}\n\nfunc newHistogram(opts prometheus.Opts, cutoffs []int64) (prometheus.Histogram, func(int64)) {\n\tbuckets := make([]float64, len(cutoffs))\n\tfor i := range cutoffs {\n\t\tbuckets[i] = float64(cutoffs[i])\n\t}\n\thOpts := prometheus.HistogramOpts{\n\t\tNamespace: opts.Namespace,\n\t\tSubsystem: opts.Subsystem,\n\t\tName: opts.Name,\n\t\tHelp: opts.Help,\n\t\tConstLabels: opts.ConstLabels,\n\t\tBuckets: buckets,\n\t}\n\tm := prometheus.NewHistogram(hOpts)\n\treturn m, func(n int64) {\n\t\tm.Observe(float64(n))\n\t}\n}\n\ntype histogramCollector struct {\n\tdesc *prometheus.Desc\n\th *stats.Histogram\n}\n\nfunc newHistogramCollector(opts prometheus.Opts, h *stats.Histogram) histogramCollector {\n\tdesc := prometheus.NewDesc(\n\t\tprometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tnil,\n\t\topts.ConstLabels,\n\t)\n\treturn histogramCollector{\n\t\tdesc: desc,\n\t\th: h,\n\t}\n}\n\nfunc histogramMetric(desc *prometheus.Desc, h *stats.Histogram, scale float64, labels ...string) prometheus.Metric {\n\tcount := uint64(0)\n\tsum := float64(h.Total()) * scale\n\tcutoffs := h.Cutoffs()\n\tstatBuckets := h.Buckets()\n\tpromBuckets := make(map[float64]uint64, len(cutoffs))\n\tfor i, cutoff := range cutoffs {\n\t\tupperBound := float64(cutoff) * scale\n\t\tcount += uint64(statBuckets[i])\n\t\tpromBuckets[upperBound] = count\n\t}\n\tcount += uint64(statBuckets[len(statBuckets)-1])\n\treturn prometheus.MustNewConstHistogram(desc, count, sum, promBuckets, labels...)\n}\n\nfunc (h histogramCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- h.desc\n}\n\nfunc (h histogramCollector) Collect(ch chan<- prometheus.Metric) {\n\tch <- histogramMetric(h.desc, h.h, 1)\n}\n\ntype timingsCollector struct {\n\tdesc *prometheus.Desc\n\tt *stats.Timings\n\tnLabels int\n}\n\nfunc newTimingsCollector(opts prometheus.Opts, t *stats.Timings, labels ...string) prometheus.Collector {\n\tdesc := prometheus.NewDesc(\n\t\tprometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tlabels,\n\t\topts.ConstLabels,\n\t)\n\treturn timingsCollector{\n\t\tdesc: desc,\n\t\tt: t,\n\t\tnLabels: len(labels),\n\t}\n}\n\nfunc (c timingsCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.desc\n}\n\nfunc (c timingsCollector) Collect(ch chan<- prometheus.Metric) {\n\tfor k, h := range c.t.Histograms() {\n\t\tif c.nLabels > 1 {\n\t\t\tlabels := split(k)\n\t\t\tif len(labels) != c.nLabels {\n\t\t\t\terr := fmt.Errorf(\"wrong number of labels in MultiTimings key: %d != %d (key=%q)\", len(labels), c.nLabels, k)\n\t\t\t\tch <- prometheus.NewInvalidMetric(c.desc, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tch <- histogramMetric(c.desc, h, 1\/float64(time.Second), labels...)\n\t\t\tcontinue\n\t\t}\n\t\tch <- histogramMetric(c.desc, h, 1\/float64(time.Second), k)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Convenience utilities for testing.\npackage testutils\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"text\/template\"\n\t\"time\"\n\n\t\"github.com\/google\/uuid\"\n\t\"github.com\/stretchr\/testify\/mock\"\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"go.skia.org\/infra\/go\/repo_root\"\n\t\"go.skia.org\/infra\/go\/sktest\"\n)\n\nvar (\n\t\/\/ TryAgainErr use used by TryUntil.\n\tTryAgainErr = errors.New(\"Trying Again\")\n)\n\n\/\/ TestDataDir returns the path to the caller's testdata directory, which\n\/\/ is assumed to be \"<path to caller dir>\/testdata\".\nfunc TestDataDir() (string, error) {\n\t_, thisFile, _, ok := runtime.Caller(0)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"Could not find test data dir: runtime.Caller() failed.\")\n\t}\n\tfor skip := 0; ; skip++ {\n\t\t_, file, _, ok := runtime.Caller(skip)\n\t\tif !ok {\n\t\t\treturn \"\", fmt.Errorf(\"Could not find test data dir: runtime.Caller() failed.\")\n\t\t}\n\t\tif file != thisFile {\n\t\t\treturn path.Join(path.Dir(file), \"testdata\"), nil\n\t\t}\n\t}\n}\n\nfunc readFile(filename string) (io.ReadCloser, error) {\n\tdir, err := TestDataDir()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not read %s: %v\", filename, err)\n\t}\n\tf, err := os.Open(path.Join(dir, filename))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not read %s: %v\", filename, err)\n\t}\n\treturn f, nil\n}\n\n\/\/ ReadFileBytes reads a file from the caller's testdata directory and returns its contents as a\n\/\/ slice of bytes.\nfunc ReadFileBytes(filename string) ([]byte, error) {\n\tf, err := readFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not read %s: %v\", filename, err)\n\t}\n\treturn b, nil\n}\n\n\/\/ ReadFile reads a file from the caller's testdata directory.\nfunc ReadFile(filename string) (string, error) {\n\tb, err := ReadFileBytes(filename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(b), nil\n}\n\n\/\/ MustGetReader reads a file from the caller's testdata directory and panics on\n\/\/ error.\nfunc MustGetReader(filename string) io.ReadCloser {\n\tr, err := readFile(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn r\n}\n\n\/\/ MustReadFile returns from the caller's testdata directory and panics on\n\/\/ error.\nfunc MustReadFile(filename string) string {\n\ts, err := ReadFile(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn s\n}\n\n\/\/ ReadJsonFile reads a JSON file from the caller's testdata directory into the\n\/\/ given interface.\nfunc ReadJsonFile(filename string, dest interface{}) error {\n\tf, err := readFile(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.NewDecoder(f).Decode(dest)\n}\n\n\/\/ MustReadJsonFile reads a JSON file from the caller's testdata directory into\n\/\/ the given interface and panics on error.\nfunc MustReadJsonFile(filename string, dest interface{}) {\n\tif err := ReadJsonFile(filename, dest); err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ WriteFile writes the given contents to the given file path, reporting any\n\/\/ error.\nfunc WriteFile(t sktest.TestingT, filename, contents string) {\n\trequire.NoErrorf(t, ioutil.WriteFile(filename, []byte(contents), os.ModePerm), \"Unable to write to file %s\", filename)\n}\n\n\/\/ AssertCloses takes an ioutil.Closer and asserts that it closes. E.g.:\n\/\/ frobber := NewFrobber()\n\/\/ defer testutils.AssertCloses(t, frobber)\nfunc AssertCloses(t sktest.TestingT, c io.Closer) {\n\trequire.NoError(t, c.Close())\n}\n\n\/\/ Remove attempts to remove the given file and asserts that no error is returned.\nfunc Remove(t sktest.TestingT, fp string) {\n\trequire.NoError(t, os.Remove(fp))\n}\n\n\/\/ RemoveAll attempts to remove the given directory and asserts that no error is returned.\nfunc RemoveAll(t sktest.TestingT, fp string) {\n\trequire.NoError(t, os.RemoveAll(fp))\n}\n\n\/\/ TempDir is a wrapper for ioutil.TempDir. Returns the path to the directory and a cleanup\n\/\/ function to defer.\nfunc TempDir(t sktest.TestingT) (string, func()) {\n\td, err := ioutil.TempDir(\"\", \"testutils\")\n\trequire.NoError(t, err)\n\treturn d, func() {\n\t\tRemoveAll(t, d)\n\t}\n}\n\n\/\/ MarshalJSON encodes the given interface to a JSON string.\nfunc MarshalJSON(t sktest.TestingT, i interface{}) string {\n\tb, err := json.Marshal(i)\n\trequire.NoError(t, err)\n\treturn string(b)\n}\n\n\/\/ MarshalIndentJSON encodes the given interface to an indented JSON string.\nfunc MarshalIndentJSON(t sktest.TestingT, i interface{}) string {\n\tb, err := json.MarshalIndent(i, \"\", \" \")\n\trequire.NoError(t, err)\n\treturn string(b)\n}\n\n\/\/ AssertErrorContains asserts that the given error contains the given string.\nfunc AssertErrorContains(t sktest.TestingT, err error, substr string) {\n\trequire.NotNil(t, err)\n\trequire.True(t, strings.Contains(err.Error(), substr))\n}\n\n\/\/ Return the path to the root of the checkout.\nfunc GetRepoRoot(t sktest.TestingT) string {\n\troot, err := repo_root.Get()\n\trequire.NoError(t, err)\n\treturn root\n}\n\n\/\/ EventuallyConsistent tries a test repeatedly until either the test passes\n\/\/ or time expires, and is used when tests are written to expect\n\/\/ non-eventual consistency.\n\/\/\n\/\/ Use this function sparingly.\n\/\/\n\/\/ duration - The amount of time to keep trying.\n\/\/ f - The func to run the tests, should return TryAgainErr if\n\/\/ we should keep trying, otherwise TryUntil will return\n\/\/ with the err that f() returns.\nfunc EventuallyConsistent(duration time.Duration, f func() error) error {\n\tbegin := time.Now()\n\tfor time.Now().Sub(begin) < duration {\n\t\tif err := f(); err != TryAgainErr {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn fmt.Errorf(\"Failed to pass test in allotted time.\")\n}\n\n\/\/ MockTestingT implements sktest.TestingT by saving calls to Log and Fail. MockTestingT can\n\/\/ be used to test a test helper function. See also AssertFails.\n\/\/ The methods Helper, Name, Skip, SkipNow, Skipf, and Skipped are unimplemented.\n\/\/ This type is not safe for concurrent use.\ntype MockTestingT struct {\n\tLogMsgs []string\n\tIsFailed bool\n}\n\nfunc (m *MockTestingT) Error(args ...interface{}) {\n\tm.Log(args...)\n\tm.Fail()\n}\nfunc (m *MockTestingT) Errorf(format string, args ...interface{}) {\n\tm.Logf(format, args...)\n\tm.Fail()\n}\nfunc (m *MockTestingT) Fail() {\n\tm.IsFailed = true\n}\nfunc (m *MockTestingT) FailNow() {\n\tm.Fail()\n\truntime.Goexit()\n}\nfunc (m *MockTestingT) Failed() bool {\n\treturn m.IsFailed\n}\nfunc (m *MockTestingT) Fatal(args ...interface{}) {\n\tm.Log(args...)\n\tm.FailNow()\n}\nfunc (m *MockTestingT) Fatalf(format string, args ...interface{}) {\n\tm.Logf(format, args...)\n\tm.FailNow()\n}\nfunc (m *MockTestingT) Helper() {}\nfunc (m *MockTestingT) Log(args ...interface{}) {\n\tm.LogMsgs = append(m.LogMsgs, fmt.Sprintln(args...))\n}\nfunc (m *MockTestingT) Logf(format string, args ...interface{}) {\n\tm.LogMsgs = append(m.LogMsgs, fmt.Sprintf(format, args...))\n}\nfunc (m *MockTestingT) Name() string {\n\treturn \"\"\n}\nfunc (m *MockTestingT) Skip(args ...interface{}) {\n\tm.Log(args...)\n\tm.SkipNow()\n}\nfunc (m *MockTestingT) SkipNow() {\n\tpanic(\"SkipNow is not implemented.\")\n}\nfunc (m *MockTestingT) Skipf(format string, args ...interface{}) {\n\tm.Logf(format, args...)\n\tm.SkipNow()\n}\nfunc (m *MockTestingT) Skipped() bool {\n\treturn false\n}\n\n\/\/ Assert that MockTestingT implements the sktest.TestingT interface:\nvar _ sktest.TestingT = (*MockTestingT)(nil)\n\n\/\/ AssertFails runs testfn with a MockTestingT and asserts that the test fails and the first failure\n\/\/ logged matches the regexp. The sktest.TestingT passed to testfn is not safe for concurrent use.\nfunc AssertFails(parent sktest.TestingT, regexp string, testfn func(sktest.TestingT)) {\n\tmock := MockTestingT{}\n\twg := sync.WaitGroup{}\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\ttestfn(&mock)\n\t}()\n\twg.Wait()\n\trequire.True(parent, mock.Failed(), \"In AssertFails, the test function did not fail.\")\n\trequire.True(parent, len(mock.LogMsgs) > 0, \"In AssertFails, the test function did not produce any failure messages.\")\n\trequire.Regexp(parent, regexp, mock.LogMsgs[0])\n}\n\n\/\/ AnyContext can be used to match any Context objects e.g.\n\/\/ m.On(\"Foo\", testutils.AnyContext).Return(...)\n\/\/ This is better than trying to used mock.AnythingOfTypeArgument\n\/\/ because that only works for concrete types, which could be brittle\n\/\/ (e.g. a \"normal\" context is *context.emptyCtx, but one modified by\n\/\/ trace.StartSpan() could be a *context.valueCtx)\nvar AnyContext = mock.MatchedBy(func(c context.Context) bool {\n\t\/\/ if the passed in parameter does not implement the context.Context interface, the\n\t\/\/ wrapping MatchedBy will panic - so we can simply return true, since we\n\t\/\/ know it's a context.Context if execution flow makes it here.\n\treturn true\n})\n\n\/\/ ExecTemplate parses the given string as a text template, executes it using\n\/\/ the given data, and returns the result as a string.\nfunc ExecTemplate(t sktest.TestingT, tmpl string, data interface{}) string {\n\ttemplate, err := template.New(uuid.New().String()).Parse(tmpl)\n\trequire.NoError(t, err)\n\tvar buf bytes.Buffer\n\trequire.NoError(t, template.Execute(&buf, data))\n\treturn buf.String()\n}\n<commit_msg>[infra] Fix use of \"path\" where \"filepath\" was meant.<commit_after>\/\/ Package testutils contains convenience utilities for testing.\npackage testutils\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"text\/template\"\n\t\"time\"\n\n\t\"github.com\/google\/uuid\"\n\t\"github.com\/stretchr\/testify\/mock\"\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"go.skia.org\/infra\/go\/repo_root\"\n\t\"go.skia.org\/infra\/go\/sktest\"\n)\n\nvar (\n\t\/\/ TryAgainErr use used by TryUntil.\n\tTryAgainErr = errors.New(\"Trying Again\")\n)\n\n\/\/ TestDataDir returns the path to the caller's testdata directory, which\n\/\/ is assumed to be \"<path to caller dir>\/testdata\".\nfunc TestDataDir() (string, error) {\n\t_, thisFile, _, ok := runtime.Caller(0)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"Could not find test data dir: runtime.Caller() failed.\")\n\t}\n\tfor skip := 0; ; skip++ {\n\t\t_, file, _, ok := runtime.Caller(skip)\n\t\tif !ok {\n\t\t\treturn \"\", fmt.Errorf(\"Could not find test data dir: runtime.Caller() failed.\")\n\t\t}\n\t\tif file != thisFile {\n\t\t\treturn filepath.Join(filepath.Dir(file), \"testdata\"), nil\n\t\t}\n\t}\n}\n\nfunc readFile(filename string) (io.ReadCloser, error) {\n\tdir, err := TestDataDir()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not read %s: %v\", filename, err)\n\t}\n\tf, err := os.Open(filepath.Join(dir, filename))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not read %s: %v\", filename, err)\n\t}\n\treturn f, nil\n}\n\n\/\/ ReadFileBytes reads a file from the caller's testdata directory and returns its contents as a\n\/\/ slice of bytes.\nfunc ReadFileBytes(filename string) ([]byte, error) {\n\tf, err := readFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not read %s: %v\", filename, err)\n\t}\n\treturn b, nil\n}\n\n\/\/ ReadFile reads a file from the caller's testdata directory.\nfunc ReadFile(filename string) (string, error) {\n\tb, err := ReadFileBytes(filename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(b), nil\n}\n\n\/\/ MustGetReader reads a file from the caller's testdata directory and panics on\n\/\/ error.\nfunc MustGetReader(filename string) io.ReadCloser {\n\tr, err := readFile(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn r\n}\n\n\/\/ MustReadFile returns from the caller's testdata directory and panics on\n\/\/ error.\nfunc MustReadFile(filename string) string {\n\ts, err := ReadFile(filename)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn s\n}\n\n\/\/ ReadJsonFile reads a JSON file from the caller's testdata directory into the\n\/\/ given interface.\nfunc ReadJsonFile(filename string, dest interface{}) error {\n\tf, err := readFile(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.NewDecoder(f).Decode(dest)\n}\n\n\/\/ MustReadJsonFile reads a JSON file from the caller's testdata directory into\n\/\/ the given interface and panics on error.\nfunc MustReadJsonFile(filename string, dest interface{}) {\n\tif err := ReadJsonFile(filename, dest); err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ WriteFile writes the given contents to the given file path, reporting any\n\/\/ error.\nfunc WriteFile(t sktest.TestingT, filename, contents string) {\n\trequire.NoErrorf(t, ioutil.WriteFile(filename, []byte(contents), os.ModePerm), \"Unable to write to file %s\", filename)\n}\n\n\/\/ AssertCloses takes an ioutil.Closer and asserts that it closes. E.g.:\n\/\/ frobber := NewFrobber()\n\/\/ defer testutils.AssertCloses(t, frobber)\nfunc AssertCloses(t sktest.TestingT, c io.Closer) {\n\trequire.NoError(t, c.Close())\n}\n\n\/\/ Remove attempts to remove the given file and asserts that no error is returned.\nfunc Remove(t sktest.TestingT, fp string) {\n\trequire.NoError(t, os.Remove(fp))\n}\n\n\/\/ RemoveAll attempts to remove the given directory and asserts that no error is returned.\nfunc RemoveAll(t sktest.TestingT, fp string) {\n\trequire.NoError(t, os.RemoveAll(fp))\n}\n\n\/\/ TempDir is a wrapper for ioutil.TempDir. Returns the path to the directory and a cleanup\n\/\/ function to defer.\nfunc TempDir(t sktest.TestingT) (string, func()) {\n\td, err := ioutil.TempDir(\"\", \"testutils\")\n\trequire.NoError(t, err)\n\treturn d, func() {\n\t\tRemoveAll(t, d)\n\t}\n}\n\n\/\/ MarshalJSON encodes the given interface to a JSON string.\nfunc MarshalJSON(t sktest.TestingT, i interface{}) string {\n\tb, err := json.Marshal(i)\n\trequire.NoError(t, err)\n\treturn string(b)\n}\n\n\/\/ MarshalIndentJSON encodes the given interface to an indented JSON string.\nfunc MarshalIndentJSON(t sktest.TestingT, i interface{}) string {\n\tb, err := json.MarshalIndent(i, \"\", \" \")\n\trequire.NoError(t, err)\n\treturn string(b)\n}\n\n\/\/ AssertErrorContains asserts that the given error contains the given string.\nfunc AssertErrorContains(t sktest.TestingT, err error, substr string) {\n\trequire.NotNil(t, err)\n\trequire.True(t, strings.Contains(err.Error(), substr))\n}\n\n\/\/ Return the path to the root of the checkout.\nfunc GetRepoRoot(t sktest.TestingT) string {\n\troot, err := repo_root.Get()\n\trequire.NoError(t, err)\n\treturn root\n}\n\n\/\/ EventuallyConsistent tries a test repeatedly until either the test passes\n\/\/ or time expires, and is used when tests are written to expect\n\/\/ non-eventual consistency.\n\/\/\n\/\/ Use this function sparingly.\n\/\/\n\/\/ duration - The amount of time to keep trying.\n\/\/ f - The func to run the tests, should return TryAgainErr if\n\/\/ we should keep trying, otherwise TryUntil will return\n\/\/ with the err that f() returns.\nfunc EventuallyConsistent(duration time.Duration, f func() error) error {\n\tbegin := time.Now()\n\tfor time.Now().Sub(begin) < duration {\n\t\tif err := f(); err != TryAgainErr {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn fmt.Errorf(\"Failed to pass test in allotted time.\")\n}\n\n\/\/ MockTestingT implements sktest.TestingT by saving calls to Log and Fail. MockTestingT can\n\/\/ be used to test a test helper function. See also AssertFails.\n\/\/ The methods Helper, Name, Skip, SkipNow, Skipf, and Skipped are unimplemented.\n\/\/ This type is not safe for concurrent use.\ntype MockTestingT struct {\n\tLogMsgs []string\n\tIsFailed bool\n}\n\nfunc (m *MockTestingT) Error(args ...interface{}) {\n\tm.Log(args...)\n\tm.Fail()\n}\nfunc (m *MockTestingT) Errorf(format string, args ...interface{}) {\n\tm.Logf(format, args...)\n\tm.Fail()\n}\nfunc (m *MockTestingT) Fail() {\n\tm.IsFailed = true\n}\nfunc (m *MockTestingT) FailNow() {\n\tm.Fail()\n\truntime.Goexit()\n}\nfunc (m *MockTestingT) Failed() bool {\n\treturn m.IsFailed\n}\nfunc (m *MockTestingT) Fatal(args ...interface{}) {\n\tm.Log(args...)\n\tm.FailNow()\n}\nfunc (m *MockTestingT) Fatalf(format string, args ...interface{}) {\n\tm.Logf(format, args...)\n\tm.FailNow()\n}\nfunc (m *MockTestingT) Helper() {}\nfunc (m *MockTestingT) Log(args ...interface{}) {\n\tm.LogMsgs = append(m.LogMsgs, fmt.Sprintln(args...))\n}\nfunc (m *MockTestingT) Logf(format string, args ...interface{}) {\n\tm.LogMsgs = append(m.LogMsgs, fmt.Sprintf(format, args...))\n}\nfunc (m *MockTestingT) Name() string {\n\treturn \"\"\n}\nfunc (m *MockTestingT) Skip(args ...interface{}) {\n\tm.Log(args...)\n\tm.SkipNow()\n}\nfunc (m *MockTestingT) SkipNow() {\n\tpanic(\"SkipNow is not implemented.\")\n}\nfunc (m *MockTestingT) Skipf(format string, args ...interface{}) {\n\tm.Logf(format, args...)\n\tm.SkipNow()\n}\nfunc (m *MockTestingT) Skipped() bool {\n\treturn false\n}\n\n\/\/ Assert that MockTestingT implements the sktest.TestingT interface:\nvar _ sktest.TestingT = (*MockTestingT)(nil)\n\n\/\/ AssertFails runs testfn with a MockTestingT and asserts that the test fails and the first failure\n\/\/ logged matches the regexp. The sktest.TestingT passed to testfn is not safe for concurrent use.\nfunc AssertFails(parent sktest.TestingT, regexp string, testfn func(sktest.TestingT)) {\n\tmock := MockTestingT{}\n\twg := sync.WaitGroup{}\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\ttestfn(&mock)\n\t}()\n\twg.Wait()\n\trequire.True(parent, mock.Failed(), \"In AssertFails, the test function did not fail.\")\n\trequire.True(parent, len(mock.LogMsgs) > 0, \"In AssertFails, the test function did not produce any failure messages.\")\n\trequire.Regexp(parent, regexp, mock.LogMsgs[0])\n}\n\n\/\/ AnyContext can be used to match any Context objects e.g.\n\/\/ m.On(\"Foo\", testutils.AnyContext).Return(...)\n\/\/ This is better than trying to used mock.AnythingOfTypeArgument\n\/\/ because that only works for concrete types, which could be brittle\n\/\/ (e.g. a \"normal\" context is *context.emptyCtx, but one modified by\n\/\/ trace.StartSpan() could be a *context.valueCtx)\nvar AnyContext = mock.MatchedBy(func(c context.Context) bool {\n\t\/\/ if the passed in parameter does not implement the context.Context interface, the\n\t\/\/ wrapping MatchedBy will panic - so we can simply return true, since we\n\t\/\/ know it's a context.Context if execution flow makes it here.\n\treturn true\n})\n\n\/\/ ExecTemplate parses the given string as a text template, executes it using\n\/\/ the given data, and returns the result as a string.\nfunc ExecTemplate(t sktest.TestingT, tmpl string, data interface{}) string {\n\ttemplate, err := template.New(uuid.New().String()).Parse(tmpl)\n\trequire.NoError(t, err)\n\tvar buf bytes.Buffer\n\trequire.NoError(t, template.Execute(&buf, data))\n\treturn buf.String()\n}\n<|endoftext|>"} {"text":"<commit_before>package codegen\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"unicode\"\n\n\t\"github.com\/goadesign\/goa\/design\"\n\t\"github.com\/goadesign\/goa\/version\"\n)\n\n\/\/ CheckVersion returns an error if the ver is empty, contains an incorrect value or\n\/\/ a version number that is not compatible with the version of this repo.\nfunc CheckVersion(ver string) error {\n\tcompat, err := version.Compatible(ver)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !compat {\n\t\treturn fmt.Errorf(\"version mismatch: using goagen %s to generate code that compiles with goa %s\",\n\t\t\tver, version.String())\n\t}\n\treturn nil\n}\n\n\/\/ CommandLine return the command used to run this process.\nfunc CommandLine() string {\n\t\/\/ We don't use the full path to the tool so that running goagen multiple times doesn't\n\t\/\/ end up creating different command line comments (because of the temporary directory it\n\t\/\/ runs in).\n\tvar param string\n\tif len(os.Args) > 1 {\n\t\targs := make([]string, len(os.Args)-1)\n\t\tgopaths := filepath.SplitList(os.Getenv(\"GOPATH\"))\n\t\tfor i, a := range os.Args[1:] {\n\t\t\tfor _, p := range gopaths {\n\t\t\t\tif strings.Contains(a, p) {\n\t\t\t\t\targs[i] = strings.Replace(a, p, \"$(GOPATH)\", -1)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif args[i] == \"\" {\n\t\t\t\targs[i] = a\n\t\t\t}\n\t\t}\n\t\tparam = strings.Join(args, \" \")\n\t}\n\tcmd := fmt.Sprintf(\"$ %s %s\", filepath.Base(os.Args[0]), param)\n\treturn strings.Replace(cmd, \" --\", \"\\n\\t--\", -1)\n}\n\n\/\/ Comment produces line comments by concatenating the given strings and producing 80 characters\n\/\/ long lines starting with \"\/\/\"\nfunc Comment(elems ...string) string {\n\tvar lines []string\n\tfor _, e := range elems {\n\t\tlines = append(lines, strings.Split(e, \"\\n\")...)\n\t}\n\tvar trimmed = make([]string, len(lines))\n\tfor i, l := range lines {\n\t\ttrimmed[i] = strings.TrimLeft(l, \" \\t\")\n\t}\n\tt := strings.Join(trimmed, \"\\n\")\n\n\treturn Indent(t, \"\/\/ \")\n}\n\n\/\/ Indent inserts prefix at the beginning of each non-empty line of s. The\n\/\/ end-of-line marker is NL.\nfunc Indent(s, prefix string) string {\n\treturn string(IndentBytes([]byte(s), []byte(prefix)))\n}\n\n\/\/ IndentBytes inserts prefix at the beginning of each non-empty line of b.\n\/\/ The end-of-line marker is NL.\nfunc IndentBytes(b, prefix []byte) []byte {\n\tvar res []byte\n\tbol := true\n\tfor _, c := range b {\n\t\tif bol && c != '\\n' {\n\t\t\tres = append(res, prefix...)\n\t\t}\n\t\tres = append(res, c)\n\t\tbol = c == '\\n'\n\t}\n\treturn res\n}\n\n\/\/ Tabs returns a string made of depth tab characters.\nfunc Tabs(depth int) string {\n\tvar tabs string\n\tfor i := 0; i < depth; i++ {\n\t\ttabs += \"\\t\"\n\t}\n\t\/\/\treturn fmt.Sprintf(\"%d%s\", depth, tabs)\n\treturn tabs\n}\n\n\/\/ Add adds two integers and returns the sum of the two.\nfunc Add(a, b int) int { return a + b }\n\n\/\/ CanonicalTemplate returns the resource URI template as a format string suitable for use in the\n\/\/ fmt.Printf function family.\nfunc CanonicalTemplate(r *design.ResourceDefinition) string {\n\treturn design.WildcardRegex.ReplaceAllLiteralString(r.URITemplate(), \"\/%v\")\n}\n\n\/\/ CanonicalParams returns the list of parameter names needed to build the canonical href to the\n\/\/ resource. It returns nil if the resource does not have a canonical action.\nfunc CanonicalParams(r *design.ResourceDefinition) []string {\n\tvar params []string\n\tif ca := r.CanonicalAction(); ca != nil {\n\t\tif len(ca.Routes) > 0 {\n\t\t\tparams = ca.Routes[0].Params()\n\t\t}\n\t\tfor i, p := range params {\n\t\t\tparams[i] = Goify(p, false)\n\t\t}\n\t}\n\treturn params\n}\n\n\/\/ Casing exceptions\nvar toLower = map[string]string{\"OAuth\": \"oauth\"}\n\n\/\/ SnakeCase produces the snake_case version of the given CamelCase string.\nfunc SnakeCase(name string) string {\n\tfor u, l := range toLower {\n\t\tname = strings.Replace(name, u, l, -1)\n\t}\n\tvar b bytes.Buffer\n\tvar lastUnderscore bool\n\tln := len(name)\n\tif ln == 0 {\n\t\treturn \"\"\n\t}\n\tb.WriteRune(unicode.ToLower(rune(name[0])))\n\tfor i := 1; i < ln; i++ {\n\t\tr := rune(name[i])\n\t\tnextIsLower := false\n\t\tif i < ln-1 {\n\t\t\tn := rune(name[i+1])\n\t\t\tnextIsLower = unicode.IsLower(n) && unicode.IsLetter(n)\n\t\t}\n\t\tif unicode.IsUpper(r) {\n\t\t\tif !lastUnderscore && nextIsLower {\n\t\t\t\tb.WriteRune('_')\n\t\t\t\tlastUnderscore = true\n\t\t\t}\n\t\t\tb.WriteRune(unicode.ToLower(r))\n\t\t} else {\n\t\t\tb.WriteRune(r)\n\t\t\tlastUnderscore = false\n\t\t}\n\t}\n\treturn b.String()\n}\n<commit_msg>Remove .exe suffix from goagen output (#589)<commit_after>package codegen\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"unicode\"\n\n\t\"github.com\/goadesign\/goa\/design\"\n\t\"github.com\/goadesign\/goa\/version\"\n)\n\n\/\/ CheckVersion returns an error if the ver is empty, contains an incorrect value or\n\/\/ a version number that is not compatible with the version of this repo.\nfunc CheckVersion(ver string) error {\n\tcompat, err := version.Compatible(ver)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !compat {\n\t\treturn fmt.Errorf(\"version mismatch: using goagen %s to generate code that compiles with goa %s\",\n\t\t\tver, version.String())\n\t}\n\treturn nil\n}\n\n\/\/ CommandLine return the command used to run this process.\nfunc CommandLine() string {\n\t\/\/ We don't use the full path to the tool so that running goagen multiple times doesn't\n\t\/\/ end up creating different command line comments (because of the temporary directory it\n\t\/\/ runs in).\n\tvar param string\n\n\tif len(os.Args) > 1 {\n\t\targs := make([]string, len(os.Args)-1)\n\t\tgopaths := filepath.SplitList(os.Getenv(\"GOPATH\"))\n\t\tfor i, a := range os.Args[1:] {\n\t\t\tfor _, p := range gopaths {\n\t\t\t\tif strings.Contains(a, p) {\n\t\t\t\t\targs[i] = strings.Replace(a, p, \"$(GOPATH)\", -1)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif args[i] == \"\" {\n\t\t\t\targs[i] = a\n\t\t\t}\n\t\t}\n\t\tparam = strings.Join(args, \" \")\n\t}\n\trawcmd := filepath.Base(os.Args[0])\n\t\/\/ Remove possible .exe suffix to not create different ouptut just because\n\t\/\/ you ran goagen on Windows.\n\trawcmd = strings.TrimSuffix(rawcmd, \".exe\")\n\n\tcmd := fmt.Sprintf(\"$ %s %s\", rawcmd, param)\n\treturn strings.Replace(cmd, \" --\", \"\\n\\t--\", -1)\n}\n\n\/\/ Comment produces line comments by concatenating the given strings and producing 80 characters\n\/\/ long lines starting with \"\/\/\"\nfunc Comment(elems ...string) string {\n\tvar lines []string\n\tfor _, e := range elems {\n\t\tlines = append(lines, strings.Split(e, \"\\n\")...)\n\t}\n\tvar trimmed = make([]string, len(lines))\n\tfor i, l := range lines {\n\t\ttrimmed[i] = strings.TrimLeft(l, \" \\t\")\n\t}\n\tt := strings.Join(trimmed, \"\\n\")\n\n\treturn Indent(t, \"\/\/ \")\n}\n\n\/\/ Indent inserts prefix at the beginning of each non-empty line of s. The\n\/\/ end-of-line marker is NL.\nfunc Indent(s, prefix string) string {\n\treturn string(IndentBytes([]byte(s), []byte(prefix)))\n}\n\n\/\/ IndentBytes inserts prefix at the beginning of each non-empty line of b.\n\/\/ The end-of-line marker is NL.\nfunc IndentBytes(b, prefix []byte) []byte {\n\tvar res []byte\n\tbol := true\n\tfor _, c := range b {\n\t\tif bol && c != '\\n' {\n\t\t\tres = append(res, prefix...)\n\t\t}\n\t\tres = append(res, c)\n\t\tbol = c == '\\n'\n\t}\n\treturn res\n}\n\n\/\/ Tabs returns a string made of depth tab characters.\nfunc Tabs(depth int) string {\n\tvar tabs string\n\tfor i := 0; i < depth; i++ {\n\t\ttabs += \"\\t\"\n\t}\n\t\/\/\treturn fmt.Sprintf(\"%d%s\", depth, tabs)\n\treturn tabs\n}\n\n\/\/ Add adds two integers and returns the sum of the two.\nfunc Add(a, b int) int { return a + b }\n\n\/\/ CanonicalTemplate returns the resource URI template as a format string suitable for use in the\n\/\/ fmt.Printf function family.\nfunc CanonicalTemplate(r *design.ResourceDefinition) string {\n\treturn design.WildcardRegex.ReplaceAllLiteralString(r.URITemplate(), \"\/%v\")\n}\n\n\/\/ CanonicalParams returns the list of parameter names needed to build the canonical href to the\n\/\/ resource. It returns nil if the resource does not have a canonical action.\nfunc CanonicalParams(r *design.ResourceDefinition) []string {\n\tvar params []string\n\tif ca := r.CanonicalAction(); ca != nil {\n\t\tif len(ca.Routes) > 0 {\n\t\t\tparams = ca.Routes[0].Params()\n\t\t}\n\t\tfor i, p := range params {\n\t\t\tparams[i] = Goify(p, false)\n\t\t}\n\t}\n\treturn params\n}\n\n\/\/ Casing exceptions\nvar toLower = map[string]string{\"OAuth\": \"oauth\"}\n\n\/\/ SnakeCase produces the snake_case version of the given CamelCase string.\nfunc SnakeCase(name string) string {\n\tfor u, l := range toLower {\n\t\tname = strings.Replace(name, u, l, -1)\n\t}\n\tvar b bytes.Buffer\n\tvar lastUnderscore bool\n\tln := len(name)\n\tif ln == 0 {\n\t\treturn \"\"\n\t}\n\tb.WriteRune(unicode.ToLower(rune(name[0])))\n\tfor i := 1; i < ln; i++ {\n\t\tr := rune(name[i])\n\t\tnextIsLower := false\n\t\tif i < ln-1 {\n\t\t\tn := rune(name[i+1])\n\t\t\tnextIsLower = unicode.IsLower(n) && unicode.IsLetter(n)\n\t\t}\n\t\tif unicode.IsUpper(r) {\n\t\t\tif !lastUnderscore && nextIsLower {\n\t\t\t\tb.WriteRune('_')\n\t\t\t\tlastUnderscore = true\n\t\t\t}\n\t\t\tb.WriteRune(unicode.ToLower(r))\n\t\t} else {\n\t\t\tb.WriteRune(r)\n\t\t\tlastUnderscore = false\n\t\t}\n\t}\n\treturn b.String()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"google.golang.org\/grpc\"\n\n\tpb \"github.com\/brotherlogic\/cardserver\/card\"\n)\n\n\/\/ DoRegister Registers this server\nfunc (s *Server) DoRegister(server *grpc.Server) {\n\tpb.RegisterCardServiceServer(server, s)\n}\n\n\/\/ ReportHealth Determines if the server is healthy\nfunc (s *Server) ReportHealth() bool {\n\treturn true\n}\n\n\/\/ SaveCardList stores the cardlist\nfunc (s *Server) SaveCardList() {\n\ts.Save(\"github.com\/brotherlogic\/cardserver\/cards\", s.cards)\n}\n\nfunc main() {\n\tserver := InitServer()\n\tserver.PrepServer()\n\tserver.RegisterServer(\"cardserver\", false)\n\tserver.Serve()\n}\n<commit_msg>Added client save<commit_after>package main\n\nimport (\n\t\"github.com\/brotherlogic\/keystore\/client\"\n\t\"google.golang.org\/grpc\"\n\n\tpb \"github.com\/brotherlogic\/cardserver\/card\"\n)\n\n\/\/ DoRegister Registers this server\nfunc (s *Server) DoRegister(server *grpc.Server) {\n\tpb.RegisterCardServiceServer(server, s)\n}\n\n\/\/ ReportHealth Determines if the server is healthy\nfunc (s *Server) ReportHealth() bool {\n\treturn true\n}\n\n\/\/ SaveCardList stores the cardlist\nfunc (s *Server) SaveCardList() {\n\ts.Save(\"github.com\/brotherlogic\/cardserver\/cards\", s.cards)\n}\n\nfunc main() {\n\tserver := InitServer()\n\tserver.GoServer.KSclient = *keystoreclient.GetClient()\n\tserver.PrepServer()\n\tserver.RegisterServer(\"cardserver\", false)\n\tserver.Serve()\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\n\/\/ NetworkZonesPost represents the fields of a new LXD network zone\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: network_dns\ntype NetworkZonesPost struct {\n\tNetworkZonePut `yaml:\",inline\"`\n\n\t\/\/ The name of the zone (DNS domain name)\n\t\/\/ Example: example.net\n\tName string `json:\"name\" yaml:\"name\"`\n}\n\n\/\/ NetworkZonePut represents the modifiable fields of a LXD network zone\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: network_dns\ntype NetworkZonePut struct {\n\t\/\/ Description of the network zone\n\t\/\/ Example: Internal domain\n\tDescription string `json:\"description\" yaml:\"description\"`\n\n\t\/\/ Zone configuration map (refer to doc\/network-zones.md)\n\t\/\/ Example: {\"user.mykey\": \"foo\"}\n\tConfig map[string]string `json:\"config\" yaml:\"config\"`\n}\n\n\/\/ NetworkZone represents a network zone (DNS).\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: network_dns\ntype NetworkZone struct {\n\tNetworkZonePut `yaml:\",inline\"`\n\n\t\/\/ The name of the zone (DNS domain name)\n\t\/\/ Example: example.net\n\tName string `json:\"name\" yaml:\"name\"`\n\n\t\/\/ List of URLs of objects using this network zone\n\t\/\/ Read only: true\n\t\/\/ Example: [\"\/1.0\/networks\/foo\", \"\/1.0\/networks\/bar\"]\n\tUsedBy []string `json:\"used_by\" yaml:\"used_by\"` \/\/ Resources that use the zone.\n}\n\n\/\/ Writable converts a full NetworkZone struct into a NetworkZonePut struct (filters read-only fields).\nfunc (f *NetworkZone) Writable() NetworkZonePut {\n\treturn f.NetworkZonePut\n}\n<commit_msg>shared\/api: Add network zone record structs<commit_after>package api\n\n\/\/ NetworkZonesPost represents the fields of a new LXD network zone\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: network_dns\ntype NetworkZonesPost struct {\n\tNetworkZonePut `yaml:\",inline\"`\n\n\t\/\/ The name of the zone (DNS domain name)\n\t\/\/ Example: example.net\n\tName string `json:\"name\" yaml:\"name\"`\n}\n\n\/\/ NetworkZonePut represents the modifiable fields of a LXD network zone\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: network_dns\ntype NetworkZonePut struct {\n\t\/\/ Description of the network zone\n\t\/\/ Example: Internal domain\n\tDescription string `json:\"description\" yaml:\"description\"`\n\n\t\/\/ Zone configuration map (refer to doc\/network-zones.md)\n\t\/\/ Example: {\"user.mykey\": \"foo\"}\n\tConfig map[string]string `json:\"config\" yaml:\"config\"`\n}\n\n\/\/ NetworkZone represents a network zone (DNS).\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: network_dns\ntype NetworkZone struct {\n\tNetworkZonePut `yaml:\",inline\"`\n\n\t\/\/ The name of the zone (DNS domain name)\n\t\/\/ Example: example.net\n\tName string `json:\"name\" yaml:\"name\"`\n\n\t\/\/ List of URLs of objects using this network zone\n\t\/\/ Read only: true\n\t\/\/ Example: [\"\/1.0\/networks\/foo\", \"\/1.0\/networks\/bar\"]\n\tUsedBy []string `json:\"used_by\" yaml:\"used_by\"` \/\/ Resources that use the zone.\n}\n\n\/\/ Writable converts a full NetworkZone struct into a NetworkZonePut struct (filters read-only fields).\nfunc (f *NetworkZone) Writable() NetworkZonePut {\n\treturn f.NetworkZonePut\n}\n\n\/\/ NetworkZoneRecordsPost represents the fields of a new LXD network zone record\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: network_dns_records\ntype NetworkZoneRecordsPost struct {\n\tNetworkZoneRecordPut `yaml:\",inline\"`\n\n\t\/\/ The record name in the zone\n\t\/\/ Example: @\n\tName string `json:\"name\" yaml:\"name\"`\n}\n\n\/\/ NetworkZoneRecordPut represents the modifiable fields of a LXD network zone record\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: network_dns_records\ntype NetworkZoneRecordPut struct {\n\t\/\/ Description of the record\n\t\/\/ Example: SPF record\n\tDescription string `json:\"description\" yaml:\"description\"`\n\n\t\/\/ Entries in the record\n\tEntries []NetworkZoneRecordEntry `json:\"entries\" yaml:\"entries\"`\n\n\t\/\/ Advanced configuration for the record\n\t\/\/ Example: {\"user.mykey\": \"foo\"}\n\tConfig map[string]string `json:\"config\" yaml:\"config\"`\n}\n\n\/\/ NetworkZoneRecordEntry represents the fields in a record entry\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: network_dns_records\ntype NetworkZoneRecordEntry struct {\n\t\/\/ Type of DNS entry\n\t\/\/ Example: TXT\n\tType string `json:\"type\" yaml:\"type\"`\n\n\t\/\/ TTL for the entry\n\t\/\/ Example: 3600\n\tTTL uint64 `json:\"ttl,omitempty\" yaml:\"ttl,omitempty\"`\n\n\t\/\/ Value for the record\n\t\/\/ Example: v=spf1 mx ~all\n\tValue string `json:\"value\" yaml:\"value\"`\n}\n\n\/\/ NetworkZoneRecord represents a network zone (DNS) record.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: network_dns_records\ntype NetworkZoneRecord struct {\n\tNetworkZoneRecordPut `yaml:\",inline\"`\n\n\t\/\/ The name of the record\n\t\/\/ Example: @\n\tName string `json:\"name\" yaml:\"name\"`\n}\n\n\/\/ Writable converts a full NetworkZoneRecord struct into a NetworkZoneRecordPut struct (filters read-only fields).\nfunc (f *NetworkZoneRecord) Writable() NetworkZoneRecordPut {\n\treturn f.NetworkZoneRecordPut\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build medium\n\npackage image\n\nimport (\n\t\"context\"\n\t\"os\"\n\t\"testing\"\n\n\t\"code.cloudfoundry.org\/bytefmt\"\n\t\"github.com\/google\/go-cmp\/cmp\"\n\t\"github.com\/n0stack\/n0stack\/n0core\/pkg\/api\/provisioning\"\n\t\"github.com\/n0stack\/n0stack\/n0core\/pkg\/datastore\/memory\"\n\t\"github.com\/n0stack\/n0stack\/n0proto.go\/deployment\/v0\"\n\t\"github.com\/n0stack\/n0stack\/n0proto.go\/provisioning\/v0\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n)\n\nfunc getTestBlockStorageAPI() (pprovisioning.BlockStorageServiceClient, *grpc.ClientConn, error) {\n\tendpoint := \"\"\n\tif value, ok := os.LookupEnv(\"BLOCK_STORAGE_API_ENDPOINT\"); ok {\n\t\tendpoint = value\n\t} else {\n\t\tendpoint = \"localhost:20180\"\n\t}\n\n\tconn, err := grpc.Dial(endpoint, grpc.WithInsecure())\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn pprovisioning.NewBlockStorageServiceClient(conn), conn, nil\n}\n\nfunc TestEmptyImage(t *testing.T) {\n\tm := memory.NewMemoryDatastore()\n\tbsa, bsconn, err := getTestBlockStorageAPI()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to connect block storage api: err='%s'\", err.Error())\n\t}\n\tdefer bsconn.Close()\n\n\tia := CreateImageAPI(m, bsa)\n\n\tlistRes, err := ia.ListImages(context.Background(), &pdeployment.ListImagesRequest{})\n\tif err != nil && grpc.Code(err) != codes.NotFound {\n\t\tt.Errorf(\"ListImages got error, not NotFound: err='%s'\", err.Error())\n\t}\n\tif listRes != nil {\n\t\tt.Errorf(\"ListImages do not return nil: res='%s'\", listRes)\n\t}\n\n\tgetRes, err := ia.GetImage(context.Background(), &pdeployment.GetImageRequest{})\n\tif err != nil && grpc.Code(err) != codes.NotFound {\n\t\tt.Errorf(\"GetImage got error, not NotFound: err='%s'\", err.Error())\n\t}\n\tif getRes != nil {\n\t\tt.Errorf(\"GetImage do not return nil: res='%s'\", getRes)\n\t}\n}\n\nfunc TestApplyImage(t *testing.T) {\n\tm := memory.NewMemoryDatastore()\n\tbsa, bsconn, err := getTestBlockStorageAPI()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to connect block storage api: err='%s'\", err.Error())\n\t}\n\tdefer bsconn.Close()\n\n\tia := CreateImageAPI(m, bsa)\n\n\ti := &pdeployment.Image{\n\t\tName: \"test-network\",\n\t\tVersion: 1,\n\t}\n\n\tapplyRes, err := ia.ApplyImage(context.Background(), &pdeployment.ApplyImageRequest{\n\t\tName: i.Name,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"ApplyImage got error: err='%s'\", err.Error())\n\t}\n\t\/\/ diffが取れないので\n\tapplyRes.XXX_sizecache = 0\n\tif diff := cmp.Diff(i, applyRes); diff != \"\" {\n\t\tt.Fatalf(\"ApplyImage response is wrong: diff=(-want +got)\\n%s\", diff)\n\t}\n\n\tlistRes, err := ia.ListImages(context.Background(), &pdeployment.ListImagesRequest{})\n\tif err != nil {\n\t\tt.Errorf(\"ListImages got error: err='%s'\", err.Error())\n\t}\n\tif len(listRes.Images) != 1 {\n\t\tt.Errorf(\"ListImages response is wrong: have='%d', want='%d'\", len(listRes.Images), 1)\n\t}\n\n\tgetRes, err := ia.GetImage(context.Background(), &pdeployment.GetImageRequest{Name: i.Name})\n\tif err != nil {\n\t\tt.Errorf(\"GetImage got error: err='%s'\", err.Error())\n\t}\n\tif diff := cmp.Diff(i, getRes); diff != \"\" {\n\t\tt.Errorf(\"GetImage response is wrong: diff=(-want +got)\\n%s\", diff)\n\t}\n\n\tif _, err := ia.DeleteImage(context.Background(), &pdeployment.DeleteImageRequest{Name: i.Name}); err != nil {\n\t\tt.Errorf(\"DeleteImage got error: err='%s'\", err.Error())\n\t}\n}\n\nfunc TestImageAboutRegister(t *testing.T) {\n\tm := memory.NewMemoryDatastore()\n\tbsa, bsconn, err := getTestBlockStorageAPI()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to connect block storage api: err='%s'\", err.Error())\n\t}\n\tdefer bsconn.Close()\n\n\tia := CreateImageAPI(m, bsa)\n\n\ti := &pdeployment.Image{\n\t\tName: \"test-network\",\n\t\tVersion: 1,\n\t\tRegisteredBlockStorages: []*pdeployment.Image_RegisteredBlockStorage{\n\t\t\t{\n\t\t\t\tBlockStorageName: \"test-image\",\n\t\t\t},\n\t\t},\n\t\tTags: map[string]string{\n\t\t\t\"test-tag\": \"test-image\",\n\t\t},\n\t}\n\t_, err = ia.ApplyImage(context.Background(), &pdeployment.ApplyImageRequest{\n\t\tName: i.Name,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"ApplyImage got error: err='%s'\", err.Error())\n\t}\n\n\tbs := &pprovisioning.BlockStorage{\n\t\tName: \"test-image\",\n\t\tAnnotations: map[string]string{\n\t\t\tprovisioning.AnnotationRequestNodeName: \"mock-node\",\n\t\t},\n\t\tRequestBytes: 10 * bytefmt.MEGABYTE,\n\t\tLimitBytes: 1 * bytefmt.GIGABYTE,\n\t}\n\t_, err = bsa.CreateBlockStorage(context.Background(), &pprovisioning.CreateBlockStorageRequest{\n\t\tName: bs.Name,\n\t\tAnnotations: bs.Annotations,\n\t\tRequestBytes: bs.RequestBytes,\n\t\tLimitBytes: bs.LimitBytes,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create test-image on BlockStorageAPI got error: err='%s'\", err.Error())\n\t}\n\tdefer bsa.DeleteBlockStorage(context.Background(), &pprovisioning.DeleteBlockStorageRequest{Name: bs.Name})\n\tdefer bsa.SetAvailableBlockStorage(context.Background(), &pprovisioning.SetAvailableBlockStorageRequest{Name: bs.Name})\n\n\tregRes, err := ia.RegisterBlockStorage(context.Background(), &pdeployment.RegisterBlockStorageRequest{\n\t\tImageName: i.Name,\n\t\tBlockStorageName: bs.Name,\n\t\tTags: []string{\n\t\t\t\"test-tag\",\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"RegisterBlockStorage got error: err='%s'\", err.Error())\n\t}\n\tif len(regRes.RegisteredBlockStorages) != 1 {\n\t\tt.Errorf(\"RegisterBlockStorage response of len(RegisteredBlockStorages) is wrong: have=%d, want=%d\", len(regRes.RegisteredBlockStorages), 1)\n\t}\n\tregRes.XXX_sizecache = 0\n\tregRes.RegisteredBlockStorages[0].XXX_sizecache = 0\n\tregRes.RegisteredBlockStorages[0].RegisteredAt = nil\n\tif diff := cmp.Diff(i, regRes); diff != \"\" {\n\t\tt.Errorf(\"RegisterBlockStorage response is wrong: diff=(-want +got)\\n%s\", diff)\n\t}\n\n\trbs, err := bsa.GetBlockStorage(context.Background(), &pprovisioning.GetBlockStorageRequest{Name: bs.Name})\n\tif err != nil {\n\t\tt.Errorf(\"Failed to get test-image on BlockStorageAPI got error: err='%s'\", err.Error())\n\t}\n\tif rbs.State != pprovisioning.BlockStorage_PROTECTED {\n\t\tt.Errorf(\"BlockStorage 'test-image' state is wrong: have=%+v, want=%+v\", rbs.State, pprovisioning.BlockStorage_PROTECTED)\n\t}\n\n\tgenRes, err := ia.GenerateBlockStorage(context.Background(), &pdeployment.GenerateBlockStorageRequest{\n\t\tImageName: i.Name,\n\t\tTag: \"test-tag\",\n\t\tBlockStorageName: \"generated-image\",\n\t\t\/\/ Annotations: bs.Annotations,\n\t\tRequestBytes: 10 * bytefmt.MEGABYTE,\n\t\tLimitBytes: 10 * bytefmt.GIGABYTE,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"Failed to generate BlockStorageAPI got error: err='%s'\", err.Error())\n\t}\n\tdefer bsa.DeleteBlockStorage(context.Background(), &pprovisioning.DeleteBlockStorageRequest{Name: genRes.Name})\n\n\tunregRes, err := ia.UnregisterBlockStorage(context.Background(), &pdeployment.UnregisterBlockStorageRequest{\n\t\tImageName: i.Name,\n\t\tBlockStorageName: bs.Name,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"RegisterBlockStorage got error: err='%s'\", err.Error())\n\t}\n\tif len(unregRes.RegisteredBlockStorages) != 0 {\n\t\tt.Errorf(\"RegisterBlockStorage response of len(RegisteredBlockStorages) is wrong: have=%d, want=%d\", len(unregRes.RegisteredBlockStorages), 0)\n\t}\n\tif len(unregRes.Tags) != 0 {\n\t\tt.Errorf(\"RegisterBlockStorage response of len(Tags) is wrong: have=%d, want=%d\", len(unregRes.Tags), 0)\n\t}\n\n\trbs, err = bsa.GetBlockStorage(context.Background(), &pprovisioning.GetBlockStorageRequest{Name: bs.Name})\n\tif err != nil {\n\t\tt.Errorf(\"Failed to get test-image on BlockStorageAPI got error: err='%s'\", err.Error())\n\t}\n\tif rbs.State != pprovisioning.BlockStorage_AVAILABLE {\n\t\tt.Errorf(\"BlockStorage 'test-image' state is wrong: have=%+v, want=%+v\", rbs.State, pprovisioning.BlockStorage_AVAILABLE)\n\t}\n}\n\nfunc TestImageAboutTag(t *testing.T) {\n\tm := memory.NewMemoryDatastore()\n\tbsa, bsconn, err := getTestBlockStorageAPI()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to connect block storage api: err='%s'\", err.Error())\n\t}\n\tdefer bsconn.Close()\n\n\tia := CreateImageAPI(m, bsa)\n\n\ti := &pdeployment.Image{\n\t\tName: \"test-network\",\n\t\tVersion: 1,\n\t\tRegisteredBlockStorages: []*pdeployment.Image_RegisteredBlockStorage{\n\t\t\t{\n\t\t\t\tBlockStorageName: \"test-image\",\n\t\t\t},\n\t\t},\n\t\tTags: map[string]string{\n\t\t\t\"test-tag\": \"test-image\",\n\t\t},\n\t}\n\t_, err = ia.ApplyImage(context.Background(), &pdeployment.ApplyImageRequest{\n\t\tName: i.Name,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"ApplyImage got error: err='%s'\", err.Error())\n\t}\n\n\tbs := &pprovisioning.BlockStorage{\n\t\tName: \"test-image\",\n\t\tAnnotations: map[string]string{\n\t\t\tprovisioning.AnnotationRequestNodeName: \"mock-node\",\n\t\t},\n\t\tRequestBytes: 10 * bytefmt.MEGABYTE,\n\t\tLimitBytes: 1 * bytefmt.GIGABYTE,\n\t}\n\t_, err = bsa.CreateBlockStorage(context.Background(), &pprovisioning.CreateBlockStorageRequest{\n\t\tName: bs.Name,\n\t\tAnnotations: bs.Annotations,\n\t\tRequestBytes: bs.RequestBytes,\n\t\tLimitBytes: bs.LimitBytes,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create test-image on BlockStorageAPI got error: err='%s'\", err.Error())\n\t}\n\tdefer bsa.DeleteBlockStorage(context.Background(), &pprovisioning.DeleteBlockStorageRequest{Name: bs.Name})\n\tdefer bsa.SetAvailableBlockStorage(context.Background(), &pprovisioning.SetAvailableBlockStorageRequest{Name: bs.Name})\n\n\t_, err = ia.RegisterBlockStorage(context.Background(), &pdeployment.RegisterBlockStorageRequest{\n\t\tImageName: i.Name,\n\t\tBlockStorageName: bs.Name,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"RegisterBlockStorage got error: err='%s'\", err.Error())\n\t}\n\n\ttagRes, err := ia.TagImage(context.Background(), &pdeployment.TagImageRequest{\n\t\tName: i.Name,\n\t\tBlockStorageName: bs.Name,\n\t\tTags: []string{\n\t\t\t\"test-tag\",\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"TagBlockStorage got error: err='%s'\", err.Error())\n\t}\n\tif len(tagRes.Tags) != 1 {\n\t\tt.Errorf(\"TagBlockStorage response of len(Tags) is wrong: have=%d, want=%d\", len(tagRes.Tags), 0)\n\t}\n\ttagRes.XXX_sizecache = 0\n\ttagRes.RegisteredBlockStorages[0].XXX_sizecache = 0\n\ttagRes.RegisteredBlockStorages[0].RegisteredAt = nil\n\tif diff := cmp.Diff(i, tagRes); diff != \"\" {\n\t\tt.Errorf(\"TagBlockStorage response is wrong: diff=(-want +got)\\n%s\", diff)\n\t}\n\n\tuntagRes, err := ia.UntagImage(context.Background(), &pdeployment.UntagImageRequest{\n\t\tName: i.Name,\n\t\tTag: \"test-tag\",\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"UntagImage got error: err='%s'\", err.Error())\n\t}\n\tif len(untagRes.Tags) != 0 {\n\t\tt.Errorf(\"UntagImage response of len(Tags) is wrong: have=%d, want=%d\", len(untagRes.Tags), 0)\n\t}\n}\n<commit_msg>update image test about 6e6ece58193c35f3266b3ca96a3ebebfd7230ef5<commit_after>\/\/ +build medium\n\npackage image\n\nimport (\n\t\"context\"\n\t\"os\"\n\t\"testing\"\n\n\t\"code.cloudfoundry.org\/bytefmt\"\n\t\"github.com\/google\/go-cmp\/cmp\"\n\t\"github.com\/n0stack\/n0stack\/n0core\/pkg\/api\/provisioning\"\n\t\"github.com\/n0stack\/n0stack\/n0core\/pkg\/datastore\/memory\"\n\t\"github.com\/n0stack\/n0stack\/n0proto.go\/deployment\/v0\"\n\t\"github.com\/n0stack\/n0stack\/n0proto.go\/provisioning\/v0\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n)\n\nfunc getTestBlockStorageAPI() (pprovisioning.BlockStorageServiceClient, *grpc.ClientConn, error) {\n\tendpoint := \"\"\n\tif value, ok := os.LookupEnv(\"BLOCK_STORAGE_API_ENDPOINT\"); ok {\n\t\tendpoint = value\n\t} else {\n\t\tendpoint = \"localhost:20180\"\n\t}\n\n\tconn, err := grpc.Dial(endpoint, grpc.WithInsecure())\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn pprovisioning.NewBlockStorageServiceClient(conn), conn, nil\n}\n\nfunc TestEmptyImage(t *testing.T) {\n\tm := memory.NewMemoryDatastore()\n\tbsa, bsconn, err := getTestBlockStorageAPI()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to connect block storage api: err='%s'\", err.Error())\n\t}\n\tdefer bsconn.Close()\n\n\tia := CreateImageAPI(m, bsa)\n\n\tlistRes, err := ia.ListImages(context.Background(), &pdeployment.ListImagesRequest{})\n\tif err != nil && grpc.Code(err) != codes.NotFound {\n\t\tt.Errorf(\"ListImages got error, not NotFound: err='%s'\", err.Error())\n\t}\n\tif listRes != nil {\n\t\tt.Errorf(\"ListImages do not return nil: res='%s'\", listRes)\n\t}\n\n\tgetRes, err := ia.GetImage(context.Background(), &pdeployment.GetImageRequest{})\n\tif err != nil && grpc.Code(err) != codes.NotFound {\n\t\tt.Errorf(\"GetImage got error, not NotFound: err='%s'\", err.Error())\n\t}\n\tif getRes != nil {\n\t\tt.Errorf(\"GetImage do not return nil: res='%s'\", getRes)\n\t}\n}\n\nfunc TestApplyImage(t *testing.T) {\n\tm := memory.NewMemoryDatastore()\n\tbsa, bsconn, err := getTestBlockStorageAPI()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to connect block storage api: err='%s'\", err.Error())\n\t}\n\tdefer bsconn.Close()\n\n\tia := CreateImageAPI(m, bsa)\n\n\ti := &pdeployment.Image{\n\t\tName: \"test-network\",\n\t\tVersion: 1,\n\t}\n\n\tapplyRes, err := ia.ApplyImage(context.Background(), &pdeployment.ApplyImageRequest{\n\t\tName: i.Name,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"ApplyImage got error: err='%s'\", err.Error())\n\t}\n\t\/\/ diffが取れないので\n\tapplyRes.XXX_sizecache = 0\n\tif diff := cmp.Diff(i, applyRes); diff != \"\" {\n\t\tt.Fatalf(\"ApplyImage response is wrong: diff=(-want +got)\\n%s\", diff)\n\t}\n\n\tlistRes, err := ia.ListImages(context.Background(), &pdeployment.ListImagesRequest{})\n\tif err != nil {\n\t\tt.Errorf(\"ListImages got error: err='%s'\", err.Error())\n\t}\n\tif len(listRes.Images) != 1 {\n\t\tt.Errorf(\"ListImages response is wrong: have='%d', want='%d'\", len(listRes.Images), 1)\n\t}\n\n\tgetRes, err := ia.GetImage(context.Background(), &pdeployment.GetImageRequest{Name: i.Name})\n\tif err != nil {\n\t\tt.Errorf(\"GetImage got error: err='%s'\", err.Error())\n\t}\n\tif diff := cmp.Diff(i, getRes); diff != \"\" {\n\t\tt.Errorf(\"GetImage response is wrong: diff=(-want +got)\\n%s\", diff)\n\t}\n\n\tif _, err := ia.DeleteImage(context.Background(), &pdeployment.DeleteImageRequest{Name: i.Name}); err != nil {\n\t\tt.Errorf(\"DeleteImage got error: err='%s'\", err.Error())\n\t}\n}\n\nfunc TestImageAboutRegister(t *testing.T) {\n\tm := memory.NewMemoryDatastore()\n\tbsa, bsconn, err := getTestBlockStorageAPI()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to connect block storage api: err='%s'\", err.Error())\n\t}\n\tdefer bsconn.Close()\n\n\tia := CreateImageAPI(m, bsa)\n\n\ti := &pdeployment.Image{\n\t\tName: \"test-network\",\n\t\tVersion: 1,\n\t\tRegisteredBlockStorages: []*pdeployment.Image_RegisteredBlockStorage{\n\t\t\t{\n\t\t\t\tBlockStorageName: \"test-image\",\n\t\t\t},\n\t\t},\n\t\tTags: map[string]string{\n\t\t\t\"test-tag\": \"test-image\",\n\t\t},\n\t}\n\t_, err = ia.ApplyImage(context.Background(), &pdeployment.ApplyImageRequest{\n\t\tName: i.Name,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"ApplyImage got error: err='%s'\", err.Error())\n\t}\n\n\tbs := &pprovisioning.BlockStorage{\n\t\tName: \"test-image\",\n\t\tAnnotations: map[string]string{\n\t\t\tprovisioning.AnnotationRequestNodeName: \"mock-node\",\n\t\t},\n\t\tRequestBytes: 10 * bytefmt.MEGABYTE,\n\t\tLimitBytes: 1 * bytefmt.GIGABYTE,\n\t}\n\t_, err = bsa.CreateBlockStorage(context.Background(), &pprovisioning.CreateBlockStorageRequest{\n\t\tName: bs.Name,\n\t\tAnnotations: bs.Annotations,\n\t\tRequestBytes: bs.RequestBytes,\n\t\tLimitBytes: bs.LimitBytes,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create test-image on BlockStorageAPI got error: err='%s'\", err.Error())\n\t}\n\tdefer bsa.DeleteBlockStorage(context.Background(), &pprovisioning.DeleteBlockStorageRequest{Name: bs.Name})\n\tdefer bsa.SetAvailableBlockStorage(context.Background(), &pprovisioning.SetAvailableBlockStorageRequest{Name: bs.Name})\n\n\tregRes, err := ia.RegisterBlockStorage(context.Background(), &pdeployment.RegisterBlockStorageRequest{\n\t\tImageName: i.Name,\n\t\tBlockStorageName: bs.Name,\n\t\tTags: []string{\n\t\t\t\"test-tag\",\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"RegisterBlockStorage got error: err='%s'\", err.Error())\n\t}\n\tif len(regRes.RegisteredBlockStorages) != 1 {\n\t\tt.Errorf(\"RegisterBlockStorage response of len(RegisteredBlockStorages) is wrong: have=%d, want=%d\", len(regRes.RegisteredBlockStorages), 1)\n\t}\n\tregRes.XXX_sizecache = 0\n\tregRes.RegisteredBlockStorages[0].XXX_sizecache = 0\n\tregRes.RegisteredBlockStorages[0].RegisteredAt = nil\n\tif diff := cmp.Diff(i, regRes); diff != \"\" {\n\t\tt.Errorf(\"RegisterBlockStorage response is wrong: diff=(-want +got)\\n%s\", diff)\n\t}\n\n\trbs, err := bsa.GetBlockStorage(context.Background(), &pprovisioning.GetBlockStorageRequest{Name: bs.Name})\n\tif err != nil {\n\t\tt.Errorf(\"Failed to get test-image on BlockStorageAPI got error: err='%s'\", err.Error())\n\t}\n\tif rbs.State != pprovisioning.BlockStorage_PROTECTED {\n\t\tt.Errorf(\"BlockStorage 'test-image' state is wrong: have=%+v, want=%+v\", rbs.State, pprovisioning.BlockStorage_PROTECTED)\n\t}\n\n\tgenRes, err := ia.GenerateBlockStorage(context.Background(), &pdeployment.GenerateBlockStorageRequest{\n\t\tImageName: i.Name,\n\t\tTag: \"test-tag\",\n\t\tBlockStorageName: \"generated-image\",\n\t\tAnnotations: bs.Annotations,\n\t\tRequestBytes: 10 * bytefmt.MEGABYTE,\n\t\tLimitBytes: 10 * bytefmt.GIGABYTE,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"Failed to generate BlockStorageAPI got error: err='%s'\", err.Error())\n\t}\n\tdefer bsa.DeleteBlockStorage(context.Background(), &pprovisioning.DeleteBlockStorageRequest{Name: genRes.Name})\n\n\tunregRes, err := ia.UnregisterBlockStorage(context.Background(), &pdeployment.UnregisterBlockStorageRequest{\n\t\tImageName: i.Name,\n\t\tBlockStorageName: bs.Name,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"RegisterBlockStorage got error: err='%s'\", err.Error())\n\t}\n\tif len(unregRes.RegisteredBlockStorages) != 0 {\n\t\tt.Errorf(\"RegisterBlockStorage response of len(RegisteredBlockStorages) is wrong: have=%d, want=%d\", len(unregRes.RegisteredBlockStorages), 0)\n\t}\n\tif len(unregRes.Tags) != 0 {\n\t\tt.Errorf(\"RegisterBlockStorage response of len(Tags) is wrong: have=%d, want=%d\", len(unregRes.Tags), 0)\n\t}\n\n\trbs, err = bsa.GetBlockStorage(context.Background(), &pprovisioning.GetBlockStorageRequest{Name: bs.Name})\n\tif err != nil {\n\t\tt.Errorf(\"Failed to get test-image on BlockStorageAPI got error: err='%s'\", err.Error())\n\t}\n\tif rbs.State != pprovisioning.BlockStorage_AVAILABLE {\n\t\tt.Errorf(\"BlockStorage 'test-image' state is wrong: have=%+v, want=%+v\", rbs.State, pprovisioning.BlockStorage_AVAILABLE)\n\t}\n}\n\nfunc TestImageAboutTag(t *testing.T) {\n\tm := memory.NewMemoryDatastore()\n\tbsa, bsconn, err := getTestBlockStorageAPI()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to connect block storage api: err='%s'\", err.Error())\n\t}\n\tdefer bsconn.Close()\n\n\tia := CreateImageAPI(m, bsa)\n\n\ti := &pdeployment.Image{\n\t\tName: \"test-network\",\n\t\tVersion: 1,\n\t\tRegisteredBlockStorages: []*pdeployment.Image_RegisteredBlockStorage{\n\t\t\t{\n\t\t\t\tBlockStorageName: \"test-image\",\n\t\t\t},\n\t\t},\n\t\tTags: map[string]string{\n\t\t\t\"test-tag\": \"test-image\",\n\t\t},\n\t}\n\t_, err = ia.ApplyImage(context.Background(), &pdeployment.ApplyImageRequest{\n\t\tName: i.Name,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"ApplyImage got error: err='%s'\", err.Error())\n\t}\n\n\tbs := &pprovisioning.BlockStorage{\n\t\tName: \"test-image\",\n\t\tAnnotations: map[string]string{\n\t\t\tprovisioning.AnnotationRequestNodeName: \"mock-node\",\n\t\t},\n\t\tRequestBytes: 10 * bytefmt.MEGABYTE,\n\t\tLimitBytes: 1 * bytefmt.GIGABYTE,\n\t}\n\t_, err = bsa.CreateBlockStorage(context.Background(), &pprovisioning.CreateBlockStorageRequest{\n\t\tName: bs.Name,\n\t\tAnnotations: bs.Annotations,\n\t\tRequestBytes: bs.RequestBytes,\n\t\tLimitBytes: bs.LimitBytes,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create test-image on BlockStorageAPI got error: err='%s'\", err.Error())\n\t}\n\tdefer bsa.DeleteBlockStorage(context.Background(), &pprovisioning.DeleteBlockStorageRequest{Name: bs.Name})\n\tdefer bsa.SetAvailableBlockStorage(context.Background(), &pprovisioning.SetAvailableBlockStorageRequest{Name: bs.Name})\n\n\t_, err = ia.RegisterBlockStorage(context.Background(), &pdeployment.RegisterBlockStorageRequest{\n\t\tImageName: i.Name,\n\t\tBlockStorageName: bs.Name,\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"RegisterBlockStorage got error: err='%s'\", err.Error())\n\t}\n\n\ttagRes, err := ia.TagImage(context.Background(), &pdeployment.TagImageRequest{\n\t\tName: i.Name,\n\t\tBlockStorageName: bs.Name,\n\t\tTags: []string{\n\t\t\t\"test-tag\",\n\t\t},\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"TagBlockStorage got error: err='%s'\", err.Error())\n\t}\n\tif len(tagRes.Tags) != 1 {\n\t\tt.Errorf(\"TagBlockStorage response of len(Tags) is wrong: have=%d, want=%d\", len(tagRes.Tags), 0)\n\t}\n\ttagRes.XXX_sizecache = 0\n\ttagRes.RegisteredBlockStorages[0].XXX_sizecache = 0\n\ttagRes.RegisteredBlockStorages[0].RegisteredAt = nil\n\tif diff := cmp.Diff(i, tagRes); diff != \"\" {\n\t\tt.Errorf(\"TagBlockStorage response is wrong: diff=(-want +got)\\n%s\", diff)\n\t}\n\n\tuntagRes, err := ia.UntagImage(context.Background(), &pdeployment.UntagImageRequest{\n\t\tName: i.Name,\n\t\tTag: \"test-tag\",\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"UntagImage got error: err='%s'\", err.Error())\n\t}\n\tif len(untagRes.Tags) != 0 {\n\t\tt.Errorf(\"UntagImage response of len(Tags) is wrong: have=%d, want=%d\", len(untagRes.Tags), 0)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2015 The Syncthing Authors.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage connections\n\nimport (\n\t\"crypto\/tls\"\n\t\"net\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/syncthing\/syncthing\/lib\/dialer\"\n\t\"github.com\/syncthing\/syncthing\/lib\/model\"\n\t\"github.com\/syncthing\/syncthing\/lib\/osutil\"\n)\n\nfunc init() {\n\tdialers[\"tcp\"] = tcpDialer\n\tlisteners[\"tcp\"] = tcpListener\n}\n\nfunc tcpDialer(uri *url.URL, tlsCfg *tls.Config) (*tls.Conn, error) {\n\thost, port, err := net.SplitHostPort(uri.Host)\n\tif err != nil && strings.HasPrefix(err.Error(), \"missing port\") {\n\t\t\/\/ addr is on the form \"1.2.3.4\"\n\t\turi.Host = net.JoinHostPort(uri.Host, \"22000\")\n\t} else if err == nil && port == \"\" {\n\t\t\/\/ addr is on the form \"1.2.3.4:\"\n\t\turi.Host = net.JoinHostPort(host, \"22000\")\n\t}\n\n\traddr, err := net.ResolveTCPAddr(\"tcp\", uri.Host)\n\tif err != nil {\n\t\tl.Debugln(err)\n\t\treturn nil, err\n\t}\n\n\tconn, err := dialer.Dial(raddr.Network(), raddr.String())\n\tif err != nil {\n\t\tl.Debugln(err)\n\t\treturn nil, err\n\t}\n\n\ttc := tls.Client(conn, tlsCfg)\n\terr = tc.Handshake()\n\tif err != nil {\n\t\ttc.Close()\n\t\treturn nil, err\n\t}\n\n\treturn tc, nil\n}\n\nfunc tcpListener(uri *url.URL, tlsCfg *tls.Config, conns chan<- model.IntermediateConnection) {\n\ttcaddr, err := net.ResolveTCPAddr(\"tcp\", uri.Host)\n\tif err != nil {\n\t\tl.Fatalln(\"listen (BEP\/tcp):\", err)\n\t\treturn\n\t}\n\tlistener, err := net.ListenTCP(\"tcp\", tcaddr)\n\tif err != nil {\n\t\tl.Fatalln(\"listen (BEP\/tcp):\", err)\n\t\treturn\n\t}\n\n\tfor {\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tl.Warnln(\"Accepting connection (BEP\/tcp):\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tl.Debugln(\"connect from\", conn.RemoteAddr())\n\n\t\terr = osutil.SetTCPOptions(conn.(*net.TCPConn))\n\t\tif err != nil {\n\t\t\tl.Infoln(err)\n\t\t}\n\n\t\ttc := tls.Server(conn, tlsCfg)\n\t\terr = tc.Handshake()\n\t\tif err != nil {\n\t\t\tl.Infoln(\"TLS handshake (BEP\/tcp):\", err)\n\t\t\ttc.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\tconns <- model.IntermediateConnection{\n\t\t\ttc, model.ConnectionTypeDirectAccept,\n\t\t}\n\t}\n}\n<commit_msg>Don't resolve destination address until we need to (fixes #2671)<commit_after>\/\/ Copyright (C) 2015 The Syncthing Authors.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage connections\n\nimport (\n\t\"crypto\/tls\"\n\t\"net\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/syncthing\/syncthing\/lib\/dialer\"\n\t\"github.com\/syncthing\/syncthing\/lib\/model\"\n\t\"github.com\/syncthing\/syncthing\/lib\/osutil\"\n)\n\nfunc init() {\n\tdialers[\"tcp\"] = tcpDialer\n\tlisteners[\"tcp\"] = tcpListener\n}\n\nfunc tcpDialer(uri *url.URL, tlsCfg *tls.Config) (*tls.Conn, error) {\n\t\/\/ Check that there is a port number in uri.Host, otherwise add one.\n\thost, port, err := net.SplitHostPort(uri.Host)\n\tif err != nil && strings.HasPrefix(err.Error(), \"missing port\") {\n\t\t\/\/ addr is on the form \"1.2.3.4\"\n\t\turi.Host = net.JoinHostPort(uri.Host, \"22000\")\n\t} else if err == nil && port == \"\" {\n\t\t\/\/ addr is on the form \"1.2.3.4:\"\n\t\turi.Host = net.JoinHostPort(host, \"22000\")\n\t}\n\n\t\/\/ Don't try to resolve the address before dialing. The dialer may be a\n\t\/\/ proxy, and we should let the proxy do the resolving in that case.\n\tconn, err := dialer.Dial(\"tcp\", uri.Host)\n\tif err != nil {\n\t\tl.Debugln(err)\n\t\treturn nil, err\n\t}\n\n\ttc := tls.Client(conn, tlsCfg)\n\terr = tc.Handshake()\n\tif err != nil {\n\t\ttc.Close()\n\t\treturn nil, err\n\t}\n\n\treturn tc, nil\n}\n\nfunc tcpListener(uri *url.URL, tlsCfg *tls.Config, conns chan<- model.IntermediateConnection) {\n\ttcaddr, err := net.ResolveTCPAddr(\"tcp\", uri.Host)\n\tif err != nil {\n\t\tl.Fatalln(\"listen (BEP\/tcp):\", err)\n\t\treturn\n\t}\n\tlistener, err := net.ListenTCP(\"tcp\", tcaddr)\n\tif err != nil {\n\t\tl.Fatalln(\"listen (BEP\/tcp):\", err)\n\t\treturn\n\t}\n\n\tfor {\n\t\tconn, err := listener.Accept()\n\t\tif err != nil {\n\t\t\tl.Warnln(\"Accepting connection (BEP\/tcp):\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tl.Debugln(\"connect from\", conn.RemoteAddr())\n\n\t\terr = osutil.SetTCPOptions(conn.(*net.TCPConn))\n\t\tif err != nil {\n\t\t\tl.Infoln(err)\n\t\t}\n\n\t\ttc := tls.Server(conn, tlsCfg)\n\t\terr = tc.Handshake()\n\t\tif err != nil {\n\t\t\tl.Infoln(\"TLS handshake (BEP\/tcp):\", err)\n\t\t\ttc.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\tconns <- model.IntermediateConnection{\n\t\t\ttc, model.ConnectionTypeDirectAccept,\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package ndpcmd\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/mdlayher\/ndp\"\n)\n\nfunc sendNS(ctx context.Context, c *ndp.Conn, addr net.HardwareAddr, target net.IP) error {\n\tll := log.New(os.Stderr, \"ndp ns> \", 0)\n\n\tll.Printf(\"neighbor solicitation:\\n - source link-layer address: %s\", addr.String())\n\n\tm := &ndp.NeighborSolicitation{\n\t\tTargetAddress: target,\n\t\tOptions: []ndp.Option{\n\t\t\t&ndp.LinkLayerAddress{\n\t\t\t\tDirection: ndp.Source,\n\t\t\t\tAddr: addr,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i := 0; ; i++ {\n\t\tif err := c.WriteTo(m, nil, target); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write neighbor solicitation: %v\", err)\n\t\t}\n\n\t\tna, from, err := receiveNA(c)\n\t\tif err == nil {\n\t\t\tfmt.Println()\n\t\t\tprintNA(ll, na, from)\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ Was the context canceled already?\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tfmt.Println()\n\t\t\tll.Printf(\"sent %d neighbor solicitation(s)\", i+1)\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t}\n\n\t\t\/\/ Was the error caused by a read timeout, and should the loop continue?\n\t\tif nerr, ok := err.(net.Error); ok && nerr.Timeout() {\n\t\t\tfmt.Print(\".\")\n\t\t\tcontinue\n\t\t}\n\n\t\treturn fmt.Errorf(\"failed to read neighbor advertisement: %v\", err)\n\t}\n}\n\nfunc receiveNA(c *ndp.Conn) (*ndp.NeighborAdvertisement, net.IP, error) {\n\tif err := c.SetReadDeadline(time.Now().Add(1 * time.Second)); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tfor {\n\t\tmsg, _, from, err := c.ReadFrom()\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tna, ok := msg.(*ndp.NeighborAdvertisement)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn na, from, nil\n\t}\n}\n<commit_msg>internal\/ndpcmd: use solicited-node multicast for neighbor solicitation<commit_after>package ndpcmd\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/mdlayher\/ndp\"\n)\n\nfunc sendNS(ctx context.Context, c *ndp.Conn, addr net.HardwareAddr, target net.IP) error {\n\tll := log.New(os.Stderr, \"ndp ns> \", 0)\n\n\tll.Printf(\"neighbor solicitation:\\n - source link-layer address: %s\", addr.String())\n\n\t\/\/ Always multicast the message to the target's solicited-node multicast\n\t\/\/ group as if we have no knowledge of its MAC address.\n\tsnm, err := ndp.SolicitedNodeMulticast(target)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to determine solicited-node multicast address: %v\", err)\n\t}\n\n\tm := &ndp.NeighborSolicitation{\n\t\tTargetAddress: target,\n\t\tOptions: []ndp.Option{\n\t\t\t&ndp.LinkLayerAddress{\n\t\t\t\tDirection: ndp.Source,\n\t\t\t\tAddr: addr,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i := 0; ; i++ {\n\t\tif err := c.WriteTo(m, nil, snm); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write neighbor solicitation: %v\", err)\n\t\t}\n\n\t\tna, from, err := receiveNA(c)\n\t\tif err == nil {\n\t\t\tfmt.Println()\n\t\t\tprintNA(ll, na, from)\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ Was the context canceled already?\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tfmt.Println()\n\t\t\tll.Printf(\"sent %d neighbor solicitation(s)\", i+1)\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t}\n\n\t\t\/\/ Was the error caused by a read timeout, and should the loop continue?\n\t\tif nerr, ok := err.(net.Error); ok && nerr.Timeout() {\n\t\t\tfmt.Print(\".\")\n\t\t\tcontinue\n\t\t}\n\n\t\treturn fmt.Errorf(\"failed to read neighbor advertisement: %v\", err)\n\t}\n}\n\nfunc receiveNA(c *ndp.Conn) (*ndp.NeighborAdvertisement, net.IP, error) {\n\tif err := c.SetReadDeadline(time.Now().Add(1 * time.Second)); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tfor {\n\t\tmsg, _, from, err := c.ReadFrom()\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tna, ok := msg.(*ndp.NeighborAdvertisement)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn na, from, nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package tpl\n\nimport (\n\t\"context\"\n\t\"crypto\/md5\"\n\t\"crypto\/sha1\"\n\t\"fmt\"\n\t\"text\/template\"\n\n\t\"github.com\/jsimonetti\/pwscheme\/md5crypt\"\n\t\"github.com\/jsimonetti\/pwscheme\/ssha\"\n\t\"github.com\/jsimonetti\/pwscheme\/ssha256\"\n\t\"github.com\/jsimonetti\/pwscheme\/ssha512\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ These constants defined the template function names used\nconst (\n\tFuncMd5sum = \"md5sum\"\n\tFuncSha1sum = \"sha1sum\"\n\tFuncMd5Crypt = \"md5crypt\"\n\tFuncSSHA = \"ssha\"\n\tFuncSSHA256 = \"ssha256\"\n\tFuncSSHA512 = \"ssha512\"\n\tFuncGet = \"get\"\n\tFuncGetPassword = \"getpw\"\n\tFuncGetValue = \"getval\"\n)\n\nfunc md5sum() func(...string) (string, error) {\n\treturn func(s ...string) (string, error) {\n\t\treturn fmt.Sprintf(\"%x\", md5.Sum([]byte(s[0]))), nil\n\t}\n}\n\nfunc sha1sum() func(...string) (string, error) {\n\treturn func(s ...string) (string, error) {\n\t\treturn fmt.Sprintf(\"%x\", sha1.Sum([]byte(s[0]))), nil\n\t}\n}\n\nfunc md5cryptFunc() func(...string) (string, error) {\n\treturn func(s ...string) (string, error) {\n\t\treturn md5crypt.Generate(s[0], 4)\n\t}\n}\n\nfunc sshaFunc() func(...string) (string, error) {\n\treturn func(s ...string) (string, error) {\n\t\treturn ssha.Generate(s[0], 4)\n\t}\n}\n\nfunc ssha256Func() func(...string) (string, error) {\n\treturn func(s ...string) (string, error) {\n\t\treturn ssha256.Generate(s[0], 4)\n\t}\n}\n\nfunc ssha512Func() func(...string) (string, error) {\n\treturn func(s ...string) (string, error) {\n\t\treturn ssha512.Generate(s[0], 4)\n\t}\n}\n\nfunc get(ctx context.Context, kv kvstore) func(...string) (string, error) {\n\treturn func(s ...string) (string, error) {\n\t\tif len(s) < 1 {\n\t\t\treturn \"\", nil\n\t\t}\n\t\tif kv == nil {\n\t\t\treturn \"\", errors.Errorf(\"KV is nil\")\n\t\t}\n\t\tsec, err := kv.Get(ctx, s[0])\n\t\tif err != nil {\n\t\t\treturn err.Error(), nil\n\t\t}\n\t\treturn string(sec.Bytes()), nil\n\t}\n}\n\nfunc getPassword(ctx context.Context, kv kvstore) func(...string) (string, error) {\n\treturn func(s ...string) (string, error) {\n\t\tif len(s) < 1 {\n\t\t\treturn \"\", nil\n\t\t}\n\t\tif kv == nil {\n\t\t\treturn \"\", errors.Errorf(\"KV is nil\")\n\t\t}\n\t\tsec, err := kv.Get(ctx, s[0])\n\t\tif err != nil {\n\t\t\treturn err.Error(), nil\n\t\t}\n\t\treturn sec.Password(), nil\n\t}\n}\n\nfunc getValue(ctx context.Context, kv kvstore) func(...string) (string, error) {\n\treturn func(s ...string) (string, error) {\n\t\tif len(s) < 2 {\n\t\t\treturn \"\", nil\n\t\t}\n\t\tif kv == nil {\n\t\t\treturn \"\", errors.Errorf(\"KV is nil\")\n\t\t}\n\t\tsec, err := kv.Get(ctx, s[0])\n\t\tif err != nil {\n\t\t\treturn err.Error(), nil\n\t\t}\n\t\tsv, found := sec.Get(s[1])\n\t\tif !found {\n\t\t\treturn \"\", fmt.Errorf(\"key %q not found\", s[1])\n\t\t}\n\t\treturn sv, nil\n\t}\n}\n\nfunc funcMap(ctx context.Context, kv kvstore) template.FuncMap {\n\treturn template.FuncMap{\n\t\tFuncGet: get(ctx, kv),\n\t\tFuncGetPassword: getPassword(ctx, kv),\n\t\tFuncGetValue: getValue(ctx, kv),\n\t\tFuncMd5sum: md5sum(),\n\t\tFuncSha1sum: sha1sum(),\n\t\tFuncMd5Crypt: md5cryptFunc(),\n\t\tFuncSSHA: sshaFunc(),\n\t\tFuncSSHA256: ssha256Func(),\n\t\tFuncSSHA512: ssha512Func(),\n\t}\n}\n<commit_msg>Use 32 byte salt by default (#1690)<commit_after>package tpl\n\nimport (\n\t\"context\"\n\t\"crypto\/md5\"\n\t\"crypto\/sha1\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"text\/template\"\n\n\t\"github.com\/gopasspw\/gopass\/internal\/debug\"\n\t\"github.com\/jsimonetti\/pwscheme\/md5crypt\"\n\t\"github.com\/jsimonetti\/pwscheme\/ssha\"\n\t\"github.com\/jsimonetti\/pwscheme\/ssha256\"\n\t\"github.com\/jsimonetti\/pwscheme\/ssha512\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ These constants defined the template function names used\nconst (\n\tFuncMd5sum = \"md5sum\"\n\tFuncSha1sum = \"sha1sum\"\n\tFuncMd5Crypt = \"md5crypt\"\n\tFuncSSHA = \"ssha\"\n\tFuncSSHA256 = \"ssha256\"\n\tFuncSSHA512 = \"ssha512\"\n\tFuncGet = \"get\"\n\tFuncGetPassword = \"getpw\"\n\tFuncGetValue = \"getval\"\n)\n\nfunc md5sum() func(...string) (string, error) {\n\treturn func(s ...string) (string, error) {\n\t\treturn fmt.Sprintf(\"%x\", md5.Sum([]byte(s[0]))), nil\n\t}\n}\n\nfunc sha1sum() func(...string) (string, error) {\n\treturn func(s ...string) (string, error) {\n\t\treturn fmt.Sprintf(\"%x\", sha1.Sum([]byte(s[0]))), nil\n\t}\n}\n\n\/\/ saltLen tries to parse the given string into a numeric salt length.\n\/\/ NOTE: This is on of the rare cases where I think named returns\n\/\/ are useful.\nfunc saltLen(s []string) (saltLen int) {\n\tdefer func() {\n\t\tdebug.Log(\"using saltLen %d\", saltLen)\n\t}()\n\n\t\/\/ default should be 32bit\n\tsaltLen = 32\n\n\tif len(s) < 2 {\n\t\treturn\n\t}\n\n\ti, err := strconv.Atoi(s[0])\n\tif err == nil && i > 0 {\n\t\tsaltLen = i\n\t}\n\tif err != nil {\n\t\tdebug.Log(\"failed to parse saltLen %+v: %q\", s, err)\n\t}\n\treturn\n}\n\nfunc md5cryptFunc() func(...string) (string, error) {\n\treturn func(s ...string) (string, error) {\n\t\treturn md5crypt.Generate(s[0], uint8(saltLen(s)))\n\t}\n}\n\nfunc sshaFunc() func(...string) (string, error) {\n\treturn func(s ...string) (string, error) {\n\t\treturn ssha.Generate(s[0], uint8(saltLen(s)))\n\t}\n}\n\nfunc ssha256Func() func(...string) (string, error) {\n\treturn func(s ...string) (string, error) {\n\t\treturn ssha256.Generate(s[0], uint8(saltLen(s)))\n\t}\n}\n\nfunc ssha512Func() func(...string) (string, error) {\n\treturn func(s ...string) (string, error) {\n\t\treturn ssha512.Generate(s[0], uint8(saltLen(s)))\n\t}\n}\n\nfunc get(ctx context.Context, kv kvstore) func(...string) (string, error) {\n\treturn func(s ...string) (string, error) {\n\t\tif len(s) < 1 {\n\t\t\treturn \"\", nil\n\t\t}\n\t\tif kv == nil {\n\t\t\treturn \"\", errors.Errorf(\"KV is nil\")\n\t\t}\n\t\tsec, err := kv.Get(ctx, s[0])\n\t\tif err != nil {\n\t\t\treturn err.Error(), nil\n\t\t}\n\t\treturn string(sec.Bytes()), nil\n\t}\n}\n\nfunc getPassword(ctx context.Context, kv kvstore) func(...string) (string, error) {\n\treturn func(s ...string) (string, error) {\n\t\tif len(s) < 1 {\n\t\t\treturn \"\", nil\n\t\t}\n\t\tif kv == nil {\n\t\t\treturn \"\", errors.Errorf(\"KV is nil\")\n\t\t}\n\t\tsec, err := kv.Get(ctx, s[0])\n\t\tif err != nil {\n\t\t\treturn err.Error(), nil\n\t\t}\n\t\treturn sec.Password(), nil\n\t}\n}\n\nfunc getValue(ctx context.Context, kv kvstore) func(...string) (string, error) {\n\treturn func(s ...string) (string, error) {\n\t\tif len(s) < 2 {\n\t\t\treturn \"\", nil\n\t\t}\n\t\tif kv == nil {\n\t\t\treturn \"\", errors.Errorf(\"KV is nil\")\n\t\t}\n\t\tsec, err := kv.Get(ctx, s[0])\n\t\tif err != nil {\n\t\t\treturn err.Error(), nil\n\t\t}\n\t\tsv, found := sec.Get(s[1])\n\t\tif !found {\n\t\t\treturn \"\", fmt.Errorf(\"key %q not found\", s[1])\n\t\t}\n\t\treturn sv, nil\n\t}\n}\n\nfunc funcMap(ctx context.Context, kv kvstore) template.FuncMap {\n\treturn template.FuncMap{\n\t\tFuncGet: get(ctx, kv),\n\t\tFuncGetPassword: getPassword(ctx, kv),\n\t\tFuncGetValue: getValue(ctx, kv),\n\t\tFuncMd5sum: md5sum(),\n\t\tFuncSha1sum: sha1sum(),\n\t\tFuncMd5Crypt: md5cryptFunc(),\n\t\tFuncSSHA: sshaFunc(),\n\t\tFuncSSHA256: ssha256Func(),\n\t\tFuncSSHA512: ssha512Func(),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package contractor\n\n\/\/ TODO: We are in the middle of migrating the contractor to a new concurrency\n\/\/ model. The contractor should never call out to another package while under a\n\/\/ lock (except for the proto package). This is because the renter is going to\n\/\/ start calling contractor methods while holding the renter lock, so we need to\n\/\/ be absolutely confident that no contractor thread will attempt to grab a\n\/\/ renter lock.\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\n\t\"github.com\/NebulousLabs\/Sia\/modules\"\n\t\"github.com\/NebulousLabs\/Sia\/modules\/renter\/proto\"\n\t\"github.com\/NebulousLabs\/Sia\/persist\"\n\tsiasync \"github.com\/NebulousLabs\/Sia\/sync\"\n\t\"github.com\/NebulousLabs\/Sia\/types\"\n)\n\nvar (\n\terrNilCS = errors.New(\"cannot create contractor with nil consensus set\")\n\terrNilTpool = errors.New(\"cannot create contractor with nil transaction pool\")\n\terrNilWallet = errors.New(\"cannot create contractor with nil wallet\")\n\n\t\/\/ COMPATv1.0.4-lts\n\t\/\/ metricsContractID identifies a special contract that contains aggregate\n\t\/\/ financial metrics from older contractors\n\tmetricsContractID = types.FileContractID{'m', 'e', 't', 'r', 'i', 'c', 's'}\n)\n\n\/\/ A Contractor negotiates, revises, renews, and provides access to file\n\/\/ contracts.\ntype Contractor struct {\n\t\/\/ dependencies\n\tcs consensusSet\n\thdb hostDB\n\tlog *persist.Logger\n\tmu sync.RWMutex\n\tpersist persister\n\tstaticDeps modules.Dependencies\n\ttg siasync.ThreadGroup\n\ttpool transactionPool\n\twallet wallet\n\n\t\/\/ Only one thread should be performing contract maintenance at a time.\n\tinterruptMaintenance chan struct{}\n\tmaintenanceLock siasync.TryMutex\n\n\tallowance modules.Allowance\n\tblockHeight types.BlockHeight\n\tcurrentPeriod types.BlockHeight\n\tlastChange modules.ConsensusChangeID\n\n\tdownloaders map[types.FileContractID]*hostDownloader\n\teditors map[types.FileContractID]*hostEditor\n\tnumFailedRenews map[types.FileContractID]types.BlockHeight\n\trenewing map[types.FileContractID]bool \/\/ prevent revising during renewal\n\trevising map[types.FileContractID]bool \/\/ prevent overlapping revisions\n\n\tstaticContracts *proto.ContractSet\n\toldContracts map[types.FileContractID]modules.RenterContract\n\trenewedIDs map[types.FileContractID]types.FileContractID\n}\n\n\/\/ readlockResolveID returns the ID of the most recent renewal of id.\nfunc (c *Contractor) readlockResolveID(id types.FileContractID) types.FileContractID {\n\tnewID, exists := c.renewedIDs[id]\n\tfor exists {\n\t\tid = newID\n\t\tnewID, exists = c.renewedIDs[id]\n\t}\n\treturn id\n}\n\n\/\/ Allowance returns the current allowance.\nfunc (c *Contractor) Allowance() modules.Allowance {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\treturn c.allowance\n}\n\n\/\/ PeriodSpending returns the amount spent on contracts during the current\n\/\/ billing period.\nfunc (c *Contractor) PeriodSpending() modules.ContractorSpending {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\tvar spending modules.ContractorSpending\n\tfor _, contract := range c.staticContracts.ViewAll() {\n\t\t\/\/ Calculate ContractFees\n\t\tspending.ContractFees = spending.ContractFees.Add(contract.ContractFee)\n\t\tspending.ContractFees = spending.ContractFees.Add(contract.TxnFee)\n\t\tspending.ContractFees = spending.ContractFees.Add(contract.SiafundFee)\n\t\t\/\/ Calculate TotalAllocated\n\t\tspending.TotalAllocated = spending.TotalAllocated.Add(contract.TotalCost)\n\t\tspending.ContractSpendingDeprecated = spending.TotalAllocated\n\t\t\/\/ Calculate Spending\n\t\tspending.DownloadSpending = spending.DownloadSpending.Add(contract.DownloadSpending)\n\t\tspending.UploadSpending = spending.UploadSpending.Add(contract.UploadSpending)\n\t\tspending.StorageSpending = spending.StorageSpending.Add(contract.StorageSpending)\n\t}\n\n\t\/\/ Calculate spending from contracts that were renewed during the current period\n\tfor _, old := range c.oldContracts {\n\t\tif old.EndHeight >= c.currentPeriod {\n\t\t\t\/\/ Calculate ContractFees\n\t\t\tspending.ContractFees = spending.ContractFees.Add(old.ContractFee)\n\t\t\tspending.ContractFees = spending.ContractFees.Add(old.TxnFee)\n\t\t\tspending.ContractFees = spending.ContractFees.Add(old.SiafundFee)\n\t\t\t\/\/ Calculate TotalAllocated\n\t\t\tspending.TotalAllocated = spending.TotalAllocated.Add(old.TotalCost)\n\t\t\t\/\/ Calculate Spending\n\t\t\tspending.DownloadSpending = spending.DownloadSpending.Add(old.DownloadSpending)\n\t\t\tspending.UploadSpending = spending.UploadSpending.Add(old.UploadSpending)\n\t\t\tspending.StorageSpending = spending.StorageSpending.Add(old.StorageSpending)\n\t\t}\n\t}\n\t\/\/ Calculate amount of spent money to get unspent money.\n\tallSpending := spending.ContractFees\n\tallSpending = allSpending.Add(spending.DownloadSpending)\n\tallSpending = allSpending.Add(spending.UploadSpending)\n\tallSpending = allSpending.Add(spending.StorageSpending)\n\tif c.allowance.Funds.Cmp(allSpending) >= 0 {\n\t\tspending.Unspent = c.allowance.Funds.Sub(allSpending)\n\t}\n\n\treturn spending\n}\n\n\/\/ ContractByID returns the contract with the id specified, if it exists. The\n\/\/ contract will be resolved if possible to the most recent child contract.\nfunc (c *Contractor) ContractByID(id types.FileContractID) (modules.RenterContract, bool) {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\treturn c.staticContracts.View(c.readlockResolveID(id))\n}\n\n\/\/ Contracts returns the contracts formed by the contractor in the current\n\/\/ allowance period. Only contracts formed with currently online hosts are\n\/\/ returned.\nfunc (c *Contractor) Contracts() []modules.RenterContract {\n\treturn c.staticContracts.ViewAll()\n}\n\n\/\/ ContractUtility returns the utility fields for the given contract.\nfunc (c *Contractor) ContractUtility(id types.FileContractID) (modules.ContractUtility, bool) {\n\treturn c.managedContractUtility(id)\n}\n\n\/\/ CurrentPeriod returns the height at which the current allowance period\n\/\/ began.\nfunc (c *Contractor) CurrentPeriod() types.BlockHeight {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\treturn c.currentPeriod\n}\n\n\/\/ ResolveID returns the ID of the most recent renewal of id.\nfunc (c *Contractor) ResolveID(id types.FileContractID) types.FileContractID {\n\tc.mu.RLock()\n\tnewID := c.readlockResolveID(id)\n\tc.mu.RUnlock()\n\treturn newID\n}\n\n\/\/ RateLimits sets the bandwidth limits for connections created by the\n\/\/ contractSet.\nfunc (c *Contractor) RateLimits() (readBPW int64, writeBPS int64, packetSize uint64) {\n\treturn c.staticContracts.RateLimits()\n}\n\n\/\/ SetRateLimits sets the bandwidth limits for connections created by the\n\/\/ contractSet.\nfunc (c *Contractor) SetRateLimits(readBPS int64, writeBPS int64, packetSize uint64) {\n\tc.staticContracts.SetRateLimits(readBPS, writeBPS, packetSize)\n}\n\n\/\/ Close closes the Contractor.\nfunc (c *Contractor) Close() error {\n\treturn c.tg.Stop()\n}\n\n\/\/ New returns a new Contractor.\nfunc New(cs consensusSet, wallet walletShim, tpool transactionPool, hdb hostDB, persistDir string) (*Contractor, error) {\n\t\/\/ Check for nil inputs.\n\tif cs == nil {\n\t\treturn nil, errNilCS\n\t}\n\tif wallet == nil {\n\t\treturn nil, errNilWallet\n\t}\n\tif tpool == nil {\n\t\treturn nil, errNilTpool\n\t}\n\n\t\/\/ Create the persist directory if it does not yet exist.\n\tif err := os.MkdirAll(persistDir, 0700); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Convert the old persist file(s), if necessary. This must occur before\n\t\/\/ loading the contract set.\n\tif err := convertPersist(persistDir); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Create the contract set.\n\tcontractSet, err := proto.NewContractSet(filepath.Join(persistDir, \"contracts\"), modules.ProdDependencies)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Create the logger.\n\tlogger, err := persist.NewFileLogger(filepath.Join(persistDir, \"contractor.log\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Create Contractor using production dependencies.\n\treturn NewCustomContractor(cs, &WalletBridge{W: wallet}, tpool, hdb, contractSet, NewPersist(persistDir), logger, modules.ProdDependencies)\n}\n\n\/\/ NewCustomContractor creates a Contractor using the provided dependencies.\nfunc NewCustomContractor(cs consensusSet, w wallet, tp transactionPool, hdb hostDB, contractSet *proto.ContractSet, p persister, l *persist.Logger, deps modules.Dependencies) (*Contractor, error) {\n\t\/\/ Create the Contractor object.\n\tc := &Contractor{\n\t\tcs: cs,\n\t\tstaticDeps: deps,\n\t\thdb: hdb,\n\t\tlog: l,\n\t\tpersist: p,\n\t\ttpool: tp,\n\t\twallet: w,\n\n\t\tinterruptMaintenance: make(chan struct{}),\n\n\t\tstaticContracts: contractSet,\n\t\tdownloaders: make(map[types.FileContractID]*hostDownloader),\n\t\teditors: make(map[types.FileContractID]*hostEditor),\n\t\toldContracts: make(map[types.FileContractID]modules.RenterContract),\n\t\trenewedIDs: make(map[types.FileContractID]types.FileContractID),\n\t\trenewing: make(map[types.FileContractID]bool),\n\t\trevising: make(map[types.FileContractID]bool),\n\t}\n\n\t\/\/ Close the contract set and logger upon shutdown.\n\tc.tg.AfterStop(func() {\n\t\tif err := c.staticContracts.Close(); err != nil {\n\t\t\tc.log.Println(\"Failed to close contract set:\", err)\n\t\t}\n\t\tif err := c.log.Close(); err != nil {\n\t\t\tfmt.Println(\"Failed to close the contractor logger:\", err)\n\t\t}\n\t})\n\n\t\/\/ Load the prior persistence structures.\n\terr := c.load()\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Subscribe to the consensus set.\n\terr = cs.ConsensusSetSubscribe(c, c.lastChange, c.tg.StopChan())\n\tif err == modules.ErrInvalidConsensusChangeID {\n\t\t\/\/ Reset the contractor consensus variables and try rescanning.\n\t\tc.blockHeight = 0\n\t\tc.lastChange = modules.ConsensusChangeBeginning\n\t\terr = cs.ConsensusSetSubscribe(c, c.lastChange, c.tg.StopChan())\n\t}\n\tif err != nil {\n\t\treturn nil, errors.New(\"contractor subscription failed: \" + err.Error())\n\t}\n\t\/\/ Unsubscribe from the consensus set upon shutdown.\n\tc.tg.OnStop(func() {\n\t\tcs.Unsubscribe(c)\n\t})\n\n\t\/\/ We may have upgraded persist or resubscribed. Save now so that we don't\n\t\/\/ lose our work.\n\tc.mu.Lock()\n\terr = c.save()\n\tc.mu.Unlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}\n<commit_msg>Update PeriodSpending to calculate spending of old contracts based on start height and not end height<commit_after>package contractor\n\n\/\/ TODO: We are in the middle of migrating the contractor to a new concurrency\n\/\/ model. The contractor should never call out to another package while under a\n\/\/ lock (except for the proto package). This is because the renter is going to\n\/\/ start calling contractor methods while holding the renter lock, so we need to\n\/\/ be absolutely confident that no contractor thread will attempt to grab a\n\/\/ renter lock.\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\n\t\"github.com\/NebulousLabs\/Sia\/modules\"\n\t\"github.com\/NebulousLabs\/Sia\/modules\/renter\/proto\"\n\t\"github.com\/NebulousLabs\/Sia\/persist\"\n\tsiasync \"github.com\/NebulousLabs\/Sia\/sync\"\n\t\"github.com\/NebulousLabs\/Sia\/types\"\n)\n\nvar (\n\terrNilCS = errors.New(\"cannot create contractor with nil consensus set\")\n\terrNilTpool = errors.New(\"cannot create contractor with nil transaction pool\")\n\terrNilWallet = errors.New(\"cannot create contractor with nil wallet\")\n\n\t\/\/ COMPATv1.0.4-lts\n\t\/\/ metricsContractID identifies a special contract that contains aggregate\n\t\/\/ financial metrics from older contractors\n\tmetricsContractID = types.FileContractID{'m', 'e', 't', 'r', 'i', 'c', 's'}\n)\n\n\/\/ A Contractor negotiates, revises, renews, and provides access to file\n\/\/ contracts.\ntype Contractor struct {\n\t\/\/ dependencies\n\tcs consensusSet\n\thdb hostDB\n\tlog *persist.Logger\n\tmu sync.RWMutex\n\tpersist persister\n\tstaticDeps modules.Dependencies\n\ttg siasync.ThreadGroup\n\ttpool transactionPool\n\twallet wallet\n\n\t\/\/ Only one thread should be performing contract maintenance at a time.\n\tinterruptMaintenance chan struct{}\n\tmaintenanceLock siasync.TryMutex\n\n\tallowance modules.Allowance\n\tblockHeight types.BlockHeight\n\tcurrentPeriod types.BlockHeight\n\tlastChange modules.ConsensusChangeID\n\n\tdownloaders map[types.FileContractID]*hostDownloader\n\teditors map[types.FileContractID]*hostEditor\n\tnumFailedRenews map[types.FileContractID]types.BlockHeight\n\trenewing map[types.FileContractID]bool \/\/ prevent revising during renewal\n\trevising map[types.FileContractID]bool \/\/ prevent overlapping revisions\n\n\tstaticContracts *proto.ContractSet\n\toldContracts map[types.FileContractID]modules.RenterContract\n\trenewedIDs map[types.FileContractID]types.FileContractID\n}\n\n\/\/ readlockResolveID returns the ID of the most recent renewal of id.\nfunc (c *Contractor) readlockResolveID(id types.FileContractID) types.FileContractID {\n\tnewID, exists := c.renewedIDs[id]\n\tfor exists {\n\t\tid = newID\n\t\tnewID, exists = c.renewedIDs[id]\n\t}\n\treturn id\n}\n\n\/\/ Allowance returns the current allowance.\nfunc (c *Contractor) Allowance() modules.Allowance {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\treturn c.allowance\n}\n\n\/\/ PeriodSpending returns the amount spent on contracts during the current\n\/\/ billing period.\nfunc (c *Contractor) PeriodSpending() modules.ContractorSpending {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\tvar spending modules.ContractorSpending\n\tfor _, contract := range c.staticContracts.ViewAll() {\n\t\t\/\/ Calculate ContractFees\n\t\tspending.ContractFees = spending.ContractFees.Add(contract.ContractFee)\n\t\tspending.ContractFees = spending.ContractFees.Add(contract.TxnFee)\n\t\tspending.ContractFees = spending.ContractFees.Add(contract.SiafundFee)\n\t\t\/\/ Calculate TotalAllocated\n\t\tspending.TotalAllocated = spending.TotalAllocated.Add(contract.TotalCost)\n\t\tspending.ContractSpendingDeprecated = spending.TotalAllocated\n\t\t\/\/ Calculate Spending\n\t\tspending.DownloadSpending = spending.DownloadSpending.Add(contract.DownloadSpending)\n\t\tspending.UploadSpending = spending.UploadSpending.Add(contract.UploadSpending)\n\t\tspending.StorageSpending = spending.StorageSpending.Add(contract.StorageSpending)\n\t}\n\n\t\/\/ Calculate spending from contracts that were renewed during the current period\n\tfor _, old := range c.oldContracts {\n\t\tif old.StartHeight >= c.currentPeriod {\n\t\t\t\/\/ Calculate ContractFees\n\t\t\tspending.ContractFees = spending.ContractFees.Add(old.ContractFee)\n\t\t\tspending.ContractFees = spending.ContractFees.Add(old.TxnFee)\n\t\t\tspending.ContractFees = spending.ContractFees.Add(old.SiafundFee)\n\t\t\t\/\/ Calculate TotalAllocated\n\t\t\tspending.TotalAllocated = spending.TotalAllocated.Add(old.TotalCost)\n\t\t\t\/\/ Calculate Spending\n\t\t\tspending.DownloadSpending = spending.DownloadSpending.Add(old.DownloadSpending)\n\t\t\tspending.UploadSpending = spending.UploadSpending.Add(old.UploadSpending)\n\t\t\tspending.StorageSpending = spending.StorageSpending.Add(old.StorageSpending)\n\t\t}\n\t}\n\t\/\/ Calculate amount of spent money to get unspent money.\n\tallSpending := spending.ContractFees\n\tallSpending = allSpending.Add(spending.DownloadSpending)\n\tallSpending = allSpending.Add(spending.UploadSpending)\n\tallSpending = allSpending.Add(spending.StorageSpending)\n\tif c.allowance.Funds.Cmp(allSpending) >= 0 {\n\t\tspending.Unspent = c.allowance.Funds.Sub(allSpending)\n\t}\n\n\treturn spending\n}\n\n\/\/ ContractByID returns the contract with the id specified, if it exists. The\n\/\/ contract will be resolved if possible to the most recent child contract.\nfunc (c *Contractor) ContractByID(id types.FileContractID) (modules.RenterContract, bool) {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\treturn c.staticContracts.View(c.readlockResolveID(id))\n}\n\n\/\/ Contracts returns the contracts formed by the contractor in the current\n\/\/ allowance period. Only contracts formed with currently online hosts are\n\/\/ returned.\nfunc (c *Contractor) Contracts() []modules.RenterContract {\n\treturn c.staticContracts.ViewAll()\n}\n\n\/\/ ContractUtility returns the utility fields for the given contract.\nfunc (c *Contractor) ContractUtility(id types.FileContractID) (modules.ContractUtility, bool) {\n\treturn c.managedContractUtility(id)\n}\n\n\/\/ CurrentPeriod returns the height at which the current allowance period\n\/\/ began.\nfunc (c *Contractor) CurrentPeriod() types.BlockHeight {\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\treturn c.currentPeriod\n}\n\n\/\/ ResolveID returns the ID of the most recent renewal of id.\nfunc (c *Contractor) ResolveID(id types.FileContractID) types.FileContractID {\n\tc.mu.RLock()\n\tnewID := c.readlockResolveID(id)\n\tc.mu.RUnlock()\n\treturn newID\n}\n\n\/\/ RateLimits sets the bandwidth limits for connections created by the\n\/\/ contractSet.\nfunc (c *Contractor) RateLimits() (readBPW int64, writeBPS int64, packetSize uint64) {\n\treturn c.staticContracts.RateLimits()\n}\n\n\/\/ SetRateLimits sets the bandwidth limits for connections created by the\n\/\/ contractSet.\nfunc (c *Contractor) SetRateLimits(readBPS int64, writeBPS int64, packetSize uint64) {\n\tc.staticContracts.SetRateLimits(readBPS, writeBPS, packetSize)\n}\n\n\/\/ Close closes the Contractor.\nfunc (c *Contractor) Close() error {\n\treturn c.tg.Stop()\n}\n\n\/\/ New returns a new Contractor.\nfunc New(cs consensusSet, wallet walletShim, tpool transactionPool, hdb hostDB, persistDir string) (*Contractor, error) {\n\t\/\/ Check for nil inputs.\n\tif cs == nil {\n\t\treturn nil, errNilCS\n\t}\n\tif wallet == nil {\n\t\treturn nil, errNilWallet\n\t}\n\tif tpool == nil {\n\t\treturn nil, errNilTpool\n\t}\n\n\t\/\/ Create the persist directory if it does not yet exist.\n\tif err := os.MkdirAll(persistDir, 0700); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Convert the old persist file(s), if necessary. This must occur before\n\t\/\/ loading the contract set.\n\tif err := convertPersist(persistDir); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Create the contract set.\n\tcontractSet, err := proto.NewContractSet(filepath.Join(persistDir, \"contracts\"), modules.ProdDependencies)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Create the logger.\n\tlogger, err := persist.NewFileLogger(filepath.Join(persistDir, \"contractor.log\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Create Contractor using production dependencies.\n\treturn NewCustomContractor(cs, &WalletBridge{W: wallet}, tpool, hdb, contractSet, NewPersist(persistDir), logger, modules.ProdDependencies)\n}\n\n\/\/ NewCustomContractor creates a Contractor using the provided dependencies.\nfunc NewCustomContractor(cs consensusSet, w wallet, tp transactionPool, hdb hostDB, contractSet *proto.ContractSet, p persister, l *persist.Logger, deps modules.Dependencies) (*Contractor, error) {\n\t\/\/ Create the Contractor object.\n\tc := &Contractor{\n\t\tcs: cs,\n\t\tstaticDeps: deps,\n\t\thdb: hdb,\n\t\tlog: l,\n\t\tpersist: p,\n\t\ttpool: tp,\n\t\twallet: w,\n\n\t\tinterruptMaintenance: make(chan struct{}),\n\n\t\tstaticContracts: contractSet,\n\t\tdownloaders: make(map[types.FileContractID]*hostDownloader),\n\t\teditors: make(map[types.FileContractID]*hostEditor),\n\t\toldContracts: make(map[types.FileContractID]modules.RenterContract),\n\t\trenewedIDs: make(map[types.FileContractID]types.FileContractID),\n\t\trenewing: make(map[types.FileContractID]bool),\n\t\trevising: make(map[types.FileContractID]bool),\n\t}\n\n\t\/\/ Close the contract set and logger upon shutdown.\n\tc.tg.AfterStop(func() {\n\t\tif err := c.staticContracts.Close(); err != nil {\n\t\t\tc.log.Println(\"Failed to close contract set:\", err)\n\t\t}\n\t\tif err := c.log.Close(); err != nil {\n\t\t\tfmt.Println(\"Failed to close the contractor logger:\", err)\n\t\t}\n\t})\n\n\t\/\/ Load the prior persistence structures.\n\terr := c.load()\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Subscribe to the consensus set.\n\terr = cs.ConsensusSetSubscribe(c, c.lastChange, c.tg.StopChan())\n\tif err == modules.ErrInvalidConsensusChangeID {\n\t\t\/\/ Reset the contractor consensus variables and try rescanning.\n\t\tc.blockHeight = 0\n\t\tc.lastChange = modules.ConsensusChangeBeginning\n\t\terr = cs.ConsensusSetSubscribe(c, c.lastChange, c.tg.StopChan())\n\t}\n\tif err != nil {\n\t\treturn nil, errors.New(\"contractor subscription failed: \" + err.Error())\n\t}\n\t\/\/ Unsubscribe from the consensus set upon shutdown.\n\tc.tg.OnStop(func() {\n\t\tcs.Unsubscribe(c)\n\t})\n\n\t\/\/ We may have upgraded persist or resubscribed. Save now so that we don't\n\t\/\/ lose our work.\n\tc.mu.Lock()\n\terr = c.save()\n\tc.mu.Unlock()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn c, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package smux\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc init() {\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n\tln, err := net.Listen(\"tcp\", \"127.0.0.1:19999\")\n\tif err != nil {\n\t\t\/\/ handle error\n\t\tpanic(err)\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tconn, err := ln.Accept()\n\t\t\tif err != nil {\n\t\t\t\t\/\/ handle error\n\t\t\t}\n\t\t\tgo handleConnection(conn)\n\t\t}\n\t}()\n}\n\nfunc handleConnection(conn net.Conn) {\n\tsession, _ := Server(conn, nil)\n\tfor {\n\t\tif stream, err := session.AcceptStream(); err == nil {\n\t\t\tgo func(s io.ReadWriteCloser) {\n\t\t\t\tbuf := make([]byte, 65536)\n\t\t\t\tfor {\n\t\t\t\t\tn, err := s.Read(buf)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\ts.Write(buf[:n])\n\t\t\t\t}\n\t\t\t}(stream)\n\t\t} else {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc TestEcho(t *testing.T) {\n\tcli, err := net.Dial(\"tcp\", \"127.0.0.1:19999\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsession, _ := Client(cli, nil)\n\tstream, _ := session.OpenStream()\n\tconst N = 100\n\tbuf := make([]byte, 10)\n\tfor i := 0; i < N; i++ {\n\t\tmsg := fmt.Sprintf(\"hello%v\", i)\n\t\tfmt.Println(\"sent:\", msg)\n\t\tstream.Write([]byte(msg))\n\t\tif n, err := stream.Read(buf); err == nil {\n\t\t\tfmt.Println(\"recv:\", string(buf[:n]))\n\t\t} else {\n\t\t\treturn\n\t\t}\n\t}\n\tsession.Close()\n}\n\nfunc TestSpeed(t *testing.T) {\n\tcli, err := net.Dial(\"tcp\", \"127.0.0.1:19999\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsession, _ := Client(cli, nil)\n\tstream, _ := session.OpenStream()\n\n\tstart := time.Now()\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tbuf := make([]byte, 1024*1024)\n\t\tnrecv := 0\n\t\tfor {\n\t\t\tn, err := stream.Read(buf)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tnrecv += n\n\t\t\t\tif nrecv == 4096*4096 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tprintln(\"total recv:\", nrecv)\n\t\tstream.Close()\n\t\tfmt.Println(\"time for 16MB rtt\", time.Now().Sub(start))\n\t\twg.Done()\n\t}()\n\tmsg := make([]byte, 8192)\n\tfor i := 0; i < 2048; i++ {\n\t\tstream.Write(msg)\n\t}\n\twg.Wait()\n\tsession.Close()\n}\n\nfunc TestParallel(t *testing.T) {\n\tcli, err := net.Dial(\"tcp\", \"127.0.0.1:19999\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsession, _ := Client(cli, nil)\n\n\tpar := 1000\n\tmessages := 100\n\tvar wg sync.WaitGroup\n\twg.Add(par)\n\tfor i := 0; i < par; i++ {\n\t\tstream, _ := session.OpenStream()\n\t\tgo func(s *Stream) {\n\t\t\tbuf := make([]byte, 20)\n\t\t\tfor j := 0; j < messages; j++ {\n\t\t\t\tmsg := fmt.Sprintf(\"hello%v\", j)\n\t\t\t\ts.Write([]byte(msg))\n\t\t\t\tif _, err := s.Read(buf); err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\ts.Close()\n\t\t\twg.Done()\n\t\t}(stream)\n\t}\n\tt.Log(\"created\", session.NumStreams(), \"streams\")\n\twg.Wait()\n\tsession.Close()\n}\n\nfunc TestCloseThenOpen(t *testing.T) {\n\tcli, err := net.Dial(\"tcp\", \"127.0.0.1:19999\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsession, _ := Client(cli, nil)\n\tsession.Close()\n\tif _, err := session.OpenStream(); err == nil {\n\t\tt.Fatal(\"opened after close\")\n\t}\n}\n\nfunc TestTinyReadBuffer(t *testing.T) {\n\tcli, err := net.Dial(\"tcp\", \"127.0.0.1:19999\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsession, _ := Client(cli, nil)\n\tstream, _ := session.OpenStream()\n\tconst N = 100\n\ttinybuf := make([]byte, 6)\n\tfor i := 0; i < N; i++ {\n\t\tmsg := fmt.Sprintf(\"hello%v\", i)\n\t\tfmt.Println(\"sent:\", msg)\n\t\tnsent, err := stream.Write([]byte(msg))\n\t\tif err != nil {\n\t\t\tt.Fatal(\"cannot write\")\n\t\t}\n\t\tnrecv := 0\n\t\tfor nrecv < nsent {\n\t\t\tfmt.Println(nrecv, nsent)\n\t\t\tif n, err := stream.Read(tinybuf); err == nil {\n\t\t\t\tfmt.Println(\"recv:\", string(tinybuf[:n]))\n\t\t\t\tnrecv += n\n\t\t\t} else {\n\t\t\t\tt.Fatal(\"cannot read with tiny buffer\")\n\t\t\t}\n\t\t}\n\t\tfmt.Println(\"#\", nrecv, nsent)\n\t}\n\tsession.Close()\n}\n\nfunc TestIsClose(t *testing.T) {\n\tcli, err := net.Dial(\"tcp\", \"127.0.0.1:19999\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsession, _ := Client(cli, nil)\n\tsession.Close()\n\tif session.IsClosed() != true {\n\t\tt.Fatal(\"still open after close\")\n\t}\n}\n\nfunc TestKeepAliveTimeout(t *testing.T) {\n\tln, err := net.Listen(\"tcp\", \"127.0.0.1:29999\")\n\tif err != nil {\n\t\t\/\/ handle error\n\t\tpanic(err)\n\t}\n\tgo func() {\n\t\tln.Accept()\n\t}()\n\n\tcli, err := net.Dial(\"tcp\", \"127.0.0.1:29999\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tconfig := DefaultConfig()\n\tconfig.KeepAliveInterval = 1\n\tconfig.KeepAliveTimeout = 2\n\tsession, _ := Client(cli, config)\n\t<-time.After(3 * time.Second)\n\tif session.IsClosed() != true {\n\t\tt.Fatal(\"keepalive-timeout failed\")\n\t}\n}\n\nfunc BenchmarkAcceptClose(b *testing.B) {\n\tcli, err := net.Dial(\"tcp\", \"127.0.0.1:19999\")\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tsession, _ := Client(cli, nil)\n\tfor i := 0; i < b.N; i++ {\n\t\tif stream, err := session.OpenStream(); err == nil {\n\t\t\tstream.Close()\n\t\t} else {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n}\n<commit_msg>add server open test<commit_after>package smux\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc init() {\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n\tln, err := net.Listen(\"tcp\", \"127.0.0.1:19999\")\n\tif err != nil {\n\t\t\/\/ handle error\n\t\tpanic(err)\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tconn, err := ln.Accept()\n\t\t\tif err != nil {\n\t\t\t\t\/\/ handle error\n\t\t\t}\n\t\t\tgo handleConnection(conn)\n\t\t}\n\t}()\n}\n\nfunc handleConnection(conn net.Conn) {\n\tsession, _ := Server(conn, nil)\n\tfor {\n\t\tif stream, err := session.AcceptStream(); err == nil {\n\t\t\tgo func(s io.ReadWriteCloser) {\n\t\t\t\tbuf := make([]byte, 65536)\n\t\t\t\tfor {\n\t\t\t\t\tn, err := s.Read(buf)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\ts.Write(buf[:n])\n\t\t\t\t}\n\t\t\t}(stream)\n\t\t} else {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc TestEcho(t *testing.T) {\n\tcli, err := net.Dial(\"tcp\", \"127.0.0.1:19999\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsession, _ := Client(cli, nil)\n\tstream, _ := session.OpenStream()\n\tconst N = 100\n\tbuf := make([]byte, 10)\n\tfor i := 0; i < N; i++ {\n\t\tmsg := fmt.Sprintf(\"hello%v\", i)\n\t\tfmt.Println(\"sent:\", msg)\n\t\tstream.Write([]byte(msg))\n\t\tif n, err := stream.Read(buf); err == nil {\n\t\t\tfmt.Println(\"recv:\", string(buf[:n]))\n\t\t} else {\n\t\t\treturn\n\t\t}\n\t}\n\tsession.Close()\n}\n\nfunc TestSpeed(t *testing.T) {\n\tcli, err := net.Dial(\"tcp\", \"127.0.0.1:19999\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsession, _ := Client(cli, nil)\n\tstream, _ := session.OpenStream()\n\n\tstart := time.Now()\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tbuf := make([]byte, 1024*1024)\n\t\tnrecv := 0\n\t\tfor {\n\t\t\tn, err := stream.Read(buf)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tnrecv += n\n\t\t\t\tif nrecv == 4096*4096 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tprintln(\"total recv:\", nrecv)\n\t\tstream.Close()\n\t\tfmt.Println(\"time for 16MB rtt\", time.Now().Sub(start))\n\t\twg.Done()\n\t}()\n\tmsg := make([]byte, 8192)\n\tfor i := 0; i < 2048; i++ {\n\t\tstream.Write(msg)\n\t}\n\twg.Wait()\n\tsession.Close()\n}\n\nfunc TestParallel(t *testing.T) {\n\tcli, err := net.Dial(\"tcp\", \"127.0.0.1:19999\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsession, _ := Client(cli, nil)\n\n\tpar := 1000\n\tmessages := 100\n\tvar wg sync.WaitGroup\n\twg.Add(par)\n\tfor i := 0; i < par; i++ {\n\t\tstream, _ := session.OpenStream()\n\t\tgo func(s *Stream) {\n\t\t\tbuf := make([]byte, 20)\n\t\t\tfor j := 0; j < messages; j++ {\n\t\t\t\tmsg := fmt.Sprintf(\"hello%v\", j)\n\t\t\t\ts.Write([]byte(msg))\n\t\t\t\tif _, err := s.Read(buf); err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\ts.Close()\n\t\t\twg.Done()\n\t\t}(stream)\n\t}\n\tt.Log(\"created\", session.NumStreams(), \"streams\")\n\twg.Wait()\n\tsession.Close()\n}\n\nfunc TestCloseThenOpen(t *testing.T) {\n\tcli, err := net.Dial(\"tcp\", \"127.0.0.1:19999\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsession, _ := Client(cli, nil)\n\tsession.Close()\n\tif _, err := session.OpenStream(); err == nil {\n\t\tt.Fatal(\"opened after close\")\n\t}\n}\n\nfunc TestTinyReadBuffer(t *testing.T) {\n\tcli, err := net.Dial(\"tcp\", \"127.0.0.1:19999\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsession, _ := Client(cli, nil)\n\tstream, _ := session.OpenStream()\n\tconst N = 100\n\ttinybuf := make([]byte, 6)\n\tfor i := 0; i < N; i++ {\n\t\tmsg := fmt.Sprintf(\"hello%v\", i)\n\t\tfmt.Println(\"sent:\", msg)\n\t\tnsent, err := stream.Write([]byte(msg))\n\t\tif err != nil {\n\t\t\tt.Fatal(\"cannot write\")\n\t\t}\n\t\tnrecv := 0\n\t\tfor nrecv < nsent {\n\t\t\tfmt.Println(nrecv, nsent)\n\t\t\tif n, err := stream.Read(tinybuf); err == nil {\n\t\t\t\tfmt.Println(\"recv:\", string(tinybuf[:n]))\n\t\t\t\tnrecv += n\n\t\t\t} else {\n\t\t\t\tt.Fatal(\"cannot read with tiny buffer\")\n\t\t\t}\n\t\t}\n\t\tfmt.Println(\"#\", nrecv, nsent)\n\t}\n\tsession.Close()\n}\n\nfunc TestIsClose(t *testing.T) {\n\tcli, err := net.Dial(\"tcp\", \"127.0.0.1:19999\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsession, _ := Client(cli, nil)\n\tsession.Close()\n\tif session.IsClosed() != true {\n\t\tt.Fatal(\"still open after close\")\n\t}\n}\n\nfunc TestKeepAliveTimeout(t *testing.T) {\n\tln, err := net.Listen(\"tcp\", \"127.0.0.1:29999\")\n\tif err != nil {\n\t\t\/\/ handle error\n\t\tpanic(err)\n\t}\n\tgo func() {\n\t\tln.Accept()\n\t}()\n\n\tcli, err := net.Dial(\"tcp\", \"127.0.0.1:29999\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tconfig := DefaultConfig()\n\tconfig.KeepAliveInterval = 1\n\tconfig.KeepAliveTimeout = 2\n\tsession, _ := Client(cli, config)\n\t<-time.After(3 * time.Second)\n\tif session.IsClosed() != true {\n\t\tt.Fatal(\"keepalive-timeout failed\")\n\t}\n}\n\nfunc TestServerEcho(t *testing.T) {\n\tln, err := net.Listen(\"tcp\", \"127.0.0.1:39999\")\n\tif err != nil {\n\t\t\/\/ handle error\n\t\tpanic(err)\n\t}\n\tgo func() {\n\t\tif conn, err := ln.Accept(); err == nil {\n\t\t\tsession, _ := Server(conn, nil)\n\t\t\tif stream, err := session.OpenStream(); err == nil {\n\t\t\t\tconst N = 100\n\t\t\t\tbuf := make([]byte, 10)\n\t\t\t\tfor i := 0; i < N; i++ {\n\t\t\t\t\tmsg := fmt.Sprintf(\"hello%v\", i)\n\t\t\t\t\tfmt.Println(\"sent:\", msg)\n\t\t\t\t\tstream.Write([]byte(msg))\n\t\t\t\t\tif n, err := stream.Read(buf); err == nil {\n\t\t\t\t\t\tfmt.Println(\"recv:\", string(buf[:n]))\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tstream.Close()\n\t\t\t} else {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t} else {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\tcli, err := net.Dial(\"tcp\", \"127.0.0.1:39999\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif session, err := Client(cli, nil); err == nil {\n\t\tif stream, err := session.AcceptStream(); err == nil {\n\t\t\tbuf := make([]byte, 65536)\n\t\t\tfor {\n\t\t\t\tn, err := stream.Read(buf)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tstream.Write(buf[:n])\n\t\t\t}\n\t\t} else {\n\t\t\tt.Fatal(err)\n\t\t}\n\t} else {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc BenchmarkAcceptClose(b *testing.B) {\n\tcli, err := net.Dial(\"tcp\", \"127.0.0.1:19999\")\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tsession, _ := Client(cli, nil)\n\tfor i := 0; i < b.N; i++ {\n\t\tif stream, err := session.OpenStream(); err == nil {\n\t\t\tstream.Close()\n\t\t} else {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package github\n\nimport (\n \"os\"\n \"log\"\n \"os\/exec\"\n \"io\/ioutil\"\n \"encoding\/json\"\n \"github.com\/lexteam\/kabosu\/modules\"\n \"github.com\/google\/go-github\/github\"\n \"gopkg.in\/macaron.v1\"\n)\n\nfunc GetWebhook(ctx *macaron.Context) {\n \/\/ Check it was a push event\n if (ctx.Req.Header.Get(\"X-GitHub-Event\") == \"push\") {\n body, _ := ioutil.ReadAll(ctx.Req.Body().ReadCloser())\n\n var res github.PushEvent\n json.Unmarshal(body, &res)\n\n log.Println(*res.Repo.FullName)\n\n if (modules.CONFIG.Section(\"services\").HasKey(*res.Repo.FullName)) {\n log.Println(modules.CONFIG.Section(\"services\").Key(*res.Repo.FullName).String())\n\n cmd := exec.Command(\"git\", \"pull\")\n cmd.Dir = modules.CONFIG.Section(\"services\").Key(*res.Repo.FullName).String()\n cmd.Stdout = os.Stdout\n cmd.Stderr = os.Stderr\n cmd.Run()\n }\n }\n}\n<commit_msg>Execute script after git pull. Adds #1<commit_after>package github\n\nimport (\n \"os\"\n \"log\"\n \"os\/exec\"\n \"io\/ioutil\"\n \"encoding\/json\"\n \"github.com\/lexteam\/kabosu\/modules\"\n \"github.com\/google\/go-github\/github\"\n \"gopkg.in\/macaron.v1\"\n)\n\nfunc GetWebhook(ctx *macaron.Context) {\n \/\/ Check it was a push event\n if (ctx.Req.Header.Get(\"X-GitHub-Event\") == \"push\") {\n body, _ := ioutil.ReadAll(ctx.Req.Body().ReadCloser())\n\n var res github.PushEvent\n json.Unmarshal(body, &res)\n\n log.Println(*res.Repo.FullName)\n\n if (modules.CONFIG.Section(\"services\").HasKey(*res.Repo.FullName)) {\n var dir = modules.CONFIG.Section(\"services\").Key(*res.Repo.FullName).String()\n log.Println(dir)\n\n cmd := exec.Command(\"git\", \"pull\")\n cmd.Dir = dir\n cmd.Stdout = os.Stdout\n cmd.Stderr = os.Stderr\n cmd.Run()\n\n if _, err := os.Stat(dir + \"\/kabosu.sh\"); err == nil {\n cmd := exec.Command(\".\/kabosu.sh\")\n cmd.Dir = dir\n cmd.Stdout = os.Stdout\n cmd.Stderr = os.Stderr\n cmd.Run()\n }\n }\n }\n}\n<|endoftext|>"} {"text":"<commit_before>package shell\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/zetamatta\/go-mbcs\"\n\n\t\"github.com\/zetamatta\/nyagos\/dos\"\n)\n\nfunc readEnv(scan *bufio.Scanner, verbose io.Writer) error {\n\tfor scan.Scan() {\n\t\tline, err := mbcs.AtoU(scan.Bytes())\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tline = strings.TrimSpace(line)\n\t\teqlPos := strings.Index(line, \"=\")\n\t\tif eqlPos > 0 {\n\t\t\tleft := line[:eqlPos]\n\t\t\tright := line[eqlPos+1:]\n\t\t\tif left != \"ERRORLEVEL_\" {\n\t\t\t\torig := os.Getenv(left)\n\t\t\t\tif verbose != nil {\n\t\t\t\t\tfmt.Fprintf(verbose, \"%s=%s\\n\", left, right)\n\t\t\t\t}\n\t\t\t\tif orig != right {\n\t\t\t\t\t\/\/ fmt.Fprintf(os.Stderr, \"%s:=%s\\n\", left, right)\n\t\t\t\t\tos.Setenv(left, right)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn scan.Err()\n}\n\nfunc readPwd(scan *bufio.Scanner, verbose io.Writer) error {\n\tif !scan.Scan() {\n\t\treturn errors.New(\"Could not load the new current directory\")\n\t}\n\tif err := scan.Err(); err != nil {\n\t\treturn err\n\t}\n\tline, err := mbcs.AtoU(scan.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tline = strings.TrimSpace(line)\n\tif verbose != nil {\n\t\tfmt.Fprintf(verbose, \"cd \\\"%s\\\"\\n\", line)\n\t}\n\tos.Chdir(line)\n\treturn nil\n}\n\n\/\/ loadTmpFile - read update the current-directory and environment-variables from tmp-file.\nfunc loadTmpFile(fname string, verbose io.Writer) error {\n\tfp, err := os.Open(fname)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer fp.Close()\n\n\tscan := bufio.NewScanner(fp)\n\tif err := readPwd(scan, verbose); err != nil {\n\t\treturn err\n\t}\n\treturn readEnv(scan, verbose)\n}\n\nfunc callBatch(batch string,\n\targs []string,\n\ttmpfile string,\n\tverbose io.Writer,\n\tstdin io.Reader,\n\tstdout io.Writer,\n\tstderr io.Writer) (int, error) {\n\tparams := []string{\n\t\tos.Getenv(\"COMSPEC\"),\n\t\t\"\/C\",\n\t\tbatch,\n\t}\n\tfd, err := os.Create(batch)\n\tif err != nil {\n\t\treturn 1, err\n\t}\n\tvar writer *bufio.Writer\n\tif verbose != nil {\n\t\twriter = bufio.NewWriter(io.MultiWriter(fd, verbose))\n\t} else {\n\t\twriter = bufio.NewWriter(fd)\n\t}\n\tfmt.Fprint(writer, \"@call\")\n\tfor _, arg1 := range args {\n\t\t\/\/ UTF8 parameter to ANSI\n\t\tansi, err := mbcs.UtoA(arg1)\n\t\tif err != nil {\n\t\t\t\/\/ println(\"utoa: \" + err.Error())\n\t\t\tfd.Close()\n\t\t\treturn -1, err\n\t\t}\n\t\t\/\/ chop last '\\0'\n\t\tif ansi[len(ansi)-1] == 0 {\n\t\t\tansi = ansi[:len(ansi)-1]\n\t\t}\n\t\tfmt.Fprintf(writer, \" %s\", ansi)\n\t}\n\tfmt.Fprintf(writer, \"\\n@set \\\"ERRORLEVEL_=%%ERRORLEVEL%%\\\"\\n\")\n\tfmt.Fprintf(writer, \"@(cd & set) > \\\"%s\\\"\\n\", tmpfile)\n\tfmt.Fprintf(writer, \"@exit \/b \\\"%%ERRORLEVEL_%%\\\"\\n\")\n\twriter.Flush()\n\tif err := fd.Close(); err != nil {\n\t\treturn 1, err\n\t}\n\tcmd := exec.Cmd{\n\t\tPath: params[0],\n\t\tArgs: params,\n\t\tStdin: stdin,\n\t\tStdout: stdout,\n\t\tStderr: stderr,\n\t}\n\tif err := cmd.Run(); err != nil {\n\t\treturn 1, err\n\t}\n\terrorlevel, errorlevelOk := dos.GetErrorLevel(&cmd)\n\tif !errorlevelOk {\n\t\terrorlevel = 255\n\t}\n\treturn errorlevel, nil\n}\n\nfunc RawSource(args []string, verbose io.Writer, debug bool, stdin io.Reader, stdout io.Writer, stderr io.Writer) (int, error) {\n\ttempDir := os.TempDir()\n\tpid := os.Getpid()\n\tbatch := filepath.Join(tempDir, fmt.Sprintf(\"nyagos-%d.cmd\", pid))\n\ttmpfile := filepath.Join(tempDir, fmt.Sprintf(\"nyagos-%d.tmp\", pid))\n\n\terrorlevel, err := callBatch(\n\t\tbatch,\n\t\targs,\n\t\ttmpfile,\n\t\tverbose,\n\t\tstdin,\n\t\tstdout,\n\t\tstderr)\n\n\tif err != nil {\n\t\treturn errorlevel, err\n\t}\n\n\tif !debug {\n\t\tdefer os.Remove(tmpfile)\n\t\tdefer os.Remove(batch)\n\t}\n\n\tif err := loadTmpFile(tmpfile, verbose); err != nil {\n\t\treturn 1, err\n\t}\n\n\treturn errorlevel, err\n}\n<commit_msg>Fix #297 On 4.2.5_beta, running the batchfile includes `exit` without `\/b` option, an error occurs<commit_after>package shell\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/zetamatta\/go-mbcs\"\n\n\t\"github.com\/zetamatta\/nyagos\/dos\"\n)\n\nfunc readEnv(scan *bufio.Scanner, verbose io.Writer) error {\n\tfor scan.Scan() {\n\t\tline, err := mbcs.AtoU(scan.Bytes())\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tline = strings.TrimSpace(line)\n\t\teqlPos := strings.Index(line, \"=\")\n\t\tif eqlPos > 0 {\n\t\t\tleft := line[:eqlPos]\n\t\t\tright := line[eqlPos+1:]\n\t\t\tif left != \"ERRORLEVEL_\" {\n\t\t\t\torig := os.Getenv(left)\n\t\t\t\tif verbose != nil {\n\t\t\t\t\tfmt.Fprintf(verbose, \"%s=%s\\n\", left, right)\n\t\t\t\t}\n\t\t\t\tif orig != right {\n\t\t\t\t\t\/\/ fmt.Fprintf(os.Stderr, \"%s:=%s\\n\", left, right)\n\t\t\t\t\tos.Setenv(left, right)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn scan.Err()\n}\n\nfunc readPwd(scan *bufio.Scanner, verbose io.Writer) error {\n\tif !scan.Scan() {\n\t\treturn errors.New(\"Could not load the new current directory\")\n\t}\n\tif err := scan.Err(); err != nil {\n\t\treturn err\n\t}\n\tline, err := mbcs.AtoU(scan.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tline = strings.TrimSpace(line)\n\tif verbose != nil {\n\t\tfmt.Fprintf(verbose, \"cd \\\"%s\\\"\\n\", line)\n\t}\n\tos.Chdir(line)\n\treturn nil\n}\n\n\/\/ loadTmpFile - read update the current-directory and environment-variables from tmp-file.\nfunc loadTmpFile(fname string, verbose io.Writer) error {\n\tfp, err := os.Open(fname)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer fp.Close()\n\n\tscan := bufio.NewScanner(fp)\n\tif err := readPwd(scan, verbose); err != nil {\n\t\treturn err\n\t}\n\treturn readEnv(scan, verbose)\n}\n\nfunc callBatch(batch string,\n\targs []string,\n\ttmpfile string,\n\tverbose io.Writer,\n\tstdin io.Reader,\n\tstdout io.Writer,\n\tstderr io.Writer) (int, error) {\n\tparams := []string{\n\t\tos.Getenv(\"COMSPEC\"),\n\t\t\"\/C\",\n\t\tbatch,\n\t}\n\tfd, err := os.Create(batch)\n\tif err != nil {\n\t\treturn 1, err\n\t}\n\tvar writer *bufio.Writer\n\tif verbose != nil {\n\t\twriter = bufio.NewWriter(io.MultiWriter(fd, verbose))\n\t} else {\n\t\twriter = bufio.NewWriter(fd)\n\t}\n\tfmt.Fprint(writer, \"@call\")\n\tfor _, arg1 := range args {\n\t\t\/\/ UTF8 parameter to ANSI\n\t\tansi, err := mbcs.UtoA(arg1)\n\t\tif err != nil {\n\t\t\t\/\/ println(\"utoa: \" + err.Error())\n\t\t\tfd.Close()\n\t\t\treturn -1, err\n\t\t}\n\t\t\/\/ chop last '\\0'\n\t\tif ansi[len(ansi)-1] == 0 {\n\t\t\tansi = ansi[:len(ansi)-1]\n\t\t}\n\t\tfmt.Fprintf(writer, \" %s\", ansi)\n\t}\n\tfmt.Fprintf(writer, \"\\n@set \\\"ERRORLEVEL_=%%ERRORLEVEL%%\\\"\\n\")\n\tfmt.Fprintf(writer, \"@(cd & set) > \\\"%s\\\"\\n\", tmpfile)\n\tfmt.Fprintf(writer, \"@exit \/b \\\"%%ERRORLEVEL_%%\\\"\\n\")\n\twriter.Flush()\n\tif err := fd.Close(); err != nil {\n\t\treturn 1, err\n\t}\n\tcmd := exec.Cmd{\n\t\tPath: params[0],\n\t\tArgs: params,\n\t\tStdin: stdin,\n\t\tStdout: stdout,\n\t\tStderr: stderr,\n\t}\n\tif err := cmd.Run(); err != nil {\n\t\treturn 1, err\n\t}\n\terrorlevel, errorlevelOk := dos.GetErrorLevel(&cmd)\n\tif !errorlevelOk {\n\t\terrorlevel = 255\n\t}\n\treturn errorlevel, nil\n}\n\nfunc RawSource(args []string, verbose io.Writer, debug bool, stdin io.Reader, stdout io.Writer, stderr io.Writer) (int, error) {\n\ttempDir := os.TempDir()\n\tpid := os.Getpid()\n\tbatch := filepath.Join(tempDir, fmt.Sprintf(\"nyagos-%d.cmd\", pid))\n\ttmpfile := filepath.Join(tempDir, fmt.Sprintf(\"nyagos-%d.tmp\", pid))\n\n\terrorlevel, err := callBatch(\n\t\tbatch,\n\t\targs,\n\t\ttmpfile,\n\t\tverbose,\n\t\tstdin,\n\t\tstdout,\n\t\tstderr)\n\n\tif err != nil {\n\t\treturn errorlevel, err\n\t}\n\n\tif !debug {\n\t\tdefer os.Remove(tmpfile)\n\t\tdefer os.Remove(batch)\n\t}\n\n\tif err := loadTmpFile(tmpfile, verbose); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn 1, fmt.Errorf(\"%s: the batch file may use `exit` without `\/b` option. Could not find the change of the environment variables\", args[0])\n\t\t}\n\t\treturn 1, err\n\t}\n\n\treturn errorlevel, err\n}\n<|endoftext|>"} {"text":"<commit_before>package caddytls\n\nimport (\n\t\"bytes\"\n\t\"crypto\"\n\t\"crypto\/ecdsa\"\n\t\"crypto\/elliptic\"\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestSaveAndLoadRSAPrivateKey(t *testing.T) {\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 128) \/\/ make tests faster; small key size OK for testing\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ test save\n\tsavedBytes, err := savePrivateKey(privateKey)\n\tif err != nil {\n\t\tt.Fatal(\"error saving private key:\", err)\n\t}\n\n\t\/\/ test load\n\tloadedKey, err := loadPrivateKey(savedBytes)\n\tif err != nil {\n\t\tt.Error(\"error loading private key:\", err)\n\t}\n\n\t\/\/ verify loaded key is correct\n\tif !PrivateKeysSame(privateKey, loadedKey) {\n\t\tt.Error(\"Expected key bytes to be the same, but they weren't\")\n\t}\n}\n\nfunc TestSaveAndLoadECCPrivateKey(t *testing.T) {\n\tprivateKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ test save\n\tsavedBytes, err := savePrivateKey(privateKey)\n\tif err != nil {\n\t\tt.Fatal(\"error saving private key:\", err)\n\t}\n\n\t\/\/ test load\n\tloadedKey, err := loadPrivateKey(savedBytes)\n\tif err != nil {\n\t\tt.Error(\"error loading private key:\", err)\n\t}\n\n\t\/\/ verify loaded key is correct\n\tif !PrivateKeysSame(privateKey, loadedKey) {\n\t\tt.Error(\"Expected key bytes to be the same, but they weren't\")\n\t}\n}\n\n\/\/ PrivateKeysSame compares the bytes of a and b and returns true if they are the same.\nfunc PrivateKeysSame(a, b crypto.PrivateKey) bool {\n\treturn bytes.Equal(PrivateKeyBytes(a), PrivateKeyBytes(b))\n}\n\n\/\/ PrivateKeyBytes returns the bytes of DER-encoded key.\nfunc PrivateKeyBytes(key crypto.PrivateKey) []byte {\n\tvar keyBytes []byte\n\tswitch key := key.(type) {\n\tcase *rsa.PrivateKey:\n\t\tkeyBytes = x509.MarshalPKCS1PrivateKey(key)\n\tcase *ecdsa.PrivateKey:\n\t\tkeyBytes, _ = x509.MarshalECPrivateKey(key)\n\t}\n\treturn keyBytes\n}\n\nfunc TestStandaloneTLSTicketKeyRotation(t *testing.T) {\n\ttlsGovChan := make(chan struct{})\n\tdefer close(tlsGovChan)\n\tcallSync := make(chan bool, 1)\n\tdefer close(callSync)\n\n\toldHook := setSessionTicketKeysTestHook\n\tdefer func() {\n\t\tsetSessionTicketKeysTestHook = oldHook\n\t}()\n\tvar keysInUse [][32]byte\n\tsetSessionTicketKeysTestHook = func(keys [][32]byte) [][32]byte {\n\t\tkeysInUse = keys\n\t\tcallSync <- true\n\t\treturn keys\n\t}\n\n\tc := new(tls.Config)\n\ttimer := time.NewTicker(time.Millisecond * 1)\n\n\tgo standaloneTLSTicketKeyRotation(c, timer, tlsGovChan)\n\n\trounds := 0\n\tvar lastTicketKey [32]byte\n\tfor {\n\t\tselect {\n\t\tcase <-callSync:\n\t\t\tif lastTicketKey == keysInUse[0] {\n\t\t\t\tclose(tlsGovChan)\n\t\t\t\tt.Errorf(\"The same TLS ticket key has been used again (not rotated): %x.\", lastTicketKey)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlastTicketKey = keysInUse[0]\n\t\t\trounds++\n\t\t\tif rounds <= NumTickets && len(keysInUse) != rounds {\n\t\t\t\tclose(tlsGovChan)\n\t\t\t\tt.Errorf(\"Expected TLS ticket keys in use: %d; Got instead: %d.\", rounds, len(keysInUse))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif c.SessionTicketsDisabled == true {\n\t\t\t\tt.Error(\"Session tickets have been disabled unexpectedly.\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif rounds >= NumTickets+1 {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-time.After(time.Second * 1):\n\t\t\tt.Errorf(\"Timeout after %d rounds.\", rounds)\n\t\t\treturn\n\t\t}\n\t}\n}\n<commit_msg>tls: fix TestStandaloneTLSTicketKeyRotation data race<commit_after>package caddytls\n\nimport (\n\t\"bytes\"\n\t\"crypto\"\n\t\"crypto\/ecdsa\"\n\t\"crypto\/elliptic\"\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestSaveAndLoadRSAPrivateKey(t *testing.T) {\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 128) \/\/ make tests faster; small key size OK for testing\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ test save\n\tsavedBytes, err := savePrivateKey(privateKey)\n\tif err != nil {\n\t\tt.Fatal(\"error saving private key:\", err)\n\t}\n\n\t\/\/ test load\n\tloadedKey, err := loadPrivateKey(savedBytes)\n\tif err != nil {\n\t\tt.Error(\"error loading private key:\", err)\n\t}\n\n\t\/\/ verify loaded key is correct\n\tif !PrivateKeysSame(privateKey, loadedKey) {\n\t\tt.Error(\"Expected key bytes to be the same, but they weren't\")\n\t}\n}\n\nfunc TestSaveAndLoadECCPrivateKey(t *testing.T) {\n\tprivateKey, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ test save\n\tsavedBytes, err := savePrivateKey(privateKey)\n\tif err != nil {\n\t\tt.Fatal(\"error saving private key:\", err)\n\t}\n\n\t\/\/ test load\n\tloadedKey, err := loadPrivateKey(savedBytes)\n\tif err != nil {\n\t\tt.Error(\"error loading private key:\", err)\n\t}\n\n\t\/\/ verify loaded key is correct\n\tif !PrivateKeysSame(privateKey, loadedKey) {\n\t\tt.Error(\"Expected key bytes to be the same, but they weren't\")\n\t}\n}\n\n\/\/ PrivateKeysSame compares the bytes of a and b and returns true if they are the same.\nfunc PrivateKeysSame(a, b crypto.PrivateKey) bool {\n\treturn bytes.Equal(PrivateKeyBytes(a), PrivateKeyBytes(b))\n}\n\n\/\/ PrivateKeyBytes returns the bytes of DER-encoded key.\nfunc PrivateKeyBytes(key crypto.PrivateKey) []byte {\n\tvar keyBytes []byte\n\tswitch key := key.(type) {\n\tcase *rsa.PrivateKey:\n\t\tkeyBytes = x509.MarshalPKCS1PrivateKey(key)\n\tcase *ecdsa.PrivateKey:\n\t\tkeyBytes, _ = x509.MarshalECPrivateKey(key)\n\t}\n\treturn keyBytes\n}\n\nfunc TestStandaloneTLSTicketKeyRotation(t *testing.T) {\n\ttype syncPkt struct {\n\t\tticketKey [32]byte\n\t\tkeysInUse int\n\t}\n\n\ttlsGovChan := make(chan struct{})\n\tdefer close(tlsGovChan)\n\tcallSync := make(chan *syncPkt, 1)\n\tdefer close(callSync)\n\n\toldHook := setSessionTicketKeysTestHook\n\tdefer func() {\n\t\tsetSessionTicketKeysTestHook = oldHook\n\t}()\n\tsetSessionTicketKeysTestHook = func(keys [][32]byte) [][32]byte {\n\t\tcallSync <- &syncPkt{keys[0], len(keys)}\n\t\treturn keys\n\t}\n\n\tc := new(tls.Config)\n\ttimer := time.NewTicker(time.Millisecond * 1)\n\n\tgo standaloneTLSTicketKeyRotation(c, timer, tlsGovChan)\n\n\trounds := 0\n\tvar lastTicketKey [32]byte\n\tfor {\n\t\tselect {\n\t\tcase pkt := <-callSync:\n\t\t\tif lastTicketKey == pkt.ticketKey {\n\t\t\t\tclose(tlsGovChan)\n\t\t\t\tt.Errorf(\"The same TLS ticket key has been used again (not rotated): %x.\", lastTicketKey)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlastTicketKey = pkt.ticketKey\n\t\t\trounds++\n\t\t\tif rounds <= NumTickets && pkt.keysInUse != rounds {\n\t\t\t\tclose(tlsGovChan)\n\t\t\t\tt.Errorf(\"Expected TLS ticket keys in use: %d; Got instead: %d.\", rounds, pkt.keysInUse)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif c.SessionTicketsDisabled == true {\n\t\t\t\tt.Error(\"Session tickets have been disabled unexpectedly.\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif rounds >= NumTickets+1 {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-time.After(time.Second * 1):\n\t\t\tt.Errorf(\"Timeout after %d rounds.\", rounds)\n\t\t\treturn\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package wsyscall\n\nimport \"syscall\"\n\nfunc convertStat(dest *Stat_t, source *syscall.Stat_t) {\n\tdest.Dev = uint64(source.Dev)\n\tdest.Ino = source.Ino\n\tdest.Nlink = uint64(source.Nlink)\n\tdest.Mode = uint32(source.Mode)\n\tdest.Uid = source.Uid\n\tdest.Gid = source.Gid\n\tdest.Rdev = uint64(source.Rdev)\n\tdest.Size = source.Size\n\tdest.Blksize = int64(source.Blksize)\n\tdest.Blocks = source.Blocks\n\tdest.Atim = source.Atimespec\n\tdest.Mtim = source.Mtimespec\n\tdest.Ctim = source.Ctimespec\n}\n\nfunc fallocate(fd int, mode uint32, off int64, len int64) error {\n\treturn syscall.ENOTSUP\n}\n\nfunc mount(source string, target string, fstype string, flags uintptr,\n\tdata string) error {\n\treturn syscall.ENOTSUP\n}\n\nfunc getrusage(who int, rusage *Rusage) error {\n\tswitch who {\n\tcase RUSAGE_CHILDREN:\n\t\twho = syscall.RUSAGE_CHILDREN\n\tcase RUSAGE_SELF:\n\t\twho = syscall.RUSAGE_SELF\n\tdefault:\n\t\treturn syscall.ENOTSUP\n\t}\n\tvar syscallRusage syscall.Rusage\n\tif err := syscall.Getrusage(who, &syscallRusage); err != nil {\n\t\treturn err\n\t}\n\trusage.Utime.Sec = int64(syscallRusage.Utime.Sec)\n\trusage.Utime.Usec = int64(syscallRusage.Utime.Usec)\n\trusage.Stime.Sec = int64(syscallRusage.Stime.Sec)\n\trusage.Stime.Usec = int64(syscallRusage.Stime.Usec)\n\trusage.Maxrss = int64(syscallRusage.Maxrss) >> 10\n\trusage.Ixrss = int64(syscallRusage.Ixrss) >> 10\n\trusage.Idrss = int64(syscallRusage.Idrss) >> 10\n\trusage.Minflt = int64(syscallRusage.Minflt)\n\trusage.Majflt = int64(syscallRusage.Majflt)\n\trusage.Nswap = int64(syscallRusage.Nswap)\n\trusage.Inblock = int64(syscallRusage.Inblock)\n\trusage.Oublock = int64(syscallRusage.Oublock)\n\trusage.Msgsnd = int64(syscallRusage.Msgsnd)\n\trusage.Msgrcv = int64(syscallRusage.Msgrcv)\n\trusage.Nsignals = int64(syscallRusage.Nsignals)\n\trusage.Nvcsw = int64(syscallRusage.Nvcsw)\n\trusage.Nivcsw = int64(syscallRusage.Nivcsw)\n\treturn nil\n}\n\nfunc setAllGid(gid int) error {\n\treturn syscall.Setregid(gid, gid)\n}\n\nfunc setAllUid(uid int) error {\n\treturn syscall.Setreuid(uid, uid)\n}\n\nfunc unshareMountNamespace() error {\n\treturn syscall.ENOTSUP\n}\n<commit_msg>Add lib\/wsyscall.{Set,Unshare}NetNamespace() stubs for Darwin.<commit_after>package wsyscall\n\nimport \"syscall\"\n\nfunc convertStat(dest *Stat_t, source *syscall.Stat_t) {\n\tdest.Dev = uint64(source.Dev)\n\tdest.Ino = source.Ino\n\tdest.Nlink = uint64(source.Nlink)\n\tdest.Mode = uint32(source.Mode)\n\tdest.Uid = source.Uid\n\tdest.Gid = source.Gid\n\tdest.Rdev = uint64(source.Rdev)\n\tdest.Size = source.Size\n\tdest.Blksize = int64(source.Blksize)\n\tdest.Blocks = source.Blocks\n\tdest.Atim = source.Atimespec\n\tdest.Mtim = source.Mtimespec\n\tdest.Ctim = source.Ctimespec\n}\n\nfunc fallocate(fd int, mode uint32, off int64, len int64) error {\n\treturn syscall.ENOTSUP\n}\n\nfunc mount(source string, target string, fstype string, flags uintptr,\n\tdata string) error {\n\treturn syscall.ENOTSUP\n}\n\nfunc getrusage(who int, rusage *Rusage) error {\n\tswitch who {\n\tcase RUSAGE_CHILDREN:\n\t\twho = syscall.RUSAGE_CHILDREN\n\tcase RUSAGE_SELF:\n\t\twho = syscall.RUSAGE_SELF\n\tdefault:\n\t\treturn syscall.ENOTSUP\n\t}\n\tvar syscallRusage syscall.Rusage\n\tif err := syscall.Getrusage(who, &syscallRusage); err != nil {\n\t\treturn err\n\t}\n\trusage.Utime.Sec = int64(syscallRusage.Utime.Sec)\n\trusage.Utime.Usec = int64(syscallRusage.Utime.Usec)\n\trusage.Stime.Sec = int64(syscallRusage.Stime.Sec)\n\trusage.Stime.Usec = int64(syscallRusage.Stime.Usec)\n\trusage.Maxrss = int64(syscallRusage.Maxrss) >> 10\n\trusage.Ixrss = int64(syscallRusage.Ixrss) >> 10\n\trusage.Idrss = int64(syscallRusage.Idrss) >> 10\n\trusage.Minflt = int64(syscallRusage.Minflt)\n\trusage.Majflt = int64(syscallRusage.Majflt)\n\trusage.Nswap = int64(syscallRusage.Nswap)\n\trusage.Inblock = int64(syscallRusage.Inblock)\n\trusage.Oublock = int64(syscallRusage.Oublock)\n\trusage.Msgsnd = int64(syscallRusage.Msgsnd)\n\trusage.Msgrcv = int64(syscallRusage.Msgrcv)\n\trusage.Nsignals = int64(syscallRusage.Nsignals)\n\trusage.Nvcsw = int64(syscallRusage.Nvcsw)\n\trusage.Nivcsw = int64(syscallRusage.Nivcsw)\n\treturn nil\n}\n\nfunc setAllGid(gid int) error {\n\treturn syscall.Setregid(gid, gid)\n}\n\nfunc setAllUid(uid int) error {\n\treturn syscall.Setreuid(uid, uid)\n}\n\nfunc setNetNamespace(namespaceFd int) error {\n\treturn syscall.ENOTSUP\n}\n\nfunc unshareNetNamespace() (int, int, error) {\n\treturn -1, -1, syscall.ENOTSUP\n}\n\nfunc unshareMountNamespace() error {\n\treturn syscall.ENOTSUP\n}\n<|endoftext|>"} {"text":"<commit_before>package meters\n\nimport (\n\t\"github.com\/rackspace\/gophercloud\"\n)\n\n\/\/ ListOptsBuilder allows extensions to add additional parameters to the\n\/\/ List request.\ntype ListOptsBuilder interface {\n\tToMeterListQuery() (string, error)\n}\n\n\/\/ ListOpts allows the filtering and sorting of collections through\n\/\/ the API. Filtering is achieved by passing in struct field values that map to\n\/\/ the server attributes you want to see returned.\ntype ListOpts struct {\n}\n\n\/\/ ToMeterListQuery formats a ListOpts into a query string.\nfunc (opts ListOpts) ToMeterListQuery() (string, error) {\n\tq, err := gophercloud.BuildQueryString(opts)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn q.String(), nil\n}\n\n\/\/ List makes a request against the API to list meters accessible to you.\nfunc List(client *gophercloud.ServiceClient, opts ListOptsBuilder) listResult {\n\tvar res listResult\n\turl := listURL(client)\n\n\tif opts != nil {\n\t\tquery, err := opts.ToMeterListQuery()\n\t\tif err != nil {\n\t\t\tres.Err = err\n\t\t\treturn res\n\t\t}\n\t\turl += query\n\t}\n\n\t_, res.Err = client.Get(url, &res.Body, &gophercloud.RequestOpts{})\n\treturn res\n}\n\n\/\/OptsKind describes the mode with which a given set of opts should be tranferred\ntype OptsKind string\n\nvar (\n\t\/\/BodyContentOpts is a kind of option serialization. The MeterStatisticsOptsBuilder is expected\n\t\/\/to emit JSON from ToMeterStatisticsQuery()\n\tBodyContentOpts = OptsKind(\"Body\")\n\t\/\/QueryOpts is a kind of option serialization. The MeterStatisticsOptsBuilder is expected\n\t\/\/to emit uri encoded fields from ToMeterStatisticsQuery()\n\tQueryOpts = OptsKind(\"Query\")\n)\n\n\/\/ MeterStatisticsOptsBuilder allows extensions to add additional parameters to the\n\/\/ List request.\ntype MeterStatisticsOptsBuilder interface {\n\tKind() OptsKind\n\tToMeterStatisticsQuery() (string, error)\n}\n\n\/\/ MeterStatisticsOpts allows the filtering and sorting of collections through\n\/\/ the API. Filtering is achieved by passing in struct field values that map to\n\/\/ the server attributes you want to see returned.\ntype MeterStatisticsOpts struct {\n\tQueryField string `q:\"q.field\"`\n\tQueryOp string `q:\"q.op\"`\n\tQueryValue string `q:\"q.value\"`\n\n\t\/\/ Optional group by\n\tGroupBy string `q:\"groupby\"`\n\n\t\/\/ Optional number of seconds in a period\n\tPeriod int `q:\"period\"`\n}\n\n\/\/ Kind returns QueryOpts by default for MeterStatisticsOpts\nfunc (opts MeterStatisticsOpts) Kind() OptsKind {\n\treturn QueryOpts\n}\n\n\/\/ ToMeterStatisticsQuery formats a StatisticsOpts into a query string.\nfunc (opts MeterStatisticsOpts) ToMeterStatisticsQuery() (string, error) {\n\tq, err := gophercloud.BuildQueryString(opts)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn q.String(), nil\n}\n\n\/\/MeterStatistics gathers statistics based on filters, groups, and period options\nfunc MeterStatistics(client *gophercloud.ServiceClient, n string, optsBuilder MeterStatisticsOptsBuilder) statisticsResult {\n\tvar (\n\t\tres statisticsResult\n\t\turl = statisticsURL(client, n)\n\t\topts gophercloud.RequestOpts\n\t\terr error\n\t)\n\n\tif optsBuilder != nil && optsBuilder.Kind() == QueryOpts {\n\t\tquery, err := optsBuilder.ToMeterStatisticsQuery()\n\t\tif err != nil {\n\t\t\tres.Err = err\n\t\t\treturn res\n\t\t}\n\t\turl += query\n\t} else if optsBuilder != nil && optsBuilder.Kind() == BodyContentOpts {\n\t\topts.JSONBody, err = optsBuilder.ToMeterStatisticsQuery()\n\t\tif err != nil {\n\t\t\tres.Err = err\n\t\t\treturn res\n\t\t}\n\t}\n\n\t_, res.Err = client.Get(url, &res.Body, &opts)\n\treturn res\n}\n<commit_msg>style(requests): follow comment conventions on gophercloud<commit_after>package meters\n\nimport (\n\t\"github.com\/rackspace\/gophercloud\"\n)\n\n\/\/ ListOptsBuilder allows extensions to add additional parameters to the\n\/\/ List request.\ntype ListOptsBuilder interface {\n\tToMeterListQuery() (string, error)\n}\n\n\/\/ ListOpts allows the filtering and sorting of collections through\n\/\/ the API. Filtering is achieved by passing in struct field values that map to\n\/\/ the server attributes you want to see returned.\ntype ListOpts struct {\n}\n\n\/\/ ToMeterListQuery formats a ListOpts into a query string.\nfunc (opts ListOpts) ToMeterListQuery() (string, error) {\n\tq, err := gophercloud.BuildQueryString(opts)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn q.String(), nil\n}\n\n\/\/ List makes a request against the API to list meters accessible to you.\nfunc List(client *gophercloud.ServiceClient, opts ListOptsBuilder) listResult {\n\tvar res listResult\n\turl := listURL(client)\n\n\tif opts != nil {\n\t\tquery, err := opts.ToMeterListQuery()\n\t\tif err != nil {\n\t\t\tres.Err = err\n\t\t\treturn res\n\t\t}\n\t\turl += query\n\t}\n\n\t_, res.Err = client.Get(url, &res.Body, &gophercloud.RequestOpts{})\n\treturn res\n}\n\n\/\/ OptsKind describes the mode with which a given set of opts should be tranferred\ntype OptsKind string\n\nvar (\n\t\/\/BodyContentOpts is a kind of option serialization. The MeterStatisticsOptsBuilder is expected\n\t\/\/to emit JSON from ToMeterStatisticsQuery()\n\tBodyContentOpts = OptsKind(\"Body\")\n\t\/\/QueryOpts is a kind of option serialization. The MeterStatisticsOptsBuilder is expected\n\t\/\/to emit uri encoded fields from ToMeterStatisticsQuery()\n\tQueryOpts = OptsKind(\"Query\")\n)\n\n\/\/ MeterStatisticsOptsBuilder allows extensions to add additional parameters to the\n\/\/ List request.\ntype MeterStatisticsOptsBuilder interface {\n\tKind() OptsKind\n\tToMeterStatisticsQuery() (string, error)\n}\n\n\/\/ MeterStatisticsOpts allows the filtering and sorting of collections through\n\/\/ the API. Filtering is achieved by passing in struct field values that map to\n\/\/ the server attributes you want to see returned.\ntype MeterStatisticsOpts struct {\n\tQueryField string `q:\"q.field\"`\n\tQueryOp string `q:\"q.op\"`\n\tQueryValue string `q:\"q.value\"`\n\n\t\/\/ Optional group by\n\tGroupBy string `q:\"groupby\"`\n\n\t\/\/ Optional number of seconds in a period\n\tPeriod int `q:\"period\"`\n}\n\n\/\/ Kind returns QueryOpts by default for MeterStatisticsOpts\nfunc (opts MeterStatisticsOpts) Kind() OptsKind {\n\treturn QueryOpts\n}\n\n\/\/ ToMeterStatisticsQuery formats a StatisticsOpts into a query string.\nfunc (opts MeterStatisticsOpts) ToMeterStatisticsQuery() (string, error) {\n\tq, err := gophercloud.BuildQueryString(opts)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn q.String(), nil\n}\n\n\/\/ MeterStatistics gathers statistics based on filters, groups, and period options\nfunc MeterStatistics(client *gophercloud.ServiceClient, n string, optsBuilder MeterStatisticsOptsBuilder) statisticsResult {\n\tvar (\n\t\tres statisticsResult\n\t\turl = statisticsURL(client, n)\n\t\topts gophercloud.RequestOpts\n\t\terr error\n\t)\n\n\tif optsBuilder != nil && optsBuilder.Kind() == QueryOpts {\n\t\tquery, err := optsBuilder.ToMeterStatisticsQuery()\n\t\tif err != nil {\n\t\t\tres.Err = err\n\t\t\treturn res\n\t\t}\n\t\turl += query\n\t} else if optsBuilder != nil && optsBuilder.Kind() == BodyContentOpts {\n\t\topts.JSONBody, err = optsBuilder.ToMeterStatisticsQuery()\n\t\tif err != nil {\n\t\t\tres.Err = err\n\t\t\treturn res\n\t\t}\n\t}\n\n\t_, res.Err = client.Get(url, &res.Body, &opts)\n\treturn res\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2018 Banzai Cloud\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage v1alpha1\n\nimport (\n\t\"encoding\/json\"\n\t\"reflect\"\n\n\t\"github.com\/spf13\/cast\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\n\/\/ +genclient\n\/\/ +genclient:noStatus\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\/\/ +k8s:openapi-gen=true\n\n\/\/ VaultList represents a list of Vault Kubernetes objects\ntype VaultList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\tItems []Vault `json:\"items\"`\n}\n\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\n\/\/ Vault represents a Vault Kubernetes object\ntype Vault struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\t\/\/ Standard object's metadata.\n\t\/\/ More info: https:\/\/git.k8s.io\/community\/contributors\/devel\/api-conventions.md#metadata\n\t\/\/ +optional\n\tmetav1.ObjectMeta `json:\"metadata,omitempty\" protobuf:\"bytes,1,opt,name=metadata\"`\n\tSpec VaultSpec `json:\"spec\"`\n\tStatus VaultStatus `json:\"status,omitempty\"`\n}\n\n\/\/ VaultSpec represents the Spec field of a Vault Kubernetes object\ntype VaultSpec struct {\n\tSize int32 `json:\"size\"`\n\tImage string `json:\"image\"`\n\tBankVaultsImage string `json:\"bankVaultsImage\"`\n\tStatsDImage string `json:\"statsdImage\"`\n\tAnnotations map[string]string `json:\"annotations\"`\n\tConfig map[string]interface{} `json:\"config\"`\n\tExternalConfig map[string]interface{} `json:\"externalConfig\"`\n\tUnsealConfig UnsealConfig `json:\"unsealConfig\"`\n\tCredentialsConfig CredentialsConfig `json:\"credentialsConfig\"`\n\t\/\/ This option gives us the option to workaround current StatefulSet limitations around updates\n\t\/\/ See: https:\/\/github.com\/kubernetes\/kubernetes\/issues\/67250\n\t\/\/ TODO: Should be removed once the ParallelPodManagement policy supports the broken update.\n\tSupportUpgrade bool `json:\"supportUpgrade\"`\n}\n\n\/\/ HAStorageTypes is the set of storage backends supporting High Availability\nvar HAStorageTypes = map[string]bool{\n\t\"consul\": true,\n\t\"dynamodb\": true,\n\t\"etcd\": true,\n\t\"gcs\": true,\n\t\"spanner\": true,\n\t\"zookeeper\": true,\n}\n\n\/\/ HasHAStorage detects if Vault is configured to use a storage backend which supports High Availability\nfunc (spec *VaultSpec) HasHAStorage() bool {\n\tstorageType := spec.GetStorageType()\n\tif _, ok := HAStorageTypes[storageType]; ok {\n\t\treturn spec.HasStorageHAEnabled()\n\t}\n\treturn false\n}\n\n\/\/ GetStorage returns Vault's storage stanza\nfunc (spec *VaultSpec) GetStorage() map[string]interface{} {\n\tstorage := spec.getStorage()\n\treturn cast.ToStringMap(storage[spec.GetStorageType()])\n}\n\nfunc (spec *VaultSpec) getStorage() map[string]interface{} {\n\treturn cast.ToStringMap(spec.Config[\"storage\"])\n}\n\n\/\/ GetStorageType returns the type of Vault's storage stanza\nfunc (spec *VaultSpec) GetStorageType() string {\n\tstorage := spec.getStorage()\n\treturn reflect.ValueOf(storage).MapKeys()[0].String()\n}\n\n\/\/ HasStorageHAEnabled detects if the ha_enabled field is set to true in Vault's storage stanza\nfunc (spec *VaultSpec) HasStorageHAEnabled() bool {\n\tstorageType := spec.GetStorageType()\n\tstorage := spec.getStorage()\n\tstorageSpecs := cast.ToStringMap(storage[storageType])\n\t\/\/ In Consul HA is always enabled\n\treturn storageType == \"consul\" || cast.ToBool(storageSpecs[\"ha_enabled\"])\n}\n\n\/\/ GetTLSDisable returns if Vault's TLS is disabled\nfunc (spec *VaultSpec) GetTLSDisable() bool {\n\tlistener := spec.getListener()\n\ttcpSpecs := cast.ToStringMap(listener[\"tcp\"])\n\treturn cast.ToBool(tcpSpecs[\"tls_disable\"])\n}\n\nfunc (spec *VaultSpec) getListener() map[string]interface{} {\n\treturn cast.ToStringMap(spec.Config[\"listener\"])\n}\n\n\/\/ GetBankVaultsImage returns the bank-vaults image to use\nfunc (spec *VaultSpec) GetBankVaultsImage() string {\n\tif spec.BankVaultsImage == \"\" {\n\t\treturn \"banzaicloud\/bank-vaults:latest\"\n\t}\n\treturn spec.BankVaultsImage\n}\n\n\/\/ GetStatsDImage returns the StatsD image to use\nfunc (spec *VaultSpec) GetStatsDImage() string {\n\tif spec.StatsDImage == \"\" {\n\t\treturn \"prom\/statsd-exporter:latest\"\n\t}\n\treturn spec.StatsDImage\n}\n\n\/\/ GetAnnotations returns the Annotations\nfunc (spec *VaultSpec) GetAnnotations() map[string]string {\n\tif spec.Annotations == nil {\n\t\tspec.Annotations = map[string]string{}\n\t}\n\tspec.Annotations[\"prometheus.io\/scrape\"] = \"true\"\n\tspec.Annotations[\"prometheus.io\/path\"] = \"\/metrics\"\n\tspec.Annotations[\"prometheus.io\/port\"] = \"9102\"\n\treturn spec.Annotations\n}\n\n\/\/ ConfigJSON returns the Config field as a JSON string\nfunc (spec *VaultSpec) ConfigJSON() string {\n\tconfig, _ := json.Marshal(spec.Config)\n\treturn string(config)\n}\n\n\/\/ ExternalConfigJSON returns the ExternalConfig field as a JSON string\nfunc (spec *VaultSpec) ExternalConfigJSON() string {\n\tconfig, _ := json.Marshal(spec.ExternalConfig)\n\treturn string(config)\n}\n\n\/\/ VaultStatus represents the Status field of a Vault Kubernetes object\ntype VaultStatus struct {\n\tNodes []string `json:\"nodes\"`\n}\n\n\/\/ UnsealConfig represents the UnsealConfig field of a VaultSpec Kubernetes object\ntype UnsealConfig struct {\n\tKubernetes *KubernetesUnsealConfig `json:\"kubernetes\"`\n\tGoogle *GoogleUnsealConfig `json:\"google\"`\n\tAlibaba *AlibabaUnsealConfig `json:\"alibaba\"`\n\tAzure *AzureUnsealConfig `json:\"azure\"`\n\tAWS *AWSUnsealConfig `json:\"aws\"`\n}\n\n\/\/ ToArgs returns the UnsealConfig as and argument array for bank-vaults\nfunc (usc *UnsealConfig) ToArgs(vault *Vault) []string {\n\tif usc.Kubernetes != nil {\n\t\tsecretNamespace := vault.Namespace\n\t\tif usc.Kubernetes.SecretNamespace != \"\" {\n\t\t\tsecretNamespace = usc.Kubernetes.SecretNamespace\n\t\t}\n\t\tsecretName := vault.Name + \"-unseal-keys\"\n\t\tif usc.Kubernetes.SecretName != \"\" {\n\t\t\tsecretName = usc.Kubernetes.SecretName\n\t\t}\n\t\treturn []string{\"--mode\", \"k8s\", \"--k8s-secret-namespace\", secretNamespace, \"--k8s-secret-name\", secretName}\n\t}\n\tif usc.Google != nil {\n\t\treturn []string{\n\t\t\t\"--mode\",\n\t\t\t\"google-cloud-kms-gcs\",\n\t\t\t\"--google-cloud-kms-key-ring\",\n\t\t\tusc.Google.KMSKeyRing,\n\t\t\t\"--google-cloud-kms-crypto-key\",\n\t\t\tusc.Google.KMSCryptoKey,\n\t\t\t\"--google-cloud-kms-location\",\n\t\t\tusc.Google.KMSLocation,\n\t\t\t\"--google-cloud-kms-project\",\n\t\t\tusc.Google.KMSProject,\n\t\t\t\"--google-cloud-storage-bucket\",\n\t\t\tusc.Google.StorageBucket,\n\t\t}\n\t}\n\tif usc.Azure != nil {\n\t\treturn []string{\"--mode\", \"azure-key-vault\", \"--azure-key-vault-name\", usc.Azure.KeyVaultName}\n\t}\n\tif usc.AWS != nil {\n\t\treturn []string{\n\t\t\t\"--mode\",\n\t\t\t\"aws-kms-s3\",\n\t\t\t\"--aws-kms-key-id\",\n\t\t\tusc.AWS.KMSKeyID,\n\t\t\t\"--aws-kms-region\",\n\t\t\tusc.AWS.KMSRegion,\n\t\t\t\"--aws-s3-bucket\",\n\t\t\tusc.AWS.S3Bucket,\n\t\t\t\"--aws-s3-prefix\",\n\t\t\tusc.AWS.S3Prefix,\n\t\t\t\"--aws-s3-region\",\n\t\t\tusc.AWS.S3Region,\n\t\t}\n\t}\n\tif usc.Alibaba != nil {\n\t\treturn []string{\n\t\t\t\"--mode\",\n\t\t\t\"alibaba-kms-oss\",\n\t\t\t\"--alibaba-kms-region\",\n\t\t\tusc.Alibaba.KMSRegion,\n\t\t\t\"--alibaba-kms-key-id\",\n\t\t\tusc.Alibaba.KMSKeyID,\n\t\t\t\"--alibaba-oss-endpoint\",\n\t\t\tusc.Alibaba.OSSEndpoint,\n\t\t\t\"--alibaba-oss-bucket\",\n\t\t\tusc.Alibaba.OSSBucket,\n\t\t\t\"--alibaba-oss-prefix\",\n\t\t\tusc.Alibaba.OSSPrefix,\n\t\t}\n\t}\n\treturn []string{}\n}\n\n\/\/ KubernetesUnsealConfig holds the parameters for Kubernetes based unsealing\ntype KubernetesUnsealConfig struct {\n\tSecretNamespace string `json:\"secretNamespace\"`\n\tSecretName string `json:\"secretName\"`\n}\n\n\/\/ GoogleUnsealConfig holds the parameters for Google KMS based unsealing\ntype GoogleUnsealConfig struct {\n\tKMSKeyRing string `json:\"kmsKeyRing\"`\n\tKMSCryptoKey string `json:\"kmsCryptoKey\"`\n\tKMSLocation string `json:\"kmsLocation\"`\n\tKMSProject string `json:\"kmsProject\"`\n\tStorageBucket string `json:\"storageBucket\"`\n}\n\n\/\/ AlibabaUnsealConfig holds the parameters for Alibaba Cloud KMS based unsealing\n\/\/ --alibaba-kms-region eu-central-1 --alibaba-kms-key-id 9d8063eb-f9dc-421b-be80-15d195c9f148 --alibaba-oss-endpoint oss-eu-central-1.aliyuncs.com --alibaba-oss-bucket bank-vaults\ntype AlibabaUnsealConfig struct {\n\tKMSRegion string `json:\"kmsRegion\"`\n\tKMSKeyID string `json:\"kmsKeyId\"`\n\tOSSEndpoint string `json:\"ossEndpoint\"`\n\tOSSBucket string `json:\"ossBucket\"`\n\tOSSPrefix string `json:\"ossPrefix\"`\n}\n\n\/\/ AzureUnsealConfig holds the parameters for Azure Key Vault based unsealing\ntype AzureUnsealConfig struct {\n\tKeyVaultName string `json:\"keyVaultName\"`\n}\n\n\/\/ AWSUnsealConfig holds the parameters for AWS KMS based unsealing\ntype AWSUnsealConfig struct {\n\tKMSKeyID string `json:\"kmsKeyId\"`\n\tKMSRegion string `json:\"kmsRegion\"`\n\tS3Bucket string `json:\"s3Bucket\"`\n\tS3Prefix string `json:\"s3Prefix\"`\n\tS3Region string `json:\"s3Region\"`\n}\n\n\/\/ CredentialsConfig configuration for a credentials file provided as a secret\ntype CredentialsConfig struct {\n\tEnv string `json:\"env\"`\n\tPath string `json:\"path\"`\n\tSecretName string `json:\"secretName\"`\n}\n<commit_msg>Enabled disable_clustering if not specified otherwise<commit_after>\/\/ Copyright © 2018 Banzai Cloud\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage v1alpha1\n\nimport (\n\t\"encoding\/json\"\n\t\"reflect\"\n\n\t\"github.com\/spf13\/cast\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\n\/\/ +genclient\n\/\/ +genclient:noStatus\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\/\/ +k8s:openapi-gen=true\n\n\/\/ VaultList represents a list of Vault Kubernetes objects\ntype VaultList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\tItems []Vault `json:\"items\"`\n}\n\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\n\/\/ Vault represents a Vault Kubernetes object\ntype Vault struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\t\/\/ Standard object's metadata.\n\t\/\/ More info: https:\/\/git.k8s.io\/community\/contributors\/devel\/api-conventions.md#metadata\n\t\/\/ +optional\n\tmetav1.ObjectMeta `json:\"metadata,omitempty\" protobuf:\"bytes,1,opt,name=metadata\"`\n\tSpec VaultSpec `json:\"spec\"`\n\tStatus VaultStatus `json:\"status,omitempty\"`\n}\n\n\/\/ VaultSpec represents the Spec field of a Vault Kubernetes object\ntype VaultSpec struct {\n\tSize int32 `json:\"size\"`\n\tImage string `json:\"image\"`\n\tBankVaultsImage string `json:\"bankVaultsImage\"`\n\tStatsDImage string `json:\"statsdImage\"`\n\tAnnotations map[string]string `json:\"annotations\"`\n\tConfig map[string]interface{} `json:\"config\"`\n\tExternalConfig map[string]interface{} `json:\"externalConfig\"`\n\tUnsealConfig UnsealConfig `json:\"unsealConfig\"`\n\tCredentialsConfig CredentialsConfig `json:\"credentialsConfig\"`\n\t\/\/ This option gives us the option to workaround current StatefulSet limitations around updates\n\t\/\/ See: https:\/\/github.com\/kubernetes\/kubernetes\/issues\/67250\n\t\/\/ TODO: Should be removed once the ParallelPodManagement policy supports the broken update.\n\tSupportUpgrade bool `json:\"supportUpgrade\"`\n}\n\n\/\/ HAStorageTypes is the set of storage backends supporting High Availability\nvar HAStorageTypes = map[string]bool{\n\t\"consul\": true,\n\t\"dynamodb\": true,\n\t\"etcd\": true,\n\t\"gcs\": true,\n\t\"spanner\": true,\n\t\"zookeeper\": true,\n}\n\n\/\/ HasHAStorage detects if Vault is configured to use a storage backend which supports High Availability\nfunc (spec *VaultSpec) HasHAStorage() bool {\n\tstorageType := spec.GetStorageType()\n\tif _, ok := HAStorageTypes[storageType]; ok {\n\t\treturn spec.HasStorageHAEnabled()\n\t}\n\treturn false\n}\n\n\/\/ GetStorage returns Vault's storage stanza\nfunc (spec *VaultSpec) GetStorage() map[string]interface{} {\n\tstorage := spec.getStorage()\n\treturn cast.ToStringMap(storage[spec.GetStorageType()])\n}\n\nfunc (spec *VaultSpec) getStorage() map[string]interface{} {\n\treturn cast.ToStringMap(spec.Config[\"storage\"])\n}\n\n\/\/ GetStorageType returns the type of Vault's storage stanza\nfunc (spec *VaultSpec) GetStorageType() string {\n\tstorage := spec.getStorage()\n\treturn reflect.ValueOf(storage).MapKeys()[0].String()\n}\n\n\/\/ HasStorageHAEnabled detects if the ha_enabled field is set to true in Vault's storage stanza\nfunc (spec *VaultSpec) HasStorageHAEnabled() bool {\n\tstorageType := spec.GetStorageType()\n\tstorage := spec.getStorage()\n\tstorageSpecs := cast.ToStringMap(storage[storageType])\n\t\/\/ In Consul HA is always enabled\n\treturn storageType == \"consul\" || cast.ToBool(storageSpecs[\"ha_enabled\"])\n}\n\n\/\/ GetTLSDisable returns if Vault's TLS is disabled\nfunc (spec *VaultSpec) GetTLSDisable() bool {\n\tlistener := spec.getListener()\n\ttcpSpecs := cast.ToStringMap(listener[\"tcp\"])\n\treturn cast.ToBool(tcpSpecs[\"tls_disable\"])\n}\n\nfunc (spec *VaultSpec) getListener() map[string]interface{} {\n\treturn cast.ToStringMap(spec.Config[\"listener\"])\n}\n\n\/\/ GetBankVaultsImage returns the bank-vaults image to use\nfunc (spec *VaultSpec) GetBankVaultsImage() string {\n\tif spec.BankVaultsImage == \"\" {\n\t\treturn \"banzaicloud\/bank-vaults:latest\"\n\t}\n\treturn spec.BankVaultsImage\n}\n\n\/\/ GetStatsDImage returns the StatsD image to use\nfunc (spec *VaultSpec) GetStatsDImage() string {\n\tif spec.StatsDImage == \"\" {\n\t\treturn \"prom\/statsd-exporter:latest\"\n\t}\n\treturn spec.StatsDImage\n}\n\n\/\/ GetAnnotations returns the Annotations\nfunc (spec *VaultSpec) GetAnnotations() map[string]string {\n\tif spec.Annotations == nil {\n\t\tspec.Annotations = map[string]string{}\n\t}\n\tspec.Annotations[\"prometheus.io\/scrape\"] = \"true\"\n\tspec.Annotations[\"prometheus.io\/path\"] = \"\/metrics\"\n\tspec.Annotations[\"prometheus.io\/port\"] = \"9102\"\n\treturn spec.Annotations\n}\n\n\/\/ ConfigJSON returns the Config field as a JSON string\nfunc (spec *VaultSpec) ConfigJSON() string {\n\tif _, ok := spec.Config[\"disable_clustering\"]; !ok {\n\t\tspec.Config[\"disable_clustering\"] = true\n\t}\n\tconfig, _ := json.Marshal(spec.Config)\n\treturn string(config)\n}\n\n\/\/ ExternalConfigJSON returns the ExternalConfig field as a JSON string\nfunc (spec *VaultSpec) ExternalConfigJSON() string {\n\tconfig, _ := json.Marshal(spec.ExternalConfig)\n\treturn string(config)\n}\n\n\/\/ VaultStatus represents the Status field of a Vault Kubernetes object\ntype VaultStatus struct {\n\tNodes []string `json:\"nodes\"`\n}\n\n\/\/ UnsealConfig represents the UnsealConfig field of a VaultSpec Kubernetes object\ntype UnsealConfig struct {\n\tKubernetes *KubernetesUnsealConfig `json:\"kubernetes\"`\n\tGoogle *GoogleUnsealConfig `json:\"google\"`\n\tAlibaba *AlibabaUnsealConfig `json:\"alibaba\"`\n\tAzure *AzureUnsealConfig `json:\"azure\"`\n\tAWS *AWSUnsealConfig `json:\"aws\"`\n}\n\n\/\/ ToArgs returns the UnsealConfig as and argument array for bank-vaults\nfunc (usc *UnsealConfig) ToArgs(vault *Vault) []string {\n\tif usc.Kubernetes != nil {\n\t\tsecretNamespace := vault.Namespace\n\t\tif usc.Kubernetes.SecretNamespace != \"\" {\n\t\t\tsecretNamespace = usc.Kubernetes.SecretNamespace\n\t\t}\n\t\tsecretName := vault.Name + \"-unseal-keys\"\n\t\tif usc.Kubernetes.SecretName != \"\" {\n\t\t\tsecretName = usc.Kubernetes.SecretName\n\t\t}\n\t\treturn []string{\"--mode\", \"k8s\", \"--k8s-secret-namespace\", secretNamespace, \"--k8s-secret-name\", secretName}\n\t}\n\tif usc.Google != nil {\n\t\treturn []string{\n\t\t\t\"--mode\",\n\t\t\t\"google-cloud-kms-gcs\",\n\t\t\t\"--google-cloud-kms-key-ring\",\n\t\t\tusc.Google.KMSKeyRing,\n\t\t\t\"--google-cloud-kms-crypto-key\",\n\t\t\tusc.Google.KMSCryptoKey,\n\t\t\t\"--google-cloud-kms-location\",\n\t\t\tusc.Google.KMSLocation,\n\t\t\t\"--google-cloud-kms-project\",\n\t\t\tusc.Google.KMSProject,\n\t\t\t\"--google-cloud-storage-bucket\",\n\t\t\tusc.Google.StorageBucket,\n\t\t}\n\t}\n\tif usc.Azure != nil {\n\t\treturn []string{\"--mode\", \"azure-key-vault\", \"--azure-key-vault-name\", usc.Azure.KeyVaultName}\n\t}\n\tif usc.AWS != nil {\n\t\treturn []string{\n\t\t\t\"--mode\",\n\t\t\t\"aws-kms-s3\",\n\t\t\t\"--aws-kms-key-id\",\n\t\t\tusc.AWS.KMSKeyID,\n\t\t\t\"--aws-kms-region\",\n\t\t\tusc.AWS.KMSRegion,\n\t\t\t\"--aws-s3-bucket\",\n\t\t\tusc.AWS.S3Bucket,\n\t\t\t\"--aws-s3-prefix\",\n\t\t\tusc.AWS.S3Prefix,\n\t\t\t\"--aws-s3-region\",\n\t\t\tusc.AWS.S3Region,\n\t\t}\n\t}\n\tif usc.Alibaba != nil {\n\t\treturn []string{\n\t\t\t\"--mode\",\n\t\t\t\"alibaba-kms-oss\",\n\t\t\t\"--alibaba-kms-region\",\n\t\t\tusc.Alibaba.KMSRegion,\n\t\t\t\"--alibaba-kms-key-id\",\n\t\t\tusc.Alibaba.KMSKeyID,\n\t\t\t\"--alibaba-oss-endpoint\",\n\t\t\tusc.Alibaba.OSSEndpoint,\n\t\t\t\"--alibaba-oss-bucket\",\n\t\t\tusc.Alibaba.OSSBucket,\n\t\t\t\"--alibaba-oss-prefix\",\n\t\t\tusc.Alibaba.OSSPrefix,\n\t\t}\n\t}\n\treturn []string{}\n}\n\n\/\/ KubernetesUnsealConfig holds the parameters for Kubernetes based unsealing\ntype KubernetesUnsealConfig struct {\n\tSecretNamespace string `json:\"secretNamespace\"`\n\tSecretName string `json:\"secretName\"`\n}\n\n\/\/ GoogleUnsealConfig holds the parameters for Google KMS based unsealing\ntype GoogleUnsealConfig struct {\n\tKMSKeyRing string `json:\"kmsKeyRing\"`\n\tKMSCryptoKey string `json:\"kmsCryptoKey\"`\n\tKMSLocation string `json:\"kmsLocation\"`\n\tKMSProject string `json:\"kmsProject\"`\n\tStorageBucket string `json:\"storageBucket\"`\n}\n\n\/\/ AlibabaUnsealConfig holds the parameters for Alibaba Cloud KMS based unsealing\n\/\/ --alibaba-kms-region eu-central-1 --alibaba-kms-key-id 9d8063eb-f9dc-421b-be80-15d195c9f148 --alibaba-oss-endpoint oss-eu-central-1.aliyuncs.com --alibaba-oss-bucket bank-vaults\ntype AlibabaUnsealConfig struct {\n\tKMSRegion string `json:\"kmsRegion\"`\n\tKMSKeyID string `json:\"kmsKeyId\"`\n\tOSSEndpoint string `json:\"ossEndpoint\"`\n\tOSSBucket string `json:\"ossBucket\"`\n\tOSSPrefix string `json:\"ossPrefix\"`\n}\n\n\/\/ AzureUnsealConfig holds the parameters for Azure Key Vault based unsealing\ntype AzureUnsealConfig struct {\n\tKeyVaultName string `json:\"keyVaultName\"`\n}\n\n\/\/ AWSUnsealConfig holds the parameters for AWS KMS based unsealing\ntype AWSUnsealConfig struct {\n\tKMSKeyID string `json:\"kmsKeyId\"`\n\tKMSRegion string `json:\"kmsRegion\"`\n\tS3Bucket string `json:\"s3Bucket\"`\n\tS3Prefix string `json:\"s3Prefix\"`\n\tS3Region string `json:\"s3Region\"`\n}\n\n\/\/ CredentialsConfig configuration for a credentials file provided as a secret\ntype CredentialsConfig struct {\n\tEnv string `json:\"env\"`\n\tPath string `json:\"path\"`\n\tSecretName string `json:\"secretName\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/rc4\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/big\"\n\t\"math\/rand\"\n\t\"misc\/crypto\/dh\"\n\t\"misc\/packet\"\n\t\"net\"\n\t\"os\"\n\t\"time\"\n)\n\nvar (\n\tseqid = uint32(0)\n\tencoder *rc4.Cipher\n\tdecoder *rc4.Cipher\n\tKEY_EXCHANGE = false\n\tSALT = \"DH\"\n)\n\nconst (\n\tDEFAULT_AGENT_HOST = \"192.168.2.127:8888\"\n)\n\nfunc checkErr(err error) {\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tpanic(\"error occured in protocol module\")\n\t}\n}\nfunc main() {\n\thost := DEFAULT_AGENT_HOST\n\tif env := os.Getenv(\"AGENT_HOST\"); env != \"\" {\n\t\thost = env\n\t}\n\taddr, err := net.ResolveTCPAddr(\"tcp\", host)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(-1)\n\t}\n\tconn, err := net.DialTCP(\"tcp\", nil, addr)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(-1)\n\t}\n\tdefer conn.Close()\n\n\t\/\/get_seed_req\n\tS1, M1 := dh.DHExchange()\n\tK1 := dh.DHKey(S1, big.NewInt(rand.Int63()))\n\tS2, M2 := dh.DHExchange()\n\tK2 := dh.DHKey(S2, big.NewInt(rand.Int63()))\n\tencoder, err = rc4.NewCipher([]byte(fmt.Sprintf(\"%v%v\", SALT, K1)))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tdecoder, err = rc4.NewCipher([]byte(fmt.Sprintf(\"%v%v\", SALT, K2)))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tp2 := seed_info{\n\t\tint32(M1.Int64()),\n\t\tint32(M2.Int64()),\n\t}\n\tsend_proto(conn, Code[\"get_seed_req\"], p2)\n\n\tKEY_EXCHANGE = true\n\n\t\/\/user_login_req\n\tp3 := user_login_info{\n\t\tF_login_way: 0,\n\t\tF_open_udid: \"udid\",\n\t\tF_client_certificate: \"qwertyuiopasdfgh\",\n\t\tF_client_version: 1,\n\t\tF_user_lang: \"en\",\n\t\tF_app_id: \"com.yrhd.lovegame\",\n\t\tF_os_version: \"android4.4\",\n\t\tF_device_name: \"simulate\",\n\t\tF_device_id: \"device_id\",\n\t\tF_device_id_type: 1,\n\t\tF_login_ip: \"127.0.0.1\",\n\t}\n\tsend_proto(conn, Code[\"user_login_req\"], p3)\n\n\t\/\/heart_beat_req\n\tsend_proto(conn, Code[\"heart_beat_req\"], nil)\n\n\t\/\/proto_ping_req\n\tp1 := auto_id{\n\t\tF_id: rand.Int31(),\n\t}\n\tsend_proto(conn, Code[\"proto_ping_req\"], p1)\n\n}\n\nfunc send_proto(conn net.Conn, p int16, info interface{}) {\n\tseqid++\n\tpayload := packet.Pack(p, info, nil)\n\twriter := packet.Writer()\n\twriter.WriteU16(uint16(len(payload)) + 4)\n\n\tw := packet.Writer()\n\tw.WriteU32(seqid)\n\tw.WriteRawBytes(payload)\n\tdata := w.Data()\n\tif KEY_EXCHANGE {\n\t\tencoder.XORKeyStream(data, data)\n\t}\n\twriter.WriteRawBytes(data)\n\tconn.Write(writer.Data())\n\tlog.Printf(\"send : %#v\", writer.Data())\n\ttime.Sleep(time.Second)\n\n\treturn\n}\n<commit_msg>update<commit_after>package main\n\nimport (\n\t\"crypto\/rc4\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math\/big\"\n\t\"math\/rand\"\n\t\"misc\/crypto\/dh\"\n\t\"misc\/packet\"\n\t\"net\"\n\t\"os\"\n\t\"time\"\n)\n\nvar (\n\tseqid = uint32(0)\n\tencoder *rc4.Cipher\n\tdecoder *rc4.Cipher\n\tKEY_EXCHANGE = false\n\tSALT = \"DH\"\n)\n\nconst (\n\tDEFAULT_AGENT_HOST = \"192.168.2.127:8888\"\n)\n\nfunc checkErr(err error) {\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tpanic(\"error occured in protocol module\")\n\t}\n}\nfunc main() {\n\thost := DEFAULT_AGENT_HOST\n\tif env := os.Getenv(\"AGENT_HOST\"); env != \"\" {\n\t\thost = env\n\t}\n\taddr, err := net.ResolveTCPAddr(\"tcp\", host)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(-1)\n\t}\n\tconn, err := net.DialTCP(\"tcp\", nil, addr)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(-1)\n\t}\n\tdefer conn.Close()\n\n\t\/\/get_seed_req\n\tS1, M1 := dh.DHExchange()\n\tS2, M2 := dh.DHExchange()\n\tp2 := seed_info{\n\t\tint32(M1.Int64()),\n\t\tint32(M2.Int64()),\n\t}\n\trst := send_proto(conn, Code[\"get_seed_req\"], p2)\n\tr1, _ := PKT_seed_info(rst)\n\tlog.Printf(\"result: %#v\", r1)\n\n\tK1 := dh.DHKey(S1, big.NewInt(int64(r1.F_client_send_seed)))\n\tK2 := dh.DHKey(S2, big.NewInt(int64(r1.F_client_receive_seed)))\n\tencoder, err = rc4.NewCipher([]byte(fmt.Sprintf(\"%v%v\", SALT, K1)))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tdecoder, err = rc4.NewCipher([]byte(fmt.Sprintf(\"%v%v\", SALT, K2)))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tKEY_EXCHANGE = true\n\n\t\/\/user_login_req\n\tp3 := user_login_info{\n\t\tF_login_way: 0,\n\t\tF_open_udid: \"udid\",\n\t\tF_client_certificate: \"qwertyuiopasdfgh\",\n\t\tF_client_version: 1,\n\t\tF_user_lang: \"en\",\n\t\tF_app_id: \"com.yrhd.lovegame\",\n\t\tF_os_version: \"android4.4\",\n\t\tF_device_name: \"simulate\",\n\t\tF_device_id: \"device_id\",\n\t\tF_device_id_type: 1,\n\t\tF_login_ip: \"127.0.0.1\",\n\t}\n\tsend_proto(conn, Code[\"user_login_req\"], p3)\n\n\t\/\/heart_beat_req\n\tsend_proto(conn, Code[\"heart_beat_req\"], nil)\n\n\t\/\/proto_ping_req\n\tp1 := auto_id{\n\t\tF_id: rand.Int31(),\n\t}\n\tsend_proto(conn, Code[\"proto_ping_req\"], p1)\n\n}\n\nfunc send_proto(conn net.Conn, p int16, info interface{}) (reader *packet.Packet) {\n\tseqid++\n\tpayload := packet.Pack(p, info, nil)\n\twriter := packet.Writer()\n\twriter.WriteU16(uint16(len(payload)) + 4)\n\n\tw := packet.Writer()\n\tw.WriteU32(seqid)\n\tw.WriteRawBytes(payload)\n\tdata := w.Data()\n\tif KEY_EXCHANGE {\n\t\tencoder.XORKeyStream(data, data)\n\t}\n\twriter.WriteRawBytes(data)\n\tconn.Write(writer.Data())\n\tlog.Printf(\"send : %#v\", writer.Data())\n\ttime.Sleep(time.Second)\n\n\t\/\/read\n\theader := make([]byte, 2)\n\tio.ReadFull(conn, header)\n\tsize := binary.BigEndian.Uint16(header)\n\tr := make([]byte, size)\n\t_, err := io.ReadFull(conn, r)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tif KEY_EXCHANGE {\n\t\tdecoder.XORKeyStream(r, r)\n\t}\n\treader = packet.Reader(r)\n\tb, err := reader.ReadS16()\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tif _, ok := RCode[b]; !ok {\n\t\tlog.Println(\"unknown proto \", b)\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux\n\npackage fs2\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\tsecurejoin \"github.com\/cyphar\/filepath-securejoin\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/cgroups\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/configs\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ NewManager creates a manager for cgroup v2 unified hierarchy.\n\/\/ dirPath is like \"\/sys\/fs\/cgroup\/user.slice\/user-1001.slice\/session-1.scope\".\n\/\/ If dirPath is empty, it is automatically set using config.\nfunc NewManager(config *configs.Cgroup, dirPath string, rootless bool) (cgroups.Manager, error) {\n\tif config == nil {\n\t\tconfig = &configs.Cgroup{}\n\t}\n\tif dirPath != \"\" {\n\t\tif filepath.Clean(dirPath) != dirPath || !filepath.IsAbs(dirPath) {\n\t\t\treturn nil, errors.Errorf(\"invalid dir path %q\", dirPath)\n\t\t}\n\t} else {\n\t\tvar err error\n\t\tdirPath, err = defaultDirPath(config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tcontrollers, err := detectControllers(dirPath)\n\tif err != nil && !rootless {\n\t\treturn nil, err\n\t}\n\n\tm := &manager{\n\t\tconfig: config,\n\t\tdirPath: dirPath,\n\t\tcontrollers: controllers,\n\t\trootless: rootless,\n\t}\n\treturn m, nil\n}\n\nfunc detectControllers(dirPath string) (map[string]struct{}, error) {\n\tif err := os.MkdirAll(dirPath, 0755); err != nil {\n\t\treturn nil, err\n\t}\n\tcontrollersPath, err := securejoin.SecureJoin(dirPath, \"cgroup.controllers\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcontrollersData, err := ioutil.ReadFile(controllersPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcontrollersFields := strings.Fields(string(controllersData))\n\tcontrollers := make(map[string]struct{}, len(controllersFields))\n\tfor _, c := range controllersFields {\n\t\tcontrollers[c] = struct{}{}\n\t}\n\treturn controllers, nil\n}\n\ntype manager struct {\n\tconfig *configs.Cgroup\n\t\/\/ dirPath is like \"\/sys\/fs\/cgroup\/user.slice\/user-1001.slice\/session-1.scope\"\n\tdirPath string\n\t\/\/ controllers is content of \"cgroup.controllers\" file.\n\t\/\/ excludes pseudo-controllers (\"devices\" and \"freezer\").\n\tcontrollers map[string]struct{}\n\trootless bool\n}\n\nfunc (m *manager) Apply(pid int) error {\n\tif err := cgroups.WriteCgroupProc(m.dirPath, pid); err != nil && !m.rootless {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (m *manager) GetPids() ([]int, error) {\n\treturn cgroups.GetPids(m.dirPath)\n}\n\nfunc (m *manager) GetAllPids() ([]int, error) {\n\treturn cgroups.GetAllPids(m.dirPath)\n}\n\nfunc (m *manager) GetStats() (*cgroups.Stats, error) {\n\tvar (\n\t\tst cgroups.Stats\n\t\terrs []error\n\t)\n\t\/\/ pids (since kernel 4.5)\n\tif _, ok := m.controllers[\"pids\"]; ok {\n\t\tif err := statPids(m.dirPath, &st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t} else {\n\t\tif err := statPidsWithoutController(m.dirPath, &st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ memory (since kenrel 4.5)\n\tif _, ok := m.controllers[\"memory\"]; ok {\n\t\tif err := statMemory(m.dirPath, &st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ io (since kernel 4.5)\n\tif _, ok := m.controllers[\"io\"]; ok {\n\t\tif err := statIo(m.dirPath, &st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ cpu (since kernel 4.15)\n\tif _, ok := m.controllers[\"cpu\"]; ok {\n\t\tif err := statCpu(m.dirPath, &st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ hugetlb (since kernel 5.6)\n\tif _, ok := m.controllers[\"hugetlb\"]; ok {\n\t\tif err := statHugeTlb(m.dirPath, &st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\tif len(errs) > 0 && !m.rootless {\n\t\treturn &st, errors.Errorf(\"error while statting cgroup v2: %+v\", errs)\n\t}\n\treturn &st, nil\n}\n\nfunc (m *manager) Freeze(state configs.FreezerState) error {\n\tif err := setFreezer(m.dirPath, state); err != nil {\n\t\treturn err\n\t}\n\tm.config.Resources.Freezer = state\n\treturn nil\n}\n\nfunc (m *manager) Destroy() error {\n\treturn os.RemoveAll(m.dirPath)\n}\n\n\/\/ GetPaths is for compatibility purpose and should be removed in future\nfunc (m *manager) GetPaths() map[string]string {\n\tpaths := map[string]string{\n\t\t\/\/ pseudo-controller for compatibility\n\t\t\"devices\": m.dirPath,\n\t\t\"freezer\": m.dirPath,\n\t}\n\tfor c := range m.controllers {\n\t\tpaths[c] = m.dirPath\n\t}\n\treturn paths\n}\n\nfunc (m *manager) GetUnifiedPath() (string, error) {\n\treturn m.dirPath, nil\n}\n\nfunc (m *manager) Set(container *configs.Config) error {\n\tif container == nil || container.Cgroups == nil {\n\t\treturn nil\n\t}\n\tvar errs []error\n\t\/\/ pids (since kernel 4.5)\n\tif _, ok := m.controllers[\"pids\"]; ok {\n\t\tif err := setPids(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ memory (since kernel 4.5)\n\tif _, ok := m.controllers[\"memory\"]; ok {\n\t\tif err := setMemory(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ io (since kernel 4.5)\n\tif _, ok := m.controllers[\"io\"]; ok {\n\t\tif err := setIo(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ cpu (since kernel 4.15)\n\tif _, ok := m.controllers[\"cpu\"]; ok {\n\t\tif err := setCpu(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ devices (since kernel 4.15, pseudo-controller)\n\tif err := setDevices(m.dirPath, container.Cgroups); err != nil {\n\t\terrs = append(errs, err)\n\t}\n\t\/\/ cpuset (since kernel 5.0)\n\tif _, ok := m.controllers[\"cpuset\"]; ok {\n\t\tif err := setCpuset(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ hugetlb (since kernel 5.6)\n\tif _, ok := m.controllers[\"hugetlb\"]; ok {\n\t\tif err := setHugeTlb(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ freezer (since kernel 5.2, pseudo-controller)\n\tif err := setFreezer(m.dirPath, container.Cgroups.Freezer); err != nil {\n\t\terrs = append(errs, err)\n\t}\n\tif len(errs) > 0 && !m.rootless {\n\t\treturn errors.Errorf(\"error while setting cgroup v2: %+v\", errs)\n\t}\n\tm.config = container.Cgroups\n\treturn nil\n}\n\nfunc (m *manager) GetCgroups() (*configs.Cgroup, error) {\n\treturn m.config, nil\n}\n<commit_msg>libcontainer: use cgroups.NewStats<commit_after>\/\/ +build linux\n\npackage fs2\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\tsecurejoin \"github.com\/cyphar\/filepath-securejoin\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/cgroups\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/configs\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ NewManager creates a manager for cgroup v2 unified hierarchy.\n\/\/ dirPath is like \"\/sys\/fs\/cgroup\/user.slice\/user-1001.slice\/session-1.scope\".\n\/\/ If dirPath is empty, it is automatically set using config.\nfunc NewManager(config *configs.Cgroup, dirPath string, rootless bool) (cgroups.Manager, error) {\n\tif config == nil {\n\t\tconfig = &configs.Cgroup{}\n\t}\n\tif dirPath != \"\" {\n\t\tif filepath.Clean(dirPath) != dirPath || !filepath.IsAbs(dirPath) {\n\t\t\treturn nil, errors.Errorf(\"invalid dir path %q\", dirPath)\n\t\t}\n\t} else {\n\t\tvar err error\n\t\tdirPath, err = defaultDirPath(config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tcontrollers, err := detectControllers(dirPath)\n\tif err != nil && !rootless {\n\t\treturn nil, err\n\t}\n\n\tm := &manager{\n\t\tconfig: config,\n\t\tdirPath: dirPath,\n\t\tcontrollers: controllers,\n\t\trootless: rootless,\n\t}\n\treturn m, nil\n}\n\nfunc detectControllers(dirPath string) (map[string]struct{}, error) {\n\tif err := os.MkdirAll(dirPath, 0755); err != nil {\n\t\treturn nil, err\n\t}\n\tcontrollersPath, err := securejoin.SecureJoin(dirPath, \"cgroup.controllers\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcontrollersData, err := ioutil.ReadFile(controllersPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcontrollersFields := strings.Fields(string(controllersData))\n\tcontrollers := make(map[string]struct{}, len(controllersFields))\n\tfor _, c := range controllersFields {\n\t\tcontrollers[c] = struct{}{}\n\t}\n\treturn controllers, nil\n}\n\ntype manager struct {\n\tconfig *configs.Cgroup\n\t\/\/ dirPath is like \"\/sys\/fs\/cgroup\/user.slice\/user-1001.slice\/session-1.scope\"\n\tdirPath string\n\t\/\/ controllers is content of \"cgroup.controllers\" file.\n\t\/\/ excludes pseudo-controllers (\"devices\" and \"freezer\").\n\tcontrollers map[string]struct{}\n\trootless bool\n}\n\nfunc (m *manager) Apply(pid int) error {\n\tif err := cgroups.WriteCgroupProc(m.dirPath, pid); err != nil && !m.rootless {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (m *manager) GetPids() ([]int, error) {\n\treturn cgroups.GetPids(m.dirPath)\n}\n\nfunc (m *manager) GetAllPids() ([]int, error) {\n\treturn cgroups.GetAllPids(m.dirPath)\n}\n\nfunc (m *manager) GetStats() (*cgroups.Stats, error) {\n\tvar (\n\t\terrs []error\n\t)\n\n\tst := cgroups.NewStats()\n\n\t\/\/ pids (since kernel 4.5)\n\tif _, ok := m.controllers[\"pids\"]; ok {\n\t\tif err := statPids(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t} else {\n\t\tif err := statPidsWithoutController(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ memory (since kernel 4.5)\n\tif _, ok := m.controllers[\"memory\"]; ok {\n\t\tif err := statMemory(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ io (since kernel 4.5)\n\tif _, ok := m.controllers[\"io\"]; ok {\n\t\tif err := statIo(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ cpu (since kernel 4.15)\n\tif _, ok := m.controllers[\"cpu\"]; ok {\n\t\tif err := statCpu(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ hugetlb (since kernel 5.6)\n\tif _, ok := m.controllers[\"hugetlb\"]; ok {\n\t\tif err := statHugeTlb(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\tif len(errs) > 0 && !m.rootless {\n\t\treturn st, errors.Errorf(\"error while statting cgroup v2: %+v\", errs)\n\t}\n\treturn st, nil\n}\n\nfunc (m *manager) Freeze(state configs.FreezerState) error {\n\tif err := setFreezer(m.dirPath, state); err != nil {\n\t\treturn err\n\t}\n\tm.config.Resources.Freezer = state\n\treturn nil\n}\n\nfunc (m *manager) Destroy() error {\n\treturn os.RemoveAll(m.dirPath)\n}\n\n\/\/ GetPaths is for compatibility purpose and should be removed in future\nfunc (m *manager) GetPaths() map[string]string {\n\tpaths := map[string]string{\n\t\t\/\/ pseudo-controller for compatibility\n\t\t\"devices\": m.dirPath,\n\t\t\"freezer\": m.dirPath,\n\t}\n\tfor c := range m.controllers {\n\t\tpaths[c] = m.dirPath\n\t}\n\treturn paths\n}\n\nfunc (m *manager) GetUnifiedPath() (string, error) {\n\treturn m.dirPath, nil\n}\n\nfunc (m *manager) Set(container *configs.Config) error {\n\tif container == nil || container.Cgroups == nil {\n\t\treturn nil\n\t}\n\tvar errs []error\n\t\/\/ pids (since kernel 4.5)\n\tif _, ok := m.controllers[\"pids\"]; ok {\n\t\tif err := setPids(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ memory (since kernel 4.5)\n\tif _, ok := m.controllers[\"memory\"]; ok {\n\t\tif err := setMemory(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ io (since kernel 4.5)\n\tif _, ok := m.controllers[\"io\"]; ok {\n\t\tif err := setIo(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ cpu (since kernel 4.15)\n\tif _, ok := m.controllers[\"cpu\"]; ok {\n\t\tif err := setCpu(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ devices (since kernel 4.15, pseudo-controller)\n\tif err := setDevices(m.dirPath, container.Cgroups); err != nil {\n\t\terrs = append(errs, err)\n\t}\n\t\/\/ cpuset (since kernel 5.0)\n\tif _, ok := m.controllers[\"cpuset\"]; ok {\n\t\tif err := setCpuset(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ hugetlb (since kernel 5.6)\n\tif _, ok := m.controllers[\"hugetlb\"]; ok {\n\t\tif err := setHugeTlb(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ freezer (since kernel 5.2, pseudo-controller)\n\tif err := setFreezer(m.dirPath, container.Cgroups.Freezer); err != nil {\n\t\terrs = append(errs, err)\n\t}\n\tif len(errs) > 0 && !m.rootless {\n\t\treturn errors.Errorf(\"error while setting cgroup v2: %+v\", errs)\n\t}\n\tm.config = container.Cgroups\n\treturn nil\n}\n\nfunc (m *manager) GetCgroups() (*configs.Cgroup, error) {\n\treturn m.config, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux\n\npackage fs2\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\tsecurejoin \"github.com\/cyphar\/filepath-securejoin\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/cgroups\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/configs\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ NewManager creates a manager for cgroup v2 unified hierarchy.\n\/\/ dirPath is like \"\/sys\/fs\/cgroup\/user.slice\/user-1001.slice\/session-1.scope\".\n\/\/ If dirPath is empty, it is automatically set using config.\nfunc NewManager(config *configs.Cgroup, dirPath string, rootless bool) (cgroups.Manager, error) {\n\tif config == nil {\n\t\tconfig = &configs.Cgroup{}\n\t}\n\tif dirPath != \"\" {\n\t\tif filepath.Clean(dirPath) != dirPath || !filepath.IsAbs(dirPath) {\n\t\t\treturn nil, errors.Errorf(\"invalid dir path %q\", dirPath)\n\t\t}\n\t} else {\n\t\tvar err error\n\t\tdirPath, err = defaultDirPath(config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tcontrollers, err := detectControllers(dirPath)\n\tif err != nil && !rootless {\n\t\treturn nil, err\n\t}\n\n\tm := &manager{\n\t\tconfig: config,\n\t\tdirPath: dirPath,\n\t\tcontrollers: controllers,\n\t\trootless: rootless,\n\t}\n\treturn m, nil\n}\n\nfunc detectControllers(dirPath string) (map[string]struct{}, error) {\n\tif err := os.MkdirAll(dirPath, 0755); err != nil {\n\t\treturn nil, err\n\t}\n\tcontrollersPath, err := securejoin.SecureJoin(dirPath, \"cgroup.controllers\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcontrollersData, err := ioutil.ReadFile(controllersPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcontrollersFields := strings.Fields(string(controllersData))\n\tcontrollers := make(map[string]struct{}, len(controllersFields))\n\tfor _, c := range controllersFields {\n\t\tcontrollers[c] = struct{}{}\n\t}\n\treturn controllers, nil\n}\n\ntype manager struct {\n\tconfig *configs.Cgroup\n\t\/\/ dirPath is like \"\/sys\/fs\/cgroup\/user.slice\/user-1001.slice\/session-1.scope\"\n\tdirPath string\n\t\/\/ controllers is content of \"cgroup.controllers\" file.\n\t\/\/ excludes pseudo-controllers (\"devices\" and \"freezer\").\n\tcontrollers map[string]struct{}\n\trootless bool\n}\n\nfunc (m *manager) Apply(pid int) error {\n\tif err := cgroups.WriteCgroupProc(m.dirPath, pid); err != nil && !m.rootless {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (m *manager) GetPids() ([]int, error) {\n\treturn cgroups.GetPids(m.dirPath)\n}\n\nfunc (m *manager) GetAllPids() ([]int, error) {\n\treturn cgroups.GetAllPids(m.dirPath)\n}\n\nfunc (m *manager) GetStats() (*cgroups.Stats, error) {\n\tvar (\n\t\terrs []error\n\t)\n\n\tst := cgroups.NewStats()\n\n\t\/\/ pids (since kernel 4.5)\n\tif _, ok := m.controllers[\"pids\"]; ok {\n\t\tif err := statPids(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t} else {\n\t\tif err := statPidsWithoutController(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ memory (since kernel 4.5)\n\tif _, ok := m.controllers[\"memory\"]; ok {\n\t\tif err := statMemory(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ io (since kernel 4.5)\n\tif _, ok := m.controllers[\"io\"]; ok {\n\t\tif err := statIo(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ cpu (since kernel 4.15)\n\tif _, ok := m.controllers[\"cpu\"]; ok {\n\t\tif err := statCpu(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ hugetlb (since kernel 5.6)\n\tif _, ok := m.controllers[\"hugetlb\"]; ok {\n\t\tif err := statHugeTlb(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\tif len(errs) > 0 && !m.rootless {\n\t\treturn st, errors.Errorf(\"error while statting cgroup v2: %+v\", errs)\n\t}\n\treturn st, nil\n}\n\nfunc (m *manager) Freeze(state configs.FreezerState) error {\n\tif err := setFreezer(m.dirPath, state); err != nil {\n\t\treturn err\n\t}\n\tm.config.Resources.Freezer = state\n\treturn nil\n}\n\nfunc (m *manager) Destroy() error {\n\treturn os.RemoveAll(m.dirPath)\n}\n\n\/\/ GetPaths is for compatibility purpose and should be removed in future\nfunc (m *manager) GetPaths() map[string]string {\n\tpaths := map[string]string{\n\t\t\/\/ pseudo-controller for compatibility\n\t\t\"devices\": m.dirPath,\n\t\t\"freezer\": m.dirPath,\n\t}\n\tfor c := range m.controllers {\n\t\tpaths[c] = m.dirPath\n\t}\n\treturn paths\n}\n\nfunc (m *manager) GetUnifiedPath() (string, error) {\n\treturn m.dirPath, nil\n}\n\nfunc (m *manager) Set(container *configs.Config) error {\n\tif container == nil || container.Cgroups == nil {\n\t\treturn nil\n\t}\n\tvar errs []error\n\t\/\/ pids (since kernel 4.5)\n\tif _, ok := m.controllers[\"pids\"]; ok {\n\t\tif err := setPids(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ memory (since kernel 4.5)\n\tif _, ok := m.controllers[\"memory\"]; ok {\n\t\tif err := setMemory(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ io (since kernel 4.5)\n\tif _, ok := m.controllers[\"io\"]; ok {\n\t\tif err := setIo(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ cpu (since kernel 4.15)\n\tif _, ok := m.controllers[\"cpu\"]; ok {\n\t\tif err := setCpu(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ devices (since kernel 4.15, pseudo-controller)\n\tif err := setDevices(m.dirPath, container.Cgroups); err != nil {\n\t\terrs = append(errs, err)\n\t}\n\t\/\/ cpuset (since kernel 5.0)\n\tif _, ok := m.controllers[\"cpuset\"]; ok {\n\t\tif err := setCpuset(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ hugetlb (since kernel 5.6)\n\tif _, ok := m.controllers[\"hugetlb\"]; ok {\n\t\tif err := setHugeTlb(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ freezer (since kernel 5.2, pseudo-controller)\n\tif err := setFreezer(m.dirPath, container.Cgroups.Freezer); err != nil {\n\t\terrs = append(errs, err)\n\t}\n\tif len(errs) > 0 && !m.rootless {\n\t\treturn errors.Errorf(\"error while setting cgroup v2: %+v\", errs)\n\t}\n\tm.config = container.Cgroups\n\treturn nil\n}\n\nfunc (m *manager) GetCgroups() (*configs.Cgroup, error) {\n\treturn m.config, nil\n}\n<commit_msg>cgroups\/fs2: do not use securejoin<commit_after>\/\/ +build linux\n\npackage fs2\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/opencontainers\/runc\/libcontainer\/cgroups\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/configs\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ NewManager creates a manager for cgroup v2 unified hierarchy.\n\/\/ dirPath is like \"\/sys\/fs\/cgroup\/user.slice\/user-1001.slice\/session-1.scope\".\n\/\/ If dirPath is empty, it is automatically set using config.\nfunc NewManager(config *configs.Cgroup, dirPath string, rootless bool) (cgroups.Manager, error) {\n\tif config == nil {\n\t\tconfig = &configs.Cgroup{}\n\t}\n\tif dirPath != \"\" {\n\t\tif filepath.Clean(dirPath) != dirPath || !filepath.IsAbs(dirPath) {\n\t\t\treturn nil, errors.Errorf(\"invalid dir path %q\", dirPath)\n\t\t}\n\t} else {\n\t\tvar err error\n\t\tdirPath, err = defaultDirPath(config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tcontrollers, err := detectControllers(dirPath)\n\tif err != nil && !rootless {\n\t\treturn nil, err\n\t}\n\n\tm := &manager{\n\t\tconfig: config,\n\t\tdirPath: dirPath,\n\t\tcontrollers: controllers,\n\t\trootless: rootless,\n\t}\n\treturn m, nil\n}\n\nfunc detectControllers(dirPath string) (map[string]struct{}, error) {\n\tif err := os.MkdirAll(dirPath, 0755); err != nil {\n\t\treturn nil, err\n\t}\n\tcontrollersPath := filepath.Join(dirPath, \"cgroup.controllers\")\n\tcontrollersData, err := ioutil.ReadFile(controllersPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcontrollersFields := strings.Fields(string(controllersData))\n\tcontrollers := make(map[string]struct{}, len(controllersFields))\n\tfor _, c := range controllersFields {\n\t\tcontrollers[c] = struct{}{}\n\t}\n\treturn controllers, nil\n}\n\ntype manager struct {\n\tconfig *configs.Cgroup\n\t\/\/ dirPath is like \"\/sys\/fs\/cgroup\/user.slice\/user-1001.slice\/session-1.scope\"\n\tdirPath string\n\t\/\/ controllers is content of \"cgroup.controllers\" file.\n\t\/\/ excludes pseudo-controllers (\"devices\" and \"freezer\").\n\tcontrollers map[string]struct{}\n\trootless bool\n}\n\nfunc (m *manager) Apply(pid int) error {\n\tif err := cgroups.WriteCgroupProc(m.dirPath, pid); err != nil && !m.rootless {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (m *manager) GetPids() ([]int, error) {\n\treturn cgroups.GetPids(m.dirPath)\n}\n\nfunc (m *manager) GetAllPids() ([]int, error) {\n\treturn cgroups.GetAllPids(m.dirPath)\n}\n\nfunc (m *manager) GetStats() (*cgroups.Stats, error) {\n\tvar (\n\t\terrs []error\n\t)\n\n\tst := cgroups.NewStats()\n\n\t\/\/ pids (since kernel 4.5)\n\tif _, ok := m.controllers[\"pids\"]; ok {\n\t\tif err := statPids(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t} else {\n\t\tif err := statPidsWithoutController(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ memory (since kernel 4.5)\n\tif _, ok := m.controllers[\"memory\"]; ok {\n\t\tif err := statMemory(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ io (since kernel 4.5)\n\tif _, ok := m.controllers[\"io\"]; ok {\n\t\tif err := statIo(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ cpu (since kernel 4.15)\n\tif _, ok := m.controllers[\"cpu\"]; ok {\n\t\tif err := statCpu(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ hugetlb (since kernel 5.6)\n\tif _, ok := m.controllers[\"hugetlb\"]; ok {\n\t\tif err := statHugeTlb(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\tif len(errs) > 0 && !m.rootless {\n\t\treturn st, errors.Errorf(\"error while statting cgroup v2: %+v\", errs)\n\t}\n\treturn st, nil\n}\n\nfunc (m *manager) Freeze(state configs.FreezerState) error {\n\tif err := setFreezer(m.dirPath, state); err != nil {\n\t\treturn err\n\t}\n\tm.config.Resources.Freezer = state\n\treturn nil\n}\n\nfunc (m *manager) Destroy() error {\n\treturn os.RemoveAll(m.dirPath)\n}\n\n\/\/ GetPaths is for compatibility purpose and should be removed in future\nfunc (m *manager) GetPaths() map[string]string {\n\tpaths := map[string]string{\n\t\t\/\/ pseudo-controller for compatibility\n\t\t\"devices\": m.dirPath,\n\t\t\"freezer\": m.dirPath,\n\t}\n\tfor c := range m.controllers {\n\t\tpaths[c] = m.dirPath\n\t}\n\treturn paths\n}\n\nfunc (m *manager) GetUnifiedPath() (string, error) {\n\treturn m.dirPath, nil\n}\n\nfunc (m *manager) Set(container *configs.Config) error {\n\tif container == nil || container.Cgroups == nil {\n\t\treturn nil\n\t}\n\tvar errs []error\n\t\/\/ pids (since kernel 4.5)\n\tif _, ok := m.controllers[\"pids\"]; ok {\n\t\tif err := setPids(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ memory (since kernel 4.5)\n\tif _, ok := m.controllers[\"memory\"]; ok {\n\t\tif err := setMemory(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ io (since kernel 4.5)\n\tif _, ok := m.controllers[\"io\"]; ok {\n\t\tif err := setIo(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ cpu (since kernel 4.15)\n\tif _, ok := m.controllers[\"cpu\"]; ok {\n\t\tif err := setCpu(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ devices (since kernel 4.15, pseudo-controller)\n\tif err := setDevices(m.dirPath, container.Cgroups); err != nil {\n\t\terrs = append(errs, err)\n\t}\n\t\/\/ cpuset (since kernel 5.0)\n\tif _, ok := m.controllers[\"cpuset\"]; ok {\n\t\tif err := setCpuset(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ hugetlb (since kernel 5.6)\n\tif _, ok := m.controllers[\"hugetlb\"]; ok {\n\t\tif err := setHugeTlb(m.dirPath, container.Cgroups); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ freezer (since kernel 5.2, pseudo-controller)\n\tif err := setFreezer(m.dirPath, container.Cgroups.Freezer); err != nil {\n\t\terrs = append(errs, err)\n\t}\n\tif len(errs) > 0 && !m.rootless {\n\t\treturn errors.Errorf(\"error while setting cgroup v2: %+v\", errs)\n\t}\n\tm.config = container.Cgroups\n\treturn nil\n}\n\nfunc (m *manager) GetCgroups() (*configs.Cgroup, error) {\n\treturn m.config, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package openshiftkubeapiserver\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"strings\"\n\n\tconfigv1 \"github.com\/openshift\/api\/config\/v1\"\n\tkubecontrolplanev1 \"github.com\/openshift\/api\/kubecontrolplane\/v1\"\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/configflags\"\n\tconfigapi \"github.com\/openshift\/origin\/pkg\/cmd\/server\/apis\/config\"\n\tconfigapilatest \"github.com\/openshift\/origin\/pkg\/cmd\/server\/apis\/config\/latest\"\n\t\"github.com\/openshift\/origin\/pkg\/configconversion\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n)\n\nfunc ConfigToFlags(kubeAPIServerConfig *kubecontrolplanev1.KubeAPIServerConfig) ([]string, error) {\n\targs := unmaskArgs(kubeAPIServerConfig.APIServerArguments)\n\n\thost, portString, err := net.SplitHostPort(kubeAPIServerConfig.ServingInfo.BindAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ TODO this list (and the content below) will be used to drive a config struct and a reflective test matching config to flags\n\t\/\/ these flags are overridden by a patch\n\t\/\/ admission-control\n\t\/\/ authentication-token-webhook-cache-ttl\n\t\/\/ authentication-token-webhook-config-file\n\t\/\/ authorization-mode\n\t\/\/ authorization-policy-file\n\t\/\/ authorization-webhook-cache-authorized-ttl\n\t\/\/ authorization-webhook-cache-unauthorized-ttl\n\t\/\/ authorization-webhook-config-file\n\t\/\/ basic-auth-file\n\t\/\/ enable-aggregator-routing\n\t\/\/ enable-bootstrap-token-auth\n\t\/\/ oidc-client-id\n\t\/\/ oidc-groups-claim\n\t\/\/ oidc-groups-prefix\n\t\/\/ oidc-issuer-url\n\t\/\/ oidc-required-claim\n\t\/\/ oidc-signing-algs\n\t\/\/ oidc-username-claim\n\t\/\/ oidc-username-prefix\n\t\/\/ service-account-lookup\n\t\/\/ token-auth-file\n\n\t\/\/ alsologtostderr - don't know whether to change it\n\t\/\/ apiserver-count - ignored, hopefully we don't have to fix via patch\n\t\/\/ cert-dir - ignored because we set certs\n\n\t\/\/ these flags were never supported via config\n\t\/\/ cloud-config\n\t\/\/ cloud-provider\n\t\/\/ cloud-provider-gce-lb-src-cidrs\n\t\/\/ contention-profiling\n\t\/\/ default-not-ready-toleration-seconds\n\t\/\/ default-unreachable-toleration-seconds\n\t\/\/ default-watch-cache-size\n\t\/\/ delete-collection-workers\n\t\/\/ deserialization-cache-size\n\t\/\/ enable-garbage-collector\n\t\/\/ etcd-compaction-interval\n\t\/\/ etcd-count-metric-poll-period\n\t\/\/ etcd-servers-overrides\n\t\/\/ experimental-encryption-provider-config\n\t\/\/ feature-gates\n\t\/\/ http2-max-streams-per-connection\n\t\/\/ insecure-bind-address\n\t\/\/ kubelet-timeout\n\t\/\/ log-backtrace-at\n\t\/\/ log-dir\n\t\/\/ log-flush-frequency\n\t\/\/ logtostderr\n\t\/\/ master-service-namespace\n\t\/\/ max-connection-bytes-per-sec\n\t\/\/ profiling\n\t\/\/ request-timeout\n\t\/\/ runtime-config\n\t\/\/ service-account-api-audiences\n\t\/\/ service-account-issuer\n\t\/\/ service-account-key-file\n\t\/\/ service-account-max-token-expiration\n\t\/\/ service-account-signing-key-file\n\t\/\/ stderrthreshold\n\t\/\/ storage-versions\n\t\/\/ target-ram-mb\n\t\/\/ v\n\t\/\/ version\n\t\/\/ vmodule\n\t\/\/ watch-cache\n\t\/\/ watch-cache-sizes\n\n\t\/\/ TODO, we need to set these in order to enable the right admission plugins in each of the servers\n\t\/\/ TODO this is needed for a viable cluster up\n\tadmissionFlags, err := admissionFlags(kubeAPIServerConfig.AdmissionPluginConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor flag, value := range admissionFlags {\n\t\tconfigflags.SetIfUnset(args, flag, value...)\n\t}\n\tconfigflags.SetIfUnset(args, \"allow-privileged\", \"true\")\n\tconfigflags.SetIfUnset(args, \"anonymous-auth\", \"false\")\n\tconfigflags.SetIfUnset(args, \"authorization-mode\", \"RBAC\", \"Node\") \/\/ overridden later, but this runs the poststarthook for bootstrapping RBAC\n\tfor flag, value := range configflags.AuditFlags(&kubeAPIServerConfig.AuditConfig, configflags.ArgsWithPrefix(args, \"audit-\")) {\n\t\tconfigflags.SetIfUnset(args, flag, value...)\n\t}\n\tconfigflags.SetIfUnset(args, \"bind-address\", host)\n\tconfigflags.SetIfUnset(args, \"client-ca-file\", kubeAPIServerConfig.ServingInfo.ClientCA)\n\tconfigflags.SetIfUnset(args, \"cors-allowed-origins\", kubeAPIServerConfig.CORSAllowedOrigins...)\n\tconfigflags.SetIfUnset(args, \"enable-logs-handler\", \"false\")\n\tconfigflags.SetIfUnset(args, \"enable-swagger-ui\", \"true\")\n\tconfigflags.SetIfUnset(args, \"endpoint-reconciler-type\", \"lease\")\n\tconfigflags.SetIfUnset(args, \"etcd-cafile\", kubeAPIServerConfig.StorageConfig.CA)\n\tconfigflags.SetIfUnset(args, \"etcd-certfile\", kubeAPIServerConfig.StorageConfig.CertFile)\n\tconfigflags.SetIfUnset(args, \"etcd-keyfile\", kubeAPIServerConfig.StorageConfig.KeyFile)\n\tconfigflags.SetIfUnset(args, \"etcd-prefix\", kubeAPIServerConfig.StorageConfig.StoragePrefix)\n\tconfigflags.SetIfUnset(args, \"etcd-servers\", kubeAPIServerConfig.StorageConfig.URLs...)\n\tconfigflags.SetIfUnset(args, \"insecure-port\", \"0\")\n\tconfigflags.SetIfUnset(args, \"kubelet-certificate-authority\", kubeAPIServerConfig.KubeletClientInfo.CA)\n\tconfigflags.SetIfUnset(args, \"kubelet-client-certificate\", kubeAPIServerConfig.KubeletClientInfo.CertFile)\n\tconfigflags.SetIfUnset(args, \"kubelet-client-key\", kubeAPIServerConfig.KubeletClientInfo.KeyFile)\n\tconfigflags.SetIfUnset(args, \"kubelet-https\", \"true\")\n\tconfigflags.SetIfUnset(args, \"kubelet-preferred-address-types\", \"Hostname\", \"InternalIP\", \"ExternalIP\")\n\tconfigflags.SetIfUnset(args, \"kubelet-read-only-port\", \"0\")\n\tconfigflags.SetIfUnset(args, \"kubernetes-service-node-port\", \"0\")\n\tconfigflags.SetIfUnset(args, \"max-mutating-requests-inflight\", fmt.Sprintf(\"%d\", kubeAPIServerConfig.ServingInfo.MaxRequestsInFlight\/2))\n\tconfigflags.SetIfUnset(args, \"max-requests-inflight\", fmt.Sprintf(\"%d\", kubeAPIServerConfig.ServingInfo.MaxRequestsInFlight))\n\tconfigflags.SetIfUnset(args, \"min-request-timeout\", fmt.Sprintf(\"%d\", kubeAPIServerConfig.ServingInfo.RequestTimeoutSeconds))\n\tconfigflags.SetIfUnset(args, \"proxy-client-cert-file\", kubeAPIServerConfig.AggregatorConfig.ProxyClientInfo.CertFile)\n\tconfigflags.SetIfUnset(args, \"proxy-client-key-file\", kubeAPIServerConfig.AggregatorConfig.ProxyClientInfo.KeyFile)\n\tconfigflags.SetIfUnset(args, \"requestheader-allowed-names\", kubeAPIServerConfig.AuthConfig.RequestHeader.ClientCommonNames...)\n\tconfigflags.SetIfUnset(args, \"requestheader-client-ca-file\", kubeAPIServerConfig.AuthConfig.RequestHeader.ClientCA)\n\tconfigflags.SetIfUnset(args, \"requestheader-extra-headers-prefix\", kubeAPIServerConfig.AuthConfig.RequestHeader.ExtraHeaderPrefixes...)\n\tconfigflags.SetIfUnset(args, \"requestheader-group-headers\", kubeAPIServerConfig.AuthConfig.RequestHeader.GroupHeaders...)\n\tconfigflags.SetIfUnset(args, \"requestheader-username-headers\", kubeAPIServerConfig.AuthConfig.RequestHeader.UsernameHeaders...)\n\tconfigflags.SetIfUnset(args, \"secure-port\", portString)\n\tconfigflags.SetIfUnset(args, \"service-cluster-ip-range\", kubeAPIServerConfig.ServicesSubnet)\n\tconfigflags.SetIfUnset(args, \"service-node-port-range\", kubeAPIServerConfig.ServicesNodePortRange)\n\tconfigflags.SetIfUnset(args, \"storage-backend\", \"etcd3\")\n\tconfigflags.SetIfUnset(args, \"storage-media-type\", \"application\/vnd.kubernetes.protobuf\")\n\tconfigflags.SetIfUnset(args, \"tls-cert-file\", kubeAPIServerConfig.ServingInfo.CertFile)\n\tconfigflags.SetIfUnset(args, \"tls-cipher-suites\", kubeAPIServerConfig.ServingInfo.CipherSuites...)\n\tconfigflags.SetIfUnset(args, \"tls-min-version\", kubeAPIServerConfig.ServingInfo.MinTLSVersion)\n\tconfigflags.SetIfUnset(args, \"tls-private-key-file\", kubeAPIServerConfig.ServingInfo.KeyFile)\n\tconfigflags.SetIfUnset(args, \"tls-sni-cert-key\", sniCertKeys(kubeAPIServerConfig.ServingInfo.NamedCertificates)...)\n\tconfigflags.SetIfUnset(args, \"secure-port\", portString)\n\n\treturn configflags.ToFlagSlice(args), nil\n}\n\nfunc admissionFlags(admissionPluginConfig map[string]configv1.AdmissionPluginConfig) (map[string][]string, error) {\n\targs := map[string][]string{}\n\n\tforceOn := []string{}\n\tforceOff := []string{}\n\tpluginConfig := map[string]configv1.AdmissionPluginConfig{}\n\tfor pluginName, origConfig := range admissionPluginConfig {\n\t\tconfig := *origConfig.DeepCopy()\n\t\tif len(config.Location) > 0 {\n\t\t\tcontent, err := ioutil.ReadFile(config.Location)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t\/\/ if the config isn't a DefaultAdmissionConfig, then assume we're enabled (we were called out after all)\n\t\t\t\/\/ if the config *is* a DefaultAdmissionConfig and it explicitly said to disable us, we are disabled\n\t\t\tobj, err := configapilatest.ReadYAML(bytes.NewBuffer(content))\n\t\t\t\/\/ if we can't read it, let the plugin deal with it\n\t\t\t\/\/ if nothing was there, let the plugin deal with it\n\t\t\tif err != nil || obj == nil {\n\t\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\t\tconfig.Location = \"\"\n\t\t\t\tconfig.Configuration = runtime.RawExtension{Raw: content}\n\t\t\t\tpluginConfig[pluginName] = config\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif defaultConfig, ok := obj.(*configapi.DefaultAdmissionConfig); !ok {\n\t\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\t\tconfig.Location = \"\"\n\t\t\t\tconfig.Configuration = runtime.RawExtension{Raw: content}\n\t\t\t\tpluginConfig[pluginName] = config\n\n\t\t\t} else if defaultConfig.Disable {\n\t\t\t\tforceOff = append(forceOff, pluginName)\n\n\t\t\t} else {\n\t\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ if it wasn't a DefaultAdmissionConfig object, let the plugin deal with it\n\t\tcurrConfig := &configapi.DefaultAdmissionConfig{}\n\t\tuncastDefaultConfig, _, decodingErr := configapilatest.Codec.Decode(config.Configuration.Raw, nil, currConfig)\n\t\tif decodingErr != nil {\n\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\tpluginConfig[pluginName] = config\n\t\t\tcontinue\n\t\t}\n\n\t\tdefaultConfig, ok := uncastDefaultConfig.(*configapi.DefaultAdmissionConfig)\n\t\tif !ok {\n\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\tpluginConfig[pluginName] = config\n\n\t\t} else if defaultConfig.Disable {\n\t\t\tforceOff = append(forceOff, pluginName)\n\n\t\t} else {\n\t\t\tforceOn = append(forceOn, pluginName)\n\t\t}\n\n\t}\n\tupstreamAdmissionConfig, err := configconversion.ConvertOpenshiftAdmissionConfigToKubeAdmissionConfig(pluginConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconfigBytes, err := configapilatest.WriteYAML(upstreamAdmissionConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttempFile, err := ioutil.TempFile(\"\", \"kubeapiserver-admission-config.yaml\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif _, err := tempFile.Write(configBytes); err != nil {\n\t\treturn nil, err\n\t}\n\ttempFile.Close()\n\n\tconfigflags.SetIfUnset(args, \"admission-control-config-file\", tempFile.Name())\n\tconfigflags.SetIfUnset(args, \"disable-admission-plugins\", forceOff...)\n\tconfigflags.SetIfUnset(args, \"enable-admission-plugins\", forceOn...)\n\n\treturn args, nil\n}\n\nfunc sniCertKeys(namedCertificates []configv1.NamedCertificate) []string {\n\targs := []string{}\n\tfor _, nc := range namedCertificates {\n\t\tnames := \"\"\n\t\tif len(nc.Names) > 0 {\n\t\t\tnames = \":\" + strings.Join(nc.Names, \",\")\n\t\t}\n\t\targs = append(args, fmt.Sprintf(\"%s,%s%s\", nc.CertFile, nc.KeyFile, names))\n\t}\n\treturn args\n}\n\nfunc unmaskArgs(args map[string]kubecontrolplanev1.Arguments) map[string][]string {\n\tret := map[string][]string{}\n\tfor key, slice := range args {\n\t\tfor _, val := range slice {\n\t\t\tret[key] = append(ret[key], val)\n\t\t}\n\t}\n\treturn ret\n}\n<commit_msg>keep events for three hours<commit_after>package openshiftkubeapiserver\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"strings\"\n\n\tconfigv1 \"github.com\/openshift\/api\/config\/v1\"\n\tkubecontrolplanev1 \"github.com\/openshift\/api\/kubecontrolplane\/v1\"\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/configflags\"\n\tconfigapi \"github.com\/openshift\/origin\/pkg\/cmd\/server\/apis\/config\"\n\tconfigapilatest \"github.com\/openshift\/origin\/pkg\/cmd\/server\/apis\/config\/latest\"\n\t\"github.com\/openshift\/origin\/pkg\/configconversion\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n)\n\nfunc ConfigToFlags(kubeAPIServerConfig *kubecontrolplanev1.KubeAPIServerConfig) ([]string, error) {\n\targs := unmaskArgs(kubeAPIServerConfig.APIServerArguments)\n\n\thost, portString, err := net.SplitHostPort(kubeAPIServerConfig.ServingInfo.BindAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ TODO this list (and the content below) will be used to drive a config struct and a reflective test matching config to flags\n\t\/\/ these flags are overridden by a patch\n\t\/\/ admission-control\n\t\/\/ authentication-token-webhook-cache-ttl\n\t\/\/ authentication-token-webhook-config-file\n\t\/\/ authorization-mode\n\t\/\/ authorization-policy-file\n\t\/\/ authorization-webhook-cache-authorized-ttl\n\t\/\/ authorization-webhook-cache-unauthorized-ttl\n\t\/\/ authorization-webhook-config-file\n\t\/\/ basic-auth-file\n\t\/\/ enable-aggregator-routing\n\t\/\/ enable-bootstrap-token-auth\n\t\/\/ oidc-client-id\n\t\/\/ oidc-groups-claim\n\t\/\/ oidc-groups-prefix\n\t\/\/ oidc-issuer-url\n\t\/\/ oidc-required-claim\n\t\/\/ oidc-signing-algs\n\t\/\/ oidc-username-claim\n\t\/\/ oidc-username-prefix\n\t\/\/ service-account-lookup\n\t\/\/ token-auth-file\n\n\t\/\/ alsologtostderr - don't know whether to change it\n\t\/\/ apiserver-count - ignored, hopefully we don't have to fix via patch\n\t\/\/ cert-dir - ignored because we set certs\n\n\t\/\/ these flags were never supported via config\n\t\/\/ cloud-config\n\t\/\/ cloud-provider\n\t\/\/ cloud-provider-gce-lb-src-cidrs\n\t\/\/ contention-profiling\n\t\/\/ default-not-ready-toleration-seconds\n\t\/\/ default-unreachable-toleration-seconds\n\t\/\/ default-watch-cache-size\n\t\/\/ delete-collection-workers\n\t\/\/ deserialization-cache-size\n\t\/\/ enable-garbage-collector\n\t\/\/ etcd-compaction-interval\n\t\/\/ etcd-count-metric-poll-period\n\t\/\/ etcd-servers-overrides\n\t\/\/ experimental-encryption-provider-config\n\t\/\/ feature-gates\n\t\/\/ http2-max-streams-per-connection\n\t\/\/ insecure-bind-address\n\t\/\/ kubelet-timeout\n\t\/\/ log-backtrace-at\n\t\/\/ log-dir\n\t\/\/ log-flush-frequency\n\t\/\/ logtostderr\n\t\/\/ master-service-namespace\n\t\/\/ max-connection-bytes-per-sec\n\t\/\/ profiling\n\t\/\/ request-timeout\n\t\/\/ runtime-config\n\t\/\/ service-account-api-audiences\n\t\/\/ service-account-issuer\n\t\/\/ service-account-key-file\n\t\/\/ service-account-max-token-expiration\n\t\/\/ service-account-signing-key-file\n\t\/\/ stderrthreshold\n\t\/\/ storage-versions\n\t\/\/ target-ram-mb\n\t\/\/ v\n\t\/\/ version\n\t\/\/ vmodule\n\t\/\/ watch-cache\n\t\/\/ watch-cache-sizes\n\n\t\/\/ TODO, we need to set these in order to enable the right admission plugins in each of the servers\n\t\/\/ TODO this is needed for a viable cluster up\n\tadmissionFlags, err := admissionFlags(kubeAPIServerConfig.AdmissionPluginConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor flag, value := range admissionFlags {\n\t\tconfigflags.SetIfUnset(args, flag, value...)\n\t}\n\tconfigflags.SetIfUnset(args, \"allow-privileged\", \"true\")\n\tconfigflags.SetIfUnset(args, \"anonymous-auth\", \"false\")\n\tconfigflags.SetIfUnset(args, \"authorization-mode\", \"RBAC\", \"Node\") \/\/ overridden later, but this runs the poststarthook for bootstrapping RBAC\n\tfor flag, value := range configflags.AuditFlags(&kubeAPIServerConfig.AuditConfig, configflags.ArgsWithPrefix(args, \"audit-\")) {\n\t\tconfigflags.SetIfUnset(args, flag, value...)\n\t}\n\tconfigflags.SetIfUnset(args, \"bind-address\", host)\n\tconfigflags.SetIfUnset(args, \"client-ca-file\", kubeAPIServerConfig.ServingInfo.ClientCA)\n\tconfigflags.SetIfUnset(args, \"cors-allowed-origins\", kubeAPIServerConfig.CORSAllowedOrigins...)\n\tconfigflags.SetIfUnset(args, \"enable-logs-handler\", \"false\")\n\tconfigflags.SetIfUnset(args, \"enable-swagger-ui\", \"true\")\n\tconfigflags.SetIfUnset(args, \"endpoint-reconciler-type\", \"lease\")\n\tconfigflags.SetIfUnset(args, \"etcd-cafile\", kubeAPIServerConfig.StorageConfig.CA)\n\tconfigflags.SetIfUnset(args, \"etcd-certfile\", kubeAPIServerConfig.StorageConfig.CertFile)\n\tconfigflags.SetIfUnset(args, \"etcd-keyfile\", kubeAPIServerConfig.StorageConfig.KeyFile)\n\tconfigflags.SetIfUnset(args, \"etcd-prefix\", kubeAPIServerConfig.StorageConfig.StoragePrefix)\n\tconfigflags.SetIfUnset(args, \"etcd-servers\", kubeAPIServerConfig.StorageConfig.URLs...)\n\tconfigflags.SetIfUnset(args, \"event-ttl\", \"3h\") \/\/ set a TTL long enough to last for our CI tests so we see the first set of events.\n\tconfigflags.SetIfUnset(args, \"insecure-port\", \"0\")\n\tconfigflags.SetIfUnset(args, \"kubelet-certificate-authority\", kubeAPIServerConfig.KubeletClientInfo.CA)\n\tconfigflags.SetIfUnset(args, \"kubelet-client-certificate\", kubeAPIServerConfig.KubeletClientInfo.CertFile)\n\tconfigflags.SetIfUnset(args, \"kubelet-client-key\", kubeAPIServerConfig.KubeletClientInfo.KeyFile)\n\tconfigflags.SetIfUnset(args, \"kubelet-https\", \"true\")\n\tconfigflags.SetIfUnset(args, \"kubelet-preferred-address-types\", \"Hostname\", \"InternalIP\", \"ExternalIP\")\n\tconfigflags.SetIfUnset(args, \"kubelet-read-only-port\", \"0\")\n\tconfigflags.SetIfUnset(args, \"kubernetes-service-node-port\", \"0\")\n\tconfigflags.SetIfUnset(args, \"max-mutating-requests-inflight\", fmt.Sprintf(\"%d\", kubeAPIServerConfig.ServingInfo.MaxRequestsInFlight\/2))\n\tconfigflags.SetIfUnset(args, \"max-requests-inflight\", fmt.Sprintf(\"%d\", kubeAPIServerConfig.ServingInfo.MaxRequestsInFlight))\n\tconfigflags.SetIfUnset(args, \"min-request-timeout\", fmt.Sprintf(\"%d\", kubeAPIServerConfig.ServingInfo.RequestTimeoutSeconds))\n\tconfigflags.SetIfUnset(args, \"proxy-client-cert-file\", kubeAPIServerConfig.AggregatorConfig.ProxyClientInfo.CertFile)\n\tconfigflags.SetIfUnset(args, \"proxy-client-key-file\", kubeAPIServerConfig.AggregatorConfig.ProxyClientInfo.KeyFile)\n\tconfigflags.SetIfUnset(args, \"requestheader-allowed-names\", kubeAPIServerConfig.AuthConfig.RequestHeader.ClientCommonNames...)\n\tconfigflags.SetIfUnset(args, \"requestheader-client-ca-file\", kubeAPIServerConfig.AuthConfig.RequestHeader.ClientCA)\n\tconfigflags.SetIfUnset(args, \"requestheader-extra-headers-prefix\", kubeAPIServerConfig.AuthConfig.RequestHeader.ExtraHeaderPrefixes...)\n\tconfigflags.SetIfUnset(args, \"requestheader-group-headers\", kubeAPIServerConfig.AuthConfig.RequestHeader.GroupHeaders...)\n\tconfigflags.SetIfUnset(args, \"requestheader-username-headers\", kubeAPIServerConfig.AuthConfig.RequestHeader.UsernameHeaders...)\n\tconfigflags.SetIfUnset(args, \"secure-port\", portString)\n\tconfigflags.SetIfUnset(args, \"service-cluster-ip-range\", kubeAPIServerConfig.ServicesSubnet)\n\tconfigflags.SetIfUnset(args, \"service-node-port-range\", kubeAPIServerConfig.ServicesNodePortRange)\n\tconfigflags.SetIfUnset(args, \"storage-backend\", \"etcd3\")\n\tconfigflags.SetIfUnset(args, \"storage-media-type\", \"application\/vnd.kubernetes.protobuf\")\n\tconfigflags.SetIfUnset(args, \"tls-cert-file\", kubeAPIServerConfig.ServingInfo.CertFile)\n\tconfigflags.SetIfUnset(args, \"tls-cipher-suites\", kubeAPIServerConfig.ServingInfo.CipherSuites...)\n\tconfigflags.SetIfUnset(args, \"tls-min-version\", kubeAPIServerConfig.ServingInfo.MinTLSVersion)\n\tconfigflags.SetIfUnset(args, \"tls-private-key-file\", kubeAPIServerConfig.ServingInfo.KeyFile)\n\tconfigflags.SetIfUnset(args, \"tls-sni-cert-key\", sniCertKeys(kubeAPIServerConfig.ServingInfo.NamedCertificates)...)\n\tconfigflags.SetIfUnset(args, \"secure-port\", portString)\n\n\treturn configflags.ToFlagSlice(args), nil\n}\n\nfunc admissionFlags(admissionPluginConfig map[string]configv1.AdmissionPluginConfig) (map[string][]string, error) {\n\targs := map[string][]string{}\n\n\tforceOn := []string{}\n\tforceOff := []string{}\n\tpluginConfig := map[string]configv1.AdmissionPluginConfig{}\n\tfor pluginName, origConfig := range admissionPluginConfig {\n\t\tconfig := *origConfig.DeepCopy()\n\t\tif len(config.Location) > 0 {\n\t\t\tcontent, err := ioutil.ReadFile(config.Location)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t\/\/ if the config isn't a DefaultAdmissionConfig, then assume we're enabled (we were called out after all)\n\t\t\t\/\/ if the config *is* a DefaultAdmissionConfig and it explicitly said to disable us, we are disabled\n\t\t\tobj, err := configapilatest.ReadYAML(bytes.NewBuffer(content))\n\t\t\t\/\/ if we can't read it, let the plugin deal with it\n\t\t\t\/\/ if nothing was there, let the plugin deal with it\n\t\t\tif err != nil || obj == nil {\n\t\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\t\tconfig.Location = \"\"\n\t\t\t\tconfig.Configuration = runtime.RawExtension{Raw: content}\n\t\t\t\tpluginConfig[pluginName] = config\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif defaultConfig, ok := obj.(*configapi.DefaultAdmissionConfig); !ok {\n\t\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\t\tconfig.Location = \"\"\n\t\t\t\tconfig.Configuration = runtime.RawExtension{Raw: content}\n\t\t\t\tpluginConfig[pluginName] = config\n\n\t\t\t} else if defaultConfig.Disable {\n\t\t\t\tforceOff = append(forceOff, pluginName)\n\n\t\t\t} else {\n\t\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ if it wasn't a DefaultAdmissionConfig object, let the plugin deal with it\n\t\tcurrConfig := &configapi.DefaultAdmissionConfig{}\n\t\tuncastDefaultConfig, _, decodingErr := configapilatest.Codec.Decode(config.Configuration.Raw, nil, currConfig)\n\t\tif decodingErr != nil {\n\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\tpluginConfig[pluginName] = config\n\t\t\tcontinue\n\t\t}\n\n\t\tdefaultConfig, ok := uncastDefaultConfig.(*configapi.DefaultAdmissionConfig)\n\t\tif !ok {\n\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\tpluginConfig[pluginName] = config\n\n\t\t} else if defaultConfig.Disable {\n\t\t\tforceOff = append(forceOff, pluginName)\n\n\t\t} else {\n\t\t\tforceOn = append(forceOn, pluginName)\n\t\t}\n\n\t}\n\tupstreamAdmissionConfig, err := configconversion.ConvertOpenshiftAdmissionConfigToKubeAdmissionConfig(pluginConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconfigBytes, err := configapilatest.WriteYAML(upstreamAdmissionConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttempFile, err := ioutil.TempFile(\"\", \"kubeapiserver-admission-config.yaml\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif _, err := tempFile.Write(configBytes); err != nil {\n\t\treturn nil, err\n\t}\n\ttempFile.Close()\n\n\tconfigflags.SetIfUnset(args, \"admission-control-config-file\", tempFile.Name())\n\tconfigflags.SetIfUnset(args, \"disable-admission-plugins\", forceOff...)\n\tconfigflags.SetIfUnset(args, \"enable-admission-plugins\", forceOn...)\n\n\treturn args, nil\n}\n\nfunc sniCertKeys(namedCertificates []configv1.NamedCertificate) []string {\n\targs := []string{}\n\tfor _, nc := range namedCertificates {\n\t\tnames := \"\"\n\t\tif len(nc.Names) > 0 {\n\t\t\tnames = \":\" + strings.Join(nc.Names, \",\")\n\t\t}\n\t\targs = append(args, fmt.Sprintf(\"%s,%s%s\", nc.CertFile, nc.KeyFile, names))\n\t}\n\treturn args\n}\n\nfunc unmaskArgs(args map[string]kubecontrolplanev1.Arguments) map[string][]string {\n\tret := map[string][]string{}\n\tfor key, slice := range args {\n\t\tfor _, val := range slice {\n\t\t\tret[key] = append(ret[key], val)\n\t\t}\n\t}\n\treturn ret\n}\n<|endoftext|>"} {"text":"<commit_before>package openshiftkubeapiserver\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"sort\"\n\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\n\tconfigv1 \"github.com\/openshift\/api\/config\/v1\"\n\tkubecontrolplanev1 \"github.com\/openshift\/api\/kubecontrolplane\/v1\"\n\tconfigapi \"github.com\/openshift\/origin\/pkg\/cmd\/server\/apis\/config\"\n\tconfigapilatest \"github.com\/openshift\/origin\/pkg\/cmd\/server\/apis\/config\/latest\"\n\t\"github.com\/openshift\/origin\/pkg\/configconversion\"\n)\n\nfunc ConfigToFlags(kubeAPIServerConfig *kubecontrolplanev1.KubeAPIServerConfig) ([]string, error) {\n\targs := map[string][]string{}\n\tfor key, slice := range kubeAPIServerConfig.APIServerArguments {\n\t\tfor _, val := range slice {\n\t\t\targs[key] = append(args[key], val)\n\t\t}\n\t}\n\n\thost, portString, err := net.SplitHostPort(kubeAPIServerConfig.ServingInfo.BindAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ TODO this list (and the content below) will be used to drive a config struct and a reflective test matching config to flags\n\t\/\/ these flags are overridden by a patch\n\t\/\/ admission-control\n\t\/\/ authentication-token-webhook-cache-ttl\n\t\/\/ authentication-token-webhook-config-file\n\t\/\/ authorization-mode\n\t\/\/ authorization-policy-file\n\t\/\/ authorization-webhook-cache-authorized-ttl\n\t\/\/ authorization-webhook-cache-unauthorized-ttl\n\t\/\/ authorization-webhook-config-file\n\t\/\/ basic-auth-file\n\t\/\/ enable-aggregator-routing\n\t\/\/ enable-bootstrap-token-auth\n\t\/\/ oidc-client-id\n\t\/\/ oidc-groups-claim\n\t\/\/ oidc-groups-prefix\n\t\/\/ oidc-issuer-url\n\t\/\/ oidc-required-claim\n\t\/\/ oidc-signing-algs\n\t\/\/ oidc-username-claim\n\t\/\/ oidc-username-prefix\n\t\/\/ service-account-lookup\n\t\/\/ token-auth-file\n\n\t\/\/ alsologtostderr - don't know whether to change it\n\t\/\/ apiserver-count - ignored, hopefully we don't have to fix via patch\n\t\/\/ cert-dir - ignored because we set certs\n\n\t\/\/ these flags were never supported via config\n\t\/\/ cloud-config\n\t\/\/ cloud-provider\n\t\/\/ cloud-provider-gce-lb-src-cidrs\n\t\/\/ contention-profiling\n\t\/\/ default-not-ready-toleration-seconds\n\t\/\/ default-unreachable-toleration-seconds\n\t\/\/ default-watch-cache-size\n\t\/\/ delete-collection-workers\n\t\/\/ deserialization-cache-size\n\t\/\/ enable-garbage-collector\n\t\/\/ etcd-compaction-interval\n\t\/\/ etcd-count-metric-poll-period\n\t\/\/ etcd-servers-overrides\n\t\/\/ experimental-encryption-provider-config\n\t\/\/ feature-gates\n\t\/\/ http2-max-streams-per-connection\n\t\/\/ insecure-bind-address\n\t\/\/ kubelet-timeout\n\t\/\/ log-backtrace-at\n\t\/\/ log-dir\n\t\/\/ log-flush-frequency\n\t\/\/ logtostderr\n\t\/\/ master-service-namespace\n\t\/\/ max-connection-bytes-per-sec\n\t\/\/ profiling\n\t\/\/ request-timeout\n\t\/\/ runtime-config\n\t\/\/ service-account-api-audiences\n\t\/\/ service-account-issuer\n\t\/\/ service-account-key-file\n\t\/\/ service-account-max-token-expiration\n\t\/\/ service-account-signing-key-file\n\t\/\/ stderrthreshold\n\t\/\/ storage-versions\n\t\/\/ target-ram-mb\n\t\/\/ v\n\t\/\/ version\n\t\/\/ vmodule\n\t\/\/ watch-cache\n\t\/\/ watch-cache-sizes\n\n\t\/\/ TODO, we need to set these in order to enable the right admission plugins in each of the servers\n\t\/\/ TODO this is needed for a viable cluster up\n\tadmissionFlags, err := admissionFlags(kubeAPIServerConfig.AdmissionPluginConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor flag, value := range admissionFlags {\n\t\tsetIfUnset(args, flag, value...)\n\t}\n\tsetIfUnset(args, \"allow-privileged\", \"true\")\n\tsetIfUnset(args, \"anonymous-auth\", \"false\")\n\tsetIfUnset(args, \"authorization-mode\", \"RBAC\", \"Node\") \/\/ overridden later, but this runs the poststarthook for bootstrapping RBAC\n\tfor flag, value := range auditFlags(kubeAPIServerConfig) {\n\t\tsetIfUnset(args, flag, value...)\n\t}\n\tsetIfUnset(args, \"bind-address\", host)\n\tsetIfUnset(args, \"client-ca-file\", kubeAPIServerConfig.ServingInfo.ClientCA)\n\tsetIfUnset(args, \"cors-allowed-origins\", kubeAPIServerConfig.CORSAllowedOrigins...)\n\tsetIfUnset(args, \"enable-logs-handler\", \"false\")\n\tsetIfUnset(args, \"enable-swagger-ui\", \"true\")\n\tsetIfUnset(args, \"endpoint-reconciler-type\", \"lease\")\n\tsetIfUnset(args, \"etcd-cafile\", kubeAPIServerConfig.StorageConfig.CA)\n\tsetIfUnset(args, \"etcd-certfile\", kubeAPIServerConfig.StorageConfig.CertFile)\n\tsetIfUnset(args, \"etcd-keyfile\", kubeAPIServerConfig.StorageConfig.KeyFile)\n\tsetIfUnset(args, \"etcd-prefix\", kubeAPIServerConfig.StorageConfig.StoragePrefix)\n\tsetIfUnset(args, \"etcd-servers\", kubeAPIServerConfig.StorageConfig.URLs...)\n\tsetIfUnset(args, \"insecure-port\", \"0\")\n\tsetIfUnset(args, \"kubelet-certificate-authority\", kubeAPIServerConfig.KubeletClientInfo.CA)\n\tsetIfUnset(args, \"kubelet-client-certificate\", kubeAPIServerConfig.KubeletClientInfo.CertFile)\n\tsetIfUnset(args, \"kubelet-client-key\", kubeAPIServerConfig.KubeletClientInfo.KeyFile)\n\tsetIfUnset(args, \"kubelet-https\", \"true\")\n\tsetIfUnset(args, \"kubelet-preferred-address-types\", \"Hostname\", \"InternalIP\", \"ExternalIP\")\n\tsetIfUnset(args, \"kubelet-read-only-port\", \"0\")\n\tsetIfUnset(args, \"kubernetes-service-node-port\", \"0\")\n\tsetIfUnset(args, \"max-mutating-requests-inflight\", fmt.Sprintf(\"%d\", kubeAPIServerConfig.ServingInfo.MaxRequestsInFlight\/2))\n\tsetIfUnset(args, \"max-requests-inflight\", fmt.Sprintf(\"%d\", kubeAPIServerConfig.ServingInfo.MaxRequestsInFlight))\n\tsetIfUnset(args, \"min-request-timeout\", fmt.Sprintf(\"%d\", kubeAPIServerConfig.ServingInfo.RequestTimeoutSeconds))\n\tsetIfUnset(args, \"proxy-client-cert-file\", kubeAPIServerConfig.AggregatorConfig.ProxyClientInfo.CertFile)\n\tsetIfUnset(args, \"proxy-client-key-file\", kubeAPIServerConfig.AggregatorConfig.ProxyClientInfo.KeyFile)\n\tsetIfUnset(args, \"requestheader-allowed-names\", kubeAPIServerConfig.AuthConfig.RequestHeader.ClientCommonNames...)\n\tsetIfUnset(args, \"requestheader-client-ca-file\", kubeAPIServerConfig.AuthConfig.RequestHeader.ClientCA)\n\tsetIfUnset(args, \"requestheader-extra-headers-prefix\", kubeAPIServerConfig.AuthConfig.RequestHeader.ExtraHeaderPrefixes...)\n\tsetIfUnset(args, \"requestheader-group-headers\", kubeAPIServerConfig.AuthConfig.RequestHeader.GroupHeaders...)\n\tsetIfUnset(args, \"requestheader-username-headers\", kubeAPIServerConfig.AuthConfig.RequestHeader.UsernameHeaders...)\n\tsetIfUnset(args, \"secure-port\", portString)\n\tsetIfUnset(args, \"service-cluster-ip-range\", kubeAPIServerConfig.ServicesSubnet)\n\tsetIfUnset(args, \"service-node-port-range\", kubeAPIServerConfig.ServicesNodePortRange)\n\tsetIfUnset(args, \"storage-backend\", \"etcd3\")\n\tsetIfUnset(args, \"storage-media-type\", \"application\/vnd.kubernetes.protobuf\")\n\tsetIfUnset(args, \"tls-cert-file\", kubeAPIServerConfig.ServingInfo.CertFile)\n\tsetIfUnset(args, \"tls-cipher-suites\", kubeAPIServerConfig.ServingInfo.CipherSuites...)\n\tsetIfUnset(args, \"tls-min-version\", kubeAPIServerConfig.ServingInfo.MinTLSVersion)\n\tsetIfUnset(args, \"tls-private-key-file\", kubeAPIServerConfig.ServingInfo.KeyFile)\n\t\/\/ TODO re-enable SNI for cluster up\n\t\/\/ tls-sni-cert-key\n\tsetIfUnset(args, \"secure-port\", portString)\n\n\tvar keys []string\n\tfor key := range args {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Strings(keys)\n\n\tvar arguments []string\n\tfor _, key := range keys {\n\t\tfor _, token := range args[key] {\n\t\t\targuments = append(arguments, fmt.Sprintf(\"--%s=%v\", key, token))\n\t\t}\n\t}\n\treturn arguments, nil\n}\n\n\/\/ currently for cluster up, audit is just broken.\n\/\/ TODO fix this\nfunc auditFlags(kubeAPIServerConfig *kubecontrolplanev1.KubeAPIServerConfig) map[string][]string {\n\targs := map[string][]string{}\n\tfor key, slice := range kubeAPIServerConfig.APIServerArguments {\n\t\tfor _, val := range slice {\n\t\t\targs[key] = append(args[key], val)\n\t\t}\n\t}\n\n\treturn args\n}\n\nfunc setIfUnset(cmdLineArgs map[string][]string, key string, value ...string) {\n\tif _, ok := cmdLineArgs[key]; !ok {\n\t\tcmdLineArgs[key] = value\n\t}\n}\n\nfunc admissionFlags(admissionPluginConfig map[string]configv1.AdmissionPluginConfig) (map[string][]string, error) {\n\targs := map[string][]string{}\n\n\tforceOn := []string{}\n\tforceOff := []string{}\n\tpluginConfig := map[string]configv1.AdmissionPluginConfig{}\n\tfor pluginName, origConfig := range admissionPluginConfig {\n\t\tconfig := *origConfig.DeepCopy()\n\t\tif len(config.Location) > 0 {\n\t\t\tcontent, err := ioutil.ReadFile(config.Location)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t\/\/ if the config isn't a DefaultAdmissionConfig, then assume we're enabled (we were called out after all)\n\t\t\t\/\/ if the config *is* a DefaultAdmissionConfig and it explicitly said to disable us, we are disabled\n\t\t\tobj, err := configapilatest.ReadYAML(bytes.NewBuffer(content))\n\t\t\t\/\/ if we can't read it, let the plugin deal with it\n\t\t\t\/\/ if nothing was there, let the plugin deal with it\n\t\t\tif err != nil || obj == nil {\n\t\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\t\tconfig.Location = \"\"\n\t\t\t\tconfig.Configuration = runtime.RawExtension{Raw: content}\n\t\t\t\tpluginConfig[pluginName] = config\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif defaultConfig, ok := obj.(*configapi.DefaultAdmissionConfig); !ok {\n\t\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\t\tconfig.Location = \"\"\n\t\t\t\tconfig.Configuration = runtime.RawExtension{Raw: content}\n\t\t\t\tpluginConfig[pluginName] = config\n\n\t\t\t} else if defaultConfig.Disable {\n\t\t\t\tforceOff = append(forceOff, pluginName)\n\n\t\t\t} else {\n\t\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ if it wasn't a DefaultAdmissionConfig object, let the plugin deal with it\n\t\tcurrConfig := &configapi.DefaultAdmissionConfig{}\n\t\tuncastDefaultConfig, _, decodingErr := configapilatest.Codec.Decode(config.Configuration.Raw, nil, currConfig)\n\t\tif decodingErr != nil {\n\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\tpluginConfig[pluginName] = config\n\t\t\tcontinue\n\t\t}\n\n\t\tdefaultConfig, ok := uncastDefaultConfig.(*configapi.DefaultAdmissionConfig)\n\t\tif !ok {\n\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\tpluginConfig[pluginName] = config\n\n\t\t} else if defaultConfig.Disable {\n\t\t\tforceOff = append(forceOff, pluginName)\n\n\t\t} else {\n\t\t\tforceOn = append(forceOn, pluginName)\n\t\t}\n\n\t}\n\tupstreamAdmissionConfig, err := configconversion.ConvertOpenshiftAdmissionConfigToKubeAdmissionConfig(pluginConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconfigBytes, err := configapilatest.WriteYAML(upstreamAdmissionConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttempFile, err := ioutil.TempFile(\"\", \"kubeapiserver-admission-config.yaml\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif _, err := tempFile.Write(configBytes); err != nil {\n\t\treturn nil, err\n\t}\n\ttempFile.Close()\n\n\tsetIfUnset(args, \"admission-control-config-file\", tempFile.Name())\n\tsetIfUnset(args, \"disable-admission-plugins\", forceOff...)\n\tsetIfUnset(args, \"enable-admission-plugins\", forceOn...)\n\n\treturn args, nil\n}\n<commit_msg>Wire tls-sni-cert-key flag<commit_after>package openshiftkubeapiserver\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\n\tconfigv1 \"github.com\/openshift\/api\/config\/v1\"\n\tkubecontrolplanev1 \"github.com\/openshift\/api\/kubecontrolplane\/v1\"\n\tconfigapi \"github.com\/openshift\/origin\/pkg\/cmd\/server\/apis\/config\"\n\tconfigapilatest \"github.com\/openshift\/origin\/pkg\/cmd\/server\/apis\/config\/latest\"\n\t\"github.com\/openshift\/origin\/pkg\/configconversion\"\n)\n\nfunc ConfigToFlags(kubeAPIServerConfig *kubecontrolplanev1.KubeAPIServerConfig) ([]string, error) {\n\targs := map[string][]string{}\n\tfor key, slice := range kubeAPIServerConfig.APIServerArguments {\n\t\tfor _, val := range slice {\n\t\t\targs[key] = append(args[key], val)\n\t\t}\n\t}\n\n\thost, portString, err := net.SplitHostPort(kubeAPIServerConfig.ServingInfo.BindAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ TODO this list (and the content below) will be used to drive a config struct and a reflective test matching config to flags\n\t\/\/ these flags are overridden by a patch\n\t\/\/ admission-control\n\t\/\/ authentication-token-webhook-cache-ttl\n\t\/\/ authentication-token-webhook-config-file\n\t\/\/ authorization-mode\n\t\/\/ authorization-policy-file\n\t\/\/ authorization-webhook-cache-authorized-ttl\n\t\/\/ authorization-webhook-cache-unauthorized-ttl\n\t\/\/ authorization-webhook-config-file\n\t\/\/ basic-auth-file\n\t\/\/ enable-aggregator-routing\n\t\/\/ enable-bootstrap-token-auth\n\t\/\/ oidc-client-id\n\t\/\/ oidc-groups-claim\n\t\/\/ oidc-groups-prefix\n\t\/\/ oidc-issuer-url\n\t\/\/ oidc-required-claim\n\t\/\/ oidc-signing-algs\n\t\/\/ oidc-username-claim\n\t\/\/ oidc-username-prefix\n\t\/\/ service-account-lookup\n\t\/\/ token-auth-file\n\n\t\/\/ alsologtostderr - don't know whether to change it\n\t\/\/ apiserver-count - ignored, hopefully we don't have to fix via patch\n\t\/\/ cert-dir - ignored because we set certs\n\n\t\/\/ these flags were never supported via config\n\t\/\/ cloud-config\n\t\/\/ cloud-provider\n\t\/\/ cloud-provider-gce-lb-src-cidrs\n\t\/\/ contention-profiling\n\t\/\/ default-not-ready-toleration-seconds\n\t\/\/ default-unreachable-toleration-seconds\n\t\/\/ default-watch-cache-size\n\t\/\/ delete-collection-workers\n\t\/\/ deserialization-cache-size\n\t\/\/ enable-garbage-collector\n\t\/\/ etcd-compaction-interval\n\t\/\/ etcd-count-metric-poll-period\n\t\/\/ etcd-servers-overrides\n\t\/\/ experimental-encryption-provider-config\n\t\/\/ feature-gates\n\t\/\/ http2-max-streams-per-connection\n\t\/\/ insecure-bind-address\n\t\/\/ kubelet-timeout\n\t\/\/ log-backtrace-at\n\t\/\/ log-dir\n\t\/\/ log-flush-frequency\n\t\/\/ logtostderr\n\t\/\/ master-service-namespace\n\t\/\/ max-connection-bytes-per-sec\n\t\/\/ profiling\n\t\/\/ request-timeout\n\t\/\/ runtime-config\n\t\/\/ service-account-api-audiences\n\t\/\/ service-account-issuer\n\t\/\/ service-account-key-file\n\t\/\/ service-account-max-token-expiration\n\t\/\/ service-account-signing-key-file\n\t\/\/ stderrthreshold\n\t\/\/ storage-versions\n\t\/\/ target-ram-mb\n\t\/\/ v\n\t\/\/ version\n\t\/\/ vmodule\n\t\/\/ watch-cache\n\t\/\/ watch-cache-sizes\n\n\t\/\/ TODO, we need to set these in order to enable the right admission plugins in each of the servers\n\t\/\/ TODO this is needed for a viable cluster up\n\tadmissionFlags, err := admissionFlags(kubeAPIServerConfig.AdmissionPluginConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor flag, value := range admissionFlags {\n\t\tsetIfUnset(args, flag, value...)\n\t}\n\tsetIfUnset(args, \"allow-privileged\", \"true\")\n\tsetIfUnset(args, \"anonymous-auth\", \"false\")\n\tsetIfUnset(args, \"authorization-mode\", \"RBAC\", \"Node\") \/\/ overridden later, but this runs the poststarthook for bootstrapping RBAC\n\tfor flag, value := range auditFlags(kubeAPIServerConfig) {\n\t\tsetIfUnset(args, flag, value...)\n\t}\n\tsetIfUnset(args, \"bind-address\", host)\n\tsetIfUnset(args, \"client-ca-file\", kubeAPIServerConfig.ServingInfo.ClientCA)\n\tsetIfUnset(args, \"cors-allowed-origins\", kubeAPIServerConfig.CORSAllowedOrigins...)\n\tsetIfUnset(args, \"enable-logs-handler\", \"false\")\n\tsetIfUnset(args, \"enable-swagger-ui\", \"true\")\n\tsetIfUnset(args, \"endpoint-reconciler-type\", \"lease\")\n\tsetIfUnset(args, \"etcd-cafile\", kubeAPIServerConfig.StorageConfig.CA)\n\tsetIfUnset(args, \"etcd-certfile\", kubeAPIServerConfig.StorageConfig.CertFile)\n\tsetIfUnset(args, \"etcd-keyfile\", kubeAPIServerConfig.StorageConfig.KeyFile)\n\tsetIfUnset(args, \"etcd-prefix\", kubeAPIServerConfig.StorageConfig.StoragePrefix)\n\tsetIfUnset(args, \"etcd-servers\", kubeAPIServerConfig.StorageConfig.URLs...)\n\tsetIfUnset(args, \"insecure-port\", \"0\")\n\tsetIfUnset(args, \"kubelet-certificate-authority\", kubeAPIServerConfig.KubeletClientInfo.CA)\n\tsetIfUnset(args, \"kubelet-client-certificate\", kubeAPIServerConfig.KubeletClientInfo.CertFile)\n\tsetIfUnset(args, \"kubelet-client-key\", kubeAPIServerConfig.KubeletClientInfo.KeyFile)\n\tsetIfUnset(args, \"kubelet-https\", \"true\")\n\tsetIfUnset(args, \"kubelet-preferred-address-types\", \"Hostname\", \"InternalIP\", \"ExternalIP\")\n\tsetIfUnset(args, \"kubelet-read-only-port\", \"0\")\n\tsetIfUnset(args, \"kubernetes-service-node-port\", \"0\")\n\tsetIfUnset(args, \"max-mutating-requests-inflight\", fmt.Sprintf(\"%d\", kubeAPIServerConfig.ServingInfo.MaxRequestsInFlight\/2))\n\tsetIfUnset(args, \"max-requests-inflight\", fmt.Sprintf(\"%d\", kubeAPIServerConfig.ServingInfo.MaxRequestsInFlight))\n\tsetIfUnset(args, \"min-request-timeout\", fmt.Sprintf(\"%d\", kubeAPIServerConfig.ServingInfo.RequestTimeoutSeconds))\n\tsetIfUnset(args, \"proxy-client-cert-file\", kubeAPIServerConfig.AggregatorConfig.ProxyClientInfo.CertFile)\n\tsetIfUnset(args, \"proxy-client-key-file\", kubeAPIServerConfig.AggregatorConfig.ProxyClientInfo.KeyFile)\n\tsetIfUnset(args, \"requestheader-allowed-names\", kubeAPIServerConfig.AuthConfig.RequestHeader.ClientCommonNames...)\n\tsetIfUnset(args, \"requestheader-client-ca-file\", kubeAPIServerConfig.AuthConfig.RequestHeader.ClientCA)\n\tsetIfUnset(args, \"requestheader-extra-headers-prefix\", kubeAPIServerConfig.AuthConfig.RequestHeader.ExtraHeaderPrefixes...)\n\tsetIfUnset(args, \"requestheader-group-headers\", kubeAPIServerConfig.AuthConfig.RequestHeader.GroupHeaders...)\n\tsetIfUnset(args, \"requestheader-username-headers\", kubeAPIServerConfig.AuthConfig.RequestHeader.UsernameHeaders...)\n\tsetIfUnset(args, \"secure-port\", portString)\n\tsetIfUnset(args, \"service-cluster-ip-range\", kubeAPIServerConfig.ServicesSubnet)\n\tsetIfUnset(args, \"service-node-port-range\", kubeAPIServerConfig.ServicesNodePortRange)\n\tsetIfUnset(args, \"storage-backend\", \"etcd3\")\n\tsetIfUnset(args, \"storage-media-type\", \"application\/vnd.kubernetes.protobuf\")\n\tsetIfUnset(args, \"tls-cert-file\", kubeAPIServerConfig.ServingInfo.CertFile)\n\tsetIfUnset(args, \"tls-cipher-suites\", kubeAPIServerConfig.ServingInfo.CipherSuites...)\n\tsetIfUnset(args, \"tls-min-version\", kubeAPIServerConfig.ServingInfo.MinTLSVersion)\n\tsetIfUnset(args, \"tls-private-key-file\", kubeAPIServerConfig.ServingInfo.KeyFile)\n\tsetIfUnset(args, \"tls-sni-cert-key\", sniCertKeys(kubeAPIServerConfig.ServingInfo.NamedCertificates)...)\n\tsetIfUnset(args, \"secure-port\", portString)\n\n\tvar keys []string\n\tfor key := range args {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Strings(keys)\n\n\tvar arguments []string\n\tfor _, key := range keys {\n\t\tfor _, token := range args[key] {\n\t\t\targuments = append(arguments, fmt.Sprintf(\"--%s=%v\", key, token))\n\t\t}\n\t}\n\treturn arguments, nil\n}\n\n\/\/ currently for cluster up, audit is just broken.\n\/\/ TODO fix this\nfunc auditFlags(kubeAPIServerConfig *kubecontrolplanev1.KubeAPIServerConfig) map[string][]string {\n\targs := map[string][]string{}\n\tfor key, slice := range kubeAPIServerConfig.APIServerArguments {\n\t\tfor _, val := range slice {\n\t\t\targs[key] = append(args[key], val)\n\t\t}\n\t}\n\n\treturn args\n}\n\nfunc setIfUnset(cmdLineArgs map[string][]string, key string, value ...string) {\n\tif _, ok := cmdLineArgs[key]; !ok {\n\t\tcmdLineArgs[key] = value\n\t}\n}\n\nfunc admissionFlags(admissionPluginConfig map[string]configv1.AdmissionPluginConfig) (map[string][]string, error) {\n\targs := map[string][]string{}\n\n\tforceOn := []string{}\n\tforceOff := []string{}\n\tpluginConfig := map[string]configv1.AdmissionPluginConfig{}\n\tfor pluginName, origConfig := range admissionPluginConfig {\n\t\tconfig := *origConfig.DeepCopy()\n\t\tif len(config.Location) > 0 {\n\t\t\tcontent, err := ioutil.ReadFile(config.Location)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t\/\/ if the config isn't a DefaultAdmissionConfig, then assume we're enabled (we were called out after all)\n\t\t\t\/\/ if the config *is* a DefaultAdmissionConfig and it explicitly said to disable us, we are disabled\n\t\t\tobj, err := configapilatest.ReadYAML(bytes.NewBuffer(content))\n\t\t\t\/\/ if we can't read it, let the plugin deal with it\n\t\t\t\/\/ if nothing was there, let the plugin deal with it\n\t\t\tif err != nil || obj == nil {\n\t\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\t\tconfig.Location = \"\"\n\t\t\t\tconfig.Configuration = runtime.RawExtension{Raw: content}\n\t\t\t\tpluginConfig[pluginName] = config\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif defaultConfig, ok := obj.(*configapi.DefaultAdmissionConfig); !ok {\n\t\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\t\tconfig.Location = \"\"\n\t\t\t\tconfig.Configuration = runtime.RawExtension{Raw: content}\n\t\t\t\tpluginConfig[pluginName] = config\n\n\t\t\t} else if defaultConfig.Disable {\n\t\t\t\tforceOff = append(forceOff, pluginName)\n\n\t\t\t} else {\n\t\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ if it wasn't a DefaultAdmissionConfig object, let the plugin deal with it\n\t\tcurrConfig := &configapi.DefaultAdmissionConfig{}\n\t\tuncastDefaultConfig, _, decodingErr := configapilatest.Codec.Decode(config.Configuration.Raw, nil, currConfig)\n\t\tif decodingErr != nil {\n\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\tpluginConfig[pluginName] = config\n\t\t\tcontinue\n\t\t}\n\n\t\tdefaultConfig, ok := uncastDefaultConfig.(*configapi.DefaultAdmissionConfig)\n\t\tif !ok {\n\t\t\tforceOn = append(forceOn, pluginName)\n\t\t\tpluginConfig[pluginName] = config\n\n\t\t} else if defaultConfig.Disable {\n\t\t\tforceOff = append(forceOff, pluginName)\n\n\t\t} else {\n\t\t\tforceOn = append(forceOn, pluginName)\n\t\t}\n\n\t}\n\tupstreamAdmissionConfig, err := configconversion.ConvertOpenshiftAdmissionConfigToKubeAdmissionConfig(pluginConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconfigBytes, err := configapilatest.WriteYAML(upstreamAdmissionConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttempFile, err := ioutil.TempFile(\"\", \"kubeapiserver-admission-config.yaml\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif _, err := tempFile.Write(configBytes); err != nil {\n\t\treturn nil, err\n\t}\n\ttempFile.Close()\n\n\tsetIfUnset(args, \"admission-control-config-file\", tempFile.Name())\n\tsetIfUnset(args, \"disable-admission-plugins\", forceOff...)\n\tsetIfUnset(args, \"enable-admission-plugins\", forceOn...)\n\n\treturn args, nil\n}\n\nfunc sniCertKeys(namedCertificates []configv1.NamedCertificate) []string {\n\targs := []string{}\n\tfor _, nc := range namedCertificates {\n\t\targs = append(args, fmt.Sprintf(\"%s,%s:%s\", nc.CertFile, nc.KeyFile, strings.Join(nc.Names, \",\")))\n\t}\n\treturn args\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package mime implements parts of the MIME spec.\npackage mime\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n)\n\nvar (\n\tmimeTypes sync.Map \/\/ map[string]string; \".Z\" => \"application\/x-compress\"\n\tmimeTypesLower sync.Map \/\/ map[string]string; \".z\" => \"application\/x-compress\"\n\n\t\/\/ extensions maps from MIME type to list of lowercase file\n\t\/\/ extensions: \"image\/jpeg\" => [\".jpg\", \".jpeg\"]\n\textensionsMu sync.Mutex \/\/ Guards stores (but not loads) on extensions.\n\textensions sync.Map \/\/ map[string][]string; slice values are append-only.\n)\n\nfunc clearSyncMap(m *sync.Map) {\n\tm.Range(func(k, _ interface{}) bool {\n\t\tm.Delete(k)\n\t\treturn true\n\t})\n}\n\n\/\/ setMimeTypes is used by initMime's non-test path, and by tests.\nfunc setMimeTypes(lowerExt, mixExt map[string]string) {\n\tclearSyncMap(&mimeTypes)\n\tclearSyncMap(&mimeTypesLower)\n\tclearSyncMap(&extensions)\n\n\tfor k, v := range lowerExt {\n\t\tmimeTypesLower.Store(k, v)\n\t}\n\tfor k, v := range mixExt {\n\t\tmimeTypes.Store(k, v)\n\t}\n\n\textensionsMu.Lock()\n\tdefer extensionsMu.Unlock()\n\tfor k, v := range lowerExt {\n\t\tjustType, _, err := ParseMediaType(v)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tvar exts []string\n\t\tif ei, ok := extensions.Load(k); ok {\n\t\t\texts = ei.([]string)\n\t\t}\n\t\textensions.Store(justType, append(exts, k))\n\t}\n}\n\nvar builtinTypesLower = map[string]string{\n\t\".css\": \"text\/css; charset=utf-8\",\n\t\".gif\": \"image\/gif\",\n\t\".htm\": \"text\/html; charset=utf-8\",\n\t\".html\": \"text\/html; charset=utf-8\",\n\t\".jpg\": \"image\/jpeg\",\n\t\".jpeg\": \"image\/jpeg\",\n\t\".js\": \"application\/javascript\",\n\t\".wasm\": \"application\/wasm\",\n\t\".pdf\": \"application\/pdf\",\n\t\".png\": \"image\/png\",\n\t\".svg\": \"image\/svg+xml\",\n\t\".xml\": \"text\/xml; charset=utf-8\",\n\t\".mjs\": \"text\/javascript\",\n}\n\nvar once sync.Once \/\/ guards initMime\n\nvar testInitMime, osInitMime func()\n\nfunc initMime() {\n\tif fn := testInitMime; fn != nil {\n\t\tfn()\n\t} else {\n\t\tsetMimeTypes(builtinTypesLower, builtinTypesLower)\n\t\tosInitMime()\n\t}\n}\n\n\/\/ TypeByExtension returns the MIME type associated with the file extension ext.\n\/\/ The extension ext should begin with a leading dot, as in \".html\".\n\/\/ When ext has no associated type, TypeByExtension returns \"\".\n\/\/\n\/\/ Extensions are looked up first case-sensitively, then case-insensitively.\n\/\/\n\/\/ The built-in table is small but on unix it is augmented by the local\n\/\/ system's mime.types file(s) if available under one or more of these\n\/\/ names:\n\/\/\n\/\/ \/etc\/mime.types\n\/\/ \/etc\/apache2\/mime.types\n\/\/ \/etc\/apache\/mime.types\n\/\/\n\/\/ On Windows, MIME types are extracted from the registry.\n\/\/\n\/\/ Text types have the charset parameter set to \"utf-8\" by default.\nfunc TypeByExtension(ext string) string {\n\tonce.Do(initMime)\n\n\t\/\/ Case-sensitive lookup.\n\tif v, ok := mimeTypes.Load(ext); ok {\n\t\treturn v.(string)\n\t}\n\n\t\/\/ Case-insensitive lookup.\n\t\/\/ Optimistically assume a short ASCII extension and be\n\t\/\/ allocation-free in that case.\n\tvar buf [10]byte\n\tlower := buf[:0]\n\tconst utf8RuneSelf = 0x80 \/\/ from utf8 package, but not importing it.\n\tfor i := 0; i < len(ext); i++ {\n\t\tc := ext[i]\n\t\tif c >= utf8RuneSelf {\n\t\t\t\/\/ Slow path.\n\t\t\tsi, _ := mimeTypesLower.Load(strings.ToLower(ext))\n\t\t\ts, _ := si.(string)\n\t\t\treturn s\n\t\t}\n\t\tif 'A' <= c && c <= 'Z' {\n\t\t\tlower = append(lower, c+('a'-'A'))\n\t\t} else {\n\t\t\tlower = append(lower, c)\n\t\t}\n\t}\n\tsi, _ := mimeTypesLower.Load(string(lower))\n\ts, _ := si.(string)\n\treturn s\n}\n\n\/\/ ExtensionsByType returns the extensions known to be associated with the MIME\n\/\/ type typ. The returned extensions will each begin with a leading dot, as in\n\/\/ \".html\". When typ has no associated extensions, ExtensionsByType returns an\n\/\/ nil slice.\nfunc ExtensionsByType(typ string) ([]string, error) {\n\tjustType, _, err := ParseMediaType(typ)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tonce.Do(initMime)\n\ts, ok := extensions.Load(justType)\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\treturn append([]string{}, s.([]string)...), nil\n}\n\n\/\/ AddExtensionType sets the MIME type associated with\n\/\/ the extension ext to typ. The extension should begin with\n\/\/ a leading dot, as in \".html\".\nfunc AddExtensionType(ext, typ string) error {\n\tif !strings.HasPrefix(ext, \".\") {\n\t\treturn fmt.Errorf(\"mime: extension %q missing leading dot\", ext)\n\t}\n\tonce.Do(initMime)\n\treturn setExtensionType(ext, typ)\n}\n\nfunc setExtensionType(extension, mimeType string) error {\n\tjustType, param, err := ParseMediaType(mimeType)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif strings.HasPrefix(mimeType, \"text\/\") && param[\"charset\"] == \"\" {\n\t\tparam[\"charset\"] = \"utf-8\"\n\t\tmimeType = FormatMediaType(mimeType, param)\n\t}\n\textLower := strings.ToLower(extension)\n\n\tmimeTypes.Store(extension, mimeType)\n\tmimeTypesLower.Store(extLower, mimeType)\n\n\textensionsMu.Lock()\n\tdefer extensionsMu.Unlock()\n\tvar exts []string\n\tif ei, ok := extensions.Load(justType); ok {\n\t\texts = ei.([]string)\n\t}\n\tfor _, v := range exts {\n\t\tif v == extLower {\n\t\t\treturn nil\n\t\t}\n\t}\n\textensions.Store(justType, append(exts, extLower))\n\treturn nil\n}\n<commit_msg>mime: update .mjs MIME type from text\/ to application\/javascript<commit_after>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package mime implements parts of the MIME spec.\npackage mime\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n)\n\nvar (\n\tmimeTypes sync.Map \/\/ map[string]string; \".Z\" => \"application\/x-compress\"\n\tmimeTypesLower sync.Map \/\/ map[string]string; \".z\" => \"application\/x-compress\"\n\n\t\/\/ extensions maps from MIME type to list of lowercase file\n\t\/\/ extensions: \"image\/jpeg\" => [\".jpg\", \".jpeg\"]\n\textensionsMu sync.Mutex \/\/ Guards stores (but not loads) on extensions.\n\textensions sync.Map \/\/ map[string][]string; slice values are append-only.\n)\n\nfunc clearSyncMap(m *sync.Map) {\n\tm.Range(func(k, _ interface{}) bool {\n\t\tm.Delete(k)\n\t\treturn true\n\t})\n}\n\n\/\/ setMimeTypes is used by initMime's non-test path, and by tests.\nfunc setMimeTypes(lowerExt, mixExt map[string]string) {\n\tclearSyncMap(&mimeTypes)\n\tclearSyncMap(&mimeTypesLower)\n\tclearSyncMap(&extensions)\n\n\tfor k, v := range lowerExt {\n\t\tmimeTypesLower.Store(k, v)\n\t}\n\tfor k, v := range mixExt {\n\t\tmimeTypes.Store(k, v)\n\t}\n\n\textensionsMu.Lock()\n\tdefer extensionsMu.Unlock()\n\tfor k, v := range lowerExt {\n\t\tjustType, _, err := ParseMediaType(v)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tvar exts []string\n\t\tif ei, ok := extensions.Load(k); ok {\n\t\t\texts = ei.([]string)\n\t\t}\n\t\textensions.Store(justType, append(exts, k))\n\t}\n}\n\nvar builtinTypesLower = map[string]string{\n\t\".css\": \"text\/css; charset=utf-8\",\n\t\".gif\": \"image\/gif\",\n\t\".htm\": \"text\/html; charset=utf-8\",\n\t\".html\": \"text\/html; charset=utf-8\",\n\t\".jpeg\": \"image\/jpeg\",\n\t\".jpg\": \"image\/jpeg\",\n\t\".js\": \"application\/javascript\",\n\t\".mjs\": \"application\/javascript\",\n\t\".pdf\": \"application\/pdf\",\n\t\".png\": \"image\/png\",\n\t\".svg\": \"image\/svg+xml\",\n\t\".wasm\": \"application\/wasm\",\n\t\".xml\": \"text\/xml; charset=utf-8\",\n}\n\nvar once sync.Once \/\/ guards initMime\n\nvar testInitMime, osInitMime func()\n\nfunc initMime() {\n\tif fn := testInitMime; fn != nil {\n\t\tfn()\n\t} else {\n\t\tsetMimeTypes(builtinTypesLower, builtinTypesLower)\n\t\tosInitMime()\n\t}\n}\n\n\/\/ TypeByExtension returns the MIME type associated with the file extension ext.\n\/\/ The extension ext should begin with a leading dot, as in \".html\".\n\/\/ When ext has no associated type, TypeByExtension returns \"\".\n\/\/\n\/\/ Extensions are looked up first case-sensitively, then case-insensitively.\n\/\/\n\/\/ The built-in table is small but on unix it is augmented by the local\n\/\/ system's mime.types file(s) if available under one or more of these\n\/\/ names:\n\/\/\n\/\/ \/etc\/mime.types\n\/\/ \/etc\/apache2\/mime.types\n\/\/ \/etc\/apache\/mime.types\n\/\/\n\/\/ On Windows, MIME types are extracted from the registry.\n\/\/\n\/\/ Text types have the charset parameter set to \"utf-8\" by default.\nfunc TypeByExtension(ext string) string {\n\tonce.Do(initMime)\n\n\t\/\/ Case-sensitive lookup.\n\tif v, ok := mimeTypes.Load(ext); ok {\n\t\treturn v.(string)\n\t}\n\n\t\/\/ Case-insensitive lookup.\n\t\/\/ Optimistically assume a short ASCII extension and be\n\t\/\/ allocation-free in that case.\n\tvar buf [10]byte\n\tlower := buf[:0]\n\tconst utf8RuneSelf = 0x80 \/\/ from utf8 package, but not importing it.\n\tfor i := 0; i < len(ext); i++ {\n\t\tc := ext[i]\n\t\tif c >= utf8RuneSelf {\n\t\t\t\/\/ Slow path.\n\t\t\tsi, _ := mimeTypesLower.Load(strings.ToLower(ext))\n\t\t\ts, _ := si.(string)\n\t\t\treturn s\n\t\t}\n\t\tif 'A' <= c && c <= 'Z' {\n\t\t\tlower = append(lower, c+('a'-'A'))\n\t\t} else {\n\t\t\tlower = append(lower, c)\n\t\t}\n\t}\n\tsi, _ := mimeTypesLower.Load(string(lower))\n\ts, _ := si.(string)\n\treturn s\n}\n\n\/\/ ExtensionsByType returns the extensions known to be associated with the MIME\n\/\/ type typ. The returned extensions will each begin with a leading dot, as in\n\/\/ \".html\". When typ has no associated extensions, ExtensionsByType returns an\n\/\/ nil slice.\nfunc ExtensionsByType(typ string) ([]string, error) {\n\tjustType, _, err := ParseMediaType(typ)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tonce.Do(initMime)\n\ts, ok := extensions.Load(justType)\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\treturn append([]string{}, s.([]string)...), nil\n}\n\n\/\/ AddExtensionType sets the MIME type associated with\n\/\/ the extension ext to typ. The extension should begin with\n\/\/ a leading dot, as in \".html\".\nfunc AddExtensionType(ext, typ string) error {\n\tif !strings.HasPrefix(ext, \".\") {\n\t\treturn fmt.Errorf(\"mime: extension %q missing leading dot\", ext)\n\t}\n\tonce.Do(initMime)\n\treturn setExtensionType(ext, typ)\n}\n\nfunc setExtensionType(extension, mimeType string) error {\n\tjustType, param, err := ParseMediaType(mimeType)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif strings.HasPrefix(mimeType, \"text\/\") && param[\"charset\"] == \"\" {\n\t\tparam[\"charset\"] = \"utf-8\"\n\t\tmimeType = FormatMediaType(mimeType, param)\n\t}\n\textLower := strings.ToLower(extension)\n\n\tmimeTypes.Store(extension, mimeType)\n\tmimeTypesLower.Store(extLower, mimeType)\n\n\textensionsMu.Lock()\n\tdefer extensionsMu.Unlock()\n\tvar exts []string\n\tif ei, ok := extensions.Load(justType); ok {\n\t\texts = ei.([]string)\n\t}\n\tfor _, v := range exts {\n\t\tif v == extLower {\n\t\t\treturn nil\n\t\t}\n\t}\n\textensions.Store(justType, append(exts, extLower))\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package mime implements parts of the MIME spec.\npackage mime\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n)\n\nvar (\n\tmimeTypes sync.Map \/\/ map[string]string; \".Z\" => \"application\/x-compress\"\n\tmimeTypesLower sync.Map \/\/ map[string]string; \".z\" => \"application\/x-compress\"\n\n\t\/\/ extensions maps from MIME type to list of lowercase file\n\t\/\/ extensions: \"image\/jpeg\" => [\".jpg\", \".jpeg\"]\n\textensionsMu sync.Mutex \/\/ Guards stores (but not loads) on extensions.\n\textensions sync.Map \/\/ map[string][]string; slice values are append-only.\n)\n\nfunc clearSyncMap(m *sync.Map) {\n\tm.Range(func(k, _ interface{}) bool {\n\t\tm.Delete(k)\n\t\treturn true\n\t})\n}\n\n\/\/ setMimeTypes is used by initMime's non-test path, and by tests.\nfunc setMimeTypes(lowerExt, mixExt map[string]string) {\n\tclearSyncMap(&mimeTypes)\n\tclearSyncMap(&mimeTypesLower)\n\tclearSyncMap(&extensions)\n\n\tfor k, v := range lowerExt {\n\t\tmimeTypesLower.Store(k, v)\n\t}\n\tfor k, v := range mixExt {\n\t\tmimeTypes.Store(k, v)\n\t}\n\n\textensionsMu.Lock()\n\tdefer extensionsMu.Unlock()\n\tfor k, v := range lowerExt {\n\t\tjustType, _, err := ParseMediaType(v)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tvar exts []string\n\t\tif ei, ok := extensions.Load(justType); ok {\n\t\t\texts = ei.([]string)\n\t\t}\n\t\textensions.Store(justType, append(exts, k))\n\t}\n}\n\nvar builtinTypesLower = map[string]string{\n\t\".css\": \"text\/css; charset=utf-8\",\n\t\".gif\": \"image\/gif\",\n\t\".htm\": \"text\/html; charset=utf-8\",\n\t\".html\": \"text\/html; charset=utf-8\",\n\t\".jpeg\": \"image\/jpeg\",\n\t\".jpg\": \"image\/jpeg\",\n\t\".js\": \"text\/javascript; charset=utf-8\",\n\t\".json\": \"application\/json\",\n\t\".mjs\": \"text\/javascript; charset=utf-8\",\n\t\".pdf\": \"application\/pdf\",\n\t\".png\": \"image\/png\",\n\t\".svg\": \"image\/svg+xml\",\n\t\".wasm\": \"application\/wasm\",\n\t\".webp\": \"image\/webp\",\n\t\".xml\": \"text\/xml; charset=utf-8\",\n}\n\nvar once sync.Once \/\/ guards initMime\n\nvar testInitMime, osInitMime func()\n\nfunc initMime() {\n\tif fn := testInitMime; fn != nil {\n\t\tfn()\n\t} else {\n\t\tsetMimeTypes(builtinTypesLower, builtinTypesLower)\n\t\tosInitMime()\n\t}\n}\n\n\/\/ TypeByExtension returns the MIME type associated with the file extension ext.\n\/\/ The extension ext should begin with a leading dot, as in \".html\".\n\/\/ When ext has no associated type, TypeByExtension returns \"\".\n\/\/\n\/\/ Extensions are looked up first case-sensitively, then case-insensitively.\n\/\/\n\/\/ The built-in table is small but on unix it is augmented by the local\n\/\/ system's mime.types file(s) if available under one or more of these\n\/\/ names:\n\/\/\n\/\/ \/etc\/mime.types\n\/\/ \/etc\/apache2\/mime.types\n\/\/ \/etc\/apache\/mime.types\n\/\/\n\/\/ On Windows, MIME types are extracted from the registry.\n\/\/\n\/\/ Text types have the charset parameter set to \"utf-8\" by default.\nfunc TypeByExtension(ext string) string {\n\tonce.Do(initMime)\n\n\t\/\/ Case-sensitive lookup.\n\tif v, ok := mimeTypes.Load(ext); ok {\n\t\treturn v.(string)\n\t}\n\n\t\/\/ Case-insensitive lookup.\n\t\/\/ Optimistically assume a short ASCII extension and be\n\t\/\/ allocation-free in that case.\n\tvar buf [10]byte\n\tlower := buf[:0]\n\tconst utf8RuneSelf = 0x80 \/\/ from utf8 package, but not importing it.\n\tfor i := 0; i < len(ext); i++ {\n\t\tc := ext[i]\n\t\tif c >= utf8RuneSelf {\n\t\t\t\/\/ Slow path.\n\t\t\tsi, _ := mimeTypesLower.Load(strings.ToLower(ext))\n\t\t\ts, _ := si.(string)\n\t\t\treturn s\n\t\t}\n\t\tif 'A' <= c && c <= 'Z' {\n\t\t\tlower = append(lower, c+('a'-'A'))\n\t\t} else {\n\t\t\tlower = append(lower, c)\n\t\t}\n\t}\n\tsi, _ := mimeTypesLower.Load(string(lower))\n\ts, _ := si.(string)\n\treturn s\n}\n\n\/\/ ExtensionsByType returns the extensions known to be associated with the MIME\n\/\/ type typ. The returned extensions will each begin with a leading dot, as in\n\/\/ \".html\". When typ has no associated extensions, ExtensionsByType returns an\n\/\/ nil slice.\nfunc ExtensionsByType(typ string) ([]string, error) {\n\tjustType, _, err := ParseMediaType(typ)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tonce.Do(initMime)\n\ts, ok := extensions.Load(justType)\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\tret := append([]string(nil), s.([]string)...)\n\tsort.Strings(ret)\n\treturn ret, nil\n}\n\n\/\/ AddExtensionType sets the MIME type associated with\n\/\/ the extension ext to typ. The extension should begin with\n\/\/ a leading dot, as in \".html\".\nfunc AddExtensionType(ext, typ string) error {\n\tif !strings.HasPrefix(ext, \".\") {\n\t\treturn fmt.Errorf(\"mime: extension %q missing leading dot\", ext)\n\t}\n\tonce.Do(initMime)\n\treturn setExtensionType(ext, typ)\n}\n\nfunc setExtensionType(extension, mimeType string) error {\n\tjustType, param, err := ParseMediaType(mimeType)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif strings.HasPrefix(mimeType, \"text\/\") && param[\"charset\"] == \"\" {\n\t\tparam[\"charset\"] = \"utf-8\"\n\t\tmimeType = FormatMediaType(mimeType, param)\n\t}\n\textLower := strings.ToLower(extension)\n\n\tmimeTypes.Store(extension, mimeType)\n\tmimeTypesLower.Store(extLower, mimeType)\n\n\textensionsMu.Lock()\n\tdefer extensionsMu.Unlock()\n\tvar exts []string\n\tif ei, ok := extensions.Load(justType); ok {\n\t\texts = ei.([]string)\n\t}\n\tfor _, v := range exts {\n\t\tif v == extLower {\n\t\t\treturn nil\n\t\t}\n\t}\n\textensions.Store(justType, append(exts, extLower))\n\treturn nil\n}\n<commit_msg>mime: add mime type for avif image file format<commit_after>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package mime implements parts of the MIME spec.\npackage mime\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n)\n\nvar (\n\tmimeTypes sync.Map \/\/ map[string]string; \".Z\" => \"application\/x-compress\"\n\tmimeTypesLower sync.Map \/\/ map[string]string; \".z\" => \"application\/x-compress\"\n\n\t\/\/ extensions maps from MIME type to list of lowercase file\n\t\/\/ extensions: \"image\/jpeg\" => [\".jpg\", \".jpeg\"]\n\textensionsMu sync.Mutex \/\/ Guards stores (but not loads) on extensions.\n\textensions sync.Map \/\/ map[string][]string; slice values are append-only.\n)\n\nfunc clearSyncMap(m *sync.Map) {\n\tm.Range(func(k, _ interface{}) bool {\n\t\tm.Delete(k)\n\t\treturn true\n\t})\n}\n\n\/\/ setMimeTypes is used by initMime's non-test path, and by tests.\nfunc setMimeTypes(lowerExt, mixExt map[string]string) {\n\tclearSyncMap(&mimeTypes)\n\tclearSyncMap(&mimeTypesLower)\n\tclearSyncMap(&extensions)\n\n\tfor k, v := range lowerExt {\n\t\tmimeTypesLower.Store(k, v)\n\t}\n\tfor k, v := range mixExt {\n\t\tmimeTypes.Store(k, v)\n\t}\n\n\textensionsMu.Lock()\n\tdefer extensionsMu.Unlock()\n\tfor k, v := range lowerExt {\n\t\tjustType, _, err := ParseMediaType(v)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tvar exts []string\n\t\tif ei, ok := extensions.Load(justType); ok {\n\t\t\texts = ei.([]string)\n\t\t}\n\t\textensions.Store(justType, append(exts, k))\n\t}\n}\n\nvar builtinTypesLower = map[string]string{\n\t\".css\": \"text\/css; charset=utf-8\",\n\t\".gif\": \"image\/gif\",\n\t\".htm\": \"text\/html; charset=utf-8\",\n\t\".html\": \"text\/html; charset=utf-8\",\n\t\".jpeg\": \"image\/jpeg\",\n\t\".jpg\": \"image\/jpeg\",\n\t\".js\": \"text\/javascript; charset=utf-8\",\n\t\".json\": \"application\/json\",\n\t\".mjs\": \"text\/javascript; charset=utf-8\",\n\t\".pdf\": \"application\/pdf\",\n\t\".png\": \"image\/png\",\n\t\".svg\": \"image\/svg+xml\",\n\t\".wasm\": \"application\/wasm\",\n\t\".webp\": \"image\/webp\",\n\t\".avif\": \"image\/avif\",\n\t\".xml\": \"text\/xml; charset=utf-8\",\n}\n\nvar once sync.Once \/\/ guards initMime\n\nvar testInitMime, osInitMime func()\n\nfunc initMime() {\n\tif fn := testInitMime; fn != nil {\n\t\tfn()\n\t} else {\n\t\tsetMimeTypes(builtinTypesLower, builtinTypesLower)\n\t\tosInitMime()\n\t}\n}\n\n\/\/ TypeByExtension returns the MIME type associated with the file extension ext.\n\/\/ The extension ext should begin with a leading dot, as in \".html\".\n\/\/ When ext has no associated type, TypeByExtension returns \"\".\n\/\/\n\/\/ Extensions are looked up first case-sensitively, then case-insensitively.\n\/\/\n\/\/ The built-in table is small but on unix it is augmented by the local\n\/\/ system's mime.types file(s) if available under one or more of these\n\/\/ names:\n\/\/\n\/\/ \/etc\/mime.types\n\/\/ \/etc\/apache2\/mime.types\n\/\/ \/etc\/apache\/mime.types\n\/\/\n\/\/ On Windows, MIME types are extracted from the registry.\n\/\/\n\/\/ Text types have the charset parameter set to \"utf-8\" by default.\nfunc TypeByExtension(ext string) string {\n\tonce.Do(initMime)\n\n\t\/\/ Case-sensitive lookup.\n\tif v, ok := mimeTypes.Load(ext); ok {\n\t\treturn v.(string)\n\t}\n\n\t\/\/ Case-insensitive lookup.\n\t\/\/ Optimistically assume a short ASCII extension and be\n\t\/\/ allocation-free in that case.\n\tvar buf [10]byte\n\tlower := buf[:0]\n\tconst utf8RuneSelf = 0x80 \/\/ from utf8 package, but not importing it.\n\tfor i := 0; i < len(ext); i++ {\n\t\tc := ext[i]\n\t\tif c >= utf8RuneSelf {\n\t\t\t\/\/ Slow path.\n\t\t\tsi, _ := mimeTypesLower.Load(strings.ToLower(ext))\n\t\t\ts, _ := si.(string)\n\t\t\treturn s\n\t\t}\n\t\tif 'A' <= c && c <= 'Z' {\n\t\t\tlower = append(lower, c+('a'-'A'))\n\t\t} else {\n\t\t\tlower = append(lower, c)\n\t\t}\n\t}\n\tsi, _ := mimeTypesLower.Load(string(lower))\n\ts, _ := si.(string)\n\treturn s\n}\n\n\/\/ ExtensionsByType returns the extensions known to be associated with the MIME\n\/\/ type typ. The returned extensions will each begin with a leading dot, as in\n\/\/ \".html\". When typ has no associated extensions, ExtensionsByType returns an\n\/\/ nil slice.\nfunc ExtensionsByType(typ string) ([]string, error) {\n\tjustType, _, err := ParseMediaType(typ)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tonce.Do(initMime)\n\ts, ok := extensions.Load(justType)\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\tret := append([]string(nil), s.([]string)...)\n\tsort.Strings(ret)\n\treturn ret, nil\n}\n\n\/\/ AddExtensionType sets the MIME type associated with\n\/\/ the extension ext to typ. The extension should begin with\n\/\/ a leading dot, as in \".html\".\nfunc AddExtensionType(ext, typ string) error {\n\tif !strings.HasPrefix(ext, \".\") {\n\t\treturn fmt.Errorf(\"mime: extension %q missing leading dot\", ext)\n\t}\n\tonce.Do(initMime)\n\treturn setExtensionType(ext, typ)\n}\n\nfunc setExtensionType(extension, mimeType string) error {\n\tjustType, param, err := ParseMediaType(mimeType)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif strings.HasPrefix(mimeType, \"text\/\") && param[\"charset\"] == \"\" {\n\t\tparam[\"charset\"] = \"utf-8\"\n\t\tmimeType = FormatMediaType(mimeType, param)\n\t}\n\textLower := strings.ToLower(extension)\n\n\tmimeTypes.Store(extension, mimeType)\n\tmimeTypesLower.Store(extLower, mimeType)\n\n\textensionsMu.Lock()\n\tdefer extensionsMu.Unlock()\n\tvar exts []string\n\tif ei, ok := extensions.Load(justType); ok {\n\t\texts = ei.([]string)\n\t}\n\tfor _, v := range exts {\n\t\tif v == extLower {\n\t\t\treturn nil\n\t\t}\n\t}\n\textensions.Store(justType, append(exts, extLower))\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage drive\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"google.golang.org\/api\/googleapi\"\n)\n\nfunc TestRemoteOpToChangerTranslator(t *testing.T) {\n\tg := &Commands{}\n\tnow := time.Now()\n\n\tcases := []struct {\n\t\tchange *Change\n\t\tname string\n\t\twantedFn func(*Change) error\n\t}{\n\t\t{change: &Change{Src: nil, Dest: nil}, wantedFn: nil, name: \"nil\"},\n\t\t{change: &Change{Src: &File{}, Dest: &File{}}, wantedFn: nil, name: \"nil\"},\n\t\t{change: &Change{Src: &File{}, Dest: nil}, wantedFn: g.remoteAdd, name: \"remoteAdd\"},\n\t\t{change: &Change{Src: nil, Dest: &File{}}, wantedFn: g.remoteTrash, name: \"remoteTrash\"},\n\t\t{\n\t\t\tchange: &Change{\n\t\t\t\tDest: &File{ModTime: now},\n\t\t\t\tSrc: &File{ModTime: now.Add(time.Hour)},\n\t\t\t},\n\t\t\twantedFn: g.remoteMod, name: \"remoteMod\",\n\t\t},\n\t\t{\n\t\t\tchange: &Change{\n\t\t\t\tDest: &File{ModTime: now},\n\t\t\t\tSrc: &File{ModTime: now},\n\t\t\t},\n\t\t\twantedFn: nil, name: \"noop\",\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\tgot := remoteOpToChangerTranslator(g, tc.change)\n\t\tvptr1 := reflect.ValueOf(got).Pointer()\n\t\tvptr2 := reflect.ValueOf(tc.wantedFn).Pointer()\n\n\t\tif vptr1 != vptr2 {\n\t\t\tt.Errorf(\"expected %q expected (%v) got (%v)\", tc.name, tc.wantedFn, got)\n\t\t}\n\t}\n}\n\nfunc TestLocalOpToChangerTranslator(t *testing.T) {\n\tg := &Commands{}\n\tnow := time.Now()\n\n\tcases := []struct {\n\t\tchange *Change\n\t\tname string\n\t\twantedFn func(*Change, []string) error\n\t}{\n\t\t{change: &Change{Src: nil, Dest: nil}, wantedFn: nil, name: \"nil\"},\n\t\t{change: &Change{Src: &File{}, Dest: &File{}}, wantedFn: nil, name: \"nil\"},\n\t\t{\n\t\t\tchange: &Change{Src: &File{}, Dest: nil},\n\t\t\twantedFn: g.localAdd, name: \"localAdd\",\n\t\t},\n\t\t{\n\t\t\tchange: &Change{Dest: nil, Src: &File{}},\n\t\t\twantedFn: g.localAdd, name: \"localAdd\",\n\t\t},\n\t\t{\n\t\t\tchange: &Change{Src: nil, Dest: &File{}},\n\t\t\twantedFn: g.localDelete, name: \"localDelete\",\n\t\t},\n\t\t{\n\t\t\tchange: &Change{\n\t\t\t\tSrc: &File{ModTime: now},\n\t\t\t\tDest: &File{ModTime: now.Add(time.Hour)},\n\t\t\t},\n\t\t\twantedFn: g.localMod, name: \"localMod\",\n\t\t},\n\t\t{\n\t\t\tchange: &Change{\n\t\t\t\tDest: &File{ModTime: now},\n\t\t\t\tSrc: &File{ModTime: now},\n\t\t\t},\n\t\t\twantedFn: nil, name: \"noop\",\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\tgot := localOpToChangerTranslator(g, tc.change)\n\t\tvptr1 := reflect.ValueOf(got).Pointer()\n\t\tvptr2 := reflect.ValueOf(tc.wantedFn).Pointer()\n\n\t\tif vptr1 != vptr2 {\n\t\t\tt.Errorf(\"expected %q expected (%v) got (%v)\", tc.name, tc.wantedFn, got)\n\t\t}\n\t}\n}\n\nfunc TestRetryableErrorCheck(t *testing.T) {\n\tcases := []struct {\n\t\tvalue interface{}\n\t\tsuccess, retryable bool\n\t\tcomment string\n\t}{\n\t\t{\n\t\t\tvalue: nil, success: false, retryable: true,\n\t\t\tcomment: \"a nil tuple is retryable but not successful\",\n\t\t},\n\t\t{\n\t\t\tvalue: t, success: false, retryable: true,\n\t\t\tcomment: \"t value is not a tuple, is retryable but not successful\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{first: nil, last: nil}, success: true, retryable: false,\n\t\t\tcomment: \"last=nil representing a nil error so success, unretryable\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{first: nil, last: fmt.Errorf(\"flux\")},\n\t\t\tsuccess: false, retryable: true,\n\t\t\tcomment: \"last!=nil, non-familiar error so unsuccessful, retryable\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{\n\t\t\t\tfirst: \"\",\n\t\t\t\tlast: &googleapi.Error{\n\t\t\t\t\tMessage: \"This is an error\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuccess: false, retryable: false,\n\t\t\tcomment: \"last!=nil, familiar error so unsuccessful, retryable:: statusCode undefined\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{\n\t\t\t\tfirst: \"\",\n\t\t\t\tlast: &googleapi.Error{\n\t\t\t\t\tCode: 500,\n\t\t\t\t\tMessage: \"This is an error\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuccess: false, retryable: true,\n\t\t\tcomment: \"last!=nil, familiar error so unsuccessful, retryable:: statusCode 500\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{\n\t\t\t\tfirst: nil,\n\t\t\t\tlast: &googleapi.Error{\n\t\t\t\t\tCode: 401,\n\t\t\t\t\tMessage: \"401 right here\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuccess: false, retryable: true,\n\t\t\tcomment: \"last!=nil, 401 must be retryable\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{\n\t\t\t\tfirst: nil,\n\t\t\t\tlast: &googleapi.Error{\n\t\t\t\t\tCode: 409,\n\t\t\t\t\tMessage: \"409 right here\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuccess: false, retryable: false,\n\t\t\tcomment: \"last!=nil, 409 is unclassified so unretryable\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{\n\t\t\t\tfirst: nil,\n\t\t\t\tlast: &googleapi.Error{\n\t\t\t\t\tCode: 403,\n\t\t\t\t\tMessage: \"403 right here\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuccess: false, retryable: true,\n\t\t\tcomment: \"last!=nil, 403 is retryable\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{\n\t\t\t\tfirst: nil,\n\t\t\t\tlast: &googleapi.Error{\n\t\t\t\t\tCode: 500,\n\t\t\t\t\tMessage: MsgErrFileNotMutable,\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuccess: false, retryable: false,\n\t\t\tcomment: \"issue #472 FileNotMutable is unretryable\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{\n\t\t\t\tfirst: nil,\n\t\t\t\tlast: &googleapi.Error{\n\t\t\t\t\tCode: 500,\n\t\t\t\t\tMessage: strings.ToLower(MsgErrFileNotMutable),\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuccess: false, retryable: false,\n\t\t\tcomment: \"issue #472 FileNotMutable is unretryable, casefold held\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{\n\t\t\t\tfirst: nil,\n\t\t\t\tlast: &googleapi.Error{\n\t\t\t\t\tCode: 501,\n\t\t\t\t\tMessage: strings.ToUpper(MsgErrFileNotMutable),\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuccess: false, retryable: false,\n\t\t\tcomment: \"issue #472 FileNotMutable is unretryable, casefold held\",\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\tsuccess, retryable := retryableErrorCheck(tc.value)\n\t\tif success != tc.success {\n\t\t\tt.Errorf(\"%v success got %v expected %v\", tc.value, success, tc.success)\n\t\t}\n\t\tif retryable != tc.retryable {\n\t\t\tt.Errorf(\"%v retryable got %v expected %v: %q\", tc.value, retryable, tc.retryable, tc.comment)\n\t\t}\n\t}\n}\n<commit_msg>ignorer tests added<commit_after>\/\/ Copyright 2016 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage drive\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"google.golang.org\/api\/googleapi\"\n)\n\nfunc callerFilepath() string {\n\t_, p, _, _ := runtime.Caller(1)\n\treturn p\n}\n\nfunc TestRemoteOpToChangerTranslator(t *testing.T) {\n\tg := &Commands{}\n\tnow := time.Now()\n\n\tcases := []struct {\n\t\tchange *Change\n\t\tname string\n\t\twantedFn func(*Change) error\n\t}{\n\t\t{change: &Change{Src: nil, Dest: nil}, wantedFn: nil, name: \"nil\"},\n\t\t{change: &Change{Src: &File{}, Dest: &File{}}, wantedFn: nil, name: \"nil\"},\n\t\t{change: &Change{Src: &File{}, Dest: nil}, wantedFn: g.remoteAdd, name: \"remoteAdd\"},\n\t\t{change: &Change{Src: nil, Dest: &File{}}, wantedFn: g.remoteTrash, name: \"remoteTrash\"},\n\t\t{\n\t\t\tchange: &Change{\n\t\t\t\tDest: &File{ModTime: now},\n\t\t\t\tSrc: &File{ModTime: now.Add(time.Hour)},\n\t\t\t},\n\t\t\twantedFn: g.remoteMod, name: \"remoteMod\",\n\t\t},\n\t\t{\n\t\t\tchange: &Change{\n\t\t\t\tDest: &File{ModTime: now},\n\t\t\t\tSrc: &File{ModTime: now},\n\t\t\t},\n\t\t\twantedFn: nil, name: \"noop\",\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\tgot := remoteOpToChangerTranslator(g, tc.change)\n\t\tvptr1 := reflect.ValueOf(got).Pointer()\n\t\tvptr2 := reflect.ValueOf(tc.wantedFn).Pointer()\n\n\t\tif vptr1 != vptr2 {\n\t\t\tt.Errorf(\"expected %q expected (%v) got (%v)\", tc.name, tc.wantedFn, got)\n\t\t}\n\t}\n}\n\nfunc TestLocalOpToChangerTranslator(t *testing.T) {\n\tg := &Commands{}\n\tnow := time.Now()\n\n\tcases := []struct {\n\t\tchange *Change\n\t\tname string\n\t\twantedFn func(*Change, []string) error\n\t}{\n\t\t{change: &Change{Src: nil, Dest: nil}, wantedFn: nil, name: \"nil\"},\n\t\t{change: &Change{Src: &File{}, Dest: &File{}}, wantedFn: nil, name: \"nil\"},\n\t\t{\n\t\t\tchange: &Change{Src: &File{}, Dest: nil},\n\t\t\twantedFn: g.localAdd, name: \"localAdd\",\n\t\t},\n\t\t{\n\t\t\tchange: &Change{Dest: nil, Src: &File{}},\n\t\t\twantedFn: g.localAdd, name: \"localAdd\",\n\t\t},\n\t\t{\n\t\t\tchange: &Change{Src: nil, Dest: &File{}},\n\t\t\twantedFn: g.localDelete, name: \"localDelete\",\n\t\t},\n\t\t{\n\t\t\tchange: &Change{\n\t\t\t\tSrc: &File{ModTime: now},\n\t\t\t\tDest: &File{ModTime: now.Add(time.Hour)},\n\t\t\t},\n\t\t\twantedFn: g.localMod, name: \"localMod\",\n\t\t},\n\t\t{\n\t\t\tchange: &Change{\n\t\t\t\tDest: &File{ModTime: now},\n\t\t\t\tSrc: &File{ModTime: now},\n\t\t\t},\n\t\t\twantedFn: nil, name: \"noop\",\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\tgot := localOpToChangerTranslator(g, tc.change)\n\t\tvptr1 := reflect.ValueOf(got).Pointer()\n\t\tvptr2 := reflect.ValueOf(tc.wantedFn).Pointer()\n\n\t\tif vptr1 != vptr2 {\n\t\t\tt.Errorf(\"expected %q expected (%v) got (%v)\", tc.name, tc.wantedFn, got)\n\t\t}\n\t}\n}\n\nfunc TestRetryableErrorCheck(t *testing.T) {\n\tcases := []struct {\n\t\tvalue interface{}\n\t\tsuccess, retryable bool\n\t\tcomment string\n\t}{\n\t\t{\n\t\t\tvalue: nil, success: false, retryable: true,\n\t\t\tcomment: \"a nil tuple is retryable but not successful\",\n\t\t},\n\t\t{\n\t\t\tvalue: t, success: false, retryable: true,\n\t\t\tcomment: \"t value is not a tuple, is retryable but not successful\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{first: nil, last: nil}, success: true, retryable: false,\n\t\t\tcomment: \"last=nil representing a nil error so success, unretryable\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{first: nil, last: fmt.Errorf(\"flux\")},\n\t\t\tsuccess: false, retryable: true,\n\t\t\tcomment: \"last!=nil, non-familiar error so unsuccessful, retryable\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{\n\t\t\t\tfirst: \"\",\n\t\t\t\tlast: &googleapi.Error{\n\t\t\t\t\tMessage: \"This is an error\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuccess: false, retryable: false,\n\t\t\tcomment: \"last!=nil, familiar error so unsuccessful, retryable:: statusCode undefined\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{\n\t\t\t\tfirst: \"\",\n\t\t\t\tlast: &googleapi.Error{\n\t\t\t\t\tCode: 500,\n\t\t\t\t\tMessage: \"This is an error\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuccess: false, retryable: true,\n\t\t\tcomment: \"last!=nil, familiar error so unsuccessful, retryable:: statusCode 500\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{\n\t\t\t\tfirst: nil,\n\t\t\t\tlast: &googleapi.Error{\n\t\t\t\t\tCode: 401,\n\t\t\t\t\tMessage: \"401 right here\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuccess: false, retryable: true,\n\t\t\tcomment: \"last!=nil, 401 must be retryable\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{\n\t\t\t\tfirst: nil,\n\t\t\t\tlast: &googleapi.Error{\n\t\t\t\t\tCode: 409,\n\t\t\t\t\tMessage: \"409 right here\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuccess: false, retryable: false,\n\t\t\tcomment: \"last!=nil, 409 is unclassified so unretryable\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{\n\t\t\t\tfirst: nil,\n\t\t\t\tlast: &googleapi.Error{\n\t\t\t\t\tCode: 403,\n\t\t\t\t\tMessage: \"403 right here\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuccess: false, retryable: true,\n\t\t\tcomment: \"last!=nil, 403 is retryable\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{\n\t\t\t\tfirst: nil,\n\t\t\t\tlast: &googleapi.Error{\n\t\t\t\t\tCode: 500,\n\t\t\t\t\tMessage: MsgErrFileNotMutable,\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuccess: false, retryable: false,\n\t\t\tcomment: \"issue #472 FileNotMutable is unretryable\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{\n\t\t\t\tfirst: nil,\n\t\t\t\tlast: &googleapi.Error{\n\t\t\t\t\tCode: 500,\n\t\t\t\t\tMessage: strings.ToLower(MsgErrFileNotMutable),\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuccess: false, retryable: false,\n\t\t\tcomment: \"issue #472 FileNotMutable is unretryable, casefold held\",\n\t\t},\n\t\t{\n\t\t\tvalue: &tuple{\n\t\t\t\tfirst: nil,\n\t\t\t\tlast: &googleapi.Error{\n\t\t\t\t\tCode: 501,\n\t\t\t\t\tMessage: strings.ToUpper(MsgErrFileNotMutable),\n\t\t\t\t},\n\t\t\t},\n\t\t\tsuccess: false, retryable: false,\n\t\t\tcomment: \"issue #472 FileNotMutable is unretryable, casefold held\",\n\t\t},\n\t}\n\n\tfor _, tc := range cases {\n\t\tsuccess, retryable := retryableErrorCheck(tc.value)\n\t\tif success != tc.success {\n\t\t\tt.Errorf(\"%v success got %v expected %v\", tc.value, success, tc.success)\n\t\t}\n\t\tif retryable != tc.retryable {\n\t\t\tt.Errorf(\"%v retryable got %v expected %v: %q\", tc.value, retryable, tc.retryable, tc.comment)\n\t\t}\n\t}\n}\n\nfunc TestDriveIgnore(t *testing.T) {\n\ttestCases := []struct {\n\t\tclauses []string\n\t\tmustErr bool\n\t\tnilIgnorer bool\n\t\texcludesExpected []string\n\t\tincludesExpected []string\n\t\tcomment string\n\t\tmustBeIgnored []string\n\t\tmustNotBeIgnored []string\n\t}{\n\t\t{clauses: []string{}, nilIgnorer: true, comment: \"no clauses in\"},\n\t\t{\n\t\t\tclauses: []string{\"#this is a comment\"}, nilIgnorer: false,\n\t\t\tcomment: \"plain commented file\",\n\t\t},\n\t\t{\n\t\t\tcomment: \"intentionally unescaped '.'\",\n\t\t\tclauses: []string{\".git\", \".docx$\"},\n\t\t\tmustBeIgnored: []string{\"bgits\", \"frogdocx\"},\n\t\t\tmustNotBeIgnored: []string{\"\", \" \", \"frogdocxs\"},\n\t\t},\n\t\t{\n\t\t\tcomment: \"entirely commented, so all clauses should be skipped\",\n\t\t\tclauses: []string{\"^#\"},\n\t\t\tmustBeIgnored: []string{\"#patch\", \"# \", \"#\", \"#Like this one\", \"#\\\\.git\"},\n\t\t\tmustNotBeIgnored: []string{\"\", \" \", \"src\/misc_test.go\"},\n\t\t},\n\t\t{\n\t\t\tcomment: \"strictly escaped '.'\",\n\t\t\tclauses: []string{\"\\\\.git\", \"\\\\.docx$\"},\n\t\t\tmustBeIgnored: []string{\".git\", \"drive.docx\", \".docx\"},\n\t\t\tmustNotBeIgnored: []string{\n\t\t\t\t\"\", \" \", \"frogdocxs\", \"digit\", \"drive.docxs\",\n\t\t\t\t\"drive.docxx\", \"drive.\", \".drive\", \".docx \",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tcomment: \"strictly escaped '.'\",\n\t\t\tclauses: []string{\"^\\\\.\", \"#!\\\\.driveignore\"},\n\t\t\tmustBeIgnored: []string{\".git\", \".driveignore\", \".bashrc\"},\n\t\t\tmustNotBeIgnored: []string{\n\t\t\t\t\"\", \" \", \"frogdocxs\", \"digit\", \"drive.docxs\",\n\t\t\t\t\"drive.docxx\", \"drive.\", \" .drive\", \"a.docx \",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tcomment: \"include vs exclude issue #535\",\n\t\t\tclauses: []string{\"\\\\.\", \"!^\\\\.docx$\", \"!\\\\.bashrc\", \"#!\\\\.driveignore\"},\n\t\t\tmustBeIgnored: []string{\".git\", \"drive.docx\", \".docx \", \".driveignore\"},\n\t\t\tmustNotBeIgnored: []string{\n\t\t\t\t\".docx\", \".bashrc\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tignorer, err := ignorerByClause(tc.clauses...)\n\t\tif tc.mustErr {\n\t\t\tif err == nil {\n\t\t\t\tt.Fatalf(\"expected to err with clause %v comment %q\", tc.clauses, tc.comment)\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\tt.Fatalf(\"%v should not err. Got %v\", tc.clauses, err)\n\t\t}\n\n\t\tif tc.nilIgnorer {\n\t\t\tif ignorer != nil {\n\t\t\t\tt.Fatalf(\"ignorer for (%v)(%q) expected to be nil, got %p\", tc.clauses, tc.comment, ignorer)\n\t\t\t}\n\t\t} else if ignorer == nil {\n\t\t\tt.Fatalf(\"ignorer not expected to be nil for (%v) %q\", tc.clauses, tc.comment)\n\t\t}\n\n\t\tif !tc.nilIgnorer && ignorer != nil {\n\t\t\tfor _, expectedPass := range tc.mustBeIgnored {\n\t\t\t\tif !ignorer(expectedPass) {\n\t\t\t\t\tt.Errorf(\"%q: %q must be ignored\", tc.comment, expectedPass)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, expectedFail := range tc.mustNotBeIgnored {\n\t\t\t\tif ignorer(expectedFail) {\n\t\t\t\t\tt.Errorf(\"%q: %q must not be ignored\", tc.comment, expectedFail)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestReadFile(t *testing.T) {\n\townFilepath := callerFilepath()\n\tcomment := `\n\/\/ A comment right here intentionally put that will self read and consumed.\n+ A follow up right here and now.\n`\n\tclauses, err := readCommentedFile(ownFilepath, \"\/\/\")\n\tif err != nil {\n\t\tt.Fatalf(\"%q is currently being run and should be read successfully, instead got err %v\", ownFilepath, err)\n\t}\n\n\tif len(clauses) < 1 {\n\t\tt.Errorf(\"expecting at least one line in this file %q\", ownFilepath)\n\t}\n\n\trestitched := strings.Join(clauses, \"\\n\")\n\tif strings.Index(restitched, comment) != -1 {\n\t\tt.Errorf(\"%q should have been ignored as a comment\", comment)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package deploy\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/pkg\/sftp\"\n\t\"golang.org\/x\/crypto\/ssh\"\n)\n\ntype RemoteDeployer struct {\n\tssh *ssh.Client\n\ttargetDirectory string\n}\n\nfunc NewRemoteDeployer(s *ssh.Client, target string) (*RemoteDeployer, error) {\n\td := &RemoteDeployer{\n\t\tssh: s,\n\t\ttargetDirectory: target,\n\t}\n\n\tclient, err := sftp.NewClient(d.ssh)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to create new sftp client\")\n\t}\n\tdefer client.Close()\n\n\tif err := client.MkdirAll(d.targetDirectory); err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to make target directory\")\n\t}\n\n\treturn d, nil\n}\n\n\/\/ targetDirectory にファイルを転送し、 path にシンボリックリンクを貼る\nfunc (d RemoteDeployer) SendFile(body []byte, path string, permission os.FileMode) error {\n\tclient, err := sftp.NewClient(d.ssh)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Failed to create new sftp client\")\n\t}\n\tdefer client.Close()\n\n\tvar target string\n\tif !filepath.IsAbs(path) {\n\t\ttarget = filepath.Join(d.targetDirectory, path)\n\t} else {\n\t\tfilename := filepath.Base(path)\n\t\ttarget = filepath.Join(d.targetDirectory, filename)\n\n\t\tif _, err := client.Lstat(path); err == nil {\n\t\t\tif err := client.Remove(path); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"Failed to remove old symbolic link\")\n\t\t\t}\n\t\t}\n\n\t\tif err := client.Symlink(target, path); err != nil {\n\t\t\treturn errors.Wrap(err, \"Failed to create symbolic link\")\n\t\t}\n\t}\n\n\tif _, err := client.Stat(target); err == nil {\n\t\tif err := client.Remove(target); err != nil {\n\t\t\treturn errors.Wrap(err, \"Failed to delete target\")\n\t\t}\n\t}\n\n\tfile, err := client.Create(target)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Failed to create new file\")\n\t}\n\tdefer file.Close()\n\n\tif err := file.Chmod(permission); err != nil {\n\t\treturn errors.Wrap(err, \"Failed to change permission\")\n\t}\n\n\tif _, err := file.Write(body); err != nil {\n\t\treturn errors.Wrap(err, \"Failed to write to file\")\n\t}\n\n\treturn nil\n}\n\nfunc (d RemoteDeployer) ReadSelf() ([]byte, error) {\n\tpath, err := filepath.Abs(os.Args[0])\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to get absolute path\")\n\t}\n\n\tif !filepath.IsAbs(path) {\n\t\treturn nil, fmt.Errorf(\"Use binary with absolute path to read self\")\n\t}\n\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to open self\")\n\t}\n\tdefer file.Close()\n\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(file)\n\treturn buf.Bytes(), nil\n}\n\nfunc (d RemoteDeployer) Command(command string, stdout, stderr io.Writer) error {\n\tsess, err := d.ssh.NewSession()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Failed to create new session\")\n\t}\n\tdefer sess.Close()\n\n\tsess.Stdout = stdout\n\tsess.Stderr = stderr\n\n\tif err := sess.Run(command); err != nil {\n\t\tif ee, ok := err.(*ssh.ExitError); ok {\n\t\t\treturn fmt.Errorf(\"'%s' exit status is not 0: code=%d\", command, ee.ExitStatus())\n\t\t}\n\n\t\treturn errors.Wrapf(err, \"Failed to command '%s'\", command)\n\t}\n\n\treturn nil\n}\n<commit_msg>sudo for deployer<commit_after>package deploy\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/pkg\/sftp\"\n\t\"golang.org\/x\/crypto\/ssh\"\n\t\"golang.org\/x\/crypto\/ssh\/terminal\"\n)\n\ntype RemoteDeployer struct {\n\tssh *ssh.Client\n\tpassword string\n\ttargetDirectory string\n}\n\nfunc NewRemoteDeployer(s *ssh.Client, target string) (*RemoteDeployer, error) {\n\td := &RemoteDeployer{\n\t\tssh: s,\n\t\ttargetDirectory: target,\n\t}\n\n\terr := d.CheckPriv()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to check privilege\")\n\t}\n\n\tclient, err := sftp.NewClient(d.ssh)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to create new sftp client\")\n\t}\n\tdefer client.Close()\n\n\tif err := client.MkdirAll(d.targetDirectory); err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to make target directory\")\n\t}\n\n\treturn d, nil\n}\n\n\/\/ targetDirectory にファイルを転送し、 path にシンボリックリンクを貼る\nfunc (d RemoteDeployer) SendFile(body []byte, path string, permission os.FileMode) error {\n\tclient, err := sftp.NewClient(d.ssh)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Failed to create new sftp client\")\n\t}\n\tdefer client.Close()\n\n\tvar target string\n\tif !filepath.IsAbs(path) {\n\t\ttarget = filepath.Join(d.targetDirectory, path)\n\t} else {\n\t\tfilename := filepath.Base(path)\n\t\ttarget = filepath.Join(d.targetDirectory, filename)\n\n\t\tif _, err := client.Lstat(path); err == nil {\n\t\t\tif err := client.Remove(path); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"Failed to remove old symbolic link\")\n\t\t\t}\n\t\t}\n\n\t\tif err := client.Symlink(target, path); err != nil {\n\t\t\treturn errors.Wrap(err, \"Failed to create symbolic link\")\n\t\t}\n\t}\n\n\tfmt.Println(target)\n\n\tif _, err := client.Stat(target); err == nil {\n\t\tif err := client.Remove(target); err != nil {\n\t\t\treturn errors.Wrap(err, \"Failed to delete target\")\n\t\t}\n\t}\n\n\tfile, err := client.Create(target)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Failed to create new file\")\n\t}\n\tdefer file.Close()\n\n\tif err := file.Chmod(permission); err != nil {\n\t\treturn errors.Wrap(err, \"Failed to change permission\")\n\t}\n\n\tif _, err := file.Write(body); err != nil {\n\t\treturn errors.Wrap(err, \"Failed to write to file\")\n\t}\n\n\treturn nil\n}\n\nfunc (d RemoteDeployer) ReadSelf() ([]byte, error) {\n\tpath, err := filepath.Abs(os.Args[0])\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to get absolute path\")\n\t}\n\n\tif !filepath.IsAbs(path) {\n\t\treturn nil, fmt.Errorf(\"Use binary with absolute path to read self\")\n\t}\n\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to open self\")\n\t}\n\tdefer file.Close()\n\n\tbuf := new(bytes.Buffer)\n\tbuf.ReadFrom(file)\n\treturn buf.Bytes(), nil\n}\n\nfunc (d RemoteDeployer) Command(command string, stdout, stderr io.Writer) error {\n\tsess, err := d.ssh.NewSession()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Failed to create new session\")\n\t}\n\tdefer sess.Close()\n\n\tsess.Stdout = stdout\n\tsess.Stderr = stderr\n\n\tif len(d.password) > 0 {\n\t\tcommand = \"echo \" + d.password + \" | sudo -S \" + command\n\t}\n\n\tif err := sess.Run(command); err != nil {\n\t\tif ee, ok := err.(*ssh.ExitError); ok {\n\t\t\treturn fmt.Errorf(\"'%s' exit status is not 0: code=%d\", command, ee.ExitStatus())\n\t\t}\n\n\t\treturn errors.Wrapf(err, \"Failed to command '%s'\", command)\n\t}\n\n\treturn nil\n}\n\nfunc (d *RemoteDeployer) CheckPriv() error {\n\tsess, err := d.ssh.NewSession()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to create new session\")\n\t}\n\tdefer sess.Close()\n\n\tout, err := sess.Output(\"id -u\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to execute `id -u`\")\n\t}\n\tuid, err := strconv.Atoi(strings.TrimRight(string(out), \"\\n\"))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to convert to uid from: %s\", out)\n\t}\n\n\tif uid != 0 {\n\t\tfor cnt := 0; cnt < 3; cnt++ {\n\t\t\tsess, err := d.ssh.NewSession()\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to create new session\")\n\t\t\t}\n\t\t\tdefer sess.Close()\n\n\t\t\tfmt.Print(\"input password: \")\n\t\t\tpassword, err := terminal.ReadPassword(int(syscall.Stdin))\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to read password\")\n\t\t\t}\n\t\t\tfmt.Println(\"checking password...\")\n\n\t\t\terr = sess.Run(\"echo \" + string(password) + \" | sudo -S id -u\")\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\td.password = string(password)\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"Wrong password\")\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"math\/big\"\n\t\"os\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/NebulousLabs\/Sia\/api\"\n\t\"github.com\/NebulousLabs\/Sia\/modules\"\n\t\"github.com\/NebulousLabs\/Sia\/types\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\thostCmd = &cobra.Command{\n\t\tUse: \"host\",\n\t\tShort: \"Perform host actions\",\n\t\tLong: \"View or modify host settings.\",\n\t\tRun: wrap(hostcmd),\n\t}\n\n\thostConfigCmd = &cobra.Command{\n\t\tUse: \"config [setting] [value]\",\n\t\tShort: \"Modify host settings\",\n\t\tLong: `Modify host settings.\nAvailable settings:\n\nParameter Unit\n\nacceptingcontracts boolean\ncollateral currency\/TB\ncollateralbudget currency\nmaxcollateral currency\nmaxdownloadbatchsize int\nmaxduration int\nmaxrevisebatchsize int\nminimumcontractprice currency\nminimumdownloadbandwidthprice currency\/TB\nminimumstorageprice currency\/TB\/month\nminimumuploadbandwidthprice currency\/TB\nnetaddress string\nwindowsize int\n\nCurrency units can be specified, e.g. 10SC; run 'siac help wallet' for details.\n\nFor a description of each parameter, see doc\/API.md.\n\nTo configure the host to accept new contracts, set acceptingcontracts to true:\n\tsiac host config acceptingcontracts true\n`,\n\t\tRun: wrap(hostconfigcmd),\n\t}\n\n\thostAnnounceCmd = &cobra.Command{\n\t\tUse: \"announce\",\n\t\tShort: \"Announce yourself as a host\",\n\t\tLong: `Announce yourself as a host on the network.\nAnnouncing will also configure the host to start accepting contracts.\nYou can revert this by running:\n\tsiac host config acceptingcontracts false\nYou may also supply a specific address to be announced, e.g.:\n\tsiac host announce my-host-domain.com:9001\nDoing so will override the standard connectivity checks.`,\n\t\tRun: hostannouncecmd,\n\t}\n\n\thostFolderCmd = &cobra.Command{\n\t\tUse: \"folder\",\n\t\tShort: \"Add, remove, or resize a storage folder\",\n\t\tLong: \"Add, remove, or resize a storage folder.\",\n\t}\n\n\thostFolderAddCmd = &cobra.Command{\n\t\tUse: \"add [path] [size]\",\n\t\tShort: \"Add a storage folder to the host\",\n\t\tLong: \"Add a storage folder to the host, specifying how much data it should store\",\n\t\tRun: wrap(hostfolderaddcmd),\n\t}\n\n\thostFolderRemoveCmd = &cobra.Command{\n\t\tUse: \"remove [path]\",\n\t\tShort: \"Remove a storage folder from the host\",\n\t\tLong: `Remove a storage folder from the host. Note that this does not delete any\ndata; it will instead be distributed across the remaining storage folders.`,\n\n\t\tRun: wrap(hostfolderremovecmd),\n\t}\n\n\thostFolderResizeCmd = &cobra.Command{\n\t\tUse: \"resize [path] [size]\",\n\t\tShort: \"Resize a storage folder\",\n\t\tLong: `Change how much data a storage folder should store. If the new size is less\nthan what the folder is currently storing, data will be distributed across the\nother storage folders.`,\n\t\tRun: wrap(hostfolderresizecmd),\n\t}\n\n\thostSectorCmd = &cobra.Command{\n\t\tUse: \"sector\",\n\t\tShort: \"Add or delete a sector (add not supported)\",\n\t\tLong: `Add or delete a sector. Adding is not currently supported. Note that\ndeleting a sector may impact host revenue.`,\n\t}\n\n\thostSectorDeleteCmd = &cobra.Command{\n\t\tUse: \"delete [root]\",\n\t\tShort: \"Delete a sector\",\n\t\tLong: `Delete a sector, identified by its Merkle root. Note that deleting a\nsector may impact host revenue.`,\n\t\tRun: wrap(hostsectordeletecmd),\n\t}\n)\n\n\/\/ hostcmd is the handler for the command `siac host`.\n\/\/ Prints info about the host and its storage folders.\nfunc hostcmd() {\n\thg := new(api.HostGET)\n\terr := getAPI(\"\/host\", &hg)\n\tif err != nil {\n\t\tdie(\"Could not fetch host settings:\", err)\n\t}\n\tsg := new(api.StorageGET)\n\terr = getAPI(\"\/storage\", &sg)\n\tif err != nil {\n\t\tdie(\"Could not fetch storage info:\", err)\n\t}\n\n\tes := hg.ExternalSettings\n\tfm := hg.FinancialMetrics\n\tis := hg.InternalSettings\n\tnm := hg.NetworkMetrics\n\n\t\/\/ calculate total storage available and remaining\n\tvar totalstorage, storageremaining uint64\n\tfor _, folder := range sg.StorageFolderMetadata {\n\t\ttotalstorage += folder.Capacity\n\t\tstorageremaining += folder.CapacityRemaining\n\t}\n\n\t\/\/ convert accepting bool\n\taccept := yesNo(is.AcceptingContracts)\n\t\/\/ convert price from bytes\/block to TB\/Month\n\tprice := currencyUnits(is.MinimumStoragePrice.Mul(modules.BlockBytesPerMonthTerabyte))\n\t\/\/ calculate total revenue\n\ttotalRevenue := fm.ContractCompensation.\n\t\tAdd(fm.StorageRevenue).\n\t\tAdd(fm.DownloadBandwidthRevenue).\n\t\tAdd(fm.UploadBandwidthRevenue)\n\ttotalPotentialRevenue := fm.PotentialContractCompensation.\n\t\tAdd(fm.PotentialStorageRevenue).\n\t\tAdd(fm.PotentialDownloadBandwidthRevenue).\n\t\tAdd(fm.PotentialUploadBandwidthRevenue)\n\tfmt.Printf(`Host info:\n\tStorage: %v (%v used)\n\tPrice: %v \/ TB \/ Month\n\tMax Duration: %v Blocks\n\n\tAccepting Contracts: %v\n\tAnticipated Revenue: %v\n\tRevenue: %v\n\tLost Revenue: %v\n\tLost Collateral: %v\n`, filesizeUnits(int64(totalstorage)), filesizeUnits(int64(totalstorage-storageremaining)),\n\t\tprice, is.MaxDuration, accept, currencyUnits(totalPotentialRevenue),\n\t\tcurrencyUnits(totalRevenue), currencyUnits(fm.LostRevenue),\n\t\tcurrencyUnits(fm.LostStorageCollateral))\n\n\t\/\/ display more info if verbose flag is set\n\tif hostVerbose {\n\t\t\/\/ describe net address\n\t\tnetaddr := es.NetAddress\n\t\tif is.NetAddress == \"\" {\n\t\t\tnetaddr += \" (automatically determined)\"\n\t\t} else {\n\t\t\tnetaddr += \" (manually specified)\"\n\t\t}\n\t\tfmt.Printf(`\n\tNet Address: %v\n\nRPC Stats:\n\tError Calls: %v\n\tUnrecognized Calls: %v\n\tDownload Calls: %v\n\tRenew Calls: %v\n\tRevise Calls: %v\n\tSettings Calls: %v\n\tFormContract Calls: %v\n`, netaddr, nm.ErrorCalls, nm.UnrecognizedCalls, nm.DownloadCalls,\n\t\t\tnm.RenewCalls, nm.ReviseCalls, nm.SettingsCalls, nm.FormContractCalls)\n\t}\n\n\tfmt.Println(\"\\nStorage Folders:\")\n\n\t\/\/ display storage folder info\n\tif len(sg.StorageFolderMetadata) == 0 {\n\t\tfmt.Println(\"No storage folders configured\")\n\t\treturn\n\t}\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 4, ' ', 0)\n\tfmt.Fprintf(w, \"\\tUsed\\tCapacity\\t%% Used\\tPath\\n\")\n\tfor _, folder := range sg.StorageFolderMetadata {\n\t\tcurSize := int64(folder.Capacity - folder.CapacityRemaining)\n\t\tpctUsed := 100 * (float64(curSize) \/ float64(folder.Capacity))\n\t\tfmt.Fprintf(w, \"\\t%s\\t%s\\t%.2f\\t%s\\n\", filesizeUnits(curSize), filesizeUnits(int64(folder.Capacity)), pctUsed, folder.Path)\n\t}\n\tw.Flush()\n}\n\n\/\/ hostconfigcmd is the handler for the command `siac host config [setting] [value]`.\n\/\/ Modifies host settings.\nfunc hostconfigcmd(param, value string) {\n\tswitch param {\n\t\/\/ currency (convert to hastings)\n\tcase \"collateralbudget\", \"maxcollateral\", \"minimumcontractprice\":\n\t\thastings, err := parseCurrency(value)\n\t\tif err != nil {\n\t\t\tdie(\"Could not parse \"+param+\":\", err)\n\t\t}\n\t\tvalue = hastings\n\n\t\/\/ currency\/TB (convert to hastings\/byte)\n\tcase \"collateral\", \"minimumdownloadbandwidthprice\", \"minimumuploadbandwidthprice\":\n\t\thastings, err := parseCurrency(value)\n\t\tif err != nil {\n\t\t\tdie(\"Could not parse \"+param+\":\", err)\n\t\t}\n\t\ti, _ := new(big.Int).SetString(hastings, 10)\n\t\tc := types.NewCurrency(i).Div(modules.BytesPerTerabyte)\n\t\tvalue = c.String()\n\n\t\/\/ currency\/TB\/month (convert to hastings\/byte\/block)\n\tcase \"minimumstorageprice\":\n\t\thastings, err := parseCurrency(value)\n\t\tif err != nil {\n\t\t\tdie(\"Could not parse \"+param+\":\", err)\n\t\t}\n\t\ti, _ := new(big.Int).SetString(hastings, 10)\n\t\tc := types.NewCurrency(i).Div(modules.BlockBytesPerMonthTerabyte)\n\t\tvalue = c.String()\n\n\t\/\/ other valid settings\n\tcase \"acceptingcontracts\", \"maxdownloadbatchsize\", \"maxduration\",\n\t\t\"maxrevisebatchsize\", \"netaddress\", \"windowsize\":\n\n\t\/\/ invalid settings\n\tdefault:\n\t\tdie(\"\\\"\" + param + \"\\\" is not a host setting\")\n\t}\n\terr := post(\"\/host\", param+\"=\"+value)\n\tif err != nil {\n\t\tdie(\"Could not update host settings:\", err)\n\t}\n\tfmt.Println(\"Host settings updated.\")\n}\n\n\/\/ hostannouncecmd is the handler for the command `siac host announce`.\n\/\/ Announces yourself as a host to the network. Optionally takes an address to\n\/\/ announce as.\nfunc hostannouncecmd(cmd *cobra.Command, args []string) {\n\tvar err error\n\tswitch len(args) {\n\tcase 0:\n\t\terr = post(\"\/host\/announce\", \"\")\n\tcase 1:\n\t\terr = post(\"\/host\/announce\", \"netaddress=\"+args[0])\n\tdefault:\n\t\tcmd.Usage()\n\t\tos.Exit(exitCodeUsage)\n\t}\n\tif err != nil {\n\t\tdie(\"Could not announce host:\", err)\n\t}\n\tfmt.Println(\"Host announcement submitted to network.\")\n\n\t\/\/ start accepting contracts\n\terr = post(\"\/host\", \"acceptingcontracts=true\")\n\tif err != nil {\n\t\tdie(\"Could not configure host to accept contracts:\", err)\n\t}\n\tfmt.Println(`\nThe host has also been configured to accept contracts.\nTo revert this, run:\n\tsiac host config acceptingcontracts false\n`)\n}\n\n\/\/ hostfolderaddcmd adds a folder to the host.\nfunc hostfolderaddcmd(path, size string) {\n\tsize, err := parseFilesize(size)\n\tif err != nil {\n\t\tdie(\"Could not parse size:\", err)\n\t}\n\terr = post(\"\/storage\/folders\/add\", fmt.Sprintf(\"path=%s&size=%s\", abs(path), size))\n\tif err != nil {\n\t\tdie(\"Could not add folder:\", err)\n\t}\n\tfmt.Println(\"Added folder\", path)\n}\n\n\/\/ hostfolderremovecmd removes a folder from the host.\nfunc hostfolderremovecmd(path string) {\n\terr := post(\"\/storage\/folders\/remove\", \"path=\"+abs(path))\n\tif err != nil {\n\t\tdie(\"Could not remove folder:\", err)\n\t}\n\tfmt.Println(\"Removed folder\", path)\n}\n\n\/\/ hostfolderresizecmd resizes a folder in the host.\nfunc hostfolderresizecmd(path, newsize string) {\n\tnewsize, err := parseFilesize(newsize)\n\tif err != nil {\n\t\tdie(\"Could not parse size:\", err)\n\t}\n\terr = post(\"\/storage\/folders\/resize\", fmt.Sprintf(\"path=%s&newsize=%s\", abs(path), newsize))\n\tif err != nil {\n\t\tdie(\"Could not resize folder:\", err)\n\t}\n\tfmt.Printf(\"Resized folder %v to %v\\n\", path, newsize)\n}\n\n\/\/ hostsectordeletecmd deletes a sector from the host.\nfunc hostsectordeletecmd(root string) {\n\terr := post(\"\/storage\/sectors\/delete\/\"+root, \"\")\n\tif err != nil {\n\t\tdie(\"Could not delete sector:\", err)\n\t}\n\tfmt.Println(\"Deleted sector\", root)\n}\n<commit_msg>report collateral statistics in siac<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"math\/big\"\n\t\"os\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/NebulousLabs\/Sia\/api\"\n\t\"github.com\/NebulousLabs\/Sia\/modules\"\n\t\"github.com\/NebulousLabs\/Sia\/types\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\thostCmd = &cobra.Command{\n\t\tUse: \"host\",\n\t\tShort: \"Perform host actions\",\n\t\tLong: \"View or modify host settings.\",\n\t\tRun: wrap(hostcmd),\n\t}\n\n\thostConfigCmd = &cobra.Command{\n\t\tUse: \"config [setting] [value]\",\n\t\tShort: \"Modify host settings\",\n\t\tLong: `Modify host settings.\nAvailable settings:\n\nParameter Unit\n\nacceptingcontracts boolean\ncollateral currency\/TB\ncollateralbudget currency\nmaxcollateral currency\nmaxdownloadbatchsize int\nmaxduration int\nmaxrevisebatchsize int\nminimumcontractprice currency\nminimumdownloadbandwidthprice currency\/TB\nminimumstorageprice currency\/TB\/month\nminimumuploadbandwidthprice currency\/TB\nnetaddress string\nwindowsize int\n\nCurrency units can be specified, e.g. 10SC; run 'siac help wallet' for details.\n\nFor a description of each parameter, see doc\/API.md.\n\nTo configure the host to accept new contracts, set acceptingcontracts to true:\n\tsiac host config acceptingcontracts true\n`,\n\t\tRun: wrap(hostconfigcmd),\n\t}\n\n\thostAnnounceCmd = &cobra.Command{\n\t\tUse: \"announce\",\n\t\tShort: \"Announce yourself as a host\",\n\t\tLong: `Announce yourself as a host on the network.\nAnnouncing will also configure the host to start accepting contracts.\nYou can revert this by running:\n\tsiac host config acceptingcontracts false\nYou may also supply a specific address to be announced, e.g.:\n\tsiac host announce my-host-domain.com:9001\nDoing so will override the standard connectivity checks.`,\n\t\tRun: hostannouncecmd,\n\t}\n\n\thostFolderCmd = &cobra.Command{\n\t\tUse: \"folder\",\n\t\tShort: \"Add, remove, or resize a storage folder\",\n\t\tLong: \"Add, remove, or resize a storage folder.\",\n\t}\n\n\thostFolderAddCmd = &cobra.Command{\n\t\tUse: \"add [path] [size]\",\n\t\tShort: \"Add a storage folder to the host\",\n\t\tLong: \"Add a storage folder to the host, specifying how much data it should store\",\n\t\tRun: wrap(hostfolderaddcmd),\n\t}\n\n\thostFolderRemoveCmd = &cobra.Command{\n\t\tUse: \"remove [path]\",\n\t\tShort: \"Remove a storage folder from the host\",\n\t\tLong: `Remove a storage folder from the host. Note that this does not delete any\ndata; it will instead be distributed across the remaining storage folders.`,\n\n\t\tRun: wrap(hostfolderremovecmd),\n\t}\n\n\thostFolderResizeCmd = &cobra.Command{\n\t\tUse: \"resize [path] [size]\",\n\t\tShort: \"Resize a storage folder\",\n\t\tLong: `Change how much data a storage folder should store. If the new size is less\nthan what the folder is currently storing, data will be distributed across the\nother storage folders.`,\n\t\tRun: wrap(hostfolderresizecmd),\n\t}\n\n\thostSectorCmd = &cobra.Command{\n\t\tUse: \"sector\",\n\t\tShort: \"Add or delete a sector (add not supported)\",\n\t\tLong: `Add or delete a sector. Adding is not currently supported. Note that\ndeleting a sector may impact host revenue.`,\n\t}\n\n\thostSectorDeleteCmd = &cobra.Command{\n\t\tUse: \"delete [root]\",\n\t\tShort: \"Delete a sector\",\n\t\tLong: `Delete a sector, identified by its Merkle root. Note that deleting a\nsector may impact host revenue.`,\n\t\tRun: wrap(hostsectordeletecmd),\n\t}\n)\n\n\/\/ hostcmd is the handler for the command `siac host`.\n\/\/ Prints info about the host and its storage folders.\nfunc hostcmd() {\n\thg := new(api.HostGET)\n\terr := getAPI(\"\/host\", &hg)\n\tif err != nil {\n\t\tdie(\"Could not fetch host settings:\", err)\n\t}\n\tsg := new(api.StorageGET)\n\terr = getAPI(\"\/storage\", &sg)\n\tif err != nil {\n\t\tdie(\"Could not fetch storage info:\", err)\n\t}\n\n\tes := hg.ExternalSettings\n\tfm := hg.FinancialMetrics\n\tis := hg.InternalSettings\n\tnm := hg.NetworkMetrics\n\n\t\/\/ calculate total storage available and remaining\n\tvar totalstorage, storageremaining uint64\n\tfor _, folder := range sg.StorageFolderMetadata {\n\t\ttotalstorage += folder.Capacity\n\t\tstorageremaining += folder.CapacityRemaining\n\t}\n\n\t\/\/ convert accepting bool\n\taccept := yesNo(is.AcceptingContracts)\n\t\/\/ convert price from bytes\/block to TB\/Month\n\tprice := currencyUnits(is.MinimumStoragePrice.Mul(modules.BlockBytesPerMonthTerabyte))\n\t\/\/ calculate total revenue\n\ttotalRevenue := fm.ContractCompensation.\n\t\tAdd(fm.StorageRevenue).\n\t\tAdd(fm.DownloadBandwidthRevenue).\n\t\tAdd(fm.UploadBandwidthRevenue)\n\ttotalPotentialRevenue := fm.PotentialContractCompensation.\n\t\tAdd(fm.PotentialStorageRevenue).\n\t\tAdd(fm.PotentialDownloadBandwidthRevenue).\n\t\tAdd(fm.PotentialUploadBandwidthRevenue)\n\tfmt.Printf(`Host info:\n\tStorage: %v (%v used)\n\tPrice: %v \/ TB \/ Month\n\tMax Duration: %v Blocks\n\n\tAccepting Contracts: %v\n\tAnticipated Revenue: %v\n\tLocked Collateral: %v\n\tRisked Collateral: %v\n\tRevenue: %v\n\tLost Revenue: %v\n\tLost Collateral: %v\n`, filesizeUnits(int64(totalstorage)), filesizeUnits(int64(totalstorage-storageremaining)),\n\t\tprice, is.MaxDuration, accept, currencyUnits(totalPotentialRevenue),\n\t\tcurrencyUnits(fm.LockedStorageCollateral), currencyUnits(fm.RiskedStorageCollateral),\n\t\tcurrencyUnits(totalRevenue), currencyUnits(fm.LostRevenue),\n\t\tcurrencyUnits(fm.LostStorageCollateral))\n\n\t\/\/ display more info if verbose flag is set\n\tif hostVerbose {\n\t\t\/\/ describe net address\n\t\tnetaddr := es.NetAddress\n\t\tif is.NetAddress == \"\" {\n\t\t\tnetaddr += \" (automatically determined)\"\n\t\t} else {\n\t\t\tnetaddr += \" (manually specified)\"\n\t\t}\n\t\tfmt.Printf(`\n\tNet Address: %v\n\nRPC Stats:\n\tError Calls: %v\n\tUnrecognized Calls: %v\n\tDownload Calls: %v\n\tRenew Calls: %v\n\tRevise Calls: %v\n\tSettings Calls: %v\n\tFormContract Calls: %v\n`, netaddr, nm.ErrorCalls, nm.UnrecognizedCalls, nm.DownloadCalls,\n\t\t\tnm.RenewCalls, nm.ReviseCalls, nm.SettingsCalls, nm.FormContractCalls)\n\t}\n\n\tfmt.Println(\"\\nStorage Folders:\")\n\n\t\/\/ display storage folder info\n\tif len(sg.StorageFolderMetadata) == 0 {\n\t\tfmt.Println(\"No storage folders configured\")\n\t\treturn\n\t}\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 4, ' ', 0)\n\tfmt.Fprintf(w, \"\\tUsed\\tCapacity\\t%% Used\\tPath\\n\")\n\tfor _, folder := range sg.StorageFolderMetadata {\n\t\tcurSize := int64(folder.Capacity - folder.CapacityRemaining)\n\t\tpctUsed := 100 * (float64(curSize) \/ float64(folder.Capacity))\n\t\tfmt.Fprintf(w, \"\\t%s\\t%s\\t%.2f\\t%s\\n\", filesizeUnits(curSize), filesizeUnits(int64(folder.Capacity)), pctUsed, folder.Path)\n\t}\n\tw.Flush()\n}\n\n\/\/ hostconfigcmd is the handler for the command `siac host config [setting] [value]`.\n\/\/ Modifies host settings.\nfunc hostconfigcmd(param, value string) {\n\tswitch param {\n\t\/\/ currency (convert to hastings)\n\tcase \"collateralbudget\", \"maxcollateral\", \"minimumcontractprice\":\n\t\thastings, err := parseCurrency(value)\n\t\tif err != nil {\n\t\t\tdie(\"Could not parse \"+param+\":\", err)\n\t\t}\n\t\tvalue = hastings\n\n\t\/\/ currency\/TB (convert to hastings\/byte)\n\tcase \"collateral\", \"minimumdownloadbandwidthprice\", \"minimumuploadbandwidthprice\":\n\t\thastings, err := parseCurrency(value)\n\t\tif err != nil {\n\t\t\tdie(\"Could not parse \"+param+\":\", err)\n\t\t}\n\t\ti, _ := new(big.Int).SetString(hastings, 10)\n\t\tc := types.NewCurrency(i).Div(modules.BytesPerTerabyte)\n\t\tvalue = c.String()\n\n\t\/\/ currency\/TB\/month (convert to hastings\/byte\/block)\n\tcase \"minimumstorageprice\":\n\t\thastings, err := parseCurrency(value)\n\t\tif err != nil {\n\t\t\tdie(\"Could not parse \"+param+\":\", err)\n\t\t}\n\t\ti, _ := new(big.Int).SetString(hastings, 10)\n\t\tc := types.NewCurrency(i).Div(modules.BlockBytesPerMonthTerabyte)\n\t\tvalue = c.String()\n\n\t\/\/ other valid settings\n\tcase \"acceptingcontracts\", \"maxdownloadbatchsize\", \"maxduration\",\n\t\t\"maxrevisebatchsize\", \"netaddress\", \"windowsize\":\n\n\t\/\/ invalid settings\n\tdefault:\n\t\tdie(\"\\\"\" + param + \"\\\" is not a host setting\")\n\t}\n\terr := post(\"\/host\", param+\"=\"+value)\n\tif err != nil {\n\t\tdie(\"Could not update host settings:\", err)\n\t}\n\tfmt.Println(\"Host settings updated.\")\n}\n\n\/\/ hostannouncecmd is the handler for the command `siac host announce`.\n\/\/ Announces yourself as a host to the network. Optionally takes an address to\n\/\/ announce as.\nfunc hostannouncecmd(cmd *cobra.Command, args []string) {\n\tvar err error\n\tswitch len(args) {\n\tcase 0:\n\t\terr = post(\"\/host\/announce\", \"\")\n\tcase 1:\n\t\terr = post(\"\/host\/announce\", \"netaddress=\"+args[0])\n\tdefault:\n\t\tcmd.Usage()\n\t\tos.Exit(exitCodeUsage)\n\t}\n\tif err != nil {\n\t\tdie(\"Could not announce host:\", err)\n\t}\n\tfmt.Println(\"Host announcement submitted to network.\")\n\n\t\/\/ start accepting contracts\n\terr = post(\"\/host\", \"acceptingcontracts=true\")\n\tif err != nil {\n\t\tdie(\"Could not configure host to accept contracts:\", err)\n\t}\n\tfmt.Println(`\nThe host has also been configured to accept contracts.\nTo revert this, run:\n\tsiac host config acceptingcontracts false\n`)\n}\n\n\/\/ hostfolderaddcmd adds a folder to the host.\nfunc hostfolderaddcmd(path, size string) {\n\tsize, err := parseFilesize(size)\n\tif err != nil {\n\t\tdie(\"Could not parse size:\", err)\n\t}\n\terr = post(\"\/storage\/folders\/add\", fmt.Sprintf(\"path=%s&size=%s\", abs(path), size))\n\tif err != nil {\n\t\tdie(\"Could not add folder:\", err)\n\t}\n\tfmt.Println(\"Added folder\", path)\n}\n\n\/\/ hostfolderremovecmd removes a folder from the host.\nfunc hostfolderremovecmd(path string) {\n\terr := post(\"\/storage\/folders\/remove\", \"path=\"+abs(path))\n\tif err != nil {\n\t\tdie(\"Could not remove folder:\", err)\n\t}\n\tfmt.Println(\"Removed folder\", path)\n}\n\n\/\/ hostfolderresizecmd resizes a folder in the host.\nfunc hostfolderresizecmd(path, newsize string) {\n\tnewsize, err := parseFilesize(newsize)\n\tif err != nil {\n\t\tdie(\"Could not parse size:\", err)\n\t}\n\terr = post(\"\/storage\/folders\/resize\", fmt.Sprintf(\"path=%s&newsize=%s\", abs(path), newsize))\n\tif err != nil {\n\t\tdie(\"Could not resize folder:\", err)\n\t}\n\tfmt.Printf(\"Resized folder %v to %v\\n\", path, newsize)\n}\n\n\/\/ hostsectordeletecmd deletes a sector from the host.\nfunc hostsectordeletecmd(root string) {\n\terr := post(\"\/storage\/sectors\/delete\/\"+root, \"\")\n\tif err != nil {\n\t\tdie(\"Could not delete sector:\", err)\n\t}\n\tfmt.Println(\"Deleted sector\", root)\n}\n<|endoftext|>"} {"text":"<commit_before>package gocb\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\ntype viewResponse struct {\n\tTotalRows int `json:\"total_rows,omitempty\"`\n\tRows []json.RawMessage `json:\"rows,omitempty\"`\n\tError string `json:\"error,omitempty\"`\n\tReason string `json:\"reason,omitempty\"`\n}\n\ntype viewError struct {\n\tMessage string `json:\"message\"`\n\tReason string `json:\"reason\"`\n}\n\nfunc (e *viewError) Error() string {\n\treturn e.Message + \" - \" + e.Reason\n}\n\ntype ViewResults interface {\n\tOne(valuePtr interface{}) error\n\tNext(valuePtr interface{}) bool\n\tClose() error\n}\n\ntype viewResults struct {\n\tindex int\n\trows []json.RawMessage\n\terr error\n}\n\nfunc (r *viewResults) Next(valuePtr interface{}) bool {\n\tif r.err != nil {\n\t\treturn false\n\t}\n\n\trow := r.NextBytes()\n\tif row == nil {\n\t\treturn false\n\t}\n\n\tr.err = json.Unmarshal(row, valuePtr)\n\tif r.err != nil {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (r *viewResults) NextBytes() []byte {\n\tif r.err != nil {\n\t\treturn nil\n\t}\n\n\tif r.index+1 >= len(r.rows) {\n\t\treturn nil\n\t}\n\tr.index++\n\n\treturn r.rows[r.index]\n}\n\nfunc (r *viewResults) Close() error {\n\treturn r.err\n}\n\nfunc (r *viewResults) One(valuePtr interface{}) error {\n\tif !r.Next(valuePtr) {\n\t\terr := r.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn ErrNoResults\n\t}\n\t\/\/ Ignore any errors occuring after we already have our result\n\tr.Close()\n\t\/\/ Return no error as we got the one result already.\n\treturn nil\n}\n\nfunc (b *Bucket) executeViewQuery(viewType, ddoc, viewName string, options url.Values) (ViewResults, error) {\n\tcapiEp, err := b.getViewEp()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treqUri := fmt.Sprintf(\"%s\/_design\/%s\/%s\/%s?%s\", capiEp, ddoc, viewType, viewName, options.Encode())\n\n\treq, err := http.NewRequest(\"GET\", reqUri, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif b.cluster.auth != nil {\n\t\tuserPass := b.cluster.auth.bucketViews(b.name)\n\t\treq.SetBasicAuth(userPass.Username, userPass.Password)\n\t} else {\n\t\treq.SetBasicAuth(b.name, b.password)\n\t}\n\n\tresp, err := doHttpWithTimeout(b.client.HttpClient(), req, b.viewTimeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tviewResp := viewResponse{}\n\tjsonDec := json.NewDecoder(resp.Body)\n\tjsonDec.Decode(&viewResp)\n\n\tresp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\tif viewResp.Error != \"\" {\n\t\t\treturn nil, &viewError{\n\t\t\t\tMessage: viewResp.Error,\n\t\t\t\tReason: viewResp.Reason,\n\t\t\t}\n\t\t}\n\n\t\treturn nil, &viewError{\n\t\t\tMessage: \"HTTP Error\",\n\t\t\tReason: fmt.Sprintf(\"Status code was %d.\", resp.StatusCode),\n\t\t}\n\t}\n\n\treturn &viewResults{\n\t\tindex: -1,\n\t\trows: viewResp.Rows,\n\t}, nil\n}\n\n\/\/ Performs a view query and returns a list of rows or an error.\nfunc (b *Bucket) ExecuteViewQuery(q *ViewQuery) (ViewResults, error) {\n\treturn b.executeViewQuery(\"_view\", q.ddoc, q.name, q.options)\n}\n\n\/\/ Performs a spatial query and returns a list of rows or an error.\nfunc (b *Bucket) ExecuteSpatialQuery(q *SpatialQuery) (ViewResults, error) {\n\treturn b.executeViewQuery(\"_spatial\", q.ddoc, q.name, q.options)\n}\n\n\/\/ Performs a spatial query and returns a list of rows or an error.\nfunc (b *Bucket) ExecuteN1qlQuery(q *N1qlQuery, params interface{}) (ViewResults, error) {\n\treturn b.cluster.doN1qlQuery(b, q, params)\n}\n<commit_msg>Do not ignore JSON decoding errors when parsing view responses.<commit_after>package gocb\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\ntype viewResponse struct {\n\tTotalRows int `json:\"total_rows,omitempty\"`\n\tRows []json.RawMessage `json:\"rows,omitempty\"`\n\tError string `json:\"error,omitempty\"`\n\tReason string `json:\"reason,omitempty\"`\n}\n\ntype viewError struct {\n\tMessage string `json:\"message\"`\n\tReason string `json:\"reason\"`\n}\n\nfunc (e *viewError) Error() string {\n\treturn e.Message + \" - \" + e.Reason\n}\n\ntype ViewResults interface {\n\tOne(valuePtr interface{}) error\n\tNext(valuePtr interface{}) bool\n\tClose() error\n}\n\ntype viewResults struct {\n\tindex int\n\trows []json.RawMessage\n\terr error\n}\n\nfunc (r *viewResults) Next(valuePtr interface{}) bool {\n\tif r.err != nil {\n\t\treturn false\n\t}\n\n\trow := r.NextBytes()\n\tif row == nil {\n\t\treturn false\n\t}\n\n\tr.err = json.Unmarshal(row, valuePtr)\n\tif r.err != nil {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (r *viewResults) NextBytes() []byte {\n\tif r.err != nil {\n\t\treturn nil\n\t}\n\n\tif r.index+1 >= len(r.rows) {\n\t\treturn nil\n\t}\n\tr.index++\n\n\treturn r.rows[r.index]\n}\n\nfunc (r *viewResults) Close() error {\n\treturn r.err\n}\n\nfunc (r *viewResults) One(valuePtr interface{}) error {\n\tif !r.Next(valuePtr) {\n\t\terr := r.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn ErrNoResults\n\t}\n\t\/\/ Ignore any errors occuring after we already have our result\n\tr.Close()\n\t\/\/ Return no error as we got the one result already.\n\treturn nil\n}\n\nfunc (b *Bucket) executeViewQuery(viewType, ddoc, viewName string, options url.Values) (ViewResults, error) {\n\tcapiEp, err := b.getViewEp()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treqUri := fmt.Sprintf(\"%s\/_design\/%s\/%s\/%s?%s\", capiEp, ddoc, viewType, viewName, options.Encode())\n\n\treq, err := http.NewRequest(\"GET\", reqUri, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif b.cluster.auth != nil {\n\t\tuserPass := b.cluster.auth.bucketViews(b.name)\n\t\treq.SetBasicAuth(userPass.Username, userPass.Password)\n\t} else {\n\t\treq.SetBasicAuth(b.name, b.password)\n\t}\n\n\tresp, err := doHttpWithTimeout(b.client.HttpClient(), req, b.viewTimeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tviewResp := viewResponse{}\n\tjsonDec := json.NewDecoder(resp.Body)\n\terr = jsonDec.Decode(&viewResp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\tif viewResp.Error != \"\" {\n\t\t\treturn nil, &viewError{\n\t\t\t\tMessage: viewResp.Error,\n\t\t\t\tReason: viewResp.Reason,\n\t\t\t}\n\t\t}\n\n\t\treturn nil, &viewError{\n\t\t\tMessage: \"HTTP Error\",\n\t\t\tReason: fmt.Sprintf(\"Status code was %d.\", resp.StatusCode),\n\t\t}\n\t}\n\n\treturn &viewResults{\n\t\tindex: -1,\n\t\trows: viewResp.Rows,\n\t}, nil\n}\n\n\/\/ Performs a view query and returns a list of rows or an error.\nfunc (b *Bucket) ExecuteViewQuery(q *ViewQuery) (ViewResults, error) {\n\treturn b.executeViewQuery(\"_view\", q.ddoc, q.name, q.options)\n}\n\n\/\/ Performs a spatial query and returns a list of rows or an error.\nfunc (b *Bucket) ExecuteSpatialQuery(q *SpatialQuery) (ViewResults, error) {\n\treturn b.executeViewQuery(\"_spatial\", q.ddoc, q.name, q.options)\n}\n\n\/\/ Performs a spatial query and returns a list of rows or an error.\nfunc (b *Bucket) ExecuteN1qlQuery(q *N1qlQuery, params interface{}) (ViewResults, error) {\n\treturn b.cluster.doN1qlQuery(b, q, params)\n}\n<|endoftext|>"} {"text":"<commit_before>package chain\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/btcsuite\/btcd\/chaincfg\"\n\t\"github.com\/btcsuite\/btcd\/chaincfg\/chainhash\"\n\t\"github.com\/btcsuite\/btcd\/rpcclient\"\n\t\"github.com\/btcsuite\/btcd\/wire\"\n\t\"github.com\/lightninglabs\/gozmq\"\n)\n\nconst (\n\t\/\/ rawBlockZMQCommand is the command used to receive raw block\n\t\/\/ notifications from bitcoind through ZMQ.\n\trawBlockZMQCommand = \"rawblock\"\n\n\t\/\/ rawTxZMQCommand is the command used to receive raw transaction\n\t\/\/ notifications from bitcoind through ZMQ.\n\trawTxZMQCommand = \"rawtx\"\n)\n\n\/\/ BitcoindConn represents a persistent client connection to a bitcoind node\n\/\/ that listens for events read from a ZMQ connection.\ntype BitcoindConn struct {\n\tstarted int32 \/\/ To be used atomically.\n\tstopped int32 \/\/ To be used atomically.\n\n\t\/\/ rescanClientCounter is an atomic counter that assigns a unique ID to\n\t\/\/ each new bitcoind rescan client using the current bitcoind\n\t\/\/ connection.\n\trescanClientCounter uint64\n\n\t\/\/ chainParams identifies the current network the bitcoind node is\n\t\/\/ running on.\n\tchainParams *chaincfg.Params\n\n\t\/\/ client is the RPC client to the bitcoind node.\n\tclient *rpcclient.Client\n\n\t\/\/ zmqBlockConn is the ZMQ connection we'll use to read raw block\n\t\/\/ events.\n\tzmqBlockConn *gozmq.Conn\n\n\t\/\/ zmqTxConn is the ZMQ connection we'll use to read raw transaction\n\t\/\/ events.\n\tzmqTxConn *gozmq.Conn\n\n\t\/\/ rescanClients is the set of active bitcoind rescan clients to which\n\t\/\/ ZMQ event notfications will be sent to.\n\trescanClientsMtx sync.Mutex\n\trescanClients map[uint64]*BitcoindClient\n\n\tquit chan struct{}\n\twg sync.WaitGroup\n}\n\n\/\/ NewBitcoindConn creates a client connection to the node described by the host\n\/\/ string. The ZMQ connections are established immediately to ensure liveness.\n\/\/ If the remote node does not operate on the same bitcoin network as described\n\/\/ by the passed chain parameters, the connection will be disconnected.\nfunc NewBitcoindConn(chainParams *chaincfg.Params,\n\thost, user, pass, zmqBlockHost, zmqTxHost string,\n\tzmqPollInterval time.Duration) (*BitcoindConn, error) {\n\n\tclientCfg := &rpcclient.ConnConfig{\n\t\tHost: host,\n\t\tUser: user,\n\t\tPass: pass,\n\t\tDisableAutoReconnect: false,\n\t\tDisableConnectOnNew: true,\n\t\tDisableTLS: true,\n\t\tHTTPPostMode: true,\n\t}\n\n\tclient, err := rpcclient.New(clientCfg, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Establish two different ZMQ connections to bitcoind to retrieve block\n\t\/\/ and transaction event notifications. We'll use two as a separation of\n\t\/\/ concern to ensure one type of event isn't dropped from the connection\n\t\/\/ queue due to another type of event filling it up.\n\tzmqBlockConn, err := gozmq.Subscribe(\n\t\tzmqBlockHost, []string{rawBlockZMQCommand}, zmqPollInterval,\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to subscribe for zmq block \"+\n\t\t\t\"events: %v\", err)\n\t}\n\n\tzmqTxConn, err := gozmq.Subscribe(\n\t\tzmqTxHost, []string{rawTxZMQCommand}, zmqPollInterval,\n\t)\n\tif err != nil {\n\t\tzmqBlockConn.Close()\n\t\treturn nil, fmt.Errorf(\"unable to subscribe for zmq tx \"+\n\t\t\t\"events: %v\", err)\n\t}\n\n\tconn := &BitcoindConn{\n\t\tchainParams: chainParams,\n\t\tclient: client,\n\t\tzmqBlockConn: zmqBlockConn,\n\t\tzmqTxConn: zmqTxConn,\n\t\trescanClients: make(map[uint64]*BitcoindClient),\n\t\tquit: make(chan struct{}),\n\t}\n\n\treturn conn, nil\n}\n\n\/\/ Start attempts to establish a RPC and ZMQ connection to a bitcoind node. If\n\/\/ successful, a goroutine is spawned to read events from the ZMQ connection.\n\/\/ It's possible for this function to fail due to a limited number of connection\n\/\/ attempts. This is done to prevent waiting forever on the connection to be\n\/\/ established in the case that the node is down.\nfunc (c *BitcoindConn) Start() error {\n\tif !atomic.CompareAndSwapInt32(&c.started, 0, 1) {\n\t\treturn nil\n\t}\n\n\t\/\/ Verify that the node is running on the expected network.\n\tnet, err := c.getCurrentNet()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif net != c.chainParams.Net {\n\t\treturn fmt.Errorf(\"expected network %v, got %v\",\n\t\t\tc.chainParams.Net, net)\n\t}\n\n\tc.wg.Add(2)\n\tgo c.blockEventHandler()\n\tgo c.txEventHandler()\n\n\treturn nil\n}\n\n\/\/ Stop terminates the RPC and ZMQ connection to a bitcoind node and removes any\n\/\/ active rescan clients.\nfunc (c *BitcoindConn) Stop() {\n\tif !atomic.CompareAndSwapInt32(&c.stopped, 0, 1) {\n\t\treturn\n\t}\n\n\tfor _, client := range c.rescanClients {\n\t\tclient.Stop()\n\t}\n\n\tclose(c.quit)\n\tc.client.Shutdown()\n\tc.zmqBlockConn.Close()\n\tc.zmqTxConn.Close()\n\n\tc.client.WaitForShutdown()\n\tc.wg.Wait()\n}\n\n\/\/ blockEventHandler reads raw blocks events from the ZMQ block socket and\n\/\/ forwards them along to the current rescan clients.\n\/\/\n\/\/ NOTE: This must be run as a goroutine.\nfunc (c *BitcoindConn) blockEventHandler() {\n\tdefer c.wg.Done()\n\n\tlog.Info(\"Started listening for bitcoind block notifications via ZMQ \"+\n\t\t\"on\", c.zmqBlockConn.RemoteAddr())\n\n\tfor {\n\t\t\/\/ Before attempting to read from the ZMQ socket, we'll make\n\t\t\/\/ sure to check if we've been requested to shut down.\n\t\tselect {\n\t\tcase <-c.quit:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\t\/\/ Poll an event from the ZMQ socket.\n\t\tmsgBytes, err := c.zmqBlockConn.Receive()\n\t\tif err != nil {\n\t\t\t\/\/ EOF should only be returned if the connection was\n\t\t\t\/\/ explicitly closed, so we can exit at this point.\n\t\t\tif err == io.EOF {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ It's possible that the connection to the socket\n\t\t\t\/\/ continuously times out, so we'll prevent logging this\n\t\t\t\/\/ error to prevent spamming the logs.\n\t\t\tnetErr, ok := err.(net.Error)\n\t\t\tif ok && netErr.Timeout() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlog.Errorf(\"Unable to receive ZMQ %v message: %v\",\n\t\t\t\trawBlockZMQCommand, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ We have an event! We'll now ensure it is a block event,\n\t\t\/\/ deserialize it, and report it to the different rescan\n\t\t\/\/ clients.\n\t\teventType := string(msgBytes[0])\n\t\tswitch eventType {\n\t\tcase rawBlockZMQCommand:\n\t\t\tblock := &wire.MsgBlock{}\n\t\t\tr := bytes.NewReader(msgBytes[1])\n\t\t\tif err := block.Deserialize(r); err != nil {\n\t\t\t\tlog.Errorf(\"Unable to deserialize block: %v\",\n\t\t\t\t\terr)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tc.rescanClientsMtx.Lock()\n\t\t\tfor _, client := range c.rescanClients {\n\t\t\t\tselect {\n\t\t\t\tcase client.zmqBlockNtfns <- block:\n\t\t\t\tcase <-client.quit:\n\t\t\t\tcase <-c.quit:\n\t\t\t\t\tc.rescanClientsMtx.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tc.rescanClientsMtx.Unlock()\n\t\tdefault:\n\t\t\t\/\/ It's possible that the message wasn't fully read if\n\t\t\t\/\/ bitcoind shuts down, which will produce an unreadable\n\t\t\t\/\/ event type. To prevent from logging it, we'll make\n\t\t\t\/\/ sure it conforms to the ASCII standard.\n\t\t\tif eventType == \"\" || !isASCII(eventType) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlog.Warnf(\"Received unexpected event type from %v \"+\n\t\t\t\t\"subscription: %v\", rawBlockZMQCommand,\n\t\t\t\teventType)\n\t\t}\n\t}\n}\n\n\/\/ txEventHandler reads raw blocks events from the ZMQ block socket and forwards\n\/\/ them along to the current rescan clients.\n\/\/\n\/\/ NOTE: This must be run as a goroutine.\nfunc (c *BitcoindConn) txEventHandler() {\n\tdefer c.wg.Done()\n\n\tlog.Info(\"Started listening for bitcoind transaction notifications \"+\n\t\t\"via ZMQ on\", c.zmqTxConn.RemoteAddr())\n\n\tfor {\n\t\t\/\/ Before attempting to read from the ZMQ socket, we'll make\n\t\t\/\/ sure to check if we've been requested to shut down.\n\t\tselect {\n\t\tcase <-c.quit:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\t\/\/ Poll an event from the ZMQ socket.\n\t\tmsgBytes, err := c.zmqTxConn.Receive()\n\t\tif err != nil {\n\t\t\t\/\/ EOF should only be returned if the connection was\n\t\t\t\/\/ explicitly closed, so we can exit at this point.\n\t\t\tif err == io.EOF {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ It's possible that the connection to the socket\n\t\t\t\/\/ continuously times out, so we'll prevent logging this\n\t\t\t\/\/ error to prevent spamming the logs.\n\t\t\tnetErr, ok := err.(net.Error)\n\t\t\tif ok && netErr.Timeout() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlog.Errorf(\"Unable to receive ZMQ %v message: %v\",\n\t\t\t\trawTxZMQCommand, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ We have an event! We'll now ensure it is a transaction event,\n\t\t\/\/ deserialize it, and report it to the different rescan\n\t\t\/\/ clients.\n\t\teventType := string(msgBytes[0])\n\t\tswitch eventType {\n\t\tcase rawTxZMQCommand:\n\t\t\ttx := &wire.MsgTx{}\n\t\t\tr := bytes.NewReader(msgBytes[1])\n\t\t\tif err := tx.Deserialize(r); err != nil {\n\t\t\t\tlog.Errorf(\"Unable to deserialize \"+\n\t\t\t\t\t\"transaction: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tc.rescanClientsMtx.Lock()\n\t\t\tfor _, client := range c.rescanClients {\n\t\t\t\tselect {\n\t\t\t\tcase client.zmqTxNtfns <- tx:\n\t\t\t\tcase <-client.quit:\n\t\t\t\tcase <-c.quit:\n\t\t\t\t\tc.rescanClientsMtx.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tc.rescanClientsMtx.Unlock()\n\t\tdefault:\n\t\t\t\/\/ It's possible that the message wasn't fully read if\n\t\t\t\/\/ bitcoind shuts down, which will produce an unreadable\n\t\t\t\/\/ event type. To prevent from logging it, we'll make\n\t\t\t\/\/ sure it conforms to the ASCII standard.\n\t\t\tif eventType == \"\" || !isASCII(eventType) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlog.Warnf(\"Received unexpected event type from %v \"+\n\t\t\t\t\"subscription: %v\", rawTxZMQCommand, eventType)\n\t\t}\n\t}\n}\n\n\/\/ getCurrentNet returns the network on which the bitcoind node is running.\nfunc (c *BitcoindConn) getCurrentNet() (wire.BitcoinNet, error) {\n\thash, err := c.client.GetBlockHash(0)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tswitch *hash {\n\tcase *chaincfg.TestNet3Params.GenesisHash:\n\t\treturn chaincfg.TestNet3Params.Net, nil\n\tcase *chaincfg.RegressionNetParams.GenesisHash:\n\t\treturn chaincfg.RegressionNetParams.Net, nil\n\tcase *chaincfg.MainNetParams.GenesisHash:\n\t\treturn chaincfg.MainNetParams.Net, nil\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"unknown network with genesis hash %v\", hash)\n\t}\n}\n\n\/\/ NewBitcoindClient returns a bitcoind client using the current bitcoind\n\/\/ connection. This allows us to share the same connection using multiple\n\/\/ clients.\nfunc (c *BitcoindConn) NewBitcoindClient() *BitcoindClient {\n\treturn &BitcoindClient{\n\t\tquit: make(chan struct{}),\n\n\t\tid: atomic.AddUint64(&c.rescanClientCounter, 1),\n\n\t\tchainParams: c.chainParams,\n\t\tchainConn: c,\n\n\t\trescanUpdate: make(chan interface{}),\n\t\twatchedAddresses: make(map[string]struct{}),\n\t\twatchedOutPoints: make(map[wire.OutPoint]struct{}),\n\t\twatchedTxs: make(map[chainhash.Hash]struct{}),\n\n\t\tnotificationQueue: NewConcurrentQueue(20),\n\t\tzmqTxNtfns: make(chan *wire.MsgTx),\n\t\tzmqBlockNtfns: make(chan *wire.MsgBlock),\n\n\t\tmempool: make(map[chainhash.Hash]struct{}),\n\t\texpiredMempool: make(map[int32]map[chainhash.Hash]struct{}),\n\t}\n}\n\n\/\/ AddClient adds a client to the set of active rescan clients of the current\n\/\/ chain connection. This allows the connection to include the specified client\n\/\/ in its notification delivery.\n\/\/\n\/\/ NOTE: This function is safe for concurrent access.\nfunc (c *BitcoindConn) AddClient(client *BitcoindClient) {\n\tc.rescanClientsMtx.Lock()\n\tdefer c.rescanClientsMtx.Unlock()\n\n\tc.rescanClients[client.id] = client\n}\n\n\/\/ RemoveClient removes the client with the given ID from the set of active\n\/\/ rescan clients. Once removed, the client will no longer receive block and\n\/\/ transaction notifications from the chain connection.\n\/\/\n\/\/ NOTE: This function is safe for concurrent access.\nfunc (c *BitcoindConn) RemoveClient(id uint64) {\n\tc.rescanClientsMtx.Lock()\n\tdefer c.rescanClientsMtx.Unlock()\n\n\tdelete(c.rescanClients, id)\n}\n\n\/\/ isASCII is a helper method that checks whether all bytes in `data` would be\n\/\/ printable ASCII characters if interpreted as a string.\nfunc isASCII(s string) bool {\n\tfor _, c := range s {\n\t\tif c < 32 || c > 126 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n<commit_msg>chain: trace log timed out ZMQ connections<commit_after>package chain\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/btcsuite\/btcd\/chaincfg\"\n\t\"github.com\/btcsuite\/btcd\/chaincfg\/chainhash\"\n\t\"github.com\/btcsuite\/btcd\/rpcclient\"\n\t\"github.com\/btcsuite\/btcd\/wire\"\n\t\"github.com\/lightninglabs\/gozmq\"\n)\n\nconst (\n\t\/\/ rawBlockZMQCommand is the command used to receive raw block\n\t\/\/ notifications from bitcoind through ZMQ.\n\trawBlockZMQCommand = \"rawblock\"\n\n\t\/\/ rawTxZMQCommand is the command used to receive raw transaction\n\t\/\/ notifications from bitcoind through ZMQ.\n\trawTxZMQCommand = \"rawtx\"\n)\n\n\/\/ BitcoindConn represents a persistent client connection to a bitcoind node\n\/\/ that listens for events read from a ZMQ connection.\ntype BitcoindConn struct {\n\tstarted int32 \/\/ To be used atomically.\n\tstopped int32 \/\/ To be used atomically.\n\n\t\/\/ rescanClientCounter is an atomic counter that assigns a unique ID to\n\t\/\/ each new bitcoind rescan client using the current bitcoind\n\t\/\/ connection.\n\trescanClientCounter uint64\n\n\t\/\/ chainParams identifies the current network the bitcoind node is\n\t\/\/ running on.\n\tchainParams *chaincfg.Params\n\n\t\/\/ client is the RPC client to the bitcoind node.\n\tclient *rpcclient.Client\n\n\t\/\/ zmqBlockConn is the ZMQ connection we'll use to read raw block\n\t\/\/ events.\n\tzmqBlockConn *gozmq.Conn\n\n\t\/\/ zmqTxConn is the ZMQ connection we'll use to read raw transaction\n\t\/\/ events.\n\tzmqTxConn *gozmq.Conn\n\n\t\/\/ rescanClients is the set of active bitcoind rescan clients to which\n\t\/\/ ZMQ event notfications will be sent to.\n\trescanClientsMtx sync.Mutex\n\trescanClients map[uint64]*BitcoindClient\n\n\tquit chan struct{}\n\twg sync.WaitGroup\n}\n\n\/\/ NewBitcoindConn creates a client connection to the node described by the host\n\/\/ string. The ZMQ connections are established immediately to ensure liveness.\n\/\/ If the remote node does not operate on the same bitcoin network as described\n\/\/ by the passed chain parameters, the connection will be disconnected.\nfunc NewBitcoindConn(chainParams *chaincfg.Params,\n\thost, user, pass, zmqBlockHost, zmqTxHost string,\n\tzmqPollInterval time.Duration) (*BitcoindConn, error) {\n\n\tclientCfg := &rpcclient.ConnConfig{\n\t\tHost: host,\n\t\tUser: user,\n\t\tPass: pass,\n\t\tDisableAutoReconnect: false,\n\t\tDisableConnectOnNew: true,\n\t\tDisableTLS: true,\n\t\tHTTPPostMode: true,\n\t}\n\n\tclient, err := rpcclient.New(clientCfg, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Establish two different ZMQ connections to bitcoind to retrieve block\n\t\/\/ and transaction event notifications. We'll use two as a separation of\n\t\/\/ concern to ensure one type of event isn't dropped from the connection\n\t\/\/ queue due to another type of event filling it up.\n\tzmqBlockConn, err := gozmq.Subscribe(\n\t\tzmqBlockHost, []string{rawBlockZMQCommand}, zmqPollInterval,\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to subscribe for zmq block \"+\n\t\t\t\"events: %v\", err)\n\t}\n\n\tzmqTxConn, err := gozmq.Subscribe(\n\t\tzmqTxHost, []string{rawTxZMQCommand}, zmqPollInterval,\n\t)\n\tif err != nil {\n\t\tzmqBlockConn.Close()\n\t\treturn nil, fmt.Errorf(\"unable to subscribe for zmq tx \"+\n\t\t\t\"events: %v\", err)\n\t}\n\n\tconn := &BitcoindConn{\n\t\tchainParams: chainParams,\n\t\tclient: client,\n\t\tzmqBlockConn: zmqBlockConn,\n\t\tzmqTxConn: zmqTxConn,\n\t\trescanClients: make(map[uint64]*BitcoindClient),\n\t\tquit: make(chan struct{}),\n\t}\n\n\treturn conn, nil\n}\n\n\/\/ Start attempts to establish a RPC and ZMQ connection to a bitcoind node. If\n\/\/ successful, a goroutine is spawned to read events from the ZMQ connection.\n\/\/ It's possible for this function to fail due to a limited number of connection\n\/\/ attempts. This is done to prevent waiting forever on the connection to be\n\/\/ established in the case that the node is down.\nfunc (c *BitcoindConn) Start() error {\n\tif !atomic.CompareAndSwapInt32(&c.started, 0, 1) {\n\t\treturn nil\n\t}\n\n\t\/\/ Verify that the node is running on the expected network.\n\tnet, err := c.getCurrentNet()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif net != c.chainParams.Net {\n\t\treturn fmt.Errorf(\"expected network %v, got %v\",\n\t\t\tc.chainParams.Net, net)\n\t}\n\n\tc.wg.Add(2)\n\tgo c.blockEventHandler()\n\tgo c.txEventHandler()\n\n\treturn nil\n}\n\n\/\/ Stop terminates the RPC and ZMQ connection to a bitcoind node and removes any\n\/\/ active rescan clients.\nfunc (c *BitcoindConn) Stop() {\n\tif !atomic.CompareAndSwapInt32(&c.stopped, 0, 1) {\n\t\treturn\n\t}\n\n\tfor _, client := range c.rescanClients {\n\t\tclient.Stop()\n\t}\n\n\tclose(c.quit)\n\tc.client.Shutdown()\n\tc.zmqBlockConn.Close()\n\tc.zmqTxConn.Close()\n\n\tc.client.WaitForShutdown()\n\tc.wg.Wait()\n}\n\n\/\/ blockEventHandler reads raw blocks events from the ZMQ block socket and\n\/\/ forwards them along to the current rescan clients.\n\/\/\n\/\/ NOTE: This must be run as a goroutine.\nfunc (c *BitcoindConn) blockEventHandler() {\n\tdefer c.wg.Done()\n\n\tlog.Info(\"Started listening for bitcoind block notifications via ZMQ \"+\n\t\t\"on\", c.zmqBlockConn.RemoteAddr())\n\n\tfor {\n\t\t\/\/ Before attempting to read from the ZMQ socket, we'll make\n\t\t\/\/ sure to check if we've been requested to shut down.\n\t\tselect {\n\t\tcase <-c.quit:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\t\/\/ Poll an event from the ZMQ socket.\n\t\tmsgBytes, err := c.zmqBlockConn.Receive()\n\t\tif err != nil {\n\t\t\t\/\/ EOF should only be returned if the connection was\n\t\t\t\/\/ explicitly closed, so we can exit at this point.\n\t\t\tif err == io.EOF {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ It's possible that the connection to the socket\n\t\t\t\/\/ continuously times out, so we'll prevent logging this\n\t\t\t\/\/ error to prevent spamming the logs.\n\t\t\tnetErr, ok := err.(net.Error)\n\t\t\tif ok && netErr.Timeout() {\n\t\t\t\tlog.Trace(\"Re-establishing timed out ZMQ \" +\n\t\t\t\t\t\"block connection\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlog.Errorf(\"Unable to receive ZMQ %v message: %v\",\n\t\t\t\trawBlockZMQCommand, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ We have an event! We'll now ensure it is a block event,\n\t\t\/\/ deserialize it, and report it to the different rescan\n\t\t\/\/ clients.\n\t\teventType := string(msgBytes[0])\n\t\tswitch eventType {\n\t\tcase rawBlockZMQCommand:\n\t\t\tblock := &wire.MsgBlock{}\n\t\t\tr := bytes.NewReader(msgBytes[1])\n\t\t\tif err := block.Deserialize(r); err != nil {\n\t\t\t\tlog.Errorf(\"Unable to deserialize block: %v\",\n\t\t\t\t\terr)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tc.rescanClientsMtx.Lock()\n\t\t\tfor _, client := range c.rescanClients {\n\t\t\t\tselect {\n\t\t\t\tcase client.zmqBlockNtfns <- block:\n\t\t\t\tcase <-client.quit:\n\t\t\t\tcase <-c.quit:\n\t\t\t\t\tc.rescanClientsMtx.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tc.rescanClientsMtx.Unlock()\n\t\tdefault:\n\t\t\t\/\/ It's possible that the message wasn't fully read if\n\t\t\t\/\/ bitcoind shuts down, which will produce an unreadable\n\t\t\t\/\/ event type. To prevent from logging it, we'll make\n\t\t\t\/\/ sure it conforms to the ASCII standard.\n\t\t\tif eventType == \"\" || !isASCII(eventType) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlog.Warnf(\"Received unexpected event type from %v \"+\n\t\t\t\t\"subscription: %v\", rawBlockZMQCommand,\n\t\t\t\teventType)\n\t\t}\n\t}\n}\n\n\/\/ txEventHandler reads raw blocks events from the ZMQ block socket and forwards\n\/\/ them along to the current rescan clients.\n\/\/\n\/\/ NOTE: This must be run as a goroutine.\nfunc (c *BitcoindConn) txEventHandler() {\n\tdefer c.wg.Done()\n\n\tlog.Info(\"Started listening for bitcoind transaction notifications \"+\n\t\t\"via ZMQ on\", c.zmqTxConn.RemoteAddr())\n\n\tfor {\n\t\t\/\/ Before attempting to read from the ZMQ socket, we'll make\n\t\t\/\/ sure to check if we've been requested to shut down.\n\t\tselect {\n\t\tcase <-c.quit:\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\n\t\t\/\/ Poll an event from the ZMQ socket.\n\t\tmsgBytes, err := c.zmqTxConn.Receive()\n\t\tif err != nil {\n\t\t\t\/\/ EOF should only be returned if the connection was\n\t\t\t\/\/ explicitly closed, so we can exit at this point.\n\t\t\tif err == io.EOF {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ It's possible that the connection to the socket\n\t\t\t\/\/ continuously times out, so we'll prevent logging this\n\t\t\t\/\/ error to prevent spamming the logs.\n\t\t\tnetErr, ok := err.(net.Error)\n\t\t\tif ok && netErr.Timeout() {\n\t\t\t\tlog.Trace(\"Re-establishing timed out ZMQ \" +\n\t\t\t\t\t\"transaction connection\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlog.Errorf(\"Unable to receive ZMQ %v message: %v\",\n\t\t\t\trawTxZMQCommand, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ We have an event! We'll now ensure it is a transaction event,\n\t\t\/\/ deserialize it, and report it to the different rescan\n\t\t\/\/ clients.\n\t\teventType := string(msgBytes[0])\n\t\tswitch eventType {\n\t\tcase rawTxZMQCommand:\n\t\t\ttx := &wire.MsgTx{}\n\t\t\tr := bytes.NewReader(msgBytes[1])\n\t\t\tif err := tx.Deserialize(r); err != nil {\n\t\t\t\tlog.Errorf(\"Unable to deserialize \"+\n\t\t\t\t\t\"transaction: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tc.rescanClientsMtx.Lock()\n\t\t\tfor _, client := range c.rescanClients {\n\t\t\t\tselect {\n\t\t\t\tcase client.zmqTxNtfns <- tx:\n\t\t\t\tcase <-client.quit:\n\t\t\t\tcase <-c.quit:\n\t\t\t\t\tc.rescanClientsMtx.Unlock()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tc.rescanClientsMtx.Unlock()\n\t\tdefault:\n\t\t\t\/\/ It's possible that the message wasn't fully read if\n\t\t\t\/\/ bitcoind shuts down, which will produce an unreadable\n\t\t\t\/\/ event type. To prevent from logging it, we'll make\n\t\t\t\/\/ sure it conforms to the ASCII standard.\n\t\t\tif eventType == \"\" || !isASCII(eventType) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlog.Warnf(\"Received unexpected event type from %v \"+\n\t\t\t\t\"subscription: %v\", rawTxZMQCommand, eventType)\n\t\t}\n\t}\n}\n\n\/\/ getCurrentNet returns the network on which the bitcoind node is running.\nfunc (c *BitcoindConn) getCurrentNet() (wire.BitcoinNet, error) {\n\thash, err := c.client.GetBlockHash(0)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tswitch *hash {\n\tcase *chaincfg.TestNet3Params.GenesisHash:\n\t\treturn chaincfg.TestNet3Params.Net, nil\n\tcase *chaincfg.RegressionNetParams.GenesisHash:\n\t\treturn chaincfg.RegressionNetParams.Net, nil\n\tcase *chaincfg.MainNetParams.GenesisHash:\n\t\treturn chaincfg.MainNetParams.Net, nil\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"unknown network with genesis hash %v\", hash)\n\t}\n}\n\n\/\/ NewBitcoindClient returns a bitcoind client using the current bitcoind\n\/\/ connection. This allows us to share the same connection using multiple\n\/\/ clients.\nfunc (c *BitcoindConn) NewBitcoindClient() *BitcoindClient {\n\treturn &BitcoindClient{\n\t\tquit: make(chan struct{}),\n\n\t\tid: atomic.AddUint64(&c.rescanClientCounter, 1),\n\n\t\tchainParams: c.chainParams,\n\t\tchainConn: c,\n\n\t\trescanUpdate: make(chan interface{}),\n\t\twatchedAddresses: make(map[string]struct{}),\n\t\twatchedOutPoints: make(map[wire.OutPoint]struct{}),\n\t\twatchedTxs: make(map[chainhash.Hash]struct{}),\n\n\t\tnotificationQueue: NewConcurrentQueue(20),\n\t\tzmqTxNtfns: make(chan *wire.MsgTx),\n\t\tzmqBlockNtfns: make(chan *wire.MsgBlock),\n\n\t\tmempool: make(map[chainhash.Hash]struct{}),\n\t\texpiredMempool: make(map[int32]map[chainhash.Hash]struct{}),\n\t}\n}\n\n\/\/ AddClient adds a client to the set of active rescan clients of the current\n\/\/ chain connection. This allows the connection to include the specified client\n\/\/ in its notification delivery.\n\/\/\n\/\/ NOTE: This function is safe for concurrent access.\nfunc (c *BitcoindConn) AddClient(client *BitcoindClient) {\n\tc.rescanClientsMtx.Lock()\n\tdefer c.rescanClientsMtx.Unlock()\n\n\tc.rescanClients[client.id] = client\n}\n\n\/\/ RemoveClient removes the client with the given ID from the set of active\n\/\/ rescan clients. Once removed, the client will no longer receive block and\n\/\/ transaction notifications from the chain connection.\n\/\/\n\/\/ NOTE: This function is safe for concurrent access.\nfunc (c *BitcoindConn) RemoveClient(id uint64) {\n\tc.rescanClientsMtx.Lock()\n\tdefer c.rescanClientsMtx.Unlock()\n\n\tdelete(c.rescanClients, id)\n}\n\n\/\/ isASCII is a helper method that checks whether all bytes in `data` would be\n\/\/ printable ASCII characters if interpreted as a string.\nfunc isASCII(s string) bool {\n\tfor _, c := range s {\n\t\tif c < 32 || c > 126 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package symlink\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"testing\"\n)\n\n\/\/ Making a symlink means:\n\/\/ - making sure the destination exists\n\/\/ - making sure the source don't exist and the parent folder is created\n\/\/ - if the source does exist and\n\/\/ - is a symlink pointing to a different destination: rmdir\n\/\/ - is a symlink pointing to the same\n\/\/ - is a folder x, rename it to x.1 (if x.1 exists, x.2, ...)\n\ntype test struct {\n\tsrc string\n\tdst string\n\terr string\n\tsl *SL\n}\n\nfunc TestDestination(t *testing.T) {\n\tt.Skip(\"Skip TestDestination\")\n\t\/\/ only a nil bit will make filepath.Abs() fail:\n\t\/\/ https:\/\/github.com\/golang\/go\/blob\/d16c7f8004bd1c9f896367af7ea86f5530596b39\/src\/syscall\/syscall_windows.go#L41\n\t\/\/ from UTF16FromString (https:\/\/github.com\/golang\/go\/blob\/d16c7f8004bd1c9f896367af7ea86f5530596b39\/src\/syscall\/syscall_windows.go#L71)\n\t\/\/ from FullPath (https:\/\/github.com\/golang\/go\/blob\/d16c7f8004bd1c9f896367af7ea86f5530596b39\/src\/syscall\/exec_windows.go#L134)\n\t\/\/ from abs (https:\/\/github.com\/golang\/go\/blob\/d16c7f8004bd1c9f896367af7ea86f5530596b39\/src\/path\/filepath\/path_windows.go#L109)\n\t\/\/ from Abs (https:\/\/github.com\/golang\/go\/blob\/d16c7f8004bd1c9f896367af7ea86f5530596b39\/src\/path\/filepath\/path.go#L235)\n\n\tosStat = testOsStat\n\texecRun = testExecRun\n\ttests := []*test{\n\t\t&test{dst: \"unknown\/dst\", err: \"The system cannot find the path specified\"},\n\t\t&test{dst: string([]byte{0}), err: \"invalid argument\"},\n\t\t&test{dst: \"err\", err: \"Test error on os.Stat with non-nil fi\"},\n\t\t&test{dst: \"badsymlink\/dir\", err: \"unreadable dir on symlink\"},\n\t\t&test{dst: \"nojunction\/dir\", err: \"Unable to find junction symlink in parent dir\"},\n\t\t&test{dst: \"cmdRun\/dir\", err: \"The system cannot find the file specified\"},\n\t\t&test{dst: \"WarningOnDir\/dir\", err: \"Warning on run\"},\n\t}\n\tvar sl *SL\n\tvar err error\n\tfor _, test := range tests {\n\t\tsl, err = New(\".\", test.dst)\n\t\tif err == nil || strings.Contains(err.Error(), test.err) == false {\n\t\t\tt.Errorf(\"Err '%v', expected '%s'\", err, test.err)\n\t\t}\n\t\tif sl != nil {\n\t\t\tt.Errorf(\"SL '%v', expected <nil>\", sl)\n\t\t}\n\t}\n\t\/\/ destination is a symlink\n\t_, err = New(`.`, `symlink`)\n\t\/\/ destination exists\n\t_, err = New(`x`, `.`)\n\t\/\/ fmt.Printf(\"%+v\\n\", err)\n}\n\nfunc TestSource(t *testing.T) {\n\tosStat = testOsStat\n\texecRun = testExecRun\n\tosMkdirAll = testOsMkdirAll\n\tosRename = testOsRename\n\n\ttests := []*test{\n\t\t&test{src: \"parentNotYetCreated\/newlink\"},\n\t\t&test{src: \"badSrcParent\/newlink\", err: \"Test error badSrcParent on os.Stat with non-nil fi\"},\n\t\t&test{src: \"badSrcParentMdirAll\/newlink\", err: \"Error on mkDirAll for\"},\n\t\t&test{src: \"symlinkdir\/newlink\", err: \"\"},\n\t\t&test{src: \"badsrcparentdir\/newlink\", err: \"Impossible to check\/access link parent folder\"},\n\t\t&test{src: string([]byte{0}), err: \"invalid argument\"},\n\t\t&test{src: \"parentnomovesymlinkdir\/newlink\", err: \"Unable to rename \"},\n\t\t&test{src: \"parent\/newlinkBadStat\", err: \"newlinkBadStat cannot be stat\"},\n\t\t&test{src: \"existingparent\/existingsymlink\", err: \"\"},\n\t\t&test{src: \"existingparent\/existingsymlinkdiff\", err: \"\"},\n\t\t&test{src: \"existingparent\/existingsymlinkdiffnomove\", err: \"Unable to rename\"},\n\t\t&test{src: \"parent\/failedmklink\", err: \"Unable to run \"},\n\t}\n\tvar sl *SL\n\tvar err error\n\tfor _, test := range tests {\n\t\tsl, err = New(test.src, \".\")\n\t\tif err != nil && strings.Contains(err.Error(), test.err) == false {\n\t\t\tt.Errorf(\"Err '%v', expected '%s'\", err, test.err)\n\t\t}\n\t\tif err == nil && test.err != \"\" {\n\t\t\tt.Errorf(\"Err nil, expected '%s'\", test.err)\n\t\t}\n\t\tif sl == nil && err == nil {\n\t\t\tt.Errorf(\"SL '%v', expected NOT <nil>\", sl)\n\t\t}\n\t\tfmt.Println(\"------------------\")\n\t}\n}\n\nfunc testOsStat(name string) (os.FileInfo, error) {\n\tfmt.Printf(\"testOsStat name='%+v'\\n\", name)\n\tif strings.HasSuffix(name, `prj\\symlink\\err\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"Test error on os.Stat with non-nil fi\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\symlink\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for symlink\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\badsymlink\\dir\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for bad symlink\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\nojunction\\dir\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for no junction\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\cmdRun\\dir\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for no junction\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\WarningOnDir\\dir\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for warning on dir\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\badSrcParent\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"Test error badSrcParent on os.Stat with non-nil fi\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\symlinkdir\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for symlinkdir\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\parentnomovesymlinkdir\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for src parent no move\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\badsrcparentdir\\`) {\n\t\treturn nil, fmt.Errorf(\"badsrcparentdir cannot be stat'd\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\parent\\newlinkBadStat\\`) {\n\t\treturn nil, fmt.Errorf(\"newlinkBadStat cannot be stat'd\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\existingparent\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, nil\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\existingparent\\existingsymlink\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for existingsymlink\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\existingparent\\existingsymlinkdiff\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for existingsymlinkdiff\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\existingparent\\existingsymlinkdiffnomove\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for existingsymlinkdiffnomove\")\n\t}\n\tif strings.HasSuffix(name, `.1`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, nil\n\t}\n\treturn os.Stat(name)\n}\n\nfunc testOsMkdirAll(path string, perm os.FileMode) error {\n\tfmt.Printf(\"testOsMkdirAll path='%+v'\\n\", path)\n\tif strings.HasSuffix(path, `badSrcParentMdirAll\\`) {\n\t\treturn fmt.Errorf(\"Error on mkDirAll for '%s'\", path)\n\t}\n\treturn nil\n}\n\nvar junctionOut = `\n Répertoire de C:\\Users\\VonC\\prog\\git\\ggb\\deps\\src\\github.com\\VonC\n\n22\/06\/2015 11:03 <REP> .\n22\/06\/2015 11:03 <REP> ..\n22\/06\/2015 11:03 <JONCTION> symlink [C:\\Users\\VonC\\prog\\git\\ggb\\]\n22\/06\/2015 11:03 <JONCTION> symlinkdir [C:\\Users\\VonC\\prog\\git\\ggb\\]\n22\/06\/2015 11:03 <JONCTION> parentnomovesymlinkdir [C:\\Users\\VonC\\prog\\git\\ggb\\]\n22\/06\/2015 11:03 <JONCTION> existingsymlink [C:\\Users\\VonC\\prog\\git\\ggb\\prj\\symlink\\]\n22\/06\/2015 11:03 <JONCTION> existingsymlinkdiff [C:\\Users\\VonC\\prog\\git\\ggb\\prj\\symlink\\diff\\]\n22\/06\/2015 11:03 <JONCTION> existingsymlinkdiffnomove [C:\\Users\\VonC\\prog\\git\\ggb\\prj\\symlink\\diff\\]\n`\n\nfunc testExecRun(cmd *exec.Cmd) error {\n\ttmsg := fmt.Sprintf(\"testExecRun cmd='%v' in '%s'\", cmd.Args, cmd.Dir)\n\tfmt.Println(tmsg)\n\tif strings.Contains(tmsg, `\\failedmklink`) {\n\t\treturn fmt.Errorf(\"mklink fails\")\n\t}\n\tif strings.Contains(tmsg, \"\/J\") {\n\t\treturn nil\n\t}\n\tif strings.HasSuffix(cmd.Dir, `\\WarningOnDir`) {\n\t\tio.WriteString(cmd.Stdout, \"dummy content\")\n\t\tio.WriteString(cmd.Stderr, \"Some warning on dir\")\n\t\treturn nil\n\t}\n\tif strings.HasSuffix(cmd.Dir, `\\nojunction`) {\n\t\tio.WriteString(cmd.Stdout, \"dummy content without any junction\")\n\t\treturn nil\n\t}\n\tif strings.HasSuffix(cmd.Dir, `\\badsymlink`) {\n\t\treturn fmt.Errorf(\"unreadable dir on symlink\")\n\t}\n\n\tpath := \"\"\n\tif strings.Contains(cmd.Dir, `ggb\\`) {\n\t\ti := strings.Index(cmd.Dir, `ggb\\`)\n\t\tpath = cmd.Dir[:i+len(`ggb\\`)]\n\t}\n\tjjunctionOut := strings.Replace(junctionOut, `C:\\Users\\VonC\\prog\\git\\ggb\\`, path, -1)\n\n\tif strings.HasSuffix(cmd.Dir, `\\symlink`) {\n\t\tio.WriteString(cmd.Stdout, jjunctionOut)\n\t\treturn nil\n\t}\n\tif strings.HasSuffix(cmd.Dir, `\\parentnomovesymlinkdir`) {\n\t\tio.WriteString(cmd.Stdout, jjunctionOut)\n\t\treturn nil\n\t}\n\tif strings.HasSuffix(cmd.Dir, `\\existingparent`) {\n\t\tio.WriteString(cmd.Stdout, jjunctionOut)\n\t\treturn nil\n\t}\n\treturn cmdRun(cmd)\n}\n\nfunc testOsRename(oldpath, newpath string) error {\n\tfmt.Printf(\"testOsRename oldpath='%v', newpath '%s'\\n\", oldpath, newpath)\n\tif strings.HasSuffix(oldpath, `\\parentnomovesymlinkdir`) {\n\t\treturn fmt.Errorf(\"Unable to rename '%s' to '%s'\", oldpath, newpath)\n\t}\n\tif strings.HasSuffix(oldpath, `\\existingsymlinkdiffnomove`) {\n\t\treturn fmt.Errorf(\"Unable to rename '%s' to '%s'\", oldpath, newpath)\n\t}\n\treturn nil\n}\n<commit_msg>symlink_test.go: test bad moveToX<commit_after>package symlink\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"testing\"\n)\n\n\/\/ Making a symlink means:\n\/\/ - making sure the destination exists\n\/\/ - making sure the source don't exist and the parent folder is created\n\/\/ - if the source does exist and\n\/\/ - is a symlink pointing to a different destination: rmdir\n\/\/ - is a symlink pointing to the same\n\/\/ - is a folder x, rename it to x.1 (if x.1 exists, x.2, ...)\n\ntype test struct {\n\tsrc string\n\tdst string\n\terr string\n\tsl *SL\n}\n\nfunc TestDestination(t *testing.T) {\n\tt.Skip(\"Skip TestDestination\")\n\t\/\/ only a nil bit will make filepath.Abs() fail:\n\t\/\/ https:\/\/github.com\/golang\/go\/blob\/d16c7f8004bd1c9f896367af7ea86f5530596b39\/src\/syscall\/syscall_windows.go#L41\n\t\/\/ from UTF16FromString (https:\/\/github.com\/golang\/go\/blob\/d16c7f8004bd1c9f896367af7ea86f5530596b39\/src\/syscall\/syscall_windows.go#L71)\n\t\/\/ from FullPath (https:\/\/github.com\/golang\/go\/blob\/d16c7f8004bd1c9f896367af7ea86f5530596b39\/src\/syscall\/exec_windows.go#L134)\n\t\/\/ from abs (https:\/\/github.com\/golang\/go\/blob\/d16c7f8004bd1c9f896367af7ea86f5530596b39\/src\/path\/filepath\/path_windows.go#L109)\n\t\/\/ from Abs (https:\/\/github.com\/golang\/go\/blob\/d16c7f8004bd1c9f896367af7ea86f5530596b39\/src\/path\/filepath\/path.go#L235)\n\n\tosStat = testOsStat\n\texecRun = testExecRun\n\ttests := []*test{\n\t\t&test{dst: \"unknown\/dst\", err: \"The system cannot find the path specified\"},\n\t\t&test{dst: string([]byte{0}), err: \"invalid argument\"},\n\t\t&test{dst: \"err\", err: \"Test error on os.Stat with non-nil fi\"},\n\t\t&test{dst: \"badsymlink\/dir\", err: \"unreadable dir on symlink\"},\n\t\t&test{dst: \"nojunction\/dir\", err: \"Unable to find junction symlink in parent dir\"},\n\t\t&test{dst: \"cmdRun\/dir\", err: \"The system cannot find the file specified\"},\n\t\t&test{dst: \"WarningOnDir\/dir\", err: \"Warning on run\"},\n\t}\n\tvar sl *SL\n\tvar err error\n\tfor _, test := range tests {\n\t\tsl, err = New(\".\", test.dst)\n\t\tif err == nil || strings.Contains(err.Error(), test.err) == false {\n\t\t\tt.Errorf(\"Err '%v', expected '%s'\", err, test.err)\n\t\t}\n\t\tif sl != nil {\n\t\t\tt.Errorf(\"SL '%v', expected <nil>\", sl)\n\t\t}\n\t}\n\t\/\/ destination is a symlink\n\t_, err = New(`.`, `symlink`)\n\t\/\/ destination exists\n\t_, err = New(`x`, `.`)\n\t\/\/ fmt.Printf(\"%+v\\n\", err)\n}\n\nfunc TestSource(t *testing.T) {\n\tosStat = testOsStat\n\texecRun = testExecRun\n\tosMkdirAll = testOsMkdirAll\n\tosRename = testOsRename\n\n\ttests := []*test{\n\t\t&test{src: \"parentNotYetCreated\/newlink\"},\n\t\t&test{src: \"badSrcParent\/newlink\", err: \"Test error badSrcParent on os.Stat with non-nil fi\"},\n\t\t&test{src: \"badSrcParentMdirAll\/newlink\", err: \"Error on mkDirAll for\"},\n\t\t&test{src: \"symlinkdir\/newlink\", err: \"\"},\n\t\t&test{src: \"badsrcparentdir\/newlink\", err: \"Impossible to check\/access link parent folder\"},\n\t\t&test{src: string([]byte{0}), err: \"invalid argument\"},\n\t\t&test{src: \"parentnomovesymlinkdir\/newlink\", err: \"Unable to rename \"},\n\t\t&test{src: \"parent\/newlinkBadStat\", err: \"newlinkBadStat cannot be stat\"},\n\t\t&test{src: \"existingparent\/existingsymlink\", err: \"\"},\n\t\t&test{src: \"existingparent\/existingsymlinkdiff\", err: \"\"},\n\t\t&test{src: \"existingparent\/existingsymlinkdiffnomove\", err: \"Unable to rename\"},\n\t\t&test{src: \"parent\/failedmklink\", err: \"Unable to run \"},\n\t\t&test{src: \"parent\/existingsymlinkbadstat\", err: \"existingsymlinkbadstat.1 cannot be stat'd\"},\n\t}\n\tvar sl *SL\n\tvar err error\n\tfor _, test := range tests {\n\t\tsl, err = New(test.src, \".\")\n\t\tif err != nil && strings.Contains(err.Error(), test.err) == false {\n\t\t\tt.Errorf(\"Err '%v', expected '%s'\", err, test.err)\n\t\t}\n\t\tif err == nil && test.err != \"\" {\n\t\t\tt.Errorf(\"Err nil, expected '%s'\", test.err)\n\t\t}\n\t\tif sl == nil && err == nil {\n\t\t\tt.Errorf(\"SL '%v', expected NOT <nil>\", sl)\n\t\t}\n\t\tfmt.Println(\"------------------\")\n\t}\n}\n\nfunc testOsStat(name string) (os.FileInfo, error) {\n\tfmt.Printf(\"testOsStat name='%+v'\\n\", name)\n\tif strings.HasSuffix(name, `prj\\symlink\\err\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"Test error on os.Stat with non-nil fi\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\symlink\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for symlink\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\badsymlink\\dir\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for bad symlink\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\nojunction\\dir\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for no junction\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\cmdRun\\dir\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for no junction\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\WarningOnDir\\dir\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for warning on dir\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\badSrcParent\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"Test error badSrcParent on os.Stat with non-nil fi\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\symlinkdir\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for symlinkdir\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\parentnomovesymlinkdir\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for src parent no move\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\badsrcparentdir\\`) {\n\t\treturn nil, fmt.Errorf(\"badsrcparentdir cannot be stat'd\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\parent\\newlinkBadStat\\`) {\n\t\treturn nil, fmt.Errorf(\"newlinkBadStat cannot be stat'd\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\parent\\existingsymlinkbadstat.1`) {\n\t\treturn nil, fmt.Errorf(\"existingsymlinkbadstat.1 cannot be stat'd\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\parent\\existingsymlinkbadstat\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, nil\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\existingparent\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, nil\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\existingparent\\existingsymlink\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for existingsymlink\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\existingparent\\existingsymlinkdiff\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for existingsymlinkdiff\")\n\t}\n\tif strings.HasSuffix(name, `prj\\symlink\\existingparent\\existingsymlinkdiffnomove\\`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, fmt.Errorf(\"readlink for existingsymlinkdiffnomove\")\n\t}\n\tif strings.HasSuffix(name, `.1`) {\n\t\tfi, _ := os.Stat(\".\")\n\t\treturn fi, nil\n\t}\n\treturn os.Stat(name)\n}\n\nfunc testOsMkdirAll(path string, perm os.FileMode) error {\n\tfmt.Printf(\"testOsMkdirAll path='%+v'\\n\", path)\n\tif strings.HasSuffix(path, `badSrcParentMdirAll\\`) {\n\t\treturn fmt.Errorf(\"Error on mkDirAll for '%s'\", path)\n\t}\n\treturn nil\n}\n\nvar junctionOut = `\n Répertoire de C:\\Users\\VonC\\prog\\git\\ggb\\deps\\src\\github.com\\VonC\n\n22\/06\/2015 11:03 <REP> .\n22\/06\/2015 11:03 <REP> ..\n22\/06\/2015 11:03 <JONCTION> symlink [C:\\Users\\VonC\\prog\\git\\ggb\\]\n22\/06\/2015 11:03 <JONCTION> symlinkdir [C:\\Users\\VonC\\prog\\git\\ggb\\]\n22\/06\/2015 11:03 <JONCTION> parentnomovesymlinkdir [C:\\Users\\VonC\\prog\\git\\ggb\\]\n22\/06\/2015 11:03 <JONCTION> existingsymlink [C:\\Users\\VonC\\prog\\git\\ggb\\prj\\symlink\\]\n22\/06\/2015 11:03 <JONCTION> existingsymlinkdiff [C:\\Users\\VonC\\prog\\git\\ggb\\prj\\symlink\\diff\\]\n22\/06\/2015 11:03 <JONCTION> existingsymlinkdiffnomove [C:\\Users\\VonC\\prog\\git\\ggb\\prj\\symlink\\diff\\]\n`\n\nfunc testExecRun(cmd *exec.Cmd) error {\n\ttmsg := fmt.Sprintf(\"testExecRun cmd='%v' in '%s'\", cmd.Args, cmd.Dir)\n\tfmt.Println(tmsg)\n\tif strings.Contains(tmsg, `\\failedmklink`) {\n\t\treturn fmt.Errorf(\"mklink fails\")\n\t}\n\tif strings.Contains(tmsg, \"\/J\") {\n\t\treturn nil\n\t}\n\tif strings.HasSuffix(cmd.Dir, `\\WarningOnDir`) {\n\t\tio.WriteString(cmd.Stdout, \"dummy content\")\n\t\tio.WriteString(cmd.Stderr, \"Some warning on dir\")\n\t\treturn nil\n\t}\n\tif strings.HasSuffix(cmd.Dir, `\\nojunction`) {\n\t\tio.WriteString(cmd.Stdout, \"dummy content without any junction\")\n\t\treturn nil\n\t}\n\tif strings.HasSuffix(cmd.Dir, `\\badsymlink`) {\n\t\treturn fmt.Errorf(\"unreadable dir on symlink\")\n\t}\n\n\tpath := \"\"\n\tif strings.Contains(cmd.Dir, `ggb\\`) {\n\t\ti := strings.Index(cmd.Dir, `ggb\\`)\n\t\tpath = cmd.Dir[:i+len(`ggb\\`)]\n\t}\n\tjjunctionOut := strings.Replace(junctionOut, `C:\\Users\\VonC\\prog\\git\\ggb\\`, path, -1)\n\n\tif strings.HasSuffix(cmd.Dir, `\\symlink`) {\n\t\tio.WriteString(cmd.Stdout, jjunctionOut)\n\t\treturn nil\n\t}\n\tif strings.HasSuffix(cmd.Dir, `\\parentnomovesymlinkdir`) {\n\t\tio.WriteString(cmd.Stdout, jjunctionOut)\n\t\treturn nil\n\t}\n\tif strings.HasSuffix(cmd.Dir, `\\existingparent`) {\n\t\tio.WriteString(cmd.Stdout, jjunctionOut)\n\t\treturn nil\n\t}\n\treturn cmdRun(cmd)\n}\n\nfunc testOsRename(oldpath, newpath string) error {\n\tfmt.Printf(\"testOsRename oldpath='%v', newpath '%s'\\n\", oldpath, newpath)\n\tif strings.HasSuffix(oldpath, `\\parentnomovesymlinkdir`) {\n\t\treturn fmt.Errorf(\"Unable to rename '%s' to '%s'\", oldpath, newpath)\n\t}\n\tif strings.HasSuffix(oldpath, `\\existingsymlinkdiffnomove`) {\n\t\treturn fmt.Errorf(\"Unable to rename '%s' to '%s'\", oldpath, newpath)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n\n\t\"github.com\/googlecloudplatform\/gcsfuse\/fs\"\n\t\"github.com\/googlecloudplatform\/gcsfuse\/timeutil\"\n\t\"github.com\/jacobsa\/fuse\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\/gcscaching\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"Usage of %s:\\n\", os.Args[0])\n\tfmt.Fprintf(os.Stderr, \" %s [flags] <mount-point>\\n\", os.Args[0])\n\tflag.PrintDefaults()\n}\n\nvar fBucketName = flag.String(\"bucket\", \"\", \"Name of GCS bucket to mount.\")\n\nvar fTempDir = flag.String(\n\t\"temp_dir\", \"\",\n\t\"The temporary directory in which to store local copies of GCS objects. \"+\n\t\t\"If empty, the system default (probably \/tmp) will be used.\")\n\nvar fTempDirLimit = flag.Int64(\n\t\"temp_dir_bytes\", 1<<31,\n\t\"A desired limit on the number of bytes used in --temp_dir. May be exceeded \"+\n\t\t\"for dirty files that have not been flushed or closed.\")\n\nvar fGCSChunkSize = flag.Uint64(\n\t\"gcs_chunk_size\", 1<<24,\n\t\"If set to a non-zero value N, split up GCS objects into multiple chunks of \"+\n\t\t\"size at most N when reading, and do not read or cache unnecessary chunks.\")\n\nvar fImplicitDirs = flag.Bool(\n\t\"implicit_dirs\",\n\tfalse,\n\t\"Implicitly define directories based on their content. See docs\/semantics.md.\")\n\nvar fSupportNlink = flag.Bool(\n\t\"support_nlink\",\n\tfalse,\n\t\"Return meaningful values for nlink from fstat(2). See docs\/semantics.md.\")\n\nvar fStatCacheTTL = flag.String(\n\t\"stat_cache_ttl\",\n\t\"\",\n\t\"If non-empty, a duration specifying how long to cache StatObject results \"+\n\t\t\"from GCS, e.g. \\\"2s\\\" or \\\"15ms\\\". See docs\/semantics.md for more.\")\n\nvar fTypeCacheTTL = flag.String(\n\t\"type_cache_ttl\",\n\t\"\",\n\t\"If non-empty, a duration specifying how long to cache name -> file\/dir type \"+\n\t\t\"mappings in directory inodes, e.g. \\\"2s\\\" or \\\"15ms\\\". \"+\n\t\t\"See docs\/semantics.md.\")\n\nfunc getBucketName() string {\n\ts := *fBucketName\n\tif s == \"\" {\n\t\tfmt.Println(\"You must set --bucket.\")\n\t\tos.Exit(1)\n\t}\n\n\treturn s\n}\n\nfunc registerSIGINTHandler(mountPoint string) {\n\t\/\/ Register for SIGINT.\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, os.Interrupt)\n\n\t\/\/ Start a goroutine that will unmount when the signal is received.\n\tgo func() {\n\t\tfor {\n\t\t\t<-signalChan\n\t\t\tlog.Println(\"Received SIGINT, attempting to unmount...\")\n\n\t\t\terr := fuse.Unmount(mountPoint)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Failed to unmount in response to SIGINT: %v\", err)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Successfully unmounted in response to SIGINT.\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc getBucket() (b gcs.Bucket) {\n\t\/\/ Set up a GCS connection.\n\tlog.Println(\"Initializing GCS connection.\")\n\tconn, err := getConn()\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't get GCS connection: \", err)\n\t}\n\n\t\/\/ Extract the appropriate bucket.\n\tb = conn.GetBucket(getBucketName())\n\n\t\/\/ Enable cached StatObject results, if appropriate.\n\tif *fStatCacheTTL != \"\" {\n\t\tttl, err := time.ParseDuration(*fStatCacheTTL)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Invalid --stat_cache_ttl: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tconst cacheCapacity = 4096\n\t\tb = gcscaching.NewFastStatBucket(\n\t\t\tttl,\n\t\t\tgcscaching.NewStatCache(cacheCapacity),\n\t\t\ttimeutil.RealClock(),\n\t\t\tb)\n\t}\n\n\treturn\n}\n\nfunc main() {\n\t\/\/ Make logging output better.\n\tlog.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)\n\n\t\/\/ Set up flags.\n\tflag.Usage = usage\n\tflag.Parse()\n\n\t\/\/ Grab the mount point.\n\tif flag.NArg() != 1 {\n\t\tusage()\n\t\tos.Exit(1)\n\t}\n\n\tmountPoint := flag.Arg(0)\n\n\t\/\/ Parse --type_cache_ttl\n\tvar typeCacheTTL time.Duration\n\tif *fTypeCacheTTL != \"\" {\n\t\tvar err error\n\t\ttypeCacheTTL, err = time.ParseDuration(*fTypeCacheTTL)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Invalid --type_cache_ttl: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Create a file system server.\n\tserverCfg := &fs.ServerConfig{\n\t\tClock: timeutil.RealClock(),\n\t\tBucket: getBucket(),\n\t\tTempDir: *fTempDir,\n\t\tTempDirLimit: *fTempDirLimit,\n\t\tGCSChunkSize: *fGCSChunkSize,\n\t\tImplicitDirectories: *fImplicitDirs,\n\t\tSupportNlink: *fSupportNlink,\n\t\tDirTypeCacheTTL: typeCacheTTL,\n\t}\n\n\tserver, err := fs.NewServer(serverCfg)\n\tif err != nil {\n\t\tlog.Fatal(\"fs.NewServer:\", err)\n\t}\n\n\t\/\/ Mount the file system.\n\tmountedFS, err := fuse.Mount(mountPoint, server, &fuse.MountConfig{})\n\tif err != nil {\n\t\tlog.Fatal(\"Mount:\", err)\n\t}\n\n\tlog.Println(\"File system has been successfully mounted.\")\n\n\t\/\/ Let the user unmount with Ctrl-C (SIGINT).\n\tregisterSIGINTHandler(mountedFS.Dir())\n\n\t\/\/ Wait for it to be unmounted.\n\tif err := mountedFS.Join(context.Background()); err != nil {\n\t\tlog.Fatal(\"MountedFileSystem.Join:\", err)\n\t}\n\n\tlog.Println(\"Successfully exiting.\")\n}\n<commit_msg>Set flag defaults.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n\n\t\"github.com\/googlecloudplatform\/gcsfuse\/fs\"\n\t\"github.com\/googlecloudplatform\/gcsfuse\/timeutil\"\n\t\"github.com\/jacobsa\/fuse\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\/gcscaching\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"Usage of %s:\\n\", os.Args[0])\n\tfmt.Fprintf(os.Stderr, \" %s [flags] <mount-point>\\n\", os.Args[0])\n\tflag.PrintDefaults()\n}\n\nvar fBucketName = flag.String(\"bucket\", \"\", \"Name of GCS bucket to mount.\")\n\nvar fTempDir = flag.String(\n\t\"temp_dir\", \"\",\n\t\"The temporary directory in which to store local copies of GCS objects. \"+\n\t\t\"If empty, the system default (probably \/tmp) will be used.\")\n\nvar fTempDirLimit = flag.Int64(\n\t\"temp_dir_bytes\", 1<<31,\n\t\"A desired limit on the number of bytes used in --temp_dir. May be exceeded \"+\n\t\t\"for dirty files that have not been flushed or closed.\")\n\nvar fGCSChunkSize = flag.Uint64(\n\t\"gcs_chunk_size\", 1<<24,\n\t\"If set to a non-zero value N, split up GCS objects into multiple chunks of \"+\n\t\t\"size at most N when reading, and do not read or cache unnecessary chunks.\")\n\nvar fImplicitDirs = flag.Bool(\n\t\"implicit_dirs\",\n\tfalse,\n\t\"Implicitly define directories based on their content. See docs\/semantics.md.\")\n\nvar fSupportNlink = flag.Bool(\n\t\"support_nlink\",\n\tfalse,\n\t\"Return meaningful values for nlink from fstat(2). See docs\/semantics.md.\")\n\nvar fStatCacheTTL = flag.String(\n\t\"stat_cache_ttl\",\n\t\"1m\",\n\t\"If non-empty, a duration specifying how long to cache StatObject results \"+\n\t\t\"from GCS, e.g. \\\"2s\\\" or \\\"15ms\\\". See docs\/semantics.md for more.\")\n\nvar fTypeCacheTTL = flag.String(\n\t\"type_cache_ttl\",\n\t\"1m\",\n\t\"If non-empty, a duration specifying how long to cache name -> file\/dir type \"+\n\t\t\"mappings in directory inodes, e.g. \\\"2s\\\" or \\\"15ms\\\". \"+\n\t\t\"See docs\/semantics.md.\")\n\nfunc getBucketName() string {\n\ts := *fBucketName\n\tif s == \"\" {\n\t\tfmt.Println(\"You must set --bucket.\")\n\t\tos.Exit(1)\n\t}\n\n\treturn s\n}\n\nfunc registerSIGINTHandler(mountPoint string) {\n\t\/\/ Register for SIGINT.\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, os.Interrupt)\n\n\t\/\/ Start a goroutine that will unmount when the signal is received.\n\tgo func() {\n\t\tfor {\n\t\t\t<-signalChan\n\t\t\tlog.Println(\"Received SIGINT, attempting to unmount...\")\n\n\t\t\terr := fuse.Unmount(mountPoint)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Failed to unmount in response to SIGINT: %v\", err)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Successfully unmounted in response to SIGINT.\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc getBucket() (b gcs.Bucket) {\n\t\/\/ Set up a GCS connection.\n\tlog.Println(\"Initializing GCS connection.\")\n\tconn, err := getConn()\n\tif err != nil {\n\t\tlog.Fatal(\"Couldn't get GCS connection: \", err)\n\t}\n\n\t\/\/ Extract the appropriate bucket.\n\tb = conn.GetBucket(getBucketName())\n\n\t\/\/ Enable cached StatObject results, if appropriate.\n\tif *fStatCacheTTL != \"\" {\n\t\tttl, err := time.ParseDuration(*fStatCacheTTL)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Invalid --stat_cache_ttl: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tconst cacheCapacity = 4096\n\t\tb = gcscaching.NewFastStatBucket(\n\t\t\tttl,\n\t\t\tgcscaching.NewStatCache(cacheCapacity),\n\t\t\ttimeutil.RealClock(),\n\t\t\tb)\n\t}\n\n\treturn\n}\n\nfunc main() {\n\t\/\/ Make logging output better.\n\tlog.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)\n\n\t\/\/ Set up flags.\n\tflag.Usage = usage\n\tflag.Parse()\n\n\t\/\/ Grab the mount point.\n\tif flag.NArg() != 1 {\n\t\tusage()\n\t\tos.Exit(1)\n\t}\n\n\tmountPoint := flag.Arg(0)\n\n\t\/\/ Parse --type_cache_ttl\n\tvar typeCacheTTL time.Duration\n\tif *fTypeCacheTTL != \"\" {\n\t\tvar err error\n\t\ttypeCacheTTL, err = time.ParseDuration(*fTypeCacheTTL)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Invalid --type_cache_ttl: %v\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Create a file system server.\n\tserverCfg := &fs.ServerConfig{\n\t\tClock: timeutil.RealClock(),\n\t\tBucket: getBucket(),\n\t\tTempDir: *fTempDir,\n\t\tTempDirLimit: *fTempDirLimit,\n\t\tGCSChunkSize: *fGCSChunkSize,\n\t\tImplicitDirectories: *fImplicitDirs,\n\t\tSupportNlink: *fSupportNlink,\n\t\tDirTypeCacheTTL: typeCacheTTL,\n\t}\n\n\tserver, err := fs.NewServer(serverCfg)\n\tif err != nil {\n\t\tlog.Fatal(\"fs.NewServer:\", err)\n\t}\n\n\t\/\/ Mount the file system.\n\tmountedFS, err := fuse.Mount(mountPoint, server, &fuse.MountConfig{})\n\tif err != nil {\n\t\tlog.Fatal(\"Mount:\", err)\n\t}\n\n\tlog.Println(\"File system has been successfully mounted.\")\n\n\t\/\/ Let the user unmount with Ctrl-C (SIGINT).\n\tregisterSIGINTHandler(mountedFS.Dir())\n\n\t\/\/ Wait for it to be unmounted.\n\tif err := mountedFS.Join(context.Background()); err != nil {\n\t\tlog.Fatal(\"MountedFileSystem.Join:\", err)\n\t}\n\n\tlog.Println(\"Successfully exiting.\")\n}\n<|endoftext|>"} {"text":"<commit_before>package gorm\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype mysql struct{}\n\nfunc (s *mysql) BinVar(i int) string {\n\treturn \"$$\" \/\/ ?\n}\n\nfunc (s *mysql) SupportLastInsertId() bool {\n\treturn true\n}\n\nfunc (s *mysql) HasTop() bool {\n\treturn false\n}\n\nfunc (s *mysql) SqlTag(value reflect.Value, size int) string {\n\tswitch value.Kind() {\n\tcase reflect.Bool:\n\t\treturn \"boolean\"\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr:\n\t\treturn \"int\"\n\tcase reflect.Int64, reflect.Uint64:\n\t\treturn \"bigint\"\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn \"double\"\n\tcase reflect.String:\n\t\tif size > 0 && size < 65532 {\n\t\t\treturn fmt.Sprintf(\"varchar(%d)\", size)\n\t\t}\n\t\treturn \"longtext\"\n\tcase reflect.Struct:\n\t\tif _, ok := value.Interface().(time.Time); ok {\n\t\t\treturn \"timestamp NULL\"\n\t\t}\n\tcase reflect.Array:\n\t\tif value.Type().Elem().Kind() == reflect.Uint8 {\n\t\t\treturn fmt.Sprintf(\"binary(%d)\", value.Len())\n\t\t}\n\tdefault:\n\t\tif _, ok := value.Interface().([]byte); ok {\n\t\t\tif size > 0 && size < 65532 {\n\t\t\t\treturn fmt.Sprintf(\"varbinary(%d)\", size)\n\t\t\t}\n\t\t\treturn \"longblob\"\n\t\t}\n\t}\n\tpanic(fmt.Sprintf(\"invalid sql type %s (%s) for mysql\", value.Type().Name(), value.Kind().String()))\n}\n\nfunc (s *mysql) PrimaryKeyTag(value reflect.Value, size int) string {\n\tsuffix := \" NOT NULL AUTO_INCREMENT PRIMARY KEY\"\n\tswitch value.Kind() {\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr:\n\t\treturn \"int\" + suffix\n\tcase reflect.Int64, reflect.Uint64:\n\t\treturn \"bigint\" + suffix\n\tcase reflect.Array:\n\t\tif value.Type().Elem().Kind() == reflect.Uint8 {\n\t\t\treturn fmt.Sprintf(\"binary(%d) NOT NULL PRIMARY KEY\", value.Len())\n\t\t}\n\t\tfallthrough\n\tdefault:\n\t\tpanic(\"Invalid primary key type\")\n\t}\n}\n\nfunc (s *mysql) ReturningStr(tableName, key string) string {\n\treturn \"\"\n}\n\nfunc (s *mysql) SelectFromDummyTable() string {\n\treturn \"FROM DUAL\"\n}\n\nfunc (s *mysql) Quote(key string) string {\n\treturn fmt.Sprintf(\"`%s`\", key)\n}\n\nfunc (s *mysql) databaseName(scope *Scope) string {\n\tfrom := strings.Index(scope.db.parent.source, \"\/\") + 1\n\tto := strings.Index(scope.db.parent.source, \"?\")\n\tif to == -1 {\n\t\tto = len(scope.db.parent.source)\n\t}\n\treturn scope.db.parent.source[from:to]\n}\n\nfunc (s *mysql) HasTable(scope *Scope, tableName string) bool {\n\tvar count int\n\tnewScope := scope.New(nil)\n\tnewScope.Raw(fmt.Sprintf(\"SELECT count(*) FROM INFORMATION_SCHEMA.tables where table_name = %v AND table_schema = %v\",\n\t\tnewScope.AddToVars(tableName),\n\t\tnewScope.AddToVars(s.databaseName(scope))))\n\tnewScope.SqlDB().QueryRow(newScope.Sql, newScope.SqlVars...).Scan(&count)\n\treturn count > 0\n}\n\nfunc (s *mysql) HasColumn(scope *Scope, tableName string, columnName string) bool {\n\tvar count int\n\tnewScope := scope.New(nil)\n\tnewScope.Raw(fmt.Sprintf(\"SELECT count(*) FROM information_schema.columns WHERE table_schema = %v AND table_name = %v AND column_name = %v\",\n\t\tnewScope.AddToVars(s.databaseName(scope)),\n\t\tnewScope.AddToVars(tableName),\n\t\tnewScope.AddToVars(columnName),\n\t))\n\tnewScope.SqlDB().QueryRow(newScope.Sql, newScope.SqlVars...).Scan(&count)\n\treturn count > 0\n}\n\nfunc (s *mysql) RemoveIndex(scope *Scope, indexName string) {\n\tscope.Raw(fmt.Sprintf(\"DROP INDEX %v ON %v\", indexName, scope.QuotedTableName())).Exec()\n}\n<commit_msg>use Datetime(3) as default database type for time.Time<commit_after>package gorm\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype mysql struct{}\n\nfunc (s *mysql) BinVar(i int) string {\n\treturn \"$$\" \/\/ ?\n}\n\nfunc (s *mysql) SupportLastInsertId() bool {\n\treturn true\n}\n\nfunc (s *mysql) HasTop() bool {\n\treturn false\n}\n\nfunc (s *mysql) SqlTag(value reflect.Value, size int) string {\n\tswitch value.Kind() {\n\tcase reflect.Bool:\n\t\treturn \"boolean\"\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr:\n\t\treturn \"int\"\n\tcase reflect.Int64, reflect.Uint64:\n\t\treturn \"bigint\"\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn \"double\"\n\tcase reflect.String:\n\t\tif size > 0 && size < 65532 {\n\t\t\treturn fmt.Sprintf(\"varchar(%d)\", size)\n\t\t}\n\t\treturn \"longtext\"\n\tcase reflect.Struct:\n\t\tif _, ok := value.Interface().(time.Time); ok {\n\t\t\treturn \"datetime(3) NULL\"\n\t\t}\n\tcase reflect.Array:\n\t\tif value.Type().Elem().Kind() == reflect.Uint8 {\n\t\t\treturn fmt.Sprintf(\"binary(%d)\", value.Len())\n\t\t}\n\tdefault:\n\t\tif _, ok := value.Interface().([]byte); ok {\n\t\t\tif size > 0 && size < 65532 {\n\t\t\t\treturn fmt.Sprintf(\"varbinary(%d)\", size)\n\t\t\t}\n\t\t\treturn \"longblob\"\n\t\t}\n\t}\n\tpanic(fmt.Sprintf(\"invalid sql type %s (%s) for mysql\", value.Type().Name(), value.Kind().String()))\n}\n\nfunc (s *mysql) PrimaryKeyTag(value reflect.Value, size int) string {\n\tsuffix := \" NOT NULL AUTO_INCREMENT PRIMARY KEY\"\n\tswitch value.Kind() {\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uintptr:\n\t\treturn \"int\" + suffix\n\tcase reflect.Int64, reflect.Uint64:\n\t\treturn \"bigint\" + suffix\n\tcase reflect.Array:\n\t\tif value.Type().Elem().Kind() == reflect.Uint8 {\n\t\t\treturn fmt.Sprintf(\"binary(%d) NOT NULL PRIMARY KEY\", value.Len())\n\t\t}\n\t\tfallthrough\n\tdefault:\n\t\tpanic(\"Invalid primary key type\")\n\t}\n}\n\nfunc (s *mysql) ReturningStr(tableName, key string) string {\n\treturn \"\"\n}\n\nfunc (s *mysql) SelectFromDummyTable() string {\n\treturn \"FROM DUAL\"\n}\n\nfunc (s *mysql) Quote(key string) string {\n\treturn fmt.Sprintf(\"`%s`\", key)\n}\n\nfunc (s *mysql) databaseName(scope *Scope) string {\n\tfrom := strings.Index(scope.db.parent.source, \"\/\") + 1\n\tto := strings.Index(scope.db.parent.source, \"?\")\n\tif to == -1 {\n\t\tto = len(scope.db.parent.source)\n\t}\n\treturn scope.db.parent.source[from:to]\n}\n\nfunc (s *mysql) HasTable(scope *Scope, tableName string) bool {\n\tvar count int\n\tnewScope := scope.New(nil)\n\tnewScope.Raw(fmt.Sprintf(\"SELECT count(*) FROM INFORMATION_SCHEMA.tables where table_name = %v AND table_schema = %v\",\n\t\tnewScope.AddToVars(tableName),\n\t\tnewScope.AddToVars(s.databaseName(scope))))\n\tnewScope.SqlDB().QueryRow(newScope.Sql, newScope.SqlVars...).Scan(&count)\n\treturn count > 0\n}\n\nfunc (s *mysql) HasColumn(scope *Scope, tableName string, columnName string) bool {\n\tvar count int\n\tnewScope := scope.New(nil)\n\tnewScope.Raw(fmt.Sprintf(\"SELECT count(*) FROM information_schema.columns WHERE table_schema = %v AND table_name = %v AND column_name = %v\",\n\t\tnewScope.AddToVars(s.databaseName(scope)),\n\t\tnewScope.AddToVars(tableName),\n\t\tnewScope.AddToVars(columnName),\n\t))\n\tnewScope.SqlDB().QueryRow(newScope.Sql, newScope.SqlVars...).Scan(&count)\n\treturn count > 0\n}\n\nfunc (s *mysql) RemoveIndex(scope *Scope, indexName string) {\n\tscope.Raw(fmt.Sprintf(\"DROP INDEX %v ON %v\", indexName, scope.QuotedTableName())).Exec()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/go:generate bitfanDoc\npackage inputsql\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/ShowMax\/go-fqdn\"\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t\"github.com\/vjeantet\/bitfan\/core\/location\"\n\t\"github.com\/vjeantet\/bitfan\/processors\"\n)\n\nfunc New() processors.Processor {\n\treturn &processor{opt: &options{}}\n}\n\ntype options struct {\n\t\/\/ If this filter is successful, add any arbitrary fields to this event.\n\tAdd_field map[string]interface{}\n\n\t\/\/ If this filter is successful, add arbitrary tags to the event. Tags can be dynamic\n\t\/\/ and include parts of the event using the %{field} syntax.\n\tTags []string\n\n\t\/\/ Add a type field to all events handled by this input\n\tType string\n\n\t\/\/ The codec used for input data. Input codecs are a convenient method for decoding\n\t\/\/ your data before it enters the input, without needing a separate filter in your bitfan pipeline\n\tCodec string\n\n\t\/\/ GOLANG driver class to load, for example, \"mysql\".\n\t\/\/ @ExampleLS driver => \"mysql\"\n\tDriver string `mapstructure:\"driver\" validate:\"required\"`\n\n\t\/\/ Send an event row by row or one event with all results\n\t\/\/ possible values \"row\", \"result\"\n\t\/\/ @Default \"row\"\n\tEventBy string `mapstructure:\"event_by\"`\n\n\t\/\/ SQL Statement\n\t\/\/ When there is more than 1 statement, only data from the last one will generate events.\n\t\/\/ @ExampleLS statement => \"SELECT * FROM mytable\"\n\tStatement string `mapstructure:\"statement\" validate:\"required\"`\n\n\t\/\/ Set an interval when this processor is used as a input\n\t\/\/ @ExampleLS interval => \"10\"\n\tInterval string `mapstructure:\"interval\" validate:\"required\"`\n\n\t\/\/ @ExampleLS connection_string => \"username:password@tcp(192.168.1.2:3306)\/mydatabase?charset=utf8\"\n\tConnectionString string `mapstructure:\"connection_string\" validate:\"required\"`\n\n\t\/\/ You can set variable to be used in Statements by using ${var}.\n\t\/\/ each reference will be replaced by the value of the variable found in Statement's content\n\t\/\/ The replacement is case-sensitive.\n\t\/\/ @ExampleLS var => {\"hostname\"=>\"myhost\",\"varname\"=>\"varvalue\"}\n\tVar map[string]string `mapstructure:\"var\"`\n\n\t\/\/ Define the target field for placing the retrieved data. If this setting is omitted,\n\t\/\/ the data will be stored in the \"data\" field\n\t\/\/ Set the value to \".\" to store value to the root (top level) of the event\n\t\/\/ @ExampleLS target => \"data\"\n\t\/\/ @Default \"data\"\n\tTarget string `mapstructure:\"target\"`\n}\n\ntype processor struct {\n\tprocessors.Base\n\tdb *sql.DB\n\topt *options\n\tq chan bool\n\thost string\n}\n\nfunc (p *processor) Configure(ctx processors.ProcessorContext, conf map[string]interface{}) error {\n\tdefaults := options{\n\t\tEventBy: \"row\",\n\t\tTarget: \"data\",\n\t}\n\n\tp.opt = &defaults\n\tp.host = fqdn.Get()\n\n\terr := p.ConfigureAndValidate(ctx, conf, p.opt)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif p.opt.Interval == \"\" {\n\t\tp.Logger.Warningln(\"No interval set\")\n\t}\n\n\tloc, err := location.NewLocation(p.opt.Statement, p.ConfigWorkingLocation)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontent, _, err := loc.ContentWithOptions(p.opt.Var)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.opt.Statement = string(content)\n\n\tp.db, err = sql.Open(p.opt.Driver, p.opt.ConnectionString)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = p.db.Ping()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (p *processor) Tick(e processors.IPacket) error {\n\treturn p.Receive(e)\n}\n\nfunc (p *processor) Receive(e processors.IPacket) error {\n\tp.opt.Statement = strings.Trim(p.opt.Statement, \";\")\n\treqs := strings.Split(p.opt.Statement, \";\")\n\tfor _, r := range reqs[:len(reqs)-1] {\n\t\tp.Logger.Debugf(\"db.Exec - %s\", r)\n\t\tp.db.Exec(r)\n\t}\n\n\tp.Logger.Debugf(\"db.Query - %s\", reqs[len(reqs)-1])\n\trows, err := p.db.Query(reqs[len(reqs)-1])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcolumns, err := rows.Columns()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tscanArgs := make([]interface{}, len(columns))\n\tvalues := make([]interface{}, len(columns))\n\tfor i := range values {\n\t\tscanArgs[i] = &values[i]\n\t}\n\tvar records []map[string]interface{}\n\tfor rows.Next() {\n\t\trecord := make(map[string]interface{})\n\t\terr = rows.Scan(scanArgs...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor i, col := range values {\n\t\t\tif col != nil {\n\t\t\t\t\/\/ fmt.Printf(\"\\n%s: type= %s\\n\", columns[i], reflect.TypeOf(col))\n\t\t\t\tswitch t := col.(type) {\n\t\t\t\tdefault:\n\t\t\t\t\tfmt.Printf(\"Unexpected type %T\\n\", t)\n\t\t\t\tcase bool:\n\t\t\t\t\trecord[columns[i]] = col.(bool)\n\t\t\t\tcase int:\n\t\t\t\t\trecord[columns[i]] = col.(int)\n\t\t\t\tcase int64:\n\t\t\t\t\trecord[columns[i]] = col.(int64)\n\t\t\t\tcase float64:\n\t\t\t\t\trecord[columns[i]] = col.(float64)\n\t\t\t\tcase string:\n\t\t\t\t\trecord[columns[i]] = col.(string)\n\t\t\t\tcase []byte: \/\/ -- all cases go HERE!\n\t\t\t\t\trecord[columns[i]] = string(col.([]byte))\n\t\t\t\t\t\/\/case time.Time:\n\t\t\t\t\t\/\/ record[columns[i]] = col.(string)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif p.opt.EventBy == \"row\" {\n\t\t\tvar e processors.IPacket\n\t\t\te = p.NewPacket(\"\", map[string]interface{}{})\n\t\t\te.Fields().SetValueForPath(p.host, \"host\")\n\t\t\tif len(p.opt.Var) > 0 {\n\t\t\t\te.Fields().SetValueForPath(p.opt.Var, \"var\")\n\t\t\t}\n\n\t\t\tif p.opt.Target == \".\" {\n\t\t\t\tfor k, v := range record {\n\t\t\t\t\te.Fields().SetValueForPath(v, k)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\te.Fields().SetValueForPath(record, p.opt.Target)\n\t\t\t}\n\n\t\t\tprocessors.ProcessCommonFields(e.Fields(), p.opt.Add_field, p.opt.Tags, p.opt.Type)\n\t\t\tp.Send(e)\n\t\t} else {\n\t\t\trecords = append(records, record)\n\t\t}\n\t}\n\n\trows.Close()\n\n\tif p.opt.EventBy != \"row\" {\n\t\te.Fields().SetValueForPath(p.host, \"host\")\n\t\tif len(p.opt.Var) > 0 {\n\t\t\te.Fields().SetValueForPath(p.opt.Var, \"var\")\n\t\t}\n\t\te.Fields().SetValueForPath(records, p.opt.Target)\n\n\t\tprocessors.ProcessCommonFields(e.Fields(), p.opt.Add_field, p.opt.Tags, p.opt.Type)\n\t\tp.Send(e)\n\t}\n\n\treturn nil\n}\n\nfunc (p *processor) Stop(e processors.IPacket) error {\n\tp.db.Close()\n\treturn nil\n}\n<commit_msg>processor sql : prepare processor to be used as input, filter and output !<commit_after>\/\/go:generate bitfanDoc\npackage inputsql\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/ShowMax\/go-fqdn\"\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t\"github.com\/vjeantet\/bitfan\/core\/location\"\n\t\"github.com\/vjeantet\/bitfan\/processors\"\n)\n\nfunc New() processors.Processor {\n\treturn &processor{opt: &options{}}\n}\n\ntype options struct {\n\t\/\/ If this filter is successful, add any arbitrary fields to this event.\n\tAdd_field map[string]interface{}\n\n\t\/\/ If this filter is successful, add arbitrary tags to the event. Tags can be dynamic\n\t\/\/ and include parts of the event using the %{field} syntax.\n\tTags []string\n\n\t\/\/ Add a type field to all events handled by this input\n\tType string\n\n\t\/\/ The codec used for input data. Input codecs are a convenient method for decoding\n\t\/\/ your data before it enters the input, without needing a separate filter in your bitfan pipeline\n\tCodec string\n\n\t\/\/ GOLANG driver class to load, for example, \"mysql\".\n\t\/\/ @ExampleLS driver => \"mysql\"\n\tDriver string `mapstructure:\"driver\" validate:\"required\"`\n\n\t\/\/ Send an event row by row or one event with all results\n\t\/\/ possible values \"row\", \"result\"\n\t\/\/ @Default \"row\"\n\tEventBy string `mapstructure:\"event_by\"`\n\n\t\/\/ SQL Statement\n\t\/\/ When there is more than 1 statement, only data from the last one will generate events.\n\t\/\/ @ExampleLS statement => \"SELECT * FROM mytable\"\n\tStatement string `mapstructure:\"statement\" validate:\"required\"`\n\n\t\/\/ Set an interval when this processor is used as a input\n\t\/\/ @ExampleLS interval => \"10\"\n\tInterval string `mapstructure:\"interval\" validate:\"required\"`\n\n\t\/\/ @ExampleLS connection_string => \"username:password@tcp(192.168.1.2:3306)\/mydatabase?charset=utf8\"\n\tConnectionString string `mapstructure:\"connection_string\" validate:\"required\"`\n\n\t\/\/ You can set variable to be used in Statements by using ${var}.\n\t\/\/ each reference will be replaced by the value of the variable found in Statement's content\n\t\/\/ The replacement is case-sensitive.\n\t\/\/ @ExampleLS var => {\"hostname\"=>\"myhost\",\"varname\"=>\"varvalue\"}\n\tVar map[string]string `mapstructure:\"var\"`\n\n\t\/\/ Define the target field for placing the retrieved data. If this setting is omitted,\n\t\/\/ the data will be stored in the \"data\" field\n\t\/\/ Set the value to \".\" to store value to the root (top level) of the event\n\t\/\/ @ExampleLS target => \"data\"\n\t\/\/ @Default \"data\"\n\tTarget string `mapstructure:\"target\"`\n}\n\ntype processor struct {\n\tprocessors.Base\n\tdb *sql.DB\n\topt *options\n\tq chan bool\n\thost string\n}\n\nfunc (p *processor) Configure(ctx processors.ProcessorContext, conf map[string]interface{}) error {\n\tdefaults := options{\n\t\tEventBy: \"row\",\n\t\tTarget: \"data\",\n\t}\n\n\tp.opt = &defaults\n\tp.host = fqdn.Get()\n\n\terr := p.ConfigureAndValidate(ctx, conf, p.opt)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif p.opt.Interval == \"\" {\n\t\tp.Logger.Warningln(\"No interval set\")\n\t}\n\n\tloc, err := location.NewLocation(p.opt.Statement, p.ConfigWorkingLocation)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontent, _, err := loc.ContentWithOptions(p.opt.Var)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.opt.Statement = string(content)\n\n\tp.db, err = sql.Open(p.opt.Driver, p.opt.ConnectionString)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = p.db.Ping()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (p *processor) Tick(e processors.IPacket) error {\n\treturn p.Receive(e)\n}\n\nfunc (p *processor) Receive(e processors.IPacket) error {\n\tp.opt.Statement = strings.Trim(p.opt.Statement, \";\")\n\treqs := strings.Split(p.opt.Statement, \";\")\n\tfor _, r := range reqs[:len(reqs)-1] {\n\t\tp.Logger.Debugf(\"db.Exec - %s\", r)\n\t\tp.db.Exec(r)\n\t}\n\n\tp.Logger.Debugf(\"db.Query - %s\", reqs[len(reqs)-1])\n\trows, err := p.db.Query(reqs[len(reqs)-1])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcolumns, err := rows.Columns()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tscanArgs := make([]interface{}, len(columns))\n\tvalues := make([]interface{}, len(columns))\n\tfor i := range values {\n\t\tscanArgs[i] = &values[i]\n\t}\n\tvar records []map[string]interface{}\n\tfor rows.Next() {\n\t\trecord := make(map[string]interface{})\n\t\terr = rows.Scan(scanArgs...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor i, col := range values {\n\t\t\tif col != nil {\n\t\t\t\t\/\/ fmt.Printf(\"\\n%s: type= %s\\n\", columns[i], reflect.TypeOf(col))\n\t\t\t\tswitch t := col.(type) {\n\t\t\t\tdefault:\n\t\t\t\t\tfmt.Printf(\"Unexpected type %T\\n\", t)\n\t\t\t\tcase bool:\n\t\t\t\t\trecord[columns[i]] = col.(bool)\n\t\t\t\tcase int:\n\t\t\t\t\trecord[columns[i]] = col.(int)\n\t\t\t\tcase int64:\n\t\t\t\t\trecord[columns[i]] = col.(int64)\n\t\t\t\tcase float64:\n\t\t\t\t\trecord[columns[i]] = col.(float64)\n\t\t\t\tcase string:\n\t\t\t\t\trecord[columns[i]] = col.(string)\n\t\t\t\tcase []byte: \/\/ -- all cases go HERE!\n\t\t\t\t\trecord[columns[i]] = string(col.([]byte))\n\t\t\t\t\t\/\/case time.Time:\n\t\t\t\t\t\/\/ record[columns[i]] = col.(string)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif p.opt.EventBy == \"row\" {\n\t\t\tvar e2 processors.IPacket\n\t\t\te2 = e.Clone()\n\t\t\te2.Fields().SetValueForPath(p.host, \"host\")\n\t\t\tif len(p.opt.Var) > 0 {\n\t\t\t\te2.Fields().SetValueForPath(p.opt.Var, \"var\")\n\t\t\t}\n\n\t\t\tif p.opt.Target == \".\" {\n\t\t\t\tfor k, v := range record {\n\t\t\t\t\te2.Fields().SetValueForPath(v, k)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\te2.Fields().SetValueForPath(record, p.opt.Target)\n\t\t\t}\n\n\t\t\tprocessors.ProcessCommonFields(e2.Fields(), p.opt.Add_field, p.opt.Tags, p.opt.Type)\n\t\t\tp.Send(e2)\n\t\t} else {\n\t\t\trecords = append(records, record)\n\t\t}\n\t}\n\n\trows.Close()\n\n\tif p.opt.EventBy != \"row\" {\n\t\te.Fields().SetValueForPath(p.host, \"host\")\n\t\tif len(p.opt.Var) > 0 {\n\t\t\te.Fields().SetValueForPath(p.opt.Var, \"var\")\n\t\t}\n\t\te.Fields().SetValueForPath(records, p.opt.Target)\n\n\t\tprocessors.ProcessCommonFields(e.Fields(), p.opt.Add_field, p.opt.Tags, p.opt.Type)\n\t\tp.Send(e)\n\t}\n\n\treturn nil\n}\n\nfunc (p *processor) Stop(e processors.IPacket) error {\n\tp.db.Close()\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage mungers\n\nimport (\n\t\"path\"\n\t\"strings\"\n\n\t\"k8s.io\/contrib\/mungegithub\/features\"\n\t\"k8s.io\/contrib\/mungegithub\/github\"\n\t\"k8s.io\/contrib\/mungegithub\/mungers\/mungerutil\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/sets\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nconst maxDepth = 3\n\n\/\/ ApprovalHandler will try to add \"approved\" label once\n\/\/ all files of change has been approved by approvers.\ntype ApprovalHandler struct {\n\tfeatures *features.Features\n}\n\nfunc init() {\n\th := &ApprovalHandler{}\n\tRegisterMungerOrDie(h)\n}\n\n\/\/ Name is the name usable in --pr-mungers\nfunc (*ApprovalHandler) Name() string { return \"approval-handler\" }\n\n\/\/ RequiredFeatures is a slice of 'features' that must be provided\nfunc (*ApprovalHandler) RequiredFeatures() []string {\n\treturn []string{features.RepoFeatureName, features.AliasesFeature}\n}\n\n\/\/ Initialize will initialize the munger\nfunc (h *ApprovalHandler) Initialize(config *github.Config, features *features.Features) error {\n\th.features = features\n\treturn nil\n}\n\n\/\/ EachLoop is called at the start of every munge loop\nfunc (*ApprovalHandler) EachLoop() error { return nil }\n\n\/\/ AddFlags will add any request flags to the cobra `cmd`\nfunc (*ApprovalHandler) AddFlags(cmd *cobra.Command, config *github.Config) {}\n\n\/\/ Munge is the workhorse the will actually make updates to the PR\n\/\/ The algorithm goes as:\n\/\/ - Initially, we set up approverSet\n\/\/ - Go through all comments after latest commit. If any approver said \"\/approve\", add him to approverSet.\n\/\/ - For each file, we see if any approver of this file is in approverSet.\n\/\/ - An approver of a file is defined as:\n\/\/ - It's known that each dir has a list of approvers. (This might not hold true. For usability, current situation is enough.)\n\/\/ - Approver of a dir is also the approver of child dirs.\n\/\/ - We look at top N (default 3) level dir approvers. For example, for file \"\/a\/b\/c\/d\/e\", we might search for approver from\n\/\/ \"\/\", \"\/a\/\", \"\/a\/b\/\"\n\/\/ - Iff all files has been approved, the bot will add \"approved\" label.\nfunc (h *ApprovalHandler) Munge(obj *github.MungeObject) {\n\tif !obj.IsPR() {\n\t\treturn\n\t}\n\tfiles, err := obj.ListFiles()\n\tif err != nil {\n\t\tglog.Errorf(\"failed to list files in this PR: %v\", err)\n\t\treturn\n\t}\n\n\tcomments, err := getCommentsAfterLastModified(obj)\n\tif err != nil {\n\t\tglog.Errorf(\"failed to get comments in this PR: %v\", err)\n\t\treturn\n\t}\n\n\tapproverSet := sets.String{}\n\n\t\/\/ from oldest to latest\n\tfor i := len(comments) - 1; i >= 0; i-- {\n\t\tc := comments[i]\n\n\t\tif !mungerutil.IsValidUser(c.User) {\n\t\t\tcontinue\n\t\t}\n\n\t\tfields := strings.Fields(strings.TrimSpace(*c.Body))\n\n\t\tif len(fields) == 1 && strings.ToLower(fields[0]) == \"\/approve\" {\n\t\t\tapproverSet.Insert(*c.User.Login)\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(fields) == 2 && strings.ToLower(fields[0]) == \"\/approve\" && strings.ToLower(fields[1]) == \"cancel\" {\n\t\t\tapproverSet.Delete(*c.User.Login)\n\t\t}\n\t}\n\n\tfor _, file := range files {\n\t\tif !h.hasApproval(*file.Filename, approverSet, maxDepth) {\n\t\t\treturn\n\t\t}\n\t}\n\tobj.AddLabel(approvedLabel)\n}\n\nfunc (h *ApprovalHandler) hasApproval(filename string, approverSet sets.String, depth int) bool {\n\tpaths := strings.Split(filename, \"\/\")\n\tp := \"\"\n\tfor i := 0; i < len(paths) && i < depth; i++ {\n\t\tfileOwners := h.features.Repos.LeafAssignees(p)\n\t\tif fileOwners.Len() == 0 {\n\t\t\tglog.Warningf(\"Couldn't find an owner for path (%s)\", p)\n\t\t\tcontinue\n\t\t}\n\n\t\tif h.features.Aliases != nil && h.features.Aliases.IsEnabled {\n\t\t\tfileOwners = h.features.Aliases.Expand(fileOwners)\n\t\t}\n\n\t\tfor _, owner := range fileOwners.List() {\n\t\t\tif approverSet.Has(owner) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tp = path.Join(p, paths[i])\n\t}\n\treturn false\n}\n<commit_msg>Un-delete getCommentsAfterLastModified.<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage mungers\n\nimport (\n\t\"path\"\n\t\"strings\"\n\n\t\"k8s.io\/contrib\/mungegithub\/features\"\n\t\"k8s.io\/contrib\/mungegithub\/github\"\n\t\"k8s.io\/contrib\/mungegithub\/mungers\/mungerutil\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/sets\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nconst maxDepth = 3\n\n\/\/ ApprovalHandler will try to add \"approved\" label once\n\/\/ all files of change has been approved by approvers.\ntype ApprovalHandler struct {\n\tfeatures *features.Features\n}\n\nfunc init() {\n\th := &ApprovalHandler{}\n\tRegisterMungerOrDie(h)\n}\n\n\/\/ Name is the name usable in --pr-mungers\nfunc (*ApprovalHandler) Name() string { return \"approval-handler\" }\n\n\/\/ RequiredFeatures is a slice of 'features' that must be provided\nfunc (*ApprovalHandler) RequiredFeatures() []string {\n\treturn []string{features.RepoFeatureName, features.AliasesFeature}\n}\n\n\/\/ Initialize will initialize the munger\nfunc (h *ApprovalHandler) Initialize(config *github.Config, features *features.Features) error {\n\th.features = features\n\treturn nil\n}\n\n\/\/ EachLoop is called at the start of every munge loop\nfunc (*ApprovalHandler) EachLoop() error { return nil }\n\n\/\/ AddFlags will add any request flags to the cobra `cmd`\nfunc (*ApprovalHandler) AddFlags(cmd *cobra.Command, config *github.Config) {}\n\n\/\/ Munge is the workhorse the will actually make updates to the PR\n\/\/ The algorithm goes as:\n\/\/ - Initially, we set up approverSet\n\/\/ - Go through all comments after latest commit. If any approver said \"\/approve\", add him to approverSet.\n\/\/ - For each file, we see if any approver of this file is in approverSet.\n\/\/ - An approver of a file is defined as:\n\/\/ - It's known that each dir has a list of approvers. (This might not hold true. For usability, current situation is enough.)\n\/\/ - Approver of a dir is also the approver of child dirs.\n\/\/ - We look at top N (default 3) level dir approvers. For example, for file \"\/a\/b\/c\/d\/e\", we might search for approver from\n\/\/ \"\/\", \"\/a\/\", \"\/a\/b\/\"\n\/\/ - Iff all files has been approved, the bot will add \"approved\" label.\nfunc (h *ApprovalHandler) Munge(obj *github.MungeObject) {\n\tif !obj.IsPR() {\n\t\treturn\n\t}\n\tfiles, err := obj.ListFiles()\n\tif err != nil {\n\t\tglog.Errorf(\"failed to list files in this PR: %v\", err)\n\t\treturn\n\t}\n\n\tcomments, err := getCommentsAfterLastModified(obj)\n\tif err != nil {\n\t\tglog.Errorf(\"failed to get comments in this PR: %v\", err)\n\t\treturn\n\t}\n\n\tapproverSet := sets.String{}\n\n\t\/\/ from oldest to latest\n\tfor i := len(comments) - 1; i >= 0; i-- {\n\t\tc := comments[i]\n\n\t\tif !mungerutil.IsValidUser(c.User) {\n\t\t\tcontinue\n\t\t}\n\n\t\tfields := strings.Fields(strings.TrimSpace(*c.Body))\n\n\t\tif len(fields) == 1 && strings.ToLower(fields[0]) == \"\/approve\" {\n\t\t\tapproverSet.Insert(*c.User.Login)\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(fields) == 2 && strings.ToLower(fields[0]) == \"\/approve\" && strings.ToLower(fields[1]) == \"cancel\" {\n\t\t\tapproverSet.Delete(*c.User.Login)\n\t\t}\n\t}\n\n\tfor _, file := range files {\n\t\tif !h.hasApproval(*file.Filename, approverSet, maxDepth) {\n\t\t\treturn\n\t\t}\n\t}\n\tobj.AddLabel(approvedLabel)\n}\n\nfunc (h *ApprovalHandler) hasApproval(filename string, approverSet sets.String, depth int) bool {\n\tpaths := strings.Split(filename, \"\/\")\n\tp := \"\"\n\tfor i := 0; i < len(paths) && i < depth; i++ {\n\t\tfileOwners := h.features.Repos.LeafAssignees(p)\n\t\tif fileOwners.Len() == 0 {\n\t\t\tglog.Warningf(\"Couldn't find an owner for path (%s)\", p)\n\t\t\tcontinue\n\t\t}\n\n\t\tif h.features.Aliases != nil && h.features.Aliases.IsEnabled {\n\t\t\tfileOwners = h.features.Aliases.Expand(fileOwners)\n\t\t}\n\n\t\tfor _, owner := range fileOwners.List() {\n\t\t\tif approverSet.Has(owner) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\tp = path.Join(p, paths[i])\n\t}\n\treturn false\n}\n\nfunc getCommentsAfterLastModified(obj *github.MungeObject) ([]*githubapi.IssueComment, error) {\n\tafterLastModified := func(opt *githubapi.IssueListCommentsOptions) *githubapi.IssueListCommentsOptions {\n\t\t\/\/ Only comments updated at or after this time are returned.\n\t\t\/\/ One possible case is that reviewer might \"\/lgtm\" first, contributor updated PR, and reviewer updated \"\/lgtm\".\n\t\t\/\/ This is still valid. We don't recommend user to update it.\n\t\tlastModified := *obj.LastModifiedTime()\n\t\topt.Since = lastModified\n\t\treturn opt\n\t}\n\treturn obj.ListComments(afterLastModified)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux darwin freebsd\n\npackage commands\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\tcmds \"github.com\/jbenet\/go-ipfs\/commands\"\n\t\"github.com\/jbenet\/go-ipfs\/config\"\n\tcore \"github.com\/jbenet\/go-ipfs\/core\"\n\tipns \"github.com\/jbenet\/go-ipfs\/fuse\/ipns\"\n\trofs \"github.com\/jbenet\/go-ipfs\/fuse\/readonly\"\n)\n\n\/\/ amount of time to wait for mount errors\n\/\/ TODO is this non-deterministic?\nconst mountTimeout = time.Second\n\nvar mountCmd = &cmds.Command{\n\tHelptext: cmds.HelpText{\n\t\tTagline: \"Mounts IPFS to the filesystem (read-only)\",\n\t\tShortDescription: `\nMount ipfs at a read-only mountpoint on the OS. All ipfs objects\nwill be accessible under that directory. Note that the root will\nnot be listable, as it is virtual. Accessing known paths directly.\n`,\n\t},\n\n\tOptions: []cmds.Option{\n\t\t\/\/ TODO longform\n\t\tcmds.StringOption(\"f\", \"The path where IPFS should be mounted\"),\n\n\t\t\/\/ TODO longform\n\t\tcmds.StringOption(\"n\", \"The path where IPNS should be mounted\"),\n\t},\n\tRun: func(req cmds.Request) (interface{}, error) {\n\t\tcfg, err := req.Context().GetConfig()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tnode, err := req.Context().GetNode()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ error if we aren't running node in online mode\n\t\tif node.Network == nil {\n\t\t\treturn nil, errNotOnline\n\t\t}\n\n\t\tif err := platformFuseChecks(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfsdir, found, err := req.Option(\"f\").String()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !found {\n\t\t\tfsdir = cfg.Mounts.IPFS \/\/ use default value\n\t\t}\n\t\tfsdone := mountIpfs(node, fsdir)\n\n\t\t\/\/ get default mount points\n\t\tnsdir, found, err := req.Option(\"n\").String()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !found {\n\t\t\tnsdir = cfg.Mounts.IPNS \/\/ NB: be sure to not redeclare!\n\t\t}\n\n\t\tnsdone := mountIpns(node, nsdir, fsdir)\n\n\t\t\/\/ wait until mounts return an error (or timeout if successful)\n\t\tselect {\n\t\tcase err := <-fsdone:\n\t\t\treturn nil, err\n\t\tcase err := <-nsdone:\n\t\t\treturn nil, err\n\n\t\t\/\/ mounted successfully, we timed out with no errors\n\t\tcase <-time.After(mountTimeout):\n\t\t\toutput := cfg.Mounts\n\t\t\treturn &output, nil\n\t\t}\n\t},\n\tType: &config.Mounts{},\n\tMarshalers: cmds.MarshalerMap{\n\t\tcmds.Text: func(res cmds.Response) ([]byte, error) {\n\t\t\tv := res.Output().(*config.Mounts)\n\t\t\ts := fmt.Sprintf(\"IPFS mounted at: %s\\n\", v.IPFS)\n\t\t\ts += fmt.Sprintf(\"IPNS mounted at: %s\\n\", v.IPNS)\n\t\t\treturn []byte(s), nil\n\t\t},\n\t},\n}\n\nfunc mountIpfs(node *core.IpfsNode, fsdir string) <-chan error {\n\tdone := make(chan error)\n\tlog.Info(\"Mounting IPFS at \", fsdir)\n\n\tgo func() {\n\t\terr := rofs.Mount(node, fsdir)\n\t\tdone <- err\n\t\tclose(done)\n\t}()\n\n\treturn done\n}\n\nfunc mountIpns(node *core.IpfsNode, nsdir, fsdir string) <-chan error {\n\tif nsdir == \"\" {\n\t\treturn nil\n\t}\n\tdone := make(chan error)\n\tlog.Info(\"Mounting IPNS at \", nsdir)\n\n\tgo func() {\n\t\terr := ipns.Mount(node, nsdir, fsdir)\n\t\tdone <- err\n\t\tclose(done)\n\t}()\n\n\treturn done\n}\n\nvar platformFuseChecks = func() error {\n\treturn nil\n}\n<commit_msg>cmds2: mount exmplae<commit_after>\/\/ +build linux darwin freebsd\n\npackage commands\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\tcmds \"github.com\/jbenet\/go-ipfs\/commands\"\n\t\"github.com\/jbenet\/go-ipfs\/config\"\n\tcore \"github.com\/jbenet\/go-ipfs\/core\"\n\tipns \"github.com\/jbenet\/go-ipfs\/fuse\/ipns\"\n\trofs \"github.com\/jbenet\/go-ipfs\/fuse\/readonly\"\n)\n\n\/\/ amount of time to wait for mount errors\n\/\/ TODO is this non-deterministic?\nconst mountTimeout = time.Second\n\nvar mountCmd = &cmds.Command{\n\tHelptext: cmds.HelpText{\n\t\tTagline: \"Mounts IPFS to the filesystem (read-only)\",\n\t\tShortDescription: `\nMount ipfs at a read-only mountpoint on the OS (default: \/ipfs and \/ipns).\nAll ipfs objects will be accessible under that directory. Note that the\nroot will not be listable, as it is virtual. Access known paths directly.\n\nYou may kave to create \/ipfs and \/ipfs before using 'ipfs mount':\n\n> sudo mkdir \/ipfs \/ipns\n> sudo chown ` + \"`\" + `whoami` + \"`\" + ` \/ipfs \/ipns\n> ipfs mount\n`,\n\t\tLongDescription: `\nMount ipfs at a read-only mountpoint on the OS (default: \/ipfs and \/ipns).\nAll ipfs objects will be accessible under that directory. Note that the\nroot will not be listable, as it is virtual. Access known paths directly.\n\n> sudo mkdir \/ipfs \/ipns\n> sudo chown ` + \"`\" + `whoami` + \"`\" + ` \/ipfs \/ipns\n> ipfs mount\n\nEXAMPLE:\n\n# setup\n> mkdir foo\n> echo \"baz\" > foo\/bar\n> ipfs add -r foo\nadded QmWLdkp93sNxGRjnFHPaYg8tCQ35NBY3XPn6KiETd3Z4WR foo\/bar\nadded QmSh5e7S6fdcu75LAbXNZAFY2nGyZUJXyLCJDvn2zRkWyC foo\n> ipfs ls QmSh5e7S6fdcu75LAbXNZAFY2nGyZUJXyLCJDvn2zRkWyC\nQmWLdkp93sNxGRjnFHPaYg8tCQ35NBY3XPn6KiETd3Z4WR 12 bar\n> ipfs cat QmWLdkp93sNxGRjnFHPaYg8tCQ35NBY3XPn6KiETd3Z4WR\nbaz\n\n# mount\n> ipfs daemon &\n> ipfs mount\nIPFS mounted at: \/ipfs\nIPNS mounted at: \/ipns\n> cd \/ipfs\/QmSh5e7S6fdcu75LAbXNZAFY2nGyZUJXyLCJDvn2zRkWyC\n> ls\nbar\n> cat bar\nbaz\n> cat \/ipfs\/QmSh5e7S6fdcu75LAbXNZAFY2nGyZUJXyLCJDvn2zRkWyC\/bar\nbaz\n> cat \/ipfs\/QmWLdkp93sNxGRjnFHPaYg8tCQ35NBY3XPn6KiETd3Z4WR\nbaz\n`,\n\t},\n\n\tOptions: []cmds.Option{\n\t\t\/\/ TODO longform\n\t\tcmds.StringOption(\"f\", \"The path where IPFS should be mounted\"),\n\n\t\t\/\/ TODO longform\n\t\tcmds.StringOption(\"n\", \"The path where IPNS should be mounted\"),\n\t},\n\tRun: func(req cmds.Request) (interface{}, error) {\n\t\tcfg, err := req.Context().GetConfig()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tnode, err := req.Context().GetNode()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ error if we aren't running node in online mode\n\t\tif node.Network == nil {\n\t\t\treturn nil, errNotOnline\n\t\t}\n\n\t\tif err := platformFuseChecks(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfsdir, found, err := req.Option(\"f\").String()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !found {\n\t\t\tfsdir = cfg.Mounts.IPFS \/\/ use default value\n\t\t}\n\t\tfsdone := mountIpfs(node, fsdir)\n\n\t\t\/\/ get default mount points\n\t\tnsdir, found, err := req.Option(\"n\").String()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !found {\n\t\t\tnsdir = cfg.Mounts.IPNS \/\/ NB: be sure to not redeclare!\n\t\t}\n\n\t\tnsdone := mountIpns(node, nsdir, fsdir)\n\n\t\t\/\/ wait until mounts return an error (or timeout if successful)\n\t\tselect {\n\t\tcase err := <-fsdone:\n\t\t\treturn nil, err\n\t\tcase err := <-nsdone:\n\t\t\treturn nil, err\n\n\t\t\/\/ mounted successfully, we timed out with no errors\n\t\tcase <-time.After(mountTimeout):\n\t\t\toutput := cfg.Mounts\n\t\t\treturn &output, nil\n\t\t}\n\t},\n\tType: &config.Mounts{},\n\tMarshalers: cmds.MarshalerMap{\n\t\tcmds.Text: func(res cmds.Response) ([]byte, error) {\n\t\t\tv := res.Output().(*config.Mounts)\n\t\t\ts := fmt.Sprintf(\"IPFS mounted at: %s\\n\", v.IPFS)\n\t\t\ts += fmt.Sprintf(\"IPNS mounted at: %s\\n\", v.IPNS)\n\t\t\treturn []byte(s), nil\n\t\t},\n\t},\n}\n\nfunc mountIpfs(node *core.IpfsNode, fsdir string) <-chan error {\n\tdone := make(chan error)\n\tlog.Info(\"Mounting IPFS at \", fsdir)\n\n\tgo func() {\n\t\terr := rofs.Mount(node, fsdir)\n\t\tdone <- err\n\t\tclose(done)\n\t}()\n\n\treturn done\n}\n\nfunc mountIpns(node *core.IpfsNode, nsdir, fsdir string) <-chan error {\n\tif nsdir == \"\" {\n\t\treturn nil\n\t}\n\tdone := make(chan error)\n\tlog.Info(\"Mounting IPNS at \", nsdir)\n\n\tgo func() {\n\t\terr := ipns.Mount(node, nsdir, fsdir)\n\t\tdone <- err\n\t\tclose(done)\n\t}()\n\n\treturn done\n}\n\nvar platformFuseChecks = func() error {\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package core\n\nimport (\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n\t\"pfi\/sensorbee\/sensorbee\/core\/tuple\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestDefaultTopology(t *testing.T) {\n\tConvey(\"When creating a default topology builder\", t, func() {\n\t\tvar tb StaticTopologyBuilder = NewDefaultStaticTopologyBuilder()\n\t\tSo(tb, ShouldNotBeNil)\n\t})\n\n\tConvey(\"Given a default topology builder\", t, func() {\n\t\ttb := NewDefaultStaticTopologyBuilder()\n\t\ts := &DefaultSource{}\n\t\tb := &DefaultBox{}\n\t\tsi := &DefaultSink{}\n\t\tvar err DeclarerError\n\n\t\tConvey(\"when using a source name twice\", func() {\n\t\t\terr = tb.AddSource(\"mySource\", s)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Err(), ShouldBeNil)\n\n\t\t\terr = tb.AddSource(\"mySource\", s)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"the second time should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when using a source name with a box name\", func() {\n\t\t\terr = tb.AddBox(\"someName\", b)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Err(), ShouldBeNil)\n\n\t\t\terr = tb.AddSource(\"someName\", s)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when using a source name with a sink name\", func() {\n\t\t\terr = tb.AddSink(\"someName\", si)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Err(), ShouldBeNil)\n\n\t\t\terr = tb.AddSource(\"someName\", s)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when using a box name twice\", func() {\n\t\t\terr = tb.AddBox(\"myBox\", b)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Err(), ShouldBeNil)\n\n\t\t\terr = tb.AddBox(\"myBox\", b)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"the second time should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when using a box name with a source name\", func() {\n\t\t\terr = tb.AddSource(\"someName\", s)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Err(), ShouldBeNil)\n\n\t\t\terr = tb.AddBox(\"someName\", b)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when using a box name with a sink name\", func() {\n\t\t\terr = tb.AddSink(\"someName\", si)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Err(), ShouldBeNil)\n\n\t\t\terr = tb.AddBox(\"someName\", b)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when using a sink name twice\", func() {\n\t\t\terr = tb.AddSink(\"mySink\", si)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Err(), ShouldBeNil)\n\n\t\t\terr = tb.AddSink(\"mySink\", si)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"the second time should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when using a sink name with a source name\", func() {\n\t\t\terr = tb.AddSource(\"someName\", s)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Err(), ShouldBeNil)\n\n\t\t\terr = tb.AddSink(\"someName\", si)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when using a sink name with a box name\", func() {\n\t\t\terr = tb.AddBox(\"someName\", b)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Err(), ShouldBeNil)\n\n\t\t\terr = tb.AddSink(\"someName\", si)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\t})\n\n\tConvey(\"Given a default topology builder with a source\", t, func() {\n\t\ttb := NewDefaultStaticTopologyBuilder()\n\t\ts := &DefaultSource{}\n\t\ttb.AddSource(\"aSource\", s)\n\t\tb := &DefaultBox{}\n\t\ttb.AddBox(\"aBox\", b)\n\t\tsi := &DefaultSink{}\n\t\tvar err DeclarerError\n\n\t\tConvey(\"when a new box references a non-existing item\", func() {\n\t\t\terr = tb.AddBox(\"otherBox\", b).\n\t\t\t\tInput(\"something\", nil)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new box references an existing source\", func() {\n\t\t\terr = tb.AddBox(\"otherBox\", b).\n\t\t\t\tInput(\"aSource\", nil)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should work\", func() {\n\t\t\t\tSo(err.Err(), ShouldBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new box references an existing box\", func() {\n\t\t\terr = tb.AddBox(\"otherBox\", b).\n\t\t\t\tInput(\"aBox\", nil)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should work\", func() {\n\t\t\t\tSo(err.Err(), ShouldBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new box references multiple items\", func() {\n\t\t\terr = tb.AddBox(\"otherBox\", b).\n\t\t\t\tInput(\"aBox\", nil).\n\t\t\t\tInput(\"aSource\", nil)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should work\", func() {\n\t\t\t\tSo(err.Err(), ShouldBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new box references an existing source twice\", func() {\n\t\t\terr = tb.AddBox(\"otherBox\", b).\n\t\t\t\tInput(\"aSource\", nil).\n\t\t\t\tInput(\"aSource\", nil)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new box references an existing box twice\", func() {\n\t\t\terr = tb.AddBox(\"otherBox\", b).\n\t\t\t\tInput(\"aBox\", nil).\n\t\t\t\tInput(\"aBox\", nil)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new sink references a non-existing item\", func() {\n\t\t\terr = tb.AddSink(\"aSink\", si).\n\t\t\t\tInput(\"something\")\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new sink references an existing source\", func() {\n\t\t\terr = tb.AddSink(\"aSink\", si).\n\t\t\t\tInput(\"aSource\")\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should work\", func() {\n\t\t\t\tSo(err.Err(), ShouldBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new sink references an existing box\", func() {\n\t\t\terr = tb.AddSink(\"aSink\", si).\n\t\t\t\tInput(\"aBox\")\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should work\", func() {\n\t\t\t\tSo(err.Err(), ShouldBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new sink references multiple items\", func() {\n\t\t\terr = tb.AddSink(\"aSink\", si).\n\t\t\t\tInput(\"aBox\").\n\t\t\t\tInput(\"aSource\")\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should work\", func() {\n\t\t\t\tSo(err.Err(), ShouldBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new sink references an existing source twice\", func() {\n\t\t\terr = tb.AddSink(\"aSink\", si).\n\t\t\t\tInput(\"aSource\").\n\t\t\t\tInput(\"aSource\")\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new sink references an existing box twice\", func() {\n\t\t\terr = tb.AddSink(\"aSink\", si).\n\t\t\t\tInput(\"aBox\").\n\t\t\t\tInput(\"aBox\")\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\t})\n\n\t\/* TODO Add tests that show the correct data transport in DefaultTopology.\n\t * We should remove the Default* structs from defaulttopology.go and\n\t * add some replacements in this file that allow us to check data\n\t * flow. For example, a TestSource could just emit two messages with\n\t * different IDs and a TestSink could store all received messages\n\t * in a local array. Then after Run() is finished, check whether all\n\t * items arrived at the sink(s) in the correct order and where processed\n\t * correctly by all intermediate boxes.\n\t *\/\n\tConvey(\"Given a default topology\", t, func() {\n\n\t\ttb := NewDefaultStaticTopologyBuilder()\n\t\ts1 := &DummyDefaultSource{\"value\"}\n\t\ttb.AddSource(\"Source1\", s1)\n\t\ts2 := &DummyDefaultSource{\"test\"}\n\t\ttb.AddSource(\"Source2\", s2)\n\t\ts3 := &DummyDefaultSource{\"hoge\"}\n\t\ttb.AddSource(\"Source3\", s3)\n\t\ts4 := &DummyDefaultSource{\"fuga\"}\n\t\ttb.AddSource(\"Source4\", s4)\n\t\ts5 := &DummyDefaultSource{\"foo\"}\n\t\ttb.AddSource(\"Source5\", s5)\n\t\tb1 := BoxFunc(dummyToUpperBoxFunc)\n\t\ttb.AddBox(\"aBox\", &b1).Input(\"Source1\", nil).\n\t\t\tInput(\"Source2\", nil).\n\t\t\tInput(\"Source3\", nil).\n\t\t\tInput(\"Source4\", nil).\n\t\t\tInput(\"Source5\", nil)\n\t\tb2 := BoxFunc(dummyFilterBoxFunc)\n\t\ttb.AddBox(\"bBox\", &b2).Input(\"aBox\", nil)\n\t\tsi := &DummyDefaultSink{}\n\t\ttb.AddSink(\"si\", si).Input(\"aBox\")\n\t\tt := tb.Build()\n\t\tConvey(\"Run topology\", func() {\n\t\t\tt.Run()\n\t\t\tSo(si.result, ShouldEqual, \"HOGE\")\n\t\t})\n\t})\n\n}\n\ntype DummyDefaultSource struct{ initial string }\n\nfunc (this *DummyDefaultSource) GenerateStream(w Writer) error {\n\tt := &tuple.Tuple{}\n\tt.Data = tuple.Map{\n\t\t\"source\": tuple.String(this.initial),\n\t}\n\tw.Write(t)\n\treturn nil\n}\nfunc (this *DummyDefaultSource) Schema() *Schema {\n\tvar s Schema = Schema(\"test\")\n\treturn &s\n}\n\nfunc dummyToUpperBoxFunc(t *tuple.Tuple, w Writer) error {\n\tx, _ := t.Data.Get(\"source\")\n\ts, _ := x.String()\n\tt.Data = tuple.Map{\n\t\t\"source\": tuple.String(strings.ToUpper(string(s))),\n\t}\n\tw.Write(t)\n\treturn nil\n}\n\nfunc dummyFilterBoxFunc(t *tuple.Tuple, w Writer) error {\n\tx, _ := t.Data.Get(\"source\")\n\ts, _ := x.String()\n\tif s == \"HOGE\" {\n\t\tt.Data[\"filtered\"] = s\n\t}\n\treturn nil\n}\n\ntype DummyDefaultSink struct{ result string }\n\nfunc (this *DummyDefaultSink) Write(t *tuple.Tuple) error {\n\tx, err := t.Data.Get(\"filtered\")\n\tif err != nil {\n\t\treturn nil\n\t}\n\ts, _ := x.String()\n\tthis.result = string(s)\n\treturn nil\n}\n<commit_msg>add basic topology test<commit_after>package core\n\nimport (\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n\t\"pfi\/sensorbee\/sensorbee\/core\/tuple\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestDefaultTopology(t *testing.T) {\n\tConvey(\"When creating a default topology builder\", t, func() {\n\t\tvar tb StaticTopologyBuilder = NewDefaultStaticTopologyBuilder()\n\t\tSo(tb, ShouldNotBeNil)\n\t})\n\n\tConvey(\"Given a default topology builder\", t, func() {\n\t\ttb := NewDefaultStaticTopologyBuilder()\n\t\ts := &DefaultSource{}\n\t\tb := &DefaultBox{}\n\t\tsi := &DefaultSink{}\n\t\tvar err DeclarerError\n\n\t\tConvey(\"when using a source name twice\", func() {\n\t\t\terr = tb.AddSource(\"mySource\", s)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Err(), ShouldBeNil)\n\n\t\t\terr = tb.AddSource(\"mySource\", s)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"the second time should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when using a source name with a box name\", func() {\n\t\t\terr = tb.AddBox(\"someName\", b)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Err(), ShouldBeNil)\n\n\t\t\terr = tb.AddSource(\"someName\", s)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when using a source name with a sink name\", func() {\n\t\t\terr = tb.AddSink(\"someName\", si)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Err(), ShouldBeNil)\n\n\t\t\terr = tb.AddSource(\"someName\", s)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when using a box name twice\", func() {\n\t\t\terr = tb.AddBox(\"myBox\", b)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Err(), ShouldBeNil)\n\n\t\t\terr = tb.AddBox(\"myBox\", b)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"the second time should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when using a box name with a source name\", func() {\n\t\t\terr = tb.AddSource(\"someName\", s)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Err(), ShouldBeNil)\n\n\t\t\terr = tb.AddBox(\"someName\", b)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when using a box name with a sink name\", func() {\n\t\t\terr = tb.AddSink(\"someName\", si)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Err(), ShouldBeNil)\n\n\t\t\terr = tb.AddBox(\"someName\", b)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when using a sink name twice\", func() {\n\t\t\terr = tb.AddSink(\"mySink\", si)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Err(), ShouldBeNil)\n\n\t\t\terr = tb.AddSink(\"mySink\", si)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"the second time should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when using a sink name with a source name\", func() {\n\t\t\terr = tb.AddSource(\"someName\", s)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Err(), ShouldBeNil)\n\n\t\t\terr = tb.AddSink(\"someName\", si)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when using a sink name with a box name\", func() {\n\t\t\terr = tb.AddBox(\"someName\", b)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tSo(err.Err(), ShouldBeNil)\n\n\t\t\terr = tb.AddSink(\"someName\", si)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\t})\n\n\tConvey(\"Given a default topology builder with a source\", t, func() {\n\t\ttb := NewDefaultStaticTopologyBuilder()\n\t\ts := &DefaultSource{}\n\t\ttb.AddSource(\"aSource\", s)\n\t\tb := &DefaultBox{}\n\t\ttb.AddBox(\"aBox\", b)\n\t\tsi := &DefaultSink{}\n\t\tvar err DeclarerError\n\n\t\tConvey(\"when a new box references a non-existing item\", func() {\n\t\t\terr = tb.AddBox(\"otherBox\", b).\n\t\t\t\tInput(\"something\", nil)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new box references an existing source\", func() {\n\t\t\terr = tb.AddBox(\"otherBox\", b).\n\t\t\t\tInput(\"aSource\", nil)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should work\", func() {\n\t\t\t\tSo(err.Err(), ShouldBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new box references an existing box\", func() {\n\t\t\terr = tb.AddBox(\"otherBox\", b).\n\t\t\t\tInput(\"aBox\", nil)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should work\", func() {\n\t\t\t\tSo(err.Err(), ShouldBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new box references multiple items\", func() {\n\t\t\terr = tb.AddBox(\"otherBox\", b).\n\t\t\t\tInput(\"aBox\", nil).\n\t\t\t\tInput(\"aSource\", nil)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should work\", func() {\n\t\t\t\tSo(err.Err(), ShouldBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new box references an existing source twice\", func() {\n\t\t\terr = tb.AddBox(\"otherBox\", b).\n\t\t\t\tInput(\"aSource\", nil).\n\t\t\t\tInput(\"aSource\", nil)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new box references an existing box twice\", func() {\n\t\t\terr = tb.AddBox(\"otherBox\", b).\n\t\t\t\tInput(\"aBox\", nil).\n\t\t\t\tInput(\"aBox\", nil)\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new sink references a non-existing item\", func() {\n\t\t\terr = tb.AddSink(\"aSink\", si).\n\t\t\t\tInput(\"something\")\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new sink references an existing source\", func() {\n\t\t\terr = tb.AddSink(\"aSink\", si).\n\t\t\t\tInput(\"aSource\")\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should work\", func() {\n\t\t\t\tSo(err.Err(), ShouldBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new sink references an existing box\", func() {\n\t\t\terr = tb.AddSink(\"aSink\", si).\n\t\t\t\tInput(\"aBox\")\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should work\", func() {\n\t\t\t\tSo(err.Err(), ShouldBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new sink references multiple items\", func() {\n\t\t\terr = tb.AddSink(\"aSink\", si).\n\t\t\t\tInput(\"aBox\").\n\t\t\t\tInput(\"aSource\")\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should work\", func() {\n\t\t\t\tSo(err.Err(), ShouldBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new sink references an existing source twice\", func() {\n\t\t\terr = tb.AddSink(\"aSink\", si).\n\t\t\t\tInput(\"aSource\").\n\t\t\t\tInput(\"aSource\")\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"when a new sink references an existing box twice\", func() {\n\t\t\terr = tb.AddSink(\"aSink\", si).\n\t\t\t\tInput(\"aBox\").\n\t\t\t\tInput(\"aBox\")\n\t\t\tSo(err, ShouldNotBeNil)\n\t\t\tConvey(\"adding should fail\", func() {\n\t\t\t\tSo(err.Err(), ShouldNotBeNil)\n\t\t\t})\n\t\t})\n\t})\n\n\t\/* TODO Add tests that show the correct data transport in DefaultTopology.\n\t * We should remove the Default* structs from defaulttopology.go and\n\t * add some replacements in this file that allow us to check data\n\t * flow. For example, a TestSource could just emit two messages with\n\t * different IDs and a TestSink could store all received messages\n\t * in a local array. Then after Run() is finished, check whether all\n\t * items arrived at the sink(s) in the correct order and where processed\n\t * correctly by all intermediate boxes.\n\t *\/\n\tConvey(\"Given basic topology\", t, func() {\n\n\t\ttb := NewDefaultStaticTopologyBuilder()\n\t\ts1 := &DummyDefaultSource{\"value\"}\n\t\ttb.AddSource(\"Source1\", s1)\n\t\tb1 := BoxFunc(dummyToUpperBoxFunc)\n\t\ttb.AddBox(\"aBox\", &b1).Input(\"Source1\", nil)\n\t\tsi := &DummyDefaultSink{}\n\t\ttb.AddSink(\"si\", si).Input(\"aBox\")\n\t\tt := tb.Build()\n\t\tConvey(\"Run topology with ToUpperBox\", func() {\n\t\t\tt.Run()\n\t\t\tSo(si.filtered, ShouldEqual, \"VALUE\")\n\t\t})\n\t})\n\n}\n\ntype DummyDefaultSource struct{ initial string }\n\nfunc (this *DummyDefaultSource) GenerateStream(w Writer) error {\n\ttime.Sleep(0.5 * 1e9) \/\/ to confirm .Run() goroutine\n\tt := &tuple.Tuple{}\n\tt.Data = tuple.Map{\n\t\t\"source\": tuple.String(this.initial),\n\t}\n\tw.Write(t)\n\tt2 := &tuple.Tuple{}\n\tt2.Data = tuple.Map{\n\t\t\"source\": tuple.String(this.initial),\n\t}\n\tw.Write(t2)\n\treturn nil\n}\nfunc (this *DummyDefaultSource) Schema() *Schema {\n\tvar s Schema = Schema(\"test\")\n\treturn &s\n}\n\nfunc dummyToUpperBoxFunc(t *tuple.Tuple, w Writer) error {\n\tx, _ := t.Data.Get(\"source\")\n\ts, _ := x.String()\n\tt.Data[\"to-upper-box\"] = tuple.String(strings.ToUpper(string(s)))\n\tw.Write(t)\n\treturn nil\n}\n\ntype DummyDefaultSink struct {\n\tfiltered string\n}\n\nfunc (this *DummyDefaultSink) Write(t *tuple.Tuple) error {\n\tx, err := t.Data.Get(\"to-upper-box\")\n\tif err != nil {\n\t\treturn nil\n\t}\n\ts, _ := x.String()\n\tthis.filtered = string(s)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package handlers\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"encoding\/json\"\n\t\"github.com\/ghawk1ns\/golf\/database\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/ghawk1ns\/golf\/model\"\n\t\"errors\"\n\t\"github.com\/ghawk1ns\/golf\/logger\"\n)\n\nfunc GolferProfile(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=UTF-8\")\n\n\tvars := mux.Vars(r)\n\tgolferId := vars[\"id\"]\n\n\tif golferId == \"\" {\n\t\tonGolferError(w, errors.New(\"Invalid Golfer Id\"))\n\t}\n\n\tvar result model.Profile\n\tgolfer, err := database.GetGolferById(golferId)\n\tif err != nil {\n\t\tonGolferError(w, err)\n\t\treturn\n\t} else {\n\t\tresult.Golfer = golfer\n\t}\n\n\tscores, err := database.GetScoresForGolfer(golfer.GolferId)\n\tif err != nil {\n\t\tonGolferError(w, err)\n\t\treturn\n\t} else {\n\t\tresult.Scores = scores\n\t}\n\n\tresult.Stats, err = getStats(golferId)\n\n\tb, err := json.Marshal(result)\n\tif err != nil {\n\t\tonGolferProfileError(w, err)\n\t\treturn\n\t} else {\n\t\tlogger.Info.Println(\"golferProfile: \", string(b))\n\t\tw.WriteHeader(http.StatusOK)\n\t\tfmt.Fprintln(w, string(b))\n\t}\n}\n\n\/\/ This is not a good way to do this\nfunc onGolferProfileError(w http.ResponseWriter, err error) {\n\tw.WriteHeader(http.StatusBadRequest)\n\tlogger.Error.Println(\"an error occured:\", err.Error())\n\tfmt.Fprintln(w, nil)\n}\n\nfunc getStats(golferId string) (model.Stats, error) {\n\troundAvg := make(chan float64)\n\tnumRounds := make(chan int)\n\twinCounts := make(chan []model.WinCount)\n\n\t\/\/ retrieve the golfer's average\n\tgo func() {\n\t\tresult, err := database.GetGolferAverage(golferId)\n\t\tif err != nil {\n\t\t\tlogger.Error.Println(err)\n\t\t\troundAvg <- -1\n\t\t} else {\n\t\t\troundAvg <- result\n\t\t}\n\t}()\n\n\t\/\/ retrieve the golfer's total rounds played\n\tgo func() {\n\t\tresult, err := database.GetGolferNumRounds(golferId)\n\t\tif err != nil {\n\t\t\tlogger.Error.Println(err)\n\t\t\tnumRounds <- -1\n\t\t} else {\n\t\t\tnumRounds <- result\n\t\t}\n\t}()\n\n\t\/\/ retrieve the golfer's victory over other golfers\n\tgo func() {\n\t\tlogger.Info.Println(\"Getting win stats for\", golferId)\n\t\twins, err := database.GetGolferWins(golferId)\n\t\tif err != nil {\n\t\t\tlogger.Error.Println(err)\n\t\t\twinCounts <- nil\n\t\t} else {\n\t\t\tvar localWinCounts []model.WinCount\n\t\t\tfor opponentId,count := range wins {\n\t\t\t\tgolfer, err := database.GetGolferById(opponentId)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Error.Println(err.Error())\n\t\t\t\t} else {\n\t\t\t\t\tlogger.Info.Printf(\"%s has beaten %s, %d times\\n\", golferId, golfer.Name, count)\n\t\t\t\t\tlocalWinCounts = append(localWinCounts, model.WinCount{golfer, count})\n\t\t\t\t}\n\n\t\t\t}\n\t\t\twinCounts <- localWinCounts\n\t\t}\n\t}()\n\n\tstats := model.Stats{ <- numRounds, <- roundAvg, <- winCounts}\n\tif stats.Rounds == -1 || stats.Average == -1 {\n\t\treturn stats, errors.New(\"something went wrong with stat gathering\")\n\t} else {\n\t\treturn stats, nil\n\t}\n}<commit_msg>Don't use goroutines to fetch stats async just yet<commit_after>package handlers\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"encoding\/json\"\n\t\"github.com\/ghawk1ns\/golf\/database\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/ghawk1ns\/golf\/model\"\n\t\"errors\"\n\t\"github.com\/ghawk1ns\/golf\/logger\"\n)\n\nfunc GolferProfile(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=UTF-8\")\n\n\tvars := mux.Vars(r)\n\tgolferId := vars[\"id\"]\n\n\tif golferId == \"\" {\n\t\tonGolferError(w, errors.New(\"Invalid Golfer Id\"))\n\t}\n\n\tvar result model.Profile\n\tgolfer, err := database.GetGolferById(golferId)\n\tif err != nil {\n\t\tonGolferError(w, err)\n\t\treturn\n\t} else {\n\t\tresult.Golfer = golfer\n\t}\n\n\tscores, err := database.GetScoresForGolfer(golfer.GolferId)\n\tif err != nil {\n\t\tonGolferError(w, err)\n\t\treturn\n\t} else {\n\t\tresult.Scores = scores\n\t}\n\n\tresult.Stats, err = getStats(golferId)\n\n\tb, err := json.Marshal(result)\n\tif err != nil {\n\t\tonGolferProfileError(w, err)\n\t\treturn\n\t} else {\n\t\tlogger.Info.Println(\"golferProfile: \", string(b))\n\t\tw.WriteHeader(http.StatusOK)\n\t\tfmt.Fprintln(w, string(b))\n\t}\n}\n\n\/\/ This is not a good way to do this\nfunc onGolferProfileError(w http.ResponseWriter, err error) {\n\tw.WriteHeader(http.StatusBadRequest)\n\tlogger.Error.Println(\"an error occured:\", err.Error())\n\tfmt.Fprintln(w, nil)\n}\n\n\/\/ TODO: debug me\nfunc getStatsAsync(golferId string) (model.Stats, error) {\n\troundAvg := make(chan float64)\n\tnumRounds := make(chan int)\n\twinCounts := make(chan []model.WinCount)\n\n\t\/\/ retrieve the golfer's average\n\tgo func() {\n\t\tresult, err := database.GetGolferAverage(golferId)\n\t\tif err != nil {\n\t\t\tlogger.Error.Println(err)\n\t\t\troundAvg <- -1\n\t\t} else {\n\t\t\troundAvg <- result\n\t\t}\n\t}()\n\n\t\/\/ retrieve the golfer's total rounds played\n\tgo func() {\n\t\tresult, err := database.GetGolferNumRounds(golferId)\n\t\tif err != nil {\n\t\t\tlogger.Error.Println(err)\n\t\t\tnumRounds <- -1\n\t\t} else {\n\t\t\tnumRounds <- result\n\t\t}\n\t}()\n\n\t\/\/ retrieve the golfer's victory over other golfers\n\tgo func() {\n\t\tlogger.Info.Println(\"Getting win stats for\", golferId)\n\t\twins, err := database.GetGolferWins(golferId)\n\t\tif err != nil {\n\t\t\tlogger.Error.Println(err)\n\t\t\twinCounts <- nil\n\t\t} else {\n\t\t\tvar localWinCounts []model.WinCount\n\t\t\tfor opponentId,count := range wins {\n\t\t\t\tgolfer, err := database.GetGolferById(opponentId)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Error.Println(err.Error())\n\t\t\t\t} else {\n\t\t\t\t\tlogger.Info.Printf(\"%s has beaten %s, %d times\\n\", golferId, golfer.Name, count)\n\t\t\t\t\tlocalWinCounts = append(localWinCounts, model.WinCount{golfer, count})\n\t\t\t\t}\n\n\t\t\t}\n\t\t\twinCounts <- localWinCounts\n\t\t}\n\t}()\n\n\tstats := model.Stats{ <- numRounds, <- roundAvg, <- winCounts}\n\tif stats.Rounds == -1 || stats.Average == -1 {\n\t\treturn stats, errors.New(\"something went wrong with stat gathering\")\n\t} else {\n\t\treturn stats, nil\n\t}\n}\n\nfunc getStats(golferId string) (model.Stats, error) {\n\n\troundAvg, err := database.GetGolferAverage(golferId)\n\tif err != nil {\n\t\tlogger.Error.Println(err)\n\t\troundAvg = -1\n\t}\n\n\t\/\/ retrieve the golfer's total rounds played\n\tnumRounds, err := database.GetGolferNumRounds(golferId)\n\tif err != nil {\n\t\tlogger.Error.Println(err)\n\t\tnumRounds = -1\n\t}\n\n\t\/\/ retrieve the golfer's victory over other golfers\n\tlogger.Info.Println(\"Getting win stats for\", golferId)\n\twins, err := database.GetGolferWins(golferId)\n\tvar winCounts []model.WinCount\n\tif err != nil {\n\t\tlogger.Error.Println(err)\n\t\twinCounts = nil\n\t} else {\n\t\tvar localWinCounts []model.WinCount\n\t\tfor opponentId,count := range wins {\n\t\t\tgolfer, err := database.GetGolferById(opponentId)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error.Println(err.Error())\n\t\t\t} else {\n\t\t\t\tlogger.Info.Printf(\"%s has beaten %s, %d times\\n\", golferId, golfer.Name, count)\n\t\t\t\tlocalWinCounts = append(localWinCounts, model.WinCount{golfer, count})\n\t\t\t}\n\n\t\t}\n\t\twinCounts = localWinCounts\n\t}\n\n\tstats := model.Stats{ numRounds, roundAvg, winCounts}\n\tif stats.Rounds == -1 || stats.Average == -1 {\n\t\treturn stats, errors.New(\"something went wrong with stat gathering\")\n\t} else {\n\t\treturn stats, nil\n\t}\n}<|endoftext|>"} {"text":"<commit_before>package coin\n\nimport (\n\t\"context\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/temoto\/alive\"\n\t\"github.com\/temoto\/vender\/currency\"\n\t\"github.com\/temoto\/vender\/hardware\/mdb\"\n\t\"github.com\/temoto\/vender\/hardware\/money\"\n)\n\nconst (\n\tcoinTypeCount = 16\n\n\tDelayErr = 500 * time.Millisecond\n\tDelayNext = 200 * time.Millisecond\n\n\tRouteCashBox = 0\n\tRouteTubes = 1\n\tRouteNotUsed = 2\n\tRouteReject = 3\n)\n\n\/\/go:generate stringer -type=Features\ntype Features uint32\n\nconst (\n\tFeatureAlternativePayout Features = 1 << iota\n\tFeatureExtendedDiagnostic\n\tFeatureControlledManualFillPayout\n\tFeatureFTL\n)\n\ntype CoinAcceptor struct {\n\tmdb mdb.Mdber\n\tbyteOrder binary.ByteOrder\n\n\t\/\/ Indicates the value of the bill types 0 to 15.\n\t\/\/ These are final values including all scaling factors.\n\tcoinTypeCredit []currency.Nominal\n\n\tfeatureLevel uint8\n\tsupportedFeatures Features\n\n\tinternalScalingFactor int\n\tbatch sync.Mutex\n\tready chan struct{}\n}\n\nvar (\n\tpacketReset = mdb.PacketFromHex(\"08\")\n\tpacketSetup = mdb.PacketFromHex(\"09\")\n\tpacketTubeStatus = mdb.PacketFromHex(\"0a\")\n\tpacketPoll = mdb.PacketFromHex(\"0b\")\n\tpacketExpIdent = mdb.PacketFromHex(\"0f00\")\n)\n\nvar (\n\tErrNoCredit = fmt.Errorf(\"No Credit\")\n\tErrDoubleArrival = fmt.Errorf(\"Double Arrival\")\n\tErrCoinRouting = fmt.Errorf(\"Coin Routing\")\n\tErrCoinJam = fmt.Errorf(\"Coin Jam\")\n\tErrSlugs = fmt.Errorf(\"Slugs\")\n)\n\n\/\/ usage: defer coin.Batch()()\nfunc (self *CoinAcceptor) Batch() func() {\n\tself.batch.Lock()\n\treturn self.batch.Unlock\n}\n\nfunc (self *CoinAcceptor) Init(ctx context.Context, mdber mdb.Mdber) error {\n\t\/\/ TODO read config\n\tself.byteOrder = binary.BigEndian\n\tself.coinTypeCredit = make([]currency.Nominal, coinTypeCount)\n\tself.mdb = mdber\n\tself.internalScalingFactor = 1 \/\/ FIXME\n\tself.ready = make(chan struct{})\n\t\/\/ TODO maybe execute CommandReset?\n\terr := self.InitSequence()\n\tif err != nil {\n\t\tlog.Printf(\"hardware\/mdb\/coin\/InitSequence error=%s\", err)\n\t\t\/\/ TODO maybe execute CommandReset?\n\t}\n\treturn err\n}\n\nfunc (self *CoinAcceptor) SupportedNominals() []currency.Nominal {\n\tns := make([]currency.Nominal, 0, len(self.coinTypeCredit))\n\tfor _, n := range self.coinTypeCredit {\n\t\tif n > 0 {\n\t\t\tns = append(ns, n)\n\t\t}\n\t}\n\treturn ns\n}\n\nfunc (self *CoinAcceptor) Run(ctx context.Context, a *alive.Alive, ch chan<- money.PollResult) {\n\tstopch := a.StopChan()\n\tfor a.IsRunning() {\n\t\tpr := self.CommandPoll()\n\t\tselect {\n\t\tcase ch <- pr:\n\t\tcase <-stopch:\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-time.After(pr.Delay):\n\t\tcase <-stopch:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (self *CoinAcceptor) InitSequence() error {\n\tdefer self.Batch()()\n\n\terr := self.CommandSetup()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = self.CommandExpansionIdentification(); err != nil {\n\t\treturn err\n\t}\n\tif err = self.CommandFeatureEnable(FeatureExtendedDiagnostic); err != nil {\n\t\treturn err\n\t}\n\tif err = self.CommandExpansionSendDiagStatus(); err != nil {\n\t\treturn err\n\t}\n\tif err = self.CommandTubeStatus(); err != nil {\n\t\treturn err\n\t}\n\t\/\/ TODO read config\n\tif err = self.CommandCoinType(0xffff, 0xffff); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (self *CoinAcceptor) CommandReset() error {\n\tresponse := new(mdb.Packet)\n\treturn self.mdb.Tx(packetReset, response)\n}\n\nfunc (self *CoinAcceptor) CommandSetup() error {\n\tconst expectLengthMin = 7\n\trequest := packetSetup\n\tresponse := new(mdb.Packet)\n\terr := self.mdb.Tx(request, response)\n\tif err != nil {\n\t\tlog.Printf(\"mdb request=%s err=%v\", request.Format(), err)\n\t\treturn err\n\t}\n\tlog.Printf(\"setup response=(%d)%s\", response.Len(), response.Format())\n\tbs := response.Bytes()\n\tif len(bs) < expectLengthMin {\n\t\treturn fmt.Errorf(\"hardware\/mdb\/coin SETUP response=%s expected >= %d bytes\", response.Format(), expectLengthMin)\n\t}\n\tscalingFactor := bs[3]\n\tfor i, sf := range bs[7:] {\n\t\tn := currency.Nominal(sf) * currency.Nominal(scalingFactor) * currency.Nominal(self.internalScalingFactor)\n\t\tlog.Printf(\"i=%d sf=%d nominal=%s\", i, sf, currency.Amount(n).Format100I())\n\t\tself.coinTypeCredit[i] = n\n\t}\n\tself.featureLevel = bs[0]\n\tlog.Printf(\"Changer Feature Level: %d\", self.featureLevel)\n\tlog.Printf(\"Country \/ Currency Code: %x\", bs[1:3])\n\tlog.Printf(\"Coin Scaling Factor: %d\", scalingFactor)\n\tlog.Printf(\"Decimal Places: %d\", bs[4])\n\tlog.Printf(\"Coin Type Routing: %d\", self.byteOrder.Uint16(bs[5:7]))\n\tlog.Printf(\"Coin Type Credit: %x %#v\", bs[7:], self.coinTypeCredit)\n\treturn nil\n}\n\nfunc (self *CoinAcceptor) CommandPoll() (result money.PollResult) {\n\tdefer func() {\n\t\tif result.Ready() {\n\t\t\tselect {\n\t\t\tcase self.ready <- struct{}{}:\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}()\n\n\tnow := time.Now()\n\tresponse := new(mdb.Packet)\n\tsavedebug := self.mdb.SetDebug(false)\n\terr := self.mdb.Tx(packetPoll, response)\n\tself.mdb.SetDebug(savedebug)\n\tresult.Time = now\n\tresult.Delay = DelayNext\n\tif err != nil {\n\t\tresult.Error = err\n\t\tresult.Delay = DelayErr\n\t\treturn result\n\t}\n\tif response.Len() == 0 {\n\t\treturn result\n\t}\n\tresult.Items = make([]money.PollItem, 0, response.Len())\n\tlog.Printf(\"poll response=%s\", response.Format())\n\tbs := response.Bytes()\n\tpi := money.PollItem{}\n\tskip := false\n\tfor i, b := range bs {\n\t\tif skip {\n\t\t\tskip = false\n\t\t\tcontinue\n\t\t}\n\t\tb2 := byte(0)\n\t\tif i+1 < len(bs) {\n\t\t\tb2 = bs[i+1]\n\t\t}\n\t\tpi, skip = self.parsePollItem(b, b2)\n\t\tresult.Items = append(result.Items, pi)\n\t}\n\treturn result\n}\n\nfunc (self *CoinAcceptor) CommandTubeStatus() error {\n\tconst expectLengthMin = 2\n\trequest := packetTubeStatus\n\tresponse := new(mdb.Packet)\n\terr := self.mdb.Tx(request, response)\n\tif err != nil {\n\t\tlog.Printf(\"mdb request=%s err=%v\", request.Format(), err)\n\t\treturn err\n\t}\n\tlog.Printf(\"tubestatus response=(%d)%s\", response.Len(), response.Format())\n\tbs := response.Bytes()\n\tif len(bs) < expectLengthMin {\n\t\treturn fmt.Errorf(\"hardware\/mdb\/coin TUBE money.Status response=%s expected >= %d bytes\", response.Format(), expectLengthMin)\n\t}\n\tfull := self.byteOrder.Uint16(bs[0:2])\n\tcounts := bs[2:18]\n\tlog.Printf(\"tubestatus full=%b counts=%v\", full, counts)\n\t\/\/ TODO use full,counts\n\t_ = full\n\t_ = counts\n\treturn nil\n}\n\nfunc (self *CoinAcceptor) CommandCoinType(accept, dispense uint16) error {\n\tbuf := [5]byte{0x0c}\n\tself.byteOrder.PutUint16(buf[1:], accept)\n\tself.byteOrder.PutUint16(buf[3:], dispense)\n\trequest := mdb.PacketFromBytes(buf[:])\n\tresponse := new(mdb.Packet)\n\terr := self.mdb.Tx(request, response)\n\tif err != nil {\n\t\tlog.Printf(\"mdb request=%s err=%v\", request.Format(), err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (self *CoinAcceptor) CommandDispense(nominal currency.Nominal, count uint8) error {\n\tif count >= 16 {\n\t\treturn fmt.Errorf(\"CommandDispense count=%d overflow >=16\", count)\n\t}\n\tcoinType, err := self.nominalCoinType(nominal)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresponse := new(mdb.Packet)\n\trequest := mdb.PacketFromBytes([]byte{0x0d, (count << 4) + coinType})\n\t<-self.ready\n\terr = self.mdb.Tx(request, response)\n\tif err != nil {\n\t\tlog.Printf(\"mdb request=%s err=%v\", request.Format(), err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (self *CoinAcceptor) CommandExpansionIdentification() error {\n\tconst expectLength = 33\n\trequest := packetExpIdent\n\tresponse := new(mdb.Packet)\n\terr := self.mdb.Tx(request, response)\n\tif err != nil {\n\t\tlog.Printf(\"mdb request=%s err=%v\", request.Format(), err)\n\t\treturn err\n\t}\n\tlog.Printf(\"setup response=(%d)%s\", response.Len(), response.Format())\n\tbs := response.Bytes()\n\tif len(bs) < expectLength {\n\t\treturn fmt.Errorf(\"hardware\/mdb\/coin EXPANSION IDENTIFICATION response=%s expected %d bytes\", response.Format(), expectLength)\n\t}\n\tself.supportedFeatures = Features(self.byteOrder.Uint32(bs[29:]))\n\tlog.Printf(\"Supported features: %b\", self.supportedFeatures)\n\treturn nil\n}\n\nfunc (self *CoinAcceptor) CommandFeatureEnable(requested Features) error {\n\tf := requested & self.supportedFeatures\n\tbuf := [6]byte{0x0f, 0x01}\n\tself.byteOrder.PutUint32(buf[2:], uint32(f))\n\trequest := mdb.PacketFromBytes(buf[:])\n\tresponse := new(mdb.Packet)\n\terr := self.mdb.Tx(request, response)\n\tif err != nil {\n\t\tlog.Printf(\"mdb request=%s err=%v\", request.Format(), err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (self *CoinAcceptor) CommandExpansionSendDiagStatus() error {\n\tif self.supportedFeatures&FeatureExtendedDiagnostic == 0 {\n\t\tlog.Printf(\"CommandExpansionSendDiagStatus feature is not supported\")\n\t\treturn nil\n\t}\n\tself.mdb.TxDebug(mdb.PacketFromHex(\"0f05\"), true) \/\/ 0f05 EXPANSION SEND DIAG money.Status\n\treturn nil\n}\n\nfunc (self *CoinAcceptor) coinTypeNominal(b byte) currency.Nominal {\n\tif b >= coinTypeCount {\n\t\tlog.Printf(\"invalid coin type: %d\", b)\n\t\treturn 0\n\t}\n\treturn self.coinTypeCredit[b]\n}\n\nfunc (self *CoinAcceptor) nominalCoinType(nominal currency.Nominal) (byte, error) {\n\tfor ct, n := range self.coinTypeCredit {\n\t\tif n == nominal {\n\t\t\treturn byte(ct), nil\n\t\t}\n\t}\n\treturn 0, fmt.Errorf(\"Unknown nominal %s\", currency.Amount(nominal).Format100I())\n}\n\nfunc (self *CoinAcceptor) parsePollItem(b, b2 byte) (money.PollItem, bool) {\n\tswitch b {\n\tcase 0x01: \/\/ Escrow request\n\t\treturn money.PollItem{Status: money.StatusReturnRequest}, false\n\tcase 0x02: \/\/ Changer Payout Busy\n\t\treturn money.PollItem{Status: money.StatusBusy}, false\n\tcase 0x03: \/\/ No Credit\n\t\treturn money.PollItem{Status: money.StatusError, Error: ErrNoCredit}, false\n\tcase 0x04: \/\/ Defective Tube Sensor\n\t\treturn money.PollItem{Status: money.StatusFatal, Error: money.ErrSensor}, false\n\tcase 0x05: \/\/ Double Arrival\n\t\treturn money.PollItem{Status: money.StatusError, Error: ErrDoubleArrival}, false\n\tcase 0x06: \/\/ Acceptor Unplugged\n\t\treturn money.PollItem{Status: money.StatusFatal, Error: money.ErrNoStorage}, false\n\tcase 0x07: \/\/ Tube Jam\n\t\treturn money.PollItem{Status: money.StatusFatal, Error: money.ErrJam}, false\n\tcase 0x08: \/\/ ROM checksum error\n\t\treturn money.PollItem{Status: money.StatusFatal, Error: money.ErrROMChecksum}, false\n\tcase 0x09: \/\/ Coin Routing Error\n\t\treturn money.PollItem{Status: money.StatusError, Error: ErrCoinRouting}, false\n\tcase 0x0a: \/\/ Changer Busy\n\t\treturn money.PollItem{Status: money.StatusBusy}, false\n\tcase 0x0b: \/\/ Changer was Reset\n\t\treturn money.PollItem{Status: money.StatusWasReset}, false\n\tcase 0x0c: \/\/ Coin Jam\n\t\treturn money.PollItem{Status: money.StatusFatal, Error: ErrCoinJam}, false\n\tcase 0x0d: \/\/ Possible Credited Coin Removal\n\t\treturn money.PollItem{Status: money.StatusError, Error: money.ErrFraud}, false\n\t}\n\n\tif b&0x80 != 0 { \/\/ Coins Dispensed Manually\n\t\t\/\/ b=1yyyxxxx b2=number of coins in tube\n\t\t\/\/ yyy = coins dispensed\n\t\t\/\/ xxxx = coin type\n\t\tcount := (b >> 4) & 7\n\t\tnominal := self.coinTypeNominal(b & 0xf)\n\t\treturn money.PollItem{Status: money.StatusDispensed, DataNominal: nominal, DataCount: count}, true\n\t}\n\tif b&0x7f == b { \/\/ Coins Deposited\n\t\t\/\/ b=01yyxxxx b2=number of coins in tube\n\t\t\/\/ yy = coin routing\n\t\t\/\/ xxxx = coin type\n\t\trouting := (b >> 4) & 3\n\t\tif routing > 3 {\n\t\t\tpanic(\"code error\")\n\t\t}\n\t\tnominal := self.coinTypeNominal(b & 0xf)\n\t\treturn money.PollItem{Status: money.StatusCredit, DataNominal: nominal, DataCount: 1}, true\n\t}\n\tif b&0x3f == b { \/\/ Slug count\n\t\tslugs := b & 0x1f\n\t\tlog.Printf(\"Number of slugs: %d\", slugs)\n\t\treturn money.PollItem{Status: money.StatusInfo, Error: ErrSlugs, DataCount: slugs}, false\n\t}\n\n\terr := fmt.Errorf(\"parsePollItem unknown=%x\", b)\n\tlog.Print(err)\n\treturn money.PollItem{Status: money.StatusFatal, Error: err}, false\n}\n<commit_msg>double arrival is not error<commit_after>package coin\n\nimport (\n\t\"context\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/temoto\/alive\"\n\t\"github.com\/temoto\/vender\/currency\"\n\t\"github.com\/temoto\/vender\/hardware\/mdb\"\n\t\"github.com\/temoto\/vender\/hardware\/money\"\n)\n\nconst (\n\tcoinTypeCount = 16\n\n\tDelayErr = 500 * time.Millisecond\n\tDelayNext = 200 * time.Millisecond\n\n\tRouteCashBox = 0\n\tRouteTubes = 1\n\tRouteNotUsed = 2\n\tRouteReject = 3\n)\n\n\/\/go:generate stringer -type=Features\ntype Features uint32\n\nconst (\n\tFeatureAlternativePayout Features = 1 << iota\n\tFeatureExtendedDiagnostic\n\tFeatureControlledManualFillPayout\n\tFeatureFTL\n)\n\ntype CoinAcceptor struct {\n\tmdb mdb.Mdber\n\tbyteOrder binary.ByteOrder\n\n\t\/\/ Indicates the value of the bill types 0 to 15.\n\t\/\/ These are final values including all scaling factors.\n\tcoinTypeCredit []currency.Nominal\n\n\tfeatureLevel uint8\n\tsupportedFeatures Features\n\n\tinternalScalingFactor int\n\tbatch sync.Mutex\n\tready chan struct{}\n}\n\nvar (\n\tpacketReset = mdb.PacketFromHex(\"08\")\n\tpacketSetup = mdb.PacketFromHex(\"09\")\n\tpacketTubeStatus = mdb.PacketFromHex(\"0a\")\n\tpacketPoll = mdb.PacketFromHex(\"0b\")\n\tpacketExpIdent = mdb.PacketFromHex(\"0f00\")\n)\n\nvar (\n\tErrNoCredit = fmt.Errorf(\"No Credit\")\n\tErrDoubleArrival = fmt.Errorf(\"Double Arrival\")\n\tErrCoinRouting = fmt.Errorf(\"Coin Routing\")\n\tErrCoinJam = fmt.Errorf(\"Coin Jam\")\n\tErrSlugs = fmt.Errorf(\"Slugs\")\n)\n\n\/\/ usage: defer coin.Batch()()\nfunc (self *CoinAcceptor) Batch() func() {\n\tself.batch.Lock()\n\treturn self.batch.Unlock\n}\n\nfunc (self *CoinAcceptor) Init(ctx context.Context, mdber mdb.Mdber) error {\n\t\/\/ TODO read config\n\tself.byteOrder = binary.BigEndian\n\tself.coinTypeCredit = make([]currency.Nominal, coinTypeCount)\n\tself.mdb = mdber\n\tself.internalScalingFactor = 1 \/\/ FIXME\n\tself.ready = make(chan struct{})\n\t\/\/ TODO maybe execute CommandReset?\n\terr := self.InitSequence()\n\tif err != nil {\n\t\tlog.Printf(\"hardware\/mdb\/coin\/InitSequence error=%s\", err)\n\t\t\/\/ TODO maybe execute CommandReset?\n\t}\n\treturn err\n}\n\nfunc (self *CoinAcceptor) SupportedNominals() []currency.Nominal {\n\tns := make([]currency.Nominal, 0, len(self.coinTypeCredit))\n\tfor _, n := range self.coinTypeCredit {\n\t\tif n > 0 {\n\t\t\tns = append(ns, n)\n\t\t}\n\t}\n\treturn ns\n}\n\nfunc (self *CoinAcceptor) Run(ctx context.Context, a *alive.Alive, ch chan<- money.PollResult) {\n\tstopch := a.StopChan()\n\tfor a.IsRunning() {\n\t\tpr := self.CommandPoll()\n\t\tselect {\n\t\tcase ch <- pr:\n\t\tcase <-stopch:\n\t\t\treturn\n\t\t}\n\t\tselect {\n\t\tcase <-time.After(pr.Delay):\n\t\tcase <-stopch:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (self *CoinAcceptor) InitSequence() error {\n\tdefer self.Batch()()\n\n\terr := self.CommandSetup()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = self.CommandExpansionIdentification(); err != nil {\n\t\treturn err\n\t}\n\tif err = self.CommandFeatureEnable(FeatureExtendedDiagnostic); err != nil {\n\t\treturn err\n\t}\n\tif err = self.CommandExpansionSendDiagStatus(); err != nil {\n\t\treturn err\n\t}\n\tif err = self.CommandTubeStatus(); err != nil {\n\t\treturn err\n\t}\n\t\/\/ TODO read config\n\tif err = self.CommandCoinType(0xffff, 0xffff); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (self *CoinAcceptor) CommandReset() error {\n\tresponse := new(mdb.Packet)\n\treturn self.mdb.Tx(packetReset, response)\n}\n\nfunc (self *CoinAcceptor) CommandSetup() error {\n\tconst expectLengthMin = 7\n\trequest := packetSetup\n\tresponse := new(mdb.Packet)\n\terr := self.mdb.Tx(request, response)\n\tif err != nil {\n\t\tlog.Printf(\"mdb request=%s err=%v\", request.Format(), err)\n\t\treturn err\n\t}\n\tlog.Printf(\"setup response=(%d)%s\", response.Len(), response.Format())\n\tbs := response.Bytes()\n\tif len(bs) < expectLengthMin {\n\t\treturn fmt.Errorf(\"hardware\/mdb\/coin SETUP response=%s expected >= %d bytes\", response.Format(), expectLengthMin)\n\t}\n\tscalingFactor := bs[3]\n\tfor i, sf := range bs[7:] {\n\t\tn := currency.Nominal(sf) * currency.Nominal(scalingFactor) * currency.Nominal(self.internalScalingFactor)\n\t\tlog.Printf(\"i=%d sf=%d nominal=%s\", i, sf, currency.Amount(n).Format100I())\n\t\tself.coinTypeCredit[i] = n\n\t}\n\tself.featureLevel = bs[0]\n\tlog.Printf(\"Changer Feature Level: %d\", self.featureLevel)\n\tlog.Printf(\"Country \/ Currency Code: %x\", bs[1:3])\n\tlog.Printf(\"Coin Scaling Factor: %d\", scalingFactor)\n\tlog.Printf(\"Decimal Places: %d\", bs[4])\n\tlog.Printf(\"Coin Type Routing: %d\", self.byteOrder.Uint16(bs[5:7]))\n\tlog.Printf(\"Coin Type Credit: %x %#v\", bs[7:], self.coinTypeCredit)\n\treturn nil\n}\n\nfunc (self *CoinAcceptor) CommandPoll() (result money.PollResult) {\n\tdefer func() {\n\t\tif result.Ready() {\n\t\t\tselect {\n\t\t\tcase self.ready <- struct{}{}:\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t}()\n\n\tnow := time.Now()\n\tresponse := new(mdb.Packet)\n\tsavedebug := self.mdb.SetDebug(false)\n\terr := self.mdb.Tx(packetPoll, response)\n\tself.mdb.SetDebug(savedebug)\n\tresult.Time = now\n\tresult.Delay = DelayNext\n\tif err != nil {\n\t\tresult.Error = err\n\t\tresult.Delay = DelayErr\n\t\treturn result\n\t}\n\tif response.Len() == 0 {\n\t\treturn result\n\t}\n\tresult.Items = make([]money.PollItem, 0, response.Len())\n\tlog.Printf(\"poll response=%s\", response.Format())\n\tbs := response.Bytes()\n\tpi := money.PollItem{}\n\tskip := false\n\tfor i, b := range bs {\n\t\tif skip {\n\t\t\tskip = false\n\t\t\tcontinue\n\t\t}\n\t\tb2 := byte(0)\n\t\tif i+1 < len(bs) {\n\t\t\tb2 = bs[i+1]\n\t\t}\n\t\tpi, skip = self.parsePollItem(b, b2)\n\t\tresult.Items = append(result.Items, pi)\n\t}\n\treturn result\n}\n\nfunc (self *CoinAcceptor) CommandTubeStatus() error {\n\tconst expectLengthMin = 2\n\trequest := packetTubeStatus\n\tresponse := new(mdb.Packet)\n\terr := self.mdb.Tx(request, response)\n\tif err != nil {\n\t\tlog.Printf(\"mdb request=%s err=%v\", request.Format(), err)\n\t\treturn err\n\t}\n\tlog.Printf(\"tubestatus response=(%d)%s\", response.Len(), response.Format())\n\tbs := response.Bytes()\n\tif len(bs) < expectLengthMin {\n\t\treturn fmt.Errorf(\"hardware\/mdb\/coin TUBE money.Status response=%s expected >= %d bytes\", response.Format(), expectLengthMin)\n\t}\n\tfull := self.byteOrder.Uint16(bs[0:2])\n\tcounts := bs[2:18]\n\tlog.Printf(\"tubestatus full=%b counts=%v\", full, counts)\n\t\/\/ TODO use full,counts\n\t_ = full\n\t_ = counts\n\treturn nil\n}\n\nfunc (self *CoinAcceptor) CommandCoinType(accept, dispense uint16) error {\n\tbuf := [5]byte{0x0c}\n\tself.byteOrder.PutUint16(buf[1:], accept)\n\tself.byteOrder.PutUint16(buf[3:], dispense)\n\trequest := mdb.PacketFromBytes(buf[:])\n\tresponse := new(mdb.Packet)\n\terr := self.mdb.Tx(request, response)\n\tif err != nil {\n\t\tlog.Printf(\"mdb request=%s err=%v\", request.Format(), err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (self *CoinAcceptor) CommandDispense(nominal currency.Nominal, count uint8) error {\n\tif count >= 16 {\n\t\treturn fmt.Errorf(\"CommandDispense count=%d overflow >=16\", count)\n\t}\n\tcoinType, err := self.nominalCoinType(nominal)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresponse := new(mdb.Packet)\n\trequest := mdb.PacketFromBytes([]byte{0x0d, (count << 4) + coinType})\n\t<-self.ready\n\terr = self.mdb.Tx(request, response)\n\tif err != nil {\n\t\tlog.Printf(\"mdb request=%s err=%v\", request.Format(), err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (self *CoinAcceptor) CommandExpansionIdentification() error {\n\tconst expectLength = 33\n\trequest := packetExpIdent\n\tresponse := new(mdb.Packet)\n\terr := self.mdb.Tx(request, response)\n\tif err != nil {\n\t\tlog.Printf(\"mdb request=%s err=%v\", request.Format(), err)\n\t\treturn err\n\t}\n\tlog.Printf(\"setup response=(%d)%s\", response.Len(), response.Format())\n\tbs := response.Bytes()\n\tif len(bs) < expectLength {\n\t\treturn fmt.Errorf(\"hardware\/mdb\/coin EXPANSION IDENTIFICATION response=%s expected %d bytes\", response.Format(), expectLength)\n\t}\n\tself.supportedFeatures = Features(self.byteOrder.Uint32(bs[29:]))\n\tlog.Printf(\"Supported features: %b\", self.supportedFeatures)\n\treturn nil\n}\n\nfunc (self *CoinAcceptor) CommandFeatureEnable(requested Features) error {\n\tf := requested & self.supportedFeatures\n\tbuf := [6]byte{0x0f, 0x01}\n\tself.byteOrder.PutUint32(buf[2:], uint32(f))\n\trequest := mdb.PacketFromBytes(buf[:])\n\tresponse := new(mdb.Packet)\n\terr := self.mdb.Tx(request, response)\n\tif err != nil {\n\t\tlog.Printf(\"mdb request=%s err=%v\", request.Format(), err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (self *CoinAcceptor) CommandExpansionSendDiagStatus() error {\n\tif self.supportedFeatures&FeatureExtendedDiagnostic == 0 {\n\t\tlog.Printf(\"CommandExpansionSendDiagStatus feature is not supported\")\n\t\treturn nil\n\t}\n\tself.mdb.TxDebug(mdb.PacketFromHex(\"0f05\"), true) \/\/ 0f05 EXPANSION SEND DIAG money.Status\n\treturn nil\n}\n\nfunc (self *CoinAcceptor) coinTypeNominal(b byte) currency.Nominal {\n\tif b >= coinTypeCount {\n\t\tlog.Printf(\"invalid coin type: %d\", b)\n\t\treturn 0\n\t}\n\treturn self.coinTypeCredit[b]\n}\n\nfunc (self *CoinAcceptor) nominalCoinType(nominal currency.Nominal) (byte, error) {\n\tfor ct, n := range self.coinTypeCredit {\n\t\tif n == nominal {\n\t\t\treturn byte(ct), nil\n\t\t}\n\t}\n\treturn 0, fmt.Errorf(\"Unknown nominal %s\", currency.Amount(nominal).Format100I())\n}\n\nfunc (self *CoinAcceptor) parsePollItem(b, b2 byte) (money.PollItem, bool) {\n\tswitch b {\n\tcase 0x01: \/\/ Escrow request\n\t\treturn money.PollItem{Status: money.StatusReturnRequest}, false\n\tcase 0x02: \/\/ Changer Payout Busy\n\t\treturn money.PollItem{Status: money.StatusBusy}, false\n\tcase 0x03: \/\/ No Credit\n\t\treturn money.PollItem{Status: money.StatusError, Error: ErrNoCredit}, false\n\tcase 0x04: \/\/ Defective Tube Sensor\n\t\treturn money.PollItem{Status: money.StatusFatal, Error: money.ErrSensor}, false\n\tcase 0x05: \/\/ Double Arrival\n\t\treturn money.PollItem{Status: money.StatusInfo, Error: ErrDoubleArrival}, false\n\tcase 0x06: \/\/ Acceptor Unplugged\n\t\treturn money.PollItem{Status: money.StatusFatal, Error: money.ErrNoStorage}, false\n\tcase 0x07: \/\/ Tube Jam\n\t\treturn money.PollItem{Status: money.StatusFatal, Error: money.ErrJam}, false\n\tcase 0x08: \/\/ ROM checksum error\n\t\treturn money.PollItem{Status: money.StatusFatal, Error: money.ErrROMChecksum}, false\n\tcase 0x09: \/\/ Coin Routing Error\n\t\treturn money.PollItem{Status: money.StatusError, Error: ErrCoinRouting}, false\n\tcase 0x0a: \/\/ Changer Busy\n\t\treturn money.PollItem{Status: money.StatusBusy}, false\n\tcase 0x0b: \/\/ Changer was Reset\n\t\treturn money.PollItem{Status: money.StatusWasReset}, false\n\tcase 0x0c: \/\/ Coin Jam\n\t\treturn money.PollItem{Status: money.StatusFatal, Error: ErrCoinJam}, false\n\tcase 0x0d: \/\/ Possible Credited Coin Removal\n\t\treturn money.PollItem{Status: money.StatusError, Error: money.ErrFraud}, false\n\t}\n\n\tif b&0x80 != 0 { \/\/ Coins Dispensed Manually\n\t\t\/\/ b=1yyyxxxx b2=number of coins in tube\n\t\t\/\/ yyy = coins dispensed\n\t\t\/\/ xxxx = coin type\n\t\tcount := (b >> 4) & 7\n\t\tnominal := self.coinTypeNominal(b & 0xf)\n\t\treturn money.PollItem{Status: money.StatusDispensed, DataNominal: nominal, DataCount: count}, true\n\t}\n\tif b&0x7f == b { \/\/ Coins Deposited\n\t\t\/\/ b=01yyxxxx b2=number of coins in tube\n\t\t\/\/ yy = coin routing\n\t\t\/\/ xxxx = coin type\n\t\trouting := (b >> 4) & 3\n\t\tif routing > 3 {\n\t\t\tpanic(\"code error\")\n\t\t}\n\t\tnominal := self.coinTypeNominal(b & 0xf)\n\t\treturn money.PollItem{Status: money.StatusCredit, DataNominal: nominal, DataCount: 1}, true\n\t}\n\tif b&0x3f == b { \/\/ Slug count\n\t\tslugs := b & 0x1f\n\t\tlog.Printf(\"Number of slugs: %d\", slugs)\n\t\treturn money.PollItem{Status: money.StatusInfo, Error: ErrSlugs, DataCount: slugs}, false\n\t}\n\n\terr := fmt.Errorf(\"parsePollItem unknown=%x\", b)\n\tlog.Print(err)\n\treturn money.PollItem{Status: money.StatusFatal, Error: err}, false\n}\n<|endoftext|>"} {"text":"<commit_before>package controller_manager\n\nimport (\n\t\"strings\"\n\t\"time\"\n\n\tg \"github.com\/onsi\/ginkgo\"\n\to \"github.com\/onsi\/gomega\"\n\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\n\texutil \"github.com\/openshift\/origin\/test\/extended\/util\"\n)\n\nfunc waitForServiceAccountToken(client kubernetes.Interface, ns, name string, attempts int, interval time.Duration) (string, error) {\n\tfor i := 0; i <= attempts; i++ {\n\t\ttime.Sleep(interval)\n\t\ttoken, err := getServiceAccountToken(client, ns, name)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif len(token) > 0 {\n\t\t\treturn token, nil\n\t\t}\n\t}\n\treturn \"\", nil\n}\n\nfunc getServiceAccountToken(client kubernetes.Interface, ns, name string) (string, error) {\n\tsecrets, err := client.CoreV1().Secrets(ns).List(metav1.ListOptions{})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor _, secret := range secrets.Items {\n\t\tif secret.Type == corev1.SecretTypeServiceAccountToken && secret.Annotations[corev1.ServiceAccountNameKey] == name {\n\t\t\tsa, err := client.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\tfor _, ref := range sa.Secrets {\n\t\t\t\tif ref.Name == secret.Name {\n\t\t\t\t\treturn string(secret.Data[corev1.ServiceAccountTokenKey]), nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}\n\n\treturn \"\", nil\n}\n\nvar _ = g.Describe(\"[Feature:OpenShiftControllerManager]\", func() {\n\tdefer g.GinkgoRecover()\n\toc := exutil.NewCLI(\"pull-secrets\", exutil.KubeConfigPath())\n\n\tg.It(\"TestAutomaticCreationOfPullSecrets\", func() {\n\t\tt := g.GinkgoT()\n\n\t\tclusterAdminKubeClient := oc.AdminKubeClient()\n\t\tsaNamespace := oc.Namespace()\n\t\tsaName := \"default\"\n\n\t\t\/\/ Get a service account token\n\t\tsaToken, err := waitForServiceAccountToken(clusterAdminKubeClient, saNamespace, saName, 20, time.Second)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"unexpected error: %v\", err)\n\t\t}\n\t\tif len(saToken) == 0 {\n\t\t\tt.Errorf(\"token was not created\")\n\t\t}\n\n\t\t\/\/ Get the matching dockercfg secret\n\t\t_, saPullSecret, err := waitForServiceAccountPullSecret(clusterAdminKubeClient, saNamespace, saName, 20, time.Second)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"unexpected error: %v\", err)\n\t\t}\n\t\tif len(saPullSecret) == 0 {\n\t\t\tt.Errorf(\"pull secret was not created\")\n\t\t}\n\n\t\timageConfig, err := oc.AdminConfigClient().ConfigV1().Images().Get(\"cluster\", metav1.GetOptions{})\n\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\tif !strings.Contains(saPullSecret, imageConfig.Status.InternalRegistryHostname) {\n\t\t\tt.Errorf(\"missing %q in %v\", imageConfig.Status.InternalRegistryHostname, saPullSecret)\n\t\t}\n\n\t\tif len(imageConfig.Spec.ExternalRegistryHostnames) > 0 {\n\t\t\tif !strings.Contains(saPullSecret, imageConfig.Spec.ExternalRegistryHostnames[0]) {\n\t\t\t\tt.Errorf(\"missing %q in %v\", imageConfig.Spec.ExternalRegistryHostnames[0], saPullSecret)\n\t\t\t}\n\t\t}\n\n\t})\n})\n\nfunc waitForServiceAccountPullSecret(client kubernetes.Interface, ns, name string, attempts int, interval time.Duration) (string, string, error) {\n\tfor i := 0; i <= attempts; i++ {\n\t\ttime.Sleep(interval)\n\t\tsecretName, dockerCfg, err := getServiceAccountPullSecret(client, ns, name)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\tif len(dockerCfg) > 2 {\n\t\t\treturn secretName, dockerCfg, nil\n\t\t}\n\t}\n\treturn \"\", \"\", nil\n}\n\nfunc getServiceAccountPullSecret(client kubernetes.Interface, ns, name string) (string, string, error) {\n\tsecrets, err := client.CoreV1().Secrets(ns).List(metav1.ListOptions{})\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tfor _, secret := range secrets.Items {\n\t\tif secret.Type == corev1.SecretTypeDockercfg && secret.Annotations[corev1.ServiceAccountNameKey] == name {\n\t\t\treturn secret.Name, string(secret.Data[corev1.DockerConfigKey]), nil\n\t\t}\n\t}\n\treturn \"\", \"\", nil\n}\n\nvar _ = g.Describe(\"[Feature:OpenShiftControllerManager]\", func() {\n\tdefer g.GinkgoRecover()\n\toc := exutil.NewCLI(\"pull-secrets\", exutil.KubeConfigPath())\n\n\tg.It(\"TestDockercfgTokenDeletedController\", func() {\n\t\tt := g.GinkgoT()\n\n\t\tclusterAdminKubeClient := oc.AdminKubeClient()\n\t\tsaNamespace := oc.Namespace()\n\n\t\tsa := &corev1.ServiceAccount{\n\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"sa1\", Namespace: saNamespace},\n\t\t}\n\n\t\tsa, err := clusterAdminKubeClient.CoreV1().ServiceAccounts(sa.Namespace).Create(sa)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\n\t\t\/\/ Get the service account dockercfg secret's name\n\t\tdockercfgSecretName, _, err := waitForServiceAccountPullSecret(clusterAdminKubeClient, sa.Namespace, sa.Name, 20, time.Second)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\t\tif len(dockercfgSecretName) == 0 {\n\t\t\tt.Fatal(\"pull secret was not created\")\n\t\t}\n\n\t\t\/\/ Get the matching secret's name\n\t\tdockercfgSecret, err := clusterAdminKubeClient.CoreV1().Secrets(sa.Namespace).Get(dockercfgSecretName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\t\tsecretName := dockercfgSecret.Annotations[\"openshift.io\/token-secret.name\"]\n\t\tif len(secretName) == 0 {\n\t\t\tt.Fatal(\"secret was not created\")\n\t\t}\n\n\t\t\/\/ Delete the service account's secret\n\t\tif err := clusterAdminKubeClient.CoreV1().Secrets(sa.Namespace).Delete(secretName, nil); err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\n\t\t\/\/ Expect the matching dockercfg secret to also be deleted\n\t\tif err := wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) {\n\t\t\t_, err := clusterAdminKubeClient.CoreV1().Secrets(sa.Namespace).Get(\n\t\t\t\tdockercfgSecretName,\n\t\t\t\tmetav1.GetOptions{},\n\t\t\t)\n\t\t\treturn errors.IsNotFound(err), nil\n\t\t}); err != nil {\n\t\t\tt.Fatalf(\"waiting for secret deletion: %v\", err)\n\t\t}\n\t})\n})\n<commit_msg>Bug 1765294: Disable flaking token delete test<commit_after>package controller_manager\n\nimport (\n\t\"strings\"\n\t\"time\"\n\n\tg \"github.com\/onsi\/ginkgo\"\n\to \"github.com\/onsi\/gomega\"\n\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\n\texutil \"github.com\/openshift\/origin\/test\/extended\/util\"\n)\n\nfunc waitForServiceAccountToken(client kubernetes.Interface, ns, name string, attempts int, interval time.Duration) (string, error) {\n\tfor i := 0; i <= attempts; i++ {\n\t\ttime.Sleep(interval)\n\t\ttoken, err := getServiceAccountToken(client, ns, name)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif len(token) > 0 {\n\t\t\treturn token, nil\n\t\t}\n\t}\n\treturn \"\", nil\n}\n\nfunc getServiceAccountToken(client kubernetes.Interface, ns, name string) (string, error) {\n\tsecrets, err := client.CoreV1().Secrets(ns).List(metav1.ListOptions{})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfor _, secret := range secrets.Items {\n\t\tif secret.Type == corev1.SecretTypeServiceAccountToken && secret.Annotations[corev1.ServiceAccountNameKey] == name {\n\t\t\tsa, err := client.CoreV1().ServiceAccounts(ns).Get(name, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\n\t\t\tfor _, ref := range sa.Secrets {\n\t\t\t\tif ref.Name == secret.Name {\n\t\t\t\t\treturn string(secret.Data[corev1.ServiceAccountTokenKey]), nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}\n\n\treturn \"\", nil\n}\n\nvar _ = g.Describe(\"[Feature:OpenShiftControllerManager]\", func() {\n\tdefer g.GinkgoRecover()\n\toc := exutil.NewCLI(\"pull-secrets\", exutil.KubeConfigPath())\n\n\tg.It(\"TestAutomaticCreationOfPullSecrets\", func() {\n\t\tt := g.GinkgoT()\n\n\t\tclusterAdminKubeClient := oc.AdminKubeClient()\n\t\tsaNamespace := oc.Namespace()\n\t\tsaName := \"default\"\n\n\t\t\/\/ Get a service account token\n\t\tsaToken, err := waitForServiceAccountToken(clusterAdminKubeClient, saNamespace, saName, 20, time.Second)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"unexpected error: %v\", err)\n\t\t}\n\t\tif len(saToken) == 0 {\n\t\t\tt.Errorf(\"token was not created\")\n\t\t}\n\n\t\t\/\/ Get the matching dockercfg secret\n\t\t_, saPullSecret, err := waitForServiceAccountPullSecret(clusterAdminKubeClient, saNamespace, saName, 20, time.Second)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"unexpected error: %v\", err)\n\t\t}\n\t\tif len(saPullSecret) == 0 {\n\t\t\tt.Errorf(\"pull secret was not created\")\n\t\t}\n\n\t\timageConfig, err := oc.AdminConfigClient().ConfigV1().Images().Get(\"cluster\", metav1.GetOptions{})\n\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\tif !strings.Contains(saPullSecret, imageConfig.Status.InternalRegistryHostname) {\n\t\t\tt.Errorf(\"missing %q in %v\", imageConfig.Status.InternalRegistryHostname, saPullSecret)\n\t\t}\n\n\t\tif len(imageConfig.Spec.ExternalRegistryHostnames) > 0 {\n\t\t\tif !strings.Contains(saPullSecret, imageConfig.Spec.ExternalRegistryHostnames[0]) {\n\t\t\t\tt.Errorf(\"missing %q in %v\", imageConfig.Spec.ExternalRegistryHostnames[0], saPullSecret)\n\t\t\t}\n\t\t}\n\n\t})\n})\n\nfunc waitForServiceAccountPullSecret(client kubernetes.Interface, ns, name string, attempts int, interval time.Duration) (string, string, error) {\n\tfor i := 0; i <= attempts; i++ {\n\t\ttime.Sleep(interval)\n\t\tsecretName, dockerCfg, err := getServiceAccountPullSecret(client, ns, name)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\tif len(dockerCfg) > 2 {\n\t\t\treturn secretName, dockerCfg, nil\n\t\t}\n\t}\n\treturn \"\", \"\", nil\n}\n\nfunc getServiceAccountPullSecret(client kubernetes.Interface, ns, name string) (string, string, error) {\n\tsecrets, err := client.CoreV1().Secrets(ns).List(metav1.ListOptions{})\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tfor _, secret := range secrets.Items {\n\t\tif secret.Type == corev1.SecretTypeDockercfg && secret.Annotations[corev1.ServiceAccountNameKey] == name {\n\t\t\treturn secret.Name, string(secret.Data[corev1.DockerConfigKey]), nil\n\t\t}\n\t}\n\treturn \"\", \"\", nil\n}\n\nvar _ = g.Describe(\"[Feature:OpenShiftControllerManager]\", func() {\n\tdefer g.GinkgoRecover()\n\toc := exutil.NewCLI(\"pull-secrets\", exutil.KubeConfigPath())\n\n\tg.It(\"TestDockercfgTokenDeletedController\", func() {\n\t\tg.Skip(\"Bug 1765294: Pull secrets are not always being deleted when token is deleted. Disabling until root cause is fixed.\")\n\t\tt := g.GinkgoT()\n\n\t\tclusterAdminKubeClient := oc.AdminKubeClient()\n\t\tsaNamespace := oc.Namespace()\n\n\t\tsa := &corev1.ServiceAccount{\n\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"sa1\", Namespace: saNamespace},\n\t\t}\n\n\t\tsa, err := clusterAdminKubeClient.CoreV1().ServiceAccounts(sa.Namespace).Create(sa)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\n\t\t\/\/ Get the service account dockercfg secret's name\n\t\tdockercfgSecretName, _, err := waitForServiceAccountPullSecret(clusterAdminKubeClient, sa.Namespace, sa.Name, 20, time.Second)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\t\tif len(dockercfgSecretName) == 0 {\n\t\t\tt.Fatal(\"pull secret was not created\")\n\t\t}\n\n\t\t\/\/ Get the matching secret's name\n\t\tdockercfgSecret, err := clusterAdminKubeClient.CoreV1().Secrets(sa.Namespace).Get(dockercfgSecretName, metav1.GetOptions{})\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\t\tsecretName := dockercfgSecret.Annotations[\"openshift.io\/token-secret.name\"]\n\t\tif len(secretName) == 0 {\n\t\t\tt.Fatal(\"secret was not created\")\n\t\t}\n\n\t\t\/\/ Delete the service account's secret\n\t\tif err := clusterAdminKubeClient.CoreV1().Secrets(sa.Namespace).Delete(secretName, nil); err != nil {\n\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t}\n\n\t\t\/\/ Expect the matching dockercfg secret to also be deleted\n\t\tif err := wait.Poll(5*time.Second, 5*time.Minute, func() (bool, error) {\n\t\t\t_, err := clusterAdminKubeClient.CoreV1().Secrets(sa.Namespace).Get(\n\t\t\t\tdockercfgSecretName,\n\t\t\t\tmetav1.GetOptions{},\n\t\t\t)\n\t\t\treturn errors.IsNotFound(err), nil\n\t\t}); err != nil {\n\t\t\tt.Fatalf(\"waiting for secret deletion: %v\", err)\n\t\t}\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package pgpkeys\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/go-cleanhttp\"\n\t\"golang.org\/x\/crypto\/openpgp\"\n)\n\nconst (\n\tkbPrefix = \"keybase:\"\n)\n\n\/\/ FetchKeybasePubkeys fetches public keys from Keybase given a set of\n\/\/ usernames, which are derived from correctly formatted input entries. It\n\/\/ doesn't use their client code due to both the API and the fact that it is\n\/\/ considered alpha and probably best not to rely on it. The keys are returned\n\/\/ as base64-encoded strings.\nfunc FetchKeybasePubkeys(input []string) (map[string]string, error) {\n\tclient := cleanhttp.DefaultClient()\n\tif client == nil {\n\t\treturn nil, fmt.Errorf(\"unable to create an http client\")\n\t}\n\n\tif len(input) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tusernames := make([]string, 0, len(input))\n\tfor _, v := range input {\n\t\tif strings.HasPrefix(v, kbPrefix) {\n\t\t\tusernames = append(usernames, strings.TrimPrefix(v, kbPrefix))\n\t\t}\n\t}\n\n\tif len(usernames) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tret := make(map[string]string, len(usernames))\n\turl := fmt.Sprintf(\"https:\/\/keybase.io\/_\/api\/1.0\/user\/lookup.json?usernames=%s&fields=public_keys\", strings.Join(usernames, \",\"))\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\ttype publicKeys struct {\n\t\tPrimary struct {\n\t\t\tBundle string\n\t\t}\n\t}\n\n\ttype them struct {\n\t\tpublicKeys `json:\"public_keys\"`\n\t}\n\n\ttype kbResp struct {\n\t\tStatus struct {\n\t\t\tName string\n\t\t}\n\t\tThem []them\n\t}\n\n\tout := &kbResp{\n\t\tThem: []them{},\n\t}\n\n\tdec := json.NewDecoder(resp.Body)\n\tif err := dec.Decode(out); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif out.Status.Name != \"OK\" {\n\t\treturn nil, fmt.Errorf(\"got non-OK response: %s\", out.Status.Name)\n\t}\n\n\tif len(out.Them) != len(usernames) {\n\t\treturn nil, fmt.Errorf(\"returned keys length does not match number of provided usernames\")\n\t}\n\n\tvar keyReader *bytes.Reader\n\tserializedEntity := bytes.NewBuffer(nil)\n\tfor i, themVal := range out.Them {\n\t\tkeyReader = bytes.NewReader([]byte(themVal.Primary.Bundle))\n\t\tentityList, err := openpgp.ReadArmoredKeyRing(keyReader)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(entityList) != 1 {\n\t\t\treturn nil, fmt.Errorf(\"primary key could not be parsed for user %s\", usernames[i])\n\t\t}\n\t\tif entityList[0] == nil {\n\t\t\treturn nil, fmt.Errorf(\"primary key was nil for user %s\", usernames[i])\n\t\t}\n\n\t\tserializedEntity.Reset()\n\t\terr = entityList[0].Serialize(serializedEntity)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error serializing entity for user %s: %s\", usernames[i], err)\n\t\t}\n\n\t\t\/\/ The API returns values in the same ordering requested, so this should properly match\n\t\tret[kbPrefix+usernames[i]] = base64.StdEncoding.EncodeToString(serializedEntity.Bytes())\n\t}\n\n\treturn ret, nil\n}\n<commit_msg>Add returning which user names could not be looked up<commit_after>package pgpkeys\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/go-cleanhttp\"\n\t\"golang.org\/x\/crypto\/openpgp\"\n)\n\nconst (\n\tkbPrefix = \"keybase:\"\n)\n\n\/\/ FetchKeybasePubkeys fetches public keys from Keybase given a set of\n\/\/ usernames, which are derived from correctly formatted input entries. It\n\/\/ doesn't use their client code due to both the API and the fact that it is\n\/\/ considered alpha and probably best not to rely on it. The keys are returned\n\/\/ as base64-encoded strings.\nfunc FetchKeybasePubkeys(input []string) (map[string]string, error) {\n\tclient := cleanhttp.DefaultClient()\n\tif client == nil {\n\t\treturn nil, fmt.Errorf(\"unable to create an http client\")\n\t}\n\n\tif len(input) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tusernames := make([]string, 0, len(input))\n\tfor _, v := range input {\n\t\tif strings.HasPrefix(v, kbPrefix) {\n\t\t\tusernames = append(usernames, strings.TrimPrefix(v, kbPrefix))\n\t\t}\n\t}\n\n\tif len(usernames) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tret := make(map[string]string, len(usernames))\n\turl := fmt.Sprintf(\"https:\/\/keybase.io\/_\/api\/1.0\/user\/lookup.json?usernames=%s&fields=public_keys\", strings.Join(usernames, \",\"))\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\ttype publicKeys struct {\n\t\tPrimary struct {\n\t\t\tBundle string\n\t\t}\n\t}\n\n\ttype them struct {\n\t\tpublicKeys `json:\"public_keys\"`\n\t}\n\n\ttype kbResp struct {\n\t\tStatus struct {\n\t\t\tName string\n\t\t}\n\t\tThem []them\n\t}\n\n\tout := &kbResp{\n\t\tThem: []them{},\n\t}\n\n\tdec := json.NewDecoder(resp.Body)\n\tif err := dec.Decode(out); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif out.Status.Name != \"OK\" {\n\t\treturn nil, fmt.Errorf(\"got non-OK response: %s\", out.Status.Name)\n\t}\n\n\tmissingNames := make([]string, 0, len(usernames))\n\tvar keyReader *bytes.Reader\n\tserializedEntity := bytes.NewBuffer(nil)\n\tfor i, themVal := range out.Them {\n\t\tif themVal.Primary.Bundle == \"\" {\n\t\t\tmissingNames = append(missingNames, usernames[i])\n\t\t\tcontinue\n\t\t}\n\t\tkeyReader = bytes.NewReader([]byte(themVal.Primary.Bundle))\n\t\tentityList, err := openpgp.ReadArmoredKeyRing(keyReader)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(entityList) != 1 {\n\t\t\treturn nil, fmt.Errorf(\"primary key could not be parsed for user %s\", usernames[i])\n\t\t}\n\t\tif entityList[0] == nil {\n\t\t\treturn nil, fmt.Errorf(\"primary key was nil for user %s\", usernames[i])\n\t\t}\n\n\t\tserializedEntity.Reset()\n\t\terr = entityList[0].Serialize(serializedEntity)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error serializing entity for user %s: %s\", usernames[i], err)\n\t\t}\n\n\t\t\/\/ The API returns values in the same ordering requested, so this should properly match\n\t\tret[kbPrefix+usernames[i]] = base64.StdEncoding.EncodeToString(serializedEntity.Bytes())\n\t}\n\n\tif len(missingNames) > 0 {\n\t\treturn nil, fmt.Errorf(\"unable to fetch keys for user(s) %s from keybase\", strings.Join(missingNames, \",\"))\n\t}\n\n\treturn ret, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fann\"\n)\n\nfunc main() {\n\tconst numLayers = 3\n\tann := fann.CreateStandart(numLayers, []uint32{2, 3, 1})\n\tann.Foo()\n}\n<commit_msg>Added reading to simple<commit_after>package main\n\nimport (\n\t\"fann\"\n)\n\nfunc main() {\n\tconst numLayers = 3\n\tconst desiredError = 0.001\n\tconst maxEpochs = 500000\n\tconst epochsBetweenReports = 1000\n\n\tann := fann.CreateStandart(numLayers, []uint32{2, 3, 1})\n\tann.TrainOnFile(\"xor.data\", maxEpochs, epochsBetweenReports, desiredError)\n\tann.Foo()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package fakestore provides a fake in-mem storage.\n\/\/ The implementation is based on the current real implementation using Datastore.\n\/\/ See \/lib\/dsstore\/\n\/\/ TODO: once we have a fake Datastore server, get rid of this.\npackage fakestore\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com\/golang\/protobuf\/proto\" \/* copybara-comment *\/\n\t\"google.golang.org\/grpc\/codes\" \/* copybara-comment *\/\n\t\"google.golang.org\/grpc\/status\" \/* copybara-comment *\/\n\t\"github.com\/GoogleCloudPlatform\/healthcare-federated-access-services\/lib\/storage\" \/* copybara-comment: storage *\/\n)\n\n\/\/ Rev gives the string value of a revision.\nfunc Rev(rev int64) string {\n\tif rev == storage.LatestRev {\n\t\treturn storage.LatestRevName\n\t}\n\treturn fmt.Sprintf(\"%06d\", rev)\n}\n\n\/\/ Store is a fake in-mem store of data.\ntype Store struct {\n\t\/\/ Information map.\n\tInformation map[string]string\n\t\/\/ State is where the data resides.\n\t\/\/ Any block of code that wants to access the state reads it from the chan,\n\t\/\/ performs its operations, and then writes it back to the chan.\n\t\/\/ See https:\/\/bit.ly\/37ANPk4\n\tState chan State\n}\n\n\/\/ New creates a new Store.\nfunc New() *Store {\n\tf := &Store{State: make(chan State, 1)}\n\tstate := State{\n\t\tData: make(Data),\n\t\tHistory: make(Data),\n\t}\n\tf.State <- state\n\treturn f\n}\n\n\/\/ Tx creates a new transaction.\nfunc (s *Store) Tx(update bool) (storage.Tx, error) {\n\treturn NewTx(s.State, update), nil\n}\n\n\/\/ Info returns information about the storage.\nfunc (s *Store) Info() map[string]string {\n\treturn s.Information\n}\n\n\/\/ Exists checks if data item with given key exists.\nfunc (s *Store) Exists(datatype, realm, user, id string, rev int64) (_ bool, ferr error) {\n\tntx, err := s.Tx(false)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer func() {\n\t\terr := ntx.Finish()\n\t\tif ferr == nil {\n\t\t\tferr = err\n\t\t}\n\t}()\n\n\tntx.(*Tx).mu.Lock()\n\tdefer ntx.(*Tx).mu.Unlock()\n\treturn s.exists(datatype, realm, user, id, rev, ntx.(*Tx).state)\n}\n\nfunc (s *Store) exists(datatype, realm, user, id string, rev int64, state State) (bool, error) {\n\tkey := Key{datatype, realm, user, id, Rev(rev)}\n\tif _, ok := state.Data[key]; !ok {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}\n\n\/\/ Read reads a data item of a given key.\nfunc (s *Store) Read(datatype, realm, user, id string, rev int64, content proto.Message) error {\n\treturn s.ReadTx(datatype, realm, user, id, rev, content, nil)\n}\n\n\/\/ ReadTx reads a data item of a given key inside a transaction.\n\/\/ Calls Read if transaction is nil.\nfunc (s *Store) ReadTx(datatype, realm, user, id string, rev int64, content proto.Message, tx storage.Tx) (ferr error) {\n\tntx := tx\n\tif ntx == nil {\n\t\tvar err error\n\t\tntx, err = s.Tx(false)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ We need to call finish for this transaction.\n\t\t\/\/ We need to update the error returned from the function if commit fails.\n\t\tdefer func() {\n\t\t\terr := ntx.Finish()\n\t\t\tif ferr == nil {\n\t\t\t\tferr = err\n\t\t\t}\n\t\t}()\n\t}\n\n\tntx.(*Tx).mu.Lock()\n\tdefer ntx.(*Tx).mu.Unlock()\n\treturn s.read(datatype, realm, user, id, rev, content, ntx.(*Tx).state)\n}\n\nfunc (s *Store) read(datatype, realm, user, id string, rev int64, content proto.Message, state State) error {\n\tkey := Key{datatype, realm, user, id, Rev(rev)}\n\tv, ok := state.Data[key]\n\tif !ok {\n\t\treturn status.Errorf(codes.NotFound, \"not found: %+v rev:%v\", key, rev)\n\t}\n\n\tcontent.Reset()\n\tproto.Merge(content, v)\n\treturn nil\n}\n\n\/\/ MultiReadTx reads a set of items matching the input parameters and filters.\n\/\/ Returns total count and error.\n\/\/\n\/\/ content will contain the items which\n\/\/ their key matches the provided datatype, realm, user (if realm\/user are not \"\")\n\/\/ their value matches the provider filers\n\/\/ Items are sorted by their key's user and id in ascending order.\n\/\/ The type of the item's value should be typ.\n\/\/ Last revision of the items is used.\n\/\/\n\/\/ content's maps are keyed by user and id of the keys.\nfunc (s *Store) MultiReadTx(\n\tdatatype, realm, user string,\n\tfilters [][]storage.Filter,\n\toffset, pageSize int,\n\tcontent map[string]map[string]proto.Message,\n\ttyp proto.Message,\n\ttx storage.Tx,\n) (_ int, ferr error) {\n\tntx := tx\n\tif ntx == nil {\n\t\tvar err error\n\t\tntx, err = s.Tx(false)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tdefer func() {\n\t\t\terr := ntx.Finish()\n\t\t\tif ferr == nil {\n\t\t\t\tferr = err\n\t\t\t}\n\t\t}()\n\t}\n\n\tntx.(*Tx).mu.Lock()\n\tdefer ntx.(*Tx).mu.Unlock()\n\treturn s.multiRead(datatype, realm, user, filters, offset, pageSize, content, typ, ntx.(*Tx).state)\n}\n\nfunc (s *Store) multiRead(\n\tdatatype, realm, user string,\n\tfilters [][]storage.Filter,\n\toffset, pageSize int,\n\tcontent map[string]map[string]proto.Message,\n\ttyp proto.Message,\n\tstate State,\n) (int, error) {\n\tif content == nil {\n\t\treturn 0, status.Error(codes.InvalidArgument, \"content cannot be nil\")\n\t}\n\tif len(content) != 0 {\n\t\treturn 0, status.Error(codes.InvalidArgument, \"content is not empty\")\n\t}\n\n\tvar res KVList\n\tfor k, v := range state.Data {\n\t\tif k.Datatype != datatype {\n\t\t\tcontinue\n\t\t}\n\t\tif k.Realm != \"\" && k.Realm != realm {\n\t\t\tcontinue\n\t\t}\n\t\tif k.User != \"\" && k.User != user {\n\t\t\tcontinue\n\t\t}\n\t\tif k.Rev != storage.LatestRevName {\n\t\t\tcontinue\n\t\t}\n\n\t\tif !storage.MatchProtoFilters(filters, v) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif proto.MessageReflect(v).Descriptor() != proto.MessageReflect(typ).Descriptor() {\n\t\t\treturn 0, status.Errorf(codes.InvalidArgument, \"bad type provided: %T, want %T\", typ, v)\n\t\t}\n\n\t\tres = append(res, KV{k, v})\n\t}\n\n\tsort.Sort(res)\n\n\ti := offset\n\tfor ; i < len(res) && i < offset+pageSize; i++ {\n\t\tk := res[i].K\n\t\tv := res[i].V\n\t\tif _, ok := content[k.User]; !ok {\n\t\t\tcontent[k.User] = make(map[string]proto.Message)\n\t\t}\n\t\tcontent[k.User][k.ID] = proto.Clone(v)\n\t}\n\n\treturn i - offset, nil\n}\n\n\/\/ ReadHistory reads the history of a given key.\nfunc (s *Store) ReadHistory(datatype, realm, user, id string, content *[]proto.Message) error {\n\treturn s.ReadHistoryTx(datatype, realm, user, id, content, nil)\n}\n\n\/\/ ReadHistoryTx reads the history of a given key inside a transaction.\nfunc (s *Store) ReadHistoryTx(datatype, realm, user, id string, content *[]proto.Message, tx storage.Tx) (ferr error) {\n\tntx := tx\n\tif ntx == nil {\n\t\tvar err error\n\t\tntx, err = s.Tx(false)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer func() {\n\t\t\terr := ntx.Finish()\n\t\t\tif ferr == nil {\n\t\t\t\tferr = err\n\t\t\t}\n\t\t}()\n\t}\n\n\tntx.(*Tx).mu.Lock()\n\tdefer ntx.(*Tx).mu.Unlock()\n\treturn s.readHistory(datatype, realm, user, id, content, ntx.(*Tx).state)\n}\n\n\/\/ readHistory reads the history of a given key inside a transaction.\nfunc (s *Store) readHistory(datatype, realm, user, id string, content *[]proto.Message, state State) error {\n\tif content == nil {\n\t\treturn status.Error(codes.NotFound, \"content cannot be nil\")\n\t}\n\tkey := Key{datatype, realm, user, id, \"\"}\n\n\tvar res []proto.Message\n\tfor k, v := range state.History {\n\t\tk.Rev = \"\"\n\t\tif k != key {\n\t\t\tcontinue\n\t\t}\n\t\tres = append(res, proto.Clone(v))\n\t}\n\t*content = res\n\treturn nil\n}\n\n\/\/ Write writes an item.\nfunc (s *Store) Write(datatype, realm, user, id string, rev int64, content proto.Message, history proto.Message) error {\n\treturn s.WriteTx(datatype, realm, user, id, rev, content, history, nil)\n}\n\n\/\/ WriteTx writes an item inside a transaction.\nfunc (s *Store) WriteTx(datatype, realm, user, id string, rev int64, content proto.Message, history proto.Message, tx storage.Tx) (ferr error) {\n\tntx := tx\n\tif ntx == nil {\n\t\tvar err error\n\t\tntx, err = s.Tx(true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer func() {\n\t\t\terr := ntx.Finish()\n\t\t\tif ferr == nil {\n\t\t\t\tferr = err\n\t\t\t}\n\t\t}()\n\t}\n\n\tntx.(*Tx).mu.Lock()\n\tdefer ntx.(*Tx).mu.Unlock()\n\treturn s.write(datatype, realm, user, id, rev, content, history, ntx.(*Tx).state)\n}\n\nfunc (s *Store) write(datatype, realm, user, id string, rev int64, content proto.Message, history proto.Message, state State) error {\n\tkey := Key{datatype, realm, user, id, Rev(rev)}\n\tstate.Data[key] = proto.Clone(content)\n\tif rev != storage.LatestRev {\n\t\tlatest := key\n\t\tlatest.Rev = Rev(storage.LatestRev)\n\t\tstate.Data[latest] = proto.Clone(content)\n\t}\n\tif history != nil {\n\t\tstate.History[key] = proto.Clone(history)\n\t}\n\treturn nil\n}\n\n\/\/ Delete deletes an item.\nfunc (s *Store) Delete(datatype, realm, user, id string, rev int64) error {\n\treturn s.DeleteTx(datatype, realm, user, id, rev, nil)\n}\n\n\/\/ DeleteTx deletes an item inside a transaction.\nfunc (s *Store) DeleteTx(datatype, realm, user, id string, rev int64, tx storage.Tx) (ferr error) {\n\tntx := tx\n\tif ntx == nil {\n\t\tvar err error\n\t\tntx, err = s.Tx(true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer func() {\n\t\t\terr := ntx.Finish()\n\t\t\tif ferr == nil {\n\t\t\t\tferr = err\n\t\t\t}\n\t\t}()\n\t}\n\n\tntx.(*Tx).mu.Lock()\n\tdefer ntx.(*Tx).mu.Unlock()\n\treturn s.delete(datatype, realm, user, id, rev, ntx.(*Tx).state)\n}\n\nfunc (s *Store) delete(datatype, realm, user, id string, rev int64, state State) error {\n\tkey := Key{datatype, realm, user, id, Rev(rev)}\n\tdelete(state.Data, key)\n\treturn nil\n}\n\n\/\/ MultiDeleteTx deletes an item inside a transaction.\nfunc (s *Store) MultiDeleteTx(datatype, realm, user string, tx storage.Tx) (ferr error) {\n\tntx := tx\n\tif ntx == nil {\n\t\tvar err error\n\t\tntx, err = s.Tx(true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer func() {\n\t\t\terr := ntx.Finish()\n\t\t\tif ferr == nil {\n\t\t\t\tferr = err\n\t\t\t}\n\t\t}()\n\t}\n\n\tntx.(*Tx).mu.Lock()\n\tdefer ntx.(*Tx).mu.Unlock()\n\treturn s.multiDelete(datatype, realm, user, ntx.(*Tx).state)\n}\n\nfunc (s *Store) multiDelete(datatype, realm, user string, state State) error {\n\tfor k := range state.Data {\n\t\tif k.Datatype == datatype && k.Realm == realm && k.User == user {\n\t\t\tdelete(state.Data, k)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Wipe clears a realm.\nfunc (s *Store) Wipe(realm string) (ferr error) {\n\tntx, err := s.Tx(true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\terr := ntx.Finish()\n\t\tif ferr == nil {\n\t\t\tferr = err\n\t\t}\n\t}()\n\n\tntx.(*Tx).mu.Lock()\n\tdefer ntx.(*Tx).mu.Unlock()\n\treturn s.wipe(realm, ntx.(*Tx).state)\n}\n\nfunc (s *Store) wipe(realm string, state State) error {\n\tfor k := range state.Data {\n\t\tif k.Realm == realm {\n\t\t\tdelete(state.Data, k)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ LockTx creates a lock with the give name.\nfunc (s *Store) LockTx(lockName string, minFrequency time.Duration, tx storage.Tx) storage.Tx {\n\t\/\/ TODO: not sure about the behavior for this one.\n\treturn tx\n}\n\nvar _ storage.Store = &Store{}\n<commit_msg>fix build<commit_after>\/\/ Copyright 2019 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package fakestore provides a fake in-mem storage.\n\/\/ The implementation is based on the current real implementation using Datastore.\n\/\/ See \/lib\/dsstore\/\n\/\/ TODO: once we have a fake Datastore server, get rid of this.\npackage fakestore\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com\/golang\/protobuf\/proto\" \/* copybara-comment *\/\n\t\"google.golang.org\/grpc\/codes\" \/* copybara-comment *\/\n\t\"google.golang.org\/grpc\/status\" \/* copybara-comment *\/\n\t\"github.com\/GoogleCloudPlatform\/healthcare-federated-access-services\/lib\/storage\" \/* copybara-comment: storage *\/\n)\n\n\/\/ Rev gives the string value of a revision.\nfunc Rev(rev int64) string {\n\tif rev == storage.LatestRev {\n\t\treturn storage.LatestRevName\n\t}\n\treturn fmt.Sprintf(\"%06d\", rev)\n}\n\n\/\/ Store is a fake in-mem store of data.\ntype Store struct {\n\t\/\/ Information map.\n\tInformation map[string]string\n\t\/\/ State is where the data resides.\n\t\/\/ Any block of code that wants to access the state reads it from the chan,\n\t\/\/ performs its operations, and then writes it back to the chan.\n\t\/\/ See https:\/\/bit.ly\/37ANPk4\n\tState chan State\n}\n\n\/\/ New creates a new Store.\nfunc New() *Store {\n\tf := &Store{State: make(chan State, 1)}\n\tstate := State{\n\t\tData: make(Data),\n\t\tHistory: make(Data),\n\t}\n\tf.State <- state\n\treturn f\n}\n\n\/\/ Tx creates a new transaction.\nfunc (s *Store) Tx(update bool) (storage.Tx, error) {\n\treturn NewTx(s.State, update), nil\n}\n\n\/\/ Info returns information about the storage.\nfunc (s *Store) Info() map[string]string {\n\treturn s.Information\n}\n\n\/\/ Exists checks if data item with given key exists.\nfunc (s *Store) Exists(datatype, realm, user, id string, rev int64) (_ bool, ferr error) {\n\tntx, err := s.Tx(false)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer func() {\n\t\terr := ntx.Finish()\n\t\tif ferr == nil {\n\t\t\tferr = err\n\t\t}\n\t}()\n\n\tntx.(*Tx).mu.Lock()\n\tdefer ntx.(*Tx).mu.Unlock()\n\treturn s.exists(datatype, realm, user, id, rev, ntx.(*Tx).state)\n}\n\nfunc (s *Store) exists(datatype, realm, user, id string, rev int64, state State) (bool, error) {\n\tkey := Key{datatype, realm, user, id, Rev(rev)}\n\tif _, ok := state.Data[key]; !ok {\n\t\treturn false, nil\n\t}\n\treturn true, nil\n}\n\n\/\/ Read reads a data item of a given key.\nfunc (s *Store) Read(datatype, realm, user, id string, rev int64, content proto.Message) error {\n\treturn s.ReadTx(datatype, realm, user, id, rev, content, nil)\n}\n\n\/\/ ReadTx reads a data item of a given key inside a transaction.\n\/\/ Calls Read if transaction is nil.\nfunc (s *Store) ReadTx(datatype, realm, user, id string, rev int64, content proto.Message, tx storage.Tx) (ferr error) {\n\tntx := tx\n\tif ntx == nil {\n\t\tvar err error\n\t\tntx, err = s.Tx(false)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ We need to call finish for this transaction.\n\t\t\/\/ We need to update the error returned from the function if commit fails.\n\t\tdefer func() {\n\t\t\terr := ntx.Finish()\n\t\t\tif ferr == nil {\n\t\t\t\tferr = err\n\t\t\t}\n\t\t}()\n\t}\n\n\tntx.(*Tx).mu.Lock()\n\tdefer ntx.(*Tx).mu.Unlock()\n\treturn s.read(datatype, realm, user, id, rev, content, ntx.(*Tx).state)\n}\n\nfunc (s *Store) read(datatype, realm, user, id string, rev int64, content proto.Message, state State) error {\n\tkey := Key{datatype, realm, user, id, Rev(rev)}\n\tv, ok := state.Data[key]\n\tif !ok {\n\t\treturn status.Errorf(codes.NotFound, \"not found: %+v rev:%v\", key, rev)\n\t}\n\n\tcontent.Reset()\n\tproto.Merge(content, v)\n\treturn nil\n}\n\n\/\/ MultiReadTx reads a set of items matching the input parameters and filters.\n\/\/ Returns total count and error.\n\/\/\n\/\/ content will contain the items which\n\/\/ their key matches the provided datatype, realm, user (if realm\/user are not \"\")\n\/\/ their value matches the provider filers\n\/\/ Items are sorted by their key's user and id in ascending order.\n\/\/ The type of the item's value should be typ.\n\/\/ Last revision of the items is used.\n\/\/\n\/\/ content's maps are keyed by user and id of the keys.\nfunc (s *Store) MultiReadTx(\n\tdatatype, realm, user string,\n\tfilters [][]storage.Filter,\n\toffset, pageSize int,\n\tcontent map[string]map[string]proto.Message,\n\ttyp proto.Message,\n\ttx storage.Tx,\n) (_ int, ferr error) {\n\tntx := tx\n\tif ntx == nil {\n\t\tvar err error\n\t\tntx, err = s.Tx(false)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tdefer func() {\n\t\t\terr := ntx.Finish()\n\t\t\tif ferr == nil {\n\t\t\t\tferr = err\n\t\t\t}\n\t\t}()\n\t}\n\n\tntx.(*Tx).mu.Lock()\n\tdefer ntx.(*Tx).mu.Unlock()\n\treturn s.multiRead(datatype, realm, user, filters, offset, pageSize, content, typ, ntx.(*Tx).state)\n}\n\nfunc (s *Store) multiRead(\n\tdatatype, realm, user string,\n\tfilters [][]storage.Filter,\n\toffset, pageSize int,\n\tcontent map[string]map[string]proto.Message,\n\ttyp proto.Message,\n\tstate State,\n) (int, error) {\n\tif content == nil {\n\t\treturn 0, status.Error(codes.InvalidArgument, \"content cannot be nil\")\n\t}\n\tif len(content) != 0 {\n\t\treturn 0, status.Error(codes.InvalidArgument, \"content is not empty\")\n\t}\n\n\tvar res KVList\n\tfor k, v := range state.Data {\n\t\tif k.Datatype != datatype {\n\t\t\tcontinue\n\t\t}\n\t\tif k.Realm != \"\" && k.Realm != realm {\n\t\t\tcontinue\n\t\t}\n\t\tif k.User != \"\" && k.User != user {\n\t\t\tcontinue\n\t\t}\n\t\tif k.Rev != storage.LatestRevName {\n\t\t\tcontinue\n\t\t}\n\n\t\tif !storage.MatchProtoFilters(filters, v) {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ TODO: check the type of v matches the type of typ\n\n\t\tres = append(res, KV{k, v})\n\t}\n\n\tsort.Sort(res)\n\n\ti := offset\n\tfor ; i < len(res) && i < offset+pageSize; i++ {\n\t\tk := res[i].K\n\t\tv := res[i].V\n\t\tif _, ok := content[k.User]; !ok {\n\t\t\tcontent[k.User] = make(map[string]proto.Message)\n\t\t}\n\t\tcontent[k.User][k.ID] = proto.Clone(v)\n\t}\n\n\treturn i - offset, nil\n}\n\n\/\/ ReadHistory reads the history of a given key.\nfunc (s *Store) ReadHistory(datatype, realm, user, id string, content *[]proto.Message) error {\n\treturn s.ReadHistoryTx(datatype, realm, user, id, content, nil)\n}\n\n\/\/ ReadHistoryTx reads the history of a given key inside a transaction.\nfunc (s *Store) ReadHistoryTx(datatype, realm, user, id string, content *[]proto.Message, tx storage.Tx) (ferr error) {\n\tntx := tx\n\tif ntx == nil {\n\t\tvar err error\n\t\tntx, err = s.Tx(false)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer func() {\n\t\t\terr := ntx.Finish()\n\t\t\tif ferr == nil {\n\t\t\t\tferr = err\n\t\t\t}\n\t\t}()\n\t}\n\n\tntx.(*Tx).mu.Lock()\n\tdefer ntx.(*Tx).mu.Unlock()\n\treturn s.readHistory(datatype, realm, user, id, content, ntx.(*Tx).state)\n}\n\n\/\/ readHistory reads the history of a given key inside a transaction.\nfunc (s *Store) readHistory(datatype, realm, user, id string, content *[]proto.Message, state State) error {\n\tif content == nil {\n\t\treturn status.Error(codes.NotFound, \"content cannot be nil\")\n\t}\n\tkey := Key{datatype, realm, user, id, \"\"}\n\n\tvar res []proto.Message\n\tfor k, v := range state.History {\n\t\tk.Rev = \"\"\n\t\tif k != key {\n\t\t\tcontinue\n\t\t}\n\t\tres = append(res, proto.Clone(v))\n\t}\n\t*content = res\n\treturn nil\n}\n\n\/\/ Write writes an item.\nfunc (s *Store) Write(datatype, realm, user, id string, rev int64, content proto.Message, history proto.Message) error {\n\treturn s.WriteTx(datatype, realm, user, id, rev, content, history, nil)\n}\n\n\/\/ WriteTx writes an item inside a transaction.\nfunc (s *Store) WriteTx(datatype, realm, user, id string, rev int64, content proto.Message, history proto.Message, tx storage.Tx) (ferr error) {\n\tntx := tx\n\tif ntx == nil {\n\t\tvar err error\n\t\tntx, err = s.Tx(true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer func() {\n\t\t\terr := ntx.Finish()\n\t\t\tif ferr == nil {\n\t\t\t\tferr = err\n\t\t\t}\n\t\t}()\n\t}\n\n\tntx.(*Tx).mu.Lock()\n\tdefer ntx.(*Tx).mu.Unlock()\n\treturn s.write(datatype, realm, user, id, rev, content, history, ntx.(*Tx).state)\n}\n\nfunc (s *Store) write(datatype, realm, user, id string, rev int64, content proto.Message, history proto.Message, state State) error {\n\tkey := Key{datatype, realm, user, id, Rev(rev)}\n\tstate.Data[key] = proto.Clone(content)\n\tif rev != storage.LatestRev {\n\t\tlatest := key\n\t\tlatest.Rev = Rev(storage.LatestRev)\n\t\tstate.Data[latest] = proto.Clone(content)\n\t}\n\tif history != nil {\n\t\tstate.History[key] = proto.Clone(history)\n\t}\n\treturn nil\n}\n\n\/\/ Delete deletes an item.\nfunc (s *Store) Delete(datatype, realm, user, id string, rev int64) error {\n\treturn s.DeleteTx(datatype, realm, user, id, rev, nil)\n}\n\n\/\/ DeleteTx deletes an item inside a transaction.\nfunc (s *Store) DeleteTx(datatype, realm, user, id string, rev int64, tx storage.Tx) (ferr error) {\n\tntx := tx\n\tif ntx == nil {\n\t\tvar err error\n\t\tntx, err = s.Tx(true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer func() {\n\t\t\terr := ntx.Finish()\n\t\t\tif ferr == nil {\n\t\t\t\tferr = err\n\t\t\t}\n\t\t}()\n\t}\n\n\tntx.(*Tx).mu.Lock()\n\tdefer ntx.(*Tx).mu.Unlock()\n\treturn s.delete(datatype, realm, user, id, rev, ntx.(*Tx).state)\n}\n\nfunc (s *Store) delete(datatype, realm, user, id string, rev int64, state State) error {\n\tkey := Key{datatype, realm, user, id, Rev(rev)}\n\tdelete(state.Data, key)\n\treturn nil\n}\n\n\/\/ MultiDeleteTx deletes an item inside a transaction.\nfunc (s *Store) MultiDeleteTx(datatype, realm, user string, tx storage.Tx) (ferr error) {\n\tntx := tx\n\tif ntx == nil {\n\t\tvar err error\n\t\tntx, err = s.Tx(true)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer func() {\n\t\t\terr := ntx.Finish()\n\t\t\tif ferr == nil {\n\t\t\t\tferr = err\n\t\t\t}\n\t\t}()\n\t}\n\n\tntx.(*Tx).mu.Lock()\n\tdefer ntx.(*Tx).mu.Unlock()\n\treturn s.multiDelete(datatype, realm, user, ntx.(*Tx).state)\n}\n\nfunc (s *Store) multiDelete(datatype, realm, user string, state State) error {\n\tfor k := range state.Data {\n\t\tif k.Datatype == datatype && k.Realm == realm && k.User == user {\n\t\t\tdelete(state.Data, k)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Wipe clears a realm.\nfunc (s *Store) Wipe(realm string) (ferr error) {\n\tntx, err := s.Tx(true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() {\n\t\terr := ntx.Finish()\n\t\tif ferr == nil {\n\t\t\tferr = err\n\t\t}\n\t}()\n\n\tntx.(*Tx).mu.Lock()\n\tdefer ntx.(*Tx).mu.Unlock()\n\treturn s.wipe(realm, ntx.(*Tx).state)\n}\n\nfunc (s *Store) wipe(realm string, state State) error {\n\tfor k := range state.Data {\n\t\tif k.Realm == realm {\n\t\t\tdelete(state.Data, k)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ LockTx creates a lock with the give name.\nfunc (s *Store) LockTx(lockName string, minFrequency time.Duration, tx storage.Tx) storage.Tx {\n\t\/\/ TODO: not sure about the behavior for this one.\n\treturn tx\n}\n\nvar _ storage.Store = &Store{}\n<|endoftext|>"} {"text":"<commit_before>package llbsolver\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/containerd\/containerd\/platforms\"\n\t\"github.com\/moby\/buildkit\/cache\"\n\t\"github.com\/moby\/buildkit\/cache\/remotecache\"\n\t\"github.com\/moby\/buildkit\/client\/llb\"\n\t\"github.com\/moby\/buildkit\/executor\"\n\t\"github.com\/moby\/buildkit\/frontend\"\n\tgw \"github.com\/moby\/buildkit\/frontend\/gateway\/client\"\n\t\"github.com\/moby\/buildkit\/identity\"\n\t\"github.com\/moby\/buildkit\/session\"\n\t\"github.com\/moby\/buildkit\/solver\"\n\t\"github.com\/moby\/buildkit\/solver\/pb\"\n\t\"github.com\/moby\/buildkit\/util\/flightcontrol\"\n\t\"github.com\/moby\/buildkit\/util\/tracing\"\n\t\"github.com\/moby\/buildkit\/worker\"\n\tdigest \"github.com\/opencontainers\/go-digest\"\n\tspecs \"github.com\/opencontainers\/image-spec\/specs-go\/v1\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\ntype llbBridge struct {\n\tbuilder solver.Builder\n\tfrontends map[string]frontend.Frontend\n\tresolveWorker func() (worker.Worker, error)\n\teachWorker func(func(worker.Worker) error) error\n\tresolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc\n\tcms map[string]solver.CacheManager\n\tcmsMu sync.Mutex\n\tplatforms []specs.Platform\n\tsm *session.Manager\n}\n\nfunc (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res *frontend.Result, err error) {\n\tw, err := b.resolveWorker()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar cms []solver.CacheManager\n\tfor _, im := range req.CacheImports {\n\t\tb.cmsMu.Lock()\n\t\tvar cm solver.CacheManager\n\t\tcmId := identity.NewID()\n\t\tif im.Type == \"registry\" {\n\t\t\t\/\/ For compatibility with < v0.4.0\n\t\t\tif ref := im.Attrs[\"ref\"]; ref != \"\" {\n\t\t\t\tcmId = ref\n\t\t\t}\n\t\t}\n\t\tif prevCm, ok := b.cms[cmId]; !ok {\n\t\t\tfunc(cmId string, im gw.CacheOptionsEntry) {\n\t\t\t\tcm = newLazyCacheManager(cmId, func() (solver.CacheManager, error) {\n\t\t\t\t\tvar cmNew solver.CacheManager\n\t\t\t\t\tif err := inVertexContext(b.builder.Context(context.TODO()), \"importing cache manifest from \"+cmId, \"\", func(ctx context.Context) error {\n\t\t\t\t\t\tresolveCI, ok := b.resolveCacheImporterFuncs[im.Type]\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\treturn errors.Errorf(\"unknown cache importer: %s\", im.Type)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tci, desc, err := resolveCI(ctx, im.Attrs)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcmNew, err = ci.Resolve(ctx, desc, cmId, w)\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}); err != nil {\n\t\t\t\t\t\tlogrus.Debugf(\"error while importing cache manifest from cmId=%s: %v\", cmId, err)\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\treturn cmNew, nil\n\t\t\t\t})\n\t\t\t}(cmId, im)\n\t\t\tb.cms[cmId] = cm\n\t\t} else {\n\t\t\tcm = prevCm\n\t\t}\n\t\tcms = append(cms, cm)\n\t\tb.cmsMu.Unlock()\n\t}\n\n\tif req.Definition != nil && req.Definition.Def != nil && req.Frontend != \"\" {\n\t\treturn nil, errors.New(\"cannot solve with both Definition and Frontend specified\")\n\t}\n\n\tif req.Definition != nil && req.Definition.Def != nil {\n\t\tent, err := loadEntitlements(b.builder)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdpc := &detectPrunedCacheID{}\n\n\t\tedge, err := Load(req.Definition, dpc.Load, ValidateEntitlements(ent), WithCacheSources(cms), RuntimePlatforms(b.platforms), WithValidateCaps())\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to load LLB\")\n\t\t}\n\n\t\tif len(dpc.ids) > 0 {\n\t\t\tids := make([]string, 0, len(dpc.ids))\n\t\t\tfor id := range dpc.ids {\n\t\t\t\tids = append(ids, id)\n\t\t\t}\n\t\t\tif err := b.eachWorker(func(w worker.Worker) error {\n\t\t\t\treturn w.PruneCacheMounts(ctx, ids)\n\t\t\t}); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tres = &frontend.Result{\n\t\t\tRef: &resultProxy{\n\t\t\t\tdef: req.Definition,\n\t\t\t\tcb: func(ctx context.Context) (solver.CachedResult, error) {\n\t\t\t\t\tres, err := b.builder.Build(ctx, edge)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\twr, ok := res.Sys().(*worker.WorkerRef)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn nil, errors.Errorf(\"invalid reference for exporting: %T\", res.Sys())\n\t\t\t\t\t}\n\t\t\t\t\tif wr.ImmutableRef != nil {\n\t\t\t\t\t\tif err := wr.ImmutableRef.Finalize(ctx, false); err != nil {\n\t\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn res, err\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t} else if req.Frontend != \"\" {\n\t\tf, ok := b.frontends[req.Frontend]\n\t\tif !ok {\n\t\t\treturn nil, errors.Errorf(\"invalid frontend: %s\", req.Frontend)\n\t\t}\n\t\tres, err = f.Solve(ctx, b, req.FrontendOpt)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to solve with frontend %s\", req.Frontend)\n\t\t}\n\t} else {\n\t\treturn &frontend.Result{}, nil\n\t}\n\n\treturn\n}\n\ntype resultProxy struct {\n\tcb func(context.Context) (solver.CachedResult, error)\n\tdef *pb.Definition\n\tg flightcontrol.Group\n\tmu sync.Mutex\n\treleased bool\n\tv solver.CachedResult\n\terr error\n}\n\nfunc (rp *resultProxy) Definition() *pb.Definition {\n\treturn rp.def\n}\n\nfunc (rp *resultProxy) Release(ctx context.Context) error {\n\trp.mu.Lock()\n\tdefer rp.mu.Unlock()\n\tif rp.v != nil {\n\t\tif err := rp.v.Release(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\trp.released = true\n\treturn nil\n}\n\nfunc (rp *resultProxy) Result(ctx context.Context) (solver.CachedResult, error) {\n\tr, err := rp.g.Do(ctx, \"result\", func(ctx context.Context) (interface{}, error) {\n\t\trp.mu.Lock()\n\t\tif rp.released {\n\t\t\trp.mu.Unlock()\n\t\t\treturn nil, errors.Errorf(\"accessing released result\")\n\t\t}\n\t\tif rp.v != nil || rp.err != nil {\n\t\t\trp.mu.Unlock()\n\t\t\treturn rp.v, rp.err\n\t\t}\n\t\trp.mu.Unlock()\n\t\tv, err := rp.cb(ctx)\n\t\tif err != nil {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tif strings.Contains(err.Error(), context.Canceled.Error()) {\n\t\t\t\t\treturn v, err\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t\trp.mu.Lock()\n\t\tif rp.released {\n\t\t\tif v != nil {\n\t\t\t\tv.Release(context.TODO())\n\t\t\t}\n\t\t\trp.mu.Unlock()\n\t\t\treturn nil, errors.Errorf(\"evaluating released result\")\n\t\t}\n\t\trp.v = v\n\t\trp.err = err\n\t\trp.mu.Unlock()\n\t\treturn v, err\n\t})\n\tif r != nil {\n\t\treturn r.(solver.CachedResult), nil\n\t}\n\treturn nil, err\n}\n\nfunc (s *llbBridge) Exec(ctx context.Context, meta executor.Meta, root cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) (err error) {\n\tw, err := s.resolveWorker()\n\tif err != nil {\n\t\treturn err\n\t}\n\tspan, ctx := tracing.StartSpan(ctx, strings.Join(meta.Args, \" \"))\n\terr = w.Exec(ctx, meta, root, stdin, stdout, stderr)\n\ttracing.FinishWithError(span, err)\n\treturn err\n}\n\nfunc (s *llbBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (dgst digest.Digest, config []byte, err error) {\n\tw, err := s.resolveWorker()\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tif opt.LogName == \"\" {\n\t\topt.LogName = fmt.Sprintf(\"resolve image config for %s\", ref)\n\t}\n\tid := ref \/\/ make a deterministic ID for avoiding duplicates\n\tif platform := opt.Platform; platform == nil {\n\t\tid += platforms.Format(platforms.DefaultSpec())\n\t} else {\n\t\tid += platforms.Format(*platform)\n\t}\n\terr = inVertexContext(s.builder.Context(ctx), opt.LogName, id, func(ctx context.Context) error {\n\t\tdgst, config, err = w.ResolveImageConfig(ctx, ref, opt, s.sm)\n\t\treturn err\n\t})\n\treturn dgst, config, err\n}\n\ntype lazyCacheManager struct {\n\tid string\n\tmain solver.CacheManager\n\n\twaitCh chan struct{}\n\terr error\n}\n\nfunc (lcm *lazyCacheManager) ID() string {\n\treturn lcm.id\n}\nfunc (lcm *lazyCacheManager) Query(inp []solver.CacheKeyWithSelector, inputIndex solver.Index, dgst digest.Digest, outputIndex solver.Index) ([]*solver.CacheKey, error) {\n\tlcm.wait()\n\tif lcm.main == nil {\n\t\treturn nil, nil\n\t}\n\treturn lcm.main.Query(inp, inputIndex, dgst, outputIndex)\n}\nfunc (lcm *lazyCacheManager) Records(ck *solver.CacheKey) ([]*solver.CacheRecord, error) {\n\tlcm.wait()\n\tif lcm.main == nil {\n\t\treturn nil, nil\n\t}\n\treturn lcm.main.Records(ck)\n}\nfunc (lcm *lazyCacheManager) Load(ctx context.Context, rec *solver.CacheRecord) (solver.Result, error) {\n\tif err := lcm.wait(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn lcm.main.Load(ctx, rec)\n}\nfunc (lcm *lazyCacheManager) Save(key *solver.CacheKey, s solver.Result, createdAt time.Time) (*solver.ExportableCacheKey, error) {\n\tif err := lcm.wait(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn lcm.main.Save(key, s, createdAt)\n}\n\nfunc (lcm *lazyCacheManager) wait() error {\n\t<-lcm.waitCh\n\treturn lcm.err\n}\n\nfunc newLazyCacheManager(id string, fn func() (solver.CacheManager, error)) solver.CacheManager {\n\tlcm := &lazyCacheManager{id: id, waitCh: make(chan struct{})}\n\tgo func() {\n\t\tdefer close(lcm.waitCh)\n\t\tcm, err := fn()\n\t\tif err != nil {\n\t\t\tlcm.err = err\n\t\t\treturn\n\t\t}\n\t\tlcm.main = cm\n\t}()\n\treturn lcm\n}\n<commit_msg>solver: use correct context for getting cache managers lazily<commit_after>package llbsolver\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/containerd\/containerd\/platforms\"\n\t\"github.com\/mitchellh\/hashstructure\"\n\t\"github.com\/moby\/buildkit\/cache\"\n\t\"github.com\/moby\/buildkit\/cache\/remotecache\"\n\t\"github.com\/moby\/buildkit\/client\/llb\"\n\t\"github.com\/moby\/buildkit\/executor\"\n\t\"github.com\/moby\/buildkit\/frontend\"\n\tgw \"github.com\/moby\/buildkit\/frontend\/gateway\/client\"\n\t\"github.com\/moby\/buildkit\/session\"\n\t\"github.com\/moby\/buildkit\/solver\"\n\t\"github.com\/moby\/buildkit\/solver\/pb\"\n\t\"github.com\/moby\/buildkit\/util\/flightcontrol\"\n\t\"github.com\/moby\/buildkit\/util\/tracing\"\n\t\"github.com\/moby\/buildkit\/worker\"\n\tdigest \"github.com\/opencontainers\/go-digest\"\n\tspecs \"github.com\/opencontainers\/image-spec\/specs-go\/v1\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\ntype llbBridge struct {\n\tbuilder solver.Builder\n\tfrontends map[string]frontend.Frontend\n\tresolveWorker func() (worker.Worker, error)\n\teachWorker func(func(worker.Worker) error) error\n\tresolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc\n\tcms map[string]solver.CacheManager\n\tcmsMu sync.Mutex\n\tplatforms []specs.Platform\n\tsm *session.Manager\n}\n\nfunc (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImports []gw.CacheOptionsEntry) (solver.CachedResult, error) {\n\tw, err := b.resolveWorker()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tent, err := loadEntitlements(b.builder)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar cms []solver.CacheManager\n\tfor _, im := range cacheImports {\n\t\tcmID, err := cmKey(im)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tb.cmsMu.Lock()\n\t\tvar cm solver.CacheManager\n\t\tif prevCm, ok := b.cms[cmID]; !ok {\n\t\t\tfunc(cmID string, im gw.CacheOptionsEntry) {\n\t\t\t\tcm = newLazyCacheManager(cmID, func() (solver.CacheManager, error) {\n\t\t\t\t\tvar cmNew solver.CacheManager\n\t\t\t\t\tif err := inVertexContext(b.builder.Context(context.TODO()), \"importing cache manifest from \"+cmID, \"\", func(ctx context.Context) error {\n\t\t\t\t\t\tresolveCI, ok := b.resolveCacheImporterFuncs[im.Type]\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\treturn errors.Errorf(\"unknown cache importer: %s\", im.Type)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tci, desc, err := resolveCI(ctx, im.Attrs)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcmNew, err = ci.Resolve(ctx, desc, cmID, w)\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}); err != nil {\n\t\t\t\t\t\tlogrus.Debugf(\"error while importing cache manifest from cmId=%s: %v\", cmID, err)\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\treturn cmNew, nil\n\t\t\t\t})\n\t\t\t}(cmID, im)\n\t\t\tb.cms[cmID] = cm\n\t\t} else {\n\t\t\tcm = prevCm\n\t\t}\n\t\tcms = append(cms, cm)\n\t\tb.cmsMu.Unlock()\n\t}\n\tdpc := &detectPrunedCacheID{}\n\n\tedge, err := Load(def, dpc.Load, ValidateEntitlements(ent), WithCacheSources(cms), RuntimePlatforms(b.platforms), WithValidateCaps())\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to load LLB\")\n\t}\n\n\tif len(dpc.ids) > 0 {\n\t\tids := make([]string, 0, len(dpc.ids))\n\t\tfor id := range dpc.ids {\n\t\t\tids = append(ids, id)\n\t\t}\n\t\tif err := b.eachWorker(func(w worker.Worker) error {\n\t\t\treturn w.PruneCacheMounts(ctx, ids)\n\t\t}); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tres, err := b.builder.Build(ctx, edge)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\twr, ok := res.Sys().(*worker.WorkerRef)\n\tif !ok {\n\t\treturn nil, errors.Errorf(\"invalid reference for exporting: %T\", res.Sys())\n\t}\n\tif wr.ImmutableRef != nil {\n\t\tif err := wr.ImmutableRef.Finalize(ctx, false); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn res, err\n}\n\nfunc (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest) (res *frontend.Result, err error) {\n\tif req.Definition != nil && req.Definition.Def != nil && req.Frontend != \"\" {\n\t\treturn nil, errors.New(\"cannot solve with both Definition and Frontend specified\")\n\t}\n\n\tif req.Definition != nil && req.Definition.Def != nil {\n\t\tres = &frontend.Result{Ref: newResultProxy(b, req)}\n\t} else if req.Frontend != \"\" {\n\t\tf, ok := b.frontends[req.Frontend]\n\t\tif !ok {\n\t\t\treturn nil, errors.Errorf(\"invalid frontend: %s\", req.Frontend)\n\t\t}\n\t\tres, err = f.Solve(ctx, b, req.FrontendOpt)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to solve with frontend %s\", req.Frontend)\n\t\t}\n\t} else {\n\t\treturn &frontend.Result{}, nil\n\t}\n\n\treturn\n}\n\ntype resultProxy struct {\n\tcb func(context.Context) (solver.CachedResult, error)\n\tdef *pb.Definition\n\tg flightcontrol.Group\n\tmu sync.Mutex\n\treleased bool\n\tv solver.CachedResult\n\terr error\n}\n\nfunc newResultProxy(b *llbBridge, req frontend.SolveRequest) *resultProxy {\n\treturn &resultProxy{\n\t\tdef: req.Definition,\n\t\tcb: func(ctx context.Context) (solver.CachedResult, error) {\n\t\t\treturn b.loadResult(ctx, req.Definition, req.CacheImports)\n\t\t},\n\t}\n}\n\nfunc (rp *resultProxy) Definition() *pb.Definition {\n\treturn rp.def\n}\n\nfunc (rp *resultProxy) Release(ctx context.Context) error {\n\trp.mu.Lock()\n\tdefer rp.mu.Unlock()\n\tif rp.v != nil {\n\t\tif rp.released {\n\t\t\tlogrus.Warnf(\"release of already released result\")\n\t\t}\n\t\tif err := rp.v.Release(ctx); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\trp.released = true\n\treturn nil\n}\n\nfunc (rp *resultProxy) Result(ctx context.Context) (solver.CachedResult, error) {\n\tr, err := rp.g.Do(ctx, \"result\", func(ctx context.Context) (interface{}, error) {\n\t\trp.mu.Lock()\n\t\tif rp.released {\n\t\t\trp.mu.Unlock()\n\t\t\treturn nil, errors.Errorf(\"accessing released result\")\n\t\t}\n\t\tif rp.v != nil || rp.err != nil {\n\t\t\trp.mu.Unlock()\n\t\t\treturn rp.v, rp.err\n\t\t}\n\t\trp.mu.Unlock()\n\t\tv, err := rp.cb(ctx)\n\t\tif err != nil {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tif strings.Contains(err.Error(), context.Canceled.Error()) {\n\t\t\t\t\treturn v, err\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\t\trp.mu.Lock()\n\t\tif rp.released {\n\t\t\tif v != nil {\n\t\t\t\tv.Release(context.TODO())\n\t\t\t}\n\t\t\trp.mu.Unlock()\n\t\t\treturn nil, errors.Errorf(\"evaluating released result\")\n\t\t}\n\t\trp.v = v\n\t\trp.err = err\n\t\trp.mu.Unlock()\n\t\treturn v, err\n\t})\n\tif r != nil {\n\t\treturn r.(solver.CachedResult), nil\n\t}\n\treturn nil, err\n}\n\nfunc (s *llbBridge) Exec(ctx context.Context, meta executor.Meta, root cache.ImmutableRef, stdin io.ReadCloser, stdout, stderr io.WriteCloser) (err error) {\n\tw, err := s.resolveWorker()\n\tif err != nil {\n\t\treturn err\n\t}\n\tspan, ctx := tracing.StartSpan(ctx, strings.Join(meta.Args, \" \"))\n\terr = w.Exec(ctx, meta, root, stdin, stdout, stderr)\n\ttracing.FinishWithError(span, err)\n\treturn err\n}\n\nfunc (s *llbBridge) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (dgst digest.Digest, config []byte, err error) {\n\tw, err := s.resolveWorker()\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tif opt.LogName == \"\" {\n\t\topt.LogName = fmt.Sprintf(\"resolve image config for %s\", ref)\n\t}\n\tid := ref \/\/ make a deterministic ID for avoiding duplicates\n\tif platform := opt.Platform; platform == nil {\n\t\tid += platforms.Format(platforms.DefaultSpec())\n\t} else {\n\t\tid += platforms.Format(*platform)\n\t}\n\terr = inVertexContext(s.builder.Context(ctx), opt.LogName, id, func(ctx context.Context) error {\n\t\tdgst, config, err = w.ResolveImageConfig(ctx, ref, opt, s.sm)\n\t\treturn err\n\t})\n\treturn dgst, config, err\n}\n\ntype lazyCacheManager struct {\n\tid string\n\tmain solver.CacheManager\n\n\twaitCh chan struct{}\n\terr error\n}\n\nfunc (lcm *lazyCacheManager) ID() string {\n\treturn lcm.id\n}\nfunc (lcm *lazyCacheManager) Query(inp []solver.CacheKeyWithSelector, inputIndex solver.Index, dgst digest.Digest, outputIndex solver.Index) ([]*solver.CacheKey, error) {\n\tlcm.wait()\n\tif lcm.main == nil {\n\t\treturn nil, nil\n\t}\n\treturn lcm.main.Query(inp, inputIndex, dgst, outputIndex)\n}\nfunc (lcm *lazyCacheManager) Records(ck *solver.CacheKey) ([]*solver.CacheRecord, error) {\n\tlcm.wait()\n\tif lcm.main == nil {\n\t\treturn nil, nil\n\t}\n\treturn lcm.main.Records(ck)\n}\nfunc (lcm *lazyCacheManager) Load(ctx context.Context, rec *solver.CacheRecord) (solver.Result, error) {\n\tif err := lcm.wait(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn lcm.main.Load(ctx, rec)\n}\nfunc (lcm *lazyCacheManager) Save(key *solver.CacheKey, s solver.Result, createdAt time.Time) (*solver.ExportableCacheKey, error) {\n\tif err := lcm.wait(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn lcm.main.Save(key, s, createdAt)\n}\n\nfunc (lcm *lazyCacheManager) wait() error {\n\t<-lcm.waitCh\n\treturn lcm.err\n}\n\nfunc newLazyCacheManager(id string, fn func() (solver.CacheManager, error)) solver.CacheManager {\n\tlcm := &lazyCacheManager{id: id, waitCh: make(chan struct{})}\n\tgo func() {\n\t\tdefer close(lcm.waitCh)\n\t\tcm, err := fn()\n\t\tif err != nil {\n\t\t\tlcm.err = err\n\t\t\treturn\n\t\t}\n\t\tlcm.main = cm\n\t}()\n\treturn lcm\n}\n\nfunc cmKey(im gw.CacheOptionsEntry) (string, error) {\n\tif im.Type == \"registry\" && im.Attrs[\"ref\"] != \"\" {\n\t\treturn im.Attrs[\"ref\"], nil\n\t}\n\ti, err := hashstructure.Hash(im, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn fmt.Sprintf(\"%s:%d\", im.Type, i), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux\n\npackage fs2\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/opencontainers\/runc\/libcontainer\/cgroups\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/configs\"\n\t\"github.com\/pkg\/errors\"\n)\n\ntype manager struct {\n\tconfig *configs.Cgroup\n\t\/\/ dirPath is like \"\/sys\/fs\/cgroup\/user.slice\/user-1001.slice\/session-1.scope\"\n\tdirPath string\n\t\/\/ controllers is content of \"cgroup.controllers\" file.\n\t\/\/ excludes pseudo-controllers (\"devices\" and \"freezer\").\n\tcontrollers map[string]struct{}\n\trootless bool\n}\n\n\/\/ NewManager creates a manager for cgroup v2 unified hierarchy.\n\/\/ dirPath is like \"\/sys\/fs\/cgroup\/user.slice\/user-1001.slice\/session-1.scope\".\n\/\/ If dirPath is empty, it is automatically set using config.\nfunc NewManager(config *configs.Cgroup, dirPath string, rootless bool) (cgroups.Manager, error) {\n\tif config == nil {\n\t\tconfig = &configs.Cgroup{}\n\t}\n\tif dirPath == \"\" {\n\t\tvar err error\n\t\tdirPath, err = defaultDirPath(config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tm := &manager{\n\t\tconfig: config,\n\t\tdirPath: dirPath,\n\t\trootless: rootless,\n\t}\n\treturn m, nil\n}\n\nfunc (m *manager) getControllers() error {\n\tif m.controllers != nil {\n\t\treturn nil\n\t}\n\n\tfile := filepath.Join(m.dirPath, \"cgroup.controllers\")\n\tdata, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\tif m.rootless && m.config.Path == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tfields := strings.Fields(string(data))\n\tm.controllers = make(map[string]struct{}, len(fields))\n\tfor _, c := range fields {\n\t\tm.controllers[c] = struct{}{}\n\t}\n\n\treturn nil\n}\n\nfunc (m *manager) Apply(pid int) error {\n\tif err := CreateCgroupPath(m.dirPath, m.config); err != nil {\n\t\t\/\/ Related tests:\n\t\t\/\/ - \"runc create (no limits + no cgrouppath + no permission) succeeds\"\n\t\t\/\/ - \"runc create (rootless + no limits + cgrouppath + no permission) fails with permission error\"\n\t\t\/\/ - \"runc create (rootless + limits + no cgrouppath + no permission) fails with informative error\"\n\t\tif m.rootless {\n\t\t\tif m.config.Path == \"\" {\n\t\t\t\tif blNeed, nErr := needAnyControllers(m.config); nErr == nil && !blNeed {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\treturn errors.Wrap(err, \"rootless needs no limits + no cgrouppath when no permission is granted for cgroups\")\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n\tif err := cgroups.WriteCgroupProc(m.dirPath, pid); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (m *manager) GetPids() ([]int, error) {\n\treturn cgroups.GetPids(m.dirPath)\n}\n\nfunc (m *manager) GetAllPids() ([]int, error) {\n\treturn cgroups.GetAllPids(m.dirPath)\n}\n\nfunc (m *manager) GetStats() (*cgroups.Stats, error) {\n\tvar (\n\t\terrs []error\n\t)\n\n\tst := cgroups.NewStats()\n\tif err := m.getControllers(); err != nil {\n\t\treturn st, err\n\t}\n\n\t\/\/ pids (since kernel 4.5)\n\tif _, ok := m.controllers[\"pids\"]; ok {\n\t\tif err := statPids(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t} else {\n\t\tif err := statPidsWithoutController(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ memory (since kernel 4.5)\n\tif _, ok := m.controllers[\"memory\"]; ok {\n\t\tif err := statMemory(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ io (since kernel 4.5)\n\tif _, ok := m.controllers[\"io\"]; ok {\n\t\tif err := statIo(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ cpu (since kernel 4.15)\n\tif _, ok := m.controllers[\"cpu\"]; ok {\n\t\tif err := statCpu(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ hugetlb (since kernel 5.6)\n\tif _, ok := m.controllers[\"hugetlb\"]; ok {\n\t\tif err := statHugeTlb(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\tif len(errs) > 0 && !m.rootless {\n\t\treturn st, errors.Errorf(\"error while statting cgroup v2: %+v\", errs)\n\t}\n\treturn st, nil\n}\n\nfunc (m *manager) Freeze(state configs.FreezerState) error {\n\tif err := setFreezer(m.dirPath, state); err != nil {\n\t\treturn err\n\t}\n\tm.config.Resources.Freezer = state\n\treturn nil\n}\n\n\/\/ removeCgroupPath aims to remove cgroup path recursively\n\/\/ Because there may be subcgroups in it.\nfunc removeCgroupPath(path string) error {\n\tinfos, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\terr = nil\n\t\t}\n\t\treturn err\n\t}\n\tfor _, info := range infos {\n\t\tif info.IsDir() {\n\t\t\t\/\/ We should remove subcgroups dir first\n\t\t\tif err = removeCgroupPath(filepath.Join(path, info.Name())); err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif err == nil {\n\t\terr = os.Remove(path)\n\t\tif os.IsNotExist(err) {\n\t\t\terr = nil\n\t\t}\n\t}\n\treturn err\n}\n\nfunc (m *manager) Destroy() error {\n\treturn removeCgroupPath(m.dirPath)\n}\n\nfunc (m *manager) Path(_ string) string {\n\treturn m.dirPath\n}\n\nfunc (m *manager) Set(container *configs.Config) error {\n\tif container == nil || container.Cgroups == nil {\n\t\treturn nil\n\t}\n\tif err := m.getControllers(); err != nil {\n\t\treturn err\n\t}\n\t\/\/ pids (since kernel 4.5)\n\tif err := setPids(m.dirPath, container.Cgroups); err != nil {\n\t\treturn err\n\t}\n\t\/\/ memory (since kernel 4.5)\n\tif err := setMemory(m.dirPath, container.Cgroups); err != nil {\n\t\treturn err\n\t}\n\t\/\/ io (since kernel 4.5)\n\tif err := setIo(m.dirPath, container.Cgroups); err != nil {\n\t\treturn err\n\t}\n\t\/\/ cpu (since kernel 4.15)\n\tif err := setCpu(m.dirPath, container.Cgroups); err != nil {\n\t\treturn err\n\t}\n\t\/\/ devices (since kernel 4.15, pseudo-controller)\n\t\/\/\n\t\/\/ When m.Rootless is true, errors from the device subsystem are ignored because it is really not expected to work.\n\t\/\/ However, errors from other subsystems are not ignored.\n\t\/\/ see @test \"runc create (rootless + limits + no cgrouppath + no permission) fails with informative error\"\n\tif err := setDevices(m.dirPath, container.Cgroups); err != nil && !m.rootless {\n\t\treturn err\n\t}\n\t\/\/ cpuset (since kernel 5.0)\n\tif err := setCpuset(m.dirPath, container.Cgroups); err != nil {\n\t\treturn err\n\t}\n\t\/\/ hugetlb (since kernel 5.6)\n\tif err := setHugeTlb(m.dirPath, container.Cgroups); err != nil {\n\t\treturn err\n\t}\n\t\/\/ freezer (since kernel 5.2, pseudo-controller)\n\tif err := setFreezer(m.dirPath, container.Cgroups.Freezer); err != nil {\n\t\treturn err\n\t}\n\tm.config = container.Cgroups\n\treturn nil\n}\n\nfunc (m *manager) GetPaths() map[string]string {\n\tpaths := make(map[string]string, 1)\n\tpaths[\"\"] = m.dirPath\n\treturn paths\n}\n\nfunc (m *manager) GetCgroups() (*configs.Cgroup, error) {\n\treturn m.config, nil\n}\n\nfunc (m *manager) GetFreezerState() (configs.FreezerState, error) {\n\treturn getFreezer(m.dirPath)\n}\n<commit_msg>cgroups\/fs2: make removeCgroupPath faster<commit_after>\/\/ +build linux\n\npackage fs2\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/opencontainers\/runc\/libcontainer\/cgroups\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/configs\"\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/sys\/unix\"\n)\n\ntype manager struct {\n\tconfig *configs.Cgroup\n\t\/\/ dirPath is like \"\/sys\/fs\/cgroup\/user.slice\/user-1001.slice\/session-1.scope\"\n\tdirPath string\n\t\/\/ controllers is content of \"cgroup.controllers\" file.\n\t\/\/ excludes pseudo-controllers (\"devices\" and \"freezer\").\n\tcontrollers map[string]struct{}\n\trootless bool\n}\n\n\/\/ NewManager creates a manager for cgroup v2 unified hierarchy.\n\/\/ dirPath is like \"\/sys\/fs\/cgroup\/user.slice\/user-1001.slice\/session-1.scope\".\n\/\/ If dirPath is empty, it is automatically set using config.\nfunc NewManager(config *configs.Cgroup, dirPath string, rootless bool) (cgroups.Manager, error) {\n\tif config == nil {\n\t\tconfig = &configs.Cgroup{}\n\t}\n\tif dirPath == \"\" {\n\t\tvar err error\n\t\tdirPath, err = defaultDirPath(config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tm := &manager{\n\t\tconfig: config,\n\t\tdirPath: dirPath,\n\t\trootless: rootless,\n\t}\n\treturn m, nil\n}\n\nfunc (m *manager) getControllers() error {\n\tif m.controllers != nil {\n\t\treturn nil\n\t}\n\n\tfile := filepath.Join(m.dirPath, \"cgroup.controllers\")\n\tdata, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\tif m.rootless && m.config.Path == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tfields := strings.Fields(string(data))\n\tm.controllers = make(map[string]struct{}, len(fields))\n\tfor _, c := range fields {\n\t\tm.controllers[c] = struct{}{}\n\t}\n\n\treturn nil\n}\n\nfunc (m *manager) Apply(pid int) error {\n\tif err := CreateCgroupPath(m.dirPath, m.config); err != nil {\n\t\t\/\/ Related tests:\n\t\t\/\/ - \"runc create (no limits + no cgrouppath + no permission) succeeds\"\n\t\t\/\/ - \"runc create (rootless + no limits + cgrouppath + no permission) fails with permission error\"\n\t\t\/\/ - \"runc create (rootless + limits + no cgrouppath + no permission) fails with informative error\"\n\t\tif m.rootless {\n\t\t\tif m.config.Path == \"\" {\n\t\t\t\tif blNeed, nErr := needAnyControllers(m.config); nErr == nil && !blNeed {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\treturn errors.Wrap(err, \"rootless needs no limits + no cgrouppath when no permission is granted for cgroups\")\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n\tif err := cgroups.WriteCgroupProc(m.dirPath, pid); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (m *manager) GetPids() ([]int, error) {\n\treturn cgroups.GetPids(m.dirPath)\n}\n\nfunc (m *manager) GetAllPids() ([]int, error) {\n\treturn cgroups.GetAllPids(m.dirPath)\n}\n\nfunc (m *manager) GetStats() (*cgroups.Stats, error) {\n\tvar (\n\t\terrs []error\n\t)\n\n\tst := cgroups.NewStats()\n\tif err := m.getControllers(); err != nil {\n\t\treturn st, err\n\t}\n\n\t\/\/ pids (since kernel 4.5)\n\tif _, ok := m.controllers[\"pids\"]; ok {\n\t\tif err := statPids(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t} else {\n\t\tif err := statPidsWithoutController(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ memory (since kernel 4.5)\n\tif _, ok := m.controllers[\"memory\"]; ok {\n\t\tif err := statMemory(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ io (since kernel 4.5)\n\tif _, ok := m.controllers[\"io\"]; ok {\n\t\tif err := statIo(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ cpu (since kernel 4.15)\n\tif _, ok := m.controllers[\"cpu\"]; ok {\n\t\tif err := statCpu(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\t\/\/ hugetlb (since kernel 5.6)\n\tif _, ok := m.controllers[\"hugetlb\"]; ok {\n\t\tif err := statHugeTlb(m.dirPath, st); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\tif len(errs) > 0 && !m.rootless {\n\t\treturn st, errors.Errorf(\"error while statting cgroup v2: %+v\", errs)\n\t}\n\treturn st, nil\n}\n\nfunc (m *manager) Freeze(state configs.FreezerState) error {\n\tif err := setFreezer(m.dirPath, state); err != nil {\n\t\treturn err\n\t}\n\tm.config.Resources.Freezer = state\n\treturn nil\n}\n\nfunc rmdir(path string) error {\n\terr := unix.Rmdir(path)\n\tif err == nil || err == unix.ENOENT {\n\t\treturn nil\n\t}\n\treturn &os.PathError{Op: \"rmdir\", Path: path, Err: err}\n}\n\n\/\/ removeCgroupPath aims to remove cgroup path recursively\n\/\/ Because there may be subcgroups in it.\nfunc removeCgroupPath(path string) error {\n\t\/\/ try the fast path first\n\tif err := rmdir(path); err == nil {\n\t\treturn nil\n\t}\n\n\tinfos, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\terr = nil\n\t\t}\n\t\treturn err\n\t}\n\tfor _, info := range infos {\n\t\tif info.IsDir() {\n\t\t\t\/\/ We should remove subcgroups dir first\n\t\t\tif err = removeCgroupPath(filepath.Join(path, info.Name())); err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif err == nil {\n\t\terr = rmdir(path)\n\t}\n\treturn err\n}\n\nfunc (m *manager) Destroy() error {\n\treturn removeCgroupPath(m.dirPath)\n}\n\nfunc (m *manager) Path(_ string) string {\n\treturn m.dirPath\n}\n\nfunc (m *manager) Set(container *configs.Config) error {\n\tif container == nil || container.Cgroups == nil {\n\t\treturn nil\n\t}\n\tif err := m.getControllers(); err != nil {\n\t\treturn err\n\t}\n\t\/\/ pids (since kernel 4.5)\n\tif err := setPids(m.dirPath, container.Cgroups); err != nil {\n\t\treturn err\n\t}\n\t\/\/ memory (since kernel 4.5)\n\tif err := setMemory(m.dirPath, container.Cgroups); err != nil {\n\t\treturn err\n\t}\n\t\/\/ io (since kernel 4.5)\n\tif err := setIo(m.dirPath, container.Cgroups); err != nil {\n\t\treturn err\n\t}\n\t\/\/ cpu (since kernel 4.15)\n\tif err := setCpu(m.dirPath, container.Cgroups); err != nil {\n\t\treturn err\n\t}\n\t\/\/ devices (since kernel 4.15, pseudo-controller)\n\t\/\/\n\t\/\/ When m.Rootless is true, errors from the device subsystem are ignored because it is really not expected to work.\n\t\/\/ However, errors from other subsystems are not ignored.\n\t\/\/ see @test \"runc create (rootless + limits + no cgrouppath + no permission) fails with informative error\"\n\tif err := setDevices(m.dirPath, container.Cgroups); err != nil && !m.rootless {\n\t\treturn err\n\t}\n\t\/\/ cpuset (since kernel 5.0)\n\tif err := setCpuset(m.dirPath, container.Cgroups); err != nil {\n\t\treturn err\n\t}\n\t\/\/ hugetlb (since kernel 5.6)\n\tif err := setHugeTlb(m.dirPath, container.Cgroups); err != nil {\n\t\treturn err\n\t}\n\t\/\/ freezer (since kernel 5.2, pseudo-controller)\n\tif err := setFreezer(m.dirPath, container.Cgroups.Freezer); err != nil {\n\t\treturn err\n\t}\n\tm.config = container.Cgroups\n\treturn nil\n}\n\nfunc (m *manager) GetPaths() map[string]string {\n\tpaths := make(map[string]string, 1)\n\tpaths[\"\"] = m.dirPath\n\treturn paths\n}\n\nfunc (m *manager) GetCgroups() (*configs.Cgroup, error) {\n\treturn m.config, nil\n}\n\nfunc (m *manager) GetFreezerState() (configs.FreezerState, error) {\n\treturn getFreezer(m.dirPath)\n}\n<|endoftext|>"} {"text":"<commit_before>package dsdk\n\nimport (\n\t\"context\"\n\t_path \"path\"\n\n\tgreq \"github.com\/levigross\/grequests\"\n)\n\nconst (\n\tProviderAWS = \"AWS S3\"\n\tProviderGoogle = \"Google Cloud\"\n\tProviderS3 = \"S3 Object Store\"\n)\n\ntype RemoteProvider struct {\n\tPath string `json:\"path,omitempty\" mapstructure:\"path\"`\n\tUuid string `json:\"uuid,omitempty\" mapstructure:\"uuid\"`\n\tAccountId string `json:\"account_id,omitempty\" mapstructure:\"account_id\"`\n\tRemoteType string `json:\"remote_type,omitempty\" mapstructure:\"remote_type\"`\n\tLastSeenTimestamp string `json:\"last_seen_timestamp,omitempty\" mapstructure:\"last_seen_timestamp\"`\n\tOperations []map[string]interface{} `json:\"operations,omitempty\" mapstructure:\"operations\"`\n\tSnapshots []*Snapshot `json:\"snapshots,omitempty\" mapstructure:\"snapshots\"`\n\tLabel string `json:\"label,omitempty\" mapstructure:\"label\"`\n\tStatus string `json:\"status,omitempty\" mapstructure:\"status\"`\n\tHost string `json:\"host,omitempty\" mapstructure:\"host\"`\n\tPort string `json:\"port,omitempty\" mapstructure:\"port\"`\n\tOperationsEp string\n\tSnapshotsEp *Snapshots\n}\n\nfunc RegisterRemoteProviderEndpoints(rp *RemoteProvider) {\n\t\/\/a.OperationsEp = newOperations(a.Path)\n\trp.SnapshotsEp = newSnapshots(rp.Path)\n}\n\ntype RemoteProviders struct {\n\tPath string\n}\n\ntype RemoteProvidersCreateRequest struct {\n\tCtxt context.Context `json:\"-\"`\n\tProjectName string `json:\"project_name,omitempty\" mapstructure:\"project_name\"`\n\tAccountId string `json:\"account_id,omitempty\" mapstructure:\"account_id\"`\n\tRemoteType string `json:\"remote_type,omitempty\" mapstructure:\"remote_type\"`\n\tPrivateKey string `json:\"private_key,omitempty\" mapstructure:\"private_key\"`\n\tLabel string `json:\"label,omitempty\" mapstructure:\"label\"`\n\tHost string `json:\"host,omitempty\" mapstructure:\"host\"`\n\tPort int `json:\"port,omitempty\" mapstructure:\"port\"`\n\tAccessKey string `json:\"access_key,omitempty\" mapstructure:\"access_key\"`\n\tSecretKey string `json:\"secret_key,omitempty\" mapstructure:\"secret_key\"`\n}\n\nfunc newRemoteProviders(path string) *RemoteProviders {\n\treturn &RemoteProviders{\n\t\tPath: _path.Join(path, \"remote_providers\"),\n\t}\n}\n\nfunc (e *RemoteProviders) Create(ro *RemoteProvidersCreateRequest) (*RemoteProvider, *ApiErrorResponse, error) {\n\tgro := &greq.RequestOptions{JSON: ro}\n\trs, apierr, err := GetConn(ro.Ctxt).Post(ro.Ctxt, e.Path, gro)\n\tif apierr != nil {\n\t\treturn nil, apierr, err\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tresp := &RemoteProvider{}\n\tif err = FillStruct(rs.Data, resp); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tRegisterRemoteProviderEndpoints(resp)\n\treturn resp, nil, nil\n}\n\ntype RemoteProvidersListRequest struct {\n\tCtxt context.Context `json:\"-\"`\n\tParams ListParams `json:\"params,omitempty\"`\n}\n\nfunc (e *RemoteProviders) List(ro *RemoteProvidersListRequest) ([]*RemoteProvider, *ApiErrorResponse, error) {\n\tgro := &greq.RequestOptions{\n\t\tJSON: ro,\n\t\tParams: ro.Params.ToMap()}\n\trs, apierr, err := GetConn(ro.Ctxt).GetList(ro.Ctxt, e.Path, gro)\n\tif apierr != nil {\n\t\treturn nil, apierr, err\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tresp := []*RemoteProvider{}\n\tfor _, data := range rs.Data {\n\t\telem := &RemoteProvider{}\n\t\tadata := data.(map[string]interface{})\n\t\tif err = FillStruct(adata, elem); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tRegisterRemoteProviderEndpoints(elem)\n\t\tresp = append(resp, elem)\n\t}\n\treturn resp, nil, nil\n}\n\ntype RemoteProvidersGetRequest struct {\n\tCtxt context.Context `json:\"-\"`\n\tId string `json:\"-\"`\n}\n\nfunc (e *RemoteProviders) Get(ro *RemoteProvidersGetRequest) (*RemoteProvider, *ApiErrorResponse, error) {\n\tgro := &greq.RequestOptions{JSON: ro}\n\trs, apierr, err := GetConn(ro.Ctxt).Get(ro.Ctxt, _path.Join(e.Path, ro.Id), gro)\n\tif apierr != nil {\n\t\treturn nil, apierr, err\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tresp := &RemoteProvider{}\n\tif err = FillStruct(rs.Data, resp); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tRegisterRemoteProviderEndpoints(resp)\n\treturn resp, nil, nil\n}\n\ntype RemoteProvidersRefreshRequest struct {\n\tCtxt context.Context `json:\"-\"`\n\tUuid string `json:\"-\"`\n}\n\ntype RemoteProvidersRefreshResponse struct {\n\tUuid string `json:\"uuid,omitempty\" mapstructure:\"uuid\"`\n}\n\nfunc (e *RemoteProviders) Refresh(ro *RemoteProvidersRefreshRequest) (*RemoteProvidersRefreshResponse, *ApiErrorResponse, error) {\n\tgro := &greq.RequestOptions{JSON: ro}\n\trs, apierr, err := GetConn(ro.Ctxt).Put(ro.Ctxt, _path.Join(e.Path, ro.Uuid, \"refresh\"), gro)\n\n\tif apierr != nil {\n\t\treturn nil, apierr, err\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp := &RemoteProvidersRefreshResponse{}\n\tif err = FillStruct(rs.Data, resp); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn resp, nil, nil\n}\n\ntype RemoteProviderSetRequest struct {\n\tCtxt context.Context `json:\"-\"`\n\tProjectName string `json:\"project_name,omitempty\" mapstructure:\"project_name\"`\n\tAccountId string `json:\"account_id,omitempty\" mapstructure:\"account_id\"`\n\tPrivateKey string `json:\"private_key,omitempty\" mapstructure:\"private_key\"`\n\tLabel string `json:\"label,omitempty\" mapstructure:\"label\"`\n\tHost string `json:\"host,omitempty\" mapstructure:\"host\"`\n\tPort int `json:\"port,omitempty\" mapstructure:\"port\"`\n\tAccessKey string `json:\"access_key,omitempty\" mapstructure:\"access_key\"`\n\tSecretKey string `json:\"secret_key,omitempty\" mapstructure:\"secret_key\"`\n}\n\nfunc (e *RemoteProvider) Set(ro *RemoteProviderSetRequest) (*RemoteProvider, *ApiErrorResponse, error) {\n\tgro := &greq.RequestOptions{JSON: ro}\n\trs, apierr, err := GetConn(ro.Ctxt).Put(ro.Ctxt, e.Path, gro)\n\tif apierr != nil {\n\t\treturn nil, apierr, err\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tresp := &RemoteProvider{}\n\tif err = FillStruct(rs.Data, resp); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tRegisterRemoteProviderEndpoints(resp)\n\treturn resp, nil, nil\n}\n\ntype RemoteProviderDeleteRequest struct {\n\tCtxt context.Context `json:\"-\"`\n\tForce bool `json:\"force,omitempty\" mapstructure:\"force\"`\n}\n\nfunc (e *RemoteProvider) Delete(ro *RemoteProviderDeleteRequest) (*RemoteProvider, *ApiErrorResponse, error) {\n\trs, apierr, err := GetConn(ro.Ctxt).Delete(ro.Ctxt, e.Path, nil)\n\tif apierr != nil {\n\t\treturn nil, apierr, err\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tresp := &RemoteProvider{}\n\tif err = FillStruct(rs.Data, resp); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tRegisterRemoteProviderEndpoints(resp)\n\treturn resp, nil, nil\n}\n\ntype RemoteProviderAppTemplate struct {\n\tPath string `json:\"path,omitempty\" mapstructure:\"path\"`\n\tResolvedPath string `json:\"resolved_path,omitempty\" mapstructure:\"resolved_path\"`\n\tResolvedTenant string `json:\"resolved_tenant,omitempty\" mapstructure:\"resolved_tenant\"`\n}\n\ntype RemoteProviderReloadRequest struct {\n\tCtxt context.Context `json:\"-\"`\n}\n\nfunc (e *RemoteProvider) Reload(ro *RemoteProviderReloadRequest) (*RemoteProvider, *ApiErrorResponse, error) {\n\tgro := &greq.RequestOptions{JSON: ro}\n\trs, apierr, err := GetConn(ro.Ctxt).Get(ro.Ctxt, e.Path, gro)\n\tif apierr != nil {\n\t\treturn nil, apierr, err\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tresp := &RemoteProvider{}\n\tif err = FillStruct(rs.Data, resp); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tRegisterRemoteProviderEndpoints(resp)\n\treturn resp, nil, nil\n}\n\ntype RemoteProviderOperationsUpdateRequest struct {\n\tCtxt context.Context `json:\"-\"`\n\tOperationId string `json:\"-\"`\n\tAction string `json:\"action\"` \/\/available options are 'clear' and 'abort'\n}\n\nfunc (e *RemoteProvider) CreateOperation(ao *RemoteProviderOperationsUpdateRequest) (*RemoteProvider, *ApiErrorResponse, error) {\n\n\tgro := &greq.RequestOptions{JSON: ao}\n\trs, apierr, err := GetConn(ao.Ctxt).Put(ao.Ctxt, _path.Join(e.Path, \"operations\", ao.OperationId), gro)\n\tif apierr != nil {\n\t\treturn nil, apierr, err\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tresp := &RemoteProvider{}\n\tif err = FillStruct(rs.Data, resp); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tRegisterRemoteProviderEndpoints(resp)\n\treturn resp, nil, nil\n}\n<commit_msg>Change name to UpdateOperation<commit_after>package dsdk\n\nimport (\n\t\"context\"\n\t_path \"path\"\n\n\tgreq \"github.com\/levigross\/grequests\"\n)\n\nconst (\n\tProviderAWS = \"AWS S3\"\n\tProviderGoogle = \"Google Cloud\"\n\tProviderS3 = \"S3 Object Store\"\n)\n\ntype RemoteProvider struct {\n\tPath string `json:\"path,omitempty\" mapstructure:\"path\"`\n\tUuid string `json:\"uuid,omitempty\" mapstructure:\"uuid\"`\n\tAccountId string `json:\"account_id,omitempty\" mapstructure:\"account_id\"`\n\tRemoteType string `json:\"remote_type,omitempty\" mapstructure:\"remote_type\"`\n\tLastSeenTimestamp string `json:\"last_seen_timestamp,omitempty\" mapstructure:\"last_seen_timestamp\"`\n\tOperations []map[string]interface{} `json:\"operations,omitempty\" mapstructure:\"operations\"`\n\tSnapshots []*Snapshot `json:\"snapshots,omitempty\" mapstructure:\"snapshots\"`\n\tLabel string `json:\"label,omitempty\" mapstructure:\"label\"`\n\tStatus string `json:\"status,omitempty\" mapstructure:\"status\"`\n\tHost string `json:\"host,omitempty\" mapstructure:\"host\"`\n\tPort string `json:\"port,omitempty\" mapstructure:\"port\"`\n\tOperationsEp string\n\tSnapshotsEp *Snapshots\n}\n\nfunc RegisterRemoteProviderEndpoints(rp *RemoteProvider) {\n\t\/\/a.OperationsEp = newOperations(a.Path)\n\trp.SnapshotsEp = newSnapshots(rp.Path)\n}\n\ntype RemoteProviders struct {\n\tPath string\n}\n\ntype RemoteProvidersCreateRequest struct {\n\tCtxt context.Context `json:\"-\"`\n\tProjectName string `json:\"project_name,omitempty\" mapstructure:\"project_name\"`\n\tAccountId string `json:\"account_id,omitempty\" mapstructure:\"account_id\"`\n\tRemoteType string `json:\"remote_type,omitempty\" mapstructure:\"remote_type\"`\n\tPrivateKey string `json:\"private_key,omitempty\" mapstructure:\"private_key\"`\n\tLabel string `json:\"label,omitempty\" mapstructure:\"label\"`\n\tHost string `json:\"host,omitempty\" mapstructure:\"host\"`\n\tPort int `json:\"port,omitempty\" mapstructure:\"port\"`\n\tAccessKey string `json:\"access_key,omitempty\" mapstructure:\"access_key\"`\n\tSecretKey string `json:\"secret_key,omitempty\" mapstructure:\"secret_key\"`\n}\n\nfunc newRemoteProviders(path string) *RemoteProviders {\n\treturn &RemoteProviders{\n\t\tPath: _path.Join(path, \"remote_providers\"),\n\t}\n}\n\nfunc (e *RemoteProviders) Create(ro *RemoteProvidersCreateRequest) (*RemoteProvider, *ApiErrorResponse, error) {\n\tgro := &greq.RequestOptions{JSON: ro}\n\trs, apierr, err := GetConn(ro.Ctxt).Post(ro.Ctxt, e.Path, gro)\n\tif apierr != nil {\n\t\treturn nil, apierr, err\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tresp := &RemoteProvider{}\n\tif err = FillStruct(rs.Data, resp); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tRegisterRemoteProviderEndpoints(resp)\n\treturn resp, nil, nil\n}\n\ntype RemoteProvidersListRequest struct {\n\tCtxt context.Context `json:\"-\"`\n\tParams ListParams `json:\"params,omitempty\"`\n}\n\nfunc (e *RemoteProviders) List(ro *RemoteProvidersListRequest) ([]*RemoteProvider, *ApiErrorResponse, error) {\n\tgro := &greq.RequestOptions{\n\t\tJSON: ro,\n\t\tParams: ro.Params.ToMap()}\n\trs, apierr, err := GetConn(ro.Ctxt).GetList(ro.Ctxt, e.Path, gro)\n\tif apierr != nil {\n\t\treturn nil, apierr, err\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tresp := []*RemoteProvider{}\n\tfor _, data := range rs.Data {\n\t\telem := &RemoteProvider{}\n\t\tadata := data.(map[string]interface{})\n\t\tif err = FillStruct(adata, elem); err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tRegisterRemoteProviderEndpoints(elem)\n\t\tresp = append(resp, elem)\n\t}\n\treturn resp, nil, nil\n}\n\ntype RemoteProvidersGetRequest struct {\n\tCtxt context.Context `json:\"-\"`\n\tId string `json:\"-\"`\n}\n\nfunc (e *RemoteProviders) Get(ro *RemoteProvidersGetRequest) (*RemoteProvider, *ApiErrorResponse, error) {\n\tgro := &greq.RequestOptions{JSON: ro}\n\trs, apierr, err := GetConn(ro.Ctxt).Get(ro.Ctxt, _path.Join(e.Path, ro.Id), gro)\n\tif apierr != nil {\n\t\treturn nil, apierr, err\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tresp := &RemoteProvider{}\n\tif err = FillStruct(rs.Data, resp); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tRegisterRemoteProviderEndpoints(resp)\n\treturn resp, nil, nil\n}\n\ntype RemoteProvidersRefreshRequest struct {\n\tCtxt context.Context `json:\"-\"`\n\tUuid string `json:\"-\"`\n}\n\ntype RemoteProvidersRefreshResponse struct {\n\tUuid string `json:\"uuid,omitempty\" mapstructure:\"uuid\"`\n}\n\nfunc (e *RemoteProviders) Refresh(ro *RemoteProvidersRefreshRequest) (*RemoteProvidersRefreshResponse, *ApiErrorResponse, error) {\n\tgro := &greq.RequestOptions{JSON: ro}\n\trs, apierr, err := GetConn(ro.Ctxt).Put(ro.Ctxt, _path.Join(e.Path, ro.Uuid, \"refresh\"), gro)\n\n\tif apierr != nil {\n\t\treturn nil, apierr, err\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tresp := &RemoteProvidersRefreshResponse{}\n\tif err = FillStruct(rs.Data, resp); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn resp, nil, nil\n}\n\ntype RemoteProviderSetRequest struct {\n\tCtxt context.Context `json:\"-\"`\n\tProjectName string `json:\"project_name,omitempty\" mapstructure:\"project_name\"`\n\tAccountId string `json:\"account_id,omitempty\" mapstructure:\"account_id\"`\n\tPrivateKey string `json:\"private_key,omitempty\" mapstructure:\"private_key\"`\n\tLabel string `json:\"label,omitempty\" mapstructure:\"label\"`\n\tHost string `json:\"host,omitempty\" mapstructure:\"host\"`\n\tPort int `json:\"port,omitempty\" mapstructure:\"port\"`\n\tAccessKey string `json:\"access_key,omitempty\" mapstructure:\"access_key\"`\n\tSecretKey string `json:\"secret_key,omitempty\" mapstructure:\"secret_key\"`\n}\n\nfunc (e *RemoteProvider) Set(ro *RemoteProviderSetRequest) (*RemoteProvider, *ApiErrorResponse, error) {\n\tgro := &greq.RequestOptions{JSON: ro}\n\trs, apierr, err := GetConn(ro.Ctxt).Put(ro.Ctxt, e.Path, gro)\n\tif apierr != nil {\n\t\treturn nil, apierr, err\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tresp := &RemoteProvider{}\n\tif err = FillStruct(rs.Data, resp); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tRegisterRemoteProviderEndpoints(resp)\n\treturn resp, nil, nil\n}\n\ntype RemoteProviderDeleteRequest struct {\n\tCtxt context.Context `json:\"-\"`\n\tForce bool `json:\"force,omitempty\" mapstructure:\"force\"`\n}\n\nfunc (e *RemoteProvider) Delete(ro *RemoteProviderDeleteRequest) (*RemoteProvider, *ApiErrorResponse, error) {\n\trs, apierr, err := GetConn(ro.Ctxt).Delete(ro.Ctxt, e.Path, nil)\n\tif apierr != nil {\n\t\treturn nil, apierr, err\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tresp := &RemoteProvider{}\n\tif err = FillStruct(rs.Data, resp); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tRegisterRemoteProviderEndpoints(resp)\n\treturn resp, nil, nil\n}\n\ntype RemoteProviderAppTemplate struct {\n\tPath string `json:\"path,omitempty\" mapstructure:\"path\"`\n\tResolvedPath string `json:\"resolved_path,omitempty\" mapstructure:\"resolved_path\"`\n\tResolvedTenant string `json:\"resolved_tenant,omitempty\" mapstructure:\"resolved_tenant\"`\n}\n\ntype RemoteProviderReloadRequest struct {\n\tCtxt context.Context `json:\"-\"`\n}\n\nfunc (e *RemoteProvider) Reload(ro *RemoteProviderReloadRequest) (*RemoteProvider, *ApiErrorResponse, error) {\n\tgro := &greq.RequestOptions{JSON: ro}\n\trs, apierr, err := GetConn(ro.Ctxt).Get(ro.Ctxt, e.Path, gro)\n\tif apierr != nil {\n\t\treturn nil, apierr, err\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tresp := &RemoteProvider{}\n\tif err = FillStruct(rs.Data, resp); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tRegisterRemoteProviderEndpoints(resp)\n\treturn resp, nil, nil\n}\n\ntype RemoteProviderOperationsUpdateRequest struct {\n\tCtxt context.Context `json:\"-\"`\n\tOperationId string `json:\"-\"`\n\tAction string `json:\"action\"` \/\/available options are 'clear' and 'abort'\n}\n\nfunc (e *RemoteProvider) UpdateOperation(ao *RemoteProviderOperationsUpdateRequest) (*RemoteProvider, *ApiErrorResponse, error) {\n\n\tgro := &greq.RequestOptions{JSON: ao}\n\trs, apierr, err := GetConn(ao.Ctxt).Put(ao.Ctxt, _path.Join(e.Path, \"operations\", ao.OperationId), gro)\n\tif apierr != nil {\n\t\treturn nil, apierr, err\n\t}\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tresp := &RemoteProvider{}\n\tif err = FillStruct(rs.Data, resp); err != nil {\n\t\treturn nil, nil, err\n\t}\n\tRegisterRemoteProviderEndpoints(resp)\n\treturn resp, nil, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build !windows\n\npackage homedir \/\/ import \"github.com\/docker\/docker\/pkg\/homedir\"\n\nimport (\n\t\"os\"\n\t\"os\/user\"\n)\n\n\/\/ Key returns the env var name for the user's home dir based on\n\/\/ the platform being run on\nfunc Key() string {\n\treturn \"HOME\"\n}\n\n\/\/ Get returns the home directory of the current user with the help of\n\/\/ environment variables depending on the target operating system.\n\/\/ Returned path should be used with \"path\/filepath\" to form new paths.\n\/\/ If compiling statically, ensure the osusergo build tag is used.\n\/\/ If needing to do nss lookups, do not compile statically.\nfunc Get() string {\n\thome := os.Getenv(Key())\n\tif home == \"\" {\n\t\tif u, err := user.Current(); err == nil {\n\t\t\treturn u.HomeDir\n\t\t}\n\t}\n\treturn home\n}\n\n\/\/ GetShortcutString returns the string that is shortcut to user's home directory\n\/\/ in the native shell of the platform running on.\nfunc GetShortcutString() string {\n\treturn \"~\"\n}\n<commit_msg>homedir: add cgo or osusergo buildtag constraints for unix<commit_after>\/\/ +build !windows,cgo !windows,osusergo\n\npackage homedir \/\/ import \"github.com\/docker\/docker\/pkg\/homedir\"\n\nimport (\n\t\"os\"\n\t\"os\/user\"\n)\n\n\/\/ Key returns the env var name for the user's home dir based on\n\/\/ the platform being run on\nfunc Key() string {\n\treturn \"HOME\"\n}\n\n\/\/ Get returns the home directory of the current user with the help of\n\/\/ environment variables depending on the target operating system.\n\/\/ Returned path should be used with \"path\/filepath\" to form new paths.\n\/\/ If compiling statically, ensure the osusergo build tag is used.\n\/\/ If needing to do nss lookups, do not compile statically.\nfunc Get() string {\n\thome := os.Getenv(Key())\n\tif home == \"\" {\n\t\tif u, err := user.Current(); err == nil {\n\t\t\treturn u.HomeDir\n\t\t}\n\t}\n\treturn home\n}\n\n\/\/ GetShortcutString returns the string that is shortcut to user's home directory\n\/\/ in the native shell of the platform running on.\nfunc GetShortcutString() string {\n\treturn \"~\"\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2020 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage signer\n\nimport (\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/apis\/kritis\/v1beta1\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/attestation\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/crd\/authority\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/cryptolib\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/metadata\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/util\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\n\/\/ A signer is used for creating attestations for an image.\ntype Signer struct {\n\tconfig *Config\n\tclient metadata.ReadWriteClient\n}\n\n\/\/ A signer config that includes necessary data and handler for signing.\ntype Config struct {\n\tcSigner cryptolib.Signer\n\t\/\/ an AttestaionAuthority that is used in metadata client APIs.\n\t\/\/ We should consider refactor it out because:\n\t\/\/ 1. the only useful field here is noteName\n\t\/\/ 2. other fields, e.g., public key, are unset\n\t\/\/ TODO: refactor out the authority code\n\tauthority v1beta1.AttestationAuthority\n\tproject string\n}\n\n\/\/ Creating a new signer object.\nfunc New(client metadata.ReadWriteClient, cSigner cryptolib.Signer, noteName string, project string) Signer {\n\treturn Signer{\n\t\tclient: client,\n\t\tconfig: &Config{\n\t\t\tcSigner,\n\t\t\tv1beta1.AttestationAuthority{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"signing-aa\"},\n\t\t\t\tSpec: v1beta1.AttestationAuthoritySpec{\n\t\t\t\t\tNoteReference: noteName,\n\t\t\t\t\tPublicKeys: []v1beta1.PublicKey{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tproject,\n\t\t},\n\t}\n}\n\n\/\/ ImageVulnerabilities is an input for running vulnerability policy validation.\ntype ImageVulnerabilities struct {\n\tImageRef string\n\tVulnerabilities []metadata.Vulnerability\n}\n\n\/\/ For testing\nvar (\n\tauthFetcher = authority.Authority\n)\n\n\/\/ SignImage signs an image without doing any policy check.\n\/\/ Returns an error if creating an attestation fails.\nfunc (s Signer) SignImage(image string) error {\n\texisted, _ := s.isAttestationAlreadyExist(image)\n\tif existed {\n\t\tglog.Warningf(\"Attestation for image %q has already been created.\", image)\n\t\treturn nil\n\t}\n\n\tglog.Infof(\"Creating attestations for image %q.\", image)\n\t\/\/ Create attestation\n\tatt, err := s.createAttestation(image)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tglog.Infof(\"Uploading attestations for image %q.\", image)\n\tif err := s.uploadAttestation(image, att); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Creating an atestation.\nfunc (s Signer) createAttestation(image string) (*cryptolib.Attestation, error) {\n\tpayload, err := attestation.AtomicContainerPayload(image)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tatt, err := s.config.cSigner.CreateAttestation(payload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn att, nil\n}\n\n\/\/ Uploading an attestation if not already exist under the same note.\n\/\/ The method will create a note if it does not already exist.\n\/\/ Returns error if upload failed, e.g., if an attestation already exists.\nfunc (s Signer) uploadAttestation(image string, att *cryptolib.Attestation) error {\n\tnote, err := util.GetOrCreateAttestationNote(s.client, &s.config.authority)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Upload attestation\n\t_, err = s.client.UploadAttestationOccurrence(note.GetName(), image, att, s.config.project, metadata.PgpSignatureType)\n\treturn err\n}\n\nfunc (s Signer) isAttestationAlreadyExist(image string) (bool, error) {\n\tatts, err := s.client.Attestations(image, &s.config.authority)\n\tif err == nil && len(atts) > 0 {\n\t\treturn true, nil\n\t}\n\n\treturn false, err\n}\n<commit_msg>make signer config internal.<commit_after>\/*\nCopyright 2020 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage signer\n\nimport (\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/apis\/kritis\/v1beta1\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/attestation\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/crd\/authority\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/cryptolib\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/metadata\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/util\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\n\/\/ A signer is used for creating attestations for an image.\ntype Signer struct {\n\tconfig *config\n\tclient metadata.ReadWriteClient\n}\n\n\/\/ A signer config that includes necessary data and handler for signing.\ntype config struct {\n\tcSigner cryptolib.Signer\n\t\/\/ an AttestaionAuthority that is used in metadata client APIs.\n\t\/\/ We should consider refactor it out because:\n\t\/\/ 1. the only useful field here is noteName\n\t\/\/ 2. other fields, e.g., public key, are unset\n\t\/\/ TODO: refactor out the authority code\n\tauthority v1beta1.AttestationAuthority\n\tproject string\n}\n\n\/\/ Creating a new signer object.\nfunc New(client metadata.ReadWriteClient, cSigner cryptolib.Signer, noteName string, project string) Signer {\n\treturn Signer{\n\t\tclient: client,\n\t\tconfig: &config{\n\t\t\tcSigner,\n\t\t\tv1beta1.AttestationAuthority{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"signing-aa\"},\n\t\t\t\tSpec: v1beta1.AttestationAuthoritySpec{\n\t\t\t\t\tNoteReference: noteName,\n\t\t\t\t\tPublicKeys: []v1beta1.PublicKey{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tproject,\n\t\t},\n\t}\n}\n\n\/\/ ImageVulnerabilities is an input for running vulnerability policy validation.\ntype ImageVulnerabilities struct {\n\tImageRef string\n\tVulnerabilities []metadata.Vulnerability\n}\n\n\/\/ For testing\nvar (\n\tauthFetcher = authority.Authority\n)\n\n\/\/ SignImage signs an image without doing any policy check.\n\/\/ Returns an error if creating an attestation fails.\nfunc (s Signer) SignImage(image string) error {\n\texisted, _ := s.isAttestationAlreadyExist(image)\n\tif existed {\n\t\tglog.Warningf(\"Attestation for image %q has already been created.\", image)\n\t\treturn nil\n\t}\n\n\tglog.Infof(\"Creating attestations for image %q.\", image)\n\t\/\/ Create attestation\n\tatt, err := s.createAttestation(image)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tglog.Infof(\"Uploading attestations for image %q.\", image)\n\tif err := s.uploadAttestation(image, att); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Creating an atestation.\nfunc (s Signer) createAttestation(image string) (*cryptolib.Attestation, error) {\n\tpayload, err := attestation.AtomicContainerPayload(image)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tatt, err := s.config.cSigner.CreateAttestation(payload)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn att, nil\n}\n\n\/\/ Uploading an attestation if not already exist under the same note.\n\/\/ The method will create a note if it does not already exist.\n\/\/ Returns error if upload failed, e.g., if an attestation already exists.\nfunc (s Signer) uploadAttestation(image string, att *cryptolib.Attestation) error {\n\tnote, err := util.GetOrCreateAttestationNote(s.client, &s.config.authority)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Upload attestation\n\t_, err = s.client.UploadAttestationOccurrence(note.GetName(), image, att, s.config.project, metadata.PgpSignatureType)\n\treturn err\n}\n\nfunc (s Signer) isAttestationAlreadyExist(image string) (bool, error) {\n\tatts, err := s.client.Attestations(image, &s.config.authority)\n\tif err == nil && len(atts) > 0 {\n\t\treturn true, nil\n\t}\n\n\treturn false, err\n}\n<|endoftext|>"} {"text":"<commit_before>package mapconv\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\ntype dummyTagged struct {\n\tA string `mapconv:\"ValueA.A\"`\n\tB string `mapconv:\"ValueA.ValueB.B\"`\n\tC string `mapconv:\"ValueA.ValueB.ValueC.C\"`\n\tPointer *time.Time\n\tSlice []string\n\tNoTag string\n\tunexported string\n}\n\ntype dummyNaked struct {\n\tValueA *struct {\n\t\tA string\n\t\tValueB *struct {\n\t\t\tB string\n\t\t\tValueC *struct {\n\t\t\t\tC string\n\t\t\t}\n\t\t}\n\t}\n\tPointer *time.Time\n\tSlice []string\n\tNoTag string\n\tunexported string\n}\n\nfunc TestToNaked(t *testing.T) {\n\tzeroTime := time.Unix(0, 0)\n\ttests := []struct {\n\t\tinput *dummyTagged\n\t\toutput *dummyNaked\n\t\terr error\n\t}{\n\t\t{\n\t\t\tinput: &dummyTagged{\n\t\t\t\tA: \"A\",\n\t\t\t\tB: \"B\",\n\t\t\t\tC: \"C\",\n\t\t\t\tPointer: &zeroTime,\n\t\t\t\tSlice: []string{\"a\", \"b\", \"c\"},\n\t\t\t\tNoTag: \"NoTag\",\n\t\t\t\tunexported: \"unexported\",\n\t\t\t},\n\t\t\toutput: &dummyNaked{\n\t\t\t\tValueA: &struct {\n\t\t\t\t\tA string\n\t\t\t\t\tValueB *struct {\n\t\t\t\t\t\tB string\n\t\t\t\t\t\tValueC *struct {\n\t\t\t\t\t\t\tC string\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}{\n\t\t\t\t\tA: \"A\",\n\t\t\t\t\tValueB: &struct {\n\t\t\t\t\t\tB string\n\t\t\t\t\t\tValueC *struct {\n\t\t\t\t\t\t\tC string\n\t\t\t\t\t\t}\n\t\t\t\t\t}{\n\t\t\t\t\t\tB: \"B\",\n\t\t\t\t\t\tValueC: &struct {\n\t\t\t\t\t\t\tC string\n\t\t\t\t\t\t}{\n\t\t\t\t\t\t\tC: \"C\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tPointer: &zeroTime,\n\t\t\t\tSlice: []string{\"a\", \"b\", \"c\"},\n\t\t\t\tNoTag: \"NoTag\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\toutput := &dummyNaked{}\n\t\terr := ConvertTo(tt.input, output)\n\t\trequire.Equal(t, tt.err, err)\n\t\tif err == nil {\n\t\t\trequire.EqualValues(t, tt.output.ValueA, output.ValueA)\n\t\t\trequire.EqualValues(t, tt.output.Pointer.String(), output.Pointer.String())\n\t\t\trequire.EqualValues(t, tt.output.Slice, output.Slice)\n\t\t\trequire.EqualValues(t, tt.output.NoTag, output.NoTag)\n\t\t}\n\t}\n\n}\n\nfunc TestFromNaked(t *testing.T) {\n\n\ttests := []struct {\n\t\toutput *dummyTagged\n\t\tinput *dummyNaked\n\t\terr error\n\t}{\n\t\t{\n\t\t\toutput: &dummyTagged{\n\t\t\t\tA: \"A\",\n\t\t\t\tB: \"B\",\n\t\t\t\tC: \"C\",\n\t\t\t\tNoTag: \"NoTag\",\n\t\t\t},\n\t\t\tinput: &dummyNaked{\n\t\t\t\tValueA: &struct {\n\t\t\t\t\tA string\n\t\t\t\t\tValueB *struct {\n\t\t\t\t\t\tB string\n\t\t\t\t\t\tValueC *struct {\n\t\t\t\t\t\t\tC string\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}{\n\t\t\t\t\tA: \"A\",\n\t\t\t\t\tValueB: &struct {\n\t\t\t\t\t\tB string\n\t\t\t\t\t\tValueC *struct {\n\t\t\t\t\t\t\tC string\n\t\t\t\t\t\t}\n\t\t\t\t\t}{\n\t\t\t\t\t\tB: \"B\",\n\t\t\t\t\t\tValueC: &struct {\n\t\t\t\t\t\t\tC string\n\t\t\t\t\t\t}{\n\t\t\t\t\t\t\tC: \"C\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tNoTag: \"NoTag\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\toutput := &dummyTagged{}\n\t\terr := ConvertFrom(tt.input, output)\n\t\trequire.Equal(t, tt.err, err)\n\t\tif err == nil {\n\t\t\trequire.Equal(t, tt.output, output)\n\t\t}\n\t}\n\n}\n\ntype dummySlice struct {\n\tSlice []*dummySliceInner `json:\",omitempty\"`\n}\n\ntype dummySliceInner struct {\n\tValue string `json:\",omitempty\"`\n\tSlice []*dummySliceInner `json:\",omitempty\"`\n}\n\ntype dummyExtractInnerSlice struct {\n\tValues []string `json:\",omitempty\" mapconv:\"[]Slice.Value\"`\n\tNestedValues []string `json:\",omitempty\" mapconv:\"[]Slice.[]Slice.Value\"`\n}\n\nfunc TestExtractInnerSlice(t *testing.T) {\n\ttests := []struct {\n\t\tinput *dummySlice\n\t\texpect *dummyExtractInnerSlice\n\t}{\n\t\t{\n\t\t\tinput: &dummySlice{\n\t\t\t\tSlice: []*dummySliceInner{\n\t\t\t\t\t{Value: \"value1\"},\n\t\t\t\t\t{Value: \"value2\"},\n\t\t\t\t\t{\n\t\t\t\t\t\tValue: \"value3\",\n\t\t\t\t\t\tSlice: []*dummySliceInner{\n\t\t\t\t\t\t\t{Value: \"value4\"},\n\t\t\t\t\t\t\t{Value: \"value5\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpect: &dummyExtractInnerSlice{\n\t\t\t\tValues: []string{\"value1\", \"value2\", \"value3\"},\n\t\t\t\tNestedValues: []string{\"value4\", \"value5\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\toutput := &dummyExtractInnerSlice{}\n\t\terr := ConvertFrom(tt.input, output)\n\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, tt.expect, output)\n\t}\n}\n\nfunc TestInsertInnerSlice(t *testing.T) {\n\ttests := []struct {\n\t\tinput *dummyExtractInnerSlice\n\t\toutput *dummySlice\n\t}{\n\t\t{\n\t\t\tinput: &dummyExtractInnerSlice{\n\t\t\t\tValues: []string{\"value1\", \"value2\", \"value3\"},\n\t\t\t\tNestedValues: []string{\"value4\", \"value5\"},\n\t\t\t},\n\t\t\toutput: &dummySlice{\n\t\t\t\tSlice: []*dummySliceInner{\n\t\t\t\t\t{Value: \"value1\"},\n\t\t\t\t\t{Value: \"value2\"},\n\t\t\t\t\t{Value: \"value3\"},\n\t\t\t\t\t{\n\t\t\t\t\t\tSlice: []*dummySliceInner{\n\t\t\t\t\t\t\t{Value: \"value4\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSlice: []*dummySliceInner{\n\t\t\t\t\t\t\t{Value: \"value5\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\toutput := &dummySlice{}\n\t\terr := ConvertTo(tt.input, output)\n\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, tt.output, output)\n\t}\n}\n\ntype hasDefaultSource struct {\n\tField string `mapconv:\"Field,default=default-value\"`\n}\n\ntype hasDefaultDest struct {\n\tField string\n}\n\nfunc TestDefaultValue(t *testing.T) {\n\ttests := []struct {\n\t\tinput *hasDefaultSource\n\t\toutput *hasDefaultDest\n\t}{\n\t\t{\n\t\t\tinput: &hasDefaultSource{},\n\t\t\toutput: &hasDefaultDest{\n\t\t\t\tField: \"default-value\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\toutput := &hasDefaultDest{}\n\t\terr := ConvertTo(tt.input, output)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, tt.output, output)\n\t}\n}\n\ntype multipleSource struct {\n\tField string `mapconv:\"Field1\/Field2\"`\n}\n\ntype multipleDest struct {\n\tField1 string\n\tField2 string\n}\n\nfunc TestMultipleDestination(t *testing.T) {\n\ttests := []struct {\n\t\tinput *multipleSource\n\t\toutput *multipleDest\n\t}{\n\t\t{\n\t\t\tinput: &multipleSource{\n\t\t\t\tField: \"value\",\n\t\t\t},\n\t\t\toutput: &multipleDest{\n\t\t\t\tField1: \"value\",\n\t\t\t\tField2: \"value\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\toutput := &multipleDest{}\n\t\terr := ConvertTo(tt.input, output)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, tt.output, output)\n\t}\n}\n\ntype recursiveSource struct {\n\tField *recursiveSourceChild `mapconv:\",recursive\"`\n}\n\ntype recursiveSourceChild struct {\n\tField1 string `mapconv:\"Dest1\"`\n\tField2 string `mapconv:\"Dest2\"`\n}\n\ntype recursiveDest struct {\n\tField *recursiveDestChild\n}\n\ntype recursiveDestChild struct {\n\tDest1 string\n\tDest2 string\n}\n\ntype recursiveSourceSlice struct {\n\tFields []*recursiveSourceChild `mapconv:\"[]Slice,recursive\"`\n}\n\ntype recursiveDestSlice struct {\n\tSlice []*recursiveDestChild\n}\n\nfunc TestRecursive(t *testing.T) {\n\ttests := []struct {\n\t\tinput *recursiveSource\n\t\texpect *recursiveDest\n\t}{\n\t\t{\n\t\t\tinput: &recursiveSource{\n\t\t\t\tField: &recursiveSourceChild{\n\t\t\t\t\tField1: \"value1\",\n\t\t\t\t\tField2: \"value2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpect: &recursiveDest{\n\t\t\t\tField: &recursiveDestChild{\n\t\t\t\t\tDest1: \"value1\",\n\t\t\t\t\tDest2: \"value2\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tdest := &recursiveDest{}\n\t\terr := ConvertTo(tt.input, dest)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, tt.expect, dest)\n\n\t\t\/\/ reverse\n\t\tsource := &recursiveSource{}\n\t\terr = ConvertFrom(tt.expect, source)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, tt.input, source)\n\t}\n}\n\nfunc TestRecursiveSlice(t *testing.T) {\n\ttests := []struct {\n\t\tinput *recursiveSourceSlice\n\t\toutput *recursiveDestSlice\n\t}{\n\t\t{\n\t\t\tinput: &recursiveSourceSlice{\n\t\t\t\tFields: []*recursiveSourceChild{\n\t\t\t\t\t{\n\t\t\t\t\t\tField1: \"value1\",\n\t\t\t\t\t\tField2: \"value2\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tField1: \"value3\",\n\t\t\t\t\t\tField2: \"value4\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\toutput: &recursiveDestSlice{\n\t\t\t\tSlice: []*recursiveDestChild{\n\t\t\t\t\t{\n\t\t\t\t\t\tDest1: \"value1\",\n\t\t\t\t\t\tDest2: \"value2\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tDest1: \"value3\",\n\t\t\t\t\t\tDest2: \"value4\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\toutput := &recursiveDestSlice{}\n\t\terr := ConvertTo(tt.input, output)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, tt.output, output)\n\n\t\t\/\/ reverse\n\t\tsource := &recursiveSourceSlice{}\n\t\terr = ConvertFrom(tt.output, source)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, tt.input, source)\n\t}\n}\n\ntype sourceSquash struct {\n\tField *sourceSquashChild `mapconv:\",squash\"`\n}\n\ntype sourceSquashChild struct {\n\tField1 string\n\tField2 string\n}\n\ntype destSquash struct {\n\tField1 string\n\tField2 string\n}\n\nfunc TestSquash(t *testing.T) {\n\ttests := []struct {\n\t\tinput *sourceSquash\n\t\toutput *destSquash\n\t}{\n\t\t{\n\t\t\tinput: &sourceSquash{\n\t\t\t\tField: &sourceSquashChild{\n\t\t\t\t\tField1: \"f1\",\n\t\t\t\t\tField2: \"f2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\toutput: &destSquash{\n\t\t\t\tField1: \"f1\",\n\t\t\t\tField2: \"f2\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\toutput := &destSquash{}\n\t\terr := ConvertTo(tt.input, output)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, tt.output, output)\n\n\t\t\/\/ reverse\n\t\tsource := &sourceSquash{}\n\t\terr = ConvertFrom(tt.output, source)\n\t\trequire.Error(t, err)\n\t}\n}\n<commit_msg>mapconv: Improve test cases<commit_after>package mapconv\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\ntype dummyFrom struct {\n\tA string `mapconv:\"ValueA.A\"`\n\tB string `mapconv:\"ValueA.ValueB.B\"`\n\tC string `mapconv:\"ValueA.ValueB.ValueC.C\"`\n\tPointer *time.Time\n\tSlice []string\n\tNoTag string\n\tBool bool\n\tunexported string\n}\n\ntype dummyTo struct {\n\tValueA *struct {\n\t\tA string\n\t\tValueB *struct {\n\t\t\tB string\n\t\t\tValueC *struct {\n\t\t\t\tC string\n\t\t\t}\n\t\t}\n\t}\n\tPointer *time.Time\n\tSlice []string\n\tNoTag string\n\tBool bool\n\tunexported string\n}\n\nfunc TestConvertTo(t *testing.T) {\n\tzeroTime := time.Unix(0, 0)\n\ttests := []struct {\n\t\tinput *dummyFrom\n\t\toutput *dummyTo\n\t\terr error\n\t}{\n\t\t{\n\t\t\tinput: &dummyFrom{\n\t\t\t\tA: \"A\",\n\t\t\t\tB: \"B\",\n\t\t\t\tC: \"C\",\n\t\t\t\tPointer: &zeroTime,\n\t\t\t\tSlice: []string{\"a\", \"b\", \"c\"},\n\t\t\t\tNoTag: \"NoTag\",\n\t\t\t\tBool: true,\n\t\t\t\tunexported: \"unexported\",\n\t\t\t},\n\t\t\toutput: &dummyTo{\n\t\t\t\tValueA: &struct {\n\t\t\t\t\tA string\n\t\t\t\t\tValueB *struct {\n\t\t\t\t\t\tB string\n\t\t\t\t\t\tValueC *struct {\n\t\t\t\t\t\t\tC string\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}{\n\t\t\t\t\tA: \"A\",\n\t\t\t\t\tValueB: &struct {\n\t\t\t\t\t\tB string\n\t\t\t\t\t\tValueC *struct {\n\t\t\t\t\t\t\tC string\n\t\t\t\t\t\t}\n\t\t\t\t\t}{\n\t\t\t\t\t\tB: \"B\",\n\t\t\t\t\t\tValueC: &struct {\n\t\t\t\t\t\t\tC string\n\t\t\t\t\t\t}{\n\t\t\t\t\t\t\tC: \"C\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tPointer: &zeroTime,\n\t\t\t\tSlice: []string{\"a\", \"b\", \"c\"},\n\t\t\t\tNoTag: \"NoTag\",\n\t\t\t\tBool: true,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\toutput := &dummyTo{}\n\t\terr := ConvertTo(tt.input, output)\n\t\trequire.Equal(t, tt.err, err)\n\t\tif err == nil {\n\t\t\trequire.EqualValues(t, tt.output.ValueA, output.ValueA)\n\t\t\trequire.EqualValues(t, tt.output.Pointer.String(), output.Pointer.String())\n\t\t\trequire.EqualValues(t, tt.output.Slice, output.Slice)\n\t\t\trequire.EqualValues(t, tt.output.NoTag, output.NoTag)\n\t\t}\n\t}\n\n}\n\nfunc TestConvertFrom(t *testing.T) {\n\n\ttests := []struct {\n\t\toutput *dummyFrom\n\t\tinput *dummyTo\n\t\terr error\n\t}{\n\t\t{\n\t\t\toutput: &dummyFrom{\n\t\t\t\tA: \"A\",\n\t\t\t\tB: \"B\",\n\t\t\t\tC: \"C\",\n\t\t\t\tNoTag: \"NoTag\",\n\t\t\t\tBool: true,\n\t\t\t},\n\t\t\tinput: &dummyTo{\n\t\t\t\tValueA: &struct {\n\t\t\t\t\tA string\n\t\t\t\t\tValueB *struct {\n\t\t\t\t\t\tB string\n\t\t\t\t\t\tValueC *struct {\n\t\t\t\t\t\t\tC string\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}{\n\t\t\t\t\tA: \"A\",\n\t\t\t\t\tValueB: &struct {\n\t\t\t\t\t\tB string\n\t\t\t\t\t\tValueC *struct {\n\t\t\t\t\t\t\tC string\n\t\t\t\t\t\t}\n\t\t\t\t\t}{\n\t\t\t\t\t\tB: \"B\",\n\t\t\t\t\t\tValueC: &struct {\n\t\t\t\t\t\t\tC string\n\t\t\t\t\t\t}{\n\t\t\t\t\t\t\tC: \"C\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tNoTag: \"NoTag\",\n\t\t\t\tBool: true,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\toutput := &dummyFrom{}\n\t\terr := ConvertFrom(tt.input, output)\n\t\trequire.Equal(t, tt.err, err)\n\t\tif err == nil {\n\t\t\trequire.Equal(t, tt.output, output)\n\t\t}\n\t}\n\n}\n\ntype dummySlice struct {\n\tSlice []*dummySliceInner `json:\",omitempty\"`\n}\n\ntype dummySliceInner struct {\n\tValue string `json:\",omitempty\"`\n\tSlice []*dummySliceInner `json:\",omitempty\"`\n}\n\ntype dummyExtractInnerSlice struct {\n\tValues []string `json:\",omitempty\" mapconv:\"[]Slice.Value\"`\n\tNestedValues []string `json:\",omitempty\" mapconv:\"[]Slice.[]Slice.Value\"`\n}\n\nfunc TestExtractInnerSlice(t *testing.T) {\n\ttests := []struct {\n\t\tinput *dummySlice\n\t\texpect *dummyExtractInnerSlice\n\t}{\n\t\t{\n\t\t\tinput: &dummySlice{\n\t\t\t\tSlice: []*dummySliceInner{\n\t\t\t\t\t{Value: \"value1\"},\n\t\t\t\t\t{Value: \"value2\"},\n\t\t\t\t\t{\n\t\t\t\t\t\tValue: \"value3\",\n\t\t\t\t\t\tSlice: []*dummySliceInner{\n\t\t\t\t\t\t\t{Value: \"value4\"},\n\t\t\t\t\t\t\t{Value: \"value5\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpect: &dummyExtractInnerSlice{\n\t\t\t\tValues: []string{\"value1\", \"value2\", \"value3\"},\n\t\t\t\tNestedValues: []string{\"value4\", \"value5\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\toutput := &dummyExtractInnerSlice{}\n\t\terr := ConvertFrom(tt.input, output)\n\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, tt.expect, output)\n\t}\n}\n\nfunc TestInsertInnerSlice(t *testing.T) {\n\ttests := []struct {\n\t\tinput *dummyExtractInnerSlice\n\t\toutput *dummySlice\n\t}{\n\t\t{\n\t\t\tinput: &dummyExtractInnerSlice{\n\t\t\t\tValues: []string{\"value1\", \"value2\", \"value3\"},\n\t\t\t\tNestedValues: []string{\"value4\", \"value5\"},\n\t\t\t},\n\t\t\toutput: &dummySlice{\n\t\t\t\tSlice: []*dummySliceInner{\n\t\t\t\t\t{Value: \"value1\"},\n\t\t\t\t\t{Value: \"value2\"},\n\t\t\t\t\t{Value: \"value3\"},\n\t\t\t\t\t{\n\t\t\t\t\t\tSlice: []*dummySliceInner{\n\t\t\t\t\t\t\t{Value: \"value4\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tSlice: []*dummySliceInner{\n\t\t\t\t\t\t\t{Value: \"value5\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\toutput := &dummySlice{}\n\t\terr := ConvertTo(tt.input, output)\n\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, tt.output, output)\n\t}\n}\n\ntype hasDefaultSource struct {\n\tField string `mapconv:\"Field,default=default-value\"`\n}\n\ntype hasDefaultDest struct {\n\tField string\n}\n\nfunc TestDefaultValue(t *testing.T) {\n\ttests := []struct {\n\t\tinput *hasDefaultSource\n\t\toutput *hasDefaultDest\n\t}{\n\t\t{\n\t\t\tinput: &hasDefaultSource{},\n\t\t\toutput: &hasDefaultDest{\n\t\t\t\tField: \"default-value\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\toutput := &hasDefaultDest{}\n\t\terr := ConvertTo(tt.input, output)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, tt.output, output)\n\t}\n}\n\ntype multipleSource struct {\n\tField string `mapconv:\"Field1\/Field2\"`\n}\n\ntype multipleDest struct {\n\tField1 string\n\tField2 string\n}\n\nfunc TestMultipleDestination(t *testing.T) {\n\ttests := []struct {\n\t\tinput *multipleSource\n\t\toutput *multipleDest\n\t}{\n\t\t{\n\t\t\tinput: &multipleSource{\n\t\t\t\tField: \"value\",\n\t\t\t},\n\t\t\toutput: &multipleDest{\n\t\t\t\tField1: \"value\",\n\t\t\t\tField2: \"value\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\toutput := &multipleDest{}\n\t\terr := ConvertTo(tt.input, output)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, tt.output, output)\n\t}\n}\n\ntype recursiveSource struct {\n\tField *recursiveSourceChild `mapconv:\",recursive\"`\n}\n\ntype recursiveSourceChild struct {\n\tField1 string `mapconv:\"Dest1\"`\n\tField2 string `mapconv:\"Dest2\"`\n}\n\ntype recursiveDest struct {\n\tField *recursiveDestChild\n}\n\ntype recursiveDestChild struct {\n\tDest1 string\n\tDest2 string\n}\n\ntype recursiveSourceSlice struct {\n\tFields []*recursiveSourceChild `mapconv:\"[]Slice,recursive\"`\n}\n\ntype recursiveDestSlice struct {\n\tSlice []*recursiveDestChild\n}\n\nfunc TestRecursive(t *testing.T) {\n\ttests := []struct {\n\t\tinput *recursiveSource\n\t\texpect *recursiveDest\n\t}{\n\t\t{\n\t\t\tinput: &recursiveSource{\n\t\t\t\tField: &recursiveSourceChild{\n\t\t\t\t\tField1: \"value1\",\n\t\t\t\t\tField2: \"value2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpect: &recursiveDest{\n\t\t\t\tField: &recursiveDestChild{\n\t\t\t\t\tDest1: \"value1\",\n\t\t\t\t\tDest2: \"value2\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tdest := &recursiveDest{}\n\t\terr := ConvertTo(tt.input, dest)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, tt.expect, dest)\n\n\t\t\/\/ reverse\n\t\tsource := &recursiveSource{}\n\t\terr = ConvertFrom(tt.expect, source)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, tt.input, source)\n\t}\n}\n\nfunc TestRecursiveSlice(t *testing.T) {\n\ttests := []struct {\n\t\tinput *recursiveSourceSlice\n\t\toutput *recursiveDestSlice\n\t}{\n\t\t{\n\t\t\tinput: &recursiveSourceSlice{\n\t\t\t\tFields: []*recursiveSourceChild{\n\t\t\t\t\t{\n\t\t\t\t\t\tField1: \"value1\",\n\t\t\t\t\t\tField2: \"value2\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tField1: \"value3\",\n\t\t\t\t\t\tField2: \"value4\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\toutput: &recursiveDestSlice{\n\t\t\t\tSlice: []*recursiveDestChild{\n\t\t\t\t\t{\n\t\t\t\t\t\tDest1: \"value1\",\n\t\t\t\t\t\tDest2: \"value2\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tDest1: \"value3\",\n\t\t\t\t\t\tDest2: \"value4\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\toutput := &recursiveDestSlice{}\n\t\terr := ConvertTo(tt.input, output)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, tt.output, output)\n\n\t\t\/\/ reverse\n\t\tsource := &recursiveSourceSlice{}\n\t\terr = ConvertFrom(tt.output, source)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, tt.input, source)\n\t}\n}\n\ntype sourceSquash struct {\n\tField *sourceSquashChild `mapconv:\",squash\"`\n}\n\ntype sourceSquashChild struct {\n\tField1 string\n\tField2 string\n}\n\ntype destSquash struct {\n\tField1 string\n\tField2 string\n}\n\nfunc TestSquash(t *testing.T) {\n\ttests := []struct {\n\t\tinput *sourceSquash\n\t\toutput *destSquash\n\t}{\n\t\t{\n\t\t\tinput: &sourceSquash{\n\t\t\t\tField: &sourceSquashChild{\n\t\t\t\t\tField1: \"f1\",\n\t\t\t\t\tField2: \"f2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\toutput: &destSquash{\n\t\t\t\tField1: \"f1\",\n\t\t\t\tField2: \"f2\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\toutput := &destSquash{}\n\t\terr := ConvertTo(tt.input, output)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, tt.output, output)\n\n\t\t\/\/ reverse\n\t\tsource := &sourceSquash{}\n\t\terr = ConvertFrom(tt.output, source)\n\t\trequire.Error(t, err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package github\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/cli\"\n\n\t\"github.com\/scraperwiki\/hookbot\/pkg\/hookbot\"\n\t\"github.com\/scraperwiki\/hookbot\/pkg\/listen\"\n)\n\nvar RegexParseHeader = regexp.MustCompile(\"^\\\\s*([^\\\\:]+)\\\\s*:\\\\s*(.*)$\")\n\nfunc MustParseHeader(header string) (string, string) {\n\tif !RegexParseHeader.MatchString(header) {\n\t\tlog.Fatalf(\"Unable to parse header: %v (re: %v)\", header,\n\t\t\tRegexParseHeader.String())\n\t\treturn \"\", \"\"\n\t}\n\n\tparts := RegexParseHeader.FindStringSubmatch(header)\n\treturn parts[1], parts[2]\n}\n\nfunc MustParseHeaders(headerStrings []string) http.Header {\n\theaders := http.Header{}\n\n\tfor _, h := range headerStrings {\n\t\tkey, value := MustParseHeader(h)\n\t\theaders.Set(key, value)\n\t}\n\n\treturn headers\n}\n\nfunc MustMakeHeader(\n\ttarget *url.URL, origin string, headerStrings []string,\n) http.Header {\n\n\theader := MustParseHeaders(headerStrings)\n\tif origin == \"samehost\" {\n\t\torigin = \"\/\/\" + target.Host\n\t}\n\n\theader.Add(\"Origin\", origin)\n\theader.Add(\"X-Hookbot-Unsafe-Is-Ok\",\n\t\t\"I understand the security implications\")\n\n\treturn header\n}\n\nfunc ActionRoute(c *cli.Context) {\n\n\ttarget, err := url.Parse(c.String(\"monitor-url\"))\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to parse %q as URL: %v\", c.String(\"monitor-url\"), err)\n\t}\n\n\torigin := c.String(\"origin\")\n\n\theader := MustMakeHeader(target, origin, c.StringSlice(\"header\"))\n\tfinish := make(chan struct{})\n\n\tmessages, errors := listen.RetryingWatch(target.String(), header, finish)\n\n\toutbound := make(chan listen.Message, 1)\n\n\tsend := func(endpoint string, payload []byte) {\n\t\ttoken := Sha1HMAC(c.GlobalString(\"key\"), []byte(endpoint))\n\n\t\toutURL := fmt.Sprintf(\"https:\/\/%v@%v\/pub\/%v\", token, target.Host, endpoint)\n\n\t\tbody := ioutil.NopCloser(bytes.NewBuffer(payload))\n\n\t\tout, err := http.NewRequest(\"POST\", outURL, body)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to construct outbound req: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tout.SetBasicAuth(token, \"\")\n\n\t\tresp, err := http.DefaultClient.Do(out)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to transmit: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tlog.Printf(\"Transmit: %v %v\", resp.StatusCode, outURL)\n\t}\n\n\tgo func() {\n\t\tfor err := range errors {\n\t\t\tlog.Printf(\"Encountered error in Watch: %v\", err)\n\t\t}\n\t}()\n\n\tfor m := range messages {\n\t\tlog.Printf(\"Receive message\")\n\t\tif !IsValidGithubSignature(c.GlobalString(\"github-secret\"), m) {\n\t\t\tlog.Printf(\"Reject github signature\")\n\t\t\tcontinue\n\t\t}\n\n\t\tRoute(m, send)\n\t}\n\tclose(outbound)\n}\n\ntype Event struct {\n\tType string\n\n\tRepository *Repository `json:\"repository\"`\n\tPusher *Pusher `json:\"pusher\"`\n\n\tRef string `json:\"ref\"`\n\tBefore string `json:\"before\"`\n\tAfter string `json:\"after\"`\n}\n\nfunc (e *Event) Branch() string {\n\treturn strings.TrimPrefix(e.Ref, \"refs\/heads\/\")\n}\n\ntype Pusher struct {\n\tName string `json:\"name\"`\n\tEmail string `json:\"email\"`\n}\n\ntype Repository struct {\n\tFullName string `json:\"full_name\"`\n}\n\nfunc Route(message []byte, send func(string, []byte)) {\n\n\ttype GithubMessage struct {\n\t\tEvent, Signature string\n\t\tPayload []byte\n\t}\n\n\tvar m GithubMessage\n\n\terr := json.Unmarshal(message, &m)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to unmarshal message in IsValidGithubSignature: %v\",\n\t\t\terr)\n\t\treturn\n\t}\n\n\tvar event Event\n\tevent.Type = m.Event\n\n\terr = json.Unmarshal(m.Payload, &event)\n\tif err != nil {\n\t\tlog.Printf(\"Route: error in json.Unmarshal: %v\", err)\n\t\treturn\n\t}\n\n\tif event.Repository == nil || event.Repository.FullName == \"\" {\n\t\tlog.Printf(\"Could not identify repository for event %v\", event.Type)\n\t\treturn\n\t}\n\n\trepo := event.Repository.FullName\n\tbranch := event.Branch()\n\n\twho := \"<unknown>\"\n\tif event.Pusher != nil {\n\t\twho = event.Pusher.Name\n\t}\n\n\tmsgBytes, err := json.Marshal(map[string]string{\n\t\t\"Type\": event.Type,\n\t\t\"Repo\": repo,\n\t\t\"Branch\": branch,\n\t\t\"SHA\": event.After,\n\t\t\"Who\": who,\n\t})\n\tif err != nil {\n\t\tlog.Printf(\"Failed to marshal Update: %v\", err)\n\t\treturn\n\t}\n\n\tswitch event.Type {\n\tcase \"push\":\n\t\ttopicFmt := \"github.com\/repo\/%s\/branch\/%s\"\n\t\ttopic := fmt.Sprintf(topicFmt, repo, branch)\n\t\tsend(topic, msgBytes)\n\tdefault:\n\t\tlog.Printf(\"Unhandled event type: %v\", event.Type)\n\t\treturn\n\t}\n}\n\ntype Router struct{}\n\nfunc (r *Router) Name() string {\n\treturn \"github\"\n}\n\nfunc (r *Router) Topics() []string {\n\treturn []string{\"\/unsafe\/github.com\/?recursive\"}\n}\n\nfunc (r *Router) Route(in hookbot.Message, publish func(hookbot.Message) bool) {\n\n\tlog.Printf(\"route github: %q\", in.Topic)\n\n\ttype GithubMessage struct {\n\t\tEvent, Signature string\n\t\tPayload []byte\n\t}\n\n\tvar m GithubMessage\n\n\terr := json.Unmarshal(in.Body, &m)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to unmarshal message in IsValidGithubSignature: %v\",\n\t\t\terr)\n\t\treturn\n\t}\n\n\tvar event Event\n\tevent.Type = m.Event\n\n\terr = json.Unmarshal(m.Payload, &event)\n\tif err != nil {\n\t\tlog.Printf(\"Route: error in json.Unmarshal: %v\", err)\n\t\treturn\n\t}\n\n\tif event.Repository == nil || event.Repository.FullName == \"\" {\n\t\tlog.Printf(\"Could not identify repository for event %v\", event.Type)\n\t\treturn\n\t}\n\n\trepo := event.Repository.FullName\n\tbranch := event.Branch()\n\n\twho := \"<unknown>\"\n\tif event.Pusher != nil {\n\t\twho = event.Pusher.Name\n\t}\n\n\tmsgBytes, err := json.Marshal(map[string]string{\n\t\t\"Type\": event.Type,\n\t\t\"Repo\": repo,\n\t\t\"Branch\": branch,\n\t\t\"SHA\": event.After,\n\t\t\"Who\": who,\n\t})\n\tif err != nil {\n\t\tlog.Printf(\"Failed to marshal Update: %v\", err)\n\t\treturn\n\t}\n\n\tswitch event.Type {\n\tcase \"push\":\n\t\ttopicFmt := \"github.com\/repo\/%s\/branch\/%s\"\n\n\t\t\/\/ May fail\n\t\t_ = publish(hookbot.Message{\n\t\t\tTopic: fmt.Sprintf(topicFmt, repo, branch),\n\t\t\tBody: msgBytes,\n\t\t})\n\tdefault:\n\t\tlog.Printf(\"Unhandled event type: %v\", event.Type)\n\t\treturn\n\t}\n\n}\n\nfunc init() {\n\thookbot.RegisterRouter(&Router{})\n}\n<commit_msg>Rewrite standalone router to remove dup'd code<commit_after>package github\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/cli\"\n\n\t\"github.com\/scraperwiki\/hookbot\/pkg\/hookbot\"\n\t\"github.com\/scraperwiki\/hookbot\/pkg\/listen\"\n)\n\nvar RegexParseHeader = regexp.MustCompile(\"^\\\\s*([^\\\\:]+)\\\\s*:\\\\s*(.*)$\")\n\nfunc MustParseHeader(header string) (string, string) {\n\tif !RegexParseHeader.MatchString(header) {\n\t\tlog.Fatalf(\"Unable to parse header: %v (re: %v)\", header,\n\t\t\tRegexParseHeader.String())\n\t\treturn \"\", \"\"\n\t}\n\n\tparts := RegexParseHeader.FindStringSubmatch(header)\n\treturn parts[1], parts[2]\n}\n\nfunc MustParseHeaders(headerStrings []string) http.Header {\n\theaders := http.Header{}\n\n\tfor _, h := range headerStrings {\n\t\tkey, value := MustParseHeader(h)\n\t\theaders.Set(key, value)\n\t}\n\n\treturn headers\n}\n\nfunc MustMakeHeader(\n\ttarget *url.URL, origin string, headerStrings []string,\n) http.Header {\n\n\theader := MustParseHeaders(headerStrings)\n\tif origin == \"samehost\" {\n\t\torigin = \"\/\/\" + target.Host\n\t}\n\n\theader.Add(\"Origin\", origin)\n\theader.Add(\"X-Hookbot-Unsafe-Is-Ok\",\n\t\t\"I understand the security implications\")\n\n\treturn header\n}\n\nfunc ActionRoute(c *cli.Context) {\n\n\ttarget, err := url.Parse(c.String(\"monitor-url\"))\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to parse %q as URL: %v\", c.String(\"monitor-url\"), err)\n\t}\n\n\torigin := c.String(\"origin\")\n\n\theader := MustMakeHeader(target, origin, c.StringSlice(\"header\"))\n\tfinish := make(chan struct{})\n\n\tmessages, errors := listen.RetryingWatch(target.String(), header, finish)\n\n\toutbound := make(chan listen.Message, 1)\n\n\tpublish := func(m hookbot.Message) bool {\n\t\ttoken := Sha1HMAC(c.GlobalString(\"key\"), []byte(m.Topic))\n\n\t\toutURL := fmt.Sprintf(\"https:\/\/%v@%v\/pub\/%s\", token, target.Host, m.Topic)\n\n\t\tbody := ioutil.NopCloser(bytes.NewBuffer(m.Body))\n\n\t\tout, err := http.NewRequest(\"POST\", outURL, body)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to construct outbound req: %v\", err)\n\t\t\treturn false\n\t\t}\n\t\tout.SetBasicAuth(token, \"\")\n\n\t\tresp, err := http.DefaultClient.Do(out)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to transmit: %v\", err)\n\t\t\treturn false\n\t\t}\n\t\tlog.Printf(\"Transmit: %v %v\", resp.StatusCode, outURL)\n\t\treturn true\n\t}\n\n\tgo func() {\n\t\tfor err := range errors {\n\t\t\tlog.Printf(\"Encountered error in Watch: %v\", err)\n\t\t}\n\t}()\n\n\trouter := &Router{}\n\n\tfor mBytes := range messages {\n\t\tlog.Printf(\"Receive message\")\n\n\t\tparts := bytes.Split(mBytes, []byte{0})\n\t\ttopic := parts[0]\n\t\tbody := parts[1]\n\n\t\tif !IsValidGithubSignature(c.GlobalString(\"github-secret\"), body) {\n\t\t\tlog.Printf(\"Reject github signature\")\n\t\t\tcontinue\n\t\t}\n\n\t\tm := hookbot.Message{Topic: string(topic), Body: body}\n\t\trouter.Route(m, publish)\n\t}\n\tclose(outbound)\n}\n\ntype Event struct {\n\tType string\n\n\tRepository *Repository `json:\"repository\"`\n\tPusher *Pusher `json:\"pusher\"`\n\n\tRef string `json:\"ref\"`\n\tBefore string `json:\"before\"`\n\tAfter string `json:\"after\"`\n}\n\nfunc (e *Event) Branch() string {\n\treturn strings.TrimPrefix(e.Ref, \"refs\/heads\/\")\n}\n\ntype Pusher struct {\n\tName string `json:\"name\"`\n\tEmail string `json:\"email\"`\n}\n\ntype Repository struct {\n\tFullName string `json:\"full_name\"`\n}\n\ntype Router struct{}\n\nfunc (r *Router) Name() string {\n\treturn \"github\"\n}\n\nfunc (r *Router) Topics() []string {\n\treturn []string{\"\/unsafe\/github.com\/?recursive\"}\n}\n\nfunc (r *Router) Route(in hookbot.Message, publish func(hookbot.Message) bool) {\n\n\tlog.Printf(\"route github: %q\", in.Topic)\n\n\ttype GithubMessage struct {\n\t\tEvent, Signature string\n\t\tPayload []byte\n\t}\n\n\tvar m GithubMessage\n\n\terr := json.Unmarshal(in.Body, &m)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to unmarshal message in IsValidGithubSignature: %v\",\n\t\t\terr)\n\t\treturn\n\t}\n\n\tvar event Event\n\tevent.Type = m.Event\n\n\terr = json.Unmarshal(m.Payload, &event)\n\tif err != nil {\n\t\tlog.Printf(\"Route: error in json.Unmarshal: %v\", err)\n\t\treturn\n\t}\n\n\tif event.Repository == nil || event.Repository.FullName == \"\" {\n\t\tlog.Printf(\"Could not identify repository for event %v\", event.Type)\n\t\treturn\n\t}\n\n\trepo := event.Repository.FullName\n\tbranch := event.Branch()\n\n\twho := \"<unknown>\"\n\tif event.Pusher != nil {\n\t\twho = event.Pusher.Name\n\t}\n\n\tmsgBytes, err := json.Marshal(map[string]string{\n\t\t\"Type\": event.Type,\n\t\t\"Repo\": repo,\n\t\t\"Branch\": branch,\n\t\t\"SHA\": event.After,\n\t\t\"Who\": who,\n\t})\n\tif err != nil {\n\t\tlog.Printf(\"Failed to marshal Update: %v\", err)\n\t\treturn\n\t}\n\n\tswitch event.Type {\n\tcase \"push\":\n\t\ttopicFmt := \"github.com\/repo\/%s\/branch\/%s\"\n\n\t\t\/\/ May fail\n\t\t_ = publish(hookbot.Message{\n\t\t\tTopic: fmt.Sprintf(topicFmt, repo, branch),\n\t\t\tBody: msgBytes,\n\t\t})\n\tdefault:\n\t\tlog.Printf(\"Unhandled event type: %v\", event.Type)\n\t\treturn\n\t}\n\n}\n\nfunc init() {\n\thookbot.RegisterRouter(&Router{})\n}\n<|endoftext|>"} {"text":"<commit_before>package manager\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cortexproject\/cortex\/pkg\/ruler\"\n\t\"github.com\/go-kit\/kit\/log\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/common\/model\"\n\t\"github.com\/prometheus\/prometheus\/notifier\"\n\t\"github.com\/prometheus\/prometheus\/pkg\/labels\"\n\t\"github.com\/prometheus\/prometheus\/pkg\/rulefmt\"\n\t\"github.com\/prometheus\/prometheus\/pkg\/timestamp\"\n\t\"github.com\/prometheus\/prometheus\/promql\"\n\t\"github.com\/prometheus\/prometheus\/promql\/parser\"\n\t\"github.com\/prometheus\/prometheus\/rules\"\n\t\"github.com\/prometheus\/prometheus\/template\"\n\t\"github.com\/weaveworks\/common\/user\"\n\tyaml \"gopkg.in\/yaml.v3\"\n\n\t\"github.com\/grafana\/loki\/pkg\/logproto\"\n\t\"github.com\/grafana\/loki\/pkg\/logql\"\n)\n\n\/\/ engineQueryFunc returns a new query function using the rules.EngineQueryFunc function\n\/\/ and passing an altered timestamp.\nfunc engineQueryFunc(engine *logql.Engine, delay time.Duration) rules.QueryFunc {\n\treturn rules.QueryFunc(func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {\n\t\tadjusted := t.Add(-delay)\n\t\tparams := logql.NewLiteralParams(\n\t\t\tqs,\n\t\t\tadjusted,\n\t\t\tadjusted,\n\t\t\t0,\n\t\t\t0,\n\t\t\tlogproto.FORWARD,\n\t\t\t0,\n\t\t\tnil,\n\t\t)\n\t\tq := engine.Query(params)\n\n\t\tres, err := q.Exec(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tswitch v := res.Data.(type) {\n\t\tcase promql.Vector:\n\t\t\treturn v, nil\n\t\tcase promql.Scalar:\n\t\t\treturn promql.Vector{promql.Sample{\n\t\t\t\tPoint: promql.Point(v),\n\t\t\t\tMetric: labels.Labels{},\n\t\t\t}}, nil\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"rule result is not a vector or scalar\")\n\t\t}\n\t})\n\n}\n\n\/\/ MultiTenantManagerAdapter will wrap a MultiTenantManager which validates loki rules\nfunc MultiTenantManagerAdapter(mgr ruler.MultiTenantManager) *MultiTenantManager {\n\treturn &MultiTenantManager{mgr}\n}\n\n\/\/ MultiTenantManager wraps a cortex MultiTenantManager but validates loki rules\ntype MultiTenantManager struct {\n\truler.MultiTenantManager\n}\n\n\/\/ ValidateRuleGroup validates a rulegroup\nfunc (m *MultiTenantManager) ValidateRuleGroup(grp rulefmt.RuleGroup) []error {\n\treturn validateGroups(grp)\n}\n\nfunc MemstoreTenantManager(\n\tcfg ruler.Config,\n\tengine *logql.Engine,\n) ruler.ManagerFactory {\n\tvar metrics *Metrics\n\n\treturn func(\n\t\tctx context.Context,\n\t\tuserID string,\n\t\tnotifier *notifier.Manager,\n\t\tlogger log.Logger,\n\t\treg prometheus.Registerer,\n\t) *rules.Manager {\n\n\t\t\/\/ We'll ignore the passed registere and use the default registerer to avoid prefix issues and other weirdness.\n\t\t\/\/ This closure prevents re-registering.\n\t\tif metrics == nil {\n\t\t\tmetrics = NewMetrics(prometheus.DefaultRegisterer)\n\t\t}\n\t\tlogger = log.With(logger, \"user\", userID)\n\t\tqueryFunc := engineQueryFunc(engine, cfg.EvaluationDelay)\n\t\tmemStore := NewMemStore(userID, queryFunc, metrics, 5*time.Minute, log.With(logger, \"subcomponent\", \"MemStore\"))\n\n\t\tmgr := rules.NewManager(&rules.ManagerOptions{\n\t\t\tAppendable: NoopAppender{},\n\t\t\tQueryable: memStore,\n\t\t\tQueryFunc: queryFunc,\n\t\t\tContext: user.InjectOrgID(ctx, userID),\n\t\t\tExternalURL: cfg.ExternalURL.URL,\n\t\t\tNotifyFunc: ruler.SendAlerts(notifier, cfg.ExternalURL.URL.String()),\n\t\t\tLogger: logger,\n\t\t\tRegisterer: reg,\n\t\t\tOutageTolerance: cfg.OutageTolerance,\n\t\t\tForGracePeriod: cfg.ForGracePeriod,\n\t\t\tResendDelay: cfg.ResendDelay,\n\t\t\tGroupLoader: GroupLoader{},\n\t\t})\n\n\t\t\/\/ initialize memStore, bound to the manager's alerting rules\n\t\tmemStore.Start(mgr)\n\n\t\treturn mgr\n\t}\n}\n\ntype GroupLoader struct{}\n\nfunc (GroupLoader) Parse(query string) (parser.Expr, error) {\n\texpr, err := logql.ParseExpr(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn exprAdapter{expr}, nil\n}\n\nfunc (g GroupLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) {\n\tb, err := ioutil.ReadFile(identifier)\n\tif err != nil {\n\t\treturn nil, []error{errors.Wrap(err, identifier)}\n\t}\n\trgs, errs := g.parseRules(b)\n\tfor i := range errs {\n\t\terrs[i] = errors.Wrap(errs[i], identifier)\n\t}\n\treturn rgs, errs\n}\n\nfunc (GroupLoader) parseRules(content []byte) (*rulefmt.RuleGroups, []error) {\n\tvar (\n\t\tgroups rulefmt.RuleGroups\n\t\terrs []error\n\t)\n\n\tdecoder := yaml.NewDecoder(bytes.NewReader(content))\n\tdecoder.KnownFields(true)\n\n\tif err := decoder.Decode(&groups); err != nil {\n\t\terrs = append(errs, err)\n\t}\n\n\tif len(errs) > 0 {\n\t\treturn nil, errs\n\t}\n\n\treturn &groups, validateGroups(groups.Groups...)\n}\n\nfunc validateGroups(grps ...rulefmt.RuleGroup) (errs []error) {\n\tset := map[string]struct{}{}\n\n\tfor i, g := range grps {\n\t\tif g.Name == \"\" {\n\t\t\terrs = append(errs, errors.Errorf(\"group %d: Groupname must not be empty\", i))\n\t\t}\n\n\t\tif _, ok := set[g.Name]; ok {\n\t\t\terrs = append(\n\t\t\t\terrs,\n\t\t\t\terrors.Errorf(\"groupname: \\\"%s\\\" is repeated in the same file\", g.Name),\n\t\t\t)\n\t\t}\n\n\t\tset[g.Name] = struct{}{}\n\n\t\tfor _, r := range g.Rules {\n\t\t\tif err := validateRuleNode(&r); err != nil {\n\t\t\t\terrs = append(errs, err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn errs\n}\n\nfunc validateRuleNode(r *rulefmt.RuleNode) error {\n\tif r.Record.Value != \"\" && r.Alert.Value != \"\" {\n\t\treturn errors.Errorf(\"only one of 'record' and 'alert' must be set\")\n\t}\n\n\tif r.Record.Value == \"\" && r.Alert.Value == \"\" {\n\t\treturn errors.Errorf(\"one of 'record' or 'alert' must be set\")\n\t}\n\n\tif r.Record.Value != \"\" && r.Alert.Value != \"\" {\n\t\treturn errors.Errorf(\"only one of 'record' or 'alert' must be set\")\n\t}\n\n\tif r.Expr.Value == \"\" {\n\t\treturn errors.Errorf(\"field 'expr' must be set in rule\")\n\t} else if _, err := logql.ParseExpr(r.Expr.Value); err != nil {\n\t\treturn errors.Wrapf(err, \"could not parse expression\")\n\t}\n\n\tif r.Record.Value != \"\" {\n\t\tif len(r.Annotations) > 0 {\n\t\t\treturn errors.Errorf(\"invalid field 'annotations' in recording rule\")\n\t\t}\n\t\tif r.For != 0 {\n\t\t\treturn errors.Errorf(\"invalid field 'for' in recording rule\")\n\t\t}\n\t\tif !model.IsValidMetricName(model.LabelValue(r.Record.Value)) {\n\t\t\treturn errors.Errorf(\"invalid recording rule name: %s\", r.Record.Value)\n\t\t}\n\t}\n\n\tfor k, v := range r.Labels {\n\t\tif !model.LabelName(k).IsValid() || k == model.MetricNameLabel {\n\t\t\treturn errors.Errorf(\"invalid label name: %s\", k)\n\t\t}\n\n\t\tif !model.LabelValue(v).IsValid() {\n\t\t\treturn errors.Errorf(\"invalid label value: %s\", v)\n\t\t}\n\t}\n\n\tfor k := range r.Annotations {\n\t\tif !model.LabelName(k).IsValid() {\n\t\t\treturn errors.Errorf(\"invalid annotation name: %s\", k)\n\t\t}\n\t}\n\n\tfor _, err := range testTemplateParsing(r) {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ testTemplateParsing checks if the templates used in labels and annotations\n\/\/ of the alerting rules are parsed correctly.\nfunc testTemplateParsing(rl *rulefmt.RuleNode) (errs []error) {\n\tif rl.Alert.Value == \"\" {\n\t\t\/\/ Not an alerting rule.\n\t\treturn errs\n\t}\n\n\t\/\/ Trying to parse templates.\n\ttmplData := template.AlertTemplateData(map[string]string{}, map[string]string{}, 0)\n\tdefs := []string{\n\t\t\"{{$labels := .Labels}}\",\n\t\t\"{{$externalLabels := .ExternalLabels}}\",\n\t\t\"{{$value := .Value}}\",\n\t}\n\tparseTest := func(text string) error {\n\t\ttmpl := template.NewTemplateExpander(\n\t\t\tcontext.TODO(),\n\t\t\tstrings.Join(append(defs, text), \"\"),\n\t\t\t\"__alert_\"+rl.Alert.Value,\n\t\t\ttmplData,\n\t\t\tmodel.Time(timestamp.FromTime(time.Now())),\n\t\t\tnil,\n\t\t\tnil,\n\t\t)\n\t\treturn tmpl.ParseTest()\n\t}\n\n\t\/\/ Parsing Labels.\n\tfor k, val := range rl.Labels {\n\t\terr := parseTest(val)\n\t\tif err != nil {\n\t\t\terrs = append(errs, errors.Wrapf(err, \"label %q\", k))\n\t\t}\n\t}\n\n\t\/\/ Parsing Annotations.\n\tfor k, val := range rl.Annotations {\n\t\terr := parseTest(val)\n\t\tif err != nil {\n\t\t\terrs = append(errs, errors.Wrapf(err, \"annotation %q\", k))\n\t\t}\n\t}\n\n\treturn errs\n}\n\n\/\/ Allows logql expressions to be treated as promql expressions by the prometheus rules pkg.\ntype exprAdapter struct {\n\tlogql.Expr\n}\n\nfunc (exprAdapter) PositionRange() parser.PositionRange { return parser.PositionRange{} }\nfunc (exprAdapter) PromQLExpr() {}\nfunc (exprAdapter) Type() parser.ValueType { return parser.ValueType(\"unimplemented\") }\n<commit_msg>exposes rule group validation fn (#2662)<commit_after>package manager\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cortexproject\/cortex\/pkg\/ruler\"\n\t\"github.com\/go-kit\/kit\/log\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/common\/model\"\n\t\"github.com\/prometheus\/prometheus\/notifier\"\n\t\"github.com\/prometheus\/prometheus\/pkg\/labels\"\n\t\"github.com\/prometheus\/prometheus\/pkg\/rulefmt\"\n\t\"github.com\/prometheus\/prometheus\/pkg\/timestamp\"\n\t\"github.com\/prometheus\/prometheus\/promql\"\n\t\"github.com\/prometheus\/prometheus\/promql\/parser\"\n\t\"github.com\/prometheus\/prometheus\/rules\"\n\t\"github.com\/prometheus\/prometheus\/template\"\n\t\"github.com\/weaveworks\/common\/user\"\n\tyaml \"gopkg.in\/yaml.v3\"\n\n\t\"github.com\/grafana\/loki\/pkg\/logproto\"\n\t\"github.com\/grafana\/loki\/pkg\/logql\"\n)\n\n\/\/ engineQueryFunc returns a new query function using the rules.EngineQueryFunc function\n\/\/ and passing an altered timestamp.\nfunc engineQueryFunc(engine *logql.Engine, delay time.Duration) rules.QueryFunc {\n\treturn rules.QueryFunc(func(ctx context.Context, qs string, t time.Time) (promql.Vector, error) {\n\t\tadjusted := t.Add(-delay)\n\t\tparams := logql.NewLiteralParams(\n\t\t\tqs,\n\t\t\tadjusted,\n\t\t\tadjusted,\n\t\t\t0,\n\t\t\t0,\n\t\t\tlogproto.FORWARD,\n\t\t\t0,\n\t\t\tnil,\n\t\t)\n\t\tq := engine.Query(params)\n\n\t\tres, err := q.Exec(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tswitch v := res.Data.(type) {\n\t\tcase promql.Vector:\n\t\t\treturn v, nil\n\t\tcase promql.Scalar:\n\t\t\treturn promql.Vector{promql.Sample{\n\t\t\t\tPoint: promql.Point(v),\n\t\t\t\tMetric: labels.Labels{},\n\t\t\t}}, nil\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"rule result is not a vector or scalar\")\n\t\t}\n\t})\n\n}\n\n\/\/ MultiTenantManagerAdapter will wrap a MultiTenantManager which validates loki rules\nfunc MultiTenantManagerAdapter(mgr ruler.MultiTenantManager) *MultiTenantManager {\n\treturn &MultiTenantManager{mgr}\n}\n\n\/\/ MultiTenantManager wraps a cortex MultiTenantManager but validates loki rules\ntype MultiTenantManager struct {\n\truler.MultiTenantManager\n}\n\n\/\/ ValidateRuleGroup validates a rulegroup\nfunc (m *MultiTenantManager) ValidateRuleGroup(grp rulefmt.RuleGroup) []error {\n\treturn ValidateGroups(grp)\n}\n\nfunc MemstoreTenantManager(\n\tcfg ruler.Config,\n\tengine *logql.Engine,\n) ruler.ManagerFactory {\n\tvar metrics *Metrics\n\n\treturn func(\n\t\tctx context.Context,\n\t\tuserID string,\n\t\tnotifier *notifier.Manager,\n\t\tlogger log.Logger,\n\t\treg prometheus.Registerer,\n\t) *rules.Manager {\n\n\t\t\/\/ We'll ignore the passed registere and use the default registerer to avoid prefix issues and other weirdness.\n\t\t\/\/ This closure prevents re-registering.\n\t\tif metrics == nil {\n\t\t\tmetrics = NewMetrics(prometheus.DefaultRegisterer)\n\t\t}\n\t\tlogger = log.With(logger, \"user\", userID)\n\t\tqueryFunc := engineQueryFunc(engine, cfg.EvaluationDelay)\n\t\tmemStore := NewMemStore(userID, queryFunc, metrics, 5*time.Minute, log.With(logger, \"subcomponent\", \"MemStore\"))\n\n\t\tmgr := rules.NewManager(&rules.ManagerOptions{\n\t\t\tAppendable: NoopAppender{},\n\t\t\tQueryable: memStore,\n\t\t\tQueryFunc: queryFunc,\n\t\t\tContext: user.InjectOrgID(ctx, userID),\n\t\t\tExternalURL: cfg.ExternalURL.URL,\n\t\t\tNotifyFunc: ruler.SendAlerts(notifier, cfg.ExternalURL.URL.String()),\n\t\t\tLogger: logger,\n\t\t\tRegisterer: reg,\n\t\t\tOutageTolerance: cfg.OutageTolerance,\n\t\t\tForGracePeriod: cfg.ForGracePeriod,\n\t\t\tResendDelay: cfg.ResendDelay,\n\t\t\tGroupLoader: GroupLoader{},\n\t\t})\n\n\t\t\/\/ initialize memStore, bound to the manager's alerting rules\n\t\tmemStore.Start(mgr)\n\n\t\treturn mgr\n\t}\n}\n\ntype GroupLoader struct{}\n\nfunc (GroupLoader) Parse(query string) (parser.Expr, error) {\n\texpr, err := logql.ParseExpr(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn exprAdapter{expr}, nil\n}\n\nfunc (g GroupLoader) Load(identifier string) (*rulefmt.RuleGroups, []error) {\n\tb, err := ioutil.ReadFile(identifier)\n\tif err != nil {\n\t\treturn nil, []error{errors.Wrap(err, identifier)}\n\t}\n\trgs, errs := g.parseRules(b)\n\tfor i := range errs {\n\t\terrs[i] = errors.Wrap(errs[i], identifier)\n\t}\n\treturn rgs, errs\n}\n\nfunc (GroupLoader) parseRules(content []byte) (*rulefmt.RuleGroups, []error) {\n\tvar (\n\t\tgroups rulefmt.RuleGroups\n\t\terrs []error\n\t)\n\n\tdecoder := yaml.NewDecoder(bytes.NewReader(content))\n\tdecoder.KnownFields(true)\n\n\tif err := decoder.Decode(&groups); err != nil {\n\t\terrs = append(errs, err)\n\t}\n\n\tif len(errs) > 0 {\n\t\treturn nil, errs\n\t}\n\n\treturn &groups, ValidateGroups(groups.Groups...)\n}\n\nfunc ValidateGroups(grps ...rulefmt.RuleGroup) (errs []error) {\n\tset := map[string]struct{}{}\n\n\tfor i, g := range grps {\n\t\tif g.Name == \"\" {\n\t\t\terrs = append(errs, errors.Errorf(\"group %d: Groupname must not be empty\", i))\n\t\t}\n\n\t\tif _, ok := set[g.Name]; ok {\n\t\t\terrs = append(\n\t\t\t\terrs,\n\t\t\t\terrors.Errorf(\"groupname: \\\"%s\\\" is repeated in the same file\", g.Name),\n\t\t\t)\n\t\t}\n\n\t\tset[g.Name] = struct{}{}\n\n\t\tfor _, r := range g.Rules {\n\t\t\tif err := validateRuleNode(&r); err != nil {\n\t\t\t\terrs = append(errs, err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn errs\n}\n\nfunc validateRuleNode(r *rulefmt.RuleNode) error {\n\tif r.Record.Value != \"\" && r.Alert.Value != \"\" {\n\t\treturn errors.Errorf(\"only one of 'record' and 'alert' must be set\")\n\t}\n\n\tif r.Record.Value == \"\" && r.Alert.Value == \"\" {\n\t\treturn errors.Errorf(\"one of 'record' or 'alert' must be set\")\n\t}\n\n\tif r.Record.Value != \"\" && r.Alert.Value != \"\" {\n\t\treturn errors.Errorf(\"only one of 'record' or 'alert' must be set\")\n\t}\n\n\tif r.Expr.Value == \"\" {\n\t\treturn errors.Errorf(\"field 'expr' must be set in rule\")\n\t} else if _, err := logql.ParseExpr(r.Expr.Value); err != nil {\n\t\treturn errors.Wrapf(err, \"could not parse expression\")\n\t}\n\n\tif r.Record.Value != \"\" {\n\t\tif len(r.Annotations) > 0 {\n\t\t\treturn errors.Errorf(\"invalid field 'annotations' in recording rule\")\n\t\t}\n\t\tif r.For != 0 {\n\t\t\treturn errors.Errorf(\"invalid field 'for' in recording rule\")\n\t\t}\n\t\tif !model.IsValidMetricName(model.LabelValue(r.Record.Value)) {\n\t\t\treturn errors.Errorf(\"invalid recording rule name: %s\", r.Record.Value)\n\t\t}\n\t}\n\n\tfor k, v := range r.Labels {\n\t\tif !model.LabelName(k).IsValid() || k == model.MetricNameLabel {\n\t\t\treturn errors.Errorf(\"invalid label name: %s\", k)\n\t\t}\n\n\t\tif !model.LabelValue(v).IsValid() {\n\t\t\treturn errors.Errorf(\"invalid label value: %s\", v)\n\t\t}\n\t}\n\n\tfor k := range r.Annotations {\n\t\tif !model.LabelName(k).IsValid() {\n\t\t\treturn errors.Errorf(\"invalid annotation name: %s\", k)\n\t\t}\n\t}\n\n\tfor _, err := range testTemplateParsing(r) {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ testTemplateParsing checks if the templates used in labels and annotations\n\/\/ of the alerting rules are parsed correctly.\nfunc testTemplateParsing(rl *rulefmt.RuleNode) (errs []error) {\n\tif rl.Alert.Value == \"\" {\n\t\t\/\/ Not an alerting rule.\n\t\treturn errs\n\t}\n\n\t\/\/ Trying to parse templates.\n\ttmplData := template.AlertTemplateData(map[string]string{}, map[string]string{}, 0)\n\tdefs := []string{\n\t\t\"{{$labels := .Labels}}\",\n\t\t\"{{$externalLabels := .ExternalLabels}}\",\n\t\t\"{{$value := .Value}}\",\n\t}\n\tparseTest := func(text string) error {\n\t\ttmpl := template.NewTemplateExpander(\n\t\t\tcontext.TODO(),\n\t\t\tstrings.Join(append(defs, text), \"\"),\n\t\t\t\"__alert_\"+rl.Alert.Value,\n\t\t\ttmplData,\n\t\t\tmodel.Time(timestamp.FromTime(time.Now())),\n\t\t\tnil,\n\t\t\tnil,\n\t\t)\n\t\treturn tmpl.ParseTest()\n\t}\n\n\t\/\/ Parsing Labels.\n\tfor k, val := range rl.Labels {\n\t\terr := parseTest(val)\n\t\tif err != nil {\n\t\t\terrs = append(errs, errors.Wrapf(err, \"label %q\", k))\n\t\t}\n\t}\n\n\t\/\/ Parsing Annotations.\n\tfor k, val := range rl.Annotations {\n\t\terr := parseTest(val)\n\t\tif err != nil {\n\t\t\terrs = append(errs, errors.Wrapf(err, \"annotation %q\", k))\n\t\t}\n\t}\n\n\treturn errs\n}\n\n\/\/ Allows logql expressions to be treated as promql expressions by the prometheus rules pkg.\ntype exprAdapter struct {\n\tlogql.Expr\n}\n\nfunc (exprAdapter) PositionRange() parser.PositionRange { return parser.PositionRange{} }\nfunc (exprAdapter) PromQLExpr() {}\nfunc (exprAdapter) Type() parser.ValueType { return parser.ValueType(\"unimplemented\") }\n<|endoftext|>"} {"text":"<commit_before>\/\/ code modified from https:\/\/github.com\/GoogleContainerTools\/kpt\/blob\/master\/internal\/gitutil\/gitutil.go\n\n\/\/ Copyright 2019 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage git\n\nimport (\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/mitchellh\/go-homedir\"\n\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/config\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/constants\"\n\tlatestV1 \"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/schema\/latest\/v1\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/util\"\n)\n\n\/\/ SyncRepo syncs the target git repository with skaffold's local cache and returns the path to the repository root directory.\nvar SyncRepo = syncRepo\nvar findGit = func() (string, error) { return exec.LookPath(\"git\") }\n\n\/\/ defaultRef returns the default ref as \"master\" if master branch exists in\n\/\/ remote repository, falls back to \"main\" if master branch doesn't exist\nfunc defaultRef(repo string) (string, error) {\n\tmasterRef := \"master\"\n\tmainRef := \"main\"\n\tmasterExists, err := branchExists(repo, masterRef)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tmainExists, err := branchExists(repo, mainRef)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif masterExists {\n\t\treturn masterRef, nil\n\t} else if mainExists {\n\t\treturn mainRef, nil\n\t}\n\treturn \"\", fmt.Errorf(\"failed to get default branch for repo %s\", repo)\n}\n\n\/\/ BranchExists checks if branch is present in the input repo\nfunc branchExists(repo, branch string) (bool, error) {\n\tgitProgram, err := findGit()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tout, err := util.RunCmdOut(exec.Command(gitProgram, \"ls-remote\", repo, branch))\n\tif err != nil {\n\t\t\/\/ stdErr contains the error message for os related errors, git permission errors\n\t\t\/\/ and if repo doesn't exist\n\t\treturn false, fmt.Errorf(\"failed to lookup %s branch for repo %s: %w\", branch, repo, err)\n\t}\n\t\/\/ stdOut contains the branch information if the branch is present in remote repo\n\t\/\/ stdOut is empty if the repo doesn't have the input branch\n\tif strings.TrimSpace(string(out)) != \"\" {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}\n\n\/\/ getRepoDir returns the cache directory name for a remote repo\nfunc getRepoDir(g latestV1.GitInfo) (string, error) {\n\tinputs := []string{g.Repo, g.Ref}\n\thasher := sha256.New()\n\tenc := json.NewEncoder(hasher)\n\tif err := enc.Encode(inputs); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn base64.URLEncoding.EncodeToString(hasher.Sum(nil))[:32], nil\n}\n\n\/\/ GetRepoCacheDir returns the directory for the remote git repo cache\nfunc GetRepoCacheDir(opts config.SkaffoldOptions) (string, error) {\n\tif opts.RepoCacheDir != \"\" {\n\t\treturn opts.RepoCacheDir, nil\n\t}\n\n\t\/\/ cache location unspecified, use ~\/.skaffold\/repos\n\thome, err := homedir.Dir()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"retrieving home directory: %w\", err)\n\t}\n\treturn filepath.Join(home, constants.DefaultSkaffoldDir, \"repos\"), nil\n}\n\nfunc syncRepo(g latestV1.GitInfo, opts config.SkaffoldOptions) (string, error) {\n\tskaffoldCacheDir, err := GetRepoCacheDir(opts)\n\tr := gitCmd{Dir: skaffoldCacheDir}\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: %w\", g.Repo, err)\n\t}\n\tif err := os.MkdirAll(skaffoldCacheDir, 0700); err != nil {\n\t\treturn \"\", fmt.Errorf(\n\t\t\t\"failed to clone repo %s: trouble creating cache directory: %w\", g.Repo, err)\n\t}\n\n\tref := g.Ref\n\tif ref == \"\" {\n\t\tref, err = defaultRef(g.Repo)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: trouble getting default branch: %w\", g.Repo, err)\n\t\t}\n\t}\n\n\thash, err := getRepoDir(g)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to clone git repo: unable to create directory name: %w\", err)\n\t}\n\trepoCacheDir := filepath.Join(skaffoldCacheDir, hash)\n\tif _, err := os.Stat(repoCacheDir); os.IsNotExist(err) {\n\t\tif _, err := r.Run(\"clone\", g.Repo, hash, \"--branch\", ref, \"--depth\", \"1\"); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo: %w\", err)\n\t\t}\n\t} else {\n\t\tr.Dir = repoCacheDir\n\t\t\/\/ check remote is defined\n\t\tif remotes, err := r.Run(\"remote\", \"-v\"); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: trouble checking repository remote; run 'git clone <REPO>; stat <DIR\/SUBDIR>' to verify credentials: %w\", g.Repo, err)\n\t\t} else if len(remotes) == 0 {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: remote not set for existing clone\", g.Repo)\n\t\t}\n\n\t\t\/\/ if sync property is false, then skip fetching latest from remote and resetting the branch.\n\t\tif g.Sync != nil && !*g.Sync {\n\t\t\treturn repoCacheDir, nil\n\t\t}\n\n\t\tif _, err = r.Run(\"fetch\", \"origin\", ref); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: unable to find any matching refs %s; run 'git clone <REPO>; stat <DIR\/SUBDIR>' to verify credentials: %w\", g.Repo, ref, err)\n\t\t}\n\n\t\t\/\/ check if the downloaded repo has uncommitted changes.\n\t\tif changes, err := r.Run(\"diff\", \"--name-only\", \"--ignore-submodules\", \"HEAD\"); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: unable to check for uncommitted changes; run 'git clone <REPO>; stat <DIR\/SUBDIR>' to verify credentials: %w\", g.Repo, err)\n\t\t} else if len(changes) > 0 {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: there are uncommitted changes in the target directory %s; either set the repository `sync` property to false in the skaffold config, or manually commit and sync changes to remote, or revert the local changes\", g.Repo, repoCacheDir)\n\t\t}\n\n\t\t\/\/ check if the downloaded repo has unpushed commits.\n\t\tif changes, err := r.Run(\"diff\", \"--name-only\", \"--ignore-submodules\", fmt.Sprintf(\"origin\/%s...\", ref)); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: unable to check for unpushed commits; run 'git clone <REPO>; stat <DIR\/SUBDIR>' to verify credentials: %w\", g.Repo, err)\n\t\t} else if len(changes) > 0 {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: there are unpushed commits in the target directory %s; either set the repository `sync` property to false in the skaffold config, or manually push commits to remote, or reset the local commits\", g.Repo, repoCacheDir)\n\t\t}\n\n\t\t\/\/ reset the repo state\n\t\tif _, err := r.Run(\"reset\", \"--hard\", fmt.Sprintf(\"origin\/%s\", ref)); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: trouble resetting branch to origin\/%s; run 'git clone <REPO>; stat <DIR\/SUBDIR>' to verify credentials: %w\", g.Repo, ref, err)\n\t\t}\n\t}\n\treturn repoCacheDir, nil\n}\n\n\/\/ gitCmd runs git commands in a git repo.\ntype gitCmd struct {\n\t\/\/ Dir is the directory the commands are run in.\n\tDir string\n}\n\n\/\/ Run runs a git command.\n\/\/ Omit the 'git' part of the command.\nfunc (g *gitCmd) Run(args ...string) ([]byte, error) {\n\tp, err := findGit()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"no 'git' program on path: %w\", err)\n\t}\n\n\tcmd := exec.Command(p, args...)\n\tcmd.Dir = g.Dir\n\treturn util.RunCmdOut(cmd)\n}\n<commit_msg>Use StdEncoding for git hash directory name (#6071)<commit_after>\/\/ code modified from https:\/\/github.com\/GoogleContainerTools\/kpt\/blob\/master\/internal\/gitutil\/gitutil.go\n\n\/\/ Copyright 2019 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage git\n\nimport (\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/mitchellh\/go-homedir\"\n\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/config\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/constants\"\n\tlatestV1 \"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/schema\/latest\/v1\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/util\"\n)\n\n\/\/ SyncRepo syncs the target git repository with skaffold's local cache and returns the path to the repository root directory.\nvar SyncRepo = syncRepo\nvar findGit = func() (string, error) { return exec.LookPath(\"git\") }\n\n\/\/ defaultRef returns the default ref as \"master\" if master branch exists in\n\/\/ remote repository, falls back to \"main\" if master branch doesn't exist\nfunc defaultRef(repo string) (string, error) {\n\tmasterRef := \"master\"\n\tmainRef := \"main\"\n\tmasterExists, err := branchExists(repo, masterRef)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tmainExists, err := branchExists(repo, mainRef)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif masterExists {\n\t\treturn masterRef, nil\n\t} else if mainExists {\n\t\treturn mainRef, nil\n\t}\n\treturn \"\", fmt.Errorf(\"failed to get default branch for repo %s\", repo)\n}\n\n\/\/ BranchExists checks if branch is present in the input repo\nfunc branchExists(repo, branch string) (bool, error) {\n\tgitProgram, err := findGit()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tout, err := util.RunCmdOut(exec.Command(gitProgram, \"ls-remote\", repo, branch))\n\tif err != nil {\n\t\t\/\/ stdErr contains the error message for os related errors, git permission errors\n\t\t\/\/ and if repo doesn't exist\n\t\treturn false, fmt.Errorf(\"failed to lookup %s branch for repo %s: %w\", branch, repo, err)\n\t}\n\t\/\/ stdOut contains the branch information if the branch is present in remote repo\n\t\/\/ stdOut is empty if the repo doesn't have the input branch\n\tif strings.TrimSpace(string(out)) != \"\" {\n\t\treturn true, nil\n\t}\n\treturn false, nil\n}\n\n\/\/ getRepoDir returns the cache directory name for a remote repo\nfunc getRepoDir(g latestV1.GitInfo) (string, error) {\n\tinputs := []string{g.Repo, g.Ref}\n\thasher := sha256.New()\n\tenc := json.NewEncoder(hasher)\n\tif err := enc.Encode(inputs); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ UrlEncoding supports '-' as a 63rd character, which can cause dir name issues\n\treturn base64.StdEncoding.EncodeToString(hasher.Sum(nil))[:32], nil\n}\n\n\/\/ GetRepoCacheDir returns the directory for the remote git repo cache\nfunc GetRepoCacheDir(opts config.SkaffoldOptions) (string, error) {\n\tif opts.RepoCacheDir != \"\" {\n\t\treturn opts.RepoCacheDir, nil\n\t}\n\n\t\/\/ cache location unspecified, use ~\/.skaffold\/repos\n\thome, err := homedir.Dir()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"retrieving home directory: %w\", err)\n\t}\n\treturn filepath.Join(home, constants.DefaultSkaffoldDir, \"repos\"), nil\n}\n\nfunc syncRepo(g latestV1.GitInfo, opts config.SkaffoldOptions) (string, error) {\n\tskaffoldCacheDir, err := GetRepoCacheDir(opts)\n\tr := gitCmd{Dir: skaffoldCacheDir}\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: %w\", g.Repo, err)\n\t}\n\tif err := os.MkdirAll(skaffoldCacheDir, 0700); err != nil {\n\t\treturn \"\", fmt.Errorf(\n\t\t\t\"failed to clone repo %s: trouble creating cache directory: %w\", g.Repo, err)\n\t}\n\n\tref := g.Ref\n\tif ref == \"\" {\n\t\tref, err = defaultRef(g.Repo)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: trouble getting default branch: %w\", g.Repo, err)\n\t\t}\n\t}\n\n\thash, err := getRepoDir(g)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to clone git repo: unable to create directory name: %w\", err)\n\t}\n\trepoCacheDir := filepath.Join(skaffoldCacheDir, hash)\n\tif _, err := os.Stat(repoCacheDir); os.IsNotExist(err) {\n\t\tif _, err := r.Run(\"clone\", g.Repo, hash, \"--branch\", ref, \"--depth\", \"1\"); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo: %w\", err)\n\t\t}\n\t} else {\n\t\tr.Dir = repoCacheDir\n\t\t\/\/ check remote is defined\n\t\tif remotes, err := r.Run(\"remote\", \"-v\"); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: trouble checking repository remote; run 'git clone <REPO>; stat <DIR\/SUBDIR>' to verify credentials: %w\", g.Repo, err)\n\t\t} else if len(remotes) == 0 {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: remote not set for existing clone\", g.Repo)\n\t\t}\n\n\t\t\/\/ if sync property is false, then skip fetching latest from remote and resetting the branch.\n\t\tif g.Sync != nil && !*g.Sync {\n\t\t\treturn repoCacheDir, nil\n\t\t}\n\n\t\tif _, err = r.Run(\"fetch\", \"origin\", ref); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: unable to find any matching refs %s; run 'git clone <REPO>; stat <DIR\/SUBDIR>' to verify credentials: %w\", g.Repo, ref, err)\n\t\t}\n\n\t\t\/\/ check if the downloaded repo has uncommitted changes.\n\t\tif changes, err := r.Run(\"diff\", \"--name-only\", \"--ignore-submodules\", \"HEAD\"); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: unable to check for uncommitted changes; run 'git clone <REPO>; stat <DIR\/SUBDIR>' to verify credentials: %w\", g.Repo, err)\n\t\t} else if len(changes) > 0 {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: there are uncommitted changes in the target directory %s; either set the repository `sync` property to false in the skaffold config, or manually commit and sync changes to remote, or revert the local changes\", g.Repo, repoCacheDir)\n\t\t}\n\n\t\t\/\/ check if the downloaded repo has unpushed commits.\n\t\tif changes, err := r.Run(\"diff\", \"--name-only\", \"--ignore-submodules\", fmt.Sprintf(\"origin\/%s...\", ref)); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: unable to check for unpushed commits; run 'git clone <REPO>; stat <DIR\/SUBDIR>' to verify credentials: %w\", g.Repo, err)\n\t\t} else if len(changes) > 0 {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: there are unpushed commits in the target directory %s; either set the repository `sync` property to false in the skaffold config, or manually push commits to remote, or reset the local commits\", g.Repo, repoCacheDir)\n\t\t}\n\n\t\t\/\/ reset the repo state\n\t\tif _, err := r.Run(\"reset\", \"--hard\", fmt.Sprintf(\"origin\/%s\", ref)); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"failed to clone repo %s: trouble resetting branch to origin\/%s; run 'git clone <REPO>; stat <DIR\/SUBDIR>' to verify credentials: %w\", g.Repo, ref, err)\n\t\t}\n\t}\n\treturn repoCacheDir, nil\n}\n\n\/\/ gitCmd runs git commands in a git repo.\ntype gitCmd struct {\n\t\/\/ Dir is the directory the commands are run in.\n\tDir string\n}\n\n\/\/ Run runs a git command.\n\/\/ Omit the 'git' part of the command.\nfunc (g *gitCmd) Run(args ...string) ([]byte, error) {\n\tp, err := findGit()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"no 'git' program on path: %w\", err)\n\t}\n\n\tcmd := exec.Command(p, args...)\n\tcmd.Dir = g.Dir\n\treturn util.RunCmdOut(cmd)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage proxy\n\nimport (\n\t\"bytes\"\n\t\"compress\/flate\"\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"path\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/html\"\n\t\"golang.org\/x\/net\/html\/atom\"\n\t\"k8s.io\/klog\"\n\n\t\"k8s.io\/apimachinery\/pkg\/util\/net\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n)\n\n\/\/ atomsToAttrs states which attributes of which tags require URL substitution.\n\/\/ Sources: http:\/\/www.w3.org\/TR\/REC-html40\/index\/attributes.html\n\/\/ http:\/\/www.w3.org\/html\/wg\/drafts\/html\/master\/index.html#attributes-1\nvar atomsToAttrs = map[atom.Atom]sets.String{\n\tatom.A: sets.NewString(\"href\"),\n\tatom.Applet: sets.NewString(\"codebase\"),\n\tatom.Area: sets.NewString(\"href\"),\n\tatom.Audio: sets.NewString(\"src\"),\n\tatom.Base: sets.NewString(\"href\"),\n\tatom.Blockquote: sets.NewString(\"cite\"),\n\tatom.Body: sets.NewString(\"background\"),\n\tatom.Button: sets.NewString(\"formaction\"),\n\tatom.Command: sets.NewString(\"icon\"),\n\tatom.Del: sets.NewString(\"cite\"),\n\tatom.Embed: sets.NewString(\"src\"),\n\tatom.Form: sets.NewString(\"action\"),\n\tatom.Frame: sets.NewString(\"longdesc\", \"src\"),\n\tatom.Head: sets.NewString(\"profile\"),\n\tatom.Html: sets.NewString(\"manifest\"),\n\tatom.Iframe: sets.NewString(\"longdesc\", \"src\"),\n\tatom.Img: sets.NewString(\"longdesc\", \"src\", \"usemap\"),\n\tatom.Input: sets.NewString(\"src\", \"usemap\", \"formaction\"),\n\tatom.Ins: sets.NewString(\"cite\"),\n\tatom.Link: sets.NewString(\"href\"),\n\tatom.Object: sets.NewString(\"classid\", \"codebase\", \"data\", \"usemap\"),\n\tatom.Q: sets.NewString(\"cite\"),\n\tatom.Script: sets.NewString(\"src\"),\n\tatom.Source: sets.NewString(\"src\"),\n\tatom.Video: sets.NewString(\"poster\", \"src\"),\n\n\t\/\/ TODO: css URLs hidden in style elements.\n}\n\n\/\/ Transport is a transport for text\/html content that replaces URLs in html\n\/\/ content with the prefix of the proxy server\ntype Transport struct {\n\tScheme string\n\tHost string\n\tPathPrepend string\n\n\thttp.RoundTripper\n}\n\n\/\/ RoundTrip implements the http.RoundTripper interface\nfunc (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {\n\t\/\/ Add reverse proxy headers.\n\tforwardedURI := path.Join(t.PathPrepend, req.URL.Path)\n\tif strings.HasSuffix(req.URL.Path, \"\/\") {\n\t\tforwardedURI = forwardedURI + \"\/\"\n\t}\n\treq.Header.Set(\"X-Forwarded-Uri\", forwardedURI)\n\tif len(t.Host) > 0 {\n\t\treq.Header.Set(\"X-Forwarded-Host\", t.Host)\n\t}\n\tif len(t.Scheme) > 0 {\n\t\treq.Header.Set(\"X-Forwarded-Proto\", t.Scheme)\n\t}\n\n\trt := t.RoundTripper\n\tif rt == nil {\n\t\trt = http.DefaultTransport\n\t}\n\tresp, err := rt.RoundTrip(req)\n\n\tif err != nil {\n\t\tmessage := fmt.Sprintf(\"Error: '%s'\\nTrying to reach: '%v'\", err.Error(), req.URL.String())\n\t\tresp = &http.Response{\n\t\t\tStatusCode: http.StatusServiceUnavailable,\n\t\t\tBody: ioutil.NopCloser(strings.NewReader(message)),\n\t\t}\n\t\treturn resp, nil\n\t}\n\n\tif redirect := resp.Header.Get(\"Location\"); redirect != \"\" {\n\t\tresp.Header.Set(\"Location\", t.rewriteURL(redirect, req.URL, req.Host))\n\t\treturn resp, nil\n\t}\n\n\tcType := resp.Header.Get(\"Content-Type\")\n\tcType = strings.TrimSpace(strings.SplitN(cType, \";\", 2)[0])\n\tif cType != \"text\/html\" {\n\t\t\/\/ Do nothing, simply pass through\n\t\treturn resp, nil\n\t}\n\n\treturn t.rewriteResponse(req, resp)\n}\n\nvar _ = net.RoundTripperWrapper(&Transport{})\n\nfunc (rt *Transport) WrappedRoundTripper() http.RoundTripper {\n\treturn rt.RoundTripper\n}\n\n\/\/ rewriteURL rewrites a single URL to go through the proxy, if the URL refers\n\/\/ to the same host as sourceURL, which is the page on which the target URL\n\/\/ occurred, or if the URL matches the sourceRequestHost. If any error occurs (e.g.\n\/\/ parsing), it returns targetURL.\nfunc (t *Transport) rewriteURL(targetURL string, sourceURL *url.URL, sourceRequestHost string) string {\n\turl, err := url.Parse(targetURL)\n\tif err != nil {\n\t\treturn targetURL\n\t}\n\n\t\/\/ Example:\n\t\/\/ When API server processes a proxy request to a service (e.g. \/api\/v1\/namespace\/foo\/service\/bar\/proxy\/),\n\t\/\/ the sourceURL.Host (i.e. req.URL.Host) is the endpoint IP address of the service. The\n\t\/\/ sourceRequestHost (i.e. req.Host) is the Host header that specifies the host on which the\n\t\/\/ URL is sought, which can be different from sourceURL.Host. For example, if user sends the\n\t\/\/ request through \"kubectl proxy\" locally (i.e. localhost:8001\/api\/v1\/namespace\/foo\/service\/bar\/proxy\/),\n\t\/\/ sourceRequestHost is \"localhost:8001\".\n\t\/\/\n\t\/\/ If the service's response URL contains non-empty host, and url.Host is equal to either sourceURL.Host\n\t\/\/ or sourceRequestHost, we should not consider the returned URL to be a completely different host.\n\t\/\/ It's the API server's responsibility to rewrite a same-host-and-absolute-path URL and append the\n\t\/\/ necessary URL prefix (i.e. \/api\/v1\/namespace\/foo\/service\/bar\/proxy\/).\n\tisDifferentHost := url.Host != \"\" && url.Host != sourceURL.Host && url.Host != sourceRequestHost\n\tisRelative := !strings.HasPrefix(url.Path, \"\/\")\n\tif isDifferentHost || isRelative {\n\t\treturn targetURL\n\t}\n\n\t\/\/ Do not rewrite scheme and host if the Transport has empty scheme and host\n\t\/\/ when targetURL already contains the sourceRequestHost\n\tif !(url.Host == sourceRequestHost && t.Scheme == \"\" && t.Host == \"\") {\n\t\turl.Scheme = t.Scheme\n\t\turl.Host = t.Host\n\t}\n\n\torigPath := url.Path\n\t\/\/ Do not rewrite URL if the sourceURL already contains the necessary prefix.\n\tif strings.HasPrefix(url.Path, t.PathPrepend) {\n\t\treturn url.String()\n\t}\n\turl.Path = path.Join(t.PathPrepend, url.Path)\n\tif strings.HasSuffix(origPath, \"\/\") {\n\t\t\/\/ Add back the trailing slash, which was stripped by path.Join().\n\t\turl.Path += \"\/\"\n\t}\n\n\treturn url.String()\n}\n\n\/\/ rewriteHTML scans the HTML for tags with url-valued attributes, and updates\n\/\/ those values with the urlRewriter function. The updated HTML is output to the\n\/\/ writer.\nfunc rewriteHTML(reader io.Reader, writer io.Writer, urlRewriter func(string) string) error {\n\t\/\/ Note: This assumes the content is UTF-8.\n\ttokenizer := html.NewTokenizer(reader)\n\n\tvar err error\n\tfor err == nil {\n\t\ttokenType := tokenizer.Next()\n\t\tswitch tokenType {\n\t\tcase html.ErrorToken:\n\t\t\terr = tokenizer.Err()\n\t\tcase html.StartTagToken, html.SelfClosingTagToken:\n\t\t\ttoken := tokenizer.Token()\n\t\t\tif urlAttrs, ok := atomsToAttrs[token.DataAtom]; ok {\n\t\t\t\tfor i, attr := range token.Attr {\n\t\t\t\t\tif urlAttrs.Has(attr.Key) {\n\t\t\t\t\t\ttoken.Attr[i].Val = urlRewriter(attr.Val)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t_, err = writer.Write([]byte(token.String()))\n\t\tdefault:\n\t\t\t_, err = writer.Write(tokenizer.Raw())\n\t\t}\n\t}\n\tif err != io.EOF {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ rewriteResponse modifies an HTML response by updating absolute links referring\n\/\/ to the original host to instead refer to the proxy transport.\nfunc (t *Transport) rewriteResponse(req *http.Request, resp *http.Response) (*http.Response, error) {\n\torigBody := resp.Body\n\tdefer origBody.Close()\n\n\tnewContent := &bytes.Buffer{}\n\tvar reader io.Reader = origBody\n\tvar writer io.Writer = newContent\n\tencoding := resp.Header.Get(\"Content-Encoding\")\n\tswitch encoding {\n\tcase \"gzip\":\n\t\tvar err error\n\t\treader, err = gzip.NewReader(reader)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"errorf making gzip reader: %v\", err)\n\t\t}\n\t\tgzw := gzip.NewWriter(writer)\n\t\tdefer gzw.Close()\n\t\twriter = gzw\n\tcase \"deflate\":\n\t\tvar err error\n\t\treader = flate.NewReader(reader)\n\t\tflw, err := flate.NewWriter(writer, flate.BestCompression)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"errorf making flate writer: %v\", err)\n\t\t}\n\t\tdefer func() {\n\t\t\tflw.Close()\n\t\t\tflw.Flush()\n\t\t}()\n\t\twriter = flw\n\tcase \"\":\n\t\t\/\/ This is fine\n\tdefault:\n\t\t\/\/ Some encoding we don't understand-- don't try to parse this\n\t\tklog.Errorf(\"Proxy encountered encoding %v for text\/html; can't understand this so not fixing links.\", encoding)\n\t\treturn resp, nil\n\t}\n\n\turlRewriter := func(targetUrl string) string {\n\t\treturn t.rewriteURL(targetUrl, req.URL, req.Host)\n\t}\n\terr := rewriteHTML(reader, writer, urlRewriter)\n\tif err != nil {\n\t\tklog.Errorf(\"Failed to rewrite URLs: %v\", err)\n\t\treturn resp, err\n\t}\n\n\tresp.Body = ioutil.NopCloser(newContent)\n\t\/\/ Update header node with new content-length\n\t\/\/ TODO: Remove any hash\/signature headers here?\n\tresp.Header.Del(\"Content-Length\")\n\tresp.ContentLength = int64(newContent.Len())\n\n\treturn resp, err\n}\n<commit_msg>Avoid echoing request URL in proxy error<commit_after>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage proxy\n\nimport (\n\t\"bytes\"\n\t\"compress\/flate\"\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"path\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/html\"\n\t\"golang.org\/x\/net\/html\/atom\"\n\t\"k8s.io\/klog\"\n\n\t\"k8s.io\/apimachinery\/pkg\/util\/net\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n)\n\n\/\/ atomsToAttrs states which attributes of which tags require URL substitution.\n\/\/ Sources: http:\/\/www.w3.org\/TR\/REC-html40\/index\/attributes.html\n\/\/ http:\/\/www.w3.org\/html\/wg\/drafts\/html\/master\/index.html#attributes-1\nvar atomsToAttrs = map[atom.Atom]sets.String{\n\tatom.A: sets.NewString(\"href\"),\n\tatom.Applet: sets.NewString(\"codebase\"),\n\tatom.Area: sets.NewString(\"href\"),\n\tatom.Audio: sets.NewString(\"src\"),\n\tatom.Base: sets.NewString(\"href\"),\n\tatom.Blockquote: sets.NewString(\"cite\"),\n\tatom.Body: sets.NewString(\"background\"),\n\tatom.Button: sets.NewString(\"formaction\"),\n\tatom.Command: sets.NewString(\"icon\"),\n\tatom.Del: sets.NewString(\"cite\"),\n\tatom.Embed: sets.NewString(\"src\"),\n\tatom.Form: sets.NewString(\"action\"),\n\tatom.Frame: sets.NewString(\"longdesc\", \"src\"),\n\tatom.Head: sets.NewString(\"profile\"),\n\tatom.Html: sets.NewString(\"manifest\"),\n\tatom.Iframe: sets.NewString(\"longdesc\", \"src\"),\n\tatom.Img: sets.NewString(\"longdesc\", \"src\", \"usemap\"),\n\tatom.Input: sets.NewString(\"src\", \"usemap\", \"formaction\"),\n\tatom.Ins: sets.NewString(\"cite\"),\n\tatom.Link: sets.NewString(\"href\"),\n\tatom.Object: sets.NewString(\"classid\", \"codebase\", \"data\", \"usemap\"),\n\tatom.Q: sets.NewString(\"cite\"),\n\tatom.Script: sets.NewString(\"src\"),\n\tatom.Source: sets.NewString(\"src\"),\n\tatom.Video: sets.NewString(\"poster\", \"src\"),\n\n\t\/\/ TODO: css URLs hidden in style elements.\n}\n\n\/\/ Transport is a transport for text\/html content that replaces URLs in html\n\/\/ content with the prefix of the proxy server\ntype Transport struct {\n\tScheme string\n\tHost string\n\tPathPrepend string\n\n\thttp.RoundTripper\n}\n\n\/\/ RoundTrip implements the http.RoundTripper interface\nfunc (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {\n\t\/\/ Add reverse proxy headers.\n\tforwardedURI := path.Join(t.PathPrepend, req.URL.Path)\n\tif strings.HasSuffix(req.URL.Path, \"\/\") {\n\t\tforwardedURI = forwardedURI + \"\/\"\n\t}\n\treq.Header.Set(\"X-Forwarded-Uri\", forwardedURI)\n\tif len(t.Host) > 0 {\n\t\treq.Header.Set(\"X-Forwarded-Host\", t.Host)\n\t}\n\tif len(t.Scheme) > 0 {\n\t\treq.Header.Set(\"X-Forwarded-Proto\", t.Scheme)\n\t}\n\n\trt := t.RoundTripper\n\tif rt == nil {\n\t\trt = http.DefaultTransport\n\t}\n\tresp, err := rt.RoundTrip(req)\n\n\tif err != nil {\n\t\tmessage := fmt.Sprintf(\"Error trying to reach service: '%v'\", err.Error())\n\t\tresp = &http.Response{\n\t\t\tHeader: http.Header{},\n\t\t\tStatusCode: http.StatusServiceUnavailable,\n\t\t\tBody: ioutil.NopCloser(strings.NewReader(message)),\n\t\t}\n\t\tresp.Header.Set(\"Content-Type\", \"text\/plain; charset=utf-8\")\n\t\tresp.Header.Set(\"X-Content-Type-Options\", \"nosniff\")\n\t\treturn resp, nil\n\t}\n\n\tif redirect := resp.Header.Get(\"Location\"); redirect != \"\" {\n\t\tresp.Header.Set(\"Location\", t.rewriteURL(redirect, req.URL, req.Host))\n\t\treturn resp, nil\n\t}\n\n\tcType := resp.Header.Get(\"Content-Type\")\n\tcType = strings.TrimSpace(strings.SplitN(cType, \";\", 2)[0])\n\tif cType != \"text\/html\" {\n\t\t\/\/ Do nothing, simply pass through\n\t\treturn resp, nil\n\t}\n\n\treturn t.rewriteResponse(req, resp)\n}\n\nvar _ = net.RoundTripperWrapper(&Transport{})\n\nfunc (rt *Transport) WrappedRoundTripper() http.RoundTripper {\n\treturn rt.RoundTripper\n}\n\n\/\/ rewriteURL rewrites a single URL to go through the proxy, if the URL refers\n\/\/ to the same host as sourceURL, which is the page on which the target URL\n\/\/ occurred, or if the URL matches the sourceRequestHost. If any error occurs (e.g.\n\/\/ parsing), it returns targetURL.\nfunc (t *Transport) rewriteURL(targetURL string, sourceURL *url.URL, sourceRequestHost string) string {\n\turl, err := url.Parse(targetURL)\n\tif err != nil {\n\t\treturn targetURL\n\t}\n\n\t\/\/ Example:\n\t\/\/ When API server processes a proxy request to a service (e.g. \/api\/v1\/namespace\/foo\/service\/bar\/proxy\/),\n\t\/\/ the sourceURL.Host (i.e. req.URL.Host) is the endpoint IP address of the service. The\n\t\/\/ sourceRequestHost (i.e. req.Host) is the Host header that specifies the host on which the\n\t\/\/ URL is sought, which can be different from sourceURL.Host. For example, if user sends the\n\t\/\/ request through \"kubectl proxy\" locally (i.e. localhost:8001\/api\/v1\/namespace\/foo\/service\/bar\/proxy\/),\n\t\/\/ sourceRequestHost is \"localhost:8001\".\n\t\/\/\n\t\/\/ If the service's response URL contains non-empty host, and url.Host is equal to either sourceURL.Host\n\t\/\/ or sourceRequestHost, we should not consider the returned URL to be a completely different host.\n\t\/\/ It's the API server's responsibility to rewrite a same-host-and-absolute-path URL and append the\n\t\/\/ necessary URL prefix (i.e. \/api\/v1\/namespace\/foo\/service\/bar\/proxy\/).\n\tisDifferentHost := url.Host != \"\" && url.Host != sourceURL.Host && url.Host != sourceRequestHost\n\tisRelative := !strings.HasPrefix(url.Path, \"\/\")\n\tif isDifferentHost || isRelative {\n\t\treturn targetURL\n\t}\n\n\t\/\/ Do not rewrite scheme and host if the Transport has empty scheme and host\n\t\/\/ when targetURL already contains the sourceRequestHost\n\tif !(url.Host == sourceRequestHost && t.Scheme == \"\" && t.Host == \"\") {\n\t\turl.Scheme = t.Scheme\n\t\turl.Host = t.Host\n\t}\n\n\torigPath := url.Path\n\t\/\/ Do not rewrite URL if the sourceURL already contains the necessary prefix.\n\tif strings.HasPrefix(url.Path, t.PathPrepend) {\n\t\treturn url.String()\n\t}\n\turl.Path = path.Join(t.PathPrepend, url.Path)\n\tif strings.HasSuffix(origPath, \"\/\") {\n\t\t\/\/ Add back the trailing slash, which was stripped by path.Join().\n\t\turl.Path += \"\/\"\n\t}\n\n\treturn url.String()\n}\n\n\/\/ rewriteHTML scans the HTML for tags with url-valued attributes, and updates\n\/\/ those values with the urlRewriter function. The updated HTML is output to the\n\/\/ writer.\nfunc rewriteHTML(reader io.Reader, writer io.Writer, urlRewriter func(string) string) error {\n\t\/\/ Note: This assumes the content is UTF-8.\n\ttokenizer := html.NewTokenizer(reader)\n\n\tvar err error\n\tfor err == nil {\n\t\ttokenType := tokenizer.Next()\n\t\tswitch tokenType {\n\t\tcase html.ErrorToken:\n\t\t\terr = tokenizer.Err()\n\t\tcase html.StartTagToken, html.SelfClosingTagToken:\n\t\t\ttoken := tokenizer.Token()\n\t\t\tif urlAttrs, ok := atomsToAttrs[token.DataAtom]; ok {\n\t\t\t\tfor i, attr := range token.Attr {\n\t\t\t\t\tif urlAttrs.Has(attr.Key) {\n\t\t\t\t\t\ttoken.Attr[i].Val = urlRewriter(attr.Val)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t_, err = writer.Write([]byte(token.String()))\n\t\tdefault:\n\t\t\t_, err = writer.Write(tokenizer.Raw())\n\t\t}\n\t}\n\tif err != io.EOF {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ rewriteResponse modifies an HTML response by updating absolute links referring\n\/\/ to the original host to instead refer to the proxy transport.\nfunc (t *Transport) rewriteResponse(req *http.Request, resp *http.Response) (*http.Response, error) {\n\torigBody := resp.Body\n\tdefer origBody.Close()\n\n\tnewContent := &bytes.Buffer{}\n\tvar reader io.Reader = origBody\n\tvar writer io.Writer = newContent\n\tencoding := resp.Header.Get(\"Content-Encoding\")\n\tswitch encoding {\n\tcase \"gzip\":\n\t\tvar err error\n\t\treader, err = gzip.NewReader(reader)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"errorf making gzip reader: %v\", err)\n\t\t}\n\t\tgzw := gzip.NewWriter(writer)\n\t\tdefer gzw.Close()\n\t\twriter = gzw\n\tcase \"deflate\":\n\t\tvar err error\n\t\treader = flate.NewReader(reader)\n\t\tflw, err := flate.NewWriter(writer, flate.BestCompression)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"errorf making flate writer: %v\", err)\n\t\t}\n\t\tdefer func() {\n\t\t\tflw.Close()\n\t\t\tflw.Flush()\n\t\t}()\n\t\twriter = flw\n\tcase \"\":\n\t\t\/\/ This is fine\n\tdefault:\n\t\t\/\/ Some encoding we don't understand-- don't try to parse this\n\t\tklog.Errorf(\"Proxy encountered encoding %v for text\/html; can't understand this so not fixing links.\", encoding)\n\t\treturn resp, nil\n\t}\n\n\turlRewriter := func(targetUrl string) string {\n\t\treturn t.rewriteURL(targetUrl, req.URL, req.Host)\n\t}\n\terr := rewriteHTML(reader, writer, urlRewriter)\n\tif err != nil {\n\t\tklog.Errorf(\"Failed to rewrite URLs: %v\", err)\n\t\treturn resp, err\n\t}\n\n\tresp.Body = ioutil.NopCloser(newContent)\n\t\/\/ Update header node with new content-length\n\t\/\/ TODO: Remove any hash\/signature headers here?\n\tresp.Header.Del(\"Content-Length\")\n\tresp.ContentLength = int64(newContent.Len())\n\n\treturn resp, err\n}\n<|endoftext|>"} {"text":"<commit_before>package gen\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"path\/filepath\"\n\n\t\"github.com\/adamdecaf\/cert-manage\/pkg\/file\"\n\t\"github.com\/go-sqlite\/sqlite3\"\n)\n\nvar (\n\tchromeProfileLocations = []string{\n\t\tfilepath.Join(file.HomeDir(), `\/Library\/Application Support\/Google\/Chrome\/Default\/History`), \/\/ OSX not signed in\n\t\t\/\/ C:\\Users\\%USERNAME%\\AppData\\Local\\Google\\Chrome\\User Data\\Default\\Preferences \/\/ TODO\n\t}\n)\n\nfunc chrome() ([]*url.URL, error) {\n\thistory, err := findChromeHistoryFile()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn getChromeUrls(history)\n}\n\nfunc findChromeHistoryFile() (string, error) {\n\tfor i := range chromeProfileLocations {\n\t\tif file.Exists(chromeProfileLocations[i]) {\n\t\t\treturn chromeProfileLocations[i], nil\n\t\t}\n\t}\n\treturn \"\", errors.New(\"unable to find chrome History file\")\n}\n\nfunc getChromeUrls(placesPath string) ([]*url.URL, error) {\n\tdb, err := sqlite3.Open(placesPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar acc []*url.URL\n\terr = db.VisitTableRecords(\"urls\", func(rowId *int64, rec sqlite3.Record) error {\n\t\tif rowId == nil {\n\t\t\treturn fmt.Errorf(\"unexpected nil RowID in Chrome sqlite database\")\n\t\t}\n\n\t\tu, ok := rec.Values[1].(string)\n\t\tif !ok && debug {\n\t\t\tfmt.Printf(\"whitelist\/gen: (chrome) unknown rec.Values[1], %v\\n\", rec.Values[1])\n\t\t}\n\t\tparsed, err := url.Parse(u)\n\t\tif err == nil {\n\t\t\tacc = append(acc, parsed)\n\t\t}\n\t\tif err != nil && debug {\n\t\t\tfmt.Printf(\"whitelist\/gen: (chrome) error parsing %q, err=%v\\n\", u, err)\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn acc, nil\n}\n<commit_msg>whitelist\/gen: todo for chrome paths, needs testing<commit_after>package gen\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"path\/filepath\"\n\n\t\"github.com\/adamdecaf\/cert-manage\/pkg\/file\"\n\t\"github.com\/go-sqlite\/sqlite3\"\n)\n\nvar (\n\tchromeProfileLocations = []string{\n\t\tfilepath.Join(file.HomeDir(), `\/Library\/Application Support\/Google\/Chrome\/Default\/History`), \/\/ OSX not signed in\n\t\t\/\/ TODO(adam):\n\t\t\/\/ Linux: \/home\/$USER\/.config\/google-chrome\/\n\t\t\/\/ Linux: \/home\/$USER\/.config\/chromium\/\n\t\t\/\/ Windows Vista (and Win 7): C:\\Users\\[USERNAME]\\AppData\\Local\\Google\\Chrome\\\n\t\t\/\/ Windows XP: C:\\Documents and Settings\\[USERNAME]\\Local Settings\\Application Data\\Google\\Chrome\\\n\t\t\/\/ Win 8+: C:\\Users\\%USERNAME%\\AppData\\Local\\Google\\Chrome\\User Data\\Default\\Preferences\n\t}\n)\n\nfunc chrome() ([]*url.URL, error) {\n\thistory, err := findChromeHistoryFile()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn getChromeUrls(history)\n}\n\nfunc findChromeHistoryFile() (string, error) {\n\tfor i := range chromeProfileLocations {\n\t\tif file.Exists(chromeProfileLocations[i]) {\n\t\t\treturn chromeProfileLocations[i], nil\n\t\t}\n\t}\n\treturn \"\", errors.New(\"unable to find chrome History file\")\n}\n\nfunc getChromeUrls(placesPath string) ([]*url.URL, error) {\n\tdb, err := sqlite3.Open(placesPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar acc []*url.URL\n\terr = db.VisitTableRecords(\"urls\", func(rowId *int64, rec sqlite3.Record) error {\n\t\tif rowId == nil {\n\t\t\treturn fmt.Errorf(\"unexpected nil RowID in Chrome sqlite database\")\n\t\t}\n\n\t\tu, ok := rec.Values[1].(string)\n\t\tif !ok && debug {\n\t\t\tfmt.Printf(\"whitelist\/gen: (chrome) unknown rec.Values[1], %v\\n\", rec.Values[1])\n\t\t}\n\t\tparsed, err := url.Parse(u)\n\t\tif err == nil {\n\t\t\tacc = append(acc, parsed)\n\t\t}\n\t\tif err != nil && debug {\n\t\t\tfmt.Printf(\"whitelist\/gen: (chrome) error parsing %q, err=%v\\n\", u, err)\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn acc, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package local\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/errwrap\"\n\t\"github.com\/hashicorp\/go-multierror\"\n\t\"github.com\/hashicorp\/terraform\/backend\"\n\t\"github.com\/hashicorp\/terraform\/command\/clistate\"\n\t\"github.com\/hashicorp\/terraform\/config\/module\"\n\t\"github.com\/hashicorp\/terraform\/state\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\nfunc (b *Local) opApply(\n\tctx context.Context,\n\top *backend.Operation,\n\trunningOp *backend.RunningOperation) {\n\tlog.Printf(\"[INFO] backend\/local: starting Apply operation\")\n\n\t\/\/ If we have a nil module at this point, then set it to an empty tree\n\t\/\/ to avoid any potential crashes.\n\tif op.Plan == nil && op.Module == nil && !op.Destroy {\n\t\trunningOp.Err = fmt.Errorf(strings.TrimSpace(applyErrNoConfig))\n\t\treturn\n\t}\n\n\t\/\/ If we have a nil module at this point, then set it to an empty tree\n\t\/\/ to avoid any potential crashes.\n\tif op.Module == nil {\n\t\top.Module = module.NewEmptyTree()\n\t}\n\n\t\/\/ Setup our count hook that keeps track of resource changes\n\tcountHook := new(CountHook)\n\tstateHook := new(StateHook)\n\tif b.ContextOpts == nil {\n\t\tb.ContextOpts = new(terraform.ContextOpts)\n\t}\n\told := b.ContextOpts.Hooks\n\tdefer func() { b.ContextOpts.Hooks = old }()\n\tb.ContextOpts.Hooks = append(b.ContextOpts.Hooks, countHook, stateHook)\n\n\t\/\/ Get our context\n\ttfCtx, opState, err := b.context(op)\n\tif err != nil {\n\t\trunningOp.Err = err\n\t\treturn\n\t}\n\n\tif op.LockState {\n\t\tlockCtx, cancel := context.WithTimeout(ctx, op.StateLockTimeout)\n\t\tdefer cancel()\n\n\t\tlockInfo := state.NewLockInfo()\n\t\tlockInfo.Operation = op.Type.String()\n\t\tlockID, err := clistate.Lock(lockCtx, opState, lockInfo, b.CLI, b.Colorize())\n\t\tif err != nil {\n\t\t\trunningOp.Err = errwrap.Wrapf(\"Error locking state: {{err}}\", err)\n\t\t\treturn\n\t\t}\n\n\t\tdefer func() {\n\t\t\tif err := clistate.Unlock(opState, lockID, b.CLI, b.Colorize()); err != nil {\n\t\t\t\trunningOp.Err = multierror.Append(runningOp.Err, err)\n\t\t\t}\n\t\t}()\n\t}\n\n\t\/\/ Setup the state\n\trunningOp.State = tfCtx.State()\n\n\t\/\/ If we weren't given a plan, then we refresh\/plan\n\tif op.Plan == nil {\n\t\t\/\/ If we're refreshing before apply, perform that\n\t\tif op.PlanRefresh {\n\t\t\tlog.Printf(\"[INFO] backend\/local: apply calling Refresh\")\n\t\t\t_, err := tfCtx.Refresh()\n\t\t\tif err != nil {\n\t\t\t\trunningOp.Err = errwrap.Wrapf(\"Error refreshing state: {{err}}\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Perform the plan\n\t\tlog.Printf(\"[INFO] backend\/local: apply calling Plan\")\n\t\tif _, err := tfCtx.Plan(); err != nil {\n\t\t\trunningOp.Err = errwrap.Wrapf(\"Error running plan: {{err}}\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Setup our hook for continuous state updates\n\tstateHook.State = opState\n\n\t\/\/ Start the apply in a goroutine so that we can be interrupted.\n\tvar applyState *terraform.State\n\tvar applyErr error\n\tdoneCh := make(chan struct{})\n\tgo func() {\n\t\tdefer close(doneCh)\n\t\t_, applyErr = tfCtx.Apply()\n\t\t\/\/ we always want the state, even if apply failed\n\t\tapplyState = tfCtx.State()\n\n\t\t\/*\n\t\t\t\/\/ Record any shadow errors for later\n\t\t\tif err := ctx.ShadowError(); err != nil {\n\t\t\t\tshadowErr = multierror.Append(shadowErr, multierror.Prefix(\n\t\t\t\t\terr, \"apply operation:\"))\n\t\t\t}\n\t\t*\/\n\t}()\n\n\t\/\/ Wait for the apply to finish or for us to be interrupted so\n\t\/\/ we can handle it properly.\n\terr = nil\n\tselect {\n\tcase <-ctx.Done():\n\t\tif b.CLI != nil {\n\t\t\tb.CLI.Output(\"Interrupt received. Gracefully shutting down...\")\n\t\t}\n\n\t\t\/\/ Stop execution\n\t\tgo tfCtx.Stop()\n\n\t\t\/\/ Wait for completion still\n\t\t<-doneCh\n\tcase <-doneCh:\n\t}\n\n\t\/\/ Store the final state\n\trunningOp.State = applyState\n\n\t\/\/ Persist the state\n\tif err := opState.WriteState(applyState); err != nil {\n\t\trunningOp.Err = fmt.Errorf(\"Failed to save state: %s\", err)\n\t\treturn\n\t}\n\tif err := opState.PersistState(); err != nil {\n\t\trunningOp.Err = fmt.Errorf(\"Failed to save state: %s\", err)\n\t\treturn\n\t}\n\n\tif applyErr != nil {\n\t\trunningOp.Err = fmt.Errorf(\n\t\t\t\"Error applying plan:\\n\\n\"+\n\t\t\t\t\"%s\\n\\n\"+\n\t\t\t\t\"Terraform does not automatically rollback in the face of errors.\\n\"+\n\t\t\t\t\"Instead, your Terraform state file has been partially updated with\\n\"+\n\t\t\t\t\"any resources that successfully completed. Please address the error\\n\"+\n\t\t\t\t\"above and apply again to incrementally change your infrastructure.\",\n\t\t\tmultierror.Flatten(applyErr))\n\t\treturn\n\t}\n\n\t\/\/ If we have a UI, output the results\n\tif b.CLI != nil {\n\t\tif op.Destroy {\n\t\t\tb.CLI.Output(b.Colorize().Color(fmt.Sprintf(\n\t\t\t\t\"[reset][bold][green]\\n\"+\n\t\t\t\t\t\"Destroy complete! Resources: %d destroyed.\",\n\t\t\t\tcountHook.Removed)))\n\t\t} else {\n\t\t\tb.CLI.Output(b.Colorize().Color(fmt.Sprintf(\n\t\t\t\t\"[reset][bold][green]\\n\"+\n\t\t\t\t\t\"Apply complete! Resources: %d added, %d changed, %d destroyed.\",\n\t\t\t\tcountHook.Added,\n\t\t\t\tcountHook.Changed,\n\t\t\t\tcountHook.Removed)))\n\t\t}\n\n\t\tif countHook.Added > 0 || countHook.Changed > 0 {\n\t\t\tb.CLI.Output(b.Colorize().Color(fmt.Sprintf(\n\t\t\t\t\"[reset]\\n\"+\n\t\t\t\t\t\"The state of your infrastructure has been saved to the path\\n\"+\n\t\t\t\t\t\"below. This state is required to modify and destroy your\\n\"+\n\t\t\t\t\t\"infrastructure, so keep it safe. To inspect the complete state\\n\"+\n\t\t\t\t\t\"use the `terraform show` command.\\n\\n\"+\n\t\t\t\t\t\"State path: %s\",\n\t\t\t\tb.StateOutPath)))\n\t\t}\n\t}\n}\n\nconst applyErrNoConfig = `\nNo configuration files found!\n\nApply requires configuration to be present. Applying without a configuration\nwould mark everything for destruction, which is normally not what is desired.\nIf you would like to destroy everything, please run 'terraform destroy' instead\nwhich does not require any configuration files.\n`\n<commit_msg>remove redundant output when interrupting apply<commit_after>package local\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/errwrap\"\n\t\"github.com\/hashicorp\/go-multierror\"\n\t\"github.com\/hashicorp\/terraform\/backend\"\n\t\"github.com\/hashicorp\/terraform\/command\/clistate\"\n\t\"github.com\/hashicorp\/terraform\/config\/module\"\n\t\"github.com\/hashicorp\/terraform\/state\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\nfunc (b *Local) opApply(\n\tctx context.Context,\n\top *backend.Operation,\n\trunningOp *backend.RunningOperation) {\n\tlog.Printf(\"[INFO] backend\/local: starting Apply operation\")\n\n\t\/\/ If we have a nil module at this point, then set it to an empty tree\n\t\/\/ to avoid any potential crashes.\n\tif op.Plan == nil && op.Module == nil && !op.Destroy {\n\t\trunningOp.Err = fmt.Errorf(strings.TrimSpace(applyErrNoConfig))\n\t\treturn\n\t}\n\n\t\/\/ If we have a nil module at this point, then set it to an empty tree\n\t\/\/ to avoid any potential crashes.\n\tif op.Module == nil {\n\t\top.Module = module.NewEmptyTree()\n\t}\n\n\t\/\/ Setup our count hook that keeps track of resource changes\n\tcountHook := new(CountHook)\n\tstateHook := new(StateHook)\n\tif b.ContextOpts == nil {\n\t\tb.ContextOpts = new(terraform.ContextOpts)\n\t}\n\told := b.ContextOpts.Hooks\n\tdefer func() { b.ContextOpts.Hooks = old }()\n\tb.ContextOpts.Hooks = append(b.ContextOpts.Hooks, countHook, stateHook)\n\n\t\/\/ Get our context\n\ttfCtx, opState, err := b.context(op)\n\tif err != nil {\n\t\trunningOp.Err = err\n\t\treturn\n\t}\n\n\tif op.LockState {\n\t\tlockCtx, cancel := context.WithTimeout(ctx, op.StateLockTimeout)\n\t\tdefer cancel()\n\n\t\tlockInfo := state.NewLockInfo()\n\t\tlockInfo.Operation = op.Type.String()\n\t\tlockID, err := clistate.Lock(lockCtx, opState, lockInfo, b.CLI, b.Colorize())\n\t\tif err != nil {\n\t\t\trunningOp.Err = errwrap.Wrapf(\"Error locking state: {{err}}\", err)\n\t\t\treturn\n\t\t}\n\n\t\tdefer func() {\n\t\t\tif err := clistate.Unlock(opState, lockID, b.CLI, b.Colorize()); err != nil {\n\t\t\t\trunningOp.Err = multierror.Append(runningOp.Err, err)\n\t\t\t}\n\t\t}()\n\t}\n\n\t\/\/ Setup the state\n\trunningOp.State = tfCtx.State()\n\n\t\/\/ If we weren't given a plan, then we refresh\/plan\n\tif op.Plan == nil {\n\t\t\/\/ If we're refreshing before apply, perform that\n\t\tif op.PlanRefresh {\n\t\t\tlog.Printf(\"[INFO] backend\/local: apply calling Refresh\")\n\t\t\t_, err := tfCtx.Refresh()\n\t\t\tif err != nil {\n\t\t\t\trunningOp.Err = errwrap.Wrapf(\"Error refreshing state: {{err}}\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Perform the plan\n\t\tlog.Printf(\"[INFO] backend\/local: apply calling Plan\")\n\t\tif _, err := tfCtx.Plan(); err != nil {\n\t\t\trunningOp.Err = errwrap.Wrapf(\"Error running plan: {{err}}\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Setup our hook for continuous state updates\n\tstateHook.State = opState\n\n\t\/\/ Start the apply in a goroutine so that we can be interrupted.\n\tvar applyState *terraform.State\n\tvar applyErr error\n\tdoneCh := make(chan struct{})\n\tgo func() {\n\t\tdefer close(doneCh)\n\t\t_, applyErr = tfCtx.Apply()\n\t\t\/\/ we always want the state, even if apply failed\n\t\tapplyState = tfCtx.State()\n\n\t\t\/*\n\t\t\t\/\/ Record any shadow errors for later\n\t\t\tif err := ctx.ShadowError(); err != nil {\n\t\t\t\tshadowErr = multierror.Append(shadowErr, multierror.Prefix(\n\t\t\t\t\terr, \"apply operation:\"))\n\t\t\t}\n\t\t*\/\n\t}()\n\n\t\/\/ Wait for the apply to finish or for us to be interrupted so\n\t\/\/ we can handle it properly.\n\terr = nil\n\tselect {\n\tcase <-ctx.Done():\n\t\tif b.CLI != nil {\n\t\t\tb.CLI.Output(\"stopping apply operation...\")\n\t\t}\n\n\t\t\/\/ Stop execution\n\t\tgo tfCtx.Stop()\n\n\t\t\/\/ Wait for completion still\n\t\t<-doneCh\n\tcase <-doneCh:\n\t}\n\n\t\/\/ Store the final state\n\trunningOp.State = applyState\n\n\t\/\/ Persist the state\n\tif err := opState.WriteState(applyState); err != nil {\n\t\trunningOp.Err = fmt.Errorf(\"Failed to save state: %s\", err)\n\t\treturn\n\t}\n\tif err := opState.PersistState(); err != nil {\n\t\trunningOp.Err = fmt.Errorf(\"Failed to save state: %s\", err)\n\t\treturn\n\t}\n\n\tif applyErr != nil {\n\t\trunningOp.Err = fmt.Errorf(\n\t\t\t\"Error applying plan:\\n\\n\"+\n\t\t\t\t\"%s\\n\\n\"+\n\t\t\t\t\"Terraform does not automatically rollback in the face of errors.\\n\"+\n\t\t\t\t\"Instead, your Terraform state file has been partially updated with\\n\"+\n\t\t\t\t\"any resources that successfully completed. Please address the error\\n\"+\n\t\t\t\t\"above and apply again to incrementally change your infrastructure.\",\n\t\t\tmultierror.Flatten(applyErr))\n\t\treturn\n\t}\n\n\t\/\/ If we have a UI, output the results\n\tif b.CLI != nil {\n\t\tif op.Destroy {\n\t\t\tb.CLI.Output(b.Colorize().Color(fmt.Sprintf(\n\t\t\t\t\"[reset][bold][green]\\n\"+\n\t\t\t\t\t\"Destroy complete! Resources: %d destroyed.\",\n\t\t\t\tcountHook.Removed)))\n\t\t} else {\n\t\t\tb.CLI.Output(b.Colorize().Color(fmt.Sprintf(\n\t\t\t\t\"[reset][bold][green]\\n\"+\n\t\t\t\t\t\"Apply complete! Resources: %d added, %d changed, %d destroyed.\",\n\t\t\t\tcountHook.Added,\n\t\t\t\tcountHook.Changed,\n\t\t\t\tcountHook.Removed)))\n\t\t}\n\n\t\tif countHook.Added > 0 || countHook.Changed > 0 {\n\t\t\tb.CLI.Output(b.Colorize().Color(fmt.Sprintf(\n\t\t\t\t\"[reset]\\n\"+\n\t\t\t\t\t\"The state of your infrastructure has been saved to the path\\n\"+\n\t\t\t\t\t\"below. This state is required to modify and destroy your\\n\"+\n\t\t\t\t\t\"infrastructure, so keep it safe. To inspect the complete state\\n\"+\n\t\t\t\t\t\"use the `terraform show` command.\\n\\n\"+\n\t\t\t\t\t\"State path: %s\",\n\t\t\t\tb.StateOutPath)))\n\t\t}\n\t}\n}\n\nconst applyErrNoConfig = `\nNo configuration files found!\n\nApply requires configuration to be present. Applying without a configuration\nwould mark everything for destruction, which is normally not what is desired.\nIf you would like to destroy everything, please run 'terraform destroy' instead\nwhich does not require any configuration files.\n`\n<|endoftext|>"} {"text":"<commit_before>package internal\n\nimport \"go.uber.org\/yarpc\"\n\n\/\/ RemoveVariableHeaderKeys removes any headers that might have been added by tracing\nfunc RemoveVariableHeaderKeys(headers yarpc.Headers) yarpc.Headers {\n\theaders.Del(\"$tracing$uber-trace-id\")\n\tif headers.Len() == 0 {\n\t\treturn yarpc.NewHeaders()\n\t}\n\treturn headers\n}\n\n\/\/ RemoveVariableMapKeys removes any headers that might have been added by tracing\nfunc RemoveVariableMapKeys(headers map[string]string) map[string]string {\n\tdelete(headers, \"$tracing$uber-trace-id\")\n\treturn headers\n}\n<commit_msg>Update licenses (#421)<commit_after>\/\/ Copyright (c) 2016 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage internal\n\nimport \"go.uber.org\/yarpc\"\n\n\/\/ RemoveVariableHeaderKeys removes any headers that might have been added by tracing\nfunc RemoveVariableHeaderKeys(headers yarpc.Headers) yarpc.Headers {\n\theaders.Del(\"$tracing$uber-trace-id\")\n\tif headers.Len() == 0 {\n\t\treturn yarpc.NewHeaders()\n\t}\n\treturn headers\n}\n\n\/\/ RemoveVariableMapKeys removes any headers that might have been added by tracing\nfunc RemoveVariableMapKeys(headers map[string]string) map[string]string {\n\tdelete(headers, \"$tracing$uber-trace-id\")\n\treturn headers\n}\n<|endoftext|>"} {"text":"<commit_before>package merkle\n\nimport (\n\t\"math\/bits\"\n)\n\n\/\/ SimpleHashFromByteSlices computes a Merkle tree where the leaves are the byte slice,\n\/\/ in the provided order.\nfunc SimpleHashFromByteSlices(items [][]byte) []byte {\n\tswitch len(items) {\n\tcase 0:\n\t\treturn nil\n\tcase 1:\n\t\treturn leafHash(items[0])\n\tdefault:\n\t\tk := getSplitPoint(len(items))\n\t\tleft := SimpleHashFromByteSlices(items[:k])\n\t\tright := SimpleHashFromByteSlices(items[k:])\n\t\treturn innerHash(left, right)\n\t}\n}\n\n\/\/ SimpleHashFromMap computes a Merkle tree from sorted map.\n\/\/ Like calling SimpleHashFromHashers with\n\/\/ `item = []byte(Hash(key) | Hash(value))`,\n\/\/ sorted by `item`.\nfunc SimpleHashFromMap(m map[string][]byte) []byte {\n\tsm := newSimpleMap()\n\tfor k, v := range m {\n\t\tsm.Set(k, v)\n\t}\n\treturn sm.Hash()\n}\n\nfunc getSplitPoint(length int) int {\n\tif length < 1 {\n\t\tpanic(\"Trying to split a tree with size < 1\")\n\t}\n\tuLength := uint(length)\n\tbitlen := bits.Len(uLength)\n\tk := 1 << uint(bitlen-1)\n\tif k == length {\n\t\tk >>= 1\n\t}\n\treturn k\n}\n<commit_msg>Add comment to simple_merkle get_split_point (#3136)<commit_after>package merkle\n\nimport (\n\t\"math\/bits\"\n)\n\n\/\/ SimpleHashFromByteSlices computes a Merkle tree where the leaves are the byte slice,\n\/\/ in the provided order.\nfunc SimpleHashFromByteSlices(items [][]byte) []byte {\n\tswitch len(items) {\n\tcase 0:\n\t\treturn nil\n\tcase 1:\n\t\treturn leafHash(items[0])\n\tdefault:\n\t\tk := getSplitPoint(len(items))\n\t\tleft := SimpleHashFromByteSlices(items[:k])\n\t\tright := SimpleHashFromByteSlices(items[k:])\n\t\treturn innerHash(left, right)\n\t}\n}\n\n\/\/ SimpleHashFromMap computes a Merkle tree from sorted map.\n\/\/ Like calling SimpleHashFromHashers with\n\/\/ `item = []byte(Hash(key) | Hash(value))`,\n\/\/ sorted by `item`.\nfunc SimpleHashFromMap(m map[string][]byte) []byte {\n\tsm := newSimpleMap()\n\tfor k, v := range m {\n\t\tsm.Set(k, v)\n\t}\n\treturn sm.Hash()\n}\n\n\/\/ getSplitPoint returns the largest power of 2 less than length\nfunc getSplitPoint(length int) int {\n\tif length < 1 {\n\t\tpanic(\"Trying to split a tree with size < 1\")\n\t}\n\tuLength := uint(length)\n\tbitlen := bits.Len(uLength)\n\tk := 1 << uint(bitlen-1)\n\tif k == length {\n\t\tk >>= 1\n\t}\n\treturn k\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ This file was auto-generated by the veyron vdl tool.\n\/\/ Source: config.vdl\n\n\/\/ Package vdl describes the configuration for the vdl tool.\npackage vdl\n\nimport (\n\t\/\/ The non-user imports are prefixed with \"__\" to prevent collisions.\n\t__vdl \"veyron.io\/veyron\/veyron2\/vdl\"\n)\n\n\/\/ Config specifies the configuration for the vdl tool. This is typically\n\/\/ represented in optional \"vdl.config\" files in each vdl source package. Each\n\/\/ \"vdl.config\" file implicitly imports this package. E.g. you may refer to\n\/\/ vdl.Go in the \"vdl.config\" file without explicitly importing \"vdl\".\ntype Config struct {\n\t\/\/ GenLanguages restricts the set of code generation languages. If the set is\n\t\/\/ empty, all supported languages are allowed to be generated.\n\tGenLanguages map[GenLanguage]struct{}\n\t\/\/ Language-specific configurations.\n\tGo GoConfig\n\tJava JavaConfig\n\tJavascript JavascriptConfig\n}\n\nfunc (Config) __VDLReflect(struct {\n\tName string \"vdl.Config\"\n}) {\n}\n\n\/\/ GenLanguage enumerates the known code generation languages.\ntype GenLanguage int\n\nconst (\n\tGenLanguageGo GenLanguage = iota\n\tGenLanguageJava\n\tGenLanguageJavascript\n)\n\n\/\/ GenLanguageAll holds all labels for GenLanguage.\nvar GenLanguageAll = []GenLanguage{GenLanguageGo, GenLanguageJava, GenLanguageJavascript}\n\n\/\/ GenLanguageFromString creates a GenLanguage from a string label.\n\/\/ Returns true iff the label is valid.\nfunc GenLanguageFromString(label string) (x GenLanguage, ok bool) {\n\tok = x.Assign(label)\n\treturn\n}\n\n\/\/ Assign assigns label to x.\n\/\/ Returns true iff the label is valid.\nfunc (x *GenLanguage) Assign(label string) bool {\n\tswitch label {\n\tcase \"Go\":\n\t\t*x = GenLanguageGo\n\t\treturn true\n\tcase \"Java\":\n\t\t*x = GenLanguageJava\n\t\treturn true\n\tcase \"Javascript\":\n\t\t*x = GenLanguageJavascript\n\t\treturn true\n\t}\n\t*x = -1\n\treturn false\n}\n\n\/\/ String returns the string label of x.\nfunc (x GenLanguage) String() string {\n\tswitch x {\n\tcase GenLanguageGo:\n\t\treturn \"Go\"\n\tcase GenLanguageJava:\n\t\treturn \"Java\"\n\tcase GenLanguageJavascript:\n\t\treturn \"Javascript\"\n\t}\n\treturn \"\"\n}\n\nfunc (GenLanguage) __VDLReflect(struct {\n\tName string \"vdl.GenLanguage\"\n\tEnum struct{ Go, Java, Javascript string }\n}) {\n}\n\n\/\/ GoConfig specifies go specific configuration.\ntype GoConfig struct {\n\t\/\/ NoFmt disables gofmt formatting on the generated source.\n\tNoFmt bool\n}\n\nfunc (GoConfig) __VDLReflect(struct {\n\tName string \"vdl.GoConfig\"\n}) {\n}\n\n\/\/ JavaConfig specifies java specific configuration.\ntype JavaConfig struct {\n}\n\nfunc (JavaConfig) __VDLReflect(struct {\n\tName string \"vdl.JavaConfig\"\n}) {\n}\n\n\/\/ JavascriptConfig specifies javascript specific configuration.\ntype JavascriptConfig struct {\n}\n\nfunc (JavascriptConfig) __VDLReflect(struct {\n\tName string \"vdl.JavascriptConfig\"\n}) {\n}\n\nfunc init() {\n\t__vdl.Register(Config{})\n\t__vdl.Register(GenLanguageGo)\n\t__vdl.Register(GoConfig{})\n\t__vdl.Register(JavaConfig{})\n\t__vdl.Register(JavascriptConfig{})\n}\n<commit_msg>veyron\/runtimes\/google\/ipc: Failed name resolution should be retryable.<commit_after>\/\/ This file was auto-generated by the veyron vdl tool.\n\/\/ Source: config.vdl\n\n\/\/ Package vdl describes the configuration for the vdl tool.\npackage vdl\n\nimport (\n\t\/\/ The non-user imports are prefixed with \"__\" to prevent collisions.\n\t__vdl \"veyron.io\/veyron\/veyron2\/vdl\"\n)\n\n\/\/ Config specifies the configuration for the vdl tool. This is typically\n\/\/ represented in optional \"vdl.config\" files in each vdl source package. Each\n\/\/ \"vdl.config\" file implicitly imports this package. E.g. you may refer to\n\/\/ vdl.Go in the \"vdl.config\" file without explicitly importing \"vdl\".\ntype Config struct {\n\t\/\/ GenLanguages restricts the set of code generation languages. If the set is\n\t\/\/ empty, all supported languages are allowed to be generated.\n\tGenLanguages map[GenLanguage]struct{}\n\t\/\/ Language-specific configurations.\n\tGo GoConfig\n\tJava JavaConfig\n\tJavascript JavascriptConfig\n}\n\nfunc (Config) __VDLReflect(struct {\n\tName string \"veyron.io\/veyron\/veyron2\/vdl\/vdlroot\/src\/vdl.Config\"\n}) {\n}\n\n\/\/ GenLanguage enumerates the known code generation languages.\ntype GenLanguage int\n\nconst (\n\tGenLanguageGo GenLanguage = iota\n\tGenLanguageJava\n\tGenLanguageJavascript\n)\n\n\/\/ GenLanguageAll holds all labels for GenLanguage.\nvar GenLanguageAll = []GenLanguage{GenLanguageGo, GenLanguageJava, GenLanguageJavascript}\n\n\/\/ GenLanguageFromString creates a GenLanguage from a string label.\n\/\/ Returns true iff the label is valid.\nfunc GenLanguageFromString(label string) (x GenLanguage, ok bool) {\n\tok = x.Assign(label)\n\treturn\n}\n\n\/\/ Assign assigns label to x.\n\/\/ Returns true iff the label is valid.\nfunc (x *GenLanguage) Assign(label string) bool {\n\tswitch label {\n\tcase \"Go\":\n\t\t*x = GenLanguageGo\n\t\treturn true\n\tcase \"Java\":\n\t\t*x = GenLanguageJava\n\t\treturn true\n\tcase \"Javascript\":\n\t\t*x = GenLanguageJavascript\n\t\treturn true\n\t}\n\t*x = -1\n\treturn false\n}\n\n\/\/ String returns the string label of x.\nfunc (x GenLanguage) String() string {\n\tswitch x {\n\tcase GenLanguageGo:\n\t\treturn \"Go\"\n\tcase GenLanguageJava:\n\t\treturn \"Java\"\n\tcase GenLanguageJavascript:\n\t\treturn \"Javascript\"\n\t}\n\treturn \"\"\n}\n\nfunc (GenLanguage) __VDLReflect(struct {\n\tName string \"veyron.io\/veyron\/veyron2\/vdl\/vdlroot\/src\/vdl.GenLanguage\"\n\tEnum struct{ Go, Java, Javascript string }\n}) {\n}\n\n\/\/ GoConfig specifies go specific configuration.\ntype GoConfig struct {\n\t\/\/ NoFmt disables gofmt formatting on the generated source.\n\tNoFmt bool\n}\n\nfunc (GoConfig) __VDLReflect(struct {\n\tName string \"veyron.io\/veyron\/veyron2\/vdl\/vdlroot\/src\/vdl.GoConfig\"\n}) {\n}\n\n\/\/ JavaConfig specifies java specific configuration.\ntype JavaConfig struct {\n}\n\nfunc (JavaConfig) __VDLReflect(struct {\n\tName string \"veyron.io\/veyron\/veyron2\/vdl\/vdlroot\/src\/vdl.JavaConfig\"\n}) {\n}\n\n\/\/ JavascriptConfig specifies javascript specific configuration.\ntype JavascriptConfig struct {\n}\n\nfunc (JavascriptConfig) __VDLReflect(struct {\n\tName string \"veyron.io\/veyron\/veyron2\/vdl\/vdlroot\/src\/vdl.JavascriptConfig\"\n}) {\n}\n\nfunc init() {\n\t__vdl.Register(Config{})\n\t__vdl.Register(GenLanguageGo)\n\t__vdl.Register(GoConfig{})\n\t__vdl.Register(JavaConfig{})\n\t__vdl.Register(JavascriptConfig{})\n}\n<|endoftext|>"} {"text":"<commit_before>package dbhandler\n\nimport \"log\"\n\n\/*\n Add a Student to a team\n*\/\nfunc TeamAddMember(team_id int64, student_id int64) bool {\n\tlog.Print(\"# Add Team member\")\n\tlog.Printf(\"Team ID = %d, Student ID = %d\", team_id, student_id)\n\n\tdb := getDBConn()\n\n\t\/\/TODO: Make sure the Team and Student belong to the same School\n\n\tstmt, err := db.Prepare(\"INSERT INTO Student_Team(team_id, student_id) \" +\n\t\t\"VALUES($1, $2)\")\n\tdefer stmt.Close()\n\n\tif err != nil {\n\t\tlog.Print(\"Error creating prepared statement\")\n\t\tlog.Panic(err)\n\t}\n\n\tresult, err := stmt.Exec(team_id, student_id)\n\n\tif err != nil {\n\t\tif isForeignKeyError(err) {\n\t\t\treturn false\n\t\t}\n\n\t\tif isDuplicateKeyError(err) {\n\t\t\t\/\/if the entry already exists\n\t\t\treturn false\n\t\t}\n\n\t\tlog.Panic(err)\n\t}\n\n\taffectedRows, err := result.RowsAffected()\n\tif affectedRows != 1 {\n\t\tlog.Panic(\"Unexpected number of inserts\")\n\t}\n\n\treturn true\n}\n\nfunc TeamDeleteMember(team_id int64, student_id int64) bool {\n\tlog.Print(\"# Remove Team Member\")\n\tlog.Printf(\"Team ID = %d, Student ID = %d\", team_id, student_id)\n\n\tdb := getDBConn()\n\n\tdelete_stmt, err := db.Prepare(\"DELETE FROM Student_Team WHERE team_id = $1 AND student_id = $2\")\n\tdefer delete_stmt.Close()\n\n\tif err != nil {\n\t\tlog.Print(\"Error creating prepared statement\")\n\t\tlog.Panic(err)\n\t}\n\n\tresult, err := delete_stmt.Exec(team_id, student_id)\n\n\tif err != nil {\n\t\tlog.Print(\"Delete Failed\")\n\n\t\tif isForeignKeyError(err) {\n\t\t\treturn false\n\t\t}\n\n\t\tlog.Panic(err)\n\t}\n\n\taffectedRows, err := result.RowsAffected()\n\tif affectedRows != 1 {\n\t\tlog.Panic(\"Unexpected number of deletion\")\n\t}\n\n\treturn true\n}\n\nfunc TeamReadMembers(team_id int64) []int64 {\n\tlog.Print(\"# Reading Team members\")\n\tlog.Printf(\"Team ID = %d\", team_id)\n\n\tdb := getDBConn()\n\n\tstmt, err := db.Prepare(\"SELECT student_id \" +\n\t\t\"FROM Student_Team WHERE team_id = $1\")\n\tdefer stmt.Close()\n\n\tif err != nil {\n\t\tlog.Print(\"Error creating prepared statement\")\n\t\tlog.Panic(err)\n\t}\n\n\tcrsr, err := stmt.Query(team_id)\n\n\tif err != nil {\n\t\tlog.Print(\"Error getting team data\")\n\t\tlog.Panic(err)\n\t}\n\n\tstudent_ids := make([]int64, 0, MAX_STUDENT_PER_TEAM)\n\tvar s_id int64\n\tfor crsr.Next() {\n\t\tcrsr.Scan(&s_id)\n\t\tstudent_ids = append(student_ids, s_id)\n\t}\n\n\treturn student_ids\n}\n<commit_msg>Make sure the Team and Student belong to the same School<commit_after>package dbhandler\n\nimport \"log\"\n\n\/*\n Add a Student to a team\n*\/\nfunc TeamAddMember(team_id int64, student_id int64) bool {\n\tlog.Printf(\"Add Team member : Team ID = %d, Student ID = %d\", team_id, student_id)\n\n\tdb := getDBConn()\n\n\t\/\/Make sure the Team and Student belong to the same School\n\tteam := TeamRead(team_id)\n\tstudent := StudentRead(student_id)\n\n\tif team.SchoolID != student.SchoolID {\n\t\tlog.Print(\"Team and Student are not from same School\")\n\t\treturn false\n\t}\n\n\tstmt, err := db.Prepare(\"INSERT INTO Student_Team(team_id, student_id) \" +\n\t\t\"VALUES($1, $2)\")\n\tdefer stmt.Close()\n\n\tif err != nil {\n\t\tlog.Print(\"Error creating prepared statement\")\n\t\tlog.Panic(err)\n\t}\n\n\tresult, err := stmt.Exec(team_id, student_id)\n\n\tif err != nil {\n\t\tif isForeignKeyError(err) {\n\t\t\treturn false\n\t\t}\n\n\t\tif isDuplicateKeyError(err) {\n\t\t\t\/\/if the entry already exists\n\t\t\treturn false\n\t\t}\n\n\t\tlog.Panic(err)\n\t}\n\n\taffectedRows, err := result.RowsAffected()\n\tif affectedRows != 1 {\n\t\tlog.Panic(\"Unexpected number of inserts\")\n\t}\n\n\treturn true\n}\n\nfunc TeamDeleteMember(team_id int64, student_id int64) bool {\n\tlog.Print(\"# Remove Team Member\")\n\tlog.Printf(\"Team ID = %d, Student ID = %d\", team_id, student_id)\n\n\tdb := getDBConn()\n\n\tdelete_stmt, err := db.Prepare(\"DELETE FROM Student_Team WHERE team_id = $1 AND student_id = $2\")\n\tdefer delete_stmt.Close()\n\n\tif err != nil {\n\t\tlog.Print(\"Error creating prepared statement\")\n\t\tlog.Panic(err)\n\t}\n\n\tresult, err := delete_stmt.Exec(team_id, student_id)\n\n\tif err != nil {\n\t\tlog.Print(\"Delete Failed\")\n\n\t\tif isForeignKeyError(err) {\n\t\t\treturn false\n\t\t}\n\n\t\tlog.Panic(err)\n\t}\n\n\taffectedRows, err := result.RowsAffected()\n\tif affectedRows != 1 {\n\t\tlog.Panic(\"Unexpected number of deletion\")\n\t}\n\n\treturn true\n}\n\nfunc TeamReadMembers(team_id int64) []int64 {\n\tlog.Print(\"# Reading Team members\")\n\tlog.Printf(\"Team ID = %d\", team_id)\n\n\tdb := getDBConn()\n\n\tstmt, err := db.Prepare(\"SELECT student_id \" +\n\t\t\"FROM Student_Team WHERE team_id = $1\")\n\tdefer stmt.Close()\n\n\tif err != nil {\n\t\tlog.Print(\"Error creating prepared statement\")\n\t\tlog.Panic(err)\n\t}\n\n\tcrsr, err := stmt.Query(team_id)\n\n\tif err != nil {\n\t\tlog.Print(\"Error getting team data\")\n\t\tlog.Panic(err)\n\t}\n\n\tstudent_ids := make([]int64, 0, MAX_STUDENT_PER_TEAM)\n\tvar s_id int64\n\tfor crsr.Next() {\n\t\tcrsr.Scan(&s_id)\n\t\tstudent_ids = append(student_ids, s_id)\n\t}\n\n\treturn student_ids\n}\n<|endoftext|>"} {"text":"<commit_before>package command\n\nimport (\n\t\"context\"\n\n\t\"github.com\/spolu\/warp\"\n\t\"github.com\/spolu\/warp\/client\"\n\t\"github.com\/spolu\/warp\/lib\/out\"\n)\n\nconst (\n\t\/\/ CmdNmHelp is the command name.\n\tCmdNmHelp cli.CmdName = \"help\"\n)\n\nfunc init() {\n\tcli.Registrar[CmdNmHelp] = NewHelp\n}\n\n\/\/ Help a user\ntype Help struct {\n\tCommand cli.Command\n}\n\n\/\/ NewHelp constructs and initializes the command.\nfunc NewHelp() cli.Command {\n\treturn &Help{}\n}\n\n\/\/ Name returns the command name.\nfunc (c *Help) Name() cli.CmdName {\n\treturn CmdNmHelp\n}\n\n\/\/ Help prints out the help message for the command.\nfunc (c *Help) Help(\n\tctx context.Context,\n) {\n\tout.Normf(\"\\n\")\n\tout.Normf(\" _ ______ __________ \\n\")\n\tout.Normf(\" | | \/| \/ \/ __ `\/ ___\/ __ \\\\ \\n\")\n\tout.Normf(\" | |\/ |\/ \/ \/_\/ \/ \/ \/ \/_\/ \/ \\n\")\n\tout.Normf(\" |__\/|__\/\\\\__,_\/_\/ \/ .___\/ \\n\")\n\tout.Normf(\" \/_\/ \")\n\tout.Boldf(\"v%s\\n\", warp.Version)\n\tout.Normf(\"\\n\")\n\tout.Normf(\"Usage: \")\n\tout.Boldf(\"warp <command> [<args> ...]\\n\")\n\tout.Normf(\"\\n\")\n\tout.Normf(\" Instant terminal sharing directly from your machine.\\n\")\n\tout.Normf(\"\\n\")\n\tout.Normf(\"Commands:\\n\")\n\tout.Boldf(\" help <command>\\n\")\n\tout.Normf(\" Show help for a specific command.\\n\")\n\tout.Valuf(\" warp help open\\n\")\n\tout.Normf(\"\\n\")\n\tout.Boldf(\" open [<id>]\\n\")\n\tout.Normf(\" Creates a new warp.\\n\")\n\tout.Valuf(\" warp open\\n\")\n\tout.Normf(\"\\n\")\n\tout.Boldf(\" connect <id>\\n\")\n\tout.Normf(\" Connects to an existing warp.\\n\")\n\tout.Valuf(\" warp connect goofy-dev\\n\")\n\tout.Normf(\"\\n\")\n\tout.Normf(\"In-warp commands:\\n\")\n\tout.Boldf(\" state\\n\")\n\tout.Normf(\" Displays the state of the current warp.\\n\")\n\tout.Valuf(\" warp state\\n\")\n\tout.Normf(\"\\n\")\n\tout.Boldf(\" authorize <username_or_token>\\n\")\n\tout.Normf(\" Grants write access to a client of the current warp.\\n\")\n\tout.Valuf(\" warp authorize goofy\\n\")\n\tout.Normf(\"\\n\")\n\tout.Boldf(\" revoke [<username_or_token>]\\n\")\n\tout.Normf(\" Revokes write access to one or all clients of the current warp.\\n\")\n\tout.Valuf(\" warp revoke\\n\")\n\tout.Normf(\"\\n\")\n}\n\n\/\/ Parse parses the arguments passed to the command.\nfunc (c *Help) Parse(\n\tctx context.Context,\n\targs []string,\n) error {\n\tif len(args) == 0 {\n\t\tc.Command = NewHelp()\n\t} else {\n\t\tif r, ok := cli.Registrar[cli.CmdName(args[0])]; !ok {\n\t\t\tc.Command = NewHelp()\n\t\t} else {\n\t\t\tc.Command = r()\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Execute the command or return a human-friendly error.\nfunc (c *Help) Execute(\n\tctx context.Context,\n) error {\n\tc.Command.Help(ctx)\n\treturn nil\n}\n<commit_msg>Improved help message<commit_after>package command\n\nimport (\n\t\"context\"\n\n\t\"github.com\/spolu\/warp\"\n\t\"github.com\/spolu\/warp\/client\"\n\t\"github.com\/spolu\/warp\/lib\/out\"\n)\n\nconst (\n\t\/\/ CmdNmHelp is the command name.\n\tCmdNmHelp cli.CmdName = \"help\"\n)\n\nfunc init() {\n\tcli.Registrar[CmdNmHelp] = NewHelp\n}\n\n\/\/ Help a user\ntype Help struct {\n\tCommand cli.Command\n}\n\n\/\/ NewHelp constructs and initializes the command.\nfunc NewHelp() cli.Command {\n\treturn &Help{}\n}\n\n\/\/ Name returns the command name.\nfunc (c *Help) Name() cli.CmdName {\n\treturn CmdNmHelp\n}\n\n\/\/ Help prints out the help message for the command.\nfunc (c *Help) Help(\n\tctx context.Context,\n) {\n\tout.Normf(\"\\n\")\n\tout.Normf(\" _ ______ __________ \\n\")\n\tout.Normf(\" | | \/| \/ \/ __ `\/ ___\/ __ \\\\ \\n\")\n\tout.Normf(\" | |\/ |\/ \/ \/_\/ \/ \/ \/ \/_\/ \/ \\n\")\n\tout.Normf(\" |__\/|__\/\\\\__,_\/_\/ \/ .___\/ \\n\")\n\tout.Normf(\" \/_\/ \")\n\tout.Boldf(\"v%s\\n\", warp.Version)\n\tout.Normf(\"\\n\")\n\tout.Normf(\" Instant terminal sharing directly from your machine.\\n\")\n\tout.Normf(\"\\n\")\n\tout.Normf(\"Usage:\\n\")\n\tout.Boldf(\" warp <command> [<args> ...]\\n\")\n\tout.Normf(\"\\n\")\n\tout.Normf(\"Commands:\\n\")\n\tout.Boldf(\" help <command>\\n\")\n\tout.Normf(\" Show help for a specific command.\\n\")\n\tout.Valuf(\" warp help open\\n\")\n\tout.Normf(\"\\n\")\n\tout.Boldf(\" open [<id>]\\n\")\n\tout.Normf(\" Creates a new warp.\\n\")\n\tout.Valuf(\" warp open\\n\")\n\tout.Normf(\"\\n\")\n\tout.Boldf(\" connect <id>\\n\")\n\tout.Normf(\" Connects to an existing warp.\\n\")\n\tout.Valuf(\" warp connect goofy-dev\\n\")\n\tout.Normf(\"\\n\")\n\tout.Boldf(\" state\\n\")\n\tout.Normf(\" Displays the state of the current warp (in-warp only).\\n\")\n\tout.Valuf(\" warp state\\n\")\n\tout.Normf(\"\\n\")\n\tout.Boldf(\" authorize <username_or_token>\\n\")\n\tout.Normf(\" Grants write access to a client (in-warp only).\\n\")\n\tout.Valuf(\" warp authorize goofy\\n\")\n\tout.Normf(\"\\n\")\n\tout.Boldf(\" revoke [<username_or_token>]\\n\")\n\tout.Normf(\" Revokes write access to one or all clients (in-warp only).\\n\")\n\tout.Valuf(\" warp revoke\\n\")\n\tout.Normf(\"\\n\")\n}\n\n\/\/ Parse parses the arguments passed to the command.\nfunc (c *Help) Parse(\n\tctx context.Context,\n\targs []string,\n) error {\n\tif len(args) == 0 {\n\t\tc.Command = NewHelp()\n\t} else {\n\t\tif r, ok := cli.Registrar[cli.CmdName(args[0])]; !ok {\n\t\t\tc.Command = NewHelp()\n\t\t} else {\n\t\t\tc.Command = r()\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Execute the command or return a human-friendly error.\nfunc (c *Help) Execute(\n\tctx context.Context,\n) error {\n\tc.Command.Help(ctx)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2021 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage common\n\nimport (\n\t\"sort\"\n)\n\n\/\/ TryjobID is a unique ID of a Tryjob used internally in CV.\n\/\/\n\/\/ This ID is not a Buildbucket Build ID.\n\/\/ See also tryjob.Tryjob type.\ntype TryjobID int64\n\n\/\/ TryjobIDs is a convenience type to facilitate handling of a slice of\n\/\/ TryjobID.\ntype TryjobIDs []TryjobID\n\n\/\/ Dedupe removes duplicates in place and sorts the slice.\n\/\/\n\/\/ Note: Does not preserve original order.\nfunc (p *TryjobIDs) Dedupe() {\n\tids := *p\n\tif len(ids) <= 1 {\n\t\treturn\n\t}\n\tsort.Sort(ids)\n\tn, prev, skipped := 0, ids[0], false\n\tfor _, id := range ids[1:] {\n\t\tif id == prev {\n\t\t\tskipped = true\n\t\t\tcontinue\n\t\t}\n\t\tn++\n\t\tif skipped {\n\t\t\tids[n] = id\n\t\t}\n\t\tprev = id\n\t}\n\t*p = ids[:n+1]\n}\n\n\/\/ Len is the number of elements in the collection.\nfunc (ids TryjobIDs) Len() int {\n\treturn len(ids)\n}\n\n\/\/ Less reports whether the element with\n\/\/ index i should sort before the element with index j.\nfunc (ids TryjobIDs) Less(i int, j int) bool {\n\treturn ids[i] < ids[j]\n}\n\n\/\/ Swap swaps the elements with indexes i and j.\nfunc (ids TryjobIDs) Swap(i int, j int) {\n\tids[i], ids[j] = ids[j], ids[i]\n}\n\n\/\/ Set returns a new set of TryjobIDs.\nfunc (ids TryjobIDs) Set() map[TryjobID]struct{} {\n\tif ids == nil {\n\t\treturn nil\n\t}\n\tret := make(map[TryjobID]struct{}, len(ids))\n\tfor _, id := range ids {\n\t\tret[id] = struct{}{}\n\t}\n\treturn ret\n}\n\n\/\/ Contains returns true if TryjobID is inside these TryjobIDs.\nfunc (ids TryjobIDs) Contains(id TryjobID) bool {\n\tfor _, x := range ids {\n\t\tif x == id {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ MakeTryjobIDs returns TryjobIDs from list of TryjobID in int64.\nfunc MakeTryjobIDs(ids ...int64) TryjobIDs {\n\tif ids == nil {\n\t\treturn nil\n\t}\n\tret := make(TryjobIDs, len(ids))\n\tfor i, id := range ids {\n\t\tret[i] = TryjobID(id)\n\t}\n\treturn ret\n}\n<commit_msg>cv: introduce TryjobIDSet<commit_after>\/\/ Copyright 2021 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage common\n\nimport (\n\t\"sort\"\n)\n\n\/\/ TryjobID is a unique ID of a Tryjob used internally in CV.\n\/\/\n\/\/ This ID is not a Buildbucket Build ID.\n\/\/ See also tryjob.Tryjob type.\ntype TryjobID int64\n\n\/\/ TryjobIDs is a convenience type to facilitate handling of a slice of\n\/\/ TryjobID.\ntype TryjobIDs []TryjobID\n\n\/\/ Dedupe removes duplicates in place and sorts the slice.\n\/\/\n\/\/ Note: Does not preserve original order.\nfunc (p *TryjobIDs) Dedupe() {\n\tids := *p\n\tif len(ids) <= 1 {\n\t\treturn\n\t}\n\tsort.Sort(ids)\n\tn, prev, skipped := 0, ids[0], false\n\tfor _, id := range ids[1:] {\n\t\tif id == prev {\n\t\t\tskipped = true\n\t\t\tcontinue\n\t\t}\n\t\tn++\n\t\tif skipped {\n\t\t\tids[n] = id\n\t\t}\n\t\tprev = id\n\t}\n\t*p = ids[:n+1]\n}\n\n\/\/ Len is the number of elements in the collection.\nfunc (ids TryjobIDs) Len() int {\n\treturn len(ids)\n}\n\n\/\/ Less reports whether the element with\n\/\/ index i should sort before the element with index j.\nfunc (ids TryjobIDs) Less(i int, j int) bool {\n\treturn ids[i] < ids[j]\n}\n\n\/\/ Swap swaps the elements with indexes i and j.\nfunc (ids TryjobIDs) Swap(i int, j int) {\n\tids[i], ids[j] = ids[j], ids[i]\n}\n\n\/\/ Set returns a new set of TryjobIDs.\nfunc (ids TryjobIDs) Set() map[TryjobID]struct{} {\n\tif ids == nil {\n\t\treturn nil\n\t}\n\tret := make(map[TryjobID]struct{}, len(ids))\n\tfor _, id := range ids {\n\t\tret[id] = struct{}{}\n\t}\n\treturn ret\n}\n\n\/\/ Contains returns true if TryjobID is inside these TryjobIDs.\nfunc (ids TryjobIDs) Contains(id TryjobID) bool {\n\tfor _, x := range ids {\n\t\tif x == id {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ MakeTryjobIDs returns TryjobIDs from list of TryjobID in int64.\nfunc MakeTryjobIDs(ids ...int64) TryjobIDs {\n\tif ids == nil {\n\t\treturn nil\n\t}\n\tret := make(TryjobIDs, len(ids))\n\tfor i, id := range ids {\n\t\tret[i] = TryjobID(id)\n\t}\n\treturn ret\n}\n\n\/\/ TryjobIDSet is convenience type to reduce the boilerplate.\ntype TryjobIDSet map[TryjobID]struct{}\n\n\/\/ Add adds the provided Tryjob ID to the set.\nfunc (s TryjobIDSet) Add(tjID TryjobID) {\n\ts[tjID] = struct{}{}\n}\n\n\/\/ Has returns true if the provided Tryjob ID is in the set.\n\/\/\n\/\/ Otherwise, returns false.\nfunc (s TryjobIDSet) Has(tjID TryjobID) bool {\n\t_, exists := s[tjID]\n\treturn exists\n}\n<|endoftext|>"} {"text":"<commit_before>package logger\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"testing\"\n\t\"time\"\n)\n\ntype TestLoggerJSON struct {\n\t*json.Encoder\n\tdelay time.Duration\n}\n\nfunc (l *TestLoggerJSON) Log(m *Message) error {\n\tif l.delay > 0 {\n\t\ttime.Sleep(l.delay)\n\t}\n\treturn l.Encode(m)\n}\n\nfunc (l *TestLoggerJSON) Close() error { return nil }\n\nfunc (l *TestLoggerJSON) Name() string { return \"json\" }\n\ntype TestLoggerText struct {\n\t*bytes.Buffer\n}\n\nfunc (l *TestLoggerText) Log(m *Message) error {\n\t_, err := l.WriteString(m.ContainerID + \" \" + m.Source + \" \" + string(m.Line) + \"\\n\")\n\treturn err\n}\n\nfunc (l *TestLoggerText) Close() error { return nil }\n\nfunc (l *TestLoggerText) Name() string { return \"text\" }\n\nfunc TestCopier(t *testing.T) {\n\tstdoutLine := \"Line that thinks that it is log line from docker stdout\"\n\tstderrLine := \"Line that thinks that it is log line from docker stderr\"\n\tvar stdout bytes.Buffer\n\tvar stderr bytes.Buffer\n\tfor i := 0; i < 30; i++ {\n\t\tif _, err := stdout.WriteString(stdoutLine + \"\\n\"); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif _, err := stderr.WriteString(stderrLine + \"\\n\"); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\tvar jsonBuf bytes.Buffer\n\n\tjsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)}\n\n\tcid := \"a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657\"\n\tc := NewCopier(cid,\n\t\tmap[string]io.Reader{\n\t\t\t\"stdout\": &stdout,\n\t\t\t\"stderr\": &stderr,\n\t\t},\n\t\tjsonLog)\n\tc.Run()\n\twait := make(chan struct{})\n\tgo func() {\n\t\tc.Wait()\n\t\tclose(wait)\n\t}()\n\tselect {\n\tcase <-time.After(1 * time.Second):\n\t\tt.Fatal(\"Copier failed to do its work in 1 second\")\n\tcase <-wait:\n\t}\n\tdec := json.NewDecoder(&jsonBuf)\n\tfor {\n\t\tvar msg Message\n\t\tif err := dec.Decode(&msg); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif msg.Source != \"stdout\" && msg.Source != \"stderr\" {\n\t\t\tt.Fatalf(\"Wrong Source: %q, should be %q or %q\", msg.Source, \"stdout\", \"stderr\")\n\t\t}\n\t\tif msg.ContainerID != cid {\n\t\t\tt.Fatalf(\"Wrong ContainerID: %q, expected %q\", msg.ContainerID, cid)\n\t\t}\n\t\tif msg.Source == \"stdout\" {\n\t\t\tif string(msg.Line) != stdoutLine {\n\t\t\t\tt.Fatalf(\"Wrong Line: %q, expected %q\", msg.Line, stdoutLine)\n\t\t\t}\n\t\t}\n\t\tif msg.Source == \"stderr\" {\n\t\t\tif string(msg.Line) != stderrLine {\n\t\t\t\tt.Fatalf(\"Wrong Line: %q, expected %q\", msg.Line, stderrLine)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestCopierSlow(t *testing.T) {\n\tstdoutLine := \"Line that thinks that it is log line from docker stdout\"\n\tvar stdout bytes.Buffer\n\tfor i := 0; i < 30; i++ {\n\t\tif _, err := stdout.WriteString(stdoutLine + \"\\n\"); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\tvar jsonBuf bytes.Buffer\n\t\/\/encoder := &encodeCloser{Encoder: json.NewEncoder(&jsonBuf)}\n\tjsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf), delay: 100 * time.Millisecond}\n\n\tcid := \"a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657\"\n\tc := NewCopier(cid, map[string]io.Reader{\"stdout\": &stdout}, jsonLog)\n\tc.Run()\n\twait := make(chan struct{})\n\tgo func() {\n\t\tc.Wait()\n\t\tclose(wait)\n\t}()\n\t<-time.After(150 * time.Millisecond)\n\tc.Close()\n\tselect {\n\tcase <-time.After(200 * time.Millisecond):\n\t\tt.Fatalf(\"failed to exit in time after the copier is closed\")\n\tcase <-wait:\n\t}\n}\n<commit_msg>Fix a race in daemon\/logger.TestCopier<commit_after>package logger\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\ntype TestLoggerJSON struct {\n\t*json.Encoder\n\tmu sync.Mutex\n\tdelay time.Duration\n}\n\nfunc (l *TestLoggerJSON) Log(m *Message) error {\n\tif l.delay > 0 {\n\t\ttime.Sleep(l.delay)\n\t}\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\treturn l.Encode(m)\n}\n\nfunc (l *TestLoggerJSON) Close() error { return nil }\n\nfunc (l *TestLoggerJSON) Name() string { return \"json\" }\n\ntype TestLoggerText struct {\n\t*bytes.Buffer\n}\n\nfunc (l *TestLoggerText) Log(m *Message) error {\n\t_, err := l.WriteString(m.ContainerID + \" \" + m.Source + \" \" + string(m.Line) + \"\\n\")\n\treturn err\n}\n\nfunc (l *TestLoggerText) Close() error { return nil }\n\nfunc (l *TestLoggerText) Name() string { return \"text\" }\n\nfunc TestCopier(t *testing.T) {\n\tstdoutLine := \"Line that thinks that it is log line from docker stdout\"\n\tstderrLine := \"Line that thinks that it is log line from docker stderr\"\n\tvar stdout bytes.Buffer\n\tvar stderr bytes.Buffer\n\tfor i := 0; i < 30; i++ {\n\t\tif _, err := stdout.WriteString(stdoutLine + \"\\n\"); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif _, err := stderr.WriteString(stderrLine + \"\\n\"); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\tvar jsonBuf bytes.Buffer\n\n\tjsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)}\n\n\tcid := \"a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657\"\n\tc := NewCopier(cid,\n\t\tmap[string]io.Reader{\n\t\t\t\"stdout\": &stdout,\n\t\t\t\"stderr\": &stderr,\n\t\t},\n\t\tjsonLog)\n\tc.Run()\n\twait := make(chan struct{})\n\tgo func() {\n\t\tc.Wait()\n\t\tclose(wait)\n\t}()\n\tselect {\n\tcase <-time.After(1 * time.Second):\n\t\tt.Fatal(\"Copier failed to do its work in 1 second\")\n\tcase <-wait:\n\t}\n\tdec := json.NewDecoder(&jsonBuf)\n\tfor {\n\t\tvar msg Message\n\t\tif err := dec.Decode(&msg); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif msg.Source != \"stdout\" && msg.Source != \"stderr\" {\n\t\t\tt.Fatalf(\"Wrong Source: %q, should be %q or %q\", msg.Source, \"stdout\", \"stderr\")\n\t\t}\n\t\tif msg.ContainerID != cid {\n\t\t\tt.Fatalf(\"Wrong ContainerID: %q, expected %q\", msg.ContainerID, cid)\n\t\t}\n\t\tif msg.Source == \"stdout\" {\n\t\t\tif string(msg.Line) != stdoutLine {\n\t\t\t\tt.Fatalf(\"Wrong Line: %q, expected %q\", msg.Line, stdoutLine)\n\t\t\t}\n\t\t}\n\t\tif msg.Source == \"stderr\" {\n\t\t\tif string(msg.Line) != stderrLine {\n\t\t\t\tt.Fatalf(\"Wrong Line: %q, expected %q\", msg.Line, stderrLine)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestCopierSlow(t *testing.T) {\n\tstdoutLine := \"Line that thinks that it is log line from docker stdout\"\n\tvar stdout bytes.Buffer\n\tfor i := 0; i < 30; i++ {\n\t\tif _, err := stdout.WriteString(stdoutLine + \"\\n\"); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\tvar jsonBuf bytes.Buffer\n\t\/\/encoder := &encodeCloser{Encoder: json.NewEncoder(&jsonBuf)}\n\tjsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf), delay: 100 * time.Millisecond}\n\n\tcid := \"a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657\"\n\tc := NewCopier(cid, map[string]io.Reader{\"stdout\": &stdout}, jsonLog)\n\tc.Run()\n\twait := make(chan struct{})\n\tgo func() {\n\t\tc.Wait()\n\t\tclose(wait)\n\t}()\n\t<-time.After(150 * time.Millisecond)\n\tc.Close()\n\tselect {\n\tcase <-time.After(200 * time.Millisecond):\n\t\tt.Fatalf(\"failed to exit in time after the copier is closed\")\n\tcase <-wait:\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/user\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"syscall\"\n\n\t\"github.com\/containers\/buildah\/unshare\"\n\t\"github.com\/containers\/buildah\/util\"\n\t\"github.com\/opencontainers\/runtime-spec\/specs-go\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/syndtr\/gocapability\/capability\"\n\t\"github.com\/urfave\/cli\"\n)\n\nconst (\n\t\/\/ startedInUserNS is an environment variable that, if set, means that we shouldn't try\n\t\/\/ to create and enter a new user namespace and then re-exec ourselves.\n\tstartedInUserNS = \"_BUILDAH_STARTED_IN_USERNS\"\n)\n\nvar (\n\tunshareDescription = \"Runs a command in a modified user namespace\"\n\tunshareCommand = cli.Command{\n\t\tName: \"unshare\",\n\t\tUsage: \"Run a command in a modified user namespace\",\n\t\tDescription: unshareDescription,\n\t\tAction: unshareCmd,\n\t\tArgsUsage: \"[COMMAND [ARGS [...]]]\",\n\t\tSkipArgReorder: true,\n\t}\n)\n\ntype runnable interface {\n\tRun() error\n}\n\nfunc bailOnError(err error, format string, a ...interface{}) {\n\tif err != nil {\n\t\tif format != \"\" {\n\t\t\tlogrus.Errorf(\"%s: %v\", fmt.Sprintf(format, a...), err)\n\t\t} else {\n\t\t\tlogrus.Errorf(\"%v\", err)\n\t\t}\n\t\tcli.OsExiter(1)\n\t}\n}\n\nfunc maybeReexecUsingUserNamespace(c *cli.Context, evenForRoot bool) {\n\t\/\/ If we've already been through this once, no need to try again.\n\tif os.Getenv(startedInUserNS) != \"\" {\n\t\treturn\n\t}\n\n\t\/\/ If this is one of the commands that doesn't need this indirection, skip it.\n\tif c.NArg() == 0 {\n\t\treturn\n\t}\n\tswitch c.Args()[0] {\n\tcase \"help\", \"version\":\n\t\treturn\n\t}\n\n\t\/\/ Figure out who we are.\n\tme, err := user.Current()\n\tbailOnError(err, \"error determining current user\")\n\tuidNum, err := strconv.ParseUint(me.Uid, 10, 32)\n\tbailOnError(err, \"error parsing current UID %s\", me.Uid)\n\tgidNum, err := strconv.ParseUint(me.Gid, 10, 32)\n\tbailOnError(err, \"error parsing current GID %s\", me.Gid)\n\n\truntime.LockOSThread()\n\tdefer runtime.UnlockOSThread()\n\n\t\/\/ ID mappings to use to reexec ourselves.\n\tvar uidmap, gidmap []specs.LinuxIDMapping\n\tif uidNum != 0 || evenForRoot {\n\t\t\/\/ Read the set of ID mappings that we're allowed to use. Each\n\t\t\/\/ range in \/etc\/subuid and \/etc\/subgid file is a starting host\n\t\t\/\/ ID and a range size.\n\t\tuidmap, gidmap, err = util.GetSubIDMappings(me.Username, me.Username)\n\t\tbailOnError(err, \"error reading allowed ID mappings\")\n\t\tif len(uidmap) == 0 {\n\t\t\tlogrus.Warnf(\"Found no UID ranges set aside for user %q in \/etc\/subuid.\", me.Username)\n\t\t}\n\t\tif len(gidmap) == 0 {\n\t\t\tlogrus.Warnf(\"Found no GID ranges set aside for user %q in \/etc\/subgid.\", me.Username)\n\t\t}\n\t\t\/\/ Map our UID and GID, then the subuid and subgid ranges,\n\t\t\/\/ consecutively, starting at 0, to get the mappings to use for\n\t\t\/\/ a copy of ourselves.\n\t\tuidmap = append([]specs.LinuxIDMapping{{HostID: uint32(uidNum), ContainerID: 0, Size: 1}}, uidmap...)\n\t\tgidmap = append([]specs.LinuxIDMapping{{HostID: uint32(gidNum), ContainerID: 0, Size: 1}}, gidmap...)\n\t\tvar rangeStart uint32\n\t\tfor i := range uidmap {\n\t\t\tuidmap[i].ContainerID = rangeStart\n\t\t\trangeStart += uidmap[i].Size\n\t\t}\n\t\trangeStart = 0\n\t\tfor i := range gidmap {\n\t\t\tgidmap[i].ContainerID = rangeStart\n\t\t\trangeStart += gidmap[i].Size\n\t\t}\n\t} else {\n\t\t\/\/ If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able\n\t\t\/\/ to use unshare(), so don't bother creating a new user namespace at this point.\n\t\tcapabilities, err := capability.NewPid(0)\n\t\tbailOnError(err, \"error reading the current capabilities sets\")\n\t\tif capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) {\n\t\t\treturn\n\t\t}\n\t\t\/\/ Read the set of ID mappings that we're currently using.\n\t\tuidmap, gidmap, err = util.GetHostIDMappings(\"\")\n\t\tbailOnError(err, \"error reading current ID mappings\")\n\t\t\/\/ Just reuse them.\n\t\tfor i := range uidmap {\n\t\t\tuidmap[i].HostID = uidmap[i].ContainerID\n\t\t}\n\t\tfor i := range gidmap {\n\t\t\tgidmap[i].HostID = gidmap[i].ContainerID\n\t\t}\n\t}\n\n\t\/\/ Unlike most uses of reexec or unshare, we're using a name that\n\t\/\/ _won't_ be recognized as a registered reexec handler, since we\n\t\/\/ _want_ to fall through reexec.Init() to the normal main().\n\tcmd := unshare.Command(append([]string{\"buildah-in-a-user-namespace\"}, os.Args[1:]...)...)\n\n\t\/\/ If, somehow, we don't become UID 0 in our child, indicate that the child shouldn't try again.\n\terr = os.Setenv(startedInUserNS, \"1\")\n\tbailOnError(err, \"error setting %s=1 in environment\", startedInUserNS)\n\n\t\/\/ Set the default isolation type to use the \"rootless\" method.\n\tif _, present := os.LookupEnv(\"BUILDAH_ISOLATION\"); !present {\n\t\tif err = os.Setenv(\"BUILDAH_ISOLATION\", \"rootless\"); err != nil {\n\t\t\tlogrus.Errorf(\"error setting BUILDAH_ISOLATION=rootless in environment: %v\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t\/\/ Reuse our stdio.\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\t\/\/ Set up a new user namespace with the ID mapping.\n\tcmd.UnshareFlags = syscall.CLONE_NEWUSER|syscall.CLONE_NEWNS\n\tcmd.UseNewuidmap = uidNum != 0\n\tcmd.UidMappings = uidmap\n\tcmd.UseNewgidmap = uidNum != 0\n\tcmd.GidMappings = gidmap\n\tcmd.GidMappingsEnableSetgroups = true\n\n\t\/\/ Finish up.\n\tlogrus.Debugf(\"running %+v with environment %+v, UID map %+v, and GID map %+v\", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)\n\texecRunnable(cmd)\n}\n\n\/\/ execRunnable runs the specified unshare command, captures its exit status,\n\/\/ and exits with the same status.\nfunc execRunnable(cmd runnable) {\n\tif err := cmd.Run(); err != nil {\n\t\tif exitError, ok := errors.Cause(err).(*exec.ExitError); ok {\n\t\t\tif exitError.ProcessState.Exited() {\n\t\t\t\tif waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {\n\t\t\t\t\tif waitStatus.Exited() {\n\t\t\t\t\t\tlogrus.Errorf(\"%v\", exitError)\n\t\t\t\t\t\tos.Exit(waitStatus.ExitStatus())\n\t\t\t\t\t}\n\t\t\t\t\tif waitStatus.Signaled() {\n\t\t\t\t\t\tlogrus.Errorf(\"%v\", exitError)\n\t\t\t\t\t\tos.Exit(int(waitStatus.Signal()) + 128)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tlogrus.Errorf(\"%v\", err)\n\t\tlogrus.Errorf(\"(unable to determine exit status)\")\n\t\tos.Exit(1)\n\t}\n\tos.Exit(0)\n}\n\n\/\/ unshareCmd execs whatever using the ID mappings that we want to use for ourselves\nfunc unshareCmd(c *cli.Context) error {\n\t\/\/ force reexec using the configured ID mappings\n\tmaybeReexecUsingUserNamespace(c, true)\n\t\/\/ exec the specified command, if there is one\n\targs := c.Args()\n\tif len(args) < 1 {\n\t\t\/\/ try to exec the shell, if one's set\n\t\tshell, shellSet := os.LookupEnv(\"SHELL\")\n\t\tif !shellSet {\n\t\t\tlogrus.Errorf(\"no command specified\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\targs = []string{shell}\n\t}\n\tcmd := exec.Command(args[0], args[1:]...)\n\tcmd.Env = append(os.Environ(), \"USER=root\", \"USERNAME=root\", \"GROUP=root\", \"LOGNAME=root\", \"UID=0\", \"GID=0\")\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\texecRunnable(cmd)\n\tos.Exit(1)\n\treturn nil\n}\n<commit_msg>Fix unshare gofmt issue<commit_after>\/\/ +build linux\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/user\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"syscall\"\n\n\t\"github.com\/containers\/buildah\/unshare\"\n\t\"github.com\/containers\/buildah\/util\"\n\t\"github.com\/opencontainers\/runtime-spec\/specs-go\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/syndtr\/gocapability\/capability\"\n\t\"github.com\/urfave\/cli\"\n)\n\nconst (\n\t\/\/ startedInUserNS is an environment variable that, if set, means that we shouldn't try\n\t\/\/ to create and enter a new user namespace and then re-exec ourselves.\n\tstartedInUserNS = \"_BUILDAH_STARTED_IN_USERNS\"\n)\n\nvar (\n\tunshareDescription = \"Runs a command in a modified user namespace\"\n\tunshareCommand = cli.Command{\n\t\tName: \"unshare\",\n\t\tUsage: \"Run a command in a modified user namespace\",\n\t\tDescription: unshareDescription,\n\t\tAction: unshareCmd,\n\t\tArgsUsage: \"[COMMAND [ARGS [...]]]\",\n\t\tSkipArgReorder: true,\n\t}\n)\n\ntype runnable interface {\n\tRun() error\n}\n\nfunc bailOnError(err error, format string, a ...interface{}) {\n\tif err != nil {\n\t\tif format != \"\" {\n\t\t\tlogrus.Errorf(\"%s: %v\", fmt.Sprintf(format, a...), err)\n\t\t} else {\n\t\t\tlogrus.Errorf(\"%v\", err)\n\t\t}\n\t\tcli.OsExiter(1)\n\t}\n}\n\nfunc maybeReexecUsingUserNamespace(c *cli.Context, evenForRoot bool) {\n\t\/\/ If we've already been through this once, no need to try again.\n\tif os.Getenv(startedInUserNS) != \"\" {\n\t\treturn\n\t}\n\n\t\/\/ If this is one of the commands that doesn't need this indirection, skip it.\n\tif c.NArg() == 0 {\n\t\treturn\n\t}\n\tswitch c.Args()[0] {\n\tcase \"help\", \"version\":\n\t\treturn\n\t}\n\n\t\/\/ Figure out who we are.\n\tme, err := user.Current()\n\tbailOnError(err, \"error determining current user\")\n\tuidNum, err := strconv.ParseUint(me.Uid, 10, 32)\n\tbailOnError(err, \"error parsing current UID %s\", me.Uid)\n\tgidNum, err := strconv.ParseUint(me.Gid, 10, 32)\n\tbailOnError(err, \"error parsing current GID %s\", me.Gid)\n\n\truntime.LockOSThread()\n\tdefer runtime.UnlockOSThread()\n\n\t\/\/ ID mappings to use to reexec ourselves.\n\tvar uidmap, gidmap []specs.LinuxIDMapping\n\tif uidNum != 0 || evenForRoot {\n\t\t\/\/ Read the set of ID mappings that we're allowed to use. Each\n\t\t\/\/ range in \/etc\/subuid and \/etc\/subgid file is a starting host\n\t\t\/\/ ID and a range size.\n\t\tuidmap, gidmap, err = util.GetSubIDMappings(me.Username, me.Username)\n\t\tbailOnError(err, \"error reading allowed ID mappings\")\n\t\tif len(uidmap) == 0 {\n\t\t\tlogrus.Warnf(\"Found no UID ranges set aside for user %q in \/etc\/subuid.\", me.Username)\n\t\t}\n\t\tif len(gidmap) == 0 {\n\t\t\tlogrus.Warnf(\"Found no GID ranges set aside for user %q in \/etc\/subgid.\", me.Username)\n\t\t}\n\t\t\/\/ Map our UID and GID, then the subuid and subgid ranges,\n\t\t\/\/ consecutively, starting at 0, to get the mappings to use for\n\t\t\/\/ a copy of ourselves.\n\t\tuidmap = append([]specs.LinuxIDMapping{{HostID: uint32(uidNum), ContainerID: 0, Size: 1}}, uidmap...)\n\t\tgidmap = append([]specs.LinuxIDMapping{{HostID: uint32(gidNum), ContainerID: 0, Size: 1}}, gidmap...)\n\t\tvar rangeStart uint32\n\t\tfor i := range uidmap {\n\t\t\tuidmap[i].ContainerID = rangeStart\n\t\t\trangeStart += uidmap[i].Size\n\t\t}\n\t\trangeStart = 0\n\t\tfor i := range gidmap {\n\t\t\tgidmap[i].ContainerID = rangeStart\n\t\t\trangeStart += gidmap[i].Size\n\t\t}\n\t} else {\n\t\t\/\/ If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able\n\t\t\/\/ to use unshare(), so don't bother creating a new user namespace at this point.\n\t\tcapabilities, err := capability.NewPid(0)\n\t\tbailOnError(err, \"error reading the current capabilities sets\")\n\t\tif capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) {\n\t\t\treturn\n\t\t}\n\t\t\/\/ Read the set of ID mappings that we're currently using.\n\t\tuidmap, gidmap, err = util.GetHostIDMappings(\"\")\n\t\tbailOnError(err, \"error reading current ID mappings\")\n\t\t\/\/ Just reuse them.\n\t\tfor i := range uidmap {\n\t\t\tuidmap[i].HostID = uidmap[i].ContainerID\n\t\t}\n\t\tfor i := range gidmap {\n\t\t\tgidmap[i].HostID = gidmap[i].ContainerID\n\t\t}\n\t}\n\n\t\/\/ Unlike most uses of reexec or unshare, we're using a name that\n\t\/\/ _won't_ be recognized as a registered reexec handler, since we\n\t\/\/ _want_ to fall through reexec.Init() to the normal main().\n\tcmd := unshare.Command(append([]string{\"buildah-in-a-user-namespace\"}, os.Args[1:]...)...)\n\n\t\/\/ If, somehow, we don't become UID 0 in our child, indicate that the child shouldn't try again.\n\terr = os.Setenv(startedInUserNS, \"1\")\n\tbailOnError(err, \"error setting %s=1 in environment\", startedInUserNS)\n\n\t\/\/ Set the default isolation type to use the \"rootless\" method.\n\tif _, present := os.LookupEnv(\"BUILDAH_ISOLATION\"); !present {\n\t\tif err = os.Setenv(\"BUILDAH_ISOLATION\", \"rootless\"); err != nil {\n\t\t\tlogrus.Errorf(\"error setting BUILDAH_ISOLATION=rootless in environment: %v\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t\/\/ Reuse our stdio.\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\t\/\/ Set up a new user namespace with the ID mapping.\n\tcmd.UnshareFlags = syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS\n\tcmd.UseNewuidmap = uidNum != 0\n\tcmd.UidMappings = uidmap\n\tcmd.UseNewgidmap = uidNum != 0\n\tcmd.GidMappings = gidmap\n\tcmd.GidMappingsEnableSetgroups = true\n\n\t\/\/ Finish up.\n\tlogrus.Debugf(\"running %+v with environment %+v, UID map %+v, and GID map %+v\", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)\n\texecRunnable(cmd)\n}\n\n\/\/ execRunnable runs the specified unshare command, captures its exit status,\n\/\/ and exits with the same status.\nfunc execRunnable(cmd runnable) {\n\tif err := cmd.Run(); err != nil {\n\t\tif exitError, ok := errors.Cause(err).(*exec.ExitError); ok {\n\t\t\tif exitError.ProcessState.Exited() {\n\t\t\t\tif waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {\n\t\t\t\t\tif waitStatus.Exited() {\n\t\t\t\t\t\tlogrus.Errorf(\"%v\", exitError)\n\t\t\t\t\t\tos.Exit(waitStatus.ExitStatus())\n\t\t\t\t\t}\n\t\t\t\t\tif waitStatus.Signaled() {\n\t\t\t\t\t\tlogrus.Errorf(\"%v\", exitError)\n\t\t\t\t\t\tos.Exit(int(waitStatus.Signal()) + 128)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tlogrus.Errorf(\"%v\", err)\n\t\tlogrus.Errorf(\"(unable to determine exit status)\")\n\t\tos.Exit(1)\n\t}\n\tos.Exit(0)\n}\n\n\/\/ unshareCmd execs whatever using the ID mappings that we want to use for ourselves\nfunc unshareCmd(c *cli.Context) error {\n\t\/\/ force reexec using the configured ID mappings\n\tmaybeReexecUsingUserNamespace(c, true)\n\t\/\/ exec the specified command, if there is one\n\targs := c.Args()\n\tif len(args) < 1 {\n\t\t\/\/ try to exec the shell, if one's set\n\t\tshell, shellSet := os.LookupEnv(\"SHELL\")\n\t\tif !shellSet {\n\t\t\tlogrus.Errorf(\"no command specified\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\targs = []string{shell}\n\t}\n\tcmd := exec.Command(args[0], args[1:]...)\n\tcmd.Env = append(os.Environ(), \"USER=root\", \"USERNAME=root\", \"GROUP=root\", \"LOGNAME=root\", \"UID=0\", \"GID=0\")\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\texecRunnable(cmd)\n\tos.Exit(1)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n\n\t\"github.com\/codegangsta\/cli\"\n\tdockerbuilder \"github.com\/remind101\/conveyor\/builder\/docker\"\n)\n\nvar cmdServer = cli.Command{\n\tName: \"server\",\n\tUsage: \"Run an http server to build Docker images whenever a push event happens on GitHub\",\n\tAction: runServer,\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"port\",\n\t\t\tValue: \"8080\",\n\t\t\tUsage: \"Port to run the server on\",\n\t\t\tEnvVar: \"PORT\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"github.token\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"GitHub API token to use when updating commit statuses on repositories.\",\n\t\t\tEnvVar: \"GITHUB_TOKEN\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"github.secret\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Shared secret used by GitHub to sign webhook payloads. This secret will be used to verify that the request came from GitHub.\",\n\t\t\tEnvVar: \"GITHUB_SECRET\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"dry\",\n\t\t\tUsage: \"Enable dry run mode.\",\n\t\t\tEnvVar: \"DRY\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"builder.image\",\n\t\t\tValue: dockerbuilder.DefaultBuilderImage,\n\t\t\tUsage: \"A docker image to use to perform the build.\",\n\t\t\tEnvVar: \"BUILDER_IMAGE\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"queue\",\n\t\t\tValue: \"memory:\/\/\",\n\t\t\tUsage: \"Specify a queue to use for scale out. The default behavior is to spin up workers that consume off of an in memory queue.\",\n\t\t\tEnvVar: \"QUEUE\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"logger\",\n\t\t\tValue: \"stdout:\/\/\",\n\t\t\tUsage: \"The logger to use. Available options are `stdout:\/\/`, or `s3:\/\/bucket`.\",\n\t\t\tEnvVar: \"LOGGER\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"reporter\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"The reporter to use to report errors. Available options are `hb:\/\/api.honeybadger.io?key=<key>&environment=<environment>\",\n\t\t\tEnvVar: \"REPORTER\",\n\t\t},\n\t},\n}\n\nfunc runServer(c *cli.Context) {\n\tport := c.String(\"port\")\n\n\tb, err := newConveyor(c)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tquit := make(chan os.Signal, 1)\n\tsignal.Notify(quit, os.Interrupt, syscall.SIGTERM)\n\tgo func() {\n\t\tsig := <-quit\n\n\t\tlog.Printf(\"Signal %d received. Shutting down.\\n\", sig)\n\t\tif err := b.Cancel(); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tos.Exit(0)\n\t}()\n\n\ts, err := newServer(c, b)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Println(\"Listening on \" + port)\n\tlog.Fatal(http.ListenAndServe(\":\"+port, s))\n}\n<commit_msg>Remove queue flag.<commit_after>package main\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n\n\t\"github.com\/codegangsta\/cli\"\n\tdockerbuilder \"github.com\/remind101\/conveyor\/builder\/docker\"\n)\n\nvar cmdServer = cli.Command{\n\tName: \"server\",\n\tUsage: \"Run an http server to build Docker images whenever a push event happens on GitHub\",\n\tAction: runServer,\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"port\",\n\t\t\tValue: \"8080\",\n\t\t\tUsage: \"Port to run the server on\",\n\t\t\tEnvVar: \"PORT\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"github.token\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"GitHub API token to use when updating commit statuses on repositories.\",\n\t\t\tEnvVar: \"GITHUB_TOKEN\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"github.secret\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"Shared secret used by GitHub to sign webhook payloads. This secret will be used to verify that the request came from GitHub.\",\n\t\t\tEnvVar: \"GITHUB_SECRET\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"dry\",\n\t\t\tUsage: \"Enable dry run mode.\",\n\t\t\tEnvVar: \"DRY\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"builder.image\",\n\t\t\tValue: dockerbuilder.DefaultBuilderImage,\n\t\t\tUsage: \"A docker image to use to perform the build.\",\n\t\t\tEnvVar: \"BUILDER_IMAGE\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"logger\",\n\t\t\tValue: \"stdout:\/\/\",\n\t\t\tUsage: \"The logger to use. Available options are `stdout:\/\/`, or `s3:\/\/bucket`.\",\n\t\t\tEnvVar: \"LOGGER\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"reporter\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"The reporter to use to report errors. Available options are `hb:\/\/api.honeybadger.io?key=<key>&environment=<environment>\",\n\t\t\tEnvVar: \"REPORTER\",\n\t\t},\n\t},\n}\n\nfunc runServer(c *cli.Context) {\n\tport := c.String(\"port\")\n\n\tb, err := newConveyor(c)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tquit := make(chan os.Signal, 1)\n\tsignal.Notify(quit, os.Interrupt, syscall.SIGTERM)\n\tgo func() {\n\t\tsig := <-quit\n\n\t\tlog.Printf(\"Signal %d received. Shutting down.\\n\", sig)\n\t\tif err := b.Cancel(); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tos.Exit(0)\n\t}()\n\n\ts, err := newServer(c, b)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Println(\"Listening on \" + port)\n\tlog.Fatal(http.ListenAndServe(\":\"+port, s))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/datawire\/ambassador\/pkg\/supervisor\"\n)\n\n\/\/ Connect the daemon to a cluster\nfunc (d *Daemon) Connect(\n\tp *supervisor.Process, out *Emitter, rai *RunAsInfo,\n\tcontext, namespace string, kargs []string,\n) error {\n\t\/\/ Sanity checks\n\tif d.cluster != nil {\n\t\tout.Println(\"Already connected\")\n\t\tout.Send(\"connect\", \"Already connected\")\n\t\treturn nil\n\t}\n\tif d.bridge != nil {\n\t\tout.Println(\"Not ready: Trying to disconnect\")\n\t\tout.Send(\"connect\", \"Not ready: Trying to disconnect\")\n\t\treturn nil\n\t}\n\tif !d.network.IsOkay() {\n\t\tout.Println(\"Not ready: Establishing network overrides\")\n\t\tout.Send(\"connect\", \"Not ready: Establishing network overrides\")\n\t\treturn nil\n\t}\n\n\tout.Println(\"Connecting...\")\n\tout.Send(\"connect\", \"Connecting...\")\n\tcluster, err := TrackKCluster(p, rai, context, namespace, kargs)\n\tif err != nil {\n\t\tout.Println(err.Error())\n\t\tout.Send(\"failed\", err.Error())\n\t\tout.SendExit(1)\n\t\treturn nil\n\t}\n\td.cluster = cluster\n\n\tbridge, err := CheckedRetryingCommand(\n\t\tp,\n\t\t\"bridge\",\n\t\t[]string{edgectl, \"teleproxy\", \"bridge\", cluster.context, cluster.namespace},\n\t\trai,\n\t\tcheckBridge,\n\t\t15*time.Second,\n\t)\n\tif err != nil {\n\t\tout.Println(err.Error())\n\t\tout.Send(\"failed\", err.Error())\n\t\tout.SendExit(1)\n\t\td.cluster.Close()\n\t\td.cluster = nil\n\t\treturn nil\n\t}\n\td.bridge = bridge\n\td.cluster.SetBridgeCheck(d.bridge.IsOkay)\n\n\tout.Printf(\n\t\t\"Connected to context %s (%s)\\n\", d.cluster.Context(), d.cluster.Server(),\n\t)\n\tout.Send(\"cluster.context\", d.cluster.Context())\n\tout.Send(\"cluster.server\", d.cluster.Server())\n\n\ttmgr, err := NewTrafficManager(p, d.cluster)\n\tif err != nil {\n\t\tout.Println()\n\t\tout.Println(\"Unable to connect to the traffic manager in your cluster.\")\n\t\tout.Println(\"The intercept feature will not be available.\")\n\t\tout.Println(\"Error was:\", err)\n\t\t\/\/ out.Println(\"Use <some command> to set up the traffic manager.\") \/\/ FIXME\n\t\tout.Send(\"intercept\", false)\n\t} else {\n\t\td.trafficMgr = tmgr\n\t\tout.Send(\"intercept\", true)\n\t}\n\treturn nil\n}\n\n\/\/ Disconnect from the connected cluster\nfunc (d *Daemon) Disconnect(p *supervisor.Process, out *Emitter) error {\n\t\/\/ Sanity checks\n\tif d.cluster == nil {\n\t\tout.Println(\"Not connected\")\n\t\tout.Send(\"disconnect\", \"Not connected\")\n\t\treturn nil\n\t}\n\n\t_ = d.ClearIntercepts(p)\n\tif d.bridge != nil {\n\t\td.cluster.SetBridgeCheck(nil) \/\/ Stop depending on this bridge\n\t\t_ = d.bridge.Close()\n\t\td.bridge = nil\n\t}\n\tif d.trafficMgr != nil {\n\t\t_ = d.trafficMgr.Close()\n\t\td.trafficMgr = nil\n\t}\n\terr := d.cluster.Close()\n\td.cluster = nil\n\n\tout.Println(\"Disconnected\")\n\tout.Send(\"disconnect\", \"Diconnected\")\n\treturn err\n}\n\n\/\/ checkBridge checks the status of teleproxy bridge by doing the equivalent of\n\/\/ curl -k https:\/\/kubernetes\/api\/. It's okay to create a new client each time\n\/\/ because we don't want to reuse connections.\nfunc checkBridge(p *supervisor.Process) error {\n\t\/\/ A zero-value transport is (probably) okay because we set a tight overall\n\t\/\/ timeout on the client\n\ttr := &http.Transport{\n\t\t\/\/ #nosec G402\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\tclient := http.Client{Timeout: 10 * time.Second, Transport: tr}\n\tres, err := client.Get(\"https:\/\/kubernetes.default\/api\/\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"get\")\n\t}\n\t_, err = ioutil.ReadAll(res.Body)\n\tres.Body.Close()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"read body\")\n\t}\n\treturn nil\n}\n\n\/\/ GetFreePort asks the kernel for a free open port that is ready to use.\n\/\/ Similar to telepresence.utilities.find_free_port()\nfunc GetFreePort() (int, error) {\n\tlc := net.ListenConfig{\n\t\tControl: func(network, address string, c syscall.RawConn) error {\n\t\t\tvar operr error\n\t\t\tfn := func(fd uintptr) {\n\t\t\t\toperr = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)\n\t\t\t}\n\t\t\tif err := c.Control(fn); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn operr\n\t\t},\n\t}\n\tl, err := lc.Listen(context.Background(), \"tcp\", \":0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\treturn l.Addr().(*net.TCPAddr).Port, nil\n}\n\n\/\/ TrafficManager is a handle to access the Traffic Manager in a\n\/\/ cluster.\ntype TrafficManager struct {\n\tcrc Resource\n\tapiPort int\n\tsshPort int\n\tclient *http.Client\n\tinterceptables []string\n\ttotalClusCepts int\n}\n\n\/\/ NewTrafficManager returns a TrafficManager resource for the given\n\/\/ cluster if it has a Traffic Manager service.\nfunc NewTrafficManager(p *supervisor.Process, cluster *KCluster) (*TrafficManager, error) {\n\tcmd := cluster.GetKubectlCmd(p, \"get\", \"svc\/telepresence-proxy\", \"deploy\/telepresence-proxy\")\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"kubectl get svc\/deploy telepresency-proxy\")\n\t}\n\n\tapiPort, err := GetFreePort()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get free port for API\")\n\t}\n\tsshPort, err := GetFreePort()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get free port for ssh\")\n\t}\n\tkpfArgStr := fmt.Sprintf(\"port-forward svc\/telepresence-proxy %d:8022 %d:8081\", sshPort, apiPort)\n\tkpfArgs := cluster.GetKubectlArgs(strings.Fields(kpfArgStr)...)\n\ttm := &TrafficManager{apiPort: apiPort, sshPort: sshPort}\n\n\tpf, err := CheckedRetryingCommand(p, \"traffic-kpf\", kpfArgs, cluster.RAI(), tm.check, 15*time.Second)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttm.crc = pf\n\ttm.client = &http.Client{Timeout: 10 * time.Second}\n\treturn tm, nil\n}\n\nfunc (tm *TrafficManager) check(p *supervisor.Process) error {\n\tbody, _, err := tm.request(\"GET\", \"state\", []byte{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar state map[string]interface{}\n\tif err := json.Unmarshal([]byte(body), &state); err != nil {\n\t\tp.Logf(\"check: bad JSON from tm: %v\", err)\n\t\tp.Logf(\"check: JSON data is: %q\", body)\n\t\treturn err\n\t}\n\tdeployments, ok := state[\"Deployments\"].(map[string]interface{})\n\tif !ok {\n\t\tp.Log(\"check: failed to get deployment info\")\n\t\tp.Logf(\"check: JSON data is: %q\", body)\n\t}\n\ttm.interceptables = make([]string, len(deployments))\n\ttm.totalClusCepts = 0\n\tidx := 0\n\tfor deployment := range deployments {\n\t\ttm.interceptables[idx] = deployment\n\t\tidx++\n\t\tinfo, ok := deployments[deployment].(map[string]interface{})\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tcepts, ok := info[\"Intercepts\"].([]interface{})\n\t\tif ok {\n\t\t\ttm.totalClusCepts += len(cepts)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (tm *TrafficManager) request(method, path string, data []byte) (result string, code int, err error) {\n\turl := fmt.Sprintf(\"http:\/\/127.0.0.1:%d\/%s\", tm.apiPort, path)\n\treq, err := http.NewRequest(method, url, bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn\n\t}\n\tresp, err := tm.client.Do(req)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"get\")\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tcode = resp.StatusCode\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"read body\")\n\t\treturn\n\t}\n\tresult = string(body)\n\treturn\n}\n\n\/\/ Name implements Resource\nfunc (tm *TrafficManager) Name() string {\n\treturn \"trafficMgr\"\n}\n\n\/\/ IsOkay implements Resource\nfunc (tm *TrafficManager) IsOkay() bool {\n\treturn tm.crc.IsOkay()\n}\n\n\/\/ Close implements Resource\nfunc (tm *TrafficManager) Close() error {\n\treturn tm.crc.Close()\n}\n<commit_msg>Fix typo<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/datawire\/ambassador\/pkg\/supervisor\"\n)\n\n\/\/ Connect the daemon to a cluster\nfunc (d *Daemon) Connect(\n\tp *supervisor.Process, out *Emitter, rai *RunAsInfo,\n\tcontext, namespace string, kargs []string,\n) error {\n\t\/\/ Sanity checks\n\tif d.cluster != nil {\n\t\tout.Println(\"Already connected\")\n\t\tout.Send(\"connect\", \"Already connected\")\n\t\treturn nil\n\t}\n\tif d.bridge != nil {\n\t\tout.Println(\"Not ready: Trying to disconnect\")\n\t\tout.Send(\"connect\", \"Not ready: Trying to disconnect\")\n\t\treturn nil\n\t}\n\tif !d.network.IsOkay() {\n\t\tout.Println(\"Not ready: Establishing network overrides\")\n\t\tout.Send(\"connect\", \"Not ready: Establishing network overrides\")\n\t\treturn nil\n\t}\n\n\tout.Println(\"Connecting...\")\n\tout.Send(\"connect\", \"Connecting...\")\n\tcluster, err := TrackKCluster(p, rai, context, namespace, kargs)\n\tif err != nil {\n\t\tout.Println(err.Error())\n\t\tout.Send(\"failed\", err.Error())\n\t\tout.SendExit(1)\n\t\treturn nil\n\t}\n\td.cluster = cluster\n\n\tbridge, err := CheckedRetryingCommand(\n\t\tp,\n\t\t\"bridge\",\n\t\t[]string{edgectl, \"teleproxy\", \"bridge\", cluster.context, cluster.namespace},\n\t\trai,\n\t\tcheckBridge,\n\t\t15*time.Second,\n\t)\n\tif err != nil {\n\t\tout.Println(err.Error())\n\t\tout.Send(\"failed\", err.Error())\n\t\tout.SendExit(1)\n\t\td.cluster.Close()\n\t\td.cluster = nil\n\t\treturn nil\n\t}\n\td.bridge = bridge\n\td.cluster.SetBridgeCheck(d.bridge.IsOkay)\n\n\tout.Printf(\n\t\t\"Connected to context %s (%s)\\n\", d.cluster.Context(), d.cluster.Server(),\n\t)\n\tout.Send(\"cluster.context\", d.cluster.Context())\n\tout.Send(\"cluster.server\", d.cluster.Server())\n\n\ttmgr, err := NewTrafficManager(p, d.cluster)\n\tif err != nil {\n\t\tout.Println()\n\t\tout.Println(\"Unable to connect to the traffic manager in your cluster.\")\n\t\tout.Println(\"The intercept feature will not be available.\")\n\t\tout.Println(\"Error was:\", err)\n\t\t\/\/ out.Println(\"Use <some command> to set up the traffic manager.\") \/\/ FIXME\n\t\tout.Send(\"intercept\", false)\n\t} else {\n\t\td.trafficMgr = tmgr\n\t\tout.Send(\"intercept\", true)\n\t}\n\treturn nil\n}\n\n\/\/ Disconnect from the connected cluster\nfunc (d *Daemon) Disconnect(p *supervisor.Process, out *Emitter) error {\n\t\/\/ Sanity checks\n\tif d.cluster == nil {\n\t\tout.Println(\"Not connected\")\n\t\tout.Send(\"disconnect\", \"Not connected\")\n\t\treturn nil\n\t}\n\n\t_ = d.ClearIntercepts(p)\n\tif d.bridge != nil {\n\t\td.cluster.SetBridgeCheck(nil) \/\/ Stop depending on this bridge\n\t\t_ = d.bridge.Close()\n\t\td.bridge = nil\n\t}\n\tif d.trafficMgr != nil {\n\t\t_ = d.trafficMgr.Close()\n\t\td.trafficMgr = nil\n\t}\n\terr := d.cluster.Close()\n\td.cluster = nil\n\n\tout.Println(\"Disconnected\")\n\tout.Send(\"disconnect\", \"Disconnected\")\n\treturn err\n}\n\n\/\/ checkBridge checks the status of teleproxy bridge by doing the equivalent of\n\/\/ curl -k https:\/\/kubernetes\/api\/. It's okay to create a new client each time\n\/\/ because we don't want to reuse connections.\nfunc checkBridge(p *supervisor.Process) error {\n\t\/\/ A zero-value transport is (probably) okay because we set a tight overall\n\t\/\/ timeout on the client\n\ttr := &http.Transport{\n\t\t\/\/ #nosec G402\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\tclient := http.Client{Timeout: 10 * time.Second, Transport: tr}\n\tres, err := client.Get(\"https:\/\/kubernetes.default\/api\/\")\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"get\")\n\t}\n\t_, err = ioutil.ReadAll(res.Body)\n\tres.Body.Close()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"read body\")\n\t}\n\treturn nil\n}\n\n\/\/ GetFreePort asks the kernel for a free open port that is ready to use.\n\/\/ Similar to telepresence.utilities.find_free_port()\nfunc GetFreePort() (int, error) {\n\tlc := net.ListenConfig{\n\t\tControl: func(network, address string, c syscall.RawConn) error {\n\t\t\tvar operr error\n\t\t\tfn := func(fd uintptr) {\n\t\t\t\toperr = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_REUSEADDR, 1)\n\t\t\t}\n\t\t\tif err := c.Control(fn); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn operr\n\t\t},\n\t}\n\tl, err := lc.Listen(context.Background(), \"tcp\", \":0\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer l.Close()\n\treturn l.Addr().(*net.TCPAddr).Port, nil\n}\n\n\/\/ TrafficManager is a handle to access the Traffic Manager in a\n\/\/ cluster.\ntype TrafficManager struct {\n\tcrc Resource\n\tapiPort int\n\tsshPort int\n\tclient *http.Client\n\tinterceptables []string\n\ttotalClusCepts int\n}\n\n\/\/ NewTrafficManager returns a TrafficManager resource for the given\n\/\/ cluster if it has a Traffic Manager service.\nfunc NewTrafficManager(p *supervisor.Process, cluster *KCluster) (*TrafficManager, error) {\n\tcmd := cluster.GetKubectlCmd(p, \"get\", \"svc\/telepresence-proxy\", \"deploy\/telepresence-proxy\")\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"kubectl get svc\/deploy telepresency-proxy\")\n\t}\n\n\tapiPort, err := GetFreePort()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get free port for API\")\n\t}\n\tsshPort, err := GetFreePort()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get free port for ssh\")\n\t}\n\tkpfArgStr := fmt.Sprintf(\"port-forward svc\/telepresence-proxy %d:8022 %d:8081\", sshPort, apiPort)\n\tkpfArgs := cluster.GetKubectlArgs(strings.Fields(kpfArgStr)...)\n\ttm := &TrafficManager{apiPort: apiPort, sshPort: sshPort}\n\n\tpf, err := CheckedRetryingCommand(p, \"traffic-kpf\", kpfArgs, cluster.RAI(), tm.check, 15*time.Second)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttm.crc = pf\n\ttm.client = &http.Client{Timeout: 10 * time.Second}\n\treturn tm, nil\n}\n\nfunc (tm *TrafficManager) check(p *supervisor.Process) error {\n\tbody, _, err := tm.request(\"GET\", \"state\", []byte{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar state map[string]interface{}\n\tif err := json.Unmarshal([]byte(body), &state); err != nil {\n\t\tp.Logf(\"check: bad JSON from tm: %v\", err)\n\t\tp.Logf(\"check: JSON data is: %q\", body)\n\t\treturn err\n\t}\n\tdeployments, ok := state[\"Deployments\"].(map[string]interface{})\n\tif !ok {\n\t\tp.Log(\"check: failed to get deployment info\")\n\t\tp.Logf(\"check: JSON data is: %q\", body)\n\t}\n\ttm.interceptables = make([]string, len(deployments))\n\ttm.totalClusCepts = 0\n\tidx := 0\n\tfor deployment := range deployments {\n\t\ttm.interceptables[idx] = deployment\n\t\tidx++\n\t\tinfo, ok := deployments[deployment].(map[string]interface{})\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tcepts, ok := info[\"Intercepts\"].([]interface{})\n\t\tif ok {\n\t\t\ttm.totalClusCepts += len(cepts)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (tm *TrafficManager) request(method, path string, data []byte) (result string, code int, err error) {\n\turl := fmt.Sprintf(\"http:\/\/127.0.0.1:%d\/%s\", tm.apiPort, path)\n\treq, err := http.NewRequest(method, url, bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn\n\t}\n\tresp, err := tm.client.Do(req)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"get\")\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tcode = resp.StatusCode\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"read body\")\n\t\treturn\n\t}\n\tresult = string(body)\n\treturn\n}\n\n\/\/ Name implements Resource\nfunc (tm *TrafficManager) Name() string {\n\treturn \"trafficMgr\"\n}\n\n\/\/ IsOkay implements Resource\nfunc (tm *TrafficManager) IsOkay() bool {\n\treturn tm.crc.IsOkay()\n}\n\n\/\/ Close implements Resource\nfunc (tm *TrafficManager) Close() error {\n\treturn tm.crc.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/urfave\/cli\"\n\t\"github.com\/usefathom\/fathom\/pkg\/models\"\n)\n\nvar registerCmd = cli.Command{\n\tName: \"register\",\n\tUsage: \"register a new admin user\",\n\tAction: register,\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"email, e\",\n\t\t\tUsage: \"user email\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"password, p\",\n\t\t\tUsage: \"user password\",\n\t\t},\n\t},\n}\n\nfunc register(c *cli.Context) error {\n\temail := c.String(\"email\")\n\tif email == \"\" {\n\t\treturn errors.New(\"Invalid arguments: missing email\")\n\t}\n\n\tpassword := c.String(\"password\")\n\tif password == \"\" {\n\t\treturn errors.New(\"Invalid arguments: missing password\")\n\t}\n\n\tuser := models.NewUser(email, password)\n\terr := app.database.SaveUser(&user)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating user: %s\", err)\n\t}\n\n\tlog.Infof(\"Created user %s\", user.Email)\n\treturn nil\n}\n<commit_msg>add --skip-bcrypt option to register command<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/urfave\/cli\"\n\t\"github.com\/usefathom\/fathom\/pkg\/models\"\n)\n\nvar registerCmd = cli.Command{\n\tName: \"register\",\n\tUsage: \"register a new admin user\",\n\tAction: register,\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"email, e\",\n\t\t\tUsage: \"user email\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"password, p\",\n\t\t\tUsage: \"user password\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"skip-bcrypt\",\n\t\t\tUsage: \"store password string as is, skipping bcrypt\",\n\t\t},\n\t},\n}\n\nfunc register(c *cli.Context) error {\n\temail := c.String(\"email\")\n\tif email == \"\" {\n\t\treturn errors.New(\"Invalid arguments: missing email\")\n\t}\n\n\tpassword := c.String(\"password\")\n\tif password == \"\" {\n\t\treturn errors.New(\"Invalid arguments: missing password\")\n\t}\n\n\tuser := models.NewUser(email, password)\n\n\t\/\/ set password manually if --skip-bcrypt was given\n\t\/\/ this is used to supply an already encrypted password string\n\tif c.Bool(\"skip-bcrypt\") {\n\t\tuser.Password = password\n\t}\n\n\tif err := app.database.SaveUser(&user); err != nil {\n\t\treturn fmt.Errorf(\"Error creating user: %s\", err)\n\t}\n\n\tlog.Infof(\"Created user %s\", user.Email)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"..\"\n\t\"..\/filters\"\n\t\"fmt\"\n\t\"os\"\n)\n\nfunc main() {\n\tgitmedia.SetupDebugging()\n\n\tcleaned, err := gitmediafilters.Clean(os.Stdin)\n\tif err != nil {\n\t\tfmt.Println(\"Error cleaning asset\")\n\t\tpanic(err)\n\t}\n\tdefer cleaned.Close()\n\n\ttmpfile := cleaned.File.Name()\n\tmediafile := gitmedia.LocalMediaPath(cleaned.Sha)\n\tif stat, _ := os.Stat(mediafile); stat != nil {\n\t\tif stat.Size() != cleaned.Size {\n\t\t\tgitmedia.Panic(nil, \"Files don't match:\\n%s\\n%s\", mediafile, tmpfile)\n\t\t}\n\t\tgitmedia.Debug(\"%s exists\", mediafile)\n\t} else {\n\t\tif err := os.Rename(tmpfile, mediafile); err != nil {\n\t\t\tgitmedia.Panic(err, \"Unable to move %s to %s\\n\", tmpfile, mediafile)\n\t\t}\n\t\tgitmedia.Debug(\"Writing %s\", mediafile)\n\t}\n\n\tgitmedia.Encode(os.Stdout, cleaned.Sha)\n}\n<commit_msg>アーア アアアア アーアー<commit_after>package main\n\nimport (\n\t\"..\"\n\t\"..\/filters\"\n\t\"..\/queuedir\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n)\n\nfunc main() {\n\tgitmedia.SetupDebugging()\n\tqdir := queuedir.New(filepath.Join(gitmedia.LocalMediaDir, \"queue\"))\n\tcleanqueue, err := qdir.Queue(\"clean\")\n\tif err != nil {\n\t\tfmt.Println(\"Error setting up queue\")\n\t\tpanic(err)\n\t}\n\n\tcleaned, err := gitmediafilters.Clean(os.Stdin)\n\tif err != nil {\n\t\tfmt.Println(\"Error cleaning asset\")\n\t\tpanic(err)\n\t}\n\tdefer cleaned.Close()\n\n\ttmpfile := cleaned.File.Name()\n\tmediafile := gitmedia.LocalMediaPath(cleaned.Sha)\n\tif stat, _ := os.Stat(mediafile); stat != nil {\n\t\tif stat.Size() != cleaned.Size {\n\t\t\tgitmedia.Panic(nil, \"Files don't match:\\n%s\\n%s\", mediafile, tmpfile)\n\t\t}\n\t\tgitmedia.Debug(\"%s exists\", mediafile)\n\t} else {\n\t\tif err := os.Rename(tmpfile, mediafile); err != nil {\n\t\t\tgitmedia.Panic(err, \"Unable to move %s to %s\\n\", tmpfile, mediafile)\n\t\t}\n\t\tif _, err := cleanqueue.AddString(cleaned.Sha); err != nil {\n\t\t\tgitmedia.Panic(err, \"Unable to add %s to queue\", cleaned.Sha)\n\t\t}\n\t\tgitmedia.Debug(\"Writing %s\", mediafile)\n\t}\n\n\tgitmedia.Encode(os.Stdout, cleaned.Sha)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/Songmu\/timeout\"\n\n\t\"code.google.com\/p\/getopt\"\n)\n\nfunc main() {\n\toptKillAfter := getopt.StringLong(\"kill-after\", 'k', \"\", \"help message for f\")\n\toptSig := getopt.StringLong(\"signal\", 's', \"\", \"help message for long\")\n\tp := getopt.BoolLong(\"preserve-status\", 0, \"help message for bool\")\n\n\topts := getopt.CommandLine\n\topts.Parse(os.Args)\n\n\trest := opts.Args()\n\tif len(rest) < 2 {\n\t\topts.PrintUsage(os.Stderr)\n\t\tos.Exit(1)\n\t}\n\n\tvar err error\n\tkillAfter := float64(0)\n\tif *optKillAfter != \"\" {\n\t\tkillAfter, err = parseDuration(*optKillAfter)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\t\tos.Exit(125)\n\t\t}\n\t}\n\n\tvar sig os.Signal\n\tif *optSig != \"\" {\n\t\tsig, err = parseSignal(*optSig)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\t\tos.Exit(125)\n\t\t}\n\t}\n\n\tdur, err := parseDuration(rest[0])\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\tos.Exit(125)\n\t}\n\n\tcmd := exec.Command(rest[1], rest[2:]...)\n\n\ttio := &timeout.Timeout{\n\t\tDuration: time.Duration(dur * float64(time.Second)),\n\t\tCmd: cmd,\n\t\tKillAfter: time.Duration(killAfter * float64(time.Second)),\n\t\tSignal: sig,\n\t}\n\texit := tio.RunSimple(*p)\n\tos.Exit(exit)\n}\n\nvar durRe = regexp.MustCompile(`^([-0-9e.]+)([smhd])?$`)\n\nfunc parseDuration(durStr string) (float64, error) {\n\tmatches := durRe.FindStringSubmatch(durStr)\n\tif len(matches) == 0 {\n\t\treturn 0, fmt.Errorf(\"duration format invalid: %s\", durStr)\n\t}\n\n\tbase, err := strconv.ParseFloat(matches[1], 64)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"invalid time interval `%s`\", durStr)\n\t}\n\tswitch matches[2] {\n\tcase \"\", \"s\":\n\t\treturn base, nil\n\tcase \"m\":\n\t\treturn base * 60, nil\n\tcase \"h\":\n\t\treturn base * 60 * 60, nil\n\tcase \"d\":\n\t\treturn base * 60 * 60 * 24, nil\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"invalid time interval `%s`\", durStr)\n\t}\n}\n<commit_msg>fix deps<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/Songmu\/timeout\"\n\t\"github.com\/pborman\/getopt\"\n)\n\nfunc main() {\n\toptKillAfter := getopt.StringLong(\"kill-after\", 'k', \"\", \"help message for f\")\n\toptSig := getopt.StringLong(\"signal\", 's', \"\", \"help message for long\")\n\tp := getopt.BoolLong(\"preserve-status\", 0, \"help message for bool\")\n\n\topts := getopt.CommandLine\n\topts.Parse(os.Args)\n\n\trest := opts.Args()\n\tif len(rest) < 2 {\n\t\topts.PrintUsage(os.Stderr)\n\t\tos.Exit(1)\n\t}\n\n\tvar err error\n\tkillAfter := float64(0)\n\tif *optKillAfter != \"\" {\n\t\tkillAfter, err = parseDuration(*optKillAfter)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\t\tos.Exit(125)\n\t\t}\n\t}\n\n\tvar sig os.Signal\n\tif *optSig != \"\" {\n\t\tsig, err = parseSignal(*optSig)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\t\tos.Exit(125)\n\t\t}\n\t}\n\n\tdur, err := parseDuration(rest[0])\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\tos.Exit(125)\n\t}\n\n\tcmd := exec.Command(rest[1], rest[2:]...)\n\n\ttio := &timeout.Timeout{\n\t\tDuration: time.Duration(dur * float64(time.Second)),\n\t\tCmd: cmd,\n\t\tKillAfter: time.Duration(killAfter * float64(time.Second)),\n\t\tSignal: sig,\n\t}\n\texit := tio.RunSimple(*p)\n\tos.Exit(exit)\n}\n\nvar durRe = regexp.MustCompile(`^([-0-9e.]+)([smhd])?$`)\n\nfunc parseDuration(durStr string) (float64, error) {\n\tmatches := durRe.FindStringSubmatch(durStr)\n\tif len(matches) == 0 {\n\t\treturn 0, fmt.Errorf(\"duration format invalid: %s\", durStr)\n\t}\n\n\tbase, err := strconv.ParseFloat(matches[1], 64)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"invalid time interval `%s`\", durStr)\n\t}\n\tswitch matches[2] {\n\tcase \"\", \"s\":\n\t\treturn base, nil\n\tcase \"m\":\n\t\treturn base * 60, nil\n\tcase \"h\":\n\t\treturn base * 60 * 60, nil\n\tcase \"d\":\n\t\treturn base * 60 * 60 * 24, nil\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"invalid time interval `%s`\", durStr)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\n\/\/ ke: {\"notest\":true}\n\nimport (\n\t\"log\"\n\t\"path\/filepath\"\n\n\t\"strings\"\n\n\t\"fmt\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/tools\/cover\"\n\t\"kego.io\/cmd\/gotests\/scanner\"\n\t\"kego.io\/cmd\/gotests\/tester\"\n\t\"kego.io\/process\/packages\"\n)\n\nvar baseDir string\n\nfunc excludeWrap(profiles []*cover.Profile) error {\n\tsource, err := scanner.Get(baseDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm := map[string]*cover.Profile{}\n\tfor _, p := range profiles {\n\t\tm[p.FileName] = p\n\t}\n\n\tfor _, w := range source.Wraps {\n\t\tp, ok := m[w.File]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, b := range p.Blocks {\n\t\t\tif b.StartLine <= w.Line && b.EndLine >= w.Line && b.Count != 1 {\n\t\t\t\tb.Count = 1\n\t\t\t\tfmt.Printf(\"Excluding kerr.Wrap from %s:%d\\n\", w.File, w.Line)\n\t\t\t}\n\t\t}\n\t}\n\treturn err\n}\nfunc excludeGenerated(profiles []*cover.Profile) error {\n\tfor _, p := range profiles {\n\t\tif strings.HasSuffix(p.FileName, \"\/generated.go\") {\n\t\t\tfmt.Println(\"Excluding\", p.FileName)\n\t\t\t\/\/ summarize the original Profile\n\t\t\tstatements := 0\n\t\t\tlastLine := 0\n\t\t\tlastCol := 0\n\t\t\tfor _, pb := range p.Blocks {\n\t\t\t\tstatements += pb.NumStmt\n\t\t\t\tif pb.EndLine >= lastLine {\n\t\t\t\t\tlastLine = pb.EndLine\n\t\t\t\t\tif pb.EndCol >= lastCol {\n\t\t\t\t\t\tlastCol = pb.EndCol\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ overwrite with a single block\n\t\t\tp.Blocks = []cover.ProfileBlock{\n\t\t\t\tcover.ProfileBlock{\n\t\t\t\t\tStartLine: 0,\n\t\t\t\t\tStartCol: 0,\n\t\t\t\t\tEndLine: lastLine,\n\t\t\t\t\tEndCol: lastCol,\n\t\t\t\t\tNumStmt: statements,\n\t\t\t\t\tCount: 1,\n\t\t\t\t},\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc main() {\n\tvar err error\n\tbaseDir, err = packages.GetDirFromPackage(context.Background(), \"kego.io\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tprofiles, err := tester.Get(baseDir)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\terr = excludeGenerated(profiles)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\terr = excludeWrap(profiles)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\terr = tester.Save(profiles, filepath.Join(baseDir, \"coverage.out\"))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<commit_msg>Gotests command excludes kerr.Wrap fix save<commit_after>package main\n\n\/\/ ke: {\"notest\":true}\n\nimport (\n\t\"log\"\n\t\"path\/filepath\"\n\n\t\"strings\"\n\n\t\"fmt\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"golang.org\/x\/tools\/cover\"\n\t\"kego.io\/cmd\/gotests\/scanner\"\n\t\"kego.io\/cmd\/gotests\/tester\"\n\t\"kego.io\/process\/packages\"\n)\n\nvar baseDir string\n\nfunc excludeWrap(profiles []*cover.Profile) error {\n\tsource, err := scanner.Get(baseDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm := map[string]*cover.Profile{}\n\tfor _, p := range profiles {\n\t\tm[p.FileName] = p\n\t}\n\n\tfor _, w := range source.Wraps {\n\t\tp, ok := m[w.File]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tfor i, b := range p.Blocks {\n\t\t\tif b.StartLine <= w.Line && b.EndLine >= w.Line && b.Count != 1 {\n\t\t\t\tb.Count = 1\n\t\t\t\tp.Blocks[i] = b\n\t\t\t\tfmt.Printf(\"Excluding kerr.Wrap from %s:%d\\n\", w.File, w.Line)\n\t\t\t}\n\t\t}\n\t}\n\treturn err\n}\nfunc excludeGenerated(profiles []*cover.Profile) error {\n\tfor _, p := range profiles {\n\t\tif strings.HasSuffix(p.FileName, \"\/generated.go\") {\n\t\t\tfmt.Println(\"Excluding\", p.FileName)\n\t\t\t\/\/ summarize the original Profile\n\t\t\tstatements := 0\n\t\t\tlastLine := 0\n\t\t\tlastCol := 0\n\t\t\tfor _, pb := range p.Blocks {\n\t\t\t\tstatements += pb.NumStmt\n\t\t\t\tif pb.EndLine >= lastLine {\n\t\t\t\t\tlastLine = pb.EndLine\n\t\t\t\t\tif pb.EndCol >= lastCol {\n\t\t\t\t\t\tlastCol = pb.EndCol\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ overwrite with a single block\n\t\t\tp.Blocks = []cover.ProfileBlock{\n\t\t\t\tcover.ProfileBlock{\n\t\t\t\t\tStartLine: 0,\n\t\t\t\t\tStartCol: 0,\n\t\t\t\t\tEndLine: lastLine,\n\t\t\t\t\tEndCol: lastCol,\n\t\t\t\t\tNumStmt: statements,\n\t\t\t\t\tCount: 1,\n\t\t\t\t},\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc main() {\n\tvar err error\n\tbaseDir, err = packages.GetDirFromPackage(context.Background(), \"kego.io\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tprofiles, err := tester.Get(baseDir)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\terr = excludeGenerated(profiles)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\terr = excludeWrap(profiles)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\terr = tester.Save(profiles, filepath.Join(baseDir, \"coverage.out\"))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright The Helm Authors.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nconst completionDesc = `\nGenerate autocompletions script for Helm for the specified shell (bash or zsh).\n\nThis command can generate shell autocompletions. e.g.\n\n\t$ helm completion bash\n\nCan be sourced as such\n\n\t$ source <(helm completion bash)\n`\n\nvar (\n\tcompletionShells = map[string]func(out io.Writer, cmd *cobra.Command) error{\n\t\t\"bash\": runCompletionBash,\n\t\t\"zsh\": runCompletionZsh,\n\t}\n)\n\nfunc newCompletionCmd(out io.Writer) *cobra.Command {\n\tshells := []string{}\n\tfor s := range completionShells {\n\t\tshells = append(shells, s)\n\t}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"completion SHELL\",\n\t\tShort: \"Generate autocompletions script for the specified shell (bash or zsh)\",\n\t\tLong: completionDesc,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn runCompletion(out, cmd, args)\n\t\t},\n\t\tValidArgs: shells,\n\t}\n\n\treturn cmd\n}\n\nfunc runCompletion(out io.Writer, cmd *cobra.Command, args []string) error {\n\tif len(args) == 0 {\n\t\treturn errors.New(\"shell not specified\")\n\t}\n\tif len(args) > 1 {\n\t\treturn errors.New(\"too many arguments, expected only the shell type\")\n\t}\n\trun, found := completionShells[args[0]]\n\tif !found {\n\t\treturn errors.Errorf(\"unsupported shell type %q\", args[0])\n\t}\n\n\treturn run(out, cmd)\n}\n\nfunc runCompletionBash(out io.Writer, cmd *cobra.Command) error {\n\treturn cmd.Root().GenBashCompletion(out)\n}\n\nfunc runCompletionZsh(out io.Writer, cmd *cobra.Command) error {\n\tzshInitialization := `#compdef helm\n\n__helm_bash_source() {\n\talias shopt=':'\n\talias _expand=_bash_expand\n\talias _complete=_bash_comp\n\temulate -L sh\n\tsetopt kshglob noshglob braceexpand\n\tsource \"$@\"\n}\n__helm_type() {\n\t# -t is not supported by zsh\n\tif [ \"$1\" == \"-t\" ]; then\n\t\tshift\n\t\t# fake Bash 4 to disable \"complete -o nospace\". Instead\n\t\t# \"compopt +-o nospace\" is used in the code to toggle trailing\n\t\t# spaces. We don't support that, but leave trailing spaces on\n\t\t# all the time\n\t\tif [ \"$1\" = \"__helm_compopt\" ]; then\n\t\t\techo builtin\n\t\t\treturn 0\n\t\tfi\n\tfi\n\ttype \"$@\"\n}\n__helm_compgen() {\n\tlocal completions w\n\tcompletions=( $(compgen \"$@\") ) || return $?\n\t# filter by given word as prefix\n\twhile [[ \"$1\" = -* && \"$1\" != -- ]]; do\n\t\tshift\n\t\tshift\n\tdone\n\tif [[ \"$1\" == -- ]]; then\n\t\tshift\n\tfi\n\tfor w in \"${completions[@]}\"; do\n\t\tif [[ \"${w}\" = \"$1\"* ]]; then\n\t\t\techo \"${w}\"\n\t\tfi\n\tdone\n}\n__helm_compopt() {\n\ttrue # don't do anything. Not supported by bashcompinit in zsh\n}\n__helm_declare() {\n\tif [ \"$1\" == \"-F\" ]; then\n\t\twhence -w \"$@\"\n\telse\n\t\tbuiltin declare \"$@\"\n\tfi\n}\n__helm_ltrim_colon_completions()\n{\n\tif [[ \"$1\" == *:* && \"$COMP_WORDBREAKS\" == *:* ]]; then\n\t\t# Remove colon-word prefix from COMPREPLY items\n\t\tlocal colon_word=${1%${1##*:}}\n\t\tlocal i=${#COMPREPLY[*]}\n\t\twhile [[ $((--i)) -ge 0 ]]; do\n\t\t\tCOMPREPLY[$i]=${COMPREPLY[$i]#\"$colon_word\"}\n\t\tdone\n\tfi\n}\n__helm_get_comp_words_by_ref() {\n\tcur=\"${COMP_WORDS[COMP_CWORD]}\"\n\tprev=\"${COMP_WORDS[${COMP_CWORD}-1]}\"\n\twords=(\"${COMP_WORDS[@]}\")\n\tcword=(\"${COMP_CWORD[@]}\")\n}\n__helm_filedir() {\n\tlocal RET OLD_IFS w qw\n\t__debug \"_filedir $@ cur=$cur\"\n\tif [[ \"$1\" = \\~* ]]; then\n\t\t# somehow does not work. Maybe, zsh does not call this at all\n\t\teval echo \"$1\"\n\t\treturn 0\n\tfi\n\tOLD_IFS=\"$IFS\"\n\tIFS=$'\\n'\n\tif [ \"$1\" = \"-d\" ]; then\n\t\tshift\n\t\tRET=( $(compgen -d) )\n\telse\n\t\tRET=( $(compgen -f) )\n\tfi\n\tIFS=\"$OLD_IFS\"\n\tIFS=\",\" __debug \"RET=${RET[@]} len=${#RET[@]}\"\n\tfor w in ${RET[@]}; do\n\t\tif [[ ! \"${w}\" = \"${cur}\"* ]]; then\n\t\t\tcontinue\n\t\tfi\n\t\tif eval \"[[ \\\"\\${w}\\\" = *.$1 || -d \\\"\\${w}\\\" ]]\"; then\n\t\t\tqw=\"$(__helm_quote \"${w}\")\"\n\t\t\tif [ -d \"${w}\" ]; then\n\t\t\t\tCOMPREPLY+=(\"${qw}\/\")\n\t\t\telse\n\t\t\t\tCOMPREPLY+=(\"${qw}\")\n\t\t\tfi\n\t\tfi\n\tdone\n}\n__helm_quote() {\n\tif [[ $1 == \\'* || $1 == \\\"* ]]; then\n\t\t# Leave out first character\n\t\tprintf %q \"${1:1}\"\n\telse\n\t\tprintf %q \"$1\"\n\tfi\n}\nautoload -U +X bashcompinit && bashcompinit\n# use word boundary patterns for BSD or GNU sed\nLWORD='[[:<:]]'\nRWORD='[[:>:]]'\nif sed --help 2>&1 | grep -q 'GNU\\|BusyBox'; then\n\tLWORD='\\<'\n\tRWORD='\\>'\nfi\n__helm_convert_bash_to_zsh() {\n\tsed \\\n\t-e 's\/declare -F\/whence -w\/' \\\n\t-e 's\/_get_comp_words_by_ref \"\\$@\"\/_get_comp_words_by_ref \"\\$*\"\/' \\\n\t-e 's\/local \\([a-zA-Z0-9_]*\\)=\/local \\1; \\1=\/' \\\n\t-e 's\/flags+=(\"\\(--.*\\)=\")\/flags+=(\"\\1\"); two_word_flags+=(\"\\1\")\/' \\\n\t-e 's\/must_have_one_flag+=(\"\\(--.*\\)=\")\/must_have_one_flag+=(\"\\1\")\/' \\\n\t-e \"s\/${LWORD}_filedir${RWORD}\/__helm_filedir\/g\" \\\n\t-e \"s\/${LWORD}_get_comp_words_by_ref${RWORD}\/__helm_get_comp_words_by_ref\/g\" \\\n\t-e \"s\/${LWORD}__ltrim_colon_completions${RWORD}\/__helm_ltrim_colon_completions\/g\" \\\n\t-e \"s\/${LWORD}compgen${RWORD}\/__helm_compgen\/g\" \\\n\t-e \"s\/${LWORD}compopt${RWORD}\/__helm_compopt\/g\" \\\n\t-e \"s\/${LWORD}declare${RWORD}\/__helm_declare\/g\" \\\n\t-e \"s\/\\\\\\$(type${RWORD}\/\\$(__helm_type\/g\" \\\n\t-e 's\/aliashash\\[\"\\(.\\{1,\\}\\)\"\\]\/aliashash[\\1]\/g' \\\n\t<<'BASH_COMPLETION_EOF'\n`\n\tout.Write([]byte(zshInitialization))\n\n\tbuf := new(bytes.Buffer)\n\tcmd.Root().GenBashCompletion(buf)\n\tout.Write(buf.Bytes())\n\n\tzshTail := `\nBASH_COMPLETION_EOF\n}\n__helm_bash_source <(__helm_convert_bash_to_zsh)\n`\n\tout.Write([]byte(zshTail))\n\treturn nil\n}\n<commit_msg>fix(completion): --flag=val breaks zsh completion<commit_after>\/*\nCopyright The Helm Authors.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nconst completionDesc = `\nGenerate autocompletions script for Helm for the specified shell (bash or zsh).\n\nThis command can generate shell autocompletions. e.g.\n\n\t$ helm completion bash\n\nCan be sourced as such\n\n\t$ source <(helm completion bash)\n`\n\nvar (\n\tcompletionShells = map[string]func(out io.Writer, cmd *cobra.Command) error{\n\t\t\"bash\": runCompletionBash,\n\t\t\"zsh\": runCompletionZsh,\n\t}\n)\n\nfunc newCompletionCmd(out io.Writer) *cobra.Command {\n\tshells := []string{}\n\tfor s := range completionShells {\n\t\tshells = append(shells, s)\n\t}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"completion SHELL\",\n\t\tShort: \"Generate autocompletions script for the specified shell (bash or zsh)\",\n\t\tLong: completionDesc,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn runCompletion(out, cmd, args)\n\t\t},\n\t\tValidArgs: shells,\n\t}\n\n\treturn cmd\n}\n\nfunc runCompletion(out io.Writer, cmd *cobra.Command, args []string) error {\n\tif len(args) == 0 {\n\t\treturn errors.New(\"shell not specified\")\n\t}\n\tif len(args) > 1 {\n\t\treturn errors.New(\"too many arguments, expected only the shell type\")\n\t}\n\trun, found := completionShells[args[0]]\n\tif !found {\n\t\treturn errors.Errorf(\"unsupported shell type %q\", args[0])\n\t}\n\n\treturn run(out, cmd)\n}\n\nfunc runCompletionBash(out io.Writer, cmd *cobra.Command) error {\n\treturn cmd.Root().GenBashCompletion(out)\n}\n\nfunc runCompletionZsh(out io.Writer, cmd *cobra.Command) error {\n\tzshInitialization := `#compdef helm\n\n__helm_bash_source() {\n\talias shopt=':'\n\talias _expand=_bash_expand\n\talias _complete=_bash_comp\n\temulate -L sh\n\tsetopt kshglob noshglob braceexpand\n\tsource \"$@\"\n}\n__helm_type() {\n\t# -t is not supported by zsh\n\tif [ \"$1\" == \"-t\" ]; then\n\t\tshift\n\t\t# fake Bash 4 to disable \"complete -o nospace\". Instead\n\t\t# \"compopt +-o nospace\" is used in the code to toggle trailing\n\t\t# spaces. We don't support that, but leave trailing spaces on\n\t\t# all the time\n\t\tif [ \"$1\" = \"__helm_compopt\" ]; then\n\t\t\techo builtin\n\t\t\treturn 0\n\t\tfi\n\tfi\n\ttype \"$@\"\n}\n__helm_compgen() {\n\tlocal completions w\n\tcompletions=( $(compgen \"$@\") ) || return $?\n\t# filter by given word as prefix\n\twhile [[ \"$1\" = -* && \"$1\" != -- ]]; do\n\t\tshift\n\t\tshift\n\tdone\n\tif [[ \"$1\" == -- ]]; then\n\t\tshift\n\tfi\n\tfor w in \"${completions[@]}\"; do\n\t\tif [[ \"${w}\" = \"$1\"* ]]; then\n\t\t\techo \"${w}\"\n\t\tfi\n\tdone\n}\n__helm_compopt() {\n\ttrue # don't do anything. Not supported by bashcompinit in zsh\n}\n__helm_ltrim_colon_completions()\n{\n\tif [[ \"$1\" == *:* && \"$COMP_WORDBREAKS\" == *:* ]]; then\n\t\t# Remove colon-word prefix from COMPREPLY items\n\t\tlocal colon_word=${1%${1##*:}}\n\t\tlocal i=${#COMPREPLY[*]}\n\t\twhile [[ $((--i)) -ge 0 ]]; do\n\t\t\tCOMPREPLY[$i]=${COMPREPLY[$i]#\"$colon_word\"}\n\t\tdone\n\tfi\n}\n__helm_get_comp_words_by_ref() {\n\tcur=\"${COMP_WORDS[COMP_CWORD]}\"\n\tprev=\"${COMP_WORDS[${COMP_CWORD}-1]}\"\n\twords=(\"${COMP_WORDS[@]}\")\n\tcword=(\"${COMP_CWORD[@]}\")\n}\n__helm_filedir() {\n\tlocal RET OLD_IFS w qw\n\t__debug \"_filedir $@ cur=$cur\"\n\tif [[ \"$1\" = \\~* ]]; then\n\t\t# somehow does not work. Maybe, zsh does not call this at all\n\t\teval echo \"$1\"\n\t\treturn 0\n\tfi\n\tOLD_IFS=\"$IFS\"\n\tIFS=$'\\n'\n\tif [ \"$1\" = \"-d\" ]; then\n\t\tshift\n\t\tRET=( $(compgen -d) )\n\telse\n\t\tRET=( $(compgen -f) )\n\tfi\n\tIFS=\"$OLD_IFS\"\n\tIFS=\",\" __debug \"RET=${RET[@]} len=${#RET[@]}\"\n\tfor w in ${RET[@]}; do\n\t\tif [[ ! \"${w}\" = \"${cur}\"* ]]; then\n\t\t\tcontinue\n\t\tfi\n\t\tif eval \"[[ \\\"\\${w}\\\" = *.$1 || -d \\\"\\${w}\\\" ]]\"; then\n\t\t\tqw=\"$(__helm_quote \"${w}\")\"\n\t\t\tif [ -d \"${w}\" ]; then\n\t\t\t\tCOMPREPLY+=(\"${qw}\/\")\n\t\t\telse\n\t\t\t\tCOMPREPLY+=(\"${qw}\")\n\t\t\tfi\n\t\tfi\n\tdone\n}\n__helm_quote() {\n\tif [[ $1 == \\'* || $1 == \\\"* ]]; then\n\t\t# Leave out first character\n\t\tprintf %q \"${1:1}\"\n\telse\n\t\tprintf %q \"$1\"\n\tfi\n}\nautoload -U +X bashcompinit && bashcompinit\n# use word boundary patterns for BSD or GNU sed\nLWORD='[[:<:]]'\nRWORD='[[:>:]]'\nif sed --help 2>&1 | grep -q 'GNU\\|BusyBox'; then\n\tLWORD='\\<'\n\tRWORD='\\>'\nfi\n__helm_convert_bash_to_zsh() {\n\tsed \\\n\t-e 's\/declare -F\/whence -w\/' \\\n\t-e 's\/_get_comp_words_by_ref \"\\$@\"\/_get_comp_words_by_ref \"\\$*\"\/' \\\n\t-e 's\/local \\([a-zA-Z0-9_]*\\)=\/local \\1; \\1=\/' \\\n\t-e 's\/flags+=(\"\\(--.*\\)=\")\/flags+=(\"\\1\"); two_word_flags+=(\"\\1\")\/' \\\n\t-e 's\/must_have_one_flag+=(\"\\(--.*\\)=\")\/must_have_one_flag+=(\"\\1\")\/' \\\n\t-e \"s\/${LWORD}_filedir${RWORD}\/__helm_filedir\/g\" \\\n\t-e \"s\/${LWORD}_get_comp_words_by_ref${RWORD}\/__helm_get_comp_words_by_ref\/g\" \\\n\t-e \"s\/${LWORD}__ltrim_colon_completions${RWORD}\/__helm_ltrim_colon_completions\/g\" \\\n\t-e \"s\/${LWORD}compgen${RWORD}\/__helm_compgen\/g\" \\\n\t-e \"s\/${LWORD}compopt${RWORD}\/__helm_compopt\/g\" \\\n\t-e \"s\/${LWORD}declare${RWORD}\/builtin declare\/g\" \\\n\t-e \"s\/\\\\\\$(type${RWORD}\/\\$(__helm_type\/g\" \\\n\t-e 's\/aliashash\\[\"\\(.\\{1,\\}\\)\"\\]\/aliashash[\\1]\/g' \\\n\t<<'BASH_COMPLETION_EOF'\n`\n\tout.Write([]byte(zshInitialization))\n\n\tbuf := new(bytes.Buffer)\n\tcmd.Root().GenBashCompletion(buf)\n\tout.Write(buf.Bytes())\n\n\tzshTail := `\nBASH_COMPLETION_EOF\n}\n__helm_bash_source <(__helm_convert_bash_to_zsh)\n`\n\tout.Write([]byte(zshTail))\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nconst completionDesc = `\nGenerate autocompletions script for Helm for the specified shell (bash or zsh).\n\nThis command can generate shell autocompletions. e.g.\n\n\t$ helm completion bash\n\nCan be sourced as such\n\n\t$ source <(helm completion bash)\n`\n\nvar (\n\tcompletionShells = map[string]func(out io.Writer, cmd *cobra.Command) error{\n\t\t\"bash\": runCompletionBash,\n\t\t\"zsh\": runCompletionZsh,\n\t}\n)\n\nfunc newCompletionCmd(out io.Writer) *cobra.Command {\n\tshells := []string{}\n\tfor s := range completionShells {\n\t\tshells = append(shells, s)\n\t}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"completion SHELL\",\n\t\tShort: \"Generate autocompletions script for the specified shell (bash or zsh)\",\n\t\tLong: completionDesc,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn runCompletion(out, cmd, args)\n\t\t},\n\t\tValidArgs: shells,\n\t}\n\n\treturn cmd\n}\n\nfunc runCompletion(out io.Writer, cmd *cobra.Command, args []string) error {\n\tif len(args) == 0 {\n\t\treturn fmt.Errorf(\"shell not specified\")\n\t}\n\tif len(args) > 1 {\n\t\treturn fmt.Errorf(\"too many arguments, expected only the shell type\")\n\t}\n\trun, found := completionShells[args[0]]\n\tif !found {\n\t\treturn fmt.Errorf(\"unsupported shell type %q\", args[0])\n\t}\n\n\treturn run(out, cmd)\n}\n\nfunc runCompletionBash(out io.Writer, cmd *cobra.Command) error {\n\treturn cmd.Root().GenBashCompletion(out)\n}\n\nfunc runCompletionZsh(out io.Writer, cmd *cobra.Command) error {\n\tzshInitialization := `\n__helm_bash_source() {\n\talias shopt=':'\n\talias _expand=_bash_expand\n\talias _complete=_bash_comp\n\temulate -L sh\n\tsetopt kshglob noshglob braceexpand\n\tsource \"$@\"\n}\n__helm_type() {\n\t# -t is not supported by zsh\n\tif [ \"$1\" == \"-t\" ]; then\n\t\tshift\n\t\t# fake Bash 4 to disable \"complete -o nospace\". Instead\n\t\t# \"compopt +-o nospace\" is used in the code to toggle trailing\n\t\t# spaces. We don't support that, but leave trailing spaces on\n\t\t# all the time\n\t\tif [ \"$1\" = \"__helm_compopt\" ]; then\n\t\t\techo builtin\n\t\t\treturn 0\n\t\tfi\n\tfi\n\ttype \"$@\"\n}\n__helm_compgen() {\n\tlocal completions w\n\tcompletions=( $(compgen \"$@\") ) || return $?\n\t# filter by given word as prefix\n\twhile [[ \"$1\" = -* && \"$1\" != -- ]]; do\n\t\tshift\n\t\tshift\n\tdone\n\tif [[ \"$1\" == -- ]]; then\n\t\tshift\n\tfi\n\tfor w in \"${completions[@]}\"; do\n\t\tif [[ \"${w}\" = \"$1\"* ]]; then\n\t\t\techo \"${w}\"\n\t\tfi\n\tdone\n}\n__helm_compopt() {\n\ttrue # don't do anything. Not supported by bashcompinit in zsh\n}\n__helm_declare() {\n\tif [ \"$1\" == \"-F\" ]; then\n\t\twhence -w \"$@\"\n\telse\n\t\tbuiltin declare \"$@\"\n\tfi\n}\n__helm_ltrim_colon_completions()\n{\n\tif [[ \"$1\" == *:* && \"$COMP_WORDBREAKS\" == *:* ]]; then\n\t\t# Remove colon-word prefix from COMPREPLY items\n\t\tlocal colon_word=${1%${1##*:}}\n\t\tlocal i=${#COMPREPLY[*]}\n\t\twhile [[ $((--i)) -ge 0 ]]; do\n\t\t\tCOMPREPLY[$i]=${COMPREPLY[$i]#\"$colon_word\"}\n\t\tdone\n\tfi\n}\n__helm_get_comp_words_by_ref() {\n\tcur=\"${COMP_WORDS[COMP_CWORD]}\"\n\tprev=\"${COMP_WORDS[${COMP_CWORD}-1]}\"\n\twords=(\"${COMP_WORDS[@]}\")\n\tcword=(\"${COMP_CWORD[@]}\")\n}\n__helm_filedir() {\n\tlocal RET OLD_IFS w qw\n\t__debug \"_filedir $@ cur=$cur\"\n\tif [[ \"$1\" = \\~* ]]; then\n\t\t# somehow does not work. Maybe, zsh does not call this at all\n\t\teval echo \"$1\"\n\t\treturn 0\n\tfi\n\tOLD_IFS=\"$IFS\"\n\tIFS=$'\\n'\n\tif [ \"$1\" = \"-d\" ]; then\n\t\tshift\n\t\tRET=( $(compgen -d) )\n\telse\n\t\tRET=( $(compgen -f) )\n\tfi\n\tIFS=\"$OLD_IFS\"\n\tIFS=\",\" __debug \"RET=${RET[@]} len=${#RET[@]}\"\n\tfor w in ${RET[@]}; do\n\t\tif [[ ! \"${w}\" = \"${cur}\"* ]]; then\n\t\t\tcontinue\n\t\tfi\n\t\tif eval \"[[ \\\"\\${w}\\\" = *.$1 || -d \\\"\\${w}\\\" ]]\"; then\n\t\t\tqw=\"$(__helm_quote \"${w}\")\"\n\t\t\tif [ -d \"${w}\" ]; then\n\t\t\t\tCOMPREPLY+=(\"${qw}\/\")\n\t\t\telse\n\t\t\t\tCOMPREPLY+=(\"${qw}\")\n\t\t\tfi\n\t\tfi\n\tdone\n}\n__helm_quote() {\n\tif [[ $1 == \\'* || $1 == \\\"* ]]; then\n\t\t# Leave out first character\n\t\tprintf %q \"${1:1}\"\n\telse\n\t\tprintf %q \"$1\"\n\tfi\n}\nautoload -U +X bashcompinit && bashcompinit\n# use word boundary patterns for BSD or GNU sed\nLWORD='[[:<:]]'\nRWORD='[[:>:]]'\nif sed --help 2>&1 | grep -q GNU; then\n\tLWORD='\\<'\n\tRWORD='\\>'\nfi\n__helm_convert_bash_to_zsh() {\n\tsed \\\n\t-e 's\/declare -F\/whence -w\/' \\\n\t-e 's\/_get_comp_words_by_ref \"\\$@\"\/_get_comp_words_by_ref \"\\$*\"\/' \\\n\t-e 's\/local \\([a-zA-Z0-9_]*\\)=\/local \\1; \\1=\/' \\\n\t-e 's\/flags+=(\"\\(--.*\\)=\")\/flags+=(\"\\1\"); two_word_flags+=(\"\\1\")\/' \\\n\t-e 's\/must_have_one_flag+=(\"\\(--.*\\)=\")\/must_have_one_flag+=(\"\\1\")\/' \\\n\t-e \"s\/${LWORD}_filedir${RWORD}\/__helm_filedir\/g\" \\\n\t-e \"s\/${LWORD}_get_comp_words_by_ref${RWORD}\/__helm_get_comp_words_by_ref\/g\" \\\n\t-e \"s\/${LWORD}__ltrim_colon_completions${RWORD}\/__helm_ltrim_colon_completions\/g\" \\\n\t-e \"s\/${LWORD}compgen${RWORD}\/__helm_compgen\/g\" \\\n\t-e \"s\/${LWORD}compopt${RWORD}\/__helm_compopt\/g\" \\\n\t-e \"s\/${LWORD}declare${RWORD}\/__helm_declare\/g\" \\\n\t-e \"s\/\\\\\\$(type${RWORD}\/\\$(__helm_type\/g\" \\\n\t<<'BASH_COMPLETION_EOF'\n`\n\tout.Write([]byte(zshInitialization))\n\n\tbuf := new(bytes.Buffer)\n\tcmd.Root().GenBashCompletion(buf)\n\tout.Write(buf.Bytes())\n\n\tzshTail := `\nBASH_COMPLETION_EOF\n}\n__helm_bash_source <(__helm_convert_bash_to_zsh)\n`\n\tout.Write([]byte(zshTail))\n\treturn nil\n}\n<commit_msg>Allow zsh completion to be autoloaded by compinit<commit_after>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nconst completionDesc = `\nGenerate autocompletions script for Helm for the specified shell (bash or zsh).\n\nThis command can generate shell autocompletions. e.g.\n\n\t$ helm completion bash\n\nCan be sourced as such\n\n\t$ source <(helm completion bash)\n`\n\nvar (\n\tcompletionShells = map[string]func(out io.Writer, cmd *cobra.Command) error{\n\t\t\"bash\": runCompletionBash,\n\t\t\"zsh\": runCompletionZsh,\n\t}\n)\n\nfunc newCompletionCmd(out io.Writer) *cobra.Command {\n\tshells := []string{}\n\tfor s := range completionShells {\n\t\tshells = append(shells, s)\n\t}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"completion SHELL\",\n\t\tShort: \"Generate autocompletions script for the specified shell (bash or zsh)\",\n\t\tLong: completionDesc,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn runCompletion(out, cmd, args)\n\t\t},\n\t\tValidArgs: shells,\n\t}\n\n\treturn cmd\n}\n\nfunc runCompletion(out io.Writer, cmd *cobra.Command, args []string) error {\n\tif len(args) == 0 {\n\t\treturn fmt.Errorf(\"shell not specified\")\n\t}\n\tif len(args) > 1 {\n\t\treturn fmt.Errorf(\"too many arguments, expected only the shell type\")\n\t}\n\trun, found := completionShells[args[0]]\n\tif !found {\n\t\treturn fmt.Errorf(\"unsupported shell type %q\", args[0])\n\t}\n\n\treturn run(out, cmd)\n}\n\nfunc runCompletionBash(out io.Writer, cmd *cobra.Command) error {\n\treturn cmd.Root().GenBashCompletion(out)\n}\n\nfunc runCompletionZsh(out io.Writer, cmd *cobra.Command) error {\n\tzshInitialization := `\n#compdef helm\n\n__helm_bash_source() {\n\talias shopt=':'\n\talias _expand=_bash_expand\n\talias _complete=_bash_comp\n\temulate -L sh\n\tsetopt kshglob noshglob braceexpand\n\tsource \"$@\"\n}\n__helm_type() {\n\t# -t is not supported by zsh\n\tif [ \"$1\" == \"-t\" ]; then\n\t\tshift\n\t\t# fake Bash 4 to disable \"complete -o nospace\". Instead\n\t\t# \"compopt +-o nospace\" is used in the code to toggle trailing\n\t\t# spaces. We don't support that, but leave trailing spaces on\n\t\t# all the time\n\t\tif [ \"$1\" = \"__helm_compopt\" ]; then\n\t\t\techo builtin\n\t\t\treturn 0\n\t\tfi\n\tfi\n\ttype \"$@\"\n}\n__helm_compgen() {\n\tlocal completions w\n\tcompletions=( $(compgen \"$@\") ) || return $?\n\t# filter by given word as prefix\n\twhile [[ \"$1\" = -* && \"$1\" != -- ]]; do\n\t\tshift\n\t\tshift\n\tdone\n\tif [[ \"$1\" == -- ]]; then\n\t\tshift\n\tfi\n\tfor w in \"${completions[@]}\"; do\n\t\tif [[ \"${w}\" = \"$1\"* ]]; then\n\t\t\techo \"${w}\"\n\t\tfi\n\tdone\n}\n__helm_compopt() {\n\ttrue # don't do anything. Not supported by bashcompinit in zsh\n}\n__helm_declare() {\n\tif [ \"$1\" == \"-F\" ]; then\n\t\twhence -w \"$@\"\n\telse\n\t\tbuiltin declare \"$@\"\n\tfi\n}\n__helm_ltrim_colon_completions()\n{\n\tif [[ \"$1\" == *:* && \"$COMP_WORDBREAKS\" == *:* ]]; then\n\t\t# Remove colon-word prefix from COMPREPLY items\n\t\tlocal colon_word=${1%${1##*:}}\n\t\tlocal i=${#COMPREPLY[*]}\n\t\twhile [[ $((--i)) -ge 0 ]]; do\n\t\t\tCOMPREPLY[$i]=${COMPREPLY[$i]#\"$colon_word\"}\n\t\tdone\n\tfi\n}\n__helm_get_comp_words_by_ref() {\n\tcur=\"${COMP_WORDS[COMP_CWORD]}\"\n\tprev=\"${COMP_WORDS[${COMP_CWORD}-1]}\"\n\twords=(\"${COMP_WORDS[@]}\")\n\tcword=(\"${COMP_CWORD[@]}\")\n}\n__helm_filedir() {\n\tlocal RET OLD_IFS w qw\n\t__debug \"_filedir $@ cur=$cur\"\n\tif [[ \"$1\" = \\~* ]]; then\n\t\t# somehow does not work. Maybe, zsh does not call this at all\n\t\teval echo \"$1\"\n\t\treturn 0\n\tfi\n\tOLD_IFS=\"$IFS\"\n\tIFS=$'\\n'\n\tif [ \"$1\" = \"-d\" ]; then\n\t\tshift\n\t\tRET=( $(compgen -d) )\n\telse\n\t\tRET=( $(compgen -f) )\n\tfi\n\tIFS=\"$OLD_IFS\"\n\tIFS=\",\" __debug \"RET=${RET[@]} len=${#RET[@]}\"\n\tfor w in ${RET[@]}; do\n\t\tif [[ ! \"${w}\" = \"${cur}\"* ]]; then\n\t\t\tcontinue\n\t\tfi\n\t\tif eval \"[[ \\\"\\${w}\\\" = *.$1 || -d \\\"\\${w}\\\" ]]\"; then\n\t\t\tqw=\"$(__helm_quote \"${w}\")\"\n\t\t\tif [ -d \"${w}\" ]; then\n\t\t\t\tCOMPREPLY+=(\"${qw}\/\")\n\t\t\telse\n\t\t\t\tCOMPREPLY+=(\"${qw}\")\n\t\t\tfi\n\t\tfi\n\tdone\n}\n__helm_quote() {\n\tif [[ $1 == \\'* || $1 == \\\"* ]]; then\n\t\t# Leave out first character\n\t\tprintf %q \"${1:1}\"\n\telse\n\t\tprintf %q \"$1\"\n\tfi\n}\nautoload -U +X bashcompinit && bashcompinit\n# use word boundary patterns for BSD or GNU sed\nLWORD='[[:<:]]'\nRWORD='[[:>:]]'\nif sed --help 2>&1 | grep -q GNU; then\n\tLWORD='\\<'\n\tRWORD='\\>'\nfi\n__helm_convert_bash_to_zsh() {\n\tsed \\\n\t-e 's\/declare -F\/whence -w\/' \\\n\t-e 's\/_get_comp_words_by_ref \"\\$@\"\/_get_comp_words_by_ref \"\\$*\"\/' \\\n\t-e 's\/local \\([a-zA-Z0-9_]*\\)=\/local \\1; \\1=\/' \\\n\t-e 's\/flags+=(\"\\(--.*\\)=\")\/flags+=(\"\\1\"); two_word_flags+=(\"\\1\")\/' \\\n\t-e 's\/must_have_one_flag+=(\"\\(--.*\\)=\")\/must_have_one_flag+=(\"\\1\")\/' \\\n\t-e \"s\/${LWORD}_filedir${RWORD}\/__helm_filedir\/g\" \\\n\t-e \"s\/${LWORD}_get_comp_words_by_ref${RWORD}\/__helm_get_comp_words_by_ref\/g\" \\\n\t-e \"s\/${LWORD}__ltrim_colon_completions${RWORD}\/__helm_ltrim_colon_completions\/g\" \\\n\t-e \"s\/${LWORD}compgen${RWORD}\/__helm_compgen\/g\" \\\n\t-e \"s\/${LWORD}compopt${RWORD}\/__helm_compopt\/g\" \\\n\t-e \"s\/${LWORD}declare${RWORD}\/__helm_declare\/g\" \\\n\t-e \"s\/\\\\\\$(type${RWORD}\/\\$(__helm_type\/g\" \\\n\t<<'BASH_COMPLETION_EOF'\n`\n\tout.Write([]byte(zshInitialization))\n\n\tbuf := new(bytes.Buffer)\n\tcmd.Root().GenBashCompletion(buf)\n\tout.Write(buf.Bytes())\n\n\tzshTail := `\nBASH_COMPLETION_EOF\n}\n__helm_bash_source <(__helm_convert_bash_to_zsh)\n`\n\tout.Write([]byte(zshTail))\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage siv\n\nimport (\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t. \"github.com\/jacobsa\/ogletest\"\n\t\"testing\"\n)\n\nfunc TestS2v(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype S2vTest struct{}\n\nfunc init() { RegisterTestSuite(&S2vTest{}) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *S2vTest) NilKey() {\n\tkey := []byte(nil)\n\tassociatedData := [][]byte{}\n\n\tf := func() { s2v(key, associatedData) }\n\tExpectThat(f, Panics(HasSubstr(\"16-byte\")))\n}\n\nfunc (t *S2vTest) ShortKey() {\n\tkey := make([]byte, 15)\n\tassociatedData := [][]byte{}\n\n\tf := func() { s2v(key, associatedData) }\n\tExpectThat(f, Panics(HasSubstr(\"16-byte\")))\n}\n\nfunc (t *S2vTest) LongKey() {\n\tkey := make([]byte, 17)\n\tassociatedData := [][]byte{}\n\n\tf := func() { s2v(key, associatedData) }\n\tExpectThat(f, Panics(HasSubstr(\"16-byte\")))\n}\n\nfunc (t *S2vTest) Rfc5297GoldenTestCaseA1() {\n\tkey := fromRfcHex(\n\t\t\"fffefdfc fbfaf9f8 f7f6f5f4 f3f2f1f0\" +\n\t\t\"f0f1f2f3 f4f5f6f7 f8f9fafb fcfdfeff\")\n\n\tassociatedData := [][]byte{\n\t\tfromRfcHex(\n\t\t\t\"10111213 14151617 18191a1b 1c1d1e1f\" +\n\t\t\t\"20212223 24252627\"),\n\t}\n\n\texpected := fromRfcHex(\"85632d07 c6e8f37f 950acd32 0a2ecc93\")\n\n\tExpectThat(s2v(key, associatedData), DeepEquals(expected))\n}\n\nfunc (t *S2vTest) Rfc5297GoldenTestCaseA2() {\n\tExpectEq(\"TODO\", \"\")\n}\n\nfunc (t *S2vTest) GeneratedTestCases() {\n\tExpectEq(\"TODO\", \"\")\n}\n<commit_msg>S2vTest.Rfc5297GoldenTestCaseA2<commit_after>\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage siv\n\nimport (\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t. \"github.com\/jacobsa\/ogletest\"\n\t\"testing\"\n)\n\nfunc TestS2v(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype S2vTest struct{}\n\nfunc init() { RegisterTestSuite(&S2vTest{}) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *S2vTest) NilKey() {\n\tkey := []byte(nil)\n\tassociatedData := [][]byte{}\n\n\tf := func() { s2v(key, associatedData) }\n\tExpectThat(f, Panics(HasSubstr(\"16-byte\")))\n}\n\nfunc (t *S2vTest) ShortKey() {\n\tkey := make([]byte, 15)\n\tassociatedData := [][]byte{}\n\n\tf := func() { s2v(key, associatedData) }\n\tExpectThat(f, Panics(HasSubstr(\"16-byte\")))\n}\n\nfunc (t *S2vTest) LongKey() {\n\tkey := make([]byte, 17)\n\tassociatedData := [][]byte{}\n\n\tf := func() { s2v(key, associatedData) }\n\tExpectThat(f, Panics(HasSubstr(\"16-byte\")))\n}\n\nfunc (t *S2vTest) Rfc5297GoldenTestCaseA1() {\n\tkey := fromRfcHex(\n\t\t\"fffefdfc fbfaf9f8 f7f6f5f4 f3f2f1f0\" +\n\t\t\"f0f1f2f3 f4f5f6f7 f8f9fafb fcfdfeff\")\n\n\tassociatedData := [][]byte{\n\t\tfromRfcHex(\n\t\t\t\"10111213 14151617 18191a1b 1c1d1e1f\" +\n\t\t\t\"20212223 24252627\"),\n\t}\n\n\texpected := fromRfcHex(\"85632d07 c6e8f37f 950acd32 0a2ecc93\")\n\n\tExpectThat(s2v(key, associatedData), DeepEquals(expected))\n}\n\nfunc (t *S2vTest) Rfc5297GoldenTestCaseA2() {\n\tkey := fromRfcHex(\n\t\t\"7f7e7d7c 7b7a7978 77767574 73727170\" +\n\t\t\"40414243 44454647 48494a4b 4c4d4e4f\")\n\n\tassociatedData := [][]byte{\n\t\tfromRfcHex(\n\t\t\t\"00112233 44556677 8899aabb ccddeeff\" +\n\t\t\t\"deaddada deaddada ffeeddcc bbaa9988\" +\n\t\t\t\"77665544 33221100\"),\n\t\tfromRfcHex(\"10203040 50607080 90a0\"),\n\t}\n\n\texpected := fromRfcHex(\"7bdb6e3b 432667eb 06f4d14b ff2fbd0f\")\n\n\tExpectThat(s2v(key, associatedData), DeepEquals(expected))\n}\n\nfunc (t *S2vTest) GeneratedTestCases() {\n\tExpectEq(\"TODO\", \"\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/cad-san\/ikaring\"\n\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/bgentry\/speakeasy\"\n)\n\nfunc getCacheFile() (string, error) {\n\tme, err := user.Current()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(me.HomeDir, \".ikaring.session\"), nil\n}\n\nfunc readSession(path string) (string, error) {\n\tbuff, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.TrimSpace(string(buff)), nil\n}\n\nfunc writeSession(path string, session string) error {\n\treturn ioutil.WriteFile(path, []byte(session), 600)\n}\n\nfunc getAccount(r io.Reader) (string, string, error) {\n\tscanner := bufio.NewScanner(r)\n\tfor {\n\t\tfmt.Print(\"User: \")\n\t\tif scanner.Scan() {\n\t\t\tbreak\n\t\t}\n\t}\n\tusername := scanner.Text()\n\tpassword, err := speakeasy.Ask(\"Password: \")\n\treturn username, password, err\n}\n\nfunc login(client *ikaring.IkaClient) error {\n\tpath, err := getCacheFile()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsession, err := readSession(path)\n\tif err == nil && len(session) > 0 {\n\t\tclient.SetSession(session)\n\t\treturn nil \/\/ already authorized\n\t}\n\n\tusername, password, err := getAccount(os.Stdin)\n\tsession, err = client.Login(username, password)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(session) <= 0 {\n\t\treturn errors.New(\"login failure\")\n\t}\n\twriteSession(path, session)\n\treturn nil\n}\n\nfunc stage(client *ikaring.IkaClient) error {\n\tinfo, err := client.GetStageInfo()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif info.FesSchedules != nil {\n\t\tfor _, s := range *info.FesSchedules {\n\t\t\tfmt.Printf(\"%v\\n\", s)\n\t\t}\n\t}\n\n\tif info.Schedules != nil {\n\t\tfor _, s := range *info.Schedules {\n\t\t\tfmt.Printf(\"%v\\n\", s)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tclient, err := ikaring.CreateClient()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tif err = login(client); err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tif err = stage(client); err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n}\n<commit_msg>add ranking() function<commit_after>package main\n\nimport (\n\t\"github.com\/cad-san\/ikaring\"\n\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/bgentry\/speakeasy\"\n)\n\nfunc getCacheFile() (string, error) {\n\tme, err := user.Current()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(me.HomeDir, \".ikaring.session\"), nil\n}\n\nfunc readSession(path string) (string, error) {\n\tbuff, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.TrimSpace(string(buff)), nil\n}\n\nfunc writeSession(path string, session string) error {\n\treturn ioutil.WriteFile(path, []byte(session), 600)\n}\n\nfunc getAccount(r io.Reader) (string, string, error) {\n\tscanner := bufio.NewScanner(r)\n\tfor {\n\t\tfmt.Print(\"User: \")\n\t\tif scanner.Scan() {\n\t\t\tbreak\n\t\t}\n\t}\n\tusername := scanner.Text()\n\tpassword, err := speakeasy.Ask(\"Password: \")\n\treturn username, password, err\n}\n\nfunc login(client *ikaring.IkaClient) error {\n\tpath, err := getCacheFile()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsession, err := readSession(path)\n\tif err == nil && len(session) > 0 {\n\t\tclient.SetSession(session)\n\t\treturn nil \/\/ already authorized\n\t}\n\n\tusername, password, err := getAccount(os.Stdin)\n\tsession, err = client.Login(username, password)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(session) <= 0 {\n\t\treturn errors.New(\"login failure\")\n\t}\n\twriteSession(path, session)\n\treturn nil\n}\n\nfunc stage(client *ikaring.IkaClient) error {\n\tinfo, err := client.GetStageInfo()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif info.FesSchedules != nil {\n\t\tfor _, s := range *info.FesSchedules {\n\t\t\tfmt.Printf(\"%v\\n\", s)\n\t\t}\n\t}\n\n\tif info.Schedules != nil {\n\t\tfor _, s := range *info.Schedules {\n\t\t\tfmt.Printf(\"%v\\n\", s)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc ranking(client *ikaring.IkaClient) error {\n\tinfo, err := client.GetRanking()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(info.Regular) > 0 {\n\t\tfmt.Println(\"レギュラーマッチ\")\n\t\tfor _, p := range info.Regular {\n\t\t\tfmt.Printf(\"\\t[%d] %3d %s (%s)\\n\", p.Rank, p.Score, p.Name, p.Weapon)\n\t\t}\n\t}\n\n\tif len(info.Gachi) > 0 {\n\t\tfmt.Println(\"ガチマッチ\")\n\t\tfor _, p := range info.Gachi {\n\t\t\tfmt.Printf(\"\\t[%d] %3d %s (%s)\\n\", p.Rank, p.Score, p.Name, p.Weapon)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tclient, err := ikaring.CreateClient()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tif err = login(client); err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tif err = stage(client); err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/Symantec\/Dominator\/imagebuilder\/builder\"\n\t\"github.com\/Symantec\/Dominator\/imagebuilder\/httpd\"\n\t\"github.com\/Symantec\/Dominator\/imagebuilder\/rpcd\"\n\t\"github.com\/Symantec\/Dominator\/lib\/constants\"\n\t\"github.com\/Symantec\/Dominator\/lib\/logbuf\"\n\t\"github.com\/Symantec\/Dominator\/lib\/srpc\/setupserver\"\n\t\"github.com\/Symantec\/tricorder\/go\/tricorder\"\n\t\"log\"\n\t\"os\"\n\t\"syscall\"\n\t\"time\"\n)\n\nconst (\n\tdirPerms = syscall.S_IRWXU | syscall.S_IRGRP | syscall.S_IXGRP |\n\t\tsyscall.S_IROTH | syscall.S_IXOTH\n)\n\nvar (\n\tconfigurationUrl = flag.String(\"configurationUrl\",\n\t\t\"file:\/\/\/etc\/imaginator\/conf.json\", \"URL containing configuration\")\n\timageServerHostname = flag.String(\"imageServerHostname\", \"localhost\",\n\t\t\"Hostname of image server\")\n\timageServerPortNum = flag.Uint(\"imageServerPortNum\",\n\t\tconstants.ImageServerPortNumber,\n\t\t\"Port number of image server\")\n\timageRebuildInterval = flag.Duration(\"imageRebuildInterval\", time.Hour,\n\t\t\"time between automatic rebuilds of images\")\n\tportNum = flag.Uint(\"portNum\", constants.ImaginatorPortNumber,\n\t\t\"Port number to allocate and listen on for HTTP\/RPC\")\n\tstateDir = flag.String(\"stateDir\", \"\/var\/lib\/imaginator\",\n\t\t\"Name of state directory\")\n\tvariablesFile = flag.String(\"variablesFile\", \"\",\n\t\t\"A JSON encoded file containing special variables (i.e. secrets)\")\n)\n\nfunc main() {\n\tflag.Parse()\n\ttricorder.RegisterFlags()\n\tif os.Geteuid() != 0 {\n\t\tfmt.Fprintln(os.Stderr, \"Must run the Image Builder as root\")\n\t\tos.Exit(1)\n\t}\n\tcircularBuffer := logbuf.New()\n\tlogger := log.New(circularBuffer, \"\", log.LstdFlags)\n\tif err := setupserver.SetupTls(); err != nil {\n\t\tlogger.Println(err)\n\t\tcircularBuffer.Flush()\n\t\tos.Exit(1)\n\t}\n\tif err := os.MkdirAll(*stateDir, dirPerms); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Cannot create state directory: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tbuilderObj, err := builder.Load(*configurationUrl, *variablesFile,\n\t\t*stateDir,\n\t\tfmt.Sprintf(\"%s:%d\", *imageServerHostname, *imageServerPortNum),\n\t\t*imageRebuildInterval, logger)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Cannot start builder: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\trpcHtmlWriter, err := rpcd.Setup(builderObj, logger)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Cannot start builder: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\thttpd.AddHtmlWriter(builderObj)\n\thttpd.AddHtmlWriter(rpcHtmlWriter)\n\thttpd.AddHtmlWriter(circularBuffer)\n\tif err = httpd.StartServer(*portNum, builderObj, false); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Unable to create http server: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n<commit_msg>Switch imaginator to lib\/log\/serverlogger package.<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/Symantec\/Dominator\/imagebuilder\/builder\"\n\t\"github.com\/Symantec\/Dominator\/imagebuilder\/httpd\"\n\t\"github.com\/Symantec\/Dominator\/imagebuilder\/rpcd\"\n\t\"github.com\/Symantec\/Dominator\/lib\/constants\"\n\t\"github.com\/Symantec\/Dominator\/lib\/log\/serverlogger\"\n\t\"github.com\/Symantec\/Dominator\/lib\/srpc\/setupserver\"\n\t\"github.com\/Symantec\/tricorder\/go\/tricorder\"\n\t\"os\"\n\t\"syscall\"\n\t\"time\"\n)\n\nconst (\n\tdirPerms = syscall.S_IRWXU | syscall.S_IRGRP | syscall.S_IXGRP |\n\t\tsyscall.S_IROTH | syscall.S_IXOTH\n)\n\nvar (\n\tconfigurationUrl = flag.String(\"configurationUrl\",\n\t\t\"file:\/\/\/etc\/imaginator\/conf.json\", \"URL containing configuration\")\n\timageServerHostname = flag.String(\"imageServerHostname\", \"localhost\",\n\t\t\"Hostname of image server\")\n\timageServerPortNum = flag.Uint(\"imageServerPortNum\",\n\t\tconstants.ImageServerPortNumber,\n\t\t\"Port number of image server\")\n\timageRebuildInterval = flag.Duration(\"imageRebuildInterval\", time.Hour,\n\t\t\"time between automatic rebuilds of images\")\n\tportNum = flag.Uint(\"portNum\", constants.ImaginatorPortNumber,\n\t\t\"Port number to allocate and listen on for HTTP\/RPC\")\n\tstateDir = flag.String(\"stateDir\", \"\/var\/lib\/imaginator\",\n\t\t\"Name of state directory\")\n\tvariablesFile = flag.String(\"variablesFile\", \"\",\n\t\t\"A JSON encoded file containing special variables (i.e. secrets)\")\n)\n\nfunc main() {\n\tflag.Parse()\n\ttricorder.RegisterFlags()\n\tif os.Geteuid() != 0 {\n\t\tfmt.Fprintln(os.Stderr, \"Must run the Image Builder as root\")\n\t\tos.Exit(1)\n\t}\n\tlogger := serverlogger.New(\"\")\n\tif err := setupserver.SetupTls(); err != nil {\n\t\tlogger.Fatalln(err)\n\t}\n\tif err := os.MkdirAll(*stateDir, dirPerms); err != nil {\n\t\tlogger.Fatalf(\"Cannot create state directory: %s\\n\", err)\n\t}\n\tbuilderObj, err := builder.Load(*configurationUrl, *variablesFile,\n\t\t*stateDir,\n\t\tfmt.Sprintf(\"%s:%d\", *imageServerHostname, *imageServerPortNum),\n\t\t*imageRebuildInterval, logger)\n\tif err != nil {\n\t\tlogger.Fatalf(\"Cannot start builder: %s\\n\", err)\n\t}\n\trpcHtmlWriter, err := rpcd.Setup(builderObj, logger)\n\tif err != nil {\n\t\tlogger.Fatalf(\"Cannot start builder: %s\\n\", err)\n\t}\n\thttpd.AddHtmlWriter(builderObj)\n\thttpd.AddHtmlWriter(rpcHtmlWriter)\n\thttpd.AddHtmlWriter(logger)\n\tif err = httpd.StartServer(*portNum, builderObj, false); err != nil {\n\t\tlogger.Fatalf(\"Unable to create http server: %s\\n\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/datawire\/teleproxy\/pkg\/k8s\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nfunc main() {\n\tvar st = &cobra.Command{\n\t\tUse: \"kubestatus <kind>\",\n\t\tShort: \"kubestatus\",\n\t\tLong: \"kubestatus - get and set status of kubernetes resources\",\n\t\tArgs: cobra.ExactArgs(1),\n\t\tSilenceErrors: true,\n\t\tSilenceUsage: true,\n\t}\n\n\tinfo := k8s.NewKubeInfoFromFlags(st.Flags())\n\tfields := st.Flags().StringP(\"field-selector\", \"f\", \"\", \"field selector\")\n\tlabels := st.Flags().StringP(\"label-selector\", \"l\", \"\", \"label selector\")\n\tstatusFile := st.Flags().StringP(\"update\", \"u\", \"\", \"update with new status from file (must be json)\")\n\n\tst.RunE = func(cmd *cobra.Command, args []string) error {\n\t\tvar status map[string]interface{}\n\n\t\tif *statusFile != \"\" {\n\t\t\trawStatus, err := os.Open(*statusFile)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer rawStatus.Close()\n\n\t\t\tdec := json.NewDecoder(rawStatus)\n\t\t\terr = dec.Decode(&status)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tkind := args[0]\n\t\tnamespace, err := info.Namespace()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tw := k8s.MustNewWatcher(info)\n\t\terr = w.WatchQuery(k8s.Query{\n\t\t\tKind: kind,\n\t\t\tNamespace: namespace,\n\t\t\tFieldSelector: *fields,\n\t\t\tLabelSelector: *labels,\n\t\t}, func(w *k8s.Watcher) {\n\t\t\tfor _, rsrc := range w.List(kind) {\n\t\t\t\tif *statusFile == \"\" {\n\t\t\t\t\tfmt.Println(\"Status of\", rsrc.QName())\n\t\t\t\t\tfmt.Printf(\" %q\\n\", rsrc[\"status\"])\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"Updating\", rsrc.QName())\n\t\t\t\t\trsrc[\"status\"] = status\n\t\t\t\t\t_, err := w.UpdateStatus(rsrc)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tw.Stop()\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tw.Wait()\n\t\treturn nil\n\t}\n\n\terr := st.Execute()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n}\n<commit_msg>remove long in favor of short<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/datawire\/teleproxy\/pkg\/k8s\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nfunc main() {\n\tvar st = &cobra.Command{\n\t\tUse: \"kubestatus <kind>\",\n\t\tShort: \"get and set status of kubernetes resources\",\n\t\tArgs: cobra.ExactArgs(1),\n\t\tSilenceErrors: true,\n\t\tSilenceUsage: true,\n\t}\n\n\tinfo := k8s.NewKubeInfoFromFlags(st.Flags())\n\tfields := st.Flags().StringP(\"field-selector\", \"f\", \"\", \"field selector\")\n\tlabels := st.Flags().StringP(\"label-selector\", \"l\", \"\", \"label selector\")\n\tstatusFile := st.Flags().StringP(\"update\", \"u\", \"\", \"update with new status from file (must be json)\")\n\n\tst.RunE = func(cmd *cobra.Command, args []string) error {\n\t\tvar status map[string]interface{}\n\n\t\tif *statusFile != \"\" {\n\t\t\trawStatus, err := os.Open(*statusFile)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer rawStatus.Close()\n\n\t\t\tdec := json.NewDecoder(rawStatus)\n\t\t\terr = dec.Decode(&status)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tkind := args[0]\n\t\tnamespace, err := info.Namespace()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tw := k8s.MustNewWatcher(info)\n\t\terr = w.WatchQuery(k8s.Query{\n\t\t\tKind: kind,\n\t\t\tNamespace: namespace,\n\t\t\tFieldSelector: *fields,\n\t\t\tLabelSelector: *labels,\n\t\t}, func(w *k8s.Watcher) {\n\t\t\tfor _, rsrc := range w.List(kind) {\n\t\t\t\tif *statusFile == \"\" {\n\t\t\t\t\tfmt.Println(\"Status of\", rsrc.QName())\n\t\t\t\t\tfmt.Printf(\" %q\\n\", rsrc[\"status\"])\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"Updating\", rsrc.QName())\n\t\t\t\t\trsrc[\"status\"] = status\n\t\t\t\t\t_, err := w.UpdateStatus(rsrc)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tw.Stop()\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tw.Wait()\n\t\treturn nil\n\t}\n\n\terr := st.Execute()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package manager\n\nimport (\n\t\"strings\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/spf13\/cobra\"\n\n\tbivacCmd \"github.com\/camptocamp\/bivac\/cmd\"\n\t\"github.com\/camptocamp\/bivac\/internal\/manager\"\n\t\"github.com\/camptocamp\/bivac\/pkg\/volume\"\n)\n\nvar (\n\tserver manager.Server\n\torchestrator string\n\n\t\/\/ Orchestrators is a copy of manager.Orchestrators which allows orchestrator\n\t\/\/ configuration from Cobra variables\n\tOrchestrators manager.Orchestrators\n\n\tdbPath string\n\tresticForgetArgs string\n\n\tprovidersFile string\n\ttargetURL string\n\tretryCount int\n\tlogServer string\n\tagentImage string\n\twhitelistVolumes string\n\tblacklistVolumes string\n\twhitelistAnnotation bool\n)\nvar envs = make(map[string]string)\n\nvar managerCmd = &cobra.Command{\n\tUse: \"manager\",\n\tShort: \"Start Bivac backup manager\",\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tvolumesFilters := volume.Filters{\n\t\t\tBlacklist: strings.Split(blacklistVolumes, \",\"),\n\t\t\tWhitelist: strings.Split(whitelistVolumes, \",\"),\n\t\t\tWhitelistAnnotation: whitelistAnnotation,\n\t\t}\n\n\t\to, err := manager.GetOrchestrator(orchestrator, Orchestrators)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to retrieve orchestrator: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\terr = manager.Start(bivacCmd.BuildInfo, o, server, volumesFilters, providersFile, targetURL, logServer, agentImage, retryCount)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to start manager: %s\", err)\n\t\t\treturn\n\t\t}\n\t},\n}\n\nfunc init() {\n\tmanagerCmd.Flags().StringVarP(&server.Address, \"server.address\", \"\", \"0.0.0.0:8182\", \"Address to bind on.\")\n\tenvs[\"BIVAC_SERVER_ADDRESS\"] = \"server.address\"\n\tmanagerCmd.Flags().StringVarP(&server.PSK, \"server.psk\", \"\", \"\", \"Pre-shared key.\")\n\tenvs[\"BIVAC_SERVER_PSK\"] = \"server.psk\"\n\n\tmanagerCmd.Flags().StringVarP(&orchestrator, \"orchestrator\", \"o\", \"\", \"Orchestrator on which Bivac should connect to.\")\n\tenvs[\"BIVAC_ORCHESTRATOR\"] = \"orchestrator\"\n\n\tmanagerCmd.Flags().StringVarP(&Orchestrators.Docker.Endpoint, \"docker.endpoint\", \"\", \"unix:\/\/\/var\/run\/docker.sock\", \"Docker endpoint.\")\n\tenvs[\"BIVAC_DOCKER_ENDPOINT\"] = \"docker.endpoint\"\n\n\tmanagerCmd.Flags().StringVarP(&Orchestrators.Cattle.URL, \"cattle.url\", \"\", \"\", \"The Cattle URL.\")\n\tenvs[\"CATTLE_URL\"] = \"cattle.url\"\n\tmanagerCmd.Flags().StringVarP(&Orchestrators.Cattle.AccessKey, \"cattle.accesskey\", \"\", \"\", \"The Cattle access key.\")\n\tenvs[\"CATTLE_ACCESS_KEY\"] = \"cattle.accesskey\"\n\tmanagerCmd.Flags().StringVarP(&Orchestrators.Cattle.SecretKey, \"cattle.secretkey\", \"\", \"\", \"The Cattle secret key.\")\n\tenvs[\"CATTLE_SECRET_KEY\"] = \"cattle.secretkey\"\n\n\tmanagerCmd.Flags().StringVarP(&Orchestrators.Kubernetes.Namespace, \"kubernetes.namespace\", \"\", \"\", \"Namespace where you want to run Bivac.\")\n\tenvs[\"KUBERNETES_NAMESPACE\"] = \"kubernetes.namespace\"\n\tmanagerCmd.Flags().BoolVarP(&Orchestrators.Kubernetes.AllNamespaces, \"kubernetes.all-namespaces\", \"\", false, \"Backup volumes of all namespaces.\")\n\tenvs[\"KUBERNETES_ALL_NAMESPACES\"] = \"kubernetes.all-namespaces\"\n\tmanagerCmd.Flags().StringVarP(&Orchestrators.Kubernetes.KubeConfig, \"kubernetes.kubeconfig\", \"\", \"\", \"Path to your kuberconfig file.\")\n\tenvs[\"KUBERNETES_KUBECONFIG\"] = \"kubernetes.kubeconfig\"\n\tmanagerCmd.Flags().StringVarP(&Orchestrators.Kubernetes.AgentServiceAccount, \"kubernetes.agent-service-account\", \"\", \"\", \"Specify service account for agents.\")\n\tenvs[\"KUBERNETES_AGENT_SERVICE_ACCOUNT\"] = \"kubernetes.agent-service-account\"\n\n\tmanagerCmd.Flags().StringVarP(&resticForgetArgs, \"restic.forget.args\", \"\", \"--group-by host --keep-daily 15 --prune\", \"Restic forget arguments.\")\n\tenvs[\"RESTIC_FORGET_ARGS\"] = \"restic.forget.args\"\n\n\tmanagerCmd.Flags().StringVarP(&providersFile, \"providers.config\", \"\", \"\/providers-config.default.toml\", \"Configuration file for providers.\")\n\tenvs[\"BIVAC_PROVIDERS_CONFIG\"] = \"providers.config\"\n\n\tmanagerCmd.Flags().StringVarP(&targetURL, \"target.url\", \"r\", \"\", \"The target URL to push the backups to.\")\n\tenvs[\"BIVAC_TARGET_URL\"] = \"target.url\"\n\n\tmanagerCmd.Flags().IntVarP(&retryCount, \"retry.count\", \"\", 0, \"Retry to backup the volume if something goes wrong with Bivac.\")\n\tenvs[\"BIVAC_RETRY_COUNT\"] = \"retry.count\"\n\n\tmanagerCmd.Flags().StringVarP(&logServer, \"log.server\", \"\", \"\", \"Manager's API address that will receive logs from agents.\")\n\tenvs[\"BIVAC_LOG_SERVER\"] = \"log.server\"\n\n\tmanagerCmd.Flags().StringVarP(&agentImage, \"agent.image\", \"\", \"camptocamp\/bivac:2.0.0\", \"Agent's Docker image.\")\n\tenvs[\"BIVAC_AGENT_IMAGE\"] = \"agent.image\"\n\n\tmanagerCmd.Flags().StringVarP(&whitelistVolumes, \"whitelist\", \"\", \"\", \"Whitelist volumes.\")\n\tenvs[\"BIVAC_WHITELIST\"] = \"whitelist\"\n\n\tmanagerCmd.Flags().StringVarP(&blacklistVolumes, \"blacklist\", \"\", \"\", \"Blacklist volumes.\")\n\tenvs[\"BIVAC_BLACKLIST\"] = \"blacklist\"\n\n managerCmd.Flags().BoolVarP(&whitelistAnnotation, \"whitelist.annotations\", \"\", false, \"Require pvc whitelist annotation\")\n\tenvs[\"BIVAC_WHITELIST_ANNOTATION\"] = \"whitelist.annotations\"\n\n\tbivacCmd.SetValuesFromEnv(envs, managerCmd.Flags())\n\tbivacCmd.RootCmd.AddCommand(managerCmd)\n}\n<commit_msg>linting<commit_after>package manager\n\nimport (\n\t\"strings\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/spf13\/cobra\"\n\n\tbivacCmd \"github.com\/camptocamp\/bivac\/cmd\"\n\t\"github.com\/camptocamp\/bivac\/internal\/manager\"\n\t\"github.com\/camptocamp\/bivac\/pkg\/volume\"\n)\n\nvar (\n\tserver manager.Server\n\torchestrator string\n\n\t\/\/ Orchestrators is a copy of manager.Orchestrators which allows orchestrator\n\t\/\/ configuration from Cobra variables\n\tOrchestrators manager.Orchestrators\n\n\tdbPath string\n\tresticForgetArgs string\n\n\tprovidersFile string\n\ttargetURL string\n\tretryCount int\n\tlogServer string\n\tagentImage string\n\twhitelistVolumes string\n\tblacklistVolumes string\n\twhitelistAnnotation bool\n)\nvar envs = make(map[string]string)\n\nvar managerCmd = &cobra.Command{\n\tUse: \"manager\",\n\tShort: \"Start Bivac backup manager\",\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tvolumesFilters := volume.Filters{\n\t\t\tBlacklist: strings.Split(blacklistVolumes, \",\"),\n\t\t\tWhitelist: strings.Split(whitelistVolumes, \",\"),\n\t\t\tWhitelistAnnotation: whitelistAnnotation,\n\t\t}\n\n\t\to, err := manager.GetOrchestrator(orchestrator, Orchestrators)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to retrieve orchestrator: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\terr = manager.Start(bivacCmd.BuildInfo, o, server, volumesFilters, providersFile, targetURL, logServer, agentImage, retryCount)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"failed to start manager: %s\", err)\n\t\t\treturn\n\t\t}\n\t},\n}\n\nfunc init() {\n\tmanagerCmd.Flags().StringVarP(&server.Address, \"server.address\", \"\", \"0.0.0.0:8182\", \"Address to bind on.\")\n\tenvs[\"BIVAC_SERVER_ADDRESS\"] = \"server.address\"\n\tmanagerCmd.Flags().StringVarP(&server.PSK, \"server.psk\", \"\", \"\", \"Pre-shared key.\")\n\tenvs[\"BIVAC_SERVER_PSK\"] = \"server.psk\"\n\n\tmanagerCmd.Flags().StringVarP(&orchestrator, \"orchestrator\", \"o\", \"\", \"Orchestrator on which Bivac should connect to.\")\n\tenvs[\"BIVAC_ORCHESTRATOR\"] = \"orchestrator\"\n\n\tmanagerCmd.Flags().StringVarP(&Orchestrators.Docker.Endpoint, \"docker.endpoint\", \"\", \"unix:\/\/\/var\/run\/docker.sock\", \"Docker endpoint.\")\n\tenvs[\"BIVAC_DOCKER_ENDPOINT\"] = \"docker.endpoint\"\n\n\tmanagerCmd.Flags().StringVarP(&Orchestrators.Cattle.URL, \"cattle.url\", \"\", \"\", \"The Cattle URL.\")\n\tenvs[\"CATTLE_URL\"] = \"cattle.url\"\n\tmanagerCmd.Flags().StringVarP(&Orchestrators.Cattle.AccessKey, \"cattle.accesskey\", \"\", \"\", \"The Cattle access key.\")\n\tenvs[\"CATTLE_ACCESS_KEY\"] = \"cattle.accesskey\"\n\tmanagerCmd.Flags().StringVarP(&Orchestrators.Cattle.SecretKey, \"cattle.secretkey\", \"\", \"\", \"The Cattle secret key.\")\n\tenvs[\"CATTLE_SECRET_KEY\"] = \"cattle.secretkey\"\n\n\tmanagerCmd.Flags().StringVarP(&Orchestrators.Kubernetes.Namespace, \"kubernetes.namespace\", \"\", \"\", \"Namespace where you want to run Bivac.\")\n\tenvs[\"KUBERNETES_NAMESPACE\"] = \"kubernetes.namespace\"\n\tmanagerCmd.Flags().BoolVarP(&Orchestrators.Kubernetes.AllNamespaces, \"kubernetes.all-namespaces\", \"\", false, \"Backup volumes of all namespaces.\")\n\tenvs[\"KUBERNETES_ALL_NAMESPACES\"] = \"kubernetes.all-namespaces\"\n\tmanagerCmd.Flags().StringVarP(&Orchestrators.Kubernetes.KubeConfig, \"kubernetes.kubeconfig\", \"\", \"\", \"Path to your kuberconfig file.\")\n\tenvs[\"KUBERNETES_KUBECONFIG\"] = \"kubernetes.kubeconfig\"\n\tmanagerCmd.Flags().StringVarP(&Orchestrators.Kubernetes.AgentServiceAccount, \"kubernetes.agent-service-account\", \"\", \"\", \"Specify service account for agents.\")\n\tenvs[\"KUBERNETES_AGENT_SERVICE_ACCOUNT\"] = \"kubernetes.agent-service-account\"\n\n\tmanagerCmd.Flags().StringVarP(&resticForgetArgs, \"restic.forget.args\", \"\", \"--group-by host --keep-daily 15 --prune\", \"Restic forget arguments.\")\n\tenvs[\"RESTIC_FORGET_ARGS\"] = \"restic.forget.args\"\n\n\tmanagerCmd.Flags().StringVarP(&providersFile, \"providers.config\", \"\", \"\/providers-config.default.toml\", \"Configuration file for providers.\")\n\tenvs[\"BIVAC_PROVIDERS_CONFIG\"] = \"providers.config\"\n\n\tmanagerCmd.Flags().StringVarP(&targetURL, \"target.url\", \"r\", \"\", \"The target URL to push the backups to.\")\n\tenvs[\"BIVAC_TARGET_URL\"] = \"target.url\"\n\n\tmanagerCmd.Flags().IntVarP(&retryCount, \"retry.count\", \"\", 0, \"Retry to backup the volume if something goes wrong with Bivac.\")\n\tenvs[\"BIVAC_RETRY_COUNT\"] = \"retry.count\"\n\n\tmanagerCmd.Flags().StringVarP(&logServer, \"log.server\", \"\", \"\", \"Manager's API address that will receive logs from agents.\")\n\tenvs[\"BIVAC_LOG_SERVER\"] = \"log.server\"\n\n\tmanagerCmd.Flags().StringVarP(&agentImage, \"agent.image\", \"\", \"camptocamp\/bivac:2.0.0\", \"Agent's Docker image.\")\n\tenvs[\"BIVAC_AGENT_IMAGE\"] = \"agent.image\"\n\n\tmanagerCmd.Flags().StringVarP(&whitelistVolumes, \"whitelist\", \"\", \"\", \"Whitelist volumes.\")\n\tenvs[\"BIVAC_WHITELIST\"] = \"whitelist\"\n\n\tmanagerCmd.Flags().StringVarP(&blacklistVolumes, \"blacklist\", \"\", \"\", \"Blacklist volumes.\")\n\tenvs[\"BIVAC_BLACKLIST\"] = \"blacklist\"\n\n\tmanagerCmd.Flags().BoolVarP(&whitelistAnnotation, \"whitelist.annotations\", \"\", false, \"Require pvc whitelist annotation\")\n\tenvs[\"BIVAC_WHITELIST_ANNOTATION\"] = \"whitelist.annotations\"\n\n\tbivacCmd.SetValuesFromEnv(envs, managerCmd.Flags())\n\tbivacCmd.RootCmd.AddCommand(managerCmd)\n}\n<|endoftext|>"} {"text":"<commit_before>package ls\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com\/asjoyner\/shade\"\n\t\"github.com\/asjoyner\/shade\/config\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/google\/subcommands\"\n)\n\nfunc init() {\n\tsubcommands.Register(&lsCmd{}, \"\")\n}\n\nvar (\n\tdefaultConfig = path.Join(shade.ConfigDir(), \"config.json\")\n)\n\ntype lsCmd struct {\n\tlong bool\n\tconfig string\n}\n\nfunc (*lsCmd) Name() string { return \"ls\" }\nfunc (*lsCmd) Synopsis() string { return \"List files in the respository.\" }\nfunc (*lsCmd) Usage() string {\n\treturn `ls [-l] [-f FILE]:\n List all the files in the configured shade repositories.\n`\n}\n\nfunc (p *lsCmd) SetFlags(f *flag.FlagSet) {\n\tf.BoolVar(&p.long, \"l\", false, \"Long format listing\")\n\tf.StringVar(&p.config, \"f\", defaultConfig, \"Path to shade config\")\n}\n\nfunc (p *lsCmd) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {\n\t\/\/ read in the config\n\tclients, err := config.Clients(p.config)\n\tif err != nil {\n\t\tfmt.Printf(\"could not initialize clients: %s\", err)\n\t\treturn subcommands.ExitFailure\n\t}\n\n\tfile := &shade.File{}\n\tfor _, client := range clients {\n\t\tfmt.Println(client.GetConfig().Provider)\n\t\tlfm, err := client.ListFiles()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"could not get files: %s\\n\", err)\n\t\t\treturn subcommands.ExitFailure\n\t\t}\n\t\tfor id, sha256sum := range lfm {\n\t\t\tfileJSON, err := client.GetChunk(sha256sum)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"could not get file %q: %s\\n\", id, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr = json.Unmarshal(fileJSON, file)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"failed to unmarshal: %s\\n\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif p.long {\n\t\t\t\tfmt.Printf(\" %s (%x):\\n %s\\n\", id, sha256sum, file)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\" %s (%x)\\n\", id, sha256sum)\n\t\t\t}\n\t\t}\n\t}\n\treturn subcommands.ExitSuccess\n}\n<commit_msg>Make shadeutil ls output a little better<commit_after>package ls\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"text\/tabwriter\"\n\t\"time\"\n\n\t\"github.com\/asjoyner\/shade\"\n\t\"github.com\/asjoyner\/shade\/config\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/google\/subcommands\"\n)\n\nfunc init() {\n\tsubcommands.Register(&lsCmd{}, \"\")\n}\n\nvar (\n\tdefaultConfig = path.Join(shade.ConfigDir(), \"config.json\")\n)\n\ntype lsCmd struct {\n\tlong bool\n\tconfig string\n}\n\nfunc (*lsCmd) Name() string { return \"ls\" }\nfunc (*lsCmd) Synopsis() string { return \"List files in the respository.\" }\nfunc (*lsCmd) Usage() string {\n\treturn `ls [-l] [-f FILE]:\n List all the files in the configured shade repositories.\n`\n}\n\nfunc (p *lsCmd) SetFlags(f *flag.FlagSet) {\n\tf.BoolVar(&p.long, \"l\", false, \"Long format listing\")\n\tf.StringVar(&p.config, \"f\", defaultConfig, \"Path to shade config\")\n}\n\nfunc (p *lsCmd) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {\n\t\/\/ read in the config\n\tclients, err := config.Clients(p.config)\n\tif err != nil {\n\t\tfmt.Printf(\"could not initialize clients: %v\", err)\n\t\treturn subcommands.ExitFailure\n\t}\n\n\tfile := &shade.File{}\n\tw := &tabwriter.Writer{}\n\tw.Init(os.Stdout, 0, 2, 1, ' ', 0)\n\tif p.long {\n\t\tfmt.Fprint(w, \"\\tid\\t(sha)\\tsize\\tchunksize\\tchunks\\tmtime\\tfilename\\n\")\n\t}\n\tfor _, client := range clients {\n\t\tfmt.Println(client.GetConfig().Provider)\n\t\tlfm, err := client.ListFiles()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"could not get files: %v\\n\", err)\n\t\t\treturn subcommands.ExitFailure\n\t\t}\n\t\tfor id, sha256sum := range lfm {\n\t\t\tfileJSON, err := client.GetChunk(sha256sum)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"could not get file %q: %v\\n\", id, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr = json.Unmarshal(fileJSON, file)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"failed to unmarshal: %v\\n\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif p.long {\n\t\t\t\tfmt.Fprintf(w, \"\\t%v\\t(%x)\\t%v\\t%v\\t%v\\t%v\\t%v\\n\", id, sha256sum, file.Filesize, file.Chunksize, file.Chunks, file.ModifiedTime.Format(time.Stamp), file.Filename)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(w, \"\\t%v\\n\", file.Filename)\n\t\t\t}\n\t\t}\n\t\tw.Flush()\n\t}\n\treturn subcommands.ExitSuccess\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/ChristopherRabotin\/smd\"\n)\n\nconst (\n\tdebug = false\n)\n\nvar (\n\tcpus int\n\tplanet string\n\tstepSize float64\n)\n\nfunc init() {\n\t\/\/ Read flags\n\tflag.IntVar(&cpus, \"cpus\", -1, \"number of CPUs to use for this simulation (set to 0 for max CPUs)\")\n\tflag.StringVar(&planet, \"planet\", \"undef\", \"departure planet to perform the spiral from\")\n\tflag.Float64Var(&stepSize, \"step\", 15, \"step size (10 to 30 recommended)\")\n}\n\n\/*\n * This example shows how to find the greatest heliocentric velocity at the end of a spiral by iterating on the initial\n * true anomaly.\n *\/\n\nfunc sc() *smd.Spacecraft {\n\teps := smd.NewUnlimitedEPS()\n\tthrusters := []smd.EPThruster{smd.NewGenericEP(5, 5000)} \/\/ VASIMR (approx.)\n\tdryMass := 10000.0\n\tfuelMass := 5000.0\n\treturn smd.NewSpacecraft(\"Spiral\", dryMass, fuelMass, eps, thrusters, false, []*smd.Cargo{},\n\t\t[]smd.Waypoint{smd.NewToHyperbolic(nil)})\n}\n\nfunc initEarthOrbit(i, Ω, ω, ν float64) *smd.Orbit {\n\ta, e := smd.Radii2ae(39300+smd.Earth.Radius, 290+smd.Earth.Radius)\n\treturn smd.NewOrbitFromOE(a, e, i, Ω, ω, ν, smd.Earth)\n}\n\n\/\/ initMarsOrbit returns the initial orbit.\nfunc initMarsOrbit(i, Ω, ω, ν float64) *smd.Orbit {\n\t\/\/ Exomars TGO.\n\ta, e := smd.Radii2ae(44500+smd.Mars.Radius, 426+smd.Mars.Radius)\n\treturn smd.NewOrbitFromOE(a, e, i, Ω, ω, ν, smd.Mars)\n}\n\nfunc main() {\n\tflag.Parse()\n\tavailableCPUs := runtime.NumCPU()\n\tif cpus <= 0 || cpus > availableCPUs {\n\t\tcpus = availableCPUs\n\t}\n\truntime.GOMAXPROCS(cpus)\n\tfmt.Printf(\"running on %d CPUs\\n\", cpus)\n\n\tif stepSize <= 0 {\n\t\tfmt.Println(\"step size must be positive\")\n\t\tflag.Usage()\n\t\treturn\n\t} else if stepSize <= 5 {\n\t\tfmt.Println(\"[WARNING] A small step size will take several days to iterate over all possibilities\")\n\t}\n\n\tvar orbitPtr func(i, Ω, ω, ν float64) *smd.Orbit\n\tplanet = strings.ToLower(planet)\n\tswitch planet {\n\tcase \"mars\":\n\t\torbitPtr = initMarsOrbit\n\tcase \"earth\":\n\t\torbitPtr = initEarthOrbit\n\tdefault:\n\t\tfmt.Printf(\"unsupported planet `%s`\\n\", planet)\n\t\tflag.Usage()\n\t\treturn\n\t}\n\n\tfmt.Printf(\"Finding spirals leaving from %s\\n\", planet)\n\n\t\/\/name := \"spiral-mars\"\n\tdepart := time.Date(2018, 11, 8, 0, 0, 0, 0, time.UTC)\n\tchgframePath := \"..\/refframes\/chgframe.py\"\n\tmaxV := -1e3\n\tminV := +1e3\n\tvar maxOrbit smd.Orbit\n\tvar minOrbit smd.Orbit\n\ta, e, _, _, _, _, _, _, _ := initMarsOrbit(10, 10, 10, 10).Elements()\n\ttsv := fmt.Sprintf(\"#a=%f km\\te=%f\\n#V(km\/s), i (degrees), raan (degrees), arg peri (degrees),nu (degrees)\\n\", a, e)\n\tfor i := 1.0; i < 90; i += stepSize {\n\t\tfor Ω := 0.0; Ω < 360; Ω += stepSize {\n\t\t\tfor ω := 0.0; ω < 360; ω += stepSize {\n\t\t\t\tfor ν := 0.0; ν < 360; ν += stepSize {\n\t\t\t\t\tinitOrbit := orbitPtr(i, Ω, ω, ν)\n\t\t\t\t\tastro := smd.NewMission(sc(), initOrbit, depart, depart.Add(-1), smd.Cartesian, smd.Perturbations{}, smd.ExportConfig{})\n\t\t\t\t\tastro.Propagate()\n\n\t\t\t\t\t\/\/ Run chgframe\n\t\t\t\t\t\/\/ We're now done so let's convert the position and velocity to heliocentric and check the output.\n\t\t\t\t\tR, V := initOrbit.RV()\n\t\t\t\t\tstate := fmt.Sprintf(\"[%f,%f,%f,%f,%f,%f]\", R[0], R[1], R[2], V[0], V[1], V[2])\n\t\t\t\t\tif debug {\n\t\t\t\t\t\tfmt.Printf(\"\\n=== RUNNING CMD ===\\npython %s -t J2000 -f IAU_Earth -e \\\"%s\\\" -s \\\"%s\\\"\\n\", chgframePath, astro.CurrentDT.Format(time.ANSIC), state)\n\t\t\t\t\t}\n\t\t\t\t\tcmd := exec.Command(\"python\", chgframePath, \"-t\", \"J2000\", \"-f\", \"IAU_Mars\", \"-e\", astro.CurrentDT.Format(time.ANSIC), \"-s\", state)\n\t\t\t\t\tcmdOut, err := cmd.Output()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Fprintln(os.Stderr, \"Error converting orbit to helio \", err)\n\t\t\t\t\t\tos.Exit(1)\n\t\t\t\t\t}\n\t\t\t\t\tout := string(cmdOut)\n\n\t\t\t\t\t\/\/ Process output\n\t\t\t\t\tnewState := strings.TrimSpace(string(out))\n\t\t\t\t\t\/\/ Cf. https:\/\/play.golang.org\/p\/g-a4idjhIb\n\t\t\t\t\tnewState = newState[1 : len(newState)-1]\n\t\t\t\t\tcomponents := strings.Split(newState, \",\")\n\t\t\t\t\tvar nR = make([]float64, 3)\n\t\t\t\t\tvar nV = make([]float64, 3)\n\t\t\t\t\tfor i := 0; i < 6; i++ {\n\t\t\t\t\t\tfl, err := strconv.ParseFloat(strings.TrimSpace(components[i]), 64)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif i < 3 {\n\t\t\t\t\t\t\tnR[i] = fl\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tnV[i-3] = fl\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tvNorm := math.Sqrt(math.Pow(nV[0], 2) + math.Pow(nV[1], 2) + math.Pow(nV[2], 2))\n\t\t\t\t\t\/\/ Add to TSV file\n\t\t\t\t\ttsv += fmt.Sprintf(\"%f,%f,%f,%f,%f\\n\", vNorm, i, Ω, ω, ν)\n\t\t\t\t\tif vNorm > maxV {\n\t\t\t\t\t\tmaxV = vNorm\n\t\t\t\t\t\tmaxOrbit = *initMarsOrbit(i, Ω, ω, ν)\n\t\t\t\t\t} else if vNorm < minV {\n\t\t\t\t\t\tminV = vNorm\n\t\t\t\t\t\tminOrbit = *initMarsOrbit(i, Ω, ω, ν)\n\t\t\t\t\t}\n\t\t\t\t\tif debug {\n\t\t\t\t\t\tfmt.Printf(\"\\nν=%f\\t=>V=%+v\\t|V|=%f\\n\", ν, nV, vNorm)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfmt.Printf(\"\\n\\n=== RESULT ===\\n\\nmaxV=%.3f km\/s\\t%s\\nminV=%.3f km\/s\\t%s\\n\\n\", maxV, maxOrbit, minV, minOrbit)\n\t\/\/ Write CSV file.\n\tf, err := os.Create(fmt.Sprintf(\".\/results-.csv\"))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer f.Close()\n\tif _, err := f.WriteString(tsv); err != nil {\n\t\tpanic(err)\n\t}\n}\n<commit_msg>Stream results<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/ChristopherRabotin\/smd\"\n)\n\nconst (\n\tdebug = false\n)\n\nvar (\n\tcpus int\n\tplanet string\n\tstepSize float64\n\twg sync.WaitGroup\n)\n\nfunc init() {\n\t\/\/ Read flags\n\tflag.IntVar(&cpus, \"cpus\", -1, \"number of CPUs to use for this simulation (set to 0 for max CPUs)\")\n\tflag.StringVar(&planet, \"planet\", \"undef\", \"departure planet to perform the spiral from\")\n\tflag.Float64Var(&stepSize, \"step\", 15, \"step size (10 to 30 recommended)\")\n}\n\n\/*\n * This example shows how to find the greatest heliocentric velocity at the end of a spiral by iterating on the initial\n * true anomaly.\n *\/\n\nfunc sc() *smd.Spacecraft {\n\teps := smd.NewUnlimitedEPS()\n\tthrusters := []smd.EPThruster{smd.NewGenericEP(5, 5000)} \/\/ VASIMR (approx.)\n\tdryMass := 10000.0\n\tfuelMass := 5000.0\n\treturn smd.NewSpacecraft(\"Spiral\", dryMass, fuelMass, eps, thrusters, false, []*smd.Cargo{},\n\t\t[]smd.Waypoint{smd.NewToHyperbolic(nil)})\n}\n\nfunc initEarthOrbit(i, Ω, ω, ν float64) *smd.Orbit {\n\ta, e := smd.Radii2ae(39300+smd.Earth.Radius, 290+smd.Earth.Radius)\n\treturn smd.NewOrbitFromOE(a, e, i, Ω, ω, ν, smd.Earth)\n}\n\n\/\/ initMarsOrbit returns the initial orbit.\nfunc initMarsOrbit(i, Ω, ω, ν float64) *smd.Orbit {\n\t\/\/ Exomars TGO.\n\ta, e := smd.Radii2ae(44500+smd.Mars.Radius, 426+smd.Mars.Radius)\n\treturn smd.NewOrbitFromOE(a, e, i, Ω, ω, ν, smd.Mars)\n}\n\nfunc main() {\n\tflag.Parse()\n\tavailableCPUs := runtime.NumCPU()\n\tif cpus <= 0 || cpus > availableCPUs {\n\t\tcpus = availableCPUs\n\t}\n\truntime.GOMAXPROCS(cpus)\n\tfmt.Printf(\"running on %d CPUs\\n\", cpus)\n\n\tif stepSize <= 0 {\n\t\tfmt.Println(\"step size must be positive\")\n\t\tflag.Usage()\n\t\treturn\n\t} else if stepSize <= 5 {\n\t\tfmt.Println(\"[WARNING] A small step size will take several days to iterate over all possibilities\")\n\t}\n\n\tvar orbitPtr func(i, Ω, ω, ν float64) *smd.Orbit\n\tplanet = strings.ToLower(planet)\n\tswitch planet {\n\tcase \"mars\":\n\t\torbitPtr = initMarsOrbit\n\tcase \"earth\":\n\t\torbitPtr = initEarthOrbit\n\tdefault:\n\t\tfmt.Printf(\"unsupported planet `%s`\\n\", planet)\n\t\tflag.Usage()\n\t\treturn\n\t}\n\n\tfmt.Printf(\"Finding spirals leaving from %s\\n\", planet)\n\n\t\/\/name := \"spiral-mars\"\n\tdepart := time.Date(2018, 11, 8, 0, 0, 0, 0, time.UTC)\n\tchgframePath := \"..\/refframes\/chgframe.py\"\n\tmaxV := -1e3\n\tminV := +1e3\n\tvar maxOrbit smd.Orbit\n\tvar minOrbit smd.Orbit\n\ta, e, _, _, _, _, _, _, _ := orbitPtr(10, 10, 10, 10).Elements()\n\trslts := make(chan string, 10)\n\twg.Add(1)\n\tgo streamResults(a, e, fmt.Sprintf(\"%s-%.0fstep\", planet, stepSize), rslts)\n\tfor i := 1.0; i < 90; i += stepSize {\n\t\tfor Ω := 0.0; Ω < 360; Ω += stepSize {\n\t\t\tfor ω := 0.0; ω < 360; ω += stepSize {\n\t\t\t\tfor ν := 0.0; ν < 360; ν += stepSize {\n\t\t\t\t\tinitOrbit := orbitPtr(i, Ω, ω, ν)\n\t\t\t\t\tastro := smd.NewMission(sc(), initOrbit, depart, depart.Add(-1), smd.Cartesian, smd.Perturbations{}, smd.ExportConfig{})\n\t\t\t\t\tastro.Propagate()\n\n\t\t\t\t\t\/\/ Run chgframe\n\t\t\t\t\t\/\/ We're now done so let's convert the position and velocity to heliocentric and check the output.\n\t\t\t\t\tR, V := initOrbit.RV()\n\t\t\t\t\tstate := fmt.Sprintf(\"[%f,%f,%f,%f,%f,%f]\", R[0], R[1], R[2], V[0], V[1], V[2])\n\t\t\t\t\tif debug {\n\t\t\t\t\t\tfmt.Printf(\"\\n=== RUNNING CMD ===\\npython %s -t J2000 -f IAU_Earth -e \\\"%s\\\" -s \\\"%s\\\"\\n\", chgframePath, astro.CurrentDT.Format(time.ANSIC), state)\n\t\t\t\t\t}\n\t\t\t\t\tcmd := exec.Command(\"python\", chgframePath, \"-t\", \"J2000\", \"-f\", \"IAU_Mars\", \"-e\", astro.CurrentDT.Format(time.ANSIC), \"-s\", state)\n\t\t\t\t\tcmdOut, err := cmd.Output()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Fprintln(os.Stderr, \"Error converting orbit to helio \", err)\n\t\t\t\t\t\tos.Exit(1)\n\t\t\t\t\t}\n\t\t\t\t\tout := string(cmdOut)\n\n\t\t\t\t\t\/\/ Process output\n\t\t\t\t\tnewState := strings.TrimSpace(string(out))\n\t\t\t\t\t\/\/ Cf. https:\/\/play.golang.org\/p\/g-a4idjhIb\n\t\t\t\t\tnewState = newState[1 : len(newState)-1]\n\t\t\t\t\tcomponents := strings.Split(newState, \",\")\n\t\t\t\t\tvar nR = make([]float64, 3)\n\t\t\t\t\tvar nV = make([]float64, 3)\n\t\t\t\t\tfor i := 0; i < 6; i++ {\n\t\t\t\t\t\tfl, err := strconv.ParseFloat(strings.TrimSpace(components[i]), 64)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif i < 3 {\n\t\t\t\t\t\t\tnR[i] = fl\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tnV[i-3] = fl\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tvNorm := math.Sqrt(math.Pow(nV[0], 2) + math.Pow(nV[1], 2) + math.Pow(nV[2], 2))\n\t\t\t\t\t\/\/ Add to TSV file\n\t\t\t\t\trslts <- fmt.Sprintf(\"%f,%f,%f,%f,%f\\n\", vNorm, i, Ω, ω, ν)\n\t\t\t\t\tif vNorm > maxV {\n\t\t\t\t\t\tmaxV = vNorm\n\t\t\t\t\t\tmaxOrbit = *initMarsOrbit(i, Ω, ω, ν)\n\t\t\t\t\t} else if vNorm < minV {\n\t\t\t\t\t\tminV = vNorm\n\t\t\t\t\t\tminOrbit = *initMarsOrbit(i, Ω, ω, ν)\n\t\t\t\t\t}\n\t\t\t\t\tif debug {\n\t\t\t\t\t\tfmt.Printf(\"\\nν=%f\\t=>V=%+v\\t|V|=%f\\n\", ν, nV, vNorm)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfmt.Printf(\"\\n\\n=== RESULT ===\\n\\nmaxV=%.3f km\/s\\t%s\\nminV=%.3f km\/s\\t%s\\n\\n\", maxV, maxOrbit, minV, minOrbit)\n}\n\nfunc streamResults(a, e float64, fn string, rslts <-chan string) {\n\t\/\/ Write CSV file.\n\tf, err := os.Create(fmt.Sprintf(\".\/%s.csv\", fn))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer f.Close()\n\t\/\/ Header\n\tf.WriteString(fmt.Sprintf(\"#a=%f km\\te=%f\\n#V(km\/s), i (degrees), raan (degrees), arg peri (degrees),nu (degrees)\\n\", a, e))\n\tfor {\n\t\trslt, more := <-rslts\n\t\tif more {\n\t\t\tif _, err := f.WriteString(rslt); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tf.Close()\n\twg.Done()\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/browser\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/stormforger\/cli\/api\"\n\t\"github.com\/stormforger\/cli\/api\/testrun\"\n\t\"github.com\/stormforger\/cli\/internal\/esbundle\"\n\t\"github.com\/stormforger\/cli\/internal\/pflagutil\"\n\t\"github.com\/stormforger\/cli\/internal\/stringutil\"\n)\n\nconst defaultNFRData = `version: \"0.1\"\nrequirements:\n- test.completed: true\n- checks:\n select: success_rate\n test: [\"=\", 1]\n- http.error_ratio:\n test: [\"=\", 0]\n`\n\nvar (\n\t\/\/ testRunLaunchCmd represents the test run launch command\n\ttestRunLaunchCmd = &cobra.Command{\n\t\tUse: \"launch <test-case-ref>\",\n\t\tShort: \"Create and launch a new test run\",\n\t\tLong: fmt.Sprintf(`Create and launch a new test run based on given test case\n\n <test-case-ref> can be 'organisation-name\/test-case-name' or 'test-case-uid'.\n\nExamples\n--------\n\n* Launch by organisation and test case name\n\n forge test-case launch acme-inc\/checkout\n\n* Alternatively the test case UID can also be provided\n\n forge test-case launch xPSX5KXM\n\nConfiguration\n-------------\n\nYou can specify configuration for a test run that will overwrite what is defined\nin your JavaScript definition.\n\n* Available cluster sizings:\n * %s\n\nAvailable cluster regions are available at https:\/\/docs.stormforger.com\/reference\/test-cluster\/#cluster-region\n\n%s\n`, strings.Join(validSizings, \"\\n * \"), bundlingHelpInfo),\n\t\tPersistentPreRun: func(cmd *cobra.Command, args []string) {\n\t\t\tif len(args) < 1 {\n\t\t\t\tlog.Fatal(\"Missing argument: test case reference\")\n\t\t\t}\n\n\t\t\tif len(args) > 1 {\n\t\t\t\tlog.Fatal(\"Too many arguments\")\n\t\t\t}\n\n\t\t\tif testRunLaunchOpts.ClusterRegion != \"\" && !stringutil.InSlice(testRunLaunchOpts.ClusterRegion, validRegions) {\n\t\t\t\tlog.Fatalf(\"%s is not a valid region\", testRunLaunchOpts.ClusterRegion)\n\t\t\t}\n\n\t\t\tif testRunLaunchOpts.ClusterSizing != \"\" && !stringutil.InSlice(testRunLaunchOpts.ClusterSizing, validSizings) {\n\t\t\t\tlog.Fatalf(\"%s is not a valid sizing\", testRunLaunchOpts.ClusterSizing)\n\t\t\t}\n\t\t},\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tclient := NewClient()\n\t\t\tMainTestRunLaunch(client, args[0], testRunLaunchOpts)\n\t\t},\n\t\tValidArgsFunction: completeOrgaAndCase,\n\t}\n\n\ttestRunLaunchOpts testRunLaunchCmdOpts\n\n\tvalidRegions = []string{\n\t\t\"ap-east-1\",\n\t\t\"ap-northeast-1\",\n\t\t\"ap-northeast-2\",\n\t\t\"ap-south-1\",\n\t\t\"ap-southeast-1\",\n\t\t\"ap-southeast-2\",\n\t\t\"ca-central-1\",\n\t\t\"eu-central-1\",\n\t\t\"eu-north-1\",\n\t\t\"eu-west-1\",\n\t\t\"eu-west-2\",\n\t\t\"eu-west-3\",\n\t\t\"sa-east-1\",\n\t\t\"us-east-1\",\n\t\t\"us-east-2\",\n\t\t\"us-west-1\",\n\t\t\"us-west-2\",\n\t}\n\n\tvalidSizings = []string{\n\t\t\"preflight\",\n\t\t\"tiny\",\n\t\t\"small\",\n\t\t\"medium\",\n\t\t\"large\",\n\t\t\"xlarge\",\n\t\t\"2xlarge\",\n\t}\n)\n\ntype testRunLaunchCmdOpts struct {\n\tOpenInBrowser bool\n\n\tTitle string\n\tNotes string\n\tJavascriptDefinitionFile string\n\n\tClusterRegion string\n\tClusterSizing string\n\tWatch bool\n\tMaxWatchTime time.Duration\n\tCheckNFR string\n\tDisableGzip bool\n\tSkipWait bool\n\tDumpTraffic bool\n\tSessionValidationMode bool\n\tValidate bool\n\tTestRunIDOutputFile string\n\n\tDefine map[string]string\n}\n\nfunc init() {\n\tTestCaseCmd.AddCommand(testRunLaunchCmd)\n\n\ttestRunLaunchCmd.Flags().StringVar(&testRunLaunchOpts.TestRunIDOutputFile, \"uid-file\", \"\", \"Output file for the test-run id\")\n\n\ttestRunLaunchCmd.Flags().BoolVar(&testRunLaunchOpts.OpenInBrowser, \"open\", false, \"Open test run in browser\")\n\n\ttestRunLaunchCmd.Flags().StringVarP(&testRunLaunchOpts.Title, \"title\", \"t\", \"\", \"Descriptive title of test run\")\n\ttestRunLaunchCmd.Flags().StringVarP(&testRunLaunchOpts.Notes, \"notes\", \"n\", \"\", \"Longer description (Markdown supported)\")\n\n\ttestRunLaunchCmd.Flags().StringVar(&testRunLaunchOpts.ClusterRegion, \"region\", \"\", \"Region to start test in\")\n\ttestRunLaunchCmd.Flags().StringVar(&testRunLaunchOpts.ClusterSizing, \"sizing\", \"\", \"Cluster sizing to use\")\n\n\ttestRunLaunchCmd.Flags().BoolVarP(&testRunLaunchOpts.Watch, \"watch\", \"w\", false, \"Automatically watch newly launched test run\")\n\ttestRunLaunchCmd.Flags().DurationVar(&testRunLaunchOpts.MaxWatchTime, \"watch-timeout\", 0, \"Maximum duration in seconds to watch\")\n\n\ttestRunLaunchCmd.Flags().StringVar(&testRunLaunchOpts.JavascriptDefinitionFile, \"test-case-file\", \"\", \"Update the test-case definition from this file before the launch\")\n\ttestRunLaunchCmd.Flags().StringVar(&testRunLaunchOpts.CheckNFR, \"nfr-check-file\", \"\", \"Check test result against NFR definition (implies --watch)\")\n\n\t\/\/ options for debugging\n\ttestRunLaunchCmd.Flags().BoolVar(&testRunLaunchOpts.DisableGzip, \"disable-gzip\", false, \"Globally disable gzip\")\n\ttestRunLaunchCmd.Flags().BoolVar(&testRunLaunchOpts.SkipWait, \"skip-wait\", false, \"Ignore defined waits\")\n\ttestRunLaunchCmd.Flags().BoolVar(&testRunLaunchOpts.DumpTraffic, \"dump-traffic\", false, \"Create traffic dump\")\n\ttestRunLaunchCmd.Flags().BoolVar(&testRunLaunchOpts.SessionValidationMode, \"session-validation-mode\", false, \"Enable session validation mode\")\n\ttestRunLaunchCmd.Flags().BoolVar(&testRunLaunchOpts.Validate, \"validate\", false, \"Perform validation run\")\n\n\t\/\/ bundling\n\ttestRunLaunchCmd.PersistentFlags().Var(&pflagutil.KeyValueFlag{Map: &testRunLaunchOpts.Define}, \"define\", \"Defines a list of K=V while parsing: debug=false\")\n\n\t\/\/ hints for completion of flags\n\ttestRunLaunchCmd.MarkFlagFilename(\"test-case-file\", \"js\")\n\ttestRunLaunchCmd.MarkFlagFilename(\"nfr-check-file\", \"yml\", \"yaml\")\n\ttestRunLaunchCmd.RegisterFlagCompletionFunc(\"region\", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {\n\t\treturn stringutil.FilterByPrefix(toComplete, validRegions), cobra.ShellCompDirectiveDefault\n\t})\n\ttestRunLaunchCmd.RegisterFlagCompletionFunc(\"sizing\", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {\n\t\treturn stringutil.FilterByPrefix(toComplete, validSizings), cobra.ShellCompDirectiveDefault\n\t})\n\n}\n\n\/\/ MainTestRunLaunch runs a test-case and allows watching and validation that test-run.\n\/\/ testCaseSpec is required and specifies the test-case to launch.\nfunc MainTestRunLaunch(client *api.Client, testCaseSpec string, testRunLaunchOpts testRunLaunchCmdOpts) {\n\tvar mapper esbundle.SourceMapper\n\n\ttestCaseUID := mustLookupTestCase(client, testCaseSpec)\n\n\tlaunchOptions := api.TestRunLaunchOptions{\n\t\tTitle: testRunLaunchOpts.Title,\n\t\tNotes: testRunLaunchOpts.Notes,\n\n\t\tClusterRegion: testRunLaunchOpts.ClusterRegion,\n\t\tClusterSizing: testRunLaunchOpts.ClusterSizing,\n\t\tDisableGzip: testRunLaunchOpts.DisableGzip,\n\t\tSkipWait: testRunLaunchOpts.SkipWait,\n\t\tDumpTraffic: testRunLaunchOpts.DumpTraffic,\n\t\tSessionValidationMode: testRunLaunchOpts.SessionValidationMode,\n\t}\n\tif testRunLaunchOpts.JavascriptDefinitionFile != \"\" {\n\t\tbundler := testCaseFileBundler{Defines: testRunLaunchOpts.Define}\n\t\tbundle, err := bundler.Bundle(testRunLaunchOpts.JavascriptDefinitionFile, \"test-case.js\")\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to open %s: %v\", bundle.Name, err)\n\t\t}\n\n\t\tmapper = bundle.Mapper\n\t\tlaunchOptions.JavascriptDefinition.Filename = bundle.Name\n\t\tlaunchOptions.JavascriptDefinition.Reader = bundle.Content\n\t}\n\n\tif testRunLaunchOpts.Validate {\n\t\tlaunchOptions.SessionValidationMode = true\n\t\tlaunchOptions.ClusterSizing = \"preflight\"\n\t}\n\n\tstatus, response, err := client.TestRunCreate(testCaseUID, launchOptions)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif !status {\n\t\terrorMeta, err := api.ErrorDecoder{SourceMapper: mapper}.UnmarshalErrorMeta(strings.NewReader(response))\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tprintValidationResultHuman(os.Stderr, status, errorMeta)\n\t\tcmdExit(status)\n\t}\n\n\ttestRun, err := testrun.UnmarshalSingle(strings.NewReader(response))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif testRunLaunchOpts.TestRunIDOutputFile != \"\" {\n\t\tf := testRunLaunchOpts.TestRunIDOutputFile\n\t\terr := ioutil.WriteFile(f, []byte(testRun.ID), 0644)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Failed to write file: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\tif rootOpts.OutputFormat == \"json\" {\n\t\tfmt.Println(string(response))\n\t} else {\n\t\t\/\/ FIXME can we integrate this into testrun.UnmarshalSingle somehow?\n\t\tmeta, err := api.UnmarshalMeta(strings.NewReader(response))\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tif launchOptions.JavascriptDefinition.Reader != nil {\n\t\t\tfmt.Println(\"Test-Case successfully updated\")\n\t\t}\n\n\t\tfmt.Printf(`Launching test %s\nUID: %s\nWeb URL: %s\n`,\n\t\t\ttestRun.Scope,\n\t\t\ttestRun.ID,\n\t\t\tmeta.Links.SelfWeb,\n\t\t)\n\n\t\tfmt.Printf(\"Configuration: %s cluster in %s\\n\", testRun.TestConfiguration.ClusterSizing, testRun.TestConfiguration.ClusterRegion)\n\n\t\tif testRun.TestConfiguration.DisableGzip {\n\t\t\tfmt.Print(\" [\\u2713] Disabled GZIP\\n\")\n\t\t}\n\t\tif testRun.TestConfiguration.SkipWait {\n\t\t\tfmt.Print(\" [\\u2713] Skip Waits\\n\")\n\t\t}\n\t\tif testRun.TestConfiguration.DumpTrafficFull {\n\t\t\tfmt.Print(\" [\\u2713] Traffic Dump\\n\")\n\t\t}\n\t\tif testRun.TestConfiguration.SessionValidationMode {\n\t\t\tfmt.Print(\" [\\u2713] Session Validation Mode\\n\")\n\t\t}\n\n\t\tif testRunLaunchOpts.OpenInBrowser {\n\t\t\tfmt.Printf(\"Opening %s in browser...\\n\", meta.Links.SelfWeb)\n\t\t\terr = browser.OpenURL(meta.Links.SelfWeb)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif testRunLaunchOpts.Watch || testRunLaunchOpts.CheckNFR != \"\" || testRunLaunchOpts.Validate {\n\t\tif rootOpts.OutputFormat != \"json\" {\n\t\t\tfmt.Println(\"\\nWatching...\")\n\t\t}\n\n\t\twatchTestRun(testRun.ID, testRunLaunchOpts.MaxWatchTime.Round(time.Second).Seconds(), rootOpts.OutputFormat)\n\n\t\tif testRunLaunchOpts.CheckNFR != \"\" || testRunLaunchOpts.Validate {\n\t\t\tfmt.Println(\"Test finished, running non-functional checks...\")\n\n\t\t\tfileName := \"\"\n\t\t\tvar nfrData io.Reader\n\t\t\tif testRunLaunchOpts.CheckNFR != \"\" {\n\t\t\t\tfileName = filepath.Base(testRunLaunchOpts.CheckNFR)\n\t\t\t\tnfrData, err = os.OpenFile(testRunLaunchOpts.CheckNFR, os.O_RDONLY, 0755)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfileName = \"validation.yml\"\n\t\t\t\tnfrData = bytes.NewBufferString(defaultNFRData)\n\t\t\t}\n\n\t\t\trunNfrCheck(*client, testRun.ID, fileName, nfrData)\n\t\t} else {\n\t\t\tresult := fetchTestRun(*client, testRun.ID)\n\t\t\tfmt.Println(string(result))\n\t\t}\n\t}\n}\n<commit_msg>fix: Print input filename when bundling fails (#186)<commit_after>package cmd\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/browser\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/stormforger\/cli\/api\"\n\t\"github.com\/stormforger\/cli\/api\/testrun\"\n\t\"github.com\/stormforger\/cli\/internal\/esbundle\"\n\t\"github.com\/stormforger\/cli\/internal\/pflagutil\"\n\t\"github.com\/stormforger\/cli\/internal\/stringutil\"\n)\n\nconst defaultNFRData = `version: \"0.1\"\nrequirements:\n- test.completed: true\n- checks:\n select: success_rate\n test: [\"=\", 1]\n- http.error_ratio:\n test: [\"=\", 0]\n`\n\nvar (\n\t\/\/ testRunLaunchCmd represents the test run launch command\n\ttestRunLaunchCmd = &cobra.Command{\n\t\tUse: \"launch <test-case-ref>\",\n\t\tShort: \"Create and launch a new test run\",\n\t\tLong: fmt.Sprintf(`Create and launch a new test run based on given test case\n\n <test-case-ref> can be 'organisation-name\/test-case-name' or 'test-case-uid'.\n\nExamples\n--------\n\n* Launch by organisation and test case name\n\n forge test-case launch acme-inc\/checkout\n\n* Alternatively the test case UID can also be provided\n\n forge test-case launch xPSX5KXM\n\nConfiguration\n-------------\n\nYou can specify configuration for a test run that will overwrite what is defined\nin your JavaScript definition.\n\n* Available cluster sizings:\n * %s\n\nAvailable cluster regions are available at https:\/\/docs.stormforger.com\/reference\/test-cluster\/#cluster-region\n\n%s\n`, strings.Join(validSizings, \"\\n * \"), bundlingHelpInfo),\n\t\tPersistentPreRun: func(cmd *cobra.Command, args []string) {\n\t\t\tif len(args) < 1 {\n\t\t\t\tlog.Fatal(\"Missing argument: test case reference\")\n\t\t\t}\n\n\t\t\tif len(args) > 1 {\n\t\t\t\tlog.Fatal(\"Too many arguments\")\n\t\t\t}\n\n\t\t\tif testRunLaunchOpts.ClusterRegion != \"\" && !stringutil.InSlice(testRunLaunchOpts.ClusterRegion, validRegions) {\n\t\t\t\tlog.Fatalf(\"%s is not a valid region\", testRunLaunchOpts.ClusterRegion)\n\t\t\t}\n\n\t\t\tif testRunLaunchOpts.ClusterSizing != \"\" && !stringutil.InSlice(testRunLaunchOpts.ClusterSizing, validSizings) {\n\t\t\t\tlog.Fatalf(\"%s is not a valid sizing\", testRunLaunchOpts.ClusterSizing)\n\t\t\t}\n\t\t},\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\tclient := NewClient()\n\t\t\tMainTestRunLaunch(client, args[0], testRunLaunchOpts)\n\t\t},\n\t\tValidArgsFunction: completeOrgaAndCase,\n\t}\n\n\ttestRunLaunchOpts testRunLaunchCmdOpts\n\n\tvalidRegions = []string{\n\t\t\"ap-east-1\",\n\t\t\"ap-northeast-1\",\n\t\t\"ap-northeast-2\",\n\t\t\"ap-south-1\",\n\t\t\"ap-southeast-1\",\n\t\t\"ap-southeast-2\",\n\t\t\"ca-central-1\",\n\t\t\"eu-central-1\",\n\t\t\"eu-north-1\",\n\t\t\"eu-west-1\",\n\t\t\"eu-west-2\",\n\t\t\"eu-west-3\",\n\t\t\"sa-east-1\",\n\t\t\"us-east-1\",\n\t\t\"us-east-2\",\n\t\t\"us-west-1\",\n\t\t\"us-west-2\",\n\t}\n\n\tvalidSizings = []string{\n\t\t\"preflight\",\n\t\t\"tiny\",\n\t\t\"small\",\n\t\t\"medium\",\n\t\t\"large\",\n\t\t\"xlarge\",\n\t\t\"2xlarge\",\n\t}\n)\n\ntype testRunLaunchCmdOpts struct {\n\tOpenInBrowser bool\n\n\tTitle string\n\tNotes string\n\tJavascriptDefinitionFile string\n\n\tClusterRegion string\n\tClusterSizing string\n\tWatch bool\n\tMaxWatchTime time.Duration\n\tCheckNFR string\n\tDisableGzip bool\n\tSkipWait bool\n\tDumpTraffic bool\n\tSessionValidationMode bool\n\tValidate bool\n\tTestRunIDOutputFile string\n\n\tDefine map[string]string\n}\n\nfunc init() {\n\tTestCaseCmd.AddCommand(testRunLaunchCmd)\n\n\ttestRunLaunchCmd.Flags().StringVar(&testRunLaunchOpts.TestRunIDOutputFile, \"uid-file\", \"\", \"Output file for the test-run id\")\n\n\ttestRunLaunchCmd.Flags().BoolVar(&testRunLaunchOpts.OpenInBrowser, \"open\", false, \"Open test run in browser\")\n\n\ttestRunLaunchCmd.Flags().StringVarP(&testRunLaunchOpts.Title, \"title\", \"t\", \"\", \"Descriptive title of test run\")\n\ttestRunLaunchCmd.Flags().StringVarP(&testRunLaunchOpts.Notes, \"notes\", \"n\", \"\", \"Longer description (Markdown supported)\")\n\n\ttestRunLaunchCmd.Flags().StringVar(&testRunLaunchOpts.ClusterRegion, \"region\", \"\", \"Region to start test in\")\n\ttestRunLaunchCmd.Flags().StringVar(&testRunLaunchOpts.ClusterSizing, \"sizing\", \"\", \"Cluster sizing to use\")\n\n\ttestRunLaunchCmd.Flags().BoolVarP(&testRunLaunchOpts.Watch, \"watch\", \"w\", false, \"Automatically watch newly launched test run\")\n\ttestRunLaunchCmd.Flags().DurationVar(&testRunLaunchOpts.MaxWatchTime, \"watch-timeout\", 0, \"Maximum duration in seconds to watch\")\n\n\ttestRunLaunchCmd.Flags().StringVar(&testRunLaunchOpts.JavascriptDefinitionFile, \"test-case-file\", \"\", \"Update the test-case definition from this file before the launch\")\n\ttestRunLaunchCmd.Flags().StringVar(&testRunLaunchOpts.CheckNFR, \"nfr-check-file\", \"\", \"Check test result against NFR definition (implies --watch)\")\n\n\t\/\/ options for debugging\n\ttestRunLaunchCmd.Flags().BoolVar(&testRunLaunchOpts.DisableGzip, \"disable-gzip\", false, \"Globally disable gzip\")\n\ttestRunLaunchCmd.Flags().BoolVar(&testRunLaunchOpts.SkipWait, \"skip-wait\", false, \"Ignore defined waits\")\n\ttestRunLaunchCmd.Flags().BoolVar(&testRunLaunchOpts.DumpTraffic, \"dump-traffic\", false, \"Create traffic dump\")\n\ttestRunLaunchCmd.Flags().BoolVar(&testRunLaunchOpts.SessionValidationMode, \"session-validation-mode\", false, \"Enable session validation mode\")\n\ttestRunLaunchCmd.Flags().BoolVar(&testRunLaunchOpts.Validate, \"validate\", false, \"Perform validation run\")\n\n\t\/\/ bundling\n\ttestRunLaunchCmd.PersistentFlags().Var(&pflagutil.KeyValueFlag{Map: &testRunLaunchOpts.Define}, \"define\", \"Defines a list of K=V while parsing: debug=false\")\n\n\t\/\/ hints for completion of flags\n\ttestRunLaunchCmd.MarkFlagFilename(\"test-case-file\", \"js\")\n\ttestRunLaunchCmd.MarkFlagFilename(\"nfr-check-file\", \"yml\", \"yaml\")\n\ttestRunLaunchCmd.RegisterFlagCompletionFunc(\"region\", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {\n\t\treturn stringutil.FilterByPrefix(toComplete, validRegions), cobra.ShellCompDirectiveDefault\n\t})\n\ttestRunLaunchCmd.RegisterFlagCompletionFunc(\"sizing\", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {\n\t\treturn stringutil.FilterByPrefix(toComplete, validSizings), cobra.ShellCompDirectiveDefault\n\t})\n\n}\n\n\/\/ MainTestRunLaunch runs a test-case and allows watching and validation that test-run.\n\/\/ testCaseSpec is required and specifies the test-case to launch.\nfunc MainTestRunLaunch(client *api.Client, testCaseSpec string, testRunLaunchOpts testRunLaunchCmdOpts) {\n\tvar mapper esbundle.SourceMapper\n\n\ttestCaseUID := mustLookupTestCase(client, testCaseSpec)\n\n\tlaunchOptions := api.TestRunLaunchOptions{\n\t\tTitle: testRunLaunchOpts.Title,\n\t\tNotes: testRunLaunchOpts.Notes,\n\n\t\tClusterRegion: testRunLaunchOpts.ClusterRegion,\n\t\tClusterSizing: testRunLaunchOpts.ClusterSizing,\n\t\tDisableGzip: testRunLaunchOpts.DisableGzip,\n\t\tSkipWait: testRunLaunchOpts.SkipWait,\n\t\tDumpTraffic: testRunLaunchOpts.DumpTraffic,\n\t\tSessionValidationMode: testRunLaunchOpts.SessionValidationMode,\n\t}\n\tif testRunLaunchOpts.JavascriptDefinitionFile != \"\" {\n\t\tbundler := testCaseFileBundler{Defines: testRunLaunchOpts.Define}\n\t\tbundle, err := bundler.Bundle(testRunLaunchOpts.JavascriptDefinitionFile, \"test-case.js\")\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to bundle %s: %v\", testRunLaunchOpts.JavascriptDefinitionFile, err)\n\t\t}\n\n\t\tmapper = bundle.Mapper\n\t\tlaunchOptions.JavascriptDefinition.Filename = bundle.Name\n\t\tlaunchOptions.JavascriptDefinition.Reader = bundle.Content\n\t}\n\n\tif testRunLaunchOpts.Validate {\n\t\tlaunchOptions.SessionValidationMode = true\n\t\tlaunchOptions.ClusterSizing = \"preflight\"\n\t}\n\n\tstatus, response, err := client.TestRunCreate(testCaseUID, launchOptions)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif !status {\n\t\terrorMeta, err := api.ErrorDecoder{SourceMapper: mapper}.UnmarshalErrorMeta(strings.NewReader(response))\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tprintValidationResultHuman(os.Stderr, status, errorMeta)\n\t\tcmdExit(status)\n\t}\n\n\ttestRun, err := testrun.UnmarshalSingle(strings.NewReader(response))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif testRunLaunchOpts.TestRunIDOutputFile != \"\" {\n\t\tf := testRunLaunchOpts.TestRunIDOutputFile\n\t\terr := ioutil.WriteFile(f, []byte(testRun.ID), 0644)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Failed to write file: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\tif rootOpts.OutputFormat == \"json\" {\n\t\tfmt.Println(string(response))\n\t} else {\n\t\t\/\/ FIXME can we integrate this into testrun.UnmarshalSingle somehow?\n\t\tmeta, err := api.UnmarshalMeta(strings.NewReader(response))\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tif launchOptions.JavascriptDefinition.Reader != nil {\n\t\t\tfmt.Println(\"Test-Case successfully updated\")\n\t\t}\n\n\t\tfmt.Printf(`Launching test %s\nUID: %s\nWeb URL: %s\n`,\n\t\t\ttestRun.Scope,\n\t\t\ttestRun.ID,\n\t\t\tmeta.Links.SelfWeb,\n\t\t)\n\n\t\tfmt.Printf(\"Configuration: %s cluster in %s\\n\", testRun.TestConfiguration.ClusterSizing, testRun.TestConfiguration.ClusterRegion)\n\n\t\tif testRun.TestConfiguration.DisableGzip {\n\t\t\tfmt.Print(\" [\\u2713] Disabled GZIP\\n\")\n\t\t}\n\t\tif testRun.TestConfiguration.SkipWait {\n\t\t\tfmt.Print(\" [\\u2713] Skip Waits\\n\")\n\t\t}\n\t\tif testRun.TestConfiguration.DumpTrafficFull {\n\t\t\tfmt.Print(\" [\\u2713] Traffic Dump\\n\")\n\t\t}\n\t\tif testRun.TestConfiguration.SessionValidationMode {\n\t\t\tfmt.Print(\" [\\u2713] Session Validation Mode\\n\")\n\t\t}\n\n\t\tif testRunLaunchOpts.OpenInBrowser {\n\t\t\tfmt.Printf(\"Opening %s in browser...\\n\", meta.Links.SelfWeb)\n\t\t\terr = browser.OpenURL(meta.Links.SelfWeb)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif testRunLaunchOpts.Watch || testRunLaunchOpts.CheckNFR != \"\" || testRunLaunchOpts.Validate {\n\t\tif rootOpts.OutputFormat != \"json\" {\n\t\t\tfmt.Println(\"\\nWatching...\")\n\t\t}\n\n\t\twatchTestRun(testRun.ID, testRunLaunchOpts.MaxWatchTime.Round(time.Second).Seconds(), rootOpts.OutputFormat)\n\n\t\tif testRunLaunchOpts.CheckNFR != \"\" || testRunLaunchOpts.Validate {\n\t\t\tfmt.Println(\"Test finished, running non-functional checks...\")\n\n\t\t\tfileName := \"\"\n\t\t\tvar nfrData io.Reader\n\t\t\tif testRunLaunchOpts.CheckNFR != \"\" {\n\t\t\t\tfileName = filepath.Base(testRunLaunchOpts.CheckNFR)\n\t\t\t\tnfrData, err = os.OpenFile(testRunLaunchOpts.CheckNFR, os.O_RDONLY, 0755)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfileName = \"validation.yml\"\n\t\t\t\tnfrData = bytes.NewBufferString(defaultNFRData)\n\t\t\t}\n\n\t\t\trunNfrCheck(*client, testRun.ID, fileName, nfrData)\n\t\t} else {\n\t\t\tresult := fetchTestRun(*client, testRun.ID)\n\t\t\tfmt.Println(string(result))\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n)\n\nconst version = \"0.1.3\"\n\nfunc main() {\n\tup := flag.Bool(\"update\", false, \"update command\")\n\tflag.Parse()\n\n\tfmt.Printf(\"version %s\\n\", version)\n\n\tvar code int\n\tif *up {\n\t\tif out, err := update(srcPath); err != nil {\n\t\t\tfmt.Printf(\"Update fail: %v\\n\", err)\n\t\t\tfmt.Println(string(out))\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Println(\"Update done.\")\n\t\tos.Exit(0)\n\t}\n\n\tfmt.Println(\"Command done.\")\n\n\tos.Exit(code)\n}\n\nconst srcPath = \"github.com\/dvrkps\/dojo\/cmdupdate\"\n\nfunc update(srcPath string) ([]byte, error) {\n\tcmd := exec.Command(\"go\", \"get\", \"-u\", srcPath)\n\treturn cmd.CombinedOutput()\n}\n<commit_msg>cmdupdate: clean constants<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n)\n\nconst (\n\tversion = \"0.1.3\"\n\n\tsrcPath = \"github.com\/dvrkps\/dojo\/cmdupdate\"\n)\n\nfunc main() {\n\tup := flag.Bool(\"update\", false, \"update command\")\n\tflag.Parse()\n\n\tfmt.Printf(\"version %s\\n\", version)\n\n\tvar code int\n\tif *up {\n\t\tif out, err := update(srcPath); err != nil {\n\t\t\tfmt.Printf(\"Update fail: %v\\n\", err)\n\t\t\tfmt.Println(string(out))\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Println(\"Update done.\")\n\t\tos.Exit(0)\n\t}\n\n\tfmt.Println(\"Command done.\")\n\n\tos.Exit(code)\n}\n\nfunc update(srcPath string) ([]byte, error) {\n\tcmd := exec.Command(\"go\", \"get\", \"-u\", srcPath)\n\treturn cmd.CombinedOutput()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"testing\"\n)\n\nfunc TestDiff(t *testing.T) {\n\twd, _ := os.Getwd()\n\ttd := filepath.Join(wd, \"test\")\n\n\tdirs, err := ioutil.ReadDir(td)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, fi := range dirs {\n\t\tif !fi.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tdir := filepath.Join(td, fi.Name())\n\t\tactualOutputDir := filepath.Join(dir, \"actual_output\")\n\n\t\terr := os.RemoveAll(actualOutputDir)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\terr = os.MkdirAll(actualOutputDir, 0777)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\trunProto(t, dir)\n\t\trunDiff(t, dir)\n\t}\n\n}\n\nfunc runProto(t *testing.T, dir string) {\n\tcmd := exec.Command(\"protoc\", \"--elm_out=..\/actual_output\", \"test.proto\")\n\tcmd.Dir = filepath.Join(dir, \"input\")\n\tt.Logf(\"cmd: %v\", cmd)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"Error: %v, %v\", err, string(out))\n\t}\n}\n\nfunc runDiff(t *testing.T, dir string) {\n\tcmd := exec.Command(\"diff\", \"-y\", \"expected_output\", \"actual_output\")\n\tcmd.Dir = dir\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"Error: %v, %v\", err, string(out))\n\t}\n}\n<commit_msg>Print protoc output in verbose tests<commit_after>package main\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"testing\"\n)\n\nfunc TestDiff(t *testing.T) {\n\twd, _ := os.Getwd()\n\ttd := filepath.Join(wd, \"test\")\n\n\tdirs, err := ioutil.ReadDir(td)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfor _, fi := range dirs {\n\t\tif !fi.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tdir := filepath.Join(td, fi.Name())\n\t\tactualOutputDir := filepath.Join(dir, \"actual_output\")\n\n\t\terr := os.RemoveAll(actualOutputDir)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\terr = os.MkdirAll(actualOutputDir, 0777)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\trunProto(t, dir)\n\t\trunDiff(t, dir)\n\t}\n\n}\n\nfunc runProto(t *testing.T, dir string) {\n\tcmd := exec.Command(\"protoc\", \"--elm_out=..\/actual_output\", \"test.proto\")\n\tcmd.Dir = filepath.Join(dir, \"input\")\n\tt.Logf(\"cmd: %v\", cmd)\n\tout, err := cmd.CombinedOutput()\n\tt.Logf(\"Output: %s\", out)\n\tif err != nil {\n\t\tt.Fatalf(\"Error: %v\", err)\n\t}\n}\n\nfunc runDiff(t *testing.T, dir string) {\n\tcmd := exec.Command(\"diff\", \"-y\", \"expected_output\", \"actual_output\")\n\tcmd.Dir = dir\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"Error: %v, %v\", err, string(out))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Code generated by protoc-gen-gogo.\n\/\/ source: identify.proto\n\/\/ DO NOT EDIT!\n\n\/*\nPackage identify_pb is a generated protocol buffer package.\n\nIt is generated from these files:\n\tidentify.proto\n\nIt has these top-level messages:\n\tIdentify\n*\/\npackage identify_pb\n\nimport proto \"code.google.com\/p\/gogoprotobuf\/proto\"\nimport json \"encoding\/json\"\nimport math \"math\"\n\n\/\/ Reference proto, json, and math imports to suppress error if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = &json.SyntaxError{}\nvar _ = math.Inf\n\ntype Identify struct {\n\t\/\/ protocolVersion determines compatibility between peers\n\tProtocolVersion *string `protobuf:\"bytes,5,opt,name=protocolVersion\" json:\"protocolVersion,omitempty\"`\n\t\/\/ agentVersion is like a UserAgent string in browsers, or client version in bittorrent\n\t\/\/ includes the client name and client.\n\tAgentVersion *string `protobuf:\"bytes,6,opt,name=agentVersion\" json:\"agentVersion,omitempty\"`\n\t\/\/ publicKey is this node's public key (which also gives its node.ID)\n\t\/\/ - may not need to be sent, as secure channel implies it has been sent.\n\t\/\/ - then again, if we change \/ disable secure channel, may still want it.\n\tPublicKey []byte `protobuf:\"bytes,1,opt,name=publicKey\" json:\"publicKey,omitempty\"`\n\t\/\/ listenAddrs are the multiaddrs the sender node listens for open connections on\n\tListenAddrs [][]byte `protobuf:\"bytes,2,rep,name=listenAddrs\" json:\"listenAddrs,omitempty\"`\n\t\/\/ oservedAddr is the multiaddr of the remote endpoint that the sender node perceives\n\t\/\/ this is useful information to convey to the other side, as it helps the remote endpoint\n\t\/\/ determine whether its connection to the local peer goes through NAT.\n\tObservedAddr []byte `protobuf:\"bytes,4,opt,name=observedAddr\" json:\"observedAddr,omitempty\"`\n\t\/\/ protocols are the services this node is running\n\tProtocols []string `protobuf:\"bytes,3,rep,name=protocols\" json:\"protocols,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Identify) Reset() { *m = Identify{} }\nfunc (m *Identify) String() string { return proto.CompactTextString(m) }\nfunc (*Identify) ProtoMessage() {}\n\nfunc (m *Identify) GetProtocolVersion() string {\n\tif m != nil && m.ProtocolVersion != nil {\n\t\treturn *m.ProtocolVersion\n\t}\n\treturn \"\"\n}\n\nfunc (m *Identify) GetAgentVersion() string {\n\tif m != nil && m.AgentVersion != nil {\n\t\treturn *m.AgentVersion\n\t}\n\treturn \"\"\n}\n\nfunc (m *Identify) GetPublicKey() []byte {\n\tif m != nil {\n\t\treturn m.PublicKey\n\t}\n\treturn nil\n}\n\nfunc (m *Identify) GetListenAddrs() [][]byte {\n\tif m != nil {\n\t\treturn m.ListenAddrs\n\t}\n\treturn nil\n}\n\nfunc (m *Identify) GetObservedAddr() []byte {\n\tif m != nil {\n\t\treturn m.ObservedAddr\n\t}\n\treturn nil\n}\n\nfunc (m *Identify) GetProtocols() []string {\n\tif m != nil {\n\t\treturn m.Protocols\n\t}\n\treturn nil\n}\n\nfunc init() {\n}\n<commit_msg>vendor identify pb<commit_after>\/\/ Code generated by protoc-gen-gogo.\n\/\/ source: identify.proto\n\/\/ DO NOT EDIT!\n\n\/*\nPackage identify_pb is a generated protocol buffer package.\n\nIt is generated from these files:\n\tidentify.proto\n\nIt has these top-level messages:\n\tIdentify\n*\/\npackage identify_pb\n\nimport proto \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/code.google.com\/p\/gogoprotobuf\/proto\"\nimport json \"encoding\/json\"\nimport math \"math\"\n\n\/\/ Reference proto, json, and math imports to suppress error if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = &json.SyntaxError{}\nvar _ = math.Inf\n\ntype Identify struct {\n\t\/\/ protocolVersion determines compatibility between peers\n\tProtocolVersion *string `protobuf:\"bytes,5,opt,name=protocolVersion\" json:\"protocolVersion,omitempty\"`\n\t\/\/ agentVersion is like a UserAgent string in browsers, or client version in bittorrent\n\t\/\/ includes the client name and client.\n\tAgentVersion *string `protobuf:\"bytes,6,opt,name=agentVersion\" json:\"agentVersion,omitempty\"`\n\t\/\/ publicKey is this node's public key (which also gives its node.ID)\n\t\/\/ - may not need to be sent, as secure channel implies it has been sent.\n\t\/\/ - then again, if we change \/ disable secure channel, may still want it.\n\tPublicKey []byte `protobuf:\"bytes,1,opt,name=publicKey\" json:\"publicKey,omitempty\"`\n\t\/\/ listenAddrs are the multiaddrs the sender node listens for open connections on\n\tListenAddrs [][]byte `protobuf:\"bytes,2,rep,name=listenAddrs\" json:\"listenAddrs,omitempty\"`\n\t\/\/ oservedAddr is the multiaddr of the remote endpoint that the sender node perceives\n\t\/\/ this is useful information to convey to the other side, as it helps the remote endpoint\n\t\/\/ determine whether its connection to the local peer goes through NAT.\n\tObservedAddr []byte `protobuf:\"bytes,4,opt,name=observedAddr\" json:\"observedAddr,omitempty\"`\n\t\/\/ protocols are the services this node is running\n\tProtocols []string `protobuf:\"bytes,3,rep,name=protocols\" json:\"protocols,omitempty\"`\n\tXXX_unrecognized []byte `json:\"-\"`\n}\n\nfunc (m *Identify) Reset() { *m = Identify{} }\nfunc (m *Identify) String() string { return proto.CompactTextString(m) }\nfunc (*Identify) ProtoMessage() {}\n\nfunc (m *Identify) GetProtocolVersion() string {\n\tif m != nil && m.ProtocolVersion != nil {\n\t\treturn *m.ProtocolVersion\n\t}\n\treturn \"\"\n}\n\nfunc (m *Identify) GetAgentVersion() string {\n\tif m != nil && m.AgentVersion != nil {\n\t\treturn *m.AgentVersion\n\t}\n\treturn \"\"\n}\n\nfunc (m *Identify) GetPublicKey() []byte {\n\tif m != nil {\n\t\treturn m.PublicKey\n\t}\n\treturn nil\n}\n\nfunc (m *Identify) GetListenAddrs() [][]byte {\n\tif m != nil {\n\t\treturn m.ListenAddrs\n\t}\n\treturn nil\n}\n\nfunc (m *Identify) GetObservedAddr() []byte {\n\tif m != nil {\n\t\treturn m.ObservedAddr\n\t}\n\treturn nil\n}\n\nfunc (m *Identify) GetProtocols() []string {\n\tif m != nil {\n\t\treturn m.Protocols\n\t}\n\treturn nil\n}\n\nfunc init() {\n}\n<|endoftext|>"} {"text":"<commit_before>package types\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sort\"\n)\n\nvar ValidOSArch = map[string][]string{\n\t\"linux\": {\"amd64\", \"i386\"},\n\t\"freebsd\": {\"amd64\", \"i386\", \"arm\"},\n\t\"darwin\": {\"x86_64\", \"i386\"},\n}\n\ntype Labels []Label\n\ntype labels Labels\n\ntype Label struct {\n\tName ACName `json:\"name\"`\n\tValue string `json:\"value\"`\n}\n\nfunc (l Labels) assertValid() error {\n\tseen := map[ACName]string{}\n\tfor _, lbl := range l {\n\t\tif lbl.Name == \"name\" {\n\t\t\treturn fmt.Errorf(`invalid label name: \"name\"`)\n\t\t}\n\t\t_, ok := seen[lbl.Name]\n\t\tif ok {\n\t\t\treturn fmt.Errorf(`duplicate labels of name %q`, lbl.Name)\n\t\t}\n\t\tseen[lbl.Name] = lbl.Value\n\t}\n\tif os, ok := seen[\"os\"]; ok {\n\t\tif validArchs, ok := ValidOSArch[os]; !ok {\n\t\t\t\/\/ Not a whitelisted OS. TODO: how to warn rather than fail?\n\t\t\tvalidOses := make([]string, 0, len(ValidOSArch))\n\t\t\tfor validOs := range ValidOSArch {\n\t\t\t\tvalidOses = append(validOses, validOs)\n\t\t\t}\n\t\t\tsort.Strings(validOses)\n\t\t\treturn fmt.Errorf(`bad os %#v (must be one of: %v)`, os, validOses)\n\t\t} else {\n\t\t\t\/\/ Whitelisted OS. We check arch here, as arch makes sense only\n\t\t\t\/\/ when os is defined.\n\t\t\tif arch, ok := seen[\"arch\"]; ok {\n\t\t\t\tfound := false\n\t\t\t\tfor _, validArch := range validArchs {\n\t\t\t\t\tif arch == validArch {\n\t\t\t\t\t\tfound = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !found {\n\t\t\t\t\treturn fmt.Errorf(`bad arch %#v for %v (must be one of: %v)`, arch, os, validArchs)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (l Labels) MarshalJSON() ([]byte, error) {\n\tif err := l.assertValid(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(labels(l))\n}\n\nfunc (l *Labels) UnmarshalJSON(data []byte) error {\n\tvar jl labels\n\tif err := json.Unmarshal(data, &jl); err != nil {\n\t\treturn err\n\t}\n\tnl := Labels(jl)\n\tif err := nl.assertValid(); err != nil {\n\t\treturn err\n\t}\n\t*l = nl\n\treturn nil\n}\n\n\/\/ Get retrieves the value of the label by the given name from Labels, if it exists\nfunc (l Labels) Get(name string) (val string, ok bool) {\n\tfor _, lbl := range l {\n\t\tif lbl.Name.String() == name {\n\t\t\treturn lbl.Value, true\n\t\t}\n\t}\n\treturn \"\", false\n}\n<commit_msg>schema: add a LabelsFromMap function.<commit_after>package types\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sort\"\n)\n\nvar ValidOSArch = map[string][]string{\n\t\"linux\": {\"amd64\", \"i386\"},\n\t\"freebsd\": {\"amd64\", \"i386\", \"arm\"},\n\t\"darwin\": {\"x86_64\", \"i386\"},\n}\n\ntype Labels []Label\n\ntype labels Labels\n\ntype Label struct {\n\tName ACName `json:\"name\"`\n\tValue string `json:\"value\"`\n}\n\nfunc (l Labels) assertValid() error {\n\tseen := map[ACName]string{}\n\tfor _, lbl := range l {\n\t\tif lbl.Name == \"name\" {\n\t\t\treturn fmt.Errorf(`invalid label name: \"name\"`)\n\t\t}\n\t\t_, ok := seen[lbl.Name]\n\t\tif ok {\n\t\t\treturn fmt.Errorf(`duplicate labels of name %q`, lbl.Name)\n\t\t}\n\t\tseen[lbl.Name] = lbl.Value\n\t}\n\tif os, ok := seen[\"os\"]; ok {\n\t\tif validArchs, ok := ValidOSArch[os]; !ok {\n\t\t\t\/\/ Not a whitelisted OS. TODO: how to warn rather than fail?\n\t\t\tvalidOses := make([]string, 0, len(ValidOSArch))\n\t\t\tfor validOs := range ValidOSArch {\n\t\t\t\tvalidOses = append(validOses, validOs)\n\t\t\t}\n\t\t\tsort.Strings(validOses)\n\t\t\treturn fmt.Errorf(`bad os %#v (must be one of: %v)`, os, validOses)\n\t\t} else {\n\t\t\t\/\/ Whitelisted OS. We check arch here, as arch makes sense only\n\t\t\t\/\/ when os is defined.\n\t\t\tif arch, ok := seen[\"arch\"]; ok {\n\t\t\t\tfound := false\n\t\t\t\tfor _, validArch := range validArchs {\n\t\t\t\t\tif arch == validArch {\n\t\t\t\t\t\tfound = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !found {\n\t\t\t\t\treturn fmt.Errorf(`bad arch %#v for %v (must be one of: %v)`, arch, os, validArchs)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (l Labels) MarshalJSON() ([]byte, error) {\n\tif err := l.assertValid(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn json.Marshal(labels(l))\n}\n\nfunc (l *Labels) UnmarshalJSON(data []byte) error {\n\tvar jl labels\n\tif err := json.Unmarshal(data, &jl); err != nil {\n\t\treturn err\n\t}\n\tnl := Labels(jl)\n\tif err := nl.assertValid(); err != nil {\n\t\treturn err\n\t}\n\t*l = nl\n\treturn nil\n}\n\n\/\/ Get retrieves the value of the label by the given name from Labels, if it exists\nfunc (l Labels) Get(name string) (val string, ok bool) {\n\tfor _, lbl := range l {\n\t\tif lbl.Name.String() == name {\n\t\t\treturn lbl.Value, true\n\t\t}\n\t}\n\treturn \"\", false\n}\n\nfunc LabelsFromMap(labelsMap map[ACName]string) (Labels, error) {\n\tlabels := Labels{}\n\tfor n, v := range labelsMap {\n\t\tlabels = append(labels, Label{Name: n, Value: v})\n\t}\n\tif err := labels.assertValid(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn labels, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package sign\n\nimport \"time\"\n\n\/\/ Constants we expect might be used by other packages\n\/\/ TODO: can this be replaced by the application using the signer?\nvar ROUND_TIME time.Duration = 5000 * time.Second\nvar HEARTBEAT = ROUND_TIME + ROUND_TIME\/2\n\nvar RoundsPerView int64 = 200\n\nvar FALSE int64 = 0\nvar TRUE int64 = 1\n<commit_msg>changed round time<commit_after>package sign\n\nimport \"time\"\n\n\/\/ Constants we expect might be used by other packages\n\/\/ TODO: can this be replaced by the application using the signer?\nvar ROUND_TIME time.Duration = 10 * time.Second\nvar HEARTBEAT = ROUND_TIME + ROUND_TIME\/2\n\nvar RoundsPerView int64 = 200\n\nvar FALSE int64 = 0\nvar TRUE int64 = 1\n<|endoftext|>"} {"text":"<commit_before>package gitstats\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/intelsdi-x\/snap\/control\/plugin\"\n\t\"github.com\/intelsdi-x\/snap\/control\/plugin\/cpolicy\"\n\t\"github.com\/intelsdi-x\/snap\/core\"\n\t\"github.com\/intelsdi-x\/snap\/core\/cdata\"\n\t\"github.com\/intelsdi-x\/snap\/core\/ctypes\"\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n)\n\nfunc TestGitstatsPlugin(t *testing.T) {\n\tConvey(\"Meta should return metadata for the plugin\", t, func() {\n\t\tmeta := Meta()\n\t\tSo(meta.Name, ShouldResemble, Name)\n\t\tSo(meta.Version, ShouldResemble, Version)\n\t\tSo(meta.Type, ShouldResemble, plugin.CollectorPluginType)\n\t})\n\n\tConvey(\"Create Gitstats Collector\", t, func() {\n\t\tcollector := &Gitstats{}\n\t\tConvey(\"So Gitstats collector should not be nil\", func() {\n\t\t\tSo(collector, ShouldNotBeNil)\n\t\t})\n\t\tConvey(\"So Gitstats collector should be of Gitstats type\", func() {\n\t\t\tSo(collector, ShouldHaveSameTypeAs, &Gitstats{})\n\t\t})\n\t\tConvey(\"collector.GetConfigPolicy() should return a config policy\", func() {\n\t\t\tconfigPolicy, _ := collector.GetConfigPolicy()\n\t\t\tConvey(\"So config policy should not be nil\", func() {\n\t\t\t\tSo(configPolicy, ShouldNotBeNil)\n\t\t\t})\n\t\t\tConvey(\"So config policy should be a cpolicy.ConfigPolicy\", func() {\n\t\t\t\tSo(configPolicy, ShouldHaveSameTypeAs, &cpolicy.ConfigPolicy{})\n\t\t\t})\n\t\t\tConvey(\"So config policy namespace should be \/raintank\/Gitstats\", func() {\n\t\t\t\tconf := configPolicy.Get([]string{\"raintank\", \"apps\", \"gitstats\"})\n\t\t\t\tSo(conf, ShouldNotBeNil)\n\t\t\t\tSo(conf.HasRules(), ShouldBeTrue)\n\t\t\t\ttables := conf.RulesAsTable()\n\t\t\t\tSo(len(tables), ShouldEqual, 3)\n\t\t\t\tfor _, rule := range tables {\n\t\t\t\t\tSo(rule.Name, ShouldBeIn, \"access_token\", \"user\", \"repo\")\n\t\t\t\t\tswitch rule.Name {\n\t\t\t\t\tcase \"access_token\":\n\t\t\t\t\t\tSo(rule.Required, ShouldBeTrue)\n\t\t\t\t\t\tSo(rule.Type, ShouldEqual, \"string\")\n\t\t\t\t\tcase \"user\":\n\t\t\t\t\t\tSo(rule.Required, ShouldBeFalse)\n\t\t\t\t\t\tSo(rule.Type, ShouldEqual, \"string\")\n\t\t\t\t\tcase \"repo\":\n\t\t\t\t\t\tSo(rule.Required, ShouldBeFalse)\n\t\t\t\t\t\tSo(rule.Type, ShouldEqual, \"string\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t})\n}\n\nfunc TestGitstatsCollectMetrics(t *testing.T) {\n\tcfg := setupCfg(\"woodsaj\", \"\")\n\n\tConvey(\"Ping collector\", t, func() {\n\t\tp := &Gitstats{}\n\t\tmt, err := p.GetMetricTypes(cfg)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"failed to get metricTypes\", err)\n\t\t}\n\t\tSo(len(mt), ShouldBeGreaterThan, 0)\n\t\tfor _, m := range mt {\n\t\t\tt.Log(m.Namespace().String())\n\t\t}\n\t\tConvey(\"collect metrics\", func() {\n\t\t\tmts := []plugin.MetricType{\n\t\t\t\tplugin.MetricType{\n\t\t\t\t\tNamespace_: core.NewNamespace(\n\t\t\t\t\t\t\"raintank\", \"apps\", \"gitstats\", \"user\", \"*\", \"followers\"),\n\t\t\t\t\tConfig_: cfg.ConfigDataNode,\n\t\t\t\t},\n\t\t\t}\n\t\t\tmetrics, err := p.CollectMetrics(mts)\n\t\t\tSo(err, ShouldBeNil)\n\t\t\tSo(metrics, ShouldNotBeNil)\n\t\t\tSo(len(metrics), ShouldEqual, 1)\n\t\t\tSo(metrics[0].Namespace()[0].Value, ShouldEqual, \"raintank\")\n\t\t\tSo(metrics[0].Namespace()[1].Value, ShouldEqual, \"apps\")\n\t\t\tSo(metrics[0].Namespace()[2].Value, ShouldEqual, \"gitstats\")\n\t\t\tfor _, m := range metrics {\n\t\t\t\tSo(m.Namespace()[3].Value, ShouldEqual, \"user\")\n\t\t\t\tSo(m.Namespace()[4].Value, ShouldEqual, \"woodsaj\")\n\t\t\t\tt.Log(m.Namespace().String(), m.Data())\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc setupCfg(user, repo string) plugin.ConfigType {\n\tnode := cdata.NewNode()\n\tnode.AddItem(\"access_token\", ctypes.ConfigValueStr{Value: os.Getenv(\"GITSTATS_ACCESS_TOKEN\")})\n\tnode.AddItem(\"user\", ctypes.ConfigValueStr{Value: user})\n\tnode.AddItem(\"repo\", ctypes.ConfigValueStr{Value: repo})\n\treturn plugin.ConfigType{ConfigDataNode: node}\n}\n<commit_msg>skip integration test if ACCESS_TOKEN not set in ENV<commit_after>package gitstats\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/intelsdi-x\/snap\/control\/plugin\"\n\t\"github.com\/intelsdi-x\/snap\/control\/plugin\/cpolicy\"\n\t\"github.com\/intelsdi-x\/snap\/core\"\n\t\"github.com\/intelsdi-x\/snap\/core\/cdata\"\n\t\"github.com\/intelsdi-x\/snap\/core\/ctypes\"\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n)\n\nfunc TestGitstatsPlugin(t *testing.T) {\n\tConvey(\"Meta should return metadata for the plugin\", t, func() {\n\t\tmeta := Meta()\n\t\tSo(meta.Name, ShouldResemble, Name)\n\t\tSo(meta.Version, ShouldResemble, Version)\n\t\tSo(meta.Type, ShouldResemble, plugin.CollectorPluginType)\n\t})\n\n\tConvey(\"Create Gitstats Collector\", t, func() {\n\t\tcollector := &Gitstats{}\n\t\tConvey(\"So Gitstats collector should not be nil\", func() {\n\t\t\tSo(collector, ShouldNotBeNil)\n\t\t})\n\t\tConvey(\"So Gitstats collector should be of Gitstats type\", func() {\n\t\t\tSo(collector, ShouldHaveSameTypeAs, &Gitstats{})\n\t\t})\n\t\tConvey(\"collector.GetConfigPolicy() should return a config policy\", func() {\n\t\t\tconfigPolicy, _ := collector.GetConfigPolicy()\n\t\t\tConvey(\"So config policy should not be nil\", func() {\n\t\t\t\tSo(configPolicy, ShouldNotBeNil)\n\t\t\t})\n\t\t\tConvey(\"So config policy should be a cpolicy.ConfigPolicy\", func() {\n\t\t\t\tSo(configPolicy, ShouldHaveSameTypeAs, &cpolicy.ConfigPolicy{})\n\t\t\t})\n\t\t\tConvey(\"So config policy namespace should be \/raintank\/Gitstats\", func() {\n\t\t\t\tconf := configPolicy.Get([]string{\"raintank\", \"apps\", \"gitstats\"})\n\t\t\t\tSo(conf, ShouldNotBeNil)\n\t\t\t\tSo(conf.HasRules(), ShouldBeTrue)\n\t\t\t\ttables := conf.RulesAsTable()\n\t\t\t\tSo(len(tables), ShouldEqual, 3)\n\t\t\t\tfor _, rule := range tables {\n\t\t\t\t\tSo(rule.Name, ShouldBeIn, \"access_token\", \"user\", \"repo\")\n\t\t\t\t\tswitch rule.Name {\n\t\t\t\t\tcase \"access_token\":\n\t\t\t\t\t\tSo(rule.Required, ShouldBeTrue)\n\t\t\t\t\t\tSo(rule.Type, ShouldEqual, \"string\")\n\t\t\t\t\tcase \"user\":\n\t\t\t\t\t\tSo(rule.Required, ShouldBeFalse)\n\t\t\t\t\t\tSo(rule.Type, ShouldEqual, \"string\")\n\t\t\t\t\tcase \"repo\":\n\t\t\t\t\t\tSo(rule.Required, ShouldBeFalse)\n\t\t\t\t\t\tSo(rule.Type, ShouldEqual, \"string\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t})\n}\n\nfunc TestGitstatsCollectMetrics(t *testing.T) {\n\tif os.Getenv(\"GITSTATS_ACCESS_TOKEN\") == \"\" {\n\t\treturn\n\t}\n\tcfg := setupCfg(\"woodsaj\", \"\")\n\n\tConvey(\"Ping collector\", t, func() {\n\t\tp := &Gitstats{}\n\t\tmt, err := p.GetMetricTypes(cfg)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"failed to get metricTypes\", err)\n\t\t}\n\t\tSo(len(mt), ShouldBeGreaterThan, 0)\n\t\tfor _, m := range mt {\n\t\t\tt.Log(m.Namespace().String())\n\t\t}\n\t\tConvey(\"collect metrics\", func() {\n\t\t\tmts := []plugin.MetricType{\n\t\t\t\tplugin.MetricType{\n\t\t\t\t\tNamespace_: core.NewNamespace(\n\t\t\t\t\t\t\"raintank\", \"apps\", \"gitstats\", \"user\", \"*\", \"followers\"),\n\t\t\t\t\tConfig_: cfg.ConfigDataNode,\n\t\t\t\t},\n\t\t\t}\n\t\t\tmetrics, err := p.CollectMetrics(mts)\n\t\t\tSo(err, ShouldBeNil)\n\t\t\tSo(metrics, ShouldNotBeNil)\n\t\t\tSo(len(metrics), ShouldEqual, 1)\n\t\t\tSo(metrics[0].Namespace()[0].Value, ShouldEqual, \"raintank\")\n\t\t\tSo(metrics[0].Namespace()[1].Value, ShouldEqual, \"apps\")\n\t\t\tSo(metrics[0].Namespace()[2].Value, ShouldEqual, \"gitstats\")\n\t\t\tfor _, m := range metrics {\n\t\t\t\tSo(m.Namespace()[3].Value, ShouldEqual, \"user\")\n\t\t\t\tSo(m.Namespace()[4].Value, ShouldEqual, \"woodsaj\")\n\t\t\t\tt.Log(m.Namespace().String(), m.Data())\n\t\t\t}\n\t\t})\n\t})\n}\n\nfunc setupCfg(user, repo string) plugin.ConfigType {\n\tnode := cdata.NewNode()\n\tnode.AddItem(\"access_token\", ctypes.ConfigValueStr{Value: os.Getenv(\"GITSTATS_ACCESS_TOKEN\")})\n\tnode.AddItem(\"user\", ctypes.ConfigValueStr{Value: user})\n\tnode.AddItem(\"repo\", ctypes.ConfigValueStr{Value: repo})\n\treturn plugin.ConfigType{ConfigDataNode: node}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/peterh\/liner\"\n)\n\ntype Shell struct {\n\tshell *liner.State\n\tprompt string\n\tcmds map[string]shellCmd\n\thist string\n\tmotor Motor\n}\n\nfunc NewShell() *Shell {\n\tsh := &Shell{\n\t\tshell: liner.NewLiner(),\n\t\tprompt: \"mbus> \",\n\t\thist: filepath.Join(\".\", \".fcs_lpc_motor_history\"),\n\t\tmotor: NewMotor(\"134.158.125.223:502\"),\n\t}\n\n\tsh.shell.SetCtrlCAborts(true)\n\tsh.shell.SetCompleter(func(line string) (c []string) {\n\t\tfor n := range sh.cmds {\n\t\t\tif strings.HasPrefix(n, strings.ToLower(line)) {\n\t\t\t\tc = append(c, n)\n\t\t\t}\n\t\t}\n\t\treturn\n\t})\n\n\tif f, err := os.Open(sh.hist); err == nil {\n\t\tsh.shell.ReadHistory(f)\n\t\tf.Close()\n\t}\n\n\tsh.cmds = map[string]shellCmd{\n\t\t\"dump\": sh.cmdDump,\n\t\t\"get\": sh.cmdGet,\n\t\t\"quit\": sh.cmdQuit,\n\t\t\"set\": sh.cmdSet,\n\t}\n\treturn sh\n}\n\ntype shellCmd func(args []string) error\n\nfunc (sh *Shell) Close() error {\n\tif f, err := os.Create(sh.hist); err != nil {\n\t\tlog.Print(\"error writing history file: \", err)\n\t} else {\n\t\tsh.shell.WriteHistory(f)\n\t\tf.Close()\n\t}\n\tfmt.Printf(\"\\n\")\n\treturn sh.shell.Close()\n}\n\nfunc (sh *Shell) run() error {\n\tfor {\n\t\traw, err := sh.shell.Prompt(sh.prompt)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\t\/\/ log.Printf(\"got: %q\\n\", raw)\n\t\traw = strings.TrimSpace(raw)\n\t\tif raw == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\ttoks := strings.Split(raw, \" \")\n\t\terr = sh.dispatch(toks)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tsh.shell.AppendHistory(raw)\n\t}\n\n\treturn nil\n}\n\nfunc (sh *Shell) dispatch(toks []string) error {\n\tvar err error\n\tfct, ok := sh.cmds[toks[0]]\n\tif !ok {\n\t\terr = fmt.Errorf(\"invalid command [%s]\", toks[0])\n\t\tlog.Printf(\"error: %v\\n\", err)\n\t\treturn err\n\t}\n\n\treturn fct(toks[1:])\n}\n\nfunc (sh *Shell) cmdQuit(args []string) error {\n\treturn io.EOF\n}\n\nfunc (sh *Shell) cmdGet(args []string) error {\n\tparam, err := sh.parseParam(args[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\to, err := sh.motor.read(param)\n\tif err != nil {\n\t\treturn err\n\t}\n\thex := make([]string, len(o))\n\tdec := make([]string, len(o))\n\tfor i, v := range o {\n\t\thex[i] = fmt.Sprintf(\"0x%02x\", v)\n\t\tdec[i] = fmt.Sprintf(\"%3d\", v)\n\t}\n\tlog.Printf(\n\t\t\"Pr-%v: [%s] [%s] (%v)\\n\",\n\t\tparam,\n\t\tstrings.Join(hex, \" \"),\n\t\tstrings.Join(dec, \" \"),\n\t\tcodec.Uint16(o),\n\t)\n\n\treturn err\n}\n\nfunc (sh *Shell) cmdSet(args []string) error {\n\tparam, err := sh.parseParam(args[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"set Pr-%v [%s]...\\n\", param, args[1])\n\treturn err\n}\n\nfunc (sh *Shell) cmdDump(args []string) error {\n\tvar err error\n\treturn err\n}\n\nfunc (sh *Shell) parseParam(arg string) (Parameter, error) {\n\tvar err error\n\tvar p Parameter\n\n\tif strings.Contains(arg, \".\") {\n\t\treturn NewParameterFromMenu(arg)\n\t}\n\n\tvar reg uint64\n\tvar base = 10\n\tif strings.HasPrefix(arg, \"0x\") {\n\t\tbase = 16\n\t\targ = arg[len(\"0x\"):]\n\t}\n\treg, err = strconv.ParseUint(arg, base, 64)\n\tif err != nil {\n\t\treturn p, err\n\t}\n\tp = NewParameter(uint16(reg))\n\treturn p, err\n}\n<commit_msg>shell: add ability to switch motors<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/peterh\/liner\"\n)\n\ntype Shell struct {\n\tshell *liner.State\n\tprompt string\n\tcmds map[string]shellCmd\n\thist string\n\tmotor Motor\n}\n\nfunc NewShell() *Shell {\n\tsh := &Shell{\n\t\tshell: liner.NewLiner(),\n\t\tprompt: \"mbus> \",\n\t\thist: filepath.Join(\".\", \".fcs_lpc_motor_history\"),\n\t\tmotor: NewMotor(\"134.158.125.223:502\"),\n\t}\n\n\tsh.shell.SetCtrlCAborts(true)\n\tsh.shell.SetCompleter(func(line string) (c []string) {\n\t\tfor n := range sh.cmds {\n\t\t\tif strings.HasPrefix(n, strings.ToLower(line)) {\n\t\t\t\tc = append(c, n)\n\t\t\t}\n\t\t}\n\t\treturn\n\t})\n\n\tif f, err := os.Open(sh.hist); err == nil {\n\t\tsh.shell.ReadHistory(f)\n\t\tf.Close()\n\t}\n\n\tsh.cmds = map[string]shellCmd{\n\t\t\"dump\": sh.cmdDump,\n\t\t\"get\": sh.cmdGet,\n\t\t\"motor\": sh.cmdMotor,\n\t\t\"quit\": sh.cmdQuit,\n\t\t\"set\": sh.cmdSet,\n\t}\n\treturn sh\n}\n\ntype shellCmd func(args []string) error\n\nfunc (sh *Shell) Close() error {\n\tif f, err := os.Create(sh.hist); err != nil {\n\t\tlog.Print(\"error writing history file: \", err)\n\t} else {\n\t\tsh.shell.WriteHistory(f)\n\t\tf.Close()\n\t}\n\tfmt.Printf(\"\\n\")\n\treturn sh.shell.Close()\n}\n\nfunc (sh *Shell) run() error {\n\tfor {\n\t\traw, err := sh.shell.Prompt(sh.prompt)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\t\/\/ log.Printf(\"got: %q\\n\", raw)\n\t\traw = strings.TrimSpace(raw)\n\t\tif raw == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\ttoks := strings.Split(raw, \" \")\n\t\terr = sh.dispatch(toks)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\terr = nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tsh.shell.AppendHistory(raw)\n\t}\n\n\treturn nil\n}\n\nfunc (sh *Shell) dispatch(toks []string) error {\n\tvar err error\n\tfct, ok := sh.cmds[toks[0]]\n\tif !ok {\n\t\terr = fmt.Errorf(\"invalid command [%s]\", toks[0])\n\t\tlog.Printf(\"error: %v\\n\", err)\n\t\treturn err\n\t}\n\n\treturn fct(toks[1:])\n}\n\nfunc (sh *Shell) cmdQuit(args []string) error {\n\treturn io.EOF\n}\n\nfunc (sh *Shell) cmdGet(args []string) error {\n\tparam, err := sh.parseParam(args[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\to, err := sh.motor.read(param)\n\tif err != nil {\n\t\treturn err\n\t}\n\thex := make([]string, len(o))\n\tdec := make([]string, len(o))\n\tfor i, v := range o {\n\t\thex[i] = fmt.Sprintf(\"0x%02x\", v)\n\t\tdec[i] = fmt.Sprintf(\"%3d\", v)\n\t}\n\tlog.Printf(\n\t\t\"Pr-%v: [%s] [%s] (%v)\\n\",\n\t\tparam,\n\t\tstrings.Join(hex, \" \"),\n\t\tstrings.Join(dec, \" \"),\n\t\tcodec.Uint16(o),\n\t)\n\n\treturn err\n}\n\nfunc (sh *Shell) cmdSet(args []string) error {\n\tparam, err := sh.parseParam(args[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"set Pr-%v [%s]...\\n\", param, args[1])\n\treturn err\n}\n\nfunc (sh *Shell) cmdDump(args []string) error {\n\tvar err error\n\treturn err\n}\n\nfunc (sh *Shell) cmdMotor(args []string) error {\n\tswitch len(args) {\n\tcase 0:\n\t\tlog.Printf(\"connected to [%s]\\n\", sh.motor.Address)\n\t\treturn nil\n\tcase 1:\n\t\tsh.motor = NewMotor(args[0])\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"cmd-motor: too many arguments (%d)\", len(args))\n\t}\n\treturn nil\n}\n\nfunc (sh *Shell) parseParam(arg string) (Parameter, error) {\n\tvar err error\n\tvar p Parameter\n\n\tif strings.Contains(arg, \".\") {\n\t\treturn NewParameterFromMenu(arg)\n\t}\n\n\tvar reg uint64\n\tvar base = 10\n\tif strings.HasPrefix(arg, \"0x\") {\n\t\tbase = 16\n\t\targ = arg[len(\"0x\"):]\n\t}\n\treg, err = strconv.ParseUint(arg, base, 64)\n\tif err != nil {\n\t\treturn p, err\n\t}\n\tp = NewParameter(uint16(reg))\n\treturn p, err\n}\n<|endoftext|>"} {"text":"<commit_before>package space\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"path\"\n\t\"strings\"\n\n\t\"github.com\/pivotalservices\/cf-mgmt\/cloudcontroller\"\n\t\"github.com\/pivotalservices\/cf-mgmt\/ldap\"\n\t\"github.com\/pivotalservices\/cf-mgmt\/organization\"\n\t\"github.com\/pivotalservices\/cf-mgmt\/uaac\"\n\t\"github.com\/pivotalservices\/cf-mgmt\/utils\"\n\t\"github.com\/xchapter7x\/lo\"\n)\n\n\/\/NewManager -\nfunc NewManager(sysDomain, token, uaacToken string) (mgr Manager) {\n\tcloudController := cloudcontroller.NewManager(fmt.Sprintf(\"https:\/\/api.%s\", sysDomain), token)\n\tldapMgr := ldap.NewManager()\n\tuaacMgr := uaac.NewManager(sysDomain, uaacToken)\n\treturn &DefaultSpaceManager{\n\t\tUAACMgr: uaacMgr,\n\t\tCloudController: cloudController,\n\t\tOrgMgr: organization.NewManager(sysDomain, token, uaacToken),\n\t\tLdapMgr: ldapMgr,\n\t\tUtilsMgr: utils.NewDefaultManager(),\n\t\tUserMgr: NewUserManager(cloudController, ldapMgr, uaacMgr),\n\t}\n}\n\nfunc (m *DefaultSpaceManager) GetSpaceConfigs(configDir string) ([]*InputUpdateSpaces, error) {\n\n\tspaceDefaults := &InputUpdateSpaces{}\n\tm.UtilsMgr.LoadFile(path.Join(configDir, \"spaceDefaults.yml\"), spaceDefaults)\n\tif files, err := utils.NewDefaultManager().FindFiles(configDir, \"spaceConfig.yml\"); err == nil {\n\t\tspaceConfigs := []*InputUpdateSpaces{}\n\t\tfor _, f := range files {\n\t\t\tlo.G.Info(\"Processing space file\", f)\n\t\t\tinput := &InputUpdateSpaces{}\n\t\t\tif err = m.UtilsMgr.LoadFile(f, input); err == nil {\n\t\t\t\tinput.Developer.LdapUsers = append(input.Developer.LdapUsers, spaceDefaults.Developer.LdapUsers...)\n\t\t\t\tinput.Developer.Users = append(input.Developer.Users, spaceDefaults.Developer.Users...)\n\t\t\t\tinput.Auditor.LdapUsers = append(input.Auditor.LdapUsers, spaceDefaults.Auditor.LdapUsers...)\n\t\t\t\tinput.Auditor.Users = append(input.Auditor.Users, spaceDefaults.Auditor.Users...)\n\t\t\t\tinput.Manager.LdapUsers = append(input.Manager.LdapUsers, spaceDefaults.Manager.LdapUsers...)\n\t\t\t\tinput.Manager.Users = append(input.Manager.Users, spaceDefaults.Manager.Users...)\n\n\t\t\t\tspaceConfigs = append(spaceConfigs, input)\n\t\t\t\tif input.EnableSecurityGroup {\n\t\t\t\t\tsecurityGroupFile := strings.Replace(f, \"spaceConfig.yml\", \"security-group.json\", -1)\n\t\t\t\t\tlo.G.Debug(\"Loading security group contents\", securityGroupFile)\n\t\t\t\t\tvar bytes []byte\n\t\t\t\t\tif bytes, err = ioutil.ReadFile(securityGroupFile); err == nil {\n\t\t\t\t\t\tlo.G.Debug(\"setting security group contents\", string(bytes))\n\t\t\t\t\t\tinput.SecurityGroupContents = string(bytes)\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\treturn spaceConfigs, nil\n\t} else {\n\t\treturn nil, err\n\t}\n}\n\n\/\/CreateApplicationSecurityGroups -\nfunc (m *DefaultSpaceManager) CreateApplicationSecurityGroups(configDir string) error {\n\tvar targetSGGUID string\n\tvar sgs map[string]string\n\tvar space *cloudcontroller.Space\n\n\tif spaceConfigs, err := m.GetSpaceConfigs(configDir); err != nil {\n\t\treturn err\n\t} else {\n\t\tfor _, input := range spaceConfigs {\n\t\t\tif input.EnableSecurityGroup {\n\t\t\t\tif space, err = m.FindSpace(input.Org, input.Space); err == nil {\n\t\t\t\t\tsgName := fmt.Sprintf(\"%s-%s\", input.Org, input.Space)\n\t\t\t\t\tif sgs, err = m.CloudController.ListSecurityGroups(); err == nil {\n\t\t\t\t\t\tif sgGUID, ok := sgs[sgName]; ok {\n\t\t\t\t\t\t\tlo.G.Info(\"Updating security group\", sgName)\n\t\t\t\t\t\t\tif err = m.CloudController.UpdateSecurityGroup(sgGUID, sgName, input.SecurityGroupContents); err == nil {\n\t\t\t\t\t\t\t\tlo.G.Info(\"Binding security group\", sgName, \"to space\", space.Entity.Name)\n\t\t\t\t\t\t\t\tm.CloudController.AssignSecurityGroupToSpace(space.MetaData.GUID, sgGUID)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlo.G.Info(\"Creating security group\", sgName)\n\t\t\t\t\t\t\tif targetSGGUID, err = m.CloudController.CreateSecurityGroup(sgName, input.SecurityGroupContents); err == nil {\n\t\t\t\t\t\t\t\tlo.G.Info(\"Binding security group\", sgName, \"to space\", space.Entity.Name)\n\t\t\t\t\t\t\t\tm.CloudController.AssignSecurityGroupToSpace(space.MetaData.GUID, targetSGGUID)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/CreateQuotas -\nfunc (m *DefaultSpaceManager) CreateQuotas(configDir string) error {\n\tvar quotas map[string]string\n\tvar space *cloudcontroller.Space\n\tvar targetQuotaGUID string\n\n\tif spaceConfigs, err := m.GetSpaceConfigs(configDir); err != nil {\n\t\treturn err\n\t} else {\n\t\tfor _, input := range spaceConfigs {\n\t\t\tif input.EnableSpaceQuota {\n\t\t\t\tif space, err = m.FindSpace(input.Org, input.Space); err == nil {\n\t\t\t\t\tquotaName := space.Entity.Name\n\t\t\t\t\tif quotas, err = m.CloudController.ListAllSpaceQuotasForOrg(space.Entity.OrgGUID); err == nil {\n\t\t\t\t\t\tif quotaGUID, ok := quotas[quotaName]; ok {\n\t\t\t\t\t\t\tlo.G.Info(\"Updating quota\", quotaName)\n\t\t\t\t\t\t\tif err = m.CloudController.UpdateSpaceQuota(space.Entity.OrgGUID, quotaGUID,\n\t\t\t\t\t\t\t\tquotaName, input.MemoryLimit, input.InstanceMemoryLimit, input.TotalRoutes, input.TotalServices, input.PaidServicePlansAllowed); err == nil {\n\t\t\t\t\t\t\t\tlo.G.Info(\"Assigning\", quotaName, \"to\", space.Entity.Name)\n\t\t\t\t\t\t\t\tm.CloudController.AssignQuotaToSpace(space.MetaData.GUID, quotaGUID)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlo.G.Info(\"Creating quota\", quotaName)\n\t\t\t\t\t\t\tif targetQuotaGUID, err = m.CloudController.CreateSpaceQuota(space.Entity.OrgGUID,\n\t\t\t\t\t\t\t\tquotaName, input.MemoryLimit, input.InstanceMemoryLimit, input.TotalRoutes, input.TotalServices, input.PaidServicePlansAllowed); err == nil {\n\t\t\t\t\t\t\t\tlo.G.Info(\"Assigning\", quotaName, \"to\", space.Entity.Name)\n\t\t\t\t\t\t\t\tm.CloudController.AssignQuotaToSpace(space.MetaData.GUID, targetQuotaGUID)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/UpdateSpaces -\nfunc (m *DefaultSpaceManager) UpdateSpaces(configDir string) error {\n\tvar space *cloudcontroller.Space\n\n\tif spaceConfigs, err := m.GetSpaceConfigs(configDir); err != nil {\n\t\treturn err\n\t} else {\n\t\tfor _, input := range spaceConfigs {\n\t\t\tif space, err = m.FindSpace(input.Org, input.Space); err == nil {\n\t\t\t\tlo.G.Info(\"Processing space\", space.Entity.Name)\n\t\t\t\tif input.AllowSSH != space.Entity.AllowSSH {\n\t\t\t\t\tif err = m.CloudController.UpdateSpaceSSH(input.AllowSSH, space.MetaData.GUID); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/UpdateSpaceUsers -\nfunc (m *DefaultSpaceManager) UpdateSpaceUsers(configDir, ldapBindPassword string) error {\n\tvar config *ldap.Config\n\tvar uaacUsers map[string]string\n\tvar err error\n\n\tconfig, err = m.LdapMgr.GetConfig(configDir, ldapBindPassword)\n\tif err != nil {\n\t\tlo.G.Error(err)\n\t\treturn err\n\t}\n\n\tuaacUsers, err = m.UAACMgr.ListUsers()\n\n\tif err != nil {\n\t\tlo.G.Error(err)\n\t\treturn err\n\t}\n\n\tvar spaceConfigs []*InputUpdateSpaces\n\n\tif spaceConfigs, err = m.GetSpaceConfigs(configDir); err != nil {\n\t\tlo.G.Error(err)\n\t\treturn err\n\t}\n\n\tfor _, input := range spaceConfigs {\n\t\tif err = m.updateSpaceUsers(config, input, uaacUsers); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (m *DefaultSpaceManager) updateSpaceUsers(config *ldap.Config, input *InputUpdateSpaces, uaacUsers map[string]string) error {\n\tif space, err := m.FindSpace(input.Org, input.Space); err == nil {\n\t\tif err = m.UserMgr.UpdateSpaceUsers(config, uaacUsers, UpdateUsersInput{\n\t\t\tSpaceName: space.Entity.Name,\n\t\t\tSpaceGUID: space.MetaData.GUID,\n\t\t\tOrgName: input.Org,\n\t\t\tOrgGUID: space.Entity.OrgGUID,\n\t\t\tRole: \"developers\",\n\t\t\tLdapGroupName: input.GetDeveloperGroup(),\n\t\t\tLdapUsers: input.Developer.LdapUsers,\n\t\t\tUsers: input.Developer.Users,\n\t\t\tRemoveUsers: input.RemoveUsers,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = m.UserMgr.UpdateSpaceUsers(config, uaacUsers,\n\t\t\tUpdateUsersInput{\n\t\t\t\tSpaceName: space.Entity.Name,\n\t\t\t\tSpaceGUID: space.MetaData.GUID,\n\t\t\t\tOrgGUID: space.Entity.OrgGUID,\n\t\t\t\tOrgName: input.Org,\n\t\t\t\tRole: \"managers\",\n\t\t\t\tLdapGroupName: input.GetManagerGroup(),\n\t\t\t\tLdapUsers: input.Manager.LdapUsers,\n\t\t\t\tUsers: input.Manager.Users,\n\t\t\t\tRemoveUsers: input.RemoveUsers,\n\t\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = m.UserMgr.UpdateSpaceUsers(config, uaacUsers,\n\t\t\tUpdateUsersInput{\n\t\t\t\tSpaceName: space.Entity.Name,\n\t\t\t\tSpaceGUID: space.MetaData.GUID,\n\t\t\t\tOrgGUID: space.Entity.OrgGUID,\n\t\t\t\tOrgName: input.Org,\n\t\t\t\tRole: \"auditors\",\n\t\t\t\tLdapGroupName: input.GetAuditorGroup(),\n\t\t\t\tLdapUsers: input.Auditor.LdapUsers,\n\t\t\t\tUsers: input.Auditor.Users,\n\t\t\t\tRemoveUsers: input.RemoveUsers,\n\t\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t} else {\n\t\treturn err\n\t}\n}\n\n\/\/FindSpace -\nfunc (m *DefaultSpaceManager) FindSpace(orgName, spaceName string) (*cloudcontroller.Space, error) {\n\tif orgGUID, err := m.OrgMgr.GetOrgGUID(orgName); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tif spaces, err := m.CloudController.ListSpaces(orgGUID); err == nil {\n\t\t\tfor _, theSpace := range spaces {\n\t\t\t\tif theSpace.Entity.Name == spaceName {\n\t\t\t\t\treturn theSpace, nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"Space [%s] not found in org [%s]\", spaceName, orgName)\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t}\n}\n\nfunc (m *DefaultSpaceManager) GetSpaceConfigList(configDir string) ([]InputCreateSpaces, error) {\n\n\tif files, err := m.UtilsMgr.FindFiles(configDir, \"spaces.yml\"); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tspaceList := []InputCreateSpaces{}\n\t\tfor _, f := range files {\n\t\t\tlo.G.Info(\"Processing space file\", f)\n\t\t\tinput := InputCreateSpaces{}\n\t\t\tif err := m.UtilsMgr.LoadFile(f, &input); err == nil {\n\t\t\t\tspaceList = append(spaceList, input)\n\t\t\t}\n\t\t}\n\t\treturn spaceList, nil\n\t}\n}\n\n\/\/CreateSpaces -\nfunc (m *DefaultSpaceManager) CreateSpaces(configDir, ldapBindPassword string) error {\n\n\tif configSpaceList, err := m.GetSpaceConfigList(configDir); err != nil {\n\t\treturn err\n\t} else {\n\t\tfor _, input := range configSpaceList {\n\t\t\tif len(input.Spaces) >= 0 {\n\t\t\t\tvar orgGUID string\n\t\t\t\tif orgGUID, err = m.OrgMgr.GetOrgGUID(input.Org); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tvar spaces []*cloudcontroller.Space\n\t\t\t\tif spaces, err = m.CloudController.ListSpaces(orgGUID); err == nil {\n\t\t\t\t\tfor _, spaceName := range input.Spaces {\n\t\t\t\t\t\tif m.doesSpaceExist(spaces, spaceName) {\n\t\t\t\t\t\t\tlo.G.Info(fmt.Sprintf(\"[%s] space already exists\", spaceName))\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlo.G.Info(fmt.Sprintf(\"Creating [%s] space in [%s] org\", spaceName, input.Org))\n\t\t\t\t\t\t\tif err = m.CloudController.CreateSpace(spaceName, orgGUID); err == nil {\n\t\t\t\t\t\t\t\tif err = m.UpdateSpaceWithDefaults(configDir, spaceName, input.Org, ldapBindPassword); err != nil {\n\t\t\t\t\t\t\t\t\tlo.G.Error(err)\n\t\t\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tlo.G.Error(err)\n\t\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc (m *DefaultSpaceManager) UpdateSpaceWithDefaults(configDir, spaceName, orgName, ldapBindPassword string) error {\n\tdefaultSpaceConfigFile := configDir + \"\/spaceDefaults.yml\"\n\tif m.UtilsMgr.FileOrDirectoryExists(defaultSpaceConfigFile) {\n\t\tvar config *ldap.Config\n\t\tvar uaacUsers map[string]string\n\t\tvar err error\n\t\tif ldapBindPassword == \"\" {\n\t\t\tconfig = &ldap.Config{\n\t\t\t\tEnabled: false,\n\t\t\t}\n\t\t} else {\n\t\t\tif config, err = m.LdapMgr.GetConfig(configDir, ldapBindPassword); err != nil {\n\t\t\t\tlo.G.Error(err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif uaacUsers, err = m.UAACMgr.ListUsers(); err != nil {\n\t\t\tlo.G.Error(err)\n\t\t\treturn err\n\t\t}\n\n\t\tvar defaultSpaceConfig *InputUpdateSpaces\n\n\t\tif err = m.UtilsMgr.LoadFile(defaultSpaceConfigFile, &defaultSpaceConfig); err == nil {\n\t\t\tdefaultSpaceConfig.Org = orgName\n\t\t\tdefaultSpaceConfig.Space = spaceName\n\t\t\tif err = m.updateSpaceUsers(config, defaultSpaceConfig, uaacUsers); err != nil {\n\t\t\t\treturn err\n\t\t\t} else {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t} else {\n\t\t\tlo.G.Error(err)\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tlo.G.Info(defaultSpaceConfigFile, \"doesn't exist\")\n\t\treturn nil\n\t}\n}\n\nfunc (m *DefaultSpaceManager) doesSpaceExist(spaces []*cloudcontroller.Space, spaceName string) (result bool) {\n\tresult = false\n\tfor _, space := range spaces {\n\t\tif space.Entity.Name == spaceName {\n\t\t\tresult = true\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n\n}\n<commit_msg>space cleanup<commit_after>package space\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/pivotalservices\/cf-mgmt\/cloudcontroller\"\n\t\"github.com\/pivotalservices\/cf-mgmt\/ldap\"\n\t\"github.com\/pivotalservices\/cf-mgmt\/organization\"\n\t\"github.com\/pivotalservices\/cf-mgmt\/uaac\"\n\t\"github.com\/pivotalservices\/cf-mgmt\/utils\"\n\t\"github.com\/xchapter7x\/lo\"\n)\n\n\/\/NewManager -\nfunc NewManager(sysDomain, token, uaacToken string) (mgr Manager) {\n\tcloudController := cloudcontroller.NewManager(fmt.Sprintf(\"https:\/\/api.%s\", sysDomain), token)\n\tldapMgr := ldap.NewManager()\n\tuaacMgr := uaac.NewManager(sysDomain, uaacToken)\n\treturn &DefaultSpaceManager{\n\t\tUAACMgr: uaacMgr,\n\t\tCloudController: cloudController,\n\t\tOrgMgr: organization.NewManager(sysDomain, token, uaacToken),\n\t\tLdapMgr: ldapMgr,\n\t\tUtilsMgr: utils.NewDefaultManager(),\n\t\tUserMgr: NewUserManager(cloudController, ldapMgr, uaacMgr),\n\t}\n}\n\nfunc (m *DefaultSpaceManager) GetSpaceConfigs(configDir string) ([]*InputUpdateSpaces, error) {\n\n\tspaceDefaults := &InputUpdateSpaces{}\n\tm.UtilsMgr.LoadFile(path.Join(configDir, \"spaceDefaults.yml\"), spaceDefaults)\n\tif files, err := utils.NewDefaultManager().FindFiles(configDir, \"spaceConfig.yml\"); err == nil {\n\t\tspaceConfigs := []*InputUpdateSpaces{}\n\t\tfor _, f := range files {\n\t\t\tlo.G.Info(\"Processing space file\", f)\n\t\t\tinput := &InputUpdateSpaces{}\n\t\t\tif err = m.UtilsMgr.LoadFile(f, input); err == nil {\n\t\t\t\tinput.Developer.LdapUsers = append(input.Developer.LdapUsers, spaceDefaults.Developer.LdapUsers...)\n\t\t\t\tinput.Developer.Users = append(input.Developer.Users, spaceDefaults.Developer.Users...)\n\t\t\t\tinput.Auditor.LdapUsers = append(input.Auditor.LdapUsers, spaceDefaults.Auditor.LdapUsers...)\n\t\t\t\tinput.Auditor.Users = append(input.Auditor.Users, spaceDefaults.Auditor.Users...)\n\t\t\t\tinput.Manager.LdapUsers = append(input.Manager.LdapUsers, spaceDefaults.Manager.LdapUsers...)\n\t\t\t\tinput.Manager.Users = append(input.Manager.Users, spaceDefaults.Manager.Users...)\n\n\t\t\t\tspaceConfigs = append(spaceConfigs, input)\n\t\t\t\tif input.EnableSecurityGroup {\n\t\t\t\t\tsecurityGroupFile := strings.Replace(f, \"spaceConfig.yml\", \"security-group.json\", -1)\n\t\t\t\t\tlo.G.Debug(\"Loading security group contents\", securityGroupFile)\n\t\t\t\t\tvar bytes []byte\n\t\t\t\t\tif bytes, err = ioutil.ReadFile(securityGroupFile); err == nil {\n\t\t\t\t\t\tlo.G.Debug(\"setting security group contents\", string(bytes))\n\t\t\t\t\t\tinput.SecurityGroupContents = string(bytes)\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\treturn spaceConfigs, nil\n\t} else {\n\t\treturn nil, err\n\t}\n}\n\n\/\/CreateApplicationSecurityGroups -\nfunc (m *DefaultSpaceManager) CreateApplicationSecurityGroups(configDir string) error {\n\tvar targetSGGUID string\n\tvar sgs map[string]string\n\tvar space *cloudcontroller.Space\n\n\tif spaceConfigs, err := m.GetSpaceConfigs(configDir); err != nil {\n\t\treturn err\n\t} else {\n\t\tfor _, input := range spaceConfigs {\n\t\t\tif input.EnableSecurityGroup {\n\t\t\t\tif space, err = m.FindSpace(input.Org, input.Space); err == nil {\n\t\t\t\t\tsgName := fmt.Sprintf(\"%s-%s\", input.Org, input.Space)\n\t\t\t\t\tif sgs, err = m.CloudController.ListSecurityGroups(); err == nil {\n\t\t\t\t\t\tif sgGUID, ok := sgs[sgName]; ok {\n\t\t\t\t\t\t\tlo.G.Info(\"Updating security group\", sgName)\n\t\t\t\t\t\t\tif err = m.CloudController.UpdateSecurityGroup(sgGUID, sgName, input.SecurityGroupContents); err == nil {\n\t\t\t\t\t\t\t\tlo.G.Info(\"Binding security group\", sgName, \"to space\", space.Entity.Name)\n\t\t\t\t\t\t\t\tm.CloudController.AssignSecurityGroupToSpace(space.MetaData.GUID, sgGUID)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlo.G.Info(\"Creating security group\", sgName)\n\t\t\t\t\t\t\tif targetSGGUID, err = m.CloudController.CreateSecurityGroup(sgName, input.SecurityGroupContents); err == nil {\n\t\t\t\t\t\t\t\tlo.G.Info(\"Binding security group\", sgName, \"to space\", space.Entity.Name)\n\t\t\t\t\t\t\t\tm.CloudController.AssignSecurityGroupToSpace(space.MetaData.GUID, targetSGGUID)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/CreateQuotas -\nfunc (m *DefaultSpaceManager) CreateQuotas(configDir string) error {\n\tvar quotas map[string]string\n\tvar space *cloudcontroller.Space\n\tvar targetQuotaGUID string\n\n\tif spaceConfigs, err := m.GetSpaceConfigs(configDir); err != nil {\n\t\treturn err\n\t} else {\n\t\tfor _, input := range spaceConfigs {\n\t\t\tif input.EnableSpaceQuota {\n\t\t\t\tif space, err = m.FindSpace(input.Org, input.Space); err == nil {\n\t\t\t\t\tquotaName := space.Entity.Name\n\t\t\t\t\tif quotas, err = m.CloudController.ListAllSpaceQuotasForOrg(space.Entity.OrgGUID); err == nil {\n\t\t\t\t\t\tif quotaGUID, ok := quotas[quotaName]; ok {\n\t\t\t\t\t\t\tlo.G.Info(\"Updating quota\", quotaName)\n\t\t\t\t\t\t\tif err = m.CloudController.UpdateSpaceQuota(space.Entity.OrgGUID, quotaGUID,\n\t\t\t\t\t\t\t\tquotaName, input.MemoryLimit, input.InstanceMemoryLimit, input.TotalRoutes, input.TotalServices, input.PaidServicePlansAllowed); err == nil {\n\t\t\t\t\t\t\t\tlo.G.Info(\"Assigning\", quotaName, \"to\", space.Entity.Name)\n\t\t\t\t\t\t\t\tm.CloudController.AssignQuotaToSpace(space.MetaData.GUID, quotaGUID)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlo.G.Info(\"Creating quota\", quotaName)\n\t\t\t\t\t\t\tif targetQuotaGUID, err = m.CloudController.CreateSpaceQuota(space.Entity.OrgGUID,\n\t\t\t\t\t\t\t\tquotaName, input.MemoryLimit, input.InstanceMemoryLimit, input.TotalRoutes, input.TotalServices, input.PaidServicePlansAllowed); err == nil {\n\t\t\t\t\t\t\t\tlo.G.Info(\"Assigning\", quotaName, \"to\", space.Entity.Name)\n\t\t\t\t\t\t\t\tm.CloudController.AssignQuotaToSpace(space.MetaData.GUID, targetQuotaGUID)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/UpdateSpaces -\nfunc (m *DefaultSpaceManager) UpdateSpaces(configDir string) error {\n\tvar space *cloudcontroller.Space\n\n\tif spaceConfigs, err := m.GetSpaceConfigs(configDir); err != nil {\n\t\treturn err\n\t} else {\n\t\tfor _, input := range spaceConfigs {\n\t\t\tif space, err = m.FindSpace(input.Org, input.Space); err == nil {\n\t\t\t\tlo.G.Info(\"Processing space\", space.Entity.Name)\n\t\t\t\tif input.AllowSSH != space.Entity.AllowSSH {\n\t\t\t\t\tif err = m.CloudController.UpdateSpaceSSH(input.AllowSSH, space.MetaData.GUID); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/UpdateSpaceUsers -\nfunc (m *DefaultSpaceManager) UpdateSpaceUsers(configDir, ldapBindPassword string) error {\n\tvar config *ldap.Config\n\tvar uaacUsers map[string]string\n\tvar err error\n\n\tconfig, err = m.LdapMgr.GetConfig(configDir, ldapBindPassword)\n\tif err != nil {\n\t\tlo.G.Error(err)\n\t\treturn err\n\t}\n\n\tuaacUsers, err = m.UAACMgr.ListUsers()\n\n\tif err != nil {\n\t\tlo.G.Error(err)\n\t\treturn err\n\t}\n\n\tvar spaceConfigs []*InputUpdateSpaces\n\n\tif spaceConfigs, err = m.GetSpaceConfigs(configDir); err != nil {\n\t\tlo.G.Error(err)\n\t\treturn err\n\t}\n\n\tfor _, input := range spaceConfigs {\n\t\tif err = m.updateSpaceUsers(config, input, uaacUsers); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (m *DefaultSpaceManager) updateSpaceUsers(config *ldap.Config, input *InputUpdateSpaces, uaacUsers map[string]string) error {\n\tif space, err := m.FindSpace(input.Org, input.Space); err == nil {\n\t\tif err = m.UserMgr.UpdateSpaceUsers(config, uaacUsers, UpdateUsersInput{\n\t\t\tSpaceName: space.Entity.Name,\n\t\t\tSpaceGUID: space.MetaData.GUID,\n\t\t\tOrgName: input.Org,\n\t\t\tOrgGUID: space.Entity.OrgGUID,\n\t\t\tRole: \"developers\",\n\t\t\tLdapGroupName: input.GetDeveloperGroup(),\n\t\t\tLdapUsers: input.Developer.LdapUsers,\n\t\t\tUsers: input.Developer.Users,\n\t\t\tRemoveUsers: input.RemoveUsers,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = m.UserMgr.UpdateSpaceUsers(config, uaacUsers,\n\t\t\tUpdateUsersInput{\n\t\t\t\tSpaceName: space.Entity.Name,\n\t\t\t\tSpaceGUID: space.MetaData.GUID,\n\t\t\t\tOrgGUID: space.Entity.OrgGUID,\n\t\t\t\tOrgName: input.Org,\n\t\t\t\tRole: \"managers\",\n\t\t\t\tLdapGroupName: input.GetManagerGroup(),\n\t\t\t\tLdapUsers: input.Manager.LdapUsers,\n\t\t\t\tUsers: input.Manager.Users,\n\t\t\t\tRemoveUsers: input.RemoveUsers,\n\t\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = m.UserMgr.UpdateSpaceUsers(config, uaacUsers,\n\t\t\tUpdateUsersInput{\n\t\t\t\tSpaceName: space.Entity.Name,\n\t\t\t\tSpaceGUID: space.MetaData.GUID,\n\t\t\t\tOrgGUID: space.Entity.OrgGUID,\n\t\t\t\tOrgName: input.Org,\n\t\t\t\tRole: \"auditors\",\n\t\t\t\tLdapGroupName: input.GetAuditorGroup(),\n\t\t\t\tLdapUsers: input.Auditor.LdapUsers,\n\t\t\t\tUsers: input.Auditor.Users,\n\t\t\t\tRemoveUsers: input.RemoveUsers,\n\t\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t} else {\n\t\treturn err\n\t}\n}\n\n\/\/FindSpace -\nfunc (m *DefaultSpaceManager) FindSpace(orgName, spaceName string) (*cloudcontroller.Space, error) {\n\tif orgGUID, err := m.OrgMgr.GetOrgGUID(orgName); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tif spaces, err := m.CloudController.ListSpaces(orgGUID); err == nil {\n\t\t\tfor _, theSpace := range spaces {\n\t\t\t\tif theSpace.Entity.Name == spaceName {\n\t\t\t\t\treturn theSpace, nil\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"Space [%s] not found in org [%s]\", spaceName, orgName)\n\t\t} else {\n\t\t\treturn nil, err\n\t\t}\n\t}\n}\n\nfunc (m *DefaultSpaceManager) GetSpaceConfigList(configDir string) ([]InputCreateSpaces, error) {\n\n\tif files, err := m.UtilsMgr.FindFiles(configDir, \"spaces.yml\"); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tspaceList := []InputCreateSpaces{}\n\t\tfor _, f := range files {\n\t\t\tlo.G.Info(\"Processing space file\", f)\n\t\t\tinput := InputCreateSpaces{}\n\t\t\tif err := m.UtilsMgr.LoadFile(f, &input); err == nil {\n\t\t\t\tspaceList = append(spaceList, input)\n\t\t\t}\n\t\t}\n\t\treturn spaceList, nil\n\t}\n}\n\n\/\/CreateSpaces -\nfunc (m *DefaultSpaceManager) CreateSpaces(configDir, ldapBindPassword string) error {\n\n\tif configSpaceList, err := m.GetSpaceConfigList(configDir); err != nil {\n\t\treturn err\n\t} else {\n\t\tfor _, input := range configSpaceList {\n\t\t\tif len(input.Spaces) >= 0 {\n\t\t\t\tvar orgGUID string\n\t\t\t\tif orgGUID, err = m.OrgMgr.GetOrgGUID(input.Org); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tvar spaces []*cloudcontroller.Space\n\t\t\t\tif spaces, err = m.CloudController.ListSpaces(orgGUID); err == nil {\n\t\t\t\t\tfor _, spaceName := range input.Spaces {\n\t\t\t\t\t\tif m.doesSpaceExist(spaces, spaceName) {\n\t\t\t\t\t\t\tlo.G.Info(fmt.Sprintf(\"[%s] space already exists\", spaceName))\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlo.G.Info(fmt.Sprintf(\"Creating [%s] space in [%s] org\", spaceName, input.Org))\n\t\t\t\t\t\t\tif err = m.CloudController.CreateSpace(spaceName, orgGUID); err == nil {\n\t\t\t\t\t\t\t\tif err = m.UpdateSpaceWithDefaults(configDir, spaceName, input.Org, ldapBindPassword); err != nil {\n\t\t\t\t\t\t\t\t\tlo.G.Error(err)\n\t\t\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tlo.G.Error(err)\n\t\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc (m *DefaultSpaceManager) UpdateSpaceWithDefaults(configDir, spaceName, orgName, ldapBindPassword string) error {\n\tdefaultSpaceConfigFile := filepath.Join(configDir, \"spaceDefaults.yml\")\n\tif m.UtilsMgr.FileOrDirectoryExists(defaultSpaceConfigFile) {\n\t\tvar config *ldap.Config\n\t\tvar err error\n\t\tif ldapBindPassword == \"\" {\n\t\t\tconfig = &ldap.Config{\n\t\t\t\tEnabled: false,\n\t\t\t}\n\t\t} else {\n\t\t\tif config, err = m.LdapMgr.GetConfig(configDir, ldapBindPassword); err != nil {\n\t\t\t\tlo.G.Error(err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tvar uaacUsers map[string]string\n\t\tif uaacUsers, err = m.UAACMgr.ListUsers(); err != nil {\n\t\t\tlo.G.Error(err)\n\t\t\treturn err\n\t\t}\n\n\t\tvar defaultSpaceConfig *InputUpdateSpaces\n\t\tif err = m.UtilsMgr.LoadFile(defaultSpaceConfigFile, &defaultSpaceConfig); err != nil {\n\t\t\tlo.G.Info(defaultSpaceConfigFile, \"doesn't exist\")\n\t\t\treturn nil\n\t\t}\n\t\tdefaultSpaceConfig.Org = orgName\n\t\tdefaultSpaceConfig.Space = spaceName\n\t\tif err = m.updateSpaceUsers(config, defaultSpaceConfig, uaacUsers); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (m *DefaultSpaceManager) doesSpaceExist(spaces []*cloudcontroller.Space, spaceName string) bool {\n\tfor _, space := range spaces {\n\t\tif space.Entity.Name == spaceName {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Pure Go SMART library\n * Copyright 2017 Daniel Swarbrick\n *\/\n\npackage smart\n\nimport (\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unsafe\"\n\n\t\"github.com\/BurntSushi\/toml\"\n)\n\n\/\/ SMART attribute conversion rule\ntype attrConv struct {\n\tConv string\n\tName string\n}\n\ntype driveModel struct {\n\tFamily string\n\tModelRegex string\n\tFirmwareRegex string\n\tWarningMsg string\n\tPresets map[string]attrConv\n\tCompiledRegexp *regexp.Regexp\n}\n\ntype driveDb struct {\n\tDrives []driveModel\n}\n\n\/\/ Individual SMART attribute (12 bytes)\ntype smartAttr struct {\n\tId uint8\n\tFlags uint16\n\tValue uint8 \/\/ normalised value\n\tWorst uint8 \/\/ worst value\n\tVendorBytes [6]byte \/\/ vendor-specific (and sometimes device-specific) data\n\tReserved uint8\n}\n\n\/\/ Page of 30 SMART attributes as per ATA spec\ntype smartPage struct {\n\tVersion uint16\n\tAttrs [30]smartAttr\n}\n\nvar nativeEndian binary.ByteOrder\n\n\/\/ Determine native endianness of system\nfunc init() {\n\ti := uint32(1)\n\tb := (*[4]byte)(unsafe.Pointer(&i))\n\tif b[0] == 1 {\n\t\tnativeEndian = binary.LittleEndian\n\t} else {\n\t\tnativeEndian = binary.BigEndian\n\t}\n}\n\n\/\/ lookupDrive returns the most appropriate driveModel for a given ATA IDENTIFY value\nfunc (db *driveDb) lookupDrive(ident []byte) driveModel {\n\tvar model, defaultModel driveModel\n\n\tfor _, d := range db.Drives {\n\t\t\/\/ Skip placeholder entry\n\t\tif strings.HasPrefix(d.Family, \"$Id\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tif d.Family == \"DEFAULT\" {\n\t\t\tdefaultModel = d\n\t\t\tcontinue\n\t\t}\n\n\t\tif d.CompiledRegexp.Match(ident) {\n\t\t\tmodel = d\n\n\t\t\t\/\/ Inherit presets from defaultModel\n\t\t\tfor id, p := range defaultModel.Presets {\n\t\t\t\tif _, exists := model.Presets[id]; exists {\n\t\t\t\t\t\/\/ Some drives override the conv but don't specify a name, so copy it from default\n\t\t\t\t\tif model.Presets[id].Name == \"\" {\n\t\t\t\t\t\tmodel.Presets[id] = attrConv{Name: p.Name, Conv: model.Presets[id].Conv}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tmodel.Presets[id] = p\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn model\n}\n\n\/\/ openDriveDb opens a .toml formatted drive database, unmarshalls it, and returns a driveDb\nfunc openDriveDb(dbfile string) (driveDb, error) {\n\tvar db driveDb\n\n\tif _, err := toml.DecodeFile(dbfile, &db); err != nil {\n\t\treturn db, fmt.Errorf(\"Cannot open \/ parse drive DB: %s\", err)\n\t}\n\n\tfor i, d := range db.Drives {\n\t\tdb.Drives[i].CompiledRegexp, _ = regexp.Compile(d.ModelRegex)\n\t}\n\n\treturn db, nil\n}\n\n\/\/ decodeVendorBytes decodes the six-byte vendor byte array based on the conversion rule passed as\n\/\/ conv. The conversion may also include the reserved byte, normalised value or worst value byte.\nfunc (sa *smartAttr) decodeVendorBytes(conv string) (r uint64) {\n\tvb := sa.VendorBytes\n\n\t\/\/ TODO: Complete other attr conversion, honour specified byte order\n\tswitch conv {\n\tcase \"raw16(raw16)\", \"raw16(avg16)\":\n\t\tr = uint64(vb[0]) | uint64(vb[1])<<8\n\tcase \"raw24(raw8)\", \"raw24\/raw24\", \"raw24\/raw32\":\n\t\tr = uint64(vb[0]) | uint64(vb[1])<<8 | uint64(vb[2])<<16\n\tcase \"raw48\":\n\t\tr = uint64(vb[0]) | uint64(vb[1])<<8 | uint64(vb[2])<<16 |\n\t\t\tuint64(vb[3])<<24 | uint64(vb[4])<<32 | uint64(vb[5])<<40\n\tcase \"raw56\":\n\t\tr = uint64(vb[0]) | uint64(vb[1])<<8 | uint64(vb[2])<<16 | uint64(vb[3])<<24 |\n\t\t\tuint64(vb[4])<<32 | uint64(vb[5])<<40 | uint64(sa.Reserved)<<48\n\tcase \"tempminmax\":\n\t\t\/\/ This is device specific!\n\t\tr = uint64(vb[0])\n\t}\n\n\treturn r\n}\n\nfunc printSMART(smart smartPage, drive driveModel) {\n\tfmt.Printf(\"\\nSMART structure version: %d\\n\", smart.Version)\n\tfmt.Printf(\"ID# ATTRIBUTE_NAME FLAG VALUE WORST RESERVED RAW_VALUE VENDOR_BYTES\\n\")\n\n\tfor _, attr := range smart.Attrs {\n\t\tvar (\n\t\t\trawValue uint64\n\t\t\tconv attrConv\n\t\t)\n\n\t\tif attr.Id == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tconv, ok := drive.Presets[strconv.Itoa(int(attr.Id))]\n\t\tif ok {\n\t\t\trawValue = attr.decodeVendorBytes(conv.Conv)\n\t\t}\n\n\t\tfmt.Printf(\"%3d %-24s %#04x %03d %03d %03d %-12d %v (%s)\\n\",\n\t\t\tattr.Id, conv.Name, attr.Flags, attr.Value, attr.Worst, attr.Reserved,\n\t\t\trawValue, attr.VendorBytes, conv.Conv)\n\t}\n}\n<commit_msg>Decode pre-fail \/ advisory bit and online data collection bit from attribute flags<commit_after>\/*\n * Pure Go SMART library\n * Copyright 2017 Daniel Swarbrick\n *\/\n\npackage smart\n\nimport (\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unsafe\"\n\n\t\"github.com\/BurntSushi\/toml\"\n)\n\n\/\/ SMART attribute conversion rule\ntype attrConv struct {\n\tConv string\n\tName string\n}\n\ntype driveModel struct {\n\tFamily string\n\tModelRegex string\n\tFirmwareRegex string\n\tWarningMsg string\n\tPresets map[string]attrConv\n\tCompiledRegexp *regexp.Regexp\n}\n\ntype driveDb struct {\n\tDrives []driveModel\n}\n\n\/\/ Individual SMART attribute (12 bytes)\ntype smartAttr struct {\n\tId uint8\n\tFlags uint16\n\tValue uint8 \/\/ normalised value\n\tWorst uint8 \/\/ worst value\n\tVendorBytes [6]byte \/\/ vendor-specific (and sometimes device-specific) data\n\tReserved uint8\n}\n\n\/\/ Page of 30 SMART attributes as per ATA spec\ntype smartPage struct {\n\tVersion uint16\n\tAttrs [30]smartAttr\n}\n\nvar nativeEndian binary.ByteOrder\n\n\/\/ Determine native endianness of system\nfunc init() {\n\ti := uint32(1)\n\tb := (*[4]byte)(unsafe.Pointer(&i))\n\tif b[0] == 1 {\n\t\tnativeEndian = binary.LittleEndian\n\t} else {\n\t\tnativeEndian = binary.BigEndian\n\t}\n}\n\n\/\/ lookupDrive returns the most appropriate driveModel for a given ATA IDENTIFY value\nfunc (db *driveDb) lookupDrive(ident []byte) driveModel {\n\tvar model, defaultModel driveModel\n\n\tfor _, d := range db.Drives {\n\t\t\/\/ Skip placeholder entry\n\t\tif strings.HasPrefix(d.Family, \"$Id\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tif d.Family == \"DEFAULT\" {\n\t\t\tdefaultModel = d\n\t\t\tcontinue\n\t\t}\n\n\t\tif d.CompiledRegexp.Match(ident) {\n\t\t\tmodel = d\n\n\t\t\t\/\/ Inherit presets from defaultModel\n\t\t\tfor id, p := range defaultModel.Presets {\n\t\t\t\tif _, exists := model.Presets[id]; exists {\n\t\t\t\t\t\/\/ Some drives override the conv but don't specify a name, so copy it from default\n\t\t\t\t\tif model.Presets[id].Name == \"\" {\n\t\t\t\t\t\tmodel.Presets[id] = attrConv{Name: p.Name, Conv: model.Presets[id].Conv}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tmodel.Presets[id] = p\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn model\n}\n\n\/\/ openDriveDb opens a .toml formatted drive database, unmarshalls it, and returns a driveDb\nfunc openDriveDb(dbfile string) (driveDb, error) {\n\tvar db driveDb\n\n\tif _, err := toml.DecodeFile(dbfile, &db); err != nil {\n\t\treturn db, fmt.Errorf(\"Cannot open \/ parse drive DB: %s\", err)\n\t}\n\n\tfor i, d := range db.Drives {\n\t\tdb.Drives[i].CompiledRegexp, _ = regexp.Compile(d.ModelRegex)\n\t}\n\n\treturn db, nil\n}\n\n\/\/ decodeVendorBytes decodes the six-byte vendor byte array based on the conversion rule passed as\n\/\/ conv. The conversion may also include the reserved byte, normalised value or worst value byte.\nfunc (sa *smartAttr) decodeVendorBytes(conv string) (r uint64) {\n\tvb := sa.VendorBytes\n\n\t\/\/ TODO: Complete other attr conversion, honour specified byte order\n\tswitch conv {\n\tcase \"raw16(raw16)\", \"raw16(avg16)\":\n\t\tr = uint64(vb[0]) | uint64(vb[1])<<8\n\tcase \"raw24(raw8)\", \"raw24\/raw24\", \"raw24\/raw32\":\n\t\tr = uint64(vb[0]) | uint64(vb[1])<<8 | uint64(vb[2])<<16\n\tcase \"raw48\":\n\t\tr = uint64(vb[0]) | uint64(vb[1])<<8 | uint64(vb[2])<<16 |\n\t\t\tuint64(vb[3])<<24 | uint64(vb[4])<<32 | uint64(vb[5])<<40\n\tcase \"raw56\":\n\t\tr = uint64(vb[0]) | uint64(vb[1])<<8 | uint64(vb[2])<<16 | uint64(vb[3])<<24 |\n\t\t\tuint64(vb[4])<<32 | uint64(vb[5])<<40 | uint64(sa.Reserved)<<48\n\tcase \"tempminmax\":\n\t\t\/\/ This is device specific!\n\t\tr = uint64(vb[0])\n\t}\n\n\treturn r\n}\n\nfunc printSMART(smart smartPage, drive driveModel) {\n\tfmt.Printf(\"\\nSMART structure version: %d\\n\", smart.Version)\n\tfmt.Printf(\"ID# ATTRIBUTE_NAME FLAG VALUE WORST RESERVED TYPE UPDATED RAW_VALUE VENDOR_BYTES\\n\")\n\n\tfor _, attr := range smart.Attrs {\n\t\tvar (\n\t\t\trawValue uint64\n\t\t\tconv attrConv\n\t\t\tattrType, attrUpdated string\n\t\t)\n\n\t\tif attr.Id == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tconv, ok := drive.Presets[strconv.Itoa(int(attr.Id))]\n\t\tif ok {\n\t\t\trawValue = attr.decodeVendorBytes(conv.Conv)\n\t\t}\n\n\t\t\/\/ Pre-fail \/ advisory bit\n\t\tif attr.Flags&0x0001 > 0 {\n\t\t\tattrType = \"Pre-fail\"\n\t\t} else {\n\t\t\tattrType = \"Old_age\"\n\t\t}\n\n\t\t\/\/ Online data collection bit\n\t\tif attr.Flags&0x0002 > 0 {\n\t\t\tattrUpdated = \"Always\"\n\t\t} else {\n\t\t\tattrUpdated = \"Offline\"\n\t\t}\n\n\t\tfmt.Printf(\"%3d %-24s %#04x %03d %03d %03d %-8s %-7s %-12d %v (%s)\\n\",\n\t\t\tattr.Id, conv.Name, attr.Flags, attr.Value, attr.Worst, attr.Reserved, attrType,\n\t\t\tattrUpdated, rawValue, attr.VendorBytes, conv.Conv)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package serial\n\n\/*\n\n#include <fcntl.h>\n#include <errno.h>\n#include <stdio.h>\n#include <sys\/ioctl.h>\n#include <sys\/select.h>\n#include <termios.h>\n#include <time.h>\n#include <unistd.h>\n\n#include <linux\/serial.h>\n\n\/\/ Define (eventually) missing constants\n#ifndef IUCLC\n\tstatic const tcflag_t IUCLC = 0;\n#endif\n\n\/\/ ioctl call is not available through syscall package\n\/\/int ioctl_wrapper(int d, unsigned long request) {\n\/\/\treturn ioctl(d, request);\n\/\/}\n\n\/\/int fcntl_wrapper(int fd, int cmd, int arg) {\n\/\/\treturn fcntl(fd, cmd, arg);\n\/\/}\n\n\/\/ Gain exclusive access to serial port\nvoid setTIOCEXCL(int handle) {\n#if defined TIOCEXCL\n\tioctl(handle, TIOCEXCL);\n#endif\n}\n\n\/\/ Release exclusive access to serial port\nvoid setTIOCNXCL(int handle) {\n#if defined TIOCNXCL\n\tioctl(handle, TIOCNXCL);\n#endif\n}\n\n\/\/int selectRead(int handle) {\n\/\/\tfd_set rfds;\n\/\/\tFD_ZERO(&rfds);\n\/\/\tFD_SET(handle, &rfds);\n\/\/ int ret = select(handle+1, &rfds, NULL, NULL, NULL);\n\/\/\tif (ret==-1)\n\/\/\t\treturn -1;\n\/\/\telse\n\/\/\t\treturn 0;\n\/\/}\n\n*\/\nimport \"C\"\nimport \"io\/ioutil\"\nimport \"regexp\"\nimport \"syscall\"\n\n\/\/ native syscall wrapper functions\n\nfunc getExclusiveAccess(handle int) error {\n\t_, err := C.setTIOCEXCL(C.int(handle))\n\treturn err\n}\n\nfunc releaseExclusiveAccess(handle int) error {\n\t_, err := C.setTIOCNXCL(C.int(handle))\n\treturn err\n}\n\nfunc getTermSettings(handle int) (*C.struct_termios, error) {\n\tsettings := new(C.struct_termios)\n\t_, err := C.tcgetattr(C.int(handle), settings)\n\treturn settings, err\n}\n\nfunc setTermSettings(handle int, settings *C.struct_termios) error {\n\t_, err := C.tcsetattr(C.int(handle), C.TCSANOW, settings)\n\treturn err\n}\n\nfunc getErrno(err error) int {\n\treturn int(err.(syscall.Errno))\n}\n\n\/\/ OS dependent values\n\nconst devFolder = \"\/dev\"\nconst regexFilter = \"(ttyS|ttyUSB|ttyACM|ttyAMA|rfcomm|ttyO)[0-9]{1,3}\"\n\n\/\/ opaque type that implements SerialPort interface for linux\ntype linuxSerialPort struct {\n\tHandle int\n}\n\nfunc GetPortsList() ([]string, error) {\n\tfiles, err := ioutil.ReadDir(devFolder)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tports := make([]string, len(files))\n\tfound := 0\n\tfor _, f := range files {\n\t\t\/\/ Skip folders\n\t\tif f.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Keep only devices with the correct name\n\t\tmatch, err := regexp.MatchString(regexFilter, f.Name())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !match {\n\t\t\tcontinue\n\t\t}\n\n\t\tportName := devFolder + \"\/\" + f.Name()\n\n\t\t\/\/ Check if serial port is real or is a placeholder serial port \"ttySxx\"\n\t\tif f.Name()[:4] == \"ttyS\" {\n\t\t\tport, err := OpenPort(portName, false)\n\t\t\tif err != nil {\n\t\t\t\tserr, ok := err.(*SerialPortError)\n\t\t\t\tif ok && serr.Code() == ERROR_INVALID_SERIAL_PORT {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tport.Close()\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Save found serial port in the resulting list\n\t\tports[found] = portName\n\t\tfound++\n\t}\n\n\tports = ports[:found]\n\treturn ports, nil\n}\n\nfunc (port *linuxSerialPort) Close() error {\n\treleaseExclusiveAccess(port.Handle)\n\treturn syscall.Close(port.Handle)\n}\n\nfunc (port *linuxSerialPort) Read(p []byte) (n int, err error) {\n\treturn syscall.Read(port.Handle, p)\n}\n\nfunc (port *linuxSerialPort) Write(p []byte) (n int, err error) {\n\treturn syscall.Write(port.Handle, p)\n}\n\nvar baudrateMap = map[int]C.speed_t{\n\t0: C.B0,\n\t50: C.B50,\n\t75: C.B75,\n\t110: C.B110,\n\t134: C.B134,\n\t150: C.B150,\n\t200: C.B200,\n\t300: C.B300,\n\t600: C.B600,\n\t1200: C.B1200,\n\t1800: C.B1800,\n\t2400: C.B2400,\n\t4800: C.B4800,\n\t9600: C.B9600,\n\t19200: C.B19200,\n\t38400: C.B38400,\n\t57600: C.B57600,\n\t115200: C.B115200,\n\t230400: C.B230400,\n\t460800: C.B460800,\n\t500000: C.B500000,\n\t576000: C.B576000,\n\t921600: C.B921600,\n\t1000000: C.B1000000,\n\t1152000: C.B1152000,\n\t1500000: C.B1500000,\n\t2000000: C.B2000000,\n\t2500000: C.B2500000,\n\t3000000: C.B3000000,\n\t3500000: C.B3500000,\n\t4000000: C.B4000000,\n}\n\nfunc (port *linuxSerialPort) SetSpeed(speed int) error {\n\tbaudrate, ok := baudrateMap[speed]\n\tif !ok {\n\t\treturn &SerialPortError{code: ERROR_INVALID_PORT_SPEED}\n\t}\n\tsettings, err := getTermSettings(port.Handle)\n\tif err != nil {\n\t\treturn err\n\t}\n\tC.cfsetispeed(settings, baudrate)\n\tC.cfsetospeed(settings, baudrate)\n\treturn setTermSettings(port.Handle, settings)\n}\n\nfunc OpenPort(portName string, exclusive bool) (SerialPort, error) {\n\thandle, err := syscall.Open(portName, syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_NDELAY, 0)\n\tif err != nil {\n\t\tswitch err {\n\t\tcase syscall.EBUSY:\n\t\t\treturn nil, &SerialPortError{code: ERROR_PORT_BUSY}\n\t\tcase syscall.EACCES:\n\t\t\treturn nil, &SerialPortError{code: ERROR_PERMISSION_DENIED}\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t\/\/ Setup serial port with defaults\n\n\tsettings, err := getTermSettings(handle)\n\tif err != nil {\n\t\tsyscall.Close(handle)\n\t\treturn nil, &SerialPortError{code: ERROR_INVALID_SERIAL_PORT}\n\t}\n\n\t\/\/ Set local mode\n settings.c_cflag |= C.CREAD | C.CLOCAL\n\n\t\/\/ Set raw mode\n settings.c_lflag &= ^C.tcflag_t(C.ICANON | C.ECHO | C.ECHOE | C.ECHOK | C.ECHONL | C.ECHOCTL | C.ECHOPRT | C.ECHOKE | C.ISIG | C.IEXTEN)\n settings.c_iflag &= ^C.tcflag_t(C.IXON | C.IXOFF | C.IXANY | C.INPCK | C.IGNPAR | C.PARMRK | C.ISTRIP | C.IGNBRK | C.BRKINT | C.INLCR | C.IGNCR | C.ICRNL | C.IUCLC)\n settings.c_oflag &= ^C.tcflag_t(C.OPOST)\n\n\t\/\/ Block reads until at least one char is available (no timeout)\n\tsettings.c_cc[C.VMIN] = 1;\n\tsettings.c_cc[C.VTIME] = 0;\n\n\terr = setTermSettings(handle, settings)\n\tif err != nil {\n\t\tsyscall.Close(handle)\n\t\treturn nil, &SerialPortError{code: ERROR_INVALID_SERIAL_PORT}\n\t}\n\/*\n settings->c_cflag &= ~CRTSCTS;\n*\/\n\tsyscall.SetNonblock(handle, false)\n\n\tif exclusive {\n\t\tgetExclusiveAccess(handle)\n\t}\n\n\tserialPort := &linuxSerialPort{\n\t\tHandle: handle,\n\t}\n\treturn serialPort, nil\n}\n\n\/\/ vi:ts=2\n<commit_msg>fixed indent<commit_after>package serial\n\n\/*\n\n#include <fcntl.h>\n#include <errno.h>\n#include <stdio.h>\n#include <sys\/ioctl.h>\n#include <sys\/select.h>\n#include <termios.h>\n#include <time.h>\n#include <unistd.h>\n\n#include <linux\/serial.h>\n\n\/\/ Define (eventually) missing constants\n#ifndef IUCLC\n\tstatic const tcflag_t IUCLC = 0;\n#endif\n\n\/\/ ioctl call is not available through syscall package\n\/\/int ioctl_wrapper(int d, unsigned long request) {\n\/\/\treturn ioctl(d, request);\n\/\/}\n\n\/\/int fcntl_wrapper(int fd, int cmd, int arg) {\n\/\/\treturn fcntl(fd, cmd, arg);\n\/\/}\n\n\/\/ Gain exclusive access to serial port\nvoid setTIOCEXCL(int handle) {\n#if defined TIOCEXCL\n\tioctl(handle, TIOCEXCL);\n#endif\n}\n\n\/\/ Release exclusive access to serial port\nvoid setTIOCNXCL(int handle) {\n#if defined TIOCNXCL\n\tioctl(handle, TIOCNXCL);\n#endif\n}\n\n\/\/int selectRead(int handle) {\n\/\/\tfd_set rfds;\n\/\/\tFD_ZERO(&rfds);\n\/\/\tFD_SET(handle, &rfds);\n\/\/ int ret = select(handle+1, &rfds, NULL, NULL, NULL);\n\/\/\tif (ret==-1)\n\/\/\t\treturn -1;\n\/\/\telse\n\/\/\t\treturn 0;\n\/\/}\n\n*\/\nimport \"C\"\nimport \"io\/ioutil\"\nimport \"regexp\"\nimport \"syscall\"\n\n\/\/ native syscall wrapper functions\n\nfunc getExclusiveAccess(handle int) error {\n\t_, err := C.setTIOCEXCL(C.int(handle))\n\treturn err\n}\n\nfunc releaseExclusiveAccess(handle int) error {\n\t_, err := C.setTIOCNXCL(C.int(handle))\n\treturn err\n}\n\nfunc getTermSettings(handle int) (*C.struct_termios, error) {\n\tsettings := new(C.struct_termios)\n\t_, err := C.tcgetattr(C.int(handle), settings)\n\treturn settings, err\n}\n\nfunc setTermSettings(handle int, settings *C.struct_termios) error {\n\t_, err := C.tcsetattr(C.int(handle), C.TCSANOW, settings)\n\treturn err\n}\n\nfunc getErrno(err error) int {\n\treturn int(err.(syscall.Errno))\n}\n\n\/\/ OS dependent values\n\nconst devFolder = \"\/dev\"\nconst regexFilter = \"(ttyS|ttyUSB|ttyACM|ttyAMA|rfcomm|ttyO)[0-9]{1,3}\"\n\n\/\/ opaque type that implements SerialPort interface for linux\ntype linuxSerialPort struct {\n\tHandle int\n}\n\nfunc GetPortsList() ([]string, error) {\n\tfiles, err := ioutil.ReadDir(devFolder)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tports := make([]string, len(files))\n\tfound := 0\n\tfor _, f := range files {\n\t\t\/\/ Skip folders\n\t\tif f.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Keep only devices with the correct name\n\t\tmatch, err := regexp.MatchString(regexFilter, f.Name())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif !match {\n\t\t\tcontinue\n\t\t}\n\n\t\tportName := devFolder + \"\/\" + f.Name()\n\n\t\t\/\/ Check if serial port is real or is a placeholder serial port \"ttySxx\"\n\t\tif f.Name()[:4] == \"ttyS\" {\n\t\t\tport, err := OpenPort(portName, false)\n\t\t\tif err != nil {\n\t\t\t\tserr, ok := err.(*SerialPortError)\n\t\t\t\tif ok && serr.Code() == ERROR_INVALID_SERIAL_PORT {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tport.Close()\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Save found serial port in the resulting list\n\t\tports[found] = portName\n\t\tfound++\n\t}\n\n\tports = ports[:found]\n\treturn ports, nil\n}\n\nfunc (port *linuxSerialPort) Close() error {\n\treleaseExclusiveAccess(port.Handle)\n\treturn syscall.Close(port.Handle)\n}\n\nfunc (port *linuxSerialPort) Read(p []byte) (n int, err error) {\n\treturn syscall.Read(port.Handle, p)\n}\n\nfunc (port *linuxSerialPort) Write(p []byte) (n int, err error) {\n\treturn syscall.Write(port.Handle, p)\n}\n\nvar baudrateMap = map[int]C.speed_t{\n\t0: C.B0,\n\t50: C.B50,\n\t75: C.B75,\n\t110: C.B110,\n\t134: C.B134,\n\t150: C.B150,\n\t200: C.B200,\n\t300: C.B300,\n\t600: C.B600,\n\t1200: C.B1200,\n\t1800: C.B1800,\n\t2400: C.B2400,\n\t4800: C.B4800,\n\t9600: C.B9600,\n\t19200: C.B19200,\n\t38400: C.B38400,\n\t57600: C.B57600,\n\t115200: C.B115200,\n\t230400: C.B230400,\n\t460800: C.B460800,\n\t500000: C.B500000,\n\t576000: C.B576000,\n\t921600: C.B921600,\n\t1000000: C.B1000000,\n\t1152000: C.B1152000,\n\t1500000: C.B1500000,\n\t2000000: C.B2000000,\n\t2500000: C.B2500000,\n\t3000000: C.B3000000,\n\t3500000: C.B3500000,\n\t4000000: C.B4000000,\n}\n\nfunc (port *linuxSerialPort) SetSpeed(speed int) error {\n\tbaudrate, ok := baudrateMap[speed]\n\tif !ok {\n\t\treturn &SerialPortError{code: ERROR_INVALID_PORT_SPEED}\n\t}\n\tsettings, err := getTermSettings(port.Handle)\n\tif err != nil {\n\t\treturn err\n\t}\n\tC.cfsetispeed(settings, baudrate)\n\tC.cfsetospeed(settings, baudrate)\n\treturn setTermSettings(port.Handle, settings)\n}\n\nfunc OpenPort(portName string, exclusive bool) (SerialPort, error) {\n\thandle, err := syscall.Open(portName, syscall.O_RDWR|syscall.O_NOCTTY|syscall.O_NDELAY, 0)\n\tif err != nil {\n\t\tswitch err {\n\t\tcase syscall.EBUSY:\n\t\t\treturn nil, &SerialPortError{code: ERROR_PORT_BUSY}\n\t\tcase syscall.EACCES:\n\t\t\treturn nil, &SerialPortError{code: ERROR_PERMISSION_DENIED}\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t\/\/ Setup serial port with defaults\n\n\tsettings, err := getTermSettings(handle)\n\tif err != nil {\n\t\tsyscall.Close(handle)\n\t\treturn nil, &SerialPortError{code: ERROR_INVALID_SERIAL_PORT}\n\t}\n\n\t\/\/ Set local mode\n\tsettings.c_cflag |= C.CREAD | C.CLOCAL\n\n\t\/\/ Set raw mode\n\tsettings.c_lflag &= ^C.tcflag_t(C.ICANON | C.ECHO | C.ECHOE | C.ECHOK | C.ECHONL | C.ECHOCTL | C.ECHOPRT | C.ECHOKE | C.ISIG | C.IEXTEN)\n\tsettings.c_iflag &= ^C.tcflag_t(C.IXON | C.IXOFF | C.IXANY | C.INPCK | C.IGNPAR | C.PARMRK | C.ISTRIP | C.IGNBRK | C.BRKINT | C.INLCR | C.IGNCR | C.ICRNL | C.IUCLC)\n\tsettings.c_oflag &= ^C.tcflag_t(C.OPOST)\n\n\t\/\/ Block reads until at least one char is available (no timeout)\n\tsettings.c_cc[C.VMIN] = 1\n\tsettings.c_cc[C.VTIME] = 0\n\n\terr = setTermSettings(handle, settings)\n\tif err != nil {\n\t\tsyscall.Close(handle)\n\t\treturn nil, &SerialPortError{code: ERROR_INVALID_SERIAL_PORT}\n\t}\n\t\/*\n\t settings->c_cflag &= ~CRTSCTS;\n\t*\/\n\tsyscall.SetNonblock(handle, false)\n\n\tif exclusive {\n\t\tgetExclusiveAccess(handle)\n\t}\n\n\tserialPort := &linuxSerialPort{\n\t\tHandle: handle,\n\t}\n\treturn serialPort, nil\n}\n\n\/\/ vi:ts=2\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n)\n\nvar META_VERSION = 1\nvar BASE_PORT = 10000\nvar globalWorldBaseDir string\nvar globalServerMap = NewServerMap()\nvar globalIdSequence = NewIdSequence(0)\nvar globalPortMapper = NewPortMapper()\n\ntype PortMapper struct {\n\tportSequence int\n\tfreePorts []int\n\tmutex sync.Mutex\n}\n\nfunc (pm *PortMapper) getPort() int {\n\tpm.mutex.Lock()\n\tdefer pm.mutex.Unlock()\n\tif len(pm.freePorts) == 0 {\n\t\tresult := pm.portSequence\n\t\tpm.portSequence++\n\t\treturn result\n\t}\n\tresult := pm.freePorts[len(pm.freePorts)-1]\n\tpm.freePorts = pm.freePorts[:len(pm.freePorts)-1]\n\treturn result\n}\n\nfunc (pm *PortMapper) freePort(port int) {\n\tpm.mutex.Lock()\n\tdefer pm.mutex.Unlock()\n\tpm.freePorts = append(pm.freePorts, port)\n}\n\nfunc NewPortMapper() *PortMapper {\n\treturn &PortMapper{\n\t\tfreePorts: make([]int, 0, 10),\n\t}\n}\n\ntype IdSequence struct {\n\tnextValue int\n\tmutex sync.Mutex\n}\n\nfunc (is *IdSequence) getId() int {\n\tis.mutex.Lock()\n\tdefer is.mutex.Unlock()\n\tresult := is.nextValue\n\tis.nextValue++\n\treturn result\n}\n\nfunc (is *IdSequence) updateSequence(usedValue int) {\n\tif usedValue >= is.nextValue {\n\t\tis.nextValue = usedValue + 1\n\t}\n}\n\nfunc NewIdSequence(start int) *IdSequence {\n\treturn &IdSequence{\n\t\tnextValue: start,\n\t}\n}\n\ntype Server struct {\n\tId int\n\tCreatorId int\n\tName string\n\tPortOffset int\n\tHandle *exec.Cmd `json:\"-\"`\n}\n\nfunc NewServer(id int, creatorId int, name string) *Server {\n\tif id < 0 {\n\t\tid = globalIdSequence.getId()\n\t}\n\treturn &Server{\n\t\tId: id,\n\t\tCreatorId: creatorId,\n\t\tName: strings.TrimSpace(name),\n\t\tPortOffset: globalPortMapper.getPort(),\n\t}\n}\n\ntype ServerMap struct {\n\tservers map[string]*Server\n\tmutex sync.Mutex\n}\n\nfunc (sm *ServerMap) Put(server *Server) {\n\tsm.mutex.Lock()\n\tdefer sm.mutex.Unlock()\n\tsm.servers[str(server.Id)] = server\n}\n\nfunc (sm *ServerMap) Get(id int) *Server {\n\tsm.mutex.Lock()\n\tdefer sm.mutex.Unlock()\n\treturn sm.servers[str(id)]\n}\n\nfunc (sm *ServerMap) Remove(id int) *Server {\n\tsm.mutex.Lock()\n\tdefer sm.mutex.Unlock()\n\tserver := sm.servers[str(id)]\n\tdelete(sm.servers, str(id))\n\treturn server\n}\n\nfunc (sm *ServerMap) Encode(w io.Writer) error {\n\tsm.mutex.Lock()\n\tdefer sm.mutex.Unlock()\n\terr := json.NewEncoder(w).Encode(sm.servers)\n\treturn err\n}\n\nfunc NewServerMap() *ServerMap {\n\treturn &ServerMap{\n\t\tservers: make(map[string]*Server),\n\t}\n}\n\nfunc runServer(server *Server) {\n\tapp := \".\/server\"\n\targs := []string{\n\t\t\"-client\", \"client\",\n\t\t\"-world\", worldDir(server.Id),\n\t\t\"-host\", \":\" + str(BASE_PORT+server.PortOffset),\n\t}\n\n\tcmd := exec.Command(app, args...)\n\tserver.Handle = cmd\n\tglobalServerMap.Put(server)\n\terr := cmd.Run()\n\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t}\n}\n\ntype ApiGeneric struct {\n\tServerId int\n}\n\ntype ApiCreate struct {\n\tCreatorId int\n\tServerName string\n}\n\ntype ServerListResponse struct {\n\tList map[string]Server\n}\n\nfunc parseGenericRequest(r *http.Request) (ApiGeneric, error) {\n\tvar result ApiGeneric\n\terr := json.NewDecoder(r.Body).Decode(&result)\n\tif err != nil {\n\t\tlog.Println(\"Error while parsing generic request\")\n\t\treturn result, err\n\t}\n\treturn result, nil\n}\n\nfunc getHandler(w http.ResponseWriter, r *http.Request) {\n\trequest, err := parseGenericRequest(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tserver := globalServerMap.Get(request.ServerId)\n\tif server == nil {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\terr = json.NewEncoder(w).Encode(server)\n\tif err != nil {\n\t\tlog.Println(\"Error marshalling server\")\n\t\thttp.Error(w, err.Error(), 500)\n\t}\n}\n\nfunc listHandler(w http.ResponseWriter, r *http.Request) {\n\terr := globalServerMap.Encode(w)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t}\n}\n\nfunc createHandler(w http.ResponseWriter, r *http.Request) {\n\tvar request ApiCreate\n\terr := json.NewDecoder(r.Body).Decode(&request)\n\tif err != nil {\n\t\tlog.Println(\"Error parsing create request\", err)\n\t\thttp.Error(w, err.Error(), 500)\n\t}\n\n\tserver := NewServer(-1, request.CreatorId, request.ServerName)\n\tsaveServer(server)\n\tgo runServer(server)\n}\n\nfunc deleteHandler(w http.ResponseWriter, r *http.Request) {\n\trequest, err := parseGenericRequest(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tserver := globalServerMap.Remove(request.ServerId)\n\terr = server.Handle.Process.Kill()\n\tif err != nil {\n\t\tlog.Println(\"Error while sending SIG_DEATH to running server\", err)\n\t}\n\t_, err = server.Handle.Process.Wait()\n\tif err != nil {\n\t\tlog.Println(\"Error while waiting on process.\", err)\n\t}\n\tglobalPortMapper.freePort(server.PortOffset)\n}\n\nfunc saveServer(server *Server) {\n\terr := os.MkdirAll(worldDir(server.Id), 0755)\n\tif err != nil {\n\t\tlog.Println(\"Error creating required directories:\", err)\n\t\treturn\n\t}\n\n\tfile, err := os.Create(path.Join(worldDir(server.Id), \"meta.server\"))\n\tif err != nil {\n\t\tlog.Println(\"Error creating file to save server:\", err)\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\terr = json.NewEncoder(file).Encode(server)\n\tif err != nil {\n\t\tlog.Println(\"Error encoding meta data.\", err)\n\t}\n}\n\nfunc loadServers() {\n\tfiles, err := ioutil.ReadDir(globalWorldBaseDir)\n\tif err != nil {\n\t\tlog.Println(\"Error reading base world directory\")\n\t\treturn\n\t}\n\n\tfor _, fileInfo := range files {\n\t\tloadServer(fileInfo)\n\t}\n}\n\nfunc loadServer(fileInfo os.FileInfo) {\n\tif !fileInfo.IsDir() {\n\t\treturn\n\t}\n\n\tfile, err := os.Open(path.Join(globalWorldBaseDir, fileInfo.Name(), \"meta.server\"))\n\tif err != nil {\n\t\tlog.Println(\"Error opening server meta data:\", err)\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\tvar server Server\n\terr = json.NewDecoder(file).Decode(&server)\n\tif err != nil {\n\t\tlog.Println(\"Error while parsing meta json for\", fileInfo.Name(), err)\n\t\treturn\n\t}\n\n\tserver.PortOffset = globalPortMapper.getPort()\n\tglobalIdSequence.updateSequence(server.Id)\n\tgo runServer(&server)\n}\n\nfunc main() {\n\tworldFolder := flag.String(\"worlds\", \"worlds\/\", \"Sets the base folder used to store the worlds.\")\n\tflag.Parse()\n\tglobalWorldBaseDir = *worldFolder\n\n\tloadServers()\n\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\thttp.HandleFunc(\"\/get\", getHandler)\n\thttp.HandleFunc(\"\/list\", listHandler)\n\thttp.HandleFunc(\"\/create\", createHandler)\n\thttp.HandleFunc(\"\/delete\", deleteHandler)\n\terr := http.ListenAndServe(\":3001\", nil)\n\tif err != nil {\n\t\tlog.Fatal(\"ListenAndServe:\", err)\n\t}\n}\n\n\/\/ Utility function\nfunc str(i int) string {\n\treturn fmt.Sprintf(\"%d\", i)\n}\n\nfunc worldDir(serverId int) string {\n\treturn path.Join(globalWorldBaseDir, \"world\"+str(serverId))\n}\n<commit_msg>Print errors in a more useful way for a couple of places<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n)\n\nvar META_VERSION = 1\nvar BASE_PORT = 10000\nvar globalWorldBaseDir string\nvar globalServerMap = NewServerMap()\nvar globalIdSequence = NewIdSequence(0)\nvar globalPortMapper = NewPortMapper()\n\ntype PortMapper struct {\n\tportSequence int\n\tfreePorts []int\n\tmutex sync.Mutex\n}\n\nfunc (pm *PortMapper) getPort() int {\n\tpm.mutex.Lock()\n\tdefer pm.mutex.Unlock()\n\tif len(pm.freePorts) == 0 {\n\t\tresult := pm.portSequence\n\t\tpm.portSequence++\n\t\treturn result\n\t}\n\tresult := pm.freePorts[len(pm.freePorts)-1]\n\tpm.freePorts = pm.freePorts[:len(pm.freePorts)-1]\n\treturn result\n}\n\nfunc (pm *PortMapper) freePort(port int) {\n\tpm.mutex.Lock()\n\tdefer pm.mutex.Unlock()\n\tpm.freePorts = append(pm.freePorts, port)\n}\n\nfunc NewPortMapper() *PortMapper {\n\treturn &PortMapper{\n\t\tfreePorts: make([]int, 0, 10),\n\t}\n}\n\ntype IdSequence struct {\n\tnextValue int\n\tmutex sync.Mutex\n}\n\nfunc (is *IdSequence) getId() int {\n\tis.mutex.Lock()\n\tdefer is.mutex.Unlock()\n\tresult := is.nextValue\n\tis.nextValue++\n\treturn result\n}\n\nfunc (is *IdSequence) updateSequence(usedValue int) {\n\tif usedValue >= is.nextValue {\n\t\tis.nextValue = usedValue + 1\n\t}\n}\n\nfunc NewIdSequence(start int) *IdSequence {\n\treturn &IdSequence{\n\t\tnextValue: start,\n\t}\n}\n\ntype Server struct {\n\tId int\n\tCreatorId int\n\tName string\n\tPortOffset int\n\tHandle *exec.Cmd `json:\"-\"`\n}\n\nfunc NewServer(id int, creatorId int, name string) *Server {\n\tif id < 0 {\n\t\tid = globalIdSequence.getId()\n\t}\n\treturn &Server{\n\t\tId: id,\n\t\tCreatorId: creatorId,\n\t\tName: strings.TrimSpace(name),\n\t\tPortOffset: globalPortMapper.getPort(),\n\t}\n}\n\ntype ServerMap struct {\n\tservers map[string]*Server\n\tmutex sync.Mutex\n}\n\nfunc (sm *ServerMap) Put(server *Server) {\n\tsm.mutex.Lock()\n\tdefer sm.mutex.Unlock()\n\tsm.servers[str(server.Id)] = server\n}\n\nfunc (sm *ServerMap) Get(id int) *Server {\n\tsm.mutex.Lock()\n\tdefer sm.mutex.Unlock()\n\treturn sm.servers[str(id)]\n}\n\nfunc (sm *ServerMap) Remove(id int) *Server {\n\tsm.mutex.Lock()\n\tdefer sm.mutex.Unlock()\n\tserver := sm.servers[str(id)]\n\tdelete(sm.servers, str(id))\n\treturn server\n}\n\nfunc (sm *ServerMap) Encode(w io.Writer) error {\n\tsm.mutex.Lock()\n\tdefer sm.mutex.Unlock()\n\terr := json.NewEncoder(w).Encode(sm.servers)\n\treturn err\n}\n\nfunc NewServerMap() *ServerMap {\n\treturn &ServerMap{\n\t\tservers: make(map[string]*Server),\n\t}\n}\n\nfunc runServer(server *Server) {\n\tapp := \".\/server\"\n\targs := []string{\n\t\t\"-client\", \"client\",\n\t\t\"-world\", worldDir(server.Id),\n\t\t\"-host\", \":\" + str(BASE_PORT+server.PortOffset),\n\t}\n\n\tcmd := exec.Command(app, args...)\n\tserver.Handle = cmd\n\tglobalServerMap.Put(server)\n\terr := cmd.Run()\n\n\tif err != nil {\n\t\tlog.Println(\"Error running the server:\", err)\n\t}\n}\n\ntype ApiGeneric struct {\n\tServerId int\n}\n\ntype ApiCreate struct {\n\tCreatorId int\n\tServerName string\n}\n\ntype ServerListResponse struct {\n\tList map[string]Server\n}\n\nfunc parseGenericRequest(r *http.Request) (ApiGeneric, error) {\n\tvar result ApiGeneric\n\terr := json.NewDecoder(r.Body).Decode(&result)\n\tif err != nil {\n\t\tlog.Println(\"Error while parsing generic request\", err)\n\t\treturn result, err\n\t}\n\treturn result, nil\n}\n\nfunc getHandler(w http.ResponseWriter, r *http.Request) {\n\trequest, err := parseGenericRequest(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tserver := globalServerMap.Get(request.ServerId)\n\tif server == nil {\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\terr = json.NewEncoder(w).Encode(server)\n\tif err != nil {\n\t\tlog.Println(\"Error marshalling server\")\n\t\thttp.Error(w, err.Error(), 500)\n\t}\n}\n\nfunc listHandler(w http.ResponseWriter, r *http.Request) {\n\terr := globalServerMap.Encode(w)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t}\n}\n\nfunc createHandler(w http.ResponseWriter, r *http.Request) {\n\tvar request ApiCreate\n\terr := json.NewDecoder(r.Body).Decode(&request)\n\tif err != nil {\n\t\tlog.Println(\"Error parsing create request\", err)\n\t\thttp.Error(w, err.Error(), 500)\n\t}\n\n\tserver := NewServer(-1, request.CreatorId, request.ServerName)\n\tsaveServer(server)\n\tgo runServer(server)\n}\n\nfunc deleteHandler(w http.ResponseWriter, r *http.Request) {\n\trequest, err := parseGenericRequest(r)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tserver := globalServerMap.Remove(request.ServerId)\n\terr = server.Handle.Process.Kill()\n\tif err != nil {\n\t\tlog.Println(\"Error while sending SIG_DEATH to running server\", err)\n\t}\n\t_, err = server.Handle.Process.Wait()\n\tif err != nil {\n\t\tlog.Println(\"Error while waiting on process.\", err)\n\t}\n\tglobalPortMapper.freePort(server.PortOffset)\n}\n\nfunc saveServer(server *Server) {\n\terr := os.MkdirAll(worldDir(server.Id), 0755)\n\tif err != nil {\n\t\tlog.Println(\"Error creating required directories:\", err)\n\t\treturn\n\t}\n\n\tfile, err := os.Create(path.Join(worldDir(server.Id), \"meta.server\"))\n\tif err != nil {\n\t\tlog.Println(\"Error creating file to save server:\", err)\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\terr = json.NewEncoder(file).Encode(server)\n\tif err != nil {\n\t\tlog.Println(\"Error encoding meta data.\", err)\n\t}\n}\n\nfunc loadServers() {\n\tfiles, err := ioutil.ReadDir(globalWorldBaseDir)\n\tif err != nil {\n\t\tlog.Println(\"Error reading base world directory\")\n\t\treturn\n\t}\n\n\tfor _, fileInfo := range files {\n\t\tloadServer(fileInfo)\n\t}\n}\n\nfunc loadServer(fileInfo os.FileInfo) {\n\tif !fileInfo.IsDir() {\n\t\treturn\n\t}\n\n\tfile, err := os.Open(path.Join(globalWorldBaseDir, fileInfo.Name(), \"meta.server\"))\n\tif err != nil {\n\t\tlog.Println(\"Error opening server meta data:\", err)\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\tvar server Server\n\terr = json.NewDecoder(file).Decode(&server)\n\tif err != nil {\n\t\tlog.Println(\"Error while parsing meta json for\", fileInfo.Name(), err)\n\t\treturn\n\t}\n\n\tserver.PortOffset = globalPortMapper.getPort()\n\tglobalIdSequence.updateSequence(server.Id)\n\tgo runServer(&server)\n}\n\nfunc main() {\n\tworldFolder := flag.String(\"worlds\", \"worlds\/\", \"Sets the base folder used to store the worlds.\")\n\tflag.Parse()\n\tglobalWorldBaseDir = *worldFolder\n\n\tloadServers()\n\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\thttp.HandleFunc(\"\/get\", getHandler)\n\thttp.HandleFunc(\"\/list\", listHandler)\n\thttp.HandleFunc(\"\/create\", createHandler)\n\thttp.HandleFunc(\"\/delete\", deleteHandler)\n\terr := http.ListenAndServe(\":3001\", nil)\n\tif err != nil {\n\t\tlog.Fatal(\"ListenAndServe:\", err)\n\t}\n}\n\n\/\/ Utility function\nfunc str(i int) string {\n\treturn fmt.Sprintf(\"%d\", i)\n}\n\nfunc worldDir(serverId int) string {\n\treturn path.Join(globalWorldBaseDir, \"world\"+str(serverId))\n}\n<|endoftext|>"} {"text":"<commit_before>package slack\n\nimport (\n\t\"context\"\n\t\"net\/url\"\n\t\"strconv\"\n)\n\nconst (\n\tDEFAULT_STARS_USER = \"\"\n\tDEFAULT_STARS_COUNT = 100\n\tDEFAULT_STARS_PAGE = 1\n)\n\ntype StarsParameters struct {\n\tUser string\n\tCount int\n\tPage int\n}\n\ntype StarredItem Item\n\ntype listResponseFull struct {\n\tItems []Item `json:\"items\"`\n\tPaging `json:\"paging\"`\n\tSlackResponse\n}\n\n\/\/ NewStarsParameters initialises StarsParameters with default values\nfunc NewStarsParameters() StarsParameters {\n\treturn StarsParameters{\n\t\tUser: DEFAULT_STARS_USER,\n\t\tCount: DEFAULT_STARS_COUNT,\n\t\tPage: DEFAULT_STARS_PAGE,\n\t}\n}\n\n\/\/ AddStar stars an item in a channel\nfunc (api *Client) AddStar(channel string, item ItemRef) error {\n\treturn api.AddStarContext(context.Background(), channel, item)\n}\n\n\/\/ AddStarContext stars an item in a channel with a custom context\nfunc (api *Client) AddStarContext(ctx context.Context, channel string, item ItemRef) error {\n\tvalues := url.Values{\n\t\t\"channel\": {channel},\n\t\t\"token\": {api.token},\n\t}\n\tif item.Timestamp != \"\" {\n\t\tvalues.Set(\"timestamp\", item.Timestamp)\n\t}\n\tif item.File != \"\" {\n\t\tvalues.Set(\"file\", item.File)\n\t}\n\tif item.Comment != \"\" {\n\t\tvalues.Set(\"file_comment\", item.Comment)\n\t}\n\n\tresponse := &SlackResponse{}\n\tif err := api.postMethod(ctx, \"stars.add\", values, response); err != nil {\n\t\treturn err\n\t}\n\n\treturn response.Err()\n}\n\n\/\/ RemoveStar removes a starred item from a channel\nfunc (api *Client) RemoveStar(channel string, item ItemRef) error {\n\treturn api.RemoveStarContext(context.Background(), channel, item)\n}\n\n\/\/ RemoveStarContext removes a starred item from a channel with a custom context\nfunc (api *Client) RemoveStarContext(ctx context.Context, channel string, item ItemRef) error {\n\tvalues := url.Values{\n\t\t\"channel\": {channel},\n\t\t\"token\": {api.token},\n\t}\n\tif item.Timestamp != \"\" {\n\t\tvalues.Set(\"timestamp\", item.Timestamp)\n\t}\n\tif item.File != \"\" {\n\t\tvalues.Set(\"file\", item.File)\n\t}\n\tif item.Comment != \"\" {\n\t\tvalues.Set(\"file_comment\", item.Comment)\n\t}\n\n\tresponse := &SlackResponse{}\n\tif err := api.postMethod(ctx, \"stars.remove\", values, response); err != nil {\n\t\treturn err\n\t}\n\n\treturn response.Err()\n}\n\n\/\/ ListStars returns information about the stars a user added\nfunc (api *Client) ListStars(params StarsParameters) ([]Item, *Paging, error) {\n\treturn api.ListStarsContext(context.Background(), params)\n}\n\n\/\/ ListStarsContext returns information about the stars a user added with a custom context\nfunc (api *Client) ListStarsContext(ctx context.Context, params StarsParameters) ([]Item, *Paging, error) {\n\tvalues := url.Values{\n\t\t\"token\": {api.token},\n\t}\n\tif params.User != DEFAULT_STARS_USER {\n\t\tvalues.Add(\"user\", params.User)\n\t}\n\tif params.Count != DEFAULT_STARS_COUNT {\n\t\tvalues.Add(\"count\", strconv.Itoa(params.Count))\n\t}\n\tif params.Page != DEFAULT_STARS_PAGE {\n\t\tvalues.Add(\"page\", strconv.Itoa(params.Page))\n\t}\n\n\tresponse := &listResponseFull{}\n\terr := api.postMethod(ctx, \"stars.list\", values, response)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif err := response.Err(); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn response.Items, &response.Paging, nil\n}\n\n\/\/ GetStarred returns a list of StarredItem items.\n\/\/\n\/\/ The user then has to iterate over them and figure out what they should\n\/\/ be looking at according to what is in the Type.\n\/\/ for _, item := range items {\n\/\/ switch c.Type {\n\/\/ case \"file_comment\":\n\/\/ log.Println(c.Comment)\n\/\/ case \"file\":\n\/\/ ...\n\/\/\n\/\/ }\n\/\/ This function still exists to maintain backwards compatibility.\n\/\/ I exposed it as returning []StarredItem, so it shall stay as StarredItem\nfunc (api *Client) GetStarred(params StarsParameters) ([]StarredItem, *Paging, error) {\n\treturn api.GetStarredContext(context.Background(), params)\n}\n\n\/\/ GetStarredContext returns a list of StarredItem items with a custom context\n\/\/\n\/\/ For more details see GetStarred\nfunc (api *Client) GetStarredContext(ctx context.Context, params StarsParameters) ([]StarredItem, *Paging, error) {\n\titems, paging, err := api.ListStarsContext(ctx, params)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tstarredItems := make([]StarredItem, len(items))\n\tfor i, item := range items {\n\t\tstarredItems[i] = StarredItem(item)\n\t}\n\treturn starredItems, paging, nil\n}\n\ntype listResponsePaginated struct {\n\tItems []Item `json:\"items\"`\n\tSlackResponse\n\tMetadata ResponseMetadata `json:\"response_metadata\"`\n}\n\n\/\/ StarredItemPagination allows for paginating over the starred items\ntype StarredItemPagination struct {\n\tItems []Item\n\tlimit int\n\tpreviousResp *ResponseMetadata\n\tc *Client\n}\n\n\/\/ ListStarsOption options for the GetUsers method call.\ntype ListStarsOption func(*StarredItemPagination)\n\n\/\/ ListAllStars returns the complete list of starred items\nfunc (api *Client) ListAllStars() ([]Item, error) {\n\treturn api.ListStarsPaginatedContext(context.Background())\n}\n\n\/\/ ListStarsPaginatedContext returns the list of users (with their detailed information) with a custom context\nfunc (api *Client) ListStarsPaginatedContext(ctx context.Context) (results []Item, err error) {\n\tvar p StarredItemPagination\n\n\tfor p = api.ListStarsPaginated(); !p.done(err); p, err = p.next(ctx) {\n\t\tresults = append(results, p.Items...)\n\t}\n\n\treturn results, p.failure(err)\n}\n\n\/\/ ListStarsPaginated fetches users in a paginated fashion, see ListStarsPaginationContext for usage.\nfunc (api *Client) ListStarsPaginated(options ...ListStarsOption) StarredItemPagination {\n\treturn newStarPagination(api, options...)\n}\n\nfunc newStarPagination(c *Client, options ...ListStarsOption) (sip StarredItemPagination) {\n\tsip = StarredItemPagination{\n\t\tc: c,\n\t\tlimit: 200, \/\/ per slack api documentation.\n\t}\n\n\tfor _, opt := range options {\n\t\topt(&sip)\n\t}\n\n\treturn sip\n}\n\n\/\/ done checks if the pagination has completed\nfunc (StarredItemPagination) done(err error) bool {\n\treturn err == errPaginationComplete\n}\n\n\/\/ done checks if pagination failed.\nfunc (t StarredItemPagination) failure(err error) error {\n\tif t.done(err) {\n\t\treturn nil\n\t}\n\n\treturn err\n}\n\n\/\/ next gets the next list of starred items based on the cursor value\nfunc (t StarredItemPagination) next(ctx context.Context) (_ StarredItemPagination, err error) {\n\tvar (\n\t\tresp *listResponsePaginated\n\t)\n\n\tif t.c == nil || (t.previousResp != nil && t.previousResp.Cursor == \"\") {\n\t\treturn t, errPaginationComplete\n\t}\n\n\tt.previousResp = t.previousResp.initialize()\n\n\tvalues := url.Values{\n\t\t\"limit\": {strconv.Itoa(t.limit)},\n\t\t\"token\": {t.c.token},\n\t\t\"cursor\": {t.previousResp.Cursor},\n\t}\n\n\tif err = t.c.postMethod(ctx, \"stars.list\", values, &resp); err != nil {\n\t\treturn t, err\n\t}\n\n\tt.previousResp = &resp.Metadata\n\tt.Items = resp.Items\n\n\treturn t, nil\n}\n<commit_msg>change ListStarsPaginatedContext to ListAllStarsContext<commit_after>package slack\n\nimport (\n\t\"context\"\n\t\"net\/url\"\n\t\"strconv\"\n)\n\nconst (\n\tDEFAULT_STARS_USER = \"\"\n\tDEFAULT_STARS_COUNT = 100\n\tDEFAULT_STARS_PAGE = 1\n)\n\ntype StarsParameters struct {\n\tUser string\n\tCount int\n\tPage int\n}\n\ntype StarredItem Item\n\ntype listResponseFull struct {\n\tItems []Item `json:\"items\"`\n\tPaging `json:\"paging\"`\n\tSlackResponse\n}\n\n\/\/ NewStarsParameters initialises StarsParameters with default values\nfunc NewStarsParameters() StarsParameters {\n\treturn StarsParameters{\n\t\tUser: DEFAULT_STARS_USER,\n\t\tCount: DEFAULT_STARS_COUNT,\n\t\tPage: DEFAULT_STARS_PAGE,\n\t}\n}\n\n\/\/ AddStar stars an item in a channel\nfunc (api *Client) AddStar(channel string, item ItemRef) error {\n\treturn api.AddStarContext(context.Background(), channel, item)\n}\n\n\/\/ AddStarContext stars an item in a channel with a custom context\nfunc (api *Client) AddStarContext(ctx context.Context, channel string, item ItemRef) error {\n\tvalues := url.Values{\n\t\t\"channel\": {channel},\n\t\t\"token\": {api.token},\n\t}\n\tif item.Timestamp != \"\" {\n\t\tvalues.Set(\"timestamp\", item.Timestamp)\n\t}\n\tif item.File != \"\" {\n\t\tvalues.Set(\"file\", item.File)\n\t}\n\tif item.Comment != \"\" {\n\t\tvalues.Set(\"file_comment\", item.Comment)\n\t}\n\n\tresponse := &SlackResponse{}\n\tif err := api.postMethod(ctx, \"stars.add\", values, response); err != nil {\n\t\treturn err\n\t}\n\n\treturn response.Err()\n}\n\n\/\/ RemoveStar removes a starred item from a channel\nfunc (api *Client) RemoveStar(channel string, item ItemRef) error {\n\treturn api.RemoveStarContext(context.Background(), channel, item)\n}\n\n\/\/ RemoveStarContext removes a starred item from a channel with a custom context\nfunc (api *Client) RemoveStarContext(ctx context.Context, channel string, item ItemRef) error {\n\tvalues := url.Values{\n\t\t\"channel\": {channel},\n\t\t\"token\": {api.token},\n\t}\n\tif item.Timestamp != \"\" {\n\t\tvalues.Set(\"timestamp\", item.Timestamp)\n\t}\n\tif item.File != \"\" {\n\t\tvalues.Set(\"file\", item.File)\n\t}\n\tif item.Comment != \"\" {\n\t\tvalues.Set(\"file_comment\", item.Comment)\n\t}\n\n\tresponse := &SlackResponse{}\n\tif err := api.postMethod(ctx, \"stars.remove\", values, response); err != nil {\n\t\treturn err\n\t}\n\n\treturn response.Err()\n}\n\n\/\/ ListStars returns information about the stars a user added\nfunc (api *Client) ListStars(params StarsParameters) ([]Item, *Paging, error) {\n\treturn api.ListStarsContext(context.Background(), params)\n}\n\n\/\/ ListStarsContext returns information about the stars a user added with a custom context\nfunc (api *Client) ListStarsContext(ctx context.Context, params StarsParameters) ([]Item, *Paging, error) {\n\tvalues := url.Values{\n\t\t\"token\": {api.token},\n\t}\n\tif params.User != DEFAULT_STARS_USER {\n\t\tvalues.Add(\"user\", params.User)\n\t}\n\tif params.Count != DEFAULT_STARS_COUNT {\n\t\tvalues.Add(\"count\", strconv.Itoa(params.Count))\n\t}\n\tif params.Page != DEFAULT_STARS_PAGE {\n\t\tvalues.Add(\"page\", strconv.Itoa(params.Page))\n\t}\n\n\tresponse := &listResponseFull{}\n\terr := api.postMethod(ctx, \"stars.list\", values, response)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif err := response.Err(); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treturn response.Items, &response.Paging, nil\n}\n\n\/\/ GetStarred returns a list of StarredItem items.\n\/\/\n\/\/ The user then has to iterate over them and figure out what they should\n\/\/ be looking at according to what is in the Type.\n\/\/ for _, item := range items {\n\/\/ switch c.Type {\n\/\/ case \"file_comment\":\n\/\/ log.Println(c.Comment)\n\/\/ case \"file\":\n\/\/ ...\n\/\/\n\/\/ }\n\/\/ This function still exists to maintain backwards compatibility.\n\/\/ I exposed it as returning []StarredItem, so it shall stay as StarredItem\nfunc (api *Client) GetStarred(params StarsParameters) ([]StarredItem, *Paging, error) {\n\treturn api.GetStarredContext(context.Background(), params)\n}\n\n\/\/ GetStarredContext returns a list of StarredItem items with a custom context\n\/\/\n\/\/ For more details see GetStarred\nfunc (api *Client) GetStarredContext(ctx context.Context, params StarsParameters) ([]StarredItem, *Paging, error) {\n\titems, paging, err := api.ListStarsContext(ctx, params)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tstarredItems := make([]StarredItem, len(items))\n\tfor i, item := range items {\n\t\tstarredItems[i] = StarredItem(item)\n\t}\n\treturn starredItems, paging, nil\n}\n\ntype listResponsePaginated struct {\n\tItems []Item `json:\"items\"`\n\tSlackResponse\n\tMetadata ResponseMetadata `json:\"response_metadata\"`\n}\n\n\/\/ StarredItemPagination allows for paginating over the starred items\ntype StarredItemPagination struct {\n\tItems []Item\n\tlimit int\n\tpreviousResp *ResponseMetadata\n\tc *Client\n}\n\n\/\/ ListStarsOption options for the GetUsers method call.\ntype ListStarsOption func(*StarredItemPagination)\n\n\/\/ ListAllStars returns the complete list of starred items\nfunc (api *Client) ListAllStars() ([]Item, error) {\n\treturn api.ListAllStarsContext(context.Background())\n}\n\n\/\/ ListAllStarsContext returns the list of users (with their detailed information) with a custom context\nfunc (api *Client) ListAllStarsContext(ctx context.Context) (results []Item, err error) {\n\tvar p StarredItemPagination\n\n\tfor p = api.ListStarsPaginated(); !p.done(err); p, err = p.next(ctx) {\n\t\tresults = append(results, p.Items...)\n\t}\n\n\treturn results, p.failure(err)\n}\n\n\/\/ ListStarsPaginated fetches users in a paginated fashion, see ListStarsPaginationContext for usage.\nfunc (api *Client) ListStarsPaginated(options ...ListStarsOption) StarredItemPagination {\n\treturn newStarPagination(api, options...)\n}\n\nfunc newStarPagination(c *Client, options ...ListStarsOption) (sip StarredItemPagination) {\n\tsip = StarredItemPagination{\n\t\tc: c,\n\t\tlimit: 200, \/\/ per slack api documentation.\n\t}\n\n\tfor _, opt := range options {\n\t\topt(&sip)\n\t}\n\n\treturn sip\n}\n\n\/\/ done checks if the pagination has completed\nfunc (StarredItemPagination) done(err error) bool {\n\treturn err == errPaginationComplete\n}\n\n\/\/ done checks if pagination failed.\nfunc (t StarredItemPagination) failure(err error) error {\n\tif t.done(err) {\n\t\treturn nil\n\t}\n\n\treturn err\n}\n\n\/\/ next gets the next list of starred items based on the cursor value\nfunc (t StarredItemPagination) next(ctx context.Context) (_ StarredItemPagination, err error) {\n\tvar (\n\t\tresp *listResponsePaginated\n\t)\n\n\tif t.c == nil || (t.previousResp != nil && t.previousResp.Cursor == \"\") {\n\t\treturn t, errPaginationComplete\n\t}\n\n\tt.previousResp = t.previousResp.initialize()\n\n\tvalues := url.Values{\n\t\t\"limit\": {strconv.Itoa(t.limit)},\n\t\t\"token\": {t.c.token},\n\t\t\"cursor\": {t.previousResp.Cursor},\n\t}\n\n\tif err = t.c.postMethod(ctx, \"stars.list\", values, &resp); err != nil {\n\t\treturn t, err\n\t}\n\n\tt.previousResp = &resp.Metadata\n\tt.Items = resp.Items\n\n\treturn t, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package jsonrpc2_test\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/rpc\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/powerman\/rpc-codec\/jsonrpc2\"\n)\n\n\/\/ Svc is an RPC service for testing.\ntype Svc struct{}\n\nfunc (*Svc) Sum(vals [2]int, res *int) error {\n\t*res = vals[0] + vals[1]\n\treturn nil\n}\n\nfunc init() {\n\t_ = rpc.Register(&Svc{})\n}\n\nfunc TestHTTPServer(t *testing.T) {\n\tconst jBad = `{}`\n\tconst jSum = `{\"jsonrpc\":\"2.0\",\"id\":0,\"method\":\"Svc.Sum\",\"params\":[3,5]}`\n\tconst jNotify = `{\"jsonrpc\":\"2.0\",\"method\":\"Svc.Sum\",\"params\":[3,5]}`\n\tconst jRes = `{\"jsonrpc\":\"2.0\",\"id\":0,\"result\":8}`\n\tconst jErr = `{\"jsonrpc\":\"2.0\",\"id\":null,\"error\":{\"code\":-32600,\"message\":\"invalid request\"}}`\n\tconst jParse = `{\"jsonrpc\":\"2.0\",\"id\":null,\"error\":{\"code\":-32700,\"message\":\"parse error\"}}`\n\tconst contentType = \"application\/json\"\n\n\tcases := []struct {\n\t\tmethod string\n\t\tcontentType string\n\t\taccept string\n\t\tbody string\n\t\tcode int\n\t\treply string\n\t}{\n\t\t{\"GET\", \"\", \"\", \"\", http.StatusMethodNotAllowed, \"\"},\n\t\t{\"POST\", contentType, \"\", jSum, http.StatusUnsupportedMediaType, \"\"},\n\t\t{\"POST\", \"text\/json\", contentType, jSum, http.StatusUnsupportedMediaType, \"\"},\n\t\t{\"PUT\", contentType, contentType, jSum, http.StatusMethodNotAllowed, \"\"},\n\t\t{\"POST\", contentType, contentType, jNotify, http.StatusNoContent, \"\"},\n\t\t{\"POST\", contentType, contentType, jSum, http.StatusOK, jRes},\n\t\t{\"POST\", contentType, contentType, jBad, http.StatusOK, jErr},\n\t\t{\"POST\", contentType, contentType, \"\", http.StatusOK, jParse},\n\t\t{\"POST\", contentType, contentType, \" \", http.StatusOK, jParse},\n\t\t{\"POST\", contentType, contentType, \"{\", http.StatusOK, jParse},\n\t\t{\"POST\", contentType, contentType, `{\"jsonrpc\":\"2.0\",`, http.StatusOK, jParse},\n\t\t{\"POST\", contentType + \"; charset=utf-8\", contentType, jSum, http.StatusOK, jRes},\n\t}\n\n\tts := httptest.NewServer(jsonrpc2.HTTPHandler(nil))\n\t\/\/ Don't close because of https:\/\/github.com\/golang\/go\/issues\/12262\n\t\/\/ defer ts.Close()\n\n\tfor _, c := range cases {\n\t\treq, err := http.NewRequest(c.method, ts.URL, strings.NewReader(c.body))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"NewRequest(%s %s), err = %v\", c.method, ts.URL, err)\n\t\t}\n\t\tif c.contentType != \"\" {\n\t\t\treq.Header.Add(\"Content-Type\", c.contentType)\n\t\t}\n\t\tif c.accept != \"\" {\n\t\t\treq.Header.Add(\"Accept\", c.accept)\n\t\t}\n\t\tresp, err := (&http.Client{}).Do(req)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Do(%s %s), err = %v\", c.method, ts.URL, err)\n\t\t}\n\t\tif resp.StatusCode != c.code {\n\t\t\tt.Errorf(\"Do(%s %s), status = %v, want = %v\", c.method, ts.URL, resp.StatusCode, c.code)\n\t\t}\n\t\tif resp.Header.Get(\"Content-Type\") != contentType {\n\t\t\tt.Errorf(\"Do(%s %s), Content-Type = %q, want = %q\", c.method, ts.URL, resp.Header.Get(\"Content-Type\"), contentType)\n\t\t}\n\t\tgot, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"ReadAll(), err = %v\", err)\n\t\t}\n\t\tif c.reply == \"\" {\n\t\t\tif len(got) != 0 {\n\t\t\t\tt.Errorf(\"Do(%s %s)\\nexp: %#q\\ngot: %#q\", c.method, ts.URL, c.reply, string(bytes.TrimRight(got, \"\\n\")))\n\t\t\t}\n\t\t} else {\n\t\t\tvar jgot, jwant interface{}\n\t\t\tif err := json.Unmarshal(got, &jgot); err != nil {\n\t\t\t\tt.Errorf(\"Do(%s %s), output err = %v\\ngot: %#q\", c.method, ts.URL, err, string(bytes.TrimRight(got, \"\\n\")))\n\t\t\t}\n\t\t\tif err := json.Unmarshal([]byte(c.reply), &jwant); err != nil {\n\t\t\t\tt.Errorf(\"Do(%s %s), expect err = %v\\nexp: %#q\", c.method, ts.URL, err, c.reply)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(jgot, jwant) {\n\t\t\t\tt.Errorf(\"Do(%s %s)\\nexp: %#q\\ngot: %#q\", c.method, ts.URL, c.reply, string(bytes.TrimRight(got, \"\\n\")))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestHTTPClient(t *testing.T) {\n\tts := httptest.NewServer(jsonrpc2.HTTPHandler(nil))\n\t\/\/ Don't close because of https:\/\/github.com\/golang\/go\/issues\/12262\n\t\/\/ defer ts.Close()\n\tclient := jsonrpc2.NewHTTPClient(ts.URL)\n\tdefer client.Close()\n\n\tvar in [2]int\n\tvar got, want int\n\n\tin, want = [2]int{1, 2}, 3\n\terr := client.Call(\"Svc.Sum\", in, &got)\n\tif err != nil {\n\t\tt.Errorf(\"Call(%v), err = %v\", in, err)\n\t}\n\tif got != want {\n\t\tt.Errorf(\"Call(%v) = %v, want = %v\", in, got, want)\n\t}\n\n\tin = [2]int{2, 3}\n\terr = client.Notify(\"Svc.Sum\", in)\n\tif err != nil {\n\t\tt.Errorf(\"Notify(%v), err = %v\", in, err)\n\t}\n\n\tin, want = [2]int{3, 4}, 7\n\terr = client.Call(\"Svc.Sum\", in, &got)\n\tif err != nil {\n\t\tt.Errorf(\"Call(%v), err = %v\", in, err)\n\t}\n\tif got != want {\n\t\tt.Errorf(\"Call(%v) = %v, want = %v\", in, got, want)\n\t}\n}\n\ntype ContentTypeHandler string\n\nfunc (h ContentTypeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", string(h))\n\tio.WriteString(w, `{\"jsonrpc\":\"2.0\",\"id\":0,\"result\":8}`)\n}\n\nfunc TestHTTPClientContentType(t *testing.T) {\n\tconst contentType = \"application\/json\"\n\tcases := []struct {\n\t\tcontentType string\n\t\terrorString string\n\t}{\n\t\t{contentType, \"<nil>\"},\n\t\t{contentType + \"; charset=utf-8\", \"<nil>\"},\n\t\t{contentType + \"; bad\", `{\"code\":-32603,\"message\":\"bad HTTP Content-Type: application\/json; bad\"}`},\n\t\t{contentType + \"fail\", `{\"code\":-32603,\"message\":\"bad HTTP Content-Type: application\/jsonfail\"}`},\n\t}\n\n\tfor _, c := range cases {\n\t\tts := httptest.NewServer(ContentTypeHandler(c.contentType))\n\n\t\tclient := jsonrpc2.NewHTTPClient(ts.URL)\n\t\tin := []string{\"ads\"}\n\t\tvar got int\n\t\terr := client.Call(\"Svc.Sum\", in, &got)\n\t\tactualErrString := fmt.Sprintf(\"%v\", err)\n\t\tif actualErrString != c.errorString {\n\t\t\tt.Errorf(\"Unexpected result. exp: %#q\\ngot: %#q\", actualErrString, c.errorString)\n\t\t}\n\t}\n}\n<commit_msg>improve test<commit_after>package jsonrpc2_test\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"net\/rpc\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/powerman\/rpc-codec\/jsonrpc2\"\n)\n\n\/\/ Svc is an RPC service for testing.\ntype Svc struct{}\n\nfunc (*Svc) Sum(vals [2]int, res *int) error {\n\t*res = vals[0] + vals[1]\n\treturn nil\n}\n\nfunc init() {\n\t_ = rpc.Register(&Svc{})\n}\n\nfunc TestHTTPServer(t *testing.T) {\n\tconst jBad = `{}`\n\tconst jSum = `{\"jsonrpc\":\"2.0\",\"id\":0,\"method\":\"Svc.Sum\",\"params\":[3,5]}`\n\tconst jNotify = `{\"jsonrpc\":\"2.0\",\"method\":\"Svc.Sum\",\"params\":[3,5]}`\n\tconst jRes = `{\"jsonrpc\":\"2.0\",\"id\":0,\"result\":8}`\n\tconst jErr = `{\"jsonrpc\":\"2.0\",\"id\":null,\"error\":{\"code\":-32600,\"message\":\"invalid request\"}}`\n\tconst jParse = `{\"jsonrpc\":\"2.0\",\"id\":null,\"error\":{\"code\":-32700,\"message\":\"parse error\"}}`\n\tconst contentType = \"application\/json\"\n\n\tcases := []struct {\n\t\tmethod string\n\t\tcontentType string\n\t\taccept string\n\t\tbody string\n\t\tcode int\n\t\treply string\n\t}{\n\t\t{\"GET\", \"\", \"\", \"\", http.StatusMethodNotAllowed, \"\"},\n\t\t{\"POST\", contentType, \"\", jSum, http.StatusUnsupportedMediaType, \"\"},\n\t\t{\"POST\", \"text\/json\", contentType, jSum, http.StatusUnsupportedMediaType, \"\"},\n\t\t{\"PUT\", contentType, contentType, jSum, http.StatusMethodNotAllowed, \"\"},\n\t\t{\"POST\", contentType, contentType, jNotify, http.StatusNoContent, \"\"},\n\t\t{\"POST\", contentType, contentType, jSum, http.StatusOK, jRes},\n\t\t{\"POST\", contentType, contentType, jBad, http.StatusOK, jErr},\n\t\t{\"POST\", contentType, contentType, \"\", http.StatusOK, jParse},\n\t\t{\"POST\", contentType, contentType, \" \", http.StatusOK, jParse},\n\t\t{\"POST\", contentType, contentType, \"{\", http.StatusOK, jParse},\n\t\t{\"POST\", contentType, contentType, `{\"jsonrpc\":\"2.0\",`, http.StatusOK, jParse},\n\t\t{\"POST\", contentType + \"; charset=utf-8\", contentType, jSum, http.StatusOK, jRes},\n\t}\n\n\tts := httptest.NewServer(jsonrpc2.HTTPHandler(nil))\n\t\/\/ Don't close because of https:\/\/github.com\/golang\/go\/issues\/12262\n\t\/\/ defer ts.Close()\n\n\tfor _, c := range cases {\n\t\treq, err := http.NewRequest(c.method, ts.URL, strings.NewReader(c.body))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"NewRequest(%s %s), err = %v\", c.method, ts.URL, err)\n\t\t}\n\t\tif c.contentType != \"\" {\n\t\t\treq.Header.Add(\"Content-Type\", c.contentType)\n\t\t}\n\t\tif c.accept != \"\" {\n\t\t\treq.Header.Add(\"Accept\", c.accept)\n\t\t}\n\t\tresp, err := (&http.Client{}).Do(req)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Do(%s %s), err = %v\", c.method, ts.URL, err)\n\t\t}\n\t\tif resp.StatusCode != c.code {\n\t\t\tt.Errorf(\"Do(%s %s), status = %v, want = %v\", c.method, ts.URL, resp.StatusCode, c.code)\n\t\t}\n\t\tif resp.Header.Get(\"Content-Type\") != contentType {\n\t\t\tt.Errorf(\"Do(%s %s), Content-Type = %q, want = %q\", c.method, ts.URL, resp.Header.Get(\"Content-Type\"), contentType)\n\t\t}\n\t\tgot, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"ReadAll(), err = %v\", err)\n\t\t}\n\t\tif c.reply == \"\" {\n\t\t\tif len(got) != 0 {\n\t\t\t\tt.Errorf(\"Do(%s %s)\\nexp: %#q\\ngot: %#q\", c.method, ts.URL, c.reply, string(bytes.TrimRight(got, \"\\n\")))\n\t\t\t}\n\t\t} else {\n\t\t\tvar jgot, jwant interface{}\n\t\t\tif err := json.Unmarshal(got, &jgot); err != nil {\n\t\t\t\tt.Errorf(\"Do(%s %s), output err = %v\\ngot: %#q\", c.method, ts.URL, err, string(bytes.TrimRight(got, \"\\n\")))\n\t\t\t}\n\t\t\tif err := json.Unmarshal([]byte(c.reply), &jwant); err != nil {\n\t\t\t\tt.Errorf(\"Do(%s %s), expect err = %v\\nexp: %#q\", c.method, ts.URL, err, c.reply)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(jgot, jwant) {\n\t\t\t\tt.Errorf(\"Do(%s %s)\\nexp: %#q\\ngot: %#q\", c.method, ts.URL, c.reply, string(bytes.TrimRight(got, \"\\n\")))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestHTTPClient(t *testing.T) {\n\tts := httptest.NewServer(jsonrpc2.HTTPHandler(nil))\n\t\/\/ Don't close because of https:\/\/github.com\/golang\/go\/issues\/12262\n\t\/\/ defer ts.Close()\n\tclient := jsonrpc2.NewHTTPClient(ts.URL)\n\tdefer client.Close()\n\n\tvar in [2]int\n\tvar got, want int\n\n\tin, want = [2]int{1, 2}, 3\n\terr := client.Call(\"Svc.Sum\", in, &got)\n\tif err != nil {\n\t\tt.Errorf(\"Call(%v), err = %v\", in, err)\n\t}\n\tif got != want {\n\t\tt.Errorf(\"Call(%v) = %v, want = %v\", in, got, want)\n\t}\n\n\tin = [2]int{2, 3}\n\terr = client.Notify(\"Svc.Sum\", in)\n\tif err != nil {\n\t\tt.Errorf(\"Notify(%v), err = %v\", in, err)\n\t}\n\n\tin, want = [2]int{3, 4}, 7\n\terr = client.Call(\"Svc.Sum\", in, &got)\n\tif err != nil {\n\t\tt.Errorf(\"Call(%v), err = %v\", in, err)\n\t}\n\tif got != want {\n\t\tt.Errorf(\"Call(%v) = %v, want = %v\", in, got, want)\n\t}\n}\n\ntype ContentTypeHandler string\n\nfunc (h ContentTypeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", string(h))\n\tio.WriteString(w, `{\"jsonrpc\":\"2.0\",\"id\":0,\"result\":8}`)\n}\n\nfunc TestHTTPClientContentType(t *testing.T) {\n\tconst contentType = \"application\/json\"\n\tcases := []struct {\n\t\tcontentType string\n\t\twanterr *jsonrpc2.Error\n\t}{\n\t\t{contentType, nil},\n\t\t{contentType + \"; charset=utf-8\", nil},\n\t\t{contentType + \"; bad\", jsonrpc2.NewError(-32603, \"bad HTTP Content-Type: \"+contentType+\"; bad\")},\n\t\t{contentType + \"rpc\", jsonrpc2.NewError(-32603, \"bad HTTP Content-Type: \"+contentType+\"rpc\")},\n\t}\n\n\tfor _, v := range cases {\n\t\tts := httptest.NewServer(ContentTypeHandler(v.contentType))\n\t\tclient := jsonrpc2.NewHTTPClient(ts.URL)\n\n\t\terr := jsonrpc2.ServerError(client.Call(\"Svc.Sum\", [2]int{3, 4}, nil))\n\t\tif fmt.Sprintf(\"%#v\", err) != fmt.Sprintf(\"%#v\", v.wanterr) {\n\t\t\tt.Errorf(\"%q, err = %#v, want %#v\", v.contentType, err, v.wanterr)\n\t\t}\n\n\t\tclient.Close()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/etcdserver\"\n\tpb \"github.com\/coreos\/etcd\/etcdserver\/etcdserverpb\"\n\t\"github.com\/coreos\/etcd\/mvcc\/backend\"\n\t\"github.com\/coreos\/etcd\/mvcc\/mvccpb\"\n\t\"github.com\/coreos\/etcd\/pkg\/pbutil\"\n\t\"github.com\/coreos\/etcd\/raft\/raftpb\"\n\t\"github.com\/coreos\/etcd\/snap\"\n\t\"github.com\/coreos\/etcd\/store\"\n\t\"github.com\/coreos\/etcd\/wal\"\n\t\"github.com\/coreos\/etcd\/wal\/walpb\"\n)\n\nvar (\n\tmigrateDatadir string\n\tttl time.Duration\n)\n\nfunc init() {\n\tflag.StringVar(&migrateDatadir, \"data-dir\", \"\", \"Path to the data directory\")\n\tflag.DurationVar(&ttl, \"ttl\", time.Hour, \"TTL of event keys (default 1 hour)\")\n\tflag.Parse()\n}\n\nfunc main() {\n\tdbpath := path.Join(migrateDatadir, \"member\", \"snap\", \"db\")\n\n\tbe := backend.New(dbpath, time.Second, 10000)\n\ttx := be.BatchTx()\n\n\tst := store.New(\"\/0\", \"\/1\")\n\texpireTime := time.Now().Add(ttl)\n\n\ttx.Lock()\n\terr := tx.UnsafeForEach([]byte(\"key\"), func(k, v []byte) error {\n\t\ttb := isTombstone(k)\n\n\t\tkv := &mvccpb.KeyValue{}\n\t\tkv.Unmarshal(v)\n\n\t\t\/\/ This is a compact key..\n\t\tif !strings.HasPrefix(string(kv.Key), \"\/\") {\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ fmt.Printf(\"%s %d %d %d\\n\", string(kv.Key), kv.CreateRevision, kv.ModRevision, kv.Lease)\n\t\tttlOpt := store.TTLOptionSet{}\n\t\tif kv.Lease != 0 {\n\t\t\tttlOpt = store.TTLOptionSet{ExpireTime: expireTime}\n\t\t}\n\n\t\tif !tb {\n\t\t\t_, err := st.Set(path.Join(\"1\", string(kv.Key)), false, string(kv.Value), ttlOpt)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tst.Delete(string(kv.Key), false, false)\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\ttx.Unlock()\n\n\ttraverse(st, \"\/\")\n\n\tsnapshotter := snap.New(path.Join(migrateDatadir, \"member\", \"snap\"))\n\traftSnap, err := snapshotter.Load()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tmetadata, hardstate, oldSt := rebuild(migrateDatadir)\n\n\tif err := os.RemoveAll(migrateDatadir); err != nil {\n\t\tpanic(err)\n\t}\n\tif err := os.MkdirAll(path.Join(migrateDatadir, \"member\", \"snap\"), 0700); err != nil {\n\t\tpanic(err)\n\t}\n\twalDir := path.Join(migrateDatadir, \"member\", \"wal\")\n\n\tw, err := wal.Create(walDir, metadata)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = w.SaveSnapshot(walpb.Snapshot{Index: hardstate.Commit, Term: hardstate.Term})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tw.Close()\n\n\tevent, err := oldSt.Get(\"\/0\", true, false)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tq := []*store.NodeExtern{}\n\tq = append(q, event.Node)\n\tfor len(q) > 0 {\n\t\tn := q[0]\n\t\tq = q[1:]\n\t\tv := \"\"\n\t\tif !n.Dir {\n\t\t\tv = *n.Value\n\t\t}\n\t\tif n.Key != \"\/0\" {\n\t\t\tif n.Key == path.Join(\"\/0\", \"version\") {\n\t\t\t\tv = \"2.3.7\"\n\t\t\t}\n\t\t\tif _, err := st.Set(n.Key, n.Dir, v, store.TTLOptionSet{}); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t\tfor _, next := range n.Nodes {\n\t\t\tq = append(q, next)\n\t\t}\n\t}\n\n\tdata, err := st.Save()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\traftSnap.Data = data\n\tif err := snapshotter.SaveSnap(*raftSnap); err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(\"Finished.\")\n}\n\nfunc printNode(node *store.NodeExtern) {\n\t\/\/ fmt.Println(node.Key[len(\"\/1\"):])\n\t\/\/ fmt.Printf(\"key:%s ttl:%d mod_index:%d\\n\", node.Key[len(\"\/1\"):], node.TTL, node.ModifiedIndex)\n}\n\nconst (\n\trevBytesLen = 8 + 1 + 8\n\tmarkedRevBytesLen = revBytesLen + 1\n\tmarkBytePosition = markedRevBytesLen - 1\n\tmarkTombstone byte = 't'\n)\n\nfunc isTombstone(b []byte) bool {\n\treturn len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone\n}\n\nfunc traverse(st store.Store, dir string) {\n\te, err := st.Get(dir, true, false)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(e.Node.Nodes) == 0 {\n\t\tst.Delete(dir, true, true)\n\t\treturn\n\t}\n\tfor _, node := range e.Node.Nodes {\n\t\tif !node.Dir {\n\t\t\tprintNode(node)\n\t\t} else {\n\t\t\t\/\/ fmt.Println(node.Key[len(\"\/1\"):])\n\t\t\ttraverse(st, node.Key)\n\t\t}\n\t}\n}\n\nfunc rebuild(datadir string) ([]byte, raftpb.HardState, store.Store) {\n\twaldir := path.Join(datadir, \"member\", \"wal\")\n\tsnapdir := path.Join(datadir, \"member\", \"snap\")\n\n\tss := snap.New(snapdir)\n\tsnapshot, err := ss.Load()\n\tif err != nil && err != snap.ErrNoSnapshot {\n\t\tpanic(err)\n\t}\n\n\tvar walsnap walpb.Snapshot\n\tif snapshot != nil {\n\t\twalsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term\n\t}\n\n\tw, err := wal.OpenForRead(waldir, walsnap)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer w.Close()\n\n\tmeta, hardstate, ents, err := w.ReadAll()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tst := store.New()\n\tif snapshot != nil {\n\t\terr := st.Recovery(snapshot.Data)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tapplier := etcdserver.NewApplierV2(st, nil)\n\tfor _, ent := range ents {\n\t\tif ent.Type != raftpb.EntryNormal {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar raftReq pb.InternalRaftRequest\n\t\tif !pbutil.MaybeUnmarshal(&raftReq, ent.Data) { \/\/ backward compatible\n\t\t\tvar r pb.Request\n\t\t\tpbutil.MustUnmarshal(&r, ent.Data)\n\t\t\tapplyRequest(&r, applier)\n\t\t} else {\n\t\t\tif raftReq.V2 != nil {\n\t\t\t\treq := raftReq.V2\n\t\t\t\tapplyRequest(req, applier)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn meta, hardstate, st\n}\n\nfunc toTTLOptions(r *pb.Request) store.TTLOptionSet {\n\trefresh, _ := pbutil.GetBool(r.Refresh)\n\tttlOptions := store.TTLOptionSet{Refresh: refresh}\n\tif r.Expiration != 0 {\n\t\tttlOptions.ExpireTime = time.Unix(0, r.Expiration)\n\t}\n\treturn ttlOptions\n}\n\nfunc applyRequest(r *pb.Request, applyV2 etcdserver.ApplierV2) {\n\ttoTTLOptions(r)\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tapplyV2.Post(r)\n\tcase \"PUT\":\n\t\tapplyV2.Put(r)\n\tcase \"DELETE\":\n\t\tapplyV2.Delete(r)\n\tcase \"QGET\":\n\t\tapplyV2.QGet(r)\n\tcase \"SYNC\":\n\t\tapplyV2.Sync(r)\n\tdefault:\n\t\tpanic(\"unknown command\")\n\t}\n}\n<commit_msg>make 3 members work<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/etcdserver\"\n\tpb \"github.com\/coreos\/etcd\/etcdserver\/etcdserverpb\"\n\t\"github.com\/coreos\/etcd\/mvcc\/backend\"\n\t\"github.com\/coreos\/etcd\/mvcc\/mvccpb\"\n\t\"github.com\/coreos\/etcd\/pkg\/pbutil\"\n\t\"github.com\/coreos\/etcd\/raft\/raftpb\"\n\t\"github.com\/coreos\/etcd\/snap\"\n\t\"github.com\/coreos\/etcd\/store\"\n\t\"github.com\/coreos\/etcd\/wal\"\n\t\"github.com\/coreos\/etcd\/wal\/walpb\"\n)\n\nvar (\n\tmigrateDatadir string\n\tttl time.Duration\n)\n\nfunc init() {\n\tflag.StringVar(&migrateDatadir, \"data-dir\", \"\", \"Path to the data directory\")\n\tflag.DurationVar(&ttl, \"ttl\", time.Hour, \"TTL of event keys (default 1 hour)\")\n\tflag.Parse()\n}\n\nfunc main() {\n\tdbpath := path.Join(migrateDatadir, \"member\", \"snap\", \"db\")\n\n\tbe := backend.New(dbpath, time.Second, 10000)\n\ttx := be.BatchTx()\n\n\tst := store.New(\"\/0\", \"\/1\")\n\texpireTime := time.Now().Add(ttl)\n\n\ttx.Lock()\n\terr := tx.UnsafeForEach([]byte(\"key\"), func(k, v []byte) error {\n\t\ttb := isTombstone(k)\n\n\t\tkv := &mvccpb.KeyValue{}\n\t\tkv.Unmarshal(v)\n\n\t\t\/\/ This is a compact key..\n\t\tif !strings.HasPrefix(string(kv.Key), \"\/\") {\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ fmt.Printf(\"%s %d %d %d\\n\", string(kv.Key), kv.CreateRevision, kv.ModRevision, kv.Lease)\n\t\tttlOpt := store.TTLOptionSet{}\n\t\tif kv.Lease != 0 {\n\t\t\tttlOpt = store.TTLOptionSet{ExpireTime: expireTime}\n\t\t}\n\n\t\tif !tb {\n\t\t\t_, err := st.Set(path.Join(\"1\", string(kv.Key)), false, string(kv.Value), ttlOpt)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tst.Delete(string(kv.Key), false, false)\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\ttx.Unlock()\n\n\ttraverse(st, \"\/\")\n\n\tsnapshotter := snap.New(path.Join(migrateDatadir, \"member\", \"snap\"))\n\traftSnap, err := snapshotter.Load()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tmetadata, hardstate, oldSt := rebuild(migrateDatadir)\n\n\tif err := os.RemoveAll(migrateDatadir); err != nil {\n\t\tpanic(err)\n\t}\n\tif err := os.MkdirAll(path.Join(migrateDatadir, \"member\", \"snap\"), 0700); err != nil {\n\t\tpanic(err)\n\t}\n\twalDir := path.Join(migrateDatadir, \"member\", \"wal\")\n\n\tw, err := wal.Create(walDir, metadata)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = w.SaveSnapshot(walpb.Snapshot{Index: hardstate.Commit, Term: hardstate.Term})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tw.Close()\n\n\tnodeIDs := []uint64{}\n\tevent, err := oldSt.Get(\"\/0\", true, false)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tq := []*store.NodeExtern{}\n\tq = append(q, event.Node)\n\tfor len(q) > 0 {\n\t\tn := q[0]\n\t\tq = q[1:]\n\t\tv := \"\"\n\t\tif !n.Dir {\n\t\t\tv = *n.Value\n\t\t}\n\t\tif n.Key != \"\/0\" {\n\t\t\tif n.Key == path.Join(\"\/0\", \"version\") {\n\t\t\t\tv = \"2.3.7\"\n\t\t\t}\n\t\t\tif _, err := st.Set(n.Key, n.Dir, v, store.TTLOptionSet{}); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tfields := strings.Split(n.Key, \"\/\")\n\t\t\tif len(fields) == 4 && fields[2] == \"members\" {\n\t\t\t\tnodeID, err := strconv.ParseUint(fields[3], 16, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(\"wrong ID: %s\", fields[3])\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tnodeIDs = append(nodeIDs, nodeID)\n\t\t\t}\n\t\t}\n\t\tfor _, next := range n.Nodes {\n\t\t\tq = append(q, next)\n\t\t}\n\t}\n\n\tdata, err := st.Save()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\traftSnap.Data = data\n\traftSnap.Metadata.Index = hardstate.Commit\n\traftSnap.Metadata.Term = hardstate.Term\n\t\/\/ fill in nodes by iterating all members in \/0 namespace with their IDs\n\traftSnap.Metadata.ConfState.Nodes = nodeIDs\n\tif err := snapshotter.SaveSnap(*raftSnap); err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(\"Finished.\")\n}\n\nfunc printNode(node *store.NodeExtern) {\n\t\/\/ fmt.Println(node.Key[len(\"\/1\"):])\n\t\/\/ fmt.Printf(\"key:%s ttl:%d mod_index:%d\\n\", node.Key[len(\"\/1\"):], node.TTL, node.ModifiedIndex)\n}\n\nconst (\n\trevBytesLen = 8 + 1 + 8\n\tmarkedRevBytesLen = revBytesLen + 1\n\tmarkBytePosition = markedRevBytesLen - 1\n\tmarkTombstone byte = 't'\n)\n\nfunc isTombstone(b []byte) bool {\n\treturn len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone\n}\n\nfunc traverse(st store.Store, dir string) {\n\te, err := st.Get(dir, true, false)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(e.Node.Nodes) == 0 {\n\t\tst.Delete(dir, true, true)\n\t\treturn\n\t}\n\tfor _, node := range e.Node.Nodes {\n\t\tif !node.Dir {\n\t\t\tprintNode(node)\n\t\t} else {\n\t\t\t\/\/ fmt.Println(node.Key[len(\"\/1\"):])\n\t\t\ttraverse(st, node.Key)\n\t\t}\n\t}\n}\n\nfunc rebuild(datadir string) ([]byte, raftpb.HardState, store.Store) {\n\twaldir := path.Join(datadir, \"member\", \"wal\")\n\tsnapdir := path.Join(datadir, \"member\", \"snap\")\n\n\tss := snap.New(snapdir)\n\tsnapshot, err := ss.Load()\n\tif err != nil && err != snap.ErrNoSnapshot {\n\t\tpanic(err)\n\t}\n\n\tvar walsnap walpb.Snapshot\n\tif snapshot != nil {\n\t\twalsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term\n\t}\n\n\tw, err := wal.OpenForRead(waldir, walsnap)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer w.Close()\n\n\tmeta, hardstate, ents, err := w.ReadAll()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tst := store.New()\n\tif snapshot != nil {\n\t\terr := st.Recovery(snapshot.Data)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\tapplier := etcdserver.NewApplierV2(st, nil)\n\tfor _, ent := range ents {\n\t\tif ent.Type != raftpb.EntryNormal {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar raftReq pb.InternalRaftRequest\n\t\tif !pbutil.MaybeUnmarshal(&raftReq, ent.Data) { \/\/ backward compatible\n\t\t\tvar r pb.Request\n\t\t\tpbutil.MustUnmarshal(&r, ent.Data)\n\t\t\tapplyRequest(&r, applier)\n\t\t} else {\n\t\t\tif raftReq.V2 != nil {\n\t\t\t\treq := raftReq.V2\n\t\t\t\tapplyRequest(req, applier)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn meta, hardstate, st\n}\n\nfunc toTTLOptions(r *pb.Request) store.TTLOptionSet {\n\trefresh, _ := pbutil.GetBool(r.Refresh)\n\tttlOptions := store.TTLOptionSet{Refresh: refresh}\n\tif r.Expiration != 0 {\n\t\tttlOptions.ExpireTime = time.Unix(0, r.Expiration)\n\t}\n\treturn ttlOptions\n}\n\nfunc applyRequest(r *pb.Request, applyV2 etcdserver.ApplierV2) {\n\ttoTTLOptions(r)\n\tswitch r.Method {\n\tcase \"POST\":\n\t\tapplyV2.Post(r)\n\tcase \"PUT\":\n\t\tapplyV2.Put(r)\n\tcase \"DELETE\":\n\t\tapplyV2.Delete(r)\n\tcase \"QGET\":\n\t\tapplyV2.QGet(r)\n\tcase \"SYNC\":\n\t\tapplyV2.Sync(r)\n\tdefault:\n\t\tpanic(\"unknown command\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package ratelimit\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestRateLimit(t *testing.T) {\n\tif os.Getenv(\"REDIS_URL\") == \"\" {\n\t\tt.Skip(\"skipping redis test since there is no REDIS_URL\")\n\t}\n\n\tif time.Now().Minute() > 58 {\n\t\tt.Log(\"Note: The TestRateLimit test is known to have a bug if run near the top of the hour. Since the rate limiter isn't a moving window, it could end up checking against two different buckets on either side of the top of the hour, so if you see that just re-run it after you've passed the top of the hour.\")\n\t}\n\n\trateLimiter := NewRateLimiter(os.Getenv(\"REDIS_URL\"), fmt.Sprintf(\"worker-test-rl-%d\", os.Getpid()))\n\n\tok, err := rateLimiter.RateLimit(context.TODO(), \"slow\", 2, time.Hour)\n\tif err != nil {\n\t\tt.Fatalf(\"rate limiter error: %v\", err)\n\t}\n\tif !ok {\n\t\tt.Fatal(\"expected to not get rate limited, but was limited\")\n\t}\n\n\tok, err = rateLimiter.RateLimit(context.TODO(), \"slow\", 2, time.Hour)\n\tif err != nil {\n\t\tt.Fatalf(\"rate limiter error: %v\", err)\n\t}\n\tif !ok {\n\t\tt.Fatal(\"expected to not get rate limited, but was limited\")\n\t}\n\n\tok, err = rateLimiter.RateLimit(\"slow\", 2, time.Hour)\n\tif err != nil {\n\t\tt.Fatalf(\"rate limiter error: %v\", err)\n\t}\n\tif ok {\n\t\tt.Fatal(\"expected to get rate limited, but was not limited\")\n\t}\n}\n<commit_msg>Pass in last context.TODO to make test pass<commit_after>package ratelimit\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestRateLimit(t *testing.T) {\n\tif os.Getenv(\"REDIS_URL\") == \"\" {\n\t\tt.Skip(\"skipping redis test since there is no REDIS_URL\")\n\t}\n\n\tif time.Now().Minute() > 58 {\n\t\tt.Log(\"Note: The TestRateLimit test is known to have a bug if run near the top of the hour. Since the rate limiter isn't a moving window, it could end up checking against two different buckets on either side of the top of the hour, so if you see that just re-run it after you've passed the top of the hour.\")\n\t}\n\n\trateLimiter := NewRateLimiter(os.Getenv(\"REDIS_URL\"), fmt.Sprintf(\"worker-test-rl-%d\", os.Getpid()))\n\n\tok, err := rateLimiter.RateLimit(context.TODO(), \"slow\", 2, time.Hour)\n\tif err != nil {\n\t\tt.Fatalf(\"rate limiter error: %v\", err)\n\t}\n\tif !ok {\n\t\tt.Fatal(\"expected to not get rate limited, but was limited\")\n\t}\n\n\tok, err = rateLimiter.RateLimit(context.TODO(), \"slow\", 2, time.Hour)\n\tif err != nil {\n\t\tt.Fatalf(\"rate limiter error: %v\", err)\n\t}\n\tif !ok {\n\t\tt.Fatal(\"expected to not get rate limited, but was limited\")\n\t}\n\n\tok, err = rateLimiter.RateLimit(context.TODO(), \"slow\", 2, time.Hour)\n\tif err != nil {\n\t\tt.Fatalf(\"rate limiter error: %v\", err)\n\t}\n\tif ok {\n\t\tt.Fatal(\"expected to get rate limited, but was not limited\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package alerts\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tutil \"github.com\/MKuranowski\/WarsawGTFS\/realtime\/util\"\n\tgtfsrt \"github.com\/MobilityData\/gtfs-realtime-bindings\/golang\/gtfs\"\n\t\"github.com\/golang\/protobuf\/proto\"\n)\n\n\/\/ Alert contains an internal representation of an alert, which is also marshallable to JSON\ntype Alert struct {\n\tID string `json:\"id\"`\n\tRoutes []string `json:\"routes\"`\n\tEffect string `json:\"effect\"`\n\tLink string `json:\"link\"`\n\tTitle string `json:\"title\"`\n\tBody string `json:\"body\"`\n\tHTMLBody string `json:\"htmlbody\"`\n}\n\n\/\/ alertFromRssItem extracts basic data from an RssItem and puts them into an Alert\nfunc alertFromRssItem(r *rssItem) (a *Alert, err error) {\n\ta = &Alert{}\n\n\t\/\/ Extract the ID\n\tif idMatch := regexID.FindStringIndex(r.GUID); idMatch != nil {\n\t\t\/\/ Generate a prefix for the ID\n\t\tvar idPrefix string\n\t\tif r.Type == \"REDUCED_SERVICE\" {\n\t\t\tidPrefix = \"A\/IMPEDIMENT\/\"\n\t\t} else {\n\t\t\tidPrefix = \"A\/CHANGE\/\"\n\t\t}\n\n\t\ta.ID = idPrefix + r.GUID[idMatch[0]+3:idMatch[1]]\n\t} else {\n\t\terr = fmt.Errorf(\"Unable to find alert ID in GUID %q\", r.GUID)\n\t\treturn\n\t}\n\n\t\/\/ Extract other data\n\ta.Effect = r.Type\n\ta.Link = htmlCleaner.Sanitize(r.Link)\n\ta.Title = htmlCleaner.Sanitize(r.Description)\n\n\t\/\/ Extract affected routes from the title\n\tif strings.Contains(r.Title, \":\") {\n\t\troutesString := strings.SplitN(r.Title, \":\", 2)[1]\n\t\ta.Routes = regexRoute.FindAllString(routesString, -1)\n\t}\n\n\treturn\n}\n\n\/\/ makeEntitySelector creates a GTFS-RT []*EntitySelector that \"select\" applicable routes\nfunc (a *Alert) makeEntitySelector() []*gtfsrt.EntitySelector {\n\tvar entities []*gtfsrt.EntitySelector\n\n\tfor _, route := range a.Routes {\n\t\tentities = append(entities, >fsrt.EntitySelector{RouteId: &route})\n\t}\n\n\treturn entities\n}\n\n\/\/ makeAlertEffect creates a GTFS-RT *Alert_Effect that selects the GTFS-RT Effect attribute\nfunc (a *Alert) makeAlertEffect() *gtfsrt.Alert_Effect {\n\tvar effect gtfsrt.Alert_Effect\n\tif a.Effect == \"REDUCED_SERVICE\" {\n\t\teffect = gtfsrt.Alert_REDUCED_SERVICE\n\t} else {\n\t\teffect = gtfsrt.Alert_OTHER_EFFECT\n\t}\n\treturn &effect\n}\n\n\/\/ AsProto returns the Alert marshalled to a GTFS-RT FeedEntity\nfunc (a *Alert) AsProto() *gtfsrt.FeedEntity {\n\treturn >fsrt.FeedEntity{\n\t\tId: &a.ID,\n\t\tAlert: >fsrt.Alert{\n\t\t\tInformedEntity: a.makeEntitySelector(),\n\t\t\tEffect: a.makeAlertEffect(),\n\t\t\tUrl: util.MakeTranslatedString(a.Link),\n\t\t\tHeaderText: util.MakeTranslatedString(a.Title),\n\t\t\tDescriptionText: util.MakeTranslatedString(a.Body),\n\t\t},\n\t}\n}\n\n\/\/ LoadExternal processes data located on the website saved in a.Link\nfunc (a *Alert) LoadExternal(client exclusiveHTTPClient, routeMap map[string]sort.StringSlice) (err error) {\n\tdoc, err := getWebsite(client, a.Link, a.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Process flags\n\tif len(a.Routes) <= 0 {\n\t\tflags := getAlertFlags(doc, a.Effect)\n\t\tfor _, flag := range flags {\n\t\t\tswitch flag {\n\t\t\tcase \"metro\":\n\t\t\t\ta.Routes = append(a.Routes, routeMap[\"1\"]...)\n\t\t\tcase \"tramwaje\":\n\t\t\t\ta.Routes = append(a.Routes, routeMap[\"0\"]...)\n\t\t\tcase \"kolej\":\n\t\t\tcase \"skm\":\n\t\t\t\ta.Routes = append(a.Routes, routeMap[\"2\"]...)\n\t\t\tcase \"autobusy\":\n\t\t\t\ta.Routes = append(a.Routes, routeMap[\"3\"]...)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Sanitize the document to make a usable htmlBody\n\ta.HTMLBody, err = getAlertDesc(doc, a.Effect)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Strip the cleaned HTMLBody to make a plaintext description\n\ta.Body, err = getAlertPlaintext(a.HTMLBody)\n\treturn\n}\n\n\/\/ AlertContainer is a container for multiple alerts, marshallable to JSON\ntype AlertContainer struct {\n\tTimestamp time.Time `json:\"-\"`\n\tTime string `json:\"time\"`\n\tAlerts []*Alert `json:\"alerts\"`\n}\n\n\/\/ AsProto returns this list of alerts marshalled into a GTFS-RT FeedMessage\nfunc (ac *AlertContainer) AsProto() *gtfsrt.FeedMessage {\n\tmsg := util.MakeFeedMessage(ac.Timestamp)\n\tfor _, alert := range ac.Alerts {\n\t\tmsg.Entity = append(msg.Entity, alert.AsProto())\n\t}\n\treturn msg\n}\n\n\/\/ LoadExternal asynchronously calls LoadExternal on all its alerts\nfunc (ac *AlertContainer) LoadExternal(client exclusiveHTTPClient, routeMap map[string]sort.StringSlice, throwErrors bool) error {\n\t\/\/ Make synchronization primitives\n\twg := &sync.WaitGroup{}\n\terrCh := make(chan error, len(ac.Alerts)+1)\n\n\t\/\/ Make a goroutine for all LoadExternal\n\tfor _, a := range ac.Alerts {\n\t\t\/\/ Log & update the waitgroup\n\t\tlog.Printf(\"Fetching alert desc from %v\\n\", a.Link)\n\t\twg.Add(1)\n\n\t\t\/\/ Call a goroutine to load external data\n\t\tgo func(a *Alert) {\n\t\t\tdefer wg.Done()\n\t\t\terr := a.LoadExternal(client, routeMap)\n\n\t\t\t\/\/ errors are only passed through if requested and if not nil\n\t\t\tif throwErrors && err != nil {\n\t\t\t\terrCh <- err\n\t\t\t}\n\t\t}(a)\n\t}\n\n\t\/\/ Wait until all goroutines finished\n\twg.Wait()\n\n\t\/\/ Mark the end of possible errors\n\tclose(errCh)\n\n\t\/\/ Check if any error was asked to be re-thrown\n\tfor err := range errCh {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Filter removes all alerts without associated routes\nfunc (ac *AlertContainer) Filter() {\n\tfiltered := ac.Alerts[:0]\n\t\/\/ Filter alerts\n\tfor _, a := range ac.Alerts {\n\t\tif len(a.Routes) > 0 {\n\t\t\tfiltered = append(filtered, a)\n\t\t}\n\t}\n\t\/\/ Garbage collect deleted alerts\n\tfor i := len(filtered); i < len(ac.Alerts); i++ {\n\t\tac.Alerts[i] = nil\n\t}\n\t\/\/ Set the filtered slice\n\tac.Alerts = filtered\n}\n\n\/\/ SaveJSON marshalls the container into a json file at the given location\nfunc (ac *AlertContainer) SaveJSON(target string) (err error) {\n\t\/\/ Open target file\n\tf, err := os.Create(target)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer f.Close()\n\n\t\/\/ Marshall JSON\n\tb, err := json.MarshalIndent(ac, \"\", \" \")\n\tif err != nil {\n\t\treturn\n\t}\n\tf.Write(b)\n\n\treturn\n}\n\n\/\/ SavePB marshalls the container into a GTFS-Realtime protocol buffer file\nfunc (ac *AlertContainer) SavePB(target string, humanReadable bool) (err error) {\n\t\/\/ Open target file\n\tf, err := os.Create(target)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer f.Close()\n\n\t\/\/ Marshall to GTFS-RT\n\tif humanReadable {\n\t\t\/\/ Human-readable format\n\t\terr = proto.MarshalText(f, ac.AsProto())\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t} else {\n\t\t\/\/ Binary format\n\t\tvar b []byte\n\t\tb, err = proto.Marshal(ac.AsProto())\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tf.Write(b)\n\t}\n\n\treturn\n}\n<commit_msg>alerts: fix invalid entity selectors in gtfs-realtime format<commit_after>package alerts\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tutil \"github.com\/MKuranowski\/WarsawGTFS\/realtime\/util\"\n\tgtfsrt \"github.com\/MobilityData\/gtfs-realtime-bindings\/golang\/gtfs\"\n\t\"github.com\/golang\/protobuf\/proto\"\n)\n\n\/\/ Alert contains an internal representation of an alert, which is also marshallable to JSON\ntype Alert struct {\n\tID string `json:\"id\"`\n\tRoutes []string `json:\"routes\"`\n\tEffect string `json:\"effect\"`\n\tLink string `json:\"link\"`\n\tTitle string `json:\"title\"`\n\tBody string `json:\"body\"`\n\tHTMLBody string `json:\"htmlbody\"`\n}\n\n\/\/ alertFromRssItem extracts basic data from an RssItem and puts them into an Alert\nfunc alertFromRssItem(r *rssItem) (a *Alert, err error) {\n\ta = &Alert{}\n\n\t\/\/ Extract the ID\n\tif idMatch := regexID.FindStringIndex(r.GUID); idMatch != nil {\n\t\t\/\/ Generate a prefix for the ID\n\t\tvar idPrefix string\n\t\tif r.Type == \"REDUCED_SERVICE\" {\n\t\t\tidPrefix = \"A\/IMPEDIMENT\/\"\n\t\t} else {\n\t\t\tidPrefix = \"A\/CHANGE\/\"\n\t\t}\n\n\t\ta.ID = idPrefix + r.GUID[idMatch[0]+3:idMatch[1]]\n\t} else {\n\t\terr = fmt.Errorf(\"Unable to find alert ID in GUID %q\", r.GUID)\n\t\treturn\n\t}\n\n\t\/\/ Extract other data\n\ta.Effect = r.Type\n\ta.Link = htmlCleaner.Sanitize(r.Link)\n\ta.Title = htmlCleaner.Sanitize(r.Description)\n\n\t\/\/ Extract affected routes from the title\n\tif strings.Contains(r.Title, \":\") {\n\t\troutesString := strings.SplitN(r.Title, \":\", 2)[1]\n\t\ta.Routes = regexRoute.FindAllString(routesString, -1)\n\t}\n\n\treturn\n}\n\n\/\/ makeEntitySelector creates a GTFS-RT []*EntitySelector that \"select\" applicable routes\nfunc (a *Alert) makeEntitySelector() []*gtfsrt.EntitySelector {\n\tvar entities []*gtfsrt.EntitySelector\n\n\tfor i := range a.Routes {\n\t\tentities = append(entities, >fsrt.EntitySelector{RouteId: &a.Routes[i]})\n\t}\n\n\treturn entities\n}\n\n\/\/ makeAlertEffect creates a GTFS-RT *Alert_Effect that selects the GTFS-RT Effect attribute\nfunc (a *Alert) makeAlertEffect() *gtfsrt.Alert_Effect {\n\tvar effect gtfsrt.Alert_Effect\n\tif a.Effect == \"REDUCED_SERVICE\" {\n\t\teffect = gtfsrt.Alert_REDUCED_SERVICE\n\t} else {\n\t\teffect = gtfsrt.Alert_OTHER_EFFECT\n\t}\n\treturn &effect\n}\n\n\/\/ AsProto returns the Alert marshalled to a GTFS-RT FeedEntity\nfunc (a *Alert) AsProto() *gtfsrt.FeedEntity {\n\treturn >fsrt.FeedEntity{\n\t\tId: &a.ID,\n\t\tAlert: >fsrt.Alert{\n\t\t\tInformedEntity: a.makeEntitySelector(),\n\t\t\tEffect: a.makeAlertEffect(),\n\t\t\tUrl: util.MakeTranslatedString(a.Link),\n\t\t\tHeaderText: util.MakeTranslatedString(a.Title),\n\t\t\tDescriptionText: util.MakeTranslatedString(a.Body),\n\t\t},\n\t}\n}\n\n\/\/ LoadExternal processes data located on the website saved in a.Link\nfunc (a *Alert) LoadExternal(client exclusiveHTTPClient, routeMap map[string]sort.StringSlice) (err error) {\n\tdoc, err := getWebsite(client, a.Link, a.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Process flags\n\tif len(a.Routes) <= 0 {\n\t\tflags := getAlertFlags(doc, a.Effect)\n\t\tfor _, flag := range flags {\n\t\t\tswitch flag {\n\t\t\tcase \"metro\":\n\t\t\t\ta.Routes = append(a.Routes, routeMap[\"1\"]...)\n\t\t\tcase \"tramwaje\":\n\t\t\t\ta.Routes = append(a.Routes, routeMap[\"0\"]...)\n\t\t\tcase \"kolej\":\n\t\t\tcase \"skm\":\n\t\t\t\ta.Routes = append(a.Routes, routeMap[\"2\"]...)\n\t\t\tcase \"autobusy\":\n\t\t\t\ta.Routes = append(a.Routes, routeMap[\"3\"]...)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Sanitize the document to make a usable htmlBody\n\ta.HTMLBody, err = getAlertDesc(doc, a.Effect)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Strip the cleaned HTMLBody to make a plaintext description\n\ta.Body, err = getAlertPlaintext(a.HTMLBody)\n\treturn\n}\n\n\/\/ AlertContainer is a container for multiple alerts, marshallable to JSON\ntype AlertContainer struct {\n\tTimestamp time.Time `json:\"-\"`\n\tTime string `json:\"time\"`\n\tAlerts []*Alert `json:\"alerts\"`\n}\n\n\/\/ AsProto returns this list of alerts marshalled into a GTFS-RT FeedMessage\nfunc (ac *AlertContainer) AsProto() *gtfsrt.FeedMessage {\n\tmsg := util.MakeFeedMessage(ac.Timestamp)\n\tfor _, alert := range ac.Alerts {\n\t\tmsg.Entity = append(msg.Entity, alert.AsProto())\n\t}\n\treturn msg\n}\n\n\/\/ LoadExternal asynchronously calls LoadExternal on all its alerts\nfunc (ac *AlertContainer) LoadExternal(client exclusiveHTTPClient, routeMap map[string]sort.StringSlice, throwErrors bool) error {\n\t\/\/ Make synchronization primitives\n\twg := &sync.WaitGroup{}\n\terrCh := make(chan error, len(ac.Alerts)+1)\n\n\t\/\/ Make a goroutine for all LoadExternal\n\tfor _, a := range ac.Alerts {\n\t\t\/\/ Log & update the waitgroup\n\t\tlog.Printf(\"Fetching alert desc from %v\\n\", a.Link)\n\t\twg.Add(1)\n\n\t\t\/\/ Call a goroutine to load external data\n\t\tgo func(a *Alert) {\n\t\t\tdefer wg.Done()\n\t\t\terr := a.LoadExternal(client, routeMap)\n\n\t\t\t\/\/ errors are only passed through if requested and if not nil\n\t\t\tif throwErrors && err != nil {\n\t\t\t\terrCh <- err\n\t\t\t}\n\t\t}(a)\n\t}\n\n\t\/\/ Wait until all goroutines finished\n\twg.Wait()\n\n\t\/\/ Mark the end of possible errors\n\tclose(errCh)\n\n\t\/\/ Check if any error was asked to be re-thrown\n\tfor err := range errCh {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Filter removes all alerts without associated routes\nfunc (ac *AlertContainer) Filter() {\n\tfiltered := ac.Alerts[:0]\n\t\/\/ Filter alerts\n\tfor _, a := range ac.Alerts {\n\t\tif len(a.Routes) > 0 {\n\t\t\tfiltered = append(filtered, a)\n\t\t}\n\t}\n\t\/\/ Garbage collect deleted alerts\n\tfor i := len(filtered); i < len(ac.Alerts); i++ {\n\t\tac.Alerts[i] = nil\n\t}\n\t\/\/ Set the filtered slice\n\tac.Alerts = filtered\n}\n\n\/\/ SaveJSON marshalls the container into a json file at the given location\nfunc (ac *AlertContainer) SaveJSON(target string) (err error) {\n\t\/\/ Open target file\n\tf, err := os.Create(target)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer f.Close()\n\n\t\/\/ Marshall JSON\n\tb, err := json.MarshalIndent(ac, \"\", \" \")\n\tif err != nil {\n\t\treturn\n\t}\n\tf.Write(b)\n\n\treturn\n}\n\n\/\/ SavePB marshalls the container into a GTFS-Realtime protocol buffer file\nfunc (ac *AlertContainer) SavePB(target string, humanReadable bool) (err error) {\n\t\/\/ Open target file\n\tf, err := os.Create(target)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer f.Close()\n\n\t\/\/ Marshall to GTFS-RT\n\tif humanReadable {\n\t\t\/\/ Human-readable format\n\t\terr = proto.MarshalText(f, ac.AsProto())\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t} else {\n\t\t\/\/ Binary format\n\t\tvar b []byte\n\t\tb, err = proto.Marshal(ac.AsProto())\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tf.Write(b)\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2015 Couchbase, Inc.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the\n\/\/ License. You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an \"AS\n\/\/ IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n\/\/ express or implied. See the License for the specific language\n\/\/ governing permissions and limitations under the License.\n\npackage rebalance\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\n\tlog \"github.com\/couchbase\/clog\"\n\n\t\"github.com\/couchbaselabs\/cbgt\"\n\t\"github.com\/couchbaselabs\/cbgt\/rest\/monitor\"\n)\n\nfunc TestRebalance(t *testing.T) {\n\ttestDir, _ := ioutil.TempDir(\".\/tmp\", \"test\")\n\tdefer os.RemoveAll(testDir)\n\n\thttpGets := 0\n\tmonitor.HttpGet = func(url string) (resp *http.Response, err error) {\n\t\thttpGets++\n\n\t\treturn &http.Response{\n\t\t\tStatusCode: 200,\n\t\t\tBody: ioutil.NopCloser(bytes.NewBuffer([]byte(\"{}\"))),\n\t\t}, nil\n\t}\n\tdefer func() {\n\t\tmonitor.HttpGet = http.Get\n\t}()\n\n\ttests := []struct {\n\t\tlabel string\n\t\tops string \/\/ Space separated \"+a\", \"-x\".\n\t\tparams map[string]string\n\t\texpNodes string \/\/ Space separated list of nodes (\"a\"...\"v\").\n\t\texpIndexes string \/\/ Space separated list of indxes (\"x\"...\"z\").\n\t\texpStartErr bool\n\t}{\n\t\t{\"1st node\",\n\t\t\t\"+a\", nil,\n\t\t\t\"a\",\n\t\t\t\"\",\n\t\t\ttrue,\n\t\t},\n\t\t{\"add 1st index x\",\n\t\t\t\"+x\", nil,\n\t\t\t\"a\",\n\t\t\t\"x\",\n\t\t\tfalse,\n\t\t},\n\t\t{\"add 2nd node b\",\n\t\t\t\"+b\", nil,\n\t\t\t\"a b\",\n\t\t\t\"x\",\n\t\t\tfalse,\n\t\t},\n\t\t{\"remove node b\",\n\t\t\t\"-b\", nil,\n\t\t\t\"a\",\n\t\t\t\"x\",\n\t\t\tfalse,\n\t\t},\n\t}\n\n\tcfg := cbgt.NewCfgMem()\n\n\tmgrs := map[string]*cbgt.Manager{}\n\n\tvar mgr0 *cbgt.Manager\n\n\tserver := \".\"\n\n\twaitUntilEmptyCfgEvents := func(ch chan cbgt.CfgEvent) {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ch:\n\t\t\tdefault:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tcfgEventsNodeDefsWanted := make(chan cbgt.CfgEvent, 100)\n\tcfg.Subscribe(cbgt.NODE_DEFS_WANTED, cfgEventsNodeDefsWanted)\n\n\twaitUntilEmptyCfgEventsNodeDefsWanted := func() {\n\t\twaitUntilEmptyCfgEvents(cfgEventsNodeDefsWanted)\n\t}\n\n\tcfgEventsIndexDefs := make(chan cbgt.CfgEvent, 100)\n\tcfg.Subscribe(cbgt.INDEX_DEFS_KEY, cfgEventsIndexDefs)\n\n\twaitUntilEmptyCfgEventsIndexDefs := func() {\n\t\twaitUntilEmptyCfgEvents(cfgEventsIndexDefs)\n\t}\n\n\tfor testi, test := range tests {\n\t\tlog.Printf(\"testi: %d, label: %q\", testi, test.label)\n\n\t\tfor opi, op := range strings.Split(test.ops, \" \") {\n\t\t\tlog.Printf(\" opi: %d, op: %s\", opi, op)\n\n\t\t\tname := op[1:2]\n\n\t\t\tisIndexOp := name >= \"x\"\n\t\t\tif isIndexOp {\n\t\t\t\tindexName := name\n\t\t\t\tlog.Printf(\" indexOp: %s, indexName: %s\", op[0:1], indexName)\n\n\t\t\t\ttestCreateIndex(t, mgr0, indexName, test.params,\n\t\t\t\t\twaitUntilEmptyCfgEventsIndexDefs)\n\t\t\t} else { \/\/ It's a node op.\n\t\t\t\tnodeName := name\n\t\t\t\tlog.Printf(\" nodeOp: %s, nodeName: %s\", op[0:1], nodeName)\n\n\t\t\t\tregister := \"wanted\"\n\t\t\t\tif op[0:1] == \"-\" {\n\t\t\t\t\tregister = \"unknown\"\n\t\t\t\t}\n\t\t\t\tif test.params[\"register\"] != \"\" {\n\t\t\t\t\tregister = test.params[\"register\"]\n\t\t\t\t}\n\t\t\t\tif test.params[nodeName+\".register\"] != \"\" {\n\t\t\t\t\tregister = test.params[nodeName+\".register\"]\n\t\t\t\t}\n\n\t\t\t\tif mgrs[nodeName] != nil {\n\t\t\t\t\tmgrs[nodeName].Stop()\n\t\t\t\t\tdelete(mgrs, nodeName)\n\t\t\t\t}\n\n\t\t\t\twaitUntilEmptyCfgEventsNodeDefsWanted()\n\n\t\t\t\tmgr, err := startNodeManager(testDir, cfg,\n\t\t\t\t\tname, register, test.params, server)\n\t\t\t\tif err != nil || mgr == nil {\n\t\t\t\t\tt.Errorf(\"expected no err, got: %#v\", err)\n\t\t\t\t}\n\t\t\t\tif mgr0 == nil {\n\t\t\t\t\tmgr0 = mgr\n\t\t\t\t}\n\n\t\t\t\tif register != \"unknown\" {\n\t\t\t\t\tmgrs[nodeName] = mgr\n\t\t\t\t}\n\n\t\t\t\tmgr.Kick(\"kick\")\n\n\t\t\t\twaitUntilEmptyCfgEventsNodeDefsWanted()\n\t\t\t}\n\t\t}\n\n\t\tr, err := StartRebalance(cbgt.VERSION, cfg, \".\", nil)\n\t\tif (test.expStartErr && err == nil) ||\n\t\t\t(!test.expStartErr && err != nil) {\n\t\t\tt.Errorf(\"testi: %d, label: %q,\"+\n\t\t\t\t\" expStartErr: %v, but got: %v\",\n\t\t\t\ttesti, test.label,\n\t\t\t\ttest.expStartErr, err)\n\t\t}\n\n\t\tif err != nil || r == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tprogressCh := r.ProgressCh()\n\t\tif progressCh == nil {\n\t\t\tt.Errorf(\"expected progressCh\")\n\t\t}\n\n\t\terr = nil\n\t\tfor progress := range progressCh {\n\t\t\tif progress.Error != nil {\n\t\t\t\terr = progress.Error\n\n\t\t\t\tlog.Printf(\"saw progress error: %#v\\n\", progress)\n\t\t\t}\n\t\t}\n\n\t\tr.Stop()\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"expected no end err, got: %v\", err)\n\t\t}\n\n\t\tendIndexDefs, endNodeDefs, endPlanPIndexes, endPlanPIndexesCAS, err :=\n\t\t\tcbgt.PlannerGetPlan(cfg, cbgt.VERSION, \"\")\n\t\tif err != nil ||\n\t\t\tendIndexDefs == nil ||\n\t\t\tendNodeDefs == nil ||\n\t\t\tendPlanPIndexes == nil ||\n\t\t\tendPlanPIndexesCAS == 0 {\n\t\t\tt.Errorf(\"expected no err, got: %#v\", err)\n\t\t}\n\n\t\texpNodes := strings.Split(test.expNodes, \" \")\n\t\tif len(expNodes) != len(endNodeDefs.NodeDefs) {\n\t\t\tt.Errorf(\"len(expNodes) != len(endNodeDefs.NodeDefs), \"+\n\t\t\t\t\" expNodes: %#v, endNodeDefs.NodeDefs: %#v\",\n\t\t\t\texpNodes, endNodeDefs.NodeDefs)\n\t\t}\n\n\t\tfor _, expNode := range expNodes {\n\t\t\tif endNodeDefs.NodeDefs[expNode] == nil {\n\t\t\t\tt.Errorf(\"didn't find expNode: %s,\"+\n\t\t\t\t\t\" expNodes: %#v, endNodeDefs.NodeDefs: %#v\",\n\t\t\t\t\texpNode, expNodes, endNodeDefs.NodeDefs)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc testCreateIndex(t *testing.T,\n\tmgr *cbgt.Manager,\n\tindexName string,\n\tparams map[string]string,\n\twaitUntilEmptyCfgEventsIndexDefs func()) {\n\tsourceType := \"primary\"\n\tif params[\"sourceType\"] != \"\" {\n\t\tsourceType = params[\"sourceType\"]\n\t}\n\tif params[indexName+\".sourceType\"] != \"\" {\n\t\tsourceType = params[indexName+\".sourceType\"]\n\t}\n\n\tsourceName := \"default\"\n\tif params[\"sourceName\"] != \"\" {\n\t\tsourceName = params[\"sourceName\"]\n\t}\n\tif params[indexName+\".sourceName\"] != \"\" {\n\t\tsourceName = params[indexName+\".sourceName\"]\n\t}\n\n\tsourceUUID := \"\"\n\tif params[\"sourceUUID\"] != \"\" {\n\t\tsourceUUID = params[\"sourceUUID\"]\n\t}\n\tif params[indexName+\".sourceUUID\"] != \"\" {\n\t\tsourceUUID = params[indexName+\".sourceUUID\"]\n\t}\n\n\tsourceParams := `{\"numPartitions\":4}`\n\tif params[\"sourceParams\"] != \"\" {\n\t\tsourceParams = params[\"sourceParams\"]\n\t}\n\tif params[indexName+\".sourceParams\"] != \"\" {\n\t\tsourceParams = params[indexName+\".sourceParams\"]\n\t}\n\n\tindexType := \"blackhole\"\n\tif params[\"indexType\"] != \"\" {\n\t\tindexType = params[\"indexType\"]\n\t}\n\tif params[indexName+\".indexType\"] != \"\" {\n\t\tindexType = params[indexName+\".indexType\"]\n\t}\n\n\tindexParams := \"\"\n\tif params[\"indexParams\"] != \"\" {\n\t\tindexParams = params[\"indexParams\"]\n\t}\n\tif params[indexName+\".indexParams\"] != \"\" {\n\t\tindexParams = params[indexName+\".indexParams\"]\n\t}\n\n\tprevIndexUUID := \"\"\n\tif params[\"prevIndexUUID\"] != \"\" {\n\t\tprevIndexUUID = params[\"prevIndexUUID\"]\n\t}\n\tif params[indexName+\".prevIndexUUID\"] != \"\" {\n\t\tprevIndexUUID = params[indexName+\".prevIndexUUID\"]\n\t}\n\n\tplanParams := cbgt.PlanParams{\n\t\tMaxPartitionsPerPIndex: 1,\n\t}\n\n\twaitUntilEmptyCfgEventsIndexDefs()\n\n\terr := mgr.CreateIndex(\n\t\tsourceType, sourceName, sourceUUID, sourceParams,\n\t\tindexType, indexName, indexParams,\n\t\tplanParams,\n\t\tprevIndexUUID)\n\tif err != nil {\n\t\tt.Errorf(\"expected no err, got: %#v\", err)\n\t}\n\n\twaitUntilEmptyCfgEventsIndexDefs()\n}\n\nfunc startNodeManager(testDir string, cfg cbgt.Cfg, node, register string,\n\tparams map[string]string, server string) (\n\tmgr *cbgt.Manager, err error) {\n\tuuid := node\n\tif params[\"uuid\"] != \"\" {\n\t\tuuid = params[\"uuid\"]\n\t}\n\tif params[node+\".uuid\"] != \"\" {\n\t\tuuid = params[node+\".uuid\"]\n\t}\n\n\t\/\/ No planner in tags because mcp provides the planner.\n\ttags := []string{\"feed\", \"pindex\", \"janitor\", \"queryer\"}\n\tif params[\"tags\"] != \"\" {\n\t\ttags = strings.Split(params[\"tags\"], \",\")\n\t}\n\tif params[node+\".tags\"] != \"\" {\n\t\ttags = strings.Split(params[node+\".tags\"], \",\")\n\t}\n\n\tcontainer := \"\"\n\tif params[\"container\"] != \"\" {\n\t\tcontainer = params[\"container\"]\n\t}\n\tif params[node+\".container\"] != \"\" {\n\t\tcontainer = params[node+\".container\"]\n\t}\n\n\tweight := 1\n\tif params[\"weight\"] != \"\" {\n\t\tweight, err = strconv.Atoi(params[\"weight\"])\n\t}\n\tif params[node+\".weight\"] != \"\" {\n\t\tweight, err = strconv.Atoi(params[node+\".weight\"])\n\t}\n\tif weight < 1 {\n\t\tweight = 1\n\t}\n\n\textras := \"\"\n\n\tbindHttp := node\n\n\tdataDir := testDir + string(os.PathSeparator) + node\n\n\tos.MkdirAll(dataDir, 0700)\n\n\tmeh := cbgt.ManagerEventHandlers(nil)\n\n\tmgr = cbgt.NewManager(cbgt.VERSION, cfg, uuid,\n\t\ttags, container, weight, extras,\n\t\tbindHttp, dataDir, server, meh)\n\n\terr = mgr.Start(register)\n\tif err != nil {\n\t\tmgr.Stop()\n\n\t\treturn nil, err\n\t}\n\n\treturn mgr, nil\n}\n<commit_msg>test that currStates has all the indexes<commit_after>\/\/ Copyright (c) 2015 Couchbase, Inc.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the\n\/\/ License. You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing,\n\/\/ software distributed under the License is distributed on an \"AS\n\/\/ IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n\/\/ express or implied. See the License for the specific language\n\/\/ governing permissions and limitations under the License.\n\npackage rebalance\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\n\tlog \"github.com\/couchbase\/clog\"\n\n\t\"github.com\/couchbaselabs\/cbgt\"\n\t\"github.com\/couchbaselabs\/cbgt\/rest\/monitor\"\n)\n\nfunc TestRebalance(t *testing.T) {\n\ttestDir, _ := ioutil.TempDir(\".\/tmp\", \"test\")\n\tdefer os.RemoveAll(testDir)\n\n\thttpGets := 0\n\tmonitor.HttpGet = func(url string) (resp *http.Response, err error) {\n\t\thttpGets++\n\n\t\treturn &http.Response{\n\t\t\tStatusCode: 200,\n\t\t\tBody: ioutil.NopCloser(bytes.NewBuffer([]byte(\"{}\"))),\n\t\t}, nil\n\t}\n\tdefer func() {\n\t\tmonitor.HttpGet = http.Get\n\t}()\n\n\ttests := []struct {\n\t\tlabel string\n\t\tops string \/\/ Space separated \"+a\", \"-x\".\n\t\tparams map[string]string\n\t\texpNodes string \/\/ Space separated list of nodes (\"a\"...\"v\").\n\t\texpIndexes string \/\/ Space separated list of indxes (\"x\"...\"z\").\n\t\texpStartErr bool\n\t}{\n\t\t{\"1st node\",\n\t\t\t\"+a\", nil,\n\t\t\t\"a\",\n\t\t\t\"\",\n\t\t\ttrue,\n\t\t},\n\t\t{\"add 1st index x\",\n\t\t\t\"+x\", nil,\n\t\t\t\"a\",\n\t\t\t\"x\",\n\t\t\tfalse,\n\t\t},\n\t\t{\"add 2nd node b\",\n\t\t\t\"+b\", nil,\n\t\t\t\"a b\",\n\t\t\t\"x\",\n\t\t\tfalse,\n\t\t},\n\t\t{\"remove node b\",\n\t\t\t\"-b\", nil,\n\t\t\t\"a\",\n\t\t\t\"x\",\n\t\t\tfalse,\n\t\t},\n\t}\n\n\tcfg := cbgt.NewCfgMem()\n\n\tmgrs := map[string]*cbgt.Manager{}\n\n\tvar mgr0 *cbgt.Manager\n\n\tserver := \".\"\n\n\twaitUntilEmptyCfgEvents := func(ch chan cbgt.CfgEvent) {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ch:\n\t\t\tdefault:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tcfgEventsNodeDefsWanted := make(chan cbgt.CfgEvent, 100)\n\tcfg.Subscribe(cbgt.NODE_DEFS_WANTED, cfgEventsNodeDefsWanted)\n\n\twaitUntilEmptyCfgEventsNodeDefsWanted := func() {\n\t\twaitUntilEmptyCfgEvents(cfgEventsNodeDefsWanted)\n\t}\n\n\tcfgEventsIndexDefs := make(chan cbgt.CfgEvent, 100)\n\tcfg.Subscribe(cbgt.INDEX_DEFS_KEY, cfgEventsIndexDefs)\n\n\twaitUntilEmptyCfgEventsIndexDefs := func() {\n\t\twaitUntilEmptyCfgEvents(cfgEventsIndexDefs)\n\t}\n\n\tfor testi, test := range tests {\n\t\tlog.Printf(\"testi: %d, label: %q\", testi, test.label)\n\n\t\tfor opi, op := range strings.Split(test.ops, \" \") {\n\t\t\tlog.Printf(\" opi: %d, op: %s\", opi, op)\n\n\t\t\tname := op[1:2]\n\n\t\t\tisIndexOp := name >= \"x\"\n\t\t\tif isIndexOp {\n\t\t\t\tindexName := name\n\t\t\t\tlog.Printf(\" indexOp: %s, indexName: %s\", op[0:1], indexName)\n\n\t\t\t\ttestCreateIndex(t, mgr0, indexName, test.params,\n\t\t\t\t\twaitUntilEmptyCfgEventsIndexDefs)\n\t\t\t} else { \/\/ It's a node op.\n\t\t\t\tnodeName := name\n\t\t\t\tlog.Printf(\" nodeOp: %s, nodeName: %s\", op[0:1], nodeName)\n\n\t\t\t\tregister := \"wanted\"\n\t\t\t\tif op[0:1] == \"-\" {\n\t\t\t\t\tregister = \"unknown\"\n\t\t\t\t}\n\t\t\t\tif test.params[\"register\"] != \"\" {\n\t\t\t\t\tregister = test.params[\"register\"]\n\t\t\t\t}\n\t\t\t\tif test.params[nodeName+\".register\"] != \"\" {\n\t\t\t\t\tregister = test.params[nodeName+\".register\"]\n\t\t\t\t}\n\n\t\t\t\tif mgrs[nodeName] != nil {\n\t\t\t\t\tmgrs[nodeName].Stop()\n\t\t\t\t\tdelete(mgrs, nodeName)\n\t\t\t\t}\n\n\t\t\t\twaitUntilEmptyCfgEventsNodeDefsWanted()\n\n\t\t\t\tmgr, err := startNodeManager(testDir, cfg,\n\t\t\t\t\tname, register, test.params, server)\n\t\t\t\tif err != nil || mgr == nil {\n\t\t\t\t\tt.Errorf(\"expected no err, got: %#v\", err)\n\t\t\t\t}\n\t\t\t\tif mgr0 == nil {\n\t\t\t\t\tmgr0 = mgr\n\t\t\t\t}\n\n\t\t\t\tif register != \"unknown\" {\n\t\t\t\t\tmgrs[nodeName] = mgr\n\t\t\t\t}\n\n\t\t\t\tmgr.Kick(\"kick\")\n\n\t\t\t\twaitUntilEmptyCfgEventsNodeDefsWanted()\n\t\t\t}\n\t\t}\n\n\t\tr, err := StartRebalance(cbgt.VERSION, cfg, \".\", nil)\n\t\tif (test.expStartErr && err == nil) ||\n\t\t\t(!test.expStartErr && err != nil) {\n\t\t\tt.Errorf(\"testi: %d, label: %q,\"+\n\t\t\t\t\" expStartErr: %v, but got: %v\",\n\t\t\t\ttesti, test.label,\n\t\t\t\ttest.expStartErr, err)\n\t\t}\n\n\t\tif err != nil || r == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tprogressCh := r.ProgressCh()\n\t\tif progressCh == nil {\n\t\t\tt.Errorf(\"expected progressCh\")\n\t\t}\n\n\t\terr = nil\n\t\tfor progress := range progressCh {\n\t\t\tif progress.Error != nil {\n\t\t\t\terr = progress.Error\n\n\t\t\t\tlog.Printf(\"saw progress error: %#v\\n\", progress)\n\t\t\t}\n\t\t}\n\n\t\tr.Stop()\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"expected no end err, got: %v\", err)\n\t\t}\n\n\t\tendIndexDefs, endNodeDefs, endPlanPIndexes, endPlanPIndexesCAS, err :=\n\t\t\tcbgt.PlannerGetPlan(cfg, cbgt.VERSION, \"\")\n\t\tif err != nil ||\n\t\t\tendIndexDefs == nil ||\n\t\t\tendNodeDefs == nil ||\n\t\t\tendPlanPIndexes == nil ||\n\t\t\tendPlanPIndexesCAS == 0 {\n\t\t\tt.Errorf(\"expected no err, got: %#v\", err)\n\t\t}\n\n\t\texpNodes := strings.Split(test.expNodes, \" \")\n\t\tif len(expNodes) != len(endNodeDefs.NodeDefs) {\n\t\t\tt.Errorf(\"len(expNodes) != len(endNodeDefs.NodeDefs), \"+\n\t\t\t\t\" expNodes: %#v, endNodeDefs.NodeDefs: %#v\",\n\t\t\t\texpNodes, endNodeDefs.NodeDefs)\n\t\t}\n\n\t\tfor _, expNode := range expNodes {\n\t\t\tif endNodeDefs.NodeDefs[expNode] == nil {\n\t\t\t\tt.Errorf(\"didn't find expNode: %s,\"+\n\t\t\t\t\t\" expNodes: %#v, endNodeDefs.NodeDefs: %#v\",\n\t\t\t\t\texpNode, expNodes, endNodeDefs.NodeDefs)\n\t\t\t}\n\t\t}\n\n\t\texpIndexes := strings.Split(test.expIndexes, \" \")\n\n\t\tr.VisitCurrStates(func(currStates CurrStates) {\n\t\t\tif len(currStates) != len(expIndexes) {\n\t\t\t\tt.Errorf(\"len(expIndexes) != len(currStates), \"+\n\t\t\t\t\t\" expIndexes: %#v, currStates: %#v\",\n\t\t\t\t\texpIndexes, currStates)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc testCreateIndex(t *testing.T,\n\tmgr *cbgt.Manager,\n\tindexName string,\n\tparams map[string]string,\n\twaitUntilEmptyCfgEventsIndexDefs func()) {\n\tsourceType := \"primary\"\n\tif params[\"sourceType\"] != \"\" {\n\t\tsourceType = params[\"sourceType\"]\n\t}\n\tif params[indexName+\".sourceType\"] != \"\" {\n\t\tsourceType = params[indexName+\".sourceType\"]\n\t}\n\n\tsourceName := \"default\"\n\tif params[\"sourceName\"] != \"\" {\n\t\tsourceName = params[\"sourceName\"]\n\t}\n\tif params[indexName+\".sourceName\"] != \"\" {\n\t\tsourceName = params[indexName+\".sourceName\"]\n\t}\n\n\tsourceUUID := \"\"\n\tif params[\"sourceUUID\"] != \"\" {\n\t\tsourceUUID = params[\"sourceUUID\"]\n\t}\n\tif params[indexName+\".sourceUUID\"] != \"\" {\n\t\tsourceUUID = params[indexName+\".sourceUUID\"]\n\t}\n\n\tsourceParams := `{\"numPartitions\":4}`\n\tif params[\"sourceParams\"] != \"\" {\n\t\tsourceParams = params[\"sourceParams\"]\n\t}\n\tif params[indexName+\".sourceParams\"] != \"\" {\n\t\tsourceParams = params[indexName+\".sourceParams\"]\n\t}\n\n\tindexType := \"blackhole\"\n\tif params[\"indexType\"] != \"\" {\n\t\tindexType = params[\"indexType\"]\n\t}\n\tif params[indexName+\".indexType\"] != \"\" {\n\t\tindexType = params[indexName+\".indexType\"]\n\t}\n\n\tindexParams := \"\"\n\tif params[\"indexParams\"] != \"\" {\n\t\tindexParams = params[\"indexParams\"]\n\t}\n\tif params[indexName+\".indexParams\"] != \"\" {\n\t\tindexParams = params[indexName+\".indexParams\"]\n\t}\n\n\tprevIndexUUID := \"\"\n\tif params[\"prevIndexUUID\"] != \"\" {\n\t\tprevIndexUUID = params[\"prevIndexUUID\"]\n\t}\n\tif params[indexName+\".prevIndexUUID\"] != \"\" {\n\t\tprevIndexUUID = params[indexName+\".prevIndexUUID\"]\n\t}\n\n\tplanParams := cbgt.PlanParams{\n\t\tMaxPartitionsPerPIndex: 1,\n\t}\n\n\twaitUntilEmptyCfgEventsIndexDefs()\n\n\terr := mgr.CreateIndex(\n\t\tsourceType, sourceName, sourceUUID, sourceParams,\n\t\tindexType, indexName, indexParams,\n\t\tplanParams,\n\t\tprevIndexUUID)\n\tif err != nil {\n\t\tt.Errorf(\"expected no err, got: %#v\", err)\n\t}\n\n\twaitUntilEmptyCfgEventsIndexDefs()\n}\n\nfunc startNodeManager(testDir string, cfg cbgt.Cfg, node, register string,\n\tparams map[string]string, server string) (\n\tmgr *cbgt.Manager, err error) {\n\tuuid := node\n\tif params[\"uuid\"] != \"\" {\n\t\tuuid = params[\"uuid\"]\n\t}\n\tif params[node+\".uuid\"] != \"\" {\n\t\tuuid = params[node+\".uuid\"]\n\t}\n\n\t\/\/ No planner in tags because mcp provides the planner.\n\ttags := []string{\"feed\", \"pindex\", \"janitor\", \"queryer\"}\n\tif params[\"tags\"] != \"\" {\n\t\ttags = strings.Split(params[\"tags\"], \",\")\n\t}\n\tif params[node+\".tags\"] != \"\" {\n\t\ttags = strings.Split(params[node+\".tags\"], \",\")\n\t}\n\n\tcontainer := \"\"\n\tif params[\"container\"] != \"\" {\n\t\tcontainer = params[\"container\"]\n\t}\n\tif params[node+\".container\"] != \"\" {\n\t\tcontainer = params[node+\".container\"]\n\t}\n\n\tweight := 1\n\tif params[\"weight\"] != \"\" {\n\t\tweight, err = strconv.Atoi(params[\"weight\"])\n\t}\n\tif params[node+\".weight\"] != \"\" {\n\t\tweight, err = strconv.Atoi(params[node+\".weight\"])\n\t}\n\tif weight < 1 {\n\t\tweight = 1\n\t}\n\n\textras := \"\"\n\n\tbindHttp := node\n\n\tdataDir := testDir + string(os.PathSeparator) + node\n\n\tos.MkdirAll(dataDir, 0700)\n\n\tmeh := cbgt.ManagerEventHandlers(nil)\n\n\tmgr = cbgt.NewManager(cbgt.VERSION, cfg, uuid,\n\t\ttags, container, weight, extras,\n\t\tbindHttp, dataDir, server, meh)\n\n\terr = mgr.Start(register)\n\tif err != nil {\n\t\tmgr.Stop()\n\n\t\treturn nil, err\n\t}\n\n\treturn mgr, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package persona\n\nimport (\n\t\"code.google.com\/p\/gorilla\/sessions\"\n\t\"net\/http\"\n)\n\ntype Store struct {\n\tstore sessions.Store\n}\n\nfunc NewStore(secret string) Store {\n\treturn Store{sessions.NewCookieStore([]byte(secret))}\n}\n\nfunc (s Store) Get(r *http.Request) string {\n\tsession, _ := s.store.Get(r, \"session-name\")\n\n\tif v, ok := session.Values[\"email\"].(string); ok {\n\t\treturn v\n\t}\n\n\treturn \"\"\n}\n\nfunc (s Store) Set(email string, w http.ResponseWriter, r *http.Request) {\n\tsession, _ := s.store.Get(r, \"session-name\")\n\tsession.Values[\"email\"] = email\n\tsession.Save(r, w)\n}\n<commit_msg>Change gorilla\/sessions import path to github<commit_after>package persona\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/gorilla\/sessions\"\n)\n\ntype Store struct {\n\tstore sessions.Store\n}\n\nfunc NewStore(secret string) Store {\n\treturn Store{sessions.NewCookieStore([]byte(secret))}\n}\n\nfunc (s Store) Get(r *http.Request) string {\n\tsession, _ := s.store.Get(r, \"session-name\")\n\n\tif v, ok := session.Values[\"email\"].(string); ok {\n\t\treturn v\n\t}\n\n\treturn \"\"\n}\n\nfunc (s Store) Set(email string, w http.ResponseWriter, r *http.Request) {\n\tsession, _ := s.store.Get(r, \"session-name\")\n\tsession.Values[\"email\"] = email\n\tsession.Save(r, w)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package stow is used to persist objects to a bolt.DB database.\npackage stow\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"sync\"\n\n\t\"github.com\/boltdb\/bolt\"\n)\n\nvar pool *sync.Pool = &sync.Pool{\n\tNew: func() interface{} { return bytes.NewBuffer(nil) },\n}\n\n\/\/ ErrNotFound indicates object is not in database.\nvar ErrNotFound = errors.New(\"not found\")\n\n\/\/ Store manages objects persistence.\ntype Store struct {\n\tdb *bolt.DB\n\tbucket []byte\n\tcodec Codec\n}\n\n\/\/ NewStore creates a new Store, using the underlying\n\/\/ bolt.DB \"bucket\" to persist objects.\n\/\/ NewStore uses GobEncoding, your objects must be registered\n\/\/ via gob.Register() for this encoding to work.\nfunc NewStore(db *bolt.DB, bucket []byte) *Store {\n\treturn NewCustomStore(db, bucket, GobCodec{})\n}\n\n\/\/ NewJSONStore creates a new Store, using the underlying\n\/\/ bolt.DB \"bucket\" to persist objects as json.\nfunc NewJSONStore(db *bolt.DB, bucket []byte) *Store {\n\treturn NewCustomStore(db, bucket, JSONCodec{})\n}\n\n\/\/ NewXMLStore creates a new Store, using the underlying\n\/\/ bolt.DB \"bucket\" to persist objects as xml.\nfunc NewXMLStore(db *bolt.DB, bucket []byte) *Store {\n\treturn NewCustomStore(db, bucket, XMLCodec{})\n}\n\n\/\/ NewCustomStore allows you to create a store with\n\/\/ a custom underlying Encoding\nfunc NewCustomStore(db *bolt.DB, bucket []byte, codec Codec) *Store {\n\treturn &Store{db: db, bucket: bucket, codec: codec}\n}\n\nfunc (s *Store) marshal(val interface{}) (data []byte, err error) {\n\tbuf := pool.Get().(*bytes.Buffer)\n\terr = s.codec.NewEncoder(buf).Encode(val)\n\tdata = append(data, buf.Bytes()...)\n\tbuf.Reset()\n\tpool.Put(buf)\n\n\treturn data, err\n}\n\nfunc (s *Store) unmarshal(data []byte, val interface{}) (err error) {\n\treturn s.codec.NewDecoder(bytes.NewReader(data)).Decode(val)\n}\n\nfunc (s *Store) toBytes(key interface{}) (keyBytes []byte, err error) {\n\tswitch k := key.(type) {\n\tcase string:\n\t\treturn []byte(k), nil\n\tcase []byte:\n\t\treturn k, nil\n\tdefault:\n\t\treturn s.marshal(key)\n\t}\n}\n\n\/\/ Put will store b with key \"key\". If key is []byte or string it uses the key\n\/\/ directly. Otherwise, it marshals the given type into bytes using the stores Encoder.\nfunc (s *Store) Put(key interface{}, b interface{}) error {\n\tkeyBytes, err := s.toBytes(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn s.put(keyBytes, b)\n}\n\n\/\/ Put will store b with key \"key\". If key is []byte or string it uses the key\n\/\/ directly. Otherwise, it marshals the given type into bytes using the stores Encoder.\nfunc (s *Store) put(key []byte, b interface{}) (err error) {\n\tvar data []byte\n\tdata, err = s.marshal(b)\n\n\treturn s.db.Update(func(tx *bolt.Tx) error {\n\t\tobjects, err := tx.CreateBucketIfNotExists(s.bucket)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tobjects.Put(key, data)\n\t\treturn nil\n\t})\n}\n\n\/\/ Pull will retrieve b with key \"key\", and removes it from the store.\nfunc (s *Store) Pull(key interface{}, b interface{}) error {\n\tkeyBytes, err := s.toBytes(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn s.pull(keyBytes, b)\n}\n\n\/\/ Pull will retrieve b with key \"key\", and removes it from the store.\nfunc (s *Store) pull(key []byte, b interface{}) error {\n\tbuf := pool.Get().(*bytes.Buffer)\n\tdefer func() {\n\t\tbuf.Reset()\n\t\tpool.Put(buf)\n\t}()\n\n\terr := s.db.Update(func(tx *bolt.Tx) error {\n\t\tobjects := tx.Bucket(s.bucket)\n\t\tif objects == nil {\n\t\t\treturn ErrNotFound\n\t\t}\n\n\t\tdata := objects.Get(key)\n\t\tif data == nil {\n\t\t\treturn ErrNotFound\n\t\t}\n\n\t\tbuf.Write(data)\n\t\tobjects.Delete(key)\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn s.unmarshal(buf.Bytes(), b)\n}\n\n\/\/ Get will retreive b with key \"key\". If key is []byte or string it uses the key\n\/\/ directly. Otherwise, it marshals the given type into bytes using the stores Encoder.\nfunc (s *Store) Get(key interface{}, b interface{}) error {\n\tkeyBytes, err := s.toBytes(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn s.get(keyBytes, b)\n}\n\n\/\/ Get will retreive b with key \"key\"\nfunc (s *Store) get(key []byte, b interface{}) error {\n\tbuf := bytes.NewBuffer(nil)\n\terr := s.db.Update(func(tx *bolt.Tx) error {\n\t\tobjects := tx.Bucket(s.bucket)\n\t\tif objects == nil {\n\t\t\treturn ErrNotFound\n\t\t}\n\t\tdata := objects.Get(key)\n\t\tif data == nil {\n\t\t\treturn ErrNotFound\n\t\t}\n\t\tbuf.Write(data)\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn s.unmarshal(buf.Bytes(), b)\n}\n\n\/\/ ForEach will run do on each object in the store.\n\/\/ do can be a function which takes either: 1 param which will take on each \"value\"\n\/\/ or 2 params where the first param is the \"key\" and the second is the \"value\".\nfunc (s *Store) ForEach(do interface{}) error {\n\tfc, err := newFuncCall(s, do)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn s.db.Update(func(tx *bolt.Tx) error {\n\t\tobjects := tx.Bucket(s.bucket)\n\t\tif objects == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn objects.ForEach(fc.call)\n\t})\n}\n\n\/\/ DeleteAll empties the store\nfunc (s *Store) DeleteAll() error {\n\treturn s.db.Update(func(tx *bolt.Tx) error {\n\t\treturn tx.DeleteBucket(s.bucket)\n\t})\n}\n<commit_msg>Use View instead of Update with ForEach and Get<commit_after>\/\/ Package stow is used to persist objects to a bolt.DB database.\npackage stow\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"sync\"\n\n\t\"github.com\/boltdb\/bolt\"\n)\n\nvar pool *sync.Pool = &sync.Pool{\n\tNew: func() interface{} { return bytes.NewBuffer(nil) },\n}\n\n\/\/ ErrNotFound indicates object is not in database.\nvar ErrNotFound = errors.New(\"not found\")\n\n\/\/ Store manages objects persistence.\ntype Store struct {\n\tdb *bolt.DB\n\tbucket []byte\n\tcodec Codec\n}\n\n\/\/ NewStore creates a new Store, using the underlying\n\/\/ bolt.DB \"bucket\" to persist objects.\n\/\/ NewStore uses GobEncoding, your objects must be registered\n\/\/ via gob.Register() for this encoding to work.\nfunc NewStore(db *bolt.DB, bucket []byte) *Store {\n\treturn NewCustomStore(db, bucket, GobCodec{})\n}\n\n\/\/ NewJSONStore creates a new Store, using the underlying\n\/\/ bolt.DB \"bucket\" to persist objects as json.\nfunc NewJSONStore(db *bolt.DB, bucket []byte) *Store {\n\treturn NewCustomStore(db, bucket, JSONCodec{})\n}\n\n\/\/ NewXMLStore creates a new Store, using the underlying\n\/\/ bolt.DB \"bucket\" to persist objects as xml.\nfunc NewXMLStore(db *bolt.DB, bucket []byte) *Store {\n\treturn NewCustomStore(db, bucket, XMLCodec{})\n}\n\n\/\/ NewCustomStore allows you to create a store with\n\/\/ a custom underlying Encoding\nfunc NewCustomStore(db *bolt.DB, bucket []byte, codec Codec) *Store {\n\treturn &Store{db: db, bucket: bucket, codec: codec}\n}\n\nfunc (s *Store) marshal(val interface{}) (data []byte, err error) {\n\tbuf := pool.Get().(*bytes.Buffer)\n\terr = s.codec.NewEncoder(buf).Encode(val)\n\tdata = append(data, buf.Bytes()...)\n\tbuf.Reset()\n\tpool.Put(buf)\n\n\treturn data, err\n}\n\nfunc (s *Store) unmarshal(data []byte, val interface{}) (err error) {\n\treturn s.codec.NewDecoder(bytes.NewReader(data)).Decode(val)\n}\n\nfunc (s *Store) toBytes(key interface{}) (keyBytes []byte, err error) {\n\tswitch k := key.(type) {\n\tcase string:\n\t\treturn []byte(k), nil\n\tcase []byte:\n\t\treturn k, nil\n\tdefault:\n\t\treturn s.marshal(key)\n\t}\n}\n\n\/\/ Put will store b with key \"key\". If key is []byte or string it uses the key\n\/\/ directly. Otherwise, it marshals the given type into bytes using the stores Encoder.\nfunc (s *Store) Put(key interface{}, b interface{}) error {\n\tkeyBytes, err := s.toBytes(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn s.put(keyBytes, b)\n}\n\n\/\/ Put will store b with key \"key\". If key is []byte or string it uses the key\n\/\/ directly. Otherwise, it marshals the given type into bytes using the stores Encoder.\nfunc (s *Store) put(key []byte, b interface{}) (err error) {\n\tvar data []byte\n\tdata, err = s.marshal(b)\n\n\treturn s.db.Update(func(tx *bolt.Tx) error {\n\t\tobjects, err := tx.CreateBucketIfNotExists(s.bucket)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tobjects.Put(key, data)\n\t\treturn nil\n\t})\n}\n\n\/\/ Pull will retrieve b with key \"key\", and removes it from the store.\nfunc (s *Store) Pull(key interface{}, b interface{}) error {\n\tkeyBytes, err := s.toBytes(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn s.pull(keyBytes, b)\n}\n\n\/\/ Pull will retrieve b with key \"key\", and removes it from the store.\nfunc (s *Store) pull(key []byte, b interface{}) error {\n\tbuf := pool.Get().(*bytes.Buffer)\n\tdefer func() {\n\t\tbuf.Reset()\n\t\tpool.Put(buf)\n\t}()\n\n\terr := s.db.Update(func(tx *bolt.Tx) error {\n\t\tobjects := tx.Bucket(s.bucket)\n\t\tif objects == nil {\n\t\t\treturn ErrNotFound\n\t\t}\n\n\t\tdata := objects.Get(key)\n\t\tif data == nil {\n\t\t\treturn ErrNotFound\n\t\t}\n\n\t\tbuf.Write(data)\n\t\tobjects.Delete(key)\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn s.unmarshal(buf.Bytes(), b)\n}\n\n\/\/ Get will retreive b with key \"key\". If key is []byte or string it uses the key\n\/\/ directly. Otherwise, it marshals the given type into bytes using the stores Encoder.\nfunc (s *Store) Get(key interface{}, b interface{}) error {\n\tkeyBytes, err := s.toBytes(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn s.get(keyBytes, b)\n}\n\n\/\/ Get will retreive b with key \"key\"\nfunc (s *Store) get(key []byte, b interface{}) error {\n\tbuf := bytes.NewBuffer(nil)\n\terr := s.db.View(func(tx *bolt.Tx) error {\n\t\tobjects := tx.Bucket(s.bucket)\n\t\tif objects == nil {\n\t\t\treturn ErrNotFound\n\t\t}\n\t\tdata := objects.Get(key)\n\t\tif data == nil {\n\t\t\treturn ErrNotFound\n\t\t}\n\t\tbuf.Write(data)\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn s.unmarshal(buf.Bytes(), b)\n}\n\n\/\/ ForEach will run do on each object in the store.\n\/\/ do can be a function which takes either: 1 param which will take on each \"value\"\n\/\/ or 2 params where the first param is the \"key\" and the second is the \"value\".\nfunc (s *Store) ForEach(do interface{}) error {\n\tfc, err := newFuncCall(s, do)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn s.db.View(func(tx *bolt.Tx) error {\n\t\tobjects := tx.Bucket(s.bucket)\n\t\tif objects == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn objects.ForEach(fc.call)\n\t})\n}\n\n\/\/ DeleteAll empties the store\nfunc (s *Store) DeleteAll() error {\n\treturn s.db.Update(func(tx *bolt.Tx) error {\n\t\treturn tx.DeleteBucket(s.bucket)\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package ecql\n\nimport (\n\t\"strings\"\n\n\t\"fmt\"\n)\n\ntype queryType int\n\nconst (\n\tselectQuery = iota\n\tinsertQuery\n\tdeleteQuery\n\tupdateQuery\n\tcountQuery\n)\n\n\/\/ Table contains the information of a table in cassandra.\ntype Table struct {\n\tName string\n\tKeyColumns []string\n\tColumns []Column\n}\n\n\/\/ Column contains the information of a column in a table required\n\/\/ to create a map for it. Every element of position represents an\n\/\/ anonymous nesting--with a single position representing an immediate named field\ntype Column struct {\n\tName string\n\tPosition []int\n}\n\nfunc (t *Table) BuildQuery(qt queryType) (string, error) {\n\tvar cql string\n\tswitch qt {\n\tcase selectQuery:\n\t\tcql = fmt.Sprintf(\"SELECT %s FROM %s WHERE %s\", t.getCols(), t.Name, appendCols(t.KeyColumns))\n\tcase insertQuery:\n\t\tcql = fmt.Sprintf(\"INSERT INTO %s (%s) VALUES (%s)\", t.Name, t.getCols(), t.getQms())\n\tcase deleteQuery:\n\t\tcql = fmt.Sprintf(\"DELETE FROM %s WHERE %s\", t.Name, appendCols(t.KeyColumns))\n\tcase updateQuery:\n\t\t\/\/ cql = \"UPDATE %s WHERE %s = ?\"\n\t\treturn \"\", ErrInvalidQueryType\n\tcase countQuery:\n\t\tcql = fmt.Sprintf(\"SELECT COUNT(1) FROM %s WHERE %s\", t.Name, appendCols(t.KeyColumns))\n\tdefault:\n\t\treturn \"\", ErrInvalidQueryType\n\t}\n\n\treturn cql, nil\n}\n\nfunc (t *Table) getCols() string {\n\tnames := make([]string, len(t.Columns))\n\tfor i := range t.Columns {\n\t\tnames[i] = t.Columns[i].Name\n\t}\n\treturn strings.Join(names, \",\")\n}\n\nfunc (t *Table) getQms() string {\n\treturn qms(len(t.Columns))\n}\n\nfunc appendCols(cols []string) string {\n\tparts := make([]string, len(cols))\n\tfor i := range cols {\n\t\tparts[i] = fmt.Sprintf(\"%s = ?\", cols[i])\n\t}\n\treturn strings.Join(parts, \" AND \")\n}\n<commit_msg>matt --better explanation for position<commit_after>package ecql\n\nimport (\n\t\"strings\"\n\n\t\"fmt\"\n)\n\ntype queryType int\n\nconst (\n\tselectQuery = iota\n\tinsertQuery\n\tdeleteQuery\n\tupdateQuery\n\tcountQuery\n)\n\n\/\/ Table contains the information of a table in cassandra.\ntype Table struct {\n\tName string\n\tKeyColumns []string\n\tColumns []Column\n}\n\n\/\/ Column contains the information of a column in a table required\n\/\/ to create a map for it.\n\/\/ Every element of position represents its order in a hierarchy of nested structs\ntype Column struct {\n\tName string\n\tPosition []int\n}\n\nfunc (t *Table) BuildQuery(qt queryType) (string, error) {\n\tvar cql string\n\tswitch qt {\n\tcase selectQuery:\n\t\tcql = fmt.Sprintf(\"SELECT %s FROM %s WHERE %s\", t.getCols(), t.Name, appendCols(t.KeyColumns))\n\tcase insertQuery:\n\t\tcql = fmt.Sprintf(\"INSERT INTO %s (%s) VALUES (%s)\", t.Name, t.getCols(), t.getQms())\n\tcase deleteQuery:\n\t\tcql = fmt.Sprintf(\"DELETE FROM %s WHERE %s\", t.Name, appendCols(t.KeyColumns))\n\tcase updateQuery:\n\t\t\/\/ cql = \"UPDATE %s WHERE %s = ?\"\n\t\treturn \"\", ErrInvalidQueryType\n\tcase countQuery:\n\t\tcql = fmt.Sprintf(\"SELECT COUNT(1) FROM %s WHERE %s\", t.Name, appendCols(t.KeyColumns))\n\tdefault:\n\t\treturn \"\", ErrInvalidQueryType\n\t}\n\n\treturn cql, nil\n}\n\nfunc (t *Table) getCols() string {\n\tnames := make([]string, len(t.Columns))\n\tfor i := range t.Columns {\n\t\tnames[i] = t.Columns[i].Name\n\t}\n\treturn strings.Join(names, \",\")\n}\n\nfunc (t *Table) getQms() string {\n\treturn qms(len(t.Columns))\n}\n\nfunc appendCols(cols []string) string {\n\tparts := make([]string, len(cols))\n\tfor i := range cols {\n\t\tparts[i] = fmt.Sprintf(\"%s = ?\", cols[i])\n\t}\n\treturn strings.Join(parts, \" AND \")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Zack Guo <zack.y.guo@gmail.com>. All rights reserved.\n\/\/ Use of this source code is governed by a MIT license that can\n\/\/ be found in the LICENSE file.\n\npackage termui\n\nimport \"strings\"\n\n\/* Table is like:\n\n┌Awesome Table ────────────────────────────────────────────────┐\n│ Col0 | Col1 | Col2 | Col3 | Col4 | Col5 | Col6 |\n│──────────────────────────────────────────────────────────────│\n│ Some Item #1 | AAA | 123 | CCCCC | EEEEE | GGGGG | IIIII |\n│──────────────────────────────────────────────────────────────│\n│ Some Item #2 | BBB | 456 | DDDDD | FFFFF | HHHHH | JJJJJ |\n└──────────────────────────────────────────────────────────────┘\n\nDatapoints are a two dimensional array of strings: [][]string\n\nExample:\n\tdata := [][]string{\n\t\t{\"Col0\", \"Col1\", \"Col3\", \"Col4\", \"Col5\", \"Col6\"},\n\t\t{\"Some Item #1\", \"AAA\", \"123\", \"CCCCC\", \"EEEEE\", \"GGGGG\", \"IIIII\"},\n\t\t{\"Some Item #2\", \"BBB\", \"456\", \"DDDDD\", \"FFFFF\", \"HHHHH\", \"JJJJJ\"},\n\t}\n\n\ttable := termui.NewTable()\n\ttable.Rows = data \/\/ type [][]string\n\ttable.FgColor = termui.ColorWhite\n\ttable.BgColor = termui.ColorDefault\n\ttable.Height = 7\n\ttable.Width = 62\n\ttable.Y = 0\n\ttable.X = 0\n\ttable.Border = true\n*\/\n\n\/\/ Table tracks all the attributes of a Table instance\ntype Table struct {\n\tBlock\n\tRows [][]string\n\tCellWidth []int\n\tFgColor Attribute\n\tBgColor Attribute\n\tFgColors []Attribute\n\tBgColors []Attribute\n\tSeparator bool\n\tTextAlign Align\n}\n\n\/\/ NewTable returns a new Table instance\nfunc NewTable() *Table {\n\ttable := &Table{Block: *NewBlock()}\n\ttable.FgColor = ColorWhite\n\ttable.BgColor = ColorDefault\n\ttable.Separator = true\n\treturn table\n}\n\n\/\/ CellsWidth calculates the width of a cell array and returns an int\nfunc cellsWidth(cells []Cell) int {\n\twidth := 0\n\tfor _, c := range cells {\n\t\twidth += c.Width()\n\t}\n\treturn width\n}\n\n\/\/ Analysis generates and returns an array of []Cell that represent all columns in the Table\nfunc (table *Table) Analysis() [][]Cell {\n\tvar rowCells [][]Cell\n\tlength := len(table.Rows)\n\tif length < 1 {\n\t\treturn rowCells\n\t}\n\n\tif len(table.FgColors) == 0 {\n\t\ttable.FgColors = make([]Attribute, len(table.Rows))\n\t}\n\tif len(table.BgColors) == 0 {\n\t\ttable.BgColors = make([]Attribute, len(table.Rows))\n\t}\n\n\tcellWidths := make([]int, len(table.Rows[0]))\n\n\tfor y, row := range table.Rows {\n\t\tif table.FgColors[y] == 0 {\n\t\t\ttable.FgColors[y] = table.FgColor\n\t\t}\n\t\tif table.BgColors[y] == 0 {\n\t\t\ttable.BgColors[y] = table.BgColor\n\t\t}\n\t\tfor x, str := range row {\n\t\t\tcells := DefaultTxBuilder.Build(str, table.FgColors[y], table.BgColors[y])\n\t\t\tcw := cellsWidth(cells)\n\t\t\tif cellWidths[x] < cw {\n\t\t\t\tcellWidths[x] = cw\n\t\t\t}\n\t\t\trowCells = append(rowCells, cells)\n\t\t}\n\t}\n\ttable.CellWidth = cellWidths\n\treturn rowCells\n}\n\n\/\/ SetSize calculates the table size and sets the internal value\nfunc (table *Table) SetSize() {\n\tlength := len(table.Rows)\n\tif table.Separator {\n\t\ttable.Height = length*2 + 1\n\t} else {\n\t\ttable.Height = length + 2\n\t}\n\ttable.Width = 2\n\tif length != 0 {\n\t\tfor _, cellWidth := range table.CellWidth {\n\t\t\ttable.Width += cellWidth + 3\n\t\t}\n\t}\n}\n\n\/\/ CalculatePosition ...\nfunc (table *Table) CalculatePosition(x int, y int, coordinateX *int, coordinateY *int, cellStart *int) {\n\tif table.Separator {\n\t\t*coordinateY = table.innerArea.Min.Y + y*2\n\t} else {\n\t\t*coordinateY = table.innerArea.Min.Y + y\n\t}\n\tif x == 0 {\n\t\t*cellStart = table.innerArea.Min.X\n\t} else {\n\t\t*cellStart += table.CellWidth[x-1] + 3\n\t}\n\n\tswitch table.TextAlign {\n\tcase AlignRight:\n\t\t*coordinateX = *cellStart + (table.CellWidth[x] - len(table.Rows[y][x])) + 2\n\tcase AlignCenter:\n\t\t*coordinateX = *cellStart + (table.CellWidth[x]-len(table.Rows[y][x]))\/2 + 2\n\tdefault:\n\t\t*coordinateX = *cellStart + 2\n\t}\n}\n\n\/\/ Buffer ...\nfunc (table *Table) Buffer() Buffer {\n\tbuffer := table.Block.Buffer()\n\trowCells := table.Analysis()\n\tpointerX := table.innerArea.Min.X + 2\n\tpointerY := table.innerArea.Min.Y\n\tborderPointerX := table.innerArea.Min.X\n\tfor y, row := range table.Rows {\n\t\tfor x := range row {\n\t\t\ttable.CalculatePosition(x, y, &pointerX, &pointerY, &borderPointerX)\n\t\t\tbackground := DefaultTxBuilder.Build(strings.Repeat(\" \", table.CellWidth[x]+3), table.BgColors[y], table.BgColors[y])\n\t\t\tcells := rowCells[y*len(row)+x]\n\t\t\tfor i, back := range background {\n\t\t\t\tbuffer.Set(borderPointerX+i, pointerY, back)\n\t\t\t}\n\n\t\t\tcoordinateX := pointerX\n\t\t\tfor _, printer := range cells {\n\t\t\t\tbuffer.Set(coordinateX, pointerY, printer)\n\t\t\t\tcoordinateX += printer.Width()\n\t\t\t}\n\n\t\t\tif x != 0 {\n\t\t\t\tdividors := DefaultTxBuilder.Build(\"|\", table.FgColors[y], table.BgColors[y])\n\t\t\t\tfor _, dividor := range dividors {\n\t\t\t\t\tbuffer.Set(borderPointerX, pointerY, dividor)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif table.Separator {\n\t\t\tborder := DefaultTxBuilder.Build(strings.Repeat(\"─\", table.Width-2), table.FgColor, table.BgColor)\n\t\t\tfor i, cell := range border {\n\t\t\t\tbuffer.Set(i+1, pointerY+1, cell)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn buffer\n}\n<commit_msg>Fix table horizontal separator bug.<commit_after>\/\/ Copyright 2017 Zack Guo <zack.y.guo@gmail.com>. All rights reserved.\n\/\/ Use of this source code is governed by a MIT license that can\n\/\/ be found in the LICENSE file.\n\npackage termui\n\nimport \"strings\"\n\n\/* Table is like:\n\n┌Awesome Table ────────────────────────────────────────────────┐\n│ Col0 | Col1 | Col2 | Col3 | Col4 | Col5 | Col6 |\n│──────────────────────────────────────────────────────────────│\n│ Some Item #1 | AAA | 123 | CCCCC | EEEEE | GGGGG | IIIII |\n│──────────────────────────────────────────────────────────────│\n│ Some Item #2 | BBB | 456 | DDDDD | FFFFF | HHHHH | JJJJJ |\n└──────────────────────────────────────────────────────────────┘\n\nDatapoints are a two dimensional array of strings: [][]string\n\nExample:\n\tdata := [][]string{\n\t\t{\"Col0\", \"Col1\", \"Col3\", \"Col4\", \"Col5\", \"Col6\"},\n\t\t{\"Some Item #1\", \"AAA\", \"123\", \"CCCCC\", \"EEEEE\", \"GGGGG\", \"IIIII\"},\n\t\t{\"Some Item #2\", \"BBB\", \"456\", \"DDDDD\", \"FFFFF\", \"HHHHH\", \"JJJJJ\"},\n\t}\n\n\ttable := termui.NewTable()\n\ttable.Rows = data \/\/ type [][]string\n\ttable.FgColor = termui.ColorWhite\n\ttable.BgColor = termui.ColorDefault\n\ttable.Height = 7\n\ttable.Width = 62\n\ttable.Y = 0\n\ttable.X = 0\n\ttable.Border = true\n*\/\n\n\/\/ Table tracks all the attributes of a Table instance\ntype Table struct {\n\tBlock\n\tRows [][]string\n\tCellWidth []int\n\tFgColor Attribute\n\tBgColor Attribute\n\tFgColors []Attribute\n\tBgColors []Attribute\n\tSeparator bool\n\tTextAlign Align\n}\n\n\/\/ NewTable returns a new Table instance\nfunc NewTable() *Table {\n\ttable := &Table{Block: *NewBlock()}\n\ttable.FgColor = ColorWhite\n\ttable.BgColor = ColorDefault\n\ttable.Separator = true\n\treturn table\n}\n\n\/\/ CellsWidth calculates the width of a cell array and returns an int\nfunc cellsWidth(cells []Cell) int {\n\twidth := 0\n\tfor _, c := range cells {\n\t\twidth += c.Width()\n\t}\n\treturn width\n}\n\n\/\/ Analysis generates and returns an array of []Cell that represent all columns in the Table\nfunc (table *Table) Analysis() [][]Cell {\n\tvar rowCells [][]Cell\n\tlength := len(table.Rows)\n\tif length < 1 {\n\t\treturn rowCells\n\t}\n\n\tif len(table.FgColors) == 0 {\n\t\ttable.FgColors = make([]Attribute, len(table.Rows))\n\t}\n\tif len(table.BgColors) == 0 {\n\t\ttable.BgColors = make([]Attribute, len(table.Rows))\n\t}\n\n\tcellWidths := make([]int, len(table.Rows[0]))\n\n\tfor y, row := range table.Rows {\n\t\tif table.FgColors[y] == 0 {\n\t\t\ttable.FgColors[y] = table.FgColor\n\t\t}\n\t\tif table.BgColors[y] == 0 {\n\t\t\ttable.BgColors[y] = table.BgColor\n\t\t}\n\t\tfor x, str := range row {\n\t\t\tcells := DefaultTxBuilder.Build(str, table.FgColors[y], table.BgColors[y])\n\t\t\tcw := cellsWidth(cells)\n\t\t\tif cellWidths[x] < cw {\n\t\t\t\tcellWidths[x] = cw\n\t\t\t}\n\t\t\trowCells = append(rowCells, cells)\n\t\t}\n\t}\n\ttable.CellWidth = cellWidths\n\treturn rowCells\n}\n\n\/\/ SetSize calculates the table size and sets the internal value\nfunc (table *Table) SetSize() {\n\tlength := len(table.Rows)\n\tif table.Separator {\n\t\ttable.Height = length*2 + 1\n\t} else {\n\t\ttable.Height = length + 2\n\t}\n\ttable.Width = 2\n\tif length != 0 {\n\t\tfor _, cellWidth := range table.CellWidth {\n\t\t\ttable.Width += cellWidth + 3\n\t\t}\n\t}\n}\n\n\/\/ CalculatePosition ...\nfunc (table *Table) CalculatePosition(x int, y int, coordinateX *int, coordinateY *int, cellStart *int) {\n\tif table.Separator {\n\t\t*coordinateY = table.innerArea.Min.Y + y*2\n\t} else {\n\t\t*coordinateY = table.innerArea.Min.Y + y\n\t}\n\tif x == 0 {\n\t\t*cellStart = table.innerArea.Min.X\n\t} else {\n\t\t*cellStart += table.CellWidth[x-1] + 3\n\t}\n\n\tswitch table.TextAlign {\n\tcase AlignRight:\n\t\t*coordinateX = *cellStart + (table.CellWidth[x] - len(table.Rows[y][x])) + 2\n\tcase AlignCenter:\n\t\t*coordinateX = *cellStart + (table.CellWidth[x]-len(table.Rows[y][x]))\/2 + 2\n\tdefault:\n\t\t*coordinateX = *cellStart + 2\n\t}\n}\n\n\/\/ Buffer ...\nfunc (table *Table) Buffer() Buffer {\n\tbuffer := table.Block.Buffer()\n\trowCells := table.Analysis()\n\tpointerX := table.innerArea.Min.X + 2\n\tpointerY := table.innerArea.Min.Y\n\tborderPointerX := table.innerArea.Min.X\n\tfor y, row := range table.Rows {\n\t\tfor x := range row {\n\t\t\ttable.CalculatePosition(x, y, &pointerX, &pointerY, &borderPointerX)\n\t\t\tbackground := DefaultTxBuilder.Build(strings.Repeat(\" \", table.CellWidth[x]+3), table.BgColors[y], table.BgColors[y])\n\t\t\tcells := rowCells[y*len(row)+x]\n\t\t\tfor i, back := range background {\n\t\t\t\tbuffer.Set(borderPointerX+i, pointerY, back)\n\t\t\t}\n\n\t\t\tcoordinateX := pointerX\n\t\t\tfor _, printer := range cells {\n\t\t\t\tbuffer.Set(coordinateX, pointerY, printer)\n\t\t\t\tcoordinateX += printer.Width()\n\t\t\t}\n\n\t\t\tif x != 0 {\n\t\t\t\tdividors := DefaultTxBuilder.Build(\"|\", table.FgColors[y], table.BgColors[y])\n\t\t\t\tfor _, dividor := range dividors {\n\t\t\t\t\tbuffer.Set(borderPointerX, pointerY, dividor)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif table.Separator {\n\t\t\tborder := DefaultTxBuilder.Build(strings.Repeat(\"─\", table.Width-2), table.FgColor, table.BgColor)\n\t\t\tfor i, cell := range border {\n\t\t\t\tbuffer.Set(table.innerArea.Min.X+i, pointerY+1, cell)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn buffer\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ pushk pushes a new version of an app.\n\/\/\n\/\/ pushk\n\/\/ pushk docserver\n\/\/ pushk --rollback docserver\n\/\/ pushk --project=skia-public docserver\n\/\/ pushk --rollback --project=skia-public docserver\n\/\/ pushk --dry-run my-new-service\npackage main\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"go.skia.org\/infra\/go\/auth\"\n\t\"go.skia.org\/infra\/go\/common\"\n\t\"go.skia.org\/infra\/go\/exec\"\n\t\"go.skia.org\/infra\/go\/gcr\"\n\t\"go.skia.org\/infra\/go\/git\"\n\t\"go.skia.org\/infra\/go\/sklog\"\n\t\"go.skia.org\/infra\/go\/util\"\n)\n\nconst (\n\trepoUrlTemplate = \"https:\/\/skia.googlesource.com\/%s-config\"\n\trepoBaseDir = \"\/tmp\"\n\trepoDirTemplate = \"\/tmp\/%s-config\"\n)\n\nfunc init() {\n\tflag.Usage = func() {\n\t\tfmt.Printf(\"Usage: pushk <flags> [zero or more image names]\\n\\n\")\n\t\tfmt.Printf(`pushk pushes a new version of an app.\n\nThe command:\n 1. Modifies the kubernetes yaml files with the new image.\n 2. Commits the changes to the config repo.\n 3. Applies the changes with kubectl.\n\nThe config is stored in a separate repo that will automaticaly be checked out\nunder \/tmp.\n\nThe command applies the changes by default, or just changes the local yaml files\nif --dry-run is supplied.\n\nIf no image names are supplied then pushk looks through all the yaml files for\nappropriate images (ones that match the SERVER and project) and tries to push a\nnew image for each of them.\n\nExamples:\n # Push the latest version of all images from the given container repository.\n pushk\n\n # Pusk an exact tag.\n pushk gcr.io\/skia-public\/fiddler:694900e3ca9468784a5794dc53382d1c8411ab07\n\n # Push the latest version of docserver.\n pushk docserver --message=\"Fix bug #1234\"\n\n # Push the latest version of docserver and iap-proxy\n pushk docserver iap-proxy\n\n # Rollback docserver.\n pushk --rollback docserver\n\n # Compute any changes a push to docserver will make, but do not apply them.\n pushk --dry-run docserver\n\n`)\n\t\tflag.PrintDefaults()\n\t}\n}\n\n\/\/ toFullRepoURL converts the project name into a git repo URL.\nfunc toFullRepoURL(s string) string {\n\treturn fmt.Sprintf(repoUrlTemplate, s)\n\n}\n\n\/\/ toRepoDir converts the project name into a git repo directory name.\nfunc toRepoDir(s string) string {\n\treturn fmt.Sprintf(repoDirTemplate, s)\n}\n\n\/\/ flags\nvar (\n\tdryRun = flag.Bool(\"dry-run\", false, \"If true then do not run the kubectl command to apply the changes, and do not commit the changes to the config repo.\")\n\tmessage = flag.String(\"message\", \"Push\", \"Message to go along with the change.\")\n\tproject = flag.String(\"project\", \"skia-public\", \"The GCE project name.\")\n\trollback = flag.Bool(\"rollback\", false, \"If true go back to the second most recent image, otherwise use most recent image.\")\n)\n\nvar (\n\tvalidTag = regexp.MustCompile(`^\\d\\d\\d\\d-\\d\\d-\\d\\dT\\d\\d_\\d\\d_\\d\\dZ-.+$`)\n)\n\n\/\/ filter strips the list of tags down to only the ones that conform to our\n\/\/ constraints and also checks that there are enough tags. The results\n\/\/ are sorted in ascending order, so oldest tags are first, newest tags\n\/\/ are last.\nfunc filter(tags []string) ([]string, error) {\n\tvalidTags := []string{}\n\tfor _, t := range tags {\n\t\tif validTag.MatchString(t) {\n\t\t\tvalidTags = append(validTags, t)\n\t\t}\n\t}\n\tsort.Strings(validTags)\n\tif len(validTags) == 0 {\n\t\treturn nil, fmt.Errorf(\"Not enough tags returned.\")\n\t}\n\treturn validTags, nil\n}\n\n\/\/ findAllImageNames searches for all the images that come from the given\n\/\/ project container registry across all the yaml files listed in filenames.\nfunc findAllImageNames(filenames []string, server, project string) []string {\n\t\/\/ allImageRegex has the following groups returned on match:\n\t\/\/ 0 - the entire line\n\t\/\/ 1 - the image name\n\tallImageRegex := regexp.MustCompile(fmt.Sprintf(`(?m)^\\s+image:\\s+%s\/%s\/([^:]+):\\S+\\s*$`, server, project))\n\tfilenameSet := util.StringSet{}\n\tfor _, filename := range filenames {\n\t\tb, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\tsklog.Errorf(\"Failed to read %q (skipping): %s\", filename, err)\n\t\t\tcontinue\n\t\t}\n\t\tmatches := allImageRegex.FindAllStringSubmatch(string(b), -1)\n\t\tfor _, m := range matches {\n\t\t\tfilenameSet[m[1]] = true\n\t\t}\n\t}\n\treturn filenameSet.Keys()\n}\n\n\/\/ tagProvider is a type that returns the correct tag to push for the given imageName.\ntype tagProvider func(imageName string) ([]string, error)\n\n\/\/ imageFromCmdLineImage handles image names, which can be either short, ala 'fiddler', or exact,\n\/\/ such as gcr.io\/skia-public\/fiddler:694900e3ca9468784a5794dc53382d1c8411ab07, both of which can\n\/\/ appear on the command-line.\nfunc imageFromCmdLineImage(imageName string, tp tagProvider) (string, error) {\n\tif strings.HasPrefix(imageName, \"gcr.io\/\") {\n\t\tif *rollback {\n\t\t\treturn \"\", fmt.Errorf(\"Supplying a fully qualified image name and the --rollback flag are mutually exclusive.\")\n\t\t}\n\t\treturn imageName, nil\n\t}\n\t\/\/ Get all the tags for the selected image.\n\ttags, err := tp(imageName)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Tag provider failed: %s\", err)\n\t}\n\n\t\/\/ Filter the tags\n\ttags, err = filter(tags)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to filter: %s\", err)\n\t}\n\n\t\/\/ Pick the target tag we want to move to.\n\ttag := tags[len(tags)-1]\n\tif *rollback {\n\t\tif len(tags) < 2 {\n\t\t\treturn \"\", fmt.Errorf(\"No version to rollback to.\")\n\t\t}\n\t\ttag = tags[len(tags)-2]\n\t}\n\n\t\/\/ The full docker image name and tag of the image we want to deploy.\n\treturn fmt.Sprintf(\"%s\/%s\/%s:%s\", gcr.SERVER, *project, imageName, tag), nil\n}\n\nfunc main() {\n\tcommon.Init()\n\n\tctx := context.Background()\n\trepoDir := toRepoDir(*project)\n\tcheckout, err := git.NewCheckout(ctx, toFullRepoURL(*project), repoBaseDir)\n\tif err != nil {\n\t\tsklog.Fatalf(\"Failed to check out config repo: %s\", err)\n\t}\n\tif err := checkout.Update(ctx); err != nil {\n\t\tsklog.Fatalf(\"Failed to update repo: %s\", err)\n\t}\n\n\t\/\/ Get all the yaml files.\n\tfilenames, err := filepath.Glob(filepath.Join(repoDir, \"*.yaml\"))\n\tif err != nil {\n\t\tsklog.Fatal(err)\n\t}\n\n\ttokenSource := auth.NewGCloudTokenSource(*project)\n\timageNames := flag.Args()\n\tif len(imageNames) == 0 {\n\t\timageNames = findAllImageNames(filenames, gcr.SERVER, *project)\n\t\tif len(imageNames) == 0 {\n\t\t\tfmt.Printf(\"Failed to find any images that match kubernetes directory: %q and project: %q.\", repoDir, *project)\n\t\t\tflag.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\tsklog.Infof(\"Pushing the following images: %q\", imageNames)\n\n\tgcrTagProvider := func(imageName string) ([]string, error) {\n\t\treturn gcr.NewClient(tokenSource, *project, imageName).Tags()\n\t}\n\n\tchanged := util.StringSet{}\n\tfor _, imageName := range imageNames {\n\t\timage, err := imageFromCmdLineImage(imageName, gcrTagProvider)\n\t\tif err != nil {\n\t\t\tsklog.Fatal(err)\n\t\t}\n\n\t\t\/\/ imageRegex has the following groups returned on match:\n\t\t\/\/ 0 - the entire line\n\t\t\/\/ 1 - the prefix, i.e. image:, with correct spacing.\n\t\t\/\/ 2 - full image name\n\t\t\/\/ 3 - just the tag\n\t\t\/\/\n\t\t\/\/ We pull out the 'prefix' so we can use it when\n\t\t\/\/ we rewrite the image: line so the indent level is\n\t\t\/\/ unchanged.\n\t\tparts := strings.SplitN(image, \":\", 2)\n\t\tif len(parts) != 2 {\n\t\t\tsklog.Fatalf(\"Failed to split imageName: %v\", parts)\n\t\t}\n\t\timageNoTag := parts[0]\n\t\timageRegex := regexp.MustCompile(fmt.Sprintf(`^(\\s+image:\\s+)(%s):.*$`, imageNoTag))\n\n\t\t\/\/ Loop over all the yaml files and update tags for the given imageName.\n\t\tfor _, filename := range filenames {\n\t\t\tb, err := ioutil.ReadFile(filename)\n\t\t\tif err != nil {\n\t\t\t\tsklog.Errorf(\"Failed to read %q (skipping): %s\", filename, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlines := strings.Split(string(b), \"\\n\")\n\t\t\tfor i, line := range lines {\n\t\t\t\tmatches := imageRegex.FindStringSubmatch(line)\n\t\t\t\tif len(matches) != 3 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tchanged[filename] = true\n\t\t\t\tlines[i] = matches[1] + image\n\t\t\t}\n\t\t\tif changed[filename] {\n\t\t\t\terr := util.WithWriteFile(filename, func(w io.Writer) error {\n\t\t\t\t\t_, err := w.Write([]byte(strings.Join(lines, \"\\n\")))\n\t\t\t\t\treturn err\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tsklog.Fatalf(\"Failed to write update config file %q: %s\", filename, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Were any files updated?\n\tif len(changed) != 0 {\n\t\tfilenameFlag := fmt.Sprintf(\"--filename=%s\\n\", strings.Join(changed.Keys(), \",\"))\n\t\tif !*dryRun {\n\t\t\tfor filename, _ := range changed {\n\t\t\t\tmsg, err := checkout.Git(ctx, \"add\", filepath.Base(filename))\n\t\t\t\tif err != nil {\n\t\t\t\t\tsklog.Fatalf(\"Failed to stage changes to the config repo: %s: %q\", err, msg)\n\t\t\t\t}\n\t\t\t}\n\t\t\tmsg, err := checkout.Git(ctx, \"diff\", \"--name-only\")\n\t\t\tif err != nil {\n\t\t\t\tsklog.Fatalf(\"Failed to diff :%s: %q\", err, msg)\n\t\t\t}\n\t\t\tif msg == \"\" {\n\t\t\t\tsklog.Infof(\"Not pushing since no files changed.\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tmsg, err = checkout.Git(ctx, \"commit\", \"-m\", *message)\n\t\t\tif err != nil {\n\t\t\t\tsklog.Fatalf(\"Failed to commit to the config repo: %s: %q\", err, msg)\n\t\t\t}\n\t\t\tmsg, err = checkout.Git(ctx, \"push\", \"origin\", \"master\")\n\t\t\tif err != nil {\n\t\t\t\tsklog.Fatalf(\"Failed to push the config repo: %s: %q\", err, msg)\n\t\t\t}\n\t\t\tif err := exec.Run(context.Background(), &exec.Command{\n\t\t\t\tName: \"kubectl\",\n\t\t\t\tArgs: []string{\"apply\", filenameFlag},\n\t\t\t\tLogStderr: true,\n\t\t\t\tLogStdout: true,\n\t\t\t}); err != nil {\n\t\t\t\tsklog.Errorf(\"Failed to run: %s\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Printf(\"\\nkubectl apply %s\\n\", filenameFlag)\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Nothing to do.\")\n\t}\n}\n<commit_msg>pushk - Fix for detecting file changes.<commit_after>\/\/ pushk pushes a new version of an app.\n\/\/\n\/\/ pushk\n\/\/ pushk docserver\n\/\/ pushk --rollback docserver\n\/\/ pushk --project=skia-public docserver\n\/\/ pushk --rollback --project=skia-public docserver\n\/\/ pushk --dry-run my-new-service\npackage main\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"go.skia.org\/infra\/go\/auth\"\n\t\"go.skia.org\/infra\/go\/common\"\n\t\"go.skia.org\/infra\/go\/exec\"\n\t\"go.skia.org\/infra\/go\/gcr\"\n\t\"go.skia.org\/infra\/go\/git\"\n\t\"go.skia.org\/infra\/go\/sklog\"\n\t\"go.skia.org\/infra\/go\/util\"\n)\n\nconst (\n\trepoUrlTemplate = \"https:\/\/skia.googlesource.com\/%s-config\"\n\trepoBaseDir = \"\/tmp\"\n\trepoDirTemplate = \"\/tmp\/%s-config\"\n)\n\nfunc init() {\n\tflag.Usage = func() {\n\t\tfmt.Printf(\"Usage: pushk <flags> [zero or more image names]\\n\\n\")\n\t\tfmt.Printf(`pushk pushes a new version of an app.\n\nThe command:\n 1. Modifies the kubernetes yaml files with the new image.\n 2. Commits the changes to the config repo.\n 3. Applies the changes with kubectl.\n\nThe config is stored in a separate repo that will automaticaly be checked out\nunder \/tmp.\n\nThe command applies the changes by default, or just changes the local yaml files\nif --dry-run is supplied.\n\nIf no image names are supplied then pushk looks through all the yaml files for\nappropriate images (ones that match the SERVER and project) and tries to push a\nnew image for each of them.\n\nExamples:\n # Push the latest version of all images from the given container repository.\n pushk\n\n # Pusk an exact tag.\n pushk gcr.io\/skia-public\/fiddler:694900e3ca9468784a5794dc53382d1c8411ab07\n\n # Push the latest version of docserver.\n pushk docserver --message=\"Fix bug #1234\"\n\n # Push the latest version of docserver and iap-proxy\n pushk docserver iap-proxy\n\n # Rollback docserver.\n pushk --rollback docserver\n\n # Compute any changes a push to docserver will make, but do not apply them.\n pushk --dry-run docserver\n\n`)\n\t\tflag.PrintDefaults()\n\t}\n}\n\n\/\/ toFullRepoURL converts the project name into a git repo URL.\nfunc toFullRepoURL(s string) string {\n\treturn fmt.Sprintf(repoUrlTemplate, s)\n\n}\n\n\/\/ toRepoDir converts the project name into a git repo directory name.\nfunc toRepoDir(s string) string {\n\treturn fmt.Sprintf(repoDirTemplate, s)\n}\n\n\/\/ flags\nvar (\n\tdryRun = flag.Bool(\"dry-run\", false, \"If true then do not run the kubectl command to apply the changes, and do not commit the changes to the config repo.\")\n\tmessage = flag.String(\"message\", \"Push\", \"Message to go along with the change.\")\n\tproject = flag.String(\"project\", \"skia-public\", \"The GCE project name.\")\n\trollback = flag.Bool(\"rollback\", false, \"If true go back to the second most recent image, otherwise use most recent image.\")\n)\n\nvar (\n\tvalidTag = regexp.MustCompile(`^\\d\\d\\d\\d-\\d\\d-\\d\\dT\\d\\d_\\d\\d_\\d\\dZ-.+$`)\n)\n\n\/\/ filter strips the list of tags down to only the ones that conform to our\n\/\/ constraints and also checks that there are enough tags. The results\n\/\/ are sorted in ascending order, so oldest tags are first, newest tags\n\/\/ are last.\nfunc filter(tags []string) ([]string, error) {\n\tvalidTags := []string{}\n\tfor _, t := range tags {\n\t\tif validTag.MatchString(t) {\n\t\t\tvalidTags = append(validTags, t)\n\t\t}\n\t}\n\tsort.Strings(validTags)\n\tif len(validTags) == 0 {\n\t\treturn nil, fmt.Errorf(\"Not enough tags returned.\")\n\t}\n\treturn validTags, nil\n}\n\n\/\/ findAllImageNames searches for all the images that come from the given\n\/\/ project container registry across all the yaml files listed in filenames.\nfunc findAllImageNames(filenames []string, server, project string) []string {\n\t\/\/ allImageRegex has the following groups returned on match:\n\t\/\/ 0 - the entire line\n\t\/\/ 1 - the image name\n\tallImageRegex := regexp.MustCompile(fmt.Sprintf(`(?m)^\\s+image:\\s+%s\/%s\/([^:]+):\\S+\\s*$`, server, project))\n\tfilenameSet := util.StringSet{}\n\tfor _, filename := range filenames {\n\t\tb, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\tsklog.Errorf(\"Failed to read %q (skipping): %s\", filename, err)\n\t\t\tcontinue\n\t\t}\n\t\tmatches := allImageRegex.FindAllStringSubmatch(string(b), -1)\n\t\tfor _, m := range matches {\n\t\t\tfilenameSet[m[1]] = true\n\t\t}\n\t}\n\treturn filenameSet.Keys()\n}\n\n\/\/ tagProvider is a type that returns the correct tag to push for the given imageName.\ntype tagProvider func(imageName string) ([]string, error)\n\n\/\/ imageFromCmdLineImage handles image names, which can be either short, ala 'fiddler', or exact,\n\/\/ such as gcr.io\/skia-public\/fiddler:694900e3ca9468784a5794dc53382d1c8411ab07, both of which can\n\/\/ appear on the command-line.\nfunc imageFromCmdLineImage(imageName string, tp tagProvider) (string, error) {\n\tif strings.HasPrefix(imageName, \"gcr.io\/\") {\n\t\tif *rollback {\n\t\t\treturn \"\", fmt.Errorf(\"Supplying a fully qualified image name and the --rollback flag are mutually exclusive.\")\n\t\t}\n\t\treturn imageName, nil\n\t}\n\t\/\/ Get all the tags for the selected image.\n\ttags, err := tp(imageName)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Tag provider failed: %s\", err)\n\t}\n\n\t\/\/ Filter the tags\n\ttags, err = filter(tags)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Failed to filter: %s\", err)\n\t}\n\n\t\/\/ Pick the target tag we want to move to.\n\ttag := tags[len(tags)-1]\n\tif *rollback {\n\t\tif len(tags) < 2 {\n\t\t\treturn \"\", fmt.Errorf(\"No version to rollback to.\")\n\t\t}\n\t\ttag = tags[len(tags)-2]\n\t}\n\n\t\/\/ The full docker image name and tag of the image we want to deploy.\n\treturn fmt.Sprintf(\"%s\/%s\/%s:%s\", gcr.SERVER, *project, imageName, tag), nil\n}\n\nfunc main() {\n\tcommon.Init()\n\n\tctx := context.Background()\n\trepoDir := toRepoDir(*project)\n\tcheckout, err := git.NewCheckout(ctx, toFullRepoURL(*project), repoBaseDir)\n\tif err != nil {\n\t\tsklog.Fatalf(\"Failed to check out config repo: %s\", err)\n\t}\n\tif err := checkout.Update(ctx); err != nil {\n\t\tsklog.Fatalf(\"Failed to update repo: %s\", err)\n\t}\n\n\t\/\/ Get all the yaml files.\n\tfilenames, err := filepath.Glob(filepath.Join(repoDir, \"*.yaml\"))\n\tif err != nil {\n\t\tsklog.Fatal(err)\n\t}\n\n\ttokenSource := auth.NewGCloudTokenSource(*project)\n\timageNames := flag.Args()\n\tif len(imageNames) == 0 {\n\t\timageNames = findAllImageNames(filenames, gcr.SERVER, *project)\n\t\tif len(imageNames) == 0 {\n\t\t\tfmt.Printf(\"Failed to find any images that match kubernetes directory: %q and project: %q.\", repoDir, *project)\n\t\t\tflag.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\tsklog.Infof(\"Pushing the following images: %q\", imageNames)\n\n\tgcrTagProvider := func(imageName string) ([]string, error) {\n\t\treturn gcr.NewClient(tokenSource, *project, imageName).Tags()\n\t}\n\n\tchanged := util.StringSet{}\n\tfor _, imageName := range imageNames {\n\t\timage, err := imageFromCmdLineImage(imageName, gcrTagProvider)\n\t\tif err != nil {\n\t\t\tsklog.Fatal(err)\n\t\t}\n\n\t\t\/\/ imageRegex has the following groups returned on match:\n\t\t\/\/ 0 - the entire line\n\t\t\/\/ 1 - the prefix, i.e. image:, with correct spacing.\n\t\t\/\/ 2 - full image name\n\t\t\/\/ 3 - just the tag\n\t\t\/\/\n\t\t\/\/ We pull out the 'prefix' so we can use it when\n\t\t\/\/ we rewrite the image: line so the indent level is\n\t\t\/\/ unchanged.\n\t\tparts := strings.SplitN(image, \":\", 2)\n\t\tif len(parts) != 2 {\n\t\t\tsklog.Fatalf(\"Failed to split imageName: %v\", parts)\n\t\t}\n\t\timageNoTag := parts[0]\n\t\timageRegex := regexp.MustCompile(fmt.Sprintf(`^(\\s+image:\\s+)(%s):.*$`, imageNoTag))\n\n\t\t\/\/ Loop over all the yaml files and update tags for the given imageName.\n\t\tfor _, filename := range filenames {\n\t\t\tb, err := ioutil.ReadFile(filename)\n\t\t\tif err != nil {\n\t\t\t\tsklog.Errorf(\"Failed to read %q (skipping): %s\", filename, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlines := strings.Split(string(b), \"\\n\")\n\t\t\tfor i, line := range lines {\n\t\t\t\tmatches := imageRegex.FindStringSubmatch(line)\n\t\t\t\tif len(matches) != 3 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tchanged[filename] = true\n\t\t\t\tlines[i] = matches[1] + image\n\t\t\t}\n\t\t\tif changed[filename] {\n\t\t\t\terr := util.WithWriteFile(filename, func(w io.Writer) error {\n\t\t\t\t\t_, err := w.Write([]byte(strings.Join(lines, \"\\n\")))\n\t\t\t\t\treturn err\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tsklog.Fatalf(\"Failed to write update config file %q: %s\", filename, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Were any files updated?\n\tif len(changed) != 0 {\n\t\tfilenameFlag := fmt.Sprintf(\"--filename=%s\\n\", strings.Join(changed.Keys(), \",\"))\n\t\tif !*dryRun {\n\t\t\tfor filename, _ := range changed {\n\t\t\t\tmsg, err := checkout.Git(ctx, \"add\", filepath.Base(filename))\n\t\t\t\tif err != nil {\n\t\t\t\t\tsklog.Fatalf(\"Failed to stage changes to the config repo: %s: %q\", err, msg)\n\t\t\t\t}\n\t\t\t}\n\t\t\tmsg, err := checkout.Git(ctx, \"diff\", \"--cached\", \"--name-only\")\n\t\t\tif err != nil {\n\t\t\t\tsklog.Fatalf(\"Failed to diff :%s: %q\", err, msg)\n\t\t\t}\n\t\t\tif msg == \"\" {\n\t\t\t\tsklog.Infof(\"Not pushing since no files changed.\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tmsg, err = checkout.Git(ctx, \"commit\", \"-m\", *message)\n\t\t\tif err != nil {\n\t\t\t\tsklog.Fatalf(\"Failed to commit to the config repo: %s: %q\", err, msg)\n\t\t\t}\n\t\t\tmsg, err = checkout.Git(ctx, \"push\", \"origin\", \"master\")\n\t\t\tif err != nil {\n\t\t\t\tsklog.Fatalf(\"Failed to push the config repo: %s: %q\", err, msg)\n\t\t\t}\n\t\t\tif err := exec.Run(context.Background(), &exec.Command{\n\t\t\t\tName: \"kubectl\",\n\t\t\t\tArgs: []string{\"apply\", filenameFlag},\n\t\t\t\tLogStderr: true,\n\t\t\t\tLogStdout: true,\n\t\t\t}); err != nil {\n\t\t\t\tsklog.Errorf(\"Failed to run: %s\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Printf(\"\\nkubectl apply %s\\n\", filenameFlag)\n\t\t}\n\t} else {\n\t\tfmt.Println(\"Nothing to do.\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"sort\"\n\t\"time\"\n)\n\nfunc main() {\n\tstart := time.Now()\n\t\/\/ 処理\n\tvar i float64\n\ti = 0\n\tfor {\n\t\ti++\n\t\tend := time.Now()\n\t\tdiff := float64(end.Sub(start) \/ 1000 \/ 1000 \/ 1000)\n\t\tm := int(i \/ diff)\n\t\tout := 0\n\t\tif m < 0 {\n\t\t} else {\n\t\t\tout = m\n\t\t}\n\t\tfmt.Printf(\"\\r\")\n\t\tfmt.Printf(\"\\r%v回試行 %v秒経過 %v回\/秒\", i, diff, out)\n\t\tif submain() {\n\t\t\tbreak\n\t\t}\n\t}\n\tfmt.Printf(\"\\n\")\n}\n\nfunc submain() bool {\n\t\/*\n\t\tfmt.Print(\"started.\")\n\t\t\/\/ チャネル\n\t\tsleep1_finished := make(chan bool)\n\n\t\tgo func() {\n\t\t\t\/\/ 0.2秒かかるコマンド\n\t\t\tfmt.Print(\"sleep1 started.\")\n\t\t\ttime.Sleep(200 * 1000 * 1000 * time.Nanosecond)\n\t\t\tfmt.Print(\"sleep1 finished.\")\n\t\t\tsleep1_finished <- true\n\t\t}()\n\n\t\t\/\/ 終わるまで待つ\n\t\t<- sleep1_finished\n\t*\/\n\n\t\/\/ 136枚の中から14枚返したい\n\tlist := shuffled_cards()\n\n\t\/\/ 出力\n\tresult := solve(list)\n\tstring_output(list)\n\treturn result\n}\n\n\/\/ http:\/\/d.hatena.ne.jp\/hake\/20150930\/p1\nfunc shuffle(list []int) {\n\tfor i := len(list); i > 1; i-- {\n\t\tj := rand.Intn(i) \/\/ 0 .. i-1 の乱数発生\n\t\tlist[i-1], list[j] = list[j], list[i-1]\n\t}\n}\n\nfunc shuffled_cards() []int {\n\trand.Seed(time.Now().UnixNano())\n\n\t\/\/ データ要素数指定、および初期データ作成\n\tsize := 136\n\tlist := make([]int, size, size)\n\tfor i := 0; i < size; i++ {\n\t\tlist[i] = i \/ 4\n\t}\n\n\t\/\/ シャッフル\n\tshuffle(list)\n\n\treturn list[:14]\n}\n\n\/\/ 牌の出力\nfunc string_output(list []int) {\n\t\/\/ http:\/\/qiita.com\/ruiu\/items\/2bb83b29baeae2433a79\n\t\/\/ サイズ0、内部バッファの長さ69の[]byteの値を割り当てる\n\tb := make([]byte, 0, 70)\n\n\t\/\/ bに文字列を追加\n\tfor j := 0; j < 14; j++ {\n\t\t\/\/ 126976 is 'ton'\n\t\t\/\/ https:\/\/codepoints.net\/U+1F000\n\t\tb = append(b, string(list[j]+126976)...) \/\/ ...が必要\n\t\tb = append(b, string(32)...) \/\/ ...が必要\n\t}\n\tfmt.Printf(\"%v\", string(b))\n}\n\n\/\/ solve\n\/\/ 字マンソーピンのリストをつくる\nfunc solve(list []int) bool {\n\tif is_chitoitsu(list) {\n\t\treturn true\n\t}\n\tmatrix := [][]int{{}, {}, {}, {}}\n\tfor _, value := range list {\n\t\tgroup(matrix, value)\n\t}\n\t\/\/fmt.Printf(\"%v\", matrix)\n\n\treturn group_scan(matrix)\n}\n\n\/\/ スート分類してくれる\nfunc group(m [][]int, i int) {\n\tswitch {\n\tcase i < 7:\n\t\tm[0] = append(m[0], i)\n\tcase i < 7+(9*1):\n\t\tm[1] = append(m[1], i-7)\n\tcase i < 7+(9*2):\n\t\tm[2] = append(m[2], i-7-(9*1))\n\tcase i < 7+(9*3):\n\t\tm[3] = append(m[3], i-7-(9*2))\n\t}\n}\n\nfunc group_scan(m [][]int) bool {\n\tif !valid_mod3(m) {\n\t\treturn false\n\t}\n\tif !valid_33332(m) {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc valid_mod3(m [][]int) bool {\n\t\/\/スートのサイズを3で割った時\n\t\/\/あまりが2であるすーとグループが1つであること\n\tc := 0\n\tfor _, a := range m {\n\t\tswitch len(a) % 3 {\n\t\tcase 0:\n\t\t\t\/\/ noop\n\t\tcase 1:\n\t\t\treturn false\n\t\tcase 2:\n\t\t\tc++\n\t\t}\n\t}\n\treturn c == 1\n}\n\nfunc valid_33332(m [][]int) bool {\n\tfor i := 0; i < 4; i++ {\n\t\tif !valid_suit_group(m[i], i) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc valid_suit_group(a []int, i int) bool {\n\t\/\/ 第二引数は字牌のとき0\n\n\t\/\/ソート\n\tsort.Ints(a)\n\tif len(a)%3 == 2 {\n\t\t\/\/ペアを探す\n\t\tpair_numbers := pairable_numbers(a)\n\t\t\/\/ペア候補がなかったらぬける\n\t\tif len(pair_numbers) == 0 {\n\t\t\treturn false\n\t\t}\n\t\t\/\/ペア候補毎に繰り返し処理\n\t\tfor _, v := range pair_numbers {\n\t\t\t\/\/ペアとなる2枚を除去\n\t\t\trest := []int{}\n\t\t\tc := 2\n\t\t\tfor _, w := range a {\n\t\t\t\t\/\/ ペア候補以外は新スライスに入れる\n\t\t\t\t\/\/ ペア候補は3枚目以降は新スライスに入れる\n\t\t\t\tif w != v || c <= 0 {\n\t\t\t\t\trest = append(rest, w)\n\t\t\t\t}\n\t\t\t\tif w == v {\n\t\t\t\t\tc--\n\t\t\t\t}\n\t\t\t}\n\t\t\tif valid_3cards(rest, i) {\n\t\t\t\treturn true\n\t\t\t} else {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\treturn false\n\t} else if len(a)%3 == 0 {\n\t\treturn valid_3cards(a, i)\n\t}\n\t\/\/ 来ないはず\n\treturn false\n}\n\nfunc valid_3cards(a []int, i int) bool {\n\t\/\/ 刻子や順子のみで構成されている場合true\n\t\/\/ a is sorted\n\t\/\/ a.size % 3 is0\n\t\/\/ 第二引数は字牌のとき0\n\tok := false\n\tfor {\n\t\ta, ok = remove_kotsu(a)\n\t\tif ok {\n\t\t\tcontinue\n\t\t}\n\t\tif i > 0 {\n\t\t\ta, ok = remove_shuntsu(a)\n\t\t\tif ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\treturn len(a) == 0\n\t}\n}\n\nfunc remove_kotsu(a []int) ([]int, bool) {\n\t\/\/ 刻子を除去できればtrue\n\t\/\/ a is sorted\n\tretval := a\n\tif len(a) < 3 {\n\t\treturn retval, false\n\t}\n\tif a[0] == a[1] && a[0] == a[2] {\n\t\tretval = a[3:]\n\t\treturn retval, true\n\t}\n\treturn retval, false\n}\n\nfunc remove_shuntsu(a []int) ([]int, bool) {\n\t\/\/ 順子を除去できればtrue\n\t\/\/ a is sorted\n\trest := []int{}\n\tfirst := -1\n\tsecond := -1\n\tfound := false\n\tfor _, v := range a {\n\t\tif found {\n\t\t\trest = append(rest, v)\n\t\t\tcontinue\n\t\t}\n\t\tif first == -1 {\n\t\t\tfirst = v\n\t\t} else if second == -1 && first+1 == v {\n\t\t\tsecond = v\n\t\t} else if second != -1 && first+2 == v {\n\t\t\t\/\/flush\n\t\t\tfirst = -1\n\t\t\tsecond = -1\n\t\t\tfound = true\n\t\t} else {\n\t\t\trest = append(rest, v)\n\t\t}\n\t}\n\treturn rest, found\n}\n\nfunc pairable_numbers(sorted []int) []int {\n\t\/\/カウンタ\n\tretval := []int{}\n\ta := 999 \/\/ 2つ前\n\tb := 999 \/\/ 1つ前\n\tfor _, v := range sorted {\n\t\tif b == v && a != v {\n\t\t\tretval = append(retval, v)\n\t\t} else {\n\t\t\tb = v\n\t\t}\n\t}\n\treturn retval\n}\n\nfunc is_chitoitsu(list []int) bool {\n\t\/\/カウンタ\n\tc := map[int]int{}\n\n\t\/\/コピー\n\tl := list\n\n\tfor _, v := range l {\n\t\tcount, ok := c[v]\n\t\tif ok {\n\t\t\tif count == 1 {\n\t\t\t\tc[v] = 2\n\t\t\t} else {\n\t\t\t\t\/\/ c[v] == 2\n\t\t\t\treturn false\n\t\t\t}\n\t\t} else {\n\t\t\tc[v] = 1\n\t\t}\n\t\t\/\/8個チェック\n\t\tif len(c) >= 8 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n<commit_msg>到達しないはずの場所にpanic<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"sort\"\n\t\"time\"\n)\n\nfunc main() {\n\tstart := time.Now()\n\t\/\/ 処理\n\tvar i float64\n\ti = 0\n\tfor {\n\t\ti++\n\t\tend := time.Now()\n\t\tdiff := float64(end.Sub(start) \/ 1000 \/ 1000 \/ 1000)\n\t\tm := int(i \/ diff)\n\t\tout := 0\n\t\tif m < 0 {\n\t\t} else {\n\t\t\tout = m\n\t\t}\n\t\tfmt.Printf(\"\\r\")\n\t\tfmt.Printf(\"\\r%v回試行 %v秒経過 %v回\/秒\", i, diff, out)\n\t\tif submain() {\n\t\t\tbreak\n\t\t}\n\t}\n\tfmt.Printf(\"\\n\")\n}\n\nfunc submain() bool {\n\t\/*\n\t\tfmt.Print(\"started.\")\n\t\t\/\/ チャネル\n\t\tsleep1_finished := make(chan bool)\n\n\t\tgo func() {\n\t\t\t\/\/ 0.2秒かかるコマンド\n\t\t\tfmt.Print(\"sleep1 started.\")\n\t\t\ttime.Sleep(200 * 1000 * 1000 * time.Nanosecond)\n\t\t\tfmt.Print(\"sleep1 finished.\")\n\t\t\tsleep1_finished <- true\n\t\t}()\n\n\t\t\/\/ 終わるまで待つ\n\t\t<- sleep1_finished\n\t*\/\n\n\t\/\/ 136枚の中から14枚返したい\n\tlist := shuffled_cards()\n\n\t\/\/ 出力\n\tresult := solve(list)\n\tstring_output(list)\n\treturn result\n}\n\n\/\/ http:\/\/d.hatena.ne.jp\/hake\/20150930\/p1\nfunc shuffle(list []int) {\n\tfor i := len(list); i > 1; i-- {\n\t\tj := rand.Intn(i) \/\/ 0 .. i-1 の乱数発生\n\t\tlist[i-1], list[j] = list[j], list[i-1]\n\t}\n}\n\nfunc shuffled_cards() []int {\n\trand.Seed(time.Now().UnixNano())\n\n\t\/\/ データ要素数指定、および初期データ作成\n\tsize := 136\n\tlist := make([]int, size, size)\n\tfor i := 0; i < size; i++ {\n\t\tlist[i] = i \/ 4\n\t}\n\n\t\/\/ シャッフル\n\tshuffle(list)\n\n\treturn list[:14]\n}\n\n\/\/ 牌の出力\nfunc string_output(list []int) {\n\t\/\/ http:\/\/qiita.com\/ruiu\/items\/2bb83b29baeae2433a79\n\t\/\/ サイズ0、内部バッファの長さ69の[]byteの値を割り当てる\n\tb := make([]byte, 0, 70)\n\n\t\/\/ bに文字列を追加\n\tfor j := 0; j < 14; j++ {\n\t\t\/\/ 126976 is 'ton'\n\t\t\/\/ https:\/\/codepoints.net\/U+1F000\n\t\tb = append(b, string(list[j]+126976)...) \/\/ ...が必要\n\t\tb = append(b, string(32)...) \/\/ ...が必要\n\t}\n\tfmt.Printf(\"%v\", string(b))\n}\n\n\/\/ solve\n\/\/ 字マンソーピンのリストをつくる\nfunc solve(list []int) bool {\n\tif is_chitoitsu(list) {\n\t\treturn true\n\t}\n\tmatrix := [][]int{{}, {}, {}, {}}\n\tfor _, value := range list {\n\t\tgroup(matrix, value)\n\t}\n\t\/\/fmt.Printf(\"%v\", matrix)\n\n\treturn group_scan(matrix)\n}\n\n\/\/ スート分類してくれる\nfunc group(m [][]int, i int) {\n\tswitch {\n\tcase i < 7:\n\t\tm[0] = append(m[0], i)\n\tcase i < 7+(9*1):\n\t\tm[1] = append(m[1], i-7)\n\tcase i < 7+(9*2):\n\t\tm[2] = append(m[2], i-7-(9*1))\n\tcase i < 7+(9*3):\n\t\tm[3] = append(m[3], i-7-(9*2))\n\t}\n}\n\nfunc group_scan(m [][]int) bool {\n\tif !valid_mod3(m) {\n\t\treturn false\n\t}\n\tif !valid_33332(m) {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc valid_mod3(m [][]int) bool {\n\t\/\/スートのサイズを3で割った時\n\t\/\/あまりが2であるすーとグループが1つであること\n\tc := 0\n\tfor _, a := range m {\n\t\tswitch len(a) % 3 {\n\t\tcase 0:\n\t\t\t\/\/ noop\n\t\tcase 1:\n\t\t\treturn false\n\t\tcase 2:\n\t\t\tc++\n\t\t}\n\t}\n\treturn c == 1\n}\n\nfunc valid_33332(m [][]int) bool {\n\tfor i := 0; i < 4; i++ {\n\t\tif !valid_suit_group(m[i], i) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc valid_suit_group(a []int, i int) bool {\n\t\/\/ 第二引数は字牌のとき0\n\n\t\/\/ソート\n\tsort.Ints(a)\n\tif len(a)%3 == 2 {\n\t\t\/\/ペアを探す\n\t\tpair_numbers := pairable_numbers(a)\n\t\t\/\/ペア候補がなかったらぬける\n\t\tif len(pair_numbers) == 0 {\n\t\t\treturn false\n\t\t}\n\t\t\/\/ペア候補毎に繰り返し処理\n\t\tfor _, v := range pair_numbers {\n\t\t\t\/\/ペアとなる2枚を除去\n\t\t\trest := []int{}\n\t\t\tc := 2\n\t\t\tfor _, w := range a {\n\t\t\t\t\/\/ ペア候補以外は新スライスに入れる\n\t\t\t\t\/\/ ペア候補は3枚目以降は新スライスに入れる\n\t\t\t\tif w != v || c <= 0 {\n\t\t\t\t\trest = append(rest, w)\n\t\t\t\t}\n\t\t\t\tif w == v {\n\t\t\t\t\tc--\n\t\t\t\t}\n\t\t\t}\n\t\t\tif valid_3cards(rest, i) {\n\t\t\t\treturn true\n\t\t\t} else {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\treturn false\n\t} else if len(a)%3 == 0 {\n\t\treturn valid_3cards(a, i)\n\t}\n\t\/\/ 到達しないはず\n\tpanic(\"到達しないはず\")\n}\n\nfunc valid_3cards(a []int, i int) bool {\n\t\/\/ 刻子や順子のみで構成されている場合true\n\t\/\/ a is sorted\n\t\/\/ a.size % 3 is0\n\t\/\/ 第二引数は字牌のとき0\n\tok := false\n\tfor {\n\t\ta, ok = remove_kotsu(a)\n\t\tif ok {\n\t\t\tcontinue\n\t\t}\n\t\tif i > 0 {\n\t\t\ta, ok = remove_shuntsu(a)\n\t\t\tif ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\treturn len(a) == 0\n\t}\n}\n\nfunc remove_kotsu(a []int) ([]int, bool) {\n\t\/\/ 刻子を除去できればtrue\n\t\/\/ a is sorted\n\tretval := a\n\tif len(a) < 3 {\n\t\treturn retval, false\n\t}\n\tif a[0] == a[1] && a[0] == a[2] {\n\t\tretval = a[3:]\n\t\treturn retval, true\n\t}\n\treturn retval, false\n}\n\nfunc remove_shuntsu(a []int) ([]int, bool) {\n\t\/\/ 順子を除去できればtrue\n\t\/\/ a is sorted\n\trest := []int{}\n\tfirst := -1\n\tsecond := -1\n\tfound := false\n\tfor _, v := range a {\n\t\tif found {\n\t\t\trest = append(rest, v)\n\t\t\tcontinue\n\t\t}\n\t\tif first == -1 {\n\t\t\tfirst = v\n\t\t} else if second == -1 && first+1 == v {\n\t\t\tsecond = v\n\t\t} else if second != -1 && first+2 == v {\n\t\t\t\/\/flush\n\t\t\tfirst = -1\n\t\t\tsecond = -1\n\t\t\tfound = true\n\t\t} else {\n\t\t\trest = append(rest, v)\n\t\t}\n\t}\n\treturn rest, found\n}\n\nfunc pairable_numbers(sorted []int) []int {\n\t\/\/カウンタ\n\tretval := []int{}\n\ta := 999 \/\/ 2つ前\n\tb := 999 \/\/ 1つ前\n\tfor _, v := range sorted {\n\t\tif b == v && a != v {\n\t\t\tretval = append(retval, v)\n\t\t} else {\n\t\t\tb = v\n\t\t}\n\t}\n\treturn retval\n}\n\nfunc is_chitoitsu(list []int) bool {\n\t\/\/カウンタ\n\tc := map[int]int{}\n\n\t\/\/コピー\n\tl := list\n\n\tfor _, v := range l {\n\t\tcount, ok := c[v]\n\t\tif ok {\n\t\t\tif count == 1 {\n\t\t\t\tc[v] = 2\n\t\t\t} else {\n\t\t\t\t\/\/ c[v] == 2\n\t\t\t\treturn false\n\t\t\t}\n\t\t} else {\n\t\t\tc[v] = 1\n\t\t}\n\t\t\/\/8個チェック\n\t\tif len(c) >= 8 {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package netutil\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\n\t\"encoding\/binary\"\n\n\t\"sync\"\n\n\t\"time\"\n\n\t\"sync\/atomic\"\n\n\t\"compress\/flate\"\n\n\t\"os\"\n\n\t\"io\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/xiaonanln\/goNewlessPool\"\n\t\"github.com\/xiaonanln\/goworld\/consts\"\n\t\"github.com\/xiaonanln\/goworld\/gwlog\"\n\t\"github.com\/xiaonanln\/goworld\/opmon\"\n)\n\nconst ( \/\/ Three different level of packet size\n\tPACKET_SIZE_SMALL = 1024\n\tPACKET_SIZE_BIG = 1024 * 64\n\tPACKET_SIZE_HUGE = 1024 * 1024 * 4\n)\n\nconst (\n\tMAX_PACKET_SIZE = 25 * 1024 * 1024\n\tSIZE_FIELD_SIZE = 4\n\tPREPAYLOAD_SIZE = SIZE_FIELD_SIZE\n\tMAX_PAYLOAD_LENGTH = MAX_PACKET_SIZE - PREPAYLOAD_SIZE\n)\n\nvar (\n\tNETWORK_ENDIAN = binary.LittleEndian\n\terrRecvAgain = _ErrRecvAgain{}\n\tcompressWritersPool = newless_pool.NewNewlessPool()\n)\n\nfunc init() {\n\tfor i := 0; i < consts.COMPRESS_WRITER_POOL_SIZE; i++ {\n\t\tcw, err := flate.NewWriter(os.Stderr, flate.BestSpeed)\n\t\tif err != nil {\n\t\t\tgwlog.Fatal(\"create flate compressor failed: %v\", err)\n\t\t}\n\n\t\tcompressWritersPool.Put(cw)\n\t}\n\n\tgwlog.Info(\"%d compress writer created.\", consts.COMPRESS_WRITER_POOL_SIZE)\n}\n\ntype _ErrRecvAgain struct{}\n\nfunc (err _ErrRecvAgain) Error() string {\n\treturn \"recv again\"\n}\n\nfunc (err _ErrRecvAgain) Temporary() bool {\n\treturn true\n}\n\nfunc (err _ErrRecvAgain) Timeout() bool {\n\treturn false\n}\n\ntype PacketConnection struct {\n\tconn Connection\n\tcompressed bool\n\tpendingPackets []*Packet\n\tpendingPacketsLock sync.Mutex\n\tsendBuffer *SendBuffer \/\/ each PacketConnection uses 1 SendBuffer for sending packets\n\n\t\/\/ buffers and infos for receiving a packet\n\tpayloadLenBuf [SIZE_FIELD_SIZE]byte\n\tpayloadLenBytesRecved int\n\trecvCompressed bool\n\trecvTotalPayloadLen uint32\n\trecvedPayloadLen uint32\n\trecvingPacket *Packet\n\n\tcompressReader io.ReadCloser\n}\n\nfunc NewPacketConnection(conn Connection, compressed bool) *PacketConnection {\n\tpc := &PacketConnection{\n\t\tconn: (conn),\n\t\tsendBuffer: NewSendBuffer(),\n\t\tcompressed: compressed,\n\t}\n\n\tpc.compressReader = flate.NewReader(os.Stdin) \/\/ reader is always needed\n\treturn pc\n}\n\nfunc (pc *PacketConnection) NewPacket() *Packet {\n\treturn allocPacket()\n}\n\nfunc (pc *PacketConnection) SendPacket(packet *Packet) error {\n\tif consts.DEBUG_PACKETS {\n\t\tgwlog.Debug(\"%s SEND PACKET %p: msgtype=%v, payload(%d)=%v\", pc, packet,\n\t\t\tPACKET_ENDIAN.Uint16(packet.bytes[PREPAYLOAD_SIZE:PREPAYLOAD_SIZE+2]),\n\t\t\tpacket.GetPayloadLen(),\n\t\t\tpacket.bytes[PREPAYLOAD_SIZE+2:PREPAYLOAD_SIZE+packet.GetPayloadLen()])\n\t}\n\tif atomic.LoadInt64(&packet.refcount) <= 0 {\n\t\tgwlog.Panicf(\"sending packet with refcount=%d\", packet.refcount)\n\t}\n\n\tpacket.AddRefCount(1)\n\tpc.pendingPacketsLock.Lock()\n\tpc.pendingPackets = append(pc.pendingPackets, packet)\n\tpc.pendingPacketsLock.Unlock()\n\treturn nil\n}\n\nfunc (pc *PacketConnection) Flush() (err error) {\n\tpc.pendingPacketsLock.Lock()\n\tif len(pc.pendingPackets) == 0 { \/\/ no packets to send, common to happen, so handle efficiently\n\t\tpc.pendingPacketsLock.Unlock()\n\t\treturn\n\t}\n\tpackets := make([]*Packet, 0, len(pc.pendingPackets))\n\tpackets, pc.pendingPackets = pc.pendingPackets, packets\n\tpc.pendingPacketsLock.Unlock()\n\n\t\/\/ flush should only be called in one goroutine\n\top := opmon.StartOperation(\"FlushPackets\")\n\tdefer op.Finish(time.Millisecond * 100)\n\n\tvar cw *flate.Writer\n\tif pc.compressed {\n\t\t_cw := compressWritersPool.TryGet() \/\/ try to get a usable compress writer, might fail\n\t\tif _cw != nil {\n\t\t\tcw = _cw.(*flate.Writer)\n\t\t\tdefer compressWritersPool.Put(cw)\n\t\t} else {\n\t\t\tgwlog.Warn(\"Fail to get compressor, packet is not compressed\")\n\t\t}\n\t}\n\n\tif len(packets) == 1 {\n\t\t\/\/ only 1 packet to send, just send it directly, no need to use send buffer\n\t\tpacket := packets[0]\n\t\tif cw != nil {\n\t\t\tpacket.compress(cw)\n\t\t}\n\t\terr = WriteAll(pc.conn, packet.data())\n\t\tpacket.Release()\n\t\tif err == nil {\n\t\t\terr = pc.conn.Flush()\n\t\t}\n\t\treturn\n\t}\n\n\tsendBuffer := pc.sendBuffer \/\/ the send buffer\n\nsend_packets_loop:\n\tfor _, packet := range packets {\n\t\tif cw != nil {\n\t\t\tpacket.compress(cw)\n\t\t}\n\n\t\tpacketData := packet.data()\n\t\tif len(packetData) > sendBuffer.FreeSpace() {\n\t\t\t\/\/ can not append data to send buffer, so clear send buffer first\n\t\t\tif err = sendBuffer.WriteTo(pc.conn); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif len(packetData) >= SEND_BUFFER_SIZE {\n\t\t\t\t\/\/ packet is too large, impossible to put to send buffer\n\t\t\t\terr = WriteAll(pc.conn, packetData)\n\t\t\t\tpacket.Release()\n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcontinue send_packets_loop\n\t\t\t}\n\t\t}\n\n\t\t\/\/ now we are sure that len(packetData) <= sendBuffer.FreeSize()\n\t\tn, _ := sendBuffer.Write(packetData)\n\t\tif n != len(packetData) {\n\t\t\tgwlog.Panicf(\"packet is not fully written\")\n\t\t}\n\t\tpacket.Release()\n\t}\n\n\t\/\/ now we send all data in the send buffer\n\terr = sendBuffer.WriteTo(pc.conn)\n\tif err == nil {\n\t\terr = pc.conn.Flush()\n\t}\n\treturn\n}\n\nfunc (pc *PacketConnection) SetRecvDeadline(deadline time.Time) error {\n\treturn pc.conn.SetReadDeadline(deadline)\n}\n\nfunc (pc *PacketConnection) RecvPacket() (*Packet, error) {\n\tif pc.payloadLenBytesRecved < SIZE_FIELD_SIZE {\n\t\t\/\/ receive more of payload len bytes\n\t\tn, err := pc.conn.Read(pc.payloadLenBuf[pc.payloadLenBytesRecved:])\n\t\tpc.payloadLenBytesRecved += n\n\t\tif pc.payloadLenBytesRecved < SIZE_FIELD_SIZE {\n\t\t\tif err == nil {\n\t\t\t\terr = errRecvAgain\n\t\t\t}\n\t\t\treturn nil, err \/\/ packet not finished yet\n\t\t}\n\n\t\tpc.recvTotalPayloadLen = NETWORK_ENDIAN.Uint32(pc.payloadLenBuf[:])\n\t\t\/\/pc.recvCompressed = false\n\t\tif pc.recvCompressed {\n\t\t\tgwlog.Panicf(\"should be false\")\n\t\t}\n\t\tif pc.recvTotalPayloadLen&COMPRESSED_BIT_MASK != 0 {\n\t\t\tpc.recvTotalPayloadLen &= PAYLOAD_LEN_MASK\n\t\t\tpc.recvCompressed = true\n\t\t}\n\n\t\tif pc.recvTotalPayloadLen == 0 || pc.recvTotalPayloadLen > MAX_PAYLOAD_LENGTH {\n\t\t\terr := errors.Errorf(\"invalid payload length: %v\", pc.recvTotalPayloadLen)\n\t\t\tpc.resetRecvStates()\n\t\t\tpc.Close()\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpc.recvedPayloadLen = 0\n\t\tpc.recvingPacket = NewPacket()\n\t\tpc.recvingPacket.assureCapacity(pc.recvTotalPayloadLen)\n\t}\n\n\t\/\/ now all bytes of payload len is received, start receiving payload\n\tn, err := pc.conn.Read(pc.recvingPacket.bytes[PREPAYLOAD_SIZE+pc.recvedPayloadLen : PREPAYLOAD_SIZE+pc.recvTotalPayloadLen])\n\tpc.recvedPayloadLen += uint32(n)\n\n\tif pc.recvedPayloadLen == pc.recvTotalPayloadLen {\n\t\t\/\/ full packet received, return the packet\n\t\tpacket := pc.recvingPacket\n\t\tpacket.setPayloadLenCompressed(pc.recvTotalPayloadLen, pc.recvCompressed)\n\t\tpc.resetRecvStates()\n\t\tpacket.decompress(pc.compressReader)\n\n\t\treturn packet, nil\n\t}\n\n\tif err == nil {\n\t\terr = errRecvAgain\n\t}\n\treturn nil, err\n}\nfunc (pc *PacketConnection) resetRecvStates() {\n\tpc.payloadLenBytesRecved = 0\n\tpc.recvTotalPayloadLen = 0\n\tpc.recvedPayloadLen = 0\n\tpc.recvingPacket = nil\n\tpc.recvCompressed = false\n}\n\nfunc (pc *PacketConnection) Close() error {\n\treturn pc.conn.Close()\n}\n\nfunc (pc *PacketConnection) RemoteAddr() net.Addr {\n\treturn pc.conn.RemoteAddr()\n}\n\nfunc (pc *PacketConnection) LocalAddr() net.Addr {\n\treturn pc.conn.LocalAddr()\n}\n\nfunc (pc *PacketConnection) String() string {\n\treturn fmt.Sprintf(\"[%s >>> %s]\", pc.LocalAddr(), pc.RemoteAddr())\n}\n<commit_msg>use new newless pool<commit_after>package netutil\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\n\t\"encoding\/binary\"\n\n\t\"sync\"\n\n\t\"time\"\n\n\t\"sync\/atomic\"\n\n\t\"compress\/flate\"\n\n\t\"os\"\n\n\t\"io\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/xiaonanln\/go-xnsyncutil\/xnsyncutil\"\n\t\"github.com\/xiaonanln\/goworld\/consts\"\n\t\"github.com\/xiaonanln\/goworld\/gwlog\"\n\t\"github.com\/xiaonanln\/goworld\/opmon\"\n)\n\nconst ( \/\/ Three different level of packet size\n\tPACKET_SIZE_SMALL = 1024\n\tPACKET_SIZE_BIG = 1024 * 64\n\tPACKET_SIZE_HUGE = 1024 * 1024 * 4\n)\n\nconst (\n\tMAX_PACKET_SIZE = 25 * 1024 * 1024\n\tSIZE_FIELD_SIZE = 4\n\tPREPAYLOAD_SIZE = SIZE_FIELD_SIZE\n\tMAX_PAYLOAD_LENGTH = MAX_PACKET_SIZE - PREPAYLOAD_SIZE\n)\n\nvar (\n\tNETWORK_ENDIAN = binary.LittleEndian\n\terrRecvAgain = _ErrRecvAgain{}\n\tcompressWritersPool = xnsyncutil.NewNewlessPool()\n)\n\nfunc init() {\n\tfor i := 0; i < consts.COMPRESS_WRITER_POOL_SIZE; i++ {\n\t\tcw, err := flate.NewWriter(os.Stderr, flate.BestSpeed)\n\t\tif err != nil {\n\t\t\tgwlog.Fatal(\"create flate compressor failed: %v\", err)\n\t\t}\n\n\t\tcompressWritersPool.Put(cw)\n\t}\n\n\tgwlog.Info(\"%d compress writer created.\", consts.COMPRESS_WRITER_POOL_SIZE)\n}\n\ntype _ErrRecvAgain struct{}\n\nfunc (err _ErrRecvAgain) Error() string {\n\treturn \"recv again\"\n}\n\nfunc (err _ErrRecvAgain) Temporary() bool {\n\treturn true\n}\n\nfunc (err _ErrRecvAgain) Timeout() bool {\n\treturn false\n}\n\ntype PacketConnection struct {\n\tconn Connection\n\tcompressed bool\n\tpendingPackets []*Packet\n\tpendingPacketsLock sync.Mutex\n\tsendBuffer *SendBuffer \/\/ each PacketConnection uses 1 SendBuffer for sending packets\n\n\t\/\/ buffers and infos for receiving a packet\n\tpayloadLenBuf [SIZE_FIELD_SIZE]byte\n\tpayloadLenBytesRecved int\n\trecvCompressed bool\n\trecvTotalPayloadLen uint32\n\trecvedPayloadLen uint32\n\trecvingPacket *Packet\n\n\tcompressReader io.ReadCloser\n}\n\nfunc NewPacketConnection(conn Connection, compressed bool) *PacketConnection {\n\tpc := &PacketConnection{\n\t\tconn: (conn),\n\t\tsendBuffer: NewSendBuffer(),\n\t\tcompressed: compressed,\n\t}\n\n\tpc.compressReader = flate.NewReader(os.Stdin) \/\/ reader is always needed\n\treturn pc\n}\n\nfunc (pc *PacketConnection) NewPacket() *Packet {\n\treturn allocPacket()\n}\n\nfunc (pc *PacketConnection) SendPacket(packet *Packet) error {\n\tif consts.DEBUG_PACKETS {\n\t\tgwlog.Debug(\"%s SEND PACKET %p: msgtype=%v, payload(%d)=%v\", pc, packet,\n\t\t\tPACKET_ENDIAN.Uint16(packet.bytes[PREPAYLOAD_SIZE:PREPAYLOAD_SIZE+2]),\n\t\t\tpacket.GetPayloadLen(),\n\t\t\tpacket.bytes[PREPAYLOAD_SIZE+2:PREPAYLOAD_SIZE+packet.GetPayloadLen()])\n\t}\n\tif atomic.LoadInt64(&packet.refcount) <= 0 {\n\t\tgwlog.Panicf(\"sending packet with refcount=%d\", packet.refcount)\n\t}\n\n\tpacket.AddRefCount(1)\n\tpc.pendingPacketsLock.Lock()\n\tpc.pendingPackets = append(pc.pendingPackets, packet)\n\tpc.pendingPacketsLock.Unlock()\n\treturn nil\n}\n\nfunc (pc *PacketConnection) Flush() (err error) {\n\tpc.pendingPacketsLock.Lock()\n\tif len(pc.pendingPackets) == 0 { \/\/ no packets to send, common to happen, so handle efficiently\n\t\tpc.pendingPacketsLock.Unlock()\n\t\treturn\n\t}\n\tpackets := make([]*Packet, 0, len(pc.pendingPackets))\n\tpackets, pc.pendingPackets = pc.pendingPackets, packets\n\tpc.pendingPacketsLock.Unlock()\n\n\t\/\/ flush should only be called in one goroutine\n\top := opmon.StartOperation(\"FlushPackets\")\n\tdefer op.Finish(time.Millisecond * 100)\n\n\tvar cw *flate.Writer\n\tif pc.compressed {\n\t\t_cw := compressWritersPool.TryGet() \/\/ try to get a usable compress writer, might fail\n\t\tif _cw != nil {\n\t\t\tcw = _cw.(*flate.Writer)\n\t\t\tdefer compressWritersPool.Put(cw)\n\t\t} else {\n\t\t\tgwlog.Warn(\"Fail to get compressor, packet is not compressed\")\n\t\t}\n\t}\n\n\tif len(packets) == 1 {\n\t\t\/\/ only 1 packet to send, just send it directly, no need to use send buffer\n\t\tpacket := packets[0]\n\t\tif cw != nil {\n\t\t\tpacket.compress(cw)\n\t\t}\n\t\terr = WriteAll(pc.conn, packet.data())\n\t\tpacket.Release()\n\t\tif err == nil {\n\t\t\terr = pc.conn.Flush()\n\t\t}\n\t\treturn\n\t}\n\n\tsendBuffer := pc.sendBuffer \/\/ the send buffer\n\nsend_packets_loop:\n\tfor _, packet := range packets {\n\t\tif cw != nil {\n\t\t\tpacket.compress(cw)\n\t\t}\n\n\t\tpacketData := packet.data()\n\t\tif len(packetData) > sendBuffer.FreeSpace() {\n\t\t\t\/\/ can not append data to send buffer, so clear send buffer first\n\t\t\tif err = sendBuffer.WriteTo(pc.conn); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif len(packetData) >= SEND_BUFFER_SIZE {\n\t\t\t\t\/\/ packet is too large, impossible to put to send buffer\n\t\t\t\terr = WriteAll(pc.conn, packetData)\n\t\t\t\tpacket.Release()\n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcontinue send_packets_loop\n\t\t\t}\n\t\t}\n\n\t\t\/\/ now we are sure that len(packetData) <= sendBuffer.FreeSize()\n\t\tn, _ := sendBuffer.Write(packetData)\n\t\tif n != len(packetData) {\n\t\t\tgwlog.Panicf(\"packet is not fully written\")\n\t\t}\n\t\tpacket.Release()\n\t}\n\n\t\/\/ now we send all data in the send buffer\n\terr = sendBuffer.WriteTo(pc.conn)\n\tif err == nil {\n\t\terr = pc.conn.Flush()\n\t}\n\treturn\n}\n\nfunc (pc *PacketConnection) SetRecvDeadline(deadline time.Time) error {\n\treturn pc.conn.SetReadDeadline(deadline)\n}\n\nfunc (pc *PacketConnection) RecvPacket() (*Packet, error) {\n\tif pc.payloadLenBytesRecved < SIZE_FIELD_SIZE {\n\t\t\/\/ receive more of payload len bytes\n\t\tn, err := pc.conn.Read(pc.payloadLenBuf[pc.payloadLenBytesRecved:])\n\t\tpc.payloadLenBytesRecved += n\n\t\tif pc.payloadLenBytesRecved < SIZE_FIELD_SIZE {\n\t\t\tif err == nil {\n\t\t\t\terr = errRecvAgain\n\t\t\t}\n\t\t\treturn nil, err \/\/ packet not finished yet\n\t\t}\n\n\t\tpc.recvTotalPayloadLen = NETWORK_ENDIAN.Uint32(pc.payloadLenBuf[:])\n\t\t\/\/pc.recvCompressed = false\n\t\tif pc.recvCompressed {\n\t\t\tgwlog.Panicf(\"should be false\")\n\t\t}\n\t\tif pc.recvTotalPayloadLen&COMPRESSED_BIT_MASK != 0 {\n\t\t\tpc.recvTotalPayloadLen &= PAYLOAD_LEN_MASK\n\t\t\tpc.recvCompressed = true\n\t\t}\n\n\t\tif pc.recvTotalPayloadLen == 0 || pc.recvTotalPayloadLen > MAX_PAYLOAD_LENGTH {\n\t\t\terr := errors.Errorf(\"invalid payload length: %v\", pc.recvTotalPayloadLen)\n\t\t\tpc.resetRecvStates()\n\t\t\tpc.Close()\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpc.recvedPayloadLen = 0\n\t\tpc.recvingPacket = NewPacket()\n\t\tpc.recvingPacket.assureCapacity(pc.recvTotalPayloadLen)\n\t}\n\n\t\/\/ now all bytes of payload len is received, start receiving payload\n\tn, err := pc.conn.Read(pc.recvingPacket.bytes[PREPAYLOAD_SIZE+pc.recvedPayloadLen : PREPAYLOAD_SIZE+pc.recvTotalPayloadLen])\n\tpc.recvedPayloadLen += uint32(n)\n\n\tif pc.recvedPayloadLen == pc.recvTotalPayloadLen {\n\t\t\/\/ full packet received, return the packet\n\t\tpacket := pc.recvingPacket\n\t\tpacket.setPayloadLenCompressed(pc.recvTotalPayloadLen, pc.recvCompressed)\n\t\tpc.resetRecvStates()\n\t\tpacket.decompress(pc.compressReader)\n\n\t\treturn packet, nil\n\t}\n\n\tif err == nil {\n\t\terr = errRecvAgain\n\t}\n\treturn nil, err\n}\nfunc (pc *PacketConnection) resetRecvStates() {\n\tpc.payloadLenBytesRecved = 0\n\tpc.recvTotalPayloadLen = 0\n\tpc.recvedPayloadLen = 0\n\tpc.recvingPacket = nil\n\tpc.recvCompressed = false\n}\n\nfunc (pc *PacketConnection) Close() error {\n\treturn pc.conn.Close()\n}\n\nfunc (pc *PacketConnection) RemoteAddr() net.Addr {\n\treturn pc.conn.RemoteAddr()\n}\n\nfunc (pc *PacketConnection) LocalAddr() net.Addr {\n\treturn pc.conn.LocalAddr()\n}\n\nfunc (pc *PacketConnection) String() string {\n\treturn fmt.Sprintf(\"[%s >>> %s]\", pc.LocalAddr(), pc.RemoteAddr())\n}\n<|endoftext|>"} {"text":"<commit_before>package plugin\n\nimport (\n\t. \"github.com\/mickael-kerjean\/filestash\/server\/common\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_authenticate_admin\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_authenticate_ldap\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_authenticate_openid\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_authenticate_passthrough\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_authenticate_saml\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_backblaze\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_dav\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_dropbox\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_ftp\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_gdrive\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_git\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_ldap\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_local\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_mysql\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_nop\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_s3\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_samba\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_sftp\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_webdav\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_editor_onlyoffice\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_handler_console\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_handler_syncthing\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_image_light\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_search_stateless\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_security_scanner\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_security_svg\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_starter_http\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_starter_tor\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_video_transcoder\"\n)\n\nfunc init() {\n\tLog.Debug(\"Plugin loader\")\n}\n<commit_msg>plugin (add): add temporary file backend to default list<commit_after>package plugin\n\nimport (\n\t. \"github.com\/mickael-kerjean\/filestash\/server\/common\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_authenticate_admin\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_authenticate_ldap\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_authenticate_openid\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_authenticate_passthrough\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_authenticate_saml\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_backblaze\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_dav\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_dropbox\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_ftp\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_gdrive\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_git\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_ldap\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_local\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_mysql\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_nop\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_s3\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_samba\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_sftp\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_tmp\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_backend_webdav\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_editor_onlyoffice\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_handler_console\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_handler_syncthing\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_image_light\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_search_stateless\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_security_scanner\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_security_svg\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_starter_http\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_starter_tor\"\n\t_ \"github.com\/mickael-kerjean\/filestash\/server\/plugin\/plg_video_transcoder\"\n)\n\nfunc init() {\n\tLog.Debug(\"Plugin loader\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2018 Constantin Schomburg <me@cschomburg.com>\n\/\/\n\/\/ Use of this source code is governed by an MIT-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Service js provides JavaScript scripting for the sarif network.\npackage js\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/sarifsystems\/sarif\/pkg\/content\"\n\t\"github.com\/sarifsystems\/sarif\/pkg\/schema\"\n\t\"github.com\/sarifsystems\/sarif\/sarif\"\n\t\"github.com\/sarifsystems\/sarif\/services\"\n)\n\nvar Module = &services.Module{\n\tName: \"js\",\n\tVersion: \"1.0\",\n\tNewInstance: NewService,\n}\n\ntype Config struct {\n\tScriptDir string `json:\"script_dir\"`\n}\n\ntype Dependencies struct {\n\tConfig services.Config\n\tClient *sarif.Client\n\tBroker *sarif.Broker\n}\n\ntype Service struct {\n\tcfg Config\n\tBroker *sarif.Broker\n\t*sarif.Client\n\n\tScripts map[string]string\n\tMachines map[string]*Machine\n\tListeners map[string][]string\n}\n\nfunc NewService(deps *Dependencies) *Service {\n\ts := &Service{\n\t\tBroker: deps.Broker,\n\t\tClient: deps.Client,\n\t\tScripts: make(map[string]string),\n\t\tMachines: make(map[string]*Machine),\n\t\tListeners: make(map[string][]string),\n\t}\n\ts.cfg.ScriptDir = deps.Config.Dir() + \"\/js\"\n\tdeps.Config.Get(&s.cfg)\n\n\ts.createMachine(\"default\")\n\treturn s\n}\n\nfunc (s *Service) Enable() error {\n\ts.Subscribe(\"js\/do\", \"\", s.handleDo)\n\ts.Subscribe(\"js\/start\", \"\", s.handleStart)\n\ts.Subscribe(\"js\/stop\", \"\", s.handleStop)\n\ts.Subscribe(\"js\/status\", \"\", s.handleStatus)\n\ts.Subscribe(\"js\/put\", \"\", s.handlePut)\n\ts.Subscribe(\"js\/get\", \"\", s.handleGet)\n\ts.Subscribe(\"js\/attach\", \"\", s.handleAttach)\n\n\tif err := s.readAvailableScripts(); err != nil {\n\t\treturn err\n\t}\n\tfor _, f := range s.Scripts {\n\t\ts.createMachineFromScript(f)\n\t}\n\treturn nil\n}\n\nfunc (s *Service) readAvailableScripts() error {\n\tif s.cfg.ScriptDir == \"\" {\n\t\treturn nil\n\t}\n\n\tdir, err := os.Open(s.cfg.ScriptDir)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\ts.Log(\"warn\", err.Error())\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\tfiles, err := dir.Readdirnames(0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, f := range files {\n\t\tif !strings.HasSuffix(f, \".js\") {\n\t\t\tcontinue\n\t\t}\n\t\tname := strings.TrimSuffix(f, \".js\")\n\t\ts.Scripts[name] = f\n\t}\n\treturn nil\n}\n\nfunc readSource(filename string) ([]byte, error) {\n\tif filename == \"\" || filename == \"-\" {\n\t\treturn ioutil.ReadAll(os.Stdin)\n\t}\n\treturn ioutil.ReadFile(filename)\n}\n\nfunc (s *Service) createMachineFromScript(f string) (*Machine, error) {\n\ts.Log(\"info\", \"loading \"+f)\n\tm, err := s.createMachine(strings.TrimSuffix(f, \".js\"))\n\tif err != nil {\n\t\treturn m, err\n\t}\n\n\tfname := s.cfg.ScriptDir + \"\/\" + f\n\t_, err = m.Modules.Require(fname, s.cfg.ScriptDir)\n\treturn m, err\n}\n\nfunc (s *Service) createMachine(name string) (*Machine, error) {\n\tif name == \"\" {\n\t\tname = sarif.GenerateId()\n\t}\n\tif _, ok := s.Machines[name]; ok {\n\t\treturn nil, errors.New(\"Machine \" + name + \" already exists\")\n\t}\n\n\tc := sarif.NewClient(s.DeviceId + \"\/\" + name)\n\tc.Connect(s.Broker.NewLocalConn())\n\n\tm := NewMachine(c)\n\tm.Modules.AddPath(s.cfg.ScriptDir)\n\ts.Machines[name] = m\n\tif listeners, ok := s.Listeners[name]; ok {\n\t\tfor _, l := range listeners {\n\t\t\tm.Attach(l)\n\t\t}\n\t}\n\tif err := m.Enable(); err != nil {\n\t\treturn m, err\n\t}\n\treturn m, nil\n}\n\nfunc (s *Service) destroyMachine(name string) error {\n\tm, ok := s.Machines[name]\n\tif !ok {\n\t\treturn errors.New(\"Machine \" + name + \" does not exist\")\n\t}\n\tdelete(s.Machines, name)\n\treturn m.Disable()\n}\n\nfunc (s *Service) getOrCreateMachine(name string) (*Machine, error) {\n\tif m, ok := s.Machines[name]; ok {\n\t\treturn m, nil\n\t}\n\treturn s.createMachine(name)\n}\n\nfunc (s *Service) handleDo(msg sarif.Message) {\n\tmachine := strings.TrimLeft(strings.TrimPrefix(msg.Action, \"js\/do\"), \"\/\")\n\tif machine == \"\" {\n\t\tmachine = \"default\"\n\t}\n\tm, err := s.getOrCreateMachine(machine)\n\tif err != nil {\n\t\ts.ReplyInternalError(msg, err)\n\t\treturn\n\t}\n\tout, err, p := m.Do(msg.Text)\n\tif err != nil {\n\t\ts.ReplyBadRequest(msg, err)\n\t\treturn\n\t}\n\n\treply := sarif.CreateMessage(\"js\/done\", p)\n\treply.Text = out\n\ts.Reply(msg, reply)\n}\n\ntype MsgMachineStatus struct {\n\tMachine string `json:\"machine,omitempty\"`\n\tStatus string `json:\"status,omitempty\"`\n\tOut string `json:\"out,omitempty\"`\n}\n\nfunc (p MsgMachineStatus) String() string {\n\ts := \"Machine \" + p.Machine + \" is \" + p.Status + \".\"\n\tif p.Out != \"\" {\n\t\ts += \"\\n\\n\" + p.Out\n\t}\n\treturn s\n}\n\nfunc (s *Service) handleStart(msg sarif.Message) {\n\tname := strings.TrimPrefix(strings.TrimPrefix(msg.Action, \"js\/start\"), \"\/\")\n\tif name == \"\" {\n\t\ts.ReplyBadRequest(msg, errors.New(\"No machine name given!\"))\n\t\treturn\n\t}\n\n\tm, err := s.createMachineFromScript(name)\n\tif err != nil {\n\t\ts.ReplyBadRequest(msg, err)\n\t\treturn\n\t}\n\n\ts.Reply(msg, sarif.CreateMessage(\"js\/started\", &MsgMachineStatus{\n\t\tname,\n\t\t\"up\",\n\t\tm.FlushOut(),\n\t}))\n}\n\nfunc (s *Service) handleStop(msg sarif.Message) {\n\tname := strings.TrimPrefix(strings.TrimPrefix(msg.Action, \"js\/stop\"), \"\/\")\n\tif name == \"\" {\n\t\ts.ReplyBadRequest(msg, errors.New(\"No machine name given!\"))\n\t\treturn\n\t}\n\n\tif err := s.destroyMachine(name); err != nil {\n\t\ts.ReplyBadRequest(msg, err)\n\t\treturn\n\t}\n\n\ts.Reply(msg, sarif.CreateMessage(\"js\/stopped\", &MsgMachineStatus{\n\t\tname,\n\t\t\"down\",\n\t\t\"\",\n\t}))\n}\n\ntype MsgMachineAllStatus struct {\n\tUp int `json:\"up\"`\n\tStatus map[string]string `json:\"status\"`\n}\n\nfunc (s MsgMachineAllStatus) Text() string {\n\treturn fmt.Sprintf(\"%d\/%d machines running.\", s.Up, len(s.Status))\n}\n\nfunc (s *Service) handleStatus(msg sarif.Message) {\n\tname := msg.ActionSuffix(\"js\/status\")\n\n\tif name != \"\" {\n\t\tstatus := \"not_found\"\n\t\tif _, ok := s.Machines[name]; ok {\n\t\t\tstatus = \"up\"\n\t\t} else if _, ok := s.Scripts[name]; ok {\n\t\t\tstatus = \"down\"\n\t\t}\n\t\ts.Reply(msg, sarif.CreateMessage(\"js\/status\", &MsgMachineStatus{\n\t\t\tname,\n\t\t\tstatus,\n\t\t\t\"\",\n\t\t}))\n\t\treturn\n\t}\n\n\tstatus := MsgMachineAllStatus{\n\t\tStatus: make(map[string]string),\n\t}\n\tfor name := range s.Scripts {\n\t\tif _, ok := s.Machines[name]; ok {\n\t\t\tstatus.Up++\n\t\t\tstatus.Status[name] = \"up\"\n\t\t} else {\n\t\t\tstatus.Status[name] = \"down\"\n\t\t}\n\t}\n\ts.Reply(msg, sarif.CreateMessage(\"js\/status\", status))\n}\n\ntype ContentPayload struct {\n\tContent schema.Content `json:\"content\"`\n}\n\nfunc (p ContentPayload) Text() string {\n\treturn \"This message contains content.\"\n}\n\nfunc (s *Service) handlePut(msg sarif.Message) {\n\tgen := false\n\tname := strings.TrimPrefix(strings.TrimPrefix(msg.Action, \"js\/put\"), \"\/\")\n\tif name == \"\" {\n\t\tname, gen = sarif.GenerateId(), true\n\t}\n\tif _, ok := s.Machines[name]; ok {\n\t\ts.destroyMachine(name)\n\t}\n\n\tm, err := s.createMachine(name)\n\tif err != nil {\n\t\ts.ReplyInternalError(msg, err)\n\t\treturn\n\t}\n\n\tvar ctp ContentPayload\n\tif err := msg.DecodePayload(&ctp); err != nil {\n\t\ts.ReplyBadRequest(msg, err)\n\t\treturn\n\t}\n\ttext := msg.Text\n\tif ctp.Content.Url != \"\" {\n\t\tct, err := content.Get(ctp.Content)\n\t\tif err != nil {\n\t\t\ts.ReplyBadRequest(msg, err)\n\t\t}\n\t\ttext = string(ct.Data)\n\t}\n\n\tout, err, _ := m.Do(text)\n\tif err != nil {\n\t\ts.ReplyBadRequest(msg, err)\n\t\ts.destroyMachine(name)\n\t\treturn\n\t}\n\n\tif !gen {\n\t\tf, err := os.Create(s.cfg.ScriptDir + \"\/\" + name + \".js\")\n\t\tif err == nil {\n\t\t\t_, err = f.Write([]byte(text))\n\t\t\tdefer f.Close()\n\t\t}\n\t\tif err != nil {\n\t\t\ts.ReplyInternalError(msg, err)\n\t\t\ts.destroyMachine(name)\n\t\t\treturn\n\t\t}\n\t}\n\n\ts.Reply(msg, sarif.CreateMessage(\"js\/status\", &MsgMachineStatus{\n\t\tname,\n\t\t\"up\",\n\t\tout,\n\t}))\n}\n\nfunc (s *Service) handleGet(msg sarif.Message) {\n\tname := strings.TrimPrefix(strings.TrimPrefix(msg.Action, \"js\/get\"), \"\/\")\n\tif name == \"\" {\n\t\ts.ReplyBadRequest(msg, errors.New(\"No machine name given!\"))\n\t\treturn\n\t}\n\n\tf, err := os.Open(s.cfg.ScriptDir + \"\/\" + name + \".js\")\n\tif err != nil {\n\t\ts.ReplyInternalError(msg, err)\n\t\treturn\n\t}\n\tdefer f.Close()\n\tsrc, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\ts.ReplyInternalError(msg, err)\n\t\treturn\n\t}\n\n\tct := content.PutData([]byte(src))\n\tct.PutAction = \"js\/put\/\" + name\n\tct.Name = name + \".js\"\n\ts.Reply(msg, sarif.CreateMessage(\"js\/script\", ContentPayload{ct}))\n}\n\nfunc (s *Service) handleAttach(msg sarif.Message) {\n\tname := strings.TrimPrefix(strings.TrimPrefix(msg.Action, \"js\/attach\"), \"\/\")\n\tif name == \"\" {\n\t\ts.ReplyBadRequest(msg, errors.New(\"No machine name given!\"))\n\t\treturn\n\t}\n\n\tif m, ok := s.Machines[name]; ok {\n\t\tm.Attach(msg.Source)\n\t}\n\n\ts.Listeners[name] = append(s.Listeners[name], msg.Source)\n\ts.Reply(msg, sarif.CreateMessage(\"js\/attached\", nil))\n}\n<commit_msg>JS: Different module path.<commit_after>\/\/ Copyright (C) 2018 Constantin Schomburg <me@cschomburg.com>\n\/\/\n\/\/ Use of this source code is governed by an MIT-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Service js provides JavaScript scripting for the sarif network.\npackage js\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/sarifsystems\/sarif\/pkg\/content\"\n\t\"github.com\/sarifsystems\/sarif\/pkg\/schema\"\n\t\"github.com\/sarifsystems\/sarif\/sarif\"\n\t\"github.com\/sarifsystems\/sarif\/services\"\n)\n\nvar Module = &services.Module{\n\tName: \"js\",\n\tVersion: \"1.0\",\n\tNewInstance: NewService,\n}\n\ntype Config struct {\n\tScriptDir string `json:\"script_dir\"`\n}\n\ntype Dependencies struct {\n\tConfig services.Config\n\tClient *sarif.Client\n\tBroker *sarif.Broker\n}\n\ntype Service struct {\n\tcfg Config\n\tBroker *sarif.Broker\n\t*sarif.Client\n\n\tScripts map[string]string\n\tMachines map[string]*Machine\n\tListeners map[string][]string\n}\n\nfunc NewService(deps *Dependencies) *Service {\n\ts := &Service{\n\t\tBroker: deps.Broker,\n\t\tClient: deps.Client,\n\t\tScripts: make(map[string]string),\n\t\tMachines: make(map[string]*Machine),\n\t\tListeners: make(map[string][]string),\n\t}\n\ts.cfg.ScriptDir = deps.Config.Dir() + \"\/js\"\n\tdeps.Config.Get(&s.cfg)\n\n\ts.createMachine(\"default\")\n\treturn s\n}\n\nfunc (s *Service) Enable() error {\n\ts.Subscribe(\"js\/do\", \"\", s.handleDo)\n\ts.Subscribe(\"js\/start\", \"\", s.handleStart)\n\ts.Subscribe(\"js\/stop\", \"\", s.handleStop)\n\ts.Subscribe(\"js\/status\", \"\", s.handleStatus)\n\ts.Subscribe(\"js\/put\", \"\", s.handlePut)\n\ts.Subscribe(\"js\/get\", \"\", s.handleGet)\n\ts.Subscribe(\"js\/attach\", \"\", s.handleAttach)\n\n\tif err := s.readAvailableScripts(); err != nil {\n\t\treturn err\n\t}\n\tfor _, f := range s.Scripts {\n\t\ts.createMachineFromScript(f)\n\t}\n\treturn nil\n}\n\nfunc (s *Service) readAvailableScripts() error {\n\tif s.cfg.ScriptDir == \"\" {\n\t\treturn nil\n\t}\n\n\tdir, err := os.Open(s.cfg.ScriptDir)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\ts.Log(\"warn\", err.Error())\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\tfiles, err := dir.Readdirnames(0)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, f := range files {\n\t\tif !strings.HasSuffix(f, \".js\") {\n\t\t\tcontinue\n\t\t}\n\t\tname := strings.TrimSuffix(f, \".js\")\n\t\ts.Scripts[name] = f\n\t}\n\treturn nil\n}\n\nfunc readSource(filename string) ([]byte, error) {\n\tif filename == \"\" || filename == \"-\" {\n\t\treturn ioutil.ReadAll(os.Stdin)\n\t}\n\treturn ioutil.ReadFile(filename)\n}\n\nfunc (s *Service) createMachineFromScript(f string) (*Machine, error) {\n\ts.Log(\"info\", \"loading \"+f)\n\tm, err := s.createMachine(strings.TrimSuffix(f, \".js\"))\n\tif err != nil {\n\t\treturn m, err\n\t}\n\n\tfname := s.cfg.ScriptDir + \"\/\" + f\n\t_, err = m.Modules.Require(fname, s.cfg.ScriptDir)\n\treturn m, err\n}\n\nfunc (s *Service) createMachine(name string) (*Machine, error) {\n\tif name == \"\" {\n\t\tname = sarif.GenerateId()\n\t}\n\tif _, ok := s.Machines[name]; ok {\n\t\treturn nil, errors.New(\"Machine \" + name + \" already exists\")\n\t}\n\n\tc := sarif.NewClient(s.DeviceId + \"\/\" + name)\n\tc.Connect(s.Broker.NewLocalConn())\n\n\tm := NewMachine(c)\n\tm.Modules.AddPath(s.cfg.ScriptDir + \"\/node_modules\")\n\tm.Modules.AddPath(s.cfg.ScriptDir + \"\/modules\")\n\ts.Machines[name] = m\n\tif listeners, ok := s.Listeners[name]; ok {\n\t\tfor _, l := range listeners {\n\t\t\tm.Attach(l)\n\t\t}\n\t}\n\tif err := m.Enable(); err != nil {\n\t\treturn m, err\n\t}\n\treturn m, nil\n}\n\nfunc (s *Service) destroyMachine(name string) error {\n\tm, ok := s.Machines[name]\n\tif !ok {\n\t\treturn errors.New(\"Machine \" + name + \" does not exist\")\n\t}\n\tdelete(s.Machines, name)\n\treturn m.Disable()\n}\n\nfunc (s *Service) getOrCreateMachine(name string) (*Machine, error) {\n\tif m, ok := s.Machines[name]; ok {\n\t\treturn m, nil\n\t}\n\treturn s.createMachine(name)\n}\n\nfunc (s *Service) handleDo(msg sarif.Message) {\n\tmachine := strings.TrimLeft(strings.TrimPrefix(msg.Action, \"js\/do\"), \"\/\")\n\tif machine == \"\" {\n\t\tmachine = \"default\"\n\t}\n\tm, err := s.getOrCreateMachine(machine)\n\tif err != nil {\n\t\ts.ReplyInternalError(msg, err)\n\t\treturn\n\t}\n\tout, err, p := m.Do(msg.Text)\n\tif err != nil {\n\t\ts.ReplyBadRequest(msg, err)\n\t\treturn\n\t}\n\n\treply := sarif.CreateMessage(\"js\/done\", p)\n\treply.Text = out\n\ts.Reply(msg, reply)\n}\n\ntype MsgMachineStatus struct {\n\tMachine string `json:\"machine,omitempty\"`\n\tStatus string `json:\"status,omitempty\"`\n\tOut string `json:\"out,omitempty\"`\n}\n\nfunc (p MsgMachineStatus) String() string {\n\ts := \"Machine \" + p.Machine + \" is \" + p.Status + \".\"\n\tif p.Out != \"\" {\n\t\ts += \"\\n\\n\" + p.Out\n\t}\n\treturn s\n}\n\nfunc (s *Service) handleStart(msg sarif.Message) {\n\tname := strings.TrimPrefix(strings.TrimPrefix(msg.Action, \"js\/start\"), \"\/\")\n\tif name == \"\" {\n\t\ts.ReplyBadRequest(msg, errors.New(\"No machine name given!\"))\n\t\treturn\n\t}\n\n\tm, err := s.createMachineFromScript(name)\n\tif err != nil {\n\t\ts.ReplyBadRequest(msg, err)\n\t\treturn\n\t}\n\n\ts.Reply(msg, sarif.CreateMessage(\"js\/started\", &MsgMachineStatus{\n\t\tname,\n\t\t\"up\",\n\t\tm.FlushOut(),\n\t}))\n}\n\nfunc (s *Service) handleStop(msg sarif.Message) {\n\tname := strings.TrimPrefix(strings.TrimPrefix(msg.Action, \"js\/stop\"), \"\/\")\n\tif name == \"\" {\n\t\ts.ReplyBadRequest(msg, errors.New(\"No machine name given!\"))\n\t\treturn\n\t}\n\n\tif err := s.destroyMachine(name); err != nil {\n\t\ts.ReplyBadRequest(msg, err)\n\t\treturn\n\t}\n\n\ts.Reply(msg, sarif.CreateMessage(\"js\/stopped\", &MsgMachineStatus{\n\t\tname,\n\t\t\"down\",\n\t\t\"\",\n\t}))\n}\n\ntype MsgMachineAllStatus struct {\n\tUp int `json:\"up\"`\n\tStatus map[string]string `json:\"status\"`\n}\n\nfunc (s MsgMachineAllStatus) Text() string {\n\treturn fmt.Sprintf(\"%d\/%d machines running.\", s.Up, len(s.Status))\n}\n\nfunc (s *Service) handleStatus(msg sarif.Message) {\n\tname := msg.ActionSuffix(\"js\/status\")\n\n\tif name != \"\" {\n\t\tstatus := \"not_found\"\n\t\tif _, ok := s.Machines[name]; ok {\n\t\t\tstatus = \"up\"\n\t\t} else if _, ok := s.Scripts[name]; ok {\n\t\t\tstatus = \"down\"\n\t\t}\n\t\ts.Reply(msg, sarif.CreateMessage(\"js\/status\", &MsgMachineStatus{\n\t\t\tname,\n\t\t\tstatus,\n\t\t\t\"\",\n\t\t}))\n\t\treturn\n\t}\n\n\tstatus := MsgMachineAllStatus{\n\t\tStatus: make(map[string]string),\n\t}\n\tfor name := range s.Scripts {\n\t\tif _, ok := s.Machines[name]; ok {\n\t\t\tstatus.Up++\n\t\t\tstatus.Status[name] = \"up\"\n\t\t} else {\n\t\t\tstatus.Status[name] = \"down\"\n\t\t}\n\t}\n\ts.Reply(msg, sarif.CreateMessage(\"js\/status\", status))\n}\n\ntype ContentPayload struct {\n\tContent schema.Content `json:\"content\"`\n}\n\nfunc (p ContentPayload) Text() string {\n\treturn \"This message contains content.\"\n}\n\nfunc (s *Service) handlePut(msg sarif.Message) {\n\tgen := false\n\tname := strings.TrimPrefix(strings.TrimPrefix(msg.Action, \"js\/put\"), \"\/\")\n\tif name == \"\" {\n\t\tname, gen = sarif.GenerateId(), true\n\t}\n\tif _, ok := s.Machines[name]; ok {\n\t\ts.destroyMachine(name)\n\t}\n\n\tm, err := s.createMachine(name)\n\tif err != nil {\n\t\ts.ReplyInternalError(msg, err)\n\t\treturn\n\t}\n\n\tvar ctp ContentPayload\n\tif err := msg.DecodePayload(&ctp); err != nil {\n\t\ts.ReplyBadRequest(msg, err)\n\t\treturn\n\t}\n\ttext := msg.Text\n\tif ctp.Content.Url != \"\" {\n\t\tct, err := content.Get(ctp.Content)\n\t\tif err != nil {\n\t\t\ts.ReplyBadRequest(msg, err)\n\t\t}\n\t\ttext = string(ct.Data)\n\t}\n\n\tout, err, _ := m.Do(text)\n\tif err != nil {\n\t\ts.ReplyBadRequest(msg, err)\n\t\ts.destroyMachine(name)\n\t\treturn\n\t}\n\n\tif !gen {\n\t\tf, err := os.Create(s.cfg.ScriptDir + \"\/\" + name + \".js\")\n\t\tif err == nil {\n\t\t\t_, err = f.Write([]byte(text))\n\t\t\tdefer f.Close()\n\t\t}\n\t\tif err != nil {\n\t\t\ts.ReplyInternalError(msg, err)\n\t\t\ts.destroyMachine(name)\n\t\t\treturn\n\t\t}\n\t}\n\n\ts.Reply(msg, sarif.CreateMessage(\"js\/status\", &MsgMachineStatus{\n\t\tname,\n\t\t\"up\",\n\t\tout,\n\t}))\n}\n\nfunc (s *Service) handleGet(msg sarif.Message) {\n\tname := strings.TrimPrefix(strings.TrimPrefix(msg.Action, \"js\/get\"), \"\/\")\n\tif name == \"\" {\n\t\ts.ReplyBadRequest(msg, errors.New(\"No machine name given!\"))\n\t\treturn\n\t}\n\n\tf, err := os.Open(s.cfg.ScriptDir + \"\/\" + name + \".js\")\n\tif err != nil {\n\t\ts.ReplyInternalError(msg, err)\n\t\treturn\n\t}\n\tdefer f.Close()\n\tsrc, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\ts.ReplyInternalError(msg, err)\n\t\treturn\n\t}\n\n\tct := content.PutData([]byte(src))\n\tct.PutAction = \"js\/put\/\" + name\n\tct.Name = name + \".js\"\n\ts.Reply(msg, sarif.CreateMessage(\"js\/script\", ContentPayload{ct}))\n}\n\nfunc (s *Service) handleAttach(msg sarif.Message) {\n\tname := strings.TrimPrefix(strings.TrimPrefix(msg.Action, \"js\/attach\"), \"\/\")\n\tif name == \"\" {\n\t\ts.ReplyBadRequest(msg, errors.New(\"No machine name given!\"))\n\t\treturn\n\t}\n\n\tif m, ok := s.Machines[name]; ok {\n\t\tm.Attach(msg.Source)\n\t}\n\n\ts.Listeners[name] = append(s.Listeners[name], msg.Source)\n\ts.Reply(msg, sarif.CreateMessage(\"js\/attached\", nil))\n}\n<|endoftext|>"} {"text":"<commit_before>package kubernetes\n\nimport (\n\t\"time\"\n\n\t\"github.com\/weaveworks\/scope\/report\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/labels\"\n)\n\n\/\/ These constants are keys used in node metadata\nconst (\n\tServiceID = \"kubernetes_service_id\"\n\tServiceName = \"kubernetes_service_name\"\n\tServiceCreated = \"kubernetes_service_created\"\n)\n\n\/\/ Service represents a Kubernetes service\ntype Service interface {\n\tID() string\n\tName() string\n\tNamespace() string\n\tGetNode() report.Node\n\tSelector() labels.Selector\n}\n\ntype service struct {\n\t*api.Service\n}\n\n\/\/ NewService creates a new Service\nfunc NewService(s *api.Service) Service {\n\treturn &service{Service: s}\n}\n\nfunc (s *service) ID() string {\n\treturn s.ObjectMeta.Namespace + \"\/\" + s.ObjectMeta.Name\n}\n\nfunc (s *service) Name() string {\n\treturn s.ObjectMeta.Name\n}\n\nfunc (s *service) Namespace() string {\n\treturn s.ObjectMeta.Namespace\n}\n\nfunc (s *service) Selector() labels.Selector {\n\treturn labels.SelectorFromSet(labels.Set(s.Spec.Selector))\n}\n\nfunc (s *service) GetNode() report.Node {\n\treturn report.MakeNodeWith(map[string]string{\n\t\tServiceID: s.ID(),\n\t\tServiceName: s.Name(),\n\t\tServiceCreated: s.ObjectMeta.CreationTimestamp.Format(time.RFC822),\n\t\tNamespace: s.Namespace(),\n\t})\n}\n<commit_msg>nil selector for a k8s service means nothing, not everything<commit_after>package kubernetes\n\nimport (\n\t\"time\"\n\n\t\"github.com\/weaveworks\/scope\/report\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/labels\"\n)\n\n\/\/ These constants are keys used in node metadata\nconst (\n\tServiceID = \"kubernetes_service_id\"\n\tServiceName = \"kubernetes_service_name\"\n\tServiceCreated = \"kubernetes_service_created\"\n)\n\n\/\/ Service represents a Kubernetes service\ntype Service interface {\n\tID() string\n\tName() string\n\tNamespace() string\n\tGetNode() report.Node\n\tSelector() labels.Selector\n}\n\ntype service struct {\n\t*api.Service\n}\n\n\/\/ NewService creates a new Service\nfunc NewService(s *api.Service) Service {\n\treturn &service{Service: s}\n}\n\nfunc (s *service) ID() string {\n\treturn s.ObjectMeta.Namespace + \"\/\" + s.ObjectMeta.Name\n}\n\nfunc (s *service) Name() string {\n\treturn s.ObjectMeta.Name\n}\n\nfunc (s *service) Namespace() string {\n\treturn s.ObjectMeta.Namespace\n}\n\nfunc (s *service) Selector() labels.Selector {\n\tif s.Spec.Selector == nil {\n\t\treturn labels.Nothing()\n\t}\n\treturn labels.SelectorFromSet(labels.Set(s.Spec.Selector))\n}\n\nfunc (s *service) GetNode() report.Node {\n\treturn report.MakeNodeWith(map[string]string{\n\t\tServiceID: s.ID(),\n\t\tServiceName: s.Name(),\n\t\tServiceCreated: s.ObjectMeta.CreationTimestamp.Format(time.RFC822),\n\t\tNamespace: s.Namespace(),\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package keys\n\ntype ParsedInternalKey struct {\n\tUserKey []byte\n\tKind Kind\n\tSequence Sequence\n}\n\nfunc (k *ParsedInternalKey) Append(dst []byte) []byte {\n\tvar buf [TagBytes]byte\n\tCombineTag(buf[:], k.Sequence, k.Kind)\n\tdst = append(dst, k.UserKey...)\n\treturn append(dst, buf[:]...)\n}\n\nfunc (k *ParsedInternalKey) Parse(key []byte) bool {\n\ti := len(key) - TagBytes\n\tif i < 0 {\n\t\treturn false\n\t}\n\tk.UserKey = key[:i:i]\n\tk.Sequence, k.Kind = ExtractTag(key[i:])\n\treturn k.Kind <= maxKind\n}\n\nfunc (k *ParsedInternalKey) Tag() Tag {\n\treturn PackTag(k.Sequence, k.Kind)\n}\n<commit_msg>Document keys.ParsedInternalKey<commit_after>package keys\n\n\/\/ ParsedInternalKey is a parsed or splited internal representation.\ntype ParsedInternalKey struct {\n\tUserKey []byte\n\tKind Kind\n\tSequence Sequence\n}\n\n\/\/ Append appends this internal key to destination buffer.\nfunc (k *ParsedInternalKey) Append(dst []byte) []byte {\n\tvar buf [TagBytes]byte\n\tCombineTag(buf[:], k.Sequence, k.Kind)\n\tdst = append(dst, k.UserKey...)\n\treturn append(dst, buf[:]...)\n}\n\n\/\/ Parse parses input key as internal key and returns true for valid internal\n\/\/ key. It is illegal to access other fields and methods after returning false.\nfunc (k *ParsedInternalKey) Parse(key []byte) bool {\n\ti := len(key) - TagBytes\n\tif i < 0 {\n\t\treturn false\n\t}\n\tk.UserKey = key[:i:i]\n\tk.Sequence, k.Kind = ExtractTag(key[i:])\n\treturn k.Kind <= maxKind\n}\n\n\/\/ Tag returns tag of this internal key.\nfunc (k *ParsedInternalKey) Tag() Tag {\n\treturn PackTag(k.Sequence, k.Kind)\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Return from func instead of exit 0 on EOF in cat<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>add API listening log info<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>reverted back to log using fields instead of proto marshaller<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>add NewUnsupportedFormatErr so an UnsupportedFormatErr can be created by lib users<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Netflix, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage command\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"os\"\n\t\"runtime\/debug\"\n\t\"strings\"\n\t\"time\"\n\n\tflag \"github.com\/spf13\/pflag\"\n\n\t\"github.com\/Netflix\/chaosmonkey\"\n\t\"github.com\/Netflix\/chaosmonkey\/clock\"\n\t\"github.com\/Netflix\/chaosmonkey\/config\"\n\t\"github.com\/Netflix\/chaosmonkey\/config\/param\"\n\t\"github.com\/Netflix\/chaosmonkey\/deps\"\n\t\"github.com\/Netflix\/chaosmonkey\/mysql\"\n\t\"github.com\/Netflix\/chaosmonkey\/schedstore\"\n\t\"github.com\/Netflix\/chaosmonkey\/schedule\"\n\t\"github.com\/Netflix\/chaosmonkey\/spinnaker\"\n)\n\n\/\/ Version is the version number\nconst Version = \"2.0.0\"\n\nfunc printVersion() {\n\tfmt.Printf(\"%s\\n\", Version)\n}\n\nvar (\n\t\/\/ configPaths is where Chaos Monkey will look for a chaosmonkey.toml\n\t\/\/ configuration file\n\tconfigPaths = [...]string{\".\", \"\/apps\/chaosmonkey\", \"\/etc\", \"\/etc\/chaosmonkey\"}\n)\n\n\/\/ Usage prints usage\nfunc Usage() {\n\tusage := `\nChaos Monkey\n\nUsage:\n\tchaosmonkey <command> ...\n\ncommand: migrate | schedule | terminate | fetch-schedule | outage | config | email | eligible | intest\n\n\nmigrate\n-------\nApplies database migration to the database defined in the configuraton file.\n\nschedule [--max-apps=<N>] [--apps=foo,bar,baz] [--no-record-schedule]\n--------------------------------------------------------------------\nGenerates a schedule of terminations for the day and installs the\nterminations as local cron jobs that call \"chaosmonkey terminate ...\"\n\n--apps=foo,bar,baz Optionally specify an explicit list of apps to schedule.\n This is primarily used for debugging.\n\n--max-apps=<N> Optionally specify the maximum number of apps that Chaos Monkey\n\t\t\t\t\t will schedule. This is primarily used for debugging.\n\n--no-record-schedule Do not record the schedule with the database.\n This is primarily used for debugging.\n\n\nterminate <app> <account> [--region=<region>] [--stack=<stack>] [--cluster=<cluster>] [--leashed]\n-----------------------------------------------------------------------------------------------------------------\nTerminates an instance from a given app and account.\n\nOptionally specify a region, stack, cluster.\n\nThe --leashed flag forces chaosmonkey to run in leashed mode. When leashed,\nChaos Monkey will check if an instance should be terminated, but will not\nactually terminate it.\n\nfetch-schedule\n--------------\nQueries the database to see if there is an existing schedule of\nterminations for today. If so, downloads the schedule and sets up cron jobs to\nimplement the schedule.\n\noutage\n------\nOutput \"true\" if there is an ongoing outage, otherwise \"false\". Used for debugging.\n\n\nconfig [<app>]\n------------\nQuery Spinnaker for the config for a specific app and dump it to\nstandard out. This is only used for debugging.\n\nIf no app is specified, dump the Monkey-level configuration options to standard out.\n\nExamples:\n\n\tchaosmonkey config chaosguineapig\n\n\tchaosmonkey config\n\neligible <app> <account> [--region=<region>] [--stack=<stack>] [--cluster=<cluster>]\n-------------------------------------------------------------------------------------\n\nDump a list of instance-ids that are eligible for termination for a given app, account,\nand optionally region, stack, and cluster.\n\nintest\n------\n\nOutputs \"true\" on standard out if running within a test environment, otherwise outputs \"false\"\n\n\naccount <name>\n--------------\n\nLook up an cloud account ID by name.\n\nExample:\n\n\tchaosmonkey account test\n\n\nprovider <name>\n---------------\n\nLook up the cloud provider by account name.\n\nExample:\n\n\tchaosmonkey provider test\n\n`\n\tfmt.Printf(usage)\n}\n\nfunc init() {\n\t\/\/ Prepend the pid to log statements\n\tlog.SetPrefix(fmt.Sprintf(\"[%5d] \", os.Getpid()))\n}\n\n\/\/ Execute is the main entry point for the chaosmonkey cli.\nfunc Execute() {\n\tregionPtr := flag.String(\"region\", \"\", \"region of termination group\")\n\tstackPtr := flag.String(\"stack\", \"\", \"stack of termination group\")\n\tclusterPtr := flag.String(\"cluster\", \"\", \"cluster of termination group\")\n\tappsPtr := flag.String(\"apps\", \"\", \"comma-separated list of apps to schedule for termination\")\n\tnoRecordSchedulePtr := flag.Bool(\"no-record-schedule\", false, \"do not record schedule\")\n\tversionPtr := flag.BoolP(\"version\", \"v\", false, \"show version\")\n\tflag.Usage = Usage\n\n\t\/\/ These flags, if specified, override config values\n\tmaxAppsFlag := \"max-apps\"\n\tleashedFlag := \"leashed\"\n\tflag.Int(maxAppsFlag, math.MaxInt32, \"max number of apps to examine for termination\")\n\tflag.Bool(leashedFlag, false, \"force leashed mode\")\n\n\tflag.Parse()\n\tif len(flag.Args()) == 0 {\n\t\tif *versionPtr {\n\t\t\tprintVersion()\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tcmd := flag.Arg(0)\n\n\tcfg, err := getConfig()\n\n\tif err != nil {\n\t\tlog.Fatalf(\"FATAL: failed to load config: %v\", err)\n\t}\n\n\t\/\/ Associate config values with falgs\n\terr = cfg.BindPFlag(param.MaxApps, flag.Lookup(maxAppsFlag))\n\tif err != nil {\n\t\tlog.Fatalf(\"FATAL: failed to bind flag: --%s: %v\", maxAppsFlag, err)\n\t}\n\terr = cfg.BindPFlag(param.Leashed, flag.Lookup(leashedFlag))\n\tif err != nil {\n\t\tlog.Fatalf(\"FATAL: failed to bind flag: --%s: %v\", leashedFlag, err)\n\t}\n\n\tspin, err := spinnaker.NewFromConfig(cfg)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"FATAL: spinnaker.New failed: %+v\", err)\n\t}\n\n\toutage, err := deps.GetOutage(cfg)\n\tif err != nil {\n\t\tlog.Fatalf(\"FATAL: deps.GetOutage fail: %+v\", err)\n\t}\n\n\tsql, err := mysql.NewFromConfig(cfg)\n\tif err != nil {\n\t\tlog.Fatalf(\"FATAL: could not initialize mysql connection: %+v\", err)\n\t}\n\n\t\/\/ Ensure mysql object gets closed\n\tdefer func() {\n\t\t_ = sql.Close()\n\t}()\n\n\tswitch cmd {\n\tcase \"migrate\":\n\t\tMigrate(sql)\n\tcase \"schedule\":\n\t\tlog.Println(\"chaosmonkey schedule starting\")\n\t\tdefer log.Println(\"chaosmonkey schedule done\")\n\n\t\tvar apps []string\n\t\tif *appsPtr != \"\" {\n\t\t\t\/\/ User explicitly specified list of apps on the command line\n\t\t\tapps = strings.Split(*appsPtr, \",\")\n\t\t} else {\n\t\t\t\/\/ User did not explicitly specify list of apps, get 'em all\n\t\t\tvar err error\n\t\t\tapps, err = spin.AppNames()\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"FATAL: could not retrieve list of app names: %v\", err)\n\t\t\t}\n\t\t}\n\n\t\tvar schedStore schedstore.SchedStore\n\n\t\tschedStore = sql\n\t\tif *noRecordSchedulePtr {\n\t\t\tschedStore = nullSchedStore{}\n\t\t}\n\n\t\tSchedule(spin, schedStore, cfg, spin, apps)\n\tcase \"fetch-schedule\":\n\t\tFetchSchedule(sql, cfg)\n\tcase \"terminate\":\n\t\tif len(flag.Args()) != 3 {\n\t\t\tflag.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t\tapp := flag.Arg(1)\n\t\taccount := flag.Arg(2)\n\t\ttrackers, err := deps.GetTrackers(cfg)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"FATAL: could not create trackers: %+v\", err)\n\t\t}\n\n\t\terrCounter, err := deps.GetErrorCounter(cfg)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"FATAL: could not create error counter: %+v\", err)\n\t\t}\n\n\t\tenv, err := deps.GetEnv(cfg)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"FATAL: could not determine environment: %+v\", err)\n\t\t}\n\n\t\tdefer logOnPanic(errCounter) \/\/ Handler in case of panic\n\t\tdeps := deps.Deps{\n\t\t\tMonkeyCfg: cfg,\n\t\t\tChecker: sql,\n\t\t\tConfGetter: spin,\n\t\t\tCl: clock.New(),\n\t\t\tDep: spin,\n\t\t\tT: spin,\n\t\t\tTrackers: trackers,\n\t\t\tOu: outage,\n\t\t\tErrCounter: errCounter,\n\t\t\tEnv: env,\n\t\t}\n\t\tTerminate(deps, app, account, *regionPtr, *stackPtr, *clusterPtr)\n\tcase \"outage\":\n\t\tOutage(outage)\n\tcase \"config\":\n\t\tif len(flag.Args()) != 2 {\n\t\t\tDumpMonkeyConfig(cfg)\n\t\t\treturn\n\t\t}\n\t\tapp := flag.Arg(1)\n\t\tDumpConfig(spin, app)\n\tcase \"eligible\":\n\t\tif len(flag.Args()) != 3 {\n\t\t\tflag.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t\tapp := flag.Arg(1)\n\t\taccount := flag.Arg(2)\n\t\tEligible(spin, spin, app, account, *regionPtr, *stackPtr, *clusterPtr)\n\tcase \"intest\":\n\t\tenv, err := deps.GetEnv(cfg)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"FATAL: could not determine environment: %+v\", err)\n\t\t}\n\t\tfmt.Println(env.InTest())\n\tcase \"account\":\n\t\tif len(flag.Args()) != 2 {\n\t\t\tflag.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\taccount := flag.Arg(1)\n\t\tid, err := spin.AccountID(account)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"ERROR: Could not retrieve id for account: %s. Reason: %v\\n\", account, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(id)\n\tcase \"provider\":\n\t\tif len(flag.Args()) != 2 {\n\t\t\tflag.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t\taccount := flag.Arg(1)\n\t\tprovider, err := spin.CloudProvider(account)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"ERROR: Could not retrieve provider for account: %s. Reason: %v\\n\", account, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(provider)\n\n\tdefault:\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n}\n\nfunc init() {\n\t\/\/ All logs to stdout\n\tlog.SetOutput(os.Stdout)\n}\n\n\/\/ logOnPanic increments an error metric and logs if a panic happens\nfunc logOnPanic(errCounter chaosmonkey.ErrorCounter) {\n\tif e := recover(); e != nil {\n\t\tlog.Printf(\"FATAL: panic: %s: %s\", e, debug.Stack())\n\t\terr := errCounter.Increment()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"failed to increment error counter: %s\", err)\n\t\t}\n\t}\n}\n\n\/\/ return configuration info\nfunc getConfig() (*config.Monkey, error) {\n\tcfg, err := config.Load(configPaths[:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cfg, nil\n}\n\n\/\/ nullSchedStore is a no-op implementation of api.SchedStore\ntype nullSchedStore struct{}\n\n\/\/ Retrieve implements api.SchedStore.Retrieve\nfunc (n nullSchedStore) Retrieve(date time.Time) (*schedule.Schedule, error) {\n\treturn nil, fmt.Errorf(\"nullSchedStore does not support Retrieve function\")\n}\n\n\/\/ Publish implements api.SchedStore.Publish\nfunc (n nullSchedStore) Publish(date time.Time, sched *schedule.Schedule) error {\n\treturn nil\n}\n<commit_msg>Bump version number<commit_after>\/\/ Copyright 2016 Netflix, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage command\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"os\"\n\t\"runtime\/debug\"\n\t\"strings\"\n\t\"time\"\n\n\tflag \"github.com\/spf13\/pflag\"\n\n\t\"github.com\/Netflix\/chaosmonkey\"\n\t\"github.com\/Netflix\/chaosmonkey\/clock\"\n\t\"github.com\/Netflix\/chaosmonkey\/config\"\n\t\"github.com\/Netflix\/chaosmonkey\/config\/param\"\n\t\"github.com\/Netflix\/chaosmonkey\/deps\"\n\t\"github.com\/Netflix\/chaosmonkey\/mysql\"\n\t\"github.com\/Netflix\/chaosmonkey\/schedstore\"\n\t\"github.com\/Netflix\/chaosmonkey\/schedule\"\n\t\"github.com\/Netflix\/chaosmonkey\/spinnaker\"\n)\n\n\/\/ Version is the version number\nconst Version = \"2.0.1\"\n\nfunc printVersion() {\n\tfmt.Printf(\"%s\\n\", Version)\n}\n\nvar (\n\t\/\/ configPaths is where Chaos Monkey will look for a chaosmonkey.toml\n\t\/\/ configuration file\n\tconfigPaths = [...]string{\".\", \"\/apps\/chaosmonkey\", \"\/etc\", \"\/etc\/chaosmonkey\"}\n)\n\n\/\/ Usage prints usage\nfunc Usage() {\n\tusage := `\nChaos Monkey\n\nUsage:\n\tchaosmonkey <command> ...\n\ncommand: migrate | schedule | terminate | fetch-schedule | outage | config | email | eligible | intest\n\n\nmigrate\n-------\nApplies database migration to the database defined in the configuraton file.\n\nschedule [--max-apps=<N>] [--apps=foo,bar,baz] [--no-record-schedule]\n--------------------------------------------------------------------\nGenerates a schedule of terminations for the day and installs the\nterminations as local cron jobs that call \"chaosmonkey terminate ...\"\n\n--apps=foo,bar,baz Optionally specify an explicit list of apps to schedule.\n This is primarily used for debugging.\n\n--max-apps=<N> Optionally specify the maximum number of apps that Chaos Monkey\n\t\t\t\t\t will schedule. This is primarily used for debugging.\n\n--no-record-schedule Do not record the schedule with the database.\n This is primarily used for debugging.\n\n\nterminate <app> <account> [--region=<region>] [--stack=<stack>] [--cluster=<cluster>] [--leashed]\n-----------------------------------------------------------------------------------------------------------------\nTerminates an instance from a given app and account.\n\nOptionally specify a region, stack, cluster.\n\nThe --leashed flag forces chaosmonkey to run in leashed mode. When leashed,\nChaos Monkey will check if an instance should be terminated, but will not\nactually terminate it.\n\nfetch-schedule\n--------------\nQueries the database to see if there is an existing schedule of\nterminations for today. If so, downloads the schedule and sets up cron jobs to\nimplement the schedule.\n\noutage\n------\nOutput \"true\" if there is an ongoing outage, otherwise \"false\". Used for debugging.\n\n\nconfig [<app>]\n------------\nQuery Spinnaker for the config for a specific app and dump it to\nstandard out. This is only used for debugging.\n\nIf no app is specified, dump the Monkey-level configuration options to standard out.\n\nExamples:\n\n\tchaosmonkey config chaosguineapig\n\n\tchaosmonkey config\n\neligible <app> <account> [--region=<region>] [--stack=<stack>] [--cluster=<cluster>]\n-------------------------------------------------------------------------------------\n\nDump a list of instance-ids that are eligible for termination for a given app, account,\nand optionally region, stack, and cluster.\n\nintest\n------\n\nOutputs \"true\" on standard out if running within a test environment, otherwise outputs \"false\"\n\n\naccount <name>\n--------------\n\nLook up an cloud account ID by name.\n\nExample:\n\n\tchaosmonkey account test\n\n\nprovider <name>\n---------------\n\nLook up the cloud provider by account name.\n\nExample:\n\n\tchaosmonkey provider test\n\n`\n\tfmt.Printf(usage)\n}\n\nfunc init() {\n\t\/\/ Prepend the pid to log statements\n\tlog.SetPrefix(fmt.Sprintf(\"[%5d] \", os.Getpid()))\n}\n\n\/\/ Execute is the main entry point for the chaosmonkey cli.\nfunc Execute() {\n\tregionPtr := flag.String(\"region\", \"\", \"region of termination group\")\n\tstackPtr := flag.String(\"stack\", \"\", \"stack of termination group\")\n\tclusterPtr := flag.String(\"cluster\", \"\", \"cluster of termination group\")\n\tappsPtr := flag.String(\"apps\", \"\", \"comma-separated list of apps to schedule for termination\")\n\tnoRecordSchedulePtr := flag.Bool(\"no-record-schedule\", false, \"do not record schedule\")\n\tversionPtr := flag.BoolP(\"version\", \"v\", false, \"show version\")\n\tflag.Usage = Usage\n\n\t\/\/ These flags, if specified, override config values\n\tmaxAppsFlag := \"max-apps\"\n\tleashedFlag := \"leashed\"\n\tflag.Int(maxAppsFlag, math.MaxInt32, \"max number of apps to examine for termination\")\n\tflag.Bool(leashedFlag, false, \"force leashed mode\")\n\n\tflag.Parse()\n\tif len(flag.Args()) == 0 {\n\t\tif *versionPtr {\n\t\t\tprintVersion()\n\t\t\tos.Exit(0)\n\t\t}\n\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tcmd := flag.Arg(0)\n\n\tcfg, err := getConfig()\n\n\tif err != nil {\n\t\tlog.Fatalf(\"FATAL: failed to load config: %v\", err)\n\t}\n\n\t\/\/ Associate config values with falgs\n\terr = cfg.BindPFlag(param.MaxApps, flag.Lookup(maxAppsFlag))\n\tif err != nil {\n\t\tlog.Fatalf(\"FATAL: failed to bind flag: --%s: %v\", maxAppsFlag, err)\n\t}\n\terr = cfg.BindPFlag(param.Leashed, flag.Lookup(leashedFlag))\n\tif err != nil {\n\t\tlog.Fatalf(\"FATAL: failed to bind flag: --%s: %v\", leashedFlag, err)\n\t}\n\n\tspin, err := spinnaker.NewFromConfig(cfg)\n\n\tif err != nil {\n\t\tlog.Fatalf(\"FATAL: spinnaker.New failed: %+v\", err)\n\t}\n\n\toutage, err := deps.GetOutage(cfg)\n\tif err != nil {\n\t\tlog.Fatalf(\"FATAL: deps.GetOutage fail: %+v\", err)\n\t}\n\n\tsql, err := mysql.NewFromConfig(cfg)\n\tif err != nil {\n\t\tlog.Fatalf(\"FATAL: could not initialize mysql connection: %+v\", err)\n\t}\n\n\t\/\/ Ensure mysql object gets closed\n\tdefer func() {\n\t\t_ = sql.Close()\n\t}()\n\n\tswitch cmd {\n\tcase \"migrate\":\n\t\tMigrate(sql)\n\tcase \"schedule\":\n\t\tlog.Println(\"chaosmonkey schedule starting\")\n\t\tdefer log.Println(\"chaosmonkey schedule done\")\n\n\t\tvar apps []string\n\t\tif *appsPtr != \"\" {\n\t\t\t\/\/ User explicitly specified list of apps on the command line\n\t\t\tapps = strings.Split(*appsPtr, \",\")\n\t\t} else {\n\t\t\t\/\/ User did not explicitly specify list of apps, get 'em all\n\t\t\tvar err error\n\t\t\tapps, err = spin.AppNames()\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"FATAL: could not retrieve list of app names: %v\", err)\n\t\t\t}\n\t\t}\n\n\t\tvar schedStore schedstore.SchedStore\n\n\t\tschedStore = sql\n\t\tif *noRecordSchedulePtr {\n\t\t\tschedStore = nullSchedStore{}\n\t\t}\n\n\t\tSchedule(spin, schedStore, cfg, spin, apps)\n\tcase \"fetch-schedule\":\n\t\tFetchSchedule(sql, cfg)\n\tcase \"terminate\":\n\t\tif len(flag.Args()) != 3 {\n\t\t\tflag.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t\tapp := flag.Arg(1)\n\t\taccount := flag.Arg(2)\n\t\ttrackers, err := deps.GetTrackers(cfg)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"FATAL: could not create trackers: %+v\", err)\n\t\t}\n\n\t\terrCounter, err := deps.GetErrorCounter(cfg)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"FATAL: could not create error counter: %+v\", err)\n\t\t}\n\n\t\tenv, err := deps.GetEnv(cfg)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"FATAL: could not determine environment: %+v\", err)\n\t\t}\n\n\t\tdefer logOnPanic(errCounter) \/\/ Handler in case of panic\n\t\tdeps := deps.Deps{\n\t\t\tMonkeyCfg: cfg,\n\t\t\tChecker: sql,\n\t\t\tConfGetter: spin,\n\t\t\tCl: clock.New(),\n\t\t\tDep: spin,\n\t\t\tT: spin,\n\t\t\tTrackers: trackers,\n\t\t\tOu: outage,\n\t\t\tErrCounter: errCounter,\n\t\t\tEnv: env,\n\t\t}\n\t\tTerminate(deps, app, account, *regionPtr, *stackPtr, *clusterPtr)\n\tcase \"outage\":\n\t\tOutage(outage)\n\tcase \"config\":\n\t\tif len(flag.Args()) != 2 {\n\t\t\tDumpMonkeyConfig(cfg)\n\t\t\treturn\n\t\t}\n\t\tapp := flag.Arg(1)\n\t\tDumpConfig(spin, app)\n\tcase \"eligible\":\n\t\tif len(flag.Args()) != 3 {\n\t\t\tflag.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t\tapp := flag.Arg(1)\n\t\taccount := flag.Arg(2)\n\t\tEligible(spin, spin, app, account, *regionPtr, *stackPtr, *clusterPtr)\n\tcase \"intest\":\n\t\tenv, err := deps.GetEnv(cfg)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"FATAL: could not determine environment: %+v\", err)\n\t\t}\n\t\tfmt.Println(env.InTest())\n\tcase \"account\":\n\t\tif len(flag.Args()) != 2 {\n\t\t\tflag.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\taccount := flag.Arg(1)\n\t\tid, err := spin.AccountID(account)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"ERROR: Could not retrieve id for account: %s. Reason: %v\\n\", account, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(id)\n\tcase \"provider\":\n\t\tif len(flag.Args()) != 2 {\n\t\t\tflag.Usage()\n\t\t\tos.Exit(1)\n\t\t}\n\t\taccount := flag.Arg(1)\n\t\tprovider, err := spin.CloudProvider(account)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"ERROR: Could not retrieve provider for account: %s. Reason: %v\\n\", account, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(provider)\n\n\tdefault:\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n}\n\nfunc init() {\n\t\/\/ All logs to stdout\n\tlog.SetOutput(os.Stdout)\n}\n\n\/\/ logOnPanic increments an error metric and logs if a panic happens\nfunc logOnPanic(errCounter chaosmonkey.ErrorCounter) {\n\tif e := recover(); e != nil {\n\t\tlog.Printf(\"FATAL: panic: %s: %s\", e, debug.Stack())\n\t\terr := errCounter.Increment()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"failed to increment error counter: %s\", err)\n\t\t}\n\t}\n}\n\n\/\/ return configuration info\nfunc getConfig() (*config.Monkey, error) {\n\tcfg, err := config.Load(configPaths[:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn cfg, nil\n}\n\n\/\/ nullSchedStore is a no-op implementation of api.SchedStore\ntype nullSchedStore struct{}\n\n\/\/ Retrieve implements api.SchedStore.Retrieve\nfunc (n nullSchedStore) Retrieve(date time.Time) (*schedule.Schedule, error) {\n\treturn nil, fmt.Errorf(\"nullSchedStore does not support Retrieve function\")\n}\n\n\/\/ Publish implements api.SchedStore.Publish\nfunc (n nullSchedStore) Publish(date time.Time, sched *schedule.Schedule) error {\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\/\/\n\/\/ Author: Radu Berinde (radu@cockroachlabs.com)\n\npackage distsql\n\nimport (\n\t\"sync\"\n\n\t\"github.com\/cockroachdb\/cockroach\/client\"\n\t\"github.com\/cockroachdb\/cockroach\/sql\/parser\"\n\t\"github.com\/cockroachdb\/cockroach\/sql\/sqlbase\"\n\t\"github.com\/cockroachdb\/cockroach\/util\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/log\"\n)\n\n\/\/ tableReader is the start of a computation flow; it performs KV operations to\n\/\/ retrieve rows for a table, runs a filter expression, and passes rows with the\n\/\/ desired column values to an output rowReceiver.\n\/\/ See docs\/RFCS\/distributed_sql.md\ntype tableReader struct {\n\tdesc sqlbase.TableDescriptor\n\tspans sqlbase.Spans\n\toutputCols []int\n\thardLimit, softLimit int64\n\n\toutput rowReceiver\n\n\tfilter exprHelper\n\n\ttxn *client.Txn\n\tfetcher sqlbase.RowFetcher\n\t\/\/ Last row returned by the rowFetcher; it has one entry per table column.\n\trow sqlbase.EncDatumRow\n\trowAlloc sqlbase.EncDatumRowAlloc\n}\n\nvar _ processor = &tableReader{}\n\n\/\/ newTableReader creates a tableReader.\nfunc newTableReader(\n\tspec *TableReaderSpec, txn *client.Txn, output rowReceiver, evalCtx *parser.EvalContext,\n) (*tableReader, error) {\n\ttr := &tableReader{\n\t\tdesc: spec.Table,\n\t\ttxn: txn,\n\t\toutput: output,\n\t\thardLimit: spec.HardLimit,\n\t\tsoftLimit: spec.SoftLimit,\n\t}\n\n\tif tr.hardLimit != 0 && tr.hardLimit < tr.softLimit {\n\t\treturn nil, util.Errorf(\"soft limit %d larger than hard limit %d\", tr.softLimit,\n\t\t\ttr.hardLimit)\n\t}\n\n\tnumCols := len(tr.desc.Columns)\n\n\ttr.outputCols = make([]int, len(spec.OutputColumns))\n\tfor i, v := range spec.OutputColumns {\n\t\ttr.outputCols[i] = int(v)\n\t}\n\n\t\/\/ Figure out which columns we need: the output columns plus any other\n\t\/\/ columns used by the filter expression.\n\tvalNeededForCol := make([]bool, numCols)\n\tfor _, c := range tr.outputCols {\n\t\tif c < 0 || c >= numCols {\n\t\t\treturn nil, util.Errorf(\"invalid column index %d\", c)\n\t\t}\n\t\tvalNeededForCol[c] = true\n\t}\n\n\tif spec.Filter.Expr != \"\" {\n\t\ttypes := make([]sqlbase.ColumnType_Kind, numCols)\n\t\tfor i := range types {\n\t\t\ttypes[i] = tr.desc.Columns[i].Type.Kind\n\t\t}\n\t\tif err := tr.filter.init(spec.Filter, types, evalCtx); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor c := 0; c < numCols; c++ {\n\t\t\tvalNeededForCol[c] = valNeededForCol[c] || tr.filter.vars.IndexedVarUsed(c)\n\t\t}\n\t}\n\n\tif spec.IndexIdx > uint32(len(tr.desc.Indexes)) {\n\t\treturn nil, util.Errorf(\"invalid IndexIdx %d\", spec.IndexIdx)\n\t}\n\n\tvar index *sqlbase.IndexDescriptor\n\tvar isSecondaryIndex bool\n\n\tif spec.IndexIdx > 0 {\n\t\tindex = &tr.desc.Indexes[spec.IndexIdx-1]\n\t\tisSecondaryIndex = true\n\t} else {\n\t\tindex = &tr.desc.PrimaryIndex\n\t}\n\n\tcolIdxMap := make(map[sqlbase.ColumnID]int, len(tr.desc.Columns))\n\tfor i, c := range tr.desc.Columns {\n\t\tcolIdxMap[c.ID] = i\n\t}\n\terr := tr.fetcher.Init(&tr.desc, colIdxMap, index, spec.Reverse, isSecondaryIndex,\n\t\ttr.desc.Columns, valNeededForCol)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttr.spans = make(sqlbase.Spans, len(spec.Spans))\n\tfor i, s := range spec.Spans {\n\t\ttr.spans[i] = sqlbase.Span{Start: s.Span.Key, End: s.Span.EndKey}\n\t}\n\n\treturn tr, nil\n}\n\n\/\/ getLimitHint calculates the row limit hint for the row fetcher.\nfunc (tr *tableReader) getLimitHint() int64 {\n\tsoftLimit := tr.softLimit\n\tif tr.hardLimit != 0 {\n\t\tif tr.filter.expr == nil {\n\t\t\treturn tr.hardLimit\n\t\t}\n\t\t\/\/ If we have a filter, we don't know how many rows will pass the filter\n\t\t\/\/ so the hard limit is actually a \"soft\" limit at the row fetcher.\n\t\tif softLimit == 0 {\n\t\t\tsoftLimit = tr.hardLimit\n\t\t}\n\t}\n\t\/\/ If the limit is soft, we request a multiple of the limit.\n\t\/\/ If the limit is 0 (no limit), we must return 0.\n\treturn softLimit * 2\n}\n\n\/\/ nextRow processes table rows until it finds a row that passes the filter.\n\/\/ Returns a nil row when there are no more rows.\nfunc (tr *tableReader) nextRow() (sqlbase.EncDatumRow, error) {\n\tfor {\n\t\tfetcherRow, err := tr.fetcher.NextRow()\n\t\tif err != nil || fetcherRow == nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ TODO(radu): we are defeating the purpose of EncDatum here - we\n\t\t\/\/ should modify RowFetcher to return EncDatums directly and avoid\n\t\t\/\/ the cost of decoding\/reencoding.\n\t\tfor i := range fetcherRow {\n\t\t\tif fetcherRow[i] != nil {\n\t\t\t\ttr.row[i].SetDatum(tr.desc.Columns[i].Type.Kind, fetcherRow[i])\n\t\t\t}\n\t\t}\n\t\tpassesFilter, err := tr.filter.evalFilter(tr.row)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif passesFilter {\n\t\t\tbreak\n\t\t}\n\t}\n\toutRow := tr.rowAlloc.AllocRow(len(tr.outputCols))\n\tfor i, col := range tr.outputCols {\n\t\toutRow[i] = tr.row[col]\n\t}\n\treturn outRow, nil\n}\n\n\/\/ Run is part of the processor interface.\nfunc (tr *tableReader) Run(wg *sync.WaitGroup) {\n\tif wg != nil {\n\t\tdefer wg.Done()\n\t}\n\tif log.V(1) {\n\t\tlog.Infof(\"TableReader filter: %s\\n\", tr.filter.expr)\n\t}\n\n\tif err := tr.fetcher.StartScan(tr.txn, tr.spans, tr.getLimitHint()); err != nil {\n\t\ttr.output.Close(err)\n\t\treturn\n\t}\n\ttr.row = make(sqlbase.EncDatumRow, len(tr.desc.Columns))\n\tvar rowIdx int64\n\tfor {\n\t\toutRow, err := tr.nextRow()\n\t\tif err != nil || outRow == nil {\n\t\t\ttr.output.Close(err)\n\t\t\treturn\n\t\t}\n\t\t\/\/ Push the row to the output rowReceiver; stop if they don't need more\n\t\t\/\/ rows.\n\t\tif !tr.output.PushRow(outRow) {\n\t\t\ttr.output.Close(nil)\n\t\t\treturn\n\t\t}\n\t\trowIdx++\n\t\tif tr.hardLimit != 0 && rowIdx == tr.hardLimit {\n\t\t\t\/\/ We sent tr.hardLimit rows.\n\t\t\ttr.output.Close(nil)\n\t\t\treturn\n\t\t}\n\t}\n}\n<commit_msg>sql\/distsql: move basic row fetching code into readerBase<commit_after>\/\/ Copyright 2016 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\/\/\n\/\/ Author: Radu Berinde (radu@cockroachlabs.com)\n\npackage distsql\n\nimport (\n\t\"sync\"\n\n\t\"github.com\/cockroachdb\/cockroach\/client\"\n\t\"github.com\/cockroachdb\/cockroach\/sql\/parser\"\n\t\"github.com\/cockroachdb\/cockroach\/sql\/sqlbase\"\n\t\"github.com\/cockroachdb\/cockroach\/util\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/log\"\n)\n\n\/\/ readerBase implements basic code shared by tableReader and joinReader.\ntype readerBase struct {\n\tdesc sqlbase.TableDescriptor\n\ttxn *client.Txn\n\tfetcher sqlbase.RowFetcher\n\n\tfilter exprHelper\n\n\toutputCols []int\n\n\t\/\/ Last row returned by the rowFetcher; it has one entry per table column.\n\trow sqlbase.EncDatumRow\n\trowAlloc sqlbase.EncDatumRowAlloc\n}\n\nfunc (rb *readerBase) init(\n\tdesc *sqlbase.TableDescriptor,\n\tindexIdx int,\n\ttxn *client.Txn,\n\tfilter Expression,\n\tevalCtx *parser.EvalContext,\n\toutputCols []uint32,\n\treverseScan bool,\n) error {\n\trb.desc = *desc\n\trb.txn = txn\n\n\trb.outputCols = make([]int, len(outputCols))\n\tfor i, v := range outputCols {\n\t\trb.outputCols[i] = int(v)\n\t}\n\n\tnumCols := len(rb.desc.Columns)\n\n\t\/\/ Figure out which columns we need: the output columns plus any other\n\t\/\/ columns used by the filter expression.\n\tvalNeededForCol := make([]bool, numCols)\n\tfor _, c := range rb.outputCols {\n\t\tif c < 0 || c >= numCols {\n\t\t\treturn util.Errorf(\"invalid column index %d\", c)\n\t\t}\n\t\tvalNeededForCol[c] = true\n\t}\n\n\tif filter.Expr != \"\" {\n\t\ttypes := make([]sqlbase.ColumnType_Kind, numCols)\n\t\tfor i := range types {\n\t\t\ttypes[i] = rb.desc.Columns[i].Type.Kind\n\t\t}\n\t\tif err := rb.filter.init(filter, types, evalCtx); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor c := 0; c < numCols; c++ {\n\t\t\tvalNeededForCol[c] = valNeededForCol[c] || rb.filter.vars.IndexedVarUsed(c)\n\t\t}\n\t}\n\n\t\/\/ indexIdx is 0 for the primary index, or 1 to <num-indexes> for a\n\t\/\/ secondary index.\n\tif indexIdx < 0 || indexIdx > len(rb.desc.Indexes) {\n\t\treturn util.Errorf(\"invalid indexIdx %d\", indexIdx)\n\t}\n\n\tvar index *sqlbase.IndexDescriptor\n\tvar isSecondaryIndex bool\n\n\tif indexIdx > 0 {\n\t\tindex = &rb.desc.Indexes[indexIdx-1]\n\t\tisSecondaryIndex = true\n\t} else {\n\t\tindex = &rb.desc.PrimaryIndex\n\t}\n\n\tcolIdxMap := make(map[sqlbase.ColumnID]int, len(rb.desc.Columns))\n\tfor i, c := range rb.desc.Columns {\n\t\tcolIdxMap[c.ID] = i\n\t}\n\terr := rb.fetcher.Init(&rb.desc, colIdxMap, index, reverseScan, isSecondaryIndex,\n\t\ttr.desc.Columns, valNeededForCol)\n\tif err != nil {\n\t\treturn err\n\t}\n\trb.row = make(sqlbase.EncDatumRow, len(rb.desc.Columns))\n\treturn nil\n}\n\n\/\/ nextRow processes table rows until it finds a row that passes the filter.\n\/\/ Returns a nil row when there are no more rows.\nfunc (rb *readerBase) nextRow() (sqlbase.EncDatumRow, error) {\n\tfor {\n\t\tfetcherRow, err := rb.fetcher.NextRow()\n\t\tif err != nil || fetcherRow == nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ TODO(radu): we are defeating the purpose of EncDatum here - we\n\t\t\/\/ should modify RowFetcher to return EncDatums directly and avoid\n\t\t\/\/ the cost of decoding\/reencoding.\n\t\tfor i := range fetcherRow {\n\t\t\tif fetcherRow[i] != nil {\n\t\t\t\trb.row[i].SetDatum(rb.desc.Columns[i].Type.Kind, fetcherRow[i])\n\t\t\t}\n\t\t}\n\t\tpassesFilter, err := rb.filter.evalFilter(rb.row)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif passesFilter {\n\t\t\tbreak\n\t\t}\n\t}\n\toutRow := rb.rowAlloc.AllocRow(len(rb.outputCols))\n\tfor i, col := range rb.outputCols {\n\t\toutRow[i] = rb.row[col]\n\t}\n\treturn outRow, nil\n}\n\n\/\/ tableReader is the start of a computation flow; it performs KV operations to\n\/\/ retrieve rows for a table, runs a filter expression, and passes rows with the\n\/\/ desired column values to an output rowReceiver.\n\/\/ See docs\/RFCS\/distributed_sql.md\ntype tableReader struct {\n\treaderBase\n\n\tspans sqlbase.Spans\n\thardLimit, softLimit int64\n\n\toutput rowReceiver\n}\n\nvar _ processor = &tableReader{}\n\n\/\/ newTableReader creates a tableReader.\nfunc newTableReader(\n\tspec *TableReaderSpec, txn *client.Txn, output rowReceiver, evalCtx *parser.EvalContext,\n) (*tableReader, error) {\n\ttr := &tableReader{\n\t\toutput: output,\n\t\thardLimit: spec.HardLimit,\n\t\tsoftLimit: spec.SoftLimit,\n\t}\n\n\tif tr.hardLimit != 0 && tr.hardLimit < tr.softLimit {\n\t\treturn nil, util.Errorf(\"soft limit %d larger than hard limit %d\", tr.softLimit,\n\t\t\ttr.hardLimit)\n\t}\n\n\terr := tr.readerBase.init(&spec.Table, int(spec.IndexIdx), txn, spec.Filter, evalCtx,\n\t\tspec.OutputColumns, spec.Reverse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttr.spans = make(sqlbase.Spans, len(spec.Spans))\n\tfor i, s := range spec.Spans {\n\t\ttr.spans[i] = sqlbase.Span{Start: s.Span.Key, End: s.Span.EndKey}\n\t}\n\n\treturn tr, nil\n}\n\n\/\/ getLimitHint calculates the row limit hint for the row fetcher.\nfunc (tr *tableReader) getLimitHint() int64 {\n\tsoftLimit := tr.softLimit\n\tif tr.hardLimit != 0 {\n\t\tif tr.filter.expr == nil {\n\t\t\treturn tr.hardLimit\n\t\t}\n\t\t\/\/ If we have a filter, we don't know how many rows will pass the filter\n\t\t\/\/ so the hard limit is actually a \"soft\" limit at the row fetcher.\n\t\tif softLimit == 0 {\n\t\t\tsoftLimit = tr.hardLimit\n\t\t}\n\t}\n\t\/\/ If the limit is soft, we request a multiple of the limit.\n\t\/\/ If the limit is 0 (no limit), we must return 0.\n\treturn softLimit * 2\n}\n\n\/\/ Run is part of the processor interface.\nfunc (tr *tableReader) Run(wg *sync.WaitGroup) {\n\tif wg != nil {\n\t\tdefer wg.Done()\n\t}\n\tif log.V(1) {\n\t\tlog.Infof(\"TableReader filter: %s\\n\", tr.filter.expr)\n\t}\n\n\tif err := tr.fetcher.StartScan(tr.txn, tr.spans, tr.getLimitHint()); err != nil {\n\t\ttr.output.Close(err)\n\t\treturn\n\t}\n\tvar rowIdx int64\n\tfor {\n\t\toutRow, err := tr.nextRow()\n\t\tif err != nil || outRow == nil {\n\t\t\ttr.output.Close(err)\n\t\t\treturn\n\t\t}\n\t\t\/\/ Push the row to the output rowReceiver; stop if they don't need more\n\t\t\/\/ rows.\n\t\tif !tr.output.PushRow(outRow) {\n\t\t\ttr.output.Close(nil)\n\t\t\treturn\n\t\t}\n\t\trowIdx++\n\t\tif tr.hardLimit != 0 && rowIdx == tr.hardLimit {\n\t\t\t\/\/ We sent tr.hardLimit rows.\n\t\t\ttr.output.Close(nil)\n\t\t\treturn\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package slackbot\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"html\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/justinian\/dice\"\n)\n\ntype Command struct {\n\tName string\n\tDesc string\n\tFunc func(*Instance, *Message, []string) error\n}\n\nvar COMMANDS []*Command\n\nfunc init() {\n\tCOMMANDS = []*Command{\n\t\t&Command{\"apollo\", \"Tell a random quote from Apollo's source code.\", cmd_apollo},\n\t\t&Command{\"catfact\", \"Tell a random cat fact.\", cmd_catfact},\n\t\t&Command{\"duck\", \"Quack.\", cmd_duck},\n\t\t&Command{\"goon\", \"Tell a random quote from Goon's source code.\", cmd_goon},\n\t\t&Command{\"help\", \"Show a list of commands.\", cmd_help},\n\t\t&Command{\"mute\", \"Mute my messages to this channel, for a while.\", cmd_mute},\n\t\t&Command{\"pun\", \"Tell a random pun.\", cmd_pun},\n\t\t&Command{\"roll\", \"Throw a dice roll.\", cmd_roll},\n\t\t&Command{\"status\", \"Show my current status.\", cmd_status},\n\t\t&Command{\"vote\", \"start\/stop a vote or vote yes\/no during a vote.\", cmd_vote},\n\t\t&Command{\"wiki\", \"Quote a page from our SS13 wiki.\", cmd_wiki},\n\t}\n}\n\nfunc cmd_help(i *Instance, m *Message, args []string) error {\n\ttmp := \"Available commands:\\n\"\n\tfor _, c := range COMMANDS {\n\t\ttmp += fmt.Sprintf(\"- `%s\\t%s`\\n\", c.Name, c.Desc)\n\t}\n\ti.UserMsg(m.User, tmp)\n\treturn nil\n}\n\nfunc cmd_duck(i *Instance, m *Message, args []string) error {\n\tvar quotes = []string{\n\t\t\"Quack\",\n\t\t\"Wenk\",\n\t\t\"O RLY?\",\n\t\t\"Kraaawk!\",\n\t\t\"Bwaak\",\n\t\t\"Chirp\",\n\t}\n\ti.ChannelMsg(m.Channel, quotes[rand.Intn(len(quotes))])\n\treturn nil\n}\n\nfunc cmd_vote(i *Instance, m *Message, args []string) error {\n\tif len(args) < 1 {\n\t\treturn fmt.Errorf(\"Missing argument.\")\n\t}\n\n\tif args[0] == \"start\" {\n\t\ti.StartVote(m.Channel)\n\t\ti.ChannelMsg(m.Channel, fmt.Sprintf(\"@%s has started a new vote! Tell me if you would like to `vote yes` or `vote no` on it.\", i.Users[m.User].Name))\n\t} else if args[0] == \"stop\" {\n\t\tvotes := i.StopVote(m.Channel)\n\t\tvar result string\n\t\tif votes > 0 {\n\t\t\tresult = \"*YES* has won\"\n\t\t} else if votes < 0 {\n\t\t\tresult = \"*NO* has won\"\n\t\t} else {\n\t\t\tresult = \"It's a tie! No one won\"\n\t\t}\n\t\ti.ChannelMsg(m.Channel, fmt.Sprintf(\"@%s has stopped the vote! The result is...\\n%s (score: %d)!\", i.Users[m.User].Name, result, votes))\n\t} else if args[0] == \"yes\" {\n\t\ti.Vote(+1, m.Channel)\n\t} else if args[0] == \"no\" {\n\t\ti.Vote(-1, m.Channel)\n\t}\n\treturn nil\n}\n\nfunc cmd_mute(i *Instance, m *Message, args []string) error {\n\tif len(args) < 1 {\n\t\treturn fmt.Errorf(\"Missing argument.\")\n\t}\n\n\tdur, e := time.ParseDuration(args[0])\n\tif e != nil {\n\t\treturn fmt.Errorf(\"Couldn't parse a duration.\")\n\t}\n\tif dur.Minutes() < 1 {\n\t\tdur = time.Duration(5) * time.Minute\n\t}\n\tif dur.Minutes() > 60 {\n\t\tdur = time.Duration(60) * time.Minute\n\t}\n\n\ti.ChannelMsg(m.Channel, fmt.Sprintf(\"I will now shut up for ~%.0f minutes.\", dur.Minutes()))\n\tgo func() {\n\t\ttime.Sleep(5)\n\t\ti.Mute(m.Channel, dur)\n\t}()\n\treturn nil\n}\n\nfunc cmd_status(i *Instance, m *Message, args []string) error {\n\ttmp := \"My current status:\\n\"\n\ttmp += fmt.Sprintf(\"`Running: %v`\\n\", i.running)\n\ttmp += fmt.Sprintf(\"`Version: v%v`\\n\", VERSION)\n\ttmp += fmt.Sprintf(\"`Debug mode: %v`\\n\", i.Debug)\n\ttmp += fmt.Sprintf(\"`Latency: %v`\\n\", i.Latency)\n\ttmp += fmt.Sprintf(\"`My User ID: @%v`\\n\", i.BotId)\n\ttmp += fmt.Sprintf(\"`Tracked channels: %v`\\n\", len(i.Channels))\n\ttmp += fmt.Sprintf(\"`Tracked users: %v`\\n\", len(i.Users))\n\ttmp += fmt.Sprintf(\"`No. of Goon quotes: %v`\\n\", len(i.goon))\n\ttmp += fmt.Sprintf(\"`No. of Apollo quotes: %v`\\n\", len(i.apollo))\n\ttmp += fmt.Sprintf(\"\\nI am currently muted on channels:\\n\")\n\tif len(i.mutes) < 1 {\n\t\ttmp += \"- `None`\\n\"\n\t} else {\n\t\tfor k, v := range i.mutes {\n\t\t\tc := i.Channels[k].Name\n\t\t\tt := v.Sub(time.Now())\n\t\t\ttmp += fmt.Sprintf(\"- `%v (ends in ~%.0f minutes)`\\n\", c, t.Minutes())\n\t\t}\n\t}\n\ti.UserMsg(m.User, tmp)\n\treturn nil\n}\n\nfunc cmd_roll(i *Instance, m *Message, args []string) error {\n\tif len(args) < 1 {\n\t\treturn fmt.Errorf(\"Missing argument.\")\n\t}\n\n\tret, res, e := dice.Roll(args[0])\n\tif e != nil {\n\t\treturn fmt.Errorf(\"Bad dice format.\")\n\t}\n\ti.ChannelMsg(m.Channel, fmt.Sprintf(\"@%s has rolled: %v, %v\\n\", i.Users[m.User].Name, ret, res))\n\treturn nil\n}\n\nfunc cmd_wiki(i *Instance, m *Message, args []string) error {\n\tif len(args) < 1 {\n\t\treturn fmt.Errorf(\"Missing argument.\")\n\t}\n\n\tu := WIKI_URL + url.QueryEscape(args[0])\n\tdoc, e := goquery.NewDocument(u)\n\tif e != nil {\n\t\treturn fmt.Errorf(\"Couldn't open the wiki for you (%v).\", u)\n\t}\n\n\tnode := doc.Find(\"div #mw-content-text > p\").First()\n\ttext := strings.TrimSpace(node.Text())\n\tif node.Length() < 1 || len(text) < 1 {\n\t\treturn fmt.Errorf(\"Couldn't quote that page for you (%v).\", u)\n\t}\n\n\ti.ChannelMsg(m.Channel, fmt.Sprintf(\">>>%s\\n`Source: %s`\", text, u))\n\treturn nil\n}\n\nfunc cmd_pun(i *Instance, m *Message, args []string) error {\n\tresp, e := http.Get(\"http:\/\/www.punoftheday.com\/cgi-bin\/arandompun.pl\")\n\tif e != nil {\n\t\treturn fmt.Errorf(\"Sorry, couldn't make up a pun for you.\")\n\t}\n\tdefer resp.Body.Close()\n\tbody, e := ioutil.ReadAll(resp.Body)\n\tif e != nil {\n\t\treturn fmt.Errorf(\"Sorry, couldn't make up a pun for you.\")\n\t}\n\t\/\/ Yep this is a fucking mess.\n\ts := strings.TrimSpace(string(body))\n\ts = strings.TrimPrefix(s, \"document.write('"\")\n\tindex := strings.Index(s, \""\")\n\ts = html.UnescapeString(s[:index])\n\ti.ChannelMsg(m.Channel, fmt.Sprintf(\">>>%v\\n`© 1996-2011 PunoftheDay.com`\", s))\n\treturn nil\n}\n\ntype CatFacts struct {\n\tFacts []string\n\tSuccess string\n}\n\nvar LAST_CATFACT_TIME time.Time\n\nfunc cmd_catfact(i *Instance, m *Message, args []string) error {\n\tnow := time.Now()\n\tif now.After(LAST_CATFACT_TIME) != true {\n\t\treturn fmt.Errorf(\"Sorry, I can only show a single catfact per day (or else I'll get banned!).\")\n\t}\n\tLAST_CATFACT_TIME = now.Add(24 * time.Hour)\n\n\tresp, e := http.Get(\"http:\/\/catfacts-api.appspot.com\/api\/facts?number=1\")\n\tif e != nil {\n\t\treturn fmt.Errorf(\"Sorry, couldn't find any cat facts.\")\n\t}\n\n\tdefer resp.Body.Close()\n\tdecoder := json.NewDecoder(resp.Body)\n\tvar cf CatFacts\n\te = decoder.Decode(&cf)\n\tif e != nil {\n\t\treturn fmt.Errorf(\"Sorry, couldn't find any cat facts.\")\n\t}\n\n\tif len(cf.Facts) > 0 {\n\t\tmsg := fmt.Sprintf(\">>>%v\\n`© http:\/\/catfacts-api.appspot.com`\", cf.Facts[0])\n\t\ti.ChannelMsg(m.Channel, msg)\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"Sorry, couldn't find any cat facts.\")\n}\n\nfunc cmd_goon(i *Instance, m *Message, args []string) error {\n\tq := i.random_goon_quote()\n\tmsg := fmt.Sprintf(\">>>%v\\n`%v`\", q.Quote, q.File)\n\ti.ChannelMsg(m.Channel, msg)\n\treturn nil\n}\nfunc cmd_apollo(i *Instance, m *Message, args []string) error {\n\tq := i.random_apollo_quote()\n\tmsg := fmt.Sprintf(\">>>%v\\n`%v`\", q.Quote, q.File)\n\ti.ChannelMsg(m.Channel, msg)\n\treturn nil\n}\n<commit_msg>Add channels and users commands.<commit_after>package slackbot\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"html\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/justinian\/dice\"\n)\n\ntype Command struct {\n\tName string\n\tDesc string\n\tFunc func(*Instance, *Message, []string) error\n}\n\nvar COMMANDS []*Command\n\nfunc init() {\n\tCOMMANDS = []*Command{\n\t\t&Command{\"apollo\", \"Tell a random quote from Apollo's source code.\", cmd_apollo},\n\t\t&Command{\"catfact\", \"Tell a random cat fact.\", cmd_catfact},\n\t\t&Command{\"channels\", \"Show a list of known channels.\", cmd_channels},\n\t\t&Command{\"duck\", \"Quack.\", cmd_duck},\n\t\t&Command{\"goon\", \"Tell a random quote from Goon's source code.\", cmd_goon},\n\t\t&Command{\"help\", \"Show a list of commands.\", cmd_help},\n\t\t&Command{\"mute\", \"Mute my messages to this channel, for a while.\", cmd_mute},\n\t\t&Command{\"pun\", \"Tell a random pun.\", cmd_pun},\n\t\t&Command{\"roll\", \"Throw a dice roll.\", cmd_roll},\n\t\t&Command{\"status\", \"Show my current status.\", cmd_status},\n\t\t&Command{\"users\", \"Show a list of known users.\", cmd_users},\n\t\t&Command{\"vote\", \"start\/stop a vote or vote yes\/no during a vote.\", cmd_vote},\n\t\t&Command{\"wiki\", \"Quote a page from our SS13 wiki.\", cmd_wiki},\n\t}\n}\n\nfunc cmd_help(i *Instance, m *Message, args []string) error {\n\ttmp := \"Available commands:\\n\"\n\tfor _, c := range COMMANDS {\n\t\ttmp += fmt.Sprintf(\"- `%s\\t%s`\\n\", c.Name, c.Desc)\n\t}\n\ti.UserMsg(m.User, tmp)\n\treturn nil\n}\n\nfunc cmd_duck(i *Instance, m *Message, args []string) error {\n\tvar quotes = []string{\n\t\t\"Quack\",\n\t\t\"Wenk\",\n\t\t\"O RLY?\",\n\t\t\"Kraaawk!\",\n\t\t\"Bwaak\",\n\t\t\"Chirp\",\n\t}\n\ti.ChannelMsg(m.Channel, quotes[rand.Intn(len(quotes))])\n\treturn nil\n}\n\nfunc cmd_vote(i *Instance, m *Message, args []string) error {\n\tif len(args) < 1 {\n\t\treturn fmt.Errorf(\"Missing argument.\")\n\t}\n\n\tif args[0] == \"start\" {\n\t\ti.StartVote(m.Channel)\n\t\ti.ChannelMsg(m.Channel, fmt.Sprintf(\"@%s has started a new vote! Tell me if you would like to `vote yes` or `vote no` on it.\", i.Users[m.User].Name))\n\t} else if args[0] == \"stop\" {\n\t\tvotes := i.StopVote(m.Channel)\n\t\tvar result string\n\t\tif votes > 0 {\n\t\t\tresult = \"*YES* has won\"\n\t\t} else if votes < 0 {\n\t\t\tresult = \"*NO* has won\"\n\t\t} else {\n\t\t\tresult = \"It's a tie! No one won\"\n\t\t}\n\t\ti.ChannelMsg(m.Channel, fmt.Sprintf(\"@%s has stopped the vote! The result is...\\n%s (score: %d)!\", i.Users[m.User].Name, result, votes))\n\t} else if args[0] == \"yes\" {\n\t\ti.Vote(+1, m.Channel)\n\t} else if args[0] == \"no\" {\n\t\ti.Vote(-1, m.Channel)\n\t}\n\treturn nil\n}\n\nfunc cmd_mute(i *Instance, m *Message, args []string) error {\n\tif len(args) < 1 {\n\t\treturn fmt.Errorf(\"Missing argument.\")\n\t}\n\n\tdur, e := time.ParseDuration(args[0])\n\tif e != nil {\n\t\treturn fmt.Errorf(\"Couldn't parse a duration.\")\n\t}\n\tif dur.Minutes() < 1 {\n\t\tdur = time.Duration(5) * time.Minute\n\t}\n\tif dur.Minutes() > 60 {\n\t\tdur = time.Duration(60) * time.Minute\n\t}\n\n\ti.ChannelMsg(m.Channel, fmt.Sprintf(\"I will now shut up for ~%.0f minutes.\", dur.Minutes()))\n\tgo func() {\n\t\ttime.Sleep(5)\n\t\ti.Mute(m.Channel, dur)\n\t}()\n\treturn nil\n}\n\nfunc cmd_status(i *Instance, m *Message, args []string) error {\n\ttmp := \"My current status:\\n\"\n\ttmp += fmt.Sprintf(\"`Running: %v`\\n\", i.running)\n\ttmp += fmt.Sprintf(\"`Version: v%v`\\n\", VERSION)\n\ttmp += fmt.Sprintf(\"`Debug mode: %v`\\n\", i.Debug)\n\ttmp += fmt.Sprintf(\"`Latency: %v`\\n\", i.Latency)\n\ttmp += fmt.Sprintf(\"`My User ID: @%v`\\n\", i.BotId)\n\ttmp += fmt.Sprintf(\"`Tracked channels: %v`\\n\", len(i.Channels))\n\ttmp += fmt.Sprintf(\"`Tracked users: %v`\\n\", len(i.Users))\n\ttmp += fmt.Sprintf(\"`No. of Goon quotes: %v`\\n\", len(i.goon))\n\ttmp += fmt.Sprintf(\"`No. of Apollo quotes: %v`\\n\", len(i.apollo))\n\ttmp += fmt.Sprintf(\"\\nI am currently muted on channels:\\n\")\n\tif len(i.mutes) < 1 {\n\t\ttmp += \"- `None`\\n\"\n\t} else {\n\t\tfor k, v := range i.mutes {\n\t\t\tc := i.Channels[k].Name\n\t\t\tt := v.Sub(time.Now())\n\t\t\ttmp += fmt.Sprintf(\"- `%v (ends in ~%.0f minutes)`\\n\", c, t.Minutes())\n\t\t}\n\t}\n\ti.UserMsg(m.User, tmp)\n\treturn nil\n}\n\nfunc cmd_roll(i *Instance, m *Message, args []string) error {\n\tif len(args) < 1 {\n\t\treturn fmt.Errorf(\"Missing argument.\")\n\t}\n\n\tret, res, e := dice.Roll(args[0])\n\tif e != nil {\n\t\treturn fmt.Errorf(\"Bad dice format.\")\n\t}\n\ti.ChannelMsg(m.Channel, fmt.Sprintf(\"@%s has rolled: %v, %v\\n\", i.Users[m.User].Name, ret, res))\n\treturn nil\n}\n\nfunc cmd_wiki(i *Instance, m *Message, args []string) error {\n\tif len(args) < 1 {\n\t\treturn fmt.Errorf(\"Missing argument.\")\n\t}\n\n\tu := WIKI_URL + url.QueryEscape(args[0])\n\tdoc, e := goquery.NewDocument(u)\n\tif e != nil {\n\t\treturn fmt.Errorf(\"Couldn't open the wiki for you (%v).\", u)\n\t}\n\n\tnode := doc.Find(\"div #mw-content-text > p\").First()\n\ttext := strings.TrimSpace(node.Text())\n\tif node.Length() < 1 || len(text) < 1 {\n\t\treturn fmt.Errorf(\"Couldn't quote that page for you (%v).\", u)\n\t}\n\n\ti.ChannelMsg(m.Channel, fmt.Sprintf(\">>>%s\\n`Source: %s`\", text, u))\n\treturn nil\n}\n\nfunc cmd_pun(i *Instance, m *Message, args []string) error {\n\tresp, e := http.Get(\"http:\/\/www.punoftheday.com\/cgi-bin\/arandompun.pl\")\n\tif e != nil {\n\t\treturn fmt.Errorf(\"Sorry, couldn't make up a pun for you.\")\n\t}\n\tdefer resp.Body.Close()\n\tbody, e := ioutil.ReadAll(resp.Body)\n\tif e != nil {\n\t\treturn fmt.Errorf(\"Sorry, couldn't make up a pun for you.\")\n\t}\n\t\/\/ Yep this is a fucking mess.\n\ts := strings.TrimSpace(string(body))\n\ts = strings.TrimPrefix(s, \"document.write('"\")\n\tindex := strings.Index(s, \""\")\n\ts = html.UnescapeString(s[:index])\n\ti.ChannelMsg(m.Channel, fmt.Sprintf(\">>>%v\\n`© 1996-2011 PunoftheDay.com`\", s))\n\treturn nil\n}\n\ntype CatFacts struct {\n\tFacts []string\n\tSuccess string\n}\n\nvar LAST_CATFACT_TIME time.Time\n\nfunc cmd_catfact(i *Instance, m *Message, args []string) error {\n\tnow := time.Now()\n\tif now.After(LAST_CATFACT_TIME) != true {\n\t\treturn fmt.Errorf(\"Sorry, I can only show a single catfact per day (or else I'll get banned!).\")\n\t}\n\tLAST_CATFACT_TIME = now.Add(24 * time.Hour)\n\n\tresp, e := http.Get(\"http:\/\/catfacts-api.appspot.com\/api\/facts?number=1\")\n\tif e != nil {\n\t\treturn fmt.Errorf(\"Sorry, couldn't find any cat facts.\")\n\t}\n\n\tdefer resp.Body.Close()\n\tdecoder := json.NewDecoder(resp.Body)\n\tvar cf CatFacts\n\te = decoder.Decode(&cf)\n\tif e != nil {\n\t\treturn fmt.Errorf(\"Sorry, couldn't find any cat facts.\")\n\t}\n\n\tif len(cf.Facts) > 0 {\n\t\tmsg := fmt.Sprintf(\">>>%v\\n`© http:\/\/catfacts-api.appspot.com`\", cf.Facts[0])\n\t\ti.ChannelMsg(m.Channel, msg)\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"Sorry, couldn't find any cat facts.\")\n}\n\nfunc cmd_goon(i *Instance, m *Message, args []string) error {\n\tq := i.random_goon_quote()\n\tmsg := fmt.Sprintf(\">>>%v\\n`%v`\", q.Quote, q.File)\n\ti.ChannelMsg(m.Channel, msg)\n\treturn nil\n}\nfunc cmd_apollo(i *Instance, m *Message, args []string) error {\n\tq := i.random_apollo_quote()\n\tmsg := fmt.Sprintf(\">>>%v\\n`%v`\", q.Quote, q.File)\n\ti.ChannelMsg(m.Channel, msg)\n\treturn nil\n}\n\nfunc cmd_channels(i *Instance, m *Message, args []string) error {\n\ttmp := fmt.Sprintf(\"Known channels (%d):\\n\", len(i.Channels))\n\n\tfor id, c := range i.Channels {\n\t\ttmp += fmt.Sprintf(\"`#%s %s - %s`\\n\", id, c.Name, c.Topic.Value)\n\t}\n\ti.UserMsg(m.User, tmp)\n\treturn nil\n}\n\nfunc cmd_users(i *Instance, m *Message, args []string) error {\n\ttmp := fmt.Sprintf(\"Known users (%d):\\n\", len(i.Users))\n\n\tfor id, u := range i.Users {\n\t\ttmp += fmt.Sprintf(\"`#%s %s - %s`\\n\", id, u.Name, u.Presence)\n\t}\n\ti.UserMsg(m.User, tmp)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package ts\n\nimport \"io\"\nimport \"log\"\nimport \"errors\"\nimport \"github.com\/32bitkid\/bitreader\"\n\nfunc NewReader(reader io.Reader) TransportStreamReader {\n\tbr := bitreader.NewReader32(reader)\n\treturn &tsReader{br}\n}\n\ntype TransportStreamReader interface {\n\tNext() (*TsPacket, error)\n}\n\ntype tsReader struct {\n\tbitreader.Reader32\n}\n\nconst SyncByte = 0x47\n\ntype TsPacket struct {\n\tTransportErrorIndicator bool\n\tPayloadUnitStartIndicator bool\n\tTransportPriority bool\n\tPID uint32\n\tTransportScramblingControl uint32\n\tAdaptationFieldControl uint32\n\tContinuityCounter uint32\n\tPayload []byte\n}\n\nfunc (tsr *tsReader) Next() (*TsPacket, error) {\n\n\tif !tsr.isAligned() && !tsr.realign() {\n\t\treturn nil, errors.New(\"No sync_byte found\")\n\t}\n\n\ttsr.Trash(8)\n\n\tpacket := TsPacket{\n\t\tTransportErrorIndicator: tsr.ReadBit(),\n\t\tPayloadUnitStartIndicator: tsr.ReadBit(),\n\t\tTransportPriority: tsr.ReadBit(),\n\t\tPID: tsr.Read32(13),\n\t\tTransportScramblingControl: tsr.Read32(2),\n\t\tAdaptationFieldControl: tsr.Read32(2),\n\t\tContinuityCounter: tsr.Read32(4),\n\t\tPayload: make([]byte, 184),\n\t}\n\n\tfor i := 0; i < 184; i++ {\n\t\tpacket.Payload[i] = byte(tsr.Read32(8))\n\t}\n\n\treturn &packet, nil\n}\n\nfunc (tsr *tsReader) isAligned() bool {\n\treturn tsr.Peek32(8) == SyncByte\n}\n\nfunc (tsr *tsReader) realign() bool {\n\tlog.Printf(\"Attempting to realign\")\n\tfor i := 0; i < 188; i++ {\n\t\tif tsr.isAligned() {\n\t\t\tlog.Printf(\"Realigned after %d bytes.\\n\", i)\n\t\t\treturn true\n\t\t}\n\t\ttsr.Trash(8)\n\t}\n\treturn false\n}\n<commit_msg>Adding error handling from bitreader interface<commit_after>package ts\n\nimport \"io\"\nimport \"log\"\nimport \"errors\"\nimport \"github.com\/32bitkid\/bitreader\"\n\nfunc NewReader(reader io.Reader) TransportStreamReader {\n\tbr := bitreader.NewReader32(reader)\n\treturn &tsReader{br}\n}\n\ntype TransportStreamReader interface {\n\tNext() (*TsPacket, error)\n}\n\ntype tsReader struct {\n\tbitreader.Reader32\n}\n\nconst SyncByte = 0x47\n\ntype TsPacket struct {\n\tTransportErrorIndicator bool\n\tPayloadUnitStartIndicator bool\n\tTransportPrriority bool\n\tPID uint32\n\tTransportScramblingControl uint32\n\tAdaptationFieldControl uint32\n\tContinuityCounter uint32\n\tPayload []byte\n}\n\nfunc isFatalErr(err error) bool {\n\treturn err != nil && err != io.EOF\n}\n\nfunc (tsr *tsReader) Next() (*TsPacket, error) {\n\n\tvar err error\n\n\taligned, err := tsr.isAligned()\n\tif isFatalErr(err) {\n\t\treturn nil, err\n\t}\n\n\tif !aligned {\n\t\terr = tsr.realign()\n\t\tif isFatalErr(err) {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif err = tsr.Trash(8); isFatalErr(err) {\n\t\treturn nil, err\n\t}\n\n\tpacket := TsPacket{}\n\n\tpacket.TransportErrorIndicator, err = tsr.ReadBit()\n\tif isFatalErr(err) {\n\t\treturn nil, err\n\t}\n\n\tpacket.PayloadUnitStartIndicator, err = tsr.ReadBit()\n\tif isFatalErr(err) {\n\t\treturn nil, err\n\t}\n\n\tpacket.TransportPrriority, err = tsr.ReadBit()\n\tif isFatalErr(err) {\n\t\treturn nil, err\n\t}\n\n\tpacket.PID, err = tsr.Read32(13)\n\tif isFatalErr(err) {\n\t\treturn nil, err\n\t}\n\n\tpacket.TransportScramblingControl, err = tsr.Read32(2)\n\tif isFatalErr(err) {\n\t\treturn nil, err\n\t}\n\n\tpacket.AdaptationFieldControl, err = tsr.Read32(2)\n\tif isFatalErr(err) {\n\t\treturn nil, err\n\t}\n\n\tpacket.ContinuityCounter, err = tsr.Read32(4)\n\tif isFatalErr(err) {\n\t\treturn nil, err\n\t}\n\n\t\/\/ TODO handle adaptation field\n\n\tpacket.Payload = make([]byte, 184)\n\n\tvar val uint32\n\tfor i := 0; i < 184; i++ {\n\t\tval, err = tsr.Read32(8)\n\t\tif isFatalErr(err) {\n\t\t\treturn nil, err\n\t\t}\n\t\tpacket.Payload[i] = byte(val)\n\t}\n\n\treturn &packet, nil\n}\n\nfunc (tsr *tsReader) isAligned() (bool, error) {\n\tval, err := tsr.Peek32(8)\n\treturn val == SyncByte, err\n}\n\nfunc (tsr *tsReader) realign() error {\n\tlog.Printf(\"Attempting to realign\")\n\tfor i := 0; i < 188; i++ {\n\t\ttsr.Trash(8)\n\t\tisAligned, err := tsr.isAligned()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif isAligned {\n\t\t\tlog.Printf(\"Realigned after %d bytes.\\n\", i)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn errors.New(\"Unable to find SyncByte\")\n}\n<|endoftext|>"} {"text":"<commit_before>4f15bc32-2e56-11e5-9284-b827eb9e62be<commit_msg>4f1ae644-2e56-11e5-9284-b827eb9e62be<commit_after>4f1ae644-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>a14a1738-2e55-11e5-9284-b827eb9e62be<commit_msg>a14f430c-2e55-11e5-9284-b827eb9e62be<commit_after>a14f430c-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>21c93b14-2e56-11e5-9284-b827eb9e62be<commit_msg>21ce76b0-2e56-11e5-9284-b827eb9e62be<commit_after>21ce76b0-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f8a069de-2e54-11e5-9284-b827eb9e62be<commit_msg>f8a58702-2e54-11e5-9284-b827eb9e62be<commit_after>f8a58702-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>079248b6-2e57-11e5-9284-b827eb9e62be<commit_msg>079765c6-2e57-11e5-9284-b827eb9e62be<commit_after>079765c6-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>dda778ce-2e55-11e5-9284-b827eb9e62be<commit_msg>ddac9430-2e55-11e5-9284-b827eb9e62be<commit_after>ddac9430-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>d3225130-2e55-11e5-9284-b827eb9e62be<commit_msg>d3276d14-2e55-11e5-9284-b827eb9e62be<commit_after>d3276d14-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>1fce4daa-2e55-11e5-9284-b827eb9e62be<commit_msg>1fd3a2be-2e55-11e5-9284-b827eb9e62be<commit_after>1fd3a2be-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>33619fe8-2e55-11e5-9284-b827eb9e62be<commit_msg>3366ce3c-2e55-11e5-9284-b827eb9e62be<commit_after>3366ce3c-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>accefb9c-2e54-11e5-9284-b827eb9e62be<commit_msg>acd4121c-2e54-11e5-9284-b827eb9e62be<commit_after>acd4121c-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>2b81ab6e-2e56-11e5-9284-b827eb9e62be<commit_msg>2b870924-2e56-11e5-9284-b827eb9e62be<commit_after>2b870924-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>7cde49fe-2e56-11e5-9284-b827eb9e62be<commit_msg>7ce366be-2e56-11e5-9284-b827eb9e62be<commit_after>7ce366be-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f2f87e26-2e55-11e5-9284-b827eb9e62be<commit_msg>f2fdac8e-2e55-11e5-9284-b827eb9e62be<commit_after>f2fdac8e-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>7ffbf02e-2e55-11e5-9284-b827eb9e62be<commit_msg>8001274c-2e55-11e5-9284-b827eb9e62be<commit_after>8001274c-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>10e47bde-2e55-11e5-9284-b827eb9e62be<commit_msg>10e9cf3a-2e55-11e5-9284-b827eb9e62be<commit_after>10e9cf3a-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>b6a4f220-2e54-11e5-9284-b827eb9e62be<commit_msg>b6aa2862-2e54-11e5-9284-b827eb9e62be<commit_after>b6aa2862-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>20e556ba-2e56-11e5-9284-b827eb9e62be<commit_msg>20ea82a2-2e56-11e5-9284-b827eb9e62be<commit_after>20ea82a2-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>944f6628-2e55-11e5-9284-b827eb9e62be<commit_msg>9454837e-2e55-11e5-9284-b827eb9e62be<commit_after>9454837e-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>0a567ad2-2e56-11e5-9284-b827eb9e62be<commit_msg>0a5bcb22-2e56-11e5-9284-b827eb9e62be<commit_after>0a5bcb22-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>16a96b36-2e57-11e5-9284-b827eb9e62be<commit_msg>16aeb5c8-2e57-11e5-9284-b827eb9e62be<commit_after>16aeb5c8-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>2a336fd6-2e56-11e5-9284-b827eb9e62be<commit_msg>2a38a73a-2e56-11e5-9284-b827eb9e62be<commit_after>2a38a73a-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>7e3917ac-2e56-11e5-9284-b827eb9e62be<commit_msg>7e3e4b6e-2e56-11e5-9284-b827eb9e62be<commit_after>7e3e4b6e-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>92714d3a-2e55-11e5-9284-b827eb9e62be<commit_msg>92766d10-2e55-11e5-9284-b827eb9e62be<commit_after>92766d10-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage maas\n\nimport (\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/juju\/gomaasapi\"\n\tjc \"github.com\/juju\/testing\/checkers\"\n\t\"github.com\/juju\/utils\/set\"\n\tgc \"gopkg.in\/check.v1\"\n\n\t\"github.com\/juju\/juju\/environs\/config\"\n\t\"github.com\/juju\/juju\/feature\"\n\tcoretesting \"github.com\/juju\/juju\/testing\"\n\t\"github.com\/juju\/juju\/version\"\n)\n\ntype maas2Suite struct {\n\tbaseProviderSuite\n}\n\nfunc (suite *maas2Suite) SetUpTest(c *gc.C) {\n\tsuite.baseProviderSuite.SetUpTest(c)\n\tsuite.SetFeatureFlags(feature.MAAS2)\n}\n\nfunc (suite *maas2Suite) injectController(controller gomaasapi.Controller) {\n\tmockGetController := func(maasServer, apiKey string) (gomaasapi.Controller, error) {\n\t\treturn controller, nil\n\t}\n\tsuite.PatchValue(&GetMAAS2Controller, mockGetController)\n}\n\nfunc (suite *maas2Suite) makeEnviron(c *gc.C, controller gomaasapi.Controller) *maasEnviron {\n\tif controller != nil {\n\t\tsuite.injectController(controller)\n\t}\n\ttestAttrs := coretesting.Attrs{}\n\tfor k, v := range maasEnvAttrs {\n\t\ttestAttrs[k] = v\n\t}\n\ttestAttrs[\"maas-server\"] = \"http:\/\/any-old-junk.invalid\/\"\n\ttestAttrs[\"agent-version\"] = version.Current.String()\n\ttestAttrs[\"maas-agent-name\"] = \"agent-prefix\"\n\n\tattrs := coretesting.FakeConfig().Merge(testAttrs)\n\tcfg, err := config.New(config.NoDefaults, attrs)\n\tc.Assert(err, jc.ErrorIsNil)\n\tenv, err := NewEnviron(cfg)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(env, gc.NotNil)\n\treturn env\n}\n\ntype fakeController struct {\n\tgomaasapi.Controller\n\tbootResources []gomaasapi.BootResource\n\tbootResourcesError error\n\tmachines []gomaasapi.Machine\n\tmachinesError error\n\tmachinesArgsCheck func(gomaasapi.MachinesArgs)\n\tzones []gomaasapi.Zone\n\tzonesError error\n\tspaces []gomaasapi.Space\n\tspacesError error\n\n\tallocateMachine gomaasapi.Machine\n\tallocateMachineMatches gomaasapi.ConstraintMatches\n\tallocateMachineError error\n\tallocateMachineArgsCheck func(gomaasapi.AllocateMachineArgs)\n\n\tfiles []gomaasapi.File\n\tfilesPrefix string\n\tfilesError error\n\tgetFileFilename string\n\taddFileArgs gomaasapi.AddFileArgs\n\n\treleaseMachinesErrors []error\n\treleaseMachinesArgs []gomaasapi.ReleaseMachinesArgs\n}\n\nfunc (c *fakeController) Machines(args gomaasapi.MachinesArgs) ([]gomaasapi.Machine, error) {\n\tif c.machinesArgsCheck != nil {\n\t\tc.machinesArgsCheck(args)\n\t}\n\tif c.machinesError != nil {\n\t\treturn nil, c.machinesError\n\t}\n\tif len(args.SystemIDs) > 0 {\n\t\tresult := []gomaasapi.Machine{}\n\t\tsystemIds := set.NewStrings(args.SystemIDs...)\n\t\tfor _, machine := range c.machines {\n\t\t\tif systemIds.Contains(machine.SystemID()) {\n\t\t\t\tresult = append(result, machine)\n\t\t\t}\n\t\t}\n\t\treturn result, nil\n\t}\n\treturn c.machines, nil\n}\n\nfunc (c *fakeController) AllocateMachine(args gomaasapi.AllocateMachineArgs) (gomaasapi.Machine, gomaasapi.ConstraintMatches, error) {\n\tif c.allocateMachineArgsCheck != nil {\n\t\tc.allocateMachineArgsCheck(args)\n\t}\n\tif c.allocateMachineError != nil {\n\t\treturn nil, c.allocateMachineMatches, c.allocateMachineError\n\t}\n\treturn c.allocateMachine, c.allocateMachineMatches, nil\n}\n\nfunc (c *fakeController) BootResources() ([]gomaasapi.BootResource, error) {\n\tif c.bootResourcesError != nil {\n\t\treturn nil, c.bootResourcesError\n\t}\n\treturn c.bootResources, nil\n}\n\nfunc (c *fakeController) Zones() ([]gomaasapi.Zone, error) {\n\tif c.zonesError != nil {\n\t\treturn nil, c.zonesError\n\t}\n\treturn c.zones, nil\n}\n\nfunc (c *fakeController) Spaces() ([]gomaasapi.Space, error) {\n\tif c.spacesError != nil {\n\t\treturn nil, c.spacesError\n\t}\n\treturn c.spaces, nil\n}\n\nfunc (c *fakeController) Files(prefix string) ([]gomaasapi.File, error) {\n\tc.filesPrefix = prefix\n\tif c.filesError != nil {\n\t\treturn nil, c.filesError\n\t}\n\treturn c.files, nil\n}\n\nfunc (c *fakeController) GetFile(filename string) (gomaasapi.File, error) {\n\tc.getFileFilename = filename\n\tif c.filesError != nil {\n\t\treturn nil, c.filesError\n\t}\n\t\/\/ Try to find the file by name (needed for testing RemoveAll)\n\tfor _, file := range c.files {\n\t\tif file.Filename() == filename {\n\t\t\treturn file, nil\n\t\t}\n\t}\n\t\/\/ The test forgot to set up matching files!\n\treturn nil, errors.Errorf(\"no file named %v found - did you set up your test correctly?\", filename)\n}\n\nfunc (c *fakeController) AddFile(args gomaasapi.AddFileArgs) error {\n\tc.addFileArgs = args\n\treturn c.filesError\n}\n\nfunc (c *fakeController) ReleaseMachines(args gomaasapi.ReleaseMachinesArgs) error {\n\tc.releaseMachinesArgs = append(c.releaseMachinesArgs, args)\n\tif len(c.releaseMachinesErrors) == 0 {\n\t\treturn nil\n\t}\n\terr := c.releaseMachinesErrors[0]\n\tc.releaseMachinesErrors = c.releaseMachinesErrors[1:]\n\treturn err\n}\n\ntype fakeBootResource struct {\n\tgomaasapi.BootResource\n\tname string\n\tarchitecture string\n}\n\nfunc (r *fakeBootResource) Name() string {\n\treturn r.name\n}\n\nfunc (r *fakeBootResource) Architecture() string {\n\treturn r.architecture\n}\n\ntype fakeMachine struct {\n\tgomaasapi.Machine\n\tzoneName string\n\thostname string\n\tsystemID string\n\tipAddresses []string\n\tstatusName string\n\tstatusMessage string\n\tcpuCount int\n\tmemory int\n\tarchitecture string\n\tinterfaceSet []gomaasapi.Interface\n}\n\nfunc (m *fakeMachine) CPUCount() int {\n\treturn m.cpuCount\n}\n\nfunc (m *fakeMachine) Memory() int {\n\treturn m.memory\n}\n\nfunc (m *fakeMachine) Architecture() string {\n\treturn m.architecture\n}\n\nfunc (m *fakeMachine) SystemID() string {\n\treturn m.systemID\n}\n\nfunc (m *fakeMachine) Hostname() string {\n\treturn m.hostname\n}\n\nfunc (m *fakeMachine) IPAddresses() []string {\n\treturn m.ipAddresses\n}\n\nfunc (m *fakeMachine) StatusName() string {\n\treturn m.statusName\n}\n\nfunc (m *fakeMachine) StatusMessage() string {\n\treturn m.statusMessage\n}\n\nfunc (m *fakeMachine) Zone() gomaasapi.Zone {\n\treturn fakeZone{name: m.zoneName}\n}\n\nfunc (m *fakeMachine) InterfaceSet() []gomaasapi.Interface {\n\treturn m.interfaceSet\n}\n\nfunc (m *fakeMachine) Start(args gomaasapi.StartArgs) error {\n\treturn nil\n}\n\ntype fakeZone struct {\n\tgomaasapi.Zone\n\tname string\n}\n\nfunc (z fakeZone) Name() string {\n\treturn z.name\n}\n\ntype fakeSpace struct {\n\tgomaasapi.Space\n\tname string\n\tid int\n\tsubnets []gomaasapi.Subnet\n}\n\nfunc (s fakeSpace) Name() string {\n\treturn s.name\n}\n\nfunc (s fakeSpace) ID() int {\n\treturn s.id\n}\n\nfunc (s fakeSpace) Subnets() []gomaasapi.Subnet {\n\treturn s.subnets\n}\n\ntype fakeSubnet struct {\n\tgomaasapi.Subnet\n\tid int\n\tspace string\n\tvlan gomaasapi.VLAN\n\tgateway string\n\tcidr string\n\tdnsServers []string\n}\n\nfunc (s fakeSubnet) ID() int {\n\treturn s.id\n}\n\nfunc (s fakeSubnet) Space() string {\n\treturn s.space\n}\n\nfunc (s fakeSubnet) VLAN() gomaasapi.VLAN {\n\treturn s.vlan\n}\n\nfunc (s fakeSubnet) Gateway() string {\n\treturn s.gateway\n}\n\nfunc (s fakeSubnet) CIDR() string {\n\treturn s.cidr\n}\n\nfunc (s fakeSubnet) DNSServers() []string {\n\treturn s.dnsServers\n}\n\ntype fakeVLAN struct {\n\tgomaasapi.VLAN\n\tid int\n\tvid int\n\tmtu int\n}\n\nfunc (v fakeVLAN) ID() int {\n\treturn v.id\n}\n\nfunc (v fakeVLAN) VID() int {\n\treturn v.vid\n}\n\nfunc (v fakeVLAN) MTU() int {\n\treturn v.mtu\n}\n\ntype fakeInterface struct {\n\tgomaasapi.Interface\n\tid int\n\tname string\n\tparents []string\n\tchildren []string\n\ttype_ string\n\tenabled bool\n\tvlan gomaasapi.VLAN\n\tlinks []gomaasapi.Link\n\tmacAddress string\n}\n\nfunc (v *fakeInterface) ID() int {\n\treturn v.id\n}\n\nfunc (v *fakeInterface) Name() string {\n\treturn v.name\n}\n\nfunc (v *fakeInterface) Parents() []string {\n\treturn v.parents\n}\n\nfunc (v *fakeInterface) Children() []string {\n\treturn v.children\n}\n\nfunc (v *fakeInterface) Type() string {\n\treturn v.type_\n}\n\nfunc (v *fakeInterface) Enabled() bool {\n\treturn v.enabled\n}\n\nfunc (v *fakeInterface) VLAN() gomaasapi.VLAN {\n\treturn v.vlan\n}\n\nfunc (v *fakeInterface) Links() []gomaasapi.Link {\n\treturn v.links\n}\n\nfunc (v *fakeInterface) MACAddress() string {\n\treturn v.macAddress\n}\n\ntype fakeLink struct {\n\tgomaasapi.Link\n\tid int\n\tmode string\n\tsubnet gomaasapi.Subnet\n\tipAddress string\n}\n\nfunc (l *fakeLink) ID() int {\n\treturn l.id\n}\n\nfunc (l *fakeLink) Mode() string {\n\treturn l.mode\n}\n\nfunc (l *fakeLink) Subnet() gomaasapi.Subnet {\n\treturn l.subnet\n}\n\nfunc (l *fakeLink) IPAddress() string {\n\treturn l.ipAddress\n}\n\ntype fakeFile struct {\n\tgomaasapi.File\n\tname string\n\turl string\n\tcontents []byte\n\tdeleted bool\n\terror error\n}\n\nfunc (f *fakeFile) Filename() string {\n\treturn f.name\n}\n\nfunc (f *fakeFile) AnonymousURL() string {\n\treturn f.url\n}\n\nfunc (f *fakeFile) Delete() error {\n\tf.deleted = true\n\treturn f.error\n}\n\nfunc (f *fakeFile) ReadAll() ([]byte, error) {\n\tif f.error != nil {\n\t\treturn nil, f.error\n\t}\n\treturn f.contents, nil\n}\n<commit_msg>Add fakeBlockDevice for testing<commit_after>\/\/ Copyright 2016 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage maas\n\nimport (\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/juju\/gomaasapi\"\n\tjc \"github.com\/juju\/testing\/checkers\"\n\t\"github.com\/juju\/utils\/set\"\n\tgc \"gopkg.in\/check.v1\"\n\n\t\"github.com\/juju\/juju\/environs\/config\"\n\t\"github.com\/juju\/juju\/feature\"\n\tcoretesting \"github.com\/juju\/juju\/testing\"\n\t\"github.com\/juju\/juju\/version\"\n)\n\ntype maas2Suite struct {\n\tbaseProviderSuite\n}\n\nfunc (suite *maas2Suite) SetUpTest(c *gc.C) {\n\tsuite.baseProviderSuite.SetUpTest(c)\n\tsuite.SetFeatureFlags(feature.MAAS2)\n}\n\nfunc (suite *maas2Suite) injectController(controller gomaasapi.Controller) {\n\tmockGetController := func(maasServer, apiKey string) (gomaasapi.Controller, error) {\n\t\treturn controller, nil\n\t}\n\tsuite.PatchValue(&GetMAAS2Controller, mockGetController)\n}\n\nfunc (suite *maas2Suite) makeEnviron(c *gc.C, controller gomaasapi.Controller) *maasEnviron {\n\tif controller != nil {\n\t\tsuite.injectController(controller)\n\t}\n\ttestAttrs := coretesting.Attrs{}\n\tfor k, v := range maasEnvAttrs {\n\t\ttestAttrs[k] = v\n\t}\n\ttestAttrs[\"maas-server\"] = \"http:\/\/any-old-junk.invalid\/\"\n\ttestAttrs[\"agent-version\"] = version.Current.String()\n\ttestAttrs[\"maas-agent-name\"] = \"agent-prefix\"\n\n\tattrs := coretesting.FakeConfig().Merge(testAttrs)\n\tcfg, err := config.New(config.NoDefaults, attrs)\n\tc.Assert(err, jc.ErrorIsNil)\n\tenv, err := NewEnviron(cfg)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(env, gc.NotNil)\n\treturn env\n}\n\ntype fakeController struct {\n\tgomaasapi.Controller\n\tbootResources []gomaasapi.BootResource\n\tbootResourcesError error\n\tmachines []gomaasapi.Machine\n\tmachinesError error\n\tmachinesArgsCheck func(gomaasapi.MachinesArgs)\n\tzones []gomaasapi.Zone\n\tzonesError error\n\tspaces []gomaasapi.Space\n\tspacesError error\n\n\tallocateMachine gomaasapi.Machine\n\tallocateMachineMatches gomaasapi.ConstraintMatches\n\tallocateMachineError error\n\tallocateMachineArgsCheck func(gomaasapi.AllocateMachineArgs)\n\n\tfiles []gomaasapi.File\n\tfilesPrefix string\n\tfilesError error\n\tgetFileFilename string\n\taddFileArgs gomaasapi.AddFileArgs\n\n\treleaseMachinesErrors []error\n\treleaseMachinesArgs []gomaasapi.ReleaseMachinesArgs\n}\n\nfunc (c *fakeController) Machines(args gomaasapi.MachinesArgs) ([]gomaasapi.Machine, error) {\n\tif c.machinesArgsCheck != nil {\n\t\tc.machinesArgsCheck(args)\n\t}\n\tif c.machinesError != nil {\n\t\treturn nil, c.machinesError\n\t}\n\tif len(args.SystemIDs) > 0 {\n\t\tresult := []gomaasapi.Machine{}\n\t\tsystemIds := set.NewStrings(args.SystemIDs...)\n\t\tfor _, machine := range c.machines {\n\t\t\tif systemIds.Contains(machine.SystemID()) {\n\t\t\t\tresult = append(result, machine)\n\t\t\t}\n\t\t}\n\t\treturn result, nil\n\t}\n\treturn c.machines, nil\n}\n\nfunc (c *fakeController) AllocateMachine(args gomaasapi.AllocateMachineArgs) (gomaasapi.Machine, gomaasapi.ConstraintMatches, error) {\n\tif c.allocateMachineArgsCheck != nil {\n\t\tc.allocateMachineArgsCheck(args)\n\t}\n\tif c.allocateMachineError != nil {\n\t\treturn nil, c.allocateMachineMatches, c.allocateMachineError\n\t}\n\treturn c.allocateMachine, c.allocateMachineMatches, nil\n}\n\nfunc (c *fakeController) BootResources() ([]gomaasapi.BootResource, error) {\n\tif c.bootResourcesError != nil {\n\t\treturn nil, c.bootResourcesError\n\t}\n\treturn c.bootResources, nil\n}\n\nfunc (c *fakeController) Zones() ([]gomaasapi.Zone, error) {\n\tif c.zonesError != nil {\n\t\treturn nil, c.zonesError\n\t}\n\treturn c.zones, nil\n}\n\nfunc (c *fakeController) Spaces() ([]gomaasapi.Space, error) {\n\tif c.spacesError != nil {\n\t\treturn nil, c.spacesError\n\t}\n\treturn c.spaces, nil\n}\n\nfunc (c *fakeController) Files(prefix string) ([]gomaasapi.File, error) {\n\tc.filesPrefix = prefix\n\tif c.filesError != nil {\n\t\treturn nil, c.filesError\n\t}\n\treturn c.files, nil\n}\n\nfunc (c *fakeController) GetFile(filename string) (gomaasapi.File, error) {\n\tc.getFileFilename = filename\n\tif c.filesError != nil {\n\t\treturn nil, c.filesError\n\t}\n\t\/\/ Try to find the file by name (needed for testing RemoveAll)\n\tfor _, file := range c.files {\n\t\tif file.Filename() == filename {\n\t\t\treturn file, nil\n\t\t}\n\t}\n\t\/\/ The test forgot to set up matching files!\n\treturn nil, errors.Errorf(\"no file named %v found - did you set up your test correctly?\", filename)\n}\n\nfunc (c *fakeController) AddFile(args gomaasapi.AddFileArgs) error {\n\tc.addFileArgs = args\n\treturn c.filesError\n}\n\nfunc (c *fakeController) ReleaseMachines(args gomaasapi.ReleaseMachinesArgs) error {\n\tc.releaseMachinesArgs = append(c.releaseMachinesArgs, args)\n\tif len(c.releaseMachinesErrors) == 0 {\n\t\treturn nil\n\t}\n\terr := c.releaseMachinesErrors[0]\n\tc.releaseMachinesErrors = c.releaseMachinesErrors[1:]\n\treturn err\n}\n\ntype fakeBootResource struct {\n\tgomaasapi.BootResource\n\tname string\n\tarchitecture string\n}\n\nfunc (r *fakeBootResource) Name() string {\n\treturn r.name\n}\n\nfunc (r *fakeBootResource) Architecture() string {\n\treturn r.architecture\n}\n\ntype fakeMachine struct {\n\tgomaasapi.Machine\n\tzoneName string\n\thostname string\n\tsystemID string\n\tipAddresses []string\n\tstatusName string\n\tstatusMessage string\n\tcpuCount int\n\tmemory int\n\tarchitecture string\n\tinterfaceSet []gomaasapi.Interface\n}\n\nfunc (m *fakeMachine) CPUCount() int {\n\treturn m.cpuCount\n}\n\nfunc (m *fakeMachine) Memory() int {\n\treturn m.memory\n}\n\nfunc (m *fakeMachine) Architecture() string {\n\treturn m.architecture\n}\n\nfunc (m *fakeMachine) SystemID() string {\n\treturn m.systemID\n}\n\nfunc (m *fakeMachine) Hostname() string {\n\treturn m.hostname\n}\n\nfunc (m *fakeMachine) IPAddresses() []string {\n\treturn m.ipAddresses\n}\n\nfunc (m *fakeMachine) StatusName() string {\n\treturn m.statusName\n}\n\nfunc (m *fakeMachine) StatusMessage() string {\n\treturn m.statusMessage\n}\n\nfunc (m *fakeMachine) Zone() gomaasapi.Zone {\n\treturn fakeZone{name: m.zoneName}\n}\n\nfunc (m *fakeMachine) InterfaceSet() []gomaasapi.Interface {\n\treturn m.interfaceSet\n}\n\nfunc (m *fakeMachine) Start(args gomaasapi.StartArgs) error {\n\treturn nil\n}\n\ntype fakeZone struct {\n\tgomaasapi.Zone\n\tname string\n}\n\nfunc (z fakeZone) Name() string {\n\treturn z.name\n}\n\ntype fakeSpace struct {\n\tgomaasapi.Space\n\tname string\n\tid int\n\tsubnets []gomaasapi.Subnet\n}\n\nfunc (s fakeSpace) Name() string {\n\treturn s.name\n}\n\nfunc (s fakeSpace) ID() int {\n\treturn s.id\n}\n\nfunc (s fakeSpace) Subnets() []gomaasapi.Subnet {\n\treturn s.subnets\n}\n\ntype fakeSubnet struct {\n\tgomaasapi.Subnet\n\tid int\n\tspace string\n\tvlan gomaasapi.VLAN\n\tgateway string\n\tcidr string\n\tdnsServers []string\n}\n\nfunc (s fakeSubnet) ID() int {\n\treturn s.id\n}\n\nfunc (s fakeSubnet) Space() string {\n\treturn s.space\n}\n\nfunc (s fakeSubnet) VLAN() gomaasapi.VLAN {\n\treturn s.vlan\n}\n\nfunc (s fakeSubnet) Gateway() string {\n\treturn s.gateway\n}\n\nfunc (s fakeSubnet) CIDR() string {\n\treturn s.cidr\n}\n\nfunc (s fakeSubnet) DNSServers() []string {\n\treturn s.dnsServers\n}\n\ntype fakeVLAN struct {\n\tgomaasapi.VLAN\n\tid int\n\tvid int\n\tmtu int\n}\n\nfunc (v fakeVLAN) ID() int {\n\treturn v.id\n}\n\nfunc (v fakeVLAN) VID() int {\n\treturn v.vid\n}\n\nfunc (v fakeVLAN) MTU() int {\n\treturn v.mtu\n}\n\ntype fakeInterface struct {\n\tgomaasapi.Interface\n\tid int\n\tname string\n\tparents []string\n\tchildren []string\n\ttype_ string\n\tenabled bool\n\tvlan gomaasapi.VLAN\n\tlinks []gomaasapi.Link\n\tmacAddress string\n}\n\nfunc (v *fakeInterface) ID() int {\n\treturn v.id\n}\n\nfunc (v *fakeInterface) Name() string {\n\treturn v.name\n}\n\nfunc (v *fakeInterface) Parents() []string {\n\treturn v.parents\n}\n\nfunc (v *fakeInterface) Children() []string {\n\treturn v.children\n}\n\nfunc (v *fakeInterface) Type() string {\n\treturn v.type_\n}\n\nfunc (v *fakeInterface) Enabled() bool {\n\treturn v.enabled\n}\n\nfunc (v *fakeInterface) VLAN() gomaasapi.VLAN {\n\treturn v.vlan\n}\n\nfunc (v *fakeInterface) Links() []gomaasapi.Link {\n\treturn v.links\n}\n\nfunc (v *fakeInterface) MACAddress() string {\n\treturn v.macAddress\n}\n\ntype fakeLink struct {\n\tgomaasapi.Link\n\tid int\n\tmode string\n\tsubnet gomaasapi.Subnet\n\tipAddress string\n}\n\nfunc (l *fakeLink) ID() int {\n\treturn l.id\n}\n\nfunc (l *fakeLink) Mode() string {\n\treturn l.mode\n}\n\nfunc (l *fakeLink) Subnet() gomaasapi.Subnet {\n\treturn l.subnet\n}\n\nfunc (l *fakeLink) IPAddress() string {\n\treturn l.ipAddress\n}\n\ntype fakeFile struct {\n\tgomaasapi.File\n\tname string\n\turl string\n\tcontents []byte\n\tdeleted bool\n\terror error\n}\n\nfunc (f *fakeFile) Filename() string {\n\treturn f.name\n}\n\nfunc (f *fakeFile) AnonymousURL() string {\n\treturn f.url\n}\n\nfunc (f *fakeFile) Delete() error {\n\tf.deleted = true\n\treturn f.error\n}\n\nfunc (f *fakeFile) ReadAll() ([]byte, error) {\n\tif f.error != nil {\n\t\treturn nil, f.error\n\t}\n\treturn f.contents, nil\n}\n\ntype fakeBlockDevice struct {\n\tgomaasapi.BlockDevice\n\tname string\n\tsize int\n}\n\nfunc (bd fakeBlockDevice) Name() string {\n\treturn bd.name\n}\n\nfunc (bd fakeBlockDevice) Size() int {\n\treturn bd.size\n}\n<|endoftext|>"} {"text":"<commit_before>676f6720-2e55-11e5-9284-b827eb9e62be<commit_msg>67747e9a-2e55-11e5-9284-b827eb9e62be<commit_after>67747e9a-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>fa364f14-2e56-11e5-9284-b827eb9e62be<commit_msg>fa3b6c38-2e56-11e5-9284-b827eb9e62be<commit_after>fa3b6c38-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>e5b41cc6-2e54-11e5-9284-b827eb9e62be<commit_msg>e5b93ecc-2e54-11e5-9284-b827eb9e62be<commit_after>e5b93ecc-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>d2dbd6ce-2e55-11e5-9284-b827eb9e62be<commit_msg>d2e0f3c0-2e55-11e5-9284-b827eb9e62be<commit_after>d2e0f3c0-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>4976c10e-2e56-11e5-9284-b827eb9e62be<commit_msg>497c1e7e-2e56-11e5-9284-b827eb9e62be<commit_after>497c1e7e-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>b74e29e4-2e54-11e5-9284-b827eb9e62be<commit_msg>b75359f0-2e54-11e5-9284-b827eb9e62be<commit_after>b75359f0-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage docker\n\nimport (\n\t\"errors\"\n\t\"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/globocom\/tsuru\/action\"\n\t\"github.com\/globocom\/tsuru\/app\"\n\t\"github.com\/globocom\/tsuru\/db\"\n\t\"github.com\/globocom\/tsuru\/log\"\n\t\"github.com\/globocom\/tsuru\/provision\"\n\t\"github.com\/globocom\/tsuru\/queue\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t\"net\"\n\t\"strings\"\n)\n\nvar createContainer = action.Action{\n\tName: \"create-container\",\n\tForward: func(ctx action.FWContext) (action.Result, error) {\n\t\tapp := ctx.Params[0].(provision.App)\n\t\timageId := ctx.Params[1].(string)\n\t\tcmds := ctx.Params[2].([]string)\n\t\tlog.Debugf(\"create container for app %s, based on image %s, with cmds %s\", app.GetName(), imageId, cmds)\n\t\tcont, err := newContainer(app, imageId, cmds)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"error on create container for app %s - %s\", app.GetName(), err)\n\t\t\treturn nil, err\n\t\t}\n\t\treturn cont, nil\n\t},\n\tBackward: func(ctx action.BWContext) {\n\t\tc := ctx.FWResult.(container)\n\t\tdockerCluster().RemoveContainer(docker.RemoveContainerOptions{ID: c.ID})\n\t\tcoll := collection()\n\t\tdefer coll.Close()\n\t\tcoll.Remove(bson.M{\"id\": c.ID})\n\t},\n}\n\nvar setNetworkInfo = action.Action{\n\tName: \"set-network-info\",\n\tForward: func(ctx action.FWContext) (action.Result, error) {\n\t\tc := ctx.Previous.(container)\n\t\tip, hostPort, err := c.networkInfo()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tc.IP = ip\n\t\tc.HostPort = hostPort\n\t\treturn c, nil\n\t},\n}\n\nvar addRoute = action.Action{\n\tName: \"add-route\",\n\tForward: func(ctx action.FWContext) (action.Result, error) {\n\t\tc := ctx.Previous.(container)\n\t\tr, err := getRouter()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\terr = r.AddRoute(c.AppName, c.getAddress())\n\t\treturn c, err\n\t},\n\tBackward: func(ctx action.BWContext) {\n\t},\n}\n\nvar startContainer = action.Action{\n\tName: \"start-container\",\n\tForward: func(ctx action.FWContext) (action.Result, error) {\n\t\tc := ctx.Previous.(container)\n\t\tlog.Debugf(\"starting container %s\", c.ID)\n\t\terr := c.start()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"error on start container %s - %s\", c.ID, err)\n\t\t\treturn nil, err\n\t\t}\n\t\treturn c, nil\n\t},\n\tBackward: func(ctx action.BWContext) {\n\t\tc := ctx.FWResult.(container)\n\t\terr := dockerCluster().StopContainer(c.ID, 10)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to stop the container %q: %s\", c.ID, err)\n\t\t}\n\t},\n}\n\nvar injectEnvirons = action.Action{\n\tName: \"inject-environs\",\n\tForward: func(ctx action.FWContext) (action.Result, error) {\n\t\tapp, ok := ctx.Params[0].(provision.App)\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"First parameter must be a provision.App.\")\n\t\t}\n\t\tgo injectEnvsAndRestart(app)\n\t\treturn nil, nil\n\t},\n\tBackward: func(ctx action.BWContext) {\n\t},\n}\n\nvar saveUnits = action.Action{\n\tName: \"save-units\",\n\tForward: func(ctx action.FWContext) (action.Result, error) {\n\t\ta, ok := ctx.Params[0].(*app.App)\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"First parameter must be a *app.App.\")\n\t\t}\n\t\ta, err := app.GetByName(a.Name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcontainers, err := listAppContainers(a.GetName())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, c := range containers {\n\t\t\tvar status string\n\t\t\taddr := strings.Replace(c.getAddress(), \"http:\/\/\", \"\", 1)\n\t\t\tconn, err := net.Dial(\"tcp\", addr)\n\t\t\tif err != nil {\n\t\t\t\tstatus = provision.StatusUnreachable.String()\n\t\t\t} else {\n\t\t\t\tconn.Close()\n\t\t\t\tstatus = provision.StatusStarted.String()\n\t\t\t}\n\t\t\tu := app.Unit{\n\t\t\t\tName: c.ID,\n\t\t\t\tType: c.Type,\n\t\t\t\tIp: c.HostAddr,\n\t\t\t\tState: status,\n\t\t\t}\n\t\t\ta.AddUnit(&u)\n\t\t}\n\t\tconn, err := db.Conn()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer conn.Close()\n\t\tconn.Apps().Update(bson.M{\"name\": a.Name}, a)\n\t\treturn nil, nil\n\t},\n\tBackward: func(ctx action.BWContext) {\n\t},\n}\n\nvar bindService = action.Action{\n\tName: \"bind-service\",\n\tForward: func(ctx action.FWContext) (action.Result, error) {\n\t\ta, ok := ctx.Params[0].(provision.App)\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"First parameter must be a provision.App.\")\n\t\t}\n\t\tfor _, u := range a.ProvisionedUnits() {\n\t\t\tmsg := queue.Message{\n\t\t\t\tAction: app.BindService,\n\t\t\t\tArgs: []string{a.GetName(), u.GetName()},\n\t\t\t}\n\t\t\tgo app.Enqueue(msg)\n\t\t}\n\t\treturn nil, nil\n\t},\n\tBackward: func(ctx action.BWContext) {\n\t},\n}\n<commit_msg>docker: removing container from name in createContainer backward.<commit_after>\/\/ Copyright 2014 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage docker\n\nimport (\n\t\"errors\"\n\t\"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/globocom\/tsuru\/action\"\n\t\"github.com\/globocom\/tsuru\/app\"\n\t\"github.com\/globocom\/tsuru\/db\"\n\t\"github.com\/globocom\/tsuru\/log\"\n\t\"github.com\/globocom\/tsuru\/provision\"\n\t\"github.com\/globocom\/tsuru\/queue\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t\"net\"\n\t\"strings\"\n)\n\nvar createContainer = action.Action{\n\tName: \"create-container\",\n\tForward: func(ctx action.FWContext) (action.Result, error) {\n\t\tapp := ctx.Params[0].(provision.App)\n\t\timageId := ctx.Params[1].(string)\n\t\tcmds := ctx.Params[2].([]string)\n\t\tlog.Debugf(\"create container for app %s, based on image %s, with cmds %s\", app.GetName(), imageId, cmds)\n\t\tcont, err := newContainer(app, imageId, cmds)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"error on create container for app %s - %s\", app.GetName(), err)\n\t\t\treturn nil, err\n\t\t}\n\t\treturn cont, nil\n\t},\n\tBackward: func(ctx action.BWContext) {\n\t\tc := ctx.FWResult.(container)\n\t\tdockerCluster().RemoveContainer(docker.RemoveContainerOptions{ID: c.ID})\n\t\tcoll := collection()\n\t\tdefer coll.Close()\n\t\tcoll.Remove(bson.M{\"name\": c.Name})\n\t},\n}\n\nvar setNetworkInfo = action.Action{\n\tName: \"set-network-info\",\n\tForward: func(ctx action.FWContext) (action.Result, error) {\n\t\tc := ctx.Previous.(container)\n\t\tip, hostPort, err := c.networkInfo()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tc.IP = ip\n\t\tc.HostPort = hostPort\n\t\treturn c, nil\n\t},\n}\n\nvar addRoute = action.Action{\n\tName: \"add-route\",\n\tForward: func(ctx action.FWContext) (action.Result, error) {\n\t\tc := ctx.Previous.(container)\n\t\tr, err := getRouter()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\terr = r.AddRoute(c.AppName, c.getAddress())\n\t\treturn c, err\n\t},\n\tBackward: func(ctx action.BWContext) {\n\t},\n}\n\nvar startContainer = action.Action{\n\tName: \"start-container\",\n\tForward: func(ctx action.FWContext) (action.Result, error) {\n\t\tc := ctx.Previous.(container)\n\t\tlog.Debugf(\"starting container %s\", c.ID)\n\t\terr := c.start()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"error on start container %s - %s\", c.ID, err)\n\t\t\treturn nil, err\n\t\t}\n\t\treturn c, nil\n\t},\n\tBackward: func(ctx action.BWContext) {\n\t\tc := ctx.FWResult.(container)\n\t\terr := dockerCluster().StopContainer(c.ID, 10)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to stop the container %q: %s\", c.ID, err)\n\t\t}\n\t},\n}\n\nvar injectEnvirons = action.Action{\n\tName: \"inject-environs\",\n\tForward: func(ctx action.FWContext) (action.Result, error) {\n\t\tapp, ok := ctx.Params[0].(provision.App)\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"First parameter must be a provision.App.\")\n\t\t}\n\t\tgo injectEnvsAndRestart(app)\n\t\treturn nil, nil\n\t},\n\tBackward: func(ctx action.BWContext) {\n\t},\n}\n\nvar saveUnits = action.Action{\n\tName: \"save-units\",\n\tForward: func(ctx action.FWContext) (action.Result, error) {\n\t\ta, ok := ctx.Params[0].(*app.App)\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"First parameter must be a *app.App.\")\n\t\t}\n\t\ta, err := app.GetByName(a.Name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcontainers, err := listAppContainers(a.GetName())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, c := range containers {\n\t\t\tvar status string\n\t\t\taddr := strings.Replace(c.getAddress(), \"http:\/\/\", \"\", 1)\n\t\t\tconn, err := net.Dial(\"tcp\", addr)\n\t\t\tif err != nil {\n\t\t\t\tstatus = provision.StatusUnreachable.String()\n\t\t\t} else {\n\t\t\t\tconn.Close()\n\t\t\t\tstatus = provision.StatusStarted.String()\n\t\t\t}\n\t\t\tu := app.Unit{\n\t\t\t\tName: c.ID,\n\t\t\t\tType: c.Type,\n\t\t\t\tIp: c.HostAddr,\n\t\t\t\tState: status,\n\t\t\t}\n\t\t\ta.AddUnit(&u)\n\t\t}\n\t\tconn, err := db.Conn()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer conn.Close()\n\t\tconn.Apps().Update(bson.M{\"name\": a.Name}, a)\n\t\treturn nil, nil\n\t},\n\tBackward: func(ctx action.BWContext) {\n\t},\n}\n\nvar bindService = action.Action{\n\tName: \"bind-service\",\n\tForward: func(ctx action.FWContext) (action.Result, error) {\n\t\ta, ok := ctx.Params[0].(provision.App)\n\t\tif !ok {\n\t\t\treturn nil, errors.New(\"First parameter must be a provision.App.\")\n\t\t}\n\t\tfor _, u := range a.ProvisionedUnits() {\n\t\t\tmsg := queue.Message{\n\t\t\t\tAction: app.BindService,\n\t\t\t\tArgs: []string{a.GetName(), u.GetName()},\n\t\t\t}\n\t\t\tgo app.Enqueue(msg)\n\t\t}\n\t\treturn nil, nil\n\t},\n\tBackward: func(ctx action.BWContext) {\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before>14776b48-2e56-11e5-9284-b827eb9e62be<commit_msg>147c9c3a-2e56-11e5-9284-b827eb9e62be<commit_after>147c9c3a-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>968af092-2e55-11e5-9284-b827eb9e62be<commit_msg>96900bb8-2e55-11e5-9284-b827eb9e62be<commit_after>96900bb8-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>c00e044e-2e56-11e5-9284-b827eb9e62be<commit_msg>c0132956-2e56-11e5-9284-b827eb9e62be<commit_after>c0132956-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>30233196-2e57-11e5-9284-b827eb9e62be<commit_msg>3028581a-2e57-11e5-9284-b827eb9e62be<commit_after>3028581a-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>0d9f8a36-2e55-11e5-9284-b827eb9e62be<commit_msg>0da4de1e-2e55-11e5-9284-b827eb9e62be<commit_after>0da4de1e-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>4446aa96-2e56-11e5-9284-b827eb9e62be<commit_msg>444bc314-2e56-11e5-9284-b827eb9e62be<commit_after>444bc314-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>de7e08de-2e56-11e5-9284-b827eb9e62be<commit_msg>de832616-2e56-11e5-9284-b827eb9e62be<commit_after>de832616-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>package shadowsocks\n\nimport (\n\t\"context\"\n\n\t\"v2ray.com\/core\"\n\t\"v2ray.com\/core\/common\"\n\t\"v2ray.com\/core\/common\/buf\"\n\t\"v2ray.com\/core\/common\/net\"\n\t\"v2ray.com\/core\/common\/protocol\"\n\t\"v2ray.com\/core\/common\/retry\"\n\t\"v2ray.com\/core\/common\/signal\"\n\t\"v2ray.com\/core\/proxy\"\n\t\"v2ray.com\/core\/transport\/internet\"\n\t\"v2ray.com\/core\/transport\/ray\"\n)\n\n\/\/ Client is a inbound handler for Shadowsocks protocol\ntype Client struct {\n\tserverPicker protocol.ServerPicker\n\tv *core.Instance\n}\n\n\/\/ NewClient create a new Shadowsocks client.\nfunc NewClient(ctx context.Context, config *ClientConfig) (*Client, error) {\n\tserverList := protocol.NewServerList()\n\tfor _, rec := range config.Server {\n\t\tserverList.AddServer(protocol.NewServerSpecFromPB(*rec))\n\t}\n\tif serverList.Size() == 0 {\n\t\treturn nil, newError(\"0 server\")\n\t}\n\tclient := &Client{\n\t\tserverPicker: protocol.NewRoundRobinServerPicker(serverList),\n\t\tv: core.MustFromContext(ctx),\n\t}\n\treturn client, nil\n}\n\n\/\/ Process implements OutboundHandler.Process().\nfunc (v *Client) Process(ctx context.Context, outboundRay ray.OutboundRay, dialer proxy.Dialer) error {\n\tdestination, ok := proxy.TargetFromContext(ctx)\n\tif !ok {\n\t\treturn newError(\"target not specified\")\n\t}\n\tnetwork := destination.Network\n\n\tvar server *protocol.ServerSpec\n\tvar conn internet.Connection\n\n\terr := retry.ExponentialBackoff(5, 100).On(func() error {\n\t\tserver = v.serverPicker.PickServer()\n\t\tdest := server.Destination()\n\t\tdest.Network = network\n\t\trawConn, err := dialer.Dial(ctx, dest)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconn = rawConn\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn newError(\"failed to find an available destination\").AtWarning().Base(err)\n\t}\n\tnewError(\"tunneling request to \", destination, \" via \", server.Destination()).WriteToLog()\n\n\tdefer conn.Close()\n\n\trequest := &protocol.RequestHeader{\n\t\tVersion: Version,\n\t\tAddress: destination.Address,\n\t\tPort: destination.Port,\n\t}\n\tif destination.Network == net.Network_TCP {\n\t\trequest.Command = protocol.RequestCommandTCP\n\t} else {\n\t\trequest.Command = protocol.RequestCommandUDP\n\t}\n\n\tuser := server.PickUser()\n\trawAccount, err := user.GetTypedAccount()\n\tif err != nil {\n\t\treturn newError(\"failed to get a valid user account\").AtWarning().Base(err)\n\t}\n\taccount := rawAccount.(*MemoryAccount)\n\trequest.User = user\n\n\tif account.OneTimeAuth == Account_Auto || account.OneTimeAuth == Account_Enabled {\n\t\trequest.Option |= RequestOptionOneTimeAuth\n\t}\n\n\tsessionPolicy := v.v.PolicyManager().ForLevel(user.Level)\n\tctx, cancel := context.WithCancel(ctx)\n\ttimer := signal.CancelAfterInactivity(ctx, cancel, sessionPolicy.Timeouts.ConnectionIdle)\n\n\tif request.Command == protocol.RequestCommandTCP {\n\t\tbufferedWriter := buf.NewBufferedWriter(buf.NewWriter(conn))\n\t\tbodyWriter, err := WriteTCPRequest(request, bufferedWriter)\n\t\tif err != nil {\n\t\t\treturn newError(\"failed to write request\").Base(err)\n\t\t}\n\n\t\tif err := bufferedWriter.SetBuffered(false); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trequestDone := signal.ExecuteAsync(func() error {\n\t\t\tdefer timer.SetTimeout(sessionPolicy.Timeouts.DownlinkOnly)\n\t\t\treturn buf.Copy(outboundRay.OutboundInput(), bodyWriter, buf.UpdateActivity(timer))\n\t\t})\n\n\t\tresponseDone := signal.ExecuteAsync(func() error {\n\t\t\tdefer timer.SetTimeout(sessionPolicy.Timeouts.UplinkOnly)\n\n\t\t\tresponseReader, err := ReadTCPResponse(user, conn)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn buf.Copy(responseReader, outboundRay.OutboundOutput(), buf.UpdateActivity(timer))\n\t\t})\n\n\t\tif err := signal.ErrorOrFinish2(ctx, requestDone, responseDone); err != nil {\n\t\t\treturn newError(\"connection ends\").Base(err)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tif request.Command == protocol.RequestCommandUDP {\n\n\t\twriter := buf.NewSequentialWriter(&UDPWriter{\n\t\t\tWriter: conn,\n\t\t\tRequest: request,\n\t\t})\n\n\t\trequestDone := signal.ExecuteAsync(func() error {\n\t\t\tdefer timer.SetTimeout(sessionPolicy.Timeouts.DownlinkOnly)\n\n\t\t\tif err := buf.Copy(outboundRay.OutboundInput(), writer, buf.UpdateActivity(timer)); err != nil {\n\t\t\t\treturn newError(\"failed to transport all UDP request\").Base(err)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\tresponseDone := signal.ExecuteAsync(func() error {\n\t\t\tdefer timer.SetTimeout(sessionPolicy.Timeouts.UplinkOnly)\n\n\t\t\treader := &UDPReader{\n\t\t\t\tReader: conn,\n\t\t\t\tUser: user,\n\t\t\t}\n\n\t\t\tif err := buf.Copy(reader, outboundRay.OutboundOutput(), buf.UpdateActivity(timer)); err != nil {\n\t\t\t\treturn newError(\"failed to transport all UDP response\").Base(err)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\tif err := signal.ErrorOrFinish2(ctx, requestDone, responseDone); err != nil {\n\t\t\treturn newError(\"connection ends\").Base(err)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn nil\n}\n\nfunc init() {\n\tcommon.Must(common.RegisterConfig((*ClientConfig)(nil), func(ctx context.Context, config interface{}) (interface{}, error) {\n\t\treturn NewClient(ctx, config.(*ClientConfig))\n\t}))\n}\n<commit_msg>trace context<commit_after>package shadowsocks\n\nimport (\n\t\"context\"\n\n\t\"v2ray.com\/core\"\n\t\"v2ray.com\/core\/common\"\n\t\"v2ray.com\/core\/common\/buf\"\n\t\"v2ray.com\/core\/common\/net\"\n\t\"v2ray.com\/core\/common\/protocol\"\n\t\"v2ray.com\/core\/common\/retry\"\n\t\"v2ray.com\/core\/common\/signal\"\n\t\"v2ray.com\/core\/proxy\"\n\t\"v2ray.com\/core\/transport\/internet\"\n\t\"v2ray.com\/core\/transport\/ray\"\n)\n\n\/\/ Client is a inbound handler for Shadowsocks protocol\ntype Client struct {\n\tserverPicker protocol.ServerPicker\n\tv *core.Instance\n}\n\n\/\/ NewClient create a new Shadowsocks client.\nfunc NewClient(ctx context.Context, config *ClientConfig) (*Client, error) {\n\tserverList := protocol.NewServerList()\n\tfor _, rec := range config.Server {\n\t\tserverList.AddServer(protocol.NewServerSpecFromPB(*rec))\n\t}\n\tif serverList.Size() == 0 {\n\t\treturn nil, newError(\"0 server\")\n\t}\n\tclient := &Client{\n\t\tserverPicker: protocol.NewRoundRobinServerPicker(serverList),\n\t\tv: core.MustFromContext(ctx),\n\t}\n\treturn client, nil\n}\n\n\/\/ Process implements OutboundHandler.Process().\nfunc (v *Client) Process(ctx context.Context, outboundRay ray.OutboundRay, dialer proxy.Dialer) error {\n\tdestination, ok := proxy.TargetFromContext(ctx)\n\tif !ok {\n\t\treturn newError(\"target not specified\")\n\t}\n\tnetwork := destination.Network\n\n\tvar server *protocol.ServerSpec\n\tvar conn internet.Connection\n\n\terr := retry.ExponentialBackoff(5, 100).On(func() error {\n\t\tserver = v.serverPicker.PickServer()\n\t\tdest := server.Destination()\n\t\tdest.Network = network\n\t\trawConn, err := dialer.Dial(ctx, dest)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconn = rawConn\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn newError(\"failed to find an available destination\").AtWarning().Base(err)\n\t}\n\tnewError(\"tunneling request to \", destination, \" via \", server.Destination()).WithContext(ctx).WriteToLog()\n\n\tdefer conn.Close()\n\n\trequest := &protocol.RequestHeader{\n\t\tVersion: Version,\n\t\tAddress: destination.Address,\n\t\tPort: destination.Port,\n\t}\n\tif destination.Network == net.Network_TCP {\n\t\trequest.Command = protocol.RequestCommandTCP\n\t} else {\n\t\trequest.Command = protocol.RequestCommandUDP\n\t}\n\n\tuser := server.PickUser()\n\trawAccount, err := user.GetTypedAccount()\n\tif err != nil {\n\t\treturn newError(\"failed to get a valid user account\").AtWarning().Base(err)\n\t}\n\taccount := rawAccount.(*MemoryAccount)\n\trequest.User = user\n\n\tif account.OneTimeAuth == Account_Auto || account.OneTimeAuth == Account_Enabled {\n\t\trequest.Option |= RequestOptionOneTimeAuth\n\t}\n\n\tsessionPolicy := v.v.PolicyManager().ForLevel(user.Level)\n\tctx, cancel := context.WithCancel(ctx)\n\ttimer := signal.CancelAfterInactivity(ctx, cancel, sessionPolicy.Timeouts.ConnectionIdle)\n\n\tif request.Command == protocol.RequestCommandTCP {\n\t\tbufferedWriter := buf.NewBufferedWriter(buf.NewWriter(conn))\n\t\tbodyWriter, err := WriteTCPRequest(request, bufferedWriter)\n\t\tif err != nil {\n\t\t\treturn newError(\"failed to write request\").Base(err)\n\t\t}\n\n\t\tif err := bufferedWriter.SetBuffered(false); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trequestDone := signal.ExecuteAsync(func() error {\n\t\t\tdefer timer.SetTimeout(sessionPolicy.Timeouts.DownlinkOnly)\n\t\t\treturn buf.Copy(outboundRay.OutboundInput(), bodyWriter, buf.UpdateActivity(timer))\n\t\t})\n\n\t\tresponseDone := signal.ExecuteAsync(func() error {\n\t\t\tdefer timer.SetTimeout(sessionPolicy.Timeouts.UplinkOnly)\n\n\t\t\tresponseReader, err := ReadTCPResponse(user, conn)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn buf.Copy(responseReader, outboundRay.OutboundOutput(), buf.UpdateActivity(timer))\n\t\t})\n\n\t\tif err := signal.ErrorOrFinish2(ctx, requestDone, responseDone); err != nil {\n\t\t\treturn newError(\"connection ends\").Base(err)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tif request.Command == protocol.RequestCommandUDP {\n\n\t\twriter := buf.NewSequentialWriter(&UDPWriter{\n\t\t\tWriter: conn,\n\t\t\tRequest: request,\n\t\t})\n\n\t\trequestDone := signal.ExecuteAsync(func() error {\n\t\t\tdefer timer.SetTimeout(sessionPolicy.Timeouts.DownlinkOnly)\n\n\t\t\tif err := buf.Copy(outboundRay.OutboundInput(), writer, buf.UpdateActivity(timer)); err != nil {\n\t\t\t\treturn newError(\"failed to transport all UDP request\").Base(err)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\tresponseDone := signal.ExecuteAsync(func() error {\n\t\t\tdefer timer.SetTimeout(sessionPolicy.Timeouts.UplinkOnly)\n\n\t\t\treader := &UDPReader{\n\t\t\t\tReader: conn,\n\t\t\t\tUser: user,\n\t\t\t}\n\n\t\t\tif err := buf.Copy(reader, outboundRay.OutboundOutput(), buf.UpdateActivity(timer)); err != nil {\n\t\t\t\treturn newError(\"failed to transport all UDP response\").Base(err)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\tif err := signal.ErrorOrFinish2(ctx, requestDone, responseDone); err != nil {\n\t\t\treturn newError(\"connection ends\").Base(err)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn nil\n}\n\nfunc init() {\n\tcommon.Must(common.RegisterConfig((*ClientConfig)(nil), func(ctx context.Context, config interface{}) (interface{}, error) {\n\t\treturn NewClient(ctx, config.(*ClientConfig))\n\t}))\n}\n<|endoftext|>"} {"text":"<commit_before>78eb5b70-2e56-11e5-9284-b827eb9e62be<commit_msg>78f083b6-2e56-11e5-9284-b827eb9e62be<commit_after>78f083b6-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>30e1b0e4-2e57-11e5-9284-b827eb9e62be<commit_msg>30e6cc1e-2e57-11e5-9284-b827eb9e62be<commit_after>30e6cc1e-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>a97068bc-2e56-11e5-9284-b827eb9e62be<commit_msg>a975834c-2e56-11e5-9284-b827eb9e62be<commit_after>a975834c-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>892c29de-2e55-11e5-9284-b827eb9e62be<commit_msg>893144b4-2e55-11e5-9284-b827eb9e62be<commit_after>893144b4-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>package shell\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"reflect\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/gruntwork-io\/terragrunt\/errors\"\n\t\"github.com\/gruntwork-io\/terragrunt\/options\"\n)\n\n\/\/ Run the specified shell command with the specified arguments. Connect the command's stdin, stdout, and stderr to\n\/\/ the currently running app.\nfunc RunShellCommand(terragruntOptions *options.TerragruntOptions, command string, args ...string) error {\n\tterragruntOptions.Logger.Printf(\"Running command: %s %s\", command, strings.Join(args, \" \"))\n\n\tcmd := exec.Command(command, args...)\n\n\t\/\/ TODO: consider adding prefix from terragruntOptions logger to stdout and stderr\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\t\/\/ Terragrunt can run some commands (such as terraform remote config) before running the actual terraform\n\t\/\/ command requested by the user. The output of these other commands should not end up on stdout as this\n\t\/\/ breaks scripts relying on terraform's output.\n\tif !reflect.DeepEqual(terragruntOptions.TerraformCliArgs, args) {\n\t\tcmd.Stdout = os.Stderr\n\t}\n\n\tcmd.Dir = terragruntOptions.WorkingDir\n\n\tcmdChannel := make(chan error)\n\tsignalChannel := NewSignalsForwarder(forwardSignals, cmd, terragruntOptions.Logger, cmdChannel)\n\tdefer signalChannel.Close()\n\n\terr := cmd.Run()\n\tcmdChannel <- err\n\n\treturn errors.WithStackTrace(err)\n}\n\n\n\/\/ Return the exit code of a command. If the error is not an exec.ExitError type,\n\/\/ the error is returned.\nfunc GetExitCode(err error) (int, error) {\n\tvar retCode int\n\tif exiterr, ok := errors.Unwrap(err).(*exec.ExitError); ok {\n\tstatus := exiterr.Sys().(syscall.WaitStatus)\n\tretCode = status.ExitStatus()\n\treturn retCode, nil\n\t\t}\n\treturn retCode, err\n}\n\ntype SignalsForwarder chan os.Signal\n\n\/\/ Fowards signals to a command, waiting for the command to finish.\nfunc NewSignalsForwarder(signals []os.Signal, c *exec.Cmd, logger *log.Logger, cmdChannel chan error) SignalsForwarder {\n\tsignalChannel := make(chan os.Signal, 1)\n\tsignal.Notify(signalChannel, signals...)\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase s := <-signalChannel:\n\t\t\t\tlogger.Printf(\"Forward signal %s to terraform.\", s.String())\n\t\t\t\terr := c.Process.Signal(s)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Printf(\"Error forwarding signal: %v\", err)\n\t\t\t\t}\n\t\t\tcase <- cmdChannel:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn signalChannel\n}\n\nfunc (signalChannel *SignalsForwarder) Close() error {\n\tsignal.Stop(*signalChannel)\n\t*signalChannel <- nil\n\tclose(*signalChannel)\n\treturn nil\n}\n<commit_msg>cleanup GetExitCode<commit_after>package shell\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"reflect\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/gruntwork-io\/terragrunt\/errors\"\n\t\"github.com\/gruntwork-io\/terragrunt\/options\"\n)\n\n\/\/ Run the specified shell command with the specified arguments. Connect the command's stdin, stdout, and stderr to\n\/\/ the currently running app.\nfunc RunShellCommand(terragruntOptions *options.TerragruntOptions, command string, args ...string) error {\n\tterragruntOptions.Logger.Printf(\"Running command: %s %s\", command, strings.Join(args, \" \"))\n\n\tcmd := exec.Command(command, args...)\n\n\t\/\/ TODO: consider adding prefix from terragruntOptions logger to stdout and stderr\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\t\/\/ Terragrunt can run some commands (such as terraform remote config) before running the actual terraform\n\t\/\/ command requested by the user. The output of these other commands should not end up on stdout as this\n\t\/\/ breaks scripts relying on terraform's output.\n\tif !reflect.DeepEqual(terragruntOptions.TerraformCliArgs, args) {\n\t\tcmd.Stdout = os.Stderr\n\t}\n\n\tcmd.Dir = terragruntOptions.WorkingDir\n\n\tcmdChannel := make(chan error)\n\tsignalChannel := NewSignalsForwarder(forwardSignals, cmd, terragruntOptions.Logger, cmdChannel)\n\tdefer signalChannel.Close()\n\n\terr := cmd.Run()\n\tcmdChannel <- err\n\n\treturn errors.WithStackTrace(err)\n}\n\n\n\/\/ Return the exit code of a command. If the error is not an exec.ExitError type,\n\/\/ the error is returned.\nfunc GetExitCode(err error) (int, error) {\n\tif exiterr, ok := errors.Unwrap(err).(*exec.ExitError); ok {\n\t\tstatus := exiterr.Sys().(syscall.WaitStatus)\n\t\treturn status.ExitStatus(), nil\n\t}\n\treturn 0, err\n}\n\ntype SignalsForwarder chan os.Signal\n\n\/\/ Fowards signals to a command, waiting for the command to finish.\nfunc NewSignalsForwarder(signals []os.Signal, c *exec.Cmd, logger *log.Logger, cmdChannel chan error) SignalsForwarder {\n\tsignalChannel := make(chan os.Signal, 1)\n\tsignal.Notify(signalChannel, signals...)\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase s := <-signalChannel:\n\t\t\t\tlogger.Printf(\"Forward signal %s to terraform.\", s.String())\n\t\t\t\terr := c.Process.Signal(s)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Printf(\"Error forwarding signal: %v\", err)\n\t\t\t\t}\n\t\t\tcase <- cmdChannel:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn signalChannel\n}\n\nfunc (signalChannel *SignalsForwarder) Close() error {\n\tsignal.Stop(*signalChannel)\n\t*signalChannel <- nil\n\tclose(*signalChannel)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>3b1b79fa-2e57-11e5-9284-b827eb9e62be<commit_msg>3b20982c-2e57-11e5-9284-b827eb9e62be<commit_after>3b20982c-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>aa49ca8c-2e54-11e5-9284-b827eb9e62be<commit_msg>aa4f06d2-2e54-11e5-9284-b827eb9e62be<commit_after>aa4f06d2-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>74566cbc-2e56-11e5-9284-b827eb9e62be<commit_msg>745b9e12-2e56-11e5-9284-b827eb9e62be<commit_after>745b9e12-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>b2a5d1ce-2e56-11e5-9284-b827eb9e62be<commit_msg>b2aaec68-2e56-11e5-9284-b827eb9e62be<commit_after>b2aaec68-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>4fb010ee-2e55-11e5-9284-b827eb9e62be<commit_msg>4fb52d86-2e55-11e5-9284-b827eb9e62be<commit_after>4fb52d86-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>69aa9b72-2e55-11e5-9284-b827eb9e62be<commit_msg>69afbf08-2e55-11e5-9284-b827eb9e62be<commit_after>69afbf08-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>9ade4540-2e55-11e5-9284-b827eb9e62be<commit_msg>9ae36eda-2e55-11e5-9284-b827eb9e62be<commit_after>9ae36eda-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>d47e9ff6-2e56-11e5-9284-b827eb9e62be<commit_msg>d483bbc6-2e56-11e5-9284-b827eb9e62be<commit_after>d483bbc6-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>a9e37cb8-2e55-11e5-9284-b827eb9e62be<commit_msg>a9ea8c06-2e55-11e5-9284-b827eb9e62be<commit_after>a9ea8c06-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f40afa1a-2e54-11e5-9284-b827eb9e62be<commit_msg>f410b8ec-2e54-11e5-9284-b827eb9e62be<commit_after>f410b8ec-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>1dbcd9a0-2e55-11e5-9284-b827eb9e62be<commit_msg>1dc22842-2e55-11e5-9284-b827eb9e62be<commit_after>1dc22842-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>a8109722-2e55-11e5-9284-b827eb9e62be<commit_msg>a815c9fe-2e55-11e5-9284-b827eb9e62be<commit_after>a815c9fe-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>fe40b550-2e55-11e5-9284-b827eb9e62be<commit_msg>fe460438-2e55-11e5-9284-b827eb9e62be<commit_after>fe460438-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>5d0b2034-2e56-11e5-9284-b827eb9e62be<commit_msg>5d10471c-2e56-11e5-9284-b827eb9e62be<commit_after>5d10471c-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f886c31c-2e54-11e5-9284-b827eb9e62be<commit_msg>f88be388-2e54-11e5-9284-b827eb9e62be<commit_after>f88be388-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>7ffaa754-2e56-11e5-9284-b827eb9e62be<commit_msg>8000ef6a-2e56-11e5-9284-b827eb9e62be<commit_after>8000ef6a-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>12114c70-2e56-11e5-9284-b827eb9e62be<commit_msg>1216bf20-2e56-11e5-9284-b827eb9e62be<commit_after>1216bf20-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>40453b10-2e56-11e5-9284-b827eb9e62be<commit_msg>404a5050-2e56-11e5-9284-b827eb9e62be<commit_after>404a5050-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>32255d40-2e55-11e5-9284-b827eb9e62be<commit_msg>322a94a4-2e55-11e5-9284-b827eb9e62be<commit_after>322a94a4-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>34d5bae8-2e56-11e5-9284-b827eb9e62be<commit_msg>34daf0c6-2e56-11e5-9284-b827eb9e62be<commit_after>34daf0c6-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>7193d8ac-2e56-11e5-9284-b827eb9e62be<commit_msg>7198f530-2e56-11e5-9284-b827eb9e62be<commit_after>7198f530-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>2e4dc0a2-2e57-11e5-9284-b827eb9e62be<commit_msg>2e52dcf4-2e57-11e5-9284-b827eb9e62be<commit_after>2e52dcf4-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>8f53c9f6-2e56-11e5-9284-b827eb9e62be<commit_msg>8f5a8f48-2e56-11e5-9284-b827eb9e62be<commit_after>8f5a8f48-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>e7fbca50-2e55-11e5-9284-b827eb9e62be<commit_msg>e800f02a-2e55-11e5-9284-b827eb9e62be<commit_after>e800f02a-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>0740bbcc-2e57-11e5-9284-b827eb9e62be<commit_msg>07466504-2e57-11e5-9284-b827eb9e62be<commit_after>07466504-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>a944fee0-2e54-11e5-9284-b827eb9e62be<commit_msg>a94a3252-2e54-11e5-9284-b827eb9e62be<commit_after>a94a3252-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>b04de218-2e56-11e5-9284-b827eb9e62be<commit_msg>b052fabe-2e56-11e5-9284-b827eb9e62be<commit_after>b052fabe-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>ebd7cc18-2e56-11e5-9284-b827eb9e62be<commit_msg>ebdcea5e-2e56-11e5-9284-b827eb9e62be<commit_after>ebdcea5e-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>7f79eac0-2e55-11e5-9284-b827eb9e62be<commit_msg>7f7f193c-2e55-11e5-9284-b827eb9e62be<commit_after>7f7f193c-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f629a4b2-2e55-11e5-9284-b827eb9e62be<commit_msg>f62edfd6-2e55-11e5-9284-b827eb9e62be<commit_after>f62edfd6-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>607f6078-2e55-11e5-9284-b827eb9e62be<commit_msg>60847806-2e55-11e5-9284-b827eb9e62be<commit_after>60847806-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>90908ff2-2e56-11e5-9284-b827eb9e62be<commit_msg>9095b09a-2e56-11e5-9284-b827eb9e62be<commit_after>9095b09a-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>dd0e4cb8-2e54-11e5-9284-b827eb9e62be<commit_msg>dd13a942-2e54-11e5-9284-b827eb9e62be<commit_after>dd13a942-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>d5b3f4f2-2e56-11e5-9284-b827eb9e62be<commit_msg>d5b940ce-2e56-11e5-9284-b827eb9e62be<commit_after>d5b940ce-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>bdafdcc4-2e54-11e5-9284-b827eb9e62be<commit_msg>bdb52f9e-2e54-11e5-9284-b827eb9e62be<commit_after>bdb52f9e-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>4aa2f1a2-2e55-11e5-9284-b827eb9e62be<commit_msg>4aa80aa2-2e55-11e5-9284-b827eb9e62be<commit_after>4aa80aa2-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>4c589268-2e55-11e5-9284-b827eb9e62be<commit_msg>4c5db554-2e55-11e5-9284-b827eb9e62be<commit_after>4c5db554-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>74a451e8-2e56-11e5-9284-b827eb9e62be<commit_msg>74a96f66-2e56-11e5-9284-b827eb9e62be<commit_after>74a96f66-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>package toxiproxy\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"sync\"\n\n\t\"github.com\/Shopify\/toxiproxy\/v2\/stream\"\n\t\"github.com\/sirupsen\/logrus\"\n\ttomb \"gopkg.in\/tomb.v1\"\n)\n\n\/\/ Proxy represents the proxy in its entirity with all its links. The main\n\/\/ responsibility of Proxy is to accept new client and create Links between the\n\/\/ client and upstream.\n\/\/\n\/\/ Client <-> toxiproxy <-> Upstream.\n\/\/\ntype Proxy struct {\n\tsync.Mutex\n\n\tName string `json:\"name\"`\n\tListen string `json:\"listen\"`\n\tUpstream string `json:\"upstream\"`\n\tEnabled bool `json:\"enabled\"`\n\n\tlistener net.Listener\n\tstarted chan error\n\n\ttomb tomb.Tomb\n\tconnections ConnectionList\n\tToxics *ToxicCollection `json:\"-\"`\n}\n\ntype ConnectionList struct {\n\tlist map[string]net.Conn\n\tlock sync.Mutex\n}\n\nfunc (c *ConnectionList) Lock() {\n\tc.lock.Lock()\n}\n\nfunc (c *ConnectionList) Unlock() {\n\tc.lock.Unlock()\n}\n\nvar ErrProxyAlreadyStarted = errors.New(\"Proxy already started\")\n\nfunc NewProxy() *Proxy {\n\tproxy := &Proxy{\n\t\tstarted: make(chan error),\n\t\tconnections: ConnectionList{list: make(map[string]net.Conn)},\n\t}\n\tproxy.Toxics = NewToxicCollection(proxy)\n\treturn proxy\n}\n\nfunc (proxy *Proxy) Start() error {\n\tproxy.Lock()\n\tdefer proxy.Unlock()\n\n\treturn start(proxy)\n}\n\nfunc (proxy *Proxy) Update(input *Proxy) error {\n\tproxy.Lock()\n\tdefer proxy.Unlock()\n\n\tif input.Listen != proxy.Listen || input.Upstream != proxy.Upstream {\n\t\tstop(proxy)\n\t\tproxy.Listen = input.Listen\n\t\tproxy.Upstream = input.Upstream\n\t}\n\n\tif input.Enabled != proxy.Enabled {\n\t\tif input.Enabled {\n\t\t\treturn start(proxy)\n\t\t}\n\t\tstop(proxy)\n\t}\n\treturn nil\n}\n\nfunc (proxy *Proxy) Stop() {\n\tproxy.Lock()\n\tdefer proxy.Unlock()\n\n\tstop(proxy)\n}\n\nfunc (proxy *Proxy) listen() error {\n\tvar err error\n\tproxy.listener, err = net.Listen(\"tcp\", proxy.Listen)\n\tif err != nil {\n\t\tproxy.started <- err\n\t\treturn err\n\t}\n\tproxy.Listen = proxy.listener.Addr().String()\n\tproxy.started <- nil\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"name\": proxy.Name,\n\t\t\"proxy\": proxy.Listen,\n\t\t\"upstream\": proxy.Upstream,\n\t}).Info(\"Started proxy\")\n\n\treturn nil\n}\n\nfunc (proxy *Proxy) close() {\n\t\/\/ Unblock proxy.listener.Accept()\n\terr := proxy.listener.Close()\n\tif err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"proxy\": proxy.Name,\n\t\t\t\"listen\": proxy.Listen,\n\t\t\t\"err\": err,\n\t\t}).Warn(\"Attempted to close an already closed proxy server\")\n\t}\n}\n\n\/\/ This channel is to kill the blocking Accept() call below by closing the\n\/\/ net.Listener.\nfunc (proxy *Proxy) freeBlocker(acceptTomb *tomb.Tomb) {\n\t<-proxy.tomb.Dying()\n\n\t\/\/ Notify ln.Accept() that the shutdown was safe\n\tacceptTomb.Killf(\"Shutting down from stop()\")\n\n\tproxy.close()\n\n\t\/\/ Wait for the accept loop to finish processing\n\tacceptTomb.Wait()\n\tproxy.tomb.Done()\n}\n\n\/\/ server runs the Proxy server, accepting new clients and creating Links to\n\/\/ connect them to upstreams.\nfunc (proxy *Proxy) server() {\n\terr := proxy.listen()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tacceptTomb := &tomb.Tomb{}\n\tdefer acceptTomb.Done()\n\n\t\/\/ This channel is to kill the blocking Accept() call below by closing the\n\t\/\/ net.Listener.\n\tgo proxy.freeBlocker(acceptTomb)\n\n\tfor {\n\t\tclient, err := proxy.listener.Accept()\n\t\tif err != nil {\n\t\t\t\/\/ This is to confirm we're being shut down in a legit way. Unfortunately,\n\t\t\t\/\/ Go doesn't export the error when it's closed from Close() so we have to\n\t\t\t\/\/ sync up with a channel here.\n\t\t\t\/\/\n\t\t\t\/\/ See http:\/\/zhen.org\/blog\/graceful-shutdown-of-go-net-dot-listeners\/\n\t\t\tselect {\n\t\t\tcase <-acceptTomb.Dying():\n\t\t\tdefault:\n\t\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\t\"proxy\": proxy.Name,\n\t\t\t\t\t\"listen\": proxy.Listen,\n\t\t\t\t\t\"err\": err,\n\t\t\t\t}).Warn(\"Error while accepting client\")\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"name\": proxy.Name,\n\t\t\t\"client\": client.RemoteAddr(),\n\t\t\t\"proxy\": proxy.Listen,\n\t\t\t\"upstream\": proxy.Upstream,\n\t\t}).Info(\"Accepted client\")\n\n\t\tupstream, err := net.Dial(\"tcp\", proxy.Upstream)\n\t\tif err != nil {\n\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\"name\": proxy.Name,\n\t\t\t\t\"client\": client.RemoteAddr(),\n\t\t\t\t\"proxy\": proxy.Listen,\n\t\t\t\t\"upstream\": proxy.Upstream,\n\t\t\t}).Error(\"Unable to open connection to upstream\")\n\t\t\tclient.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\tname := client.RemoteAddr().String()\n\t\tproxy.connections.Lock()\n\t\tproxy.connections.list[name+\"upstream\"] = upstream\n\t\tproxy.connections.list[name+\"downstream\"] = client\n\t\tproxy.connections.Unlock()\n\t\tproxy.Toxics.StartLink(name+\"upstream\", client, upstream, stream.Upstream)\n\t\tproxy.Toxics.StartLink(name+\"downstream\", upstream, client, stream.Downstream)\n\t}\n}\n\nfunc (proxy *Proxy) RemoveConnection(name string) {\n\tproxy.connections.Lock()\n\tdefer proxy.connections.Unlock()\n\tdelete(proxy.connections.list, name)\n}\n\n\/\/ Starts a proxy, assumes the lock has already been taken.\nfunc start(proxy *Proxy) error {\n\tif proxy.Enabled {\n\t\treturn ErrProxyAlreadyStarted\n\t}\n\n\tproxy.tomb = tomb.Tomb{} \/\/ Reset tomb, from previous starts\/stops\n\tgo proxy.server()\n\terr := <-proxy.started\n\t\/\/ Only enable the proxy if it successfully started\n\tproxy.Enabled = err == nil\n\treturn err\n}\n\n\/\/ Stops a proxy, assumes the lock has already been taken.\nfunc stop(proxy *Proxy) {\n\tif !proxy.Enabled {\n\t\treturn\n\t}\n\tproxy.Enabled = false\n\n\tproxy.tomb.Killf(\"Shutting down from stop()\")\n\tproxy.tomb.Wait() \/\/ Wait until we stop accepting new connections\n\n\tproxy.connections.Lock()\n\tdefer proxy.connections.Unlock()\n\tfor _, conn := range proxy.connections.list {\n\t\tconn.Close()\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"name\": proxy.Name,\n\t\t\"proxy\": proxy.Listen,\n\t\t\"upstream\": proxy.Upstream,\n\t}).Info(\"Terminated proxy\")\n}\n<commit_msg>Fix the typo in comment for Proxy<commit_after>package toxiproxy\n\nimport (\n\t\"errors\"\n\t\"net\"\n\t\"sync\"\n\n\t\"github.com\/Shopify\/toxiproxy\/v2\/stream\"\n\t\"github.com\/sirupsen\/logrus\"\n\ttomb \"gopkg.in\/tomb.v1\"\n)\n\n\/\/ Proxy represents the proxy in its entirety with all its links. The main\n\/\/ responsibility of Proxy is to accept new client and create Links between the\n\/\/ client and upstream.\n\/\/\n\/\/ Client <-> toxiproxy <-> Upstream.\n\/\/\ntype Proxy struct {\n\tsync.Mutex\n\n\tName string `json:\"name\"`\n\tListen string `json:\"listen\"`\n\tUpstream string `json:\"upstream\"`\n\tEnabled bool `json:\"enabled\"`\n\n\tlistener net.Listener\n\tstarted chan error\n\n\ttomb tomb.Tomb\n\tconnections ConnectionList\n\tToxics *ToxicCollection `json:\"-\"`\n}\n\ntype ConnectionList struct {\n\tlist map[string]net.Conn\n\tlock sync.Mutex\n}\n\nfunc (c *ConnectionList) Lock() {\n\tc.lock.Lock()\n}\n\nfunc (c *ConnectionList) Unlock() {\n\tc.lock.Unlock()\n}\n\nvar ErrProxyAlreadyStarted = errors.New(\"Proxy already started\")\n\nfunc NewProxy() *Proxy {\n\tproxy := &Proxy{\n\t\tstarted: make(chan error),\n\t\tconnections: ConnectionList{list: make(map[string]net.Conn)},\n\t}\n\tproxy.Toxics = NewToxicCollection(proxy)\n\treturn proxy\n}\n\nfunc (proxy *Proxy) Start() error {\n\tproxy.Lock()\n\tdefer proxy.Unlock()\n\n\treturn start(proxy)\n}\n\nfunc (proxy *Proxy) Update(input *Proxy) error {\n\tproxy.Lock()\n\tdefer proxy.Unlock()\n\n\tif input.Listen != proxy.Listen || input.Upstream != proxy.Upstream {\n\t\tstop(proxy)\n\t\tproxy.Listen = input.Listen\n\t\tproxy.Upstream = input.Upstream\n\t}\n\n\tif input.Enabled != proxy.Enabled {\n\t\tif input.Enabled {\n\t\t\treturn start(proxy)\n\t\t}\n\t\tstop(proxy)\n\t}\n\treturn nil\n}\n\nfunc (proxy *Proxy) Stop() {\n\tproxy.Lock()\n\tdefer proxy.Unlock()\n\n\tstop(proxy)\n}\n\nfunc (proxy *Proxy) listen() error {\n\tvar err error\n\tproxy.listener, err = net.Listen(\"tcp\", proxy.Listen)\n\tif err != nil {\n\t\tproxy.started <- err\n\t\treturn err\n\t}\n\tproxy.Listen = proxy.listener.Addr().String()\n\tproxy.started <- nil\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"name\": proxy.Name,\n\t\t\"proxy\": proxy.Listen,\n\t\t\"upstream\": proxy.Upstream,\n\t}).Info(\"Started proxy\")\n\n\treturn nil\n}\n\nfunc (proxy *Proxy) close() {\n\t\/\/ Unblock proxy.listener.Accept()\n\terr := proxy.listener.Close()\n\tif err != nil {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"proxy\": proxy.Name,\n\t\t\t\"listen\": proxy.Listen,\n\t\t\t\"err\": err,\n\t\t}).Warn(\"Attempted to close an already closed proxy server\")\n\t}\n}\n\n\/\/ This channel is to kill the blocking Accept() call below by closing the\n\/\/ net.Listener.\nfunc (proxy *Proxy) freeBlocker(acceptTomb *tomb.Tomb) {\n\t<-proxy.tomb.Dying()\n\n\t\/\/ Notify ln.Accept() that the shutdown was safe\n\tacceptTomb.Killf(\"Shutting down from stop()\")\n\n\tproxy.close()\n\n\t\/\/ Wait for the accept loop to finish processing\n\tacceptTomb.Wait()\n\tproxy.tomb.Done()\n}\n\n\/\/ server runs the Proxy server, accepting new clients and creating Links to\n\/\/ connect them to upstreams.\nfunc (proxy *Proxy) server() {\n\terr := proxy.listen()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tacceptTomb := &tomb.Tomb{}\n\tdefer acceptTomb.Done()\n\n\t\/\/ This channel is to kill the blocking Accept() call below by closing the\n\t\/\/ net.Listener.\n\tgo proxy.freeBlocker(acceptTomb)\n\n\tfor {\n\t\tclient, err := proxy.listener.Accept()\n\t\tif err != nil {\n\t\t\t\/\/ This is to confirm we're being shut down in a legit way. Unfortunately,\n\t\t\t\/\/ Go doesn't export the error when it's closed from Close() so we have to\n\t\t\t\/\/ sync up with a channel here.\n\t\t\t\/\/\n\t\t\t\/\/ See http:\/\/zhen.org\/blog\/graceful-shutdown-of-go-net-dot-listeners\/\n\t\t\tselect {\n\t\t\tcase <-acceptTomb.Dying():\n\t\t\tdefault:\n\t\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\t\"proxy\": proxy.Name,\n\t\t\t\t\t\"listen\": proxy.Listen,\n\t\t\t\t\t\"err\": err,\n\t\t\t\t}).Warn(\"Error while accepting client\")\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"name\": proxy.Name,\n\t\t\t\"client\": client.RemoteAddr(),\n\t\t\t\"proxy\": proxy.Listen,\n\t\t\t\"upstream\": proxy.Upstream,\n\t\t}).Info(\"Accepted client\")\n\n\t\tupstream, err := net.Dial(\"tcp\", proxy.Upstream)\n\t\tif err != nil {\n\t\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\t\"name\": proxy.Name,\n\t\t\t\t\"client\": client.RemoteAddr(),\n\t\t\t\t\"proxy\": proxy.Listen,\n\t\t\t\t\"upstream\": proxy.Upstream,\n\t\t\t}).Error(\"Unable to open connection to upstream\")\n\t\t\tclient.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\tname := client.RemoteAddr().String()\n\t\tproxy.connections.Lock()\n\t\tproxy.connections.list[name+\"upstream\"] = upstream\n\t\tproxy.connections.list[name+\"downstream\"] = client\n\t\tproxy.connections.Unlock()\n\t\tproxy.Toxics.StartLink(name+\"upstream\", client, upstream, stream.Upstream)\n\t\tproxy.Toxics.StartLink(name+\"downstream\", upstream, client, stream.Downstream)\n\t}\n}\n\nfunc (proxy *Proxy) RemoveConnection(name string) {\n\tproxy.connections.Lock()\n\tdefer proxy.connections.Unlock()\n\tdelete(proxy.connections.list, name)\n}\n\n\/\/ Starts a proxy, assumes the lock has already been taken.\nfunc start(proxy *Proxy) error {\n\tif proxy.Enabled {\n\t\treturn ErrProxyAlreadyStarted\n\t}\n\n\tproxy.tomb = tomb.Tomb{} \/\/ Reset tomb, from previous starts\/stops\n\tgo proxy.server()\n\terr := <-proxy.started\n\t\/\/ Only enable the proxy if it successfully started\n\tproxy.Enabled = err == nil\n\treturn err\n}\n\n\/\/ Stops a proxy, assumes the lock has already been taken.\nfunc stop(proxy *Proxy) {\n\tif !proxy.Enabled {\n\t\treturn\n\t}\n\tproxy.Enabled = false\n\n\tproxy.tomb.Killf(\"Shutting down from stop()\")\n\tproxy.tomb.Wait() \/\/ Wait until we stop accepting new connections\n\n\tproxy.connections.Lock()\n\tdefer proxy.connections.Unlock()\n\tfor _, conn := range proxy.connections.list {\n\t\tconn.Close()\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"name\": proxy.Name,\n\t\t\"proxy\": proxy.Listen,\n\t\t\"upstream\": proxy.Upstream,\n\t}).Info(\"Terminated proxy\")\n}\n<|endoftext|>"} {"text":"<commit_before>3538fe4a-2e57-11e5-9284-b827eb9e62be<commit_msg>353e1f38-2e57-11e5-9284-b827eb9e62be<commit_after>353e1f38-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>ab856496-2e55-11e5-9284-b827eb9e62be<commit_msg>ab8a8188-2e55-11e5-9284-b827eb9e62be<commit_after>ab8a8188-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>22afe498-2e55-11e5-9284-b827eb9e62be<commit_msg>22b532b8-2e55-11e5-9284-b827eb9e62be<commit_after>22b532b8-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>93af6b32-2e55-11e5-9284-b827eb9e62be<commit_msg>93b49576-2e55-11e5-9284-b827eb9e62be<commit_after>93b49576-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>fd0816d4-2e54-11e5-9284-b827eb9e62be<commit_msg>fd0d57c0-2e54-11e5-9284-b827eb9e62be<commit_after>fd0d57c0-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>4a06be12-2e56-11e5-9284-b827eb9e62be<commit_msg>4a0bdb4a-2e56-11e5-9284-b827eb9e62be<commit_after>4a0bdb4a-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>71ef3914-2e55-11e5-9284-b827eb9e62be<commit_msg>71f46754-2e55-11e5-9284-b827eb9e62be<commit_after>71f46754-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>53e5dd2e-2e55-11e5-9284-b827eb9e62be<commit_msg>53eb1118-2e55-11e5-9284-b827eb9e62be<commit_after>53eb1118-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>3c80dd1e-2e55-11e5-9284-b827eb9e62be<commit_msg>3c9c38ca-2e55-11e5-9284-b827eb9e62be<commit_after>3c9c38ca-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>06215a8c-2e55-11e5-9284-b827eb9e62be<commit_msg>0626fb36-2e55-11e5-9284-b827eb9e62be<commit_after>0626fb36-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>a9fb034c-2e55-11e5-9284-b827eb9e62be<commit_msg>aa00278c-2e55-11e5-9284-b827eb9e62be<commit_after>aa00278c-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>da61ce22-2e54-11e5-9284-b827eb9e62be<commit_msg>da674bc2-2e54-11e5-9284-b827eb9e62be<commit_after>da674bc2-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>0d355a7e-2e57-11e5-9284-b827eb9e62be<commit_msg>0d3a8922-2e57-11e5-9284-b827eb9e62be<commit_after>0d3a8922-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f1b4bace-2e56-11e5-9284-b827eb9e62be<commit_msg>f1b9d126-2e56-11e5-9284-b827eb9e62be<commit_after>f1b9d126-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>0574095e-2e55-11e5-9284-b827eb9e62be<commit_msg>05795b5c-2e55-11e5-9284-b827eb9e62be<commit_after>05795b5c-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>c5b98e24-2e54-11e5-9284-b827eb9e62be<commit_msg>c5becd30-2e54-11e5-9284-b827eb9e62be<commit_after>c5becd30-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>4d585b1c-2e55-11e5-9284-b827eb9e62be<commit_msg>4d5d7e12-2e55-11e5-9284-b827eb9e62be<commit_after>4d5d7e12-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>b4997582-2e54-11e5-9284-b827eb9e62be<commit_msg>b49ead22-2e54-11e5-9284-b827eb9e62be<commit_after>b49ead22-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>404aaa24-2e55-11e5-9284-b827eb9e62be<commit_msg>404fe1b0-2e55-11e5-9284-b827eb9e62be<commit_after>404fe1b0-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>0817b276-2e57-11e5-9284-b827eb9e62be<commit_msg>081cd4ae-2e57-11e5-9284-b827eb9e62be<commit_after>081cd4ae-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>308d9662-2e57-11e5-9284-b827eb9e62be<commit_msg>3092b548-2e57-11e5-9284-b827eb9e62be<commit_after>3092b548-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>94401998-2e55-11e5-9284-b827eb9e62be<commit_msg>944536da-2e55-11e5-9284-b827eb9e62be<commit_after>944536da-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f1eba1d4-2e55-11e5-9284-b827eb9e62be<commit_msg>f1f0d7da-2e55-11e5-9284-b827eb9e62be<commit_after>f1f0d7da-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>3833313e-2e56-11e5-9284-b827eb9e62be<commit_msg>38387996-2e56-11e5-9284-b827eb9e62be<commit_after>38387996-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>ca8e32c4-2e54-11e5-9284-b827eb9e62be<commit_msg>ca935c54-2e54-11e5-9284-b827eb9e62be<commit_after>ca935c54-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>d2e5ce64-2e54-11e5-9284-b827eb9e62be<commit_msg>d2eb3c32-2e54-11e5-9284-b827eb9e62be<commit_after>d2eb3c32-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 The btcsuite developers\n\/\/ Use of this source code is governed by an ISC\n\/\/ license that can be found in the LICENSE file.\n\npackage indexers\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/btcsuite\/btcd\/blockchain\"\n\t\"github.com\/btcsuite\/btcd\/chaincfg\/chainhash\"\n\t\"github.com\/btcsuite\/btcd\/database\"\n\t\"github.com\/btcsuite\/btcutil\"\n\t\"github.com\/btcsuite\/btcutil\/gcs\/builder\"\n\n\t\"os\"\n)\n\nconst (\n\t\/\/ cfIndexName is the human-readable name for the index.\n\tcfIndexName = \"committed bloom filter index\"\n)\n\nvar (\n\t\/\/ cfIndexKey is the name of the db bucket used to house the\n\t\/\/ block hash -> CF index.\n\tcfIndexKey = []byte(\"cfbyhashidx\")\n)\n\nfunc dbFetchCFIndexEntry(dbTx database.Tx, blockHash *chainhash.Hash) ([]byte,\n error) {\n\t\/\/ Load the record from the database and return now if it doesn't exist.\n\tindex := dbTx.Metadata().Bucket(cfIndexKey)\n\tserializedFilter := index.Get(blockHash[:])\n\tif len(serializedFilter) == 0 {\n\t\treturn nil, nil\n\t}\n\n\treturn serializedFilter, nil\n}\n\n\/\/ The serialized format for keys and values in the block hash to CF bucket is:\n\/\/ <hash> = <CF>\n\/\/\n\/\/ Field Type Size\n\/\/ hash chainhash.Hash 32 bytes\n\/\/ CF []byte variable\n\/\/ -----\n\/\/ Total: > 32 bytes\n\n\/\/ CFIndex implements a CF by hash index.\ntype CFIndex struct {\n\tdb database.DB\n}\n\n\/\/ Ensure the CFIndex type implements the Indexer interface.\nvar _ Indexer = (*CFIndex)(nil)\n\n\/\/ Init initializes the hash-based CF index.\n\/\/\n\/\/ This is part of the Indexer interface.\nfunc (idx *CFIndex) Init() error {\n\treturn nil\n}\n\n\/\/ Key returns the database key to use for the index as a byte slice.\n\/\/\n\/\/ This is part of the Indexer interface.\nfunc (idx *CFIndex) Key() []byte {\n\treturn cfIndexKey\n}\n\n\/\/ Name returns the human-readable name of the index.\n\/\/\n\/\/ This is part of the Indexer interface.\nfunc (idx *CFIndex) Name() string {\n\treturn cfIndexName\n}\n\n\/\/ Create is invoked when the indexer manager determines the index needs\n\/\/ to be created for the first time. It creates the buckets for the hash-based\n\/\/ CF index.\n\/\/\n\/\/ This is part of the Indexer interface.\nfunc (idx *CFIndex) Create(dbTx database.Tx) error {\n\tmeta := dbTx.Metadata()\n\t_, err := meta.CreateBucket(cfIndexKey)\n\treturn err\n}\n\nfunc generateFilterForBlock(block *btcutil.Block) ([]byte, error) {\n\tb := builder.WithKeyHash(block.Hash())\n\t_, err := b.Key()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, tx := range block.Transactions() {\n\t\tfor _, txIn := range tx.MsgTx().TxIn {\n\t\t\tb.AddOutPoint(txIn.PreviousOutPoint)\n\t\t}\n\t}\n\tf, err := b.Build()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn f.Bytes(), nil\n}\n\n\/\/ ConnectBlock is invoked by the index manager when a new block has been\n\/\/ connected to the main chain. This indexer adds a hash-to-CF mapping for\n\/\/ every passed block.\n\/\/\n\/\/ This is part of the Indexer interface.\nfunc (idx *CFIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block,\n view *blockchain.UtxoViewpoint) error {\n\tfilterBytes, err := generateFilterForBlock(block)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmeta := dbTx.Metadata()\n\tindex := meta.Bucket(cfIndexKey)\n\terr = index.Put(block.Hash()[:], filterBytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Fprintf(os.Stderr, \"Stored CF for block %v\", block.Hash())\n\n\treturn nil\n}\n\n\/\/ DisconnectBlock is invoked by the index manager when a block has been\n\/\/ disconnected from the main chain. This indexer removes the hash-to-CF\n\/\/ mapping for every passed block.\n\/\/\n\/\/ This is part of the Indexer interface.\nfunc (idx *CFIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block,\n view *blockchain.UtxoViewpoint) error {\n\tindex := dbTx.Metadata().Bucket(cfIndexKey)\n\tfilterBytes := index.Get(block.Hash()[:])\n\tif len(filterBytes) == 0 {\n\t\treturn fmt.Errorf(\"can't remove non-existent filter %s from \" +\n\t\t \"the cfilter index\", block.Hash())\n\t}\n\treturn index.Delete(block.Hash()[:])\n}\n\nfunc (idx *CFIndex) FilterByBlockHash(hash *chainhash.Hash) ([]byte, error) {\n\tvar filterBytes []byte\n\terr := idx.db.View(func(dbTx database.Tx) error {\n\t\tvar err error\n\t\tfilterBytes, err = dbFetchCFIndexEntry(dbTx, hash)\n\t\treturn err\n\t})\n\treturn filterBytes, err\n}\n\n\/\/ NewCFIndex returns a new instance of an indexer that is used to create a\n\/\/ mapping of the hashes of all blocks in the blockchain to their respective\n\/\/ committed bloom filters.\n\/\/\n\/\/ It implements the Indexer interface which plugs into the IndexManager that in\n\/\/ turn is used by the blockchain package. This allows the index to be\n\/\/ seamlessly maintained along with the chain.\nfunc NewCFIndex(db database.DB) *CFIndex {\n\treturn &CFIndex{db: db}\n}\n\n\/\/ DropCFIndex drops the CF index from the provided database if exists.\nfunc DropCFIndex(db database.DB) error {\n\treturn dropIndex(db, cfIndexKey, cfIndexName)\n}\n<commit_msg>Start preparing the ground for layer {0,1} filters<commit_after>\/\/ Copyright (c) 2017 The btcsuite developers\n\/\/ Use of this source code is governed by an ISC\n\/\/ license that can be found in the LICENSE file.\n\npackage indexers\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/btcsuite\/btcd\/blockchain\"\n\t\"github.com\/btcsuite\/btcd\/chaincfg\/chainhash\"\n\t\"github.com\/btcsuite\/btcd\/database\"\n\t\"github.com\/btcsuite\/btcutil\"\n\t\"github.com\/btcsuite\/btcutil\/gcs\/builder\"\n\n\t\"os\"\n)\n\nconst (\n\t\/\/ cfIndexName is the human-readable name for the index.\n\tcfIndexName = \"committed filter index\"\n)\n\nvar (\n\t\/\/ cfBasicIndexKey is the name of the db bucket used to house the\n\t\/\/ block hash -> Basic CF index (CF #0).\n\tcfBasicIndexKey = []byte(\"cf0byhashidx\")\n\t\/\/ cfExtendedIndexKey is the name of the db bucket used to house the\n\t\/\/ block hash -> Extended CF index (CF #1).\n\tcfExtendedIndexKey = []byte(\"cf1byhashidx\")\n)\n\nfunc dbFetchCFIndexEntry(dbTx database.Tx, blockHash *chainhash.Hash) ([]byte,\n error) {\n\t\/\/ Load the record from the database and return now if it doesn't exist.\n\tindex := dbTx.Metadata().Bucket(cfBasicIndexKey)\n\tserializedFilter := index.Get(blockHash[:])\n\tif len(serializedFilter) == 0 {\n\t\treturn nil, nil\n\t}\n\n\treturn serializedFilter, nil\n}\n\n\/\/ The serialized format for keys and values in the block hash to CF bucket is:\n\/\/ <hash> = <CF>\n\/\/\n\/\/ Field Type Size\n\/\/ hash chainhash.Hash 32 bytes\n\/\/ CF []byte variable\n\/\/ -----\n\/\/ Total: > 32 bytes\n\n\/\/ CFIndex implements a CF by hash index.\ntype CFIndex struct {\n\tdb database.DB\n}\n\n\/\/ Ensure the CFIndex type implements the Indexer interface.\nvar _ Indexer = (*CFIndex)(nil)\n\n\/\/ Init initializes the hash-based CF index.\n\/\/\n\/\/ This is part of the Indexer interface.\nfunc (idx *CFIndex) Init() error {\n\treturn nil\n}\n\n\/\/ Key returns the database key to use for the index as a byte slice.\nfunc (idx *CFIndex) Key() []byte {\n\treturn cfBasicIndexKey\n}\n\n\/\/ Name returns the human-readable name of the index.\n\/\/\n\/\/ This is part of the Indexer interface.\nfunc (idx *CFIndex) Name() string {\n\treturn cfIndexName\n}\n\n\/\/ Create is invoked when the indexer manager determines the index needs to be\n\/\/ created for the first time. It creates buckets for the two hash-based CF\n\/\/ indexes (simple, extended).\nfunc (idx *CFIndex) Create(dbTx database.Tx) error {\n\tmeta := dbTx.Metadata()\n\t_, err := meta.CreateBucket(cfBasicIndexKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = meta.CreateBucket(cfExtendedIndexKey)\n\treturn err\n}\n\nfunc generateFilterForBlock(block *btcutil.Block) ([]byte, error) {\n\tb := builder.WithKeyHash(block.Hash())\n\t_, err := b.Key()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, tx := range block.Transactions() {\n\t\tfor _, txIn := range tx.MsgTx().TxIn {\n\t\t\tb.AddOutPoint(txIn.PreviousOutPoint)\n\t\t}\n\t}\n\tf, err := b.Build()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn f.Bytes(), nil\n}\n\n\/\/ ConnectBlock is invoked by the index manager when a new block has been\n\/\/ connected to the main chain. This indexer adds a hash-to-CF mapping for\n\/\/ every passed block.\n\/\/\n\/\/ This is part of the Indexer interface.\nfunc (idx *CFIndex) ConnectBlock(dbTx database.Tx, block *btcutil.Block,\n view *blockchain.UtxoViewpoint) error {\n\tfilterBytes, err := generateFilterForBlock(block)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmeta := dbTx.Metadata()\n\tindex := meta.Bucket(cfBasicIndexKey)\n\terr = index.Put(block.Hash()[:], filterBytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Fprintf(os.Stderr, \"Stored CF for block %v\", block.Hash())\n\n\treturn nil\n}\n\n\/\/ DisconnectBlock is invoked by the index manager when a block has been\n\/\/ disconnected from the main chain. This indexer removes the hash-to-CF\n\/\/ mapping for every passed block.\n\/\/\n\/\/ This is part of the Indexer interface.\nfunc (idx *CFIndex) DisconnectBlock(dbTx database.Tx, block *btcutil.Block,\n view *blockchain.UtxoViewpoint) error {\n\tindex := dbTx.Metadata().Bucket(cfBasicIndexKey)\n\tfilterBytes := index.Get(block.Hash()[:])\n\tif len(filterBytes) == 0 {\n\t\treturn fmt.Errorf(\"can't remove non-existent filter %s from \" +\n\t\t \"the cfilter index\", block.Hash())\n\t}\n\treturn index.Delete(block.Hash()[:])\n}\n\nfunc (idx *CFIndex) FilterByBlockHash(hash *chainhash.Hash) ([]byte, error) {\n\tvar filterBytes []byte\n\terr := idx.db.View(func(dbTx database.Tx) error {\n\t\tvar err error\n\t\tfilterBytes, err = dbFetchCFIndexEntry(dbTx, hash)\n\t\treturn err\n\t})\n\treturn filterBytes, err\n}\n\n\/\/ NewCFIndex returns a new instance of an indexer that is used to create a\n\/\/ mapping of the hashes of all blocks in the blockchain to their respective\n\/\/ committed bloom filters.\n\/\/\n\/\/ It implements the Indexer interface which plugs into the IndexManager that in\n\/\/ turn is used by the blockchain package. This allows the index to be\n\/\/ seamlessly maintained along with the chain.\nfunc NewCFIndex(db database.DB) *CFIndex {\n\treturn &CFIndex{db: db}\n}\n\n\/\/ DropCFIndex drops the CF index from the provided database if exists.\nfunc DropCFIndex(db database.DB) error {\n\treturn dropIndex(db, cfBasicIndexKey, cfIndexName)\n}\n<|endoftext|>"} {"text":"<commit_before>73e488dc-2e55-11e5-9284-b827eb9e62be<commit_msg>73e9d81e-2e55-11e5-9284-b827eb9e62be<commit_after>73e9d81e-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>af6254de-2e55-11e5-9284-b827eb9e62be<commit_msg>af676b36-2e55-11e5-9284-b827eb9e62be<commit_after>af676b36-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>eb784794-2e55-11e5-9284-b827eb9e62be<commit_msg>eb7d66fc-2e55-11e5-9284-b827eb9e62be<commit_after>eb7d66fc-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>cda390c4-2e56-11e5-9284-b827eb9e62be<commit_msg>cda8b90a-2e56-11e5-9284-b827eb9e62be<commit_after>cda8b90a-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage oom\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\tv1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/client-go\/tools\/record\"\n\n\t\"github.com\/google\/cadvisor\/utils\/oomparser\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\ntype fakeStreamer struct {\n\toomInstancesToStream []*oomparser.OomInstance\n}\n\nfunc (fs *fakeStreamer) StreamOoms(outStream chan<- *oomparser.OomInstance) {\n\tfor _, oomInstance := range fs.oomInstancesToStream {\n\t\toutStream <- oomInstance\n\t}\n}\n\n\/\/ TestStartingWatcher tests that the watcher, using the actual streamer\n\/\/ and not the fake, starts successfully.\nfunc TestStartingWatcher(t *testing.T) {\n\tfakeRecorder := &record.FakeRecorder{}\n\tnode := &v1.ObjectReference{}\n\n\toomWatcher, err := NewWatcher(fakeRecorder)\n\tassert.NoError(t, err)\n\tassert.NoError(t, oomWatcher.Start(node))\n}\n\n\/\/ TestWatcherRecordsEventsForOomEvents ensures that our OomInstances coming\n\/\/ from `StreamOoms` are translated into events in our recorder.\nfunc TestWatcherRecordsEventsForOomEvents(t *testing.T) {\n\toomInstancesToStream := []*oomparser.OomInstance{\n\t\t{\n\t\t\tPid: 1000,\n\t\t\tProcessName: \"fakeProcess\",\n\t\t\tTimeOfDeath: time.Now(),\n\t\t\tContainerName: recordEventContainerName + \"some-container\",\n\t\t\tVictimContainerName: recordEventContainerName,\n\t\t},\n\t}\n\tnumExpectedOomEvents := len(oomInstancesToStream)\n\n\tfakeStreamer := &fakeStreamer{\n\t\toomInstancesToStream: oomInstancesToStream,\n\t}\n\n\tfakeRecorder := record.NewFakeRecorder(numExpectedOomEvents)\n\tnode := &v1.ObjectReference{}\n\n\toomWatcher := &realWatcher{\n\t\trecorder: fakeRecorder,\n\t\toomStreamer: fakeStreamer,\n\t}\n\tassert.NoError(t, oomWatcher.Start(node))\n\n\teventsRecorded := getRecordedEvents(fakeRecorder, numExpectedOomEvents)\n\tassert.Equal(t, numExpectedOomEvents, len(eventsRecorded))\n}\n\nfunc getRecordedEvents(fakeRecorder *record.FakeRecorder, numExpectedOomEvents int) []string {\n\teventsRecorded := []string{}\n\n\tselect {\n\tcase event := <-fakeRecorder.Events:\n\t\teventsRecorded = append(eventsRecorded, event)\n\n\t\tif len(eventsRecorded) == numExpectedOomEvents {\n\t\t\tbreak\n\t\t}\n\tcase <-time.After(10 * time.Second):\n\t\tbreak\n\t}\n\n\treturn eventsRecorded\n}\n\n\/\/ TestWatcherRecordsEventsForOomEventsCorrectContainerName verifies that we\n\/\/ only record OOM events when the container name is the one for which we want\n\/\/ to record events (i.e. \/).\nfunc TestWatcherRecordsEventsForOomEventsCorrectContainerName(t *testing.T) {\n\t\/\/ By \"incorrect\" container name, we mean a container name for which we\n\t\/\/ don't want to record an oom event.\n\tnumOomEventsWithIncorrectContainerName := 1\n\toomInstancesToStream := []*oomparser.OomInstance{\n\t\t{\n\t\t\tPid: 1000,\n\t\t\tProcessName: \"fakeProcess\",\n\t\t\tTimeOfDeath: time.Now(),\n\t\t\tContainerName: recordEventContainerName + \"some-container\",\n\t\t\tVictimContainerName: recordEventContainerName,\n\t\t},\n\t\t{\n\t\t\tPid: 1000,\n\t\t\tProcessName: \"fakeProcess\",\n\t\t\tTimeOfDeath: time.Now(),\n\t\t\tContainerName: recordEventContainerName + \"kubepods\/some-container\",\n\t\t\tVictimContainerName: recordEventContainerName + \"kubepods\",\n\t\t},\n\t}\n\tnumExpectedOomEvents := len(oomInstancesToStream) - numOomEventsWithIncorrectContainerName\n\n\tfakeStreamer := &fakeStreamer{\n\t\toomInstancesToStream: oomInstancesToStream,\n\t}\n\n\tfakeRecorder := record.NewFakeRecorder(numExpectedOomEvents)\n\tnode := &v1.ObjectReference{}\n\n\toomWatcher := &realWatcher{\n\t\trecorder: fakeRecorder,\n\t\toomStreamer: fakeStreamer,\n\t}\n\tassert.NoError(t, oomWatcher.Start(node))\n\n\teventsRecorded := getRecordedEvents(fakeRecorder, numExpectedOomEvents)\n\tassert.Equal(t, numExpectedOomEvents, len(eventsRecorded))\n}\n\n\/\/ TestWatcherRecordsEventsForOomEventsWithAdditionalInfo verifies that our the\n\/\/ emitted event has the proper pid\/process data when appropriate.\nfunc TestWatcherRecordsEventsForOomEventsWithAdditionalInfo(t *testing.T) {\n\t\/\/ The process and event info should appear in the event message.\n\teventPid := 1000\n\tprocessName := \"fakeProcess\"\n\n\toomInstancesToStream := []*oomparser.OomInstance{\n\t\t{\n\t\t\tPid: eventPid,\n\t\t\tProcessName: processName,\n\t\t\tTimeOfDeath: time.Now(),\n\t\t\tContainerName: recordEventContainerName + \"some-container\",\n\t\t\tVictimContainerName: recordEventContainerName,\n\t\t},\n\t}\n\tnumExpectedOomEvents := len(oomInstancesToStream)\n\n\tfakeStreamer := &fakeStreamer{\n\t\toomInstancesToStream: oomInstancesToStream,\n\t}\n\n\tfakeRecorder := record.NewFakeRecorder(numExpectedOomEvents)\n\tnode := &v1.ObjectReference{}\n\n\toomWatcher := &realWatcher{\n\t\trecorder: fakeRecorder,\n\t\toomStreamer: fakeStreamer,\n\t}\n\tassert.NoError(t, oomWatcher.Start(node))\n\n\teventsRecorded := getRecordedEvents(fakeRecorder, numExpectedOomEvents)\n\n\tassert.Equal(t, numExpectedOomEvents, len(eventsRecorded))\n\tassert.Contains(t, eventsRecorded[0], systemOOMEvent)\n\tassert.Contains(t, eventsRecorded[0], fmt.Sprintf(\"pid: %d\", eventPid))\n\tassert.Contains(t, eventsRecorded[0], fmt.Sprintf(\"victim process: %s\", processName))\n}\n<commit_msg>remove oom TestStartingWatcher \"\"unit\"\" test<commit_after>\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage oom\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\tv1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/client-go\/tools\/record\"\n\n\t\"github.com\/google\/cadvisor\/utils\/oomparser\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\ntype fakeStreamer struct {\n\toomInstancesToStream []*oomparser.OomInstance\n}\n\nfunc (fs *fakeStreamer) StreamOoms(outStream chan<- *oomparser.OomInstance) {\n\tfor _, oomInstance := range fs.oomInstancesToStream {\n\t\toutStream <- oomInstance\n\t}\n}\n\n\/\/ TestWatcherRecordsEventsForOomEvents ensures that our OomInstances coming\n\/\/ from `StreamOoms` are translated into events in our recorder.\nfunc TestWatcherRecordsEventsForOomEvents(t *testing.T) {\n\toomInstancesToStream := []*oomparser.OomInstance{\n\t\t{\n\t\t\tPid: 1000,\n\t\t\tProcessName: \"fakeProcess\",\n\t\t\tTimeOfDeath: time.Now(),\n\t\t\tContainerName: recordEventContainerName + \"some-container\",\n\t\t\tVictimContainerName: recordEventContainerName,\n\t\t},\n\t}\n\tnumExpectedOomEvents := len(oomInstancesToStream)\n\n\tfakeStreamer := &fakeStreamer{\n\t\toomInstancesToStream: oomInstancesToStream,\n\t}\n\n\tfakeRecorder := record.NewFakeRecorder(numExpectedOomEvents)\n\tnode := &v1.ObjectReference{}\n\n\toomWatcher := &realWatcher{\n\t\trecorder: fakeRecorder,\n\t\toomStreamer: fakeStreamer,\n\t}\n\tassert.NoError(t, oomWatcher.Start(node))\n\n\teventsRecorded := getRecordedEvents(fakeRecorder, numExpectedOomEvents)\n\tassert.Equal(t, numExpectedOomEvents, len(eventsRecorded))\n}\n\nfunc getRecordedEvents(fakeRecorder *record.FakeRecorder, numExpectedOomEvents int) []string {\n\teventsRecorded := []string{}\n\n\tselect {\n\tcase event := <-fakeRecorder.Events:\n\t\teventsRecorded = append(eventsRecorded, event)\n\n\t\tif len(eventsRecorded) == numExpectedOomEvents {\n\t\t\tbreak\n\t\t}\n\tcase <-time.After(10 * time.Second):\n\t\tbreak\n\t}\n\n\treturn eventsRecorded\n}\n\n\/\/ TestWatcherRecordsEventsForOomEventsCorrectContainerName verifies that we\n\/\/ only record OOM events when the container name is the one for which we want\n\/\/ to record events (i.e. \/).\nfunc TestWatcherRecordsEventsForOomEventsCorrectContainerName(t *testing.T) {\n\t\/\/ By \"incorrect\" container name, we mean a container name for which we\n\t\/\/ don't want to record an oom event.\n\tnumOomEventsWithIncorrectContainerName := 1\n\toomInstancesToStream := []*oomparser.OomInstance{\n\t\t{\n\t\t\tPid: 1000,\n\t\t\tProcessName: \"fakeProcess\",\n\t\t\tTimeOfDeath: time.Now(),\n\t\t\tContainerName: recordEventContainerName + \"some-container\",\n\t\t\tVictimContainerName: recordEventContainerName,\n\t\t},\n\t\t{\n\t\t\tPid: 1000,\n\t\t\tProcessName: \"fakeProcess\",\n\t\t\tTimeOfDeath: time.Now(),\n\t\t\tContainerName: recordEventContainerName + \"kubepods\/some-container\",\n\t\t\tVictimContainerName: recordEventContainerName + \"kubepods\",\n\t\t},\n\t}\n\tnumExpectedOomEvents := len(oomInstancesToStream) - numOomEventsWithIncorrectContainerName\n\n\tfakeStreamer := &fakeStreamer{\n\t\toomInstancesToStream: oomInstancesToStream,\n\t}\n\n\tfakeRecorder := record.NewFakeRecorder(numExpectedOomEvents)\n\tnode := &v1.ObjectReference{}\n\n\toomWatcher := &realWatcher{\n\t\trecorder: fakeRecorder,\n\t\toomStreamer: fakeStreamer,\n\t}\n\tassert.NoError(t, oomWatcher.Start(node))\n\n\teventsRecorded := getRecordedEvents(fakeRecorder, numExpectedOomEvents)\n\tassert.Equal(t, numExpectedOomEvents, len(eventsRecorded))\n}\n\n\/\/ TestWatcherRecordsEventsForOomEventsWithAdditionalInfo verifies that our the\n\/\/ emitted event has the proper pid\/process data when appropriate.\nfunc TestWatcherRecordsEventsForOomEventsWithAdditionalInfo(t *testing.T) {\n\t\/\/ The process and event info should appear in the event message.\n\teventPid := 1000\n\tprocessName := \"fakeProcess\"\n\n\toomInstancesToStream := []*oomparser.OomInstance{\n\t\t{\n\t\t\tPid: eventPid,\n\t\t\tProcessName: processName,\n\t\t\tTimeOfDeath: time.Now(),\n\t\t\tContainerName: recordEventContainerName + \"some-container\",\n\t\t\tVictimContainerName: recordEventContainerName,\n\t\t},\n\t}\n\tnumExpectedOomEvents := len(oomInstancesToStream)\n\n\tfakeStreamer := &fakeStreamer{\n\t\toomInstancesToStream: oomInstancesToStream,\n\t}\n\n\tfakeRecorder := record.NewFakeRecorder(numExpectedOomEvents)\n\tnode := &v1.ObjectReference{}\n\n\toomWatcher := &realWatcher{\n\t\trecorder: fakeRecorder,\n\t\toomStreamer: fakeStreamer,\n\t}\n\tassert.NoError(t, oomWatcher.Start(node))\n\n\teventsRecorded := getRecordedEvents(fakeRecorder, numExpectedOomEvents)\n\n\tassert.Equal(t, numExpectedOomEvents, len(eventsRecorded))\n\tassert.Contains(t, eventsRecorded[0], systemOOMEvent)\n\tassert.Contains(t, eventsRecorded[0], fmt.Sprintf(\"pid: %d\", eventPid))\n\tassert.Contains(t, eventsRecorded[0], fmt.Sprintf(\"victim process: %s\", processName))\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2020 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage components\n\nimport (\n\t\"k8s.io\/kops\/pkg\/apis\/kops\"\n\t\"k8s.io\/kops\/pkg\/apis\/kops\/util\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\/loader\"\n)\n\n\/\/ ClusterAutoscalerOptionsBuilder adds options for cluster autoscaler to the model\ntype ClusterAutoscalerOptionsBuilder struct {\n\t*OptionsContext\n}\n\nvar _ loader.OptionsBuilder = &ClusterAutoscalerOptionsBuilder{}\n\nfunc (b *ClusterAutoscalerOptionsBuilder) BuildOptions(o interface{}) error {\n\tclusterSpec := o.(*kops.ClusterSpec)\n\tcas := clusterSpec.ClusterAutoscaler\n\tif cas == nil || !fi.BoolValue(cas.Enabled) {\n\t\treturn nil\n\t}\n\n\tif cas.Image == nil {\n\n\t\timage := \"registry.k8s.io\/autoscaling\/cluster-autoscaler:latest\"\n\t\tv, err := util.ParseKubernetesVersion(clusterSpec.KubernetesVersion)\n\t\tif err == nil {\n\t\t\tswitch v.Minor {\n\t\t\tcase 23:\n\t\t\t\timage = \"registry.k8s.io\/autoscaling\/cluster-autoscaler:v1.23.0\"\n\t\t\tcase 22:\n\t\t\t\timage = \"registry.k8s.io\/autoscaling\/cluster-autoscaler:v1.22.2\"\n\t\t\tcase 21:\n\t\t\t\timage = \"registry.k8s.io\/autoscaling\/cluster-autoscaler:v1.21.2\"\n\t\t\tcase 20:\n\t\t\t\timage = \"registry.k8s.io\/autoscaling\/cluster-autoscaler:v1.20.1\"\n\t\t\tcase 19:\n\t\t\t\timage = \"registry.k8s.io\/autoscaling\/cluster-autoscaler:v1.19.2\"\n\t\t\t}\n\t\t}\n\t\tcas.Image = fi.String(image)\n\n\t}\n\n\tif cas.Expander == nil {\n\t\tcas.Expander = fi.String(\"random\")\n\t}\n\tif cas.ScaleDownUtilizationThreshold == nil {\n\t\tcas.ScaleDownUtilizationThreshold = fi.String(\"0.5\")\n\t}\n\tif cas.SkipNodesWithLocalStorage == nil {\n\t\tcas.SkipNodesWithLocalStorage = fi.Bool(true)\n\t}\n\tif cas.SkipNodesWithSystemPods == nil {\n\t\tcas.SkipNodesWithSystemPods = fi.Bool(true)\n\t}\n\tif cas.BalanceSimilarNodeGroups == nil {\n\t\tcas.BalanceSimilarNodeGroups = fi.Bool(false)\n\t}\n\tif cas.AWSUseStaticInstanceList == nil {\n\t\tcas.AWSUseStaticInstanceList = fi.Bool(false)\n\t}\n\tif cas.NewPodScaleUpDelay == nil {\n\t\tcas.NewPodScaleUpDelay = fi.String(\"0s\")\n\t}\n\tif cas.ScaleDownDelayAfterAdd == nil {\n\t\tcas.ScaleDownDelayAfterAdd = fi.String(\"10m0s\")\n\t}\n\tif cas.MaxNodeProvisionTime == \"\" {\n\t\tcas.MaxNodeProvisionTime = \"15m0s\"\n\t}\n\n\treturn nil\n}\n<commit_msg>Use Cluster Autoscaler 1.23 for k8s 1.24<commit_after>\/*\nCopyright 2020 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage components\n\nimport (\n\t\"k8s.io\/kops\/pkg\/apis\/kops\"\n\t\"k8s.io\/kops\/pkg\/apis\/kops\/util\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\/loader\"\n)\n\n\/\/ ClusterAutoscalerOptionsBuilder adds options for cluster autoscaler to the model\ntype ClusterAutoscalerOptionsBuilder struct {\n\t*OptionsContext\n}\n\nvar _ loader.OptionsBuilder = &ClusterAutoscalerOptionsBuilder{}\n\nfunc (b *ClusterAutoscalerOptionsBuilder) BuildOptions(o interface{}) error {\n\tclusterSpec := o.(*kops.ClusterSpec)\n\tcas := clusterSpec.ClusterAutoscaler\n\tif cas == nil || !fi.BoolValue(cas.Enabled) {\n\t\treturn nil\n\t}\n\n\tif cas.Image == nil {\n\n\t\timage := \"registry.k8s.io\/autoscaling\/cluster-autoscaler:latest\"\n\t\tv, err := util.ParseKubernetesVersion(clusterSpec.KubernetesVersion)\n\t\tif err == nil {\n\t\t\tswitch v.Minor {\n\t\t\tcase 24:\n\t\t\t\timage = \"registry.k8s.io\/autoscaling\/cluster-autoscaler:v1.23.0\"\n\t\t\tcase 23:\n\t\t\t\timage = \"registry.k8s.io\/autoscaling\/cluster-autoscaler:v1.23.0\"\n\t\t\tcase 22:\n\t\t\t\timage = \"registry.k8s.io\/autoscaling\/cluster-autoscaler:v1.22.2\"\n\t\t\tcase 21:\n\t\t\t\timage = \"registry.k8s.io\/autoscaling\/cluster-autoscaler:v1.21.2\"\n\t\t\tcase 20:\n\t\t\t\timage = \"registry.k8s.io\/autoscaling\/cluster-autoscaler:v1.20.1\"\n\t\t\tcase 19:\n\t\t\t\timage = \"registry.k8s.io\/autoscaling\/cluster-autoscaler:v1.19.2\"\n\t\t\t}\n\t\t}\n\t\tcas.Image = fi.String(image)\n\n\t}\n\n\tif cas.Expander == nil {\n\t\tcas.Expander = fi.String(\"random\")\n\t}\n\tif cas.ScaleDownUtilizationThreshold == nil {\n\t\tcas.ScaleDownUtilizationThreshold = fi.String(\"0.5\")\n\t}\n\tif cas.SkipNodesWithLocalStorage == nil {\n\t\tcas.SkipNodesWithLocalStorage = fi.Bool(true)\n\t}\n\tif cas.SkipNodesWithSystemPods == nil {\n\t\tcas.SkipNodesWithSystemPods = fi.Bool(true)\n\t}\n\tif cas.BalanceSimilarNodeGroups == nil {\n\t\tcas.BalanceSimilarNodeGroups = fi.Bool(false)\n\t}\n\tif cas.AWSUseStaticInstanceList == nil {\n\t\tcas.AWSUseStaticInstanceList = fi.Bool(false)\n\t}\n\tif cas.NewPodScaleUpDelay == nil {\n\t\tcas.NewPodScaleUpDelay = fi.String(\"0s\")\n\t}\n\tif cas.ScaleDownDelayAfterAdd == nil {\n\t\tcas.ScaleDownDelayAfterAdd = fi.String(\"10m0s\")\n\t}\n\tif cas.MaxNodeProvisionTime == \"\" {\n\t\tcas.MaxNodeProvisionTime = \"15m0s\"\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package handler\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/authz\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/authz\/policy\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/db\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/handler\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/inject\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/server\"\n\trecordGear \"github.com\/skygeario\/skygear-server\/pkg\/record\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/record\/dependency\/record\"\n)\n\nfunc AttachFieldAccessUpdateHandler(\n\tserver *server.Server,\n\trecordDependency recordGear.DependencyMap,\n) *server.Server {\n\tserver.Handle(\"\/schema\/field_access\/update\", &FieldAccessUpdateHandlerFactory{\n\t\trecordDependency,\n\t}).Methods(\"POST\")\n\treturn server\n}\n\ntype FieldAccessUpdateHandlerFactory struct {\n\tDependency recordGear.DependencyMap\n}\n\nfunc (f FieldAccessUpdateHandlerFactory) NewHandler(request *http.Request) http.Handler {\n\th := &FieldAccessUpdateHandler{}\n\tinject.DefaultInject(h, f.Dependency, request)\n\treturn handler.APIHandlerToHandler(h, h.TxContext)\n}\n\nfunc (f FieldAccessUpdateHandlerFactory) ProvideAuthzPolicy() authz.Policy {\n\treturn policy.AllOf(\n\t\tauthz.PolicyFunc(policy.RequireMasterKey),\n\t)\n}\n\ntype FieldAccessUpdateRequestPayload struct {\n}\n\nfunc (s FieldAccessUpdateRequestPayload) Validate() error {\n\treturn nil\n}\n\n\/*\nFieldAccessUpdateHandler fetches the entire Field ACL settings.\ncurl -X POST -H \"Content-Type: application\/json\" \\\n -d @- http:\/\/localhost:3000\/schema\/field_access\/update <<EOF\n{\n\t\"access\": [\n\t\t{\n\t\t\t\"record_type\":\"note\",\n\t\t\t\"record_field\":\"content\",\n\t\t\t\"user_role\":\"_user_id:johndoe\",\n\t\t\t\"writable\":false,\n\t\t\t\"readable\":true,\n\t\t\t\"comparable\":false,\n\t\t\t\"discoverable\":false\n\t\t}\n\t]\n}\nEOF\n*\/\ntype FieldAccessUpdateHandler struct {\n\tTxContext db.TxContext `dependency:\"TxContext\"`\n\tRecordStore record.Store `dependency:\"RecordStore\"`\n\tLogger *logrus.Entry `dependency:\"HandlerLogger\"`\n}\n\nfunc (h FieldAccessUpdateHandler) WithTx() bool {\n\treturn true\n}\n\nfunc (h FieldAccessUpdateHandler) DecodeRequest(request *http.Request) (handler.RequestPayload, error) {\n\tpayload := FieldAccessUpdateRequestPayload{}\n\tif err := json.NewDecoder(request.Body).Decode(&payload); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn payload, nil\n}\n\nfunc (h FieldAccessUpdateHandler) Handle(req interface{}) (resp interface{}, err error) {\n\treturn\n}\n<commit_msg>Decode and validate field acl update req payload<commit_after>package handler\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/authz\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/authz\/policy\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/db\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/handler\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/inject\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/server\"\n\trecordGear \"github.com\/skygeario\/skygear-server\/pkg\/record\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/record\/dependency\/record\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/record\/dependency\/recordconv\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/server\/skyerr\"\n)\n\nfunc AttachFieldAccessUpdateHandler(\n\tserver *server.Server,\n\trecordDependency recordGear.DependencyMap,\n) *server.Server {\n\tserver.Handle(\"\/schema\/field_access\/update\", &FieldAccessUpdateHandlerFactory{\n\t\trecordDependency,\n\t}).Methods(\"POST\")\n\treturn server\n}\n\ntype FieldAccessUpdateHandlerFactory struct {\n\tDependency recordGear.DependencyMap\n}\n\nfunc (f FieldAccessUpdateHandlerFactory) NewHandler(request *http.Request) http.Handler {\n\th := &FieldAccessUpdateHandler{}\n\tinject.DefaultInject(h, f.Dependency, request)\n\treturn handler.APIHandlerToHandler(h, h.TxContext)\n}\n\nfunc (f FieldAccessUpdateHandlerFactory) ProvideAuthzPolicy() authz.Policy {\n\treturn policy.AllOf(\n\t\tauthz.PolicyFunc(policy.RequireMasterKey),\n\t)\n}\n\ntype FieldAccessUpdateRequestPayload struct {\n\tRawAccess []map[string]interface{} `json:\"access\"`\n\tFieldACL record.FieldACL\n}\n\nfunc (p FieldAccessUpdateRequestPayload) Validate() error {\n\treturn nil\n}\n\n\/*\nFieldAccessUpdateHandler fetches the entire Field ACL settings.\ncurl -X POST -H \"Content-Type: application\/json\" \\\n -d @- http:\/\/localhost:3000\/schema\/field_access\/update <<EOF\n{\n\t\"access\": [\n\t\t{\n\t\t\t\"record_type\":\"note\",\n\t\t\t\"record_field\":\"content\",\n\t\t\t\"user_role\":\"_user_id:johndoe\",\n\t\t\t\"writable\":false,\n\t\t\t\"readable\":true,\n\t\t\t\"comparable\":false,\n\t\t\t\"discoverable\":false\n\t\t}\n\t]\n}\nEOF\n*\/\ntype FieldAccessUpdateHandler struct {\n\tTxContext db.TxContext `dependency:\"TxContext\"`\n\tRecordStore record.Store `dependency:\"RecordStore\"`\n\tLogger *logrus.Entry `dependency:\"HandlerLogger\"`\n}\n\nfunc (h FieldAccessUpdateHandler) WithTx() bool {\n\treturn true\n}\n\nfunc (h FieldAccessUpdateHandler) DecodeRequest(request *http.Request) (handler.RequestPayload, error) {\n\tpayload := FieldAccessUpdateRequestPayload{}\n\tif err := json.NewDecoder(request.Body).Decode(&payload); err != nil {\n\t\treturn nil, err\n\t}\n\n\tentries := record.FieldACLEntryList{}\n\tfor _, v := range payload.RawAccess {\n\t\tace := record.FieldACLEntry{}\n\t\tif err := (*recordconv.MapFieldACLEntry)(&ace).FromMap(v); err != nil {\n\t\t\treturn nil, skyerr.NewInvalidArgument(\"invalid access entry\", []string{\"access\"})\n\t\t}\n\t\tentries = append(entries, ace)\n\t}\n\n\tpayload.FieldACL = record.NewFieldACL(entries)\n\n\treturn payload, nil\n}\n\nfunc (h FieldAccessUpdateHandler) Handle(req interface{}) (resp interface{}, err error) {\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build integration\n\npackage sqlstore\n\nimport (\n\t\"testing\"\n\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n\n\t\"github.com\/grafana\/grafana\/pkg\/models\"\n\t\"github.com\/grafana\/grafana\/pkg\/setting\"\n)\n\nfunc TestPreferencesDataAccess(t *testing.T) {\n\tConvey(\"Testing preferences data access\", t, func() {\n\t\tss := InitTestDB(t)\n\n\t\tConvey(\"GetPreferencesWithDefaults with no saved preferences should return defaults\", func() {\n\t\t\tsetting.DefaultTheme = \"light\"\n\t\t\tss.Cfg.DateFormats.DefaultTimezone = \"UTC\"\n\n\t\t\tquery := &models.GetPreferencesWithDefaultsQuery{User: &models.SignedInUser{}}\n\t\t\terr := ss.GetPreferencesWithDefaults(query)\n\t\t\tSo(err, ShouldBeNil)\n\t\t\tSo(query.Result.Theme, ShouldEqual, \"light\")\n\t\t\tSo(query.Result.Timezone, ShouldEqual, \"UTC\")\n\t\t\tSo(query.Result.HomeDashboardId, ShouldEqual, 0)\n\t\t})\n\n\t\tConvey(\"GetPreferencesWithDefaults with saved org and user home dashboard should return user home dashboard\", func() {\n\t\t\terr := SavePreferences(&models.SavePreferencesCommand{OrgId: 1, HomeDashboardId: 1})\n\t\t\tSo(err, ShouldBeNil)\n\t\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, UserId: 1, HomeDashboardId: 4})\n\t\t\tSo(err, ShouldBeNil)\n\n\t\t\tquery := &models.GetPreferencesWithDefaultsQuery{User: &models.SignedInUser{OrgId: 1, UserId: 1}}\n\t\t\terr = ss.GetPreferencesWithDefaults(query)\n\t\t\tSo(err, ShouldBeNil)\n\t\t\tSo(query.Result.HomeDashboardId, ShouldEqual, 4)\n\t\t})\n\n\t\tConvey(\"GetPreferencesWithDefaults with saved org and other user home dashboard should return org home dashboard\", func() {\n\t\t\terr := SavePreferences(&models.SavePreferencesCommand{OrgId: 1, HomeDashboardId: 1})\n\t\t\tSo(err, ShouldBeNil)\n\t\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, UserId: 1, HomeDashboardId: 4})\n\t\t\tSo(err, ShouldBeNil)\n\n\t\t\tquery := &models.GetPreferencesWithDefaultsQuery{User: &models.SignedInUser{OrgId: 1, UserId: 2}}\n\t\t\terr = ss.GetPreferencesWithDefaults(query)\n\t\t\tSo(err, ShouldBeNil)\n\t\t\tSo(query.Result.HomeDashboardId, ShouldEqual, 1)\n\t\t})\n\n\t\tConvey(\"GetPreferencesWithDefaults with saved org and teams home dashboard should return last team home dashboard\", func() {\n\t\t\terr := SavePreferences(&models.SavePreferencesCommand{OrgId: 1, HomeDashboardId: 1})\n\t\t\tSo(err, ShouldBeNil)\n\t\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, TeamId: 2, HomeDashboardId: 2})\n\t\t\tSo(err, ShouldBeNil)\n\t\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, TeamId: 3, HomeDashboardId: 3})\n\t\t\tSo(err, ShouldBeNil)\n\n\t\t\tquery := &models.GetPreferencesWithDefaultsQuery{\n\t\t\t\tUser: &models.SignedInUser{OrgId: 1, Teams: []int64{2, 3}},\n\t\t\t}\n\t\t\terr = ss.GetPreferencesWithDefaults(query)\n\t\t\tSo(err, ShouldBeNil)\n\t\t\tSo(query.Result.HomeDashboardId, ShouldEqual, 3)\n\t\t})\n\n\t\tConvey(\"GetPreferencesWithDefaults with saved org and other teams home dashboard should return org home dashboard\", func() {\n\t\t\terr := SavePreferences(&models.SavePreferencesCommand{OrgId: 1, HomeDashboardId: 1})\n\t\t\tSo(err, ShouldBeNil)\n\t\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, TeamId: 2, HomeDashboardId: 2})\n\t\t\tSo(err, ShouldBeNil)\n\t\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, TeamId: 3, HomeDashboardId: 3})\n\t\t\tSo(err, ShouldBeNil)\n\n\t\t\tquery := &models.GetPreferencesWithDefaultsQuery{User: &models.SignedInUser{OrgId: 1}}\n\t\t\terr = ss.GetPreferencesWithDefaults(query)\n\t\t\tSo(err, ShouldBeNil)\n\t\t\tSo(query.Result.HomeDashboardId, ShouldEqual, 1)\n\t\t})\n\n\t\tConvey(\"GetPreferencesWithDefaults with saved org, teams and user home dashboard should return user home dashboard\", func() {\n\t\t\terr := SavePreferences(&models.SavePreferencesCommand{OrgId: 1, HomeDashboardId: 1})\n\t\t\tSo(err, ShouldBeNil)\n\t\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, TeamId: 2, HomeDashboardId: 2})\n\t\t\tSo(err, ShouldBeNil)\n\t\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, TeamId: 3, HomeDashboardId: 3})\n\t\t\tSo(err, ShouldBeNil)\n\t\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, UserId: 1, HomeDashboardId: 4})\n\t\t\tSo(err, ShouldBeNil)\n\n\t\t\tquery := &models.GetPreferencesWithDefaultsQuery{\n\t\t\t\tUser: &models.SignedInUser{OrgId: 1, UserId: 1, Teams: []int64{2, 3}},\n\t\t\t}\n\t\t\terr = ss.GetPreferencesWithDefaults(query)\n\t\t\tSo(err, ShouldBeNil)\n\t\t\tSo(query.Result.HomeDashboardId, ShouldEqual, 4)\n\t\t})\n\n\t\tConvey(\"GetPreferencesWithDefaults with saved org, other teams and user home dashboard should return org home dashboard\", func() {\n\t\t\terr := SavePreferences(&models.SavePreferencesCommand{OrgId: 1, HomeDashboardId: 1})\n\t\t\tSo(err, ShouldBeNil)\n\t\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, TeamId: 2, HomeDashboardId: 2})\n\t\t\tSo(err, ShouldBeNil)\n\t\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, TeamId: 3, HomeDashboardId: 3})\n\t\t\tSo(err, ShouldBeNil)\n\t\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, UserId: 1, HomeDashboardId: 4})\n\t\t\tSo(err, ShouldBeNil)\n\n\t\t\tquery := &models.GetPreferencesWithDefaultsQuery{\n\t\t\t\tUser: &models.SignedInUser{OrgId: 1, UserId: 2},\n\t\t\t}\n\t\t\terr = ss.GetPreferencesWithDefaults(query)\n\t\t\tSo(err, ShouldBeNil)\n\t\t\tSo(query.Result.HomeDashboardId, ShouldEqual, 1)\n\t\t})\n\t})\n}\n<commit_msg>Chore: Rewrite preferences test from GoConvey to stdlib and testify (#29129)<commit_after>\/\/ +build integration\n\npackage sqlstore\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/grafana\/grafana\/pkg\/models\"\n\t\"github.com\/grafana\/grafana\/pkg\/setting\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestPreferencesDataAccess(t *testing.T) {\n\tss := InitTestDB(t)\n\n\tt.Run(\"GetPreferencesWithDefaults with no saved preferences should return defaults\", func(t *testing.T) {\n\t\tsetting.DefaultTheme = \"light\"\n\t\tss.Cfg.DateFormats.DefaultTimezone = \"UTC\"\n\n\t\tquery := &models.GetPreferencesWithDefaultsQuery{User: &models.SignedInUser{}}\n\t\terr := ss.GetPreferencesWithDefaults(query)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, \"light\", query.Result.Theme)\n\t\trequire.Equal(t, \"UTC\", query.Result.Timezone)\n\t\trequire.Equal(t, int64(0), query.Result.HomeDashboardId)\n\t})\n\n\tt.Run(\"GetPreferencesWithDefaults with saved org and user home dashboard should return user home dashboard\", func(t *testing.T) {\n\t\terr := SavePreferences(&models.SavePreferencesCommand{OrgId: 1, HomeDashboardId: 1})\n\t\trequire.NoError(t, err)\n\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, UserId: 1, HomeDashboardId: 4})\n\t\trequire.NoError(t, err)\n\n\t\tquery := &models.GetPreferencesWithDefaultsQuery{User: &models.SignedInUser{OrgId: 1, UserId: 1}}\n\t\terr = ss.GetPreferencesWithDefaults(query)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, int64(4), query.Result.HomeDashboardId)\n\t})\n\n\tt.Run(\"GetPreferencesWithDefaults with saved org and other user home dashboard should return org home dashboard\", func(t *testing.T) {\n\t\terr := SavePreferences(&models.SavePreferencesCommand{OrgId: 1, HomeDashboardId: 1})\n\t\trequire.NoError(t, err)\n\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, UserId: 1, HomeDashboardId: 4})\n\t\trequire.NoError(t, err)\n\n\t\tquery := &models.GetPreferencesWithDefaultsQuery{User: &models.SignedInUser{OrgId: 1, UserId: 2}}\n\t\terr = ss.GetPreferencesWithDefaults(query)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, int64(1), query.Result.HomeDashboardId)\n\t})\n\n\tt.Run(\"GetPreferencesWithDefaults with saved org and teams home dashboard should return last team home dashboard\", func(t *testing.T) {\n\t\terr := SavePreferences(&models.SavePreferencesCommand{OrgId: 1, HomeDashboardId: 1})\n\t\trequire.NoError(t, err)\n\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, TeamId: 2, HomeDashboardId: 2})\n\t\trequire.NoError(t, err)\n\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, TeamId: 3, HomeDashboardId: 3})\n\t\trequire.NoError(t, err)\n\n\t\tquery := &models.GetPreferencesWithDefaultsQuery{\n\t\t\tUser: &models.SignedInUser{OrgId: 1, Teams: []int64{2, 3}},\n\t\t}\n\t\terr = ss.GetPreferencesWithDefaults(query)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, int64(3), query.Result.HomeDashboardId)\n\t})\n\n\tt.Run(\"GetPreferencesWithDefaults with saved org and other teams home dashboard should return org home dashboard\", func(t *testing.T) {\n\t\terr := SavePreferences(&models.SavePreferencesCommand{OrgId: 1, HomeDashboardId: 1})\n\t\trequire.NoError(t, err)\n\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, TeamId: 2, HomeDashboardId: 2})\n\t\trequire.NoError(t, err)\n\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, TeamId: 3, HomeDashboardId: 3})\n\t\trequire.NoError(t, err)\n\n\t\tquery := &models.GetPreferencesWithDefaultsQuery{User: &models.SignedInUser{OrgId: 1}}\n\t\terr = ss.GetPreferencesWithDefaults(query)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, int64(1), query.Result.HomeDashboardId)\n\t})\n\n\tt.Run(\"GetPreferencesWithDefaults with saved org, teams and user home dashboard should return user home dashboard\", func(t *testing.T) {\n\t\terr := SavePreferences(&models.SavePreferencesCommand{OrgId: 1, HomeDashboardId: 1})\n\t\trequire.NoError(t, err)\n\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, TeamId: 2, HomeDashboardId: 2})\n\t\trequire.NoError(t, err)\n\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, TeamId: 3, HomeDashboardId: 3})\n\t\trequire.NoError(t, err)\n\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, UserId: 1, HomeDashboardId: 4})\n\t\trequire.NoError(t, err)\n\n\t\tquery := &models.GetPreferencesWithDefaultsQuery{\n\t\t\tUser: &models.SignedInUser{OrgId: 1, UserId: 1, Teams: []int64{2, 3}},\n\t\t}\n\t\terr = ss.GetPreferencesWithDefaults(query)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, int64(4), query.Result.HomeDashboardId)\n\t})\n\n\tt.Run(\"GetPreferencesWithDefaults with saved org, other teams and user home dashboard should return org home dashboard\", func(t *testing.T) {\n\t\terr := SavePreferences(&models.SavePreferencesCommand{OrgId: 1, HomeDashboardId: 1})\n\t\trequire.NoError(t, err)\n\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, TeamId: 2, HomeDashboardId: 2})\n\t\trequire.NoError(t, err)\n\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, TeamId: 3, HomeDashboardId: 3})\n\t\trequire.NoError(t, err)\n\t\terr = SavePreferences(&models.SavePreferencesCommand{OrgId: 1, UserId: 1, HomeDashboardId: 4})\n\t\trequire.NoError(t, err)\n\n\t\tquery := &models.GetPreferencesWithDefaultsQuery{\n\t\t\tUser: &models.SignedInUser{OrgId: 1, UserId: 2},\n\t\t}\n\t\terr = ss.GetPreferencesWithDefaults(query)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, int64(1), query.Result.HomeDashboardId)\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package appId\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/vault\/logical\"\n\tlogicaltest \"github.com\/hashicorp\/vault\/logical\/testing\"\n)\n\nfunc TestBackend_basic(t *testing.T) {\n\tlogicaltest.Test(t, logicaltest.TestCase{\n\t\tFactory: Factory,\n\t\tSteps: []logicaltest.TestStep{\n\t\t\ttestAccStepMapAppId(t),\n\t\t\ttestAccStepMapUserId(t),\n\t\t\ttestAccLogin(t, \"\"),\n\t\t\ttestAccLoginInvalid(t),\n\t\t\ttestAccStepDeleteUserId(t),\n\t\t\ttestAccLoginDeleted(t),\n\t\t},\n\t})\n}\n\nfunc TestBackend_cidr(t *testing.T) {\n\tlogicaltest.Test(t, logicaltest.TestCase{\n\t\tFactory: Factory,\n\t\tSteps: []logicaltest.TestStep{\n\t\t\ttestAccStepMapAppIdDisplayName(t),\n\t\t\ttestAccStepMapUserIdCidr(t, \"192.168.1.0\/16\"),\n\t\t\ttestAccLoginCidr(t, \"192.168.1.5\", false),\n\t\t\ttestAccLoginCidr(t, \"10.0.1.5\", true),\n\t\t\ttestAccLoginCidr(t, \"\", true),\n\t\t},\n\t})\n}\n\nfunc TestBackend_displayName(t *testing.T) {\n\tlogicaltest.Test(t, logicaltest.TestCase{\n\t\tFactory: Factory,\n\t\tSteps: []logicaltest.TestStep{\n\t\t\ttestAccStepMapAppIdDisplayName(t),\n\t\t\ttestAccStepMapUserId(t),\n\t\t\ttestAccLogin(t, \"tubbin\"),\n\t\t\ttestAccLoginInvalid(t),\n\t\t\ttestAccStepDeleteUserId(t),\n\t\t\ttestAccLoginDeleted(t),\n\t\t},\n\t})\n}\n\nfunc testAccStepMapAppId(t *testing.T) logicaltest.TestStep {\n\treturn logicaltest.TestStep{\n\t\tOperation: logical.WriteOperation,\n\t\tPath: \"map\/app-id\/foo\",\n\t\tData: map[string]interface{}{\n\t\t\t\"value\": \"foo,bar\",\n\t\t},\n\t}\n}\n\nfunc testAccStepMapAppIdDisplayName(t *testing.T) logicaltest.TestStep {\n\treturn logicaltest.TestStep{\n\t\tOperation: logical.WriteOperation,\n\t\tPath: \"map\/app-id\/foo\",\n\t\tData: map[string]interface{}{\n\t\t\t\"display_name\": \"tubbin\",\n\t\t\t\"value\": \"foo,bar\",\n\t\t},\n\t}\n}\n\nfunc testAccStepMapUserId(t *testing.T) logicaltest.TestStep {\n\treturn logicaltest.TestStep{\n\t\tOperation: logical.WriteOperation,\n\t\tPath: \"map\/user-id\/42\",\n\t\tData: map[string]interface{}{\n\t\t\t\"value\": \"foo\",\n\t\t},\n\t}\n}\n\nfunc testAccStepDeleteUserId(t *testing.T) logicaltest.TestStep {\n\treturn logicaltest.TestStep{\n\t\tOperation: logical.DeleteOperation,\n\t\tPath: \"map\/user-id\/42\",\n\t}\n}\n\nfunc testAccStepMapUserIdCidr(t *testing.T, cidr string) logicaltest.TestStep {\n\treturn logicaltest.TestStep{\n\t\tOperation: logical.WriteOperation,\n\t\tPath: \"map\/user-id\/42\",\n\t\tData: map[string]interface{}{\n\t\t\t\"value\": \"foo\",\n\t\t\t\"cidr_block\": cidr,\n\t\t},\n\t}\n}\n\nfunc testAccLogin(t *testing.T, display string) logicaltest.TestStep {\n\treturn logicaltest.TestStep{\n\t\tOperation: logical.WriteOperation,\n\t\tPath: \"login\",\n\t\tData: map[string]interface{}{\n\t\t\t\"app_id\": \"foo\",\n\t\t\t\"user_id\": \"42\",\n\t\t},\n\t\tUnauthenticated: true,\n\n\t\tCheck: logicaltest.TestCheckMulti(\n\t\t\tlogicaltest.TestCheckAuth([]string{\"bar\", \"foo\"}),\n\t\t\tlogicaltest.TestCheckAuthDisplayName(display),\n\t\t),\n\t}\n}\n\nfunc testAccLoginCidr(t *testing.T, ip string, err bool) logicaltest.TestStep {\n\tcheck := logicaltest.TestCheckError()\n\tif !err {\n\t\tcheck = logicaltest.TestCheckAuth([]string{\"bar\", \"foo\"})\n\t}\n\n\treturn logicaltest.TestStep{\n\t\tOperation: logical.WriteOperation,\n\t\tPath: \"login\",\n\t\tData: map[string]interface{}{\n\t\t\t\"app_id\": \"foo\",\n\t\t\t\"user_id\": \"42\",\n\t\t},\n\t\tErrorOk: err,\n\t\tUnauthenticated: true,\n\t\tRemoteAddr: ip,\n\n\t\tCheck: check,\n\t}\n}\n\nfunc testAccLoginInvalid(t *testing.T) logicaltest.TestStep {\n\treturn logicaltest.TestStep{\n\t\tOperation: logical.WriteOperation,\n\t\tPath: \"login\",\n\t\tData: map[string]interface{}{\n\t\t\t\"app_id\": \"foo\",\n\t\t\t\"user_id\": \"48\",\n\t\t},\n\t\tErrorOk: true,\n\t\tUnauthenticated: true,\n\n\t\tCheck: logicaltest.TestCheckError(),\n\t}\n}\n\nfunc testAccLoginDeleted(t *testing.T) logicaltest.TestStep {\n\treturn logicaltest.TestStep{\n\t\tOperation: logical.WriteOperation,\n\t\tPath: \"login\",\n\t\tData: map[string]interface{}{\n\t\t\t\"app_id\": \"foo\",\n\t\t\t\"user_id\": \"42\",\n\t\t},\n\t\tErrorOk: true,\n\t\tUnauthenticated: true,\n\n\t\tCheck: logicaltest.TestCheckError(),\n\t}\n}\n<commit_msg>cred\/app-id: testing upgrade to salted keys<commit_after>package appId\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/vault\/logical\"\n\tlogicaltest \"github.com\/hashicorp\/vault\/logical\/testing\"\n)\n\nfunc TestBackend_basic(t *testing.T) {\n\tlogicaltest.Test(t, logicaltest.TestCase{\n\t\tFactory: Factory,\n\t\tSteps: []logicaltest.TestStep{\n\t\t\ttestAccStepMapAppId(t),\n\t\t\ttestAccStepMapUserId(t),\n\t\t\ttestAccLogin(t, \"\"),\n\t\t\ttestAccLoginInvalid(t),\n\t\t\ttestAccStepDeleteUserId(t),\n\t\t\ttestAccLoginDeleted(t),\n\t\t},\n\t})\n}\n\nfunc TestBackend_cidr(t *testing.T) {\n\tlogicaltest.Test(t, logicaltest.TestCase{\n\t\tFactory: Factory,\n\t\tSteps: []logicaltest.TestStep{\n\t\t\ttestAccStepMapAppIdDisplayName(t),\n\t\t\ttestAccStepMapUserIdCidr(t, \"192.168.1.0\/16\"),\n\t\t\ttestAccLoginCidr(t, \"192.168.1.5\", false),\n\t\t\ttestAccLoginCidr(t, \"10.0.1.5\", true),\n\t\t\ttestAccLoginCidr(t, \"\", true),\n\t\t},\n\t})\n}\n\nfunc TestBackend_displayName(t *testing.T) {\n\tlogicaltest.Test(t, logicaltest.TestCase{\n\t\tFactory: Factory,\n\t\tSteps: []logicaltest.TestStep{\n\t\t\ttestAccStepMapAppIdDisplayName(t),\n\t\t\ttestAccStepMapUserId(t),\n\t\t\ttestAccLogin(t, \"tubbin\"),\n\t\t\ttestAccLoginInvalid(t),\n\t\t\ttestAccStepDeleteUserId(t),\n\t\t\ttestAccLoginDeleted(t),\n\t\t},\n\t})\n}\n\n\/\/ Verify that we are able to update from non-salted (<0.2) to\n\/\/ using a Salt for the paths\nfunc TestBackend_upgradeToSalted(t *testing.T) {\n\tinm := new(logical.InmemStorage)\n\n\t\/\/ Create some fake keys\n\tse, _ := logical.StorageEntryJSON(\"struct\/map\/app-id\/foo\",\n\t\tmap[string]string{\"value\": \"test\"})\n\tinm.Put(se)\n\tse, _ = logical.StorageEntryJSON(\"struct\/map\/user-id\/bar\",\n\t\tmap[string]string{\"value\": \"foo\"})\n\tinm.Put(se)\n\n\t\/\/ Initialize the backend, this should do the automatic upgrade\n\tconf := &logical.BackendConfig{\n\t\tView: inm,\n\t}\n\tbackend, err := Factory(conf)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\n\t\/\/ Check the keys have been upgraded\n\tout, err := inm.Get(\"struct\/map\/app-id\/foo\")\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif out != nil {\n\t\tt.Fatalf(\"unexpected key\")\n\t}\n\tout, err = inm.Get(\"struct\/map\/user-id\/bar\")\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif out != nil {\n\t\tt.Fatalf(\"unexpected key\")\n\t}\n\n\t\/\/ Backend should still be able to resolve\n\treq := logical.TestRequest(t, logical.ReadOperation, \"map\/app-id\/foo\")\n\treq.Storage = inm\n\tresp, err := backend.HandleRequest(req)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif resp.Data[\"value\"] != \"test\" {\n\t\tt.Fatalf(\"bad: %#v\", resp)\n\t}\n\n\treq = logical.TestRequest(t, logical.ReadOperation, \"map\/user-id\/bar\")\n\treq.Storage = inm\n\tresp, err = backend.HandleRequest(req)\n\tif err != nil {\n\t\tt.Fatalf(\"err: %v\", err)\n\t}\n\tif resp.Data[\"value\"] != \"foo\" {\n\t\tt.Fatalf(\"bad: %#v\", resp)\n\t}\n}\n\nfunc testAccStepMapAppId(t *testing.T) logicaltest.TestStep {\n\treturn logicaltest.TestStep{\n\t\tOperation: logical.WriteOperation,\n\t\tPath: \"map\/app-id\/foo\",\n\t\tData: map[string]interface{}{\n\t\t\t\"value\": \"foo,bar\",\n\t\t},\n\t}\n}\n\nfunc testAccStepMapAppIdDisplayName(t *testing.T) logicaltest.TestStep {\n\treturn logicaltest.TestStep{\n\t\tOperation: logical.WriteOperation,\n\t\tPath: \"map\/app-id\/foo\",\n\t\tData: map[string]interface{}{\n\t\t\t\"display_name\": \"tubbin\",\n\t\t\t\"value\": \"foo,bar\",\n\t\t},\n\t}\n}\n\nfunc testAccStepMapUserId(t *testing.T) logicaltest.TestStep {\n\treturn logicaltest.TestStep{\n\t\tOperation: logical.WriteOperation,\n\t\tPath: \"map\/user-id\/42\",\n\t\tData: map[string]interface{}{\n\t\t\t\"value\": \"foo\",\n\t\t},\n\t}\n}\n\nfunc testAccStepDeleteUserId(t *testing.T) logicaltest.TestStep {\n\treturn logicaltest.TestStep{\n\t\tOperation: logical.DeleteOperation,\n\t\tPath: \"map\/user-id\/42\",\n\t}\n}\n\nfunc testAccStepMapUserIdCidr(t *testing.T, cidr string) logicaltest.TestStep {\n\treturn logicaltest.TestStep{\n\t\tOperation: logical.WriteOperation,\n\t\tPath: \"map\/user-id\/42\",\n\t\tData: map[string]interface{}{\n\t\t\t\"value\": \"foo\",\n\t\t\t\"cidr_block\": cidr,\n\t\t},\n\t}\n}\n\nfunc testAccLogin(t *testing.T, display string) logicaltest.TestStep {\n\treturn logicaltest.TestStep{\n\t\tOperation: logical.WriteOperation,\n\t\tPath: \"login\",\n\t\tData: map[string]interface{}{\n\t\t\t\"app_id\": \"foo\",\n\t\t\t\"user_id\": \"42\",\n\t\t},\n\t\tUnauthenticated: true,\n\n\t\tCheck: logicaltest.TestCheckMulti(\n\t\t\tlogicaltest.TestCheckAuth([]string{\"bar\", \"foo\"}),\n\t\t\tlogicaltest.TestCheckAuthDisplayName(display),\n\t\t),\n\t}\n}\n\nfunc testAccLoginCidr(t *testing.T, ip string, err bool) logicaltest.TestStep {\n\tcheck := logicaltest.TestCheckError()\n\tif !err {\n\t\tcheck = logicaltest.TestCheckAuth([]string{\"bar\", \"foo\"})\n\t}\n\n\treturn logicaltest.TestStep{\n\t\tOperation: logical.WriteOperation,\n\t\tPath: \"login\",\n\t\tData: map[string]interface{}{\n\t\t\t\"app_id\": \"foo\",\n\t\t\t\"user_id\": \"42\",\n\t\t},\n\t\tErrorOk: err,\n\t\tUnauthenticated: true,\n\t\tRemoteAddr: ip,\n\n\t\tCheck: check,\n\t}\n}\n\nfunc testAccLoginInvalid(t *testing.T) logicaltest.TestStep {\n\treturn logicaltest.TestStep{\n\t\tOperation: logical.WriteOperation,\n\t\tPath: \"login\",\n\t\tData: map[string]interface{}{\n\t\t\t\"app_id\": \"foo\",\n\t\t\t\"user_id\": \"48\",\n\t\t},\n\t\tErrorOk: true,\n\t\tUnauthenticated: true,\n\n\t\tCheck: logicaltest.TestCheckError(),\n\t}\n}\n\nfunc testAccLoginDeleted(t *testing.T) logicaltest.TestStep {\n\treturn logicaltest.TestStep{\n\t\tOperation: logical.WriteOperation,\n\t\tPath: \"login\",\n\t\tData: map[string]interface{}{\n\t\t\t\"app_id\": \"foo\",\n\t\t\t\"user_id\": \"42\",\n\t\t},\n\t\tErrorOk: true,\n\t\tUnauthenticated: true,\n\n\t\tCheck: logicaltest.TestCheckError(),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tstreamDir = \"stream\"\n)\n\nvar transcoded map[string]bool = make(map[string]bool)\n\ntype song struct {\n\tId string `json:\"id\"`\n\tPath string `json:\"path\"`\n\tTags map[string]string `json:\"tags\"`\n}\n\ntype river struct {\n\tSongs map[string]*song `json:\"songs\"`\n\tLibrary string `json:\"library\"`\n\tpassword string\n\tport uint16\n\tconvCmd string\n\tprobeCmd string\n}\n\nfunc chooseCmd(a string, b string) (string, error) {\n\tif _, err := exec.LookPath(a); err != nil {\n\t\tif _, err := exec.LookPath(b); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"could not find %s or %s executable\", a, b)\n\t\t}\n\t\treturn b, nil\n\t}\n\treturn a, nil\n}\n\nfunc (s *song) readTags(container map[string]interface{}) {\n\ttagsRaw, ok := container[\"tags\"]\n\tif !ok {\n\t\treturn\n\t}\n\tfor key, value := range tagsRaw.(map[string]interface{}) {\n\t\ts.Tags[key] = value.(string)\n\t}\n}\n\nfunc (r river) newSong(relPath string) (s *song, err error) {\n\tabsPath := path.Join(r.Library, relPath)\n\tcmd := exec.Command(r.probeCmd,\n\t\t\"-print_format\", \"json\",\n\t\t\"-show_streams\",\n\t\tabsPath)\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn\n\t}\n\tif err = cmd.Start(); err != nil {\n\t\treturn\n\t}\n\tvar streams struct {\n\t\tStreams []map[string]interface{}\n\t}\n\tif err = json.NewDecoder(stdout).Decode(&streams); err != nil {\n\t\treturn\n\t}\n\tif err = cmd.Wait(); err != nil {\n\t\treturn\n\t}\n\taudio := false\n\tfor _, stream := range streams.Streams {\n\t\tif stream[\"codec_type\"] == \"audio\" {\n\t\t\taudio = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !audio {\n\t\treturn nil, fmt.Errorf(\"'%s' does not contain an audio stream\", relPath)\n\t}\n\tcmd = exec.Command(r.probeCmd,\n\t\t\"-print_format\", \"json\",\n\t\t\"-show_format\",\n\t\tabsPath)\n\tif stdout, err = cmd.StdoutPipe(); err != nil {\n\t\treturn\n\t}\n\tif err = cmd.Start(); err != nil {\n\t\treturn\n\t}\n\tvar format struct {\n\t\tFormat map[string]interface{}\n\t}\n\tif err = json.NewDecoder(stdout).Decode(&format); err != nil {\n\t\treturn\n\t}\n\tif err = cmd.Wait(); err != nil {\n\t\treturn\n\t}\n\ts = &song{\n\t\tId: id(),\n\t\tPath: relPath,\n\t\tTags: make(map[string]string),\n\t}\n\ts.readTags(format.Format)\n\tfor _, stream := range streams.Streams {\n\t\ts.readTags(stream)\n\t}\n\treturn\n}\n\nfunc id() string {\n\tletters := []byte(\"abcdefghijklmnopqrstuvwxyz\")\n\trand.Seed(time.Now().UnixNano())\n\tidBytes := make([]byte, 0, 8)\n\tfor i := 0; i < cap(idBytes); i++ {\n\t\tidBytes = append(idBytes, letters[rand.Intn(len(letters))])\n\t}\n\treturn string(idBytes)\n}\n\nfunc (r *river) readDir(relDir string) (err error) {\n\tabsDir := path.Join(r.Library, relDir)\n\tfis, err := ioutil.ReadDir(absDir)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, fi := range fis {\n\t\trelPath := path.Join(relDir, fi.Name())\n\t\tif fi.IsDir() {\n\t\t\tif err = r.readDir(relPath); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\ts, err := r.newSong(relPath)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tr.Songs[s.Id] = s\n\t\t}\n\t}\n\treturn\n}\n\nfunc (r *river) readLibrary() (err error) {\n\tlog.Println(\"reading songs into database\")\n\tr.Songs = make(map[string]*song)\n\tif err = r.readDir(\"\"); err != nil {\n\t\treturn\n\t}\n\tdb, err := os.OpenFile(\"db.json\", os.O_CREATE|os.O_TRUNC, 0200)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer db.Close()\n\terr = json.NewEncoder(db).Encode(r)\n\tfis, err := ioutil.ReadDir(streamDir)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, fi := range fis {\n\t\tif err = os.RemoveAll(path.Join(streamDir, fi.Name())); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\ntype config struct {\n\tLibrary string `json:\"library\"`\n\tPassword string `json:\"pass\"`\n\tPort uint16\t`json:\"port\"`\n}\n\nfunc newRiver(c *config) (r *river, err error) {\n\tr = &river{\n\t\tLibrary: c.Library,\n\t\tpassword: c.Password,\n\t\tport: c.Port,\n\t}\n\tconvCmd, err := chooseCmd(\"ffmpeg\", \"avconv\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.convCmd = convCmd\n\tprobeCmd, err := chooseCmd(\"ffprobe\", \"avprobe\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.probeCmd = probeCmd\n\tdbPath := \"db.json\"\n\tif _, err := os.Stat(dbPath); os.IsNotExist(err) {\n\t\tr.Library = c.Library\n\t\tif err = r.readLibrary(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tdb, err := os.Open(dbPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer db.Close()\n\t\tif err = json.NewDecoder(db).Decode(r); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif r.Library != c.Library {\n\t\t\tr.Library = c.Library\n\t\t\tif err = r.readLibrary(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc (ri river) serveSongs(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tif err := json.NewEncoder(w).Encode(ri.Songs); err != nil {\n\t\thttp.Error(w, \"unable to encode song list\", 500)\n\t\treturn\n\t}\n}\n\nfunc (ri river) serveSong(w http.ResponseWriter, r *http.Request) {\n\tbase := path.Base(r.URL.Path)\n\text := path.Ext(base)\n\tid := strings.TrimSuffix(base, ext)\n\tstream := path.Join(\"stream\", base)\n\tif !transcoded[id] {\n\t\tsong, ok := ri.Songs[id]\n\t\tif !ok {\n\t\t\thttp.Error(w, \"file not found\", 404)\n\t\t\treturn\n\t\t}\n\t\tvar codec string\n\t\tvar qFlag string\n\t\tvar quality string\n\t\tvar format string\n\t\tvar bitrate string = \"0\"\n\t\tswitch ext {\n\t\tcase \".opus\":\n\t\t\tcodec = \"opus\"\n\t\t\tqFlag = \"-compression_level\"\n\t\t\tquality = \"10\"\n\t\t\tformat = \"opus\"\n\t\t\tbitrate = \"128000\"\n\t\t\tbreak\n\t\tcase \".mp3\":\n\t\t\tcodec = \"libmp3lame\"\n\t\t\tqFlag = \"-q\"\n\t\t\tquality = \"0\"\n\t\t\tformat = \"mp3\"\n\t\t\tbreak\n\t\tdefault:\n\t\t\thttp.Error(w, \"unsupported file extension\", 403)\n\t\t\treturn\n\t\t}\n\t\tcmd := exec.Command(ri.convCmd,\n\t\t\t\"-i\", path.Join(ri.Library, song.Path),\n\t\t\t\"-c\", codec,\n\t\t\tqFlag, quality,\n\t\t\t\"-b:a\", bitrate,\n\t\t\t\"-f\", format,\n\t\t\tstream)\n\t\tif err := cmd.Run(); err != nil {\n\t\t\thttp.Error(w, \"error encoding file\", 500)\n\t\t\treturn\n\t\t}\n\t\ttranscoded[id] = true\n\t}\n\thttp.ServeFile(w, r, stream)\n}\n\nfunc (ri river) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.URL.Path == \"\/\" {\n\t\tri.serveSongs(w, r)\n\t} else {\n\t\tri.serveSong(w, r)\n\t}\n}\n\nfunc (r river) serve() {\n\thttp.Handle(\"\/\", r)\n\tlog.Println(\"ready\")\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%d\", r.port), nil))\n}\n\nfunc main() {\n\tflagConfig := flag.String(\"config\", \"config.json\", \"the configuration file\")\n\tflagLibrary := flag.String(\"library\", \"\", \"the music library\")\n\tflagPort := flag.Uint(\"port\", 21313, \"the port to listen on\")\n\tflag.Parse()\n\tconfigFile, err := os.Open(*flagConfig)\n\tif err != nil {\n\t\tlog.Fatalf(\"unable to open '%s'\\n\", *flagConfig)\n\t}\n\tvar c config\n\tif err = json.NewDecoder(configFile).Decode(&c); err != nil {\n\t\tlog.Fatalf(\"unable to parse '%s': %v\", *flagConfig, err)\n\t}\n\tif *flagLibrary != \"\" {\n\t\tc.Library = *flagLibrary\n\t}\n\tif *flagPort != 0 {\n\t\tc.Port = uint16(*flagPort)\n\t}\n\tr, err := newRiver(&c)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tr.serve()\n}\n<commit_msg>Check for transcoding properly<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tstreamDir = \"stream\"\n)\n\ntype song struct {\n\tId string `json:\"id\"`\n\tPath string `json:\"path\"`\n\tTags map[string]string `json:\"tags\"`\n}\n\ntype river struct {\n\tSongs map[string]*song `json:\"songs\"`\n\tLibrary string `json:\"library\"`\n\tpassword string\n\tport uint16\n\tconvCmd string\n\tprobeCmd string\n\ttranscoding map[string]sync.WaitGroup\n}\n\nfunc chooseCmd(a string, b string) (string, error) {\n\tif _, err := exec.LookPath(a); err != nil {\n\t\tif _, err := exec.LookPath(b); err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"could not find %s or %s executable\", a, b)\n\t\t}\n\t\treturn b, nil\n\t}\n\treturn a, nil\n}\n\nfunc (s *song) readTags(container map[string]interface{}) {\n\ttagsRaw, ok := container[\"tags\"]\n\tif !ok {\n\t\treturn\n\t}\n\tfor key, value := range tagsRaw.(map[string]interface{}) {\n\t\ts.Tags[key] = value.(string)\n\t}\n}\n\nfunc (r river) newSong(relPath string) (s *song, err error) {\n\tabsPath := path.Join(r.Library, relPath)\n\tcmd := exec.Command(r.probeCmd,\n\t\t\"-print_format\", \"json\",\n\t\t\"-show_streams\",\n\t\tabsPath)\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn\n\t}\n\tif err = cmd.Start(); err != nil {\n\t\treturn\n\t}\n\tvar streams struct {\n\t\tStreams []map[string]interface{}\n\t}\n\tif err = json.NewDecoder(stdout).Decode(&streams); err != nil {\n\t\treturn\n\t}\n\tif err = cmd.Wait(); err != nil {\n\t\treturn\n\t}\n\taudio := false\n\tfor _, stream := range streams.Streams {\n\t\tif stream[\"codec_type\"] == \"audio\" {\n\t\t\taudio = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !audio {\n\t\treturn nil, fmt.Errorf(\"'%s' does not contain an audio stream\", relPath)\n\t}\n\tcmd = exec.Command(r.probeCmd,\n\t\t\"-print_format\", \"json\",\n\t\t\"-show_format\",\n\t\tabsPath)\n\tif stdout, err = cmd.StdoutPipe(); err != nil {\n\t\treturn\n\t}\n\tif err = cmd.Start(); err != nil {\n\t\treturn\n\t}\n\tvar format struct {\n\t\tFormat map[string]interface{}\n\t}\n\tif err = json.NewDecoder(stdout).Decode(&format); err != nil {\n\t\treturn\n\t}\n\tif err = cmd.Wait(); err != nil {\n\t\treturn\n\t}\n\ts = &song{\n\t\tId: id(),\n\t\tPath: relPath,\n\t\tTags: make(map[string]string),\n\t}\n\ts.readTags(format.Format)\n\tfor _, stream := range streams.Streams {\n\t\ts.readTags(stream)\n\t}\n\treturn\n}\n\nfunc id() string {\n\tletters := []byte(\"abcdefghijklmnopqrstuvwxyz\")\n\trand.Seed(time.Now().UnixNano())\n\tidBytes := make([]byte, 0, 8)\n\tfor i := 0; i < cap(idBytes); i++ {\n\t\tidBytes = append(idBytes, letters[rand.Intn(len(letters))])\n\t}\n\treturn string(idBytes)\n}\n\nfunc (r *river) readDir(relDir string) (err error) {\n\tabsDir := path.Join(r.Library, relDir)\n\tfis, err := ioutil.ReadDir(absDir)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, fi := range fis {\n\t\trelPath := path.Join(relDir, fi.Name())\n\t\tif fi.IsDir() {\n\t\t\tif err = r.readDir(relPath); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\ts, err := r.newSong(relPath)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tr.Songs[s.Id] = s\n\t\t}\n\t}\n\treturn\n}\n\nfunc (r *river) readLibrary() (err error) {\n\tlog.Println(\"reading songs into database\")\n\tr.Songs = make(map[string]*song)\n\tif err = r.readDir(\"\"); err != nil {\n\t\treturn\n\t}\n\tdb, err := os.OpenFile(\"db.json\", os.O_CREATE|os.O_TRUNC, 0200)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer db.Close()\n\terr = json.NewEncoder(db).Encode(r)\n\tfis, err := ioutil.ReadDir(streamDir)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, fi := range fis {\n\t\tif err = os.RemoveAll(path.Join(streamDir, fi.Name())); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\ntype config struct {\n\tLibrary string `json:\"library\"`\n\tPassword string `json:\"pass\"`\n\tPort uint16\t`json:\"port\"`\n}\n\nfunc newRiver(c *config) (r *river, err error) {\n\tr = &river{\n\t\tLibrary: c.Library,\n\t\tpassword: c.Password,\n\t\tport: c.Port,\n\t\ttranscoding: make(map[string]sync.WaitGroup),\n\t}\nconvCmd, err := chooseCmd(\"ffmpeg\", \"avconv\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.convCmd = convCmd\n\tprobeCmd, err := chooseCmd(\"ffprobe\", \"avprobe\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.probeCmd = probeCmd\n\tdbPath := \"db.json\"\n\tif _, err := os.Stat(dbPath); os.IsNotExist(err) {\n\t\tr.Library = c.Library\n\t\tif err = r.readLibrary(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tdb, err := os.Open(dbPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer db.Close()\n\t\tif err = json.NewDecoder(db).Decode(r); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif r.Library != c.Library {\n\t\t\tr.Library = c.Library\n\t\t\tif err = r.readLibrary(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc (ri river) serveSongs(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tif err := json.NewEncoder(w).Encode(ri.Songs); err != nil {\n\t\thttp.Error(w, \"unable to encode song list\", 500)\n\t\treturn\n\t}\n}\n\nfunc (ri river) serveSong(w http.ResponseWriter, r *http.Request) {\n\tbase := path.Base(r.URL.Path)\n\text := path.Ext(base)\n\tid := strings.TrimSuffix(base, ext)\n\tstream := path.Join(\"stream\", base)\n\t_, err := os.Stat(stream)\n\tif err != nil && !os.IsNotExist(err) {\n\t\thttp.Error(w, \"error looking for cached file\", 500)\n\t\treturn\n\t}\n\tvar newWg sync.WaitGroup\n\twg, ok := ri.transcoding[stream]\n\tif !ok {\n\t\tri.transcoding[stream] = newWg\n\t}\n\tif os.IsNotExist(err) {\n\t\twg.Add(1)\n\t\tdefer wg.Done()\n\t\tsong, ok := ri.Songs[id]\n\t\tif !ok {\n\t\t\thttp.Error(w, \"file not found\", 404)\n\t\t\treturn\n\t\t}\n\t\tvar codec string\n\t\tvar qFlag string\n\t\tvar quality string\n\t\tvar format string\n\t\tvar bitrate string = \"0\"\n\t\tswitch ext {\n\t\tcase \".opus\":\n\t\t\tcodec = \"opus\"\n\t\t\tqFlag = \"-compression_level\"\n\t\t\tquality = \"10\"\n\t\t\tformat = \"opus\"\n\t\t\tbitrate = \"128000\"\n\t\t\tbreak\n\t\tcase \".mp3\":\n\t\t\tcodec = \"libmp3lame\"\n\t\t\tqFlag = \"-q\"\n\t\t\tquality = \"0\"\n\t\t\tformat = \"mp3\"\n\t\t\tbreak\n\t\tdefault:\n\t\t\thttp.Error(w, \"unsupported file extension\", 403)\n\t\t\treturn\n\t\t}\n\t\tcmd := exec.Command(ri.convCmd,\n\t\t\t\"-i\", path.Join(ri.Library, song.Path),\n\t\t\t\"-c\", codec,\n\t\t\tqFlag, quality,\n\t\t\t\"-b:a\", bitrate,\n\t\t\t\"-f\", format,\n\t\t\tstream)\n\t\tif err := cmd.Run(); err != nil {\n\t\t\thttp.Error(w, \"error encoding file\", 500)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\twg.Wait()\n\t}\n\thttp.ServeFile(w, r, stream)\n}\n\nfunc (ri river) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif r.URL.Path == \"\/\" {\n\t\tri.serveSongs(w, r)\n\t} else {\n\t\tri.serveSong(w, r)\n\t}\n}\n\nfunc (r river) serve() {\n\thttp.Handle(\"\/\", r)\n\tlog.Println(\"ready\")\n\tlog.Fatal(http.ListenAndServe(fmt.Sprintf(\":%d\", r.port), nil))\n}\n\nfunc main() {\n\tflagConfig := flag.String(\"config\", \"config.json\", \"the configuration file\")\n\tflagLibrary := flag.String(\"library\", \"\", \"the music library\")\n\tflagPort := flag.Uint(\"port\", 21313, \"the port to listen on\")\n\tflag.Parse()\n\tconfigFile, err := os.Open(*flagConfig)\n\tif err != nil {\n\t\tlog.Fatalf(\"unable to open '%s'\\n\", *flagConfig)\n\t}\n\tvar c config\n\tif err = json.NewDecoder(configFile).Decode(&c); err != nil {\n\t\tlog.Fatalf(\"unable to parse '%s': %v\", *flagConfig, err)\n\t}\n\tif *flagLibrary != \"\" {\n\t\tc.Library = *flagLibrary\n\t}\n\tif *flagPort != 0 {\n\t\tc.Port = uint16(*flagPort)\n\t}\n\tr, err := newRiver(&c)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tr.serve()\n}\n<|endoftext|>"} {"text":"<commit_before>package orchestrator\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/docker\/swarmkit\/api\"\n\t\"github.com\/docker\/swarmkit\/log\"\n\t\"github.com\/docker\/swarmkit\/manager\/state\"\n\t\"github.com\/docker\/swarmkit\/manager\/state\/store\"\n\t\"github.com\/docker\/swarmkit\/manager\/state\/watch\"\n\t\"github.com\/docker\/swarmkit\/protobuf\/ptypes\"\n)\n\n\/\/ UpdateSupervisor supervises a set of updates. It's responsible for keeping track of updates,\n\/\/ shutting them down and replacing them.\ntype UpdateSupervisor struct {\n\tstore *store.MemoryStore\n\trestarts *RestartSupervisor\n\tupdates map[string]*Updater\n\tl sync.Mutex\n}\n\n\/\/ NewUpdateSupervisor creates a new UpdateSupervisor.\nfunc NewUpdateSupervisor(store *store.MemoryStore, restartSupervisor *RestartSupervisor) *UpdateSupervisor {\n\treturn &UpdateSupervisor{\n\t\tstore: store,\n\t\tupdates: make(map[string]*Updater),\n\t\trestarts: restartSupervisor,\n\t}\n}\n\n\/\/ Update starts an Update of `tasks` belonging to `service` in the background and returns immediately.\n\/\/ If an update for that service was already in progress, it will be cancelled before the new one starts.\nfunc (u *UpdateSupervisor) Update(ctx context.Context, service *api.Service, tasks []*api.Task) {\n\tu.l.Lock()\n\tdefer u.l.Unlock()\n\n\tid := service.ID\n\n\tif update, ok := u.updates[id]; ok {\n\t\tupdate.Cancel()\n\t}\n\n\tupdate := NewUpdater(u.store, u.restarts)\n\tu.updates[id] = update\n\tgo func() {\n\t\tupdate.Run(ctx, service, tasks)\n\t\tu.l.Lock()\n\t\tif u.updates[id] == update {\n\t\t\tdelete(u.updates, id)\n\t\t}\n\t\tu.l.Unlock()\n\t}()\n}\n\n\/\/ CancelAll cancels all current updates.\nfunc (u *UpdateSupervisor) CancelAll() {\n\tu.l.Lock()\n\tdefer u.l.Unlock()\n\n\tfor _, update := range u.updates {\n\t\tupdate.Cancel()\n\t}\n}\n\n\/\/ Updater updates a set of tasks to a new version.\ntype Updater struct {\n\tstore *store.MemoryStore\n\twatchQueue *watch.Queue\n\trestarts *RestartSupervisor\n\n\t\/\/ stopChan signals to the state machine to stop running.\n\tstopChan chan struct{}\n\t\/\/ doneChan is closed when the state machine terminates.\n\tdoneChan chan struct{}\n}\n\n\/\/ NewUpdater creates a new Updater.\nfunc NewUpdater(store *store.MemoryStore, restartSupervisor *RestartSupervisor) *Updater {\n\treturn &Updater{\n\t\tstore: store,\n\t\twatchQueue: store.WatchQueue(),\n\t\trestarts: restartSupervisor,\n\t\tstopChan: make(chan struct{}),\n\t\tdoneChan: make(chan struct{}),\n\t}\n}\n\n\/\/ Cancel cancels the current update immediately. It blocks until the cancellation is confirmed.\nfunc (u *Updater) Cancel() {\n\tclose(u.stopChan)\n\t<-u.doneChan\n}\n\n\/\/ Run starts the update and returns only once its complete or cancelled.\nfunc (u *Updater) Run(ctx context.Context, service *api.Service, tasks []*api.Task) {\n\tdefer close(u.doneChan)\n\n\tdirtyTasks := []*api.Task{}\n\tfor _, t := range tasks {\n\t\tif !reflect.DeepEqual(service.Spec.Task, t.Spec) ||\n\t\t\t!reflect.DeepEqual(service.Endpoint, t.Endpoint) {\n\t\t\tdirtyTasks = append(dirtyTasks, t)\n\t\t}\n\t}\n\t\/\/ Abort immediately if all tasks are clean.\n\tif len(dirtyTasks) == 0 {\n\t\treturn\n\t}\n\n\tparallelism := 0\n\tif service.Spec.Update != nil {\n\t\tparallelism = int(service.Spec.Update.Parallelism)\n\t}\n\tif parallelism == 0 {\n\t\t\/\/ TODO(aluzzardi): We could try to optimize unlimited parallelism by performing updates in a single\n\t\t\/\/ goroutine using a batch transaction.\n\t\tparallelism = len(dirtyTasks)\n\t}\n\n\t\/\/ Start the workers.\n\ttaskQueue := make(chan *api.Task)\n\twg := sync.WaitGroup{}\n\twg.Add(parallelism)\n\tfor i := 0; i < parallelism; i++ {\n\t\tgo func() {\n\t\t\tu.worker(ctx, service, taskQueue)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\tfor _, t := range dirtyTasks {\n\t\t\/\/ Wait for a worker to pick up the task or abort the update, whichever comes first.\n\t\tselect {\n\t\tcase <-u.stopChan:\n\t\t\tbreak\n\n\t\tcase taskQueue <- t:\n\t\t}\n\t}\n\n\tclose(taskQueue)\n\twg.Wait()\n}\n\nfunc (u *Updater) worker(ctx context.Context, service *api.Service, queue <-chan *api.Task) {\n\tfor t := range queue {\n\t\tupdated := newTask(service, t.Slot)\n\t\tupdated.DesiredState = api.TaskStateReady\n\t\tif isGlobalService(service) {\n\t\t\tupdated.NodeID = t.NodeID\n\t\t}\n\n\t\tif err := u.updateTask(ctx, service, t, updated); err != nil {\n\t\t\tlog.G(ctx).WithError(err).WithField(\"task.id\", t.ID).Error(\"update failed\")\n\t\t}\n\n\t\tif service.Spec.Update != nil && (service.Spec.Update.Delay.Seconds != 0 || service.Spec.Update.Delay.Nanos != 0) {\n\t\t\tdelay, err := ptypes.Duration(&service.Spec.Update.Delay)\n\t\t\tif err != nil {\n\t\t\t\tlog.G(ctx).WithError(err).Error(\"invalid update delay\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase <-time.After(delay):\n\t\t\tcase <-u.stopChan:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (u *Updater) updateTask(ctx context.Context, service *api.Service, original, updated *api.Task) error {\n\tlog.G(ctx).Debugf(\"replacing %s with %s\", original.ID, updated.ID)\n\t\/\/ Kick off the watch before even creating the updated task. This is in order to avoid missing any event.\n\ttaskUpdates, cancel := state.Watch(u.watchQueue, state.EventUpdateTask{\n\t\tTask: &api.Task{ID: updated.ID},\n\t\tChecks: []state.TaskCheckFunc{state.TaskCheckID},\n\t})\n\tdefer cancel()\n\n\tvar delayStartCh <-chan struct{}\n\t\/\/ Atomically create the updated task and bring down the old one.\n\terr := u.store.Update(func(tx store.Tx) error {\n\t\tt := store.GetTask(tx, original.ID)\n\t\tif t == nil {\n\t\t\treturn fmt.Errorf(\"task %s not found while trying to update it\", original.ID)\n\t\t}\n\t\tt.DesiredState = api.TaskStateShutdown\n\t\tif err := store.UpdateTask(tx, t); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := store.CreateTask(tx, updated); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Wait for the old task to stop or time out, and then set the new one\n\t\t\/\/ to RUNNING.\n\t\tdelayStartCh = u.restarts.DelayStart(ctx, tx, original, updated.ID, 0, true)\n\n\t\treturn nil\n\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t<-delayStartCh\n\n\t\/\/ Wait for the new task to come up.\n\t\/\/ TODO(aluzzardi): Consider adding a timeout here.\n\tfor {\n\t\tselect {\n\t\tcase e := <-taskUpdates:\n\t\t\tupdated = e.(state.EventUpdateTask).Task\n\t\t\tif updated.Status.State >= api.TaskStateRunning {\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase <-u.stopChan:\n\t\t\treturn nil\n\t\t}\n\t}\n}\n<commit_msg>orchestrator: Fix rolling update race that could create extra tasks<commit_after>package orchestrator\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/docker\/swarmkit\/api\"\n\t\"github.com\/docker\/swarmkit\/log\"\n\t\"github.com\/docker\/swarmkit\/manager\/state\"\n\t\"github.com\/docker\/swarmkit\/manager\/state\/store\"\n\t\"github.com\/docker\/swarmkit\/manager\/state\/watch\"\n\t\"github.com\/docker\/swarmkit\/protobuf\/ptypes\"\n)\n\n\/\/ UpdateSupervisor supervises a set of updates. It's responsible for keeping track of updates,\n\/\/ shutting them down and replacing them.\ntype UpdateSupervisor struct {\n\tstore *store.MemoryStore\n\trestarts *RestartSupervisor\n\tupdates map[string]*Updater\n\tl sync.Mutex\n}\n\n\/\/ NewUpdateSupervisor creates a new UpdateSupervisor.\nfunc NewUpdateSupervisor(store *store.MemoryStore, restartSupervisor *RestartSupervisor) *UpdateSupervisor {\n\treturn &UpdateSupervisor{\n\t\tstore: store,\n\t\tupdates: make(map[string]*Updater),\n\t\trestarts: restartSupervisor,\n\t}\n}\n\n\/\/ Update starts an Update of `tasks` belonging to `service` in the background and returns immediately.\n\/\/ If an update for that service was already in progress, it will be cancelled before the new one starts.\nfunc (u *UpdateSupervisor) Update(ctx context.Context, service *api.Service, tasks []*api.Task) {\n\tu.l.Lock()\n\tdefer u.l.Unlock()\n\n\tid := service.ID\n\n\tif update, ok := u.updates[id]; ok {\n\t\tupdate.Cancel()\n\t}\n\n\tupdate := NewUpdater(u.store, u.restarts)\n\tu.updates[id] = update\n\tgo func() {\n\t\tupdate.Run(ctx, service, tasks)\n\t\tu.l.Lock()\n\t\tif u.updates[id] == update {\n\t\t\tdelete(u.updates, id)\n\t\t}\n\t\tu.l.Unlock()\n\t}()\n}\n\n\/\/ CancelAll cancels all current updates.\nfunc (u *UpdateSupervisor) CancelAll() {\n\tu.l.Lock()\n\tdefer u.l.Unlock()\n\n\tfor _, update := range u.updates {\n\t\tupdate.Cancel()\n\t}\n}\n\n\/\/ Updater updates a set of tasks to a new version.\ntype Updater struct {\n\tstore *store.MemoryStore\n\twatchQueue *watch.Queue\n\trestarts *RestartSupervisor\n\n\t\/\/ stopChan signals to the state machine to stop running.\n\tstopChan chan struct{}\n\t\/\/ doneChan is closed when the state machine terminates.\n\tdoneChan chan struct{}\n}\n\n\/\/ NewUpdater creates a new Updater.\nfunc NewUpdater(store *store.MemoryStore, restartSupervisor *RestartSupervisor) *Updater {\n\treturn &Updater{\n\t\tstore: store,\n\t\twatchQueue: store.WatchQueue(),\n\t\trestarts: restartSupervisor,\n\t\tstopChan: make(chan struct{}),\n\t\tdoneChan: make(chan struct{}),\n\t}\n}\n\n\/\/ Cancel cancels the current update immediately. It blocks until the cancellation is confirmed.\nfunc (u *Updater) Cancel() {\n\tclose(u.stopChan)\n\t<-u.doneChan\n}\n\n\/\/ Run starts the update and returns only once its complete or cancelled.\nfunc (u *Updater) Run(ctx context.Context, service *api.Service, tasks []*api.Task) {\n\tdefer close(u.doneChan)\n\n\tdirtyTasks := []*api.Task{}\n\tfor _, t := range tasks {\n\t\tif !reflect.DeepEqual(service.Spec.Task, t.Spec) ||\n\t\t\t!reflect.DeepEqual(service.Endpoint, t.Endpoint) {\n\t\t\tdirtyTasks = append(dirtyTasks, t)\n\t\t}\n\t}\n\t\/\/ Abort immediately if all tasks are clean.\n\tif len(dirtyTasks) == 0 {\n\t\treturn\n\t}\n\n\tparallelism := 0\n\tif service.Spec.Update != nil {\n\t\tparallelism = int(service.Spec.Update.Parallelism)\n\t}\n\tif parallelism == 0 {\n\t\t\/\/ TODO(aluzzardi): We could try to optimize unlimited parallelism by performing updates in a single\n\t\t\/\/ goroutine using a batch transaction.\n\t\tparallelism = len(dirtyTasks)\n\t}\n\n\t\/\/ Start the workers.\n\ttaskQueue := make(chan *api.Task)\n\twg := sync.WaitGroup{}\n\twg.Add(parallelism)\n\tfor i := 0; i < parallelism; i++ {\n\t\tgo func() {\n\t\t\tu.worker(ctx, service, taskQueue)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\tfor _, t := range dirtyTasks {\n\t\t\/\/ Wait for a worker to pick up the task or abort the update, whichever comes first.\n\t\tselect {\n\t\tcase <-u.stopChan:\n\t\t\tbreak\n\n\t\tcase taskQueue <- t:\n\t\t}\n\t}\n\n\tclose(taskQueue)\n\twg.Wait()\n}\n\nfunc (u *Updater) worker(ctx context.Context, service *api.Service, queue <-chan *api.Task) {\n\tfor t := range queue {\n\t\tupdated := newTask(service, t.Slot)\n\t\tupdated.DesiredState = api.TaskStateReady\n\t\tif isGlobalService(service) {\n\t\t\tupdated.NodeID = t.NodeID\n\t\t}\n\n\t\tif err := u.updateTask(ctx, service, t, updated); err != nil {\n\t\t\tlog.G(ctx).WithError(err).WithField(\"task.id\", t.ID).Error(\"update failed\")\n\t\t}\n\n\t\tif service.Spec.Update != nil && (service.Spec.Update.Delay.Seconds != 0 || service.Spec.Update.Delay.Nanos != 0) {\n\t\t\tdelay, err := ptypes.Duration(&service.Spec.Update.Delay)\n\t\t\tif err != nil {\n\t\t\t\tlog.G(ctx).WithError(err).Error(\"invalid update delay\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase <-time.After(delay):\n\t\t\tcase <-u.stopChan:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (u *Updater) updateTask(ctx context.Context, service *api.Service, original, updated *api.Task) error {\n\tlog.G(ctx).Debugf(\"replacing %s with %s\", original.ID, updated.ID)\n\t\/\/ Kick off the watch before even creating the updated task. This is in order to avoid missing any event.\n\ttaskUpdates, cancel := state.Watch(u.watchQueue, state.EventUpdateTask{\n\t\tTask: &api.Task{ID: updated.ID},\n\t\tChecks: []state.TaskCheckFunc{state.TaskCheckID},\n\t})\n\tdefer cancel()\n\n\tvar delayStartCh <-chan struct{}\n\t\/\/ Atomically create the updated task and bring down the old one.\n\terr := u.store.Update(func(tx store.Tx) error {\n\t\tt := store.GetTask(tx, original.ID)\n\t\tif t == nil {\n\t\t\treturn fmt.Errorf(\"task %s not found while trying to update it\", original.ID)\n\t\t}\n\t\tif t.DesiredState > api.TaskStateRunning {\n\t\t\treturn fmt.Errorf(\"task %s was already shut down when reached by updater\", original.ID)\n\t\t}\n\t\tt.DesiredState = api.TaskStateShutdown\n\t\tif err := store.UpdateTask(tx, t); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := store.CreateTask(tx, updated); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Wait for the old task to stop or time out, and then set the new one\n\t\t\/\/ to RUNNING.\n\t\tdelayStartCh = u.restarts.DelayStart(ctx, tx, original, updated.ID, 0, true)\n\n\t\treturn nil\n\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t<-delayStartCh\n\n\t\/\/ Wait for the new task to come up.\n\t\/\/ TODO(aluzzardi): Consider adding a timeout here.\n\tfor {\n\t\tselect {\n\t\tcase e := <-taskUpdates:\n\t\t\tupdated = e.(state.EventUpdateTask).Task\n\t\t\tif updated.Status.State >= api.TaskStateRunning {\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase <-u.stopChan:\n\t\t\treturn nil\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package client\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"errors\"\n\n\t\"github.com\/Gamebuildr\/Gogeta\/pkg\/config\"\n\t\"github.com\/Gamebuildr\/Gogeta\/pkg\/gbcrypto\"\n\t\"github.com\/Gamebuildr\/Gogeta\/pkg\/publisher\"\n\t\"github.com\/Gamebuildr\/Gogeta\/pkg\/sourcesystem\"\n\t\"github.com\/Gamebuildr\/Gogeta\/pkg\/storehouse\"\n\t\"github.com\/Gamebuildr\/gamebuildr-compressor\/pkg\/compressor\"\n\t\"github.com\/Gamebuildr\/gamebuildr-credentials\/pkg\/credentials\"\n\t\"github.com\/Gamebuildr\/gamebuildr-lumberjack\/pkg\/logger\"\n\t\"github.com\/Gamebuildr\/gamebuildr-lumberjack\/pkg\/papertrail\"\n)\n\n\/\/ Gogeta is the source control manager implementation\ntype Gogeta struct {\n\tLog logger.Log\n\tSCM sourcesystem.SourceSystem\n\tStorage storehouse.StoreHouse\n\tPublisher publisher.Publish\n\tmessageCounter int\n\tCrypto gbcrypto.Interface\n}\n\ntype gogetaMessage struct {\n\tArchivePath string `json:\"archivepath\"`\n\tID string `json:\"id\"`\n\tProject string `json:\"project\"`\n\tEngineName string `json:\"enginename\"`\n\tEngineVersion string `json:\"engineversion\"`\n\tEnginePlatform string `json:\"engineplatform\"`\n\tBuildrID string `json:\"buildrid\"`\n\tRepoType string `json:\"repotype\"`\n\tRepoURL string `json:\"repourl\"`\n\tBuildOwner string `json:\"buildowner\"`\n\tMessageReceipt string\n}\n\ntype mrRobotMessage struct {\n\tArchivePath string `json:\"archivepath\"`\n\tProject string `json:\"project\"`\n\tEngineName string `json:\"enginename\"`\n\tEnginePlatform string `json:\"engineplatform\"`\n\tEngineVersion string `json:\"engineversion\"`\n\tBuildrID string `json:\"buildrid\"`\n\tBuildID string `json:\"buildid\"`\n}\n\ntype gamebuildrMessage struct {\n\tType string `json:\"type\"`\n\tMessage string `json:\"message\"`\n\tOrder int `json:\"order\"`\n\tBuildID string `json:\"buildid\"`\n\tChunk string `json:\"chunk\"`\n\tMessageID string `json:\"messageid\"`\n}\n\ntype buildResponse struct {\n\tSuccess bool `json:\"success\"`\n\tLogPath string `json:\"logpath\"`\n\tBuildrID string `json:\"buildrid\"`\n\tBuildID string `json:\"buildid\"`\n\tType string `json:\"type\"`\n\tMessage string `json:\"message\"`\n\tBuildPath string `json:\"buildpath\"`\n\tEnd int64 `json:\"end\"`\n\tChunk string `json:\"chunk\"`\n\tMessageID string `json:\"messageid\"`\n}\n\nconst buildrMessage string = \"BUILDR_MESSAGE\"\n\nconst logFileName string = \"gogeta_client_\"\n\nconst chunkID string = \"GOGETA\"\n\n\/\/ Supported SCM types\nconst git string = \"GIT\"\nconst github string = \"GITHUB\"\n\n\/\/ Start initializes a new gogeta client\nfunc (client *Gogeta) Start(devMode bool) {\n\t\/\/ logging system\n\tlog := logger.SystemLogger{}\n\tif devMode {\n\t\tfileLogger := logger.FileLogSave{\n\t\t\tLogFileName: logFileName,\n\t\t\tLogFileDir: os.Getenv(config.LogPath),\n\t\t}\n\t\tlog.LogSave = fileLogger\n\t} else {\n\t\tsaveSystem := &papertrail.PapertrailLogSave{\n\t\t\tApp: \"Gogeta\",\n\t\t\tURL: os.Getenv(config.LogEndpoint),\n\t\t}\n\t\tlog.LogSave = saveSystem\n\t}\n\n\t\/\/ storage system\n\tstore := &storehouse.Compressed{}\n\tzipCompress := &compressor.Zip{}\n\tcloudStorage := &storehouse.GoogleCloud{\n\t\tBucketName: os.Getenv(config.CodeRepoStorage),\n\t}\n\tstore.Compression = zipCompress\n\tstore.StorageSystem = cloudStorage\n\n\t\/\/ publisher system\n\tamazonSNS := publisher.AmazonNotification{}\n\tamazonSNS.Setup()\n\tnotifications := publisher.SimpleNotification{\n\t\tApplication: &amazonSNS,\n\t\tLog: log,\n\t}\n\n\t\/\/ Setup client\n\tclient.Log = log\n\tclient.Storage = store\n\tclient.Publisher = ¬ifications\n\tclient.Crypto = gbcrypto.Cryptography{}\n\n\t\/\/ Generate gcloud service .json file\n\tcreds := credentials.GcloudCredentials{}\n\tcreds.JSON = credentials.GcloudJSONCredentials{}\n\tif err := creds.GenerateAccount(); err != nil {\n\t\tclient.Log.Error(err.Error())\n\t}\n}\n\n\/\/ RunGogetaClient will run the complete gogeta scm system\nfunc (client *Gogeta) RunGogetaClient(messageString string) *sourcesystem.SourceRepository {\n\trepo := sourcesystem.SourceRepository{}\n\n\tif &messageString == nil || messageString == \"\" {\n\t\tclient.Log.Info(\"No data received to clone project\")\n\t\tclient.broadcastProgress(\"No data received to clone project\", \"\")\n\t\treturn &repo\n\t}\n\n\tvar message gogetaMessage\n\n\tclient.Log.Info(fmt.Sprintf(\"received message with data: %v\", messageString))\n\tif err := json.Unmarshal([]byte(messageString), &message); err != nil {\n\t\tclient.Log.Error(\"Failed to parse message data\")\n\t\treturn &repo\n\t}\n\n\tdefer client.sendBuildEndIfPanic(message)\n\n\trepoURL := client.Crypto.Decrypt(os.Getenv(config.GamebuildrEncryptionKey), message.RepoURL)\n\n\tclient.broadcastProgress(\"Source code download request received\", message.ID)\n\n\tif err := client.setVersionControl(message.RepoType); err != nil {\n\t\tclient.broadcastFailure(err.Error(), \"client.SCM value is nil\", message)\n\t\treturn &repo\n\t}\n\n\tclient.broadcastProgress(\"Downloading latest project source\", message.ID)\n\n\tif err := client.downloadSource(&repo, message.Project, repoURL); err != nil {\n\t\tredactedErr := strings.Replace(err.Error(), repo.SourceOrigin, \"*****\", -1)\n\t\tcloneErr := fmt.Sprintf(\"Cloning failed with the following error: %v\", redactedErr)\n\t\tclient.broadcastFailure(cloneErr, err.Error(), message)\n\t\treturn &repo\n\t}\n\tif repo.SourceLocation == \"\" {\n\t\tclient.broadcastFailure(\"Cloned source location does not exist\", \"repo.SourceLocation is missing repo path\", message)\n\t\treturn &repo\n\t}\n\n\tclient.broadcastProgress(\"Cloning project finished successfully\", message.ID)\n\tclient.broadcastProgress(\"Compressing and uploading project to storage system\", message.ID)\n\n\tif err := client.archiveRepo(&repo, &message); err != nil {\n\t\tclient.broadcastFailure(\"Archiving source failed\", err.Error(), message)\n\t\treturn &repo\n\t}\n\n\tclient.broadcastProgress(\"Notifying build system\", message.ID)\n\n\tif err := client.notifyMrRobot(&repo, message); err != nil {\n\t\tclient.broadcastFailure(\"Notifying build system failed\", err.Error(), message)\n\t\treturn &repo\n\t}\n\n\treturn &repo\n}\n\nfunc (client *Gogeta) sendBuildEndIfPanic(message gogetaMessage) {\n\tif r := recover(); r != nil {\n\t\terr := fmt.Sprintf(\"%v\", r)\n\t\tclient.broadcastFailure(err, \"An unexpected error has occured\", message)\n\t\tpanic(r)\n\t}\n}\n\nfunc (client *Gogeta) broadcastProgress(info string, buildID string) {\n\tlogInfo := fmt.Sprintf(\"Build ID: %v, Update: %v\", buildID, info)\n\n\tclient.Log.Info(logInfo)\n\tclient.sendGamebuildrMessage(info, buildID)\n}\n\nfunc (client *Gogeta) broadcastFailure(info string, err string, message gogetaMessage) {\n\tlogErr := fmt.Sprintf(\"Build ID: %v, Data: %v, Update: %v, Error: %v\", message.ID, message, info, err)\n\n\tclient.Log.Error(logErr)\n\tclient.sendBuildFailedMessage(info, message)\n}\n\nfunc (client *Gogeta) sendGamebuildrMessage(messageInfo string, buildID string) {\n\treponse := gamebuildrMessage{\n\t\tType: buildrMessage,\n\t\tMessage: messageInfo,\n\t\tBuildID: buildID,\n\t\tChunk: chunkID,\n\t\tMessageID: strconv.Itoa(client.messageCounter),\n\t}\n\n\tclient.messageCounter++\n\n\tjsonMessage, err := json.Marshal(reponse)\n\tif err != nil {\n\t\tclient.Log.Error(err.Error())\n\t\treturn\n\t}\n\tnotification := publisher.Message{\n\t\tJSON: jsonMessage,\n\t\tSubject: buildrMessage,\n\t\tEndpoint: os.Getenv(config.GamebuildrNotifications),\n\t}\n\tclient.Publisher.SendJSON(¬ification)\n}\n\nfunc (client *Gogeta) sendBuildFailedMessage(failMessage string, message gogetaMessage) {\n\tresponse := buildResponse{\n\t\tSuccess: false,\n\t\tBuildrID: message.BuildrID,\n\t\tBuildID: message.ID,\n\t\tType: buildrMessage,\n\t\tMessage: failMessage,\n\t\tEnd: getBuildEndTime(),\n\t\tChunk: chunkID,\n\t\tMessageID: strconv.Itoa(client.messageCounter),\n\t}\n\n\tclient.messageCounter++\n\n\tjsonMessage, err := json.Marshal(response)\n\tif err != nil {\n\t\tclient.Log.Error(err.Error())\n\t\treturn\n\t}\n\tnotification := publisher.Message{\n\t\tJSON: jsonMessage,\n\t\tSubject: buildrMessage,\n\t\tEndpoint: os.Getenv(config.GamebuildrNotifications),\n\t}\n\tclient.Publisher.SendJSON(¬ification)\n}\n\nfunc (client *Gogeta) setVersionControl(repoType string) error {\n\tif client.SCM != nil {\n\t\treturn nil\n\t}\n\n\tdataType := strings.ToUpper(repoType)\n\tscm := &sourcesystem.SystemSCM{}\n\tscm.Log = client.Log\n\tswitch dataType {\n\tcase github:\n\t\tscm.VersionControl = &sourcesystem.GitVersionControl{}\n\tcase git:\n\t\tscm.VersionControl = &sourcesystem.GitVersionControl{}\n\tdefault:\n\t\terr := fmt.Sprintf(\"SCM of type %v could not be found\", dataType)\n\t\treturn errors.New(err)\n\t}\n\tclient.SCM = scm\n\treturn nil\n}\n\nfunc (client *Gogeta) downloadSource(repo *sourcesystem.SourceRepository, project string, origin string) error {\n\tif project == \"\" || origin == \"\" {\n\t\treturn errors.New(\"No data found to download source\")\n\t}\n\n\trepo.ProjectName = project\n\trepo.SourceOrigin = origin\n\n\tif err := client.SCM.AddSource(repo); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (client *Gogeta) archiveRepo(repo *sourcesystem.SourceRepository, message *gogetaMessage) error {\n\tfileName := repo.ProjectName + \".zip\"\n\tarchive := path.Join(os.Getenv(\"GOPATH\"), \"repos\", fileName)\n\tarchiveDir := message.ID\n\tarchivePath := path.Join(archiveDir, fileName)\n\tstorageData := storehouse.StorageData{\n\t\tSource: repo.SourceLocation,\n\t\tTarget: archive,\n\t\tTargetDir: archiveDir,\n\t}\n\tif err := client.Storage.StoreFiles(&storageData); err != nil {\n\t\treturn err\n\t}\n\tmessage.ArchivePath = archivePath\n\treturn nil\n}\n\nfunc (client *Gogeta) notifyMrRobot(repo *sourcesystem.SourceRepository, message gogetaMessage) error {\n\tmessageToSend := mrRobotMessage{\n\t\tArchivePath: message.ArchivePath,\n\t\tBuildID: message.ID,\n\t\tProject: message.Project,\n\t\tEngineName: message.EngineName,\n\t\tEngineVersion: message.EngineVersion,\n\t\tEnginePlatform: message.EnginePlatform,\n\t\tBuildrID: message.BuildrID,\n\t}\n\tjsonMessage, err := json.Marshal(messageToSend)\n\tif err != nil {\n\t\treturn err\n\t}\n\tnotification := publisher.Message{\n\t\tJSON: jsonMessage,\n\t\tSubject: \"Buildr Request\",\n\t\tEndpoint: os.Getenv(config.MrrobotNotifications),\n\t}\n\tclient.Publisher.SendJSON(¬ification)\n\treturn nil\n}\n\nfunc getBuildEndTime() int64 {\n\treturn time.Now().UnixNano() \/ int64(time.Millisecond)\n}\n<commit_msg>Remove more creds from logs<commit_after>package client\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"errors\"\n\n\t\"github.com\/Gamebuildr\/Gogeta\/pkg\/config\"\n\t\"github.com\/Gamebuildr\/Gogeta\/pkg\/gbcrypto\"\n\t\"github.com\/Gamebuildr\/Gogeta\/pkg\/publisher\"\n\t\"github.com\/Gamebuildr\/Gogeta\/pkg\/sourcesystem\"\n\t\"github.com\/Gamebuildr\/Gogeta\/pkg\/storehouse\"\n\t\"github.com\/Gamebuildr\/gamebuildr-compressor\/pkg\/compressor\"\n\t\"github.com\/Gamebuildr\/gamebuildr-credentials\/pkg\/credentials\"\n\t\"github.com\/Gamebuildr\/gamebuildr-lumberjack\/pkg\/logger\"\n\t\"github.com\/Gamebuildr\/gamebuildr-lumberjack\/pkg\/papertrail\"\n)\n\n\/\/ Gogeta is the source control manager implementation\ntype Gogeta struct {\n\tLog logger.Log\n\tSCM sourcesystem.SourceSystem\n\tStorage storehouse.StoreHouse\n\tPublisher publisher.Publish\n\tmessageCounter int\n\tCrypto gbcrypto.Interface\n}\n\ntype gogetaMessage struct {\n\tArchivePath string `json:\"archivepath\"`\n\tID string `json:\"id\"`\n\tProject string `json:\"project\"`\n\tEngineName string `json:\"enginename\"`\n\tEngineVersion string `json:\"engineversion\"`\n\tEnginePlatform string `json:\"engineplatform\"`\n\tBuildrID string `json:\"buildrid\"`\n\tRepoType string `json:\"repotype\"`\n\tRepoURL string `json:\"repourl\"`\n\tBuildOwner string `json:\"buildowner\"`\n\tMessageReceipt string\n}\n\ntype mrRobotMessage struct {\n\tArchivePath string `json:\"archivepath\"`\n\tProject string `json:\"project\"`\n\tEngineName string `json:\"enginename\"`\n\tEnginePlatform string `json:\"engineplatform\"`\n\tEngineVersion string `json:\"engineversion\"`\n\tBuildrID string `json:\"buildrid\"`\n\tBuildID string `json:\"buildid\"`\n}\n\ntype gamebuildrMessage struct {\n\tType string `json:\"type\"`\n\tMessage string `json:\"message\"`\n\tOrder int `json:\"order\"`\n\tBuildID string `json:\"buildid\"`\n\tChunk string `json:\"chunk\"`\n\tMessageID string `json:\"messageid\"`\n}\n\ntype buildResponse struct {\n\tSuccess bool `json:\"success\"`\n\tLogPath string `json:\"logpath\"`\n\tBuildrID string `json:\"buildrid\"`\n\tBuildID string `json:\"buildid\"`\n\tType string `json:\"type\"`\n\tMessage string `json:\"message\"`\n\tBuildPath string `json:\"buildpath\"`\n\tEnd int64 `json:\"end\"`\n\tChunk string `json:\"chunk\"`\n\tMessageID string `json:\"messageid\"`\n}\n\nconst buildrMessage string = \"BUILDR_MESSAGE\"\n\nconst logFileName string = \"gogeta_client_\"\n\nconst chunkID string = \"GOGETA\"\n\n\/\/ Supported SCM types\nconst git string = \"GIT\"\nconst github string = \"GITHUB\"\n\n\/\/ Start initializes a new gogeta client\nfunc (client *Gogeta) Start(devMode bool) {\n\t\/\/ logging system\n\tlog := logger.SystemLogger{}\n\tif devMode {\n\t\tfileLogger := logger.FileLogSave{\n\t\t\tLogFileName: logFileName,\n\t\t\tLogFileDir: os.Getenv(config.LogPath),\n\t\t}\n\t\tlog.LogSave = fileLogger\n\t} else {\n\t\tsaveSystem := &papertrail.PapertrailLogSave{\n\t\t\tApp: \"Gogeta\",\n\t\t\tURL: os.Getenv(config.LogEndpoint),\n\t\t}\n\t\tlog.LogSave = saveSystem\n\t}\n\n\t\/\/ storage system\n\tstore := &storehouse.Compressed{}\n\tzipCompress := &compressor.Zip{}\n\tcloudStorage := &storehouse.GoogleCloud{\n\t\tBucketName: os.Getenv(config.CodeRepoStorage),\n\t}\n\tstore.Compression = zipCompress\n\tstore.StorageSystem = cloudStorage\n\n\t\/\/ publisher system\n\tamazonSNS := publisher.AmazonNotification{}\n\tamazonSNS.Setup()\n\tnotifications := publisher.SimpleNotification{\n\t\tApplication: &amazonSNS,\n\t\tLog: log,\n\t}\n\n\t\/\/ Setup client\n\tclient.Log = log\n\tclient.Storage = store\n\tclient.Publisher = ¬ifications\n\tclient.Crypto = gbcrypto.Cryptography{}\n\n\t\/\/ Generate gcloud service .json file\n\tcreds := credentials.GcloudCredentials{}\n\tcreds.JSON = credentials.GcloudJSONCredentials{}\n\tif err := creds.GenerateAccount(); err != nil {\n\t\tclient.Log.Error(err.Error())\n\t}\n}\n\n\/\/ RunGogetaClient will run the complete gogeta scm system\nfunc (client *Gogeta) RunGogetaClient(messageString string) *sourcesystem.SourceRepository {\n\trepo := sourcesystem.SourceRepository{}\n\n\tif &messageString == nil || messageString == \"\" {\n\t\tclient.Log.Info(\"No data received to clone project\")\n\t\tclient.broadcastProgress(\"No data received to clone project\", \"\")\n\t\treturn &repo\n\t}\n\n\tvar message gogetaMessage\n\n\tclient.Log.Info(fmt.Sprintf(\"received message with data: %v\", messageString))\n\tif err := json.Unmarshal([]byte(messageString), &message); err != nil {\n\t\tclient.Log.Error(\"Failed to parse message data\")\n\t\treturn &repo\n\t}\n\n\tdefer client.sendBuildEndIfPanic(message)\n\n\trepoURL := client.Crypto.Decrypt(os.Getenv(config.GamebuildrEncryptionKey), message.RepoURL)\n\n\tclient.broadcastProgress(\"Source code download request received\", message.ID)\n\n\tif err := client.setVersionControl(message.RepoType); err != nil {\n\t\tclient.broadcastFailure(err.Error(), \"client.SCM value is nil\", message)\n\t\treturn &repo\n\t}\n\n\tclient.broadcastProgress(\"Downloading latest project source\", message.ID)\n\n\tif err := client.downloadSource(&repo, message.Project, repoURL); err != nil {\n\t\tredactedErr := strings.Replace(err.Error(), repo.SourceOrigin, \"*****\", -1)\n\t\tcloneErr := fmt.Sprintf(\"Cloning failed with the following error: %v\", redactedErr)\n\t\tclient.broadcastFailure(cloneErr, redactedErr, message)\n\t\treturn &repo\n\t}\n\tif repo.SourceLocation == \"\" {\n\t\tclient.broadcastFailure(\"Cloned source location does not exist\", \"repo.SourceLocation is missing repo path\", message)\n\t\treturn &repo\n\t}\n\n\tclient.broadcastProgress(\"Cloning project finished successfully\", message.ID)\n\tclient.broadcastProgress(\"Compressing and uploading project to storage system\", message.ID)\n\n\tif err := client.archiveRepo(&repo, &message); err != nil {\n\t\tclient.broadcastFailure(\"Archiving source failed\", err.Error(), message)\n\t\treturn &repo\n\t}\n\n\tclient.broadcastProgress(\"Notifying build system\", message.ID)\n\n\tif err := client.notifyMrRobot(&repo, message); err != nil {\n\t\tclient.broadcastFailure(\"Notifying build system failed\", err.Error(), message)\n\t\treturn &repo\n\t}\n\n\treturn &repo\n}\n\nfunc (client *Gogeta) sendBuildEndIfPanic(message gogetaMessage) {\n\tif r := recover(); r != nil {\n\t\terr := fmt.Sprintf(\"%v\", r)\n\t\tclient.broadcastFailure(err, \"An unexpected error has occured\", message)\n\t\tpanic(r)\n\t}\n}\n\nfunc (client *Gogeta) broadcastProgress(info string, buildID string) {\n\tlogInfo := fmt.Sprintf(\"Build ID: %v, Update: %v\", buildID, info)\n\n\tclient.Log.Info(logInfo)\n\tclient.sendGamebuildrMessage(info, buildID)\n}\n\nfunc (client *Gogeta) broadcastFailure(info string, err string, message gogetaMessage) {\n\tlogErr := fmt.Sprintf(\"Build ID: %v, Data: %v, Update: %v, Error: %v\", message.ID, message, info, err)\n\n\tclient.Log.Error(logErr)\n\tclient.sendBuildFailedMessage(info, message)\n}\n\nfunc (client *Gogeta) sendGamebuildrMessage(messageInfo string, buildID string) {\n\treponse := gamebuildrMessage{\n\t\tType: buildrMessage,\n\t\tMessage: messageInfo,\n\t\tBuildID: buildID,\n\t\tChunk: chunkID,\n\t\tMessageID: strconv.Itoa(client.messageCounter),\n\t}\n\n\tclient.messageCounter++\n\n\tjsonMessage, err := json.Marshal(reponse)\n\tif err != nil {\n\t\tclient.Log.Error(err.Error())\n\t\treturn\n\t}\n\tnotification := publisher.Message{\n\t\tJSON: jsonMessage,\n\t\tSubject: buildrMessage,\n\t\tEndpoint: os.Getenv(config.GamebuildrNotifications),\n\t}\n\tclient.Publisher.SendJSON(¬ification)\n}\n\nfunc (client *Gogeta) sendBuildFailedMessage(failMessage string, message gogetaMessage) {\n\tresponse := buildResponse{\n\t\tSuccess: false,\n\t\tBuildrID: message.BuildrID,\n\t\tBuildID: message.ID,\n\t\tType: buildrMessage,\n\t\tMessage: failMessage,\n\t\tEnd: getBuildEndTime(),\n\t\tChunk: chunkID,\n\t\tMessageID: strconv.Itoa(client.messageCounter),\n\t}\n\n\tclient.messageCounter++\n\n\tjsonMessage, err := json.Marshal(response)\n\tif err != nil {\n\t\tclient.Log.Error(err.Error())\n\t\treturn\n\t}\n\tnotification := publisher.Message{\n\t\tJSON: jsonMessage,\n\t\tSubject: buildrMessage,\n\t\tEndpoint: os.Getenv(config.GamebuildrNotifications),\n\t}\n\tclient.Publisher.SendJSON(¬ification)\n}\n\nfunc (client *Gogeta) setVersionControl(repoType string) error {\n\tif client.SCM != nil {\n\t\treturn nil\n\t}\n\n\tdataType := strings.ToUpper(repoType)\n\tscm := &sourcesystem.SystemSCM{}\n\tscm.Log = client.Log\n\tswitch dataType {\n\tcase github:\n\t\tscm.VersionControl = &sourcesystem.GitVersionControl{}\n\tcase git:\n\t\tscm.VersionControl = &sourcesystem.GitVersionControl{}\n\tdefault:\n\t\terr := fmt.Sprintf(\"SCM of type %v could not be found\", dataType)\n\t\treturn errors.New(err)\n\t}\n\tclient.SCM = scm\n\treturn nil\n}\n\nfunc (client *Gogeta) downloadSource(repo *sourcesystem.SourceRepository, project string, origin string) error {\n\tif project == \"\" || origin == \"\" {\n\t\treturn errors.New(\"No data found to download source\")\n\t}\n\n\trepo.ProjectName = project\n\trepo.SourceOrigin = origin\n\n\tif err := client.SCM.AddSource(repo); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (client *Gogeta) archiveRepo(repo *sourcesystem.SourceRepository, message *gogetaMessage) error {\n\tfileName := repo.ProjectName + \".zip\"\n\tarchive := path.Join(os.Getenv(\"GOPATH\"), \"repos\", fileName)\n\tarchiveDir := message.ID\n\tarchivePath := path.Join(archiveDir, fileName)\n\tstorageData := storehouse.StorageData{\n\t\tSource: repo.SourceLocation,\n\t\tTarget: archive,\n\t\tTargetDir: archiveDir,\n\t}\n\tif err := client.Storage.StoreFiles(&storageData); err != nil {\n\t\treturn err\n\t}\n\tmessage.ArchivePath = archivePath\n\treturn nil\n}\n\nfunc (client *Gogeta) notifyMrRobot(repo *sourcesystem.SourceRepository, message gogetaMessage) error {\n\tmessageToSend := mrRobotMessage{\n\t\tArchivePath: message.ArchivePath,\n\t\tBuildID: message.ID,\n\t\tProject: message.Project,\n\t\tEngineName: message.EngineName,\n\t\tEngineVersion: message.EngineVersion,\n\t\tEnginePlatform: message.EnginePlatform,\n\t\tBuildrID: message.BuildrID,\n\t}\n\tjsonMessage, err := json.Marshal(messageToSend)\n\tif err != nil {\n\t\treturn err\n\t}\n\tnotification := publisher.Message{\n\t\tJSON: jsonMessage,\n\t\tSubject: \"Buildr Request\",\n\t\tEndpoint: os.Getenv(config.MrrobotNotifications),\n\t}\n\tclient.Publisher.SendJSON(¬ification)\n\treturn nil\n}\n\nfunc getBuildEndTime() int64 {\n\treturn time.Now().UnixNano() \/ int64(time.Millisecond)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage v1\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/google\/gofuzz\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/diff\"\n)\n\nfunc TestLabelSelectorAsSelector(t *testing.T) {\n\tmatchLabels := map[string]string{\"foo\": \"bar\"}\n\tmatchExpressions := []LabelSelectorRequirement{{\n\t\tKey: \"baz\",\n\t\tOperator: LabelSelectorOpIn,\n\t\tValues: []string{\"qux\", \"norf\"},\n\t}}\n\tmustParse := func(s string) labels.Selector {\n\t\tout, e := labels.Parse(s)\n\t\tif e != nil {\n\t\t\tpanic(e)\n\t\t}\n\t\treturn out\n\t}\n\ttc := []struct {\n\t\tin *LabelSelector\n\t\tout labels.Selector\n\t\texpectErr bool\n\t}{\n\t\t{in: nil, out: labels.Nothing()},\n\t\t{in: &LabelSelector{}, out: labels.Everything()},\n\t\t{\n\t\t\tin: &LabelSelector{MatchLabels: matchLabels},\n\t\t\tout: mustParse(\"foo=bar\"),\n\t\t},\n\t\t{\n\t\t\tin: &LabelSelector{MatchExpressions: matchExpressions},\n\t\t\tout: mustParse(\"baz in (norf,qux)\"),\n\t\t},\n\t\t{\n\t\t\tin: &LabelSelector{MatchLabels: matchLabels, MatchExpressions: matchExpressions},\n\t\t\tout: mustParse(\"baz in (norf,qux),foo=bar\"),\n\t\t},\n\t\t{\n\t\t\tin: &LabelSelector{\n\t\t\t\tMatchExpressions: []LabelSelectorRequirement{{\n\t\t\t\t\tKey: \"baz\",\n\t\t\t\t\tOperator: LabelSelectorOpExists,\n\t\t\t\t\tValues: []string{\"qux\", \"norf\"},\n\t\t\t\t}},\n\t\t\t},\n\t\t\texpectErr: true,\n\t\t},\n\t}\n\n\tfor i, tc := range tc {\n\t\tinCopy := tc.in.DeepCopy()\n\t\tout, err := LabelSelectorAsSelector(tc.in)\n\t\t\/\/ after calling LabelSelectorAsSelector, tc.in shouldn't be modified\n\t\tif !reflect.DeepEqual(inCopy, tc.in) {\n\t\t\tt.Errorf(\"[%v]expected:\\n\\t%#v\\nbut got:\\n\\t%#v\", i, inCopy, tc.in)\n\t\t}\n\t\tif err == nil && tc.expectErr {\n\t\t\tt.Errorf(\"[%v]expected error but got none.\", i)\n\t\t}\n\t\tif err != nil && !tc.expectErr {\n\t\t\tt.Errorf(\"[%v]did not expect error but got: %v\", i, err)\n\t\t}\n\t\t\/\/ fmt.Sprint() over String() as nil.String() will panic\n\t\tif fmt.Sprint(out) != fmt.Sprint(tc.out) {\n\t\t\tt.Errorf(\"[%v]expected:\\n\\t%s\\nbut got:\\n\\t%s\", i, fmt.Sprint(tc.out), fmt.Sprint(out))\n\t\t}\n\t}\n}\n\nfunc TestLabelSelectorAsMap(t *testing.T) {\n\tmatchLabels := map[string]string{\"foo\": \"bar\"}\n\tmatchExpressions := func(operator LabelSelectorOperator, values []string) []LabelSelectorRequirement {\n\t\treturn []LabelSelectorRequirement{{\n\t\t\tKey: \"baz\",\n\t\t\tOperator: operator,\n\t\t\tValues: values,\n\t\t}}\n\t}\n\n\ttests := []struct {\n\t\tin *LabelSelector\n\t\tout map[string]string\n\t\terrString string\n\t}{\n\t\t{in: nil, out: nil},\n\t\t{\n\t\t\tin: &LabelSelector{MatchLabels: matchLabels},\n\t\t\tout: map[string]string{\"foo\": \"bar\"},\n\t\t},\n\t\t{\n\t\t\tin: &LabelSelector{MatchLabels: matchLabels, MatchExpressions: matchExpressions(LabelSelectorOpIn, []string{\"norf\"})},\n\t\t\tout: map[string]string{\"foo\": \"bar\", \"baz\": \"norf\"},\n\t\t},\n\t\t{\n\t\t\tin: &LabelSelector{MatchExpressions: matchExpressions(LabelSelectorOpIn, []string{\"norf\"})},\n\t\t\tout: map[string]string{\"baz\": \"norf\"},\n\t\t},\n\t\t{\n\t\t\tin: &LabelSelector{MatchLabels: matchLabels, MatchExpressions: matchExpressions(LabelSelectorOpIn, []string{\"norf\", \"qux\"})},\n\t\t\tout: map[string]string{\"foo\": \"bar\"},\n\t\t\terrString: \"without a single value cannot be converted\",\n\t\t},\n\t\t{\n\t\t\tin: &LabelSelector{MatchExpressions: matchExpressions(LabelSelectorOpNotIn, []string{\"norf\", \"qux\"})},\n\t\t\tout: map[string]string{},\n\t\t\terrString: \"cannot be converted\",\n\t\t},\n\t\t{\n\t\t\tin: &LabelSelector{MatchLabels: matchLabels, MatchExpressions: matchExpressions(LabelSelectorOpExists, []string{})},\n\t\t\tout: map[string]string{\"foo\": \"bar\"},\n\t\t\terrString: \"cannot be converted\",\n\t\t},\n\t\t{\n\t\t\tin: &LabelSelector{MatchExpressions: matchExpressions(LabelSelectorOpDoesNotExist, []string{})},\n\t\t\tout: map[string]string{},\n\t\t\terrString: \"cannot be converted\",\n\t\t},\n\t}\n\n\tfor i, tc := range tests {\n\t\tout, err := LabelSelectorAsMap(tc.in)\n\t\tif err == nil && len(tc.errString) > 0 {\n\t\t\tt.Errorf(\"[%v]expected error but got none.\", i)\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil && len(tc.errString) == 0 {\n\t\t\tt.Errorf(\"[%v]did not expect error but got: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil && len(tc.errString) > 0 && !strings.Contains(err.Error(), tc.errString) {\n\t\t\tt.Errorf(\"[%v]expected error with %q but got: %v\", i, tc.errString, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(out, tc.out) {\n\t\t\tt.Errorf(\"[%v]expected:\\n\\t%+v\\nbut got:\\n\\t%+v\", i, tc.out, out)\n\t\t}\n\t}\n}\n\nfunc TestResetObjectMetaForStatus(t *testing.T) {\n\tmeta := &ObjectMeta{}\n\texistingMeta := &ObjectMeta{}\n\n\t\/\/ fuzz the existingMeta to set every field, no nils\n\tf := fuzz.New().NilChance(0).NumElements(1, 1).MaxDepth(10)\n\tf.Fuzz(existingMeta)\n\tResetObjectMetaForStatus(meta, existingMeta)\n\n\t\/\/ not all fields are stomped during the reset. These fields should not have been set. False\n\t\/\/ set them all to their zero values. Before you add anything to this list, consider whether or not\n\t\/\/ you're enforcing immutability (those are fine) and whether \/status should be able to update\n\t\/\/ these values (these are usually not fine).\n\n\t\/\/ generateName doesn't do anything after create\n\texistingMeta.SetGenerateName(\"\")\n\t\/\/ resourceVersion is enforced in validation and used during the storage update\n\texistingMeta.SetResourceVersion(\"\")\n\t\/\/ fields made immutable in validation\n\texistingMeta.SetUID(types.UID(\"\"))\n\texistingMeta.SetName(\"\")\n\texistingMeta.SetNamespace(\"\")\n\texistingMeta.SetClusterName(\"\")\n\texistingMeta.SetCreationTimestamp(Time{})\n\texistingMeta.SetDeletionTimestamp(nil)\n\texistingMeta.SetDeletionGracePeriodSeconds(nil)\n\texistingMeta.SetManagedFields(nil)\n\n\tif !reflect.DeepEqual(meta, existingMeta) {\n\t\tt.Error(diff.ObjectDiff(meta, existingMeta))\n\t}\n}\n\nfunc TestSetMetaDataLabel(t *testing.T) {\n\ttests := []struct {\n\t\tobj *ObjectMeta\n\t\tlabel string\n\t\tvalue string\n\t\twant map[string]string\n\t}{\n\t\t{\n\t\t\tobj: &ObjectMeta{},\n\t\t\tlabel: \"foo\",\n\t\t\tvalue: \"bar\",\n\t\t\twant: map[string]string{\"foo\": \"bar\"},\n\t\t},\n\t\t{\n\t\t\tobj: &ObjectMeta{Labels: map[string]string{\"foo\": \"bar\"}},\n\t\t\tlabel: \"foo\",\n\t\t\tvalue: \"baz\",\n\t\t\twant: map[string]string{\"foo\": \"baz\"},\n\t\t},\n\t\t{\n\t\t\tobj: &ObjectMeta{Labels: map[string]string{\"foo\": \"bar\"}},\n\t\t\tlabel: \"version\",\n\t\t\tvalue: \"1.0.0\",\n\t\t\twant: map[string]string{\"foo\": \"bar\", \"version\": \"1.0.0\"},\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tSetMetaDataLabel(tc.obj, tc.label, tc.value)\n\t\tif !reflect.DeepEqual(tc.obj.Labels, tc.want) {\n\t\t\tt.Errorf(\"got %v, want %v\", tc.obj.Labels, tc.want)\n\t\t}\n\t}\n}\n<commit_msg>Add benchmark for LabelSelectorAsSelector<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage v1\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/google\/gofuzz\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/diff\"\n)\n\nfunc TestLabelSelectorAsSelector(t *testing.T) {\n\tmatchLabels := map[string]string{\"foo\": \"bar\"}\n\tmatchExpressions := []LabelSelectorRequirement{{\n\t\tKey: \"baz\",\n\t\tOperator: LabelSelectorOpIn,\n\t\tValues: []string{\"qux\", \"norf\"},\n\t}}\n\tmustParse := func(s string) labels.Selector {\n\t\tout, e := labels.Parse(s)\n\t\tif e != nil {\n\t\t\tpanic(e)\n\t\t}\n\t\treturn out\n\t}\n\ttc := []struct {\n\t\tin *LabelSelector\n\t\tout labels.Selector\n\t\texpectErr bool\n\t}{\n\t\t{in: nil, out: labels.Nothing()},\n\t\t{in: &LabelSelector{}, out: labels.Everything()},\n\t\t{\n\t\t\tin: &LabelSelector{MatchLabels: matchLabels},\n\t\t\tout: mustParse(\"foo=bar\"),\n\t\t},\n\t\t{\n\t\t\tin: &LabelSelector{MatchExpressions: matchExpressions},\n\t\t\tout: mustParse(\"baz in (norf,qux)\"),\n\t\t},\n\t\t{\n\t\t\tin: &LabelSelector{MatchLabels: matchLabels, MatchExpressions: matchExpressions},\n\t\t\tout: mustParse(\"baz in (norf,qux),foo=bar\"),\n\t\t},\n\t\t{\n\t\t\tin: &LabelSelector{\n\t\t\t\tMatchExpressions: []LabelSelectorRequirement{{\n\t\t\t\t\tKey: \"baz\",\n\t\t\t\t\tOperator: LabelSelectorOpExists,\n\t\t\t\t\tValues: []string{\"qux\", \"norf\"},\n\t\t\t\t}},\n\t\t\t},\n\t\t\texpectErr: true,\n\t\t},\n\t}\n\n\tfor i, tc := range tc {\n\t\tinCopy := tc.in.DeepCopy()\n\t\tout, err := LabelSelectorAsSelector(tc.in)\n\t\t\/\/ after calling LabelSelectorAsSelector, tc.in shouldn't be modified\n\t\tif !reflect.DeepEqual(inCopy, tc.in) {\n\t\t\tt.Errorf(\"[%v]expected:\\n\\t%#v\\nbut got:\\n\\t%#v\", i, inCopy, tc.in)\n\t\t}\n\t\tif err == nil && tc.expectErr {\n\t\t\tt.Errorf(\"[%v]expected error but got none.\", i)\n\t\t}\n\t\tif err != nil && !tc.expectErr {\n\t\t\tt.Errorf(\"[%v]did not expect error but got: %v\", i, err)\n\t\t}\n\t\t\/\/ fmt.Sprint() over String() as nil.String() will panic\n\t\tif fmt.Sprint(out) != fmt.Sprint(tc.out) {\n\t\t\tt.Errorf(\"[%v]expected:\\n\\t%s\\nbut got:\\n\\t%s\", i, fmt.Sprint(tc.out), fmt.Sprint(out))\n\t\t}\n\t}\n}\n\nfunc BenchmarkLabelSelectorAsSelector(b *testing.B) {\n\tselector := &LabelSelector{\n\t\tMatchLabels: map[string]string{\n\t\t\t\"foo\": \"foo\",\n\t\t\t\"bar\": \"bar\",\n\t\t},\n\t\tMatchExpressions: []LabelSelectorRequirement{{\n\t\t\tKey: \"baz\",\n\t\t\tOperator: LabelSelectorOpExists,\n\t\t}},\n\t}\n\tb.StartTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t_, err := LabelSelectorAsSelector(selector)\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestLabelSelectorAsMap(t *testing.T) {\n\tmatchLabels := map[string]string{\"foo\": \"bar\"}\n\tmatchExpressions := func(operator LabelSelectorOperator, values []string) []LabelSelectorRequirement {\n\t\treturn []LabelSelectorRequirement{{\n\t\t\tKey: \"baz\",\n\t\t\tOperator: operator,\n\t\t\tValues: values,\n\t\t}}\n\t}\n\n\ttests := []struct {\n\t\tin *LabelSelector\n\t\tout map[string]string\n\t\terrString string\n\t}{\n\t\t{in: nil, out: nil},\n\t\t{\n\t\t\tin: &LabelSelector{MatchLabels: matchLabels},\n\t\t\tout: map[string]string{\"foo\": \"bar\"},\n\t\t},\n\t\t{\n\t\t\tin: &LabelSelector{MatchLabels: matchLabels, MatchExpressions: matchExpressions(LabelSelectorOpIn, []string{\"norf\"})},\n\t\t\tout: map[string]string{\"foo\": \"bar\", \"baz\": \"norf\"},\n\t\t},\n\t\t{\n\t\t\tin: &LabelSelector{MatchExpressions: matchExpressions(LabelSelectorOpIn, []string{\"norf\"})},\n\t\t\tout: map[string]string{\"baz\": \"norf\"},\n\t\t},\n\t\t{\n\t\t\tin: &LabelSelector{MatchLabels: matchLabels, MatchExpressions: matchExpressions(LabelSelectorOpIn, []string{\"norf\", \"qux\"})},\n\t\t\tout: map[string]string{\"foo\": \"bar\"},\n\t\t\terrString: \"without a single value cannot be converted\",\n\t\t},\n\t\t{\n\t\t\tin: &LabelSelector{MatchExpressions: matchExpressions(LabelSelectorOpNotIn, []string{\"norf\", \"qux\"})},\n\t\t\tout: map[string]string{},\n\t\t\terrString: \"cannot be converted\",\n\t\t},\n\t\t{\n\t\t\tin: &LabelSelector{MatchLabels: matchLabels, MatchExpressions: matchExpressions(LabelSelectorOpExists, []string{})},\n\t\t\tout: map[string]string{\"foo\": \"bar\"},\n\t\t\terrString: \"cannot be converted\",\n\t\t},\n\t\t{\n\t\t\tin: &LabelSelector{MatchExpressions: matchExpressions(LabelSelectorOpDoesNotExist, []string{})},\n\t\t\tout: map[string]string{},\n\t\t\terrString: \"cannot be converted\",\n\t\t},\n\t}\n\n\tfor i, tc := range tests {\n\t\tout, err := LabelSelectorAsMap(tc.in)\n\t\tif err == nil && len(tc.errString) > 0 {\n\t\t\tt.Errorf(\"[%v]expected error but got none.\", i)\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil && len(tc.errString) == 0 {\n\t\t\tt.Errorf(\"[%v]did not expect error but got: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil && len(tc.errString) > 0 && !strings.Contains(err.Error(), tc.errString) {\n\t\t\tt.Errorf(\"[%v]expected error with %q but got: %v\", i, tc.errString, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(out, tc.out) {\n\t\t\tt.Errorf(\"[%v]expected:\\n\\t%+v\\nbut got:\\n\\t%+v\", i, tc.out, out)\n\t\t}\n\t}\n}\n\nfunc TestResetObjectMetaForStatus(t *testing.T) {\n\tmeta := &ObjectMeta{}\n\texistingMeta := &ObjectMeta{}\n\n\t\/\/ fuzz the existingMeta to set every field, no nils\n\tf := fuzz.New().NilChance(0).NumElements(1, 1).MaxDepth(10)\n\tf.Fuzz(existingMeta)\n\tResetObjectMetaForStatus(meta, existingMeta)\n\n\t\/\/ not all fields are stomped during the reset. These fields should not have been set. False\n\t\/\/ set them all to their zero values. Before you add anything to this list, consider whether or not\n\t\/\/ you're enforcing immutability (those are fine) and whether \/status should be able to update\n\t\/\/ these values (these are usually not fine).\n\n\t\/\/ generateName doesn't do anything after create\n\texistingMeta.SetGenerateName(\"\")\n\t\/\/ resourceVersion is enforced in validation and used during the storage update\n\texistingMeta.SetResourceVersion(\"\")\n\t\/\/ fields made immutable in validation\n\texistingMeta.SetUID(types.UID(\"\"))\n\texistingMeta.SetName(\"\")\n\texistingMeta.SetNamespace(\"\")\n\texistingMeta.SetClusterName(\"\")\n\texistingMeta.SetCreationTimestamp(Time{})\n\texistingMeta.SetDeletionTimestamp(nil)\n\texistingMeta.SetDeletionGracePeriodSeconds(nil)\n\texistingMeta.SetManagedFields(nil)\n\n\tif !reflect.DeepEqual(meta, existingMeta) {\n\t\tt.Error(diff.ObjectDiff(meta, existingMeta))\n\t}\n}\n\nfunc TestSetMetaDataLabel(t *testing.T) {\n\ttests := []struct {\n\t\tobj *ObjectMeta\n\t\tlabel string\n\t\tvalue string\n\t\twant map[string]string\n\t}{\n\t\t{\n\t\t\tobj: &ObjectMeta{},\n\t\t\tlabel: \"foo\",\n\t\t\tvalue: \"bar\",\n\t\t\twant: map[string]string{\"foo\": \"bar\"},\n\t\t},\n\t\t{\n\t\t\tobj: &ObjectMeta{Labels: map[string]string{\"foo\": \"bar\"}},\n\t\t\tlabel: \"foo\",\n\t\t\tvalue: \"baz\",\n\t\t\twant: map[string]string{\"foo\": \"baz\"},\n\t\t},\n\t\t{\n\t\t\tobj: &ObjectMeta{Labels: map[string]string{\"foo\": \"bar\"}},\n\t\t\tlabel: \"version\",\n\t\t\tvalue: \"1.0.0\",\n\t\t\twant: map[string]string{\"foo\": \"bar\", \"version\": \"1.0.0\"},\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tSetMetaDataLabel(tc.obj, tc.label, tc.value)\n\t\tif !reflect.DeepEqual(tc.obj.Labels, tc.want) {\n\t\t\tt.Errorf(\"got %v, want %v\", tc.obj.Labels, tc.want)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Concurrently get env\/secrets<commit_after><|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\n\t\"github.com\/GoogleCloudPlatform\/k8s-stackdriver\/prometheus-to-sd\/flags\"\n\t\"github.com\/golang\/glog\"\n\tcore \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\tclientset \"k8s.io\/client-go\/kubernetes\"\n\t\"k8s.io\/client-go\/rest\"\n)\n\nconst (\n\tpodNamespace = \"kube-system\"\n\tnameLabel = \"k8s-app\"\n)\n\n\/\/ SourceConfigsFromDynamicSources takes pod specifications from the Kubernetes API and maps them to source configs.\nfunc SourceConfigsFromDynamicSources(gceConfig *GceConfig, sources []flags.Uri) ([]SourceConfig, error) {\n\tif len(sources) == 0 {\n\t\treturn nil, nil\n\t}\n\tsourceMap, err := validateSources(sources)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkubeApi, err := createKubernetesApiClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpodResponse, err := kubeApi.CoreV1().Pods(podNamespace).List(createOptionsForPodSelection(gceConfig.Instance, sourceMap))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn getConfigsFromPods(podResponse.Items, sourceMap), nil\n}\n\nfunc validateSources(sources flags.Uris) (map[string]url.URL, error) {\n\tsourceMap := make(map[string]url.URL)\n\tfor _, source := range sources {\n\t\tif source.Val.Hostname() != \"\" {\n\t\t\treturn nil, errors.New(\"hostname should be empty for all dynamic sources\")\n\t\t}\n\t\tif source.Key == \"\" {\n\t\t\treturn nil, errors.New(\"component name should NOT be empty for any dynamic source\")\n\t\t}\n\t\tif source.Val.Port() == \"\" {\n\t\t\treturn nil, errors.New(\"port should NOT be empty for any dynamic source\")\n\t\t}\n\t\tsourceMap[source.Key] = source.Val\n\t}\n\tif len(sourceMap) != len(sources) {\n\t\treturn nil, errors.New(\"source should have unique component names\")\n\t}\n\treturn sourceMap, nil\n}\n\nfunc createKubernetesApiClient() (clientset.Interface, error) {\n\tconf, err := rest.InClusterConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn clientset.NewForConfig(conf)\n}\n\nfunc createOptionsForPodSelection(nodeName string, sources map[string]url.URL) v1.ListOptions {\n\tvar nameList string\n\tfor key := range sources {\n\t\tnameList += key + \",\"\n\t}\n\tlabelSelector := fmt.Sprintf(\"%s in (%s)\", nameLabel, nameList)\n\treturn v1.ListOptions{\n\t\tFieldSelector: fmt.Sprintf(\"spec.nodeName=%s\", nodeName),\n\t\tLabelSelector: labelSelector,\n\t}\n}\n\nfunc getConfigsFromPods(pods []core.Pod, sources map[string]url.URL) []SourceConfig {\n\tvar sourceConfigs []SourceConfig\n\tfor _, pod := range pods {\n\t\tcomponentName := pod.Labels[nameLabel]\n\t\tsource, _ := sources[componentName]\n\t\tsourceConfig, err := mapToSourceConfig(componentName, source, pod.Status.PodIP, pod.Name, pod.Namespace)\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"could not create source config for pod %s: %v\", pod.Name, err)\n\t\t}\n\t\tsourceConfigs = append(sourceConfigs, *sourceConfig)\n\t}\n\treturn sourceConfigs\n}\n\nfunc mapToSourceConfig(componentName string, url url.URL, ip, podId, namespaceId string) (*SourceConfig, error) {\n\tport := url.Port()\n\twhitelisted := url.Query().Get(\"whitelisted\")\n\tpodIdLabel := url.Query().Get(\"podIdLabel\")\n\tnamespaceIdLabel := url.Query().Get(\"namespaceIdLabel\")\n\tcontainerNamelabel := url.Query().Get(\"containerNamelabel\")\n\tpodConfig := NewPodConfig(podId, namespaceId, podIdLabel, namespaceIdLabel, containerNamelabel)\n\treturn newSourceConfig(componentName, ip, port, url.Path, whitelisted, podConfig)\n}\n<commit_msg>Fix undefined sort order in dynamic sources.<commit_after>package config\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"sort\"\n\n\t\"github.com\/GoogleCloudPlatform\/k8s-stackdriver\/prometheus-to-sd\/flags\"\n\t\"github.com\/golang\/glog\"\n\tcore \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\tclientset \"k8s.io\/client-go\/kubernetes\"\n\t\"k8s.io\/client-go\/rest\"\n)\n\nconst (\n\tpodNamespace = \"kube-system\"\n\tnameLabel = \"k8s-app\"\n)\n\n\/\/ SourceConfigsFromDynamicSources takes pod specifications from the Kubernetes API and maps them to source configs.\nfunc SourceConfigsFromDynamicSources(gceConfig *GceConfig, sources []flags.Uri) ([]SourceConfig, error) {\n\tif len(sources) == 0 {\n\t\treturn nil, nil\n\t}\n\tsourceMap, err := validateSources(sources)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkubeApi, err := createKubernetesApiClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpodResponse, err := kubeApi.CoreV1().Pods(podNamespace).List(createOptionsForPodSelection(gceConfig.Instance, sourceMap))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn getConfigsFromPods(podResponse.Items, sourceMap), nil\n}\n\nfunc validateSources(sources flags.Uris) (map[string]url.URL, error) {\n\tsourceMap := make(map[string]url.URL)\n\tfor _, source := range sources {\n\t\tif source.Val.Hostname() != \"\" {\n\t\t\treturn nil, errors.New(\"hostname should be empty for all dynamic sources\")\n\t\t}\n\t\tif source.Key == \"\" {\n\t\t\treturn nil, errors.New(\"component name should NOT be empty for any dynamic source\")\n\t\t}\n\t\tif source.Val.Port() == \"\" {\n\t\t\treturn nil, errors.New(\"port should NOT be empty for any dynamic source\")\n\t\t}\n\t\tsourceMap[source.Key] = source.Val\n\t}\n\tif len(sourceMap) != len(sources) {\n\t\treturn nil, errors.New(\"source should have unique component names\")\n\t}\n\treturn sourceMap, nil\n}\n\nfunc createKubernetesApiClient() (clientset.Interface, error) {\n\tconf, err := rest.InClusterConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn clientset.NewForConfig(conf)\n}\n\nfunc createOptionsForPodSelection(nodeName string, sources map[string]url.URL) v1.ListOptions {\n\tvar nameList string\n\tvar keys []string\n\tfor key := range sources {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Strings(keys)\n\tfor _, key := range keys {\n\t\tnameList += key + \",\"\n\t}\n\tlabelSelector := fmt.Sprintf(\"%s in (%s)\", nameLabel, nameList)\n\treturn v1.ListOptions{\n\t\tFieldSelector: fmt.Sprintf(\"spec.nodeName=%s\", nodeName),\n\t\tLabelSelector: labelSelector,\n\t}\n}\n\nfunc getConfigsFromPods(pods []core.Pod, sources map[string]url.URL) []SourceConfig {\n\tvar sourceConfigs []SourceConfig\n\tfor _, pod := range pods {\n\t\tcomponentName := pod.Labels[nameLabel]\n\t\tsource, _ := sources[componentName]\n\t\tsourceConfig, err := mapToSourceConfig(componentName, source, pod.Status.PodIP, pod.Name, pod.Namespace)\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"could not create source config for pod %s: %v\", pod.Name, err)\n\t\t}\n\t\tsourceConfigs = append(sourceConfigs, *sourceConfig)\n\t}\n\treturn sourceConfigs\n}\n\nfunc mapToSourceConfig(componentName string, url url.URL, ip, podId, namespaceId string) (*SourceConfig, error) {\n\tport := url.Port()\n\twhitelisted := url.Query().Get(\"whitelisted\")\n\tpodIdLabel := url.Query().Get(\"podIdLabel\")\n\tnamespaceIdLabel := url.Query().Get(\"namespaceIdLabel\")\n\tcontainerNamelabel := url.Query().Get(\"containerNamelabel\")\n\tpodConfig := NewPodConfig(podId, namespaceId, podIdLabel, namespaceIdLabel, containerNamelabel)\n\treturn newSourceConfig(componentName, ip, port, url.Path, whitelisted, podConfig)\n}\n<|endoftext|>"} {"text":"<commit_before>\/* This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/. *\/\n\npackage availabilityset\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/r3labs\/terraform\/helper\/schema\"\n\n\taes \"github.com\/ernestio\/crypto\/aes\"\n\t\"github.com\/ernestio\/ernestprovider\/event\"\n\t\"github.com\/ernestio\/ernestprovider\/providers\/azure\"\n)\n\n\/\/ Event : This is the Ernest representation of an azure availability setb\ntype Event struct {\n\tevent.Base\n\tID string `json:\"id\"`\n\tName string `json:\"name\" validate:\"required\"`\n\tResourceGroupName string `json:\"resource_group_name\" validate:\"required\"`\n\tLocation string `json:\"location\"`\n\tPlatformUpdateDomainCount int `json:\"platform_update_domain_count\"`\n\tPlatformFaultDomainCount int `json:\"platform_fault_domain_count\"`\n\tManaged bool `json:\"managed\"`\n\tTags map[string]string `json:\"tags\"`\n\tClientID string `json:\"azure_client_id\"`\n\tClientSecret string `json:\"azure_client_secret\"`\n\tTenantID string `json:\"azure_tenant_id\"`\n\tSubscriptionID string `json:\"azure_subscription_id\"`\n\tEnvironment string `json:\"environment\"`\n\tErrorMessage string `json:\"error,omitempty\"`\n\tComponents []json.RawMessage `json:\"components\"`\n\tCryptoKey string `json:\"-\"`\n\tValidator *event.Validator `json:\"-\"`\n}\n\n\/\/ New : Constructor\nfunc New(subject, cryptoKey string, body []byte, val *event.Validator) (event.Event, error) {\n\tvar ev event.Resource\n\tev = &Event{CryptoKey: cryptoKey, Validator: val}\n\tbody = []byte(strings.Replace(string(body), `\"_component\":\"availability_sets\"`, `\"_component\":\"availability_set\"`, 1))\n\tif err := json.Unmarshal(body, &ev); err != nil {\n\t\terr := fmt.Errorf(\"Error on input message : %s\", err)\n\t\treturn nil, err\n\t}\n\n\treturn azure.New(subject, \"azurerm_availability_set\", body, val, ev)\n}\n\n\/\/ SetComponents : ....\nfunc (ev *Event) SetComponents(components []event.Event) {\n\tfor _, v := range components {\n\t\tev.Components = append(ev.Components, v.GetBody())\n\t}\n}\n\n\/\/ ValidateID : determines if the given id is valid for this resource type\nfunc (ev *Event) ValidateID(id string) bool {\n\tparts := strings.Split(strings.ToLower(id), \"\/\")\n\tif len(parts) != 8 {\n\t\treturn false\n\t}\n\tif parts[6] != \"microsoft.compute\" {\n\t\treturn false\n\t}\n\tif parts[7] != \"availabilitysets\" {\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/ SetID : id setter\nfunc (ev *Event) SetID(id string) {\n\tev.ID = id\n}\n\n\/\/ GetID : id getter\nfunc (ev *Event) GetID() string {\n\treturn ev.ID\n}\n\n\/\/ SetState : state setter\nfunc (ev *Event) SetState(state string) {\n\tev.State = state\n}\n\n\/\/ ResourceDataToEvent : Translates a ResourceData on a valid Ernest Event\nfunc (ev *Event) ResourceDataToEvent(d *schema.ResourceData) error {\n\tidParts := strings.Split(d.Id(), \"\/\")\n\tev.ID = d.Id()\n\tev.Name = idParts[len(idParts)-1]\n\tev.ComponentID = \"availability_set::\" + ev.Name\n\tev.ResourceGroupName = d.Get(\"resource_group_name\").(string)\n\tev.Location = d.Get(\"location\").(string)\n\tev.PlatformUpdateDomainCount = d.Get(\"platform_update_domain_count\").(int)\n\tev.PlatformFaultDomainCount = d.Get(\"platform_fault_domain_count\").(int)\n\n\ttags := make(map[string]string, 0)\n\tfor k, v := range d.Get(\"tags\").(map[string]interface{}) {\n\t\ttags[k] = v.(string)\n\t}\n\tev.Tags = tags\n\treturn nil\n}\n\n\/\/ EventToResourceData : Translates the current event on a valid ResourceData\nfunc (ev *Event) EventToResourceData(d *schema.ResourceData) error {\n\tcrypto := aes.New()\n\n\tencFields := make(map[string]string)\n\tencFields[\"subscription_id\"] = ev.SubscriptionID\n\tencFields[\"client_id\"] = ev.ClientID\n\tencFields[\"client_secret\"] = ev.ClientSecret\n\tencFields[\"tenant_id\"] = ev.TenantID\n\tencFields[\"environment\"] = ev.Environment\n\tfor k, v := range encFields {\n\t\tdec, err := crypto.Decrypt(v, ev.CryptoKey)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Field '%s' not valid : %s\", k, err)\n\t\t\tev.Log(\"error\", err.Error())\n\t\t\treturn err\n\t\t}\n\t\tif err := d.Set(k, dec); err != nil {\n\t\t\terr := fmt.Errorf(\"Field '%s' not valid : %s\", k, err)\n\t\t\tev.Log(\"error\", err.Error())\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfields := make(map[string]interface{})\n\tfields[\"name\"] = ev.Name\n\tfields[\"resource_group_name\"] = ev.ResourceGroupName\n\tfields[\"location\"] = ev.Location\n\tfields[\"platform_update_domain_count\"] = ev.PlatformUpdateDomainCount\n\tfields[\"platform_fault_domain_count\"] = ev.PlatformFaultDomainCount\n\tfields[\"tags\"] = ev.Tags\n\tfor k, v := range fields {\n\t\tif err := d.Set(k, v); err != nil {\n\t\t\terr := fmt.Errorf(\"Field '%s' not valid : %s\", k, err)\n\t\t\tev.Log(\"error\", err.Error())\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Clone : will mark the event as errored\nfunc (ev *Event) Clone() (event.Event, error) {\n\tbody, _ := json.Marshal(ev)\n\treturn New(ev.Subject, ev.CryptoKey, body, ev.Validator)\n}\n\n\/\/ Error : will mark the event as errored\nfunc (ev *Event) Error(err error) {\n\tev.ErrorMessage = err.Error()\n\tev.Body, err = json.Marshal(ev)\n}\n<commit_msg>Map managed availability set<commit_after>\/* This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/. *\/\n\npackage availabilityset\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/r3labs\/terraform\/helper\/schema\"\n\n\taes \"github.com\/ernestio\/crypto\/aes\"\n\t\"github.com\/ernestio\/ernestprovider\/event\"\n\t\"github.com\/ernestio\/ernestprovider\/providers\/azure\"\n)\n\n\/\/ Event : This is the Ernest representation of an azure availability setb\ntype Event struct {\n\tevent.Base\n\tID string `json:\"id\"`\n\tName string `json:\"name\" validate:\"required\"`\n\tResourceGroupName string `json:\"resource_group_name\" validate:\"required\"`\n\tLocation string `json:\"location\"`\n\tPlatformUpdateDomainCount int `json:\"platform_update_domain_count\"`\n\tPlatformFaultDomainCount int `json:\"platform_fault_domain_count\"`\n\tManaged bool `json:\"managed\"`\n\tTags map[string]string `json:\"tags\"`\n\tClientID string `json:\"azure_client_id\"`\n\tClientSecret string `json:\"azure_client_secret\"`\n\tTenantID string `json:\"azure_tenant_id\"`\n\tSubscriptionID string `json:\"azure_subscription_id\"`\n\tEnvironment string `json:\"environment\"`\n\tErrorMessage string `json:\"error,omitempty\"`\n\tComponents []json.RawMessage `json:\"components\"`\n\tCryptoKey string `json:\"-\"`\n\tValidator *event.Validator `json:\"-\"`\n}\n\n\/\/ New : Constructor\nfunc New(subject, cryptoKey string, body []byte, val *event.Validator) (event.Event, error) {\n\tvar ev event.Resource\n\tev = &Event{CryptoKey: cryptoKey, Validator: val}\n\tbody = []byte(strings.Replace(string(body), `\"_component\":\"availability_sets\"`, `\"_component\":\"availability_set\"`, 1))\n\tif err := json.Unmarshal(body, &ev); err != nil {\n\t\terr := fmt.Errorf(\"Error on input message : %s\", err)\n\t\treturn nil, err\n\t}\n\n\treturn azure.New(subject, \"azurerm_availability_set\", body, val, ev)\n}\n\n\/\/ SetComponents : ....\nfunc (ev *Event) SetComponents(components []event.Event) {\n\tfor _, v := range components {\n\t\tev.Components = append(ev.Components, v.GetBody())\n\t}\n}\n\n\/\/ ValidateID : determines if the given id is valid for this resource type\nfunc (ev *Event) ValidateID(id string) bool {\n\tparts := strings.Split(strings.ToLower(id), \"\/\")\n\tif len(parts) != 8 {\n\t\treturn false\n\t}\n\tif parts[6] != \"microsoft.compute\" {\n\t\treturn false\n\t}\n\tif parts[7] != \"availabilitysets\" {\n\t\treturn false\n\t}\n\treturn true\n}\n\n\/\/ SetID : id setter\nfunc (ev *Event) SetID(id string) {\n\tev.ID = id\n}\n\n\/\/ GetID : id getter\nfunc (ev *Event) GetID() string {\n\treturn ev.ID\n}\n\n\/\/ SetState : state setter\nfunc (ev *Event) SetState(state string) {\n\tev.State = state\n}\n\n\/\/ ResourceDataToEvent : Translates a ResourceData on a valid Ernest Event\nfunc (ev *Event) ResourceDataToEvent(d *schema.ResourceData) error {\n\tidParts := strings.Split(d.Id(), \"\/\")\n\tev.ID = d.Id()\n\tev.Name = idParts[len(idParts)-1]\n\tev.ComponentID = \"availability_set::\" + ev.Name\n\tev.ResourceGroupName = d.Get(\"resource_group_name\").(string)\n\tev.Location = d.Get(\"location\").(string)\n\tev.PlatformUpdateDomainCount = d.Get(\"platform_update_domain_count\").(int)\n\tev.PlatformFaultDomainCount = d.Get(\"platform_fault_domain_count\").(int)\n\tev.Managed = d.Get(\"managed\").(bool)\n\n\ttags := make(map[string]string, 0)\n\tfor k, v := range d.Get(\"tags\").(map[string]interface{}) {\n\t\ttags[k] = v.(string)\n\t}\n\tev.Tags = tags\n\treturn nil\n}\n\n\/\/ EventToResourceData : Translates the current event on a valid ResourceData\nfunc (ev *Event) EventToResourceData(d *schema.ResourceData) error {\n\tcrypto := aes.New()\n\n\tencFields := make(map[string]string)\n\tencFields[\"subscription_id\"] = ev.SubscriptionID\n\tencFields[\"client_id\"] = ev.ClientID\n\tencFields[\"client_secret\"] = ev.ClientSecret\n\tencFields[\"tenant_id\"] = ev.TenantID\n\tencFields[\"environment\"] = ev.Environment\n\tfor k, v := range encFields {\n\t\tdec, err := crypto.Decrypt(v, ev.CryptoKey)\n\t\tif err != nil {\n\t\t\terr := fmt.Errorf(\"Field '%s' not valid : %s\", k, err)\n\t\t\tev.Log(\"error\", err.Error())\n\t\t\treturn err\n\t\t}\n\t\tif err := d.Set(k, dec); err != nil {\n\t\t\terr := fmt.Errorf(\"Field '%s' not valid : %s\", k, err)\n\t\t\tev.Log(\"error\", err.Error())\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfields := make(map[string]interface{})\n\tfields[\"name\"] = ev.Name\n\tfields[\"resource_group_name\"] = ev.ResourceGroupName\n\tfields[\"location\"] = ev.Location\n\tfields[\"platform_update_domain_count\"] = ev.PlatformUpdateDomainCount\n\tfields[\"platform_fault_domain_count\"] = ev.PlatformFaultDomainCount\n\tfields[\"managed\"] = ev.Managed\n\tfields[\"tags\"] = ev.Tags\n\tfor k, v := range fields {\n\t\tif err := d.Set(k, v); err != nil {\n\t\t\terr := fmt.Errorf(\"Field '%s' not valid : %s\", k, err)\n\t\t\tev.Log(\"error\", err.Error())\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Clone : will mark the event as errored\nfunc (ev *Event) Clone() (event.Event, error) {\n\tbody, _ := json.Marshal(ev)\n\treturn New(ev.Subject, ev.CryptoKey, body, ev.Validator)\n}\n\n\/\/ Error : will mark the event as errored\nfunc (ev *Event) Error(err error) {\n\tev.ErrorMessage = err.Error()\n\tev.Body, err = json.Marshal(ev)\n}\n<|endoftext|>"} {"text":"<commit_before>package botReactions\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/bwmarrin\/discordgo\"\n\n\t\"github.com\/adayoung\/ada-bot\/settings\"\n)\n\ntype dice struct {\n\tTrigger string\n}\n\nfunc (d *dice) Help() string {\n\treturn fmt.Sprintf(\"Roll a dice! DnD style, %sdice xdy+z\", settings.Settings.Discord.BotPrefix)\n}\n\nfunc (d *dice) HelpDetail() string {\n\treturn d.Help()\n}\n\nvar diceRegexp = regexp.MustCompile(`(?i)([0-9]+)d([0-9]+)(?:\\+([0-9]+))?`)\n\nfunc (d *dice) Reaction(m *discordgo.Message, a *discordgo.Member, mType string) Reaction {\n\tvar response string\n\trequest := strings.TrimSpace(m.Content[len(settings.Settings.Discord.BotPrefix)+len(d.Trigger):])\n\tif !(len(request) > 0) {\n\t\trequest = \"1d6\"\n\t}\n\tdiceRoll := \"\"\n\ttotal := 0\n\tdMatch := diceRegexp.FindStringSubmatch(request)\n\tif len(dMatch) > 0 {\n\t\tnumDice, numSides, addNum, roll := dMatch[1], dMatch[2], dMatch[3], 0\n\t\tif _numDice, err := strconv.Atoi(numDice); err == nil {\n\t\t\tif _numDice > 20 {\n\t\t\t\tresponse = \"But I have small hands, I can't hold that many dice :frowning:\"\n\t\t\t}\n\t\t\tif _numSides, err := strconv.Atoi(numSides); err == nil {\n\t\t\t\tif _numSides > 32 {\n\t\t\t\t\tresponse = \"Wow those are strange die, I don't even know how to roll 'em :confused:\"\n\t\t\t\t}\n\t\t\t\tfor dice := 0; dice < _numDice; dice++ {\n\t\t\t\t\tif _numSides > 0 {\n\t\t\t\t\t\troll = rand.Intn(_numSides) + 1\n\t\t\t\t\t} else {\n\t\t\t\t\t\troll = 0\n\t\t\t\t\t}\n\t\t\t\t\tdiceRoll = fmt.Sprintf(\"%s %d\", diceRoll, roll)\n\t\t\t\t\ttotal += roll\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"error: %v\", err) \/\/ Non fatal error at strconv.Atoi() call\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Printf(\"error: %v\", err) \/\/ Non fatal error at strconv.Atoi() call\n\t\t}\n\n\t\tif len(addNum) > 0 {\n\t\t\tif _addNum, err := strconv.Atoi(addNum); err == nil {\n\t\t\t\ttotal += _addNum\n\t\t\t\tdiceRoll = fmt.Sprintf(\"%s %d\", diceRoll, _addNum)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"error: %v\", err) \/\/ Non fatal error at strconv.Atoi() call\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(diceRoll) > 0 {\n\t\tresponse = fmt.Sprintf(\"```Dice roll [%s]: %s\\tTotal: %d```\", request, diceRoll, total)\n\t}\n\treturn Reaction{Text: response}\n}\n\nfunc init() {\n\trand.Seed(time.Now().Unix())\n\n\t_dice := &dice{\n\t\tTrigger: \"dice\",\n\t}\n\taddReaction(_dice.Trigger, \"CREATE\", _dice)\n}\n<commit_msg>Dice be crazy!<commit_after>package botReactions\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/bwmarrin\/discordgo\"\n\n\t\"github.com\/adayoung\/ada-bot\/settings\"\n)\n\ntype dice struct {\n\tTrigger string\n}\n\nfunc (d *dice) Help() string {\n\treturn fmt.Sprintf(\"Roll a dice! DnD style, %sdice xdy+z\", settings.Settings.Discord.BotPrefix)\n}\n\nfunc (d *dice) HelpDetail() string {\n\treturn d.Help()\n}\n\nvar diceRegexp = regexp.MustCompile(`(?i)([0-9]+)d([0-9]+)(?:\\+([0-9]+))?`)\nvar validSides = [6]int{4, 6, 8, 10, 12, 20}\n\nfunc (d *dice) Reaction(m *discordgo.Message, a *discordgo.Member, mType string) Reaction {\n\tvar response string\n\trequest := strings.TrimSpace(m.Content[len(settings.Settings.Discord.BotPrefix)+len(d.Trigger):])\n\tif !(len(request) > 0) {\n\t\trequest = \"1d6\"\n\t}\n\tdiceRoll := \"\"\n\ttotal := 0\n\tdMatch := diceRegexp.FindStringSubmatch(request)\n\tif len(dMatch) > 0 {\n\t\tnumDice, numSides, addNum, roll := dMatch[1], dMatch[2], dMatch[3], 0\n\t\tif _numDice, err := strconv.Atoi(numDice); err == nil {\n\t\t\tif _numDice > 7 {\n\t\t\t\tresponse = \"But I have small hands, I can't hold that many dice :frowning:\"\n\t\t\t\treturn Reaction{Text: response}\n\t\t\t}\n\t\t\tif _numSides, err := strconv.Atoi(numSides); err == nil {\n\t\t\t\t_validNumSides := 0\n\t\t\t\tfor _, nSide := range validSides {\n\t\t\t\t\tif nSide == _numSides {\n\t\t\t\t\t\t_validNumSides = nSide\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif _validNumSides == 0 {\n\t\t\t\t\tresponse = \"Wow those are strange dice, I don't even know how to roll 'em :confused:\"\n\t\t\t\t\treturn Reaction{Text: response}\n\t\t\t\t}\n\t\t\t\tfor dice := 0; dice < _numDice; dice++ {\n\t\t\t\t\tif _numSides > 0 {\n\t\t\t\t\t\troll = rand.Intn(_numSides) + 1\n\t\t\t\t\t} else {\n\t\t\t\t\t\troll = 0\n\t\t\t\t\t}\n\t\t\t\t\tdiceRoll = fmt.Sprintf(\"%s %d\", diceRoll, roll)\n\t\t\t\t\ttotal += roll\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"error: %v\", err) \/\/ Non fatal error at strconv.Atoi() call\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Printf(\"error: %v\", err) \/\/ Non fatal error at strconv.Atoi() call\n\t\t}\n\n\t\tif len(addNum) > 0 {\n\t\t\tif _addNum, err := strconv.Atoi(addNum); err == nil {\n\t\t\t\tif _addNum > 99 {\n\t\t\t\t\tresponse = \"Are you trying to cheat here? :open_mouth:\"\n\t\t\t\t\treturn Reaction{Text: response}\n\t\t\t\t}\n\t\t\t\ttotal += _addNum\n\t\t\t\tdiceRoll = fmt.Sprintf(\"%s %d\", diceRoll, _addNum)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"error: %v\", err) \/\/ Non fatal error at strconv.Atoi() call\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(diceRoll) > 0 {\n\t\tresponse = fmt.Sprintf(\"```Dice roll [%s]: %s\\tTotal: %d```\", request, diceRoll, total)\n\t}\n\treturn Reaction{Text: response}\n}\n\nfunc init() {\n\trand.Seed(time.Now().Unix())\n\n\t_dice := &dice{\n\t\tTrigger: \"dice\",\n\t}\n\taddReaction(_dice.Trigger, \"CREATE\", _dice)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2022 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ddl_test\n\nimport (\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/pingcap\/failpoint\"\n\t\"github.com\/pingcap\/tidb\/ddl\"\n\t\"github.com\/pingcap\/tidb\/parser\/model\"\n\t\"github.com\/pingcap\/tidb\/sessionctx\/variable\"\n\t\"github.com\/pingcap\/tidb\/testkit\"\n\t\"github.com\/pingcap\/tidb\/util\"\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"golang.org\/x\/exp\/slices\"\n)\n\n\/\/ TestDDLScheduling tests the DDL scheduling. See Concurrent DDL RFC for the rules of DDL scheduling.\n\/\/ This test checks the chosen job records to see if there are wrong scheduling, if job A and job B cannot run concurrently,\n\/\/ then the all the record of job A must before or after job B, no cross record between these 2 jobs should be in between.\nfunc TestDDLScheduling(t *testing.T) {\n\tif !variable.EnableConcurrentDDL.Load() {\n\t\tt.Skipf(\"test requires concurrent ddl\")\n\t}\n\tstore, dom := testkit.CreateMockStoreAndDomain(t)\n\n\ttk := testkit.NewTestKit(t, store)\n\ttk.MustExec(\"use test\")\n\ttk.MustExec(\"CREATE TABLE e (id INT NOT NULL) PARTITION BY RANGE (id) (PARTITION p1 VALUES LESS THAN (50), PARTITION p2 VALUES LESS THAN (100));\")\n\ttk.MustExec(\"CREATE TABLE e2 (id INT NOT NULL);\")\n\ttk.MustExec(\"CREATE TABLE e3 (id INT NOT NULL);\")\n\n\td := dom.DDL()\n\n\tddlJobs := []string{\n\t\t\"alter table e2 add index idx(id)\",\n\t\t\"alter table e2 add index idx1(id)\",\n\t\t\"alter table e2 add index idx2(id)\",\n\t\t\"create table e5 (id int)\",\n\t\t\"ALTER TABLE e EXCHANGE PARTITION p1 WITH TABLE e2;\",\n\t\t\"alter table e add index idx(id)\",\n\t\t\"alter table e add partition (partition p3 values less than (150))\",\n\t\t\"create table e4 (id int)\",\n\t\t\"alter table e3 add index idx1(id)\",\n\t\t\"ALTER TABLE e EXCHANGE PARTITION p1 WITH TABLE e3;\",\n\t}\n\n\thook := &ddl.TestDDLCallback{}\n\tvar wg util.WaitGroupWrapper\n\twg.Add(1)\n\tvar once sync.Once\n\thook.OnGetJobBeforeExported = func(jobType string) {\n\t\tonce.Do(func() {\n\t\t\tfor i, job := range ddlJobs {\n\t\t\t\twg.Run(func() {\n\t\t\t\t\ttk := testkit.NewTestKit(t, store)\n\t\t\t\t\ttk.MustExec(\"use test\")\n\t\t\t\t\ttk.MustExec(\"set @@tidb_enable_exchange_partition=1\")\n\t\t\t\t\trecordSet, _ := tk.Exec(job)\n\t\t\t\t\tif recordSet != nil {\n\t\t\t\t\t\trequire.NoError(t, recordSet.Close())\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t\tfor {\n\t\t\t\t\ttime.Sleep(time.Millisecond * 100)\n\t\t\t\t\tjobs, err := ddl.GetAllDDLJobs(testkit.NewTestKit(t, store).Session(), nil)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\tif len(jobs) == i+1 {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\twg.Done()\n\t\t})\n\t}\n\n\trecord := make([]int64, 0, 16)\n\thook.OnGetJobAfterExported = func(jobType string, job *model.Job) {\n\t\t\/\/ record the job schedule order\n\t\trecord = append(record, job.ID)\n\t}\n\n\terr := failpoint.Enable(\"github.com\/pingcap\/tidb\/ddl\/mockRunJobTime\", `return(true)`)\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\terr := failpoint.Disable(\"github.com\/pingcap\/tidb\/ddl\/mockRunJobTime\")\n\t\trequire.NoError(t, err)\n\t}()\n\n\td.SetHook(hook)\n\twg.Wait()\n\n\t\/\/ sort all the job id.\n\tids := make(map[int64]struct{}, 16)\n\tfor _, id := range record {\n\t\tids[id] = struct{}{}\n\t}\n\n\tsortedIDs := make([]int64, 0, 16)\n\tfor id := range ids {\n\t\tsortedIDs = append(sortedIDs, id)\n\t}\n\tslices.Sort(sortedIDs)\n\n\t\/\/ map the job id to the DDL sequence.\n\t\/\/ sortedIDs may looks like [30, 32, 34, 36, ...], it is the same order with the job in `ddlJobs`, 30 is the first job in `ddlJobs`, 32 is second...\n\t\/\/ record may looks like [30, 30, 32, 32, 34, 32, 36, 34, ...]\n\t\/\/ and the we map the record to the DDL sequence, [0, 0, 1, 1, 2, 1, 3, 2, ...]\n\tfor i := range record {\n\t\tidx, b := slices.BinarySearch(sortedIDs, record[i])\n\t\trequire.True(t, b)\n\t\trecord[i] = int64(idx)\n\t}\n\n\tcheck(t, record, 0, 1, 2)\n\tcheck(t, record, 0, 4)\n\tcheck(t, record, 1, 4)\n\tcheck(t, record, 2, 4)\n\tcheck(t, record, 4, 5)\n\tcheck(t, record, 4, 6)\n\tcheck(t, record, 4, 9)\n\tcheck(t, record, 5, 6)\n\tcheck(t, record, 5, 9)\n\tcheck(t, record, 6, 9)\n\tcheck(t, record, 8, 9)\n}\n\n\/\/ check will check if there are any cross between ids.\n\/\/ e.g. if ids is [1, 2] this function checks all `1` is before or after than `2` in record.\nfunc check(t *testing.T, record []int64, ids ...int64) {\n\t\/\/ have return true if there are any `i` is before `j`, false if there are any `j` is before `i`.\n\thave := func(i, j int64) bool {\n\t\tfor _, id := range record {\n\t\t\tif id == i {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif id == j {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\trequire.FailNow(t, \"should not reach here\")\n\t\treturn false\n\t}\n\n\t\/\/ all checks if all `i` is before `j`.\n\tall := func(i, j int64) {\n\t\tmeet := false\n\t\tfor _, id := range record {\n\t\t\tif id == j {\n\t\t\t\tmeet = true\n\t\t\t}\n\t\t\trequire.False(t, meet && id == i)\n\t\t}\n\t}\n\n\tfor i := 0; i < len(ids)-1; i++ {\n\t\tfor j := i + 1; j < len(ids); j++ {\n\t\t\tif have(ids[i], ids[j]) {\n\t\t\t\tall(ids[i], ids[j])\n\t\t\t} else {\n\t\t\t\tall(ids[j], ids[i])\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>test: add information if test failed (#38234)<commit_after>\/\/ Copyright 2022 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ddl_test\n\nimport (\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/pingcap\/failpoint\"\n\t\"github.com\/pingcap\/tidb\/ddl\"\n\t\"github.com\/pingcap\/tidb\/parser\/model\"\n\t\"github.com\/pingcap\/tidb\/sessionctx\/variable\"\n\t\"github.com\/pingcap\/tidb\/testkit\"\n\t\"github.com\/pingcap\/tidb\/util\"\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"golang.org\/x\/exp\/slices\"\n)\n\n\/\/ TestDDLScheduling tests the DDL scheduling. See Concurrent DDL RFC for the rules of DDL scheduling.\n\/\/ This test checks the chosen job records to see if there are wrong scheduling, if job A and job B cannot run concurrently,\n\/\/ then the all the record of job A must before or after job B, no cross record between these 2 jobs should be in between.\nfunc TestDDLScheduling(t *testing.T) {\n\tif !variable.EnableConcurrentDDL.Load() {\n\t\tt.Skipf(\"test requires concurrent ddl\")\n\t}\n\tstore, dom := testkit.CreateMockStoreAndDomain(t)\n\n\ttk := testkit.NewTestKit(t, store)\n\ttk.MustExec(\"use test\")\n\ttk.MustExec(\"CREATE TABLE e (id INT NOT NULL) PARTITION BY RANGE (id) (PARTITION p1 VALUES LESS THAN (50), PARTITION p2 VALUES LESS THAN (100));\")\n\ttk.MustExec(\"CREATE TABLE e2 (id INT NOT NULL);\")\n\ttk.MustExec(\"CREATE TABLE e3 (id INT NOT NULL);\")\n\n\td := dom.DDL()\n\n\tddlJobs := []string{\n\t\t\"alter table e2 add index idx(id)\",\n\t\t\"alter table e2 add index idx1(id)\",\n\t\t\"alter table e2 add index idx2(id)\",\n\t\t\"create table e5 (id int)\",\n\t\t\"ALTER TABLE e EXCHANGE PARTITION p1 WITH TABLE e2;\",\n\t\t\"alter table e add index idx(id)\",\n\t\t\"alter table e add partition (partition p3 values less than (150))\",\n\t\t\"create table e4 (id int)\",\n\t\t\"alter table e3 add index idx1(id)\",\n\t\t\"ALTER TABLE e EXCHANGE PARTITION p1 WITH TABLE e3;\",\n\t}\n\n\thook := &ddl.TestDDLCallback{}\n\tvar wg util.WaitGroupWrapper\n\twg.Add(1)\n\tvar once sync.Once\n\thook.OnGetJobBeforeExported = func(jobType string) {\n\t\tonce.Do(func() {\n\t\t\tfor i, job := range ddlJobs {\n\t\t\t\twg.Run(func() {\n\t\t\t\t\ttk := testkit.NewTestKit(t, store)\n\t\t\t\t\ttk.MustExec(\"use test\")\n\t\t\t\t\ttk.MustExec(\"set @@tidb_enable_exchange_partition=1\")\n\t\t\t\t\trecordSet, _ := tk.Exec(job)\n\t\t\t\t\tif recordSet != nil {\n\t\t\t\t\t\trequire.NoError(t, recordSet.Close())\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t\tfor {\n\t\t\t\t\ttime.Sleep(time.Millisecond * 100)\n\t\t\t\t\tjobs, err := ddl.GetAllDDLJobs(testkit.NewTestKit(t, store).Session(), nil)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\tif len(jobs) == i+1 {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\twg.Done()\n\t\t})\n\t}\n\n\trecord := make([]int64, 0, 16)\n\thook.OnGetJobAfterExported = func(jobType string, job *model.Job) {\n\t\t\/\/ record the job schedule order\n\t\trecord = append(record, job.ID)\n\t}\n\n\terr := failpoint.Enable(\"github.com\/pingcap\/tidb\/ddl\/mockRunJobTime\", `return(true)`)\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\terr := failpoint.Disable(\"github.com\/pingcap\/tidb\/ddl\/mockRunJobTime\")\n\t\trequire.NoError(t, err)\n\t}()\n\n\td.SetHook(hook)\n\twg.Wait()\n\n\t\/\/ sort all the job id.\n\tids := make(map[int64]struct{}, 16)\n\tfor _, id := range record {\n\t\tids[id] = struct{}{}\n\t}\n\n\tsortedIDs := make([]int64, 0, 16)\n\tfor id := range ids {\n\t\tsortedIDs = append(sortedIDs, id)\n\t}\n\tslices.Sort(sortedIDs)\n\n\t\/\/ map the job id to the DDL sequence.\n\t\/\/ sortedIDs may looks like [30, 32, 34, 36, ...], it is the same order with the job in `ddlJobs`, 30 is the first job in `ddlJobs`, 32 is second...\n\t\/\/ record may looks like [30, 30, 32, 32, 34, 32, 36, 34, ...]\n\t\/\/ and the we map the record to the DDL sequence, [0, 0, 1, 1, 2, 1, 3, 2, ...]\n\tfor i := range record {\n\t\tidx, b := slices.BinarySearch(sortedIDs, record[i])\n\t\trequire.True(t, b)\n\t\trecord[i] = int64(idx)\n\t}\n\n\tcheck(t, record, 0, 1, 2)\n\tcheck(t, record, 0, 4)\n\tcheck(t, record, 1, 4)\n\tcheck(t, record, 2, 4)\n\tcheck(t, record, 4, 5)\n\tcheck(t, record, 4, 6)\n\tcheck(t, record, 4, 9)\n\tcheck(t, record, 5, 6)\n\tcheck(t, record, 5, 9)\n\tcheck(t, record, 6, 9)\n\tcheck(t, record, 8, 9)\n}\n\n\/\/ check will check if there are any cross between ids.\n\/\/ e.g. if ids is [1, 2] this function checks all `1` is before or after than `2` in record.\nfunc check(t *testing.T, record []int64, ids ...int64) {\n\t\/\/ have return true if there are any `i` is before `j`, false if there are any `j` is before `i`.\n\thave := func(i, j int64) bool {\n\t\tfor _, id := range record {\n\t\t\tif id == i {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif id == j {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\trequire.FailNow(t, \"should not reach here\", record)\n\t\treturn false\n\t}\n\n\t\/\/ all checks if all `i` is before `j`.\n\tall := func(i, j int64) {\n\t\tmeet := false\n\t\tfor _, id := range record {\n\t\t\tif id == j {\n\t\t\t\tmeet = true\n\t\t\t}\n\t\t\trequire.False(t, meet && id == i, record)\n\t\t}\n\t}\n\n\tfor i := 0; i < len(ids)-1; i++ {\n\t\tfor j := i + 1; j < len(ids); j++ {\n\t\t\tif have(ids[i], ids[j]) {\n\t\t\t\tall(ids[i], ids[j])\n\t\t\t} else {\n\t\t\t\tall(ids[j], ids[i])\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package common\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com\/flant\/dapp\/pkg\/build\"\n\t\"github.com\/flant\/dapp\/pkg\/git_repo\"\n\t\"github.com\/flant\/dapp\/pkg\/slug\"\n)\n\nfunc GetDeployTag(cmdData *CmdData, projectDir string) (string, error) {\n\toptionsCount := 0\n\tif len(*cmdData.Tag) > 0 {\n\t\toptionsCount += len(*cmdData.Tag)\n\t}\n\n\tif *cmdData.TagBranch {\n\t\toptionsCount++\n\t}\n\tif *cmdData.TagCommit {\n\t\toptionsCount++\n\t}\n\tif *cmdData.TagBuildID {\n\t\toptionsCount++\n\t}\n\tif *cmdData.TagCI {\n\t\toptionsCount++\n\t}\n\n\tif optionsCount > 1 {\n\t\treturn \"\", fmt.Errorf(\"exactly one tag should be specified for deploy\")\n\t}\n\n\topts, err := GetTagOptions(cmdData, projectDir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttags := []string{}\n\ttags = append(tags, opts.Tags...)\n\ttags = append(tags, opts.TagsByCI...)\n\ttags = append(tags, opts.TagsByGitBranch...)\n\ttags = append(tags, opts.TagsByGitCommit...)\n\ttags = append(tags, opts.TagsByGitTag...)\n\n\treturn tags[0], nil\n}\n\nfunc GetTagOptions(cmdData *CmdData, projectDir string) (build.TagOptions, error) {\n\temptyTags := true\n\n\topts := build.TagOptions{}\n\topts.Tags = *cmdData.Tag\n\tif len(*cmdData.Tag) > 0 {\n\t\temptyTags = false\n\t}\n\n\tif *cmdData.TagBranch {\n\t\tlocalGitRepo := &git_repo.Local{\n\t\t\tPath: projectDir,\n\t\t\tGitDir: path.Join(projectDir, \".git\"),\n\t\t}\n\n\t\tbranch, err := localGitRepo.HeadBranchName()\n\t\tif err != nil {\n\t\t\treturn build.TagOptions{}, fmt.Errorf(\"cannot detect local git branch for --tag-branch option: %s\", err)\n\t\t}\n\n\t\topts.TagsByGitBranch = append(opts.TagsByGitBranch, slug.DockerTag(branch))\n\t\temptyTags = false\n\t}\n\n\tif *cmdData.TagCommit {\n\t\tlocalGitRepo := &git_repo.Local{\n\t\t\tPath: projectDir,\n\t\t\tGitDir: path.Join(projectDir, \".git\"),\n\t\t}\n\n\t\tcommit, err := localGitRepo.HeadCommit()\n\t\tif err != nil {\n\t\t\treturn build.TagOptions{}, fmt.Errorf(\"cannot detect local git HEAD commit for --tag-commit option: %s\", err)\n\t\t}\n\n\t\topts.TagsByGitCommit = append(opts.TagsByGitCommit, commit)\n\t\temptyTags = false\n\t}\n\n\tif *cmdData.TagBuildID {\n\t\tvar buildID string\n\n\t\tif os.Getenv(\"GITLAB_CI\") != \"\" {\n\t\t\tbuildID = os.Getenv(\"CI_BUILD_ID\")\n\t\t\tif buildID == \"\" {\n\t\t\t\tbuildID = os.Getenv(\"CI_JOB_ID\")\n\t\t\t}\n\t\t} else if os.Getenv(\"TRAVIS\") != \"\" {\n\t\t\tbuildID = os.Getenv(\"TRAVIS_BUILD_NUMBER\")\n\t\t} else {\n\t\t\treturn build.TagOptions{}, fmt.Errorf(\"GITLAB_CI or TRAVIS environment variables has not been found for --tag-build-id option\")\n\t\t}\n\n\t\tif buildID != \"\" {\n\t\t\topts.TagsByCI = append(opts.TagsByCI, buildID)\n\t\t\temptyTags = false\n\t\t}\n\t}\n\n\tif *cmdData.TagCI {\n\t\tvar gitBranch, gitTag string\n\n\t\tif os.Getenv(\"GITLAB_CI\") != \"\" {\n\t\t\tgitTag = os.Getenv(\"CI_BUILD_TAG\")\n\t\t\tif gitTag != \"\" {\n\t\t\t\tgitTag = os.Getenv(\"CI_COMMIT_TAG\")\n\t\t\t}\n\n\t\t\tgitBranch = os.Getenv(\"CI_BUILD_REF_NAME\")\n\t\t\tif gitBranch != \"\" {\n\t\t\t\tgitBranch = os.Getenv(\"CI_COMMIT_REF_NAME\")\n\t\t\t}\n\t\t} else if os.Getenv(\"TRAVIS\") != \"\" {\n\t\t\tgitTag = os.Getenv(\"TRAVIS_TAG\")\n\t\t\tgitBranch = os.Getenv(\"TRAVIS_BRANCH\")\n\t\t} else {\n\t\t\treturn build.TagOptions{}, fmt.Errorf(\"GITLAB_CI or TRAVIS environment variables has not been found for --tag-ci option\")\n\t\t}\n\n\t\tif gitTag != \"\" {\n\t\t\topts.TagsByGitTag = append(opts.TagsByGitTag, slug.DockerTag(gitTag))\n\t\t\temptyTags = false\n\t\t}\n\t\tif gitBranch != \"\" {\n\t\t\topts.TagsByGitBranch = append(opts.TagsByGitBranch, slug.DockerTag(gitBranch))\n\t\t\temptyTags = false\n\t\t}\n\t}\n\n\tif emptyTags {\n\t\topts.Tags = append(opts.Tags, \"latest\")\n\t}\n\n\treturn opts, nil\n}\n<commit_msg>Validate `--tag` Docker tag parameter<commit_after>package common\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com\/flant\/dapp\/pkg\/build\"\n\t\"github.com\/flant\/dapp\/pkg\/git_repo\"\n\t\"github.com\/flant\/dapp\/pkg\/slug\"\n)\n\nfunc GetDeployTag(cmdData *CmdData, projectDir string) (string, error) {\n\toptionsCount := 0\n\tif len(*cmdData.Tag) > 0 {\n\t\toptionsCount += len(*cmdData.Tag)\n\t}\n\n\tif *cmdData.TagBranch {\n\t\toptionsCount++\n\t}\n\tif *cmdData.TagCommit {\n\t\toptionsCount++\n\t}\n\tif *cmdData.TagBuildID {\n\t\toptionsCount++\n\t}\n\tif *cmdData.TagCI {\n\t\toptionsCount++\n\t}\n\n\tif optionsCount > 1 {\n\t\treturn \"\", fmt.Errorf(\"exactly one tag should be specified for deploy\")\n\t}\n\n\topts, err := GetTagOptions(cmdData, projectDir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttags := []string{}\n\ttags = append(tags, opts.Tags...)\n\ttags = append(tags, opts.TagsByCI...)\n\ttags = append(tags, opts.TagsByGitBranch...)\n\ttags = append(tags, opts.TagsByGitCommit...)\n\ttags = append(tags, opts.TagsByGitTag...)\n\n\treturn tags[0], nil\n}\n\nfunc GetTagOptions(cmdData *CmdData, projectDir string) (build.TagOptions, error) {\n\temptyTags := true\n\n\topts := build.TagOptions{}\n\n\tfor _, tag := range *cmdData.Tag {\n\t\terr := slug.ValidateDockerTag(tag)\n\t\tif err != nil {\n\t\t\treturn build.TagOptions{}, fmt.Errorf(\"bad --tag parameter '%s' specified: %s\", tag, err)\n\t\t}\n\n\t\topts.Tags = append(opts.Tags, tag)\n\t\temptyTags = false\n\t}\n\n\tif *cmdData.TagBranch {\n\t\tlocalGitRepo := &git_repo.Local{\n\t\t\tPath: projectDir,\n\t\t\tGitDir: path.Join(projectDir, \".git\"),\n\t\t}\n\n\t\tbranch, err := localGitRepo.HeadBranchName()\n\t\tif err != nil {\n\t\t\treturn build.TagOptions{}, fmt.Errorf(\"cannot detect local git branch for --tag-branch option: %s\", err)\n\t\t}\n\n\t\topts.TagsByGitBranch = append(opts.TagsByGitBranch, slug.DockerTag(branch))\n\t\temptyTags = false\n\t}\n\n\tif *cmdData.TagCommit {\n\t\tlocalGitRepo := &git_repo.Local{\n\t\t\tPath: projectDir,\n\t\t\tGitDir: path.Join(projectDir, \".git\"),\n\t\t}\n\n\t\tcommit, err := localGitRepo.HeadCommit()\n\t\tif err != nil {\n\t\t\treturn build.TagOptions{}, fmt.Errorf(\"cannot detect local git HEAD commit for --tag-commit option: %s\", err)\n\t\t}\n\n\t\topts.TagsByGitCommit = append(opts.TagsByGitCommit, commit)\n\t\temptyTags = false\n\t}\n\n\tif *cmdData.TagBuildID {\n\t\tvar buildID string\n\n\t\tif os.Getenv(\"GITLAB_CI\") != \"\" {\n\t\t\tbuildID = os.Getenv(\"CI_BUILD_ID\")\n\t\t\tif buildID == \"\" {\n\t\t\t\tbuildID = os.Getenv(\"CI_JOB_ID\")\n\t\t\t}\n\t\t} else if os.Getenv(\"TRAVIS\") != \"\" {\n\t\t\tbuildID = os.Getenv(\"TRAVIS_BUILD_NUMBER\")\n\t\t} else {\n\t\t\treturn build.TagOptions{}, fmt.Errorf(\"GITLAB_CI or TRAVIS environment variables has not been found for --tag-build-id option\")\n\t\t}\n\n\t\tif buildID != \"\" {\n\t\t\topts.TagsByCI = append(opts.TagsByCI, buildID)\n\t\t\temptyTags = false\n\t\t}\n\t}\n\n\tif *cmdData.TagCI {\n\t\tvar gitBranch, gitTag string\n\n\t\tif os.Getenv(\"GITLAB_CI\") != \"\" {\n\t\t\tgitTag = os.Getenv(\"CI_BUILD_TAG\")\n\t\t\tif gitTag != \"\" {\n\t\t\t\tgitTag = os.Getenv(\"CI_COMMIT_TAG\")\n\t\t\t}\n\n\t\t\tgitBranch = os.Getenv(\"CI_BUILD_REF_NAME\")\n\t\t\tif gitBranch != \"\" {\n\t\t\t\tgitBranch = os.Getenv(\"CI_COMMIT_REF_NAME\")\n\t\t\t}\n\t\t} else if os.Getenv(\"TRAVIS\") != \"\" {\n\t\t\tgitTag = os.Getenv(\"TRAVIS_TAG\")\n\t\t\tgitBranch = os.Getenv(\"TRAVIS_BRANCH\")\n\t\t} else {\n\t\t\treturn build.TagOptions{}, fmt.Errorf(\"GITLAB_CI or TRAVIS environment variables has not been found for --tag-ci option\")\n\t\t}\n\n\t\tif gitTag != \"\" {\n\t\t\topts.TagsByGitTag = append(opts.TagsByGitTag, slug.DockerTag(gitTag))\n\t\t\temptyTags = false\n\t\t}\n\t\tif gitBranch != \"\" {\n\t\t\topts.TagsByGitBranch = append(opts.TagsByGitBranch, slug.DockerTag(gitBranch))\n\t\t\temptyTags = false\n\t\t}\n\t}\n\n\tif emptyTags {\n\t\topts.Tags = append(opts.Tags, \"latest\")\n\t}\n\n\treturn opts, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/cfhttp\"\n\t\"code.cloudfoundry.org\/cflager\"\n\t\"code.cloudfoundry.org\/consuladapter\"\n\t\"code.cloudfoundry.org\/debugserver\"\n\t\"code.cloudfoundry.org\/fileserver\/handlers\"\n\t\"code.cloudfoundry.org\/locket\"\n\t\"github.com\/cloudfoundry\/dropsonde\"\n\t\"github.com\/hashicorp\/consul\/api\"\n\t\"github.com\/pivotal-golang\/clock\"\n\t\"github.com\/pivotal-golang\/lager\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/grouper\"\n\t\"github.com\/tedsuo\/ifrit\/http_server\"\n\t\"github.com\/tedsuo\/ifrit\/sigmon\"\n)\n\nvar serverAddress = flag.String(\n\t\"address\",\n\t\"0.0.0.0:8080\",\n\t\"Specifies the address to bind to\",\n)\n\nvar staticDirectory = flag.String(\n\t\"staticDirectory\",\n\t\"\",\n\t\"Specifies the directory to serve local static files from\",\n)\n\nvar skipCertVerify = flag.Bool(\n\t\"skipCertVerify\",\n\tfalse,\n\t\"Skip SSL certificate verification\",\n)\n\nvar dropsondePort = flag.Int(\n\t\"dropsondePort\",\n\t3457,\n\t\"port the local metron agent is listening on\",\n)\n\nvar communicationTimeout = flag.Duration(\n\t\"communicationTimeout\",\n\t30*time.Second,\n\t\"Timeout applied to all HTTP requests.\",\n)\n\nvar consulCluster = flag.String(\n\t\"consulCluster\",\n\t\"\",\n\t\"Consul Agent URL\",\n)\n\nconst (\n\tccUploadDialTimeout = 10 * time.Second\n\tccUploadKeepAlive = 30 * time.Second\n\tccUploadTLSHandshakeTimeout = 10 * time.Second\n\tdropsondeOrigin = \"file_server\"\n)\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tdebugserver.AddFlags(flag.CommandLine)\n\tcflager.AddFlags(flag.CommandLine)\n\tflag.Parse()\n\n\tcfhttp.Initialize(*communicationTimeout)\n\n\tlogger, reconfigurableSink := cflager.New(\"file-server\")\n\n\tinitializeDropsonde(logger)\n\tconsulClient, err := consuladapter.NewClientFromUrl(*consulCluster)\n\tif err != nil {\n\t\tlogger.Fatal(\"new-client-failed\", err)\n\t}\n\n\tregistrationRunner := initializeRegistrationRunner(logger, consulClient, *serverAddress, clock.NewClock())\n\n\tmembers := grouper.Members{\n\t\t{\"file server\", initializeServer(logger)},\n\t\t{\"registration-runner\", registrationRunner},\n\t}\n\n\tif dbgAddr := debugserver.DebugAddress(flag.CommandLine); dbgAddr != \"\" {\n\t\tmembers = append(grouper.Members{\n\t\t\t{\"debug-server\", debugserver.Runner(dbgAddr, reconfigurableSink)},\n\t\t}, members...)\n\t}\n\n\tgroup := grouper.NewOrdered(os.Interrupt, members)\n\n\tmonitor := ifrit.Invoke(sigmon.New(group))\n\tlogger.Info(\"ready\")\n\n\terr = <-monitor.Wait()\n\tif err != nil {\n\t\tlogger.Error(\"exited-with-failure\", err)\n\t\tos.Exit(1)\n\t}\n\n\tlogger.Info(\"exited\")\n}\n\nfunc initializeDropsonde(logger lager.Logger) {\n\tdropsondeDestination := fmt.Sprint(\"localhost:\", *dropsondePort)\n\terr := dropsonde.Initialize(dropsondeDestination, dropsondeOrigin)\n\tif err != nil {\n\t\tlogger.Error(\"failed to initialize dropsonde: %v\", err)\n\t}\n}\n\nfunc initializeServer(logger lager.Logger) ifrit.Runner {\n\tif *staticDirectory == \"\" {\n\t\tlogger.Fatal(\"static-directory-missing\", nil)\n\t}\n\n\ttransport := &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDial: (&net.Dialer{\n\t\t\tTimeout: ccUploadDialTimeout,\n\t\t\tKeepAlive: ccUploadKeepAlive,\n\t\t}).Dial,\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tInsecureSkipVerify: *skipCertVerify,\n\t\t},\n\t\tTLSHandshakeTimeout: ccUploadTLSHandshakeTimeout,\n\t}\n\n\tpollerHttpClient := cfhttp.NewClient()\n\tpollerHttpClient.Transport = transport\n\n\tfileServerHandler, err := handlers.New(*staticDirectory, logger)\n\tif err != nil {\n\t\tlogger.Error(\"router-building-failed\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn http_server.New(*serverAddress, fileServerHandler)\n}\n\nfunc initializeRegistrationRunner(logger lager.Logger, consulClient consuladapter.Client, listenAddress string, clock clock.Clock) ifrit.Runner {\n\t_, portString, err := net.SplitHostPort(listenAddress)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed-invalid-listen-address\", err)\n\t}\n\tportNum, err := net.LookupPort(\"tcp\", portString)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed-invalid-listen-port\", err)\n\t}\n\n\tregistration := &api.AgentServiceRegistration{\n\t\tName: \"file-server\",\n\t\tPort: portNum,\n\t\tCheck: &api.AgentServiceCheck{\n\t\t\tTTL: \"3s\",\n\t\t},\n\t}\n\n\treturn locket.NewRegistrationRunner(logger, registration, consulClient, locket.RetryInterval, clock)\n}\n<commit_msg>Update import location of clock<commit_after>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/cfhttp\"\n\t\"code.cloudfoundry.org\/cflager\"\n\t\"code.cloudfoundry.org\/clock\"\n\t\"code.cloudfoundry.org\/consuladapter\"\n\t\"code.cloudfoundry.org\/debugserver\"\n\t\"code.cloudfoundry.org\/fileserver\/handlers\"\n\t\"code.cloudfoundry.org\/locket\"\n\t\"github.com\/cloudfoundry\/dropsonde\"\n\t\"github.com\/hashicorp\/consul\/api\"\n\t\"github.com\/pivotal-golang\/lager\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/grouper\"\n\t\"github.com\/tedsuo\/ifrit\/http_server\"\n\t\"github.com\/tedsuo\/ifrit\/sigmon\"\n)\n\nvar serverAddress = flag.String(\n\t\"address\",\n\t\"0.0.0.0:8080\",\n\t\"Specifies the address to bind to\",\n)\n\nvar staticDirectory = flag.String(\n\t\"staticDirectory\",\n\t\"\",\n\t\"Specifies the directory to serve local static files from\",\n)\n\nvar skipCertVerify = flag.Bool(\n\t\"skipCertVerify\",\n\tfalse,\n\t\"Skip SSL certificate verification\",\n)\n\nvar dropsondePort = flag.Int(\n\t\"dropsondePort\",\n\t3457,\n\t\"port the local metron agent is listening on\",\n)\n\nvar communicationTimeout = flag.Duration(\n\t\"communicationTimeout\",\n\t30*time.Second,\n\t\"Timeout applied to all HTTP requests.\",\n)\n\nvar consulCluster = flag.String(\n\t\"consulCluster\",\n\t\"\",\n\t\"Consul Agent URL\",\n)\n\nconst (\n\tccUploadDialTimeout = 10 * time.Second\n\tccUploadKeepAlive = 30 * time.Second\n\tccUploadTLSHandshakeTimeout = 10 * time.Second\n\tdropsondeOrigin = \"file_server\"\n)\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tdebugserver.AddFlags(flag.CommandLine)\n\tcflager.AddFlags(flag.CommandLine)\n\tflag.Parse()\n\n\tcfhttp.Initialize(*communicationTimeout)\n\n\tlogger, reconfigurableSink := cflager.New(\"file-server\")\n\n\tinitializeDropsonde(logger)\n\tconsulClient, err := consuladapter.NewClientFromUrl(*consulCluster)\n\tif err != nil {\n\t\tlogger.Fatal(\"new-client-failed\", err)\n\t}\n\n\tregistrationRunner := initializeRegistrationRunner(logger, consulClient, *serverAddress, clock.NewClock())\n\n\tmembers := grouper.Members{\n\t\t{\"file server\", initializeServer(logger)},\n\t\t{\"registration-runner\", registrationRunner},\n\t}\n\n\tif dbgAddr := debugserver.DebugAddress(flag.CommandLine); dbgAddr != \"\" {\n\t\tmembers = append(grouper.Members{\n\t\t\t{\"debug-server\", debugserver.Runner(dbgAddr, reconfigurableSink)},\n\t\t}, members...)\n\t}\n\n\tgroup := grouper.NewOrdered(os.Interrupt, members)\n\n\tmonitor := ifrit.Invoke(sigmon.New(group))\n\tlogger.Info(\"ready\")\n\n\terr = <-monitor.Wait()\n\tif err != nil {\n\t\tlogger.Error(\"exited-with-failure\", err)\n\t\tos.Exit(1)\n\t}\n\n\tlogger.Info(\"exited\")\n}\n\nfunc initializeDropsonde(logger lager.Logger) {\n\tdropsondeDestination := fmt.Sprint(\"localhost:\", *dropsondePort)\n\terr := dropsonde.Initialize(dropsondeDestination, dropsondeOrigin)\n\tif err != nil {\n\t\tlogger.Error(\"failed to initialize dropsonde: %v\", err)\n\t}\n}\n\nfunc initializeServer(logger lager.Logger) ifrit.Runner {\n\tif *staticDirectory == \"\" {\n\t\tlogger.Fatal(\"static-directory-missing\", nil)\n\t}\n\n\ttransport := &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDial: (&net.Dialer{\n\t\t\tTimeout: ccUploadDialTimeout,\n\t\t\tKeepAlive: ccUploadKeepAlive,\n\t\t}).Dial,\n\t\tTLSClientConfig: &tls.Config{\n\t\t\tInsecureSkipVerify: *skipCertVerify,\n\t\t},\n\t\tTLSHandshakeTimeout: ccUploadTLSHandshakeTimeout,\n\t}\n\n\tpollerHttpClient := cfhttp.NewClient()\n\tpollerHttpClient.Transport = transport\n\n\tfileServerHandler, err := handlers.New(*staticDirectory, logger)\n\tif err != nil {\n\t\tlogger.Error(\"router-building-failed\", err)\n\t\tos.Exit(1)\n\t}\n\n\treturn http_server.New(*serverAddress, fileServerHandler)\n}\n\nfunc initializeRegistrationRunner(logger lager.Logger, consulClient consuladapter.Client, listenAddress string, clock clock.Clock) ifrit.Runner {\n\t_, portString, err := net.SplitHostPort(listenAddress)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed-invalid-listen-address\", err)\n\t}\n\tportNum, err := net.LookupPort(\"tcp\", portString)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed-invalid-listen-port\", err)\n\t}\n\n\tregistration := &api.AgentServiceRegistration{\n\t\tName: \"file-server\",\n\t\tPort: portNum,\n\t\tCheck: &api.AgentServiceCheck{\n\t\t\tTTL: \"3s\",\n\t\t},\n\t}\n\n\treturn locket.NewRegistrationRunner(logger, registration, consulClient, locket.RetryInterval, clock)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nThis application handles all the startup and connection scaffolding for\nrunning a gRPC server serving the APIService as defined in\nfrontendapi\/proto\/frontend.pb.go\n\nAll the actual important bits are in the API Server source code: apisrv\/apisrv.go\n\nCopyright 2018 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\npackage main\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/open-match\/cmd\/frontendapi\/apisrv\"\n\t\"github.com\/GoogleCloudPlatform\/open-match\/config\"\n\t\"github.com\/GoogleCloudPlatform\/open-match\/internal\/metrics\"\n\n\t\"github.com\/gomodule\/redigo\/redis\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/viper\"\n\t\"go.opencensus.io\/plugin\/ocgrpc\"\n)\n\nvar (\n\t\/\/ Logrus structured logging setup\n\tfeLogFields = log.Fields{\n\t\t\"app\": \"openmatch\",\n\t\t\"component\": \"backend\",\n\t\t\"caller\": \"backendapi\/main.go\",\n\t}\n\tfeLog = log.WithFields(feLogFields)\n\n\t\/\/ Viper config management setup\n\tcfg = viper.New()\n\terr = errors.New(\"\")\n)\n\nfunc init() {\n\t\/\/ Logrus structured logging initialization\n\t\/\/ Add a hook to the logger to auto-count log lines for metrics output thru OpenCensus\n\tlog.AddHook(metrics.NewHook(apisrv.FeLogLines, apisrv.KeySeverity))\n\n\t\/\/ Viper config management initialization\n\tcfg, err = config.Read()\n\tif err != nil {\n\t\tfeLog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Error(\"Unable to load config file\")\n\t}\n\n\tif cfg.GetBool(\"debug\") == true {\n\t\tlog.SetLevel(log.DebugLevel) \/\/ debug only, verbose - turn off in production!\n\t\tfeLog.Warn(\"Debug logging configured. Not recommended for production!\")\n\t}\n\n\t\/\/ Configure OpenCensus exporter to Prometheus\n\t\/\/ metrics.ConfigureOpenCensusPrometheusExporter expects that every OpenCensus view you\n\t\/\/ want to register is in an array, so append any views you want from other\n\t\/\/ packages to a single array here.\n\tocServerViews := apisrv.DefaultFrontendAPIViews \/\/ FrontendAPI OpenCensus views.\n\tocServerViews = append(ocServerViews, ocgrpc.DefaultServerViews...) \/\/ gRPC OpenCensus views.\n\tocServerViews = append(ocServerViews, config.CfgVarCountView) \/\/ config loader view.\n\t\/\/ Waiting on https:\/\/github.com\/opencensus-integrations\/redigo\/pull\/1\n\t\/\/ ocServerViews = append(ocServerViews, redis.ObservabilityMetricViews...) \/\/ redis OpenCensus views.\n\tfeLog.WithFields(log.Fields{\"viewscount\": len(ocServerViews)}).Info(\"Loaded OpenCensus views\")\n\tmetrics.ConfigureOpenCensusPrometheusExporter(cfg, ocServerViews)\n}\n\nfunc main() {\n\n\t\/\/ Connect to redis\n\tpool := redisConnect(cfg)\n\tdefer pool.Close()\n\n\t\/\/ Instantiate the gRPC server with the connections we've made\n\tfeLog.WithFields(log.Fields{\"testfield\": \"test\"}).Info(\"Attempting to start gRPC server\")\n\tsrv := apisrv.New(cfg, pool)\n\n\t\/\/ Run the gRPC server\n\terr := srv.Open()\n\tif err != nil {\n\t\tfeLog.WithFields(log.Fields{\"error\": err.Error()}).Fatal(\"Failed to start gRPC server\")\n\t}\n\n\t\/\/ Exit when we see a signal\n\tterminate := make(chan os.Signal, 1)\n\tsignal.Notify(terminate, os.Interrupt)\n\t<-terminate\n\tfeLog.Info(\"Shutting down gRPC server\")\n}\n\n\/\/ redisConnect reads the configuration and attempts to instantiate a redis connection\n\/\/ pool based on the configured hostname and port.\n\/\/ TODO: needs to be reworked to use redis sentinel when we're ready to support it.\nfunc redisConnect(cfg *viper.Viper) *redis.Pool {\n\n\t\/\/ As per https:\/\/www.iana.org\/assignments\/uri-schemes\/prov\/redis\n\t\/\/ redis:\/\/user:secret@localhost:6379\/0?foo=bar&qux=baz\n\tredisURL := \"redis:\/\/\" + cfg.GetString(\"redis.hostname\") + \":\" + cfg.GetString(\"redis.port\")\n\t\/\/ TODO: check if auth details are in the config, and append them if they\n\t\/\/ are. Right now, assumes your redis instance is unsecured!\n\n\tfeLog.WithFields(log.Fields{\"redisURL\": redisURL}).Info(\"Attempting to connect to Redis\")\n\tpool := redis.Pool{\n\t\tMaxIdle: 3,\n\t\tMaxActive: 0,\n\t\tIdleTimeout: 60 * time.Second,\n\t\tDial: func() (redis.Conn, error) { return redis.DialURL(redisURL) },\n\t}\n\n\tfeLog.Info(\"Connected to Redis\")\n\treturn &pool\n}\n<commit_msg>Fix mislabeled components in log lines<commit_after>\/*\nThis application handles all the startup and connection scaffolding for\nrunning a gRPC server serving the APIService as defined in\nfrontendapi\/proto\/frontend.pb.go\n\nAll the actual important bits are in the API Server source code: apisrv\/apisrv.go\n\nCopyright 2018 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\npackage main\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/open-match\/cmd\/frontendapi\/apisrv\"\n\t\"github.com\/GoogleCloudPlatform\/open-match\/config\"\n\t\"github.com\/GoogleCloudPlatform\/open-match\/internal\/metrics\"\n\n\t\"github.com\/gomodule\/redigo\/redis\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/viper\"\n\t\"go.opencensus.io\/plugin\/ocgrpc\"\n)\n\nvar (\n\t\/\/ Logrus structured logging setup\n\tfeLogFields = log.Fields{\n\t\t\"app\": \"openmatch\",\n\t\t\"component\": \"frontend\",\n\t\t\"caller\": \"frontendapi\/main.go\",\n\t}\n\tfeLog = log.WithFields(feLogFields)\n\n\t\/\/ Viper config management setup\n\tcfg = viper.New()\n\terr = errors.New(\"\")\n)\n\nfunc init() {\n\t\/\/ Logrus structured logging initialization\n\t\/\/ Add a hook to the logger to auto-count log lines for metrics output thru OpenCensus\n\tlog.AddHook(metrics.NewHook(apisrv.FeLogLines, apisrv.KeySeverity))\n\n\t\/\/ Viper config management initialization\n\tcfg, err = config.Read()\n\tif err != nil {\n\t\tfeLog.WithFields(log.Fields{\n\t\t\t\"error\": err.Error(),\n\t\t}).Error(\"Unable to load config file\")\n\t}\n\n\tif cfg.GetBool(\"debug\") == true {\n\t\tlog.SetLevel(log.DebugLevel) \/\/ debug only, verbose - turn off in production!\n\t\tfeLog.Warn(\"Debug logging configured. Not recommended for production!\")\n\t}\n\n\t\/\/ Configure OpenCensus exporter to Prometheus\n\t\/\/ metrics.ConfigureOpenCensusPrometheusExporter expects that every OpenCensus view you\n\t\/\/ want to register is in an array, so append any views you want from other\n\t\/\/ packages to a single array here.\n\tocServerViews := apisrv.DefaultFrontendAPIViews \/\/ FrontendAPI OpenCensus views.\n\tocServerViews = append(ocServerViews, ocgrpc.DefaultServerViews...) \/\/ gRPC OpenCensus views.\n\tocServerViews = append(ocServerViews, config.CfgVarCountView) \/\/ config loader view.\n\t\/\/ Waiting on https:\/\/github.com\/opencensus-integrations\/redigo\/pull\/1\n\t\/\/ ocServerViews = append(ocServerViews, redis.ObservabilityMetricViews...) \/\/ redis OpenCensus views.\n\tfeLog.WithFields(log.Fields{\"viewscount\": len(ocServerViews)}).Info(\"Loaded OpenCensus views\")\n\tmetrics.ConfigureOpenCensusPrometheusExporter(cfg, ocServerViews)\n}\n\nfunc main() {\n\n\t\/\/ Connect to redis\n\tpool := redisConnect(cfg)\n\tdefer pool.Close()\n\n\t\/\/ Instantiate the gRPC server with the connections we've made\n\tfeLog.WithFields(log.Fields{\"testfield\": \"test\"}).Info(\"Attempting to start gRPC server\")\n\tsrv := apisrv.New(cfg, pool)\n\n\t\/\/ Run the gRPC server\n\terr := srv.Open()\n\tif err != nil {\n\t\tfeLog.WithFields(log.Fields{\"error\": err.Error()}).Fatal(\"Failed to start gRPC server\")\n\t}\n\n\t\/\/ Exit when we see a signal\n\tterminate := make(chan os.Signal, 1)\n\tsignal.Notify(terminate, os.Interrupt)\n\t<-terminate\n\tfeLog.Info(\"Shutting down gRPC server\")\n}\n\n\/\/ redisConnect reads the configuration and attempts to instantiate a redis connection\n\/\/ pool based on the configured hostname and port.\n\/\/ TODO: needs to be reworked to use redis sentinel when we're ready to support it.\nfunc redisConnect(cfg *viper.Viper) *redis.Pool {\n\n\t\/\/ As per https:\/\/www.iana.org\/assignments\/uri-schemes\/prov\/redis\n\t\/\/ redis:\/\/user:secret@localhost:6379\/0?foo=bar&qux=baz\n\tredisURL := \"redis:\/\/\" + cfg.GetString(\"redis.hostname\") + \":\" + cfg.GetString(\"redis.port\")\n\t\/\/ TODO: check if auth details are in the config, and append them if they\n\t\/\/ are. Right now, assumes your redis instance is unsecured!\n\n\tfeLog.WithFields(log.Fields{\"redisURL\": redisURL}).Info(\"Attempting to connect to Redis\")\n\tpool := redis.Pool{\n\t\tMaxIdle: 3,\n\t\tMaxActive: 0,\n\t\tIdleTimeout: 60 * time.Second,\n\t\tDial: func() (redis.Conn, error) { return redis.DialURL(redisURL) },\n\t}\n\n\tfeLog.Info(\"Connected to Redis\")\n\treturn &pool\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n)\n\n\/\/ WebHook structure\ntype Webhook struct {\n\t\/\/ URL for the http\/s request\n\tURL string\n\t\/\/ Method for the http\/s request\n\tMethod string\n\t\/\/ Code to look for when determining if the request was successful.\n\t\/\/ If this is not specified, request is sent and forgotten about.\n\tSuccess int\n\t\/\/ Timeout for the http\/s request\n\tTimeout time.Duration\n\t\/\/ Backoff for failed webhook calls\n\tBackoff time.Duration\n}\n\ntype webhookRepoInfo struct {\n\tHash string\n}\n\nfunc (w *Webhook) Do(info webhookRepoInfo) error {\n\treq, err := http.NewRequest(w.Method, w.URL, nil)\n\treq.Header.Set(\"Git-Sync-Hash\", info.Hash)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), w.Timeout)\n\tdefer cancel()\n\treq = req.WithContext(ctx)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp.Body.Close()\n\n\t\/\/ If the webhook has a success statusCode, check against it\n\tif w.Success != -1 && resp.StatusCode != w.Success {\n\t\treturn fmt.Errorf(\"received response code %d expected %d\", resp.StatusCode, w.Success)\n\t}\n\n\treturn nil\n}\n\n\/\/ Wait for trigger events from the channel, and send webhooks when triggered\nfunc (w *Webhook) run(ch chan webhookRepoInfo) {\n\tfor {\n\t\t\/\/ Wait for trigger\n\t\tinfo := <-ch\n\n\t\tfor {\n\t\t\tif err := w.Do(info); err != nil {\n\t\t\t\tlog.Error(err, \"error calling webhook\", \"url\", w.URL)\n\t\t\t\ttime.Sleep(w.Backoff)\n\t\t\t} else {\n\t\t\t\tlog.V(0).Info(\"success calling webhook\", \"url\", w.URL)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>Rename http header \"Git-Sync-Hash\" into \"Gitsync-Hash\".<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n)\n\n\/\/ WebHook structure\ntype Webhook struct {\n\t\/\/ URL for the http\/s request\n\tURL string\n\t\/\/ Method for the http\/s request\n\tMethod string\n\t\/\/ Code to look for when determining if the request was successful.\n\t\/\/ If this is not specified, request is sent and forgotten about.\n\tSuccess int\n\t\/\/ Timeout for the http\/s request\n\tTimeout time.Duration\n\t\/\/ Backoff for failed webhook calls\n\tBackoff time.Duration\n}\n\ntype webhookRepoInfo struct {\n\tHash string\n}\n\nfunc (w *Webhook) Do(info webhookRepoInfo) error {\n\treq, err := http.NewRequest(w.Method, w.URL, nil)\n\treq.Header.Set(\"Gitsync-Hash\", info.Hash)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), w.Timeout)\n\tdefer cancel()\n\treq = req.WithContext(ctx)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp.Body.Close()\n\n\t\/\/ If the webhook has a success statusCode, check against it\n\tif w.Success != -1 && resp.StatusCode != w.Success {\n\t\treturn fmt.Errorf(\"received response code %d expected %d\", resp.StatusCode, w.Success)\n\t}\n\n\treturn nil\n}\n\n\/\/ Wait for trigger events from the channel, and send webhooks when triggered\nfunc (w *Webhook) run(ch chan webhookRepoInfo) {\n\tfor {\n\t\t\/\/ Wait for trigger\n\t\tinfo := <-ch\n\n\t\tfor {\n\t\t\tif err := w.Do(info); err != nil {\n\t\t\t\tlog.Error(err, \"error calling webhook\", \"url\", w.URL)\n\t\t\t\ttime.Sleep(w.Backoff)\n\t\t\t} else {\n\t\t\t\tlog.V(0).Info(\"success calling webhook\", \"url\", w.URL)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/tkrajina\/golongfuncs\/internal\"\n)\n\nfunc main() {\n\tty := make([]string, len(internal.AllTypes))\n\tfor n := range internal.AllTypes {\n\t\tty[n] = string(internal.AllTypes[n])\n\t}\n\n\tvar ignoreFilesRegexp, ignoreFuncsRegexp, types string\n\n\tvar params internal.CmdParams\n\tflag.StringVar(&types, \"type\", \"\", \"Type of stats, valid types are: \"+strings.Join(ty, \", \"))\n\tflag.Float64Var(¶ms.Threshold, \"threshold\", 0, \"Min value, functions with value less than this will be ignored\")\n\tflag.IntVar(¶ms.MinLines, \"min-lines\", 10, \"Functions shorter than this will be ignored\")\n\tflag.IntVar(¶ms.Top, \"top\", 25, \"Show only top n functions\")\n\tflag.BoolVar(¶ms.IncludeTests, \"include-tests\", false, \"Include tests\")\n\tflag.BoolVar(¶ms.IncludeVendor, \"include-vendor\", false, \"Include vendored files\")\n\tflag.StringVar(&ignoreFilesRegexp, \"ignore\", \"\", \"Regexp for files\/directories to ignore\")\n\tflag.StringVar(&ignoreFuncsRegexp, \"ignore-func\", \"\", \"Regexp for functions to ignore\")\n\tflag.BoolVar(¶ms.Verbose, \"verbose\", false, \"Verbose\")\n\tflag.Parse()\n\n\targs := flag.Args()\n\tif len(args) == 0 {\n\t\targs = append(args, \".\/...\")\n\t}\n\n\tvar paths []string\n\tfor _, p := range args {\n\t\tif p[0] == '+' {\n\t\t\ttypes += \",\" + p[1:]\n\t\t\ttypes = strings.Trim(types, \",\")\n\t\t} else {\n\t\t\tpaths = append(paths, p)\n\t\t}\n\t}\n\n\tif len(types) == 0 {\n\t\ttypes = fmt.Sprintf(\"%s,%s,%s\", internal.Lines, internal.Complexity, internal.MaxNesting)\n\t}\n\n\tfmt.Println(\"types=\", types)\n\tfmt.Println(\"paths=\", paths)\n\n\tprepareParams(¶ms, types, ignoreFilesRegexp, ignoreFuncsRegexp)\n\tstats := internal.Do(params, paths)\n\tprintStats(params, stats)\n}\n\nfunc prepareParams(params *internal.CmdParams, types, ignoreFilesRegexp, ignoreFuncsRegexp string) {\n\tvar err error\n\tparams.Types, err = internal.ParseTypes(types)\n\tif err != nil {\n\t\tinternal.PrintUsage(\"Invalid type(s) '%s'\", types)\n\t}\n\tif len(ignoreFilesRegexp) > 0 {\n\t\tr, err := regexp.Compile(ignoreFilesRegexp)\n\t\tif err != nil {\n\t\t\tinternal.PrintUsage(\"Invalid ignore regexp '%s'\", ignoreFilesRegexp)\n\t\t}\n\t\tparams.Ignore = r\n\t}\n\tif len(ignoreFuncsRegexp) > 0 {\n\t\tr, err := regexp.Compile(ignoreFuncsRegexp)\n\t\tif err != nil {\n\t\t\tinternal.PrintUsage(\"Invalid ignore regexp '%s'\", ignoreFuncsRegexp)\n\t\t}\n\t\tparams.IgnoreFuncs = r\n\t}\n}\n\nfunc printStats(params internal.CmdParams, stats []internal.FunctionStats) {\n\tcount := 0\n\tfor _, st := range stats {\n\t\tval, err := st.Get(params.Types[0])\n\t\tif err != nil {\n\t\t\tinternal.PrintUsage(\"Invalid type %s\\n\", params.Types[0])\n\t\t}\n\t\tlines, _ := st.Get(internal.Lines)\n\t\tif val >= params.Threshold && int(lines) >= params.MinLines {\n\t\t\tfmt.Printf(\"%40s %-40s\", shortenTo(st.FuncWithRecv(), 40), shortenTo(st.Location, 40))\n\t\t\tprintSingleStat(params.Types[0], val)\n\t\t\tcount += 1\n\t\t\tif len(params.Types) > 1 {\n\t\t\t\tfor i := 1; i < len(params.Types); i++ {\n\t\t\t\t\tval, _ := st.Get(params.Types[i])\n\t\t\t\t\tprintSingleStat(params.Types[i], val)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Println()\n\t\t}\n\t\tif count >= params.Top {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc shortenTo(str string, l int) string {\n\tif len(str) > l {\n\t\treturn \"...\" + str[len(str)-l+5:]\n\t}\n\treturn str\n}\n\nfunc printSingleStat(ty internal.FuncMeasurement, val float64) {\n\tformat := fmt.Sprintf(\"%%%ds\", len(string(ty))+8)\n\tfmt.Printf(format, fmt.Sprintf(\"%s=%.1f\", ty, val))\n}\n<commit_msg>Default empty paths fix<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/tkrajina\/golongfuncs\/internal\"\n)\n\nfunc main() {\n\tty := make([]string, len(internal.AllTypes))\n\tfor n := range internal.AllTypes {\n\t\tty[n] = string(internal.AllTypes[n])\n\t}\n\n\tvar ignoreFilesRegexp, ignoreFuncsRegexp, types string\n\n\tvar params internal.CmdParams\n\tflag.StringVar(&types, \"type\", \"\", \"Type of stats, valid types are: \"+strings.Join(ty, \", \"))\n\tflag.Float64Var(¶ms.Threshold, \"threshold\", 0, \"Min value, functions with value less than this will be ignored\")\n\tflag.IntVar(¶ms.MinLines, \"min-lines\", 10, \"Functions shorter than this will be ignored\")\n\tflag.IntVar(¶ms.Top, \"top\", 25, \"Show only top n functions\")\n\tflag.BoolVar(¶ms.IncludeTests, \"include-tests\", false, \"Include tests\")\n\tflag.BoolVar(¶ms.IncludeVendor, \"include-vendor\", false, \"Include vendored files\")\n\tflag.StringVar(&ignoreFilesRegexp, \"ignore\", \"\", \"Regexp for files\/directories to ignore\")\n\tflag.StringVar(&ignoreFuncsRegexp, \"ignore-func\", \"\", \"Regexp for functions to ignore\")\n\tflag.BoolVar(¶ms.Verbose, \"verbose\", false, \"Verbose\")\n\tflag.Parse()\n\n\tvar paths []string\n\tfor _, arg := range flag.Args() {\n\t\tif arg[0] == '+' {\n\t\t\ttypes = strings.Trim(types+\",\"+arg[1:], \",\")\n\t\t} else {\n\t\t\tpaths = append(paths, arg)\n\t\t}\n\t}\n\n\tif len(paths) == 0 {\n\t\tpaths = append(paths, \".\/...\")\n\t}\n\tif len(types) == 0 {\n\t\ttypes = fmt.Sprintf(\"%s,%s,%s\", internal.Lines, internal.Complexity, internal.MaxNesting)\n\t}\n\n\tprepareParams(¶ms, types, ignoreFilesRegexp, ignoreFuncsRegexp)\n\tstats := internal.Do(params, paths)\n\tprintStats(params, stats)\n}\n\nfunc prepareParams(params *internal.CmdParams, types, ignoreFilesRegexp, ignoreFuncsRegexp string) {\n\tvar err error\n\tparams.Types, err = internal.ParseTypes(types)\n\tif err != nil {\n\t\tinternal.PrintUsage(\"Invalid type(s) '%s'\", types)\n\t}\n\tif len(ignoreFilesRegexp) > 0 {\n\t\tr, err := regexp.Compile(ignoreFilesRegexp)\n\t\tif err != nil {\n\t\t\tinternal.PrintUsage(\"Invalid ignore regexp '%s'\", ignoreFilesRegexp)\n\t\t}\n\t\tparams.Ignore = r\n\t}\n\tif len(ignoreFuncsRegexp) > 0 {\n\t\tr, err := regexp.Compile(ignoreFuncsRegexp)\n\t\tif err != nil {\n\t\t\tinternal.PrintUsage(\"Invalid ignore regexp '%s'\", ignoreFuncsRegexp)\n\t\t}\n\t\tparams.IgnoreFuncs = r\n\t}\n}\n\nfunc printStats(params internal.CmdParams, stats []internal.FunctionStats) {\n\tcount := 0\n\tfor _, st := range stats {\n\t\tval, err := st.Get(params.Types[0])\n\t\tif err != nil {\n\t\t\tinternal.PrintUsage(\"Invalid type %s\\n\", params.Types[0])\n\t\t}\n\t\tlines, _ := st.Get(internal.Lines)\n\t\tif val >= params.Threshold && int(lines) >= params.MinLines {\n\t\t\tfmt.Printf(\"%40s %-40s\", shortenTo(st.FuncWithRecv(), 40), shortenTo(st.Location, 40))\n\t\t\tprintSingleStat(params.Types[0], val)\n\t\t\tcount += 1\n\t\t\tif len(params.Types) > 1 {\n\t\t\t\tfor i := 1; i < len(params.Types); i++ {\n\t\t\t\t\tval, _ := st.Get(params.Types[i])\n\t\t\t\t\tprintSingleStat(params.Types[i], val)\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Println()\n\t\t}\n\t\tif count >= params.Top {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc shortenTo(str string, l int) string {\n\tif len(str) > l {\n\t\treturn \"...\" + str[len(str)-l+5:]\n\t}\n\treturn str\n}\n\nfunc printSingleStat(ty internal.FuncMeasurement, val float64) {\n\tformat := fmt.Sprintf(\"%%%ds\", len(string(ty))+8)\n\tfmt.Printf(format, fmt.Sprintf(\"%s=%.1f\", ty, val))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Upspin Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/sha256\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/big\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"upspin.io\/cloud\/mail\"\n\t\"upspin.io\/cloud\/mail\/sendgrid\"\n\t\"upspin.io\/config\"\n\t\"upspin.io\/errors\"\n\t\"upspin.io\/factotum\"\n\t\"upspin.io\/log\"\n\t\"upspin.io\/serverutil\"\n\t\"upspin.io\/upspin\"\n\t\"upspin.io\/user\"\n\t\"upspin.io\/valid\"\n)\n\nconst (\n\t\/\/ signupGracePeriod is the period of validity for a signup request.\n\tsignupGracePeriod = 24 * time.Hour\n\n\t\/\/ signupNotifyAddress is the address that should receive signup notifications.\n\tsignupNotifyAddress = \"upspin-sendgrid@google.com\"\n\n\tnoHTML = \"\" \/\/ for mail.Send\n)\n\n\/\/ signupHandler implements an http.Handler that handles user creation requests\n\/\/ made by 'upspin signup' and the user themselves.\ntype signupHandler struct {\n\tfact upspin.Factotum\n\tkey upspin.KeyServer\n\tmail mail.Mail\n\n\trate serverutil.RateLimiter\n}\n\n\/\/ newSignupHandler creates a new handler that serves \/signup.\nfunc newSignupHandler(fact upspin.Factotum, key upspin.KeyServer, mailConfig string) (*signupHandler, error) {\n\tapiKey, _, _, err := parseMailConfig(mailConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm := &signupHandler{\n\t\tfact: fact,\n\t\tkey: key,\n\t\tmail: sendgrid.New(apiKey, \"upspin.io\"),\n\t\trate: serverutil.RateLimiter{\n\t\t\tBackoff: 1 * time.Minute,\n\t\t\tMax: 24 * time.Hour,\n\t\t},\n\t}\n\treturn m, nil\n}\n\nfunc (m *signupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\terrorf := func(code int, format string, args ...interface{}) {\n\t\ts := fmt.Sprintf(format, args...)\n\t\thttp.Error(w, s, code)\n\t}\n\n\t\/\/ Parse and validate request.\n\tv := r.FormValue\n\tu := &upspin.User{\n\t\tName: upspin.UserName(v(\"name\")),\n\t\tDirs: []upspin.Endpoint{{\n\t\t\tTransport: upspin.Remote,\n\t\t\tNetAddr: upspin.NetAddr(v(\"dir\")),\n\t\t}},\n\t\tStores: []upspin.Endpoint{{\n\t\t\tTransport: upspin.Remote,\n\t\t\tNetAddr: upspin.NetAddr(v(\"store\")),\n\t\t}},\n\t\tPublicKey: upspin.PublicKey(v(\"key\")),\n\t}\n\tif err := valid.UserName(u.Name); err != nil {\n\t\terrorf(http.StatusBadRequest, \"invalid user name\")\n\t\treturn\n\t}\n\tsigR, sigS, nowS := v(\"sigR\"), v(\"sigS\"), v(\"now\")\n\tcreate := sigR+sigS+nowS != \"\"\n\n\t\/\/ Lookup userName. It must not exist yet.\n\t_, err := m.key.Lookup(u.Name)\n\tif err == nil {\n\t\terrorf(http.StatusBadRequest, \"user already exists on key server\")\n\t\treturn\n\t} else if !errors.Match(errors.E(errors.NotExist), err) {\n\t\terrorf(http.StatusInternalServerError, \"error looking up user: %v\", err)\n\t\treturn\n\t}\n\n\tif create {\n\t\t\/\/ This is the user clicking the link in the signup mail.\n\t\t\/\/ Validate the server signature and create the user.\n\n\t\t\/\/ Parse signature.\n\t\tvar rs, ss big.Int\n\t\tif _, ok := rs.SetString(sigR, 10); !ok {\n\t\t\terrorf(http.StatusBadRequest, \"invalid signature R value\")\n\t\t\treturn\n\t\t}\n\t\tif _, ok := ss.SetString(sigS, 10); !ok {\n\t\t\terrorf(http.StatusBadRequest, \"invalid signature S value\")\n\t\t\treturn\n\t\t}\n\t\tsig := upspin.Signature{R: &rs, S: &ss}\n\n\t\t\/\/ Parse time.\n\t\tnowI, err := strconv.ParseInt(nowS, 10, 64)\n\t\tif err != nil {\n\t\t\terrorf(http.StatusBadRequest, \"invalid now value: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tnow := time.Unix(nowI, 0)\n\n\t\t\/\/ Validate signature.\n\t\tif err := m.validateSignature(u, now, sig); err != nil {\n\t\t\terrorf(http.StatusBadRequest, \"invalid signature: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Create user.\n\t\terr = m.createUser(u)\n\t\tif err != nil {\n\t\t\terrorf(http.StatusInternalServerError, \"could not create user: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Send a note to our internal list, so we're aware of signups.\n\t\tsubject := \"New signup: \" + string(u.Name)\n\t\tbody := fmt.Sprintf(\"%s signed up on %s\", u.Name, time.Now().Format(time.Stamp))\n\t\terr = m.mail.Send(signupNotifyAddress, serverName, subject, body, noHTML)\n\t\tif err != nil {\n\t\t\tlog.Error.Printf(\"Error sending mail to %q: %v\", signupNotifyAddress, err)\n\t\t\t\/\/ Don't prevent signup if this fails.\n\t\t}\n\n\t\t\/\/ TODO(adg): display user friendly welcome message\n\t\tfmt.Fprintf(w, \"An account for %q has been registered with the key server.\", u.Name)\n\t\treturn\n\t}\n\t\/\/ We are being called by 'upspin signup'.\n\n\tif r.Method != \"POST\" {\n\t\terrorf(http.StatusMethodNotAllowed, \"method not allowed\")\n\t\treturn\n\t}\n\n\t\/\/ Aggressively rate limit requests to this service,\n\t\/\/ so that we can't be used for a mail bomb.\n\t\/\/ TODO(adg): also limit by remote IP address\n\tname, _, domain, err := user.Parse(u.Name)\n\tif err != nil {\n\t\terrorf(http.StatusBadRequest, \"invalid user name: %v\", err)\n\t\treturn\n\t}\n\tkey := strings.ToLower(name + \"@\" + domain)\n\tif ok, wait := m.rate.Pass(key); !ok {\n\t\terrorf(http.StatusTooManyRequests, \"repeated signup attempt; please wait %v before trying again\", wait)\n\t\treturn\n\t}\n\n\t\/\/ Construct signed sign-up URL.\n\t\/\/ Important: the signaure must only be transmitted to the calling user\n\t\/\/ by email, as it is proof of ownership of that email address. We must\n\t\/\/ take care not to expose the signature in response to this request\n\t\/\/ (in an error message, for example).\n\tnow := time.Now()\n\tsig, err := m.sign(u, now)\n\tif err != nil {\n\t\terrorf(http.StatusInternalServerError, \"could not generate signature: %v\", err)\n\t\treturn\n\t}\n\tvals := url.Values{\n\t\t\"name\": {string(u.Name)},\n\t\t\"dir\": {string(u.Dirs[0].NetAddr)},\n\t\t\"store\": {string(u.Stores[0].NetAddr)},\n\t\t\"key\": {string(u.PublicKey)},\n\t\t\"sigR\": {sig.R.String()},\n\t\t\"sigS\": {sig.S.String()},\n\t\t\"now\": {fmt.Sprint(now.Unix())},\n\t}\n\tsignupURL := (&url.URL{\n\t\tScheme: \"https\",\n\t\tHost: \"key.upspin.io\", \/\/ TODO(adg): make configurable\n\t\tPath: \"\/signup\",\n\t\tRawQuery: vals.Encode(),\n\t}).String()\n\n\t\/\/ Send signup confirmation mail to user.\n\tbody := new(bytes.Buffer)\n\tfmt.Fprintln(body, \"Follow this link to complete the Upspin signup process:\")\n\tfmt.Fprintln(body, signupURL)\n\tfmt.Fprintln(body, \"\\nIf you were not expecting this message, please ignore it.\")\n\t\/\/ TODO(adg): implement opt out link\n\tconst subject = \"Upspin signup confirmation\"\n\terr = m.mail.Send(string(u.Name), serverName, subject, body.String(), noHTML)\n\tif err != nil {\n\t\tlog.Error.Printf(\"Error sending mail to %q: %v\", u.Name, err)\n\t\terrorf(http.StatusInternalServerError, \"could not send signup email\")\n\t\treturn\n\t}\n\n\tfmt.Fprintln(w, \"OK\")\n}\n\nfunc (m *signupHandler) createUser(u *upspin.User) error {\n\tkey, err := m.dialForUser(u.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer key.Close()\n\tif err := key.Put(u); err != nil {\n\t\treturn err\n\t}\n\n\tsnapshotUser, err := snapshotUser(u.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Lookup snapshotUser to ensure we don't overwrite an existing one.\n\t_, err = key.Lookup(snapshotUser)\n\tif err != nil && !errors.Match(errors.E(errors.NotExist), err) {\n\t\treturn err\n\t}\n\tif err == nil {\n\t\t\/\/ Snapshot user exists; no need to create it.\n\t\treturn nil\n\t}\n\t\/\/ Create snapshot user.\n\tkey, err = m.dialForUser(snapshotUser)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer key.Close() \/\/ be nice and release resources.\n\treturn key.Put(&upspin.User{\n\t\tName: snapshotUser,\n\t\tPublicKey: u.PublicKey,\n\t})\n}\n\nfunc (m *signupHandler) dialForUser(name upspin.UserName) (upspin.KeyServer, error) {\n\t\/\/ We need to dial this server locally so the new user is authenticated\n\t\/\/ with it implicitly.\n\tcfg := config.New()\n\tcfg = config.SetKeyEndpoint(cfg, m.key.Endpoint())\n\tcfg = config.SetUserName(cfg, name)\n\n\tservice, err := m.key.Dial(cfg, m.key.Endpoint())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkeyServer, ok := service.(upspin.KeyServer)\n\tif !ok {\n\t\treturn nil, errors.E(errors.Internal, errors.Str(\"dialed service not an instance of upspin.KeyServer\"))\n\t}\n\treturn keyServer, nil\n}\n\n\/\/ sign generates a signature for the given user creation request at time now.\nfunc (m *signupHandler) sign(u *upspin.User, now time.Time) (upspin.Signature, error) {\n\tb, err := sigBytes(u, now)\n\tif err != nil {\n\t\treturn upspin.Signature{}, err\n\t}\n\treturn m.fact.Sign(b)\n}\n\nfunc (m *signupHandler) validateSignature(u *upspin.User, now time.Time, sig upspin.Signature) error {\n\t\/\/ Check that the signature is still valid.\n\tif time.Now().After(now.Add(signupGracePeriod)) {\n\t\treturn errors.Str(\"request too old; please try again\")\n\t}\n\tb, err := sigBytes(u, now)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn factotum.Verify(b, sig, m.fact.PublicKey())\n}\n\nfunc sigBytes(u *upspin.User, now time.Time) ([]byte, error) {\n\tb, err := json.Marshal(u)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb = strconv.AppendInt(b, now.Unix(), 10)\n\th := sha256.Sum256(b)\n\treturn h[:], nil\n}\n\nfunc parseMailConfig(name string) (apiKey, userName, password string, err error) {\n\tdata, err := ioutil.ReadFile(name)\n\tif err != nil {\n\t\treturn \"\", \"\", \"\", errors.E(errors.IO, err)\n\t}\n\tlines := strings.Split(strings.TrimSpace(string(data)), \"\\n\")\n\tif len(lines) != 3 {\n\t\treturn \"\", \"\", \"\", errors.E(errors.IO, errors.Str(\"config file must have 3 entries: api key, user name, password\"))\n\t}\n\tapiKey = strings.TrimSpace(lines[0])\n\tuserName = strings.TrimSpace(lines[1])\n\tpassword = strings.TrimSpace(lines[2])\n\treturn\n}\n\n\/\/ snapshotUser returns the snapshot username for the named user.\nfunc snapshotUser(u upspin.UserName) (upspin.UserName, error) {\n\t\/\/ Attempt to create a \"+snapshot\" user.\n\tname, suffix, domain, err := user.Parse(u)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif suffix != \"\" {\n\t\tname = name[:len(name)-len(suffix)-1]\n\t}\n\treturn upspin.UserName(name + \"+snapshot@\" + domain), nil\n}\n<commit_msg>cmd\/keyserver: tell the user what went wrong<commit_after>\/\/ Copyright 2016 The Upspin Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/sha256\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/big\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"upspin.io\/cloud\/mail\"\n\t\"upspin.io\/cloud\/mail\/sendgrid\"\n\t\"upspin.io\/config\"\n\t\"upspin.io\/errors\"\n\t\"upspin.io\/factotum\"\n\t\"upspin.io\/log\"\n\t\"upspin.io\/serverutil\"\n\t\"upspin.io\/upspin\"\n\t\"upspin.io\/user\"\n\t\"upspin.io\/valid\"\n)\n\nconst (\n\t\/\/ signupGracePeriod is the period of validity for a signup request.\n\tsignupGracePeriod = 24 * time.Hour\n\n\t\/\/ signupNotifyAddress is the address that should receive signup notifications.\n\tsignupNotifyAddress = \"upspin-sendgrid@google.com\"\n\n\tnoHTML = \"\" \/\/ for mail.Send\n)\n\n\/\/ signupHandler implements an http.Handler that handles user creation requests\n\/\/ made by 'upspin signup' and the user themselves.\ntype signupHandler struct {\n\tfact upspin.Factotum\n\tkey upspin.KeyServer\n\tmail mail.Mail\n\n\trate serverutil.RateLimiter\n}\n\n\/\/ newSignupHandler creates a new handler that serves \/signup.\nfunc newSignupHandler(fact upspin.Factotum, key upspin.KeyServer, mailConfig string) (*signupHandler, error) {\n\tapiKey, _, _, err := parseMailConfig(mailConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm := &signupHandler{\n\t\tfact: fact,\n\t\tkey: key,\n\t\tmail: sendgrid.New(apiKey, \"upspin.io\"),\n\t\trate: serverutil.RateLimiter{\n\t\t\tBackoff: 1 * time.Minute,\n\t\t\tMax: 24 * time.Hour,\n\t\t},\n\t}\n\treturn m, nil\n}\n\nfunc (m *signupHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\terrorf := func(code int, format string, args ...interface{}) {\n\t\ts := fmt.Sprintf(format, args...)\n\t\thttp.Error(w, s, code)\n\t}\n\n\t\/\/ Parse and validate request.\n\tv := r.FormValue\n\tu := &upspin.User{\n\t\tName: upspin.UserName(v(\"name\")),\n\t\tDirs: []upspin.Endpoint{{\n\t\t\tTransport: upspin.Remote,\n\t\t\tNetAddr: upspin.NetAddr(v(\"dir\")),\n\t\t}},\n\t\tStores: []upspin.Endpoint{{\n\t\t\tTransport: upspin.Remote,\n\t\t\tNetAddr: upspin.NetAddr(v(\"store\")),\n\t\t}},\n\t\tPublicKey: upspin.PublicKey(v(\"key\")),\n\t}\n\tif err := valid.UserName(u.Name); err != nil {\n\t\terrorf(http.StatusBadRequest, \"invalid user name: %s\", u.Name)\n\t\treturn\n\t}\n\tsigR, sigS, nowS := v(\"sigR\"), v(\"sigS\"), v(\"now\")\n\tcreate := sigR+sigS+nowS != \"\"\n\n\t\/\/ Lookup userName. It must not exist yet.\n\t_, err := m.key.Lookup(u.Name)\n\tif err == nil {\n\t\terrorf(http.StatusBadRequest, \"user already exists on key server: %s\", u.Name)\n\t\treturn\n\t} else if !errors.Match(errors.E(errors.NotExist), err) {\n\t\terrorf(http.StatusInternalServerError, \"error looking up user: %v\", err)\n\t\treturn\n\t}\n\n\tif create {\n\t\t\/\/ This is the user clicking the link in the signup mail.\n\t\t\/\/ Validate the server signature and create the user.\n\n\t\t\/\/ Parse signature.\n\t\tvar rs, ss big.Int\n\t\tif _, ok := rs.SetString(sigR, 10); !ok {\n\t\t\terrorf(http.StatusBadRequest, \"invalid signature R value\")\n\t\t\treturn\n\t\t}\n\t\tif _, ok := ss.SetString(sigS, 10); !ok {\n\t\t\terrorf(http.StatusBadRequest, \"invalid signature S value\")\n\t\t\treturn\n\t\t}\n\t\tsig := upspin.Signature{R: &rs, S: &ss}\n\n\t\t\/\/ Parse time.\n\t\tnowI, err := strconv.ParseInt(nowS, 10, 64)\n\t\tif err != nil {\n\t\t\terrorf(http.StatusBadRequest, \"invalid now value: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tnow := time.Unix(nowI, 0)\n\n\t\t\/\/ Validate signature.\n\t\tif err := m.validateSignature(u, now, sig); err != nil {\n\t\t\terrorf(http.StatusBadRequest, \"invalid signature: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Create user.\n\t\terr = m.createUser(u)\n\t\tif err != nil {\n\t\t\terrorf(http.StatusInternalServerError, \"could not create user: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Send a note to our internal list, so we're aware of signups.\n\t\tsubject := \"New signup: \" + string(u.Name)\n\t\tbody := fmt.Sprintf(\"%s signed up on %s\", u.Name, time.Now().Format(time.Stamp))\n\t\terr = m.mail.Send(signupNotifyAddress, serverName, subject, body, noHTML)\n\t\tif err != nil {\n\t\t\tlog.Error.Printf(\"Error sending mail to %q: %v\", signupNotifyAddress, err)\n\t\t\t\/\/ Don't prevent signup if this fails.\n\t\t}\n\n\t\t\/\/ TODO(adg): display user friendly welcome message\n\t\tfmt.Fprintf(w, \"An account for %q has been registered with the key server.\", u.Name)\n\t\treturn\n\t}\n\t\/\/ We are being called by 'upspin signup'.\n\n\tif r.Method != \"POST\" {\n\t\terrorf(http.StatusMethodNotAllowed, \"method not allowed\")\n\t\treturn\n\t}\n\n\t\/\/ Aggressively rate limit requests to this service,\n\t\/\/ so that we can't be used for a mail bomb.\n\t\/\/ TODO(adg): also limit by remote IP address\n\tname, _, domain, err := user.Parse(u.Name)\n\tif err != nil {\n\t\terrorf(http.StatusBadRequest, \"invalid user name: %v\", err)\n\t\treturn\n\t}\n\tkey := strings.ToLower(name + \"@\" + domain)\n\tif ok, wait := m.rate.Pass(key); !ok {\n\t\terrorf(http.StatusTooManyRequests, \"repeated signup attempt; please wait %v before trying again\", wait)\n\t\treturn\n\t}\n\n\t\/\/ Construct signed sign-up URL.\n\t\/\/ Important: the signaure must only be transmitted to the calling user\n\t\/\/ by email, as it is proof of ownership of that email address. We must\n\t\/\/ take care not to expose the signature in response to this request\n\t\/\/ (in an error message, for example).\n\tnow := time.Now()\n\tsig, err := m.sign(u, now)\n\tif err != nil {\n\t\terrorf(http.StatusInternalServerError, \"could not generate signature: %v\", err)\n\t\treturn\n\t}\n\tvals := url.Values{\n\t\t\"name\": {string(u.Name)},\n\t\t\"dir\": {string(u.Dirs[0].NetAddr)},\n\t\t\"store\": {string(u.Stores[0].NetAddr)},\n\t\t\"key\": {string(u.PublicKey)},\n\t\t\"sigR\": {sig.R.String()},\n\t\t\"sigS\": {sig.S.String()},\n\t\t\"now\": {fmt.Sprint(now.Unix())},\n\t}\n\tsignupURL := (&url.URL{\n\t\tScheme: \"https\",\n\t\tHost: \"key.upspin.io\", \/\/ TODO(adg): make configurable\n\t\tPath: \"\/signup\",\n\t\tRawQuery: vals.Encode(),\n\t}).String()\n\n\t\/\/ Send signup confirmation mail to user.\n\tbody := new(bytes.Buffer)\n\tfmt.Fprintln(body, \"Follow this link to complete the Upspin signup process:\")\n\tfmt.Fprintln(body, signupURL)\n\tfmt.Fprintln(body, \"\\nIf you were not expecting this message, please ignore it.\")\n\t\/\/ TODO(adg): implement opt out link\n\tconst subject = \"Upspin signup confirmation\"\n\terr = m.mail.Send(string(u.Name), serverName, subject, body.String(), noHTML)\n\tif err != nil {\n\t\tlog.Error.Printf(\"Error sending mail to %q: %v\", u.Name, err)\n\t\terrorf(http.StatusInternalServerError, \"could not send signup email\")\n\t\treturn\n\t}\n\n\tfmt.Fprintln(w, \"OK\")\n}\n\nfunc (m *signupHandler) createUser(u *upspin.User) error {\n\tkey, err := m.dialForUser(u.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer key.Close()\n\tif err := key.Put(u); err != nil {\n\t\treturn err\n\t}\n\n\tsnapshotUser, err := snapshotUser(u.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Lookup snapshotUser to ensure we don't overwrite an existing one.\n\t_, err = key.Lookup(snapshotUser)\n\tif err != nil && !errors.Match(errors.E(errors.NotExist), err) {\n\t\treturn err\n\t}\n\tif err == nil {\n\t\t\/\/ Snapshot user exists; no need to create it.\n\t\treturn nil\n\t}\n\t\/\/ Create snapshot user.\n\tkey, err = m.dialForUser(snapshotUser)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer key.Close() \/\/ be nice and release resources.\n\treturn key.Put(&upspin.User{\n\t\tName: snapshotUser,\n\t\tPublicKey: u.PublicKey,\n\t})\n}\n\nfunc (m *signupHandler) dialForUser(name upspin.UserName) (upspin.KeyServer, error) {\n\t\/\/ We need to dial this server locally so the new user is authenticated\n\t\/\/ with it implicitly.\n\tcfg := config.New()\n\tcfg = config.SetKeyEndpoint(cfg, m.key.Endpoint())\n\tcfg = config.SetUserName(cfg, name)\n\n\tservice, err := m.key.Dial(cfg, m.key.Endpoint())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkeyServer, ok := service.(upspin.KeyServer)\n\tif !ok {\n\t\treturn nil, errors.E(errors.Internal, errors.Str(\"dialed service not an instance of upspin.KeyServer\"))\n\t}\n\treturn keyServer, nil\n}\n\n\/\/ sign generates a signature for the given user creation request at time now.\nfunc (m *signupHandler) sign(u *upspin.User, now time.Time) (upspin.Signature, error) {\n\tb, err := sigBytes(u, now)\n\tif err != nil {\n\t\treturn upspin.Signature{}, err\n\t}\n\treturn m.fact.Sign(b)\n}\n\nfunc (m *signupHandler) validateSignature(u *upspin.User, now time.Time, sig upspin.Signature) error {\n\t\/\/ Check that the signature is still valid.\n\tif time.Now().After(now.Add(signupGracePeriod)) {\n\t\treturn errors.Str(\"request too old; please try again\")\n\t}\n\tb, err := sigBytes(u, now)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn factotum.Verify(b, sig, m.fact.PublicKey())\n}\n\nfunc sigBytes(u *upspin.User, now time.Time) ([]byte, error) {\n\tb, err := json.Marshal(u)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb = strconv.AppendInt(b, now.Unix(), 10)\n\th := sha256.Sum256(b)\n\treturn h[:], nil\n}\n\nfunc parseMailConfig(name string) (apiKey, userName, password string, err error) {\n\tdata, err := ioutil.ReadFile(name)\n\tif err != nil {\n\t\treturn \"\", \"\", \"\", errors.E(errors.IO, err)\n\t}\n\tlines := strings.Split(strings.TrimSpace(string(data)), \"\\n\")\n\tif len(lines) != 3 {\n\t\treturn \"\", \"\", \"\", errors.E(errors.IO, errors.Str(\"config file must have 3 entries: api key, user name, password\"))\n\t}\n\tapiKey = strings.TrimSpace(lines[0])\n\tuserName = strings.TrimSpace(lines[1])\n\tpassword = strings.TrimSpace(lines[2])\n\treturn\n}\n\n\/\/ snapshotUser returns the snapshot username for the named user.\nfunc snapshotUser(u upspin.UserName) (upspin.UserName, error) {\n\t\/\/ Attempt to create a \"+snapshot\" user.\n\tname, suffix, domain, err := user.Parse(u)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif suffix != \"\" {\n\t\tname = name[:len(name)-len(suffix)-1]\n\t}\n\treturn upspin.UserName(name + \"+snapshot@\" + domain), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/gin-contrib\/pprof\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/joho\/godotenv\"\n\t\"golang.org\/x\/crypto\/acme\/autocert\"\n\t\"golang.org\/x\/sync\/errgroup\"\n\t\"gopkg.in\/urfave\/cli.v2\"\n)\n\n\/\/go:generate retool do fileb0x ab0x.yaml\n\nvar (\n\tdefaultAddr = \":9000\"\n)\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tif env := os.Getenv(\"KLEISTER_ENV_FILE\"); env != \"\" {\n\t\tgodotenv.Load(env)\n\t}\n\n\tapp := &cli.App{\n\t\tName: \"kleister-ui\",\n\t\tVersion: Version.String(),\n\t\tUsage: \"Manage mod packs for Minecraft\",\n\t\tCompiled: time.Now(),\n\n\t\tAuthors: []*cli.Author{\n\t\t\t{\n\t\t\t\tName: \"Thomas Boerger\",\n\t\t\t\tEmail: \"thomas@webhippie.de\",\n\t\t\t},\n\t\t},\n\n\t\tFlags: []cli.Flag{\n\t\t\t&cli.BoolFlag{\n\t\t\t\tName: \"debug\",\n\t\t\t\tValue: false,\n\t\t\t\tUsage: \"Activate debug information\",\n\t\t\t\tEnvVars: []string{\"KLEISTER_DEBUG\"},\n\t\t\t\tDestination: &Config.Debug,\n\t\t\t\tHidden: true,\n\t\t\t},\n\t\t},\n\n\t\tBefore: func(c *cli.Context) error {\n\t\t\tlogrus.SetOutput(os.Stdout)\n\n\t\t\tif Config.Debug {\n\t\t\t\tlogrus.SetLevel(logrus.DebugLevel)\n\t\t\t} else {\n\t\t\t\tlogrus.SetLevel(logrus.InfoLevel)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t},\n\n\t\tCommands: []*cli.Command{\n\t\t\t{\n\t\t\t\tName: \"server\",\n\t\t\t\tUsage: \"Start the Kleister UI\",\n\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\tName: \"host\",\n\t\t\t\t\t\tValue: \"http:\/\/localhost:9000\",\n\t\t\t\t\t\tUsage: \"External access to the UI\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_UI_HOST\"},\n\t\t\t\t\t\tDestination: &Config.Server.Host,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\tName: \"addr\",\n\t\t\t\t\t\tValue: defaultAddr,\n\t\t\t\t\t\tUsage: \"Address to bind the server\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_UI_ADDR\"},\n\t\t\t\t\t\tDestination: &Config.Server.Addr,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\tName: \"endpoint\",\n\t\t\t\t\t\tValue: \"http:\/\/localhost:8000\",\n\t\t\t\t\t\tUsage: \"URL for the API server\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_UI_ENDPOINT\"},\n\t\t\t\t\t\tDestination: &Config.Server.Endpoint,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\tName: \"static\",\n\t\t\t\t\t\tValue: \"\",\n\t\t\t\t\t\tUsage: \"Folder for serving assets\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_UI_STATIC\"},\n\t\t\t\t\t\tDestination: &Config.Server.Static,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\tName: \"storage\",\n\t\t\t\t\t\tValue: \"storage\/\",\n\t\t\t\t\t\tUsage: \"Folder for storing files\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_UI_STORAGE\"},\n\t\t\t\t\t\tDestination: &Config.Server.Storage,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\tName: \"cert\",\n\t\t\t\t\t\tValue: \"\",\n\t\t\t\t\t\tUsage: \"Path to SSL cert\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_UI_CERT\"},\n\t\t\t\t\t\tDestination: &Config.Server.Cert,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\tName: \"key\",\n\t\t\t\t\t\tValue: \"\",\n\t\t\t\t\t\tUsage: \"Path to SSL key\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_UI_KEY\"},\n\t\t\t\t\t\tDestination: &Config.Server.Key,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.BoolFlag{\n\t\t\t\t\t\tName: \"letsencrypt\",\n\t\t\t\t\t\tValue: false,\n\t\t\t\t\t\tUsage: \"Enable Let's Encrypt SSL\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_UI_LETSENCRYPT\"},\n\t\t\t\t\t\tDestination: &Config.Server.LetsEncrypt,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.BoolFlag{\n\t\t\t\t\t\tName: \"strict-curves\",\n\t\t\t\t\t\tValue: false,\n\t\t\t\t\t\tUsage: \"Use strict SSL curves\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_STRICT_CURVES\"},\n\t\t\t\t\t\tDestination: &Config.Server.StrictCurves,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.BoolFlag{\n\t\t\t\t\t\tName: \"strict-ciphers\",\n\t\t\t\t\t\tValue: false,\n\t\t\t\t\t\tUsage: \"Use strict SSL ciphers\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_STRICT_CIPHERS\"},\n\t\t\t\t\t\tDestination: &Config.Server.StrictCiphers,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.BoolFlag{\n\t\t\t\t\t\tName: \"pprof\",\n\t\t\t\t\t\tValue: false,\n\t\t\t\t\t\tUsage: \"Enable pprof debugger\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_UI_PPROF\"},\n\t\t\t\t\t\tDestination: &Config.Server.Pprof,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\t\tif Config.Debug {\n\t\t\t\t\t\tgin.SetMode(gin.DebugMode)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tgin.SetMode(gin.ReleaseMode)\n\t\t\t\t\t}\n\n\t\t\t\t\te := gin.New()\n\n\t\t\t\t\te.SetHTMLTemplate(\n\t\t\t\t\t\tTemplate(),\n\t\t\t\t\t)\n\n\t\t\t\t\te.Use(SetLogger())\n\t\t\t\t\te.Use(SetRecovery())\n\n\t\t\t\t\tif Config.Server.Pprof {\n\t\t\t\t\t\tpprof.Register(\n\t\t\t\t\t\t\te,\n\t\t\t\t\t\t\t&pprof.Options{\n\t\t\t\t\t\t\t\tRoutePrefix: \"\/debug\/pprof\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\n\t\t\t\t\te.StaticFS(\n\t\t\t\t\t\t\"\/assets\",\n\t\t\t\t\t\tAssets(),\n\t\t\t\t\t)\n\n\t\t\t\t\te.NoRoute(Index)\n\n\t\t\t\t\tif Config.Server.LetsEncrypt || (Config.Server.Cert != \"\" && Config.Server.Key != \"\") {\n\t\t\t\t\t\tcfg := &tls.Config{\n\t\t\t\t\t\t\tPreferServerCipherSuites: true,\n\t\t\t\t\t\t\tMinVersion: tls.VersionTLS12,\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif Config.Server.StrictCurves {\n\t\t\t\t\t\t\tcfg.CurvePreferences = []tls.CurveID{\n\t\t\t\t\t\t\t\ttls.CurveP521,\n\t\t\t\t\t\t\t\ttls.CurveP384,\n\t\t\t\t\t\t\t\ttls.CurveP256,\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif Config.Server.StrictCiphers {\n\t\t\t\t\t\t\tcfg.CipherSuites = []uint16{\n\t\t\t\t\t\t\t\ttls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,\n\t\t\t\t\t\t\t\ttls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,\n\t\t\t\t\t\t\t\ttls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,\n\t\t\t\t\t\t\t\ttls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif Config.Server.LetsEncrypt {\n\t\t\t\t\t\t\tif Config.Server.Addr != defaultAddr {\n\t\t\t\t\t\t\t\tlogrus.Infof(\"With Let's Encrypt bind port have been overwritten!\")\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tparsed, err := url.Parse(Config.Server.Host)\n\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tlogrus.Fatal(\"Failed to parse host name. %s\", err)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tcertManager := autocert.Manager{\n\t\t\t\t\t\t\t\tPrompt: autocert.AcceptTOS,\n\t\t\t\t\t\t\t\tHostPolicy: autocert.HostWhitelist(parsed.Host),\n\t\t\t\t\t\t\t\tCache: autocert.DirCache(Config.Server.Storage),\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tcfg.GetCertificate = certManager.GetCertificate\n\n\t\t\t\t\t\t\tvar (\n\t\t\t\t\t\t\t\tg errgroup.Group\n\t\t\t\t\t\t\t)\n\n\t\t\t\t\t\t\tsplitAddr := strings.SplitN(Config.Server.Addr, \":\", 2)\n\t\t\t\t\t\t\tlogrus.Infof(\"Starting on %s:80 and %s:443\", splitAddr[0], splitAddr[0])\n\n\t\t\t\t\t\t\tg.Go(func() error {\n\t\t\t\t\t\t\t\treturn http.ListenAndServe(\n\t\t\t\t\t\t\t\t\tfmt.Sprintf(\"%s:80\", splitAddr[0]),\n\t\t\t\t\t\t\t\t\thttp.HandlerFunc(redirect),\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\tg.Go(func() error {\n\t\t\t\t\t\t\t\treturn startServer(&http.Server{\n\t\t\t\t\t\t\t\t\tAddr: fmt.Sprintf(\"%s:443\", splitAddr[0]),\n\t\t\t\t\t\t\t\t\tHandler: e,\n\t\t\t\t\t\t\t\t\tReadTimeout: 5 * time.Second,\n\t\t\t\t\t\t\t\t\tWriteTimeout: 10 * time.Second,\n\t\t\t\t\t\t\t\t\tTLSConfig: cfg,\n\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\tif err := g.Wait(); err != nil {\n\t\t\t\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlogrus.Infof(\"Starting on %s\", Config.Server.Addr)\n\n\t\t\t\t\t\t\tcert, err := tls.LoadX509KeyPair(\n\t\t\t\t\t\t\t\tConfig.Server.Cert,\n\t\t\t\t\t\t\t\tConfig.Server.Key,\n\t\t\t\t\t\t\t)\n\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tlogrus.Fatal(\"Failed to load SSL certificates. %s\", err)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tcfg.Certificates = []tls.Certificate{\n\t\t\t\t\t\t\t\tcert,\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tserver := &http.Server{\n\t\t\t\t\t\t\t\tAddr: Config.Server.Addr,\n\t\t\t\t\t\t\t\tHandler: e,\n\t\t\t\t\t\t\t\tReadTimeout: 5 * time.Second,\n\t\t\t\t\t\t\t\tWriteTimeout: 10 * time.Second,\n\t\t\t\t\t\t\t\tTLSConfig: cfg,\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif err := startServer(server); err != nil {\n\t\t\t\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlogrus.Infof(\"Starting on %s\", Config.Server.Addr)\n\n\t\t\t\t\t\tserver := &http.Server{\n\t\t\t\t\t\t\tAddr: Config.Server.Addr,\n\t\t\t\t\t\t\tHandler: e,\n\t\t\t\t\t\t\tReadTimeout: 5 * time.Second,\n\t\t\t\t\t\t\tWriteTimeout: 10 * time.Second,\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif err := startServer(server); err != nil {\n\t\t\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tcli.HelpFlag = &cli.BoolFlag{\n\t\tName: \"help\",\n\t\tAliases: []string{\"h\"},\n\t\tUsage: \"Show the help, so what you see now\",\n\t}\n\n\tcli.VersionFlag = &cli.BoolFlag{\n\t\tName: \"version\",\n\t\tAliases: []string{\"v\"},\n\t\tUsage: \"Print the current version of that tool\",\n\t}\n\n\tif err := app.Run(os.Args); err != nil {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc redirect(w http.ResponseWriter, req *http.Request) {\n\ttarget := \"https:\/\/\" + req.Host + req.URL.Path\n\n\tif len(req.URL.RawQuery) > 0 {\n\t\ttarget += \"?\" + req.URL.RawQuery\n\t}\n\n\tlogrus.Debugf(\"Redirecting to %s\", target)\n\thttp.Redirect(w, req, target, http.StatusTemporaryRedirect)\n}\n<commit_msg>Another fix for generate command<commit_after>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/gin-contrib\/pprof\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/joho\/godotenv\"\n\t\"golang.org\/x\/crypto\/acme\/autocert\"\n\t\"golang.org\/x\/sync\/errgroup\"\n\t\"gopkg.in\/urfave\/cli.v2\"\n)\n\n\/\/go:generate retool -tool-dir ..\/..\/_tools do fileb0x ab0x.yaml\n\nvar (\n\tdefaultAddr = \":9000\"\n)\n\nfunc main() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tif env := os.Getenv(\"KLEISTER_ENV_FILE\"); env != \"\" {\n\t\tgodotenv.Load(env)\n\t}\n\n\tapp := &cli.App{\n\t\tName: \"kleister-ui\",\n\t\tVersion: Version.String(),\n\t\tUsage: \"Manage mod packs for Minecraft\",\n\t\tCompiled: time.Now(),\n\n\t\tAuthors: []*cli.Author{\n\t\t\t{\n\t\t\t\tName: \"Thomas Boerger\",\n\t\t\t\tEmail: \"thomas@webhippie.de\",\n\t\t\t},\n\t\t},\n\n\t\tFlags: []cli.Flag{\n\t\t\t&cli.BoolFlag{\n\t\t\t\tName: \"debug\",\n\t\t\t\tValue: false,\n\t\t\t\tUsage: \"Activate debug information\",\n\t\t\t\tEnvVars: []string{\"KLEISTER_DEBUG\"},\n\t\t\t\tDestination: &Config.Debug,\n\t\t\t\tHidden: true,\n\t\t\t},\n\t\t},\n\n\t\tBefore: func(c *cli.Context) error {\n\t\t\tlogrus.SetOutput(os.Stdout)\n\n\t\t\tif Config.Debug {\n\t\t\t\tlogrus.SetLevel(logrus.DebugLevel)\n\t\t\t} else {\n\t\t\t\tlogrus.SetLevel(logrus.InfoLevel)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t},\n\n\t\tCommands: []*cli.Command{\n\t\t\t{\n\t\t\t\tName: \"server\",\n\t\t\t\tUsage: \"Start the Kleister UI\",\n\t\t\t\tFlags: []cli.Flag{\n\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\tName: \"host\",\n\t\t\t\t\t\tValue: \"http:\/\/localhost:9000\",\n\t\t\t\t\t\tUsage: \"External access to the UI\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_UI_HOST\"},\n\t\t\t\t\t\tDestination: &Config.Server.Host,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\tName: \"addr\",\n\t\t\t\t\t\tValue: defaultAddr,\n\t\t\t\t\t\tUsage: \"Address to bind the server\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_UI_ADDR\"},\n\t\t\t\t\t\tDestination: &Config.Server.Addr,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\tName: \"endpoint\",\n\t\t\t\t\t\tValue: \"http:\/\/localhost:8000\",\n\t\t\t\t\t\tUsage: \"URL for the API server\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_UI_ENDPOINT\"},\n\t\t\t\t\t\tDestination: &Config.Server.Endpoint,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\tName: \"static\",\n\t\t\t\t\t\tValue: \"\",\n\t\t\t\t\t\tUsage: \"Folder for serving assets\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_UI_STATIC\"},\n\t\t\t\t\t\tDestination: &Config.Server.Static,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\tName: \"storage\",\n\t\t\t\t\t\tValue: \"storage\/\",\n\t\t\t\t\t\tUsage: \"Folder for storing files\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_UI_STORAGE\"},\n\t\t\t\t\t\tDestination: &Config.Server.Storage,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\tName: \"cert\",\n\t\t\t\t\t\tValue: \"\",\n\t\t\t\t\t\tUsage: \"Path to SSL cert\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_UI_CERT\"},\n\t\t\t\t\t\tDestination: &Config.Server.Cert,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.StringFlag{\n\t\t\t\t\t\tName: \"key\",\n\t\t\t\t\t\tValue: \"\",\n\t\t\t\t\t\tUsage: \"Path to SSL key\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_UI_KEY\"},\n\t\t\t\t\t\tDestination: &Config.Server.Key,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.BoolFlag{\n\t\t\t\t\t\tName: \"letsencrypt\",\n\t\t\t\t\t\tValue: false,\n\t\t\t\t\t\tUsage: \"Enable Let's Encrypt SSL\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_UI_LETSENCRYPT\"},\n\t\t\t\t\t\tDestination: &Config.Server.LetsEncrypt,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.BoolFlag{\n\t\t\t\t\t\tName: \"strict-curves\",\n\t\t\t\t\t\tValue: false,\n\t\t\t\t\t\tUsage: \"Use strict SSL curves\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_STRICT_CURVES\"},\n\t\t\t\t\t\tDestination: &Config.Server.StrictCurves,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.BoolFlag{\n\t\t\t\t\t\tName: \"strict-ciphers\",\n\t\t\t\t\t\tValue: false,\n\t\t\t\t\t\tUsage: \"Use strict SSL ciphers\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_STRICT_CIPHERS\"},\n\t\t\t\t\t\tDestination: &Config.Server.StrictCiphers,\n\t\t\t\t\t},\n\t\t\t\t\t&cli.BoolFlag{\n\t\t\t\t\t\tName: \"pprof\",\n\t\t\t\t\t\tValue: false,\n\t\t\t\t\t\tUsage: \"Enable pprof debugger\",\n\t\t\t\t\t\tEnvVars: []string{\"KLEISTER_UI_PPROF\"},\n\t\t\t\t\t\tDestination: &Config.Server.Pprof,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tAction: func(c *cli.Context) error {\n\t\t\t\t\tif Config.Debug {\n\t\t\t\t\t\tgin.SetMode(gin.DebugMode)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tgin.SetMode(gin.ReleaseMode)\n\t\t\t\t\t}\n\n\t\t\t\t\te := gin.New()\n\n\t\t\t\t\te.SetHTMLTemplate(\n\t\t\t\t\t\tTemplate(),\n\t\t\t\t\t)\n\n\t\t\t\t\te.Use(SetLogger())\n\t\t\t\t\te.Use(SetRecovery())\n\n\t\t\t\t\tif Config.Server.Pprof {\n\t\t\t\t\t\tpprof.Register(\n\t\t\t\t\t\t\te,\n\t\t\t\t\t\t\t&pprof.Options{\n\t\t\t\t\t\t\t\tRoutePrefix: \"\/debug\/pprof\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\n\t\t\t\t\te.StaticFS(\n\t\t\t\t\t\t\"\/assets\",\n\t\t\t\t\t\tAssets(),\n\t\t\t\t\t)\n\n\t\t\t\t\te.NoRoute(Index)\n\n\t\t\t\t\tif Config.Server.LetsEncrypt || (Config.Server.Cert != \"\" && Config.Server.Key != \"\") {\n\t\t\t\t\t\tcfg := &tls.Config{\n\t\t\t\t\t\t\tPreferServerCipherSuites: true,\n\t\t\t\t\t\t\tMinVersion: tls.VersionTLS12,\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif Config.Server.StrictCurves {\n\t\t\t\t\t\t\tcfg.CurvePreferences = []tls.CurveID{\n\t\t\t\t\t\t\t\ttls.CurveP521,\n\t\t\t\t\t\t\t\ttls.CurveP384,\n\t\t\t\t\t\t\t\ttls.CurveP256,\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif Config.Server.StrictCiphers {\n\t\t\t\t\t\t\tcfg.CipherSuites = []uint16{\n\t\t\t\t\t\t\t\ttls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,\n\t\t\t\t\t\t\t\ttls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,\n\t\t\t\t\t\t\t\ttls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,\n\t\t\t\t\t\t\t\ttls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif Config.Server.LetsEncrypt {\n\t\t\t\t\t\t\tif Config.Server.Addr != defaultAddr {\n\t\t\t\t\t\t\t\tlogrus.Infof(\"With Let's Encrypt bind port have been overwritten!\")\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tparsed, err := url.Parse(Config.Server.Host)\n\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tlogrus.Fatal(\"Failed to parse host name. %s\", err)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tcertManager := autocert.Manager{\n\t\t\t\t\t\t\t\tPrompt: autocert.AcceptTOS,\n\t\t\t\t\t\t\t\tHostPolicy: autocert.HostWhitelist(parsed.Host),\n\t\t\t\t\t\t\t\tCache: autocert.DirCache(Config.Server.Storage),\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tcfg.GetCertificate = certManager.GetCertificate\n\n\t\t\t\t\t\t\tvar (\n\t\t\t\t\t\t\t\tg errgroup.Group\n\t\t\t\t\t\t\t)\n\n\t\t\t\t\t\t\tsplitAddr := strings.SplitN(Config.Server.Addr, \":\", 2)\n\t\t\t\t\t\t\tlogrus.Infof(\"Starting on %s:80 and %s:443\", splitAddr[0], splitAddr[0])\n\n\t\t\t\t\t\t\tg.Go(func() error {\n\t\t\t\t\t\t\t\treturn http.ListenAndServe(\n\t\t\t\t\t\t\t\t\tfmt.Sprintf(\"%s:80\", splitAddr[0]),\n\t\t\t\t\t\t\t\t\thttp.HandlerFunc(redirect),\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\tg.Go(func() error {\n\t\t\t\t\t\t\t\treturn startServer(&http.Server{\n\t\t\t\t\t\t\t\t\tAddr: fmt.Sprintf(\"%s:443\", splitAddr[0]),\n\t\t\t\t\t\t\t\t\tHandler: e,\n\t\t\t\t\t\t\t\t\tReadTimeout: 5 * time.Second,\n\t\t\t\t\t\t\t\t\tWriteTimeout: 10 * time.Second,\n\t\t\t\t\t\t\t\t\tTLSConfig: cfg,\n\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\tif err := g.Wait(); err != nil {\n\t\t\t\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlogrus.Infof(\"Starting on %s\", Config.Server.Addr)\n\n\t\t\t\t\t\t\tcert, err := tls.LoadX509KeyPair(\n\t\t\t\t\t\t\t\tConfig.Server.Cert,\n\t\t\t\t\t\t\t\tConfig.Server.Key,\n\t\t\t\t\t\t\t)\n\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tlogrus.Fatal(\"Failed to load SSL certificates. %s\", err)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tcfg.Certificates = []tls.Certificate{\n\t\t\t\t\t\t\t\tcert,\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tserver := &http.Server{\n\t\t\t\t\t\t\t\tAddr: Config.Server.Addr,\n\t\t\t\t\t\t\t\tHandler: e,\n\t\t\t\t\t\t\t\tReadTimeout: 5 * time.Second,\n\t\t\t\t\t\t\t\tWriteTimeout: 10 * time.Second,\n\t\t\t\t\t\t\t\tTLSConfig: cfg,\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif err := startServer(server); err != nil {\n\t\t\t\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlogrus.Infof(\"Starting on %s\", Config.Server.Addr)\n\n\t\t\t\t\t\tserver := &http.Server{\n\t\t\t\t\t\t\tAddr: Config.Server.Addr,\n\t\t\t\t\t\t\tHandler: e,\n\t\t\t\t\t\t\tReadTimeout: 5 * time.Second,\n\t\t\t\t\t\t\tWriteTimeout: 10 * time.Second,\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif err := startServer(server); err != nil {\n\t\t\t\t\t\t\tlogrus.Fatal(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tcli.HelpFlag = &cli.BoolFlag{\n\t\tName: \"help\",\n\t\tAliases: []string{\"h\"},\n\t\tUsage: \"Show the help, so what you see now\",\n\t}\n\n\tcli.VersionFlag = &cli.BoolFlag{\n\t\tName: \"version\",\n\t\tAliases: []string{\"v\"},\n\t\tUsage: \"Print the current version of that tool\",\n\t}\n\n\tif err := app.Run(os.Args); err != nil {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc redirect(w http.ResponseWriter, req *http.Request) {\n\ttarget := \"https:\/\/\" + req.Host + req.URL.Path\n\n\tif len(req.URL.RawQuery) > 0 {\n\t\ttarget += \"?\" + req.URL.RawQuery\n\t}\n\n\tlogrus.Debugf(\"Redirecting to %s\", target)\n\thttp.Redirect(w, req, target, http.StatusTemporaryRedirect)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Sorint.lab\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"sort\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/sorintlab\/stolon\/pkg\/cluster\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar cmdStatus = &cobra.Command{\n\tUse: \"status\",\n\tRun: status,\n\tShort: \"Display the current cluster status\",\n}\n\nfunc init() {\n\tcmdStolonCtl.AddCommand(cmdStatus)\n}\n\nfunc printTree(dbuid string, cd *cluster.ClusterData, level int, prefix string, tail bool) {\n\tout := prefix\n\tif level > 0 {\n\t\tif tail {\n\t\t\tout += \"└─\"\n\t\t} else {\n\t\t\tout += \"├─\"\n\t\t}\n\t}\n\tout += cd.DBs[dbuid].Spec.KeeperUID\n\tif dbuid == cd.Cluster.Status.Master {\n\t\tout += \" (master)\"\n\t}\n\tstdout(out)\n\tdb := cd.DBs[dbuid]\n\tfollowers := db.Spec.Followers\n\tc := len(followers)\n\tfor i, f := range followers {\n\t\temptyspace := \"\"\n\t\tif level > 0 {\n\t\t\temptyspace = \" \"\n\t\t}\n\t\tlinespace := \"│ \"\n\t\tif i < c-1 {\n\t\t\tif tail {\n\t\t\t\tprintTree(f, cd, level+1, prefix+emptyspace, false)\n\t\t\t} else {\n\t\t\t\tprintTree(f, cd, level+1, prefix+linespace, false)\n\t\t\t}\n\t\t} else {\n\t\t\tif tail {\n\t\t\t\tprintTree(f, cd, level+1, prefix+emptyspace, true)\n\t\t\t} else {\n\t\t\t\tprintTree(f, cd, level+1, prefix+linespace, true)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc status(cmd *cobra.Command, args []string) {\n\ttabOut := new(tabwriter.Writer)\n\ttabOut.Init(os.Stdout, 0, 8, 1, '\\t', 0)\n\n\tif cfg.clusterName == \"\" {\n\t\tdie(\"cluster name required\")\n\t}\n\n\te, err := NewStore()\n\tif err != nil {\n\t\tdie(\"cannot create store: %v\", err)\n\t}\n\n\tsentinelsInfo, err := e.GetSentinelsInfo()\n\tif err != nil {\n\t\tdie(\"cannot get sentinels info: %v\", err)\n\t}\n\n\tlsid, err := e.GetLeaderSentinelId()\n\tif err != nil {\n\t\tdie(\"cannot get leader sentinel info\")\n\t}\n\n\tstdout(\"=== Active sentinels ===\")\n\tstdout(\"\")\n\tif len(sentinelsInfo) == 0 {\n\t\tstdout(\"No active sentinels\")\n\t} else {\n\t\tsort.Sort(sentinelsInfo)\n\t\tfmt.Fprintf(tabOut, \"ID\\tLEADER\\n\")\n\t\tfor _, si := range sentinelsInfo {\n\t\t\tleader := false\n\t\t\tif lsid != \"\" {\n\t\t\t\tif si.UID == lsid {\n\t\t\t\t\tleader = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Fprintf(tabOut, \"%s\\t%t\\n\", si.UID, leader)\n\t\t\ttabOut.Flush()\n\t\t}\n\t}\n\n\tproxiesInfo, err := e.GetProxiesInfo()\n\tif err != nil {\n\t\tdie(\"cannot get proxies info: %v\", err)\n\t}\n\n\tstdout(\"\")\n\tstdout(\"=== Active proxies ===\")\n\tstdout(\"\")\n\tif len(proxiesInfo) == 0 {\n\t\tstdout(\"No active proxies\")\n\t} else {\n\t\tsort.Sort(proxiesInfo)\n\t\tfmt.Fprintf(tabOut, \"ID\\n\")\n\t\tfor _, pi := range proxiesInfo {\n\t\t\tfmt.Fprintf(tabOut, \"%s\\n\", pi.UID)\n\t\t\ttabOut.Flush()\n\t\t}\n\t}\n\n\tcd, _, err := getClusterData(e)\n\tif err != nil {\n\t\tdie(\"%v\", err)\n\t}\n\n\tstdout(\"\")\n\tstdout(\"=== Keepers ===\")\n\tstdout(\"\")\n\tif cd.Keepers == nil {\n\t\tstdout(\"No keepers available\")\n\t\tstdout(\"\")\n\t} else {\n\t\tkssKeys := cd.Keepers.SortedKeys()\n\t\tfmt.Fprintf(tabOut, \"UID\\tPG LISTENADDRESS\\tHEALTHY\\tPGWANTEDGENERATION\\tPGCURRENTGENERATION\\n\")\n\t\tfor _, kuid := range kssKeys {\n\t\t\tk := cd.Keepers[kuid]\n\t\t\tdb := cd.FindDB(k)\n\t\t\tif db != nil {\n\t\t\t\tif db.Status.ListenAddress != \"\" {\n\t\t\t\t\tfmt.Fprintf(tabOut, \"%s\\t%s:%s\\t%t\\t%d\\t%d\\n\", k.UID, db.Status.ListenAddress, db.Status.Port, k.Status.Healthy, db.Generation, db.Status.CurrentGeneration)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Fprintf(tabOut, \"%s\\t(unknown)\\t%t\\t%d\\t%d\\n\", k.UID, k.Status.Healthy, db.Generation, db.Status.CurrentGeneration)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(tabOut, \"%s\\n\", k.UID)\n\t\t\t}\n\t\t}\n\t}\n\ttabOut.Flush()\n\n\tif cd.Cluster == nil || cd.DBs == nil {\n\t\tstdout(\"No cluster available\")\n\t\treturn\n\t}\n\n\tmaster := cd.Cluster.Status.Master\n\tstdout(\"\")\n\tstdout(\"=== Cluster Info ===\")\n\tstdout(\"\")\n\tif master != \"\" {\n\t\tstdout(\"Master: %s\", cd.Keepers[cd.DBs[master].Spec.KeeperUID].UID)\n\t} else {\n\t\tstdout(\"Master Keeper: (none)\")\n\t}\n\n\tif master != \"\" {\n\t\tstdout(\"\")\n\t\tstdout(\"===== Keepers tree =====\")\n\t\tmasterDB := cd.DBs[master]\n\t\tstdout(\"\")\n\t\tprintTree(masterDB.UID, cd, 0, \"\", true)\n\t}\n\n\tstdout(\"\")\n}\n<commit_msg>stolonct: status: report db status<commit_after>\/\/ Copyright 2015 Sorint.lab\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"sort\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/sorintlab\/stolon\/pkg\/cluster\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar cmdStatus = &cobra.Command{\n\tUse: \"status\",\n\tRun: status,\n\tShort: \"Display the current cluster status\",\n}\n\nfunc init() {\n\tcmdStolonCtl.AddCommand(cmdStatus)\n}\n\nfunc printTree(dbuid string, cd *cluster.ClusterData, level int, prefix string, tail bool) {\n\tout := prefix\n\tif level > 0 {\n\t\tif tail {\n\t\t\tout += \"└─\"\n\t\t} else {\n\t\t\tout += \"├─\"\n\t\t}\n\t}\n\tout += cd.DBs[dbuid].Spec.KeeperUID\n\tif dbuid == cd.Cluster.Status.Master {\n\t\tout += \" (master)\"\n\t}\n\tstdout(out)\n\tdb := cd.DBs[dbuid]\n\tfollowers := db.Spec.Followers\n\tc := len(followers)\n\tfor i, f := range followers {\n\t\temptyspace := \"\"\n\t\tif level > 0 {\n\t\t\temptyspace = \" \"\n\t\t}\n\t\tlinespace := \"│ \"\n\t\tif i < c-1 {\n\t\t\tif tail {\n\t\t\t\tprintTree(f, cd, level+1, prefix+emptyspace, false)\n\t\t\t} else {\n\t\t\t\tprintTree(f, cd, level+1, prefix+linespace, false)\n\t\t\t}\n\t\t} else {\n\t\t\tif tail {\n\t\t\t\tprintTree(f, cd, level+1, prefix+emptyspace, true)\n\t\t\t} else {\n\t\t\t\tprintTree(f, cd, level+1, prefix+linespace, true)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc status(cmd *cobra.Command, args []string) {\n\ttabOut := new(tabwriter.Writer)\n\ttabOut.Init(os.Stdout, 0, 8, 1, '\\t', 0)\n\n\tif cfg.clusterName == \"\" {\n\t\tdie(\"cluster name required\")\n\t}\n\n\te, err := NewStore()\n\tif err != nil {\n\t\tdie(\"cannot create store: %v\", err)\n\t}\n\n\tsentinelsInfo, err := e.GetSentinelsInfo()\n\tif err != nil {\n\t\tdie(\"cannot get sentinels info: %v\", err)\n\t}\n\n\tlsid, err := e.GetLeaderSentinelId()\n\tif err != nil {\n\t\tdie(\"cannot get leader sentinel info\")\n\t}\n\n\tstdout(\"=== Active sentinels ===\")\n\tstdout(\"\")\n\tif len(sentinelsInfo) == 0 {\n\t\tstdout(\"No active sentinels\")\n\t} else {\n\t\tsort.Sort(sentinelsInfo)\n\t\tfmt.Fprintf(tabOut, \"ID\\tLEADER\\n\")\n\t\tfor _, si := range sentinelsInfo {\n\t\t\tleader := false\n\t\t\tif lsid != \"\" {\n\t\t\t\tif si.UID == lsid {\n\t\t\t\t\tleader = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Fprintf(tabOut, \"%s\\t%t\\n\", si.UID, leader)\n\t\t\ttabOut.Flush()\n\t\t}\n\t}\n\n\tproxiesInfo, err := e.GetProxiesInfo()\n\tif err != nil {\n\t\tdie(\"cannot get proxies info: %v\", err)\n\t}\n\n\tstdout(\"\")\n\tstdout(\"=== Active proxies ===\")\n\tstdout(\"\")\n\tif len(proxiesInfo) == 0 {\n\t\tstdout(\"No active proxies\")\n\t} else {\n\t\tsort.Sort(proxiesInfo)\n\t\tfmt.Fprintf(tabOut, \"ID\\n\")\n\t\tfor _, pi := range proxiesInfo {\n\t\t\tfmt.Fprintf(tabOut, \"%s\\n\", pi.UID)\n\t\t\ttabOut.Flush()\n\t\t}\n\t}\n\n\tcd, _, err := getClusterData(e)\n\tif err != nil {\n\t\tdie(\"%v\", err)\n\t}\n\n\tstdout(\"\")\n\tstdout(\"=== Keepers ===\")\n\tstdout(\"\")\n\tif cd.Keepers == nil {\n\t\tstdout(\"No keepers available\")\n\t\tstdout(\"\")\n\t} else {\n\t\tkssKeys := cd.Keepers.SortedKeys()\n\t\tfmt.Fprintf(tabOut, \"UID\\tHEALTHY\\tPG LISTENADDRESS\\tPG HEALTHY\\tPG WANTEDGENERATION\\tPG CURRENTGENERATION\\n\")\n\t\tfor _, kuid := range kssKeys {\n\t\t\tk := cd.Keepers[kuid]\n\t\t\tdb := cd.FindDB(k)\n\t\t\tif db != nil {\n\t\t\t\tdbListenAddress := \"(unknown)\"\n\t\t\t\tif db.Status.ListenAddress != \"\" {\n\t\t\t\t\tdbListenAddress = fmt.Sprintf(\"%s:%s\", db.Status.ListenAddress, db.Status.Port)\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(tabOut, \"%s\\t%t\\t%s\\t%t\\t%d\\t%d\\t\\n\", k.UID, k.Status.Healthy, dbListenAddress, db.Status.Healthy, db.Generation, db.Status.CurrentGeneration)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(tabOut, \"%s\\t%t\\t(no db assigned)\\t\\t\\t\\t\\n\", k.UID, k.Status.Healthy)\n\t\t\t}\n\t\t}\n\t}\n\ttabOut.Flush()\n\n\tif cd.Cluster == nil || cd.DBs == nil {\n\t\tstdout(\"No cluster available\")\n\t\treturn\n\t}\n\n\tmaster := cd.Cluster.Status.Master\n\tstdout(\"\")\n\tstdout(\"=== Cluster Info ===\")\n\tstdout(\"\")\n\tif master != \"\" {\n\t\tstdout(\"Master: %s\", cd.Keepers[cd.DBs[master].Spec.KeeperUID].UID)\n\t} else {\n\t\tstdout(\"Master Keeper: (none)\")\n\t}\n\n\tif master != \"\" {\n\t\tstdout(\"\")\n\t\tstdout(\"===== Keepers\/DB tree =====\")\n\t\tmasterDB := cd.DBs[master]\n\t\tstdout(\"\")\n\t\tprintTree(masterDB.UID, cd, 0, \"\", true)\n\t}\n\n\tstdout(\"\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t_ \"embed\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/influxdata\/telegraf\"\n\t\"github.com\/influxdata\/telegraf\/plugins\/aggregators\"\n\t\"github.com\/influxdata\/telegraf\/plugins\/inputs\"\n\t\"github.com\/influxdata\/telegraf\/plugins\/outputs\"\n\t\"github.com\/influxdata\/telegraf\/plugins\/processors\"\n)\n\nvar (\n\t\/\/ Default sections\n\tsectionDefaults = []string{\"global_tags\", \"agent\", \"outputs\",\n\t\t\"processors\", \"aggregators\", \"inputs\"}\n\n\t\/\/ Default input plugins\n\tinputDefaults = []string{\"cpu\", \"mem\", \"swap\", \"system\", \"kernel\",\n\t\t\"processes\", \"disk\", \"diskio\"}\n\n\t\/\/ Default output plugins\n\toutputDefaults = []string{\"influxdb\"}\n)\n\nvar header = `# Telegraf Configuration\n#\n# Telegraf is entirely plugin driven. All metrics are gathered from the\n# declared inputs, and sent to the declared outputs.\n#\n# Plugins must be declared in here to be active.\n# To deactivate a plugin, comment out the name and any variables.\n#\n# Use 'telegraf -config telegraf.conf -test' to see what metrics a config\n# file would generate.\n#\n# Environment variables can be used anywhere in this config file, simply surround\n# them with ${}. For strings the variable must be within quotes (ie, \"${STR_VAR}\"),\n# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})\n\n`\nvar globalTagsConfig = `\n# Global tags can be specified here in key=\"value\" format.\n[global_tags]\n # dc = \"us-east-1\" # will tag all metrics with dc=us-east-1\n # rack = \"1a\"\n ## Environment variables can be used as tags, and throughout the config file\n # user = \"$USER\"\n\n`\n\n\/\/ DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the agentConfig data.\n\/\/\n\/\/go:embed agent.conf\nvar agentConfig string\n\nvar outputHeader = `\n###############################################################################\n# OUTPUT PLUGINS #\n###############################################################################\n\n`\n\nvar processorHeader = `\n###############################################################################\n# PROCESSOR PLUGINS #\n###############################################################################\n\n`\n\nvar aggregatorHeader = `\n###############################################################################\n# AGGREGATOR PLUGINS #\n###############################################################################\n\n`\n\nvar inputHeader = `\n###############################################################################\n# INPUT PLUGINS #\n###############################################################################\n\n`\n\nvar serviceInputHeader = `\n###############################################################################\n# SERVICE INPUT PLUGINS #\n###############################################################################\n\n`\n\nfunc sliceContains(name string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ printSampleConfig prints the sample config\nfunc printSampleConfig(\n\toutputBuffer io.Writer,\n\tsectionFilters []string,\n\tinputFilters []string,\n\toutputFilters []string,\n\taggregatorFilters []string,\n\tprocessorFilters []string,\n) {\n\t\/\/ print headers\n\toutputBuffer.Write([]byte(header))\n\n\tif len(sectionFilters) == 0 {\n\t\tsectionFilters = sectionDefaults\n\t}\n\tprintFilteredGlobalSections(sectionFilters, outputBuffer)\n\n\t\/\/ print output plugins\n\tif sliceContains(\"outputs\", sectionFilters) {\n\t\tif len(outputFilters) != 0 {\n\t\t\tif len(outputFilters) >= 3 && outputFilters[1] != \"none\" {\n\t\t\t\toutputBuffer.Write([]byte(outputHeader))\n\t\t\t}\n\t\t\tprintFilteredOutputs(outputFilters, false, outputBuffer)\n\t\t} else {\n\t\t\toutputBuffer.Write([]byte(outputHeader))\n\t\t\tprintFilteredOutputs(outputDefaults, false, outputBuffer)\n\t\t\t\/\/ Print non-default outputs, commented\n\t\t\tvar pnames []string\n\t\t\tfor pname := range outputs.Outputs {\n\t\t\t\tif !sliceContains(pname, outputDefaults) {\n\t\t\t\t\tpnames = append(pnames, pname)\n\t\t\t\t}\n\t\t\t}\n\t\t\tprintFilteredOutputs(pnames, true, outputBuffer)\n\t\t}\n\t}\n\n\t\/\/ print processor plugins\n\tif sliceContains(\"processors\", sectionFilters) {\n\t\tif len(processorFilters) != 0 {\n\t\t\tif len(processorFilters) >= 3 && processorFilters[1] != \"none\" {\n\t\t\t\toutputBuffer.Write([]byte(processorHeader))\n\t\t\t}\n\t\t\tprintFilteredProcessors(processorFilters, false, outputBuffer)\n\t\t} else {\n\t\t\toutputBuffer.Write([]byte(processorHeader))\n\t\t\tpnames := []string{}\n\t\t\tfor pname := range processors.Processors {\n\t\t\t\tpnames = append(pnames, pname)\n\t\t\t}\n\t\t\tprintFilteredProcessors(pnames, true, outputBuffer)\n\t\t}\n\t}\n\n\t\/\/ print aggregator plugins\n\tif sliceContains(\"aggregators\", sectionFilters) {\n\t\tif len(aggregatorFilters) != 0 {\n\t\t\tif len(aggregatorFilters) >= 3 && aggregatorFilters[1] != \"none\" {\n\t\t\t\toutputBuffer.Write([]byte(aggregatorHeader))\n\t\t\t}\n\t\t\tprintFilteredAggregators(aggregatorFilters, false, outputBuffer)\n\t\t} else {\n\t\t\toutputBuffer.Write([]byte(aggregatorHeader))\n\t\t\tpnames := []string{}\n\t\t\tfor pname := range aggregators.Aggregators {\n\t\t\t\tpnames = append(pnames, pname)\n\t\t\t}\n\t\t\tprintFilteredAggregators(pnames, true, outputBuffer)\n\t\t}\n\t}\n\n\t\/\/ print input plugins\n\tif sliceContains(\"inputs\", sectionFilters) {\n\t\tif len(inputFilters) != 0 {\n\t\t\tif len(inputFilters) >= 3 && inputFilters[1] != \"none\" {\n\t\t\t\toutputBuffer.Write([]byte(inputHeader))\n\t\t\t}\n\t\t\tprintFilteredInputs(inputFilters, false, outputBuffer)\n\t\t} else {\n\t\t\toutputBuffer.Write([]byte(inputHeader))\n\t\t\tprintFilteredInputs(inputDefaults, false, outputBuffer)\n\t\t\t\/\/ Print non-default inputs, commented\n\t\t\tvar pnames []string\n\t\t\tfor pname := range inputs.Inputs {\n\t\t\t\tif !sliceContains(pname, inputDefaults) {\n\t\t\t\t\tpnames = append(pnames, pname)\n\t\t\t\t}\n\t\t\t}\n\t\t\tprintFilteredInputs(pnames, true, outputBuffer)\n\t\t}\n\t}\n}\n\nfunc printFilteredProcessors(processorFilters []string, commented bool, outputBuffer io.Writer) {\n\t\/\/ Filter processors\n\tvar pnames []string\n\tfor pname := range processors.Processors {\n\t\tif sliceContains(pname, processorFilters) {\n\t\t\tpnames = append(pnames, pname)\n\t\t}\n\t}\n\tsort.Strings(pnames)\n\n\t\/\/ Print Outputs\n\tfor _, pname := range pnames {\n\t\tcreator := processors.Processors[pname]\n\t\toutput := creator()\n\t\tprintConfig(pname, output, \"processors\", commented, processors.Deprecations[pname], outputBuffer)\n\t}\n}\n\nfunc printFilteredAggregators(aggregatorFilters []string, commented bool, outputBuffer io.Writer) {\n\t\/\/ Filter outputs\n\tvar anames []string\n\tfor aname := range aggregators.Aggregators {\n\t\tif sliceContains(aname, aggregatorFilters) {\n\t\t\tanames = append(anames, aname)\n\t\t}\n\t}\n\tsort.Strings(anames)\n\n\t\/\/ Print Outputs\n\tfor _, aname := range anames {\n\t\tcreator := aggregators.Aggregators[aname]\n\t\toutput := creator()\n\t\tprintConfig(aname, output, \"aggregators\", commented, aggregators.Deprecations[aname], outputBuffer)\n\t}\n}\n\nfunc printFilteredInputs(inputFilters []string, commented bool, outputBuffer io.Writer) {\n\t\/\/ Filter inputs\n\tvar pnames []string\n\tfor pname := range inputs.Inputs {\n\t\tif sliceContains(pname, inputFilters) {\n\t\t\tpnames = append(pnames, pname)\n\t\t}\n\t}\n\tsort.Strings(pnames)\n\n\t\/\/ cache service inputs to print them at the end\n\tservInputs := make(map[string]telegraf.ServiceInput)\n\t\/\/ for alphabetical looping:\n\tservInputNames := []string{}\n\n\t\/\/ Print Inputs\n\tfor _, pname := range pnames {\n\t\t\/\/ Skip inputs that are registered twice for backward compatibility\n\t\tswitch pname {\n\t\tcase \"cisco_telemetry_gnmi\", \"io\", \"KNXListener\":\n\t\t\tcontinue\n\t\t}\n\t\tcreator := inputs.Inputs[pname]\n\t\tinput := creator()\n\n\t\tif p, ok := input.(telegraf.ServiceInput); ok {\n\t\t\tservInputs[pname] = p\n\t\t\tservInputNames = append(servInputNames, pname)\n\t\t\tcontinue\n\t\t}\n\n\t\tprintConfig(pname, input, \"inputs\", commented, inputs.Deprecations[pname], outputBuffer)\n\t}\n\n\t\/\/ Print Service Inputs\n\tif len(servInputs) == 0 {\n\t\treturn\n\t}\n\tsort.Strings(servInputNames)\n\n\toutputBuffer.Write([]byte(serviceInputHeader))\n\tfor _, name := range servInputNames {\n\t\tprintConfig(name, servInputs[name], \"inputs\", commented, inputs.Deprecations[name], outputBuffer)\n\t}\n}\n\nfunc printFilteredOutputs(outputFilters []string, commented bool, outputBuffer io.Writer) {\n\t\/\/ Filter outputs\n\tvar onames []string\n\tfor oname := range outputs.Outputs {\n\t\tif sliceContains(oname, outputFilters) {\n\t\t\tonames = append(onames, oname)\n\t\t}\n\t}\n\tsort.Strings(onames)\n\n\t\/\/ Print Outputs\n\tfor _, oname := range onames {\n\t\tcreator := outputs.Outputs[oname]\n\t\toutput := creator()\n\t\tprintConfig(oname, output, \"outputs\", commented, outputs.Deprecations[oname], outputBuffer)\n\t}\n}\n\nfunc printFilteredGlobalSections(sectionFilters []string, outputBuffer io.Writer) {\n\tif sliceContains(\"global_tags\", sectionFilters) {\n\t\toutputBuffer.Write([]byte(globalTagsConfig))\n\t}\n\n\tif sliceContains(\"agent\", sectionFilters) {\n\t\toutputBuffer.Write([]byte(agentConfig))\n\t}\n}\n\nfunc printConfig(name string, p telegraf.PluginDescriber, op string, commented bool, di telegraf.DeprecationInfo, outputBuffer io.Writer) {\n\tcomment := \"\"\n\tif commented {\n\t\tcomment = \"# \"\n\t}\n\n\tif di.Since != \"\" {\n\t\tremovalNote := \"\"\n\t\tif di.RemovalIn != \"\" {\n\t\t\tremovalNote = \" and will be removed in \" + di.RemovalIn\n\t\t}\n\t\toutputBuffer.Write([]byte(fmt.Sprintf(\"\\n%s ## DEPRECATED: The '%s' plugin is deprecated in version %s%s, %s.\", comment, name, di.Since, removalNote, di.Notice)))\n\t}\n\n\tsample := p.SampleConfig()\n\tif sample == \"\" {\n\t\toutputBuffer.Write([]byte(fmt.Sprintf(\"\\n#[[%s.%s]]\", op, name)))\n\t\toutputBuffer.Write([]byte(fmt.Sprintf(\"\\n%s # no configuration\\n\\n\", comment)))\n\t} else {\n\t\tlines := strings.Split(sample, \"\\n\")\n\t\toutputBuffer.Write([]byte(\"\\n\"))\n\t\tfor i, line := range lines {\n\t\t\tif i == len(lines)-1 {\n\t\t\t\toutputBuffer.Write([]byte(\"\\n\"))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\toutputBuffer.Write([]byte(strings.TrimRight(comment+line, \" \") + \"\\n\"))\n\t\t}\n\t}\n}\n\n\/\/ PrintInputConfig prints the config usage of a single input.\nfunc PrintInputConfig(name string, outputBuffer io.Writer) error {\n\tcreator, ok := inputs.Inputs[name]\n\tif !ok {\n\t\treturn fmt.Errorf(\"input %s not found\", name)\n\t}\n\n\tprintConfig(name, creator(), \"inputs\", false, inputs.Deprecations[name], outputBuffer)\n\treturn nil\n}\n\n\/\/ PrintOutputConfig prints the config usage of a single output.\nfunc PrintOutputConfig(name string, outputBuffer io.Writer) error {\n\tcreator, ok := outputs.Outputs[name]\n\tif !ok {\n\t\treturn fmt.Errorf(\"output %s not found\", name)\n\t}\n\n\tprintConfig(name, creator(), \"outputs\", false, outputs.Deprecations[name], outputBuffer)\n\treturn nil\n}\n<commit_msg>feat!: Disable [[outputs.influxdb]] in sample config (#12158)<commit_after>package main\n\nimport (\n\t_ \"embed\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/influxdata\/telegraf\"\n\t\"github.com\/influxdata\/telegraf\/plugins\/aggregators\"\n\t\"github.com\/influxdata\/telegraf\/plugins\/inputs\"\n\t\"github.com\/influxdata\/telegraf\/plugins\/outputs\"\n\t\"github.com\/influxdata\/telegraf\/plugins\/processors\"\n)\n\nvar (\n\t\/\/ Default sections\n\tsectionDefaults = []string{\"global_tags\", \"agent\", \"outputs\",\n\t\t\"processors\", \"aggregators\", \"inputs\"}\n\n\t\/\/ Default input plugins\n\tinputDefaults = []string{\"cpu\", \"mem\", \"swap\", \"system\", \"kernel\",\n\t\t\"processes\", \"disk\", \"diskio\"}\n\n\t\/\/ Default output plugins\n\toutputDefaults = []string{}\n)\n\nvar header = `# Telegraf Configuration\n#\n# Telegraf is entirely plugin driven. All metrics are gathered from the\n# declared inputs, and sent to the declared outputs.\n#\n# Plugins must be declared in here to be active.\n# To deactivate a plugin, comment out the name and any variables.\n#\n# Use 'telegraf -config telegraf.conf -test' to see what metrics a config\n# file would generate.\n#\n# Environment variables can be used anywhere in this config file, simply surround\n# them with ${}. For strings the variable must be within quotes (ie, \"${STR_VAR}\"),\n# for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR})\n\n`\nvar globalTagsConfig = `\n# Global tags can be specified here in key=\"value\" format.\n[global_tags]\n # dc = \"us-east-1\" # will tag all metrics with dc=us-east-1\n # rack = \"1a\"\n ## Environment variables can be used as tags, and throughout the config file\n # user = \"$USER\"\n\n`\n\n\/\/ DO NOT REMOVE THE NEXT TWO LINES! This is required to embed the agentConfig data.\n\/\/\n\/\/go:embed agent.conf\nvar agentConfig string\n\nvar outputHeader = `\n###############################################################################\n# OUTPUT PLUGINS #\n###############################################################################\n\n`\n\nvar processorHeader = `\n###############################################################################\n# PROCESSOR PLUGINS #\n###############################################################################\n\n`\n\nvar aggregatorHeader = `\n###############################################################################\n# AGGREGATOR PLUGINS #\n###############################################################################\n\n`\n\nvar inputHeader = `\n###############################################################################\n# INPUT PLUGINS #\n###############################################################################\n\n`\n\nvar serviceInputHeader = `\n###############################################################################\n# SERVICE INPUT PLUGINS #\n###############################################################################\n\n`\n\nfunc sliceContains(name string, list []string) bool {\n\tfor _, b := range list {\n\t\tif b == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ printSampleConfig prints the sample config\nfunc printSampleConfig(\n\toutputBuffer io.Writer,\n\tsectionFilters []string,\n\tinputFilters []string,\n\toutputFilters []string,\n\taggregatorFilters []string,\n\tprocessorFilters []string,\n) {\n\t\/\/ print headers\n\toutputBuffer.Write([]byte(header))\n\n\tif len(sectionFilters) == 0 {\n\t\tsectionFilters = sectionDefaults\n\t}\n\tprintFilteredGlobalSections(sectionFilters, outputBuffer)\n\n\t\/\/ print output plugins\n\tif sliceContains(\"outputs\", sectionFilters) {\n\t\tif len(outputFilters) != 0 {\n\t\t\tif len(outputFilters) >= 3 && outputFilters[1] != \"none\" {\n\t\t\t\toutputBuffer.Write([]byte(outputHeader))\n\t\t\t}\n\t\t\tprintFilteredOutputs(outputFilters, false, outputBuffer)\n\t\t} else {\n\t\t\toutputBuffer.Write([]byte(outputHeader))\n\t\t\tprintFilteredOutputs(outputDefaults, false, outputBuffer)\n\t\t\t\/\/ Print non-default outputs, commented\n\t\t\tvar pnames []string\n\t\t\tfor pname := range outputs.Outputs {\n\t\t\t\tif !sliceContains(pname, outputDefaults) {\n\t\t\t\t\tpnames = append(pnames, pname)\n\t\t\t\t}\n\t\t\t}\n\t\t\tprintFilteredOutputs(pnames, true, outputBuffer)\n\t\t}\n\t}\n\n\t\/\/ print processor plugins\n\tif sliceContains(\"processors\", sectionFilters) {\n\t\tif len(processorFilters) != 0 {\n\t\t\tif len(processorFilters) >= 3 && processorFilters[1] != \"none\" {\n\t\t\t\toutputBuffer.Write([]byte(processorHeader))\n\t\t\t}\n\t\t\tprintFilteredProcessors(processorFilters, false, outputBuffer)\n\t\t} else {\n\t\t\toutputBuffer.Write([]byte(processorHeader))\n\t\t\tpnames := []string{}\n\t\t\tfor pname := range processors.Processors {\n\t\t\t\tpnames = append(pnames, pname)\n\t\t\t}\n\t\t\tprintFilteredProcessors(pnames, true, outputBuffer)\n\t\t}\n\t}\n\n\t\/\/ print aggregator plugins\n\tif sliceContains(\"aggregators\", sectionFilters) {\n\t\tif len(aggregatorFilters) != 0 {\n\t\t\tif len(aggregatorFilters) >= 3 && aggregatorFilters[1] != \"none\" {\n\t\t\t\toutputBuffer.Write([]byte(aggregatorHeader))\n\t\t\t}\n\t\t\tprintFilteredAggregators(aggregatorFilters, false, outputBuffer)\n\t\t} else {\n\t\t\toutputBuffer.Write([]byte(aggregatorHeader))\n\t\t\tpnames := []string{}\n\t\t\tfor pname := range aggregators.Aggregators {\n\t\t\t\tpnames = append(pnames, pname)\n\t\t\t}\n\t\t\tprintFilteredAggregators(pnames, true, outputBuffer)\n\t\t}\n\t}\n\n\t\/\/ print input plugins\n\tif sliceContains(\"inputs\", sectionFilters) {\n\t\tif len(inputFilters) != 0 {\n\t\t\tif len(inputFilters) >= 3 && inputFilters[1] != \"none\" {\n\t\t\t\toutputBuffer.Write([]byte(inputHeader))\n\t\t\t}\n\t\t\tprintFilteredInputs(inputFilters, false, outputBuffer)\n\t\t} else {\n\t\t\toutputBuffer.Write([]byte(inputHeader))\n\t\t\tprintFilteredInputs(inputDefaults, false, outputBuffer)\n\t\t\t\/\/ Print non-default inputs, commented\n\t\t\tvar pnames []string\n\t\t\tfor pname := range inputs.Inputs {\n\t\t\t\tif !sliceContains(pname, inputDefaults) {\n\t\t\t\t\tpnames = append(pnames, pname)\n\t\t\t\t}\n\t\t\t}\n\t\t\tprintFilteredInputs(pnames, true, outputBuffer)\n\t\t}\n\t}\n}\n\nfunc printFilteredProcessors(processorFilters []string, commented bool, outputBuffer io.Writer) {\n\t\/\/ Filter processors\n\tvar pnames []string\n\tfor pname := range processors.Processors {\n\t\tif sliceContains(pname, processorFilters) {\n\t\t\tpnames = append(pnames, pname)\n\t\t}\n\t}\n\tsort.Strings(pnames)\n\n\t\/\/ Print Outputs\n\tfor _, pname := range pnames {\n\t\tcreator := processors.Processors[pname]\n\t\toutput := creator()\n\t\tprintConfig(pname, output, \"processors\", commented, processors.Deprecations[pname], outputBuffer)\n\t}\n}\n\nfunc printFilteredAggregators(aggregatorFilters []string, commented bool, outputBuffer io.Writer) {\n\t\/\/ Filter outputs\n\tvar anames []string\n\tfor aname := range aggregators.Aggregators {\n\t\tif sliceContains(aname, aggregatorFilters) {\n\t\t\tanames = append(anames, aname)\n\t\t}\n\t}\n\tsort.Strings(anames)\n\n\t\/\/ Print Outputs\n\tfor _, aname := range anames {\n\t\tcreator := aggregators.Aggregators[aname]\n\t\toutput := creator()\n\t\tprintConfig(aname, output, \"aggregators\", commented, aggregators.Deprecations[aname], outputBuffer)\n\t}\n}\n\nfunc printFilteredInputs(inputFilters []string, commented bool, outputBuffer io.Writer) {\n\t\/\/ Filter inputs\n\tvar pnames []string\n\tfor pname := range inputs.Inputs {\n\t\tif sliceContains(pname, inputFilters) {\n\t\t\tpnames = append(pnames, pname)\n\t\t}\n\t}\n\tsort.Strings(pnames)\n\n\t\/\/ cache service inputs to print them at the end\n\tservInputs := make(map[string]telegraf.ServiceInput)\n\t\/\/ for alphabetical looping:\n\tservInputNames := []string{}\n\n\t\/\/ Print Inputs\n\tfor _, pname := range pnames {\n\t\t\/\/ Skip inputs that are registered twice for backward compatibility\n\t\tswitch pname {\n\t\tcase \"cisco_telemetry_gnmi\", \"io\", \"KNXListener\":\n\t\t\tcontinue\n\t\t}\n\t\tcreator := inputs.Inputs[pname]\n\t\tinput := creator()\n\n\t\tif p, ok := input.(telegraf.ServiceInput); ok {\n\t\t\tservInputs[pname] = p\n\t\t\tservInputNames = append(servInputNames, pname)\n\t\t\tcontinue\n\t\t}\n\n\t\tprintConfig(pname, input, \"inputs\", commented, inputs.Deprecations[pname], outputBuffer)\n\t}\n\n\t\/\/ Print Service Inputs\n\tif len(servInputs) == 0 {\n\t\treturn\n\t}\n\tsort.Strings(servInputNames)\n\n\toutputBuffer.Write([]byte(serviceInputHeader))\n\tfor _, name := range servInputNames {\n\t\tprintConfig(name, servInputs[name], \"inputs\", commented, inputs.Deprecations[name], outputBuffer)\n\t}\n}\n\nfunc printFilteredOutputs(outputFilters []string, commented bool, outputBuffer io.Writer) {\n\t\/\/ Filter outputs\n\tvar onames []string\n\tfor oname := range outputs.Outputs {\n\t\tif sliceContains(oname, outputFilters) {\n\t\t\tonames = append(onames, oname)\n\t\t}\n\t}\n\tsort.Strings(onames)\n\n\t\/\/ Print Outputs\n\tfor _, oname := range onames {\n\t\tcreator := outputs.Outputs[oname]\n\t\toutput := creator()\n\t\tprintConfig(oname, output, \"outputs\", commented, outputs.Deprecations[oname], outputBuffer)\n\t}\n}\n\nfunc printFilteredGlobalSections(sectionFilters []string, outputBuffer io.Writer) {\n\tif sliceContains(\"global_tags\", sectionFilters) {\n\t\toutputBuffer.Write([]byte(globalTagsConfig))\n\t}\n\n\tif sliceContains(\"agent\", sectionFilters) {\n\t\toutputBuffer.Write([]byte(agentConfig))\n\t}\n}\n\nfunc printConfig(name string, p telegraf.PluginDescriber, op string, commented bool, di telegraf.DeprecationInfo, outputBuffer io.Writer) {\n\tcomment := \"\"\n\tif commented {\n\t\tcomment = \"# \"\n\t}\n\n\tif di.Since != \"\" {\n\t\tremovalNote := \"\"\n\t\tif di.RemovalIn != \"\" {\n\t\t\tremovalNote = \" and will be removed in \" + di.RemovalIn\n\t\t}\n\t\toutputBuffer.Write([]byte(fmt.Sprintf(\"\\n%s ## DEPRECATED: The '%s' plugin is deprecated in version %s%s, %s.\", comment, name, di.Since, removalNote, di.Notice)))\n\t}\n\n\tsample := p.SampleConfig()\n\tif sample == \"\" {\n\t\toutputBuffer.Write([]byte(fmt.Sprintf(\"\\n#[[%s.%s]]\", op, name)))\n\t\toutputBuffer.Write([]byte(fmt.Sprintf(\"\\n%s # no configuration\\n\\n\", comment)))\n\t} else {\n\t\tlines := strings.Split(sample, \"\\n\")\n\t\toutputBuffer.Write([]byte(\"\\n\"))\n\t\tfor i, line := range lines {\n\t\t\tif i == len(lines)-1 {\n\t\t\t\toutputBuffer.Write([]byte(\"\\n\"))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\toutputBuffer.Write([]byte(strings.TrimRight(comment+line, \" \") + \"\\n\"))\n\t\t}\n\t}\n}\n\n\/\/ PrintInputConfig prints the config usage of a single input.\nfunc PrintInputConfig(name string, outputBuffer io.Writer) error {\n\tcreator, ok := inputs.Inputs[name]\n\tif !ok {\n\t\treturn fmt.Errorf(\"input %s not found\", name)\n\t}\n\n\tprintConfig(name, creator(), \"inputs\", false, inputs.Deprecations[name], outputBuffer)\n\treturn nil\n}\n\n\/\/ PrintOutputConfig prints the config usage of a single output.\nfunc PrintOutputConfig(name string, outputBuffer io.Writer) error {\n\tcreator, ok := outputs.Outputs[name]\n\tif !ok {\n\t\treturn fmt.Errorf(\"output %s not found\", name)\n\t}\n\n\tprintConfig(name, creator(), \"outputs\", false, outputs.Deprecations[name], outputBuffer)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/dlespiau\/x86db\"\n)\n\nfunc nasmOpcodeToPlan9(op string) string {\n\t\/*\n\t * SSE\n\t *\/\n\tswitch op {\n\t\/\/ The condition is encoded as an imm8 operand of CMPPS\n\tcase \"CMPEQPS\", \"CMPLTPS\", \"CMPLEPS\", \"CMPUNORDPS\", \"CMPNEQPS\", \"CMPNLTPS\",\n\t\t\"CMPNLEPS\", \"CMPORDPS\":\n\t\treturn \"CMPPS\"\n\t\/\/ The condition is encoded as an imm8 operand of CMPSS.\n\tcase \"CMPEQSS\", \"CMPLTSS\", \"CMPLESS\", \"CMPUNORDSS\", \"CMPNEQSS\", \"CMPNLTSS\",\n\t\t\"CMPNLESS\", \"CMPORDSS\":\n\t\treturn \"CMPSS\"\n\tcase \"CVTSI2SS\":\n\t\treturn \"CVTSL2SS\"\n\tcase \"CVTSS2SI\":\n\t\treturn \"CVTSS2SL\"\n\tcase \"CVTTSS2SI\":\n\t\treturn \"CVTTSS2SL\"\n\t}\n\n\t\/*\n\t * PCLMULQDQ\n\t *\/\n\tswitch op {\n\tcase \"PCLMULLQLQDQ\", \"PCLMULHQLQDQ\", \"PCLMULLQHQDQ\", \"PCLMULHQHQDQ\":\n\t\treturn \"PCLMULQDQ\"\n\t}\n\n\t\/*\n\t * SSE2\n\t *\/\n\tswitch op {\n\t\/\/ The condition is encoded as an imm8 operand of CMPPD\n\tcase \"CMPEQPD\", \"CMPLTPD\", \"CMPLEPD\", \"CMPUNORDPD\", \"CMPNEQPD\", \"CMPNLTPD\",\n\t\t\"CMPNLEPD\", \"CMPORDPD\":\n\t\treturn \"CMPPD\"\n\t\/\/ The condition is encoded as an imm8 operand of CMPSD.\n\tcase \"CMPEQSD\", \"CMPLTSD\", \"CMPLESD\", \"CMPUNORDSD\", \"CMPNEQSD\", \"CMPNLTSD\",\n\t\t\"CMPNLESD\", \"CMPORDSD\":\n\t\treturn \"CMPSD\"\n\t\/\/ D (double word) has been replaced by L (Long)\n\tcase \"PSUBD\":\n\t\treturn \"PSUBL\"\n\t}\n\n\treturn op\n}\n\nfunc isAlreadyKnown(name string) bool {\n\tname = nasmOpcodeToPlan9(name)\n\tfor _, opcode := range Anames {\n\t\tif name == opcode {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc isAlreadyTested(name string) bool {\n\tname = nasmOpcodeToPlan9(name)\n\t_, ok := testedMap[name]\n\treturn ok\n}\n\nfunc doHelp(insns x86db.InstructionSlice) {\n\tusage()\n}\n\nfunc doList(insns x86db.InstructionSlice) {\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)\n\tfor _, insn := range insns {\n\t\tfmt.Fprintf(w, \"%s\\t%s\\t%s\\t%s\\n\", insn.Name,\n\t\t\tstrings.Join(insn.Operands, \",\"), insn.Pattern, insn.Flags)\n\t}\n\tw.Flush()\n}\n\nvar (\n\tfilterFlags = flag.NewFlagSet(\"filter\", flag.ExitOnError)\n\textension = filterFlags.String(\"extension\", \"\",\n\t\t\"select instructions by extension\")\n\tknown = filterFlags.Bool(\"known\", false,\n\t\t\"select instructions already known by the go assembler\")\n\tnotKnown = filterFlags.Bool(\"not-known\", false,\n\t\t\"select instructions not already known by the go assembler\")\n\ttested = filterFlags.Bool(\"tested\", false,\n\t\t\"select instructions with test cases in the go assembler\")\n\tnotTested = filterFlags.Bool(\"not-tested\", false,\n\t\t\"select instructions with no test case in the go assembler\")\n)\n\ntype command struct {\n\tname string\n\thelp string\n\thandler func(x86db.InstructionSlice)\n}\n\nvar commands = []command{\n\t{\"help\", \"print this help\", nil},\n\t{\"list\", \"list x86 instructions\", doList},\n}\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"Usage:\\n\\n\")\n\tfmt.Fprintf(os.Stderr, \" x86db-gogen command [options]\\n\\n\")\n\tfmt.Fprintf(os.Stderr, \"List of commands:\\n\\n\")\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 4, ' ', 0)\n\tfor _, cmd := range commands {\n\t\tfmt.Fprintf(w, \" %s\\t%s\\n\", cmd.name, cmd.help)\n\t}\n\tw.Flush()\n\tfmt.Fprintf(os.Stderr, \"\\n\")\n\tfmt.Fprintf(os.Stderr, \"Filtering options:\\n\\n\")\n\tfilterFlags.PrintDefaults()\n}\n\nfunc main() {\n\tdb := x86db.NewDB()\n\n\tif err := filterFlags.Parse(os.Args[2:]); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\terr := db.Open()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer db.Close()\n\n\tinsns := db.Instructions\n\n\tif *extension != \"\" {\n\t\text, err := x86db.ExtensionFromString(*extension)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tinsns = insns.Where(func(insn x86db.Instruction) bool {\n\t\t\treturn insn.Extension == ext\n\t\t})\n\t}\n\n\tif *known || *notKnown {\n\t\tinsns = insns.Where(func(insn x86db.Instruction) bool {\n\t\t\tk := isAlreadyKnown(insn.Name)\n\t\t\tif *notKnown {\n\t\t\t\treturn !k\n\t\t\t}\n\t\t\treturn k\n\t\t})\n\t}\n\n\tif *tested || *notTested {\n\t\tinsns = insns.Where(func(insn x86db.Instruction) bool {\n\t\t\tt := isAlreadyTested(insn.Name)\n\t\t\tif *notTested {\n\t\t\t\treturn !t\n\t\t\t}\n\t\t\treturn t\n\t\t})\n\t}\n\n\tcmdName := os.Args[1]\n\thandled := false\n\tfor _, cmd := range commands {\n\t\tif cmd.name != cmdName {\n\t\t\tcontinue\n\t\t}\n\t\tif cmd.handler == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\thandled = true\n\t\tcmd.handler(insns)\n\t}\n\n\tif !handled {\n\t\tfmt.Fprintf(os.Stderr, \"unknown command '%s'\\n\\n\", cmdName)\n\t\tusage()\n\t\tos.Exit(1)\n\t}\n}\n<commit_msg>gogen: Allow filtering out MMX instructions<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/dlespiau\/x86db\"\n)\n\nfunc nasmOpcodeToPlan9(op string) string {\n\t\/*\n\t * SSE\n\t *\/\n\tswitch op {\n\t\/\/ The condition is encoded as an imm8 operand of CMPPS\n\tcase \"CMPEQPS\", \"CMPLTPS\", \"CMPLEPS\", \"CMPUNORDPS\", \"CMPNEQPS\", \"CMPNLTPS\",\n\t\t\"CMPNLEPS\", \"CMPORDPS\":\n\t\treturn \"CMPPS\"\n\t\/\/ The condition is encoded as an imm8 operand of CMPSS.\n\tcase \"CMPEQSS\", \"CMPLTSS\", \"CMPLESS\", \"CMPUNORDSS\", \"CMPNEQSS\", \"CMPNLTSS\",\n\t\t\"CMPNLESS\", \"CMPORDSS\":\n\t\treturn \"CMPSS\"\n\tcase \"CVTSI2SS\":\n\t\treturn \"CVTSL2SS\"\n\tcase \"CVTSS2SI\":\n\t\treturn \"CVTSS2SL\"\n\tcase \"CVTTSS2SI\":\n\t\treturn \"CVTTSS2SL\"\n\t}\n\n\t\/*\n\t * PCLMULQDQ\n\t *\/\n\tswitch op {\n\tcase \"PCLMULLQLQDQ\", \"PCLMULHQLQDQ\", \"PCLMULLQHQDQ\", \"PCLMULHQHQDQ\":\n\t\treturn \"PCLMULQDQ\"\n\t}\n\n\t\/*\n\t * SSE2\n\t *\/\n\tswitch op {\n\t\/\/ The condition is encoded as an imm8 operand of CMPPD\n\tcase \"CMPEQPD\", \"CMPLTPD\", \"CMPLEPD\", \"CMPUNORDPD\", \"CMPNEQPD\", \"CMPNLTPD\",\n\t\t\"CMPNLEPD\", \"CMPORDPD\":\n\t\treturn \"CMPPD\"\n\t\/\/ The condition is encoded as an imm8 operand of CMPSD.\n\tcase \"CMPEQSD\", \"CMPLTSD\", \"CMPLESD\", \"CMPUNORDSD\", \"CMPNEQSD\", \"CMPNLTSD\",\n\t\t\"CMPNLESD\", \"CMPORDSD\":\n\t\treturn \"CMPSD\"\n\t\/\/ D (double word) has been replaced by L (Long)\n\tcase \"PSUBD\":\n\t\treturn \"PSUBL\"\n\t}\n\n\treturn op\n}\n\nfunc isAlreadyKnown(name string) bool {\n\tname = nasmOpcodeToPlan9(name)\n\tfor _, opcode := range Anames {\n\t\tif name == opcode {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc isAlreadyTested(name string) bool {\n\tname = nasmOpcodeToPlan9(name)\n\t_, ok := testedMap[name]\n\treturn ok\n}\n\nfunc isMMXOperand(op string) bool {\n\tif op == \"mmxreg\" || op == \"mmxrm\" || op == \"mmxrm64\" {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc isMMX(insn *x86db.Instruction) bool {\n\tfor _, op := range insn.Operands {\n\t\tif isMMXOperand(op) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc doHelp(insns x86db.InstructionSlice) {\n\tusage()\n}\n\nfunc doList(insns x86db.InstructionSlice) {\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)\n\tfor _, insn := range insns {\n\t\tfmt.Fprintf(w, \"%s\\t%s\\t%s\\t%s\\n\", insn.Name,\n\t\t\tstrings.Join(insn.Operands, \",\"), insn.Pattern, insn.Flags)\n\t}\n\tw.Flush()\n}\n\nvar (\n\tfilterFlags = flag.NewFlagSet(\"filter\", flag.ExitOnError)\n\textension = filterFlags.String(\"extension\", \"\",\n\t\t\"select instructions by extension\")\n\tnoMMX = filterFlags.Bool(\"no-mmx\", false,\n\t\t\"do not select instructions taking MMX operands\")\n\tknown = filterFlags.Bool(\"known\", false,\n\t\t\"select instructions already known by the go assembler\")\n\tnotKnown = filterFlags.Bool(\"not-known\", false,\n\t\t\"select instructions not already known by the go assembler\")\n\ttested = filterFlags.Bool(\"tested\", false,\n\t\t\"select instructions with test cases in the go assembler\")\n\tnotTested = filterFlags.Bool(\"not-tested\", false,\n\t\t\"select instructions with no test case in the go assembler\")\n)\n\ntype command struct {\n\tname string\n\thelp string\n\thandler func(x86db.InstructionSlice)\n}\n\nvar commands = []command{\n\t{\"help\", \"print this help\", nil},\n\t{\"list\", \"list x86 instructions\", doList},\n}\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, \"Usage:\\n\\n\")\n\tfmt.Fprintf(os.Stderr, \" x86db-gogen command [options]\\n\\n\")\n\tfmt.Fprintf(os.Stderr, \"List of commands:\\n\\n\")\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 4, ' ', 0)\n\tfor _, cmd := range commands {\n\t\tfmt.Fprintf(w, \" %s\\t%s\\n\", cmd.name, cmd.help)\n\t}\n\tw.Flush()\n\tfmt.Fprintf(os.Stderr, \"\\n\")\n\tfmt.Fprintf(os.Stderr, \"Filtering options:\\n\\n\")\n\tfilterFlags.PrintDefaults()\n}\n\nfunc main() {\n\tdb := x86db.NewDB()\n\n\tif err := filterFlags.Parse(os.Args[2:]); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\terr := db.Open()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer db.Close()\n\n\tinsns := db.Instructions\n\n\tif *extension != \"\" {\n\t\text, err := x86db.ExtensionFromString(*extension)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tinsns = insns.Where(func(insn x86db.Instruction) bool {\n\t\t\treturn insn.Extension == ext\n\t\t})\n\t}\n\n\tif *noMMX {\n\t\tinsns = insns.Where(func(insn x86db.Instruction) bool {\n\t\t\treturn !isMMX(&insn)\n\t\t})\n\t}\n\n\tif *known || *notKnown {\n\t\tinsns = insns.Where(func(insn x86db.Instruction) bool {\n\t\t\tk := isAlreadyKnown(insn.Name)\n\t\t\tif *notKnown {\n\t\t\t\treturn !k\n\t\t\t}\n\t\t\treturn k\n\t\t})\n\t}\n\n\tif *tested || *notTested {\n\t\tinsns = insns.Where(func(insn x86db.Instruction) bool {\n\t\t\tt := isAlreadyTested(insn.Name)\n\t\t\tif *notTested {\n\t\t\t\treturn !t\n\t\t\t}\n\t\t\treturn t\n\t\t})\n\t}\n\n\tcmdName := os.Args[1]\n\thandled := false\n\tfor _, cmd := range commands {\n\t\tif cmd.name != cmdName {\n\t\t\tcontinue\n\t\t}\n\t\tif cmd.handler == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\thandled = true\n\t\tcmd.handler(insns)\n\t}\n\n\tif !handled {\n\t\tfmt.Fprintf(os.Stderr, \"unknown command '%s'\\n\\n\", cmdName)\n\t\tusage()\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package agent\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tmqtt \"git.eclipse.org\/gitroot\/paho\/org.eclipse.paho.mqtt.golang.git\"\n\t\"github.com\/juju\/loggo\"\n)\n\nvar AlreadyConfigured = errors.New(\"Already configured\")\nvar AlreadyUnConfigured = errors.New(\"Already unconfigured\")\n\n\/\/\n\/\/ Acts as a bridge between local and cloud brokers, this includes reconnecting\n\/\/ and emitting status changes.\n\/\/\n\/\/ Once configured and started this will attempt to connect to connect to local\n\/\/ and cloud brokers, if something dies it will reconnect based on the configured\n\/\/ reconnect backoff.\n\/\/\ntype Bridge struct {\n\tconf *Config\n\tlocal *mqtt.MqttClient\n\tremote *mqtt.MqttClient\n\tlog loggo.Logger\n\n\tlocalTopics []replaceTopic\n\tcloudTopics []replaceTopic\n\n\tcloudUrl *url.URL\n\ttoken string\n\n\ttimer *time.Timer\n\treconnectCh chan bool\n\tshutdownCh chan bool\n\n\tConfigured bool\n\tConnected bool\n\tCounter int64\n\n\tIngressCounter int64\n\tEgressCounter int64\n\n\tIngressBytes int64\n\tEgressBytes int64\n\n\tLastError error\n\n\tbridgeLock sync.Mutex\n}\n\ntype replaceTopic struct {\n\ton string\n\treplace string\n\twith string\n}\n\nfunc (r *replaceTopic) updated(originalTopic string) string {\n\treturn strings.Replace(originalTopic, r.replace, r.with, 1)\n}\n\nvar localTopics = []replaceTopic{\n\t\/\/ location related topics (TODO: move to cloud userspace RPC)\n\t{on: \"$location\/calibration\", replace: \"$location\", with: \"$cloud\/location\"},\n\t{on: \"$location\/delete\", replace: \"$location\", with: \"$cloud\/location\"},\n\t{on: \"$device\/+\/+\/rssi\", replace: \"$device\", with: \"$cloud\/device\"},\n\n\t\/\/ module health statistics\n\t\/\/{on: \"$node\/+\/module\/status\", replace: \"$node\", with: \"$cloud\/node\"},\n\n\t\/\/ cloud userspace RPC requests\n\t{on: \"$ninja\/services\/rpc\/+\/+\", replace: \"$ninja\", with: \"$cloud\/ninja\"},\n\t{on: \"$ninja\/services\/+\", replace: \"$ninja\", with: \"$cloud\/ninja\"},\n\n\t\/\/ temporary alternate topic to distinguish remote device replies from local-destined ones\n\t\/\/ used by the phone app for remote actuations\n\t\/\/ the alternate remote_ topic is to prevent a loopback with the below rule in the other direction\n\t\/\/ TODO: use a tag like $mesh-source to prevent loops (never re-proxy msgs with your source)\n\t{on: \"$device\/+\/channel\/+\/reply\", replace: \"$device\", with: \"$cloud\/remote_device\"},\n\n\t\/\/ push up all local RPC methods in case the cloud is responding,\n\t\/\/ this is currently used for the push notification channel\n\t{on: \"$device\/+\/channel\/+\", replace: \"$device\", with: \"$cloud\/device\"},\n\n\t\/\/ push up state changes to the cloud\n\t{on: \"$device\/+\/channel\/+\/event\/state\", replace: \"$device\", with: \"$cloud\/device\"},\n}\n\nvar cloudTopics = []replaceTopic{\n\t\/\/ location related topics\n\t{on: \"$cloud\/location\/calibration\/progress\", replace: \"$cloud\/location\", with: \"$location\"},\n\t{on: \"$cloud\/device\/+\/+\/location\", replace: \"$cloud\/device\", with: \"$device\"},\n\n\t\/\/ cloud userspace RPC replies\n\t{on: \"$cloud\/ninja\/services\/rpc\/+\/+\/reply\", replace: \"$cloud\/ninja\", with: \"$ninja\"},\n\n\t\/\/ see comment for $device\/+\/channel\/+\/reply above\n\t{on: \"$cloud\/remote_device\/+\/channel\/+\", replace: \"$cloud\/remote_device\", with: \"$device\"},\n\n\t\/\/ allow cloud to announce devices and channels (used for phone on 3G and notification subscription channel)\n\t{on: \"$cloud\/device\/+\/event\/announce\", replace: \"$cloud\/device\", with: \"$device\"},\n\t{on: \"$cloud\/device\/+\/channel\/+\/event\/announce\", replace: \"$cloud\/device\", with: \"$device\"},\n\n\t\/\/ retrieve RPC responses from the cloud,\n\t\/\/ this is currently used for the push notification channel\n\t{on: \"$cloud\/device\/+\/channel\/+\/reply\", replace: \"$cloud\/device\", with: \"$device\"},\n}\n\nfunc createBridge(conf *Config) *Bridge {\n\treturn &Bridge{conf: conf, localTopics: localTopics, cloudTopics: cloudTopics, log: loggo.GetLogger(\"bridge\")}\n}\n\nfunc (b *Bridge) start(cloudUrl string, token string) (err error) {\n\n\tif b.Configured {\n\t\tb.log.Warningf(\"Already configured.\")\n\t\treturn AlreadyConfigured\n\t}\n\n\tdefer b.bridgeLock.Unlock()\n\n\tb.bridgeLock.Lock()\n\n\tb.log.Infof(\"Connecting the bridge\")\n\n\tb.Configured = true\n\n\turl, err := url.Parse(cloudUrl)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb.cloudUrl = url\n\tb.token = token\n\n\tb.reconnectCh = make(chan bool, 1)\n\tb.shutdownCh = make(chan bool, 1)\n\n\tif err = b.connect(); err != nil {\n\t\tb.log.Errorf(\"Connect failed %s\", err)\n\t\tb.scheduleReconnect(err)\n\t}\n\n\tgo b.mainBridgeLoop()\n\n\treturn err\n}\n\nfunc (b *Bridge) stop() error {\n\n\tif !b.Configured {\n\t\tb.log.Warningf(\"Already unconfigured.\")\n\t\treturn AlreadyUnConfigured\n\t}\n\n\tdefer b.bridgeLock.Unlock()\n\n\tb.bridgeLock.Lock()\n\n\tb.log.Infof(\"Disconnecting bridge\")\n\n\tif b.Configured {\n\t\t\/\/ tell the worker to shutdown\n\t\tb.shutdownCh <- true\n\n\t\tb.Configured = false\n\t}\n\n\tb.resetTimer()\n\n\tb.disconnectAll()\n\n\treturn nil\n}\n\nfunc (b *Bridge) connect() (err error) {\n\n\tif b.local, err = b.buildClient(b.conf.LocalUrl, \"\"); err != nil {\n\t\tb.Connected = false\n\t\treturn err\n\t}\n\n\tif b.remote, err = b.buildClient(b.cloudUrl.String(), b.token); err != nil {\n\t\tb.Connected = false\n\t\treturn err\n\t}\n\n\tif err = b.subscriptions(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ we are now connected\n\tb.Connected = true\n\n\treturn nil\n}\n\nfunc (b *Bridge) reconnect() (err error) {\n\n\tif b.local, err = b.buildClient(b.conf.LocalUrl, \"\"); err != nil {\n\t\tb.Connected = false\n\t\treturn err\n\t}\n\n\tif b.remote, err = b.buildClient(b.cloudUrl.String(), b.token); err != nil {\n\t\tb.Connected = false\n\t\treturn err\n\t}\n\n\tif err = b.subscriptions(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ we are now connected\n\tb.Connected = true\n\tb.LastError = nil\n\n\treturn nil\n}\n\nfunc (b *Bridge) subscriptions() (err error) {\n\n\tif err = b.subscribe(b.local, b.remote, b.localTopics, \"local\"); err != nil {\n\t\treturn err\n\t}\n\n\tif err = b.subscribe(b.remote, b.local, b.cloudTopics, \"cloud\"); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}\n\nfunc (b *Bridge) disconnectAll() {\n\tb.log.Infof(\"disconnectAll\")\n\t\/\/ we are now disconnected\n\tb.Connected = false\n\tif b.local != nil && b.local.IsConnected() {\n\t\tb.local.Disconnect(100)\n\t}\n\tif b.remote != nil && b.remote.IsConnected() {\n\t\tb.remote.Disconnect(100)\n\t}\n}\n\nfunc (b *Bridge) mainBridgeLoop() {\n\n\tfor {\n\t\tselect {\n\t\tcase <-b.reconnectCh:\n\t\t\tb.log.Infof(\"reconnecting\")\n\t\t\tif err := b.reconnect(); err != nil {\n\t\t\t\tb.log.Errorf(\"Reconnect failed %s\", err)\n\t\t\t\tb.scheduleReconnect(err)\n\t\t\t}\n\t\tcase <-b.shutdownCh:\n\t\t\tb.log.Infof(\"shutting down bridge\")\n\t\t\treturn\n\t\t}\n\n\t}\n\n}\n\nfunc (b *Bridge) buildClient(server string, token string) (*mqtt.MqttClient, error) {\n\n\tb.log.Infof(\"building client for %s\", server)\n\n\topts := mqtt.NewClientOptions().AddBroker(server).SetTlsConfig(&tls.Config{InsecureSkipVerify: true})\n\n\tif token != \"\" {\n\t\topts.SetUsername(token)\n\t}\n\n\topts.SetClientId(fmt.Sprintf(\"%d\", time.Now().Unix()))\n\n\topts.SetKeepAlive(15) \/\/ set a 15 second ping time for ELB\n\n\t\/\/ pretty much log the reason and quit\n\topts.SetOnConnectionLost(b.onConnectionLoss)\n\n\tclient := mqtt.NewClient(opts)\n\t_, err := client.Start()\n\n\treturn client, err\n}\n\nfunc (b *Bridge) subscribe(src *mqtt.MqttClient, dst *mqtt.MqttClient, topics []replaceTopic, tag string) (err error) {\n\n\tfor _, topic := range topics {\n\n\t\ttopicFilter, _ := mqtt.NewTopicFilter(topic.on, 0)\n\t\tb.log.Infof(\"(%s) subscribed to %s\", tag, topic.on)\n\n\t\tif receipt, err := src.StartSubscription(b.buildHandler(topic, tag, dst), topicFilter); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\t<-receipt\n\t\t\tb.log.Infof(\"(%s) subscribed to %+v\", tag, topicFilter)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (b *Bridge) unsubscribe(client *mqtt.MqttClient, topics []replaceTopic, tag string) {\n\n\ttopicNames := []string{}\n\n\tfor _, topic := range topics {\n\t\ttopicNames = append(topicNames, topic.on)\n\t}\n\n\tb.log.Infof(\"(%s) unsubscribed to %s\", tag, topicNames)\n\tclient.EndSubscription(topicNames...)\n}\n\nfunc (b *Bridge) buildHandler(topic replaceTopic, tag string, dst *mqtt.MqttClient) mqtt.MessageHandler {\n\treturn func(src *mqtt.MqttClient, msg mqtt.Message) {\n\t\tif b.log.IsDebugEnabled() {\n\t\t\tb.log.Debugf(\"(%s) topic: %s updated: %s len: %d\", tag, msg.Topic(), topic.updated(msg.Topic()), len(msg.Payload()))\n\t\t}\n\t\tb.updateCounters(tag, msg)\n\t\tpayload := b.updateSource(msg.Payload(), b.buildSource(tag))\n\t\tdst.PublishMessage(topic.updated(msg.Topic()), mqtt.NewMessage(payload))\n\t}\n}\n\nfunc (b *Bridge) scheduleReconnect(reason error) {\n\tb.LastError = reason\n\tb.disconnectAll()\n\tb.resetTimer()\n\n\tswitch reason {\n\tcase mqtt.ErrBadCredentials:\n\t\tb.log.Warningf(\"Reconnect failed trying again in 30s\")\n\n\t\tb.timer = time.AfterFunc(30*time.Second, func() {\n\t\t\tb.reconnectCh <- true\n\t\t})\n\n\tdefault:\n\t\tb.log.Warningf(\"Reconnect failed trying again in 5s\")\n\t\t\/\/ TODO add exponential backoff\n\t\tb.timer = time.AfterFunc(5*time.Second, func() {\n\t\t\tb.reconnectCh <- true\n\t\t})\n\t}\n\n}\n\nfunc (b *Bridge) resetTimer() {\n\tif b.timer != nil {\n\t\tb.timer.Stop()\n\t}\n}\n\nfunc (b *Bridge) onConnectionLoss(client *mqtt.MqttClient, reason error) {\n\tb.log.Errorf(\"Connection failed %s\", reason)\n\n\t\/\/ we are now disconnected\n\tb.Connected = false\n\n\tb.scheduleReconnect(reason)\n\n}\n\nfunc (b *Bridge) IsConnected() bool {\n\tif b.remote == nil || b.local == nil {\n\t\treturn false\n\t}\n\treturn (b.remote.IsConnected() && b.local.IsConnected())\n}\n\nfunc (b *Bridge) buildSource(tag string) string {\n\n\tswitch tag {\n\tcase \"local\":\n\t\treturn b.conf.SerialNo\n\tcase \"cloud\":\n\t\treturn \"cloud-\" + strings.Replace(b.cloudUrl.Host, \".\", \"_\", -1) \/\/ encoded to look less wierd\n\t}\n\n\treturn \"\"\n}\n\nfunc (b *Bridge) updateSource(payload []byte, source string) []byte {\n\n\tif !bytes.Contains(payload, []byte(\"$mesh-source\")) {\n\t\tpayload = bytes.Replace(payload, []byte(\"{\"), []byte(`{\"$mesh-source\":\"`+source+`\", `), 1)\n\t}\n\n\tb.log.Debugf(\"msg %s\", string(payload))\n\n\treturn payload\n}\n\nfunc (b *Bridge) updateCounters(tag string, msg mqtt.Message) {\n\tswitch tag {\n\tcase \"local\":\n\t\tb.EgressCounter++\n\t\tb.EgressBytes += int64(len(msg.Bytes())) \/\/ message size not payload size\n\tcase \"cloud\":\n\t\tb.IngressCounter++\n\t\tb.IngressBytes += int64(len(msg.Bytes())) \/\/ message size not payload size\n\t}\n\n}\n<commit_msg>Add $ninja\/client-services topics to support inbound RPCs from the clouds to sphere-hosted services.<commit_after>package agent\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tmqtt \"git.eclipse.org\/gitroot\/paho\/org.eclipse.paho.mqtt.golang.git\"\n\t\"github.com\/juju\/loggo\"\n)\n\nvar AlreadyConfigured = errors.New(\"Already configured\")\nvar AlreadyUnConfigured = errors.New(\"Already unconfigured\")\n\n\/\/\n\/\/ Acts as a bridge between local and cloud brokers, this includes reconnecting\n\/\/ and emitting status changes.\n\/\/\n\/\/ Once configured and started this will attempt to connect to connect to local\n\/\/ and cloud brokers, if something dies it will reconnect based on the configured\n\/\/ reconnect backoff.\n\/\/\ntype Bridge struct {\n\tconf *Config\n\tlocal *mqtt.MqttClient\n\tremote *mqtt.MqttClient\n\tlog loggo.Logger\n\n\tlocalTopics []replaceTopic\n\tcloudTopics []replaceTopic\n\n\tcloudUrl *url.URL\n\ttoken string\n\n\ttimer *time.Timer\n\treconnectCh chan bool\n\tshutdownCh chan bool\n\n\tConfigured bool\n\tConnected bool\n\tCounter int64\n\n\tIngressCounter int64\n\tEgressCounter int64\n\n\tIngressBytes int64\n\tEgressBytes int64\n\n\tLastError error\n\n\tbridgeLock sync.Mutex\n}\n\ntype replaceTopic struct {\n\ton string\n\treplace string\n\twith string\n}\n\nfunc (r *replaceTopic) updated(originalTopic string) string {\n\treturn strings.Replace(originalTopic, r.replace, r.with, 1)\n}\n\nvar localTopics = []replaceTopic{\n\t\/\/ location related topics (TODO: move to cloud userspace RPC)\n\t{on: \"$location\/calibration\", replace: \"$location\", with: \"$cloud\/location\"},\n\t{on: \"$location\/delete\", replace: \"$location\", with: \"$cloud\/location\"},\n\t{on: \"$device\/+\/+\/rssi\", replace: \"$device\", with: \"$cloud\/device\"},\n\n\t\/\/ module health statistics\n\t\/\/{on: \"$node\/+\/module\/status\", replace: \"$node\", with: \"$cloud\/node\"},\n\n\t\/\/ cloud userspace RPC requests\n\t{on: \"$ninja\/services\/rpc\/+\/+\", replace: \"$ninja\", with: \"$cloud\/ninja\"},\n\t{on: \"$ninja\/services\/+\", replace: \"$ninja\", with: \"$cloud\/ninja\"},\n\n\t\/\/ temporary alternate topic to distinguish remote device replies from local-destined ones\n\t\/\/ used by the phone app for remote actuations\n\t\/\/ the alternate remote_ topic is to prevent a loopback with the below rule in the other direction\n\t\/\/ TODO: use a tag like $mesh-source to prevent loops (never re-proxy msgs with your source)\n\t{on: \"$device\/+\/channel\/+\/reply\", replace: \"$device\", with: \"$cloud\/remote_device\"},\n\n\t\/\/ push up all local RPC methods in case the cloud is responding,\n\t\/\/ this is currently used for the push notification channel\n\t{on: \"$device\/+\/channel\/+\", replace: \"$device\", with: \"$cloud\/device\"},\n\n\t\/\/ push up state changes to the cloud\n\t{on: \"$device\/+\/channel\/+\/event\/state\", replace: \"$device\", with: \"$cloud\/device\"},\n\n\t\/\/ topic used to listen to replies from client-hosted services\n\t{on: \"$ninja\/client-services\/+\/reply\", replace: \"$ninja\", with: \"$cloud\/ninja\"},\n}\n\nvar cloudTopics = []replaceTopic{\n\t\/\/ location related topics\n\t{on: \"$cloud\/location\/calibration\/progress\", replace: \"$cloud\/location\", with: \"$location\"},\n\t{on: \"$cloud\/device\/+\/+\/location\", replace: \"$cloud\/device\", with: \"$device\"},\n\n\t\/\/ cloud userspace RPC replies\n\t{on: \"$cloud\/ninja\/services\/rpc\/+\/+\/reply\", replace: \"$cloud\/ninja\", with: \"$ninja\"},\n\n\t\/\/ see comment for $device\/+\/channel\/+\/reply above\n\t{on: \"$cloud\/remote_device\/+\/channel\/+\", replace: \"$cloud\/remote_device\", with: \"$device\"},\n\n\t\/\/ allow cloud to announce devices and channels (used for phone on 3G and notification subscription channel)\n\t{on: \"$cloud\/device\/+\/event\/announce\", replace: \"$cloud\/device\", with: \"$device\"},\n\t{on: \"$cloud\/device\/+\/channel\/+\/event\/announce\", replace: \"$cloud\/device\", with: \"$device\"},\n\n\t\/\/ retrieve RPC responses from the cloud,\n\t\/\/ this is currently used for the push notification channel\n\t{on: \"$cloud\/device\/+\/channel\/+\/reply\", replace: \"$cloud\/device\", with: \"$device\"},\n\n\t\/\/ topic used to listen to services published by clients to the cloud\n\t{on: \"$ninja\/client-services\/+\", replace: \"$ninja\", with: \"$cloud\/ninja\"},\n}\n\nfunc createBridge(conf *Config) *Bridge {\n\treturn &Bridge{conf: conf, localTopics: localTopics, cloudTopics: cloudTopics, log: loggo.GetLogger(\"bridge\")}\n}\n\nfunc (b *Bridge) start(cloudUrl string, token string) (err error) {\n\n\tif b.Configured {\n\t\tb.log.Warningf(\"Already configured.\")\n\t\treturn AlreadyConfigured\n\t}\n\n\tdefer b.bridgeLock.Unlock()\n\n\tb.bridgeLock.Lock()\n\n\tb.log.Infof(\"Connecting the bridge\")\n\n\tb.Configured = true\n\n\turl, err := url.Parse(cloudUrl)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb.cloudUrl = url\n\tb.token = token\n\n\tb.reconnectCh = make(chan bool, 1)\n\tb.shutdownCh = make(chan bool, 1)\n\n\tif err = b.connect(); err != nil {\n\t\tb.log.Errorf(\"Connect failed %s\", err)\n\t\tb.scheduleReconnect(err)\n\t}\n\n\tgo b.mainBridgeLoop()\n\n\treturn err\n}\n\nfunc (b *Bridge) stop() error {\n\n\tif !b.Configured {\n\t\tb.log.Warningf(\"Already unconfigured.\")\n\t\treturn AlreadyUnConfigured\n\t}\n\n\tdefer b.bridgeLock.Unlock()\n\n\tb.bridgeLock.Lock()\n\n\tb.log.Infof(\"Disconnecting bridge\")\n\n\tif b.Configured {\n\t\t\/\/ tell the worker to shutdown\n\t\tb.shutdownCh <- true\n\n\t\tb.Configured = false\n\t}\n\n\tb.resetTimer()\n\n\tb.disconnectAll()\n\n\treturn nil\n}\n\nfunc (b *Bridge) connect() (err error) {\n\n\tif b.local, err = b.buildClient(b.conf.LocalUrl, \"\"); err != nil {\n\t\tb.Connected = false\n\t\treturn err\n\t}\n\n\tif b.remote, err = b.buildClient(b.cloudUrl.String(), b.token); err != nil {\n\t\tb.Connected = false\n\t\treturn err\n\t}\n\n\tif err = b.subscriptions(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ we are now connected\n\tb.Connected = true\n\n\treturn nil\n}\n\nfunc (b *Bridge) reconnect() (err error) {\n\n\tif b.local, err = b.buildClient(b.conf.LocalUrl, \"\"); err != nil {\n\t\tb.Connected = false\n\t\treturn err\n\t}\n\n\tif b.remote, err = b.buildClient(b.cloudUrl.String(), b.token); err != nil {\n\t\tb.Connected = false\n\t\treturn err\n\t}\n\n\tif err = b.subscriptions(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ we are now connected\n\tb.Connected = true\n\tb.LastError = nil\n\n\treturn nil\n}\n\nfunc (b *Bridge) subscriptions() (err error) {\n\n\tif err = b.subscribe(b.local, b.remote, b.localTopics, \"local\"); err != nil {\n\t\treturn err\n\t}\n\n\tif err = b.subscribe(b.remote, b.local, b.cloudTopics, \"cloud\"); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}\n\nfunc (b *Bridge) disconnectAll() {\n\tb.log.Infof(\"disconnectAll\")\n\t\/\/ we are now disconnected\n\tb.Connected = false\n\tif b.local != nil && b.local.IsConnected() {\n\t\tb.local.Disconnect(100)\n\t}\n\tif b.remote != nil && b.remote.IsConnected() {\n\t\tb.remote.Disconnect(100)\n\t}\n}\n\nfunc (b *Bridge) mainBridgeLoop() {\n\n\tfor {\n\t\tselect {\n\t\tcase <-b.reconnectCh:\n\t\t\tb.log.Infof(\"reconnecting\")\n\t\t\tif err := b.reconnect(); err != nil {\n\t\t\t\tb.log.Errorf(\"Reconnect failed %s\", err)\n\t\t\t\tb.scheduleReconnect(err)\n\t\t\t}\n\t\tcase <-b.shutdownCh:\n\t\t\tb.log.Infof(\"shutting down bridge\")\n\t\t\treturn\n\t\t}\n\n\t}\n\n}\n\nfunc (b *Bridge) buildClient(server string, token string) (*mqtt.MqttClient, error) {\n\n\tb.log.Infof(\"building client for %s\", server)\n\n\topts := mqtt.NewClientOptions().AddBroker(server).SetTlsConfig(&tls.Config{InsecureSkipVerify: true})\n\n\tif token != \"\" {\n\t\topts.SetUsername(token)\n\t}\n\n\topts.SetClientId(fmt.Sprintf(\"%d\", time.Now().Unix()))\n\n\topts.SetKeepAlive(15) \/\/ set a 15 second ping time for ELB\n\n\t\/\/ pretty much log the reason and quit\n\topts.SetOnConnectionLost(b.onConnectionLoss)\n\n\tclient := mqtt.NewClient(opts)\n\t_, err := client.Start()\n\n\treturn client, err\n}\n\nfunc (b *Bridge) subscribe(src *mqtt.MqttClient, dst *mqtt.MqttClient, topics []replaceTopic, tag string) (err error) {\n\n\tfor _, topic := range topics {\n\n\t\ttopicFilter, _ := mqtt.NewTopicFilter(topic.on, 0)\n\t\tb.log.Infof(\"(%s) subscribed to %s\", tag, topic.on)\n\n\t\tif receipt, err := src.StartSubscription(b.buildHandler(topic, tag, dst), topicFilter); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\t<-receipt\n\t\t\tb.log.Infof(\"(%s) subscribed to %+v\", tag, topicFilter)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (b *Bridge) unsubscribe(client *mqtt.MqttClient, topics []replaceTopic, tag string) {\n\n\ttopicNames := []string{}\n\n\tfor _, topic := range topics {\n\t\ttopicNames = append(topicNames, topic.on)\n\t}\n\n\tb.log.Infof(\"(%s) unsubscribed to %s\", tag, topicNames)\n\tclient.EndSubscription(topicNames...)\n}\n\nfunc (b *Bridge) buildHandler(topic replaceTopic, tag string, dst *mqtt.MqttClient) mqtt.MessageHandler {\n\treturn func(src *mqtt.MqttClient, msg mqtt.Message) {\n\t\tif b.log.IsDebugEnabled() {\n\t\t\tb.log.Debugf(\"(%s) topic: %s updated: %s len: %d\", tag, msg.Topic(), topic.updated(msg.Topic()), len(msg.Payload()))\n\t\t}\n\t\tb.updateCounters(tag, msg)\n\t\tpayload := b.updateSource(msg.Payload(), b.buildSource(tag))\n\t\tdst.PublishMessage(topic.updated(msg.Topic()), mqtt.NewMessage(payload))\n\t}\n}\n\nfunc (b *Bridge) scheduleReconnect(reason error) {\n\tb.LastError = reason\n\tb.disconnectAll()\n\tb.resetTimer()\n\n\tswitch reason {\n\tcase mqtt.ErrBadCredentials:\n\t\tb.log.Warningf(\"Reconnect failed trying again in 30s\")\n\n\t\tb.timer = time.AfterFunc(30*time.Second, func() {\n\t\t\tb.reconnectCh <- true\n\t\t})\n\n\tdefault:\n\t\tb.log.Warningf(\"Reconnect failed trying again in 5s\")\n\t\t\/\/ TODO add exponential backoff\n\t\tb.timer = time.AfterFunc(5*time.Second, func() {\n\t\t\tb.reconnectCh <- true\n\t\t})\n\t}\n\n}\n\nfunc (b *Bridge) resetTimer() {\n\tif b.timer != nil {\n\t\tb.timer.Stop()\n\t}\n}\n\nfunc (b *Bridge) onConnectionLoss(client *mqtt.MqttClient, reason error) {\n\tb.log.Errorf(\"Connection failed %s\", reason)\n\n\t\/\/ we are now disconnected\n\tb.Connected = false\n\n\tb.scheduleReconnect(reason)\n\n}\n\nfunc (b *Bridge) IsConnected() bool {\n\tif b.remote == nil || b.local == nil {\n\t\treturn false\n\t}\n\treturn (b.remote.IsConnected() && b.local.IsConnected())\n}\n\nfunc (b *Bridge) buildSource(tag string) string {\n\n\tswitch tag {\n\tcase \"local\":\n\t\treturn b.conf.SerialNo\n\tcase \"cloud\":\n\t\treturn \"cloud-\" + strings.Replace(b.cloudUrl.Host, \".\", \"_\", -1) \/\/ encoded to look less wierd\n\t}\n\n\treturn \"\"\n}\n\nfunc (b *Bridge) updateSource(payload []byte, source string) []byte {\n\n\tif !bytes.Contains(payload, []byte(\"$mesh-source\")) {\n\t\tpayload = bytes.Replace(payload, []byte(\"{\"), []byte(`{\"$mesh-source\":\"`+source+`\", `), 1)\n\t}\n\n\tb.log.Debugf(\"msg %s\", string(payload))\n\n\treturn payload\n}\n\nfunc (b *Bridge) updateCounters(tag string, msg mqtt.Message) {\n\tswitch tag {\n\tcase \"local\":\n\t\tb.EgressCounter++\n\t\tb.EgressBytes += int64(len(msg.Bytes())) \/\/ message size not payload size\n\tcase \"cloud\":\n\t\tb.IngressCounter++\n\t\tb.IngressBytes += int64(len(msg.Bytes())) \/\/ message size not payload size\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"os\"\n\n\t\"github.com\/github\/git-lfs\/git\"\n\t\"github.com\/github\/git-lfs\/lfs\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\tgetCmd = &cobra.Command{\n\t\tUse: \"get\",\n\t\tShort: \"get\",\n\t\tRun: getCommand,\n\t}\n)\n\nfunc getCommand(cmd *cobra.Command, args []string) {\n\tvar ref string\n\tvar err error\n\n\tif len(args) == 1 {\n\t\tref = args[0]\n\t} else {\n\t\tref, err = git.CurrentRef()\n\t\tif err != nil {\n\t\t\tPanic(err, \"Could not get\")\n\t\t}\n\t}\n\n\tpointers, err := lfs.ScanRefs(ref, \"\")\n\tif err != nil {\n\t\tPanic(err, \"Could not scan for Git LFS files\")\n\t}\n\n\tq := lfs.NewDownloadQueue(lfs.Config.ConcurrentTransfers(), len(pointers))\n\n\tfor _, p := range pointers {\n\t\tq.Add(lfs.NewDownloadable(p))\n\t}\n\n\tq.Process()\n\n\ttarget, err := git.ResolveRef(ref)\n\tif err != nil {\n\t}\n\n\tcurrent, err := git.CurrentRef()\n\tif err != nil {\n\t}\n\n\tif target == current {\n\t\t\/\/ We just downloaded the files for the current ref, we can copy them into\n\t\t\/\/ the working directory and update the git index\n\t\tfor _, pointer := range pointers {\n\t\t\tfile, err := os.Create(pointer.Name)\n\t\t\tif err != nil {\n\t\t\t\tPanic(err, \"Could not create working directory file\")\n\t\t\t}\n\n\t\t\tif err := lfs.PointerSmudge(file, pointer.Pointer, pointer.Name, nil); err != nil {\n\t\t\t\tPanic(err, \"Could not write working directory file\")\n\t\t\t}\n\n\t\t\tif err := git.UpdateIndex(pointer.Name); err != nil {\n\t\t\t\tPanic(err, \"Could not update index\")\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc init() {\n\tRootCmd.AddCommand(getCmd)\n}\n<commit_msg>ンンンンン ンンンン<commit_after>package commands\n\nimport (\n\t\"os\"\n\t\"os\/exec\"\n\t\"time\"\n\n\t\"github.com\/github\/git-lfs\/git\"\n\t\"github.com\/github\/git-lfs\/lfs\"\n\t\"github.com\/rubyist\/tracerx\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\tgetCmd = &cobra.Command{\n\t\tUse: \"get\",\n\t\tShort: \"get\",\n\t\tRun: getCommand,\n\t}\n)\n\nfunc getCommand(cmd *cobra.Command, args []string) {\n\tvar ref string\n\tvar err error\n\n\tif len(args) == 1 {\n\t\tref = args[0]\n\t} else {\n\t\tref, err = git.CurrentRef()\n\t\tif err != nil {\n\t\t\tPanic(err, \"Could not get\")\n\t\t}\n\t}\n\n\tpointers, err := lfs.ScanRefs(ref, \"\")\n\tif err != nil {\n\t\tPanic(err, \"Could not scan for Git LFS files\")\n\t}\n\n\tq := lfs.NewDownloadQueue(lfs.Config.ConcurrentTransfers(), len(pointers))\n\n\tfor _, p := range pointers {\n\t\tq.Add(lfs.NewDownloadable(p))\n\t}\n\n\tprocessQueue := time.Now()\n\tq.Process()\n\ttracerx.PerformanceSince(\"process queue\", processQueue)\n\n\ttarget, err := git.ResolveRef(ref)\n\tif err != nil {\n\t}\n\n\tcurrent, err := git.CurrentRef()\n\tif err != nil {\n\t}\n\n\tif target == current {\n\t\t\/\/ We just downloaded the files for the current ref, we can copy them into\n\t\t\/\/ the working directory and update the git index\n\t\tupdateWd := time.Now()\n\t\tfor _, pointer := range pointers {\n\t\t\tfile, err := os.Create(pointer.Name)\n\t\t\tif err != nil {\n\t\t\t\tPanic(err, \"Could not create working directory file\")\n\t\t\t}\n\n\t\t\tif err := lfs.PointerSmudge(file, pointer.Pointer, pointer.Name, nil); err != nil {\n\t\t\t\tPanic(err, \"Could not write working directory file\")\n\t\t\t}\n\t\t}\n\t\ttracerx.PerformanceSince(\"update working directory\", updateWd)\n\n\t\tupdateIndex := time.Now()\n\t\tcmd := exec.Command(\"git\", \"update-index\", \"-q\", \"--refresh\", \"--stdin\")\n\t\tstdin, err := cmd.StdinPipe()\n\t\tif err != nil {\n\t\t\tPanic(err, \"Could not update the index\")\n\t\t}\n\n\t\tif err := cmd.Start(); err != nil {\n\t\t\tPanic(err, \"Could not update the index\")\n\t\t}\n\n\t\tfor _, pointer := range pointers {\n\t\t\tstdin.Write([]byte(pointer.Name + \"\\n\"))\n\t\t}\n\t\tstdin.Close()\n\t\tcmd.Wait()\n\t\ttracerx.PerformanceSince(\"update index\", updateIndex)\n\t}\n\n}\n\nfunc init() {\n\tRootCmd.AddCommand(getCmd)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"strconv\"\n\n\t\"github.com\/satori\/go.uuid\"\n)\n\ntype somaOncallRequest struct {\n\taction string\n\toncall somaproto.ProtoOncall\n\treply chan []somaOncallResult\n}\n\ntype somaOncallResult struct {\n\trErr error\n\tlErr error\n\toncall somaproto.ProtoOncall\n}\n\n\/* Read Access\n *\n *\/\ntype somaOncallReadHandler struct {\n\tinput chan somaOncallRequest\n\tshutdown chan bool\n\tconn *sql.DB\n\tlist_stmt *sql.Stmt\n\tshow_stmt *sql.Stmt\n}\n\nfunc (r *somaOncallReadHandler) run() {\n\tvar err error\n\n\tr.list_stmt, err = r.conn.Prepare(`\nSELECT oncall_duty_id, oncall_duty_name\nFROM inventory.oncall_duty_teams;`)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tr.show_stmt, err = r.conn.Prepare(`\nSELECT oncall_duty_id, oncall_duty_name, oncall_duty_phone_number\nFROM inventory.oncall_duty_teams\nWHERE oncall_duty_id = $1;`)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\nrunloop:\n\tfor {\n\t\tselect {\n\t\tcase <-r.shutdown:\n\t\t\tbreak runloop\n\t\tcase req := <-r.input:\n\t\t\tgo func() {\n\t\t\t\tr.process(&req)\n\t\t\t}()\n\t\t}\n\t}\n}\n\nfunc (r *somaOncallReadHandler) process(q *somaOncallRequest) {\n\tvar oncallId, oncallName string\n\tvar oncallNumber int\n\tvar rows *sql.Rows\n\tvar err error\n\tresult := make([]somaOncallResult, 0)\n\n\tswitch q.action {\n\tcase \"list\":\n\t\tlog.Printf(\"R: oncall\/list\")\n\t\trows, err = r.list_stmt.Query()\n\t\tdefer rows.Close()\n\t\tif err != nil {\n\t\t\tresult = append(result, somaOncallResult{\n\t\t\t\trErr: err,\n\t\t\t})\n\t\t\tq.reply <- result\n\t\t\treturn\n\t\t}\n\n\t\tfor rows.Next() {\n\t\t\terr := rows.Scan(&oncallId, &oncallName)\n\t\t\tif err != nil {\n\t\t\t\tresult = append(result, somaOncallResult{\n\t\t\t\t\tlErr: err,\n\t\t\t\t})\n\t\t\t\terr = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresult = append(result, somaOncallResult{\n\t\t\t\toncall: somaproto.ProtoOncall{\n\t\t\t\t\tId: oncallId,\n\t\t\t\t\tName: oncallName,\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\tcase \"show\":\n\t\tlog.Printf(\"R: oncall\/show for %s\", q.oncall.Id)\n\t\terr = r.show_stmt.QueryRow(q.oncall.Id).Scan(&oncallId, &oncallName, &oncallNumber)\n\t\tif err != nil {\n\t\t\tif err.Error() != \"sql: no rows in result set\" {\n\t\t\t\tresult = append(result, somaOncallResult{\n\t\t\t\t\trErr: err,\n\t\t\t\t})\n\t\t\t}\n\t\t\tq.reply <- result\n\t\t\treturn\n\t\t}\n\n\t\tresult = append(result, somaOncallResult{\n\t\t\toncall: somaproto.ProtoOncall{\n\t\t\t\tId: oncallId,\n\t\t\t\tName: oncallName,\n\t\t\t\tNumber: strconv.Itoa(oncallNumber),\n\t\t\t},\n\t\t})\n\tdefault:\n\t\tresult = append(result, somaOncallResult{\n\t\t\trErr: errors.New(\"not implemented\"),\n\t\t})\n\t}\n\tq.reply <- result\n}\n\n\/* Write Access\n *\n *\/\ntype somaOncallWriteHandler struct {\n\tinput chan somaOncallRequest\n\tshutdown chan bool\n\tconn *sql.DB\n\tadd_stmt *sql.Stmt\n\tupd_stmt *sql.Stmt\n\tdel_stmt *sql.Stmt\n}\n\nfunc (w *somaOncallWriteHandler) run() {\n\tvar err error\n\n\tlog.Println(\"Prepare: oncall\/add\")\n\tw.add_stmt, err = w.conn.Prepare(`\nINSERT INTO inventory.oncall_duty_teams (\n\toncall_duty_id,\n\toncall_duty_name,\n\toncall_duty_phone_number)\nSELECT $1, $2, $3 WHERE NOT EXISTS (\n\tSELECT oncall_duty_id\n\tFROM inventory.oncall_duty_teams\n\tWHERE oncall_duty_id = $4\n\tOR oncall_duty_name = $5\n\tOR oncall_duty_phone_number = $6);`)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer w.add_stmt.Close()\n\n\tlog.Println(\"Prepare: oncall\/upd\")\n\tw.upd_stmt, err = w.conn.Prepare(`\nUPDATE inventory.oncall_duty_teams\nSET oncall_duty_name = CASE WHEN $1::varchar IS NOT NULL THEN $2::varchar ELSE oncall_duty_name END,\n oncall_duty_phone_number = CASE WHEN $3::numeric IS NOT NULL THEN $4::numeric ELSE oncall_duty_phone_number END\nWHERE oncall_duty_id = $5;`)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer w.upd_stmt.Close()\n\n\tlog.Println(\"Prepare: oncall\/del\")\n\tw.del_stmt, err = w.conn.Prepare(`\nDELETE FROM inventory.oncall_duty_teams\nWHERE oncall_duty_id = $1;`)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer w.del_stmt.Close()\n\nrunloop:\n\tfor {\n\t\tselect {\n\t\tcase <-w.shutdown:\n\t\t\tbreak runloop\n\t\tcase req := <-w.input:\n\t\t\tw.process(&req)\n\t\t}\n\t}\n}\n\nfunc (w *somaOncallWriteHandler) process(q *somaOncallRequest) {\n\tvar res sql.Result\n\tvar err error\n\tresult := make([]somaOncallResult, 0)\n\n\tswitch q.action {\n\tcase \"add\":\n\t\tlog.Printf(\"R: oncall\/add for %s\", q.oncall.Name)\n\t\tid := uuid.NewV4()\n\t\tres, err = w.add_stmt.Exec(\n\t\t\tid.String(),\n\t\t\tq.oncall.Name,\n\t\t\tq.oncall.Number,\n\t\t\tid.String(),\n\t\t\tq.oncall.Name,\n\t\t\tq.oncall.Number,\n\t\t)\n\tcase \"update\":\n\t\tlog.Printf(\"R: oncall\/update for %s\", q.oncall.Id)\n\t\t\/\/ our update statement uses NULL to check which of the values\n\t\t\/\/ should be updated\n\t\tvar name sql.NullString\n\t\tif q.oncall.Name == \"\" {\n\t\t\tname = sql.NullString{String: \"\", Valid: false}\n\t\t} else {\n\t\t\tname = sql.NullString{String: q.oncall.Name, Valid: true}\n\t\t}\n\n\t\tvar n int \/\/ ensure err not redeclared in if block\n\t\tvar number sql.NullInt64\n\t\tif q.oncall.Number != \"\" {\n\t\t\tn, err = strconv.Atoi(q.oncall.Number)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnumber = sql.NullInt64{Int64: int64(n), Valid: true}\n\t\t} else {\n\t\t\tnumber = sql.NullInt64{Int64: 0, Valid: false}\n\t\t}\n\t\tres, err = w.upd_stmt.Exec(\n\t\t\tname,\n\t\t\tname,\n\t\t\tnumber,\n\t\t\tnumber,\n\t\t\tq.oncall.Id,\n\t\t)\n\tcase \"delete\":\n\t\tlog.Printf(\"R: oncall\/del for %s\", q.oncall.Id)\n\t\tres, err = w.del_stmt.Exec(\n\t\t\tq.oncall.Id,\n\t\t)\n\tdefault:\n\t\tlog.Printf(\"R: unimplemented oncall\/%s\", q.action)\n\t\tresult = append(result, somaOncallResult{\n\t\t\trErr: errors.New(\"not implemented\"),\n\t\t})\n\t\tq.reply <- result\n\t\treturn\n\t}\n\tif err != nil {\n\t\tresult = append(result, somaOncallResult{\n\t\t\trErr: err,\n\t\t})\n\t\tq.reply <- result\n\t\treturn\n\t}\n\n\trowCnt, _ := res.RowsAffected()\n\tswitch {\n\tcase rowCnt == 0:\n\t\tresult = append(result, somaOncallResult{\n\t\t\tlErr: errors.New(\"No rows affected\"),\n\t\t})\n\tcase rowCnt > 1:\n\t\tresult = append(result, somaOncallResult{\n\t\t\tlErr: fmt.Errorf(\"Too many rows affected: %d\", rowCnt),\n\t\t})\n\tdefault:\n\t\tresult = append(result, somaOncallResult{\n\t\t\toncall: q.oncall,\n\t\t})\n\t}\n\tq.reply <- result\n}\n\n\/\/ vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix\n<commit_msg>oncall\/add: return id of team<commit_after>package main\n\nimport (\n\t\"database\/sql\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"strconv\"\n\n\t\"github.com\/satori\/go.uuid\"\n)\n\ntype somaOncallRequest struct {\n\taction string\n\toncall somaproto.ProtoOncall\n\treply chan []somaOncallResult\n}\n\ntype somaOncallResult struct {\n\trErr error\n\tlErr error\n\toncall somaproto.ProtoOncall\n}\n\n\/* Read Access\n *\n *\/\ntype somaOncallReadHandler struct {\n\tinput chan somaOncallRequest\n\tshutdown chan bool\n\tconn *sql.DB\n\tlist_stmt *sql.Stmt\n\tshow_stmt *sql.Stmt\n}\n\nfunc (r *somaOncallReadHandler) run() {\n\tvar err error\n\n\tr.list_stmt, err = r.conn.Prepare(`\nSELECT oncall_duty_id, oncall_duty_name\nFROM inventory.oncall_duty_teams;`)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tr.show_stmt, err = r.conn.Prepare(`\nSELECT oncall_duty_id, oncall_duty_name, oncall_duty_phone_number\nFROM inventory.oncall_duty_teams\nWHERE oncall_duty_id = $1;`)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\nrunloop:\n\tfor {\n\t\tselect {\n\t\tcase <-r.shutdown:\n\t\t\tbreak runloop\n\t\tcase req := <-r.input:\n\t\t\tgo func() {\n\t\t\t\tr.process(&req)\n\t\t\t}()\n\t\t}\n\t}\n}\n\nfunc (r *somaOncallReadHandler) process(q *somaOncallRequest) {\n\tvar oncallId, oncallName string\n\tvar oncallNumber int\n\tvar rows *sql.Rows\n\tvar err error\n\tresult := make([]somaOncallResult, 0)\n\n\tswitch q.action {\n\tcase \"list\":\n\t\tlog.Printf(\"R: oncall\/list\")\n\t\trows, err = r.list_stmt.Query()\n\t\tdefer rows.Close()\n\t\tif err != nil {\n\t\t\tresult = append(result, somaOncallResult{\n\t\t\t\trErr: err,\n\t\t\t})\n\t\t\tq.reply <- result\n\t\t\treturn\n\t\t}\n\n\t\tfor rows.Next() {\n\t\t\terr := rows.Scan(&oncallId, &oncallName)\n\t\t\tif err != nil {\n\t\t\t\tresult = append(result, somaOncallResult{\n\t\t\t\t\tlErr: err,\n\t\t\t\t})\n\t\t\t\terr = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresult = append(result, somaOncallResult{\n\t\t\t\toncall: somaproto.ProtoOncall{\n\t\t\t\t\tId: oncallId,\n\t\t\t\t\tName: oncallName,\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\tcase \"show\":\n\t\tlog.Printf(\"R: oncall\/show for %s\", q.oncall.Id)\n\t\terr = r.show_stmt.QueryRow(q.oncall.Id).Scan(&oncallId, &oncallName, &oncallNumber)\n\t\tif err != nil {\n\t\t\tif err.Error() != \"sql: no rows in result set\" {\n\t\t\t\tresult = append(result, somaOncallResult{\n\t\t\t\t\trErr: err,\n\t\t\t\t})\n\t\t\t}\n\t\t\tq.reply <- result\n\t\t\treturn\n\t\t}\n\n\t\tresult = append(result, somaOncallResult{\n\t\t\toncall: somaproto.ProtoOncall{\n\t\t\t\tId: oncallId,\n\t\t\t\tName: oncallName,\n\t\t\t\tNumber: strconv.Itoa(oncallNumber),\n\t\t\t},\n\t\t})\n\tdefault:\n\t\tresult = append(result, somaOncallResult{\n\t\t\trErr: errors.New(\"not implemented\"),\n\t\t})\n\t}\n\tq.reply <- result\n}\n\n\/* Write Access\n *\n *\/\ntype somaOncallWriteHandler struct {\n\tinput chan somaOncallRequest\n\tshutdown chan bool\n\tconn *sql.DB\n\tadd_stmt *sql.Stmt\n\tupd_stmt *sql.Stmt\n\tdel_stmt *sql.Stmt\n}\n\nfunc (w *somaOncallWriteHandler) run() {\n\tvar err error\n\n\tlog.Println(\"Prepare: oncall\/add\")\n\tw.add_stmt, err = w.conn.Prepare(`\nINSERT INTO inventory.oncall_duty_teams (\n\toncall_duty_id,\n\toncall_duty_name,\n\toncall_duty_phone_number)\nSELECT $1, $2, $3 WHERE NOT EXISTS (\n\tSELECT oncall_duty_id\n\tFROM inventory.oncall_duty_teams\n\tWHERE oncall_duty_id = $4\n\tOR oncall_duty_name = $5\n\tOR oncall_duty_phone_number = $6);`)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer w.add_stmt.Close()\n\n\tlog.Println(\"Prepare: oncall\/upd\")\n\tw.upd_stmt, err = w.conn.Prepare(`\nUPDATE inventory.oncall_duty_teams\nSET oncall_duty_name = CASE WHEN $1::varchar IS NOT NULL THEN $2::varchar ELSE oncall_duty_name END,\n oncall_duty_phone_number = CASE WHEN $3::numeric IS NOT NULL THEN $4::numeric ELSE oncall_duty_phone_number END\nWHERE oncall_duty_id = $5;`)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer w.upd_stmt.Close()\n\n\tlog.Println(\"Prepare: oncall\/del\")\n\tw.del_stmt, err = w.conn.Prepare(`\nDELETE FROM inventory.oncall_duty_teams\nWHERE oncall_duty_id = $1;`)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer w.del_stmt.Close()\n\nrunloop:\n\tfor {\n\t\tselect {\n\t\tcase <-w.shutdown:\n\t\t\tbreak runloop\n\t\tcase req := <-w.input:\n\t\t\tw.process(&req)\n\t\t}\n\t}\n}\n\nfunc (w *somaOncallWriteHandler) process(q *somaOncallRequest) {\n\tvar res sql.Result\n\tvar err error\n\tresult := make([]somaOncallResult, 0)\n\n\tswitch q.action {\n\tcase \"add\":\n\t\tlog.Printf(\"R: oncall\/add for %s\", q.oncall.Name)\n\t\tid := uuid.NewV4()\n\t\tres, err = w.add_stmt.Exec(\n\t\t\tid.String(),\n\t\t\tq.oncall.Name,\n\t\t\tq.oncall.Number,\n\t\t\tid.String(),\n\t\t\tq.oncall.Name,\n\t\t\tq.oncall.Number,\n\t\t)\n\t\tq.oncall.Id = id.String()\n\tcase \"update\":\n\t\tlog.Printf(\"R: oncall\/update for %s\", q.oncall.Id)\n\t\t\/\/ our update statement uses NULL to check which of the values\n\t\t\/\/ should be updated\n\t\tvar name sql.NullString\n\t\tif q.oncall.Name == \"\" {\n\t\t\tname = sql.NullString{String: \"\", Valid: false}\n\t\t} else {\n\t\t\tname = sql.NullString{String: q.oncall.Name, Valid: true}\n\t\t}\n\n\t\tvar n int \/\/ ensure err not redeclared in if block\n\t\tvar number sql.NullInt64\n\t\tif q.oncall.Number != \"\" {\n\t\t\tn, err = strconv.Atoi(q.oncall.Number)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnumber = sql.NullInt64{Int64: int64(n), Valid: true}\n\t\t} else {\n\t\t\tnumber = sql.NullInt64{Int64: 0, Valid: false}\n\t\t}\n\t\tres, err = w.upd_stmt.Exec(\n\t\t\tname,\n\t\t\tname,\n\t\t\tnumber,\n\t\t\tnumber,\n\t\t\tq.oncall.Id,\n\t\t)\n\tcase \"delete\":\n\t\tlog.Printf(\"R: oncall\/del for %s\", q.oncall.Id)\n\t\tres, err = w.del_stmt.Exec(\n\t\t\tq.oncall.Id,\n\t\t)\n\tdefault:\n\t\tlog.Printf(\"R: unimplemented oncall\/%s\", q.action)\n\t\tresult = append(result, somaOncallResult{\n\t\t\trErr: errors.New(\"not implemented\"),\n\t\t})\n\t\tq.reply <- result\n\t\treturn\n\t}\n\tif err != nil {\n\t\tresult = append(result, somaOncallResult{\n\t\t\trErr: err,\n\t\t})\n\t\tq.reply <- result\n\t\treturn\n\t}\n\n\trowCnt, _ := res.RowsAffected()\n\tswitch {\n\tcase rowCnt == 0:\n\t\tresult = append(result, somaOncallResult{\n\t\t\tlErr: errors.New(\"No rows affected\"),\n\t\t})\n\tcase rowCnt > 1:\n\t\tresult = append(result, somaOncallResult{\n\t\t\tlErr: fmt.Errorf(\"Too many rows affected: %d\", rowCnt),\n\t\t})\n\tdefault:\n\t\tresult = append(result, somaOncallResult{\n\t\t\toncall: q.oncall,\n\t\t})\n\t}\n\tq.reply <- result\n}\n\n\/\/ vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix\n<|endoftext|>"} {"text":"<commit_before>package cbft\n\nimport (\n\t\"encoding\/json\"\n)\n\n\/\/ FtsSort represents an FTS sorting for a search query.\ntype FtsSort interface {\n}\n\ntype ftsSortBase struct {\n\toptions map[string]interface{}\n}\n\nfunc newFtsSortBase() ftsSortBase {\n\treturn ftsSortBase{\n\t\toptions: make(map[string]interface{}),\n\t}\n}\n\n\/\/ MarshalJSON marshal's this query to JSON for the FTS REST API.\nfunc (q ftsSortBase) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(q.options)\n}\n\n\/\/ SearchSortScore represents a FTS score sort.\ntype SearchSortScore struct {\n\tftsSortBase\n}\n\n\/\/ NewSearchSortScore creates a new SearchSortScore.\nfunc NewSearchSortScore() *SearchSortScore {\n\tq := &SearchSortScore{newFtsSortBase()}\n\tq.options[\"by\"] = \"score\"\n\treturn q\n}\n\n\/\/ Descending specifies the ordering of the results.\nfunc (q *SearchSortScore) Descending(descending bool) *SearchSortScore {\n\tq.options[\"descending\"] = descending\n\treturn q\n}\n\n\/\/ SearchSortId represents a FTS Document ID sort.\ntype SearchSortId struct {\n\tftsSortBase\n}\n\n\/\/ NewSearchSortId creates a new SearchSortScore.\nfunc NewSearchSortId() *SearchSortId {\n\tq := &SearchSortId{newFtsSortBase()}\n\tq.options[\"by\"] = \"id\"\n\treturn q\n}\n\n\/\/ Descending specifies the ordering of the results.\nfunc (q *SearchSortId) Descending(descending bool) *SearchSortId {\n\tq.options[\"descending\"] = descending\n\treturn q\n}\n\n\/\/ SearchSortField represents a FTS field sort.\ntype SearchSortField struct {\n\tftsSortBase\n}\n\n\/\/ NewSearchSortField creates a new SearchSortField.\nfunc NewSearchSortField(field string) *SearchSortField {\n\tq := &SearchSortField{newFtsSortBase()}\n\tq.options[\"by\"] = \"field\"\n\tq.options[\"field\"] = field\n\treturn q\n}\n\n\/\/ Type allows you to specify the FTS field sort type.\nfunc (q *SearchSortField) Type(value string) *SearchSortField {\n\tq.options[\"type\"] = value\n\treturn q\n}\n\n\/\/ Mode allows you to specify the FTS field sort mode.\nfunc (q *SearchSortField) Mode(mode string) *SearchSortField {\n\tq.options[\"mode\"] = mode\n\treturn q\n}\n\n\/\/ Missing allows you to specify the FTS field sort missing behaviour.\nfunc (q *SearchSortField) Missing(missing string) *SearchSortField {\n\tq.options[\"missing\"] = missing\n\treturn q\n}\n\n\/\/ Descending specifies the ordering of the results.\nfunc (q *SearchSortField) Descending(descending bool) *SearchSortField {\n\tq.options[\"descending\"] = descending\n\treturn q\n}\n\n\/\/ SearchSortGeoDistance represents a FTS geo sort.\ntype SearchSortGeoDistance struct {\n\tftsSortBase\n}\n\n\/\/ NewSearchSortGeoDistance creates a new SearchSortGeoDistance.\nfunc NewSearchSortGeoDistance(field string, lat, lon float64) *SearchSortGeoDistance {\n\tq := &SearchSortGeoDistance{newFtsSortBase()}\n\tq.options[\"by\"] = \"geo_distance\"\n\tq.options[\"field\"] = field\n\tq.options[\"location\"] = []float64{lon, lat}\n\treturn q\n}\n\n\/\/ Unit specifies the unit used for sorting\nfunc (q *SearchSortGeoDistance) Unit(unit string) *SearchSortGeoDistance {\n\tq.options[\"unit\"] = unit\n\treturn q\n}\n\n\/\/ Descending specifies the ordering of the results.\nfunc (q *SearchSortGeoDistance) Descending(descending bool) *SearchSortGeoDistance {\n\tq.options[\"descending\"] = descending\n\treturn q\n}\n<commit_msg>GOCB-235: Change Search query sorting order from descending to desc<commit_after>package cbft\n\nimport (\n\t\"encoding\/json\"\n)\n\n\/\/ FtsSort represents an FTS sorting for a search query.\ntype FtsSort interface {\n}\n\ntype ftsSortBase struct {\n\toptions map[string]interface{}\n}\n\nfunc newFtsSortBase() ftsSortBase {\n\treturn ftsSortBase{\n\t\toptions: make(map[string]interface{}),\n\t}\n}\n\n\/\/ MarshalJSON marshal's this query to JSON for the FTS REST API.\nfunc (q ftsSortBase) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(q.options)\n}\n\n\/\/ SearchSortScore represents a FTS score sort.\ntype SearchSortScore struct {\n\tftsSortBase\n}\n\n\/\/ NewSearchSortScore creates a new SearchSortScore.\nfunc NewSearchSortScore() *SearchSortScore {\n\tq := &SearchSortScore{newFtsSortBase()}\n\tq.options[\"by\"] = \"score\"\n\treturn q\n}\n\n\/\/ Descending specifies the ordering of the results.\nfunc (q *SearchSortScore) Descending(descending bool) *SearchSortScore {\n\tq.options[\"desc\"] = descending\n\treturn q\n}\n\n\/\/ SearchSortId represents a FTS Document ID sort.\ntype SearchSortId struct {\n\tftsSortBase\n}\n\n\/\/ NewSearchSortId creates a new SearchSortScore.\nfunc NewSearchSortId() *SearchSortId {\n\tq := &SearchSortId{newFtsSortBase()}\n\tq.options[\"by\"] = \"id\"\n\treturn q\n}\n\n\/\/ Descending specifies the ordering of the results.\nfunc (q *SearchSortId) Descending(descending bool) *SearchSortId {\n\tq.options[\"desc\"] = descending\n\treturn q\n}\n\n\/\/ SearchSortField represents a FTS field sort.\ntype SearchSortField struct {\n\tftsSortBase\n}\n\n\/\/ NewSearchSortField creates a new SearchSortField.\nfunc NewSearchSortField(field string) *SearchSortField {\n\tq := &SearchSortField{newFtsSortBase()}\n\tq.options[\"by\"] = \"field\"\n\tq.options[\"field\"] = field\n\treturn q\n}\n\n\/\/ Type allows you to specify the FTS field sort type.\nfunc (q *SearchSortField) Type(value string) *SearchSortField {\n\tq.options[\"type\"] = value\n\treturn q\n}\n\n\/\/ Mode allows you to specify the FTS field sort mode.\nfunc (q *SearchSortField) Mode(mode string) *SearchSortField {\n\tq.options[\"mode\"] = mode\n\treturn q\n}\n\n\/\/ Missing allows you to specify the FTS field sort missing behaviour.\nfunc (q *SearchSortField) Missing(missing string) *SearchSortField {\n\tq.options[\"missing\"] = missing\n\treturn q\n}\n\n\/\/ Descending specifies the ordering of the results.\nfunc (q *SearchSortField) Descending(descending bool) *SearchSortField {\n\tq.options[\"desc\"] = descending\n\treturn q\n}\n\n\/\/ SearchSortGeoDistance represents a FTS geo sort.\ntype SearchSortGeoDistance struct {\n\tftsSortBase\n}\n\n\/\/ NewSearchSortGeoDistance creates a new SearchSortGeoDistance.\nfunc NewSearchSortGeoDistance(field string, lat, lon float64) *SearchSortGeoDistance {\n\tq := &SearchSortGeoDistance{newFtsSortBase()}\n\tq.options[\"by\"] = \"geo_distance\"\n\tq.options[\"field\"] = field\n\tq.options[\"location\"] = []float64{lon, lat}\n\treturn q\n}\n\n\/\/ Unit specifies the unit used for sorting\nfunc (q *SearchSortGeoDistance) Unit(unit string) *SearchSortGeoDistance {\n\tq.options[\"unit\"] = unit\n\treturn q\n}\n\n\/\/ Descending specifies the ordering of the results.\nfunc (q *SearchSortGeoDistance) Descending(descending bool) *SearchSortGeoDistance {\n\tq.options[\"desc\"] = descending\n\treturn q\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strconv\"\n\n\t\"github.com\/hashicorp\/terraform\/flatmap\"\n\t\"github.com\/hashicorp\/terraform\/helper\/config\"\n\t\"github.com\/hashicorp\/terraform\/helper\/diff\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"github.com\/mitchellh\/goamz\/elb\"\n)\n\nfunc resource_aws_elb_create(\n\ts *terraform.InstanceState,\n\td *terraform.InstanceDiff,\n\tmeta interface{}) (*terraform.InstanceState, error) {\n\tp := meta.(*ResourceProvider)\n\telbconn := p.elbconn\n\n\t\/\/ Merge the diff into the state so that we have all the attributes\n\t\/\/ properly.\n\trs := s.MergeDiff(d)\n\n\t\/\/ The name specified for the ELB. This is also our unique ID\n\t\/\/ we save to state if the creation is successful (amazon verifies\n\t\/\/ it is unique)\n\telbName := rs.Attributes[\"name\"]\n\n\t\/\/ Expand the \"listener\" array to goamz compat []elb.Listener\n\tv := flatmap.Expand(rs.Attributes, \"listener\").([]interface{})\n\tlisteners, err := expandListeners(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Provision the elb\n\telbOpts := &elb.CreateLoadBalancer{\n\t\tLoadBalancerName: elbName,\n\t\tListeners: listeners,\n\t}\n\n\tif rs.Attributes[\"internal\"] == \"true\" {\n\t\telbOpts.Internal = true\n\t}\n\n\tif _, ok := rs.Attributes[\"availability_zones.#\"]; ok {\n\t\tv = flatmap.Expand(rs.Attributes, \"availability_zones\").([]interface{})\n\t\telbOpts.AvailZone = expandStringList(v)\n\t}\n\n\tif _, ok := rs.Attributes[\"security_groups.#\"]; ok {\n\t\tv = flatmap.Expand(rs.Attributes, \"security_groups\").([]interface{})\n\t\telbOpts.SecurityGroups = expandStringList(v)\n\t}\n\n\tif _, ok := rs.Attributes[\"subnets.#\"]; ok {\n\t\tv = flatmap.Expand(rs.Attributes, \"subnets\").([]interface{})\n\t\telbOpts.Subnets = expandStringList(v)\n\t}\n\n\tlog.Printf(\"[DEBUG] ELB create configuration: %#v\", elbOpts)\n\n\t_, err = elbconn.CreateLoadBalancer(elbOpts)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating ELB: %s\", err)\n\t}\n\n\t\/\/ Assign the elb's unique identifier for use later\n\trs.ID = elbName\n\tlog.Printf(\"[INFO] ELB ID: %s\", elbName)\n\n\tif _, ok := rs.Attributes[\"instances.#\"]; ok {\n\t\t\/\/ If we have any instances, we need to register them\n\t\tv = flatmap.Expand(rs.Attributes, \"instances\").([]interface{})\n\t\tinstances := expandStringList(v)\n\n\t\tif len(instances) > 0 {\n\t\t\tregisterInstancesOpts := elb.RegisterInstancesWithLoadBalancer{\n\t\t\t\tLoadBalancerName: elbName,\n\t\t\t\tInstances: instances,\n\t\t\t}\n\n\t\t\t_, err := elbconn.RegisterInstancesWithLoadBalancer(®isterInstancesOpts)\n\n\t\t\tif err != nil {\n\t\t\t\treturn rs, fmt.Errorf(\"Failure registering instances: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif _, ok := rs.Attributes[\"health_check.#\"]; ok {\n\t\tv := flatmap.Expand(rs.Attributes, \"health_check\").([]interface{})\n\t\thealth_check := v[0].(map[string]interface{})\n\t\thealthyThreshold, err := strconv.ParseInt(health_check[\"healthy_threshold\"].(string), 0, 0)\n\t\tunhealthyThreshold, err := strconv.ParseInt(health_check[\"unhealthy_threshold\"].(string), 0, 0)\n\t\tinterval, err := strconv.ParseInt(health_check[\"interval\"].(string), 0, 0)\n\t\ttimeout, err := strconv.ParseInt(health_check[\"timeout\"].(string), 0, 0)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tconfigureHealthCheckOpts := elb.ConfigureHealthCheck{\n\t\t\tLoadBalancerName: elbName,\n\t\t\tCheck: elb.HealthCheck{\n\t\t\t\tHealthyThreshold: healthyThreshold,\n\t\t\t\tUnhealthyThreshold: unhealthyThreshold,\n\t\t\t\tInterval: interval,\n\t\t\t\tTarget: health_check[\"target\"].(string),\n\t\t\t\tTimeout: timeout,\n\t\t\t},\n\t\t}\n\n\t\t_, err = elbconn.ConfigureHealthCheck(&configureHealthCheckOpts)\n\t\tif err != nil {\n\t\t\treturn rs, fmt.Errorf(\"Failure configuring health check: %s\", err)\n\t\t}\n\t}\n\n\tloadBalancer, err := resource_aws_elb_retrieve_balancer(rs.ID, elbconn)\n\tif err != nil {\n\t\treturn rs, err\n\t}\n\n\treturn resource_aws_elb_update_state(rs, loadBalancer)\n}\n\nfunc resource_aws_elb_update(\n\ts *terraform.InstanceState,\n\td *terraform.InstanceDiff,\n\tmeta interface{}) (*terraform.InstanceState, error) {\n\tp := meta.(*ResourceProvider)\n\telbconn := p.elbconn\n\n\trs := s.MergeDiff(d)\n\n\t\/\/ If we currently have instances, or did have instances,\n\t\/\/ we want to figure out what to add and remove from the load\n\t\/\/ balancer\n\tif attr, ok := d.Attributes[\"instances.#\"]; ok && attr.Old != \"\" {\n\t\t\/\/ The new state of instances merged with the diff\n\t\tmergedInstances := expandStringList(flatmap.Expand(\n\t\t\trs.Attributes, \"instances\").([]interface{}))\n\n\t\t\/\/ The state before the diff merge\n\t\tpreviousInstances := expandStringList(flatmap.Expand(\n\t\t\ts.Attributes, \"instances\").([]interface{}))\n\n\t\t\/\/ keep track of what instances we are removing, and which\n\t\t\/\/ we are adding\n\t\tvar toRemove []string\n\t\tvar toAdd []string\n\n\t\tfor _, instanceId := range mergedInstances {\n\t\t\tfor _, prevId := range previousInstances {\n\t\t\t\t\/\/ If the merged instance ID existed\n\t\t\t\t\/\/ previously, we don't have to do anything\n\t\t\t\tif instanceId == prevId {\n\t\t\t\t\tcontinue\n\t\t\t\t\t\/\/ Otherwise, we need to add it to the load balancer\n\t\t\t\t} else {\n\t\t\t\t\ttoAdd = append(toAdd, instanceId)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor i, instanceId := range toAdd {\n\t\t\tfor _, prevId := range previousInstances {\n\t\t\t\t\/\/ If the instance ID we are adding existed\n\t\t\t\t\/\/ previously, we want to not add it, but rather remove\n\t\t\t\t\/\/ it\n\t\t\t\tif instanceId == prevId {\n\t\t\t\t\ttoRemove = append(toRemove, instanceId)\n\t\t\t\t\ttoAdd = append(toAdd[:i], toAdd[i+1:]...)\n\t\t\t\t\t\/\/ Otherwise, we continue adding it to the ELB\n\t\t\t\t} else {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(toAdd) > 0 {\n\t\t\tregisterInstancesOpts := elb.RegisterInstancesWithLoadBalancer{\n\t\t\t\tLoadBalancerName: rs.ID,\n\t\t\t\tInstances: toAdd,\n\t\t\t}\n\n\t\t\t_, err := elbconn.RegisterInstancesWithLoadBalancer(®isterInstancesOpts)\n\n\t\t\tif err != nil {\n\t\t\t\treturn s, fmt.Errorf(\"Failure registering instances: %s\", err)\n\t\t\t}\n\t\t}\n\n\t\tif len(toRemove) > 0 {\n\t\t\tdeRegisterInstancesOpts := elb.DeregisterInstancesFromLoadBalancer{\n\t\t\t\tLoadBalancerName: rs.ID,\n\t\t\t\tInstances: toRemove,\n\t\t\t}\n\n\t\t\t_, err := elbconn.DeregisterInstancesFromLoadBalancer(&deRegisterInstancesOpts)\n\n\t\t\tif err != nil {\n\t\t\t\treturn s, fmt.Errorf(\"Failure deregistering instances: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\tloadBalancer, err := resource_aws_elb_retrieve_balancer(rs.ID, elbconn)\n\n\tif err != nil {\n\t\treturn s, err\n\t}\n\n\treturn resource_aws_elb_update_state(rs, loadBalancer)\n}\n\nfunc resource_aws_elb_destroy(\n\ts *terraform.InstanceState,\n\tmeta interface{}) error {\n\tp := meta.(*ResourceProvider)\n\telbconn := p.elbconn\n\n\tlog.Printf(\"[INFO] Deleting ELB: %s\", s.ID)\n\n\t\/\/ Destroy the load balancer\n\tdeleteElbOpts := elb.DeleteLoadBalancer{\n\t\tLoadBalancerName: s.ID,\n\t}\n\t_, err := elbconn.DeleteLoadBalancer(&deleteElbOpts)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error deleting ELB: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc resource_aws_elb_refresh(\n\ts *terraform.InstanceState,\n\tmeta interface{}) (*terraform.InstanceState, error) {\n\tp := meta.(*ResourceProvider)\n\telbconn := p.elbconn\n\n\tloadBalancer, err := resource_aws_elb_retrieve_balancer(s.ID, elbconn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resource_aws_elb_update_state(s, loadBalancer)\n}\n\nfunc resource_aws_elb_diff(\n\ts *terraform.InstanceState,\n\tc *terraform.ResourceConfig,\n\tmeta interface{}) (*terraform.InstanceDiff, error) {\n\n\tb := &diff.ResourceBuilder{\n\t\tAttrs: map[string]diff.AttrType{\n\t\t\t\"name\": diff.AttrTypeCreate,\n\t\t\t\"availability_zone\": diff.AttrTypeCreate,\n\t\t\t\"security_groups\": diff.AttrTypeCreate, \/\/ TODO could be AttrTypeUpdate\n\t\t\t\"subnets\": diff.AttrTypeCreate, \/\/ TODO could be AttrTypeUpdate\n\t\t\t\"listener\": diff.AttrTypeCreate,\n\t\t\t\"instances\": diff.AttrTypeUpdate,\n\t\t\t\"health_check\": diff.AttrTypeCreate,\n\t\t\t\"internal\": diff.AttrTypeCreate,\n\t\t},\n\n\t\tComputedAttrs: []string{\n\t\t\t\"dns_name\",\n\t\t},\n\t}\n\n\treturn b.Diff(s, c)\n}\n\nfunc resource_aws_elb_update_state(\n\ts *terraform.InstanceState,\n\tbalancer *elb.LoadBalancer) (*terraform.InstanceState, error) {\n\n\ts.Attributes[\"name\"] = balancer.LoadBalancerName\n\ts.Attributes[\"dns_name\"] = balancer.DNSName\n\n\t\/\/ Flatten our group values\n\ttoFlatten := make(map[string]interface{})\n\n\tif len(balancer.Instances) > 0 && balancer.Instances[0].InstanceId != \"\" {\n\t\ttoFlatten[\"instances\"] = flattenInstances(balancer.Instances)\n\t}\n\n\tif len(balancer.SecurityGroups) > 0 && balancer.SecurityGroups[0] != \"\" {\n\t\ttoFlatten[\"security_groups\"] = balancer.SecurityGroups\n\t}\n\n\tif len(balancer.Subnets) > 0 && balancer.Subnets[0] != \"\" {\n\t\ttoFlatten[\"subnets\"] = balancer.Subnets\n\t}\n\n\t\/\/ There's only one health check, so save that to state as we\n\t\/\/ currently can\n\tif balancer.HealthCheck.Target != \"\" {\n\t\ttoFlatten[\"health_check\"] = flattenHealthCheck(balancer.HealthCheck)\n\t}\n\n\tfor k, v := range flatmap.Flatten(toFlatten) {\n\t\ts.Attributes[k] = v\n\t}\n\n\treturn s, nil\n}\n\n\/\/ retrieves an ELB by its ID\nfunc resource_aws_elb_retrieve_balancer(id string, elbconn *elb.ELB) (*elb.LoadBalancer, error) {\n\tdescribeElbOpts := &elb.DescribeLoadBalancer{\n\t\tNames: []string{id},\n\t}\n\n\t\/\/ Retrieve the ELB properties for updating the state\n\tdescribeResp, err := elbconn.DescribeLoadBalancers(describeElbOpts)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error retrieving ELB: %s\", err)\n\t}\n\n\tloadBalancer := describeResp.LoadBalancers[0]\n\n\t\/\/ Verify AWS returned our ELB\n\tif len(describeResp.LoadBalancers) != 1 ||\n\t\tdescribeResp.LoadBalancers[0].LoadBalancerName != id {\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Unable to find ELB: %#v\", describeResp.LoadBalancers)\n\t\t}\n\t}\n\n\treturn &loadBalancer, nil\n}\n\nfunc resource_aws_elb_validation() *config.Validator {\n\treturn &config.Validator{\n\t\tRequired: []string{\n\t\t\t\"name\",\n\t\t\t\"listener.*\",\n\t\t\t\"listener.*.instance_port\",\n\t\t\t\"listener.*.instance_protocol\",\n\t\t\t\"listener.*.lb_port\",\n\t\t\t\"listener.*.lb_protocol\",\n\t\t},\n\t\tOptional: []string{\n\t\t\t\"instances.*\",\n\t\t\t\"internal\",\n\t\t\t\"availability_zones.*\",\n\t\t\t\"security_groups.*\",\n\t\t\t\"subnets.*\",\n\t\t\t\"health_check.#\",\n\t\t\t\"health_check.0.healthy_threshold\",\n\t\t\t\"health_check.0.unhealthy_threshold\",\n\t\t\t\"health_check.0.interval\",\n\t\t\t\"health_check.0.target\",\n\t\t\t\"health_check.0.timeout\",\n\t\t},\n\t}\n}\n<commit_msg>providers\/aws: read internal for ELB<commit_after>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strconv\"\n\n\t\"github.com\/hashicorp\/terraform\/flatmap\"\n\t\"github.com\/hashicorp\/terraform\/helper\/config\"\n\t\"github.com\/hashicorp\/terraform\/helper\/diff\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"github.com\/mitchellh\/goamz\/elb\"\n)\n\nfunc resource_aws_elb_create(\n\ts *terraform.InstanceState,\n\td *terraform.InstanceDiff,\n\tmeta interface{}) (*terraform.InstanceState, error) {\n\tp := meta.(*ResourceProvider)\n\telbconn := p.elbconn\n\n\t\/\/ Merge the diff into the state so that we have all the attributes\n\t\/\/ properly.\n\trs := s.MergeDiff(d)\n\n\t\/\/ The name specified for the ELB. This is also our unique ID\n\t\/\/ we save to state if the creation is successful (amazon verifies\n\t\/\/ it is unique)\n\telbName := rs.Attributes[\"name\"]\n\n\t\/\/ Expand the \"listener\" array to goamz compat []elb.Listener\n\tv := flatmap.Expand(rs.Attributes, \"listener\").([]interface{})\n\tlisteners, err := expandListeners(v)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Provision the elb\n\telbOpts := &elb.CreateLoadBalancer{\n\t\tLoadBalancerName: elbName,\n\t\tListeners: listeners,\n\t}\n\n\tif rs.Attributes[\"internal\"] == \"true\" {\n\t\telbOpts.Internal = true\n\t}\n\n\tif _, ok := rs.Attributes[\"availability_zones.#\"]; ok {\n\t\tv = flatmap.Expand(rs.Attributes, \"availability_zones\").([]interface{})\n\t\telbOpts.AvailZone = expandStringList(v)\n\t}\n\n\tif _, ok := rs.Attributes[\"security_groups.#\"]; ok {\n\t\tv = flatmap.Expand(rs.Attributes, \"security_groups\").([]interface{})\n\t\telbOpts.SecurityGroups = expandStringList(v)\n\t}\n\n\tif _, ok := rs.Attributes[\"subnets.#\"]; ok {\n\t\tv = flatmap.Expand(rs.Attributes, \"subnets\").([]interface{})\n\t\telbOpts.Subnets = expandStringList(v)\n\t}\n\n\tlog.Printf(\"[DEBUG] ELB create configuration: %#v\", elbOpts)\n\n\t_, err = elbconn.CreateLoadBalancer(elbOpts)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating ELB: %s\", err)\n\t}\n\n\t\/\/ Assign the elb's unique identifier for use later\n\trs.ID = elbName\n\tlog.Printf(\"[INFO] ELB ID: %s\", elbName)\n\n\tif _, ok := rs.Attributes[\"instances.#\"]; ok {\n\t\t\/\/ If we have any instances, we need to register them\n\t\tv = flatmap.Expand(rs.Attributes, \"instances\").([]interface{})\n\t\tinstances := expandStringList(v)\n\n\t\tif len(instances) > 0 {\n\t\t\tregisterInstancesOpts := elb.RegisterInstancesWithLoadBalancer{\n\t\t\t\tLoadBalancerName: elbName,\n\t\t\t\tInstances: instances,\n\t\t\t}\n\n\t\t\t_, err := elbconn.RegisterInstancesWithLoadBalancer(®isterInstancesOpts)\n\n\t\t\tif err != nil {\n\t\t\t\treturn rs, fmt.Errorf(\"Failure registering instances: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif _, ok := rs.Attributes[\"health_check.#\"]; ok {\n\t\tv := flatmap.Expand(rs.Attributes, \"health_check\").([]interface{})\n\t\thealth_check := v[0].(map[string]interface{})\n\t\thealthyThreshold, err := strconv.ParseInt(health_check[\"healthy_threshold\"].(string), 0, 0)\n\t\tunhealthyThreshold, err := strconv.ParseInt(health_check[\"unhealthy_threshold\"].(string), 0, 0)\n\t\tinterval, err := strconv.ParseInt(health_check[\"interval\"].(string), 0, 0)\n\t\ttimeout, err := strconv.ParseInt(health_check[\"timeout\"].(string), 0, 0)\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tconfigureHealthCheckOpts := elb.ConfigureHealthCheck{\n\t\t\tLoadBalancerName: elbName,\n\t\t\tCheck: elb.HealthCheck{\n\t\t\t\tHealthyThreshold: healthyThreshold,\n\t\t\t\tUnhealthyThreshold: unhealthyThreshold,\n\t\t\t\tInterval: interval,\n\t\t\t\tTarget: health_check[\"target\"].(string),\n\t\t\t\tTimeout: timeout,\n\t\t\t},\n\t\t}\n\n\t\t_, err = elbconn.ConfigureHealthCheck(&configureHealthCheckOpts)\n\t\tif err != nil {\n\t\t\treturn rs, fmt.Errorf(\"Failure configuring health check: %s\", err)\n\t\t}\n\t}\n\n\tloadBalancer, err := resource_aws_elb_retrieve_balancer(rs.ID, elbconn)\n\tif err != nil {\n\t\treturn rs, err\n\t}\n\n\treturn resource_aws_elb_update_state(rs, loadBalancer)\n}\n\nfunc resource_aws_elb_update(\n\ts *terraform.InstanceState,\n\td *terraform.InstanceDiff,\n\tmeta interface{}) (*terraform.InstanceState, error) {\n\tp := meta.(*ResourceProvider)\n\telbconn := p.elbconn\n\n\trs := s.MergeDiff(d)\n\n\t\/\/ If we currently have instances, or did have instances,\n\t\/\/ we want to figure out what to add and remove from the load\n\t\/\/ balancer\n\tif attr, ok := d.Attributes[\"instances.#\"]; ok && attr.Old != \"\" {\n\t\t\/\/ The new state of instances merged with the diff\n\t\tmergedInstances := expandStringList(flatmap.Expand(\n\t\t\trs.Attributes, \"instances\").([]interface{}))\n\n\t\t\/\/ The state before the diff merge\n\t\tpreviousInstances := expandStringList(flatmap.Expand(\n\t\t\ts.Attributes, \"instances\").([]interface{}))\n\n\t\t\/\/ keep track of what instances we are removing, and which\n\t\t\/\/ we are adding\n\t\tvar toRemove []string\n\t\tvar toAdd []string\n\n\t\tfor _, instanceId := range mergedInstances {\n\t\t\tfor _, prevId := range previousInstances {\n\t\t\t\t\/\/ If the merged instance ID existed\n\t\t\t\t\/\/ previously, we don't have to do anything\n\t\t\t\tif instanceId == prevId {\n\t\t\t\t\tcontinue\n\t\t\t\t\t\/\/ Otherwise, we need to add it to the load balancer\n\t\t\t\t} else {\n\t\t\t\t\ttoAdd = append(toAdd, instanceId)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor i, instanceId := range toAdd {\n\t\t\tfor _, prevId := range previousInstances {\n\t\t\t\t\/\/ If the instance ID we are adding existed\n\t\t\t\t\/\/ previously, we want to not add it, but rather remove\n\t\t\t\t\/\/ it\n\t\t\t\tif instanceId == prevId {\n\t\t\t\t\ttoRemove = append(toRemove, instanceId)\n\t\t\t\t\ttoAdd = append(toAdd[:i], toAdd[i+1:]...)\n\t\t\t\t\t\/\/ Otherwise, we continue adding it to the ELB\n\t\t\t\t} else {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(toAdd) > 0 {\n\t\t\tregisterInstancesOpts := elb.RegisterInstancesWithLoadBalancer{\n\t\t\t\tLoadBalancerName: rs.ID,\n\t\t\t\tInstances: toAdd,\n\t\t\t}\n\n\t\t\t_, err := elbconn.RegisterInstancesWithLoadBalancer(®isterInstancesOpts)\n\n\t\t\tif err != nil {\n\t\t\t\treturn s, fmt.Errorf(\"Failure registering instances: %s\", err)\n\t\t\t}\n\t\t}\n\n\t\tif len(toRemove) > 0 {\n\t\t\tdeRegisterInstancesOpts := elb.DeregisterInstancesFromLoadBalancer{\n\t\t\t\tLoadBalancerName: rs.ID,\n\t\t\t\tInstances: toRemove,\n\t\t\t}\n\n\t\t\t_, err := elbconn.DeregisterInstancesFromLoadBalancer(&deRegisterInstancesOpts)\n\n\t\t\tif err != nil {\n\t\t\t\treturn s, fmt.Errorf(\"Failure deregistering instances: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\tloadBalancer, err := resource_aws_elb_retrieve_balancer(rs.ID, elbconn)\n\n\tif err != nil {\n\t\treturn s, err\n\t}\n\n\treturn resource_aws_elb_update_state(rs, loadBalancer)\n}\n\nfunc resource_aws_elb_destroy(\n\ts *terraform.InstanceState,\n\tmeta interface{}) error {\n\tp := meta.(*ResourceProvider)\n\telbconn := p.elbconn\n\n\tlog.Printf(\"[INFO] Deleting ELB: %s\", s.ID)\n\n\t\/\/ Destroy the load balancer\n\tdeleteElbOpts := elb.DeleteLoadBalancer{\n\t\tLoadBalancerName: s.ID,\n\t}\n\t_, err := elbconn.DeleteLoadBalancer(&deleteElbOpts)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error deleting ELB: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc resource_aws_elb_refresh(\n\ts *terraform.InstanceState,\n\tmeta interface{}) (*terraform.InstanceState, error) {\n\tp := meta.(*ResourceProvider)\n\telbconn := p.elbconn\n\n\tloadBalancer, err := resource_aws_elb_retrieve_balancer(s.ID, elbconn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resource_aws_elb_update_state(s, loadBalancer)\n}\n\nfunc resource_aws_elb_diff(\n\ts *terraform.InstanceState,\n\tc *terraform.ResourceConfig,\n\tmeta interface{}) (*terraform.InstanceDiff, error) {\n\n\tb := &diff.ResourceBuilder{\n\t\tAttrs: map[string]diff.AttrType{\n\t\t\t\"name\": diff.AttrTypeCreate,\n\t\t\t\"availability_zone\": diff.AttrTypeCreate,\n\t\t\t\"security_groups\": diff.AttrTypeCreate, \/\/ TODO could be AttrTypeUpdate\n\t\t\t\"subnets\": diff.AttrTypeCreate, \/\/ TODO could be AttrTypeUpdate\n\t\t\t\"listener\": diff.AttrTypeCreate,\n\t\t\t\"instances\": diff.AttrTypeUpdate,\n\t\t\t\"health_check\": diff.AttrTypeCreate,\n\t\t\t\"internal\": diff.AttrTypeCreate,\n\t\t},\n\n\t\tComputedAttrs: []string{\n\t\t\t\"dns_name\",\n\t\t},\n\t}\n\n\treturn b.Diff(s, c)\n}\n\nfunc resource_aws_elb_update_state(\n\ts *terraform.InstanceState,\n\tbalancer *elb.LoadBalancer) (*terraform.InstanceState, error) {\n\n\ts.Attributes[\"name\"] = balancer.LoadBalancerName\n\ts.Attributes[\"dns_name\"] = balancer.DNSName\n\n\tif balancer.Scheme == \"internal\" {\n\t\ts.Attributes[\"internal\"] = \"true\"\n\t}\n\n\t\/\/ Flatten our group values\n\ttoFlatten := make(map[string]interface{})\n\n\tif len(balancer.Instances) > 0 && balancer.Instances[0].InstanceId != \"\" {\n\t\ttoFlatten[\"instances\"] = flattenInstances(balancer.Instances)\n\t}\n\n\tif len(balancer.SecurityGroups) > 0 && balancer.SecurityGroups[0] != \"\" {\n\t\ttoFlatten[\"security_groups\"] = balancer.SecurityGroups\n\t}\n\n\tif len(balancer.Subnets) > 0 && balancer.Subnets[0] != \"\" {\n\t\ttoFlatten[\"subnets\"] = balancer.Subnets\n\t}\n\n\t\/\/ There's only one health check, so save that to state as we\n\t\/\/ currently can\n\tif balancer.HealthCheck.Target != \"\" {\n\t\ttoFlatten[\"health_check\"] = flattenHealthCheck(balancer.HealthCheck)\n\t}\n\n\tfor k, v := range flatmap.Flatten(toFlatten) {\n\t\ts.Attributes[k] = v\n\t}\n\n\treturn s, nil\n}\n\n\/\/ retrieves an ELB by its ID\nfunc resource_aws_elb_retrieve_balancer(id string, elbconn *elb.ELB) (*elb.LoadBalancer, error) {\n\tdescribeElbOpts := &elb.DescribeLoadBalancer{\n\t\tNames: []string{id},\n\t}\n\n\t\/\/ Retrieve the ELB properties for updating the state\n\tdescribeResp, err := elbconn.DescribeLoadBalancers(describeElbOpts)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error retrieving ELB: %s\", err)\n\t}\n\n\tloadBalancer := describeResp.LoadBalancers[0]\n\n\t\/\/ Verify AWS returned our ELB\n\tif len(describeResp.LoadBalancers) != 1 ||\n\t\tdescribeResp.LoadBalancers[0].LoadBalancerName != id {\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Unable to find ELB: %#v\", describeResp.LoadBalancers)\n\t\t}\n\t}\n\n\treturn &loadBalancer, nil\n}\n\nfunc resource_aws_elb_validation() *config.Validator {\n\treturn &config.Validator{\n\t\tRequired: []string{\n\t\t\t\"name\",\n\t\t\t\"listener.*\",\n\t\t\t\"listener.*.instance_port\",\n\t\t\t\"listener.*.instance_protocol\",\n\t\t\t\"listener.*.lb_port\",\n\t\t\t\"listener.*.lb_protocol\",\n\t\t},\n\t\tOptional: []string{\n\t\t\t\"instances.*\",\n\t\t\t\"internal\",\n\t\t\t\"availability_zones.*\",\n\t\t\t\"security_groups.*\",\n\t\t\t\"subnets.*\",\n\t\t\t\"health_check.#\",\n\t\t\t\"health_check.0.healthy_threshold\",\n\t\t\t\"health_check.0.unhealthy_threshold\",\n\t\t\t\"health_check.0.interval\",\n\t\t\t\"health_check.0.target\",\n\t\t\t\"health_check.0.timeout\",\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package codegen\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"goa.design\/goa\/codegen\"\n\thttpdesign \"goa.design\/goa\/http\/design\"\n)\n\n\/\/ ExampleServerFiles returns and example main and dummy service\n\/\/ implementations.\nfunc ExampleServerFiles(genpkg string, root *httpdesign.RootExpr) []*codegen.File {\n\tfw := make([]*codegen.File, len(root.HTTPServices)+1)\n\tfor i, svc := range root.HTTPServices {\n\t\tfw[i] = dummyServiceFile(genpkg, root, svc)\n\t}\n\tfw[len(root.HTTPServices)] = exampleMain(genpkg, root)\n\treturn fw\n}\n\n\/\/ dummyServiceFile returns a dummy implementation of the given service.\nfunc dummyServiceFile(genpkg string, root *httpdesign.RootExpr, svc *httpdesign.ServiceExpr) *codegen.File {\n\tdata := HTTPServices.Get(svc.Name())\n\tapiPkg := strings.ToLower(codegen.Goify(root.Design.API.Name, false))\n\tsections := []*codegen.SectionTemplate{\n\t\tcodegen.Header(\"\", apiPkg, []*codegen.ImportSpec{\n\t\t\t{Path: \"context\"},\n\t\t\t{Path: \"log\"},\n\t\t\t{Path: genpkg + \"\/\" + codegen.Goify(svc.Name(), false)},\n\t\t}),\n\t\t{\n\t\t\tName: \"dummy-service\",\n\t\t\tSource: dummyServiceStructT,\n\t\t\tData: data,\n\t\t},\n\t}\n\tfor _, e := range data.Endpoints {\n\t\tsections = append(sections, &codegen.SectionTemplate{\n\t\t\tName: \"dummy-endpoint\",\n\t\t\tSource: dummyEndpointImplT,\n\t\t\tData: e,\n\t\t})\n\t}\n\n\treturn &codegen.File{\n\t\tPath: codegen.SnakeCase(svc.Name()) + \".go\",\n\t\tSectionTemplates: sections,\n\t}\n}\n\nfunc exampleMain(genpkg string, root *httpdesign.RootExpr) *codegen.File {\n\tidx := strings.LastIndex(genpkg, string(os.PathSeparator))\n\trootPath := \".\"\n\tif idx > 0 {\n\t\trootPath = genpkg[:idx]\n\t}\n\tapiPkg := strings.ToLower(codegen.Goify(root.Design.API.Name, false))\n\tspecs := []*codegen.ImportSpec{\n\t\t{Path: \"context\"},\n\t\t{Path: \"flag\"},\n\t\t{Path: \"fmt\"},\n\t\t{Path: \"log\"},\n\t\t{Path: \"net\/http\"},\n\t\t{Path: \"os\"},\n\t\t{Path: \"os\/signal\"},\n\t\t{Path: \"time\"},\n\t\t{Path: \"goa.design\/goa\", Name: \"goa\"},\n\t\t{Path: \"goa.design\/goa\/http\", Name: \"goahttp\"},\n\t\t{Path: \"goa.design\/goa\/http\/middleware\/debugging\"},\n\t\t{Path: \"goa.design\/goa\/http\/middleware\/logging\"},\n\t\t{Path: rootPath, Name: apiPkg},\n\t}\n\tfor _, svc := range root.HTTPServices {\n\t\tpkgName := HTTPServices.Get(svc.Name()).Service.PkgName\n\t\tspecs = append(specs, &codegen.ImportSpec{\n\t\t\tPath: filepath.Join(genpkg, \"http\", pkgName, \"server\"),\n\t\t\tName: pkgName + \"svr\",\n\t\t})\n\t\tspecs = append(specs, &codegen.ImportSpec{\n\t\t\tPath: filepath.Join(genpkg, pkgName),\n\t\t})\n\t}\n\tsections := []*codegen.SectionTemplate{codegen.Header(\"\", \"main\", specs)}\n\tvar svcdata []*ServiceData\n\tfor _, svc := range root.HTTPServices {\n\t\tsvcdata = append(svcdata, HTTPServices.Get(svc.Name()))\n\t}\n\tdata := map[string]interface{}{\n\t\t\"Services\": svcdata,\n\t\t\"APIPkg\": apiPkg,\n\t}\n\tsections = append(sections, &codegen.SectionTemplate{\n\t\tName: \"service-main\",\n\t\tSource: mainT,\n\t\tData: data,\n\t})\n\tpath := filepath.Join(\"cmd\", codegen.SnakeCase(root.Design.API.Name)+\"svc\", \"main.go\")\n\n\treturn &codegen.File{Path: path, SectionTemplates: sections}\n}\n\n\/\/ input: ServiceData\nconst dummyServiceStructT = `{{ printf \"%s service example implementation.\\nThe example methods log the requests and return zero values.\" .Service.Name | comment }}\ntype {{ .Service.PkgName }}Svc struct {\n\tlogger *log.Logger\n}\n\n{{ printf \"New%s returns the %s service implementation.\" .Service.VarName .Service.Name | comment }}\nfunc New{{ .Service.VarName }}(logger *log.Logger) {{ .Service.PkgName }}.Service {\n\treturn &{{ .Service.PkgName }}Svc{logger}\n}\n`\n\n\/\/ input: EndpointData\nconst dummyEndpointImplT = `{{ comment .Method.Description }}\nfunc (s *{{ .ServicePkgName }}Svc) {{ .Method.VarName }}(ctx context.Context{{ if .Payload.Ref }}, p {{ .Payload.Ref }}{{ end }}) ({{ if .Result.Ref }}{{ .Result.Ref }}, {{ end }}error) {\n{{- if and .Result.Ref .Result.IsStruct }}\n\tres := &{{ .Result.Name }}{}\n{{- else if .Result.Ref }}\n\tvar res {{ .Result.Ref }}\n{{- end }}\n\ts.logger.Print(\"{{ .ServiceName }}.{{ .Method.Name }}\")\n\treturn {{ if .Result.Ref }}res, {{ end }}nil\n}\n`\n\n\/\/ input: map[string]interface{}{\"Services\":[]ServiceData, \"APIPkg\": string}\nconst mainT = `func main() {\n\t\/\/ Define command line flags, add any other flag required to configure\n\t\/\/ the service.\n\tvar (\n\t\taddr = flag.String(\"listen\", \":8080\", \"HTTP listen ` + \"`\" + `address` + \"`\" + `\")\n\t\tdbg = flag.Bool(\"debug\", false, \"Log request and response bodies\")\n\t)\n\tflag.Parse()\n\n\t\/\/ Setup logger and goa log adapter. Replace logger with your own using\n\t\/\/ your log package of choice. The goa.design\/middleware\/logging\/...\n\t\/\/ packages define log adapters for common log packages.\n\tvar (\n\t\tlogger *log.Logger\n\t\tadapter logging.Adapter\n\t)\n\t{\n\t\tlogger = log.New(os.Stderr, \"[{{ .APIPkg }}] \", log.Ltime)\n\t\tadapter = logging.Adapt(logger)\n\t}\n\n\t\/\/ Create the structs that implement the services.\n\tvar (\n\t{{- range .Services }}\n\t\t{{- if .Endpoints }}\n\t\t{{ .Service.PkgName }}Svc {{.Service.PkgName}}.Service\n\t\t{{- end }}\n\t{{- end }}\n\t)\n\t{\n\t{{- range .Services }}\n\t\t{{- if .Endpoints }}\n\t\t{{ .Service.PkgName }}Svc = {{ $.APIPkg }}.New{{ .Service.VarName }}(logger)\n\t\t{{- end }}\n\t{{- end }}\n\t}\n\n\t\/\/ Wrap the services in endpoints that can be invoked from other\n\t\/\/ services potentially running in different processes.\n\tvar (\n\t{{- range .Services }}\n\t\t{{- if .Endpoints }}\n\t\t{{ .Service.PkgName }}Endpoints *{{.Service.PkgName}}.Endpoints\n\t\t{{- end }}\n\t{{- end }}\n\t)\n\t{\n\t{{- range .Services }}\n\t\t{{- if .Endpoints }}\n\t\t{{ .Service.PkgName }}Endpoints = {{ .Service.PkgName }}.NewEndpoints({{ .Service.PkgName }}Svc)\n\t\t{{- end }}\n\t{{- end }}\n\t}\n\n\t\/\/ Provide the transport specific request decoder and response encoder.\n\t\/\/ The goa http package has built-in support for JSON, XML and gob.\n\t\/\/ Other encodings can be used by providing the corresponding functions,\n\t\/\/ see goa.design\/encoding.\n\tvar (\n\t\tdec = goahttp.RequestDecoder\n\t\tenc = goahttp.ResponseEncoder\n\t)\n\n\t\/\/ Build the service HTTP request multiplexer and configure it to serve\n\t\/\/ HTTP requests to the service endpoints.\n\tvar mux goahttp.Muxer\n\t{\n\t\tmux = goahttp.NewMuxer()\n\t}\n\n\t\/\/ Wrap the endpoints with the transport specific layers. The generated\n\t\/\/ server packages contains code generated from the design which maps\n\t\/\/ the service input and output data structures to HTTP requests and\n\t\/\/ responses.\n\tvar (\n\t{{- range .Services }}\n\t\t{{- if .Endpoints }}\n\t\t{{ .Service.PkgName }}Server *{{.Service.PkgName}}svr.Server\n\t\t{{- end }}\n\t{{- end }}\n\t)\n\t{\n\t{{- range .Services }}\n\t\t{{- if .Endpoints }}\n\t\t{{ .Service.PkgName }}Server = {{ .Service.PkgName }}svr.New({{ .Service.PkgName }}Endpoints, mux, dec, enc)\n\t\t{{- end }}\n\t{{- end }}\n\t}\n\n\t\/\/ Configure the mux.\n\t{{- range .Services }}\n\t{{ .Service.PkgName }}svr.Mount(mux{{ if .Endpoints }}, {{ .Service.PkgName }}Server{{ end }})\n\t{{- end }}\n\n\t\/\/ Wrap the multiplexer with additional middlewares. Middlewares mounted\n\t\/\/ here apply to all the service endpoints.\n\tvar handler http.Handler = mux\n\t{\n\t\tif *dbg {\n\t\t\thandler = debugging.New(mux, adapter)(handler)\n\t\t}\n\t\thandler = logging.New(adapter)(handler)\n\t}\n\n\t\/\/ Create channel used by both the signal handler and server goroutines\n\t\/\/ to notify the main goroutine when to stop the server.\n\terrc := make(chan error)\n\n\t\/\/ Setup interrupt handler. This optional step configures the process so\n\t\/\/ that SIGINT and SIGTERM signals cause the service to stop gracefully.\n\tgo func() {\n\t\tc := make(chan os.Signal, 1)\n\t\tsignal.Notify(c, os.Interrupt)\n\t\terrc <- fmt.Errorf(\"%s\", <-c)\n\t}()\n\n\t\/\/ Start HTTP server using default configuration, change the code to\n\t\/\/ configure the server as required by your service.\n\tsrv := &http.Server{Addr: *addr, Handler: handler}\n\tgo func() {\n\t\tlogger.Printf(\"[INFO] listening on %s\", *addr)\n\t\terrc <- srv.ListenAndServe()\n\t}()\n\n\t\/\/ Wait for signal.\n\tlogger.Printf(\"exiting (%v)\", <-errc)\n\n\t\/\/ Shutdown gracefully with a 30s timeout.\n\tctx, _ := context.WithTimeout(context.Background(), 30*time.Second)\n\tsrv.Shutdown(ctx)\n\n\tlogger.Println(\"exited\")\n}\n`\n<commit_msg>Fix import path for service names with underscores in main<commit_after>package codegen\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"goa.design\/goa\/codegen\"\n\thttpdesign \"goa.design\/goa\/http\/design\"\n)\n\n\/\/ ExampleServerFiles returns and example main and dummy service\n\/\/ implementations.\nfunc ExampleServerFiles(genpkg string, root *httpdesign.RootExpr) []*codegen.File {\n\tfw := make([]*codegen.File, len(root.HTTPServices)+1)\n\tfor i, svc := range root.HTTPServices {\n\t\tfw[i] = dummyServiceFile(genpkg, root, svc)\n\t}\n\tfw[len(root.HTTPServices)] = exampleMain(genpkg, root)\n\treturn fw\n}\n\n\/\/ dummyServiceFile returns a dummy implementation of the given service.\nfunc dummyServiceFile(genpkg string, root *httpdesign.RootExpr, svc *httpdesign.ServiceExpr) *codegen.File {\n\tdata := HTTPServices.Get(svc.Name())\n\tapiPkg := strings.ToLower(codegen.Goify(root.Design.API.Name, false))\n\tsections := []*codegen.SectionTemplate{\n\t\tcodegen.Header(\"\", apiPkg, []*codegen.ImportSpec{\n\t\t\t{Path: \"context\"},\n\t\t\t{Path: \"log\"},\n\t\t\t{Path: genpkg + \"\/\" + codegen.Goify(svc.Name(), false)},\n\t\t}),\n\t\t{\n\t\t\tName: \"dummy-service\",\n\t\t\tSource: dummyServiceStructT,\n\t\t\tData: data,\n\t\t},\n\t}\n\tfor _, e := range data.Endpoints {\n\t\tsections = append(sections, &codegen.SectionTemplate{\n\t\t\tName: \"dummy-endpoint\",\n\t\t\tSource: dummyEndpointImplT,\n\t\t\tData: e,\n\t\t})\n\t}\n\n\treturn &codegen.File{\n\t\tPath: codegen.SnakeCase(svc.Name()) + \".go\",\n\t\tSectionTemplates: sections,\n\t}\n}\n\nfunc exampleMain(genpkg string, root *httpdesign.RootExpr) *codegen.File {\n\tidx := strings.LastIndex(genpkg, string(os.PathSeparator))\n\trootPath := \".\"\n\tif idx > 0 {\n\t\trootPath = genpkg[:idx]\n\t}\n\tapiPkg := strings.ToLower(codegen.Goify(root.Design.API.Name, false))\n\tspecs := []*codegen.ImportSpec{\n\t\t{Path: \"context\"},\n\t\t{Path: \"flag\"},\n\t\t{Path: \"fmt\"},\n\t\t{Path: \"log\"},\n\t\t{Path: \"net\/http\"},\n\t\t{Path: \"os\"},\n\t\t{Path: \"os\/signal\"},\n\t\t{Path: \"time\"},\n\t\t{Path: \"goa.design\/goa\", Name: \"goa\"},\n\t\t{Path: \"goa.design\/goa\/http\", Name: \"goahttp\"},\n\t\t{Path: \"goa.design\/goa\/http\/middleware\/debugging\"},\n\t\t{Path: \"goa.design\/goa\/http\/middleware\/logging\"},\n\t\t{Path: rootPath, Name: apiPkg},\n\t}\n\tfor _, svc := range root.HTTPServices {\n\t\tpkgName := HTTPServices.Get(svc.Name()).Service.PkgName\n\t\tspecs = append(specs, &codegen.ImportSpec{\n\t\t\tPath: filepath.Join(genpkg, \"http\", codegen.SnakeCase(svc.Name()), \"server\"),\n\t\t\tName: pkgName + \"svr\",\n\t\t})\n\t\tspecs = append(specs, &codegen.ImportSpec{\n\t\t\tPath: filepath.Join(genpkg, codegen.SnakeCase(svc.Name())),\n\t\t\tName: pkgName,\n\t\t})\n\t}\n\tsections := []*codegen.SectionTemplate{codegen.Header(\"\", \"main\", specs)}\n\tvar svcdata []*ServiceData\n\tfor _, svc := range root.HTTPServices {\n\t\tsvcdata = append(svcdata, HTTPServices.Get(svc.Name()))\n\t}\n\tdata := map[string]interface{}{\n\t\t\"Services\": svcdata,\n\t\t\"APIPkg\": apiPkg,\n\t}\n\tsections = append(sections, &codegen.SectionTemplate{\n\t\tName: \"service-main\",\n\t\tSource: mainT,\n\t\tData: data,\n\t})\n\tpath := filepath.Join(\"cmd\", codegen.SnakeCase(root.Design.API.Name)+\"svc\", \"main.go\")\n\n\treturn &codegen.File{Path: path, SectionTemplates: sections}\n}\n\n\/\/ input: ServiceData\nconst dummyServiceStructT = `{{ printf \"%s service example implementation.\\nThe example methods log the requests and return zero values.\" .Service.Name | comment }}\ntype {{ .Service.PkgName }}Svc struct {\n\tlogger *log.Logger\n}\n\n{{ printf \"New%s returns the %s service implementation.\" .Service.VarName .Service.Name | comment }}\nfunc New{{ .Service.VarName }}(logger *log.Logger) {{ .Service.PkgName }}.Service {\n\treturn &{{ .Service.PkgName }}Svc{logger}\n}\n`\n\n\/\/ input: EndpointData\nconst dummyEndpointImplT = `{{ comment .Method.Description }}\nfunc (s *{{ .ServicePkgName }}Svc) {{ .Method.VarName }}(ctx context.Context{{ if .Payload.Ref }}, p {{ .Payload.Ref }}{{ end }}) ({{ if .Result.Ref }}{{ .Result.Ref }}, {{ end }}error) {\n{{- if and .Result.Ref .Result.IsStruct }}\n\tres := &{{ .Result.Name }}{}\n{{- else if .Result.Ref }}\n\tvar res {{ .Result.Ref }}\n{{- end }}\n\ts.logger.Print(\"{{ .ServiceName }}.{{ .Method.Name }}\")\n\treturn {{ if .Result.Ref }}res, {{ end }}nil\n}\n`\n\n\/\/ input: map[string]interface{}{\"Services\":[]ServiceData, \"APIPkg\": string}\nconst mainT = `func main() {\n\t\/\/ Define command line flags, add any other flag required to configure\n\t\/\/ the service.\n\tvar (\n\t\taddr = flag.String(\"listen\", \":8080\", \"HTTP listen ` + \"`\" + `address` + \"`\" + `\")\n\t\tdbg = flag.Bool(\"debug\", false, \"Log request and response bodies\")\n\t)\n\tflag.Parse()\n\n\t\/\/ Setup logger and goa log adapter. Replace logger with your own using\n\t\/\/ your log package of choice. The goa.design\/middleware\/logging\/...\n\t\/\/ packages define log adapters for common log packages.\n\tvar (\n\t\tlogger *log.Logger\n\t\tadapter logging.Adapter\n\t)\n\t{\n\t\tlogger = log.New(os.Stderr, \"[{{ .APIPkg }}] \", log.Ltime)\n\t\tadapter = logging.Adapt(logger)\n\t}\n\n\t\/\/ Create the structs that implement the services.\n\tvar (\n\t{{- range .Services }}\n\t\t{{- if .Endpoints }}\n\t\t{{ .Service.PkgName }}Svc {{.Service.PkgName}}.Service\n\t\t{{- end }}\n\t{{- end }}\n\t)\n\t{\n\t{{- range .Services }}\n\t\t{{- if .Endpoints }}\n\t\t{{ .Service.PkgName }}Svc = {{ $.APIPkg }}.New{{ .Service.VarName }}(logger)\n\t\t{{- end }}\n\t{{- end }}\n\t}\n\n\t\/\/ Wrap the services in endpoints that can be invoked from other\n\t\/\/ services potentially running in different processes.\n\tvar (\n\t{{- range .Services }}\n\t\t{{- if .Endpoints }}\n\t\t{{ .Service.PkgName }}Endpoints *{{.Service.PkgName}}.Endpoints\n\t\t{{- end }}\n\t{{- end }}\n\t)\n\t{\n\t{{- range .Services }}\n\t\t{{- if .Endpoints }}\n\t\t{{ .Service.PkgName }}Endpoints = {{ .Service.PkgName }}.NewEndpoints({{ .Service.PkgName }}Svc)\n\t\t{{- end }}\n\t{{- end }}\n\t}\n\n\t\/\/ Provide the transport specific request decoder and response encoder.\n\t\/\/ The goa http package has built-in support for JSON, XML and gob.\n\t\/\/ Other encodings can be used by providing the corresponding functions,\n\t\/\/ see goa.design\/encoding.\n\tvar (\n\t\tdec = goahttp.RequestDecoder\n\t\tenc = goahttp.ResponseEncoder\n\t)\n\n\t\/\/ Build the service HTTP request multiplexer and configure it to serve\n\t\/\/ HTTP requests to the service endpoints.\n\tvar mux goahttp.Muxer\n\t{\n\t\tmux = goahttp.NewMuxer()\n\t}\n\n\t\/\/ Wrap the endpoints with the transport specific layers. The generated\n\t\/\/ server packages contains code generated from the design which maps\n\t\/\/ the service input and output data structures to HTTP requests and\n\t\/\/ responses.\n\tvar (\n\t{{- range .Services }}\n\t\t{{- if .Endpoints }}\n\t\t{{ .Service.PkgName }}Server *{{.Service.PkgName}}svr.Server\n\t\t{{- end }}\n\t{{- end }}\n\t)\n\t{\n\t{{- range .Services }}\n\t\t{{- if .Endpoints }}\n\t\t{{ .Service.PkgName }}Server = {{ .Service.PkgName }}svr.New({{ .Service.PkgName }}Endpoints, mux, dec, enc)\n\t\t{{- end }}\n\t{{- end }}\n\t}\n\n\t\/\/ Configure the mux.\n\t{{- range .Services }}\n\t{{ .Service.PkgName }}svr.Mount(mux{{ if .Endpoints }}, {{ .Service.PkgName }}Server{{ end }})\n\t{{- end }}\n\n\t\/\/ Wrap the multiplexer with additional middlewares. Middlewares mounted\n\t\/\/ here apply to all the service endpoints.\n\tvar handler http.Handler = mux\n\t{\n\t\tif *dbg {\n\t\t\thandler = debugging.New(mux, adapter)(handler)\n\t\t}\n\t\thandler = logging.New(adapter)(handler)\n\t}\n\n\t\/\/ Create channel used by both the signal handler and server goroutines\n\t\/\/ to notify the main goroutine when to stop the server.\n\terrc := make(chan error)\n\n\t\/\/ Setup interrupt handler. This optional step configures the process so\n\t\/\/ that SIGINT and SIGTERM signals cause the service to stop gracefully.\n\tgo func() {\n\t\tc := make(chan os.Signal, 1)\n\t\tsignal.Notify(c, os.Interrupt)\n\t\terrc <- fmt.Errorf(\"%s\", <-c)\n\t}()\n\n\t\/\/ Start HTTP server using default configuration, change the code to\n\t\/\/ configure the server as required by your service.\n\tsrv := &http.Server{Addr: *addr, Handler: handler}\n\tgo func() {\n\t\tlogger.Printf(\"[INFO] listening on %s\", *addr)\n\t\terrc <- srv.ListenAndServe()\n\t}()\n\n\t\/\/ Wait for signal.\n\tlogger.Printf(\"exiting (%v)\", <-errc)\n\n\t\/\/ Shutdown gracefully with a 30s timeout.\n\tctx, _ := context.WithTimeout(context.Background(), 30*time.Second)\n\tsrv.Shutdown(ctx)\n\n\tlogger.Println(\"exited\")\n}\n`\n<|endoftext|>"} {"text":"<commit_before>package itest\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com\/btcsuite\/btcutil\"\n\t\"github.com\/lightningnetwork\/lnd\/lnrpc\/routerrpc\"\n\t\"github.com\/lightningnetwork\/lnd\/lntest\"\n\t\"github.com\/lightningnetwork\/lnd\/lntest\/wait\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\n\/\/ testMultiHopRemoteForceCloseOnChainHtlcTimeout tests that if we extend a\n\/\/ multi-hop HTLC, and the final destination of the HTLC force closes the\n\/\/ channel, then we properly timeout the HTLC directly on *their* commitment\n\/\/ transaction once the timeout has expired. Once we sweep the transaction, we\n\/\/ should also cancel back the initial HTLC.\nfunc testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,\n\tt *harnessTest, alice, bob *lntest.HarnessNode, c commitType) {\n\n\tctxb := context.Background()\n\n\t\/\/ First, we'll create a three hop network: Alice -> Bob -> Carol, with\n\t\/\/ Carol refusing to actually settle or directly cancel any HTLC's\n\t\/\/ self.\n\taliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(\n\t\tt, net, alice, bob, true, c,\n\t)\n\n\t\/\/ Clean up carol's node when the test finishes.\n\tdefer shutdownAndAssert(net, t, carol)\n\n\t\/\/ With our channels set up, we'll then send a single HTLC from Alice\n\t\/\/ to Carol. As Carol is in hodl mode, she won't settle this HTLC which\n\t\/\/ opens up the base for out tests.\n\tconst (\n\t\tfinalCltvDelta = 40\n\t\thtlcAmt = btcutil.Amount(30000)\n\t)\n\n\tctx, cancel := context.WithCancel(ctxb)\n\tdefer cancel()\n\n\t\/\/ We'll now send a single HTLC across our multi-hop network.\n\tcarolPubKey := carol.PubKey[:]\n\tpayHash := makeFakePayHash(t)\n\t_, err := alice.RouterClient.SendPaymentV2(\n\t\tctx, &routerrpc.SendPaymentRequest{\n\t\t\tDest: carolPubKey,\n\t\t\tAmt: int64(htlcAmt),\n\t\t\tPaymentHash: payHash,\n\t\t\tFinalCltvDelta: finalCltvDelta,\n\t\t\tTimeoutSeconds: 60,\n\t\t\tFeeLimitMsat: noFeeLimitMsat,\n\t\t},\n\t)\n\trequire.NoError(t.t, err)\n\n\t\/\/ Once the HTLC has cleared, all the nodes in our mini network should\n\t\/\/ show that the HTLC has been locked in.\n\tnodes := []*lntest.HarnessNode{alice, bob, carol}\n\terr = wait.NoError(func() error {\n\t\treturn assertActiveHtlcs(nodes, payHash)\n\t}, defaultTimeout)\n\trequire.NoError(t.t, err)\n\n\t\/\/ Increase the fee estimate so that the following force close tx will\n\t\/\/ be cpfp'ed.\n\tnet.SetFeeEstimate(30000)\n\n\t\/\/ At this point, we'll now instruct Carol to force close the\n\t\/\/ transaction. This will let us exercise that Bob is able to sweep the\n\t\/\/ expired HTLC on Carol's version of the commitment transaction. If\n\t\/\/ Carol has an anchor, it will be swept too.\n\tctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout)\n\tcloseChannelAndAssertType(\n\t\tctxt, t, net, carol, bobChanPoint, c == commitTypeAnchors,\n\t\ttrue,\n\t)\n\n\t\/\/ At this point, Bob should have a pending force close channel as\n\t\/\/ Carol has gone directly to chain.\n\tctxt, _ = context.WithTimeout(ctxb, defaultTimeout)\n\terr = waitForNumChannelPendingForceClose(ctxt, bob, 1, nil)\n\trequire.NoError(t.t, err)\n\n\t\/\/ Bob can sweep his output immediately. If there is an anchor, Bob will\n\t\/\/ sweep that as well.\n\texpectedTxes := 1\n\tif c == commitTypeAnchors {\n\t\texpectedTxes = 2\n\t}\n\n\t_, err = waitForNTxsInMempool(\n\t\tnet.Miner.Client, expectedTxes, minerMempoolTimeout,\n\t)\n\trequire.NoError(t.t, err)\n\n\t\/\/ Next, we'll mine enough blocks for the HTLC to expire. At this\n\t\/\/ point, Bob should hand off the output to his internal utxo nursery,\n\t\/\/ which will broadcast a sweep transaction.\n\tnumBlocks := padCLTV(finalCltvDelta - 1)\n\t_, err = net.Miner.Client.Generate(numBlocks)\n\trequire.NoError(t.t, err)\n\n\t\/\/ If we check Bob's pending channel report, it should show that he has\n\t\/\/ a single HTLC that's now in the second stage, as skip the initial\n\t\/\/ first stage since this is a direct HTLC.\n\tctxt, _ = context.WithTimeout(ctxb, defaultTimeout)\n\terr = waitForNumChannelPendingForceClose(\n\t\tctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {\n\t\t\tif len(c.PendingHtlcs) != 1 {\n\t\t\t\treturn fmt.Errorf(\"bob should have pending \" +\n\t\t\t\t\t\"htlc but doesn't\")\n\t\t\t}\n\n\t\t\tif c.PendingHtlcs[0].Stage != 2 {\n\t\t\t\treturn fmt.Errorf(\"bob's htlc should have \"+\n\t\t\t\t\t\"advanced to the second stage: %v\", err)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t},\n\t)\n\trequire.NoError(t.t, err)\n\n\t\/\/ We need to generate an additional block to trigger the sweep.\n\t_, err = net.Miner.Client.Generate(1)\n\trequire.NoError(t.t, err)\n\n\t\/\/ Bob's sweeping transaction should now be found in the mempool at\n\t\/\/ this point.\n\tsweepTx, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)\n\tif err != nil {\n\t\t\/\/ If Bob's transaction isn't yet in the mempool, then due to\n\t\t\/\/ internal message passing and the low period between blocks\n\t\t\/\/ being mined, it may have been detected as a late\n\t\t\/\/ registration. As a result, we'll mine another block and\n\t\t\/\/ repeat the check. If it doesn't go through this time, then\n\t\t\/\/ we'll fail.\n\t\t\/\/ TODO(halseth): can we use waitForChannelPendingForceClose to\n\t\t\/\/ avoid this hack?\n\t\t_, err = net.Miner.Client.Generate(1)\n\t\trequire.NoError(t.t, err)\n\t\tsweepTx, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)\n\t\trequire.NoError(t.t, err)\n\t}\n\n\t\/\/ If we mine an additional block, then this should confirm Bob's\n\t\/\/ transaction which sweeps the direct HTLC output.\n\tblock := mineBlocks(t, net, 1, 1)[0]\n\tassertTxInBlock(t, block, sweepTx)\n\n\t\/\/ Now that the sweeping transaction has been confirmed, Bob should\n\t\/\/ cancel back that HTLC. As a result, Alice should not know of any\n\t\/\/ active HTLC's.\n\tnodes = []*lntest.HarnessNode{alice}\n\terr = wait.NoError(func() error {\n\t\treturn assertNumActiveHtlcs(nodes, 0)\n\t}, defaultTimeout)\n\trequire.NoError(t.t, err)\n\n\t\/\/ Now we'll check Bob's pending channel report. Since this was Carol's\n\t\/\/ commitment, he doesn't have to wait for any CSV delays. As a result,\n\t\/\/ he should show no additional pending transactions.\n\tctxt, _ = context.WithTimeout(ctxb, defaultTimeout)\n\terr = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)\n\trequire.NoError(t.t, err)\n\n\t\/\/ We'll close out the test by closing the channel from Alice to Bob,\n\t\/\/ and then shutting down the new node we created as its no longer\n\t\/\/ needed. Coop close, no anchors.\n\tctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)\n\tcloseChannelAndAssertType(\n\t\tctxt, t, net, alice, aliceChanPoint, false, false,\n\t)\n}\n<commit_msg>itest\/test: update remote force close timeout to use hodl inv<commit_after>package itest\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com\/btcsuite\/btcutil\"\n\t\"github.com\/lightningnetwork\/lnd\/lnrpc\/invoicesrpc\"\n\t\"github.com\/lightningnetwork\/lnd\/lnrpc\/routerrpc\"\n\t\"github.com\/lightningnetwork\/lnd\/lntest\"\n\t\"github.com\/lightningnetwork\/lnd\/lntest\/wait\"\n\t\"github.com\/lightningnetwork\/lnd\/lntypes\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\n\/\/ testMultiHopRemoteForceCloseOnChainHtlcTimeout tests that if we extend a\n\/\/ multi-hop HTLC, and the final destination of the HTLC force closes the\n\/\/ channel, then we properly timeout the HTLC directly on *their* commitment\n\/\/ transaction once the timeout has expired. Once we sweep the transaction, we\n\/\/ should also cancel back the initial HTLC.\nfunc testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness,\n\tt *harnessTest, alice, bob *lntest.HarnessNode, c commitType) {\n\n\tctxb := context.Background()\n\n\t\/\/ First, we'll create a three hop network: Alice -> Bob -> Carol, with\n\t\/\/ Carol refusing to actually settle or directly cancel any HTLC's\n\t\/\/ self.\n\taliceChanPoint, bobChanPoint, carol := createThreeHopNetwork(\n\t\tt, net, alice, bob, true, c,\n\t)\n\n\t\/\/ Clean up carol's node when the test finishes.\n\tdefer shutdownAndAssert(net, t, carol)\n\n\t\/\/ With our channels set up, we'll then send a single HTLC from Alice\n\t\/\/ to Carol. As Carol is in hodl mode, she won't settle this HTLC which\n\t\/\/ opens up the base for out tests.\n\tconst (\n\t\tfinalCltvDelta = 40\n\t\thtlcAmt = btcutil.Amount(30000)\n\t)\n\n\tctx, cancel := context.WithCancel(ctxb)\n\tdefer cancel()\n\n\t\/\/ We'll now send a single HTLC across our multi-hop network.\n\tpreimage := lntypes.Preimage{1, 2, 3}\n\tpayHash := preimage.Hash()\n\tinvoiceReq := &invoicesrpc.AddHoldInvoiceRequest{\n\t\tValue: int64(htlcAmt),\n\t\tCltvExpiry: 40,\n\t\tHash: payHash[:],\n\t}\n\n\tctxt, _ := context.WithTimeout(ctxb, defaultTimeout)\n\tcarolInvoice, err := carol.AddHoldInvoice(ctxt, invoiceReq)\n\trequire.NoError(t.t, err)\n\n\t_, err = alice.RouterClient.SendPaymentV2(\n\t\tctx, &routerrpc.SendPaymentRequest{\n\t\t\tPaymentRequest: carolInvoice.PaymentRequest,\n\t\t\tTimeoutSeconds: 60,\n\t\t\tFeeLimitMsat: noFeeLimitMsat,\n\t\t},\n\t)\n\trequire.NoError(t.t, err)\n\n\t\/\/ Once the HTLC has cleared, all the nodes in our mini network should\n\t\/\/ show that the HTLC has been locked in.\n\tnodes := []*lntest.HarnessNode{alice, bob, carol}\n\terr = wait.NoError(func() error {\n\t\treturn assertActiveHtlcs(nodes, payHash[:])\n\t}, defaultTimeout)\n\trequire.NoError(t.t, err)\n\n\t\/\/ Increase the fee estimate so that the following force close tx will\n\t\/\/ be cpfp'ed.\n\tnet.SetFeeEstimate(30000)\n\n\t\/\/ At this point, we'll now instruct Carol to force close the\n\t\/\/ transaction. This will let us exercise that Bob is able to sweep the\n\t\/\/ expired HTLC on Carol's version of the commitment transaction. If\n\t\/\/ Carol has an anchor, it will be swept too.\n\tctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)\n\tcloseChannelAndAssertType(\n\t\tctxt, t, net, carol, bobChanPoint, c == commitTypeAnchors,\n\t\ttrue,\n\t)\n\n\t\/\/ At this point, Bob should have a pending force close channel as\n\t\/\/ Carol has gone directly to chain.\n\tctxt, _ = context.WithTimeout(ctxb, defaultTimeout)\n\terr = waitForNumChannelPendingForceClose(ctxt, bob, 1, nil)\n\trequire.NoError(t.t, err)\n\n\t\/\/ Bob can sweep his output immediately. If there is an anchor, Bob will\n\t\/\/ sweep that as well.\n\texpectedTxes := 1\n\tif c == commitTypeAnchors {\n\t\texpectedTxes = 2\n\t}\n\n\t_, err = waitForNTxsInMempool(\n\t\tnet.Miner.Client, expectedTxes, minerMempoolTimeout,\n\t)\n\trequire.NoError(t.t, err)\n\n\t\/\/ Next, we'll mine enough blocks for the HTLC to expire. At this\n\t\/\/ point, Bob should hand off the output to his internal utxo nursery,\n\t\/\/ which will broadcast a sweep transaction.\n\tnumBlocks := padCLTV(finalCltvDelta - 1)\n\t_, err = net.Miner.Client.Generate(numBlocks)\n\trequire.NoError(t.t, err)\n\n\t\/\/ If we check Bob's pending channel report, it should show that he has\n\t\/\/ a single HTLC that's now in the second stage, as skip the initial\n\t\/\/ first stage since this is a direct HTLC.\n\tctxt, _ = context.WithTimeout(ctxb, defaultTimeout)\n\terr = waitForNumChannelPendingForceClose(\n\t\tctxt, bob, 1, func(c *lnrpcForceCloseChannel) error {\n\t\t\tif len(c.PendingHtlcs) != 1 {\n\t\t\t\treturn fmt.Errorf(\"bob should have pending \" +\n\t\t\t\t\t\"htlc but doesn't\")\n\t\t\t}\n\n\t\t\tif c.PendingHtlcs[0].Stage != 2 {\n\t\t\t\treturn fmt.Errorf(\"bob's htlc should have \"+\n\t\t\t\t\t\"advanced to the second stage: %v\", err)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t},\n\t)\n\trequire.NoError(t.t, err)\n\n\t\/\/ We need to generate an additional block to trigger the sweep.\n\t_, err = net.Miner.Client.Generate(1)\n\trequire.NoError(t.t, err)\n\n\t\/\/ Bob's sweeping transaction should now be found in the mempool at\n\t\/\/ this point.\n\tsweepTx, err := waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)\n\tif err != nil {\n\t\t\/\/ If Bob's transaction isn't yet in the mempool, then due to\n\t\t\/\/ internal message passing and the low period between blocks\n\t\t\/\/ being mined, it may have been detected as a late\n\t\t\/\/ registration. As a result, we'll mine another block and\n\t\t\/\/ repeat the check. If it doesn't go through this time, then\n\t\t\/\/ we'll fail.\n\t\t\/\/ TODO(halseth): can we use waitForChannelPendingForceClose to\n\t\t\/\/ avoid this hack?\n\t\t_, err = net.Miner.Client.Generate(1)\n\t\trequire.NoError(t.t, err)\n\t\tsweepTx, err = waitForTxInMempool(net.Miner.Client, minerMempoolTimeout)\n\t\trequire.NoError(t.t, err)\n\t}\n\n\t\/\/ If we mine an additional block, then this should confirm Bob's\n\t\/\/ transaction which sweeps the direct HTLC output.\n\tblock := mineBlocks(t, net, 1, 1)[0]\n\tassertTxInBlock(t, block, sweepTx)\n\n\t\/\/ Now that the sweeping transaction has been confirmed, Bob should\n\t\/\/ cancel back that HTLC. As a result, Alice should not know of any\n\t\/\/ active HTLC's.\n\tnodes = []*lntest.HarnessNode{alice}\n\terr = wait.NoError(func() error {\n\t\treturn assertNumActiveHtlcs(nodes, 0)\n\t}, defaultTimeout)\n\trequire.NoError(t.t, err)\n\n\t\/\/ Now we'll check Bob's pending channel report. Since this was Carol's\n\t\/\/ commitment, he doesn't have to wait for any CSV delays. As a result,\n\t\/\/ he should show no additional pending transactions.\n\tctxt, _ = context.WithTimeout(ctxb, defaultTimeout)\n\terr = waitForNumChannelPendingForceClose(ctxt, bob, 0, nil)\n\trequire.NoError(t.t, err)\n\n\t\/\/ We'll close out the test by closing the channel from Alice to Bob,\n\t\/\/ and then shutting down the new node we created as its no longer\n\t\/\/ needed. Coop close, no anchors.\n\tctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout)\n\tcloseChannelAndAssertType(\n\t\tctxt, t, net, alice, aliceChanPoint, false, false,\n\t)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/leeola\/service\"\n)\n\n\/\/ newService provides a preconfigured (based on klientctl's config)\n\/\/ service object to install, uninstall, start and stop Klient.\nfunc newService() (service.Service, error) {\n\t\/\/ TODO: Add hosts's username\n\tsvcConfig := &service.Config{\n\t\tName: \"klient\",\n\t\tDisplayName: \"klient\",\n\t\tDescription: \"Koding Service Connector\",\n\t\tExecutable: filepath.Join(KlientDirectory, \"klient.sh\"),\n\t\tOption: map[string]interface{}{\n\t\t\t\"LogStderr\": true,\n\t\t\t\"LogStdout\": true,\n\t\t},\n\t}\n\n\treturn service.New(&serviceProgram{}, svcConfig)\n}\n\ntype serviceProgram struct{}\n\nfunc (p *serviceProgram) Start(s service.Service) error {\n\tfmt.Println(\"Error: serviceProgram Start called\")\n\treturn nil\n}\n\nfunc (p *serviceProgram) Stop(s service.Service) error {\n\tfmt.Println(\"Error: serviceProgram Stop called\")\n\treturn nil\n}\n\nfunc InstallCommand(c *cli.Context) int {\n\tif len(c.Args()) < 1 {\n\t\tcli.ShowCommandHelp(c, \"install\")\n\t\treturn 1\n\t}\n\n\tauthToken := c.Args().Get(0)\n\n\t\/\/ We need to check if the authToken is somehow empty, because klient\n\t\/\/ will default to user\/pass if there is no auth token (despite setting\n\t\/\/ the token flag)\n\tif authToken == \"\" {\n\t\tcli.ShowCommandHelp(c, \"install\")\n\t\treturn 1\n\t}\n\n\tklientShPath, err := filepath.Abs(filepath.Join(KlientDirectory, \"klient.sh\"))\n\tif err != nil {\n\t\tfmt.Printf(\"Error getting %s wrapper path: '%s'\\n\", KlientName, err)\n\t\treturn 1\n\t}\n\n\tklientBinPath, err := filepath.Abs(filepath.Join(KlientDirectory, \"klient\"))\n\tif err != nil {\n\t\tfmt.Printf(\"Error getting %s path: '%s'\\n\", KlientName, err)\n\t\treturn 1\n\t}\n\n\t\/\/ Create the installation dir, if needed.\n\terr = os.MkdirAll(KlientDirectory, 0755)\n\tif err != nil {\n\t\tfmt.Printf(\"Error creating directory to hold: '%s'\\n\", KlientName, err)\n\t\treturn 1\n\t}\n\n\t\/\/ TODO: Accept `kd install --user foo` flag to replace the\n\t\/\/ environ checking.\n\tvar sudoCmd string\n\tfor _, s := range os.Environ() {\n\t\tenv := strings.Split(s, \"=\")\n\n\t\tif len(env) != 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif env[0] == \"SUDO_USER\" {\n\t\t\tsudoCmd = fmt.Sprintf(\"sudo -u %s \", env[1])\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ TODO: Stop using this klient.sh file.\n\t\/\/ If the klient.sh file is missing, write it. We can use build tags\n\t\/\/ for os specific tags, if needed.\n\t_, err = os.Stat(klientShPath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tklientShFile := []byte(fmt.Sprintf(`#!\/bin\/sh\n%sKITE_HOME=%s %s --kontrol-url=%s -debug\n`,\n\t\t\t\tsudoCmd, KiteHome, klientBinPath, KontrolUrl))\n\n\t\t\t\/\/ perm -rwr-xr-x, same as klient\n\t\t\terr := ioutil.WriteFile(klientShPath, klientShFile, 0755)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Error creating %s wrapper: '%s'\\n\", KlientName, err)\n\t\t\t\treturn 1\n\t\t\t}\n\n\t\t} else {\n\t\t\t\/\/ Unknown error stating (possibly permission), exit\n\t\t\t\/\/ TODO: Print UX friendly err\n\t\t\tfmt.Println(\"Error:\", err)\n\t\t\treturn 1\n\t\t}\n\t}\n\n\tfmt.Println(\"Downloading...\")\n\n\tif err = downloadRemoteToLocal(S3KlientPath, klientBinPath); err != nil {\n\t\tfmt.Printf(\"Error downloading %s: '%s'\\n\", KlientName, err)\n\t\treturn 1\n\t}\n\n\tfmt.Printf(`Authenticating you to the %s\n\n`, KlientName)\n\n\tcmd := exec.Command(klientBinPath, \"-register\",\n\t\t\"-token\", authToken,\n\t\t\"--kontrol-url\", KontrolUrl, \"--kite-home\", KiteHome)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdin = os.Stdin\n\n\terr = cmd.Run()\n\tif err != nil {\n\t\tfmt.Printf(\"Error registering %s: '%s'\\n\", KlientName, err)\n\t\treturn 1\n\t}\n\n\t\/\/ Klient is setting the wrong file permissions when installed by ctl,\n\t\/\/ so since this is just ctl problem, we'll just fix the permission\n\t\/\/ here for now.\n\tif err = os.Chmod(KiteHome, 0755); err != nil {\n\t\tfmt.Printf(\"Error installing %s: '%s'\\n\", KlientName, err)\n\t\treturn 1\n\t}\n\n\tif err = os.Chmod(filepath.Join(KiteHome, \"kite.key\"), 0644); err != nil {\n\t\tfmt.Printf(\"Error installing kite.key: '%s'\\n\", err)\n\t\treturn 1\n\t}\n\n\t\/\/ Create our interface to the OS specific service\n\ts, err := newService()\n\tif err != nil {\n\t\tfmt.Printf(\"Error starting service: '%s'\\n\", err)\n\t\treturn 1\n\t}\n\n\t\/\/ Install the klient binary as a OS service\n\tif err = s.Install(); err != nil {\n\t\tfmt.Printf(\"Error installing service: '%s'\\n\", err)\n\t\treturn 1\n\t}\n\n\t\/\/ Tell the service to start. Normally it starts automatically, but\n\t\/\/ if the user told the service to stop (previously), it may not\n\t\/\/ start automatically.\n\t\/\/\n\t\/\/ Note that the service may error if it is already running, so\n\t\/\/ we're ignoring any starting errors here. We will verify the\n\t\/\/ connection below, anyway.\n\ts.Start()\n\n\tfmt.Println(\"Verifying installation...\")\n\terr = WaitUntilStarted(KlientAddress, 5, 1*time.Second)\n\n\t\/\/ After X times, if err != nil we failed to connect to klient.\n\t\/\/ Inform the user.\n\tif err != nil {\n\t\tfmt.Printf(\"Error verifying the installation of %s: '%s'\\n\", KlientName, err)\n\t\treturn 1\n\t}\n\n\tfmt.Printf(\"\\n\\nSuccessfully installed and started the %s!\\n\", KlientName)\n\n\treturn 0\n}\n<commit_msg>klientctl: Trim authtoken before checking if empty<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/leeola\/service\"\n)\n\n\/\/ newService provides a preconfigured (based on klientctl's config)\n\/\/ service object to install, uninstall, start and stop Klient.\nfunc newService() (service.Service, error) {\n\t\/\/ TODO: Add hosts's username\n\tsvcConfig := &service.Config{\n\t\tName: \"klient\",\n\t\tDisplayName: \"klient\",\n\t\tDescription: \"Koding Service Connector\",\n\t\tExecutable: filepath.Join(KlientDirectory, \"klient.sh\"),\n\t\tOption: map[string]interface{}{\n\t\t\t\"LogStderr\": true,\n\t\t\t\"LogStdout\": true,\n\t\t},\n\t}\n\n\treturn service.New(&serviceProgram{}, svcConfig)\n}\n\ntype serviceProgram struct{}\n\nfunc (p *serviceProgram) Start(s service.Service) error {\n\tfmt.Println(\"Error: serviceProgram Start called\")\n\treturn nil\n}\n\nfunc (p *serviceProgram) Stop(s service.Service) error {\n\tfmt.Println(\"Error: serviceProgram Stop called\")\n\treturn nil\n}\n\nfunc InstallCommand(c *cli.Context) int {\n\tif len(c.Args()) < 1 {\n\t\tcli.ShowCommandHelp(c, \"install\")\n\t\treturn 1\n\t}\n\n\tauthToken := c.Args().Get(0)\n\n\t\/\/ We need to check if the authToken is somehow empty, because klient\n\t\/\/ will default to user\/pass if there is no auth token (despite setting\n\t\/\/ the token flag)\n\tif strings.TrimSpace(authToken) == \"\" {\n\t\tcli.ShowCommandHelp(c, \"install\")\n\t\treturn 1\n\t}\n\n\tklientShPath, err := filepath.Abs(filepath.Join(KlientDirectory, \"klient.sh\"))\n\tif err != nil {\n\t\tfmt.Printf(\"Error getting %s wrapper path: '%s'\\n\", KlientName, err)\n\t\treturn 1\n\t}\n\n\tklientBinPath, err := filepath.Abs(filepath.Join(KlientDirectory, \"klient\"))\n\tif err != nil {\n\t\tfmt.Printf(\"Error getting %s path: '%s'\\n\", KlientName, err)\n\t\treturn 1\n\t}\n\n\t\/\/ Create the installation dir, if needed.\n\terr = os.MkdirAll(KlientDirectory, 0755)\n\tif err != nil {\n\t\tfmt.Printf(\"Error creating directory to hold: '%s'\\n\", KlientName, err)\n\t\treturn 1\n\t}\n\n\t\/\/ TODO: Accept `kd install --user foo` flag to replace the\n\t\/\/ environ checking.\n\tvar sudoCmd string\n\tfor _, s := range os.Environ() {\n\t\tenv := strings.Split(s, \"=\")\n\n\t\tif len(env) != 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif env[0] == \"SUDO_USER\" {\n\t\t\tsudoCmd = fmt.Sprintf(\"sudo -u %s \", env[1])\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ TODO: Stop using this klient.sh file.\n\t\/\/ If the klient.sh file is missing, write it. We can use build tags\n\t\/\/ for os specific tags, if needed.\n\t_, err = os.Stat(klientShPath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tklientShFile := []byte(fmt.Sprintf(`#!\/bin\/sh\n%sKITE_HOME=%s %s --kontrol-url=%s -debug\n`,\n\t\t\t\tsudoCmd, KiteHome, klientBinPath, KontrolUrl))\n\n\t\t\t\/\/ perm -rwr-xr-x, same as klient\n\t\t\terr := ioutil.WriteFile(klientShPath, klientShFile, 0755)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Error creating %s wrapper: '%s'\\n\", KlientName, err)\n\t\t\t\treturn 1\n\t\t\t}\n\n\t\t} else {\n\t\t\t\/\/ Unknown error stating (possibly permission), exit\n\t\t\t\/\/ TODO: Print UX friendly err\n\t\t\tfmt.Println(\"Error:\", err)\n\t\t\treturn 1\n\t\t}\n\t}\n\n\tfmt.Println(\"Downloading...\")\n\n\tif err = downloadRemoteToLocal(S3KlientPath, klientBinPath); err != nil {\n\t\tfmt.Printf(\"Error downloading %s: '%s'\\n\", KlientName, err)\n\t\treturn 1\n\t}\n\n\tfmt.Printf(`Authenticating you to the %s\n\n`, KlientName)\n\n\tcmd := exec.Command(klientBinPath, \"-register\",\n\t\t\"-token\", authToken,\n\t\t\"--kontrol-url\", KontrolUrl, \"--kite-home\", KiteHome)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdin = os.Stdin\n\n\terr = cmd.Run()\n\tif err != nil {\n\t\tfmt.Printf(\"Error registering %s: '%s'\\n\", KlientName, err)\n\t\treturn 1\n\t}\n\n\t\/\/ Klient is setting the wrong file permissions when installed by ctl,\n\t\/\/ so since this is just ctl problem, we'll just fix the permission\n\t\/\/ here for now.\n\tif err = os.Chmod(KiteHome, 0755); err != nil {\n\t\tfmt.Printf(\"Error installing %s: '%s'\\n\", KlientName, err)\n\t\treturn 1\n\t}\n\n\tif err = os.Chmod(filepath.Join(KiteHome, \"kite.key\"), 0644); err != nil {\n\t\tfmt.Printf(\"Error installing kite.key: '%s'\\n\", err)\n\t\treturn 1\n\t}\n\n\t\/\/ Create our interface to the OS specific service\n\ts, err := newService()\n\tif err != nil {\n\t\tfmt.Printf(\"Error starting service: '%s'\\n\", err)\n\t\treturn 1\n\t}\n\n\t\/\/ Install the klient binary as a OS service\n\tif err = s.Install(); err != nil {\n\t\tfmt.Printf(\"Error installing service: '%s'\\n\", err)\n\t\treturn 1\n\t}\n\n\t\/\/ Tell the service to start. Normally it starts automatically, but\n\t\/\/ if the user told the service to stop (previously), it may not\n\t\/\/ start automatically.\n\t\/\/\n\t\/\/ Note that the service may error if it is already running, so\n\t\/\/ we're ignoring any starting errors here. We will verify the\n\t\/\/ connection below, anyway.\n\ts.Start()\n\n\tfmt.Println(\"Verifying installation...\")\n\terr = WaitUntilStarted(KlientAddress, 5, 1*time.Second)\n\n\t\/\/ After X times, if err != nil we failed to connect to klient.\n\t\/\/ Inform the user.\n\tif err != nil {\n\t\tfmt.Printf(\"Error verifying the installation of %s: '%s'\\n\", KlientName, err)\n\t\treturn 1\n\t}\n\n\tfmt.Printf(\"\\n\\nSuccessfully installed and started the %s!\\n\", KlientName)\n\n\treturn 0\n}\n<|endoftext|>"} {"text":"<commit_before>package command\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/filer\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/filer_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/remote_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/remote_storage\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/replication\/source\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/security\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/util\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"google.golang.org\/grpc\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype RemoteSyncOptions struct {\n\tfilerAddress *string\n\tgrpcDialOption grpc.DialOption\n\treadChunkFromFiler *bool\n\tdebug *bool\n\ttimeAgo *time.Duration\n\tdir *string\n}\n\nconst (\n\tRemoteSyncKeyPrefix = \"remote.sync.\"\n)\n\nvar _ = filer_pb.FilerClient(&RemoteSyncOptions{})\n\nfunc (option *RemoteSyncOptions) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {\n\treturn pb.WithFilerClient(*option.filerAddress, option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {\n\t\treturn fn(client)\n\t})\n}\nfunc (option *RemoteSyncOptions) AdjustedUrl(location *filer_pb.Location) string {\n\treturn location.Url\n}\n\nvar (\n\tremoteSyncOptions RemoteSyncOptions\n)\n\nfunc init() {\n\tcmdFilerRemoteSynchronize.Run = runFilerRemoteSynchronize \/\/ break init cycle\n\tremoteSyncOptions.filerAddress = cmdFilerRemoteSynchronize.Flag.String(\"filer\", \"localhost:8888\", \"filer of the SeaweedFS cluster\")\n\tremoteSyncOptions.dir = cmdFilerRemoteSynchronize.Flag.String(\"dir\", \"\/\", \"a mounted directory on filer\")\n\tremoteSyncOptions.readChunkFromFiler = cmdFilerRemoteSynchronize.Flag.Bool(\"filerProxy\", false, \"read file chunks from filer instead of volume servers\")\n\tremoteSyncOptions.debug = cmdFilerRemoteSynchronize.Flag.Bool(\"debug\", false, \"debug mode to print out filer updated remote files\")\n\tremoteSyncOptions.timeAgo = cmdFilerRemoteSynchronize.Flag.Duration(\"timeAgo\", 0, \"start time before now. \\\"300ms\\\", \\\"1.5h\\\" or \\\"2h45m\\\". Valid time units are \\\"ns\\\", \\\"us\\\" (or \\\"µs\\\"), \\\"ms\\\", \\\"s\\\", \\\"m\\\", \\\"h\\\"\")\n}\n\nvar cmdFilerRemoteSynchronize = &Command{\n\tUsageLine: \"filer.remote.sync -filer=<filerHost>:<filerPort> -dir=\/mount\/s3_on_cloud\",\n\tShort: \"resumable continuously write back updates to remote storage if the directory is mounted to the remote storage\",\n\tLong: `resumable continuously write back updates to remote storage if the directory is mounted to the remote storage\n\n\tfiler.remote.sync listens on filer update events. \n\tIf any mounted remote file is updated, it will fetch the updated content,\n\tand write to the remote storage.\n`,\n}\n\nfunc runFilerRemoteSynchronize(cmd *Command, args []string) bool {\n\n\tutil.LoadConfiguration(\"security\", false)\n\tgrpcDialOption := security.LoadClientTLS(util.GetViper(), \"grpc.client\")\n\tremoteSyncOptions.grpcDialOption = grpcDialOption\n\n\tdir := *remoteSyncOptions.dir\n\tfilerAddress := *remoteSyncOptions.filerAddress\n\n\tfilerSource := &source.FilerSource{}\n\tfilerSource.DoInitialize(\n\t\tfilerAddress,\n\t\tpb.ServerToGrpcAddress(filerAddress),\n\t\t\"\/\", \/\/ does not matter\n\t\t*remoteSyncOptions.readChunkFromFiler,\n\t)\n\n\tfmt.Printf(\"synchronize %s to remote storage...\\n\", dir)\n\tutil.RetryForever(\"filer.remote.sync \"+dir, func() error {\n\t\treturn followUpdatesAndUploadToRemote(&remoteSyncOptions, filerSource, dir)\n\t}, func(err error) bool {\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"synchronize %s: %v\", dir, err)\n\t\t}\n\t\treturn true\n\t})\n\n\treturn true\n}\n\nfunc followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *source.FilerSource, mountedDir string) error {\n\n\t\/\/ read filer remote storage mount mappings\n\t_, _, remoteStorageMountLocation, remoteStorage, detectErr := filer.DetectMountInfo(option.grpcDialOption, *option.filerAddress, mountedDir)\n\tif detectErr != nil {\n\t\treturn fmt.Errorf(\"read mount info: %v\", detectErr)\n\t}\n\n\tlastOffsetTs := collectLastSyncOffset(option, mountedDir)\n\n\teachEntryFunc, err := makeEventProcessor(remoteStorage, mountedDir, remoteStorageMountLocation, filerSource)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprocessEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error {\n\t\tlastTime := time.Unix(0, lastTsNs)\n\t\tglog.V(0).Infof(\"remote sync %s progressed to %v %0.2f\/sec\", *option.filerAddress, lastTime, float64(counter)\/float64(3))\n\t\treturn remote_storage.SetSyncOffset(option.grpcDialOption, *option.filerAddress, mountedDir, lastTsNs)\n\t})\n\n\treturn pb.FollowMetadata(*option.filerAddress, option.grpcDialOption, \"filer.remote.sync\",\n\t\tmountedDir, []string{filer.DirectoryEtcRemote}, lastOffsetTs.UnixNano(), 0, processEventFnWithOffset, false)\n}\n\nfunc makeEventProcessor(remoteStorage *remote_pb.RemoteConf, mountedDir string, remoteStorageMountLocation *remote_pb.RemoteStorageLocation, filerSource *source.FilerSource) (pb.ProcessMetadataFunc, error) {\n\tclient, err := remote_storage.GetRemoteStorage(remoteStorage)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thandleEtcRemoteChanges := func(resp *filer_pb.SubscribeMetadataResponse) error {\n\t\tmessage := resp.EventNotification\n\t\tif message.NewEntry == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif message.NewEntry.Name == filer.REMOTE_STORAGE_MOUNT_FILE {\n\t\t\tmappings, readErr := filer.UnmarshalRemoteStorageMappings(message.NewEntry.Content)\n\t\t\tif readErr != nil {\n\t\t\t\treturn fmt.Errorf(\"unmarshal mappings: %v\", readErr)\n\t\t\t}\n\t\t\tif remoteLoc, found := mappings.Mappings[mountedDir]; found {\n\t\t\t\tif remoteStorageMountLocation.Bucket != remoteLoc.Bucket || remoteStorageMountLocation.Path != remoteLoc.Path {\n\t\t\t\t\tglog.Fatalf(\"Unexpected mount changes %+v => %+v\", remoteStorageMountLocation, remoteLoc)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tglog.V(0).Infof(\"unmounted %s exiting ...\", mountedDir)\n\t\t\t\tos.Exit(0)\n\t\t\t}\n\t\t}\n\t\tif message.NewEntry.Name == remoteStorage.Name+filer.REMOTE_STORAGE_CONF_SUFFIX {\n\t\t\tconf := &remote_pb.RemoteConf{}\n\t\t\tif err := proto.Unmarshal(message.NewEntry.Content, conf); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unmarshal %s\/%s: %v\", filer.DirectoryEtcRemote, message.NewEntry.Name, err)\n\t\t\t}\n\t\t\tremoteStorage = conf\n\t\t\tif newClient, err := remote_storage.GetRemoteStorage(remoteStorage); err == nil {\n\t\t\t\tclient = newClient\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\teachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {\n\t\tmessage := resp.EventNotification\n\t\tif strings.HasPrefix(resp.Directory, filer.DirectoryEtcRemote) {\n\t\t\treturn handleEtcRemoteChanges(resp)\n\t\t}\n\n\t\tif message.OldEntry == nil && message.NewEntry == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif message.OldEntry == nil && message.NewEntry != nil {\n\t\t\tif !filer.HasData(message.NewEntry) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tglog.V(2).Infof(\"create: %+v\", resp)\n\t\t\tif !shouldSendToRemote(message.NewEntry) {\n\t\t\t\tglog.V(2).Infof(\"skipping creating: %+v\", resp)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tdest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)\n\t\t\tif message.NewEntry.IsDirectory {\n\t\t\t\tglog.V(0).Infof(\"mkdir %s\", remote_storage.FormatLocation(dest))\n\t\t\t\treturn client.WriteDirectory(dest, message.NewEntry)\n\t\t\t}\n\t\t\tglog.V(0).Infof(\"create %s\", remote_storage.FormatLocation(dest))\n\t\t\treader := filer.NewFileReader(filerSource, message.NewEntry)\n\t\t\tremoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)\n\t\t\tif writeErr != nil {\n\t\t\t\treturn writeErr\n\t\t\t}\n\t\t\treturn updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)\n\t\t}\n\t\tif message.OldEntry != nil && message.NewEntry == nil {\n\t\t\tglog.V(2).Infof(\"delete: %+v\", resp)\n\t\t\tdest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)\n\t\t\tif message.OldEntry.IsDirectory {\n\t\t\t\tglog.V(0).Infof(\"rmdir %s\", remote_storage.FormatLocation(dest))\n\t\t\t\treturn client.RemoveDirectory(dest)\n\t\t\t}\n\t\t\tglog.V(0).Infof(\"delete %s\", remote_storage.FormatLocation(dest))\n\t\t\treturn client.DeleteFile(dest)\n\t\t}\n\t\tif message.OldEntry != nil && message.NewEntry != nil {\n\t\t\toldDest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)\n\t\t\tdest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)\n\t\t\tif !shouldSendToRemote(message.NewEntry) {\n\t\t\t\tglog.V(2).Infof(\"skipping updating: %+v\", resp)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif message.NewEntry.IsDirectory {\n\t\t\t\treturn client.WriteDirectory(dest, message.NewEntry)\n\t\t\t}\n\t\t\tif resp.Directory == message.NewParentPath && message.OldEntry.Name == message.NewEntry.Name {\n\t\t\t\tif filer.IsSameData(message.OldEntry, message.NewEntry) {\n\t\t\t\t\tglog.V(2).Infof(\"update meta: %+v\", resp)\n\t\t\t\t\treturn client.UpdateFileMetadata(dest, message.OldEntry, message.NewEntry)\n\t\t\t\t}\n\t\t\t}\n\t\t\tglog.V(2).Infof(\"update: %+v\", resp)\n\t\t\tglog.V(0).Infof(\"delete %s\", remote_storage.FormatLocation(oldDest))\n\t\t\tif err := client.DeleteFile(oldDest); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treader := filer.NewFileReader(filerSource, message.NewEntry)\n\t\t\tglog.V(0).Infof(\"create %s\", remote_storage.FormatLocation(dest))\n\t\t\tremoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)\n\t\t\tif writeErr != nil {\n\t\t\t\treturn writeErr\n\t\t\t}\n\t\t\treturn updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)\n\t\t}\n\n\t\treturn nil\n\t}\n\treturn eachEntryFunc, nil\n}\n\nfunc collectLastSyncOffset(option *RemoteSyncOptions, mountedDir string) (time.Time) {\n\t\/\/ 1. specified by timeAgo\n\t\/\/ 2. last offset timestamp for this directory\n\t\/\/ 3. directory creation time\n\tvar lastOffsetTs time.Time\n\tif *option.timeAgo == 0 {\n\t\tmountedDirEntry, err := filer_pb.GetEntry(option, util.FullPath(mountedDir))\n\t\tif err != nil {\n\t\t\tglog.V(0).Infof(\"get mounted directory %s: %v\", mountedDir, err)\n\t\t\treturn time.Now()\n\t\t}\n\n\t\tlastOffsetTsNs, err := remote_storage.GetSyncOffset(option.grpcDialOption, *option.filerAddress, mountedDir)\n\t\tif mountedDirEntry != nil {\n\t\t\tif err == nil && mountedDirEntry.Attributes.Crtime < lastOffsetTsNs\/1000000 {\n\t\t\t\tlastOffsetTs = time.Unix(0, lastOffsetTsNs)\n\t\t\t\tglog.V(0).Infof(\"resume from %v\", lastOffsetTs)\n\t\t\t} else {\n\t\t\t\tlastOffsetTs = time.Unix(mountedDirEntry.Attributes.Crtime, 0)\n\t\t\t}\n\t\t} else {\n\t\t\tlastOffsetTs = time.Now()\n\t\t}\n\t} else {\n\t\tlastOffsetTs = time.Now().Add(-*option.timeAgo)\n\t}\n\treturn lastOffsetTs\n}\n\nfunc toRemoteStorageLocation(mountDir, sourcePath util.FullPath, remoteMountLocation *remote_pb.RemoteStorageLocation) *remote_pb.RemoteStorageLocation {\n\tsource := string(sourcePath[len(mountDir):])\n\tdest := util.FullPath(remoteMountLocation.Path).Child(source)\n\treturn &remote_pb.RemoteStorageLocation{\n\t\tName: remoteMountLocation.Name,\n\t\tBucket: remoteMountLocation.Bucket,\n\t\tPath: string(dest),\n\t}\n}\n\nfunc shouldSendToRemote(entry *filer_pb.Entry) bool {\n\tif entry.RemoteEntry == nil {\n\t\treturn true\n\t}\n\tif entry.RemoteEntry.LastLocalSyncTsNs\/1e9 < entry.Attributes.Mtime {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc updateLocalEntry(filerClient filer_pb.FilerClient, dir string, entry *filer_pb.Entry, remoteEntry *filer_pb.RemoteEntry) error {\n\tentry.RemoteEntry = remoteEntry\n\treturn filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {\n\t\t_, err := client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{\n\t\t\tDirectory: dir,\n\t\t\tEntry: entry,\n\t\t})\n\t\treturn err\n\t})\n}\n<commit_msg>minor<commit_after>package command\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/filer\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/filer_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/remote_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/remote_storage\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/replication\/source\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/security\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/util\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"google.golang.org\/grpc\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype RemoteSyncOptions struct {\n\tfilerAddress *string\n\tgrpcDialOption grpc.DialOption\n\treadChunkFromFiler *bool\n\tdebug *bool\n\ttimeAgo *time.Duration\n\tdir *string\n}\n\nconst (\n\tRemoteSyncKeyPrefix = \"remote.sync.\"\n)\n\nvar _ = filer_pb.FilerClient(&RemoteSyncOptions{})\n\nfunc (option *RemoteSyncOptions) WithFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {\n\treturn pb.WithFilerClient(*option.filerAddress, option.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {\n\t\treturn fn(client)\n\t})\n}\nfunc (option *RemoteSyncOptions) AdjustedUrl(location *filer_pb.Location) string {\n\treturn location.Url\n}\n\nvar (\n\tremoteSyncOptions RemoteSyncOptions\n)\n\nfunc init() {\n\tcmdFilerRemoteSynchronize.Run = runFilerRemoteSynchronize \/\/ break init cycle\n\tremoteSyncOptions.filerAddress = cmdFilerRemoteSynchronize.Flag.String(\"filer\", \"localhost:8888\", \"filer of the SeaweedFS cluster\")\n\tremoteSyncOptions.dir = cmdFilerRemoteSynchronize.Flag.String(\"dir\", \"\/\", \"a mounted directory on filer\")\n\tremoteSyncOptions.readChunkFromFiler = cmdFilerRemoteSynchronize.Flag.Bool(\"filerProxy\", false, \"read file chunks from filer instead of volume servers\")\n\tremoteSyncOptions.debug = cmdFilerRemoteSynchronize.Flag.Bool(\"debug\", false, \"debug mode to print out filer updated remote files\")\n\tremoteSyncOptions.timeAgo = cmdFilerRemoteSynchronize.Flag.Duration(\"timeAgo\", 0, \"start time before now. \\\"300ms\\\", \\\"1.5h\\\" or \\\"2h45m\\\". Valid time units are \\\"ns\\\", \\\"us\\\" (or \\\"µs\\\"), \\\"ms\\\", \\\"s\\\", \\\"m\\\", \\\"h\\\"\")\n}\n\nvar cmdFilerRemoteSynchronize = &Command{\n\tUsageLine: \"filer.remote.sync -filer=<filerHost>:<filerPort> -dir=\/mount\/s3_on_cloud\",\n\tShort: \"resumable continuously write back updates to remote storage if the directory is mounted to the remote storage\",\n\tLong: `resumable continuously write back updates to remote storage if the directory is mounted to the remote storage\n\n\tfiler.remote.sync listens on filer update events. \n\tIf any mounted remote file is updated, it will fetch the updated content,\n\tand write to the remote storage.\n`,\n}\n\nfunc runFilerRemoteSynchronize(cmd *Command, args []string) bool {\n\n\tutil.LoadConfiguration(\"security\", false)\n\tgrpcDialOption := security.LoadClientTLS(util.GetViper(), \"grpc.client\")\n\tremoteSyncOptions.grpcDialOption = grpcDialOption\n\n\tdir := *remoteSyncOptions.dir\n\tfilerAddress := *remoteSyncOptions.filerAddress\n\n\tfilerSource := &source.FilerSource{}\n\tfilerSource.DoInitialize(\n\t\tfilerAddress,\n\t\tpb.ServerToGrpcAddress(filerAddress),\n\t\t\"\/\", \/\/ does not matter\n\t\t*remoteSyncOptions.readChunkFromFiler,\n\t)\n\n\tfmt.Printf(\"synchronize %s to remote storage...\\n\", dir)\n\tutil.RetryForever(\"filer.remote.sync \"+dir, func() error {\n\t\treturn followUpdatesAndUploadToRemote(&remoteSyncOptions, filerSource, dir)\n\t}, func(err error) bool {\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"synchronize %s: %v\", dir, err)\n\t\t}\n\t\treturn true\n\t})\n\n\treturn true\n}\n\nfunc followUpdatesAndUploadToRemote(option *RemoteSyncOptions, filerSource *source.FilerSource, mountedDir string) error {\n\n\t\/\/ read filer remote storage mount mappings\n\t_, _, remoteStorageMountLocation, remoteStorage, detectErr := filer.DetectMountInfo(option.grpcDialOption, *option.filerAddress, mountedDir)\n\tif detectErr != nil {\n\t\treturn fmt.Errorf(\"read mount info: %v\", detectErr)\n\t}\n\n\teachEntryFunc, err := makeEventProcessor(remoteStorage, mountedDir, remoteStorageMountLocation, filerSource)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprocessEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error {\n\t\tlastTime := time.Unix(0, lastTsNs)\n\t\tglog.V(0).Infof(\"remote sync %s progressed to %v %0.2f\/sec\", *option.filerAddress, lastTime, float64(counter)\/float64(3))\n\t\treturn remote_storage.SetSyncOffset(option.grpcDialOption, *option.filerAddress, mountedDir, lastTsNs)\n\t})\n\n\tlastOffsetTs := collectLastSyncOffset(option, mountedDir)\n\n\treturn pb.FollowMetadata(*option.filerAddress, option.grpcDialOption, \"filer.remote.sync\",\n\t\tmountedDir, []string{filer.DirectoryEtcRemote}, lastOffsetTs.UnixNano(), 0, processEventFnWithOffset, false)\n}\n\nfunc makeEventProcessor(remoteStorage *remote_pb.RemoteConf, mountedDir string, remoteStorageMountLocation *remote_pb.RemoteStorageLocation, filerSource *source.FilerSource) (pb.ProcessMetadataFunc, error) {\n\tclient, err := remote_storage.GetRemoteStorage(remoteStorage)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thandleEtcRemoteChanges := func(resp *filer_pb.SubscribeMetadataResponse) error {\n\t\tmessage := resp.EventNotification\n\t\tif message.NewEntry == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif message.NewEntry.Name == filer.REMOTE_STORAGE_MOUNT_FILE {\n\t\t\tmappings, readErr := filer.UnmarshalRemoteStorageMappings(message.NewEntry.Content)\n\t\t\tif readErr != nil {\n\t\t\t\treturn fmt.Errorf(\"unmarshal mappings: %v\", readErr)\n\t\t\t}\n\t\t\tif remoteLoc, found := mappings.Mappings[mountedDir]; found {\n\t\t\t\tif remoteStorageMountLocation.Bucket != remoteLoc.Bucket || remoteStorageMountLocation.Path != remoteLoc.Path {\n\t\t\t\t\tglog.Fatalf(\"Unexpected mount changes %+v => %+v\", remoteStorageMountLocation, remoteLoc)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tglog.V(0).Infof(\"unmounted %s exiting ...\", mountedDir)\n\t\t\t\tos.Exit(0)\n\t\t\t}\n\t\t}\n\t\tif message.NewEntry.Name == remoteStorage.Name+filer.REMOTE_STORAGE_CONF_SUFFIX {\n\t\t\tconf := &remote_pb.RemoteConf{}\n\t\t\tif err := proto.Unmarshal(message.NewEntry.Content, conf); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unmarshal %s\/%s: %v\", filer.DirectoryEtcRemote, message.NewEntry.Name, err)\n\t\t\t}\n\t\t\tremoteStorage = conf\n\t\t\tif newClient, err := remote_storage.GetRemoteStorage(remoteStorage); err == nil {\n\t\t\t\tclient = newClient\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\teachEntryFunc := func(resp *filer_pb.SubscribeMetadataResponse) error {\n\t\tmessage := resp.EventNotification\n\t\tif strings.HasPrefix(resp.Directory, filer.DirectoryEtcRemote) {\n\t\t\treturn handleEtcRemoteChanges(resp)\n\t\t}\n\n\t\tif message.OldEntry == nil && message.NewEntry == nil {\n\t\t\treturn nil\n\t\t}\n\t\tif message.OldEntry == nil && message.NewEntry != nil {\n\t\t\tif !filer.HasData(message.NewEntry) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tglog.V(2).Infof(\"create: %+v\", resp)\n\t\t\tif !shouldSendToRemote(message.NewEntry) {\n\t\t\t\tglog.V(2).Infof(\"skipping creating: %+v\", resp)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tdest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)\n\t\t\tif message.NewEntry.IsDirectory {\n\t\t\t\tglog.V(0).Infof(\"mkdir %s\", remote_storage.FormatLocation(dest))\n\t\t\t\treturn client.WriteDirectory(dest, message.NewEntry)\n\t\t\t}\n\t\t\tglog.V(0).Infof(\"create %s\", remote_storage.FormatLocation(dest))\n\t\t\treader := filer.NewFileReader(filerSource, message.NewEntry)\n\t\t\tremoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)\n\t\t\tif writeErr != nil {\n\t\t\t\treturn writeErr\n\t\t\t}\n\t\t\treturn updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)\n\t\t}\n\t\tif message.OldEntry != nil && message.NewEntry == nil {\n\t\t\tglog.V(2).Infof(\"delete: %+v\", resp)\n\t\t\tdest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)\n\t\t\tif message.OldEntry.IsDirectory {\n\t\t\t\tglog.V(0).Infof(\"rmdir %s\", remote_storage.FormatLocation(dest))\n\t\t\t\treturn client.RemoveDirectory(dest)\n\t\t\t}\n\t\t\tglog.V(0).Infof(\"delete %s\", remote_storage.FormatLocation(dest))\n\t\t\treturn client.DeleteFile(dest)\n\t\t}\n\t\tif message.OldEntry != nil && message.NewEntry != nil {\n\t\t\toldDest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(resp.Directory, message.OldEntry.Name), remoteStorageMountLocation)\n\t\t\tdest := toRemoteStorageLocation(util.FullPath(mountedDir), util.NewFullPath(message.NewParentPath, message.NewEntry.Name), remoteStorageMountLocation)\n\t\t\tif !shouldSendToRemote(message.NewEntry) {\n\t\t\t\tglog.V(2).Infof(\"skipping updating: %+v\", resp)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif message.NewEntry.IsDirectory {\n\t\t\t\treturn client.WriteDirectory(dest, message.NewEntry)\n\t\t\t}\n\t\t\tif resp.Directory == message.NewParentPath && message.OldEntry.Name == message.NewEntry.Name {\n\t\t\t\tif filer.IsSameData(message.OldEntry, message.NewEntry) {\n\t\t\t\t\tglog.V(2).Infof(\"update meta: %+v\", resp)\n\t\t\t\t\treturn client.UpdateFileMetadata(dest, message.OldEntry, message.NewEntry)\n\t\t\t\t}\n\t\t\t}\n\t\t\tglog.V(2).Infof(\"update: %+v\", resp)\n\t\t\tglog.V(0).Infof(\"delete %s\", remote_storage.FormatLocation(oldDest))\n\t\t\tif err := client.DeleteFile(oldDest); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treader := filer.NewFileReader(filerSource, message.NewEntry)\n\t\t\tglog.V(0).Infof(\"create %s\", remote_storage.FormatLocation(dest))\n\t\t\tremoteEntry, writeErr := client.WriteFile(dest, message.NewEntry, reader)\n\t\t\tif writeErr != nil {\n\t\t\t\treturn writeErr\n\t\t\t}\n\t\t\treturn updateLocalEntry(&remoteSyncOptions, message.NewParentPath, message.NewEntry, remoteEntry)\n\t\t}\n\n\t\treturn nil\n\t}\n\treturn eachEntryFunc, nil\n}\n\nfunc collectLastSyncOffset(option *RemoteSyncOptions, mountedDir string) (time.Time) {\n\t\/\/ 1. specified by timeAgo\n\t\/\/ 2. last offset timestamp for this directory\n\t\/\/ 3. directory creation time\n\tvar lastOffsetTs time.Time\n\tif *option.timeAgo == 0 {\n\t\tmountedDirEntry, err := filer_pb.GetEntry(option, util.FullPath(mountedDir))\n\t\tif err != nil {\n\t\t\tglog.V(0).Infof(\"get mounted directory %s: %v\", mountedDir, err)\n\t\t\treturn time.Now()\n\t\t}\n\n\t\tlastOffsetTsNs, err := remote_storage.GetSyncOffset(option.grpcDialOption, *option.filerAddress, mountedDir)\n\t\tif mountedDirEntry != nil {\n\t\t\tif err == nil && mountedDirEntry.Attributes.Crtime < lastOffsetTsNs\/1000000 {\n\t\t\t\tlastOffsetTs = time.Unix(0, lastOffsetTsNs)\n\t\t\t\tglog.V(0).Infof(\"resume from %v\", lastOffsetTs)\n\t\t\t} else {\n\t\t\t\tlastOffsetTs = time.Unix(mountedDirEntry.Attributes.Crtime, 0)\n\t\t\t}\n\t\t} else {\n\t\t\tlastOffsetTs = time.Now()\n\t\t}\n\t} else {\n\t\tlastOffsetTs = time.Now().Add(-*option.timeAgo)\n\t}\n\treturn lastOffsetTs\n}\n\nfunc toRemoteStorageLocation(mountDir, sourcePath util.FullPath, remoteMountLocation *remote_pb.RemoteStorageLocation) *remote_pb.RemoteStorageLocation {\n\tsource := string(sourcePath[len(mountDir):])\n\tdest := util.FullPath(remoteMountLocation.Path).Child(source)\n\treturn &remote_pb.RemoteStorageLocation{\n\t\tName: remoteMountLocation.Name,\n\t\tBucket: remoteMountLocation.Bucket,\n\t\tPath: string(dest),\n\t}\n}\n\nfunc shouldSendToRemote(entry *filer_pb.Entry) bool {\n\tif entry.RemoteEntry == nil {\n\t\treturn true\n\t}\n\tif entry.RemoteEntry.LastLocalSyncTsNs\/1e9 < entry.Attributes.Mtime {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc updateLocalEntry(filerClient filer_pb.FilerClient, dir string, entry *filer_pb.Entry, remoteEntry *filer_pb.RemoteEntry) error {\n\tentry.RemoteEntry = remoteEntry\n\treturn filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {\n\t\t_, err := client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{\n\t\t\tDirectory: dir,\n\t\t\tEntry: entry,\n\t\t})\n\t\treturn err\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package weed_server\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/chrislusf\/raft\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/master_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/backend\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/needle\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/topology\"\n\t\"google.golang.org\/grpc\/peer\"\n)\n\nfunc (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServer) error {\n\tvar dn *topology.DataNode\n\tt := ms.Topo\n\n\tdefer func() {\n\t\tif dn != nil {\n\n\t\t\tglog.V(0).Infof(\"unregister disconnected volume server %s:%d\", dn.Ip, dn.Port)\n\t\t\tt.UnRegisterDataNode(dn)\n\n\t\t\tmessage := &master_pb.VolumeLocation{\n\t\t\t\tUrl: dn.Url(),\n\t\t\t\tPublicUrl: dn.PublicUrl,\n\t\t\t}\n\t\t\tfor _, v := range dn.GetVolumes() {\n\t\t\t\tmessage.DeletedVids = append(message.DeletedVids, uint32(v.Id))\n\t\t\t}\n\t\t\tfor _, s := range dn.GetEcShards() {\n\t\t\t\tmessage.DeletedVids = append(message.DeletedVids, uint32(s.VolumeId))\n\t\t\t}\n\n\t\t\tif len(message.DeletedVids) > 0 {\n\t\t\t\tms.clientChansLock.RLock()\n\t\t\t\tfor _, ch := range ms.clientChans {\n\t\t\t\t\tch <- message\n\t\t\t\t}\n\t\t\t\tms.clientChansLock.RUnlock()\n\t\t\t}\n\n\t\t}\n\t}()\n\n\tfor {\n\t\theartbeat, err := stream.Recv()\n\t\tif err != nil {\n\t\t\tif dn != nil {\n\t\t\t\tglog.Warningf(\"SendHeartbeat.Recv server %s:%d : %v\", dn.Ip, dn.Port, err)\n\t\t\t} else {\n\t\t\t\tglog.Warningf(\"SendHeartbeat.Recv: %v\", err)\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\tt.Sequence.SetMax(heartbeat.MaxFileKey)\n\n\t\tif dn == nil {\n\t\t\tif heartbeat.Ip == \"\" {\n\t\t\t\tif pr, ok := peer.FromContext(stream.Context()); ok {\n\t\t\t\t\tif pr.Addr != net.Addr(nil) {\n\t\t\t\t\t\theartbeat.Ip = pr.Addr.String()[0:strings.LastIndex(pr.Addr.String(), \":\")]\n\t\t\t\t\t\tglog.V(0).Infof(\"remote IP address is detected as %v\", heartbeat.Ip)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tdcName, rackName := t.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack)\n\t\t\tdc := t.GetOrCreateDataCenter(dcName)\n\t\t\track := dc.GetOrCreateRack(rackName)\n\t\t\tdn = rack.GetOrCreateDataNode(heartbeat.Ip,\n\t\t\t\tint(heartbeat.Port), heartbeat.PublicUrl,\n\t\t\t\tint64(heartbeat.MaxVolumeCount))\n\t\t\tglog.V(0).Infof(\"added volume server %v:%d\", heartbeat.GetIp(), heartbeat.GetPort())\n\t\t\tif err := stream.Send(&master_pb.HeartbeatResponse{\n\t\t\t\tVolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024,\n\t\t\t\tMetricsAddress: ms.option.MetricsAddress,\n\t\t\t\tMetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec),\n\t\t\t\tStorageBackends: backend.ToPbStorageBackends(),\n\t\t\t}); err != nil {\n\t\t\t\tglog.Warningf(\"SendHeartbeat.Send volume size to %s:%d %v\", dn.Ip, dn.Port, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tglog.V(4).Infof(\"master received heartbeat %s\", heartbeat.String())\n\t\tmessage := &master_pb.VolumeLocation{\n\t\t\tUrl: dn.Url(),\n\t\t\tPublicUrl: dn.PublicUrl,\n\t\t}\n\t\tif len(heartbeat.NewVolumes) > 0 || len(heartbeat.DeletedVolumes) > 0 {\n\t\t\t\/\/ process delta volume ids if exists for fast volume id updates\n\t\t\tfor _, volInfo := range heartbeat.NewVolumes {\n\t\t\t\tmessage.NewVids = append(message.NewVids, volInfo.Id)\n\t\t\t}\n\t\t\tfor _, volInfo := range heartbeat.DeletedVolumes {\n\t\t\t\tmessage.DeletedVids = append(message.DeletedVids, volInfo.Id)\n\t\t\t}\n\t\t\t\/\/ update master internal volume layouts\n\t\t\tt.IncrementalSyncDataNodeRegistration(heartbeat.NewVolumes, heartbeat.DeletedVolumes, dn)\n\t\t}\n\n\t\tif len(heartbeat.Volumes) > 0 || heartbeat.HasNoVolumes {\n\t\t\t\/\/ process heartbeat.Volumes\n\t\t\tnewVolumes, deletedVolumes := t.SyncDataNodeRegistration(heartbeat.Volumes, dn)\n\n\t\t\tfor _, v := range newVolumes {\n\t\t\t\tglog.V(0).Infof(\"master see new volume %d from %s\", uint32(v.Id), dn.Url())\n\t\t\t\tmessage.NewVids = append(message.NewVids, uint32(v.Id))\n\t\t\t}\n\t\t\tfor _, v := range deletedVolumes {\n\t\t\t\tglog.V(0).Infof(\"master see deleted volume %d from %s\", uint32(v.Id), dn.Url())\n\t\t\t\tmessage.DeletedVids = append(message.DeletedVids, uint32(v.Id))\n\t\t\t}\n\t\t}\n\n\t\tif len(heartbeat.NewEcShards) > 0 || len(heartbeat.DeletedEcShards) > 0 {\n\n\t\t\t\/\/ update master internal volume layouts\n\t\t\tt.IncrementalSyncDataNodeEcShards(heartbeat.NewEcShards, heartbeat.DeletedEcShards, dn)\n\n\t\t\tfor _, s := range heartbeat.NewEcShards {\n\t\t\t\tmessage.NewVids = append(message.NewVids, s.Id)\n\t\t\t}\n\t\t\tfor _, s := range heartbeat.DeletedEcShards {\n\t\t\t\tif dn.HasVolumesById(needle.VolumeId(s.Id)) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tmessage.DeletedVids = append(message.DeletedVids, s.Id)\n\t\t\t}\n\n\t\t}\n\n\t\tif len(heartbeat.EcShards) > 0 || heartbeat.HasNoEcShards {\n\t\t\tglog.V(1).Infof(\"master recieved ec shards from %s: %+v\", dn.Url(), heartbeat.EcShards)\n\t\t\tnewShards, deletedShards := t.SyncDataNodeEcShards(heartbeat.EcShards, dn)\n\n\t\t\t\/\/ broadcast the ec vid changes to master clients\n\t\t\tfor _, s := range newShards {\n\t\t\t\tmessage.NewVids = append(message.NewVids, uint32(s.VolumeId))\n\t\t\t}\n\t\t\tfor _, s := range deletedShards {\n\t\t\t\tif dn.HasVolumesById(s.VolumeId) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tmessage.DeletedVids = append(message.DeletedVids, uint32(s.VolumeId))\n\t\t\t}\n\n\t\t}\n\n\t\tif len(message.NewVids) > 0 || len(message.DeletedVids) > 0 {\n\t\t\tms.clientChansLock.RLock()\n\t\t\tfor host, ch := range ms.clientChans {\n\t\t\t\tglog.V(0).Infof(\"master send to %s: %s\", host, message.String())\n\t\t\t\tch <- message\n\t\t\t}\n\t\t\tms.clientChansLock.RUnlock()\n\t\t}\n\n\t\t\/\/ tell the volume servers about the leader\n\t\tnewLeader, err := t.Leader()\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"SendHeartbeat find leader: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tif err := stream.Send(&master_pb.HeartbeatResponse{\n\t\t\tLeader: newLeader,\n\t\t}); err != nil {\n\t\t\tglog.Warningf(\"SendHeartbeat.Send response to to %s:%d %v\", dn.Ip, dn.Port, err)\n\t\t\treturn err\n\t\t}\n\t}\n}\n\n\/\/ KeepConnected keep a stream gRPC call to the master. Used by clients to know the master is up.\n\/\/ And clients gets the up-to-date list of volume locations\nfunc (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServer) error {\n\n\treq, err := stream.Recv()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !ms.Topo.IsLeader() {\n\t\treturn ms.informNewLeader(stream)\n\t}\n\n\t\/\/ remember client address\n\tctx := stream.Context()\n\t\/\/ fmt.Printf(\"FromContext %+v\\n\", ctx)\n\tpr, ok := peer.FromContext(ctx)\n\tif !ok {\n\t\tglog.Error(\"failed to get peer from ctx\")\n\t\treturn fmt.Errorf(\"failed to get peer from ctx\")\n\t}\n\tif pr.Addr == net.Addr(nil) {\n\t\tglog.Error(\"failed to get peer address\")\n\t\treturn fmt.Errorf(\"failed to get peer address\")\n\t}\n\n\tclientName := req.Name + pr.Addr.String()\n\tglog.V(0).Infof(\"+ client %v\", clientName)\n\n\tmessageChan := make(chan *master_pb.VolumeLocation)\n\tstopChan := make(chan bool)\n\n\tms.clientChansLock.Lock()\n\tms.clientChans[clientName] = messageChan\n\tms.clientChansLock.Unlock()\n\n\tdefer func() {\n\t\tglog.V(0).Infof(\"- client %v\", clientName)\n\t\tms.clientChansLock.Lock()\n\t\tdelete(ms.clientChans, clientName)\n\t\tms.clientChansLock.Unlock()\n\t}()\n\n\tfor _, message := range ms.Topo.ToVolumeLocations() {\n\t\tif err := stream.Send(message); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\t_, err := stream.Recv()\n\t\t\tif err != nil {\n\t\t\t\tglog.V(2).Infof(\"- client %v: %v\", clientName, err)\n\t\t\t\tstopChan <- true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\tticker := time.NewTicker(5 * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase message := <-messageChan:\n\t\t\tif err := stream.Send(message); err != nil {\n\t\t\t\tglog.V(0).Infof(\"=> client %v: %+v\", clientName, message)\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\tif !ms.Topo.IsLeader() {\n\t\t\t\treturn ms.informNewLeader(stream)\n\t\t\t}\n\t\tcase <-stopChan:\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (ms *MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedServer) error {\n\tleader, err := ms.Topo.Leader()\n\tif err != nil {\n\t\tglog.Errorf(\"topo leader: %v\", err)\n\t\treturn raft.NotLeaderError\n\t}\n\tif err := stream.Send(&master_pb.VolumeLocation{\n\t\tLeader: leader,\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<commit_msg>remove dead code<commit_after>package weed_server\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com\/chrislusf\/raft\"\n\t\"google.golang.org\/grpc\/peer\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/master_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/backend\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/storage\/needle\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/topology\"\n)\n\nfunc (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServer) error {\n\tvar dn *topology.DataNode\n\tt := ms.Topo\n\n\tdefer func() {\n\t\tif dn != nil {\n\n\t\t\tglog.V(0).Infof(\"unregister disconnected volume server %s:%d\", dn.Ip, dn.Port)\n\t\t\tt.UnRegisterDataNode(dn)\n\n\t\t\tmessage := &master_pb.VolumeLocation{\n\t\t\t\tUrl: dn.Url(),\n\t\t\t\tPublicUrl: dn.PublicUrl,\n\t\t\t}\n\t\t\tfor _, v := range dn.GetVolumes() {\n\t\t\t\tmessage.DeletedVids = append(message.DeletedVids, uint32(v.Id))\n\t\t\t}\n\t\t\tfor _, s := range dn.GetEcShards() {\n\t\t\t\tmessage.DeletedVids = append(message.DeletedVids, uint32(s.VolumeId))\n\t\t\t}\n\n\t\t\tif len(message.DeletedVids) > 0 {\n\t\t\t\tms.clientChansLock.RLock()\n\t\t\t\tfor _, ch := range ms.clientChans {\n\t\t\t\t\tch <- message\n\t\t\t\t}\n\t\t\t\tms.clientChansLock.RUnlock()\n\t\t\t}\n\n\t\t}\n\t}()\n\n\tfor {\n\t\theartbeat, err := stream.Recv()\n\t\tif err != nil {\n\t\t\tif dn != nil {\n\t\t\t\tglog.Warningf(\"SendHeartbeat.Recv server %s:%d : %v\", dn.Ip, dn.Port, err)\n\t\t\t} else {\n\t\t\t\tglog.Warningf(\"SendHeartbeat.Recv: %v\", err)\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\tt.Sequence.SetMax(heartbeat.MaxFileKey)\n\n\t\tif dn == nil {\n\t\t\tdcName, rackName := t.Configuration.Locate(heartbeat.Ip, heartbeat.DataCenter, heartbeat.Rack)\n\t\t\tdc := t.GetOrCreateDataCenter(dcName)\n\t\t\track := dc.GetOrCreateRack(rackName)\n\t\t\tdn = rack.GetOrCreateDataNode(heartbeat.Ip,\n\t\t\t\tint(heartbeat.Port), heartbeat.PublicUrl,\n\t\t\t\tint64(heartbeat.MaxVolumeCount))\n\t\t\tglog.V(0).Infof(\"added volume server %v:%d\", heartbeat.GetIp(), heartbeat.GetPort())\n\t\t\tif err := stream.Send(&master_pb.HeartbeatResponse{\n\t\t\t\tVolumeSizeLimit: uint64(ms.option.VolumeSizeLimitMB) * 1024 * 1024,\n\t\t\t\tMetricsAddress: ms.option.MetricsAddress,\n\t\t\t\tMetricsIntervalSeconds: uint32(ms.option.MetricsIntervalSec),\n\t\t\t\tStorageBackends: backend.ToPbStorageBackends(),\n\t\t\t}); err != nil {\n\t\t\t\tglog.Warningf(\"SendHeartbeat.Send volume size to %s:%d %v\", dn.Ip, dn.Port, err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tglog.V(4).Infof(\"master received heartbeat %s\", heartbeat.String())\n\t\tmessage := &master_pb.VolumeLocation{\n\t\t\tUrl: dn.Url(),\n\t\t\tPublicUrl: dn.PublicUrl,\n\t\t}\n\t\tif len(heartbeat.NewVolumes) > 0 || len(heartbeat.DeletedVolumes) > 0 {\n\t\t\t\/\/ process delta volume ids if exists for fast volume id updates\n\t\t\tfor _, volInfo := range heartbeat.NewVolumes {\n\t\t\t\tmessage.NewVids = append(message.NewVids, volInfo.Id)\n\t\t\t}\n\t\t\tfor _, volInfo := range heartbeat.DeletedVolumes {\n\t\t\t\tmessage.DeletedVids = append(message.DeletedVids, volInfo.Id)\n\t\t\t}\n\t\t\t\/\/ update master internal volume layouts\n\t\t\tt.IncrementalSyncDataNodeRegistration(heartbeat.NewVolumes, heartbeat.DeletedVolumes, dn)\n\t\t}\n\n\t\tif len(heartbeat.Volumes) > 0 || heartbeat.HasNoVolumes {\n\t\t\t\/\/ process heartbeat.Volumes\n\t\t\tnewVolumes, deletedVolumes := t.SyncDataNodeRegistration(heartbeat.Volumes, dn)\n\n\t\t\tfor _, v := range newVolumes {\n\t\t\t\tglog.V(0).Infof(\"master see new volume %d from %s\", uint32(v.Id), dn.Url())\n\t\t\t\tmessage.NewVids = append(message.NewVids, uint32(v.Id))\n\t\t\t}\n\t\t\tfor _, v := range deletedVolumes {\n\t\t\t\tglog.V(0).Infof(\"master see deleted volume %d from %s\", uint32(v.Id), dn.Url())\n\t\t\t\tmessage.DeletedVids = append(message.DeletedVids, uint32(v.Id))\n\t\t\t}\n\t\t}\n\n\t\tif len(heartbeat.NewEcShards) > 0 || len(heartbeat.DeletedEcShards) > 0 {\n\n\t\t\t\/\/ update master internal volume layouts\n\t\t\tt.IncrementalSyncDataNodeEcShards(heartbeat.NewEcShards, heartbeat.DeletedEcShards, dn)\n\n\t\t\tfor _, s := range heartbeat.NewEcShards {\n\t\t\t\tmessage.NewVids = append(message.NewVids, s.Id)\n\t\t\t}\n\t\t\tfor _, s := range heartbeat.DeletedEcShards {\n\t\t\t\tif dn.HasVolumesById(needle.VolumeId(s.Id)) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tmessage.DeletedVids = append(message.DeletedVids, s.Id)\n\t\t\t}\n\n\t\t}\n\n\t\tif len(heartbeat.EcShards) > 0 || heartbeat.HasNoEcShards {\n\t\t\tglog.V(1).Infof(\"master recieved ec shards from %s: %+v\", dn.Url(), heartbeat.EcShards)\n\t\t\tnewShards, deletedShards := t.SyncDataNodeEcShards(heartbeat.EcShards, dn)\n\n\t\t\t\/\/ broadcast the ec vid changes to master clients\n\t\t\tfor _, s := range newShards {\n\t\t\t\tmessage.NewVids = append(message.NewVids, uint32(s.VolumeId))\n\t\t\t}\n\t\t\tfor _, s := range deletedShards {\n\t\t\t\tif dn.HasVolumesById(s.VolumeId) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tmessage.DeletedVids = append(message.DeletedVids, uint32(s.VolumeId))\n\t\t\t}\n\n\t\t}\n\n\t\tif len(message.NewVids) > 0 || len(message.DeletedVids) > 0 {\n\t\t\tms.clientChansLock.RLock()\n\t\t\tfor host, ch := range ms.clientChans {\n\t\t\t\tglog.V(0).Infof(\"master send to %s: %s\", host, message.String())\n\t\t\t\tch <- message\n\t\t\t}\n\t\t\tms.clientChansLock.RUnlock()\n\t\t}\n\n\t\t\/\/ tell the volume servers about the leader\n\t\tnewLeader, err := t.Leader()\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"SendHeartbeat find leader: %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tif err := stream.Send(&master_pb.HeartbeatResponse{\n\t\t\tLeader: newLeader,\n\t\t}); err != nil {\n\t\t\tglog.Warningf(\"SendHeartbeat.Send response to to %s:%d %v\", dn.Ip, dn.Port, err)\n\t\t\treturn err\n\t\t}\n\t}\n}\n\n\/\/ KeepConnected keep a stream gRPC call to the master. Used by clients to know the master is up.\n\/\/ And clients gets the up-to-date list of volume locations\nfunc (ms *MasterServer) KeepConnected(stream master_pb.Seaweed_KeepConnectedServer) error {\n\n\treq, err := stream.Recv()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !ms.Topo.IsLeader() {\n\t\treturn ms.informNewLeader(stream)\n\t}\n\n\t\/\/ remember client address\n\tctx := stream.Context()\n\t\/\/ fmt.Printf(\"FromContext %+v\\n\", ctx)\n\tpr, ok := peer.FromContext(ctx)\n\tif !ok {\n\t\tglog.Error(\"failed to get peer from ctx\")\n\t\treturn fmt.Errorf(\"failed to get peer from ctx\")\n\t}\n\tif pr.Addr == net.Addr(nil) {\n\t\tglog.Error(\"failed to get peer address\")\n\t\treturn fmt.Errorf(\"failed to get peer address\")\n\t}\n\n\tclientName := req.Name + pr.Addr.String()\n\tglog.V(0).Infof(\"+ client %v\", clientName)\n\n\tmessageChan := make(chan *master_pb.VolumeLocation)\n\tstopChan := make(chan bool)\n\n\tms.clientChansLock.Lock()\n\tms.clientChans[clientName] = messageChan\n\tms.clientChansLock.Unlock()\n\n\tdefer func() {\n\t\tglog.V(0).Infof(\"- client %v\", clientName)\n\t\tms.clientChansLock.Lock()\n\t\tdelete(ms.clientChans, clientName)\n\t\tms.clientChansLock.Unlock()\n\t}()\n\n\tfor _, message := range ms.Topo.ToVolumeLocations() {\n\t\tif err := stream.Send(message); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\t_, err := stream.Recv()\n\t\t\tif err != nil {\n\t\t\t\tglog.V(2).Infof(\"- client %v: %v\", clientName, err)\n\t\t\t\tstopChan <- true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\tticker := time.NewTicker(5 * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase message := <-messageChan:\n\t\t\tif err := stream.Send(message); err != nil {\n\t\t\t\tglog.V(0).Infof(\"=> client %v: %+v\", clientName, message)\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\tif !ms.Topo.IsLeader() {\n\t\t\t\treturn ms.informNewLeader(stream)\n\t\t\t}\n\t\tcase <-stopChan:\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (ms *MasterServer) informNewLeader(stream master_pb.Seaweed_KeepConnectedServer) error {\n\tleader, err := ms.Topo.Leader()\n\tif err != nil {\n\t\tglog.Errorf(\"topo leader: %v\", err)\n\t\treturn raft.NotLeaderError\n\t}\n\tif err := stream.Send(&master_pb.VolumeLocation{\n\t\tLeader: leader,\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"testing\"\nimport \"github.com\/stretchr\/testify\/assert\"\n\ntype ReadWriter struct {\n\t\/\/Reader func(p []byte) (n int, err error)\n\t\/\/Writer func(p []byte) (n int, err error)\n\t\/\/Closer func() error\n\n\tPipe chan []byte\n}\n\nfunc NewReadWriter() *ReadWriter {\n\treturn &ReadWriter{\n\t\tmake(chan []byte),\n\t}\n}\n\nfunc (rw *ReadWriter) Read(p []byte) (n int, err error) {\n\tinput := <-rw.Pipe\n\tcopy(p, input)\n\treturn len(input), nil\n}\n\nfunc (rw *ReadWriter) Write(p []byte) (n int, err error) {\n\trw.Pipe <- p\n\treturn len(p), nil\n}\n\nfunc (rw *ReadWriter) Close() (err error) {\n\treturn nil\n}\n\nfunc TestManageUserInput(t *testing.T) {\n\ttests := [][]byte{\n\t\t[]byte(\"foobar\"),\n\t\t[]byte(\" \"),\n\t\t[]byte(\"\\n\"),\n\t}\n\n\tmockUser := NewReadWriter()\n\n\tsub, unsub := ManageUserInput(mockUser)\n\n\tcmdAMock := NewReadWriter()\n\tcmdA := &Cmd{\n\t\tStdin: cmdAMock,\n\t}\n\n\tsub <- cmdA\n\n\tfor i := range tests {\n\t\tmockUser.Pipe <- tests[i]\n\t\tassert.Equal(t, tests[i], <-cmdAMock.Pipe)\n\t}\n\n\tunsub <- cmdA\n}\n<commit_msg>expand test<commit_after>package main\n\nimport \"testing\"\nimport \"github.com\/stretchr\/testify\/assert\"\n\ntype ReadWriter struct {\n\t\/\/Reader func(p []byte) (n int, err error)\n\t\/\/Writer func(p []byte) (n int, err error)\n\t\/\/Closer func() error\n\n\tPipe chan []byte\n}\n\nfunc NewReadWriter() *ReadWriter {\n\treturn &ReadWriter{\n\t\t\/\/ Write() below sends to this channel. Multiple ReadWriters are written to\n\t\t\/\/ in the below test. Because the writes happen in a random order (in ManageUserInput()\n\t\t\/\/ range), and we receive in order, this can result in a block.\n\t\t\/\/ Buffer the channel to avoid the block.\n\t\tmake(chan []byte, 1),\n\t}\n}\n\nfunc (rw *ReadWriter) Read(p []byte) (n int, err error) {\n\tinput := <-rw.Pipe\n\tcopy(p, input)\n\treturn len(input), nil\n}\n\nfunc (rw *ReadWriter) Write(p []byte) (n int, err error) {\n\trw.Pipe <- p\n\treturn len(p), nil\n}\n\nfunc (rw *ReadWriter) Close() (err error) {\n\treturn nil\n}\n\n\/\/ Add and remove multiple processes, input should always reach all subscribed\n\/\/ processed.\nfunc TestManageUserInput(t *testing.T) {\n\ttests := [][]byte{\n\t\t[]byte(\"foo bar\"),\n\t\t[]byte(\"¿\\t\\n?\"),\n\t\t[]byte(\"\\xbd\\xb2\\x3d\\xbc\\x20\\xe2\\x8c\\x98\"), \/\/ malformed utf-8\n\t\t[]byte(\"\\x18\\x1f\\x00\"), \/\/ control chars and null\n\t}\n\n\tmockUser := NewReadWriter()\n\tsub, unsub := ManageUserInput(mockUser)\n\n\t\/\/ Create and add first process\n\tcmdAMock := NewReadWriter()\n\tcmdA := &Cmd{\n\t\tStdin: cmdAMock,\n\t}\n\tsub <- cmdA\n\n\t\/\/ Test one process\n\tfor i := range tests {\n\t\tmockUser.Pipe <- tests[i]\n\t\tassert.Equal(t, tests[i], <-cmdAMock.Pipe)\n\t}\n\n\t\/\/ Add second process\n\tcmdBMock := NewReadWriter()\n\tcmdB := &Cmd{\n\t\tStdin: cmdBMock,\n\t}\n\tsub <- cmdB\n\n\t\/\/ Test both processes. Note input is only sent once\n\tfor i := range tests {\n\t\tmockUser.Pipe <- tests[i]\n\t\tassert.Equal(t, tests[i], <-cmdAMock.Pipe)\n\t\tassert.Equal(t, tests[i], <-cmdBMock.Pipe)\n\t}\n\n\t\/\/ Add third proccess\n\tcmdCMock := NewReadWriter()\n\tcmdC := &Cmd{\n\t\tStdin: cmdCMock,\n\t}\n\tsub <- cmdC\n\n\t\/\/ Test all three.\n\tfor i := range tests {\n\t\tmockUser.Pipe <- tests[i]\n\t\tassert.Equal(t, tests[i], <-cmdAMock.Pipe)\n\t\tassert.Equal(t, tests[i], <-cmdBMock.Pipe)\n\t\tassert.Equal(t, tests[i], <-cmdCMock.Pipe)\n\t}\n\n\tunsub <- cmdA\n\t\/\/ Close to check that no input is routed to the removed process\n\tclose(cmdAMock.Pipe)\n\n\tfor i := range tests {\n\t\tmockUser.Pipe <- tests[i]\n\t\tassert.Equal(t, tests[i], <-cmdBMock.Pipe)\n\t\tassert.Equal(t, tests[i], <-cmdCMock.Pipe)\n\t}\n\n\tunsub <- cmdB\n\tunsub <- cmdC\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage jujuc\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/juju\/cmd\"\n\t\"launchpad.net\/gnuflag\"\n)\n\n\/\/ ActionGetCommand implements the relation-get command.\ntype ActionGetCommand struct {\n\tcmd.CommandBase\n\tctx Context\n\tkeys []string\n\tresponse interface{}\n\tout cmd.Output\n}\n\nfunc NewActionGetCommand(ctx Context) cmd.Command {\n\treturn &ActionGetCommand{ctx: ctx}\n}\n\nfunc (c *ActionGetCommand) Info() *cmd.Info {\n\tdoc := `\naction-get will print the value of the parameter at the given key, serialized\nas YAML. If multiple keys are passed, action-get will recurse into the param\nmap as needed.\n`\n\treturn &cmd.Info{\n\t\tName: \"action-get\",\n\t\tArgs: \"[<key>[.<key>.<key>...]]\",\n\t\tPurpose: \"get action parameters\",\n\t\tDoc: doc,\n\t}\n}\n\nfunc (c *ActionGetCommand) SetFlags(f *gnuflag.FlagSet) {\n\tc.out.AddFlags(f, \"smart\", cmd.DefaultFormatters)\n}\n\nfunc (c *ActionGetCommand) Init(args []string) error {\n\tif len(args) > 0 {\n\t\terr := cmd.CheckEmpty(args[1:])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.keys = strings.Split(args[0], \".\")\n\t}\n\treturn nil\n}\n\nfunc recurseMapOnKeys(keys []string, params map[string]interface{}) (interface{}, bool) {\n\tkey, rest := keys[0], keys[1:]\n\tanswer, ok := params[key]\n\n\tif len(rest) == 0 {\n\t\treturn answer, ok\n\t} else if ok {\n\t\tswitch typed := answer.(type) {\n\t\tcase map[string]interface{}:\n\t\t\treturn recurseMapOnKeys(keys[1:], typed)\n\t\tcase map[interface{}]interface{}:\n\t\t\tm := make(map[string]interface{})\n\t\t\tfor k, v := range typed {\n\t\t\t\tif tK, ok := k.(string); ok {\n\t\t\t\t\tm[tK] = v\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, false\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn recurseMapOnKeys(keys[1:], m)\n\t\tdefault:\n\t\t\treturn nil, false\n\t\t}\n\t} else {\n\t\treturn nil, false\n\t}\n\n\treturn nil, false\n}\n\nfunc (c *ActionGetCommand) Run(ctx *cmd.Context) error {\n\tparams := c.ctx.ActionParams()\n\n\tvar answer interface{}\n\n\tif len(c.keys) == 0 {\n\t\tanswer = params\n\t} else {\n\t\tanswer, _ = recurseMapOnKeys(c.keys, params)\n\t}\n\n\treturn c.out.Write(ctx, answer)\n}\n<commit_msg>Fixed incorrect comment on action-get.go<commit_after>\/\/ Copyright 2014 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage jujuc\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/juju\/cmd\"\n\t\"launchpad.net\/gnuflag\"\n)\n\n\/\/ ActionGetCommand implements the action-get command.\ntype ActionGetCommand struct {\n\tcmd.CommandBase\n\tctx Context\n\tkeys []string\n\tresponse interface{}\n\tout cmd.Output\n}\n\nfunc NewActionGetCommand(ctx Context) cmd.Command {\n\treturn &ActionGetCommand{ctx: ctx}\n}\n\nfunc (c *ActionGetCommand) Info() *cmd.Info {\n\tdoc := `\naction-get will print the value of the parameter at the given key, serialized\nas YAML. If multiple keys are passed, action-get will recurse into the param\nmap as needed.\n`\n\treturn &cmd.Info{\n\t\tName: \"action-get\",\n\t\tArgs: \"[<key>[.<key>.<key>...]]\",\n\t\tPurpose: \"get action parameters\",\n\t\tDoc: doc,\n\t}\n}\n\nfunc (c *ActionGetCommand) SetFlags(f *gnuflag.FlagSet) {\n\tc.out.AddFlags(f, \"smart\", cmd.DefaultFormatters)\n}\n\nfunc (c *ActionGetCommand) Init(args []string) error {\n\tif len(args) > 0 {\n\t\terr := cmd.CheckEmpty(args[1:])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.keys = strings.Split(args[0], \".\")\n\t}\n\treturn nil\n}\n\nfunc recurseMapOnKeys(keys []string, params map[string]interface{}) (interface{}, bool) {\n\tkey, rest := keys[0], keys[1:]\n\tanswer, ok := params[key]\n\n\tif len(rest) == 0 {\n\t\treturn answer, ok\n\t} else if ok {\n\t\tswitch typed := answer.(type) {\n\t\tcase map[string]interface{}:\n\t\t\treturn recurseMapOnKeys(keys[1:], typed)\n\t\tcase map[interface{}]interface{}:\n\t\t\tm := make(map[string]interface{})\n\t\t\tfor k, v := range typed {\n\t\t\t\tif tK, ok := k.(string); ok {\n\t\t\t\t\tm[tK] = v\n\t\t\t\t} else {\n\t\t\t\t\treturn nil, false\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn recurseMapOnKeys(keys[1:], m)\n\t\tdefault:\n\t\t\treturn nil, false\n\t\t}\n\t} else {\n\t\treturn nil, false\n\t}\n\n\treturn nil, false\n}\n\nfunc (c *ActionGetCommand) Run(ctx *cmd.Context) error {\n\tparams := c.ctx.ActionParams()\n\n\tvar answer interface{}\n\n\tif len(c.keys) == 0 {\n\t\tanswer = params\n\t} else {\n\t\tanswer, _ = recurseMapOnKeys(c.keys, params)\n\t}\n\n\treturn c.out.Write(ctx, answer)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage spdy\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/proxy\"\n\tapierrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/serializer\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/httpstream\"\n\tutilnet \"k8s.io\/apimachinery\/pkg\/util\/net\"\n\t\"k8s.io\/apimachinery\/third_party\/forked\/golang\/netutil\"\n)\n\n\/\/ SpdyRoundTripper knows how to upgrade an HTTP request to one that supports\n\/\/ multiplexed streams. After RoundTrip() is invoked, Conn will be set\n\/\/ and usable. SpdyRoundTripper implements the UpgradeRoundTripper interface.\ntype SpdyRoundTripper struct {\n\t\/\/tlsConfig holds the TLS configuration settings to use when connecting\n\t\/\/to the remote server.\n\ttlsConfig *tls.Config\n\n\t\/* TODO according to http:\/\/golang.org\/pkg\/net\/http\/#RoundTripper, a RoundTripper\n\t must be safe for use by multiple concurrent goroutines. If this is absolutely\n\t necessary, we could keep a map from http.Request to net.Conn. In practice,\n\t a client will create an http.Client, set the transport to a new insteace of\n\t SpdyRoundTripper, and use it a single time, so this hopefully won't be an issue.\n\t*\/\n\t\/\/ conn is the underlying network connection to the remote server.\n\tconn net.Conn\n\n\t\/\/ Dialer is the dialer used to connect. Used if non-nil.\n\tDialer *net.Dialer\n\n\t\/\/ proxier knows which proxy to use given a request, defaults to http.ProxyFromEnvironment\n\t\/\/ Used primarily for mocking the proxy discovery in tests.\n\tproxier func(req *http.Request) (*url.URL, error)\n\n\t\/\/ pingPeriod is a period for sending Ping frames over established\n\t\/\/ connections.\n\tpingPeriod time.Duration\n}\n\nvar _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{}\nvar _ httpstream.UpgradeRoundTripper = &SpdyRoundTripper{}\nvar _ utilnet.Dialer = &SpdyRoundTripper{}\n\n\/\/ NewRoundTripper creates a new SpdyRoundTripper that will use the specified\n\/\/ tlsConfig.\nfunc NewRoundTripper(tlsConfig *tls.Config) *SpdyRoundTripper {\n\treturn NewRoundTripperWithConfig(RoundTripperConfig{\n\t\tTLS: tlsConfig,\n\t})\n}\n\n\/\/ NewRoundTripperWithProxy creates a new SpdyRoundTripper that will use the\n\/\/ specified tlsConfig and proxy func.\nfunc NewRoundTripperWithProxy(tlsConfig *tls.Config, proxier func(*http.Request) (*url.URL, error)) *SpdyRoundTripper {\n\treturn NewRoundTripperWithConfig(RoundTripperConfig{\n\t\tTLS: tlsConfig,\n\t\tProxier: proxier,\n\t})\n}\n\n\/\/ NewRoundTripperWithConfig creates a new SpdyRoundTripper with the specified\n\/\/ configuration.\nfunc NewRoundTripperWithConfig(cfg RoundTripperConfig) *SpdyRoundTripper {\n\tif cfg.Proxier == nil {\n\t\tcfg.Proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)\n\t}\n\treturn &SpdyRoundTripper{\n\t\ttlsConfig: cfg.TLS,\n\t\tproxier: cfg.Proxier,\n\t\tpingPeriod: cfg.PingPeriod,\n\t}\n}\n\n\/\/ RoundTripperConfig is a set of options for an SpdyRoundTripper.\ntype RoundTripperConfig struct {\n\t\/\/ TLS configuration used by the round tripper.\n\tTLS *tls.Config\n\t\/\/ Proxier is a proxy function invoked on each request. Optional.\n\tProxier func(*http.Request) (*url.URL, error)\n\t\/\/ PingPeriod is a period for sending SPDY Pings on the connection.\n\t\/\/ Optional.\n\tPingPeriod time.Duration\n}\n\n\/\/ TLSClientConfig implements pkg\/util\/net.TLSClientConfigHolder for proper TLS checking during\n\/\/ proxying with a spdy roundtripper.\nfunc (s *SpdyRoundTripper) TLSClientConfig() *tls.Config {\n\treturn s.tlsConfig\n}\n\n\/\/ Dial implements k8s.io\/apimachinery\/pkg\/util\/net.Dialer.\nfunc (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) {\n\tconn, err := s.dial(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := req.Write(conn); err != nil {\n\t\tconn.Close()\n\t\treturn nil, err\n\t}\n\n\treturn conn, nil\n}\n\n\/\/ dial dials the host specified by req, using TLS if appropriate, optionally\n\/\/ using a proxy server if one is configured via environment variables.\nfunc (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {\n\tproxyURL, err := s.proxier(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif proxyURL == nil {\n\t\treturn s.dialWithoutProxy(req.Context(), req.URL)\n\t}\n\n\tswitch proxyURL.Scheme {\n\tcase \"socks5\":\n\t\treturn s.dialWithSocks5Proxy(req, proxyURL)\n\tcase \"https\", \"http\", \"\":\n\t\treturn s.dialWithHttpProxy(req, proxyURL)\n\t}\n\n\treturn nil, fmt.Errorf(\"proxy URL scheme not supported: %s\", proxyURL.Scheme)\n}\n\n\/\/ dialWithHttpProxy dials the host specified by url through an http or an https proxy.\nfunc (s *SpdyRoundTripper) dialWithHttpProxy(req *http.Request, proxyURL *url.URL) (net.Conn, error) {\n\t\/\/ ensure we use a canonical host with proxyReq\n\ttargetHost := netutil.CanonicalAddr(req.URL)\n\n\t\/\/ proxying logic adapted from http:\/\/blog.h6t.eu\/post\/74098062923\/golang-websocket-with-http-proxy-support\n\tproxyReq := http.Request{\n\t\tMethod: \"CONNECT\",\n\t\tURL: &url.URL{},\n\t\tHost: targetHost,\n\t}\n\n\tproxyReq = *proxyReq.WithContext(req.Context())\n\n\tif pa := s.proxyAuth(proxyURL); pa != \"\" {\n\t\tproxyReq.Header = http.Header{}\n\t\tproxyReq.Header.Set(\"Proxy-Authorization\", pa)\n\t}\n\n\tproxyDialConn, err := s.dialWithoutProxy(proxyReq.Context(), proxyURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/nolint:staticcheck \/\/ SA1019 ignore deprecated httputil.NewProxyClientConn\n\tproxyClientConn := httputil.NewProxyClientConn(proxyDialConn, nil)\n\t_, err = proxyClientConn.Do(&proxyReq)\n\t\/\/nolint:staticcheck \/\/ SA1019 ignore deprecated httputil.ErrPersistEOF: it might be\n\t\/\/ returned from the invocation of proxyClientConn.Do\n\tif err != nil && err != httputil.ErrPersistEOF {\n\t\treturn nil, err\n\t}\n\n\trwc, _ := proxyClientConn.Hijack()\n\n\tif req.URL.Scheme == \"https\" {\n\t\treturn s.tlsConn(proxyReq.Context(), rwc, targetHost)\n\t}\n\treturn rwc, nil\n}\n\n\/\/ dialWithSocks5Proxy dials the host specified by url through a socks5 proxy.\nfunc (s *SpdyRoundTripper) dialWithSocks5Proxy(req *http.Request, proxyURL *url.URL) (net.Conn, error) {\n\t\/\/ ensure we use a canonical host with proxyReq\n\ttargetHost := netutil.CanonicalAddr(req.URL)\n\tproxyDialAddr := netutil.CanonicalAddr(proxyURL)\n\n\tvar auth *proxy.Auth\n\tif proxyURL.User != nil {\n\t\tpass, _ := proxyURL.User.Password()\n\t\tauth = &proxy.Auth{\n\t\t\tUser: proxyURL.User.Username(),\n\t\t\tPassword: pass,\n\t\t}\n\t}\n\n\tdialer := s.Dialer\n\tif dialer == nil {\n\t\tdialer = &net.Dialer{\n\t\t\tTimeout: 30 * time.Second,\n\t\t}\n\t}\n\n\tproxyDialer, err := proxy.SOCKS5(\"tcp\", proxyDialAddr, auth, dialer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ According to the implementation of proxy.SOCKS5, the type assertion will always succeed\n\tcontextDialer, ok := proxyDialer.(proxy.ContextDialer)\n\tif !ok {\n\t\treturn nil, errors.New(\"SOCKS5 Dialer must implement ContextDialer\")\n\t}\n\n\tproxyDialConn, err := contextDialer.DialContext(req.Context(), \"tcp\", targetHost)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif req.URL.Scheme == \"https\" {\n\t\treturn s.tlsConn(req.Context(), proxyDialConn, targetHost)\n\t}\n\treturn proxyDialConn, nil\n}\n\n\/\/ tlsConn returns a TLS client side connection using rwc as the underlying transport.\nfunc (s *SpdyRoundTripper) tlsConn(ctx context.Context, rwc net.Conn, targetHost string) (net.Conn, error) {\n\n\thost, _, err := net.SplitHostPort(targetHost)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttlsConfig := s.tlsConfig\n\tswitch {\n\tcase tlsConfig == nil:\n\t\ttlsConfig = &tls.Config{ServerName: host}\n\tcase len(tlsConfig.ServerName) == 0:\n\t\ttlsConfig = tlsConfig.Clone()\n\t\ttlsConfig.ServerName = host\n\t}\n\n\ttlsConn := tls.Client(rwc, tlsConfig)\n\n\t\/\/ need to manually call Handshake() so we can call VerifyHostname() below\n\tif err := tlsConn.HandshakeContext(ctx); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Return if we were configured to skip validation\n\tif tlsConfig.InsecureSkipVerify {\n\t\treturn tlsConn, nil\n\t}\n\n\tif err := tlsConn.VerifyHostname(tlsConfig.ServerName); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn tlsConn, nil\n}\n\n\/\/ dialWithoutProxy dials the host specified by url, using TLS if appropriate.\nfunc (s *SpdyRoundTripper) dialWithoutProxy(ctx context.Context, url *url.URL) (net.Conn, error) {\n\tdialAddr := netutil.CanonicalAddr(url)\n\n\tif url.Scheme == \"http\" {\n\t\tif s.Dialer == nil {\n\t\t\tvar d net.Dialer\n\t\t\treturn d.DialContext(ctx, \"tcp\", dialAddr)\n\t\t} else {\n\t\t\treturn s.Dialer.DialContext(ctx, \"tcp\", dialAddr)\n\t\t}\n\t}\n\n\t\/\/ TODO validate the TLSClientConfig is set up?\n\tvar conn *tls.Conn\n\tvar err error\n\tif s.Dialer == nil {\n\t\tconn, err = tls.Dial(\"tcp\", dialAddr, s.tlsConfig)\n\t} else {\n\t\tconn, err = tls.DialWithDialer(s.Dialer, \"tcp\", dialAddr, s.tlsConfig)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Return if we were configured to skip validation\n\tif s.tlsConfig != nil && s.tlsConfig.InsecureSkipVerify {\n\t\treturn conn, nil\n\t}\n\n\thost, _, err := net.SplitHostPort(dialAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif s.tlsConfig != nil && len(s.tlsConfig.ServerName) > 0 {\n\t\thost = s.tlsConfig.ServerName\n\t}\n\terr = conn.VerifyHostname(host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn conn, nil\n}\n\n\/\/ proxyAuth returns, for a given proxy URL, the value to be used for the Proxy-Authorization header\nfunc (s *SpdyRoundTripper) proxyAuth(proxyURL *url.URL) string {\n\tif proxyURL == nil || proxyURL.User == nil {\n\t\treturn \"\"\n\t}\n\tcredentials := proxyURL.User.String()\n\tencodedAuth := base64.StdEncoding.EncodeToString([]byte(credentials))\n\treturn fmt.Sprintf(\"Basic %s\", encodedAuth)\n}\n\n\/\/ RoundTrip executes the Request and upgrades it. After a successful upgrade,\n\/\/ clients may call SpdyRoundTripper.Connection() to retrieve the upgraded\n\/\/ connection.\nfunc (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {\n\theader := utilnet.CloneHeader(req.Header)\n\theader.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)\n\theader.Add(httpstream.HeaderUpgrade, HeaderSpdy31)\n\n\tvar (\n\t\tconn net.Conn\n\t\trawResponse []byte\n\t\terr error\n\t)\n\n\tclone := utilnet.CloneRequest(req)\n\tclone.Header = header\n\tconn, err = s.Dial(clone)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponseReader := bufio.NewReader(\n\t\tio.MultiReader(\n\t\t\tbytes.NewBuffer(rawResponse),\n\t\t\tconn,\n\t\t),\n\t)\n\n\tresp, err := http.ReadResponse(responseReader, nil)\n\tif err != nil {\n\t\tif conn != nil {\n\t\t\tconn.Close()\n\t\t}\n\t\treturn nil, err\n\t}\n\n\ts.conn = conn\n\n\treturn resp, nil\n}\n\n\/\/ NewConnection validates the upgrade response, creating and returning a new\n\/\/ httpstream.Connection if there were no errors.\nfunc (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connection, error) {\n\tconnectionHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderConnection))\n\tupgradeHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderUpgrade))\n\tif (resp.StatusCode != http.StatusSwitchingProtocols) || !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {\n\t\tdefer resp.Body.Close()\n\t\tresponseError := \"\"\n\t\tresponseErrorBytes, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tresponseError = \"unable to read error from server response\"\n\t\t} else {\n\t\t\t\/\/ TODO: I don't belong here, I should be abstracted from this class\n\t\t\tif obj, _, err := statusCodecs.UniversalDecoder().Decode(responseErrorBytes, nil, &metav1.Status{}); err == nil {\n\t\t\t\tif status, ok := obj.(*metav1.Status); ok {\n\t\t\t\t\treturn nil, &apierrors.StatusError{ErrStatus: *status}\n\t\t\t\t}\n\t\t\t}\n\t\t\tresponseError = string(responseErrorBytes)\n\t\t\tresponseError = strings.TrimSpace(responseError)\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"unable to upgrade connection: %s\", responseError)\n\t}\n\n\treturn NewClientConnectionWithPings(s.conn, s.pingPeriod)\n}\n\n\/\/ statusScheme is private scheme for the decoding here until someone fixes the TODO in NewConnection\nvar statusScheme = runtime.NewScheme()\n\n\/\/ ParameterCodec knows about query parameters used with the meta v1 API spec.\nvar statusCodecs = serializer.NewCodecFactory(statusScheme)\n\nfunc init() {\n\tstatusScheme.AddUnversionedTypes(metav1.SchemeGroupVersion,\n\t\t&metav1.Status{},\n\t)\n}\n<commit_msg>spdyroundrippter: close the connection if tls handshake fails<commit_after>\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage spdy\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/proxy\"\n\tapierrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/serializer\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/httpstream\"\n\tutilnet \"k8s.io\/apimachinery\/pkg\/util\/net\"\n\t\"k8s.io\/apimachinery\/third_party\/forked\/golang\/netutil\"\n)\n\n\/\/ SpdyRoundTripper knows how to upgrade an HTTP request to one that supports\n\/\/ multiplexed streams. After RoundTrip() is invoked, Conn will be set\n\/\/ and usable. SpdyRoundTripper implements the UpgradeRoundTripper interface.\ntype SpdyRoundTripper struct {\n\t\/\/tlsConfig holds the TLS configuration settings to use when connecting\n\t\/\/to the remote server.\n\ttlsConfig *tls.Config\n\n\t\/* TODO according to http:\/\/golang.org\/pkg\/net\/http\/#RoundTripper, a RoundTripper\n\t must be safe for use by multiple concurrent goroutines. If this is absolutely\n\t necessary, we could keep a map from http.Request to net.Conn. In practice,\n\t a client will create an http.Client, set the transport to a new insteace of\n\t SpdyRoundTripper, and use it a single time, so this hopefully won't be an issue.\n\t*\/\n\t\/\/ conn is the underlying network connection to the remote server.\n\tconn net.Conn\n\n\t\/\/ Dialer is the dialer used to connect. Used if non-nil.\n\tDialer *net.Dialer\n\n\t\/\/ proxier knows which proxy to use given a request, defaults to http.ProxyFromEnvironment\n\t\/\/ Used primarily for mocking the proxy discovery in tests.\n\tproxier func(req *http.Request) (*url.URL, error)\n\n\t\/\/ pingPeriod is a period for sending Ping frames over established\n\t\/\/ connections.\n\tpingPeriod time.Duration\n}\n\nvar _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{}\nvar _ httpstream.UpgradeRoundTripper = &SpdyRoundTripper{}\nvar _ utilnet.Dialer = &SpdyRoundTripper{}\n\n\/\/ NewRoundTripper creates a new SpdyRoundTripper that will use the specified\n\/\/ tlsConfig.\nfunc NewRoundTripper(tlsConfig *tls.Config) *SpdyRoundTripper {\n\treturn NewRoundTripperWithConfig(RoundTripperConfig{\n\t\tTLS: tlsConfig,\n\t})\n}\n\n\/\/ NewRoundTripperWithProxy creates a new SpdyRoundTripper that will use the\n\/\/ specified tlsConfig and proxy func.\nfunc NewRoundTripperWithProxy(tlsConfig *tls.Config, proxier func(*http.Request) (*url.URL, error)) *SpdyRoundTripper {\n\treturn NewRoundTripperWithConfig(RoundTripperConfig{\n\t\tTLS: tlsConfig,\n\t\tProxier: proxier,\n\t})\n}\n\n\/\/ NewRoundTripperWithConfig creates a new SpdyRoundTripper with the specified\n\/\/ configuration.\nfunc NewRoundTripperWithConfig(cfg RoundTripperConfig) *SpdyRoundTripper {\n\tif cfg.Proxier == nil {\n\t\tcfg.Proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)\n\t}\n\treturn &SpdyRoundTripper{\n\t\ttlsConfig: cfg.TLS,\n\t\tproxier: cfg.Proxier,\n\t\tpingPeriod: cfg.PingPeriod,\n\t}\n}\n\n\/\/ RoundTripperConfig is a set of options for an SpdyRoundTripper.\ntype RoundTripperConfig struct {\n\t\/\/ TLS configuration used by the round tripper.\n\tTLS *tls.Config\n\t\/\/ Proxier is a proxy function invoked on each request. Optional.\n\tProxier func(*http.Request) (*url.URL, error)\n\t\/\/ PingPeriod is a period for sending SPDY Pings on the connection.\n\t\/\/ Optional.\n\tPingPeriod time.Duration\n}\n\n\/\/ TLSClientConfig implements pkg\/util\/net.TLSClientConfigHolder for proper TLS checking during\n\/\/ proxying with a spdy roundtripper.\nfunc (s *SpdyRoundTripper) TLSClientConfig() *tls.Config {\n\treturn s.tlsConfig\n}\n\n\/\/ Dial implements k8s.io\/apimachinery\/pkg\/util\/net.Dialer.\nfunc (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) {\n\tconn, err := s.dial(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := req.Write(conn); err != nil {\n\t\tconn.Close()\n\t\treturn nil, err\n\t}\n\n\treturn conn, nil\n}\n\n\/\/ dial dials the host specified by req, using TLS if appropriate, optionally\n\/\/ using a proxy server if one is configured via environment variables.\nfunc (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {\n\tproxyURL, err := s.proxier(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif proxyURL == nil {\n\t\treturn s.dialWithoutProxy(req.Context(), req.URL)\n\t}\n\n\tswitch proxyURL.Scheme {\n\tcase \"socks5\":\n\t\treturn s.dialWithSocks5Proxy(req, proxyURL)\n\tcase \"https\", \"http\", \"\":\n\t\treturn s.dialWithHttpProxy(req, proxyURL)\n\t}\n\n\treturn nil, fmt.Errorf(\"proxy URL scheme not supported: %s\", proxyURL.Scheme)\n}\n\n\/\/ dialWithHttpProxy dials the host specified by url through an http or an https proxy.\nfunc (s *SpdyRoundTripper) dialWithHttpProxy(req *http.Request, proxyURL *url.URL) (net.Conn, error) {\n\t\/\/ ensure we use a canonical host with proxyReq\n\ttargetHost := netutil.CanonicalAddr(req.URL)\n\n\t\/\/ proxying logic adapted from http:\/\/blog.h6t.eu\/post\/74098062923\/golang-websocket-with-http-proxy-support\n\tproxyReq := http.Request{\n\t\tMethod: \"CONNECT\",\n\t\tURL: &url.URL{},\n\t\tHost: targetHost,\n\t}\n\n\tproxyReq = *proxyReq.WithContext(req.Context())\n\n\tif pa := s.proxyAuth(proxyURL); pa != \"\" {\n\t\tproxyReq.Header = http.Header{}\n\t\tproxyReq.Header.Set(\"Proxy-Authorization\", pa)\n\t}\n\n\tproxyDialConn, err := s.dialWithoutProxy(proxyReq.Context(), proxyURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/nolint:staticcheck \/\/ SA1019 ignore deprecated httputil.NewProxyClientConn\n\tproxyClientConn := httputil.NewProxyClientConn(proxyDialConn, nil)\n\t_, err = proxyClientConn.Do(&proxyReq)\n\t\/\/nolint:staticcheck \/\/ SA1019 ignore deprecated httputil.ErrPersistEOF: it might be\n\t\/\/ returned from the invocation of proxyClientConn.Do\n\tif err != nil && err != httputil.ErrPersistEOF {\n\t\treturn nil, err\n\t}\n\n\trwc, _ := proxyClientConn.Hijack()\n\n\tif req.URL.Scheme == \"https\" {\n\t\treturn s.tlsConn(proxyReq.Context(), rwc, targetHost)\n\t}\n\treturn rwc, nil\n}\n\n\/\/ dialWithSocks5Proxy dials the host specified by url through a socks5 proxy.\nfunc (s *SpdyRoundTripper) dialWithSocks5Proxy(req *http.Request, proxyURL *url.URL) (net.Conn, error) {\n\t\/\/ ensure we use a canonical host with proxyReq\n\ttargetHost := netutil.CanonicalAddr(req.URL)\n\tproxyDialAddr := netutil.CanonicalAddr(proxyURL)\n\n\tvar auth *proxy.Auth\n\tif proxyURL.User != nil {\n\t\tpass, _ := proxyURL.User.Password()\n\t\tauth = &proxy.Auth{\n\t\t\tUser: proxyURL.User.Username(),\n\t\t\tPassword: pass,\n\t\t}\n\t}\n\n\tdialer := s.Dialer\n\tif dialer == nil {\n\t\tdialer = &net.Dialer{\n\t\t\tTimeout: 30 * time.Second,\n\t\t}\n\t}\n\n\tproxyDialer, err := proxy.SOCKS5(\"tcp\", proxyDialAddr, auth, dialer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ According to the implementation of proxy.SOCKS5, the type assertion will always succeed\n\tcontextDialer, ok := proxyDialer.(proxy.ContextDialer)\n\tif !ok {\n\t\treturn nil, errors.New(\"SOCKS5 Dialer must implement ContextDialer\")\n\t}\n\n\tproxyDialConn, err := contextDialer.DialContext(req.Context(), \"tcp\", targetHost)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif req.URL.Scheme == \"https\" {\n\t\treturn s.tlsConn(req.Context(), proxyDialConn, targetHost)\n\t}\n\treturn proxyDialConn, nil\n}\n\n\/\/ tlsConn returns a TLS client side connection using rwc as the underlying transport.\nfunc (s *SpdyRoundTripper) tlsConn(ctx context.Context, rwc net.Conn, targetHost string) (net.Conn, error) {\n\n\thost, _, err := net.SplitHostPort(targetHost)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttlsConfig := s.tlsConfig\n\tswitch {\n\tcase tlsConfig == nil:\n\t\ttlsConfig = &tls.Config{ServerName: host}\n\tcase len(tlsConfig.ServerName) == 0:\n\t\ttlsConfig = tlsConfig.Clone()\n\t\ttlsConfig.ServerName = host\n\t}\n\n\ttlsConn := tls.Client(rwc, tlsConfig)\n\n\t\/\/ need to manually call Handshake() so we can call VerifyHostname() below\n\tif err := tlsConn.HandshakeContext(ctx); err != nil {\n\t\ttlsConn.Close()\n\t\treturn nil, err\n\t}\n\n\t\/\/ Return if we were configured to skip validation\n\tif tlsConfig.InsecureSkipVerify {\n\t\treturn tlsConn, nil\n\t}\n\n\tif err := tlsConn.VerifyHostname(tlsConfig.ServerName); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn tlsConn, nil\n}\n\n\/\/ dialWithoutProxy dials the host specified by url, using TLS if appropriate.\nfunc (s *SpdyRoundTripper) dialWithoutProxy(ctx context.Context, url *url.URL) (net.Conn, error) {\n\tdialAddr := netutil.CanonicalAddr(url)\n\n\tif url.Scheme == \"http\" {\n\t\tif s.Dialer == nil {\n\t\t\tvar d net.Dialer\n\t\t\treturn d.DialContext(ctx, \"tcp\", dialAddr)\n\t\t} else {\n\t\t\treturn s.Dialer.DialContext(ctx, \"tcp\", dialAddr)\n\t\t}\n\t}\n\n\t\/\/ TODO validate the TLSClientConfig is set up?\n\tvar conn *tls.Conn\n\tvar err error\n\tif s.Dialer == nil {\n\t\tconn, err = tls.Dial(\"tcp\", dialAddr, s.tlsConfig)\n\t} else {\n\t\tconn, err = tls.DialWithDialer(s.Dialer, \"tcp\", dialAddr, s.tlsConfig)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Return if we were configured to skip validation\n\tif s.tlsConfig != nil && s.tlsConfig.InsecureSkipVerify {\n\t\treturn conn, nil\n\t}\n\n\thost, _, err := net.SplitHostPort(dialAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif s.tlsConfig != nil && len(s.tlsConfig.ServerName) > 0 {\n\t\thost = s.tlsConfig.ServerName\n\t}\n\terr = conn.VerifyHostname(host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn conn, nil\n}\n\n\/\/ proxyAuth returns, for a given proxy URL, the value to be used for the Proxy-Authorization header\nfunc (s *SpdyRoundTripper) proxyAuth(proxyURL *url.URL) string {\n\tif proxyURL == nil || proxyURL.User == nil {\n\t\treturn \"\"\n\t}\n\tcredentials := proxyURL.User.String()\n\tencodedAuth := base64.StdEncoding.EncodeToString([]byte(credentials))\n\treturn fmt.Sprintf(\"Basic %s\", encodedAuth)\n}\n\n\/\/ RoundTrip executes the Request and upgrades it. After a successful upgrade,\n\/\/ clients may call SpdyRoundTripper.Connection() to retrieve the upgraded\n\/\/ connection.\nfunc (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {\n\theader := utilnet.CloneHeader(req.Header)\n\theader.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)\n\theader.Add(httpstream.HeaderUpgrade, HeaderSpdy31)\n\n\tvar (\n\t\tconn net.Conn\n\t\trawResponse []byte\n\t\terr error\n\t)\n\n\tclone := utilnet.CloneRequest(req)\n\tclone.Header = header\n\tconn, err = s.Dial(clone)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponseReader := bufio.NewReader(\n\t\tio.MultiReader(\n\t\t\tbytes.NewBuffer(rawResponse),\n\t\t\tconn,\n\t\t),\n\t)\n\n\tresp, err := http.ReadResponse(responseReader, nil)\n\tif err != nil {\n\t\tif conn != nil {\n\t\t\tconn.Close()\n\t\t}\n\t\treturn nil, err\n\t}\n\n\ts.conn = conn\n\n\treturn resp, nil\n}\n\n\/\/ NewConnection validates the upgrade response, creating and returning a new\n\/\/ httpstream.Connection if there were no errors.\nfunc (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connection, error) {\n\tconnectionHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderConnection))\n\tupgradeHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderUpgrade))\n\tif (resp.StatusCode != http.StatusSwitchingProtocols) || !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {\n\t\tdefer resp.Body.Close()\n\t\tresponseError := \"\"\n\t\tresponseErrorBytes, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tresponseError = \"unable to read error from server response\"\n\t\t} else {\n\t\t\t\/\/ TODO: I don't belong here, I should be abstracted from this class\n\t\t\tif obj, _, err := statusCodecs.UniversalDecoder().Decode(responseErrorBytes, nil, &metav1.Status{}); err == nil {\n\t\t\t\tif status, ok := obj.(*metav1.Status); ok {\n\t\t\t\t\treturn nil, &apierrors.StatusError{ErrStatus: *status}\n\t\t\t\t}\n\t\t\t}\n\t\t\tresponseError = string(responseErrorBytes)\n\t\t\tresponseError = strings.TrimSpace(responseError)\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"unable to upgrade connection: %s\", responseError)\n\t}\n\n\treturn NewClientConnectionWithPings(s.conn, s.pingPeriod)\n}\n\n\/\/ statusScheme is private scheme for the decoding here until someone fixes the TODO in NewConnection\nvar statusScheme = runtime.NewScheme()\n\n\/\/ ParameterCodec knows about query parameters used with the meta v1 API spec.\nvar statusCodecs = serializer.NewCodecFactory(statusScheme)\n\nfunc init() {\n\tstatusScheme.AddUnversionedTypes(metav1.SchemeGroupVersion,\n\t\t&metav1.Status{},\n\t)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage factory\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/url\"\n\t\"path\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\tgrpcprom \"github.com\/grpc-ecosystem\/go-grpc-prometheus\"\n\t\"go.etcd.io\/etcd\/clientv3\"\n\t\"go.etcd.io\/etcd\/pkg\/transport\"\n\t\"google.golang.org\/grpc\"\n\n\tutilnet \"k8s.io\/apimachinery\/pkg\/util\/net\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/apiserver\/pkg\/server\/egressselector\"\n\t\"k8s.io\/apiserver\/pkg\/storage\"\n\t\"k8s.io\/apiserver\/pkg\/storage\/etcd3\"\n\t\"k8s.io\/apiserver\/pkg\/storage\/storagebackend\"\n\t\"k8s.io\/apiserver\/pkg\/storage\/value\"\n\t\"k8s.io\/component-base\/metrics\/legacyregistry\"\n)\n\n\/\/ The short keepalive timeout and interval have been chosen to aggressively\n\/\/ detect a failed etcd server without introducing much overhead.\nconst keepaliveTime = 30 * time.Second\nconst keepaliveTimeout = 10 * time.Second\n\n\/\/ dialTimeout is the timeout for failing to establish a connection.\n\/\/ It is set to 20 seconds as times shorter than that will cause TLS connections to fail\n\/\/ on heavily loaded arm64 CPUs (issue #64649)\nconst dialTimeout = 20 * time.Second\n\nfunc init() {\n\t\/\/ grpcprom auto-registers (via an init function) their client metrics, since we are opting out of\n\t\/\/ using the global prometheus registry and using our own wrapped global registry,\n\t\/\/ we need to explicitly register these metrics to our global registry here.\n\t\/\/ For reference: https:\/\/github.com\/kubernetes\/kubernetes\/pull\/81387\n\tlegacyregistry.RawMustRegister(grpcprom.DefaultClientMetrics)\n}\n\nfunc newETCD3HealthCheck(c storagebackend.Config) (func() error, error) {\n\t\/\/ constructing the etcd v3 client blocks and times out if etcd is not available.\n\t\/\/ retry in a loop in the background until we successfully create the client, storing the client or error encountered\n\n\tclientValue := &atomic.Value{}\n\n\tclientErrMsg := &atomic.Value{}\n\tclientErrMsg.Store(\"etcd client connection not yet established\")\n\n\tgo wait.PollUntil(time.Second, func() (bool, error) {\n\t\tclient, err := newETCD3Client(c.Transport)\n\t\tif err != nil {\n\t\t\tclientErrMsg.Store(err.Error())\n\t\t\treturn false, nil\n\t\t}\n\t\tclientValue.Store(client)\n\t\tclientErrMsg.Store(\"\")\n\t\treturn true, nil\n\t}, wait.NeverStop)\n\n\treturn func() error {\n\t\tif errMsg := clientErrMsg.Load().(string); len(errMsg) > 0 {\n\t\t\treturn fmt.Errorf(errMsg)\n\t\t}\n\t\tclient := clientValue.Load().(*clientv3.Client)\n\t\tctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)\n\t\tdefer cancel()\n\t\t\/\/ See https:\/\/github.com\/etcd-io\/etcd\/blob\/c57f8b3af865d1b531b979889c602ba14377420e\/etcdctl\/ctlv3\/command\/ep_command.go#L118\n\t\t_, err := client.Get(ctx, path.Join(\"\/\", c.Prefix, \"health\"))\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"error getting data from etcd: %v\", err)\n\t}, nil\n}\n\nfunc newETCD3Client(c storagebackend.TransportConfig) (*clientv3.Client, error) {\n\ttlsInfo := transport.TLSInfo{\n\t\tCertFile: c.CertFile,\n\t\tKeyFile: c.KeyFile,\n\t\tTrustedCAFile: c.TrustedCAFile,\n\t}\n\ttlsConfig, err := tlsInfo.ClientConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ NOTE: Client relies on nil tlsConfig\n\t\/\/ for non-secure connections, update the implicit variable\n\tif len(c.CertFile) == 0 && len(c.KeyFile) == 0 && len(c.TrustedCAFile) == 0 {\n\t\ttlsConfig = nil\n\t}\n\tnetworkContext := egressselector.Etcd.AsNetworkContext()\n\tvar egressDialer utilnet.DialFunc\n\tif c.EgressLookup != nil {\n\t\tegressDialer, err = c.EgressLookup(networkContext)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdialOptions := []grpc.DialOption{\n\t\tgrpc.WithBlock(), \/\/ block until the underlying connection is up\n\t\tgrpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),\n\t\tgrpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),\n\t}\n\tif egressDialer != nil {\n\t\tdialer := func(ctx context.Context, addr string) (net.Conn, error) {\n\t\t\tu, err := url.Parse(addr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn egressDialer(ctx, \"tcp\", u.Host)\n\t\t}\n\t\tdialOptions = append(dialOptions, grpc.WithContextDialer(dialer))\n\t}\n\tcfg := clientv3.Config{\n\t\tDialTimeout: dialTimeout,\n\t\tDialKeepAliveTime: keepaliveTime,\n\t\tDialKeepAliveTimeout: keepaliveTimeout,\n\t\tDialOptions: dialOptions,\n\t\tEndpoints: c.ServerList,\n\t\tTLS: tlsConfig,\n\t}\n\n\treturn clientv3.New(cfg)\n}\n\ntype runningCompactor struct {\n\tinterval time.Duration\n\tcancel context.CancelFunc\n\tclient *clientv3.Client\n\trefs int\n}\n\nvar (\n\tlock sync.Mutex\n\tcompactors = map[string]*runningCompactor{}\n)\n\n\/\/ startCompactorOnce start one compactor per transport. If the interval get smaller on repeated calls, the\n\/\/ compactor is replaced. A destroy func is returned. If all destroy funcs with the same transport are called,\n\/\/ the compactor is stopped.\nfunc startCompactorOnce(c storagebackend.TransportConfig, interval time.Duration) (func(), error) {\n\tlock.Lock()\n\tdefer lock.Unlock()\n\n\tkey := fmt.Sprintf(\"%v\", c) \/\/ gives: {[server1 server2] keyFile certFile caFile}\n\tif compactor, foundBefore := compactors[key]; !foundBefore || compactor.interval > interval {\n\t\tcompactorClient, err := newETCD3Client(c)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif foundBefore {\n\t\t\t\/\/ replace compactor\n\t\t\tcompactor.cancel()\n\t\t\tcompactor.client.Close()\n\t\t} else {\n\t\t\t\/\/ start new compactor\n\t\t\tcompactor = &runningCompactor{}\n\t\t\tcompactors[key] = compactor\n\t\t}\n\n\t\tctx, cancel := context.WithCancel(context.Background())\n\n\t\tcompactor.interval = interval\n\t\tcompactor.cancel = cancel\n\t\tcompactor.client = compactorClient\n\n\t\tetcd3.StartCompactor(ctx, compactorClient, interval)\n\t}\n\n\tcompactors[key].refs++\n\n\treturn func() {\n\t\tlock.Lock()\n\t\tdefer lock.Unlock()\n\n\t\tcompactor := compactors[key]\n\t\tcompactor.refs--\n\t\tif compactor.refs == 0 {\n\t\t\tcompactor.cancel()\n\t\t\tcompactor.client.Close()\n\t\t\tdelete(compactors, key)\n\t\t}\n\t}, nil\n}\n\nfunc newETCD3Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, error) {\n\tstopCompactor, err := startCompactorOnce(c.Transport, c.CompactionInterval)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tclient, err := newETCD3Client(c.Transport)\n\tif err != nil {\n\t\tstopCompactor()\n\t\treturn nil, nil, err\n\t}\n\n\tvar once sync.Once\n\tdestroyFunc := func() {\n\t\t\/\/ we know that storage destroy funcs are called multiple times (due to reuse in subresources).\n\t\t\/\/ Hence, we only destroy once.\n\t\t\/\/ TODO: fix duplicated storage destroy calls higher level\n\t\tonce.Do(func() {\n\t\t\tstopCompactor()\n\t\t\tclient.Close()\n\t\t})\n\t}\n\ttransformer := c.Transformer\n\tif transformer == nil {\n\t\ttransformer = value.IdentityTransformer\n\t}\n\treturn etcd3.New(client, c.Codec, c.Prefix, transformer, c.Paging), destroyFunc, nil\n}\n<commit_msg>UPSTREAM: <carry>: Bug 1852056: change etcd health check timeout to 10s<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage factory\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/url\"\n\t\"path\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\tgrpcprom \"github.com\/grpc-ecosystem\/go-grpc-prometheus\"\n\t\"go.etcd.io\/etcd\/clientv3\"\n\t\"go.etcd.io\/etcd\/pkg\/transport\"\n\t\"google.golang.org\/grpc\"\n\n\tutilnet \"k8s.io\/apimachinery\/pkg\/util\/net\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/apiserver\/pkg\/server\/egressselector\"\n\t\"k8s.io\/apiserver\/pkg\/storage\"\n\t\"k8s.io\/apiserver\/pkg\/storage\/etcd3\"\n\t\"k8s.io\/apiserver\/pkg\/storage\/storagebackend\"\n\t\"k8s.io\/apiserver\/pkg\/storage\/value\"\n\t\"k8s.io\/component-base\/metrics\/legacyregistry\"\n)\n\n\/\/ The short keepalive timeout and interval have been chosen to aggressively\n\/\/ detect a failed etcd server without introducing much overhead.\nconst keepaliveTime = 30 * time.Second\nconst keepaliveTimeout = 10 * time.Second\n\n\/\/ dialTimeout is the timeout for failing to establish a connection.\n\/\/ It is set to 20 seconds as times shorter than that will cause TLS connections to fail\n\/\/ on heavily loaded arm64 CPUs (issue #64649)\nconst dialTimeout = 20 * time.Second\n\nfunc init() {\n\t\/\/ grpcprom auto-registers (via an init function) their client metrics, since we are opting out of\n\t\/\/ using the global prometheus registry and using our own wrapped global registry,\n\t\/\/ we need to explicitly register these metrics to our global registry here.\n\t\/\/ For reference: https:\/\/github.com\/kubernetes\/kubernetes\/pull\/81387\n\tlegacyregistry.RawMustRegister(grpcprom.DefaultClientMetrics)\n}\n\nfunc newETCD3HealthCheck(c storagebackend.Config) (func() error, error) {\n\t\/\/ constructing the etcd v3 client blocks and times out if etcd is not available.\n\t\/\/ retry in a loop in the background until we successfully create the client, storing the client or error encountered\n\n\tclientValue := &atomic.Value{}\n\n\tclientErrMsg := &atomic.Value{}\n\tclientErrMsg.Store(\"etcd client connection not yet established\")\n\n\tgo wait.PollUntil(time.Second, func() (bool, error) {\n\t\tclient, err := newETCD3Client(c.Transport)\n\t\tif err != nil {\n\t\t\tclientErrMsg.Store(err.Error())\n\t\t\treturn false, nil\n\t\t}\n\t\tclientValue.Store(client)\n\t\tclientErrMsg.Store(\"\")\n\t\treturn true, nil\n\t}, wait.NeverStop)\n\n\treturn func() error {\n\t\tif errMsg := clientErrMsg.Load().(string); len(errMsg) > 0 {\n\t\t\treturn fmt.Errorf(errMsg)\n\t\t}\n\t\tclient := clientValue.Load().(*clientv3.Client)\n\t\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\t\tdefer cancel()\n\t\t\/\/ See https:\/\/github.com\/etcd-io\/etcd\/blob\/c57f8b3af865d1b531b979889c602ba14377420e\/etcdctl\/ctlv3\/command\/ep_command.go#L118\n\t\t_, err := client.Get(ctx, path.Join(\"\/\", c.Prefix, \"health\"))\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"error getting data from etcd: %v\", err)\n\t}, nil\n}\n\nfunc newETCD3Client(c storagebackend.TransportConfig) (*clientv3.Client, error) {\n\ttlsInfo := transport.TLSInfo{\n\t\tCertFile: c.CertFile,\n\t\tKeyFile: c.KeyFile,\n\t\tTrustedCAFile: c.TrustedCAFile,\n\t}\n\ttlsConfig, err := tlsInfo.ClientConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ NOTE: Client relies on nil tlsConfig\n\t\/\/ for non-secure connections, update the implicit variable\n\tif len(c.CertFile) == 0 && len(c.KeyFile) == 0 && len(c.TrustedCAFile) == 0 {\n\t\ttlsConfig = nil\n\t}\n\tnetworkContext := egressselector.Etcd.AsNetworkContext()\n\tvar egressDialer utilnet.DialFunc\n\tif c.EgressLookup != nil {\n\t\tegressDialer, err = c.EgressLookup(networkContext)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdialOptions := []grpc.DialOption{\n\t\tgrpc.WithBlock(), \/\/ block until the underlying connection is up\n\t\tgrpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),\n\t\tgrpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),\n\t}\n\tif egressDialer != nil {\n\t\tdialer := func(ctx context.Context, addr string) (net.Conn, error) {\n\t\t\tu, err := url.Parse(addr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn egressDialer(ctx, \"tcp\", u.Host)\n\t\t}\n\t\tdialOptions = append(dialOptions, grpc.WithContextDialer(dialer))\n\t}\n\tcfg := clientv3.Config{\n\t\tDialTimeout: dialTimeout,\n\t\tDialKeepAliveTime: keepaliveTime,\n\t\tDialKeepAliveTimeout: keepaliveTimeout,\n\t\tDialOptions: dialOptions,\n\t\tEndpoints: c.ServerList,\n\t\tTLS: tlsConfig,\n\t}\n\n\treturn clientv3.New(cfg)\n}\n\ntype runningCompactor struct {\n\tinterval time.Duration\n\tcancel context.CancelFunc\n\tclient *clientv3.Client\n\trefs int\n}\n\nvar (\n\tlock sync.Mutex\n\tcompactors = map[string]*runningCompactor{}\n)\n\n\/\/ startCompactorOnce start one compactor per transport. If the interval get smaller on repeated calls, the\n\/\/ compactor is replaced. A destroy func is returned. If all destroy funcs with the same transport are called,\n\/\/ the compactor is stopped.\nfunc startCompactorOnce(c storagebackend.TransportConfig, interval time.Duration) (func(), error) {\n\tlock.Lock()\n\tdefer lock.Unlock()\n\n\tkey := fmt.Sprintf(\"%v\", c) \/\/ gives: {[server1 server2] keyFile certFile caFile}\n\tif compactor, foundBefore := compactors[key]; !foundBefore || compactor.interval > interval {\n\t\tcompactorClient, err := newETCD3Client(c)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif foundBefore {\n\t\t\t\/\/ replace compactor\n\t\t\tcompactor.cancel()\n\t\t\tcompactor.client.Close()\n\t\t} else {\n\t\t\t\/\/ start new compactor\n\t\t\tcompactor = &runningCompactor{}\n\t\t\tcompactors[key] = compactor\n\t\t}\n\n\t\tctx, cancel := context.WithCancel(context.Background())\n\n\t\tcompactor.interval = interval\n\t\tcompactor.cancel = cancel\n\t\tcompactor.client = compactorClient\n\n\t\tetcd3.StartCompactor(ctx, compactorClient, interval)\n\t}\n\n\tcompactors[key].refs++\n\n\treturn func() {\n\t\tlock.Lock()\n\t\tdefer lock.Unlock()\n\n\t\tcompactor := compactors[key]\n\t\tcompactor.refs--\n\t\tif compactor.refs == 0 {\n\t\t\tcompactor.cancel()\n\t\t\tcompactor.client.Close()\n\t\t\tdelete(compactors, key)\n\t\t}\n\t}, nil\n}\n\nfunc newETCD3Storage(c storagebackend.Config) (storage.Interface, DestroyFunc, error) {\n\tstopCompactor, err := startCompactorOnce(c.Transport, c.CompactionInterval)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tclient, err := newETCD3Client(c.Transport)\n\tif err != nil {\n\t\tstopCompactor()\n\t\treturn nil, nil, err\n\t}\n\n\tvar once sync.Once\n\tdestroyFunc := func() {\n\t\t\/\/ we know that storage destroy funcs are called multiple times (due to reuse in subresources).\n\t\t\/\/ Hence, we only destroy once.\n\t\t\/\/ TODO: fix duplicated storage destroy calls higher level\n\t\tonce.Do(func() {\n\t\t\tstopCompactor()\n\t\t\tclient.Close()\n\t\t})\n\t}\n\ttransformer := c.Transformer\n\tif transformer == nil {\n\t\ttransformer = value.IdentityTransformer\n\t}\n\treturn etcd3.New(client, c.Codec, c.Prefix, transformer, c.Paging), destroyFunc, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package snakes\n\nimport (\n \"math\/big\"\n)\n\n\nfunc MontgomeryLadderExp(a, b, c *big.Int) *big.Int {\n zero := big.NewInt(0)\n if a.Cmp(zero) == 0 && b.Cmp(zero) == 0 {\n return big.NewInt(1)\n }\n a1 := a\n a2 := new(big.Int).Mul(a, a)\n bits := b.Bits()\n for pos := len(bits) - 1; pos >= 0; pos-- {\n b := bits[pos]\n \/\/ TODO: how to get the actual bit width of Word on this platform?\n i := 7\n \/\/ On the first element, we need to skip any leading 0s.\n if pos == len(bits) - 1 {\n for ; i >= 0; i-- {\n if (b & (1 << uint(i))) != 0 {\n break\n }\n }\n i--\n }\n for ; i >= 0; i-- {\n if (b & (1 << uint(i))) == 0 {\n a2 = new(big.Int).Mul(a2, a1)\n a1 = new(big.Int).Mul(a1, a1)\n } else {\n a1 = new(big.Int).Mul(a1, a2)\n a2 = new(big.Int).Mul(a2, a2)\n }\n\n a1 = new(big.Int).Mod(a1, c)\n a2 = new(big.Int).Mod(a2, c)\n }\n }\n return a1\n}\n<commit_msg>Another comment<commit_after>package snakes\n\nimport (\n \"math\/big\"\n)\n\n\nfunc MontgomeryLadderExp(a, b, c *big.Int) *big.Int {\n zero := big.NewInt(0)\n if a.Cmp(zero) == 0 && b.Cmp(zero) == 0 {\n return big.NewInt(1)\n }\n a1 := a\n a2 := new(big.Int).Mul(a, a)\n bits := b.Bits()\n for pos := len(bits) - 1; pos >= 0; pos-- {\n b := bits[pos]\n \/\/ TODO: how to get the actual bit width of Word on this platform?\n i := 7\n \/\/ On the first element, we need to skip any leading 0s.\n if pos == len(bits) - 1 {\n for ; i >= 0; i-- {\n if (b & (1 << uint(i))) != 0 {\n break\n }\n }\n \/\/ Also skip the first bit.\n i--\n }\n for ; i >= 0; i-- {\n if (b & (1 << uint(i))) == 0 {\n a2 = new(big.Int).Mul(a2, a1)\n a1 = new(big.Int).Mul(a1, a1)\n } else {\n a1 = new(big.Int).Mul(a1, a2)\n a2 = new(big.Int).Mul(a2, a2)\n }\n\n a1 = new(big.Int).Mod(a1, c)\n a2 = new(big.Int).Mod(a2, c)\n }\n }\n return a1\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/cli\"\n\tmp \"github.com\/mackerelio\/go-mackerel-plugin\"\n)\n\n\/\/ metric value structure\nvar graphdef map[string](mp.Graphs) = map[string](mp.Graphs){\n\t\"apache2.workers\": mp.Graphs{\n\t\tLabel: \"Apache Workers\",\n\t\tUnit: \"integer\",\n\t\tMetrics: [](mp.Metrics){\n\t\t\tmp.Metrics{Name: \"busy_workers\", Label: \"Busy Workers\", Diff: false},\n\t\t\tmp.Metrics{Name: \"idle_workers\", Label: \"Idle Workers\", Diff: false},\n\t\t},\n\t},\n\t\"apache2.bytes\": mp.Graphs{\n\t\tLabel: \"Apache Bytes\",\n\t\tUnit: \"integer\",\n\t\tMetrics: [](mp.Metrics){\n\t\t\tmp.Metrics{Name: \"bytes_sent\", Label: \"Bytes Sent\", Diff: false},\n\t\t},\n\t},\n\t\"apache2.cpu\": mp.Graphs{\n\t\tLabel: \"Apache CPU Load\",\n\t\tUnit: \"float\",\n\t\tMetrics: [](mp.Metrics){\n\t\t\tmp.Metrics{Name: \"cpu_load\", Label: \"CPU Load\", Diff: false},\n\t\t},\n\t},\n\t\"apache2.req\": mp.Graphs{\n\t\tLabel: \"Apache Requests\",\n\t\tUnit: \"integer\",\n\t\tMetrics: [](mp.Metrics){\n\t\t\tmp.Metrics{Name: \"requests\", Label: \"Requests\", Diff: false},\n\t\t},\n\t},\n\t\"apache2.scoreboard\": mp.Graphs{\n\t\tLabel: \"Apache Scoreboard\",\n\t\tUnit: \"integer\",\n\t\tMetrics: [](mp.Metrics){\n\t\t\tmp.Metrics{Name: \"score-_\", Label: \"Waiting for connection\", Diff: false},\n\t\t\tmp.Metrics{Name: \"score-S\", Label: \"Starting up\", Diff: false},\n\t\t\tmp.Metrics{Name: \"score-R\", Label: \"Reading request\", Diff: false},\n\t\t\tmp.Metrics{Name: \"scpre-W\", Label: \"Sending reply\", Diff: false},\n\t\t\tmp.Metrics{Name: \"score-K\", Label: \"Keepalive\", Diff: false},\n\t\t\tmp.Metrics{Name: \"score-D\", Label: \"DNS lookup\", Diff: false},\n\t\t\tmp.Metrics{Name: \"score-C\", Label: \"Closing connection\", Diff: false},\n\t\t\tmp.Metrics{Name: \"score-L\", Label: \"Logging\", Diff: false},\n\t\t\tmp.Metrics{Name: \"score-G\", Label: \"Gracefully finishing\", Diff: false},\n\t\t\tmp.Metrics{Name: \"score-I\", Label: \"Idle cleanup\", Diff: false},\n\t\t\tmp.Metrics{Name: \"score-.\", Label: \"Open slot\", Diff: false},\n\t\t},\n\t},\n}\n\n\/\/ for fetching metrics\ntype Apache2Plugin struct {\n\tHost string\n\tPort uint16\n\tPath string\n\tTempfile string\n}\n\n\/\/ Graph definition\nfunc (c Apache2Plugin) GraphDefinition() map[string](mp.Graphs) {\n\treturn graphdef\n}\n\n\/\/ main function\nfunc doMain(c *cli.Context) {\n\n\tvar apache2 Apache2Plugin\n\n\tapache2.Host = c.String(\"http_host\")\n\tapache2.Port = uint16(c.Int(\"http_port\"))\n\tapache2.Path = c.String(\"status_page\")\n\tapache2.Tempfile = c.String(\"tempfile\")\n\n\thelper := mp.NewMackerelPlugin(apache2)\n\n\tif os.Getenv(\"MACKEREL_AGENT_PLUGIN_META\") != \"\" {\n\t\thelper.OutputDefinitions()\n\t} else {\n\t\thelper.OutputValues()\n\t}\n}\n\n\/\/ fetch metrics\nfunc (c Apache2Plugin) FetchMetrics() (map[string]float64, error) {\n\tdata, err := getApache2Metrics(c.Host, c.Port, c.Path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstat := make(map[string]float64)\n\terr_stat := parseApache2Status(data, &stat)\n\tif err_stat != nil {\n\t\treturn nil, err_stat\n\t}\n\terr_score := parseApache2Scoreboard(data, &stat)\n\tif err_score != nil {\n\t\treturn nil, err_score\n\t}\n\n\treturn stat, nil\n}\n\n\/\/ parsing scoreboard from server-status?auto\nfunc parseApache2Scoreboard(str string, p *map[string]float64) error {\n\tfor _, line := range strings.Split(str, \"\\n\") {\n\t\tmatched, err := regexp.MatchString(\"Scoreboard(.*)\", line)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !matched {\n\t\t\tcontinue\n\t\t}\n\t\trecord := strings.Split(line, \":\")\n\t\tfor _, sb := range strings.Split(strings.Trim(record[1], \" \"), \"\") {\n\t\t\tname := fmt.Sprintf(\"score-%s\", sb)\n\t\t\tc, assert := (*p)[name]\n\t\t\tif !assert {\n\t\t\t\tc = 0\n\t\t\t}\n\t\t\t(*p)[name] = c + 1\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"Scoreboard data is not found.\")\n}\n\n\/\/ parsing metrics from server-status?auto\nfunc parseApache2Status(str string, p *map[string]float64) error {\n\tParams := map[string]string{\n\t\t\"Total Accesses\": \"requests\",\n\t\t\"Total kBytes\": \"bytes_sent\",\n\t\t\"CPULoad\": \"cpu_load\",\n\t\t\"BusyWorkers\": \"busy_workers\",\n\t\t\"IdleWorkers\": \"idle_workers\"}\n\n\tfor _, line := range strings.Split(str, \"\\n\") {\n\t\trecord := strings.Split(line, \":\")\n\t\t_, assert := Params[record[0]]\n\t\tif !assert {\n\t\t\tcontinue\n\t\t}\n\t\tvar err_parse error\n\t\t(*p)[Params[record[0]]], err_parse = strconv.ParseFloat(strings.Trim(record[1], \" \"), 64)\n\t\tif err_parse != nil {\n\t\t\treturn err_parse\n\t\t}\n\t}\n\n\tif len(*p) == 0 {\n\t\treturn errors.New(\"Status data not found.\")\n\t}\n\n\treturn nil\n}\n\n\/\/ Getting apache2 status from server-status module data.\nfunc getApache2Metrics(host string, port uint16, path string) (string, error) {\n\turi := \"http:\/\/\" + host + \":\" + strconv.FormatUint(uint64(port), 10) + path\n\tresp, err := http.Get(uri)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn \"\", errors.New(fmt.Sprintf(\"HTTP status error: %d\", resp.StatusCode))\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(body[:]), nil\n}\n\n\/\/ main\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"apache2_metrics\"\n\tapp.Version = Version\n\tapp.Usage = \"Get metrics from apache2.\"\n\tapp.Author = \"Yuichiro Saito\"\n\tapp.Email = \"saito@heartbeats.jp\"\n\tapp.Flags = Flags\n\tapp.Action = doMain\n\n\tapp.Run(os.Args)\n}\n<commit_msg>Stacked graph supported toward scoreboard and workers.<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/cli\"\n\tmp \"github.com\/mackerelio\/go-mackerel-plugin\"\n)\n\n\/\/ metric value structure\nvar graphdef map[string](mp.Graphs) = map[string](mp.Graphs){\n\t\"apache2.workers\": mp.Graphs{\n\t\tLabel: \"Apache Workers\",\n\t\tUnit: \"integer\",\n\t\tMetrics: [](mp.Metrics){\n\t\t\tmp.Metrics{Name: \"busy_workers\", Label: \"Busy Workers\", Diff: false, Stacked: true},\n\t\t\tmp.Metrics{Name: \"idle_workers\", Label: \"Idle Workers\", Diff: false, Stacked: true},\n\t\t},\n\t},\n\t\"apache2.bytes\": mp.Graphs{\n\t\tLabel: \"Apache Bytes\",\n\t\tUnit: \"integer\",\n\t\tMetrics: [](mp.Metrics){\n\t\t\tmp.Metrics{Name: \"bytes_sent\", Label: \"Bytes Sent\", Diff: false},\n\t\t},\n\t},\n\t\"apache2.cpu\": mp.Graphs{\n\t\tLabel: \"Apache CPU Load\",\n\t\tUnit: \"float\",\n\t\tMetrics: [](mp.Metrics){\n\t\t\tmp.Metrics{Name: \"cpu_load\", Label: \"CPU Load\", Diff: false},\n\t\t},\n\t},\n\t\"apache2.req\": mp.Graphs{\n\t\tLabel: \"Apache Requests\",\n\t\tUnit: \"integer\",\n\t\tMetrics: [](mp.Metrics){\n\t\t\tmp.Metrics{Name: \"requests\", Label: \"Requests\", Diff: false},\n\t\t},\n\t},\n\t\"apache2.scoreboard\": mp.Graphs{\n\t\tLabel: \"Apache Scoreboard\",\n\t\tUnit: \"integer\",\n\t\tMetrics: [](mp.Metrics){\n\t\t\tmp.Metrics{Name: \"score-_\", Label: \"Waiting for connection\", Diff: false, Stacked: true},\n\t\t\tmp.Metrics{Name: \"score-S\", Label: \"Starting up\", Diff: false, Stacked: true},\n\t\t\tmp.Metrics{Name: \"score-R\", Label: \"Reading request\", Diff: false, Stacked: true},\n\t\t\tmp.Metrics{Name: \"scpre-W\", Label: \"Sending reply\", Diff: false, Stacked: true},\n\t\t\tmp.Metrics{Name: \"score-K\", Label: \"Keepalive\", Diff: false, Stacked: true},\n\t\t\tmp.Metrics{Name: \"score-D\", Label: \"DNS lookup\", Diff: false, Stacked: true},\n\t\t\tmp.Metrics{Name: \"score-C\", Label: \"Closing connection\", Diff: false, Stacked: true},\n\t\t\tmp.Metrics{Name: \"score-L\", Label: \"Logging\", Diff: false, Stacked: true},\n\t\t\tmp.Metrics{Name: \"score-G\", Label: \"Gracefully finishing\", Diff: false, Stacked: true},\n\t\t\tmp.Metrics{Name: \"score-I\", Label: \"Idle cleanup\", Diff: false, Stacked: true},\n\t\t\tmp.Metrics{Name: \"score-.\", Label: \"Open slot\", Diff: false, Stacked: true},\n\t\t},\n\t},\n}\n\n\/\/ for fetching metrics\ntype Apache2Plugin struct {\n\tHost string\n\tPort uint16\n\tPath string\n\tTempfile string\n}\n\n\/\/ Graph definition\nfunc (c Apache2Plugin) GraphDefinition() map[string](mp.Graphs) {\n\treturn graphdef\n}\n\n\/\/ main function\nfunc doMain(c *cli.Context) {\n\n\tvar apache2 Apache2Plugin\n\n\tapache2.Host = c.String(\"http_host\")\n\tapache2.Port = uint16(c.Int(\"http_port\"))\n\tapache2.Path = c.String(\"status_page\")\n\tapache2.Tempfile = c.String(\"tempfile\")\n\n\thelper := mp.NewMackerelPlugin(apache2)\n\n\tif os.Getenv(\"MACKEREL_AGENT_PLUGIN_META\") != \"\" {\n\t\thelper.OutputDefinitions()\n\t} else {\n\t\thelper.OutputValues()\n\t}\n}\n\n\/\/ fetch metrics\nfunc (c Apache2Plugin) FetchMetrics() (map[string]float64, error) {\n\tdata, err := getApache2Metrics(c.Host, c.Port, c.Path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstat := make(map[string]float64)\n\terr_stat := parseApache2Status(data, &stat)\n\tif err_stat != nil {\n\t\treturn nil, err_stat\n\t}\n\terr_score := parseApache2Scoreboard(data, &stat)\n\tif err_score != nil {\n\t\treturn nil, err_score\n\t}\n\n\treturn stat, nil\n}\n\n\/\/ parsing scoreboard from server-status?auto\nfunc parseApache2Scoreboard(str string, p *map[string]float64) error {\n\tfor _, line := range strings.Split(str, \"\\n\") {\n\t\tmatched, err := regexp.MatchString(\"Scoreboard(.*)\", line)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !matched {\n\t\t\tcontinue\n\t\t}\n\t\trecord := strings.Split(line, \":\")\n\t\tfor _, sb := range strings.Split(strings.Trim(record[1], \" \"), \"\") {\n\t\t\tname := fmt.Sprintf(\"score-%s\", sb)\n\t\t\tc, assert := (*p)[name]\n\t\t\tif !assert {\n\t\t\t\tc = 0\n\t\t\t}\n\t\t\t(*p)[name] = c + 1\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn errors.New(\"Scoreboard data is not found.\")\n}\n\n\/\/ parsing metrics from server-status?auto\nfunc parseApache2Status(str string, p *map[string]float64) error {\n\tParams := map[string]string{\n\t\t\"Total Accesses\": \"requests\",\n\t\t\"Total kBytes\": \"bytes_sent\",\n\t\t\"CPULoad\": \"cpu_load\",\n\t\t\"BusyWorkers\": \"busy_workers\",\n\t\t\"IdleWorkers\": \"idle_workers\"}\n\n\tfor _, line := range strings.Split(str, \"\\n\") {\n\t\trecord := strings.Split(line, \":\")\n\t\t_, assert := Params[record[0]]\n\t\tif !assert {\n\t\t\tcontinue\n\t\t}\n\t\tvar err_parse error\n\t\t(*p)[Params[record[0]]], err_parse = strconv.ParseFloat(strings.Trim(record[1], \" \"), 64)\n\t\tif err_parse != nil {\n\t\t\treturn err_parse\n\t\t}\n\t}\n\n\tif len(*p) == 0 {\n\t\treturn errors.New(\"Status data not found.\")\n\t}\n\n\treturn nil\n}\n\n\/\/ Getting apache2 status from server-status module data.\nfunc getApache2Metrics(host string, port uint16, path string) (string, error) {\n\turi := \"http:\/\/\" + host + \":\" + strconv.FormatUint(uint64(port), 10) + path\n\tresp, err := http.Get(uri)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn \"\", errors.New(fmt.Sprintf(\"HTTP status error: %d\", resp.StatusCode))\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(body[:]), nil\n}\n\n\/\/ main\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"apache2_metrics\"\n\tapp.Version = Version\n\tapp.Usage = \"Get metrics from apache2.\"\n\tapp.Author = \"Yuichiro Saito\"\n\tapp.Email = \"saito@heartbeats.jp\"\n\tapp.Flags = Flags\n\tapp.Action = doMain\n\n\tapp.Run(os.Args)\n}\n<|endoftext|>"} {"text":"<commit_before>package device\n\nimport (\n\t\"bytes\"\n\t\"github.com\/hemtjanst\/hemtjanst\/messaging\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestNewDevice(t *testing.T) {\n\td := NewDevice(\"test\", &messaging.TestingMessenger{})\n\tif d.Topic != \"test\" {\n\t\tt.Errorf(\"Expected topic of %s, got %s\", \"test\", d.Topic)\n\t}\n\n\tif d.HasFeature(\"\") {\n\t\tt.Error(\"Expected false, got \", d.HasFeature(\"\"))\n\t}\n}\n\nfunc TestPublishMeta(t *testing.T) {\n\tm := &messaging.TestingMessenger{}\n\td := NewDevice(\"lightbulb\/kitchen\", m)\n\terr := d.PublishMeta()\n\tif err != nil {\n\t\tt.Error(\"Expected to successfully publish meta, got \", err)\n\t}\n\n\tif m.Action != \"publish\" {\n\t\tt.Error(\"Expected to publish, but tried to \", m.Action)\n\t}\n\tif !reflect.DeepEqual(m.Topic, []string{\"lightbulb\/kitchen\/meta\"}) {\n\t\tt.Error(\"Expected topic to be lightbulb\/kitchen\/meta, got \", m.Topic)\n\t}\n\tif m.Qos != 1 {\n\t\tt.Error(\"Expected QoS of 1, got \", m.Qos)\n\t}\n\tif !m.Persist {\n\t\tt.Error(\"Expected persist, got \", m.Persist)\n\t}\n\tmsg := `{\"Topic\":\"lightbulb\/kitchen\",\"name\":\"\",\"manufacturer\":\"\",\"model\":\"\",\"serialNumber\":\"\",\"type\":\"\",\"feature\":null}`\n\tif !bytes.Equal(m.Message, []byte(msg)) {\n\t\tt.Errorf(\"Expected %s, got %s\", msg, string(m.Message))\n\t}\n}\n<commit_msg>:umbrella: Add tests for methods on Feature<commit_after>package device\n\nimport (\n\t\"bytes\"\n\t\"github.com\/hemtjanst\/hemtjanst\/messaging\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestNewDevice(t *testing.T) {\n\td := NewDevice(\"test\", &messaging.TestingMessenger{})\n\tif d.Topic != \"test\" {\n\t\tt.Errorf(\"Expected topic of %s, got %s\", \"test\", d.Topic)\n\t}\n\n\tif d.HasFeature(\"\") {\n\t\tt.Error(\"Expected false, got \", d.HasFeature(\"\"))\n\t}\n}\n\nfunc TestPublishMeta(t *testing.T) {\n\tm := &messaging.TestingMessenger{}\n\td := NewDevice(\"lightbulb\/kitchen\", m)\n\terr := d.PublishMeta()\n\tif err != nil {\n\t\tt.Error(\"Expected to successfully publish meta, got \", err)\n\t}\n\n\tif m.Action != \"publish\" {\n\t\tt.Error(\"Expected to publish, but tried to \", m.Action)\n\t}\n\tif !reflect.DeepEqual(m.Topic, []string{\"lightbulb\/kitchen\/meta\"}) {\n\t\tt.Error(\"Expected topic to be lightbulb\/kitchen\/meta, got \", m.Topic)\n\t}\n\tif m.Qos != 1 {\n\t\tt.Error(\"Expected QoS of 1, got \", m.Qos)\n\t}\n\tif !m.Persist {\n\t\tt.Error(\"Expected persist, got \", m.Persist)\n\t}\n\tmsg := `{\"Topic\":\"lightbulb\/kitchen\",\"name\":\"\",\"manufacturer\":\"\",\"model\":\"\",\"serialNumber\":\"\",\"type\":\"\",\"feature\":null}`\n\tif !bytes.Equal(m.Message, []byte(msg)) {\n\t\tt.Errorf(\"Expected %s, got %s\", msg, string(m.Message))\n\t}\n}\n\nfunc TestFeatureSet(t *testing.T) {\n\tm := &messaging.TestingMessenger{}\n\td := NewDevice(\"lightbulb\", m)\n\tf := &Feature{GetTopic: \"lightbulb\/on\/get\", SetTopic: \"lightbulb\/on\/set\", devRef: d}\n\td.Features = map[string]*Feature{}\n\td.Features[\"on\"] = f\n\tf.Set(\"1\")\n\n\tif m.Action != \"publish\" {\n\t\tt.Error(\"Expected to publish, but instead tried to \", m.Action)\n\t}\n\tif !reflect.DeepEqual(m.Topic, []string{\"lightbulb\/on\/set\"}) {\n\t\tt.Error(\"Expected topic to be lightbulb\/on\/set, got \", m.Topic)\n\t}\n\tif m.Qos != 1 {\n\t\tt.Error(\"Expected QoS of 1, got \", m.Qos)\n\t}\n\tif m.Persist {\n\t\tt.Error(\"Expected message without persist, got \", m.Persist)\n\t}\n\tif !bytes.Equal(m.Message, []byte(\"1\")) {\n\t\tt.Error(\"Expected message of 1, got \", string(m.Message))\n\t}\n}\n\nfunc TestFeatureOnSet(t *testing.T) {\n\tm := &messaging.TestingMessenger{}\n\td := NewDevice(\"lightbulb\", m)\n\tf := &Feature{GetTopic: \"lightbulb\/on\/get\", SetTopic: \"lightbulb\/on\/set\", devRef: d}\n\td.Features = map[string]*Feature{}\n\td.Features[\"on\"] = f\n\n\tf.OnSet(func(messaging.Message) {\n\t\treturn\n\t})\n\tif m.Action != \"subscribe\" {\n\t\tt.Error(\"Expected to subscribe, but instead tried to \", m.Action)\n\t}\n\tif !reflect.DeepEqual(m.Topic, []string{\"lightbulb\/on\/set\"}) {\n\t\tt.Error(\"Expected topic to be lightbulb\/on\/set, got \", m.Topic)\n\t}\n\tif m.Qos != 1 {\n\t\tt.Error(\"Expected QoS of 1, got \", m.Qos)\n\t}\n\tif m.Callback == nil {\n\t\tt.Error(\"Expected a callback, got nil\")\n\t}\n}\n\nfunc TestFeatureUpdate(t *testing.T) {\n\tm := &messaging.TestingMessenger{}\n\td := NewDevice(\"lightbulb\", m)\n\tf := &Feature{GetTopic: \"lightbulb\/on\/get\", SetTopic: \"lightbulb\/on\/set\", devRef: d}\n\td.Features = map[string]*Feature{}\n\td.Features[\"on\"] = f\n\tf.Update(\"1\")\n\n\tif m.Action != \"publish\" {\n\t\tt.Error(\"Expected to publish, but instead tried to \", m.Action)\n\t}\n\tif !reflect.DeepEqual(m.Topic, []string{\"lightbulb\/on\/get\"}) {\n\t\tt.Error(\"Expected topic to be lightbulb\/on\/get, got \", m.Topic)\n\t}\n\tif m.Qos != 1 {\n\t\tt.Error(\"Expected QoS of 1, got \", m.Qos)\n\t}\n\tif !m.Persist {\n\t\tt.Error(\"Expected message to persist, got \", m.Persist)\n\t}\n\tif !bytes.Equal(m.Message, []byte(\"1\")) {\n\t\tt.Error(\"Expected message of 1, got \", string(m.Message))\n\t}\n}\n\nfunc TestFeatureOnUpdate(t *testing.T) {\n\tm := &messaging.TestingMessenger{}\n\td := NewDevice(\"lightbulb\", m)\n\tf := &Feature{GetTopic: \"lightbulb\/on\/get\", SetTopic: \"lightbulb\/on\/set\", devRef: d}\n\td.Features = map[string]*Feature{}\n\td.Features[\"on\"] = f\n\n\tf.OnUpdate(func(messaging.Message) {\n\t\treturn\n\t})\n\tif m.Action != \"subscribe\" {\n\t\tt.Error(\"Expected to subscribe, but instead tried to \", m.Action)\n\t}\n\tif !reflect.DeepEqual(m.Topic, []string{\"lightbulb\/on\/get\"}) {\n\t\tt.Error(\"Expected topic to be lightbulb\/on\/get, got \", m.Topic)\n\t}\n\tif m.Qos != 1 {\n\t\tt.Error(\"Expected QoS of 1, got \", m.Qos)\n\t}\n\tif m.Callback == nil {\n\t\tt.Error(\"Expected a callback, got nil\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage client\n\nimport (\n\t\"fmt\"\n\t\"github.com\/Shopify\/sarama\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/ligato\/cn-infra\/db\/keyval\"\n\t\"time\"\n)\n\n\/\/ Encoder defines an interface that is used as argument of producer functions.\n\/\/ It wraps the sarama.Encoder\ntype Encoder interface {\n\tsarama.Encoder\n}\n\n\/\/ ConsumerMessage encapsulates a Kafka message returned by the consumer.\ntype ConsumerMessage struct {\n\tKey, Value []byte\n\tTopic string\n\tPartition int32\n\tOffset int64\n\tTimestamp time.Time\n}\n\n\/\/ GetKey returns key associated with the message\nfunc (cm *ConsumerMessage) GetKey() string {\n\treturn string(cm.Key)\n}\n\n\/\/ GetValue return value associated with the message\nfunc (cm *ConsumerMessage) GetValue() []byte {\n\treturn cm.Value\n}\n\n\/\/ ConsumerMessage encapsulates a Kafka message returned by the consumer.\ntype ProtoConsumerMessage struct {\n\t*ConsumerMessage\n\tserializer keyval.Serializer\n}\n\n\/\/ NewProtoConsumerMessage creates new instace of ProtoConsumerMessage\nfunc NewProtoConsumerMessage(msg *ConsumerMessage, serializer keyval.Serializer) *ProtoConsumerMessage {\n\treturn &ProtoConsumerMessage{msg, serializer}\n}\n\n\/\/ GetKey returns key associated with the message\nfunc (cm *ProtoConsumerMessage) GetKey() string {\n\treturn string(cm.Key)\n}\n\n\/\/ GetValue return value associated with the message\nfunc (cm *ProtoConsumerMessage) GetValue(msg proto.Message) error {\n\terr := cm.serializer.Unmarshal(cm.ConsumerMessage.GetValue(), msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ ProducerMessage is the collection of elements passed to the Producer in order to send a message.\ntype ProducerMessage struct {\n\t\/\/ The Kafka topic for this message.\n\tTopic string\n\t\/\/ The partitioning key for this message. Pre-existing Encoders include\n\t\/\/ StringEncoder and ByteEncoder.\n\tKey Encoder\n\t\/\/ The actual message to store in Kafka. Pre-existing Encoders include\n\t\/\/ StringEncoder and ByteEncoder.\n\tValue Encoder\n\n\t\/\/ This field is used to hold arbitrary data you wish to include so it\n\t\/\/ will be available when receiving on the Successes and Errors channels.\n\t\/\/ Sarama completely ignores this field and is only to be used for\n\t\/\/ pass-through data.\n\tMetadata interface{}\n\n\t\/\/ Below this point are filled in by the producer as the message is processed\n\n\t\/\/ Offset is the offset of the message stored on the broker. This is only\n\t\/\/ guaranteed to be defined if the message was successfully delivered and\n\t\/\/ RequiredAcks is not NoResponse.\n\tOffset int64\n\t\/\/ Partition is the partition that the message was sent to. This is only\n\t\/\/ guaranteed to be defined if the message was successfully delivered.\n\tPartition int32\n}\n\n\/\/ ProducerError is the type of error generated when the producer fails to deliver a message.\n\/\/ It contains the original ProducerMessage as well as the actual error value.\ntype ProducerError struct {\n\tMsg *ProducerMessage\n\tErr error\n}\n\nfunc (ref *ProducerError) Error() string {\n\treturn ref.Err.Error()\n}\n\nfunc (ref *ProducerError) String() string {\n\treturn fmt.Sprintf(\"ProducerError: %s, error: %v\\n\", ref.Msg, ref.Err)\n}\n\nfunc (ref *ProducerMessage) String() string {\n\tvar meta string\n\tswitch t := ref.Metadata.(type) {\n\tdefault:\n\t\tmeta = fmt.Sprintf(\"unexpected type %T\", t) \/\/ %T prints whatever type t has\n\tcase string:\n\t\tmeta = t\n\tcase *string:\n\t\tmeta = *t\n\tcase []byte:\n\t\tmeta = string(t)\n\tcase bool:\n\t\tmeta = fmt.Sprintf(\"%t\", t) \/\/ t has type bool\n\tcase int:\n\t\tmeta = fmt.Sprintf(\"%d\", t) \/\/ t has type int\n\tcase *bool:\n\t\tmeta = fmt.Sprintf(\"%t\", *t) \/\/ t has type *bool\n\tcase *int:\n\t\tmeta = fmt.Sprintf(\"%d\", *t) \/\/ t has type *int\n\t}\n\n\tkey, _ := ref.Key.Encode()\n\tval, _ := ref.Value.Encode()\n\n\treturn fmt.Sprintf(\"ProducerMessage - Topic: %s, Key: %s, Value: %s, Meta: %v, Offset: %d, Partition: %d\\n\", ref.Topic, string(key), string(val), meta, ref.Offset, ref.Partition)\n}\n<commit_msg>Address golint issue<commit_after>\/\/ Copyright (c) 2017 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage client\n\nimport (\n\t\"fmt\"\n\t\"github.com\/Shopify\/sarama\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/ligato\/cn-infra\/db\/keyval\"\n\t\"time\"\n)\n\n\/\/ Encoder defines an interface that is used as argument of producer functions.\n\/\/ It wraps the sarama.Encoder\ntype Encoder interface {\n\tsarama.Encoder\n}\n\n\/\/ ConsumerMessage encapsulates a Kafka message returned by the consumer.\ntype ConsumerMessage struct {\n\tKey, Value []byte\n\tTopic string\n\tPartition int32\n\tOffset int64\n\tTimestamp time.Time\n}\n\n\/\/ GetKey returns key associated with the message\nfunc (cm *ConsumerMessage) GetKey() string {\n\treturn string(cm.Key)\n}\n\n\/\/ GetValue return value associated with the message\nfunc (cm *ConsumerMessage) GetValue() []byte {\n\treturn cm.Value\n}\n\n\/\/ ProtoConsumerMessage encapsulates a Kafka message returned by the consumer and provides means\n\/\/ to unmarshal the value into proto.Message.\ntype ProtoConsumerMessage struct {\n\t*ConsumerMessage\n\tserializer keyval.Serializer\n}\n\n\/\/ NewProtoConsumerMessage creates new instace of ProtoConsumerMessage\nfunc NewProtoConsumerMessage(msg *ConsumerMessage, serializer keyval.Serializer) *ProtoConsumerMessage {\n\treturn &ProtoConsumerMessage{msg, serializer}\n}\n\n\/\/ GetKey returns key associated with the message\nfunc (cm *ProtoConsumerMessage) GetKey() string {\n\treturn string(cm.Key)\n}\n\n\/\/ GetValue return value associated with the message\nfunc (cm *ProtoConsumerMessage) GetValue(msg proto.Message) error {\n\terr := cm.serializer.Unmarshal(cm.ConsumerMessage.GetValue(), msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ ProducerMessage is the collection of elements passed to the Producer in order to send a message.\ntype ProducerMessage struct {\n\t\/\/ The Kafka topic for this message.\n\tTopic string\n\t\/\/ The partitioning key for this message. Pre-existing Encoders include\n\t\/\/ StringEncoder and ByteEncoder.\n\tKey Encoder\n\t\/\/ The actual message to store in Kafka. Pre-existing Encoders include\n\t\/\/ StringEncoder and ByteEncoder.\n\tValue Encoder\n\n\t\/\/ This field is used to hold arbitrary data you wish to include so it\n\t\/\/ will be available when receiving on the Successes and Errors channels.\n\t\/\/ Sarama completely ignores this field and is only to be used for\n\t\/\/ pass-through data.\n\tMetadata interface{}\n\n\t\/\/ Below this point are filled in by the producer as the message is processed\n\n\t\/\/ Offset is the offset of the message stored on the broker. This is only\n\t\/\/ guaranteed to be defined if the message was successfully delivered and\n\t\/\/ RequiredAcks is not NoResponse.\n\tOffset int64\n\t\/\/ Partition is the partition that the message was sent to. This is only\n\t\/\/ guaranteed to be defined if the message was successfully delivered.\n\tPartition int32\n}\n\n\/\/ ProducerError is the type of error generated when the producer fails to deliver a message.\n\/\/ It contains the original ProducerMessage as well as the actual error value.\ntype ProducerError struct {\n\tMsg *ProducerMessage\n\tErr error\n}\n\nfunc (ref *ProducerError) Error() string {\n\treturn ref.Err.Error()\n}\n\nfunc (ref *ProducerError) String() string {\n\treturn fmt.Sprintf(\"ProducerError: %s, error: %v\\n\", ref.Msg, ref.Err)\n}\n\nfunc (ref *ProducerMessage) String() string {\n\tvar meta string\n\tswitch t := ref.Metadata.(type) {\n\tdefault:\n\t\tmeta = fmt.Sprintf(\"unexpected type %T\", t) \/\/ %T prints whatever type t has\n\tcase string:\n\t\tmeta = t\n\tcase *string:\n\t\tmeta = *t\n\tcase []byte:\n\t\tmeta = string(t)\n\tcase bool:\n\t\tmeta = fmt.Sprintf(\"%t\", t) \/\/ t has type bool\n\tcase int:\n\t\tmeta = fmt.Sprintf(\"%d\", t) \/\/ t has type int\n\tcase *bool:\n\t\tmeta = fmt.Sprintf(\"%t\", *t) \/\/ t has type *bool\n\tcase *int:\n\t\tmeta = fmt.Sprintf(\"%d\", *t) \/\/ t has type *int\n\t}\n\n\tkey, _ := ref.Key.Encode()\n\tval, _ := ref.Value.Encode()\n\n\treturn fmt.Sprintf(\"ProducerMessage - Topic: %s, Key: %s, Value: %s, Meta: %v, Offset: %d, Partition: %d\\n\", ref.Topic, string(key), string(val), meta, ref.Offset, ref.Partition)\n}\n<|endoftext|>"} {"text":"<commit_before>package stockfighter\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\ntype OrderType int\n\nconst (\n\tLimit OrderType = iota\n\tMarket\n\tFillOrKill\n\tImmediateOrCancel\n)\n\ntype Game struct {\n\tAccount string\n\tInstanceId uint64\n\tInstructions map[string]string\n\tSecondsPerTradingDay uint64\n\tTickers []string\n\tVenues []string\n}\n\ntype GameState struct {\n\tDetails struct {\n\t\tEndOfTheWorldDay uint64\n\t\tTradingDay uint64\n\t}\n\tFlash struct {\n\t\tInfo string\n\t\tWarning string\n\t\tDanger string\n\t}\n\tDone bool\n\tId uint64\n\tState string\n}\n\ntype Symbol struct {\n\tSymbol string\n\tName string\n}\n\ntype Order struct {\n\tAccount string `json:\"account\"`\n\tVenue string `json:\"venue\"`\n\tStock string `json:\"stock\"`\n\tPrice uint64 `json:\"price\"`\n\tQuantity uint64 `json:\"qty\"`\n\tDirection string `json:\"direction\"`\n\tOrderType OrderType `json:\"orderType\"`\n}\n\ntype StandingOrder struct {\n\tPrice uint64\n\tQuantity uint64 `json:\"qty\"`\n\tIsBuy bool\n}\n\ntype StandingOrderSlice []StandingOrder\n\ntype OrderBook struct {\n\tVenue string\n\tSymbol string\n\tAsks StandingOrderSlice\n\tBids StandingOrderSlice\n\tTimeStamp time.Time `json:\"ts\"`\n}\n\ntype Fill struct {\n\tPrice uint64\n\tQuantity uint64 `json:\"qty\"`\n\tTimeStamp time.Time `json:\"ts\"`\n}\n\ntype OrderState struct {\n\tVenue string\n\tSymbol string\n\tPrice uint64\n\tOriginalQuantity uint64 `json:\"originalQty\"`\n\tQuantity uint64 `json:\"qty\"`\n\tDirection string\n\tType OrderType\n\tId uint64\n\tAccount string\n\tTimestamp time.Time `json:\"ts\"`\n\tFills []Fill\n\tTotalFilled uint64\n\tOpen bool\n}\n\ntype Quote struct {\n\tVenue string\n\tSymbol string\n\tBid uint64\n\tBidSize uint64\n\tBidDepth uint64\n\tAsk uint64\n\tAskSize uint64\n\tAskDepth uint64\n\tLast uint64\n\tLastSize uint64\n\tLastTrade time.Time\n\tQuoteTime time.Time\n}\n\ntype Execution struct {\n\tAccount string\n\tVenue string\n\tSymbol string\n\tOrder OrderState\n\tStandingId uint64\n\tIncomingId uint64\n\tPrice uint64\n\tFilled uint64\n\tFilledAt time.Time\n\tStandingComplete bool\n\tIncomingComplete bool\n}\n\ntype Evidence struct {\n\tAccount string\n\tExplanationLink string `json:\"explanation_link\"`\n\tExecutiveSummary string `json:\"executive_summary\"`\n}\n\nvar orderTypes = [...]string{\n\tLimit: \"limit\",\n\tMarket: \"market\",\n\tFillOrKill: \"fill-or-kill\",\n\tImmediateOrCancel: \"immediate-or-cancel\",\n}\n\nfunc (o OrderType) MarshalText() ([]byte, error) {\n\treturn []byte(orderTypes[o]), nil\n}\n\nfunc (o OrderType) String() string {\n\treturn orderTypes[o]\n}\n\nvar orderTypeMap = map[string]OrderType{\n\t\"limit\": Limit,\n\t\"market\": Market,\n\t\"fill-or-kill\": FillOrKill,\n\t\"immediate-or-cancel\": ImmediateOrCancel,\n}\n\nfunc (o *OrderType) UnmarshalText(text []byte) error {\n\ttyp, ok := orderTypeMap[string(text)]\n\tif ok {\n\t\t*o = typ\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"Unknown order type: %s\", text)\n}\n\n\/\/ Total depth of offers\nfunc (s StandingOrderSlice) Depth() uint64 {\n\tvar depth uint64\n\tfor _, so := range s {\n\t\tdepth += so.Quantity\n\t}\n\treturn depth\n}\n<commit_msg>Fix Evidence json tag<commit_after>package stockfighter\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\ntype OrderType int\n\nconst (\n\tLimit OrderType = iota\n\tMarket\n\tFillOrKill\n\tImmediateOrCancel\n)\n\ntype Game struct {\n\tAccount string\n\tInstanceId uint64\n\tInstructions map[string]string\n\tSecondsPerTradingDay uint64\n\tTickers []string\n\tVenues []string\n}\n\ntype GameState struct {\n\tDetails struct {\n\t\tEndOfTheWorldDay uint64\n\t\tTradingDay uint64\n\t}\n\tFlash struct {\n\t\tInfo string\n\t\tWarning string\n\t\tDanger string\n\t}\n\tDone bool\n\tId uint64\n\tState string\n}\n\ntype Symbol struct {\n\tSymbol string\n\tName string\n}\n\ntype Order struct {\n\tAccount string `json:\"account\"`\n\tVenue string `json:\"venue\"`\n\tStock string `json:\"stock\"`\n\tPrice uint64 `json:\"price\"`\n\tQuantity uint64 `json:\"qty\"`\n\tDirection string `json:\"direction\"`\n\tOrderType OrderType `json:\"orderType\"`\n}\n\ntype StandingOrder struct {\n\tPrice uint64\n\tQuantity uint64 `json:\"qty\"`\n\tIsBuy bool\n}\n\ntype StandingOrderSlice []StandingOrder\n\ntype OrderBook struct {\n\tVenue string\n\tSymbol string\n\tAsks StandingOrderSlice\n\tBids StandingOrderSlice\n\tTimeStamp time.Time `json:\"ts\"`\n}\n\ntype Fill struct {\n\tPrice uint64\n\tQuantity uint64 `json:\"qty\"`\n\tTimeStamp time.Time `json:\"ts\"`\n}\n\ntype OrderState struct {\n\tVenue string\n\tSymbol string\n\tPrice uint64\n\tOriginalQuantity uint64 `json:\"originalQty\"`\n\tQuantity uint64 `json:\"qty\"`\n\tDirection string\n\tType OrderType\n\tId uint64\n\tAccount string\n\tTimestamp time.Time `json:\"ts\"`\n\tFills []Fill\n\tTotalFilled uint64\n\tOpen bool\n}\n\ntype Quote struct {\n\tVenue string\n\tSymbol string\n\tBid uint64\n\tBidSize uint64\n\tBidDepth uint64\n\tAsk uint64\n\tAskSize uint64\n\tAskDepth uint64\n\tLast uint64\n\tLastSize uint64\n\tLastTrade time.Time\n\tQuoteTime time.Time\n}\n\ntype Execution struct {\n\tAccount string\n\tVenue string\n\tSymbol string\n\tOrder OrderState\n\tStandingId uint64\n\tIncomingId uint64\n\tPrice uint64\n\tFilled uint64\n\tFilledAt time.Time\n\tStandingComplete bool\n\tIncomingComplete bool\n}\n\ntype Evidence struct {\n\tAccount string `json:\"account\"`\n\tExplanationLink string `json:\"explanation_link\"`\n\tExecutiveSummary string `json:\"executive_summary\"`\n}\n\nvar orderTypes = [...]string{\n\tLimit: \"limit\",\n\tMarket: \"market\",\n\tFillOrKill: \"fill-or-kill\",\n\tImmediateOrCancel: \"immediate-or-cancel\",\n}\n\nfunc (o OrderType) MarshalText() ([]byte, error) {\n\treturn []byte(orderTypes[o]), nil\n}\n\nfunc (o OrderType) String() string {\n\treturn orderTypes[o]\n}\n\nvar orderTypeMap = map[string]OrderType{\n\t\"limit\": Limit,\n\t\"market\": Market,\n\t\"fill-or-kill\": FillOrKill,\n\t\"immediate-or-cancel\": ImmediateOrCancel,\n}\n\nfunc (o *OrderType) UnmarshalText(text []byte) error {\n\ttyp, ok := orderTypeMap[string(text)]\n\tif ok {\n\t\t*o = typ\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"Unknown order type: %s\", text)\n}\n\n\/\/ Total depth of offers\nfunc (s StandingOrderSlice) Depth() uint64 {\n\tvar depth uint64\n\tfor _, so := range s {\n\t\tdepth += so.Quantity\n\t}\n\treturn depth\n}\n<|endoftext|>"} {"text":"<commit_before>package smith\n\nimport (\n\t\"encoding\/json\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n)\n\ntype ResourceState string\n\nconst (\n\tNEW ResourceState = \"\"\n\tIN_PROGRESS ResourceState = \"InProgress\"\n\tREADY ResourceState = \"Ready\"\n\tERROR ResourceState = \"Error\"\n\tTERMINAL_ERROR ResourceState = \"TerminalError\"\n)\n\nconst (\n\tSmithDomain = \"smith.atlassian.com\"\n\tSmithResourceGroup = SmithDomain\n\n\tTemplateResourcePath = \"templates\"\n\tTemplateResourceName = \"template.\" + SmithDomain\n\tTemplateResourceVersion = \"v1\"\n\tTemplateResourceKind = \"Template\"\n\tTemplateResourceGroupVersion = SmithResourceGroup + \"\/\" + TemplateResourceVersion\n\n\tTemplateNameLabel = TemplateResourceName + \"\/TemplateName\"\n\n\t\/\/ See docs\/design\/managing-resources.md\n\tTprFieldPathAnnotation = SmithDomain + \"\/TprReadyWhenFieldPath\"\n\tTprFieldValueAnnotation = SmithDomain + \"\/TprReadyWhenFieldValue\"\n)\n\ntype TemplateList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\t\/\/ Standard list metadata.\n\tMetadata metav1.ListMeta `json:\"metadata,omitempty\"`\n\n\t\/\/ Items is a list of templates.\n\tItems []Template `json:\"items\"`\n}\n\n\/\/ GetObjectKind is required to satisfy Object interface.\nfunc (tl *TemplateList) GetObjectKind() schema.ObjectKind {\n\treturn &tl.TypeMeta\n}\n\n\/\/ GetListMeta is required to satisfy ListMetaAccessor interface.\nfunc (tl *TemplateList) GetListMeta() metav1.List {\n\treturn &tl.Metadata\n}\n\n\/\/ Template describes a resources template.\ntype Template struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\n\t\/\/ Standard object metadata\n\tMetadata metav1.ObjectMeta `json:\"metadata,omitempty\"`\n\n\t\/\/ Spec is the specification of the desired behavior of the Template.\n\tSpec TemplateSpec `json:\"spec,omitempty\"`\n\n\t\/\/ Status is most recently observed status of the Template.\n\tStatus TemplateStatus `json:\"status,omitempty\"`\n}\n\n\/\/ Required to satisfy Object interface\nfunc (t *Template) GetObjectKind() schema.ObjectKind {\n\treturn &t.TypeMeta\n}\n\n\/\/ Required to satisfy ObjectMetaAccessor interface\nfunc (t *Template) GetObjectMeta() metav1.Object {\n\treturn &t.Metadata\n}\n\ntype TemplateSpec struct {\n\tResources []Resource `json:\"resources\"`\n}\n\ntype TemplateStatus struct {\n\tState ResourceState `json:\"state,omitempty\"`\n}\n\n\/\/ DependencyRef is a reference to another Resource in the same template.\ntype DependencyRef string\n\ntype Resource struct {\n\t\/\/ Name of the resource for references.\n\tName string\n\n\t\/\/ Explicit dependencies.\n\tDependsOn []DependencyRef `json:\"dependsOn,omitempty\"`\n\n\tSpec unstructured.Unstructured `json:\"spec\"`\n}\n\n\/\/ The code below is used only to work around a known problem with third-party\n\/\/ resources and ugorji. If\/when these issues are resolved, the code below\n\/\/ should no longer be required.\n\ntype templateListCopy TemplateList\ntype templateCopy Template\n\nfunc (e *Template) UnmarshalJSON(data []byte) error {\n\ttmp := templateCopy{}\n\terr := json.Unmarshal(data, &tmp)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*e = Template(tmp)\n\treturn nil\n}\n\nfunc (el *TemplateList) UnmarshalJSON(data []byte) error {\n\ttmp := templateListCopy{}\n\terr := json.Unmarshal(data, &tmp)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*el = TemplateList(tmp)\n\treturn nil\n}\n<commit_msg>Fix case in json<commit_after>package smith\n\nimport (\n\t\"encoding\/json\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n)\n\ntype ResourceState string\n\nconst (\n\tNEW ResourceState = \"\"\n\tIN_PROGRESS ResourceState = \"InProgress\"\n\tREADY ResourceState = \"Ready\"\n\tERROR ResourceState = \"Error\"\n\tTERMINAL_ERROR ResourceState = \"TerminalError\"\n)\n\nconst (\n\tSmithDomain = \"smith.atlassian.com\"\n\tSmithResourceGroup = SmithDomain\n\n\tTemplateResourcePath = \"templates\"\n\tTemplateResourceName = \"template.\" + SmithDomain\n\tTemplateResourceVersion = \"v1\"\n\tTemplateResourceKind = \"Template\"\n\tTemplateResourceGroupVersion = SmithResourceGroup + \"\/\" + TemplateResourceVersion\n\n\tTemplateNameLabel = TemplateResourceName + \"\/TemplateName\"\n\n\t\/\/ See docs\/design\/managing-resources.md\n\tTprFieldPathAnnotation = SmithDomain + \"\/TprReadyWhenFieldPath\"\n\tTprFieldValueAnnotation = SmithDomain + \"\/TprReadyWhenFieldValue\"\n)\n\ntype TemplateList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\t\/\/ Standard list metadata.\n\tMetadata metav1.ListMeta `json:\"metadata,omitempty\"`\n\n\t\/\/ Items is a list of templates.\n\tItems []Template `json:\"items\"`\n}\n\n\/\/ GetObjectKind is required to satisfy Object interface.\nfunc (tl *TemplateList) GetObjectKind() schema.ObjectKind {\n\treturn &tl.TypeMeta\n}\n\n\/\/ GetListMeta is required to satisfy ListMetaAccessor interface.\nfunc (tl *TemplateList) GetListMeta() metav1.List {\n\treturn &tl.Metadata\n}\n\n\/\/ Template describes a resources template.\ntype Template struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\n\t\/\/ Standard object metadata\n\tMetadata metav1.ObjectMeta `json:\"metadata,omitempty\"`\n\n\t\/\/ Spec is the specification of the desired behavior of the Template.\n\tSpec TemplateSpec `json:\"spec,omitempty\"`\n\n\t\/\/ Status is most recently observed status of the Template.\n\tStatus TemplateStatus `json:\"status,omitempty\"`\n}\n\n\/\/ Required to satisfy Object interface\nfunc (t *Template) GetObjectKind() schema.ObjectKind {\n\treturn &t.TypeMeta\n}\n\n\/\/ Required to satisfy ObjectMetaAccessor interface\nfunc (t *Template) GetObjectMeta() metav1.Object {\n\treturn &t.Metadata\n}\n\ntype TemplateSpec struct {\n\tResources []Resource `json:\"resources\"`\n}\n\ntype TemplateStatus struct {\n\tState ResourceState `json:\"state,omitempty\"`\n}\n\n\/\/ DependencyRef is a reference to another Resource in the same template.\ntype DependencyRef string\n\ntype Resource struct {\n\t\/\/ Name of the resource for references.\n\tName string `json:\"name\"`\n\n\t\/\/ Explicit dependencies.\n\tDependsOn []DependencyRef `json:\"dependsOn,omitempty\"`\n\n\tSpec unstructured.Unstructured `json:\"spec\"`\n}\n\n\/\/ The code below is used only to work around a known problem with third-party\n\/\/ resources and ugorji. If\/when these issues are resolved, the code below\n\/\/ should no longer be required.\n\ntype templateListCopy TemplateList\ntype templateCopy Template\n\nfunc (e *Template) UnmarshalJSON(data []byte) error {\n\ttmp := templateCopy{}\n\terr := json.Unmarshal(data, &tmp)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*e = Template(tmp)\n\treturn nil\n}\n\nfunc (el *TemplateList) UnmarshalJSON(data []byte) error {\n\ttmp := templateListCopy{}\n\terr := json.Unmarshal(data, &tmp)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*el = TemplateList(tmp)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package gocsv\n\nimport (\n\t\"encoding\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ --------------------------------------------------------------------------\n\/\/ Conversion interfaces\n\n\/\/ TypeMarshaller is implemented by any value that has a MarshalCSV method\n\/\/ This converter is used to convert the value to it string representation\ntype TypeMarshaller interface {\n\tMarshalCSV() (string, error)\n}\n\n\/\/ Stringer is implemented by any value that has a String method\n\/\/ This converter is used to convert the value to it string representation\n\/\/ This converter will be used if your value does not implement TypeMarshaller\ntype Stringer interface {\n\tString() string\n}\n\n\/\/ TypeUnmarshaller is implemented by any value that has an UnmarshalCSV method\n\/\/ This converter is used to convert a string to your value representation of that string\ntype TypeUnmarshaller interface {\n\tUnmarshalCSV(string) error\n}\n\n\/\/ NoUnmarshalFuncError is the custom error type to be raised in case there is no unmarshal function defined on type\ntype NoUnmarshalFuncError struct {\n\tmsg string\n}\n\nfunc (e NoUnmarshalFuncError) Error() string {\n\treturn e.msg\n}\n\n\/\/ NoMarshalFuncError is the custom error type to be raised in case there is no marshal function defined on type\ntype NoMarshalFuncError struct {\n\tmsg string\n}\n\nfunc (e NoMarshalFuncError) Error() string {\n\treturn e.msg\n}\n\nvar (\n\tstringerType = reflect.TypeOf((*Stringer)(nil)).Elem()\n\tmarshallerType = reflect.TypeOf((*TypeMarshaller)(nil)).Elem()\n\tunMarshallerType = reflect.TypeOf((*TypeUnmarshaller)(nil)).Elem()\n\ttextMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()\n\ttextUnMarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()\n)\n\n\/\/ --------------------------------------------------------------------------\n\/\/ Conversion helpers\n\nfunc toString(in interface{}) (string, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\treturn inValue.String(), nil\n\tcase reflect.Bool:\n\t\tb := inValue.Bool()\n\t\tif b {\n\t\t\treturn \"true\", nil\n\t\t}\n\t\treturn \"false\", nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn fmt.Sprintf(\"%v\", inValue.Int()), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn fmt.Sprintf(\"%v\", inValue.Uint()), nil\n\tcase reflect.Float32:\n\t\treturn strconv.FormatFloat(inValue.Float(), byte('f'), -1, 32), nil\n\tcase reflect.Float64:\n\t\treturn strconv.FormatFloat(inValue.Float(), byte('f'), -1, 64), nil\n\t}\n\treturn \"\", fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to string\")\n}\n\nfunc toBool(in interface{}) (bool, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\ts := inValue.String()\n\t\tswitch s {\n\t\tcase \"yes\":\n\t\t\treturn true, nil\n\t\tcase \"no\", \"\":\n\t\t\treturn false, nil\n\t\tdefault:\n\t\t\treturn strconv.ParseBool(s)\n\t\t}\n\tcase reflect.Bool:\n\t\treturn inValue.Bool(), nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\ti := inValue.Int()\n\t\tif i != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\ti := inValue.Uint()\n\t\tif i != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\tcase reflect.Float32, reflect.Float64:\n\t\tf := inValue.Float()\n\t\tif f != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t}\n\treturn false, fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to bool\")\n}\n\nfunc toInt(in interface{}) (int64, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\ts := strings.TrimSpace(inValue.String())\n\t\tif s == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn strconv.ParseInt(s, 0, 64)\n\tcase reflect.Bool:\n\t\tif inValue.Bool() {\n\t\t\treturn 1, nil\n\t\t}\n\t\treturn 0, nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn inValue.Int(), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn int64(inValue.Uint()), nil\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn int64(inValue.Float()), nil\n\t}\n\treturn 0, fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to int\")\n}\n\nfunc toUint(in interface{}) (uint64, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\ts := strings.TrimSpace(inValue.String())\n\t\tif s == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\n\t\t\/\/ support the float input\n\t\tif strings.Contains(s, \".\") {\n\t\t\tf, err := strconv.ParseFloat(s, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\treturn uint64(f), nil\n\t\t}\n\t\treturn strconv.ParseUint(s, 0, 64)\n\tcase reflect.Bool:\n\t\tif inValue.Bool() {\n\t\t\treturn 1, nil\n\t\t}\n\t\treturn 0, nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn uint64(inValue.Int()), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn inValue.Uint(), nil\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn uint64(inValue.Float()), nil\n\t}\n\treturn 0, fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to uint\")\n}\n\nfunc toFloat(in interface{}) (float64, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\ts := strings.TrimSpace(inValue.String())\n\t\tif s == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn strconv.ParseFloat(s, 64)\n\tcase reflect.Bool:\n\t\tif inValue.Bool() {\n\t\t\treturn 1, nil\n\t\t}\n\t\treturn 0, nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn float64(inValue.Int()), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn float64(inValue.Uint()), nil\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn inValue.Float(), nil\n\t}\n\treturn 0, fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to float\")\n}\n\nfunc setField(field reflect.Value, value string) error {\n\tif field.Kind() == reflect.Ptr {\n\t\tif field.IsNil() {\n\t\t\tfield.Set(reflect.New(field.Type().Elem()))\n\t\t}\n\t\tfield = field.Elem()\n\t}\n\n\tswitch field.Interface().(type) {\n\tcase string:\n\t\ts, err := toString(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetString(s)\n\tcase bool:\n\t\tb, err := toBool(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetBool(b)\n\tcase int, int8, int16, int32, int64:\n\t\ti, err := toInt(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetInt(i)\n\tcase uint, uint8, uint16, uint32, uint64:\n\t\tui, err := toUint(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetUint(ui)\n\tcase float32, float64:\n\t\tf, err := toFloat(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetFloat(f)\n\tdefault:\n\t\t\/\/ Not a native type, check for unmarshal method\n\t\tif err := unmarshall(field, value); err != nil {\n\t\t\tif _, ok := err.(NoUnmarshalFuncError); !ok {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t\/\/ Could not unmarshal, check for kind, e.g. renamed type from basic type\n\t\t\tswitch field.Kind() {\n\t\t\tcase reflect.String:\n\t\t\t\ts, err := toString(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetString(s)\n\t\t\tcase reflect.Bool:\n\t\t\t\tb, err := toBool(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetBool(b)\n\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t\ti, err := toInt(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetInt(i)\n\t\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\t\tui, err := toUint(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetUint(ui)\n\t\t\tcase reflect.Float32, reflect.Float64:\n\t\t\t\tf, err := toFloat(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetFloat(f)\n\t\t\tdefault:\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getFieldAsString(field reflect.Value) (str string, err error) {\n\tswitch field.Kind() {\n\tcase reflect.Interface:\n\tcase reflect.Ptr:\n\t\tif field.IsNil() {\n\t\t\treturn \"\", nil\n\t\t}\n\t\treturn getFieldAsString(field.Elem())\n\tdefault:\n\t\t\/\/ Check if field is go native type\n\t\tswitch field.Interface().(type) {\n\t\tcase string:\n\t\t\treturn field.String(), nil\n\t\tcase bool:\n\t\t\tstr, err = toString(field.Bool())\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tcase int, int8, int16, int32, int64:\n\t\t\tstr, err = toString(field.Int())\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tcase uint, uint8, uint16, uint32, uint64:\n\t\t\tstr, err = toString(field.Uint())\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tcase float32:\n\t\t\tstr, err = toString(float32(field.Float()))\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tcase float64:\n\t\t\tstr, err = toString(field.Float())\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tdefault:\n\t\t\t\/\/ Not a native type, check for marshal method\n\t\t\tstr, err = marshall(field)\n\t\t\tif err != nil {\n\t\t\t\tif _, ok := err.(NoMarshalFuncError); !ok {\n\t\t\t\t\treturn str, err\n\t\t\t\t}\n\t\t\t\t\/\/ If not marshal method, is field compatible with\/renamed from native type\n\t\t\t\tswitch field.Kind() {\n\t\t\t\tcase reflect.String:\n\t\t\t\t\treturn field.String(), nil\n\t\t\t\tcase reflect.Bool:\n\t\t\t\t\tstr, err = toString(field.Bool())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t\t\tstr, err = toString(field.Int())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\t\t\tstr, err = toString(field.Uint())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\tcase reflect.Float32:\n\t\t\t\t\tstr, err = toString(float32(field.Float()))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\tcase reflect.Float64:\n\t\t\t\t\tstr, err = toString(field.Float())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn str, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn str, nil\n}\n\n\/\/ --------------------------------------------------------------------------\n\/\/ Un\/serializations helpers\n\nfunc unmarshall(field reflect.Value, value string) error {\n\tdupField := field\n\tunMarshallIt := func(finalField reflect.Value) error {\n\t\tif finalField.CanInterface() && finalField.Type().Implements(unMarshallerType) {\n\t\t\tif err := finalField.Interface().(TypeUnmarshaller).UnmarshalCSV(value); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t} else if finalField.CanInterface() && finalField.Type().Implements(textUnMarshalerType) { \/\/ Otherwise try to use TextMarshaller\n\t\t\tif err := finalField.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(value)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\n\t\treturn NoUnmarshalFuncError{\"No known conversion from string to \" + field.Type().String() + \", \" + field.Type().String() + \" does not implement TypeUnmarshaller\"}\n\t}\n\tfor dupField.Kind() == reflect.Interface || dupField.Kind() == reflect.Ptr {\n\t\tif dupField.IsNil() {\n\t\t\tdupField = reflect.New(field.Type().Elem())\n\t\t\tfield.Set(dupField)\n\t\t\treturn unMarshallIt(dupField)\n\t\t\tbreak\n\t\t}\n\t\tdupField = dupField.Elem()\n\t}\n\tif dupField.CanAddr() {\n\t\treturn unMarshallIt(dupField.Addr())\n\t}\n\treturn NoUnmarshalFuncError{\"No known conversion from string to \" + field.Type().String() + \", \" + field.Type().String() + \" does not implement TypeUnmarshaller\"}\n}\n\nfunc marshall(field reflect.Value) (value string, err error) {\n\tdupField := field\n\tmarshallIt := func(finalField reflect.Value) (string, error) {\n\t\tif finalField.CanInterface() && finalField.Type().Implements(marshallerType) { \/\/ Use TypeMarshaller when possible\n\t\t\treturn finalField.Interface().(TypeMarshaller).MarshalCSV()\n\t\t} else if finalField.CanInterface() && finalField.Type().Implements(stringerType) { \/\/ Otherwise try to use Stringer\n\t\t\treturn finalField.Interface().(Stringer).String(), nil\n\t\t} else if finalField.CanInterface() && finalField.Type().Implements(textMarshalerType) { \/\/ Otherwise try to use TextMarshaller\n\t\t\ttext, err := finalField.Interface().(encoding.TextMarshaler).MarshalText()\n\t\t\treturn string(text), err\n\t\t}\n\n\t\treturn value, NoMarshalFuncError{\"No known conversion from \" + field.Type().String() + \" to string, \" + field.Type().String() + \" does not implement TypeMarshaller nor Stringer\"}\n\t}\n\tfor dupField.Kind() == reflect.Interface || dupField.Kind() == reflect.Ptr {\n\t\tif dupField.IsNil() {\n\t\t\treturn value, nil\n\t\t}\n\t\tdupField = dupField.Elem()\n\t}\n\tif dupField.CanAddr() {\n\t\treturn marshallIt(dupField.Addr())\n\t}\n\treturn value, NoMarshalFuncError{\"No known conversion from \" + field.Type().String() + \" to string, \" + field.Type().String() + \" does not implement TypeMarshaller nor Stringer\"}\n}\n<commit_msg>fix vet for unreachable code<commit_after>package gocsv\n\nimport (\n\t\"encoding\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ --------------------------------------------------------------------------\n\/\/ Conversion interfaces\n\n\/\/ TypeMarshaller is implemented by any value that has a MarshalCSV method\n\/\/ This converter is used to convert the value to it string representation\ntype TypeMarshaller interface {\n\tMarshalCSV() (string, error)\n}\n\n\/\/ Stringer is implemented by any value that has a String method\n\/\/ This converter is used to convert the value to it string representation\n\/\/ This converter will be used if your value does not implement TypeMarshaller\ntype Stringer interface {\n\tString() string\n}\n\n\/\/ TypeUnmarshaller is implemented by any value that has an UnmarshalCSV method\n\/\/ This converter is used to convert a string to your value representation of that string\ntype TypeUnmarshaller interface {\n\tUnmarshalCSV(string) error\n}\n\n\/\/ NoUnmarshalFuncError is the custom error type to be raised in case there is no unmarshal function defined on type\ntype NoUnmarshalFuncError struct {\n\tmsg string\n}\n\nfunc (e NoUnmarshalFuncError) Error() string {\n\treturn e.msg\n}\n\n\/\/ NoMarshalFuncError is the custom error type to be raised in case there is no marshal function defined on type\ntype NoMarshalFuncError struct {\n\tmsg string\n}\n\nfunc (e NoMarshalFuncError) Error() string {\n\treturn e.msg\n}\n\nvar (\n\tstringerType = reflect.TypeOf((*Stringer)(nil)).Elem()\n\tmarshallerType = reflect.TypeOf((*TypeMarshaller)(nil)).Elem()\n\tunMarshallerType = reflect.TypeOf((*TypeUnmarshaller)(nil)).Elem()\n\ttextMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()\n\ttextUnMarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()\n)\n\n\/\/ --------------------------------------------------------------------------\n\/\/ Conversion helpers\n\nfunc toString(in interface{}) (string, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\treturn inValue.String(), nil\n\tcase reflect.Bool:\n\t\tb := inValue.Bool()\n\t\tif b {\n\t\t\treturn \"true\", nil\n\t\t}\n\t\treturn \"false\", nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn fmt.Sprintf(\"%v\", inValue.Int()), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn fmt.Sprintf(\"%v\", inValue.Uint()), nil\n\tcase reflect.Float32:\n\t\treturn strconv.FormatFloat(inValue.Float(), byte('f'), -1, 32), nil\n\tcase reflect.Float64:\n\t\treturn strconv.FormatFloat(inValue.Float(), byte('f'), -1, 64), nil\n\t}\n\treturn \"\", fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to string\")\n}\n\nfunc toBool(in interface{}) (bool, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\ts := inValue.String()\n\t\tswitch s {\n\t\tcase \"yes\":\n\t\t\treturn true, nil\n\t\tcase \"no\", \"\":\n\t\t\treturn false, nil\n\t\tdefault:\n\t\t\treturn strconv.ParseBool(s)\n\t\t}\n\tcase reflect.Bool:\n\t\treturn inValue.Bool(), nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\ti := inValue.Int()\n\t\tif i != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\ti := inValue.Uint()\n\t\tif i != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\tcase reflect.Float32, reflect.Float64:\n\t\tf := inValue.Float()\n\t\tif f != 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t}\n\treturn false, fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to bool\")\n}\n\nfunc toInt(in interface{}) (int64, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\ts := strings.TrimSpace(inValue.String())\n\t\tif s == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn strconv.ParseInt(s, 0, 64)\n\tcase reflect.Bool:\n\t\tif inValue.Bool() {\n\t\t\treturn 1, nil\n\t\t}\n\t\treturn 0, nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn inValue.Int(), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn int64(inValue.Uint()), nil\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn int64(inValue.Float()), nil\n\t}\n\treturn 0, fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to int\")\n}\n\nfunc toUint(in interface{}) (uint64, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\ts := strings.TrimSpace(inValue.String())\n\t\tif s == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\n\t\t\/\/ support the float input\n\t\tif strings.Contains(s, \".\") {\n\t\t\tf, err := strconv.ParseFloat(s, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\treturn uint64(f), nil\n\t\t}\n\t\treturn strconv.ParseUint(s, 0, 64)\n\tcase reflect.Bool:\n\t\tif inValue.Bool() {\n\t\t\treturn 1, nil\n\t\t}\n\t\treturn 0, nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn uint64(inValue.Int()), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn inValue.Uint(), nil\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn uint64(inValue.Float()), nil\n\t}\n\treturn 0, fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to uint\")\n}\n\nfunc toFloat(in interface{}) (float64, error) {\n\tinValue := reflect.ValueOf(in)\n\n\tswitch inValue.Kind() {\n\tcase reflect.String:\n\t\ts := strings.TrimSpace(inValue.String())\n\t\tif s == \"\" {\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn strconv.ParseFloat(s, 64)\n\tcase reflect.Bool:\n\t\tif inValue.Bool() {\n\t\t\treturn 1, nil\n\t\t}\n\t\treturn 0, nil\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn float64(inValue.Int()), nil\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn float64(inValue.Uint()), nil\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn inValue.Float(), nil\n\t}\n\treturn 0, fmt.Errorf(\"No known conversion from \" + inValue.Type().String() + \" to float\")\n}\n\nfunc setField(field reflect.Value, value string) error {\n\tif field.Kind() == reflect.Ptr {\n\t\tif field.IsNil() {\n\t\t\tfield.Set(reflect.New(field.Type().Elem()))\n\t\t}\n\t\tfield = field.Elem()\n\t}\n\n\tswitch field.Interface().(type) {\n\tcase string:\n\t\ts, err := toString(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetString(s)\n\tcase bool:\n\t\tb, err := toBool(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetBool(b)\n\tcase int, int8, int16, int32, int64:\n\t\ti, err := toInt(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetInt(i)\n\tcase uint, uint8, uint16, uint32, uint64:\n\t\tui, err := toUint(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetUint(ui)\n\tcase float32, float64:\n\t\tf, err := toFloat(value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfield.SetFloat(f)\n\tdefault:\n\t\t\/\/ Not a native type, check for unmarshal method\n\t\tif err := unmarshall(field, value); err != nil {\n\t\t\tif _, ok := err.(NoUnmarshalFuncError); !ok {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t\/\/ Could not unmarshal, check for kind, e.g. renamed type from basic type\n\t\t\tswitch field.Kind() {\n\t\t\tcase reflect.String:\n\t\t\t\ts, err := toString(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetString(s)\n\t\t\tcase reflect.Bool:\n\t\t\t\tb, err := toBool(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetBool(b)\n\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t\ti, err := toInt(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetInt(i)\n\t\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\t\tui, err := toUint(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetUint(ui)\n\t\t\tcase reflect.Float32, reflect.Float64:\n\t\t\t\tf, err := toFloat(value)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tfield.SetFloat(f)\n\t\t\tdefault:\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getFieldAsString(field reflect.Value) (str string, err error) {\n\tswitch field.Kind() {\n\tcase reflect.Interface:\n\tcase reflect.Ptr:\n\t\tif field.IsNil() {\n\t\t\treturn \"\", nil\n\t\t}\n\t\treturn getFieldAsString(field.Elem())\n\tdefault:\n\t\t\/\/ Check if field is go native type\n\t\tswitch field.Interface().(type) {\n\t\tcase string:\n\t\t\treturn field.String(), nil\n\t\tcase bool:\n\t\t\tstr, err = toString(field.Bool())\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tcase int, int8, int16, int32, int64:\n\t\t\tstr, err = toString(field.Int())\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tcase uint, uint8, uint16, uint32, uint64:\n\t\t\tstr, err = toString(field.Uint())\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tcase float32:\n\t\t\tstr, err = toString(float32(field.Float()))\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tcase float64:\n\t\t\tstr, err = toString(field.Float())\n\t\t\tif err != nil {\n\t\t\t\treturn str, err\n\t\t\t}\n\t\tdefault:\n\t\t\t\/\/ Not a native type, check for marshal method\n\t\t\tstr, err = marshall(field)\n\t\t\tif err != nil {\n\t\t\t\tif _, ok := err.(NoMarshalFuncError); !ok {\n\t\t\t\t\treturn str, err\n\t\t\t\t}\n\t\t\t\t\/\/ If not marshal method, is field compatible with\/renamed from native type\n\t\t\t\tswitch field.Kind() {\n\t\t\t\tcase reflect.String:\n\t\t\t\t\treturn field.String(), nil\n\t\t\t\tcase reflect.Bool:\n\t\t\t\t\tstr, err = toString(field.Bool())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\t\t\t\tstr, err = toString(field.Int())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\t\t\t\tstr, err = toString(field.Uint())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\tcase reflect.Float32:\n\t\t\t\t\tstr, err = toString(float32(field.Float()))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\tcase reflect.Float64:\n\t\t\t\t\tstr, err = toString(field.Float())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn str, err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn str, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn str, nil\n}\n\n\/\/ --------------------------------------------------------------------------\n\/\/ Un\/serializations helpers\n\nfunc unmarshall(field reflect.Value, value string) error {\n\tdupField := field\n\tunMarshallIt := func(finalField reflect.Value) error {\n\t\tif finalField.CanInterface() && finalField.Type().Implements(unMarshallerType) {\n\t\t\tif err := finalField.Interface().(TypeUnmarshaller).UnmarshalCSV(value); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t} else if finalField.CanInterface() && finalField.Type().Implements(textUnMarshalerType) { \/\/ Otherwise try to use TextMarshaller\n\t\t\tif err := finalField.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(value)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\n\t\treturn NoUnmarshalFuncError{\"No known conversion from string to \" + field.Type().String() + \", \" + field.Type().String() + \" does not implement TypeUnmarshaller\"}\n\t}\n\tfor dupField.Kind() == reflect.Interface || dupField.Kind() == reflect.Ptr {\n\t\tif dupField.IsNil() {\n\t\t\tdupField = reflect.New(field.Type().Elem())\n\t\t\tfield.Set(dupField)\n\t\t\treturn unMarshallIt(dupField)\n\t\t}\n\t\tdupField = dupField.Elem()\n\t}\n\tif dupField.CanAddr() {\n\t\treturn unMarshallIt(dupField.Addr())\n\t}\n\treturn NoUnmarshalFuncError{\"No known conversion from string to \" + field.Type().String() + \", \" + field.Type().String() + \" does not implement TypeUnmarshaller\"}\n}\n\nfunc marshall(field reflect.Value) (value string, err error) {\n\tdupField := field\n\tmarshallIt := func(finalField reflect.Value) (string, error) {\n\t\tif finalField.CanInterface() && finalField.Type().Implements(marshallerType) { \/\/ Use TypeMarshaller when possible\n\t\t\treturn finalField.Interface().(TypeMarshaller).MarshalCSV()\n\t\t} else if finalField.CanInterface() && finalField.Type().Implements(stringerType) { \/\/ Otherwise try to use Stringer\n\t\t\treturn finalField.Interface().(Stringer).String(), nil\n\t\t} else if finalField.CanInterface() && finalField.Type().Implements(textMarshalerType) { \/\/ Otherwise try to use TextMarshaller\n\t\t\ttext, err := finalField.Interface().(encoding.TextMarshaler).MarshalText()\n\t\t\treturn string(text), err\n\t\t}\n\n\t\treturn value, NoMarshalFuncError{\"No known conversion from \" + field.Type().String() + \" to string, \" + field.Type().String() + \" does not implement TypeMarshaller nor Stringer\"}\n\t}\n\tfor dupField.Kind() == reflect.Interface || dupField.Kind() == reflect.Ptr {\n\t\tif dupField.IsNil() {\n\t\t\treturn value, nil\n\t\t}\n\t\tdupField = dupField.Elem()\n\t}\n\tif dupField.CanAddr() {\n\t\treturn marshallIt(dupField.Addr())\n\t}\n\treturn value, NoMarshalFuncError{\"No known conversion from \" + field.Type().String() + \" to string, \" + field.Type().String() + \" does not implement TypeMarshaller nor Stringer\"}\n}\n<|endoftext|>"} {"text":"<commit_before>package msgpack\n\nimport (\n\t\"encoding\"\n\t\"fmt\"\n\t\"log\"\n\t\"reflect\"\n\t\"sync\"\n\n\t\"github.com\/vmihailenco\/tagparser\"\n)\n\nvar errorType = reflect.TypeOf((*error)(nil)).Elem()\n\nvar (\n\tcustomEncoderType = reflect.TypeOf((*CustomEncoder)(nil)).Elem()\n\tcustomDecoderType = reflect.TypeOf((*CustomDecoder)(nil)).Elem()\n)\n\nvar (\n\tmarshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()\n\tunmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()\n)\n\nvar (\n\tbinaryMarshalerType = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()\n\tbinaryUnmarshalerType = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()\n)\n\ntype (\n\tencoderFunc func(*Encoder, reflect.Value) error\n\tdecoderFunc func(*Decoder, reflect.Value) error\n)\n\nvar (\n\ttypeEncMap sync.Map\n\ttypeDecMap sync.Map\n)\n\n\/\/ Register registers encoder and decoder functions for a value.\n\/\/ This is low level API and in most cases you should prefer implementing\n\/\/ Marshaler\/CustomEncoder and Unmarshaler\/CustomDecoder interfaces.\nfunc Register(value interface{}, enc encoderFunc, dec decoderFunc) {\n\ttyp := reflect.TypeOf(value)\n\tif enc != nil {\n\t\ttypeEncMap.Store(typ, enc)\n\t}\n\tif dec != nil {\n\t\ttypeDecMap.Store(typ, dec)\n\t}\n}\n\n\/\/------------------------------------------------------------------------------\n\nvar (\n\tstructs = newStructCache(false)\n\tjsonStructs = newStructCache(true)\n)\n\ntype structCache struct {\n\tm sync.Map\n\n\tuseJSONTag bool\n}\n\nfunc newStructCache(useJSONTag bool) *structCache {\n\treturn &structCache{\n\t\tuseJSONTag: useJSONTag,\n\t}\n}\n\nfunc (m *structCache) Fields(typ reflect.Type) *fields {\n\tif v, ok := m.m.Load(typ); ok {\n\t\treturn v.(*fields)\n\t}\n\n\tfs := getFields(typ, m.useJSONTag)\n\tm.m.Store(typ, fs)\n\treturn fs\n}\n\n\/\/------------------------------------------------------------------------------\n\ntype field struct {\n\tname string\n\tindex []int\n\tomitEmpty bool\n\tencoder encoderFunc\n\tdecoder decoderFunc\n}\n\nfunc (f *field) Omit(strct reflect.Value) bool {\n\tv, isNil := fieldByIndex(strct, f.index)\n\tif isNil {\n\t\treturn true\n\t}\n\treturn f.omitEmpty && isEmptyValue(v)\n}\n\nfunc (f *field) EncodeValue(e *Encoder, strct reflect.Value) error {\n\tv, isNil := fieldByIndex(strct, f.index)\n\tif isNil {\n\t\treturn e.EncodeNil()\n\t}\n\treturn f.encoder(e, v)\n}\n\nfunc (f *field) DecodeValue(d *Decoder, strct reflect.Value) error {\n\tv := fieldByIndexAlloc(strct, f.index)\n\treturn f.decoder(d, v)\n}\n\n\/\/------------------------------------------------------------------------------\n\ntype fields struct {\n\tType reflect.Type\n\tMap map[string]*field\n\tList []*field\n\tAsArray bool\n\n\thasOmitEmpty bool\n}\n\nfunc newFields(typ reflect.Type) *fields {\n\treturn &fields{\n\t\tType: typ,\n\t\tMap: make(map[string]*field, typ.NumField()),\n\t\tList: make([]*field, 0, typ.NumField()),\n\t}\n}\n\nfunc (fs *fields) Add(field *field) {\n\tfs.warnIfFieldExists(field.name)\n\tfs.Map[field.name] = field\n\tfs.List = append(fs.List, field)\n\tif field.omitEmpty {\n\t\tfs.hasOmitEmpty = true\n\t}\n}\n\nfunc (fs *fields) warnIfFieldExists(name string) {\n\tif _, ok := fs.Map[name]; ok {\n\t\tlog.Printf(\"msgpack: %s already has field=%s\", fs.Type, name)\n\t}\n}\n\nfunc (fs *fields) OmitEmpty(strct reflect.Value) []*field {\n\tif !fs.hasOmitEmpty {\n\t\treturn fs.List\n\t}\n\n\tfields := make([]*field, 0, len(fs.List))\n\n\tfor _, f := range fs.List {\n\t\tif !f.Omit(strct) {\n\t\t\tfields = append(fields, f)\n\t\t}\n\t}\n\n\treturn fields\n}\n\nfunc getFields(typ reflect.Type, useJSONTag bool) *fields {\n\tfs := newFields(typ)\n\n\tvar omitEmpty bool\n\tfor i := 0; i < typ.NumField(); i++ {\n\t\tf := typ.Field(i)\n\n\t\ttagStr := f.Tag.Get(\"msgpack\")\n\t\tif useJSONTag && tagStr == \"\" {\n\t\t\ttagStr = f.Tag.Get(\"json\")\n\t\t}\n\n\t\ttag := tagparser.Parse(tagStr)\n\t\tif tag.Name == \"-\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif f.Name == \"_msgpack\" {\n\t\t\tif tag.HasOption(\"asArray\") {\n\t\t\t\tfs.AsArray = true\n\t\t\t}\n\t\t\tif tag.HasOption(\"omitempty\") {\n\t\t\t\tomitEmpty = true\n\t\t\t}\n\t\t}\n\n\t\tif f.PkgPath != \"\" && !f.Anonymous {\n\t\t\tcontinue\n\t\t}\n\n\t\tfield := &field{\n\t\t\tname: tag.Name,\n\t\t\tindex: f.Index,\n\t\t\tomitEmpty: omitEmpty || tag.HasOption(\"omitempty\"),\n\t\t}\n\n\t\tif tag.HasOption(\"intern\") {\n\t\t\tswitch f.Type.Kind() {\n\t\t\tcase reflect.Interface:\n\t\t\t\tfield.encoder = encodeInternInterfaceValue\n\t\t\t\tfield.decoder = decodeInternInterfaceValue\n\t\t\tcase reflect.String:\n\t\t\t\tfield.encoder = encodeInternStringValue\n\t\t\t\tfield.decoder = decodeInternStringValue\n\t\t\tdefault:\n\t\t\t\terr := fmt.Errorf(\"msgpack: intern strings are not supported on %s\", f.Type)\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t} else {\n\t\t\tfield.encoder = getEncoder(f.Type)\n\t\t\tfield.decoder = getDecoder(f.Type)\n\t\t}\n\n\t\tif field.name == \"\" {\n\t\t\tfield.name = f.Name\n\t\t}\n\n\t\tif f.Anonymous && !tag.HasOption(\"noinline\") {\n\t\t\tinline := tag.HasOption(\"inline\")\n\t\t\tif inline {\n\t\t\t\tinlineFields(fs, f.Type, field, useJSONTag)\n\t\t\t} else {\n\t\t\t\tinline = shouldInline(fs, f.Type, field, useJSONTag)\n\t\t\t}\n\n\t\t\tif inline {\n\t\t\t\tif _, ok := fs.Map[field.name]; ok {\n\t\t\t\t\tlog.Printf(\"msgpack: %s already has field=%s\", fs.Type, field.name)\n\t\t\t\t}\n\t\t\t\tfs.Map[field.name] = field\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tfs.Add(field)\n\n\t\tif alias, ok := tag.Options[\"alias\"]; ok {\n\t\t\tfs.warnIfFieldExists(alias)\n\t\t\tfs.Map[alias] = field\n\t\t}\n\t}\n\treturn fs\n}\n\nvar (\n\tencodeStructValuePtr uintptr\n\tdecodeStructValuePtr uintptr\n)\n\n\/\/nolint:gochecknoinits\nfunc init() {\n\tencodeStructValuePtr = reflect.ValueOf(encodeStructValue).Pointer()\n\tdecodeStructValuePtr = reflect.ValueOf(decodeStructValue).Pointer()\n}\n\nfunc inlineFields(fs *fields, typ reflect.Type, f *field, useJSONTag bool) {\n\tinlinedFields := getFields(typ, useJSONTag).List\n\tfor _, field := range inlinedFields {\n\t\tif _, ok := fs.Map[field.name]; ok {\n\t\t\t\/\/ Don't inline shadowed fields.\n\t\t\tcontinue\n\t\t}\n\t\tfield.index = append(f.index, field.index...)\n\t\tfs.Add(field)\n\t}\n}\n\nfunc shouldInline(fs *fields, typ reflect.Type, f *field, useJSONTag bool) bool {\n\tvar encoder encoderFunc\n\tvar decoder decoderFunc\n\n\tif typ.Kind() == reflect.Struct {\n\t\tencoder = f.encoder\n\t\tdecoder = f.decoder\n\t} else {\n\t\tfor typ.Kind() == reflect.Ptr {\n\t\t\ttyp = typ.Elem()\n\t\t\tencoder = getEncoder(typ)\n\t\t\tdecoder = getDecoder(typ)\n\t\t}\n\t\tif typ.Kind() != reflect.Struct {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif reflect.ValueOf(encoder).Pointer() != encodeStructValuePtr {\n\t\treturn false\n\t}\n\tif reflect.ValueOf(decoder).Pointer() != decodeStructValuePtr {\n\t\treturn false\n\t}\n\n\tinlinedFields := getFields(typ, useJSONTag).List\n\tfor _, field := range inlinedFields {\n\t\tif _, ok := fs.Map[field.name]; ok {\n\t\t\t\/\/ Don't auto inline if there are shadowed fields.\n\t\t\treturn false\n\t\t}\n\t}\n\n\tfor _, field := range inlinedFields {\n\t\tfield.index = append(f.index, field.index...)\n\t\tfs.Add(field)\n\t}\n\treturn true\n}\n\nfunc isEmptyValue(v reflect.Value) bool {\n\tswitch v.Kind() {\n\tcase reflect.Array, reflect.Map, reflect.Slice, reflect.String:\n\t\treturn v.Len() == 0\n\tcase reflect.Bool:\n\t\treturn !v.Bool()\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn v.Int() == 0\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\treturn v.Uint() == 0\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn v.Float() == 0\n\tcase reflect.Interface, reflect.Ptr:\n\t\treturn v.IsNil()\n\t}\n\treturn false\n}\n\nfunc fieldByIndex(v reflect.Value, index []int) (_ reflect.Value, isNil bool) {\n\tif len(index) == 1 {\n\t\treturn v.Field(index[0]), false\n\t}\n\n\tfor i, idx := range index {\n\t\tif i > 0 {\n\t\t\tif v.Kind() == reflect.Ptr {\n\t\t\t\tif v.IsNil() {\n\t\t\t\t\treturn v, true\n\t\t\t\t}\n\t\t\t\tv = v.Elem()\n\t\t\t}\n\t\t}\n\t\tv = v.Field(idx)\n\t}\n\n\treturn v, false\n}\n\nfunc fieldByIndexAlloc(v reflect.Value, index []int) reflect.Value {\n\tif len(index) == 1 {\n\t\treturn v.Field(index[0])\n\t}\n\n\tfor i, idx := range index {\n\t\tif i > 0 {\n\t\t\tvar ok bool\n\t\t\tv, ok = indirectNew(v)\n\t\t\tif !ok {\n\t\t\t\treturn v\n\t\t\t}\n\t\t}\n\t\tv = v.Field(idx)\n\t}\n\n\treturn v\n}\n\nfunc indirectNew(v reflect.Value) (reflect.Value, bool) {\n\tif v.Kind() == reflect.Ptr {\n\t\tif v.IsNil() {\n\t\t\tif !v.CanSet() {\n\t\t\t\treturn v, false\n\t\t\t}\n\t\t\telemType := v.Type().Elem()\n\t\t\tif elemType.Kind() != reflect.Struct {\n\t\t\t\treturn v, false\n\t\t\t}\n\t\t\tv.Set(reflect.New(elemType))\n\t\t}\n\t\tv = v.Elem()\n\t}\n\treturn v, true\n}\n<commit_msg>Cleanup<commit_after>package msgpack\n\nimport (\n\t\"encoding\"\n\t\"fmt\"\n\t\"log\"\n\t\"reflect\"\n\t\"sync\"\n\n\t\"github.com\/vmihailenco\/tagparser\"\n)\n\nvar errorType = reflect.TypeOf((*error)(nil)).Elem()\n\nvar (\n\tcustomEncoderType = reflect.TypeOf((*CustomEncoder)(nil)).Elem()\n\tcustomDecoderType = reflect.TypeOf((*CustomDecoder)(nil)).Elem()\n)\n\nvar (\n\tmarshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()\n\tunmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()\n)\n\nvar (\n\tbinaryMarshalerType = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem()\n\tbinaryUnmarshalerType = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem()\n)\n\ntype (\n\tencoderFunc func(*Encoder, reflect.Value) error\n\tdecoderFunc func(*Decoder, reflect.Value) error\n)\n\nvar (\n\ttypeEncMap sync.Map\n\ttypeDecMap sync.Map\n)\n\n\/\/ Register registers encoder and decoder functions for a value.\n\/\/ This is low level API and in most cases you should prefer implementing\n\/\/ Marshaler\/CustomEncoder and Unmarshaler\/CustomDecoder interfaces.\nfunc Register(value interface{}, enc encoderFunc, dec decoderFunc) {\n\ttyp := reflect.TypeOf(value)\n\tif enc != nil {\n\t\ttypeEncMap.Store(typ, enc)\n\t}\n\tif dec != nil {\n\t\ttypeDecMap.Store(typ, dec)\n\t}\n}\n\n\/\/------------------------------------------------------------------------------\n\nvar (\n\tstructs = newStructCache(false)\n\tjsonStructs = newStructCache(true)\n)\n\ntype structCache struct {\n\tm sync.Map\n\n\tuseJSONTag bool\n}\n\nfunc newStructCache(useJSONTag bool) *structCache {\n\treturn &structCache{\n\t\tuseJSONTag: useJSONTag,\n\t}\n}\n\nfunc (m *structCache) Fields(typ reflect.Type) *fields {\n\tif v, ok := m.m.Load(typ); ok {\n\t\treturn v.(*fields)\n\t}\n\n\tfs := getFields(typ, m.useJSONTag)\n\tm.m.Store(typ, fs)\n\treturn fs\n}\n\n\/\/------------------------------------------------------------------------------\n\ntype field struct {\n\tname string\n\tindex []int\n\tomitEmpty bool\n\tencoder encoderFunc\n\tdecoder decoderFunc\n}\n\nfunc (f *field) Omit(strct reflect.Value) bool {\n\tv, ok := fieldByIndex(strct, f.index)\n\tif !ok {\n\t\treturn true\n\t}\n\treturn f.omitEmpty && isEmptyValue(v)\n}\n\nfunc (f *field) EncodeValue(e *Encoder, strct reflect.Value) error {\n\tv, ok := fieldByIndex(strct, f.index)\n\tif !ok {\n\t\treturn e.EncodeNil()\n\t}\n\treturn f.encoder(e, v)\n}\n\nfunc (f *field) DecodeValue(d *Decoder, strct reflect.Value) error {\n\tv := fieldByIndexAlloc(strct, f.index)\n\treturn f.decoder(d, v)\n}\n\n\/\/------------------------------------------------------------------------------\n\ntype fields struct {\n\tType reflect.Type\n\tMap map[string]*field\n\tList []*field\n\tAsArray bool\n\n\thasOmitEmpty bool\n}\n\nfunc newFields(typ reflect.Type) *fields {\n\treturn &fields{\n\t\tType: typ,\n\t\tMap: make(map[string]*field, typ.NumField()),\n\t\tList: make([]*field, 0, typ.NumField()),\n\t}\n}\n\nfunc (fs *fields) Add(field *field) {\n\tfs.warnIfFieldExists(field.name)\n\tfs.Map[field.name] = field\n\tfs.List = append(fs.List, field)\n\tif field.omitEmpty {\n\t\tfs.hasOmitEmpty = true\n\t}\n}\n\nfunc (fs *fields) warnIfFieldExists(name string) {\n\tif _, ok := fs.Map[name]; ok {\n\t\tlog.Printf(\"msgpack: %s already has field=%s\", fs.Type, name)\n\t}\n}\n\nfunc (fs *fields) OmitEmpty(strct reflect.Value) []*field {\n\tif !fs.hasOmitEmpty {\n\t\treturn fs.List\n\t}\n\n\tfields := make([]*field, 0, len(fs.List))\n\n\tfor _, f := range fs.List {\n\t\tif !f.Omit(strct) {\n\t\t\tfields = append(fields, f)\n\t\t}\n\t}\n\n\treturn fields\n}\n\nfunc getFields(typ reflect.Type, useJSONTag bool) *fields {\n\tfs := newFields(typ)\n\n\tvar omitEmpty bool\n\tfor i := 0; i < typ.NumField(); i++ {\n\t\tf := typ.Field(i)\n\n\t\ttagStr := f.Tag.Get(\"msgpack\")\n\t\tif useJSONTag && tagStr == \"\" {\n\t\t\ttagStr = f.Tag.Get(\"json\")\n\t\t}\n\n\t\ttag := tagparser.Parse(tagStr)\n\t\tif tag.Name == \"-\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif f.Name == \"_msgpack\" {\n\t\t\tif tag.HasOption(\"asArray\") {\n\t\t\t\tfs.AsArray = true\n\t\t\t}\n\t\t\tif tag.HasOption(\"omitempty\") {\n\t\t\t\tomitEmpty = true\n\t\t\t}\n\t\t}\n\n\t\tif f.PkgPath != \"\" && !f.Anonymous {\n\t\t\tcontinue\n\t\t}\n\n\t\tfield := &field{\n\t\t\tname: tag.Name,\n\t\t\tindex: f.Index,\n\t\t\tomitEmpty: omitEmpty || tag.HasOption(\"omitempty\"),\n\t\t}\n\n\t\tif tag.HasOption(\"intern\") {\n\t\t\tswitch f.Type.Kind() {\n\t\t\tcase reflect.Interface:\n\t\t\t\tfield.encoder = encodeInternInterfaceValue\n\t\t\t\tfield.decoder = decodeInternInterfaceValue\n\t\t\tcase reflect.String:\n\t\t\t\tfield.encoder = encodeInternStringValue\n\t\t\t\tfield.decoder = decodeInternStringValue\n\t\t\tdefault:\n\t\t\t\terr := fmt.Errorf(\"msgpack: intern strings are not supported on %s\", f.Type)\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t} else {\n\t\t\tfield.encoder = getEncoder(f.Type)\n\t\t\tfield.decoder = getDecoder(f.Type)\n\t\t}\n\n\t\tif field.name == \"\" {\n\t\t\tfield.name = f.Name\n\t\t}\n\n\t\tif f.Anonymous && !tag.HasOption(\"noinline\") {\n\t\t\tinline := tag.HasOption(\"inline\")\n\t\t\tif inline {\n\t\t\t\tinlineFields(fs, f.Type, field, useJSONTag)\n\t\t\t} else {\n\t\t\t\tinline = shouldInline(fs, f.Type, field, useJSONTag)\n\t\t\t}\n\n\t\t\tif inline {\n\t\t\t\tif _, ok := fs.Map[field.name]; ok {\n\t\t\t\t\tlog.Printf(\"msgpack: %s already has field=%s\", fs.Type, field.name)\n\t\t\t\t}\n\t\t\t\tfs.Map[field.name] = field\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tfs.Add(field)\n\n\t\tif alias, ok := tag.Options[\"alias\"]; ok {\n\t\t\tfs.warnIfFieldExists(alias)\n\t\t\tfs.Map[alias] = field\n\t\t}\n\t}\n\treturn fs\n}\n\nvar (\n\tencodeStructValuePtr uintptr\n\tdecodeStructValuePtr uintptr\n)\n\n\/\/nolint:gochecknoinits\nfunc init() {\n\tencodeStructValuePtr = reflect.ValueOf(encodeStructValue).Pointer()\n\tdecodeStructValuePtr = reflect.ValueOf(decodeStructValue).Pointer()\n}\n\nfunc inlineFields(fs *fields, typ reflect.Type, f *field, useJSONTag bool) {\n\tinlinedFields := getFields(typ, useJSONTag).List\n\tfor _, field := range inlinedFields {\n\t\tif _, ok := fs.Map[field.name]; ok {\n\t\t\t\/\/ Don't inline shadowed fields.\n\t\t\tcontinue\n\t\t}\n\t\tfield.index = append(f.index, field.index...)\n\t\tfs.Add(field)\n\t}\n}\n\nfunc shouldInline(fs *fields, typ reflect.Type, f *field, useJSONTag bool) bool {\n\tvar encoder encoderFunc\n\tvar decoder decoderFunc\n\n\tif typ.Kind() == reflect.Struct {\n\t\tencoder = f.encoder\n\t\tdecoder = f.decoder\n\t} else {\n\t\tfor typ.Kind() == reflect.Ptr {\n\t\t\ttyp = typ.Elem()\n\t\t\tencoder = getEncoder(typ)\n\t\t\tdecoder = getDecoder(typ)\n\t\t}\n\t\tif typ.Kind() != reflect.Struct {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif reflect.ValueOf(encoder).Pointer() != encodeStructValuePtr {\n\t\treturn false\n\t}\n\tif reflect.ValueOf(decoder).Pointer() != decodeStructValuePtr {\n\t\treturn false\n\t}\n\n\tinlinedFields := getFields(typ, useJSONTag).List\n\tfor _, field := range inlinedFields {\n\t\tif _, ok := fs.Map[field.name]; ok {\n\t\t\t\/\/ Don't auto inline if there are shadowed fields.\n\t\t\treturn false\n\t\t}\n\t}\n\n\tfor _, field := range inlinedFields {\n\t\tfield.index = append(f.index, field.index...)\n\t\tfs.Add(field)\n\t}\n\treturn true\n}\n\nfunc isEmptyValue(v reflect.Value) bool {\n\tswitch v.Kind() {\n\tcase reflect.Array, reflect.Map, reflect.Slice, reflect.String:\n\t\treturn v.Len() == 0\n\tcase reflect.Bool:\n\t\treturn !v.Bool()\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn v.Int() == 0\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\treturn v.Uint() == 0\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn v.Float() == 0\n\tcase reflect.Interface, reflect.Ptr:\n\t\treturn v.IsNil()\n\t}\n\treturn false\n}\n\nfunc fieldByIndex(v reflect.Value, index []int) (_ reflect.Value, ok bool) {\n\tif len(index) == 1 {\n\t\treturn v.Field(index[0]), true\n\t}\n\n\tfor i, idx := range index {\n\t\tif i > 0 {\n\t\t\tif v.Kind() == reflect.Ptr {\n\t\t\t\tif v.IsNil() {\n\t\t\t\t\treturn v, false\n\t\t\t\t}\n\t\t\t\tv = v.Elem()\n\t\t\t}\n\t\t}\n\t\tv = v.Field(idx)\n\t}\n\n\treturn v, true\n}\n\nfunc fieldByIndexAlloc(v reflect.Value, index []int) reflect.Value {\n\tif len(index) == 1 {\n\t\treturn v.Field(index[0])\n\t}\n\n\tfor i, idx := range index {\n\t\tif i > 0 {\n\t\t\tvar ok bool\n\t\t\tv, ok = indirectNil(v)\n\t\t\tif !ok {\n\t\t\t\treturn v\n\t\t\t}\n\t\t}\n\t\tv = v.Field(idx)\n\t}\n\n\treturn v\n}\n\nfunc indirectNil(v reflect.Value) (reflect.Value, bool) {\n\tif v.Kind() == reflect.Ptr {\n\t\tif v.IsNil() {\n\t\t\tif !v.CanSet() {\n\t\t\t\treturn v, false\n\t\t\t}\n\t\t\telemType := v.Type().Elem()\n\t\t\tif elemType.Kind() != reflect.Struct {\n\t\t\t\treturn v, false\n\t\t\t}\n\t\t\tv.Set(reflect.New(elemType))\n\t\t}\n\t\tv = v.Elem()\n\t}\n\treturn v, true\n}\n<|endoftext|>"} {"text":"<commit_before>package application\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/olekukonko\/tablewriter\"\n\t\"github.com\/spf13\/cobra\"\n\t\"gopkg.in\/yaml.v2\"\n\n\t\"github.com\/ovh\/cds\/sdk\"\n)\n\nvar (\n\tcmdApplicationPipelineScheduler = &cobra.Command{\n\t\tUse: \"scheduler\",\n\t\tShort: \"cds application pipeline scheduler\",\n\t}\n\n\tcmdApplicationPipelineSchedulerList = &cobra.Command{\n\t\tUse: \"list\",\n\t\tShort: \"cds application pipeline scheduler list <projectKey> <applicationName> <pipelineName>\",\n\t\tRun: applicationPipelineSchedulerList,\n\t}\n\n\tcmdApplicationPipelineSchedulerAdd = &cobra.Command{\n\t\tUse: \"add\",\n\t\tShort: \"cds application pipeline scheduler add <projectKey> <applicationName> <pipelineName> <cron expression> [-e environment] [-p <pipelineParam>=<value>]\",\n\t\tRun: applicationPipelineSchedulerAdd,\n\t}\n\n\tcmdApplicationPipelineSchedulerAddEnv string\n\n\tcmdApplicationPipelineSchedulerUpdate = &cobra.Command{\n\t\tUse: \"update\",\n\t\tShort: \"cds application pipeline scheduler update <projectKey> <applicationName> <pipelineName> <ID> [-c <cron expression>] [-e environment] [-p <pipelineParam>=<value>] [--disable true|false]\",\n\t\tRun: applicationPipelineSchedulerUpdate,\n\t}\n\n\tcmdApplicationPipelineSchedulerUpdateCronExpr, cmdApplicationPipelineSchedulerUpdateDisable string\n\n\tcmdApplicationPipelineSchedulerDelete = &cobra.Command{\n\t\tUse: \"delete\",\n\t\tShort: \"cds application pipeline scheduler delete <projectKey> <applicationName> <pipelineName> <ID>\",\n\t\tRun: applicationPipelineSchedulerDelete,\n\t}\n)\n\nfunc applicationPipelineSchedulerList(cmd *cobra.Command, args []string) {\n\tif len(args) != 3 {\n\t\tsdk.Exit(\"Wrong usage: %s\\n\", cmd.Short)\n\t}\n\tprojectKey := args[0]\n\tappName := args[1]\n\tpipelineName := args[2]\n\n\tps, err := sdk.GetPipelineScheduler(projectKey, appName, pipelineName)\n\tif err != nil {\n\t\tsdk.Exit(\"Error: cannot list pipeline schedulers: (%s)\\n\", err)\n\t}\n\n\ttable := tablewriter.NewWriter(os.Stdout)\n\ttable.SetHeader([]string{\"ID\", \"Frequency\", \"Parameters\", \"Environment\", \"Enabled\", \"Last Execution\", \"Next Execution\"})\n\ttable.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})\n\ttable.SetCenterSeparator(\"|\")\n\n\tfor i := range ps {\n\t\targs, err := yaml.Marshal(ps[i].Args)\n\t\tif err != nil {\n\t\t\tsdk.Exit(\"Error: cannot format output (%s)\\n\", err)\n\t\t}\n\n\t\tvar last = \"never\"\n\t\tvar next = \"unknown\"\n\n\t\tif ps[i].LastExecution != nil {\n\t\t\tloc, err := time.LoadLocation(ps[i].Timezone)\n\t\t\tif err != nil {\n\t\t\t\tlast = fmt.Sprintf(\"%v\", ps[i].LastExecution.ExecutionDate)\n\t\t\t} else {\n\t\t\t\tt := ps[i].LastExecution.ExecutionDate.In(loc)\n\t\t\t\tlast = fmt.Sprintf(\"%v\", t)\n\t\t\t}\n\t\t}\n\n\t\tif ps[i].NextExecution != nil {\n\t\t\tloc, err := time.LoadLocation(ps[i].Timezone)\n\t\t\tif err != nil {\n\t\t\t\tnext = fmt.Sprintf(\"%v\", ps[i].NextExecution.ExecutionPlannedDate)\n\t\t\t} else {\n\t\t\t\tt := ps[i].NextExecution.ExecutionPlannedDate.In(loc)\n\t\t\t\tnext = fmt.Sprintf(\"%v\", t)\n\t\t\t}\n\t\t}\n\n\t\ttable.Append([]string{\n\t\t\tfmt.Sprintf(\"%d\", ps[i].ID),\n\t\t\tps[i].Crontab, string(args),\n\t\t\tps[i].EnvironmentName,\n\t\t\tfmt.Sprintf(\"%v\", !ps[i].Disabled),\n\t\t\tlast,\n\t\t\tnext,\n\t\t})\n\t}\n\ttable.Render()\n}\n\nfunc applicationPipelineSchedulerAdd(cmd *cobra.Command, args []string) {\n\tif len(args) != 4 {\n\t\tsdk.Exit(\"Wrong usage: %s\\n\", cmd.Short)\n\t}\n\tprojectKey := args[0]\n\tappName := args[1]\n\tpipelineName := args[2]\n\tcronExpr := args[3]\n\n\tvar params []sdk.Parameter\n\t\/\/ Parameters\n\tfor i := range cmdApplicationAddPipelineParams {\n\t\tp, err := sdk.NewStringParameter(cmdApplicationAddPipelineParams[i])\n\t\tif err != nil {\n\t\t\tsdk.Exit(\"Error: cannot parse parameter '%s' (%s)\\n\", cmdApplicationAddPipelineParams[i])\n\t\t}\n\t\tparams = append(params, p)\n\t}\n\n\tif _, err := sdk.AddPipelineScheduler(projectKey, appName, pipelineName, cronExpr, cmdApplicationPipelineSchedulerAddEnv, params); err != nil {\n\t\tsdk.Exit(\"Error: cannot add pipeline scheduler : %s\\n\", err)\n\t}\n\n\tfmt.Println(\"OK\")\n\n}\n\nfunc applicationPipelineSchedulerUpdate(cmd *cobra.Command, args []string) {\n\tif len(args) != 4 {\n\t\tsdk.Exit(\"Wrong usage: %s\\n\", cmd.Short)\n\t}\n\n\tprojectKey := args[0]\n\tappName := args[1]\n\tpipelineName := args[2]\n\tids := args[3]\n\n\tid, err := strconv.ParseInt(ids, 10, 64)\n\tif err != nil {\n\t\tsdk.Exit(\"Error: invalid parameter ID : %s\", err)\n\t}\n\n\tvar params []sdk.Parameter\n\t\/\/ Parameters\n\tfor i := range cmdApplicationAddPipelineParams {\n\t\tp, err := sdk.NewStringParameter(cmdApplicationAddPipelineParams[i])\n\t\tif err != nil {\n\t\t\tsdk.Exit(\"Error: cannot parse parameter '%s' (%s)\\n\", cmdApplicationAddPipelineParams[i])\n\t\t}\n\t\tparams = append(params, p)\n\t}\n\n\tps, err := sdk.GetPipelineScheduler(projectKey, appName, pipelineName)\n\tif err != nil {\n\t\tsdk.Exit(\"Error: Unable to list pipeline schedulers: %s\", err)\n\t}\n\n\tvar s *sdk.PipelineScheduler\n\tfor i := range ps {\n\t\tif ps[i].ID == id {\n\t\t\ts = &ps[i]\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif s == nil {\n\t\tsdk.Exit(\"Error: Unable to find pipeline scheduler with id %d\", id)\n\t}\n\n\tif cmdApplicationPipelineSchedulerUpdateCronExpr != \"\" {\n\t\ts.Crontab = cmdApplicationPipelineSchedulerUpdateCronExpr\n\t}\n\n\tif cmdApplicationPipelineSchedulerUpdateDisable == \"true\" {\n\t\ts.Disabled = true\n\t} else if cmdApplicationPipelineSchedulerUpdateDisable == \"false\" {\n\t\ts.Disabled = false\n\t}\n\n\tif len(params) > 0 {\n\t\ts.Args = params\n\t}\n\n\tif cmdApplicationPipelineSchedulerAddEnv != \"\" {\n\t\ts.EnvironmentName = cmdApplicationPipelineSchedulerAddEnv\n\t}\n\n\tif _, err := sdk.UpdatePipelineScheduler(projectKey, appName, pipelineName, s); err != nil {\n\t\tsdk.Exit(\"Error: Unable to update pipeline scheduler with id %d\", id)\n\t}\n\n\tfmt.Println(\"OK\")\n}\n\nfunc applicationPipelineSchedulerDelete(cmd *cobra.Command, args []string) {\n\tif len(args) != 4 {\n\t\tsdk.Exit(\"Wrong usage: %s\\n\", cmd.Short)\n\t}\n\n\tprojectKey := args[0]\n\tappName := args[1]\n\tpipelineName := args[2]\n\tids := args[3]\n\n\tid, err := strconv.ParseInt(ids, 10, 64)\n\tif err != nil {\n\t\tsdk.Exit(\"Error: invalid parameter ID : %s\", err)\n\t}\n\n\tps, err := sdk.GetPipelineScheduler(projectKey, appName, pipelineName)\n\tif err != nil {\n\t\tsdk.Exit(\"Error: Unable to list pipeline schedulers: %s\", err)\n\t}\n\n\tvar s *sdk.PipelineScheduler\n\tfor i := range ps {\n\t\tif ps[i].ID == id {\n\t\t\ts = &ps[i]\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif s == nil {\n\t\tsdk.Exit(\"Error: Unable to find pipeline scheduler with id %d\", id)\n\t}\n\n\tif err := sdk.DeletePipelineScheduler(projectKey, appName, pipelineName, s); err != nil {\n\t\tsdk.Exit(\"Error: Unable to delete pipeline scheduler with id %d\", id)\n\t}\n\n\tfmt.Println(\"OK\")\n}\n<commit_msg>fix (cli): pipeline scheduler params (#824)<commit_after>package application\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/olekukonko\/tablewriter\"\n\t\"github.com\/spf13\/cobra\"\n\t\"gopkg.in\/yaml.v2\"\n\n\t\"github.com\/ovh\/cds\/sdk\"\n)\n\nvar (\n\tcmdApplicationPipelineScheduler = &cobra.Command{\n\t\tUse: \"scheduler\",\n\t\tShort: \"cds application pipeline scheduler\",\n\t}\n\n\tcmdApplicationPipelineSchedulerList = &cobra.Command{\n\t\tUse: \"list\",\n\t\tShort: \"cds application pipeline scheduler list <projectKey> <applicationName> <pipelineName>\",\n\t\tRun: applicationPipelineSchedulerList,\n\t}\n\n\tcmdApplicationPipelineSchedulerAdd = &cobra.Command{\n\t\tUse: \"add\",\n\t\tShort: \"cds application pipeline scheduler add <projectKey> <applicationName> <pipelineName> <cron expression> [-e environment] [-p <param>=<value>]\",\n\t\tRun: applicationPipelineSchedulerAdd,\n\t}\n\n\tcmdApplicationPipelineSchedulerAddEnv string\n\n\tcmdApplicationPipelineSchedulerUpdate = &cobra.Command{\n\t\tUse: \"update\",\n\t\tShort: \"cds application pipeline scheduler update <projectKey> <applicationName> <pipelineName> <ID> [-c <cron expression>] [-e environment] [-p <pipelineParam>=<value>] [--disable true|false]\",\n\t\tRun: applicationPipelineSchedulerUpdate,\n\t}\n\n\tcmdApplicationPipelineSchedulerUpdateCronExpr, cmdApplicationPipelineSchedulerUpdateDisable string\n\n\tcmdApplicationPipelineSchedulerDelete = &cobra.Command{\n\t\tUse: \"delete\",\n\t\tShort: \"cds application pipeline scheduler delete <projectKey> <applicationName> <pipelineName> <ID>\",\n\t\tRun: applicationPipelineSchedulerDelete,\n\t}\n)\n\nfunc applicationPipelineSchedulerList(cmd *cobra.Command, args []string) {\n\tif len(args) != 3 {\n\t\tsdk.Exit(\"Wrong usage: %s\\n\", cmd.Short)\n\t}\n\tprojectKey := args[0]\n\tappName := args[1]\n\tpipelineName := args[2]\n\n\tps, err := sdk.GetPipelineScheduler(projectKey, appName, pipelineName)\n\tif err != nil {\n\t\tsdk.Exit(\"Error: cannot list pipeline schedulers: (%s)\\n\", err)\n\t}\n\n\ttable := tablewriter.NewWriter(os.Stdout)\n\ttable.SetHeader([]string{\"ID\", \"Frequency\", \"Parameters\", \"Environment\", \"Enabled\", \"Last Execution\", \"Next Execution\"})\n\ttable.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})\n\ttable.SetCenterSeparator(\"|\")\n\n\tfor i := range ps {\n\t\targs, err := yaml.Marshal(ps[i].Args)\n\t\tif err != nil {\n\t\t\tsdk.Exit(\"Error: cannot format output (%s)\\n\", err)\n\t\t}\n\n\t\tvar last = \"never\"\n\t\tvar next = \"unknown\"\n\n\t\tif ps[i].LastExecution != nil {\n\t\t\tloc, err := time.LoadLocation(ps[i].Timezone)\n\t\t\tif err != nil {\n\t\t\t\tlast = fmt.Sprintf(\"%v\", ps[i].LastExecution.ExecutionDate)\n\t\t\t} else {\n\t\t\t\tt := ps[i].LastExecution.ExecutionDate.In(loc)\n\t\t\t\tlast = fmt.Sprintf(\"%v\", t)\n\t\t\t}\n\t\t}\n\n\t\tif ps[i].NextExecution != nil {\n\t\t\tloc, err := time.LoadLocation(ps[i].Timezone)\n\t\t\tif err != nil {\n\t\t\t\tnext = fmt.Sprintf(\"%v\", ps[i].NextExecution.ExecutionPlannedDate)\n\t\t\t} else {\n\t\t\t\tt := ps[i].NextExecution.ExecutionPlannedDate.In(loc)\n\t\t\t\tnext = fmt.Sprintf(\"%v\", t)\n\t\t\t}\n\t\t}\n\n\t\ttable.Append([]string{\n\t\t\tfmt.Sprintf(\"%d\", ps[i].ID),\n\t\t\tps[i].Crontab, string(args),\n\t\t\tps[i].EnvironmentName,\n\t\t\tfmt.Sprintf(\"%v\", !ps[i].Disabled),\n\t\t\tlast,\n\t\t\tnext,\n\t\t})\n\t}\n\ttable.Render()\n}\n\nfunc applicationPipelineSchedulerAdd(cmd *cobra.Command, args []string) {\n\tif len(args) != 4 {\n\t\tsdk.Exit(\"Wrong usage: %s\\n\", cmd.Short)\n\t}\n\tprojectKey := args[0]\n\tappName := args[1]\n\tpipelineName := args[2]\n\tcronExpr := args[3]\n\n\tvar params []sdk.Parameter\n\t\/\/ Parameters\n\tfor i := range cmdApplicationAddPipelineParams {\n\t\tp, err := sdk.NewStringParameter(cmdApplicationAddPipelineParams[i])\n\t\tif err != nil {\n\t\t\tsdk.Exit(\"Error: cannot parse parameter '%s' (%s)\\n\", cmdApplicationAddPipelineParams[i])\n\t\t}\n\t\tparams = append(params, p)\n\t}\n\n\tif _, err := sdk.AddPipelineScheduler(projectKey, appName, pipelineName, cronExpr, cmdApplicationPipelineSchedulerAddEnv, params); err != nil {\n\t\tsdk.Exit(\"Error: cannot add pipeline scheduler : %s\\n\", err)\n\t}\n\n\tfmt.Println(\"OK\")\n\n}\n\nfunc applicationPipelineSchedulerUpdate(cmd *cobra.Command, args []string) {\n\tif len(args) != 4 {\n\t\tsdk.Exit(\"Wrong usage: %s\\n\", cmd.Short)\n\t}\n\n\tprojectKey := args[0]\n\tappName := args[1]\n\tpipelineName := args[2]\n\tids := args[3]\n\n\tid, err := strconv.ParseInt(ids, 10, 64)\n\tif err != nil {\n\t\tsdk.Exit(\"Error: invalid parameter ID : %s\", err)\n\t}\n\n\tvar params []sdk.Parameter\n\t\/\/ Parameters\n\tfor i := range cmdApplicationAddPipelineParams {\n\t\tp, err := sdk.NewStringParameter(cmdApplicationAddPipelineParams[i])\n\t\tif err != nil {\n\t\t\tsdk.Exit(\"Error: cannot parse parameter '%s' (%s)\\n\", cmdApplicationAddPipelineParams[i])\n\t\t}\n\t\tparams = append(params, p)\n\t}\n\n\tps, err := sdk.GetPipelineScheduler(projectKey, appName, pipelineName)\n\tif err != nil {\n\t\tsdk.Exit(\"Error: Unable to list pipeline schedulers: %s\", err)\n\t}\n\n\tvar s *sdk.PipelineScheduler\n\tfor i := range ps {\n\t\tif ps[i].ID == id {\n\t\t\ts = &ps[i]\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif s == nil {\n\t\tsdk.Exit(\"Error: Unable to find pipeline scheduler with id %d\", id)\n\t}\n\n\tif cmdApplicationPipelineSchedulerUpdateCronExpr != \"\" {\n\t\ts.Crontab = cmdApplicationPipelineSchedulerUpdateCronExpr\n\t}\n\n\tif cmdApplicationPipelineSchedulerUpdateDisable == \"true\" {\n\t\ts.Disabled = true\n\t} else if cmdApplicationPipelineSchedulerUpdateDisable == \"false\" {\n\t\ts.Disabled = false\n\t}\n\n\tif len(params) > 0 {\n\t\ts.Args = params\n\t}\n\n\tif cmdApplicationPipelineSchedulerAddEnv != \"\" {\n\t\ts.EnvironmentName = cmdApplicationPipelineSchedulerAddEnv\n\t}\n\n\tif _, err := sdk.UpdatePipelineScheduler(projectKey, appName, pipelineName, s); err != nil {\n\t\tsdk.Exit(\"Error: Unable to update pipeline scheduler with id %d\", id)\n\t}\n\n\tfmt.Println(\"OK\")\n}\n\nfunc applicationPipelineSchedulerDelete(cmd *cobra.Command, args []string) {\n\tif len(args) != 4 {\n\t\tsdk.Exit(\"Wrong usage: %s\\n\", cmd.Short)\n\t}\n\n\tprojectKey := args[0]\n\tappName := args[1]\n\tpipelineName := args[2]\n\tids := args[3]\n\n\tid, err := strconv.ParseInt(ids, 10, 64)\n\tif err != nil {\n\t\tsdk.Exit(\"Error: invalid parameter ID : %s\", err)\n\t}\n\n\tps, err := sdk.GetPipelineScheduler(projectKey, appName, pipelineName)\n\tif err != nil {\n\t\tsdk.Exit(\"Error: Unable to list pipeline schedulers: %s\", err)\n\t}\n\n\tvar s *sdk.PipelineScheduler\n\tfor i := range ps {\n\t\tif ps[i].ID == id {\n\t\t\ts = &ps[i]\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif s == nil {\n\t\tsdk.Exit(\"Error: Unable to find pipeline scheduler with id %d\", id)\n\t}\n\n\tif err := sdk.DeletePipelineScheduler(projectKey, appName, pipelineName, s); err != nil {\n\t\tsdk.Exit(\"Error: Unable to delete pipeline scheduler with id %d\", id)\n\t}\n\n\tfmt.Println(\"OK\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package connector is used to specify how to connect to MySQL.\n\/\/ Then get a sql.*DB from it which is returned to the app.\npackage connector\n\nimport (\n\t\"database\/sql\"\n\t\"log\"\n\n\t\"github.com\/sjmudd\/mysql_defaults_file\"\n\t\"github.com\/sjmudd\/ps-top\/mylog\"\n)\n\n\/\/ ConnectMethod indicates how we want to connect to MySQL\ntype ConnectMethod int\n\nconst (\n\tdb = \"performance_schema\" \/\/ database to connect to\n\tmaxOpenConns = 5 \/\/ maximum number of connections the go driver should keep open. Hard-coded value!\n\tsqlDriver = \"mysql\" \/\/ name of the go-sql-driver to use\n\t\/\/ ConnectByDefaultsFile indicates we want to connect using a MySQL defaults file\n\tConnectByDefaultsFile ConnectMethod = iota\n\t\/\/ ConnectByConfig indicates we want to connect by various components (fields)\n\tConnectByConfig\n\t\/\/ ConnectByEnvironment indicates we want to connect by using MYSQL_DSN environment variable\n\tConnectByEnvironment\n)\n\n\/\/ Connector contains information on how to connect to MySQL\ntype Connector struct {\n\tmethod ConnectMethod\n\tconfig mysql_defaults_file.Config\n\tDB *sql.DB\n}\n\n\/\/ DefaultsFile returns the defaults file\nfunc (c Connector) DefaultsFile() string {\n\treturn c.config.Filename\n}\n\n\/\/ SetDefaultsFile specifies the defaults file to use\nfunc (c *Connector) SetDefaultsFile(defaultsFile string) {\n\tc.config.Filename = defaultsFile\n}\n\n\/\/ SetConfig sets the config using the provided configuration.\nfunc (c *Connector) SetConfig(config mysql_defaults_file.Config) {\n\tc.config = config\n}\n\n\/\/ SetConnectBy records how we want to connect\nfunc (c *Connector) SetConnectBy(method ConnectMethod) {\n\tc.method = method\n}\n\n\/\/ Connect makes a connection to the database using the previously defined settings\nfunc (c *Connector) Connect() {\n\tvar err error\n\n\tswitch {\n\tcase c.method == ConnectByConfig:\n\t\tlog.Println(\"ConnectByConfig() Connecting...\")\n\t\tc.DB, err = sql.Open(sqlDriver, mysql_defaults_file.BuildDSN(c.config, db))\n\n\tcase c.method == ConnectByDefaultsFile:\n\t\tlog.Println(\"ConnectByDefaults_file() Connecting...\")\n\t\tc.DB, err = mysql_defaults_file.Open(c.config.Filename, db)\n\n\tcase c.method == ConnectByEnvironment:\n\t\t\/*********************************************************************************\n\t\t * WARNING This functionality may be removed. WARNING *\n\t\t * *\n\t\t * Although I have implemented this it may not be good\/safe to actually use it. *\n\t\t * See: http:\/\/dev.mysql.com\/doc\/refman\/5.6\/en\/password-security-user.html *\n\t\t * Store your password in the MYSQL_PWD environment variable. See Section *\n\t\t * 2.12, “Environment Variables”. *\n\t\t *********************************************************************************\/\n\t\tlog.Println(\"ConnectByEnvironment() Connecting...\")\n\t\tc.DB, err = mysql_defaults_file.OpenUsingEnvironment(sqlDriver)\n\n\tdefault:\n\t\tmylog.Fatal(\"Connector.Connect() c.method not ConnectByDefaultsFile\/ConnectByConfig\/ConnectByEnvironment\")\n\t}\n\n\t\/\/ we catch Open...() errors here\n\tif err != nil {\n\t\tmylog.Fatal(err)\n\t}\n\n\t\/\/ without calling Ping() we don't actually connect.\n\tif err = c.DB.Ping(); err != nil {\n\t\tmylog.Fatal(err)\n\t}\n\n\t\/\/ Deliberately limit the pool size to 5 to avoid \"problems\" if any queries hang.\n\tc.DB.SetMaxOpenConns(maxOpenConns)\n}\n\n\/\/ ConnectByConfig connects to MySQL using various configuration settings\n\/\/ needed to create the DSN.\nfunc (c *Connector) ConnectByConfig(config mysql_defaults_file.Config) {\n\tc.SetConfig(config)\n\tc.SetConnectBy(ConnectByConfig)\n\tc.Connect()\n}\n\n\/\/ ConnectByDefaultsFile connects to the database with the given\n\/\/ defaults-file, or ~\/.my.cnf if not provided.\nfunc (c *Connector) ConnectByDefaultsFile(defaultsFile string) {\n\tc.SetDefaultsFile(defaultsFile)\n\tc.SetConnectBy(ConnectByDefaultsFile)\n\tc.Connect()\n}\n\n\/\/ ConnectByEnvironment connects using environment variables\nfunc (c *Connector) ConnectByEnvironment() {\n\tc.SetConnectBy(ConnectByEnvironment)\n\tc.Connect()\n}\n<commit_msg>remove some unneeded Connector methods.<commit_after>\/\/ Package connector is used to specify how to connect to MySQL.\n\/\/ Then get a sql.*DB from it which is returned to the app.\npackage connector\n\nimport (\n\t\"database\/sql\"\n\t\"log\"\n\n\t\"github.com\/sjmudd\/mysql_defaults_file\"\n\t\"github.com\/sjmudd\/ps-top\/mylog\"\n)\n\n\/\/ ConnectMethod indicates how we want to connect to MySQL\ntype ConnectMethod int\n\nconst (\n\tdb = \"performance_schema\" \/\/ database to connect to\n\tmaxOpenConns = 5 \/\/ maximum number of connections the go driver should keep open. Hard-coded value!\n\tsqlDriver = \"mysql\" \/\/ name of the go-sql-driver to use\n\t\/\/ ConnectByDefaultsFile indicates we want to connect using a MySQL defaults file\n\tConnectByDefaultsFile ConnectMethod = iota\n\t\/\/ ConnectByConfig indicates we want to connect by various components (fields)\n\tConnectByConfig\n\t\/\/ ConnectByEnvironment indicates we want to connect by using MYSQL_DSN environment variable\n\tConnectByEnvironment\n)\n\n\/\/ Connector contains information on how to connect to MySQL\ntype Connector struct {\n\tmethod ConnectMethod\n\tconfig mysql_defaults_file.Config\n\tDB *sql.DB\n}\n\n\/\/ DefaultsFile returns the defaults file\nfunc (c Connector) DefaultsFile() string {\n\treturn c.config.Filename\n}\n\n\/\/ SetConnectBy records how we want to connect\nfunc (c *Connector) SetConnectBy(method ConnectMethod) {\n\tc.method = method\n}\n\n\/\/ Connect makes a connection to the database using the previously defined settings\nfunc (c *Connector) Connect() {\n\tvar err error\n\n\tswitch {\n\tcase c.method == ConnectByConfig:\n\t\tlog.Println(\"ConnectByConfig() Connecting...\")\n\t\tc.DB, err = sql.Open(sqlDriver, mysql_defaults_file.BuildDSN(c.config, db))\n\n\tcase c.method == ConnectByDefaultsFile:\n\t\tlog.Println(\"ConnectByDefaults_file() Connecting...\")\n\t\tc.DB, err = mysql_defaults_file.Open(c.config.Filename, db)\n\n\tcase c.method == ConnectByEnvironment:\n\t\t\/*********************************************************************************\n\t\t * WARNING This functionality may be removed. WARNING *\n\t\t * *\n\t\t * Although I have implemented this it may not be good\/safe to actually use it. *\n\t\t * See: http:\/\/dev.mysql.com\/doc\/refman\/5.6\/en\/password-security-user.html *\n\t\t * Store your password in the MYSQL_PWD environment variable. See Section *\n\t\t * 2.12, “Environment Variables”. *\n\t\t *********************************************************************************\/\n\t\tlog.Println(\"ConnectByEnvironment() Connecting...\")\n\t\tc.DB, err = mysql_defaults_file.OpenUsingEnvironment(sqlDriver)\n\n\tdefault:\n\t\tmylog.Fatal(\"Connector.Connect() c.method not ConnectByDefaultsFile\/ConnectByConfig\/ConnectByEnvironment\")\n\t}\n\n\t\/\/ we catch Open...() errors here\n\tif err != nil {\n\t\tmylog.Fatal(err)\n\t}\n\n\t\/\/ without calling Ping() we don't actually connect.\n\tif err = c.DB.Ping(); err != nil {\n\t\tmylog.Fatal(err)\n\t}\n\n\t\/\/ Deliberately limit the pool size to 5 to avoid \"problems\" if any queries hang.\n\tc.DB.SetMaxOpenConns(maxOpenConns)\n}\n\n\/\/ ConnectByConfig connects to MySQL using various configuration settings\n\/\/ needed to create the DSN.\nfunc (c *Connector) ConnectByConfig(config mysql_defaults_file.Config) {\n\tc.config = config\n\tc.SetConnectBy(ConnectByConfig)\n\tc.Connect()\n}\n\n\/\/ ConnectByDefaultsFile connects to the database with the given\n\/\/ defaults-file, or ~\/.my.cnf if not provided.\nfunc (c *Connector) ConnectByDefaultsFile(defaultsFile string) {\n\tc.config = mysql_defaults_file.NewConfig(defaultsFile)\n\tc.SetConnectBy(ConnectByDefaultsFile)\n\tc.Connect()\n}\n\n\/\/ ConnectByEnvironment connects using environment variables\nfunc (c *Connector) ConnectByEnvironment() {\n\tc.SetConnectBy(ConnectByEnvironment)\n\tc.Connect()\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>公众号网页授权后获取用户基本信息支持语言入参 #344 (#345)<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>Easy away of avoid log computation<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 ThoughtWorks, Inc.\n\n\/\/ This file is part of Gauge.\n\n\/\/ Gauge is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\n\/\/ Gauge is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU General Public License for more details.\n\n\/\/ You should have received a copy of the GNU General Public License\n\/\/ along with Gauge. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/getgauge\/common\"\n\t\"github.com\/getgauge\/gauge\/config\"\n\t\"github.com\/op\/go-logging\"\n\t\"gopkg.in\/natefinch\/lumberjack.v2\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nconst LOGS_DIRECTORY = \"logs_directory\"\n\nvar log = logging.MustGetLogger(\"gauge\")\nvar apiLog = logging.MustGetLogger(\"gauge-api\")\n\nvar gaugeLogFile = filepath.Join(\"logs\", \"gauge.log\")\nvar apiLogFile = filepath.Join(\"logs\", \"api.log\")\n\nvar format = logging.MustStringFormatter(\n\t\"%{time:15:04:05.000} [%{level:.4s}] %{message}\",\n)\n\nfunc initLoggers() {\n\tinitGaugeLogger()\n\tinitApiLogger()\n}\n\nfunc initGaugeLogger() {\n\tstdOutLogger := logging.NewLogBackend(os.Stdout, \"\", 0)\n\tlogsDir := os.Getenv(LOGS_DIRECTORY)\n\tvar gaugeFileLogger logging.Backend\n\tif logsDir == \"\" {\n\t\tgaugeFileLogger = createFileLogger(gaugeLogFile, 20)\n\t} else {\n\t\tgaugeFileLogger = createFileLogger(logsDir+fmt.Sprintf(\"%c\", filepath.Separator)+\"gauge.log\", 20)\n\t}\n\tstdOutFormatter := logging.NewBackendFormatter(stdOutLogger, format)\n\tfileFormatter := logging.NewBackendFormatter(gaugeFileLogger, format)\n\n\tstdOutLoggerLeveled := logging.AddModuleLevel(stdOutFormatter)\n\tstdOutLoggerLeveled.SetLevel(loggingLevel(), \"\")\n\n\tfileLoggerLeveled := logging.AddModuleLevel(fileFormatter)\n\tfileLoggerLeveled.SetLevel(logging.DEBUG, \"\")\n\n\tlogging.SetBackend(fileLoggerLeveled, stdOutLoggerLeveled)\n}\n\nfunc initApiLogger() {\n\tlogsDir, err := filepath.Abs(os.Getenv(LOGS_DIRECTORY))\n\tvar apiFileLogger logging.Backend\n\tif logsDir == \"\" || err != nil {\n\t\tapiFileLogger = createFileLogger(apiLogFile, 10)\n\t} else {\n\t\tapiFileLogger = createFileLogger(logsDir+fmt.Sprintf(\"%c\", filepath.Separator)+\"api.log\", 10)\n\t}\n\n\tfileFormatter := logging.NewBackendFormatter(apiFileLogger, format)\n\tfileLoggerLeveled := logging.AddModuleLevel(fileFormatter)\n\tfileLoggerLeveled.SetLevel(loggingLevel(), \"\")\n\tapiLog.SetBackend(fileLoggerLeveled)\n}\n\nfunc createFileLogger(name string, size int) logging.Backend {\n\treturn logging.NewLogBackend(&lumberjack.Logger{\n\t\tFilename: getLogFile(name),\n\t\tMaxSize: size, \/\/ megabytes\n\t\tMaxBackups: 3,\n\t\tMaxAge: 28, \/\/days\n\t}, \"\", 0)\n}\n\nfunc getLogFile(fileName string) string {\n\tif config.ProjectRoot != \"\" {\n\t\treturn filepath.Join(config.ProjectRoot, fileName)\n\t} else {\n\t\tgaugeHome, err := common.GetGaugeHomeDirectory()\n\t\tif err != nil {\n\t\t\treturn fileName\n\t\t}\n\t\treturn filepath.Join(gaugeHome, fileName)\n\t}\n}\n\nfunc loggingLevel() logging.Level {\n\tif *verbosity {\n\t\treturn logging.DEBUG\n\t}\n\tif *logLevel != \"\" {\n\t\tswitch strings.ToLower(*logLevel) {\n\t\tcase \"debug\":\n\t\t\treturn logging.DEBUG\n\t\tcase \"info\":\n\t\t\treturn logging.INFO\n\t\tcase \"warning\":\n\t\t\treturn logging.WARNING\n\t\tcase \"error\":\n\t\t\treturn logging.ERROR\n\t\tcase \"critical\":\n\t\t\treturn logging.CRITICAL\n\t\tcase \"notice\":\n\t\t\treturn logging.NOTICE\n\t\t}\n\t}\n\treturn logging.INFO\n\n}\n<commit_msg>Fixing nested log directory creation. Fixes #123<commit_after>\/\/ Copyright 2015 ThoughtWorks, Inc.\n\n\/\/ This file is part of Gauge.\n\n\/\/ Gauge is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\n\/\/ Gauge is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU General Public License for more details.\n\n\/\/ You should have received a copy of the GNU General Public License\n\/\/ along with Gauge. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage main\n\nimport (\n\t\"github.com\/getgauge\/common\"\n\t\"github.com\/getgauge\/gauge\/config\"\n\t\"github.com\/op\/go-logging\"\n\t\"gopkg.in\/natefinch\/lumberjack.v2\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nconst (\n\tLOGS_DIRECTORY = \"logs_directory\"\n\tlogs = \"logs\"\n\tgaugeLogFileName = \"gauge.log\"\n\tapiLogFileName = \"api.log\"\n)\n\nvar log = logging.MustGetLogger(\"gauge\")\nvar apiLog = logging.MustGetLogger(\"gauge-api\")\n\nvar gaugeLogFile = filepath.Join(logs, gaugeLogFileName)\nvar apiLogFile = filepath.Join(logs, apiLogFileName)\n\nvar format = logging.MustStringFormatter(\n\t\"%{time:15:04:05.000} [%{level:.4s}] %{message}\",\n)\n\nfunc initLoggers() {\n\tinitGaugeLogger()\n\tinitApiLogger()\n}\n\nfunc initGaugeLogger() {\n\tstdOutLogger := logging.NewLogBackend(os.Stdout, \"\", 0)\n\tlogsDir := os.Getenv(LOGS_DIRECTORY)\n\tvar gaugeFileLogger logging.Backend\n\tif logsDir == \"\" {\n\t\tgaugeFileLogger = createFileLogger(gaugeLogFile, 20)\n\t} else {\n\t\tgaugeFileLogger = createFileLogger(filepath.Join(logsDir, gaugeLogFileName), 20)\n\t}\n\tstdOutFormatter := logging.NewBackendFormatter(stdOutLogger, format)\n\tfileFormatter := logging.NewBackendFormatter(gaugeFileLogger, format)\n\n\tstdOutLoggerLeveled := logging.AddModuleLevel(stdOutFormatter)\n\tstdOutLoggerLeveled.SetLevel(loggingLevel(), \"\")\n\n\tfileLoggerLeveled := logging.AddModuleLevel(fileFormatter)\n\tfileLoggerLeveled.SetLevel(logging.DEBUG, \"\")\n\n\tlogging.SetBackend(fileLoggerLeveled, stdOutLoggerLeveled)\n}\n\nfunc initApiLogger() {\n\tlogsDir, err := filepath.Abs(os.Getenv(LOGS_DIRECTORY))\n\tvar apiFileLogger logging.Backend\n\tif logsDir == \"\" || err != nil {\n\t\tapiFileLogger = createFileLogger(apiLogFile, 10)\n\t} else {\n\t\tapiFileLogger = createFileLogger(filepath.Join(logsDir, apiLogFileName), 10)\n\t}\n\n\tfileFormatter := logging.NewBackendFormatter(apiFileLogger, format)\n\tfileLoggerLeveled := logging.AddModuleLevel(fileFormatter)\n\tfileLoggerLeveled.SetLevel(loggingLevel(), \"\")\n\tapiLog.SetBackend(fileLoggerLeveled)\n}\n\nfunc createFileLogger(name string, size int) logging.Backend {\n\tif !filepath.IsAbs(name) {\n\t\tname = getLogFile(name)\n\t}\n\treturn logging.NewLogBackend(&lumberjack.Logger{\n\t\tFilename: name,\n\t\tMaxSize: size, \/\/ megabytes\n\t\tMaxBackups: 3,\n\t\tMaxAge: 28, \/\/days\n\t}, \"\", 0)\n}\n\nfunc getLogFile(fileName string) string {\n\tif config.ProjectRoot != \"\" {\n\t\treturn filepath.Join(config.ProjectRoot, fileName)\n\t} else {\n\t\tgaugeHome, err := common.GetGaugeHomeDirectory()\n\t\tif err != nil {\n\t\t\treturn fileName\n\t\t}\n\t\treturn filepath.Join(gaugeHome, fileName)\n\t}\n}\n\nfunc loggingLevel() logging.Level {\n\tif *verbosity {\n\t\treturn logging.DEBUG\n\t}\n\tif *logLevel != \"\" {\n\t\tswitch strings.ToLower(*logLevel) {\n\t\tcase \"debug\":\n\t\t\treturn logging.DEBUG\n\t\tcase \"info\":\n\t\t\treturn logging.INFO\n\t\tcase \"warning\":\n\t\t\treturn logging.WARNING\n\t\tcase \"error\":\n\t\t\treturn logging.ERROR\n\t\tcase \"critical\":\n\t\t\treturn logging.CRITICAL\n\t\tcase \"notice\":\n\t\t\treturn logging.NOTICE\n\t\t}\n\t}\n\treturn logging.INFO\n\n}\n<|endoftext|>"} {"text":"<commit_before>package ratelimit\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestRateLimit(t *testing.T) {\n\tif os.Getenv(\"REDIS_URL\") == \"\" {\n\t\tt.Skip(\"skipping redis test since there is no REDIS_URL\")\n\t}\n\n\tif time.Now().Minute() > 58 {\n\t\tt.Log(\"Note: The TestRateLimit test is known to have a bug if run near the top of the hour. Since the rate limiter isn't a moving window, it could end up checking against two different buckets on either side of the top of the hour, so if you see that just re-run it after you've passed the top of the hour.\")\n\t}\n\n\trateLimiter := NewRateLimiter(os.Getenv(\"REDIS_URL\"), fmt.Sprintf(\"worker-test-rl-%d\", os.Getpid()))\n\n\tok, err := rateLimiter.RateLimit(context.TODO(), \"slow\", 2, time.Hour)\n\tif err != nil {\n\t\tt.Fatalf(\"rate limiter error: %v\", err)\n\t}\n\tif !ok {\n\t\tt.Fatal(\"expected to not get rate limited, but was limited\")\n\t}\n\n\tok, err = rateLimiter.RateLimit(context.TODO(), \"slow\", 2, time.Hour)\n\tif err != nil {\n\t\tt.Fatalf(\"rate limiter error: %v\", err)\n\t}\n\tif !ok {\n\t\tt.Fatal(\"expected to not get rate limited, but was limited\")\n\t}\n\n\tok, err = rateLimiter.RateLimit(context.TODO(), \"slow\", 2, time.Hour)\n\tif err != nil {\n\t\tt.Fatalf(\"rate limiter error: %v\", err)\n\t}\n\tif ok {\n\t\tt.Fatal(\"expected to get rate limited, but was not limited\")\n\t}\n}\n<commit_msg>fix ratelimit_test.go<commit_after>package ratelimit\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestRateLimit(t *testing.T) {\n\tif os.Getenv(\"REDIS_URL\") == \"\" {\n\t\tt.Skip(\"skipping redis test since there is no REDIS_URL\")\n\t}\n\n\tif time.Now().Minute() > 58 {\n\t\tt.Log(\"Note: The TestRateLimit test is known to have a bug if run near the top of the hour. Since the rate limiter isn't a moving window, it could end up checking against two different buckets on either side of the top of the hour, so if you see that just re-run it after you've passed the top of the hour.\")\n\t}\n\n\trateLimiter := NewRateLimiter(os.Getenv(\"REDIS_URL\"), fmt.Sprintf(\"worker-test-rl-%d\", os.Getpid()))\n\n\tok, err := rateLimiter.RateLimit(context.TODO(), \"slow\", 2, time.Hour, false)\n\tif err != nil {\n\t\tt.Fatalf(\"rate limiter error: %v\", err)\n\t}\n\tif !ok {\n\t\tt.Fatal(\"expected to not get rate limited, but was limited\")\n\t}\n\n\tok, err = rateLimiter.RateLimit(context.TODO(), \"slow\", 2, time.Hour, false)\n\tif err != nil {\n\t\tt.Fatalf(\"rate limiter error: %v\", err)\n\t}\n\tif !ok {\n\t\tt.Fatal(\"expected to not get rate limited, but was limited\")\n\t}\n\n\tok, err = rateLimiter.RateLimit(context.TODO(), \"slow\", 2, time.Hour, false)\n\tif err != nil {\n\t\tt.Fatalf(\"rate limiter error: %v\", err)\n\t}\n\tif ok {\n\t\tt.Fatal(\"expected to get rate limited, but was not limited\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>updated to get byte representation for empty map<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>package header<commit_after><|endoftext|>"} {"text":"<commit_before>package ui\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"github.com\/dorzheh\/deployer\/config\"\n\tgui \"github.com\/dorzheh\/deployer\/ui\/dialog_ui\"\n\t\"github.com\/dorzheh\/infra\/utils\"\n)\n\nfunc UiHostName(ui *gui.DialogUi) (hostname string) {\n\tfor {\n\t\tui.SetSize(8, 30)\n\t\tui.SetLabel(\"New hostname: \")\n\t\thostname = ui.Inputbox(\"\")\n\t\tif err := utils.SetHostname(hostname); err != nil {\n\t\t\tui.Output(gui.Warning, err.Error()+\".Press <OK> to proceed\", 8, 12)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn\n}\n\n\/\/ setImageLocation method sets location for the VA image\nfunc UiImagePath(ui *gui.DialogUi, defaultLocation string) (location string) {\n\tfor {\n\t\tui.SetSize(6, 64)\n\t\tui.Msgbox(\"Choose VA image location.\\n\\tPress <Ok> to proceed\")\n\t\tlocation = ui.Dselect(defaultLocation)\n\t\tif _, err := os.Stat(location); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn\n}\n\nfunc UiRemoteMode(ui *gui.DialogUi) bool {\n\tui.SetLabel(\"Deployment Mode:\")\n\tif answer := ui.Menu(2, \"1\", \"Local\", \"2\", \"Remote\"); answer == \"1\" {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc UiRemoteParams(ui *gui.DialogUi) (string, string, string, string) {\n\tip := ui.GetIpFromInput(\"Remote server IP:\")\n\tuser := ui.GetFromInput(ip+\" superuser:\", \"root\")\n\tvar passwd string\n\tvar keyFile string\n\tanswer := ui.Menu(2, \"1\", \"Password\", \"2\", \"Private key\")\n\tif answer == \"1\" {\n\t\tpasswd = ui.GetPasswordFromInput(ip, user)\n\t} else {\n\t\tfor {\n\t\t\tui.SetSize(6, 64)\n\t\t\tui.Msgbox(\"Path to ssh private key.\\n\\tPress <Ok> to proceed\")\n\t\t\tkeyFile = ui.Fselect(\"\")\n\t\t\tif _, err := os.Stat(keyFile); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn ip, user, passwd, keyFile\n}\n\nfunc UiNetworks(ui *gui.DialogUi, networks []string, info map[int]*config.NicInfo) (map[string]*config.NicInfo, error) {\n\tnewMap := make(map[string]*config.NicInfo)\n\tvar temp []string\n\tfor _, n := range info {\n\t\ttemp = append(temp, fmt.Sprintf(\"%s (driver type %s, %s)\", n.Name, n.Driver, n.Desc))\n\t}\n\n\tsliceLength := len(temp)\n\tfor _, net := range networks {\n\t\tvar ifaceNumStr string\n\t\tfor {\n\t\t\tui.SetSize(10+sliceLength, 55)\n\t\t\tui.SetLabel(fmt.Sprintf(\"Choose appropriate interface for \\\"%s\\\" network:\", net))\n\t\t\tifaceNumStr = ui.Menu(sliceLength, temp[0:]...)\n\t\t\tif ifaceNumStr != \"\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tifaceNumInt, err := strconv.Atoi(ifaceNumStr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tnewMap[net] = info[ifaceNumInt]\n\t}\n\treturn newMap, nil\n}\n<commit_msg>Adding UiConfirmation<commit_after>package ui\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"github.com\/dorzheh\/deployer\/config\"\n\tgui \"github.com\/dorzheh\/deployer\/ui\/dialog_ui\"\n\t\"github.com\/dorzheh\/infra\/utils\"\n)\n\nfunc UiHostName(ui *gui.DialogUi) (hostname string) {\n\tfor {\n\t\tui.SetSize(8, 30)\n\t\tui.SetLabel(\"New hostname: \")\n\t\thostname = ui.Inputbox(\"\")\n\t\tif err := utils.SetHostname(hostname); err != nil {\n\t\t\tui.Output(gui.Warning, err.Error()+\".Press <OK> to proceed\", 8, 12)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn\n}\n\n\/\/ setImageLocation method sets location for the VA image\nfunc UiImagePath(ui *gui.DialogUi, defaultLocation string) (location string) {\n\tfor {\n\t\tui.SetSize(6, 64)\n\t\tui.Msgbox(\"Choose VA image location.\\n\\tPress <Ok> to proceed\")\n\t\tlocation = ui.Dselect(defaultLocation)\n\t\tif _, err := os.Stat(location); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\treturn\n}\n\nfunc UiRemoteMode(ui *gui.DialogUi) bool {\n\tui.SetLabel(\"Deployment Mode:\")\n\tif answer := ui.Menu(2, \"1\", \"Local\", \"2\", \"Remote\"); answer == \"1\" {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc UiRemoteParams(ui *gui.DialogUi) (string, string, string, string) {\n\tip := ui.GetIpFromInput(\"Remote server IP:\")\n\tuser := ui.GetFromInput(ip+\" superuser:\", \"root\")\n\tvar passwd string\n\tvar keyFile string\n\tanswer := ui.Menu(2, \"1\", \"Password\", \"2\", \"Private key\")\n\tif answer == \"1\" {\n\t\tpasswd = ui.GetPasswordFromInput(ip, user)\n\t} else {\n\t\tfor {\n\t\t\tui.SetSize(6, 64)\n\t\t\tui.Msgbox(\"Path to ssh private key.\\n\\tPress <Ok> to proceed\")\n\t\t\tkeyFile = ui.Fselect(\"\")\n\t\t\tif _, err := os.Stat(keyFile); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\treturn ip, user, passwd, keyFile\n}\n\nfunc UiNetworks(ui *gui.DialogUi, networks []string, info map[int]*config.NicInfo) (map[string]*config.NicInfo, error) {\n\tnewMap := make(map[string]*config.NicInfo)\n\tvar temp []string\n\tfor _, n := range info {\n\t\ttemp = append(temp, fmt.Sprintf(\"%s (driver type %s, %s)\", n.Name, n.Driver, n.Desc))\n\t}\n\n\tsliceLength := len(temp)\n\tfor _, net := range networks {\n\t\tvar ifaceNumStr string\n\t\tfor {\n\t\t\tui.SetSize(10+sliceLength, 55)\n\t\t\tui.SetLabel(fmt.Sprintf(\"Choose appropriate interface for \\\"%s\\\" network:\", net))\n\t\t\tifaceNumStr = ui.Menu(sliceLength, temp[0:]...)\n\t\t\tif ifaceNumStr != \"\" {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tifaceNumInt, err := strconv.Atoi(ifaceNumStr)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tnewMap[net] = info[ifaceNumInt]\n\t}\n\treturn newMap, nil\n}\n\nfunc UiConfirmation(ui *gui.DialogUi, buf *bytes.Buffer, height int) {\n\tbuf.WriteString(\"\\n\\nPress <OK> to proceed or <CTRL+C> to exit\")\n\tui.SetSize(height, 100)\n\tui.Msgbox(buf.String())\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Extend cursor helper func.<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>removed comments<commit_after><|endoftext|>"} {"text":"<commit_before>package hostdb\n\nimport (\n\t\"time\"\n\n\t\"github.com\/NebulousLabs\/Sia\/build\"\n)\n\nconst (\n\t\/\/ maxHostDowntime specifies the maximum amount of time that a host is\n\t\/\/ allowed to be offline while still being in the hostdb.\n\tmaxHostDowntime = 30 * 24 * time.Hour\n\n\t\/\/ minScans specifies the number of scans that a host should have before the\n\t\/\/ scans start getting compressed.\n\tminScans = 20\n\n\t\/\/ maxSettingsLen indicates how long in bytes the host settings field is\n\t\/\/ allowed to be before being ignored as a DoS attempt.\n\tmaxSettingsLen = 10e3\n\n\t\/\/ hostRequestTimeout indicates how long a host has to respond to a dial.\n\thostRequestTimeout = 2 * time.Minute\n\n\t\/\/ hostScanDeadline indicates how long a host has to complete an entire\n\t\/\/ scan.\n\thostScanDeadline = 4 * time.Minute\n\n\t\/\/ saveFrequency defines how frequently the hostdb will save to disk. Hostdb\n\t\/\/ will also save immediately prior to shutdown.\n\tsaveFrequency = 2 * time.Minute\n)\n\nvar (\n\t\/\/ hostCheckupQuantity specifies the number of hosts that get scanned every\n\t\/\/ time there is a regular scanning operation.\n\thostCheckupQuantity = build.Select(build.Var{\n\t\tStandard: int(200),\n\t\tDev: int(6),\n\t\tTesting: int(5),\n\t}).(int)\n\n\t\/\/ scanningThreads is the number of threads that will be probing hosts for\n\t\/\/ their settings and checking for reliability.\n\tscanningThreads = build.Select(build.Var{\n\t\tStandard: int(40),\n\t\tDev: int(4),\n\t\tTesting: int(3),\n\t}).(int)\n)\n\nvar (\n\t\/\/ defaultScanSleep is the amount of time that the hostdb will sleep if it\n\t\/\/ cannot successfully get a random number.\n\tdefaultScanSleep = build.Select(build.Var{\n\t\tStandard: time.Hour + time.Minute*37,\n\t\tDev: time.Minute * 5,\n\t\tTesting: time.Second * 15,\n\t}).(time.Duration)\n\n\t\/\/ maxScanSleep is the maximum amount of time that the hostdb will sleep\n\t\/\/ between performing scans of the hosts.\n\tmaxScanSleep = build.Select(build.Var{\n\t\tStandard: time.Hour * 4,\n\t\tDev: time.Minute * 10,\n\t\tTesting: time.Second * 15,\n\t}).(time.Duration)\n\n\t\/\/ minScanSleep is the minimum amount of time that the hostdb will sleep\n\t\/\/ between performing scans of the hosts.\n\tminScanSleep = build.Select(build.Var{\n\t\tStandard: time.Hour + time.Minute*20,\n\t\tDev: time.Minute * 3,\n\t\tTesting: time.Second * 14,\n\t}).(time.Duration)\n)\n<commit_msg>tweak some of the scanning constants in the host<commit_after>package hostdb\n\nimport (\n\t\"time\"\n\n\t\"github.com\/NebulousLabs\/Sia\/build\"\n)\n\nconst (\n\t\/\/ maxHostDowntime specifies the maximum amount of time that a host is\n\t\/\/ allowed to be offline while still being in the hostdb.\n\tmaxHostDowntime = 10 * 24 * time.Hour\n\n\t\/\/ minScans specifies the number of scans that a host should have before the\n\t\/\/ scans start getting compressed.\n\tminScans = 12\n\n\t\/\/ maxSettingsLen indicates how long in bytes the host settings field is\n\t\/\/ allowed to be before being ignored as a DoS attempt.\n\tmaxSettingsLen = 10e3\n\n\t\/\/ hostRequestTimeout indicates how long a host has to respond to a dial.\n\thostRequestTimeout = 2 * time.Minute\n\n\t\/\/ hostScanDeadline indicates how long a host has to complete an entire\n\t\/\/ scan.\n\thostScanDeadline = 4 * time.Minute\n\n\t\/\/ saveFrequency defines how frequently the hostdb will save to disk. Hostdb\n\t\/\/ will also save immediately prior to shutdown.\n\tsaveFrequency = 2 * time.Minute\n)\n\nvar (\n\t\/\/ hostCheckupQuantity specifies the number of hosts that get scanned every\n\t\/\/ time there is a regular scanning operation.\n\thostCheckupQuantity = build.Select(build.Var{\n\t\tStandard: int(200),\n\t\tDev: int(6),\n\t\tTesting: int(5),\n\t}).(int)\n\n\t\/\/ scanningThreads is the number of threads that will be probing hosts for\n\t\/\/ their settings and checking for reliability.\n\tscanningThreads = build.Select(build.Var{\n\t\tStandard: int(20),\n\t\tDev: int(4),\n\t\tTesting: int(3),\n\t}).(int)\n)\n\nvar (\n\t\/\/ defaultScanSleep is the amount of time that the hostdb will sleep if it\n\t\/\/ cannot successfully get a random number.\n\tdefaultScanSleep = build.Select(build.Var{\n\t\tStandard: time.Hour + time.Minute*37,\n\t\tDev: time.Minute * 5,\n\t\tTesting: time.Second * 15,\n\t}).(time.Duration)\n\n\t\/\/ maxScanSleep is the maximum amount of time that the hostdb will sleep\n\t\/\/ between performing scans of the hosts.\n\tmaxScanSleep = build.Select(build.Var{\n\t\tStandard: time.Hour * 8,\n\t\tDev: time.Minute * 10,\n\t\tTesting: time.Second * 15,\n\t}).(time.Duration)\n\n\t\/\/ minScanSleep is the minimum amount of time that the hostdb will sleep\n\t\/\/ between performing scans of the hosts.\n\tminScanSleep = build.Select(build.Var{\n\t\tStandard: time.Hour + time.Minute*20,\n\t\tDev: time.Minute * 3,\n\t\tTesting: time.Second * 14,\n\t}).(time.Duration)\n)\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"container\/list\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/brnstz\/routine\/wikimg\"\n)\n\nvar (\n\t\/\/ Print an HTML div with the hex background\n\tfmtSpec = `<div style=\"background: %s; width=100%%\"> <\/div>`\n\n\tcache = newColorCache(50000)\n)\n\n\/\/ colorCache is a cache of recent URLs to imgResponse values. It expires older\n\/\/ URLs once it contains the maximum number of values.\ntype colorCache struct {\n\thmap map[string]imgResponse\n\tcount int\n\tmax int\n\tmutex sync.RWMutex\n\texp *list.List\n}\n\n\/\/ newColorCache creates colorCache that holds max items.\nfunc newColorCache(max int) *colorCache {\n\treturn &colorCache{\n\t\thmap: map[string]imgResponse{},\n\t\tcount: 0,\n\t\tmax: max,\n\t\tmutex: sync.RWMutex{},\n\t\texp: list.New(),\n\t}\n}\n\nfunc (cc *colorCache) Add(url string, resp imgResponse) {\n\t\/\/ Lock the cache while we're adding\n\tcc.mutex.Lock()\n\n\tif cc.count >= cc.max {\n\t\t\/\/ If we've exceeded the max size, we must remove the oldest\n\t\t\/\/ element\n\n\t\t\/\/ Find the last element\n\t\tback := cc.exp.Back()\n\n\t\t\/\/ Remove it from the cache\n\t\tdelete(cc.hmap, back.Value.(string))\n\n\t\t\/\/ Also remove it from the exp list\n\t\tcc.exp.Remove(back)\n\t} else {\n\n\t\t\/\/ Otherwise, we didn't remove anything so increment count\n\t\tcc.count++\n\t}\n\n\t\/\/ Add new url to be last to expire\n\tcc.exp.PushFront(url)\n\n\t\/\/ Save its value\n\tcc.hmap[url] = resp\n\n\t\/\/ Done locking\n\tcc.mutex.Unlock()\n}\n\nfunc (cc *colorCache) Get(url string) (imgResponse, bool) {\n\tcc.mutex.RLock()\n\n\tresp, ok := cc.hmap[url]\n\n\tcc.mutex.RUnlock()\n\n\treturn resp, ok\n}\n\n\/\/ imgRequest is a request to get the first color from a URL\ntype imgRequest struct {\n\tp *wikimg.Puller\n\turl string\n\tresponses chan imgResponse\n}\n\n\/\/ imgResponse contains the result of processing an imgRequest\ntype imgResponse struct {\n\thex string\n\terr error\n}\n\n\/\/ worker takes imgRequests on the in channel, processes them and sends\n\/\/ an imgResponse back on the request's channel\nfunc worker(in chan *imgRequest) {\n\tfor req := range in {\n\t\tvar resp imgResponse\n\n\t\t\/\/ Check cache first\n\t\tresp, ok := cache.Get(req.url)\n\n\t\tif !ok {\n\t\t\t\/\/ It wasn't in the cache, so actually get it\n\t\t\t_, resp.hex, resp.err = req.p.FirstColor(req.url)\n\n\t\t\tcache.Add(req.url, resp)\n\t\t}\n\n\t\t\/\/ Send it back on our response channel\n\t\treq.responses <- resp\n\t}\n}\n\nfunc main() {\n\tvar max, workers, buffer, port int\n\n\tflag.IntVar(&max, \"max\", 100, \"maximum number of images per request\")\n\tflag.IntVar(&workers, \"workers\", 25, \"number of background workers\")\n\tflag.IntVar(&buffer, \"buffer\", 10000, \"size of buffered channels\")\n\tflag.IntVar(&port, \"port\", 8000, \"HTTP port to listen on\")\n\tflag.Parse()\n\n\t\/\/ Create a buffered channel for communicating between image\n\t\/\/ puller loop and workers\n\timgReqs := make(chan *imgRequest, buffer)\n\n\t\/\/ Create workers\n\tfor i := 0; i < workers; i++ {\n\t\tgo worker(imgReqs)\n\t}\n\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\t\/\/ Create a new image puller with our max\n\t\tp := wikimg.NewPuller(max)\n\n\t\t\/\/ Create a context with a 20 second timeout\n\t\tctx, _ := context.WithTimeout(context.Background(), time.Second*20)\n\n\t\t\/\/ Set puller's Cancel channel, so it will be closed when the\n\t\t\/\/ context times out\n\t\tp.Cancel = ctx.Done()\n\n\t\t\/\/ Create a channel for receiving responses specific\n\t\t\/\/ to this HTTP request\n\t\tresponses := make(chan imgResponse, max)\n\n\t\t\/\/ Assert our writer to a flusher, so we can stream line by line\n\t\tf, ok := w.(http.Flusher)\n\t\tif !ok {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Loop to retrieve more images\n\t\tfor {\n\t\t\timgURL, err := p.Next()\n\n\t\t\tif err == wikimg.EndOfResults {\n\t\t\t\t\/\/ Break from loop when end of results is reached\n\t\t\t\tbreak\n\n\t\t\t} else if err != nil {\n\t\t\t\t\/\/ Send error on the response channel and continue\n\t\t\t\tresponses <- imgResponse{err: err}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Create request and send on the global channel\n\t\t\timgReqs <- &imgRequest{\n\t\t\t\tp: p,\n\t\t\t\turl: imgURL,\n\t\t\t\tresponses: responses,\n\t\t\t}\n\t\t}\n\n\t\tfor i := 0; i < max; i++ {\n\t\t\t\/\/ Read a response from the channel\n\t\t\tresp := <-responses\n\n\t\t\t\/\/ If there's an error, just log it on the server\n\t\t\tif resp.err != nil {\n\t\t\t\tlog.Println(resp.err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Write a line of color\n\t\t\tfmt.Fprintf(w, fmtSpec, resp.hex)\n\t\t\tfmt.Fprintln(w)\n\t\t\tf.Flush()\n\t\t}\n\t})\n\n\thttp.ListenAndServe(fmt.Sprintf(\":%d\", port), nil)\n}\n<commit_msg>comment updates<commit_after>package main\n\nimport (\n\t\"container\/list\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/brnstz\/routine\/wikimg\"\n)\n\nvar (\n\t\/\/ Print an HTML div with the hex background\n\tfmtSpec = `<div style=\"background: %s; width=100%%\"> <\/div>`\n\n\t\/\/ cache is our global cache of urls to imgResponse values\n\tcache = newColorCache(50000)\n)\n\n\/\/ colorCache is a cache of recent URLs to imgResponse values. It expires older\n\/\/ URLs once it contains the maximum number of values.\ntype colorCache struct {\n\thmap map[string]imgResponse\n\tcount int\n\tmax int\n\tmutex sync.RWMutex\n\texp *list.List\n}\n\n\/\/ newColorCache creates colorCache that holds max items.\nfunc newColorCache(max int) *colorCache {\n\treturn &colorCache{\n\t\thmap: map[string]imgResponse{},\n\t\tcount: 0,\n\t\tmax: max,\n\t\tmutex: sync.RWMutex{},\n\t\texp: list.New(),\n\t}\n}\n\nfunc (cc *colorCache) Add(url string, resp imgResponse) {\n\t\/\/ Lock the cache while we're adding\n\tcc.mutex.Lock()\n\n\tif cc.count >= cc.max {\n\t\t\/\/ If we've exceeded the max size, we must remove the oldest\n\t\t\/\/ element\n\n\t\t\/\/ Find the last element\n\t\tback := cc.exp.Back()\n\n\t\t\/\/ Remove it from the cache\n\t\tdelete(cc.hmap, back.Value.(string))\n\n\t\t\/\/ Also remove it from the exp list\n\t\tcc.exp.Remove(back)\n\t} else {\n\n\t\t\/\/ Otherwise, we didn't remove anything so increment count\n\t\tcc.count++\n\t}\n\n\t\/\/ Add new url to be last to expire\n\tcc.exp.PushFront(url)\n\n\t\/\/ Save its value\n\tcc.hmap[url] = resp\n\n\t\/\/ Done locking\n\tcc.mutex.Unlock()\n}\n\nfunc (cc *colorCache) Get(url string) (imgResponse, bool) {\n\tcc.mutex.RLock()\n\n\t\/\/ Get it within read lock\n\tresp, ok := cc.hmap[url]\n\n\tcc.mutex.RUnlock()\n\n\treturn resp, ok\n}\n\n\/\/ imgRequest is a request to get the first color from a URL\ntype imgRequest struct {\n\tp *wikimg.Puller\n\turl string\n\tresponses chan imgResponse\n}\n\n\/\/ imgResponse contains the result of processing an imgRequest\ntype imgResponse struct {\n\thex string\n\terr error\n}\n\n\/\/ worker takes imgRequests on the in channel, processes them and sends\n\/\/ an imgResponse back on the request's channel\nfunc worker(in chan *imgRequest) {\n\tfor req := range in {\n\t\tvar resp imgResponse\n\n\t\t\/\/ Check cache first\n\t\tresp, ok := cache.Get(req.url)\n\n\t\tif !ok {\n\n\t\t\t\/\/ It wasn't in the cache, so actually get it and add it\n\t\t\t_, resp.hex, resp.err = req.p.FirstColor(req.url)\n\t\t\tcache.Add(req.url, resp)\n\t\t}\n\n\t\t\/\/ Send it back on our response channel\n\t\treq.responses <- resp\n\t}\n}\n\nfunc main() {\n\tvar max, workers, buffer, port int\n\n\tflag.IntVar(&max, \"max\", 100, \"maximum number of images per request\")\n\tflag.IntVar(&workers, \"workers\", 25, \"number of background workers\")\n\tflag.IntVar(&buffer, \"buffer\", 10000, \"size of buffered channels\")\n\tflag.IntVar(&port, \"port\", 8000, \"HTTP port to listen on\")\n\tflag.Parse()\n\n\t\/\/ Create a buffered channel for communicating between image\n\t\/\/ puller loop and workers\n\timgReqs := make(chan *imgRequest, buffer)\n\n\t\/\/ Create workers\n\tfor i := 0; i < workers; i++ {\n\t\tgo worker(imgReqs)\n\t}\n\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\t\/\/ Create a new image puller with our max\n\t\tp := wikimg.NewPuller(max)\n\n\t\t\/\/ Create a context with a 20 second timeout\n\t\tctx, _ := context.WithTimeout(context.Background(), time.Second*20)\n\n\t\t\/\/ Set puller's Cancel channel, so it will be closed when the\n\t\t\/\/ context times out\n\t\tp.Cancel = ctx.Done()\n\n\t\t\/\/ Create a channel for receiving responses specific\n\t\t\/\/ to this HTTP request\n\t\tresponses := make(chan imgResponse, max)\n\n\t\t\/\/ Assert our writer to a flusher, so we can stream line by line\n\t\tf, ok := w.(http.Flusher)\n\t\tif !ok {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Loop to retrieve more images\n\t\tfor {\n\t\t\timgURL, err := p.Next()\n\n\t\t\tif err == wikimg.EndOfResults {\n\t\t\t\t\/\/ Break from loop when end of results is reached\n\t\t\t\tbreak\n\n\t\t\t} else if err != nil {\n\t\t\t\t\/\/ Send error on the response channel and continue\n\t\t\t\tresponses <- imgResponse{err: err}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Create request and send on the global channel\n\t\t\timgReqs <- &imgRequest{\n\t\t\t\tp: p,\n\t\t\t\turl: imgURL,\n\t\t\t\tresponses: responses,\n\t\t\t}\n\t\t}\n\n\t\tfor i := 0; i < max; i++ {\n\t\t\t\/\/ Read a response from the channel\n\t\t\tresp := <-responses\n\n\t\t\t\/\/ If there's an error, just log it on the server\n\t\t\tif resp.err != nil {\n\t\t\t\tlog.Println(resp.err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Write a line of color\n\t\t\tfmt.Fprintf(w, fmtSpec, resp.hex)\n\t\t\tfmt.Fprintln(w)\n\t\t\tf.Flush()\n\t\t}\n\t})\n\n\thttp.ListenAndServe(fmt.Sprintf(\":%d\", port), nil)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Code generated by zanzibar\n\/\/ @generated\n\npackage googlenow\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\n\t\"github.com\/uber\/zanzibar\/examples\/example-gateway\/build\/clients\"\n\tzanzibar \"github.com\/uber\/zanzibar\/runtime\"\n\t\"go.uber.org\/zap\"\n\n\t\"github.com\/uber\/zanzibar\/examples\/example-gateway\/build\/clients\/googlenow\"\n)\n\n\/\/ HandleAddCredentialsRequest handles \"\/googlenow\/add-credentials\".\nfunc HandleAddCredentialsRequest(\n\tctx context.Context,\n\treq *zanzibar.ServerHTTPRequest,\n\tres *zanzibar.ServerHTTPResponse,\n\tclients *clients.Clients,\n) {\n\tvar requestBody AddCredentialsHTTPRequest\n\tif ok := req.ReadAndUnmarshalBody(&requestBody); !ok {\n\t\treturn\n\t}\n\n\tworkflow := AddCredentialsEndpoint{\n\t\tClients: clients,\n\t\tLogger: req.Logger,\n\t\tRequest: req,\n\t}\n\n\trespHeaders, err := workflow.Handle(ctx, req.Header, &requestBody)\n\tif err != nil {\n\t\treq.Logger.Warn(\"Workflow for endpoint returned error\",\n\t\t\tzap.String(\"error\", err.Error()),\n\t\t)\n\t\tres.SendErrorString(500, \"Unexpected server error\")\n\t\treturn\n\t}\n\n\tres.WriteJSONBytes(202, respHeaders, nil)\n}\n\n\/\/ AddCredentialsEndpoint calls thrift client GoogleNow.AddCredentials\ntype AddCredentialsEndpoint struct {\n\tClients *clients.Clients\n\tLogger *zap.Logger\n\tRequest *zanzibar.ServerHTTPRequest\n}\n\n\/\/ Handle calls thrift client.\nfunc (w AddCredentialsEndpoint) Handle(\n\tctx context.Context,\n\t\/\/ TODO(sindelar): Switch to zanzibar.Headers when tchannel\n\t\/\/ generation is implemented.\n\theaders http.Header,\n\tr *AddCredentialsHTTPRequest,\n) (map[string]string, error) {\n\tclientRequest := convertToAddCredentialsClientRequest(r)\n\n\tclientHeaders := map[string]string{}\n\tfor k, v := range map[string]string{\"x-uuid\": \"x-uuid\", \"x-token\": \"x-token\"} {\n\t\tclientHeaders[v] = headers.Get(k)\n\t}\n\n\trespHeaders, err := w.Clients.GoogleNow.AddCredentials(\n\t\tctx, clientHeaders, clientRequest,\n\t)\n\tif err != nil {\n\t\tw.Logger.Warn(\"Could not make client request\",\n\t\t\tzap.String(\"error\", err.Error()),\n\t\t)\n\t\treturn nil, err\n\t}\n\n\t\/\/ Filter and map response headers from client to server response.\n\tendRespHead := map[string]string{}\n\tfor k, v := range map[string]string{\"x-token\": \"x-token\", \"x-uuid\": \"x-uuid\"} {\n\t\tendRespHead[v] = respHeaders[k]\n\t}\n\n\treturn endRespHead, nil\n}\n\nfunc convertToAddCredentialsClientRequest(body *AddCredentialsHTTPRequest) *googlenowClient.AddCredentialsHTTPRequest {\n\tclientRequest := &googlenowClient.AddCredentialsHTTPRequest{}\n\n\tclientRequest.AuthCode = string(body.AuthCode)\n\n\treturn clientRequest\n}\n<commit_msg>add one more gen file<commit_after>\/\/ Code generated by zanzibar\n\/\/ @generated\n\npackage googlenow\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\n\t\"github.com\/uber\/zanzibar\/examples\/example-gateway\/build\/clients\"\n\tzanzibar \"github.com\/uber\/zanzibar\/runtime\"\n\t\"go.uber.org\/zap\"\n\n\t\"github.com\/uber\/zanzibar\/examples\/example-gateway\/build\/clients\/googlenow\"\n)\n\n\/\/ HandleAddCredentialsRequest handles \"\/googlenow\/add-credentials\".\nfunc HandleAddCredentialsRequest(\n\tctx context.Context,\n\treq *zanzibar.ServerHTTPRequest,\n\tres *zanzibar.ServerHTTPResponse,\n\tclients *clients.Clients,\n) {\n\tvar requestBody AddCredentialsHTTPRequest\n\tif ok := req.ReadAndUnmarshalBody(&requestBody); !ok {\n\t\treturn\n\t}\n\n\tworkflow := AddCredentialsEndpoint{\n\t\tClients: clients,\n\t\tLogger: req.Logger,\n\t\tRequest: req,\n\t}\n\n\trespHeaders, err := workflow.Handle(ctx, req.Header, &requestBody)\n\tif err != nil {\n\t\treq.Logger.Warn(\"Workflow for endpoint returned error\",\n\t\t\tzap.String(\"error\", err.Error()),\n\t\t)\n\t\tres.SendErrorString(500, \"Unexpected server error\")\n\t\treturn\n\t}\n\n\tres.WriteJSONBytes(202, respHeaders, nil)\n}\n\n\/\/ AddCredentialsEndpoint calls thrift client GoogleNow.AddCredentials\ntype AddCredentialsEndpoint struct {\n\tClients *clients.Clients\n\tLogger *zap.Logger\n\tRequest *zanzibar.ServerHTTPRequest\n}\n\n\/\/ Handle calls thrift client.\nfunc (w AddCredentialsEndpoint) Handle(\n\tctx context.Context,\n\t\/\/ TODO(sindelar): Switch to zanzibar.Headers when tchannel\n\t\/\/ generation is implemented.\n\theaders http.Header,\n\tr *AddCredentialsHTTPRequest,\n) (map[string]string, error) {\n\tclientRequest := convertToAddCredentialsClientRequest(r)\n\n\tclientHeaders := map[string]string{}\n\tfor k, v := range map[string]string{\"x-token\": \"x-token\", \"x-uuid\": \"x-uuid\"} {\n\t\tclientHeaders[v] = headers.Get(k)\n\t}\n\n\trespHeaders, err := w.Clients.GoogleNow.AddCredentials(\n\t\tctx, clientHeaders, clientRequest,\n\t)\n\tif err != nil {\n\t\tw.Logger.Warn(\"Could not make client request\",\n\t\t\tzap.String(\"error\", err.Error()),\n\t\t)\n\t\treturn nil, err\n\t}\n\n\t\/\/ Filter and map response headers from client to server response.\n\tendRespHead := map[string]string{}\n\tfor k, v := range map[string]string{\"x-uuid\": \"x-uuid\", \"x-token\": \"x-token\"} {\n\t\tendRespHead[v] = respHeaders[k]\n\t}\n\n\treturn endRespHead, nil\n}\n\nfunc convertToAddCredentialsClientRequest(body *AddCredentialsHTTPRequest) *googlenowClient.AddCredentialsHTTPRequest {\n\tclientRequest := &googlenowClient.AddCredentialsHTTPRequest{}\n\n\tclientRequest.AuthCode = string(body.AuthCode)\n\n\treturn clientRequest\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Better ordering of methods<commit_after><|endoftext|>"} {"text":"<commit_before>package slack\n\nimport (\n\t\"errors\"\n\t\"net\/url\"\n)\n\n\/\/ UserProfile contains all the information details of a given user\ntype UserProfile struct {\n\tFirstName string `json:\"first_name\"`\n\tLastName string `json:\"last_name\"`\n\tRealName string `json:\"real_name\"`\n\tRealNameNormalized string `json:\"real_name_normalized\"`\n\tEmail string `json:\"email\"`\n\tSkype string `json:\"skype\"`\n\tPhone string `json:\"phone\"`\n\tImage24 string `json:\"image_24\"`\n\tImage32 string `json:\"image_32\"`\n\tImage48 string `json:\"image_48\"`\n\tImage72 string `json:\"image_72\"`\n\tImage192 string `json:\"image_192\"`\n\tImageOriginal string `json:\"image_original\"`\n\tTitle string `json:\"title\"`\n}\n\n\/\/ User contains all the information of a user\ntype User struct {\n\tId string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDeleted bool `json:\"deleted\"`\n\tColor string `json:\"color\"`\n\tProfile UserProfile `json:\"profile\"`\n\tIsBot bool `json:\"is_bot\"`\n\tIsAdmin bool `json:\"is_admin\"`\n\tIsOwner bool `json:\"is_owner\"`\n\tIsPrimaryOwner bool `json:\"is_primary_owner\"`\n\tIsRestricted bool `json:\"is_restricted\"`\n\tIsUltraRestricted bool `json:\"is_ultra_restricted\"`\n\tHasFiles bool `json:\"has_files\"`\n\tPresence string `json:\"presence\"`\n}\n\n\/\/ UserPresence contains details about a user online status\ntype UserPresence struct {\n\tPresence string `json:\"presence,omitempty\"`\n\tOnline bool `json:\"online,omitempty\"`\n\tAutoAway bool `json:\"auto_away,omitempty\"`\n\tManualAway bool `json:\"manual_away,omitempty\"`\n\tConnectionCount int `json:\"connection_count,omitempty\"`\n\tLastActivity JSONTime `json:\"last_activity,omitempty\"`\n}\n\ntype userResponseFull struct {\n\tMembers []User `json:\"members,omitempty\"` \/\/ ListUsers\n\tUser `json:\"user,omitempty\"` \/\/ GetUserInfo\n\tUserPresence \/\/ GetUserPresence\n\tSlackResponse\n}\n\nfunc userRequest(path string, values url.Values, debug bool) (*userResponseFull, error) {\n\tresponse := &userResponseFull{}\n\terr := parseResponse(path, values, response, debug)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !response.Ok {\n\t\treturn nil, errors.New(response.Error)\n\t}\n\treturn response, nil\n}\n\n\/\/ GetUserPresence will retrieve the current presence status of given user.\nfunc (api *Slack) GetUserPresence(userId string) (*UserPresence, error) {\n\tvalues := url.Values{\n\t\t\"token\": {api.config.token},\n\t\t\"user\": {userId},\n\t}\n\tresponse, err := userRequest(\"users.getPresence\", values, api.debug)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &response.UserPresence, nil\n}\n\n\/\/ GetUserInfo will retrive the complete user information\nfunc (api *Slack) GetUserInfo(userId string) (*User, error) {\n\tvalues := url.Values{\n\t\t\"token\": {api.config.token},\n\t\t\"user\": {userId},\n\t}\n\tresponse, err := userRequest(\"users.info\", values, api.debug)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &response.User, nil\n}\n\n\/\/ GetUsers returns the list of users (with their detailed information)\nfunc (api *Slack) GetUsers() ([]User, error) {\n\tvalues := url.Values{\n\t\t\"token\": {api.config.token},\n\t}\n\tresponse, err := userRequest(\"users.list\", values, api.debug)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn response.Members, nil\n}\n\n\/\/ SetUserAsActive marks the currently authenticated user as active\nfunc (api *Slack) SetUserAsActive() error {\n\tvalues := url.Values{\n\t\t\"token\": {api.config.token},\n\t}\n\t_, err := userRequest(\"users.setActive\", values, api.debug)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ SetUserPresence changes the currently authenticated user presence\nfunc (api *Slack) SetUserPresence(presence string) error {\n\tvalues := url.Values{\n\t\t\"token\": {api.config.token},\n\t\t\"presence\": {presence},\n\t}\n\t_, err := userRequest(\"users.setPresence\", values, api.debug)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}\n<commit_msg>Add has_2fa support<commit_after>package slack\n\nimport (\n\t\"errors\"\n\t\"net\/url\"\n)\n\n\/\/ UserProfile contains all the information details of a given user\ntype UserProfile struct {\n\tFirstName string `json:\"first_name\"`\n\tLastName string `json:\"last_name\"`\n\tRealName string `json:\"real_name\"`\n\tRealNameNormalized string `json:\"real_name_normalized\"`\n\tEmail string `json:\"email\"`\n\tSkype string `json:\"skype\"`\n\tPhone string `json:\"phone\"`\n\tImage24 string `json:\"image_24\"`\n\tImage32 string `json:\"image_32\"`\n\tImage48 string `json:\"image_48\"`\n\tImage72 string `json:\"image_72\"`\n\tImage192 string `json:\"image_192\"`\n\tImageOriginal string `json:\"image_original\"`\n\tTitle string `json:\"title\"`\n}\n\n\/\/ User contains all the information of a user\ntype User struct {\n\tId string `json:\"id\"`\n\tName string `json:\"name\"`\n\tDeleted bool `json:\"deleted\"`\n\tColor string `json:\"color\"`\n\tProfile UserProfile `json:\"profile\"`\n\tIsBot bool `json:\"is_bot\"`\n\tIsAdmin bool `json:\"is_admin\"`\n\tIsOwner bool `json:\"is_owner\"`\n\tIsPrimaryOwner bool `json:\"is_primary_owner\"`\n\tIsRestricted bool `json:\"is_restricted\"`\n\tIsUltraRestricted bool `json:\"is_ultra_restricted\"`\n\tHas2FA bool `json:\"has_2fa\"`\n\tHasFiles bool `json:\"has_files\"`\n\tPresence string `json:\"presence\"`\n}\n\n\/\/ UserPresence contains details about a user online status\ntype UserPresence struct {\n\tPresence string `json:\"presence,omitempty\"`\n\tOnline bool `json:\"online,omitempty\"`\n\tAutoAway bool `json:\"auto_away,omitempty\"`\n\tManualAway bool `json:\"manual_away,omitempty\"`\n\tConnectionCount int `json:\"connection_count,omitempty\"`\n\tLastActivity JSONTime `json:\"last_activity,omitempty\"`\n}\n\ntype userResponseFull struct {\n\tMembers []User `json:\"members,omitempty\"` \/\/ ListUsers\n\tUser `json:\"user,omitempty\"` \/\/ GetUserInfo\n\tUserPresence \/\/ GetUserPresence\n\tSlackResponse\n}\n\nfunc userRequest(path string, values url.Values, debug bool) (*userResponseFull, error) {\n\tresponse := &userResponseFull{}\n\terr := parseResponse(path, values, response, debug)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !response.Ok {\n\t\treturn nil, errors.New(response.Error)\n\t}\n\treturn response, nil\n}\n\n\/\/ GetUserPresence will retrieve the current presence status of given user.\nfunc (api *Slack) GetUserPresence(userId string) (*UserPresence, error) {\n\tvalues := url.Values{\n\t\t\"token\": {api.config.token},\n\t\t\"user\": {userId},\n\t}\n\tresponse, err := userRequest(\"users.getPresence\", values, api.debug)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &response.UserPresence, nil\n}\n\n\/\/ GetUserInfo will retrive the complete user information\nfunc (api *Slack) GetUserInfo(userId string) (*User, error) {\n\tvalues := url.Values{\n\t\t\"token\": {api.config.token},\n\t\t\"user\": {userId},\n\t}\n\tresponse, err := userRequest(\"users.info\", values, api.debug)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &response.User, nil\n}\n\n\/\/ GetUsers returns the list of users (with their detailed information)\nfunc (api *Slack) GetUsers() ([]User, error) {\n\tvalues := url.Values{\n\t\t\"token\": {api.config.token},\n\t}\n\tresponse, err := userRequest(\"users.list\", values, api.debug)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn response.Members, nil\n}\n\n\/\/ SetUserAsActive marks the currently authenticated user as active\nfunc (api *Slack) SetUserAsActive() error {\n\tvalues := url.Values{\n\t\t\"token\": {api.config.token},\n\t}\n\t_, err := userRequest(\"users.setActive\", values, api.debug)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ SetUserPresence changes the currently authenticated user presence\nfunc (api *Slack) SetUserPresence(presence string) error {\n\tvalues := url.Values{\n\t\t\"token\": {api.config.token},\n\t\t\"presence\": {presence},\n\t}\n\t_, err := userRequest(\"users.setPresence\", values, api.debug)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}\n<|endoftext|>"} {"text":"<commit_before>package collectors\n\nimport (\n\t\"os\"\n\n\t\"github.com\/StackExchange\/scollector\/opentsdb\"\n)\n\nfunc init() {\n\tcollectors = append(collectors, &IntervalCollector{F: puppet_disabled_func_linux})\n}\n\nfunc puppet_disabled_func_linux() opentsdb.MultiDataPoint {\n\tvar md opentsdb.MultiDataPoint\n\tdisabled := 0\n\tfilename := \"\/var\/lib\/puppet\/state\/agent_disabled.lock\"\n\tif _, err := os.Stat(filename); !os.IsNotExist(err) {\n\t\tdisabled = 1\n\t}\n\tAdd(&md, \"puppet.disabled\", disabled, nil)\n\treturn md\n}\n<commit_msg>cmd\/scollector: Don't send puppet enabled metric unless Puppet seems to be installed<commit_after>package collectors\n\nimport (\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/StackExchange\/scollector\/opentsdb\"\n)\n\nfunc init() {\n\tcollectors = append(collectors, &IntervalCollector{F: puppet_disabled_func_linux, init: puppetInit})\n}\n\nvar puppetExists bool\n\nconst puppetPath = \"\/var\/lib\/puppet\/\"\nconst puppetDisabled = \"\/var\/lib\/puppet\/state\/agent_disabled.lock\"\n\nfunc puppetInit() {\n\tupdate := func() {\n\t\tif _, err := os.Stat(puppetPath); err == nil {\n\t\t\tpuppetExists = true\n\t\t}\n\t}\n\tupdate()\n\tgo func() {\n\t\tfor _ = range time.Tick(time.Minute * 5) {\n\t\t\tupdate()\n\t\t}\n\t}()\n}\n\nfunc puppet_disabled_func_linux() opentsdb.MultiDataPoint {\n\tif !puppetExists {\n\t\treturn nil\n\t}\n\tvar md opentsdb.MultiDataPoint\n\tdisabled := 0\n\tif _, err := os.Stat(puppetDisabled); !os.IsNotExist(err) {\n\t\tdisabled = 1\n\t}\n\tAdd(&md, \"puppet.disabled\", disabled, nil)\n\treturn md\n}\n<|endoftext|>"} {"text":"<commit_before>package options\n\nimport (\n\t\"shadowss\/pkg\/config\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/spf13\/pflag\"\n)\n\ntype ServerOption struct {\n\tConfigFile string\n\tCpuCoreNum int\n\tEnableUDPRelay bool\n\tURL string\n\tMaxTCPConnPerPort int\n\tAPIProxyListenPort int\n}\n\nfunc NewServerOption() *ServerOption {\n\treturn &ServerOption{\n\t\tConfigFile: string(\"\"),\n\t\tCpuCoreNum: 1,\n\t\tEnableUDPRelay: false,\n\t}\n}\n\nfunc (s *ServerOption) LoadConfigFile() error {\n\tglog.V(5).Infof(\"Parse %s file\\r\\n\", s.ConfigFile)\n\terr := config.ServerCfg.Parse(s.ConfigFile)\n\tif err != nil {\n\t\tglog.Errorf(\"error reading %s: %v\\n\", s.ConfigFile, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *ServerOption) AddFlags(fs *pflag.FlagSet) {\n\n\tfs.StringVar(&s.ConfigFile, \"config-file\", s.ConfigFile, \"\"+\n\t\t\"specify a configure file for server run. \")\n\n\tfs.IntVar(&s.CpuCoreNum, \"cpu-core-num\", s.CpuCoreNum, \"\"+\n\t\t\"specify how many cpu core will be alloc for program\")\n\n\tfs.BoolVar(&s.EnableUDPRelay, \"enable-udp-relay\", s.EnableUDPRelay, \"\"+\n\t\t\"enable udp relay\")\n\n\tfs.StringVar(&s.URL, \"apiserver-url\", s.URL, \"\"+\n\t\t\"specify a api server url. \")\n\n\tfs.IntVar(&s.MaxTCPConnPerPort, \"max-tcp-conn-per-port\", 300, \"\"+\n\t\t\"specify how many tcp connection per port\")\n\n\tfs.IntVar(&s.APIProxyListenPort, \"max-tcp-conn-per-port\", 48888, \"\"+\n\t\t\"specify a port for api proxy\")\n\n}\n<commit_msg>fix options panic bug<commit_after>package options\n\nimport (\n\t\"shadowss\/pkg\/config\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/spf13\/pflag\"\n)\n\ntype ServerOption struct {\n\tConfigFile string\n\tCpuCoreNum int\n\tEnableUDPRelay bool\n\tURL string\n\tMaxTCPConnPerPort int\n\tAPIProxyListenPort int\n}\n\nfunc NewServerOption() *ServerOption {\n\treturn &ServerOption{\n\t\tConfigFile: string(\"\"),\n\t\tCpuCoreNum: 1,\n\t\tEnableUDPRelay: false,\n\t}\n}\n\nfunc (s *ServerOption) LoadConfigFile() error {\n\tglog.V(5).Infof(\"Parse %s file\\r\\n\", s.ConfigFile)\n\terr := config.ServerCfg.Parse(s.ConfigFile)\n\tif err != nil {\n\t\tglog.Errorf(\"error reading %s: %v\\n\", s.ConfigFile, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *ServerOption) AddFlags(fs *pflag.FlagSet) {\n\n\tfs.StringVar(&s.ConfigFile, \"config-file\", s.ConfigFile, \"\"+\n\t\t\"specify a configure file for server run. \")\n\n\tfs.IntVar(&s.CpuCoreNum, \"cpu-core-num\", s.CpuCoreNum, \"\"+\n\t\t\"specify how many cpu core will be alloc for program\")\n\n\tfs.BoolVar(&s.EnableUDPRelay, \"enable-udp-relay\", s.EnableUDPRelay, \"\"+\n\t\t\"enable udp relay\")\n\n\tfs.StringVar(&s.URL, \"apiserver-url\", s.URL, \"\"+\n\t\t\"specify a api server url. \")\n\n\tfs.IntVar(&s.MaxTCPConnPerPort, \"max-tcp-conn-per-port\", 300, \"\"+\n\t\t\"specify how many tcp connection per port\")\n\n\tfs.IntVar(&s.APIProxyListenPort, \"api-proxy-port\", 48888, \"\"+\n\t\t\"specify a port for api proxy\")\n\n}\n<|endoftext|>"} {"text":"<commit_before>package jiraui\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/Netflix-Skunkworks\/go-jira\"\n\tui \"github.com\/gizak\/termui\"\n\t\"github.com\/mitchellh\/go-wordwrap\"\n\t\"gopkg.in\/coryb\/yaml.v2\"\n)\n\nfunc countLabelsFromQuery(query string) map[string]int {\n\tdata, _ := runJiraQuery(query)\n\treturn countLabelsFromQueryData(data)\n}\n\nfunc countLabelsFromQueryData(data interface{}) map[string]int {\n\tcounts := make(map[string]int)\n\tissues := data.(map[string]interface{})[\"issues\"].([]interface{})\n\tfor _, issue := range issues {\n\t\tissueLabels := issue.(map[string]interface{})[\"fields\"].(map[string]interface{})[\"labels\"]\n\t\tlabels := issueLabels.([]interface{})\n\t\tif len(labels) == 0 {\n\t\t\t\/\/ \"NOT LABELLED\" isn't a valid label, so no possible conflict here.\n\t\t\tcounts[\"NOT LABELLED\"] = counts[\"NOT LABELLED\"] + 1\n\t\t} else {\n\t\t\tfor _, v := range labels {\n\t\t\t\tlabel := v.(string)\n\t\t\t\tcounts[label] = counts[label] + 1\n\t\t\t}\n\t\t}\n\t}\n\treturn counts\n}\n\nfunc RunExternalCommand(fn func() error) error {\n\tlog.Debugf(\"ShellOut() called with %q\", fn)\n\tderegisterEventHandlers()\n\tui.Clear()\n\tstty := exec.Command(\"stty\", \"-f\", \"\/dev\/tty\", \"echo\", \"opost\")\n\t_ = stty.Run()\n\terr := fn() \/\/ magic happens\n\tstty = exec.Command(\"stty\", \"-f\", \"\/dev\/tty\", \"-echo\", \"-opost\")\n\t_ = stty.Run()\n\tregisterEventHandlers()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc runShell() {\n\t_ = RunExternalCommand(\n\t\tfunc() error {\n\t\t\tcmd := exec.Command(\"bash\")\n\t\t\tcmd.Stdout, cmd.Stderr, cmd.Stdin = os.Stdout, os.Stderr, os.Stdin\n\t\t\treturn cmd.Run()\n\t\t})\n\tchangePage()\n}\n\nfunc runJiraCmdEdit(ticketId string) {\n\t_ = RunExternalCommand(\n\t\tfunc() error {\n\t\t\topts := getJiraOpts()\n\t\t\tc := jira.New(opts)\n\t\t\treturn c.CmdEdit(ticketId)\n\t\t})\n\tswitch c := currentPage.(type) {\n\tcase Refresher:\n\t\tc.Refresh()\n\t}\n\tchangePage()\n}\n\nfunc runJiraCmdCreate(project string, summary string) {\n\t_ = RunExternalCommand(\n\t\tfunc() error {\n\t\t\topts := getJiraOpts()\n\t\t\topts[\"project\"] = project\n\t\t\topts[\"summary\"] = summary\n\t\t\tc := jira.New(opts)\n\t\t\treturn c.CmdCreate()\n\t\t})\n\tswitch c := currentPage.(type) {\n\tcase Refresher:\n\t\tc.Refresh()\n\t}\n\tchangePage()\n}\n\nfunc runJiraCmdCommentNoEditor(ticketId string, comment string) {\n\topts := getJiraOpts()\n\topts[\"comment\"] = comment\n\tc := jira.New(opts)\n\tc.CmdComment(ticketId)\n}\n\nfunc runJiraCmdAssign(ticketId string, user string) {\n\topts := getJiraOpts()\n\tc := jira.New(opts)\n\tc.CmdAssign(ticketId, user)\n}\n\nfunc runJiraCmdWatch(ticketId string, watcher string, remove bool) {\n\topts := getJiraOpts()\n\tc := jira.New(opts)\n\tif watcher == \"\" {\n\t\twatcher = opts[\"user\"].(string)\n\t}\n\tc.CmdWatch(ticketId, watcher, remove)\n}\n\nfunc runJiraCmdVote(ticketId string, up bool) {\n\topts := getJiraOpts()\n\tc := jira.New(opts)\n\tc.CmdVote(ticketId, up)\n}\n\nfunc runJiraCmdLabels(ticketId string, action string, labels []string) {\n\topts := getJiraOpts()\n\tc := jira.New(opts)\n\terr := c.CmdLabels(action, ticketId, labels)\n\tif err != nil {\n\t\tlog.Errorf(\"Error writing labels: %q\", err)\n\t}\n}\n\nfunc runJiraCmdRank(ticketId, targetId string, order jira.RankOrder) {\n\topts := getJiraOpts()\n\tc := jira.New(opts)\n\terr := c.RankIssue(ticketId, targetId, order)\n\tif err != nil {\n\t\tlog.Errorf(\"Error modifying issue rank: %q\", err)\n\t}\n}\n\nfunc findTicketIdInString(line string) string {\n\tre := regexp.MustCompile(`[A-Z]{2,12}-[0-9]{1,6}`)\n\treturn strings.TrimSpace(re.FindString(line))\n}\n\nfunc runJiraQuery(query string) (interface{}, error) {\n\topts := getJiraOpts()\n\topts[\"query\"] = query\n\tc := jira.New(opts)\n\treturn c.FindIssues()\n}\n\nfunc JiraQueryAsStrings(query string, templateName string) []string {\n\topts := getJiraOpts()\n\topts[\"query\"] = query\n\tc := jira.New(opts)\n\tdata, _ := c.FindIssues()\n\tbuf := new(bytes.Buffer)\n\tif templateName == \"\" {\n\t\ttemplateName = \"jira_ui_list\"\n\t}\n\ttemplate := c.GetTemplate(templateName)\n\tif template == \"\" {\n\t\ttemplate = default_list_template\n\t}\n\tjira.RunTemplate(template, data, buf)\n\treturn strings.Split(strings.TrimSpace(buf.String()), \"\\n\")\n}\n\nfunc FetchJiraTicket(id string) (interface{}, error) {\n\topts := getJiraOpts()\n\tc := jira.New(opts)\n\treturn c.ViewIssue(id)\n}\n\nfunc JiraTicketAsStrings(data interface{}, templateName string) []string {\n\topts := getJiraOpts()\n\tc := jira.New(opts)\n\tbuf := new(bytes.Buffer)\n\ttemplate := c.GetTemplate(templateName)\n\tlog.Debugf(\"JiraTicketsAsStrings: template = %q\", template)\n\tif template == \"\" {\n\t\ttemplate = strings.Replace(default_view_template, \"ENDPOINT\", opts[\"endpoint\"].(string), 1)\n\t}\n\tjira.RunTemplate(template, data, buf)\n\treturn strings.Split(strings.TrimSpace(buf.String()), \"\\n\")\n}\n\nfunc HelpTextAsStrings(data interface{}, templateName string) []string {\n\topts := getJiraOpts()\n\tc := jira.New(opts)\n\tbuf := new(bytes.Buffer)\n\ttemplate := c.GetTemplate(templateName)\n\tif template == \"\" {\n\t\ttemplate = default_help_template\n\t}\n\tlog.Debugf(\"HelpTextAsStrings: template = %q\", template)\n\tjira.RunTemplate(template, data, buf)\n\treturn strings.Split(strings.TrimSpace(buf.String()), \"\\n\")\n}\n\nfunc WrapText(lines []string, maxWidth uint) []string {\n\tout := make([]string, 0)\n\tinsideNoformatBlock := false\n\tinsideCodeBlock := false\n\tfor _, line := range lines {\n\t\tif matched, _ := regexp.MatchString(`^\\s+\\{code`, line); matched {\n\t\t\tinsideCodeBlock = !insideCodeBlock\n\t\t} else if strings.TrimSpace(line) == \"{noformat}\" {\n\t\t\tinsideNoformatBlock = !insideNoformatBlock\n\t\t}\n\t\tif maxWidth == 0 || uint(len(line)) < maxWidth || insideCodeBlock || insideNoformatBlock {\n\t\t\tout = append(out, line)\n\t\t\tcontinue\n\t\t}\n\t\tif matched, _ := regexp.MatchString(`^[a-z_]+:\\s`, line); matched {\n\t\t\t\/\/ don't futz with single line field+value.\n\t\t\t\/\/ If they are too long, that's their fault.\n\t\t\tout = append(out, line)\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ wrap text, but preserve indenting\n\t\tre := regexp.MustCompile(`^\\s*`)\n\t\tindenting := re.FindString(line)\n\t\twrappedLines := strings.Split(wordwrap.WrapString(line, maxWidth-uint(len(indenting))), \"\\n\")\n\t\tindentedWrappedLines := make([]string, len(wrappedLines))\n\t\tfor i, wl := range wrappedLines {\n\t\t\tif i == 0 {\n\t\t\t\t\/\/ first line already has the indent\n\t\t\t\tindentedWrappedLines[i] = wl\n\t\t\t} else {\n\t\t\t\tindentedWrappedLines[i] = indenting + wl\n\t\t\t}\n\t\t}\n\t\tout = append(out, indentedWrappedLines...)\n\t}\n\treturn out\n}\n\nfunc parseYaml(file string, v map[string]interface{}) {\n\tif fh, err := ioutil.ReadFile(file); err == nil {\n\t\tlog.Debugf(\"Parsing YAML file: %s\", file)\n\t\tyaml.Unmarshal(fh, &v)\n\t}\n}\n\nfunc loadConfigs(opts map[string]interface{}) {\n\tpaths := jira.FindParentPaths(\".jira.d\/jira-ui-config.yml\")\n\tpaths = append(jira.FindParentPaths(\".jira.d\/config.yml\"), paths...)\n\tpaths = append([]string{\"\/etc\/go-jira-ui.yml\", \"\/etc\/go-jira.yml\"}, paths...)\n\n\t\/\/ iterate paths in reverse\n\tfor i := len(paths) - 1; i >= 0; i-- {\n\t\tfile := paths[i]\n\t\tif _, err := os.Stat(file); err == nil {\n\t\t\ttmp := make(map[string]interface{})\n\t\t\tparseYaml(file, tmp)\n\t\t\tfor k, v := range tmp {\n\t\t\t\tif _, ok := opts[k]; !ok {\n\t\t\t\t\tlog.Debugf(\"Setting %q to %#v from %s\", k, v, file)\n\t\t\t\t\topts[k] = v\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc doLogin(opts map[string]interface{}) error {\n\tc := jira.New(opts)\n\tfmt.Printf(\"Logging in as %s:\\n\", opts[\"user\"])\n\treturn c.CmdLogin()\n}\n\nfunc ensureLoggedIntoJira() error {\n\thomeDir := os.Getenv(\"HOME\")\n\topts := getJiraOpts()\n\ttestSessionQuery := fmt.Sprintf(\"reporter = %s\", opts[\"user\"])\n\tif _, err := os.Stat(fmt.Sprintf(\"%s\/.jira.d\/cookies.js\", homeDir)); err != nil {\n\t\treturn doLogin(opts)\n\t} else if data, err := runJiraQuery(testSessionQuery); err != nil {\n\t\treturn doLogin(opts)\n\t} else if val, ok := data.(map[string]interface{})[\"errorMessages\"]; ok {\n\t\tif len(val.([]interface{})) > 0 {\n\t\t\treturn doLogin(opts)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getJiraOpts() map[string]interface{} {\n\tuser := os.Getenv(\"USER\")\n\thome := os.Getenv(\"HOME\")\n\tdefaultQueryFields := \"summary,created,updated,priority,status,reporter,assignee,labels\"\n\tdefaultSort := \"priority asc, created\"\n\tdefaultMaxResults := 1000\n\n\topts := make(map[string]interface{})\n\tdefaults := map[string]interface{}{\n\t\t\"user\": user,\n\t\t\"endpoint\": os.Getenv(\"JIRA_ENDPOINT\"),\n\t\t\"queryfields\": defaultQueryFields,\n\t\t\"directory\": fmt.Sprintf(\"%s\/.jira.d\/templates\", home),\n\t\t\"sort\": defaultSort,\n\t\t\"max_results\": defaultMaxResults,\n\t\t\"method\": \"GET\",\n\t\t\"quiet\": true,\n\t}\n\n\tfor k, v := range cliOpts {\n\t\tif _, ok := opts[k]; !ok {\n\t\t\tlog.Debugf(\"Setting %q to %#v from cli options\", k, v)\n\t\t\topts[k] = v\n\t\t}\n\t}\n\n\tloadConfigs(opts)\n\tfor k, v := range defaults {\n\t\tif _, ok := opts[k]; !ok {\n\t\t\tlog.Debugf(\"Setting %q to %#v from defaults\", k, v)\n\t\t\topts[k] = v\n\t\t}\n\t}\n\treturn opts\n}\n\nfunc lastLineDisplayed(ls *ScrollableList, firstLine int, correction int) int {\n\treturn firstLine + ls.Height - correction\n}\n<commit_msg>Add an regex check for a too long ticket id<commit_after>package jiraui\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/Netflix-Skunkworks\/go-jira\"\n\tui \"github.com\/gizak\/termui\"\n\t\"github.com\/mitchellh\/go-wordwrap\"\n\t\"gopkg.in\/coryb\/yaml.v2\"\n)\n\nfunc countLabelsFromQuery(query string) map[string]int {\n\tdata, _ := runJiraQuery(query)\n\treturn countLabelsFromQueryData(data)\n}\n\nfunc countLabelsFromQueryData(data interface{}) map[string]int {\n\tcounts := make(map[string]int)\n\tissues := data.(map[string]interface{})[\"issues\"].([]interface{})\n\tfor _, issue := range issues {\n\t\tissueLabels := issue.(map[string]interface{})[\"fields\"].(map[string]interface{})[\"labels\"]\n\t\tlabels := issueLabels.([]interface{})\n\t\tif len(labels) == 0 {\n\t\t\t\/\/ \"NOT LABELLED\" isn't a valid label, so no possible conflict here.\n\t\t\tcounts[\"NOT LABELLED\"] = counts[\"NOT LABELLED\"] + 1\n\t\t} else {\n\t\t\tfor _, v := range labels {\n\t\t\t\tlabel := v.(string)\n\t\t\t\tcounts[label] = counts[label] + 1\n\t\t\t}\n\t\t}\n\t}\n\treturn counts\n}\n\nfunc RunExternalCommand(fn func() error) error {\n\tlog.Debugf(\"ShellOut() called with %q\", fn)\n\tderegisterEventHandlers()\n\tui.Clear()\n\tstty := exec.Command(\"stty\", \"-f\", \"\/dev\/tty\", \"echo\", \"opost\")\n\t_ = stty.Run()\n\terr := fn() \/\/ magic happens\n\tstty = exec.Command(\"stty\", \"-f\", \"\/dev\/tty\", \"-echo\", \"-opost\")\n\t_ = stty.Run()\n\tregisterEventHandlers()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc runShell() {\n\t_ = RunExternalCommand(\n\t\tfunc() error {\n\t\t\tcmd := exec.Command(\"bash\")\n\t\t\tcmd.Stdout, cmd.Stderr, cmd.Stdin = os.Stdout, os.Stderr, os.Stdin\n\t\t\treturn cmd.Run()\n\t\t})\n\tchangePage()\n}\n\nfunc runJiraCmdEdit(ticketId string) {\n\t_ = RunExternalCommand(\n\t\tfunc() error {\n\t\t\topts := getJiraOpts()\n\t\t\tc := jira.New(opts)\n\t\t\treturn c.CmdEdit(ticketId)\n\t\t})\n\tswitch c := currentPage.(type) {\n\tcase Refresher:\n\t\tc.Refresh()\n\t}\n\tchangePage()\n}\n\nfunc runJiraCmdCreate(project string, summary string) {\n\t_ = RunExternalCommand(\n\t\tfunc() error {\n\t\t\topts := getJiraOpts()\n\t\t\topts[\"project\"] = project\n\t\t\topts[\"summary\"] = summary\n\t\t\tc := jira.New(opts)\n\t\t\treturn c.CmdCreate()\n\t\t})\n\tswitch c := currentPage.(type) {\n\tcase Refresher:\n\t\tc.Refresh()\n\t}\n\tchangePage()\n}\n\nfunc runJiraCmdCommentNoEditor(ticketId string, comment string) {\n\topts := getJiraOpts()\n\topts[\"comment\"] = comment\n\tc := jira.New(opts)\n\tc.CmdComment(ticketId)\n}\n\nfunc runJiraCmdAssign(ticketId string, user string) {\n\topts := getJiraOpts()\n\tc := jira.New(opts)\n\tc.CmdAssign(ticketId, user)\n}\n\nfunc runJiraCmdWatch(ticketId string, watcher string, remove bool) {\n\topts := getJiraOpts()\n\tc := jira.New(opts)\n\tif watcher == \"\" {\n\t\twatcher = opts[\"user\"].(string)\n\t}\n\tc.CmdWatch(ticketId, watcher, remove)\n}\n\nfunc runJiraCmdVote(ticketId string, up bool) {\n\topts := getJiraOpts()\n\tc := jira.New(opts)\n\tc.CmdVote(ticketId, up)\n}\n\nfunc runJiraCmdLabels(ticketId string, action string, labels []string) {\n\topts := getJiraOpts()\n\tc := jira.New(opts)\n\terr := c.CmdLabels(action, ticketId, labels)\n\tif err != nil {\n\t\tlog.Errorf(\"Error writing labels: %q\", err)\n\t}\n}\n\nfunc runJiraCmdRank(ticketId, targetId string, order jira.RankOrder) {\n\topts := getJiraOpts()\n\tc := jira.New(opts)\n\terr := c.RankIssue(ticketId, targetId, order)\n\tif err != nil {\n\t\tlog.Errorf(\"Error modifying issue rank: %q\", err)\n\t}\n}\n\nfunc findTicketIdInString(line string) string {\n\tre := regexp.MustCompile(`[A-Z]{2,12}-[0-9]{1,6}`)\n\tre_too_long := regexp.MustCompile(`[A-Z]{13}-[0-9]{1,6}`)\n\n\tif re_too_long.MatchString(line) {\n\t\treturn \"\"\n\t}\n\n\treturn strings.TrimSpace(re.FindString(line))\n}\n\nfunc runJiraQuery(query string) (interface{}, error) {\n\topts := getJiraOpts()\n\topts[\"query\"] = query\n\tc := jira.New(opts)\n\treturn c.FindIssues()\n}\n\nfunc JiraQueryAsStrings(query string, templateName string) []string {\n\topts := getJiraOpts()\n\topts[\"query\"] = query\n\tc := jira.New(opts)\n\tdata, _ := c.FindIssues()\n\tbuf := new(bytes.Buffer)\n\tif templateName == \"\" {\n\t\ttemplateName = \"jira_ui_list\"\n\t}\n\ttemplate := c.GetTemplate(templateName)\n\tif template == \"\" {\n\t\ttemplate = default_list_template\n\t}\n\tjira.RunTemplate(template, data, buf)\n\treturn strings.Split(strings.TrimSpace(buf.String()), \"\\n\")\n}\n\nfunc FetchJiraTicket(id string) (interface{}, error) {\n\topts := getJiraOpts()\n\tc := jira.New(opts)\n\treturn c.ViewIssue(id)\n}\n\nfunc JiraTicketAsStrings(data interface{}, templateName string) []string {\n\topts := getJiraOpts()\n\tc := jira.New(opts)\n\tbuf := new(bytes.Buffer)\n\ttemplate := c.GetTemplate(templateName)\n\tlog.Debugf(\"JiraTicketsAsStrings: template = %q\", template)\n\tif template == \"\" {\n\t\ttemplate = strings.Replace(default_view_template, \"ENDPOINT\", opts[\"endpoint\"].(string), 1)\n\t}\n\tjira.RunTemplate(template, data, buf)\n\treturn strings.Split(strings.TrimSpace(buf.String()), \"\\n\")\n}\n\nfunc HelpTextAsStrings(data interface{}, templateName string) []string {\n\topts := getJiraOpts()\n\tc := jira.New(opts)\n\tbuf := new(bytes.Buffer)\n\ttemplate := c.GetTemplate(templateName)\n\tif template == \"\" {\n\t\ttemplate = default_help_template\n\t}\n\tlog.Debugf(\"HelpTextAsStrings: template = %q\", template)\n\tjira.RunTemplate(template, data, buf)\n\treturn strings.Split(strings.TrimSpace(buf.String()), \"\\n\")\n}\n\nfunc WrapText(lines []string, maxWidth uint) []string {\n\tout := make([]string, 0)\n\tinsideNoformatBlock := false\n\tinsideCodeBlock := false\n\tfor _, line := range lines {\n\t\tif matched, _ := regexp.MatchString(`^\\s+\\{code`, line); matched {\n\t\t\tinsideCodeBlock = !insideCodeBlock\n\t\t} else if strings.TrimSpace(line) == \"{noformat}\" {\n\t\t\tinsideNoformatBlock = !insideNoformatBlock\n\t\t}\n\t\tif maxWidth == 0 || uint(len(line)) < maxWidth || insideCodeBlock || insideNoformatBlock {\n\t\t\tout = append(out, line)\n\t\t\tcontinue\n\t\t}\n\t\tif matched, _ := regexp.MatchString(`^[a-z_]+:\\s`, line); matched {\n\t\t\t\/\/ don't futz with single line field+value.\n\t\t\t\/\/ If they are too long, that's their fault.\n\t\t\tout = append(out, line)\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ wrap text, but preserve indenting\n\t\tre := regexp.MustCompile(`^\\s*`)\n\t\tindenting := re.FindString(line)\n\t\twrappedLines := strings.Split(wordwrap.WrapString(line, maxWidth-uint(len(indenting))), \"\\n\")\n\t\tindentedWrappedLines := make([]string, len(wrappedLines))\n\t\tfor i, wl := range wrappedLines {\n\t\t\tif i == 0 {\n\t\t\t\t\/\/ first line already has the indent\n\t\t\t\tindentedWrappedLines[i] = wl\n\t\t\t} else {\n\t\t\t\tindentedWrappedLines[i] = indenting + wl\n\t\t\t}\n\t\t}\n\t\tout = append(out, indentedWrappedLines...)\n\t}\n\treturn out\n}\n\nfunc parseYaml(file string, v map[string]interface{}) {\n\tif fh, err := ioutil.ReadFile(file); err == nil {\n\t\tlog.Debugf(\"Parsing YAML file: %s\", file)\n\t\tyaml.Unmarshal(fh, &v)\n\t}\n}\n\nfunc loadConfigs(opts map[string]interface{}) {\n\tpaths := jira.FindParentPaths(\".jira.d\/jira-ui-config.yml\")\n\tpaths = append(jira.FindParentPaths(\".jira.d\/config.yml\"), paths...)\n\tpaths = append([]string{\"\/etc\/go-jira-ui.yml\", \"\/etc\/go-jira.yml\"}, paths...)\n\n\t\/\/ iterate paths in reverse\n\tfor i := len(paths) - 1; i >= 0; i-- {\n\t\tfile := paths[i]\n\t\tif _, err := os.Stat(file); err == nil {\n\t\t\ttmp := make(map[string]interface{})\n\t\t\tparseYaml(file, tmp)\n\t\t\tfor k, v := range tmp {\n\t\t\t\tif _, ok := opts[k]; !ok {\n\t\t\t\t\tlog.Debugf(\"Setting %q to %#v from %s\", k, v, file)\n\t\t\t\t\topts[k] = v\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc doLogin(opts map[string]interface{}) error {\n\tc := jira.New(opts)\n\tfmt.Printf(\"Logging in as %s:\\n\", opts[\"user\"])\n\treturn c.CmdLogin()\n}\n\nfunc ensureLoggedIntoJira() error {\n\thomeDir := os.Getenv(\"HOME\")\n\topts := getJiraOpts()\n\ttestSessionQuery := fmt.Sprintf(\"reporter = %s\", opts[\"user\"])\n\tif _, err := os.Stat(fmt.Sprintf(\"%s\/.jira.d\/cookies.js\", homeDir)); err != nil {\n\t\treturn doLogin(opts)\n\t} else if data, err := runJiraQuery(testSessionQuery); err != nil {\n\t\treturn doLogin(opts)\n\t} else if val, ok := data.(map[string]interface{})[\"errorMessages\"]; ok {\n\t\tif len(val.([]interface{})) > 0 {\n\t\t\treturn doLogin(opts)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getJiraOpts() map[string]interface{} {\n\tuser := os.Getenv(\"USER\")\n\thome := os.Getenv(\"HOME\")\n\tdefaultQueryFields := \"summary,created,updated,priority,status,reporter,assignee,labels\"\n\tdefaultSort := \"priority asc, created\"\n\tdefaultMaxResults := 1000\n\n\topts := make(map[string]interface{})\n\tdefaults := map[string]interface{}{\n\t\t\"user\": user,\n\t\t\"endpoint\": os.Getenv(\"JIRA_ENDPOINT\"),\n\t\t\"queryfields\": defaultQueryFields,\n\t\t\"directory\": fmt.Sprintf(\"%s\/.jira.d\/templates\", home),\n\t\t\"sort\": defaultSort,\n\t\t\"max_results\": defaultMaxResults,\n\t\t\"method\": \"GET\",\n\t\t\"quiet\": true,\n\t}\n\n\tfor k, v := range cliOpts {\n\t\tif _, ok := opts[k]; !ok {\n\t\t\tlog.Debugf(\"Setting %q to %#v from cli options\", k, v)\n\t\t\topts[k] = v\n\t\t}\n\t}\n\n\tloadConfigs(opts)\n\tfor k, v := range defaults {\n\t\tif _, ok := opts[k]; !ok {\n\t\t\tlog.Debugf(\"Setting %q to %#v from defaults\", k, v)\n\t\t\topts[k] = v\n\t\t}\n\t}\n\treturn opts\n}\n\nfunc lastLineDisplayed(ls *ScrollableList, firstLine int, correction int) int {\n\treturn firstLine + ls.Height - correction\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ Copyright © 2011-2017 Guy M. Allard\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage stompngo\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n)\n\n\/*\n\tEncode a string per STOMP 1.1+ specifications.\n*\/\nfunc encode(s string) string {\n\tr := s\n\tfor _, tr := range codecValues {\n\t\tif strings.Index(r, tr.decoded) >= 0 {\n\t\t\tr = strings.Replace(r, tr.decoded, tr.encoded, -1)\n\t\t}\n\t}\n\treturn r\n}\n\n\/*\n\tDecode a string per STOMP 1.1+ specifications.\n*\/\nfunc decode(s string) string {\n\tr := s\n\tfor _, tr := range codecValues {\n\t\tif strings.Index(r, tr.encoded) >= 0 {\n\t\t\tr = strings.Replace(r, tr.encoded, tr.decoded, -1)\n\t\t}\n\t}\n\treturn r\n}\n\n\/*\n\tA network helper. Read from the wire until a 0x00 byte is encountered.\n*\/\nfunc readUntilNul(c *Connection) ([]uint8, error) {\n\tc.setReadDeadline()\n\tb, e := c.rdr.ReadBytes(0)\n\tif c.checkReadError(e) != nil {\n\t\treturn b, e\n\t}\n\tif len(b) == 1 {\n\t\tb = NULLBUFF\n\t} else {\n\t\tb = b[0 : len(b)-1]\n\t}\n\treturn b, e\n}\n\n\/*\n\tA network helper. Read a full message body with a known length that is\n\t> 0. Then read the trailing 'null' byte expected for STOMP frames.\n*\/\nfunc readBody(c *Connection, l int) ([]uint8, error) {\n\tb := make([]byte, l)\n\tc.setReadDeadline()\n\tn, e := io.ReadFull(c.rdr, b)\n\tif n < l { \/\/ Short read, e is ErrUnexpectedEOF\n\t\tc.log(\"SHORT READ\", n, l, e)\n\t\treturn b[0 : n-1], e\n\t}\n\tif c.checkReadError(e) != nil { \/\/ Other erors\n\t\treturn b, e\n\t}\n\tc.setReadDeadline()\n\t_, _ = c.rdr.ReadByte() \/\/ trailing NUL\n\tif c.checkReadError(e) != nil { \/\/ Other erors\n\t\treturn b, e\n\t}\n\treturn b, e\n}\n\n\/*\n\tCommon Header Validation.\n*\/\nfunc checkHeaders(h Headers, p string) error {\n\tif h == nil {\n\t\treturn EHDRNIL\n\t}\n\t\/\/ Length check\n\tif e := h.Validate(); e != nil {\n\t\treturn e\n\t}\n\t\/\/ Empty key \/ value check\n\tfor i := 0; i < len(h); i += 2 {\n\t\tif h[i] == \"\" {\n\t\t\treturn EHDRMTK\n\t\t}\n\t\tif p == SPL_10 && h[i+1] == \"\" {\n\t\t\treturn EHDRMTV\n\t\t}\n\t}\n\t\/\/ UTF8 check\n\tif p != SPL_10 {\n\t\t_, e := h.ValidateUTF8()\n\t\tif e != nil {\n\t\t\treturn e\n\t\t}\n\t}\n\treturn nil\n}\n\n\/*\n\tInternal function used by heartbeat initialization.\n*\/\nfunc max(a, b int64) int64 {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}\n\n\/*\n Debug helper. Get properly formatted destination.\n*\/\nfunc dumpmd(md MessageData) {\n\tfmt.Printf(\"Command: %s\\n\", md.Message.Command)\n\tfmt.Println(\"Headers:\")\n\tfor i := 0; i < len(md.Message.Headers); i += 2 {\n\t\tfmt.Printf(\"key:%s\\t\\tvalue:%s\\n\",\n\t\t\tmd.Message.Headers[i], md.Message.Headers[i+1])\n\t}\n\tfmt.Printf(\"Body: %s\\n\", string(md.Message.Body))\n\tif md.Error != nil {\n\t\tfmt.Printf(\"Error: %s\\n\", md.Error.Error())\n\t} else {\n\t\tfmt.Println(\"Error: nil\")\n\t}\n}\n<commit_msg>Handle reading message body when underlying connection unexpectedly closes<commit_after>\/\/\n\/\/ Copyright © 2011-2017 Guy M. Allard\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage stompngo\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n)\n\n\/*\n\tEncode a string per STOMP 1.1+ specifications.\n*\/\nfunc encode(s string) string {\n\tr := s\n\tfor _, tr := range codecValues {\n\t\tif strings.Index(r, tr.decoded) >= 0 {\n\t\t\tr = strings.Replace(r, tr.decoded, tr.encoded, -1)\n\t\t}\n\t}\n\treturn r\n}\n\n\/*\n\tDecode a string per STOMP 1.1+ specifications.\n*\/\nfunc decode(s string) string {\n\tr := s\n\tfor _, tr := range codecValues {\n\t\tif strings.Index(r, tr.encoded) >= 0 {\n\t\t\tr = strings.Replace(r, tr.encoded, tr.decoded, -1)\n\t\t}\n\t}\n\treturn r\n}\n\n\/*\n\tA network helper. Read from the wire until a 0x00 byte is encountered.\n*\/\nfunc readUntilNul(c *Connection) ([]uint8, error) {\n\tc.setReadDeadline()\n\tb, e := c.rdr.ReadBytes(0)\n\tif c.checkReadError(e) != nil {\n\t\treturn b, e\n\t}\n\tif len(b) == 1 {\n\t\tb = NULLBUFF\n\t} else {\n\t\tb = b[0 : len(b)-1]\n\t}\n\treturn b, e\n}\n\n\/*\n\tA network helper. Read a full message body with a known length that is\n\t> 0. Then read the trailing 'null' byte expected for STOMP frames.\n*\/\nfunc readBody(c *Connection, l int) ([]uint8, error) {\n\tb := make([]byte, l)\n\tc.setReadDeadline()\n\tn, e := io.ReadFull(c.rdr, b)\n\tif n < l && n != 0 { \/\/ Short read, e is ErrUnexpectedEOF\n\t\tc.log(\"SHORT READ\", n, l, e)\n\t\treturn b[0 : n-1], e\n\t}\n\tif c.checkReadError(e) != nil { \/\/ Other erors\n\t\treturn b, e\n\t}\n\tc.setReadDeadline()\n\t_, _ = c.rdr.ReadByte() \/\/ trailing NUL\n\tif c.checkReadError(e) != nil { \/\/ Other erors\n\t\treturn b, e\n\t}\n\treturn b, e\n}\n\n\/*\n\tCommon Header Validation.\n*\/\nfunc checkHeaders(h Headers, p string) error {\n\tif h == nil {\n\t\treturn EHDRNIL\n\t}\n\t\/\/ Length check\n\tif e := h.Validate(); e != nil {\n\t\treturn e\n\t}\n\t\/\/ Empty key \/ value check\n\tfor i := 0; i < len(h); i += 2 {\n\t\tif h[i] == \"\" {\n\t\t\treturn EHDRMTK\n\t\t}\n\t\tif p == SPL_10 && h[i+1] == \"\" {\n\t\t\treturn EHDRMTV\n\t\t}\n\t}\n\t\/\/ UTF8 check\n\tif p != SPL_10 {\n\t\t_, e := h.ValidateUTF8()\n\t\tif e != nil {\n\t\t\treturn e\n\t\t}\n\t}\n\treturn nil\n}\n\n\/*\n\tInternal function used by heartbeat initialization.\n*\/\nfunc max(a, b int64) int64 {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}\n\n\/*\n Debug helper. Get properly formatted destination.\n*\/\nfunc dumpmd(md MessageData) {\n\tfmt.Printf(\"Command: %s\\n\", md.Message.Command)\n\tfmt.Println(\"Headers:\")\n\tfor i := 0; i < len(md.Message.Headers); i += 2 {\n\t\tfmt.Printf(\"key:%s\\t\\tvalue:%s\\n\",\n\t\t\tmd.Message.Headers[i], md.Message.Headers[i+1])\n\t}\n\tfmt.Printf(\"Body: %s\\n\", string(md.Message.Body))\n\tif md.Error != nil {\n\t\tfmt.Printf(\"Error: %s\\n\", md.Error.Error())\n\t} else {\n\t\tfmt.Println(\"Error: nil\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>pio: check error before deferring f.Close<commit_after><|endoftext|>"} {"text":"<commit_before>package flag_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t. \"code.cloudfoundry.org\/cli\/command\/flag\"\n\tflags \"github.com\/jessevdk\/go-flags\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"EnvironmentVariable\", func() {\n\tvar (\n\t\tenvVar EnvironmentVariable\n\t\tenvList []string\n\t)\n\n\tBeforeEach(func() {\n\t\tenvList = []string{\n\t\t\t\"ENVIRONMENTVARIABLE_TEST_ABC\",\n\t\t\t\"ENVIRONMENTVARIABLE_TEST_FOO_BAR\",\n\t\t\t\"ENVIRONMENTVARIABLE_TEST_ACKBAR\",\n\t\t\t\"ENVIRONMENTVARIABLE_TEST_abc\",\n\t\t}\n\n\t\tvar err error\n\t\tfor _, v := range envList {\n\t\t\terr = os.Setenv(v, \"\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t}\n\t})\n\n\tAfterEach(func() {\n\t\tvar err error\n\t\tfor _, v := range envList {\n\t\t\terr = os.Unsetenv(v)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t}\n\t})\n\n\tDescribe(\"Complete\", func() {\n\t\tContext(\"when the prefix is empty\", func() {\n\t\t\tIt(\"returns no matches\", func() {\n\t\t\t\tExpect(envVar.Complete(\"\")).To(BeEmpty())\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the prefix does not start with $\", func() {\n\t\t\tIt(\"returns no matches\", func() {\n\t\t\t\tExpect(envVar.Complete(\"A$A\")).To(BeEmpty())\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the prefix starts with $\", func() {\n\t\t\tContext(\"when only $ is specified\", func() {\n\t\t\t\tIt(\"returns all environment variables\", func() {\n\t\t\t\t\tkeyValPairs := os.Environ()\n\t\t\t\t\tenvVars := make([]string, len(keyValPairs))\n\t\t\t\t\tfor i, keyValPair := range keyValPairs {\n\t\t\t\t\t\tenvVars[i] = fmt.Sprintf(\"$%s\", strings.Split(keyValPair, \"=\")[0])\n\t\t\t\t\t}\n\n\t\t\t\t\tmatches := envVar.Complete(\"$\")\n\t\t\t\t\tExpect(matches).To(HaveLen(len(keyValPairs)))\n\t\t\t\t\tfor _, v := range envVars {\n\t\t\t\t\t\tExpect(matches).To(ContainElement(flags.Completion{Item: v}))\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when additional characters are specified\", func() {\n\t\t\t\tContext(\"when there are matching environment variables\", func() {\n\t\t\t\t\tIt(\"returns the matching environment variables\", func() {\n\t\t\t\t\t\tmatches := envVar.Complete(\"$ENVIRONMENTVARIABLE_TEST_A\")\n\t\t\t\t\t\tExpect(matches).To(HaveLen(2))\n\t\t\t\t\t\tExpect(matches).To(ConsistOf(\n\t\t\t\t\t\t\tflags.Completion{Item: \"$ENVIRONMENTVARIABLE_TEST_ABC\"},\n\t\t\t\t\t\t\tflags.Completion{Item: \"$ENVIRONMENTVARIABLE_TEST_ACKBAR\"},\n\t\t\t\t\t\t))\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"is case sensitive\", func() {\n\t\t\t\t\t\tmatches := envVar.Complete(\"$ENVIRONMENTVARIABLE_TEST_a\")\n\t\t\t\t\t\tExpect(matches).To(HaveLen(1))\n\t\t\t\t\t\tExpect(matches).To(ConsistOf(\n\t\t\t\t\t\t\tflags.Completion{Item: \"$ENVIRONMENTVARIABLE_TEST_abc\"},\n\t\t\t\t\t\t))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when there are no matching environment variables\", func() {\n\t\t\t\t\tIt(\"returns no matches\", func() {\n\t\t\t\t\t\tExpect(envVar.Complete(\"$Z\")).To(BeEmpty())\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<commit_msg>Don't run environment_variable_test on windows<commit_after>\/\/ +build !windows\n\npackage flag_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t. \"code.cloudfoundry.org\/cli\/command\/flag\"\n\tflags \"github.com\/jessevdk\/go-flags\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"EnvironmentVariable\", func() {\n\tvar (\n\t\tenvVar EnvironmentVariable\n\t\tenvList []string\n\t)\n\n\tBeforeEach(func() {\n\t\tenvList = []string{\n\t\t\t\"ENVIRONMENTVARIABLE_TEST_ABC\",\n\t\t\t\"ENVIRONMENTVARIABLE_TEST_FOO_BAR\",\n\t\t\t\"ENVIRONMENTVARIABLE_TEST_ACKBAR\",\n\t\t\t\"ENVIRONMENTVARIABLE_TEST_abc\",\n\t\t}\n\n\t\tvar err error\n\t\tfor _, v := range envList {\n\t\t\terr = os.Setenv(v, \"\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t}\n\t})\n\n\tAfterEach(func() {\n\t\tvar err error\n\t\tfor _, v := range envList {\n\t\t\terr = os.Unsetenv(v)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t}\n\t})\n\n\tDescribe(\"Complete\", func() {\n\t\tContext(\"when the prefix is empty\", func() {\n\t\t\tIt(\"returns no matches\", func() {\n\t\t\t\tExpect(envVar.Complete(\"\")).To(BeEmpty())\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the prefix does not start with $\", func() {\n\t\t\tIt(\"returns no matches\", func() {\n\t\t\t\tExpect(envVar.Complete(\"A$A\")).To(BeEmpty())\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when the prefix starts with $\", func() {\n\t\t\tContext(\"when only $ is specified\", func() {\n\t\t\t\tIt(\"returns all environment variables\", func() {\n\t\t\t\t\tkeyValPairs := os.Environ()\n\t\t\t\t\tenvVars := make([]string, len(keyValPairs))\n\t\t\t\t\tfor i, keyValPair := range keyValPairs {\n\t\t\t\t\t\tenvVars[i] = fmt.Sprintf(\"$%s\", strings.Split(keyValPair, \"=\")[0])\n\t\t\t\t\t}\n\n\t\t\t\t\tmatches := envVar.Complete(\"$\")\n\t\t\t\t\tExpect(matches).To(HaveLen(len(keyValPairs)))\n\t\t\t\t\tfor _, v := range envVars {\n\t\t\t\t\t\tExpect(matches).To(ContainElement(flags.Completion{Item: v}))\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when additional characters are specified\", func() {\n\t\t\t\tContext(\"when there are matching environment variables\", func() {\n\t\t\t\t\tIt(\"returns the matching environment variables\", func() {\n\t\t\t\t\t\tmatches := envVar.Complete(\"$ENVIRONMENTVARIABLE_TEST_A\")\n\t\t\t\t\t\tExpect(matches).To(HaveLen(2))\n\t\t\t\t\t\tExpect(matches).To(ConsistOf(\n\t\t\t\t\t\t\tflags.Completion{Item: \"$ENVIRONMENTVARIABLE_TEST_ABC\"},\n\t\t\t\t\t\t\tflags.Completion{Item: \"$ENVIRONMENTVARIABLE_TEST_ACKBAR\"},\n\t\t\t\t\t\t))\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"is case sensitive\", func() {\n\t\t\t\t\t\tmatches := envVar.Complete(\"$ENVIRONMENTVARIABLE_TEST_a\")\n\t\t\t\t\t\tExpect(matches).To(HaveLen(1))\n\t\t\t\t\t\tExpect(matches).To(ConsistOf(\n\t\t\t\t\t\t\tflags.Completion{Item: \"$ENVIRONMENTVARIABLE_TEST_abc\"},\n\t\t\t\t\t\t))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when there are no matching environment variables\", func() {\n\t\t\t\t\tIt(\"returns no matches\", func() {\n\t\t\t\t\t\tExpect(envVar.Complete(\"$Z\")).To(BeEmpty())\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"github.com\/disintegration\/imaging\"\n\t\"github.com\/minotar\/minecraft\"\n\t\"image\"\n\t\"image\/draw\"\n\t\"image\/png\"\n\t\"io\"\n)\n\nconst (\n\tHeadX = 8\n\tHeadY = 8\n\tHeadWidth = 8\n\tHeadHeight = 8\n\n\tHelmX = 40\n\tHelmY = 8\n\tHelmWidth = 8\n\tHelmHeight = 8\n\n\tTorsoX = 20\n\tTorsoY = 20\n\tTorsoWidth = 8\n\tTorsoHeight = 12\n\n\tRaX = 44\n\tRaY = 20\n\tRaWidth = 4\n\tRaHeight = 12\n\n\tRlX = 4\n\tRlY = 20\n\tRlWidth = 4\n\tRlHeight = 12\n\n\tLaX = 36\n\tLaY = 52\n\tLaWidth = 4\n\tLaHeight = 12\n\n\tLlX = 20\n\tLlY = 52\n\tLlWidth = 4\n\tLlHeight = 12\n)\n\ntype mcSkin struct {\n\tProcessed image.Image\n\tminecraft.Skin\n}\n\nfunc (skin *mcSkin) GetHead() error {\n\timg, err := cropHead(skin.Image)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tskin.Processed = img\n\treturn nil\n}\n\nfunc (skin *mcSkin) GetHelm() error {\n\thelm, err := cropHelm(skin.Image)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tskin.Processed = helm\n\treturn nil\n}\n\nfunc (skin *mcSkin) GetBody() error {\n\t\/\/ Check if 1.8 skin (the max Y bound should be 64)\n\trender18Skin := true\n\tbounds := skin.Image.Bounds()\n\tif bounds.Max.Y != 64 {\n\t\trender18Skin = false\n\t}\n\n\thelmImg, err := cropHelm(skin.Image)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttorsoImg, err := cropImage(skin.Image, image.Rect(TorsoX, TorsoY, TorsoX+TorsoWidth, TorsoY+TorsoHeight))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\traImg, err := cropImage(skin.Image, image.Rect(RaX, RaY, RaX+RaWidth, RaY+RaHeight))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trlImg, err := cropImage(skin.Image, image.Rect(RlX, RlY, RlX+RlWidth, RlY+RlHeight))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar laImg, llImg image.Image\n\n\t\/\/ If the skin is 1.8 then we will use the left arms and legs, otherwise flip the right ones and use them.\n\tif render18Skin {\n\t\tlaImg, err = cropImage(skin.Image, image.Rect(LaX, LaY, LaX+LaWidth, LaY+LaHeight))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tllImg, err = cropImage(skin.Image, image.Rect(LlX, LlY, LlX+LlWidth, LlY+LlHeight))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tlaImg = imaging.FlipH(raImg)\n\n\t\tllImg = imaging.FlipH(rlImg)\n\t}\n\n\t\/\/ Create a blank canvas for us to draw our body on\n\tbodyImg := image.NewRGBA(image.Rect(0, 0, LaWidth+TorsoWidth+RaWidth, HeadHeight+TorsoHeight+LlHeight))\n\t\/\/ Helm\n\tdraw.Draw(bodyImg, image.Rect(LaWidth, 0, LaWidth+HelmWidth, HelmHeight), helmImg, image.Pt(0, 0), draw.Src)\n\t\/\/ Torso\n\tdraw.Draw(bodyImg, image.Rect(LaWidth, HelmHeight, LaWidth+TorsoWidth, HelmHeight+TorsoHeight), torsoImg, image.Pt(0, 0), draw.Src)\n\t\/\/ Left Arm\n\tdraw.Draw(bodyImg, image.Rect(0, HelmHeight, LaWidth, HelmHeight+LaHeight), laImg, image.Pt(0, 0), draw.Src)\n\t\/\/ Right Arm\n\tdraw.Draw(bodyImg, image.Rect(LaWidth+TorsoWidth, HelmHeight, LaWidth+TorsoWidth+RaWidth, HelmHeight+RaHeight), raImg, image.Pt(0, 0), draw.Src)\n\t\/\/ Left Leg\n\tdraw.Draw(bodyImg, image.Rect(LaWidth, HelmHeight+TorsoHeight, LaWidth+LlWidth, HelmHeight+TorsoHeight+LlHeight), llImg, image.Pt(0, 0), draw.Src)\n\t\/\/ Right Leg\n\tdraw.Draw(bodyImg, image.Rect(LaWidth+LlWidth, HelmHeight+TorsoHeight, LaWidth+LlWidth+RlWidth, HelmHeight+TorsoHeight+RlHeight), rlImg, image.Pt(0, 0), draw.Src)\n\n\tskin.Processed = bodyImg\n\treturn nil\n}\n\nfunc (skin *mcSkin) WritePNG(w io.Writer) error {\n\treturn png.Encode(w, skin.Processed)\n}\n\nfunc (skin *mcSkin) Resize(width uint) {\n\tskin.Processed = imaging.Resize(skin.Processed, int(width), 0, imaging.NearestNeighbor)\n}\n\nfunc cropHead(img image.Image) (image.Image, error) {\n\treturn cropImage(img, image.Rect(HeadX, HeadY, HeadX+HeadWidth, HeadY+HeadHeight))\n}\n\nfunc cropHelm(img image.Image) (image.Image, error) {\n\t\/\/ check if helm is solid colour - if so, it counts as transparent\n\tisSolidColour := true\n\tbaseColour := img.At(HelmX, HelmY)\n\tfor checkX := HelmX; checkX < HelmX+HelmWidth; checkX++ {\n\t\tfor checkY := HelmY; checkY < HelmY+HelmHeight; checkY++ {\n\t\t\tcheckColour := img.At(checkX, checkY)\n\t\t\tif checkColour != baseColour {\n\t\t\t\tisSolidColour = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\theadImg, err := cropHead(img)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !isSolidColour {\n\t\theadImgRGBA := headImg.(*image.RGBA)\n\n\t\thelmImg, err := cropImage(img, image.Rect(HelmX, HelmY, HelmX+HelmWidth, HelmY+HelmHeight))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tsr := helmImg.Bounds()\n\t\tdraw.Draw(headImgRGBA, sr, helmImg, sr.Min, draw.Over)\n\t}\n\n\treturn headImg, nil\n}\n\nfunc cropImage(i image.Image, d image.Rectangle) (image.Image, error) {\n\tbounds := i.Bounds()\n\tif bounds.Min.X > d.Min.X || bounds.Min.Y > d.Min.Y || bounds.Max.X < d.Max.X || bounds.Max.Y < d.Max.Y {\n\t\treturn nil, errors.New(\"Bounds invalid for crop\")\n\t}\n\n\tdims := d.Size()\n\toutIm := image.NewRGBA(image.Rect(0, 0, dims.X, dims.Y))\n\tfor x := 0; x < dims.X; x++ {\n\t\tfor y := 0; y < dims.Y; y++ {\n\t\t\toutIm.Set(x, y, i.At(d.Min.X+x, d.Min.Y+y))\n\t\t}\n\t}\n\treturn outIm, nil\n}\n<commit_msg>Remove check for alpha - treat all images as being alpha<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"github.com\/disintegration\/imaging\"\n\t\"github.com\/minotar\/minecraft\"\n\t\"image\"\n\t\"image\/draw\"\n\t\"image\/png\"\n\t\"io\"\n)\n\nconst (\n\tHeadX = 8\n\tHeadY = 8\n\tHeadWidth = 8\n\tHeadHeight = 8\n\n\tHelmX = 40\n\tHelmY = 8\n\tHelmWidth = 8\n\tHelmHeight = 8\n\n\tTorsoX = 20\n\tTorsoY = 20\n\tTorsoWidth = 8\n\tTorsoHeight = 12\n\n\tRaX = 44\n\tRaY = 20\n\tRaWidth = 4\n\tRaHeight = 12\n\n\tRlX = 4\n\tRlY = 20\n\tRlWidth = 4\n\tRlHeight = 12\n\n\tLaX = 36\n\tLaY = 52\n\tLaWidth = 4\n\tLaHeight = 12\n\n\tLlX = 20\n\tLlY = 52\n\tLlWidth = 4\n\tLlHeight = 12\n)\n\ntype mcSkin struct {\n\tProcessed image.Image\n\tminecraft.Skin\n}\n\nfunc (skin *mcSkin) GetHead() error {\n\timg, err := cropHead(skin.Image)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tskin.Processed = img\n\treturn nil\n}\n\nfunc (skin *mcSkin) GetHelm() error {\n\thelm, err := cropHelm(skin.Image)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tskin.Processed = helm\n\treturn nil\n}\n\nfunc (skin *mcSkin) GetBody() error {\n\t\/\/ Check if 1.8 skin (the max Y bound should be 64)\n\trender18Skin := true\n\tbounds := skin.Image.Bounds()\n\tif bounds.Max.Y != 64 {\n\t\trender18Skin = false\n\t}\n\n\thelmImg, err := cropHelm(skin.Image)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttorsoImg, err := cropImage(skin.Image, image.Rect(TorsoX, TorsoY, TorsoX+TorsoWidth, TorsoY+TorsoHeight))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\traImg, err := cropImage(skin.Image, image.Rect(RaX, RaY, RaX+RaWidth, RaY+RaHeight))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trlImg, err := cropImage(skin.Image, image.Rect(RlX, RlY, RlX+RlWidth, RlY+RlHeight))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar laImg, llImg image.Image\n\n\t\/\/ If the skin is 1.8 then we will use the left arms and legs, otherwise flip the right ones and use them.\n\tif render18Skin {\n\t\tlaImg, err = cropImage(skin.Image, image.Rect(LaX, LaY, LaX+LaWidth, LaY+LaHeight))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tllImg, err = cropImage(skin.Image, image.Rect(LlX, LlY, LlX+LlWidth, LlY+LlHeight))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tlaImg = imaging.FlipH(raImg)\n\n\t\tllImg = imaging.FlipH(rlImg)\n\t}\n\n\t\/\/ Create a blank canvas for us to draw our body on\n\tbodyImg := image.NewRGBA(image.Rect(0, 0, LaWidth+TorsoWidth+RaWidth, HeadHeight+TorsoHeight+LlHeight))\n\t\/\/ Helm\n\tdraw.Draw(bodyImg, image.Rect(LaWidth, 0, LaWidth+HelmWidth, HelmHeight), helmImg, image.Pt(0, 0), draw.Src)\n\t\/\/ Torso\n\tdraw.Draw(bodyImg, image.Rect(LaWidth, HelmHeight, LaWidth+TorsoWidth, HelmHeight+TorsoHeight), torsoImg, image.Pt(0, 0), draw.Src)\n\t\/\/ Left Arm\n\tdraw.Draw(bodyImg, image.Rect(0, HelmHeight, LaWidth, HelmHeight+LaHeight), laImg, image.Pt(0, 0), draw.Src)\n\t\/\/ Right Arm\n\tdraw.Draw(bodyImg, image.Rect(LaWidth+TorsoWidth, HelmHeight, LaWidth+TorsoWidth+RaWidth, HelmHeight+RaHeight), raImg, image.Pt(0, 0), draw.Src)\n\t\/\/ Left Leg\n\tdraw.Draw(bodyImg, image.Rect(LaWidth, HelmHeight+TorsoHeight, LaWidth+LlWidth, HelmHeight+TorsoHeight+LlHeight), llImg, image.Pt(0, 0), draw.Src)\n\t\/\/ Right Leg\n\tdraw.Draw(bodyImg, image.Rect(LaWidth+LlWidth, HelmHeight+TorsoHeight, LaWidth+LlWidth+RlWidth, HelmHeight+TorsoHeight+RlHeight), rlImg, image.Pt(0, 0), draw.Src)\n\n\tskin.Processed = bodyImg\n\treturn nil\n}\n\nfunc (skin *mcSkin) WritePNG(w io.Writer) error {\n\treturn png.Encode(w, skin.Processed)\n}\n\nfunc (skin *mcSkin) Resize(width uint) {\n\tskin.Processed = imaging.Resize(skin.Processed, int(width), 0, imaging.NearestNeighbor)\n}\n\nfunc cropHead(img image.Image) (image.Image, error) {\n\treturn cropImage(img, image.Rect(HeadX, HeadY, HeadX+HeadWidth, HeadY+HeadHeight))\n}\n\nfunc cropHelm(img image.Image) (image.Image, error) {\n\theadImg, err := cropHead(img)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\theadImgRGBA := headImg.(*image.RGBA)\n\n\thelmImg, err := cropImage(img, image.Rect(HelmX, HelmY, HelmX+HelmWidth, HelmY+HelmHeight))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsr := helmImg.Bounds()\n\tdraw.Draw(headImgRGBA, sr, helmImg, sr.Min, draw.Over)\n\n\treturn headImg, nil\n}\n\nfunc cropImage(i image.Image, d image.Rectangle) (image.Image, error) {\n\tbounds := i.Bounds()\n\tif bounds.Min.X > d.Min.X || bounds.Min.Y > d.Min.Y || bounds.Max.X < d.Max.X || bounds.Max.Y < d.Max.Y {\n\t\treturn nil, errors.New(\"Bounds invalid for crop\")\n\t}\n\n\tdims := d.Size()\n\toutIm := image.NewRGBA(image.Rect(0, 0, dims.X, dims.Y))\n\tfor x := 0; x < dims.X; x++ {\n\t\tfor y := 0; y < dims.Y; y++ {\n\t\t\toutIm.Set(x, y, i.At(d.Min.X+x, d.Min.Y+y))\n\t\t}\n\t}\n\treturn outIm, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Boringstreamer looks for mp3 files and broadcasts via http (live streaming.)\n\/\/\n\/\/ $ boringstreamer\n\/\/\n\/\/ or\n\/\/\n\/\/ c:\\>boringstreamer.exe\n\/\/\n\/\/ recursively looks for .mp3 files starting from current working directory and broadcasts on port 4444 for at most 42 concurrent http clients.\n\/\/\n\/\/ See -h for details.\n\/\/\n\/\/ Browse to listen (e.g. http:\/\/localhost:4444\/)\npackage main\n\nimport (\n\t\"bytes\"\n\t\"bufio\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/tcolgate\/mp3\"\n)\n\nvar (\n\taddr = flag.String(\"addr\", \":4444\", \"listen on address (format: :port or host:port)\")\n\tmaxConnections = flag.Int(\"max\", 42, \"set maximum number of streaming connections\")\n\trecursively = flag.Bool(\"r\", true, \"recursively look for music starting from path\")\n\tverbose = flag.Bool(\"v\", false, \"display verbose messages\")\n)\n\nvar debugging bool\t\/\/ controlled by hidden command line argument -debug\n\n\/\/ like \/dev\/null\ntype nullWriter struct {\n}\n\nfunc (nw nullWriter) Write(p []byte) (n int, err error) {\n\treturn len(p), nil\n}\n\ntype streamFrame []byte\n\n\/\/ client's event\ntype broadcastResult struct {\n\tqid int\n\terr error\n}\n\n\/\/ After a start() mux broadcasts audio stream to subscribed clients (ie. to http servers).\n\/\/ Clients subscribe() and unsubscribe by writing to result chanel.\ntype mux struct {\n\tsync.Mutex\n\n\tclients map[int]chan streamFrame \/\/ set of listener clients to be notified\n\tresult chan broadcastResult \/\/ clients share broadcast success-failure here\n}\n\n\/\/ subscribe(ch) adds ch to the set of channels to be received on by the clients when a new audio frame is available.\n\/\/ Returns uniq client id (qid) for ch and a broadcast result channel for the client.\n\/\/ Returns -1, nil if too many clients are already listening.\n\/\/ clients: qid, br := m.subscribe(ch)\nfunc (m *mux) subscribe(ch chan streamFrame) (int, chan broadcastResult) {\n\tm.Lock()\n\tdefer m.Unlock()\n\t\/\/ search for available qid\n\tqid := 0\n\t_, ok := m.clients[qid]\n\tfor ; ok; _, ok = m.clients[qid] {\n\t\tif qid >= *maxConnections-1 {\n\t\t\treturn -1, nil\n\t\t}\n\t\tqid++\n\t}\n\tm.clients[qid] = ch\n\tif *verbose {\n\t\tfmt.Printf(\"New connection (qid: %v), streaming to %v connections, at %v\\n\", qid, len(m.clients), time.Now().Format(time.Stamp))\n\t}\n\n\treturn qid, m.result\n}\n\n\n\/\/ start() initializes a multiplexer for raw audio streams\n\/\/ e.g: m := new(mux).start(path)\nfunc (m *mux) start(path string) *mux {\n\tm.result = make(chan broadcastResult)\n\tm.clients = make(map[int]chan streamFrame)\n\n\t\/\/ flow structure: fs -> nextFile -> nextStream -> nextFrame -> subscribed http servers -> browsers\n\tnextFile := make(chan string)\t\t\t\/\/ next file to be broadcast\n\tnextStream := make(chan io.Reader)\t\t\/\/ next raw audio stream\n\tnextFrame := make(chan streamFrame)\t\/\/ next audio frame\n\n\t\/\/ generate randomized list of files available from path\n\trand.Seed(time.Now().Unix()) \/\/ minimal randomness\n\trescan := make(chan chan string)\n\tgo func() {\n\t\tfor {\n\t\t\tfiles := <-rescan\n\t\t\tfilepath.Walk(path, func(wpath string, info os.FileInfo, err error) error {\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif !info.Mode().IsRegular() {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tprobably := strings.HasSuffix(strings.ToLower(info.Name()), \".mp3\") \/\/ probably mp3\n\t\t\t\tif !info.IsDir() && !probably {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tfiles <- wpath \/\/ found file\n\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tclose(files)\n\t\t\ttime.Sleep(1 * time.Second) \/\/ if no files are found, poll at least with 1Hz \n\t\t}\n\t}()\n\n\t\/\/ buffer and shuffle\n\tgo func() {\n\t\tfor {\n\t\t\tfiles := make(chan string)\n\t\t\trescan <- files\n\n\t\t\tshuffled := make([]string, 0) \/\/ randomized set of files\n\n\t\t\tfor f := range files {\n\t\t\t\tselect {\n\t\t\t\tcase <- time.After(100*time.Millisecond):\t\/\/ start playing as soon as possible, but wait at least 0.1 second for shuffling\n\t\t\t\t\tnextFile <- f\t\n\t\t\t\t\tif *verbose {\n\t\t\t\t\t\tfmt.Printf(\"Next: %v\\n\", f)\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\t\/\/ shuffle files for random playback\n\t\t\t\t\t\/\/ (random permutation)\n\t\t\t\t\tif len(shuffled) == 0 {\n\t\t\t\t\t\tshuffled = append(shuffled, f)\n\t\t\t\t\t} else {\n\t\t\t\t\t\ti := rand.Intn(len(shuffled))\n\t\t\t\t\t\tshuffled = append(shuffled, shuffled[i])\n\t\t\t\t\t\tshuffled[i] = f\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ queue shuffled files\n\t\t\tfor _, f := range shuffled {\n\t\t\t\tnextFile <- f\n\t\t\t\tif *verbose {\n\t\t\t\t\tfmt.Printf(\"Next: %v\\n\", f)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ open file\n\tgo func() {\n\t\tfor {\n\t\t\tfilename := <-nextFile\n\t\t\tf, err := os.Open(filename)\n\t\t\tif err != nil {\n\t\t\t\tif debugging {\n\t\t\t\t\tlog.Printf(\"Skipped \\\"%v\\\", err=%v\", filename, err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnextStream <- bufio.NewReaderSize(f, 1024*1024)\n\t\t\tif *verbose {\n\t\t\t\tfmt.Printf(\"Now playing: %v\\n\", filename)\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ decode stream to frames and delay for frame duration\n\tgo func() {\n\t\tnullwriter := new(nullWriter)\n\t\tvar cumwait time.Duration\n\t\tfor {\n\t\t\tstreamReader := <- nextStream\n\t\t\td := mp3.NewDecoder(streamReader)\n\t\t\tvar f mp3.Frame\n\t\t\tfor {\n\t\t\t\tt0 := time.Now()\n\t\t\t\ttmp := log.Prefix()\n\t\t\t\tif !debugging {\n\t\t\t\t\tlog.SetOutput(nullwriter) \/\/ hack to silence mp3 debug\/log output\n\t\t\t\t} else {\n\t\t\t\t\tlog.SetPrefix(\"info: mp3 decode msg: \")\n\t\t\t\t}\n\t\t\t\terr := d.Decode(&f)\n\t\t\t\tlog.SetPrefix(tmp)\n\t\t\t\tif !debugging {\n\t\t\t\t\tlog.SetOutput(os.Stderr)\n\t\t\t\t}\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tif debugging {\n\t\t\t\t\t\tlog.Printf(\"Skipping frame, d.Decode() err=%v\", err)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbuf, err := ioutil.ReadAll(f.Reader())\n\t\t\t\tif err != nil {\n\t\t\t\t\tif debugging {\n\t\t\t\t\t\tlog.Printf(\"Skipping frame, ioutil.ReadAll() err=%v\", err)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tnextFrame <- buf\n\n\t\t\t\ttowait := f.Duration() - time.Now().Sub(t0)\n\t\t\t\tcumwait += towait \/\/ towait can be negative -> cumwait\n\t\t\t\tif cumwait > 1*time.Second {\n\t\t\t\t\ttime.Sleep(cumwait)\n\t\t\t\t\tcumwait = 0\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ broadcast frame to clients\n\tgo func() {\n\t\tfor {\n\t\t\tf := <-nextFrame\n\t\t\t\/\/ notify clients of new audio frame or let them quit\n\t\t\tm.Lock()\n\t\t\tfor _, ch := range m.clients {\n\t\t\t\tm.Unlock()\n\t\t\t\tch <- f\n\t\t\t\tbr := <-m.result \/\/ handle quitting clients\n\t\t\t\tif br.err != nil {\n\t\t\t\t\tm.Lock()\n\t\t\t\t\tclose(m.clients[br.qid])\n\t\t\t\t\tdelete(m.clients, br.qid)\n\t\t\t\t\tm.Unlock()\n\t\t\t\t\tif debugging {\n\t\t\t\t\t\tlog.Printf(\"Connection exited, qid: %v, error %v. Now streaming to %v connections.\", br.qid, br.err, len(m.clients))\n\t\t\t\t\t} else if *verbose {\n\t\t\t\t\t\tfmt.Printf(\"Connection exited (qid: %v), streaming to %v connections, at %v\\n\", br.qid, len(m.clients), time.Now().Format(time.Stamp))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tm.Lock()\n\t\t\t}\n\t\t\tm.Unlock()\n\t\t}\n\t}()\n\n\treturn m\n}\n\ntype streamHandler struct {\n\tstream *mux\n}\n\n\/\/ chrome and firefox play mp3 audio stream directly\nfunc (sh streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tnow := time.Now().UTC()\n\tframes := make(chan streamFrame)\n\tqid, br := sh.stream.subscribe(frames)\n\tif qid < 0 {\n\t\tlog.Printf(\"Error: new connection request denied, already serving %v connections. See -h for details.\", *maxConnections)\n\t\tw.WriteHeader(http.StatusTooManyRequests)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tw.Header().Set(\"Date\", now.Format(http.TimeFormat))\n\tw.Header().Set(\"Connection\", \"Keep-Alive\")\n\tw.Header().Set(\"Cache-Control\", \"no-cache\")\n\tw.Header().Set(\"Content-Type\", \"audio\/mpeg\")\n\tw.Header().Set(\"Server\", \"BoringStreamer\/4.0\")\n\n\t\/\/ some browsers need ID3 tag to identify first frame as audio media to be played\n\t\/\/ minimal ID3 header to designate audio stream\n\tb := []byte{0x49, 0x44, 0x33, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}\n\t_, err := io.Copy(w, bytes.NewReader(b))\n\tif err == nil {\n\t\t\/\/ broadcast mp3 stream to w\n\t\tbroadcastTimeout := 4 * time.Second \/\/ timeout for slow clients\n\t\tresult := make(chan error)\n\t\tfor {\n\t\t\tbuf := <-frames\n\t\t\t\n\t\t\tgo func(r chan error, b []byte) {\n\t\t\t\t_, err = io.Copy(w, bytes.NewReader(b))\n\t\t\t\tr <- err\n\t\t\t}(result, buf)\n\t\t\t\n\t\t\tselect {\n\t\t\tcase err = <-result:\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tbr <- broadcastResult{qid, nil} \/\/ frame streamed, no error, send ack\n\t\t\tcase <-time.After(broadcastTimeout): \/\/ it's an error if io.Copy() is not finished within broadcastTimeout, ServeHTTP should exit\n\t\t\t\terr = errors.New(fmt.Sprintf(\"timeout: %v\", broadcastTimeout))\n\t\t\t}\n\t\t\t\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tbr <- broadcastResult{qid, err} \/\/ error, send nack\n}\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s [flags] [path]\\n\", os.Args[0])\n\t\tfmt.Fprintf(os.Stderr, \"Browse to listen (e.g. http:\/\/localhost:4444\/)\\n\\nflags:\\n\")\n\t\tflag.PrintDefaults()\n\t}\n\tflag.Parse()\n\tif len(flag.Args()) > 1 && flag.Args()[1] != \"-debug\" {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tpath := \".\"\n\tswitch len(flag.Args()) {\n\tcase 0:\n\t\tif *verbose {\n\t\t\tfmt.Printf(\"Using path %#v, see -h for details.\\n\", path)\n\t\t}\n\tcase 1:\n\t\tpath = flag.Args()[0]\n\tcase 2:\n\t\tpath = flag.Args()[0]\n\t\tdebugging = true\n\t}\n\n\tif *verbose {\n\t\tfmt.Printf(\"Looking for files available from \\\"%v\\\" ...\\n\", path)\n\t}\n\n\t\/\/ check if path is available\n\tmatches, err := filepath.Glob(path)\n\tif err != nil || len(matches) != 1 {\n\t\tfmt.Fprintf(os.Stderr, \"Error: \\\"%v\\\" unavailable, nothing to play.\\n\", path)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ initialize and start mp3 streamer\n\thttp.Handle(\"\/\", streamHandler{new(mux).start(path)})\n\tif *verbose {\n\t\tfmt.Printf(\"Waiting for connections on %v\\n\", *addr)\n\t}\n\n\terr = http.ListenAndServe(*addr, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<commit_msg>modify logging for more robust operations (no io during lock\/unlock<commit_after>\/\/ Boringstreamer looks for mp3 files and broadcasts via http (live streaming.)\n\/\/\n\/\/ $ boringstreamer\n\/\/\n\/\/ or\n\/\/\n\/\/ c:\\>boringstreamer.exe\n\/\/\n\/\/ recursively looks for .mp3 files starting from current working directory and broadcasts on port 4444 for at most 42 concurrent http clients.\n\/\/\n\/\/ See -h for details.\n\/\/\n\/\/ Browse to listen (e.g. http:\/\/localhost:4444\/)\npackage main\n\nimport (\n\t\"bytes\"\n\t\"bufio\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/tcolgate\/mp3\"\n)\n\nvar (\n\taddr = flag.String(\"addr\", \":4444\", \"listen on address (format: :port or host:port)\")\n\tmaxConnections = flag.Int(\"max\", 42, \"set maximum number of streaming connections\")\n\trecursively = flag.Bool(\"r\", true, \"recursively look for music starting from path\")\n\tverbose = flag.Bool(\"v\", false, \"display verbose messages\")\n)\n\nvar debugging bool\t\/\/ controlled by hidden command line argument -debug\n\n\/\/ like \/dev\/null\ntype nullWriter struct {\n}\n\nfunc (nw nullWriter) Write(p []byte) (n int, err error) {\n\treturn len(p), nil\n}\n\ntype streamFrame []byte\n\n\/\/ client's event\ntype broadcastResult struct {\n\tqid int\n\terr error\n}\n\n\/\/ After a start() mux broadcasts audio stream to subscribed clients (ie. to http servers).\n\/\/ Clients subscribe() and unsubscribe by writing to result chanel.\ntype mux struct {\n\tsync.Mutex\n\n\tclients map[int]chan streamFrame \/\/ set of listener clients to be notified\n\tresult chan broadcastResult \/\/ clients share broadcast success-failure here\n}\n\n\/\/ subscribe(ch) adds ch to the set of channels to be received on by the clients when a new audio frame is available.\n\/\/ Returns uniq client id (qid) for ch and a broadcast result channel for the client.\n\/\/ Returns -1, nil if too many clients are already listening.\n\/\/ clients: qid, br := m.subscribe(ch)\nfunc (m *mux) subscribe(ch chan streamFrame) (int, chan broadcastResult) {\n\tm.Lock()\n\t\/\/ search for available qid\n\tqid := 0\n\t_, ok := m.clients[qid]\n\tfor ; ok; _, ok = m.clients[qid] {\n\t\tif qid >= *maxConnections-1 {\n\t\t\tm.Unlock()\n\t\t\treturn -1, nil\n\t\t}\n\t\tqid++\n\t}\n\tm.clients[qid] = ch\n\tm.Unlock()\n\tif *verbose {\n\t\tfmt.Printf(\"New connection (qid: %v), streaming to %v connections, at %v\\n\", qid, len(m.clients), time.Now().Format(time.Stamp))\n\t}\n\n\treturn qid, m.result\n}\n\n\n\/\/ start() initializes a multiplexer for raw audio streams\n\/\/ e.g: m := new(mux).start(path)\nfunc (m *mux) start(path string) *mux {\n\tm.result = make(chan broadcastResult)\n\tm.clients = make(map[int]chan streamFrame)\n\n\t\/\/ flow structure: fs -> nextFile -> nextStream -> nextFrame -> subscribed http servers -> browsers\n\tnextFile := make(chan string)\t\t\t\/\/ next file to be broadcast\n\tnextStream := make(chan io.Reader)\t\t\/\/ next raw audio stream\n\tnextFrame := make(chan streamFrame)\t\/\/ next audio frame\n\n\t\/\/ generate randomized list of files available from path\n\trand.Seed(time.Now().Unix()) \/\/ minimal randomness\n\trescan := make(chan chan string)\n\tgo func() {\n\t\tfor {\n\t\t\tfiles := <-rescan\n\t\t\tfilepath.Walk(path, func(wpath string, info os.FileInfo, err error) error {\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif !info.Mode().IsRegular() {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tprobably := strings.HasSuffix(strings.ToLower(info.Name()), \".mp3\") \/\/ probably mp3\n\t\t\t\tif !info.IsDir() && !probably {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tfiles <- wpath \/\/ found file\n\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tclose(files)\n\t\t\ttime.Sleep(1 * time.Second) \/\/ if no files are found, poll at least with 1Hz \n\t\t}\n\t}()\n\n\t\/\/ buffer and shuffle\n\tgo func() {\n\t\tfor {\n\t\t\tfiles := make(chan string)\n\t\t\trescan <- files\n\n\t\t\tshuffled := make([]string, 0) \/\/ randomized set of files\n\n\t\t\tfor f := range files {\n\t\t\t\tselect {\n\t\t\t\tcase <- time.After(100*time.Millisecond):\t\/\/ start playing as soon as possible, but wait at least 0.1 second for shuffling\n\t\t\t\t\tnextFile <- f\t\n\t\t\t\t\tif *verbose {\n\t\t\t\t\t\tfmt.Printf(\"Next: %v\\n\", f)\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\t\/\/ shuffle files for random playback\n\t\t\t\t\t\/\/ (random permutation)\n\t\t\t\t\tif len(shuffled) == 0 {\n\t\t\t\t\t\tshuffled = append(shuffled, f)\n\t\t\t\t\t} else {\n\t\t\t\t\t\ti := rand.Intn(len(shuffled))\n\t\t\t\t\t\tshuffled = append(shuffled, shuffled[i])\n\t\t\t\t\t\tshuffled[i] = f\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ queue shuffled files\n\t\t\tfor _, f := range shuffled {\n\t\t\t\tnextFile <- f\n\t\t\t\tif *verbose {\n\t\t\t\t\tfmt.Printf(\"Next: %v\\n\", f)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ open file\n\tgo func() {\n\t\tfor {\n\t\t\tfilename := <-nextFile\n\t\t\tf, err := os.Open(filename)\n\t\t\tif err != nil {\n\t\t\t\tif debugging {\n\t\t\t\t\tlog.Printf(\"Skipped \\\"%v\\\", err=%v\", filename, err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnextStream <- bufio.NewReaderSize(f, 1024*1024)\n\t\t\tif *verbose {\n\t\t\t\tfmt.Printf(\"Now playing: %v\\n\", filename)\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ decode stream to frames and delay for frame duration\n\tgo func() {\n\t\tnullwriter := new(nullWriter)\n\t\tvar cumwait time.Duration\n\t\tfor {\n\t\t\tstreamReader := <- nextStream\n\t\t\td := mp3.NewDecoder(streamReader)\n\t\t\tvar f mp3.Frame\n\t\t\tfor {\n\t\t\t\tt0 := time.Now()\n\t\t\t\ttmp := log.Prefix()\n\t\t\t\tif !debugging {\n\t\t\t\t\tlog.SetOutput(nullwriter) \/\/ hack to silence mp3 debug\/log output\n\t\t\t\t} else {\n\t\t\t\t\tlog.SetPrefix(\"info: mp3 decode msg: \")\n\t\t\t\t}\n\t\t\t\terr := d.Decode(&f)\n\t\t\t\tlog.SetPrefix(tmp)\n\t\t\t\tif !debugging {\n\t\t\t\t\tlog.SetOutput(os.Stderr)\n\t\t\t\t}\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tif debugging {\n\t\t\t\t\t\tlog.Printf(\"Skipping frame, d.Decode() err=%v\", err)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbuf, err := ioutil.ReadAll(f.Reader())\n\t\t\t\tif err != nil {\n\t\t\t\t\tif debugging {\n\t\t\t\t\t\tlog.Printf(\"Skipping frame, ioutil.ReadAll() err=%v\", err)\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tnextFrame <- buf\n\n\t\t\t\ttowait := f.Duration() - time.Now().Sub(t0)\n\t\t\t\tcumwait += towait \/\/ towait can be negative -> cumwait\n\t\t\t\tif cumwait > 1*time.Second {\n\t\t\t\t\ttime.Sleep(cumwait)\n\t\t\t\t\tcumwait = 0\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ broadcast frame to clients\n\tgo func() {\n\t\tfor {\n\t\t\tf := <-nextFrame\n\t\t\t\/\/ notify clients of new audio frame or let them quit\n\t\t\tm.Lock()\n\t\t\tfor _, ch := range m.clients {\n\t\t\t\tm.Unlock()\n\t\t\t\tch <- f\n\t\t\t\tbr := <-m.result \/\/ handle quitting clients\n\t\t\t\tif br.err != nil {\n\t\t\t\t\tm.Lock()\n\t\t\t\t\tclose(m.clients[br.qid])\n\t\t\t\t\tdelete(m.clients, br.qid)\n\t\t\t\t\tnclients := len(m.clients)\n\t\t\t\t\tm.Unlock()\n\t\t\t\t\tif debugging {\n\t\t\t\t\t\tlog.Printf(\"Connection exited, qid: %v, error %v. Now streaming to %v connections.\", br.qid, br.err, nclients)\n\t\t\t\t\t} else if *verbose {\n\t\t\t\t\t\tfmt.Printf(\"Connection exited, qid: %v. Now streaming to %v connections, at %v\\n\", br.qid, nclients, time.Now().Format(time.Stamp))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tm.Lock()\n\t\t\t}\n\t\t\tm.Unlock()\n\t\t}\n\t}()\n\n\treturn m\n}\n\ntype streamHandler struct {\n\tstream *mux\n}\n\n\/\/ chrome and firefox play mp3 audio stream directly\nfunc (sh streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tnow := time.Now().UTC()\n\tframes := make(chan streamFrame)\n\tqid, br := sh.stream.subscribe(frames)\n\tif qid < 0 {\n\t\tlog.Printf(\"Error: new connection request denied, already serving %v connections. See -h for details.\", *maxConnections)\n\t\tw.WriteHeader(http.StatusTooManyRequests)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tw.Header().Set(\"Date\", now.Format(http.TimeFormat))\n\tw.Header().Set(\"Connection\", \"Keep-Alive\")\n\tw.Header().Set(\"Cache-Control\", \"no-cache\")\n\tw.Header().Set(\"Content-Type\", \"audio\/mpeg\")\n\tw.Header().Set(\"Server\", \"BoringStreamer\/4.0\")\n\n\t\/\/ some browsers need ID3 tag to identify first frame as audio media to be played\n\t\/\/ minimal ID3 header to designate audio stream\n\tb := []byte{0x49, 0x44, 0x33, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}\n\t_, err := io.Copy(w, bytes.NewReader(b))\n\tif err == nil {\n\t\t\/\/ broadcast mp3 stream to w\n\t\tbroadcastTimeout := 4 * time.Second \/\/ timeout for slow clients\n\t\tresult := make(chan error)\n\t\tfor {\n\t\t\tbuf := <-frames\n\t\t\t\n\t\t\tgo func(r chan error, b []byte) {\n\t\t\t\t_, err = io.Copy(w, bytes.NewReader(b))\n\t\t\t\tr <- err\n\t\t\t}(result, buf)\n\t\t\t\n\t\t\tselect {\n\t\t\tcase err = <-result:\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tbr <- broadcastResult{qid, nil} \/\/ frame streamed, no error, send ack\n\t\t\tcase <-time.After(broadcastTimeout): \/\/ it's an error if io.Copy() is not finished within broadcastTimeout, ServeHTTP should exit\n\t\t\t\terr = errors.New(fmt.Sprintf(\"timeout: %v\", broadcastTimeout))\n\t\t\t}\n\t\t\t\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tbr <- broadcastResult{qid, err} \/\/ error, send nack\n}\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s [flags] [path]\\n\", os.Args[0])\n\t\tfmt.Fprintf(os.Stderr, \"Browse to listen (e.g. http:\/\/localhost:4444\/)\\n\\nflags:\\n\")\n\t\tflag.PrintDefaults()\n\t}\n\tflag.Parse()\n\tif len(flag.Args()) > 1 && flag.Args()[1] != \"-debug\" {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tpath := \".\"\n\tswitch len(flag.Args()) {\n\tcase 0:\n\t\tif *verbose {\n\t\t\tfmt.Printf(\"Using path %#v, see -h for details.\\n\", path)\n\t\t}\n\tcase 1:\n\t\tpath = flag.Args()[0]\n\tcase 2:\n\t\tpath = flag.Args()[0]\n\t\tdebugging = true\n\t}\n\n\tif *verbose {\n\t\tfmt.Printf(\"Looking for files available from \\\"%v\\\" ...\\n\", path)\n\t}\n\n\t\/\/ check if path is available\n\tmatches, err := filepath.Glob(path)\n\tif err != nil || len(matches) != 1 {\n\t\tfmt.Fprintf(os.Stderr, \"Error: \\\"%v\\\" unavailable, nothing to play.\\n\", path)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ initialize and start mp3 streamer\n\thttp.Handle(\"\/\", streamHandler{new(mux).start(path)})\n\tif *verbose {\n\t\tfmt.Printf(\"Waiting for connections on %v\\n\", *addr)\n\t}\n\n\terr = http.ListenAndServe(*addr, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>add GetStrings method<commit_after><|endoftext|>"} {"text":"<commit_before>package core\n\nimport (\n\t\"context\"\n\t\"sync\"\n\n\t\"v2ray.com\/core\/common\"\n\t\"v2ray.com\/core\/common\/uuid\"\n)\n\n\/\/ Server is an instance of V2Ray. At any time, there must be at most one Server instance running.\n\/\/ Deprecated. Use Instance directly.\ntype Server interface {\n\tcommon.Runnable\n}\n\n\/\/ Feature is the interface for V2Ray features. All features must implement this interface.\n\/\/ All existing features have an implementation in app directory. These features can be replaced by third-party ones.\ntype Feature interface {\n\tcommon.Runnable\n}\n\n\/\/ Instance combines all functionalities in V2Ray.\ntype Instance struct {\n\tdnsClient syncDNSClient\n\tpolicyManager syncPolicyManager\n\tdispatcher syncDispatcher\n\trouter syncRouter\n\tihm syncInboundHandlerManager\n\tohm syncOutboundHandlerManager\n\tclock syncClock\n\tcmd syncCommander\n\n\taccess sync.Mutex\n\tfeatures []Feature\n\tid uuid.UUID\n\trunning bool\n}\n\n\/\/ New returns a new V2Ray instance based on given configuration.\n\/\/ The instance is not started at this point.\n\/\/ To make sure V2Ray instance works properly, the config must contain one Dispatcher, one InboundHandlerManager and one OutboundHandlerManager. Other features are optional.\nfunc New(config *Config) (*Instance, error) {\n\tvar server = &Instance{\n\t\tid: uuid.New(),\n\t}\n\n\tif err := config.Transport.Apply(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, appSettings := range config.App {\n\t\tsettings, err := appSettings.GetInstance()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif _, err := server.CreateObject(settings); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tfor _, inbound := range config.Inbound {\n\t\trawHandler, err := server.CreateObject(inbound)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thandler, ok := rawHandler.(InboundHandler)\n\t\tif !ok {\n\t\t\treturn nil, newError(\"not an InboundHandler\")\n\t\t}\n\t\tif err := server.InboundHandlerManager().AddHandler(context.Background(), handler); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tfor _, outbound := range config.Outbound {\n\t\trawHandler, err := server.CreateObject(outbound)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thandler, ok := rawHandler.(OutboundHandler)\n\t\tif !ok {\n\t\t\treturn nil, newError(\"not an OutboundHandler\")\n\t\t}\n\t\tif err := server.OutboundHandlerManager().AddHandler(context.Background(), handler); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn server, nil\n}\n\nfunc (s *Instance) CreateObject(config interface{}) (interface{}, error) {\n\tctx := context.WithValue(context.Background(), v2rayKey, s)\n\treturn common.CreateObject(ctx, config)\n}\n\n\/\/ ID returns an unique ID for this V2Ray instance.\nfunc (s *Instance) ID() uuid.UUID {\n\treturn s.id\n}\n\n\/\/ Close shutdown the V2Ray instance.\nfunc (s *Instance) Close() error {\n\ts.access.Lock()\n\tdefer s.access.Unlock()\n\n\ts.running = false\n\tfor _, f := range s.features {\n\t\tf.Close()\n\t}\n\n\treturn nil\n}\n\n\/\/ Start starts the V2Ray instance, including all registered features. When Start returns error, the state of the instance is unknown.\n\/\/ A V2Ray instance can be started only once. Upon closing, the instance is not guaranteed to start again.\nfunc (s *Instance) Start() error {\n\ts.access.Lock()\n\tdefer s.access.Unlock()\n\n\ts.running = true\n\tfor _, f := range s.features {\n\t\tif err := f.Start(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tnewError(\"V2Ray started\").AtWarning().WriteToLog()\n\n\treturn nil\n}\n\n\/\/ RegisterFeature registers the given feature into V2Ray.\n\/\/ If feature is one of the following types, the corressponding feature in this Instance\n\/\/ will be replaced: DNSClient, PolicyManager, Router, Dispatcher, InboundHandlerManager, OutboundHandlerManager.\nfunc (s *Instance) RegisterFeature(feature interface{}, instance Feature) error {\n\tswitch feature.(type) {\n\tcase DNSClient, *DNSClient:\n\t\ts.dnsClient.Set(instance.(DNSClient))\n\tcase PolicyManager, *PolicyManager:\n\t\ts.policyManager.Set(instance.(PolicyManager))\n\tcase Router, *Router:\n\t\ts.router.Set(instance.(Router))\n\tcase Dispatcher, *Dispatcher:\n\t\ts.dispatcher.Set(instance.(Dispatcher))\n\tcase InboundHandlerManager, *InboundHandlerManager:\n\t\ts.ihm.Set(instance.(InboundHandlerManager))\n\tcase OutboundHandlerManager, *OutboundHandlerManager:\n\t\ts.ohm.Set(instance.(OutboundHandlerManager))\n\tcase Clock, *Clock:\n\t\ts.clock.Set(instance.(Clock))\n\tcase Commander, *Commander:\n\t\ts.cmd.Set(instance.(Commander))\n\t}\n\ts.access.Lock()\n\tdefer s.access.Unlock()\n\n\ts.features = append(s.features, instance)\n\tif s.running {\n\t\treturn instance.Start()\n\t}\n\treturn nil\n}\n\n\/\/ GetFeature returns a feature that was registered in this Instance. Nil if not found.\nfunc (s *Instance) GetFeature(featureType interface{}) Feature {\n\tfor _, f := range s.features {\n\t\tif hasType, ok := f.(common.HasType); ok {\n\t\t\tif hasType.Type() == featureType {\n\t\t\t\treturn f\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ DNSClient returns the DNSClient used by this Instance. The returned DNSClient is always functional.\nfunc (s *Instance) DNSClient() DNSClient {\n\treturn &(s.dnsClient)\n}\n\n\/\/ PolicyManager returns the PolicyManager used by this Instance. The returned PolicyManager is always functional.\nfunc (s *Instance) PolicyManager() PolicyManager {\n\treturn &(s.policyManager)\n}\n\n\/\/ Router returns the Router used by this Instance. The returned Router is always functional.\nfunc (s *Instance) Router() Router {\n\treturn &(s.router)\n}\n\n\/\/ Dispatcher returns the Dispatcher used by this Instance. If Dispatcher was not registered before, the returned value doesn't work, although it is not nil.\nfunc (s *Instance) Dispatcher() Dispatcher {\n\treturn &(s.dispatcher)\n}\n\n\/\/ InboundHandlerManager returns the InboundHandlerManager used by this Instance. If InboundHandlerManager was not registered before, the returned value doesn't work.\nfunc (s *Instance) InboundHandlerManager() InboundHandlerManager {\n\treturn &(s.ihm)\n}\n\n\/\/ OutboundHandlerManager returns the OutboundHandlerManager used by this Instance. If OutboundHandlerManager was not registered before, the returned value doesn't work.\nfunc (s *Instance) OutboundHandlerManager() OutboundHandlerManager {\n\treturn &(s.ohm)\n}\n\n\/\/ Clock returns the Clock used by this Instance. The returned Clock is always functional.\nfunc (s *Instance) Clock() Clock {\n\treturn &(s.clock)\n}\n\n\/\/ Commander returns the Commander used by this Instance. The returned Commander is always functional.\nfunc (s *Instance) Commander() Commander {\n\treturn &(s.cmd)\n}\n<commit_msg>details about GetFeature<commit_after>package core\n\nimport (\n\t\"context\"\n\t\"sync\"\n\n\t\"v2ray.com\/core\/common\"\n\t\"v2ray.com\/core\/common\/uuid\"\n)\n\n\/\/ Server is an instance of V2Ray. At any time, there must be at most one Server instance running.\n\/\/ Deprecated. Use Instance directly.\ntype Server interface {\n\tcommon.Runnable\n}\n\n\/\/ Feature is the interface for V2Ray features. All features must implement this interface.\n\/\/ All existing features have an implementation in app directory. These features can be replaced by third-party ones.\ntype Feature interface {\n\tcommon.Runnable\n}\n\n\/\/ Instance combines all functionalities in V2Ray.\ntype Instance struct {\n\tdnsClient syncDNSClient\n\tpolicyManager syncPolicyManager\n\tdispatcher syncDispatcher\n\trouter syncRouter\n\tihm syncInboundHandlerManager\n\tohm syncOutboundHandlerManager\n\tclock syncClock\n\tcmd syncCommander\n\n\taccess sync.Mutex\n\tfeatures []Feature\n\tid uuid.UUID\n\trunning bool\n}\n\n\/\/ New returns a new V2Ray instance based on given configuration.\n\/\/ The instance is not started at this point.\n\/\/ To make sure V2Ray instance works properly, the config must contain one Dispatcher, one InboundHandlerManager and one OutboundHandlerManager. Other features are optional.\nfunc New(config *Config) (*Instance, error) {\n\tvar server = &Instance{\n\t\tid: uuid.New(),\n\t}\n\n\tif err := config.Transport.Apply(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, appSettings := range config.App {\n\t\tsettings, err := appSettings.GetInstance()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif _, err := server.CreateObject(settings); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tfor _, inbound := range config.Inbound {\n\t\trawHandler, err := server.CreateObject(inbound)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thandler, ok := rawHandler.(InboundHandler)\n\t\tif !ok {\n\t\t\treturn nil, newError(\"not an InboundHandler\")\n\t\t}\n\t\tif err := server.InboundHandlerManager().AddHandler(context.Background(), handler); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tfor _, outbound := range config.Outbound {\n\t\trawHandler, err := server.CreateObject(outbound)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thandler, ok := rawHandler.(OutboundHandler)\n\t\tif !ok {\n\t\t\treturn nil, newError(\"not an OutboundHandler\")\n\t\t}\n\t\tif err := server.OutboundHandlerManager().AddHandler(context.Background(), handler); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn server, nil\n}\n\nfunc (s *Instance) CreateObject(config interface{}) (interface{}, error) {\n\tctx := context.WithValue(context.Background(), v2rayKey, s)\n\treturn common.CreateObject(ctx, config)\n}\n\n\/\/ ID returns an unique ID for this V2Ray instance.\nfunc (s *Instance) ID() uuid.UUID {\n\treturn s.id\n}\n\n\/\/ Close shutdown the V2Ray instance.\nfunc (s *Instance) Close() error {\n\ts.access.Lock()\n\tdefer s.access.Unlock()\n\n\ts.running = false\n\tfor _, f := range s.features {\n\t\tf.Close()\n\t}\n\n\treturn nil\n}\n\n\/\/ Start starts the V2Ray instance, including all registered features. When Start returns error, the state of the instance is unknown.\n\/\/ A V2Ray instance can be started only once. Upon closing, the instance is not guaranteed to start again.\nfunc (s *Instance) Start() error {\n\ts.access.Lock()\n\tdefer s.access.Unlock()\n\n\ts.running = true\n\tfor _, f := range s.features {\n\t\tif err := f.Start(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tnewError(\"V2Ray started\").AtWarning().WriteToLog()\n\n\treturn nil\n}\n\n\/\/ RegisterFeature registers the given feature into V2Ray.\n\/\/ If feature is one of the following types, the corressponding feature in this Instance\n\/\/ will be replaced: DNSClient, PolicyManager, Router, Dispatcher, InboundHandlerManager, OutboundHandlerManager.\nfunc (s *Instance) RegisterFeature(feature interface{}, instance Feature) error {\n\tswitch feature.(type) {\n\tcase DNSClient, *DNSClient:\n\t\ts.dnsClient.Set(instance.(DNSClient))\n\tcase PolicyManager, *PolicyManager:\n\t\ts.policyManager.Set(instance.(PolicyManager))\n\tcase Router, *Router:\n\t\ts.router.Set(instance.(Router))\n\tcase Dispatcher, *Dispatcher:\n\t\ts.dispatcher.Set(instance.(Dispatcher))\n\tcase InboundHandlerManager, *InboundHandlerManager:\n\t\ts.ihm.Set(instance.(InboundHandlerManager))\n\tcase OutboundHandlerManager, *OutboundHandlerManager:\n\t\ts.ohm.Set(instance.(OutboundHandlerManager))\n\tcase Clock, *Clock:\n\t\ts.clock.Set(instance.(Clock))\n\tcase Commander, *Commander:\n\t\ts.cmd.Set(instance.(Commander))\n\t}\n\ts.access.Lock()\n\tdefer s.access.Unlock()\n\n\ts.features = append(s.features, instance)\n\tif s.running {\n\t\treturn instance.Start()\n\t}\n\treturn nil\n}\n\n\/\/ GetFeature returns a feature that was registered in this Instance. Nil if not found.\n\/\/ The returned Feature must implement common.HasType and whose type equals the given feature type.\nfunc (s *Instance) GetFeature(featureType interface{}) Feature {\n\tfor _, f := range s.features {\n\t\tif hasType, ok := f.(common.HasType); ok {\n\t\t\tif hasType.Type() == featureType {\n\t\t\t\treturn f\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ DNSClient returns the DNSClient used by this Instance. The returned DNSClient is always functional.\nfunc (s *Instance) DNSClient() DNSClient {\n\treturn &(s.dnsClient)\n}\n\n\/\/ PolicyManager returns the PolicyManager used by this Instance. The returned PolicyManager is always functional.\nfunc (s *Instance) PolicyManager() PolicyManager {\n\treturn &(s.policyManager)\n}\n\n\/\/ Router returns the Router used by this Instance. The returned Router is always functional.\nfunc (s *Instance) Router() Router {\n\treturn &(s.router)\n}\n\n\/\/ Dispatcher returns the Dispatcher used by this Instance. If Dispatcher was not registered before, the returned value doesn't work, although it is not nil.\nfunc (s *Instance) Dispatcher() Dispatcher {\n\treturn &(s.dispatcher)\n}\n\n\/\/ InboundHandlerManager returns the InboundHandlerManager used by this Instance. If InboundHandlerManager was not registered before, the returned value doesn't work.\nfunc (s *Instance) InboundHandlerManager() InboundHandlerManager {\n\treturn &(s.ihm)\n}\n\n\/\/ OutboundHandlerManager returns the OutboundHandlerManager used by this Instance. If OutboundHandlerManager was not registered before, the returned value doesn't work.\nfunc (s *Instance) OutboundHandlerManager() OutboundHandlerManager {\n\treturn &(s.ohm)\n}\n\n\/\/ Clock returns the Clock used by this Instance. The returned Clock is always functional.\nfunc (s *Instance) Clock() Clock {\n\treturn &(s.clock)\n}\n\n\/\/ Commander returns the Commander used by this Instance. The returned Commander is always functional.\nfunc (s *Instance) Commander() Commander {\n\treturn &(s.cmd)\n}\n<|endoftext|>"} {"text":"<commit_before>package backrest\n\n\/*\n Copyright 2018 - 2020 Crunchy Data Solutions, Inc.\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\tcrv1 \"github.com\/crunchydata\/postgres-operator\/apis\/cr\/v1\"\n\t\"github.com\/crunchydata\/postgres-operator\/config\"\n\t\"github.com\/crunchydata\/postgres-operator\/events\"\n\t\"github.com\/crunchydata\/postgres-operator\/kubeapi\"\n\t\"github.com\/crunchydata\/postgres-operator\/operator\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\tv1batch \"k8s.io\/api\/batch\/v1\"\n\tmeta_v1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\t\"k8s.io\/client-go\/rest\"\n)\n\ntype backrestJobTemplateFields struct {\n\tJobName string\n\tName string\n\tClusterName string\n\tCommand string\n\tCommandOpts string\n\tPITRTarget string\n\tPodName string\n\tPGOImagePrefix string\n\tPGOImageTag string\n\tSecurityContext string\n\tPgbackrestStanza string\n\tPgbackrestDBPath string\n\tPgbackrestRepoPath string\n\tPgbackrestRepoType string\n\tBackrestLocalAndS3Storage bool\n\tPgbackrestRestoreVolumes string\n\tPgbackrestRestoreVolumeMounts string\n}\n\nvar backrestPgHostRegex = regexp.MustCompile(\"--db-host|--pg1-host\")\nvar backrestPgPathRegex = regexp.MustCompile(\"--db-path|--pg1-path\")\n\n\/\/ Backrest ...\nfunc Backrest(namespace string, clientset *kubernetes.Clientset, task *crv1.Pgtask) {\n\n\t\/\/create the Job to run the backrest command\n\n\tcmd := task.Spec.Parameters[config.LABEL_BACKREST_COMMAND]\n\n\tjobFields := backrestJobTemplateFields{\n\t\tJobName: task.Spec.Parameters[config.LABEL_JOB_NAME],\n\t\tClusterName: task.Spec.Parameters[config.LABEL_PG_CLUSTER],\n\t\tPodName: task.Spec.Parameters[config.LABEL_POD_NAME],\n\t\tSecurityContext: \"\",\n\t\tCommand: cmd,\n\t\tCommandOpts: task.Spec.Parameters[config.LABEL_BACKREST_OPTS],\n\t\tPITRTarget: \"\",\n\t\tPGOImagePrefix: operator.Pgo.Pgo.PGOImagePrefix,\n\t\tPGOImageTag: operator.Pgo.Pgo.PGOImageTag,\n\t\tPgbackrestStanza: task.Spec.Parameters[config.LABEL_PGBACKREST_STANZA],\n\t\tPgbackrestDBPath: task.Spec.Parameters[config.LABEL_PGBACKREST_DB_PATH],\n\t\tPgbackrestRepoPath: task.Spec.Parameters[config.LABEL_PGBACKREST_REPO_PATH],\n\t\tPgbackrestRestoreVolumes: \"\",\n\t\tPgbackrestRestoreVolumeMounts: \"\",\n\t\tPgbackrestRepoType: operator.GetRepoType(task.Spec.Parameters[config.LABEL_BACKREST_STORAGE_TYPE]),\n\t\tBackrestLocalAndS3Storage: operator.IsLocalAndS3Storage(task.Spec.Parameters[config.LABEL_BACKREST_STORAGE_TYPE]),\n\t}\n\n\tpodCommandOpts, err := getCommandOptsFromPod(clientset, task, namespace)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn\n\t}\n\tjobFields.CommandOpts = jobFields.CommandOpts + \" \" + podCommandOpts\n\n\tvar doc2 bytes.Buffer\n\tif err := config.BackrestjobTemplate.Execute(&doc2, jobFields); err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn\n\t}\n\n\tif operator.CRUNCHY_DEBUG {\n\t\tconfig.BackrestjobTemplate.Execute(os.Stdout, jobFields)\n\t}\n\n\tnewjob := v1batch.Job{}\n\terr = json.Unmarshal(doc2.Bytes(), &newjob)\n\tif err != nil {\n\t\tlog.Error(\"error unmarshalling json into Job \" + err.Error())\n\t\treturn\n\t}\n\n\t\/\/ set the container image to an override value, if one exists\n\toperator.SetContainerImageOverride(config.CONTAINER_IMAGE_PGO_BACKREST,\n\t\t&newjob.Spec.Template.Spec.Containers[0])\n\n\tnewjob.ObjectMeta.Labels[config.LABEL_PGOUSER] = task.ObjectMeta.Labels[config.LABEL_PGOUSER]\n\tnewjob.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER] = task.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER]\n\n\tbackupType := task.Spec.Parameters[config.LABEL_PGHA_BACKUP_TYPE]\n\tif backupType != \"\" {\n\t\tnewjob.ObjectMeta.Labels[config.LABEL_PGHA_BACKUP_TYPE] = backupType\n\t}\n\tkubeapi.CreateJob(clientset, &newjob, namespace)\n\n\t\/\/publish backrest backup event\n\tif cmd == \"backup\" {\n\t\ttopics := make([]string, 1)\n\t\ttopics[0] = events.EventTopicBackup\n\n\t\tf := events.EventCreateBackupFormat{\n\t\t\tEventHeader: events.EventHeader{\n\t\t\t\tNamespace: namespace,\n\t\t\t\tUsername: task.ObjectMeta.Labels[config.LABEL_PGOUSER],\n\t\t\t\tTopic: topics,\n\t\t\t\tTimestamp: time.Now(),\n\t\t\t\tEventType: events.EventCreateBackup,\n\t\t\t},\n\t\t\tClustername: jobFields.ClusterName,\n\t\t\tBackupType: \"pgbackrest\",\n\t\t}\n\n\t\terr := events.Publish(f)\n\t\tif err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t}\n\t}\n\n}\n\n\/\/ CreateInitialBackup creates a Pgtask in order to initiate the initial pgBackRest backup for a cluster\n\/\/ as needed to support replica creation\nfunc CreateInitialBackup(restclient *rest.RESTClient, namespace, clusterName, podName string) (*crv1.Pgtask, error) {\n\tvar params map[string]string\n\tparams = make(map[string]string)\n\tparams[config.LABEL_PGHA_BACKUP_TYPE] = crv1.BackupTypeBootstrap\n\treturn CreateBackup(restclient, namespace, clusterName, podName, params, \"--type=full\")\n}\n\n\/\/ CreatePostFailoverBackup creates a Pgtask in order to initiate the a pgBackRest backup following a failure\n\/\/ event to ensure proper replica creation and\/or reinitialization\nfunc CreatePostFailoverBackup(restclient *rest.RESTClient, namespace, clusterName, podName string) (*crv1.Pgtask, error) {\n\tvar params map[string]string\n\tparams = make(map[string]string)\n\tparams[config.LABEL_PGHA_BACKUP_TYPE] = crv1.BackupTypeFailover\n\treturn CreateBackup(restclient, namespace, clusterName, podName, params, \"\")\n}\n\n\/\/ CreateBackup creates a Pgtask in order to initiate a pgBackRest backup\nfunc CreateBackup(restclient *rest.RESTClient, namespace, clusterName, podName string, params map[string]string,\n\tbackupOpts string) (*crv1.Pgtask, error) {\n\n\tlog.Debug(\"pgBackRest operator CreateBackup called\")\n\n\tcluster := crv1.Pgcluster{}\n\t_, err := kubeapi.Getpgcluster(restclient, &cluster, clusterName, namespace)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn nil, err\n\t}\n\n\tvar newInstance *crv1.Pgtask\n\ttaskName := \"backrest-backup-\" + cluster.Name\n\n\tspec := crv1.PgtaskSpec{}\n\tspec.Name = taskName\n\tspec.Namespace = namespace\n\n\tspec.TaskType = crv1.PgtaskBackrest\n\tspec.Parameters = make(map[string]string)\n\tspec.Parameters[config.LABEL_JOB_NAME] = \"backrest-\" + crv1.PgtaskBackrestBackup + \"-\" + cluster.Name\n\tspec.Parameters[config.LABEL_PG_CLUSTER] = cluster.Name\n\tspec.Parameters[config.LABEL_POD_NAME] = podName\n\tspec.Parameters[config.LABEL_CONTAINER_NAME] = \"database\"\n\tspec.Parameters[config.LABEL_BACKREST_COMMAND] = crv1.PgtaskBackrestBackup\n\tspec.Parameters[config.LABEL_BACKREST_OPTS] = backupOpts\n\tspec.Parameters[config.LABEL_BACKREST_STORAGE_TYPE] = cluster.Spec.UserLabels[config.LABEL_BACKREST_STORAGE_TYPE]\n\n\tfor k, v := range params {\n\t\tspec.Parameters[k] = v\n\t}\n\n\tnewInstance = &crv1.Pgtask{\n\t\tObjectMeta: meta_v1.ObjectMeta{\n\t\t\tName: taskName,\n\t\t},\n\t\tSpec: spec,\n\t}\n\tnewInstance.ObjectMeta.Labels = make(map[string]string)\n\tnewInstance.ObjectMeta.Labels[config.LABEL_PG_CLUSTER] = cluster.Name\n\tnewInstance.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER] = cluster.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER]\n\tnewInstance.ObjectMeta.Labels[config.LABEL_PGOUSER] = cluster.ObjectMeta.Labels[config.LABEL_PGOUSER]\n\n\terr = kubeapi.Createpgtask(restclient, newInstance, cluster.Namespace)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn nil, err\n\t}\n\n\treturn newInstance, nil\n}\n\n\/\/ CleanBackupResources is responsible for cleaning up Kubernetes resources from a previous\n\/\/ pgBackRest backup. Specifically, this function deletes the pgptask and job associate with a\n\/\/ previous pgBackRest backup for the cluster.\nfunc CleanBackupResources(restclient *rest.RESTClient, clientset *kubernetes.Clientset, namespace,\n\tclusterName string) error {\n\n\ttaskName := \"backrest-backup-\" + clusterName\n\t\/\/ lookup the pgBackRest backup pgtask for the cluster to determine if it exsits\n\tfound, err := kubeapi.Getpgtask(restclient, &crv1.Pgtask{}, taskName, namespace)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\n\t\/\/ if the pgBackRest backup pgtask exits, then delete it so that a new pgBackRest backup\n\t\/\/ pgtask can be created in order to initiate a new backup\n\tif found {\n\t\tlog.Debugf(\"pgtask %s was found when cleaning backup resources prior to creating a new \"+\n\t\t\t\"backrest backup pgtask and will be deleted\", taskName)\n\t\t\/\/ delete the existing pgBackRest backup pgtask\n\t\terr = kubeapi.Deletepgtask(restclient, taskName, namespace)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tlog.Debugf(\"pgtask %s was not found when cleaning backup resources prior to creating a \"+\n\t\t\t\"new backrest backup pgtask\", taskName)\n\t}\n\n\t\/\/remove previous backup job\n\tselector := config.LABEL_BACKREST_COMMAND + \"=\" + crv1.PgtaskBackrestBackup + \",\" +\n\t\tconfig.LABEL_PG_CLUSTER + \"=\" + clusterName + \",\" + config.LABEL_BACKREST + \"=true\"\n\terr = kubeapi.DeleteJobs(clientset, selector, namespace)\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\n\ttimeout := time.After(30 * time.Second)\n\ttick := time.Tick(1 * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-timeout:\n\t\t\treturn fmt.Errorf(\"Timed out waiting for deletion of pgBackRest backup job for \"+\n\t\t\t\t\"cluster %s\", clusterName)\n\t\tcase <-tick:\n\t\t\tjobList, err := kubeapi.GetJobs(clientset, selector, namespace)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif len(jobList.Items) == 0 {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ getCommandOptsFromPod adds command line options from the primary pod to a backrest job.\n\/\/ If not already specified in the command options provided in the pgtask, add the IP of the\n\/\/ primary pod as the value for the \"--db-host\" parameter. This will ensure direct\n\/\/ communication between the repo pod and the primary via the primary's IP, instead of going\n\/\/ through the primary pod's service (which could be unreliable). also if not already specified\n\/\/ in the command options provided in the pgtask, then lookup the primary pod for the cluster\n\/\/ and add the PGDATA dir of the pod as the value for the \"--db-path\" parameter\nfunc getCommandOptsFromPod(clientset *kubernetes.Clientset, task *crv1.Pgtask,\n\tnamespace string) (commandOpts string, err error) {\n\n\t\/\/ lookup the primary pod in order to determine the IP of the primary and the PGDATA directory for\n\t\/\/ the current primaty\n\tselector := fmt.Sprintf(\"%s=%s,%s in (%s,%s)\", config.LABEL_PG_CLUSTER,\n\t\ttask.Spec.Parameters[config.LABEL_PG_CLUSTER], config.LABEL_PGHA_ROLE,\n\t\t\"promoted\", \"master\")\n\tpods, err := kubeapi.GetPods(clientset, selector, namespace)\n\tif err != nil {\n\t\treturn\n\t} else if len(pods.Items) > 1 {\n\t\terr = fmt.Errorf(\"More than one primary found when creating backrest job %s\",\n\t\t\ttask.Spec.Parameters[config.LABEL_JOB_NAME])\n\t\treturn\n\t} else if len(pods.Items) == 0 {\n\t\terr = fmt.Errorf(\"Unable to find primary when creating backrest job %s\",\n\t\t\ttask.Spec.Parameters[config.LABEL_JOB_NAME])\n\t\treturn\n\t}\n\tpod := pods.Items[0]\n\n\tvar cmdOpts []string\n\n\tif !backrestPgHostRegex.MatchString(task.Spec.Parameters[config.LABEL_BACKREST_OPTS]) {\n\t\tcmdOpts = append(cmdOpts, fmt.Sprintf(\"--db-host=%s\", pod.Status.PodIP))\n\t}\n\tif !backrestPgPathRegex.MatchString(task.Spec.Parameters[config.LABEL_BACKREST_OPTS]) {\n\t\tvar podDbPath string\n\t\tfor _, envVar := range pod.Spec.Containers[0].Env {\n\t\t\tif envVar.Name == \"PGBACKREST_DB_PATH\" {\n\t\t\t\tpodDbPath = envVar.Value\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif podDbPath != \"\" {\n\t\t\tcmdOpts = append(cmdOpts, fmt.Sprintf(\"--db-path=%s\", podDbPath))\n\t\t} else {\n\t\t\tlog.Errorf(\"Unable to find PGBACKREST_DB_PATH on primary pod %s for backrest job %s\",\n\t\t\t\tpod.Name, task.Spec.Parameters[config.LABEL_JOB_NAME])\n\t\t\treturn\n\t\t}\n\t}\n\t\/\/ join options using a space\n\tcommandOpts = strings.Join(cmdOpts, \" \")\n\treturn\n}\n<commit_msg>Remove Else that Only Logs Debug Info<commit_after>package backrest\n\n\/*\n Copyright 2018 - 2020 Crunchy Data Solutions, Inc.\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\tcrv1 \"github.com\/crunchydata\/postgres-operator\/apis\/cr\/v1\"\n\t\"github.com\/crunchydata\/postgres-operator\/config\"\n\t\"github.com\/crunchydata\/postgres-operator\/events\"\n\t\"github.com\/crunchydata\/postgres-operator\/kubeapi\"\n\t\"github.com\/crunchydata\/postgres-operator\/operator\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\tv1batch \"k8s.io\/api\/batch\/v1\"\n\tmeta_v1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\t\"k8s.io\/client-go\/rest\"\n)\n\ntype backrestJobTemplateFields struct {\n\tJobName string\n\tName string\n\tClusterName string\n\tCommand string\n\tCommandOpts string\n\tPITRTarget string\n\tPodName string\n\tPGOImagePrefix string\n\tPGOImageTag string\n\tSecurityContext string\n\tPgbackrestStanza string\n\tPgbackrestDBPath string\n\tPgbackrestRepoPath string\n\tPgbackrestRepoType string\n\tBackrestLocalAndS3Storage bool\n\tPgbackrestRestoreVolumes string\n\tPgbackrestRestoreVolumeMounts string\n}\n\nvar backrestPgHostRegex = regexp.MustCompile(\"--db-host|--pg1-host\")\nvar backrestPgPathRegex = regexp.MustCompile(\"--db-path|--pg1-path\")\n\n\/\/ Backrest ...\nfunc Backrest(namespace string, clientset *kubernetes.Clientset, task *crv1.Pgtask) {\n\n\t\/\/create the Job to run the backrest command\n\n\tcmd := task.Spec.Parameters[config.LABEL_BACKREST_COMMAND]\n\n\tjobFields := backrestJobTemplateFields{\n\t\tJobName: task.Spec.Parameters[config.LABEL_JOB_NAME],\n\t\tClusterName: task.Spec.Parameters[config.LABEL_PG_CLUSTER],\n\t\tPodName: task.Spec.Parameters[config.LABEL_POD_NAME],\n\t\tSecurityContext: \"\",\n\t\tCommand: cmd,\n\t\tCommandOpts: task.Spec.Parameters[config.LABEL_BACKREST_OPTS],\n\t\tPITRTarget: \"\",\n\t\tPGOImagePrefix: operator.Pgo.Pgo.PGOImagePrefix,\n\t\tPGOImageTag: operator.Pgo.Pgo.PGOImageTag,\n\t\tPgbackrestStanza: task.Spec.Parameters[config.LABEL_PGBACKREST_STANZA],\n\t\tPgbackrestDBPath: task.Spec.Parameters[config.LABEL_PGBACKREST_DB_PATH],\n\t\tPgbackrestRepoPath: task.Spec.Parameters[config.LABEL_PGBACKREST_REPO_PATH],\n\t\tPgbackrestRestoreVolumes: \"\",\n\t\tPgbackrestRestoreVolumeMounts: \"\",\n\t\tPgbackrestRepoType: operator.GetRepoType(task.Spec.Parameters[config.LABEL_BACKREST_STORAGE_TYPE]),\n\t\tBackrestLocalAndS3Storage: operator.IsLocalAndS3Storage(task.Spec.Parameters[config.LABEL_BACKREST_STORAGE_TYPE]),\n\t}\n\n\tpodCommandOpts, err := getCommandOptsFromPod(clientset, task, namespace)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn\n\t}\n\tjobFields.CommandOpts = jobFields.CommandOpts + \" \" + podCommandOpts\n\n\tvar doc2 bytes.Buffer\n\tif err := config.BackrestjobTemplate.Execute(&doc2, jobFields); err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn\n\t}\n\n\tif operator.CRUNCHY_DEBUG {\n\t\tconfig.BackrestjobTemplate.Execute(os.Stdout, jobFields)\n\t}\n\n\tnewjob := v1batch.Job{}\n\terr = json.Unmarshal(doc2.Bytes(), &newjob)\n\tif err != nil {\n\t\tlog.Error(\"error unmarshalling json into Job \" + err.Error())\n\t\treturn\n\t}\n\n\t\/\/ set the container image to an override value, if one exists\n\toperator.SetContainerImageOverride(config.CONTAINER_IMAGE_PGO_BACKREST,\n\t\t&newjob.Spec.Template.Spec.Containers[0])\n\n\tnewjob.ObjectMeta.Labels[config.LABEL_PGOUSER] = task.ObjectMeta.Labels[config.LABEL_PGOUSER]\n\tnewjob.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER] = task.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER]\n\n\tbackupType := task.Spec.Parameters[config.LABEL_PGHA_BACKUP_TYPE]\n\tif backupType != \"\" {\n\t\tnewjob.ObjectMeta.Labels[config.LABEL_PGHA_BACKUP_TYPE] = backupType\n\t}\n\tkubeapi.CreateJob(clientset, &newjob, namespace)\n\n\t\/\/publish backrest backup event\n\tif cmd == \"backup\" {\n\t\ttopics := make([]string, 1)\n\t\ttopics[0] = events.EventTopicBackup\n\n\t\tf := events.EventCreateBackupFormat{\n\t\t\tEventHeader: events.EventHeader{\n\t\t\t\tNamespace: namespace,\n\t\t\t\tUsername: task.ObjectMeta.Labels[config.LABEL_PGOUSER],\n\t\t\t\tTopic: topics,\n\t\t\t\tTimestamp: time.Now(),\n\t\t\t\tEventType: events.EventCreateBackup,\n\t\t\t},\n\t\t\tClustername: jobFields.ClusterName,\n\t\t\tBackupType: \"pgbackrest\",\n\t\t}\n\n\t\terr := events.Publish(f)\n\t\tif err != nil {\n\t\t\tlog.Error(err.Error())\n\t\t}\n\t}\n\n}\n\n\/\/ CreateInitialBackup creates a Pgtask in order to initiate the initial pgBackRest backup for a cluster\n\/\/ as needed to support replica creation\nfunc CreateInitialBackup(restclient *rest.RESTClient, namespace, clusterName, podName string) (*crv1.Pgtask, error) {\n\tvar params map[string]string\n\tparams = make(map[string]string)\n\tparams[config.LABEL_PGHA_BACKUP_TYPE] = crv1.BackupTypeBootstrap\n\treturn CreateBackup(restclient, namespace, clusterName, podName, params, \"--type=full\")\n}\n\n\/\/ CreatePostFailoverBackup creates a Pgtask in order to initiate the a pgBackRest backup following a failure\n\/\/ event to ensure proper replica creation and\/or reinitialization\nfunc CreatePostFailoverBackup(restclient *rest.RESTClient, namespace, clusterName, podName string) (*crv1.Pgtask, error) {\n\tvar params map[string]string\n\tparams = make(map[string]string)\n\tparams[config.LABEL_PGHA_BACKUP_TYPE] = crv1.BackupTypeFailover\n\treturn CreateBackup(restclient, namespace, clusterName, podName, params, \"\")\n}\n\n\/\/ CreateBackup creates a Pgtask in order to initiate a pgBackRest backup\nfunc CreateBackup(restclient *rest.RESTClient, namespace, clusterName, podName string, params map[string]string,\n\tbackupOpts string) (*crv1.Pgtask, error) {\n\n\tlog.Debug(\"pgBackRest operator CreateBackup called\")\n\n\tcluster := crv1.Pgcluster{}\n\t_, err := kubeapi.Getpgcluster(restclient, &cluster, clusterName, namespace)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn nil, err\n\t}\n\n\tvar newInstance *crv1.Pgtask\n\ttaskName := \"backrest-backup-\" + cluster.Name\n\n\tspec := crv1.PgtaskSpec{}\n\tspec.Name = taskName\n\tspec.Namespace = namespace\n\n\tspec.TaskType = crv1.PgtaskBackrest\n\tspec.Parameters = make(map[string]string)\n\tspec.Parameters[config.LABEL_JOB_NAME] = \"backrest-\" + crv1.PgtaskBackrestBackup + \"-\" + cluster.Name\n\tspec.Parameters[config.LABEL_PG_CLUSTER] = cluster.Name\n\tspec.Parameters[config.LABEL_POD_NAME] = podName\n\tspec.Parameters[config.LABEL_CONTAINER_NAME] = \"database\"\n\tspec.Parameters[config.LABEL_BACKREST_COMMAND] = crv1.PgtaskBackrestBackup\n\tspec.Parameters[config.LABEL_BACKREST_OPTS] = backupOpts\n\tspec.Parameters[config.LABEL_BACKREST_STORAGE_TYPE] = cluster.Spec.UserLabels[config.LABEL_BACKREST_STORAGE_TYPE]\n\n\tfor k, v := range params {\n\t\tspec.Parameters[k] = v\n\t}\n\n\tnewInstance = &crv1.Pgtask{\n\t\tObjectMeta: meta_v1.ObjectMeta{\n\t\t\tName: taskName,\n\t\t},\n\t\tSpec: spec,\n\t}\n\tnewInstance.ObjectMeta.Labels = make(map[string]string)\n\tnewInstance.ObjectMeta.Labels[config.LABEL_PG_CLUSTER] = cluster.Name\n\tnewInstance.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER] = cluster.ObjectMeta.Labels[config.LABEL_PG_CLUSTER_IDENTIFIER]\n\tnewInstance.ObjectMeta.Labels[config.LABEL_PGOUSER] = cluster.ObjectMeta.Labels[config.LABEL_PGOUSER]\n\n\terr = kubeapi.Createpgtask(restclient, newInstance, cluster.Namespace)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn nil, err\n\t}\n\n\treturn newInstance, nil\n}\n\n\/\/ CleanBackupResources is responsible for cleaning up Kubernetes resources from a previous\n\/\/ pgBackRest backup. Specifically, this function deletes the pgptask and job associate with a\n\/\/ previous pgBackRest backup for the cluster.\nfunc CleanBackupResources(restclient *rest.RESTClient, clientset *kubernetes.Clientset, namespace,\n\tclusterName string) error {\n\n\ttaskName := \"backrest-backup-\" + clusterName\n\t\/\/ lookup the pgBackRest backup pgtask for the cluster to determine if it exsits\n\tfound, err := kubeapi.Getpgtask(restclient, &crv1.Pgtask{}, taskName, namespace)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\n\tlog.Debugf(\"pgtask %s found was %t when cleaning backup resources prior to creating a \"+\n\t\t\"new backrest backup pgtask for cluster %s\", taskName, found, clusterName)\n\t\/\/ if the pgBackRest backup pgtask was found, then delete it so that a new pgBackRest backup\n\t\/\/ pgtask can be created in order to initiate a new backup\n\tif found {\n\t\tlog.Debugf(\"deleting pgtask %s for cluster %s\", taskName, clusterName)\n\t\t\/\/ delete the existing pgBackRest backup pgtask\n\t\tif err = kubeapi.Deletepgtask(restclient, taskName, namespace); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/remove previous backup job\n\tselector := config.LABEL_BACKREST_COMMAND + \"=\" + crv1.PgtaskBackrestBackup + \",\" +\n\t\tconfig.LABEL_PG_CLUSTER + \"=\" + clusterName + \",\" + config.LABEL_BACKREST + \"=true\"\n\terr = kubeapi.DeleteJobs(clientset, selector, namespace)\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\n\ttimeout := time.After(30 * time.Second)\n\ttick := time.Tick(1 * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-timeout:\n\t\t\treturn fmt.Errorf(\"Timed out waiting for deletion of pgBackRest backup job for \"+\n\t\t\t\t\"cluster %s\", clusterName)\n\t\tcase <-tick:\n\t\t\tjobList, err := kubeapi.GetJobs(clientset, selector, namespace)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif len(jobList.Items) == 0 {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ getCommandOptsFromPod adds command line options from the primary pod to a backrest job.\n\/\/ If not already specified in the command options provided in the pgtask, add the IP of the\n\/\/ primary pod as the value for the \"--db-host\" parameter. This will ensure direct\n\/\/ communication between the repo pod and the primary via the primary's IP, instead of going\n\/\/ through the primary pod's service (which could be unreliable). also if not already specified\n\/\/ in the command options provided in the pgtask, then lookup the primary pod for the cluster\n\/\/ and add the PGDATA dir of the pod as the value for the \"--db-path\" parameter\nfunc getCommandOptsFromPod(clientset *kubernetes.Clientset, task *crv1.Pgtask,\n\tnamespace string) (commandOpts string, err error) {\n\n\t\/\/ lookup the primary pod in order to determine the IP of the primary and the PGDATA directory for\n\t\/\/ the current primaty\n\tselector := fmt.Sprintf(\"%s=%s,%s in (%s,%s)\", config.LABEL_PG_CLUSTER,\n\t\ttask.Spec.Parameters[config.LABEL_PG_CLUSTER], config.LABEL_PGHA_ROLE,\n\t\t\"promoted\", \"master\")\n\tpods, err := kubeapi.GetPods(clientset, selector, namespace)\n\tif err != nil {\n\t\treturn\n\t} else if len(pods.Items) > 1 {\n\t\terr = fmt.Errorf(\"More than one primary found when creating backrest job %s\",\n\t\t\ttask.Spec.Parameters[config.LABEL_JOB_NAME])\n\t\treturn\n\t} else if len(pods.Items) == 0 {\n\t\terr = fmt.Errorf(\"Unable to find primary when creating backrest job %s\",\n\t\t\ttask.Spec.Parameters[config.LABEL_JOB_NAME])\n\t\treturn\n\t}\n\tpod := pods.Items[0]\n\n\tvar cmdOpts []string\n\n\tif !backrestPgHostRegex.MatchString(task.Spec.Parameters[config.LABEL_BACKREST_OPTS]) {\n\t\tcmdOpts = append(cmdOpts, fmt.Sprintf(\"--db-host=%s\", pod.Status.PodIP))\n\t}\n\tif !backrestPgPathRegex.MatchString(task.Spec.Parameters[config.LABEL_BACKREST_OPTS]) {\n\t\tvar podDbPath string\n\t\tfor _, envVar := range pod.Spec.Containers[0].Env {\n\t\t\tif envVar.Name == \"PGBACKREST_DB_PATH\" {\n\t\t\t\tpodDbPath = envVar.Value\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif podDbPath != \"\" {\n\t\t\tcmdOpts = append(cmdOpts, fmt.Sprintf(\"--db-path=%s\", podDbPath))\n\t\t} else {\n\t\t\tlog.Errorf(\"Unable to find PGBACKREST_DB_PATH on primary pod %s for backrest job %s\",\n\t\t\t\tpod.Name, task.Spec.Parameters[config.LABEL_JOB_NAME])\n\t\t\treturn\n\t\t}\n\t}\n\t\/\/ join options using a space\n\tcommandOpts = strings.Join(cmdOpts, \" \")\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package libclc\n\nimport (\n \"text\/template\"\n \"bufio\"\n)\n\ntype File struct {\n Path string\n Content string\n Owner string\n Permissions string\n}\n\ntype CloudConfig struct {\n DiscoveryUrl string\n Files []*File\n}\n\nfunc WriteCloudConfig(config *CloudConfig, t *template.Template, path string) (error) {\n return WriteTemplate(\"cc\", config, t, path)\n}\n\nfunc BufferCloudConfig(config *CloudConfig, t *template.Template, w *bufio.Writer) (error) {\n return BufferTemplate(\"cc\", config, t, w)\n}\n<commit_msg>Add host path to file model.<commit_after>package libclc\n\nimport (\n \"text\/template\"\n \"bufio\"\n)\n\ntype File struct {\n HostPath string\n Path string\n Content string\n Owner string\n Permissions string\n}\n\ntype CloudConfig struct {\n DiscoveryUrl string\n Files []*File\n}\n\nfunc WriteCloudConfig(config *CloudConfig, t *template.Template, path string) (error) {\n return WriteTemplate(\"cc\", config, t, path)\n}\n\nfunc BufferCloudConfig(config *CloudConfig, t *template.Template, w *bufio.Writer) (error) {\n return BufferTemplate(\"cc\", config, t, w)\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Fix an error missing to get ResponseWriter.<commit_after><|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/sha1\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/cloudflare\/unsee\/config\"\n\t\"github.com\/cloudflare\/unsee\/models\"\n\t\"github.com\/cloudflare\/unsee\/store\"\n\t\"io\"\n\t\"net\/http\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/gin-gonic\/gin\"\n)\n\nfunc boolInSlice(boolArray []bool, value bool) bool {\n\tfor _, s := range boolArray {\n\t\tif s == value {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc noCache(c *gin.Context) {\n\tc.Header(\"Cache-Control\", \"no-cache, no-store, must-revalidate\")\n}\n\n\/\/ index view, html\nfunc index(c *gin.Context) {\n\tstart := time.Now()\n\n\tcssFiles := readAssets(\"css\")\n\tjsFiles := readAssets(\"js\")\n\tnoCache(c)\n\n\tq, qPresent := c.GetQuery(\"q\")\n\tdefaultUsed := true\n\tif qPresent {\n\t\tdefaultUsed = false\n\t}\n\n\tc.HTML(http.StatusOK, \"templates\/index.html\", gin.H{\n\t\t\"Version\": version,\n\t\t\"SentryDSN\": config.Config.SentryPublicDSN,\n\t\t\"CSSFiles\": cssFiles,\n\t\t\"JSFiles\": jsFiles,\n\t\t\"NowQ\": start.Unix(),\n\t\t\"Config\": config.Config,\n\t\t\"QFilter\": q,\n\t\t\"DefaultUsed\": defaultUsed,\n\t\t\"StaticColorLabels\": strings.Join(config.Config.ColorLabelsStatic, \" \"),\n\t})\n\n\tlog.Infof(\"[%s] %s %s took %s\", c.ClientIP(), c.Request.Method, c.Request.RequestURI, time.Since(start))\n}\n\n\/\/ Help view, html\nfunc help(c *gin.Context) {\n\tstart := time.Now()\n\tcssFiles := readAssets(\"css\")\n\tnoCache(c)\n\tc.HTML(http.StatusOK, \"templates\/help.html\", gin.H{\n\t\t\"CSSFiles\": cssFiles,\n\t\t\"SentryDSN\": config.Config.SentryPublicDSN,\n\t})\n\tlog.Infof(\"[%s] <%d> %s %s took %s\", c.ClientIP(), http.StatusOK, c.Request.Method, c.Request.RequestURI, time.Since(start))\n}\n\nfunc logAlertsView(c *gin.Context, cacheStatus string, duration time.Duration) {\n\tlog.Infof(\"[%s %s] <%d> %s %s took %s\", c.ClientIP(), cacheStatus, http.StatusOK, c.Request.Method, c.Request.RequestURI, duration)\n}\n\n\/\/ alerts endpoint, json, JS will query this via AJAX call\nfunc alerts(c *gin.Context) {\n\tnoCache(c)\n\tstart := time.Now()\n\tts, _ := start.UTC().MarshalText()\n\n\t\/\/ intialize response object, set fields that don't require any locking\n\tresp := models.UnseeAlertsResponse{}\n\tresp.Status = \"success\"\n\tresp.Timestamp = string(ts)\n\tresp.Version = version\n\n\t\/\/ update error field, needs a lock\n\terrorLock.RLock()\n\tresp.Error = string(alertManagerError)\n\terrorLock.RUnlock()\n\n\tif resp.Error != \"\" {\n\t\tapiCache.Flush()\n\t}\n\n\t\/\/ use full URI (including query args) as cache key\n\tcacheKey := c.Request.RequestURI\n\n\tdata, found := apiCache.Get(cacheKey)\n\tif found {\n\t\tc.Data(http.StatusOK, gin.MIMEJSON, data.([]byte))\n\t\tlogAlertsView(c, \"HIT\", time.Since(start))\n\t\treturn\n\t}\n\n\t\/\/ get filters\n\tapiFilters := []models.UnseeFilter{}\n\tmatchFilters, validFilters := getFiltersFromQuery(c.Query(\"q\"))\n\n\t\/\/ set pointers for data store objects, need a lock until end of view is reached\n\talerts := []models.UnseeAlertGroup{}\n\tsilences := map[string]models.UnseeSilence{}\n\tcolors := models.UnseeColorMap{}\n\tcounters := models.UnseeCountMap{}\n\tstore.StoreLock.RLock()\n\n\tvar matches int\n\tfor _, ag := range store.AlertStore.Store {\n\t\tagCopy := models.UnseeAlertGroup{\n\t\t\tID: ag.ID,\n\t\t\tLabels: ag.Labels,\n\t\t\tAlerts: []models.UnseeAlert{},\n\t\t}\n\t\th := sha1.New()\n\n\t\tfor _, alert := range ag.Alerts {\n\t\t\tresults := []bool{}\n\t\t\tif validFilters {\n\t\t\t\tfor _, filter := range matchFilters {\n\t\t\t\t\tif filter.GetIsValid() {\n\t\t\t\t\t\tmatch := filter.Match(&alert, matches)\n\t\t\t\t\t\tresults = append(results, match)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !validFilters || (boolInSlice(results, true) && !boolInSlice(results, false)) {\n\t\t\t\tmatches++\n\t\t\t\tagCopy.Alerts = append(agCopy.Alerts, alert)\n\t\t\t\taj, err := json.Marshal(alert)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(err.Error())\n\t\t\t\t\tpanic(err.Error())\n\t\t\t\t}\n\t\t\t\tio.WriteString(h, string(aj))\n\n\t\t\t\tif alert.Silenced > 0 {\n\t\t\t\t\tif silence, found := store.SilenceStore.Store[strconv.Itoa(alert.Silenced)]; found {\n\t\t\t\t\t\tsilences[strconv.Itoa(alert.Silenced)] = silence\n\t\t\t\t\t}\n\t\t\t\t\tagCopy.SilencedCount++\n\t\t\t\t\tcountLabel(counters, \"@silenced\", \"true\")\n\t\t\t\t} else {\n\t\t\t\t\tagCopy.UnsilencedCount++\n\t\t\t\t\tcountLabel(counters, \"@silenced\", \"false\")\n\t\t\t\t}\n\n\t\t\t\tfor key, value := range alert.Labels {\n\t\t\t\t\tif keyMap, foundKey := store.ColorStore.Store[key]; foundKey {\n\t\t\t\t\t\tif color, foundColor := keyMap[value]; foundColor {\n\t\t\t\t\t\t\tif _, found := colors[key]; !found {\n\t\t\t\t\t\t\t\tcolors[key] = map[string]models.UnseeLabelColor{}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcolors[key][value] = color\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tcountLabel(counters, key, value)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(agCopy.Alerts) > 0 {\n\t\t\tagCopy.Hash = fmt.Sprintf(\"%x\", h.Sum(nil))\n\t\t\talerts = append(alerts, agCopy)\n\t\t}\n\n\t}\n\n\tresp.AlertGroups = alerts\n\tresp.Silences = silences\n\tresp.Colors = colors\n\tresp.Counters = counters\n\n\tfor _, filter := range matchFilters {\n\t\taf := models.UnseeFilter{\n\t\t\tText: filter.GetRawText(),\n\t\t\tHits: filter.GetHits(),\n\t\t\tIsValid: filter.GetIsValid(),\n\t\t}\n\t\tapiFilters = append(apiFilters, af)\n\t}\n\tresp.Filters = apiFilters\n\n\tdata, err := json.Marshal(resp)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\tpanic(err)\n\t}\n\tapiCache.Set(cacheKey, data, -1)\n\tstore.StoreLock.RUnlock()\n\n\tc.Data(http.StatusOK, gin.MIMEJSON, data.([]byte))\n\tlogAlertsView(c, \"MIS\", time.Since(start))\n}\n\n\/\/ autocomplete endpoint, json, used for filter autocomplete hints\nfunc autocomplete(c *gin.Context) {\n\tnoCache(c)\n\tstart := time.Now()\n\n\tcacheKey := c.Request.RequestURI\n\tif cacheKey == \"\" {\n\t\t\/\/ FIXME c.Request.RequestURI is empty when running tests for some reason\n\t\t\/\/ needs checking, below acts as a workaround\n\t\tcacheKey = c.Request.URL.RawQuery\n\t}\n\n\tdata, found := apiCache.Get(cacheKey)\n\tif found {\n\t\tc.Data(http.StatusOK, gin.MIMEJSON, data.([]byte))\n\t\tlogAlertsView(c, \"HIT\", time.Since(start))\n\t\treturn\n\t}\n\n\tterm, found := c.GetQuery(\"term\")\n\tif !found || term == \"\" {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": \"missing term=<token> parameter\"})\n\t\tlog.Infof(\"[%s] <%d> %s %s took %s\", c.ClientIP(), http.StatusBadRequest, c.Request.Method, c.Request.RequestURI, time.Since(start))\n\t\treturn\n\t}\n\n\tacData := []string{}\n\n\tstore.StoreLock.RLock()\n\tfor _, hint := range store.AutocompleteStore.Store {\n\t\tif strings.HasPrefix(strings.ToLower(hint.Value), strings.ToLower(term)) {\n\t\t\tacData = append(acData, hint.Value)\n\t\t} else {\n\t\t\tfor _, token := range hint.Tokens {\n\t\t\t\tif strings.HasPrefix(strings.ToLower(token), strings.ToLower(term)) {\n\t\t\t\t\tacData = append(acData, hint.Value)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tstore.StoreLock.RUnlock()\n\n\tsort.Strings(acData)\n\tdata, err := json.Marshal(acData)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\tpanic(err)\n\t}\n\n\tapiCache.Set(cacheKey, data, time.Second*15)\n\n\tc.Data(http.StatusOK, gin.MIMEJSON, data.([]byte))\n\tlogAlertsView(c, \"MIS\", time.Since(start))\n}\n\nfunc favicon(c *gin.Context) {\n\tfs := newBinaryFileSystem(\"static\")\n\tfileserver := http.FileServer(fs)\n\tfileserver.ServeHTTP(c.Writer, c.Request)\n}\n<commit_msg>Don't create http.FileServer on every favicon request<commit_after>package main\n\nimport (\n\t\"crypto\/sha1\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cloudflare\/unsee\/config\"\n\t\"github.com\/cloudflare\/unsee\/models\"\n\t\"github.com\/cloudflare\/unsee\/store\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/gin-gonic\/gin\"\n)\n\nvar (\n\t\/\/ needed for serving favicon from binary assets\n\tfaviconFileServer = http.FileServer(newBinaryFileSystem(\"static\"))\n)\n\nfunc boolInSlice(boolArray []bool, value bool) bool {\n\tfor _, s := range boolArray {\n\t\tif s == value {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc noCache(c *gin.Context) {\n\tc.Header(\"Cache-Control\", \"no-cache, no-store, must-revalidate\")\n}\n\n\/\/ index view, html\nfunc index(c *gin.Context) {\n\tstart := time.Now()\n\n\tcssFiles := readAssets(\"css\")\n\tjsFiles := readAssets(\"js\")\n\tnoCache(c)\n\n\tq, qPresent := c.GetQuery(\"q\")\n\tdefaultUsed := true\n\tif qPresent {\n\t\tdefaultUsed = false\n\t}\n\n\tc.HTML(http.StatusOK, \"templates\/index.html\", gin.H{\n\t\t\"Version\": version,\n\t\t\"SentryDSN\": config.Config.SentryPublicDSN,\n\t\t\"CSSFiles\": cssFiles,\n\t\t\"JSFiles\": jsFiles,\n\t\t\"NowQ\": start.Unix(),\n\t\t\"Config\": config.Config,\n\t\t\"QFilter\": q,\n\t\t\"DefaultUsed\": defaultUsed,\n\t\t\"StaticColorLabels\": strings.Join(config.Config.ColorLabelsStatic, \" \"),\n\t})\n\n\tlog.Infof(\"[%s] %s %s took %s\", c.ClientIP(), c.Request.Method, c.Request.RequestURI, time.Since(start))\n}\n\n\/\/ Help view, html\nfunc help(c *gin.Context) {\n\tstart := time.Now()\n\tcssFiles := readAssets(\"css\")\n\tnoCache(c)\n\tc.HTML(http.StatusOK, \"templates\/help.html\", gin.H{\n\t\t\"CSSFiles\": cssFiles,\n\t\t\"SentryDSN\": config.Config.SentryPublicDSN,\n\t})\n\tlog.Infof(\"[%s] <%d> %s %s took %s\", c.ClientIP(), http.StatusOK, c.Request.Method, c.Request.RequestURI, time.Since(start))\n}\n\nfunc logAlertsView(c *gin.Context, cacheStatus string, duration time.Duration) {\n\tlog.Infof(\"[%s %s] <%d> %s %s took %s\", c.ClientIP(), cacheStatus, http.StatusOK, c.Request.Method, c.Request.RequestURI, duration)\n}\n\n\/\/ alerts endpoint, json, JS will query this via AJAX call\nfunc alerts(c *gin.Context) {\n\tnoCache(c)\n\tstart := time.Now()\n\tts, _ := start.UTC().MarshalText()\n\n\t\/\/ intialize response object, set fields that don't require any locking\n\tresp := models.UnseeAlertsResponse{}\n\tresp.Status = \"success\"\n\tresp.Timestamp = string(ts)\n\tresp.Version = version\n\n\t\/\/ update error field, needs a lock\n\terrorLock.RLock()\n\tresp.Error = string(alertManagerError)\n\terrorLock.RUnlock()\n\n\tif resp.Error != \"\" {\n\t\tapiCache.Flush()\n\t}\n\n\t\/\/ use full URI (including query args) as cache key\n\tcacheKey := c.Request.RequestURI\n\n\tdata, found := apiCache.Get(cacheKey)\n\tif found {\n\t\tc.Data(http.StatusOK, gin.MIMEJSON, data.([]byte))\n\t\tlogAlertsView(c, \"HIT\", time.Since(start))\n\t\treturn\n\t}\n\n\t\/\/ get filters\n\tapiFilters := []models.UnseeFilter{}\n\tmatchFilters, validFilters := getFiltersFromQuery(c.Query(\"q\"))\n\n\t\/\/ set pointers for data store objects, need a lock until end of view is reached\n\talerts := []models.UnseeAlertGroup{}\n\tsilences := map[string]models.UnseeSilence{}\n\tcolors := models.UnseeColorMap{}\n\tcounters := models.UnseeCountMap{}\n\tstore.StoreLock.RLock()\n\n\tvar matches int\n\tfor _, ag := range store.AlertStore.Store {\n\t\tagCopy := models.UnseeAlertGroup{\n\t\t\tID: ag.ID,\n\t\t\tLabels: ag.Labels,\n\t\t\tAlerts: []models.UnseeAlert{},\n\t\t}\n\t\th := sha1.New()\n\n\t\tfor _, alert := range ag.Alerts {\n\t\t\tresults := []bool{}\n\t\t\tif validFilters {\n\t\t\t\tfor _, filter := range matchFilters {\n\t\t\t\t\tif filter.GetIsValid() {\n\t\t\t\t\t\tmatch := filter.Match(&alert, matches)\n\t\t\t\t\t\tresults = append(results, match)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !validFilters || (boolInSlice(results, true) && !boolInSlice(results, false)) {\n\t\t\t\tmatches++\n\t\t\t\tagCopy.Alerts = append(agCopy.Alerts, alert)\n\t\t\t\taj, err := json.Marshal(alert)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(err.Error())\n\t\t\t\t\tpanic(err.Error())\n\t\t\t\t}\n\t\t\t\tio.WriteString(h, string(aj))\n\n\t\t\t\tif alert.Silenced > 0 {\n\t\t\t\t\tif silence, found := store.SilenceStore.Store[strconv.Itoa(alert.Silenced)]; found {\n\t\t\t\t\t\tsilences[strconv.Itoa(alert.Silenced)] = silence\n\t\t\t\t\t}\n\t\t\t\t\tagCopy.SilencedCount++\n\t\t\t\t\tcountLabel(counters, \"@silenced\", \"true\")\n\t\t\t\t} else {\n\t\t\t\t\tagCopy.UnsilencedCount++\n\t\t\t\t\tcountLabel(counters, \"@silenced\", \"false\")\n\t\t\t\t}\n\n\t\t\t\tfor key, value := range alert.Labels {\n\t\t\t\t\tif keyMap, foundKey := store.ColorStore.Store[key]; foundKey {\n\t\t\t\t\t\tif color, foundColor := keyMap[value]; foundColor {\n\t\t\t\t\t\t\tif _, found := colors[key]; !found {\n\t\t\t\t\t\t\t\tcolors[key] = map[string]models.UnseeLabelColor{}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcolors[key][value] = color\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tcountLabel(counters, key, value)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(agCopy.Alerts) > 0 {\n\t\t\tagCopy.Hash = fmt.Sprintf(\"%x\", h.Sum(nil))\n\t\t\talerts = append(alerts, agCopy)\n\t\t}\n\n\t}\n\n\tresp.AlertGroups = alerts\n\tresp.Silences = silences\n\tresp.Colors = colors\n\tresp.Counters = counters\n\n\tfor _, filter := range matchFilters {\n\t\taf := models.UnseeFilter{\n\t\t\tText: filter.GetRawText(),\n\t\t\tHits: filter.GetHits(),\n\t\t\tIsValid: filter.GetIsValid(),\n\t\t}\n\t\tapiFilters = append(apiFilters, af)\n\t}\n\tresp.Filters = apiFilters\n\n\tdata, err := json.Marshal(resp)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\tpanic(err)\n\t}\n\tapiCache.Set(cacheKey, data, -1)\n\tstore.StoreLock.RUnlock()\n\n\tc.Data(http.StatusOK, gin.MIMEJSON, data.([]byte))\n\tlogAlertsView(c, \"MIS\", time.Since(start))\n}\n\n\/\/ autocomplete endpoint, json, used for filter autocomplete hints\nfunc autocomplete(c *gin.Context) {\n\tnoCache(c)\n\tstart := time.Now()\n\n\tcacheKey := c.Request.RequestURI\n\tif cacheKey == \"\" {\n\t\t\/\/ FIXME c.Request.RequestURI is empty when running tests for some reason\n\t\t\/\/ needs checking, below acts as a workaround\n\t\tcacheKey = c.Request.URL.RawQuery\n\t}\n\n\tdata, found := apiCache.Get(cacheKey)\n\tif found {\n\t\tc.Data(http.StatusOK, gin.MIMEJSON, data.([]byte))\n\t\tlogAlertsView(c, \"HIT\", time.Since(start))\n\t\treturn\n\t}\n\n\tterm, found := c.GetQuery(\"term\")\n\tif !found || term == \"\" {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"error\": \"missing term=<token> parameter\"})\n\t\tlog.Infof(\"[%s] <%d> %s %s took %s\", c.ClientIP(), http.StatusBadRequest, c.Request.Method, c.Request.RequestURI, time.Since(start))\n\t\treturn\n\t}\n\n\tacData := []string{}\n\n\tstore.StoreLock.RLock()\n\tfor _, hint := range store.AutocompleteStore.Store {\n\t\tif strings.HasPrefix(strings.ToLower(hint.Value), strings.ToLower(term)) {\n\t\t\tacData = append(acData, hint.Value)\n\t\t} else {\n\t\t\tfor _, token := range hint.Tokens {\n\t\t\t\tif strings.HasPrefix(strings.ToLower(token), strings.ToLower(term)) {\n\t\t\t\t\tacData = append(acData, hint.Value)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tstore.StoreLock.RUnlock()\n\n\tsort.Strings(acData)\n\tdata, err := json.Marshal(acData)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\tpanic(err)\n\t}\n\n\tapiCache.Set(cacheKey, data, time.Second*15)\n\n\tc.Data(http.StatusOK, gin.MIMEJSON, data.([]byte))\n\tlogAlertsView(c, \"MIS\", time.Since(start))\n}\n\nfunc favicon(c *gin.Context) {\n\tfaviconFileServer.ServeHTTP(c.Writer, c.Request)\n}\n<|endoftext|>"} {"text":"<commit_before>package mysqlclient\n\nimport (\n\t\"strings\"\n)\n\ntype DB struct {\n\tconns chan Conn\n\tusername string\n\tpassword string\n\tprotocol string\n\taddress string\n\tdatabase string\n}\n\nfunc NewDB(dataSource string, pool int) *DB {\n\tusr, pass, proto, addr, dbname := parseDataSource(dataSource)\n\tconns := make(chan Conn, pool)\n\treturn &DB{\n\t\tconns: conns,\n\t\tusername: usr,\n\t\tpassword: pass,\n\t\tprotocol: proto,\n\t\taddress: addr,\n\t\tdatabase: dbname,\n\t}\n}\n\nfunc (db *DB) GetConn() (Conn, error) {\n\tselect {\n\tcase conn := <-db.conns:\n\t\treturn conn, nil\n\tdefault:\n\t\treturn db.dial()\n\t}\n}\n\nfunc (db *DB) PutConn(conn Conn) (err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = conn.Close()\n\t\t\treturn\n\t\t}\n\t}()\n\n\tselect {\n\tcase db.conns <- conn:\n\tdefault:\n\t\terr = conn.Close()\n\t}\n\n\treturn\n}\n\nfunc (db *DB) Close() {\n\tclose(db.conns)\n\tfor {\n\t\tconn, more := <-db.conns\n\t\tif more {\n\t\t\tconn.Close()\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (db *DB) dial() (Conn, error) {\n\treturn NewConn(db.username, db.password, db.protocol, db.address, db.database)\n}\n\nfunc parseDataSource(dataSource string) (username, password, protocol, address, database string) {\n\tparams := strings.Split(dataSource, \"@\")\n\n\tuserData := strings.Split(params[0], \":\")\n\tserverData := strings.Split(params[1], \"\/\")\n\n\tusername = userData[0]\n\tif len(userData) > 1 {\n\t\tpassword = userData[1]\n\t}\n\n\tif len(serverData) > 1 {\n\t\tdatabase = serverData[1]\n\t}\n\n\tprotoHost := strings.Split(serverData[0], \"(\")\n\tprotocol = protoHost[0]\n\taddress = protoHost[1][:len(protoHost[1])-1]\n\n\treturn\n}\n<commit_msg>Handle closed DB in GetConn function<commit_after>package mysqlclient\n\nimport (\n\t\"errors\"\n\t\"strings\"\n)\n\nvar ErrClosedDB = errors.New(\"get connection from closed DB\")\n\ntype DB struct {\n\tconns chan Conn\n\tusername string\n\tpassword string\n\tprotocol string\n\taddress string\n\tdatabase string\n}\n\nfunc NewDB(dataSource string, pool int) *DB {\n\tusr, pass, proto, addr, dbname := parseDataSource(dataSource)\n\tconns := make(chan Conn, pool)\n\treturn &DB{\n\t\tconns: conns,\n\t\tusername: usr,\n\t\tpassword: pass,\n\t\tprotocol: proto,\n\t\taddress: addr,\n\t\tdatabase: dbname,\n\t}\n}\n\nfunc (db *DB) GetConn() (Conn, error) {\n\tselect {\n\tcase conn, more := <-db.conns:\n\t\tif !more {\n\t\t\treturn Conn{}, ErrClosedDB\n\t\t}\n\t\treturn conn, nil\n\tdefault:\n\t\treturn db.dial()\n\t}\n}\n\nfunc (db *DB) PutConn(conn Conn) (err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = conn.Close()\n\t\t\treturn\n\t\t}\n\t}()\n\n\tselect {\n\tcase db.conns <- conn:\n\tdefault:\n\t\terr = conn.Close()\n\t}\n\n\treturn\n}\n\nfunc (db *DB) Close() {\n\tclose(db.conns)\n\tfor {\n\t\tconn, more := <-db.conns\n\t\tif more {\n\t\t\tconn.Close()\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (db *DB) dial() (Conn, error) {\n\treturn NewConn(db.username, db.password, db.protocol, db.address, db.database)\n}\n\nfunc parseDataSource(dataSource string) (username, password, protocol, address, database string) {\n\tparams := strings.Split(dataSource, \"@\")\n\n\tuserData := strings.Split(params[0], \":\")\n\tserverData := strings.Split(params[1], \"\/\")\n\n\tusername = userData[0]\n\tif len(userData) > 1 {\n\t\tpassword = userData[1]\n\t}\n\n\tif len(serverData) > 1 {\n\t\tdatabase = serverData[1]\n\t}\n\n\tprotoHost := strings.Split(serverData[0], \"(\")\n\tprotocol = protoHost[0]\n\taddress = protoHost[1][:len(protoHost[1])-1]\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/md5\"\n\t\"database\/sql\"\n\n\t\"github.com\/BurntSushi\/csql\"\n\n\t\"github.com\/BurntSushi\/goim\/imdb\"\n)\n\n\/\/ tx wraps a regular SQL transaction and provides two abstractions:\n\/\/ 1) Commit may be called on tx multiple times. Each time after the first is\n\/\/ a no-op (instead of an error).\n\/\/ 2) A tx can \"create\" provide another transaction for simultaneous use.\n\/\/ With the SQLite driver, this actually returns the same tx. But with\n\/\/ PostgreSQL, a new tx is made.\ntype tx struct {\n\tdb *imdb.DB\n\tclosed bool\n\t*sql.Tx\n}\n\n\/\/ wrapTx makes a new tx from a database connection and an existing SQL\n\/\/ transaction.\nfunc wrapTx(db *imdb.DB, trans *sql.Tx) *tx {\n\treturn &tx{db, false, trans}\n}\n\n\/\/ another produces a transaction from tx. It may or may not return the same\n\/\/ transaction depending on the driver being used.\nfunc (tx *tx) another() *tx {\n\tif tx.db.Driver == \"sqlite3\" {\n\t\treturn tx\n\t}\n\ttxx, err := tx.db.Begin()\n\tcsql.Panic(err)\n\treturn wrapTx(tx.db, txx)\n}\n\n\/\/ Commit commits the transaction to the database. It is safe to call it more\n\/\/ than once.\nfunc (tx *tx) Commit() error {\n\tif !tx.closed {\n\t\ttx.closed = true\n\t\treturn tx.Tx.Commit()\n\t}\n\treturn nil\n}\n\n\/\/ atomMap stores a mapping from md5 hashes (in binary) to atom integer ids.\ntype atomMap map[[md5.Size]byte]imdb.Atom\n\n\/\/ atomizer provides a readable\/writable abstraction for accessing and creating\n\/\/ new atom identifiers.\ntype atomizer struct {\n\tdb *imdb.DB\n\tatoms atomMap\n\tnextId imdb.Atom\n\tins *csql.Inserter\n}\n\n\/\/ newAtomizer returns an atomizer that can be used to access or create new\n\/\/ atom identifiers. Note that if tx is nil, then the atomizer returned is\n\/\/ read-only (attempting to write will cause a panic).\n\/\/\n\/\/ A read-only atomizer may be accessed from multiple goroutines\n\/\/ simultaneously, but a read\/write atomizer may NOT.\n\/\/\n\/\/ If a read\/write atomizer is created, then the caller is responsible for\n\/\/ closing the transaction (which should be done immediately after a call to\n\/\/ atomizer.Close).\n\/\/\n\/\/ Note that this function loads the entire set of atoms from the database\n\/\/ into memory, so it is costly.\nfunc newAtomizer(db *imdb.DB, tx *sql.Tx) (az *atomizer, err error) {\n\tdefer csql.Safe(&err)\n\n\taz = &atomizer{db, make(atomMap, 1000000), 0, nil}\n\tif tx != nil {\n\t\tvar err error\n\t\taz.ins, err = csql.NewInserter(\n\t\t\ttx, db.Driver, \"atom\", \"id\", \"hash\")\n\t\tcsql.Panic(err)\n\t}\n\n\trs := csql.Query(db, \"SELECT id, hash FROM atom ORDER BY id ASC\")\n\tcsql.Panic(csql.ForRow(rs, az.readRow))\n\taz.nextId++\n\treturn\n}\n\n\/\/ readRow scans a row from the atom table into an atomMap.\nfunc (az *atomizer) readRow(scanner csql.RowScanner) {\n\tvar id imdb.Atom\n\tvar rawBytes sql.RawBytes\n\tcsql.Scan(scanner, &id, &rawBytes)\n\n\tvar hash [md5.Size]byte\n\thashBytes := hash[:]\n\tcopy(hashBytes, rawBytes)\n\taz.atoms[hash] = id\n\taz.nextId = id\n}\n\n\/\/ atom returns the atom associated with the key string given, along with\n\/\/ whether it already existed or not. If it didn't exist, then a new atom is\n\/\/ created and returned (along with an error if there was a problem creating\n\/\/ the atom).\nfunc (az *atomizer) atom(key []byte) (imdb.Atom, bool, error) {\n\thash := hashKey(key)\n\tif a, ok := az.atoms[hash]; ok {\n\t\treturn a, true, nil\n\t}\n\ta, err := az.add(hash)\n\treturn a, false, err\n}\n\n\/\/ atomOnlyIfExist returns an atom id for the key string given only if that\n\/\/ key string has already been atomized. If it doesn't exist, then the zero\n\/\/ atom is returned along with false. Otherwise, the atom id is returned along\n\/\/ with true.\nfunc (az *atomizer) atomOnlyIfExist(key []byte) (imdb.Atom, bool) {\n\thash := hashKey(key)\n\ta, ok := az.atoms[hash]\n\treturn a, ok\n}\n\n\/\/ add always adds the given hash to the database with a fresh and unique\n\/\/ atom identifier.\nfunc (az *atomizer) add(hash [md5.Size]byte) (imdb.Atom, error) {\n\tif az.ins == nil {\n\t\tpanic(\"cannot add atoms when opened read-only\")\n\t}\n\ta := az.nextId\n\tif err := az.ins.Exec(a, hash[:]); err != nil {\n\t\treturn 0, err\n\t}\n\taz.atoms[hash] = a\n\taz.nextId++\n\treturn a, nil\n}\n\n\/\/ Close inserts any new atoms lingering in the buffer into the database.\n\/\/ This does NOT commit the transaction.\n\/\/ If the atomizer is read-only, this is a no-op.\nfunc (az *atomizer) Close() error {\n\tif az.ins != nil {\n\t\tins := az.ins\n\t\taz.ins = nil\n\t\treturn ins.Exec()\n\t}\n\treturn nil\n}\n\n\/\/ hashKey returns a byte array corresponding to the md5 hash of the key\n\/\/ string given.\nfunc hashKey(s []byte) [md5.Size]byte {\n\tsum := md5.Sum(bytes.TrimSpace(s))\n\treturn sum\n}\n\n\/\/ listTables itemizes the tables that are updated for each list name.\nvar listTables = map[string][]string{\n\t\"movies\": []string{\n\t\t\"atom\", \"name\", \"movie\", \"tvshow\", \"episode\"},\n\t\"actors\": []string{\"atom\", \"name\", \"actor\", \"credit\"},\n\t\"sound-mix\": []string{\"sound_mix\"},\n\t\"genres\": []string{\"genre\"},\n\t\"language\": []string{\"language\"},\n\t\"locations\": []string{\"location\"},\n\t\"trivia\": []string{\"trivia\"},\n\t\"alternate-versions\": []string{\"alternate_version\"},\n\t\"taglines\": []string{\"tagline\"},\n\t\"goofs\": []string{\"goof\"},\n\t\"literature\": []string{\"literature\"},\n\t\"running-times\": []string{\"running_time\"},\n\t\"ratings\": []string{\"rating\"},\n\t\"aka-titles\": []string{\"aka_title\"},\n\t\"movie-links\": []string{\"link\"},\n\t\"color-info\": []string{\"color_info\"},\n\t\"mpaa-ratings-reasons\": []string{\"mpaa_rating\"},\n\t\"release-dates\": []string{\"release_date\"},\n\t\"quotes\": []string{\"quote\"},\n\t\"plot\": []string{\"plot\"},\n}\n<commit_msg>better formatting<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/md5\"\n\t\"database\/sql\"\n\n\t\"github.com\/BurntSushi\/csql\"\n\n\t\"github.com\/BurntSushi\/goim\/imdb\"\n)\n\n\/\/ tx wraps a regular SQL transaction and provides two abstractions:\n\/\/ 1) Commit may be called on tx multiple times. Each time after the first is\n\/\/ a no-op (instead of an error).\n\/\/ 2) A tx can \"create\" provide another transaction for simultaneous use.\n\/\/ With the SQLite driver, this actually returns the same tx. But with\n\/\/ PostgreSQL, a new tx is made.\ntype tx struct {\n\tdb *imdb.DB\n\tclosed bool\n\t*sql.Tx\n}\n\n\/\/ wrapTx makes a new tx from a database connection and an existing SQL\n\/\/ transaction.\nfunc wrapTx(db *imdb.DB, trans *sql.Tx) *tx {\n\treturn &tx{db, false, trans}\n}\n\n\/\/ another produces a transaction from tx. It may or may not return the same\n\/\/ transaction depending on the driver being used.\nfunc (tx *tx) another() *tx {\n\tif tx.db.Driver == \"sqlite3\" {\n\t\treturn tx\n\t}\n\ttxx, err := tx.db.Begin()\n\tcsql.Panic(err)\n\treturn wrapTx(tx.db, txx)\n}\n\n\/\/ Commit commits the transaction to the database. It is safe to call it more\n\/\/ than once.\nfunc (tx *tx) Commit() error {\n\tif !tx.closed {\n\t\ttx.closed = true\n\t\treturn tx.Tx.Commit()\n\t}\n\treturn nil\n}\n\n\/\/ atomMap stores a mapping from md5 hashes (in binary) to atom integer ids.\ntype atomMap map[[md5.Size]byte]imdb.Atom\n\n\/\/ atomizer provides a readable\/writable abstraction for accessing and creating\n\/\/ new atom identifiers.\ntype atomizer struct {\n\tdb *imdb.DB\n\tatoms atomMap\n\tnextId imdb.Atom\n\tins *csql.Inserter\n}\n\n\/\/ newAtomizer returns an atomizer that can be used to access or create new\n\/\/ atom identifiers. Note that if tx is nil, then the atomizer returned is\n\/\/ read-only (attempting to write will cause a panic).\n\/\/\n\/\/ A read-only atomizer may be accessed from multiple goroutines\n\/\/ simultaneously, but a read\/write atomizer may NOT.\n\/\/\n\/\/ If a read\/write atomizer is created, then the caller is responsible for\n\/\/ closing the transaction (which should be done immediately after a call to\n\/\/ atomizer.Close).\n\/\/\n\/\/ Note that this function loads the entire set of atoms from the database\n\/\/ into memory, so it is costly.\nfunc newAtomizer(db *imdb.DB, tx *sql.Tx) (az *atomizer, err error) {\n\tdefer csql.Safe(&err)\n\n\taz = &atomizer{db, make(atomMap, 1000000), 0, nil}\n\tif tx != nil {\n\t\tvar err error\n\t\taz.ins, err = csql.NewInserter(\n\t\t\ttx, db.Driver, \"atom\", \"id\", \"hash\")\n\t\tcsql.Panic(err)\n\t}\n\n\trs := csql.Query(db, \"SELECT id, hash FROM atom ORDER BY id ASC\")\n\tcsql.Panic(csql.ForRow(rs, az.readRow))\n\taz.nextId++\n\treturn\n}\n\n\/\/ readRow scans a row from the atom table into an atomMap.\nfunc (az *atomizer) readRow(scanner csql.RowScanner) {\n\tvar id imdb.Atom\n\tvar rawBytes sql.RawBytes\n\tcsql.Scan(scanner, &id, &rawBytes)\n\n\tvar hash [md5.Size]byte\n\thashBytes := hash[:]\n\tcopy(hashBytes, rawBytes)\n\taz.atoms[hash] = id\n\taz.nextId = id\n}\n\n\/\/ atom returns the atom associated with the key string given, along with\n\/\/ whether it already existed or not. If it didn't exist, then a new atom is\n\/\/ created and returned (along with an error if there was a problem creating\n\/\/ the atom).\nfunc (az *atomizer) atom(key []byte) (imdb.Atom, bool, error) {\n\thash := hashKey(key)\n\tif a, ok := az.atoms[hash]; ok {\n\t\treturn a, true, nil\n\t}\n\ta, err := az.add(hash)\n\treturn a, false, err\n}\n\n\/\/ atomOnlyIfExist returns an atom id for the key string given only if that\n\/\/ key string has already been atomized. If it doesn't exist, then the zero\n\/\/ atom is returned along with false. Otherwise, the atom id is returned along\n\/\/ with true.\nfunc (az *atomizer) atomOnlyIfExist(key []byte) (imdb.Atom, bool) {\n\thash := hashKey(key)\n\ta, ok := az.atoms[hash]\n\treturn a, ok\n}\n\n\/\/ add always adds the given hash to the database with a fresh and unique\n\/\/ atom identifier.\nfunc (az *atomizer) add(hash [md5.Size]byte) (imdb.Atom, error) {\n\tif az.ins == nil {\n\t\tpanic(\"cannot add atoms when opened read-only\")\n\t}\n\ta := az.nextId\n\tif err := az.ins.Exec(a, hash[:]); err != nil {\n\t\treturn 0, err\n\t}\n\taz.atoms[hash] = a\n\taz.nextId++\n\treturn a, nil\n}\n\n\/\/ Close inserts any new atoms lingering in the buffer into the database.\n\/\/ This does NOT commit the transaction.\n\/\/ If the atomizer is read-only, this is a no-op.\nfunc (az *atomizer) Close() error {\n\tif az.ins != nil {\n\t\tins := az.ins\n\t\taz.ins = nil\n\t\treturn ins.Exec()\n\t}\n\treturn nil\n}\n\n\/\/ hashKey returns a byte array corresponding to the md5 hash of the key\n\/\/ string given.\nfunc hashKey(s []byte) [md5.Size]byte {\n\tsum := md5.Sum(bytes.TrimSpace(s))\n\treturn sum\n}\n\n\/\/ listTables itemizes the tables that are updated for each list name.\nvar listTables = map[string][]string{\n\t\"movies\": []string{\n\t\t\"atom\", \"name\", \"movie\", \"tvshow\", \"episode\",\n\t},\n\t\"actors\": []string{\"atom\", \"name\", \"actor\", \"credit\"},\n\t\"sound-mix\": []string{\"sound_mix\"},\n\t\"genres\": []string{\"genre\"},\n\t\"language\": []string{\"language\"},\n\t\"locations\": []string{\"location\"},\n\t\"trivia\": []string{\"trivia\"},\n\t\"alternate-versions\": []string{\"alternate_version\"},\n\t\"taglines\": []string{\"tagline\"},\n\t\"goofs\": []string{\"goof\"},\n\t\"literature\": []string{\"literature\"},\n\t\"running-times\": []string{\"running_time\"},\n\t\"ratings\": []string{\"rating\"},\n\t\"aka-titles\": []string{\"aka_title\"},\n\t\"movie-links\": []string{\"link\"},\n\t\"color-info\": []string{\"color_info\"},\n\t\"mpaa-ratings-reasons\": []string{\"mpaa_rating\"},\n\t\"release-dates\": []string{\"release_date\"},\n\t\"quotes\": []string{\"quote\"},\n\t\"plot\": []string{\"plot\"},\n}\n<|endoftext|>"} {"text":"<commit_before>package buffer\n\nimport (\n\t\"github.com\/zyedidia\/clipboard\"\n\t\"github.com\/zyedidia\/micro\/v2\/internal\/util\"\n)\n\n\/\/ InBounds returns whether the given location is a valid character position in the given buffer\nfunc InBounds(pos Loc, buf *Buffer) bool {\n\tif pos.Y < 0 || pos.Y >= len(buf.lines) || pos.X < 0 || pos.X > util.CharacterCount(buf.LineBytes(pos.Y)) {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n\/\/ The Cursor struct stores the location of the cursor in the buffer\n\/\/ as well as the selection\ntype Cursor struct {\n\tbuf *Buffer\n\tLoc\n\n\t\/\/ Last cursor x position\n\tLastVisualX int\n\n\t\/\/ The current selection as a range of character numbers (inclusive)\n\tCurSelection [2]Loc\n\t\/\/ The original selection as a range of character numbers\n\t\/\/ This is used for line and word selection where it is necessary\n\t\/\/ to know what the original selection was\n\tOrigSelection [2]Loc\n\n\t\/\/ Which cursor index is this (for multiple cursors)\n\tNum int\n}\n\nfunc NewCursor(b *Buffer, l Loc) *Cursor {\n\tc := &Cursor{\n\t\tbuf: b,\n\t\tLoc: l,\n\t}\n\tc.StoreVisualX()\n\treturn c\n}\n\nfunc (c *Cursor) SetBuf(b *Buffer) {\n\tc.buf = b\n}\n\nfunc (c *Cursor) Buf() *Buffer {\n\treturn c.buf\n}\n\n\/\/ Goto puts the cursor at the given cursor's location and gives\n\/\/ the current cursor its selection too\nfunc (c *Cursor) Goto(b Cursor) {\n\tc.X, c.Y, c.LastVisualX = b.X, b.Y, b.LastVisualX\n\tc.OrigSelection, c.CurSelection = b.OrigSelection, b.CurSelection\n}\n\n\/\/ GotoLoc puts the cursor at the given cursor's location and gives\n\/\/ the current cursor its selection too\nfunc (c *Cursor) GotoLoc(l Loc) {\n\tc.X, c.Y = l.X, l.Y\n\tc.StoreVisualX()\n}\n\n\/\/ GetVisualX returns the x value of the cursor in visual spaces\nfunc (c *Cursor) GetVisualX() int {\n\tif c.X <= 0 {\n\t\tc.X = 0\n\t\treturn 0\n\t}\n\n\tbytes := c.buf.LineBytes(c.Y)\n\ttabsize := int(c.buf.Settings[\"tabsize\"].(float64))\n\tif c.X > util.CharacterCount(bytes) {\n\t\tc.X = util.CharacterCount(bytes) - 1\n\t}\n\n\treturn util.StringWidth(bytes, c.X, tabsize)\n}\n\n\/\/ GetCharPosInLine gets the char position of a visual x y\n\/\/ coordinate (this is necessary because tabs are 1 char but\n\/\/ 4 visual spaces)\nfunc (c *Cursor) GetCharPosInLine(b []byte, visualPos int) int {\n\ttabsize := int(c.buf.Settings[\"tabsize\"].(float64))\n\treturn util.GetCharPosInLine(b, visualPos, tabsize)\n}\n\n\/\/ Start moves the cursor to the start of the line it is on\nfunc (c *Cursor) Start() {\n\tc.X = 0\n\tc.LastVisualX = c.GetVisualX()\n}\n\n\/\/ StartOfText moves the cursor to the first non-whitespace rune of\n\/\/ the line it is on\nfunc (c *Cursor) StartOfText() {\n\tc.Start()\n\tfor util.IsWhitespace(c.RuneUnder(c.X)) {\n\t\tif c.X == util.CharacterCount(c.buf.LineBytes(c.Y)) {\n\t\t\tbreak\n\t\t}\n\t\tc.Right()\n\t}\n}\n\n\/\/ IsStartOfText returns whether the cursor is at the first\n\/\/ non-whitespace rune of the line it is on\nfunc (c *Cursor) IsStartOfText() bool {\n\tx := 0\n\tfor util.IsWhitespace(c.RuneUnder(x)) {\n\t\tif x == util.CharacterCount(c.buf.LineBytes(c.Y)) {\n\t\t\tbreak\n\t\t}\n\t\tx++\n\t}\n\treturn c.X == x\n}\n\n\/\/ End moves the cursor to the end of the line it is on\nfunc (c *Cursor) End() {\n\tc.X = util.CharacterCount(c.buf.LineBytes(c.Y))\n\tc.LastVisualX = c.GetVisualX()\n}\n\n\/\/ CopySelection copies the user's selection to either \"primary\"\n\/\/ or \"clipboard\"\nfunc (c *Cursor) CopySelection(target string) {\n\tif c.HasSelection() {\n\t\tif target != \"primary\" || c.buf.Settings[\"useprimary\"].(bool) {\n\t\t\tclipboard.WriteAll(string(c.GetSelection()), target)\n\t\t}\n\t}\n}\n\n\/\/ ResetSelection resets the user's selection\nfunc (c *Cursor) ResetSelection() {\n\tc.CurSelection[0] = c.buf.Start()\n\tc.CurSelection[1] = c.buf.Start()\n}\n\n\/\/ SetSelectionStart sets the start of the selection\nfunc (c *Cursor) SetSelectionStart(pos Loc) {\n\tc.CurSelection[0] = pos\n}\n\n\/\/ SetSelectionEnd sets the end of the selection\nfunc (c *Cursor) SetSelectionEnd(pos Loc) {\n\tc.CurSelection[1] = pos\n}\n\n\/\/ HasSelection returns whether or not the user has selected anything\nfunc (c *Cursor) HasSelection() bool {\n\treturn c.CurSelection[0] != c.CurSelection[1]\n}\n\n\/\/ DeleteSelection deletes the currently selected text\nfunc (c *Cursor) DeleteSelection() {\n\tif c.CurSelection[0].GreaterThan(c.CurSelection[1]) {\n\t\tc.buf.Remove(c.CurSelection[1], c.CurSelection[0])\n\t\tc.Loc = c.CurSelection[1]\n\t} else if !c.HasSelection() {\n\t\treturn\n\t} else {\n\t\tc.buf.Remove(c.CurSelection[0], c.CurSelection[1])\n\t\tc.Loc = c.CurSelection[0]\n\t}\n}\n\n\/\/ Deselect closes the cursor's current selection\n\/\/ Start indicates whether the cursor should be placed\n\/\/ at the start or end of the selection\nfunc (c *Cursor) Deselect(start bool) {\n\tif c.HasSelection() {\n\t\tif start {\n\t\t\tc.Loc = c.CurSelection[0]\n\t\t} else {\n\t\t\tc.Loc = c.CurSelection[1].Move(-1, c.buf)\n\t\t}\n\t\tc.ResetSelection()\n\t\tc.StoreVisualX()\n\t}\n}\n\n\/\/ GetSelection returns the cursor's selection\nfunc (c *Cursor) GetSelection() []byte {\n\tif InBounds(c.CurSelection[0], c.buf) && InBounds(c.CurSelection[1], c.buf) {\n\t\tif c.CurSelection[0].GreaterThan(c.CurSelection[1]) {\n\t\t\treturn c.buf.Substr(c.CurSelection[1], c.CurSelection[0])\n\t\t}\n\t\treturn c.buf.Substr(c.CurSelection[0], c.CurSelection[1])\n\t}\n\treturn []byte{}\n}\n\n\/\/ SelectLine selects the current line\nfunc (c *Cursor) SelectLine() {\n\tc.Start()\n\tc.SetSelectionStart(c.Loc)\n\tc.End()\n\tif len(c.buf.lines)-1 > c.Y {\n\t\tc.SetSelectionEnd(c.Loc.Move(1, c.buf))\n\t} else {\n\t\tc.SetSelectionEnd(c.Loc)\n\t}\n\n\tc.OrigSelection = c.CurSelection\n}\n\n\/\/ AddLineToSelection adds the current line to the selection\nfunc (c *Cursor) AddLineToSelection() {\n\tif c.Loc.LessThan(c.OrigSelection[0]) {\n\t\tc.Start()\n\t\tc.SetSelectionStart(c.Loc)\n\t\tc.SetSelectionEnd(c.OrigSelection[1])\n\t}\n\tif c.Loc.GreaterThan(c.OrigSelection[1]) {\n\t\tc.End()\n\t\tc.SetSelectionEnd(c.Loc.Move(1, c.buf))\n\t\tc.SetSelectionStart(c.OrigSelection[0])\n\t}\n\n\tif c.Loc.LessThan(c.OrigSelection[1]) && c.Loc.GreaterThan(c.OrigSelection[0]) {\n\t\tc.CurSelection = c.OrigSelection\n\t}\n}\n\n\/\/ UpN moves the cursor up N lines (if possible)\nfunc (c *Cursor) UpN(amount int) {\n\tproposedY := c.Y - amount\n\tif proposedY < 0 {\n\t\tproposedY = 0\n\t} else if proposedY >= len(c.buf.lines) {\n\t\tproposedY = len(c.buf.lines) - 1\n\t}\n\n\tbytes := c.buf.LineBytes(proposedY)\n\tc.X = c.GetCharPosInLine(bytes, c.LastVisualX)\n\n\tif c.X > util.CharacterCount(bytes) || (amount < 0 && proposedY == c.Y) {\n\t\tc.X = util.CharacterCount(bytes)\n\t}\n\n\tc.Y = proposedY\n}\n\n\/\/ DownN moves the cursor down N lines (if possible)\nfunc (c *Cursor) DownN(amount int) {\n\tc.UpN(-amount)\n}\n\n\/\/ Up moves the cursor up one line (if possible)\nfunc (c *Cursor) Up() {\n\tc.UpN(1)\n}\n\n\/\/ Down moves the cursor down one line (if possible)\nfunc (c *Cursor) Down() {\n\tc.DownN(1)\n}\n\n\/\/ Left moves the cursor left one cell (if possible) or to\n\/\/ the previous line if it is at the beginning\nfunc (c *Cursor) Left() {\n\tif c.Loc == c.buf.Start() {\n\t\treturn\n\t}\n\tif c.X > 0 {\n\t\tc.X--\n\t} else {\n\t\tc.Up()\n\t\tc.End()\n\t}\n\tc.StoreVisualX()\n}\n\n\/\/ Right moves the cursor right one cell (if possible) or\n\/\/ to the next line if it is at the end\nfunc (c *Cursor) Right() {\n\tif c.Loc == c.buf.End() {\n\t\treturn\n\t}\n\tif c.X < util.CharacterCount(c.buf.LineBytes(c.Y)) {\n\t\tc.X++\n\t} else {\n\t\tc.Down()\n\t\tc.Start()\n\t}\n\tc.StoreVisualX()\n}\n\n\/\/ Relocate makes sure that the cursor is inside the bounds\n\/\/ of the buffer If it isn't, it moves it to be within the\n\/\/ buffer's lines\nfunc (c *Cursor) Relocate() {\n\tif c.Y < 0 {\n\t\tc.Y = 0\n\t} else if c.Y >= len(c.buf.lines) {\n\t\tc.Y = len(c.buf.lines) - 1\n\t}\n\n\tif c.X < 0 {\n\t\tc.X = 0\n\t} else if c.X > util.CharacterCount(c.buf.LineBytes(c.Y)) {\n\t\tc.X = util.CharacterCount(c.buf.LineBytes(c.Y))\n\t}\n}\n\n\/\/ SelectWord selects the word the cursor is currently on\nfunc (c *Cursor) SelectWord() {\n\tif len(c.buf.LineBytes(c.Y)) == 0 {\n\t\treturn\n\t}\n\n\tif !util.IsWordChar(c.RuneUnder(c.X)) {\n\t\tc.SetSelectionStart(c.Loc)\n\t\tc.SetSelectionEnd(c.Loc.Move(1, c.buf))\n\t\tc.OrigSelection = c.CurSelection\n\t\treturn\n\t}\n\n\tforward, backward := c.X, c.X\n\n\tfor backward > 0 && util.IsWordChar(c.RuneUnder(backward-1)) {\n\t\tbackward--\n\t}\n\n\tc.SetSelectionStart(Loc{backward, c.Y})\n\tc.OrigSelection[0] = c.CurSelection[0]\n\n\tlineLen := util.CharacterCount(c.buf.LineBytes(c.Y)) - 1\n\tfor forward < lineLen && util.IsWordChar(c.RuneUnder(forward+1)) {\n\t\tforward++\n\t}\n\n\tc.SetSelectionEnd(Loc{forward, c.Y}.Move(1, c.buf))\n\tc.OrigSelection[1] = c.CurSelection[1]\n\tc.Loc = c.CurSelection[1]\n}\n\n\/\/ AddWordToSelection adds the word the cursor is currently on\n\/\/ to the selection\nfunc (c *Cursor) AddWordToSelection() {\n\tif c.Loc.GreaterThan(c.OrigSelection[0]) && c.Loc.LessThan(c.OrigSelection[1]) {\n\t\tc.CurSelection = c.OrigSelection\n\t\treturn\n\t}\n\n\tif c.Loc.LessThan(c.OrigSelection[0]) {\n\t\tbackward := c.X\n\n\t\tfor backward > 0 && util.IsWordChar(c.RuneUnder(backward-1)) {\n\t\t\tbackward--\n\t\t}\n\n\t\tc.SetSelectionStart(Loc{backward, c.Y})\n\t\tc.SetSelectionEnd(c.OrigSelection[1])\n\t}\n\n\tif c.Loc.GreaterThan(c.OrigSelection[1]) {\n\t\tforward := c.X\n\n\t\tlineLen := util.CharacterCount(c.buf.LineBytes(c.Y)) - 1\n\t\tfor forward < lineLen && util.IsWordChar(c.RuneUnder(forward+1)) {\n\t\t\tforward++\n\t\t}\n\n\t\tc.SetSelectionEnd(Loc{forward, c.Y}.Move(1, c.buf))\n\t\tc.SetSelectionStart(c.OrigSelection[0])\n\t}\n\n\tc.Loc = c.CurSelection[1]\n}\n\n\/\/ SelectTo selects from the current cursor location to the given\n\/\/ location\nfunc (c *Cursor) SelectTo(loc Loc) {\n\tif loc.GreaterThan(c.OrigSelection[0]) {\n\t\tc.SetSelectionStart(c.OrigSelection[0])\n\t\tc.SetSelectionEnd(loc)\n\t} else {\n\t\tc.SetSelectionStart(loc)\n\t\tc.SetSelectionEnd(c.OrigSelection[0])\n\t}\n}\n\n\/\/ WordRight moves the cursor one word to the right\nfunc (c *Cursor) WordRight() {\n\tfor util.IsWhitespace(c.RuneUnder(c.X)) {\n\t\tif c.X == util.CharacterCount(c.buf.LineBytes(c.Y)) {\n\t\t\tc.Right()\n\t\t\treturn\n\t\t}\n\t\tc.Right()\n\t}\n\tc.Right()\n\tfor util.IsWordChar(c.RuneUnder(c.X)) {\n\t\tif c.X == util.CharacterCount(c.buf.LineBytes(c.Y)) {\n\t\t\treturn\n\t\t}\n\t\tc.Right()\n\t}\n}\n\n\/\/ WordLeft moves the cursor one word to the left\nfunc (c *Cursor) WordLeft() {\n\tc.Left()\n\tfor util.IsWhitespace(c.RuneUnder(c.X)) {\n\t\tif c.X == 0 {\n\t\t\treturn\n\t\t}\n\t\tc.Left()\n\t}\n\tc.Left()\n\tfor util.IsWordChar(c.RuneUnder(c.X)) {\n\t\tif c.X == 0 {\n\t\t\treturn\n\t\t}\n\t\tc.Left()\n\t}\n\tc.Right()\n}\n\n\/\/ RuneUnder returns the rune under the given x position\nfunc (c *Cursor) RuneUnder(x int) rune {\n\tline := c.buf.LineBytes(c.Y)\n\tif len(line) == 0 || x >= util.CharacterCount(line) {\n\t\treturn '\\n'\n\t} else if x < 0 {\n\t\tx = 0\n\t}\n\ti := 0\n\tfor len(line) > 0 {\n\t\tr, _, size := util.DecodeCharacter(line)\n\t\tline = line[size:]\n\n\t\tif i == x {\n\t\t\treturn r\n\t\t}\n\n\t\ti++\n\t}\n\treturn '\\n'\n}\n\nfunc (c *Cursor) StoreVisualX() {\n\tc.LastVisualX = c.GetVisualX()\n}\n<commit_msg>Up arrow on first line brings to start<commit_after>package buffer\n\nimport (\n\t\"github.com\/zyedidia\/clipboard\"\n\t\"github.com\/zyedidia\/micro\/v2\/internal\/util\"\n)\n\n\/\/ InBounds returns whether the given location is a valid character position in the given buffer\nfunc InBounds(pos Loc, buf *Buffer) bool {\n\tif pos.Y < 0 || pos.Y >= len(buf.lines) || pos.X < 0 || pos.X > util.CharacterCount(buf.LineBytes(pos.Y)) {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n\/\/ The Cursor struct stores the location of the cursor in the buffer\n\/\/ as well as the selection\ntype Cursor struct {\n\tbuf *Buffer\n\tLoc\n\n\t\/\/ Last cursor x position\n\tLastVisualX int\n\n\t\/\/ The current selection as a range of character numbers (inclusive)\n\tCurSelection [2]Loc\n\t\/\/ The original selection as a range of character numbers\n\t\/\/ This is used for line and word selection where it is necessary\n\t\/\/ to know what the original selection was\n\tOrigSelection [2]Loc\n\n\t\/\/ Which cursor index is this (for multiple cursors)\n\tNum int\n}\n\nfunc NewCursor(b *Buffer, l Loc) *Cursor {\n\tc := &Cursor{\n\t\tbuf: b,\n\t\tLoc: l,\n\t}\n\tc.StoreVisualX()\n\treturn c\n}\n\nfunc (c *Cursor) SetBuf(b *Buffer) {\n\tc.buf = b\n}\n\nfunc (c *Cursor) Buf() *Buffer {\n\treturn c.buf\n}\n\n\/\/ Goto puts the cursor at the given cursor's location and gives\n\/\/ the current cursor its selection too\nfunc (c *Cursor) Goto(b Cursor) {\n\tc.X, c.Y, c.LastVisualX = b.X, b.Y, b.LastVisualX\n\tc.OrigSelection, c.CurSelection = b.OrigSelection, b.CurSelection\n}\n\n\/\/ GotoLoc puts the cursor at the given cursor's location and gives\n\/\/ the current cursor its selection too\nfunc (c *Cursor) GotoLoc(l Loc) {\n\tc.X, c.Y = l.X, l.Y\n\tc.StoreVisualX()\n}\n\n\/\/ GetVisualX returns the x value of the cursor in visual spaces\nfunc (c *Cursor) GetVisualX() int {\n\tif c.X <= 0 {\n\t\tc.X = 0\n\t\treturn 0\n\t}\n\n\tbytes := c.buf.LineBytes(c.Y)\n\ttabsize := int(c.buf.Settings[\"tabsize\"].(float64))\n\tif c.X > util.CharacterCount(bytes) {\n\t\tc.X = util.CharacterCount(bytes) - 1\n\t}\n\n\treturn util.StringWidth(bytes, c.X, tabsize)\n}\n\n\/\/ GetCharPosInLine gets the char position of a visual x y\n\/\/ coordinate (this is necessary because tabs are 1 char but\n\/\/ 4 visual spaces)\nfunc (c *Cursor) GetCharPosInLine(b []byte, visualPos int) int {\n\ttabsize := int(c.buf.Settings[\"tabsize\"].(float64))\n\treturn util.GetCharPosInLine(b, visualPos, tabsize)\n}\n\n\/\/ Start moves the cursor to the start of the line it is on\nfunc (c *Cursor) Start() {\n\tc.X = 0\n\tc.LastVisualX = c.GetVisualX()\n}\n\n\/\/ StartOfText moves the cursor to the first non-whitespace rune of\n\/\/ the line it is on\nfunc (c *Cursor) StartOfText() {\n\tc.Start()\n\tfor util.IsWhitespace(c.RuneUnder(c.X)) {\n\t\tif c.X == util.CharacterCount(c.buf.LineBytes(c.Y)) {\n\t\t\tbreak\n\t\t}\n\t\tc.Right()\n\t}\n}\n\n\/\/ IsStartOfText returns whether the cursor is at the first\n\/\/ non-whitespace rune of the line it is on\nfunc (c *Cursor) IsStartOfText() bool {\n\tx := 0\n\tfor util.IsWhitespace(c.RuneUnder(x)) {\n\t\tif x == util.CharacterCount(c.buf.LineBytes(c.Y)) {\n\t\t\tbreak\n\t\t}\n\t\tx++\n\t}\n\treturn c.X == x\n}\n\n\/\/ End moves the cursor to the end of the line it is on\nfunc (c *Cursor) End() {\n\tc.X = util.CharacterCount(c.buf.LineBytes(c.Y))\n\tc.LastVisualX = c.GetVisualX()\n}\n\n\/\/ CopySelection copies the user's selection to either \"primary\"\n\/\/ or \"clipboard\"\nfunc (c *Cursor) CopySelection(target string) {\n\tif c.HasSelection() {\n\t\tif target != \"primary\" || c.buf.Settings[\"useprimary\"].(bool) {\n\t\t\tclipboard.WriteAll(string(c.GetSelection()), target)\n\t\t}\n\t}\n}\n\n\/\/ ResetSelection resets the user's selection\nfunc (c *Cursor) ResetSelection() {\n\tc.CurSelection[0] = c.buf.Start()\n\tc.CurSelection[1] = c.buf.Start()\n}\n\n\/\/ SetSelectionStart sets the start of the selection\nfunc (c *Cursor) SetSelectionStart(pos Loc) {\n\tc.CurSelection[0] = pos\n}\n\n\/\/ SetSelectionEnd sets the end of the selection\nfunc (c *Cursor) SetSelectionEnd(pos Loc) {\n\tc.CurSelection[1] = pos\n}\n\n\/\/ HasSelection returns whether or not the user has selected anything\nfunc (c *Cursor) HasSelection() bool {\n\treturn c.CurSelection[0] != c.CurSelection[1]\n}\n\n\/\/ DeleteSelection deletes the currently selected text\nfunc (c *Cursor) DeleteSelection() {\n\tif c.CurSelection[0].GreaterThan(c.CurSelection[1]) {\n\t\tc.buf.Remove(c.CurSelection[1], c.CurSelection[0])\n\t\tc.Loc = c.CurSelection[1]\n\t} else if !c.HasSelection() {\n\t\treturn\n\t} else {\n\t\tc.buf.Remove(c.CurSelection[0], c.CurSelection[1])\n\t\tc.Loc = c.CurSelection[0]\n\t}\n}\n\n\/\/ Deselect closes the cursor's current selection\n\/\/ Start indicates whether the cursor should be placed\n\/\/ at the start or end of the selection\nfunc (c *Cursor) Deselect(start bool) {\n\tif c.HasSelection() {\n\t\tif start {\n\t\t\tc.Loc = c.CurSelection[0]\n\t\t} else {\n\t\t\tc.Loc = c.CurSelection[1].Move(-1, c.buf)\n\t\t}\n\t\tc.ResetSelection()\n\t\tc.StoreVisualX()\n\t}\n}\n\n\/\/ GetSelection returns the cursor's selection\nfunc (c *Cursor) GetSelection() []byte {\n\tif InBounds(c.CurSelection[0], c.buf) && InBounds(c.CurSelection[1], c.buf) {\n\t\tif c.CurSelection[0].GreaterThan(c.CurSelection[1]) {\n\t\t\treturn c.buf.Substr(c.CurSelection[1], c.CurSelection[0])\n\t\t}\n\t\treturn c.buf.Substr(c.CurSelection[0], c.CurSelection[1])\n\t}\n\treturn []byte{}\n}\n\n\/\/ SelectLine selects the current line\nfunc (c *Cursor) SelectLine() {\n\tc.Start()\n\tc.SetSelectionStart(c.Loc)\n\tc.End()\n\tif len(c.buf.lines)-1 > c.Y {\n\t\tc.SetSelectionEnd(c.Loc.Move(1, c.buf))\n\t} else {\n\t\tc.SetSelectionEnd(c.Loc)\n\t}\n\n\tc.OrigSelection = c.CurSelection\n}\n\n\/\/ AddLineToSelection adds the current line to the selection\nfunc (c *Cursor) AddLineToSelection() {\n\tif c.Loc.LessThan(c.OrigSelection[0]) {\n\t\tc.Start()\n\t\tc.SetSelectionStart(c.Loc)\n\t\tc.SetSelectionEnd(c.OrigSelection[1])\n\t}\n\tif c.Loc.GreaterThan(c.OrigSelection[1]) {\n\t\tc.End()\n\t\tc.SetSelectionEnd(c.Loc.Move(1, c.buf))\n\t\tc.SetSelectionStart(c.OrigSelection[0])\n\t}\n\n\tif c.Loc.LessThan(c.OrigSelection[1]) && c.Loc.GreaterThan(c.OrigSelection[0]) {\n\t\tc.CurSelection = c.OrigSelection\n\t}\n}\n\n\/\/ UpN moves the cursor up N lines (if possible)\nfunc (c *Cursor) UpN(amount int) {\n\tproposedY := c.Y - amount\n\tif proposedY < 0 {\n\t\tproposedY = 0\n\t} else if proposedY >= len(c.buf.lines) {\n\t\tproposedY = len(c.buf.lines) - 1\n\t}\n\n\tbytes := c.buf.LineBytes(proposedY)\n\tc.X = c.GetCharPosInLine(bytes, c.LastVisualX)\n\n\tif c.X > util.CharacterCount(bytes) || (amount < 0 && proposedY == c.Y) {\n\t\tc.X = util.CharacterCount(bytes)\n\t\tc.StoreVisualX()\n\t}\n\n\tif c.X < 0 || (amount > 0 && proposedY == c.Y) {\n\t\tc.X = 0\n\t\tc.StoreVisualX()\n\t}\n\n\tc.Y = proposedY\n}\n\n\/\/ DownN moves the cursor down N lines (if possible)\nfunc (c *Cursor) DownN(amount int) {\n\tc.UpN(-amount)\n}\n\n\/\/ Up moves the cursor up one line (if possible)\nfunc (c *Cursor) Up() {\n\tc.UpN(1)\n}\n\n\/\/ Down moves the cursor down one line (if possible)\nfunc (c *Cursor) Down() {\n\tc.DownN(1)\n}\n\n\/\/ Left moves the cursor left one cell (if possible) or to\n\/\/ the previous line if it is at the beginning\nfunc (c *Cursor) Left() {\n\tif c.Loc == c.buf.Start() {\n\t\treturn\n\t}\n\tif c.X > 0 {\n\t\tc.X--\n\t} else {\n\t\tc.Up()\n\t\tc.End()\n\t}\n\tc.StoreVisualX()\n}\n\n\/\/ Right moves the cursor right one cell (if possible) or\n\/\/ to the next line if it is at the end\nfunc (c *Cursor) Right() {\n\tif c.Loc == c.buf.End() {\n\t\treturn\n\t}\n\tif c.X < util.CharacterCount(c.buf.LineBytes(c.Y)) {\n\t\tc.X++\n\t} else {\n\t\tc.Down()\n\t\tc.Start()\n\t}\n\tc.StoreVisualX()\n}\n\n\/\/ Relocate makes sure that the cursor is inside the bounds\n\/\/ of the buffer If it isn't, it moves it to be within the\n\/\/ buffer's lines\nfunc (c *Cursor) Relocate() {\n\tif c.Y < 0 {\n\t\tc.Y = 0\n\t} else if c.Y >= len(c.buf.lines) {\n\t\tc.Y = len(c.buf.lines) - 1\n\t}\n\n\tif c.X < 0 {\n\t\tc.X = 0\n\t} else if c.X > util.CharacterCount(c.buf.LineBytes(c.Y)) {\n\t\tc.X = util.CharacterCount(c.buf.LineBytes(c.Y))\n\t}\n}\n\n\/\/ SelectWord selects the word the cursor is currently on\nfunc (c *Cursor) SelectWord() {\n\tif len(c.buf.LineBytes(c.Y)) == 0 {\n\t\treturn\n\t}\n\n\tif !util.IsWordChar(c.RuneUnder(c.X)) {\n\t\tc.SetSelectionStart(c.Loc)\n\t\tc.SetSelectionEnd(c.Loc.Move(1, c.buf))\n\t\tc.OrigSelection = c.CurSelection\n\t\treturn\n\t}\n\n\tforward, backward := c.X, c.X\n\n\tfor backward > 0 && util.IsWordChar(c.RuneUnder(backward-1)) {\n\t\tbackward--\n\t}\n\n\tc.SetSelectionStart(Loc{backward, c.Y})\n\tc.OrigSelection[0] = c.CurSelection[0]\n\n\tlineLen := util.CharacterCount(c.buf.LineBytes(c.Y)) - 1\n\tfor forward < lineLen && util.IsWordChar(c.RuneUnder(forward+1)) {\n\t\tforward++\n\t}\n\n\tc.SetSelectionEnd(Loc{forward, c.Y}.Move(1, c.buf))\n\tc.OrigSelection[1] = c.CurSelection[1]\n\tc.Loc = c.CurSelection[1]\n}\n\n\/\/ AddWordToSelection adds the word the cursor is currently on\n\/\/ to the selection\nfunc (c *Cursor) AddWordToSelection() {\n\tif c.Loc.GreaterThan(c.OrigSelection[0]) && c.Loc.LessThan(c.OrigSelection[1]) {\n\t\tc.CurSelection = c.OrigSelection\n\t\treturn\n\t}\n\n\tif c.Loc.LessThan(c.OrigSelection[0]) {\n\t\tbackward := c.X\n\n\t\tfor backward > 0 && util.IsWordChar(c.RuneUnder(backward-1)) {\n\t\t\tbackward--\n\t\t}\n\n\t\tc.SetSelectionStart(Loc{backward, c.Y})\n\t\tc.SetSelectionEnd(c.OrigSelection[1])\n\t}\n\n\tif c.Loc.GreaterThan(c.OrigSelection[1]) {\n\t\tforward := c.X\n\n\t\tlineLen := util.CharacterCount(c.buf.LineBytes(c.Y)) - 1\n\t\tfor forward < lineLen && util.IsWordChar(c.RuneUnder(forward+1)) {\n\t\t\tforward++\n\t\t}\n\n\t\tc.SetSelectionEnd(Loc{forward, c.Y}.Move(1, c.buf))\n\t\tc.SetSelectionStart(c.OrigSelection[0])\n\t}\n\n\tc.Loc = c.CurSelection[1]\n}\n\n\/\/ SelectTo selects from the current cursor location to the given\n\/\/ location\nfunc (c *Cursor) SelectTo(loc Loc) {\n\tif loc.GreaterThan(c.OrigSelection[0]) {\n\t\tc.SetSelectionStart(c.OrigSelection[0])\n\t\tc.SetSelectionEnd(loc)\n\t} else {\n\t\tc.SetSelectionStart(loc)\n\t\tc.SetSelectionEnd(c.OrigSelection[0])\n\t}\n}\n\n\/\/ WordRight moves the cursor one word to the right\nfunc (c *Cursor) WordRight() {\n\tfor util.IsWhitespace(c.RuneUnder(c.X)) {\n\t\tif c.X == util.CharacterCount(c.buf.LineBytes(c.Y)) {\n\t\t\tc.Right()\n\t\t\treturn\n\t\t}\n\t\tc.Right()\n\t}\n\tc.Right()\n\tfor util.IsWordChar(c.RuneUnder(c.X)) {\n\t\tif c.X == util.CharacterCount(c.buf.LineBytes(c.Y)) {\n\t\t\treturn\n\t\t}\n\t\tc.Right()\n\t}\n}\n\n\/\/ WordLeft moves the cursor one word to the left\nfunc (c *Cursor) WordLeft() {\n\tc.Left()\n\tfor util.IsWhitespace(c.RuneUnder(c.X)) {\n\t\tif c.X == 0 {\n\t\t\treturn\n\t\t}\n\t\tc.Left()\n\t}\n\tc.Left()\n\tfor util.IsWordChar(c.RuneUnder(c.X)) {\n\t\tif c.X == 0 {\n\t\t\treturn\n\t\t}\n\t\tc.Left()\n\t}\n\tc.Right()\n}\n\n\/\/ RuneUnder returns the rune under the given x position\nfunc (c *Cursor) RuneUnder(x int) rune {\n\tline := c.buf.LineBytes(c.Y)\n\tif len(line) == 0 || x >= util.CharacterCount(line) {\n\t\treturn '\\n'\n\t} else if x < 0 {\n\t\tx = 0\n\t}\n\ti := 0\n\tfor len(line) > 0 {\n\t\tr, _, size := util.DecodeCharacter(line)\n\t\tline = line[size:]\n\n\t\tif i == x {\n\t\t\treturn r\n\t\t}\n\n\t\ti++\n\t}\n\treturn '\\n'\n}\n\nfunc (c *Cursor) StoreVisualX() {\n\tc.LastVisualX = c.GetVisualX()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2017 Circonus, Inc. <support@circonus.com>\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\/\/\n\npackage check\n\nimport (\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\nfunc (c *Check) setReverseConfigs() error {\n\tc.revConfigs = nil\n\tif c.broker == nil {\n\t\treturn errors.New(\"broker is uninitialized\")\n\t}\n\tif c.checkConfig == nil {\n\t\treturn errors.New(\"check is uninitialized\")\n\t}\n\n\tif len(c.checkConfig.ReverseURLs) == 0 {\n\t\treturn errors.New(\"no reverse URLs found in check\")\n\t}\n\n\tcfgs := make(ReverseConfigs)\n\n\tfor _, rURL := range c.checkConfig.ReverseURLs {\n\t\t\/\/ Replace protocol, url.Parse does not understand 'mtev_reverse'.\n\t\t\/\/ Important part is validating what's after 'proto:\/\/'.\n\t\t\/\/ Using raw tls connections, the url protocol is not germane.\n\t\treverseURL, err := url.Parse(strings.Replace(rURL, \"mtev_reverse\", \"https\", -1))\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing check bundle reverse URL (%s)\", rURL)\n\t\t}\n\n\t\tbrokerAddr, err := net.ResolveTCPAddr(\"tcp\", reverseURL.Host)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"invalid reverse service address (%s)\", rURL)\n\t\t}\n\n\t\ttlsConfig, cn, err := c.brokerTLSConfig(reverseURL)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"creating TLS config for (%s - %s)\", c.broker.CID, rURL)\n\t\t}\n\n\t\tcfgs[cn] = ReverseConfig{\n\t\t\tCN: cn,\n\t\t\tReverseURL: reverseURL,\n\t\t\tBrokerID: c.broker.CID,\n\t\t\tBrokerAddr: brokerAddr,\n\t\t\tTLSConfig: tlsConfig,\n\t\t}\n\t}\n\n\tc.revConfigs = &cfgs\n\treturn nil\n}\n\n\/\/ FindPrimaryBrokerInstance will walk through reverse urls to locate the instance\n\/\/ in a broker cluster which is the current check owner. Returns the instance cn or error.\nfunc (c *Check) FindPrimaryBrokerInstance(cfgs *ReverseConfigs) (string, error) {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tprimaryHost := \"\"\n\tprimaryCN := \"\"\n\n\t\/\/ there is only one reverse url, broker is not clustered\n\tif len(*cfgs) == 1 {\n\t\tc.logger.Debug().Msg(\"non-clustered broker identified\")\n\t\tfor name := range *cfgs {\n\t\t\treturn name, nil\n\t\t}\n\t}\n\n\tc.logger.Debug().Msg(\"clustered broker identified, determining which owns check\")\n\t\/\/ clustered brokers, need to identify which broker is the primary for the check\n\tfor name, cfg := range *cfgs {\n\t\tclient := &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\t\tDial: (&net.Dialer{\n\t\t\t\t\tTimeout: 5 * time.Second,\n\t\t\t\t}).Dial,\n\t\t\t\tTLSHandshakeTimeout: 3 * time.Second,\n\t\t\t\tTLSClientConfig: cfg.TLSConfig, \/\/ all reverse brokers use HTTPS\/TLS\n\t\t\t\tDisableKeepAlives: true,\n\t\t\t\tMaxIdleConnsPerHost: -1,\n\t\t\t\tDisableCompression: false,\n\t\t\t},\n\t\t}\n\n\t\townerReqURL := strings.Replace(cfg.ReverseURL.String(), \"\/check\/\", \"\/checks\/owner\/\", 1)\n\t\tc.logger.Debug().Str(\"trying\", ownerReqURL).Msg(\"checking\")\n\n\t\treq, err := http.NewRequest(\"GET\", ownerReqURL, nil)\n\t\tif err != nil {\n\t\t\tc.logger.Warn().Err(err).Str(\"url\", ownerReqURL).Msg(\"creating check owner request\")\n\t\t\treturn \"\", err\n\t\t}\n\t\treq.Header.Add(\"Accept\", \"application\/json\")\n\n\t\tresp, err := client.Do(req)\n\t\tif err != nil {\n\t\t\tc.logger.Warn().Err(err).Str(\"url\", cfg.ReverseURL.String()).Msg(\"executing check owner request\")\n\t\t\tif nerr, ok := err.(net.Error); ok {\n\t\t\t\tif nerr.Timeout() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn \"\", err\n\t\t}\n\t\tresp.Body.Close() \/\/ we only care about headers\n\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusNoContent:\n\t\t\tprimaryCN = name\n\t\t\tc.logger.Debug().Str(\"cn\", primaryCN).Msg(\"found owner\")\n\t\tcase http.StatusFound:\n\t\t\tlocation := resp.Header.Get(\"Location\")\n\t\t\tif location == \"\" {\n\t\t\t\tc.logger.Warn().Msg(\"received 302 but 'Location' header missing\/blank\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.logger.Debug().Str(\"location\", location).Msg(\"received Location header\")\n\t\t\t\/\/ NOTE: this isn't actually a URL, the 'host' portion is actually the CN of\n\t\t\t\/\/ the broker detail which should be used for the reverse connection.\n\t\t\tpu, err := url.Parse(location)\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Warn().Err(err).Str(\"location\", location).Msg(\"unable to parse location\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tprimaryHost = pu.Host\n\t\t\tc.logger.Debug().Str(\"cn\", primaryCN).Msg(\"using owner from location header\")\n\t\tdefault:\n\t\t\t\/\/ try next reverse url host (e.g. if there was an error connecting to this one)\n\t\t}\n\t}\n\n\tif primaryCN == \"\" && primaryHost != \"\" {\n\t\tfor name, cfg := range *cfgs {\n\t\t\tif cfg.ReverseURL.Host == primaryHost {\n\t\t\t\tprimaryCN = name\n\t\t\t}\n\t\t}\n\t}\n\n\tif primaryCN == \"\" {\n\t\treturn \"\", &ErrNoOwnerFound{\n\t\t\tErr: \"unable to locate check owner broker instance\",\n\t\t\tCheckID: c.checkConfig.CID,\n\t\t}\n\t}\n\n\tc.logger.Debug().Str(\"cn\", primaryCN).Msg(\"check owner broker instance\")\n\treturn primaryCN, nil\n}\n<commit_msg>fix: clarify origin of reverse url (check no longer check bundle)<commit_after>\/\/ Copyright © 2017 Circonus, Inc. <support@circonus.com>\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\/\/\n\npackage check\n\nimport (\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\nfunc (c *Check) setReverseConfigs() error {\n\tc.revConfigs = nil\n\tif c.broker == nil {\n\t\treturn errors.New(\"broker is uninitialized\")\n\t}\n\tif c.checkConfig == nil {\n\t\treturn errors.New(\"check is uninitialized\")\n\t}\n\n\tif len(c.checkConfig.ReverseURLs) == 0 {\n\t\treturn errors.New(\"no reverse URLs found in check\")\n\t}\n\n\tcfgs := make(ReverseConfigs)\n\n\tfor _, rURL := range c.checkConfig.ReverseURLs {\n\t\t\/\/ Replace protocol, url.Parse does not understand 'mtev_reverse'.\n\t\t\/\/ Important part is validating what's after 'proto:\/\/'.\n\t\t\/\/ Using raw tls connections, the url protocol is not germane.\n\t\treverseURL, err := url.Parse(strings.Replace(rURL, \"mtev_reverse\", \"https\", -1))\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"parsing check reverse URL (%s)\", rURL)\n\t\t}\n\n\t\tbrokerAddr, err := net.ResolveTCPAddr(\"tcp\", reverseURL.Host)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"invalid reverse service address (%s)\", rURL)\n\t\t}\n\n\t\ttlsConfig, cn, err := c.brokerTLSConfig(reverseURL)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"creating TLS config for (%s - %s)\", c.broker.CID, rURL)\n\t\t}\n\n\t\tcfgs[cn] = ReverseConfig{\n\t\t\tCN: cn,\n\t\t\tReverseURL: reverseURL,\n\t\t\tBrokerID: c.broker.CID,\n\t\t\tBrokerAddr: brokerAddr,\n\t\t\tTLSConfig: tlsConfig,\n\t\t}\n\t}\n\n\tc.revConfigs = &cfgs\n\treturn nil\n}\n\n\/\/ FindPrimaryBrokerInstance will walk through reverse urls to locate the instance\n\/\/ in a broker cluster which is the current check owner. Returns the instance cn or error.\nfunc (c *Check) FindPrimaryBrokerInstance(cfgs *ReverseConfigs) (string, error) {\n\tc.Lock()\n\tdefer c.Unlock()\n\n\tprimaryCN := \"\"\n\n\t\/\/ there is only one reverse url, broker is not clustered\n\tif len(*cfgs) == 1 {\n\t\tc.logger.Debug().Msg(\"non-clustered broker identified\")\n\t\tfor name := range *cfgs {\n\t\t\treturn name, nil\n\t\t}\n\t}\n\n\tc.logger.Debug().Msg(\"clustered broker identified, determining which owns check\")\n\t\/\/ clustered brokers, need to identify which broker is the primary for the check\n\tfor name, cfg := range *cfgs {\n\t\tclient := &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\t\tDial: (&net.Dialer{\n\t\t\t\t\tTimeout: 5 * time.Second,\n\t\t\t\t}).Dial,\n\t\t\t\tTLSHandshakeTimeout: 3 * time.Second,\n\t\t\t\tTLSClientConfig: cfg.TLSConfig, \/\/ all reverse brokers use HTTPS\/TLS\n\t\t\t\tDisableKeepAlives: true,\n\t\t\t\tMaxIdleConnsPerHost: -1,\n\t\t\t\tDisableCompression: false,\n\t\t\t},\n\t\t}\n\n\t\townerReqURL := strings.Replace(cfg.ReverseURL.String(), \"\/check\/\", \"\/checks\/owner\/\", 1)\n\t\tc.logger.Debug().Str(\"trying\", ownerReqURL).Msg(\"checking\")\n\n\t\treq, err := http.NewRequest(\"GET\", ownerReqURL, nil)\n\t\tif err != nil {\n\t\t\tc.logger.Warn().Err(err).Str(\"url\", ownerReqURL).Msg(\"creating check owner request\")\n\t\t\treturn \"\", err\n\t\t}\n\t\treq.Header.Add(\"Accept\", \"application\/json\")\n\n\t\tresp, err := client.Do(req)\n\t\tif err != nil {\n\t\t\tc.logger.Warn().Err(err).Str(\"url\", cfg.ReverseURL.String()).Msg(\"executing check owner request\")\n\t\t\tif nerr, ok := err.(net.Error); ok {\n\t\t\t\tif nerr.Timeout() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn \"\", err\n\t\t}\n\t\tresp.Body.Close() \/\/ we only care about headers\n\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusNoContent:\n\t\t\tprimaryCN = name\n\t\t\tc.logger.Debug().Str(\"cn\", primaryCN).Msg(\"found owner\")\n\t\tcase http.StatusFound:\n\t\t\tlocation := resp.Header.Get(\"Location\")\n\t\t\tif location == \"\" {\n\t\t\t\tc.logger.Warn().Str(\"req_url\", ownerReqURL).Msg(\"received 302 but 'Location' header missing\/blank\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.logger.Debug().Str(\"location\", location).Msg(\"received Location header\")\n\t\t\t\/\/ NOTE: this isn't actually a URL, the 'host' portion is actually the CN of\n\t\t\t\/\/ the broker detail which should be used for the reverse connection.\n\t\t\tpu, err := url.Parse(strings.Replace(location, \"mtev_reverse\", \"https\", 1))\n\t\t\tif err != nil {\n\t\t\t\tc.logger.Warn().Err(err).Str(\"location\", location).Msg(\"unable to parse location\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tprimaryCN = pu.Host\n\t\t\tc.logger.Debug().Str(\"cn\", primaryCN).Msg(\"using owner from location header\")\n\t\tdefault:\n\t\t\t\/\/ try next reverse url host (e.g. if there was an error connecting to this one)\n\t\t}\n\t}\n\n\tif primaryCN == \"\" {\n\t\treturn \"\", &ErrNoOwnerFound{\n\t\t\tErr: \"unable to locate check owner broker instance\",\n\t\t\tCheckID: c.checkConfig.CID,\n\t\t}\n\t}\n\n\tif _, ok := (*cfgs)[primaryCN]; !ok {\n\t\treturn \"\", &ErrInvalidOwner{\n\t\t\tErr: \"broker owner identified with invalid CN\",\n\t\t\tCheckID: c.checkConfig.CID,\n\t\t\tBrokerCN: primaryCN,\n\t\t}\n\t}\n\n\tc.logger.Debug().Str(\"cn\", primaryCN).Msg(\"check owner broker instance\")\n\treturn primaryCN, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"flag\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/spf13\/pflag\"\n\t\"github.com\/spf13\/viper\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\tyaml \"gopkg.in\/yaml.v2\"\n)\n\nvar (\n\t\/\/ Config will hold final configuration read from the file and flags\n\tConfig configSchema\n)\n\nfunc init() {\n\tpflag.String(\"alertmanager.uri\", \"\", \"Alertmanager server URI\")\n\tpflag.Duration(\"alertmanager.interval\", time.Second*60,\n\t\t\"Interval for fetching data from Alertmanager servers\")\n\n\tpflag.Bool(\n\t\t\"annotations.default.hidden\", false,\n\t\t\"Hide all annotations by default unless explicitly listed in the 'visible' list\")\n\tpflag.StringSlice(\"annotations.hidden\", []string{},\n\t\t\"List of annotations that are hidden by default\")\n\tpflag.StringSlice(\"annotations.visible\", []string{},\n\t\t\"List of annotations that are visible by default\")\n\n\tpflag.String(\"config.dir\", \".\",\n\t\t\"Directory with configuration file to read\")\n\tpflag.String(\"config.file\", \"unsee\",\n\t\t\"Name of the configuration file to read\")\n\n\tpflag.Bool(\"debug\", false, \"Enable debug mode\")\n\n\tpflag.StringSlice(\"filters.default\", []string{}, \"List of default filters\")\n\n\tpflag.StringSlice(\"labels.color.static\", []string{},\n\t\t\"List of label names that should have the same (but distinct) color\")\n\tpflag.StringSlice(\"labels.color.unique\", []string{},\n\t\t\"List of label names that should have unique color\")\n\tpflag.StringSlice(\"labels.keep\", []string{},\n\t\t\"List of labels to keep, all other labels will be stripped\")\n\tpflag.StringSlice(\"labels.strip\", []string{}, \"List of labels to ignore\")\n\n\tpflag.Bool(\"log.config\", true, \"Log used configuration to log on startup\")\n\tpflag.String(\"log.level\", \"info\",\n\t\t\"Log level, one of: debug, info, warning, error, fatal and panic\")\n\n\tpflag.StringSlice(\"receivers.keep\", []string{},\n\t\t\"List of receivers to keep, all alerts with different receivers will be ignored\")\n\tpflag.StringSlice(\"receivers.strip\", []string{},\n\t\t\"List of receivers to not display alerts for\")\n\n\tpflag.String(\"listen.address\", \"\", \"IP\/Hostname to listen on\")\n\tpflag.Int(\"listen.port\", 8080, \"HTTP port to listen on\")\n\tpflag.String(\"listen.prefix\", \"\/\", \"URL prefix\")\n\n\tpflag.String(\"sentry.public\", \"\", \"Sentry DSN for Go exceptions\")\n\tpflag.String(\"sentry.private\", \"\", \"Sentry DSN for JavaScript exceptions\")\n}\n\n\/\/ ReadConfig will read all sources of configuration, merge all keys and\n\/\/ populate global Config variable, it should be only called on startup\nfunc (config *configSchema) Read() {\n\tv := viper.New()\n\n\tpflag.Parse()\n\n\tpflag.CommandLine.AddGoFlagSet(flag.CommandLine)\n\tpflag.Parse()\n\tv.BindPFlags(pflag.CommandLine)\n\n\tv.AutomaticEnv()\n\tv.SetEnvKeyReplacer(strings.NewReplacer(\".\", \"_\"))\n\n\t\/\/ special envs\n\t\/\/ HOST and PORT is used by gin\n\tv.BindEnv(\"listen.address\", \"HOST\")\n\tv.BindEnv(\"listen.port\", \"PORT\")\n\t\/\/ raven-go expects this\n\tv.BindEnv(\"sentry.private\", \"SENTRY_DSN\")\n\n\t\/\/ bind legacy env variables\n\tconfig.legacyEnvs(v)\n\n\tconfigFile := v.GetString(\"config.file\")\n\tconfigDir := v.GetString(\"config.dir\")\n\tv.SetConfigType(\"yaml\")\n\tv.SetConfigName(configFile)\n\tv.AddConfigPath(configDir)\n\tlog.Infof(\"Reading configuration file %s.yaml\", path.Join(configDir, configFile))\n\terr := v.ReadInConfig()\n\tif v.ConfigFileUsed() != \"\" && err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif v.ConfigFileUsed() != \"\" {\n\t\tlog.Infof(\"Config file used: %s\", v.ConfigFileUsed())\n\t}\n\n\tconfig.Alertmanager.Interval = v.GetDuration(\"alertmanager.interval\")\n\tconfig.Annotations.Default.Hidden = v.GetBool(\"annotations.default.hidden\")\n\tconfig.Annotations.Hidden = v.GetStringSlice(\"annotations.hidden\")\n\tconfig.Annotations.Visible = v.GetStringSlice(\"annotations.visible\")\n\tconfig.Debug = v.GetBool(\"debug\")\n\tconfig.Filters.Default = v.GetStringSlice(\"filters.default\")\n\tconfig.Labels.Color.Static = v.GetStringSlice(\"labels.color.static\")\n\tconfig.Labels.Color.Unique = v.GetStringSlice(\"labels.color.unique\")\n\tconfig.Labels.Keep = v.GetStringSlice(\"labels.keep\")\n\tconfig.Labels.Strip = v.GetStringSlice(\"labels.strip\")\n\tconfig.Listen.Address = v.GetString(\"listen.address\")\n\tconfig.Listen.Port = v.GetInt(\"listen.port\")\n\tconfig.Listen.Prefix = v.GetString(\"listen.prefix\")\n\tconfig.Log.Config = v.GetBool(\"log.config\")\n\tconfig.Log.Level = v.GetString(\"log.level\")\n\tconfig.Receivers.Keep = v.GetStringSlice(\"receivers.keep\")\n\tconfig.Receivers.Strip = v.GetStringSlice(\"receivers.strip\")\n\tconfig.Sentry.Private = v.GetString(\"sentry.private\")\n\tconfig.Sentry.Public = v.GetString(\"sentry.public\")\n\n\terr = v.UnmarshalKey(\"alertmanager.servers\", &config.Alertmanager.Servers)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\terr = v.UnmarshalKey(\"jira\", &config.JIRA)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ populate legacy settings if needed\n\tconfig.legacySettingsFallback()\n\n\t\/\/ accept single Alertmanager server from flag\/env if nothing is set yet\n\tif len(config.Alertmanager.Servers) == 0 && v.GetString(\"alertmanager.uri\") != \"\" {\n\t\tconfig.Alertmanager.Servers = []alertmanagerConfig{\n\t\t\talertmanagerConfig{\n\t\t\t\tName: \"default\",\n\t\t\t\tURI: v.GetString(\"alertmanager.uri\"),\n\t\t\t\tTimeout: time.Second * 40,\n\t\t\t},\n\t\t}\n\t}\n}\n\n\/\/ LogValues will dump runtime config to logs\nfunc (config *configSchema) LogValues() {\n\t\/\/ make a copy of our config so we can edit it\n\tcfg := configSchema(*config)\n\n\t\/\/ replace passwords in Alertmanager URIs with 'xxx'\n\tservers := []alertmanagerConfig{}\n\tfor _, s := range cfg.Alertmanager.Servers {\n\t\tserver := alertmanagerConfig{\n\t\t\tName: s.Name,\n\t\t\tURI: hideURLPassword(s.URI),\n\t\t\tTimeout: s.Timeout,\n\t\t}\n\t\tservers = append(servers, server)\n\t}\n\tcfg.Alertmanager.Servers = servers\n\n\tout, err := yaml.Marshal(cfg)\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\n\tlog.Info(\"Parsed configuration:\")\n\tscanner := bufio.NewScanner(bytes.NewReader(out))\n\tfor scanner.Scan() {\n\t\tlog.Info(scanner.Text())\n\t}\n}\n<commit_msg>Hide sentry secrets in startup logs<commit_after>package config\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"flag\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/spf13\/pflag\"\n\t\"github.com\/spf13\/viper\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\tyaml \"gopkg.in\/yaml.v2\"\n)\n\nvar (\n\t\/\/ Config will hold final configuration read from the file and flags\n\tConfig configSchema\n)\n\nfunc init() {\n\tpflag.String(\"alertmanager.uri\", \"\", \"Alertmanager server URI\")\n\tpflag.Duration(\"alertmanager.interval\", time.Second*60,\n\t\t\"Interval for fetching data from Alertmanager servers\")\n\n\tpflag.Bool(\n\t\t\"annotations.default.hidden\", false,\n\t\t\"Hide all annotations by default unless explicitly listed in the 'visible' list\")\n\tpflag.StringSlice(\"annotations.hidden\", []string{},\n\t\t\"List of annotations that are hidden by default\")\n\tpflag.StringSlice(\"annotations.visible\", []string{},\n\t\t\"List of annotations that are visible by default\")\n\n\tpflag.String(\"config.dir\", \".\",\n\t\t\"Directory with configuration file to read\")\n\tpflag.String(\"config.file\", \"unsee\",\n\t\t\"Name of the configuration file to read\")\n\n\tpflag.Bool(\"debug\", false, \"Enable debug mode\")\n\n\tpflag.StringSlice(\"filters.default\", []string{}, \"List of default filters\")\n\n\tpflag.StringSlice(\"labels.color.static\", []string{},\n\t\t\"List of label names that should have the same (but distinct) color\")\n\tpflag.StringSlice(\"labels.color.unique\", []string{},\n\t\t\"List of label names that should have unique color\")\n\tpflag.StringSlice(\"labels.keep\", []string{},\n\t\t\"List of labels to keep, all other labels will be stripped\")\n\tpflag.StringSlice(\"labels.strip\", []string{}, \"List of labels to ignore\")\n\n\tpflag.Bool(\"log.config\", true, \"Log used configuration to log on startup\")\n\tpflag.String(\"log.level\", \"info\",\n\t\t\"Log level, one of: debug, info, warning, error, fatal and panic\")\n\n\tpflag.StringSlice(\"receivers.keep\", []string{},\n\t\t\"List of receivers to keep, all alerts with different receivers will be ignored\")\n\tpflag.StringSlice(\"receivers.strip\", []string{},\n\t\t\"List of receivers to not display alerts for\")\n\n\tpflag.String(\"listen.address\", \"\", \"IP\/Hostname to listen on\")\n\tpflag.Int(\"listen.port\", 8080, \"HTTP port to listen on\")\n\tpflag.String(\"listen.prefix\", \"\/\", \"URL prefix\")\n\n\tpflag.String(\"sentry.public\", \"\", \"Sentry DSN for Go exceptions\")\n\tpflag.String(\"sentry.private\", \"\", \"Sentry DSN for JavaScript exceptions\")\n}\n\n\/\/ ReadConfig will read all sources of configuration, merge all keys and\n\/\/ populate global Config variable, it should be only called on startup\nfunc (config *configSchema) Read() {\n\tv := viper.New()\n\n\tpflag.Parse()\n\n\tpflag.CommandLine.AddGoFlagSet(flag.CommandLine)\n\tpflag.Parse()\n\tv.BindPFlags(pflag.CommandLine)\n\n\tv.AutomaticEnv()\n\tv.SetEnvKeyReplacer(strings.NewReplacer(\".\", \"_\"))\n\n\t\/\/ special envs\n\t\/\/ HOST and PORT is used by gin\n\tv.BindEnv(\"listen.address\", \"HOST\")\n\tv.BindEnv(\"listen.port\", \"PORT\")\n\t\/\/ raven-go expects this\n\tv.BindEnv(\"sentry.private\", \"SENTRY_DSN\")\n\n\t\/\/ bind legacy env variables\n\tconfig.legacyEnvs(v)\n\n\tconfigFile := v.GetString(\"config.file\")\n\tconfigDir := v.GetString(\"config.dir\")\n\tv.SetConfigType(\"yaml\")\n\tv.SetConfigName(configFile)\n\tv.AddConfigPath(configDir)\n\tlog.Infof(\"Reading configuration file %s.yaml\", path.Join(configDir, configFile))\n\terr := v.ReadInConfig()\n\tif v.ConfigFileUsed() != \"\" && err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif v.ConfigFileUsed() != \"\" {\n\t\tlog.Infof(\"Config file used: %s\", v.ConfigFileUsed())\n\t}\n\n\tconfig.Alertmanager.Interval = v.GetDuration(\"alertmanager.interval\")\n\tconfig.Annotations.Default.Hidden = v.GetBool(\"annotations.default.hidden\")\n\tconfig.Annotations.Hidden = v.GetStringSlice(\"annotations.hidden\")\n\tconfig.Annotations.Visible = v.GetStringSlice(\"annotations.visible\")\n\tconfig.Debug = v.GetBool(\"debug\")\n\tconfig.Filters.Default = v.GetStringSlice(\"filters.default\")\n\tconfig.Labels.Color.Static = v.GetStringSlice(\"labels.color.static\")\n\tconfig.Labels.Color.Unique = v.GetStringSlice(\"labels.color.unique\")\n\tconfig.Labels.Keep = v.GetStringSlice(\"labels.keep\")\n\tconfig.Labels.Strip = v.GetStringSlice(\"labels.strip\")\n\tconfig.Listen.Address = v.GetString(\"listen.address\")\n\tconfig.Listen.Port = v.GetInt(\"listen.port\")\n\tconfig.Listen.Prefix = v.GetString(\"listen.prefix\")\n\tconfig.Log.Config = v.GetBool(\"log.config\")\n\tconfig.Log.Level = v.GetString(\"log.level\")\n\tconfig.Receivers.Keep = v.GetStringSlice(\"receivers.keep\")\n\tconfig.Receivers.Strip = v.GetStringSlice(\"receivers.strip\")\n\tconfig.Sentry.Private = v.GetString(\"sentry.private\")\n\tconfig.Sentry.Public = v.GetString(\"sentry.public\")\n\n\terr = v.UnmarshalKey(\"alertmanager.servers\", &config.Alertmanager.Servers)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\terr = v.UnmarshalKey(\"jira\", &config.JIRA)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ populate legacy settings if needed\n\tconfig.legacySettingsFallback()\n\n\t\/\/ accept single Alertmanager server from flag\/env if nothing is set yet\n\tif len(config.Alertmanager.Servers) == 0 && v.GetString(\"alertmanager.uri\") != \"\" {\n\t\tconfig.Alertmanager.Servers = []alertmanagerConfig{\n\t\t\talertmanagerConfig{\n\t\t\t\tName: \"default\",\n\t\t\t\tURI: v.GetString(\"alertmanager.uri\"),\n\t\t\t\tTimeout: time.Second * 40,\n\t\t\t},\n\t\t}\n\t}\n}\n\n\/\/ LogValues will dump runtime config to logs\nfunc (config *configSchema) LogValues() {\n\t\/\/ make a copy of our config so we can edit it\n\tcfg := configSchema(*config)\n\n\t\/\/ replace passwords in Alertmanager URIs with 'xxx'\n\tservers := []alertmanagerConfig{}\n\tfor _, s := range cfg.Alertmanager.Servers {\n\t\tserver := alertmanagerConfig{\n\t\t\tName: s.Name,\n\t\t\tURI: hideURLPassword(s.URI),\n\t\t\tTimeout: s.Timeout,\n\t\t}\n\t\tservers = append(servers, server)\n\t}\n\tcfg.Alertmanager.Servers = servers\n\n\t\/\/ replace secret in Sentry DNS with 'xxx'\n\tif config.Sentry.Private != \"\" {\n\t\tconfig.Sentry.Private = hideURLPassword(config.Sentry.Private)\n\t}\n\n\tout, err := yaml.Marshal(cfg)\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\n\tlog.Info(\"Parsed configuration:\")\n\tscanner := bufio.NewScanner(bytes.NewReader(out))\n\tfor scanner.Scan() {\n\t\tlog.Info(scanner.Text())\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package main is an entry point of the application.\npackage main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/colegion\/goal\/internal\/skeleton\/utils\"\n\n\t\"github.com\/colegion\/contrib\/servers\/grace\"\n)\n\nvar addr = flag.String(\"http.addr\", \":8080\", \"address the application will listen on\")\n\nfunc main() {\n\t\/\/ Initialize app's HTTP handler.\n\thandler, err := utils.InitHandler(\"config\/config.ini\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Prepare a new server.\n\ts := &http.Server{\n\t\tAddr: *addr,\n\t\tHandler: handler,\n\t}\n\n\t\/\/ Start the server.\n\tlog.Printf(`Listening on \"%s\".`, s.Addr)\n\tlog.Fatal(grace.Serve(s))\n}\n<commit_msg>Use server subpackage in main.go<commit_after>\/\/ Package main is an entry point of the application.\npackage main\n\nimport (\n\t\"log\"\n\n\t\"github.com\/colegion\/goal\/internal\/skeleton\/server\"\n)\n\nfunc main() {\n\tlog.Fatal(server.Start(\"config\/app.ini\"))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package slack implements the Channel type, for getting information about\n\/\/ Slack channels, and the User type, for sending chats.\npackage slack\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/url\"\n\t\"os\"\n)\n\nconst team = \"monkeytacos\"\n\nvar apiToken string\n\n\/\/ TODO: can i get rid of one of these response types?\ntype channelResponse struct {\n\tChannel Channel `json:\"channel\"`\n\tOk bool `json:\"ok\"`\n\tErr string `json:\"error\"`\n}\n\ntype channelListResponse struct {\n\tChannels []Channel `json:\"channels\"`\n\tOk bool `json:\"ok\"`\n\tErr string `json:\"error\"`\n}\n\ntype Channel struct {\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tMembers []string `json:\"members\"`\n\tTeam string \/\/ set in NewChannel\n}\n\nfunc init() {\n\tapiToken = os.Getenv(\"SLACK_API_TOKEN\")\n\tif apiToken == \"\" {\n\t\tlog.Fatal(\"SLACK_API_TOKEN not set\")\n\t}\n}\n\nfunc NewChannel(name string) (Channel, error) {\n\tvar emptyChannel Channel\n\n\tqsp := &url.Values{}\n\tqsp.Set(\"channel\", name)\n\tlistURL := NewURL(\"channels.list\", qsp)\n\tcl := channelListResponse{}\n\terr := apiCall(listURL, &cl)\n\tif err != nil {\n\t\treturn emptyChannel, APIError{err.Error()}\n\t}\n\n\tif cl.Ok != true {\n\t\treturn emptyChannel, APIError{cl.Err}\n\t}\n\n\tfor _, ch := range cl.Channels {\n\t\tif ch.Name == name {\n\t\t\tch.Team = team\n\t\t\treturn ch, nil\n\t\t}\n\t}\n\n\treturn emptyChannel, fmt.Errorf(\"no channel named %q on team %q\", name, team)\n}\n\nfunc (ch Channel) String() string {\n\treturn fmt.Sprintf(\"%#v\", ch)\n}\n\nfunc (ch *Channel) UpdateMembers() error {\n\tqsp := &url.Values{}\n\tqsp.Set(\"channel\", ch.ID)\n\tchannelURL := NewURL(\"channels.info\", qsp)\n\n\tcr := channelResponse{}\n\terr := apiCall(channelURL, &cr)\n\tif err != nil {\n\t\treturn APIError{err.Error()}\n\t}\n\n\tif !cr.Ok {\n\t\treturn APIError{cr.Err}\n\t}\n\n\tch.Members = cr.Channel.Members\n\treturn nil\n}\n<commit_msg>Combine channel response structs into one<commit_after>\/\/ Package slack implements the Channel type, for getting information about\n\/\/ Slack channels, and the User type, for sending chats.\npackage slack\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/url\"\n\t\"os\"\n)\n\nconst team = \"monkeytacos\"\n\nvar apiToken string\n\nfunc init() {\n\tapiToken = os.Getenv(\"SLACK_API_TOKEN\")\n\tif apiToken == \"\" {\n\t\tlog.Fatal(\"SLACK_API_TOKEN not set\")\n\t}\n}\n\ntype channelResponse struct {\n\tChannel Channel `json:\"channel\"`\n\tChannels []Channel `json:\"channels\"`\n\tOk bool `json:\"ok\"`\n\tErr string `json:\"error\"`\n}\n\ntype Channel struct {\n\tID string `json:\"id\"`\n\tName string `json:\"name\"`\n\tMembers []string `json:\"members\"`\n\tTeam string \/\/ set in NewChannel\n}\n\nfunc NewChannel(name string) (Channel, error) {\n\tvar emptyChannel Channel\n\n\tqsp := &url.Values{}\n\tqsp.Set(\"channel\", name)\n\tlistURL := NewURL(\"channels.list\", qsp)\n\tcr := channelResponse{}\n\terr := apiCall(listURL, &cr)\n\tif err != nil {\n\t\treturn emptyChannel, APIError{err.Error()}\n\t}\n\n\tif cr.Ok != true {\n\t\treturn emptyChannel, APIError{cr.Err}\n\t}\n\n\tfor _, ch := range cr.Channels {\n\t\tif ch.Name == name {\n\t\t\tch.Team = team\n\t\t\treturn ch, nil\n\t\t}\n\t}\n\n\treturn emptyChannel, fmt.Errorf(\"no channel named %q on team %q\", name, team)\n}\n\nfunc (ch Channel) String() string {\n\treturn fmt.Sprintf(\"%#v\", ch)\n}\n\nfunc (ch *Channel) UpdateMembers() error {\n\tqsp := &url.Values{}\n\tqsp.Set(\"channel\", ch.ID)\n\tchannelURL := NewURL(\"channels.info\", qsp)\n\n\tcr := channelResponse{}\n\terr := apiCall(channelURL, &cr)\n\tif err != nil {\n\t\treturn APIError{err.Error()}\n\t}\n\n\tif !cr.Ok {\n\t\treturn APIError{cr.Err}\n\t}\n\n\tch.Members = cr.Channel.Members\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * This file is part of the KubeVirt project\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * Copyright 2018 Red Hat, Inc.\n *\n *\/\n\npackage admitters\n\nimport (\n\t\"encoding\/json\"\n\n\t\"github.com\/golang\/mock\/gomock\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\tadmissionv1 \"k8s.io\/api\/admission\/v1\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\n\tv1 \"kubevirt.io\/api\/core\/v1\"\n\tsnapshotv1 \"kubevirt.io\/api\/snapshot\/v1alpha1\"\n\t\"kubevirt.io\/client-go\/kubecli\"\n\t\"kubevirt.io\/kubevirt\/pkg\/testutils\"\n\t\"kubevirt.io\/kubevirt\/pkg\/virt-api\/webhooks\"\n\tvirtconfig \"kubevirt.io\/kubevirt\/pkg\/virt-config\"\n)\n\nvar _ = Describe(\"Validating VirtualMachineSnapshot Admitter\", func() {\n\tvmName := \"vm\"\n\tapiGroup := \"kubevirt.io\"\n\n\tconfig, _, kvInformer := testutils.NewFakeClusterConfigUsingKVConfig(&v1.KubeVirtConfiguration{})\n\n\tContext(\"Without feature gate enabled\", func() {\n\t\tIt(\"should reject anything\", func() {\n\t\t\tsnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{},\n\t\t\t}\n\n\t\t\tar := createSnapshotAdmissionReview(snapshot)\n\t\t\tresp := createTestVMSnapshotAdmitter(config, nil).Admit(ar)\n\t\t\tExpect(resp.Allowed).To(BeFalse())\n\t\t\tExpect(resp.Result.Message).Should(Equal(\"snapshot feature gate not enabled\"))\n\t\t})\n\t})\n\n\tContext(\"With feature gate enabled\", func() {\n\t\tenableFeatureGate := func(featureGate string) {\n\t\t\ttestutils.UpdateFakeKubeVirtClusterConfig(kvInformer, &v1.KubeVirt{\n\t\t\t\tSpec: v1.KubeVirtSpec{\n\t\t\t\t\tConfiguration: v1.KubeVirtConfiguration{\n\t\t\t\t\t\tDeveloperConfiguration: &v1.DeveloperConfiguration{\n\t\t\t\t\t\t\tFeatureGates: []string{featureGate},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t\tdisableFeatureGates := func() {\n\t\t\ttestutils.UpdateFakeKubeVirtClusterConfig(kvInformer, &v1.KubeVirt{\n\t\t\t\tSpec: v1.KubeVirtSpec{\n\t\t\t\t\tConfiguration: v1.KubeVirtConfiguration{\n\t\t\t\t\t\tDeveloperConfiguration: &v1.DeveloperConfiguration{\n\t\t\t\t\t\t\tFeatureGates: make([]string, 0),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\n\t\tBeforeEach(func() {\n\t\t\tenableFeatureGate(\"Snapshot\")\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tdisableFeatureGates()\n\t\t})\n\n\t\tIt(\"should reject invalid request resource\", func() {\n\t\t\tar := &admissionv1.AdmissionReview{\n\t\t\t\tRequest: &admissionv1.AdmissionRequest{\n\t\t\t\t\tResource: webhooks.VirtualMachineGroupVersionResource,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tresp := createTestVMSnapshotAdmitter(config, nil).Admit(ar)\n\t\t\tExpect(resp.Allowed).To(BeFalse())\n\t\t\tExpect(resp.Result.Message).Should(ContainSubstring(\"unexpected resource\"))\n\t\t})\n\n\t\tIt(\"should reject missing apigroup\", func() {\n\t\t\tsnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{},\n\t\t\t}\n\n\t\t\tar := createSnapshotAdmissionReview(snapshot)\n\t\t\tresp := createTestVMSnapshotAdmitter(config, nil).Admit(ar)\n\t\t\tExpect(resp.Allowed).To(BeFalse())\n\t\t\tExpect(len(resp.Result.Details.Causes)).To(Equal(1))\n\t\t\tExpect(resp.Result.Details.Causes[0].Field).To(Equal(\"spec.source.apiGroup\"))\n\t\t})\n\n\t\tIt(\"should reject when VM does not exist\", func() {\n\t\t\tsnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{\n\t\t\t\t\tSource: corev1.TypedLocalObjectReference{\n\t\t\t\t\t\tAPIGroup: &apiGroup,\n\t\t\t\t\t\tKind: \"VirtualMachine\",\n\t\t\t\t\t\tName: vmName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tar := createSnapshotAdmissionReview(snapshot)\n\t\t\tresp := createTestVMSnapshotAdmitter(config, nil).Admit(ar)\n\t\t\tExpect(resp.Allowed).To(BeFalse())\n\t\t\tExpect(len(resp.Result.Details.Causes)).To(Equal(1))\n\t\t\tExpect(resp.Result.Details.Causes[0].Field).To(Equal(\"spec.source.name\"))\n\t\t})\n\n\t\tIt(\"should reject spec update\", func() {\n\t\t\tsnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{\n\t\t\t\t\tSource: corev1.TypedLocalObjectReference{\n\t\t\t\t\t\tAPIGroup: &apiGroup,\n\t\t\t\t\t\tKind: \"VirtualMachine\",\n\t\t\t\t\t\tName: vmName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\toldSnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{\n\t\t\t\t\tSource: corev1.TypedLocalObjectReference{\n\t\t\t\t\t\tAPIGroup: &apiGroup,\n\t\t\t\t\t\tKind: \"VirtualMachine\",\n\t\t\t\t\t\tName: \"baz\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tar := createSnapshotUpdateAdmissionReview(oldSnapshot, snapshot)\n\t\t\tresp := createTestVMSnapshotAdmitter(config, nil).Admit(ar)\n\t\t\tExpect(resp.Allowed).To(BeFalse())\n\t\t\tExpect(len(resp.Result.Details.Causes)).To(Equal(1))\n\t\t\tExpect(resp.Result.Details.Causes[0].Field).To(Equal(\"spec\"))\n\t\t})\n\n\t\tIt(\"should allow metadata update\", func() {\n\t\t\toldSnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{\n\t\t\t\t\tSource: corev1.TypedLocalObjectReference{\n\t\t\t\t\t\tAPIGroup: &apiGroup,\n\t\t\t\t\t\tKind: \"VirtualMachine\",\n\t\t\t\t\t\tName: vmName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tsnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tFinalizers: []string{\"finalizer\"},\n\t\t\t\t},\n\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{\n\t\t\t\t\tSource: corev1.TypedLocalObjectReference{\n\t\t\t\t\t\tAPIGroup: &apiGroup,\n\t\t\t\t\t\tKind: \"VirtualMachine\",\n\t\t\t\t\t\tName: vmName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tar := createSnapshotUpdateAdmissionReview(oldSnapshot, snapshot)\n\t\t\tresp := createTestVMSnapshotAdmitter(config, nil).Admit(ar)\n\t\t\tExpect(resp.Allowed).To(BeTrue())\n\t\t})\n\n\t\tContext(\"when VirtualMachine exists\", func() {\n\t\t\tvar vm *v1.VirtualMachine\n\n\t\t\tBeforeEach(func() {\n\t\t\t\tvm = &v1.VirtualMachine{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: vmName,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tIt(\"should accept when VM is running\", func() {\n\t\t\t\tsnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{\n\t\t\t\t\t\tSource: corev1.TypedLocalObjectReference{\n\t\t\t\t\t\t\tAPIGroup: &apiGroup,\n\t\t\t\t\t\t\tKind: \"VirtualMachine\",\n\t\t\t\t\t\t\tName: vmName,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tt := true\n\t\t\t\tvm.Spec.Running = &t\n\n\t\t\t\tar := createSnapshotAdmissionReview(snapshot)\n\t\t\t\tresp := createTestVMSnapshotAdmitter(config, vm).Admit(ar)\n\t\t\t\tExpect(resp.Allowed).To(BeTrue())\n\t\t\t})\n\n\t\t\tIt(\"should reject invalid kind\", func() {\n\t\t\t\tsnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{\n\t\t\t\t\t\tSource: corev1.TypedLocalObjectReference{\n\t\t\t\t\t\t\tAPIGroup: &apiGroup,\n\t\t\t\t\t\t\tKind: \"VirtualMachineInstance\",\n\t\t\t\t\t\t\tName: vmName,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tt := true\n\t\t\t\tvm.Spec.Running = &t\n\n\t\t\t\tar := createSnapshotAdmissionReview(snapshot)\n\t\t\t\tresp := createTestVMSnapshotAdmitter(config, vm).Admit(ar)\n\t\t\t\tExpect(resp.Allowed).To(BeFalse())\n\t\t\t\tExpect(len(resp.Result.Details.Causes)).To(Equal(1))\n\t\t\t\tExpect(resp.Result.Details.Causes[0].Field).To(Equal(\"spec.source.kind\"))\n\t\t\t})\n\n\t\t\tIt(\"should reject invalid apiGroup\", func() {\n\t\t\t\tg := \"foo.bar\"\n\t\t\t\tsnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{\n\t\t\t\t\t\tSource: corev1.TypedLocalObjectReference{\n\t\t\t\t\t\t\tAPIGroup: &g,\n\t\t\t\t\t\t\tKind: \"VirtualMachine\",\n\t\t\t\t\t\t\tName: vmName,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tt := true\n\t\t\t\tvm.Spec.Running = &t\n\n\t\t\t\tar := createSnapshotAdmissionReview(snapshot)\n\t\t\t\tresp := createTestVMSnapshotAdmitter(config, vm).Admit(ar)\n\t\t\t\tExpect(resp.Allowed).To(BeFalse())\n\t\t\t\tExpect(len(resp.Result.Details.Causes)).To(Equal(1))\n\t\t\t\tExpect(resp.Result.Details.Causes[0].Field).To(Equal(\"spec.source.apiGroup\"))\n\t\t\t})\n\n\t\t\tIt(\"should accept when VM is not running\", func() {\n\t\t\t\tsnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{\n\t\t\t\t\t\tSource: corev1.TypedLocalObjectReference{\n\t\t\t\t\t\t\tAPIGroup: &apiGroup,\n\t\t\t\t\t\t\tKind: \"VirtualMachine\",\n\t\t\t\t\t\t\tName: vmName,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tf := false\n\t\t\t\tvm.Spec.Running = &f\n\n\t\t\t\tar := createSnapshotAdmissionReview(snapshot)\n\t\t\t\tresp := createTestVMSnapshotAdmitter(config, vm).Admit(ar)\n\t\t\t\tExpect(resp.Allowed).To(BeTrue())\n\t\t\t})\n\t\t})\n\t})\n})\n\nfunc createSnapshotAdmissionReview(snapshot *snapshotv1.VirtualMachineSnapshot) *admissionv1.AdmissionReview {\n\tbytes, _ := json.Marshal(snapshot)\n\n\tar := &admissionv1.AdmissionReview{\n\t\tRequest: &admissionv1.AdmissionRequest{\n\t\t\tOperation: admissionv1.Create,\n\t\t\tNamespace: \"foo\",\n\t\t\tResource: metav1.GroupVersionResource{\n\t\t\t\tGroup: \"snapshot.kubevirt.io\",\n\t\t\t\tResource: \"virtualmachinesnapshots\",\n\t\t\t},\n\t\t\tObject: runtime.RawExtension{\n\t\t\t\tRaw: bytes,\n\t\t\t},\n\t\t},\n\t}\n\n\treturn ar\n}\n\nfunc createSnapshotUpdateAdmissionReview(old, current *snapshotv1.VirtualMachineSnapshot) *admissionv1.AdmissionReview {\n\toldBytes, _ := json.Marshal(old)\n\tcurrentBytes, _ := json.Marshal(current)\n\n\tar := &admissionv1.AdmissionReview{\n\t\tRequest: &admissionv1.AdmissionRequest{\n\t\t\tOperation: admissionv1.Update,\n\t\t\tNamespace: \"foo\",\n\t\t\tResource: metav1.GroupVersionResource{\n\t\t\t\tGroup: \"snapshot.kubevirt.io\",\n\t\t\t\tResource: \"virtualmachinesnapshots\",\n\t\t\t},\n\t\t\tObject: runtime.RawExtension{\n\t\t\t\tRaw: currentBytes,\n\t\t\t},\n\t\t\tOldObject: runtime.RawExtension{\n\t\t\t\tRaw: oldBytes,\n\t\t\t},\n\t\t},\n\t}\n\n\treturn ar\n}\n\nfunc createTestVMSnapshotAdmitter(config *virtconfig.ClusterConfig, vm *v1.VirtualMachine) *VMSnapshotAdmitter {\n\tctrl := gomock.NewController(GinkgoT())\n\tvirtClient := kubecli.NewMockKubevirtClient(ctrl)\n\tvmInterface := kubecli.NewMockVirtualMachineInterface(ctrl)\n\tvirtClient.EXPECT().VirtualMachine(gomock.Any()).Return(vmInterface).AnyTimes()\n\tif vm == nil {\n\t\terr := errors.NewNotFound(schema.GroupResource{Group: \"kubevirt.io\", Resource: \"virtualmachines\"}, \"foo\")\n\t\tvmInterface.EXPECT().Get(gomock.Any(), gomock.Any()).Return(nil, err)\n\t} else {\n\t\tvmInterface.EXPECT().Get(vm.Name, gomock.Any()).Return(vm, nil)\n\t}\n\treturn &VMSnapshotAdmitter{Config: config, Client: virtClient}\n}\n<commit_msg>Expect(len(X)).To(Equal(Y))” to “Expect(X).To(HaveLen(Y))<commit_after>\/*\n * This file is part of the KubeVirt project\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * Copyright 2018 Red Hat, Inc.\n *\n *\/\n\npackage admitters\n\nimport (\n\t\"encoding\/json\"\n\n\t\"github.com\/golang\/mock\/gomock\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\tadmissionv1 \"k8s.io\/api\/admission\/v1\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\n\tv1 \"kubevirt.io\/api\/core\/v1\"\n\tsnapshotv1 \"kubevirt.io\/api\/snapshot\/v1alpha1\"\n\t\"kubevirt.io\/client-go\/kubecli\"\n\t\"kubevirt.io\/kubevirt\/pkg\/testutils\"\n\t\"kubevirt.io\/kubevirt\/pkg\/virt-api\/webhooks\"\n\tvirtconfig \"kubevirt.io\/kubevirt\/pkg\/virt-config\"\n)\n\nvar _ = Describe(\"Validating VirtualMachineSnapshot Admitter\", func() {\n\tvmName := \"vm\"\n\tapiGroup := \"kubevirt.io\"\n\n\tconfig, _, kvInformer := testutils.NewFakeClusterConfigUsingKVConfig(&v1.KubeVirtConfiguration{})\n\n\tContext(\"Without feature gate enabled\", func() {\n\t\tIt(\"should reject anything\", func() {\n\t\t\tsnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{},\n\t\t\t}\n\n\t\t\tar := createSnapshotAdmissionReview(snapshot)\n\t\t\tresp := createTestVMSnapshotAdmitter(config, nil).Admit(ar)\n\t\t\tExpect(resp.Allowed).To(BeFalse())\n\t\t\tExpect(resp.Result.Message).Should(Equal(\"snapshot feature gate not enabled\"))\n\t\t})\n\t})\n\n\tContext(\"With feature gate enabled\", func() {\n\t\tenableFeatureGate := func(featureGate string) {\n\t\t\ttestutils.UpdateFakeKubeVirtClusterConfig(kvInformer, &v1.KubeVirt{\n\t\t\t\tSpec: v1.KubeVirtSpec{\n\t\t\t\t\tConfiguration: v1.KubeVirtConfiguration{\n\t\t\t\t\t\tDeveloperConfiguration: &v1.DeveloperConfiguration{\n\t\t\t\t\t\t\tFeatureGates: []string{featureGate},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t\tdisableFeatureGates := func() {\n\t\t\ttestutils.UpdateFakeKubeVirtClusterConfig(kvInformer, &v1.KubeVirt{\n\t\t\t\tSpec: v1.KubeVirtSpec{\n\t\t\t\t\tConfiguration: v1.KubeVirtConfiguration{\n\t\t\t\t\t\tDeveloperConfiguration: &v1.DeveloperConfiguration{\n\t\t\t\t\t\t\tFeatureGates: make([]string, 0),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\n\t\tBeforeEach(func() {\n\t\t\tenableFeatureGate(\"Snapshot\")\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tdisableFeatureGates()\n\t\t})\n\n\t\tIt(\"should reject invalid request resource\", func() {\n\t\t\tar := &admissionv1.AdmissionReview{\n\t\t\t\tRequest: &admissionv1.AdmissionRequest{\n\t\t\t\t\tResource: webhooks.VirtualMachineGroupVersionResource,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tresp := createTestVMSnapshotAdmitter(config, nil).Admit(ar)\n\t\t\tExpect(resp.Allowed).To(BeFalse())\n\t\t\tExpect(resp.Result.Message).Should(ContainSubstring(\"unexpected resource\"))\n\t\t})\n\n\t\tIt(\"should reject missing apigroup\", func() {\n\t\t\tsnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{},\n\t\t\t}\n\n\t\t\tar := createSnapshotAdmissionReview(snapshot)\n\t\t\tresp := createTestVMSnapshotAdmitter(config, nil).Admit(ar)\n\t\t\tExpect(resp.Allowed).To(BeFalse())\n\t\t\tExpect(resp.Result.Details.Causes).To(HaveLen(1))\n\t\t\tExpect(resp.Result.Details.Causes[0].Field).To(Equal(\"spec.source.apiGroup\"))\n\t\t})\n\n\t\tIt(\"should reject when VM does not exist\", func() {\n\t\t\tsnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{\n\t\t\t\t\tSource: corev1.TypedLocalObjectReference{\n\t\t\t\t\t\tAPIGroup: &apiGroup,\n\t\t\t\t\t\tKind: \"VirtualMachine\",\n\t\t\t\t\t\tName: vmName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tar := createSnapshotAdmissionReview(snapshot)\n\t\t\tresp := createTestVMSnapshotAdmitter(config, nil).Admit(ar)\n\t\t\tExpect(resp.Allowed).To(BeFalse())\n\t\t\tExpect(resp.Result.Details.Causes).To(HaveLen(1))\n\t\t\tExpect(resp.Result.Details.Causes[0].Field).To(Equal(\"spec.source.name\"))\n\t\t})\n\n\t\tIt(\"should reject spec update\", func() {\n\t\t\tsnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{\n\t\t\t\t\tSource: corev1.TypedLocalObjectReference{\n\t\t\t\t\t\tAPIGroup: &apiGroup,\n\t\t\t\t\t\tKind: \"VirtualMachine\",\n\t\t\t\t\t\tName: vmName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\toldSnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{\n\t\t\t\t\tSource: corev1.TypedLocalObjectReference{\n\t\t\t\t\t\tAPIGroup: &apiGroup,\n\t\t\t\t\t\tKind: \"VirtualMachine\",\n\t\t\t\t\t\tName: \"baz\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tar := createSnapshotUpdateAdmissionReview(oldSnapshot, snapshot)\n\t\t\tresp := createTestVMSnapshotAdmitter(config, nil).Admit(ar)\n\t\t\tExpect(resp.Allowed).To(BeFalse())\n\t\t\tExpect(resp.Result.Details.Causes).To(HaveLen(1))\n\t\t\tExpect(resp.Result.Details.Causes[0].Field).To(Equal(\"spec\"))\n\t\t})\n\n\t\tIt(\"should allow metadata update\", func() {\n\t\t\toldSnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{\n\t\t\t\t\tSource: corev1.TypedLocalObjectReference{\n\t\t\t\t\t\tAPIGroup: &apiGroup,\n\t\t\t\t\t\tKind: \"VirtualMachine\",\n\t\t\t\t\t\tName: vmName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tsnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tFinalizers: []string{\"finalizer\"},\n\t\t\t\t},\n\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{\n\t\t\t\t\tSource: corev1.TypedLocalObjectReference{\n\t\t\t\t\t\tAPIGroup: &apiGroup,\n\t\t\t\t\t\tKind: \"VirtualMachine\",\n\t\t\t\t\t\tName: vmName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tar := createSnapshotUpdateAdmissionReview(oldSnapshot, snapshot)\n\t\t\tresp := createTestVMSnapshotAdmitter(config, nil).Admit(ar)\n\t\t\tExpect(resp.Allowed).To(BeTrue())\n\t\t})\n\n\t\tContext(\"when VirtualMachine exists\", func() {\n\t\t\tvar vm *v1.VirtualMachine\n\n\t\t\tBeforeEach(func() {\n\t\t\t\tvm = &v1.VirtualMachine{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: vmName,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tIt(\"should accept when VM is running\", func() {\n\t\t\t\tsnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{\n\t\t\t\t\t\tSource: corev1.TypedLocalObjectReference{\n\t\t\t\t\t\t\tAPIGroup: &apiGroup,\n\t\t\t\t\t\t\tKind: \"VirtualMachine\",\n\t\t\t\t\t\t\tName: vmName,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tt := true\n\t\t\t\tvm.Spec.Running = &t\n\n\t\t\t\tar := createSnapshotAdmissionReview(snapshot)\n\t\t\t\tresp := createTestVMSnapshotAdmitter(config, vm).Admit(ar)\n\t\t\t\tExpect(resp.Allowed).To(BeTrue())\n\t\t\t})\n\n\t\t\tIt(\"should reject invalid kind\", func() {\n\t\t\t\tsnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{\n\t\t\t\t\t\tSource: corev1.TypedLocalObjectReference{\n\t\t\t\t\t\t\tAPIGroup: &apiGroup,\n\t\t\t\t\t\t\tKind: \"VirtualMachineInstance\",\n\t\t\t\t\t\t\tName: vmName,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tt := true\n\t\t\t\tvm.Spec.Running = &t\n\n\t\t\t\tar := createSnapshotAdmissionReview(snapshot)\n\t\t\t\tresp := createTestVMSnapshotAdmitter(config, vm).Admit(ar)\n\t\t\t\tExpect(resp.Allowed).To(BeFalse())\n\t\t\t\tExpect(resp.Result.Details.Causes).To(HaveLen(1))\n\t\t\t\tExpect(resp.Result.Details.Causes[0].Field).To(Equal(\"spec.source.kind\"))\n\t\t\t})\n\n\t\t\tIt(\"should reject invalid apiGroup\", func() {\n\t\t\t\tg := \"foo.bar\"\n\t\t\t\tsnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{\n\t\t\t\t\t\tSource: corev1.TypedLocalObjectReference{\n\t\t\t\t\t\t\tAPIGroup: &g,\n\t\t\t\t\t\t\tKind: \"VirtualMachine\",\n\t\t\t\t\t\t\tName: vmName,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tt := true\n\t\t\t\tvm.Spec.Running = &t\n\n\t\t\t\tar := createSnapshotAdmissionReview(snapshot)\n\t\t\t\tresp := createTestVMSnapshotAdmitter(config, vm).Admit(ar)\n\t\t\t\tExpect(resp.Allowed).To(BeFalse())\n\t\t\t\tExpect(resp.Result.Details.Causes).To(HaveLen(1))\n\t\t\t\tExpect(resp.Result.Details.Causes[0].Field).To(Equal(\"spec.source.apiGroup\"))\n\t\t\t})\n\n\t\t\tIt(\"should accept when VM is not running\", func() {\n\t\t\t\tsnapshot := &snapshotv1.VirtualMachineSnapshot{\n\t\t\t\t\tSpec: snapshotv1.VirtualMachineSnapshotSpec{\n\t\t\t\t\t\tSource: corev1.TypedLocalObjectReference{\n\t\t\t\t\t\t\tAPIGroup: &apiGroup,\n\t\t\t\t\t\t\tKind: \"VirtualMachine\",\n\t\t\t\t\t\t\tName: vmName,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tf := false\n\t\t\t\tvm.Spec.Running = &f\n\n\t\t\t\tar := createSnapshotAdmissionReview(snapshot)\n\t\t\t\tresp := createTestVMSnapshotAdmitter(config, vm).Admit(ar)\n\t\t\t\tExpect(resp.Allowed).To(BeTrue())\n\t\t\t})\n\t\t})\n\t})\n})\n\nfunc createSnapshotAdmissionReview(snapshot *snapshotv1.VirtualMachineSnapshot) *admissionv1.AdmissionReview {\n\tbytes, _ := json.Marshal(snapshot)\n\n\tar := &admissionv1.AdmissionReview{\n\t\tRequest: &admissionv1.AdmissionRequest{\n\t\t\tOperation: admissionv1.Create,\n\t\t\tNamespace: \"foo\",\n\t\t\tResource: metav1.GroupVersionResource{\n\t\t\t\tGroup: \"snapshot.kubevirt.io\",\n\t\t\t\tResource: \"virtualmachinesnapshots\",\n\t\t\t},\n\t\t\tObject: runtime.RawExtension{\n\t\t\t\tRaw: bytes,\n\t\t\t},\n\t\t},\n\t}\n\n\treturn ar\n}\n\nfunc createSnapshotUpdateAdmissionReview(old, current *snapshotv1.VirtualMachineSnapshot) *admissionv1.AdmissionReview {\n\toldBytes, _ := json.Marshal(old)\n\tcurrentBytes, _ := json.Marshal(current)\n\n\tar := &admissionv1.AdmissionReview{\n\t\tRequest: &admissionv1.AdmissionRequest{\n\t\t\tOperation: admissionv1.Update,\n\t\t\tNamespace: \"foo\",\n\t\t\tResource: metav1.GroupVersionResource{\n\t\t\t\tGroup: \"snapshot.kubevirt.io\",\n\t\t\t\tResource: \"virtualmachinesnapshots\",\n\t\t\t},\n\t\t\tObject: runtime.RawExtension{\n\t\t\t\tRaw: currentBytes,\n\t\t\t},\n\t\t\tOldObject: runtime.RawExtension{\n\t\t\t\tRaw: oldBytes,\n\t\t\t},\n\t\t},\n\t}\n\n\treturn ar\n}\n\nfunc createTestVMSnapshotAdmitter(config *virtconfig.ClusterConfig, vm *v1.VirtualMachine) *VMSnapshotAdmitter {\n\tctrl := gomock.NewController(GinkgoT())\n\tvirtClient := kubecli.NewMockKubevirtClient(ctrl)\n\tvmInterface := kubecli.NewMockVirtualMachineInterface(ctrl)\n\tvirtClient.EXPECT().VirtualMachine(gomock.Any()).Return(vmInterface).AnyTimes()\n\tif vm == nil {\n\t\terr := errors.NewNotFound(schema.GroupResource{Group: \"kubevirt.io\", Resource: \"virtualmachines\"}, \"foo\")\n\t\tvmInterface.EXPECT().Get(gomock.Any(), gomock.Any()).Return(nil, err)\n\t} else {\n\t\tvmInterface.EXPECT().Get(vm.Name, gomock.Any()).Return(vm, nil)\n\t}\n\treturn &VMSnapshotAdmitter{Config: config, Client: virtClient}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage testutil\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"time\"\n\n\t\"v.io\/x\/devtools\/internal\/collect\"\n\t\"v.io\/x\/devtools\/internal\/tool\"\n\t\"v.io\/x\/devtools\/internal\/util\"\n\t\"v.io\/x\/devtools\/internal\/xunit\"\n)\n\n\/\/ generateXUnitTestSuite generates an xUnit test suite that\n\/\/ encapsulates the given input.\nfunc generateXUnitTestSuite(ctx *tool.Context, success bool, pkg string, duration time.Duration, output string) *xunit.TestSuite {\n\t\/\/ Generate an xUnit test suite describing the result.\n\ts := xunit.TestSuite{Name: pkg}\n\tc := xunit.TestCase{\n\t\tClassname: pkg,\n\t\tName: \"Test\",\n\t\tTime: fmt.Sprintf(\"%.2f\", duration.Seconds()),\n\t}\n\tif !success {\n\t\tfmt.Fprintf(ctx.Stdout(), \"%s ... failed\\n%v\\n\", pkg, output)\n\t\tf := xunit.Failure{\n\t\t\tMessage: \"vrpc\",\n\t\t\tData: output,\n\t\t}\n\t\tc.Failures = append(c.Failures, f)\n\t\ts.Failures++\n\t} else {\n\t\tfmt.Fprintf(ctx.Stdout(), \"%s ... ok\\n\", pkg)\n\t}\n\ts.Tests++\n\ts.Cases = append(s.Cases, c)\n\treturn &s\n}\n\n\/\/ testProdService test the given production service.\nfunc testProdService(ctx *tool.Context, service prodService) (*xunit.TestSuite, error) {\n\troot, err := util.VanadiumRoot()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbin := filepath.Join(root, \"release\", \"go\", \"bin\", \"vrpc\")\n\tvar out bytes.Buffer\n\topts := ctx.Run().Opts()\n\topts.Stdout = &out\n\topts.Stderr = &out\n\tstart := time.Now()\n\tif err := ctx.Run().TimedCommandWithOpts(DefaultTestTimeout, opts, bin, \"signature\", service.objectName); err != nil {\n\t\treturn generateXUnitTestSuite(ctx, false, service.name, time.Now().Sub(start), out.String()), nil\n\t}\n\tif !service.regexp.Match(out.Bytes()) {\n\t\tfmt.Fprintf(ctx.Stderr(), \"couldn't match regexp `%s` in output:\\n%v\\n\", service.regexp, out.String())\n\t\treturn generateXUnitTestSuite(ctx, false, service.name, time.Now().Sub(start), \"mismatching signature\"), nil\n\t}\n\treturn generateXUnitTestSuite(ctx, true, service.name, time.Now().Sub(start), \"\"), nil\n}\n\ntype prodService struct {\n\tname string\n\tobjectName string\n\tregexp *regexp.Regexp\n}\n\n\/\/ vanadiumProdServicesTest runs a test of vanadium production services.\nfunc vanadiumProdServicesTest(ctx *tool.Context, testName string, opts ...TestOpt) (_ *TestResult, e error) {\n\t\/\/ Initialize the test.\n\tcleanup, err := initTest(ctx, testName, nil)\n\tif err != nil {\n\t\treturn nil, internalTestError{err, \"Init\"}\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\t\/\/ Install the vrpc tool.\n\tif err := ctx.Run().Command(\"v23\", \"go\", \"install\", \"v.io\/x\/ref\/cmd\/vrpc\"); err != nil {\n\t\treturn nil, internalTestError{err, \"Install VRPC\"}\n\t}\n\n\t\/\/ Describe the test cases.\n\tblessingRoot, namespaceRoot := getServiceOpts(opts)\n\tallPassed, suites := true, []xunit.TestSuite{}\n\tservices := []prodService{\n\t\tprodService{\n\t\t\tname: \"mounttable\",\n\t\t\tobjectName: namespaceRoot,\n\t\t\tregexp: regexp.MustCompile(`MountTable[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tname: \"application repository\",\n\t\t\tobjectName: namespaceRoot + \"\/applications\",\n\t\t\tregexp: regexp.MustCompile(`Application[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tname: \"binary repository\",\n\t\t\tobjectName: namespaceRoot + \"\/binaries\",\n\t\t\tregexp: regexp.MustCompile(`Binary[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tname: \"macaroon service\",\n\t\t\tobjectName: namespaceRoot + \"\/identity\/\" + blessingRoot + \"\/root\/macaroon\",\n\t\t\tregexp: regexp.MustCompile(`MacaroonBlesser[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tname: \"google identity service\",\n\t\t\tobjectName: namespaceRoot + \"\/identity\/\" + blessingRoot + \"\/root\/google\",\n\t\t\tregexp: regexp.MustCompile(`OAuthBlesser[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tobjectName: namespaceRoot + \"\/identity\/\" + blessingRoot + \"\/root\/discharger\",\n\t\t\tname: \"binary discharger\",\n\t\t\tregexp: regexp.MustCompile(`Discharger[[:space:]]+interface`),\n\t\t},\n\t}\n\n\tfor _, service := range services {\n\t\tsuite, err := testProdService(ctx, service)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tallPassed = allPassed && (suite.Failures == 0)\n\t\tsuites = append(suites, *suite)\n\t}\n\n\t\/\/ Create the xUnit report.\n\tif err := xunit.CreateReport(ctx, testName, suites); err != nil {\n\t\treturn nil, err\n\t}\n\tif !allPassed {\n\t\treturn &TestResult{Status: TestFailed}, nil\n\t}\n\treturn &TestResult{Status: TestPassed}, nil\n}\n\n\/\/ getServiceOpts extracts blessing root and namespace root from the given\n\/\/ TestOpts.\nfunc getServiceOpts(opts []TestOpt) (string, string) {\n\tblessingRoot := \"dev.v.io\"\n\tnamespaceRoot := \"\/ns.dev.v.io:8101\"\n\tfor _, opt := range opts {\n\t\tswitch v := opt.(type) {\n\t\tcase BlessingsRootOpt:\n\t\t\tblessingRoot = string(v)\n\t\tcase NamespaceRootOpt:\n\t\t\tnamespaceRoot = string(v)\n\t\t}\n\t}\n\treturn blessingRoot, namespaceRoot\n}\n<commit_msg>prod-services-test: Test the HTTP service and then some.<commit_after>\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage testutil\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"time\"\n\n\t\"v.io\/x\/devtools\/internal\/collect\"\n\t\"v.io\/x\/devtools\/internal\/tool\"\n\t\"v.io\/x\/devtools\/internal\/util\"\n\t\"v.io\/x\/devtools\/internal\/xunit\"\n)\n\n\/\/ generateXUnitTestSuite generates an xUnit test suite that\n\/\/ encapsulates the given input.\nfunc generateXUnitTestSuite(ctx *tool.Context, failure *xunit.Failure, pkg string, duration time.Duration) *xunit.TestSuite {\n\t\/\/ Generate an xUnit test suite describing the result.\n\ts := xunit.TestSuite{Name: pkg}\n\tc := xunit.TestCase{\n\t\tClassname: pkg,\n\t\tName: \"Test\",\n\t\tTime: fmt.Sprintf(\"%.2f\", duration.Seconds()),\n\t}\n\tif failure != nil {\n\t\tfmt.Fprintf(ctx.Stdout(), \"%s ... failed\\n%v\\n\", pkg, failure.Data)\n\t\tc.Failures = append(c.Failures, *failure)\n\t\ts.Failures++\n\t} else {\n\t\tfmt.Fprintf(ctx.Stdout(), \"%s ... ok\\n\", pkg)\n\t}\n\ts.Tests++\n\ts.Cases = append(s.Cases, c)\n\treturn &s\n}\n\n\/\/ testSingleProdService test the given production service.\nfunc testSingleProdService(ctx *tool.Context, vroot, principalDir string, service prodService) *xunit.TestSuite {\n\tbin := filepath.Join(vroot, \"release\", \"go\", \"bin\", \"vrpc\")\n\tvar out bytes.Buffer\n\topts := ctx.Run().Opts()\n\topts.Stdout = &out\n\topts.Stderr = &out\n\tstart := time.Now()\n\tif err := ctx.Run().TimedCommandWithOpts(DefaultTestTimeout, opts, bin, \"--veyron.credentials\", principalDir, \"signature\", service.objectName); err != nil {\n\t\treturn generateXUnitTestSuite(ctx, &xunit.Failure{\"vrpc\", out.String()}, service.name, time.Now().Sub(start))\n\t}\n\tif !service.regexp.Match(out.Bytes()) {\n\t\tfmt.Fprintf(ctx.Stderr(), \"couldn't match regexp %q in output:\\n%v\\n\", service.regexp, out.String())\n\t\treturn generateXUnitTestSuite(ctx, &xunit.Failure{\"vrpc\", \"mismatching signature\"}, service.name, time.Now().Sub(start))\n\t}\n\treturn generateXUnitTestSuite(ctx, nil, service.name, time.Now().Sub(start))\n}\n\ntype prodService struct {\n\tname string \/\/ Name to use for the test description\n\tobjectName string \/\/ Object name of the service to connect to\n\tregexp *regexp.Regexp \/\/ Regexp that should match the signature output\n}\n\n\/\/ vanadiumProdServicesTest runs a test of vanadium production services.\nfunc vanadiumProdServicesTest(ctx *tool.Context, testName string, opts ...TestOpt) (_ *TestResult, e error) {\n\t\/\/ Initialize the test.\n\tcleanup, err := initTest(ctx, testName, nil)\n\tif err != nil {\n\t\treturn nil, internalTestError{err, \"Init\"}\n\t}\n\tdefer collect.Error(func() error { return cleanup() }, &e)\n\n\tvroot, err := util.VanadiumRoot()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Install the vrpc tool.\n\tif err := ctx.Run().Command(\"v23\", \"go\", \"install\", \"v.io\/x\/ref\/cmd\/vrpc\"); err != nil {\n\t\treturn nil, internalTestError{err, \"Install VRPC\"}\n\t}\n\t\/\/ Install the principal tool.\n\tif err := ctx.Run().Command(\"v23\", \"go\", \"install\", \"v.io\/x\/ref\/cmd\/principal\"); err != nil {\n\t\treturn nil, internalTestError{err, \"Install Principal\"}\n\t}\n\ttmpdir, err := ctx.Run().TempDir(\"\", \"prod-services-test\")\n\tif err != nil {\n\t\treturn nil, internalTestError{err, \"Create temporary directory\"}\n\t}\n\tdefer collect.Error(func() error { return ctx.Run().RemoveAll(tmpdir) }, &e)\n\n\tblessingRoot, namespaceRoot := getServiceOpts(opts)\n\tallPassed, suites := true, []xunit.TestSuite{}\n\n\t\/\/ Fetch the \"root\" blessing that all services are blessed by.\n\tsuite, pubkey, blessingNames := testIdentityProviderHTTP(ctx, blessingRoot)\n\tsuites = append(suites, *suite)\n\n\tif suite.Failures == 0 {\n\t\t\/\/ Setup a principal that will be used by testAllProdServices and will\n\t\t\/\/ recognize the blessings of the prod services.\n\t\tprincipalDir, err := setupPrincipal(ctx, vroot, tmpdir, pubkey, blessingNames)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, suite := range testAllProdServices(ctx, vroot, principalDir, blessingRoot, namespaceRoot) {\n\t\t\tallPassed = allPassed && (suite.Failures == 0)\n\t\t\tsuites = append(suites, *suite)\n\t\t}\n\t}\n\n\t\/\/ Create the xUnit report.\n\tif err := xunit.CreateReport(ctx, testName, suites); err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, suite := range suites {\n\t\tif suite.Failures > 0 {\n\t\t\t\/\/ At least one test failed:\n\t\t\treturn &TestResult{Status: TestFailed}, nil\n\t\t}\n\t}\n\treturn &TestResult{Status: TestPassed}, nil\n}\n\nfunc testAllProdServices(ctx *tool.Context, vroot, principalDir, blessingRoot, namespaceRoot string) []*xunit.TestSuite {\n\tservices := []prodService{\n\t\tprodService{\n\t\t\tname: \"mounttable\",\n\t\t\tobjectName: namespaceRoot,\n\t\t\tregexp: regexp.MustCompile(`MountTable[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tname: \"application repository\",\n\t\t\tobjectName: namespaceRoot + \"\/applications\",\n\t\t\tregexp: regexp.MustCompile(`Application[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tname: \"binary repository\",\n\t\t\tobjectName: namespaceRoot + \"\/binaries\",\n\t\t\tregexp: regexp.MustCompile(`Binary[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tname: \"macaroon service\",\n\t\t\tobjectName: namespaceRoot + \"\/identity\/\" + blessingRoot + \"\/root\/macaroon\",\n\t\t\tregexp: regexp.MustCompile(`MacaroonBlesser[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tname: \"google identity service\",\n\t\t\tobjectName: namespaceRoot + \"\/identity\/\" + blessingRoot + \"\/root\/google\",\n\t\t\tregexp: regexp.MustCompile(`OAuthBlesser[[:space:]]+interface`),\n\t\t},\n\t\tprodService{\n\t\t\tobjectName: namespaceRoot + \"\/identity\/\" + blessingRoot + \"\/root\/discharger\",\n\t\t\tname: \"binary discharger\",\n\t\t\tregexp: regexp.MustCompile(`Discharger[[:space:]]+interface`),\n\t\t},\n\t}\n\n\tvar suites []*xunit.TestSuite\n\tfor _, service := range services {\n\t\tsuites = append(suites, testSingleProdService(ctx, vroot, principalDir, service))\n\t}\n\treturn suites\n}\n\n\/\/ testIdentityProviderHTTP tests that the identity provider's HTTP server is\n\/\/ up and running and also fetches the set of blessing names that the provider\n\/\/ claims to be authoritative on and the public key (encoded) used by that\n\/\/ identity provider to sign certificates for blessings.\n\/\/\n\/\/ PARANOIA ALERT:\n\/\/ This function is subject to man-in-the-middle attacks because it does not\n\/\/ verify the TLS certificates presented by the server. This does open the\n\/\/ door for an attack where a parallel universe of services could be setup\n\/\/ and fool this production services test into thinking all services are\n\/\/ up and running when they may not be.\n\/\/\n\/\/ The attacker in this case will have to be able to mess with the routing\n\/\/ tables on the machine running this test, or the network routes of routers\n\/\/ used by the machine, or mess up DNS entries.\nfunc testIdentityProviderHTTP(ctx *tool.Context, blessingRoot string) (suite *xunit.TestSuite, publickey string, blessingNames []string) {\n\turl := fmt.Sprintf(\"https:\/\/%s\/auth\/blessing-root\", blessingRoot)\n\tstart := time.Now()\n\tvar response struct {\n\t\tNames []string `json:\"names\"`\n\t\tPublicKey string `json:\"publicKey\"`\n\t}\n\tresp, err := http.Get(url)\n\tif err == nil {\n\t\tdefer resp.Body.Close()\n\t\terr = json.NewDecoder(resp.Body).Decode(&response)\n\t}\n\tvar failure *xunit.Failure\n\tif err != nil {\n\t\tfailure = &xunit.Failure{\"identityd HTTP\", err.Error()}\n\t}\n\treturn generateXUnitTestSuite(ctx, failure, url, time.Now().Sub(start)), response.PublicKey, response.Names\n}\n\nfunc setupPrincipal(ctx *tool.Context, vroot, tmpdir, pubkey string, blessingNames []string) (string, error) {\n\tdir := filepath.Join(tmpdir, \"credentials\")\n\tbin := filepath.Join(vroot, \"release\", \"go\", \"bin\", \"principal\")\n\tif err := ctx.Run().TimedCommand(DefaultTestTimeout, bin, \"create\", dir, \"prod-services-tester\"); err != nil {\n\t\tfmt.Fprintf(ctx.Stderr(), \"principal create failed: %v\\n\", err)\n\t\treturn \"\", err\n\t}\n\tfor _, name := range blessingNames {\n\t\tif err := ctx.Run().TimedCommand(DefaultTestTimeout, bin, \"--veyron.credentials\", dir, \"addtoroots\", pubkey, name); err != nil {\n\t\t\tfmt.Fprintf(ctx.Stderr(), \"principal addtoroots %v %v failed: %v\\n\", pubkey, name, err)\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn dir, nil\n}\n\n\/\/ getServiceOpts extracts blessing root and namespace root from the given\n\/\/ TestOpts.\nfunc getServiceOpts(opts []TestOpt) (string, string) {\n\tblessingRoot := \"dev.v.io\"\n\tnamespaceRoot := \"\/ns.dev.v.io:8101\"\n\tfor _, opt := range opts {\n\t\tswitch v := opt.(type) {\n\t\tcase BlessingsRootOpt:\n\t\t\tblessingRoot = string(v)\n\t\tcase NamespaceRootOpt:\n\t\t\tnamespaceRoot = string(v)\n\t\t}\n\t}\n\treturn blessingRoot, namespaceRoot\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage verify\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/jacobsa\/comeback\/internal\/blob\"\n\t\"github.com\/jacobsa\/syncutil\"\n\t\"github.com\/jacobsa\/timeutil\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ Read all blobs necessary for verifying the directory structure rooted at a\n\/\/ set of backup root scores, ensuring that the entire directory structure is\n\/\/ intact in GCS.\n\/\/\n\/\/ Optionally, all file content is also read and verified. This is less\n\/\/ important than verifying directory connectedness if we trust that GCS does\n\/\/ not corrupt object metadata (where we store expected CRC32C and MD5) and\n\/\/ does correctly report the object's CRC32C and MD5 sums in listings,\n\/\/ verifying them periodically.\n\/\/\n\/\/ If work is to be preserved across runs, knownStructure should be filled in\n\/\/ with parenthood information from previously-generated records (for both\n\/\/ files and directories). Nodes that exist as keys in this map will not be\n\/\/ re-verified, except to confirm that their content still exists in allScores.\n\/\/\n\/\/ It is expected that the blob store's Load method does score verification for\n\/\/ us.\nfunc Verify(\n\tctx context.Context,\n\treadFiles bool,\n\trootScores []blob.Score,\n\tallScores []blob.Score,\n\tknownStructure map[Node][]Node,\n\trecords chan<- Record,\n\tbs blob.Store) (err error) {\n\tb := syncutil.NewBundle(ctx)\n\n\t\/\/ Explore the graph starting at the specified roots. Use an \"experimentally\n\t\/\/ determined\" parallelism, which in theory should depend on bandwidth-delay\n\t\/\/ products but in practice comes down to when the OS gets cranky about open\n\t\/\/ files.\n\tgraphNodes := make(chan graph.Node, 100)\n\tb.Add(func(ctx context.Context) (err error) {\n\t\tdefer close(graphNodes)\n\t\tconst parallelism = 128\n\n\t\tsf := newSuccessorFinder(\n\t\t\treadFiles,\n\t\t\tallScores,\n\t\t\tknownStructure,\n\t\t\trecords,\n\t\t\ttimeutil.RealClock(),\n\t\t\tbs)\n\n\t\tvar graphRoots []graph.Node\n\t\tfor _, s := range rootScores {\n\t\t\tn := Node{\n\t\t\t\tScore: s,\n\t\t\t\tDir: true,\n\t\t\t}\n\n\t\t\tgraphRoots = append(graphRoots, n)\n\t\t}\n\n\t\terr = graph.ExploreDirectedGraph(\n\t\t\tctx,\n\t\t\tsf,\n\t\t\tgraphRoots,\n\t\t\tgraphNodes,\n\t\t\tparallelism)\n\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"ExploreDirectedGraph: %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\treturn\n\t})\n\n\t\/\/ Throw away the graph nodes returned by ExploreDirectedGraph. We don't need\n\t\/\/ them; the successor finder mints records and writes them to the channel\n\t\/\/ for us.\n\tb.Add(func(ctx context.Context) (err error) {\n\t\tfor _ = range graphNodes {\n\t\t}\n\n\t\treturn\n\t})\n\n\terr = b.Join()\n\treturn\n}\n<commit_msg>Fixed verify.go.<commit_after>\/\/ Copyright 2015 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage verify\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/jacobsa\/comeback\/internal\/blob\"\n\t\"github.com\/jacobsa\/comeback\/internal\/dag\"\n\t\"github.com\/jacobsa\/timeutil\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ Read all blobs necessary for verifying the directory structure rooted at a\n\/\/ set of backup root scores, ensuring that the entire directory structure is\n\/\/ intact in GCS.\n\/\/\n\/\/ Optionally, all file content is also read and verified. This is less\n\/\/ important than verifying directory connectedness if we trust that GCS does\n\/\/ not corrupt object metadata (where we store expected CRC32C and MD5) and\n\/\/ does correctly report the object's CRC32C and MD5 sums in listings,\n\/\/ verifying them periodically.\n\/\/\n\/\/ If work is to be preserved across runs, knownStructure should be filled in\n\/\/ with parenthood information from previously-generated records (for both\n\/\/ files and directories). Nodes that exist as keys in this map will not be\n\/\/ re-verified, except to confirm that their content still exists in allScores.\n\/\/\n\/\/ It is expected that the blob store's Load method does score verification for\n\/\/ us.\nfunc Verify(\n\tctx context.Context,\n\treadFiles bool,\n\trootScores []blob.Score,\n\tallScores []blob.Score,\n\tknownStructure map[Node][]Node,\n\trecords chan<- Record,\n\tblobStore blob.Store) (err error) {\n\tclock := timeutil.RealClock()\n\n\t\/\/ Set up a dependency resolver that reads directory listings. It also takes\n\t\/\/ care of confirming that all scores (for files and directories) exist.\n\tdr := newDependencyResolver(\n\t\tallScores,\n\t\tknownStructure,\n\t\trecords,\n\t\tblobStore,\n\t\tclock)\n\n\t\/\/ Do we need to do anything for file nodes?\n\tvar visitor dag.Visitor\n\tif readFiles {\n\t\tvisitor = newVisitor(records, blobStore, clock)\n\t} else {\n\t\tvisitor = &doNothingVisitor{}\n\t}\n\n\t\/\/ Traverse the graph.\n\tvar rootNodes []dag.Node\n\tfor _, s := range rootScores {\n\t\tn := Node{\n\t\t\tScore: s,\n\t\t\tDir: true,\n\t\t}\n\n\t\trootNodes = append(rootNodes, n)\n\t}\n\n\tconst parallelism = 128\n\terr = dag.Visit(ctx, rootNodes, dr, visitor, parallelism)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"dag.Visit: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package cli\n\nimport \"os\"\nimport \"fmt\"\nimport \"bytes\"\nimport \"strings\"\n\n\/\/ CLI represents a set of defined flags. The zero value of a FlagSet\n\/\/ has no name and has ContinueOnError error handling.\ntype CLI struct {\n\tflagable\n\tparamable\n\tsubCommandable\n\t*writer\n\n\tfn commandFn\n\tname string\n\tdescription string\n\tparsed bool\n}\n\n\/\/ NewCLI returns a new cli with the specified name and\n\/\/ error handling property.\nfunc NewCLI(version, desc string, fn commandFn, paramNames ...string) *CLI {\n\tnameParts := strings.Split(os.Args[0], \"\/\")\n\tcli := new(CLI)\n\tcli.init(nameParts[len(nameParts)-1], desc, fn, paramNames...)\n\tcli.version = version\n\tcli.description = desc\n\treturn cli\n}\n\n\/\/ DefineSubCommand return a SubCommand and adds the current CLI as the parent\nfunc (cmd *CLI) DefineSubCommand(name string, desc string, fn commandFn, paramNames ...string) *SubCommand {\n\tsubcmd := cmd.subCommandable.DefineSubCommand(name, desc, fn, paramNames...)\n\tcmd.parent = subcmd\n\treturn subcmd\n}\n\n\/\/ Description returns the command description\nfunc (cmd *CLI) Description() string {\n\treturn cmd.description\n}\n\n\/\/ Name returns the command name\nfunc (cmd *CLI) Name() string {\n\tvar name string\n\tif cmd.parent != nil {\n\t\tname = strings.Join([]string{cmd.parent.Name(), cmd.name}, \" \")\n\t} else {\n\t\tname = cmd.name\n\t}\n\treturn name\n}\n\n\/\/ Parsed reports whether f.Parse has been called.\nfunc (cmd *CLI) Parsed() bool {\n\tcmd.parsed = cmd.flagable.Parsed() && cmd.paramable.Parsed() && cmd.subCommandable.Parsed()\n\treturn cmd.parsed\n}\n\n\/\/ Start starts the command with args, arg[0] is ignored\nfunc (cmd *CLI) Start(args ...string) {\n\tif args == nil {\n\t\targs = os.Args\n\t}\n\n\tif len(args) > 1 {\n\t\targs = args[1:]\n\t} else {\n\t\targs = []string{}\n\t}\n\n\t\/\/ parse flags and args\n\targs = cmd.flagable.parse(args)\n\n\t\/\/ Show a version\n\tif len(cmd.Version()) > 0 && cmd.Flag(\"version\").Get() == true {\n\t\tfmt.Println(cmd.Name(), cmd.Version())\n\t\treturn\n\t}\n\n\t\/\/ Show Help\n\tif cmd.Flag(\"help\").Get() == true {\n\t\tcmd.Usage()\n\t\treturn\n\t}\n\n\t\/\/ Parse Params\n\targs = cmd.paramable.parse(args)\n\n\tif cmd.parseSubCommands(args) {\n\t\treturn\n\t}\n\n\t\/\/ Run the function\n\tcmd.fn(cmd)\n}\n\n\/\/ UsageString returns the command usage as a string\nfunc (cmd *CLI) UsageString() string {\n\thasSubCommands := len(cmd.subCommands) > 0\n\thasParams := len(cmd.params) > 0\n\thasDescription := len(cmd.description) > 0\n\n\t\/\/ Start the Buffer\n\tvar buff bytes.Buffer\n\n\tbuff.WriteString(\"Usage:\\n\")\n\tbuff.WriteString(fmt.Sprintf(\" %s [options...]\", cmd.Name()))\n\n\t\/\/ Write Param Syntax\n\tif hasParams {\n\t\tbuff.WriteString(fmt.Sprintf(\" %s\", cmd.paramable.UsageString()))\n\t}\n\n\t\/\/ Write Sub Command Syntax\n\tif hasSubCommands {\n\t\tbuff.WriteString(\" <command> [arg...]\")\n\t}\n\n\tif hasDescription {\n\t\tbuff.WriteString(fmt.Sprintf(\"\\n\\n%s\", cmd.Description()))\n\t}\n\n\t\/\/ Write Flags Syntax\n\tbuff.WriteString(\"\\n\\nOptions:\\n\")\n\tbuff.WriteString(cmd.flagable.UsageString())\n\n\t\/\/ Write Sub Command List\n\tif hasSubCommands {\n\t\tbuff.WriteString(\"\\n\\nCommands:\\n\")\n\t\tbuff.WriteString(cmd.subCommandable.UsageString())\n\t}\n\n\t\/\/ Return buffer as string\n\treturn buff.String()\n}\n\nfunc (cmd *CLI) init(name, desc string, fn commandFn, paramNames ...string) {\n\twriter := &writer{ErrorHandling: ExitOnError}\n\tcmd.writer = writer\n\tcmd.flagable = flagable{writer: writer}\n\tcmd.paramable = paramable{writer: writer}\n\tcmd.subCommandable = subCommandable{writer: writer}\n\tcmd.name = name\n\tcmd.fn = fn\n\tcmd.description = desc\n\tcmd.setParams(paramNames...)\n\tcmd.usage = func() { fmt.Println(cmd.UsageString()) }\n}\n<commit_msg>fix subcmd bug<commit_after>package cli\n\nimport \"os\"\nimport \"fmt\"\nimport \"bytes\"\nimport \"strings\"\n\n\/\/ CLI represents a set of defined flags. The zero value of a FlagSet\n\/\/ has no name and has ContinueOnError error handling.\ntype CLI struct {\n\tflagable\n\tparamable\n\tsubCommandable\n\t*writer\n\n\tfn commandFn\n\tname string\n\tdescription string\n\tparsed bool\n}\n\n\/\/ NewCLI returns a new cli with the specified name and\n\/\/ error handling property.\nfunc NewCLI(version, desc string, fn commandFn, paramNames ...string) *CLI {\n\tnameParts := strings.Split(os.Args[0], \"\/\")\n\tcli := new(CLI)\n\tcli.init(nameParts[len(nameParts)-1], desc, fn, paramNames...)\n\tcli.version = version\n\tcli.description = desc\n\treturn cli\n}\n\n\/\/ DefineSubCommand return a SubCommand and adds the current CLI as the parent\nfunc (cmd *CLI) DefineSubCommand(name string, desc string, fn commandFn, paramNames ...string) *SubCommand {\n\tsubcmd := cmd.subCommandable.DefineSubCommand(name, desc, fn, paramNames...)\n\tsubcmd.parent = cmd\n\treturn subcmd\n}\n\n\/\/ Description returns the command description\nfunc (cmd *CLI) Description() string {\n\treturn cmd.description\n}\n\n\/\/ Name returns the command name\nfunc (cmd *CLI) Name() string {\n\tvar name string\n\tif cmd.parent != nil {\n\t\tname = strings.Join([]string{cmd.parent.Name(), cmd.name}, \" \")\n\t} else {\n\t\tname = cmd.name\n\t}\n\treturn name\n}\n\n\/\/ Parsed reports whether f.Parse has been called.\nfunc (cmd *CLI) Parsed() bool {\n\tcmd.parsed = cmd.flagable.Parsed() && cmd.paramable.Parsed() && cmd.subCommandable.Parsed()\n\treturn cmd.parsed\n}\n\n\/\/ Start starts the command with args, arg[0] is ignored\nfunc (cmd *CLI) Start(args ...string) {\n\tif args == nil {\n\t\targs = os.Args\n\t}\n\n\tif len(args) > 1 {\n\t\targs = args[1:]\n\t} else {\n\t\targs = []string{}\n\t}\n\n\t\/\/ parse flags and args\n\targs = cmd.flagable.parse(args)\n\n\t\/\/ Show a version\n\tif len(cmd.Version()) > 0 && cmd.Flag(\"version\").Get() == true {\n\t\tfmt.Println(cmd.Name(), cmd.Version())\n\t\treturn\n\t}\n\n\t\/\/ Show Help\n\tif cmd.Flag(\"help\").Get() == true {\n\t\tcmd.Usage()\n\t\treturn\n\t}\n\n\t\/\/ Parse Params\n\targs = cmd.paramable.parse(args)\n\n\tif cmd.parseSubCommands(args) {\n\t\treturn\n\t}\n\n\t\/\/ Run the function\n\tcmd.fn(cmd)\n}\n\n\/\/ UsageString returns the command usage as a string\nfunc (cmd *CLI) UsageString() string {\n\thasSubCommands := len(cmd.subCommands) > 0\n\thasParams := len(cmd.params) > 0\n\thasDescription := len(cmd.description) > 0\n\n\t\/\/ Start the Buffer\n\tvar buff bytes.Buffer\n\n\tbuff.WriteString(\"Usage:\\n\")\n\tbuff.WriteString(fmt.Sprintf(\" %s [options...]\", cmd.Name()))\n\n\t\/\/ Write Param Syntax\n\tif hasParams {\n\t\tbuff.WriteString(fmt.Sprintf(\" %s\", cmd.paramable.UsageString()))\n\t}\n\n\t\/\/ Write Sub Command Syntax\n\tif hasSubCommands {\n\t\tbuff.WriteString(\" <command> [arg...]\")\n\t}\n\n\tif hasDescription {\n\t\tbuff.WriteString(fmt.Sprintf(\"\\n\\n%s\", cmd.Description()))\n\t}\n\n\t\/\/ Write Flags Syntax\n\tbuff.WriteString(\"\\n\\nOptions:\\n\")\n\tbuff.WriteString(cmd.flagable.UsageString())\n\n\t\/\/ Write Sub Command List\n\tif hasSubCommands {\n\t\tbuff.WriteString(\"\\n\\nCommands:\\n\")\n\t\tbuff.WriteString(cmd.subCommandable.UsageString())\n\t}\n\n\t\/\/ Return buffer as string\n\treturn buff.String()\n}\n\nfunc (cmd *CLI) init(name, desc string, fn commandFn, paramNames ...string) {\n\twriter := &writer{ErrorHandling: ExitOnError}\n\tcmd.writer = writer\n\tcmd.flagable = flagable{writer: writer}\n\tcmd.paramable = paramable{writer: writer}\n\tcmd.subCommandable = subCommandable{writer: writer}\n\tcmd.name = name\n\tcmd.fn = fn\n\tcmd.description = desc\n\tcmd.setParams(paramNames...)\n\tcmd.usage = func() { fmt.Println(cmd.UsageString()) }\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage flushfs\n\nimport (\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com\/jacobsa\/fuse\"\n\t\"github.com\/jacobsa\/fuse\/fuseutil\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ Create a file system containing a single file named \"foo\".\n\/\/\n\/\/ The file may be opened for reading and\/or writing. Its initial contents are\n\/\/ empty. Whenever a flush or fsync is received, the supplied function will be\n\/\/ called with the current contents of the file.\nfunc NewFileSystem(\n\treportFlush func(string),\n\treportFsync func(string)) (fs fuse.FileSystem, err error) {\n\tfs = &flushFS{}\n\treturn\n}\n\nconst fooID = fuse.RootInodeID + 1\n\ntype flushFS struct {\n\tfuseutil.NotImplementedFileSystem\n\n\tmu sync.Mutex\n\tfoo *os.File \/\/ GUARDED_BY(mu)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *flushFS) rootAttributes() fuse.InodeAttributes {\n\treturn fuse.InodeAttributes{\n\t\tNlink: 1,\n\t\tMode: 0777 | os.ModeDir,\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *flushFS) fooAttributes() fuse.InodeAttributes {\n\treturn fuse.InodeAttributes{\n\t\tNlink: 1,\n\t\tMode: 0777,\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ File system methods\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (fs *flushFS) Init(\n\tctx context.Context,\n\treq *fuse.InitRequest) (\n\tresp *fuse.InitResponse, err error) {\n\tresp = &fuse.InitResponse{}\n\treturn\n}\n\nfunc (fs *flushFS) LookUpInode(\n\tctx context.Context,\n\treq *fuse.LookUpInodeRequest) (\n\tresp *fuse.LookUpInodeResponse, err error) {\n\tresp = &fuse.LookUpInodeResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Sanity check.\n\tif req.Parent != fuse.RootInodeID || req.Name != \"foo\" {\n\t\terr = fuse.ENOENT\n\t\treturn\n\t}\n\n\tresp.Entry = fuse.ChildInodeEntry{\n\t\tChild: fooID,\n\t\tAttributes: fs.fooAttributes(),\n\t}\n\n\treturn\n}\n\nfunc (fs *flushFS) GetInodeAttributes(\n\tctx context.Context,\n\treq *fuse.GetInodeAttributesRequest) (\n\tresp *fuse.GetInodeAttributesResponse, err error) {\n\tresp = &fuse.GetInodeAttributesResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\tswitch req.Inode {\n\tcase fuse.RootInodeID:\n\t\tresp.Attributes = fs.rootAttributes()\n\t\treturn\n\n\tcase fooID:\n\t\tresp.Attributes = fs.fooAttributes()\n\t\treturn\n\n\tdefault:\n\t\terr = fuse.ENOENT\n\t\treturn\n\t}\n}\n<commit_msg>flushFS.OpenFile.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage flushfs\n\nimport (\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com\/jacobsa\/fuse\"\n\t\"github.com\/jacobsa\/fuse\/fuseutil\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ Create a file system containing a single file named \"foo\".\n\/\/\n\/\/ The file may be opened for reading and\/or writing. Its initial contents are\n\/\/ empty. Whenever a flush or fsync is received, the supplied function will be\n\/\/ called with the current contents of the file.\nfunc NewFileSystem(\n\treportFlush func(string),\n\treportFsync func(string)) (fs fuse.FileSystem, err error) {\n\tfs = &flushFS{}\n\treturn\n}\n\nconst fooID = fuse.RootInodeID + 1\n\ntype flushFS struct {\n\tfuseutil.NotImplementedFileSystem\n\n\tmu sync.Mutex\n\tfoo *os.File \/\/ GUARDED_BY(mu)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *flushFS) rootAttributes() fuse.InodeAttributes {\n\treturn fuse.InodeAttributes{\n\t\tNlink: 1,\n\t\tMode: 0777 | os.ModeDir,\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *flushFS) fooAttributes() fuse.InodeAttributes {\n\treturn fuse.InodeAttributes{\n\t\tNlink: 1,\n\t\tMode: 0777,\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ File system methods\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (fs *flushFS) Init(\n\tctx context.Context,\n\treq *fuse.InitRequest) (\n\tresp *fuse.InitResponse, err error) {\n\tresp = &fuse.InitResponse{}\n\treturn\n}\n\nfunc (fs *flushFS) LookUpInode(\n\tctx context.Context,\n\treq *fuse.LookUpInodeRequest) (\n\tresp *fuse.LookUpInodeResponse, err error) {\n\tresp = &fuse.LookUpInodeResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Sanity check.\n\tif req.Parent != fuse.RootInodeID || req.Name != \"foo\" {\n\t\terr = fuse.ENOENT\n\t\treturn\n\t}\n\n\tresp.Entry = fuse.ChildInodeEntry{\n\t\tChild: fooID,\n\t\tAttributes: fs.fooAttributes(),\n\t}\n\n\treturn\n}\n\nfunc (fs *flushFS) GetInodeAttributes(\n\tctx context.Context,\n\treq *fuse.GetInodeAttributesRequest) (\n\tresp *fuse.GetInodeAttributesResponse, err error) {\n\tresp = &fuse.GetInodeAttributesResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\tswitch req.Inode {\n\tcase fuse.RootInodeID:\n\t\tresp.Attributes = fs.rootAttributes()\n\t\treturn\n\n\tcase fooID:\n\t\tresp.Attributes = fs.fooAttributes()\n\t\treturn\n\n\tdefault:\n\t\terr = fuse.ENOENT\n\t\treturn\n\t}\n}\n\nfunc (fs *flushFS) OpenFile(\n\tctx context.Context,\n\treq *fuse.OpenFileRequest) (\n\tresp *fuse.OpenFileResponse, err error) {\n\tresp = &fuse.OpenFileResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Sanity check.\n\tif req.Inode != fooID {\n\t\terr = fuse.ENOSYS\n\t\treturn\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package clocky\n\nimport (\n\t\"http\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"appengine\"\n\t\"appengine\/memcache\"\n\t\"appengine\/urlfetch\"\n)\n\n\/\/ http:\/\/www.sfmta.com\/cms\/asite\/nextmunidata.htm\nconst NextBusURL = (\"http:\/\/webservices.nextbus.com\/service\/publicXMLFeed?\" +\n\t\"command=predictionsForMultiStops&a=sf-muni\" +\n\t\"&stops=1|null|4016\" +\n\t\"&stop=1|null|6297\" +\n\t\"&stops=10|null|5859\" +\n\t\"&stops=12|null|5859\" +\n\t\"&stops=27|null|35165\" +\n\t\"&stops=47|null|6825\" +\n\t\"&stops=49|null|6825\")\n\nconst WeatherURL = \"http:\/\/forecast.weather.gov\/MapClick.php?lat=37.79570&lon=-122.42100&FcstType=dwml&unit=1\"\n\nfunc fetch(r *http.Request, key, url string, expiration int32) os.Error {\n\tc := appengine.NewContext(r)\n\n\tclient := urlfetch.Client(c)\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcontents, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp.Body.Close()\n\n\titem := &memcache.Item{\n\t\tKey: key,\n\t\tValue: contents,\n\t\tExpiration: expiration,\n\t}\n\tif err := memcache.Set(c, item); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nconst nextBusKey = \"nextbus\"\nconst weatherKey = \"weather\"\n\nfunc fetchNextBus(r *http.Request) os.Error {\n\treturn fetch(r, nextBusKey, NextBusURL, 300)\n}\n\nfunc fetchWeather(r *http.Request) os.Error {\n\treturn fetch(r, weatherKey, WeatherURL, 4*3600)\n}\n\nfunc fetchNextBusHandler(w http.ResponseWriter, r *http.Request) {\n\tif err := fetchNextBus(r); err != nil {\n\t\thttp.Error(w, err.String(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tio.WriteString(w, \"ok\\n\")\n}\n\nfunc fetchWeatherHandler(w http.ResponseWriter, r *http.Request) {\n\tif err := fetchWeather(r); err != nil {\n\t\thttp.Error(w, err.String(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tio.WriteString(w, \"ok\\n\")\n}\n\nfunc warmup(w http.ResponseWriter, r *http.Request) {\n\tif err := fetchNextBus(r); err != nil {\n\t\thttp.Error(w, err.String(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif err := fetchWeather(r); err != nil {\n\t\thttp.Error(w, err.String(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tio.WriteString(w, \"ok\\n\")\n}\n\nfunc init() {\n\thttp.HandleFunc(\"\/fetch\/nextbus\", fetchNextBusHandler)\n\thttp.HandleFunc(\"\/fetch\/weather\", fetchWeatherHandler)\n\thttp.HandleFunc(\"\/ah\/_warmup\", warmup)\n}\n<commit_msg>Fixes.<commit_after>package clocky\n\nimport (\n\t\"http\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\n\t\"appengine\"\n\t\"appengine\/memcache\"\n\t\"appengine\/urlfetch\"\n)\n\n\/\/ http:\/\/www.sfmta.com\/cms\/asite\/nextmunidata.htm\nconst NextBusURL = (\"http:\/\/webservices.nextbus.com\/service\/publicXMLFeed?\" +\n\t\"command=predictionsForMultiStops&a=sf-muni\" +\n\t\"&stops=1|null|4016\" +\n\t\"&stop=1|null|6297\" +\n\t\"&stops=10|null|5859\" +\n\t\"&stops=12|null|5859\" +\n\t\"&stops=27|null|35165\" +\n\t\"&stops=47|null|6825\" +\n\t\"&stops=49|null|6825\")\n\nconst WeatherURL = \"http:\/\/forecast.weather.gov\/MapClick.php?lat=37.79570&lon=-122.42100&FcstType=dwml&unit=1\"\n\nfunc fetch(r *http.Request, key, url string, expiration int32) os.Error {\n\tc := appengine.NewContext(r)\n\n\tclient := urlfetch.Client(c)\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcontents, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp.Body.Close()\n\n\titem := &memcache.Item{\n\t\tKey: key,\n\t\tValue: contents,\n\t\tExpiration: expiration,\n\t}\n\tif err := memcache.Set(c, item); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nconst nextBusKey = \"nextbus\"\nconst weatherKey = \"weather\"\n\n\/\/ Normally called whenever a page refresh notices stale data.\nfunc fetchNextBus(r *http.Request) os.Error {\n\treturn fetch(r, nextBusKey, NextBusURL, 300)\n}\n\n\/\/ Normally called hourly by cron.\n\/\/ http:\/\/graphical.weather.gov\/xml\/mdl\/XML\/Design\/WebServicesUseGuildlines.php\nfunc fetchWeather(r *http.Request) os.Error {\n\treturn fetch(r, weatherKey, WeatherURL, 4*3600)\n}\n\nfunc fetchNextBusHandler(w http.ResponseWriter, r *http.Request) {\n\tif err := fetchNextBus(r); err != nil {\n\t\thttp.Error(w, err.String(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tio.WriteString(w, \"ok\\n\")\n}\n\nfunc fetchWeatherHandler(w http.ResponseWriter, r *http.Request) {\n\tif err := fetchWeather(r); err != nil {\n\t\thttp.Error(w, err.String(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tio.WriteString(w, \"ok\\n\")\n}\n\nfunc warmup(w http.ResponseWriter, r *http.Request) {\n\tch := make(chan os.Error)\n\tgo func() { ch <- fetchNextBus(r) }()\n\tgo func() { ch <- fetchWeather(r) }()\n\tfor i := 0; i < 2; i++ {\n\t\tif err := <-ch; err != nil {\n\t\t\thttp.Error(w, err.String(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t}\n\tio.WriteString(w, \"ok\\n\")\n}\n\nfunc init() {\n\thttp.HandleFunc(\"\/fetch\/nextbus\", fetchNextBusHandler)\n\thttp.HandleFunc(\"\/fetch\/weather\", fetchWeatherHandler)\n\thttp.HandleFunc(\"\/_ah\/warmup\", warmup)\n}\n<|endoftext|>"} {"text":"<commit_before>package ws\n\nimport (\n\t\"fmt\"\n\t\"hash\/fnv\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"sync\"\n)\n\nconst (\n\tFlagDir uint32 = 1 << iota\n\tFlagLogical\n\tFlagMount\n)\n\ntype Id uint32\n\nfunc NewId(path string) Id {\n\th := fnv.New32()\n\th.Write([]byte(path))\n\treturn Id(h.Sum32())\n}\n\ntype Dir struct {\n\tPath string\n\tChildren []*Res\n}\n\ntype Res struct {\n\tsync.Mutex\n\tId Id\n\tName string\n\tFlag uint32\n\tParent *Res\n\t*Dir\n}\n\nfunc (r *Res) Path() string {\n\tif r == nil {\n\t\treturn \"\"\n\t}\n\tif r.Dir != nil {\n\t\treturn r.Dir.Path\n\t}\n\treturn r.Parent.Path() + string(os.PathSeparator) + r.Name\n}\nfunc newChild(pa *Res, fi os.FileInfo) *Res {\n\tr := &Res{Name: fi.Name(), Parent: pa}\n\tp := r.Path()\n\tr.Id = NewId(p)\n\tif fi.IsDir() {\n\t\tr.Flag |= FlagDir\n\t\tr.Dir = &Dir{Path: p}\n\t}\n\treturn r\n}\n\ntype Watcher interface {\n\tWatch(r *Res) error\n\tClose()\n}\n\ntype Config struct {\n\tCapHint uint\n\tWatcher func(*Ws) (Watcher, error)\n}\n\ntype Ws struct {\n\tsync.RWMutex\n\tconfig Config\n\troot *Res\n\tall map[Id]*Res\n\twatcher Watcher\n}\n\n\/\/ New creates a workspace configured with c.\nfunc New(c Config) *Ws {\n\tr := &Res{Id: NewId(\"\/\")}\n\tw := &Ws{config: c, root: r, all: make(map[Id]*Res, c.CapHint)}\n\tw.all[r.Id] = r\n\treturn w\n}\n\n\/\/ Mount adds the directory tree at path to the workspace.\nfunc (w *Ws) Mount(path string) (*Res, error) {\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !fi.IsDir() {\n\t\treturn nil, fmt.Errorf(\"not a directory\")\n\t}\n\tr, err := w.mount(path)\n\tif err != nil {\n\t\treturn r, err\n\t}\n\tr.Lock()\n\terr = read(r)\n\tr.Unlock()\n\tif err != nil {\n\t\treturn r, err\n\t}\n\tw.Lock()\n\tdefer w.Unlock()\n\tw.addAllChildren(r)\n\treturn r, nil\n}\nfunc (w *Ws) mount(path string) (*Res, error) {\n\tid := NewId(path)\n\td, f := filepath.Split(path)\n\tw.Lock()\n\tdefer w.Unlock()\n\tr, ok := w.all[id]\n\tif ok {\n\t\treturn r, fmt.Errorf(\"duplicate\")\n\t}\n\tr = &Res{Id: id, Name: f, Flag: FlagDir | FlagMount, Dir: &Dir{Path: path}}\n\t\/\/ add virtual parent\n\tr.Parent = w.addParent(d[:len(d)-1])\n\tw.all[id] = r\n\treturn r, nil\n}\n\n\/\/ Close closes the workspace.\n\/\/ The workspace and all its resources are invalid afterwards.\nfunc (w *Ws) Close() {\n\tw.Lock()\n\tdefer w.Unlock()\n\tif w.watcher != nil {\n\t\tw.watcher.Close()\n\t\tw.watcher = nil\n\t}\n\t\/\/ scatter garbage\n\tfor id, r := range w.all {\n\t\tr.Parent, r.Dir = nil, nil\n\t\tdelete(w.all, id)\n\t}\n\tw.all = nil\n\tw.root = nil\n}\nfunc (w *Ws) addParent(path string) *Res {\n\tid := NewId(path)\n\tif r, ok := w.all[id]; ok {\n\t\treturn r\n\t}\n\tparent, name := filepath.Split(path)\n\tvar pa *Res\n\tif len(parent) > 0 && parent != \"\/\" {\n\t\tpa = w.addParent(parent[:len(parent)-1])\n\t}\n\tr := &Res{Id: id, Name: name, Flag: FlagDir | FlagLogical, Parent: pa}\n\tw.all[id] = r\n\treturn r\n}\nfunc read(r *Res) error {\n\tf, err := os.Open(r.Dir.Path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlist, err := f.Readdir(-1)\n\tf.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tchildren := make([]*Res, 0, len(list))\n\tfor _, fi := range list {\n\t\tchildren = append(children, newChild(r, fi))\n\t}\n\tsort.Sort(byTypeAndName(children))\n\tr.Children = children\n\tfor _, c := range children {\n\t\tif c.Flag&FlagDir != 0 {\n\t\t\tif err := read(c); err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\nfunc (w *Ws) addAllChildren(r *Res) {\n\tfor _, c := range r.Children {\n\t\tw.all[c.Id] = c\n\t\tif c.Flag&FlagDir != 0 {\n\t\t\tw.addAllChildren(c)\n\t\t}\n\t}\n\tif w.watcher != nil {\n\t\terr := w.watcher.Watch(r)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}\n}\n\ntype byTypeAndName []*Res\n\nfunc (l byTypeAndName) Len() int {\n\treturn len(l)\n}\nfunc (l byTypeAndName) Less(i, j int) bool {\n\tif isdir := l[i].Flag&FlagDir != 0; isdir != (l[j].Flag&FlagDir != 0) {\n\t\treturn isdir\n\t}\n\treturn l[i].Name < l[j].Name\n}\nfunc (l byTypeAndName) Swap(i, j int) {\n\tl[i], l[j] = l[j], l[i]\n}\n<commit_msg>adds handler callback, workspace operations and controller.<commit_after>package ws\n\nimport (\n\t\"fmt\"\n\t\"hash\/fnv\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"sync\"\n)\n\nconst (\n\tFlagDir uint32 = 1 << iota\n\tFlagLogical\n\tFlagMount\n)\n\ntype Id uint32\n\nfunc NewId(path string) Id {\n\th := fnv.New32()\n\th.Write([]byte(path))\n\treturn Id(h.Sum32())\n}\n\ntype Dir struct {\n\tPath string\n\tChildren []*Res\n}\n\ntype Res struct {\n\tsync.Mutex\n\tId Id\n\tName string\n\tFlag uint32\n\tParent *Res\n\t*Dir\n}\n\nfunc (r *Res) Path() string {\n\tif r == nil {\n\t\treturn \"\"\n\t}\n\tif r.Dir != nil {\n\t\treturn r.Dir.Path\n\t}\n\treturn r.Parent.Path() + string(os.PathSeparator) + r.Name\n}\nfunc newChild(pa *Res, fi os.FileInfo) *Res {\n\tr := &Res{Name: fi.Name(), Parent: pa}\n\tp := r.Path()\n\tr.Id = NewId(p)\n\tif fi.IsDir() {\n\t\tr.Flag |= FlagDir\n\t\tr.Dir = &Dir{Path: p}\n\t}\n\treturn r\n}\n\ntype Watcher interface {\n\tWatch(r *Res) error\n\tClose() error\n}\ntype Op uint\n\nconst (\n\tAdd Op = 1 << iota\n\tChange\n\tRemove\n\t_\n\tCreate\n\tModify\n\tDelete\n\t_\n\tWsMask Op = 0x0F\n\tFsMask Op = 0xF0\n)\n\ntype Config struct {\n\tCapHint uint\n\tWatcher func(Controller) (Watcher, error)\n\tHandler func(Op, *Res)\n}\n\nfunc (c Config) handle(op Op, r *Res) {\n\tif c.Handler != nil {\n\t\tc.Handler(op, r)\n\t}\n}\n\ntype Controller interface {\n\tControl(op Op, id Id, name string) error\n}\n\ntype Ws struct {\n\tsync.RWMutex\n\tconfig Config\n\troot *Res\n\tall map[Id]*Res\n\twatcher Watcher\n}\n\n\/\/ New creates a workspace configured with c.\nfunc New(c Config) *Ws {\n\tr := &Res{Id: NewId(\"\/\")}\n\tw := &Ws{config: c, root: r, all: make(map[Id]*Res, c.CapHint)}\n\tw.all[r.Id] = r\n\treturn w\n}\n\n\/\/ Mount adds the directory tree at path to the workspace.\nfunc (w *Ws) Mount(path string) (*Res, error) {\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !fi.IsDir() {\n\t\treturn nil, fmt.Errorf(\"not a directory\")\n\t}\n\tr, err := w.mount(path)\n\tif err != nil {\n\t\treturn r, err\n\t}\n\tr.Lock()\n\terr = read(r)\n\tr.Unlock()\n\tif err != nil {\n\t\treturn r, err\n\t}\n\tw.Lock()\n\tdefer w.Unlock()\n\tw.addAllChildren(0, r)\n\treturn r, nil\n}\nfunc (w *Ws) mount(path string) (*Res, error) {\n\tid := NewId(path)\n\td, f := filepath.Split(path)\n\tw.Lock()\n\tdefer w.Unlock()\n\tif w.watcher == nil && w.config.Watcher != nil {\n\t\twatcher, err := w.config.Watcher((*ctrl)(w))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tw.watcher = watcher\n\t}\n\tr, ok := w.all[id]\n\tif ok {\n\t\treturn r, fmt.Errorf(\"duplicate\")\n\t}\n\tr = &Res{Id: id, Name: f, Flag: FlagDir | FlagMount, Dir: &Dir{Path: path}}\n\t\/\/ add virtual parent\n\tr.Parent = w.addParent(d[:len(d)-1])\n\tw.all[id] = r\n\tw.config.handle(Add, r)\n\treturn r, nil\n}\n\n\/\/ Close closes the workspace.\n\/\/ The workspace and all its resources are invalid afterwards.\nfunc (w *Ws) Close() {\n\tw.Lock()\n\tdefer w.Unlock()\n\tif w.watcher != nil {\n\t\tw.watcher.Close()\n\t\tw.watcher = nil\n\t}\n\t\/\/ scatter garbage\n\tfor id, r := range w.all {\n\t\tr.Parent, r.Dir = nil, nil\n\t\tdelete(w.all, id)\n\t}\n\tw.all = nil\n\tw.root = nil\n}\nfunc (w *Ws) addParent(path string) *Res {\n\tid := NewId(path)\n\tif r, ok := w.all[id]; ok {\n\t\treturn r\n\t}\n\tparent, name := filepath.Split(path)\n\tvar pa *Res\n\tif len(parent) > 0 && parent != \"\/\" {\n\t\tpa = w.addParent(parent[:len(parent)-1])\n\t}\n\tr := &Res{Id: id, Name: name, Flag: FlagDir | FlagLogical, Parent: pa}\n\tw.all[id] = r\n\treturn r\n}\nfunc read(r *Res) error {\n\tf, err := os.Open(r.Dir.Path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlist, err := f.Readdir(-1)\n\tf.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\tchildren := make([]*Res, 0, len(list))\n\tfor _, fi := range list {\n\t\tchildren = append(children, newChild(r, fi))\n\t}\n\tsort.Sort(byTypeAndName(children))\n\tr.Children = children\n\tfor _, c := range children {\n\t\tif c.Flag&FlagDir != 0 {\n\t\t\tif err := read(c); err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\nfunc (w *Ws) addAllChildren(fsop Op, r *Res) {\n\tfor _, c := range r.Children {\n\t\tw.all[c.Id] = c\n\t\tw.config.handle(fsop|Add, c)\n\t\tif c.Flag&FlagDir != 0 {\n\t\t\tw.addAllChildren(fsop, c)\n\t\t}\n\t}\n\tif w.watcher != nil {\n\t\terr := w.watcher.Watch(r)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t}\n\tw.config.handle(fsop|Change, r)\n}\n\ntype ctrl Ws\n\nfunc (w *ctrl) Control(op Op, id Id, name string) error {\n\tvar r, p *Res\n\tw.Lock()\n\tdefer w.Unlock()\n\tr = w.all[id]\n\tif name != \"\" {\n\t\tp, r = r, nil\n\t\tif p != nil && p.Dir != nil {\n\t\t\tp.Lock()\n\t\t\tr = find(p.Children, name)\n\t\t\tp.Unlock()\n\t\t}\n\t}\n\tswitch {\n\tcase op&Delete != 0:\n\t\tif r == nil {\n\t\t\tbreak\n\t\t}\n\t\treturn w.remove(op, r)\n\tcase r != nil:\n\t\t\/\/ res found, modify\n\t\treturn w.change(op, r)\n\tcase p != nil:\n\t\t\/\/ parent found create child\n\t\treturn w.add(op, p, name)\n\t}\n\t\/\/ not found, ignore\n\treturn nil\n}\nfunc (w *ctrl) change(fsop Op, r *Res) error {\n\tw.config.handle(fsop|Change, r)\n\treturn nil\n}\nfunc (w *ctrl) remove(fsop Op, r *Res) error {\n\tif p := r.Parent; p != nil {\n\t\tp.Lock()\n\t\tdefer p.Unlock()\n\t\tif p.Dir != nil {\n\t\t\tp.Children = remove(p.Children, r)\n\t\t}\n\t}\n\trm := []*Res{r}\n\tif r.Dir != nil {\n\t\twalk(r.Children, func(c *Res) error {\n\t\t\trm = append(rm, c)\n\t\t\treturn nil\n\t\t})\n\t}\n\tfor i := len(rm) - 1; i >= 0; i-- {\n\t\tc := rm[i]\n\t\tw.config.handle(fsop|Remove, c)\n\t\tdelete(w.all, c.Id)\n\t}\n\treturn nil\n}\nfunc (w *ctrl) add(fsop Op, p *Res, name string) error {\n\tp.Lock()\n\tdefer p.Unlock()\n\t\/\/ new lock try again\n\tr := find(p.Children, name)\n\t\/\/ ignore duplicate\n\tif r != nil {\n\t\treturn nil\n\t}\n\tr = &Res{Name: name, Parent: p}\n\tpath := r.Path()\n\tr.Id = NewId(path)\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.Children = insert(p.Children, r)\n\tw.all[r.Id] = r\n\tif !fi.IsDir() {\n\t\t\/\/ send event\n\t\treturn nil\n\t}\n\tr.Flag |= FlagDir\n\tr.Dir = &Dir{Path: path}\n\tif err = read(r); err != nil {\n\t\treturn err\n\t}\n\tw.config.handle(fsop|Add, r)\n\t(*Ws)(w).addAllChildren(fsop, r)\n\treturn nil\n}\n\ntype byTypeAndName []*Res\n\nfunc (l byTypeAndName) Len() int {\n\treturn len(l)\n}\nfunc less(i, j *Res) bool {\n\tif isdir := i.Flag&FlagDir != 0; isdir != (j.Flag&FlagDir != 0) {\n\t\treturn isdir\n\t}\n\treturn i.Name < j.Name\n}\nfunc (l byTypeAndName) Less(i, j int) bool {\n\treturn less(l[i], l[j])\n}\nfunc (l byTypeAndName) Swap(i, j int) {\n\tl[i], l[j] = l[j], l[i]\n}\n\nfunc insert(l []*Res, r *Res) []*Res {\n\ti := sort.Search(len(l), func(i int) bool {\n\t\treturn less(r, l[i])\n\t})\n\tif i < len(l) {\n\t\tif i > 0 && l[i-1].Id == r.Id {\n\t\t\tl[i-1] = r\n\t\t\treturn l\n\t\t}\n\t\treturn append(l[:i], append([]*Res{r}, l[i:]...)...)\n\t}\n\treturn append(l, r)\n}\nfunc remove(l []*Res, r *Res) []*Res {\n\ti := sort.Search(len(l), func(i int) bool {\n\t\treturn less(r, l[i])\n\t})\n\tif i > 0 && l[i-1].Id == r.Id {\n\t\treturn append(l[:i-1], l[i:]...)\n\t}\n\treturn l\n}\n\nvar Skip = fmt.Errorf(\"skip\")\n\nfunc walk(l []*Res, f func(*Res) error) error {\n\tvar err error\n\tfor _, c := range l {\n\t\tif err = f(c); err == Skip {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif c.Dir != nil {\n\t\t\tif err = walk(c.Children, f); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\nfunc find(l []*Res, name string) *Res {\n\tfor _, r := range l {\n\t\tif r.Name == name {\n\t\t\treturn r\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Copyright 2018, 2021 Arm Limited.\n * SPDX-License-Identifier: Apache-2.0\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"os\"\n\t\"runtime\/pprof\"\n\n\t\"github.com\/ARM-software\/bob-build\/core\"\n\t\"github.com\/ARM-software\/bob-build\/internal\/utils\"\n)\n\nfunc main() {\n\t\/\/ The primary builder should use the global flag set because the\n\t\/\/ bootstrap package registers its own flags there.\n\tflag.Parse()\n\n\tcpuprofile, present := os.LookupEnv(\"BOB_CPUPROFILE\")\n\tif present && cpuprofile != \"\" {\n\t\tf, err := os.Create(cpuprofile)\n\t\tif err != nil {\n\t\t\tutils.Die(\"%v\", err)\n\t\t}\n\t\tpprof.StartCPUProfile(f)\n\t\tdefer pprof.StopCPUProfile()\n\t}\n\n\tcore.Main()\n}\n<commit_msg>Add memory profiling support<commit_after>\/*\n * Copyright 2018, 2021 Arm Limited.\n * SPDX-License-Identifier: Apache-2.0\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"os\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\n\t\"github.com\/ARM-software\/bob-build\/core\"\n\t\"github.com\/ARM-software\/bob-build\/internal\/utils\"\n)\n\nfunc main() {\n\t\/\/ The primary builder should use the global flag set because the\n\t\/\/ bootstrap package registers its own flags there.\n\tflag.Parse()\n\n\tcpuprofile, present := os.LookupEnv(\"BOB_CPUPROFILE\")\n\tif present && cpuprofile != \"\" {\n\t\tf, err := os.Create(cpuprofile)\n\t\tif err != nil {\n\t\t\tutils.Die(\"%v\", err)\n\t\t}\n\t\tpprof.StartCPUProfile(f)\n\t\tdefer pprof.StopCPUProfile()\n\t}\n\n\tcore.Main()\n\n\tmemprofile, present := os.LookupEnv(\"BOB_MEMPROFILE\")\n\tif present && memprofile != \"\" {\n\t\tf, err := os.Create(memprofile)\n\t\tif err != nil {\n\t\t\tutils.Die(\"%v\", err)\n\t\t}\n\t\truntime.GC()\n\t\tpprof.WriteHeapProfile(f)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/paked\/configure\"\n\t\"github.com\/paked\/messenger\"\n)\n\nvar (\n\tconf = configure.New()\n\tverifyToken = conf.String(\"verify-token\", \"mad-skrilla\", \"The token used to verify facebook\")\n\tverify = conf.Bool(\"should-verify\", false, \"Whether or not the app should verify itself\")\n\tpageToken = conf.String(\"page-token\", \"not skrilla\", \"The token that is used to verify the page on facebook\")\n\tappSecret = conf.String(\"app-secret\", \"\", \"The app secret from the facebook developer portal\")\n\tport = conf.Int(\"port\", 8080, \"The port used to serve the messenger bot\")\n)\n\nfunc main() {\n\tconf.Use(configure.NewFlag())\n\tconf.Use(configure.NewEnvironment())\n\tconf.Use(configure.NewJSONFromFile(\"config.json\"))\n\n\tconf.Parse()\n\n\t\/\/ Create a new messenger client\n\tclient := messenger.New(messenger.Options{\n\t\tVerify: *verify,\n\t\tAppSecret: *appSecret,\n\t\tVerifyToken: *verifyToken,\n\t\tToken: *pageToken,\n\t})\n\n\t\/\/ Setup a handler to be triggered when a message is received\n\tclient.HandleMessage(func(m messenger.Message, r *messenger.Response) {\n\t\tfmt.Printf(\"%v (Sent, %v)\\n\", m.Text, m.Time.Format(time.UnixDate))\n\n\t\tp, err := client.ProfileByID(m.Sender.ID)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Something went wrong!\", err)\n\t\t}\n\n\t\tr.Text(fmt.Sprintf(\"Hello, %v!\", p.FirstName))\n\t})\n\n\t\/\/ Setup a handler to be triggered when a message is delivered\n\tclient.HandleDelivery(func(d messenger.Delivery, r *messenger.Response) {\n\t\tfmt.Println(\"Delivered at:\", d.Watermark().Format(time.UnixDate))\n\t})\n\n\t\/\/ Setup a handler to be triggered when a message is read\n\tclient.HandleRead(func(m messenger.Read, r *messenger.Response) {\n\t\tfmt.Println(\"Read at:\", m.Watermark().Format(time.UnixDate))\n\t})\n\n\tfmt.Printf(\"Serving messenger bot on localhost:%d\\n\", *port)\n\n\thttp.ListenAndServe(fmt.Sprintf(\"localhost:%d\", *port), client.Handler())\n}\n<commit_msg>Fix example to build with latest version of the library<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/paked\/configure\"\n\t\"github.com\/paked\/messenger\"\n)\n\nvar (\n\tconf = configure.New()\n\tverifyToken = conf.String(\"verify-token\", \"mad-skrilla\", \"The token used to verify facebook\")\n\tverify = conf.Bool(\"should-verify\", false, \"Whether or not the app should verify itself\")\n\tpageToken = conf.String(\"page-token\", \"not skrilla\", \"The token that is used to verify the page on facebook\")\n\tappSecret = conf.String(\"app-secret\", \"\", \"The app secret from the facebook developer portal\")\n\tport = conf.Int(\"port\", 8080, \"The port used to serve the messenger bot\")\n)\n\nfunc main() {\n\tconf.Use(configure.NewFlag())\n\tconf.Use(configure.NewEnvironment())\n\tconf.Use(configure.NewJSONFromFile(\"config.json\"))\n\n\tconf.Parse()\n\n\t\/\/ Create a new messenger client\n\tclient := messenger.New(messenger.Options{\n\t\tVerify: *verify,\n\t\tAppSecret: *appSecret,\n\t\tVerifyToken: *verifyToken,\n\t\tToken: *pageToken,\n\t})\n\n\t\/\/ Setup a handler to be triggered when a message is received\n\tclient.HandleMessage(func(m messenger.Message, r *messenger.Response) {\n\t\tfmt.Printf(\"%v (Sent, %v)\\n\", m.Text, m.Time.Format(time.UnixDate))\n\n\t\tp, err := client.ProfileByID(m.Sender.ID)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"Something went wrong!\", err)\n\t\t}\n\n\t\tr.Text(fmt.Sprintf(\"Hello, %v!\", p.FirstName), messenger.ResponseType)\n\t})\n\n\t\/\/ Setup a handler to be triggered when a message is delivered\n\tclient.HandleDelivery(func(d messenger.Delivery, r *messenger.Response) {\n\t\tfmt.Println(\"Delivered at:\", d.Watermark().Format(time.UnixDate))\n\t})\n\n\t\/\/ Setup a handler to be triggered when a message is read\n\tclient.HandleRead(func(m messenger.Read, r *messenger.Response) {\n\t\tfmt.Println(\"Read at:\", m.Watermark().Format(time.UnixDate))\n\t})\n\n\tfmt.Printf(\"Serving messenger bot on localhost:%d\\n\", *port)\n\n\thttp.ListenAndServe(fmt.Sprintf(\"localhost:%d\", *port), client.Handler())\n}\n<|endoftext|>"} {"text":"<commit_before>package backend\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"regexp\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/go-errors\/errors\"\n\t\"github.com\/mhe\/gabi\"\n\t\"github.com\/privacybydesign\/irmago\"\n\t\"github.com\/privacybydesign\/irmago\/irmaserver\"\n)\n\nfunc Initialize(configuration *irmaserver.Configuration) error {\n\tconf = configuration\n\n\tif conf.Logger == nil {\n\t\tconf.Logger = logrus.New()\n\t\tconf.Logger.Level = logrus.DebugLevel\n\t\tconf.Logger.Formatter = &logrus.TextFormatter{}\n\t}\n\n\tif conf.IrmaConfiguration == nil {\n\t\tvar err error\n\t\tconf.IrmaConfiguration, err = irma.NewConfiguration(conf.IrmaConfigurationPath, \"\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = conf.IrmaConfiguration.ParseFolder(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc StartSession(request irma.SessionRequest) (*irma.Qr, string, error) {\n\tif err := request.Validate(); err != nil {\n\t\treturn nil, \"\", err\n\t}\n\taction := irma.ActionUnknown\n\tswitch request.(type) {\n\tcase *irma.DisclosureRequest:\n\t\taction = irma.ActionDisclosing\n\tcase *irma.SignatureRequest:\n\t\taction = irma.ActionSigning\n\tcase *irma.IssuanceRequest:\n\t\taction = irma.ActionIssuing\n\t\tif err := validateIssuanceRequest(request.(*irma.IssuanceRequest)); err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\tdefault:\n\t\tconf.Logger.Warnf(\"Attempt to start session of invalid type\")\n\t\treturn nil, \"\", errors.New(\"Invalid session type\")\n\t}\n\n\tsession := newSession(action, request)\n\tconf.Logger.Infof(\"%s session started, token %s\", action, session.token)\n\treturn &irma.Qr{\n\t\tType: action,\n\t\tURL: session.token,\n\t}, session.token, nil\n}\n\nfunc GetSessionResult(token string) *irmaserver.SessionResult {\n\tsession := sessions.get(token)\n\tif session != nil {\n\t\treturn nil\n\t}\n\treturn session.result\n}\n\nfunc HandleProtocolMessage(\n\tpath string,\n\tmethod string,\n\theaders map[string][]string,\n\tmessage []byte,\n) (status int, output []byte, result *irmaserver.SessionResult) {\n\t\/\/ Parse path into session and action\n\tif len(path) > 0 { \/\/ Remove any starting and trailing slash\n\t\tif path[0] == '\/' {\n\t\t\tpath = path[1:]\n\t\t}\n\t\tif path[len(path)-1] == '\/' {\n\t\t\tpath = path[:len(path)-1]\n\t\t}\n\t}\n\tconf.Logger.Debugf(\"Routing protocol message: %s %s\", method, path)\n\tpattern := regexp.MustCompile(\"(\\\\w+)\/?(\\\\w*)\")\n\tmatches := pattern.FindStringSubmatch(path)\n\tif len(matches) != 3 {\n\t\tconf.Logger.Warnf(\"Invalid URL: %s\", path)\n\t\tstatus, output = responseJson(nil, getError(irmaserver.ErrorInvalidRequest, \"\"))\n\t\treturn\n\t}\n\n\t\/\/ Fetch the session\n\ttoken := matches[1]\n\tverb := matches[2]\n\tsession := sessions.get(token)\n\tif session == nil {\n\t\tconf.Logger.Warnf(\"Session not found: %s\", token)\n\t\tstatus, output = responseJson(nil, getError(irmaserver.ErrorSessionUnknown, \"\"))\n\t\treturn\n\t}\n\tsession.Lock()\n\tdefer session.Unlock()\n\n\t\/\/ However we return, if the session has been finished or cancelled by any of the handlers\n\t\/\/ then we should inform the user by returning a SessionResult - but only if we have not\n\t\/\/ already done this in the past, e.g. by a previous HTTP call handled by this function\n\tdefer func() {\n\t\tif session.finished() && !session.returned {\n\t\t\tsession.returned = true\n\t\t\tresult = session.result\n\t\t}\n\t\tsessions.update(token, session)\n\t}()\n\n\t\/\/ Route to handler\n\tswitch len(verb) {\n\tcase 0:\n\t\tif method == \"DELETE\" {\n\t\t\tsession.handleDelete()\n\t\t\tstatus = http.StatusOK\n\t\t\treturn\n\t\t}\n\t\tif method == \"GET\" {\n\t\t\th := http.Header(headers)\n\t\t\tmin := &irma.ProtocolVersion{}\n\t\t\tmax := &irma.ProtocolVersion{}\n\t\t\tif err := json.Unmarshal([]byte(h.Get(irma.MinVersionHeader)), min); err != nil {\n\t\t\t\tstatus, output = responseJson(nil, session.fail(irmaserver.ErrorMalformedInput, err.Error()))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := json.Unmarshal([]byte(h.Get(irma.MaxVersionHeader)), max); err != nil {\n\t\t\t\tstatus, output = responseJson(nil, session.fail(irmaserver.ErrorMalformedInput, err.Error()))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstatus, output = responseJson(session.handleGetRequest(min, max))\n\t\t\treturn\n\t\t}\n\t\tstatus, output = responseJson(nil, session.fail(irmaserver.ErrorInvalidRequest, \"\"))\n\t\treturn\n\tdefault:\n\t\tif method != \"POST\" {\n\t\t\tstatus, output = responseJson(nil, session.fail(irmaserver.ErrorInvalidRequest, \"\"))\n\t\t\treturn\n\t\t}\n\n\t\tif verb == \"commitments\" && session.action == irma.ActionIssuing {\n\t\t\tcommitments := &gabi.IssueCommitmentMessage{}\n\t\t\tif err := irma.UnmarshalValidate(message, commitments); err != nil {\n\t\t\t\tstatus, output = responseJson(nil, session.fail(irmaserver.ErrorMalformedInput, \"\"))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstatus, output = responseJson(session.handlePostCommitments(commitments))\n\t\t\treturn\n\t\t}\n\t\tif verb == \"proofs\" && session.action == irma.ActionDisclosing {\n\t\t\tproofs := gabi.ProofList{}\n\t\t\tif err := irma.UnmarshalValidate(message, &proofs); err != nil {\n\t\t\t\tstatus, output = responseJson(nil, session.fail(irmaserver.ErrorMalformedInput, \"\"))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstatus, output = responseJson(session.handlePostProofs(proofs))\n\t\t\treturn\n\t\t}\n\t\tif verb == \"proofs\" && session.action == irma.ActionSigning {\n\t\t\tsignature := &irma.SignedMessage{}\n\t\t\tif err := irma.UnmarshalValidate(message, signature); err != nil {\n\t\t\t\tstatus, output = responseJson(nil, session.fail(irmaserver.ErrorMalformedInput, \"\"))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstatus, output = responseJson(session.handlePostSignature(signature))\n\t\t\treturn\n\t\t}\n\n\t\tstatus, output = responseJson(nil, session.fail(irmaserver.ErrorInvalidRequest, \"\"))\n\t\treturn\n\t}\n}\n<commit_msg>Use http.MethodGet and friends<commit_after>package backend\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"regexp\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/go-errors\/errors\"\n\t\"github.com\/mhe\/gabi\"\n\t\"github.com\/privacybydesign\/irmago\"\n\t\"github.com\/privacybydesign\/irmago\/irmaserver\"\n)\n\nfunc Initialize(configuration *irmaserver.Configuration) error {\n\tconf = configuration\n\n\tif conf.Logger == nil {\n\t\tconf.Logger = logrus.New()\n\t\tconf.Logger.Level = logrus.DebugLevel\n\t\tconf.Logger.Formatter = &logrus.TextFormatter{}\n\t}\n\n\tif conf.IrmaConfiguration == nil {\n\t\tvar err error\n\t\tconf.IrmaConfiguration, err = irma.NewConfiguration(conf.IrmaConfigurationPath, \"\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = conf.IrmaConfiguration.ParseFolder(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc StartSession(request irma.SessionRequest) (*irma.Qr, string, error) {\n\tif err := request.Validate(); err != nil {\n\t\treturn nil, \"\", err\n\t}\n\taction := irma.ActionUnknown\n\tswitch request.(type) {\n\tcase *irma.DisclosureRequest:\n\t\taction = irma.ActionDisclosing\n\tcase *irma.SignatureRequest:\n\t\taction = irma.ActionSigning\n\tcase *irma.IssuanceRequest:\n\t\taction = irma.ActionIssuing\n\t\tif err := validateIssuanceRequest(request.(*irma.IssuanceRequest)); err != nil {\n\t\t\treturn nil, \"\", err\n\t\t}\n\tdefault:\n\t\tconf.Logger.Warnf(\"Attempt to start session of invalid type\")\n\t\treturn nil, \"\", errors.New(\"Invalid session type\")\n\t}\n\n\tsession := newSession(action, request)\n\tconf.Logger.Infof(\"%s session started, token %s\", action, session.token)\n\treturn &irma.Qr{\n\t\tType: action,\n\t\tURL: session.token,\n\t}, session.token, nil\n}\n\nfunc GetSessionResult(token string) *irmaserver.SessionResult {\n\tsession := sessions.get(token)\n\tif session != nil {\n\t\treturn nil\n\t}\n\treturn session.result\n}\n\nfunc HandleProtocolMessage(\n\tpath string,\n\tmethod string,\n\theaders map[string][]string,\n\tmessage []byte,\n) (status int, output []byte, result *irmaserver.SessionResult) {\n\t\/\/ Parse path into session and action\n\tif len(path) > 0 { \/\/ Remove any starting and trailing slash\n\t\tif path[0] == '\/' {\n\t\t\tpath = path[1:]\n\t\t}\n\t\tif path[len(path)-1] == '\/' {\n\t\t\tpath = path[:len(path)-1]\n\t\t}\n\t}\n\tconf.Logger.Debugf(\"Routing protocol message: %s %s\", method, path)\n\tpattern := regexp.MustCompile(\"(\\\\w+)\/?(\\\\w*)\")\n\tmatches := pattern.FindStringSubmatch(path)\n\tif len(matches) != 3 {\n\t\tconf.Logger.Warnf(\"Invalid URL: %s\", path)\n\t\tstatus, output = responseJson(nil, getError(irmaserver.ErrorInvalidRequest, \"\"))\n\t\treturn\n\t}\n\n\t\/\/ Fetch the session\n\ttoken := matches[1]\n\tverb := matches[2]\n\tsession := sessions.get(token)\n\tif session == nil {\n\t\tconf.Logger.Warnf(\"Session not found: %s\", token)\n\t\tstatus, output = responseJson(nil, getError(irmaserver.ErrorSessionUnknown, \"\"))\n\t\treturn\n\t}\n\tsession.Lock()\n\tdefer session.Unlock()\n\n\t\/\/ However we return, if the session has been finished or cancelled by any of the handlers\n\t\/\/ then we should inform the user by returning a SessionResult - but only if we have not\n\t\/\/ already done this in the past, e.g. by a previous HTTP call handled by this function\n\tdefer func() {\n\t\tif session.finished() && !session.returned {\n\t\t\tsession.returned = true\n\t\t\tresult = session.result\n\t\t}\n\t\tsessions.update(token, session)\n\t}()\n\n\t\/\/ Route to handler\n\tswitch len(verb) {\n\tcase 0:\n\t\tif method == http.MethodDelete {\n\t\t\tsession.handleDelete()\n\t\t\tstatus = http.StatusOK\n\t\t\treturn\n\t\t}\n\t\tif method == http.MethodGet {\n\t\t\th := http.Header(headers)\n\t\t\tmin := &irma.ProtocolVersion{}\n\t\t\tmax := &irma.ProtocolVersion{}\n\t\t\tif err := json.Unmarshal([]byte(h.Get(irma.MinVersionHeader)), min); err != nil {\n\t\t\t\tstatus, output = responseJson(nil, session.fail(irmaserver.ErrorMalformedInput, err.Error()))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif err := json.Unmarshal([]byte(h.Get(irma.MaxVersionHeader)), max); err != nil {\n\t\t\t\tstatus, output = responseJson(nil, session.fail(irmaserver.ErrorMalformedInput, err.Error()))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstatus, output = responseJson(session.handleGetRequest(min, max))\n\t\t\treturn\n\t\t}\n\t\tstatus, output = responseJson(nil, session.fail(irmaserver.ErrorInvalidRequest, \"\"))\n\t\treturn\n\tdefault:\n\t\tif method != http.MethodPost {\n\t\t\tstatus, output = responseJson(nil, session.fail(irmaserver.ErrorInvalidRequest, \"\"))\n\t\t\treturn\n\t\t}\n\n\t\tif verb == \"commitments\" && session.action == irma.ActionIssuing {\n\t\t\tcommitments := &gabi.IssueCommitmentMessage{}\n\t\t\tif err := irma.UnmarshalValidate(message, commitments); err != nil {\n\t\t\t\tstatus, output = responseJson(nil, session.fail(irmaserver.ErrorMalformedInput, \"\"))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstatus, output = responseJson(session.handlePostCommitments(commitments))\n\t\t\treturn\n\t\t}\n\t\tif verb == \"proofs\" && session.action == irma.ActionDisclosing {\n\t\t\tproofs := gabi.ProofList{}\n\t\t\tif err := irma.UnmarshalValidate(message, &proofs); err != nil {\n\t\t\t\tstatus, output = responseJson(nil, session.fail(irmaserver.ErrorMalformedInput, \"\"))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstatus, output = responseJson(session.handlePostProofs(proofs))\n\t\t\treturn\n\t\t}\n\t\tif verb == \"proofs\" && session.action == irma.ActionSigning {\n\t\t\tsignature := &irma.SignedMessage{}\n\t\t\tif err := irma.UnmarshalValidate(message, signature); err != nil {\n\t\t\t\tstatus, output = responseJson(nil, session.fail(irmaserver.ErrorMalformedInput, \"\"))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstatus, output = responseJson(session.handlePostSignature(signature))\n\t\t\treturn\n\t\t}\n\n\t\tstatus, output = responseJson(nil, session.fail(irmaserver.ErrorInvalidRequest, \"\"))\n\t\treturn\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"os\/exec\"\n\t\"time\"\n\n\t\"launchpad.net\/juju-core\/cmd\"\n\t\"launchpad.net\/juju-core\/juju\"\n\t\"launchpad.net\/juju-core\/state\/api\"\n\t\"launchpad.net\/juju-core\/utils\"\n)\n\n\/\/ SSHCommand is responsible for launching a ssh shell on a given unit or machine.\ntype SSHCommand struct {\n\tSSHCommon\n}\n\n\/\/ SSHCommon provides common methods for SSHCommand, SCPCommand and DebugHooksCommand.\ntype SSHCommon struct {\n\tcmd.EnvCommandBase\n\tTarget string\n\tArgs []string\n\tapiClient *api.Client\n}\n\nconst sshDoc = `\nLaunch an ssh shell on the machine identified by the <target> parameter.\n<target> can be either a machine id as listed by \"juju status\" in the\n\"machines\" section or a unit name as listed in the \"services\" section.\nAny extra parameters are passsed as extra parameters to the ssh command.\n\nExamples\n\nConnect to machine 0:\n\n juju ssh 0\n\nConnect to the second mysql unit:\n\n juju ssh mysql\/0\n`\n\nfunc (c *SSHCommand) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"ssh\",\n\t\tArgs: \"<target> [<ssh args>...]\",\n\t\tPurpose: \"launch an ssh shell on a given unit or machine\",\n\t\tDoc: sshDoc,\n\t}\n}\n\nfunc (c *SSHCommand) Init(args []string) error {\n\tif len(args) == 0 {\n\t\treturn errors.New(\"no target name specified\")\n\t}\n\tc.Target, c.Args = args[0], args[1:]\n\treturn nil\n}\n\n\/\/ Run resolves c.Target to a machine, to the address of a i\n\/\/ machine or unit forks ssh passing any arguments provided.\nfunc (c *SSHCommand) Run(ctx *cmd.Context) error {\n\tif c.apiClient == nil {\n\t\tvar err error\n\t\tc.apiClient, err = c.initAPIClient()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer c.apiClient.Close()\n\t}\n\thost, err := c.hostFromTarget(c.Target)\n\tif err != nil {\n\t\treturn err\n\t}\n\targs := []string{\"-l\", \"ubuntu\", \"-t\", \"-o\", \"StrictHostKeyChecking no\", \"-o\", \"PasswordAuthentication no\", host}\n\targs = append(args, c.Args...)\n\tcmd := exec.Command(\"ssh\", args...)\n\tcmd.Stdin = ctx.Stdin\n\tcmd.Stdout = ctx.Stdout\n\tcmd.Stderr = ctx.Stderr\n\treturn cmd.Run()\n}\n\n\/\/ initAPIClient initialises the state connection.\n\/\/ It is the caller's responsibility to close the connection.\nfunc (c *SSHCommon) initAPIClient() (*api.Client, error) {\n\tvar err error\n\tc.apiClient, err = juju.NewAPIClientFromName(c.EnvName)\n\treturn c.apiClient, err\n}\n\n\/\/ attemptStarter is an interface corresponding to utils.AttemptStrategy\ntype attemptStarter interface {\n\tStart() attempt\n}\n\ntype attempt interface {\n\tNext() bool\n}\n\ntype attemptStrategy utils.AttemptStrategy\n\nfunc (s attemptStrategy) Start() attempt {\n\treturn utils.AttemptStrategy(s).Start()\n}\n\nvar sshHostFromTargetAttemptStrategy attemptStarter = attemptStrategy{\n\tTotal: 5 * time.Second,\n\tDelay: 500 * time.Millisecond,\n}\n\nfunc (c *SSHCommon) hostFromTarget(target string) (string, error) {\n\tvar addr string\n\tvar err error\n\t\/\/ A target may not initially have an address (e.g. the\n\t\/\/ address updater hasn't yet run), so we must do this in\n\t\/\/ a loop.\n\tfor a := sshHostFromTargetAttemptStrategy.Start(); a.Next(); {\n\t\taddr, err = c.apiClient.PublicAddress(target)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tlogger.Infof(\"Resolved public address of %q: %q\", target, addr)\n\treturn addr, nil\n}\n\n\/\/ AllowInterspersedFlags for ssh\/scp is set to false so that\n\/\/ flags after the unit name are passed through to ssh, for eg.\n\/\/ `juju ssh -v service-name\/0 uname -a`.\nfunc (c *SSHCommon) AllowInterspersedFlags() bool {\n\treturn false\n}\n<commit_msg>make the unit example consistent<commit_after>\/\/ Copyright 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"os\/exec\"\n\t\"time\"\n\n\t\"launchpad.net\/juju-core\/cmd\"\n\t\"launchpad.net\/juju-core\/juju\"\n\t\"launchpad.net\/juju-core\/state\/api\"\n\t\"launchpad.net\/juju-core\/utils\"\n)\n\n\/\/ SSHCommand is responsible for launching a ssh shell on a given unit or machine.\ntype SSHCommand struct {\n\tSSHCommon\n}\n\n\/\/ SSHCommon provides common methods for SSHCommand, SCPCommand and DebugHooksCommand.\ntype SSHCommon struct {\n\tcmd.EnvCommandBase\n\tTarget string\n\tArgs []string\n\tapiClient *api.Client\n}\n\nconst sshDoc = `\nLaunch an ssh shell on the machine identified by the <target> parameter.\n<target> can be either a machine id as listed by \"juju status\" in the\n\"machines\" section or a unit name as listed in the \"services\" section.\nAny extra parameters are passsed as extra parameters to the ssh command.\n\nExamples\n\nConnect to machine 0:\n\n juju ssh 0\n\nConnect to the first mysql unit:\n\n juju ssh mysql\/0\n`\n\nfunc (c *SSHCommand) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"ssh\",\n\t\tArgs: \"<target> [<ssh args>...]\",\n\t\tPurpose: \"launch an ssh shell on a given unit or machine\",\n\t\tDoc: sshDoc,\n\t}\n}\n\nfunc (c *SSHCommand) Init(args []string) error {\n\tif len(args) == 0 {\n\t\treturn errors.New(\"no target name specified\")\n\t}\n\tc.Target, c.Args = args[0], args[1:]\n\treturn nil\n}\n\n\/\/ Run resolves c.Target to a machine, to the address of a i\n\/\/ machine or unit forks ssh passing any arguments provided.\nfunc (c *SSHCommand) Run(ctx *cmd.Context) error {\n\tif c.apiClient == nil {\n\t\tvar err error\n\t\tc.apiClient, err = c.initAPIClient()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer c.apiClient.Close()\n\t}\n\thost, err := c.hostFromTarget(c.Target)\n\tif err != nil {\n\t\treturn err\n\t}\n\targs := []string{\"-l\", \"ubuntu\", \"-t\", \"-o\", \"StrictHostKeyChecking no\", \"-o\", \"PasswordAuthentication no\", host}\n\targs = append(args, c.Args...)\n\tcmd := exec.Command(\"ssh\", args...)\n\tcmd.Stdin = ctx.Stdin\n\tcmd.Stdout = ctx.Stdout\n\tcmd.Stderr = ctx.Stderr\n\treturn cmd.Run()\n}\n\n\/\/ initAPIClient initialises the state connection.\n\/\/ It is the caller's responsibility to close the connection.\nfunc (c *SSHCommon) initAPIClient() (*api.Client, error) {\n\tvar err error\n\tc.apiClient, err = juju.NewAPIClientFromName(c.EnvName)\n\treturn c.apiClient, err\n}\n\n\/\/ attemptStarter is an interface corresponding to utils.AttemptStrategy\ntype attemptStarter interface {\n\tStart() attempt\n}\n\ntype attempt interface {\n\tNext() bool\n}\n\ntype attemptStrategy utils.AttemptStrategy\n\nfunc (s attemptStrategy) Start() attempt {\n\treturn utils.AttemptStrategy(s).Start()\n}\n\nvar sshHostFromTargetAttemptStrategy attemptStarter = attemptStrategy{\n\tTotal: 5 * time.Second,\n\tDelay: 500 * time.Millisecond,\n}\n\nfunc (c *SSHCommon) hostFromTarget(target string) (string, error) {\n\tvar addr string\n\tvar err error\n\t\/\/ A target may not initially have an address (e.g. the\n\t\/\/ address updater hasn't yet run), so we must do this in\n\t\/\/ a loop.\n\tfor a := sshHostFromTargetAttemptStrategy.Start(); a.Next(); {\n\t\taddr, err = c.apiClient.PublicAddress(target)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tlogger.Infof(\"Resolved public address of %q: %q\", target, addr)\n\treturn addr, nil\n}\n\n\/\/ AllowInterspersedFlags for ssh\/scp is set to false so that\n\/\/ flags after the unit name are passed through to ssh, for eg.\n\/\/ `juju ssh -v service-name\/0 uname -a`.\nfunc (c *SSHCommon) AllowInterspersedFlags() bool {\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"path\/filepath\"\n\n\t\"github.com\/fatih\/color\"\n\t\"github.com\/yuin\/gopher-lua\"\n)\n\n\/\/go:generate .\/build_lib.sh\n\n\/\/ FindTaskFiles locates task scripts in the project dir.\nfunc FindTaskFiles(dir string) ([]string, error) {\n\tvar luaFiles []string\n\tjoin := filepath.Join\n\tfiles, err := filepath.Glob(join(dir, \"lark.lua\"))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"lark.lua: %v\", err)\n\t}\n\tluaFiles = append(luaFiles, files...)\n\tfiles, err = filepath.Glob(join(dir, \"lark_tasks\/*.lua\"))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"lark_tasks: %v\", err)\n\t}\n\tluaFiles = append(luaFiles, files...)\n\treturn luaFiles, nil\n\n}\n\n\/\/ LuaConfig contains options for a new Lua virtual machine.\ntype LuaConfig struct {\n}\n\n\/\/ LoadVM creates a lua.State from conf and returns it.\nfunc LoadVM(conf *LuaConfig) (s *lua.LState, err error) {\n\ts = lua.NewState()\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\ts.Close()\n\t\t}\n\t}()\n\n\treturn s, nil\n}\n\n\/\/ InitLark initializes the lark library and loads files.\nfunc InitLark(c *Context, files []string) error {\n\terr := LoadLarkLib(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ This needs to come after LoadLarkLib but can't because the primary\n\t\/\/ library is a file.\n\tif len(files) > 0 {\n\t\tlog.Printf(\"loading files: %v\", files)\n\t}\n\tfor _, file := range files {\n\t\terr := c.Lua.DoFile(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ LoadFiles loads the given files into state\nfunc LoadFiles(state *lua.LState, files []string) error {\n\tfor _, file := range files {\n\t\terr := state.DoFile(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ LoadLarkLib loads the default lark module.\nfunc LoadLarkLib(c *Context) error {\n\terr := c.Lua.DoString(LarkLib)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tlark := c.Lua.GetGlobal(\"lark\")\n\tc.Lua.SetField(lark, \"log\", c.Lua.NewFunction(LuaLog))\n\tc.Lua.SetField(lark, \"verbose\", lua.LBool(c.Verbose))\n\treturn nil\n}\n\n\/\/ LuaLog logs a message from lua.\nfunc LuaLog(state *lua.LState) int {\n\topt := &logOpt{}\n\tvar msg string\n\tv1 := state.Get(1)\n\tif v1.Type() == lua.LTTable {\n\t\tarr := luaTableArray(state, v1.(*lua.LTable))\n\t\tif len(arr) > 0 {\n\t\t\tmsg = fmt.Sprint(arr[0])\n\t\t}\n\t} else if v1.Type() == lua.LTString {\n\t\tmsg = string(string(v1.(lua.LString)))\n\t}\n\n\tlcolor, ok := state.GetField(v1, \"color\").(lua.LString)\n\tif ok {\n\t\topt.Color = string(lcolor)\n\t}\n\n\tlogLark(msg, opt)\n\n\treturn 0\n}\n\nfunc luaTableArray(state *lua.LState, t *lua.LTable) []lua.LValue {\n\tvar vals []lua.LValue\n\tt.ForEach(func(kv, vv lua.LValue) {\n\t\tif kv.Type() == lua.LTNumber {\n\t\t\tvals = append(vals, vv)\n\t\t}\n\t})\n\treturn vals\n}\n\ntype logOpt struct {\n\tColor string\n}\n\nfunc logLark(msg string, opt *logOpt) {\n\tif opt == nil {\n\t\topt = &logOpt{}\n\t}\n\n\tvar esc func(format string, v ...interface{}) string\n\tif opt.Color != \"\" {\n\t\tesc = colorMap[opt.Color]\n\t}\n\n\tif esc != nil {\n\t\tmsg = esc(\"%s\", msg)\n\t}\n\tlog.Print(msg)\n}\n\nvar colorMap = map[string]func(format string, v ...interface{}) string{\n\t\"black\": color.BlackString,\n\t\"blue\": color.BlueString,\n\t\"cyan\": color.CyanString,\n\t\"green\": color.GreenString,\n\t\"magenta\": color.MagentaString,\n\t\"red\": color.RedString,\n\t\"white\": color.WhiteString,\n\t\"yellow\": color.YellowString,\n}\n<commit_msg>only log which files are loaded when the -v flag has been provided<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"path\/filepath\"\n\n\t\"github.com\/fatih\/color\"\n\t\"github.com\/yuin\/gopher-lua\"\n)\n\n\/\/go:generate .\/build_lib.sh\n\n\/\/ FindTaskFiles locates task scripts in the project dir.\nfunc FindTaskFiles(dir string) ([]string, error) {\n\tvar luaFiles []string\n\tjoin := filepath.Join\n\tfiles, err := filepath.Glob(join(dir, \"lark.lua\"))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"lark.lua: %v\", err)\n\t}\n\tluaFiles = append(luaFiles, files...)\n\tfiles, err = filepath.Glob(join(dir, \"lark_tasks\/*.lua\"))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"lark_tasks: %v\", err)\n\t}\n\tluaFiles = append(luaFiles, files...)\n\treturn luaFiles, nil\n\n}\n\n\/\/ LuaConfig contains options for a new Lua virtual machine.\ntype LuaConfig struct {\n}\n\n\/\/ LoadVM creates a lua.State from conf and returns it.\nfunc LoadVM(conf *LuaConfig) (s *lua.LState, err error) {\n\ts = lua.NewState()\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t\ts.Close()\n\t\t}\n\t}()\n\n\treturn s, nil\n}\n\n\/\/ InitLark initializes the lark library and loads files.\nfunc InitLark(c *Context, files []string) error {\n\terr := LoadLarkLib(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ This needs to come after LoadLarkLib but can't because the primary\n\t\/\/ library is a file.\n\tif c.Verbose && len(files) > 0 {\n\t\tlog.Printf(\"loading files: %v\", files)\n\t}\n\tfor _, file := range files {\n\t\terr := c.Lua.DoFile(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ LoadFiles loads the given files into state\nfunc LoadFiles(state *lua.LState, files []string) error {\n\tfor _, file := range files {\n\t\terr := state.DoFile(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ LoadLarkLib loads the default lark module.\nfunc LoadLarkLib(c *Context) error {\n\terr := c.Lua.DoString(LarkLib)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tlark := c.Lua.GetGlobal(\"lark\")\n\tc.Lua.SetField(lark, \"log\", c.Lua.NewFunction(LuaLog))\n\tc.Lua.SetField(lark, \"verbose\", lua.LBool(c.Verbose))\n\treturn nil\n}\n\n\/\/ LuaLog logs a message from lua.\nfunc LuaLog(state *lua.LState) int {\n\topt := &logOpt{}\n\tvar msg string\n\tv1 := state.Get(1)\n\tif v1.Type() == lua.LTTable {\n\t\tarr := luaTableArray(state, v1.(*lua.LTable))\n\t\tif len(arr) > 0 {\n\t\t\tmsg = fmt.Sprint(arr[0])\n\t\t}\n\t} else if v1.Type() == lua.LTString {\n\t\tmsg = string(string(v1.(lua.LString)))\n\t}\n\n\tlcolor, ok := state.GetField(v1, \"color\").(lua.LString)\n\tif ok {\n\t\topt.Color = string(lcolor)\n\t}\n\n\tlogLark(msg, opt)\n\n\treturn 0\n}\n\nfunc luaTableArray(state *lua.LState, t *lua.LTable) []lua.LValue {\n\tvar vals []lua.LValue\n\tt.ForEach(func(kv, vv lua.LValue) {\n\t\tif kv.Type() == lua.LTNumber {\n\t\t\tvals = append(vals, vv)\n\t\t}\n\t})\n\treturn vals\n}\n\ntype logOpt struct {\n\tColor string\n}\n\nfunc logLark(msg string, opt *logOpt) {\n\tif opt == nil {\n\t\topt = &logOpt{}\n\t}\n\n\tvar esc func(format string, v ...interface{}) string\n\tif opt.Color != \"\" {\n\t\tesc = colorMap[opt.Color]\n\t}\n\n\tif esc != nil {\n\t\tmsg = esc(\"%s\", msg)\n\t}\n\tlog.Print(msg)\n}\n\nvar colorMap = map[string]func(format string, v ...interface{}) string{\n\t\"black\": color.BlackString,\n\t\"blue\": color.BlueString,\n\t\"cyan\": color.CyanString,\n\t\"green\": color.GreenString,\n\t\"magenta\": color.MagentaString,\n\t\"red\": color.RedString,\n\t\"white\": color.WhiteString,\n\t\"yellow\": color.YellowString,\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Copyright 2016 Frank Wessels <fwessels@xs4all.nl>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"github.com\/s3git\/s3git-go\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar dedupe bool\nvar presignedUrls bool\nvar jsonOutput bool\n\n\/\/ snapshotCmd represents the snapshot command\nvar snapshotCmd = &cobra.Command{\n\tUse: \"snapshot\",\n\tShort: \"Manage snapshots\",\n\tLong: \"Create, checkout and list snapshots\",\n}\n\nvar snapshotCreateCmd = &cobra.Command{\n\tUse: \"create [directory]\",\n\tShort: \"Create a snapshot\",\n\tLong: \"Create a snapshot\",\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\tif len(args) == 0 {\n\t\t\ter(\"Directory for snapshot must be specified\")\n\t\t} else if message == \"\" {\n\t\t\ter(\"Commit message for snapshot must be specified\")\n\t\t}\n\n\t\trepo, err := s3git.OpenRepository(\".\")\n\t\tif err != nil {\n\t\t\ter(err)\n\t\t}\n\n\t\tkey, nothing, err := repo.SnapshotCreate(args[0], message)\n\t\tif err != nil {\n\t\t\ter(err)\n\t\t}\n\t\tif nothing {\n\t\t\tfmt.Println(\"No changes to snapshot\")\n\t\t} else {\n\t\t\tfmt.Printf(\"[commit %s]\\n\", key)\n\t\t}\n\t},\n}\n\nvar snapshotCheckoutCmd = &cobra.Command{\n\tUse: \"checkout [directory] ([commit])\",\n\tShort: \"Checkout a snapshot\",\n\tLong: \"Checkout a snapshot\",\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\t\/\/ TODO: Partial checkout would be nice (eg specify path as filter)\n\n\t\tif len(args) == 0 {\n\t\t\ter(\"Directory for snapshot must be specified\")\n\t\t}\n\n\t\trepo, err := s3git.OpenRepository(\".\")\n\t\tif err != nil {\n\t\t\ter(err)\n\t\t}\n\n\t\tvar commit string\n\t\tif len(args) == 2 {\n\t\t\tcommit = args[1]\n\t\t}\n\n\t\terr = repo.SnapshotCheckout(args[0], commit, dedupe)\n\t\tif err != nil {\n\t\t\ter(err)\n\t\t}\n\t},\n}\n\nvar snapshotListCmd = &cobra.Command{\n\tUse: \"ls ([commit])\",\n\tShort: \"List a snapshot\",\n\tLong: \"List a snapshot\",\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\tif len(args) == 0 {\n\t\t\ter(\"Commit for snapshot must be specified\")\n\t\t}\n\n\t\trepo, err := s3git.OpenRepository(\".\")\n\t\tif err != nil {\n\t\t\ter(err)\n\t\t}\n\n\t\tvar commit string\n\t\tif len(args) == 1 {\n\t\t\tcommit = args[0]\n\t\t}\n\n\t\t\/\/ TODO: Dump result in JSON format\n\t\terr = repo.SnapshotList(commit, presignedUrls)\n\t\tif err != nil {\n\t\t\ter(err)\n\t\t}\n\n\t},\n}\n\nvar snapshotStatusCmd = &cobra.Command{\n\tUse: \"status [directory] ([commit])\",\n\tShort: \"Show changes for snapshot\",\n\tLong: \"Show changes for snapshot\",\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\tif len(args) == 0 {\n\t\t\ter(\"Directory for snapshot must be specified\")\n\t\t}\n\n\t\trepo, err := s3git.OpenRepository(\".\")\n\t\tif err != nil {\n\t\t\ter(err)\n\t\t}\n\n\t\tvar commit string\n\t\tif len(args) == 2 {\n\t\t\tcommit = args[1]\n\t\t}\n\n\t\terr = repo.SnapshotStatus(args[0], commit)\n\t\tif err != nil {\n\t\t\ter(err)\n\t\t}\n\t},\n}\n\nfunc init() {\n\tRootCmd.AddCommand(snapshotCmd)\n\tsnapshotCmd.AddCommand(snapshotCreateCmd)\n\tsnapshotCmd.AddCommand(snapshotCheckoutCmd)\n\tsnapshotCmd.AddCommand(snapshotListCmd)\n\tsnapshotCmd.AddCommand(snapshotStatusCmd)\n\n\t\/\/ Local flags for create\n\tsnapshotCreateCmd.Flags().StringVarP(&message, \"message\", \"m\", \"\", \"Message for the commit of create snapshot\")\n\n\t\/\/ Local flags for checkout\n\tsnapshotCheckoutCmd.Flags().BoolVar(&dedupe, \"dedupe\", false, \"Checkout in deduped (pointers) format\")\n\n\t\/\/ Local flags for list\n\tsnapshotListCmd.Flags().BoolVar(&presignedUrls, \"presigned\", false, \"Generate presigned urls for direct access from S3\")\n\tsnapshotListCmd.Flags().BoolVar(&presignedUrls, \"json\", false, \"Output result in JSON\")\n}\n<commit_msg>Fix buglet in json flag<commit_after>\/*\n * Copyright 2016 Frank Wessels <fwessels@xs4all.nl>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"github.com\/s3git\/s3git-go\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar dedupe bool\nvar presignedUrls bool\nvar jsonOutput bool\n\n\/\/ snapshotCmd represents the snapshot command\nvar snapshotCmd = &cobra.Command{\n\tUse: \"snapshot\",\n\tShort: \"Manage snapshots\",\n\tLong: \"Create, checkout and list snapshots\",\n}\n\nvar snapshotCreateCmd = &cobra.Command{\n\tUse: \"create [directory]\",\n\tShort: \"Create a snapshot\",\n\tLong: \"Create a snapshot\",\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\tif len(args) == 0 {\n\t\t\ter(\"Directory for snapshot must be specified\")\n\t\t} else if message == \"\" {\n\t\t\ter(\"Commit message for snapshot must be specified\")\n\t\t}\n\n\t\trepo, err := s3git.OpenRepository(\".\")\n\t\tif err != nil {\n\t\t\ter(err)\n\t\t}\n\n\t\tkey, nothing, err := repo.SnapshotCreate(args[0], message)\n\t\tif err != nil {\n\t\t\ter(err)\n\t\t}\n\t\tif nothing {\n\t\t\tfmt.Println(\"No changes to snapshot\")\n\t\t} else {\n\t\t\tfmt.Printf(\"[commit %s]\\n\", key)\n\t\t}\n\t},\n}\n\nvar snapshotCheckoutCmd = &cobra.Command{\n\tUse: \"checkout [directory] ([commit])\",\n\tShort: \"Checkout a snapshot\",\n\tLong: \"Checkout a snapshot\",\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\t\/\/ TODO: Partial checkout would be nice (eg specify path as filter)\n\n\t\tif len(args) == 0 {\n\t\t\ter(\"Directory for snapshot must be specified\")\n\t\t}\n\n\t\trepo, err := s3git.OpenRepository(\".\")\n\t\tif err != nil {\n\t\t\ter(err)\n\t\t}\n\n\t\tvar commit string\n\t\tif len(args) == 2 {\n\t\t\tcommit = args[1]\n\t\t}\n\n\t\terr = repo.SnapshotCheckout(args[0], commit, dedupe)\n\t\tif err != nil {\n\t\t\ter(err)\n\t\t}\n\t},\n}\n\nvar snapshotListCmd = &cobra.Command{\n\tUse: \"ls ([commit])\",\n\tShort: \"List a snapshot\",\n\tLong: \"List a snapshot\",\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\tif len(args) == 0 {\n\t\t\ter(\"Commit for snapshot must be specified\")\n\t\t}\n\n\t\trepo, err := s3git.OpenRepository(\".\")\n\t\tif err != nil {\n\t\t\ter(err)\n\t\t}\n\n\t\tvar commit string\n\t\tif len(args) == 1 {\n\t\t\tcommit = args[0]\n\t\t}\n\n\t\t\/\/ TODO: Dump result in JSON format\n\t\terr = repo.SnapshotList(commit, presignedUrls)\n\t\tif err != nil {\n\t\t\ter(err)\n\t\t}\n\n\t},\n}\n\nvar snapshotStatusCmd = &cobra.Command{\n\tUse: \"status [directory] ([commit])\",\n\tShort: \"Show changes for snapshot\",\n\tLong: \"Show changes for snapshot\",\n\tRun: func(cmd *cobra.Command, args []string) {\n\n\t\tif len(args) == 0 {\n\t\t\ter(\"Directory for snapshot must be specified\")\n\t\t}\n\n\t\trepo, err := s3git.OpenRepository(\".\")\n\t\tif err != nil {\n\t\t\ter(err)\n\t\t}\n\n\t\tvar commit string\n\t\tif len(args) == 2 {\n\t\t\tcommit = args[1]\n\t\t}\n\n\t\terr = repo.SnapshotStatus(args[0], commit)\n\t\tif err != nil {\n\t\t\ter(err)\n\t\t}\n\t},\n}\n\nfunc init() {\n\tRootCmd.AddCommand(snapshotCmd)\n\tsnapshotCmd.AddCommand(snapshotCreateCmd)\n\tsnapshotCmd.AddCommand(snapshotCheckoutCmd)\n\tsnapshotCmd.AddCommand(snapshotListCmd)\n\tsnapshotCmd.AddCommand(snapshotStatusCmd)\n\n\t\/\/ Local flags for create\n\tsnapshotCreateCmd.Flags().StringVarP(&message, \"message\", \"m\", \"\", \"Message for the commit of create snapshot\")\n\n\t\/\/ Local flags for checkout\n\tsnapshotCheckoutCmd.Flags().BoolVar(&dedupe, \"dedupe\", false, \"Checkout in deduped (pointers) format\")\n\n\t\/\/ Local flags for list\n\tsnapshotListCmd.Flags().BoolVar(&presignedUrls, \"presigned\", false, \"Generate presigned urls for direct access from S3\")\n\tsnapshotListCmd.Flags().BoolVar(&jsonOutput, \"json\", false, \"Output result in JSON\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/pressly\/stackup\/client\"\n\t\"github.com\/pressly\/stackup\/config\"\n\t\"github.com\/pressly\/stackup\/terminal\"\n\n\t\"github.com\/pressly\/prefixer\"\n\n\tgossh \"golang.org\/x\/crypto\/ssh\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\n\/\/ usage prints help for an arg and exits.\nfunc usage(conf *config.Config, arg int) {\n\tlog.Println(\"Usage: sup <network> <target\/command>\\n\")\n\tswitch arg {\n\tcase 1:\n\t\t\/\/ <network> missing, print available hosts.\n\t\tlog.Println(\"Available networks (from Supfile):\")\n\t\tfor name, network := range conf.Networks {\n\t\t\tlog.Printf(\"- %v\\n\", name)\n\t\t\tfor _, host := range network.Hosts {\n\t\t\t\tlog.Printf(\" - %v\\n\", host)\n\t\t\t}\n\t\t}\n\tcase 2:\n\t\t\/\/ <target\/command> not found or missing,\n\t\t\/\/ print available targets\/commands.\n\t\tlog.Println(\"Available targets (from Supfile):\")\n\t\tfor name, commands := range conf.Targets {\n\t\t\tlog.Printf(\"- %v\", name)\n\t\t\tfor _, cmd := range commands {\n\t\t\t\tlog.Printf(\"\\t%v\\n\", cmd)\n\t\t\t}\n\t\t}\n\t\tlog.Println()\n\t\tlog.Println(\"Available commands (from Supfile):\")\n\t\tfor name, cmd := range conf.Commands {\n\t\t\tlog.Printf(\"- %v\\t%v\", name, cmd.Desc)\n\t\t}\n\t}\n\tos.Exit(1)\n}\n\n\/\/ parseArgs parses os.Args for network and commands to be run.\nfunc parseArgs(conf *config.Config) (config.Network, []config.Command) {\n\tvar commands []config.Command\n\n\t\/\/ Check for the first argument first\n\tif len(os.Args) < 2 {\n\t\tusage(conf, len(os.Args))\n\t}\n\t\/\/ Does the <network> exist?\n\tnetwork, ok := conf.Networks[os.Args[1]]\n\tif !ok {\n\t\tlog.Printf(\"Unknown network \\\"%v\\\"\\n\\n\", os.Args[1])\n\t\tusage(conf, 1)\n\t}\n\n\t\/\/ Does <network> have any hosts?\n\tif len(network.Hosts) == 0 {\n\t\tlog.Printf(\"No hosts specified for network \\\"%v\\\"\", os.Args[1])\n\t\tusage(conf, 1)\n\t}\n\n\t\/\/ Check for the second argument\n\tif len(os.Args) < 3 {\n\t\tusage(conf, len(os.Args))\n\t}\n\t\/\/ Does the <target\/command> exist?\n\ttarget, isTarget := conf.Targets[os.Args[2]]\n\tif isTarget {\n\t\t\/\/ It's the target. Loop over its commands.\n\t\tfor _, cmd := range target {\n\t\t\t\/\/ Does the target's command exist?\n\t\t\tcommand, isCommand := conf.Commands[cmd]\n\t\t\tif !isCommand {\n\t\t\t\tlog.Printf(\"Unknown command \\\"%v\\\" (from target \\\"%v\\\": %v)\\n\\n\", cmd, os.Args[2], target)\n\t\t\t\tusage(conf, 2)\n\t\t\t}\n\t\t\tcommand.Name = cmd\n\t\t\tcommands = append(commands, command)\n\t\t}\n\t} else {\n\t\t\/\/ It's probably a command. Does it exist?\n\t\tcommand, isCommand := conf.Commands[os.Args[2]]\n\t\tif !isCommand {\n\t\t\t\/\/ Not a target, nor command.\n\t\t\tlog.Printf(\"Unknown target\/command \\\"%v\\\"\\n\\n\", os.Args[2])\n\t\t\tusage(conf, 2)\n\t\t}\n\t\tcommand.Name = os.Args[2]\n\t\tcommands = append(commands, command)\n\t}\n\n\t\/\/ Check for extra arguments\n\tif len(os.Args) != 3 {\n\t\tusage(conf, len(os.Args))\n\t}\n\n\treturn network, commands\n}\n\nfunc main() {\n\tvar (\n\t\tconf config.Config\n\t\tpaddingLen int\n\t)\n\n\t\/\/ Read configuration file.\n\tdata, _ := ioutil.ReadFile(\".\/Supfile\")\n\tif err := yaml.Unmarshal(data, &conf); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Parse network and commands to be run from os.Args.\n\tnetwork, commands := parseArgs(&conf)\n\n\t\/\/ Process all ENVs into a string of form\n\t\/\/ `export FOO=\"bar\"; export BAR=\"baz\";`.\n\tenv := ``\n\tfor name, value := range conf.Env {\n\t\tenv += `export ` + name + `=\"` + value + `\";`\n\t}\n\tfor name, value := range network.Env {\n\t\tenv += `export ` + name + `=\"` + value + `\";`\n\t}\n\n\t\/\/ Create clients for every host (either SSH or Localhost).\n\tvar clients []client.Client\n\tfor _, host := range network.Hosts {\n\t\tvar c client.Client\n\n\t\tif host == \"localhost\" { \/\/ LocalhostClient\n\n\t\t\tlocalhostClient := &client.LocalhostClient{\n\t\t\t\tEnv: env,\n\t\t\t}\n\t\t\tif err := localhostClient.Connect(host); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\tc = localhostClient\n\n\t\t} else { \/\/ SSHClient\n\n\t\t\tsshClient := &client.SSHClient{\n\t\t\t\tEnv: env,\n\t\t\t}\n\t\t\tif err := sshClient.Connect(host); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tdefer sshClient.Close()\n\n\t\t\tc = sshClient\n\t\t}\n\n\t\tlen := len(c.Prefix())\n\t\tif len > paddingLen {\n\t\t\tpaddingLen = len\n\t\t}\n\n\t\tclients = append(clients, c)\n\t}\n\n\t\/\/ Run the command(s) remotely on all hosts in parallel.\n\t\/\/ Run multiple commands (from) sequentally.\n\tfor _, cmd := range commands {\n\t\ttasks, err := client.TasksFromConfigCommand(cmd)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"TasksFromConfigCommand(): \", err)\n\t\t}\n\n\t\tfor _, task := range tasks {\n\t\t\t\/\/ Run the command on all hosts in parallel.\n\t\t\tfor i, c := range clients {\n\t\t\t\tpadding := strings.Repeat(\" \", paddingLen-(len(c.Prefix())))\n\t\t\t\tcolor := terminal.Colors[i%len(terminal.Colors)]\n\n\t\t\t\tprefix := color + padding + c.Prefix() + \" | \"\n\t\t\t\terr := c.Run(task)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"%sexit %v\", prefix, err)\n\t\t\t\t}\n\n\t\t\t\tgo func(c client.Client) {\n\t\t\t\t\tswitch t := c.(type) {\n\t\t\t\t\tcase *client.SSHClient:\n\t\t\t\t\t\tif _, err := io.Copy(os.Stdout, prefixer.New(t.RemoteStdout, prefix)); err != nil {\n\t\t\t\t\t\t\tlog.Printf(\"%sSTDOUT: %v\", t.Prefix(), err)\n\t\t\t\t\t\t}\n\t\t\t\t\tcase *client.LocalhostClient:\n\t\t\t\t\t\tif _, err := io.Copy(os.Stdout, prefixer.New(t.Stdout, prefix)); err != nil {\n\t\t\t\t\t\t\tlog.Printf(\"%sSTDOUT: %v\", t.Prefix(), err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}(c)\n\n\t\t\t\tgo func(c client.Client) {\n\t\t\t\t\tswitch t := c.(type) {\n\t\t\t\t\tcase *client.SSHClient:\n\t\t\t\t\t\tif _, err := io.Copy(os.Stderr, prefixer.New(t.RemoteStderr, prefix)); err != nil {\n\t\t\t\t\t\t\tlog.Printf(\"%sSTDERR: %v\", t.Prefix(), err)\n\t\t\t\t\t\t}\n\t\t\t\t\tcase *client.LocalhostClient:\n\t\t\t\t\t\tif _, err := io.Copy(os.Stderr, prefixer.New(t.Stderr, prefix)); err != nil {\n\t\t\t\t\t\t\tlog.Printf(\"%sSTDERR: %v\", t.Prefix(), err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}(c)\n\t\t\t}\n\n\t\t\t\/\/ Wait for all hosts to finish.\n\t\t\tfor _, c := range clients {\n\t\t\t\tif err := c.Wait(); err != nil {\n\t\t\t\t\t\/\/TODO: Handle the SSH ExitError in ssh pkg\n\t\t\t\t\te, ok := err.(*gossh.ExitError)\n\t\t\t\t\tif ok && e.ExitStatus() != 15 {\n\t\t\t\t\t\tlog.Fatalf(\"%sexit %v\", c.Prefix(), e.ExitStatus())\n\t\t\t\t\t}\n\t\t\t\t\tlog.Fatalf(\"%s: %v\", c.Prefix(), err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}\n\n\t\/\/TODO: We should wait for all io.Copy() goroutines.\n\t\/\/TODO: We should not exit 0, if there was an error.\n}\n<commit_msg>Add some more comments to the main logic<commit_after>package main\n\nimport (\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/pressly\/stackup\/client\"\n\t\"github.com\/pressly\/stackup\/config\"\n\t\"github.com\/pressly\/stackup\/terminal\"\n\n\t\"github.com\/pressly\/prefixer\"\n\n\tgossh \"golang.org\/x\/crypto\/ssh\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\n\/\/ usage prints help for an arg and exits.\nfunc usage(conf *config.Config, arg int) {\n\tlog.Println(\"Usage: sup <network> <target\/command>\\n\")\n\tswitch arg {\n\tcase 1:\n\t\t\/\/ <network> missing, print available hosts.\n\t\tlog.Println(\"Available networks (from Supfile):\")\n\t\tfor name, network := range conf.Networks {\n\t\t\tlog.Printf(\"- %v\\n\", name)\n\t\t\tfor _, host := range network.Hosts {\n\t\t\t\tlog.Printf(\" - %v\\n\", host)\n\t\t\t}\n\t\t}\n\tcase 2:\n\t\t\/\/ <target\/command> not found or missing,\n\t\t\/\/ print available targets\/commands.\n\t\tlog.Println(\"Available targets (from Supfile):\")\n\t\tfor name, commands := range conf.Targets {\n\t\t\tlog.Printf(\"- %v\", name)\n\t\t\tfor _, cmd := range commands {\n\t\t\t\tlog.Printf(\"\\t%v\\n\", cmd)\n\t\t\t}\n\t\t}\n\t\tlog.Println()\n\t\tlog.Println(\"Available commands (from Supfile):\")\n\t\tfor name, cmd := range conf.Commands {\n\t\t\tlog.Printf(\"- %v\\t%v\", name, cmd.Desc)\n\t\t}\n\t}\n\tos.Exit(1)\n}\n\n\/\/ parseArgs parses os.Args for network and commands to be run.\nfunc parseArgs(conf *config.Config) (config.Network, []config.Command) {\n\tvar commands []config.Command\n\n\t\/\/ Check for the first argument first\n\tif len(os.Args) < 2 {\n\t\tusage(conf, len(os.Args))\n\t}\n\t\/\/ Does the <network> exist?\n\tnetwork, ok := conf.Networks[os.Args[1]]\n\tif !ok {\n\t\tlog.Printf(\"Unknown network \\\"%v\\\"\\n\\n\", os.Args[1])\n\t\tusage(conf, 1)\n\t}\n\n\t\/\/ Does <network> have any hosts?\n\tif len(network.Hosts) == 0 {\n\t\tlog.Printf(\"No hosts specified for network \\\"%v\\\"\", os.Args[1])\n\t\tusage(conf, 1)\n\t}\n\n\t\/\/ Check for the second argument\n\tif len(os.Args) < 3 {\n\t\tusage(conf, len(os.Args))\n\t}\n\t\/\/ Does the <target\/command> exist?\n\ttarget, isTarget := conf.Targets[os.Args[2]]\n\tif isTarget {\n\t\t\/\/ It's the target. Loop over its commands.\n\t\tfor _, cmd := range target {\n\t\t\t\/\/ Does the target's command exist?\n\t\t\tcommand, isCommand := conf.Commands[cmd]\n\t\t\tif !isCommand {\n\t\t\t\tlog.Printf(\"Unknown command \\\"%v\\\" (from target \\\"%v\\\": %v)\\n\\n\", cmd, os.Args[2], target)\n\t\t\t\tusage(conf, 2)\n\t\t\t}\n\t\t\tcommand.Name = cmd\n\t\t\tcommands = append(commands, command)\n\t\t}\n\t} else {\n\t\t\/\/ It's probably a command. Does it exist?\n\t\tcommand, isCommand := conf.Commands[os.Args[2]]\n\t\tif !isCommand {\n\t\t\t\/\/ Not a target, nor command.\n\t\t\tlog.Printf(\"Unknown target\/command \\\"%v\\\"\\n\\n\", os.Args[2])\n\t\t\tusage(conf, 2)\n\t\t}\n\t\tcommand.Name = os.Args[2]\n\t\tcommands = append(commands, command)\n\t}\n\n\t\/\/ Check for extra arguments\n\tif len(os.Args) != 3 {\n\t\tusage(conf, len(os.Args))\n\t}\n\n\treturn network, commands\n}\n\nfunc main() {\n\tvar (\n\t\tconf config.Config\n\t\tpaddingLen int\n\t)\n\n\t\/\/ Read configuration file.\n\tdata, _ := ioutil.ReadFile(\".\/Supfile\")\n\tif err := yaml.Unmarshal(data, &conf); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Parse network and commands to be run from os.Args.\n\tnetwork, commands := parseArgs(&conf)\n\n\t\/\/ Process all ENVs into a string of form\n\t\/\/ `export FOO=\"bar\"; export BAR=\"baz\";`.\n\tenv := ``\n\tfor name, value := range conf.Env {\n\t\tenv += `export ` + name + `=\"` + value + `\";`\n\t}\n\tfor name, value := range network.Env {\n\t\tenv += `export ` + name + `=\"` + value + `\";`\n\t}\n\n\t\/\/ Create clients for every host (either SSH or Localhost).\n\tvar clients []client.Client\n\tfor _, host := range network.Hosts {\n\t\tvar c client.Client\n\n\t\tif host == \"localhost\" { \/\/ LocalhostClient\n\n\t\t\tlocalhostClient := &client.LocalhostClient{\n\t\t\t\tEnv: env,\n\t\t\t}\n\t\t\tif err := localhostClient.Connect(host); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\n\t\t\tc = localhostClient\n\n\t\t} else { \/\/ SSHClient\n\n\t\t\tsshClient := &client.SSHClient{\n\t\t\t\tEnv: env,\n\t\t\t}\n\t\t\tif err := sshClient.Connect(host); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tdefer sshClient.Close()\n\n\t\t\tc = sshClient\n\t\t}\n\n\t\tlen := len(c.Prefix())\n\t\tif len > paddingLen {\n\t\t\tpaddingLen = len\n\t\t}\n\n\t\tclients = append(clients, c)\n\t}\n\n\t\/\/ Run command or run multiple commands defined by target sequentally.\n\tfor _, cmd := range commands {\n\t\t\/\/ Translate command into task(s).\n\t\ttasks, err := client.TasksFromConfigCommand(cmd)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"TasksFromConfigCommand(): \", err)\n\t\t}\n\n\t\t\/\/ Run tasks sequentally.\n\t\tfor _, task := range tasks {\n\n\t\t\t\/\/ Run task in parallel.\n\t\t\tfor i, c := range clients {\n\t\t\t\tpadding := strings.Repeat(\" \", paddingLen-(len(c.Prefix())))\n\t\t\t\tcolor := terminal.Colors[i%len(terminal.Colors)]\n\n\t\t\t\tprefix := color + padding + c.Prefix() + \" | \"\n\t\t\t\terr := c.Run(task)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatalf(\"%sexit %v\", prefix, err)\n\t\t\t\t}\n\n\t\t\t\t\/\/ Copy over tasks's STDOUT.\n\t\t\t\tgo func(c client.Client) {\n\t\t\t\t\tswitch t := c.(type) {\n\t\t\t\t\tcase *client.SSHClient:\n\t\t\t\t\t\tif _, err := io.Copy(os.Stdout, prefixer.New(t.RemoteStdout, prefix)); err != nil {\n\t\t\t\t\t\t\tlog.Printf(\"%sSTDOUT: %v\", t.Prefix(), err)\n\t\t\t\t\t\t}\n\t\t\t\t\tcase *client.LocalhostClient:\n\t\t\t\t\t\tif _, err := io.Copy(os.Stdout, prefixer.New(t.Stdout, prefix)); err != nil {\n\t\t\t\t\t\t\tlog.Printf(\"%sSTDOUT: %v\", t.Prefix(), err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}(c)\n\n\t\t\t\t\/\/ Copy over tasks's STDERR.\n\t\t\t\tgo func(c client.Client) {\n\t\t\t\t\tswitch t := c.(type) {\n\t\t\t\t\tcase *client.SSHClient:\n\t\t\t\t\t\tif _, err := io.Copy(os.Stderr, prefixer.New(t.RemoteStderr, prefix)); err != nil {\n\t\t\t\t\t\t\tlog.Printf(\"%sSTDERR: %v\", t.Prefix(), err)\n\t\t\t\t\t\t}\n\t\t\t\t\tcase *client.LocalhostClient:\n\t\t\t\t\t\tif _, err := io.Copy(os.Stderr, prefixer.New(t.Stderr, prefix)); err != nil {\n\t\t\t\t\t\t\tlog.Printf(\"%sSTDERR: %v\", t.Prefix(), err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}(c)\n\t\t\t}\n\n\t\t\t\/\/ Wait for every client to finish the task.\n\t\t\tfor _, c := range clients {\n\t\t\t\tif err := c.Wait(); err != nil {\n\t\t\t\t\t\/\/TODO: Handle the SSH ExitError in ssh pkg\n\t\t\t\t\te, ok := err.(*gossh.ExitError)\n\t\t\t\t\tif ok && e.ExitStatus() != 15 {\n\t\t\t\t\t\tlog.Fatalf(\"%sexit %v\", c.Prefix(), e.ExitStatus())\n\t\t\t\t\t}\n\t\t\t\t\tlog.Fatalf(\"%s: %v\", c.Prefix(), err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/TODO: We should wait for all io.Copy() goroutines.\n\t\/\/TODO: We should not exit 0, if there was an error.\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ testFlagSpec defines a flag we know about.\ntype testFlagSpec struct {\n\tboolVar bool \/\/ True if the flag is type bool\n\tpassToTest bool \/\/ pass to Test\n\tpassToAll bool \/\/ pass to test plugin and test binary\n\tpresent bool \/\/ The flag has been seen\n}\n\n\/\/ testFlagDefn is the set of flags we process.\nvar testFlagDefn = map[string]*testFlagSpec{\n\t\/\/ local to the test plugin\n\t\"q\": {boolVar: true, passToAll: true},\n\t\"v\": {boolVar: true, passToAll: true},\n\t\"cover\": {boolVar: true},\n\t\"coverpkg\": {},\n\t\"covermode\": {},\n\n\t\/\/ Passed to the test binary\n\t\"bench\": {passToTest: true},\n\t\"benchmem\": {boolVar: true, passToTest: true},\n\t\"benchtime\": {passToTest: true},\n\t\"coverprofile\": {passToTest: true},\n\t\"cpu\": {passToTest: true},\n\t\"cpuprofile\": {passToTest: true},\n\t\"memprofile\": {passToTest: true},\n\t\"memprofilerate\": {passToTest: true},\n\t\"blockprofile\": {passToTest: true},\n\t\"blockprofilerate\": {passToTest: true},\n\t\"outputdir\": {passToTest: true},\n\t\"parallel\": {passToTest: true},\n\t\"run\": {passToTest: true},\n\t\"short\": {boolVar: true, passToTest: true},\n\t\"timeout\": {passToTest: true},\n}\n\n\/\/ TestFlags appends \"-test.\" for flags that are passed to the test binary.\nfunc TestFlags(testArgs []string) []string {\n\tvar targs []string\n\tfor _, arg := range testArgs {\n\t\tvar nArg, nVal, fArg string\n\t\tfArg = arg\n\t\tif !strings.Contains(arg, \"-test.\") {\n\t\t\tnArg = strings.TrimPrefix(arg, \"-\")\n\t\t\tif strings.Contains(nArg, \"=\") {\n\t\t\t\tnArgVal := strings.Split(nArg, \"=\")\n\t\t\t\tnArg, nVal = nArgVal[0], nArgVal[1]\n\t\t\t}\n\t\t\tif val, ok := testFlagDefn[nArg]; ok {\n\t\t\t\t\/\/ Special handling for -q, needs to be -test.v when passed to the test\n\t\t\t\tif nArg == \"q\" {\n\t\t\t\t\tnArg = \"v\"\n\t\t\t\t}\n\t\t\t\tif val.passToTest || val.passToAll {\n\t\t\t\t\tfArg = \"-test.\" + nArg\n\t\t\t\t}\n\t\t\t}\n\t\t\tif nVal != \"\" {\n\t\t\t\tfArg = fArg + \"=\" + nVal\n\t\t\t}\n\t\t}\n\t\ttargs = append(targs, fArg)\n\t}\n\treturn targs\n}\n\n\/\/ TestFlagsExtraParse is used to separate known arguments from unknown\n\/\/ arguments passed on the command line. Returns a string slice of test plugin\n\/\/ arguments (parseArgs), and a slice of string arguments for the test binary\n\/\/ (extraArgs). An error is returned if an argument is used twice, or an\n\/\/ argument value is incorrect.\nfunc TestFlagsExtraParse(args []string) (parseArgs []string, extraArgs []string, err error) {\n\targsLen := len(args)\n\n\tfor x := 0; x < argsLen; x++ {\n\t\tnArg := args[x]\n\t\tval, ok := testFlagDefn[strings.TrimPrefix(nArg, \"-\")]\n\t\tif !strings.HasPrefix(nArg, \"-\") || (ok && !val.passToTest) {\n\t\t\terr = setArgFound(nArg)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tparseArgs = append(parseArgs, nArg)\n\t\t\tif ok && val.passToAll {\n\t\t\t\textraArgs = append(extraArgs, nArg)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tvar hadTestPrefix bool\n\t\thasEqual := strings.Contains(nArg, \"=\")\n\t\tif !hasEqual && (x+1 < argsLen && !strings.HasPrefix(args[x+1], \"-\")) {\n\t\t\tif strings.Contains(nArg, \"-test.\") {\n\t\t\t\thadTestPrefix = true\n\t\t\t\tnArg = strings.TrimPrefix(nArg, \"-test.\")\n\t\t\t} else {\n\t\t\t\tnArg = strings.TrimPrefix(nArg, \"-\")\n\t\t\t}\n\t\t\terr = setArgFound(nArg)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/\/ Check the spec for arguments that consume the next argument\n\t\t\tif val, ok := testFlagDefn[nArg]; ok {\n\t\t\t\tif !val.boolVar {\n\t\t\t\t\tnArg = nArg + \"=\" + args[x+1]\n\t\t\t\t\tx++\n\t\t\t\t}\n\t\t\t}\n\t\t} else if hasEqual {\n\t\t\t\/\/ The argument has an embedded value, here we can do some basic\n\t\t\t\/\/ checking.\n\t\t\tsArgs := strings.Split(nArg, \"=\")\n\t\t\ttArg, tVal := strings.TrimPrefix(sArgs[0], \"-\"), sArgs[1]\n\t\t\tif val, ok := testFlagDefn[tArg]; ok {\n\t\t\t\tif val.boolVar {\n\t\t\t\t\tif err = checkBoolFlag(tVal); err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !val.passToTest {\n\t\t\t\t\tparseArgs = append(parseArgs, nArg)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Append \"-\" to the argument, and \"-test.\" if \"-test.\" was previously\n\t\t\/\/ trimmed.\n\t\tif nArg[0] != '-' {\n\t\t\tpre := \"-\"\n\t\t\tif hadTestPrefix {\n\t\t\t\tpre = \"-test.\"\n\t\t\t}\n\t\t\tnArg = pre + nArg\n\t\t}\n\t\textraArgs = append(extraArgs, nArg)\n\t}\n\n\treturn\n}\n\n\/\/ setArgFound checks the argument spec to see if arg has already been\n\/\/ encountered. If it has, then an error is returned.\nfunc setArgFound(arg string) error {\n\tvar err error\n\tnArg := strings.TrimPrefix(arg, \"-\")\n\tif val, ok := testFlagDefn[nArg]; ok {\n\t\tif val.present {\n\t\t\terr = fmt.Errorf(\"%q flag may be set only once\", arg)\n\t\t} else {\n\t\t\ttestFlagDefn[nArg].present = true\n\t\t}\n\t}\n\treturn err\n}\n\n\/\/ checkBoolFlag checks the value to ensure it is a boolean, if not an error is\n\/\/ returned.\nfunc checkBoolFlag(value string) error {\n\tvar nErr error\n\t_, err := strconv.ParseBool(value)\n\tif err != nil {\n\t\tnErr = errors.New(\"illegal bool flag value \" + value)\n\t}\n\treturn nErr\n}\n<commit_msg>Improve gb test verbose output.<commit_after>package cmd\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ testFlagSpec defines a flag we know about.\ntype testFlagSpec struct {\n\tboolVar bool \/\/ True if the flag is type bool\n\tpassToTest bool \/\/ pass to Test\n\tpassToAll bool \/\/ pass to test plugin and test binary\n\tpresent bool \/\/ The flag has been seen\n}\n\n\/\/ testFlagDefn is the set of flags we process.\nvar testFlagDefn = map[string]*testFlagSpec{\n\t\/\/ local to the test plugin\n\t\"q\": {boolVar: true, passToTest: true, passToAll: true},\n\t\"v\": {boolVar: true, passToTest: true, passToAll: true},\n\t\"cover\": {boolVar: true},\n\t\"coverpkg\": {},\n\t\"covermode\": {},\n\n\t\/\/ Passed to the test binary\n\t\"bench\": {passToTest: true},\n\t\"benchmem\": {boolVar: true, passToTest: true},\n\t\"benchtime\": {passToTest: true},\n\t\"coverprofile\": {passToTest: true},\n\t\"cpu\": {passToTest: true},\n\t\"cpuprofile\": {passToTest: true},\n\t\"memprofile\": {passToTest: true},\n\t\"memprofilerate\": {passToTest: true},\n\t\"blockprofile\": {passToTest: true},\n\t\"blockprofilerate\": {passToTest: true},\n\t\"outputdir\": {passToTest: true},\n\t\"parallel\": {passToTest: true},\n\t\"run\": {passToTest: true},\n\t\"short\": {boolVar: true, passToTest: true},\n\t\"timeout\": {passToTest: true},\n}\n\n\/\/ TestFlags appends \"-test.\" for flags that are passed to the test binary.\nfunc TestFlags(testArgs []string) []string {\n\tvar targs []string\n\tfor _, arg := range testArgs {\n\t\tvar nArg, nVal, fArg string\n\t\tfArg = arg\n\t\tif !strings.Contains(arg, \"-test.\") {\n\t\t\tnArg = strings.TrimPrefix(arg, \"-\")\n\t\t\tif strings.Contains(nArg, \"=\") {\n\t\t\t\tnArgVal := strings.Split(nArg, \"=\")\n\t\t\t\tnArg, nVal = nArgVal[0], nArgVal[1]\n\t\t\t}\n\t\t\tif val, ok := testFlagDefn[nArg]; ok {\n\t\t\t\t\/\/ Special handling for -q, needs to be -test.v when passed to the test\n\t\t\t\tif nArg == \"q\" {\n\t\t\t\t\tnArg = \"v\"\n\t\t\t\t}\n\t\t\t\tif val.passToTest || val.passToAll {\n\t\t\t\t\tfArg = \"-test.\" + nArg\n\t\t\t\t}\n\t\t\t}\n\t\t\tif nVal != \"\" {\n\t\t\t\tfArg = fArg + \"=\" + nVal\n\t\t\t}\n\t\t}\n\t\ttargs = append(targs, fArg)\n\t}\n\treturn targs\n}\n\n\/\/ TestFlagsExtraParse is used to separate known arguments from unknown\n\/\/ arguments passed on the command line. Returns a string slice of test plugin\n\/\/ arguments (parseArgs), and a slice of string arguments for the test binary\n\/\/ (extraArgs). An error is returned if an argument is used twice, or an\n\/\/ argument value is incorrect.\nfunc TestFlagsExtraParse(args []string) (parseArgs []string, extraArgs []string, err error) {\n\targsLen := len(args)\n\n\tfor x := 0; x < argsLen; x++ {\n\t\tnArg := args[x]\n\t\tval, ok := testFlagDefn[strings.TrimPrefix(nArg, \"-\")]\n\t\tif !strings.HasPrefix(nArg, \"-\") || (ok && !val.passToTest) {\n\t\t\terr = setArgFound(nArg)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tparseArgs = append(parseArgs, nArg)\n\t\t\tif ok && val.passToAll {\n\t\t\t\textraArgs = append(extraArgs, nArg)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tvar hadTestPrefix bool\n\t\thasEqual := strings.Contains(nArg, \"=\")\n\t\tif !hasEqual && (x+1 < argsLen && !strings.HasPrefix(args[x+1], \"-\")) {\n\t\t\tif strings.Contains(nArg, \"-test.\") {\n\t\t\t\thadTestPrefix = true\n\t\t\t\tnArg = strings.TrimPrefix(nArg, \"-test.\")\n\t\t\t} else {\n\t\t\t\tnArg = strings.TrimPrefix(nArg, \"-\")\n\t\t\t}\n\t\t\terr = setArgFound(nArg)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/\/ Check the spec for arguments that consume the next argument\n\t\t\tif val, ok := testFlagDefn[nArg]; ok {\n\t\t\t\tif !val.boolVar {\n\t\t\t\t\tnArg = nArg + \"=\" + args[x+1]\n\t\t\t\t\tx++\n\t\t\t\t}\n\t\t\t}\n\t\t} else if hasEqual {\n\t\t\t\/\/ The argument has an embedded value, here we can do some basic\n\t\t\t\/\/ checking.\n\t\t\tsArgs := strings.Split(nArg, \"=\")\n\t\t\ttArg, tVal := strings.TrimPrefix(sArgs[0], \"-\"), sArgs[1]\n\t\t\tif val, ok := testFlagDefn[tArg]; ok {\n\t\t\t\tif val.boolVar {\n\t\t\t\t\tif err = checkBoolFlag(tVal); err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !val.passToTest {\n\t\t\t\t\tparseArgs = append(parseArgs, nArg)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Append \"-\" to the argument, and \"-test.\" if \"-test.\" was previously\n\t\t\/\/ trimmed.\n\t\tif nArg[0] != '-' {\n\t\t\tpre := \"-\"\n\t\t\tif hadTestPrefix {\n\t\t\t\tpre = \"-test.\"\n\t\t\t}\n\t\t\tnArg = pre + nArg\n\t\t}\n\t\textraArgs = append(extraArgs, nArg)\n\t}\n\n\treturn\n}\n\n\/\/ setArgFound checks the argument spec to see if arg has already been\n\/\/ encountered. If it has, then an error is returned.\nfunc setArgFound(arg string) error {\n\tvar err error\n\tnArg := strings.TrimPrefix(arg, \"-\")\n\tif val, ok := testFlagDefn[nArg]; ok {\n\t\tif val.present {\n\t\t\terr = fmt.Errorf(\"%q flag may be set only once\", arg)\n\t\t} else {\n\t\t\ttestFlagDefn[nArg].present = true\n\t\t}\n\t}\n\treturn err\n}\n\n\/\/ checkBoolFlag checks the value to ensure it is a boolean, if not an error is\n\/\/ returned.\nfunc checkBoolFlag(value string) error {\n\tvar nErr error\n\t_, err := strconv.ParseBool(value)\n\tif err != nil {\n\t\tnErr = errors.New(\"illegal bool flag value \" + value)\n\t}\n\treturn nErr\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016, WSO2 Inc. (http:\/\/www.wso2.org) All Rights Reserved.\n\npackage cmd\n\nimport (\n\t\"archive\/zip\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/renstrom\/dedent\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/viper\"\n\t\"github.com\/wso2\/wum-uc\/util\"\n\t\"github.com\/wso2\/wum-uc\/constant\"\n)\n\nvar (\n\tvalidateCmdUse = \"validate <update_loc> <dist_loc>\"\n\tvalidateCmdShortDesc = \"A brief description of your command\"\n\tvalidateCmdLongDesc = dedent.Dedent(`\n\t\tA longer description that spans multiple lines and likely contains\n\t\texamples and usage of using your command.`)\n)\n\n\/\/ validateCmd represents the validate command\nvar validateCmd = &cobra.Command{\n\tUse: validateCmdUse,\n\tShort: validateCmdShortDesc,\n\tLong: validateCmdLongDesc,\n\tRun: initializeValidateCommand,\n}\n\nfunc init() {\n\tRootCmd.AddCommand(validateCmd)\n\tvalidateCmd.Flags().BoolVarP(&isDebugLogsEnabled, \"debug\", \"d\", false, \"Enable debug logs\")\n\tvalidateCmd.Flags().BoolVarP(&isTraceLogsEnabled, \"trace\", \"t\", false, \"Enable trace logs\")\n}\n\nfunc initializeValidateCommand(cmd *cobra.Command, args []string) {\n\tif len(args) < 2 || len(args) > 2 {\n\t\tutil.PrintErrorAndExit(\"Invalid number of argumants. Run 'wum-uc validate --help' to view help.\")\n\t}\n\tstartValidation(args[0], args[1])\n}\n\n\/\/Entry point of the validate command\nfunc startValidation(updateFilePath, distributionLocation string) {\n\n\tupdateFileMap := make(map[string]bool)\n\tdistributionFileMap := make(map[string]bool)\n\n\t\/\/Check update location\n\t\/\/Check 1\n\tif !strings.HasSuffix(updateFilePath, \".zip\") {\n\t\tutil.PrintErrorAndExit(\"Entered update location does not have a 'zip' extention.\")\n\t}\n\t\/\/Check 2\n\texists, err := util.IsFileExists(updateFilePath)\n\tutil.HandleError(err, \"\")\n\tif !exists {\n\t\tutil.PrintErrorAndExit(\"Update file '\" + updateFilePath + \"' does not exist.\")\n\t}\n\n\texists, err = util.IsDistributionExists(distributionLocation)\n\tutil.HandleError(err, \"Error occurred while checking '\" + distributionLocation + \"'\")\n\tif !exists {\n\t\tutil.PrintErrorAndExit(\"Distribution does not exist at \", distributionLocation)\n\t}\n\n\t\/\/Check 3\n\tlocationInfo, err := os.Stat(updateFilePath)\n\tutil.HandleError(err, \"\")\n\tmatch, err := regexp.MatchString(constant.FILENAME_REGEX, locationInfo.Name())\n\tif !match {\n\t\tutil.PrintErrorAndExit(\"Update file name does not match '\" + constant.FILENAME_REGEX + \"' regular expression.\")\n\t}\n\tupdateName := strings.TrimSuffix(locationInfo.Name(), \".zip\")\n\tviper.Set(constant.UPDATE_NAME, updateName)\n\n\tupdateFileMap, err = readUpdateZip(updateFilePath)\n\tutil.HandleError(err)\n\tlogger.Debug(updateFileMap)\n\n\t\/\/Check dist location\n\tif strings.HasSuffix(distributionLocation, \".zip\") {\n\t\tlocationInfo, err := os.Stat(distributionLocation)\n\t\tutil.HandleError(err, \"\")\n\t\tviper.Set(constant.PRODUCT_NAME, strings.TrimSuffix(locationInfo.Name(), \".zip\"))\n\n\t\tdistributionFileMap, err = readDistributionZip(distributionLocation)\n\t} else {\n\n\t}\n\n\terr = compare(updateFileMap, distributionFileMap)\n\tutil.HandleError(err)\n\n\tutil.PrintInfo(\"'\" + updateName + \"' validation successfully finished. No issues found.\")\n}\n\nfunc compare(updateFileMap, distributionFileMap map[string]bool) error {\n\tfor filePath := range updateFileMap {\n\t\tlogger.Debug(\"Searching:\", filePath)\n\t\t_, found := distributionFileMap[filePath]\n\t\tif !found {\n\t\t\treturn &util.CustomError{What: \"File not found in the distribution: '\" + filePath + \"'\" }\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc readUpdateZip(filename string) (map[string]bool, error) {\n\tfileMap := make(map[string]bool)\n\t\/\/ Create a reader out of the zip archive\n\tzipReader, err := zip.OpenReader(filename)\n\tif err != nil {\n\t}\n\tdefer zipReader.Close()\n\n\tupdateName := viper.GetString(constant.UPDATE_NAME)\n\tlogger.Debug(\"updateName:\", updateName)\n\t\/\/ Iterate through each file\/dir found in\n\tfor _, file := range zipReader.Reader.File {\n\t\tif file.FileInfo().IsDir() {\n\t\t\tlogger.Debug(\"dir:\", file.Name)\n\t\t\tlogger.Debug(\"dir:\", file.FileInfo().Name())\n\t\t\tif file.FileInfo().Name() != updateName {\n\t\t\t\tlogger.Debug(\"Checking:\", file.FileInfo().Name())\n\t\t\t\t\/\/Check 4\n\t\t\t\tprefix := filepath.Join(updateName, constant.CARBON_HOME)\n\t\t\t\thasPrefix := strings.HasPrefix(file.Name, prefix)\n\t\t\t\tif !hasPrefix {\n\t\t\t\t\treturn nil, &util.CustomError{What: \"Unknown directory found: '\" + file.Name + \"'\" }\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlogger.Debug(\"file:\", file.Name)\n\t\t\tlogger.Debug(\"file:\", file.FileInfo().Name())\n\t\t\tfullPath := filepath.Join(updateName, file.FileInfo().Name())\n\t\t\tlogger.Debug(\"fullPath:\", fullPath)\n\t\t\tswitch file.FileInfo().Name() {\n\t\t\tcase constant.UPDATE_DESCRIPTOR_FILE:\n\t\t\t\t\/\/todo: read update descriptor and validate\n\t\t\t\tif file.Name != fullPath {\n\t\t\t\t\tparent := strings.TrimSuffix(file.Name, file.FileInfo().Name())\n\t\t\t\t\treturn nil, &util.CustomError{What: \"'\" + constant.UPDATE_DESCRIPTOR_FILE + \"' found at '\" + parent + \"'. It should be in the '\" + updateName + \"' directory\" }\n\t\t\t\t}\n\t\t\tcase constant.LICENSE_FILE:\n\t\t\t\t\/\/todo: read license and validate\n\t\t\t\t\/\/This security update is licensed by WSO2 Inc. under Apache License 2.0.\n\t\t\t\tif file.Name != fullPath {\n\t\t\t\t\tparent := strings.TrimSuffix(file.Name, file.FileInfo().Name())\n\t\t\t\t\treturn nil, &util.CustomError{What: \"'\" + constant.LICENSE_FILE + \"' found at '\" + parent + \"'. It should be in the '\" + updateName + \"' directory\" }\n\t\t\t\t}\n\t\t\tcase constant.INSTRUCTIONS_FILE:\n\t\t\t\tif file.Name != fullPath {\n\t\t\t\t\tparent := strings.TrimSuffix(file.Name, file.FileInfo().Name())\n\t\t\t\t\treturn nil, &util.CustomError{What: \"'\" + constant.INSTRUCTIONS_FILE + \"' found at '\" + parent + \"'. It should be in the '\" + updateName + \"' directory\" }\n\t\t\t\t}\n\t\t\tcase constant.NOT_A_CONTRIBUTION_FILE:\n\t\t\t\tif file.Name != fullPath {\n\t\t\t\t\tparent := strings.TrimSuffix(file.Name, file.FileInfo().Name())\n\t\t\t\t\treturn nil, &util.CustomError{What: \"'\" + constant.NOT_A_CONTRIBUTION_FILE + \"' found at '\" + parent + \"'. It should be in the '\" + updateName + \"' directory\" }\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tprefix := filepath.Join(updateName, constant.CARBON_HOME)\n\t\t\t\thasPrefix := strings.HasPrefix(file.Name, prefix)\n\t\t\t\tif !hasPrefix {\n\t\t\t\t\treturn nil, &util.CustomError{What: \"Unknown file found: '\" + file.Name + \"'\" }\n\t\t\t\t}\n\t\t\t\trelativePath := strings.TrimPrefix(file.Name, prefix)\n\t\t\t\tfileMap[relativePath] = false\n\t\t\t}\n\t\t}\n\t}\n\treturn fileMap, nil\n}\n\nfunc readDistributionZip(filename string) (map[string]bool, error) {\n\tfileMap := make(map[string]bool)\n\t\/\/ Create a reader out of the zip archive\n\tzipReader, err := zip.OpenReader(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer zipReader.Close()\n\n\tproductName := viper.GetString(constant.PRODUCT_NAME)\n\t\/\/ Iterate through each file\/dir found in\n\tfor _, file := range zipReader.Reader.File {\n\t\trelativePath := strings.TrimPrefix(file.Name, productName)\n\t\tif !file.FileInfo().IsDir() {\n\t\t\tfileMap[relativePath] = false\n\t\t}\n\t}\n\treturn fileMap, nil\n}<commit_msg>Add 'update-description' validation<commit_after>\/\/ Copyright (c) 2016, WSO2 Inc. (http:\/\/www.wso2.org) All Rights Reserved.\n\npackage cmd\n\nimport (\n\t\"archive\/zip\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/renstrom\/dedent\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/viper\"\n\t\"github.com\/wso2\/wum-uc\/util\"\n\t\"github.com\/wso2\/wum-uc\/constant\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nvar (\n\tvalidateCmdUse = \"validate <update_loc> <dist_loc>\"\n\tvalidateCmdShortDesc = \"A brief description of your command\"\n\tvalidateCmdLongDesc = dedent.Dedent(`\n\t\tA longer description that spans multiple lines and likely contains\n\t\texamples and usage of using your command.`)\n)\n\n\/\/ validateCmd represents the validate command\nvar validateCmd = &cobra.Command{\n\tUse: validateCmdUse,\n\tShort: validateCmdShortDesc,\n\tLong: validateCmdLongDesc,\n\tRun: initializeValidateCommand,\n}\n\nfunc init() {\n\tRootCmd.AddCommand(validateCmd)\n\tvalidateCmd.Flags().BoolVarP(&isDebugLogsEnabled, \"debug\", \"d\", false, \"Enable debug logs\")\n\tvalidateCmd.Flags().BoolVarP(&isTraceLogsEnabled, \"trace\", \"t\", false, \"Enable trace logs\")\n}\n\nfunc initializeValidateCommand(cmd *cobra.Command, args []string) {\n\tif len(args) < 2 || len(args) > 2 {\n\t\tutil.PrintErrorAndExit(\"Invalid number of argumants. Run 'wum-uc validate --help' to view help.\")\n\t}\n\tstartValidation(args[0], args[1])\n}\n\n\/\/Entry point of the validate command\nfunc startValidation(updateFilePath, distributionLocation string) {\n\n\tsetLogLevel()\n\tlogger.Debug(\"validate command called\")\n\n\tupdateFileMap := make(map[string]bool)\n\tdistributionFileMap := make(map[string]bool)\n\n\t\/\/Check update location\n\t\/\/Check 1\n\tif !strings.HasSuffix(updateFilePath, \".zip\") {\n\t\tutil.PrintErrorAndExit(\"Entered update location does not have a 'zip' extention.\")\n\t}\n\t\/\/Check 2\n\texists, err := util.IsFileExists(updateFilePath)\n\tutil.HandleError(err, \"\")\n\tif !exists {\n\t\tutil.PrintErrorAndExit(\"Update file '\" + updateFilePath + \"' does not exist.\")\n\t}\n\n\texists, err = util.IsDistributionExists(distributionLocation)\n\tutil.HandleError(err, \"Error occurred while checking '\" + distributionLocation + \"'\")\n\tif !exists {\n\t\tutil.PrintErrorAndExit(\"Distribution does not exist at \", distributionLocation)\n\t}\n\n\t\/\/Check 3\n\tlocationInfo, err := os.Stat(updateFilePath)\n\tutil.HandleError(err, \"\")\n\tmatch, err := regexp.MatchString(constant.FILENAME_REGEX, locationInfo.Name())\n\tif !match {\n\t\tutil.PrintErrorAndExit(\"Update file name does not match '\" + constant.FILENAME_REGEX + \"' regular expression.\")\n\t}\n\tupdateName := strings.TrimSuffix(locationInfo.Name(), \".zip\")\n\tviper.Set(constant.UPDATE_NAME, updateName)\n\n\tupdateFileMap, err = readUpdateZip(updateFilePath)\n\tutil.HandleError(err)\n\tlogger.Debug(updateFileMap)\n\n\t\/\/Check dist location\n\tif strings.HasSuffix(distributionLocation, \".zip\") {\n\t\tlocationInfo, err := os.Stat(distributionLocation)\n\t\tutil.HandleError(err, \"\")\n\t\tviper.Set(constant.PRODUCT_NAME, strings.TrimSuffix(locationInfo.Name(), \".zip\"))\n\n\t\tdistributionFileMap, err = readDistributionZip(distributionLocation)\n\t} else {\n\n\t}\n\n\terr = compare(updateFileMap, distributionFileMap)\n\tutil.HandleError(err)\n\n\tutil.PrintInfo(\"'\" + updateName + \"' validation successfully finished. No issues found.\")\n}\n\nfunc compare(updateFileMap, distributionFileMap map[string]bool) error {\n\tfor filePath := range updateFileMap {\n\t\tlogger.Debug(\"Searching:\", filePath)\n\t\t_, found := distributionFileMap[filePath]\n\t\tif !found {\n\t\t\treturn &util.CustomError{What: \"File not found in the distribution: '\" + filePath + \"'\" }\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc readUpdateZip(filename string) (map[string]bool, error) {\n\tfileMap := make(map[string]bool)\n\tupdateDescriptor := util.UpdateDescriptor{}\n\n\t\/\/ Create a reader out of the zip archive\n\tzipReader, err := zip.OpenReader(filename)\n\tif err != nil {\n\t}\n\tdefer zipReader.Close()\n\n\tupdateName := viper.GetString(constant.UPDATE_NAME)\n\tlogger.Debug(\"updateName:\", updateName)\n\t\/\/ Iterate through each file\/dir found in\n\tfor _, file := range zipReader.Reader.File {\n\t\tif file.FileInfo().IsDir() {\n\t\t\tlogger.Debug(\"dir:\", file.Name)\n\t\t\tlogger.Debug(\"dir:\", file.FileInfo().Name())\n\t\t\tif file.FileInfo().Name() != updateName {\n\t\t\t\tlogger.Debug(\"Checking:\", file.FileInfo().Name())\n\t\t\t\t\/\/Check 4\n\t\t\t\tprefix := filepath.Join(updateName, constant.CARBON_HOME)\n\t\t\t\thasPrefix := strings.HasPrefix(file.Name, prefix)\n\t\t\t\tif !hasPrefix {\n\t\t\t\t\treturn nil, &util.CustomError{What: \"Unknown directory found: '\" + file.Name + \"'\" }\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tlogger.Debug(\"file:\", file.Name)\n\t\t\tlogger.Debug(\"file:\", file.FileInfo().Name())\n\t\t\tfullPath := filepath.Join(updateName, file.FileInfo().Name())\n\t\t\tlogger.Debug(\"fullPath:\", fullPath)\n\t\t\tswitch file.FileInfo().Name() {\n\t\t\tcase constant.UPDATE_DESCRIPTOR_FILE:\n\t\t\t\t\/\/todo: read update descriptor and validate\n\t\t\t\tif file.Name != fullPath {\n\t\t\t\t\tparent := strings.TrimSuffix(file.Name, file.FileInfo().Name())\n\t\t\t\t\treturn nil, &util.CustomError{What: \"'\" + constant.UPDATE_DESCRIPTOR_FILE + \"' found at '\" + parent + \"'. It should be in the '\" + updateName + \"' directory\" }\n\t\t\t\t}\n\t\t\t\tzippedFile, err := file.Open()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tdata, err := ioutil.ReadAll(zippedFile)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\t\/\/fmt.Print(string(data))\n\n\t\t\t\tzippedFile.Close()\n\n\t\t\t\terr = yaml.Unmarshal(data, &updateDescriptor)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\terr = util.ValidateUpdateDescriptor(&updateDescriptor)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, &util.CustomError{What: \"'\" + constant.UPDATE_DESCRIPTOR_FILE + \"' is invalid. \" + err.Error() }\n\t\t\t\t}\n\t\t\tcase constant.LICENSE_FILE:\n\t\t\t\t\/\/todo: read license and validate\n\t\t\t\t\/\/This security update is licensed by WSO2 Inc. under Apache License 2.0.\n\t\t\t\tif file.Name != fullPath {\n\t\t\t\t\tparent := strings.TrimSuffix(file.Name, file.FileInfo().Name())\n\t\t\t\t\treturn nil, &util.CustomError{What: \"'\" + constant.LICENSE_FILE + \"' found at '\" + parent + \"'. It should be in the '\" + updateName + \"' directory\" }\n\t\t\t\t}\n\t\t\tcase constant.INSTRUCTIONS_FILE:\n\t\t\t\tif file.Name != fullPath {\n\t\t\t\t\tparent := strings.TrimSuffix(file.Name, file.FileInfo().Name())\n\t\t\t\t\treturn nil, &util.CustomError{What: \"'\" + constant.INSTRUCTIONS_FILE + \"' found at '\" + parent + \"'. It should be in the '\" + updateName + \"' directory\" }\n\t\t\t\t}\n\t\t\tcase constant.NOT_A_CONTRIBUTION_FILE:\n\t\t\t\tif file.Name != fullPath {\n\t\t\t\t\tparent := strings.TrimSuffix(file.Name, file.FileInfo().Name())\n\t\t\t\t\treturn nil, &util.CustomError{What: \"'\" + constant.NOT_A_CONTRIBUTION_FILE + \"' found at '\" + parent + \"'. It should be in the '\" + updateName + \"' directory\" }\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tprefix := filepath.Join(updateName, constant.CARBON_HOME)\n\t\t\t\thasPrefix := strings.HasPrefix(file.Name, prefix)\n\t\t\t\tif !hasPrefix {\n\t\t\t\t\treturn nil, &util.CustomError{What: \"Unknown file found: '\" + file.Name + \"'\" }\n\t\t\t\t}\n\t\t\t\trelativePath := strings.TrimPrefix(file.Name, prefix)\n\t\t\t\tfileMap[relativePath] = false\n\t\t\t}\n\t\t}\n\t}\n\treturn fileMap, nil\n}\n\nfunc readDistributionZip(filename string) (map[string]bool, error) {\n\tfileMap := make(map[string]bool)\n\t\/\/ Create a reader out of the zip archive\n\tzipReader, err := zip.OpenReader(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer zipReader.Close()\n\n\tproductName := viper.GetString(constant.PRODUCT_NAME)\n\t\/\/ Iterate through each file\/dir found in\n\tfor _, file := range zipReader.Reader.File {\n\t\trelativePath := strings.TrimPrefix(file.Name, productName)\n\t\tif !file.FileInfo().IsDir() {\n\t\t\tfileMap[relativePath] = false\n\t\t}\n\t}\n\treturn fileMap, nil\n}<|endoftext|>"} {"text":"<commit_before>\/\/ Package jsonseq provides methods for reading and writing JSON text sequences\n\/\/ (`application\/json-seq`) as defined in RFC 7464 (https:\/\/tools.ietf.org\/html\/rfc7464).\npackage jsonseq\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n)\n\n\/\/ ContentType is the MIME media type for JSON text sequences.\n\/\/ See: https:\/\/tools.ietf.org\/html\/rfc7464#section-4\nconst ContentType = `application\/json-seq`\n\nconst (\n\trs = 0x1E\n\tlf = 0x0A\n\tsp = 0x20\n\ttb = 0x09\n\tcr = 0x0D\n)\n\n\/\/ whitespace characters defined in https:\/\/tools.ietf.org\/html\/rfc7159#section-2.\nvar wsSet = string([]byte{sp, tb, lf, cr})\n\nconst digitSet = \"1234567980\"\n\nfunc ws(b byte) bool {\n\treturn b == sp || b == tb || b == lf || b == cr\n}\n\n\/\/ WriteRecord writes a JSON text sequence record with beginning\n\/\/ (RS) and end (LF) marker bytes.\nfunc WriteRecord(w io.Writer, json []byte) error {\n\t_, err := w.Write([]byte{rs})\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(json)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write([]byte{lf})\n\treturn err\n}\n\n\/\/ A RecordWriter delimits the start of written records with a record separator.\n\/\/\n\/\/ The standard library's json.Encoder calls Write just once for each value and\n\/\/ always with a trailing line feed, so it can be adapted very simply to emit a\n\/\/ JSON\n\/\/\n\/\/ \tencoder := json.NewEncoder(&jsonseq.RecordWriter{writer})\ntype RecordWriter struct {\n\tio.Writer\n}\n\n\/\/ Write prefixes every written record with an ASCII record separator. The caller\n\/\/ is responsible for including a trailing line feed when necessary. Calls the\n\/\/ underlying Writer exactly once.\nfunc (w *RecordWriter) Write(record []byte) (int, error) {\n\t_, err := w.Writer.Write([]byte{rs})\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tn, err := w.Writer.Write(record)\n\treturn n + 1, err\n}\n\n\/\/ A Decoder reads and decodes JSON text sequence records from an input stream.\ntype Decoder struct {\n\ts *bufio.Scanner\n}\n\n\/\/ NewDecoder creates a Decoder.\nfunc NewDecoder(r io.Reader) *Decoder {\n\ts := bufio.NewScanner(r)\n\ts.Split(ScanRecord)\n\treturn &Decoder{s: s}\n}\n\n\/\/ Decode scans the next record, or returns an error. Extra trailing data is discarded.\n\/\/ The Decoder remains valid until io.EOF is returned.\nfunc (d *Decoder) Decode(v interface{}) error {\n\tif !d.s.Scan() {\n\t\tif err := d.s.Err(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn io.EOF\n\t}\n\tb := d.s.Bytes()\n\n\tb, ok := RecordValue(b)\n\tif !ok {\n\t\treturn fmt.Errorf(\"invalid record: %q\", string(b))\n\t}\n\t\/\/ Decode the first value, and discard any remaining data.\n\treturn json.NewDecoder(bytes.NewReader(b)).Decode(v)\n}\n\n\/\/ RecordValue returns a slice containing the value from a JSON sequence record\n\/\/ and true if it can be decoded or false if the record was truncated or is\n\/\/ otherwise invalid. This is *NOT* a validation of any contained JSON, and some\n\/\/ records contain data after the first value, which is always invalid since it\n\/\/ was not preceded by a RS.\n\/\/\n\/\/ See section 2.4: Top-Level Values: numbers, true, false, and null.\n\/\/ https:\/\/tools.ietf.org\/html\/rfc7464#section-2.4\nfunc RecordValue(b []byte) ([]byte, bool) {\n\tif len(b) < 2 {\n\t\treturn b, false\n\t}\n\tif b[0] != rs {\n\t\treturn b, false\n\t}\n\t\/\/ Drop rs and leading whitespace.\n\tb = bytes.TrimLeft(b[1:], wsSet)\n\n\t\/\/ A number, true, false, or null value could be truncated if not\n\t\/\/ followed by whitespace.\n\tswitch b[0] {\n\tcase 'n':\n\t\tif bytes.HasPrefix(b, []byte(\"null\")) {\n\t\t\tif ws(b[4]) {\n\t\t\t\treturn b, true\n\t\t\t}\n\t\t\treturn b, false\n\t\t}\n\tcase 't':\n\t\tif bytes.HasPrefix(b, []byte(\"true\")) {\n\t\t\tif ws(b[4]) {\n\t\t\t\treturn b, true\n\t\t\t}\n\t\t\treturn b, false\n\t\t}\n\tcase 'f':\n\t\tif bytes.HasPrefix(b, []byte(\"false\")) {\n\t\t\tif ws(b[5]) {\n\t\t\t\treturn b, true\n\t\t\t}\n\t\t\treturn b, false\n\t\t}\n\tcase '-':\n\t\tif '0' <= b[1] && b[1] <= '9' {\n\t\t\tt := bytes.TrimLeft(b, digitSet)\n\t\t\tif len(t) > 0 && ws(t[0]) {\n\t\t\t\treturn b, true\n\t\t\t}\n\t\t\treturn b, false\n\t\t}\n\tcase '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':\n\t\tt := bytes.TrimLeft(b, digitSet)\n\t\tif len(t) > 0 && ws(t[0]) {\n\t\t\treturn b, true\n\t\t}\n\t\treturn b, false\n\t}\n\n\treturn b, true\n}\n\n\/\/ ScanRecord is a bufio.SplitFunc which splits JSON text sequence records.\n\/\/ Scanned bytes must be validated with the RecordValue function.\nfunc ScanRecord(data []byte, atEOF bool) (advance int, token []byte, err error) {\n\tif atEOF && len(data) == 0 {\n\t\treturn 0, nil, nil\n\t}\n\t\/\/ Find record start.\n\tswitch i := bytes.IndexByte(data, rs); {\n\tcase i < 0:\n\t\tif atEOF {\n\t\t\t\/\/ Partial record.\n\t\t\treturn len(data), data, nil\n\t\t}\n\t\t\/\/ Request more data.\n\t\treturn 0, nil, nil\n\tcase i > 0:\n\t\t\/\/ Partial record.\n\t\treturn i, data[0 : i-1], nil\n\t}\n\t\/\/ else i == 0\n\n\t\/\/ Drop consecutive leading rs's\n\tfor len(data) > 1 && data[1] == rs {\n\t\tdata = data[1:]\n\t}\n\n\t\/\/ Find end or next record.\n\ti := bytes.IndexByte(data[1:], rs)\n\tif i < 0 {\n\t\tif atEOF {\n\t\t\treturn len(data), data, nil\n\t\t}\n\t\t\/\/ Request more data.\n\t\treturn 0, nil, nil\n\t}\n\treturn 1 + i, data[:1+i], nil\n}\n<commit_msg>doc tweaks<commit_after>\/\/ Package jsonseq provides methods for reading and writing JSON text sequences\n\/\/ (`application\/json-seq`) as defined in RFC 7464 (https:\/\/tools.ietf.org\/html\/rfc7464).\npackage jsonseq\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n)\n\n\/\/ ContentType is the MIME media type for JSON text sequences.\n\/\/ See: https:\/\/tools.ietf.org\/html\/rfc7464#section-4\nconst ContentType = `application\/json-seq`\n\nconst (\n\trs = 0x1E\n\tlf = 0x0A\n\tsp = 0x20\n\ttb = 0x09\n\tcr = 0x0D\n)\n\n\/\/ whitespace characters defined in https:\/\/tools.ietf.org\/html\/rfc7159#section-2.\nvar wsSet = string([]byte{sp, tb, lf, cr})\n\nconst digitSet = \"1234567980\"\n\nfunc ws(b byte) bool {\n\treturn b == sp || b == tb || b == lf || b == cr\n}\n\n\/\/ WriteRecord writes a JSON text sequence record with beginning\n\/\/ (RS) and end (LF) marker bytes.\nfunc WriteRecord(w io.Writer, json []byte) error {\n\t_, err := w.Write([]byte{rs})\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(json)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write([]byte{lf})\n\treturn err\n}\n\n\/\/ A RecordWriter delimits the start of written records with a record separator.\n\/\/\n\/\/ The standard library's json.Encoder calls Write just once for each value and\n\/\/ always with a trailing line feed, so it can be adapted very simply to emit a\n\/\/ JSON text sequence.\n\/\/\n\/\/ \tencoder := json.NewEncoder(&jsonseq.RecordWriter{writer})\ntype RecordWriter struct {\n\tio.Writer\n}\n\n\/\/ Write prefixes every written record with an ASCII record separator. The caller\n\/\/ is responsible for including a trailing line feed when necessary. Calls the\n\/\/ underlying Writer exactly once.\nfunc (w *RecordWriter) Write(record []byte) (int, error) {\n\t_, err := w.Writer.Write([]byte{rs})\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tn, err := w.Writer.Write(record)\n\treturn n + 1, err\n}\n\n\/\/ A Decoder reads and decodes JSON text sequence records from an input stream.\ntype Decoder struct {\n\ts *bufio.Scanner\n}\n\n\/\/ NewDecoder creates a Decoder.\nfunc NewDecoder(r io.Reader) *Decoder {\n\ts := bufio.NewScanner(r)\n\ts.Split(ScanRecord)\n\treturn &Decoder{s: s}\n}\n\n\/\/ Decode scans the next record, or returns an error. Extra trailing data is discarded.\n\/\/ The Decoder remains valid until io.EOF is returned.\nfunc (d *Decoder) Decode(v interface{}) error {\n\tif !d.s.Scan() {\n\t\tif err := d.s.Err(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn io.EOF\n\t}\n\tb := d.s.Bytes()\n\n\tb, ok := RecordValue(b)\n\tif !ok {\n\t\treturn fmt.Errorf(\"invalid record: %q\", string(b))\n\t}\n\t\/\/ Decode the first value, and discard any remaining data.\n\treturn json.NewDecoder(bytes.NewReader(b)).Decode(v)\n}\n\n\/\/ RecordValue returns a slice containing the value from a JSON text sequence\n\/\/ record and true if it can be decoded or false if the record was truncated or is\n\/\/ otherwise invalid. This is *NOT* a validation of any contained JSON, and some\n\/\/ records contain data after the first value, which is always invalid since it\n\/\/ was not preceded by a RS.\n\/\/\n\/\/ See section 2.4: Top-Level Values: numbers, true, false, and null.\n\/\/ https:\/\/tools.ietf.org\/html\/rfc7464#section-2.4\nfunc RecordValue(b []byte) ([]byte, bool) {\n\tif len(b) < 2 {\n\t\treturn b, false\n\t}\n\tif b[0] != rs {\n\t\treturn b, false\n\t}\n\t\/\/ Drop rs and leading whitespace.\n\tb = bytes.TrimLeft(b[1:], wsSet)\n\n\t\/\/ A number, true, false, or null value could be truncated if not\n\t\/\/ followed by whitespace.\n\tswitch b[0] {\n\tcase 'n':\n\t\tif bytes.HasPrefix(b, []byte(\"null\")) {\n\t\t\tif ws(b[4]) {\n\t\t\t\treturn b, true\n\t\t\t}\n\t\t\treturn b, false\n\t\t}\n\tcase 't':\n\t\tif bytes.HasPrefix(b, []byte(\"true\")) {\n\t\t\tif ws(b[4]) {\n\t\t\t\treturn b, true\n\t\t\t}\n\t\t\treturn b, false\n\t\t}\n\tcase 'f':\n\t\tif bytes.HasPrefix(b, []byte(\"false\")) {\n\t\t\tif ws(b[5]) {\n\t\t\t\treturn b, true\n\t\t\t}\n\t\t\treturn b, false\n\t\t}\n\tcase '-':\n\t\tif '0' <= b[1] && b[1] <= '9' {\n\t\t\tt := bytes.TrimLeft(b, digitSet)\n\t\t\tif len(t) > 0 && ws(t[0]) {\n\t\t\t\treturn b, true\n\t\t\t}\n\t\t\treturn b, false\n\t\t}\n\tcase '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':\n\t\tt := bytes.TrimLeft(b, digitSet)\n\t\tif len(t) > 0 && ws(t[0]) {\n\t\t\treturn b, true\n\t\t}\n\t\treturn b, false\n\t}\n\n\treturn b, true\n}\n\n\/\/ ScanRecord is a bufio.SplitFunc which splits JSON text sequence records.\n\/\/ Scanned bytes must be validated with the RecordValue function.\nfunc ScanRecord(data []byte, atEOF bool) (advance int, token []byte, err error) {\n\tif atEOF && len(data) == 0 {\n\t\treturn 0, nil, nil\n\t}\n\t\/\/ Find record start.\n\tswitch i := bytes.IndexByte(data, rs); {\n\tcase i < 0:\n\t\tif atEOF {\n\t\t\t\/\/ Partial record.\n\t\t\treturn len(data), data, nil\n\t\t}\n\t\t\/\/ Request more data.\n\t\treturn 0, nil, nil\n\tcase i > 0:\n\t\t\/\/ Partial record.\n\t\treturn i, data[0 : i-1], nil\n\t}\n\t\/\/ else i == 0\n\n\t\/\/ Drop consecutive leading rs's\n\tfor len(data) > 1 && data[1] == rs {\n\t\tdata = data[1:]\n\t}\n\n\t\/\/ Find end or next record.\n\ti := bytes.IndexByte(data[1:], rs)\n\tif i < 0 {\n\t\tif atEOF {\n\t\t\treturn len(data), data, nil\n\t\t}\n\t\t\/\/ Request more data.\n\t\treturn 0, nil, nil\n\t}\n\treturn 1 + i, data[:1+i], nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/Command to test application without deploy:\n\/\/goapp serve app.yaml\n\/\/Command to deploy\/update application:\n\/\/goapp deploy -application golangnode0 -version 0\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"appengine\"\n\t\"appengine\/urlfetch\"\n)\n\n\/\/predefined parameters\nvar maxNodes int = 10\nvar isAliveCheckPeriod int = 500 \/\/in millisecs\n\n\/\/changeable parameters\nvar statusContent string = \"Default status\"\n\n\/\/nodesStates := make(map[int]map[string]string)\n\/*\nexample for this map\nvar nodesStates map[int]map[string]string{\n\t1: map[string]string{\n\t\t\"alive\":\"1\",\n\t\t\"hasTask\":\"true\",\n\t\t\"taskStatus\":\"completed\",\n\t\t\"taskResult\":\"some_result_for_node\"\n\t},\n}\n*\/\n\ntype webPage struct {\n\tTitle string\n}\n\ntype nodeStats struct {\n\tNodeID int `json:\"ID\"`\n\tNodeCount int `json:\"nodeCount\"`\n\tHasTask bool `json:\"hasTask\"`\n\tTaskStatus string `json:\"taskStatus\"` \/\/running-copleted-loaded\n\tTaskResult string `json:\"taskResult\"`\n\tTaskFragmentBody string `json:\"taskFragmentBody\"`\n\tTaskBody string `json:\"taskBody\"`\n}\n\ntype echoMessage struct {\n\tTitle string `json:\"title\"`\n\tContent string `json:\"content\"`\n}\n\n\/*type gmailUser struct {\n\tname string\n\tpswd string\n}\n\nfunc sendMail(msg string) {\n\tmailUser := gmailUser{\n\t\t\"golangapplication@gmail.com\",\n\t\t\"\",\n\t}\n\tauth := smtp.PlainAuth(\"\",\n\t\tmailUser.name,\n\t\tmailUser.pswd,\n\t\t\"smtp.gmail.com\",\n\t)\n\terr := smtp.SendMail(\n\t\t\"smtp.gmail.com:587\",\n\t\tauth,\n\t\tmailUser.name,\n\t\t[]string{\"rec@mail.com\"},\n\t\t[]byte(msg),\n\t)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}*\/\n\n\/\/wrong func for Google App Engine deployment. Need to use appengine libs...=(\nfunc echo() {\n\n\turl := \"http:\/\/golangappnode1.appspot.com\/status\"\n\n\tvar jsonStr = []byte(`{\"msg\":\"Hello!\"}`)\n\treq, err := http.NewRequest(\"POST\", url, bytes.NewBuffer(jsonStr))\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\tstatusContent = string(body)\n\n}\n\nfunc helloWorld(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"Hello World!\")\n}\n\nfunc startPage(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"GET\":\n\t\ttemplatePage, _ := template.ParseFiles(\"start.html\")\n\t\ttemplatePage.Execute(w, &webPage{\"simplePage\"})\n\tcase \"POST\":\n\t\tr.ParseForm()\n\t\t\/\/go echo()\n\t\t\/\/fmt.Fprintf(w, \"Successful read command\/input from web-interface! Input contains - \"+r.FormValue(\"nodeId\")+\" \"+r.FormValue(\"echoContent\"))\n\t}\n}\n\nfunc statusServer(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"GET\":\n\t\tfmt.Fprintf(w, \"Get status - \"+statusContent)\n\tcase \"POST\":\n\t\tbuf := new(bytes.Buffer)\n\t\tbuf.ReadFrom(r.Body)\n\t\tnewStr := buf.String()\n\n\t\t\/*inputMsg := echoMessage{}\n\t\terr2 := json.NewDecoder(r.Body).Decode(&inputMsg)\n\t\tif err2 != nil {\n\t\t\tpanic(err2)\n\t\t}*\/\n\n\t\tthisNodeStats := nodeStats{\n\t\t\t1,\n\t\t\t2,\n\t\t\tfalse,\n\t\t\t\"not running\",\n\t\t\t\"empty\",\n\t\t\t\"empty fragment\",\n\t\t\t\"empty\",\n\t\t}\n\n\t\tjsonNodeStats, err1 := json.Marshal(thisNodeStats)\n\t\tif err1 != nil {\n\t\t\tpanic(err1)\n\t\t}\n\n\t\tfmt.Fprintf(w, \"Get data by params in POST - OK \"+string(jsonNodeStats))\n\t\t\/\/statusContent = \"POST request handled, \" + \"Node id: \" + string(nodeSends.id) + \", Echo content: \" + nodeSends.content\n\t\tstatusContent = \"POST request handled, \" + newStr \/\/+ \"Input message object content: \" + inputMsg.Title + inputMsg.Content\n\t}\n}\n\n\/*\n\/\/Functions for isAlive checking realization\nfunc periodicTask(d time.Duration, f func(time.Time)) {\n\tfor t := range time.Tick(d) {\n\t\tf(t)\n\t}\n}\n\nfunc checkAliveNodes(t time.Tick) {\n\tresp, err := http.Get(\"http:\/\/goappnode1.appspot.com\/isalive\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}\n*\/\n\nfunc isAliveServer(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, 1)\n}\n\n\/*\nfunc checkAliveStart(w http.ResponseWriter, r *http.Request) {\n\n}\n\nfunc checkAliveStop(w http.ResponseWriter, r *http.Request) {\n\n}\n*\/\n\nfunc testEcho(w http.ResponseWriter, r *http.Request) {\n\tmsg := echoMessage{\n\t\t\"Message is\",\n\t\t\"\",\n\t}\n\n\tr.ParseForm()\n\tc := appengine.NewContext(r)\n\tmsg.Content = r.FormValue(\"echoContent\")\n\n\tjsonMessage, err2 := json.Marshal(msg)\n\tif err2 != nil {\n\t\tpanic(err2)\n\t}\n\n\t\/\/jsonStr := []byte(`{\"message\":\"` + r.FormValue(\"echoContent\") + `\"}`)\n\tjsonStr := []byte(jsonMessage)\n\tbuf := bytes.NewBuffer(jsonStr)\n\tclient := http.Client{Transport: &urlfetch.Transport{Context: c}}\n\tresp, err := client.Post(\"http:\/\/goappnode\"+r.FormValue(\"nodeId\")+\".appspot.com\/status\", \"application\/octet-stream\", buf)\n\tif err != nil {\n\t\tstatusContent = err.Error()\n\t\tfmt.Println(err)\n\t}\n\trespBody, _ := ioutil.ReadAll(resp.Body)\n\tstatusContent = \"Response from node - \" + string(respBody)\n}\n\nfunc showInfo(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintln(w, \"Information page for test project.\")\n\tfmt.Fprintln(w, \"Language - Go;\")\n\tfmt.Fprintln(w, \"Platform - Google Application Engine;\")\n}\n\nfunc init() {\n\thttp.HandleFunc(\"\/\", startPage)\n\thttp.HandleFunc(\"\/helloworld\", helloWorld)\n\thttp.HandleFunc(\"\/showinfo\", showInfo)\n\t\/\/service pages\n\thttp.HandleFunc(\"\/echo\", testEcho)\n\thttp.HandleFunc(\"\/status\", statusServer)\n\thttp.HandleFunc(\"\/isalive\", isAliveServer)\n\n\t\/\/Wrong code for App Enine - server cant understand what it need to show\n\t\/\/http.ListenAndServe(\":80\", nil)\n}\n\n\/\/this func not needed for deploy on Google App Engine, init() func replace main()\n\/*\nfunc main() {\n\t\/\/fmt.Println(\"Hello, test server started on 8080 port.\\n - \/helloworld - show title page\\n - \/showinfo - show information about this thing\")\n\t\/\/http.ListenAndServe(\":8080\", nil)\n\tgo sender()\n}*\/\n<commit_msg>added simple alive checker<commit_after>\/\/Command to test application without deploy:\n\/\/goapp serve app.yaml\n\/\/Command to deploy\/update application:\n\/\/goapp deploy -application golangnode0 -version 0\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"appengine\"\n\t\"appengine\/urlfetch\"\n)\n\n\/\/predefined parameters\nvar maxNodes int = 10\nvar isAliveCheckPeriod int = 500 \/\/in millisecs\n\n\/\/changeable parameters\nvar statusContent string = \"Default status\"\nvar statusLog string = \"\"\n\n\/\/nodesStates := make(map[int]map[string]string)\n\/*\nexample for this map\nvar nodesStates map[int]map[string]string{\n\t1: map[string]string{\n\t\t\"alive\":\"1\",\n\t\t\"hasTask\":\"true\",\n\t\t\"taskStatus\":\"completed\",\n\t\t\"taskResult\":\"some_result_for_node\"\n\t},\n}\n*\/\n\ntype webPage struct {\n\tTitle string\n}\n\ntype nodeStats struct {\n\tNodeID int `json:\"ID\"`\n\tNodeCount int `json:\"nodeCount\"`\n\tHasTask bool `json:\"hasTask\"`\n\tTaskStatus string `json:\"taskStatus\"` \/\/running-copleted-loaded\n\tTaskResult string `json:\"taskResult\"`\n\tTaskFragmentBody string `json:\"taskFragmentBody\"`\n\tTaskBody string `json:\"taskBody\"`\n}\n\ntype echoMessage struct {\n\tTitle string `json:\"title\"`\n\tContent string `json:\"content\"`\n}\n\n\/\/types for periodical functions\ntype pFunc func()\n\n\/\/wrong func for Google App Engine deployment. Need to use appengine libs...=(\nfunc echo() {\n\n\turl := \"http:\/\/golangappnode1.appspot.com\/status\"\n\n\tvar jsonStr = []byte(`{\"msg\":\"Hello!\"}`)\n\treq, err := http.NewRequest(\"POST\", url, bytes.NewBuffer(jsonStr))\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\tstatusContent = string(body)\n\n}\n\nfunc helloWorld(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintf(w, \"Hello World!\")\n}\n\nfunc startPage(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"GET\":\n\t\ttemplatePage, _ := template.ParseFiles(\"start.html\")\n\t\ttemplatePage.Execute(w, &webPage{\"simplePage\"})\n\tcase \"POST\":\n\t\tr.ParseForm()\n\t\t\/\/go echo()\n\t\t\/\/fmt.Fprintf(w, \"Successful read command\/input from web-interface! Input contains - \"+r.FormValue(\"nodeId\")+\" \"+r.FormValue(\"echoContent\"))\n\t}\n}\n\nfunc statusServer(w http.ResponseWriter, r *http.Request) {\n\tswitch r.Method {\n\tcase \"GET\":\n\t\tfmt.Fprintf(w, \"Get status - \"+statusContent)\n\tcase \"POST\":\n\t\tbuf := new(bytes.Buffer)\n\t\tbuf.ReadFrom(r.Body)\n\t\tnewStr := buf.String()\n\n\t\t\/*inputMsg := echoMessage{}\n\t\terr2 := json.NewDecoder(r.Body).Decode(&inputMsg)\n\t\tif err2 != nil {\n\t\t\tpanic(err2)\n\t\t}*\/\n\n\t\tthisNodeStats := nodeStats{\n\t\t\t1,\n\t\t\t2,\n\t\t\tfalse,\n\t\t\t\"not running\",\n\t\t\t\"empty\",\n\t\t\t\"empty fragment\",\n\t\t\t\"empty\",\n\t\t}\n\n\t\tjsonNodeStats, err1 := json.Marshal(thisNodeStats)\n\t\tif err1 != nil {\n\t\t\tpanic(err1)\n\t\t}\n\n\t\tfmt.Fprintf(w, \"Get data by params in POST - OK \"+string(jsonNodeStats))\n\t\t\/\/statusContent = \"POST request handled, \" + \"Node id: \" + string(nodeSends.id) + \", Echo content: \" + nodeSends.content\n\t\tstatusContent = \"POST request handled, \" + newStr \/\/+ \"Input message object content: \" + inputMsg.Title + inputMsg.Content\n\t}\n}\n\n\/\/Functions for isAlive checking realization\nfunc checkIsAlive(nodeId int) {\n\tnodeUrl := \"http:\/\/goappnode\" + nodeId + \"0.appspot.com\/\"\n\tresp, err := http.Get(nodeUrl)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\tif resp.Status == 200 {\n\t\tstatusLog += \"Node #\" + nodeId + \" - online\"\n\t} else {\n\t\tstatusLog += \"Node #\" + nodeId + \" - offline\"\n\t}\n}\n\nfunc periodicTask(period time.Duration, task pFunc) {\n\tfor {\n\t\ttask()\n\t\ttime.Sleep(period * time.Millisecond)\n\t}\n}\n\n\/*\nfunc checkAliveNodes(t time.Tick) {\n\tresp, err := http.Get(\"http:\/\/goappnode1.appspot.com\/isalive\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}\n*\/\n\nfunc isAliveServer(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, 1)\n}\n\nfunc checkAliveStart(w http.ResponseWriter, r *http.Request) {\n\tgo periodicTask(30000, checkAlive(1))\n}\n\n\/*\nfunc checkAliveStop(w http.ResponseWriter, r *http.Request) {\n\n}\n*\/\n\nfunc testEcho(w http.ResponseWriter, r *http.Request) {\n\tmsg := echoMessage{\n\t\t\"Message is\",\n\t\t\"\",\n\t}\n\n\tr.ParseForm()\n\tc := appengine.NewContext(r)\n\tmsg.Content = r.FormValue(\"echoContent\")\n\n\tjsonMessage, err2 := json.Marshal(msg)\n\tif err2 != nil {\n\t\tpanic(err2)\n\t}\n\n\t\/\/jsonStr := []byte(`{\"message\":\"` + r.FormValue(\"echoContent\") + `\"}`)\n\tjsonStr := []byte(jsonMessage)\n\tbuf := bytes.NewBuffer(jsonStr)\n\tclient := http.Client{Transport: &urlfetch.Transport{Context: c}}\n\tresp, err := client.Post(\"http:\/\/goappnode\"+r.FormValue(\"nodeId\")+\".appspot.com\/status\", \"application\/octet-stream\", buf)\n\tif err != nil {\n\t\tstatusContent = err.Error()\n\t\tfmt.Println(err)\n\t}\n\trespBody, _ := ioutil.ReadAll(resp.Body)\n\tstatusContent = \"Response from node - \" + string(respBody)\n}\n\nfunc showInfo(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprintln(w, \"Information page for test project.\")\n\tfmt.Fprintln(w, \"Language - Go;\")\n\tfmt.Fprintln(w, \"Platform - Google Application Engine;\")\n}\n\nfunc init() {\n\t\/\/view pages\n\thttp.HandleFunc(\"\/\", startPage)\n\thttp.HandleFunc(\"\/helloworld\", helloWorld)\n\thttp.HandleFunc(\"\/showinfo\", showInfo)\n\t\/\/service pages\n\thttp.HandleFunc(\"\/echo\", testEcho)\n\thttp.HandleFunc(\"\/status\", statusServer)\n\thttp.HandleFunc(\"\/isalive\", isAliveServer)\n\thttp.HandleFunc(\"\/startcheck\", checkAliveStart)\n\n\t\/\/Wrong code for App Enine - server cant understand what it need to show\n\t\/\/http.ListenAndServe(\":80\", nil)\n}\n\n\/\/this func not needed for deploy on Google App Engine, init() func replace main()\n\/*\nfunc main() {\n\t\/\/fmt.Println(\"Hello, test server started on 8080 port.\\n - \/helloworld - show title page\\n - \/showinfo - show information about this thing\")\n\t\/\/http.ListenAndServe(\":8080\", nil)\n\tgo sender()\n}*\/\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"runtime\/debug\"\n\t\"strings\"\n\n\t\"github.com\/heroku\/heroku-cli\/Godeps\/_workspace\/src\/github.com\/lunixbochs\/vtclean\"\n\t\"github.com\/heroku\/heroku-cli\/Godeps\/_workspace\/src\/golang.org\/x\/crypto\/ssh\/terminal\"\n)\n\n\/\/ Stdout is used to mock stdout for testing\nvar Stdout io.Writer\n\n\/\/ Stderr is to mock stderr for testing\nvar Stderr io.Writer\n\n\/\/ ErrLogPath is the location of the error log\nvar ErrLogPath = filepath.Join(AppDir(), \"error.log\")\nvar errLogger = newLogger(ErrLogPath)\nvar exitFn = os.Exit\nvar debugging = isDebugging()\nvar debuggingHeaders = isDebuggingHeaders()\n\nfunc init() {\n\tStdout = os.Stdout\n\tStderr = os.Stderr\n}\n\nfunc newLogger(path string) *log.Logger {\n\terr := os.MkdirAll(filepath.Dir(path), 0777)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfile, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn log.New(file, \"\", log.LstdFlags)\n}\n\n\/\/ Exit just calls os.Exit, but can be mocked out for testing\nfunc Exit(code int) {\n\texitFn(code)\n}\n\n\/\/ Err just calls `fmt.Fprint(Stderr, a...)` but can be mocked out for testing.\nfunc Err(a ...interface{}) {\n\tfmt.Fprint(Stderr, a...)\n\tLog(a...)\n}\n\n\/\/ Errf just calls `fmt.Fprintf(Stderr, a...)` but can be mocked out for testing.\nfunc Errf(format string, a ...interface{}) {\n\tfmt.Fprintf(Stderr, format, a...)\n\tLogf(format, a...)\n}\n\n\/\/ Errln just calls `fmt.Fprintln(Stderr, a...)` but can be mocked out for testing.\nfunc Errln(a ...interface{}) {\n\tfmt.Fprintln(Stderr, a...)\n\tLogln(a...)\n}\n\n\/\/ Print is used to replace `fmt.Print()` but can be mocked out for testing.\nfunc Print(a ...interface{}) {\n\tfmt.Fprint(Stdout, a...)\n}\n\n\/\/ Printf is used to replace `fmt.Printf()` but can be mocked out for testing.\nfunc Printf(format string, a ...interface{}) {\n\tfmt.Fprintf(Stdout, format, a...)\n}\n\n\/\/ Println is used to replace `fmt.Println()` but can be mocked out for testing.\nfunc Println(a ...interface{}) {\n\tfmt.Fprintln(Stdout, a...)\n}\n\n\/\/ Log is used to print debugging information\n\/\/ It will be added to the logfile in ~\/.heroku or printed out if HEROKU_DEBUG is set.\nfunc Log(a ...interface{}) {\n\terrLogger.Print(vtclean.Clean(fmt.Sprint(a...), false))\n}\n\n\/\/ Logln is used to print debugging information\n\/\/ It will be added to the logfile in ~\/.heroku\nfunc Logln(a ...interface{}) {\n\tLog(fmt.Sprintln(a...))\n}\n\n\/\/ Logf is used to print debugging information\n\/\/ It will be added to the logfile in ~\/.heroku\nfunc Logf(format string, a ...interface{}) {\n\tLog(fmt.Sprintf(format, a...))\n}\n\n\/\/ Debugln is used to print debugging information\n\/\/ It will be added to the logfile in ~\/.heroku and stderr if HEROKU_DEBUG is set.\nfunc Debugln(a ...interface{}) {\n\tLogln(a...)\n\tif debugging {\n\t\tfmt.Fprintln(Stderr, a...)\n\t}\n}\n\n\/\/ PrintError is a helper that prints out formatted error messages in red text\nfunc PrintError(e error) {\n\tif e == nil {\n\t\treturn\n\t}\n\tError(\"\\n\" + e.Error())\n\tif debugging {\n\t\tdebug.PrintStack()\n\t}\n}\n\n\/\/ Warn shows a message with excalamation points prepended to stderr\nfunc Warn(msg string) {\n\tbang := yellow(\" ▸ \")\n\tmsg = strings.TrimSpace(msg)\n\tmsg = strings.Join(strings.Split(msg, \"\\n\"), \"\\n\"+bang)\n\tErrln(bang + msg)\n}\n\n\/\/ Error shows a message with excalamation points prepended to stderr\nfunc Error(msg string) {\n\tbang := red(\" \" + errorArrow() + \" \")\n\tmsg = strings.TrimSpace(msg)\n\tmsg = strings.Join(strings.Split(msg, \"\\n\"), \"\\n\"+bang)\n\tErrln(bang + msg)\n}\n\nfunc errorArrow() string {\n\tif windows() {\n\t\treturn \"!\"\n\t}\n\treturn \"▸\"\n}\n\n\/\/ ExitIfError calls PrintError and exits if e is not null\nfunc ExitIfError(e error) {\n\tif e != nil {\n\t\tPrintError(e)\n\t\tos.Exit(1)\n\t}\n}\n\n\/\/ LogIfError logs out an error if one arises\nfunc LogIfError(e error) {\n\tif e != nil {\n\t\tLogln(e.Error())\n\t}\n}\n\nfunc isDebugging() bool {\n\tdebug := strings.ToUpper(os.Getenv(\"HEROKU_DEBUG\"))\n\tif debug == \"TRUE\" || debug == \"1\" {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc isDebuggingHeaders() bool {\n\tdebug := strings.ToUpper(os.Getenv(\"HEROKU_DEBUG_HEADERS\"))\n\tif debug == \"TRUE\" || debug == \"1\" {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc yellow(s string) string {\n\tif supportsColor() && !windows() {\n\t\treturn \"\\x1b[33m\" + s + \"\\x1b[39m\"\n\t}\n\treturn s\n}\n\nfunc red(s string) string {\n\tif supportsColor() && !windows() {\n\t\treturn \"\\x1b[31m\" + s + \"\\x1b[39m\"\n\t}\n\treturn s\n}\n\nfunc cyan(s string) string {\n\tif supportsColor() && !windows() {\n\t\treturn \"\\x1b[36m\" + s + \"\\x1b[39m\"\n\t}\n\treturn s\n}\n\nfunc windows() bool {\n\treturn runtime.GOOS == \"windows\"\n}\n\nfunc istty() bool {\n\treturn terminal.IsTerminal(int(os.Stdout.Fd()))\n}\n\nfunc supportsColor() bool {\n\tif !istty() {\n\t\treturn false\n\t}\n\tfor _, arg := range os.Args {\n\t\tif arg == \"--no-color\" {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn os.Getenv(\"COLOR\") != \"false\"\n}\n\nfunc plural(word string, count int) string {\n\tif count == 1 {\n\t\treturn word\n\t}\n\treturn word + \"s\"\n}\n<commit_msg>fix newline before errors<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"runtime\/debug\"\n\t\"strings\"\n\n\t\"github.com\/heroku\/heroku-cli\/Godeps\/_workspace\/src\/github.com\/lunixbochs\/vtclean\"\n\t\"github.com\/heroku\/heroku-cli\/Godeps\/_workspace\/src\/golang.org\/x\/crypto\/ssh\/terminal\"\n)\n\n\/\/ Stdout is used to mock stdout for testing\nvar Stdout io.Writer\n\n\/\/ Stderr is to mock stderr for testing\nvar Stderr io.Writer\n\n\/\/ ErrLogPath is the location of the error log\nvar ErrLogPath = filepath.Join(AppDir(), \"error.log\")\nvar errLogger = newLogger(ErrLogPath)\nvar exitFn = os.Exit\nvar debugging = isDebugging()\nvar debuggingHeaders = isDebuggingHeaders()\n\nfunc init() {\n\tStdout = os.Stdout\n\tStderr = os.Stderr\n}\n\nfunc newLogger(path string) *log.Logger {\n\terr := os.MkdirAll(filepath.Dir(path), 0777)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfile, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0644)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn log.New(file, \"\", log.LstdFlags)\n}\n\n\/\/ Exit just calls os.Exit, but can be mocked out for testing\nfunc Exit(code int) {\n\texitFn(code)\n}\n\n\/\/ Err just calls `fmt.Fprint(Stderr, a...)` but can be mocked out for testing.\nfunc Err(a ...interface{}) {\n\tfmt.Fprint(Stderr, a...)\n\tLog(a...)\n}\n\n\/\/ Errf just calls `fmt.Fprintf(Stderr, a...)` but can be mocked out for testing.\nfunc Errf(format string, a ...interface{}) {\n\tfmt.Fprintf(Stderr, format, a...)\n\tLogf(format, a...)\n}\n\n\/\/ Errln just calls `fmt.Fprintln(Stderr, a...)` but can be mocked out for testing.\nfunc Errln(a ...interface{}) {\n\tfmt.Fprintln(Stderr, a...)\n\tLogln(a...)\n}\n\n\/\/ Print is used to replace `fmt.Print()` but can be mocked out for testing.\nfunc Print(a ...interface{}) {\n\tfmt.Fprint(Stdout, a...)\n}\n\n\/\/ Printf is used to replace `fmt.Printf()` but can be mocked out for testing.\nfunc Printf(format string, a ...interface{}) {\n\tfmt.Fprintf(Stdout, format, a...)\n}\n\n\/\/ Println is used to replace `fmt.Println()` but can be mocked out for testing.\nfunc Println(a ...interface{}) {\n\tfmt.Fprintln(Stdout, a...)\n}\n\n\/\/ Log is used to print debugging information\n\/\/ It will be added to the logfile in ~\/.heroku or printed out if HEROKU_DEBUG is set.\nfunc Log(a ...interface{}) {\n\terrLogger.Print(vtclean.Clean(fmt.Sprint(a...), false))\n}\n\n\/\/ Logln is used to print debugging information\n\/\/ It will be added to the logfile in ~\/.heroku\nfunc Logln(a ...interface{}) {\n\tLog(fmt.Sprintln(a...))\n}\n\n\/\/ Logf is used to print debugging information\n\/\/ It will be added to the logfile in ~\/.heroku\nfunc Logf(format string, a ...interface{}) {\n\tLog(fmt.Sprintf(format, a...))\n}\n\n\/\/ Debugln is used to print debugging information\n\/\/ It will be added to the logfile in ~\/.heroku and stderr if HEROKU_DEBUG is set.\nfunc Debugln(a ...interface{}) {\n\tLogln(a...)\n\tif debugging {\n\t\tfmt.Fprintln(Stderr, a...)\n\t}\n}\n\n\/\/ PrintError is a helper that prints out formatted error messages in red text\nfunc PrintError(e error) {\n\tif e == nil {\n\t\treturn\n\t}\n\tErrln()\n\tError(e.Error())\n\tif debugging {\n\t\tdebug.PrintStack()\n\t}\n}\n\n\/\/ Warn shows a message with excalamation points prepended to stderr\nfunc Warn(msg string) {\n\tbang := yellow(\" ▸ \")\n\tmsg = strings.TrimSpace(msg)\n\tmsg = strings.Join(strings.Split(msg, \"\\n\"), \"\\n\"+bang)\n\tErrln(bang + msg)\n}\n\n\/\/ Error shows a message with excalamation points prepended to stderr\nfunc Error(msg string) {\n\tbang := red(\" \" + errorArrow() + \" \")\n\tmsg = strings.TrimSpace(msg)\n\tmsg = strings.Join(strings.Split(msg, \"\\n\"), \"\\n\"+bang)\n\tErrln(bang + msg)\n}\n\nfunc errorArrow() string {\n\tif windows() {\n\t\treturn \"!\"\n\t}\n\treturn \"▸\"\n}\n\n\/\/ ExitIfError calls PrintError and exits if e is not null\nfunc ExitIfError(e error) {\n\tif e != nil {\n\t\tPrintError(e)\n\t\tos.Exit(1)\n\t}\n}\n\n\/\/ LogIfError logs out an error if one arises\nfunc LogIfError(e error) {\n\tif e != nil {\n\t\tLogln(e.Error())\n\t}\n}\n\nfunc isDebugging() bool {\n\tdebug := strings.ToUpper(os.Getenv(\"HEROKU_DEBUG\"))\n\tif debug == \"TRUE\" || debug == \"1\" {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc isDebuggingHeaders() bool {\n\tdebug := strings.ToUpper(os.Getenv(\"HEROKU_DEBUG_HEADERS\"))\n\tif debug == \"TRUE\" || debug == \"1\" {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc yellow(s string) string {\n\tif supportsColor() && !windows() {\n\t\treturn \"\\x1b[33m\" + s + \"\\x1b[39m\"\n\t}\n\treturn s\n}\n\nfunc red(s string) string {\n\tif supportsColor() && !windows() {\n\t\treturn \"\\x1b[31m\" + s + \"\\x1b[39m\"\n\t}\n\treturn s\n}\n\nfunc cyan(s string) string {\n\tif supportsColor() && !windows() {\n\t\treturn \"\\x1b[36m\" + s + \"\\x1b[39m\"\n\t}\n\treturn s\n}\n\nfunc windows() bool {\n\treturn runtime.GOOS == \"windows\"\n}\n\nfunc istty() bool {\n\treturn terminal.IsTerminal(int(os.Stdout.Fd()))\n}\n\nfunc supportsColor() bool {\n\tif !istty() {\n\t\treturn false\n\t}\n\tfor _, arg := range os.Args {\n\t\tif arg == \"--no-color\" {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn os.Getenv(\"COLOR\") != \"false\"\n}\n\nfunc plural(word string, count int) string {\n\tif count == 1 {\n\t\treturn word\n\t}\n\treturn word + \"s\"\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/Toorop\/govh\"\n\t\"github.com\/Toorop\/govh\/ip\"\n\t\/\/\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc ipHandler(cmd *Cmd) (resp string, err error) {\n\t\/\/ New govh client\n\tclient := govh.NewClient(OVH_APP_KEY, OVH_APP_SECRET, ck)\n\t\/\/ New ip ressource\n\tipr, err := ip.New(client)\n\n\tswitch cmd.Action {\n\t\/\/ List\n\tcase \"list\":\n\t\tipType := \"all\"\n\t\tif len(cmd.Args) > 2 {\n\t\t\tipType = cmd.Args[2]\n\t\t}\n\t\tips, err := ipr.List(ipType)\n\t\tif err != nil {\n\t\t\tdieError(err)\n\t\t}\n\t\tfor _, i := range ips {\n\t\t\tresp = fmt.Sprintf(\"%s%s\\r\\n\", resp, i.IP)\n\t\t}\n\t\tif len(resp) > 2 {\n\t\t\tresp = resp[0 : len(resp)-2]\n\t\t}\n\t\tbreak\n\tcase \"lb\":\n\t\tif len(cmd.Args) < 3 {\n\t\t\tdieError(\"\\\"ip lb\\\" needs an argument see doc at https:\/\/github.com\/Toorop\/govh\/blob\/master\/cli\/README.md\")\n\t\t}\n\t\tvar t []byte\n\t\tt, err = ipr.LbList()\n\t\tresp = string(t)\n\t\tbreak\n\n\tcase \"fw\":\n\t\t\/\/ ip fw ipBlock.IP list\n\t\t\/\/ ip fw x.x.x.x\/y\n\t\t\/\/ Return IP V4 list of this block which is under firewall\n\t\tif len(cmd.Args) == 4 && cmd.Args[3] == \"list\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\tips, err := ipr.FwListIpOfBlock(block)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tfor _, i := range ips {\n\t\t\t\tresp = fmt.Sprintf(\"%s%s\\r\\n\", resp, i)\n\t\t\t}\n\t\t\tif len(resp) > 2 {\n\t\t\t\tresp = resp[0 : len(resp)-2]\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Add IP to firewall\n\t\t\/\/ cmd : ip fw ibBlock.IP ipV4 add\n\t\tif len(cmd.Args) == 5 && cmd.Args[4] == \"add\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\tif err = ipr.FwAddIp(block, cmd.Args[3]); err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tdieOk(fmt.Sprintf(\"%s added to firewall\", cmd.Args[3]))\n\t\t}\n\n\t\t\/\/ Get properties of a firewalled IP\n\t\t\/\/ ip fw ipBlock.IP ipV4 prop\n\t\tif len(cmd.Args) == 5 && cmd.Args[4] == \"prop\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\ti, err := ipr.FwGetIpProperties(block, cmd.Args[3])\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tdieOk(fmt.Sprintf(\"ipOnFirewall: %s%sEnabled: %t%sState: %s\", i.IpOnFirewall, NL, i.Enabled, NL, i.State))\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Enable firewalll for IP ipv4\n\t\t\/\/ ip fw ipVlock ipV4 enable\n\t\tif len(cmd.Args) == 5 && cmd.Args[4] == \"enable\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\terr := ipr.FwSetFirewallEnable(block, cmd.Args[3], true)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tdieOk(\"ok\")\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Disable firewalll for IP ipv4\n\t\t\/\/ ip fw ipVlock ipV4 disable\n\t\tif len(cmd.Args) == 5 && cmd.Args[4] == \"disable\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\terr := ipr.FwSetFirewallEnable(block, cmd.Args[3], false)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tdieOk(\"ok\")\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Remove IPv4 from firewall\n\t\t\/\/ cmd : ip fw ipBlock.IP ipV4 remove\n\t\tif len(cmd.Args) == 5 && cmd.Args[4] == \"remove\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\tif err = ipr.FwRemoveIp(block, cmd.Args[3]); err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tdieOk(fmt.Sprintf(\"%s removed from firewall\", cmd.Args[3]))\n\t\t}\n\n\t\t\/\/ Get rules sequences\n\t\t\/\/ ip fw ipBlock.IP ipV4 listRules all\n\t\tif len(cmd.Args) >= 5 && cmd.Args[4] == \"listRules\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\tstate := \"\"\n\t\t\tif len(cmd.Args) == 6 {\n\t\t\t\tstate = cmd.Args[6]\n\t\t\t}\n\t\t\tt, err := ipr.FwGetRulesSequences(block, cmd.Args[3], state)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tvar r string\n\t\t\tif len(t) > 0 {\n\t\t\t\tr = fmt.Sprintf(\"%d\", t[0])\n\t\t\t\tfor _, s := range t[1:] {\n\t\t\t\t\tr = fmt.Sprintf(\"%s%s%d\", r, NL, s)\n\t\t\t\t}\n\t\t\t}\n\t\t\tdieOk(r)\n\t\t}\n\n\t\t\/\/ Add rule\n\t\t\/\/ ip fw ipBlock.IP ipV4 addRule rule (as Json)\n\t\tif len(cmd.Args) == 6 && cmd.Args[4] == \"addRule\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\t\/\/ Check json\n\t\t\tvar rule ip.FirewallRule2Add\n\t\t\terr := json.Unmarshal([]byte(cmd.Args[5]), &rule)\n\t\t\tif err != nil {\n\t\t\t\tdieError(\"Rule error. See doc at : https:\/\/github.com\/Toorop\/ovh-cli\", err)\n\t\t\t}\n\t\t\terr = ipr.FwAddRule(block, cmd.Args[3], rule)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tdieOk(\"OK\")\n\t\t}\n\n\t\t\/\/ Remove rule\n\t\t\/\/ ip fw ipBlock.IP ipV4 remRule ruleSequence\n\t\tif len(cmd.Args) == 6 && cmd.Args[4] == \"remRule\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\tsequence, err := strconv.Atoi(cmd.Args[5])\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\terr = ipr.FwRemoveRule(block, cmd.Args[3], sequence)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tdieOk(fmt.Sprintf(\"Rule %d removed\", sequence))\n\n\t\t}\n\n\t\t\/\/ Get rule\n\t\t\/\/ ip fw ipBlock.IP ipV4 getRule sequence\n\t\tif len(cmd.Args) == 6 && cmd.Args[4] == \"getRule\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\tsequence, err := strconv.Atoi(cmd.Args[5])\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\trule, err := ipr.FwGetRule(block, cmd.Args[3], sequence)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tout := \"\"\n\t\t\tif len(rule.Protocol) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sProtocol: %s%s\", out, rule.Protocol, NL)\n\t\t\t}\n\t\t\tif len(rule.Source) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sSource: %s%s\", out, rule.Source, NL)\n\t\t\t}\n\t\t\tif len(rule.DestinationPort) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sDestinationPort: %s%s\", out, rule.DestinationPort, NL)\n\t\t\t}\n\n\t\t\tout = fmt.Sprintf(\"%sSequence: %d%s\", out, rule.Sequence, NL)\n\n\t\t\tif len(rule.Options) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sOptions: %s%s\", out, strings.Join(rule.Options, \" \"), NL)\n\t\t\t}\n\t\t\tif len(rule.Destination) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sDestination: %s%s\", out, rule.Destination, NL)\n\t\t\t}\n\t\t\tif len(rule.Rule) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sRule: %s%s\", out, rule.Rule, NL)\n\t\t\t}\n\t\t\tif len(rule.SourcePort) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sSourcePort: %s%s\", out, rule.SourcePort, NL)\n\t\t\t}\n\t\t\tif len(rule.State) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sState: %s%s\", out, rule.State, NL)\n\t\t\t}\n\t\t\tif len(rule.CreationDate) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sCreationDate: %s%s\", out, rule.CreationDate, NL)\n\t\t\t}\n\t\t\tif len(rule.Action) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sAction: %s%s\", out, rule.Action, NL)\n\t\t\t}\n\t\t\tdieOk(out[0 : len(out)-2])\n\n\t\t}\n\t\terr = errors.New(fmt.Sprintf(\"This action : '%s' is not valid or not implemented yet !\", strings.Join(cmd.Args, \" \")))\n\t\tbreak\n\n\tcase \"spam\":\n\t\t\/\/ List of spamming IP\n\t\t\/\/ ip spam ipBlock.IP listSpammingIp STATE\n\t\tif len(cmd.Args) >= 4 && cmd.Args[3] == \"listSpammingIp\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\tstate := \"\"\n\t\t\tif len(cmd.Args) == 5 {\n\t\t\t\tstate = cmd.Args[4]\n\t\t\t}\n\t\t\tips, err := ipr.SpamGetSpammingIps(block, state)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tfor _, ip := range ips {\n\t\t\t\tfmt.Println(ip)\n\t\t\t}\n\t\t\tdieOk(\"\")\n\t\t}\n\n\t\t\/\/ detailed info about a spamming IP\n\t\t\/\/ ip spam ipBlock.IP ipv4 details\n\t\tif len(cmd.Args) == 5 && cmd.Args[4] == \"details\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\tspamIp, err := ipr.SpamGetSpamIp(block, cmd.Args[3])\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tdieOk(fmt.Sprintf(\"Time: %d%sDate: %s%sIpSpamming: %s%sState: %s\", spamIp.Time, NL, spamIp.Date, NL, spamIp.IpSpamming, NL, spamIp.State))\n\t\t}\n\n\t\t\/\/ Stats about a spamming IP\n\t\t\/\/ ip spam ipBlock.IP ipv4 stats FROM TO\n\t\tif len(cmd.Args) == 7 && cmd.Args[4] == \"stats\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\n\t\t\tfrom, err := strconv.ParseInt(cmd.Args[5], 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\n\t\t\tto, err := strconv.ParseInt(cmd.Args[6], 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tspamStats, err := ipr.SpamGetIpStats(block, cmd.Args[3], time.Unix(from, 0), time.Unix(to, 0))\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tif spamStats == nil {\n\t\t\t\tdieOk(\"No spam stats for this period\")\n\t\t\t}\n\t\t\tfmt.Printf(\"Blocked for the last time: %s%s\", time.Unix(spamStats.Timestamp, 0).Format(time.RFC822Z), NL)\n\t\t\tfmt.Printf(\"Number of emails sent: %d%s\", spamStats.Total, NL)\n\t\t\tfmt.Printf(\"Number of spams sent: %d%s\", spamStats.NumberOfSpams, NL)\n\t\t\tfmt.Printf(\"Average score: %d%s%s\", spamStats.AverageSpamScore, NL, NL)\n\t\t\tif len(spamStats.DetectedSpams) > 0 {\n\t\t\t\tfmt.Println(\"Detected Spams : \", NL)\n\t\t\t}\n\t\t\tfor _, ds := range spamStats.DetectedSpams {\n\t\t\t\tfmt.Println(\"\")\n\t\t\t\tfmt.Printf(\"\\tDate: %s%s\", time.Unix(ds.Date, 0).Format(time.RFC822Z), NL)\n\t\t\t\tfmt.Printf(\"\\tMessage ID: %s%s\", ds.MessageId, NL)\n\t\t\t\tfmt.Printf(\"\\tDestination IP: %s%s\", ds.DestinationIp, NL)\n\t\t\t\tfmt.Printf(\"\\tScore: %d%s\", ds.Spamscore, NL)\n\t\t\t}\n\t\t\tdieOk(\"\")\n\t\t}\n\n\t\t\/\/ Unblock\n\t\t\/\/ ip spam ipBlock.IP ipv4 unblock\n\t\tif len(cmd.Args) == 5 && cmd.Args[4] == \"unblock\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\terr := ipr.SpamUnblockSpamIp(block, cmd.Args[3])\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tdieOk(\"ok\")\n\t\t}\n\n\t\terr = errors.New(fmt.Sprintf(\"This action : '%s' is not valid or not implemented yet !\", strings.Join(cmd.Args, \" \")))\n\t\tbreak\n\tcase \"getBlockedForSpam\":\n\t\t\/\/ On va chercher les blocks\n\t\tips, err := ipr.GetBlockedForSpam()\n\t\tif err != nil {\n\t\t\tdieError(err)\n\t\t}\n\t\tif len(ips) == 0 {\n\t\t\tdieOk(\"\")\n\t\t}\n\t\tfor _, i := range ips {\n\t\t\tfmt.Println(i)\n\t\t}\n\t\tdieOk(\"\")\n\n\t\t\/\/ On les tests\n\t\tbreak\n\n\tdefault:\n\t\terr = errors.New(fmt.Sprintf(\"This action : '%s' is not valid or not implemented yet !\", strings.Join(cmd.Args, \" \")))\n\t}\n\treturn\n\n}\n<commit_msg>refactor return<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/Toorop\/govh\"\n\t\"github.com\/Toorop\/govh\/ip\"\n\t\/\/\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc ipHandler(cmd *Cmd) (err error) {\n\t\/\/ New govh client\n\tclient := govh.NewClient(OVH_APP_KEY, OVH_APP_SECRET, ck)\n\t\/\/ New ip ressource\n\tipr, err := ip.New(client)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar resp string\n\n\tswitch cmd.Action {\n\t\/\/ List\n\tcase \"list\":\n\t\tipType := \"all\"\n\t\tif len(cmd.Args) > 2 {\n\t\t\tipType = cmd.Args[2]\n\t\t}\n\t\tips, err := ipr.List(ipType)\n\t\tif err != nil {\n\t\t\tdieError(err)\n\t\t}\n\t\tfor _, i := range ips {\n\t\t\tresp = fmt.Sprintf(\"%s%s\\r\\n\", resp, i.IP)\n\t\t}\n\t\tif len(resp) > 2 {\n\t\t\tresp = resp[0 : len(resp)-2]\n\t\t}\n\t\tdieOk(resp)\n\t\tbreak\n\tcase \"lb\":\n\t\tif len(cmd.Args) < 3 {\n\t\t\tdieError(\"\\\"ip lb\\\" needs an argument see doc at https:\/\/github.com\/Toorop\/govh\/blob\/master\/cli\/README.md\")\n\t\t}\n\t\tvar t []byte\n\t\tt, err = ipr.LbList()\n\t\tresp = string(t)\n\t\tdieOk(resp)\n\t\tbreak\n\n\tcase \"fw\":\n\t\t\/\/ ip fw ipBlock.IP list\n\t\t\/\/ ip fw x.x.x.x\/y\n\t\t\/\/ Return IP V4 list of this block which is under firewall\n\t\tif len(cmd.Args) == 4 && cmd.Args[3] == \"list\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\tips, err := ipr.FwListIpOfBlock(block)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tfor _, i := range ips {\n\t\t\t\tresp = fmt.Sprintf(\"%s%s\\r\\n\", resp, i)\n\t\t\t}\n\t\t\tif len(resp) > 2 {\n\t\t\t\tresp = resp[0 : len(resp)-2]\n\t\t\t}\n\t\t\tdieOk(resp)\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Add IP to firewall\n\t\t\/\/ cmd : ip fw ibBlock.IP ipV4 add\n\t\tif len(cmd.Args) == 5 && cmd.Args[4] == \"add\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\tif err = ipr.FwAddIp(block, cmd.Args[3]); err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tdieOk(fmt.Sprintf(\"%s added to firewall\", cmd.Args[3]))\n\t\t}\n\n\t\t\/\/ Get properties of a firewalled IP\n\t\t\/\/ ip fw ipBlock.IP ipV4 prop\n\t\tif len(cmd.Args) == 5 && cmd.Args[4] == \"prop\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\ti, err := ipr.FwGetIpProperties(block, cmd.Args[3])\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tdieOk(fmt.Sprintf(\"ipOnFirewall: %s%sEnabled: %t%sState: %s\", i.IpOnFirewall, NL, i.Enabled, NL, i.State))\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Enable firewalll for IP ipv4\n\t\t\/\/ ip fw ipVlock ipV4 enable\n\t\tif len(cmd.Args) == 5 && cmd.Args[4] == \"enable\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\terr := ipr.FwSetFirewallEnable(block, cmd.Args[3], true)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tdieOk(\"ok\")\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Disable firewalll for IP ipv4\n\t\t\/\/ ip fw ipVlock ipV4 disable\n\t\tif len(cmd.Args) == 5 && cmd.Args[4] == \"disable\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\terr := ipr.FwSetFirewallEnable(block, cmd.Args[3], false)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tdieOk(\"ok\")\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Remove IPv4 from firewall\n\t\t\/\/ cmd : ip fw ipBlock.IP ipV4 remove\n\t\tif len(cmd.Args) == 5 && cmd.Args[4] == \"remove\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\tif err = ipr.FwRemoveIp(block, cmd.Args[3]); err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tdieOk(fmt.Sprintf(\"%s removed from firewall\", cmd.Args[3]))\n\t\t}\n\n\t\t\/\/ Get rules sequences\n\t\t\/\/ ip fw ipBlock.IP ipV4 listRules all\n\t\tif len(cmd.Args) >= 5 && cmd.Args[4] == \"listRules\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\tstate := \"\"\n\t\t\tif len(cmd.Args) == 6 {\n\t\t\t\tstate = cmd.Args[6]\n\t\t\t}\n\t\t\tt, err := ipr.FwGetRulesSequences(block, cmd.Args[3], state)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tvar r string\n\t\t\tif len(t) > 0 {\n\t\t\t\tr = fmt.Sprintf(\"%d\", t[0])\n\t\t\t\tfor _, s := range t[1:] {\n\t\t\t\t\tr = fmt.Sprintf(\"%s%s%d\", r, NL, s)\n\t\t\t\t}\n\t\t\t}\n\t\t\tdieOk(r)\n\t\t}\n\n\t\t\/\/ Add rule\n\t\t\/\/ ip fw ipBlock.IP ipV4 addRule rule (as Json)\n\t\tif len(cmd.Args) == 6 && cmd.Args[4] == \"addRule\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\t\/\/ Check json\n\t\t\tvar rule ip.FirewallRule2Add\n\t\t\terr := json.Unmarshal([]byte(cmd.Args[5]), &rule)\n\t\t\tif err != nil {\n\t\t\t\tdieError(\"Rule error. See doc at : https:\/\/github.com\/Toorop\/ovh-cli\", err)\n\t\t\t}\n\t\t\terr = ipr.FwAddRule(block, cmd.Args[3], rule)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tdieOk(\"OK\")\n\t\t}\n\n\t\t\/\/ Remove rule\n\t\t\/\/ ip fw ipBlock.IP ipV4 remRule ruleSequence\n\t\tif len(cmd.Args) == 6 && cmd.Args[4] == \"remRule\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\tsequence, err := strconv.Atoi(cmd.Args[5])\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\terr = ipr.FwRemoveRule(block, cmd.Args[3], sequence)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tdieOk(fmt.Sprintf(\"Rule %d removed\", sequence))\n\n\t\t}\n\n\t\t\/\/ Get rule\n\t\t\/\/ ip fw ipBlock.IP ipV4 getRule sequence\n\t\tif len(cmd.Args) == 6 && cmd.Args[4] == \"getRule\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\tsequence, err := strconv.Atoi(cmd.Args[5])\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\trule, err := ipr.FwGetRule(block, cmd.Args[3], sequence)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tout := \"\"\n\t\t\tif len(rule.Protocol) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sProtocol: %s%s\", out, rule.Protocol, NL)\n\t\t\t}\n\t\t\tif len(rule.Source) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sSource: %s%s\", out, rule.Source, NL)\n\t\t\t}\n\t\t\tif len(rule.DestinationPort) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sDestinationPort: %s%s\", out, rule.DestinationPort, NL)\n\t\t\t}\n\n\t\t\tout = fmt.Sprintf(\"%sSequence: %d%s\", out, rule.Sequence, NL)\n\n\t\t\tif len(rule.Options) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sOptions: %s%s\", out, strings.Join(rule.Options, \" \"), NL)\n\t\t\t}\n\t\t\tif len(rule.Destination) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sDestination: %s%s\", out, rule.Destination, NL)\n\t\t\t}\n\t\t\tif len(rule.Rule) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sRule: %s%s\", out, rule.Rule, NL)\n\t\t\t}\n\t\t\tif len(rule.SourcePort) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sSourcePort: %s%s\", out, rule.SourcePort, NL)\n\t\t\t}\n\t\t\tif len(rule.State) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sState: %s%s\", out, rule.State, NL)\n\t\t\t}\n\t\t\tif len(rule.CreationDate) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sCreationDate: %s%s\", out, rule.CreationDate, NL)\n\t\t\t}\n\t\t\tif len(rule.Action) > 0 {\n\t\t\t\tout = fmt.Sprintf(\"%sAction: %s%s\", out, rule.Action, NL)\n\t\t\t}\n\t\t\tdieOk(out[0 : len(out)-2])\n\n\t\t}\n\t\terr = errors.New(fmt.Sprintf(\"This action : '%s' is not valid or not implemented yet !\", strings.Join(cmd.Args, \" \")))\n\t\tbreak\n\n\tcase \"spam\":\n\t\t\/\/ List of spamming IP\n\t\t\/\/ ip spam ipBlock.IP listSpammingIp STATE\n\t\tif len(cmd.Args) >= 4 && cmd.Args[3] == \"listSpammingIp\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\tstate := \"\"\n\t\t\tif len(cmd.Args) == 5 {\n\t\t\t\tstate = cmd.Args[4]\n\t\t\t}\n\t\t\tips, err := ipr.SpamGetSpammingIps(block, state)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tfor _, ip := range ips {\n\t\t\t\tfmt.Println(ip)\n\t\t\t}\n\t\t\tdieOk(\"\")\n\t\t}\n\n\t\t\/\/ detailed info about a spamming IP\n\t\t\/\/ ip spam ipBlock.IP ipv4 details\n\t\tif len(cmd.Args) == 5 && cmd.Args[4] == \"details\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\tspamIp, err := ipr.SpamGetSpamIp(block, cmd.Args[3])\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tdieOk(fmt.Sprintf(\"Time: %d%sDate: %s%sIpSpamming: %s%sState: %s\", spamIp.Time, NL, spamIp.Date, NL, spamIp.IpSpamming, NL, spamIp.State))\n\t\t}\n\n\t\t\/\/ Stats about a spamming IP\n\t\t\/\/ ip spam ipBlock.IP ipv4 stats FROM TO\n\t\tif len(cmd.Args) == 7 && cmd.Args[4] == \"stats\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\n\t\t\tfrom, err := strconv.ParseInt(cmd.Args[5], 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\n\t\t\tto, err := strconv.ParseInt(cmd.Args[6], 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tspamStats, err := ipr.SpamGetIpStats(block, cmd.Args[3], time.Unix(from, 0), time.Unix(to, 0))\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tif spamStats == nil {\n\t\t\t\tdieOk(\"No spam stats for this period\")\n\t\t\t}\n\t\t\tfmt.Printf(\"Blocked for the last time: %s%s\", time.Unix(spamStats.Timestamp, 0).Format(time.RFC822Z), NL)\n\t\t\tfmt.Printf(\"Number of emails sent: %d%s\", spamStats.Total, NL)\n\t\t\tfmt.Printf(\"Number of spams sent: %d%s\", spamStats.NumberOfSpams, NL)\n\t\t\tfmt.Printf(\"Average score: %d%s%s\", spamStats.AverageSpamScore, NL, NL)\n\t\t\tif len(spamStats.DetectedSpams) > 0 {\n\t\t\t\tfmt.Println(\"Detected Spams : \", NL)\n\t\t\t}\n\t\t\tfor _, ds := range spamStats.DetectedSpams {\n\t\t\t\tfmt.Println(\"\")\n\t\t\t\tfmt.Printf(\"\\tDate: %s%s\", time.Unix(ds.Date, 0).Format(time.RFC822Z), NL)\n\t\t\t\tfmt.Printf(\"\\tMessage ID: %s%s\", ds.MessageId, NL)\n\t\t\t\tfmt.Printf(\"\\tDestination IP: %s%s\", ds.DestinationIp, NL)\n\t\t\t\tfmt.Printf(\"\\tScore: %d%s\", ds.Spamscore, NL)\n\t\t\t}\n\t\t\tdieOk(\"\")\n\t\t}\n\n\t\t\/\/ Unblock\n\t\t\/\/ ip spam ipBlock.IP ipv4 unblock\n\t\tif len(cmd.Args) == 5 && cmd.Args[4] == \"unblock\" {\n\t\t\tblock := ip.IpBlock{cmd.Args[2], \"\"}\n\t\t\terr := ipr.SpamUnblockSpamIp(block, cmd.Args[3])\n\t\t\tif err != nil {\n\t\t\t\tdieError(err)\n\t\t\t}\n\t\t\tdieOk(\"ok\")\n\t\t}\n\n\t\terr = errors.New(fmt.Sprintf(\"This action : '%s' is not valid or not implemented yet !\", strings.Join(cmd.Args, \" \")))\n\t\tbreak\n\tcase \"getBlockedForSpam\":\n\t\t\/\/ On va chercher les blocks\n\t\tips, err := ipr.GetBlockedForSpam()\n\t\tif err != nil {\n\t\t\tdieError(err)\n\t\t}\n\t\tif len(ips) == 0 {\n\t\t\tdieOk(\"\")\n\t\t}\n\t\tfor _, i := range ips {\n\t\t\tfmt.Println(i)\n\t\t}\n\t\tdieOk(\"\")\n\n\t\t\/\/ On les tests\n\t\tbreak\n\n\tdefault:\n\t\terr = errors.New(fmt.Sprintf(\"This action : '%s' is not valid or not implemented yet !\", strings.Join(cmd.Args, \" \")))\n\t}\n\treturn\n\n}\n<|endoftext|>"} {"text":"<commit_before>package controller\n\nimport (\n\t\"crypto\/md5\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/tidwall\/tile38\/controller\/log\"\n\t\"github.com\/tidwall\/tile38\/core\"\n)\n\nconst backwardsBufferSize = 50000\n\n\/\/ checksum performs a simple md5 checksum on the aof file\nfunc (c *Controller) checksum(pos, size int64) (sum string, err error) {\n\tif pos+size > int64(c.aofsz) {\n\t\treturn \"\", io.EOF\n\t}\n\tvar f *os.File\n\tf, err = os.Open(c.f.Name())\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer f.Close()\n\tsumr := md5.New()\n\terr = func() error {\n\t\tif size == 0 {\n\t\t\tn, err := f.Seek(int64(c.aofsz), 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif pos >= n {\n\t\t\t\treturn io.EOF\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\t_, err = f.Seek(pos, 0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = io.CopyN(sumr, f, size)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}()\n\tif err != nil {\n\t\tif err == io.ErrUnexpectedEOF {\n\t\t\terr = io.EOF\n\t\t}\n\t\treturn \"\", err\n\t}\n\treturn fmt.Sprintf(\"%x\", sumr.Sum(nil)), nil\n}\n\nfunc connAOFMD5(conn *Conn, pos, size int64) (sum string, err error) {\n\tv, err := conn.Do(\"aofmd5\", pos, size)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif v.Error() != nil {\n\t\treturn \"\", v.Error()\n\t}\n\tsum = v.String()\n\tif len(sum) != 32 {\n\t\treturn \"\", errors.New(\"checksum not ok\")\n\t}\n\treturn sum, nil\n}\n\nfunc (c *Controller) matchChecksums(conn *Conn, pos, size int64) (match bool, err error) {\n\tsum, err := c.checksum(pos, size)\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\tcsum, err := connAOFMD5(conn, pos, size)\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\treturn csum == sum, nil\n}\n\n\/\/ followCheckSome is not a full checksum. It just \"checks some\" data.\n\/\/ We will do some various checksums on the leader until we find the correct position to start at.\nfunc (c *Controller) followCheckSome(addr string, followc uint64) (pos int64, err error) {\n\tif core.ShowDebugMessages {\n\t\tlog.Debug(\"follow:\", addr, \":check some\")\n\t}\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tif c.followc != followc {\n\t\treturn 0, errNoLongerFollowing\n\t}\n\tif c.aofsz < checksumsz {\n\t\treturn 0, nil\n\t}\n\n\tconn, err := DialTimeout(addr, time.Second*2)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer conn.Close()\n\n\tmin := int64(0)\n\tmax := int64(c.aofsz) - checksumsz\n\tlimit := int64(c.aofsz)\n\tmatch, err := c.matchChecksums(conn, min, checksumsz)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif match {\n\t\tmin += checksumsz \/\/ bump up the min\n\t\tfor {\n\t\t\tif max < min || max+checksumsz > limit {\n\t\t\t\tpos = min\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tmatch, err = c.matchChecksums(conn, max, checksumsz)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 0, err\n\t\t\t\t}\n\t\t\t\tif match {\n\t\t\t\t\tmin = max + checksumsz\n\t\t\t\t} else {\n\t\t\t\t\tlimit = max\n\t\t\t\t}\n\t\t\t\tmax = (limit-min)\/2 - checksumsz\/2 + min \/\/ multiply\n\t\t\t}\n\t\t}\n\t}\n\tfullpos := pos\n\tfname := c.f.Name()\n\tif pos == 0 {\n\t\tc.f.Close()\n\t\tc.f, err = os.Create(fname)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"could not recreate aof, possible data loss. %s\", err.Error())\n\t\t\treturn 0, err\n\t\t}\n\t\treturn 0, nil\n\t}\n\n\t\/\/ we want to truncate at a command location\n\t\/\/ search for nearest command\n\tf, err := os.Open(c.f.Name())\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer f.Close()\n\tif _, err := f.Seek(pos, 0); err != nil {\n\t\treturn 0, err\n\t}\n\t\/\/ need to read backwards looking for null byte\n\tconst bufsz = backwardsBufferSize\n\tbuf := make([]byte, bufsz)\nouter:\n\tfor {\n\t\tif pos < int64(len(buf)) {\n\t\t\tpos = 0\n\t\t\tbreak\n\t\t}\n\t\tif _, err := f.Seek(pos-bufsz, 0); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tif _, err := io.ReadFull(f, buf); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tfor i := len(buf) - 1; i >= 0; i-- {\n\t\t\tif buf[i] == 0 {\n\t\t\t\ttpos := pos - bufsz + int64(i) - 4\n\t\t\t\tif tpos < 0 {\n\t\t\t\t\tpos = 0\n\t\t\t\t\tbreak outer \/\/ at beginning of file\n\t\t\t\t}\n\t\t\t\tif _, err := f.Seek(tpos, 0); err != nil {\n\t\t\t\t\treturn 0, err\n\t\t\t\t}\n\t\t\t\tszb := make([]byte, 4)\n\t\t\t\tif _, err := io.ReadFull(f, szb); err != nil {\n\t\t\t\t\treturn 0, err\n\t\t\t\t}\n\t\t\t\tsz2 := int64(binary.LittleEndian.Uint32(szb))\n\t\t\t\ttpos = tpos - sz2 - 4\n\t\t\t\tif tpos < 0 {\n\t\t\t\t\tcontinue \/\/ keep scanning\n\t\t\t\t}\n\t\t\t\tif _, err := f.Seek(tpos, 0); err != nil {\n\t\t\t\t\treturn 0, err\n\t\t\t\t}\n\t\t\t\tif _, err := io.ReadFull(f, szb); err != nil {\n\t\t\t\t\treturn 0, err\n\t\t\t\t}\n\t\t\t\tsz1 := int64(binary.LittleEndian.Uint32(szb))\n\t\t\t\tif sz1 == sz2 {\n\t\t\t\t\tpos = pos - bufsz + int64(i) + 1\n\t\t\t\t\tbreak outer \/\/ we found our match\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tpos -= bufsz\n\t}\n\tif pos == fullpos {\n\t\tif core.ShowDebugMessages {\n\t\t\tlog.Debug(\"follow: aof fully intact\")\n\t\t}\n\t\treturn pos, nil\n\t}\n\tlog.Warnf(\"truncating aof to %d\", pos)\n\t\/\/ any errror below are fatal.\n\tf.Close()\n\tc.f.Close()\n\tif err := os.Truncate(fname, pos); err != nil {\n\t\tlog.Fatalf(\"could not truncate aof, possible data loss. %s\", err.Error())\n\t\treturn 0, err\n\t}\n\tc.f, err = os.OpenFile(fname, os.O_CREATE|os.O_RDWR, 0600)\n\tif err != nil {\n\t\tlog.Fatalf(\"could not create aof, possible data loss. %s\", err.Error())\n\t\treturn 0, err\n\t}\n\t\/\/ reset the entire system.\n\tlog.Infof(\"reloading aof commands\")\n\tc.reset()\n\tif err := c.loadAOF(); err != nil {\n\t\tlog.Fatalf(\"could not reload aof, possible data loss. %s\", err.Error())\n\t\treturn 0, err\n\t}\n\tif int64(c.aofsz) != pos {\n\t\tlog.Fatalf(\"aof size mismatch during reload, possible data loss.\")\n\t\treturn 0, errors.New(\"?\")\n\t}\n\treturn pos, nil\n}\n<commit_msg>read backward multibulk<commit_after>package controller\n\nimport (\n\t\"crypto\/md5\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/tidwall\/resp\"\n\t\"github.com\/tidwall\/tile38\/controller\/log\"\n\t\"github.com\/tidwall\/tile38\/core\"\n)\n\n\/\/ checksum performs a simple md5 checksum on the aof file\nfunc (c *Controller) checksum(pos, size int64) (sum string, err error) {\n\tif pos+size > int64(c.aofsz) {\n\t\treturn \"\", io.EOF\n\t}\n\tvar f *os.File\n\tf, err = os.Open(c.f.Name())\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer f.Close()\n\tsumr := md5.New()\n\terr = func() error {\n\t\tif size == 0 {\n\t\t\tn, err := f.Seek(int64(c.aofsz), 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif pos >= n {\n\t\t\t\treturn io.EOF\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\t_, err = f.Seek(pos, 0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = io.CopyN(sumr, f, size)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}()\n\tif err != nil {\n\t\tif err == io.ErrUnexpectedEOF {\n\t\t\terr = io.EOF\n\t\t}\n\t\treturn \"\", err\n\t}\n\treturn fmt.Sprintf(\"%x\", sumr.Sum(nil)), nil\n}\n\nfunc connAOFMD5(conn *Conn, pos, size int64) (sum string, err error) {\n\tv, err := conn.Do(\"aofmd5\", pos, size)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif v.Error() != nil {\n\t\terrmsg := v.Error().Error()\n\t\tif errmsg == \"ERR EOF\" || errmsg == \"EOF\" {\n\t\t\treturn \"\", io.EOF\n\t\t}\n\t\treturn \"\", v.Error()\n\t}\n\tsum = v.String()\n\tif len(sum) != 32 {\n\t\treturn \"\", errors.New(\"checksum not ok\")\n\t}\n\treturn sum, nil\n}\n\nfunc (c *Controller) matchChecksums(conn *Conn, pos, size int64) (match bool, err error) {\n\tsum, err := c.checksum(pos, size)\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\tcsum, err := connAOFMD5(conn, pos, size)\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\treturn csum == sum, nil\n}\n\n\/\/ getEndOfLastValuePositionInFile is a very slow operation because it reads the file\n\/\/ backwards on byte at a time. Eek. It seek+read, seek+read, etc.\nfunc getEndOfLastValuePositionInFile(fname string, startPos int64) (int64, error) {\n\tpos := startPos\n\tf, err := os.Open(fname)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer f.Close()\n\treadByte := func() (byte, error) {\n\t\tif pos <= 0 {\n\t\t\treturn 0, io.EOF\n\t\t}\n\t\tpos--\n\t\tif _, err := f.Seek(pos, 0); err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tb := make([]byte, 1)\n\t\tif n, err := f.Read(b); err != nil {\n\t\t\treturn 0, err\n\t\t} else if n != 1 {\n\t\t\treturn 0, errors.New(\"invalid read\")\n\t\t}\n\t\treturn b[0], nil\n\t}\n\tfor {\n\t\tc, err := readByte()\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tif c == '*' {\n\t\t\tif _, err := f.Seek(pos, 0); err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\trd := resp.NewReader(f)\n\t\t\t_, telnet, n, err := rd.ReadMultiBulk()\n\t\t\tif err != nil || telnet {\n\t\t\t\tcontinue \/\/ keep reading backwards\n\t\t\t}\n\t\t\treturn pos + int64(n), nil\n\t\t}\n\t}\n}\n\n\/\/ followCheckSome is not a full checksum. It just \"checks some\" data.\n\/\/ We will do some various checksums on the leader until we find the correct position to start at.\nfunc (c *Controller) followCheckSome(addr string, followc uint64) (pos int64, err error) {\n\tif core.ShowDebugMessages {\n\t\tlog.Debug(\"follow:\", addr, \":check some\")\n\t}\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tif c.followc != followc {\n\t\treturn 0, errNoLongerFollowing\n\t}\n\tif c.aofsz < checksumsz {\n\t\treturn 0, nil\n\t}\n\n\tconn, err := DialTimeout(addr, time.Second*2)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer conn.Close()\n\n\tmin := int64(0)\n\tmax := int64(c.aofsz) - checksumsz\n\tlimit := int64(c.aofsz)\n\tmatch, err := c.matchChecksums(conn, min, checksumsz)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif match {\n\t\tmin += checksumsz \/\/ bump up the min\n\t\tfor {\n\t\t\tif max < min || max+checksumsz > limit {\n\t\t\t\tpos = min\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tmatch, err = c.matchChecksums(conn, max, checksumsz)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 0, err\n\t\t\t\t}\n\t\t\t\tif match {\n\t\t\t\t\tmin = max + checksumsz\n\t\t\t\t} else {\n\t\t\t\t\tlimit = max\n\t\t\t\t}\n\t\t\t\tmax = (limit-min)\/2 - checksumsz\/2 + min \/\/ multiply\n\t\t\t}\n\t\t}\n\t}\n\tfullpos := pos\n\tfname := c.f.Name()\n\tif pos == 0 {\n\t\tc.f.Close()\n\t\tc.f, err = os.Create(fname)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"could not recreate aof, possible data loss. %s\", err.Error())\n\t\t\treturn 0, err\n\t\t}\n\t\treturn 0, nil\n\t}\n\n\t\/\/ we want to truncate at a command location\n\t\/\/ search for nearest command\n\tpos, err = getEndOfLastValuePositionInFile(c.f.Name(), fullpos)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tif pos == fullpos {\n\t\tif core.ShowDebugMessages {\n\t\t\tlog.Debug(\"follow: aof fully intact\")\n\t\t}\n\t\treturn pos, nil\n\t}\n\tlog.Warnf(\"truncating aof to %d\", pos)\n\t\/\/ any errror below are fatal.\n\tc.f.Close()\n\tif err := os.Truncate(fname, pos); err != nil {\n\t\tlog.Fatalf(\"could not truncate aof, possible data loss. %s\", err.Error())\n\t\treturn 0, err\n\t}\n\tc.f, err = os.OpenFile(fname, os.O_CREATE|os.O_RDWR, 0600)\n\tif err != nil {\n\t\tlog.Fatalf(\"could not create aof, possible data loss. %s\", err.Error())\n\t\treturn 0, err\n\t}\n\t\/\/ reset the entire system.\n\tlog.Infof(\"reloading aof commands\")\n\tc.reset()\n\tif err := c.loadAOF(); err != nil {\n\t\tlog.Fatalf(\"could not reload aof, possible data loss. %s\", err.Error())\n\t\treturn 0, err\n\t}\n\tif int64(c.aofsz) != pos {\n\t\tlog.Fatalf(\"aof size mismatch during reload, possible data loss.\")\n\t\treturn 0, errors.New(\"?\")\n\t}\n\treturn pos, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2013 Conformal Systems LLC.\n\/\/ Use of this source code is governed by an ISC\n\/\/ license that can be found in the LICENSE file.\n\npackage yubikey\n\nconst (\n\tnumberOfRounds = 10\n)\n\nvar (\n\trc = []byte{0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36}\n\trijndaelSbox = []byte{\n\t\t0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5,\n\t\t0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,\n\t\t0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0,\n\t\t0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,\n\t\t0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC,\n\t\t0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,\n\t\t0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A,\n\t\t0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,\n\t\t0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0,\n\t\t0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,\n\t\t0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B,\n\t\t0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,\n\t\t0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85,\n\t\t0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,\n\t\t0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5,\n\t\t0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,\n\t\t0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17,\n\t\t0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,\n\t\t0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88,\n\t\t0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,\n\t\t0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C,\n\t\t0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,\n\t\t0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9,\n\t\t0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,\n\t\t0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6,\n\t\t0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,\n\t\t0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E,\n\t\t0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,\n\t\t0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94,\n\t\t0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,\n\t\t0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68,\n\t\t0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16,\n\t}\n\trijndaelInvSbox = []byte{\n\t\t0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38,\n\t\t0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,\n\t\t0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87,\n\t\t0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,\n\t\t0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D,\n\t\t0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,\n\t\t0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2,\n\t\t0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,\n\t\t0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16,\n\t\t0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,\n\t\t0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA,\n\t\t0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,\n\t\t0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A,\n\t\t0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,\n\t\t0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02,\n\t\t0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,\n\t\t0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA,\n\t\t0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,\n\t\t0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85,\n\t\t0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,\n\t\t0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89,\n\t\t0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,\n\t\t0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20,\n\t\t0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,\n\t\t0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31,\n\t\t0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,\n\t\t0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D,\n\t\t0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,\n\t\t0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0,\n\t\t0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,\n\t\t0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26,\n\t\t0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D,\n\t}\n)\n\nfunc AesDecrypt(src []byte, key Key) []byte {\n\tstate := make([]byte, len(src))\n\tcopy(state, src)\n\n\tround_key := key\n\n\tfor i := 0; i < numberOfRounds; i++ {\n\t\tround_key[0] ^= rc[i]\n\n\t\tround_key[0] ^= rijndaelSbox[round_key[13]]\n\t\tround_key[1] ^= rijndaelSbox[round_key[14]]\n\t\tround_key[2] ^= rijndaelSbox[round_key[15]]\n\t\tround_key[3] ^= rijndaelSbox[round_key[12]]\n\n\t\tfor j := 4; j < 16; j++ {\n\t\t\tround_key[j] ^= round_key[j-4]\n\t\t}\n\t}\n\tfor i := 0; i < 0x10; i++ {\n\t\tstate[i] ^= round_key[i]\n\t}\n\tfor i := 1; i <= numberOfRounds; i++ {\n\t\t\/* First row: 0 shift, 0 4 8 12 *\/\n\t\tstate[0] = rijndaelInvSbox[state[0]]\n\t\tstate[4] = rijndaelInvSbox[state[4]]\n\t\tstate[8] = rijndaelInvSbox[state[8]]\n\t\tstate[12] = rijndaelInvSbox[state[12]]\n\n\t\t\/* Second row: -1 shift, 1 5 9 13 *\/\n\t\tj := state[13]\n\t\tstate[13] = rijndaelInvSbox[state[9]]\n\t\tstate[9] = rijndaelInvSbox[state[5]]\n\t\tstate[5] = rijndaelInvSbox[state[1]]\n\t\tstate[1] = rijndaelInvSbox[j]\n\n\t\t\/* Third row: -2 shift, 2 6 10 14 *\/\n\t\tj = state[2]\n\t\tstate[2] = rijndaelInvSbox[state[10]]\n\t\tstate[10] = rijndaelInvSbox[j]\n\t\tj = state[6]\n\t\tstate[6] = rijndaelInvSbox[state[14]]\n\t\tstate[14] = rijndaelInvSbox[j]\n\n\t\t\/* Fourth row: -3 shift, 3 7 11 15 *\/\n\t\tj = state[3]\n\t\tstate[3] = rijndaelInvSbox[state[7]]\n\t\tstate[7] = rijndaelInvSbox[state[11]]\n\t\tstate[11] = rijndaelInvSbox[state[15]]\n\t\tstate[15] = rijndaelInvSbox[j]\n\n\t\tfor j := 15; j > 3; j-- {\n\t\t\tround_key[j] ^= round_key[j-4]\n\t\t}\n\t\tround_key[0] ^=\n\t\t\t(rc[numberOfRounds-i] ^ rijndaelSbox[round_key[13]])\n\n\t\tround_key[1] ^= rijndaelSbox[round_key[14]]\n\t\tround_key[2] ^= rijndaelSbox[round_key[15]]\n\t\tround_key[3] ^= rijndaelSbox[round_key[12]]\n\n\t\tfor j := 0; j < 16; j++ {\n\t\t\tstate[j] ^= round_key[j]\n\t\t}\n\t\tif i != numberOfRounds {\n\t\t\tfor j = 0; j < 16; j += 4 {\n\t\t\t\tk1 := state[j] ^ state[j+2]\n\t\t\t\ta02x := xtime(k1)\n\t\t\t\tk2 := state[j+1] ^ state[j+3]\n\t\t\t\ta13x := xtime(k2)\n\n\t\t\t\tk1 ^= (k2 ^ xtime(state[j+1]^state[j+2]))\n\t\t\t\tk2 = k1\n\n\t\t\t\ta02xx := xtime(a02x)\n\t\t\t\ta13xx := xtime(a13x)\n\n\t\t\t\tk1 ^= (xtime(a02xx^a13xx) ^ a02xx)\n\t\t\t\tk2 ^= (xtime(a02xx^a13xx) ^ a13xx)\n\n\t\t\t\tstate[j] ^= (k1 ^ a02x)\n\t\t\t\tstate[j+1] ^= k2\n\t\t\t\tstate[j+2] ^= (k1 ^ a13x)\n\t\t\t\tstate[j+3] ^= (k2 ^ a02x ^ a13x)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn state\n}\n\nfunc AesEncrypt(src []byte, key Key) []byte {\n\tstate := make([]byte, len(src))\n\tcopy(state, src)\n\n\tround_key := key\n\n\tfor i := 0; i < 16; i++ {\n\t\tstate[i] ^= key[i]\n\t}\n\n\tfor i := 0; i < numberOfRounds; i++ {\n\t\t\/* First row: 0 shift, 0 4 8 12 *\/\n\t\tstate[0] = rijndaelSbox[state[0]]\n\t\tstate[4] = rijndaelSbox[state[4]]\n\t\tstate[8] = rijndaelSbox[state[8]]\n\t\tstate[12] = rijndaelSbox[state[12]]\n\n\t\t\/* Second row: 1 shift, 1 5 9 13 *\/\n\t\ttmp := state[1]\n\t\tstate[1] = rijndaelSbox[state[5]]\n\t\tstate[5] = rijndaelSbox[state[9]]\n\t\tstate[9] = rijndaelSbox[state[13]]\n\t\tstate[13] = rijndaelSbox[tmp]\n\n\t\t\/* Third row: 2 shift, 2 6 10 14 *\/\n\t\ttmp = state[2]\n\t\tstate[2] = rijndaelSbox[state[10]]\n\t\tstate[10] = rijndaelSbox[tmp]\n\t\ttmp = state[6]\n\t\tstate[6] = rijndaelSbox[state[14]]\n\t\tstate[14] = rijndaelSbox[tmp]\n\n\t\t\/* Fourth row: 3 shift, 3 7 11 15 *\/\n\t\ttmp = state[15]\n\t\tstate[15] = rijndaelSbox[state[11]]\n\t\tstate[11] = rijndaelSbox[state[7]]\n\t\tstate[7] = rijndaelSbox[state[3]]\n\t\tstate[3] = rijndaelSbox[tmp]\n\n\t\tif i != numberOfRounds-1 {\n\t\t\tfor k := 0; k < 16; k += 4 {\n\t\t\t\tj := state[k] ^ state[k+1]\n\t\t\t\ttmp = j ^ state[k+2] ^ state[k+3]\n\n\t\t\t\tj = xtime(j)\n\n\t\t\t\tstate[k] ^= (j ^ tmp)\n\n\t\t\t\tj = state[k+1] ^ state[k+2]\n\t\t\t\tj = xtime(j)\n\n\t\t\t\tstate[k+1] ^= (j ^ tmp)\n\n\t\t\t\tj = state[k+2] ^ state[k+3]\n\t\t\t\tj = xtime(j)\n\n\t\t\t\tstate[k+2] ^= (j ^ tmp)\n\t\t\t\tstate[k+3] = state[k] ^ state[k+1] ^ state[k+2] ^ tmp\n\t\t\t}\n\t\t}\n\n\t\tround_key[0] ^= rc[i]\n\n\t\tround_key[0] ^= rijndaelSbox[round_key[13]]\n\t\tround_key[1] ^= rijndaelSbox[round_key[14]]\n\t\tround_key[2] ^= rijndaelSbox[round_key[15]]\n\t\tround_key[3] ^= rijndaelSbox[round_key[12]]\n\n\t\tfor k := 4; k < 16; k++ {\n\t\t\tround_key[k] ^= round_key[k-4]\n\t\t}\n\t\tfor j := 0; j < 16; j++ {\n\t\t\tstate[j] ^= round_key[j]\n\t\t}\n\t}\n\treturn state\n}\n\nfunc xtime(b byte) byte {\n\tif b&0x80 > 0 {\n\t\treturn (b << 1) ^ 0x1b\n\t} else {\n\t\treturn b << 1\n\t}\n}\n<commit_msg>use stdlib aes implementation<commit_after>\/\/ Copyright (c) 2013 Conformal Systems LLC.\n\/\/ Use of this source code is governed by an ISC\n\/\/ license that can be found in the LICENSE file.\n\npackage yubikey\n\nimport (\n\t\"crypto\/aes\"\n)\n\nfunc AesDecrypt(src []byte, key Key) []byte {\n\tdst := make([]byte, len(src))\n\tcipher, _ := aes.NewCipher(key[:])\n\tcipher.Decrypt(dst, src)\n\treturn dst\n\n}\n\nfunc AesEncrypt(src []byte, key Key) []byte {\n\tdst := make([]byte, len(src))\n\tcipher, _ := aes.NewCipher(key[:])\n\tcipher.Encrypt(dst, src)\n\treturn dst\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package mq provides an ability to integrate with message broker via AMQP in a declarative way.\npackage mq\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/NeowayLabs\/wabbit\"\n\t\"github.com\/NeowayLabs\/wabbit\/amqp\"\n\t\"github.com\/NeowayLabs\/wabbit\/amqptest\"\n\t\"github.com\/NeowayLabs\/wabbit\/utils\"\n\tamqpDriver \"github.com\/streadway\/amqp\"\n)\n\nconst (\n\t\/\/ Describes states during reconnect.\n\tstatusReadyForReconnect int32 = 0\n\tstatusReconnecting = 1\n)\n\n\/\/ Used for creating connection to the fake AMQP server for tests.\nvar brokerIsMocked bool\n\ntype conn interface {\n\tChannel() (wabbit.Channel, error)\n\tClose() error\n\tNotifyClose(chan wabbit.Error) chan wabbit.Error\n}\n\n\/\/ MQ describes methods provided by message broker adapter.\ntype MQ interface {\n\t\/\/ Consumer returns consumer object by its name.\n\tConsumer(name string) (Consumer, error)\n\t\/\/ SetConsumerHandler allows you to set handler callback without getting consumer.\n\tSetConsumerHandler(name string, handler ConsumerHandler) error\n\t\/\/ AsyncProducer returns async producer. Should be used in most cases.\n\tAsyncProducer(name string) (AsyncProducer, error)\n\t\/\/ SyncProducer returns sync producer.\n\tSyncProducer(name string) (SyncProducer, error)\n\t\/\/ Error returns channel with all occurred errors.\n\t\/\/ Errors from sync producer won't be accessible.\n\tError() <-chan error\n\t\/\/ Close stop all consumers and producers and close connection to broker.\n\tClose()\n}\n\ntype mq struct {\n\tchannel wabbit.Channel\n\tconfig Config\n\tconnection conn\n\terrorChannel chan error\n\tinternalErrorChannel chan error\n\tconsumers *consumersRegistry\n\tproducers *producersRegistry\n\treconnectStatus int32 \/\/ Defines whether client is trying to reconnect or not.\n}\n\n\/\/ New initializes AMQP connection to the message broker\n\/\/ and returns adapter that provides an ability\n\/\/ to get configured consumers and producers, read occurred errors and shutdown all workers.\nfunc New(config Config) (MQ, error) {\n\tconfig.normalize()\n\n\tmq := &mq{\n\t\tconfig: config,\n\t\terrorChannel: make(chan error),\n\t\tinternalErrorChannel: make(chan error),\n\t\tconsumers: newConsumersRegistry(len(config.Consumers)),\n\t\tproducers: newProducersRegistry(len(config.Producers)),\n\t}\n\n\tif err := mq.connect(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tgo mq.errorHandler()\n\n\treturn mq, mq.initialSetup()\n}\n\n\/\/ Set handler for consumer by its name. Returns false if consumer wasn't found.\n\/\/ Can be called once for each consumer.\nfunc (mq *mq) SetConsumerHandler(name string, handler ConsumerHandler) error {\n\tconsumer, err := mq.Consumer(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconsumer.Consume(handler)\n\n\treturn nil\n}\n\n\/\/ Consumer returns a consumer by its name or error if consumer wasn't found.\nfunc (mq *mq) Consumer(name string) (consumer Consumer, err error) {\n\tconsumer, ok := mq.consumers.Get(name)\n\tif !ok {\n\t\terr = fmt.Errorf(\"consumer '%s' is not registered. Check your configuration\", name)\n\t}\n\n\treturn\n}\n\n\/\/ AsyncProducer returns an async producer by its name or error if producer wasn't found.\nfunc (mq *mq) AsyncProducer(name string) (AsyncProducer, error) {\n\titem, exists := mq.producers.Get(name)\n\tproducer, asserted := item.(*asyncProducer)\n\n\tif !exists || !asserted {\n\t\treturn nil, fmt.Errorf(\"producer '%s' is not registered. Check your configuration\", name)\n\t}\n\n\treturn producer, nil\n}\n\n\/\/ SyncProducer returns a sync producer by its name or error if producer wasn't found.\nfunc (mq *mq) SyncProducer(name string) (SyncProducer, error) {\n\titem, exists := mq.producers.Get(name)\n\tproducer, asserted := item.(*syncProducer)\n\n\tif !exists || !asserted {\n\t\treturn nil, fmt.Errorf(\"producer '%s' is not registered. Check your configuration\", name)\n\t}\n\n\treturn producer, nil\n}\n\n\/\/ Error provides an ability to access occurring errors.\nfunc (mq *mq) Error() <-chan error {\n\treturn mq.errorChannel\n}\n\n\/\/ Shutdown all workers and close connection to the message broker.\nfunc (mq *mq) Close() {\n\tmq.stopProducersAndConsumers()\n\n\tif mq.channel != nil {\n\t\tmq.channel.Close()\n\t}\n\n\tif mq.connection != nil {\n\t\tmq.connection.Close()\n\t}\n}\n\nfunc (mq *mq) connect() error {\n\tconnection, err := mq.createConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tchannel, err := connection.Channel()\n\tif err != nil {\n\t\tconnection.Close()\n\n\t\treturn err\n\t}\n\n\tmq.connection = connection\n\tmq.channel = channel\n\n\tgo mq.handleCloseEvent()\n\n\treturn nil\n}\n\nfunc (mq *mq) createConnection() (conn conn, err error) {\n\tif brokerIsMocked || mq.config.TestMode {\n\t\treturn amqptest.Dial(mq.config.DSN)\n\t}\n\n\treturn amqp.Dial(mq.config.DSN)\n}\n\n\/\/ Register close handler.\n\/\/ To get more details visit https:\/\/godoc.org\/github.com\/streadway\/amqp#Connection.NotifyClose.\nfunc (mq *mq) handleCloseEvent() {\n\terr := <-mq.connection.NotifyClose(make(chan wabbit.Error))\n\tif err != nil {\n\t\tmq.internalErrorChannel <- err\n\t}\n}\n\nfunc (mq *mq) errorHandler() {\n\tfor err := range mq.internalErrorChannel {\n\t\tselect {\n\t\tcase mq.errorChannel <- err: \/\/ Proxies errors to the user.\n\t\tdefault: \/\/ For those clients who don't read errors.\n\t\t}\n\n\t\tmq.processError(err)\n\t}\n}\n\nfunc (mq *mq) processError(err error) {\n\tswitch err.(type) {\n\tcase *net.OpError:\n\t\tgo mq.reconnect()\n\tcase *utils.Error: \/\/ Broken connection. Used in tests.\n\t\tgo mq.reconnect()\n\tcase *amqpDriver.Error:\n\t\trmqErr, _ := err.(*amqpDriver.Error)\n\t\tif rmqErr.Server == false { \/\/ For example channel was closed.\n\t\t\tgo mq.reconnect()\n\t\t}\n\tdefault:\n\t\t\/\/ Wabbit error. Means that server is down.\n\t\t\/\/ Used in tests.\n\t\tif err.Error() == \"Network unreachable\" {\n\t\t\tgo mq.reconnect()\n\t\t}\n\t}\n}\n\nfunc (mq *mq) initialSetup() error {\n\tif err := mq.setupExchanges(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := mq.setupQueues(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := mq.setupProducers(); err != nil {\n\t\treturn err\n\t}\n\n\treturn mq.setupConsumers()\n}\n\n\/\/ Called after each reconnect to recreate non-durable queues and exchanges.\nfunc (mq *mq) setupAfterReconnect() error {\n\tif err := mq.setupExchanges(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := mq.setupQueues(); err != nil {\n\t\treturn err\n\t}\n\n\tmq.producers.GoEach(func(producer internalProducer) {\n\t\tif err := mq.reconnectProducer(producer); err != nil {\n\t\t\tmq.internalErrorChannel <- err\n\t\t}\n\t})\n\n\tmq.consumers.GoEach(func(consumer *consumer) {\n\t\tif err := mq.reconnectConsumer(consumer); err != nil {\n\t\t\tmq.internalErrorChannel <- err\n\t\t}\n\t})\n\n\treturn nil\n}\n\nfunc (mq *mq) setupExchanges() error {\n\tfor _, config := range mq.config.Exchanges {\n\t\tif err := mq.declareExchange(config); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (mq *mq) declareExchange(config ExchangeConfig) error {\n\treturn mq.channel.ExchangeDeclare(config.Name, config.Type, wabbit.Option(config.Options))\n}\n\nfunc (mq *mq) setupQueues() error {\n\tfor _, config := range mq.config.Queues {\n\t\tif err := mq.declareQueue(config); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (mq *mq) declareQueue(config QueueConfig) error {\n\tif _, err := mq.channel.QueueDeclare(config.Name, wabbit.Option(config.Options)); err != nil {\n\t\treturn err\n\t}\n\n\treturn mq.channel.QueueBind(config.Name, config.RoutingKey, config.Exchange, wabbit.Option(config.BindingOptions))\n}\n\nfunc (mq *mq) setupProducers() error {\n\tfor _, config := range mq.config.Producers {\n\t\tif err := mq.registerProducer(config); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (mq *mq) registerProducer(config ProducerConfig) error {\n\tif _, ok := mq.producers.Get(config.Name); ok {\n\t\treturn fmt.Errorf(`producer with name \"%s\" is already registered`, config.Name)\n\t}\n\n\tchannel, err := mq.connection.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tproducer := newInternalProducer(channel, mq.internalErrorChannel, config)\n\tproducer.init()\n\tmq.producers.Set(config.Name, producer)\n\n\treturn nil\n}\n\nfunc (mq *mq) reconnectProducer(producer internalProducer) error {\n\tchannel, err := mq.connection.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tproducer.setChannel(channel)\n\tproducer.init()\n\n\treturn nil\n}\n\nfunc (mq *mq) setupConsumers() error {\n\tfor _, config := range mq.config.Consumers {\n\t\tif err := mq.registerConsumer(config); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (mq *mq) registerConsumer(config ConsumerConfig) error {\n\tif _, ok := mq.consumers.Get(config.Name); ok {\n\t\treturn fmt.Errorf(`consumer with name \"%s\" is already registered`, config.Name)\n\t}\n\n\t\/\/ Consumer must have at least one worker.\n\tif config.Workers < 1 {\n\t\tconfig.Workers = 1\n\t}\n\n\tconsumer := newConsumer(config) \/\/ We need to save a whole config for reconnect.\n\tconsumer.prefetchCount = config.PrefetchCount\n\tconsumer.prefetchSize = config.PrefetchSize\n\n\tfor i := 0; i < config.Workers; i++ {\n\t\tworker := newWorker(mq.internalErrorChannel)\n\n\t\tif err := mq.initializeConsumersWorker(consumer, worker); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tconsumer.workers[i] = worker\n\t}\n\n\tmq.consumers.Set(config.Name, consumer) \/\/ Workers will start after consumer.Consume method call.\n\n\treturn nil\n}\n\nfunc (mq *mq) reconnectConsumer(consumer *consumer) error {\n\tfor _, worker := range consumer.workers {\n\t\tif err := mq.initializeConsumersWorker(consumer, worker); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tconsumer.consume(consumer.handler)\n\n\treturn nil\n}\n\nfunc (mq *mq) initializeConsumersWorker(consumer *consumer, worker *worker) error {\n\tchannel, err := mq.connection.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := channel.Qos(consumer.prefetchCount, consumer.prefetchSize, false); err != nil {\n\t\treturn err\n\t}\n\n\tdeliveries, err := channel.Consume(consumer.queue, \"\", consumer.options)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tworker.setChannel(channel)\n\tworker.deliveries = deliveries\n\n\treturn nil\n}\n\n\/\/ Reconnect stops current producers and consumers,\n\/\/ recreates connection to the rabbit and than runs producers and consumers.\nfunc (mq *mq) reconnect() {\n\tstartedReconnect := atomic.CompareAndSwapInt32(&mq.reconnectStatus, statusReadyForReconnect, statusReconnecting)\n\t\/\/ There is no need to start a new reconnect if the previous one is not finished yet.\n\tif !startedReconnect {\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tatomic.StoreInt32(&mq.reconnectStatus, statusReadyForReconnect)\n\t}()\n\n\ttime.Sleep(mq.config.ReconnectDelay) \/\/ TODO Add incremental sleep.\n\n\tmq.stopProducersAndConsumers()\n\n\tif err := mq.connect(); err != nil {\n\t\tmq.internalErrorChannel <- err\n\n\t\treturn\n\t}\n\n\tif err := mq.setupAfterReconnect(); err != nil {\n\t\tmq.internalErrorChannel <- err\n\t}\n}\n\nfunc (mq *mq) stopProducersAndConsumers() {\n\tmq.producers.GoEach(func(producer internalProducer) {\n\t\tproducer.Stop()\n\t})\n\n\tmq.consumers.GoEach(func(consumer *consumer) {\n\t\tconsumer.Stop()\n\t})\n}\n<commit_msg>Applied code style rules. (#31)<commit_after>\/\/ Package mq provides an ability to integrate with message broker via AMQP in a declarative way.\npackage mq\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/NeowayLabs\/wabbit\"\n\t\"github.com\/NeowayLabs\/wabbit\/amqp\"\n\t\"github.com\/NeowayLabs\/wabbit\/amqptest\"\n\t\"github.com\/NeowayLabs\/wabbit\/utils\"\n\tamqpDriver \"github.com\/streadway\/amqp\"\n)\n\nconst (\n\t\/\/ Describes states during reconnect.\n\tstatusReadyForReconnect int32 = 0\n\tstatusReconnecting int32 = 1\n)\n\n\/\/ Used for creating connection to the fake AMQP server for tests.\nvar brokerIsMocked bool\n\ntype conn interface {\n\tChannel() (wabbit.Channel, error)\n\tClose() error\n\tNotifyClose(chan wabbit.Error) chan wabbit.Error\n}\n\n\/\/ MQ describes methods provided by message broker adapter.\ntype MQ interface {\n\t\/\/ Consumer returns consumer object by its name.\n\tConsumer(name string) (Consumer, error)\n\t\/\/ SetConsumerHandler allows you to set handler callback without getting consumer.\n\tSetConsumerHandler(name string, handler ConsumerHandler) error\n\t\/\/ AsyncProducer returns async producer. Should be used in most cases.\n\tAsyncProducer(name string) (AsyncProducer, error)\n\t\/\/ SyncProducer returns sync producer.\n\tSyncProducer(name string) (SyncProducer, error)\n\t\/\/ Error returns channel with all occurred errors.\n\t\/\/ Errors from sync producer won't be accessible.\n\tError() <-chan error\n\t\/\/ Close stop all consumers and producers and close connection to broker.\n\tClose()\n}\n\ntype mq struct {\n\tchannel wabbit.Channel\n\tconfig Config\n\tconnection conn\n\terrorChannel chan error\n\tinternalErrorChannel chan error\n\tconsumers *consumersRegistry\n\tproducers *producersRegistry\n\treconnectStatus int32 \/\/ Defines whether client is trying to reconnect or not.\n}\n\n\/\/ New initializes AMQP connection to the message broker\n\/\/ and returns adapter that provides an ability\n\/\/ to get configured consumers and producers, read occurred errors and shutdown all workers.\nfunc New(config Config) (MQ, error) {\n\tconfig.normalize()\n\n\tmq := &mq{\n\t\tconfig: config,\n\t\terrorChannel: make(chan error),\n\t\tinternalErrorChannel: make(chan error),\n\t\tconsumers: newConsumersRegistry(len(config.Consumers)),\n\t\tproducers: newProducersRegistry(len(config.Producers)),\n\t}\n\n\tif err := mq.connect(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tgo mq.errorHandler()\n\n\treturn mq, mq.initialSetup()\n}\n\n\/\/ Set handler for consumer by its name. Returns false if consumer wasn't found.\n\/\/ Can be called once for each consumer.\nfunc (mq *mq) SetConsumerHandler(name string, handler ConsumerHandler) error {\n\tconsumer, err := mq.Consumer(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconsumer.Consume(handler)\n\n\treturn nil\n}\n\n\/\/ Consumer returns a consumer by its name or error if consumer wasn't found.\nfunc (mq *mq) Consumer(name string) (Consumer, error) {\n\tconsumer, ok := mq.consumers.Get(name)\n\tif !ok {\n\t\terr := fmt.Errorf(\"consumer '%s' is not registered. Check your configuration\", name)\n\n\t\treturn nil, err\n\t}\n\n\treturn consumer, nil\n}\n\n\/\/ AsyncProducer returns an async producer by its name or error if producer wasn't found.\nfunc (mq *mq) AsyncProducer(name string) (AsyncProducer, error) {\n\titem, exists := mq.producers.Get(name)\n\tproducer, asserted := item.(*asyncProducer)\n\n\tif !exists || !asserted {\n\t\treturn nil, fmt.Errorf(\"producer '%s' is not registered. Check your configuration\", name)\n\t}\n\n\treturn producer, nil\n}\n\n\/\/ SyncProducer returns a sync producer by its name or error if producer wasn't found.\nfunc (mq *mq) SyncProducer(name string) (SyncProducer, error) {\n\titem, exists := mq.producers.Get(name)\n\tproducer, asserted := item.(*syncProducer)\n\n\tif !exists || !asserted {\n\t\treturn nil, fmt.Errorf(\"producer '%s' is not registered. Check your configuration\", name)\n\t}\n\n\treturn producer, nil\n}\n\n\/\/ Error provides an ability to access occurring errors.\nfunc (mq *mq) Error() <-chan error {\n\treturn mq.errorChannel\n}\n\n\/\/ Shutdown all workers and close connection to the message broker.\nfunc (mq *mq) Close() {\n\tmq.stopProducersAndConsumers()\n\n\tif mq.channel != nil {\n\t\t_ = mq.channel.Close()\n\t}\n\n\tif mq.connection != nil {\n\t\t_ = mq.connection.Close()\n\t}\n}\n\nfunc (mq *mq) connect() error {\n\tconnection, err := mq.createConnection()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tchannel, err := connection.Channel()\n\tif err != nil {\n\t\t_ = connection.Close()\n\n\t\treturn err\n\t}\n\n\tmq.connection = connection\n\tmq.channel = channel\n\n\tgo mq.handleCloseEvent()\n\n\treturn nil\n}\n\nfunc (mq *mq) createConnection() (conn, error) {\n\tif brokerIsMocked || mq.config.TestMode {\n\t\treturn amqptest.Dial(mq.config.DSN)\n\t}\n\n\treturn amqp.Dial(mq.config.DSN)\n}\n\n\/\/ Register close handler.\n\/\/ To get more details visit https:\/\/godoc.org\/github.com\/streadway\/amqp#Connection.NotifyClose.\nfunc (mq *mq) handleCloseEvent() {\n\terr := <-mq.connection.NotifyClose(make(chan wabbit.Error))\n\tif err != nil {\n\t\tmq.internalErrorChannel <- err\n\t}\n}\n\nfunc (mq *mq) errorHandler() {\n\tfor err := range mq.internalErrorChannel {\n\t\tselect {\n\t\tcase mq.errorChannel <- err: \/\/ Proxies errors to the user.\n\t\tdefault: \/\/ For those clients who don't read errors.\n\t\t}\n\n\t\tmq.processError(err)\n\t}\n}\n\nfunc (mq *mq) processError(err error) {\n\tswitch err.(type) {\n\tcase *net.OpError:\n\t\tgo mq.reconnect()\n\tcase *utils.Error: \/\/ Broken connection. Used in tests.\n\t\tgo mq.reconnect()\n\tcase *amqpDriver.Error:\n\t\trmqErr, _ := err.(*amqpDriver.Error)\n\t\tif rmqErr.Server == false { \/\/ For example channel was closed.\n\t\t\tgo mq.reconnect()\n\t\t}\n\tdefault:\n\t\t\/\/ Wabbit error. Means that server is down.\n\t\t\/\/ Used in tests.\n\t\tif err.Error() == \"Network unreachable\" {\n\t\t\tgo mq.reconnect()\n\t\t}\n\t}\n}\n\nfunc (mq *mq) initialSetup() error {\n\tif err := mq.setupExchanges(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := mq.setupQueues(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := mq.setupProducers(); err != nil {\n\t\treturn err\n\t}\n\n\treturn mq.setupConsumers()\n}\n\n\/\/ Called after each reconnect to recreate non-durable queues and exchanges.\nfunc (mq *mq) setupAfterReconnect() error {\n\tif err := mq.setupExchanges(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := mq.setupQueues(); err != nil {\n\t\treturn err\n\t}\n\n\tmq.producers.GoEach(func(producer internalProducer) {\n\t\tif err := mq.reconnectProducer(producer); err != nil {\n\t\t\tmq.internalErrorChannel <- err\n\t\t}\n\t})\n\n\tmq.consumers.GoEach(func(consumer *consumer) {\n\t\tif err := mq.reconnectConsumer(consumer); err != nil {\n\t\t\tmq.internalErrorChannel <- err\n\t\t}\n\t})\n\n\treturn nil\n}\n\nfunc (mq *mq) setupExchanges() error {\n\tfor _, config := range mq.config.Exchanges {\n\t\tif err := mq.declareExchange(config); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (mq *mq) declareExchange(config ExchangeConfig) error {\n\treturn mq.channel.ExchangeDeclare(config.Name, config.Type, wabbit.Option(config.Options))\n}\n\nfunc (mq *mq) setupQueues() error {\n\tfor _, config := range mq.config.Queues {\n\t\tif err := mq.declareQueue(config); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (mq *mq) declareQueue(config QueueConfig) error {\n\tif _, err := mq.channel.QueueDeclare(config.Name, wabbit.Option(config.Options)); err != nil {\n\t\treturn err\n\t}\n\n\treturn mq.channel.QueueBind(config.Name, config.RoutingKey, config.Exchange, wabbit.Option(config.BindingOptions))\n}\n\nfunc (mq *mq) setupProducers() error {\n\tfor _, config := range mq.config.Producers {\n\t\tif err := mq.registerProducer(config); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (mq *mq) registerProducer(config ProducerConfig) error {\n\tif _, ok := mq.producers.Get(config.Name); ok {\n\t\treturn fmt.Errorf(`producer with name \"%s\" is already registered`, config.Name)\n\t}\n\n\tchannel, err := mq.connection.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tproducer := newInternalProducer(channel, mq.internalErrorChannel, config)\n\tproducer.init()\n\tmq.producers.Set(config.Name, producer)\n\n\treturn nil\n}\n\nfunc (mq *mq) reconnectProducer(producer internalProducer) error {\n\tchannel, err := mq.connection.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tproducer.setChannel(channel)\n\tproducer.init()\n\n\treturn nil\n}\n\nfunc (mq *mq) setupConsumers() error {\n\tfor _, config := range mq.config.Consumers {\n\t\tif err := mq.registerConsumer(config); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (mq *mq) registerConsumer(config ConsumerConfig) error {\n\tif _, ok := mq.consumers.Get(config.Name); ok {\n\t\treturn fmt.Errorf(`consumer with name \"%s\" is already registered`, config.Name)\n\t}\n\n\t\/\/ Consumer must have at least one worker.\n\tif config.Workers < 1 {\n\t\tconfig.Workers = 1\n\t}\n\n\tconsumer := newConsumer(config) \/\/ We need to save a whole config for reconnect.\n\tconsumer.prefetchCount = config.PrefetchCount\n\tconsumer.prefetchSize = config.PrefetchSize\n\n\tfor i := 0; i < config.Workers; i++ {\n\t\tworker := newWorker(mq.internalErrorChannel)\n\n\t\tif err := mq.initializeConsumersWorker(consumer, worker); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tconsumer.workers[i] = worker\n\t}\n\n\tmq.consumers.Set(config.Name, consumer) \/\/ Workers will start after consumer.Consume method call.\n\n\treturn nil\n}\n\nfunc (mq *mq) reconnectConsumer(consumer *consumer) error {\n\tfor _, worker := range consumer.workers {\n\t\tif err := mq.initializeConsumersWorker(consumer, worker); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tconsumer.consume(consumer.handler)\n\n\treturn nil\n}\n\nfunc (mq *mq) initializeConsumersWorker(consumer *consumer, worker *worker) error {\n\tchannel, err := mq.connection.Channel()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := channel.Qos(consumer.prefetchCount, consumer.prefetchSize, false); err != nil {\n\t\treturn err\n\t}\n\n\tdeliveries, err := channel.Consume(consumer.queue, \"\", consumer.options)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tworker.setChannel(channel)\n\tworker.deliveries = deliveries\n\n\treturn nil\n}\n\n\/\/ Reconnect stops current producers and consumers,\n\/\/ recreates connection to the rabbit and than runs producers and consumers.\nfunc (mq *mq) reconnect() {\n\tstartedReconnect := atomic.CompareAndSwapInt32(&mq.reconnectStatus, statusReadyForReconnect, statusReconnecting)\n\t\/\/ There is no need to start a new reconnect if the previous one is not finished yet.\n\tif !startedReconnect {\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tatomic.StoreInt32(&mq.reconnectStatus, statusReadyForReconnect)\n\t}()\n\n\ttime.Sleep(mq.config.ReconnectDelay) \/\/ TODO Add incremental sleep.\n\n\tmq.stopProducersAndConsumers()\n\n\tif err := mq.connect(); err != nil {\n\t\tmq.internalErrorChannel <- err\n\n\t\treturn\n\t}\n\n\tif err := mq.setupAfterReconnect(); err != nil {\n\t\tmq.internalErrorChannel <- err\n\t}\n}\n\nfunc (mq *mq) stopProducersAndConsumers() {\n\tmq.producers.GoEach(func(producer internalProducer) {\n\t\tproducer.Stop()\n\t})\n\n\tmq.consumers.GoEach(func(consumer *consumer) {\n\t\tconsumer.Stop()\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package hoverfly\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/SpectoLabs\/hoverfly\/core\/handlers\/v2\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"net\/http\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc BenchmarkProcessRequest(b *testing.B) {\n\n\tRegisterTestingT(b)\n\thoverfly := NewHoverflyWithConfiguration(&Configuration{\n\t\tWebserver: true,\n\t\tProxyPort: \"8500\",\n\t\tAdminPort: \"8888\",\n\t\tMode: \"simulate\",\n\t})\n\n\/\/\tsimulation := v2.SimulationViewV5{}\n\/\/\t_ = json.Unmarshal([]byte(`{\n\/\/\t\"data\": {\n\/\/\t\t\"pairs\": [{\n\/\/\t\t\t\"response\": {\n\/\/\t\t\t\t\"status\": 200,\n\/\/\t\t\t\t\"body\": \"5iNe8dxWH5Ca8pZqAfEHv3rgC0SsvKNLu6o3K\",\n\/\/\t\t\t\t\"encodedBody\": false,\n\/\/\t\t\t\t\"headers\": {\n\/\/\t\t\t\t\t\"Accept-Ranges\": [\"bytes\"],\n\/\/\t\t\t\t\t\"Cache-Control\": [\"max-age=3600\"],\n\/\/\t\t\t\t\t\"Connection\": [\"keep-alive\"],\n\/\/\t\t\t\t\t\"Content-Type\": [\"text\/html; charset=utf-8\"],\n\/\/\t\t\t\t\t\"Date\": [\"Tue, 30 May 2017 13:23:09 GMT\"],\n\/\/\t\t\t\t\t\"Etag\": [\"\\\"82b6bafbc0c4af5e3886d07802f4b62d\\\"\"],\n\/\/\t\t\t\t\t\"Hoverfly\": [\"Was-Here\"],\n\/\/\t\t\t\t\t\"Last-Modified\": [\"Fri, 19 May 2017 10:59:12 GMT\"],\n\/\/\t\t\t\t\t\"Server\": [\"nginx\"],\n\/\/\t\t\t\t\t\"Strict-Transport-Security\": [\"max-age=31556926\"],\n\/\/\t\t\t\t\t\"Transfer-Encoding\": [\"chunked\"],\n\/\/\t\t\t\t\t\"Vary\": [\"Accept-Encoding\"]\n\/\/\t\t\t\t}\n\/\/\t\t\t},\n\/\/\t\t\t\"request\": {\n\/\/\t\t\t\t\"path\": [{\n\/\/\t\t\t\t\t\"matcher\": \"exact\",\n\/\/\t\t\t\t\t\"value\": \"\/bar\"\n\/\/\t\t\t\t}],\n\/\/\t\t\t\t\"method\": [{\n\/\/\t\t\t\t\t\"matcher\": \"exact\",\n\/\/\t\t\t\t\t\"value\": \"GET\"\n\/\/\t\t\t\t}],\n\/\/\t\t\t\t\"query\": {},\n\/\/\t\t\t\t\"body\": [{\n\/\/\t\t\t\t\t\"matcher\": \"exact\",\n\/\/\t\t\t\t\t\"value\": \"\"\n\/\/\t\t\t\t}]\n\/\/\t\t\t}\n\/\/\t\t}],\n\/\/\t\t\"globalActions\": {\n\/\/\t\t\t\"delays\": []\n\/\/\t\t}\n\/\/\t},\n\/\/\t\"meta\": {\n\/\/\t\t\"schemaVersion\": \"v5\",\n\/\/\t\t\"hoverflyVersion\": \"v0.17.0\",\n\/\/\t\t\"timeExported\": \"2017-05-30T14:23:44+01:00\"\n\/\/\t}\n\/\/}`), &simulation)\n\n\ttemplated := v2.SimulationViewV5{}\n\t_ = json.Unmarshal([]byte(`{\n\t\"data\": {\n\t\t\"pairs\": [{\n\t\t\t\"response\": {\n\t\t\t\t\"status\": 200,\n\t\t\t\t\"body\": \"{\\\"st\\\": 1,\\\"sid\\\": 418,\\\"tt\\\": \\\"{{ Request.Path.[0] }}\\\",\\\"gr\\\": 0,\\\"uuid\\\": \\\"{{ randomUuid }}\\\",\\\"ip\\\": \\\"127.0.0.1\\\",\\\"ua\\\": \\\"user_agent\\\",\\\"tz\\\": -6,\\\"v\\\": 1}\",\n\t\t\t\t\"encodedBody\": false,\n\t\t\t\t\"templated\": true,\n\t\t\t\t\"headers\": {\n\t\t\t\t\t\"Accept-Ranges\": [\"bytes\"],\n\t\t\t\t\t\"Cache-Control\": [\"max-age=3600\"],\n\t\t\t\t\t\"Connection\": [\"keep-alive\"],\n\t\t\t\t\t\"Content-Type\": [\"text\/html; charset=utf-8\"],\n\t\t\t\t\t\"Date\": [\"Tue, 30 May 2017 13:23:09 GMT\"],\n\t\t\t\t\t\"Etag\": [\"\\\"82b6bafbc0c4af5e3886d07802f4b62d\\\"\"],\n\t\t\t\t\t\"Hoverfly\": [\"Was-Here\"],\n\t\t\t\t\t\"Last-Modified\": [\"Fri, 19 May 2017 10:59:12 GMT\"],\n\t\t\t\t\t\"Server\": [\"nginx\"],\n\t\t\t\t\t\"Strict-Transport-Security\": [\"max-age=31556926\"],\n\t\t\t\t\t\"Transfer-Encoding\": [\"chunked\"],\n\t\t\t\t\t\"Vary\": [\"Accept-Encoding\"]\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"request\": {\n\t\t\t\t\"path\": [{\n\t\t\t\t\t\"matcher\": \"exact\",\n\t\t\t\t\t\"value\": \"\/bar\"\n\t\t\t\t}],\n\t\t\t\t\"method\": [{\n\t\t\t\t\t\"matcher\": \"exact\",\n\t\t\t\t\t\"value\": \"GET\"\n\t\t\t\t}],\n\t\t\t\t\"query\": {},\n\t\t\t\t\"body\": [{\n\t\t\t\t\t\"matcher\": \"exact\",\n\t\t\t\t\t\"value\": \"\"\n\t\t\t\t}]\n\t\t\t}\n\t\t}],\n\t\t\"globalActions\": {\n\t\t\t\"delays\": []\n\t\t}\n\t},\n\t\"meta\": {\n\t\t\"schemaVersion\": \"v5\",\n\t\t\"hoverflyVersion\": \"v0.17.0\",\n\t\t\"timeExported\": \"2017-05-30T14:23:44+01:00\"\n\t}\n}`), &templated)\n\n\n\tfmt.Println(hoverfly.StartProxy())\n\ttime.Sleep(time.Second)\n\trequest, _ := http.NewRequest(http.MethodGet, \"http:\/\/localhost:8500\/bar\", nil)\n\tvar resp *http.Response\n\t\/\/hoverfly.PutSimulation(simulation)\n\t\/\/\n\t\/\/b.Run(\"Simple simulation\", func(b *testing.B) {\n\t\/\/\n\t\/\/\tfor n := 0; n < b.N; n++ {\n\t\/\/\t\tresp = hoverfly.processRequest(request)\n\t\/\/\t}\n\t\/\/})\n\n\n\thoverfly.PutSimulation(templated)\n\n\tb.Run(\"Templated simulation\", func(b *testing.B) {\n\n\t\tfor n := 0; n < b.N; n++ {\n\t\t\tresp = hoverfly.processRequest(request)\n\t\t}\n\t})\n\n\tExpect(resp.StatusCode).To(Equal(200))\n\n\thoverfly.StopProxy()\n}\n<commit_msg>Create sub benchmarks<commit_after>package hoverfly\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/SpectoLabs\/hoverfly\/core\/handlers\/v2\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"net\/http\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc BenchmarkProcessRequest(b *testing.B) {\n\n\tRegisterTestingT(b)\n\n\thoverfly := NewHoverflyWithConfiguration(&Configuration{\n\t\tWebserver: true,\n\t\tProxyPort: \"8500\",\n\t\tAdminPort: \"8888\",\n\t\tMode: \"simulate\",\n\t})\n\n\tsimulation := v2.SimulationViewV5{}\n\t_ = json.Unmarshal([]byte(`{\n\t\"data\": {\n\t\t\"pairs\": [{\n\t\t\t\"response\": {\n\t\t\t\t\"status\": 200,\n\t\t\t\t\"body\": \"5iNe8dxWH5Ca8pZqAfEHv3rgC0SsvKNLu6o3K\",\n\t\t\t\t\"encodedBody\": false,\n\t\t\t\t\"headers\": {\n\t\t\t\t\t\"Accept-Ranges\": [\"bytes\"],\n\t\t\t\t\t\"Cache-Control\": [\"max-age=3600\"],\n\t\t\t\t\t\"Connection\": [\"keep-alive\"],\n\t\t\t\t\t\"Content-Type\": [\"text\/html; charset=utf-8\"],\n\t\t\t\t\t\"Date\": [\"Tue, 30 May 2017 13:23:09 GMT\"],\n\t\t\t\t\t\"Etag\": [\"\\\"82b6bafbc0c4af5e3886d07802f4b62d\\\"\"],\n\t\t\t\t\t\"Hoverfly\": [\"Was-Here\"],\n\t\t\t\t\t\"Last-Modified\": [\"Fri, 19 May 2017 10:59:12 GMT\"],\n\t\t\t\t\t\"Server\": [\"nginx\"],\n\t\t\t\t\t\"Strict-Transport-Security\": [\"max-age=31556926\"],\n\t\t\t\t\t\"Transfer-Encoding\": [\"chunked\"],\n\t\t\t\t\t\"Vary\": [\"Accept-Encoding\"]\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"request\": {\n\t\t\t\t\"path\": [{\n\t\t\t\t\t\"matcher\": \"exact\",\n\t\t\t\t\t\"value\": \"\/bar\"\n\t\t\t\t}],\n\t\t\t\t\"method\": [{\n\t\t\t\t\t\"matcher\": \"exact\",\n\t\t\t\t\t\"value\": \"GET\"\n\t\t\t\t}],\n\t\t\t\t\"query\": {},\n\t\t\t\t\"body\": [{\n\t\t\t\t\t\"matcher\": \"exact\",\n\t\t\t\t\t\"value\": \"\"\n\t\t\t\t}]\n\t\t\t}\n\t\t}],\n\t\t\"globalActions\": {\n\t\t\t\"delays\": []\n\t\t}\n\t},\n\t\"meta\": {\n\t\t\"schemaVersion\": \"v5\",\n\t\t\"hoverflyVersion\": \"v0.17.0\",\n\t\t\"timeExported\": \"2017-05-30T14:23:44+01:00\"\n\t}\n}`), &simulation)\n\n\ttemplated := v2.SimulationViewV5{}\n\t_ = json.Unmarshal([]byte(`{\n\t\"data\": {\n\t\t\"pairs\": [{\n\t\t\t\"response\": {\n\t\t\t\t\"status\": 200,\n\t\t\t\t\"body\": \"{\\\"st\\\": 1,\\\"sid\\\": 418,\\\"tt\\\": \\\"{{ Request.Path.[0] }}\\\",\\\"gr\\\": 0,\\\"uuid\\\": \\\"{{ randomUuid }}\\\",\\\"ip\\\": \\\"127.0.0.1\\\",\\\"ua\\\": \\\"user_agent\\\",\\\"tz\\\": -6,\\\"v\\\": 1}\",\n\t\t\t\t\"encodedBody\": false,\n\t\t\t\t\"templated\": true,\n\t\t\t\t\"headers\": {\n\t\t\t\t\t\"Accept-Ranges\": [\"bytes\"],\n\t\t\t\t\t\"Cache-Control\": [\"max-age=3600\"],\n\t\t\t\t\t\"Connection\": [\"keep-alive\"],\n\t\t\t\t\t\"Content-Type\": [\"text\/html; charset=utf-8\"],\n\t\t\t\t\t\"Date\": [\"Tue, 30 May 2017 13:23:09 GMT\"],\n\t\t\t\t\t\"Etag\": [\"\\\"82b6bafbc0c4af5e3886d07802f4b62d\\\"\"],\n\t\t\t\t\t\"Hoverfly\": [\"Was-Here\"],\n\t\t\t\t\t\"Last-Modified\": [\"Fri, 19 May 2017 10:59:12 GMT\"],\n\t\t\t\t\t\"Server\": [\"nginx\"],\n\t\t\t\t\t\"Strict-Transport-Security\": [\"max-age=31556926\"],\n\t\t\t\t\t\"Transfer-Encoding\": [\"chunked\"],\n\t\t\t\t\t\"Vary\": [\"Accept-Encoding\"]\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"request\": {\n\t\t\t\t\"path\": [{\n\t\t\t\t\t\"matcher\": \"exact\",\n\t\t\t\t\t\"value\": \"\/bar\"\n\t\t\t\t}],\n\t\t\t\t\"method\": [{\n\t\t\t\t\t\"matcher\": \"exact\",\n\t\t\t\t\t\"value\": \"GET\"\n\t\t\t\t}],\n\t\t\t\t\"query\": {},\n\t\t\t\t\"body\": [{\n\t\t\t\t\t\"matcher\": \"exact\",\n\t\t\t\t\t\"value\": \"\"\n\t\t\t\t}]\n\t\t\t}\n\t\t}],\n\t\t\"globalActions\": {\n\t\t\t\"delays\": []\n\t\t}\n\t},\n\t\"meta\": {\n\t\t\"schemaVersion\": \"v5\",\n\t\t\"hoverflyVersion\": \"v0.17.0\",\n\t\t\"timeExported\": \"2017-05-30T14:23:44+01:00\"\n\t}\n}`), &templated)\n\n\tbenchmarks := []struct{\n\t\tname string\n\t\tsimulation v2.SimulationViewV5\n\t} {\n\t\t{\"Simple simulation\", simulation},\n\t\t{\"Templated simulation\", templated},\n\t}\n\n\tfmt.Println(hoverfly.StartProxy())\n\ttime.Sleep(time.Second)\n\trequest, _ := http.NewRequest(http.MethodGet, \"http:\/\/localhost:8500\/bar\", nil)\n\tvar resp *http.Response\n\n\tfor _, bm := range benchmarks {\n\t\thoverfly.DeleteSimulation()\n\t\thoverfly.PutSimulation(bm.simulation)\n\n\t\tb.Run(bm.name, func(b *testing.B) {\n\n\t\t\tfor n := 0; n < b.N; n++ {\n\t\t\t\tresp = hoverfly.processRequest(request)\n\t\t\t}\n\t\t})\n\t}\n\n\n\tExpect(resp.StatusCode).To(Equal(200))\n\n\thoverfly.StopProxy()\n}\n<|endoftext|>"} {"text":"<commit_before>package component\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/TheThingsNetwork\/go-account-lib\/cache\"\n\t\"github.com\/TheThingsNetwork\/go-account-lib\/claims\"\n\t\"github.com\/TheThingsNetwork\/go-account-lib\/keys\"\n\t\"github.com\/TheThingsNetwork\/go-account-lib\/oauth\"\n\t\"github.com\/TheThingsNetwork\/go-account-lib\/tokenkey\"\n\t\"github.com\/TheThingsNetwork\/ttn\/api\"\n\tpb_discovery \"github.com\/TheThingsNetwork\/ttn\/api\/discovery\"\n\t\"github.com\/TheThingsNetwork\/ttn\/utils\/errors\"\n\t\"github.com\/TheThingsNetwork\/ttn\/utils\/security\"\n\tjwt \"github.com\/dgrijalva\/jwt-go\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\/metadata\"\n)\n\n\/\/ InitAuth initializes Auth functionality\nfunc (c *Component) InitAuth() error {\n\tinits := []func() error{\n\t\tc.initAuthServers,\n\t\tc.initKeyPair,\n\t\tc.initRoots,\n\t}\n\tif c.Config.UseTLS {\n\t\tinits = append(inits, c.initTLS)\n\t}\n\n\tfor _, init := range inits {\n\t\tif err := init(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\ntype authServer struct {\n\turl string\n\tusername string\n\tpassword string\n}\n\nfunc parseAuthServer(str string) (srv authServer, err error) {\n\turl, err := url.Parse(str)\n\tif err != nil {\n\t\treturn srv, err\n\t}\n\tsrv.url = fmt.Sprintf(\"%s:\/\/%s\", url.Scheme, url.Host)\n\tif url.User != nil {\n\t\tsrv.username = url.User.Username()\n\t\tsrv.password, _ = url.User.Password()\n\t}\n\treturn srv, nil\n}\n\nfunc (c *Component) initAuthServers() error {\n\turlMap := make(map[string]string)\n\tfuncMap := make(map[string]tokenkey.TokenFunc)\n\tvar httpProvider tokenkey.Provider\n\tfor id, url := range c.Config.AuthServers {\n\t\tif strings.HasPrefix(url, \"file:\/\/\") {\n\t\t\tfile := strings.TrimPrefix(url, \"file:\/\/\")\n\t\t\tcontents, err := ioutil.ReadFile(path.Clean(file))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfuncMap[id] = func(renew bool) (*tokenkey.TokenKey, error) {\n\t\t\t\treturn &tokenkey.TokenKey{Algorithm: \"ES256\", Key: string(contents)}, nil\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tsrv, err := parseAuthServer(url)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\turlMap[id] = srv.url\n\t\tfuncMap[id] = func(renew bool) (*tokenkey.TokenKey, error) {\n\t\t\treturn httpProvider.Get(id, renew)\n\t\t}\n\t}\n\thttpProvider = tokenkey.HTTPProvider(\n\t\turlMap,\n\t\tcache.WriteTroughCacheWithFormat(c.Config.KeyDir, \"auth-%s.pub\"),\n\t)\n\tc.TokenKeyProvider = tokenkey.FuncProvider(funcMap)\n\treturn nil\n}\n\n\/\/ UpdateTokenKey updates the OAuth Bearer token key\nfunc (c *Component) UpdateTokenKey() error {\n\tif c.TokenKeyProvider == nil {\n\t\treturn errors.NewErrInternal(\"No public key provider configured for token validation\")\n\t}\n\n\t\/\/ Set up Auth Server Token Validation\n\terr := c.TokenKeyProvider.Update()\n\tif err != nil {\n\t\tc.Ctx.Warnf(\"ttn: Failed to refresh public keys for token validation: %s\", err.Error())\n\t} else {\n\t\tc.Ctx.Info(\"ttn: Got public keys for token validation\")\n\t}\n\n\treturn nil\n}\n\nfunc (c *Component) initKeyPair() error {\n\tpriv, err := security.LoadKeypair(c.Config.KeyDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.privateKey = priv\n\n\tpubPEM, _ := security.PublicPEM(priv)\n\tc.Identity.PublicKey = string(pubPEM)\n\n\treturn nil\n}\n\nfunc (c *Component) initTLS() error {\n\tcert, err := security.LoadCert(c.Config.KeyDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.Identity.Certificate = string(cert)\n\n\tprivPEM, _ := security.PrivatePEM(c.privateKey)\n\tcer, err := tls.X509KeyPair(cert, privPEM)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.tlsConfig = &tls.Config{Certificates: []tls.Certificate{cer}}\n\treturn nil\n}\n\nfunc (c *Component) initRoots() error {\n\tpath := filepath.Clean(c.Config.KeyDir + \"\/ca.cert\")\n\tcert, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tif !api.RootCAs.AppendCertsFromPEM(cert) {\n\t\treturn fmt.Errorf(\"Could not add root certificates from %s\", path)\n\t}\n\treturn nil\n}\n\n\/\/ BuildJWT builds a short-lived JSON Web Token for this component\nfunc (c *Component) BuildJWT() (string, error) {\n\tif c.privateKey != nil {\n\t\tprivPEM, err := security.PrivatePEM(c.privateKey)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn security.BuildJWT(c.Identity.Id, 20*time.Second, privPEM)\n\t}\n\treturn \"\", nil\n}\n\n\/\/ GetContext returns a context for outgoing RPC request. If token is \"\", this function will generate a short lived token from the component\nfunc (c *Component) GetContext(token string) context.Context {\n\tvar serviceName, serviceVersion, id, netAddress string\n\tif c.Identity != nil {\n\t\tserviceName = c.Identity.ServiceName\n\t\tid = c.Identity.Id\n\t\tif token == \"\" {\n\t\t\ttoken, _ = c.BuildJWT()\n\t\t}\n\t\tserviceVersion = c.Identity.ServiceVersion\n\t\tnetAddress = c.Identity.NetAddress\n\t}\n\tmd := metadata.Pairs(\n\t\t\"service-name\", serviceName,\n\t\t\"service-version\", serviceVersion,\n\t\t\"id\", id,\n\t\t\"token\", token,\n\t\t\"net-address\", netAddress,\n\t)\n\tctx := metadata.NewContext(context.Background(), md)\n\treturn ctx\n}\n\n\/\/ ExchangeAppKeyForToken enables authentication with the App Access Key\nfunc (c *Component) ExchangeAppKeyForToken(appID, key string) (string, error) {\n\tissuerID := keys.KeyIssuer(key)\n\tif issuerID == \"\" {\n\t\t\/\/ Take the first configured auth server\n\t\tfor k := range c.Config.AuthServers {\n\t\t\tissuerID = k\n\t\t\tbreak\n\t\t}\n\t\tkey = fmt.Sprintf(\"%s.%s\", issuerID, key)\n\t}\n\tissuer, ok := c.Config.AuthServers[issuerID]\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"Auth server %s not registered\", issuer)\n\t}\n\n\tsrv, _ := parseAuthServer(issuer)\n\n\toauth := oauth.OAuth(srv.url, &oauth.Client{\n\t\tID: srv.username,\n\t\tSecret: srv.password,\n\t})\n\n\ttoken, err := oauth.ExchangeAppKeyForToken(appID, key)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token.AccessToken, nil\n}\n\n\/\/ ValidateNetworkContext validates the context of a network request (router-broker, broker-handler, etc)\nfunc (c *Component) ValidateNetworkContext(ctx context.Context) (component *pb_discovery.Announcement, err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}()\n\n\tmd, ok := metadata.FromContext(ctx)\n\tif !ok {\n\t\terr = errors.NewErrInternal(\"Could not get metadata from context\")\n\t\treturn\n\t}\n\tvar id, serviceName, token string\n\tif ids, ok := md[\"id\"]; ok && len(ids) == 1 {\n\t\tid = ids[0]\n\t}\n\tif id == \"\" {\n\t\terr = errors.NewErrInvalidArgument(\"Metadata\", \"id missing\")\n\t\treturn\n\t}\n\tif serviceNames, ok := md[\"service-name\"]; ok && len(serviceNames) == 1 {\n\t\tserviceName = serviceNames[0]\n\t}\n\tif serviceName == \"\" {\n\t\terr = errors.NewErrInvalidArgument(\"Metadata\", \"service-name missing\")\n\t\treturn\n\t}\n\tif tokens, ok := md[\"token\"]; ok && len(tokens) == 1 {\n\t\ttoken = tokens[0]\n\t}\n\n\tvar announcement *pb_discovery.Announcement\n\tannouncement, err = c.Discover(serviceName, id)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif announcement.PublicKey == \"\" {\n\t\treturn announcement, nil\n\t}\n\n\tif token == \"\" {\n\t\terr = errors.NewErrInvalidArgument(\"Metadata\", \"token missing\")\n\t\treturn\n\t}\n\n\tvar claims *jwt.StandardClaims\n\tclaims, err = security.ValidateJWT(token, []byte(announcement.PublicKey))\n\tif err != nil {\n\t\treturn\n\t}\n\tif claims.Issuer != id {\n\t\terr = errors.NewErrInvalidArgument(\"Metadata\", \"token was issued by different component id\")\n\t\treturn\n\t}\n\n\treturn announcement, nil\n}\n\n\/\/ ValidateTTNAuthContext gets a token from the context and validates it\nfunc (c *Component) ValidateTTNAuthContext(ctx context.Context) (*claims.Claims, error) {\n\ttoken, err := api.TokenFromContext(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif c.TokenKeyProvider == nil {\n\t\treturn nil, errors.NewErrInternal(\"No token provider configured\")\n\t}\n\n\tclaims, err := claims.FromToken(c.TokenKeyProvider, token)\n\tif err != nil {\n\t\treturn nil, errors.NewErrPermissionDenied(err.Error())\n\t}\n\n\treturn claims, nil\n}\n<commit_msg>Fix shadowing of Auth Server ID and URL<commit_after>package component\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/TheThingsNetwork\/go-account-lib\/cache\"\n\t\"github.com\/TheThingsNetwork\/go-account-lib\/claims\"\n\t\"github.com\/TheThingsNetwork\/go-account-lib\/keys\"\n\t\"github.com\/TheThingsNetwork\/go-account-lib\/oauth\"\n\t\"github.com\/TheThingsNetwork\/go-account-lib\/tokenkey\"\n\t\"github.com\/TheThingsNetwork\/ttn\/api\"\n\tpb_discovery \"github.com\/TheThingsNetwork\/ttn\/api\/discovery\"\n\t\"github.com\/TheThingsNetwork\/ttn\/utils\/errors\"\n\t\"github.com\/TheThingsNetwork\/ttn\/utils\/security\"\n\tjwt \"github.com\/dgrijalva\/jwt-go\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\/metadata\"\n)\n\n\/\/ InitAuth initializes Auth functionality\nfunc (c *Component) InitAuth() error {\n\tinits := []func() error{\n\t\tc.initAuthServers,\n\t\tc.initKeyPair,\n\t\tc.initRoots,\n\t}\n\tif c.Config.UseTLS {\n\t\tinits = append(inits, c.initTLS)\n\t}\n\n\tfor _, init := range inits {\n\t\tif err := init(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\ntype authServer struct {\n\turl string\n\tusername string\n\tpassword string\n}\n\nfunc parseAuthServer(str string) (srv authServer, err error) {\n\turl, err := url.Parse(str)\n\tif err != nil {\n\t\treturn srv, err\n\t}\n\tsrv.url = fmt.Sprintf(\"%s:\/\/%s\", url.Scheme, url.Host)\n\tif url.User != nil {\n\t\tsrv.username = url.User.Username()\n\t\tsrv.password, _ = url.User.Password()\n\t}\n\treturn srv, nil\n}\n\nfunc (c *Component) initAuthServers() error {\n\turlMap := make(map[string]string)\n\tfuncMap := make(map[string]tokenkey.TokenFunc)\n\tvar httpProvider tokenkey.Provider\n\tfor id, url := range c.Config.AuthServers {\n\t\tid, url := id, url \/\/ deliberately shadow these\n\t\tif strings.HasPrefix(url, \"file:\/\/\") {\n\t\t\tfile := strings.TrimPrefix(url, \"file:\/\/\")\n\t\t\tcontents, err := ioutil.ReadFile(path.Clean(file))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfuncMap[id] = func(renew bool) (*tokenkey.TokenKey, error) {\n\t\t\t\treturn &tokenkey.TokenKey{Algorithm: \"ES256\", Key: string(contents)}, nil\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tsrv, err := parseAuthServer(url)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\turlMap[id] = srv.url\n\t\tfuncMap[id] = func(renew bool) (*tokenkey.TokenKey, error) {\n\t\t\treturn httpProvider.Get(id, renew)\n\t\t}\n\t}\n\thttpProvider = tokenkey.HTTPProvider(\n\t\turlMap,\n\t\tcache.WriteTroughCacheWithFormat(c.Config.KeyDir, \"auth-%s.pub\"),\n\t)\n\tc.TokenKeyProvider = tokenkey.FuncProvider(funcMap)\n\treturn nil\n}\n\n\/\/ UpdateTokenKey updates the OAuth Bearer token key\nfunc (c *Component) UpdateTokenKey() error {\n\tif c.TokenKeyProvider == nil {\n\t\treturn errors.NewErrInternal(\"No public key provider configured for token validation\")\n\t}\n\n\t\/\/ Set up Auth Server Token Validation\n\terr := c.TokenKeyProvider.Update()\n\tif err != nil {\n\t\tc.Ctx.Warnf(\"ttn: Failed to refresh public keys for token validation: %s\", err.Error())\n\t} else {\n\t\tc.Ctx.Info(\"ttn: Got public keys for token validation\")\n\t}\n\n\treturn nil\n}\n\nfunc (c *Component) initKeyPair() error {\n\tpriv, err := security.LoadKeypair(c.Config.KeyDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.privateKey = priv\n\n\tpubPEM, _ := security.PublicPEM(priv)\n\tc.Identity.PublicKey = string(pubPEM)\n\n\treturn nil\n}\n\nfunc (c *Component) initTLS() error {\n\tcert, err := security.LoadCert(c.Config.KeyDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.Identity.Certificate = string(cert)\n\n\tprivPEM, _ := security.PrivatePEM(c.privateKey)\n\tcer, err := tls.X509KeyPair(cert, privPEM)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.tlsConfig = &tls.Config{Certificates: []tls.Certificate{cer}}\n\treturn nil\n}\n\nfunc (c *Component) initRoots() error {\n\tpath := filepath.Clean(c.Config.KeyDir + \"\/ca.cert\")\n\tcert, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tif !api.RootCAs.AppendCertsFromPEM(cert) {\n\t\treturn fmt.Errorf(\"Could not add root certificates from %s\", path)\n\t}\n\treturn nil\n}\n\n\/\/ BuildJWT builds a short-lived JSON Web Token for this component\nfunc (c *Component) BuildJWT() (string, error) {\n\tif c.privateKey != nil {\n\t\tprivPEM, err := security.PrivatePEM(c.privateKey)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn security.BuildJWT(c.Identity.Id, 20*time.Second, privPEM)\n\t}\n\treturn \"\", nil\n}\n\n\/\/ GetContext returns a context for outgoing RPC request. If token is \"\", this function will generate a short lived token from the component\nfunc (c *Component) GetContext(token string) context.Context {\n\tvar serviceName, serviceVersion, id, netAddress string\n\tif c.Identity != nil {\n\t\tserviceName = c.Identity.ServiceName\n\t\tid = c.Identity.Id\n\t\tif token == \"\" {\n\t\t\ttoken, _ = c.BuildJWT()\n\t\t}\n\t\tserviceVersion = c.Identity.ServiceVersion\n\t\tnetAddress = c.Identity.NetAddress\n\t}\n\tmd := metadata.Pairs(\n\t\t\"service-name\", serviceName,\n\t\t\"service-version\", serviceVersion,\n\t\t\"id\", id,\n\t\t\"token\", token,\n\t\t\"net-address\", netAddress,\n\t)\n\tctx := metadata.NewContext(context.Background(), md)\n\treturn ctx\n}\n\n\/\/ ExchangeAppKeyForToken enables authentication with the App Access Key\nfunc (c *Component) ExchangeAppKeyForToken(appID, key string) (string, error) {\n\tissuerID := keys.KeyIssuer(key)\n\tif issuerID == \"\" {\n\t\t\/\/ Take the first configured auth server\n\t\tfor k := range c.Config.AuthServers {\n\t\t\tissuerID = k\n\t\t\tbreak\n\t\t}\n\t\tkey = fmt.Sprintf(\"%s.%s\", issuerID, key)\n\t}\n\tissuer, ok := c.Config.AuthServers[issuerID]\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"Auth server %s not registered\", issuer)\n\t}\n\n\tsrv, _ := parseAuthServer(issuer)\n\n\toauth := oauth.OAuth(srv.url, &oauth.Client{\n\t\tID: srv.username,\n\t\tSecret: srv.password,\n\t})\n\n\ttoken, err := oauth.ExchangeAppKeyForToken(appID, key)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token.AccessToken, nil\n}\n\n\/\/ ValidateNetworkContext validates the context of a network request (router-broker, broker-handler, etc)\nfunc (c *Component) ValidateNetworkContext(ctx context.Context) (component *pb_discovery.Announcement, err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}()\n\n\tmd, ok := metadata.FromContext(ctx)\n\tif !ok {\n\t\terr = errors.NewErrInternal(\"Could not get metadata from context\")\n\t\treturn\n\t}\n\tvar id, serviceName, token string\n\tif ids, ok := md[\"id\"]; ok && len(ids) == 1 {\n\t\tid = ids[0]\n\t}\n\tif id == \"\" {\n\t\terr = errors.NewErrInvalidArgument(\"Metadata\", \"id missing\")\n\t\treturn\n\t}\n\tif serviceNames, ok := md[\"service-name\"]; ok && len(serviceNames) == 1 {\n\t\tserviceName = serviceNames[0]\n\t}\n\tif serviceName == \"\" {\n\t\terr = errors.NewErrInvalidArgument(\"Metadata\", \"service-name missing\")\n\t\treturn\n\t}\n\tif tokens, ok := md[\"token\"]; ok && len(tokens) == 1 {\n\t\ttoken = tokens[0]\n\t}\n\n\tvar announcement *pb_discovery.Announcement\n\tannouncement, err = c.Discover(serviceName, id)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif announcement.PublicKey == \"\" {\n\t\treturn announcement, nil\n\t}\n\n\tif token == \"\" {\n\t\terr = errors.NewErrInvalidArgument(\"Metadata\", \"token missing\")\n\t\treturn\n\t}\n\n\tvar claims *jwt.StandardClaims\n\tclaims, err = security.ValidateJWT(token, []byte(announcement.PublicKey))\n\tif err != nil {\n\t\treturn\n\t}\n\tif claims.Issuer != id {\n\t\terr = errors.NewErrInvalidArgument(\"Metadata\", \"token was issued by different component id\")\n\t\treturn\n\t}\n\n\treturn announcement, nil\n}\n\n\/\/ ValidateTTNAuthContext gets a token from the context and validates it\nfunc (c *Component) ValidateTTNAuthContext(ctx context.Context) (*claims.Claims, error) {\n\ttoken, err := api.TokenFromContext(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif c.TokenKeyProvider == nil {\n\t\treturn nil, errors.NewErrInternal(\"No token provider configured\")\n\t}\n\n\tclaims, err := claims.FromToken(c.TokenKeyProvider, token)\n\tif err != nil {\n\t\treturn nil, errors.NewErrPermissionDenied(err.Error())\n\t}\n\n\treturn claims, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package qf\n\nimport (\n\tcontext \"context\"\n\tfmt \"fmt\"\n\tnet \"net\"\n\tsync \"sync\"\n\t\"testing\"\n\ttime \"time\"\n\n\tgrpc \"google.golang.org\/grpc\"\n)\n\nconst requestValue = 0x1001_1001\n\ntype testQSpec struct {\n\tquorum int\n}\n\nfunc (q testQSpec) UseReqQF(in *Request, replies []*Response) (*Response, bool) {\n\tif len(replies) < q.quorum {\n\t\treturn nil, false\n\t}\n\texpected := in.GetValue()\n\tfor _, reply := range replies {\n\t\tif expected != reply.GetResult() {\n\t\t\treturn reply, true\n\t\t}\n\t}\n\treturn replies[0], true\n}\n\nfunc (q testQSpec) IgnoreReqQF(_ *Request, replies []*Response) (*Response, bool) {\n\tif len(replies) < q.quorum {\n\t\treturn nil, false\n\t}\n\texpected := int64(requestValue)\n\tfor _, reply := range replies {\n\t\tif expected != reply.GetResult() {\n\t\t\treturn reply, true\n\t\t}\n\t}\n\treturn replies[0], true\n}\n\nfunc (q testQSpec) WithoutReqQF(replies []*Response) (*Response, bool) {\n\tif len(replies) < q.quorum {\n\t\treturn nil, false\n\t}\n\texpected := int64(requestValue)\n\tfor _, reply := range replies {\n\t\tif expected != reply.GetResult() {\n\t\t\treturn reply, true\n\t\t}\n\t}\n\treturn replies[0], true\n}\n\nfunc BenchmarkQF(b *testing.B) {\n\tfor n := 3; n < 20; n += 2 {\n\t\tquorum := n \/ 2\n\t\tqspec := &testQSpec{quorum: quorum}\n\t\trequest := &Request{Value: 1}\n\n\t\tb.Run(fmt.Sprintf(\"UseReq_%d\", n), func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\treplies := make([]*Response, 0, n)\n\t\t\t\tfor j := 0; j < n; j++ {\n\t\t\t\t\treplies = append(replies, &Response{Result: request.Value})\n\t\t\t\t\tresp, q := qspec.UseReqQF(request, replies)\n\t\t\t\t\tif q {\n\t\t\t\t\t\t_ = resp.GetResult()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tb.Run(fmt.Sprintf(\"IgnoreReq_%d\", n), func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\treplies := make([]*Response, 0, n)\n\t\t\t\tfor j := 0; j < n; j++ {\n\t\t\t\t\treplies = append(replies, &Response{Result: request.Value})\n\t\t\t\t\tresp, q := qspec.IgnoreReqQF(request, replies)\n\t\t\t\t\tif q {\n\t\t\t\t\t\t_ = resp.GetResult()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tb.Run(fmt.Sprintf(\"WithoutReq_%d\", n), func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\treplies := make([]*Response, 0, n)\n\t\t\t\tfor j := 0; j < n; j++ {\n\t\t\t\t\treplies = append(replies, &Response{Result: request.Value})\n\t\t\t\t\tresp, q := qspec.WithoutReqQF(replies)\n\t\t\t\t\tif q {\n\t\t\t\t\t\t_ = resp.GetResult()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n\/\/ TODO(meling) consider making these things generally available, at least for testing, perhaps putting some of these things in an internal package.\n\ntype portSupplier struct {\n\tp int\n\tsync.Mutex\n}\n\nfunc (p *portSupplier) get() int {\n\tp.Lock()\n\t_p := p.p\n\tp.p++\n\tp.Unlock()\n\treturn _p\n}\n\nvar supplier = portSupplier{p: 22332}\n\nfunc getListener() (net.Listener, error) {\n\treturn net.Listen(\"tcp\", fmt.Sprintf(\":%d\", supplier.get()))\n}\n\ntype testSrv struct {\n\tdummy int64\n}\n\nfunc (s testSrv) UseReq(_ context.Context, req *Request) (*Response, error) {\n\treturn &Response{Result: req.GetValue()}, nil\n}\n\nfunc (s testSrv) IgnoreReq(_ context.Context, req *Request) (*Response, error) {\n\treturn &Response{Result: req.GetValue()}, nil\n}\n\nfunc (s testSrv) WithoutReq(_ context.Context, req *Request) (*Response, error) {\n\treturn &Response{Result: req.GetValue()}, nil\n}\n\nfunc setup(b *testing.B, numServers int) ([]*grpc.Server, *Manager, *Configuration) {\n\tquorum := numServers \/ 2\n\tservers := make([]*grpc.Server, numServers)\n\taddrs := make([]string, numServers)\n\tfor i := 0; i < numServers; i++ {\n\t\tsrv := grpc.NewServer()\n\t\tRegisterQuorumFunctionServer(srv, &testSrv{})\n\t\tlis, err := getListener()\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"Failed to listen on port: %v\", err)\n\t\t}\n\t\taddrs[i] = lis.Addr().String()\n\t\tservers[i] = srv\n\t\tgo srv.Serve(lis)\n\t}\n\n\t\/\/ client setup\n\tman, err := NewManager(addrs,\n\t\tWithGrpcDialOptions(grpc.WithInsecure()),\n\t\tWithDialTimeout(10*time.Second),\n\t)\n\tif err != nil {\n\t\tb.Fatalf(\"Failed to create manager: %v\", err)\n\t}\n\tc, err := man.NewConfiguration(man.NodeIDs(), &testQSpec{quorum: quorum})\n\tif err != nil {\n\t\tb.Fatalf(\"Failed to create configuration: %v\", err)\n\t}\n\treturn servers, man, c\n}\n\nfunc BenchmarkUseReq(b *testing.B) {\n\tservers, man, c := setup(b, 5)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\t\/\/ begin benchmarking\n\tfor i := 0; i < b.N; i++ {\n\t\tresp, err := c.UseReq(context.Background(), &Request{Value: int64(requestValue)})\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"UseReq error: %v\", err)\n\t\t}\n\t\t_ = resp.GetResult()\n\t}\n\n\tman.Close()\n\tfor _, srv := range servers {\n\t\tsrv.Stop()\n\t}\n}\n\nfunc BenchmarkIgnoreReq(b *testing.B) {\n\tservers, man, c := setup(b, 5)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\t\/\/ begin benchmarking\n\tfor i := 0; i < b.N; i++ {\n\t\tresp, err := c.IgnoreReq(context.Background(), &Request{Value: int64(requestValue)})\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"IgnoreReq error: %v\", err)\n\t\t}\n\t\t_ = resp.GetResult()\n\t}\n\n\tman.Close()\n\tfor _, srv := range servers {\n\t\tsrv.Stop()\n\t}\n}\n\nfunc BenchmarkWithoutReq(b *testing.B) {\n\tservers, man, c := setup(b, 5)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\t\/\/ begin benchmarking\n\tfor i := 0; i < b.N; i++ {\n\t\tresp, err := c.WithoutReq(context.Background(), &Request{Value: int64(requestValue)})\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"WithoutReq error: %v\", err)\n\t\t}\n\t\t_ = resp.GetResult()\n\t}\n\n\tman.Close()\n\tfor _, srv := range servers {\n\t\tsrv.Stop()\n\t}\n}\n<commit_msg>removed useless import names added by code editor, or something<commit_after>package qf\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"google.golang.org\/grpc\"\n)\n\nconst requestValue = 0x1001_1001\n\ntype testQSpec struct {\n\tquorum int\n}\n\nfunc (q testQSpec) UseReqQF(in *Request, replies []*Response) (*Response, bool) {\n\tif len(replies) < q.quorum {\n\t\treturn nil, false\n\t}\n\texpected := in.GetValue()\n\tfor _, reply := range replies {\n\t\tif expected != reply.GetResult() {\n\t\t\treturn reply, true\n\t\t}\n\t}\n\treturn replies[0], true\n}\n\nfunc (q testQSpec) IgnoreReqQF(_ *Request, replies []*Response) (*Response, bool) {\n\tif len(replies) < q.quorum {\n\t\treturn nil, false\n\t}\n\texpected := int64(requestValue)\n\tfor _, reply := range replies {\n\t\tif expected != reply.GetResult() {\n\t\t\treturn reply, true\n\t\t}\n\t}\n\treturn replies[0], true\n}\n\nfunc (q testQSpec) WithoutReqQF(replies []*Response) (*Response, bool) {\n\tif len(replies) < q.quorum {\n\t\treturn nil, false\n\t}\n\texpected := int64(requestValue)\n\tfor _, reply := range replies {\n\t\tif expected != reply.GetResult() {\n\t\t\treturn reply, true\n\t\t}\n\t}\n\treturn replies[0], true\n}\n\nfunc BenchmarkQF(b *testing.B) {\n\tfor n := 3; n < 20; n += 2 {\n\t\tquorum := n \/ 2\n\t\tqspec := &testQSpec{quorum: quorum}\n\t\trequest := &Request{Value: 1}\n\n\t\tb.Run(fmt.Sprintf(\"UseReq_%d\", n), func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\treplies := make([]*Response, 0, n)\n\t\t\t\tfor j := 0; j < n; j++ {\n\t\t\t\t\treplies = append(replies, &Response{Result: request.Value})\n\t\t\t\t\tresp, q := qspec.UseReqQF(request, replies)\n\t\t\t\t\tif q {\n\t\t\t\t\t\t_ = resp.GetResult()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tb.Run(fmt.Sprintf(\"IgnoreReq_%d\", n), func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\treplies := make([]*Response, 0, n)\n\t\t\t\tfor j := 0; j < n; j++ {\n\t\t\t\t\treplies = append(replies, &Response{Result: request.Value})\n\t\t\t\t\tresp, q := qspec.IgnoreReqQF(request, replies)\n\t\t\t\t\tif q {\n\t\t\t\t\t\t_ = resp.GetResult()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tb.Run(fmt.Sprintf(\"WithoutReq_%d\", n), func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\treplies := make([]*Response, 0, n)\n\t\t\t\tfor j := 0; j < n; j++ {\n\t\t\t\t\treplies = append(replies, &Response{Result: request.Value})\n\t\t\t\t\tresp, q := qspec.WithoutReqQF(replies)\n\t\t\t\t\tif q {\n\t\t\t\t\t\t_ = resp.GetResult()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n\/\/ TODO(meling) consider making these things generally available, at least for testing, perhaps putting some of these things in an internal package.\n\ntype portSupplier struct {\n\tp int\n\tsync.Mutex\n}\n\nfunc (p *portSupplier) get() int {\n\tp.Lock()\n\t_p := p.p\n\tp.p++\n\tp.Unlock()\n\treturn _p\n}\n\nvar supplier = portSupplier{p: 22332}\n\nfunc getListener() (net.Listener, error) {\n\treturn net.Listen(\"tcp\", fmt.Sprintf(\":%d\", supplier.get()))\n}\n\ntype testSrv struct {\n\tdummy int64\n}\n\nfunc (s testSrv) UseReq(_ context.Context, req *Request) (*Response, error) {\n\treturn &Response{Result: req.GetValue()}, nil\n}\n\nfunc (s testSrv) IgnoreReq(_ context.Context, req *Request) (*Response, error) {\n\treturn &Response{Result: req.GetValue()}, nil\n}\n\nfunc (s testSrv) WithoutReq(_ context.Context, req *Request) (*Response, error) {\n\treturn &Response{Result: req.GetValue()}, nil\n}\n\nfunc setup(b *testing.B, numServers int) ([]*grpc.Server, *Manager, *Configuration) {\n\tquorum := numServers \/ 2\n\tservers := make([]*grpc.Server, numServers)\n\taddrs := make([]string, numServers)\n\tfor i := 0; i < numServers; i++ {\n\t\tsrv := grpc.NewServer()\n\t\tRegisterQuorumFunctionServer(srv, &testSrv{})\n\t\tlis, err := getListener()\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"Failed to listen on port: %v\", err)\n\t\t}\n\t\taddrs[i] = lis.Addr().String()\n\t\tservers[i] = srv\n\t\tgo srv.Serve(lis)\n\t}\n\n\t\/\/ client setup\n\tman, err := NewManager(addrs,\n\t\tWithGrpcDialOptions(grpc.WithInsecure()),\n\t\tWithDialTimeout(10*time.Second),\n\t)\n\tif err != nil {\n\t\tb.Fatalf(\"Failed to create manager: %v\", err)\n\t}\n\tc, err := man.NewConfiguration(man.NodeIDs(), &testQSpec{quorum: quorum})\n\tif err != nil {\n\t\tb.Fatalf(\"Failed to create configuration: %v\", err)\n\t}\n\treturn servers, man, c\n}\n\nfunc BenchmarkUseReq(b *testing.B) {\n\tservers, man, c := setup(b, 5)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\t\/\/ begin benchmarking\n\tfor i := 0; i < b.N; i++ {\n\t\tresp, err := c.UseReq(context.Background(), &Request{Value: int64(requestValue)})\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"UseReq error: %v\", err)\n\t\t}\n\t\t_ = resp.GetResult()\n\t}\n\n\tman.Close()\n\tfor _, srv := range servers {\n\t\tsrv.Stop()\n\t}\n}\n\nfunc BenchmarkIgnoreReq(b *testing.B) {\n\tservers, man, c := setup(b, 5)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\t\/\/ begin benchmarking\n\tfor i := 0; i < b.N; i++ {\n\t\tresp, err := c.IgnoreReq(context.Background(), &Request{Value: int64(requestValue)})\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"IgnoreReq error: %v\", err)\n\t\t}\n\t\t_ = resp.GetResult()\n\t}\n\n\tman.Close()\n\tfor _, srv := range servers {\n\t\tsrv.Stop()\n\t}\n}\n\nfunc BenchmarkWithoutReq(b *testing.B) {\n\tservers, man, c := setup(b, 5)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\t\/\/ begin benchmarking\n\tfor i := 0; i < b.N; i++ {\n\t\tresp, err := c.WithoutReq(context.Background(), &Request{Value: int64(requestValue)})\n\t\tif err != nil {\n\t\t\tb.Fatalf(\"WithoutReq error: %v\", err)\n\t\t}\n\t\t_ = resp.GetResult()\n\t}\n\n\tman.Close()\n\tfor _, srv := range servers {\n\t\tsrv.Stop()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package collectors\n\nimport (\n\t\"github.com\/StackExchange\/scollector\/metadata\"\n\t\"github.com\/StackExchange\/scollector\/opentsdb\"\n\t\"github.com\/StackExchange\/wmi\"\n)\n\nfunc init() {\n\tcollectors = append(collectors, &IntervalCollector{F: c_physical_disk_windows})\n\tcollectors = append(collectors, &IntervalCollector{F: c_diskspace_windows})\n}\n\nfunc c_diskspace_windows() (opentsdb.MultiDataPoint, error) {\n\tconst megabyte = 1048576\n\tvar dst []Win32_PerfFormattedData_PerfDisk_LogicalDisk\n\tvar q = wmi.CreateQuery(&dst, `WHERE Name <> '_Total'`)\n\terr := queryWmi(q, &dst)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar md opentsdb.MultiDataPoint\n\tfor _, v := range dst {\n\t\tAdd(&md, \"win.disk.fs.space_free\", v.FreeMegabytes*megabyte, opentsdb.TagSet{\"partition\": v.Name}, metadata.Gauge, metadata.Bytes, \"\")\n\t\tAdd(&md, osDiskFree, v.FreeMegabytes*megabyte, opentsdb.TagSet{\"disk\": v.Name}, metadata.Gauge, metadata.Bytes, \"\")\n\t\tif v.PercentFreeSpace != 0 {\n\t\t\tspace_total := v.FreeMegabytes * megabyte * 100 \/ v.PercentFreeSpace\n\t\t\tspace_used := space_total - v.FreeMegabytes*megabyte\n\t\t\tAdd(&md, \"win.disk.fs.space_total\", space_total, opentsdb.TagSet{\"partition\": v.Name}, metadata.Gauge, metadata.Bytes, \"\")\n\t\t\tAdd(&md, \"win.disk.fs.space_used\", space_used, opentsdb.TagSet{\"partition\": v.Name}, metadata.Gauge, metadata.Bytes, \"\")\n\t\t\tAdd(&md, osDiskTotal, space_total, opentsdb.TagSet{\"disk\": v.Name}, metadata.Gauge, metadata.Bytes, \"\")\n\t\t\tAdd(&md, osDiskUsed, space_used, opentsdb.TagSet{\"disk\": v.Name}, metadata.Gauge, metadata.Bytes, \"\")\n\t\t}\n\n\t\tAdd(&md, \"win.disk.fs.percent_free\", v.PercentFreeSpace, opentsdb.TagSet{\"partition\": v.Name}, metadata.Gauge, metadata.Pct, \"\")\n\t\tAdd(&md, osDiskPctFree, v.PercentFreeSpace, opentsdb.TagSet{\"disk\": v.Name}, metadata.Gauge, metadata.Pct, \"\")\n\t}\n\treturn md, nil\n}\n\ntype Win32_PerfFormattedData_PerfDisk_LogicalDisk struct {\n\tFreeMegabytes uint64\n\tName string\n\tPercentFreeSpace uint64\n}\n\nfunc c_physical_disk_windows() (opentsdb.MultiDataPoint, error) {\n\tvar dst []Win32_PerfRawData_PerfDisk_PhysicalDisk\n\tvar q = wmi.CreateQuery(&dst, `WHERE Name <> '_Total'`)\n\terr := queryWmi(q, &dst)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar md opentsdb.MultiDataPoint\n\tfor _, v := range dst {\n\t\tAdd(&md, \"win.disk.duration\", v.AvgDiskSecPerRead\/1000000, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"read\"}, metadata.Counter, metadata.MilliSecond, \"Time, in milliseconds, of a read from the disk.\")\n\t\tAdd(&md, \"win.disk.duration\", v.AvgDiskSecPerWrite\/1000000, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"write\"}, metadata.Counter, metadata.MilliSecond, \"Time, in milliseconds, of a write to the disk.\")\n\t\tAdd(&md, \"win.disk.queue\", v.AvgDiskReadQueueLength\/10000000, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"read\"}, metadata.Counter, metadata.Operation, \"Number of read requests that were queued for the disk.\")\n\t\tAdd(&md, \"win.disk.queue\", v.AvgDiskWriteQueueLength\/10000000, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"write\"}, metadata.Counter, metadata.Operation, \"Number of write requests that were queued for the disk.\")\n\t\tAdd(&md, \"win.disk.ops\", v.DiskReadsPerSec, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"read\"}, metadata.Counter, metadata.PerSecond, \"Number of read operations on the disk.\")\n\t\tAdd(&md, \"win.disk.ops\", v.DiskWritesPerSec, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"write\"}, metadata.Counter, metadata.PerSecond, \"Number of write operations on the disk.\")\n\t\tAdd(&md, \"win.disk.bytes\", v.DiskReadBytesPerSec, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"read\"}, metadata.Counter, metadata.BytesPerSecond, \"Number of bytes read from the disk.\")\n\t\tAdd(&md, \"win.disk.bytes\", v.DiskWriteBytesPerSec, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"write\"}, metadata.Counter, metadata.BytesPerSecond, \"Number of bytes written to the disk.\")\n\t\tAdd(&md, \"win.disk.percent_time\", v.PercentDiskReadTime\/100000, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"read\"}, metadata.Counter, metadata.Pct, \"Percentage of time that the disk was busy servicing read requests.\")\n\t\tAdd(&md, \"win.disk.percent_time\", v.PercentDiskWriteTime\/100000, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"write\"}, metadata.Counter, metadata.Pct, \"Percentage of time that the disk was busy servicing write requests.\")\n\t\tAdd(&md, \"win.disk.spltio\", v.SplitIOPerSec, opentsdb.TagSet{\"disk\": v.Name}, metadata.Counter, metadata.PerSecond, \"Number of requests to the disk that were split into multiple requests due to size or fragmentation.\")\n\t}\n\treturn md, nil\n}\n\n\/\/See msdn for counter types http:\/\/msdn.microsoft.com\/en-us\/library\/ms804035.aspx\ntype Win32_PerfRawData_PerfDisk_PhysicalDisk struct {\n\tAvgDiskReadQueueLength uint64\n\tAvgDiskSecPerRead uint32\n\tAvgDiskSecPerWrite uint32\n\tAvgDiskWriteQueueLength uint64\n\tDiskReadBytesPerSec uint64\n\tDiskReadsPerSec uint32\n\tDiskWriteBytesPerSec uint64\n\tDiskWritesPerSec uint32\n\tName string\n\tPercentDiskReadTime uint64\n\tPercentDiskWriteTime uint64\n\tSplitIOPerSec uint32\n}\n<commit_msg>cmd\/scollector: Move 100nS Sample conversion factors to a constant with documentation<commit_after>package collectors\n\nimport (\n\t\"github.com\/StackExchange\/scollector\/metadata\"\n\t\"github.com\/StackExchange\/scollector\/opentsdb\"\n\t\"github.com\/StackExchange\/wmi\"\n)\n\nfunc init() {\n\tcollectors = append(collectors, &IntervalCollector{F: c_physical_disk_windows})\n\tcollectors = append(collectors, &IntervalCollector{F: c_diskspace_windows})\n}\n\nconst (\n\t\/\/Converts 100nS samples to 1S samples\n\twinDisk100nS_1S = 10000000\n\n\t\/\/Converts 100nS samples to 1mS samples\n\twinDisk100nS_1mS = 1000000\n\n\t\/\/Converts 100nS samples to 0-100 Percent samples\n\twinDisk100nS_Pct = 100000\n)\n\nfunc c_diskspace_windows() (opentsdb.MultiDataPoint, error) {\n\tconst megabyte = 1048576\n\tvar dst []Win32_PerfFormattedData_PerfDisk_LogicalDisk\n\tvar q = wmi.CreateQuery(&dst, `WHERE Name <> '_Total'`)\n\terr := queryWmi(q, &dst)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar md opentsdb.MultiDataPoint\n\tfor _, v := range dst {\n\t\tAdd(&md, \"win.disk.fs.space_free\", v.FreeMegabytes*megabyte, opentsdb.TagSet{\"partition\": v.Name}, metadata.Gauge, metadata.Bytes, \"\")\n\t\tAdd(&md, osDiskFree, v.FreeMegabytes*megabyte, opentsdb.TagSet{\"disk\": v.Name}, metadata.Gauge, metadata.Bytes, \"\")\n\t\tif v.PercentFreeSpace != 0 {\n\t\t\tspace_total := v.FreeMegabytes * megabyte * 100 \/ v.PercentFreeSpace\n\t\t\tspace_used := space_total - v.FreeMegabytes*megabyte\n\t\t\tAdd(&md, \"win.disk.fs.space_total\", space_total, opentsdb.TagSet{\"partition\": v.Name}, metadata.Gauge, metadata.Bytes, \"\")\n\t\t\tAdd(&md, \"win.disk.fs.space_used\", space_used, opentsdb.TagSet{\"partition\": v.Name}, metadata.Gauge, metadata.Bytes, \"\")\n\t\t\tAdd(&md, osDiskTotal, space_total, opentsdb.TagSet{\"disk\": v.Name}, metadata.Gauge, metadata.Bytes, \"\")\n\t\t\tAdd(&md, osDiskUsed, space_used, opentsdb.TagSet{\"disk\": v.Name}, metadata.Gauge, metadata.Bytes, \"\")\n\t\t}\n\n\t\tAdd(&md, \"win.disk.fs.percent_free\", v.PercentFreeSpace, opentsdb.TagSet{\"partition\": v.Name}, metadata.Gauge, metadata.Pct, \"\")\n\t\tAdd(&md, osDiskPctFree, v.PercentFreeSpace, opentsdb.TagSet{\"disk\": v.Name}, metadata.Gauge, metadata.Pct, \"\")\n\t}\n\treturn md, nil\n}\n\ntype Win32_PerfFormattedData_PerfDisk_LogicalDisk struct {\n\tFreeMegabytes uint64\n\tName string\n\tPercentFreeSpace uint64\n}\n\nfunc c_physical_disk_windows() (opentsdb.MultiDataPoint, error) {\n\tvar dst []Win32_PerfRawData_PerfDisk_PhysicalDisk\n\tvar q = wmi.CreateQuery(&dst, `WHERE Name <> '_Total'`)\n\terr := queryWmi(q, &dst)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar md opentsdb.MultiDataPoint\n\tfor _, v := range dst {\n\t\tAdd(&md, \"win.disk.duration\", v.AvgDiskSecPerRead\/winDisk100nS_1mS, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"read\"}, metadata.Counter, metadata.MilliSecond, \"Time, in milliseconds, of a read from the disk.\")\n\t\tAdd(&md, \"win.disk.duration\", v.AvgDiskSecPerWrite\/winDisk100nS_1mS, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"write\"}, metadata.Counter, metadata.MilliSecond, \"Time, in milliseconds, of a write to the disk.\")\n\t\tAdd(&md, \"win.disk.queue\", v.AvgDiskReadQueueLength\/winDisk100nS_1S, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"read\"}, metadata.Counter, metadata.Operation, \"Number of read requests that were queued for the disk.\")\n\t\tAdd(&md, \"win.disk.queue\", v.AvgDiskWriteQueueLength\/winDisk100nS_1S, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"write\"}, metadata.Counter, metadata.Operation, \"Number of write requests that were queued for the disk.\")\n\t\tAdd(&md, \"win.disk.ops\", v.DiskReadsPerSec, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"read\"}, metadata.Counter, metadata.PerSecond, \"Number of read operations on the disk.\")\n\t\tAdd(&md, \"win.disk.ops\", v.DiskWritesPerSec, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"write\"}, metadata.Counter, metadata.PerSecond, \"Number of write operations on the disk.\")\n\t\tAdd(&md, \"win.disk.bytes\", v.DiskReadBytesPerSec, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"read\"}, metadata.Counter, metadata.BytesPerSecond, \"Number of bytes read from the disk.\")\n\t\tAdd(&md, \"win.disk.bytes\", v.DiskWriteBytesPerSec, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"write\"}, metadata.Counter, metadata.BytesPerSecond, \"Number of bytes written to the disk.\")\n\t\tAdd(&md, \"win.disk.percent_time\", v.PercentDiskReadTime\/winDisk100nS_Pct, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"read\"}, metadata.Counter, metadata.Pct, \"Percentage of time that the disk was busy servicing read requests.\")\n\t\tAdd(&md, \"win.disk.percent_time\", v.PercentDiskWriteTime\/winDisk100nS_Pct, opentsdb.TagSet{\"disk\": v.Name, \"type\": \"write\"}, metadata.Counter, metadata.Pct, \"Percentage of time that the disk was busy servicing write requests.\")\n\t\tAdd(&md, \"win.disk.spltio\", v.SplitIOPerSec, opentsdb.TagSet{\"disk\": v.Name}, metadata.Counter, metadata.PerSecond, \"Number of requests to the disk that were split into multiple requests due to size or fragmentation.\")\n\t}\n\treturn md, nil\n}\n\n\/\/See msdn for counter types http:\/\/msdn.microsoft.com\/en-us\/library\/ms804035.aspx\ntype Win32_PerfRawData_PerfDisk_PhysicalDisk struct {\n\tAvgDiskReadQueueLength uint64\n\tAvgDiskSecPerRead uint32\n\tAvgDiskSecPerWrite uint32\n\tAvgDiskWriteQueueLength uint64\n\tDiskReadBytesPerSec uint64\n\tDiskReadsPerSec uint32\n\tDiskWriteBytesPerSec uint64\n\tDiskWritesPerSec uint32\n\tName string\n\tPercentDiskReadTime uint64\n\tPercentDiskWriteTime uint64\n\tSplitIOPerSec uint32\n}\n<|endoftext|>"} {"text":"<commit_before>package compressor\n\nimport (\n\t\"archive\/tar\"\n\t\"compress\/gzip\"\n\t\"io\"\n\t\"io\/ioutil\"\n)\n\ntype TarCompressor struct{}\n\nfunc NewTarCompressor() *TarCompressor {\n\treturn &TarCompressor{}\n}\n\nfunc (c *TarCompressor) Compress(compressedFile io.Writer, targetDir string, files []string) error {\n\n\tgw := gzip.NewWriter(compressedFile)\n\tdefer gw.Close()\n\ttw := tar.NewWriter(gw)\n\n\tfor _, file := range files {\n\t\tbody, _ := ioutil.ReadFile(targetDir + \"\/\" + file)\n\t\thdr := &tar.Header{\n\t\t\tName: file,\n\t\t\tMode: 0600,\n\t\t\tSize: int64(len(body)),\n\t\t}\n\t\tif err := tw.WriteHeader(hdr); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif _, err := tw.Write(body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := tw.Close(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>tarの場合でもディレクトリが格納されるように対応<commit_after>package compressor\n\nimport (\n\t\"archive\/tar\"\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n)\n\ntype TarCompressor struct{}\n\nfunc NewTarCompressor() *TarCompressor {\n\treturn &TarCompressor{}\n}\n\nfunc (c *TarCompressor) Compress(compressedFile io.Writer, targetDir string, files []string) error {\n\n\tgw := gzip.NewWriter(compressedFile)\n\tdefer gw.Close()\n\n\ttw := tar.NewWriter(gw)\n\n\tfor _, filename := range files {\n\t\tfilepath := fmt.Sprintf(\"%s\/%s\", targetDir, filename)\n\n\t\tinfo, err := os.Stat(filepath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif info.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tfile, err := os.Open(filepath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer file.Close()\n\n\t\thdr, err := tar.FileInfoHeader(info, \"\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\thdr.Name = filename\n\t\tif err := tw.WriteHeader(hdr); err != nil {\n\t\t\tlog.Print(err)\n\t\t\treturn err\n\t\t}\n\n\t\tif _, err = io.Copy(tw, file); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := tw.Close(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package crawler\n\nimport (\n\t\"bytes\"\n\t\"compress\/zlib\"\n\t\"crypto\/md5\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\"\n)\n\n\/\/ CheckDb - check db\nfunc CheckDb() error {\n\tdb := new(DB)\n\terr := db.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer db.Close()\n\n\tisSuccess := true\n\tmaxLen := 0\n\tvar maxLenURL string\n\tminLen := math.MaxUint32\n\tvar minLenURL string\n\thashMap := make(map[[16]byte]string)\n\terr = db.View(func(tx *Tx) error {\n\t\tc := tx.Bucket(DbBucketContents).Cursor()\n\n\t\tvar content DbContent\n\t\tfor url, contentBytes := c.First(); url != nil; url, contentBytes = c.Next() {\n\t\t\t_, err := content.UnmarshalMsg(contentBytes)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Error unmarshal value in db for url %s, message: %s\\n\", url, err)\n\t\t\t\tisSuccess = false\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tr, err := zlib.NewReader(bytes.NewReader(content.Content))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Error unzip content for url %s, message: %s\\n\", url, err)\n\t\t\t\tisSuccess = false\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcontentOrig, err := ioutil.ReadAll(r)\n\t\t\tr.Close()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Error read unzip content for url %s, message: %s\\n\", url, err)\n\t\t\t\tisSuccess = false\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlenContent := len(contentOrig)\n\t\t\tif lenContent < minLen {\n\t\t\t\tminLen = lenContent\n\t\t\t\tminLenURL = string(url)\n\t\t\t}\n\t\t\tif lenContent > maxLen {\n\t\t\t\tmaxLen = lenContent\n\t\t\t\tmaxLenURL = string(url)\n\t\t\t}\n\n\t\t\thash := md5.Sum(contentOrig)\n\t\t\tif hash != content.Hash {\n\t\t\t\tfmt.Printf(\"Error content hash does not match for url %s\\n\", url)\n\t\t\t\tisSuccess = false\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif val, ok := hashMap[hash]; ok {\n\t\t\t\tfmt.Printf(\"Duplicated pages content:\\n%s\\n%s\\n\\n\", url, val)\n\t\t\t} else {\n\t\t\t\thashMap[hash] = string(url[:])\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif isSuccess {\n\t\tfmt.Printf(\"Min len = %d (%s)\\n\", minLen, minLenURL)\n\t\tfmt.Printf(\"Max len = %d (%s)\\n\", maxLen, maxLenURL)\n\t\tfmt.Println(\"Checking ended successfully\")\n\t}\n\treturn err\n}\n<commit_msg>рефакторинг db_check<commit_after>package crawler\n\nimport (\n\t\"bytes\"\n\t\"compress\/zlib\"\n\t\"crypto\/md5\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\"\n)\n\n\/\/ CheckDb - check db\nfunc CheckDb() error {\n\tdb := new(DB)\n\terr := db.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer db.Close()\n\n\tisSuccess := true\n\tmaxLen := 0\n\tvar maxLenURL string\n\tminLen := math.MaxUint32\n\tvar minLenURL string\n\thashMap := make(map[[16]byte]string)\n\terr = db.View(func(tx *Tx) error {\n\t\tc := tx.Bucket(DbBucketContents).Cursor()\n\n\t\tvar content DbContent\n\t\tfor url, contentBytes := c.First(); url != nil; url, contentBytes = c.Next() {\n\t\t\t_, err := content.UnmarshalMsg(contentBytes)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Error unmarshal value in db for url %s, message: %s\\n\", urlRaw, err)\n\t\t\t\tisSuccess = false\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tr, err := zlib.NewReader(bytes.NewReader(content.Content))\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Error unzip content for url %s, message: %s\\n\", urlRaw, err)\n\t\t\t\tisSuccess = false\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcontentOrig, err := ioutil.ReadAll(r)\n\t\t\tr.Close()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"Error read unzip content for url %s, message: %s\\n\", urlRaw, err)\n\t\t\t\tisSuccess = false\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlenContent := len(contentOrig)\n\t\t\tif lenContent < minLen {\n\t\t\t\tminLen = lenContent\n\t\t\t\tminLenURL = string(urlRaw)\n\t\t\t}\n\t\t\tif lenContent > maxLen {\n\t\t\t\tmaxLen = lenContent\n\t\t\t\tmaxLenURL = string(urlRaw)\n\t\t\t}\n\n\t\t\thash := md5.Sum(contentOrig)\n\t\t\tif hash != content.Hash {\n\t\t\t\tfmt.Printf(\"Error content hash does not match for url %s\\n\", urlRaw)\n\t\t\t\tisSuccess = false\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif val, ok := hashMap[hash]; ok {\n\t\t\t\tfmt.Printf(\"Duplicated pages content:\\n%s\\n%s\\n\\n\", urlRaw, val)\n\t\t\t} else {\n\t\t\t\thashMap[hash] = string(urlRaw)\n\t\t\t}\n\t\t})\n\n\t\treturn nil\n\t})\n\n\tif isSuccess {\n\t\tfmt.Printf(\"Min len = %d (%s)\\n\", minLen, minLenURL)\n\t\tfmt.Printf(\"Max len = %d (%s)\\n\", maxLen, maxLenURL)\n\t\tfmt.Println(\"Checking ended successfully\")\n\t}\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package version\n\n\/\/ Version of Functions\nvar Version = \"0.3.67\"\n<commit_msg>functions: 0.3.68 release [skip ci]<commit_after>package version\n\n\/\/ Version of Functions\nvar Version = \"0.3.68\"\n<|endoftext|>"} {"text":"<commit_before>package version\n\n\/\/ Version of Functions\nvar Version = \"0.3.59\"\n<commit_msg>functions: 0.3.60 release [skip ci]<commit_after>package version\n\n\/\/ Version of Functions\nvar Version = \"0.3.60\"\n<|endoftext|>"} {"text":"<commit_before>package version\n\n\/\/ Version of Functions\nvar Version = \"0.3.236\"\n<commit_msg>fnserver: 0.3.237 release [skip ci]<commit_after>package version\n\n\/\/ Version of Functions\nvar Version = \"0.3.237\"\n<|endoftext|>"} {"text":"<commit_before>package gateway\n\nimport (\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/NebulousLabs\/Sia\/build\"\n\t\"github.com\/NebulousLabs\/Sia\/modules\"\n)\n\n\/\/ newTestingGateway returns a gateway read to use in a testing environment.\nfunc newTestingGateway(name string, t *testing.T) *Gateway {\n\tg, err := New(\"localhost:0\", build.TempDir(\"gateway\", name))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Manually add myAddr as a node. This is necessary because g.addNode\n\t\/\/ rejects loopback addresses.\n\tid := g.mu.Lock()\n\tg.nodes[g.myAddr] = struct{}{}\n\tg.mu.Unlock(id)\n\treturn g\n}\n\nfunc TestAddress(t *testing.T) {\n\tg := newTestingGateway(\"TestAddress\", t)\n\tdefer g.Close()\n\tif g.Address() != g.myAddr {\n\t\tt.Fatal(\"Address does not return g.myAddr\")\n\t}\n\tport := modules.NetAddress(g.listener.Addr().String()).Port()\n\texpAddr := modules.NetAddress(net.JoinHostPort(\"::\", port))\n\tif g.Address() != expAddr {\n\t\tt.Fatalf(\"Wrong address: expected %v, got %v\", expAddr, g.Address())\n\t}\n}\n\nfunc TestPeers(t *testing.T) {\n\tg1 := newTestingGateway(\"TestRPC1\", t)\n\tdefer g1.Close()\n\tg2 := newTestingGateway(\"TestRPC2\", t)\n\tdefer g2.Close()\n\terr := g1.Connect(g2.Address())\n\tif err != nil {\n\t\tt.Fatal(\"failed to connect:\", err)\n\t}\n\tpeers := g1.Peers()\n\tif len(peers) != 1 || peers[0].NetAddress != g2.Address() {\n\t\tt.Fatal(\"g1 has bad peer list:\", peers)\n\t}\n\terr = g1.Disconnect(g2.Address())\n\tif err != nil {\n\t\tt.Fatal(\"failed to disconnect:\", err)\n\t}\n\tpeers = g1.Peers()\n\tif len(peers) != 0 {\n\t\tt.Fatal(\"g1 has peers after disconnect:\", peers)\n\t}\n}\n\nfunc TestNew(t *testing.T) {\n\tif _, err := New(\"\", \"\"); err == nil {\n\t\tt.Fatal(\"expecting persistDir error, got nil\")\n\t}\n\tif _, err := New(\"localhost:0\", \"\"); err == nil {\n\t\tt.Fatal(\"expecting persistDir error, got nil\")\n\t}\n\tif g, err := New(\"foo\", build.TempDir(\"gateway\", \"TestNew1\")); err == nil {\n\t\tt.Fatal(\"expecting listener error, got nil\", g.myAddr)\n\t}\n\t\/\/ create corrupted nodes.json\n\tdir := build.TempDir(\"gateway\", \"TestNew2\")\n\tos.MkdirAll(dir, 0700)\n\terr := ioutil.WriteFile(filepath.Join(dir, \"nodes.json\"), []byte{1, 2, 3}, 0660)\n\tif err != nil {\n\t\tt.Fatal(\"couldn't create corrupted file:\", err)\n\t}\n\tif _, err := New(\"localhost:0\", dir); err == nil {\n\t\tt.Fatal(\"expected load error, got nil\")\n\t}\n}\n<commit_msg>Change TestAddress to make it pass with loopback addresses<commit_after>package gateway\n\nimport (\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/NebulousLabs\/Sia\/build\"\n\t\"github.com\/NebulousLabs\/Sia\/modules\"\n)\n\n\/\/ newTestingGateway returns a gateway read to use in a testing environment.\nfunc newTestingGateway(name string, t *testing.T) *Gateway {\n\tg, err := New(\"localhost:0\", build.TempDir(\"gateway\", name))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Manually add myAddr as a node. This is necessary because g.addNode\n\t\/\/ rejects loopback addresses.\n\tid := g.mu.Lock()\n\tg.nodes[g.myAddr] = struct{}{}\n\tg.mu.Unlock(id)\n\treturn g\n}\n\n\/\/ TestAddress tests that Gateway.Address returns the address of its listener.\n\/\/ Also tests that the address is not unspecified and is a loopback address.\n\/\/ The address must be a loopback address for testing.\nfunc TestAddress(t *testing.T) {\n\tg := newTestingGateway(\"TestAddress\", t)\n\tdefer g.Close()\n\tif g.Address() != g.myAddr {\n\t\tt.Fatal(\"Address does not return g.myAddr\")\n\t}\n\tif g.Address() != modules.NetAddress(g.listener.Addr().String()) {\n\t\tt.Fatalf(\"wrong address: expected %v, got %v\", g.listener.Addr(), g.Address())\n\t}\n\thost := modules.NetAddress(g.listener.Addr().String()).Host()\n\tip := net.ParseIP(host)\n\tif ip.IsUnspecified() {\n\t\tt.Fatal(\"expected a non-unspecified address\")\n\t}\n\tif !ip.IsLoopback() {\n\t\tt.Fatal(\"expected a loopback address\")\n\t}\n}\n\nfunc TestPeers(t *testing.T) {\n\tg1 := newTestingGateway(\"TestRPC1\", t)\n\tdefer g1.Close()\n\tg2 := newTestingGateway(\"TestRPC2\", t)\n\tdefer g2.Close()\n\terr := g1.Connect(g2.Address())\n\tif err != nil {\n\t\tt.Fatal(\"failed to connect:\", err)\n\t}\n\tpeers := g1.Peers()\n\tif len(peers) != 1 || peers[0].NetAddress != g2.Address() {\n\t\tt.Fatal(\"g1 has bad peer list:\", peers)\n\t}\n\terr = g1.Disconnect(g2.Address())\n\tif err != nil {\n\t\tt.Fatal(\"failed to disconnect:\", err)\n\t}\n\tpeers = g1.Peers()\n\tif len(peers) != 0 {\n\t\tt.Fatal(\"g1 has peers after disconnect:\", peers)\n\t}\n}\n\nfunc TestNew(t *testing.T) {\n\tif _, err := New(\"\", \"\"); err == nil {\n\t\tt.Fatal(\"expecting persistDir error, got nil\")\n\t}\n\tif _, err := New(\"localhost:0\", \"\"); err == nil {\n\t\tt.Fatal(\"expecting persistDir error, got nil\")\n\t}\n\tif g, err := New(\"foo\", build.TempDir(\"gateway\", \"TestNew1\")); err == nil {\n\t\tt.Fatal(\"expecting listener error, got nil\", g.myAddr)\n\t}\n\t\/\/ create corrupted nodes.json\n\tdir := build.TempDir(\"gateway\", \"TestNew2\")\n\tos.MkdirAll(dir, 0700)\n\terr := ioutil.WriteFile(filepath.Join(dir, \"nodes.json\"), []byte{1, 2, 3}, 0660)\n\tif err != nil {\n\t\tt.Fatal(\"couldn't create corrupted file:\", err)\n\t}\n\tif _, err := New(\"localhost:0\", dir); err == nil {\n\t\tt.Fatal(\"expected load error, got nil\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package cli\n\nimport (\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"testing\"\n)\n\nconst terraformCodeExampleOutputOnly = `\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleGcpProvider = `\nprovider \"google\" {\n credentials = file(\"account.json\")\n project = \"my-project-id\"\n region = \"us-central1\"\n}\n\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleAwsProviderEmptyOriginal = `\nprovider \"aws\" {\n}\n\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleAwsProviderRegionOverridenExpected = `\nprovider \"aws\" {\n region = \"eu-west-1\"\n}\n\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleAwsProviderRegionVersionProfileOverridenExpected = `\nprovider \"aws\" {\n region = \"eu-west-1\"\n version = \"0.3.0\"\n profile = \"foo\"\n}\n\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleAwsProviderNonEmptyOriginal = `\nprovider \"aws\" {\n region = var.aws_region\n version = \"0.2.0\"\n}\n\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleAwsProviderRegionOverridenVersionNotOverriddenExpected = `\nprovider \"aws\" {\n region = \"eu-west-1\"\n version = \"0.2.0\"\n}\n\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleAwsMultipleProvidersOriginal = `\nprovider \"aws\" {\n region = var.aws_region\n version = \"0.2.0\"\n}\n\nprovider \"aws\" {\n alias = \"another\"\n region = var.aws_region\n version = \"0.2.0\"\n}\n\nresource \"aws_instance\" \"example\" {\n\n}\n\nprovider \"google\" {\n credentials = file(\"account.json\")\n project = \"my-project-id\"\n region = \"us-central1\"\n}\n\nprovider \"aws\" {\n alias = \"yet another\"\n region = var.aws_region\n}\n\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleAwsMultipleProvidersRegionOverridenExpected = `\nprovider \"aws\" {\n region = \"eu-west-1\"\n version = \"0.2.0\"\n}\n\nprovider \"aws\" {\n alias = \"another\"\n region = \"eu-west-1\"\n version = \"0.2.0\"\n}\n\nresource \"aws_instance\" \"example\" {\n\n}\n\nprovider \"google\" {\n credentials = file(\"account.json\")\n project = \"my-project-id\"\n region = \"us-central1\"\n}\n\nprovider \"aws\" {\n alias = \"yet another\"\n region = \"eu-west-1\"\n}\n\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleAwsMultipleProvidersRegionProfileVersionOverridenExpected = `\nprovider \"aws\" {\n region = \"eu-west-1\"\n version = \"0.3.0\"\n profile = \"foo\"\n}\n\nprovider \"aws\" {\n alias = \"another\"\n region = \"eu-west-1\"\n version = \"0.3.0\"\n profile = \"foo\"\n}\n\nresource \"aws_instance\" \"example\" {\n\n}\n\nprovider \"google\" {\n credentials = file(\"account.json\")\n project = \"my-project-id\"\n region = \"us-central1\"\n}\n\nprovider \"aws\" {\n alias = \"yet another\"\n region = \"eu-west-1\"\n version = \"0.3.0\"\n profile = \"foo\"\n}\n\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleAwsMultipleProvidersNonEmptyWithCommentsOriginal = `\n# Make sure comments are maintained\n# And don't interfere with parsing\nprovider \"aws\" {\n # Make sure comments are maintained\n # And don't interfere with parsing\n region = var.aws_region\n\n # Make sure comments are maintained\n # And don't interfere with parsing\n version = \"0.2.0\"\n}\n\n# Make sure comments are maintained\n# And don't interfere with parsing\nprovider \"aws\" {\n # Make sure comments are maintained\n # And don't interfere with parsing\n region = var.aws_region\n\n # Make sure comments are maintained\n # And don't interfere with parsing\n version = \"0.2.0\"\n\n # Make sure comments are maintained\n # And don't interfere with parsing\n alias = \"secondary\"\n}\n`\n\nconst terraformCodeExampleAwsMultipleProvidersNonEmptyWithCommentsRegionOverriddenExpected = `\n# Make sure comments are maintained\n# And don't interfere with parsing\nprovider \"aws\" {\n # Make sure comments are maintained\n # And don't interfere with parsing\n region = \"eu-west-1\"\n\n # Make sure comments are maintained\n # And don't interfere with parsing\n version = \"0.2.0\"\n}\n\n# Make sure comments are maintained\n# And don't interfere with parsing\nprovider \"aws\" {\n # Make sure comments are maintained\n # And don't interfere with parsing\n region = \"eu-west-1\"\n\n # Make sure comments are maintained\n # And don't interfere with parsing\n version = \"0.2.0\"\n\n # Make sure comments are maintained\n # And don't interfere with parsing\n alias = \"secondary\"\n}\n`\n\nconst terraformCodeExampleAwsMultipleProvidersNonEmptyWithCommentsRegionVersionProfileOverriddenExpected = `\n# Make sure comments are maintained\n# And don't interfere with parsing\nprovider \"aws\" {\n # Make sure comments are maintained\n # And don't interfere with parsing\n region = \"eu-west-1\"\n\n # Make sure comments are maintained\n # And don't interfere with parsing\n version = \"0.3.0\"\n profile = \"foo\"\n}\n\n# Make sure comments are maintained\n# And don't interfere with parsing\nprovider \"aws\" {\n # Make sure comments are maintained\n # And don't interfere with parsing\n region = \"eu-west-1\"\n\n # Make sure comments are maintained\n # And don't interfere with parsing\n version = \"0.3.0\"\n\n # Make sure comments are maintained\n # And don't interfere with parsing\n alias = \"secondary\"\n profile = \"foo\"\n}\n`\n\nfunc TestPatchAwsProviderInTerraformCodeHappyPath(t *testing.T) {\n\tt.Parallel()\n\n\ttestCases := []struct {\n\t\ttestName string\n\t\toriginalTerraformCode string\n\t\tattributesToOverride map[string]string\n\t\texpectedCodeWasUpdated bool\n\t\texpectedTerraformCode string\n\t}{\n\t\t{\"empty\", \"\", nil, false, \"\"},\n\t\t{\"empty with attributes\", \"\", map[string]string{\"region\": \"eu-west-1\"}, false, \"\"},\n\t\t{\"no provider\", terraformCodeExampleOutputOnly, map[string]string{\"region\": \"eu-west-1\"}, false, terraformCodeExampleOutputOnly},\n\t\t{\"no aws provider\", terraformCodeExampleGcpProvider, map[string]string{\"region\": \"eu-west-1\"}, false, terraformCodeExampleGcpProvider},\n\t\t{\"one empty aws provider, but no overrides\", terraformCodeExampleAwsProviderEmptyOriginal, nil, false, terraformCodeExampleAwsProviderEmptyOriginal},\n\t\t{\"one empty aws provider, with region override\", terraformCodeExampleAwsProviderEmptyOriginal, map[string]string{\"region\": \"eu-west-1\"}, true, terraformCodeExampleAwsProviderRegionOverridenExpected},\n\t\t{\"one empty aws provider, with region, version, profile override\", terraformCodeExampleAwsProviderEmptyOriginal, map[string]string{\"region\": \"eu-west-1\", \"version\": \"0.3.0\", \"profile\": \"foo\"}, true, terraformCodeExampleAwsProviderRegionVersionProfileOverridenExpected},\n\t\t{\"one non-empty aws provider, but no overrides\", terraformCodeExampleAwsProviderNonEmptyOriginal, nil, false, terraformCodeExampleAwsProviderNonEmptyOriginal},\n\t\t{\"one non-empty aws provider, with region override\", terraformCodeExampleAwsProviderNonEmptyOriginal, map[string]string{\"region\": \"eu-west-1\"}, true, terraformCodeExampleAwsProviderRegionOverridenVersionNotOverriddenExpected},\n\t\t{\"one non-empty aws provider, with region, version, profile override\", terraformCodeExampleAwsProviderNonEmptyOriginal, map[string]string{\"region\": \"eu-west-1\", \"version\": \"0.3.0\", \"profile\": \"foo\"}, true, terraformCodeExampleAwsProviderRegionVersionProfileOverridenExpected},\n\t\t{\"multiple providers, but no overrides\", terraformCodeExampleAwsMultipleProvidersOriginal, nil, false, terraformCodeExampleAwsMultipleProvidersOriginal},\n\t\t{\"multiple providers, with region override\", terraformCodeExampleAwsMultipleProvidersOriginal, map[string]string{\"region\": \"eu-west-1\"}, true, terraformCodeExampleAwsMultipleProvidersRegionOverridenExpected},\n\t\t{\"multiple providers, with region, version, profile override\", terraformCodeExampleAwsMultipleProvidersOriginal, map[string]string{\"region\": \"eu-west-1\", \"version\": \"0.3.0\", \"profile\": \"foo\"}, true, terraformCodeExampleAwsMultipleProvidersRegionProfileVersionOverridenExpected},\n\t\t{\"multiple providers with comments, but no overrides\", terraformCodeExampleAwsMultipleProvidersNonEmptyWithCommentsOriginal, nil, false, terraformCodeExampleAwsMultipleProvidersNonEmptyWithCommentsOriginal},\n\t\t{\"multiple providers with comments, with region override\", terraformCodeExampleAwsMultipleProvidersNonEmptyWithCommentsOriginal, map[string]string{\"region\": \"eu-west-1\"}, true, terraformCodeExampleAwsMultipleProvidersNonEmptyWithCommentsRegionOverriddenExpected},\n\t\t{\"multiple providers with comments, with region, version, profile override\", terraformCodeExampleAwsMultipleProvidersNonEmptyWithCommentsOriginal, map[string]string{\"region\": \"eu-west-1\", \"version\": \"0.3.0\", \"profile\": \"foo\"}, true, terraformCodeExampleAwsMultipleProvidersNonEmptyWithCommentsRegionVersionProfileOverriddenExpected},\n\t}\n\n\tfor _, testCase := range testCases {\n\t\t\/\/ The following is necessary to make sure testCase's values don't\n\t\t\/\/ get updated due to concurrency within the scope of t.Run(..) below\n\t\ttestCase := testCase\n\t\tt.Run(testCase.testName, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tactualTerraformCode, actualCodeWasUpdated, err := patchAwsProviderInTerraformCode(testCase.originalTerraformCode, \"test.tf\", testCase.attributesToOverride)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, testCase.expectedCodeWasUpdated, actualCodeWasUpdated)\n\t\t\tassert.Equal(t, testCase.expectedTerraformCode, actualTerraformCode)\n\t\t})\n\t}\n}\n<commit_msg>Get rid of profile from test<commit_after>package cli\n\nimport (\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"testing\"\n)\n\nconst terraformCodeExampleOutputOnly = `\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleGcpProvider = `\nprovider \"google\" {\n credentials = file(\"account.json\")\n project = \"my-project-id\"\n region = \"us-central1\"\n}\n\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleAwsProviderEmptyOriginal = `\nprovider \"aws\" {\n}\n\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleAwsProviderRegionOverridenExpected = `\nprovider \"aws\" {\n region = \"eu-west-1\"\n}\n\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleAwsProviderRegionVersionOverridenExpected = `\nprovider \"aws\" {\n region = \"eu-west-1\"\n version = \"0.3.0\"\n}\n\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleAwsProviderNonEmptyOriginal = `\nprovider \"aws\" {\n region = var.aws_region\n version = \"0.2.0\"\n}\n\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleAwsProviderRegionOverridenVersionNotOverriddenExpected = `\nprovider \"aws\" {\n region = \"eu-west-1\"\n version = \"0.2.0\"\n}\n\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleAwsMultipleProvidersOriginal = `\nprovider \"aws\" {\n region = var.aws_region\n version = \"0.2.0\"\n}\n\nprovider \"aws\" {\n alias = \"another\"\n region = var.aws_region\n version = \"0.2.0\"\n}\n\nresource \"aws_instance\" \"example\" {\n\n}\n\nprovider \"google\" {\n credentials = file(\"account.json\")\n project = \"my-project-id\"\n region = \"us-central1\"\n}\n\nprovider \"aws\" {\n alias = \"yet another\"\n region = var.aws_region\n}\n\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleAwsMultipleProvidersRegionOverridenExpected = `\nprovider \"aws\" {\n region = \"eu-west-1\"\n version = \"0.2.0\"\n}\n\nprovider \"aws\" {\n alias = \"another\"\n region = \"eu-west-1\"\n version = \"0.2.0\"\n}\n\nresource \"aws_instance\" \"example\" {\n\n}\n\nprovider \"google\" {\n credentials = file(\"account.json\")\n project = \"my-project-id\"\n region = \"us-central1\"\n}\n\nprovider \"aws\" {\n alias = \"yet another\"\n region = \"eu-west-1\"\n}\n\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleAwsMultipleProvidersRegionVersionOverridenExpected = `\nprovider \"aws\" {\n region = \"eu-west-1\"\n version = \"0.3.0\"\n}\n\nprovider \"aws\" {\n alias = \"another\"\n region = \"eu-west-1\"\n version = \"0.3.0\"\n}\n\nresource \"aws_instance\" \"example\" {\n\n}\n\nprovider \"google\" {\n credentials = file(\"account.json\")\n project = \"my-project-id\"\n region = \"us-central1\"\n}\n\nprovider \"aws\" {\n alias = \"yet another\"\n region = \"eu-west-1\"\n version = \"0.3.0\"\n}\n\noutput \"hello\" {\n value = \"Hello, World\"\n}\n`\n\nconst terraformCodeExampleAwsMultipleProvidersNonEmptyWithCommentsOriginal = `\n# Make sure comments are maintained\n# And don't interfere with parsing\nprovider \"aws\" {\n # Make sure comments are maintained\n # And don't interfere with parsing\n region = var.aws_region\n\n # Make sure comments are maintained\n # And don't interfere with parsing\n version = \"0.2.0\"\n}\n\n# Make sure comments are maintained\n# And don't interfere with parsing\nprovider \"aws\" {\n # Make sure comments are maintained\n # And don't interfere with parsing\n region = var.aws_region\n\n # Make sure comments are maintained\n # And don't interfere with parsing\n version = \"0.2.0\"\n\n # Make sure comments are maintained\n # And don't interfere with parsing\n alias = \"secondary\"\n}\n`\n\nconst terraformCodeExampleAwsMultipleProvidersNonEmptyWithCommentsRegionOverriddenExpected = `\n# Make sure comments are maintained\n# And don't interfere with parsing\nprovider \"aws\" {\n # Make sure comments are maintained\n # And don't interfere with parsing\n region = \"eu-west-1\"\n\n # Make sure comments are maintained\n # And don't interfere with parsing\n version = \"0.2.0\"\n}\n\n# Make sure comments are maintained\n# And don't interfere with parsing\nprovider \"aws\" {\n # Make sure comments are maintained\n # And don't interfere with parsing\n region = \"eu-west-1\"\n\n # Make sure comments are maintained\n # And don't interfere with parsing\n version = \"0.2.0\"\n\n # Make sure comments are maintained\n # And don't interfere with parsing\n alias = \"secondary\"\n}\n`\n\nconst terraformCodeExampleAwsMultipleProvidersNonEmptyWithCommentsRegionVersionOverriddenExpected = `\n# Make sure comments are maintained\n# And don't interfere with parsing\nprovider \"aws\" {\n # Make sure comments are maintained\n # And don't interfere with parsing\n region = \"eu-west-1\"\n\n # Make sure comments are maintained\n # And don't interfere with parsing\n version = \"0.3.0\"\n}\n\n# Make sure comments are maintained\n# And don't interfere with parsing\nprovider \"aws\" {\n # Make sure comments are maintained\n # And don't interfere with parsing\n region = \"eu-west-1\"\n\n # Make sure comments are maintained\n # And don't interfere with parsing\n version = \"0.3.0\"\n\n # Make sure comments are maintained\n # And don't interfere with parsing\n alias = \"secondary\"\n}\n`\n\nfunc TestPatchAwsProviderInTerraformCodeHappyPath(t *testing.T) {\n\tt.Parallel()\n\n\ttestCases := []struct {\n\t\ttestName string\n\t\toriginalTerraformCode string\n\t\tattributesToOverride map[string]string\n\t\texpectedCodeWasUpdated bool\n\t\texpectedTerraformCode string\n\t}{\n\t\t{\"empty\", \"\", nil, false, \"\"},\n\t\t{\"empty with attributes\", \"\", map[string]string{\"region\": \"eu-west-1\"}, false, \"\"},\n\t\t{\"no provider\", terraformCodeExampleOutputOnly, map[string]string{\"region\": \"eu-west-1\"}, false, terraformCodeExampleOutputOnly},\n\t\t{\"no aws provider\", terraformCodeExampleGcpProvider, map[string]string{\"region\": \"eu-west-1\"}, false, terraformCodeExampleGcpProvider},\n\t\t{\"one empty aws provider, but no overrides\", terraformCodeExampleAwsProviderEmptyOriginal, nil, false, terraformCodeExampleAwsProviderEmptyOriginal},\n\t\t{\"one empty aws provider, with region override\", terraformCodeExampleAwsProviderEmptyOriginal, map[string]string{\"region\": \"eu-west-1\"}, true, terraformCodeExampleAwsProviderRegionOverridenExpected},\n\t\t{\"one empty aws provider, with region, version override\", terraformCodeExampleAwsProviderEmptyOriginal, map[string]string{\"region\": \"eu-west-1\", \"version\": \"0.3.0\"}, true, terraformCodeExampleAwsProviderRegionVersionOverridenExpected},\n\t\t{\"one non-empty aws provider, but no overrides\", terraformCodeExampleAwsProviderNonEmptyOriginal, nil, false, terraformCodeExampleAwsProviderNonEmptyOriginal},\n\t\t{\"one non-empty aws provider, with region override\", terraformCodeExampleAwsProviderNonEmptyOriginal, map[string]string{\"region\": \"eu-west-1\"}, true, terraformCodeExampleAwsProviderRegionOverridenVersionNotOverriddenExpected},\n\t\t{\"one non-empty aws provider, with region, version override\", terraformCodeExampleAwsProviderNonEmptyOriginal, map[string]string{\"region\": \"eu-west-1\", \"version\": \"0.3.0\"}, true, terraformCodeExampleAwsProviderRegionVersionOverridenExpected},\n\t\t{\"multiple providers, but no overrides\", terraformCodeExampleAwsMultipleProvidersOriginal, nil, false, terraformCodeExampleAwsMultipleProvidersOriginal},\n\t\t{\"multiple providers, with region override\", terraformCodeExampleAwsMultipleProvidersOriginal, map[string]string{\"region\": \"eu-west-1\"}, true, terraformCodeExampleAwsMultipleProvidersRegionOverridenExpected},\n\t\t{\"multiple providers, with region, version override\", terraformCodeExampleAwsMultipleProvidersOriginal, map[string]string{\"region\": \"eu-west-1\", \"version\": \"0.3.0\"}, true, terraformCodeExampleAwsMultipleProvidersRegionVersionOverridenExpected},\n\t\t{\"multiple providers with comments, but no overrides\", terraformCodeExampleAwsMultipleProvidersNonEmptyWithCommentsOriginal, nil, false, terraformCodeExampleAwsMultipleProvidersNonEmptyWithCommentsOriginal},\n\t\t{\"multiple providers with comments, with region override\", terraformCodeExampleAwsMultipleProvidersNonEmptyWithCommentsOriginal, map[string]string{\"region\": \"eu-west-1\"}, true, terraformCodeExampleAwsMultipleProvidersNonEmptyWithCommentsRegionOverriddenExpected},\n\t\t{\"multiple providers with comments, with region, version override\", terraformCodeExampleAwsMultipleProvidersNonEmptyWithCommentsOriginal, map[string]string{\"region\": \"eu-west-1\", \"version\": \"0.3.0\"}, true, terraformCodeExampleAwsMultipleProvidersNonEmptyWithCommentsRegionVersionOverriddenExpected},\n\t}\n\n\tfor _, testCase := range testCases {\n\t\t\/\/ The following is necessary to make sure testCase's values don't\n\t\t\/\/ get updated due to concurrency within the scope of t.Run(..) below\n\t\ttestCase := testCase\n\t\tt.Run(testCase.testName, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tactualTerraformCode, actualCodeWasUpdated, err := patchAwsProviderInTerraformCode(testCase.originalTerraformCode, \"test.tf\", testCase.attributesToOverride)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, testCase.expectedCodeWasUpdated, actualCodeWasUpdated)\n\t\t\tassert.Equal(t, testCase.expectedTerraformCode, actualTerraformCode)\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package renter\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/NebulousLabs\/Sia\/crypto\"\n\t\"github.com\/NebulousLabs\/Sia\/modules\"\n\t\"github.com\/NebulousLabs\/Sia\/types\"\n\n\t\"github.com\/NebulousLabs\/errors\"\n)\n\n\/\/ cacheData contatins the data and the timestamp for the unfinished\n\/\/ download chunks\ntype cacheData struct {\n\tdata []byte\n\tlastAccess time.Time\n}\n\n\/\/ downloadPieceInfo contains all the information required to download and\n\/\/ recover a piece of a chunk from a host. It is a value in a map where the key\n\/\/ is the file contract id.\ntype downloadPieceInfo struct {\n\tindex uint64\n\troot crypto.Hash\n}\n\n\/\/ unfinishedDownloadChunk contains a chunk for a download that is in progress.\n\/\/\n\/\/ TODO: Currently, if a standby worker is needed, all of the standby workers\n\/\/ are added and the first one that is available will pick up the slack. But,\n\/\/ depending on the situation, we may only want to add a handful of workers to\n\/\/ make sure that a fast \/ optimal worker is initially able to pick up the\n\/\/ slack. This could potentially be streamlined by turning the standby array\n\/\/ into a standby heap, and then having some general scoring system for figuring\n\/\/ out how useful a worker is, and then having some threshold that a worker\n\/\/ needs to be pulled from standby to work on the download. That threshold\n\/\/ should go up every time that a worker fails, to make sure that if you have\n\/\/ repeated failures, you keep pulling in the fresh workers instead of getting\n\/\/ stuck and always rejecting all the standby workers.\ntype unfinishedDownloadChunk struct {\n\t\/\/ Fetch + Write instructions - read only or otherwise thread safe.\n\tdestination downloadDestination \/\/ Where to write the recovered logical chunk.\n\terasureCode modules.ErasureCoder\n\tmasterKey crypto.TwofishKey\n\n\t\/\/ Fetch + Write instructions - read only or otherwise thread safe.\n\tstaticChunkIndex uint64 \/\/ Required for deriving the encryption keys for each piece.\n\tstaticCacheID string \/\/ Used to uniquely identify a chunk in the chunk cache.\n\tstaticChunkMap map[types.FileContractID]downloadPieceInfo \/\/ Maps from file contract ids to the info for the piece associated with that contract\n\tstaticChunkSize uint64\n\tstaticFetchLength uint64 \/\/ Length within the logical chunk to fetch.\n\tstaticFetchOffset uint64 \/\/ Offset within the logical chunk that is being downloaded.\n\tstaticPieceSize uint64\n\tstaticWriteOffset int64 \/\/ Offet within the writer to write the completed data.\n\n\t\/\/ Fetch + Write instructions - read only or otherwise thread safe.\n\tstaticLatencyTarget time.Duration\n\tstaticNeedsMemory bool \/\/ Set to true if memory was not pre-allocated for this chunk.\n\tstaticOverdrive int\n\tstaticPriority uint64\n\n\t\/\/ Download chunk state - need mutex to access.\n\tfailed bool \/\/ Indicates if the chunk has been marked as failed.\n\tphysicalChunkData [][]byte \/\/ Used to recover the logical data.\n\tpieceUsage []bool \/\/ Which pieces are being actively fetched.\n\tpiecesCompleted int \/\/ Number of pieces that have successfully completed.\n\tpiecesRegistered int \/\/ Number of pieces that workers are actively fetching.\n\trecoveryComplete bool \/\/ Whether or not the recovery has completed and the chunk memory released.\n\tworkersRemaining int \/\/ Number of workers still able to fetch the chunk.\n\tworkersStandby []*worker \/\/ Set of workers that are able to work on this download, but are not needed unless other workers fail.\n\n\t\/\/ Memory management variables.\n\tmemoryAllocated uint64\n\n\t\/\/ The download object, mostly to update download progress.\n\tdownload *download\n\tmu sync.Mutex\n\n\t\/\/ Caching related fields\n\tchunkCache map[string]*cacheData\n\tcacheMu *sync.Mutex\n}\n\n\/\/ fail will set the chunk status to failed. The physical chunk memory will be\n\/\/ wiped and any memory allocation will be returned to the renter. The download\n\/\/ as a whole will be failed as well.\nfunc (udc *unfinishedDownloadChunk) fail(err error) {\n\tudc.failed = true\n\tudc.recoveryComplete = true\n\tfor i := range udc.physicalChunkData {\n\t\tudc.physicalChunkData[i] = nil\n\t}\n\tudc.download.managedFail(fmt.Errorf(\"chunk %v failed: %v\", udc.staticChunkIndex, err))\n\tudc.destination = nil\n}\n\n\/\/ managedCleanUp will check if the download has failed, and if not it will add\n\/\/ any standby workers which need to be added. Calling managedCleanUp too many\n\/\/ times is not harmful, however missing a call to managedCleanUp can lead to\n\/\/ dealocks.\nfunc (udc *unfinishedDownloadChunk) managedCleanUp() {\n\t\/\/ Check if the chunk is newly failed.\n\tudc.mu.Lock()\n\tif udc.workersRemaining+udc.piecesCompleted < udc.erasureCode.MinPieces() && !udc.failed {\n\t\tudc.fail(errors.New(\"not enough workers to continue download\"))\n\t}\n\t\/\/ Return any excess memory.\n\tudc.returnMemory()\n\n\t\/\/ Nothing to do if the chunk has failed.\n\tif udc.failed {\n\t\tudc.mu.Unlock()\n\t\treturn\n\t}\n\n\t\/\/ Check whether standby workers are required.\n\tchunkComplete := udc.piecesCompleted >= udc.erasureCode.MinPieces()\n\tdesiredPiecesRegistered := udc.erasureCode.MinPieces() + udc.staticOverdrive - udc.piecesCompleted\n\tstandbyWorkersRequired := !chunkComplete && udc.piecesRegistered < desiredPiecesRegistered\n\tif !standbyWorkersRequired {\n\t\tudc.mu.Unlock()\n\t\treturn\n\t}\n\n\t\/\/ Assemble a list of standby workers, release the udc lock, and then queue\n\t\/\/ the chunk into the workers. The lock needs to be released early because\n\t\/\/ holding the udc lock and the worker lock at the same time is a deadlock\n\t\/\/ risk (they interact with eachother, call functions on eachother).\n\tvar standbyWorkers []*worker\n\tfor i := 0; i < len(udc.workersStandby); i++ {\n\t\tstandbyWorkers = append(standbyWorkers, udc.workersStandby[i])\n\t}\n\tudc.workersStandby = udc.workersStandby[:0] \/\/ Workers have been taken off of standby.\n\tudc.mu.Unlock()\n\tfor i := 0; i < len(standbyWorkers); i++ {\n\t\tstandbyWorkers[i].managedQueueDownloadChunk(udc)\n\t}\n}\n\n\/\/ managedRemoveWorker will decrement a worker from the set of remaining workers\n\/\/ in the udc. After a worker has been removed, the udc needs to be cleaned up.\nfunc (udc *unfinishedDownloadChunk) managedRemoveWorker() {\n\tudc.mu.Lock()\n\tudc.workersRemaining--\n\tudc.mu.Unlock()\n\tudc.managedCleanUp()\n}\n\n\/\/ returnMemory will check on the status of all the workers and pieces, and\n\/\/ determine how much memory is safe to return to the renter. This should be\n\/\/ called each time a worker returns, and also after the chunk is recovered.\nfunc (udc *unfinishedDownloadChunk) returnMemory() {\n\t\/\/ The maximum amount of memory is the pieces completed plus the number of\n\t\/\/ workers remaining.\n\tmaxMemory := uint64(udc.workersRemaining+udc.piecesCompleted) * udc.staticPieceSize\n\t\/\/ If enough pieces have completed, max memory is the number of registered\n\t\/\/ pieces plus the number of completed pieces.\n\tif udc.piecesCompleted >= udc.erasureCode.MinPieces() {\n\t\t\/\/ udc.piecesRegistered is guaranteed to be at most equal to the number\n\t\t\/\/ of overdrive pieces, meaning it will be equal to or less than\n\t\t\/\/ initalMemory.\n\t\tmaxMemory = uint64(udc.piecesCompleted+udc.piecesRegistered) * udc.staticPieceSize\n\t}\n\t\/\/ If the chunk recovery has completed, the maximum number of pieces is the\n\t\/\/ number of registered.\n\tif udc.recoveryComplete {\n\t\tmaxMemory = uint64(udc.piecesRegistered) * udc.staticPieceSize\n\t}\n\t\/\/ Return any memory we don't need.\n\tif uint64(udc.memoryAllocated) > maxMemory {\n\t\tudc.download.memoryManager.Return(udc.memoryAllocated - maxMemory)\n\t\tudc.memoryAllocated = maxMemory\n\t}\n}\n\n\/\/ threadedRecoverLogicalData will take all of the pieces that have been\n\/\/ downloaded and encode them into the logical data which is then written to the\n\/\/ underlying writer for the download.\nfunc (udc *unfinishedDownloadChunk) threadedRecoverLogicalData() error {\n\t\/\/ Ensure cleanup occurs after the data is recovered, whether recovery\n\t\/\/ succeeds or fails.\n\tdefer udc.managedCleanUp()\n\n\t\/\/ Decrypt the chunk pieces. This doesn't need to happen under a lock,\n\t\/\/ because any thread potentially writing to the physicalChunkData array is\n\t\/\/ going to be stopped by the fact that the chunk is complete.\n\tfor i := range udc.physicalChunkData {\n\t\t\/\/ Skip empty pieces.\n\t\tif udc.physicalChunkData[i] == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tkey := deriveKey(udc.masterKey, udc.staticChunkIndex, uint64(i))\n\t\tdecryptedPiece, err := key.DecryptBytes(udc.physicalChunkData[i])\n\t\tif err != nil {\n\t\t\tudc.mu.Lock()\n\t\t\tudc.fail(err)\n\t\t\tudc.mu.Unlock()\n\t\t\treturn errors.AddContext(err, \"unable to decrypt chunk\")\n\t\t}\n\t\tudc.physicalChunkData[i] = decryptedPiece\n\t}\n\n\t\/\/ Recover the pieces into the logical chunk data.\n\t\/\/\n\t\/\/ TODO: Might be some way to recover into the downloadDestination instead\n\t\/\/ of creating a buffer and then writing that.\n\trecoverWriter := new(bytes.Buffer)\n\terr := udc.erasureCode.Recover(udc.physicalChunkData, udc.staticChunkSize, recoverWriter)\n\tif err != nil {\n\t\tudc.mu.Lock()\n\t\tudc.fail(err)\n\t\tudc.mu.Unlock()\n\t\treturn errors.AddContext(err, \"unable to recover chunk\")\n\t}\n\t\/\/ Clear out the physical chunk pieces, we do not need them anymore.\n\tfor i := range udc.physicalChunkData {\n\t\tudc.physicalChunkData[i] = nil\n\t}\n\n\t\/\/ Get recovered data\n\trecoveredData := recoverWriter.Bytes()\n\n\t\/\/ Add the chunk to the cache.\n\tudc.addChunkToCache(recoveredData)\n\n\t\/\/ Write the bytes to the requested output.\n\tstart := udc.staticFetchOffset\n\tend := udc.staticFetchOffset + udc.staticFetchLength\n\t_, err = udc.destination.WriteAt(recoveredData[start:end], udc.staticWriteOffset)\n\tif err != nil {\n\t\tudc.mu.Lock()\n\t\tudc.fail(err)\n\t\tudc.mu.Unlock()\n\t\treturn errors.AddContext(err, \"unable to write to download destination\")\n\t}\n\trecoverWriter = nil\n\n\t\/\/ Now that the download has completed and been flushed from memory, we can\n\t\/\/ release the memory that was used to store the data. Call 'cleanUp' to\n\t\/\/ trigger the memory cleanup along with some extra checks that everything\n\t\/\/ is consistent.\n\tudc.mu.Lock()\n\tudc.recoveryComplete = true\n\tudc.mu.Unlock()\n\n\t\/\/ Update the download and signal completion of this chunk.\n\tudc.download.mu.Lock()\n\tdefer udc.download.mu.Unlock()\n\tudc.download.chunksRemaining--\n\tatomic.AddUint64(&udc.download.atomicDataReceived, udc.staticFetchLength)\n\tif udc.download.chunksRemaining == 0 {\n\t\t\/\/ Download is complete, send out a notification and close the\n\t\t\/\/ destination writer.\n\t\tudc.download.endTime = time.Now()\n\t\tclose(udc.download.completeChan)\n\t\treturn udc.download.destination.Close()\n\t}\n\treturn nil\n}\n<commit_msg>also nil destination on successful download<commit_after>package renter\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/NebulousLabs\/Sia\/crypto\"\n\t\"github.com\/NebulousLabs\/Sia\/modules\"\n\t\"github.com\/NebulousLabs\/Sia\/types\"\n\n\t\"github.com\/NebulousLabs\/errors\"\n)\n\n\/\/ cacheData contatins the data and the timestamp for the unfinished\n\/\/ download chunks\ntype cacheData struct {\n\tdata []byte\n\tlastAccess time.Time\n}\n\n\/\/ downloadPieceInfo contains all the information required to download and\n\/\/ recover a piece of a chunk from a host. It is a value in a map where the key\n\/\/ is the file contract id.\ntype downloadPieceInfo struct {\n\tindex uint64\n\troot crypto.Hash\n}\n\n\/\/ unfinishedDownloadChunk contains a chunk for a download that is in progress.\n\/\/\n\/\/ TODO: Currently, if a standby worker is needed, all of the standby workers\n\/\/ are added and the first one that is available will pick up the slack. But,\n\/\/ depending on the situation, we may only want to add a handful of workers to\n\/\/ make sure that a fast \/ optimal worker is initially able to pick up the\n\/\/ slack. This could potentially be streamlined by turning the standby array\n\/\/ into a standby heap, and then having some general scoring system for figuring\n\/\/ out how useful a worker is, and then having some threshold that a worker\n\/\/ needs to be pulled from standby to work on the download. That threshold\n\/\/ should go up every time that a worker fails, to make sure that if you have\n\/\/ repeated failures, you keep pulling in the fresh workers instead of getting\n\/\/ stuck and always rejecting all the standby workers.\ntype unfinishedDownloadChunk struct {\n\t\/\/ Fetch + Write instructions - read only or otherwise thread safe.\n\tdestination downloadDestination \/\/ Where to write the recovered logical chunk.\n\terasureCode modules.ErasureCoder\n\tmasterKey crypto.TwofishKey\n\n\t\/\/ Fetch + Write instructions - read only or otherwise thread safe.\n\tstaticChunkIndex uint64 \/\/ Required for deriving the encryption keys for each piece.\n\tstaticCacheID string \/\/ Used to uniquely identify a chunk in the chunk cache.\n\tstaticChunkMap map[types.FileContractID]downloadPieceInfo \/\/ Maps from file contract ids to the info for the piece associated with that contract\n\tstaticChunkSize uint64\n\tstaticFetchLength uint64 \/\/ Length within the logical chunk to fetch.\n\tstaticFetchOffset uint64 \/\/ Offset within the logical chunk that is being downloaded.\n\tstaticPieceSize uint64\n\tstaticWriteOffset int64 \/\/ Offet within the writer to write the completed data.\n\n\t\/\/ Fetch + Write instructions - read only or otherwise thread safe.\n\tstaticLatencyTarget time.Duration\n\tstaticNeedsMemory bool \/\/ Set to true if memory was not pre-allocated for this chunk.\n\tstaticOverdrive int\n\tstaticPriority uint64\n\n\t\/\/ Download chunk state - need mutex to access.\n\tfailed bool \/\/ Indicates if the chunk has been marked as failed.\n\tphysicalChunkData [][]byte \/\/ Used to recover the logical data.\n\tpieceUsage []bool \/\/ Which pieces are being actively fetched.\n\tpiecesCompleted int \/\/ Number of pieces that have successfully completed.\n\tpiecesRegistered int \/\/ Number of pieces that workers are actively fetching.\n\trecoveryComplete bool \/\/ Whether or not the recovery has completed and the chunk memory released.\n\tworkersRemaining int \/\/ Number of workers still able to fetch the chunk.\n\tworkersStandby []*worker \/\/ Set of workers that are able to work on this download, but are not needed unless other workers fail.\n\n\t\/\/ Memory management variables.\n\tmemoryAllocated uint64\n\n\t\/\/ The download object, mostly to update download progress.\n\tdownload *download\n\tmu sync.Mutex\n\n\t\/\/ Caching related fields\n\tchunkCache map[string]*cacheData\n\tcacheMu *sync.Mutex\n}\n\n\/\/ fail will set the chunk status to failed. The physical chunk memory will be\n\/\/ wiped and any memory allocation will be returned to the renter. The download\n\/\/ as a whole will be failed as well.\nfunc (udc *unfinishedDownloadChunk) fail(err error) {\n\tudc.failed = true\n\tudc.recoveryComplete = true\n\tfor i := range udc.physicalChunkData {\n\t\tudc.physicalChunkData[i] = nil\n\t}\n\tudc.download.managedFail(fmt.Errorf(\"chunk %v failed: %v\", udc.staticChunkIndex, err))\n\tudc.destination = nil\n}\n\n\/\/ managedCleanUp will check if the download has failed, and if not it will add\n\/\/ any standby workers which need to be added. Calling managedCleanUp too many\n\/\/ times is not harmful, however missing a call to managedCleanUp can lead to\n\/\/ dealocks.\nfunc (udc *unfinishedDownloadChunk) managedCleanUp() {\n\t\/\/ Check if the chunk is newly failed.\n\tudc.mu.Lock()\n\tif udc.workersRemaining+udc.piecesCompleted < udc.erasureCode.MinPieces() && !udc.failed {\n\t\tudc.fail(errors.New(\"not enough workers to continue download\"))\n\t}\n\t\/\/ Return any excess memory.\n\tudc.returnMemory()\n\n\t\/\/ Nothing to do if the chunk has failed.\n\tif udc.failed {\n\t\tudc.mu.Unlock()\n\t\treturn\n\t}\n\n\t\/\/ Check whether standby workers are required.\n\tchunkComplete := udc.piecesCompleted >= udc.erasureCode.MinPieces()\n\tdesiredPiecesRegistered := udc.erasureCode.MinPieces() + udc.staticOverdrive - udc.piecesCompleted\n\tstandbyWorkersRequired := !chunkComplete && udc.piecesRegistered < desiredPiecesRegistered\n\tif !standbyWorkersRequired {\n\t\tudc.mu.Unlock()\n\t\treturn\n\t}\n\n\t\/\/ Assemble a list of standby workers, release the udc lock, and then queue\n\t\/\/ the chunk into the workers. The lock needs to be released early because\n\t\/\/ holding the udc lock and the worker lock at the same time is a deadlock\n\t\/\/ risk (they interact with eachother, call functions on eachother).\n\tvar standbyWorkers []*worker\n\tfor i := 0; i < len(udc.workersStandby); i++ {\n\t\tstandbyWorkers = append(standbyWorkers, udc.workersStandby[i])\n\t}\n\tudc.workersStandby = udc.workersStandby[:0] \/\/ Workers have been taken off of standby.\n\tudc.mu.Unlock()\n\tfor i := 0; i < len(standbyWorkers); i++ {\n\t\tstandbyWorkers[i].managedQueueDownloadChunk(udc)\n\t}\n}\n\n\/\/ managedRemoveWorker will decrement a worker from the set of remaining workers\n\/\/ in the udc. After a worker has been removed, the udc needs to be cleaned up.\nfunc (udc *unfinishedDownloadChunk) managedRemoveWorker() {\n\tudc.mu.Lock()\n\tudc.workersRemaining--\n\tudc.mu.Unlock()\n\tudc.managedCleanUp()\n}\n\n\/\/ returnMemory will check on the status of all the workers and pieces, and\n\/\/ determine how much memory is safe to return to the renter. This should be\n\/\/ called each time a worker returns, and also after the chunk is recovered.\nfunc (udc *unfinishedDownloadChunk) returnMemory() {\n\t\/\/ The maximum amount of memory is the pieces completed plus the number of\n\t\/\/ workers remaining.\n\tmaxMemory := uint64(udc.workersRemaining+udc.piecesCompleted) * udc.staticPieceSize\n\t\/\/ If enough pieces have completed, max memory is the number of registered\n\t\/\/ pieces plus the number of completed pieces.\n\tif udc.piecesCompleted >= udc.erasureCode.MinPieces() {\n\t\t\/\/ udc.piecesRegistered is guaranteed to be at most equal to the number\n\t\t\/\/ of overdrive pieces, meaning it will be equal to or less than\n\t\t\/\/ initalMemory.\n\t\tmaxMemory = uint64(udc.piecesCompleted+udc.piecesRegistered) * udc.staticPieceSize\n\t}\n\t\/\/ If the chunk recovery has completed, the maximum number of pieces is the\n\t\/\/ number of registered.\n\tif udc.recoveryComplete {\n\t\tmaxMemory = uint64(udc.piecesRegistered) * udc.staticPieceSize\n\t}\n\t\/\/ Return any memory we don't need.\n\tif uint64(udc.memoryAllocated) > maxMemory {\n\t\tudc.download.memoryManager.Return(udc.memoryAllocated - maxMemory)\n\t\tudc.memoryAllocated = maxMemory\n\t}\n}\n\n\/\/ threadedRecoverLogicalData will take all of the pieces that have been\n\/\/ downloaded and encode them into the logical data which is then written to the\n\/\/ underlying writer for the download.\nfunc (udc *unfinishedDownloadChunk) threadedRecoverLogicalData() error {\n\t\/\/ Ensure cleanup occurs after the data is recovered, whether recovery\n\t\/\/ succeeds or fails.\n\tdefer udc.managedCleanUp()\n\n\t\/\/ Decrypt the chunk pieces. This doesn't need to happen under a lock,\n\t\/\/ because any thread potentially writing to the physicalChunkData array is\n\t\/\/ going to be stopped by the fact that the chunk is complete.\n\tfor i := range udc.physicalChunkData {\n\t\t\/\/ Skip empty pieces.\n\t\tif udc.physicalChunkData[i] == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tkey := deriveKey(udc.masterKey, udc.staticChunkIndex, uint64(i))\n\t\tdecryptedPiece, err := key.DecryptBytes(udc.physicalChunkData[i])\n\t\tif err != nil {\n\t\t\tudc.mu.Lock()\n\t\t\tudc.fail(err)\n\t\t\tudc.mu.Unlock()\n\t\t\treturn errors.AddContext(err, \"unable to decrypt chunk\")\n\t\t}\n\t\tudc.physicalChunkData[i] = decryptedPiece\n\t}\n\n\t\/\/ Recover the pieces into the logical chunk data.\n\t\/\/\n\t\/\/ TODO: Might be some way to recover into the downloadDestination instead\n\t\/\/ of creating a buffer and then writing that.\n\trecoverWriter := new(bytes.Buffer)\n\terr := udc.erasureCode.Recover(udc.physicalChunkData, udc.staticChunkSize, recoverWriter)\n\tif err != nil {\n\t\tudc.mu.Lock()\n\t\tudc.fail(err)\n\t\tudc.mu.Unlock()\n\t\treturn errors.AddContext(err, \"unable to recover chunk\")\n\t}\n\t\/\/ Clear out the physical chunk pieces, we do not need them anymore.\n\tfor i := range udc.physicalChunkData {\n\t\tudc.physicalChunkData[i] = nil\n\t}\n\n\t\/\/ Get recovered data\n\trecoveredData := recoverWriter.Bytes()\n\n\t\/\/ Add the chunk to the cache.\n\tudc.addChunkToCache(recoveredData)\n\n\t\/\/ Write the bytes to the requested output.\n\tstart := udc.staticFetchOffset\n\tend := udc.staticFetchOffset + udc.staticFetchLength\n\t_, err = udc.destination.WriteAt(recoveredData[start:end], udc.staticWriteOffset)\n\tif err != nil {\n\t\tudc.mu.Lock()\n\t\tudc.fail(err)\n\t\tudc.mu.Unlock()\n\t\treturn errors.AddContext(err, \"unable to write to download destination\")\n\t}\n\trecoverWriter = nil\n\n\t\/\/ Now that the download has completed and been flushed from memory, we can\n\t\/\/ release the memory that was used to store the data. Call 'cleanUp' to\n\t\/\/ trigger the memory cleanup along with some extra checks that everything\n\t\/\/ is consistent.\n\tudc.mu.Lock()\n\tudc.recoveryComplete = true\n\tudc.mu.Unlock()\n\n\t\/\/ Update the download and signal completion of this chunk.\n\tudc.download.mu.Lock()\n\tdefer udc.download.mu.Unlock()\n\tudc.download.chunksRemaining--\n\tatomic.AddUint64(&udc.download.atomicDataReceived, udc.staticFetchLength)\n\tif udc.download.chunksRemaining == 0 {\n\t\t\/\/ Download is complete, send out a notification and close the\n\t\t\/\/ destination writer.\n\t\tudc.download.endTime = time.Now()\n\t\tclose(udc.download.completeChan)\n\t\terr := udc.download.destination.Close()\n\t\tudc.download.destination = nil\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build kube\n\n\/*\n Copyright 2020 Docker Compose CLI authors\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage context\n\nimport (\n\t\"github.com\/docker\/compose-cli\/pkg\/api\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/docker\/compose-cli\/api\/context\/store\"\n\t\"github.com\/docker\/compose-cli\/kube\"\n)\n\nfunc init() {\n\textraCommands = append(extraCommands, createKubeCommand)\n\textraHelp = append(extraHelp, `\nCreate a Kubernetes context:\n$ docker context create k8s CONTEXT [flags]\n(see docker context create k8s --help)\n`)\n}\n\nfunc createKubeCommand() *cobra.Command {\n\tvar opts kube.ContextParams\n\tcmd := &cobra.Command{\n\t\tUse: \"kubernetes CONTEXT [flags]\",\n\t\tShort: \"Create context for a Kubernetes Cluster\",\n\t\tArgs: cobra.ExactArgs(1),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn runCreateKube(args[0], opts)\n\t\t},\n\t}\n\n\taddDescriptionFlag(cmd, &opts.Description)\n\tcmd.Flags().StringVar(&opts.KubeConfigPath, \"kubeconfig\", \"\", \"The endpoint of the Kubernetes manager\")\n\tcmd.Flags().StringVar(&opts.KubeContextName, \"kubecontext\", \"\", \"The name of the context to use in kubeconfig\")\n\tcmd.Flags().BoolVar(&opts.FromEnvironment, \"from-env\", false, \"Get endpoint and creds from env vars\")\n\treturn cmd\n}\n\nfunc runCreateKube(contextName string, opts kube.ContextParams) error {\n\tif contextExists(contextName) {\n\t\treturn errors.Wrapf(api.ErrAlreadyExists, \"context %q\", contextName)\n\t}\n\n\tcontextData, description, err := opts.CreateContextData()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn createDockerContext(contextName, store.KubeContextType, description, contextData)\n}\n<commit_msg>Fix help text when for context create and kube backend<commit_after>\/\/ +build kube\n\n\/*\n Copyright 2020 Docker Compose CLI authors\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage context\n\nimport (\n\t\"github.com\/docker\/compose-cli\/pkg\/api\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/docker\/compose-cli\/api\/context\/store\"\n\t\"github.com\/docker\/compose-cli\/kube\"\n)\n\nfunc init() {\n\textraCommands = append(extraCommands, createKubeCommand)\n\textraHelp = append(extraHelp, `\nCreate a Kubernetes context:\n$ docker context create kubernetes CONTEXT [flags]\n(see docker context create kubernetes --help)\n`)\n}\n\nfunc createKubeCommand() *cobra.Command {\n\tvar opts kube.ContextParams\n\tcmd := &cobra.Command{\n\t\tUse: \"kubernetes CONTEXT [flags]\",\n\t\tShort: \"Create context for a Kubernetes Cluster\",\n\t\tArgs: cobra.ExactArgs(1),\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn runCreateKube(args[0], opts)\n\t\t},\n\t}\n\n\taddDescriptionFlag(cmd, &opts.Description)\n\tcmd.Flags().StringVar(&opts.KubeConfigPath, \"kubeconfig\", \"\", \"The endpoint of the Kubernetes manager\")\n\tcmd.Flags().StringVar(&opts.KubeContextName, \"kubecontext\", \"\", \"The name of the context to use in kubeconfig\")\n\tcmd.Flags().BoolVar(&opts.FromEnvironment, \"from-env\", false, \"Get endpoint and creds from env vars\")\n\treturn cmd\n}\n\nfunc runCreateKube(contextName string, opts kube.ContextParams) error {\n\tif contextExists(contextName) {\n\t\treturn errors.Wrapf(api.ErrAlreadyExists, \"context %q\", contextName)\n\t}\n\n\tcontextData, description, err := opts.CreateContextData()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn createDockerContext(contextName, store.KubeContextType, description, contextData)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.\n\npackage vespa\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"os\"\n)\n\ntype VespaTlsConfig struct {\n\tDisableHostnameValidation bool `json:\"disable-hostname-validation\"`\n\tFiles struct {\n\t\tPrivateKey string `json:\"private-key\"`\n\t\tCaCertificates string `json:\"ca-certificates\"`\n\t\tCertificates string `json:\"certificates\"`\n\t} `json:\"files\"`\n\tAuthorizedPeers []struct {\n\t\tRequiredCredentials []struct {\n\t\t\tField string `json:\"field\"`\n\t\t\tMustMatch string `json:\"must-match\"`\n\t\t} `json:\"required-credentials\"`\n\t\tName string `json:\"name\"`\n\t\tCapabilities []string `json:\"capabilities\"`\n\t} `json:\"authorized-peers\"`\n}\n\nfunc LoadTlsConfig() (*VespaTlsConfig, error) {\n\tfn := os.Getenv(\"VESPA_TLS_CONFIG_FILE\")\n\tif fn == \"\" {\n\t\treturn nil, nil\n\t}\n\tcontents, err := os.ReadFile(fn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcodec := json.NewDecoder(bytes.NewReader(contents))\n\tvar parsedJson VespaTlsConfig\n\terr = codec.Decode(&parsedJson)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &parsedJson, nil\n}\n\nfunc ExportSecurityEnvToSh() {\n\tLoadDefaultEnv()\n\tcfg, _ := LoadTlsConfig()\n\thelper := newShellEnvExporter()\n\tif cfg == nil {\n\t\thelper.unsetVar(\"VESPA_TLS_ENABLED\")\n\t} else {\n\t\tif fn := cfg.Files.PrivateKey; fn != \"\" {\n\t\t\thelper.overrideVar(\"VESPA_TLS_PRIVATE_KEY\", fn)\n\t\t}\n\t\tif fn := cfg.Files.CaCertificates; fn != \"\" {\n\t\t\thelper.overrideVar(\"VESPA_TLS_CA_CERT\", fn)\n\t\t}\n\t\tif fn := cfg.Files.Certificates; fn != \"\" {\n\t\t\thelper.overrideVar(\"VESPA_TLS_CERT\", fn)\n\t\t}\n\t\tif cfg.DisableHostnameValidation {\n\t\t\thelper.overrideVar(\"VESPA_TLS_HOSTNAME_VALIDATION_DISABLED\", \"1\")\n\t\t} else {\n\t\t\thelper.unsetVar(\"VESPA_TLS_HOSTNAME_VALIDATION_DISABLED\")\n\t\t}\n\t\tif os.Getenv(\"VESPA_TLS_INSECURE_MIXED_MODE\") == \"\" {\n\t\t\thelper.overrideVar(\"VESPA_TLS_ENABLED\", \"1\")\n\t\t}\n\t}\n\thelper.dump()\n}\n<commit_msg>drop currently-unused fields<commit_after>\/\/ Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.\n\npackage vespa\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"os\"\n)\n\ntype VespaTlsConfig struct {\n\tDisableHostnameValidation bool `json:\"disable-hostname-validation\"`\n\tFiles struct {\n\t\tPrivateKey string `json:\"private-key\"`\n\t\tCaCertificates string `json:\"ca-certificates\"`\n\t\tCertificates string `json:\"certificates\"`\n\t} `json:\"files\"`\n}\n\nfunc LoadTlsConfig() (*VespaTlsConfig, error) {\n\tfn := os.Getenv(\"VESPA_TLS_CONFIG_FILE\")\n\tif fn == \"\" {\n\t\treturn nil, nil\n\t}\n\tcontents, err := os.ReadFile(fn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcodec := json.NewDecoder(bytes.NewReader(contents))\n\tvar parsedJson VespaTlsConfig\n\terr = codec.Decode(&parsedJson)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &parsedJson, nil\n}\n\nfunc ExportSecurityEnvToSh() {\n\tLoadDefaultEnv()\n\tcfg, _ := LoadTlsConfig()\n\thelper := newShellEnvExporter()\n\tif cfg == nil {\n\t\thelper.unsetVar(\"VESPA_TLS_ENABLED\")\n\t} else {\n\t\tif fn := cfg.Files.PrivateKey; fn != \"\" {\n\t\t\thelper.overrideVar(\"VESPA_TLS_PRIVATE_KEY\", fn)\n\t\t}\n\t\tif fn := cfg.Files.CaCertificates; fn != \"\" {\n\t\t\thelper.overrideVar(\"VESPA_TLS_CA_CERT\", fn)\n\t\t}\n\t\tif fn := cfg.Files.Certificates; fn != \"\" {\n\t\t\thelper.overrideVar(\"VESPA_TLS_CERT\", fn)\n\t\t}\n\t\tif cfg.DisableHostnameValidation {\n\t\t\thelper.overrideVar(\"VESPA_TLS_HOSTNAME_VALIDATION_DISABLED\", \"1\")\n\t\t} else {\n\t\t\thelper.unsetVar(\"VESPA_TLS_HOSTNAME_VALIDATION_DISABLED\")\n\t\t}\n\t\tif os.Getenv(\"VESPA_TLS_INSECURE_MIXED_MODE\") == \"\" {\n\t\t\thelper.overrideVar(\"VESPA_TLS_ENABLED\", \"1\")\n\t\t}\n\t}\n\thelper.dump()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2015 Jean-Philippe Couture\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in all\n\/\/ copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n\/\/ SOFTWARE.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nfunc main() {\n\tif len(os.Args) < 3 {\n\t\tprintUsage()\n\t\treturn\n\t}\n\tfn := os.Args[1]\n\tcmd := os.Args[2]\n\targs := os.Args[2:]\n\n\tsetEnvVars(loadEnvFile(fn))\n\n\tbinary, lookErr := exec.LookPath(cmd)\n\tif lookErr != nil {\n\t\tbinary = cmd\n\t}\n\n\texecErr := syscall.Exec(binary, args, os.Environ())\n\tif execErr != nil {\n\t\tfmt.Println(\"[Err] Cannot execute:\", binary)\n\t}\n}\n\nfunc printUsage() {\n\tusage := `nv - context specific environment variables\nUsage: nv <env file> <command> [arguments...]`\n\tfmt.Println(usage)\n}\n\nfunc loadEnvFile(fn string) []string {\n\tdat, err := ioutil.ReadFile(fn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tvars := make([]string, 0)\n\tlines := bytes.Split(dat, []byte(\"\\n\"))\n\tfor _, line := range lines {\n\t\tif len(line) > 0 {\n\t\t\tvars = append(vars, string(line))\n\t\t}\n\t}\n\treturn vars\n}\n\nfunc setEnvVars(envVars []string) {\n\tfor _, envVar := range envVars {\n\t\tresult := strings.Split(envVar, \"=\")\n\t\tif len(result) >= 2 {\n\t\t\tname := result[0]\n\t\t\tvalue := strings.Join(result[1:], \"=\")\n\t\t\tos.Setenv(name, value)\n\t\t}\n\t}\n}\n<commit_msg>Check if the .env file exists and is readable.<commit_after>\/\/ Copyright (c) 2015 Jean-Philippe Couture\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in all\n\/\/ copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n\/\/ SOFTWARE.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nfunc main() {\n\tif len(os.Args) < 3 {\n\t\tprintUsage()\n\t\tos.Exit(-1)\n\t}\n\n\tfn := os.Args[1]\n\tcmd := os.Args[2]\n\targs := os.Args[2:]\n\n\tif !fileExists(fn) {\n\t\tfmt.Printf(\"[Err] '%s': No such file or directory\\n\", fn)\n\t\tos.Exit(-1)\n\t}\n\n\tif !fileReadable(fn) {\n\t\tfmt.Printf(\"[Err] '%s': Permission denied\\n\", fn)\n\t\tos.Exit(-1)\n\t}\n\n\tsetEnvVars(loadEnvFile(fn))\n\n\tbinary, lookErr := exec.LookPath(cmd)\n\tif lookErr != nil {\n\t\tbinary = cmd\n\t}\n\n\texecErr := syscall.Exec(binary, args, os.Environ())\n\tif execErr != nil {\n\t\tfmt.Println(\"[Err] Cannot execute:\", binary)\n\t\tos.Exit(-1)\n\t}\n}\n\nfunc printUsage() {\n\tusage := `nv - context specific environment variables\nUsage: nv <env file> <command> [arguments...]`\n\tfmt.Println(usage)\n}\n\nfunc loadEnvFile(fn string) []string {\n\tdat, err := ioutil.ReadFile(fn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tvars := make([]string, 0)\n\tlines := bytes.Split(dat, []byte(\"\\n\"))\n\tfor _, line := range lines {\n\t\tif len(line) > 0 {\n\t\t\tvars = append(vars, string(line))\n\t\t}\n\t}\n\treturn vars\n}\n\nfunc setEnvVars(envVars []string) {\n\tfor _, envVar := range envVars {\n\t\tresult := strings.Split(envVar, \"=\")\n\t\tif len(result) >= 2 {\n\t\t\tname := result[0]\n\t\t\tvalue := strings.Join(result[1:], \"=\")\n\t\t\tos.Setenv(name, value)\n\t\t}\n\t}\n}\n\nfunc fileExists(filename string) bool {\n\tif _, err := os.Stat(filename); os.IsNotExist(err) {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc fileReadable(filename string) bool {\n\tif _, err := os.Stat(filename); err == nil {\n\t\treturn false\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package dockerclient\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"log\"\n)\n\ntype ProgressDetail struct {\n\tCurrent int64\n\tTotal int64\n\tStart int64\n}\n\ntype ErrorDetail struct {\n\tCode int\n\tMessage string\n}\n\ntype BuildResponse []*JSONMessage\n\nfunc (rsp BuildResponse) Last() *JSONMessage {\n\tif len(rsp) == 0 {\n\t\treturn nil\n\t}\n\treturn rsp[len(rsp)-1]\n}\n\nfunc (rsp BuildResponse) ImageId() string {\n\tlast := rsp.Last()\n\tif last == nil {\n\t\treturn \"\"\n\t}\n\tm := imageIdRegexp.FindStringSubmatch(last.Stream)\n\tif len(m) == 2 {\n\t\treturn m[1]\n\t}\n\treturn \"\"\n}\n\nfunc (dh *DockerHost) handleBuildImageJson(r io.Reader, f func(s *JSONMessage)) (rsp BuildResponse, e error) {\n\tscanner := bufio.NewReader(r)\n\tbuf := &bytes.Buffer{}\n\tfor {\n\t\tb, e := scanner.ReadBytes('}')\n\t\tif e == io.EOF {\n\t\t\tbreak\n\t\t} else if e != nil {\n\t\t\treturn nil, e\n\t\t}\n\t\tbuf.Write(b)\n\t\tstream := &JSONMessage{}\n\t\te = json.Unmarshal(buf.Bytes(), stream)\n\t\tif e != nil {\n\t\t\tif e.Error() == \"unexpected end of JSON input\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Printf(\"ERROR: %s => %s\", e.Error(), buf.String())\n\t\t\treturn nil, e\n\t\t}\n\t\tif f != nil {\n\t\t\tf(stream)\n\t\t}\n\t\trsp = append(rsp, stream)\n\t\tbuf.Reset()\n\t}\n\treturn rsp, nil\n}\n\nfunc (dh *DockerHost) handleBuildImagePlain(r io.Reader, f func(s *JSONMessage)) (rsp BuildResponse, e error) {\n\treader := bufio.NewReader(r)\n\tfor {\n\t\tb, e := reader.ReadString('\\n')\n\t\tif e == io.EOF {\n\t\t\tbreak\n\t\t} else if e != nil {\n\t\t\treturn nil, e\n\t\t}\n\t\ts := &JSONMessage{Stream: string(b)}\n\t\trsp = append(rsp, s)\n\t\tf(s)\n\t}\n\treturn rsp, nil\n}\n<commit_msg>add ScanJson method<commit_after>package dockerclient\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"log\"\n)\n\ntype ProgressDetail struct {\n\tCurrent int64\n\tTotal int64\n\tStart int64\n}\n\ntype ErrorDetail struct {\n\tCode int\n\tMessage string\n}\n\ntype BuildResponse []*JSONMessage\n\nfunc (rsp BuildResponse) Last() *JSONMessage {\n\tif len(rsp) == 0 {\n\t\treturn nil\n\t}\n\treturn rsp[len(rsp)-1]\n}\n\nfunc (rsp BuildResponse) ImageId() string {\n\tlast := rsp.Last()\n\tif last == nil {\n\t\treturn \"\"\n\t}\n\tm := imageIdRegexp.FindStringSubmatch(last.Stream)\n\tif len(m) == 2 {\n\t\treturn m[1]\n\t}\n\treturn \"\"\n}\n\nfunc ScanJson(r io.Reader, f func(b []byte) error) error {\n\tscanner := bufio.NewReader(r)\n\tbuf := &bytes.Buffer{}\n\tfor {\n\t\tb, e := scanner.ReadBytes('}')\n\t\tif e == io.EOF {\n\t\t\tbreak\n\t\t} else if e != nil {\n\t\t\treturn e\n\t\t}\n\t\te = f(b)\n\t\tif e != nil {\n\t\t\treturn e\n\t\t}\n\t\tbuf.Reset()\n\t}\n\treturn nil\n}\n\nfunc (dh *DockerHost) handleBuildImageJson(r io.Reader, f func(s *JSONMessage)) (rsp BuildResponse, e error) {\n\tscanner := bufio.NewReader(r)\n\tbuf := &bytes.Buffer{}\n\tfor {\n\t\tb, e := scanner.ReadBytes('}')\n\t\tif e == io.EOF {\n\t\t\tbreak\n\t\t} else if e != nil {\n\t\t\treturn nil, e\n\t\t}\n\t\tbuf.Write(b)\n\t\tstream := &JSONMessage{}\n\t\te = json.Unmarshal(buf.Bytes(), stream)\n\t\tif e != nil {\n\t\t\tif e.Error() == \"unexpected end of JSON input\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Printf(\"ERROR: %s => %s\", e.Error(), buf.String())\n\t\t\treturn nil, e\n\t\t}\n\t\tif f != nil {\n\t\t\tf(stream)\n\t\t}\n\t\trsp = append(rsp, stream)\n\t\tbuf.Reset()\n\t}\n\treturn rsp, nil\n}\n\nfunc (dh *DockerHost) handleBuildImagePlain(r io.Reader, f func(s *JSONMessage)) (rsp BuildResponse, e error) {\n\treader := bufio.NewReader(r)\n\tfor {\n\t\tb, e := reader.ReadString('\\n')\n\t\tif e == io.EOF {\n\t\t\tbreak\n\t\t} else if e != nil {\n\t\t\treturn nil, e\n\t\t}\n\t\ts := &JSONMessage{Stream: string(b)}\n\t\trsp = append(rsp, s)\n\t\tf(s)\n\t}\n\treturn rsp, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package relay\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"time\"\n\n\tbasic \"github.com\/libp2p\/go-libp2p\/p2p\/host\/basic\"\n\n\tautonat \"github.com\/libp2p\/go-libp2p-autonat\"\n\tdiscovery \"github.com\/libp2p\/go-libp2p-discovery\"\n\thost \"github.com\/libp2p\/go-libp2p-host\"\n\tinet \"github.com\/libp2p\/go-libp2p-net\"\n\tpeer \"github.com\/libp2p\/go-libp2p-peer\"\n\tpstore \"github.com\/libp2p\/go-libp2p-peerstore\"\n\tma \"github.com\/multiformats\/go-multiaddr\"\n\tmanet \"github.com\/multiformats\/go-multiaddr-net\"\n)\n\nvar DesiredRelays = 3\n\n\/\/ AutoRelayHost is a Host that uses relays for connectivity when a NAT is detected.\ntype AutoRelayHost struct {\n\t*basic.BasicHost\n\tdiscover discovery.Discoverer\n\tautonat autonat.AutoNAT\n\taddrsF basic.AddrsFactory\n\n\tdisconnect chan struct{}\n\n\tmx sync.Mutex\n\trelays map[peer.ID]pstore.PeerInfo\n\taddrs []ma.Multiaddr\n}\n\nfunc NewAutoRelayHost(ctx context.Context, bhost *basic.BasicHost, discover discovery.Discoverer) *AutoRelayHost {\n\tautonat := autonat.NewAutoNAT(ctx, bhost)\n\th := &AutoRelayHost{\n\t\tBasicHost: bhost,\n\t\tdiscover: discover,\n\t\tautonat: autonat,\n\t\taddrsF: bhost.AddrsFactory,\n\t\trelays: make(map[peer.ID]pstore.PeerInfo),\n\t\tdisconnect: make(chan struct{}, 1),\n\t}\n\tbhost.AddrsFactory = h.hostAddrs\n\tbhost.Network().Notify(h)\n\tgo h.background(ctx)\n\treturn h\n}\n\nfunc (h *AutoRelayHost) hostAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {\n\th.mx.Lock()\n\tdefer h.mx.Unlock()\n\tif h.addrs != nil && h.autonat.Status() == autonat.NATStatusPrivate {\n\t\treturn h.addrs\n\t} else {\n\t\treturn h.addrsF(addrs)\n\t}\n}\n\nfunc (h *AutoRelayHost) background(ctx context.Context) {\n\tselect {\n\tcase <-time.After(autonat.AutoNATBootDelay + 30*time.Second):\n\tcase <-ctx.Done():\n\t\treturn\n\t}\n\n\tfor {\n\t\twait := autonat.AutoNATRefreshInterval\n\t\tswitch h.autonat.Status() {\n\t\tcase autonat.NATStatusUnknown:\n\t\t\twait = autonat.AutoNATRetryInterval\n\t\tcase autonat.NATStatusPublic:\n\t\tcase autonat.NATStatusPrivate:\n\t\t\th.findRelays(ctx)\n\t\t}\n\n\t\tselect {\n\t\tcase <-h.disconnect:\n\t\t\t\/\/ invalidate addrs\n\t\t\th.mx.Lock()\n\t\t\th.addrs = nil\n\t\t\th.mx.Unlock()\n\t\tcase <-time.After(wait):\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (h *AutoRelayHost) findRelays(ctx context.Context) {\n\th.mx.Lock()\n\tif len(h.relays) >= DesiredRelays {\n\t\th.mx.Unlock()\n\t\treturn\n\t}\n\tneed := DesiredRelays - len(h.relays)\n\th.mx.Unlock()\n\n\tlimit := 20\n\tfor ; need > limit; limit *= 2 {\n\t}\n\n\tdctx, cancel := context.WithTimeout(ctx, 60*time.Second)\n\tpis, err := discovery.FindPeers(dctx, h.discover, \"\/libp2p\/relay\", limit)\n\tcancel()\n\tif err != nil {\n\t\tlog.Debugf(\"error discovering relays: %s\", err.Error())\n\t\treturn\n\t}\n\n\t\/\/ TODO better relay selection strategy; this just selects random relays\n\t\/\/ but we should probably use ping latency as the selection metric\n\tshuffleRelays(pis)\n\n\tupdate := 0\n\n\tfor _, pi := range pis {\n\t\th.mx.Lock()\n\t\tif _, ok := h.relays[pi.ID]; ok {\n\t\t\th.mx.Unlock()\n\t\t\tcontinue\n\t\t}\n\t\th.mx.Unlock()\n\n\t\tcctx, cancel := context.WithTimeout(ctx, 60*time.Second)\n\t\terr = h.Connect(cctx, pi)\n\t\tcancel()\n\t\tif err != nil {\n\t\t\tlog.Debugf(\"error connecting to relay %s: %s\", pi.ID, err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Debugf(\"connected to relay %s\", pi.ID)\n\t\th.mx.Lock()\n\t\th.relays[pi.ID] = pi\n\t\th.mx.Unlock()\n\n\t\tupdate++\n\t\tneed--\n\t\tif need == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif update > 0 || h.addrs == nil {\n\t\th.updateAddrs()\n\t}\n}\n\nfunc (h *AutoRelayHost) updateAddrs() {\n\th.doUpdateAddrs()\n\th.PushIdentify()\n}\n\nfunc (h *AutoRelayHost) doUpdateAddrs() {\n\th.mx.Lock()\n\tdefer h.mx.Unlock()\n\n\taddrs := h.addrsF(h.AllAddrs())\n\traddrs := make([]ma.Multiaddr, 0, len(addrs)+len(h.relays))\n\n\t\/\/ remove our public addresses from the list and replace them by just the public IP\n\tfor _, addr := range addrs {\n\t\tif manet.IsPublicAddr(addr) {\n\t\t\tip, err := addr.ValueForProtocol(ma.P_IP4)\n\t\t\tif err == nil {\n\t\t\t\tpub, err := ma.NewMultiaddr(fmt.Sprintf(\"\/ip4\/%s\", ip))\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\tif !containsAddr(raddrs, pub) {\n\t\t\t\t\traddrs = append(raddrs, pub)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tip, err = addr.ValueForProtocol(ma.P_IP6)\n\t\t\tif err == nil {\n\t\t\t\tpub, err := ma.NewMultiaddr(fmt.Sprintf(\"\/ip6\/%s\", ip))\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif !containsAddr(raddrs, pub) {\n\t\t\t\t\traddrs = append(raddrs, pub)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\traddrs = append(raddrs, addr)\n\t\t}\n\t}\n\n\tcircuit, err := ma.NewMultiaddr(\"\/p2p-circuit\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfor _, pi := range h.relays {\n\t\tfor _, addr := range pi.Addrs {\n\t\t\tif !manet.IsPrivateAddr(addr) {\n\t\t\t\tpub := addr.Encapsulate(circuit)\n\t\t\t\traddrs = append(raddrs, pub)\n\t\t\t}\n\t\t}\n\t}\n\n\th.addrs = raddrs\n}\n\nfunc shuffleRelays(pis []pstore.PeerInfo) {\n\tfor i := range pis {\n\t\tj := rand.Intn(i + 1)\n\t\tpis[i], pis[j] = pis[j], pis[i]\n\t}\n}\n\nfunc containsAddr(lst []ma.Multiaddr, addr ma.Multiaddr) bool {\n\tfor _, xaddr := range lst {\n\t\tif xaddr.Equal(addr) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ notify\nfunc (h *AutoRelayHost) Listen(inet.Network, ma.Multiaddr) {}\nfunc (h *AutoRelayHost) ListenClose(inet.Network, ma.Multiaddr) {}\nfunc (h *AutoRelayHost) Connected(inet.Network, inet.Conn) {}\n\nfunc (h *AutoRelayHost) Disconnected(_ inet.Network, c inet.Conn) {\n\tp := c.RemotePeer()\n\th.mx.Lock()\n\tdefer h.mx.Unlock()\n\tif _, ok := h.relays[p]; ok {\n\t\tdelete(h.relays, p)\n\t\tselect {\n\t\tcase h.disconnect <- struct{}{}:\n\t\tdefault:\n\t\t}\n\t}\n}\n\nfunc (h *AutoRelayHost) OpenedStream(inet.Network, inet.Stream) {}\nfunc (h *AutoRelayHost) ClosedStream(inet.Network, inet.Stream) {}\n\nvar _ host.Host = (*AutoRelayHost)(nil)\n<commit_msg>configurable boot delay for autorelay<commit_after>package relay\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"time\"\n\n\tbasic \"github.com\/libp2p\/go-libp2p\/p2p\/host\/basic\"\n\n\tautonat \"github.com\/libp2p\/go-libp2p-autonat\"\n\tdiscovery \"github.com\/libp2p\/go-libp2p-discovery\"\n\thost \"github.com\/libp2p\/go-libp2p-host\"\n\tinet \"github.com\/libp2p\/go-libp2p-net\"\n\tpeer \"github.com\/libp2p\/go-libp2p-peer\"\n\tpstore \"github.com\/libp2p\/go-libp2p-peerstore\"\n\tma \"github.com\/multiformats\/go-multiaddr\"\n\tmanet \"github.com\/multiformats\/go-multiaddr-net\"\n)\n\nvar (\n\tDesiredRelays = 3\n\n\tBootDelay = 90 * time.Second\n)\n\n\/\/ AutoRelayHost is a Host that uses relays for connectivity when a NAT is detected.\ntype AutoRelayHost struct {\n\t*basic.BasicHost\n\tdiscover discovery.Discoverer\n\tautonat autonat.AutoNAT\n\taddrsF basic.AddrsFactory\n\n\tdisconnect chan struct{}\n\n\tmx sync.Mutex\n\trelays map[peer.ID]pstore.PeerInfo\n\taddrs []ma.Multiaddr\n}\n\nfunc NewAutoRelayHost(ctx context.Context, bhost *basic.BasicHost, discover discovery.Discoverer) *AutoRelayHost {\n\tautonat := autonat.NewAutoNAT(ctx, bhost)\n\th := &AutoRelayHost{\n\t\tBasicHost: bhost,\n\t\tdiscover: discover,\n\t\tautonat: autonat,\n\t\taddrsF: bhost.AddrsFactory,\n\t\trelays: make(map[peer.ID]pstore.PeerInfo),\n\t\tdisconnect: make(chan struct{}, 1),\n\t}\n\tbhost.AddrsFactory = h.hostAddrs\n\tbhost.Network().Notify(h)\n\tgo h.background(ctx)\n\treturn h\n}\n\nfunc (h *AutoRelayHost) hostAddrs(addrs []ma.Multiaddr) []ma.Multiaddr {\n\th.mx.Lock()\n\tdefer h.mx.Unlock()\n\tif h.addrs != nil && h.autonat.Status() == autonat.NATStatusPrivate {\n\t\treturn h.addrs\n\t} else {\n\t\treturn h.addrsF(addrs)\n\t}\n}\n\nfunc (h *AutoRelayHost) background(ctx context.Context) {\n\tselect {\n\tcase <-time.After(autonat.AutoNATBootDelay + BootDelay):\n\tcase <-ctx.Done():\n\t\treturn\n\t}\n\n\tfor {\n\t\twait := autonat.AutoNATRefreshInterval\n\t\tswitch h.autonat.Status() {\n\t\tcase autonat.NATStatusUnknown:\n\t\t\twait = autonat.AutoNATRetryInterval\n\t\tcase autonat.NATStatusPublic:\n\t\tcase autonat.NATStatusPrivate:\n\t\t\th.findRelays(ctx)\n\t\t}\n\n\t\tselect {\n\t\tcase <-h.disconnect:\n\t\t\t\/\/ invalidate addrs\n\t\t\th.mx.Lock()\n\t\t\th.addrs = nil\n\t\t\th.mx.Unlock()\n\t\tcase <-time.After(wait):\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (h *AutoRelayHost) findRelays(ctx context.Context) {\n\th.mx.Lock()\n\tif len(h.relays) >= DesiredRelays {\n\t\th.mx.Unlock()\n\t\treturn\n\t}\n\tneed := DesiredRelays - len(h.relays)\n\th.mx.Unlock()\n\n\tlimit := 20\n\tfor ; need > limit; limit *= 2 {\n\t}\n\n\tdctx, cancel := context.WithTimeout(ctx, 60*time.Second)\n\tpis, err := discovery.FindPeers(dctx, h.discover, \"\/libp2p\/relay\", limit)\n\tcancel()\n\tif err != nil {\n\t\tlog.Debugf(\"error discovering relays: %s\", err.Error())\n\t\treturn\n\t}\n\n\t\/\/ TODO better relay selection strategy; this just selects random relays\n\t\/\/ but we should probably use ping latency as the selection metric\n\tshuffleRelays(pis)\n\n\tupdate := 0\n\n\tfor _, pi := range pis {\n\t\th.mx.Lock()\n\t\tif _, ok := h.relays[pi.ID]; ok {\n\t\t\th.mx.Unlock()\n\t\t\tcontinue\n\t\t}\n\t\th.mx.Unlock()\n\n\t\tcctx, cancel := context.WithTimeout(ctx, 60*time.Second)\n\t\terr = h.Connect(cctx, pi)\n\t\tcancel()\n\t\tif err != nil {\n\t\t\tlog.Debugf(\"error connecting to relay %s: %s\", pi.ID, err.Error())\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Debugf(\"connected to relay %s\", pi.ID)\n\t\th.mx.Lock()\n\t\th.relays[pi.ID] = pi\n\t\th.mx.Unlock()\n\n\t\tupdate++\n\t\tneed--\n\t\tif need == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif update > 0 || h.addrs == nil {\n\t\th.updateAddrs()\n\t}\n}\n\nfunc (h *AutoRelayHost) updateAddrs() {\n\th.doUpdateAddrs()\n\th.PushIdentify()\n}\n\nfunc (h *AutoRelayHost) doUpdateAddrs() {\n\th.mx.Lock()\n\tdefer h.mx.Unlock()\n\n\taddrs := h.addrsF(h.AllAddrs())\n\traddrs := make([]ma.Multiaddr, 0, len(addrs)+len(h.relays))\n\n\t\/\/ remove our public addresses from the list and replace them by just the public IP\n\tfor _, addr := range addrs {\n\t\tif manet.IsPublicAddr(addr) {\n\t\t\tip, err := addr.ValueForProtocol(ma.P_IP4)\n\t\t\tif err == nil {\n\t\t\t\tpub, err := ma.NewMultiaddr(fmt.Sprintf(\"\/ip4\/%s\", ip))\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\tif !containsAddr(raddrs, pub) {\n\t\t\t\t\traddrs = append(raddrs, pub)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tip, err = addr.ValueForProtocol(ma.P_IP6)\n\t\t\tif err == nil {\n\t\t\t\tpub, err := ma.NewMultiaddr(fmt.Sprintf(\"\/ip6\/%s\", ip))\n\t\t\t\tif err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif !containsAddr(raddrs, pub) {\n\t\t\t\t\traddrs = append(raddrs, pub)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\traddrs = append(raddrs, addr)\n\t\t}\n\t}\n\n\tcircuit, err := ma.NewMultiaddr(\"\/p2p-circuit\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfor _, pi := range h.relays {\n\t\tfor _, addr := range pi.Addrs {\n\t\t\tif !manet.IsPrivateAddr(addr) {\n\t\t\t\tpub := addr.Encapsulate(circuit)\n\t\t\t\traddrs = append(raddrs, pub)\n\t\t\t}\n\t\t}\n\t}\n\n\th.addrs = raddrs\n}\n\nfunc shuffleRelays(pis []pstore.PeerInfo) {\n\tfor i := range pis {\n\t\tj := rand.Intn(i + 1)\n\t\tpis[i], pis[j] = pis[j], pis[i]\n\t}\n}\n\nfunc containsAddr(lst []ma.Multiaddr, addr ma.Multiaddr) bool {\n\tfor _, xaddr := range lst {\n\t\tif xaddr.Equal(addr) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ notify\nfunc (h *AutoRelayHost) Listen(inet.Network, ma.Multiaddr) {}\nfunc (h *AutoRelayHost) ListenClose(inet.Network, ma.Multiaddr) {}\nfunc (h *AutoRelayHost) Connected(inet.Network, inet.Conn) {}\n\nfunc (h *AutoRelayHost) Disconnected(_ inet.Network, c inet.Conn) {\n\tp := c.RemotePeer()\n\th.mx.Lock()\n\tdefer h.mx.Unlock()\n\tif _, ok := h.relays[p]; ok {\n\t\tdelete(h.relays, p)\n\t\tselect {\n\t\tcase h.disconnect <- struct{}{}:\n\t\tdefault:\n\t\t}\n\t}\n}\n\nfunc (h *AutoRelayHost) OpenedStream(inet.Network, inet.Stream) {}\nfunc (h *AutoRelayHost) ClosedStream(inet.Network, inet.Stream) {}\n\nvar _ host.Host = (*AutoRelayHost)(nil)\n<|endoftext|>"} {"text":"<commit_before>package swarm\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\tmetrics \"github.com\/ipfs\/go-libp2p\/p2p\/metrics\"\n\tinet \"github.com\/ipfs\/go-libp2p\/p2p\/net\"\n\tpeer \"github.com\/ipfs\/go-libp2p\/p2p\/peer\"\n\ttestutil \"github.com\/ipfs\/go-libp2p\/testutil\"\n\n\tcontext \"gx\/ipfs\/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt\/go-net\/context\"\n\tma \"gx\/ipfs\/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz\/go-multiaddr\"\n)\n\nfunc EchoStreamHandler(stream inet.Stream) {\n\tgo func() {\n\t\tdefer stream.Close()\n\n\t\t\/\/ pull out the ipfs conn\n\t\tc := stream.Conn()\n\t\tlog.Infof(\"%s ponging to %s\", c.LocalPeer(), c.RemotePeer())\n\n\t\tbuf := make([]byte, 4)\n\n\t\tfor {\n\t\t\tif _, err := stream.Read(buf); err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tlog.Error(\"ping receive error:\", err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif !bytes.Equal(buf, []byte(\"ping\")) {\n\t\t\t\tlog.Errorf(\"ping receive error: ping != %s %v\", buf, buf)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif _, err := stream.Write([]byte(\"pong\")); err != nil {\n\t\t\t\tlog.Error(\"pond send error:\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc makeDialOnlySwarm(ctx context.Context, t *testing.T) *Swarm {\n\tid := testutil.RandIdentityOrFatal(t)\n\n\tpeerstore := peer.NewPeerstore()\n\tpeerstore.AddPubKey(id.ID(), id.PublicKey())\n\tpeerstore.AddPrivKey(id.ID(), id.PrivateKey())\n\n\tswarm, err := NewSwarm(ctx, nil, id.ID(), peerstore, metrics.NewBandwidthCounter())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tswarm.SetStreamHandler(EchoStreamHandler)\n\n\treturn swarm\n}\n\nfunc makeSwarms(ctx context.Context, t *testing.T, num int) []*Swarm {\n\tswarms := make([]*Swarm, 0, num)\n\n\tfor i := 0; i < num; i++ {\n\t\tlocalnp := testutil.RandPeerNetParamsOrFatal(t)\n\n\t\tpeerstore := peer.NewPeerstore()\n\t\tpeerstore.AddPubKey(localnp.ID, localnp.PubKey)\n\t\tpeerstore.AddPrivKey(localnp.ID, localnp.PrivKey)\n\n\t\taddrs := []ma.Multiaddr{localnp.Addr}\n\t\tswarm, err := NewSwarm(ctx, addrs, localnp.ID, peerstore, metrics.NewBandwidthCounter())\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tswarm.SetStreamHandler(EchoStreamHandler)\n\t\tswarms = append(swarms, swarm)\n\t}\n\n\treturn swarms\n}\n\nfunc connectSwarms(t *testing.T, ctx context.Context, swarms []*Swarm) {\n\n\tvar wg sync.WaitGroup\n\tconnect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {\n\t\t\/\/ TODO: make a DialAddr func.\n\t\ts.peers.AddAddr(dst, addr, peer.PermanentAddrTTL)\n\t\tif _, err := s.Dial(ctx, dst); err != nil {\n\t\t\tt.Fatal(\"error swarm dialing to peer\", err)\n\t\t}\n\t\twg.Done()\n\t}\n\n\tlog.Info(\"Connecting swarms simultaneously.\")\n\tfor _, s1 := range swarms {\n\t\tfor _, s2 := range swarms {\n\t\t\tif s2.local != s1.local { \/\/ don't connect to self.\n\t\t\t\twg.Add(1)\n\t\t\t\tconnect(s1, s2.LocalPeer(), s2.ListenAddresses()[0]) \/\/ try the first.\n\t\t\t}\n\t\t}\n\t}\n\twg.Wait()\n\n\tfor _, s := range swarms {\n\t\tlog.Infof(\"%s swarm routing table: %s\", s.local, s.Peers())\n\t}\n}\n\nfunc SubtestSwarm(t *testing.T, SwarmNum int, MsgNum int) {\n\t\/\/ t.Skip(\"skipping for another test\")\n\n\tctx := context.Background()\n\tswarms := makeSwarms(ctx, t, SwarmNum)\n\n\t\/\/ connect everyone\n\tconnectSwarms(t, ctx, swarms)\n\n\t\/\/ ping\/pong\n\tfor _, s1 := range swarms {\n\t\tlog.Debugf(\"-------------------------------------------------------\")\n\t\tlog.Debugf(\"%s ping pong round\", s1.local)\n\t\tlog.Debugf(\"-------------------------------------------------------\")\n\n\t\t_, cancel := context.WithCancel(ctx)\n\t\tgot := map[peer.ID]int{}\n\t\terrChan := make(chan error, MsgNum*len(swarms))\n\t\tstreamChan := make(chan *Stream, MsgNum)\n\n\t\t\/\/ send out \"ping\" x MsgNum to every peer\n\t\tgo func() {\n\t\t\tdefer close(streamChan)\n\n\t\t\tvar wg sync.WaitGroup\n\t\t\tsend := func(p peer.ID) {\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\t\/\/ first, one stream per peer (nice)\n\t\t\t\tstream, err := s1.NewStreamWithPeer(ctx, p)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrChan <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ send out ping!\n\t\t\t\tfor k := 0; k < MsgNum; k++ { \/\/ with k messages\n\t\t\t\t\tmsg := \"ping\"\n\t\t\t\t\tlog.Debugf(\"%s %s %s (%d)\", s1.local, msg, p, k)\n\t\t\t\t\tif _, err := stream.Write([]byte(msg)); err != nil {\n\t\t\t\t\t\terrChan <- err\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t\/\/ read it later\n\t\t\t\tstreamChan <- stream\n\t\t\t}\n\n\t\t\tfor _, s2 := range swarms {\n\t\t\t\tif s2.local == s1.local {\n\t\t\t\t\tcontinue \/\/ dont send to self...\n\t\t\t\t}\n\n\t\t\t\twg.Add(1)\n\t\t\t\tgo send(s2.local)\n\t\t\t}\n\t\t\twg.Wait()\n\t\t}()\n\n\t\t\/\/ receive \"pong\" x MsgNum from every peer\n\t\tgo func() {\n\t\t\tdefer close(errChan)\n\t\t\tcount := 0\n\t\t\tcountShouldBe := MsgNum * (len(swarms) - 1)\n\t\t\tfor stream := range streamChan { \/\/ one per peer\n\t\t\t\tdefer stream.Close()\n\n\t\t\t\t\/\/ get peer on the other side\n\t\t\t\tp := stream.Conn().RemotePeer()\n\n\t\t\t\t\/\/ receive pings\n\t\t\t\tmsgCount := 0\n\t\t\t\tmsg := make([]byte, 4)\n\t\t\t\tfor k := 0; k < MsgNum; k++ { \/\/ with k messages\n\n\t\t\t\t\t\/\/ read from the stream\n\t\t\t\t\tif _, err := stream.Read(msg); err != nil {\n\t\t\t\t\t\terrChan <- err\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif string(msg) != \"pong\" {\n\t\t\t\t\t\terrChan <- fmt.Errorf(\"unexpected message: %s\", msg)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tlog.Debugf(\"%s %s %s (%d)\", s1.local, msg, p, k)\n\t\t\t\t\tmsgCount++\n\t\t\t\t}\n\n\t\t\t\tgot[p] = msgCount\n\t\t\t\tcount += msgCount\n\t\t\t}\n\n\t\t\tif count != countShouldBe {\n\t\t\t\terrChan <- fmt.Errorf(\"count mismatch: %d != %d\", count, countShouldBe)\n\t\t\t}\n\t\t}()\n\n\t\t\/\/ check any errors (blocks till consumer is done)\n\t\tfor err := range errChan {\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err.Error())\n\t\t\t}\n\t\t}\n\n\t\tlog.Debugf(\"%s got pongs\", s1.local)\n\t\tif (len(swarms) - 1) != len(got) {\n\t\t\tt.Errorf(\"got (%d) less messages than sent (%d).\", len(got), len(swarms))\n\t\t}\n\n\t\tfor p, n := range got {\n\t\t\tif n != MsgNum {\n\t\t\t\tt.Error(\"peer did not get all msgs\", p, n, \"\/\", MsgNum)\n\t\t\t}\n\t\t}\n\n\t\tcancel()\n\t\t<-time.After(10 * time.Millisecond)\n\t}\n\n\tfor _, s := range swarms {\n\t\ts.Close()\n\t}\n}\n\nfunc TestSwarm(t *testing.T) {\n\t\/\/ t.Skip(\"skipping for another test\")\n\tt.Parallel()\n\n\t\/\/ msgs := 1000\n\tmsgs := 100\n\tswarms := 5\n\tSubtestSwarm(t, swarms, msgs)\n}\n\nfunc TestBasicSwarm(t *testing.T) {\n\t\/\/ t.Skip(\"skipping for another test\")\n\tt.Parallel()\n\n\tmsgs := 1\n\tswarms := 2\n\tSubtestSwarm(t, swarms, msgs)\n}\n\nfunc TestConnHandler(t *testing.T) {\n\t\/\/ t.Skip(\"skipping for another test\")\n\tt.Parallel()\n\n\tctx := context.Background()\n\tswarms := makeSwarms(ctx, t, 5)\n\n\tgotconn := make(chan struct{}, 10)\n\tswarms[0].SetConnHandler(func(conn *Conn) {\n\t\tgotconn <- struct{}{}\n\t})\n\n\tconnectSwarms(t, ctx, swarms)\n\n\t<-time.After(time.Millisecond)\n\t\/\/ should've gotten 5 by now.\n\n\tswarms[0].SetConnHandler(nil)\n\n\texpect := 4\n\tfor i := 0; i < expect; i++ {\n\t\tselect {\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Fatal(\"failed to get connections\")\n\t\tcase <-gotconn:\n\t\t}\n\t}\n\n\tselect {\n\tcase <-gotconn:\n\t\tt.Fatalf(\"should have connected to %d swarms\", expect)\n\tdefault:\n\t}\n}\n\nfunc TestAddrBlocking(t *testing.T) {\n\tctx := context.Background()\n\tswarms := makeSwarms(ctx, t, 2)\n\n\tswarms[0].SetConnHandler(func(conn *Conn) {\n\t\tt.Fatal(\"no connections should happen!\")\n\t})\n\n\t_, block, err := net.ParseCIDR(\"127.0.0.1\/8\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tswarms[1].Filters.AddDialFilter(block)\n\n\tswarms[1].peers.AddAddr(swarms[0].LocalPeer(), swarms[0].ListenAddresses()[0], peer.PermanentAddrTTL)\n\t_, err = swarms[1].Dial(ctx, swarms[0].LocalPeer())\n\tif err == nil {\n\t\tt.Fatal(\"dial should have failed\")\n\t}\n\n\tswarms[0].peers.AddAddr(swarms[1].LocalPeer(), swarms[1].ListenAddresses()[0], peer.PermanentAddrTTL)\n\t_, err = swarms[0].Dial(ctx, swarms[1].LocalPeer())\n\tif err == nil {\n\t\tt.Fatal(\"dial should have failed\")\n\t}\n}\n\nfunc TestFilterBounds(t *testing.T) {\n\tctx := context.Background()\n\tswarms := makeSwarms(ctx, t, 2)\n\n\tconns := make(chan struct{}, 8)\n\tswarms[0].SetConnHandler(func(conn *Conn) {\n\t\tconns <- struct{}{}\n\t})\n\n\t\/\/ Address that we wont be dialing from\n\t_, block, err := net.ParseCIDR(\"192.0.0.1\/8\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ set filter on both sides, shouldnt matter\n\tswarms[1].Filters.AddDialFilter(block)\n\tswarms[0].Filters.AddDialFilter(block)\n\n\tconnectSwarms(t, ctx, swarms)\n\n\tselect {\n\tcase <-time.After(time.Second):\n\t\tt.Fatal(\"should have gotten connection\")\n\tcase <-conns:\n\t\tt.Log(\"got connect\")\n\t}\n}\n<commit_msg>Fix address filtering for \/ip6, add tests<commit_after>package swarm\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\tmetrics \"github.com\/ipfs\/go-libp2p\/p2p\/metrics\"\n\tinet \"github.com\/ipfs\/go-libp2p\/p2p\/net\"\n\tpeer \"github.com\/ipfs\/go-libp2p\/p2p\/peer\"\n\ttestutil \"github.com\/ipfs\/go-libp2p\/testutil\"\n\n\tcontext \"gx\/ipfs\/QmZy2y8t9zQH2a1b8q2ZSLKp17ATuJoCNxxyMFG5qFExpt\/go-net\/context\"\n\tma \"gx\/ipfs\/QmcobAGsCjYt5DXoq9et9L8yR8er7o7Cu3DTvpaq12jYSz\/go-multiaddr\"\n)\n\nfunc EchoStreamHandler(stream inet.Stream) {\n\tgo func() {\n\t\tdefer stream.Close()\n\n\t\t\/\/ pull out the ipfs conn\n\t\tc := stream.Conn()\n\t\tlog.Infof(\"%s ponging to %s\", c.LocalPeer(), c.RemotePeer())\n\n\t\tbuf := make([]byte, 4)\n\n\t\tfor {\n\t\t\tif _, err := stream.Read(buf); err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tlog.Error(\"ping receive error:\", err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif !bytes.Equal(buf, []byte(\"ping\")) {\n\t\t\t\tlog.Errorf(\"ping receive error: ping != %s %v\", buf, buf)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif _, err := stream.Write([]byte(\"pong\")); err != nil {\n\t\t\t\tlog.Error(\"pond send error:\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc makeDialOnlySwarm(ctx context.Context, t *testing.T) *Swarm {\n\tid := testutil.RandIdentityOrFatal(t)\n\n\tpeerstore := peer.NewPeerstore()\n\tpeerstore.AddPubKey(id.ID(), id.PublicKey())\n\tpeerstore.AddPrivKey(id.ID(), id.PrivateKey())\n\n\tswarm, err := NewSwarm(ctx, nil, id.ID(), peerstore, metrics.NewBandwidthCounter())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tswarm.SetStreamHandler(EchoStreamHandler)\n\n\treturn swarm\n}\n\nfunc makeSwarms(ctx context.Context, t *testing.T, num int) []*Swarm {\n\tswarms := make([]*Swarm, 0, num)\n\n\tfor i := 0; i < num; i++ {\n\t\tlocalnp := testutil.RandPeerNetParamsOrFatal(t)\n\n\t\tpeerstore := peer.NewPeerstore()\n\t\tpeerstore.AddPubKey(localnp.ID, localnp.PubKey)\n\t\tpeerstore.AddPrivKey(localnp.ID, localnp.PrivKey)\n\n\t\taddrs := []ma.Multiaddr{localnp.Addr}\n\t\tswarm, err := NewSwarm(ctx, addrs, localnp.ID, peerstore, metrics.NewBandwidthCounter())\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tswarm.SetStreamHandler(EchoStreamHandler)\n\t\tswarms = append(swarms, swarm)\n\t}\n\n\treturn swarms\n}\n\nfunc connectSwarms(t *testing.T, ctx context.Context, swarms []*Swarm) {\n\n\tvar wg sync.WaitGroup\n\tconnect := func(s *Swarm, dst peer.ID, addr ma.Multiaddr) {\n\t\t\/\/ TODO: make a DialAddr func.\n\t\ts.peers.AddAddr(dst, addr, peer.PermanentAddrTTL)\n\t\tif _, err := s.Dial(ctx, dst); err != nil {\n\t\t\tt.Fatal(\"error swarm dialing to peer\", err)\n\t\t}\n\t\twg.Done()\n\t}\n\n\tlog.Info(\"Connecting swarms simultaneously.\")\n\tfor _, s1 := range swarms {\n\t\tfor _, s2 := range swarms {\n\t\t\tif s2.local != s1.local { \/\/ don't connect to self.\n\t\t\t\twg.Add(1)\n\t\t\t\tconnect(s1, s2.LocalPeer(), s2.ListenAddresses()[0]) \/\/ try the first.\n\t\t\t}\n\t\t}\n\t}\n\twg.Wait()\n\n\tfor _, s := range swarms {\n\t\tlog.Infof(\"%s swarm routing table: %s\", s.local, s.Peers())\n\t}\n}\n\nfunc SubtestSwarm(t *testing.T, SwarmNum int, MsgNum int) {\n\t\/\/ t.Skip(\"skipping for another test\")\n\n\tctx := context.Background()\n\tswarms := makeSwarms(ctx, t, SwarmNum)\n\n\t\/\/ connect everyone\n\tconnectSwarms(t, ctx, swarms)\n\n\t\/\/ ping\/pong\n\tfor _, s1 := range swarms {\n\t\tlog.Debugf(\"-------------------------------------------------------\")\n\t\tlog.Debugf(\"%s ping pong round\", s1.local)\n\t\tlog.Debugf(\"-------------------------------------------------------\")\n\n\t\t_, cancel := context.WithCancel(ctx)\n\t\tgot := map[peer.ID]int{}\n\t\terrChan := make(chan error, MsgNum*len(swarms))\n\t\tstreamChan := make(chan *Stream, MsgNum)\n\n\t\t\/\/ send out \"ping\" x MsgNum to every peer\n\t\tgo func() {\n\t\t\tdefer close(streamChan)\n\n\t\t\tvar wg sync.WaitGroup\n\t\t\tsend := func(p peer.ID) {\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\t\/\/ first, one stream per peer (nice)\n\t\t\t\tstream, err := s1.NewStreamWithPeer(ctx, p)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrChan <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ send out ping!\n\t\t\t\tfor k := 0; k < MsgNum; k++ { \/\/ with k messages\n\t\t\t\t\tmsg := \"ping\"\n\t\t\t\t\tlog.Debugf(\"%s %s %s (%d)\", s1.local, msg, p, k)\n\t\t\t\t\tif _, err := stream.Write([]byte(msg)); err != nil {\n\t\t\t\t\t\terrChan <- err\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t\/\/ read it later\n\t\t\t\tstreamChan <- stream\n\t\t\t}\n\n\t\t\tfor _, s2 := range swarms {\n\t\t\t\tif s2.local == s1.local {\n\t\t\t\t\tcontinue \/\/ dont send to self...\n\t\t\t\t}\n\n\t\t\t\twg.Add(1)\n\t\t\t\tgo send(s2.local)\n\t\t\t}\n\t\t\twg.Wait()\n\t\t}()\n\n\t\t\/\/ receive \"pong\" x MsgNum from every peer\n\t\tgo func() {\n\t\t\tdefer close(errChan)\n\t\t\tcount := 0\n\t\t\tcountShouldBe := MsgNum * (len(swarms) - 1)\n\t\t\tfor stream := range streamChan { \/\/ one per peer\n\t\t\t\tdefer stream.Close()\n\n\t\t\t\t\/\/ get peer on the other side\n\t\t\t\tp := stream.Conn().RemotePeer()\n\n\t\t\t\t\/\/ receive pings\n\t\t\t\tmsgCount := 0\n\t\t\t\tmsg := make([]byte, 4)\n\t\t\t\tfor k := 0; k < MsgNum; k++ { \/\/ with k messages\n\n\t\t\t\t\t\/\/ read from the stream\n\t\t\t\t\tif _, err := stream.Read(msg); err != nil {\n\t\t\t\t\t\terrChan <- err\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif string(msg) != \"pong\" {\n\t\t\t\t\t\terrChan <- fmt.Errorf(\"unexpected message: %s\", msg)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tlog.Debugf(\"%s %s %s (%d)\", s1.local, msg, p, k)\n\t\t\t\t\tmsgCount++\n\t\t\t\t}\n\n\t\t\t\tgot[p] = msgCount\n\t\t\t\tcount += msgCount\n\t\t\t}\n\n\t\t\tif count != countShouldBe {\n\t\t\t\terrChan <- fmt.Errorf(\"count mismatch: %d != %d\", count, countShouldBe)\n\t\t\t}\n\t\t}()\n\n\t\t\/\/ check any errors (blocks till consumer is done)\n\t\tfor err := range errChan {\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err.Error())\n\t\t\t}\n\t\t}\n\n\t\tlog.Debugf(\"%s got pongs\", s1.local)\n\t\tif (len(swarms) - 1) != len(got) {\n\t\t\tt.Errorf(\"got (%d) less messages than sent (%d).\", len(got), len(swarms))\n\t\t}\n\n\t\tfor p, n := range got {\n\t\t\tif n != MsgNum {\n\t\t\t\tt.Error(\"peer did not get all msgs\", p, n, \"\/\", MsgNum)\n\t\t\t}\n\t\t}\n\n\t\tcancel()\n\t\t<-time.After(10 * time.Millisecond)\n\t}\n\n\tfor _, s := range swarms {\n\t\ts.Close()\n\t}\n}\n\nfunc TestSwarm(t *testing.T) {\n\t\/\/ t.Skip(\"skipping for another test\")\n\tt.Parallel()\n\n\t\/\/ msgs := 1000\n\tmsgs := 100\n\tswarms := 5\n\tSubtestSwarm(t, swarms, msgs)\n}\n\nfunc TestBasicSwarm(t *testing.T) {\n\t\/\/ t.Skip(\"skipping for another test\")\n\tt.Parallel()\n\n\tmsgs := 1\n\tswarms := 2\n\tSubtestSwarm(t, swarms, msgs)\n}\n\nfunc TestConnHandler(t *testing.T) {\n\t\/\/ t.Skip(\"skipping for another test\")\n\tt.Parallel()\n\n\tctx := context.Background()\n\tswarms := makeSwarms(ctx, t, 5)\n\n\tgotconn := make(chan struct{}, 10)\n\tswarms[0].SetConnHandler(func(conn *Conn) {\n\t\tgotconn <- struct{}{}\n\t})\n\n\tconnectSwarms(t, ctx, swarms)\n\n\t<-time.After(time.Millisecond)\n\t\/\/ should've gotten 5 by now.\n\n\tswarms[0].SetConnHandler(nil)\n\n\texpect := 4\n\tfor i := 0; i < expect; i++ {\n\t\tselect {\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Fatal(\"failed to get connections\")\n\t\tcase <-gotconn:\n\t\t}\n\t}\n\n\tselect {\n\tcase <-gotconn:\n\t\tt.Fatalf(\"should have connected to %d swarms\", expect)\n\tdefault:\n\t}\n}\n\nfunc TestAddrBlocking(t *testing.T) {\n\tctx := context.Background()\n\tswarms := makeSwarms(ctx, t, 2)\n\n\tswarms[0].SetConnHandler(func(conn *Conn) {\n\t\tt.Fatalf(\"no connections should happen! -- %s\", conn)\n\t})\n\n\t_, block, err := net.ParseCIDR(\"127.0.0.1\/8\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tswarms[1].Filters.AddDialFilter(block)\n\n\tswarms[1].peers.AddAddr(swarms[0].LocalPeer(), swarms[0].ListenAddresses()[0], peer.PermanentAddrTTL)\n\t_, err = swarms[1].Dial(ctx, swarms[0].LocalPeer())\n\tif err == nil {\n\t\tt.Fatal(\"dial should have failed\")\n\t}\n\n\tswarms[0].peers.AddAddr(swarms[1].LocalPeer(), swarms[1].ListenAddresses()[0], peer.PermanentAddrTTL)\n\t_, err = swarms[0].Dial(ctx, swarms[1].LocalPeer())\n\tif err == nil {\n\t\tt.Fatal(\"dial should have failed\")\n\t}\n}\n\nfunc TestFilterBounds(t *testing.T) {\n\tctx := context.Background()\n\tswarms := makeSwarms(ctx, t, 2)\n\n\tconns := make(chan struct{}, 8)\n\tswarms[0].SetConnHandler(func(conn *Conn) {\n\t\tconns <- struct{}{}\n\t})\n\n\t\/\/ Address that we wont be dialing from\n\t_, block, err := net.ParseCIDR(\"192.0.0.1\/8\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ set filter on both sides, shouldnt matter\n\tswarms[1].Filters.AddDialFilter(block)\n\tswarms[0].Filters.AddDialFilter(block)\n\n\tconnectSwarms(t, ctx, swarms)\n\n\tselect {\n\tcase <-time.After(time.Second):\n\t\tt.Fatal(\"should have gotten connection\")\n\tcase <-conns:\n\t\tt.Log(\"got connect\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage save\n\nimport (\n\t\"os\"\n\n\t\"github.com\/jacobsa\/comeback\/graph\"\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype PathAndFileInfo struct {\n\t\/\/ The absolute path to the file (or directory, etc.).\n\tPath string\n\n\t\/\/ Information about the file, as returned by os.Lstat.\n\tInfo os.FileInfo\n}\n\n\/\/ Create a visitor that walks the directory hierarchy rooted at the given base\n\/\/ path, excluding any relative path that matches any of the supplied\n\/\/ exclusions, along with any of its descendents. Everything encountered and\n\/\/ not excluded will be emitted to the supplied channel in an unspecified\n\/\/ order. The channel will not be closed.\n\/\/\n\/\/ It is expected that node names are paths relative to the supplied base path.\n\/\/ In particular, to walk the entire hierarchy, use \"\" as the traversal root.\nfunc NewFileSystemVisitor(\n\tctx context.Context,\n\tbasePath string,\n\toutput chan<- PathAndFileInfo) (v graph.Visitor)\n<commit_msg>Fixed a build error.<commit_after>\/\/ Copyright 2015 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage save\n\nimport (\n\t\"os\"\n\n\t\"github.com\/jacobsa\/comeback\/graph\"\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype PathAndFileInfo struct {\n\t\/\/ The absolute path to the file (or directory, etc.).\n\tPath string\n\n\t\/\/ Information about the file, as returned by os.Lstat.\n\tInfo os.FileInfo\n}\n\n\/\/ Create a visitor that walks the directory hierarchy rooted at the given base\n\/\/ path, excluding any relative path that matches any of the supplied\n\/\/ exclusions, along with any of its descendents. Everything encountered and\n\/\/ not excluded will be emitted to the supplied channel in an unspecified\n\/\/ order. The channel will not be closed.\n\/\/\n\/\/ It is expected that node names are paths relative to the supplied base path.\n\/\/ In particular, to walk the entire hierarchy, use \"\" as the traversal root.\nfunc NewFileSystemVisitor(\n\tctx context.Context,\n\tbasePath string,\n\toutput chan<- PathAndFileInfo) (v graph.Visitor) {\n\tpanic(\"TODO\")\n}\n<|endoftext|>"} {"text":"<commit_before>package command\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/mitchellh\/cli\"\n\t\"sort\"\n\t\"strings\"\n)\n\n\/\/ InfoCommand is a Command implementation that queries a running\n\/\/ Consul agent for various debugging statistics for operators\ntype InfoCommand struct {\n\tUi cli.Ui\n}\n\nfunc (i *InfoCommand) Help() string {\n\thelpText := `\nUsage: consul info [options]\n\n\tProvides debugging information for operators\n\nOptions:\n\n -rpc-addr=127.0.0.1:8400 RPC address of the Consul agent.\n`\n\treturn strings.TrimSpace(helpText)\n}\n\nfunc (i *InfoCommand) Run(args []string) int {\n\tcmdFlags := flag.NewFlagSet(\"info\", flag.ContinueOnError)\n\tcmdFlags.Usage = func() { i.Ui.Output(i.Help()) }\n\trpcAddr := RPCAddrFlag(cmdFlags)\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\tclient, err := RPCClient(*rpcAddr)\n\tif err != nil {\n\t\ti.Ui.Error(fmt.Sprintf(\"Error connecting to Consul agent: %s\", err))\n\t\treturn 1\n\t}\n\tdefer client.Close()\n\n\tstats, err := client.Stats()\n\tif err != nil {\n\t\ti.Ui.Error(fmt.Sprintf(\"Error querying agent: %s\", err))\n\t\treturn 1\n\t}\n\n\t\/\/ Get the keys in sorted order\n\tkeys := make([]string, 0, len(stats))\n\tfor key := range stats {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Strings(keys)\n\n\t\/\/ Iterate over each top-level key\n\tfor _, key := range keys {\n\t\ti.Ui.Output(key + \":\")\n\n\t\t\/\/ Sort the sub-keys\n\t\tsubvals := stats[key]\n\t\tsubkeys := make([]string, 0, len(subvals))\n\t\tfor k := range subvals {\n\t\t\tsubkeys = append(subkeys, k)\n\t\t}\n\t\tsort.Strings(subkeys)\n\n\t\t\/\/ Iterate over the subkeys\n\t\tfor _, subkey := range subkeys {\n\t\t\tval := subvals[subkey]\n\t\t\ti.Ui.Output(fmt.Sprintf(\"\\t%s = %s\", subkey, val))\n\t\t}\n\t}\n\treturn 0\n}\n\nfunc (i *InfoCommand) Synopsis() string {\n\treturn \"Provides debugging information for operators\"\n}\n<commit_msg>command\/info: Warn on GOMAXPROCS = 1. Fixes #87.<commit_after>package command\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/mitchellh\/cli\"\n\t\"sort\"\n\t\"strings\"\n)\n\n\/\/ InfoCommand is a Command implementation that queries a running\n\/\/ Consul agent for various debugging statistics for operators\ntype InfoCommand struct {\n\tUi cli.Ui\n}\n\nfunc (i *InfoCommand) Help() string {\n\thelpText := `\nUsage: consul info [options]\n\n\tProvides debugging information for operators\n\nOptions:\n\n -rpc-addr=127.0.0.1:8400 RPC address of the Consul agent.\n`\n\treturn strings.TrimSpace(helpText)\n}\n\nfunc (i *InfoCommand) Run(args []string) int {\n\tcmdFlags := flag.NewFlagSet(\"info\", flag.ContinueOnError)\n\tcmdFlags.Usage = func() { i.Ui.Output(i.Help()) }\n\trpcAddr := RPCAddrFlag(cmdFlags)\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\tclient, err := RPCClient(*rpcAddr)\n\tif err != nil {\n\t\ti.Ui.Error(fmt.Sprintf(\"Error connecting to Consul agent: %s\", err))\n\t\treturn 1\n\t}\n\tdefer client.Close()\n\n\tstats, err := client.Stats()\n\tif err != nil {\n\t\ti.Ui.Error(fmt.Sprintf(\"Error querying agent: %s\", err))\n\t\treturn 1\n\t}\n\n\t\/\/ Get the keys in sorted order\n\tkeys := make([]string, 0, len(stats))\n\tfor key := range stats {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Strings(keys)\n\n\t\/\/ Iterate over each top-level key\n\tfor _, key := range keys {\n\t\ti.Ui.Output(key + \":\")\n\n\t\t\/\/ Sort the sub-keys\n\t\tsubvals := stats[key]\n\t\tsubkeys := make([]string, 0, len(subvals))\n\t\tfor k := range subvals {\n\t\t\tsubkeys = append(subkeys, k)\n\t\t}\n\t\tsort.Strings(subkeys)\n\n\t\t\/\/ Iterate over the subkeys\n\t\tfor _, subkey := range subkeys {\n\t\t\tval := subvals[subkey]\n\t\t\ti.Ui.Output(fmt.Sprintf(\"\\t%s = %s\", subkey, val))\n\t\t}\n\t}\n\n\t\/\/ Check for specific warnings\n\truntime, ok := stats[\"runtime\"]\n\tif ok {\n\t\tif maxprocs := runtime[\"max_procs\"]; maxprocs == \"1\" {\n\t\t\ti.Ui.Output(\"WARNING: It is highly recommended to set GOMAXPROCS higher than 1\")\n\t\t}\n\t}\n\treturn 0\n}\n\nfunc (i *InfoCommand) Synopsis() string {\n\treturn \"Provides debugging information for operators\"\n}\n<|endoftext|>"} {"text":"<commit_before>package command\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/consul\/api\"\n\t\"github.com\/hashicorp\/consul\/command\/agent\"\n\t\"github.com\/mitchellh\/cli\"\n)\n\nconst (\n\t\/\/ lockKillGracePeriod is how long we allow a child between\n\t\/\/ a SIGTERM and a SIGKILL. This is to let the child cleanup\n\t\/\/ any necessary state. We have to balance this with the risk\n\t\/\/ of a split-brain where multiple children may be acting as if\n\t\/\/ they hold a lock. This value is currently based on the default\n\t\/\/ lock-delay value of 15 seconds. This only affects locks and not\n\t\/\/ semaphores.\n\tlockKillGracePeriod = 5 * time.Second\n)\n\n\/\/ LockCommand is a Command implementation that is used to setup\n\/\/ a \"lock\" which manages lock acquisition and invokes a sub-process\ntype LockCommand struct {\n\tShutdownCh <-chan struct{}\n\tUi cli.Ui\n\n\tchild *os.Process\n\tchildLock sync.Mutex\n\tverbose bool\n}\n\nfunc (c *LockCommand) Help() string {\n\thelpText := `\nUsage: consul lock [options] prefix child...\n\n Acquires a lock or semaphore at a given path, and invokes a child\n process when successful. The child process can assume the lock is\n held while it executes. If the lock is lost or communication is\n disrupted the child process will be sent a SIGTERM signal and given\n time to gracefully exit. After the grace period expires the process\n will be hard terminated.\n For Consul agents on Windows, the child process is always hard\n terminated with a SIGKILL, since Windows has no POSIX compatible\n notion for SIGTERM.\n\n When -n=1, only a single lock holder or leader exists providing\n mutual exclusion. Setting a higher value switches to a semaphore\n allowing multiple holders to coordinate.\n\n The prefix provided must have write privileges.\n\nOptions:\n\n -http-addr=127.0.0.1:8500 HTTP address of the Consul agent.\n -n=1 Maximum number of allowed lock holders. If this\n value is one, it operates as a lock, otherwise\n a semaphore is used.\n -name=\"\" Optional name to associate with lock session.\n -token=\"\" ACL token to use. Defaults to that of agent.\n -verbose Enables verbose output\n`\n\treturn strings.TrimSpace(helpText)\n}\n\nfunc (c *LockCommand) Run(args []string) int {\n\tvar childDone chan struct{}\n\tvar name, token string\n\tvar limit int\n\tcmdFlags := flag.NewFlagSet(\"watch\", flag.ContinueOnError)\n\tcmdFlags.Usage = func() { c.Ui.Output(c.Help()) }\n\tcmdFlags.IntVar(&limit, \"n\", 1, \"\")\n\tcmdFlags.StringVar(&name, \"name\", \"\", \"\")\n\tcmdFlags.StringVar(&token, \"token\", \"\", \"\")\n\tcmdFlags.BoolVar(&c.verbose, \"verbose\", false, \"\")\n\thttpAddr := HTTPAddrFlag(cmdFlags)\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\t\/\/ Check the limit\n\tif limit <= 0 {\n\t\tc.Ui.Error(fmt.Sprintf(\"Lock holder limit must be positive\"))\n\t\treturn 1\n\t}\n\n\t\/\/ Verify the prefix and child are provided\n\textra := cmdFlags.Args()\n\tif len(extra) < 2 {\n\t\tc.Ui.Error(\"Key prefix and child command must be specified\")\n\t\tc.Ui.Error(\"\")\n\t\tc.Ui.Error(c.Help())\n\t\treturn 1\n\t}\n\tprefix := extra[0]\n\tprefix = strings.TrimPrefix(prefix, \"\/\")\n\tscript := strings.Join(extra[1:], \" \")\n\n\t\/\/ Calculate a session name if none provided\n\tif name == \"\" {\n\t\tname = fmt.Sprintf(\"Consul lock for '%s' at '%s'\", script, prefix)\n\t}\n\n\t\/\/ Create and test the HTTP client\n\tconf := api.DefaultConfig()\n\tconf.Address = *httpAddr\n\tconf.Token = token\n\tclient, err := api.NewClient(conf)\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error connecting to Consul agent: %s\", err))\n\t\treturn 1\n\t}\n\t_, err = client.Agent().NodeName()\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error querying Consul agent: %s\", err))\n\t\treturn 1\n\t}\n\n\t\/\/ Setup the lock or semaphore\n\tvar lu *LockUnlock\n\tif limit == 1 {\n\t\tlu, err = c.setupLock(client, prefix, name)\n\t} else {\n\t\tlu, err = c.setupSemaphore(client, limit, prefix, name)\n\t}\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Lock setup failed: %s\", err))\n\t\treturn 1\n\t}\n\n\t\/\/ Attempt the acquisition\n\tif c.verbose {\n\t\tc.Ui.Info(\"Attempting lock acquisition\")\n\t}\n\tlockCh, err := lu.lockFn(c.ShutdownCh)\n\tif lockCh == nil {\n\t\tif err == nil {\n\t\t\tc.Ui.Error(\"Shutdown triggered during lock acquisition\")\n\t\t} else {\n\t\t\tc.Ui.Error(fmt.Sprintf(\"Lock acquisition failed: %s\", err))\n\t\t}\n\t\treturn 1\n\t}\n\n\t\/\/ Check if we were shutdown but managed to still acquire the lock\n\tselect {\n\tcase <-c.ShutdownCh:\n\t\tc.Ui.Error(\"Shutdown triggered during lock acquisition\")\n\t\tgoto RELEASE\n\tdefault:\n\t}\n\n\t\/\/ Start the child process\n\tchildDone = make(chan struct{})\n\tgo func() {\n\t\tif err := c.startChild(script, childDone); err != nil {\n\t\t\tc.Ui.Error(fmt.Sprintf(\"%s\", err))\n\t\t}\n\t}()\n\n\t\/\/ Monitor for shutdown, child termination, or lock loss\n\tselect {\n\tcase <-c.ShutdownCh:\n\t\tif c.verbose {\n\t\t\tc.Ui.Info(\"Shutdown triggered, killing child\")\n\t\t}\n\tcase <-lockCh:\n\t\tif c.verbose {\n\t\t\tc.Ui.Info(\"Lock lost, killing child\")\n\t\t}\n\tcase <-childDone:\n\t\tif c.verbose {\n\t\t\tc.Ui.Info(\"Child terminated, releasing lock\")\n\t\t}\n\t\tgoto RELEASE\n\t}\n\n\t\/\/ Kill the child\n\tif err := c.killChild(childDone); err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"%s\", err))\n\t}\n\nRELEASE:\n\t\/\/ Release the lock before termination\n\tif err := lu.unlockFn(); err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Lock release failed: %s\", err))\n\t\treturn 1\n\t}\n\n\t\/\/ Cleanup the lock if no longer in use\n\tif err := lu.cleanupFn(); err != nil {\n\t\tif err != lu.inUseErr {\n\t\t\tc.Ui.Error(fmt.Sprintf(\"Lock cleanup failed: %s\", err))\n\t\t\treturn 1\n\t\t} else if c.verbose {\n\t\t\tc.Ui.Info(\"Cleanup aborted, lock in use\")\n\t\t}\n\t} else if c.verbose {\n\t\tc.Ui.Info(\"Cleanup succeeded\")\n\t}\n\treturn 0\n}\n\n\/\/ setupLock is used to setup a new Lock given the API client,\n\/\/ the key prefix to operate on, and an optional session name.\nfunc (c *LockCommand) setupLock(client *api.Client, prefix, name string) (*LockUnlock, error) {\n\t\/\/ Use the DefaultSemaphoreKey extention, this way if a lock and\n\t\/\/ semaphore are both used at the same prefix, we will get a conflict\n\t\/\/ which we can report to the user.\n\tkey := path.Join(prefix, api.DefaultSemaphoreKey)\n\tif c.verbose {\n\t\tc.Ui.Info(fmt.Sprintf(\"Setting up lock at path: %s\", key))\n\t}\n\topts := api.LockOptions{\n\t\tKey: key,\n\t\tSessionName: name,\n\t}\n\tl, err := client.LockOpts(&opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlu := &LockUnlock{\n\t\tlockFn: l.Lock,\n\t\tunlockFn: l.Unlock,\n\t\tcleanupFn: l.Destroy,\n\t\tinUseErr: api.ErrLockInUse,\n\t}\n\treturn lu, nil\n}\n\n\/\/ setupSemaphore is used to setup a new Semaphore given the\n\/\/ API client, key prefix, session name, and slot holder limit.\nfunc (c *LockCommand) setupSemaphore(client *api.Client, limit int, prefix, name string) (*LockUnlock, error) {\n\tif c.verbose {\n\t\tc.Ui.Info(fmt.Sprintf(\"Setting up semaphore (limit %d) at prefix: %s\", limit, prefix))\n\t}\n\topts := api.SemaphoreOptions{\n\t\tPrefix: prefix,\n\t\tLimit: limit,\n\t\tSessionName: name,\n\t}\n\ts, err := client.SemaphoreOpts(&opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlu := &LockUnlock{\n\t\tlockFn: s.Acquire,\n\t\tunlockFn: s.Release,\n\t\tcleanupFn: s.Destroy,\n\t\tinUseErr: api.ErrSemaphoreInUse,\n\t}\n\treturn lu, nil\n}\n\n\/\/ startChild is a long running routine used to start and\n\/\/ wait for the child process to exit.\nfunc (c *LockCommand) startChild(script string, doneCh chan struct{}) error {\n\tdefer close(doneCh)\n\tif c.verbose {\n\t\tc.Ui.Info(fmt.Sprintf(\"Starting handler '%s'\", script))\n\t}\n\t\/\/ Create the command\n\tcmd, err := agent.ExecScript(script)\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error executing handler: %s\", err))\n\t\treturn err\n\t}\n\n\t\/\/ Setup the command streams\n\tcmd.Env = append(os.Environ(),\n\t\t\"CONSUL_LOCK_HELD=true\",\n\t)\n\tcmd.Stdin = nil\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\t\/\/ Start the child process\n\tc.childLock.Lock()\n\tif err := cmd.Start(); err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error starting handler: %s\", err))\n\t\tc.childLock.Unlock()\n\t\treturn err\n\t}\n\n\t\/\/ Setup the child info\n\tc.child = cmd.Process\n\tc.childLock.Unlock()\n\n\t\/\/ Wait for the child process\n\tif err := cmd.Wait(); err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error running handler: %s\", err))\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ killChild is used to forcefully kill the child, first using SIGTERM\n\/\/ to allow for a graceful cleanup and then using SIGKILL for a hard\n\/\/ termination.\n\/\/ On Windows, the child is always hard terminated with a SIGKILL, even\n\/\/ on the first attempt.\nfunc (c *LockCommand) killChild(childDone chan struct{}) error {\n\t\/\/ Get the child process\n\tc.childLock.Lock()\n\tchild := c.child\n\tc.childLock.Unlock()\n\n\t\/\/ If there is no child process (failed to start), we can quit early\n\tif child == nil {\n\t\tif c.verbose {\n\t\t\tc.Ui.Info(\"No child process to kill\")\n\t\t}\n\t\treturn nil\n\t}\n\n\t\/\/ Attempt termination first\n\tif c.verbose {\n\t\tc.Ui.Info(fmt.Sprintf(\"Terminating child pid %d\", child.Pid))\n\t}\n\tif err := signalPid(child.Pid, syscall.SIGTERM); err != nil {\n\t\treturn fmt.Errorf(\"Failed to terminate %d: %v\", child.Pid, err)\n\t}\n\n\t\/\/ Wait for termination, or until a timeout\n\tselect {\n\tcase <-childDone:\n\t\tif c.verbose {\n\t\t\tc.Ui.Info(\"Child terminated\")\n\t\t}\n\t\treturn nil\n\tcase <-time.After(lockKillGracePeriod):\n\t\tif c.verbose {\n\t\t\tc.Ui.Info(fmt.Sprintf(\"Child did not exit after grace period of %v\",\n\t\t\t\tlockKillGracePeriod))\n\t\t}\n\t}\n\n\t\/\/ Send a final SIGKILL\n\tif c.verbose {\n\t\tc.Ui.Info(fmt.Sprintf(\"Killing child pid %d\", child.Pid))\n\t}\n\tif err := signalPid(child.Pid, syscall.SIGKILL); err != nil {\n\t\treturn fmt.Errorf(\"Failed to kill %d: %v\", child.Pid, err)\n\t}\n\treturn nil\n}\n\nfunc (c *LockCommand) Synopsis() string {\n\treturn \"Execute a command holding a lock\"\n}\n\n\/\/ LockUnlock is used to abstract over the differences between\n\/\/ a lock and a semaphore.\ntype LockUnlock struct {\n\tlockFn func(<-chan struct{}) (<-chan struct{}, error)\n\tunlockFn func() error\n\tcleanupFn func() error\n\tinUseErr error\n}\n<commit_msg>command\/lock: Pass stdin to child process when -pass-stdin passed.<commit_after>package command\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/consul\/api\"\n\t\"github.com\/hashicorp\/consul\/command\/agent\"\n\t\"github.com\/mitchellh\/cli\"\n)\n\nconst (\n\t\/\/ lockKillGracePeriod is how long we allow a child between\n\t\/\/ a SIGTERM and a SIGKILL. This is to let the child cleanup\n\t\/\/ any necessary state. We have to balance this with the risk\n\t\/\/ of a split-brain where multiple children may be acting as if\n\t\/\/ they hold a lock. This value is currently based on the default\n\t\/\/ lock-delay value of 15 seconds. This only affects locks and not\n\t\/\/ semaphores.\n\tlockKillGracePeriod = 5 * time.Second\n)\n\n\/\/ LockCommand is a Command implementation that is used to setup\n\/\/ a \"lock\" which manages lock acquisition and invokes a sub-process\ntype LockCommand struct {\n\tShutdownCh <-chan struct{}\n\tUi cli.Ui\n\n\tchild *os.Process\n\tchildLock sync.Mutex\n\tverbose bool\n}\n\nfunc (c *LockCommand) Help() string {\n\thelpText := `\nUsage: consul lock [options] prefix child...\n\n Acquires a lock or semaphore at a given path, and invokes a child\n process when successful. The child process can assume the lock is\n held while it executes. If the lock is lost or communication is\n disrupted the child process will be sent a SIGTERM signal and given\n time to gracefully exit. After the grace period expires the process\n will be hard terminated.\n For Consul agents on Windows, the child process is always hard\n terminated with a SIGKILL, since Windows has no POSIX compatible\n notion for SIGTERM.\n\n When -n=1, only a single lock holder or leader exists providing\n mutual exclusion. Setting a higher value switches to a semaphore\n allowing multiple holders to coordinate.\n\n The prefix provided must have write privileges.\n\nOptions:\n\n -http-addr=127.0.0.1:8500 HTTP address of the Consul agent.\n -n=1 Maximum number of allowed lock holders. If this\n value is one, it operates as a lock, otherwise\n a semaphore is used.\n -name=\"\" Optional name to associate with lock session.\n -token=\"\" ACL token to use. Defaults to that of agent.\n -pass-stdin Pass stdin to child process.\n -verbose Enables verbose output\n`\n\treturn strings.TrimSpace(helpText)\n}\n\nfunc (c *LockCommand) Run(args []string) int {\n\tvar childDone chan struct{}\n\tvar name, token string\n\tvar limit int\n\tvar passStdin bool\n\tcmdFlags := flag.NewFlagSet(\"watch\", flag.ContinueOnError)\n\tcmdFlags.Usage = func() { c.Ui.Output(c.Help()) }\n\tcmdFlags.IntVar(&limit, \"n\", 1, \"\")\n\tcmdFlags.StringVar(&name, \"name\", \"\", \"\")\n\tcmdFlags.StringVar(&token, \"token\", \"\", \"\")\n\tcmdFlags.BoolVar(&passStdin, \"pass-stdin\", false, \"\")\n\tcmdFlags.BoolVar(&c.verbose, \"verbose\", false, \"\")\n\thttpAddr := HTTPAddrFlag(cmdFlags)\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\t\/\/ Check the limit\n\tif limit <= 0 {\n\t\tc.Ui.Error(fmt.Sprintf(\"Lock holder limit must be positive\"))\n\t\treturn 1\n\t}\n\n\t\/\/ Verify the prefix and child are provided\n\textra := cmdFlags.Args()\n\tif len(extra) < 2 {\n\t\tc.Ui.Error(\"Key prefix and child command must be specified\")\n\t\tc.Ui.Error(\"\")\n\t\tc.Ui.Error(c.Help())\n\t\treturn 1\n\t}\n\tprefix := extra[0]\n\tprefix = strings.TrimPrefix(prefix, \"\/\")\n\tscript := strings.Join(extra[1:], \" \")\n\n\t\/\/ Calculate a session name if none provided\n\tif name == \"\" {\n\t\tname = fmt.Sprintf(\"Consul lock for '%s' at '%s'\", script, prefix)\n\t}\n\n\t\/\/ Create and test the HTTP client\n\tconf := api.DefaultConfig()\n\tconf.Address = *httpAddr\n\tconf.Token = token\n\tclient, err := api.NewClient(conf)\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error connecting to Consul agent: %s\", err))\n\t\treturn 1\n\t}\n\t_, err = client.Agent().NodeName()\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error querying Consul agent: %s\", err))\n\t\treturn 1\n\t}\n\n\t\/\/ Setup the lock or semaphore\n\tvar lu *LockUnlock\n\tif limit == 1 {\n\t\tlu, err = c.setupLock(client, prefix, name)\n\t} else {\n\t\tlu, err = c.setupSemaphore(client, limit, prefix, name)\n\t}\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Lock setup failed: %s\", err))\n\t\treturn 1\n\t}\n\n\t\/\/ Attempt the acquisition\n\tif c.verbose {\n\t\tc.Ui.Info(\"Attempting lock acquisition\")\n\t}\n\tlockCh, err := lu.lockFn(c.ShutdownCh)\n\tif lockCh == nil {\n\t\tif err == nil {\n\t\t\tc.Ui.Error(\"Shutdown triggered during lock acquisition\")\n\t\t} else {\n\t\t\tc.Ui.Error(fmt.Sprintf(\"Lock acquisition failed: %s\", err))\n\t\t}\n\t\treturn 1\n\t}\n\n\t\/\/ Check if we were shutdown but managed to still acquire the lock\n\tselect {\n\tcase <-c.ShutdownCh:\n\t\tc.Ui.Error(\"Shutdown triggered during lock acquisition\")\n\t\tgoto RELEASE\n\tdefault:\n\t}\n\n\t\/\/ Start the child process\n\tchildDone = make(chan struct{})\n\tgo func() {\n\t\tif err := c.startChild(script, childDone, passStdin); err != nil {\n\t\t\tc.Ui.Error(fmt.Sprintf(\"%s\", err))\n\t\t}\n\t}()\n\n\t\/\/ Monitor for shutdown, child termination, or lock loss\n\tselect {\n\tcase <-c.ShutdownCh:\n\t\tif c.verbose {\n\t\t\tc.Ui.Info(\"Shutdown triggered, killing child\")\n\t\t}\n\tcase <-lockCh:\n\t\tif c.verbose {\n\t\t\tc.Ui.Info(\"Lock lost, killing child\")\n\t\t}\n\tcase <-childDone:\n\t\tif c.verbose {\n\t\t\tc.Ui.Info(\"Child terminated, releasing lock\")\n\t\t}\n\t\tgoto RELEASE\n\t}\n\n\t\/\/ Kill the child\n\tif err := c.killChild(childDone); err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"%s\", err))\n\t}\n\nRELEASE:\n\t\/\/ Release the lock before termination\n\tif err := lu.unlockFn(); err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Lock release failed: %s\", err))\n\t\treturn 1\n\t}\n\n\t\/\/ Cleanup the lock if no longer in use\n\tif err := lu.cleanupFn(); err != nil {\n\t\tif err != lu.inUseErr {\n\t\t\tc.Ui.Error(fmt.Sprintf(\"Lock cleanup failed: %s\", err))\n\t\t\treturn 1\n\t\t} else if c.verbose {\n\t\t\tc.Ui.Info(\"Cleanup aborted, lock in use\")\n\t\t}\n\t} else if c.verbose {\n\t\tc.Ui.Info(\"Cleanup succeeded\")\n\t}\n\treturn 0\n}\n\n\/\/ setupLock is used to setup a new Lock given the API client,\n\/\/ the key prefix to operate on, and an optional session name.\nfunc (c *LockCommand) setupLock(client *api.Client, prefix, name string) (*LockUnlock, error) {\n\t\/\/ Use the DefaultSemaphoreKey extention, this way if a lock and\n\t\/\/ semaphore are both used at the same prefix, we will get a conflict\n\t\/\/ which we can report to the user.\n\tkey := path.Join(prefix, api.DefaultSemaphoreKey)\n\tif c.verbose {\n\t\tc.Ui.Info(fmt.Sprintf(\"Setting up lock at path: %s\", key))\n\t}\n\topts := api.LockOptions{\n\t\tKey: key,\n\t\tSessionName: name,\n\t}\n\tl, err := client.LockOpts(&opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlu := &LockUnlock{\n\t\tlockFn: l.Lock,\n\t\tunlockFn: l.Unlock,\n\t\tcleanupFn: l.Destroy,\n\t\tinUseErr: api.ErrLockInUse,\n\t}\n\treturn lu, nil\n}\n\n\/\/ setupSemaphore is used to setup a new Semaphore given the\n\/\/ API client, key prefix, session name, and slot holder limit.\nfunc (c *LockCommand) setupSemaphore(client *api.Client, limit int, prefix, name string) (*LockUnlock, error) {\n\tif c.verbose {\n\t\tc.Ui.Info(fmt.Sprintf(\"Setting up semaphore (limit %d) at prefix: %s\", limit, prefix))\n\t}\n\topts := api.SemaphoreOptions{\n\t\tPrefix: prefix,\n\t\tLimit: limit,\n\t\tSessionName: name,\n\t}\n\ts, err := client.SemaphoreOpts(&opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlu := &LockUnlock{\n\t\tlockFn: s.Acquire,\n\t\tunlockFn: s.Release,\n\t\tcleanupFn: s.Destroy,\n\t\tinUseErr: api.ErrSemaphoreInUse,\n\t}\n\treturn lu, nil\n}\n\n\/\/ startChild is a long running routine used to start and\n\/\/ wait for the child process to exit.\nfunc (c *LockCommand) startChild(script string, doneCh chan struct{}, passStdin bool) error {\n\tdefer close(doneCh)\n\tif c.verbose {\n\t\tc.Ui.Info(fmt.Sprintf(\"Starting handler '%s'\", script))\n\t}\n\t\/\/ Create the command\n\tcmd, err := agent.ExecScript(script)\n\tif err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error executing handler: %s\", err))\n\t\treturn err\n\t}\n\n\t\/\/ Setup the command streams\n\tcmd.Env = append(os.Environ(),\n\t\t\"CONSUL_LOCK_HELD=true\",\n\t)\n\tif passStdin {\n\t\tif c.verbose {\n\t\t\tc.Ui.Info(\"Stdin passed to handler process\")\n\t\t}\n\t\tcmd.Stdin = os.Stdin\n\t} else {\n\t\tcmd.Stdin = nil\n\t}\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\t\/\/ Start the child process\n\tc.childLock.Lock()\n\tif err := cmd.Start(); err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error starting handler: %s\", err))\n\t\tc.childLock.Unlock()\n\t\treturn err\n\t}\n\n\t\/\/ Setup the child info\n\tc.child = cmd.Process\n\tc.childLock.Unlock()\n\n\t\/\/ Wait for the child process\n\tif err := cmd.Wait(); err != nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Error running handler: %s\", err))\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ killChild is used to forcefully kill the child, first using SIGTERM\n\/\/ to allow for a graceful cleanup and then using SIGKILL for a hard\n\/\/ termination.\n\/\/ On Windows, the child is always hard terminated with a SIGKILL, even\n\/\/ on the first attempt.\nfunc (c *LockCommand) killChild(childDone chan struct{}) error {\n\t\/\/ Get the child process\n\tc.childLock.Lock()\n\tchild := c.child\n\tc.childLock.Unlock()\n\n\t\/\/ If there is no child process (failed to start), we can quit early\n\tif child == nil {\n\t\tif c.verbose {\n\t\t\tc.Ui.Info(\"No child process to kill\")\n\t\t}\n\t\treturn nil\n\t}\n\n\t\/\/ Attempt termination first\n\tif c.verbose {\n\t\tc.Ui.Info(fmt.Sprintf(\"Terminating child pid %d\", child.Pid))\n\t}\n\tif err := signalPid(child.Pid, syscall.SIGTERM); err != nil {\n\t\treturn fmt.Errorf(\"Failed to terminate %d: %v\", child.Pid, err)\n\t}\n\n\t\/\/ Wait for termination, or until a timeout\n\tselect {\n\tcase <-childDone:\n\t\tif c.verbose {\n\t\t\tc.Ui.Info(\"Child terminated\")\n\t\t}\n\t\treturn nil\n\tcase <-time.After(lockKillGracePeriod):\n\t\tif c.verbose {\n\t\t\tc.Ui.Info(fmt.Sprintf(\"Child did not exit after grace period of %v\",\n\t\t\t\tlockKillGracePeriod))\n\t\t}\n\t}\n\n\t\/\/ Send a final SIGKILL\n\tif c.verbose {\n\t\tc.Ui.Info(fmt.Sprintf(\"Killing child pid %d\", child.Pid))\n\t}\n\tif err := signalPid(child.Pid, syscall.SIGKILL); err != nil {\n\t\treturn fmt.Errorf(\"Failed to kill %d: %v\", child.Pid, err)\n\t}\n\treturn nil\n}\n\nfunc (c *LockCommand) Synopsis() string {\n\treturn \"Execute a command holding a lock\"\n}\n\n\/\/ LockUnlock is used to abstract over the differences between\n\/\/ a lock and a semaphore.\ntype LockUnlock struct {\n\tlockFn func(<-chan struct{}) (<-chan struct{}, error)\n\tunlockFn func() error\n\tcleanupFn func() error\n\tinUseErr error\n}\n<|endoftext|>"} {"text":"<commit_before>package command\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nimport . \"launchpad.net\/gocheck\"\n\nvar travis = flag.Bool(\"travis\", false, \"Enable it if the tests runs in TravisCI\")\n\n\/\/ Hook up gocheck into the \"go test\" runner.\nfunc Test(t *testing.T) { TestingT(t) }\n\ntype CommandSuite struct{}\n\nvar _ = Suite(&CommandSuite{})\n\nfunc (self *CommandSuite) TestBasic(c *C) {\n\tcmd := NewCommand(\".\/tests\/test -exit=0 -time=1 -max=100000\")\n\tcmd.Run()\n\tcmd.Wait()\n\n\tresponse := cmd.GetResponse()\n\n\tc.Assert(response.Failed, Equals, false)\n\tc.Assert(response.ExitCode, Equals, 0)\n\tc.Assert(response.Stdout, HasLen, 588895)\n\tc.Assert(response.Stderr, HasLen, 0)\n\tc.Assert(response.Pid, Not(Equals), 0)\n\tc.Assert(int(response.RealTime\/time.Second), Equals, 1)\n\tc.Assert(int(response.UserTime), Not(Equals), 0)\n\tc.Assert(int(response.SysTime), Not(Equals), 0)\n\tc.Assert(int(response.Rusage.Utime.Usec), Not(Equals), 0)\n}\n\nfunc (self *CommandSuite) TestBasicWithTimeout(c *C) {\n\tcmd := NewCommand(\".\/tests\/test -exit=0 -time=2\")\n\tcmd.SetTimeout(1 * time.Second)\n\tcmd.Run()\n\tcmd.Wait()\n\n\tresponse := cmd.GetResponse()\n\n\tc.Assert(response.Failed, Equals, true)\n\tc.Assert(response.ExitCode, Equals, -1)\n\tc.Assert(int(response.RealTime\/time.Second), Equals, 1)\n\tc.Assert(int(response.UserTime), Not(Equals), 0)\n}\n\nfunc (self *CommandSuite) TestKill(c *C) {\n\tcmd := NewCommand(\".\/tests\/test -exit=0 -time=2\")\n\tcmd.Run()\n\n\tgo func() {\n\t\ttime.Sleep(1 * time.Second)\n\t\tcmd.Kill()\n\t}()\n\n\tcmd.Wait()\n\n\tresponse := cmd.GetResponse()\n\n\tc.Assert(response.Failed, Equals, true)\n\tc.Assert(response.ExitCode, Equals, -1)\n\tc.Assert(int(response.RealTime\/time.Second), Equals, 1)\n\tc.Assert(int(response.UserTime), Not(Equals), 0)\n}\n\nfunc (self *CommandSuite) TestSetUser(c *C) {\n\tif *travis {\n\t\tc.Skip(\"Running at TravisCI\")\n\t}\n\n\tcmd := NewCommand(\".\/tests\/test -exit=0 -time=1\")\n\tcmd.SetUser(\"daemon\")\n\tif err := cmd.Run(); err != nil {\n\t\tfmt.Println(err)\n\t\tc.Fail()\n\t\treturn\n\t}\n\n\tcmd.Wait()\n\n\tresponse := cmd.GetResponse()\n\n\tc.Assert(response.Failed, Equals, false)\n\tc.Assert(response.ExitCode, Equals, 0)\n}\n\nfunc (self *CommandSuite) TestSetWorkingDir(c *C) {\n\tcmd := NewCommand(\".\/test -exit=0 -wd\")\n\n\tcwd, _ := os.Getwd()\n\twd := cwd + \"\/tests\"\n\n\tcmd.SetWorkingDir(wd)\n\tcmd.Run()\n\tcmd.Wait()\n\n\tresponse := cmd.GetResponse()\n\n\tc.Assert(response.Failed, Equals, false)\n\tc.Assert(response.ExitCode, Equals, 0)\n\tc.Assert(string(response.Stdout), Equals, wd+\"\\n\")\n}\n\nfunc (self *CommandSuite) TestSetEnvironment(c *C) {\n\tcmd := NewCommand(\".\/tests\/test -exit=0 -env\")\n\tcmd.SetEnvironment([]string{\"FOO=bar\"})\n\tcmd.Run()\n\tcmd.Wait()\n\n\tresponse := cmd.GetResponse()\n\n\tc.Assert(response.Failed, Equals, false)\n\tc.Assert(response.ExitCode, Equals, 0)\n\tc.Assert(string(response.Stdout), Equals, \"FOO=bar\\n\")\n}\n<commit_msg>fix tests<commit_after>package command\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nimport . \"launchpad.net\/gocheck\"\n\nvar travis = flag.Bool(\"travis\", false, \"Enable it if the tests runs in TravisCI\")\n\n\/\/ Hook up gocheck into the \"go test\" runner.\nfunc Test(t *testing.T) { TestingT(t) }\n\ntype CommandSuite struct{}\n\nvar _ = Suite(&CommandSuite{})\n\nfunc (self *CommandSuite) TestBasic(c *C) {\n\tcmd := NewCommand(\".\/tests\/test -exit=0 -time=1 -max=100000\")\n\tcmd.Run()\n\tcmd.Wait()\n\n\tresponse := cmd.GetResponse()\n\n\tc.Assert(response.Failed, Equals, false)\n\tc.Assert(response.ExitCode, Equals, 0)\n\tc.Assert(response.Stdout, HasLen, 588895)\n\tc.Assert(response.Stderr, HasLen, 0)\n\tc.Assert(response.Pid, Not(Equals), 0)\n\tc.Assert(int(response.RealTime\/time.Second), Equals, 1)\n\tc.Assert(int(response.UserTime), Not(Equals), 0)\n\tc.Assert(int(response.SysTime), Not(Equals), 0)\n\tc.Assert(int(response.Rusage.Utime.Usec), Not(Equals), 0)\n}\n\nfunc (self *CommandSuite) TestBasicWithTimeout(c *C) {\n\tcmd := NewCommand(\".\/tests\/test -exit=0 -time=2\")\n\tcmd.SetTimeout(1 * time.Second)\n\tcmd.Run()\n\tcmd.Wait()\n\n\tresponse := cmd.GetResponse()\n\n\tc.Assert(response.Failed, Equals, true)\n\tc.Assert(response.ExitCode, Equals, -1)\n\tc.Assert(int(response.RealTime\/time.Second), Equals, 1)\n}\n\nfunc (self *CommandSuite) TestKill(c *C) {\n\tcmd := NewCommand(\".\/tests\/test -exit=0 -time=2\")\n\tcmd.Run()\n\n\tgo func() {\n\t\ttime.Sleep(1 * time.Second)\n\t\tcmd.Kill()\n\t}()\n\n\tcmd.Wait()\n\n\tresponse := cmd.GetResponse()\n\n\tc.Assert(response.Failed, Equals, true)\n\tc.Assert(response.ExitCode, Equals, -1)\n\tc.Assert(int(response.RealTime\/time.Second), Equals, 1)\n}\n\nfunc (self *CommandSuite) TestSetUser(c *C) {\n\tif *travis {\n\t\tc.Skip(\"Running at TravisCI\")\n\t}\n\n\tcmd := NewCommand(\".\/tests\/test -exit=0 -time=1\")\n\tcmd.SetUser(\"daemon\")\n\tif err := cmd.Run(); err != nil {\n\t\tfmt.Println(err)\n\t\tc.Fail()\n\t\treturn\n\t}\n\n\tcmd.Wait()\n\n\tresponse := cmd.GetResponse()\n\n\tc.Assert(response.Failed, Equals, false)\n\tc.Assert(response.ExitCode, Equals, 0)\n}\n\nfunc (self *CommandSuite) TestSetWorkingDir(c *C) {\n\tcmd := NewCommand(\".\/test -exit=0 -wd\")\n\n\tcwd, _ := os.Getwd()\n\twd := cwd + \"\/tests\"\n\n\tcmd.SetWorkingDir(wd)\n\tcmd.Run()\n\tcmd.Wait()\n\n\tresponse := cmd.GetResponse()\n\n\tc.Assert(response.Failed, Equals, false)\n\tc.Assert(response.ExitCode, Equals, 0)\n\tc.Assert(string(response.Stdout), Equals, wd+\"\\n\")\n}\n\nfunc (self *CommandSuite) TestSetEnvironment(c *C) {\n\tcmd := NewCommand(\".\/tests\/test -exit=0 -env\")\n\tcmd.SetEnvironment([]string{\"FOO=bar\"})\n\tcmd.Run()\n\tcmd.Wait()\n\n\tresponse := cmd.GetResponse()\n\n\tc.Assert(response.Failed, Equals, false)\n\tc.Assert(response.ExitCode, Equals, 0)\n\tc.Assert(string(response.Stdout), Equals, \"FOO=bar\\n\")\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/zetamatta\/go-box\"\n\n\t\"github.com\/zetamatta\/nyagos\/readline\"\n\t\"github.com\/zetamatta\/nyagos\/shell\"\n)\n\nfunc cmd_box(ctx context.Context, cmd *shell.Cmd) (int, error) {\n\tdata, err := ioutil.ReadAll(cmd.Stdin)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\treturn 1, err\n\t}\n\tlist := strings.Split(string(data), \"\\n\")\n\tif len(list) == 0 {\n\t\treturn 1, nil\n\t}\n\tfor i := 0; i < len(list); i++ {\n\t\tlist[i] = strings.TrimSpace(list[i])\n\t}\n\tresult := box.Choice(\n\t\tlist,\n\t\treadline.Console)\n\tfmt.Fprintln(cmd.Stdout, result)\n\treturn 0, nil\n}\n<commit_msg>Fix: `box` did not print linefeed on lastline after enter typed.<commit_after>package commands\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/zetamatta\/go-box\"\n\n\t\"github.com\/zetamatta\/nyagos\/readline\"\n\t\"github.com\/zetamatta\/nyagos\/shell\"\n)\n\nfunc cmd_box(ctx context.Context, cmd *shell.Cmd) (int, error) {\n\tdata, err := ioutil.ReadAll(cmd.Stdin)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\treturn 1, err\n\t}\n\tlist := strings.Split(string(data), \"\\n\")\n\tif len(list) == 0 {\n\t\treturn 1, nil\n\t}\n\tfor i := 0; i < len(list); i++ {\n\t\tlist[i] = strings.TrimSpace(list[i])\n\t}\n\tresult := box.Choice(\n\t\tlist,\n\t\treadline.Console)\n\tfmt.Fprintln(readline.Console)\n\tfmt.Fprintln(cmd.Stdout, result)\n\treturn 0, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package common\n\nimport (\n\t\"net\"\n)\n\nconst (\n\t\/\/ Cilium constants\n\n\t\/\/ PluginPath is the docker plugins directory where docker plugin is present.\n\tPluginPath = \"\/run\/docker\/plugins\/\"\n\t\/\/ DriverSock is the cilium socket for the communication between docker and cilium.\n\tDriverSock = PluginPath + \"cilium.sock\"\n\t\/\/ CiliumPath is the path where cilium operational files are running.\n\tCiliumPath = \"\/var\/run\/cilium\/\"\n\t\/\/ CiliumSock is the cilium socket for the communication between the daemon and cilium client.\n\tCiliumSock = CiliumPath + \"cilium.sock\"\n\t\/\/ DefaultContainerMAC represents a dummy MAC address for the containers.\n\tDefaultContainerMAC = \"AA:BB:CC:DD:EE:FF\"\n\t\/\/ BPFMap is the file that contains the BPF Map for the host.\n\tBPFMap = \"\/sys\/fs\/bpf\/tc\/globals\/cilium_lxc\"\n\tBPFMapRoot = \"\/sys\/fs\/bpf\"\n\t\/\/ PolicyMapPath is the base path for the cilium policy for each local container.\n\tPolicyMapPath = \"\/sys\/fs\/bpf\/tc\/globals\/cilium_policy_\"\n\t\/\/ RFC3339Milli is the RFC3339 with milliseconds for the default timestamp format\n\t\/\/ log files.\n\tRFC3339Milli = \"2006-01-02T15:04:05.000Z07:00\"\n\n\t\/\/ Consul dedicated constants\n\n\t\/\/ OperationalPath is the base path to store the operational details in consul.\n\tOperationalPath = \"cilium-net\/operational\"\n\t\/\/ LastFreeIDKeyPath is the path where the Last free UUID is stored in consul.\n\tLastFreeIDKeyPath = OperationalPath + \"\/LastUUID\"\n\t\/\/ LabelsKeyPath is the base path where labels are stored in consul.\n\tLabelsKeyPath = OperationalPath + \"\/SHA256SUMLabels\/\"\n\t\/\/ IDKeyPath is the base path where the IDs are stored in consul.\n\tIDKeyPath = OperationalPath + \"\/ID\/\"\n\t\/\/ MaxSetOfLabels is maximum number of set of labels that can be stored in consul.\n\tMaxSetOfLabels = 0xFFFF\n\t\/\/ FirstFreeID is the first ID for which the labels should be assigned.\n\tFirstFreeID = 256\n\t\/\/ SecCtxFromHost represents reserved security context IDs reserved for special\n\t\/\/ purposes.\n\tSecCtxFromHost = 1\n\n\t\/\/ Networking dedicated constants\n\n\t\/\/ DefaultIPv6Prefix is the default IPv6 address assigned to the cilium interface.\n\tDefaultIPv6Prefix = \"beef::\"\n\t\/\/ DefaultIPv4Prefix is the IPv6 prefix used to map IPv4 addresses.\n\tDefaultIPv4Prefix = \"dead::\"\n\t\/\/ DefaultIPv4Range is the CIDR used for 6to4 communications.\n\tDefaultIPv4Range = `10.%d.0.0\/16`\n\t\/\/ DefaultIPv4Mask is the default mask for the CIDR used for 6to4 communications.\n\tDefaultIPv4Mask = 16\n\n\t\/\/ Miscellaneous dedicated constants\n\n\t\/\/ GlobalLabelPrefix is the default root path for the policy.\n\tGlobalLabelPrefix = \"io.cilium\"\n\t\/\/ CiliumLabelSource is the default label source for the labels read from containers.\n\tCiliumLabelSource = \"cilium\"\n\t\/\/ K8sLabelSource is the default label source for the labels read from kubernetes.\n\tK8sLabelSource = \"k8s\"\n\t\/\/ Label source for reserved types\n\tReservedLabelSource = \"reserved\"\n\t\/\/ EndpointsPerHost is the maximum number of endpoints allowed per host. It should\n\t\/\/ represent the same number of IPv6 addresses supported on each node.\n\tEndpointsPerHost = 0xFFFF\n\n\t\/\/ Endpoint prefixes\n\n\t\/\/ CiliumPrefix is used to distinguish cilium IDs between different ID types.\n\tCiliumPrefix = \"cilium:\/\/\"\n\t\/\/ DockerPrefix is used to distinguish docker IDs between different ID types.\n\tDockerPrefix = \"docker:\/\/\"\n)\n\nvar (\n\t\/\/ Default addressing schema\n\t\/\/\n\t\/\/ cluster:\t\t beef:beef:beef:beef::\/64\n\t\/\/ loadbalancer: beef:beef:beef:beef:<lb>::\/80\n\t\/\/ node:\t\t beef:beef:beef:beef:<lb>:<node>:<node>:\/112\n\t\/\/ lxc:\t\t\t beef:beef:beef:beef:<lb>:<node>:<node>:<lxc>\/128\n\n\t\/\/ ClusterIPv6Mask represents the CIDR Mask for the cilium cluster.\n\tClusterIPv6Mask = net.CIDRMask(64, 128)\n\t\/\/ LoadBalancerIPv6Mask represents the CIDR Mask for the cilium load balancer.\n\tLoadBalancerIPv6Mask = net.CIDRMask(80, 128)\n\t\/\/ NodeIPv6Mask represents the CIDR Mask for the cilium node.\n\tNodeIPv6Mask = net.CIDRMask(112, 128)\n\t\/\/ ContainerIPv6Mask represents the CIDR Mask for the cilium endpoint\/container.\n\tContainerIPv6Mask = net.CIDRMask(128, 128)\n)\n<commit_msg>Added specific BPF options in const.go<commit_after>package common\n\nimport (\n\t\"net\"\n)\n\nconst (\n\t\/\/ Cilium constants\n\n\t\/\/ PluginPath is the docker plugins directory where docker plugin is present.\n\tPluginPath = \"\/run\/docker\/plugins\/\"\n\t\/\/ DriverSock is the cilium socket for the communication between docker and cilium.\n\tDriverSock = PluginPath + \"cilium.sock\"\n\t\/\/ CiliumPath is the path where cilium operational files are running.\n\tCiliumPath = \"\/var\/run\/cilium\/\"\n\t\/\/ CiliumSock is the cilium socket for the communication between the daemon and cilium client.\n\tCiliumSock = CiliumPath + \"cilium.sock\"\n\t\/\/ DefaultContainerMAC represents a dummy MAC address for the containers.\n\tDefaultContainerMAC = \"AA:BB:CC:DD:EE:FF\"\n\t\/\/ BPFMap is the file that contains the BPF Map for the host.\n\tBPFMap = \"\/sys\/fs\/bpf\/tc\/globals\/cilium_lxc\"\n\tBPFMapRoot = \"\/sys\/fs\/bpf\"\n\t\/\/ PolicyMapPath is the base path for the cilium policy for each local container.\n\tPolicyMapPath = \"\/sys\/fs\/bpf\/tc\/globals\/cilium_policy_\"\n\t\/\/ RFC3339Milli is the RFC3339 with milliseconds for the default timestamp format\n\t\/\/ log files.\n\tRFC3339Milli = \"2006-01-02T15:04:05.000Z07:00\"\n\t\/\/ DisablePolicyEnforcement represent the C constant to disable policy\n\t\/\/ enforcement.\n\tDisablePolicyEnforcement = \"DISABLE_POLICY_ENFORCEMENT\"\n\t\/\/ EnableNAT46 represent the C constant to enable nat46 mode.\n\tEnableNAT46 = \"ENABLE_NAT46\"\n\n\t\/\/ Consul dedicated constants\n\n\t\/\/ OperationalPath is the base path to store the operational details in consul.\n\tOperationalPath = \"cilium-net\/operational\"\n\t\/\/ LastFreeIDKeyPath is the path where the Last free UUID is stored in consul.\n\tLastFreeIDKeyPath = OperationalPath + \"\/LastUUID\"\n\t\/\/ LabelsKeyPath is the base path where labels are stored in consul.\n\tLabelsKeyPath = OperationalPath + \"\/SHA256SUMLabels\/\"\n\t\/\/ IDKeyPath is the base path where the IDs are stored in consul.\n\tIDKeyPath = OperationalPath + \"\/ID\/\"\n\t\/\/ MaxSetOfLabels is maximum number of set of labels that can be stored in consul.\n\tMaxSetOfLabels = 0xFFFF\n\t\/\/ FirstFreeID is the first ID for which the labels should be assigned.\n\tFirstFreeID = 256\n\t\/\/ SecCtxFromHost represents reserved security context IDs reserved for special\n\t\/\/ purposes.\n\tSecCtxFromHost = 1\n\n\t\/\/ Networking dedicated constants\n\n\t\/\/ DefaultIPv6Prefix is the default IPv6 address assigned to the cilium interface.\n\tDefaultIPv6Prefix = \"beef::\"\n\t\/\/ DefaultIPv4Prefix is the IPv6 prefix used to map IPv4 addresses.\n\tDefaultIPv4Prefix = \"dead::\"\n\t\/\/ DefaultIPv4Range is the CIDR used for 6to4 communications.\n\tDefaultIPv4Range = `10.%d.0.0\/16`\n\t\/\/ DefaultIPv4Mask is the default mask for the CIDR used for 6to4 communications.\n\tDefaultIPv4Mask = 16\n\n\t\/\/ Miscellaneous dedicated constants\n\n\t\/\/ GlobalLabelPrefix is the default root path for the policy.\n\tGlobalLabelPrefix = \"io.cilium\"\n\t\/\/ CiliumLabelSource is the default label source for the labels read from containers.\n\tCiliumLabelSource = \"cilium\"\n\t\/\/ K8sLabelSource is the default label source for the labels read from kubernetes.\n\tK8sLabelSource = \"k8s\"\n\t\/\/ Label source for reserved types\n\tReservedLabelSource = \"reserved\"\n\t\/\/ EndpointsPerHost is the maximum number of endpoints allowed per host. It should\n\t\/\/ represent the same number of IPv6 addresses supported on each node.\n\tEndpointsPerHost = 0xFFFF\n\n\t\/\/ Endpoint prefixes\n\n\t\/\/ CiliumPrefix is used to distinguish cilium IDs between different ID types.\n\tCiliumPrefix = \"cilium:\/\/\"\n\t\/\/ DockerPrefix is used to distinguish docker IDs between different ID types.\n\tDockerPrefix = \"docker:\/\/\"\n)\n\nvar (\n\t\/\/ Default addressing schema\n\t\/\/\n\t\/\/ cluster:\t\t beef:beef:beef:beef::\/64\n\t\/\/ loadbalancer: beef:beef:beef:beef:<lb>::\/80\n\t\/\/ node:\t\t beef:beef:beef:beef:<lb>:<node>:<node>:\/112\n\t\/\/ lxc:\t\t\t beef:beef:beef:beef:<lb>:<node>:<node>:<lxc>\/128\n\n\t\/\/ ClusterIPv6Mask represents the CIDR Mask for the cilium cluster.\n\tClusterIPv6Mask = net.CIDRMask(64, 128)\n\t\/\/ LoadBalancerIPv6Mask represents the CIDR Mask for the cilium load balancer.\n\tLoadBalancerIPv6Mask = net.CIDRMask(80, 128)\n\t\/\/ NodeIPv6Mask represents the CIDR Mask for the cilium node.\n\tNodeIPv6Mask = net.CIDRMask(112, 128)\n\t\/\/ ContainerIPv6Mask represents the CIDR Mask for the cilium endpoint\/container.\n\tContainerIPv6Mask = net.CIDRMask(128, 128)\n)\n<|endoftext|>"} {"text":"<commit_before>package common\n\nimport \"reflect\"\n\n\/\/PageParam 分页参数\ntype PageParam struct {\n\t\/\/页数,从1开始\n\tPage int `json:\"page\"`\n\t\/\/每页的条数,>0\n\tPageSize int `json:\"page_size\"`\n}\n\n\/\/Limit 根据maxPage和maxPageSize限制Page和PageSize\nfunc (p *PageParam) Limit(maxPage, maxPageSize int) {\n\tif p.Page <= 0 {\n\t\tp.Page = 1\n\t}\n\tif p.PageSize <= 0 {\n\t\tp.PageSize = 10\n\t}\n\tif maxPage > 0 && p.Page > maxPage {\n\t\tp.Page = maxPage\n\t}\n\tif maxPageSize > 0 && p.PageSize > maxPageSize {\n\t\tp.PageSize = maxPageSize\n\t}\n}\n\n\/\/StartIndex 返回从0开始的起始索引\nfunc (p *PageParam) StartIndex() int {\n\treturn (p.Page - 1) * p.PageSize\n}\n\n\/\/EndIndex 返回从0开始的截止索引\nfunc (p *PageParam) EndIndex() int {\n\treturn p.StartIndex() + p.PageSize - 1\n}\n\n\/\/ PageResultItemsSetter 结果设置\ntype PageResultItemsSetter func(sum int, index int, elem interface{})\n\n\/\/ ResultSet is the result set with total and items\ntype ResultSet interface {\n\tSetTotal(total int64)\n\tSetData(data interface{}, itemsSetter PageResultItemsSetter)\n\tGetItemsSetter() PageResultItemsSetter\n\tCalTotalPage()\n}\n\n\/\/PageResult 分页结果\ntype PageResult struct {\n\tPageParam\n\tTotal int64 `json:\"total\"`\n\tTotalPage int64 `json:\"totalPage\"`\n\tItems interface{} `json:\"items\"`\n}\n\n\/\/ SetTotal implements ResultSet.SetTotal\nfunc (p *PageResult) SetTotal(total int64) {\n\tp.Total = total\n}\n\n\/\/ SetData implements ResultSet.SetData\nfunc (p *PageResult) SetData(data interface{}, itemsSetter PageResultItemsSetter) {\n\tp.Items = data\n\n\tif itemsSetter == nil {\n\t\treturn\n\t}\n\n\tsliceVal, _, _ := ExtractRefTuple(data)\n\tif sliceVal.Kind() != reflect.Slice {\n\t\treturn\n\t}\n\n\titemsLen := sliceVal.Len()\n\n\tfor i := 0; i < itemsLen; i++ {\n\t\titemsSetter(itemsLen, i, sliceVal.Index(i).Interface())\n\t}\n}\n\n\/\/ CalTotalPage 计算总页数\nfunc (p *PageResult) CalTotalPage() {\n\tif p.PageSize > 0 {\n\t\tif p.Total%int64(p.PageSize) == 0 {\n\t\t\tp.TotalPage = p.Total \/ int64(p.PageSize)\n\t\t} else {\n\t\t\tp.TotalPage = p.Total\/int64(p.PageSize) + 1\n\t\t}\n\t}\n}\n\n\/\/Query 基本的查询参数\ntype Query struct {\n\tPageParam\n\t\/\/ID\n\tID int64 `json:\"id\"`\n}\n<commit_msg>add cursor param<commit_after>package common\n\nimport \"reflect\"\n\n\/\/PageParam 分页参数\ntype PageParam struct {\n\t\/\/页数,从1开始\n\tPage int `json:\"page\"`\n\t\/\/每页的条数,>0\n\tPageSize int `json:\"page_size\"`\n\t\/\/游标\n\tCursor int64 `json:\"cursor\"`\n}\n\n\/\/Limit 根据maxPage和maxPageSize限制Page和PageSize\nfunc (p *PageParam) Limit(maxPage, maxPageSize int) {\n\tif p.Page <= 0 {\n\t\tp.Page = 1\n\t}\n\tif p.PageSize <= 0 {\n\t\tp.PageSize = 10\n\t}\n\tif maxPage > 0 && p.Page > maxPage {\n\t\tp.Page = maxPage\n\t}\n\tif maxPageSize > 0 && p.PageSize > maxPageSize {\n\t\tp.PageSize = maxPageSize\n\t}\n}\n\n\/\/StartIndex 返回从0开始的起始索引\nfunc (p *PageParam) StartIndex() int {\n\treturn (p.Page - 1) * p.PageSize\n}\n\n\/\/EndIndex 返回从0开始的截止索引\nfunc (p *PageParam) EndIndex() int {\n\treturn p.StartIndex() + p.PageSize - 1\n}\n\n\/\/ PageResultItemsSetter 结果设置\ntype PageResultItemsSetter func(sum int, index int, elem interface{})\n\n\/\/ ResultSet is the result set with total and items\ntype ResultSet interface {\n\tSetTotal(total int64)\n\tSetData(data interface{}, itemsSetter PageResultItemsSetter)\n\tGetItemsSetter() PageResultItemsSetter\n\tCalTotalPage()\n}\n\n\/\/PageResult 分页结果\ntype PageResult struct {\n\tPageParam\n\tTotal int64 `json:\"total\"`\n\tTotalPage int64 `json:\"totalPage\"`\n\tItems interface{} `json:\"items\"`\n}\n\n\/\/ SetTotal implements ResultSet.SetTotal\nfunc (p *PageResult) SetTotal(total int64) {\n\tp.Total = total\n}\n\n\/\/ SetData implements ResultSet.SetData\nfunc (p *PageResult) SetData(data interface{}, itemsSetter PageResultItemsSetter) {\n\tp.Items = data\n\n\tif itemsSetter == nil {\n\t\treturn\n\t}\n\n\tsliceVal, _, _ := ExtractRefTuple(data)\n\tif sliceVal.Kind() != reflect.Slice {\n\t\treturn\n\t}\n\n\titemsLen := sliceVal.Len()\n\n\tfor i := 0; i < itemsLen; i++ {\n\t\titemsSetter(itemsLen, i, sliceVal.Index(i).Interface())\n\t}\n}\n\n\/\/ CalTotalPage 计算总页数\nfunc (p *PageResult) CalTotalPage() {\n\tif p.PageSize > 0 {\n\t\tif p.Total%int64(p.PageSize) == 0 {\n\t\t\tp.TotalPage = p.Total \/ int64(p.PageSize)\n\t\t} else {\n\t\t\tp.TotalPage = p.Total\/int64(p.PageSize) + 1\n\t\t}\n\t}\n}\n\n\/\/Query 基本的查询参数\ntype Query struct {\n\tPageParam\n\t\/\/ID\n\tID int64 `json:\"id\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package common\n\nimport (\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/axgle\/mahonia\"\n\t\"github.com\/saintfish\/chardet\"\n)\n\n\/\/ RequestURL return the search result\nfunc RequestURL(url string) (*http.Response, error) {\n\tclient := &http.Client{}\n\treq, _ := http.NewRequest(\"GET\", url, nil)\n\treq.Header.Set(\"User-Agent\", GetUserAgent())\n\tresponse, err := client.Do(req)\n\treturn response, err\n}\n\n\/\/ QuickestURL return the quickest response\nfunc QuickestURL(index int, url string) int {\n\t\/\/ start := time.Now()\n\t_, err := http.Get(url)\n\tif err != nil {\n\t\treturn -1\n\t}\n\t\/\/ timeUsed := strconv.FormatFloat(time.Since(start).Seconds(), 'f', 6, 64)\n\t\/\/ fmt.Println(url, timeUsed)\n\treturn index\n}\n\n\/\/ DetectBody gbk convert to utf-8\nfunc DetectBody(body []byte) string {\n\tvar bodyString string\n\tdetector := chardet.NewTextDetector()\n\tresult, err := detector.DetectBest(body)\n\tif err != nil {\n\t\treturn string(body)\n\t}\n\tif strings.Contains(strings.ToLower(result.Charset), \"utf\") {\n\t\tbodyString = string(body)\n\t} else {\n\t\tbodyString = mahonia.NewDecoder(\"gbk\").ConvertString(string(body))\n\t}\n\treturn bodyString\n}\n\n\/\/ StringInSlice search for an element in a golang slice\nfunc StringInSlice(domain string, list []string) bool {\n\tfor _, eachDomain := range list {\n\t\tif domain == eachDomain {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ReturnDomain parse url and return the domain\nfunc ReturnDomain(currentURL string) string {\n\turlParse, _ := url.Parse(currentURL)\n\tdomain := urlParse.Host\n\treturn domain\n}\n<commit_msg>一个空格引起的更新<commit_after>package common\n\nimport (\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/axgle\/mahonia\"\n\t\"github.com\/saintfish\/chardet\"\n)\n\n\/\/ RequestURL return the search result\nfunc RequestURL(url string) (*http.Response, error) {\n\tclient := &http.Client{}\n\treq, _ := http.NewRequest(\"GET\", url, nil)\n\treq.Header.Set(\"User-Agent\", GetUserAgent())\n\tresponse, err := client.Do(req)\n\treturn response, err\n}\n\n\/\/ QuickestURL return the quickest response\nfunc QuickestURL(index int, url string) int {\n\t\/\/ start := time.Now()\n\t_, err := http.Get(url)\n\tif err != nil {\n\t\treturn -1\n\t}\n\t\/\/ timeUsed := strconv.FormatFloat(time.Since(start).Seconds(), 'f', 6, 64)\n\t\/\/ fmt.Println(url, timeUsed)\n\treturn index\n}\n\n\/\/ DetectBody gbk convert to utf-8\nfunc DetectBody(body []byte) string {\n\tvar bodyString string\n\tdetector := chardet.NewTextDetector()\n\tresult, err := detector.DetectBest(body)\n\tif err != nil {\n\t\treturn string(body)\n\t}\n\tif strings.Contains(strings.ToLower(result.Charset), \"utf\") {\n\t\tbodyString = string(body)\n\t} else {\n\t\tbodyString = mahonia.NewDecoder(\"gbk\").ConvertString(string(body))\n\t}\n\treturn bodyString\n}\n\n\/\/ StringInSlice search for an element in a golang slice\nfunc StringInSlice(domain string, list []string) bool {\n\tfor _, eachDomain := range list {\n\t\tif domain == eachDomain {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ ReturnDomain parse url and return the domain\nfunc ReturnDomain(currentURL string) string {\n\turlParse, _ := url.Parse(currentURL)\n\tdomain := urlParse.Host\n\treturn domain\n}\n<|endoftext|>"} {"text":"<commit_before>package utee\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/md5\"\n\t\"crypto\/sha1\"\n\t\"crypto\/sha256\"\n\t\"crypto\/x509\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"encoding\/pem\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/smtp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\/utf8\"\n)\n\nvar (\n\tPlainMd5 = Md5Str(\"\")\n\tPlainSha1 = Sha1Str(\"\")\n)\n\nfunc init() {\n\trand.Seed(time.Now().UTC().UnixNano())\n}\n\nfunc Md5Str(salt string) func(string) string {\n\treturn func(s string) string {\n\t\th := md5.New()\n\t\tio.WriteString(h, s)\n\t\tio.WriteString(h, salt)\n\t\treturn hex.EncodeToString(h.Sum(nil))\n\t}\n}\n\nfunc Sha1Str(salt string) func(string) string {\n\treturn func(s string) string {\n\t\th := sha1.New()\n\t\tio.WriteString(h, s)\n\t\tio.WriteString(h, salt)\n\t\treturn hex.EncodeToString(h.Sum(nil))\n\t}\n}\n\nfunc Sha256Str(salt string) func(string) string {\n\treturn func(s string) string {\n\t\th := sha256.New()\n\t\tio.WriteString(h, s)\n\t\tio.WriteString(h, salt)\n\t\treturn hex.EncodeToString(h.Sum(nil))\n\t}\n}\n\nfunc HmacSha256(s string, key string) string {\n\th := hmac.New(sha256.New, []byte(key))\n\tio.WriteString(h, s)\n\treturn hex.EncodeToString(h.Sum(nil))\n}\n\nfunc Chk(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc Log(err error, prefix ...string) {\n\tif err == nil {\n\t\treturn\n\t}\n\n\ts := \"\"\n\tif len(prefix) > 0 {\n\t\ts = prefix[0]\n\t}\n\tlog.Println(s, err)\n}\n\n\/\/ Truncate truncate string\nfunc Truncate(s string, n int) string {\n\tif n <= 0 {\n\t\treturn \"\"\n\t}\n\n\tlength := utf8.RuneCountInString(s)\n\tif length <= n {\n\t\treturn s\n\t}\n\n\tl := []rune{}\n\tfor _, r := range s {\n\t\tl = append(l, r)\n\t}\n\n\tl = l[:(length - n)]\n\treturn string(l)\n}\n\nfunc Tick(t ...time.Time) int64 {\n\tif len(t) == 0 {\n\t\treturn time.Now().UnixNano() \/ 1e6\n\t} else {\n\t\treturn t[0].UnixNano() \/ 1e6\n\t}\n}\n\nfunc TickSec() int64 {\n\treturn time.Now().Unix()\n}\n\nfunc TickHour() int64 {\n\treturn time.Now().Unix() \/ 3600 * 3600\n}\n\nfunc Millis(fmt string, timeStr string) (int64, error) {\n\tdata, err := time.Parse(fmt, timeStr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn data.UnixNano() \/ 1e6, nil\n}\n\nfunc Md5(b []byte) []byte {\n\th := md5.New()\n\th.Write(b)\n\treturn h.Sum(nil)\n}\n\nfunc DeleteMap(m map[string]interface{}, ks ...string) {\n\tfor _, v := range ks {\n\t\tdelete(m, v)\n\t}\n}\n\nfunc IsPemExpire(b []byte) (bool, error) {\n\tblock, _ := pem.Decode(b)\n\tif block == nil {\n\t\treturn false, errors.New(\"failed to parse certificate PEM\")\n\t}\n\tcert, err := x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn cert.NotAfter.Before(time.Now()), nil\n}\n\nfunc Shuffle(src []string) []string {\n\tdest := make([]string, len(src))\n\tperm := rand.Perm(len(src))\n\tfor i, v := range perm {\n\t\tdest[v] = src[i]\n\t}\n\treturn dest\n}\n\nfunc SendMail(user, password, host, to, subject, body, mailtype string) error {\n\thp := strings.Split(host, \":\")\n\tauth := smtp.PlainAuth(\"\", user, password, hp[0])\n\tvar content_type string\n\tif mailtype == \"html\" {\n\t\tcontent_type = \"Content-Type: text\/\" + mailtype + \"; charset=UTF-8\"\n\t} else {\n\t\tcontent_type = \"Content-Type: text\/plain\" + \"; charset=UTF-8\"\n\t}\n\n\tmsg := []byte(\"To: \" + to + \"\\r\\nFrom: \" + user + \"<\" + user + \">\\r\\nSubject: \" + subject + \"\\r\\n\" + content_type + \"\\r\\n\\r\\n\" + body)\n\tsend_to := strings.Split(to, \";\")\n\terr := smtp.SendMail(host, auth, user, send_to, msg)\n\treturn err\n}\n\nfunc ParseAddr(s string) (string, int, error) {\n\ta := strings.Split(s, \":\")\n\tif len(a) != 2 {\n\t\treturn \"\", 0, fmt.Errorf(\"bad url %s\", s)\n\t}\n\tport, err := strconv.Atoi(a[1])\n\treturn a[0], port, err\n}\n\n\/\/ split a into several parts, no more than n\nfunc SplitSlice(a []string, n int) [][]string {\n\tif len(a) < n || n == 1 {\n\t\treturn [][]string{a}\n\t}\n\n\tresult := make([][]string, n)\n\tfor i, s := range a {\n\t\tidx := i % n\n\t\tresult[idx] = append(result[idx], s)\n\t}\n\treturn result\n}\n\nfunc IntToInf(src []int) []interface{} {\n\tresult := []interface{}{}\n\tfor _, v := range src {\n\t\tresult = append(result, v)\n\t}\n\treturn result\n}\n\nfunc StrToInf(src []string) []interface{} {\n\tresult := []interface{}{}\n\tfor _, v := range src {\n\t\tresult = append(result, v)\n\t}\n\treturn result\n}\n\nfunc PrintJson(any ...interface{}) {\n\tfor _, obj := range any {\n\t\tb, err := json.Marshal(obj)\n\t\tfmt.Println(err, string(b))\n\t}\n}\n<commit_msg>v1.1.1, revert uniq function<commit_after>package utee\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/md5\"\n\t\"crypto\/sha1\"\n\t\"crypto\/sha256\"\n\t\"crypto\/x509\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"encoding\/pem\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/smtp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\/utf8\"\n)\n\nvar (\n\tPlainMd5 = Md5Str(\"\")\n\tPlainSha1 = Sha1Str(\"\")\n)\n\nfunc init() {\n\trand.Seed(time.Now().UTC().UnixNano())\n}\n\nfunc Md5Str(salt string) func(string) string {\n\treturn func(s string) string {\n\t\th := md5.New()\n\t\tio.WriteString(h, s)\n\t\tio.WriteString(h, salt)\n\t\treturn hex.EncodeToString(h.Sum(nil))\n\t}\n}\n\nfunc Sha1Str(salt string) func(string) string {\n\treturn func(s string) string {\n\t\th := sha1.New()\n\t\tio.WriteString(h, s)\n\t\tio.WriteString(h, salt)\n\t\treturn hex.EncodeToString(h.Sum(nil))\n\t}\n}\n\nfunc Sha256Str(salt string) func(string) string {\n\treturn func(s string) string {\n\t\th := sha256.New()\n\t\tio.WriteString(h, s)\n\t\tio.WriteString(h, salt)\n\t\treturn hex.EncodeToString(h.Sum(nil))\n\t}\n}\n\nfunc HmacSha256(s string, key string) string {\n\th := hmac.New(sha256.New, []byte(key))\n\tio.WriteString(h, s)\n\treturn hex.EncodeToString(h.Sum(nil))\n}\n\nfunc Chk(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc Log(err error, prefix ...string) {\n\tif err == nil {\n\t\treturn\n\t}\n\n\ts := \"\"\n\tif len(prefix) > 0 {\n\t\ts = prefix[0]\n\t}\n\tlog.Println(s, err)\n}\n\n\/\/ Truncate truncate string\nfunc Truncate(s string, n int) string {\n\tif n <= 0 {\n\t\treturn \"\"\n\t}\n\n\tlength := utf8.RuneCountInString(s)\n\tif length <= n {\n\t\treturn s\n\t}\n\n\tl := []rune{}\n\tfor _, r := range s {\n\t\tl = append(l, r)\n\t}\n\n\tl = l[:(length - n)]\n\treturn string(l)\n}\n\nfunc Tick(t ...time.Time) int64 {\n\tif len(t) == 0 {\n\t\treturn time.Now().UnixNano() \/ 1e6\n\t} else {\n\t\treturn t[0].UnixNano() \/ 1e6\n\t}\n}\n\nfunc TickSec() int64 {\n\treturn time.Now().Unix()\n}\n\nfunc TickHour() int64 {\n\treturn time.Now().Unix() \/ 3600 * 3600\n}\n\nfunc Millis(fmt string, timeStr string) (int64, error) {\n\tdata, err := time.Parse(fmt, timeStr)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn data.UnixNano() \/ 1e6, nil\n}\n\nfunc Md5(b []byte) []byte {\n\th := md5.New()\n\th.Write(b)\n\treturn h.Sum(nil)\n}\n\nfunc DeleteMap(m map[string]interface{}, ks ...string) {\n\tfor _, v := range ks {\n\t\tdelete(m, v)\n\t}\n}\n\nfunc IsPemExpire(b []byte) (bool, error) {\n\tblock, _ := pem.Decode(b)\n\tif block == nil {\n\t\treturn false, errors.New(\"failed to parse certificate PEM\")\n\t}\n\tcert, err := x509.ParseCertificate(block.Bytes)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn cert.NotAfter.Before(time.Now()), nil\n}\n\nfunc Shuffle(src []string) []string {\n\tdest := make([]string, len(src))\n\tperm := rand.Perm(len(src))\n\tfor i, v := range perm {\n\t\tdest[v] = src[i]\n\t}\n\treturn dest\n}\n\nfunc SendMail(user, password, host, to, subject, body, mailtype string) error {\n\thp := strings.Split(host, \":\")\n\tauth := smtp.PlainAuth(\"\", user, password, hp[0])\n\tvar content_type string\n\tif mailtype == \"html\" {\n\t\tcontent_type = \"Content-Type: text\/\" + mailtype + \"; charset=UTF-8\"\n\t} else {\n\t\tcontent_type = \"Content-Type: text\/plain\" + \"; charset=UTF-8\"\n\t}\n\n\tmsg := []byte(\"To: \" + to + \"\\r\\nFrom: \" + user + \"<\" + user + \">\\r\\nSubject: \" + subject + \"\\r\\n\" + content_type + \"\\r\\n\\r\\n\" + body)\n\tsend_to := strings.Split(to, \";\")\n\terr := smtp.SendMail(host, auth, user, send_to, msg)\n\treturn err\n}\n\nfunc ParseAddr(s string) (string, int, error) {\n\ta := strings.Split(s, \":\")\n\tif len(a) != 2 {\n\t\treturn \"\", 0, fmt.Errorf(\"bad url %s\", s)\n\t}\n\tport, err := strconv.Atoi(a[1])\n\treturn a[0], port, err\n}\n\nfunc Unique(data []interface{}) []interface{} {\n\tm := map[interface{}]interface{}{}\n\n\tfor _, d := range data {\n\t\tm[d] = \"0\"\n\t}\n\n\tl := []interface{}{}\n\tfor key := range m {\n\t\tl = append(l, key)\n\t}\n\treturn l\n}\n\nfunc UniqueInt(data []int) []int {\n\tm := map[int]string{}\n\n\tfor _, d := range data {\n\t\tm[d] = \"0\"\n\t}\n\n\tl := []int{}\n\tfor key := range m {\n\t\tl = append(l, key)\n\t}\n\treturn l\n}\n\nfunc UniqueStr(data []string) []string {\n\tm := map[string]string{}\n\n\tfor _, d := range data {\n\t\tm[d] = \"0\"\n\t}\n\n\tl := []string{}\n\tfor key := range m {\n\t\tl = append(l, key)\n\t}\n\treturn l\n}\n\n\/\/ split a into several parts, no more than n\nfunc SplitSlice(a []string, n int) [][]string {\n\tif len(a) < n || n == 1 {\n\t\treturn [][]string{a}\n\t}\n\n\tresult := make([][]string, n)\n\tfor i, s := range a {\n\t\tidx := i % n\n\t\tresult[idx] = append(result[idx], s)\n\t}\n\treturn result\n}\n\nfunc IntToInf(src []int) []interface{} {\n\tresult := []interface{}{}\n\tfor _, v := range src {\n\t\tresult = append(result, v)\n\t}\n\treturn result\n}\n\nfunc StrToInf(src []string) []interface{} {\n\tresult := []interface{}{}\n\tfor _, v := range src {\n\t\tresult = append(result, v)\n\t}\n\treturn result\n}\n\nfunc PrintJson(any ...interface{}) {\n\tfor _, obj := range any {\n\t\tb, err := json.Marshal(obj)\n\t\tfmt.Println(err, string(b))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 Google Inc. All Rights Reserved.\n\/\/ This file is available under the Apache license.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype opcode int\n\nconst (\n\tmatch opcode = iota \/\/ Match a regular expression against input, and set the match register.\n\tcmp \/\/ Compare two values on the stack and set the match register.\n\tjnm \/\/ Jump if no match.\n\tjm \/\/ Jump if match.\n\tinc \/\/ Increment a variable value\n\tstrptime \/\/ Parse into the timestamp register\n\ttimestamp \/\/ Return value of timestamp register\n\tret \/\/ Return, end program successfully\n\tpush \/\/ Push operand onto stack\n\tcapref \/\/ Push capture group reference at operand onto stack\n\tstr \/\/ Push string constant at operand onto stack\n\tset \/\/ Set a variable value\n\tadd \/\/ Add top values on stack and push to stack\n\tsub \/\/ Subtract tpo value from second top value on stack, and push to stack.\n\tmload \/\/ Load metric at operand onto top of stack.\n\tdload \/\/ Pop operand keys and metric off stack and load datum at metric[key] onto stack.\n)\n\nvar opNames = map[opcode]string{\n\tmatch: \"match\",\n\tcmp: \"cmp\",\n\tjnm: \"jnm\",\n\tjm: \"jm\",\n\tinc: \"inc\",\n\tstrptime: \"strptime\",\n\ttimestamp: \"timestamp\",\n\tret: \"ret\",\n\tpush: \"push\",\n\tcapref: \"capref\",\n\tstr: \"str\",\n\tset: \"set\",\n\tadd: \"add\",\n\tsub: \"sub\",\n\tmload: \"mload\",\n\tdload: \"dload\",\n}\n\nvar builtin = map[string]opcode{\n\t\"strptime\": strptime,\n\t\"timestamp\": timestamp,\n}\n\ntype instr struct {\n\top opcode\n\topnd int\n}\n\n\/\/ func (i instr) String() string {\n\/\/ \treturn fmt.Sprintf(\"%s %d\", opNames[i.op], i.opnd)\n\/\/ }\n\ntype thread struct {\n\tpc int \/\/ Program counter.\n\tmatch bool \/\/ Match register.\n\tmatches map[int][]string \/\/ Match result variables.\n\ttime time.Time \/\/ Time register.\n\tstack []interface{} \/\/ Data stack.\n}\n\ntype vm struct {\n\tname string\n\tprog []instr\n\n\tre []*regexp.Regexp \/\/ Regular expression constants\n\tstr []string \/\/ String constants\n\n\tts_mem map[string]time.Time \/\/ memo of time string parse results\n\n\tt thread \/\/ Current thread of execution\n}\n\n\/\/ Push a value onto the stack\nfunc (t *thread) Push(value interface{}) {\n\tt.stack = append(t.stack, value)\n}\n\n\/\/ Pop a value off the stack\nfunc (t *thread) Pop() (value interface{}) {\n\tlast := len(t.stack) - 1\n\tvalue = t.stack[last]\n\tt.stack = t.stack[:last]\n\treturn\n}\n\n\/\/ Log a runtime error and terminate the program\nfunc (v *vm) errorf(format string, args ...interface{}) bool {\n\tlog.Printf(\"Runtime error: \"+format+\"\\n\", args...)\n\tv.t.match = false\n\treturn true\n}\n\nfunc (t *thread) PopInt() (int64, error) {\n\tval := t.Pop()\n\tswitch n := val.(type) {\n\tcase int64:\n\t\treturn n, nil\n\tcase int:\n\t\treturn int64(n), nil\n\tcase string:\n\t\tr, err := strconv.ParseInt(n, 10, 64)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"conversion of %q to numeric failed: %s\", val, err)\n\t\t}\n\t\treturn r, nil\n\tcase time.Time:\n\t\treturn n.Unix(), nil\n\tcase *Datum:\n\t\treturn n.Value, nil\n\t}\n\treturn 0, fmt.Errorf(\"Unexpected numeric type %T %q\", val, val)\n}\n\n\/\/ Execute acts on the current instruction, and returns a boolean indicating\n\/\/ if the current thread should terminate.\nfunc (v *vm) execute(t *thread, i instr, input string) bool {\n\tswitch i.op {\n\tcase match:\n\t\t\/\/ match regex and store success\n\t\t\/\/ Store the results in the operandth element of the stack,\n\t\t\/\/ where i.opnd == the matched re index\n\t\tt.matches[i.opnd] = v.re[i.opnd].FindStringSubmatch(input)\n\t\tt.match = t.matches[i.opnd] != nil\n\tcase cmp:\n\t\t\/\/ Compare two elements on the stack.\n\t\t\/\/ Set the match register based on the truthiness of the comparison.\n\t\t\/\/ Operand contains the expected result.\n\t\tb, err := t.PopInt()\n\t\tif err != nil {\n\t\t\treturn v.errorf(\"%s\", err)\n\t\t}\n\t\ta, err := t.PopInt()\n\t\tif err != nil {\n\t\t\treturn v.errorf(\"%s\", err)\n\t\t}\n\n\t\tswitch i.opnd {\n\t\tcase -1:\n\t\t\tt.match = a < b\n\t\tcase 0:\n\t\t\tt.match = a == b\n\t\tcase 1:\n\t\t\tt.match = a > b\n\t\t}\n\tcase jnm:\n\t\tif !t.match {\n\t\t\tt.pc = i.opnd\n\t\t\t\/\/ Don't fall to end of loop or pc gets incremented.\n\t\t\treturn false\n\t\t}\n\tcase jm:\n\t\tif t.match {\n\t\t\tt.pc = i.opnd\n\t\t\t\/\/ Don't fall to end of loop or pc gets incremented.\n\t\t\treturn false\n\t\t}\n\tcase inc:\n\t\t\/\/ increment a counter\n\t\tvar delta int64 = 1\n\t\t\/\/ If opnd is nonzero, the delta is on the stack.\n\t\tif i.opnd > 0 {\n\t\t\tvar err error\n\t\t\tdelta, err = t.PopInt()\n\t\t\tif err != nil {\n\t\t\t\treturn v.errorf(\"%s\", err)\n\t\t\t}\n\t\t}\n\t\tswitch d := t.Pop().(type) {\n\t\tcase Incrementable:\n\t\t\td.IncBy(delta, t.time)\n\t\tcase int:\n\t\t\tm := metrics[v.name][d]\n\t\t\tm.IncBy(delta, t.time)\n\t\tdefault:\n\t\t\treturn v.errorf(\"Unexpected type to increment: %T %q\", d, d)\n\t\t}\n\n\tcase set:\n\t\t\/\/ Set a gauge\n\t\tvalue, err := t.PopInt()\n\t\tif err != nil {\n\t\t\treturn v.errorf(\"%s\", err)\n\t\t}\n\n\t\tswitch d := t.Pop().(type) {\n\t\tcase Settable:\n\t\t\td.Set(value, t.time)\n\t\tcase int:\n\t\t\tm := metrics[v.name][d]\n\t\t\tm.Set(value, t.time)\n\t\tdefault:\n\t\t\treturn v.errorf(\"Unexpected type to set: %T %q\", d, d)\n\t\t}\n\n\tcase strptime:\n\t\t\/\/ Parse a time string into the time register\n\t\tlayout := t.Pop().(string)\n\n\t\tvar ts string\n\t\tswitch s := t.Pop().(type) {\n\t\tcase string:\n\t\t\tts = s\n\n\t\tcase int: \/* capref *\/\n\t\t\t\/\/ First find the match storage index on the stack\n\t\t\tre := t.Pop().(int)\n\t\t\t\/\/ Store the result from the re'th index at the s'th index\n\t\t\tts = t.matches[re][s]\n\t\t}\n\t\tif tm, ok := v.ts_mem[ts]; !ok {\n\t\t\ttm, err := time.Parse(layout, ts)\n\t\t\tif err != nil {\n\t\t\t\treturn v.errorf(\"time.Parse(%s, %s) failed: %s\", layout, ts, err)\n\t\t\t}\n\t\t\tv.ts_mem[ts] = tm\n\t\t\tt.time = tm\n\t\t} else {\n\t\t\tt.time = tm\n\t\t}\n\n\tcase timestamp:\n\t\t\/\/ Put the time register onto the stack\n\t\tt.Push(t.time)\n\n\tcase capref:\n\t\t\/\/ Put a capture group reference onto the stack.\n\t\t\/\/ First find the match storage index on the stack,\n\t\tre := t.Pop().(int)\n\t\t\/\/ Push the result from the re'th match at operandth index\n\t\tt.Push(t.matches[re][i.opnd])\n\n\tcase str:\n\t\t\/\/ Put a string constant onto the stack\n\t\tt.Push(v.str[i.opnd])\n\n\tcase ret:\n\t\t\/\/ Exit the virtual machine.\n\t\tt.match = true\n\t\treturn true\n\n\tcase push:\n\t\t\/\/ Push a value onto the stack\n\t\tt.Push(i.opnd)\n\n\tcase add:\n\t\t\/\/ Add two values at TOS, and push result onto stack\n\t\tb, err := t.PopInt()\n\t\tif err != nil {\n\t\t\tv.errorf(\"%s\", err)\n\t\t}\n\t\ta, err := t.PopInt()\n\t\tif err != nil {\n\t\t\tv.errorf(\"%s\", err)\n\t\t}\n\t\tt.Push(a + b)\n\n\tcase sub:\n\t\t\/\/ Subtract two values at TOS, push result onto stack\n\t\tb, err := t.PopInt()\n\t\tif err != nil {\n\t\t\tv.errorf(\"%s\", err)\n\t\t}\n\t\ta, err := t.PopInt()\n\t\tif err != nil {\n\t\t\tv.errorf(\"%s\", err)\n\t\t}\n\t\tt.Push(a - b)\n\n\tcase mload:\n\t\t\/\/ Load a metric at operand onto stack\n\t\tt.Push(metrics[v.name][i.opnd])\n\n\tcase dload:\n\t\t\/\/ Load a datum from metric at TOS onto stack\n\t\tm := t.Pop().(*Metric)\n\t\tvar keys []string\n\t\tfor a := 0; a < i.opnd; a++ {\n\t\t\tkeys = append(keys, t.Pop().(string))\n\t\t}\n\t\th := key_hash(keys)\n\t\tif _, ok := m.Values[h]; !ok {\n\t\t\tm.Values[h] = &Datum{}\n\t\t}\n\t\tt.Push(m.Values[h])\n\n\tdefault:\n\t\treturn v.errorf(\"illegal instruction: %q\", i.op)\n\t}\n\tt.pc++\n\treturn false\n}\n\n\/\/ Run fetches and executes each instruction in the program on the input string\n\/\/ until termination. It returns a boolean indicating a successful action was taken.\nfunc (v *vm) Run(input string) bool {\n\tt := v.t\n\tt.stack = make([]interface{}, 0)\n\tt.matches = make(map[int][]string, 0)\n\tfor {\n\t\tif t.pc >= len(v.prog) {\n\t\t\treturn t.match\n\t\t}\n\t\ti := v.prog[t.pc]\n\t\tterminate := v.execute(&t, i, input)\n\t\tif terminate {\n\t\t\t\/\/ t.match indicates that an action was taken after a match.\n\t\t\treturn t.match\n\t\t}\n\t}\n\tpanic(\"not reached\")\n}\n\nfunc newVm(name string, re []*regexp.Regexp, str []string, prog []instr) *vm {\n\treturn &vm{\n\t\tname: name,\n\t\tre: re,\n\t\tstr: str,\n\t\tprog: prog,\n\t\tts_mem: make(map[string]time.Time, 0),\n\t}\n}\n<commit_msg>Add comment and remove return code of vm execution.<commit_after>\/\/ Copyright 2011 Google Inc. All Rights Reserved.\n\/\/ This file is available under the Apache license.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype opcode int\n\nconst (\n\tmatch opcode = iota \/\/ Match a regular expression against input, and set the match register.\n\tcmp \/\/ Compare two values on the stack and set the match register.\n\tjnm \/\/ Jump if no match.\n\tjm \/\/ Jump if match.\n\tinc \/\/ Increment a variable value\n\tstrptime \/\/ Parse into the timestamp register\n\ttimestamp \/\/ Return value of timestamp register\n\tret \/\/ Return, end program successfully\n\tpush \/\/ Push operand onto stack\n\tcapref \/\/ Push capture group reference at operand onto stack\n\tstr \/\/ Push string constant at operand onto stack\n\tset \/\/ Set a variable value\n\tadd \/\/ Add top values on stack and push to stack\n\tsub \/\/ Subtract tpo value from second top value on stack, and push to stack.\n\tmload \/\/ Load metric at operand onto top of stack.\n\tdload \/\/ Pop operand keys and metric off stack and load datum at metric[key] onto stack.\n)\n\nvar opNames = map[opcode]string{\n\tmatch: \"match\",\n\tcmp: \"cmp\",\n\tjnm: \"jnm\",\n\tjm: \"jm\",\n\tinc: \"inc\",\n\tstrptime: \"strptime\",\n\ttimestamp: \"timestamp\",\n\tret: \"ret\",\n\tpush: \"push\",\n\tcapref: \"capref\",\n\tstr: \"str\",\n\tset: \"set\",\n\tadd: \"add\",\n\tsub: \"sub\",\n\tmload: \"mload\",\n\tdload: \"dload\",\n}\n\nvar builtin = map[string]opcode{\n\t\"strptime\": strptime,\n\t\"timestamp\": timestamp,\n}\n\ntype instr struct {\n\top opcode\n\topnd int\n}\n\n\/\/ func (i instr) String() string {\n\/\/ \treturn fmt.Sprintf(\"%s %d\", opNames[i.op], i.opnd)\n\/\/ }\n\ntype thread struct {\n\tpc int \/\/ Program counter.\n\tmatch bool \/\/ Match register.\n\tmatches map[int][]string \/\/ Match result variables.\n\ttime time.Time \/\/ Time register.\n\tstack []interface{} \/\/ Data stack.\n}\n\ntype vm struct {\n\tname string\n\tprog []instr\n\n\tre []*regexp.Regexp \/\/ Regular expression constants\n\tstr []string \/\/ String constants\n\n\tts_mem map[string]time.Time \/\/ memo of time string parse results\n\n\tt thread \/\/ Current thread of execution\n}\n\n\/\/ Push a value onto the stack\nfunc (t *thread) Push(value interface{}) {\n\tt.stack = append(t.stack, value)\n}\n\n\/\/ Pop a value off the stack\nfunc (t *thread) Pop() (value interface{}) {\n\tlast := len(t.stack) - 1\n\tvalue = t.stack[last]\n\tt.stack = t.stack[:last]\n\treturn\n}\n\n\/\/ Log a runtime error and terminate the program\nfunc (v *vm) errorf(format string, args ...interface{}) bool {\n\tlog.Printf(\"Runtime error: \"+format+\"\\n\", args...)\n\tv.t.match = false\n\treturn true\n}\n\nfunc (t *thread) PopInt() (int64, error) {\n\tval := t.Pop()\n\tswitch n := val.(type) {\n\tcase int64:\n\t\treturn n, nil\n\tcase int:\n\t\treturn int64(n), nil\n\tcase string:\n\t\tr, err := strconv.ParseInt(n, 10, 64)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"conversion of %q to numeric failed: %s\", val, err)\n\t\t}\n\t\treturn r, nil\n\tcase time.Time:\n\t\treturn n.Unix(), nil\n\tcase *Datum:\n\t\treturn n.Value, nil\n\t}\n\treturn 0, fmt.Errorf(\"Unexpected numeric type %T %q\", val, val)\n}\n\n\/\/ Execute acts on the current instruction, and returns a boolean indicating\n\/\/ if the current thread should terminate.\nfunc (v *vm) execute(t *thread, i instr, input string) bool {\n\tswitch i.op {\n\tcase match:\n\t\t\/\/ match regex and store success\n\t\t\/\/ Store the results in the operandth element of the stack,\n\t\t\/\/ where i.opnd == the matched re index\n\t\tt.matches[i.opnd] = v.re[i.opnd].FindStringSubmatch(input)\n\t\tt.match = t.matches[i.opnd] != nil\n\tcase cmp:\n\t\t\/\/ Compare two elements on the stack.\n\t\t\/\/ Set the match register based on the truthiness of the comparison.\n\t\t\/\/ Operand contains the expected result.\n\t\tb, err := t.PopInt()\n\t\tif err != nil {\n\t\t\treturn v.errorf(\"%s\", err)\n\t\t}\n\t\ta, err := t.PopInt()\n\t\tif err != nil {\n\t\t\treturn v.errorf(\"%s\", err)\n\t\t}\n\n\t\tswitch i.opnd {\n\t\tcase -1:\n\t\t\tt.match = a < b\n\t\tcase 0:\n\t\t\tt.match = a == b\n\t\tcase 1:\n\t\t\tt.match = a > b\n\t\t}\n\tcase jnm:\n\t\tif !t.match {\n\t\t\tt.pc = i.opnd\n\t\t\t\/\/ Don't fall to end of loop or pc gets incremented.\n\t\t\treturn false\n\t\t}\n\tcase jm:\n\t\tif t.match {\n\t\t\tt.pc = i.opnd\n\t\t\t\/\/ Don't fall to end of loop or pc gets incremented.\n\t\t\treturn false\n\t\t}\n\tcase inc:\n\t\t\/\/ increment a counter\n\t\tvar delta int64 = 1\n\t\t\/\/ If opnd is nonzero, the delta is on the stack.\n\t\tif i.opnd > 0 {\n\t\t\tvar err error\n\t\t\tdelta, err = t.PopInt()\n\t\t\tif err != nil {\n\t\t\t\treturn v.errorf(\"%s\", err)\n\t\t\t}\n\t\t}\n\t\tswitch d := t.Pop().(type) {\n\t\tcase Incrementable:\n\t\t\td.IncBy(delta, t.time)\n\t\tcase int:\n\t\t\tm := metrics[v.name][d]\n\t\t\tm.IncBy(delta, t.time)\n\t\tdefault:\n\t\t\treturn v.errorf(\"Unexpected type to increment: %T %q\", d, d)\n\t\t}\n\n\tcase set:\n\t\t\/\/ Set a gauge\n\t\tvalue, err := t.PopInt()\n\t\tif err != nil {\n\t\t\treturn v.errorf(\"%s\", err)\n\t\t}\n\n\t\tswitch d := t.Pop().(type) {\n\t\tcase Settable:\n\t\t\td.Set(value, t.time)\n\t\tcase int:\n\t\t\tm := metrics[v.name][d]\n\t\t\tm.Set(value, t.time)\n\t\tdefault:\n\t\t\treturn v.errorf(\"Unexpected type to set: %T %q\", d, d)\n\t\t}\n\n\tcase strptime:\n\t\t\/\/ Parse a time string into the time register\n\t\tlayout := t.Pop().(string)\n\n\t\tvar ts string\n\t\tswitch s := t.Pop().(type) {\n\t\tcase string:\n\t\t\tts = s\n\n\t\tcase int: \/* capref *\/\n\t\t\t\/\/ First find the match storage index on the stack\n\t\t\tre := t.Pop().(int)\n\t\t\t\/\/ Store the result from the re'th index at the s'th index\n\t\t\tts = t.matches[re][s]\n\t\t}\n\t\tif tm, ok := v.ts_mem[ts]; !ok {\n\t\t\ttm, err := time.Parse(layout, ts)\n\t\t\tif err != nil {\n\t\t\t\treturn v.errorf(\"time.Parse(%s, %s) failed: %s\", layout, ts, err)\n\t\t\t}\n\t\t\tv.ts_mem[ts] = tm\n\t\t\tt.time = tm\n\t\t} else {\n\t\t\tt.time = tm\n\t\t}\n\n\tcase timestamp:\n\t\t\/\/ Put the time register onto the stack\n\t\tt.Push(t.time)\n\n\tcase capref:\n\t\t\/\/ Put a capture group reference onto the stack.\n\t\t\/\/ First find the match storage index on the stack,\n\t\tre := t.Pop().(int)\n\t\t\/\/ Push the result from the re'th match at operandth index\n\t\tt.Push(t.matches[re][i.opnd])\n\n\tcase str:\n\t\t\/\/ Put a string constant onto the stack\n\t\tt.Push(v.str[i.opnd])\n\n\tcase ret:\n\t\t\/\/ Exit the virtual machine.\n\t\tt.match = true\n\t\treturn true\n\n\tcase push:\n\t\t\/\/ Push a value onto the stack\n\t\tt.Push(i.opnd)\n\n\tcase add:\n\t\t\/\/ Add two values at TOS, and push result onto stack\n\t\tb, err := t.PopInt()\n\t\tif err != nil {\n\t\t\tv.errorf(\"%s\", err)\n\t\t}\n\t\ta, err := t.PopInt()\n\t\tif err != nil {\n\t\t\tv.errorf(\"%s\", err)\n\t\t}\n\t\tt.Push(a + b)\n\n\tcase sub:\n\t\t\/\/ Subtract two values at TOS, push result onto stack\n\t\tb, err := t.PopInt()\n\t\tif err != nil {\n\t\t\tv.errorf(\"%s\", err)\n\t\t}\n\t\ta, err := t.PopInt()\n\t\tif err != nil {\n\t\t\tv.errorf(\"%s\", err)\n\t\t}\n\t\tt.Push(a - b)\n\n\tcase mload:\n\t\t\/\/ Load a metric at operand onto stack\n\t\tt.Push(metrics[v.name][i.opnd])\n\n\tcase dload:\n\t\t\/\/ Load a datum from metric at TOS onto stack\n\t\tm := t.Pop().(*Metric)\n\t\tvar keys []string\n\t\tfor a := 0; a < i.opnd; a++ {\n\t\t\tkeys = append(keys, t.Pop().(string))\n\t\t}\n\t\th := key_hash(keys)\n\t\tif _, ok := m.Values[h]; !ok {\n\t\t\tm.Values[h] = &Datum{}\n\t\t}\n\t\tt.Push(m.Values[h])\n\n\tdefault:\n\t\treturn v.errorf(\"illegal instruction: %q\", i.op)\n\t}\n\tt.pc++\n\treturn false\n}\n\n\/\/ Run fetches and executes each instruction in the program on the input string\n\/\/ until termination. It returns a boolean indicating a successful action was taken.\nfunc (v *vm) Run(input string) {\n\tt := v.t\n\tt.stack = make([]interface{}, 0)\n\tt.matches = make(map[int][]string, 0)\n\tfor {\n\t\tif t.pc >= len(v.prog) {\n\t\t\treturn\n\t\t}\n\t\ti := v.prog[t.pc]\n\t\tterminate := v.execute(&t, i, input)\n\t\tif terminate {\n\t\t\treturn\n\t\t}\n\t}\n\tpanic(\"not reached\")\n}\n\nfunc newVm(name string, re []*regexp.Regexp, str []string, prog []instr) *vm {\n\treturn &vm{\n\t\tname: name,\n\t\tre: re,\n\t\tstr: str,\n\t\tprog: prog,\n\t\tts_mem: make(map[string]time.Time, 0),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 Google Inc. All Rights Reserved.\n\/\/ This file is available under the Apache license.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype opcode int\n\nconst (\n\tmatch opcode = iota \/\/ Match a regular expression against input\n\tjnm \/\/ Jump if no match\n\tinc \/\/ Increment a variable value\n\tstrptime \/\/ Parse into the timestamp register\n\ttimestamp \/\/ Return value of timestamp register\n\tret \/\/ Return, end program successfully\n\tpush \/\/ Push operand onto stack\n\tcapref \/\/ Push capture group reference at operand onto stack\n\tstr \/\/ Push string constant at operand onto stack\n\tset \/\/ Set a variable value\n\tadd \/\/ Add top values on stack and push to stack\n\tsub \/\/ Subtract tpo value from second top value on stack, and push to stack.\n\tmload \/\/ Load metric at operand onto top of stack.\n\tdload \/\/ Pop operand keys and metric off stack and load datum at metric[key] onto stack.\n)\n\nvar opNames = map[opcode]string{\n\tmatch: \"match\",\n\tjnm: \"jnm\",\n\tinc: \"inc\",\n\tstrptime: \"strptime\",\n\ttimestamp: \"timestamp\",\n\tret: \"ret\",\n\tpush: \"push\",\n\tcapref: \"capref\",\n\tstr: \"str\",\n\tset: \"set\",\n\tadd: \"add\",\n\tsub: \"sub\",\n\tmload: \"mload\",\n\tdload: \"dload\",\n}\n\nvar builtin = map[string]opcode{\n\t\"strptime\": strptime,\n\t\"timestamp\": timestamp,\n}\n\ntype instr struct {\n\top opcode\n\topnd int\n}\n\nfunc (i instr) String() string {\n\treturn fmt.Sprintf(\"%s %d\", opNames[i.op], i.opnd)\n}\n\ntype thread struct {\n\tpc int\n\treg int\n\tmatches map[int][]string\n\ttime time.Time\n\tstack []interface{}\n}\n\ntype vm struct {\n\tname string\n\tprog []instr\n\n\t\/\/ const regexps\n\tre []*regexp.Regexp\n\t\/\/ const strings\n\tstr []string\n\n\t\/\/ data segment\n\tsymtab *scope\n\n\tt thread\n}\n\nfunc (t *thread) Push(value interface{}) {\n\tt.stack = append(t.stack, value)\n}\n\nfunc (t *thread) Pop() (value interface{}) {\n\tlast := len(t.stack) - 1\n\tvalue = t.stack[last]\n\tt.stack = t.stack[:last]\n\treturn\n}\n\nfunc (v *vm) errorf(format string, args ...interface{}) bool {\n\tlog.Printf(\"Runtime error: \"+format+\"\\n\", args...)\n\tv.t.reg = 0\n\treturn true\n}\n\n\/\/ Execute acts on the current instruction, and returns a boolean indicating\n\/\/ if the current thread should terminate.\nfunc (v *vm) execute(t *thread, i instr, input string) bool {\n\tswitch i.op {\n\tcase match:\n\t\t\/\/ match regex and store success\n\t\tt.matches[i.opnd] = v.re[i.opnd].FindStringSubmatch(input)\n\t\tif t.matches[i.opnd] != nil {\n\t\t\tt.reg = 1\n\t\t} else {\n\t\t\tt.reg = 0\n\t\t}\n\tcase jnm:\n\t\tif t.reg == 0 {\n\t\t\tt.pc = i.opnd\n\t\t\treturn false\n\t\t}\n\tcase inc:\n\t\t\/\/ increment a counter\n\t\tvar delta int64 = 1\n\t\t\/\/ If opnd is nonzero, the delta is on the stack.\n\t\tif i.opnd > 0 {\n\t\t\tval := t.Pop()\n\t\t\t\/\/ Don't know what type it is on the stack though.\n\t\t\tswitch n := val.(type) {\n\t\t\tcase int:\n\t\t\t\tdelta = int64(n)\n\t\t\tcase int64:\n\t\t\t\tdelta = n\n\t\t\tcase string:\n\t\t\t\tvar err error\n\t\t\t\tdelta, err = strconv.ParseInt(n, 10, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn v.errorf(\"conversion of %q to numeric failed: %s\", val, err)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn v.errorf(\"Unexpected type %T %q\", val, val)\n\t\t\t}\n\t\t}\n\t\tswitch d := t.Pop().(type) {\n\t\tcase Incrementable:\n\t\t\td.IncBy(delta, t.time)\n\t\tcase int:\n\t\t\tm := metrics[d]\n\t\t\tm.IncBy(delta, t.time)\n\t\tdefault:\n\t\t\treturn v.errorf(\"Unexpected type to increment: %T %q\", d, d)\n\t\t}\n\n\tcase set:\n\t\t\/\/ Set a gauge\n\t\tvar value int64\n\t\tval := t.Pop()\n\t\t\/\/ Don't know what type it is on the stack though.\n\t\tswitch n := val.(type) {\n\t\tcase int:\n\t\t\tvalue = int64(n)\n\t\tcase int64:\n\t\t\tvalue = n\n\t\tcase string:\n\t\t\tvar err error\n\t\t\tvalue, err = strconv.ParseInt(n, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn v.errorf(\"conversion of %q to numeric failed: %s\", val, err)\n\t\t\t}\n\t\tcase time.Time:\n\t\t\tvalue = n.Unix()\n\t\tdefault:\n\t\t\treturn v.errorf(\"Unexpected type %T %q\\n\", val, val)\n\t\t}\n\t\tswitch d := t.Pop().(type) {\n\t\tcase Settable:\n\t\t\td.Set(value, t.time)\n\t\tcase int:\n\t\t\tm := metrics[d]\n\t\t\tm.Set(value, t.time)\n\t\tdefault:\n\t\t\treturn v.errorf(\"Unexpected type to set: %T %q\", d, d)\n\t\t}\n\n\tcase strptime:\n\t\tlayout := t.Pop().(string)\n\n\t\tvar ts string\n\t\tswitch s := t.Pop().(type) {\n\t\tcase string:\n\t\t\tts = s\n\n\t\tcase int: \/* capref *\/\n\t\t\tre := t.Pop().(int)\n\t\t\tts = t.matches[re][s]\n\t\t}\n\n\t\ttm, err := time.Parse(layout, ts)\n\t\tif err != nil {\n\t\t\treturn v.errorf(\"time.Parse(%s, %s) failed: %s\", layout, ts, err)\n\t\t}\n\t\tt.time = tm\n\n\tcase timestamp:\n\t\tt.Push(t.time)\n\n\tcase capref:\n\t\t\/\/ Get the regex number from the stack\n\t\tre := t.Pop().(int)\n\t\tt.Push(t.matches[re][i.opnd])\n\n\tcase str:\n\t\tt.Push(v.str[i.opnd])\n\n\tcase ret:\n\t\treturn true\n\n\tcase push:\n\t\tt.Push(i.opnd)\n\n\tcase add:\n\t\tvar a, b int64\n\t\tswitch d := t.Pop().(type) {\n\t\tcase *Datum:\n\t\t\ta = d.Value\n\t\tcase int64:\n\t\t\ta = d\n\t\tcase int:\n\t\t\ta = int64(d)\n\t\tcase time.Time:\n\t\t\ta = d.Unix()\n\t\tdefault:\n\t\t\treturn v.errorf(\"Unexpected type for add %T %q\\n\", d, d)\n\t\t}\n\t\tswitch d := t.Pop().(type) {\n\t\tcase *Datum:\n\t\t\tb = d.Value\n\t\tcase int64:\n\t\t\tb = d\n\t\tcase int:\n\t\t\tb = int64(d)\n\t\tcase time.Time:\n\t\t\tb = d.Unix()\n\t\tdefault:\n\t\t\treturn v.errorf(\"Unexpected type for add %T %q\\n\", d, d)\n\t\t}\n\t\tt.Push(a + b)\n\n\tcase sub:\n\t\tvar a, b int64\n\t\tswitch d := t.Pop().(type) {\n\t\tcase *Datum:\n\t\t\ta = d.Value\n\t\tcase int64:\n\t\t\ta = d\n\t\tcase int:\n\t\t\ta = int64(d)\n\t\tcase time.Time:\n\t\t\ta = d.Unix()\n\t\tdefault:\n\t\t\treturn v.errorf(\"Unexpected type for sub %T %q\\n\", d, d)\n\t\t}\n\t\tswitch d := t.Pop().(type) {\n\t\tcase *Datum:\n\t\t\tb = d.Value\n\t\tcase int64:\n\t\t\tb = d\n\t\tcase int:\n\t\t\tb = int64(d)\n\t\tcase time.Time:\n\t\t\tb = d.Unix()\n\t\tdefault:\n\t\t\treturn v.errorf(\"Unexpected type for sub %T %q\\n\", d, d)\n\t\t}\n\t\tt.Push(b - a)\n\n\tcase mload:\n\t\tt.Push(metrics[i.opnd])\n\n\tcase dload:\n\t\tm := t.Pop().(*Metric)\n\t\tvar keys []string\n\t\tfor a := 0; a < i.opnd; a++ {\n\t\t\tkeys = append(keys, t.Pop().(string))\n\t\t}\n\t\th := key_hash(keys)\n\t\tif _, ok := m.Values[h]; !ok {\n\t\t\tm.Values[h] = &Datum{}\n\t\t}\n\t\tt.Push(m.Values[h])\n\n\tdefault:\n\t\treturn v.errorf(\"illegal instruction: %q\", i.op)\n\t}\n\tt.pc++\n\treturn false\n}\n\n\/\/ Run fetches and executes each instruction in the program on the input string\n\/\/ until termination. It returns a boolean indicating a successful match.\nfunc (v *vm) Run(input string) bool {\n\tt := v.t\n\tt.stack = make([]interface{}, 0)\n\tt.matches = make(map[int][]string, 0)\n\tfor {\n\t\tif t.pc >= len(v.prog) {\n\t\t\treturn false\n\t\t}\n\t\ti := v.prog[t.pc]\n\t\tterminate := v.execute(&t, i, input)\n\t\tif terminate {\n\t\t\t\/\/ t.reg contains the result of the last match.\n\t\t\treturn t.reg == 1\n\t\t}\n\t}\n\tpanic(\"not reached\")\n}\n<commit_msg>Update comments for match and capref instructions.<commit_after>\/\/ Copyright 2011 Google Inc. All Rights Reserved.\n\/\/ This file is available under the Apache license.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype opcode int\n\nconst (\n\tmatch opcode = iota \/\/ Match a regular expression against input\n\tjnm \/\/ Jump if no match\n\tinc \/\/ Increment a variable value\n\tstrptime \/\/ Parse into the timestamp register\n\ttimestamp \/\/ Return value of timestamp register\n\tret \/\/ Return, end program successfully\n\tpush \/\/ Push operand onto stack\n\tcapref \/\/ Push capture group reference at operand onto stack\n\tstr \/\/ Push string constant at operand onto stack\n\tset \/\/ Set a variable value\n\tadd \/\/ Add top values on stack and push to stack\n\tsub \/\/ Subtract tpo value from second top value on stack, and push to stack.\n\tmload \/\/ Load metric at operand onto top of stack.\n\tdload \/\/ Pop operand keys and metric off stack and load datum at metric[key] onto stack.\n)\n\nvar opNames = map[opcode]string{\n\tmatch: \"match\",\n\tjnm: \"jnm\",\n\tinc: \"inc\",\n\tstrptime: \"strptime\",\n\ttimestamp: \"timestamp\",\n\tret: \"ret\",\n\tpush: \"push\",\n\tcapref: \"capref\",\n\tstr: \"str\",\n\tset: \"set\",\n\tadd: \"add\",\n\tsub: \"sub\",\n\tmload: \"mload\",\n\tdload: \"dload\",\n}\n\nvar builtin = map[string]opcode{\n\t\"strptime\": strptime,\n\t\"timestamp\": timestamp,\n}\n\ntype instr struct {\n\top opcode\n\topnd int\n}\n\nfunc (i instr) String() string {\n\treturn fmt.Sprintf(\"%s %d\", opNames[i.op], i.opnd)\n}\n\ntype thread struct {\n\tpc int\n\treg int\n\tmatches map[int][]string\n\ttime time.Time\n\tstack []interface{}\n}\n\ntype vm struct {\n\tname string\n\tprog []instr\n\n\t\/\/ const regexps\n\tre []*regexp.Regexp\n\t\/\/ const strings\n\tstr []string\n\n\t\/\/ data segment\n\tsymtab *scope\n\n\tt thread\n}\n\nfunc (t *thread) Push(value interface{}) {\n\tt.stack = append(t.stack, value)\n}\n\nfunc (t *thread) Pop() (value interface{}) {\n\tlast := len(t.stack) - 1\n\tvalue = t.stack[last]\n\tt.stack = t.stack[:last]\n\treturn\n}\n\nfunc (v *vm) errorf(format string, args ...interface{}) bool {\n\tlog.Printf(\"Runtime error: \"+format+\"\\n\", args...)\n\tv.t.reg = 0\n\treturn true\n}\n\n\/\/ Execute acts on the current instruction, and returns a boolean indicating\n\/\/ if the current thread should terminate.\nfunc (v *vm) execute(t *thread, i instr, input string) bool {\n\tswitch i.op {\n\tcase match:\n\t\t\/\/ match regex and store success\n\t\t\/\/ Store the results in the operandth element of the stack,\n\t\t\/\/ where i.opnd == the matched re index\n\t\tt.matches[i.opnd] = v.re[i.opnd].FindStringSubmatch(input)\n\t\tif t.matches[i.opnd] != nil {\n\t\t\tt.reg = 1\n\t\t} else {\n\t\t\tt.reg = 0\n\t\t}\n\tcase jnm:\n\t\tif t.reg == 0 {\n\t\t\tt.pc = i.opnd\n\t\t\treturn false\n\t\t}\n\tcase inc:\n\t\t\/\/ increment a counter\n\t\tvar delta int64 = 1\n\t\t\/\/ If opnd is nonzero, the delta is on the stack.\n\t\tif i.opnd > 0 {\n\t\t\tval := t.Pop()\n\t\t\t\/\/ Don't know what type it is on the stack though.\n\t\t\tswitch n := val.(type) {\n\t\t\tcase int:\n\t\t\t\tdelta = int64(n)\n\t\t\tcase int64:\n\t\t\t\tdelta = n\n\t\t\tcase string:\n\t\t\t\tvar err error\n\t\t\t\tdelta, err = strconv.ParseInt(n, 10, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn v.errorf(\"conversion of %q to numeric failed: %s\", val, err)\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn v.errorf(\"Unexpected type %T %q\", val, val)\n\t\t\t}\n\t\t}\n\t\tswitch d := t.Pop().(type) {\n\t\tcase Incrementable:\n\t\t\td.IncBy(delta, t.time)\n\t\tcase int:\n\t\t\tm := metrics[d]\n\t\t\tm.IncBy(delta, t.time)\n\t\tdefault:\n\t\t\treturn v.errorf(\"Unexpected type to increment: %T %q\", d, d)\n\t\t}\n\n\tcase set:\n\t\t\/\/ Set a gauge\n\t\tvar value int64\n\t\tval := t.Pop()\n\t\t\/\/ Don't know what type it is on the stack though.\n\t\tswitch n := val.(type) {\n\t\tcase int:\n\t\t\tvalue = int64(n)\n\t\tcase int64:\n\t\t\tvalue = n\n\t\tcase string:\n\t\t\tvar err error\n\t\t\tvalue, err = strconv.ParseInt(n, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn v.errorf(\"conversion of %q to numeric failed: %s\", val, err)\n\t\t\t}\n\t\tcase time.Time:\n\t\t\tvalue = n.Unix()\n\t\tdefault:\n\t\t\treturn v.errorf(\"Unexpected type %T %q\\n\", val, val)\n\t\t}\n\t\tswitch d := t.Pop().(type) {\n\t\tcase Settable:\n\t\t\td.Set(value, t.time)\n\t\tcase int:\n\t\t\tm := metrics[d]\n\t\t\tm.Set(value, t.time)\n\t\tdefault:\n\t\t\treturn v.errorf(\"Unexpected type to set: %T %q\", d, d)\n\t\t}\n\n\tcase strptime:\n\t\tlayout := t.Pop().(string)\n\n\t\tvar ts string\n\t\tswitch s := t.Pop().(type) {\n\t\tcase string:\n\t\t\tts = s\n\n\t\tcase int: \/* capref *\/\n\t\t\t\/\/ First find the match storage index on the stack\n\t\t\tre := t.Pop().(int)\n\t\t\t\/\/ Store the result from the re'th index at the s'th index\n\t\t\tts = t.matches[re][s]\n\t\t}\n\n\t\ttm, err := time.Parse(layout, ts)\n\t\tif err != nil {\n\t\t\treturn v.errorf(\"time.Parse(%s, %s) failed: %s\", layout, ts, err)\n\t\t}\n\t\tt.time = tm\n\n\tcase timestamp:\n\t\tt.Push(t.time)\n\n\tcase capref:\n\t\t\/\/ Find the match storage index on the stack,\n\t\tre := t.Pop().(int)\n\t\t\/\/ Push the result from the re'th match at operandth index\n\t\tt.Push(t.matches[re][i.opnd])\n\n\tcase str:\n\t\tt.Push(v.str[i.opnd])\n\n\tcase ret:\n\t\treturn true\n\n\tcase push:\n\t\tt.Push(i.opnd)\n\n\tcase add:\n\t\tvar a, b int64\n\t\tswitch d := t.Pop().(type) {\n\t\tcase *Datum:\n\t\t\ta = d.Value\n\t\tcase int64:\n\t\t\ta = d\n\t\tcase int:\n\t\t\ta = int64(d)\n\t\tcase time.Time:\n\t\t\ta = d.Unix()\n\t\tdefault:\n\t\t\treturn v.errorf(\"Unexpected type for add %T %q\\n\", d, d)\n\t\t}\n\t\tswitch d := t.Pop().(type) {\n\t\tcase *Datum:\n\t\t\tb = d.Value\n\t\tcase int64:\n\t\t\tb = d\n\t\tcase int:\n\t\t\tb = int64(d)\n\t\tcase time.Time:\n\t\t\tb = d.Unix()\n\t\tdefault:\n\t\t\treturn v.errorf(\"Unexpected type for add %T %q\\n\", d, d)\n\t\t}\n\t\tt.Push(a + b)\n\n\tcase sub:\n\t\tvar a, b int64\n\t\tswitch d := t.Pop().(type) {\n\t\tcase *Datum:\n\t\t\ta = d.Value\n\t\tcase int64:\n\t\t\ta = d\n\t\tcase int:\n\t\t\ta = int64(d)\n\t\tcase time.Time:\n\t\t\ta = d.Unix()\n\t\tdefault:\n\t\t\treturn v.errorf(\"Unexpected type for sub %T %q\\n\", d, d)\n\t\t}\n\t\tswitch d := t.Pop().(type) {\n\t\tcase *Datum:\n\t\t\tb = d.Value\n\t\tcase int64:\n\t\t\tb = d\n\t\tcase int:\n\t\t\tb = int64(d)\n\t\tcase time.Time:\n\t\t\tb = d.Unix()\n\t\tdefault:\n\t\t\treturn v.errorf(\"Unexpected type for sub %T %q\\n\", d, d)\n\t\t}\n\t\tt.Push(b - a)\n\n\tcase mload:\n\t\tt.Push(metrics[i.opnd])\n\n\tcase dload:\n\t\tm := t.Pop().(*Metric)\n\t\tvar keys []string\n\t\tfor a := 0; a < i.opnd; a++ {\n\t\t\tkeys = append(keys, t.Pop().(string))\n\t\t}\n\t\th := key_hash(keys)\n\t\tif _, ok := m.Values[h]; !ok {\n\t\t\tm.Values[h] = &Datum{}\n\t\t}\n\t\tt.Push(m.Values[h])\n\n\tdefault:\n\t\treturn v.errorf(\"illegal instruction: %q\", i.op)\n\t}\n\tt.pc++\n\treturn false\n}\n\n\/\/ Run fetches and executes each instruction in the program on the input string\n\/\/ until termination. It returns a boolean indicating a successful match.\nfunc (v *vm) Run(input string) bool {\n\tt := v.t\n\tt.stack = make([]interface{}, 0)\n\tt.matches = make(map[int][]string, 0)\n\tfor {\n\t\tif t.pc >= len(v.prog) {\n\t\t\treturn false\n\t\t}\n\t\ti := v.prog[t.pc]\n\t\tterminate := v.execute(&t, i, input)\n\t\tif terminate {\n\t\t\t\/\/ t.reg contains the result of the last match.\n\t\t\treturn t.reg == 1\n\t\t}\n\t}\n\tpanic(\"not reached\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build !windows\n\npackage graphdriver\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"syscall\"\n\n\t\"github.com\/containers\/storage\/pkg\/idtools\"\n)\n\nfunc platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {\n\tsysinfo := info.Sys()\n\tif st, ok := sysinfo.(*syscall.Stat_t); ok {\n\t\t\/\/ Map an on-disk UID\/GID pair from host to container\n\t\t\/\/ using the first map, then back to the host using the\n\t\t\/\/ second map. Skip that first step if they're 0, to\n\t\t\/\/ compensate for cases where a parent layer should\n\t\t\/\/ have had a mapped value, but didn't.\n\t\tuid, gid := int(st.Uid), int(st.Gid)\n\t\tif toContainer != nil {\n\t\t\tpair := idtools.IDPair{\n\t\t\t\tUID: uid,\n\t\t\t\tGID: gid,\n\t\t\t}\n\t\t\tmappedUid, mappedGid, err := toContainer.ToContainer(pair)\n\t\t\tif err != nil {\n\t\t\t\tif (uid != 0) || (gid != 0) {\n\t\t\t\t\treturn fmt.Errorf(\"error mapping host ID pair %#v for %q to container: %v\", pair, path, err)\n\t\t\t\t}\n\t\t\t\tmappedUid, mappedGid = uid, gid\n\t\t\t}\n\t\t\tuid, gid = mappedUid, mappedGid\n\t\t}\n\t\tif toHost != nil {\n\t\t\tpair := idtools.IDPair{\n\t\t\t\tUID: uid,\n\t\t\t\tGID: gid,\n\t\t\t}\n\t\t\tmappedPair, err := toHost.ToHost(pair)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error mapping container ID pair %#v for %q to host: %v\", pair, path, err)\n\t\t\t}\n\t\t\tuid, gid = mappedPair.UID, mappedPair.GID\n\t\t}\n\t\tif uid != int(st.Uid) || gid != int(st.Gid) {\n\t\t\tstat, err := os.Lstat(path)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%s: lstat(%q): %v\", os.Args[0], path, err)\n\t\t\t}\n\t\t\t\/\/ Make the change.\n\t\t\tif err := syscall.Lchown(path, uid, gid); err != nil {\n\t\t\t\treturn fmt.Errorf(\"%s: chown(%q): %v\", os.Args[0], path, err)\n\t\t\t}\n\t\t\t\/\/ Restore the SUID and SGID bits if they were originally set.\n\t\t\tif (stat.Mode()&os.ModeSymlink == 0) && stat.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 {\n\t\t\t\tif err := os.Chmod(path, stat.Mode()); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"%s: chmod(%q): %v\", os.Args[0], path, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Need to preserve the Xattr security.capabilty when chowning.<commit_after>\/\/ +build !windows\n\npackage graphdriver\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"syscall\"\n\n\t\"github.com\/containers\/storage\/pkg\/idtools\"\n\t\"github.com\/containers\/storage\/pkg\/system\"\n)\n\nfunc platformLChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error {\n\tsysinfo := info.Sys()\n\tif st, ok := sysinfo.(*syscall.Stat_t); ok {\n\t\t\/\/ Map an on-disk UID\/GID pair from host to container\n\t\t\/\/ using the first map, then back to the host using the\n\t\t\/\/ second map. Skip that first step if they're 0, to\n\t\t\/\/ compensate for cases where a parent layer should\n\t\t\/\/ have had a mapped value, but didn't.\n\t\tuid, gid := int(st.Uid), int(st.Gid)\n\t\tif toContainer != nil {\n\t\t\tpair := idtools.IDPair{\n\t\t\t\tUID: uid,\n\t\t\t\tGID: gid,\n\t\t\t}\n\t\t\tmappedUid, mappedGid, err := toContainer.ToContainer(pair)\n\t\t\tif err != nil {\n\t\t\t\tif (uid != 0) || (gid != 0) {\n\t\t\t\t\treturn fmt.Errorf(\"error mapping host ID pair %#v for %q to container: %v\", pair, path, err)\n\t\t\t\t}\n\t\t\t\tmappedUid, mappedGid = uid, gid\n\t\t\t}\n\t\t\tuid, gid = mappedUid, mappedGid\n\t\t}\n\t\tif toHost != nil {\n\t\t\tpair := idtools.IDPair{\n\t\t\t\tUID: uid,\n\t\t\t\tGID: gid,\n\t\t\t}\n\t\t\tmappedPair, err := toHost.ToHost(pair)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"error mapping container ID pair %#v for %q to host: %v\", pair, path, err)\n\t\t\t}\n\t\t\tuid, gid = mappedPair.UID, mappedPair.GID\n\t\t}\n\t\tif uid != int(st.Uid) || gid != int(st.Gid) {\n\t\t\tstat, err := os.Lstat(path)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%s: lstat(%q): %v\", os.Args[0], path, err)\n\t\t\t}\n\t\t\tcap, err := system.Lgetxattr(path, \"security.capability\")\n\t\t\tif err != nil && err != system.ErrNotSupportedPlatform {\n\t\t\t\treturn fmt.Errorf(\"%s: Lgetxattr(%q): %v\", os.Args[0], path, err)\n\t\t\t}\n\n\t\t\t\/\/ Make the change.\n\t\t\tif err := syscall.Lchown(path, uid, gid); err != nil {\n\t\t\t\treturn fmt.Errorf(\"%s: chown(%q): %v\", os.Args[0], path, err)\n\t\t\t}\n\t\t\t\/\/ Restore the SUID and SGID bits if they were originally set.\n\t\t\tif (stat.Mode()&os.ModeSymlink == 0) && stat.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 {\n\t\t\t\tif err := os.Chmod(path, stat.Mode()); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"%s: chmod(%q): %v\", os.Args[0], path, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif cap != nil {\n\t\t\t\tif err := system.Lsetxattr(path, \"security.capability\", cap, 0); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"%s: Lsetxattr(%q): %v\", os.Args[0], path, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ Copyright 2016 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n)\n\nfunc transpile(out io.Writer, in io.Reader, debug io.Writer) error {\n\tfset := token.NewFileSet()\n\tf, err := parser.ParseFile(fset, \"sketch.go\", in, 0)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to parse file: %v\", err)\n\t}\n\n\tif debug != nil {\n\t\tast.Fprint(debug, fset, f, nil)\n\t}\n\n\tfor _, d := range f.Decls {\n\t\tif err := handleDecl(out, d); err != nil {\n\t\t\treturn fmt.Errorf(\"error handling decl %v: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc handleDecl(out io.Writer, d ast.Decl) error {\n\tswitch decl := d.(type) {\n\tcase *ast.GenDecl:\n\t\treturn handleGenDecl(out, decl)\n\tcase *ast.FuncDecl:\n\t\treturn handleFuncDecl(out, decl)\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported decl: %v\", d)\n\t}\n}\n\nfunc handleGenDecl(out io.Writer, gd *ast.GenDecl) error {\n\tfor _, s := range gd.Specs {\n\t\tvs, ok := s.(*ast.ValueSpec)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unsupported spec: %v\", s)\n\t\t}\n\t\tif len(vs.Names) > 1 {\n\t\t\treturn fmt.Errorf(\"unsupported # of value names: %v\", vs.Names)\n\t\t}\n\t\tdecl := []string{}\n\t\tif vs.Names[0].Obj.Kind == ast.Con {\n\t\t\tdecl = append(decl, \"const\")\n\t\t}\n\t\tif len(vs.Values) > 1 {\n\t\t\treturn fmt.Errorf(\"unsupported # of values: %v\", vs.Names)\n\t\t}\n\t\tl, ok := vs.Values[0].(*ast.BasicLit)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unsupported value: %v\", vs.Values[0])\n\t\t}\n\t\tif l.Kind != token.INT {\n\t\t\treturn fmt.Errorf(\"unsupported literal kind: %v\", l.Kind)\n\t\t}\n\t\tdecl = append(decl, \"int\", vs.Names[0].Name, \"=\", l.Value)\n\t\tfmt.Fprintf(out, \"%s;\\n\", strings.Join(decl, \" \"))\n\t}\n\treturn nil\n}\n\nfunc handleFuncDecl(out io.Writer, fd *ast.FuncDecl) error {\n\tif fd.Type.Results != nil {\n\t\treturn fmt.Errorf(\"unsupported return type: %v\", fd.Type.Results)\n\t}\n\tif fd.Type.Params.List != nil {\n\t\treturn fmt.Errorf(\"unsupported param type: %v\", fd.Type.Params)\n\t}\n\tfmt.Fprintf(out, \"void %s() {\\n\", fd.Name)\n\tif err := handleBlockStmt(out, fd.Body); err != nil {\n\t\treturn fmt.Errorf(\"error handling block statement for %q: %v\", fd.Name, err)\n\t}\n\tfmt.Fprintln(out, \"}\")\n\treturn nil\n}\n\nfunc handleBlockStmt(out io.Writer, bs *ast.BlockStmt) error {\n\tfor _, s := range bs.List {\n\t\tswitch st := s.(type) {\n\t\tcase *ast.ExprStmt:\n\t\t\tc, ok := st.X.(*ast.CallExpr)\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"unsupported expr: %v\", st.X)\n\t\t\t}\n\t\t\tfmt.Fprintf(out, \" \")\n\t\t\tif err := handleCallExpr(out, c); err != nil {\n\t\t\t\treturn err\n\n\t\t\t}\n\t\tcase *ast.AssignStmt:\n\t\t\tif len(st.Lhs) > 1 {\n\t\t\t\treturn fmt.Errorf(\"unsupported # of lhs exprs: %v\", st.Lhs)\n\n\t\t\t}\n\t\t\tif err := handleExpr(out, st.Lhs[0]); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error handling left expr: %v\", st.Lhs[0])\n\t\t\t}\n\t\t\tfmt.Fprintf(out, \"=\")\n\t\t\tif len(st.Rhs) > 1 {\n\t\t\t\treturn fmt.Errorf(\"unsupported # of rhs exprs: %v\", st.Rhs)\n\n\t\t\t}\n\t\t\tif err := handleExpr(out, st.Rhs[0]); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error handling right expr: %v\", st.Rhs[0])\n\t\t\t}\n\t\tcase *ast.IfStmt:\n\t\t\tfmt.Fprintf(out, \"if (\")\n\t\t\tif err := handleExpr(out, st.Cond); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error handling if block conditionx: %v\", err)\n\t\t\t}\n\t\t\tfmt.Fprint(out, \") {\\n\")\n\t\t\tif err := handleBlockStmt(out, st.Body); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error handling if block statements: %v\", err)\n\t\t\t}\n\t\t\tfmt.Fprintf(out, \"}\")\n\t\t\tif st.Else != nil {\n\t\t\t\tbs, ok := st.Else.(*ast.BlockStmt)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"unsupported statement: %v\", st.Else)\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(out, \" else {\\n\")\n\t\t\t\tif err := handleBlockStmt(out, bs); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"error handling else block statements: %v\", err)\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(out, \"}\")\n\t\t\t}\n\t\t\tfmt.Fprintln(out)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unsupported statement: %v\", s)\n\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc handleCallExpr(out io.Writer, c *ast.CallExpr) error {\n\tfuncName, ok := c.Fun.(*ast.Ident)\n\tif !ok {\n\t\treturn fmt.Errorf(\"unsupported func expr: %v\", c.Fun)\n\t}\n\targs := []string{}\n\tfor _, a := range c.Args {\n\t\tvar buf bytes.Buffer\n\t\tif err := handleExpr(&buf, a); err != nil {\n\t\t\treturn fmt.Errorf(\"error handling func arg expr %v: %v\", a, err)\n\t\t}\n\t\targs = append(args, buf.String())\n\t}\n\tfmt.Fprintf(out, \"%s(%s);\\n\", funcName, strings.Join(args, \", \"))\n\treturn nil\n}\n\nfunc handleBinaryExpr(out io.Writer, be *ast.BinaryExpr) error {\n\tif err := handleExpr(out, be.X); err != nil {\n\t\treturn fmt.Errorf(\"error handling left part %v of binary expr: %v\", be.X, err)\n\t}\n\tfmt.Fprint(out, be.Op)\n\tif err := handleExpr(out, be.Y); err != nil {\n\t\treturn fmt.Errorf(\"error handling right part %v of binary expr: %v\", be.Y, err)\n\t}\n\treturn nil\n}\n\nfunc handleIdent(out io.Writer, ident *ast.Ident) error {\n\tfmt.Fprintf(out, ident.Name)\n\treturn nil\n}\n\nfunc handleBasicLit(out io.Writer, lit *ast.BasicLit) error {\n\tfmt.Fprintf(out, lit.Value)\n\treturn nil\n}\n\nfunc handleExpr(out io.Writer, e ast.Expr) error {\n\tswitch expr := e.(type) {\n\tcase *ast.CallExpr:\n\t\treturn handleCallExpr(out, expr)\n\tcase *ast.BinaryExpr:\n\t\treturn handleBinaryExpr(out, expr)\n\tcase *ast.Ident:\n\t\treturn handleIdent(out, expr)\n\tcase *ast.BasicLit:\n\t\treturn handleBasicLit(out, expr)\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported expr: %v\", e)\n\t}\n}\n\nfunc main() {\n\tif err := transpile(os.Stdout, os.Stdin, os.Stderr); err != nil {\n\t\tlog.Fatalf(\"failed to transpile: %v\", err)\n\t}\n}\n<commit_msg>fix ident<commit_after>\/\/\n\/\/ Copyright 2016 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n)\n\nfunc transpile(out io.Writer, in io.Reader, debug io.Writer) error {\n\tfset := token.NewFileSet()\n\tf, err := parser.ParseFile(fset, \"sketch.go\", in, 0)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to parse file: %v\", err)\n\t}\n\n\tif debug != nil {\n\t\tast.Fprint(debug, fset, f, nil)\n\t}\n\n\tfor _, d := range f.Decls {\n\t\tif err := handleDecl(out, d); err != nil {\n\t\t\treturn fmt.Errorf(\"error handling decl %v: %v\", d, err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc handleDecl(out io.Writer, d ast.Decl) error {\n\tswitch decl := d.(type) {\n\tcase *ast.GenDecl:\n\t\treturn handleGenDecl(out, decl)\n\tcase *ast.FuncDecl:\n\t\treturn handleFuncDecl(out, decl)\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported decl: %v\", d)\n\t}\n}\n\nfunc handleGenDecl(out io.Writer, gd *ast.GenDecl) error {\n\tfor _, s := range gd.Specs {\n\t\tvs, ok := s.(*ast.ValueSpec)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unsupported spec: %v\", s)\n\t\t}\n\t\tif len(vs.Names) > 1 {\n\t\t\treturn fmt.Errorf(\"unsupported # of value names: %v\", vs.Names)\n\t\t}\n\t\tdecl := []string{}\n\t\tif vs.Names[0].Obj.Kind == ast.Con {\n\t\t\tdecl = append(decl, \"const\")\n\t\t}\n\t\tif len(vs.Values) > 1 {\n\t\t\treturn fmt.Errorf(\"unsupported # of values: %v\", vs.Names)\n\t\t}\n\t\tl, ok := vs.Values[0].(*ast.BasicLit)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"unsupported value: %v\", vs.Values[0])\n\t\t}\n\t\tif l.Kind != token.INT {\n\t\t\treturn fmt.Errorf(\"unsupported literal kind: %v\", l.Kind)\n\t\t}\n\t\tdecl = append(decl, \"int\", vs.Names[0].Name, \"=\", l.Value)\n\t\tfmt.Fprintf(out, \"%s;\\n\", strings.Join(decl, \" \"))\n\t}\n\treturn nil\n}\n\nfunc handleFuncDecl(out io.Writer, fd *ast.FuncDecl) error {\n\tif fd.Type.Results != nil {\n\t\treturn fmt.Errorf(\"unsupported return type: %v\", fd.Type.Results)\n\t}\n\tif fd.Type.Params.List != nil {\n\t\treturn fmt.Errorf(\"unsupported param type: %v\", fd.Type.Params)\n\t}\n\tfmt.Fprintf(out, \"void %s() {\\n\", fd.Name)\n\tif err := handleBlockStmt(out, fd.Body); err != nil {\n\t\treturn fmt.Errorf(\"error handling block statement for %q: %v\", fd.Name, err)\n\t}\n\tfmt.Fprintln(out, \"}\")\n\treturn nil\n}\n\nfunc handleBlockStmt(out io.Writer, bs *ast.BlockStmt) error {\n\tfor _, s := range bs.List {\n\t\tfmt.Fprintf(out, \" \")\n\t\tswitch st := s.(type) {\n\t\tcase *ast.ExprStmt:\n\t\t\tc, ok := st.X.(*ast.CallExpr)\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"unsupported expr: %v\", st.X)\n\t\t\t}\n\t\t\tif err := handleCallExpr(out, c); err != nil {\n\t\t\t\treturn err\n\n\t\t\t}\n\t\tcase *ast.AssignStmt:\n\t\t\tif len(st.Lhs) > 1 {\n\t\t\t\treturn fmt.Errorf(\"unsupported # of lhs exprs: %v\", st.Lhs)\n\n\t\t\t}\n\t\t\tif err := handleExpr(out, st.Lhs[0]); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error handling left expr: %v\", st.Lhs[0])\n\t\t\t}\n\t\t\tfmt.Fprintf(out, \"=\")\n\t\t\tif len(st.Rhs) > 1 {\n\t\t\t\treturn fmt.Errorf(\"unsupported # of rhs exprs: %v\", st.Rhs)\n\n\t\t\t}\n\t\t\tif err := handleExpr(out, st.Rhs[0]); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error handling right expr: %v\", st.Rhs[0])\n\t\t\t}\n\t\tcase *ast.IfStmt:\n\t\t\tfmt.Fprintf(out, \"if (\")\n\t\t\tif err := handleExpr(out, st.Cond); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error handling if block conditionx: %v\", err)\n\t\t\t}\n\t\t\tfmt.Fprint(out, \") {\\n\")\n\t\t\tif err := handleBlockStmt(out, st.Body); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error handling if block statements: %v\", err)\n\t\t\t}\n\t\t\tfmt.Fprintf(out, \"}\")\n\t\t\tif st.Else != nil {\n\t\t\t\tbs, ok := st.Else.(*ast.BlockStmt)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn fmt.Errorf(\"unsupported statement: %v\", st.Else)\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(out, \" else {\\n\")\n\t\t\t\tif err := handleBlockStmt(out, bs); err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"error handling else block statements: %v\", err)\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(out, \"}\")\n\t\t\t}\n\t\t\tfmt.Fprintln(out)\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unsupported statement: %v\", s)\n\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc handleCallExpr(out io.Writer, c *ast.CallExpr) error {\n\tfuncName, ok := c.Fun.(*ast.Ident)\n\tif !ok {\n\t\treturn fmt.Errorf(\"unsupported func expr: %v\", c.Fun)\n\t}\n\targs := []string{}\n\tfor _, a := range c.Args {\n\t\tvar buf bytes.Buffer\n\t\tif err := handleExpr(&buf, a); err != nil {\n\t\t\treturn fmt.Errorf(\"error handling func arg expr %v: %v\", a, err)\n\t\t}\n\t\targs = append(args, buf.String())\n\t}\n\tfmt.Fprintf(out, \"%s(%s);\\n\", funcName, strings.Join(args, \", \"))\n\treturn nil\n}\n\nfunc handleBinaryExpr(out io.Writer, be *ast.BinaryExpr) error {\n\tif err := handleExpr(out, be.X); err != nil {\n\t\treturn fmt.Errorf(\"error handling left part %v of binary expr: %v\", be.X, err)\n\t}\n\tfmt.Fprint(out, be.Op)\n\tif err := handleExpr(out, be.Y); err != nil {\n\t\treturn fmt.Errorf(\"error handling right part %v of binary expr: %v\", be.Y, err)\n\t}\n\treturn nil\n}\n\nfunc handleIdent(out io.Writer, ident *ast.Ident) error {\n\tfmt.Fprintf(out, ident.Name)\n\treturn nil\n}\n\nfunc handleBasicLit(out io.Writer, lit *ast.BasicLit) error {\n\tfmt.Fprintf(out, lit.Value)\n\treturn nil\n}\n\nfunc handleExpr(out io.Writer, e ast.Expr) error {\n\tswitch expr := e.(type) {\n\tcase *ast.CallExpr:\n\t\treturn handleCallExpr(out, expr)\n\tcase *ast.BinaryExpr:\n\t\treturn handleBinaryExpr(out, expr)\n\tcase *ast.Ident:\n\t\treturn handleIdent(out, expr)\n\tcase *ast.BasicLit:\n\t\treturn handleBasicLit(out, expr)\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported expr: %v\", e)\n\t}\n}\n\nfunc main() {\n\tif err := transpile(os.Stdout, os.Stdin, os.Stderr); err != nil {\n\t\tlog.Fatalf(\"failed to transpile: %v\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package manifestlist\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com\/docker\/distribution\"\n\t\"github.com\/docker\/distribution\/digest\"\n\t\"github.com\/docker\/distribution\/manifest\"\n)\n\n\/\/ MediaTypeManifestList specifies the mediaType for manifest lists.\nconst MediaTypeManifestList = \"application\/vnd.docker.distribution.manifest.list.v2+json\"\n\n\/\/ SchemaVersion provides a pre-initialized version structure for this\n\/\/ packages version of the manifest.\nvar SchemaVersion = manifest.Versioned{\n\tSchemaVersion: 2,\n\tMediaType: MediaTypeManifestList,\n}\n\nfunc init() {\n\t\/\/ FIXME: Do not registry the manifest list schema as the manifest lists are\n\t\/\/ not supported by OpenShift and fetching the manifest list will result in\n\t\/\/ the import failure. If we return here, it means the docker client will ask\n\t\/\/ for the schema2 instead.\n\treturn\n\n\tmanifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {\n\t\tm := new(DeserializedManifestList)\n\t\terr := m.UnmarshalJSON(b)\n\t\tif err != nil {\n\t\t\treturn nil, distribution.Descriptor{}, err\n\t\t}\n\n\t\tdgst := digest.FromBytes(b)\n\t\treturn m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err\n\t}\n\terr := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unable to register manifest: %s\", err))\n\t}\n}\n\n\/\/ PlatformSpec specifies a platform where a particular image manifest is\n\/\/ applicable.\ntype PlatformSpec struct {\n\t\/\/ Architecture field specifies the CPU architecture, for example\n\t\/\/ `amd64` or `ppc64`.\n\tArchitecture string `json:\"architecture\"`\n\n\t\/\/ OS specifies the operating system, for example `linux` or `windows`.\n\tOS string `json:\"os\"`\n\n\t\/\/ OSVersion is an optional field specifying the operating system\n\t\/\/ version, for example `10.0.10586`.\n\tOSVersion string `json:\"os.version,omitempty\"`\n\n\t\/\/ OSFeatures is an optional field specifying an array of strings,\n\t\/\/ each listing a required OS feature (for example on Windows `win32k`).\n\tOSFeatures []string `json:\"os.features,omitempty\"`\n\n\t\/\/ Variant is an optional field specifying a variant of the CPU, for\n\t\/\/ example `ppc64le` to specify a little-endian version of a PowerPC CPU.\n\tVariant string `json:\"variant,omitempty\"`\n\n\t\/\/ Features is an optional field specifying an array of strings, each\n\t\/\/ listing a required CPU feature (for example `sse4` or `aes`).\n\tFeatures []string `json:\"features,omitempty\"`\n}\n\n\/\/ A ManifestDescriptor references a platform-specific manifest.\ntype ManifestDescriptor struct {\n\tdistribution.Descriptor\n\n\t\/\/ Platform specifies which platform the manifest pointed to by the\n\t\/\/ descriptor runs on.\n\tPlatform PlatformSpec `json:\"platform\"`\n}\n\n\/\/ ManifestList references manifests for various platforms.\ntype ManifestList struct {\n\tmanifest.Versioned\n\n\t\/\/ Config references the image configuration as a blob.\n\tManifests []ManifestDescriptor `json:\"manifests\"`\n}\n\n\/\/ References returnes the distribution descriptors for the referenced image\n\/\/ manifests.\nfunc (m ManifestList) References() []distribution.Descriptor {\n\tdependencies := make([]distribution.Descriptor, len(m.Manifests))\n\tfor i := range m.Manifests {\n\t\tdependencies[i] = m.Manifests[i].Descriptor\n\t}\n\n\treturn dependencies\n}\n\n\/\/ DeserializedManifestList wraps ManifestList with a copy of the original\n\/\/ JSON.\ntype DeserializedManifestList struct {\n\tManifestList\n\n\t\/\/ canonical is the canonical byte representation of the Manifest.\n\tcanonical []byte\n}\n\n\/\/ FromDescriptors takes a slice of descriptors, and returns a\n\/\/ DeserializedManifestList which contains the resulting manifest list\n\/\/ and its JSON representation.\nfunc FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) {\n\tm := ManifestList{\n\t\tVersioned: SchemaVersion,\n\t}\n\n\tm.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors))\n\tcopy(m.Manifests, descriptors)\n\n\tdeserialized := DeserializedManifestList{\n\t\tManifestList: m,\n\t}\n\n\tvar err error\n\tdeserialized.canonical, err = json.MarshalIndent(&m, \"\", \" \")\n\treturn &deserialized, err\n}\n\n\/\/ UnmarshalJSON populates a new ManifestList struct from JSON data.\nfunc (m *DeserializedManifestList) UnmarshalJSON(b []byte) error {\n\tm.canonical = make([]byte, len(b), len(b))\n\t\/\/ store manifest list in canonical\n\tcopy(m.canonical, b)\n\n\t\/\/ Unmarshal canonical JSON into ManifestList object\n\tvar manifestList ManifestList\n\tif err := json.Unmarshal(m.canonical, &manifestList); err != nil {\n\t\treturn err\n\t}\n\n\tm.ManifestList = manifestList\n\n\treturn nil\n}\n\n\/\/ MarshalJSON returns the contents of canonical. If canonical is empty,\n\/\/ marshals the inner contents.\nfunc (m *DeserializedManifestList) MarshalJSON() ([]byte, error) {\n\tif len(m.canonical) > 0 {\n\t\treturn m.canonical, nil\n\t}\n\n\treturn nil, errors.New(\"JSON representation not initialized in DeserializedManifestList\")\n}\n\n\/\/ Payload returns the raw content of the manifest list. The contents can be\n\/\/ used to calculate the content identifier.\nfunc (m DeserializedManifestList) Payload() (string, []byte, error) {\n\treturn m.MediaType, m.canonical, nil\n}\n<commit_msg>UPSTREAM: docker\/distribution: <carry>: Revert \"disable manifest list registration\"<commit_after>package manifestlist\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com\/docker\/distribution\"\n\t\"github.com\/docker\/distribution\/digest\"\n\t\"github.com\/docker\/distribution\/manifest\"\n)\n\n\/\/ MediaTypeManifestList specifies the mediaType for manifest lists.\nconst MediaTypeManifestList = \"application\/vnd.docker.distribution.manifest.list.v2+json\"\n\n\/\/ SchemaVersion provides a pre-initialized version structure for this\n\/\/ packages version of the manifest.\nvar SchemaVersion = manifest.Versioned{\n\tSchemaVersion: 2,\n\tMediaType: MediaTypeManifestList,\n}\n\nfunc init() {\n\tmanifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {\n\t\tm := new(DeserializedManifestList)\n\t\terr := m.UnmarshalJSON(b)\n\t\tif err != nil {\n\t\t\treturn nil, distribution.Descriptor{}, err\n\t\t}\n\n\t\tdgst := digest.FromBytes(b)\n\t\treturn m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err\n\t}\n\terr := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unable to register manifest: %s\", err))\n\t}\n}\n\n\/\/ PlatformSpec specifies a platform where a particular image manifest is\n\/\/ applicable.\ntype PlatformSpec struct {\n\t\/\/ Architecture field specifies the CPU architecture, for example\n\t\/\/ `amd64` or `ppc64`.\n\tArchitecture string `json:\"architecture\"`\n\n\t\/\/ OS specifies the operating system, for example `linux` or `windows`.\n\tOS string `json:\"os\"`\n\n\t\/\/ OSVersion is an optional field specifying the operating system\n\t\/\/ version, for example `10.0.10586`.\n\tOSVersion string `json:\"os.version,omitempty\"`\n\n\t\/\/ OSFeatures is an optional field specifying an array of strings,\n\t\/\/ each listing a required OS feature (for example on Windows `win32k`).\n\tOSFeatures []string `json:\"os.features,omitempty\"`\n\n\t\/\/ Variant is an optional field specifying a variant of the CPU, for\n\t\/\/ example `ppc64le` to specify a little-endian version of a PowerPC CPU.\n\tVariant string `json:\"variant,omitempty\"`\n\n\t\/\/ Features is an optional field specifying an array of strings, each\n\t\/\/ listing a required CPU feature (for example `sse4` or `aes`).\n\tFeatures []string `json:\"features,omitempty\"`\n}\n\n\/\/ A ManifestDescriptor references a platform-specific manifest.\ntype ManifestDescriptor struct {\n\tdistribution.Descriptor\n\n\t\/\/ Platform specifies which platform the manifest pointed to by the\n\t\/\/ descriptor runs on.\n\tPlatform PlatformSpec `json:\"platform\"`\n}\n\n\/\/ ManifestList references manifests for various platforms.\ntype ManifestList struct {\n\tmanifest.Versioned\n\n\t\/\/ Config references the image configuration as a blob.\n\tManifests []ManifestDescriptor `json:\"manifests\"`\n}\n\n\/\/ References returnes the distribution descriptors for the referenced image\n\/\/ manifests.\nfunc (m ManifestList) References() []distribution.Descriptor {\n\tdependencies := make([]distribution.Descriptor, len(m.Manifests))\n\tfor i := range m.Manifests {\n\t\tdependencies[i] = m.Manifests[i].Descriptor\n\t}\n\n\treturn dependencies\n}\n\n\/\/ DeserializedManifestList wraps ManifestList with a copy of the original\n\/\/ JSON.\ntype DeserializedManifestList struct {\n\tManifestList\n\n\t\/\/ canonical is the canonical byte representation of the Manifest.\n\tcanonical []byte\n}\n\n\/\/ FromDescriptors takes a slice of descriptors, and returns a\n\/\/ DeserializedManifestList which contains the resulting manifest list\n\/\/ and its JSON representation.\nfunc FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) {\n\tm := ManifestList{\n\t\tVersioned: SchemaVersion,\n\t}\n\n\tm.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors))\n\tcopy(m.Manifests, descriptors)\n\n\tdeserialized := DeserializedManifestList{\n\t\tManifestList: m,\n\t}\n\n\tvar err error\n\tdeserialized.canonical, err = json.MarshalIndent(&m, \"\", \" \")\n\treturn &deserialized, err\n}\n\n\/\/ UnmarshalJSON populates a new ManifestList struct from JSON data.\nfunc (m *DeserializedManifestList) UnmarshalJSON(b []byte) error {\n\tm.canonical = make([]byte, len(b), len(b))\n\t\/\/ store manifest list in canonical\n\tcopy(m.canonical, b)\n\n\t\/\/ Unmarshal canonical JSON into ManifestList object\n\tvar manifestList ManifestList\n\tif err := json.Unmarshal(m.canonical, &manifestList); err != nil {\n\t\treturn err\n\t}\n\n\tm.ManifestList = manifestList\n\n\treturn nil\n}\n\n\/\/ MarshalJSON returns the contents of canonical. If canonical is empty,\n\/\/ marshals the inner contents.\nfunc (m *DeserializedManifestList) MarshalJSON() ([]byte, error) {\n\tif len(m.canonical) > 0 {\n\t\treturn m.canonical, nil\n\t}\n\n\treturn nil, errors.New(\"JSON representation not initialized in DeserializedManifestList\")\n}\n\n\/\/ Payload returns the raw content of the manifest list. The contents can be\n\/\/ used to calculate the content identifier.\nfunc (m DeserializedManifestList) Payload() (string, []byte, error) {\n\treturn m.MediaType, m.canonical, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This package provides basic interfaces to I\/O primitives.\n\/\/ Its primary job is to wrap existing implementations of such primitives,\n\/\/ such as those in package os, into shared public interfaces that\n\/\/ abstract the functionality.\n\/\/ It also provides buffering primitives and some other basic operations.\npackage io\n\nimport (\n\t\"os\";\n\t\"strings\";\n)\n\n\/\/ Error represents an unexpected I\/O behavior.\ntype Error struct {\n\tos.ErrorString;\n}\n\n\/\/ ErrShortWrite means that a write accepted fewer bytes than requested\n\/\/ but failed to return an explicit error.\nvar ErrShortWrite os.Error = &Error{\"short write\"}\n\n\/\/ ErrUnexpectedEOF means that os.EOF was encountered in the\n\/\/ middle of reading a fixed-size block or data structure.\nvar ErrUnexpectedEOF os.Error = &Error{\"unexpected EOF\"}\n\n\/\/ Reader is the interface that wraps the basic Read method.\n\/\/\n\/\/ Read reads up to len(p) bytes into p. It returns the number of bytes\n\/\/ read (0 <= n <= len(p)) and any error encountered.\n\/\/ Even if Read returns n < len(p),\n\/\/ it may use all of p as scratch space during the call.\n\/\/ If some data is available but not len(p) bytes, Read conventionally\n\/\/ returns what is available rather than block waiting for more.\n\/\/\n\/\/ At the end of the input stream, Read returns 0, os.EOF.\n\/\/ Read may return a non-zero number of bytes with a non-nil err.\n\/\/ In particular, a Read that exhausts the input may return n > 0, os.EOF.\ntype Reader interface {\n\tRead(p []byte) (n int, err os.Error);\n}\n\n\/\/ Writer is the interface that wraps the basic Write method.\n\/\/\n\/\/ Write writes len(p) bytes from p to the underlying data stream.\n\/\/ It returns the number of bytes written from p (0 <= n <= len(p))\n\/\/ and any error encountered that caused the write to stop early.\n\/\/ Write must return a non-nil error if it returns n < len(p).\ntype Writer interface {\n\tWrite(p []byte) (n int, err os.Error);\n}\n\n\/\/ Closer is the interface that wraps the basic Close method.\ntype Closer interface {\n\tClose() os.Error;\n}\n\n\/\/ Seeker is the interface that wraps the basic Seek method.\n\/\/\n\/\/ Seek sets the offset for the next Read or Write to offset,\n\/\/ interpreted according to whence: 0 means relative to the origin of\n\/\/ the file, 1 means relative to the current offset, and 2 means\n\/\/ relative to the end. Seek returns the new offset and an Error, if\n\/\/ any.\ntype Seeker interface {\n\tSeek(offset int64, whence int) (ret int64, err os.Error);\n}\n\n\/\/ ReadWrite is the interface that groups the basic Read and Write methods.\ntype ReadWriter interface {\n\tReader;\n\tWriter;\n}\n\n\/\/ ReadCloser is the interface that groups the basic Read and Close methods.\ntype ReadCloser interface {\n\tReader;\n\tCloser;\n}\n\n\/\/ WriteCloser is the interface that groups the basic Write and Close methods.\ntype WriteCloser interface {\n\tWriter;\n\tCloser;\n}\n\n\/\/ ReadWriteCloser is the interface that groups the basic Read, Write and Close methods.\ntype ReadWriteCloser interface {\n\tReader;\n\tWriter;\n\tCloser;\n}\n\n\/\/ ReadSeeker is the interface that groups the basic Read and Seek methods.\ntype ReadSeeker interface {\n\tReader;\n\tSeeker;\n}\n\n\/\/ WriteSeeker is the interface that groups the basic Write and Seek methods.\ntype WriteSeeker interface {\n\tWriter;\n\tSeeker;\n}\n\n\/\/ ReadWriteSeeker is the interface that groups the basic Read, Write and Seek methods.\ntype ReadWriteSeeker interface {\n\tReader;\n\tWriter;\n\tSeeker;\n}\n\n\/\/ ReaderAt is the interface that wraps the basic ReadAt method.\n\/\/\n\/\/ ReadAt reads len(p) bytes into p starting at offset off in the\n\/\/ underlying data stream. It returns the number of bytes\n\/\/ read (0 <= n <= len(p)) and any error encountered.\n\/\/\n\/\/ Even if ReadAt returns n < len(p),\n\/\/ it may use all of p as scratch space during the call.\n\/\/ If some data is available but not len(p) bytes, ReadAt blocks\n\/\/ until either all the data is available or an error occurs.\n\/\/\n\/\/ At the end of the input stream, ReadAt returns 0, os.EOF.\n\/\/ ReadAt may return a non-zero number of bytes with a non-nil err.\n\/\/ In particular, a ReadAt that exhausts the input may return n > 0, os.EOF.\ntype ReaderAt interface {\n\tReadAt(p []byte, off int64) (n int, err os.Error);\n}\n\n\/\/ WriterAt is the interface that wraps the basic WriteAt method.\n\/\/\n\/\/ WriteAt writes len(p) bytes from p to the underlying data stream\n\/\/ at offset off. It returns the number of bytes written from p (0 <= n <= len(p))\n\/\/ and any error encountered that caused the write to stop early.\n\/\/ WriteAt must return a non-nil error if it returns n < len(p).\ntype WriterAt interface {\n\tWriteAt(p []byte, off int64) (n int, err os.Error);\n}\n\n\/\/ WriteString writes the contents of the string s to w, which accepts an array of bytes.\nfunc WriteString(w Writer, s string) (n int, err os.Error) {\n\treturn w.Write(strings.Bytes(s))\n}\n\n\/\/ ReadAtLeast reads from r into buf until it has read at least min bytes.\n\/\/ It returns the number of bytes copied and an error if fewer bytes were read.\n\/\/ The error is os.EOF only if no bytes were read.\n\/\/ If an EOF happens after reading fewer than min bytes,\n\/\/ ReadAtLeast returns ErrUnexpectedEOF.\nfunc ReadAtLeast(r Reader, buf []byte, min int) (n int, err os.Error) {\n\tn = 0;\n\tfor n < min {\n\t\tnn, e := r.Read(buf[n:]);\n\t\tif nn > 0 {\n\t\t\tn += nn\n\t\t}\n\t\tif e != nil {\n\t\t\tif e == os.EOF && n > 0 {\n\t\t\t\te = ErrUnexpectedEOF\n\t\t\t}\n\t\t\treturn n, e;\n\t\t}\n\t}\n\treturn n, nil;\n}\n\n\/\/ ReadFull reads exactly len(buf) bytes from r into buf.\n\/\/ It returns the number of bytes copied and an error if fewer bytes were read.\n\/\/ The error is os.EOF only if no bytes were read.\n\/\/ If an EOF happens after reading some but not all the bytes,\n\/\/ ReadFull returns ErrUnexpectedEOF.\nfunc ReadFull(r Reader, buf []byte) (n int, err os.Error) {\n\treturn ReadAtLeast(r, buf, len(buf))\n}\n\n\/\/ Copyn copies n bytes (or until an error) from src to dst.\n\/\/ It returns the number of bytes copied and the error, if any.\nfunc Copyn(dst Writer, src Reader, n int64) (written int64, err os.Error) {\n\tbuf := make([]byte, 32*1024);\n\tfor written < n {\n\t\tl := len(buf);\n\t\tif d := n - written; d < int64(l) {\n\t\t\tl = int(d)\n\t\t}\n\t\tnr, er := src.Read(buf[0:l]);\n\t\tif nr > 0 {\n\t\t\tnw, ew := dst.Write(buf[0:nr]);\n\t\t\tif nw > 0 {\n\t\t\t\twritten += int64(nw)\n\t\t\t}\n\t\t\tif ew != nil {\n\t\t\t\terr = ew;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tif nr != nw {\n\t\t\t\terr = ErrShortWrite;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\tif er != nil {\n\t\t\terr = er;\n\t\t\tbreak;\n\t\t}\n\t}\n\treturn written, err;\n}\n\n\/\/ Copy copies from src to dst until either EOF is reached\n\/\/ on src or an error occurs. It returns the number of bytes\n\/\/ copied and the error, if any.\nfunc Copy(dst Writer, src Reader) (written int64, err os.Error) {\n\tbuf := make([]byte, 32*1024);\n\tfor {\n\t\tnr, er := src.Read(buf);\n\t\tif nr > 0 {\n\t\t\tnw, ew := dst.Write(buf[0:nr]);\n\t\t\tif nw > 0 {\n\t\t\t\twritten += int64(nw)\n\t\t\t}\n\t\t\tif ew != nil {\n\t\t\t\terr = ew;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tif nr != nw {\n\t\t\t\terr = ErrShortWrite;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\tif er == os.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif er != nil {\n\t\t\terr = er;\n\t\t\tbreak;\n\t\t}\n\t}\n\treturn written, err;\n}\n\n\/\/ LimitReader returns a Reader that reads from r\n\/\/ but stops with os.EOF after n bytes.\nfunc LimitReader(r Reader, n int64) Reader\t{ return &limitedReader{r, n} }\n\ntype limitedReader struct {\n\tr\tReader;\n\tn\tint64;\n}\n\nfunc (l *limitedReader) Read(p []byte) (n int, err os.Error) {\n\tif l.n <= 0 {\n\t\treturn 0, os.EOF\n\t}\n\tif int64(len(p)) > l.n {\n\t\tp = p[0:l.n]\n\t}\n\tn, err = l.r.Read(p);\n\tl.n -= int64(n);\n\treturn;\n}\n\n\/\/ NewSectionReader returns a SectionReader that reads from r\n\/\/ starting at offset off and stops with os.EOF after n bytes.\nfunc NewSectionReader(r ReaderAt, off int64, n int64) *SectionReader {\n\treturn &SectionReader{r, off, off, off + n}\n}\n\n\/\/ SectionReader implements Read, Seek, and ReadAt on a section\n\/\/ of an underlying ReaderAt.\ntype SectionReader struct {\n\tr\tReaderAt;\n\tbase\tint64;\n\toff\tint64;\n\tlimit\tint64;\n}\n\nfunc (s *SectionReader) Read(p []byte) (n int, err os.Error) {\n\tif s.off >= s.limit {\n\t\treturn 0, os.EOF\n\t}\n\tif max := s.limit - s.off; int64(len(p)) > max {\n\t\tp = p[0:max]\n\t}\n\tn, err = s.r.ReadAt(p, s.off);\n\ts.off += int64(n);\n\treturn;\n}\n\nfunc (s *SectionReader) Seek(offset int64, whence int) (ret int64, err os.Error) {\n\tswitch whence {\n\tdefault:\n\t\treturn 0, os.EINVAL\n\tcase 0:\n\t\toffset += s.base\n\tcase 1:\n\t\toffset += s.off\n\tcase 2:\n\t\toffset += s.limit\n\t}\n\tif offset < s.off || offset > s.limit {\n\t\treturn 0, os.EINVAL\n\t}\n\ts.off = offset;\n\treturn offset - s.base, nil;\n}\n\nfunc (s *SectionReader) ReadAt(p []byte, off int64) (n int, err os.Error) {\n\tif off < 0 || off >= s.limit-s.base {\n\t\treturn 0, os.EOF\n\t}\n\toff += s.base;\n\tif max := s.limit - off; int64(len(p)) > max {\n\t\tp = p[0:max]\n\t}\n\treturn s.r.ReadAt(p, off);\n}\n\n\/\/ Size returns the size of the section in bytes.\nfunc (s *SectionReader) Size() int64\t{ return s.limit - s.base }\n<commit_msg>io.ReadWriter: fix doc comment (ReadWrite -> ReadWriter)<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This package provides basic interfaces to I\/O primitives.\n\/\/ Its primary job is to wrap existing implementations of such primitives,\n\/\/ such as those in package os, into shared public interfaces that\n\/\/ abstract the functionality.\n\/\/ It also provides buffering primitives and some other basic operations.\npackage io\n\nimport (\n\t\"os\";\n\t\"strings\";\n)\n\n\/\/ Error represents an unexpected I\/O behavior.\ntype Error struct {\n\tos.ErrorString;\n}\n\n\/\/ ErrShortWrite means that a write accepted fewer bytes than requested\n\/\/ but failed to return an explicit error.\nvar ErrShortWrite os.Error = &Error{\"short write\"}\n\n\/\/ ErrUnexpectedEOF means that os.EOF was encountered in the\n\/\/ middle of reading a fixed-size block or data structure.\nvar ErrUnexpectedEOF os.Error = &Error{\"unexpected EOF\"}\n\n\/\/ Reader is the interface that wraps the basic Read method.\n\/\/\n\/\/ Read reads up to len(p) bytes into p. It returns the number of bytes\n\/\/ read (0 <= n <= len(p)) and any error encountered.\n\/\/ Even if Read returns n < len(p),\n\/\/ it may use all of p as scratch space during the call.\n\/\/ If some data is available but not len(p) bytes, Read conventionally\n\/\/ returns what is available rather than block waiting for more.\n\/\/\n\/\/ At the end of the input stream, Read returns 0, os.EOF.\n\/\/ Read may return a non-zero number of bytes with a non-nil err.\n\/\/ In particular, a Read that exhausts the input may return n > 0, os.EOF.\ntype Reader interface {\n\tRead(p []byte) (n int, err os.Error);\n}\n\n\/\/ Writer is the interface that wraps the basic Write method.\n\/\/\n\/\/ Write writes len(p) bytes from p to the underlying data stream.\n\/\/ It returns the number of bytes written from p (0 <= n <= len(p))\n\/\/ and any error encountered that caused the write to stop early.\n\/\/ Write must return a non-nil error if it returns n < len(p).\ntype Writer interface {\n\tWrite(p []byte) (n int, err os.Error);\n}\n\n\/\/ Closer is the interface that wraps the basic Close method.\ntype Closer interface {\n\tClose() os.Error;\n}\n\n\/\/ Seeker is the interface that wraps the basic Seek method.\n\/\/\n\/\/ Seek sets the offset for the next Read or Write to offset,\n\/\/ interpreted according to whence: 0 means relative to the origin of\n\/\/ the file, 1 means relative to the current offset, and 2 means\n\/\/ relative to the end. Seek returns the new offset and an Error, if\n\/\/ any.\ntype Seeker interface {\n\tSeek(offset int64, whence int) (ret int64, err os.Error);\n}\n\n\/\/ ReadWriter is the interface that groups the basic Read and Write methods.\ntype ReadWriter interface {\n\tReader;\n\tWriter;\n}\n\n\/\/ ReadCloser is the interface that groups the basic Read and Close methods.\ntype ReadCloser interface {\n\tReader;\n\tCloser;\n}\n\n\/\/ WriteCloser is the interface that groups the basic Write and Close methods.\ntype WriteCloser interface {\n\tWriter;\n\tCloser;\n}\n\n\/\/ ReadWriteCloser is the interface that groups the basic Read, Write and Close methods.\ntype ReadWriteCloser interface {\n\tReader;\n\tWriter;\n\tCloser;\n}\n\n\/\/ ReadSeeker is the interface that groups the basic Read and Seek methods.\ntype ReadSeeker interface {\n\tReader;\n\tSeeker;\n}\n\n\/\/ WriteSeeker is the interface that groups the basic Write and Seek methods.\ntype WriteSeeker interface {\n\tWriter;\n\tSeeker;\n}\n\n\/\/ ReadWriteSeeker is the interface that groups the basic Read, Write and Seek methods.\ntype ReadWriteSeeker interface {\n\tReader;\n\tWriter;\n\tSeeker;\n}\n\n\/\/ ReaderAt is the interface that wraps the basic ReadAt method.\n\/\/\n\/\/ ReadAt reads len(p) bytes into p starting at offset off in the\n\/\/ underlying data stream. It returns the number of bytes\n\/\/ read (0 <= n <= len(p)) and any error encountered.\n\/\/\n\/\/ Even if ReadAt returns n < len(p),\n\/\/ it may use all of p as scratch space during the call.\n\/\/ If some data is available but not len(p) bytes, ReadAt blocks\n\/\/ until either all the data is available or an error occurs.\n\/\/\n\/\/ At the end of the input stream, ReadAt returns 0, os.EOF.\n\/\/ ReadAt may return a non-zero number of bytes with a non-nil err.\n\/\/ In particular, a ReadAt that exhausts the input may return n > 0, os.EOF.\ntype ReaderAt interface {\n\tReadAt(p []byte, off int64) (n int, err os.Error);\n}\n\n\/\/ WriterAt is the interface that wraps the basic WriteAt method.\n\/\/\n\/\/ WriteAt writes len(p) bytes from p to the underlying data stream\n\/\/ at offset off. It returns the number of bytes written from p (0 <= n <= len(p))\n\/\/ and any error encountered that caused the write to stop early.\n\/\/ WriteAt must return a non-nil error if it returns n < len(p).\ntype WriterAt interface {\n\tWriteAt(p []byte, off int64) (n int, err os.Error);\n}\n\n\/\/ WriteString writes the contents of the string s to w, which accepts an array of bytes.\nfunc WriteString(w Writer, s string) (n int, err os.Error) {\n\treturn w.Write(strings.Bytes(s))\n}\n\n\/\/ ReadAtLeast reads from r into buf until it has read at least min bytes.\n\/\/ It returns the number of bytes copied and an error if fewer bytes were read.\n\/\/ The error is os.EOF only if no bytes were read.\n\/\/ If an EOF happens after reading fewer than min bytes,\n\/\/ ReadAtLeast returns ErrUnexpectedEOF.\nfunc ReadAtLeast(r Reader, buf []byte, min int) (n int, err os.Error) {\n\tn = 0;\n\tfor n < min {\n\t\tnn, e := r.Read(buf[n:]);\n\t\tif nn > 0 {\n\t\t\tn += nn\n\t\t}\n\t\tif e != nil {\n\t\t\tif e == os.EOF && n > 0 {\n\t\t\t\te = ErrUnexpectedEOF\n\t\t\t}\n\t\t\treturn n, e;\n\t\t}\n\t}\n\treturn n, nil;\n}\n\n\/\/ ReadFull reads exactly len(buf) bytes from r into buf.\n\/\/ It returns the number of bytes copied and an error if fewer bytes were read.\n\/\/ The error is os.EOF only if no bytes were read.\n\/\/ If an EOF happens after reading some but not all the bytes,\n\/\/ ReadFull returns ErrUnexpectedEOF.\nfunc ReadFull(r Reader, buf []byte) (n int, err os.Error) {\n\treturn ReadAtLeast(r, buf, len(buf))\n}\n\n\/\/ Copyn copies n bytes (or until an error) from src to dst.\n\/\/ It returns the number of bytes copied and the error, if any.\nfunc Copyn(dst Writer, src Reader, n int64) (written int64, err os.Error) {\n\tbuf := make([]byte, 32*1024);\n\tfor written < n {\n\t\tl := len(buf);\n\t\tif d := n - written; d < int64(l) {\n\t\t\tl = int(d)\n\t\t}\n\t\tnr, er := src.Read(buf[0:l]);\n\t\tif nr > 0 {\n\t\t\tnw, ew := dst.Write(buf[0:nr]);\n\t\t\tif nw > 0 {\n\t\t\t\twritten += int64(nw)\n\t\t\t}\n\t\t\tif ew != nil {\n\t\t\t\terr = ew;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tif nr != nw {\n\t\t\t\terr = ErrShortWrite;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\tif er != nil {\n\t\t\terr = er;\n\t\t\tbreak;\n\t\t}\n\t}\n\treturn written, err;\n}\n\n\/\/ Copy copies from src to dst until either EOF is reached\n\/\/ on src or an error occurs. It returns the number of bytes\n\/\/ copied and the error, if any.\nfunc Copy(dst Writer, src Reader) (written int64, err os.Error) {\n\tbuf := make([]byte, 32*1024);\n\tfor {\n\t\tnr, er := src.Read(buf);\n\t\tif nr > 0 {\n\t\t\tnw, ew := dst.Write(buf[0:nr]);\n\t\t\tif nw > 0 {\n\t\t\t\twritten += int64(nw)\n\t\t\t}\n\t\t\tif ew != nil {\n\t\t\t\terr = ew;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t\tif nr != nw {\n\t\t\t\terr = ErrShortWrite;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\tif er == os.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif er != nil {\n\t\t\terr = er;\n\t\t\tbreak;\n\t\t}\n\t}\n\treturn written, err;\n}\n\n\/\/ LimitReader returns a Reader that reads from r\n\/\/ but stops with os.EOF after n bytes.\nfunc LimitReader(r Reader, n int64) Reader\t{ return &limitedReader{r, n} }\n\ntype limitedReader struct {\n\tr\tReader;\n\tn\tint64;\n}\n\nfunc (l *limitedReader) Read(p []byte) (n int, err os.Error) {\n\tif l.n <= 0 {\n\t\treturn 0, os.EOF\n\t}\n\tif int64(len(p)) > l.n {\n\t\tp = p[0:l.n]\n\t}\n\tn, err = l.r.Read(p);\n\tl.n -= int64(n);\n\treturn;\n}\n\n\/\/ NewSectionReader returns a SectionReader that reads from r\n\/\/ starting at offset off and stops with os.EOF after n bytes.\nfunc NewSectionReader(r ReaderAt, off int64, n int64) *SectionReader {\n\treturn &SectionReader{r, off, off, off + n}\n}\n\n\/\/ SectionReader implements Read, Seek, and ReadAt on a section\n\/\/ of an underlying ReaderAt.\ntype SectionReader struct {\n\tr\tReaderAt;\n\tbase\tint64;\n\toff\tint64;\n\tlimit\tint64;\n}\n\nfunc (s *SectionReader) Read(p []byte) (n int, err os.Error) {\n\tif s.off >= s.limit {\n\t\treturn 0, os.EOF\n\t}\n\tif max := s.limit - s.off; int64(len(p)) > max {\n\t\tp = p[0:max]\n\t}\n\tn, err = s.r.ReadAt(p, s.off);\n\ts.off += int64(n);\n\treturn;\n}\n\nfunc (s *SectionReader) Seek(offset int64, whence int) (ret int64, err os.Error) {\n\tswitch whence {\n\tdefault:\n\t\treturn 0, os.EINVAL\n\tcase 0:\n\t\toffset += s.base\n\tcase 1:\n\t\toffset += s.off\n\tcase 2:\n\t\toffset += s.limit\n\t}\n\tif offset < s.off || offset > s.limit {\n\t\treturn 0, os.EINVAL\n\t}\n\ts.off = offset;\n\treturn offset - s.base, nil;\n}\n\nfunc (s *SectionReader) ReadAt(p []byte, off int64) (n int, err os.Error) {\n\tif off < 0 || off >= s.limit-s.base {\n\t\treturn 0, os.EOF\n\t}\n\toff += s.base;\n\tif max := s.limit - off; int64(len(p)) > max {\n\t\tp = p[0:max]\n\t}\n\treturn s.r.ReadAt(p, off);\n}\n\n\/\/ Size returns the size of the section in bytes.\nfunc (s *SectionReader) Size() int64\t{ return s.limit - s.base }\n<|endoftext|>"} {"text":"<commit_before>\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this\n\/\/ file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\/\/\n\/\/ Contributor: Zack Mullaly zmullaly@mozilla.com [:zack]\n\npackage main\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"testing\"\n\n\t\"gopkg.in\/gcfg.v1\"\n)\n\ntype mozdef struct {\n\tURL string\n\tUseProxy bool\n}\n\nfunc TestConfigParsing(t *testing.T) {\n\ttestCases := []struct {\n\t\tDescription string\n\t\tConfigString string\n\t\tExpectedConfig config\n\t\tExpectError bool\n\t}{\n\t\t{\n\t\t\tDescription: \"A valid configuration should parse correctly\",\n\t\t\tConfigString: `\n [mozdef]\n url = \"testurl\"\n source = \"mozdef\"\n useProxy = true\n `,\n\t\t\tExpectedConfig: config{\n\t\t\t\tMozDef: mozdef{\n\t\t\t\t\tURL: \"testurl\",\n\t\t\t\t\tUseProxy: true,\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpectError: false,\n\t\t},\n\t\t{\n\t\t\tDescription: \"Parsing fails if UseProxy is not a boolean\",\n\t\t\tConfigString: `\n [mozdef]\n url = \"testurl\"\n source = \"mozdef\"\n useProxy = \"notbool\"\n `,\n\t\t\tExpectedConfig: config{\n\t\t\t\tMozDef: mozdef{\n\t\t\t\t\tURL: \"testurl\",\n\t\t\t\t\tUseProxy: false,\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpectError: true,\n\t\t},\n\t}\n\n\tfor caseNum, testCase := range testCases {\n\t\tt.Logf(\"Running TestConfigParsing case #%d: %s\", caseNum, testCase.Description)\n\n\t\tconfigFile, err := ioutil.TempFile(\"\", \"*.cfg\")\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif _, err = configFile.Write([]byte(testCase.ConfigString)); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err = configFile.Close(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tconf := config{}\n\t\tparseErr := gcfg.ReadFileInto(&conf, configFile.Name())\n\n\t\tgotErr := parseErr != nil\n\t\tif gotErr && !testCase.ExpectError {\n\t\t\tt.Errorf(\"Did not expect to get a parse error, but got '%s'\", parseErr.Error())\n\t\t} else if !gotErr && testCase.ExpectError {\n\t\t\tt.Errorf(\"Expected to get a parse error, but did not\")\n\t\t}\n\n\t\tos.Remove(configFile.Name())\n\n\t\tif testCase.ExpectError {\n\t\t\tcontinue\n\t\t}\n\n\t\tif conf.MozDef.URL != testCase.ExpectedConfig.MozDef.URL {\n\t\t\tt.Errorf(\n\t\t\t\t\"Expected parsed URL to be %s but it's %s\",\n\t\t\t\ttestCase.ExpectedConfig.MozDef.URL,\n\t\t\t\tconf.MozDef.URL)\n\t\t}\n\n\t\tif conf.MozDef.UseProxy != testCase.ExpectedConfig.MozDef.UseProxy {\n\t\t\tt.Errorf(\n\t\t\t\t\"Expected parsed UseProxy to be %v but it's %v\",\n\t\t\t\ttestCase.ExpectedConfig.MozDef.UseProxy,\n\t\t\t\tconf.MozDef.UseProxy)\n\t\t}\n\t}\n}\n<commit_msg>Remove old references to config.MozDef.Source from tests<commit_after>\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this\n\/\/ file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\/\/\n\/\/ Contributor: Zack Mullaly zmullaly@mozilla.com [:zack]\n\npackage main\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"testing\"\n\n\t\"gopkg.in\/gcfg.v1\"\n)\n\ntype mozdef struct {\n\tURL string\n\tUseProxy bool\n}\n\nfunc TestConfigParsing(t *testing.T) {\n\ttestCases := []struct {\n\t\tDescription string\n\t\tConfigString string\n\t\tExpectedConfig config\n\t\tExpectError bool\n\t}{\n\t\t{\n\t\t\tDescription: \"A valid configuration should parse correctly\",\n\t\t\tConfigString: `\n [mozdef]\n url = \"testurl\"\n useProxy = true\n `,\n\t\t\tExpectedConfig: config{\n\t\t\t\tMozDef: mozdef{\n\t\t\t\t\tURL: \"testurl\",\n\t\t\t\t\tUseProxy: true,\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpectError: false,\n\t\t},\n\t\t{\n\t\t\tDescription: \"Parsing fails if UseProxy is not a boolean\",\n\t\t\tConfigString: `\n [mozdef]\n url = \"testurl\"\n useProxy = \"notbool\"\n `,\n\t\t\tExpectedConfig: config{\n\t\t\t\tMozDef: mozdef{\n\t\t\t\t\tURL: \"testurl\",\n\t\t\t\t\tUseProxy: false,\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpectError: true,\n\t\t},\n\t}\n\n\tfor caseNum, testCase := range testCases {\n\t\tt.Logf(\"Running TestConfigParsing case #%d: %s\", caseNum, testCase.Description)\n\n\t\tconfigFile, err := ioutil.TempFile(\"\", \"*.cfg\")\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif _, err = configFile.Write([]byte(testCase.ConfigString)); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err = configFile.Close(); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tconf := config{}\n\t\tparseErr := gcfg.ReadFileInto(&conf, configFile.Name())\n\n\t\tgotErr := parseErr != nil\n\t\tif gotErr && !testCase.ExpectError {\n\t\t\tt.Errorf(\"Did not expect to get a parse error, but got '%s'\", parseErr.Error())\n\t\t} else if !gotErr && testCase.ExpectError {\n\t\t\tt.Errorf(\"Expected to get a parse error, but did not\")\n\t\t}\n\n\t\tos.Remove(configFile.Name())\n\n\t\tif testCase.ExpectError {\n\t\t\tcontinue\n\t\t}\n\n\t\tif conf.MozDef.URL != testCase.ExpectedConfig.MozDef.URL {\n\t\t\tt.Errorf(\n\t\t\t\t\"Expected parsed URL to be %s but it's %s\",\n\t\t\t\ttestCase.ExpectedConfig.MozDef.URL,\n\t\t\t\tconf.MozDef.URL)\n\t\t}\n\n\t\tif conf.MozDef.UseProxy != testCase.ExpectedConfig.MozDef.UseProxy {\n\t\t\tt.Errorf(\n\t\t\t\t\"Expected parsed UseProxy to be %v but it's %v\",\n\t\t\t\ttestCase.ExpectedConfig.MozDef.UseProxy,\n\t\t\t\tconf.MozDef.UseProxy)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package replication\n\nimport (\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"github.com\/satori\/go.uuid\"\n\t\"github.com\/siddontang\/go-mysql\/client\"\n\t. \"github.com\/siddontang\/go-mysql\/mysql\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype BinlogSyncer struct {\n\tc *client.Conn\n\tserverID uint32\n\n\thost string\n\tport uint16\n\tuser string\n\tpassword string\n\n\tmasterID uint32\n\n\twg sync.WaitGroup\n\n\tquit chan struct{}\n\n\tuseChecksum bool\n\n\tformat *FormatDescriptionEvent\n\n\ttables map[uint64]*TableMapEvent\n}\n\nfunc NewBinlogSyncer(serverID uint32) *BinlogSyncer {\n\tb := new(BinlogSyncer)\n\tb.serverID = serverID\n\n\tb.masterID = 0\n\n\tb.quit = make(chan struct{})\n\tb.useChecksum = false\n\n\tb.tables = make(map[uint64]*TableMapEvent)\n\n\treturn b\n}\n\nfunc (b *BinlogSyncer) Close() {\n\tclose(b.quit)\n\n\tif b.c != nil {\n\t\tb.c.Close()\n\t}\n\n\tb.wg.Wait()\n}\n\nfunc (b *BinlogSyncer) checksumUsed() error {\n\tif r, err := b.c.Execute(\"SHOW GLOBAL VARIABLES LIKE 'BINLOG_CHECKSUM'\"); err != nil {\n\t\treturn err\n\t} else {\n\t\ts, _ := r.GetString(0, 1)\n\t\tif s == \"\" || s == \"NONE\" {\n\t\t\tb.useChecksum = false\n\t\t} else {\n\t\t\tb.useChecksum = true\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (b *BinlogSyncer) GetMasterUUID() (uuid.UUID, error) {\n\tif r, err := b.c.Execute(\"SHOW GLOBAL VARIABLES LIKE 'SERVER_UUID'\"); err != nil {\n\t\treturn uuid.UUID{}, err\n\t} else {\n\t\ts, _ := r.GetString(0, 1)\n\t\tif s == \"\" || s == \"NONE\" {\n\t\t\treturn uuid.UUID{}, nil\n\t\t} else {\n\t\t\treturn uuid.FromString(s)\n\t\t}\n\t}\n}\n\nfunc (b *BinlogSyncer) RegisterSlave(host string, port uint16, user string, password string) error {\n\tb.host = host\n\tb.port = port\n\tb.user = user\n\tb.password = password\n\n\tvar err error\n\tb.c, err = client.Connect(fmt.Sprintf(\"%s:%d\", host, port), user, password, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/for mysql 5.6+, binlog has a crc32 checksum\n\t\/\/see https:\/\/github.com\/alibaba\/canal\/wiki\/BinlogChange(mysql5.6)\n\t\/\/before mysql 5.6, this will not work, don't matter.:-)\n\tif err = b.checksumUsed(); err != nil {\n\t\treturn err\n\t} else if b.useChecksum {\n\t\tif _, err = b.c.Execute(`SET @master_binlog_checksum=@@global.binlog_checksum`); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err = b.writeRegisterSlaveCommand(); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err = b.c.ReadOKPacket(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (b *BinlogSyncer) StartSync(pos Position) (*BinlogStreamer, error) {\n\terr := b.writeBinglogDumpCommand(pos)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := newBinlogStreamer()\n\n\tb.wg.Add(1)\n\tgo b.onStream(s)\n\n\treturn s, nil\n}\n\nfunc (b *BinlogSyncer) StartSyncGTID(gset *GTIDSet) (*BinlogStreamer, error) {\n\terr := b.writeBinlogDumpGTIDCommand(BINLOG_DUMP_NON_BLOCK|BINLOG_THROUGH_GTID, Position{\"\", 4}, gset.Encode())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/to do later\n\ts := newBinlogStreamer()\n\n\tb.wg.Add(1)\n\tgo b.onStream(s)\n\n\treturn s, nil\n}\n\nfunc (b *BinlogSyncer) writeBinglogDumpCommand(p Position) error {\n\t\/\/always start from position 4\n\tif p.Pos < 4 {\n\t\tp.Pos = 4\n\t}\n\n\tb.c.ResetSequence()\n\n\tdata := make([]byte, 4+1+4+2+4+len(p.Name))\n\n\tpos := 4\n\tdata[pos] = COM_BINLOG_DUMP\n\tpos++\n\n\tbinary.LittleEndian.PutUint32(data[pos:], p.Pos)\n\tpos += 4\n\n\tbinary.LittleEndian.PutUint16(data[pos:], BINLOG_DUMP_NEVER_STOP)\n\tpos += 2\n\n\tbinary.LittleEndian.PutUint32(data[pos:], b.serverID)\n\tpos += 4\n\n\tcopy(data[pos:], p.Name)\n\n\treturn b.c.WritePacket(data)\n}\n\nfunc (b *BinlogSyncer) writeBinlogDumpGTIDCommand(flags uint16, p Position, gtidData []byte) error {\n\tb.c.ResetSequence()\n\n\tdata := make([]byte, 4+1+2+4+4+len(p.Name)+8+4+len(gtidData))\n\tpos := 4\n\tdata[pos] = COM_BINLOG_DUMP_GTID\n\tpos++\n\n\tbinary.LittleEndian.PutUint16(data[pos:], flags)\n\tpos += 2\n\n\tbinary.LittleEndian.PutUint32(data[pos:], b.serverID)\n\tpos += 4\n\n\tbinary.LittleEndian.PutUint32(data[pos:], uint32(len(p.Name)))\n\tpos += 4\n\n\tn := copy(data[pos:], p.Name)\n\tpos += n\n\n\tbinary.LittleEndian.PutUint64(data[pos:], uint64(p.Pos))\n\tpos += 8\n\n\tif flags&BINLOG_THROUGH_GTID > 0 {\n\t\tbinary.LittleEndian.PutUint32(data[pos:], uint32(len(gtidData)))\n\t\tpos += 4\n\t\tn = copy(data[pos:], gtidData)\n\t\tpos += n\n\t}\n\tdata = data[0:pos]\n\n\treturn b.c.WritePacket(data)\n}\n\nfunc (b *BinlogSyncer) writeRegisterSlaveCommand() error {\n\tb.c.ResetSequence()\n\n\tdata := make([]byte, 4+1+4+1+len(b.host)+1+len(b.user)+1+len(b.password)+2+4+4)\n\tpos := 4\n\n\tdata[pos] = COM_REGISTER_SLAVE\n\tpos++\n\n\tbinary.LittleEndian.PutUint32(data[pos:], b.serverID)\n\tpos += 4\n\n\tdata[pos] = uint8(len(b.host))\n\tpos++\n\tn := copy(data[pos:], b.host)\n\tpos += n\n\n\tdata[pos] = uint8(len(b.user))\n\tpos++\n\tn = copy(data[pos:], b.user)\n\tpos += n\n\n\tdata[pos] = uint8(len(b.password))\n\tpos++\n\tn = copy(data[pos:], b.password)\n\tpos += n\n\n\tbinary.LittleEndian.PutUint16(data[pos:], b.port)\n\tpos += 2\n\n\t\/\/replication rank, not used\n\tbinary.LittleEndian.PutUint32(data[pos:], 0)\n\tpos += 4\n\n\tbinary.LittleEndian.PutUint32(data[pos:], b.masterID)\n\n\treturn b.c.WritePacket(data)\n}\n\nfunc (b *BinlogSyncer) onStream(s *BinlogStreamer) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\ts.ech <- fmt.Errorf(\"Err: %v\\n Stack: %s\", e, Pstack())\n\t\t}\n\t\tb.wg.Done()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-b.quit:\n\t\t\ts.ech <- ErrSyncClosed\n\t\t\treturn\n\t\tdefault:\n\t\t\tdata, err := b.c.ReadPacket()\n\t\t\tif err != nil {\n\t\t\t\ts.ech <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tswitch data[0] {\n\t\t\tcase OK_HEADER:\n\t\t\t\tif err = b.parseEvent(s, data); err != nil {\n\t\t\t\t\ts.ech <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase ERR_HEADER:\n\t\t\t\terr = b.c.HandleErrorPacket(data)\n\t\t\t\ts.ech <- err\n\t\t\tcase EOF_HEADER:\n\t\t\t\t\/\/no binlog now, sleep and wait a moment again\n\t\t\t\ttime.Sleep(500 * time.Millisecond)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (b *BinlogSyncer) parseEvent(s *BinlogStreamer, data []byte) error {\n\t\/\/skip 0x00\n\tdata = data[1:]\n\n\th := new(EventHeader)\n\terr := h.Decode(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdata = data[EventHeaderSize:]\n\teventLen := int(h.EventSize) - EventHeaderSize\n\n\tif len(data) < eventLen {\n\t\treturn fmt.Errorf(\"invalid data size %d in event %s, less event length %d\", len(data), h.EventType, eventLen)\n\t}\n\n\tif b.useChecksum {\n\t\t\/\/last 4 bytes is crc32, check later\n\t\tdata = data[0 : len(data)-4]\n\t}\n\n\tevData := data\n\n\tvar e Event\n\tswitch h.EventType {\n\tcase FORMAT_DESCRIPTION_EVENT:\n\t\tb.format = &FormatDescriptionEvent{}\n\t\te = b.format\n\tcase ROTATE_EVENT:\n\t\te = &RotateEvent{}\n\tcase QUERY_EVENT:\n\t\te = &QueryEvent{}\n\tcase XID_EVENT:\n\t\te = &XIDEvent{}\n\tcase TABLE_MAP_EVENT:\n\t\tte := &TableMapEvent{}\n\t\tif b.format.EventTypeHeaderLengths[TABLE_MAP_EVENT-1] == 6 {\n\t\t\tte.tableIDSize = 4\n\t\t} else {\n\t\t\tte.tableIDSize = 6\n\t\t}\n\t\te = te\n\tcase WRITE_ROWS_EVENTv0,\n\t\tUPDATE_ROWS_EVENTv0,\n\t\tDELETE_ROWS_EVENTv0,\n\t\tWRITE_ROWS_EVENTv1,\n\t\tDELETE_ROWS_EVENTv1,\n\t\tUPDATE_ROWS_EVENTv1,\n\t\tWRITE_ROWS_EVENTv2,\n\t\tUPDATE_ROWS_EVENTv2,\n\t\tDELETE_ROWS_EVENTv2:\n\t\te = b.newRowsEvent(h)\n\tcase ROWS_QUERY_EVENT:\n\t\te = &RowsQueryEvent{}\n\tcase GTID_EVENT:\n\t\te = >IDEvent{}\n\tdefault:\n\t\te = &GenericEvent{}\n\t}\n\n\tif err := e.Decode(data); err != nil {\n\t\treturn &EventError{h, err.Error(), data}\n\t}\n\n\tif te, ok := e.(*TableMapEvent); ok {\n\t\tb.tables[te.TableID] = te\n\t}\n\n\t\/\/If MySQL restart, it may use the same table id for different tables.\n\t\/\/We must clear the table map before parsing new events.\n\t\/\/We have no better way to known whether the event is before or after restart,\n\t\/\/So we have to clear the table map on every rotate event.\n\tif h.EventType == ROTATE_EVENT {\n\t\tb.tables = make(map[uint64]*TableMapEvent)\n\t}\n\n\ts.ch <- &BinlogEvent{evData, h, e}\n\n\treturn nil\n}\n\nfunc (b *BinlogSyncer) newRowsEvent(h *EventHeader) *RowsEvent {\n\te := &RowsEvent{}\n\tif b.format.EventTypeHeaderLengths[h.EventType-1] == 6 {\n\t\te.tableIDSize = 4\n\t} else {\n\t\te.tableIDSize = 6\n\t}\n\n\te.needBitmap2 = false\n\te.tables = b.tables\n\n\tswitch h.EventType {\n\tcase WRITE_ROWS_EVENTv0:\n\t\te.Version = 0\n\tcase UPDATE_ROWS_EVENTv0:\n\t\te.Version = 0\n\tcase DELETE_ROWS_EVENTv0:\n\t\te.Version = 0\n\tcase WRITE_ROWS_EVENTv1:\n\t\te.Version = 1\n\tcase DELETE_ROWS_EVENTv1:\n\t\te.Version = 1\n\tcase UPDATE_ROWS_EVENTv1:\n\t\te.Version = 1\n\t\te.needBitmap2 = true\n\tcase WRITE_ROWS_EVENTv2:\n\t\te.Version = 2\n\tcase UPDATE_ROWS_EVENTv2:\n\t\te.Version = 2\n\t\te.needBitmap2 = true\n\tcase DELETE_ROWS_EVENTv2:\n\t\te.Version = 2\n\t}\n\n\treturn e\n}\n<commit_msg>GTID request will not block too<commit_after>package replication\n\nimport (\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"github.com\/satori\/go.uuid\"\n\t\"github.com\/siddontang\/go-mysql\/client\"\n\t. \"github.com\/siddontang\/go-mysql\/mysql\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype BinlogSyncer struct {\n\tc *client.Conn\n\tserverID uint32\n\n\thost string\n\tport uint16\n\tuser string\n\tpassword string\n\n\tmasterID uint32\n\n\twg sync.WaitGroup\n\n\tquit chan struct{}\n\n\tuseChecksum bool\n\n\tformat *FormatDescriptionEvent\n\n\ttables map[uint64]*TableMapEvent\n}\n\nfunc NewBinlogSyncer(serverID uint32) *BinlogSyncer {\n\tb := new(BinlogSyncer)\n\tb.serverID = serverID\n\n\tb.masterID = 0\n\n\tb.quit = make(chan struct{})\n\tb.useChecksum = false\n\n\tb.tables = make(map[uint64]*TableMapEvent)\n\n\treturn b\n}\n\nfunc (b *BinlogSyncer) Close() {\n\tclose(b.quit)\n\n\tif b.c != nil {\n\t\tb.c.Close()\n\t}\n\n\tb.wg.Wait()\n}\n\nfunc (b *BinlogSyncer) checksumUsed() error {\n\tif r, err := b.c.Execute(\"SHOW GLOBAL VARIABLES LIKE 'BINLOG_CHECKSUM'\"); err != nil {\n\t\treturn err\n\t} else {\n\t\ts, _ := r.GetString(0, 1)\n\t\tif s == \"\" || s == \"NONE\" {\n\t\t\tb.useChecksum = false\n\t\t} else {\n\t\t\tb.useChecksum = true\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (b *BinlogSyncer) GetMasterUUID() (uuid.UUID, error) {\n\tif r, err := b.c.Execute(\"SHOW GLOBAL VARIABLES LIKE 'SERVER_UUID'\"); err != nil {\n\t\treturn uuid.UUID{}, err\n\t} else {\n\t\ts, _ := r.GetString(0, 1)\n\t\tif s == \"\" || s == \"NONE\" {\n\t\t\treturn uuid.UUID{}, nil\n\t\t} else {\n\t\t\treturn uuid.FromString(s)\n\t\t}\n\t}\n}\n\nfunc (b *BinlogSyncer) RegisterSlave(host string, port uint16, user string, password string) error {\n\tb.host = host\n\tb.port = port\n\tb.user = user\n\tb.password = password\n\n\tvar err error\n\tb.c, err = client.Connect(fmt.Sprintf(\"%s:%d\", host, port), user, password, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/for mysql 5.6+, binlog has a crc32 checksum\n\t\/\/see https:\/\/github.com\/alibaba\/canal\/wiki\/BinlogChange(mysql5.6)\n\t\/\/before mysql 5.6, this will not work, don't matter.:-)\n\tif err = b.checksumUsed(); err != nil {\n\t\treturn err\n\t} else if b.useChecksum {\n\t\tif _, err = b.c.Execute(`SET @master_binlog_checksum=@@global.binlog_checksum`); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err = b.writeRegisterSlaveCommand(); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err = b.c.ReadOKPacket(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (b *BinlogSyncer) StartSync(pos Position) (*BinlogStreamer, error) {\n\terr := b.writeBinglogDumpCommand(pos)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := newBinlogStreamer()\n\n\tb.wg.Add(1)\n\tgo b.onStream(s)\n\n\treturn s, nil\n}\n\nfunc (b *BinlogSyncer) StartSyncGTID(gset *GTIDSet) (*BinlogStreamer, error) {\n\terr := b.writeBinlogDumpGTIDCommand(Position{\"\", 4}, gset.Encode())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/to do later\n\ts := newBinlogStreamer()\n\n\tb.wg.Add(1)\n\tgo b.onStream(s)\n\n\treturn s, nil\n}\n\nfunc (b *BinlogSyncer) writeBinglogDumpCommand(p Position) error {\n\t\/\/always start from position 4\n\tif p.Pos < 4 {\n\t\tp.Pos = 4\n\t}\n\n\tb.c.ResetSequence()\n\n\tdata := make([]byte, 4+1+4+2+4+len(p.Name))\n\n\tpos := 4\n\tdata[pos] = COM_BINLOG_DUMP\n\tpos++\n\n\tbinary.LittleEndian.PutUint32(data[pos:], p.Pos)\n\tpos += 4\n\n\tbinary.LittleEndian.PutUint16(data[pos:], BINLOG_DUMP_NEVER_STOP)\n\tpos += 2\n\n\tbinary.LittleEndian.PutUint32(data[pos:], b.serverID)\n\tpos += 4\n\n\tcopy(data[pos:], p.Name)\n\n\treturn b.c.WritePacket(data)\n}\n\nfunc (b *BinlogSyncer) writeBinlogDumpGTIDCommand(p Position, gtidData []byte) error {\n\tb.c.ResetSequence()\n\n\tdata := make([]byte, 4+1+2+4+4+len(p.Name)+8+4+len(gtidData))\n\tpos := 4\n\tdata[pos] = COM_BINLOG_DUMP_GTID\n\tpos++\n\n\tbinary.LittleEndian.PutUint16(data[pos:], 0)\n\tpos += 2\n\n\tbinary.LittleEndian.PutUint32(data[pos:], b.serverID)\n\tpos += 4\n\n\tbinary.LittleEndian.PutUint32(data[pos:], uint32(len(p.Name)))\n\tpos += 4\n\n\tn := copy(data[pos:], p.Name)\n\tpos += n\n\n\tbinary.LittleEndian.PutUint64(data[pos:], uint64(p.Pos))\n\tpos += 8\n\n\tbinary.LittleEndian.PutUint32(data[pos:], uint32(len(gtidData)))\n\tpos += 4\n\tn = copy(data[pos:], gtidData)\n\tpos += n\n\n\tdata = data[0:pos]\n\n\treturn b.c.WritePacket(data)\n}\n\nfunc (b *BinlogSyncer) writeRegisterSlaveCommand() error {\n\tb.c.ResetSequence()\n\n\tdata := make([]byte, 4+1+4+1+len(b.host)+1+len(b.user)+1+len(b.password)+2+4+4)\n\tpos := 4\n\n\tdata[pos] = COM_REGISTER_SLAVE\n\tpos++\n\n\tbinary.LittleEndian.PutUint32(data[pos:], b.serverID)\n\tpos += 4\n\n\tdata[pos] = uint8(len(b.host))\n\tpos++\n\tn := copy(data[pos:], b.host)\n\tpos += n\n\n\tdata[pos] = uint8(len(b.user))\n\tpos++\n\tn = copy(data[pos:], b.user)\n\tpos += n\n\n\tdata[pos] = uint8(len(b.password))\n\tpos++\n\tn = copy(data[pos:], b.password)\n\tpos += n\n\n\tbinary.LittleEndian.PutUint16(data[pos:], b.port)\n\tpos += 2\n\n\t\/\/replication rank, not used\n\tbinary.LittleEndian.PutUint32(data[pos:], 0)\n\tpos += 4\n\n\tbinary.LittleEndian.PutUint32(data[pos:], b.masterID)\n\n\treturn b.c.WritePacket(data)\n}\n\nfunc (b *BinlogSyncer) onStream(s *BinlogStreamer) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\ts.ech <- fmt.Errorf(\"Err: %v\\n Stack: %s\", e, Pstack())\n\t\t}\n\t\tb.wg.Done()\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase <-b.quit:\n\t\t\ts.ech <- ErrSyncClosed\n\t\t\treturn\n\t\tdefault:\n\t\t\tdata, err := b.c.ReadPacket()\n\t\t\tif err != nil {\n\t\t\t\ts.ech <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tswitch data[0] {\n\t\t\tcase OK_HEADER:\n\t\t\t\tif err = b.parseEvent(s, data); err != nil {\n\t\t\t\t\ts.ech <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase ERR_HEADER:\n\t\t\t\terr = b.c.HandleErrorPacket(data)\n\t\t\t\ts.ech <- err\n\t\t\tcase EOF_HEADER:\n\t\t\t\t\/\/no binlog now, sleep and wait a moment again\n\t\t\t\ttime.Sleep(500 * time.Millisecond)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (b *BinlogSyncer) parseEvent(s *BinlogStreamer, data []byte) error {\n\t\/\/skip 0x00\n\tdata = data[1:]\n\n\th := new(EventHeader)\n\terr := h.Decode(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdata = data[EventHeaderSize:]\n\teventLen := int(h.EventSize) - EventHeaderSize\n\n\tif len(data) < eventLen {\n\t\treturn fmt.Errorf(\"invalid data size %d in event %s, less event length %d\", len(data), h.EventType, eventLen)\n\t}\n\n\tif b.useChecksum {\n\t\t\/\/last 4 bytes is crc32, check later\n\t\tdata = data[0 : len(data)-4]\n\t}\n\n\tevData := data\n\n\tvar e Event\n\tswitch h.EventType {\n\tcase FORMAT_DESCRIPTION_EVENT:\n\t\tb.format = &FormatDescriptionEvent{}\n\t\te = b.format\n\tcase ROTATE_EVENT:\n\t\te = &RotateEvent{}\n\tcase QUERY_EVENT:\n\t\te = &QueryEvent{}\n\tcase XID_EVENT:\n\t\te = &XIDEvent{}\n\tcase TABLE_MAP_EVENT:\n\t\tte := &TableMapEvent{}\n\t\tif b.format.EventTypeHeaderLengths[TABLE_MAP_EVENT-1] == 6 {\n\t\t\tte.tableIDSize = 4\n\t\t} else {\n\t\t\tte.tableIDSize = 6\n\t\t}\n\t\te = te\n\tcase WRITE_ROWS_EVENTv0,\n\t\tUPDATE_ROWS_EVENTv0,\n\t\tDELETE_ROWS_EVENTv0,\n\t\tWRITE_ROWS_EVENTv1,\n\t\tDELETE_ROWS_EVENTv1,\n\t\tUPDATE_ROWS_EVENTv1,\n\t\tWRITE_ROWS_EVENTv2,\n\t\tUPDATE_ROWS_EVENTv2,\n\t\tDELETE_ROWS_EVENTv2:\n\t\te = b.newRowsEvent(h)\n\tcase ROWS_QUERY_EVENT:\n\t\te = &RowsQueryEvent{}\n\tcase GTID_EVENT:\n\t\te = >IDEvent{}\n\tdefault:\n\t\te = &GenericEvent{}\n\t}\n\n\tif err := e.Decode(data); err != nil {\n\t\treturn &EventError{h, err.Error(), data}\n\t}\n\n\tif te, ok := e.(*TableMapEvent); ok {\n\t\tb.tables[te.TableID] = te\n\t}\n\n\t\/\/If MySQL restart, it may use the same table id for different tables.\n\t\/\/We must clear the table map before parsing new events.\n\t\/\/We have no better way to known whether the event is before or after restart,\n\t\/\/So we have to clear the table map on every rotate event.\n\tif h.EventType == ROTATE_EVENT {\n\t\tb.tables = make(map[uint64]*TableMapEvent)\n\t}\n\n\ts.ch <- &BinlogEvent{evData, h, e}\n\n\treturn nil\n}\n\nfunc (b *BinlogSyncer) newRowsEvent(h *EventHeader) *RowsEvent {\n\te := &RowsEvent{}\n\tif b.format.EventTypeHeaderLengths[h.EventType-1] == 6 {\n\t\te.tableIDSize = 4\n\t} else {\n\t\te.tableIDSize = 6\n\t}\n\n\te.needBitmap2 = false\n\te.tables = b.tables\n\n\tswitch h.EventType {\n\tcase WRITE_ROWS_EVENTv0:\n\t\te.Version = 0\n\tcase UPDATE_ROWS_EVENTv0:\n\t\te.Version = 0\n\tcase DELETE_ROWS_EVENTv0:\n\t\te.Version = 0\n\tcase WRITE_ROWS_EVENTv1:\n\t\te.Version = 1\n\tcase DELETE_ROWS_EVENTv1:\n\t\te.Version = 1\n\tcase UPDATE_ROWS_EVENTv1:\n\t\te.Version = 1\n\t\te.needBitmap2 = true\n\tcase WRITE_ROWS_EVENTv2:\n\t\te.Version = 2\n\tcase UPDATE_ROWS_EVENTv2:\n\t\te.Version = 2\n\t\te.needBitmap2 = true\n\tcase DELETE_ROWS_EVENTv2:\n\t\te.Version = 2\n\t}\n\n\treturn e\n}\n<|endoftext|>"} {"text":"<commit_before>package controllers\n\nimport (\n\t\"regexp\"\n\t\"time\"\n\n\t\"chant\/app\/chatroom\"\n\t\"chant\/app\/models\"\n\n\t\"github.com\/revel\/revel\"\n)\n\nvar (\n\tmobile = regexp.MustCompile(\"\/Mobile|iPhone|Android|BlackBerry\/\")\n\ttimestamp = time.Now().Unix()\n)\n\n\/\/ Application ...\ntype Application struct {\n\t*revel.Controller\n}\n\n\/\/ Index handles `GET \/`\n\/\/ 1) すでにログインしてたらApp\/Indexをレンダリングする.\n\/\/ 2) ログインしていない場合、App\/Loginにリダイレクトする.\nfunc (c Application) Index(roomID, password string) revel.Result {\n\tif _, ok := c.Session[\"screen_name\"]; ok {\n\t\tuser, err := models.RestoreUserFromJSON(c.Session[\"user_raw\"])\n\t\tif err != nil {\n\t\t\t\/\/ とりあえず\n\t\t\treturn c.Redirect(\"\/login\")\n\t\t}\n\n\t\tif len(roomID) == 0 {\n\t\t\troomID = \"default\"\n\t\t}\n\t\troom := chatroom.GetRoomByPassword(roomID, password)\n\n\t\tConfig := ServerConfig{\n\t\t\tMyself: user,\n\t\t\tServer: map[string]interface{}{\n\t\t\t\t\"host\": getHost(),\n\t\t\t},\n\t\t\tAgent: map[string]interface{}{\n\t\t\t\t\"is_mobile\": mobile.MatchString(c.Request.UserAgent()),\n\t\t\t},\n\t\t\tEmojis: emojis,\n\t\t\tRoom: map[string]interface{}{\n\t\t\t\t\"name\": room.Name,\n\t\t\t\t\"token\": room.Token,\n\t\t\t},\n\t\t\tAPIs: map[string]interface{}{\n\t\t\t\t\"googlemaps\": revel.Config.StringDefault(\"googlemaps.token\", \"\"),\n\t\t\t},\n\t\t}\n\t\treturn c.Render(Config, timestamp)\n\t\t\/\/return c.Redirect(Room.Index)\n\t}\n\treturn c.Redirect(\"\/login\")\n}\n\n\/\/ Login handles `GET \/login`\n\/\/ Twitterログイン用の入り口Viewをレンダリングするだけ.\nfunc (c Application) Login() revel.Result {\n\treturn c.Render()\n}\n\n\/\/ Logout handles `GET \/logout`\nfunc (c Application) Logout() revel.Result {\n\tc.Session = revel.Session{}\n\treturn c.Redirect(\"\/login\")\n}\n\n\/\/ ServerConfig サーバサイドで取得したエニシングを\n\/\/ クライアントに埋め込みたいときにつかうサムシング.\ntype ServerConfig struct {\n\tMyself interface{} `json:\"myself\"`\n\tServer interface{} `json:\"server\"`\n\tAgent interface{} `json:\"agent\"`\n\tEmojis interface{} `json:\"emojis\"`\n\tRoom interface{} `json:\"room\"`\n\tAPIs interface{} `json:\"apis\"`\n}\n\nfunc getHost() string {\n\thost, _ := revel.Config.String(\"http.host\")\n\tport, _ := revel.Config.String(\"http.port\")\n\tif port != \"\" {\n\t\tport = \":\" + port\n\t}\n\treturn host + port\n}\n<commit_msg>Add tmp-imple of white\/black list<commit_after>package controllers\n\nimport (\n\t\"regexp\"\n\t\"time\"\n\n\t\"chant\/app\/chatroom\"\n\t\"chant\/app\/models\"\n\n\t\"github.com\/revel\/revel\"\n)\n\nvar (\n\tmobile = regexp.MustCompile(\"\/Mobile|iPhone|Android|BlackBerry\/\")\n\ttimestamp = time.Now().Unix()\n)\n\n\/\/ Application ...\ntype Application struct {\n\t*revel.Controller\n}\n\n\/\/ Index handles `GET \/`\n\/\/ 1) すでにログインしてたらApp\/Indexをレンダリングする.\n\/\/ 2) ログインしていない場合、App\/Loginにリダイレクトする.\nfunc (c Application) Index(roomID, password string) revel.Result {\n\tif _, ok := c.Session[\"screen_name\"]; ok {\n\n\t\tif !allowed(c.Session[\"screen_name\"]) {\n\t\t\treturn c.Redirect(\"\/denied\")\n\t\t}\n\n\t\tuser, err := models.RestoreUserFromJSON(c.Session[\"user_raw\"])\n\t\tif err != nil {\n\t\t\t\/\/ とりあえず\n\t\t\treturn c.Redirect(\"\/login\")\n\t\t}\n\n\t\tif len(roomID) == 0 {\n\t\t\troomID = \"default\"\n\t\t}\n\t\troom := chatroom.GetRoomByPassword(roomID, password)\n\n\t\tConfig := ServerConfig{\n\t\t\tMyself: user,\n\t\t\tServer: map[string]interface{}{\n\t\t\t\t\"host\": getHost(),\n\t\t\t},\n\t\t\tAgent: map[string]interface{}{\n\t\t\t\t\"is_mobile\": mobile.MatchString(c.Request.UserAgent()),\n\t\t\t},\n\t\t\tEmojis: emojis,\n\t\t\tRoom: map[string]interface{}{\n\t\t\t\t\"name\": room.Name,\n\t\t\t\t\"token\": room.Token,\n\t\t\t},\n\t\t\tAPIs: map[string]interface{}{\n\t\t\t\t\"googlemaps\": revel.Config.StringDefault(\"googlemaps.token\", \"\"),\n\t\t\t},\n\t\t}\n\t\treturn c.Render(Config, timestamp)\n\t\t\/\/return c.Redirect(Room.Index)\n\t}\n\treturn c.Redirect(\"\/login\")\n}\n\n\/\/ Login handles `GET \/login`\n\/\/ Twitterログイン用の入り口Viewをレンダリングするだけ.\nfunc (c Application) Login() revel.Result {\n\treturn c.Render()\n}\n\n\/\/ Logout handles `GET \/logout`\nfunc (c Application) Logout() revel.Result {\n\tc.Session = revel.Session{}\n\treturn c.Redirect(\"\/login\")\n}\n\n\/\/ ServerConfig サーバサイドで取得したエニシングを\n\/\/ クライアントに埋め込みたいときにつかうサムシング.\ntype ServerConfig struct {\n\tMyself interface{} `json:\"myself\"`\n\tServer interface{} `json:\"server\"`\n\tAgent interface{} `json:\"agent\"`\n\tEmojis interface{} `json:\"emojis\"`\n\tRoom interface{} `json:\"room\"`\n\tAPIs interface{} `json:\"apis\"`\n}\n\nfunc getHost() string {\n\thost, _ := revel.Config.String(\"http.host\")\n\tport, _ := revel.Config.String(\"http.port\")\n\tif port != \"\" {\n\t\tport = \":\" + port\n\t}\n\treturn host + port\n}\n\n\/\/ とりあえず\nfunc allowed(name string) bool {\n\t\/\/ とりあえず\n\tif blacklist(name) {\n\t\treturn false\n\t}\n\tif whitelist(name) {\n\t\treturn true\n\t}\n\t\/\/ とりあえず\n\treturn true\n}\n\n\/\/ とりあえず\nfunc whitelist(name string) bool {\n\treturn true\n}\n\n\/\/ とりあえず\nfunc blacklist(name string) bool {\n\tswitch name {\n\tcase \"excel0679\":\n\t\treturn true\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build !standalone,!containerd standalone,containerd\n\npackage main\n\nimport (\n\t\"errors\"\n\n\t\"github.com\/moby\/buildkit\/control\"\n\t\"github.com\/urfave\/cli\"\n)\n\nfunc appendFlags(f []cli.Flag) []cli.Flag {\n\treturn f\n}\n\nfunc newController(c *cli.Context, root string) (*control.Controller, error) {\n\treturn nil, errors.New(\"invalid build\")\n}\n<commit_msg>buildd: better error msg for unsupported builds<commit_after>\/\/ +build !standalone,!containerd standalone,containerd\n\npackage main\n\nimport (\n\t\"errors\"\n\n\t\"github.com\/moby\/buildkit\/control\"\n\t\"github.com\/urfave\/cli\"\n)\n\nfunc appendFlags(f []cli.Flag) []cli.Flag {\n\treturn f\n}\n\nfunc newController(c *cli.Context, root string) (*control.Controller, error) {\n\treturn nil, errors.New(\"need to build daemon with either standalone or containerd build tag\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2022 Google LLC. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage patch\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/apigee\/registry\/connection\"\n\t\"github.com\/apigee\/registry\/gapic\"\n\t\"github.com\/apigee\/registry\/rpc\"\n\t\"github.com\/apigee\/registry\/server\/registry\/names\"\n\t\"google.golang.org\/protobuf\/encoding\/protojson\"\n\t\"google.golang.org\/protobuf\/proto\"\n\t\"gopkg.in\/yaml.v3\"\n)\n\ntype Artifact struct {\n\tHeader `yaml:\",inline\"`\n\tData yaml.Node `yaml:\"data\"`\n}\n\n\/\/ ExportArtifact allows an artifact to be individually exported as a YAML file.\nfunc ExportArtifact(ctx context.Context, client *gapic.RegistryClient, message *rpc.Artifact) ([]byte, *Header, error) {\n\tif message.Contents == nil {\n\t\treq := &rpc.GetArtifactContentsRequest{\n\t\t\tName: message.Name,\n\t\t}\n\t\tbody, err := client.GetArtifactContents(ctx, req)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tmessage.Contents = body.Data\n\t}\n\tartifact, err := newArtifact(message)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tvar b bytes.Buffer\n\terr = yamlEncoder(&b).Encode(artifact)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn b.Bytes(), &artifact.Header, nil\n}\n\n\/\/ styleForYAML sets the style field on a tree of yaml.Nodes for YAML export.\nfunc styleForYAML(node *yaml.Node) {\n\tnode.Style = 0\n\tfor _, n := range node.Content {\n\t\tstyleForYAML(n)\n\t}\n}\n\n\/\/ styleForYAML sets the style field on a tree of yaml.Nodes for JSON export.\nfunc styleForJSON(node *yaml.Node) {\n\tswitch node.Kind {\n\tcase yaml.DocumentNode, yaml.SequenceNode, yaml.MappingNode:\n\t\tnode.Style = yaml.FlowStyle\n\tcase yaml.ScalarNode:\n\t\tswitch node.Tag {\n\t\tcase \"!!str\":\n\t\t\tnode.Style = yaml.DoubleQuotedStyle\n\t\tdefault:\n\t\t\tnode.Style = 0\n\t\t}\n\tcase yaml.AliasNode:\n\tdefault:\n\t}\n\tfor _, n := range node.Content {\n\t\tstyleForJSON(n)\n\t}\n}\n\nfunc removeIdAndKind(node *yaml.Node) *yaml.Node {\n\tif node.Kind == yaml.MappingNode {\n\t\tcontent := make([]*yaml.Node, 0)\n\t\tfor i := 0; i < len(node.Content); i += 2 {\n\t\t\tk := node.Content[i]\n\t\t\tif k.Value != \"id\" && k.Value != \"kind\" {\n\t\t\t\tcontent = append(content, node.Content[i])\n\t\t\t\tcontent = append(content, node.Content[i+1])\n\t\t\t}\n\t\t}\n\t\tnode.Content = content\n\t}\n\treturn node\n}\n\nfunc newArtifact(message *rpc.Artifact) (*Artifact, error) {\n\tartifactName, err := names.ParseArtifact(message.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Unmarshal the serialized protobuf containing the artifact content.\n\tm, err := protoMessageForMimeType(message.MimeType)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = proto.Unmarshal(message.Contents, m); err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Marshal the artifact content as JSON using the protobuf marshaller.\n\tvar s []byte\n\ts, err = protojson.MarshalOptions{\n\t\tUseEnumNumbers: false,\n\t\tEmitUnpopulated: true,\n\t\tIndent: \" \",\n\t\tUseProtoNames: false,\n\t}.Marshal(m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Unmarshal the JSON with yaml.v3 so that we can re-marshal it as YAML.\n\tvar doc yaml.Node\n\terr = yaml.Unmarshal([]byte(s), &doc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ The top-level node is a \"document\" node. We need to remove this before marshalling.\n\tif doc.Kind != yaml.DocumentNode || len(doc.Content) != 1 {\n\t\treturn nil, errors.New(\"failed to unmarshal artifact\")\n\t}\n\tnode := doc.Content[0]\n\t\/\/ Restyle the YAML representation so that it will be serialized with YAML defaults.\n\tstyleForYAML(node)\n\t\/\/ We exclude the id and kind fields from YAML serializations.\n\tnode = removeIdAndKind(node)\n\t\/\/ Wrap the artifact for YAML export.\n\treturn &Artifact{\n\t\tHeader: Header{\n\t\t\tApiVersion: RegistryV1,\n\t\t\tKind: kindForMimeType(message.MimeType),\n\t\t\tMetadata: Metadata{\n\t\t\t\tName: artifactName.ArtifactID(),\n\t\t\t},\n\t\t},\n\t\tData: *node,\n\t}, nil\n}\n\nfunc applyArtifactPatchBytes(ctx context.Context, client connection.Client, bytes []byte, parent string) error {\n\tvar artifact Artifact\n\terr := yaml.Unmarshal(bytes, &artifact)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn applyArtifactPatch(ctx, client, &artifact, parent)\n}\n\nfunc applyArtifactPatch(ctx context.Context, client connection.Client, content *Artifact, parent string) error {\n\t\/\/ Restyle the YAML representation so that yaml.Marshal will marshal it as JSON.\n\tstyleForJSON(&content.Data)\n\t\/\/ Marshal the YAML representation into the JSON serialization.\n\tj, err := yaml.Marshal(content.Data)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Unmarshal the JSON serialization into the message struct.\n\tvar m proto.Message\n\tm, err = protoMessageForKind(content.Kind)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = protojson.Unmarshal(j, m)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Marshal the message struct to bytes.\n\tbytes, err := proto.Marshal(m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tartifact := &rpc.Artifact{\n\t\tName: fmt.Sprintf(\"%s\/artifacts\/%s\", parent, content.Header.Metadata.Name),\n\t\tMimeType: MimeTypeForKind(content.Kind),\n\t\tContents: bytes,\n\t}\n\treq := &rpc.CreateArtifactRequest{\n\t\tParent: parent,\n\t\tArtifactId: content.Header.Metadata.Name,\n\t\tArtifact: artifact,\n\t}\n\t_, err = client.CreateArtifact(ctx, req)\n\tif err != nil {\n\t\treq := &rpc.ReplaceArtifactRequest{\n\t\t\tArtifact: artifact,\n\t\t}\n\t\t_, err = client.ReplaceArtifact(ctx, req)\n\t}\n\treturn err\n}\n\n\/\/ kindForMimeType returns the message name to be used as the \"kind\" of the artifact.\nfunc kindForMimeType(mimeType string) string {\n\tparts := strings.Split(mimeType, \".\")\n\treturn parts[len(parts)-1]\n}\n\n\/\/ protoMessageForMimeType returns an instance of the message that represents the specified type.\nfunc protoMessageForMimeType(mimeType string) (proto.Message, error) {\n\tmessageType := strings.TrimPrefix(mimeType, \"application\/octet-stream;type=\")\n\tfor k, v := range artifactMessageTypes {\n\t\tif k == messageType {\n\t\t\treturn v(), nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"unsupported message type %s\", messageType)\n}\n\n\/\/ protoMessageForMimeType returns an instance of the message that represents the specified kind.\nfunc protoMessageForKind(kind string) (proto.Message, error) {\n\tfor k, v := range artifactMessageTypes {\n\t\tif strings.HasSuffix(k, \".\"+kind) {\n\t\t\treturn v(), nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"unsupported kind %s\", kind)\n}\n\n\/\/ MimeTypeForKind returns the mime type that corresponds to a kind.\nfunc MimeTypeForKind(kind string) string {\n\tfor k := range artifactMessageTypes {\n\t\tif strings.HasSuffix(k, \".\"+kind) {\n\t\t\treturn fmt.Sprintf(\"application\/octet-stream;type=%s\", k)\n\t\t}\n\t}\n\treturn \"application\/octet-stream\"\n}\n\n\/\/ messageFactory represents functions that construct message structs.\ntype messageFactory func() proto.Message\n\n\/\/ artifactMessageTypes is the single source of truth for artifact types that can be represented in YAML.\nvar artifactMessageTypes map[string]messageFactory = map[string]messageFactory{\n\t\"google.cloud.apigeeregistry.applications.v1alpha1.StyleGuide\": func() proto.Message { return new(rpc.StyleGuide) },\n\t\"google.cloud.apigeeregistry.v1.apihub.ApiSpecExtensionList\": func() proto.Message { return new(rpc.ApiSpecExtensionList) },\n\t\"google.cloud.apigeeregistry.v1.apihub.DisplaySettings\": func() proto.Message { return new(rpc.DisplaySettings) },\n\t\"google.cloud.apigeeregistry.v1.apihub.Lifecycle\": func() proto.Message { return new(rpc.Lifecycle) },\n\t\"google.cloud.apigeeregistry.v1.apihub.ReferenceList\": func() proto.Message { return new(rpc.ReferenceList) },\n\t\"google.cloud.apigeeregistry.v1.apihub.TaxonomyList\": func() proto.Message { return new(rpc.TaxonomyList) },\n\t\"google.cloud.apigeeregistry.v1.controller.Manifest\": func() proto.Message { return new(rpc.Manifest) },\n\t\"google.cloud.apigeeregistry.v1.scoring.Score\": func() proto.Message { return new(rpc.Score) },\n\t\"google.cloud.apigeeregistry.v1.scoring.ScoreDefinition\": func() proto.Message { return new(rpc.ScoreDefinition) },\n\t\"google.cloud.apigeeregistry.v1.scoring.ScoreCard\": func() proto.Message { return new(rpc.ScoreCard) },\n\t\"google.cloud.apigeeregistry.v1.scoring.ScoreCardDefinition\": func() proto.Message { return new(rpc.ScoreCardDefinition) },\n}\n<commit_msg>Fix for missing Id in artifacts through `apply` command (#597)<commit_after>\/\/ Copyright 2022 Google LLC. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage patch\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/apigee\/registry\/connection\"\n\t\"github.com\/apigee\/registry\/gapic\"\n\t\"github.com\/apigee\/registry\/rpc\"\n\t\"github.com\/apigee\/registry\/server\/registry\/names\"\n\t\"google.golang.org\/protobuf\/encoding\/protojson\"\n\t\"google.golang.org\/protobuf\/proto\"\n\t\"gopkg.in\/yaml.v3\"\n)\n\ntype Artifact struct {\n\tHeader `yaml:\",inline\"`\n\tData yaml.Node `yaml:\"data\"`\n}\n\n\/\/ ExportArtifact allows an artifact to be individually exported as a YAML file.\nfunc ExportArtifact(ctx context.Context, client *gapic.RegistryClient, message *rpc.Artifact) ([]byte, *Header, error) {\n\tif message.Contents == nil {\n\t\treq := &rpc.GetArtifactContentsRequest{\n\t\t\tName: message.Name,\n\t\t}\n\t\tbody, err := client.GetArtifactContents(ctx, req)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tmessage.Contents = body.Data\n\t}\n\tartifact, err := newArtifact(message)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tvar b bytes.Buffer\n\terr = yamlEncoder(&b).Encode(artifact)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn b.Bytes(), &artifact.Header, nil\n}\n\n\/\/ styleForYAML sets the style field on a tree of yaml.Nodes for YAML export.\nfunc styleForYAML(node *yaml.Node) {\n\tnode.Style = 0\n\tfor _, n := range node.Content {\n\t\tstyleForYAML(n)\n\t}\n}\n\n\/\/ styleForYAML sets the style field on a tree of yaml.Nodes for JSON export.\nfunc styleForJSON(node *yaml.Node) {\n\tswitch node.Kind {\n\tcase yaml.DocumentNode, yaml.SequenceNode, yaml.MappingNode:\n\t\tnode.Style = yaml.FlowStyle\n\tcase yaml.ScalarNode:\n\t\tswitch node.Tag {\n\t\tcase \"!!str\":\n\t\t\tnode.Style = yaml.DoubleQuotedStyle\n\t\tdefault:\n\t\t\tnode.Style = 0\n\t\t}\n\tcase yaml.AliasNode:\n\tdefault:\n\t}\n\tfor _, n := range node.Content {\n\t\tstyleForJSON(n)\n\t}\n}\n\nfunc removeIdAndKind(node *yaml.Node) *yaml.Node {\n\tif node.Kind == yaml.MappingNode {\n\t\tcontent := make([]*yaml.Node, 0)\n\t\tfor i := 0; i < len(node.Content); i += 2 {\n\t\t\tk := node.Content[i]\n\t\t\tif k.Value != \"id\" && k.Value != \"kind\" {\n\t\t\t\tcontent = append(content, node.Content[i])\n\t\t\t\tcontent = append(content, node.Content[i+1])\n\t\t\t}\n\t\t}\n\t\tnode.Content = content\n\t}\n\treturn node\n}\n\nfunc newArtifact(message *rpc.Artifact) (*Artifact, error) {\n\tartifactName, err := names.ParseArtifact(message.Name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Unmarshal the serialized protobuf containing the artifact content.\n\tm, err := protoMessageForMimeType(message.MimeType)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = proto.Unmarshal(message.Contents, m); err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Marshal the artifact content as JSON using the protobuf marshaller.\n\tvar s []byte\n\ts, err = protojson.MarshalOptions{\n\t\tUseEnumNumbers: false,\n\t\tEmitUnpopulated: true,\n\t\tIndent: \" \",\n\t\tUseProtoNames: false,\n\t}.Marshal(m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ Unmarshal the JSON with yaml.v3 so that we can re-marshal it as YAML.\n\tvar doc yaml.Node\n\terr = yaml.Unmarshal([]byte(s), &doc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ The top-level node is a \"document\" node. We need to remove this before marshalling.\n\tif doc.Kind != yaml.DocumentNode || len(doc.Content) != 1 {\n\t\treturn nil, errors.New(\"failed to unmarshal artifact\")\n\t}\n\tnode := doc.Content[0]\n\t\/\/ Restyle the YAML representation so that it will be serialized with YAML defaults.\n\tstyleForYAML(node)\n\t\/\/ We exclude the id and kind fields from YAML serializations.\n\tnode = removeIdAndKind(node)\n\t\/\/ Wrap the artifact for YAML export.\n\treturn &Artifact{\n\t\tHeader: Header{\n\t\t\tApiVersion: RegistryV1,\n\t\t\tKind: kindForMimeType(message.MimeType),\n\t\t\tMetadata: Metadata{\n\t\t\t\tName: artifactName.ArtifactID(),\n\t\t\t},\n\t\t},\n\t\tData: *node,\n\t}, nil\n}\n\nfunc applyArtifactPatchBytes(ctx context.Context, client connection.Client, bytes []byte, parent string) error {\n\tvar artifact Artifact\n\terr := yaml.Unmarshal(bytes, &artifact)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn applyArtifactPatch(ctx, client, &artifact, parent)\n}\n\nfunc applyArtifactPatch(ctx context.Context, client connection.Client, content *Artifact, parent string) error {\n\t\/\/ Restyle the YAML representation so that yaml.Marshal will marshal it as JSON.\n\tstyleForJSON(&content.Data)\n\t\/\/ Marshal the YAML representation into the JSON serialization.\n\tj, err := yaml.Marshal(content.Data)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Populate Id and Kind fields in the contents of the artifact\n\tj, err = populateIdAndKind(j, content.Kind, content.Metadata.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Unmarshal the JSON serialization into the message struct.\n\tvar m proto.Message\n\tm, err = protoMessageForKind(content.Kind)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = protojson.Unmarshal(j, m)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ Marshal the message struct to bytes.\n\tbytes, err := proto.Marshal(m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tartifact := &rpc.Artifact{\n\t\tName: fmt.Sprintf(\"%s\/artifacts\/%s\", parent, content.Header.Metadata.Name),\n\t\tMimeType: MimeTypeForKind(content.Kind),\n\t\tContents: bytes,\n\t}\n\treq := &rpc.CreateArtifactRequest{\n\t\tParent: parent,\n\t\tArtifactId: content.Header.Metadata.Name,\n\t\tArtifact: artifact,\n\t}\n\t_, err = client.CreateArtifact(ctx, req)\n\tif err != nil {\n\t\treq := &rpc.ReplaceArtifactRequest{\n\t\t\tArtifact: artifact,\n\t\t}\n\t\t_, err = client.ReplaceArtifact(ctx, req)\n\t}\n\treturn err\n}\n\n\/\/ populateIdAndKind inserts the \"id\" and \"kind\" fields in the supplied json bytes.\nfunc populateIdAndKind(bytes []byte, kind, id string) ([]byte, error) {\n\tvar jsonData map[string]interface{}\n\terr := json.Unmarshal(bytes, &jsonData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tjsonData[\"id\"] = id\n\tjsonData[\"kind\"] = kind\n\n\trBytes, err := json.Marshal(jsonData)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn rBytes, nil\n}\n\n\/\/ kindForMimeType returns the message name to be used as the \"kind\" of the artifact.\nfunc kindForMimeType(mimeType string) string {\n\tparts := strings.Split(mimeType, \".\")\n\treturn parts[len(parts)-1]\n}\n\n\/\/ protoMessageForMimeType returns an instance of the message that represents the specified type.\nfunc protoMessageForMimeType(mimeType string) (proto.Message, error) {\n\tmessageType := strings.TrimPrefix(mimeType, \"application\/octet-stream;type=\")\n\tfor k, v := range artifactMessageTypes {\n\t\tif k == messageType {\n\t\t\treturn v(), nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"unsupported message type %s\", messageType)\n}\n\n\/\/ protoMessageForMimeType returns an instance of the message that represents the specified kind.\nfunc protoMessageForKind(kind string) (proto.Message, error) {\n\tfor k, v := range artifactMessageTypes {\n\t\tif strings.HasSuffix(k, \".\"+kind) {\n\t\t\treturn v(), nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"unsupported kind %s\", kind)\n}\n\n\/\/ MimeTypeForKind returns the mime type that corresponds to a kind.\nfunc MimeTypeForKind(kind string) string {\n\tfor k := range artifactMessageTypes {\n\t\tif strings.HasSuffix(k, \".\"+kind) {\n\t\t\treturn fmt.Sprintf(\"application\/octet-stream;type=%s\", k)\n\t\t}\n\t}\n\treturn \"application\/octet-stream\"\n}\n\n\/\/ messageFactory represents functions that construct message structs.\ntype messageFactory func() proto.Message\n\n\/\/ artifactMessageTypes is the single source of truth for artifact types that can be represented in YAML.\nvar artifactMessageTypes map[string]messageFactory = map[string]messageFactory{\n\t\"google.cloud.apigeeregistry.applications.v1alpha1.StyleGuide\": func() proto.Message { return new(rpc.StyleGuide) },\n\t\"google.cloud.apigeeregistry.v1.apihub.ApiSpecExtensionList\": func() proto.Message { return new(rpc.ApiSpecExtensionList) },\n\t\"google.cloud.apigeeregistry.v1.apihub.DisplaySettings\": func() proto.Message { return new(rpc.DisplaySettings) },\n\t\"google.cloud.apigeeregistry.v1.apihub.Lifecycle\": func() proto.Message { return new(rpc.Lifecycle) },\n\t\"google.cloud.apigeeregistry.v1.apihub.ReferenceList\": func() proto.Message { return new(rpc.ReferenceList) },\n\t\"google.cloud.apigeeregistry.v1.apihub.TaxonomyList\": func() proto.Message { return new(rpc.TaxonomyList) },\n\t\"google.cloud.apigeeregistry.v1.controller.Manifest\": func() proto.Message { return new(rpc.Manifest) },\n\t\"google.cloud.apigeeregistry.v1.scoring.Score\": func() proto.Message { return new(rpc.Score) },\n\t\"google.cloud.apigeeregistry.v1.scoring.ScoreDefinition\": func() proto.Message { return new(rpc.ScoreDefinition) },\n\t\"google.cloud.apigeeregistry.v1.scoring.ScoreCard\": func() proto.Message { return new(rpc.ScoreCard) },\n\t\"google.cloud.apigeeregistry.v1.scoring.ScoreCardDefinition\": func() proto.Message { return new(rpc.ScoreCardDefinition) },\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ bridge bridges between IRC clients (RFC1459) and RobustIRC servers.\n\/\/\n\/\/ Bridge instances are supposed to be long-running, and ideally as close to the\n\/\/ IRC client as possible, e.g. on the same machine. When running on the same\n\/\/ machine, there should not be any network problems between the IRC client and\n\/\/ the bridge. Network problems between the bridge and a RobustIRC network are\n\/\/ handled transparently.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com\/robustirc\/robustirc\/cmd\/robustirc-bridge\/robustsession\"\n\n\t\"github.com\/sorcix\/irc\"\n)\n\nvar (\n\tnetwork = flag.String(\"network\",\n\t\t\"\",\n\t\t`DNS name to connect to (e.g. \"robustirc.net\"). The _robustirc._tcp SRV record must be present.`)\n\n\tlisten = flag.String(\"listen\",\n\t\t\"localhost:6667\",\n\t\t\"host:port to listen on for IRC connections\")\n\n\tsocks = flag.String(\"socks\", \"\", \"host:port to listen on for SOCKS5 connections\")\n\n\ttlsCAFile = flag.String(\"tls_ca_file\",\n\t\t\"\",\n\t\t\"Use the specified file as trusted CA instead of the system CAs. Useful for testing.\")\n)\n\n\/\/ TODO(secure): persistent state:\n\/\/ - the last known server(s) in the network. added to *servers\n\/\/ - for resuming sessions (later): the last seen message id, perhaps setup messages (JOINs, MODEs, …)\n\/\/ for hosted mode, this state is stored per-nickname, ideally encrypted with password\n\ntype bridge struct {\n\tnetwork string\n}\n\nfunc newBridge(network string) *bridge {\n\treturn &bridge{\n\t\tnetwork: network,\n\t}\n}\n\ntype ircsession struct {\n\tMessages chan irc.Message\n\tErrors chan error\n\n\tconn *irc.Conn\n}\n\nfunc newIrcsession(conn net.Conn) *ircsession {\n\ts := &ircsession{\n\t\tMessages: make(chan irc.Message),\n\t\tErrors: make(chan error),\n\t\tconn: irc.NewConn(conn),\n\t}\n\tgo s.getMessages()\n\treturn s\n}\n\nfunc (s *ircsession) Send(msg []byte) error {\n\tif _, err := s.conn.Write(msg); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (s *ircsession) Delete(killmsg string) error {\n\tdefer s.conn.Close()\n\n\tif killmsg != \"\" {\n\t\treturn s.conn.Encode(&irc.Message{\n\t\t\tCommand: \"ERROR\",\n\t\t\tTrailing: killmsg,\n\t\t})\n\t}\n\n\treturn nil\n}\n\nfunc (s *ircsession) getMessages() {\n\tfor {\n\t\tircmsg, err := s.conn.Decode()\n\t\tif err != nil {\n\t\t\ts.Errors <- err\n\t\t\treturn\n\t\t}\n\t\t\/\/ Skip invalid lines (to prevent nil pointer dereferences).\n\t\tif ircmsg == nil {\n\t\t\tcontinue\n\t\t}\n\t\tlog.Printf(\"<-irc: %q\\n\", ircmsg.Bytes())\n\t\ts.Messages <- *ircmsg\n\t}\n}\n\nfunc (p *bridge) handleIRC(conn net.Conn) {\n\tvar quitmsg, killmsg string\n\tvar waitingForPingReply bool\n\n\tircSession := newIrcsession(conn)\n\n\tdefer func() {\n\t\tif err := ircSession.Delete(killmsg); err != nil {\n\t\t\tlog.Printf(\"Could not properly delete IRC session: %v\\n\", err)\n\t\t}\n\t}()\n\n\trobustSession, err := robustsession.Create(p.network, *tlsCAFile)\n\tif err != nil {\n\t\tkillmsg = fmt.Sprintf(\"Could not create RobustIRC session: %v\", err)\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tlog.Printf(\"deleting robustsession…\\n\")\n\t\tif err := robustSession.Delete(quitmsg); err != nil {\n\t\t\tlog.Printf(\"Could not properly delete RobustIRC session: %v\\n\", err)\n\t\t}\n\t}()\n\n\tvar sendIRC, sendRobust []byte\n\n\tkeepaliveToNetwork := time.After(1 * time.Minute)\n\tkeepaliveToClient := time.After(1 * time.Minute)\n\tfor {\n\t\t\/\/ These two variables contain the messages to be sent to IRC\/RobustIRC\n\t\t\/\/ from the previous iteration of the state machine. That way, there is\n\t\t\/\/ only one place where the error handling happens.\n\t\tif sendIRC != nil {\n\t\t\tif err := ircSession.Send(sendIRC); err != nil {\n\t\t\t\tquitmsg = fmt.Sprintf(\"Bridge: Send to IRC client: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsendIRC = nil\n\t\t}\n\t\tif sendRobust != nil {\n\t\t\tif err := robustSession.PostMessage(string(sendRobust)); err != nil {\n\t\t\t\tkillmsg = fmt.Sprintf(\"Could not post message to RobustIRC: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tkeepaliveToNetwork = time.After(1 * time.Minute)\n\t\t\tkeepaliveToClient = time.After(1 * time.Minute)\n\t\t\tsendRobust = nil\n\t\t}\n\n\t\tselect {\n\t\tcase msg := <-robustSession.Messages:\n\t\t\tircmsg := irc.ParseMessage(msg)\n\t\t\tif ircmsg.Command == irc.PONG && len(ircmsg.Params) > 0 && ircmsg.Params[0] == \"keepalive\" {\n\t\t\t\tlog.Printf(\"Swallowing keepalive PONG from server to avoid confusing the IRC client.\\n\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsendIRC = []byte(msg)\n\n\t\tcase err := <-robustSession.Errors:\n\t\t\tkillmsg = fmt.Sprintf(\"RobustIRC session error: %v\", err)\n\t\t\treturn\n\n\t\tcase ircmsg := <-ircSession.Messages:\n\t\t\tswitch ircmsg.Command {\n\t\t\tcase irc.PONG:\n\t\t\t\twaitingForPingReply = false\n\n\t\t\tcase irc.PING:\n\t\t\t\tsendIRC = (&irc.Message{\n\t\t\t\t\tPrefix: robustSession.IrcPrefix,\n\t\t\t\t\tCommand: irc.PONG,\n\t\t\t\t\tParams: []string{ircmsg.Params[0]},\n\t\t\t\t}).Bytes()\n\n\t\t\tcase irc.QUIT:\n\t\t\t\tquitmsg = ircmsg.Trailing\n\t\t\t\treturn\n\n\t\t\tdefault:\n\t\t\t\tsendRobust = ircmsg.Bytes()\n\t\t\t}\n\n\t\tcase err := <-ircSession.Errors:\n\t\t\tquitmsg = fmt.Sprintf(\"Bridge: Read from IRC client: %v\", err)\n\t\t\treturn\n\n\t\tcase <-keepaliveToClient:\n\t\t\t\/\/ After no traffic in either direction for 1 minute, we send a PING\n\t\t\t\/\/ message. If a PING message was already sent, this means that we did\n\t\t\t\/\/ not receive a PONG message, so we close the connection with a\n\t\t\t\/\/ timeout.\n\t\t\tif waitingForPingReply {\n\t\t\t\tquitmsg = \"Bridge: ping timeout\"\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsendIRC = (&irc.Message{\n\t\t\t\tPrefix: robustSession.IrcPrefix,\n\t\t\t\tCommand: irc.PING,\n\t\t\t\tParams: []string{\"robustirc.bridge\"},\n\t\t\t}).Bytes()\n\t\t\twaitingForPingReply = true\n\n\t\tcase <-keepaliveToNetwork:\n\t\t\tsendRobust = []byte(\"PING keepalive\")\n\t\t\tkeepaliveToNetwork = time.After(1 * time.Minute)\n\t\t}\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\trand.Seed(time.Now().Unix())\n\n\tif *network == \"\" && *socks == \"\" {\n\t\tlog.Fatal(\"You must specify either -network or -socks.\")\n\t}\n\n\t\/\/ SOCKS and IRC\n\tif *socks != \"\" && *network != \"\" {\n\t\tgo func() {\n\t\t\tif err := listenAndServeSocks(*socks); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}()\n\t}\n\n\t\/\/ SOCKS only\n\tif *socks != \"\" && *network == \"\" {\n\t\tlog.Fatal(listenAndServeSocks(*socks))\n\t}\n\n\t\/\/ IRC\n\tif *network != \"\" {\n\t\tp := newBridge(*network)\n\n\t\tln, err := net.Listen(\"tcp\", *listen)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tlog.Printf(\"RobustIRC IRC bridge listening on %q\\n\", *listen)\n\n\t\tfor {\n\t\t\tconn, err := ln.Accept()\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Could not accept IRC client connection: %v\\n\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgo p.handleIRC(conn)\n\t\t}\n\t}\n}\n<commit_msg>ircsession: close and read channels to avoid hanging goroutines<commit_after>\/\/ bridge bridges between IRC clients (RFC1459) and RobustIRC servers.\n\/\/\n\/\/ Bridge instances are supposed to be long-running, and ideally as close to the\n\/\/ IRC client as possible, e.g. on the same machine. When running on the same\n\/\/ machine, there should not be any network problems between the IRC client and\n\/\/ the bridge. Network problems between the bridge and a RobustIRC network are\n\/\/ handled transparently.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com\/robustirc\/robustirc\/cmd\/robustirc-bridge\/robustsession\"\n\n\t\"github.com\/sorcix\/irc\"\n)\n\nvar (\n\tnetwork = flag.String(\"network\",\n\t\t\"\",\n\t\t`DNS name to connect to (e.g. \"robustirc.net\"). The _robustirc._tcp SRV record must be present.`)\n\n\tlisten = flag.String(\"listen\",\n\t\t\"localhost:6667\",\n\t\t\"host:port to listen on for IRC connections\")\n\n\tsocks = flag.String(\"socks\", \"\", \"host:port to listen on for SOCKS5 connections\")\n\n\ttlsCAFile = flag.String(\"tls_ca_file\",\n\t\t\"\",\n\t\t\"Use the specified file as trusted CA instead of the system CAs. Useful for testing.\")\n)\n\n\/\/ TODO(secure): persistent state:\n\/\/ - the last known server(s) in the network. added to *servers\n\/\/ - for resuming sessions (later): the last seen message id, perhaps setup messages (JOINs, MODEs, …)\n\/\/ for hosted mode, this state is stored per-nickname, ideally encrypted with password\n\ntype bridge struct {\n\tnetwork string\n}\n\nfunc newBridge(network string) *bridge {\n\treturn &bridge{\n\t\tnetwork: network,\n\t}\n}\n\ntype ircsession struct {\n\tMessages chan irc.Message\n\tErrors chan error\n\n\tconn *irc.Conn\n}\n\nfunc newIrcsession(conn net.Conn) *ircsession {\n\ts := &ircsession{\n\t\tMessages: make(chan irc.Message),\n\t\tErrors: make(chan error),\n\t\tconn: irc.NewConn(conn),\n\t}\n\tgo s.getMessages()\n\treturn s\n}\n\nfunc (s *ircsession) Send(msg []byte) error {\n\tif _, err := s.conn.Write(msg); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (s *ircsession) Delete(killmsg string) error {\n\tdefer s.conn.Close()\n\t\/\/ Read all remaining values to ensure nobody is blocked on sending.\n\tdefer func() {\n\t\tgo func() {\n\t\t\tfor _ = range s.Messages {\n\t\t\t}\n\t\t}()\n\t\tgo func() {\n\t\t\tfor _ = range s.Errors {\n\t\t\t}\n\t\t}()\n\t}()\n\n\tif killmsg != \"\" {\n\t\treturn s.conn.Encode(&irc.Message{\n\t\t\tCommand: \"ERROR\",\n\t\t\tTrailing: killmsg,\n\t\t})\n\t}\n\n\treturn nil\n}\n\nfunc (s *ircsession) getMessages() {\n\tdefer close(s.Messages)\n\tdefer close(s.Errors)\n\tfor {\n\t\tircmsg, err := s.conn.Decode()\n\t\tif err != nil {\n\t\t\ts.Errors <- err\n\t\t\treturn\n\t\t}\n\t\t\/\/ Skip invalid lines (to prevent nil pointer dereferences).\n\t\tif ircmsg == nil {\n\t\t\tcontinue\n\t\t}\n\t\tlog.Printf(\"<-irc: %q\\n\", ircmsg.Bytes())\n\t\ts.Messages <- *ircmsg\n\t}\n}\n\nfunc (p *bridge) handleIRC(conn net.Conn) {\n\tvar quitmsg, killmsg string\n\tvar waitingForPingReply bool\n\n\tircSession := newIrcsession(conn)\n\n\tdefer func() {\n\t\tif err := ircSession.Delete(killmsg); err != nil {\n\t\t\tlog.Printf(\"Could not properly delete IRC session: %v\\n\", err)\n\t\t}\n\t}()\n\n\trobustSession, err := robustsession.Create(p.network, *tlsCAFile)\n\tif err != nil {\n\t\tkillmsg = fmt.Sprintf(\"Could not create RobustIRC session: %v\", err)\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\tlog.Printf(\"deleting robustsession…\\n\")\n\t\tif err := robustSession.Delete(quitmsg); err != nil {\n\t\t\tlog.Printf(\"Could not properly delete RobustIRC session: %v\\n\", err)\n\t\t}\n\t}()\n\n\tvar sendIRC, sendRobust []byte\n\n\tkeepaliveToNetwork := time.After(1 * time.Minute)\n\tkeepaliveToClient := time.After(1 * time.Minute)\n\tfor {\n\t\t\/\/ These two variables contain the messages to be sent to IRC\/RobustIRC\n\t\t\/\/ from the previous iteration of the state machine. That way, there is\n\t\t\/\/ only one place where the error handling happens.\n\t\tif sendIRC != nil {\n\t\t\tif err := ircSession.Send(sendIRC); err != nil {\n\t\t\t\tquitmsg = fmt.Sprintf(\"Bridge: Send to IRC client: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsendIRC = nil\n\t\t}\n\t\tif sendRobust != nil {\n\t\t\tif err := robustSession.PostMessage(string(sendRobust)); err != nil {\n\t\t\t\tkillmsg = fmt.Sprintf(\"Could not post message to RobustIRC: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tkeepaliveToNetwork = time.After(1 * time.Minute)\n\t\t\tkeepaliveToClient = time.After(1 * time.Minute)\n\t\t\tsendRobust = nil\n\t\t}\n\n\t\tselect {\n\t\tcase msg := <-robustSession.Messages:\n\t\t\tircmsg := irc.ParseMessage(msg)\n\t\t\tif ircmsg.Command == irc.PONG && len(ircmsg.Params) > 0 && ircmsg.Params[0] == \"keepalive\" {\n\t\t\t\tlog.Printf(\"Swallowing keepalive PONG from server to avoid confusing the IRC client.\\n\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsendIRC = []byte(msg)\n\n\t\tcase err := <-robustSession.Errors:\n\t\t\tkillmsg = fmt.Sprintf(\"RobustIRC session error: %v\", err)\n\t\t\treturn\n\n\t\tcase ircmsg := <-ircSession.Messages:\n\t\t\tswitch ircmsg.Command {\n\t\t\tcase irc.PONG:\n\t\t\t\twaitingForPingReply = false\n\n\t\t\tcase irc.PING:\n\t\t\t\tsendIRC = (&irc.Message{\n\t\t\t\t\tPrefix: robustSession.IrcPrefix,\n\t\t\t\t\tCommand: irc.PONG,\n\t\t\t\t\tParams: []string{ircmsg.Params[0]},\n\t\t\t\t}).Bytes()\n\n\t\t\tcase irc.QUIT:\n\t\t\t\tquitmsg = ircmsg.Trailing\n\t\t\t\treturn\n\n\t\t\tdefault:\n\t\t\t\tsendRobust = ircmsg.Bytes()\n\t\t\t}\n\n\t\tcase err := <-ircSession.Errors:\n\t\t\tquitmsg = fmt.Sprintf(\"Bridge: Read from IRC client: %v\", err)\n\t\t\treturn\n\n\t\tcase <-keepaliveToClient:\n\t\t\t\/\/ After no traffic in either direction for 1 minute, we send a PING\n\t\t\t\/\/ message. If a PING message was already sent, this means that we did\n\t\t\t\/\/ not receive a PONG message, so we close the connection with a\n\t\t\t\/\/ timeout.\n\t\t\tif waitingForPingReply {\n\t\t\t\tquitmsg = \"Bridge: ping timeout\"\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsendIRC = (&irc.Message{\n\t\t\t\tPrefix: robustSession.IrcPrefix,\n\t\t\t\tCommand: irc.PING,\n\t\t\t\tParams: []string{\"robustirc.bridge\"},\n\t\t\t}).Bytes()\n\t\t\twaitingForPingReply = true\n\n\t\tcase <-keepaliveToNetwork:\n\t\t\tsendRobust = []byte(\"PING keepalive\")\n\t\t\tkeepaliveToNetwork = time.After(1 * time.Minute)\n\t\t}\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\trand.Seed(time.Now().Unix())\n\n\tif *network == \"\" && *socks == \"\" {\n\t\tlog.Fatal(\"You must specify either -network or -socks.\")\n\t}\n\n\t\/\/ SOCKS and IRC\n\tif *socks != \"\" && *network != \"\" {\n\t\tgo func() {\n\t\t\tif err := listenAndServeSocks(*socks); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}()\n\t}\n\n\t\/\/ SOCKS only\n\tif *socks != \"\" && *network == \"\" {\n\t\tlog.Fatal(listenAndServeSocks(*socks))\n\t}\n\n\t\/\/ IRC\n\tif *network != \"\" {\n\t\tp := newBridge(*network)\n\n\t\tln, err := net.Listen(\"tcp\", *listen)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tlog.Printf(\"RobustIRC IRC bridge listening on %q\\n\", *listen)\n\n\t\tfor {\n\t\t\tconn, err := ln.Accept()\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Could not accept IRC client connection: %v\\n\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgo p.handleIRC(conn)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/go-rtc\/stun\"\n)\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage of %s:\\n\", os.Args[0])\n\t\tfmt.Fprintln(os.Stderr, os.Args[0], \"stun.l.google.com:19302\")\n\t}\n\tflag.Parse()\n\taddr := flag.Arg(0)\n\tif len(addr) == 0 {\n\t\tfmt.Fprintln(os.Stderr, \"no address specified\")\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\tc, err := stun.Dial(\"udp\", addr)\n\tif err != nil {\n\t\tlog.Fatal(\"dial:\", err)\n\t}\n\tdeadline := time.Now().Add(time.Second * 5)\n\tif err := c.Do(stun.MustBuild(stun.TransactionID, stun.BindingRequest), deadline, func(res stun.AgentEvent) {\n\t\tif res.Error != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t\tvar xorAddr stun.XORMappedAddress\n\t\tif err := xorAddr.GetFrom(res.Message); err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t\tfmt.Println(xorAddr)\n\t}); err != nil {\n\t\tlog.Fatal(\"do:\", err)\n\t}\n\tif err := c.Close(); err != nil {\n\t\tlog.Fatalln(err)\n\t}\n}\n<commit_msg>stun-client: default to google STUN server<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/go-rtc\/stun\"\n)\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage of %s:\\n\", os.Args[0])\n\t\tfmt.Fprintln(os.Stderr, os.Args[0], \"stun.l.google.com:19302\")\n\t}\n\tflag.Parse()\n\taddr := flag.Arg(0)\n\tif len(addr) == 0 {\n\t\taddr = \"stun.l.google.com:19302\"\n\t}\n\tc, err := stun.Dial(\"udp\", addr)\n\tif err != nil {\n\t\tlog.Fatal(\"dial:\", err)\n\t}\n\tdeadline := time.Now().Add(time.Second * 5)\n\tif err := c.Do(stun.MustBuild(stun.TransactionID, stun.BindingRequest), deadline, func(res stun.AgentEvent) {\n\t\tif res.Error != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t\tvar xorAddr stun.XORMappedAddress\n\t\tif err := xorAddr.GetFrom(res.Message); err != nil {\n\t\t\tlog.Fatalln(err)\n\t\t}\n\t\tfmt.Println(xorAddr)\n\t}); err != nil {\n\t\tlog.Fatal(\"do:\", err)\n\t}\n\tif err := c.Close(); err != nil {\n\t\tlog.Fatalln(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Sample\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"context\"\n\t\"github.com\/kr\/pretty\"\n\t\"golang.org\/x\/oauth2\/google\"\n\ttaskqueue \"google.golang.org\/api\/taskqueue\/v1beta2\"\n)\n\nvar (\n\tqueue = flag.String(\"queue\", \"\", \"Pull TaskQueue name.\")\n\tproject = flag.String(\"project\", \"mlab-sandbox\", \"GCP Project name.\")\n\tpayload = flag.String(\"payload\", \"\", \"A base64 encoded payload.\")\n)\n\n\/\/ Create taskqueue.Service for communicating with the taskqueue services.\nfunc CreateTaskQueueService() *taskqueue.Service {\n\t\/\/ NOTE: google.DefaultClient authenticates the returned client with\n\t\/\/ \"Application Default Credentials\". For AppEngine Flex and GCE VMs, the\n\t\/\/ application default credentials are associated with the default GCE\n\t\/\/ service account. Credentials are scoped to limit their use to specific\n\t\/\/ APIs. So, configure necessary scopes in app.yaml, or when creating a GCE\n\t\/\/ VM; scopes cannot be changed at runtime.\n\t\/\/\n\t\/\/ For more details and background see:\n\t\/\/ https:\/\/developers.google.com\/identity\/protocols\/application-default-credentials\n\tclient, err := google.DefaultClient(context.Background(),\n\t\ttaskqueue.TaskqueueScope, taskqueue.TaskqueueConsumerScope)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to get default taskqueue client: %v \\n\", err)\n\t\treturn nil\n\t}\n\n\tservice, err := taskqueue.New(client)\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to create taskqueue service: %v\\n\", err)\n\t\treturn nil\n\t}\n\treturn service\n}\n\nfunc NewTask(queueName, payload string) *taskqueue.Task {\n\tt := &taskqueue.Task{\n\t\t\/\/ Set first available lease time to now.\n\t\tLeaseTimestamp: time.Now().Unix(),\n\t\tPayloadBase64: payload,\n\t\tQueueName: queueName,\n\t}\n\treturn t\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tapi := CreateTaskQueueService()\n\tpretty.Print(api)\n\n\treq := NewTask(*queue, *payload)\n\tpretty.Print(req)\n\n\t\/\/ WARNING: NOTE: The v1beta2 taskqueue REST API requires the prefix \"p~\"\n\t\/\/ before the project name. Without this prefix, Insert, Lease, and Delete\n\t\/\/ operations fail with 403 errors:\n\t\/\/ \"you are not allowed to make this api call\".\n\ttask, err := api.Tasks.Insert(fmt.Sprintf(\"p~%s\", *project), *queue, req).Do()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tpretty.Print(task)\n}\n<commit_msg>remove task-client, as taskqueue is obsolete<commit_after><|endoftext|>"} {"text":"<commit_before>\/*\nCopyright IBM Corp. 2017 All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\t\t http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"text\/template\"\n\n\t\"gopkg.in\/yaml.v2\"\n\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n\n\t\"bytes\"\n\t\"io\/ioutil\"\n\n\t\"github.com\/hyperledger\/fabric\/common\/tools\/cryptogen\/ca\"\n\t\"github.com\/hyperledger\/fabric\/common\/tools\/cryptogen\/msp\"\n)\n\nconst (\n\tuserBaseName = \"User\"\n\tadminBaseName = \"Admin\"\n\tdefaultHostnameTemplate = \"{{.Prefix}}{{.Index}}\"\n\tdefaultCNTemplate = \"{{.Hostname}}.{{.Domain}}\"\n)\n\ntype HostnameData struct {\n\tPrefix string\n\tIndex int\n\tDomain string\n}\n\ntype CommonNameData struct {\n\tHostname string\n\tDomain string\n}\n\ntype NodeTemplate struct {\n\tCount int `yaml:\"Count\"`\n\tStart int `yaml:\"Start\"`\n\tHostname string `yaml:\"Hostname\"`\n}\n\ntype NodeSpec struct {\n\tHostname string `yaml:\"Hostname\"`\n\tAltHostnames []string `yaml:\"AltHostnames\"`\n\tCommonName string `yaml:\"CommonName\"`\n}\n\ntype UsersSpec struct {\n\tCount int `yaml:\"Count\"`\n}\n\ntype OrgSpec struct {\n\tName string `yaml:\"Name\"`\n\tDomain string `yaml:\"Domain\"`\n\tTemplate NodeTemplate `yaml:\"Template\"`\n\tSpecs []NodeSpec `yaml:\"Specs\"`\n\tUsers UsersSpec `yaml:\"Users\"`\n}\n\ntype Config struct {\n\tOrdererOrgs []OrgSpec `yaml:\"OrdererOrgs\"`\n\tPeerOrgs []OrgSpec `yaml:\"PeerOrgs\"`\n}\n\nvar defaultConfig = `\n# ---------------------------------------------------------------------------\n# \"OrdererOrgs\" - Definition of organizations managing orderer nodes\n# ---------------------------------------------------------------------------\nOrdererOrgs:\n # ---------------------------------------------------------------------------\n # Orderer\n # ---------------------------------------------------------------------------\n - Name: Orderer\n Domain: example.com\n\n # ---------------------------------------------------------------------------\n # \"Specs\" - See PeerOrgs below for complete description\n # ---------------------------------------------------------------------------\n Specs:\n - Hostname: orderer\n\n# ---------------------------------------------------------------------------\n# \"PeerOrgs\" - Definition of organizations managing peer nodes\n# ---------------------------------------------------------------------------\nPeerOrgs:\n # ---------------------------------------------------------------------------\n # Org1\n # ---------------------------------------------------------------------------\n - Name: Org1\n Domain: org1.example.com\n\n # ---------------------------------------------------------------------------\n # \"Specs\"\n # ---------------------------------------------------------------------------\n # Uncomment this section to enable the explicit definition of hosts in your\n # configuration. Most users will want to use Template, below\n #\n # Specs is an array of Spec entries. Each Spec entry consists of two fields:\n # - Hostname: (Required) The desired hostname, sans the domain.\n # - CommonName: (Optional) Specifies the template or explicit override for\n # the CN. By default, this is the template:\n #\n # \"{{.Hostname}}.{{.Domain}}\"\n #\n # which obtains its values from the Spec.Hostname and\n # Org.Domain, respectively.\n # ---------------------------------------------------------------------------\n # Specs:\n # - Hostname: foo # implicitly \"foo.org1.example.com\"\n # CommonName: foo27.org5.example.com # overrides Hostname-based FQDN set above\n # - Hostname: bar\n # - Hostname: baz\n\n # ---------------------------------------------------------------------------\n # \"Template\"\n # ---------------------------------------------------------------------------\n # Allows for the definition of 1 or more hosts that are created sequentially\n # from a template. By default, this looks like \"peer%d\" from 0 to Count-1.\n # You may override the number of nodes (Count), the starting index (Start)\n # or the template used to construct the name (Hostname).\n #\n # Note: Template and Specs are not mutually exclusive. You may define both\n # sections and the aggregate nodes will be created for you. Take care with\n # name collisions\n # ---------------------------------------------------------------------------\n Template:\n Count: 1\n # Start: 5\n # Hostname: {{.Prefix}}{{.Index}} # default\n\n # ---------------------------------------------------------------------------\n # \"Users\"\n # ---------------------------------------------------------------------------\n # Count: The number of user accounts _in addition_ to Admin\n # ---------------------------------------------------------------------------\n Users:\n Count: 1\n\n # ---------------------------------------------------------------------------\n # Org2: See \"Org1\" for full specification\n # ---------------------------------------------------------------------------\n - Name: Org2\n Domain: org2.example.com\n Template:\n Count: 1\n Users:\n Count: 1\n`\n\n\/\/command line flags\nvar (\n\tapp = kingpin.New(\"cryptogen\", \"Utility for generating Hyperledger Fabric key material\")\n\n\tgen = app.Command(\"generate\", \"Generate key material\")\n\toutputDir = gen.Flag(\"output\", \"The output directory in which to place artifacts\").Default(\"crypto-config\").String()\n\tconfigFile = gen.Flag(\"config\", \"The configuration template to use\").File()\n\n\tshowtemplate = app.Command(\"showtemplate\", \"Show the default configuration template\")\n)\n\nfunc main() {\n\tkingpin.Version(\"0.0.1\")\n\tswitch kingpin.MustParse(app.Parse(os.Args[1:])) {\n\n\t\/\/ \"generate\" command\n\tcase gen.FullCommand():\n\t\tgenerate()\n\n\t\/\/ \"showtemplate\" command\n\tcase showtemplate.FullCommand():\n\t\tfmt.Print(defaultConfig)\n\t\tos.Exit(0)\n\t}\n\n}\n\nfunc getConfig() (*Config, error) {\n\tvar configData string\n\n\tif *configFile != nil {\n\t\tdata, err := ioutil.ReadAll(*configFile)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error reading configuration: %s\", err)\n\t\t}\n\n\t\tconfigData = string(data)\n\t} else {\n\t\tconfigData = defaultConfig\n\t}\n\n\tconfig := &Config{}\n\terr := yaml.Unmarshal([]byte(configData), &config)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error Unmarshaling YAML: %s\", err)\n\t}\n\n\treturn config, nil\n}\n\nfunc generate() {\n\n\tconfig, err := getConfig()\n\tif err != nil {\n\t\tfmt.Printf(\"Error reading config: %s\", err)\n\t\tos.Exit(-1)\n\t}\n\n\tfor _, orgSpec := range config.PeerOrgs {\n\t\terr = generateNodeSpec(&orgSpec, \"peer\")\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error processing peer configuration: %s\", err)\n\t\t\tos.Exit(-1)\n\t\t}\n\t\tgeneratePeerOrg(*outputDir, orgSpec)\n\t}\n\n\tfor _, orgSpec := range config.OrdererOrgs {\n\t\tgenerateNodeSpec(&orgSpec, \"orderer\")\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error processing orderer configuration: %s\", err)\n\t\t\tos.Exit(-1)\n\t\t}\n\t\tgenerateOrdererOrg(*outputDir, orgSpec)\n\t}\n}\n\nfunc parseTemplate(input, defaultInput string, data interface{}) (string, error) {\n\n\t\/\/ Use the default if the input is an empty string\n\tif len(input) == 0 {\n\t\tinput = defaultInput\n\t}\n\n\tt, err := template.New(\"parse\").Parse(input)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error parsing template: %s\", err)\n\t}\n\n\toutput := new(bytes.Buffer)\n\terr = t.Execute(output, data)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error executing template: %s\", err)\n\t}\n\n\treturn output.String(), nil\n}\n\nfunc generateNodeSpec(orgSpec *OrgSpec, prefix string) error {\n\t\/\/ First process all of our templated nodes\n\tfor i := 0; i < orgSpec.Template.Count; i++ {\n\t\tdata := HostnameData{\n\t\t\tPrefix: prefix,\n\t\t\tIndex: i + orgSpec.Template.Start,\n\t\t\tDomain: orgSpec.Domain,\n\t\t}\n\n\t\thostname, err := parseTemplate(orgSpec.Template.Hostname, defaultHostnameTemplate, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tspec := NodeSpec{Hostname: hostname}\n\t\torgSpec.Specs = append(orgSpec.Specs, spec)\n\t}\n\n\t\/\/ And finally touch up all specs to add the domain\n\tfor idx, spec := range orgSpec.Specs {\n\t\tdata := CommonNameData{\n\t\t\tHostname: spec.Hostname,\n\t\t\tDomain: orgSpec.Domain,\n\t\t}\n\n\t\tfinalCN, err := parseTemplate(spec.CommonName, defaultCNTemplate, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\torgSpec.Specs[idx].CommonName = finalCN\n\t}\n\n\treturn nil\n}\n\nfunc generatePeerOrg(baseDir string, orgSpec OrgSpec) {\n\n\torgName := orgSpec.Domain\n\n\tfmt.Println(orgName)\n\t\/\/ generate CA\n\torgDir := filepath.Join(baseDir, \"peerOrganizations\", orgName)\n\tcaDir := filepath.Join(orgDir, \"ca\")\n\tmspDir := filepath.Join(orgDir, \"msp\")\n\tpeersDir := filepath.Join(orgDir, \"peers\")\n\tusersDir := filepath.Join(orgDir, \"users\")\n\tadminCertsDir := filepath.Join(mspDir, \"admincerts\")\n\trootCA, err := ca.NewCA(caDir, orgName)\n\tif err != nil {\n\t\tfmt.Printf(\"Error generating CA for org %s:\\n%v\\n\", orgName, err)\n\t\tos.Exit(1)\n\t}\n\terr = msp.GenerateVerifyingMSP(mspDir, rootCA)\n\tif err != nil {\n\t\tfmt.Printf(\"Error generating MSP for org %s:\\n%v\\n\", orgName, err)\n\t\tos.Exit(1)\n\t}\n\n\tpeerNames := []string{}\n\tfor _, spec := range orgSpec.Specs {\n\t\tpeerNames = append(peerNames, spec.CommonName)\n\t}\n\tgenerateNodes(peersDir, peerNames, rootCA)\n\n\t\/\/ TODO: add ability to specify usernames\n\tusernames := []string{}\n\tfor j := 1; j <= orgSpec.Users.Count; j++ {\n\t\tusernames = append(usernames, fmt.Sprintf(\"%s%d@%s\",\n\t\t\tuserBaseName, j, orgName))\n\t}\n\t\/\/ add an admin user\n\tadminUserName := fmt.Sprintf(\"%s@%s\",\n\t\tadminBaseName, orgName)\n\n\tusernames = append(usernames, adminUserName)\n\tgenerateNodes(usersDir, usernames, rootCA)\n\n\t\/\/ copy the admin cert to the org's MSP admincerts\n\terr = copyAdminCert(usersDir, adminCertsDir, adminUserName)\n\tif err != nil {\n\t\tfmt.Printf(\"Error copying admin cert for org %s:\\n%v\\n\",\n\t\t\torgName, err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc copyAdminCert(usersDir, adminCertsDir, adminUserName string) error {\n\t\/\/ delete the contents of admincerts\n\terr := os.RemoveAll(adminCertsDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ recreate the admincerts directory\n\terr = os.MkdirAll(adminCertsDir, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = copyFile(filepath.Join(usersDir, adminUserName, \"signcerts\",\n\t\tadminUserName+\"-cert.pem\"), filepath.Join(adminCertsDir,\n\t\tadminUserName+\"-cert.pem\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}\n\nfunc generateNodes(baseDir string, nodeNames []string, rootCA *ca.CA) {\n\n\tfor _, nodeName := range nodeNames {\n\t\tnodeDir := filepath.Join(baseDir, nodeName)\n\t\terr := msp.GenerateLocalMSP(nodeDir, nodeName, rootCA)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error generating local MSP for %s:\\n%v\\n\", nodeName, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n}\n\nfunc generateOrdererOrg(baseDir string, orgSpec OrgSpec) {\n\n\torgName := orgSpec.Domain\n\n\t\/\/ generate CA\n\torgDir := filepath.Join(baseDir, \"ordererOrganizations\", orgName)\n\tcaDir := filepath.Join(orgDir, \"ca\")\n\tmspDir := filepath.Join(orgDir, \"msp\")\n\torderersDir := filepath.Join(orgDir, \"orderers\")\n\tusersDir := filepath.Join(orgDir, \"users\")\n\tadminCertsDir := filepath.Join(mspDir, \"admincerts\")\n\trootCA, err := ca.NewCA(caDir, orgName)\n\tif err != nil {\n\t\tfmt.Printf(\"Error generating CA for org %s:\\n%v\\n\", orgName, err)\n\t\tos.Exit(1)\n\t}\n\terr = msp.GenerateVerifyingMSP(mspDir, rootCA)\n\tif err != nil {\n\t\tfmt.Printf(\"Error generating MSP for org %s:\\n%v\\n\", orgName, err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ TODO: add ability to specify orderer names\n\t\/\/ for name just use default base name\n\tordererNames := []string{}\n\tfor _, spec := range orgSpec.Specs {\n\t\tordererNames = append(ordererNames, spec.CommonName)\n\t}\n\tgenerateNodes(orderersDir, ordererNames, rootCA)\n\n\tadminUserName := fmt.Sprintf(\"%s@%s\",\n\t\tadminBaseName, orgName)\n\n\t\/\/ generate an admin for the orderer org\n\tusernames := []string{}\n\t\/\/ add an admin user\n\tusernames = append(usernames, adminUserName)\n\tgenerateNodes(usersDir, usernames, rootCA)\n\n\t\/\/ copy the admin cert to the org's MSP admincerts\n\terr = copyAdminCert(usersDir, adminCertsDir, adminUserName)\n\tif err != nil {\n\t\tfmt.Printf(\"Error copying admin cert for org %s:\\n%v\\n\",\n\t\t\torgName, err)\n\t\tos.Exit(1)\n\t}\n\n}\n\nfunc copyFile(src, dst string) error {\n\tin, err := os.Open(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer in.Close()\n\tout, err := os.Create(dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer out.Close()\n\t_, err = io.Copy(out, in)\n\tcerr := out.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn cerr\n}\n<commit_msg>[FAB-3306] cryptogen - copy admin certs to node MSPs<commit_after>\/*\nCopyright IBM Corp. 2017 All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\t\t http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"text\/template\"\n\n\t\"gopkg.in\/yaml.v2\"\n\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n\n\t\"bytes\"\n\t\"io\/ioutil\"\n\n\t\"github.com\/hyperledger\/fabric\/common\/tools\/cryptogen\/ca\"\n\t\"github.com\/hyperledger\/fabric\/common\/tools\/cryptogen\/msp\"\n)\n\nconst (\n\tuserBaseName = \"User\"\n\tadminBaseName = \"Admin\"\n\tdefaultHostnameTemplate = \"{{.Prefix}}{{.Index}}\"\n\tdefaultCNTemplate = \"{{.Hostname}}.{{.Domain}}\"\n)\n\ntype HostnameData struct {\n\tPrefix string\n\tIndex int\n\tDomain string\n}\n\ntype CommonNameData struct {\n\tHostname string\n\tDomain string\n}\n\ntype NodeTemplate struct {\n\tCount int `yaml:\"Count\"`\n\tStart int `yaml:\"Start\"`\n\tHostname string `yaml:\"Hostname\"`\n}\n\ntype NodeSpec struct {\n\tHostname string `yaml:\"Hostname\"`\n\tAltHostnames []string `yaml:\"AltHostnames\"`\n\tCommonName string `yaml:\"CommonName\"`\n}\n\ntype UsersSpec struct {\n\tCount int `yaml:\"Count\"`\n}\n\ntype OrgSpec struct {\n\tName string `yaml:\"Name\"`\n\tDomain string `yaml:\"Domain\"`\n\tTemplate NodeTemplate `yaml:\"Template\"`\n\tSpecs []NodeSpec `yaml:\"Specs\"`\n\tUsers UsersSpec `yaml:\"Users\"`\n}\n\ntype Config struct {\n\tOrdererOrgs []OrgSpec `yaml:\"OrdererOrgs\"`\n\tPeerOrgs []OrgSpec `yaml:\"PeerOrgs\"`\n}\n\nvar defaultConfig = `\n# ---------------------------------------------------------------------------\n# \"OrdererOrgs\" - Definition of organizations managing orderer nodes\n# ---------------------------------------------------------------------------\nOrdererOrgs:\n # ---------------------------------------------------------------------------\n # Orderer\n # ---------------------------------------------------------------------------\n - Name: Orderer\n Domain: example.com\n\n # ---------------------------------------------------------------------------\n # \"Specs\" - See PeerOrgs below for complete description\n # ---------------------------------------------------------------------------\n Specs:\n - Hostname: orderer\n\n# ---------------------------------------------------------------------------\n# \"PeerOrgs\" - Definition of organizations managing peer nodes\n# ---------------------------------------------------------------------------\nPeerOrgs:\n # ---------------------------------------------------------------------------\n # Org1\n # ---------------------------------------------------------------------------\n - Name: Org1\n Domain: org1.example.com\n\n # ---------------------------------------------------------------------------\n # \"Specs\"\n # ---------------------------------------------------------------------------\n # Uncomment this section to enable the explicit definition of hosts in your\n # configuration. Most users will want to use Template, below\n #\n # Specs is an array of Spec entries. Each Spec entry consists of two fields:\n # - Hostname: (Required) The desired hostname, sans the domain.\n # - CommonName: (Optional) Specifies the template or explicit override for\n # the CN. By default, this is the template:\n #\n # \"{{.Hostname}}.{{.Domain}}\"\n #\n # which obtains its values from the Spec.Hostname and\n # Org.Domain, respectively.\n # ---------------------------------------------------------------------------\n # Specs:\n # - Hostname: foo # implicitly \"foo.org1.example.com\"\n # CommonName: foo27.org5.example.com # overrides Hostname-based FQDN set above\n # - Hostname: bar\n # - Hostname: baz\n\n # ---------------------------------------------------------------------------\n # \"Template\"\n # ---------------------------------------------------------------------------\n # Allows for the definition of 1 or more hosts that are created sequentially\n # from a template. By default, this looks like \"peer%d\" from 0 to Count-1.\n # You may override the number of nodes (Count), the starting index (Start)\n # or the template used to construct the name (Hostname).\n #\n # Note: Template and Specs are not mutually exclusive. You may define both\n # sections and the aggregate nodes will be created for you. Take care with\n # name collisions\n # ---------------------------------------------------------------------------\n Template:\n Count: 1\n # Start: 5\n # Hostname: {{.Prefix}}{{.Index}} # default\n\n # ---------------------------------------------------------------------------\n # \"Users\"\n # ---------------------------------------------------------------------------\n # Count: The number of user accounts _in addition_ to Admin\n # ---------------------------------------------------------------------------\n Users:\n Count: 1\n\n # ---------------------------------------------------------------------------\n # Org2: See \"Org1\" for full specification\n # ---------------------------------------------------------------------------\n - Name: Org2\n Domain: org2.example.com\n Template:\n Count: 1\n Users:\n Count: 1\n`\n\n\/\/command line flags\nvar (\n\tapp = kingpin.New(\"cryptogen\", \"Utility for generating Hyperledger Fabric key material\")\n\n\tgen = app.Command(\"generate\", \"Generate key material\")\n\toutputDir = gen.Flag(\"output\", \"The output directory in which to place artifacts\").Default(\"crypto-config\").String()\n\tconfigFile = gen.Flag(\"config\", \"The configuration template to use\").File()\n\n\tshowtemplate = app.Command(\"showtemplate\", \"Show the default configuration template\")\n)\n\nfunc main() {\n\tkingpin.Version(\"0.0.1\")\n\tswitch kingpin.MustParse(app.Parse(os.Args[1:])) {\n\n\t\/\/ \"generate\" command\n\tcase gen.FullCommand():\n\t\tgenerate()\n\n\t\/\/ \"showtemplate\" command\n\tcase showtemplate.FullCommand():\n\t\tfmt.Print(defaultConfig)\n\t\tos.Exit(0)\n\t}\n\n}\n\nfunc getConfig() (*Config, error) {\n\tvar configData string\n\n\tif *configFile != nil {\n\t\tdata, err := ioutil.ReadAll(*configFile)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error reading configuration: %s\", err)\n\t\t}\n\n\t\tconfigData = string(data)\n\t} else {\n\t\tconfigData = defaultConfig\n\t}\n\n\tconfig := &Config{}\n\terr := yaml.Unmarshal([]byte(configData), &config)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error Unmarshaling YAML: %s\", err)\n\t}\n\n\treturn config, nil\n}\n\nfunc generate() {\n\n\tconfig, err := getConfig()\n\tif err != nil {\n\t\tfmt.Printf(\"Error reading config: %s\", err)\n\t\tos.Exit(-1)\n\t}\n\n\tfor _, orgSpec := range config.PeerOrgs {\n\t\terr = generateNodeSpec(&orgSpec, \"peer\")\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error processing peer configuration: %s\", err)\n\t\t\tos.Exit(-1)\n\t\t}\n\t\tgeneratePeerOrg(*outputDir, orgSpec)\n\t}\n\n\tfor _, orgSpec := range config.OrdererOrgs {\n\t\tgenerateNodeSpec(&orgSpec, \"orderer\")\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error processing orderer configuration: %s\", err)\n\t\t\tos.Exit(-1)\n\t\t}\n\t\tgenerateOrdererOrg(*outputDir, orgSpec)\n\t}\n}\n\nfunc parseTemplate(input, defaultInput string, data interface{}) (string, error) {\n\n\t\/\/ Use the default if the input is an empty string\n\tif len(input) == 0 {\n\t\tinput = defaultInput\n\t}\n\n\tt, err := template.New(\"parse\").Parse(input)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error parsing template: %s\", err)\n\t}\n\n\toutput := new(bytes.Buffer)\n\terr = t.Execute(output, data)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"Error executing template: %s\", err)\n\t}\n\n\treturn output.String(), nil\n}\n\nfunc generateNodeSpec(orgSpec *OrgSpec, prefix string) error {\n\t\/\/ First process all of our templated nodes\n\tfor i := 0; i < orgSpec.Template.Count; i++ {\n\t\tdata := HostnameData{\n\t\t\tPrefix: prefix,\n\t\t\tIndex: i + orgSpec.Template.Start,\n\t\t\tDomain: orgSpec.Domain,\n\t\t}\n\n\t\thostname, err := parseTemplate(orgSpec.Template.Hostname, defaultHostnameTemplate, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tspec := NodeSpec{Hostname: hostname}\n\t\torgSpec.Specs = append(orgSpec.Specs, spec)\n\t}\n\n\t\/\/ And finally touch up all specs to add the domain\n\tfor idx, spec := range orgSpec.Specs {\n\t\tdata := CommonNameData{\n\t\t\tHostname: spec.Hostname,\n\t\t\tDomain: orgSpec.Domain,\n\t\t}\n\n\t\tfinalCN, err := parseTemplate(spec.CommonName, defaultCNTemplate, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\torgSpec.Specs[idx].CommonName = finalCN\n\t}\n\n\treturn nil\n}\n\nfunc generatePeerOrg(baseDir string, orgSpec OrgSpec) {\n\n\torgName := orgSpec.Domain\n\n\tfmt.Println(orgName)\n\t\/\/ generate CA\n\torgDir := filepath.Join(baseDir, \"peerOrganizations\", orgName)\n\tcaDir := filepath.Join(orgDir, \"ca\")\n\tmspDir := filepath.Join(orgDir, \"msp\")\n\tpeersDir := filepath.Join(orgDir, \"peers\")\n\tusersDir := filepath.Join(orgDir, \"users\")\n\tadminCertsDir := filepath.Join(mspDir, \"admincerts\")\n\trootCA, err := ca.NewCA(caDir, orgName)\n\tif err != nil {\n\t\tfmt.Printf(\"Error generating CA for org %s:\\n%v\\n\", orgName, err)\n\t\tos.Exit(1)\n\t}\n\terr = msp.GenerateVerifyingMSP(mspDir, rootCA)\n\tif err != nil {\n\t\tfmt.Printf(\"Error generating MSP for org %s:\\n%v\\n\", orgName, err)\n\t\tos.Exit(1)\n\t}\n\n\tpeerNames := []string{}\n\tfor _, spec := range orgSpec.Specs {\n\t\tpeerNames = append(peerNames, spec.CommonName)\n\t}\n\tgenerateNodes(peersDir, peerNames, rootCA)\n\n\t\/\/ TODO: add ability to specify usernames\n\tusernames := []string{}\n\tfor j := 1; j <= orgSpec.Users.Count; j++ {\n\t\tusernames = append(usernames, fmt.Sprintf(\"%s%d@%s\",\n\t\t\tuserBaseName, j, orgName))\n\t}\n\t\/\/ add an admin user\n\tadminUserName := fmt.Sprintf(\"%s@%s\",\n\t\tadminBaseName, orgName)\n\n\tusernames = append(usernames, adminUserName)\n\tgenerateNodes(usersDir, usernames, rootCA)\n\n\t\/\/ copy the admin cert to the org's MSP admincerts\n\terr = copyAdminCert(usersDir, adminCertsDir, adminUserName)\n\tif err != nil {\n\t\tfmt.Printf(\"Error copying admin cert for org %s:\\n%v\\n\",\n\t\t\torgName, err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ copy the admin cert to each of the org's peer's MSP admincerts\n\tfor _, peerName := range peerNames {\n\t\terr = copyAdminCert(usersDir, filepath.Join(peersDir, peerName,\n\t\t\t\"admincerts\"), adminUserName)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error copying admin cert for org %s peer %s:\\n%v\\n\",\n\t\t\t\torgName, peerName, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n\nfunc copyAdminCert(usersDir, adminCertsDir, adminUserName string) error {\n\t\/\/ delete the contents of admincerts\n\terr := os.RemoveAll(adminCertsDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ recreate the admincerts directory\n\terr = os.MkdirAll(adminCertsDir, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = copyFile(filepath.Join(usersDir, adminUserName, \"signcerts\",\n\t\tadminUserName+\"-cert.pem\"), filepath.Join(adminCertsDir,\n\t\tadminUserName+\"-cert.pem\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}\n\nfunc generateNodes(baseDir string, nodeNames []string, rootCA *ca.CA) {\n\n\tfor _, nodeName := range nodeNames {\n\t\tnodeDir := filepath.Join(baseDir, nodeName)\n\t\terr := msp.GenerateLocalMSP(nodeDir, nodeName, rootCA)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error generating local MSP for %s:\\n%v\\n\", nodeName, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n}\n\nfunc generateOrdererOrg(baseDir string, orgSpec OrgSpec) {\n\n\torgName := orgSpec.Domain\n\n\t\/\/ generate CA\n\torgDir := filepath.Join(baseDir, \"ordererOrganizations\", orgName)\n\tcaDir := filepath.Join(orgDir, \"ca\")\n\tmspDir := filepath.Join(orgDir, \"msp\")\n\torderersDir := filepath.Join(orgDir, \"orderers\")\n\tusersDir := filepath.Join(orgDir, \"users\")\n\tadminCertsDir := filepath.Join(mspDir, \"admincerts\")\n\trootCA, err := ca.NewCA(caDir, orgName)\n\tif err != nil {\n\t\tfmt.Printf(\"Error generating CA for org %s:\\n%v\\n\", orgName, err)\n\t\tos.Exit(1)\n\t}\n\terr = msp.GenerateVerifyingMSP(mspDir, rootCA)\n\tif err != nil {\n\t\tfmt.Printf(\"Error generating MSP for org %s:\\n%v\\n\", orgName, err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ TODO: add ability to specify orderer names\n\t\/\/ for name just use default base name\n\tordererNames := []string{}\n\tfor _, spec := range orgSpec.Specs {\n\t\tordererNames = append(ordererNames, spec.CommonName)\n\t}\n\tgenerateNodes(orderersDir, ordererNames, rootCA)\n\n\tadminUserName := fmt.Sprintf(\"%s@%s\",\n\t\tadminBaseName, orgName)\n\n\t\/\/ generate an admin for the orderer org\n\tusernames := []string{}\n\t\/\/ add an admin user\n\tusernames = append(usernames, adminUserName)\n\tgenerateNodes(usersDir, usernames, rootCA)\n\n\t\/\/ copy the admin cert to the org's MSP admincerts\n\terr = copyAdminCert(usersDir, adminCertsDir, adminUserName)\n\tif err != nil {\n\t\tfmt.Printf(\"Error copying admin cert for org %s:\\n%v\\n\",\n\t\t\torgName, err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ copy the admin cert to each of the org's orderers's MSP admincerts\n\tfor _, ordererName := range ordererNames {\n\t\terr = copyAdminCert(usersDir, filepath.Join(orderersDir, ordererName,\n\t\t\t\"admincerts\"), adminUserName)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Error copying admin cert for org %s orderer %s:\\n%v\\n\",\n\t\t\t\torgName, ordererName, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n}\n\nfunc copyFile(src, dst string) error {\n\tin, err := os.Open(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer in.Close()\n\tout, err := os.Create(dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer out.Close()\n\t_, err = io.Copy(out, in)\n\tcerr := out.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn cerr\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Serulian Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage compilergraph\n\nimport (\n\t\"github.com\/serulian\/compiler\/compilerutil\"\n\n\t\"github.com\/cayleygraph\/cayley\/quad\"\n)\n\n\/\/ nodeFilter is a filtering function for a graph query.\ntype nodeFilter func(q GraphQuery) Query\n\n\/\/ FilteredQuery is a type which wraps a GraphQuery and executes additional filtering.\ntype FilteredQuery struct {\n\tquery GraphQuery\n\tfilter nodeFilter\n}\n\n\/\/ BuildNodeIterator returns an iterator over the filtered query.\nfunc (fq FilteredQuery) BuildNodeIterator(predicates ...Predicate) NodeIterator {\n\t\/\/ Build an iterator to collect the IDs matching the inner query.\n\tit := fq.query.BuildNodeIterator()\n\tif !it.Next() {\n\t\treturn EmptyIterator{}\n\t}\n\n\t\/\/ Note that it.Next() is called in the check above, so we call it at the\n\t\/\/ *end* of each of the loop iterations. This ensure that we don't allocate\n\t\/\/ the slice unless absolutely necessary.\n\tvar nodeIds = make([]GraphNodeId, 0, 16)\n\tfor {\n\t\tnodeIds = append(nodeIds, it.Node().NodeId)\n\t\tif !it.Next() {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ Otherwise, create a new query starting from the nodes found and send it\n\t\/\/ to the filtering function.\n\tfullKindPredicate := fq.query.layer.getPrefixedPredicate(fq.query.layer.nodeKindPredicate)\n\tmarkID := compilerutil.NewUniqueId()\n\tsubQuery := fq.query.layer.StartQueryFromNodes(nodeIds...).mark(markID).save(fullKindPredicate, markID+\"-kind\")\n\tfilteredQuery := fq.filter(subQuery)\n\n\t\/\/ Build an iterator over the filtered query.\n\tfit := filteredQuery.BuildNodeIterator()\n\tif !fit.Next() {\n\t\treturn EmptyIterator{}\n\t}\n\n\t\/\/ Collect the filtered nodes.\n\tvar filtered = make([]GraphNode, 0, len(nodeIds))\n\tfor {\n\t\tnodeID := valueToNodeId(fit.getMarked(markID))\n\t\tkindValue := fit.getMarked(markID + \"-kind\")\n\t\tfiltered = append(filtered, GraphNode{nodeID, kindValue, fq.query.layer})\n\t\tif !fit.Next() {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn &nodeReturnIterator{fq.query.layer, filtered, -1}\n}\n\n\/\/ HasWhere starts a new client query.\nfunc (fq FilteredQuery) HasWhere(predicate Predicate, op clientQueryOperation, value interface{}) Query {\n\treturn getClientQuery(fq.query.layer, fq, predicate, op, value)\n}\n\n\/\/ TryGetNode executes the query and returns the single node found or false. If there is\n\/\/ more than a single node as a result of the query, the first node is returned.\nfunc (fq FilteredQuery) TryGetNode() (GraphNode, bool) {\n\treturn tryGetNode(fq.BuildNodeIterator())\n}\n\n\/\/ nodeReturnIterator is an iterator that just returns a preset list of nodes.\ntype nodeReturnIterator struct {\n\tlayer *GraphLayer\n\tfiltered []GraphNode\n\tindex int\n}\n\nfunc (nri *nodeReturnIterator) Next() bool {\n\tnri.index++\n\treturn nri.index < len(nri.filtered)\n}\n\nfunc (nri *nodeReturnIterator) Node() GraphNode {\n\treturn nri.filtered[nri.index]\n}\n\nfunc (nri *nodeReturnIterator) GetPredicate(predicate Predicate) GraphValue {\n\t\/\/ Note: This is a slightly slower path, but most filtered queries don't need extra\n\t\/\/ predicates.\n\treturn nri.Node().GetValue(predicate)\n}\n\nfunc (nri *nodeReturnIterator) TaggedValue(predicate Predicate, example TaggedValue) interface{} {\n\treturn nri.Node().GetTagged(predicate, example)\n}\n\nfunc (nri *nodeReturnIterator) getRequestedPredicate(predicate Predicate) quad.Value {\n\tpanic(\"Should not be called\")\n}\n\nfunc (nri *nodeReturnIterator) getMarked(name string) quad.Value {\n\tpanic(\"Should not be called\")\n}\n<commit_msg>Switch to faster ID generation<commit_after>\/\/ Copyright 2015 The Serulian Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage compilergraph\n\nimport (\n\t\"github.com\/cayleygraph\/cayley\/quad\"\n\tuuid \"github.com\/satori\/go.uuid\"\n)\n\n\/\/ nodeFilter is a filtering function for a graph query.\ntype nodeFilter func(q GraphQuery) Query\n\n\/\/ FilteredQuery is a type which wraps a GraphQuery and executes additional filtering.\ntype FilteredQuery struct {\n\tquery GraphQuery\n\tfilter nodeFilter\n}\n\n\/\/ BuildNodeIterator returns an iterator over the filtered query.\nfunc (fq FilteredQuery) BuildNodeIterator(predicates ...Predicate) NodeIterator {\n\t\/\/ Build an iterator to collect the IDs matching the inner query.\n\tit := fq.query.BuildNodeIterator()\n\tif !it.Next() {\n\t\treturn EmptyIterator{}\n\t}\n\n\t\/\/ Note that it.Next() is called in the check above, so we call it at the\n\t\/\/ *end* of each of the loop iterations. This ensure that we don't allocate\n\t\/\/ the slice unless absolutely necessary.\n\tvar nodeIds = make([]GraphNodeId, 0, 16)\n\tfor {\n\t\tnodeIds = append(nodeIds, it.Node().NodeId)\n\t\tif !it.Next() {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ Otherwise, create a new query starting from the nodes found and send it\n\t\/\/ to the filtering function.\n\tfullKindPredicate := fq.query.layer.getPrefixedPredicate(fq.query.layer.nodeKindPredicate)\n\tmarkID := uuid.NewV1().String()\n\tsubQuery := fq.query.layer.StartQueryFromNodes(nodeIds...).mark(markID).save(fullKindPredicate, markID+\"-kind\")\n\tfilteredQuery := fq.filter(subQuery)\n\n\t\/\/ Build an iterator over the filtered query.\n\tfit := filteredQuery.BuildNodeIterator()\n\tif !fit.Next() {\n\t\treturn EmptyIterator{}\n\t}\n\n\t\/\/ Collect the filtered nodes.\n\tvar filtered = make([]GraphNode, 0, len(nodeIds))\n\tfor {\n\t\tnodeID := valueToNodeId(fit.getMarked(markID))\n\t\tkindValue := fit.getMarked(markID + \"-kind\")\n\t\tfiltered = append(filtered, GraphNode{nodeID, kindValue, fq.query.layer})\n\t\tif !fit.Next() {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn &nodeReturnIterator{fq.query.layer, filtered, -1}\n}\n\n\/\/ HasWhere starts a new client query.\nfunc (fq FilteredQuery) HasWhere(predicate Predicate, op clientQueryOperation, value interface{}) Query {\n\treturn getClientQuery(fq.query.layer, fq, predicate, op, value)\n}\n\n\/\/ TryGetNode executes the query and returns the single node found or false. If there is\n\/\/ more than a single node as a result of the query, the first node is returned.\nfunc (fq FilteredQuery) TryGetNode() (GraphNode, bool) {\n\treturn tryGetNode(fq.BuildNodeIterator())\n}\n\n\/\/ nodeReturnIterator is an iterator that just returns a preset list of nodes.\ntype nodeReturnIterator struct {\n\tlayer *GraphLayer\n\tfiltered []GraphNode\n\tindex int\n}\n\nfunc (nri *nodeReturnIterator) Next() bool {\n\tnri.index++\n\treturn nri.index < len(nri.filtered)\n}\n\nfunc (nri *nodeReturnIterator) Node() GraphNode {\n\treturn nri.filtered[nri.index]\n}\n\nfunc (nri *nodeReturnIterator) GetPredicate(predicate Predicate) GraphValue {\n\t\/\/ Note: This is a slightly slower path, but most filtered queries don't need extra\n\t\/\/ predicates.\n\treturn nri.Node().GetValue(predicate)\n}\n\nfunc (nri *nodeReturnIterator) TaggedValue(predicate Predicate, example TaggedValue) interface{} {\n\treturn nri.Node().GetTagged(predicate, example)\n}\n\nfunc (nri *nodeReturnIterator) getRequestedPredicate(predicate Predicate) quad.Value {\n\tpanic(\"Should not be called\")\n}\n\nfunc (nri *nodeReturnIterator) getMarked(name string) quad.Value {\n\tpanic(\"Should not be called\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage crdregistration\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"k8s.io\/klog\/v2\"\n\n\tapiextensionsv1 \"k8s.io\/apiextensions-apiserver\/pkg\/apis\/apiextensions\/v1\"\n\tcrdinformers \"k8s.io\/apiextensions-apiserver\/pkg\/client\/informers\/externalversions\/apiextensions\/v1\"\n\tcrdlisters \"k8s.io\/apiextensions-apiserver\/pkg\/client\/listers\/apiextensions\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\tutilruntime \"k8s.io\/apimachinery\/pkg\/util\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\t\"k8s.io\/client-go\/util\/workqueue\"\n\tv1 \"k8s.io\/kube-aggregator\/pkg\/apis\/apiregistration\/v1\"\n\t\"k8s.io\/kube-aggregator\/pkg\/apiserver\"\n)\n\n\/\/ AutoAPIServiceRegistration is an interface which callers can re-declare locally and properly cast to for\n\/\/ adding and removing APIServices\ntype AutoAPIServiceRegistration interface {\n\t\/\/ AddAPIServiceToSync adds an API service to auto-register.\n\tAddAPIServiceToSync(in *v1.APIService)\n\t\/\/ RemoveAPIServiceToSync removes an API service to auto-register.\n\tRemoveAPIServiceToSync(name string)\n}\n\ntype crdRegistrationController struct {\n\tcrdLister crdlisters.CustomResourceDefinitionLister\n\tcrdSynced cache.InformerSynced\n\n\tapiServiceRegistration AutoAPIServiceRegistration\n\n\tsyncHandler func(groupVersion schema.GroupVersion) error\n\n\tsyncedInitialSet chan struct{}\n\n\t\/\/ queue is where incoming work is placed to de-dup and to allow \"easy\" rate limited requeues on errors\n\t\/\/ this is actually keyed by a groupVersion\n\tqueue workqueue.RateLimitingInterface\n}\n\n\/\/ NewCRDRegistrationController returns a controller which will register CRD GroupVersions with the auto APIService registration\n\/\/ controller so they automatically stay in sync.\nfunc NewCRDRegistrationController(crdinformer crdinformers.CustomResourceDefinitionInformer, apiServiceRegistration AutoAPIServiceRegistration) *crdRegistrationController {\n\tc := &crdRegistrationController{\n\t\tcrdLister: crdinformer.Lister(),\n\t\tcrdSynced: crdinformer.Informer().HasSynced,\n\t\tapiServiceRegistration: apiServiceRegistration,\n\t\tsyncedInitialSet: make(chan struct{}),\n\t\tqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"crd_autoregistration_controller\"),\n\t}\n\tc.syncHandler = c.handleVersionUpdate\n\n\tcrdinformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tcast := obj.(*apiextensionsv1.CustomResourceDefinition)\n\t\t\tc.enqueueCRD(cast)\n\t\t},\n\t\tUpdateFunc: func(oldObj, newObj interface{}) {\n\t\t\t\/\/ Enqueue both old and new object to make sure we remove and add appropriate API services.\n\t\t\t\/\/ The working queue will resolve any duplicates and only changes will stay in the queue.\n\t\t\tc.enqueueCRD(oldObj.(*apiextensionsv1.CustomResourceDefinition))\n\t\t\tc.enqueueCRD(newObj.(*apiextensionsv1.CustomResourceDefinition))\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\tcast, ok := obj.(*apiextensionsv1.CustomResourceDefinition)\n\t\t\tif !ok {\n\t\t\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\t\t\tif !ok {\n\t\t\t\t\tklog.V(2).Infof(\"Couldn't get object from tombstone %#v\", obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcast, ok = tombstone.Obj.(*apiextensionsv1.CustomResourceDefinition)\n\t\t\t\tif !ok {\n\t\t\t\t\tklog.V(2).Infof(\"Tombstone contained unexpected object: %#v\", obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tc.enqueueCRD(cast)\n\t\t},\n\t})\n\n\treturn c\n}\n\nfunc (c *crdRegistrationController) Run(workers int, stopCh <-chan struct{}) {\n\tdefer utilruntime.HandleCrash()\n\t\/\/ make sure the work queue is shutdown which will trigger workers to end\n\tdefer c.queue.ShutDown()\n\n\tklog.Infof(\"Starting crd-autoregister controller\")\n\tdefer klog.Infof(\"Shutting down crd-autoregister controller\")\n\n\t\/\/ wait for your secondary caches to fill before starting your work\n\tif !cache.WaitForNamedCacheSync(\"crd-autoregister\", stopCh, c.crdSynced) {\n\t\treturn\n\t}\n\n\t\/\/ process each item in the list once\n\tif crds, err := c.crdLister.List(labels.Everything()); err != nil {\n\t\tutilruntime.HandleError(err)\n\t} else {\n\t\tfor _, crd := range crds {\n\t\t\tfor _, version := range crd.Spec.Versions {\n\t\t\t\tif err := c.syncHandler(schema.GroupVersion{Group: crd.Spec.Group, Version: version.Name}); err != nil {\n\t\t\t\t\tutilruntime.HandleError(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tclose(c.syncedInitialSet)\n\n\t\/\/ start up your worker threads based on workers. Some controllers have multiple kinds of workers\n\tfor i := 0; i < workers; i++ {\n\t\t\/\/ runWorker will loop until \"something bad\" happens. The .Until will then rekick the worker\n\t\t\/\/ after one second\n\t\tgo wait.Until(c.runWorker, time.Second, stopCh)\n\t}\n\n\t\/\/ wait until we're told to stop\n\t<-stopCh\n}\n\n\/\/ WaitForInitialSync blocks until the initial set of CRD resources has been processed\nfunc (c *crdRegistrationController) WaitForInitialSync() {\n\t<-c.syncedInitialSet\n}\n\nfunc (c *crdRegistrationController) runWorker() {\n\t\/\/ hot loop until we're told to stop. processNextWorkItem will automatically wait until there's work\n\t\/\/ available, so we don't worry about secondary waits\n\tfor c.processNextWorkItem() {\n\t}\n}\n\n\/\/ processNextWorkItem deals with one key off the queue. It returns false when it's time to quit.\nfunc (c *crdRegistrationController) processNextWorkItem() bool {\n\t\/\/ pull the next work item from queue. It should be a key we use to lookup something in a cache\n\tkey, quit := c.queue.Get()\n\tif quit {\n\t\treturn false\n\t}\n\t\/\/ you always have to indicate to the queue that you've completed a piece of work\n\tdefer c.queue.Done(key)\n\n\t\/\/ do your work on the key. This method will contains your \"do stuff\" logic\n\terr := c.syncHandler(key.(schema.GroupVersion))\n\tif err == nil {\n\t\t\/\/ if you had no error, tell the queue to stop tracking history for your key. This will\n\t\t\/\/ reset things like failure counts for per-item rate limiting\n\t\tc.queue.Forget(key)\n\t\treturn true\n\t}\n\n\t\/\/ there was a failure so be sure to report it. This method allows for pluggable error handling\n\t\/\/ which can be used for things like cluster-monitoring\n\tutilruntime.HandleError(fmt.Errorf(\"%v failed with : %v\", key, err))\n\t\/\/ since we failed, we should requeue the item to work on later. This method will add a backoff\n\t\/\/ to avoid hotlooping on particular items (they're probably still not going to work right away)\n\t\/\/ and overall controller protection (everything I've done is broken, this controller needs to\n\t\/\/ calm down or it can starve other useful work) cases.\n\tc.queue.AddRateLimited(key)\n\n\treturn true\n}\n\nfunc (c *crdRegistrationController) enqueueCRD(crd *apiextensionsv1.CustomResourceDefinition) {\n\tfor _, version := range crd.Spec.Versions {\n\t\tc.queue.Add(schema.GroupVersion{Group: crd.Spec.Group, Version: version.Name})\n\t}\n}\n\nfunc (c *crdRegistrationController) handleVersionUpdate(groupVersion schema.GroupVersion) error {\n\tapiServiceName := groupVersion.Version + \".\" + groupVersion.Group\n\n\tif apiserver.APIServiceAlreadyExists(groupVersion) {\n\t\treturn nil\n\t}\n\n\t\/\/ check all CRDs. There shouldn't that many, but if we have problems later we can index them\n\tcrds, err := c.crdLister.List(labels.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, crd := range crds {\n\t\tif crd.Spec.Group != groupVersion.Group {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, version := range crd.Spec.Versions {\n\t\t\tif version.Name != groupVersion.Version || !version.Served {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tc.apiServiceRegistration.AddAPIServiceToSync(&v1.APIService{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: apiServiceName},\n\t\t\t\tSpec: v1.APIServiceSpec{\n\t\t\t\t\tGroup: groupVersion.Group,\n\t\t\t\t\tVersion: groupVersion.Version,\n\t\t\t\t\tGroupPriorityMinimum: 1000, \/\/ CRDs should have relatively low priority\n\t\t\t\t\tVersionPriority: 100, \/\/ CRDs will be sorted by kube-like versions like any other APIService with the same VersionPriority\n\t\t\t\t},\n\t\t\t})\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tc.apiServiceRegistration.RemoveAPIServiceToSync(apiServiceName)\n\treturn nil\n}\n<commit_msg>UPSTREAM: <carry>: remove apiservice from sync in CRD registration when it exists<commit_after>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage crdregistration\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"k8s.io\/klog\/v2\"\n\n\tapiextensionsv1 \"k8s.io\/apiextensions-apiserver\/pkg\/apis\/apiextensions\/v1\"\n\tcrdinformers \"k8s.io\/apiextensions-apiserver\/pkg\/client\/informers\/externalversions\/apiextensions\/v1\"\n\tcrdlisters \"k8s.io\/apiextensions-apiserver\/pkg\/client\/listers\/apiextensions\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\tutilruntime \"k8s.io\/apimachinery\/pkg\/util\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\t\"k8s.io\/client-go\/util\/workqueue\"\n\tv1 \"k8s.io\/kube-aggregator\/pkg\/apis\/apiregistration\/v1\"\n\t\"k8s.io\/kube-aggregator\/pkg\/apiserver\"\n)\n\n\/\/ AutoAPIServiceRegistration is an interface which callers can re-declare locally and properly cast to for\n\/\/ adding and removing APIServices\ntype AutoAPIServiceRegistration interface {\n\t\/\/ AddAPIServiceToSync adds an API service to auto-register.\n\tAddAPIServiceToSync(in *v1.APIService)\n\t\/\/ RemoveAPIServiceToSync removes an API service to auto-register.\n\tRemoveAPIServiceToSync(name string)\n}\n\ntype crdRegistrationController struct {\n\tcrdLister crdlisters.CustomResourceDefinitionLister\n\tcrdSynced cache.InformerSynced\n\n\tapiServiceRegistration AutoAPIServiceRegistration\n\n\tsyncHandler func(groupVersion schema.GroupVersion) error\n\n\tsyncedInitialSet chan struct{}\n\n\t\/\/ queue is where incoming work is placed to de-dup and to allow \"easy\" rate limited requeues on errors\n\t\/\/ this is actually keyed by a groupVersion\n\tqueue workqueue.RateLimitingInterface\n}\n\n\/\/ NewCRDRegistrationController returns a controller which will register CRD GroupVersions with the auto APIService registration\n\/\/ controller so they automatically stay in sync.\nfunc NewCRDRegistrationController(crdinformer crdinformers.CustomResourceDefinitionInformer, apiServiceRegistration AutoAPIServiceRegistration) *crdRegistrationController {\n\tc := &crdRegistrationController{\n\t\tcrdLister: crdinformer.Lister(),\n\t\tcrdSynced: crdinformer.Informer().HasSynced,\n\t\tapiServiceRegistration: apiServiceRegistration,\n\t\tsyncedInitialSet: make(chan struct{}),\n\t\tqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), \"crd_autoregistration_controller\"),\n\t}\n\tc.syncHandler = c.handleVersionUpdate\n\n\tcrdinformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{\n\t\tAddFunc: func(obj interface{}) {\n\t\t\tcast := obj.(*apiextensionsv1.CustomResourceDefinition)\n\t\t\tc.enqueueCRD(cast)\n\t\t},\n\t\tUpdateFunc: func(oldObj, newObj interface{}) {\n\t\t\t\/\/ Enqueue both old and new object to make sure we remove and add appropriate API services.\n\t\t\t\/\/ The working queue will resolve any duplicates and only changes will stay in the queue.\n\t\t\tc.enqueueCRD(oldObj.(*apiextensionsv1.CustomResourceDefinition))\n\t\t\tc.enqueueCRD(newObj.(*apiextensionsv1.CustomResourceDefinition))\n\t\t},\n\t\tDeleteFunc: func(obj interface{}) {\n\t\t\tcast, ok := obj.(*apiextensionsv1.CustomResourceDefinition)\n\t\t\tif !ok {\n\t\t\t\ttombstone, ok := obj.(cache.DeletedFinalStateUnknown)\n\t\t\t\tif !ok {\n\t\t\t\t\tklog.V(2).Infof(\"Couldn't get object from tombstone %#v\", obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tcast, ok = tombstone.Obj.(*apiextensionsv1.CustomResourceDefinition)\n\t\t\t\tif !ok {\n\t\t\t\t\tklog.V(2).Infof(\"Tombstone contained unexpected object: %#v\", obj)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tc.enqueueCRD(cast)\n\t\t},\n\t})\n\n\treturn c\n}\n\nfunc (c *crdRegistrationController) Run(workers int, stopCh <-chan struct{}) {\n\tdefer utilruntime.HandleCrash()\n\t\/\/ make sure the work queue is shutdown which will trigger workers to end\n\tdefer c.queue.ShutDown()\n\n\tklog.Infof(\"Starting crd-autoregister controller\")\n\tdefer klog.Infof(\"Shutting down crd-autoregister controller\")\n\n\t\/\/ wait for your secondary caches to fill before starting your work\n\tif !cache.WaitForNamedCacheSync(\"crd-autoregister\", stopCh, c.crdSynced) {\n\t\treturn\n\t}\n\n\t\/\/ process each item in the list once\n\tif crds, err := c.crdLister.List(labels.Everything()); err != nil {\n\t\tutilruntime.HandleError(err)\n\t} else {\n\t\tfor _, crd := range crds {\n\t\t\tfor _, version := range crd.Spec.Versions {\n\t\t\t\tif err := c.syncHandler(schema.GroupVersion{Group: crd.Spec.Group, Version: version.Name}); err != nil {\n\t\t\t\t\tutilruntime.HandleError(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tclose(c.syncedInitialSet)\n\n\t\/\/ start up your worker threads based on workers. Some controllers have multiple kinds of workers\n\tfor i := 0; i < workers; i++ {\n\t\t\/\/ runWorker will loop until \"something bad\" happens. The .Until will then rekick the worker\n\t\t\/\/ after one second\n\t\tgo wait.Until(c.runWorker, time.Second, stopCh)\n\t}\n\n\t\/\/ wait until we're told to stop\n\t<-stopCh\n}\n\n\/\/ WaitForInitialSync blocks until the initial set of CRD resources has been processed\nfunc (c *crdRegistrationController) WaitForInitialSync() {\n\t<-c.syncedInitialSet\n}\n\nfunc (c *crdRegistrationController) runWorker() {\n\t\/\/ hot loop until we're told to stop. processNextWorkItem will automatically wait until there's work\n\t\/\/ available, so we don't worry about secondary waits\n\tfor c.processNextWorkItem() {\n\t}\n}\n\n\/\/ processNextWorkItem deals with one key off the queue. It returns false when it's time to quit.\nfunc (c *crdRegistrationController) processNextWorkItem() bool {\n\t\/\/ pull the next work item from queue. It should be a key we use to lookup something in a cache\n\tkey, quit := c.queue.Get()\n\tif quit {\n\t\treturn false\n\t}\n\t\/\/ you always have to indicate to the queue that you've completed a piece of work\n\tdefer c.queue.Done(key)\n\n\t\/\/ do your work on the key. This method will contains your \"do stuff\" logic\n\terr := c.syncHandler(key.(schema.GroupVersion))\n\tif err == nil {\n\t\t\/\/ if you had no error, tell the queue to stop tracking history for your key. This will\n\t\t\/\/ reset things like failure counts for per-item rate limiting\n\t\tc.queue.Forget(key)\n\t\treturn true\n\t}\n\n\t\/\/ there was a failure so be sure to report it. This method allows for pluggable error handling\n\t\/\/ which can be used for things like cluster-monitoring\n\tutilruntime.HandleError(fmt.Errorf(\"%v failed with : %v\", key, err))\n\t\/\/ since we failed, we should requeue the item to work on later. This method will add a backoff\n\t\/\/ to avoid hotlooping on particular items (they're probably still not going to work right away)\n\t\/\/ and overall controller protection (everything I've done is broken, this controller needs to\n\t\/\/ calm down or it can starve other useful work) cases.\n\tc.queue.AddRateLimited(key)\n\n\treturn true\n}\n\nfunc (c *crdRegistrationController) enqueueCRD(crd *apiextensionsv1.CustomResourceDefinition) {\n\tfor _, version := range crd.Spec.Versions {\n\t\tc.queue.Add(schema.GroupVersion{Group: crd.Spec.Group, Version: version.Name})\n\t}\n}\n\nfunc (c *crdRegistrationController) handleVersionUpdate(groupVersion schema.GroupVersion) error {\n\tapiServiceName := groupVersion.Version + \".\" + groupVersion.Group\n\n\tif apiserver.APIServiceAlreadyExists(groupVersion) {\n\t\t\/\/ Removing APIService from sync means the CRD registration controller won't sync this APIService\n\t\t\/\/ anymore. If the APIService is managed externally, this will mean the external component can\n\t\t\/\/ update this APIService without CRD controller stomping the changes on it.\n\t\tc.apiServiceRegistration.RemoveAPIServiceToSync(apiServiceName)\n\t\treturn nil\n\t}\n\n\t\/\/ check all CRDs. There shouldn't that many, but if we have problems later we can index them\n\tcrds, err := c.crdLister.List(labels.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, crd := range crds {\n\t\tif crd.Spec.Group != groupVersion.Group {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, version := range crd.Spec.Versions {\n\t\t\tif version.Name != groupVersion.Version || !version.Served {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tc.apiServiceRegistration.AddAPIServiceToSync(&v1.APIService{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: apiServiceName},\n\t\t\t\tSpec: v1.APIServiceSpec{\n\t\t\t\t\tGroup: groupVersion.Group,\n\t\t\t\t\tVersion: groupVersion.Version,\n\t\t\t\t\tGroupPriorityMinimum: 1000, \/\/ CRDs should have relatively low priority\n\t\t\t\t\tVersionPriority: 100, \/\/ CRDs will be sorted by kube-like versions like any other APIService with the same VersionPriority\n\t\t\t\t},\n\t\t\t})\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tc.apiServiceRegistration.RemoveAPIServiceToSync(apiServiceName)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Remove now-unused stemFunc<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>Coalesce events<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage propagation_test\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\t\"testing\"\n\n\t\"go.opentelemetry.io\/otel\/oteltest\"\n\t\"go.opentelemetry.io\/otel\/propagation\"\n\t\"go.opentelemetry.io\/otel\/trace\"\n)\n\nfunc BenchmarkInject(b *testing.B) {\n\tvar t propagation.TraceContext\n\n\tinjectSubBenchmarks(b, func(ctx context.Context, b *testing.B) {\n\t\treq, _ := http.NewRequest(\"GET\", \"http:\/\/example.com\", nil)\n\t\tb.ResetTimer()\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tt.Inject(ctx, propagation.HeaderCarrier(req.Header))\n\t\t}\n\t})\n}\n\nfunc injectSubBenchmarks(b *testing.B, fn func(context.Context, *testing.B)) {\n\tb.Run(\"SampledSpanContext\", func(b *testing.B) {\n\t\tspanID, _ := trace.SpanIDFromHex(\"00f067aa0ba902b7\")\n\t\ttraceID, _ := trace.TraceIDFromHex(\"4bf92f3577b34da6a3ce929d0e0e4736\")\n\n\t\tmockTracer := oteltest.DefaultTracer()\n\t\tb.ReportAllocs()\n\t\tsc := trace.NewSpanContext(trace.SpanContextConfig{\n\t\t\tTraceID: traceID,\n\t\t\tSpanID: spanID,\n\t\t\tTraceFlags: trace.FlagsSampled,\n\t\t})\n\t\tctx := trace.ContextWithRemoteSpanContext(context.Background(), sc)\n\t\tctx, _ = mockTracer.Start(ctx, \"inject\")\n\t\tfn(ctx, b)\n\t})\n\n\tb.Run(\"WithoutSpanContext\", func(b *testing.B) {\n\t\tb.ReportAllocs()\n\t\tctx := context.Background()\n\t\tfn(ctx, b)\n\t})\n}\n\nfunc BenchmarkExtract(b *testing.B) {\n\textractSubBenchmarks(b, func(b *testing.B, req *http.Request) {\n\t\tvar propagator propagation.TraceContext\n\t\tctx := context.Background()\n\t\tb.ResetTimer()\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tpropagator.Extract(ctx, propagation.HeaderCarrier(req.Header))\n\t\t}\n\t})\n}\n\nfunc extractSubBenchmarks(b *testing.B, fn func(*testing.B, *http.Request)) {\n\tb.Run(\"Sampled\", func(b *testing.B) {\n\t\treq, _ := http.NewRequest(\"GET\", \"http:\/\/example.com\", nil)\n\t\treq.Header.Set(\"traceparent\", \"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01\")\n\t\tb.ReportAllocs()\n\n\t\tfn(b, req)\n\t})\n\n\tb.Run(\"BogusVersion\", func(b *testing.B) {\n\t\treq, _ := http.NewRequest(\"GET\", \"http:\/\/example.com\", nil)\n\t\treq.Header.Set(\"traceparent\", \"qw-00000000000000000000000000000000-0000000000000000-01\")\n\t\tb.ReportAllocs()\n\t\tfn(b, req)\n\t})\n\n\tb.Run(\"FutureAdditionalData\", func(b *testing.B) {\n\t\treq, _ := http.NewRequest(\"GET\", \"http:\/\/example.com\", nil)\n\t\treq.Header.Set(\"traceparent\", \"02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-09-XYZxsf09\")\n\t\tb.ReportAllocs()\n\t\tfn(b, req)\n\t})\n}\n<commit_msg>Simplify trace context benchmark test (#2109)<commit_after>\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage propagation_test\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\t\"testing\"\n\n\t\"go.opentelemetry.io\/otel\/propagation\"\n\t\"go.opentelemetry.io\/otel\/trace\"\n)\n\nfunc BenchmarkInject(b *testing.B) {\n\tvar t propagation.TraceContext\n\n\tinjectSubBenchmarks(b, func(ctx context.Context, b *testing.B) {\n\t\th := http.Header{}\n\t\tb.ResetTimer()\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tt.Inject(ctx, propagation.HeaderCarrier(h))\n\t\t}\n\t})\n}\n\nfunc injectSubBenchmarks(b *testing.B, fn func(context.Context, *testing.B)) {\n\tb.Run(\"SampledSpanContext\", func(b *testing.B) {\n\t\tb.ReportAllocs()\n\t\tsc := trace.NewSpanContext(trace.SpanContextConfig{\n\t\t\tTraceID: traceID,\n\t\t\tSpanID: spanID,\n\t\t\tTraceFlags: trace.FlagsSampled,\n\t\t})\n\t\tctx := trace.ContextWithRemoteSpanContext(context.Background(), sc)\n\t\tfn(ctx, b)\n\t})\n\n\tb.Run(\"WithoutSpanContext\", func(b *testing.B) {\n\t\tb.ReportAllocs()\n\t\tctx := context.Background()\n\t\tfn(ctx, b)\n\t})\n}\n\nfunc BenchmarkExtract(b *testing.B) {\n\textractSubBenchmarks(b, func(b *testing.B, req *http.Request) {\n\t\tvar propagator propagation.TraceContext\n\t\tctx := context.Background()\n\t\tb.ResetTimer()\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tpropagator.Extract(ctx, propagation.HeaderCarrier(req.Header))\n\t\t}\n\t})\n}\n\nfunc extractSubBenchmarks(b *testing.B, fn func(*testing.B, *http.Request)) {\n\tb.Run(\"Sampled\", func(b *testing.B) {\n\t\treq, _ := http.NewRequest(\"GET\", \"http:\/\/example.com\", nil)\n\t\treq.Header.Set(\"traceparent\", \"00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01\")\n\t\tb.ReportAllocs()\n\n\t\tfn(b, req)\n\t})\n\n\tb.Run(\"BogusVersion\", func(b *testing.B) {\n\t\treq, _ := http.NewRequest(\"GET\", \"http:\/\/example.com\", nil)\n\t\treq.Header.Set(\"traceparent\", \"qw-00000000000000000000000000000000-0000000000000000-01\")\n\t\tb.ReportAllocs()\n\t\tfn(b, req)\n\t})\n\n\tb.Run(\"FutureAdditionalData\", func(b *testing.B) {\n\t\treq, _ := http.NewRequest(\"GET\", \"http:\/\/example.com\", nil)\n\t\treq.Header.Set(\"traceparent\", \"02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-09-XYZxsf09\")\n\t\tb.ReportAllocs()\n\t\tfn(b, req)\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ util.go -- various utilities\n\/\/\n\npackage srnd\n\nimport (\n \"github.com\/majestrate\/srndv2\/src\/nacl\"\n \"crypto\/sha1\"\n \"crypto\/sha512\"\n \"encoding\/base64\"\n \"encoding\/hex\"\n \"fmt\"\n \"io\"\n \"log\"\n \"net\"\n \"os\"\n \"path\/filepath\"\n \"strconv\"\n \"strings\"\n \"time\"\n)\n\nfunc DelFile(fname string) {\n if CheckFile(fname) {\n os.Remove(fname)\n }\n}\n\nfunc CheckFile(fname string) bool {\n if _, err := os.Stat(fname) ; os.IsNotExist(err) {\n return false\n }\n return true\n}\n\nfunc IsDir(dirname string) bool {\n stat, err := os.Stat(dirname)\n if err != nil {\n log.Fatal(err)\n }\n return stat.IsDir()\n}\n\n\/\/ ensure a directory exists\nfunc EnsureDir(dirname string) {\n stat, err := os.Stat(dirname)\n if os.IsNotExist(err) {\n os.Mkdir(dirname, 0755)\n } else if ! stat.IsDir() {\n os.Remove(dirname)\n os.Mkdir(dirname, 0755)\n }\n}\n\n\/\/ TODO make this work better\nfunc ValidMessageID(id string) bool {\n id_len := len(id)\n if id_len < 5 {\n return false \n }\n\n at_idx := strings.Index(id, \"@\")\n if at_idx < 3 {\n return false\n }\n \n for idx, c := range id {\n if idx == 0 {\n if c == '<' {\n continue\n }\n } else if idx == id_len - 1 {\n if c == '>' {\n continue\n }\n } else {\n if idx == at_idx {\n continue\n }\n if c >= 'a' && c <= 'z' {\n continue\n }\n if c >= 'A' && c <= 'Z' {\n continue\n }\n if c >= '0' && c <= '9' {\n continue\n }\n if c == '.' {\n continue\n }\n }\n log.Printf(\"bad message ID: %s , invalid char at %d: %c\", id, idx, c)\n return false\n }\n return true\n}\n\n\/\/ message id hash\nfunc HashMessageID(msgid string) string {\n return fmt.Sprintf(\"%x\", sha1.Sum([]byte(msgid)))\n}\n\/\/ short message id hash\n\/\/ >>hash\nfunc ShortHashMessageID(msgid string) string {\n return HashMessageID(msgid)[:10]\n}\n\ntype lineWriter struct {\n io.Writer\n wr io.Writer\n delim []byte\n}\n\nfunc NewLineWriter(wr io.Writer, delim string) io.Writer {\n return lineWriter{wr, wr, []byte(delim)}\n}\n\nfunc (self lineWriter) Write(data []byte) (n int, err error) {\n n, err = self.wr.Write(data)\n self.wr.Write(self.delim)\n return n, err\n}\n\nfunc OpenFileWriter(fname string) (io.WriteCloser, error) {\n return os.Create(fname)\n}\n\n\/\/ make a random string\nfunc randStr(length int) string {\n return hex.EncodeToString(nacl.RandBytes(length))[length:]\n}\n\n\n\/\/ time for right now as int64\nfunc timeNow() int64 {\n return time.Now().Unix()\n}\n\n\/\/ sanitize data for nntp\nfunc nntpSanitize(data string) string {\n parts := strings.Split(data, \"\\n.\\n\")\n return parts[0]\n}\n\n\ntype int64Sorter []int64\n\nfunc (self int64Sorter) Len() int {\n return len(self)\n}\n\nfunc (self int64Sorter) Less(i, j int) bool {\n return self[i] < self[j]\n}\n\n\nfunc (self int64Sorter) Swap(i, j int) {\n tmp := self[j]\n self[j] = self[i]\n self[i] = tmp\n}\n\n\n\/\/ obtain the \"real\" ip address\nfunc getRealIP(name string) string {\n if len(name) > 0 {\n ip , err := net.ResolveIPAddr(\"ip\", name)\n if err == nil {\n if ip.IP.IsGlobalUnicast() {\n return ip.IP.String()\n }\n }\n }\n return \"\"\n}\n\n\/\/ check that we have permission to access this\n\/\/ fatal on fail\nfunc checkPerms(fname string) {\n fstat, err := os.Stat(fname)\n if err != nil {\n log.Fatalf(\"Cannot access %s, %s\", fname, err)\n }\n \/\/ check if we can access this dir\n if fstat.IsDir() {\n tmpfname := filepath.Join(fname, \".test\")\n f, err := os.Create(tmpfname)\n if err != nil {\n log.Fatalf(\"No Write access in %s, %s\", fname, err)\n }\n err = f.Close()\n if err != nil {\n log.Fatalf(\"failed to close test file %s !? %s\", tmpfname, err)\n }\n err = os.Remove(tmpfname)\n if err != nil {\n log.Fatalf(\"failed to remove test file %s, %s\", tmpfname, err)\n }\n } else {\n \/\/ this isn't a dir, treat it like a regular file\n f, err := os.Open(fname)\n if err != nil {\n log.Fatalf(\"cannot read file %s, %s\", fname, err)\n }\n f.Close()\n }\n}\n\n\/\/ given an address\n\/\/ generate a new encryption key for it\n\/\/ return the encryption key and the encrypted address\nfunc newAddrEnc(addr string) (string, string) {\n key_bytes := nacl.RandBytes(32)\n key := base64.StdEncoding.EncodeToString(key_bytes)\n return key, encAddr(addr, key)\n}\n\n\/\/ xor address with a one time pad\n\/\/ if the address isn't long enough it's padded with spaces\nfunc encAddr(addr, key string) string {\n key_bytes, err := base64.StdEncoding.DecodeString(key)\n\n if err != nil {\n log.Println(\"encAddr() key base64 decode\", err)\n return \"\"\n }\n \n if len(addr) > len(key_bytes) {\n log.Println(\"encAddr() len(addr) > len(key_bytes)\")\n return \"\"\n }\n \n \/\/ pad with spaces\n for len(addr) < len(key_bytes) {\n addr += \" \"\n }\n\n addr_bytes := []byte(addr)\n res_bytes := make([]byte, len(addr_bytes))\n for idx, b := range key_bytes {\n res_bytes[idx] = addr_bytes[idx] ^ b\n }\n \n return base64.StdEncoding.EncodeToString(res_bytes)\n}\n\n\/\/ decrypt an address\n\/\/ strips any whitespaces\nfunc decAddr(encaddr, key string) string {\n encaddr_bytes, err := base64.StdEncoding.DecodeString(encaddr)\n if err != nil {\n log.Println(\"decAddr() encaddr base64 decode\", err)\n return \"\"\n }\n if len(encaddr_bytes) != len(key) {\n log.Println(\"decAddr() len(encaddr_bytes) != len(key)\")\n return \"\"\n }\n key_bytes, err := base64.StdEncoding.DecodeString(key)\n if err != nil {\n log.Println(\"decAddr() key base64 decode\", err)\n }\n res_bytes := make([]byte, len(key))\n for idx, b := range key_bytes {\n res_bytes[idx] = encaddr_bytes[idx] ^ b\n }\n res := string(res_bytes)\n return strings.Trim(res, \" \")\n}\n\n\nfunc newsgroupValidFormat(newsgroup string) bool {\n \/\/ too long newsgroup\n if len(newsgroup) > 128 {\n return false\n }\n for _, ch := range newsgroup {\n if ch >= 'a' && ch <= 'z' {\n continue\n }\n if ch >= '0' && ch <= '9' {\n continue\n }\n if ch >= 'A' && ch <= 'Z' {\n continue\n }\n if ch == '.' {\n continue\n }\n return false\n }\n return true\n}\n\n\n\/\/ generate a new signing keypair\n\/\/ public, secret\nfunc newSignKeypair() (string, string) {\n kp := nacl.GenSignKeypair()\n defer kp.Free()\n pk := kp.Public()\n sk := kp.Secret()\n return hex.EncodeToString(pk), hex.EncodeToString(sk)\n}\n\n\/\/ make a utf-8 tripcode\nfunc makeTripcode(pk string) string {\n data, err := hex.DecodeString(pk)\n if err == nil {\n tripcode := \"\"\n \/\/ here is the python code this is based off of\n \/\/ i do something slightly different but this is the base\n \/\/\n \/\/ for x in range(0, length \/ 2):\n \/\/ pub_short += '&#%i;' % (9600 + int(full_pubkey_hex[x*2:x*2+2], 16))\n \/\/ length -= length \/ 2\n \/\/ for x in range(0, length):\n \/\/ pub_short += '&#%i;' % (9600 + int(full_pubkey_hex[-(length*2):][x*2:x*2+2], 16))\n \/\/\n for _, c := range data {\n ch := 9600\n ch += int(c)\n tripcode += fmt.Sprintf(\"&#%04d;\", ch)\n }\n return tripcode\n }\n return \"[invalid]\"\n}\n\n\/\/ generate a new message id with base name\nfunc genMessageID(name string) string {\n return fmt.Sprintf(\"<%s%d@%s>\", randStr(5), timeNow(), name)\n}\n\n\/\/ time now as a string timestamp\nfunc timeNowStr() string {\n return time.Unix(timeNow(), 0).UTC().Format(time.RFC1123Z)\n}\n\n\/\/ get from a map an int given a key or fall back to a default value\nfunc mapGetInt(m map[string]string, key string, fallback int) int {\n val, ok := m[key]\n if ok {\n i, err := strconv.ParseInt(val, 10, 32)\n if err == nil {\n return int(i)\n }\n } \n return fallback\n}\n\nfunc isSage(str string) bool {\n str = strings.ToLower(str)\n return str == \"sage\" || strings.HasPrefix(str, \"sage \")\n}\n\nfunc unhex(str string) []byte {\n buff, _ := hex.DecodeString(str)\n return buff\n}\n\nfunc hexify(data []byte) string {\n return hex.EncodeToString(data)\n}\n\n\/\/ extract pubkey from secret key\n\/\/ return as base32\nfunc getSignPubkey(sk []byte) string {\n return hexify(nacl.GetSignPubkey(sk))\n}\n\n\/\/ sign data with secret key the fucky srnd way\n\/\/ return signature as base32\nfunc cryptoSign(data, sk []byte) string {\n \/\/ hash\n hash := sha512.Sum512(data)\n log.Printf(\"hash=%s len=%s\", hexify(hash[:]), len(data))\n \/\/ sign\n sig := nacl.CryptoSignFucky(hash[:], sk)\n return hexify(sig)\n}\n\n\/\/ given a tripcode after the #\n\/\/ make a private key byteslice\nfunc parseTripcodeSecret(str string) []byte {\n \/\/ try decoding hex\n raw := unhex(str)\n keylen := nacl.CryptoSignSecretLen()\n if raw == nil || len(raw) != keylen {\n \/\/ treat this as a \"regular\" chan tripcode\n \/\/ decode as bytes then pad the rest with 0s if it doesn't fit\n raw = make([]byte, keylen)\n str_bytes := []byte(str)\n if len(str_bytes) > keylen {\n copy(raw, str_bytes[:keylen])\n } else {\n copy(raw, str_bytes)\n }\n } \n return raw\n}\n<commit_msg>debugging<commit_after>\/\/\n\/\/ util.go -- various utilities\n\/\/\n\npackage srnd\n\nimport (\n \"github.com\/majestrate\/srndv2\/src\/nacl\"\n \"crypto\/sha1\"\n \"crypto\/sha512\"\n \"encoding\/base64\"\n \"encoding\/hex\"\n \"fmt\"\n \"io\"\n \"log\"\n \"net\"\n \"os\"\n \"path\/filepath\"\n \"strconv\"\n \"strings\"\n \"time\"\n)\n\nfunc DelFile(fname string) {\n if CheckFile(fname) {\n os.Remove(fname)\n }\n}\n\nfunc CheckFile(fname string) bool {\n if _, err := os.Stat(fname) ; os.IsNotExist(err) {\n return false\n }\n return true\n}\n\nfunc IsDir(dirname string) bool {\n stat, err := os.Stat(dirname)\n if err != nil {\n log.Fatal(err)\n }\n return stat.IsDir()\n}\n\n\/\/ ensure a directory exists\nfunc EnsureDir(dirname string) {\n stat, err := os.Stat(dirname)\n if os.IsNotExist(err) {\n os.Mkdir(dirname, 0755)\n } else if ! stat.IsDir() {\n os.Remove(dirname)\n os.Mkdir(dirname, 0755)\n }\n}\n\n\/\/ TODO make this work better\nfunc ValidMessageID(id string) bool {\n id_len := len(id)\n if id_len < 5 {\n return false \n }\n\n at_idx := strings.Index(id, \"@\")\n if at_idx < 3 {\n return false\n }\n \n for idx, c := range id {\n if idx == 0 {\n if c == '<' {\n continue\n }\n } else if idx == id_len - 1 {\n if c == '>' {\n continue\n }\n } else {\n if idx == at_idx {\n continue\n }\n if c >= 'a' && c <= 'z' {\n continue\n }\n if c >= 'A' && c <= 'Z' {\n continue\n }\n if c >= '0' && c <= '9' {\n continue\n }\n if c == '.' {\n continue\n }\n }\n log.Printf(\"bad message ID: len=%d %s , invalid char at %d: %c\", id_len, id, idx, c)\n return false\n }\n return true\n}\n\n\/\/ message id hash\nfunc HashMessageID(msgid string) string {\n return fmt.Sprintf(\"%x\", sha1.Sum([]byte(msgid)))\n}\n\/\/ short message id hash\n\/\/ >>hash\nfunc ShortHashMessageID(msgid string) string {\n return HashMessageID(msgid)[:10]\n}\n\ntype lineWriter struct {\n io.Writer\n wr io.Writer\n delim []byte\n}\n\nfunc NewLineWriter(wr io.Writer, delim string) io.Writer {\n return lineWriter{wr, wr, []byte(delim)}\n}\n\nfunc (self lineWriter) Write(data []byte) (n int, err error) {\n n, err = self.wr.Write(data)\n self.wr.Write(self.delim)\n return n, err\n}\n\nfunc OpenFileWriter(fname string) (io.WriteCloser, error) {\n return os.Create(fname)\n}\n\n\/\/ make a random string\nfunc randStr(length int) string {\n return hex.EncodeToString(nacl.RandBytes(length))[length:]\n}\n\n\n\/\/ time for right now as int64\nfunc timeNow() int64 {\n return time.Now().Unix()\n}\n\n\/\/ sanitize data for nntp\nfunc nntpSanitize(data string) string {\n parts := strings.Split(data, \"\\n.\\n\")\n return parts[0]\n}\n\n\ntype int64Sorter []int64\n\nfunc (self int64Sorter) Len() int {\n return len(self)\n}\n\nfunc (self int64Sorter) Less(i, j int) bool {\n return self[i] < self[j]\n}\n\n\nfunc (self int64Sorter) Swap(i, j int) {\n tmp := self[j]\n self[j] = self[i]\n self[i] = tmp\n}\n\n\n\/\/ obtain the \"real\" ip address\nfunc getRealIP(name string) string {\n if len(name) > 0 {\n ip , err := net.ResolveIPAddr(\"ip\", name)\n if err == nil {\n if ip.IP.IsGlobalUnicast() {\n return ip.IP.String()\n }\n }\n }\n return \"\"\n}\n\n\/\/ check that we have permission to access this\n\/\/ fatal on fail\nfunc checkPerms(fname string) {\n fstat, err := os.Stat(fname)\n if err != nil {\n log.Fatalf(\"Cannot access %s, %s\", fname, err)\n }\n \/\/ check if we can access this dir\n if fstat.IsDir() {\n tmpfname := filepath.Join(fname, \".test\")\n f, err := os.Create(tmpfname)\n if err != nil {\n log.Fatalf(\"No Write access in %s, %s\", fname, err)\n }\n err = f.Close()\n if err != nil {\n log.Fatalf(\"failed to close test file %s !? %s\", tmpfname, err)\n }\n err = os.Remove(tmpfname)\n if err != nil {\n log.Fatalf(\"failed to remove test file %s, %s\", tmpfname, err)\n }\n } else {\n \/\/ this isn't a dir, treat it like a regular file\n f, err := os.Open(fname)\n if err != nil {\n log.Fatalf(\"cannot read file %s, %s\", fname, err)\n }\n f.Close()\n }\n}\n\n\/\/ given an address\n\/\/ generate a new encryption key for it\n\/\/ return the encryption key and the encrypted address\nfunc newAddrEnc(addr string) (string, string) {\n key_bytes := nacl.RandBytes(32)\n key := base64.StdEncoding.EncodeToString(key_bytes)\n return key, encAddr(addr, key)\n}\n\n\/\/ xor address with a one time pad\n\/\/ if the address isn't long enough it's padded with spaces\nfunc encAddr(addr, key string) string {\n key_bytes, err := base64.StdEncoding.DecodeString(key)\n\n if err != nil {\n log.Println(\"encAddr() key base64 decode\", err)\n return \"\"\n }\n \n if len(addr) > len(key_bytes) {\n log.Println(\"encAddr() len(addr) > len(key_bytes)\")\n return \"\"\n }\n \n \/\/ pad with spaces\n for len(addr) < len(key_bytes) {\n addr += \" \"\n }\n\n addr_bytes := []byte(addr)\n res_bytes := make([]byte, len(addr_bytes))\n for idx, b := range key_bytes {\n res_bytes[idx] = addr_bytes[idx] ^ b\n }\n \n return base64.StdEncoding.EncodeToString(res_bytes)\n}\n\n\/\/ decrypt an address\n\/\/ strips any whitespaces\nfunc decAddr(encaddr, key string) string {\n encaddr_bytes, err := base64.StdEncoding.DecodeString(encaddr)\n if err != nil {\n log.Println(\"decAddr() encaddr base64 decode\", err)\n return \"\"\n }\n if len(encaddr_bytes) != len(key) {\n log.Println(\"decAddr() len(encaddr_bytes) != len(key)\")\n return \"\"\n }\n key_bytes, err := base64.StdEncoding.DecodeString(key)\n if err != nil {\n log.Println(\"decAddr() key base64 decode\", err)\n }\n res_bytes := make([]byte, len(key))\n for idx, b := range key_bytes {\n res_bytes[idx] = encaddr_bytes[idx] ^ b\n }\n res := string(res_bytes)\n return strings.Trim(res, \" \")\n}\n\n\nfunc newsgroupValidFormat(newsgroup string) bool {\n \/\/ too long newsgroup\n if len(newsgroup) > 128 {\n return false\n }\n for _, ch := range newsgroup {\n if ch >= 'a' && ch <= 'z' {\n continue\n }\n if ch >= '0' && ch <= '9' {\n continue\n }\n if ch >= 'A' && ch <= 'Z' {\n continue\n }\n if ch == '.' {\n continue\n }\n return false\n }\n return true\n}\n\n\n\/\/ generate a new signing keypair\n\/\/ public, secret\nfunc newSignKeypair() (string, string) {\n kp := nacl.GenSignKeypair()\n defer kp.Free()\n pk := kp.Public()\n sk := kp.Secret()\n return hex.EncodeToString(pk), hex.EncodeToString(sk)\n}\n\n\/\/ make a utf-8 tripcode\nfunc makeTripcode(pk string) string {\n data, err := hex.DecodeString(pk)\n if err == nil {\n tripcode := \"\"\n \/\/ here is the python code this is based off of\n \/\/ i do something slightly different but this is the base\n \/\/\n \/\/ for x in range(0, length \/ 2):\n \/\/ pub_short += '&#%i;' % (9600 + int(full_pubkey_hex[x*2:x*2+2], 16))\n \/\/ length -= length \/ 2\n \/\/ for x in range(0, length):\n \/\/ pub_short += '&#%i;' % (9600 + int(full_pubkey_hex[-(length*2):][x*2:x*2+2], 16))\n \/\/\n for _, c := range data {\n ch := 9600\n ch += int(c)\n tripcode += fmt.Sprintf(\"&#%04d;\", ch)\n }\n return tripcode\n }\n return \"[invalid]\"\n}\n\n\/\/ generate a new message id with base name\nfunc genMessageID(name string) string {\n return fmt.Sprintf(\"<%s%d@%s>\", randStr(5), timeNow(), name)\n}\n\n\/\/ time now as a string timestamp\nfunc timeNowStr() string {\n return time.Unix(timeNow(), 0).UTC().Format(time.RFC1123Z)\n}\n\n\/\/ get from a map an int given a key or fall back to a default value\nfunc mapGetInt(m map[string]string, key string, fallback int) int {\n val, ok := m[key]\n if ok {\n i, err := strconv.ParseInt(val, 10, 32)\n if err == nil {\n return int(i)\n }\n } \n return fallback\n}\n\nfunc isSage(str string) bool {\n str = strings.ToLower(str)\n return str == \"sage\" || strings.HasPrefix(str, \"sage \")\n}\n\nfunc unhex(str string) []byte {\n buff, _ := hex.DecodeString(str)\n return buff\n}\n\nfunc hexify(data []byte) string {\n return hex.EncodeToString(data)\n}\n\n\/\/ extract pubkey from secret key\n\/\/ return as base32\nfunc getSignPubkey(sk []byte) string {\n return hexify(nacl.GetSignPubkey(sk))\n}\n\n\/\/ sign data with secret key the fucky srnd way\n\/\/ return signature as base32\nfunc cryptoSign(data, sk []byte) string {\n \/\/ hash\n hash := sha512.Sum512(data)\n log.Printf(\"hash=%s len=%s\", hexify(hash[:]), len(data))\n \/\/ sign\n sig := nacl.CryptoSignFucky(hash[:], sk)\n return hexify(sig)\n}\n\n\/\/ given a tripcode after the #\n\/\/ make a private key byteslice\nfunc parseTripcodeSecret(str string) []byte {\n \/\/ try decoding hex\n raw := unhex(str)\n keylen := nacl.CryptoSignSecretLen()\n if raw == nil || len(raw) != keylen {\n \/\/ treat this as a \"regular\" chan tripcode\n \/\/ decode as bytes then pad the rest with 0s if it doesn't fit\n raw = make([]byte, keylen)\n str_bytes := []byte(str)\n if len(str_bytes) > keylen {\n copy(raw, str_bytes[:keylen])\n } else {\n copy(raw, str_bytes)\n }\n } \n return raw\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>add license<commit_after><|endoftext|>"} {"text":"<commit_before>package somaproto\n\nimport (\n\t\"github.com\/satori\/go.uuid\"\n)\n\ntype ProtoRequestOncall struct {\n\tOnCall ProtoOncall `json:\"oncall,omitempty\"`\n\tFilter ProtoOncallFilter `json:\"filter,omitempty\"`\n\tMembers []ProtoOncallMember `json:\"members,omitempty\"`\n}\n\ntype ProtoResultOncall struct {\n\tCode uint16 `json:\"code,omitempty\"`\n\tStatus string `json:\"status,omitempty\"`\n\tText []string `json:\"text,omitempty\"`\n\tOncalls []ProtoOncall `json:\"oncalls,omitempty\"`\n}\n\ntype ProtoOncall struct {\n\tId uuid.UUID `json:\"id,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tNumber string `json:\"number,omitempty\"`\n\tDetails ProtoOncallDetails `json:\"details,omitempty\"`\n}\n\ntype ProtoOncallDetails struct {\n\tCreatedAt string `json:\"createdat,omitempty\"`\n\tCreatedBy string `json:\"createdby,omitempty\"`\n\tMembers []string `json:\"members,omitempty\"`\n}\n\ntype ProtoOncallMember struct {\n\tUserName string `json:\"username,omitempty\"`\n\tUserId uuid.UUID `json\"userid,omitempty\"`\n}\n\ntype ProtoOncallFilter struct {\n\tName string `json:\"name,omitempty\"`\n\tNumber string `json:\"number,omitempty\"`\n}\n<commit_msg>oncall: add missing vim modeline<commit_after>package somaproto\n\nimport (\n\t\"github.com\/satori\/go.uuid\"\n)\n\ntype ProtoRequestOncall struct {\n\tOnCall ProtoOncall `json:\"oncall,omitempty\"`\n\tFilter ProtoOncallFilter `json:\"filter,omitempty\"`\n\tMembers []ProtoOncallMember `json:\"members,omitempty\"`\n}\n\ntype ProtoResultOncall struct {\n\tCode uint16 `json:\"code,omitempty\"`\n\tStatus string `json:\"status,omitempty\"`\n\tText []string `json:\"text,omitempty\"`\n\tOncalls []ProtoOncall `json:\"oncalls,omitempty\"`\n}\n\ntype ProtoOncall struct {\n\tId uuid.UUID `json:\"id,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tNumber string `json:\"number,omitempty\"`\n\tDetails ProtoOncallDetails `json:\"details,omitempty\"`\n}\n\ntype ProtoOncallDetails struct {\n\tCreatedAt string `json:\"createdat,omitempty\"`\n\tCreatedBy string `json:\"createdby,omitempty\"`\n\tMembers []string `json:\"members,omitempty\"`\n}\n\ntype ProtoOncallMember struct {\n\tUserName string `json:\"username,omitempty\"`\n\tUserId uuid.UUID `json\"userid,omitempty\"`\n}\n\ntype ProtoOncallFilter struct {\n\tName string `json:\"name,omitempty\"`\n\tNumber string `json:\"number,omitempty\"`\n}\n\n\/\/ vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/lxc\/lxd\/client\"\n\t\"github.com\/lxc\/lxd\/lxd-benchmark\/benchmark\"\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/gnuflag\"\n\t\"github.com\/lxc\/lxd\/shared\/version\"\n)\n\nvar argCount = gnuflag.Int(\"count\", 1, \"Number of containers to create\")\nvar argParallel = gnuflag.Int(\"parallel\", -1, \"Number of threads to use\")\nvar argImage = gnuflag.String(\"image\", \"ubuntu:\", \"Image to use for the test\")\nvar argPrivileged = gnuflag.Bool(\"privileged\", false, \"Use privileged containers\")\nvar argStart = gnuflag.Bool(\"start\", true, \"Start the container after creation\")\nvar argFreeze = gnuflag.Bool(\"freeze\", false, \"Freeze the container right after start\")\nvar argReportFile = gnuflag.String(\"report-file\", \"\", \"A CSV file to write test file to. If the file is present, it will be appended to.\")\nvar argReportLabel = gnuflag.String(\"report-label\", \"\", \"A label for the report entry. By default, the action is used.\")\n\nfunc main() {\n\terr := run(os.Args)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tos.Exit(0)\n}\n\nfunc run(args []string) error {\n\t\/\/ Parse command line\n\t\/\/ \"spawn\" is being deprecated, use \"launch\" instead.\n\tif len(os.Args) == 1 || !shared.StringInSlice(os.Args[1], []string{\"launch\", \"spawn\", \"start\", \"stop\", \"delete\"}) {\n\t\tif len(os.Args) > 1 && os.Args[1] == \"--version\" {\n\t\t\tfmt.Println(version.Version)\n\t\t\treturn nil\n\t\t}\n\n\t\tout := os.Stderr\n\t\tif len(os.Args) > 1 && os.Args[1] == \"--help\" {\n\t\t\tout = os.Stdout\n\t\t}\n\t\tgnuflag.SetOut(out)\n\n\t\tfmt.Fprintf(out, \"Usage: %s launch [--count=COUNT] [--image=IMAGE] [--privileged=BOOL] [--start=BOOL] [--freeze=BOOL] [--parallel=COUNT]\\n\", os.Args[0])\n\t\tfmt.Fprintf(out, \" %s start [--parallel=COUNT]\\n\", os.Args[0])\n\t\tfmt.Fprintf(out, \" %s stop [--parallel=COUNT]\\n\", os.Args[0])\n\t\tfmt.Fprintf(out, \" %s delete [--parallel=COUNT]\\n\\n\", os.Args[0])\n\t\tgnuflag.PrintDefaults()\n\t\tfmt.Fprintf(out, \"\\n\")\n\n\t\tif len(os.Args) > 1 && os.Args[1] == \"--help\" {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn fmt.Errorf(\"A valid action (launch, start, stop, delete) must be passed.\")\n\t}\n\n\tgnuflag.Parse(true)\n\n\t\/\/ Connect to LXD\n\tc, err := lxd.ConnectLXDUnix(\"\", nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbenchmark.PrintServerInfo(c)\n\n\tvar report *benchmark.CSVReport\n\tif *argReportFile != \"\" {\n\t\treport = &benchmark.CSVReport{Filename: *argReportFile}\n\t\tif shared.PathExists(*argReportFile) {\n\t\t\terr := report.Load()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\taction := os.Args[1]\n\tvar duration time.Duration\n\tswitch action {\n\t\/\/ \"spawn\" is being deprecated.\n\tcase \"launch\", \"spawn\":\n\t\tduration, err = benchmark.LaunchContainers(\n\t\t\tc, *argCount, *argParallel, *argImage, *argPrivileged, *argStart, *argFreeze)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase \"start\":\n\t\tcontainers, err := benchmark.GetContainers(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tduration, err = benchmark.StartContainers(c, containers, *argParallel)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase \"stop\":\n\t\tcontainers, err := benchmark.GetContainers(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tduration, err = benchmark.StopContainers(c, containers, *argParallel)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase \"delete\":\n\t\tcontainers, err := benchmark.GetContainers(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tduration, err = benchmark.DeleteContainers(c, containers, *argParallel)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif report != nil {\n\t\tlabel := action\n\t\tif *argReportLabel != \"\" {\n\t\t\tlabel = *argReportLabel\n\t\t}\n\t\treport.AddRecord(label, duration)\n\t\terr := report.Write()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>lxd-benchmark: Fix golint<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/lxc\/lxd\/client\"\n\t\"github.com\/lxc\/lxd\/lxd-benchmark\/benchmark\"\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/gnuflag\"\n\t\"github.com\/lxc\/lxd\/shared\/version\"\n)\n\nvar argCount = gnuflag.Int(\"count\", 1, \"Number of containers to create\")\nvar argParallel = gnuflag.Int(\"parallel\", -1, \"Number of threads to use\")\nvar argImage = gnuflag.String(\"image\", \"ubuntu:\", \"Image to use for the test\")\nvar argPrivileged = gnuflag.Bool(\"privileged\", false, \"Use privileged containers\")\nvar argStart = gnuflag.Bool(\"start\", true, \"Start the container after creation\")\nvar argFreeze = gnuflag.Bool(\"freeze\", false, \"Freeze the container right after start\")\nvar argReportFile = gnuflag.String(\"report-file\", \"\", \"A CSV file to write test file to. If the file is present, it will be appended to.\")\nvar argReportLabel = gnuflag.String(\"report-label\", \"\", \"A label for the report entry. By default, the action is used.\")\n\nfunc main() {\n\terr := run(os.Args)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tos.Exit(0)\n}\n\nfunc run(args []string) error {\n\t\/\/ Parse command line\n\t\/\/ \"spawn\" is being deprecated, use \"launch\" instead.\n\tif len(os.Args) == 1 || !shared.StringInSlice(os.Args[1], []string{\"launch\", \"spawn\", \"start\", \"stop\", \"delete\"}) {\n\t\tif len(os.Args) > 1 && os.Args[1] == \"--version\" {\n\t\t\tfmt.Println(version.Version)\n\t\t\treturn nil\n\t\t}\n\n\t\tout := os.Stderr\n\t\tif len(os.Args) > 1 && os.Args[1] == \"--help\" {\n\t\t\tout = os.Stdout\n\t\t}\n\t\tgnuflag.SetOut(out)\n\n\t\tfmt.Fprintf(out, \"Usage: %s launch [--count=COUNT] [--image=IMAGE] [--privileged=BOOL] [--start=BOOL] [--freeze=BOOL] [--parallel=COUNT]\\n\", os.Args[0])\n\t\tfmt.Fprintf(out, \" %s start [--parallel=COUNT]\\n\", os.Args[0])\n\t\tfmt.Fprintf(out, \" %s stop [--parallel=COUNT]\\n\", os.Args[0])\n\t\tfmt.Fprintf(out, \" %s delete [--parallel=COUNT]\\n\\n\", os.Args[0])\n\t\tgnuflag.PrintDefaults()\n\t\tfmt.Fprintf(out, \"\\n\")\n\n\t\tif len(os.Args) > 1 && os.Args[1] == \"--help\" {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn fmt.Errorf(\"A valid action (launch, start, stop, delete) must be passed\")\n\t}\n\n\tgnuflag.Parse(true)\n\n\t\/\/ Connect to LXD\n\tc, err := lxd.ConnectLXDUnix(\"\", nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbenchmark.PrintServerInfo(c)\n\n\tvar report *benchmark.CSVReport\n\tif *argReportFile != \"\" {\n\t\treport = &benchmark.CSVReport{Filename: *argReportFile}\n\t\tif shared.PathExists(*argReportFile) {\n\t\t\terr := report.Load()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\taction := os.Args[1]\n\tvar duration time.Duration\n\tswitch action {\n\t\/\/ \"spawn\" is being deprecated.\n\tcase \"launch\", \"spawn\":\n\t\tduration, err = benchmark.LaunchContainers(\n\t\t\tc, *argCount, *argParallel, *argImage, *argPrivileged, *argStart, *argFreeze)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase \"start\":\n\t\tcontainers, err := benchmark.GetContainers(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tduration, err = benchmark.StartContainers(c, containers, *argParallel)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase \"stop\":\n\t\tcontainers, err := benchmark.GetContainers(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tduration, err = benchmark.StopContainers(c, containers, *argParallel)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\tcase \"delete\":\n\t\tcontainers, err := benchmark.GetContainers(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tduration, err = benchmark.DeleteContainers(c, containers, *argParallel)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif report != nil {\n\t\tlabel := action\n\t\tif *argReportLabel != \"\" {\n\t\t\tlabel = *argReportLabel\n\t\t}\n\t\treport.AddRecord(label, duration)\n\t\terr := report.Write()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/*\n#define _GNU_SOURCE\n#include <errno.h>\n#include <fcntl.h>\n#include <libgen.h>\n#include <stdbool.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys\/mount.h>\n#include <sys\/stat.h>\n#include <sys\/types.h>\n#include <unistd.h>\n\nextern char* advance_arg(bool required);\nextern void error(char *msg);\nextern void attach_userns(int pid);\nextern int dosetns(int pid, char *nstype);\n\nint mkdir_p(const char *dir, mode_t mode)\n{\n\tconst char *tmp = dir;\n\tconst char *orig = dir;\n\tchar *makeme;\n\n\tdo {\n\t\tdir = tmp + strspn(tmp, \"\/\");\n\t\ttmp = dir + strcspn(dir, \"\/\");\n\t\tmakeme = strndup(orig, dir - orig);\n\t\tif (*makeme) {\n\t\t\tif (mkdir(makeme, mode) && errno != EEXIST) {\n\t\t\t\tfprintf(stderr, \"failed to create directory '%s': %s\\n\", makeme, strerror(errno));\n\t\t\t\tfree(makeme);\n\t\t\t\treturn -1;\n\t\t\t}\n\t\t}\n\t\tfree(makeme);\n\t} while(tmp != dir);\n\n\treturn 0;\n}\n\nvoid ensure_dir(char *dest) {\n\tstruct stat sb;\n\tif (stat(dest, &sb) == 0) {\n\t\tif ((sb.st_mode & S_IFMT) == S_IFDIR)\n\t\t\treturn;\n\t\tif (unlink(dest) < 0) {\n\t\t\tfprintf(stderr, \"Failed to remove old %s: %s\\n\", dest, strerror(errno));\n\t\t\t_exit(1);\n\t\t}\n\t}\n\tif (mkdir(dest, 0755) < 0) {\n\t\tfprintf(stderr, \"Failed to mkdir %s: %s\\n\", dest, strerror(errno));\n\t\t_exit(1);\n\t}\n}\n\nvoid ensure_file(char *dest) {\n\tstruct stat sb;\n\tint fd;\n\n\tif (stat(dest, &sb) == 0) {\n\t\tif ((sb.st_mode & S_IFMT) != S_IFDIR)\n\t\t\treturn;\n\t\tif (rmdir(dest) < 0) {\n\t\t\tfprintf(stderr, \"Failed to remove old %s: %s\\n\", dest, strerror(errno));\n\t\t\t_exit(1);\n\t\t}\n\t}\n\n\tfd = creat(dest, 0755);\n\tif (fd < 0) {\n\t\tfprintf(stderr, \"Failed to mkdir %s: %s\\n\", dest, strerror(errno));\n\t\t_exit(1);\n\t}\n\tclose(fd);\n}\n\nvoid create(char *src, char *dest) {\n\tchar *dirdup;\n\tchar *destdirname;\n\n\tstruct stat sb;\n\tif (stat(src, &sb) < 0) {\n\t\tfprintf(stderr, \"source %s does not exist\\n\", src);\n\t\t_exit(1);\n\t}\n\n\tdirdup = strdup(dest);\n\tif (!dirdup)\n\t\t_exit(1);\n\n\tdestdirname = dirname(dirdup);\n\n\tif (mkdir_p(destdirname, 0755) < 0) {\n\t\tfprintf(stderr, \"failed to create path: %s\\n\", destdirname);\n\t\tfree(dirdup);\n\t\t_exit(1);\n\t}\n\tfree(dirdup);\n\n\tswitch (sb.st_mode & S_IFMT) {\n\tcase S_IFDIR:\n\t\tensure_dir(dest);\n\t\treturn;\n\tdefault:\n\t\tensure_file(dest);\n\t\treturn;\n\t}\n}\n\nvoid forkdomount(pid_t pid) {\n\tchar *src, *dest, *opts;\n\n\tattach_userns(pid);\n\n\tif (dosetns(pid, \"mnt\") < 0) {\n\t\tfprintf(stderr, \"Failed setns to container mount namespace: %s\\n\", strerror(errno));\n\t\t_exit(1);\n\t}\n\n\tsrc = advance_arg(true);\n\tdest = advance_arg(true);\n\n\tcreate(src, dest);\n\n\tif (access(src, F_OK) < 0) {\n\t\tfprintf(stderr, \"Mount source doesn't exist: %s\\n\", strerror(errno));\n\t\t_exit(1);\n\t}\n\n\tif (access(dest, F_OK) < 0) {\n\t\tfprintf(stderr, \"Mount destination doesn't exist: %s\\n\", strerror(errno));\n\t\t_exit(1);\n\t}\n\n\t\/\/ Here, we always move recursively, because we sometimes allow\n\t\/\/ recursive mounts. If the mount has no kids then it doesn't matter,\n\t\/\/ but if it does, we want to move those too.\n\tif (mount(src, dest, \"none\", MS_MOVE | MS_REC, NULL) < 0) {\n\t\tfprintf(stderr, \"Failed mounting %s onto %s: %s\\n\", src, dest, strerror(errno));\n\t\t_exit(1);\n\t}\n\n\t_exit(0);\n}\n\nvoid forkdoumount(pid_t pid) {\n\tchar *path = NULL;\n\n\tif (dosetns(pid, \"mnt\") < 0) {\n\t\tfprintf(stderr, \"Failed setns to container mount namespace: %s\\n\", strerror(errno));\n\t\t_exit(1);\n\t}\n\n\tpath = advance_arg(true);\n\tif (access(path, F_OK) < 0) {\n\t\tfprintf(stderr, \"Mount path doesn't exist: %s\\n\", strerror(errno));\n\t\t_exit(1);\n\t}\n\n\tif (umount2(path, MNT_DETACH) < 0) {\n\t\tfprintf(stderr, \"Error unmounting %s: %s\\n\", path, strerror(errno));\n\t\t_exit(1);\n\t}\n\t_exit(0);\n}\n\nvoid forkmount() {\n\tchar *cur = NULL;\n\n\tchar *command = NULL;\n\tchar *rootfs = NULL;\n\tpid_t pid = 0;\n\n\t\/\/ Get the subcommand\n\tcommand = advance_arg(false);\n\tif (command == NULL || (strcmp(command, \"--help\") == 0 || strcmp(command, \"--version\") == 0 || strcmp(command, \"-h\") == 0)) {\n\t\treturn;\n\t}\n\n\t\/\/ Get the pid\n\tcur = advance_arg(false);\n\tif (cur == NULL || (strcmp(cur, \"--help\") == 0 || strcmp(cur, \"--version\") == 0 || strcmp(cur, \"-h\") == 0)) {\n\t\treturn;\n\t}\n\tpid = atoi(cur);\n\n\t\/\/ Check that we're root\n\tif (geteuid() != 0) {\n\t\tfprintf(stderr, \"Error: forkmount requires root privileges\\n\");\n\t\t_exit(1);\n\t}\n\n\t\/\/ Call the subcommands\n\tif (strcmp(command, \"mount\") == 0) {\n\t\tforkdomount(pid);\n\t} else if (strcmp(command, \"umount\") == 0) {\n\t\tforkdoumount(pid);\n\t}\n}\n*\/\nimport \"C\"\n\ntype cmdForkmount struct {\n\tglobal *cmdGlobal\n}\n\nfunc (c *cmdForkmount) Command() *cobra.Command {\n\t\/\/ Main subcommand\n\tcmd := &cobra.Command{}\n\tcmd.Use = \"forkmount\"\n\tcmd.Short = \"Perform mount operations\"\n\tcmd.Long = `Description:\n Perform mount operations\n\n This set of internal commands are used for all container mount\n operations.\n`\n\tcmd.Hidden = true\n\n\t\/\/ mount\n\tcmdMount := &cobra.Command{}\n\tcmdMount.Use = \"mount <PID> <source> <destination>\"\n\tcmdMount.Args = cobra.ExactArgs(3)\n\tcmdMount.RunE = c.Run\n\tcmd.AddCommand(cmdMount)\n\n\t\/\/ umount\n\tcmdUmount := &cobra.Command{}\n\tcmdUmount.Use = \"umount <PID> <path>\"\n\tcmdUmount.Args = cobra.ExactArgs(2)\n\tcmdUmount.RunE = c.Run\n\tcmd.AddCommand(cmdUmount)\n\n\treturn cmd\n}\n\nfunc (c *cmdForkmount) Run(cmd *cobra.Command, args []string) error {\n\treturn fmt.Errorf(\"This command should have been intercepted in cgo\")\n}\n<commit_msg>forkmount: ignore ENOENT and EINVAL on umount2()<commit_after>package main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/*\n#define _GNU_SOURCE\n#include <errno.h>\n#include <fcntl.h>\n#include <libgen.h>\n#include <stdbool.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys\/mount.h>\n#include <sys\/stat.h>\n#include <sys\/types.h>\n#include <unistd.h>\n\nextern char* advance_arg(bool required);\nextern void error(char *msg);\nextern void attach_userns(int pid);\nextern int dosetns(int pid, char *nstype);\n\nint mkdir_p(const char *dir, mode_t mode)\n{\n\tconst char *tmp = dir;\n\tconst char *orig = dir;\n\tchar *makeme;\n\n\tdo {\n\t\tdir = tmp + strspn(tmp, \"\/\");\n\t\ttmp = dir + strcspn(dir, \"\/\");\n\t\tmakeme = strndup(orig, dir - orig);\n\t\tif (*makeme) {\n\t\t\tif (mkdir(makeme, mode) && errno != EEXIST) {\n\t\t\t\tfprintf(stderr, \"failed to create directory '%s': %s\\n\", makeme, strerror(errno));\n\t\t\t\tfree(makeme);\n\t\t\t\treturn -1;\n\t\t\t}\n\t\t}\n\t\tfree(makeme);\n\t} while(tmp != dir);\n\n\treturn 0;\n}\n\nvoid ensure_dir(char *dest) {\n\tstruct stat sb;\n\tif (stat(dest, &sb) == 0) {\n\t\tif ((sb.st_mode & S_IFMT) == S_IFDIR)\n\t\t\treturn;\n\t\tif (unlink(dest) < 0) {\n\t\t\tfprintf(stderr, \"Failed to remove old %s: %s\\n\", dest, strerror(errno));\n\t\t\t_exit(1);\n\t\t}\n\t}\n\tif (mkdir(dest, 0755) < 0) {\n\t\tfprintf(stderr, \"Failed to mkdir %s: %s\\n\", dest, strerror(errno));\n\t\t_exit(1);\n\t}\n}\n\nvoid ensure_file(char *dest) {\n\tstruct stat sb;\n\tint fd;\n\n\tif (stat(dest, &sb) == 0) {\n\t\tif ((sb.st_mode & S_IFMT) != S_IFDIR)\n\t\t\treturn;\n\t\tif (rmdir(dest) < 0) {\n\t\t\tfprintf(stderr, \"Failed to remove old %s: %s\\n\", dest, strerror(errno));\n\t\t\t_exit(1);\n\t\t}\n\t}\n\n\tfd = creat(dest, 0755);\n\tif (fd < 0) {\n\t\tfprintf(stderr, \"Failed to mkdir %s: %s\\n\", dest, strerror(errno));\n\t\t_exit(1);\n\t}\n\tclose(fd);\n}\n\nvoid create(char *src, char *dest) {\n\tchar *dirdup;\n\tchar *destdirname;\n\n\tstruct stat sb;\n\tif (stat(src, &sb) < 0) {\n\t\tfprintf(stderr, \"source %s does not exist\\n\", src);\n\t\t_exit(1);\n\t}\n\n\tdirdup = strdup(dest);\n\tif (!dirdup)\n\t\t_exit(1);\n\n\tdestdirname = dirname(dirdup);\n\n\tif (mkdir_p(destdirname, 0755) < 0) {\n\t\tfprintf(stderr, \"failed to create path: %s\\n\", destdirname);\n\t\tfree(dirdup);\n\t\t_exit(1);\n\t}\n\tfree(dirdup);\n\n\tswitch (sb.st_mode & S_IFMT) {\n\tcase S_IFDIR:\n\t\tensure_dir(dest);\n\t\treturn;\n\tdefault:\n\t\tensure_file(dest);\n\t\treturn;\n\t}\n}\n\nvoid forkdomount(pid_t pid) {\n\tchar *src, *dest, *opts;\n\n\tattach_userns(pid);\n\n\tif (dosetns(pid, \"mnt\") < 0) {\n\t\tfprintf(stderr, \"Failed setns to container mount namespace: %s\\n\", strerror(errno));\n\t\t_exit(1);\n\t}\n\n\tsrc = advance_arg(true);\n\tdest = advance_arg(true);\n\n\tcreate(src, dest);\n\n\tif (access(src, F_OK) < 0) {\n\t\tfprintf(stderr, \"Mount source doesn't exist: %s\\n\", strerror(errno));\n\t\t_exit(1);\n\t}\n\n\tif (access(dest, F_OK) < 0) {\n\t\tfprintf(stderr, \"Mount destination doesn't exist: %s\\n\", strerror(errno));\n\t\t_exit(1);\n\t}\n\n\t\/\/ Here, we always move recursively, because we sometimes allow\n\t\/\/ recursive mounts. If the mount has no kids then it doesn't matter,\n\t\/\/ but if it does, we want to move those too.\n\tif (mount(src, dest, \"none\", MS_MOVE | MS_REC, NULL) < 0) {\n\t\tfprintf(stderr, \"Failed mounting %s onto %s: %s\\n\", src, dest, strerror(errno));\n\t\t_exit(1);\n\t}\n\n\t_exit(0);\n}\n\nvoid forkdoumount(pid_t pid) {\n\tint ret;\n\tchar *path = NULL;\n\n\tret = dosetns(pid, \"mnt\");\n\tif (ret < 0) {\n\t\tfprintf(stderr, \"Failed to setns to container mount namespace: %s\\n\", strerror(errno));\n\t\t_exit(1);\n\t}\n\n\tpath = advance_arg(true);\n\n\tret = umount2(path, MNT_DETACH);\n\tif (ret < 0) {\n\t\t\/\/ - ENOENT: The user must have unmounted and removed the path.\n\t\t\/\/ - EINVAL: The user must have unmounted. Other explanations\n\t\t\/\/ for EINVAL do not apply.\n\t\tif (errno == ENOENT || errno == EINVAL)\n\t\t\t_exit(0);\n\n\t\tfprintf(stderr, \"Error unmounting %s: %s\\n\", path, strerror(errno));\n\t\t_exit(1);\n\t}\n\n\t_exit(0);\n}\n\nvoid forkmount() {\n\tchar *cur = NULL;\n\n\tchar *command = NULL;\n\tchar *rootfs = NULL;\n\tpid_t pid = 0;\n\n\t\/\/ Get the subcommand\n\tcommand = advance_arg(false);\n\tif (command == NULL || (strcmp(command, \"--help\") == 0 || strcmp(command, \"--version\") == 0 || strcmp(command, \"-h\") == 0)) {\n\t\treturn;\n\t}\n\n\t\/\/ Get the pid\n\tcur = advance_arg(false);\n\tif (cur == NULL || (strcmp(cur, \"--help\") == 0 || strcmp(cur, \"--version\") == 0 || strcmp(cur, \"-h\") == 0)) {\n\t\treturn;\n\t}\n\tpid = atoi(cur);\n\n\t\/\/ Check that we're root\n\tif (geteuid() != 0) {\n\t\tfprintf(stderr, \"Error: forkmount requires root privileges\\n\");\n\t\t_exit(1);\n\t}\n\n\t\/\/ Call the subcommands\n\tif (strcmp(command, \"mount\") == 0) {\n\t\tforkdomount(pid);\n\t} else if (strcmp(command, \"umount\") == 0) {\n\t\tforkdoumount(pid);\n\t}\n}\n*\/\nimport \"C\"\n\ntype cmdForkmount struct {\n\tglobal *cmdGlobal\n}\n\nfunc (c *cmdForkmount) Command() *cobra.Command {\n\t\/\/ Main subcommand\n\tcmd := &cobra.Command{}\n\tcmd.Use = \"forkmount\"\n\tcmd.Short = \"Perform mount operations\"\n\tcmd.Long = `Description:\n Perform mount operations\n\n This set of internal commands are used for all container mount\n operations.\n`\n\tcmd.Hidden = true\n\n\t\/\/ mount\n\tcmdMount := &cobra.Command{}\n\tcmdMount.Use = \"mount <PID> <source> <destination>\"\n\tcmdMount.Args = cobra.ExactArgs(3)\n\tcmdMount.RunE = c.Run\n\tcmd.AddCommand(cmdMount)\n\n\t\/\/ umount\n\tcmdUmount := &cobra.Command{}\n\tcmdUmount.Use = \"umount <PID> <path>\"\n\tcmdUmount.Args = cobra.ExactArgs(2)\n\tcmdUmount.RunE = c.Run\n\tcmd.AddCommand(cmdUmount)\n\n\treturn cmd\n}\n\nfunc (c *cmdForkmount) Run(cmd *cobra.Command, args []string) error {\n\treturn fmt.Errorf(\"This command should have been intercepted in cgo\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"time\"\n\n\tlog \"github.com\/cihub\/seelog\"\n\t\"github.com\/goburrow\/modbus\"\n\t\"github.com\/stampzilla\/stampzilla-go\/nodes\/basenode\"\n\t\"github.com\/stampzilla\/stampzilla-go\/protocol\"\n)\n\n\/\/ MAIN - This is run when the init function is done\nfunc main() { \/*{{{*\/\n\tlog.Info(\"Starting SIMPLE node\")\n\n\t\/\/ Parse all commandline arguments, host and port parameters are added in the basenode init function\n\tflag.Parse()\n\n\t\/\/Get a config with the correct parameters\n\tconfig := basenode.NewConfig()\n\n\t\/\/Activate the config\n\tbasenode.SetConfig(config)\n\n\tnode := protocol.NewNode(\"simple\")\n\n\t\/\/ Modbus RTU\/ASCII\n\thandler := modbus.NewRTUClientHandler(\"\/dev\/ttyUSB0\")\n\thandler.BaudRate = 9600\n\thandler.DataBits = 8\n\thandler.Parity = \"E\"\n\thandler.StopBits = 1\n\thandler.SlaveId = 1\n\thandler.Timeout = 5 * time.Second\n\n\terr := handler.Connect()\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\n\tdefer handler.Close()\n\n\t\/\/REG_HC_TEMP_IN1 214 Reg\n\t\/\/REG_HC_TEMP_IN2 215 Reg\n\t\/\/REG_HC_TEMP_IN3 216 Reg\n\t\/\/REG_HC_TEMP_IN4 217 Reg\n\t\/\/REG_HC_TEMP_IN5 218 Reg\n\n\t\/\/REG_DAMPER_PWM 301 Reg\n\t\/\/REG_HC_WC_SIGNAL 204 Reg\n\n\tclient := modbus.NewClient(handler)\n\tresults, err := client.ReadInputRegisters(214, 1)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\n\tlog.Info(\"REG_HC_TEMP_IN1: \", results)\n\n\treturn\n\t\/\/Start communication with the server\n\tconnection := basenode.Connect()\n\n\t\/\/ Thit worker keeps track on our connection state, if we are connected or not\n\tgo monitorState(node, connection)\n\n\t\/\/node.AddElement(&protocol.Element{\n\t\/\/Type: protocol.ElementTypeColorPicker,\n\t\/\/Name: \"Example color picker\",\n\t\/\/Command: &protocol.Command{\n\t\/\/Cmd: \"color\",\n\t\/\/Args: []string{\"1\"},\n\t\/\/},\n\t\/\/Feedback: \"Devices[4].State\",\n\t\/\/})\n\n\tstate := NewState()\n\tnode.SetState(state)\n\n\tstate.AddDevice(\"1\", \"Dev1\", false)\n\tstate.AddDevice(\"2\", \"Dev2\", true)\n\n\t\/\/ This worker recives all incomming commands\n\tgo serverRecv(node, connection)\n\tselect {}\n} \/*}}}*\/\n\n\/\/ WORKER that monitors the current connection state\nfunc monitorState(node *protocol.Node, connection *basenode.Connection) {\n\tfor s := range connection.State {\n\t\tswitch s {\n\t\tcase basenode.ConnectionStateConnected:\n\t\t\tconnection.Send <- node.Node()\n\t\tcase basenode.ConnectionStateDisconnected:\n\t\t}\n\t}\n}\n\n\/\/ WORKER that recives all incomming commands\nfunc serverRecv(node *protocol.Node, connection *basenode.Connection) {\n\tfor d := range connection.Receive {\n\t\tprocessCommand(node, connection, d)\n\t}\n}\n\n\/\/ THis is called on each incomming command\nfunc processCommand(node *protocol.Node, connection *basenode.Connection, cmd protocol.Command) {\n\tif s, ok := node.State.(*State); ok {\n\t\tlog.Info(\"Incoming command from server:\", cmd)\n\t\tif len(cmd.Args) == 0 {\n\t\t\treturn\n\t\t}\n\t\tdevice := s.Device(cmd.Args[0])\n\n\t\tswitch cmd.Cmd {\n\t\tcase \"on\":\n\t\t\tdevice.State = true\n\t\t\tconnection.Send <- node.Node()\n\t\tcase \"off\":\n\t\t\tdevice.State = false\n\t\t\tconnection.Send <- node.Node()\n\t\tcase \"toggle\":\n\t\t\tlog.Info(\"got toggle\")\n\t\t\tif device.State {\n\t\t\t\tdevice.State = false\n\t\t\t} else {\n\t\t\t\tdevice.State = true\n\t\t\t}\n\t\t\tconnection.Send <- node.Node()\n\t\t}\n\t}\n}\n<commit_msg>got modbus working :D<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/goburrow\/modbus\"\n\t\"github.com\/stampzilla\/stampzilla-go\/nodes\/basenode\"\n\t\"github.com\/stampzilla\/stampzilla-go\/protocol\"\n)\n\n\/\/ MAIN - This is run when the init function is done\nfunc main() {\n\tlog.Println(\"Starting SIMPLE node\")\n\n\t\/\/ Parse all commandline arguments, host and port parameters are added in the basenode init function\n\tflag.Parse()\n\n\t\/\/Get a config with the correct parameters\n\tconfig := basenode.NewConfig()\n\n\t\/\/Activate the config\n\tbasenode.SetConfig(config)\n\n\tnode := protocol.NewNode(\"simple\")\n\n\t\/\/ Modbus RTU\/ASCII\n\thandler := modbus.NewRTUClientHandler(\"\/dev\/ttyUSB0\")\n\thandler.BaudRate = 9600\n\thandler.DataBits = 8\n\thandler.Parity = \"N\"\n\thandler.StopBits = 2\n\thandler.SlaveId = 1\n\thandler.Timeout = 10 * time.Second\n\thandler.Logger = log.New(os.Stdout, \"test: \", log.LstdFlags)\n\n\terr := handler.Connect()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tdefer handler.Close()\n\n\t\/\/REG_HC_TEMP_IN1 214 Reg\n\t\/\/REG_HC_TEMP_IN2 215 Reg\n\t\/\/REG_HC_TEMP_IN3 216 Reg\n\t\/\/REG_HC_TEMP_IN4 217 Reg\n\t\/\/REG_HC_TEMP_IN5 218 Reg\n\n\t\/\/REG_DAMPER_PWM 301 Reg\n\t\/\/REG_HC_WC_SIGNAL 204 Reg\n\n\tclient := modbus.NewClient(handler)\n\t\/\/results, _ := client.ReadHoldingRegisters(214, 1)\n\t\/\/if err != nil {\n\t\/\/log.Println(err)\n\t\/\/}\n\tresults, _ := client.ReadInputRegisters(214, 1)\n\tlog.Println(\"REG_HC_TEMP_IN1: \", results)\n\tresults, _ = client.ReadInputRegisters(215, 1)\n\tlog.Println(\"REG_HC_TEMP_IN2: \", results)\n\tresults, _ = client.ReadInputRegisters(216, 1)\n\tlog.Println(\"REG_HC_TEMP_IN3: \", results)\n\tresults, _ = client.ReadInputRegisters(217, 1)\n\tlog.Println(\"REG_HC_TEMP_IN4: \", results)\n\tresults, _ = client.ReadInputRegisters(218, 1)\n\tlog.Println(\"REG_HC_TEMP_IN5: \", results)\n\tresults, _ = client.ReadInputRegisters(207, 1)\n\tlog.Println(\"REG_HC_TEMP_LVL: \", results)\n\tresults, _ = client.ReadInputRegisters(301, 1)\n\tlog.Println(\"REG_DAMPER_PWM: \", results)\n\tresults, _ = client.ReadInputRegisters(204, 1)\n\tlog.Println(\"REG_HC_WC_SIGNAL: \", results)\n\tresults, _ = client.ReadInputRegisters(209, 5)\n\tlog.Println(\"REG_HC_TEMP_LVL1-5: \", results)\n\n\ttime.Sleep(time.Second * 1)\n\treturn\n\t\/\/Start communication with the server\n\tconnection := basenode.Connect()\n\n\t\/\/ Thit worker keeps track on our connection state, if we are connected or not\n\tgo monitorState(node, connection)\n\n\t\/\/node.AddElement(&protocol.Element{\n\t\/\/Type: protocol.ElementTypeColorPicker,\n\t\/\/Name: \"Example color picker\",\n\t\/\/Command: &protocol.Command{\n\t\/\/Cmd: \"color\",\n\t\/\/Args: []string{\"1\"},\n\t\/\/},\n\t\/\/Feedback: \"Devices[4].State\",\n\t\/\/})\n\n\tstate := NewState()\n\tnode.SetState(state)\n\n\tstate.AddDevice(\"1\", \"Dev1\", false)\n\tstate.AddDevice(\"2\", \"Dev2\", true)\n\n\t\/\/ This worker recives all incomming commands\n\tgo serverRecv(node, connection)\n\tselect {}\n}\n\n\/\/ WORKER that monitors the current connection state\nfunc monitorState(node *protocol.Node, connection *basenode.Connection) {\n\tfor s := range connection.State {\n\t\tswitch s {\n\t\tcase basenode.ConnectionStateConnected:\n\t\t\tconnection.Send <- node.Node()\n\t\tcase basenode.ConnectionStateDisconnected:\n\t\t}\n\t}\n}\n\n\/\/ WORKER that recives all incomming commands\nfunc serverRecv(node *protocol.Node, connection *basenode.Connection) {\n\tfor d := range connection.Receive {\n\t\tprocessCommand(node, connection, d)\n\t}\n}\n\n\/\/ THis is called on each incomming command\nfunc processCommand(node *protocol.Node, connection *basenode.Connection, cmd protocol.Command) {\n\tif s, ok := node.State.(*State); ok {\n\t\tlog.Println(\"Incoming command from server:\", cmd)\n\t\tif len(cmd.Args) == 0 {\n\t\t\treturn\n\t\t}\n\t\tdevice := s.Device(cmd.Args[0])\n\n\t\tswitch cmd.Cmd {\n\t\tcase \"on\":\n\t\t\tdevice.State = true\n\t\t\tconnection.Send <- node.Node()\n\t\tcase \"off\":\n\t\t\tdevice.State = false\n\t\t\tconnection.Send <- node.Node()\n\t\tcase \"toggle\":\n\t\t\tlog.Println(\"got toggle\")\n\t\t\tif device.State {\n\t\t\t\tdevice.State = false\n\t\t\t} else {\n\t\t\t\tdevice.State = true\n\t\t\t}\n\t\t\tconnection.Send <- node.Node()\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build freebsd\n\npackage resource\n\nimport (\n\t\"fmt\"\n\t\"os\/exec\"\n)\n\n\/\/ Service type is a resource which manages services on a\n\/\/ FreeBSD.\n\/\/\n\/\/ Example:\n\/\/ svc = resource.service.new(\"nginx\")\n\/\/ svc.state = \"running\"\n\/\/ svc.enable = true\n\/\/ svc.rcvar = \"nginx_enable\"\ntype Service struct {\n\tBase\n\n\t\/\/ If true then enable the service during boot-time\n\tEnable bool `luar:\"enable\"`\n\n\t\/\/ RCVar (see rc.subr(8)), set to {svcname}_enable by default.\n\t\/\/ If service doesn't define rcvar, you should set svc.rcvar = \"\".\n\tRCVar string `luar:\"rcvar\"`\n}\n\n\/\/ NewService creates a new resource for managing services\n\/\/ on a FreeBSD system.\nfunc NewService(name string) (Resource, error) {\n\ts := &Service{\n\t\tBase: Base{\n\t\t\tName: name,\n\t\t\tType: \"service\",\n\t\t\tState: \"running\",\n\t\t\tRequire: make([]string, 0),\n\t\t\tPresentStates: []string{\"present\", \"running\"},\n\t\t\tAbsentStates: []string{\"absent\", \"stopped\"},\n\t\t\tConcurrent: false,\n\t\t},\n\t\tEnable: true,\n\t\tRCVar: fmt.Sprintf(\"%v_enable\", name),\n\t}\n\n\treturn s, nil\n}\n\n\/\/ IsEnabled returns true if service is set to start at boot.\nfunc (s *Service) isEnabled() bool {\n\tif err := exec.Command(\"service\", s.Name, \"enabled\").Run(); err != nil {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n\/\/ Evaluate evaluates the state of the resource.\nfunc (s *Service) Evaluate() (State, error) {\n\tstate := State{\n\t\tCurrent: \"unknown\",\n\t\tWant: s.State,\n\t\tOutdated: false,\n\t}\n\n\t\/\/ TODO: handle non existent service\n\terr := exec.Command(\"service\", s.Name, \"onestatus\").Run()\n\tif err != nil {\n\t\tstate.Current = \"stopped\"\n\t} else {\n\t\tstate.Current = \"running\"\n\t}\n\n\tif s.Enable != s.isEnabled() {\n\t\tstate.Outdated = true\n\t}\n\n\treturn state, nil\n}\n\n\/\/ Create starts the service.\nfunc (s *Service) Create() error {\n\treturn exec.Command(\"service\", s.Name, \"onestart\").Run()\n}\n\n\/\/ Delete stops the service.\nfunc (s *Service) Delete() error {\n\treturn exec.Command(\"service\", s.Name, \"onestop\").Run()\n}\n\n\/\/ Update updates the service's rcvar.\nfunc (s *Service) Update() error {\n\tif s.RCVar == \"\" {\n\t\treturn nil\n\t}\n\n\trcValue := \"YES\"\n\tif !s.Enable {\n\t\trcValue = \"NO\"\n\t\tif s.RCVar == \"sendmail_enable\" {\n\t\t\t\/\/ I think sendmail is the only service, that requires NONE to be disabled.\n\t\t\trcValue = \"NONE\"\n\t\t}\n\t}\n\n\t\/\/ TODO: rcvar should probably be deleted from rc.conf, when disabling service (except for sendmail).\n\t\/\/ Currently we set it to NO.\n\treturn exec.Command(\"sysrc\", fmt.Sprintf(`%s=\"%s\"`, s.RCVar, rcValue)).Run()\n}\n\nfunc init() {\n\tservice := RegistryItem{\n\t\tType: \"service\",\n\t\tProvider: NewService,\n\t\tNamespace: DefaultNamespace,\n\t}\n\n\tRegister(service)\n}\n<commit_msg>resource: doc comment updates for FreeBSD Service resource<commit_after>\/\/ +build freebsd\n\npackage resource\n\nimport (\n\t\"fmt\"\n\t\"os\/exec\"\n)\n\n\/\/ Service type is a resource which manages services on a\n\/\/ FreeBSD system.\n\/\/\n\/\/ Example:\n\/\/ svc = resource.service.new(\"nginx\")\n\/\/ svc.state = \"running\"\n\/\/ svc.enable = true\n\/\/ svc.rcvar = \"nginx_enable\"\ntype Service struct {\n\tBase\n\n\t\/\/ If true then enable the service during boot-time\n\tEnable bool `luar:\"enable\"`\n\n\t\/\/ RCVar (see rc.subr(8)), set to {svcname}_enable by default.\n\t\/\/ If service doesn't define rcvar, you should set svc.rcvar = \"\".\n\tRCVar string `luar:\"rcvar\"`\n}\n\n\/\/ NewService creates a new resource for managing services\n\/\/ on a FreeBSD system.\nfunc NewService(name string) (Resource, error) {\n\ts := &Service{\n\t\tBase: Base{\n\t\t\tName: name,\n\t\t\tType: \"service\",\n\t\t\tState: \"running\",\n\t\t\tRequire: make([]string, 0),\n\t\t\tPresentStates: []string{\"present\", \"running\"},\n\t\t\tAbsentStates: []string{\"absent\", \"stopped\"},\n\t\t\tConcurrent: false,\n\t\t},\n\t\tEnable: true,\n\t\tRCVar: fmt.Sprintf(\"%v_enable\", name),\n\t}\n\n\treturn s, nil\n}\n\n\/\/ isEnabled returns true if service is set to start at boot.\nfunc (s *Service) isEnabled() bool {\n\tif err := exec.Command(\"service\", s.Name, \"enabled\").Run(); err != nil {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n\/\/ Evaluate evaluates the state of the resource.\nfunc (s *Service) Evaluate() (State, error) {\n\tstate := State{\n\t\tCurrent: \"unknown\",\n\t\tWant: s.State,\n\t\tOutdated: false,\n\t}\n\n\t\/\/ TODO: handle non existent service\n\terr := exec.Command(\"service\", s.Name, \"onestatus\").Run()\n\tif err != nil {\n\t\tstate.Current = \"stopped\"\n\t} else {\n\t\tstate.Current = \"running\"\n\t}\n\n\tif s.Enable != s.isEnabled() {\n\t\tstate.Outdated = true\n\t}\n\n\treturn state, nil\n}\n\n\/\/ Create starts the service.\nfunc (s *Service) Create() error {\n\treturn exec.Command(\"service\", s.Name, \"onestart\").Run()\n}\n\n\/\/ Delete stops the service.\nfunc (s *Service) Delete() error {\n\treturn exec.Command(\"service\", s.Name, \"onestop\").Run()\n}\n\n\/\/ Update updates the service's rcvar.\nfunc (s *Service) Update() error {\n\tif s.RCVar == \"\" {\n\t\treturn nil\n\t}\n\n\trcValue := \"YES\"\n\tif !s.Enable {\n\t\trcValue = \"NO\"\n\t\tif s.RCVar == \"sendmail_enable\" {\n\t\t\t\/\/ I think sendmail is the only service, that requires NONE to be disabled.\n\t\t\trcValue = \"NONE\"\n\t\t}\n\t}\n\n\t\/\/ TODO: rcvar should probably be deleted from rc.conf, when disabling service (except for sendmail).\n\t\/\/ Currently we set it to NO.\n\treturn exec.Command(\"sysrc\", fmt.Sprintf(`%s=\"%s\"`, s.RCVar, rcValue)).Run()\n}\n\nfunc init() {\n\tservice := RegistryItem{\n\t\tType: \"service\",\n\t\tProvider: NewService,\n\t\tNamespace: DefaultNamespace,\n\t}\n\n\tRegister(service)\n}\n<|endoftext|>"} {"text":"<commit_before>package test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t. \"github.com\/prometheus\/alertmanager\/test\"\n)\n\nvar somethingConfig = `\nroutes:\n- send_to: \"default\"\n group_wait: 1s\n group_interval: 1s\n\nnotification_configs:\n- name: \"default\"\n send_resolved: true\n\n webhook_configs:\n - url: 'http:\/\/localhost:8088'\n`\n\nfunc TestSomething(t *testing.T) {\n\tt.Parallel()\n\n\t\/\/ Create a new acceptance test that instantiates new Alertmanagers\n\t\/\/ with the given configuration and verifies times with the given\n\t\/\/ tollerance.\n\tat := NewAcceptanceTest(t, &AcceptanceOpts{\n\t\tTolerance: 150 * time.Millisecond,\n\t\tConfig: somethingConfig,\n\t})\n\n\t\/\/ Create a new Alertmanager process listening to a random port\n\tam := at.Alertmanager()\n\t\/\/ Create a collector to which alerts can be written and verified\n\t\/\/ against a set of expected alert notifications.\n\tco := at.Collector(\"webhook\")\n\n\t\/\/ Run something that satisfies the webhook interface to which the\n\t\/\/ Alertmanager pushes as defined by its configuration.\n\tgo NewWebhook(\":8088\", co).Run()\n\n\t\/\/ Declare pushes to be made to the Alertmanager at the given time.\n\t\/\/ Times are provided in fractions of seconds.\n\tam.Push(At(1), Alert(\"alertname\", \"test\").Active(1))\n\tam.Push(At(3.5), Alert(\"alertname\", \"test\").Active(1, 3))\n\n\t\/\/ Declare which alerts are expected to arrive at the collector within\n\t\/\/ the defined time intervals.\n\tco.Want(Between(2, 2.5), Alert(\"alertname\", \"test\").Active(1))\n\tco.Want(Between(3, 3.5), Alert(\"alertname\", \"test\").Active(1))\n\tco.Want(Between(4, 4.5), Alert(\"alertname\", \"test\").Active(1, 3))\n\n\t\/\/ Start the flow as defined above and run the checks afterwards.\n\tat.Run()\n}\n\nvar silenceConfig = `\nroutes:\n- send_to: \"default\"\n group_wait: 1s\n group_interval: 1s\n\nnotification_configs:\n- name: \"default\"\n send_resolved: true\n\n webhook_configs:\n - url: 'http:\/\/localhost:8090'\n`\n\nfunc TestSilencing(t *testing.T) {\n\tt.Parallel()\n\n\tat := NewAcceptanceTest(t, &AcceptanceOpts{\n\t\tTolerance: 150 * time.Millisecond,\n\t\tConfig: silenceConfig,\n\t})\n\n\tam := at.Alertmanager()\n\tco := at.Collector(\"webhook\")\n\n\tgo NewWebhook(\":8090\", co).Run()\n\n\t\/\/ No repeat interval is configured. Thus, we receive an alert\n\t\/\/ notification every second.\n\tam.Push(At(1), Alert(\"alertname\", \"test1\").Active(1))\n\tam.Push(At(1), Alert(\"alertname\", \"test2\").Active(1))\n\n\tco.Want(Between(2, 2.5),\n\t\tAlert(\"alertname\", \"test1\").Active(1),\n\t\tAlert(\"alertname\", \"test2\").Active(1),\n\t)\n\n\t\/\/ Add a silence that affects the first alert.\n\tsil := Silence(2, 4.5).Match(\"alertname\", \"test1\")\n\tam.SetSilence(At(2.5), sil)\n\n\tco.Want(Between(3, 3.5), Alert(\"alertname\", \"test2\").Active(1))\n\tco.Want(Between(4, 4.5), Alert(\"alertname\", \"test2\").Active(1))\n\n\t\/\/ Remove the silence so in the next interval we receive both\n\t\/\/ alerts again.\n\tam.DelSilence(At(4.5), sil)\n\n\tco.Want(Between(5, 5.5),\n\t\tAlert(\"alertname\", \"test1\").Active(1),\n\t\tAlert(\"alertname\", \"test2\").Active(1),\n\t)\n\n\t\/\/ Start the flow as defined above and run the checks afterwards.\n\tat.Run()\n}\n\nvar batchConfig = `\nroutes:\n- send_to: \"default\"\n group_wait: 1s\n group_interval: 1s\n\nnotification_configs:\n- name: \"default\"\n send_resolved: true\n repeat_interval: 5s\n\n webhook_configs:\n - url: 'http:\/\/localhost:8089'\n`\n\nfunc TestBatching(t *testing.T) {\n\tt.Parallel()\n\n\tat := NewAcceptanceTest(t, &AcceptanceOpts{\n\t\tTolerance: 150 * time.Millisecond,\n\t\tConfig: batchConfig,\n\t})\n\n\tam := at.Alertmanager()\n\tco := at.Collector(\"webhook\")\n\n\tgo NewWebhook(\":8089\", co).Run()\n\n\tam.Push(At(1.1), Alert(\"alertname\", \"test1\").Active(1))\n\tam.Push(At(1.9), Alert(\"alertname\", \"test5\").Active(1))\n\tam.Push(At(2.3),\n\t\tAlert(\"alertname\", \"test2\").Active(1.5),\n\t\tAlert(\"alertname\", \"test3\").Active(1.5),\n\t\tAlert(\"alertname\", \"test4\").Active(1.6),\n\t)\n\n\tco.Want(Between(2.0, 2.5),\n\t\tAlert(\"alertname\", \"test1\").Active(1),\n\t\tAlert(\"alertname\", \"test5\").Active(1),\n\t)\n\t\/\/ Only expect the new ones with the next group interval.\n\tco.Want(Between(3, 3.5),\n\t\tAlert(\"alertname\", \"test2\").Active(1.5),\n\t\tAlert(\"alertname\", \"test3\").Active(1.5),\n\t\tAlert(\"alertname\", \"test4\").Active(1.6),\n\t)\n\n\t\/\/ While no changes happen expect no additional notifications\n\t\/\/ until the 5s repeat interval has ended.\n\n\t\/\/ The last three notifications should sent with the first two even\n\t\/\/ though their repeat interval has not yet passed. This way fragmented\n\t\/\/ batches are unified and notification noise reduced.\n\tco.Want(Between(7, 7.5),\n\t\tAlert(\"alertname\", \"test1\").Active(1),\n\t\tAlert(\"alertname\", \"test5\").Active(1),\n\t\tAlert(\"alertname\", \"test2\").Active(1.5),\n\t\tAlert(\"alertname\", \"test3\").Active(1.5),\n\t\tAlert(\"alertname\", \"test4\").Active(1.6),\n\t)\n\n\tat.Run()\n}\n<commit_msg>Let test silence timeout by itself<commit_after>package test\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t. \"github.com\/prometheus\/alertmanager\/test\"\n)\n\nvar somethingConfig = `\nroutes:\n- send_to: \"default\"\n group_wait: 1s\n group_interval: 1s\n\nnotification_configs:\n- name: \"default\"\n send_resolved: true\n\n webhook_configs:\n - url: 'http:\/\/localhost:8088'\n`\n\nfunc TestSomething(t *testing.T) {\n\tt.Parallel()\n\n\t\/\/ Create a new acceptance test that instantiates new Alertmanagers\n\t\/\/ with the given configuration and verifies times with the given\n\t\/\/ tollerance.\n\tat := NewAcceptanceTest(t, &AcceptanceOpts{\n\t\tTolerance: 150 * time.Millisecond,\n\t\tConfig: somethingConfig,\n\t})\n\n\t\/\/ Create a new Alertmanager process listening to a random port\n\tam := at.Alertmanager()\n\t\/\/ Create a collector to which alerts can be written and verified\n\t\/\/ against a set of expected alert notifications.\n\tco := at.Collector(\"webhook\")\n\n\t\/\/ Run something that satisfies the webhook interface to which the\n\t\/\/ Alertmanager pushes as defined by its configuration.\n\tgo NewWebhook(\":8088\", co).Run()\n\n\t\/\/ Declare pushes to be made to the Alertmanager at the given time.\n\t\/\/ Times are provided in fractions of seconds.\n\tam.Push(At(1), Alert(\"alertname\", \"test\").Active(1))\n\tam.Push(At(3.5), Alert(\"alertname\", \"test\").Active(1, 3))\n\n\t\/\/ Declare which alerts are expected to arrive at the collector within\n\t\/\/ the defined time intervals.\n\tco.Want(Between(2, 2.5), Alert(\"alertname\", \"test\").Active(1))\n\tco.Want(Between(3, 3.5), Alert(\"alertname\", \"test\").Active(1))\n\tco.Want(Between(4, 4.5), Alert(\"alertname\", \"test\").Active(1, 3))\n\n\t\/\/ Start the flow as defined above and run the checks afterwards.\n\tat.Run()\n}\n\nvar silenceConfig = `\nroutes:\n- send_to: \"default\"\n group_wait: 1s\n group_interval: 1s\n\nnotification_configs:\n- name: \"default\"\n send_resolved: true\n\n webhook_configs:\n - url: 'http:\/\/localhost:8090'\n`\n\nfunc TestSilencing(t *testing.T) {\n\tt.Parallel()\n\n\tat := NewAcceptanceTest(t, &AcceptanceOpts{\n\t\tTolerance: 150 * time.Millisecond,\n\t\tConfig: silenceConfig,\n\t})\n\n\tam := at.Alertmanager()\n\tco := at.Collector(\"webhook\")\n\n\tgo NewWebhook(\":8090\", co).Run()\n\n\t\/\/ No repeat interval is configured. Thus, we receive an alert\n\t\/\/ notification every second.\n\tam.Push(At(1), Alert(\"alertname\", \"test1\").Active(1))\n\tam.Push(At(1), Alert(\"alertname\", \"test2\").Active(1))\n\n\tco.Want(Between(2, 2.5),\n\t\tAlert(\"alertname\", \"test1\").Active(1),\n\t\tAlert(\"alertname\", \"test2\").Active(1),\n\t)\n\n\t\/\/ Add a silence that affects the first alert.\n\tam.SetSilence(At(2.5), Silence(2, 4.5).Match(\"alertname\", \"test1\"))\n\n\tco.Want(Between(3, 3.5), Alert(\"alertname\", \"test2\").Active(1))\n\tco.Want(Between(4, 4.5), Alert(\"alertname\", \"test2\").Active(1))\n\n\t\/\/ Silence should be over now and we receive both alerts again.\n\n\tco.Want(Between(5, 5.5),\n\t\tAlert(\"alertname\", \"test1\").Active(1),\n\t\tAlert(\"alertname\", \"test2\").Active(1),\n\t)\n\n\t\/\/ Start the flow as defined above and run the checks afterwards.\n\tat.Run()\n}\n\nvar batchConfig = `\nroutes:\n- send_to: \"default\"\n group_wait: 1s\n group_interval: 1s\n\nnotification_configs:\n- name: \"default\"\n send_resolved: true\n repeat_interval: 5s\n\n webhook_configs:\n - url: 'http:\/\/localhost:8089'\n`\n\nfunc TestBatching(t *testing.T) {\n\tt.Parallel()\n\n\tat := NewAcceptanceTest(t, &AcceptanceOpts{\n\t\tTolerance: 150 * time.Millisecond,\n\t\tConfig: batchConfig,\n\t})\n\n\tam := at.Alertmanager()\n\tco := at.Collector(\"webhook\")\n\n\tgo NewWebhook(\":8089\", co).Run()\n\n\tam.Push(At(1.1), Alert(\"alertname\", \"test1\").Active(1))\n\tam.Push(At(1.9), Alert(\"alertname\", \"test5\").Active(1))\n\tam.Push(At(2.3),\n\t\tAlert(\"alertname\", \"test2\").Active(1.5),\n\t\tAlert(\"alertname\", \"test3\").Active(1.5),\n\t\tAlert(\"alertname\", \"test4\").Active(1.6),\n\t)\n\n\tco.Want(Between(2.0, 2.5),\n\t\tAlert(\"alertname\", \"test1\").Active(1),\n\t\tAlert(\"alertname\", \"test5\").Active(1),\n\t)\n\t\/\/ Only expect the new ones with the next group interval.\n\tco.Want(Between(3, 3.5),\n\t\tAlert(\"alertname\", \"test2\").Active(1.5),\n\t\tAlert(\"alertname\", \"test3\").Active(1.5),\n\t\tAlert(\"alertname\", \"test4\").Active(1.6),\n\t)\n\n\t\/\/ While no changes happen expect no additional notifications\n\t\/\/ until the 5s repeat interval has ended.\n\n\t\/\/ The last three notifications should sent with the first two even\n\t\/\/ though their repeat interval has not yet passed. This way fragmented\n\t\/\/ batches are unified and notification noise reduced.\n\tco.Want(Between(7, 7.5),\n\t\tAlert(\"alertname\", \"test1\").Active(1),\n\t\tAlert(\"alertname\", \"test5\").Active(1),\n\t\tAlert(\"alertname\", \"test2\").Active(1.5),\n\t\tAlert(\"alertname\", \"test3\").Active(1.5),\n\t\tAlert(\"alertname\", \"test4\").Active(1.6),\n\t)\n\n\tat.Run()\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com\/openshift\/origin\/pkg\/client\/testclient\"\n)\n\n\/\/ TestRequestProjectDefaultFlags ensures that flags default values are set.\nfunc TestRequestProjectDefaultFlags(t *testing.T) {\n\n\ttests := map[string]struct {\n\t\tflagName string\n\t\tdefaultVal string\n\t}{\n\t\t\"display name\": {\n\t\t\tflagName: \"display-name\",\n\t\t\tdefaultVal: \"\",\n\t\t},\n\t\t\"description\": {\n\t\t\tflagName: \"description\",\n\t\t\tdefaultVal: \"\",\n\t\t},\n\t\t\"skip config write\": {\n\t\t\tflagName: \"skip-config-write\",\n\t\t\tdefaultVal: strconv.FormatBool(false),\n\t\t},\n\t}\n\n\tcmd := NewCmdRequestProject(\"oc\", RequestProjectRecommendedCommandName, nil, nil, nil)\n\n\tfor _, v := range tests {\n\t\tf := cmd.Flag(v.flagName)\n\t\tif f == nil {\n\t\t\tt.Fatalf(\"expected flag %s to be registered but found none\", v.flagName)\n\t\t}\n\n\t\tif f.DefValue != v.defaultVal {\n\t\t\tt.Errorf(\"expected default value of %s for %s but found %s\", v.defaultVal, v.flagName, f.DefValue)\n\t\t}\n\t}\n}\n\n\/\/ TestRequestProjectRun ensures that Run command calls the right actions.\nfunc TestRequestProjectRun(t *testing.T) {\n\tclient := testclient.NewSimpleFake()\n\tbuf := &bytes.Buffer{}\n\n\ttest := struct {\n\t\topts *NewProjectOptions\n\t\texpectedActions []testAction\n\t\texpectedErr error\n\t}{\n\t\topts: &NewProjectOptions{\n\t\t\tOut: buf,\n\t\t\tServer: \"127.0.0.1\",\n\t\t\tClient: client,\n\t\t\tName: \"oc\",\n\t\t\tProjectName: \"yourproject\",\n\t\t},\n\t\texpectedActions: []testAction{\n\t\t\t{verb: \"list\", resource: \"newprojects\"},\n\t\t\t{verb: \"create\", resource: \"newprojects\"},\n\t\t},\n\t\texpectedErr: nil,\n\t}\n\n\texpectedOutput := fmt.Sprintf(requestProjectSwitchProjectOutput, test.opts.Name, test.opts.ProjectName, test.opts.Server)\n\n\tif err := test.opts.Run(); err != test.expectedErr {\n\t\tt.Fatalf(\"error mismatch: expected %v, got %v\", test.expectedErr, err)\n\t}\n\n\tif buf.String() != expectedOutput {\n\t\tt.Fatalf(\"error mismatch output: expected %v, got %v\", expectedOutput, buf)\n\t}\n\n\tgot := client.Actions()\n\tif len(test.expectedActions) != len(got) {\n\t\tt.Fatalf(\"action length mismatch: expected %d, got %d\", len(test.expectedActions), len(got))\n\t}\n\n\tfor i, action := range test.expectedActions {\n\t\tif !got[i].Matches(action.verb, action.resource) {\n\t\t\tt.Errorf(\"action mismatch: expected %s %s, got %s %s\", action.verb, action.resource, got[i].GetVerb(), got[i].GetResource())\n\t\t}\n\t}\n\n}\n<commit_msg>disable projectrequest unit test because the fake client is weird<commit_after>package cmd\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com\/openshift\/origin\/pkg\/client\/testclient\"\n)\n\n\/\/ TestRequestProjectDefaultFlags ensures that flags default values are set.\nfunc TestRequestProjectDefaultFlags(t *testing.T) {\n\n\ttests := map[string]struct {\n\t\tflagName string\n\t\tdefaultVal string\n\t}{\n\t\t\"display name\": {\n\t\t\tflagName: \"display-name\",\n\t\t\tdefaultVal: \"\",\n\t\t},\n\t\t\"description\": {\n\t\t\tflagName: \"description\",\n\t\t\tdefaultVal: \"\",\n\t\t},\n\t\t\"skip config write\": {\n\t\t\tflagName: \"skip-config-write\",\n\t\t\tdefaultVal: strconv.FormatBool(false),\n\t\t},\n\t}\n\n\tcmd := NewCmdRequestProject(\"oc\", RequestProjectRecommendedCommandName, nil, nil, nil)\n\n\tfor _, v := range tests {\n\t\tf := cmd.Flag(v.flagName)\n\t\tif f == nil {\n\t\t\tt.Fatalf(\"expected flag %s to be registered but found none\", v.flagName)\n\t\t}\n\n\t\tif f.DefValue != v.defaultVal {\n\t\t\tt.Errorf(\"expected default value of %s for %s but found %s\", v.defaultVal, v.flagName, f.DefValue)\n\t\t}\n\t}\n}\n\n\/\/ DISABLE_TestRequestProjectRun ensures that Run command calls the right actions.\nfunc DISABLE_TestRequestProjectRun(t *testing.T) {\n\tclient := testclient.NewSimpleFake()\n\tbuf := &bytes.Buffer{}\n\n\ttest := struct {\n\t\topts *NewProjectOptions\n\t\texpectedActions []testAction\n\t\texpectedErr error\n\t}{\n\t\topts: &NewProjectOptions{\n\t\t\tOut: buf,\n\t\t\tServer: \"127.0.0.1\",\n\t\t\tClient: client,\n\t\t\tName: \"oc\",\n\t\t\tProjectName: \"yourproject\",\n\t\t},\n\t\texpectedActions: []testAction{\n\t\t\t{verb: \"list\", resource: \"newprojects\"},\n\t\t\t{verb: \"create\", resource: \"newprojects\"},\n\t\t},\n\t\texpectedErr: nil,\n\t}\n\n\texpectedOutput := fmt.Sprintf(requestProjectSwitchProjectOutput, test.opts.Name, test.opts.ProjectName, test.opts.Server)\n\n\tif err := test.opts.Run(); err != test.expectedErr {\n\t\tt.Fatalf(\"error mismatch: expected %v, got %v\", test.expectedErr, err)\n\t}\n\n\tif buf.String() != expectedOutput {\n\t\tt.Fatalf(\"error mismatch output: expected %v, got %v\", expectedOutput, buf)\n\t}\n\n\tgot := client.Actions()\n\tif len(test.expectedActions) != len(got) {\n\t\tt.Fatalf(\"action length mismatch: expected %d, got %d\", len(test.expectedActions), len(got))\n\t}\n\n\tfor i, action := range test.expectedActions {\n\t\tif !got[i].Matches(action.verb, action.resource) {\n\t\t\tt.Errorf(\"action mismatch: expected %s %s, got %s %s\", action.verb, action.resource, got[i].GetVerb(), got[i].GetResource())\n\t\t}\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The etcd-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage controller\n\nimport (\n\tapi \"github.com\/coreos\/etcd-operator\/pkg\/apis\/etcd\/v1beta2\"\n\t\"github.com\/coreos\/etcd-operator\/pkg\/backup\/backupapi\"\n\t\"github.com\/coreos\/etcd-operator\/pkg\/util\/etcdutil\"\n\t\"github.com\/coreos\/etcd-operator\/pkg\/util\/k8sutil\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/kubernetes\"\n)\n\nconst (\n\t\/\/ Copy from deployment_controller.go:\n\t\/\/ maxRetries is the number of times a restore request will be retried before it is dropped out of the queue.\n\t\/\/ With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times\n\t\/\/ an restore request is going to be requeued:\n\t\/\/\n\t\/\/ 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s\n\tmaxRetries = 15\n)\n\nfunc (r *Restore) runWorker() {\n\tfor r.processNextItem() {\n\t}\n}\n\nfunc (r *Restore) processNextItem() bool {\n\t\/\/ Wait until there is a new item in the working queue\n\tkey, quit := r.queue.Get()\n\tif quit {\n\t\treturn false\n\t}\n\t\/\/ Tell the queue that we are done with processing this key. This unblocks the key for other workers\n\t\/\/ This allows safe parallel processing because two pods with the same key are never processed in\n\t\/\/ parallel.\n\tdefer r.queue.Done(key)\n\terr := r.processItem(key.(string))\n\t\/\/ Handle the error if something went wrong during the execution of the business logic\n\tr.handleErr(err, key)\n\treturn true\n}\n\nfunc (r *Restore) processItem(key string) error {\n\tobj, exists, err := r.indexer.GetByKey(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !exists {\n\t\tcn, ok := r.clusterNames.Load(key)\n\t\tif ok {\n\t\t\tr.restoreCRs.Delete(cn)\n\t\t\tr.clusterNames.Delete(key)\n\t\t}\n\t\treturn nil\n\t}\n\treturn r.handleCR(obj.(*api.EtcdRestore), key)\n}\n\nfunc (r *Restore) handleCR(er *api.EtcdRestore, key string) error {\n\t\/\/ don't process the CR if it has a status since\n\t\/\/ having a status means that the CR has been processed before.\n\tif er.Status.Succeeded || len(er.Status.Reason) != 0 {\n\t\treturn nil\n\t}\n\tclusterName := er.Spec.BackupSpec.ClusterName\n\tr.clusterNames.Store(key, clusterName)\n\tr.restoreCRs.Store(clusterName, er)\n\terr := createSeedPod(r.kubecli, er.Spec.ClusterSpec, er.AsOwner(), r.namespace, er.Spec.ClusterSpec.Version, r.mySvcAddr, clusterName)\n\tr.reportStatus(err, er)\n\treturn err\n}\n\nfunc (r *Restore) reportStatus(rerr error, er *api.EtcdRestore) {\n\tif rerr != nil {\n\t\ter.Status.Succeeded = false\n\t\ter.Status.Reason = rerr.Error()\n\t} else {\n\t\ter.Status.Succeeded = true\n\t}\n\t_, err := r.restoreCRCli.EtcdV1beta2().EtcdRestores(r.namespace).Update(er)\n\tif err != nil {\n\t\tr.logger.Warningf(\"failed to update status of restore CR %v : (%v)\", er.Name, err)\n\t}\n}\n\nfunc createSeedPod(kubecli kubernetes.Interface, cs api.ClusterSpec, owner metav1.OwnerReference, namespace, etcdVersion, svcAddr, clusterName string) error {\n\terr := k8sutil.CreateClientService(kubecli, clusterName, namespace, owner)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = k8sutil.CreatePeerService(kubecli, clusterName, namespace, owner)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm := &etcdutil.Member{\n\t\tName: etcdutil.CreateMemberName(clusterName, 0),\n\t\tNamespace: namespace,\n\t\t\/\/ TODO: support TLS\n\t\tSecurePeer: false,\n\t\tSecureClient: false,\n\t}\n\tms := etcdutil.NewMemberSet(m)\n\tbackupURL := backupapi.BackupURLForCluster(\"http\", svcAddr, clusterName, etcdVersion, -1)\n\tcs.Cleanup()\n\tpod := k8sutil.NewSeedMemberPod(clusterName, ms, m, cs, owner, backupURL)\n\t_, err = kubecli.Core().Pods(namespace).Create(pod)\n\treturn err\n}\n\nfunc (r *Restore) handleErr(err error, key interface{}) {\n\tif err == nil {\n\t\t\/\/ Forget about the #AddRateLimited history of the key on every successful synchronization.\n\t\t\/\/ This ensures that future processing of updates for this key is not delayed because of\n\t\t\/\/ an outdated error history.\n\t\tr.queue.Forget(key)\n\t\treturn\n\t}\n\n\t\/\/ This controller retries maxRetries times if something goes wrong. After that, it stops trying.\n\tif r.queue.NumRequeues(key) < maxRetries {\n\t\tr.logger.Errorf(\"error syncing restore request (%v): %v\", key, err)\n\n\t\t\/\/ Re-enqueue the key rate limited. Based on the rate limiter on the\n\t\t\/\/ queue and the re-enqueue history, the key will be processed later again.\n\t\tr.queue.AddRateLimited(key)\n\t\treturn\n\t}\n\n\tr.queue.Forget(key)\n\t\/\/ Report that, even after several retries, we could not successfully process this key\n\tr.logger.Infof(\"dropping restore request (%v) out of the queue: %v\", key, err)\n}\n<commit_msg>controller\/restore-operator: remove init client service<commit_after>\/\/ Copyright 2017 The etcd-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage controller\n\nimport (\n\tapi \"github.com\/coreos\/etcd-operator\/pkg\/apis\/etcd\/v1beta2\"\n\t\"github.com\/coreos\/etcd-operator\/pkg\/backup\/backupapi\"\n\t\"github.com\/coreos\/etcd-operator\/pkg\/util\/etcdutil\"\n\t\"github.com\/coreos\/etcd-operator\/pkg\/util\/k8sutil\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/kubernetes\"\n)\n\nconst (\n\t\/\/ Copy from deployment_controller.go:\n\t\/\/ maxRetries is the number of times a restore request will be retried before it is dropped out of the queue.\n\t\/\/ With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times\n\t\/\/ an restore request is going to be requeued:\n\t\/\/\n\t\/\/ 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s\n\tmaxRetries = 15\n)\n\nfunc (r *Restore) runWorker() {\n\tfor r.processNextItem() {\n\t}\n}\n\nfunc (r *Restore) processNextItem() bool {\n\t\/\/ Wait until there is a new item in the working queue\n\tkey, quit := r.queue.Get()\n\tif quit {\n\t\treturn false\n\t}\n\t\/\/ Tell the queue that we are done with processing this key. This unblocks the key for other workers\n\t\/\/ This allows safe parallel processing because two pods with the same key are never processed in\n\t\/\/ parallel.\n\tdefer r.queue.Done(key)\n\terr := r.processItem(key.(string))\n\t\/\/ Handle the error if something went wrong during the execution of the business logic\n\tr.handleErr(err, key)\n\treturn true\n}\n\nfunc (r *Restore) processItem(key string) error {\n\tobj, exists, err := r.indexer.GetByKey(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !exists {\n\t\tcn, ok := r.clusterNames.Load(key)\n\t\tif ok {\n\t\t\tr.restoreCRs.Delete(cn)\n\t\t\tr.clusterNames.Delete(key)\n\t\t}\n\t\treturn nil\n\t}\n\treturn r.handleCR(obj.(*api.EtcdRestore), key)\n}\n\nfunc (r *Restore) handleCR(er *api.EtcdRestore, key string) error {\n\t\/\/ don't process the CR if it has a status since\n\t\/\/ having a status means that the CR has been processed before.\n\tif er.Status.Succeeded || len(er.Status.Reason) != 0 {\n\t\treturn nil\n\t}\n\tclusterName := er.Spec.BackupSpec.ClusterName\n\tr.clusterNames.Store(key, clusterName)\n\tr.restoreCRs.Store(clusterName, er)\n\terr := createSeedPod(r.kubecli, er.Spec.ClusterSpec, er.AsOwner(), r.namespace, er.Spec.ClusterSpec.Version, r.mySvcAddr, clusterName)\n\tr.reportStatus(err, er)\n\treturn err\n}\n\nfunc (r *Restore) reportStatus(rerr error, er *api.EtcdRestore) {\n\tif rerr != nil {\n\t\ter.Status.Succeeded = false\n\t\ter.Status.Reason = rerr.Error()\n\t} else {\n\t\ter.Status.Succeeded = true\n\t}\n\t_, err := r.restoreCRCli.EtcdV1beta2().EtcdRestores(r.namespace).Update(er)\n\tif err != nil {\n\t\tr.logger.Warningf(\"failed to update status of restore CR %v : (%v)\", er.Name, err)\n\t}\n}\n\nfunc createSeedPod(kubecli kubernetes.Interface, cs api.ClusterSpec, owner metav1.OwnerReference, namespace, etcdVersion, svcAddr, clusterName string) error {\n\terr := k8sutil.CreatePeerService(kubecli, clusterName, namespace, owner)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm := &etcdutil.Member{\n\t\tName: etcdutil.CreateMemberName(clusterName, 0),\n\t\tNamespace: namespace,\n\t\t\/\/ TODO: support TLS\n\t\tSecurePeer: false,\n\t\tSecureClient: false,\n\t}\n\tms := etcdutil.NewMemberSet(m)\n\tbackupURL := backupapi.BackupURLForCluster(\"http\", svcAddr, clusterName, etcdVersion, -1)\n\tcs.Cleanup()\n\tpod := k8sutil.NewSeedMemberPod(clusterName, ms, m, cs, owner, backupURL)\n\t_, err = kubecli.Core().Pods(namespace).Create(pod)\n\treturn err\n}\n\nfunc (r *Restore) handleErr(err error, key interface{}) {\n\tif err == nil {\n\t\t\/\/ Forget about the #AddRateLimited history of the key on every successful synchronization.\n\t\t\/\/ This ensures that future processing of updates for this key is not delayed because of\n\t\t\/\/ an outdated error history.\n\t\tr.queue.Forget(key)\n\t\treturn\n\t}\n\n\t\/\/ This controller retries maxRetries times if something goes wrong. After that, it stops trying.\n\tif r.queue.NumRequeues(key) < maxRetries {\n\t\tr.logger.Errorf(\"error syncing restore request (%v): %v\", key, err)\n\n\t\t\/\/ Re-enqueue the key rate limited. Based on the rate limiter on the\n\t\t\/\/ queue and the re-enqueue history, the key will be processed later again.\n\t\tr.queue.AddRateLimited(key)\n\t\treturn\n\t}\n\n\tr.queue.Forget(key)\n\t\/\/ Report that, even after several retries, we could not successfully process this key\n\tr.logger.Infof(\"dropping restore request (%v) out of the queue: %v\", key, err)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage buildpacks\n\nimport (\n\t\"io\"\n\n\t\"github.com\/buildpacks\/pack\/logging\"\n\t\"github.com\/mattn\/go-colorable\"\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/util\"\n)\n\n\/\/ logger exists to meet the requirements of the pack logger.\ntype logger struct {\n\t*logrus.Logger\n\tout io.Writer\n}\n\nfunc NewLogger(out io.Writer) logging.Logger {\n\t\/\/ If out is not a terminal, let's make sure no colors are printed.\n\tif _, isTerm := util.IsTerminal(out); !isTerm {\n\t\tout = colorable.NewNonColorable(out)\n\t}\n\n\tl := logrus.New()\n\tl.SetOutput(out)\n\tl.SetFormatter(new(plainFormatter))\n\n\treturn &logger{\n\t\tLogger: l,\n\t\tout: out,\n\t}\n}\n\ntype plainFormatter struct{}\n\nfunc (f *plainFormatter) Format(entry *logrus.Entry) ([]byte, error) {\n\treturn []byte(entry.Message + \"\\n\"), nil\n}\n\nfunc (l *logger) Debug(msg string) {\n\tl.Logger.Debug(msg)\n}\n\nfunc (l *logger) Info(msg string) {\n\tl.Logger.Info(msg)\n}\n\nfunc (l *logger) Warn(msg string) {\n\tl.Logger.Warn(msg)\n}\n\nfunc (l *logger) Error(msg string) {\n\tl.Logger.Error(msg)\n}\n\nfunc (l *logger) Writer() io.Writer {\n\treturn l.out\n}\n\nfunc (l *logger) IsVerbose() bool {\n\treturn false\n}\n<commit_msg>Format<commit_after>\/*\nCopyright 2019 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage buildpacks\n\nimport (\n\t\"io\"\n\n\t\"github.com\/buildpacks\/pack\/logging\"\n\t\"github.com\/mattn\/go-colorable\"\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/util\"\n)\n\n\/\/ logger exists to meet the requirements of the pack logger.\ntype logger struct {\n\t*logrus.Logger\n\tout io.Writer\n}\n\nfunc NewLogger(out io.Writer) logging.Logger {\n\t\/\/ If out is not a terminal, let's make sure no colors are printed.\n\tif _, isTerm := util.IsTerminal(out); !isTerm {\n\t\tout = colorable.NewNonColorable(out)\n\t}\n\n\tl := logrus.New()\n\tl.SetOutput(out)\n\n\t\/\/ By default, logrus prefixes lines with 'INFO[XXX]'.\n\tl.SetFormatter(new(plainFormatter))\n\n\treturn &logger{\n\t\tLogger: l,\n\t\tout: out,\n\t}\n}\n\ntype plainFormatter struct{}\n\nfunc (f *plainFormatter) Format(entry *logrus.Entry) ([]byte, error) {\n\treturn []byte(entry.Message + \"\\n\"), nil\n}\n\nfunc (l *logger) Debug(msg string) {\n\tl.Logger.Debug(msg)\n}\n\nfunc (l *logger) Info(msg string) {\n\tl.Logger.Info(msg)\n}\n\nfunc (l *logger) Warn(msg string) {\n\tl.Logger.Warn(msg)\n}\n\nfunc (l *logger) Error(msg string) {\n\tl.Logger.Error(msg)\n}\n\nfunc (l *logger) Writer() io.Writer {\n\treturn l.out\n}\n\nfunc (l *logger) IsVerbose() bool {\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package conf is a configuration package. It can be used to store configuration data in multiple different pluggable backends\npackage conf\n\nimport (\n\t\"errors\"\n\t\"sync\"\n)\n\n\/\/ Default is an in-memory config backend. It's used in the SetInt and GetInt calls\nvar Default Data = NewMem()\n\n\/\/ ErrNotFound is the error returned when a config item isn't found\nvar ErrNotFound = errors.New(\"not found\")\n\n\/\/ SetInt calls Default.SetInt\nfunc SetInt(name string, i int) {\n\tDefault.SetInt(name, i)\n}\n\n\/\/ GetInt calls Default.GetInt\nfunc GetInt(name string) (int, error) {\n\treturn Default.GetInt(name)\n}\n\n\/\/ Data is the core config interface\ntype Data interface {\n\t\/\/ SetInt sets the config value at name to i, overwriting if it already exists\n\tSetInt(name string, i int)\n\t\/\/ GetInt gets the config value at name. Returns 0, ErrNotFound if not such value found\n\tGetInt(name string) (int, error)\n}\n\ntype memData struct {\n\tl *sync.RWMutex\n\tstrings map[string]string\n\tints map[string]int\n}\n\n\/\/ NewMem creates a Data implementation that stores config data in memory\nfunc NewMem() Data {\n\treturn &memData{l: &sync.RWMutex{}, strings: make(map[string]string), ints: make(map[string]int)}\n}\n\n\/\/ SetInt is the interface implementation\nfunc (m *memData) SetInt(name string, i int) {\n\tm.l.Lock()\n\tdefer m.l.Unlock()\n\tm.ints[name] = i\n}\n\n\/\/ GetInt is the interface implementation\nfunc (m *memData) GetInt(name string) (int, error) {\n\tm.l.RLock()\n\tdefer m.l.RUnlock()\n\ti, ok := m.ints[name]\n\tif !ok {\n\t\treturn 0, ErrNotFound\n\t}\n\treturn i, nil\n}\n<commit_msg>more comments<commit_after>\/\/ Package conf is a configuration package. It can be used to store configuration data in multiple different pluggable backends\npackage conf\n\nimport (\n\t\"errors\"\n\t\"sync\"\n)\n\n\/\/ Default is an in-memory config backend. It's used in the SetInt and GetInt calls, and it's the optional singleton. Some packages export this name and others don't. If it's exported, it's the library user's responsibility to set it correctly and avoid concurrency issues (like setting it from two different goroutines, which would be a race condition)\nvar Default Data = NewMem()\n\n\/\/ ErrNotFound is the error returned when a config item isn't found\nvar ErrNotFound = errors.New(\"not found\")\n\n\/\/ SetInt calls Default.SetInt\nfunc SetInt(name string, i int) {\n\tDefault.SetInt(name, i)\n}\n\n\/\/ GetInt calls Default.GetInt\nfunc GetInt(name string) (int, error) {\n\treturn Default.GetInt(name)\n}\n\n\/\/ Data is the core config interface\ntype Data interface {\n\t\/\/ SetInt sets the config value at name to i, overwriting if it already exists\n\tSetInt(name string, i int)\n\t\/\/ GetInt gets the config value at name. Returns 0, ErrNotFound if not such value found\n\tGetInt(name string) (int, error)\n}\n\ntype memData struct {\n\tl *sync.RWMutex\n\tstrings map[string]string\n\tints map[string]int\n}\n\n\/\/ NewMem creates a Data implementation that stores config data in memory\nfunc NewMem() Data {\n\treturn &memData{l: &sync.RWMutex{}, strings: make(map[string]string), ints: make(map[string]int)}\n}\n\n\/\/ SetInt is the interface implementation\nfunc (m *memData) SetInt(name string, i int) {\n\tm.l.Lock()\n\tdefer m.l.Unlock()\n\tm.ints[name] = i\n}\n\n\/\/ GetInt is the interface implementation\nfunc (m *memData) GetInt(name string) (int, error) {\n\tm.l.RLock()\n\tdefer m.l.RUnlock()\n\ti, ok := m.ints[name]\n\tif !ok {\n\t\treturn 0, ErrNotFound\n\t}\n\treturn i, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package javascript\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"strings\"\n\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/config\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/container\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/dep2\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/scan\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/unit\"\n)\n\nfunc init() {\n\tscan.Register(\"npm\", scan.DockerScanner{defaultNPM})\n\tdep2.RegisterLister(&CommonJSPackage{}, defaultNPM)\n\tdep2.RegisterResolver(npmDependencyTargetType, dep2.DockerResolver{defaultNPM})\n}\n\nconst (\n\tnodeStdlibRepoURL = \"git:\/\/github.com\/joyent\/node.git\"\n\tNodeJSStdlibUnit = \"node\"\n)\n\ntype nodeVersion struct{}\n\ntype npmVersion struct{ nodeVersion }\n\nvar (\n\tdefaultNode = nodeVersion{}\n\tdefaultNPM = &npmVersion{defaultNode}\n)\n\nfunc (_ *nodeVersion) baseDockerfile() ([]byte, error) {\n\treturn []byte(baseNPMDockerfile), nil\n}\n\nconst baseNPMDockerfile = `FROM ubuntu:14.04\nRUN apt-get update -qq\nRUN apt-get install -qqy nodejs node-gyp npm git\n\n# Some NPM modules expect the node.js interpreter to be \"node\", not \"nodejs\" (as\n# it is on Ubuntu).\nRUN ln -s \/usr\/bin\/nodejs \/usr\/bin\/node\n`\n\n\/\/ containerDir returns the directory in the Docker container to use for the\n\/\/ local directory dir.\nfunc containerDir(dir string) string {\n\treturn filepath.Join(\"\/tmp\/sg\", filepath.Base(dir))\n}\n\nfunc (v *npmVersion) BuildScanner(dir string, c *config.Repository) (*container.Command, error) {\n\tdockerfile, err := v.baseDockerfile()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconst (\n\t\tfindpkgsNPM = \"commonjs-findpkgs@0.0.5\"\n\t\tfindpkgsGit = \"git:\/\/github.com\/sourcegraph\/commonjs-findpkgs.git\"\n\t\tfindpkgsSrc = findpkgsNPM\n\t)\n\tdockerfile = append(dockerfile, []byte(\"\\n\\nRUN npm install --quiet -g \"+findpkgsNPM+\"\\n\")...)\n\n\tscanIgnores, err := json.Marshal(c.ScanIgnore)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontainerDir := containerDir(dir)\n\tcont := container.Container{\n\t\tDockerfile: dockerfile,\n\t\tRunOptions: []string{\"-v\", dir + \":\" + containerDir},\n\t\tCmd: []string{\"commonjs-findpkgs\", \"--ignore\", string(scanIgnores)},\n\t\tDir: containerDir,\n\t}\n\tcmd := container.Command{\n\t\tContainer: cont,\n\t\tTransform: func(orig []byte) ([]byte, error) {\n\t\t\tvar pkgs []*CommonJSPackage\n\t\t\terr := json.Unmarshal(orig, &pkgs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t\/\/ filter out undesirable packages\n\t\t\tvar pkgs2 []*CommonJSPackage\n\t\t\tfor _, pkg := range pkgs {\n\t\t\t\tif !strings.Contains(pkg.Dir, \"node_modules\") {\n\t\t\t\t\tpkgs2 = append(pkgs2, pkg)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ filter out undesirable source files (minified files) from\n\t\t\t\/\/ packages\n\t\t\tfor _, pkg := range pkgs {\n\t\t\t\tfor i, f := range pkg.LibFiles {\n\t\t\t\t\tif strings.HasSuffix(f, \".min.js\") {\n\t\t\t\t\t\tpkg.LibFiles = append(pkg.LibFiles[:i], pkg.LibFiles[i+1:]...)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ set other fields\n\t\t\tfor _, pkg := range pkgs2 {\n\t\t\t\tvar pkgjson struct {\n\t\t\t\t\tName string\n\t\t\t\t\tDescription string\n\t\t\t\t}\n\t\t\t\tif err := json.Unmarshal(pkg.Package, &pkgjson); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tpkg.PackageName = pkgjson.Name\n\t\t\t\tpkg.PackageDescription = pkgjson.Description\n\t\t\t}\n\n\t\t\treturn json.Marshal(pkgs2)\n\t\t},\n\t}\n\treturn &cmd, nil\n}\n\nfunc (v *npmVersion) UnmarshalSourceUnits(data []byte) ([]unit.SourceUnit, error) {\n\tif len(data) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tvar npmPackages []*CommonJSPackage\n\terr := json.Unmarshal(data, &npmPackages)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tunits := make([]unit.SourceUnit, len(npmPackages))\n\tfor i, p := range npmPackages {\n\t\tunits[i] = p\n\t}\n\n\treturn units, nil\n}\n\n\/\/ npmDependency is a name\/version pair that represents an NPM dependency. This\n\/\/ pair corresponds to the object property\/value pairs in package.json\n\/\/ \"dependency\" objects.\ntype npmDependency struct {\n\t\/\/ Name is the package name of the dependency.\n\tName string\n\n\t\/\/ Spec is the specifier of the version, which can be an NPM version number,\n\t\/\/ a tarball URL, a git\/hg clone URL, etc.\n\tSpec string\n}\n\nconst npmDependencyTargetType = \"npm-dep\"\n\nfunc (v *npmVersion) BuildResolver(dep *dep2.RawDependency, c *config.Repository) (*container.Command, error) {\n\tvar npmDep npmDependency\n\tj, _ := json.Marshal(dep.Target)\n\tjson.Unmarshal(j, &npmDep)\n\n\tdockerfile, err := v.baseDockerfile()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdockerfile = append(dockerfile, []byte(\"\\n\\nRUN npm install --quiet -g deptool@~0.0.2\\n\")...)\n\n\tcmd := container.Command{\n\t\tContainer: container.Container{\n\t\t\tDockerfile: dockerfile,\n\t\t\tCmd: []string{\"nodejs\", \"\/usr\/local\/bin\/npm-deptool\", npmDep.Name + \"@\" + npmDep.Spec},\n\t\t},\n\t\tTransform: func(orig []byte) ([]byte, error) {\n\t\t\t\/\/ resolvedDep is output from npm-deptool.\n\t\t\ttype npmDeptoolOutput struct {\n\t\t\t\tName string\n\t\t\t\tResolvedURL string `json:\"_resolved\"`\n\t\t\t\tID string `json:\"_id\"`\n\t\t\t\tRepository struct {\n\t\t\t\t\tType string\n\t\t\t\t\tURL string\n\t\t\t\t}\n\t\t\t}\n\t\t\tvar resolvedDeps map[string]npmDeptoolOutput\n\t\t\terr := json.Unmarshal(orig, &resolvedDeps)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif len(resolvedDeps) == 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"npm-deptool did not output anything for raw dependency %+v\", dep)\n\t\t\t}\n\t\t\tif len(resolvedDeps) != 1 {\n\t\t\t\treturn nil, fmt.Errorf(\"npm-deptool unexpectedly returned %d deps for raw dependency %+v: deps are %+v\", len(resolvedDeps), dep, resolvedDeps)\n\t\t\t}\n\n\t\t\tvar resolvedDep npmDeptoolOutput\n\t\t\tfor _, v := range resolvedDeps {\n\t\t\t\tresolvedDep = v\n\t\t\t}\n\n\t\t\tvar toRepoCloneURL, toRevSpec string\n\t\t\tif strings.HasPrefix(resolvedDep.ResolvedURL, \"https:\/\/registry.npmjs.org\/\") {\n\t\t\t\t\/\/ known npm package, so the repository refers to it\n\t\t\t\ttoRepoCloneURL = resolvedDep.Repository.URL\n\t\t\t} else {\n\t\t\t\t\/\/ external tarball, git repo url, etc., so the repository might\n\t\t\t\t\/\/ refer to the source repo (if this is a fork) or not be\n\t\t\t\t\/\/ present at all\n\t\t\t\tu, err := url.Parse(resolvedDep.ResolvedURL)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\ttoRevSpec = u.Fragment\n\n\t\t\t\tu.Fragment = \"\"\n\t\t\t\ttoRepoCloneURL = u.String()\n\t\t\t}\n\n\t\t\treturn json.Marshal(&dep2.ResolvedTarget{\n\t\t\t\tToRepoCloneURL: toRepoCloneURL,\n\t\t\t\tToUnitType: unit.Type((&CommonJSPackage{})),\n\t\t\t\tToUnit: resolvedDep.Name,\n\t\t\t\tToVersionString: resolvedDep.ID,\n\t\t\t\tToRevSpec: toRevSpec,\n\t\t\t})\n\t\t},\n\t}\n\treturn &cmd, nil\n}\n\n\/\/ List reads the \"dependencies\" key in the NPM package's package.json file and\n\/\/ outputs the properties as raw dependencies.\nfunc (v *npmVersion) List(dir string, unit unit.SourceUnit, c *config.Repository) ([]*dep2.RawDependency, error) {\n\tpkg := unit.(*CommonJSPackage)\n\n\tif pkg.PackageJSONFile == \"\" {\n\t\t\/\/ No package.json file, so we won't be able to find any dependencies anyway.\n\t\treturn nil, nil\n\t}\n\n\tpkgFile := filepath.Join(dir, pkg.PackageJSONFile)\n\n\tf, err := os.Open(pkgFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tvar pkgjson struct {\n\t\tDependencies map[string]string `json:\"dependencies\"`\n\t\tDevDependencies map[string]string `json:\"devDependencies\"`\n\t}\n\terr = json.NewDecoder(f).Decode(&pkgjson)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trawDeps := make([]*dep2.RawDependency, len(pkgjson.Dependencies)+len(pkgjson.DevDependencies))\n\ti := 0\n\taddDeps := func(deps map[string]string) {\n\t\tfor name, spec := range deps {\n\t\t\trawDeps[i] = &dep2.RawDependency{\n\t\t\t\tFromFile: pkg.PackageJSONFile,\n\t\t\t\tTargetType: npmDependencyTargetType,\n\t\t\t\tTarget: npmDependency{Name: name, Spec: spec},\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t}\n\taddDeps(pkgjson.Dependencies)\n\taddDeps(pkgjson.DevDependencies)\n\n\treturn rawDeps, nil\n}\n\nconst fixPhantomJSHack = \"\\n\\n# fix phantomjs bad url issue (https:\/\/github.com\/Medium\/phantomjs\/issues\/170)\\n\" + `RUN sed -ri 's\/\"phantomjs\"\\s*:\\s*\"[^\"]+\"\/\"phantomjs\":\"1.9.7-8\"\/g' package.json` + \"\\n\"\n<commit_msg>handle when npm-deptool returns >1 dependency (find it by name) (fixes #860)<commit_after>package javascript\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"strings\"\n\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/config\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/container\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/dep2\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/scan\"\n\t\"sourcegraph.com\/sourcegraph\/srcgraph\/unit\"\n)\n\nfunc init() {\n\tscan.Register(\"npm\", scan.DockerScanner{defaultNPM})\n\tdep2.RegisterLister(&CommonJSPackage{}, defaultNPM)\n\tdep2.RegisterResolver(npmDependencyTargetType, dep2.DockerResolver{defaultNPM})\n}\n\nconst (\n\tnodeStdlibRepoURL = \"git:\/\/github.com\/joyent\/node.git\"\n\tNodeJSStdlibUnit = \"node\"\n)\n\ntype nodeVersion struct{}\n\ntype npmVersion struct{ nodeVersion }\n\nvar (\n\tdefaultNode = nodeVersion{}\n\tdefaultNPM = &npmVersion{defaultNode}\n)\n\nfunc (_ *nodeVersion) baseDockerfile() ([]byte, error) {\n\treturn []byte(baseNPMDockerfile), nil\n}\n\nconst baseNPMDockerfile = `FROM ubuntu:14.04\nRUN apt-get update -qq\nRUN apt-get install -qqy nodejs node-gyp npm git\n\n# Some NPM modules expect the node.js interpreter to be \"node\", not \"nodejs\" (as\n# it is on Ubuntu).\nRUN ln -s \/usr\/bin\/nodejs \/usr\/bin\/node\n`\n\n\/\/ containerDir returns the directory in the Docker container to use for the\n\/\/ local directory dir.\nfunc containerDir(dir string) string {\n\treturn filepath.Join(\"\/tmp\/sg\", filepath.Base(dir))\n}\n\nfunc (v *npmVersion) BuildScanner(dir string, c *config.Repository) (*container.Command, error) {\n\tdockerfile, err := v.baseDockerfile()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconst (\n\t\tfindpkgsNPM = \"commonjs-findpkgs@0.0.5\"\n\t\tfindpkgsGit = \"git:\/\/github.com\/sourcegraph\/commonjs-findpkgs.git\"\n\t\tfindpkgsSrc = findpkgsNPM\n\t)\n\tdockerfile = append(dockerfile, []byte(\"\\n\\nRUN npm install --quiet -g \"+findpkgsNPM+\"\\n\")...)\n\n\tscanIgnores, err := json.Marshal(c.ScanIgnore)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontainerDir := containerDir(dir)\n\tcont := container.Container{\n\t\tDockerfile: dockerfile,\n\t\tRunOptions: []string{\"-v\", dir + \":\" + containerDir},\n\t\tCmd: []string{\"commonjs-findpkgs\", \"--ignore\", string(scanIgnores)},\n\t\tDir: containerDir,\n\t}\n\tcmd := container.Command{\n\t\tContainer: cont,\n\t\tTransform: func(orig []byte) ([]byte, error) {\n\t\t\tvar pkgs []*CommonJSPackage\n\t\t\terr := json.Unmarshal(orig, &pkgs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t\/\/ filter out undesirable packages\n\t\t\tvar pkgs2 []*CommonJSPackage\n\t\t\tfor _, pkg := range pkgs {\n\t\t\t\tif !strings.Contains(pkg.Dir, \"node_modules\") {\n\t\t\t\t\tpkgs2 = append(pkgs2, pkg)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ filter out undesirable source files (minified files) from\n\t\t\t\/\/ packages\n\t\t\tfor _, pkg := range pkgs {\n\t\t\t\tfor i, f := range pkg.LibFiles {\n\t\t\t\t\tif strings.HasSuffix(f, \".min.js\") {\n\t\t\t\t\t\tpkg.LibFiles = append(pkg.LibFiles[:i], pkg.LibFiles[i+1:]...)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ set other fields\n\t\t\tfor _, pkg := range pkgs2 {\n\t\t\t\tvar pkgjson struct {\n\t\t\t\t\tName string\n\t\t\t\t\tDescription string\n\t\t\t\t}\n\t\t\t\tif err := json.Unmarshal(pkg.Package, &pkgjson); err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tpkg.PackageName = pkgjson.Name\n\t\t\t\tpkg.PackageDescription = pkgjson.Description\n\t\t\t}\n\n\t\t\treturn json.Marshal(pkgs2)\n\t\t},\n\t}\n\treturn &cmd, nil\n}\n\nfunc (v *npmVersion) UnmarshalSourceUnits(data []byte) ([]unit.SourceUnit, error) {\n\tif len(data) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tvar npmPackages []*CommonJSPackage\n\terr := json.Unmarshal(data, &npmPackages)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tunits := make([]unit.SourceUnit, len(npmPackages))\n\tfor i, p := range npmPackages {\n\t\tunits[i] = p\n\t}\n\n\treturn units, nil\n}\n\n\/\/ npmDependency is a name\/version pair that represents an NPM dependency. This\n\/\/ pair corresponds to the object property\/value pairs in package.json\n\/\/ \"dependency\" objects.\ntype npmDependency struct {\n\t\/\/ Name is the package name of the dependency.\n\tName string\n\n\t\/\/ Spec is the specifier of the version, which can be an NPM version number,\n\t\/\/ a tarball URL, a git\/hg clone URL, etc.\n\tSpec string\n}\n\nconst npmDependencyTargetType = \"npm-dep\"\n\nfunc (v *npmVersion) BuildResolver(dep *dep2.RawDependency, c *config.Repository) (*container.Command, error) {\n\tvar npmDep npmDependency\n\tj, _ := json.Marshal(dep.Target)\n\tjson.Unmarshal(j, &npmDep)\n\n\tdockerfile, err := v.baseDockerfile()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdockerfile = append(dockerfile, []byte(\"\\n\\nRUN npm install --quiet -g deptool@~0.0.2\\n\")...)\n\n\tcmd := container.Command{\n\t\tContainer: container.Container{\n\t\t\tDockerfile: dockerfile,\n\t\t\tCmd: []string{\"nodejs\", \"\/usr\/local\/bin\/npm-deptool\", npmDep.Name + \"@\" + npmDep.Spec},\n\t\t},\n\t\tTransform: func(orig []byte) ([]byte, error) {\n\t\t\t\/\/ resolvedDep is output from npm-deptool.\n\t\t\ttype npmDeptoolOutput struct {\n\t\t\t\tName string\n\t\t\t\tResolvedURL string `json:\"_resolved\"`\n\t\t\t\tID string `json:\"_id\"`\n\t\t\t\tRepository struct {\n\t\t\t\t\tType string\n\t\t\t\t\tURL string\n\t\t\t\t}\n\t\t\t}\n\t\t\tvar resolvedDeps map[string]npmDeptoolOutput\n\t\t\terr := json.Unmarshal(orig, &resolvedDeps)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif len(resolvedDeps) == 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"npm-deptool did not output anything for raw dependency %+v\", dep)\n\t\t\t}\n\n\t\t\tvar resolvedDep *npmDeptoolOutput\n\t\t\tfor name, v := range resolvedDeps {\n\t\t\t\tif name == npmDep.Name {\n\t\t\t\t\tresolvedDep = &v\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif resolvedDep == nil {\n\t\t\t\treturn nil, fmt.Errorf(\"npm-deptool did not return info about npm package %q for raw dependency %+v: all %d resolved deps are %+v\", npmDep.Name, dep, len(resolvedDeps), resolvedDeps)\n\t\t\t}\n\n\t\t\tvar toRepoCloneURL, toRevSpec string\n\t\t\tif strings.HasPrefix(resolvedDep.ResolvedURL, \"https:\/\/registry.npmjs.org\/\") {\n\t\t\t\t\/\/ known npm package, so the repository refers to it\n\t\t\t\ttoRepoCloneURL = resolvedDep.Repository.URL\n\t\t\t} else {\n\t\t\t\t\/\/ external tarball, git repo url, etc., so the repository might\n\t\t\t\t\/\/ refer to the source repo (if this is a fork) or not be\n\t\t\t\t\/\/ present at all\n\t\t\t\tu, err := url.Parse(resolvedDep.ResolvedURL)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\ttoRevSpec = u.Fragment\n\n\t\t\t\tu.Fragment = \"\"\n\t\t\t\ttoRepoCloneURL = u.String()\n\t\t\t}\n\n\t\t\treturn json.Marshal(&dep2.ResolvedTarget{\n\t\t\t\tToRepoCloneURL: toRepoCloneURL,\n\t\t\t\tToUnitType: unit.Type((&CommonJSPackage{})),\n\t\t\t\tToUnit: resolvedDep.Name,\n\t\t\t\tToVersionString: resolvedDep.ID,\n\t\t\t\tToRevSpec: toRevSpec,\n\t\t\t})\n\t\t},\n\t}\n\treturn &cmd, nil\n}\n\n\/\/ List reads the \"dependencies\" key in the NPM package's package.json file and\n\/\/ outputs the properties as raw dependencies.\nfunc (v *npmVersion) List(dir string, unit unit.SourceUnit, c *config.Repository) ([]*dep2.RawDependency, error) {\n\tpkg := unit.(*CommonJSPackage)\n\n\tif pkg.PackageJSONFile == \"\" {\n\t\t\/\/ No package.json file, so we won't be able to find any dependencies anyway.\n\t\treturn nil, nil\n\t}\n\n\tpkgFile := filepath.Join(dir, pkg.PackageJSONFile)\n\n\tf, err := os.Open(pkgFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tvar pkgjson struct {\n\t\tDependencies map[string]string `json:\"dependencies\"`\n\t\tDevDependencies map[string]string `json:\"devDependencies\"`\n\t}\n\terr = json.NewDecoder(f).Decode(&pkgjson)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trawDeps := make([]*dep2.RawDependency, len(pkgjson.Dependencies)+len(pkgjson.DevDependencies))\n\ti := 0\n\taddDeps := func(deps map[string]string) {\n\t\tfor name, spec := range deps {\n\t\t\trawDeps[i] = &dep2.RawDependency{\n\t\t\t\tFromFile: pkg.PackageJSONFile,\n\t\t\t\tTargetType: npmDependencyTargetType,\n\t\t\t\tTarget: npmDependency{Name: name, Spec: spec},\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t}\n\taddDeps(pkgjson.Dependencies)\n\taddDeps(pkgjson.DevDependencies)\n\n\treturn rawDeps, nil\n}\n\nconst fixPhantomJSHack = \"\\n\\n# fix phantomjs bad url issue (https:\/\/github.com\/Medium\/phantomjs\/issues\/170)\\n\" + `RUN sed -ri 's\/\"phantomjs\"\\s*:\\s*\"[^\"]+\"\/\"phantomjs\":\"1.9.7-8\"\/g' package.json` + \"\\n\"\n<|endoftext|>"} {"text":"<commit_before>package wall\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/ivan1993spb\/snake-server\/engine\"\n\t\"github.com\/ivan1993spb\/snake-server\/world\"\n)\n\nconst (\n\truinsFactorAreaTiny = 0\n\truinsFactorAreaSmall = 0.09\n\truinsFactorAreaMedium = 0.12\n\truinsFactorAreaLarge = 0.15\n\truinsFactorAreaEnormous = 0.16\n\n\tsizeAreaTiny = 15 * 15\n\tsizeAreaSmall = 50 * 50\n\tsizeAreaMedium = 100 * 100\n\tsizeAreaLarge = 150 * 150\n)\n\nconst (\n\tfindLocationSuccessiveErrorLimit = 16\n\tlocationOccupiedSuccessiveLimit = 16\n\tnewWallLocationSuccessiveErrorLimit = 8\n)\n\nvar dotsMaskOne = engine.NewDotsMask([][]uint8{{1}})\n\nvar masks = []*engine.DotsMask{\n\tdotsMaskOne,\n\tengine.DotsMaskSquare2x2,\n\tengine.DotsMaskTank,\n\tengine.DotsMaskHome1,\n\tengine.DotsMaskHome2,\n\tengine.DotsMaskCross,\n\tengine.DotsMaskDiagonal,\n\tengine.DotsMaskCrossSmall,\n\tengine.DotsMaskDiagonalSmall,\n\tengine.DotsMaskLabyrinth,\n\tengine.DotsMaskTunnel1,\n\tengine.DotsMaskTunnel2,\n\tengine.DotsMaskBigHome,\n}\n\nfunc getRuinsFactor(size uint16) float32 {\n\tif size <= sizeAreaTiny {\n\t\treturn ruinsFactorAreaTiny\n\t}\n\tif size <= sizeAreaSmall {\n\t\treturn ruinsFactorAreaSmall\n\t}\n\tif size <= sizeAreaMedium {\n\t\treturn ruinsFactorAreaMedium\n\t}\n\tif size <= sizeAreaLarge {\n\t\treturn ruinsFactorAreaLarge\n\t}\n\treturn ruinsFactorAreaEnormous\n}\n\nfunc calcRuinsAreaLimit(size uint16) uint16 {\n\treturn uint16(float32(size) * getRuinsFactor(size))\n}\n\ntype RuinsGenerator struct {\n\tworld world.Interface\n\tarea engine.Area\n\n\truinsAreaLimit uint16\n\tareaOccupiedSum uint16\n\n\terrFindLocationCounter int\n\tlocationOccupiedCounter int\n\terrNewWallLocationCounter int\n\n\tmaskIndex int\n}\n\ntype ErrCreateRuinsGenerator string\n\nfunc (e ErrCreateRuinsGenerator) Error() string {\n\treturn \"cannot create ruins generator: \" + string(e)\n}\n\nfunc NewRuinsGenerator(w world.Interface) *RuinsGenerator {\n\tarea := w.Area()\n\n\treturn &RuinsGenerator{\n\t\tworld: w,\n\t\tarea: area,\n\n\t\truinsAreaLimit: calcRuinsAreaLimit(area.Size()),\n\t}\n}\n\nfunc (rg *RuinsGenerator) Done() bool {\n\treturn rg.areaOccupiedSum == rg.ruinsAreaLimit\n}\n\ntype ErrGenerateWall string\n\nfunc (e ErrGenerateWall) Error() string {\n\treturn \"generate wall error: \" + string(e)\n}\n\nfunc (rg *RuinsGenerator) Err() error {\n\tif rg.errFindLocationCounter >= findLocationSuccessiveErrorLimit {\n\t\treturn ErrGenerateWall(\"too many successive errors in location finding\")\n\t}\n\tif rg.locationOccupiedCounter >= locationOccupiedSuccessiveLimit {\n\t\treturn ErrGenerateWall(\"too many successive occupied locations\")\n\t}\n\tif rg.errNewWallLocationCounter >= newWallLocationSuccessiveErrorLimit {\n\t\treturn ErrGenerateWall(\"to many successive errors on wall creating\")\n\t}\n\treturn nil\n}\n\nfunc (rg *RuinsGenerator) GenerateWall() (*Wall, error) {\n\tif rg.Err() != nil {\n\t\treturn nil, rg.Err()\n\t}\n\n\tif rg.areaOccupiedSum == rg.ruinsAreaLimit {\n\t\treturn nil, ErrGenerateWall(\"ruins generation has been done\")\n\t}\n\n\tmask := rg.getMask()\n\n\tlocation, err := rg.findLocation(mask)\n\tif err != nil {\n\t\trg.errFindLocationCounter++\n\t\treturn nil, ErrGenerateWall(\"find location error: \" + err.Error())\n\t}\n\trg.errFindLocationCounter = 0\n\n\tif rg.world.LocationOccupied(location) {\n\t\trg.locationOccupiedCounter++\n\t\treturn nil, ErrGenerateWall(\"location occupied\")\n\t}\n\trg.locationOccupiedCounter = 0\n\n\twall, err := NewWallLocation(rg.world, location)\n\tif err != nil {\n\t\trg.errNewWallLocationCounter++\n\t\treturn nil, ErrGenerateWall(\"new wall error: \" + err.Error())\n\t}\n\trg.errNewWallLocationCounter = 0\n\n\trg.areaOccupiedSum += wall.location.DotCount()\n\n\treturn nil, err\n}\n\nfunc (rg *RuinsGenerator) getMask() *engine.DotsMask {\n\tif rg.maskIndex >= len(masks) {\n\t\trg.maskIndex = 0\n\t}\n\n\tif rg.maskIndex < len(masks)-1 {\n\t\tmask := masks[rg.maskIndex]\n\t\trg.maskIndex++\n\t\treturn mask\n\t}\n\n\tmask := masks[rg.maskIndex]\n\trg.maskIndex = 0\n\treturn mask\n}\n\nfunc (rg *RuinsGenerator) findLocation(mask *engine.DotsMask) (engine.Location, error) {\n\tif rg.areaOccupiedSum == rg.ruinsAreaLimit {\n\t\treturn engine.Location{}, nil\n\t}\n\n\tmask = mask.TurnRandom()\n\n\tif rg.area.Width() < mask.Width() || rg.area.Height() < mask.Height() {\n\t\treturn nil, fmt.Errorf(\"mask doesn't fit the area\")\n\t}\n\n\trect, err := rg.area.NewRandomRect(mask.Width(), mask.Height(), 0, 0)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot get random rect: %s\", err)\n\t}\n\n\tlocation := mask.Location(rect.X(), rect.Y())\n\tlimit := rg.ruinsAreaLimit - rg.areaOccupiedSum\n\tif location.DotCount() > limit {\n\t\tlocation = location[:limit]\n\t}\n\n\treturn location, nil\n}\n<commit_msg>Create concurrency safe mux in RuinsGenerator<commit_after>package wall\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com\/ivan1993spb\/snake-server\/engine\"\n\t\"github.com\/ivan1993spb\/snake-server\/world\"\n)\n\nconst (\n\truinsFactorAreaTiny = 0\n\truinsFactorAreaSmall = 0.09\n\truinsFactorAreaMedium = 0.12\n\truinsFactorAreaLarge = 0.15\n\truinsFactorAreaEnormous = 0.16\n\n\tsizeAreaTiny = 15 * 15\n\tsizeAreaSmall = 50 * 50\n\tsizeAreaMedium = 100 * 100\n\tsizeAreaLarge = 150 * 150\n)\n\nconst (\n\tfindLocationSuccessiveErrorLimit = 16\n\tlocationOccupiedSuccessiveLimit = 16\n\tnewWallLocationSuccessiveErrorLimit = 8\n)\n\nvar dotsMaskOne = engine.NewDotsMask([][]uint8{{1}})\n\nvar masks = []*engine.DotsMask{\n\tdotsMaskOne,\n\tengine.DotsMaskSquare2x2,\n\tengine.DotsMaskTank,\n\tengine.DotsMaskHome1,\n\tengine.DotsMaskHome2,\n\tengine.DotsMaskCross,\n\tengine.DotsMaskDiagonal,\n\tengine.DotsMaskCrossSmall,\n\tengine.DotsMaskDiagonalSmall,\n\tengine.DotsMaskLabyrinth,\n\tengine.DotsMaskTunnel1,\n\tengine.DotsMaskTunnel2,\n\tengine.DotsMaskBigHome,\n}\n\nfunc getRuinsFactor(size uint16) float32 {\n\tif size <= sizeAreaTiny {\n\t\treturn ruinsFactorAreaTiny\n\t}\n\tif size <= sizeAreaSmall {\n\t\treturn ruinsFactorAreaSmall\n\t}\n\tif size <= sizeAreaMedium {\n\t\treturn ruinsFactorAreaMedium\n\t}\n\tif size <= sizeAreaLarge {\n\t\treturn ruinsFactorAreaLarge\n\t}\n\treturn ruinsFactorAreaEnormous\n}\n\nfunc calcRuinsAreaLimit(size uint16) uint16 {\n\treturn uint16(float32(size) * getRuinsFactor(size))\n}\n\ntype RuinsGenerator struct {\n\tworld world.Interface\n\tarea engine.Area\n\n\truinsAreaLimit uint16\n\tareaOccupiedSum uint16\n\n\terrFindLocationCounter int\n\tlocationOccupiedCounter int\n\terrNewWallLocationCounter int\n\n\tmaskIndex int\n\n\tmux *sync.Mutex\n}\n\ntype ErrCreateRuinsGenerator string\n\nfunc (e ErrCreateRuinsGenerator) Error() string {\n\treturn \"cannot create ruins generator: \" + string(e)\n}\n\nfunc NewRuinsGenerator(w world.Interface) *RuinsGenerator {\n\tarea := w.Area()\n\n\treturn &RuinsGenerator{\n\t\tworld: w,\n\t\tarea: area,\n\n\t\truinsAreaLimit: calcRuinsAreaLimit(area.Size()),\n\n\t\tmux: &sync.Mutex{},\n\t}\n}\n\nfunc (rg *RuinsGenerator) Done() bool {\n\treturn rg.areaOccupiedSum == rg.ruinsAreaLimit\n}\n\ntype ErrGenerateWall string\n\nfunc (e ErrGenerateWall) Error() string {\n\treturn \"generate wall error: \" + string(e)\n}\n\nfunc (rg *RuinsGenerator) Err() error {\n\tif rg.errFindLocationCounter >= findLocationSuccessiveErrorLimit {\n\t\treturn ErrGenerateWall(\"too many successive errors in location finding\")\n\t}\n\tif rg.locationOccupiedCounter >= locationOccupiedSuccessiveLimit {\n\t\treturn ErrGenerateWall(\"too many successive occupied locations\")\n\t}\n\tif rg.errNewWallLocationCounter >= newWallLocationSuccessiveErrorLimit {\n\t\treturn ErrGenerateWall(\"to many successive errors on wall creating\")\n\t}\n\treturn nil\n}\n\nfunc (rg *RuinsGenerator) GenerateWall() (*Wall, error) {\n\tif rg.Err() != nil {\n\t\treturn nil, rg.Err()\n\t}\n\n\trg.mux.Lock()\n\tdefer rg.mux.Unlock()\n\n\tif rg.areaOccupiedSum == rg.ruinsAreaLimit {\n\t\treturn nil, ErrGenerateWall(\"ruins generation has been done\")\n\t}\n\n\tmask := rg.getMask()\n\n\tlocation, err := rg.findLocation(mask)\n\tif err != nil {\n\t\trg.errFindLocationCounter++\n\t\treturn nil, ErrGenerateWall(\"find location error: \" + err.Error())\n\t}\n\trg.errFindLocationCounter = 0\n\n\tif rg.world.LocationOccupied(location) {\n\t\trg.locationOccupiedCounter++\n\t\treturn nil, ErrGenerateWall(\"location occupied\")\n\t}\n\trg.locationOccupiedCounter = 0\n\n\twall, err := NewWallLocation(rg.world, location)\n\tif err != nil {\n\t\trg.errNewWallLocationCounter++\n\t\treturn nil, ErrGenerateWall(\"new wall error: \" + err.Error())\n\t}\n\trg.errNewWallLocationCounter = 0\n\n\trg.areaOccupiedSum += wall.location.DotCount()\n\n\treturn nil, err\n}\n\nfunc (rg *RuinsGenerator) getMask() *engine.DotsMask {\n\tif rg.maskIndex >= len(masks) {\n\t\trg.maskIndex = 0\n\t}\n\n\tif rg.maskIndex < len(masks)-1 {\n\t\tmask := masks[rg.maskIndex]\n\t\trg.maskIndex++\n\t\treturn mask\n\t}\n\n\tmask := masks[rg.maskIndex]\n\trg.maskIndex = 0\n\treturn mask\n}\n\nfunc (rg *RuinsGenerator) findLocation(mask *engine.DotsMask) (engine.Location, error) {\n\tif rg.areaOccupiedSum == rg.ruinsAreaLimit {\n\t\treturn engine.Location{}, nil\n\t}\n\n\tmask = mask.TurnRandom()\n\n\tif rg.area.Width() < mask.Width() || rg.area.Height() < mask.Height() {\n\t\treturn nil, fmt.Errorf(\"mask doesn't fit the area\")\n\t}\n\n\trect, err := rg.area.NewRandomRect(mask.Width(), mask.Height(), 0, 0)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot get random rect: %s\", err)\n\t}\n\n\tlocation := mask.Location(rect.X(), rect.Y())\n\tlimit := rg.ruinsAreaLimit - rg.areaOccupiedSum\n\tif location.DotCount() > limit {\n\t\tlocation = location[:limit]\n\t}\n\n\treturn location, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2014 Google Inc. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package factory can set up a scheduler. This code is here instead of\n\/\/ plugin\/cmd\/scheduler for both testability and reuse.\npackage factory\n\nimport (\n\t\"math\/rand\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/client\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/client\/cache\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/controller\/framework\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/fields\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/labels\"\n\talgorithm \"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/scheduler\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/util\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/plugin\/pkg\/scheduler\"\n\tschedulerapi \"github.com\/GoogleCloudPlatform\/kubernetes\/plugin\/pkg\/scheduler\/api\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/plugin\/pkg\/scheduler\/api\/validation\"\n\n\t\"github.com\/golang\/glog\"\n)\n\n\/\/ ConfigFactory knows how to fill out a scheduler config with its support functions.\ntype ConfigFactory struct {\n\tClient *client.Client\n\t\/\/ queue for pods that need scheduling\n\tPodQueue *cache.FIFO\n\t\/\/ a means to list all known scheduled pods.\n\tScheduledPodLister *cache.StoreToPodLister\n\t\/\/ a means to list all known scheduled pods and pods assumed to have been scheduled.\n\tPodLister algorithm.PodLister\n\t\/\/ a means to list all minions\n\tNodeLister *cache.StoreToNodeLister\n\t\/\/ a means to list all services\n\tServiceLister *cache.StoreToServiceLister\n\n\t\/\/ Close this to stop all reflectors\n\tStopEverything chan struct{}\n\n\tscheduledPodPopulator *framework.Controller\n\tmodeler scheduler.SystemModeler\n}\n\n\/\/ Initializes the factory.\nfunc NewConfigFactory(client *client.Client) *ConfigFactory {\n\tc := &ConfigFactory{\n\t\tClient: client,\n\t\tPodQueue: cache.NewFIFO(cache.MetaNamespaceKeyFunc),\n\t\tScheduledPodLister: &cache.StoreToPodLister{},\n\t\tNodeLister: &cache.StoreToNodeLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},\n\t\tServiceLister: &cache.StoreToServiceLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},\n\t\tStopEverything: make(chan struct{}),\n\t}\n\tmodeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{c.PodQueue}, c.ScheduledPodLister)\n\tc.modeler = modeler\n\tc.PodLister = modeler.PodLister()\n\n\t\/\/ On add\/delete to the scheduled pods, remove from the assumed pods.\n\t\/\/ We construct this here instead of in CreateFromKeys because\n\t\/\/ ScheduledPodLister is something we provide to plug in functions that\n\t\/\/ they may need to call.\n\tc.ScheduledPodLister.Store, c.scheduledPodPopulator = framework.NewInformer(\n\t\tc.createAssignedPodLW(),\n\t\t&api.Pod{},\n\t\t0,\n\t\tframework.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\tif pod, ok := obj.(*api.Pod); ok {\n\t\t\t\t\tc.modeler.ForgetPod(pod)\n\t\t\t\t}\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\tswitch t := obj.(type) {\n\t\t\t\tcase *api.Pod:\n\t\t\t\t\tc.modeler.ForgetPod(t)\n\t\t\t\tcase cache.DeletedFinalStateUnknown:\n\t\t\t\t\tc.modeler.ForgetPodByKey(t.Key)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t)\n\n\treturn c\n}\n\n\/\/ Create creates a scheduler with the default algorithm provider.\nfunc (f *ConfigFactory) Create() (*scheduler.Config, error) {\n\treturn f.CreateFromProvider(DefaultProvider)\n}\n\n\/\/ Creates a scheduler from the name of a registered algorithm provider.\nfunc (f *ConfigFactory) CreateFromProvider(providerName string) (*scheduler.Config, error) {\n\tglog.V(2).Infof(\"creating scheduler from algorithm provider '%v'\", providerName)\n\tprovider, err := GetAlgorithmProvider(providerName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn f.CreateFromKeys(provider.FitPredicateKeys, provider.PriorityFunctionKeys)\n}\n\n\/\/ Creates a scheduler from the configuration file\nfunc (f *ConfigFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler.Config, error) {\n\tglog.V(2).Infof(\"creating scheduler from configuration: %v\", policy)\n\n\t\/\/ validate the policy configuration\n\tif err := validation.ValidatePolicy(policy); err != nil {\n\t\treturn nil, err\n\t}\n\n\tpredicateKeys := util.NewStringSet()\n\tfor _, predicate := range policy.Predicates {\n\t\tglog.V(2).Infof(\"Registering predicate: %s\", predicate.Name)\n\t\tpredicateKeys.Insert(RegisterCustomFitPredicate(predicate))\n\t}\n\n\tpriorityKeys := util.NewStringSet()\n\tfor _, priority := range policy.Priorities {\n\t\tglog.V(2).Infof(\"Registering priority: %s\", priority.Name)\n\t\tpriorityKeys.Insert(RegisterCustomPriorityFunction(priority))\n\t}\n\n\treturn f.CreateFromKeys(predicateKeys, priorityKeys)\n}\n\n\/\/ Creates a scheduler from a set of registered fit predicate keys and priority keys.\nfunc (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys util.StringSet) (*scheduler.Config, error) {\n\tglog.V(2).Infof(\"creating scheduler with fit predicates '%v' and priority functions '%v\", predicateKeys, priorityKeys)\n\tpluginArgs := PluginFactoryArgs{\n\t\tPodLister: f.PodLister,\n\t\tServiceLister: f.ServiceLister,\n\t\tNodeLister: f.NodeLister,\n\t\tNodeInfo: f.NodeLister,\n\t}\n\tpredicateFuncs, err := getFitPredicateFunctions(predicateKeys, pluginArgs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpriorityConfigs, err := getPriorityFunctionConfigs(priorityKeys, pluginArgs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Watch and queue pods that need scheduling.\n\tcache.NewReflector(f.createUnassignedPodLW(), &api.Pod{}, f.PodQueue, 0).RunUntil(f.StopEverything)\n\n\t\/\/ Begin populating scheduled pods.\n\tgo f.scheduledPodPopulator.Run(f.StopEverything)\n\n\t\/\/ Watch minions.\n\t\/\/ Minions may be listed frequently, so provide a local up-to-date cache.\n\tcache.NewReflector(f.createMinionLW(), &api.Node{}, f.NodeLister.Store, 0).RunUntil(f.StopEverything)\n\n\t\/\/ Watch and cache all service objects. Scheduler needs to find all pods\n\t\/\/ created by the same service, so that it can spread them correctly.\n\t\/\/ Cache this locally.\n\tcache.NewReflector(f.createServiceLW(), &api.Service{}, f.ServiceLister.Store, 0).RunUntil(f.StopEverything)\n\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\n\talgo := algorithm.NewGenericScheduler(predicateFuncs, priorityConfigs, f.PodLister, r)\n\n\tpodBackoff := podBackoff{\n\t\tperPodBackoff: map[string]*backoffEntry{},\n\t\tclock: realClock{},\n\n\t\tdefaultDuration: 1 * time.Second,\n\t\tmaxDuration: 60 * time.Second,\n\t}\n\n\treturn &scheduler.Config{\n\t\tModeler: f.modeler,\n\t\tMinionLister: f.NodeLister,\n\t\tAlgorithm: algo,\n\t\tBinder: &binder{f.Client},\n\t\tNextPod: func() *api.Pod {\n\t\t\tpod := f.PodQueue.Pop().(*api.Pod)\n\t\t\tglog.V(2).Infof(\"About to try and schedule pod %v\", pod.Name)\n\t\t\treturn pod\n\t\t},\n\t\tError: f.makeDefaultErrorFunc(&podBackoff, f.PodQueue),\n\t\tStopEverything: f.StopEverything,\n\t}, nil\n}\n\n\/\/ Returns a cache.ListWatch that finds all pods that need to be\n\/\/ scheduled.\nfunc (factory *ConfigFactory) createUnassignedPodLW() *cache.ListWatch {\n\treturn cache.NewListWatchFromClient(factory.Client, \"pods\", api.NamespaceAll, fields.Set{client.PodHost: \"\"}.AsSelector())\n}\n\nfunc parseSelectorOrDie(s string) fields.Selector {\n\tselector, err := fields.ParseSelector(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn selector\n}\n\n\/\/ Returns a cache.ListWatch that finds all pods that are\n\/\/ already scheduled.\n\/\/ TODO: return a ListerWatcher interface instead?\nfunc (factory *ConfigFactory) createAssignedPodLW() *cache.ListWatch {\n\treturn cache.NewListWatchFromClient(factory.Client, \"pods\", api.NamespaceAll,\n\t\tparseSelectorOrDie(client.PodHost+\"!=\"))\n}\n\n\/\/ createMinionLW returns a cache.ListWatch that gets all changes to minions.\nfunc (factory *ConfigFactory) createMinionLW() *cache.ListWatch {\n\treturn cache.NewListWatchFromClient(factory.Client, \"nodes\", api.NamespaceAll, parseSelectorOrDie(\"\"))\n}\n\n\/\/ Lists all minions and filter out unhealthy ones, then returns\n\/\/ an enumerator for cache.Poller.\nfunc (factory *ConfigFactory) pollMinions() (cache.Enumerator, error) {\n\tallNodes, err := factory.Client.Nodes().List(labels.Everything(), fields.Everything())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnodes := &api.NodeList{\n\t\tTypeMeta: allNodes.TypeMeta,\n\t\tListMeta: allNodes.ListMeta,\n\t}\n\tfor _, node := range allNodes.Items {\n\t\tconditionMap := make(map[api.NodeConditionType]*api.NodeCondition)\n\t\tfor i := range node.Status.Conditions {\n\t\t\tcond := node.Status.Conditions[i]\n\t\t\tconditionMap[cond.Type] = &cond\n\t\t}\n\t\tif node.Spec.Unschedulable {\n\t\t\tcontinue\n\t\t}\n\t\tif condition, ok := conditionMap[api.NodeReady]; ok {\n\t\t\tif condition.Status == api.ConditionTrue {\n\t\t\t\tnodes.Items = append(nodes.Items, node)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ If no condition is set, we get unknown node condition. In such cases,\n\t\t\t\/\/ do not add the node.\n\t\t\tglog.V(2).Infof(\"Minion %s is not available. Skipping\", node.Name)\n\t\t}\n\t}\n\treturn &nodeEnumerator{nodes}, nil\n}\n\n\/\/ Returns a cache.ListWatch that gets all changes to services.\nfunc (factory *ConfigFactory) createServiceLW() *cache.ListWatch {\n\treturn cache.NewListWatchFromClient(factory.Client, \"services\", api.NamespaceAll, parseSelectorOrDie(\"\"))\n}\n\nfunc (factory *ConfigFactory) makeDefaultErrorFunc(backoff *podBackoff, podQueue *cache.FIFO) func(pod *api.Pod, err error) {\n\treturn func(pod *api.Pod, err error) {\n\t\tglog.Errorf(\"Error scheduling %v %v: %v; retrying\", pod.Namespace, pod.Name, err)\n\t\tbackoff.gc()\n\t\t\/\/ Retry asynchronously.\n\t\t\/\/ Note that this is extremely rudimentary and we need a more real error handling path.\n\t\tgo func() {\n\t\t\tdefer util.HandleCrash()\n\t\t\tpodID := pod.Name\n\t\t\tpodNamespace := pod.Namespace\n\t\t\tbackoff.wait(podID)\n\t\t\t\/\/ Get the pod again; it may have changed\/been scheduled already.\n\t\t\tpod = &api.Pod{}\n\t\t\terr := factory.Client.Get().Namespace(podNamespace).Resource(\"pods\").Name(podID).Do().Into(pod)\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"Error getting pod %v for retry: %v; abandoning\", podID, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif pod.Spec.Host == \"\" {\n\t\t\t\tpodQueue.Add(pod)\n\t\t\t}\n\t\t}()\n\t}\n}\n\n\/\/ nodeEnumerator allows a cache.Poller to enumerate items in an api.NodeList\ntype nodeEnumerator struct {\n\t*api.NodeList\n}\n\n\/\/ Len returns the number of items in the node list.\nfunc (ne *nodeEnumerator) Len() int {\n\tif ne.NodeList == nil {\n\t\treturn 0\n\t}\n\treturn len(ne.Items)\n}\n\n\/\/ Get returns the item (and ID) with the particular index.\nfunc (ne *nodeEnumerator) Get(index int) interface{} {\n\treturn &ne.Items[index]\n}\n\ntype binder struct {\n\t*client.Client\n}\n\n\/\/ Bind just does a POST binding RPC.\nfunc (b *binder) Bind(binding *api.Binding) error {\n\tglog.V(2).Infof(\"Attempting to bind %v to %v\", binding.Name, binding.Target.Name)\n\tctx := api.WithNamespace(api.NewContext(), binding.Namespace)\n\treturn b.Post().Namespace(api.NamespaceValue(ctx)).Resource(\"bindings\").Body(binding).Do().Error()\n\t\/\/ TODO: use Pods interface for binding once clusters are upgraded\n\t\/\/ return b.Pods(binding.Namespace).Bind(binding)\n}\n\ntype clock interface {\n\tNow() time.Time\n}\n\ntype realClock struct{}\n\nfunc (realClock) Now() time.Time {\n\treturn time.Now()\n}\n\ntype backoffEntry struct {\n\tbackoff time.Duration\n\tlastUpdate time.Time\n}\n\ntype podBackoff struct {\n\tperPodBackoff map[string]*backoffEntry\n\tlock sync.Mutex\n\tclock clock\n\tdefaultDuration time.Duration\n\tmaxDuration time.Duration\n}\n\nfunc (p *podBackoff) getEntry(podID string) *backoffEntry {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\tentry, ok := p.perPodBackoff[podID]\n\tif !ok {\n\t\tentry = &backoffEntry{backoff: p.defaultDuration}\n\t\tp.perPodBackoff[podID] = entry\n\t}\n\tentry.lastUpdate = p.clock.Now()\n\treturn entry\n}\n\nfunc (p *podBackoff) getBackoff(podID string) time.Duration {\n\tentry := p.getEntry(podID)\n\tduration := entry.backoff\n\tentry.backoff *= 2\n\tif entry.backoff > p.maxDuration {\n\t\tentry.backoff = p.maxDuration\n\t}\n\tglog.V(4).Infof(\"Backing off %s for pod %s\", duration.String(), podID)\n\treturn duration\n}\n\nfunc (p *podBackoff) wait(podID string) {\n\ttime.Sleep(p.getBackoff(podID))\n}\n\nfunc (p *podBackoff) gc() {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\tnow := p.clock.Now()\n\tfor podID, entry := range p.perPodBackoff {\n\t\tif now.Sub(entry.lastUpdate) > p.maxDuration {\n\t\t\tdelete(p.perPodBackoff, podID)\n\t\t}\n\t}\n}\n<commit_msg>add missing syncronization<commit_after>\/*\nCopyright 2014 Google Inc. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package factory can set up a scheduler. This code is here instead of\n\/\/ plugin\/cmd\/scheduler for both testability and reuse.\npackage factory\n\nimport (\n\t\"math\/rand\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/client\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/client\/cache\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/controller\/framework\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/fields\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/labels\"\n\talgorithm \"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/scheduler\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/util\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/plugin\/pkg\/scheduler\"\n\tschedulerapi \"github.com\/GoogleCloudPlatform\/kubernetes\/plugin\/pkg\/scheduler\/api\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/plugin\/pkg\/scheduler\/api\/validation\"\n\n\t\"github.com\/golang\/glog\"\n)\n\n\/\/ ConfigFactory knows how to fill out a scheduler config with its support functions.\ntype ConfigFactory struct {\n\tClient *client.Client\n\t\/\/ queue for pods that need scheduling\n\tPodQueue *cache.FIFO\n\t\/\/ a means to list all known scheduled pods.\n\tScheduledPodLister *cache.StoreToPodLister\n\t\/\/ a means to list all known scheduled pods and pods assumed to have been scheduled.\n\tPodLister algorithm.PodLister\n\t\/\/ a means to list all minions\n\tNodeLister *cache.StoreToNodeLister\n\t\/\/ a means to list all services\n\tServiceLister *cache.StoreToServiceLister\n\n\t\/\/ Close this to stop all reflectors\n\tStopEverything chan struct{}\n\n\tscheduledPodPopulator *framework.Controller\n\tmodeler scheduler.SystemModeler\n}\n\n\/\/ Initializes the factory.\nfunc NewConfigFactory(client *client.Client) *ConfigFactory {\n\tc := &ConfigFactory{\n\t\tClient: client,\n\t\tPodQueue: cache.NewFIFO(cache.MetaNamespaceKeyFunc),\n\t\tScheduledPodLister: &cache.StoreToPodLister{},\n\t\tNodeLister: &cache.StoreToNodeLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},\n\t\tServiceLister: &cache.StoreToServiceLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},\n\t\tStopEverything: make(chan struct{}),\n\t}\n\tmodeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{c.PodQueue}, c.ScheduledPodLister)\n\tc.modeler = modeler\n\tc.PodLister = modeler.PodLister()\n\n\t\/\/ On add\/delete to the scheduled pods, remove from the assumed pods.\n\t\/\/ We construct this here instead of in CreateFromKeys because\n\t\/\/ ScheduledPodLister is something we provide to plug in functions that\n\t\/\/ they may need to call.\n\tc.ScheduledPodLister.Store, c.scheduledPodPopulator = framework.NewInformer(\n\t\tc.createAssignedPodLW(),\n\t\t&api.Pod{},\n\t\t0,\n\t\tframework.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\tif pod, ok := obj.(*api.Pod); ok {\n\t\t\t\t\tc.modeler.LockedAction(func() {\n\t\t\t\t\t\tc.modeler.ForgetPod(pod)\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t},\n\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\tc.modeler.LockedAction(func() {\n\t\t\t\t\tswitch t := obj.(type) {\n\t\t\t\t\tcase *api.Pod:\n\t\t\t\t\t\tc.modeler.ForgetPod(t)\n\t\t\t\t\tcase cache.DeletedFinalStateUnknown:\n\t\t\t\t\t\tc.modeler.ForgetPodByKey(t.Key)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t)\n\n\treturn c\n}\n\n\/\/ Create creates a scheduler with the default algorithm provider.\nfunc (f *ConfigFactory) Create() (*scheduler.Config, error) {\n\treturn f.CreateFromProvider(DefaultProvider)\n}\n\n\/\/ Creates a scheduler from the name of a registered algorithm provider.\nfunc (f *ConfigFactory) CreateFromProvider(providerName string) (*scheduler.Config, error) {\n\tglog.V(2).Infof(\"creating scheduler from algorithm provider '%v'\", providerName)\n\tprovider, err := GetAlgorithmProvider(providerName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn f.CreateFromKeys(provider.FitPredicateKeys, provider.PriorityFunctionKeys)\n}\n\n\/\/ Creates a scheduler from the configuration file\nfunc (f *ConfigFactory) CreateFromConfig(policy schedulerapi.Policy) (*scheduler.Config, error) {\n\tglog.V(2).Infof(\"creating scheduler from configuration: %v\", policy)\n\n\t\/\/ validate the policy configuration\n\tif err := validation.ValidatePolicy(policy); err != nil {\n\t\treturn nil, err\n\t}\n\n\tpredicateKeys := util.NewStringSet()\n\tfor _, predicate := range policy.Predicates {\n\t\tglog.V(2).Infof(\"Registering predicate: %s\", predicate.Name)\n\t\tpredicateKeys.Insert(RegisterCustomFitPredicate(predicate))\n\t}\n\n\tpriorityKeys := util.NewStringSet()\n\tfor _, priority := range policy.Priorities {\n\t\tglog.V(2).Infof(\"Registering priority: %s\", priority.Name)\n\t\tpriorityKeys.Insert(RegisterCustomPriorityFunction(priority))\n\t}\n\n\treturn f.CreateFromKeys(predicateKeys, priorityKeys)\n}\n\n\/\/ Creates a scheduler from a set of registered fit predicate keys and priority keys.\nfunc (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys util.StringSet) (*scheduler.Config, error) {\n\tglog.V(2).Infof(\"creating scheduler with fit predicates '%v' and priority functions '%v\", predicateKeys, priorityKeys)\n\tpluginArgs := PluginFactoryArgs{\n\t\tPodLister: f.PodLister,\n\t\tServiceLister: f.ServiceLister,\n\t\tNodeLister: f.NodeLister,\n\t\tNodeInfo: f.NodeLister,\n\t}\n\tpredicateFuncs, err := getFitPredicateFunctions(predicateKeys, pluginArgs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpriorityConfigs, err := getPriorityFunctionConfigs(priorityKeys, pluginArgs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Watch and queue pods that need scheduling.\n\tcache.NewReflector(f.createUnassignedPodLW(), &api.Pod{}, f.PodQueue, 0).RunUntil(f.StopEverything)\n\n\t\/\/ Begin populating scheduled pods.\n\tgo f.scheduledPodPopulator.Run(f.StopEverything)\n\n\t\/\/ Watch minions.\n\t\/\/ Minions may be listed frequently, so provide a local up-to-date cache.\n\tcache.NewReflector(f.createMinionLW(), &api.Node{}, f.NodeLister.Store, 0).RunUntil(f.StopEverything)\n\n\t\/\/ Watch and cache all service objects. Scheduler needs to find all pods\n\t\/\/ created by the same service, so that it can spread them correctly.\n\t\/\/ Cache this locally.\n\tcache.NewReflector(f.createServiceLW(), &api.Service{}, f.ServiceLister.Store, 0).RunUntil(f.StopEverything)\n\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\n\talgo := algorithm.NewGenericScheduler(predicateFuncs, priorityConfigs, f.PodLister, r)\n\n\tpodBackoff := podBackoff{\n\t\tperPodBackoff: map[string]*backoffEntry{},\n\t\tclock: realClock{},\n\n\t\tdefaultDuration: 1 * time.Second,\n\t\tmaxDuration: 60 * time.Second,\n\t}\n\n\treturn &scheduler.Config{\n\t\tModeler: f.modeler,\n\t\tMinionLister: f.NodeLister,\n\t\tAlgorithm: algo,\n\t\tBinder: &binder{f.Client},\n\t\tNextPod: func() *api.Pod {\n\t\t\tpod := f.PodQueue.Pop().(*api.Pod)\n\t\t\tglog.V(2).Infof(\"About to try and schedule pod %v\", pod.Name)\n\t\t\treturn pod\n\t\t},\n\t\tError: f.makeDefaultErrorFunc(&podBackoff, f.PodQueue),\n\t\tStopEverything: f.StopEverything,\n\t}, nil\n}\n\n\/\/ Returns a cache.ListWatch that finds all pods that need to be\n\/\/ scheduled.\nfunc (factory *ConfigFactory) createUnassignedPodLW() *cache.ListWatch {\n\treturn cache.NewListWatchFromClient(factory.Client, \"pods\", api.NamespaceAll, fields.Set{client.PodHost: \"\"}.AsSelector())\n}\n\nfunc parseSelectorOrDie(s string) fields.Selector {\n\tselector, err := fields.ParseSelector(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn selector\n}\n\n\/\/ Returns a cache.ListWatch that finds all pods that are\n\/\/ already scheduled.\n\/\/ TODO: return a ListerWatcher interface instead?\nfunc (factory *ConfigFactory) createAssignedPodLW() *cache.ListWatch {\n\treturn cache.NewListWatchFromClient(factory.Client, \"pods\", api.NamespaceAll,\n\t\tparseSelectorOrDie(client.PodHost+\"!=\"))\n}\n\n\/\/ createMinionLW returns a cache.ListWatch that gets all changes to minions.\nfunc (factory *ConfigFactory) createMinionLW() *cache.ListWatch {\n\treturn cache.NewListWatchFromClient(factory.Client, \"nodes\", api.NamespaceAll, parseSelectorOrDie(\"\"))\n}\n\n\/\/ Lists all minions and filter out unhealthy ones, then returns\n\/\/ an enumerator for cache.Poller.\nfunc (factory *ConfigFactory) pollMinions() (cache.Enumerator, error) {\n\tallNodes, err := factory.Client.Nodes().List(labels.Everything(), fields.Everything())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnodes := &api.NodeList{\n\t\tTypeMeta: allNodes.TypeMeta,\n\t\tListMeta: allNodes.ListMeta,\n\t}\n\tfor _, node := range allNodes.Items {\n\t\tconditionMap := make(map[api.NodeConditionType]*api.NodeCondition)\n\t\tfor i := range node.Status.Conditions {\n\t\t\tcond := node.Status.Conditions[i]\n\t\t\tconditionMap[cond.Type] = &cond\n\t\t}\n\t\tif node.Spec.Unschedulable {\n\t\t\tcontinue\n\t\t}\n\t\tif condition, ok := conditionMap[api.NodeReady]; ok {\n\t\t\tif condition.Status == api.ConditionTrue {\n\t\t\t\tnodes.Items = append(nodes.Items, node)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ If no condition is set, we get unknown node condition. In such cases,\n\t\t\t\/\/ do not add the node.\n\t\t\tglog.V(2).Infof(\"Minion %s is not available. Skipping\", node.Name)\n\t\t}\n\t}\n\treturn &nodeEnumerator{nodes}, nil\n}\n\n\/\/ Returns a cache.ListWatch that gets all changes to services.\nfunc (factory *ConfigFactory) createServiceLW() *cache.ListWatch {\n\treturn cache.NewListWatchFromClient(factory.Client, \"services\", api.NamespaceAll, parseSelectorOrDie(\"\"))\n}\n\nfunc (factory *ConfigFactory) makeDefaultErrorFunc(backoff *podBackoff, podQueue *cache.FIFO) func(pod *api.Pod, err error) {\n\treturn func(pod *api.Pod, err error) {\n\t\tglog.Errorf(\"Error scheduling %v %v: %v; retrying\", pod.Namespace, pod.Name, err)\n\t\tbackoff.gc()\n\t\t\/\/ Retry asynchronously.\n\t\t\/\/ Note that this is extremely rudimentary and we need a more real error handling path.\n\t\tgo func() {\n\t\t\tdefer util.HandleCrash()\n\t\t\tpodID := pod.Name\n\t\t\tpodNamespace := pod.Namespace\n\t\t\tbackoff.wait(podID)\n\t\t\t\/\/ Get the pod again; it may have changed\/been scheduled already.\n\t\t\tpod = &api.Pod{}\n\t\t\terr := factory.Client.Get().Namespace(podNamespace).Resource(\"pods\").Name(podID).Do().Into(pod)\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"Error getting pod %v for retry: %v; abandoning\", podID, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif pod.Spec.Host == \"\" {\n\t\t\t\tpodQueue.Add(pod)\n\t\t\t}\n\t\t}()\n\t}\n}\n\n\/\/ nodeEnumerator allows a cache.Poller to enumerate items in an api.NodeList\ntype nodeEnumerator struct {\n\t*api.NodeList\n}\n\n\/\/ Len returns the number of items in the node list.\nfunc (ne *nodeEnumerator) Len() int {\n\tif ne.NodeList == nil {\n\t\treturn 0\n\t}\n\treturn len(ne.Items)\n}\n\n\/\/ Get returns the item (and ID) with the particular index.\nfunc (ne *nodeEnumerator) Get(index int) interface{} {\n\treturn &ne.Items[index]\n}\n\ntype binder struct {\n\t*client.Client\n}\n\n\/\/ Bind just does a POST binding RPC.\nfunc (b *binder) Bind(binding *api.Binding) error {\n\tglog.V(2).Infof(\"Attempting to bind %v to %v\", binding.Name, binding.Target.Name)\n\tctx := api.WithNamespace(api.NewContext(), binding.Namespace)\n\treturn b.Post().Namespace(api.NamespaceValue(ctx)).Resource(\"bindings\").Body(binding).Do().Error()\n\t\/\/ TODO: use Pods interface for binding once clusters are upgraded\n\t\/\/ return b.Pods(binding.Namespace).Bind(binding)\n}\n\ntype clock interface {\n\tNow() time.Time\n}\n\ntype realClock struct{}\n\nfunc (realClock) Now() time.Time {\n\treturn time.Now()\n}\n\ntype backoffEntry struct {\n\tbackoff time.Duration\n\tlastUpdate time.Time\n}\n\ntype podBackoff struct {\n\tperPodBackoff map[string]*backoffEntry\n\tlock sync.Mutex\n\tclock clock\n\tdefaultDuration time.Duration\n\tmaxDuration time.Duration\n}\n\nfunc (p *podBackoff) getEntry(podID string) *backoffEntry {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\tentry, ok := p.perPodBackoff[podID]\n\tif !ok {\n\t\tentry = &backoffEntry{backoff: p.defaultDuration}\n\t\tp.perPodBackoff[podID] = entry\n\t}\n\tentry.lastUpdate = p.clock.Now()\n\treturn entry\n}\n\nfunc (p *podBackoff) getBackoff(podID string) time.Duration {\n\tentry := p.getEntry(podID)\n\tduration := entry.backoff\n\tentry.backoff *= 2\n\tif entry.backoff > p.maxDuration {\n\t\tentry.backoff = p.maxDuration\n\t}\n\tglog.V(4).Infof(\"Backing off %s for pod %s\", duration.String(), podID)\n\treturn duration\n}\n\nfunc (p *podBackoff) wait(podID string) {\n\ttime.Sleep(p.getBackoff(podID))\n}\n\nfunc (p *podBackoff) gc() {\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\tnow := p.clock.Now()\n\tfor podID, entry := range p.perPodBackoff {\n\t\tif now.Sub(entry.lastUpdate) > p.maxDuration {\n\t\t\tdelete(p.perPodBackoff, podID)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Fix snappy imports<commit_after><|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage factory\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/golang\/glog\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n\t\"k8s.io\/kubernetes\/plugin\/pkg\/scheduler\/algorithm\"\n\t\"k8s.io\/kubernetes\/plugin\/pkg\/scheduler\/algorithm\/predicates\"\n\t\"k8s.io\/kubernetes\/plugin\/pkg\/scheduler\/algorithm\/priorities\"\n\tschedulerapi \"k8s.io\/kubernetes\/plugin\/pkg\/scheduler\/api\"\n)\n\n\/\/ PluginFactoryArgs are passed to all plugin factory functions.\ntype PluginFactoryArgs struct {\n\tPodLister algorithm.PodLister\n\tServiceLister algorithm.ServiceLister\n\tControllerLister algorithm.ControllerLister\n\tReplicaSetLister algorithm.ReplicaSetLister\n\tStatefulSetLister algorithm.StatefulSetLister\n\tNodeLister algorithm.NodeLister\n\tNodeInfo predicates.NodeInfo\n\tPVInfo predicates.PersistentVolumeInfo\n\tPVCInfo predicates.PersistentVolumeClaimInfo\n\tHardPodAffinitySymmetricWeight int\n}\n\n\/\/ MetadataProducerFactory produces MetadataProducer from the given args.\ntype MetadataProducerFactory func(PluginFactoryArgs) algorithm.MetadataProducer\n\n\/\/ A FitPredicateFactory produces a FitPredicate from the given args.\ntype FitPredicateFactory func(PluginFactoryArgs) algorithm.FitPredicate\n\n\/\/ DEPRECATED\n\/\/ Use Map-Reduce pattern for priority functions.\n\/\/ A PriorityFunctionFactory produces a PriorityConfig from the given args.\ntype PriorityFunctionFactory func(PluginFactoryArgs) algorithm.PriorityFunction\n\n\/\/ A PriorityFunctionFactory produces map & reduce priority functions\n\/\/ from a given args.\n\/\/ FIXME: Rename to PriorityFunctionFactory.\ntype PriorityFunctionFactory2 func(PluginFactoryArgs) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction)\n\n\/\/ A PriorityConfigFactory produces a PriorityConfig from the given function and weight\ntype PriorityConfigFactory struct {\n\tFunction PriorityFunctionFactory\n\tMapReduceFunction PriorityFunctionFactory2\n\tWeight int\n}\n\nvar (\n\tschedulerFactoryMutex sync.Mutex\n\n\t\/\/ maps that hold registered algorithm types\n\tfitPredicateMap = make(map[string]FitPredicateFactory)\n\tmandatoryFitPredicates = make(map[string]bool)\n\tpriorityFunctionMap = make(map[string]PriorityConfigFactory)\n\talgorithmProviderMap = make(map[string]AlgorithmProviderConfig)\n\n\t\/\/ Registered metadata producers\n\tpriorityMetadataProducer MetadataProducerFactory\n\tpredicateMetadataProducer MetadataProducerFactory\n\n\t\/\/ get equivalence pod function\n\tgetEquivalencePodFunc algorithm.GetEquivalencePodFunc\n)\n\nconst (\n\tDefaultProvider = \"DefaultProvider\"\n)\n\ntype AlgorithmProviderConfig struct {\n\tFitPredicateKeys sets.String\n\tPriorityFunctionKeys sets.String\n}\n\n\/\/ RegisterFitPredicate registers a fit predicate with the algorithm\n\/\/ registry. Returns the name with which the predicate was registered.\nfunc RegisterFitPredicate(name string, predicate algorithm.FitPredicate) string {\n\treturn RegisterFitPredicateFactory(name, func(PluginFactoryArgs) algorithm.FitPredicate { return predicate })\n}\n\n\/\/ RegisterMandatoryFitPredicate registers a fit predicate with the algorithm registry, the predicate is used by\n\/\/ kubelet, DaemonSet; it is always included in configuration. Returns the name with which the predicate was\n\/\/ registered.\nfunc RegisterMandatoryFitPredicate(name string, predicate algorithm.FitPredicate) string {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\tvalidateAlgorithmNameOrDie(name)\n\tfitPredicateMap[name] = func(PluginFactoryArgs) algorithm.FitPredicate { return predicate }\n\tmandatoryFitPredicates[name] = true\n\treturn name\n}\n\n\/\/ RegisterFitPredicateFactory registers a fit predicate factory with the\n\/\/ algorithm registry. Returns the name with which the predicate was registered.\nfunc RegisterFitPredicateFactory(name string, predicateFactory FitPredicateFactory) string {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\tvalidateAlgorithmNameOrDie(name)\n\tfitPredicateMap[name] = predicateFactory\n\treturn name\n}\n\n\/\/ RegisterCustomFitPredicate registers a custom fit predicate with the algorithm registry.\n\/\/ Returns the name, with which the predicate was registered.\nfunc RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string {\n\tvar predicateFactory FitPredicateFactory\n\tvar ok bool\n\n\tvalidatePredicateOrDie(policy)\n\n\t\/\/ generate the predicate function, if a custom type is requested\n\tif policy.Argument != nil {\n\t\tif policy.Argument.ServiceAffinity != nil {\n\t\t\tpredicateFactory = func(args PluginFactoryArgs) algorithm.FitPredicate {\n\t\t\t\tpredicate, precomputationFunction := predicates.NewServiceAffinityPredicate(\n\t\t\t\t\targs.PodLister,\n\t\t\t\t\targs.ServiceLister,\n\t\t\t\t\targs.NodeInfo,\n\t\t\t\t\tpolicy.Argument.ServiceAffinity.Labels,\n\t\t\t\t)\n\n\t\t\t\t\/\/ Once we generate the predicate we should also Register the Precomputation\n\t\t\t\tpredicates.RegisterPredicatePrecomputation(policy.Name, precomputationFunction)\n\t\t\t\treturn predicate\n\t\t\t}\n\t\t} else if policy.Argument.LabelsPresence != nil {\n\t\t\tpredicateFactory = func(args PluginFactoryArgs) algorithm.FitPredicate {\n\t\t\t\treturn predicates.NewNodeLabelPredicate(\n\t\t\t\t\tpolicy.Argument.LabelsPresence.Labels,\n\t\t\t\t\tpolicy.Argument.LabelsPresence.Presence,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t} else if predicateFactory, ok = fitPredicateMap[policy.Name]; ok {\n\t\t\/\/ checking to see if a pre-defined predicate is requested\n\t\tglog.V(2).Infof(\"Predicate type %s already registered, reusing.\", policy.Name)\n\t\treturn policy.Name\n\t}\n\n\tif predicateFactory == nil {\n\t\tglog.Fatalf(\"Invalid configuration: Predicate type not found for %s\", policy.Name)\n\t}\n\n\treturn RegisterFitPredicateFactory(policy.Name, predicateFactory)\n}\n\n\/\/ IsFitPredicateRegistered is useful for testing providers.\nfunc IsFitPredicateRegistered(name string) bool {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\t_, ok := fitPredicateMap[name]\n\treturn ok\n}\n\nfunc RegisterPriorityMetadataProducerFactory(factory MetadataProducerFactory) {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\tpriorityMetadataProducer = factory\n}\n\nfunc RegisterPredicateMetadataProducerFactory(factory MetadataProducerFactory) {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\tpredicateMetadataProducer = factory\n}\n\n\/\/ DEPRECATED\n\/\/ Use Map-Reduce pattern for priority functions.\n\/\/ Registers a priority function with the algorithm registry. Returns the name,\n\/\/ with which the function was registered.\nfunc RegisterPriorityFunction(name string, function algorithm.PriorityFunction, weight int) string {\n\treturn RegisterPriorityConfigFactory(name, PriorityConfigFactory{\n\t\tFunction: func(PluginFactoryArgs) algorithm.PriorityFunction {\n\t\t\treturn function\n\t\t},\n\t\tWeight: weight,\n\t})\n}\n\n\/\/ RegisterPriorityFunction2 registers a priority function with the algorithm registry. Returns the name,\n\/\/ with which the function was registered.\n\/\/ FIXME: Rename to PriorityFunctionFactory.\nfunc RegisterPriorityFunction2(\n\tname string,\n\tmapFunction algorithm.PriorityMapFunction,\n\treduceFunction algorithm.PriorityReduceFunction,\n\tweight int) string {\n\treturn RegisterPriorityConfigFactory(name, PriorityConfigFactory{\n\t\tMapReduceFunction: func(PluginFactoryArgs) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) {\n\t\t\treturn mapFunction, reduceFunction\n\t\t},\n\t\tWeight: weight,\n\t})\n}\n\nfunc RegisterPriorityConfigFactory(name string, pcf PriorityConfigFactory) string {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\tvalidateAlgorithmNameOrDie(name)\n\tpriorityFunctionMap[name] = pcf\n\treturn name\n}\n\n\/\/ RegisterCustomPriorityFunction registers a custom priority function with the algorithm registry.\n\/\/ Returns the name, with which the priority function was registered.\nfunc RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string {\n\tvar pcf *PriorityConfigFactory\n\n\tvalidatePriorityOrDie(policy)\n\n\t\/\/ generate the priority function, if a custom priority is requested\n\tif policy.Argument != nil {\n\t\tif policy.Argument.ServiceAntiAffinity != nil {\n\t\t\tpcf = &PriorityConfigFactory{\n\t\t\t\tFunction: func(args PluginFactoryArgs) algorithm.PriorityFunction {\n\t\t\t\t\treturn priorities.NewServiceAntiAffinityPriority(\n\t\t\t\t\t\targs.PodLister,\n\t\t\t\t\t\targs.ServiceLister,\n\t\t\t\t\t\tpolicy.Argument.ServiceAntiAffinity.Label,\n\t\t\t\t\t)\n\t\t\t\t},\n\t\t\t\tWeight: policy.Weight,\n\t\t\t}\n\t\t} else if policy.Argument.LabelPreference != nil {\n\t\t\tpcf = &PriorityConfigFactory{\n\t\t\t\tMapReduceFunction: func(args PluginFactoryArgs) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) {\n\t\t\t\t\treturn priorities.NewNodeLabelPriority(\n\t\t\t\t\t\tpolicy.Argument.LabelPreference.Label,\n\t\t\t\t\t\tpolicy.Argument.LabelPreference.Presence,\n\t\t\t\t\t)\n\t\t\t\t},\n\t\t\t\tWeight: policy.Weight,\n\t\t\t}\n\t\t}\n\t} else if existingPcf, ok := priorityFunctionMap[policy.Name]; ok {\n\t\tglog.V(2).Infof(\"Priority type %s already registered, reusing.\", policy.Name)\n\t\t\/\/ set\/update the weight based on the policy\n\t\tpcf = &PriorityConfigFactory{\n\t\t\tFunction: existingPcf.Function,\n\t\t\tMapReduceFunction: existingPcf.MapReduceFunction,\n\t\t\tWeight: policy.Weight,\n\t\t}\n\t}\n\n\tif pcf == nil {\n\t\tglog.Fatalf(\"Invalid configuration: Priority type not found for %s\", policy.Name)\n\t}\n\n\treturn RegisterPriorityConfigFactory(policy.Name, *pcf)\n}\n\nfunc RegisterGetEquivalencePodFunction(equivalenceFunc algorithm.GetEquivalencePodFunc) {\n\tgetEquivalencePodFunc = equivalenceFunc\n}\n\n\/\/ IsPriorityFunctionRegistered is useful for testing providers.\nfunc IsPriorityFunctionRegistered(name string) bool {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\t_, ok := priorityFunctionMap[name]\n\treturn ok\n}\n\n\/\/ RegisterAlgorithmProvider registers a new algorithm provider with the algorithm registry. This should\n\/\/ be called from the init function in a provider plugin.\nfunc RegisterAlgorithmProvider(name string, predicateKeys, priorityKeys sets.String) string {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\tvalidateAlgorithmNameOrDie(name)\n\talgorithmProviderMap[name] = AlgorithmProviderConfig{\n\t\tFitPredicateKeys: predicateKeys,\n\t\tPriorityFunctionKeys: priorityKeys,\n\t}\n\treturn name\n}\n\n\/\/ GetAlgorithmProvider should not be used to modify providers. It is publicly visible for testing.\nfunc GetAlgorithmProvider(name string) (*AlgorithmProviderConfig, error) {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\n\tprovider, ok := algorithmProviderMap[name]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"plugin %q has not been registered\", name)\n\t}\n\n\treturn &provider, nil\n}\n\nfunc getFitPredicateFunctions(names sets.String, args PluginFactoryArgs) (map[string]algorithm.FitPredicate, error) {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\n\tpredicates := map[string]algorithm.FitPredicate{}\n\tfor _, name := range names.List() {\n\t\tfactory, ok := fitPredicateMap[name]\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"Invalid predicate name %q specified - no corresponding function found\", name)\n\t\t}\n\t\tpredicates[name] = factory(args)\n\t}\n\n\t\/\/ Always include mandatory fit predicates.\n\tfor name, mandatory := range mandatoryFitPredicates {\n\t\tif mandatory {\n\t\t\tif factory, found := fitPredicateMap[name]; found {\n\t\t\t\tpredicates[name] = factory(args)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn predicates, nil\n}\n\nfunc getPriorityMetadataProducer(args PluginFactoryArgs) (algorithm.MetadataProducer, error) {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\n\tif priorityMetadataProducer == nil {\n\t\treturn algorithm.EmptyMetadataProducer, nil\n\t}\n\treturn priorityMetadataProducer(args), nil\n}\n\nfunc getPredicateMetadataProducer(args PluginFactoryArgs) (algorithm.MetadataProducer, error) {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\n\tif predicateMetadataProducer == nil {\n\t\treturn algorithm.EmptyMetadataProducer, nil\n\t}\n\treturn predicateMetadataProducer(args), nil\n}\n\nfunc getPriorityFunctionConfigs(names sets.String, args PluginFactoryArgs) ([]algorithm.PriorityConfig, error) {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\n\tconfigs := []algorithm.PriorityConfig{}\n\tfor _, name := range names.List() {\n\t\tfactory, ok := priorityFunctionMap[name]\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"Invalid priority name %s specified - no corresponding function found\", name)\n\t\t}\n\t\tif factory.Function != nil {\n\t\t\tconfigs = append(configs, algorithm.PriorityConfig{\n\t\t\t\tFunction: factory.Function(args),\n\t\t\t\tWeight: factory.Weight,\n\t\t\t})\n\t\t} else {\n\t\t\tmapFunction, reduceFunction := factory.MapReduceFunction(args)\n\t\t\tconfigs = append(configs, algorithm.PriorityConfig{\n\t\t\t\tMap: mapFunction,\n\t\t\t\tReduce: reduceFunction,\n\t\t\t\tWeight: factory.Weight,\n\t\t\t})\n\t\t}\n\t}\n\tif err := validateSelectedConfigs(configs); err != nil {\n\t\treturn nil, err\n\t}\n\treturn configs, nil\n}\n\n\/\/ validateSelectedConfigs validates the config weights to avoid the overflow.\nfunc validateSelectedConfigs(configs []algorithm.PriorityConfig) error {\n\tvar totalPriority int\n\tfor _, config := range configs {\n\t\t\/\/ Checks totalPriority against MaxTotalPriority to avoid overflow\n\t\tif config.Weight*schedulerapi.MaxPriority > schedulerapi.MaxTotalPriority-totalPriority {\n\t\t\treturn fmt.Errorf(\"Total priority of priority functions has overflown\")\n\t\t}\n\t\ttotalPriority += config.Weight * schedulerapi.MaxPriority\n\t}\n\treturn nil\n}\n\nvar validName = regexp.MustCompile(\"^[a-zA-Z0-9]([-a-zA-Z0-9]*[a-zA-Z0-9])$\")\n\nfunc validateAlgorithmNameOrDie(name string) {\n\tif !validName.MatchString(name) {\n\t\tglog.Fatalf(\"Algorithm name %v does not match the name validation regexp \\\"%v\\\".\", name, validName)\n\t}\n}\n\nfunc validatePredicateOrDie(predicate schedulerapi.PredicatePolicy) {\n\tif predicate.Argument != nil {\n\t\tnumArgs := 0\n\t\tif predicate.Argument.ServiceAffinity != nil {\n\t\t\tnumArgs++\n\t\t}\n\t\tif predicate.Argument.LabelsPresence != nil {\n\t\t\tnumArgs++\n\t\t}\n\t\tif numArgs != 1 {\n\t\t\tglog.Fatalf(\"Exactly 1 predicate argument is required, numArgs: %v, Predicate: %s\", numArgs, predicate.Name)\n\t\t}\n\t}\n}\n\nfunc validatePriorityOrDie(priority schedulerapi.PriorityPolicy) {\n\tif priority.Argument != nil {\n\t\tnumArgs := 0\n\t\tif priority.Argument.ServiceAntiAffinity != nil {\n\t\t\tnumArgs++\n\t\t}\n\t\tif priority.Argument.LabelPreference != nil {\n\t\t\tnumArgs++\n\t\t}\n\t\tif numArgs != 1 {\n\t\t\tglog.Fatalf(\"Exactly 1 priority argument is required, numArgs: %v, Priority: %s\", numArgs, priority.Name)\n\t\t}\n\t}\n}\n\nfunc ListRegisteredFitPredicates() []string {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\n\tnames := []string{}\n\tfor name := range fitPredicateMap {\n\t\tnames = append(names, name)\n\t}\n\treturn names\n}\n\nfunc ListRegisteredPriorityFunctions() []string {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\n\tnames := []string{}\n\tfor name := range priorityFunctionMap {\n\t\tnames = append(names, name)\n\t}\n\treturn names\n}\n\n\/\/ ListAlgorithmProviders is called when listing all available algorithm providers in `kube-scheduler --help`\nfunc ListAlgorithmProviders() string {\n\tvar availableAlgorithmProviders []string\n\tfor name := range algorithmProviderMap {\n\t\tavailableAlgorithmProviders = append(availableAlgorithmProviders, name)\n\t}\n\tsort.Strings(availableAlgorithmProviders)\n\treturn strings.Join(availableAlgorithmProviders, \" | \")\n}\n<commit_msg>Replaced bool map to string set.<commit_after>\/*\nCopyright 2014 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage factory\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/golang\/glog\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n\t\"k8s.io\/kubernetes\/plugin\/pkg\/scheduler\/algorithm\"\n\t\"k8s.io\/kubernetes\/plugin\/pkg\/scheduler\/algorithm\/predicates\"\n\t\"k8s.io\/kubernetes\/plugin\/pkg\/scheduler\/algorithm\/priorities\"\n\tschedulerapi \"k8s.io\/kubernetes\/plugin\/pkg\/scheduler\/api\"\n)\n\n\/\/ PluginFactoryArgs are passed to all plugin factory functions.\ntype PluginFactoryArgs struct {\n\tPodLister algorithm.PodLister\n\tServiceLister algorithm.ServiceLister\n\tControllerLister algorithm.ControllerLister\n\tReplicaSetLister algorithm.ReplicaSetLister\n\tStatefulSetLister algorithm.StatefulSetLister\n\tNodeLister algorithm.NodeLister\n\tNodeInfo predicates.NodeInfo\n\tPVInfo predicates.PersistentVolumeInfo\n\tPVCInfo predicates.PersistentVolumeClaimInfo\n\tHardPodAffinitySymmetricWeight int\n}\n\n\/\/ MetadataProducerFactory produces MetadataProducer from the given args.\ntype MetadataProducerFactory func(PluginFactoryArgs) algorithm.MetadataProducer\n\n\/\/ A FitPredicateFactory produces a FitPredicate from the given args.\ntype FitPredicateFactory func(PluginFactoryArgs) algorithm.FitPredicate\n\n\/\/ DEPRECATED\n\/\/ Use Map-Reduce pattern for priority functions.\n\/\/ A PriorityFunctionFactory produces a PriorityConfig from the given args.\ntype PriorityFunctionFactory func(PluginFactoryArgs) algorithm.PriorityFunction\n\n\/\/ A PriorityFunctionFactory produces map & reduce priority functions\n\/\/ from a given args.\n\/\/ FIXME: Rename to PriorityFunctionFactory.\ntype PriorityFunctionFactory2 func(PluginFactoryArgs) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction)\n\n\/\/ A PriorityConfigFactory produces a PriorityConfig from the given function and weight\ntype PriorityConfigFactory struct {\n\tFunction PriorityFunctionFactory\n\tMapReduceFunction PriorityFunctionFactory2\n\tWeight int\n}\n\nvar (\n\tschedulerFactoryMutex sync.Mutex\n\n\t\/\/ maps that hold registered algorithm types\n\tfitPredicateMap = make(map[string]FitPredicateFactory)\n\tmandatoryFitPredicates = sets.NewString()\n\tpriorityFunctionMap = make(map[string]PriorityConfigFactory)\n\talgorithmProviderMap = make(map[string]AlgorithmProviderConfig)\n\n\t\/\/ Registered metadata producers\n\tpriorityMetadataProducer MetadataProducerFactory\n\tpredicateMetadataProducer MetadataProducerFactory\n\n\t\/\/ get equivalence pod function\n\tgetEquivalencePodFunc algorithm.GetEquivalencePodFunc\n)\n\nconst (\n\tDefaultProvider = \"DefaultProvider\"\n)\n\ntype AlgorithmProviderConfig struct {\n\tFitPredicateKeys sets.String\n\tPriorityFunctionKeys sets.String\n}\n\n\/\/ RegisterFitPredicate registers a fit predicate with the algorithm\n\/\/ registry. Returns the name with which the predicate was registered.\nfunc RegisterFitPredicate(name string, predicate algorithm.FitPredicate) string {\n\treturn RegisterFitPredicateFactory(name, func(PluginFactoryArgs) algorithm.FitPredicate { return predicate })\n}\n\n\/\/ RegisterMandatoryFitPredicate registers a fit predicate with the algorithm registry, the predicate is used by\n\/\/ kubelet, DaemonSet; it is always included in configuration. Returns the name with which the predicate was\n\/\/ registered.\nfunc RegisterMandatoryFitPredicate(name string, predicate algorithm.FitPredicate) string {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\tvalidateAlgorithmNameOrDie(name)\n\tfitPredicateMap[name] = func(PluginFactoryArgs) algorithm.FitPredicate { return predicate }\n\tmandatoryFitPredicates.Insert(name)\n\treturn name\n}\n\n\/\/ RegisterFitPredicateFactory registers a fit predicate factory with the\n\/\/ algorithm registry. Returns the name with which the predicate was registered.\nfunc RegisterFitPredicateFactory(name string, predicateFactory FitPredicateFactory) string {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\tvalidateAlgorithmNameOrDie(name)\n\tfitPredicateMap[name] = predicateFactory\n\treturn name\n}\n\n\/\/ RegisterCustomFitPredicate registers a custom fit predicate with the algorithm registry.\n\/\/ Returns the name, with which the predicate was registered.\nfunc RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string {\n\tvar predicateFactory FitPredicateFactory\n\tvar ok bool\n\n\tvalidatePredicateOrDie(policy)\n\n\t\/\/ generate the predicate function, if a custom type is requested\n\tif policy.Argument != nil {\n\t\tif policy.Argument.ServiceAffinity != nil {\n\t\t\tpredicateFactory = func(args PluginFactoryArgs) algorithm.FitPredicate {\n\t\t\t\tpredicate, precomputationFunction := predicates.NewServiceAffinityPredicate(\n\t\t\t\t\targs.PodLister,\n\t\t\t\t\targs.ServiceLister,\n\t\t\t\t\targs.NodeInfo,\n\t\t\t\t\tpolicy.Argument.ServiceAffinity.Labels,\n\t\t\t\t)\n\n\t\t\t\t\/\/ Once we generate the predicate we should also Register the Precomputation\n\t\t\t\tpredicates.RegisterPredicatePrecomputation(policy.Name, precomputationFunction)\n\t\t\t\treturn predicate\n\t\t\t}\n\t\t} else if policy.Argument.LabelsPresence != nil {\n\t\t\tpredicateFactory = func(args PluginFactoryArgs) algorithm.FitPredicate {\n\t\t\t\treturn predicates.NewNodeLabelPredicate(\n\t\t\t\t\tpolicy.Argument.LabelsPresence.Labels,\n\t\t\t\t\tpolicy.Argument.LabelsPresence.Presence,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t} else if predicateFactory, ok = fitPredicateMap[policy.Name]; ok {\n\t\t\/\/ checking to see if a pre-defined predicate is requested\n\t\tglog.V(2).Infof(\"Predicate type %s already registered, reusing.\", policy.Name)\n\t\treturn policy.Name\n\t}\n\n\tif predicateFactory == nil {\n\t\tglog.Fatalf(\"Invalid configuration: Predicate type not found for %s\", policy.Name)\n\t}\n\n\treturn RegisterFitPredicateFactory(policy.Name, predicateFactory)\n}\n\n\/\/ IsFitPredicateRegistered is useful for testing providers.\nfunc IsFitPredicateRegistered(name string) bool {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\t_, ok := fitPredicateMap[name]\n\treturn ok\n}\n\nfunc RegisterPriorityMetadataProducerFactory(factory MetadataProducerFactory) {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\tpriorityMetadataProducer = factory\n}\n\nfunc RegisterPredicateMetadataProducerFactory(factory MetadataProducerFactory) {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\tpredicateMetadataProducer = factory\n}\n\n\/\/ DEPRECATED\n\/\/ Use Map-Reduce pattern for priority functions.\n\/\/ Registers a priority function with the algorithm registry. Returns the name,\n\/\/ with which the function was registered.\nfunc RegisterPriorityFunction(name string, function algorithm.PriorityFunction, weight int) string {\n\treturn RegisterPriorityConfigFactory(name, PriorityConfigFactory{\n\t\tFunction: func(PluginFactoryArgs) algorithm.PriorityFunction {\n\t\t\treturn function\n\t\t},\n\t\tWeight: weight,\n\t})\n}\n\n\/\/ RegisterPriorityFunction2 registers a priority function with the algorithm registry. Returns the name,\n\/\/ with which the function was registered.\n\/\/ FIXME: Rename to PriorityFunctionFactory.\nfunc RegisterPriorityFunction2(\n\tname string,\n\tmapFunction algorithm.PriorityMapFunction,\n\treduceFunction algorithm.PriorityReduceFunction,\n\tweight int) string {\n\treturn RegisterPriorityConfigFactory(name, PriorityConfigFactory{\n\t\tMapReduceFunction: func(PluginFactoryArgs) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) {\n\t\t\treturn mapFunction, reduceFunction\n\t\t},\n\t\tWeight: weight,\n\t})\n}\n\nfunc RegisterPriorityConfigFactory(name string, pcf PriorityConfigFactory) string {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\tvalidateAlgorithmNameOrDie(name)\n\tpriorityFunctionMap[name] = pcf\n\treturn name\n}\n\n\/\/ RegisterCustomPriorityFunction registers a custom priority function with the algorithm registry.\n\/\/ Returns the name, with which the priority function was registered.\nfunc RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy) string {\n\tvar pcf *PriorityConfigFactory\n\n\tvalidatePriorityOrDie(policy)\n\n\t\/\/ generate the priority function, if a custom priority is requested\n\tif policy.Argument != nil {\n\t\tif policy.Argument.ServiceAntiAffinity != nil {\n\t\t\tpcf = &PriorityConfigFactory{\n\t\t\t\tFunction: func(args PluginFactoryArgs) algorithm.PriorityFunction {\n\t\t\t\t\treturn priorities.NewServiceAntiAffinityPriority(\n\t\t\t\t\t\targs.PodLister,\n\t\t\t\t\t\targs.ServiceLister,\n\t\t\t\t\t\tpolicy.Argument.ServiceAntiAffinity.Label,\n\t\t\t\t\t)\n\t\t\t\t},\n\t\t\t\tWeight: policy.Weight,\n\t\t\t}\n\t\t} else if policy.Argument.LabelPreference != nil {\n\t\t\tpcf = &PriorityConfigFactory{\n\t\t\t\tMapReduceFunction: func(args PluginFactoryArgs) (algorithm.PriorityMapFunction, algorithm.PriorityReduceFunction) {\n\t\t\t\t\treturn priorities.NewNodeLabelPriority(\n\t\t\t\t\t\tpolicy.Argument.LabelPreference.Label,\n\t\t\t\t\t\tpolicy.Argument.LabelPreference.Presence,\n\t\t\t\t\t)\n\t\t\t\t},\n\t\t\t\tWeight: policy.Weight,\n\t\t\t}\n\t\t}\n\t} else if existingPcf, ok := priorityFunctionMap[policy.Name]; ok {\n\t\tglog.V(2).Infof(\"Priority type %s already registered, reusing.\", policy.Name)\n\t\t\/\/ set\/update the weight based on the policy\n\t\tpcf = &PriorityConfigFactory{\n\t\t\tFunction: existingPcf.Function,\n\t\t\tMapReduceFunction: existingPcf.MapReduceFunction,\n\t\t\tWeight: policy.Weight,\n\t\t}\n\t}\n\n\tif pcf == nil {\n\t\tglog.Fatalf(\"Invalid configuration: Priority type not found for %s\", policy.Name)\n\t}\n\n\treturn RegisterPriorityConfigFactory(policy.Name, *pcf)\n}\n\nfunc RegisterGetEquivalencePodFunction(equivalenceFunc algorithm.GetEquivalencePodFunc) {\n\tgetEquivalencePodFunc = equivalenceFunc\n}\n\n\/\/ IsPriorityFunctionRegistered is useful for testing providers.\nfunc IsPriorityFunctionRegistered(name string) bool {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\t_, ok := priorityFunctionMap[name]\n\treturn ok\n}\n\n\/\/ RegisterAlgorithmProvider registers a new algorithm provider with the algorithm registry. This should\n\/\/ be called from the init function in a provider plugin.\nfunc RegisterAlgorithmProvider(name string, predicateKeys, priorityKeys sets.String) string {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\tvalidateAlgorithmNameOrDie(name)\n\talgorithmProviderMap[name] = AlgorithmProviderConfig{\n\t\tFitPredicateKeys: predicateKeys,\n\t\tPriorityFunctionKeys: priorityKeys,\n\t}\n\treturn name\n}\n\n\/\/ GetAlgorithmProvider should not be used to modify providers. It is publicly visible for testing.\nfunc GetAlgorithmProvider(name string) (*AlgorithmProviderConfig, error) {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\n\tprovider, ok := algorithmProviderMap[name]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"plugin %q has not been registered\", name)\n\t}\n\n\treturn &provider, nil\n}\n\nfunc getFitPredicateFunctions(names sets.String, args PluginFactoryArgs) (map[string]algorithm.FitPredicate, error) {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\n\tpredicates := map[string]algorithm.FitPredicate{}\n\tfor _, name := range names.List() {\n\t\tfactory, ok := fitPredicateMap[name]\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"Invalid predicate name %q specified - no corresponding function found\", name)\n\t\t}\n\t\tpredicates[name] = factory(args)\n\t}\n\n\t\/\/ Always include mandatory fit predicates.\n\tfor _, name := range mandatoryFitPredicates.List() {\n\t\tif factory, found := fitPredicateMap[name]; found {\n\t\t\tpredicates[name] = factory(args)\n\t\t}\n\t}\n\n\treturn predicates, nil\n}\n\nfunc getPriorityMetadataProducer(args PluginFactoryArgs) (algorithm.MetadataProducer, error) {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\n\tif priorityMetadataProducer == nil {\n\t\treturn algorithm.EmptyMetadataProducer, nil\n\t}\n\treturn priorityMetadataProducer(args), nil\n}\n\nfunc getPredicateMetadataProducer(args PluginFactoryArgs) (algorithm.MetadataProducer, error) {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\n\tif predicateMetadataProducer == nil {\n\t\treturn algorithm.EmptyMetadataProducer, nil\n\t}\n\treturn predicateMetadataProducer(args), nil\n}\n\nfunc getPriorityFunctionConfigs(names sets.String, args PluginFactoryArgs) ([]algorithm.PriorityConfig, error) {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\n\tconfigs := []algorithm.PriorityConfig{}\n\tfor _, name := range names.List() {\n\t\tfactory, ok := priorityFunctionMap[name]\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"Invalid priority name %s specified - no corresponding function found\", name)\n\t\t}\n\t\tif factory.Function != nil {\n\t\t\tconfigs = append(configs, algorithm.PriorityConfig{\n\t\t\t\tFunction: factory.Function(args),\n\t\t\t\tWeight: factory.Weight,\n\t\t\t})\n\t\t} else {\n\t\t\tmapFunction, reduceFunction := factory.MapReduceFunction(args)\n\t\t\tconfigs = append(configs, algorithm.PriorityConfig{\n\t\t\t\tMap: mapFunction,\n\t\t\t\tReduce: reduceFunction,\n\t\t\t\tWeight: factory.Weight,\n\t\t\t})\n\t\t}\n\t}\n\tif err := validateSelectedConfigs(configs); err != nil {\n\t\treturn nil, err\n\t}\n\treturn configs, nil\n}\n\n\/\/ validateSelectedConfigs validates the config weights to avoid the overflow.\nfunc validateSelectedConfigs(configs []algorithm.PriorityConfig) error {\n\tvar totalPriority int\n\tfor _, config := range configs {\n\t\t\/\/ Checks totalPriority against MaxTotalPriority to avoid overflow\n\t\tif config.Weight*schedulerapi.MaxPriority > schedulerapi.MaxTotalPriority-totalPriority {\n\t\t\treturn fmt.Errorf(\"Total priority of priority functions has overflown\")\n\t\t}\n\t\ttotalPriority += config.Weight * schedulerapi.MaxPriority\n\t}\n\treturn nil\n}\n\nvar validName = regexp.MustCompile(\"^[a-zA-Z0-9]([-a-zA-Z0-9]*[a-zA-Z0-9])$\")\n\nfunc validateAlgorithmNameOrDie(name string) {\n\tif !validName.MatchString(name) {\n\t\tglog.Fatalf(\"Algorithm name %v does not match the name validation regexp \\\"%v\\\".\", name, validName)\n\t}\n}\n\nfunc validatePredicateOrDie(predicate schedulerapi.PredicatePolicy) {\n\tif predicate.Argument != nil {\n\t\tnumArgs := 0\n\t\tif predicate.Argument.ServiceAffinity != nil {\n\t\t\tnumArgs++\n\t\t}\n\t\tif predicate.Argument.LabelsPresence != nil {\n\t\t\tnumArgs++\n\t\t}\n\t\tif numArgs != 1 {\n\t\t\tglog.Fatalf(\"Exactly 1 predicate argument is required, numArgs: %v, Predicate: %s\", numArgs, predicate.Name)\n\t\t}\n\t}\n}\n\nfunc validatePriorityOrDie(priority schedulerapi.PriorityPolicy) {\n\tif priority.Argument != nil {\n\t\tnumArgs := 0\n\t\tif priority.Argument.ServiceAntiAffinity != nil {\n\t\t\tnumArgs++\n\t\t}\n\t\tif priority.Argument.LabelPreference != nil {\n\t\t\tnumArgs++\n\t\t}\n\t\tif numArgs != 1 {\n\t\t\tglog.Fatalf(\"Exactly 1 priority argument is required, numArgs: %v, Priority: %s\", numArgs, priority.Name)\n\t\t}\n\t}\n}\n\nfunc ListRegisteredFitPredicates() []string {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\n\tnames := []string{}\n\tfor name := range fitPredicateMap {\n\t\tnames = append(names, name)\n\t}\n\treturn names\n}\n\nfunc ListRegisteredPriorityFunctions() []string {\n\tschedulerFactoryMutex.Lock()\n\tdefer schedulerFactoryMutex.Unlock()\n\n\tnames := []string{}\n\tfor name := range priorityFunctionMap {\n\t\tnames = append(names, name)\n\t}\n\treturn names\n}\n\n\/\/ ListAlgorithmProviders is called when listing all available algorithm providers in `kube-scheduler --help`\nfunc ListAlgorithmProviders() string {\n\tvar availableAlgorithmProviders []string\n\tfor name := range algorithmProviderMap {\n\t\tavailableAlgorithmProviders = append(availableAlgorithmProviders, name)\n\t}\n\tsort.Strings(availableAlgorithmProviders)\n\treturn strings.Join(availableAlgorithmProviders, \" | \")\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>remove duplicated update of MinikubeISO<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2016 Abcum Ltd\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage orbit\n\nimport (\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nfunc addModule(name string, item module) {\n\tmodules[name] = item\n}\n\nfunc addSource(name string, file string) {\n\n\tif strings.Contains(file, \"*\") {\n\n\t\tfiles, _ := filepath.Glob(file)\n\n\t\tfor i, file := range files {\n\n\t\t\tvers := filepath.Base(file)\n\n\t\t\tdata, err := ioutil.ReadFile(file)\n\t\t\tif err != nil {\n\t\t\t\tmodules[name+\"@\"+vers] = null()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tmodules[name+\"@\"+vers] = exec(data, file)\n\n\t\t\tif i == len(files)-1 {\n\t\t\t\tmodules[name] = modules[name+\"@\"+vers]\n\t\t\t\tmodules[name+\"@latest\"] = modules[name+\"@\"+vers]\n\t\t\t}\n\n\t\t}\n\n\t} else {\n\n\t\tdata, err := ioutil.ReadFile(file)\n\t\tif err != nil {\n\t\t\tmodules[name] = null()\n\t\t\treturn\n\t\t}\n\t\tmodules[name] = exec(data, file)\n\n\t}\n\n}\n<commit_msg>Don't include file extensions in module names<commit_after>\/\/ Copyright © 2016 Abcum Ltd\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage orbit\n\nimport (\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nfunc addModule(name string, item module) {\n\tmodules[name] = item\n}\n\nfunc addSource(name string, file string) {\n\n\tif strings.Contains(file, \"*\") {\n\n\t\tfiles, _ := filepath.Glob(file)\n\n\t\tfor i, file := range files {\n\n\t\t\textn := filepath.Ext(file)\n\t\t\tfull := filepath.Base(file)\n\t\t\tvers := full[0 : len(full)-len(extn)]\n\n\t\t\tdata, err := ioutil.ReadFile(file)\n\t\t\tif err != nil {\n\t\t\t\tmodules[name+\"@\"+vers] = null()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tmodules[name+\"@\"+vers] = exec(data, file)\n\n\t\t\tif i == len(files)-1 {\n\t\t\t\tmodules[name] = modules[name+\"@\"+vers]\n\t\t\t\tmodules[name+\"@latest\"] = modules[name+\"@\"+vers]\n\t\t\t}\n\n\t\t}\n\n\t} else {\n\n\t\tdata, err := ioutil.ReadFile(file)\n\t\tif err != nil {\n\t\t\tmodules[name] = null()\n\t\t\treturn\n\t\t}\n\t\tmodules[name] = exec(data, file)\n\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/* Package go140 implements interaction with the Twitter API *\/\npackage go140\n\nimport (\n\t\"github.com\/alloy-d\/goauth\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"json\"\n\t\"os\"\n)\n\ntype API struct {\n\tRoot string\n\toauth.OAuth\n}\n\ntype Status struct {\n\tDate string \"created_at\"\n\tText string\n\tLocation string \"place\"\n}\n\ntype User struct {\n\tScreenName string \"screen_name\"\n\tName string\n\tLocation string\n\tDescription string\n\tStatus *Status\n}\n\nfunc (api *API) UserByID(id uint) (*User, os.Error) {\n\turl := api.Root + \"\/1\/users\/show.json\"\n\tparams := map[string]string{\n\t\t\"id\": fmt.Sprintf(\"%d\", id),\n\t}\n\n\tresp, err := api.Get(url, params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar user User\n\terr = json.Unmarshal(data, &user)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &user, nil\n}\n\nfunc (api *API) Status() (*Status, os.Error) {\n\tuser, err := api.UserByID(api.UserID())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn user.Status, nil\n}\n\nfunc (api *API) Update(s string) (string, os.Error) {\n\tif len(s) > 140 {\n\t\treturn \"\", tweetError{\"Tweet too long!\"}\n\t}\n\n\turl := api.Root + \"\/1\/statuses\/update.json\"\n\tparams := map[string]string{\"status\": s}\n\n\t_, err := api.Post(url, params)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn \"<not yet implemented>\", nil\n}\n<commit_msg>Allow unauthenticated requests; allow getting users by screen name.<commit_after>\/* Package go140 implements interaction with the Twitter API *\/\npackage go140\n\nimport (\n\t\"github.com\/alloy-d\/goauth\"\n\t\"fmt\"\n\t\"http\"\n\t\"io\/ioutil\"\n\t\"json\"\n\t\"os\"\n\t\"url\"\n)\n\ntype API struct {\n\tRoot string\n\toauth.OAuth\n}\n\n\/\/ TODO: this is SILLY.\nfunc addQueryParams(resource string, params map[string]string) string {\n\tstr := resource\n\n\tfirst := true\n\tfor k, v := range params {\n\t\tif first {\n\t\t\tstr += \"?\"\n\t\t\tfirst = false\n\t\t} else {\n\t\t\tstr += \"&\"\n\t\t}\n\n\t\trawv, err := url.QueryUnescape(v)\n\t\tif err == nil {\n\t\t\tv = rawv\n\t\t}\n\t\tstr += k + \"=\" + url.QueryEscape(v)\n\t}\n\treturn str\n}\n\nfunc (api *API) Get(resource string, params map[string]string) (*http.Response, os.Error) {\n\tif api.Authorized() {\n\t\treturn api.OAuth.Get(resource, params)\n\t}\n\n\tfullURL := addQueryParams(resource, params)\n\treturn http.Get(fullURL)\n}\n\ntype Status struct {\n\tDate string \"created_at\"\n\tText string\n\tLocation string \"place\"\n}\n\ntype User struct {\n\tScreenName string \"screen_name\"\n\tName string\n\tLocation string\n\tDescription string\n\tStatus *Status\n}\n\nfunc (api *API) UserByID(id uint) (*User, os.Error) {\n\treturn api.user(map[string]string{\n\t\t\"id\": fmt.Sprintf(\"%d\", id),\n\t})\n}\n\nfunc (api *API) User(screen_name string) (*User, os.Error) {\n\treturn api.user(map[string]string{\n\t\t\"screen_name\": screen_name,\n\t})\n}\n\nfunc (api *API) user(params map[string]string) (*User, os.Error) {\n\turl := api.Root + \"\/1\/users\/show.json\"\n\n\tresp, err := api.Get(url, params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar user User\n\terr = json.Unmarshal(data, &user)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &user, nil\n}\n\nfunc (api *API) Status() (*Status, os.Error) {\n\tuser, err := api.UserByID(api.UserID())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn user.Status, nil\n}\n\nfunc (api *API) Update(s string) (string, os.Error) {\n\tif len(s) > 140 {\n\t\treturn \"\", tweetError{\"Tweet too long!\"}\n\t}\n\n\turl := api.Root + \"\/1\/statuses\/update.json\"\n\tparams := map[string]string{\"status\": s}\n\n\t_, err := api.Post(url, params)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn \"<not yet implemented>\", nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"strings\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/blevesearch\/bleve\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ DefaultResponse Error response structure\ntype DefaultResponse struct {\n\tMessage string `json:\"message\"`\n}\n\n\/\/ SeachResultItem is a response structure for search result item\ntype SeachResultItem struct {\n\tID string `json:\"id\"`\n\tScore float64 `json:\"score\"`\n\tFields interface{} `json:\"fields\"`\n}\n\n\/\/ SearchResultsResponse is a response structure for final search results\ntype SearchResultsResponse struct {\n\tTotalResultsPages uint64 `json:\"total_results_pages\"`\n\tMoreResults bool `json:\"more_results\"`\n\tPage int `json:\"page\"`\n\tTime string `json:\"took\"`\n\tResults []SeachResultItem `json:\"results\"`\n}\n\n\/\/ Adapter type\ntype Adapter func(http.Handler) http.Handler\n\n\/\/ Adapt wraps http handlers with middlewares\nfunc Adapt(h http.Handler, adapters ...Adapter) http.Handler {\n\tfor _, adapter := range adapters {\n\t\th = adapter(h)\n\t}\n\n\treturn h\n}\n\n\/\/ Log all requests\nfunc HttpLogger() Adapter {\n\treturn func(h http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tdefer log.Infof(\"%s %s\", r.Method, r.RequestURI)\n\t\t\th.ServeHTTP(w, r)\n\t\t})\n\t}\n}\n\n\/\/ Write response as a JSON formt\nfunc writeJSONResponse(w http.ResponseWriter, i interface{}, status int) {\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(status)\n\n\tif err := json.NewEncoder(w).Encode(i); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}\n\n\/\/ Validate search query\nfunc sanatizeSearchQuery(query string) (string, error) {\n\tif len(query) < 3 {\n\t\treturn query, errors.New(\"Search query should be of minimum 3 characters\")\n\t}\n\n\treturn strings.ToLower(query), nil\n}\n\n\/\/ Index page handler\nfunc indexHandler(w http.ResponseWriter, r *http.Request) {\n\twriteJSONResponse(w, DefaultResponse{\"Bankr API v3\"}, http.StatusOK)\n}\n\nfunc getGeocodeAddressHandler(w http.ResponseWriter, r *http.Request) {\n\tlatitude := r.URL.Query().Get(\"latitude\")\n\tlongitude := r.URL.Query().Get(\"longitude\")\n\tgeocodeApiKey := viper.GetString(\"geocode_api_key\")\n\tgeocodeAPIURI := viper.GetString(\"geocode_api_uri\")\n\n\tclient := &http.Client{}\n\trequest, err := http.NewRequest(\"GET\", geocodeAPIURI, nil)\n\n\tif err != nil {\n\t\tlog.Errorf(\"Error while getting location: %v\", err)\n\t\twriteJSONResponse(w, DefaultResponse{\"Error while getting location\"}, http.StatusBadGateway)\n\t}\n\n\t\/\/ Add query params to the request\n\tq := request.URL.Query()\n\tq.Add(\"latlng\", latitude+\",\"+longitude)\n\tq.Add(\"key\", geocodeApiKey)\n\trequest.URL.RawQuery = q.Encode()\n\n\tresp, err := client.Do(request)\n\tresponseData, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\tlog.Errorf(\"Error while parsing location response: %v\", err)\n\t\twriteJSONResponse(w, DefaultResponse{\"Error while getting location\"}, http.StatusBadGateway)\n\t}\n\n\tvar response map[string]interface{}\n\terr = json.Unmarshal(responseData, &response)\n\n\tif err != nil {\n\t\tlog.Errorf(\"Error while parsing unmarshaling response: %v\", err)\n\t\twriteJSONResponse(w, DefaultResponse{\"Error while getting location\"}, http.StatusBadGateway)\n\t}\n\n\twriteJSONResponse(w, response, http.StatusOK)\n}\n\n\/\/ Query search handler\nfunc searchHandler(w http.ResponseWriter, r *http.Request) {\n\tquery := r.URL.Query().Get(\"q\")\n\tpage := r.URL.Query().Get(\"p\")\n\n\tvar (\n\t\terrorRespose DefaultResponse\n\t\tsearchResults *bleve.SearchResult\n\t\tsearchResultItems []SeachResultItem\n\t\tresultsSize = 10\n\t\tpageNumber = 1\n\t\tmoreResultsAvailable = false\n\t)\n\n\t\/\/ Validate search query\n\tquery, err := sanatizeSearchQuery(query)\n\tif err != nil {\n\t\terrorRespose.Message = err.Error()\n\t\twriteJSONResponse(w, errorRespose, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t\/\/ Validate page number\n\tif page == \"\" {\n\t\tpageNumber = 1\n\t} else {\n\t\tpageNumber, err = strconv.Atoi(page)\n\t\tif err != nil {\n\t\t\terrorRespose.Message = \"Invalid page number.\"\n\t\t\twriteJSONResponse(w, errorRespose, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Search for give query and result size (startIndex + size). Start index is (pageNum - 1)\n\tsearchResults, err = querySearch(query, resultsSize, pageNumber-1)\n\tif err != nil {\n\t\tlog.Errorf(\"Error while searching query: %v\", err)\n\t\terrorRespose.Message = \"Something went wrong. Please report to admin.\"\n\t\twriteJSONResponse(w, errorRespose, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t\/\/ Create list for search items response\n\tfor _, result := range searchResults.Hits {\n\t\tsearchResultItems = append(searchResultItems, SeachResultItem{\n\t\t\tID: result.ID,\n\t\t\tScore: result.Score,\n\t\t\tFields: result.Fields,\n\t\t})\n\t}\n\n\t\/\/ Check if more available\n\tif searchResults.Total > uint64(pageNumber+resultsSize) {\n\t\tmoreResultsAvailable = true\n\t}\n\n\t\/\/ Final search response\n\tsearchResultsResponse := SearchResultsResponse{\n\t\tTotalResultsPages: searchResults.Total - 1,\n\t\tMoreResults: moreResultsAvailable,\n\t\tPage: pageNumber,\n\t\tTime: searchResults.Took.String(),\n\t\tResults: searchResultItems,\n\t}\n\n\t\/\/ Write the output\n\twriteJSONResponse(w, searchResultsResponse, http.StatusOK)\n}\n\nfunc initServer(address string) {\n\t\/\/ Server static files\n\thttp.Handle(\"\/\", http.FileServer(http.Dir(\".\/frontend\/dist\/\")))\n\n\t\/\/ API handlers\n\thttp.Handle(\"\/api\", Adapt(http.HandlerFunc(indexHandler)))\n\thttp.Handle(\"\/api\/search\", Adapt(http.HandlerFunc(searchHandler), HttpLogger()))\n\thttp.Handle(\"\/api\/location\", Adapt(http.HandlerFunc(getGeocodeAddressHandler), HttpLogger()))\n\n\t\/\/ Start the server\n\tlog.Infof(\"Starting server: http:\/\/%s\", address)\n\tif err := http.ListenAndServe(address, nil); err != nil {\n\t\tlog.Error(\"Error starting the server: \", err)\n\t}\n}\n<commit_msg>Fixed typo in a struct<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"strings\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/blevesearch\/bleve\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ DefaultResponse Error response structure\ntype DefaultResponse struct {\n\tMessage string `json:\"message\"`\n}\n\n\/\/ SeachResultItem is a response structure for search result item\ntype SeachResultItem struct {\n\tID string `json:\"id\"`\n\tScore float64 `json:\"score\"`\n\tFields interface{} `json:\"fields\"`\n}\n\n\/\/ SearchResultsResponse is a response structure for final search results\ntype SearchResultsResponse struct {\n\tTotalResultsPages uint64 `json:\"total_results_pages\"`\n\tMoreResults bool `json:\"more_results\"`\n\tPage int `json:\"page\"`\n\tTime string `json:\"took\"`\n\tResults []SeachResultItem `json:\"results\"`\n}\n\n\/\/ Adapter type\ntype Adapter func(http.Handler) http.Handler\n\n\/\/ Adapt wraps http handlers with middlewares\nfunc Adapt(h http.Handler, adapters ...Adapter) http.Handler {\n\tfor _, adapter := range adapters {\n\t\th = adapter(h)\n\t}\n\n\treturn h\n}\n\n\/\/ Log all requests\nfunc HttpLogger() Adapter {\n\treturn func(h http.Handler) http.Handler {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tdefer log.Infof(\"%s %s\", r.Method, r.RequestURI)\n\t\t\th.ServeHTTP(w, r)\n\t\t})\n\t}\n}\n\n\/\/ Write response as a JSON formt\nfunc writeJSONResponse(w http.ResponseWriter, i interface{}, status int) {\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(status)\n\n\tif err := json.NewEncoder(w).Encode(i); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}\n\n\/\/ Validate search query\nfunc sanatizeSearchQuery(query string) (string, error) {\n\tif len(query) < 3 {\n\t\treturn query, errors.New(\"Search query should be of minimum 3 characters\")\n\t}\n\n\treturn strings.ToLower(query), nil\n}\n\n\/\/ Index page handler\nfunc indexHandler(w http.ResponseWriter, r *http.Request) {\n\twriteJSONResponse(w, DefaultResponse{\"Bankr API v3\"}, http.StatusOK)\n}\n\nfunc getGeocodeAddressHandler(w http.ResponseWriter, r *http.Request) {\n\tlatitude := r.URL.Query().Get(\"latitude\")\n\tlongitude := r.URL.Query().Get(\"longitude\")\n\tgeocodeApiKey := viper.GetString(\"geocode_api_key\")\n\tgeocodeAPIURI := viper.GetString(\"geocode_api_uri\")\n\n\tclient := &http.Client{}\n\trequest, err := http.NewRequest(\"GET\", geocodeAPIURI, nil)\n\n\tif err != nil {\n\t\tlog.Errorf(\"Error while getting location: %v\", err)\n\t\twriteJSONResponse(w, DefaultResponse{\"Error while getting location\"}, http.StatusBadGateway)\n\t}\n\n\t\/\/ Add query params to the request\n\tq := request.URL.Query()\n\tq.Add(\"latlng\", latitude+\",\"+longitude)\n\tq.Add(\"key\", geocodeApiKey)\n\trequest.URL.RawQuery = q.Encode()\n\n\tresp, err := client.Do(request)\n\tresponseData, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\tlog.Errorf(\"Error while parsing location response: %v\", err)\n\t\twriteJSONResponse(w, DefaultResponse{\"Error while getting location\"}, http.StatusBadGateway)\n\t}\n\n\tvar response map[string]interface{}\n\terr = json.Unmarshal(responseData, &response)\n\n\tif err != nil {\n\t\tlog.Errorf(\"Error while parsing unmarshaling response: %v\", err)\n\t\twriteJSONResponse(w, DefaultResponse{\"Error while getting location\"}, http.StatusBadGateway)\n\t}\n\n\twriteJSONResponse(w, response, http.StatusOK)\n}\n\n\/\/ Query search handler\nfunc searchHandler(w http.ResponseWriter, r *http.Request) {\n\tquery := r.URL.Query().Get(\"q\")\n\tpage := r.URL.Query().Get(\"p\")\n\n\tvar (\n\t\terrorResponse DefaultResponse\n\t\tsearchResults *bleve.SearchResult\n\t\tsearchResultItems []SeachResultItem\n\t\tresultsSize = 10\n\t\tpageNumber = 1\n\t\tmoreResultsAvailable = false\n\t)\n\n\t\/\/ Validate search query\n\tquery, err := sanatizeSearchQuery(query)\n\tif err != nil {\n\t\terrorResponse.Message = err.Error()\n\t\twriteJSONResponse(w, errorResponse, http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t\/\/ Validate page number\n\tif page == \"\" {\n\t\tpageNumber = 1\n\t} else {\n\t\tpageNumber, err = strconv.Atoi(page)\n\t\tif err != nil {\n\t\t\terrorResponse.Message = \"Invalid page number.\"\n\t\t\twriteJSONResponse(w, errorResponse, http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Search for give query and result size (startIndex + size). Start index is (pageNum - 1)\n\tsearchResults, err = querySearch(query, resultsSize, pageNumber-1)\n\tif err != nil {\n\t\tlog.Errorf(\"Error while searching query: %v\", err)\n\t\terrorResponse.Message = \"Something went wrong. Please report to admin.\"\n\t\twriteJSONResponse(w, errorResponse, http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t\/\/ Create list for search items response\n\tfor _, result := range searchResults.Hits {\n\t\tsearchResultItems = append(searchResultItems, SeachResultItem{\n\t\t\tID: result.ID,\n\t\t\tScore: result.Score,\n\t\t\tFields: result.Fields,\n\t\t})\n\t}\n\n\t\/\/ Check if more available\n\tif searchResults.Total > uint64(pageNumber+resultsSize) {\n\t\tmoreResultsAvailable = true\n\t}\n\n\t\/\/ Final search response\n\tsearchResultsResponse := SearchResultsResponse{\n\t\tTotalResultsPages: searchResults.Total - 1,\n\t\tMoreResults: moreResultsAvailable,\n\t\tPage: pageNumber,\n\t\tTime: searchResults.Took.String(),\n\t\tResults: searchResultItems,\n\t}\n\n\tlog.Infof(\"Searched for term q=%v - %v results generated in %v nanoseconds\", query, searchResults.Total, searchResults.Took.Nanoseconds())\n\n\t\/\/ Write the output\n\twriteJSONResponse(w, searchResultsResponse, http.StatusOK)\n}\n\nfunc initServer(address string) {\n\t\/\/ Server static files\n\thttp.Handle(\"\/\", http.FileServer(http.Dir(\".\/frontend\/dist\/\")))\n\n\t\/\/ API handlers\n\thttp.Handle(\"\/api\", Adapt(http.HandlerFunc(indexHandler)))\n\thttp.Handle(\"\/api\/search\", Adapt(http.HandlerFunc(searchHandler), HttpLogger()))\n\thttp.Handle(\"\/api\/location\", Adapt(http.HandlerFunc(getGeocodeAddressHandler), HttpLogger()))\n\n\t\/\/ Start the server\n\tlog.Infof(\"Starting server: http:\/\/%s\", address)\n\tif err := http.ListenAndServe(address, nil); err != nil {\n\t\tlog.Error(\"Error starting the server: \", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package stockfighter provides a simple wrapper for the Stockfighter API:\n\/\/\n\/\/ https:\/\/www.stockfighter.io\/\n\/\/\n\/\/ https:\/\/starfighter.readme.io\/\npackage stockfighter\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\n\t\"github.com\/gorilla\/websocket\"\n)\n\nfunc apiUrl(path string, args ...interface{}) string {\n\treturn fmt.Sprintf(\"https:\/\/api.stockfighter.io\/ob\/api\/\"+path, args...)\n}\n\nfunc gmUrl(path string, args ...interface{}) string {\n\treturn fmt.Sprintf(\"https:\/\/api.stockfighter.io\/gm\/\"+path, args...)\n}\n\nfunc wsUrl(path string, args ...interface{}) string {\n\treturn fmt.Sprintf(\"wss:\/\/api.stockfighter.io\/ob\/api\/ws\/\"+path, args...)\n}\n\ntype apiCall interface {\n\tErr() error\n}\n\ntype response struct {\n\tOk bool\n\tError string\n}\n\nfunc (r response) Err() error {\n\tif len(r.Error) > 0 {\n\t\treturn fmt.Errorf(r.Error)\n\t}\n\treturn nil\n}\n\ntype venueResponse struct {\n\tresponse\n\tVenue string\n}\n\ntype stocksResponse struct {\n\tresponse\n\tSymbols []Symbol\n}\n\ntype orderBookResponse struct {\n\tresponse\n\tOrderBook\n}\n\ntype quoteResponse struct {\n\tresponse\n\tQuote\n}\n\ntype orderResponse struct {\n\tresponse\n\tOrderState\n}\n\ntype bulkOrderResponse struct {\n\tresponse\n\tVenue string\n\tOrders []OrderState\n}\n\ntype quoteMessage struct {\n\tOk bool\n\tQuote Quote\n}\n\ntype executionMessage struct {\n\tOk bool\n\tExecution\n}\n\ntype gameResponse struct {\n\tresponse\n\tGame\n}\n\ntype gameStateResponse struct {\n\tresponse\n\tGameState\n}\n\ntype Stockfighter struct {\n\tapiKey string\n\tdebug bool\n}\n\n\/\/ Create new Stockfighter API instance.\n\/\/ If debug is true, log all HTTP requests and responses.\nfunc NewStockfighter(apiKey string, debug bool) *Stockfighter {\n\treturn &Stockfighter{\n\t\tapiKey: apiKey,\n\t\tdebug: debug,\n\t}\n}\n\n\/\/ Check the API Is Up. If venue is a non-empty string, then check that venue.\n\/\/ Returns nil if ok, otherwise the error indicates the problem.\nfunc (sf *Stockfighter) Heartbeat(venue string) error {\n\tvar resp response\n\turl := apiUrl(\"heartbeat\")\n\tif len(venue) > 0 {\n\t\turl = apiUrl(\"venues\/%s\/heartbeat\", venue)\n\t}\n\treturn sf.do(\"GET\", url, nil, &resp)\n}\n\n\/\/ Get the stocks available for trading on a venue.\nfunc (sf *Stockfighter) Stocks(venue string) ([]Symbol, error) {\n\tvar resp stocksResponse\n\turl := apiUrl(\"venues\/%s\/stocks\", venue)\n\tif err := sf.do(\"GET\", url, nil, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.Symbols, nil\n}\n\n\/\/ Get the orderbook for a particular stock.\nfunc (sf *Stockfighter) OrderBook(venue, stock string) (*OrderBook, error) {\n\tvar resp orderBookResponse\n\turl := apiUrl(\"venues\/%s\/stocks\/%s\", venue, stock)\n\tif err := sf.do(\"GET\", url, nil, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp.OrderBook, nil\n}\n\n\/\/ Get a quick look at the most recent trade information for a stock.\nfunc (sf *Stockfighter) Quote(venue, stock string) (*Quote, error) {\n\tvar resp quoteResponse\n\turl := apiUrl(\"venues\/%s\/stocks\/%s\/quote\", venue, stock)\n\tif err := sf.do(\"GET\", url, nil, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp.Quote, nil\n}\n\n\/\/ Place an order\nfunc (sf *Stockfighter) Place(order *Order) (*OrderState, error) {\n\tbody, err := encodeJson(order)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar resp orderResponse\n\turl := apiUrl(\"venues\/%s\/stocks\/%s\/orders\", order.Venue, order.Stock)\n\tif err := sf.do(\"POST\", url, body, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp.OrderState, nil\n}\n\n\/\/ Get the status for an existing order.\nfunc (sf *Stockfighter) Status(venue, stock string, id uint64) (*OrderState, error) {\n\tvar resp orderResponse\n\turl := apiUrl(\"venues\/%s\/stocks\/%s\/orders\/%d\", venue, stock, id)\n\tif err := sf.do(\"GET\", url, nil, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp.OrderState, nil\n}\n\n\/\/ Get the statuses for all an account's orders of a stock on a venue.\n\/\/ If stock is a non-empty string, only statuses for that stock are returned\nfunc (sf *Stockfighter) StockStatus(account, venue, stock string) ([]OrderState, error) {\n\turl := apiUrl(\"venues\/%s\/accounts\/%s\/orders\", venue, account)\n\tif len(stock) > 0 {\n\t\turl = apiUrl(\"venues\/%s\/accounts\/%s\/stocks\/%s\/orders\", venue, account, stock)\n\t}\n\tvar resp bulkOrderResponse\n\tif err := sf.do(\"GET\", url, nil, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.Orders, nil\n}\n\n\/\/ Cancel an existing order\nfunc (sf *Stockfighter) Cancel(venue, stock string, id uint64) error {\n\tvar resp response\n\turl := apiUrl(\"venues\/%s\/stocks\/%s\/orders\/%d\", venue, stock, id)\n\treturn sf.do(\"DELETE\", url, nil, &resp)\n}\n\n\/\/ Subscribe to a stream of quotes for a venue.\n\/\/ If stock is a non-empy string, only quotes for that stock are returned.\nfunc (sf *Stockfighter) Quotes(account, venue, stock string) (chan *Quote, error) {\n\turl := wsUrl(\"%s\/venues\/%s\/tickertape\", account, venue)\n\tif len(stock) > 0 {\n\t\turl = wsUrl(\"%s\/venues\/%s\/tickertape\/stocks\/%s\", account, venue, stock)\n\t}\n\tc := make(chan *Quote)\n\treturn c, sf.pump(url, func(conn *websocket.Conn) error {\n\t\tvar quote quoteMessage\n\t\tif err := conn.ReadJSON("e); err != nil {\n\t\t\tclose(c)\n\t\t\treturn err\n\t\t}\n\t\tif quote.Ok {\n\t\t\tc <- "e.Quote\n\t\t}\n\t\treturn nil\n\t})\n}\n\n\/\/ Subscribe to a stream of executions for a venue.\n\/\/ If stock is a non-empy string, only executions for that stock are returned.\nfunc (sf *Stockfighter) Executions(account, venue, stock string) (chan *Execution, error) {\n\turl := wsUrl(\"%s\/venues\/%s\/executions\", account, venue)\n\tif len(stock) > 0 {\n\t\turl = wsUrl(\"%s\/venues\/%s\/executions\/stocks\/%s\", account, venue, stock)\n\t}\n\tc := make(chan *Execution)\n\treturn c, sf.pump(url, func(conn *websocket.Conn) error {\n\t\tvar execution executionMessage\n\t\tif err := conn.ReadJSON(&execution); err != nil {\n\t\t\tclose(c)\n\t\t\treturn err\n\t\t}\n\t\tif execution.Ok {\n\t\t\tc <- &execution.Execution\n\t\t}\n\t\treturn nil\n\t})\n}\n\n\/\/ Start a new level.\nfunc (sf *Stockfighter) Start(level string) (*Game, error) {\n\tvar resp gameResponse\n\turl := gmUrl(\"levels\/%s\", level)\n\tif err := sf.do(\"POST\", url, nil, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp.Game, nil\n}\n\n\/\/ Restart a level using the instance id from a previously started Game.\nfunc (sf *Stockfighter) Restart(id uint64) error {\n\tvar resp response\n\turl := gmUrl(\"instances\/%d\/restart\", id)\n\treturn sf.do(\"POST\", url, nil, &resp)\n}\n\n\/\/ Resume a level using the instance id from a previously started Game.\nfunc (sf *Stockfighter) Resume(id uint64) error {\n\tvar resp response\n\turl := gmUrl(\"instances\/%d\/resume\", id)\n\treturn sf.do(\"POST\", url, nil, &resp)\n}\n\n\/\/ Stop a level using the instance id from a previously started Game.\nfunc (sf *Stockfighter) Stop(id uint64) error {\n\tvar resp response\n\turl := gmUrl(\"instances\/%d\/stop\", id)\n\treturn sf.do(\"POST\", url, nil, &resp)\n}\n\n\/\/ Get the GameState using the instance id from a previously started Game.\nfunc (sf *Stockfighter) GameStatus(id uint64) (*GameState, error) {\n\tvar resp gameStateResponse\n\turl := gmUrl(\"instances\/%d\", id)\n\tif err := sf.do(\"GET\", url, nil, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp.GameState, nil\n}\n\nfunc (sf *Stockfighter) Judge(id uint64) error {\n\tvar resp response\n\turl := gmUrl(\"instances\/%d\/judge\", id)\n\ttest := map[string]interface{}{\"test\": \"test\"}\n\tbody, err := encodeJson(test)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn sf.do(\"POST\", url, body, &resp)\n}\n\nfunc encodeJson(v interface{}) (io.Reader, error) {\n\tvar buf bytes.Buffer\n\tif err := json.NewEncoder(&buf).Encode(v); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &buf, nil\n}\n\nfunc (sf *Stockfighter) do(method, url string, body io.Reader, value apiCall) error {\n\treq, err := http.NewRequest(method, url, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Add(\"X-Starfighter-Authorization\", sf.apiKey)\n\tif sf.debug {\n\t\tout, _ := httputil.DumpRequest(req, true)\n\t\tlog.Println(string(out))\n\t}\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif sf.debug {\n\t\tout, _ := httputil.DumpResponse(resp, true)\n\t\tlog.Println(string(out))\n\t}\n\tif err := json.NewDecoder(resp.Body).Decode(value); err != nil {\n\t\tif resp.StatusCode >= 500 {\n\t\t\treturn fmt.Errorf(resp.Status)\n\t\t}\n\t\treturn err\n\t}\n\treturn value.Err()\n}\n\nfunc (sf *Stockfighter) pump(url string, f func(*websocket.Conn) error) error {\n\tconn, _, err := websocket.DefaultDialer.Dial(url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgo func() {\n\t\tdefer conn.Close()\n\t\tfor err := f(conn); err == nil; err = f(conn) {\n\t\t}\n\t}()\n\treturn nil\n}\n<commit_msg>add debug to web sockets<commit_after>\/\/ Package stockfighter provides a simple wrapper for the Stockfighter API:\n\/\/\n\/\/ https:\/\/www.stockfighter.io\/\n\/\/\n\/\/ https:\/\/starfighter.readme.io\/\npackage stockfighter\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\n\t\"github.com\/gorilla\/websocket\"\n)\n\nfunc apiUrl(path string, args ...interface{}) string {\n\treturn fmt.Sprintf(\"https:\/\/api.stockfighter.io\/ob\/api\/\"+path, args...)\n}\n\nfunc gmUrl(path string, args ...interface{}) string {\n\treturn fmt.Sprintf(\"https:\/\/api.stockfighter.io\/gm\/\"+path, args...)\n}\n\nfunc wsUrl(path string, args ...interface{}) string {\n\treturn fmt.Sprintf(\"wss:\/\/api.stockfighter.io\/ob\/api\/ws\/\"+path, args...)\n}\n\ntype apiCall interface {\n\tErr() error\n}\n\ntype response struct {\n\tOk bool\n\tError string\n}\n\nfunc (r response) Err() error {\n\tif len(r.Error) > 0 {\n\t\treturn fmt.Errorf(r.Error)\n\t}\n\treturn nil\n}\n\ntype venueResponse struct {\n\tresponse\n\tVenue string\n}\n\ntype stocksResponse struct {\n\tresponse\n\tSymbols []Symbol\n}\n\ntype orderBookResponse struct {\n\tresponse\n\tOrderBook\n}\n\ntype quoteResponse struct {\n\tresponse\n\tQuote\n}\n\ntype orderResponse struct {\n\tresponse\n\tOrderState\n}\n\ntype bulkOrderResponse struct {\n\tresponse\n\tVenue string\n\tOrders []OrderState\n}\n\ntype quoteMessage struct {\n\tOk bool\n\tQuote Quote\n}\n\ntype executionMessage struct {\n\tOk bool\n\tExecution\n}\n\ntype gameResponse struct {\n\tresponse\n\tGame\n}\n\ntype gameStateResponse struct {\n\tresponse\n\tGameState\n}\n\ntype Stockfighter struct {\n\tapiKey string\n\tdebug bool\n}\n\n\/\/ Create new Stockfighter API instance.\n\/\/ If debug is true, log all HTTP requests and responses.\nfunc NewStockfighter(apiKey string, debug bool) *Stockfighter {\n\treturn &Stockfighter{\n\t\tapiKey: apiKey,\n\t\tdebug: debug,\n\t}\n}\n\n\/\/ Check the API Is Up. If venue is a non-empty string, then check that venue.\n\/\/ Returns nil if ok, otherwise the error indicates the problem.\nfunc (sf *Stockfighter) Heartbeat(venue string) error {\n\tvar resp response\n\turl := apiUrl(\"heartbeat\")\n\tif len(venue) > 0 {\n\t\turl = apiUrl(\"venues\/%s\/heartbeat\", venue)\n\t}\n\treturn sf.do(\"GET\", url, nil, &resp)\n}\n\n\/\/ Get the stocks available for trading on a venue.\nfunc (sf *Stockfighter) Stocks(venue string) ([]Symbol, error) {\n\tvar resp stocksResponse\n\turl := apiUrl(\"venues\/%s\/stocks\", venue)\n\tif err := sf.do(\"GET\", url, nil, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.Symbols, nil\n}\n\n\/\/ Get the orderbook for a particular stock.\nfunc (sf *Stockfighter) OrderBook(venue, stock string) (*OrderBook, error) {\n\tvar resp orderBookResponse\n\turl := apiUrl(\"venues\/%s\/stocks\/%s\", venue, stock)\n\tif err := sf.do(\"GET\", url, nil, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp.OrderBook, nil\n}\n\n\/\/ Get a quick look at the most recent trade information for a stock.\nfunc (sf *Stockfighter) Quote(venue, stock string) (*Quote, error) {\n\tvar resp quoteResponse\n\turl := apiUrl(\"venues\/%s\/stocks\/%s\/quote\", venue, stock)\n\tif err := sf.do(\"GET\", url, nil, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp.Quote, nil\n}\n\n\/\/ Place an order\nfunc (sf *Stockfighter) Place(order *Order) (*OrderState, error) {\n\tbody, err := encodeJson(order)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar resp orderResponse\n\turl := apiUrl(\"venues\/%s\/stocks\/%s\/orders\", order.Venue, order.Stock)\n\tif err := sf.do(\"POST\", url, body, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp.OrderState, nil\n}\n\n\/\/ Get the status for an existing order.\nfunc (sf *Stockfighter) Status(venue, stock string, id uint64) (*OrderState, error) {\n\tvar resp orderResponse\n\turl := apiUrl(\"venues\/%s\/stocks\/%s\/orders\/%d\", venue, stock, id)\n\tif err := sf.do(\"GET\", url, nil, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp.OrderState, nil\n}\n\n\/\/ Get the statuses for all an account's orders of a stock on a venue.\n\/\/ If stock is a non-empty string, only statuses for that stock are returned\nfunc (sf *Stockfighter) StockStatus(account, venue, stock string) ([]OrderState, error) {\n\turl := apiUrl(\"venues\/%s\/accounts\/%s\/orders\", venue, account)\n\tif len(stock) > 0 {\n\t\turl = apiUrl(\"venues\/%s\/accounts\/%s\/stocks\/%s\/orders\", venue, account, stock)\n\t}\n\tvar resp bulkOrderResponse\n\tif err := sf.do(\"GET\", url, nil, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.Orders, nil\n}\n\n\/\/ Cancel an existing order\nfunc (sf *Stockfighter) Cancel(venue, stock string, id uint64) error {\n\tvar resp response\n\turl := apiUrl(\"venues\/%s\/stocks\/%s\/orders\/%d\", venue, stock, id)\n\treturn sf.do(\"DELETE\", url, nil, &resp)\n}\n\n\/\/ Subscribe to a stream of quotes for a venue.\n\/\/ If stock is a non-empy string, only quotes for that stock are returned.\nfunc (sf *Stockfighter) Quotes(account, venue, stock string) (chan *Quote, error) {\n\turl := wsUrl(\"%s\/venues\/%s\/tickertape\", account, venue)\n\tif len(stock) > 0 {\n\t\turl = wsUrl(\"%s\/venues\/%s\/tickertape\/stocks\/%s\", account, venue, stock)\n\t}\n\tc := make(chan *Quote)\n\treturn c, sf.pump(url, func(conn *websocket.Conn) error {\n\t\tvar quote quoteMessage\n\t\tif err := sf.decodeMessage(conn, "e); err != nil {\n\t\t\tclose(c)\n\t\t\treturn err\n\t\t}\n\t\tif quote.Ok {\n\t\t\tc <- "e.Quote\n\t\t}\n\t\treturn nil\n\t})\n}\n\n\/\/ Subscribe to a stream of executions for a venue.\n\/\/ If stock is a non-empy string, only executions for that stock are returned.\nfunc (sf *Stockfighter) Executions(account, venue, stock string) (chan *Execution, error) {\n\turl := wsUrl(\"%s\/venues\/%s\/executions\", account, venue)\n\tif len(stock) > 0 {\n\t\turl = wsUrl(\"%s\/venues\/%s\/executions\/stocks\/%s\", account, venue, stock)\n\t}\n\tc := make(chan *Execution)\n\treturn c, sf.pump(url, func(conn *websocket.Conn) error {\n\t\tvar execution executionMessage\n\t\tif err := sf.decodeMessage(conn, &execution); err != nil {\n\t\t\tclose(c)\n\t\t\treturn err\n\t\t}\n\t\tif execution.Ok {\n\t\t\tc <- &execution.Execution\n\t\t}\n\t\treturn nil\n\t})\n}\n\nfunc (sf *Stockfighter) decodeMessage(conn *websocket.Conn, v interface{}) error {\n\t_, msg, err := conn.ReadMessage()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif sf.debug {\n\t\tlog.Println(string(msg))\n\t}\n\treturn json.Unmarshal(msg, v)\n}\n\n\/\/ Start a new level.\nfunc (sf *Stockfighter) Start(level string) (*Game, error) {\n\tvar resp gameResponse\n\turl := gmUrl(\"levels\/%s\", level)\n\tif err := sf.do(\"POST\", url, nil, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp.Game, nil\n}\n\n\/\/ Restart a level using the instance id from a previously started Game.\nfunc (sf *Stockfighter) Restart(id uint64) error {\n\tvar resp response\n\turl := gmUrl(\"instances\/%d\/restart\", id)\n\treturn sf.do(\"POST\", url, nil, &resp)\n}\n\n\/\/ Resume a level using the instance id from a previously started Game.\nfunc (sf *Stockfighter) Resume(id uint64) error {\n\tvar resp response\n\turl := gmUrl(\"instances\/%d\/resume\", id)\n\treturn sf.do(\"POST\", url, nil, &resp)\n}\n\n\/\/ Stop a level using the instance id from a previously started Game.\nfunc (sf *Stockfighter) Stop(id uint64) error {\n\tvar resp response\n\turl := gmUrl(\"instances\/%d\/stop\", id)\n\treturn sf.do(\"POST\", url, nil, &resp)\n}\n\n\/\/ Get the GameState using the instance id from a previously started Game.\nfunc (sf *Stockfighter) GameStatus(id uint64) (*GameState, error) {\n\tvar resp gameStateResponse\n\turl := gmUrl(\"instances\/%d\", id)\n\tif err := sf.do(\"GET\", url, nil, &resp); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp.GameState, nil\n}\n\nfunc (sf *Stockfighter) Judge(id uint64) error {\n\tvar resp response\n\turl := gmUrl(\"instances\/%d\/judge\", id)\n\ttest := map[string]interface{}{\"test\": \"test\"}\n\tbody, err := encodeJson(test)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn sf.do(\"POST\", url, body, &resp)\n}\n\nfunc encodeJson(v interface{}) (io.Reader, error) {\n\tvar buf bytes.Buffer\n\tif err := json.NewEncoder(&buf).Encode(v); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &buf, nil\n}\n\nfunc (sf *Stockfighter) do(method, url string, body io.Reader, value apiCall) error {\n\treq, err := http.NewRequest(method, url, body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Add(\"X-Starfighter-Authorization\", sf.apiKey)\n\tif sf.debug {\n\t\tout, _ := httputil.DumpRequest(req, true)\n\t\tlog.Println(string(out))\n\t}\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif sf.debug {\n\t\tout, _ := httputil.DumpResponse(resp, true)\n\t\tlog.Println(string(out))\n\t}\n\tif err := json.NewDecoder(resp.Body).Decode(value); err != nil {\n\t\tif resp.StatusCode >= 500 {\n\t\t\treturn fmt.Errorf(resp.Status)\n\t\t}\n\t\treturn err\n\t}\n\treturn value.Err()\n}\n\nfunc (sf *Stockfighter) pump(url string, f func(*websocket.Conn) error) error {\n\tconn, _, err := websocket.DefaultDialer.Dial(url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgo func() {\n\t\tdefer conn.Close()\n\t\tfor err := f(conn); err == nil; err = f(conn) {\n\t\t}\n\t}()\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Minimal object storage library (C) 2015 Minio, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage objectstorage\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"runtime\"\n\t\"sort\"\n\t\"time\"\n)\n\n\/\/ API - object storage API interface\ntype API interface {\n\t\/\/ Bucket Read\/Write\/Stat operations\n\tBucketAPI\n\n\t\/\/ Object Read\/Write\/Stat operations\n\tObjectAPI\n}\n\n\/\/ BucketAPI - bucket specific Read\/Write\/Stat interface\ntype BucketAPI interface {\n\tCreateBucket(bucket, acl, location string) error\n\tSetBucketACL(bucket, acl string) error\n\tStatBucket(bucket string) error\n\tDeleteBucket(bucket string) error\n\n\tListObjects(bucket, prefix string, recursive bool) <-chan ObjectOnChannel\n\tListBuckets() <-chan BucketOnChannel\n}\n\n\/\/ ObjectAPI - object specific Read\/Write\/Stat interface\ntype ObjectAPI interface {\n\tGetObject(bucket, object string, offset, length uint64) (io.ReadCloser, *ObjectMetadata, error)\n\tCreateObject(bucket, object string, size uint64, data io.Reader) (string, error)\n\tStatObject(bucket, object string) (*ObjectMetadata, error)\n\tDeleteObject(bucket, object string) error\n}\n\n\/\/ BucketOnChannel - bucket metadata over read channel\ntype BucketOnChannel struct {\n\tData *BucketMetadata\n\tErr error\n}\n\n\/\/ ObjectOnChannel - object metadata over read channel\ntype ObjectOnChannel struct {\n\tData *ObjectMetadata\n\tErr error\n}\n\n\/\/ BucketMetadata container for bucket metadata\ntype BucketMetadata struct {\n\t\/\/ The name of the bucket.\n\tName string\n\t\/\/ Date the bucket was created.\n\tCreationDate time.Time\n}\n\n\/\/ ObjectMetadata container for object metadata\ntype ObjectMetadata struct {\n\tETag string\n\tKey string\n\tLastModified time.Time\n\tSize int64\n\n\tOwner struct {\n\t\tDisplayName string\n\t\tID string\n\t}\n\n\t\/\/ The class of storage used to store the object.\n\tStorageClass string\n}\n\n\/\/ Regions s3 region map used by bucket location constraint\nvar Regions = map[string]string{\n\t\"us-gov-west-1\": \"https:\/\/s3-fips-us-gov-west-1.amazonaws.com\",\n\t\"us-east-1\": \"https:\/\/s3.amazonaws.com\",\n\t\"us-west-1\": \"https:\/\/s3-us-west-1.amazonaws.com\",\n\t\"us-west-2\": \"https:\/\/s3-us-west-2.amazonaws.com\",\n\t\"eu-west-1\": \"https:\/\/s3-eu-west-1.amazonaws.com\",\n\t\"eu-central-1\": \"https:\/\/s3-eu-central-1.amazonaws.com\",\n\t\"ap-southeast-1\": \"https:\/\/s3-ap-southeast-1.amazonaws.com\",\n\t\"ap-southeast-2\": \"https:\/\/s3-ap-southeast-2.amazonaws.com\",\n\t\"ap-northeast-1\": \"https:\/\/s3-ap-northeast-1.amazonaws.com\",\n\t\"sa-east-1\": \"https:\/\/s3-sa-east-1.amazonaws.com\",\n\t\"cn-north-1\": \"https:\/\/s3.cn-north-1.amazonaws.com.cn\",\n}\n\ntype api struct {\n\t*lowLevelAPI\n}\n\n\/\/ Config - main configuration struct used by all to set endpoint, credentials, and other options for requests.\ntype Config struct {\n\tAccessKeyID string\n\tSecretAccessKey string\n\tEndpoint string\n\tContentType string\n\t\/\/ not exported internal usage only\n\tuserAgent string\n}\n\n\/\/ Global constants\nconst (\n\tLibraryName = \"objectstorage-go\/\"\n\tLibraryVersion = \"0.1\"\n)\n\n\/\/ New - instantiate a new minio api client\nfunc New(config *Config) API {\n\t\/\/ Not configurable at the moment, but we will relook on this in future\n\tconfig.userAgent = LibraryName + \" (\" + LibraryVersion + \"; \" + runtime.GOOS + \"; \" + runtime.GOARCH + \")\"\n\treturn &api{&lowLevelAPI{config}}\n}\n\n\/\/\/ Object operations\n\n\/\/ GetObject retrieve object\n\/\/\n\/\/ Additionally it also takes range arguments to download the specified range bytes of an object.\n\/\/ For more information about the HTTP Range header, go to http:\/\/www.w3.org\/Protocols\/rfc2616\/rfc2616-sec14.html#sec14.35.\nfunc (a *api) GetObject(bucket, object string, offset, length uint64) (io.ReadCloser, *ObjectMetadata, error) {\n\t\/\/ get the the object\n\t\/\/ NOTE : returned md5sum could be the md5sum of the partial object itself\n\t\/\/ not the whole object depending on if offset range was requested or not\n\tbody, objectMetadata, err := a.getObject(bucket, object, offset, length)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn body, objectMetadata, nil\n}\n\n\/\/ completedParts is a wrapper to make parts sortable by their part number\n\/\/ multi part completion requires list of multi parts to be sorted\ntype completedParts []*completePart\n\nfunc (a completedParts) Len() int { return len(a) }\nfunc (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }\n\n\/\/ DefaultPartSize - default size per object after which PutObject becomes multipart\nvar DefaultPartSize uint64 = 1024 * 1024 * 5\n\n\/\/ CreateObject create an object in a bucket\n\/\/\n\/\/ You must have WRITE permissions on a bucket to create an object\n\/\/\n\/\/ This version of CreateObject automatically does multipart for more than 5MB worth of data\n\/\/ This default part size is not configurable currently but can be configurable in future\nfunc (a *api) CreateObject(bucket, object string, size uint64, data io.Reader) (string, error) {\n\tswitch {\n\tcase size < DefaultPartSize:\n\t\t\/\/ Single Part use case, use PutObject directly\n\t\tfor part := range MultiPart(data, DefaultPartSize) {\n\t\t\tif part.Err != nil {\n\t\t\t\treturn \"\", part.Err\n\t\t\t}\n\t\t\treturn \"\", a.putObject(bucket, object, part.Len, part.Data)\n\t\t}\n\tdefault:\n\t\tinitiateMultipartUploadResult, err := a.initiateMultipartUpload(bucket, object)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tuploadID := initiateMultipartUploadResult.UploadID\n\t\tcompleteMultipartUpload := new(completeMultipartUpload)\n\t\tfor part := range MultiPart(data, DefaultPartSize) {\n\t\t\tif part.Err != nil {\n\t\t\t\treturn \"\", part.Err\n\t\t\t}\n\t\t\tcompletePart, err := a.uploadPart(bucket, object, uploadID, part.Num, part.Len, part.Data)\n\t\t\tfmt.Println(err)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", a.abortMultipartUpload(bucket, object, uploadID)\n\t\t\t}\n\t\t\tcompleteMultipartUpload.Part = append(completeMultipartUpload.Part, completePart)\n\t\t}\n\t\tsort.Sort(completedParts(completeMultipartUpload.Part))\n\t\tcompleteMultipartUploadResult, err := a.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload)\n\t\tif err != nil {\n\t\t\treturn \"\", a.abortMultipartUpload(bucket, object, uploadID)\n\t\t}\n\t\treturn completeMultipartUploadResult.ETag, nil\n\t}\n\treturn \"\", errors.New(\"Unexpected control flow\")\n}\n\n\/\/ StatObject verify if object exists and you have permission to access it\nfunc (a *api) StatObject(bucket, object string) (*ObjectMetadata, error) {\n\treturn a.headObject(bucket, object)\n}\n\n\/\/ DeleteObject remove the object from a bucket\nfunc (a *api) DeleteObject(bucket, object string) error {\n\treturn a.deleteObject(bucket, object)\n}\n\n\/\/\/ Bucket operations\n\n\/\/ CreateBucket create a new bucket\n\/\/\n\/\/ optional arguments are acl and location - by default all buckets are created\n\/\/ with ``private`` acl and location set to US Standard if one wishes to set\n\/\/ different ACLs and Location one can set them properly.\n\/\/\n\/\/ ACL valid values\n\/\/ ------------------\n\/\/ private - owner gets full access [DEFAULT]\n\/\/ public-read - owner gets full access, others get read access\n\/\/ public-read-write - owner gets full access, others get full access too\n\/\/ ------------------\n\/\/\n\/\/ Location valid values\n\/\/ ------------------\n\/\/ [ us-west-1 | us-west-2 | eu-west-1 | eu-central-1 | ap-southeast-1 | ap-northeast-1 | ap-southeast-2 | sa-east-1 ]\n\/\/ Default - US standard\nfunc (a *api) CreateBucket(bucket, acl, location string) error {\n\treturn a.putBucket(bucket, acl, location)\n}\n\n\/\/ SetBucketACL set the permissions on an existing bucket using access control lists (ACL)\n\/\/\n\/\/ Currently supported are:\n\/\/ ------------------\n\/\/ private - owner gets full access\n\/\/ public-read - owner gets full access, others get read access\n\/\/ public-read-write - owner gets full access, others get full access too\n\/\/ ------------------\nfunc (a *api) SetBucketACL(bucket, acl string) error {\n\treturn a.putBucketACL(bucket, acl)\n}\n\n\/\/ StatBucket verify if bucket exists and you have permission to access it\nfunc (a *api) StatBucket(bucket string) error {\n\treturn a.headBucket(bucket)\n}\n\n\/\/ DeleteBucket deletes the bucket named in the URI\n\/\/ NOTE: -\n\/\/ All objects (including all object versions and delete markers)\n\/\/ in the bucket must be deleted before successfully attempting this request\nfunc (a *api) DeleteBucket(bucket string) error {\n\treturn a.deleteBucket(bucket)\n}\n\n\/\/ listObjectsInRoutine is an internal goroutine function called for listing objects\n\/\/ This function feeds data into channel\nfunc (a *api) listObjectsInRoutine(bucket, prefix string, recursive bool, ch chan ObjectOnChannel) {\n\tdefer close(ch)\n\tswitch {\n\tcase recursive == true:\n\t\tlistBucketResult, err := a.listObjects(bucket, 1000, \"\", prefix, \"\")\n\t\tif err != nil {\n\t\t\tch <- ObjectOnChannel{\n\t\t\t\tData: nil,\n\t\t\t\tErr: err,\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tfor _, object := range listBucketResult.Contents {\n\t\t\tch <- ObjectOnChannel{\n\t\t\t\tData: object,\n\t\t\t\tErr: nil,\n\t\t\t}\n\t\t}\n\t\tfor {\n\t\t\tif !listBucketResult.IsTruncated {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlistBucketResult, err = a.listObjects(bucket, 1000, listBucketResult.Marker, prefix, \"\")\n\t\t\tif err != nil {\n\t\t\t\tch <- ObjectOnChannel{\n\t\t\t\t\tData: nil,\n\t\t\t\t\tErr: err,\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, object := range listBucketResult.Contents {\n\t\t\t\tch <- ObjectOnChannel{\n\t\t\t\t\tData: object,\n\t\t\t\t\tErr: nil,\n\t\t\t\t}\n\t\t\t\tlistBucketResult.Marker = object.Key\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tlistBucketResult, err := a.listObjects(bucket, 1000, \"\", prefix, \"\/\")\n\t\tif err != nil {\n\t\t\tch <- ObjectOnChannel{\n\t\t\t\tData: nil,\n\t\t\t\tErr: err,\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tfor _, object := range listBucketResult.Contents {\n\t\t\tch <- ObjectOnChannel{\n\t\t\t\tData: object,\n\t\t\t\tErr: nil,\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ ListObjects - (List Objects) - List some objects or all recursively\n\/\/\n\/\/ ListObjects is a channel based API implemented to facilitate ease of usage of S3 API ListObjects()\n\/\/ by automatically recursively traversing all objects on a given bucket if specified.\n\/\/\n\/\/ Your input paramters are just bucket, prefix and recursive\n\/\/\n\/\/ If you enable recursive as 'true' this function will return back all the objects in a given bucket\n\/\/\n\/\/ eg:-\n\/\/ api := objectstorage.New(....)\n\/\/ for message := range api.ListObjects(\"mytestbucket\", \"starthere\", true) {\n\/\/ fmt.Println(message.Data)\n\/\/ }\n\/\/\nfunc (a *api) ListObjects(bucket string, prefix string, recursive bool) <-chan ObjectOnChannel {\n\tch := make(chan ObjectOnChannel)\n\tgo a.listObjectsInRoutine(bucket, prefix, recursive, ch)\n\treturn ch\n}\n\n\/\/ listBucketsInRoutine is an internal go routine function called for listing buckets\n\/\/ This function feeds data into channel\nfunc (a *api) listBucketsInRoutine(ch chan BucketOnChannel) {\n\tdefer close(ch)\n\tlistAllMyBucketListResults, err := a.listBuckets()\n\tif err != nil {\n\t\tch <- BucketOnChannel{\n\t\t\tData: nil,\n\t\t\tErr: err,\n\t\t}\n\t\treturn\n\t}\n\tfor _, bucket := range listAllMyBucketListResults.Buckets.Bucket {\n\t\tch <- BucketOnChannel{\n\t\t\tData: bucket,\n\t\t\tErr: nil,\n\t\t}\n\t}\n\n}\n\n\/\/ ListBuckets list of all buckets owned by the authenticated sender of the request\n\/\/\n\/\/ NOTE:\n\/\/ This call requires explicit authentication, no anonymous\n\/\/ requests are allowed for listing buckets\n\/\/\n\/\/ eg:-\n\/\/ api := objectstorage.New(....)\n\/\/ for message := range api.ListBuckets() {\n\/\/ fmt.Println(message.Data)\n\/\/ }\n\/\/\nfunc (a *api) ListBuckets() <-chan BucketOnChannel {\n\tch := make(chan BucketOnChannel)\n\tgo a.listBucketsInRoutine(ch)\n\treturn ch\n}\n<commit_msg>Fix a typo, push it in<commit_after>\/*\n * Minimal object storage library (C) 2015 Minio, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage objectstorage\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"runtime\"\n\t\"sort\"\n\t\"time\"\n)\n\n\/\/ API - object storage API interface\ntype API interface {\n\t\/\/ Bucket Read\/Write\/Stat operations\n\tBucketAPI\n\n\t\/\/ Object Read\/Write\/Stat operations\n\tObjectAPI\n}\n\n\/\/ BucketAPI - bucket specific Read\/Write\/Stat interface\ntype BucketAPI interface {\n\tCreateBucket(bucket, acl, location string) error\n\tSetBucketACL(bucket, acl string) error\n\tStatBucket(bucket string) error\n\tDeleteBucket(bucket string) error\n\n\tListObjects(bucket, prefix string, recursive bool) <-chan ObjectOnChannel\n\tListBuckets() <-chan BucketOnChannel\n}\n\n\/\/ ObjectAPI - object specific Read\/Write\/Stat interface\ntype ObjectAPI interface {\n\tGetObject(bucket, object string, offset, length uint64) (io.ReadCloser, *ObjectMetadata, error)\n\tCreateObject(bucket, object string, size uint64, data io.Reader) (string, error)\n\tStatObject(bucket, object string) (*ObjectMetadata, error)\n\tDeleteObject(bucket, object string) error\n}\n\n\/\/ BucketOnChannel - bucket metadata over read channel\ntype BucketOnChannel struct {\n\tData *BucketMetadata\n\tErr error\n}\n\n\/\/ ObjectOnChannel - object metadata over read channel\ntype ObjectOnChannel struct {\n\tData *ObjectMetadata\n\tErr error\n}\n\n\/\/ BucketMetadata container for bucket metadata\ntype BucketMetadata struct {\n\t\/\/ The name of the bucket.\n\tName string\n\t\/\/ Date the bucket was created.\n\tCreationDate time.Time\n}\n\n\/\/ ObjectMetadata container for object metadata\ntype ObjectMetadata struct {\n\tETag string\n\tKey string\n\tLastModified time.Time\n\tSize int64\n\n\tOwner struct {\n\t\tDisplayName string\n\t\tID string\n\t}\n\n\t\/\/ The class of storage used to store the object.\n\tStorageClass string\n}\n\n\/\/ Regions s3 region map used by bucket location constraint\nvar Regions = map[string]string{\n\t\"us-gov-west-1\": \"https:\/\/s3-fips-us-gov-west-1.amazonaws.com\",\n\t\"us-east-1\": \"https:\/\/s3.amazonaws.com\",\n\t\"us-west-1\": \"https:\/\/s3-us-west-1.amazonaws.com\",\n\t\"us-west-2\": \"https:\/\/s3-us-west-2.amazonaws.com\",\n\t\"eu-west-1\": \"https:\/\/s3-eu-west-1.amazonaws.com\",\n\t\"eu-central-1\": \"https:\/\/s3-eu-central-1.amazonaws.com\",\n\t\"ap-southeast-1\": \"https:\/\/s3-ap-southeast-1.amazonaws.com\",\n\t\"ap-southeast-2\": \"https:\/\/s3-ap-southeast-2.amazonaws.com\",\n\t\"ap-northeast-1\": \"https:\/\/s3-ap-northeast-1.amazonaws.com\",\n\t\"sa-east-1\": \"https:\/\/s3-sa-east-1.amazonaws.com\",\n\t\"cn-north-1\": \"https:\/\/s3.cn-north-1.amazonaws.com.cn\",\n}\n\ntype api struct {\n\t*lowLevelAPI\n}\n\n\/\/ Config - main configuration struct used by all to set endpoint, credentials, and other options for requests.\ntype Config struct {\n\tAccessKeyID string\n\tSecretAccessKey string\n\tEndpoint string\n\tContentType string\n\t\/\/ not exported internal usage only\n\tuserAgent string\n}\n\n\/\/ Global constants\nconst (\n\tLibraryName = \"objectstorage-go\/\"\n\tLibraryVersion = \"0.1\"\n)\n\n\/\/ New - instantiate a new minio api client\nfunc New(config *Config) API {\n\t\/\/ Not configurable at the moment, but we will relook on this in future\n\tconfig.userAgent = LibraryName + \" (\" + LibraryVersion + \"; \" + runtime.GOOS + \"; \" + runtime.GOARCH + \")\"\n\treturn &api{&lowLevelAPI{config}}\n}\n\n\/\/\/ Object operations\n\n\/\/ GetObject retrieve object\n\/\/\n\/\/ Additionally it also takes range arguments to download the specified range bytes of an object.\n\/\/ For more information about the HTTP Range header, go to http:\/\/www.w3.org\/Protocols\/rfc2616\/rfc2616-sec14.html#sec14.35.\nfunc (a *api) GetObject(bucket, object string, offset, length uint64) (io.ReadCloser, *ObjectMetadata, error) {\n\t\/\/ get the the object\n\t\/\/ NOTE : returned md5sum could be the md5sum of the partial object itself\n\t\/\/ not the whole object depending on if offset range was requested or not\n\tbody, objectMetadata, err := a.getObject(bucket, object, offset, length)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn body, objectMetadata, nil\n}\n\n\/\/ completedParts is a wrapper to make parts sortable by their part number\n\/\/ multi part completion requires list of multi parts to be sorted\ntype completedParts []*completePart\n\nfunc (a completedParts) Len() int { return len(a) }\nfunc (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].PartNumber }\n\n\/\/ DefaultPartSize - default size per object after which PutObject becomes multipart\nvar DefaultPartSize uint64 = 1024 * 1024 * 5\n\n\/\/ CreateObject create an object in a bucket\n\/\/\n\/\/ You must have WRITE permissions on a bucket to create an object\n\/\/\n\/\/ This version of CreateObject automatically does multipart for more than 5MB worth of data\n\/\/ This default part size is not configurable currently but can be configurable in future\nfunc (a *api) CreateObject(bucket, object string, size uint64, data io.Reader) (string, error) {\n\tswitch {\n\tcase size < DefaultPartSize:\n\t\t\/\/ Single Part use case, use PutObject directly\n\t\tfor part := range MultiPart(data, DefaultPartSize) {\n\t\t\tif part.Err != nil {\n\t\t\t\treturn \"\", part.Err\n\t\t\t}\n\t\t\treturn \"\", a.putObject(bucket, object, part.Len, part.Data)\n\t\t}\n\tdefault:\n\t\tinitiateMultipartUploadResult, err := a.initiateMultipartUpload(bucket, object)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tuploadID := initiateMultipartUploadResult.UploadID\n\t\tcompleteMultipartUpload := new(completeMultipartUpload)\n\t\tfor part := range MultiPart(data, DefaultPartSize) {\n\t\t\tif part.Err != nil {\n\t\t\t\treturn \"\", part.Err\n\t\t\t}\n\t\t\tcompletePart, err := a.uploadPart(bucket, object, uploadID, part.Num, part.Len, part.Data)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", a.abortMultipartUpload(bucket, object, uploadID)\n\t\t\t}\n\t\t\tcompleteMultipartUpload.Part = append(completeMultipartUpload.Part, completePart)\n\t\t}\n\t\tsort.Sort(completedParts(completeMultipartUpload.Part))\n\t\tcompleteMultipartUploadResult, err := a.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload)\n\t\tif err != nil {\n\t\t\treturn \"\", a.abortMultipartUpload(bucket, object, uploadID)\n\t\t}\n\t\treturn completeMultipartUploadResult.ETag, nil\n\t}\n\treturn \"\", errors.New(\"Unexpected control flow\")\n}\n\n\/\/ StatObject verify if object exists and you have permission to access it\nfunc (a *api) StatObject(bucket, object string) (*ObjectMetadata, error) {\n\treturn a.headObject(bucket, object)\n}\n\n\/\/ DeleteObject remove the object from a bucket\nfunc (a *api) DeleteObject(bucket, object string) error {\n\treturn a.deleteObject(bucket, object)\n}\n\n\/\/\/ Bucket operations\n\n\/\/ CreateBucket create a new bucket\n\/\/\n\/\/ optional arguments are acl and location - by default all buckets are created\n\/\/ with ``private`` acl and location set to US Standard if one wishes to set\n\/\/ different ACLs and Location one can set them properly.\n\/\/\n\/\/ ACL valid values\n\/\/ ------------------\n\/\/ private - owner gets full access [DEFAULT]\n\/\/ public-read - owner gets full access, others get read access\n\/\/ public-read-write - owner gets full access, others get full access too\n\/\/ ------------------\n\/\/\n\/\/ Location valid values\n\/\/ ------------------\n\/\/ [ us-west-1 | us-west-2 | eu-west-1 | eu-central-1 | ap-southeast-1 | ap-northeast-1 | ap-southeast-2 | sa-east-1 ]\n\/\/ Default - US standard\nfunc (a *api) CreateBucket(bucket, acl, location string) error {\n\treturn a.putBucket(bucket, acl, location)\n}\n\n\/\/ SetBucketACL set the permissions on an existing bucket using access control lists (ACL)\n\/\/\n\/\/ Currently supported are:\n\/\/ ------------------\n\/\/ private - owner gets full access\n\/\/ public-read - owner gets full access, others get read access\n\/\/ public-read-write - owner gets full access, others get full access too\n\/\/ ------------------\nfunc (a *api) SetBucketACL(bucket, acl string) error {\n\treturn a.putBucketACL(bucket, acl)\n}\n\n\/\/ StatBucket verify if bucket exists and you have permission to access it\nfunc (a *api) StatBucket(bucket string) error {\n\treturn a.headBucket(bucket)\n}\n\n\/\/ DeleteBucket deletes the bucket named in the URI\n\/\/ NOTE: -\n\/\/ All objects (including all object versions and delete markers)\n\/\/ in the bucket must be deleted before successfully attempting this request\nfunc (a *api) DeleteBucket(bucket string) error {\n\treturn a.deleteBucket(bucket)\n}\n\n\/\/ listObjectsInRoutine is an internal goroutine function called for listing objects\n\/\/ This function feeds data into channel\nfunc (a *api) listObjectsInRoutine(bucket, prefix string, recursive bool, ch chan ObjectOnChannel) {\n\tdefer close(ch)\n\tswitch {\n\tcase recursive == true:\n\t\tlistBucketResult, err := a.listObjects(bucket, 1000, \"\", prefix, \"\")\n\t\tif err != nil {\n\t\t\tch <- ObjectOnChannel{\n\t\t\t\tData: nil,\n\t\t\t\tErr: err,\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tfor _, object := range listBucketResult.Contents {\n\t\t\tch <- ObjectOnChannel{\n\t\t\t\tData: object,\n\t\t\t\tErr: nil,\n\t\t\t}\n\t\t}\n\t\tfor {\n\t\t\tif !listBucketResult.IsTruncated {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlistBucketResult, err = a.listObjects(bucket, 1000, listBucketResult.Marker, prefix, \"\")\n\t\t\tif err != nil {\n\t\t\t\tch <- ObjectOnChannel{\n\t\t\t\t\tData: nil,\n\t\t\t\t\tErr: err,\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor _, object := range listBucketResult.Contents {\n\t\t\t\tch <- ObjectOnChannel{\n\t\t\t\t\tData: object,\n\t\t\t\t\tErr: nil,\n\t\t\t\t}\n\t\t\t\tlistBucketResult.Marker = object.Key\n\t\t\t}\n\t\t}\n\tdefault:\n\t\tlistBucketResult, err := a.listObjects(bucket, 1000, \"\", prefix, \"\/\")\n\t\tif err != nil {\n\t\t\tch <- ObjectOnChannel{\n\t\t\t\tData: nil,\n\t\t\t\tErr: err,\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tfor _, object := range listBucketResult.Contents {\n\t\t\tch <- ObjectOnChannel{\n\t\t\t\tData: object,\n\t\t\t\tErr: nil,\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ ListObjects - (List Objects) - List some objects or all recursively\n\/\/\n\/\/ ListObjects is a channel based API implemented to facilitate ease of usage of S3 API ListObjects()\n\/\/ by automatically recursively traversing all objects on a given bucket if specified.\n\/\/\n\/\/ Your input paramters are just bucket, prefix and recursive\n\/\/\n\/\/ If you enable recursive as 'true' this function will return back all the objects in a given bucket\n\/\/\n\/\/ eg:-\n\/\/ api := objectstorage.New(....)\n\/\/ for message := range api.ListObjects(\"mytestbucket\", \"starthere\", true) {\n\/\/ fmt.Println(message.Data)\n\/\/ }\n\/\/\nfunc (a *api) ListObjects(bucket string, prefix string, recursive bool) <-chan ObjectOnChannel {\n\tch := make(chan ObjectOnChannel)\n\tgo a.listObjectsInRoutine(bucket, prefix, recursive, ch)\n\treturn ch\n}\n\n\/\/ listBucketsInRoutine is an internal go routine function called for listing buckets\n\/\/ This function feeds data into channel\nfunc (a *api) listBucketsInRoutine(ch chan BucketOnChannel) {\n\tdefer close(ch)\n\tlistAllMyBucketListResults, err := a.listBuckets()\n\tif err != nil {\n\t\tch <- BucketOnChannel{\n\t\t\tData: nil,\n\t\t\tErr: err,\n\t\t}\n\t\treturn\n\t}\n\tfor _, bucket := range listAllMyBucketListResults.Buckets.Bucket {\n\t\tch <- BucketOnChannel{\n\t\t\tData: bucket,\n\t\t\tErr: nil,\n\t\t}\n\t}\n\n}\n\n\/\/ ListBuckets list of all buckets owned by the authenticated sender of the request\n\/\/\n\/\/ NOTE:\n\/\/ This call requires explicit authentication, no anonymous\n\/\/ requests are allowed for listing buckets\n\/\/\n\/\/ eg:-\n\/\/ api := objectstorage.New(....)\n\/\/ for message := range api.ListBuckets() {\n\/\/ fmt.Println(message.Data)\n\/\/ }\n\/\/\nfunc (a *api) ListBuckets() <-chan BucketOnChannel {\n\tch := make(chan BucketOnChannel)\n\tgo a.listBucketsInRoutine(ch)\n\treturn ch\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\n\t\"github.com\/bitly\/go-nsq\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"github.com\/zenazn\/goji\/web\"\n)\n\ntype ScriptRequest struct {\n\tID int\n\tScript string `json:\"script\"`\n\tArgs []string `json:\"args\"`\n\tFiles map[string]string `json:\"files\"`\n\tCallbackURL string `json:\"callback_url\"`\n}\n\n\/\/ Handle the root route, also useful as a heartbeat.\nfunc ServiceRoot(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == \"POST\" {\n\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\tif err == nil {\n\t\t\tlog.Info(\"Callback: %s\", body)\n\t\t}\n\t}\n\tw.Write([]byte(\".\"))\n}\n\n\/\/ Get a list of all the scripts in script folder.\nfunc GetAllScripts(w http.ResponseWriter, r *http.Request) {\n\n\t\/\/ Open and parse whitelist\n\tp := path.Join(config.Worker.ScriptDir, config.Worker.WhiteList)\n\tfile, err := os.Open(p)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\tscanner := bufio.NewScanner(file)\n\tscanner.Scan()\n\tlist := make([]string, 0)\n\tfor scanner.Scan() {\n\t\tlist = append(list, fmt.Sprintf(\"\\\"%s\\\"\", scanner.Text()))\n\t}\n\tvar buf bytes.Buffer\n\tbuf.WriteString(\"[\")\n\tbuf.WriteString(strings.Join(list, \", \"))\n\tbuf.WriteString(\"]\")\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(buf.Bytes())\n}\n\n\/\/ Reload the whitelist of scripts.\nfunc ReloadScripts(w http.ResponseWriter, r *http.Request) {\n\tp := path.Join(config.Worker.ScriptDir, config.Worker.WhiteList)\n\n\tdoneChan := make(chan *nsq.ProducerTransaction)\n\terr := producer.PublishAsync(\"reload\", []byte(p), doneChan)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t<-doneChan\n\n\tvar buf bytes.Buffer\n\tbuf.WriteString(\"Reload request sent\")\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(buf.Bytes())\n}\n\n\/\/ Send details to queue for execution.\nfunc RunScript(c web.C, w http.ResponseWriter, r *http.Request) {\n\t\/\/ Parse the request\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tvar sr ScriptRequest\n\terr = json.Unmarshal(body, &sr)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tid, err := getRedisID()\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tsr.ID = id\n\tsr.Script = c.URLParams[\"name\"]\n\n\t\/\/ Queue up the request\n\tdoneChan := make(chan *nsq.ProducerTransaction)\n\tdata, err := json.Marshal(sr)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\terr = producer.PublishAsync(config.Topic, data, doneChan)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t<-doneChan\n\tlog.Debug(\"Request queued as %d\", sr.ID)\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(data)\n}\n\n\/\/ Retrieve all logs for a specific script.\nfunc GetAllLogs(c web.C, w http.ResponseWriter, r *http.Request) {\n\tconn := redisDB.Get()\n\tdefer conn.Close()\n\n\t\/\/ LRANGE returns an array of json strings\n\treply, err := redis.Strings(conn.Do(\"ZRANGE\", c.URLParams[\"name\"], 0, -1))\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tvar buf bytes.Buffer\n\tbuf.WriteString(\"[\")\n\n\tlength := len(reply)\n\tif length > 0 {\n\t\tbuf.WriteString(reply[length-1])\n\t\tfor i := length - 2; i > 0; i-- {\n\t\t\tbuf.WriteString(\", \")\n\t\t\tbuf.WriteString(reply[i])\n\t\t}\n\t}\n\tbuf.WriteString(\"]\")\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(buf.Bytes())\n}\n\n\/\/ Retrieve a specific log of a specific script.\nfunc GetLog(c web.C, w http.ResponseWriter, r *http.Request) {\n\tconn := redisDB.Get()\n\tdefer conn.Close()\n\n\tscript := c.URLParams[\"name\"]\n\tid := c.URLParams[\"id\"]\n\n\treply, err := redis.Strings(conn.Do(\"ZRANGEBYSCORE\", script, id, id))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write([]byte(reply[0]))\n}\n<commit_msg>Match log level of enqueue with dequeue.<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\n\t\"github.com\/bitly\/go-nsq\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"github.com\/zenazn\/goji\/web\"\n)\n\ntype ScriptRequest struct {\n\tID int\n\tScript string `json:\"script\"`\n\tArgs []string `json:\"args\"`\n\tFiles map[string]string `json:\"files\"`\n\tCallbackURL string `json:\"callback_url\"`\n}\n\n\/\/ Handle the root route, also useful as a heartbeat.\nfunc ServiceRoot(w http.ResponseWriter, r *http.Request) {\n\tif r.Method == \"POST\" {\n\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\tif err == nil {\n\t\t\tlog.Info(\"Callback: %s\", body)\n\t\t}\n\t}\n\tw.Write([]byte(\".\"))\n}\n\n\/\/ Get a list of all the scripts in script folder.\nfunc GetAllScripts(w http.ResponseWriter, r *http.Request) {\n\n\t\/\/ Open and parse whitelist\n\tp := path.Join(config.Worker.ScriptDir, config.Worker.WhiteList)\n\tfile, err := os.Open(p)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\tscanner := bufio.NewScanner(file)\n\tscanner.Scan()\n\tlist := make([]string, 0)\n\tfor scanner.Scan() {\n\t\tlist = append(list, fmt.Sprintf(\"\\\"%s\\\"\", scanner.Text()))\n\t}\n\tvar buf bytes.Buffer\n\tbuf.WriteString(\"[\")\n\tbuf.WriteString(strings.Join(list, \", \"))\n\tbuf.WriteString(\"]\")\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(buf.Bytes())\n}\n\n\/\/ Reload the whitelist of scripts.\nfunc ReloadScripts(w http.ResponseWriter, r *http.Request) {\n\tp := path.Join(config.Worker.ScriptDir, config.Worker.WhiteList)\n\n\tdoneChan := make(chan *nsq.ProducerTransaction)\n\terr := producer.PublishAsync(\"reload\", []byte(p), doneChan)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t<-doneChan\n\n\tvar buf bytes.Buffer\n\tbuf.WriteString(\"Reload request sent\")\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(buf.Bytes())\n}\n\n\/\/ Send details to queue for execution.\nfunc RunScript(c web.C, w http.ResponseWriter, r *http.Request) {\n\t\/\/ Parse the request\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tvar sr ScriptRequest\n\terr = json.Unmarshal(body, &sr)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tid, err := getRedisID()\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tsr.ID = id\n\tsr.Script = c.URLParams[\"name\"]\n\n\t\/\/ Queue up the request\n\tdoneChan := make(chan *nsq.ProducerTransaction)\n\tdata, err := json.Marshal(sr)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\terr = producer.PublishAsync(config.Topic, data, doneChan)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t<-doneChan\n\tlog.Info(\"Request queued as %d\", sr.ID)\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(data)\n}\n\n\/\/ Retrieve all logs for a specific script.\nfunc GetAllLogs(c web.C, w http.ResponseWriter, r *http.Request) {\n\tconn := redisDB.Get()\n\tdefer conn.Close()\n\n\t\/\/ LRANGE returns an array of json strings\n\treply, err := redis.Strings(conn.Do(\"ZRANGE\", c.URLParams[\"name\"], 0, -1))\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tvar buf bytes.Buffer\n\tbuf.WriteString(\"[\")\n\n\tlength := len(reply)\n\tif length > 0 {\n\t\tbuf.WriteString(reply[length-1])\n\t\tfor i := length - 2; i > 0; i-- {\n\t\t\tbuf.WriteString(\", \")\n\t\t\tbuf.WriteString(reply[i])\n\t\t}\n\t}\n\tbuf.WriteString(\"]\")\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(buf.Bytes())\n}\n\n\/\/ Retrieve a specific log of a specific script.\nfunc GetLog(c web.C, w http.ResponseWriter, r *http.Request) {\n\tconn := redisDB.Get()\n\tdefer conn.Close()\n\n\tscript := c.URLParams[\"name\"]\n\tid := c.URLParams[\"id\"]\n\n\treply, err := redis.Strings(conn.Do(\"ZRANGEBYSCORE\", script, id, id))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write([]byte(reply[0]))\n}\n<|endoftext|>"} {"text":"<commit_before>package drax\n\nimport (\n\t\"io\"\n\t\"net\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/cpuguy83\/drax\/api\"\n\t\"github.com\/cpuguy83\/drax\/api\/errors\"\n\t\"github.com\/cpuguy83\/drax\/rpc\"\n\t\"github.com\/docker\/distribution\/registry\/api\/errcode\"\n\tlibkvstore \"github.com\/docker\/libkv\/store\"\n)\n\n\/\/ nodeRPC handles communcations for node-level actions(e.g., addNode, removeNode)\ntype nodeRPC struct {\n\t*rpc.StreamLayer\n\tr *Raft\n}\n\n\/\/ clientRPC handles communications with k\/v store clients\ntype clientRPC struct {\n\t*rpc.StreamLayer\n\ts *store\n}\n\nfunc (r *nodeRPC) addNode(req *rpc.Request) error {\n\treturn r.r.AddPeer(req.Args[0])\n}\n\nfunc (r *nodeRPC) removeNode(req *rpc.Request) error {\n\treturn r.r.RemovePeer(req.Args[0])\n}\n\nfunc (r *nodeRPC) handleConns() {\n\tfor {\n\t\tconn, err := r.Accept()\n\t\tif err != nil {\n\t\t\tif err == rpc.ErrClosedConn {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tgo r.handleConn(conn)\n\t}\n}\n\nfunc (r *nodeRPC) handleConn(conn net.Conn) {\n\tdefer conn.Close()\n\tvar req rpc.Request\n\tif err := api.Decode(&req, conn); err != nil {\n\t\tlogrus.Errorf(\"error handling K\/V RPC connection: %v\", err)\n\t\treturn\n\t}\n\n\tlogrus.Debugf(\"Got: %s(%s)\", req.Method, req.Args)\n\n\tvar h rpcHandlerFunc\n\n\tswitch req.Method {\n\tcase addNode:\n\t\th = r.addNode\n\tcase removeNode:\n\t\th = r.removeNode\n\t}\n\n\tif !r.r.IsLeader() {\n\t\tr.ProxyRequest(r.r.GetLeader(), &req, conn)\n\t\treturn\n\t}\n\n\tvar res rpc.Response\n\tif err := h(&req); err != nil {\n\t\tres.Err = err.Error()\n\t}\n\tapi.Encode(&res, conn)\n}\n\nfunc (r *clientRPC) Get(conn io.Writer, req *clientRequest) {\n\tvar res api.Response\n\tkv, err := r.s.Get(req.Key)\n\tif err != nil {\n\t\tsetError(&res, err)\n\t\tapi.Encode(&res, conn)\n\t\treturn\n\t}\n\tres.KV = libkvToKV(kv)\n\tapi.Encode(&res, conn)\n}\n\nfunc (r *clientRPC) Put(conn io.Writer, req *clientRequest) {\n\tvar res api.Response\n\terr := r.s.Put(req.Key, req.Value, &libkvstore.WriteOptions{TTL: req.TTL})\n\tif err != nil {\n\t\tsetError(&res, err)\n\t}\n\tapi.Encode(res, conn)\n}\n\nfunc waitClose(conn io.Reader, chStop chan struct{}) {\n\tbuf := make([]byte, 1)\n\tfor {\n\t\t_, err := conn.Read(buf)\n\t\tif err == io.EOF {\n\t\t\tclose(chStop)\n\t\t}\n\t\tcontinue\n\t}\n}\n\nfunc (r *clientRPC) Watch(conn io.Writer, req *clientRequest) {\n\tchStop := make(chan struct{})\n\tgo waitClose(req.body, chStop)\n\tchKv, err := r.s.Watch(req.Key, chStop)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor kv := range chKv {\n\t\tapi.Encode(libkvToKV(kv), conn)\n\t}\n}\n\nfunc (r *clientRPC) WatchTree(conn io.Writer, req *clientRequest) {\n\tchStop := make(chan struct{})\n\tgo waitClose(req.body, chStop)\n\tchKv, err := r.s.WatchTree(req.Key, chStop)\n\tif err != nil {\n\t\treturn\n\t}\n\tvar apiKVList []*api.KVPair\n\tfor kvList := range chKv {\n\t\tfor _, kv := range kvList {\n\t\t\tapiKVList = append(apiKVList, libkvToKV(kv))\n\t\t}\n\t\tapi.Encode(apiKVList, conn)\n\t\tapiKVList = nil\n\t}\n}\n\nfunc (r *clientRPC) List(conn io.Writer, req *clientRequest) {\n\tvar res api.Response\n\tls, err := r.s.List(req.Key)\n\tif err != nil {\n\t\tsetError(&res, err)\n\t}\n\n\tvar apiLs []*api.KVPair\n\tfor _, kv := range ls {\n\t\tapiLs = append(apiLs, libkvToKV(kv))\n\t}\n\tres.List = apiLs\n\tapi.NewEncoder(conn).Encode(&res)\n}\n\nfunc (r *clientRPC) DeleteTree(conn io.Writer, req *clientRequest) {\n\tvar res api.Response\n\tif err := r.s.DeleteTree(req.Key); err != nil {\n\t\tsetError(&res, err)\n\t}\n\tapi.NewEncoder(conn).Encode(&res)\n}\n\nfunc (r *clientRPC) Delete(conn io.Writer, req *clientRequest) {\n\tvar res api.Response\n\tif err := r.s.Delete(req.Key); err != nil {\n\t\tsetError(&res, err)\n\t}\n\tapi.NewEncoder(conn).Encode(&res)\n}\n\nfunc (r *clientRPC) Exists(conn io.Writer, req *clientRequest) {\n\tvar res api.Response\n\texists, err := r.s.Exists(req.Key)\n\tif err != nil {\n\t\tsetError(&res, err)\n\t}\n\tres.Exists = exists\n\tapi.NewEncoder(conn).Encode(&res)\n}\n\nfunc (r *clientRPC) AtomicPut(conn io.Writer, req *clientRequest) {\n\tvar res api.Response\n\tok, kv, err := r.s.AtomicPut(req.Key, req.Value, kvToLibKV(req.Previous), &libkvstore.WriteOptions{TTL: req.TTL})\n\tif err != nil {\n\t\tsetError(&res, err)\n\t} else {\n\t\tres.Completed = ok\n\t\tres.KV = libkvToKV(kv)\n\t}\n\tapi.NewEncoder(conn).Encode(&res)\n}\n\nfunc (r *clientRPC) AtomicDelete(conn io.Writer, req *clientRequest) {\n\tvar res api.Response\n\tok, err := r.s.AtomicDelete(req.Key, kvToLibKV(req.Previous))\n\tif err != nil {\n\t\tsetError(&res, err)\n\t}\n\tres.Completed = ok\n\tapi.NewEncoder(conn).Encode(&res)\n}\n\nfunc (r *clientRPC) handleConns() {\n\tfor {\n\t\tconn, err := r.Accept()\n\t\tif err != nil {\n\t\t\tif err == rpc.ErrClosedConn {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tgo r.handleConn(conn)\n\t}\n}\n\ntype clientRPCHandlerFn func(io.Writer, *clientRequest)\ntype clientRequest struct {\n\t*api.Request\n\tbody io.Reader\n}\n\nfunc (r *clientRPC) handleConn(conn net.Conn) {\n\tdefer conn.Close()\n\tvar req api.Request\n\tif err := api.Decode(&req, conn); err != nil {\n\t\tlogrus.Errorf(\"error handling K\/V RPC connection: %v\", err)\n\t\treturn\n\t}\n\n\tlogrus.Debugf(\"Got: %s(%s)\", req.Action, req.Key)\n\n\tvar h clientRPCHandlerFn\n\n\tswitch req.Action {\n\tcase api.Get:\n\t\th = r.Get\n\tcase api.Put:\n\t\th = r.Put\n\tcase api.Watch:\n\t\th = r.Watch\n\tcase api.WatchTree:\n\t\th = r.WatchTree\n\tcase api.List:\n\t\th = r.List\n\tcase api.DeleteTree:\n\t\th = r.DeleteTree\n\tcase api.AtomicPut:\n\t\th = r.AtomicPut\n\tcase api.AtomicDelete:\n\t\th = r.AtomicDelete\n\t}\n\n\th(conn, &clientRequest{&req, conn})\n}\n\nfunc libkvToKV(kv *libkvstore.KVPair) *api.KVPair {\n\treturn &api.KVPair{\n\t\tKey: kv.Key,\n\t\tValue: kv.Value,\n\t\tLastIndex: kv.LastIndex,\n\t}\n}\n\nfunc kvToLibKV(kv *api.KVPair) *libkvstore.KVPair {\n\treturn &libkvstore.KVPair{\n\t\tKey: kv.Key,\n\t\tValue: kv.Value,\n\t\tLastIndex: kv.LastIndex,\n\t}\n}\n\nfunc setError(res *api.Response, err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\tswitch err {\n\tcase ErrKeyNotFound:\n\t\te := errors.StoreKeyNotFound.WithMessage(err.Error())\n\t\tres.Err = &e\n\tcase ErrKeyModified:\n\t\te := errors.StoreKeyModified.WithMessage(err.Error())\n\t\tres.Err = &e\n\tcase ErrCallNotSupported:\n\t\te := errcode.ErrorCodeUnsupported.WithMessage(err.Error())\n\t\tres.Err = &e\n\tdefault:\n\t\te := errcode.ErrorCodeUnknown.WithMessage(err.Error())\n\t\tres.Err = &e\n\t}\n}\n<commit_msg>Add missing delete call from Client RPC handler<commit_after>package drax\n\nimport (\n\t\"io\"\n\t\"net\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/cpuguy83\/drax\/api\"\n\t\"github.com\/cpuguy83\/drax\/api\/errors\"\n\t\"github.com\/cpuguy83\/drax\/rpc\"\n\t\"github.com\/docker\/distribution\/registry\/api\/errcode\"\n\tlibkvstore \"github.com\/docker\/libkv\/store\"\n)\n\n\/\/ nodeRPC handles communcations for node-level actions(e.g., addNode, removeNode)\ntype nodeRPC struct {\n\t*rpc.StreamLayer\n\tr *Raft\n}\n\n\/\/ clientRPC handles communications with k\/v store clients\ntype clientRPC struct {\n\t*rpc.StreamLayer\n\ts *store\n}\n\nfunc (r *nodeRPC) addNode(req *rpc.Request) error {\n\treturn r.r.AddPeer(req.Args[0])\n}\n\nfunc (r *nodeRPC) removeNode(req *rpc.Request) error {\n\treturn r.r.RemovePeer(req.Args[0])\n}\n\nfunc (r *nodeRPC) handleConns() {\n\tfor {\n\t\tconn, err := r.Accept()\n\t\tif err != nil {\n\t\t\tif err == rpc.ErrClosedConn {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tgo r.handleConn(conn)\n\t}\n}\n\nfunc (r *nodeRPC) handleConn(conn net.Conn) {\n\tdefer conn.Close()\n\tvar req rpc.Request\n\tif err := api.Decode(&req, conn); err != nil {\n\t\tlogrus.Errorf(\"error handling K\/V RPC connection: %v\", err)\n\t\treturn\n\t}\n\n\tlogrus.Debugf(\"Got: %s(%s)\", req.Method, req.Args)\n\n\tvar h rpcHandlerFunc\n\n\tswitch req.Method {\n\tcase addNode:\n\t\th = r.addNode\n\tcase removeNode:\n\t\th = r.removeNode\n\t}\n\n\tif !r.r.IsLeader() {\n\t\tr.ProxyRequest(r.r.GetLeader(), &req, conn)\n\t\treturn\n\t}\n\n\tvar res rpc.Response\n\tif err := h(&req); err != nil {\n\t\tres.Err = err.Error()\n\t}\n\tapi.Encode(&res, conn)\n}\n\nfunc (r *clientRPC) Get(conn io.Writer, req *clientRequest) {\n\tvar res api.Response\n\tkv, err := r.s.Get(req.Key)\n\tif err != nil {\n\t\tsetError(&res, err)\n\t\tapi.Encode(&res, conn)\n\t\treturn\n\t}\n\tres.KV = libkvToKV(kv)\n\tapi.Encode(&res, conn)\n}\n\nfunc (r *clientRPC) Put(conn io.Writer, req *clientRequest) {\n\tvar res api.Response\n\terr := r.s.Put(req.Key, req.Value, &libkvstore.WriteOptions{TTL: req.TTL})\n\tif err != nil {\n\t\tsetError(&res, err)\n\t}\n\tapi.Encode(res, conn)\n}\n\nfunc waitClose(conn io.Reader, chStop chan struct{}) {\n\tbuf := make([]byte, 1)\n\tfor {\n\t\t_, err := conn.Read(buf)\n\t\tif err == io.EOF {\n\t\t\tclose(chStop)\n\t\t}\n\t\tcontinue\n\t}\n}\n\nfunc (r *clientRPC) Watch(conn io.Writer, req *clientRequest) {\n\tchStop := make(chan struct{})\n\tgo waitClose(req.body, chStop)\n\tchKv, err := r.s.Watch(req.Key, chStop)\n\tif err != nil {\n\t\treturn\n\t}\n\tfor kv := range chKv {\n\t\tapi.Encode(libkvToKV(kv), conn)\n\t}\n}\n\nfunc (r *clientRPC) WatchTree(conn io.Writer, req *clientRequest) {\n\tchStop := make(chan struct{})\n\tgo waitClose(req.body, chStop)\n\tchKv, err := r.s.WatchTree(req.Key, chStop)\n\tif err != nil {\n\t\treturn\n\t}\n\tvar apiKVList []*api.KVPair\n\tfor kvList := range chKv {\n\t\tfor _, kv := range kvList {\n\t\t\tapiKVList = append(apiKVList, libkvToKV(kv))\n\t\t}\n\t\tapi.Encode(apiKVList, conn)\n\t\tapiKVList = nil\n\t}\n}\n\nfunc (r *clientRPC) List(conn io.Writer, req *clientRequest) {\n\tvar res api.Response\n\tls, err := r.s.List(req.Key)\n\tif err != nil {\n\t\tsetError(&res, err)\n\t}\n\n\tvar apiLs []*api.KVPair\n\tfor _, kv := range ls {\n\t\tapiLs = append(apiLs, libkvToKV(kv))\n\t}\n\tres.List = apiLs\n\tapi.NewEncoder(conn).Encode(&res)\n}\n\nfunc (r *clientRPC) DeleteTree(conn io.Writer, req *clientRequest) {\n\tvar res api.Response\n\tif err := r.s.DeleteTree(req.Key); err != nil {\n\t\tsetError(&res, err)\n\t}\n\tapi.NewEncoder(conn).Encode(&res)\n}\n\nfunc (r *clientRPC) Delete(conn io.Writer, req *clientRequest) {\n\tvar res api.Response\n\tif err := r.s.Delete(req.Key); err != nil {\n\t\tsetError(&res, err)\n\t}\n\tapi.NewEncoder(conn).Encode(&res)\n}\n\nfunc (r *clientRPC) Exists(conn io.Writer, req *clientRequest) {\n\tvar res api.Response\n\texists, err := r.s.Exists(req.Key)\n\tif err != nil {\n\t\tsetError(&res, err)\n\t}\n\tres.Exists = exists\n\tapi.NewEncoder(conn).Encode(&res)\n}\n\nfunc (r *clientRPC) AtomicPut(conn io.Writer, req *clientRequest) {\n\tvar res api.Response\n\tok, kv, err := r.s.AtomicPut(req.Key, req.Value, kvToLibKV(req.Previous), &libkvstore.WriteOptions{TTL: req.TTL})\n\tif err != nil {\n\t\tsetError(&res, err)\n\t} else {\n\t\tres.Completed = ok\n\t\tres.KV = libkvToKV(kv)\n\t}\n\tapi.NewEncoder(conn).Encode(&res)\n}\n\nfunc (r *clientRPC) AtomicDelete(conn io.Writer, req *clientRequest) {\n\tvar res api.Response\n\tok, err := r.s.AtomicDelete(req.Key, kvToLibKV(req.Previous))\n\tif err != nil {\n\t\tsetError(&res, err)\n\t}\n\tres.Completed = ok\n\tapi.NewEncoder(conn).Encode(&res)\n}\n\nfunc (r *clientRPC) handleConns() {\n\tfor {\n\t\tconn, err := r.Accept()\n\t\tif err != nil {\n\t\t\tif err == rpc.ErrClosedConn {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tgo r.handleConn(conn)\n\t}\n}\n\ntype clientRPCHandlerFn func(io.Writer, *clientRequest)\ntype clientRequest struct {\n\t*api.Request\n\tbody io.Reader\n}\n\nfunc (r *clientRPC) handleConn(conn net.Conn) {\n\tdefer conn.Close()\n\tvar req api.Request\n\tif err := api.Decode(&req, conn); err != nil {\n\t\tlogrus.Errorf(\"error handling K\/V RPC connection: %v\", err)\n\t\treturn\n\t}\n\n\tlogrus.Debugf(\"Got: %s(%s)\", req.Action, req.Key)\n\n\tvar h clientRPCHandlerFn\n\n\tswitch req.Action {\n\tcase api.Get:\n\t\th = r.Get\n\tcase api.Put:\n\t\th = r.Put\n\tcase api.Watch:\n\t\th = r.Watch\n\tcase api.WatchTree:\n\t\th = r.WatchTree\n\tcase api.List:\n\t\th = r.List\n\tcase api.Delete:\n\t\th = r.Delete\n\tcase api.DeleteTree:\n\t\th = r.DeleteTree\n\tcase api.AtomicPut:\n\t\th = r.AtomicPut\n\tcase api.AtomicDelete:\n\t\th = r.AtomicDelete\n\t}\n\n\th(conn, &clientRequest{&req, conn})\n}\n\nfunc libkvToKV(kv *libkvstore.KVPair) *api.KVPair {\n\treturn &api.KVPair{\n\t\tKey: kv.Key,\n\t\tValue: kv.Value,\n\t\tLastIndex: kv.LastIndex,\n\t}\n}\n\nfunc kvToLibKV(kv *api.KVPair) *libkvstore.KVPair {\n\treturn &libkvstore.KVPair{\n\t\tKey: kv.Key,\n\t\tValue: kv.Value,\n\t\tLastIndex: kv.LastIndex,\n\t}\n}\n\nfunc setError(res *api.Response, err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\tswitch err {\n\tcase ErrKeyNotFound:\n\t\te := errors.StoreKeyNotFound.WithMessage(err.Error())\n\t\tres.Err = &e\n\tcase ErrKeyModified:\n\t\te := errors.StoreKeyModified.WithMessage(err.Error())\n\t\tres.Err = &e\n\tcase ErrCallNotSupported:\n\t\te := errcode.ErrorCodeUnsupported.WithMessage(err.Error())\n\t\tres.Err = &e\n\tdefault:\n\t\te := errcode.ErrorCodeUnknown.WithMessage(err.Error())\n\t\tres.Err = &e\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n\tGo library for simple i-doit api usage\n\n\tCopyright (C) 2017 Carsten Seeger\n\n\tThis program is free software: you can redistribute it and\/or modify\n\tit under the terms of the GNU General Public License as published by\n\tthe Free Software Foundation, either version 3 of the License, or\n\t(at your option) any later version.\n\n\tThis program is distributed in the hope that it will be useful,\n\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\tGNU General Public License for more details.\n\n\tYou should have received a copy of the GNU General Public License\n\talong with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\n\t@author Carsten Seeger\n\t@copyright Copyright (C) 2017 Carsten Seeger\n\t@license http:\/\/www.gnu.org\/licenses\/gpl-3.0 GNU General Public License 3\n\t@link https:\/\/github.com\/cseeger-epages\/i-doit-go-api\n*\/\n\npackage goidoit\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n)\n\n\/\/ request id\nvar id int = 0\n\n\/\/ basic api interface\ntype ApiMethods interface {\n\tRequest() (Response, error)\n\tSearch() (SearchResponse, error)\n\tGetObjectByID() (Response, error)\n\t\/*\n\t\tLogin()\n\t\tLogout()\n\t\tIsLoggedIn()\n\t*\/\n}\n\n\/\/ api struct used for implementing the apiMethods interface\ntype Api struct {\n\turl, apikey string\n}\n\n\/\/ i-doit api request structure\n\/\/ as defined in https:\/\/kb.i-doit.com\/pages\/viewpage.action?pageId=7831613\n\/\/ also there is a list of methods available\ntype Request struct {\n\tVersion string `json:\"version\"`\n\tMethod string `json:\"method\"`\n\tParams interface{} `json:\"params\"`\n\tId int `json:\"id\"`\n}\n\n\/\/ i-doit api response structure\ntype Response struct {\n\tJsonrpc string `json:\"jsonrpc\"`\n\tResult interface{} `json:\"result\"`\n\tError IdoitError `json:\"error\"`\n}\n\n\/\/ i-doit api response structure used for search requests\n\/\/\n\/\/ the map is used to handle type assertions\ntype SearchResponse struct {\n\tJsonrpc string\n\tResult []map[string]interface{}\n\tError IdoitError\n}\n\n\/\/ i-doit api error structure\ntype IdoitError struct {\n\tCode int `json:\"code\"`\n\tMessage string `json:\"message\"`\n\tData interface{} `json:\"data\"`\n}\n\n\/\/ Apikey used for requests\ntype Apikey struct {\n\tApikey string `json:\"apikey\"`\n}\n\n\/\/ api constructor\nfunc Newapi(url string, apikey string) (*Api, error) {\n\tif len(url) != 0 && len(apikey) != 0 {\n\t\ta := Api{url, apikey}\n\t\treturn &a, nil\n\t}\n\treturn nil, errors.New(\"url or apikey empty\")\n}\n\n\/\/ Request i-doit Api using method an parameter\n\/\/ parameter should be implemented as a struct\n\/\/ be sure to use uppercase first letter for your struct\n\/\/ entrys to make it public\nfunc (a Api) Request(method string, parameters interface{}) (Response, error) {\n\n\tvar params = GetParams(a, parameters)\n\tid = getID()\n\n\tdata := Request{\n\t\tVersion: \"2.0\",\n\t\tMethod: method,\n\t\tParams: params,\n\t\tId: id,\n\t}\n\n\tdataJson, err := json.Marshal(data)\n\n\t\/\/ logging tbd\n\t\/\/fmt.Println(\"Request: \", string(dataJson))\n\n\treq, err := http.NewRequest(\"POST\", a.url, bytes.NewBuffer(dataJson))\n\tif err != nil {\n\t\tfmt.Println(\"REQUEST ERROR: \", err)\n\t\treturn Response{}, err\n\t}\n\treq.Header.Add(\"content-type\", \"application\/json\")\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tfmt.Println(\"REQUEST ERROR: \", err)\n\t\treturn Response{}, err\n\t}\n\tvar ret = ParseResponse(resp)\n\treturn ret, nil\n}\n\n\/\/ search CMDB using a string\n\/\/\n\/\/ The search function does handle type assertions\n\/\/ for simple output usage\nfunc (a *Api) Search(query string) (SearchResponse, error) {\n\tparams := struct {\n\t\tQuery string `json:\"q\"`\n\t}{query}\n\tdata, err := a.Request(\"idoit.search\", ¶ms)\n\tif err != nil {\n\t\treturn SearchResponse{}, err\n\t}\n\n\t\/\/ do type assertions for easy output handling\n\tret := SearchResponse{Jsonrpc: data.Jsonrpc, Error: data.Error}\n\n\tret.Error.Data = \"\"\n\tif data.Error.Data != nil {\n\t\tret.Error.Data = data.Error.Data.(string)\n\t}\n\n\tresults := data.Result.([]interface{})\n\tfor i := range results {\n\t\tret.Result = append(ret.Result, results[i].(map[string]interface{}))\n\t}\n\treturn ret, nil\n}\n\n\/\/ increment request id's\nfunc getID() int {\n\tid++\n\treturn id\n}\n\n\/\/ append nessesary parameters to user provided one\nfunc GetParams(a Api, parameters interface{}) interface{} {\n\n\tvar params map[string]string\n\tapikey := Apikey{a.apikey}\n\n\tjsonParameters, err := json.Marshal(parameters)\n\n\tif err != nil {\n\t\tlog.Fatal(\"JSON ERROR: \", err)\n\t}\n\n\tjson.Unmarshal(jsonParameters, ¶ms)\n\tjsonApikey, err := json.Marshal(apikey)\n\n\tif err != nil {\n\t\tlog.Fatal(\"JSON ERROR: \", err)\n\t}\n\n\tjson.Unmarshal(jsonApikey, ¶ms)\n\n\treturn params\n}\n\n\/\/ parse json response\nfunc ParseResponse(resp *http.Response) Response {\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Fatal(\"PARSING ERROR: \", err)\n\t}\n\n\t\/\/ logging tbd\n\t\/\/fmt.Println(\"Response: \", string(data))\n\n\tvar ret Response\n\t_ = json.Unmarshal(data, &ret)\n\n\treturn ret\n}\n<commit_msg>change comments for interface methods<commit_after>\/*\n\tGo library for simple i-doit api usage\n\n\tCopyright (C) 2017 Carsten Seeger\n\n\tThis program is free software: you can redistribute it and\/or modify\n\tit under the terms of the GNU General Public License as published by\n\tthe Free Software Foundation, either version 3 of the License, or\n\t(at your option) any later version.\n\n\tThis program is distributed in the hope that it will be useful,\n\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\tGNU General Public License for more details.\n\n\tYou should have received a copy of the GNU General Public License\n\talong with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\n\t@author Carsten Seeger\n\t@copyright Copyright (C) 2017 Carsten Seeger\n\t@license http:\/\/www.gnu.org\/licenses\/gpl-3.0 GNU General Public License 3\n\t@link https:\/\/github.com\/cseeger-epages\/i-doit-go-api\n*\/\n\npackage goidoit\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n)\n\n\/\/ request id\nvar id int = 0\n\n\/\/ basic api interface\ntype ApiMethods interface {\n\t\/\/ i-doit api request structure\n\t\/\/ as defined in https:\/\/kb.i-doit.com\/pages\/viewpage.action?pageId=7831613\n\t\/\/ also there is a list of methods available\n\tRequest(string, interface{}) (Response, error)\n\n\t\/\/ search CMDB using a string\n\t\/\/\n\t\/\/ The search function does handle type assertions\n\t\/\/ for simple output usage\n\tSearch(string) (SearchResponse, error)\n\n\t\/\/ get object(s) data,\n\t\/\/ single id or slice of id's can be used\n\tGetObjectsByID([]int) (Response, error)\n\t\/*\n\t\tLogin()\n\t\tLogout()\n\t\tIsLoggedIn()\n\t*\/\n}\n\n\/\/ api struct used for implementing the apiMethods interface\ntype Api struct {\n\turl, apikey string\n}\n\ntype Request struct {\n\tVersion string `json:\"version\"`\n\tMethod string `json:\"method\"`\n\tParams interface{} `json:\"params\"`\n\tId int `json:\"id\"`\n}\n\n\/\/ i-doit api response structure\ntype Response struct {\n\tJsonrpc string `json:\"jsonrpc\"`\n\tResult interface{} `json:\"result\"`\n\tError IdoitError `json:\"error\"`\n}\n\n\/\/ i-doit api response structure used for search requests\n\/\/\n\/\/ the map is used to handle type assertions\ntype SearchResponse struct {\n\tJsonrpc string\n\tResult []map[string]interface{}\n\tError IdoitError\n}\n\n\/\/ i-doit api error structure\ntype IdoitError struct {\n\tCode int `json:\"code\"`\n\tMessage string `json:\"message\"`\n\tData interface{} `json:\"data\"`\n}\n\n\/\/ Apikey used for requests\ntype Apikey struct {\n\tApikey string `json:\"apikey\"`\n}\n\n\/\/ api constructor\nfunc Newapi(url string, apikey string) (*Api, error) {\n\tif len(url) != 0 && len(apikey) != 0 {\n\t\ta := Api{url, apikey}\n\t\treturn &a, nil\n\t}\n\treturn nil, errors.New(\"url or apikey empty\")\n}\n\nfunc (a Api) Request(method string, parameters interface{}) (Response, error) {\n\n\tvar params = GetParams(a, parameters)\n\tid = getID()\n\n\tdata := Request{\n\t\tVersion: \"2.0\",\n\t\tMethod: method,\n\t\tParams: params,\n\t\tId: id,\n\t}\n\n\tdataJson, err := json.Marshal(data)\n\n\t\/\/ logging tbd\n\t\/\/fmt.Println(\"Request: \", string(dataJson))\n\n\treq, err := http.NewRequest(\"POST\", a.url, bytes.NewBuffer(dataJson))\n\tif err != nil {\n\t\tfmt.Println(\"REQUEST ERROR: \", err)\n\t\treturn Response{}, err\n\t}\n\treq.Header.Add(\"content-type\", \"application\/json\")\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tfmt.Println(\"REQUEST ERROR: \", err)\n\t\treturn Response{}, err\n\t}\n\tvar ret = ParseResponse(resp)\n\treturn ret, nil\n}\n\nfunc (a *Api) Search(query string) (SearchResponse, error) {\n\tparams := struct {\n\t\tQuery string `json:\"q\"`\n\t}{query}\n\tdata, err := a.Request(\"idoit.search\", ¶ms)\n\tif err != nil {\n\t\treturn SearchResponse{}, err\n\t}\n\n\t\/\/ do type assertions for easy output handling\n\tret := SearchResponse{Jsonrpc: data.Jsonrpc, Error: data.Error}\n\n\tret.Error.Data = \"\"\n\tif data.Error.Data != nil {\n\t\tret.Error.Data = data.Error.Data.(string)\n\t}\n\n\tresults := data.Result.([]interface{})\n\tfor i := range results {\n\t\tret.Result = append(ret.Result, results[i].(map[string]interface{}))\n\t}\n\treturn ret, nil\n}\n\n\/\/ increment request id's\nfunc getID() int {\n\tid++\n\treturn id\n}\n\n\/\/ append nessesary parameters to user provided one\nfunc GetParams(a Api, parameters interface{}) interface{} {\n\n\tvar params map[string]string\n\tapikey := Apikey{a.apikey}\n\n\tjsonParameters, err := json.Marshal(parameters)\n\n\tif err != nil {\n\t\tlog.Fatal(\"JSON ERROR: \", err)\n\t}\n\n\tjson.Unmarshal(jsonParameters, ¶ms)\n\tjsonApikey, err := json.Marshal(apikey)\n\n\tif err != nil {\n\t\tlog.Fatal(\"JSON ERROR: \", err)\n\t}\n\n\tjson.Unmarshal(jsonApikey, ¶ms)\n\n\treturn params\n}\n\n\/\/ parse json response\nfunc ParseResponse(resp *http.Response) Response {\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Fatal(\"PARSING ERROR: \", err)\n\t}\n\n\t\/\/ logging tbd\n\t\/\/fmt.Println(\"Response: \", string(data))\n\n\tvar ret Response\n\t_ = json.Unmarshal(data, &ret)\n\n\treturn ret\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"github.com\/exercism\/cli\/configuration\"\n)\n\nconst VERSION = \"1.6.0\"\nconst USER_AGENT = \"github.com\/exercism\/cli v\" + VERSION\n\nvar FetchEndpoints = map[string]string{\n\t\"current\": \"\/api\/v1\/user\/assignments\/current\",\n\t\"next\": \"\/api\/v1\/user\/assignments\/next\",\n\t\"restore\": \"\/api\/v1\/user\/assignments\/restore\",\n\t\"demo\": \"\/api\/v1\/assignments\/demo\",\n\t\"exercise\": \"\/api\/v1\/assignments\",\n}\n\ntype submitResponse struct {\n\tId string\n\tStatus string\n\tLanguage string\n\tExercise string\n\tSubmissionPath string `json:\"submission_path\"`\n\tError string\n}\n\ntype submitRequest struct {\n\tKey string `json:\"key\"`\n\tCode string `json:\"code\"`\n\tPath string `json:\"path\"`\n}\n\nfunc FetchAssignments(config configuration.Config, path string) (as []Assignment, err error) {\n\turl := fmt.Sprintf(\"%s%s?key=%s\", config.Hostname, path, config.ApiKey)\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error fetching assignments: [%v]\", err)\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error fetching assignments: [%v]\", err)\n\t\treturn\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tvar apiError struct {\n\t\t\tError string `json:\"error\"`\n\t\t}\n\t\terr = json.Unmarshal(body, &apiError)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Error parsing API response: [%v]\", err)\n\t\t\treturn\n\t\t}\n\n\t\terr = fmt.Errorf(\"Error fetching assignments. HTTP Status Code: %d\\n%s\", resp.StatusCode, apiError.Error)\n\t\treturn\n\t}\n\n\tvar fr struct {\n\t\tAssignments []Assignment\n\t}\n\n\terr = json.Unmarshal(body, &fr)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error parsing API response: [%v]\", err)\n\t\treturn\n\t}\n\n\treturn fr.Assignments, err\n}\n\nfunc UnsubmitAssignment(config configuration.Config) (r string, err error) {\n\tpath := \"api\/v1\/user\/assignments\"\n\n\turl := fmt.Sprintf(\"%s\/%s?key=%s\", config.Hostname, path, config.ApiKey)\n\n\treq, err := http.NewRequest(\"DELETE\", url, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treq.Header.Set(\"User-Agent\", USER_AGENT)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error destroying submission: [%v]\", err)\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif resp.StatusCode != http.StatusNoContent {\n\n\t\tvar ur struct {\n\t\t\tError string\n\t\t}\n\n\t\terr = json.Unmarshal(body, &ur)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\terr = fmt.Errorf(\"Status: %d, Error: %v\", resp.StatusCode, ur.Error)\n\t\treturn ur.Error, err\n\t}\n\n\treturn\n}\nfunc SubmitAssignment(config configuration.Config, filePath string, code []byte) (r submitResponse, err error) {\n\tpath := \"api\/v1\/user\/assignments\"\n\n\turl := fmt.Sprintf(\"%s\/%s\", config.Hostname, path)\n\n\tsubmission := submitRequest{Key: config.ApiKey, Code: string(code), Path: filePath}\n\tsubmissionJson, err := json.Marshal(submission)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treq, err := http.NewRequest(\"POST\", url, bytes.NewReader(submissionJson))\n\tif err != nil {\n\t\treturn\n\t}\n\n\treq.Header.Set(\"User-Agent\", USER_AGENT)\n\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error posting assignment: [%v]\", err)\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif resp.StatusCode != http.StatusCreated {\n\t\terr = json.Unmarshal(body, &r)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\terr = fmt.Errorf(\"Status: %d, Error: %v\", resp.StatusCode, r)\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(body, &r)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error parsing API response: [%v]\", err)\n\t}\n\n\treturn\n}\n<commit_msg>Bump version<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\n\t\"github.com\/exercism\/cli\/configuration\"\n)\n\nconst VERSION = \"1.6.1\"\nconst USER_AGENT = \"github.com\/exercism\/cli v\" + VERSION\n\nvar FetchEndpoints = map[string]string{\n\t\"current\": \"\/api\/v1\/user\/assignments\/current\",\n\t\"next\": \"\/api\/v1\/user\/assignments\/next\",\n\t\"restore\": \"\/api\/v1\/user\/assignments\/restore\",\n\t\"demo\": \"\/api\/v1\/assignments\/demo\",\n\t\"exercise\": \"\/api\/v1\/assignments\",\n}\n\ntype submitResponse struct {\n\tId string\n\tStatus string\n\tLanguage string\n\tExercise string\n\tSubmissionPath string `json:\"submission_path\"`\n\tError string\n}\n\ntype submitRequest struct {\n\tKey string `json:\"key\"`\n\tCode string `json:\"code\"`\n\tPath string `json:\"path\"`\n}\n\nfunc FetchAssignments(config configuration.Config, path string) (as []Assignment, err error) {\n\turl := fmt.Sprintf(\"%s%s?key=%s\", config.Hostname, path, config.ApiKey)\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error fetching assignments: [%v]\", err)\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error fetching assignments: [%v]\", err)\n\t\treturn\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tvar apiError struct {\n\t\t\tError string `json:\"error\"`\n\t\t}\n\t\terr = json.Unmarshal(body, &apiError)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"Error parsing API response: [%v]\", err)\n\t\t\treturn\n\t\t}\n\n\t\terr = fmt.Errorf(\"Error fetching assignments. HTTP Status Code: %d\\n%s\", resp.StatusCode, apiError.Error)\n\t\treturn\n\t}\n\n\tvar fr struct {\n\t\tAssignments []Assignment\n\t}\n\n\terr = json.Unmarshal(body, &fr)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error parsing API response: [%v]\", err)\n\t\treturn\n\t}\n\n\treturn fr.Assignments, err\n}\n\nfunc UnsubmitAssignment(config configuration.Config) (r string, err error) {\n\tpath := \"api\/v1\/user\/assignments\"\n\n\turl := fmt.Sprintf(\"%s\/%s?key=%s\", config.Hostname, path, config.ApiKey)\n\n\treq, err := http.NewRequest(\"DELETE\", url, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treq.Header.Set(\"User-Agent\", USER_AGENT)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error destroying submission: [%v]\", err)\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif resp.StatusCode != http.StatusNoContent {\n\n\t\tvar ur struct {\n\t\t\tError string\n\t\t}\n\n\t\terr = json.Unmarshal(body, &ur)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\terr = fmt.Errorf(\"Status: %d, Error: %v\", resp.StatusCode, ur.Error)\n\t\treturn ur.Error, err\n\t}\n\n\treturn\n}\nfunc SubmitAssignment(config configuration.Config, filePath string, code []byte) (r submitResponse, err error) {\n\tpath := \"api\/v1\/user\/assignments\"\n\n\turl := fmt.Sprintf(\"%s\/%s\", config.Hostname, path)\n\n\tsubmission := submitRequest{Key: config.ApiKey, Code: string(code), Path: filePath}\n\tsubmissionJson, err := json.Marshal(submission)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treq, err := http.NewRequest(\"POST\", url, bytes.NewReader(submissionJson))\n\tif err != nil {\n\t\treturn\n\t}\n\n\treq.Header.Set(\"User-Agent\", USER_AGENT)\n\treq.Header.Set(\"Content-Type\", \"application\/json\")\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error posting assignment: [%v]\", err)\n\t\treturn\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif resp.StatusCode != http.StatusCreated {\n\t\terr = json.Unmarshal(body, &r)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\terr = fmt.Errorf(\"Status: %d, Error: %v\", resp.StatusCode, r)\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(body, &r)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error parsing API response: [%v]\", err)\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package apidemic\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/pmylund\/go-cache\"\n)\n\n\/\/Version is the version of apidemic. Apidemic uses semver.\nconst Version = \"0.1\"\n\nvar maxItemTime = cache.DefaultExpiration\n\nvar store = func() *cache.Cache {\n\tc := cache.New(5*time.Minute, 30*time.Second)\n\treturn c\n}()\n\n\/\/API is the struct for the json object that is passed to apidemic for registration.\ntype API struct {\n\tEndpoint string `json:\"endpoint\"`\n\tPayload map[string]interface{} `json:\"payload\"`\n}\n\n\/\/ Home renders hopme page. It renders a json response with information about the service.\nfunc Home(w http.ResponseWriter, r *http.Request) {\n\tdetails := make(map[string]interface{})\n\tdetails[\"app_name\"] = \"ApiDemic\"\n\tdetails[\"version\"] = Version\n\tdetails[\"details\"] = \"Fake JSON API response\"\n\tRenderJSON(w, http.StatusOK, details)\n\treturn\n}\n\n\/\/ RenderJSON helder for rndering JSON response, it marshals value into json and writes\n\/\/ it into w.\nfunc RenderJSON(w http.ResponseWriter, code int, value interface{}) {\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\terr := json.NewEncoder(w).Encode(value)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\n\/\/RegisterEndpoint receives API objects and registers them. The payload from the request is\n\/\/ transformed into a self aware Value that is capable of faking its own attribute.\nfunc RegisterEndpoint(w http.ResponseWriter, r *http.Request) {\n\ta := API{}\n\terr := json.NewDecoder(r.Body).Decode(&a)\n\tif err != nil {\n\t\tRenderJSON(w, http.StatusInternalServerError, NewResponse(err.Error()))\n\t\treturn\n\t}\n\tif _, ok := store.Get(a.Endpoint); ok {\n\t\tRenderJSON(w, http.StatusOK, NewResponse(\"endpoint already taken\"))\n\t\treturn\n\t}\n\tobj := NewObject()\n\terr = obj.Load(a.Payload)\n\tif err != nil {\n\t\tRenderJSON(w, http.StatusInternalServerError, NewResponse(err.Error()))\n\t\treturn\n\t}\n\tstore.Set(a.Endpoint, obj, maxItemTime)\n\tRenderJSON(w, http.StatusOK, NewResponse(\"cool\"))\n}\n\n\/\/GetEndpoint renders registered endpoints.\nfunc GetEndpoint(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tendpoint := vars[\"endpoint\"]\n\tif eVal, ok := store.Get(endpoint); ok {\n\t\tRenderJSON(w, http.StatusOK, eVal)\n\t\treturn\n\t}\n\tRenderJSON(w, http.StatusNotFound, NewResponse(\"apidemic: \"+endpoint+\" is not found\"))\n}\n\n\/\/NewResponse helper for response JSON message\nfunc NewResponse(message string) interface{} {\n\treturn struct {\n\t\tText string `json:\"text\"`\n\t}{\n\t\tmessage,\n\t}\n}\n\n\/\/NewServer returns a new apidemic server\nfunc NewServer() *mux.Router {\n\tm := mux.NewRouter()\n\tm.HandleFunc(\"\/\", Home)\n\tm.HandleFunc(\"\/register\", RegisterEndpoint).Methods(\"POST\")\n\tm.HandleFunc(\"\/api\/{endpoint}\", GetEndpoint).Methods(\"GET\")\n\treturn m\n}\n<commit_msg>Bymp to version 0.2<commit_after>package apidemic\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/pmylund\/go-cache\"\n)\n\n\/\/Version is the version of apidemic. Apidemic uses semver.\nconst Version = \"0.2\"\n\nvar maxItemTime = cache.DefaultExpiration\n\nvar store = func() *cache.Cache {\n\tc := cache.New(5*time.Minute, 30*time.Second)\n\treturn c\n}()\n\n\/\/API is the struct for the json object that is passed to apidemic for registration.\ntype API struct {\n\tEndpoint string `json:\"endpoint\"`\n\tPayload map[string]interface{} `json:\"payload\"`\n}\n\n\/\/ Home renders hopme page. It renders a json response with information about the service.\nfunc Home(w http.ResponseWriter, r *http.Request) {\n\tdetails := make(map[string]interface{})\n\tdetails[\"app_name\"] = \"ApiDemic\"\n\tdetails[\"version\"] = Version\n\tdetails[\"details\"] = \"Fake JSON API response\"\n\tRenderJSON(w, http.StatusOK, details)\n\treturn\n}\n\n\/\/ RenderJSON helder for rndering JSON response, it marshals value into json and writes\n\/\/ it into w.\nfunc RenderJSON(w http.ResponseWriter, code int, value interface{}) {\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\terr := json.NewEncoder(w).Encode(value)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\n\/\/RegisterEndpoint receives API objects and registers them. The payload from the request is\n\/\/ transformed into a self aware Value that is capable of faking its own attribute.\nfunc RegisterEndpoint(w http.ResponseWriter, r *http.Request) {\n\ta := API{}\n\terr := json.NewDecoder(r.Body).Decode(&a)\n\tif err != nil {\n\t\tRenderJSON(w, http.StatusInternalServerError, NewResponse(err.Error()))\n\t\treturn\n\t}\n\tif _, ok := store.Get(a.Endpoint); ok {\n\t\tRenderJSON(w, http.StatusOK, NewResponse(\"endpoint already taken\"))\n\t\treturn\n\t}\n\tobj := NewObject()\n\terr = obj.Load(a.Payload)\n\tif err != nil {\n\t\tRenderJSON(w, http.StatusInternalServerError, NewResponse(err.Error()))\n\t\treturn\n\t}\n\tstore.Set(a.Endpoint, obj, maxItemTime)\n\tRenderJSON(w, http.StatusOK, NewResponse(\"cool\"))\n}\n\n\/\/GetEndpoint renders registered endpoints.\nfunc GetEndpoint(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tendpoint := vars[\"endpoint\"]\n\tif eVal, ok := store.Get(endpoint); ok {\n\t\tRenderJSON(w, http.StatusOK, eVal)\n\t\treturn\n\t}\n\tRenderJSON(w, http.StatusNotFound, NewResponse(\"apidemic: \"+endpoint+\" is not found\"))\n}\n\n\/\/NewResponse helper for response JSON message\nfunc NewResponse(message string) interface{} {\n\treturn struct {\n\t\tText string `json:\"text\"`\n\t}{\n\t\tmessage,\n\t}\n}\n\n\/\/NewServer returns a new apidemic server\nfunc NewServer() *mux.Router {\n\tm := mux.NewRouter()\n\tm.HandleFunc(\"\/\", Home)\n\tm.HandleFunc(\"\/register\", RegisterEndpoint).Methods(\"POST\")\n\tm.HandleFunc(\"\/api\/{endpoint}\", GetEndpoint).Methods(\"GET\")\n\treturn m\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package fbapi provides wrappers to access the Facebook API.\npackage fbapi\n\nimport (\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/nshah\/go.fburl\"\n\t\"github.com\/nshah\/go.httpcontrol\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst redactedStub = \"$1=-- XX -- REDACTED -- XX --\"\n\nvar (\n\tinsecureSSL = flag.Bool(\n\t\t\"fbapi.insecure\", false, \"Skip SSL certificate validation.\")\n\tredact = flag.Bool(\n\t\t\"fbapi.redact\",\n\t\ttrue,\n\t\t\"When true known sensitive information will be stripped from errors.\")\n\ttimeout = flag.Duration(\n\t\t\"fbapi.timeout\",\n\t\t5*time.Second,\n\t\t\"Timeout for http requests.\")\n\tmaxTries = flag.Uint(\n\t\t\"fbapi.max-tries\",\n\t\t3,\n\t\t\"Number of retries for known safe to retry calls.\")\n\tcleanURLRegExp = regexp.MustCompile(\"(access_token|client_secret)=([^&]*)\")\n\thttpClientCache *http.Client\n)\n\n\/\/ An Error from the API.\ntype Error struct {\n\tMessage string `json:\"message\"`\n\tType string `json:\"type\"`\n\tCode int `json:\"code\"`\n\tBody []byte\n}\n\n\/\/ Wrapper for \"error\"\ntype errorResponse struct {\n\tError Error `json:\"error\"`\n}\n\n\/\/ Represents a thing that wants to modify the url.Values.\ntype Values interface {\n\tSet(url.Values)\n}\n\n\/\/ Represents an \"access_token\" for the Facebook API.\ntype Token string\n\nconst (\n\tPublicToken = Token(\"\")\n)\n\n\/\/ Generic Page options for list type queries.\ntype Page struct {\n\tLimit int\n\tOffset int\n}\n\n\/\/ Set the corresponding values for the Page.\nfunc (page Page) Set(values url.Values) {\n\tif page.Limit != 0 {\n\t\tvalues.Set(\"limit\", strconv.Itoa(page.Limit))\n\t}\n\tif page.Offset != 0 {\n\t\tvalues.Set(\"offset\", strconv.Itoa(page.Offset))\n\t}\n}\n\n\/\/ A slice of field names.\ntype Fields []string\n\n\/\/ For selecting fields.\nfunc (fields Fields) Set(values url.Values) {\n\tif len(fields) > 0 {\n\t\tvalues.Set(\"fields\", strings.Join(fields, \",\"))\n\t}\n}\n\n\/\/ Set the token if necessary.\nfunc (token Token) Set(values url.Values) {\n\tif token != PublicToken {\n\t\tvalues.Set(\"access_token\", string(token))\n\t}\n}\n\n\/\/ String representation as defined by the error interface.\nfunc (e *Error) Error() string {\n\treturn fmt.Sprintf(\"API call failed with error body:\\n%s\", string(e.Body))\n}\n\n\/\/ Disable SSL cert, useful when debugging or hitting internal self-signed certs\nfunc httpClient() *http.Client {\n\tif httpClientCache == nil {\n\t\thttpClientCache = &http.Client{\n\t\t\tTransport: &httpcontrol.Control{\n\t\t\t\tTransport: &http.Transport{\n\t\t\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: *insecureSSL},\n\t\t\t\t},\n\t\t\t\tTimeout: *timeout,\n\t\t\t\tMaxTries: *maxTries,\n\t\t\t},\n\t\t}\n\t}\n\treturn httpClientCache\n}\n\n\/\/ remove known sensitive tokens from data\nfunc cleanURL(url string) string {\n\tif *redact {\n\t\treturn cleanURLRegExp.ReplaceAllString(url, redactedStub)\n\t}\n\treturn url\n}\n\n\/\/ Make a GET Graph API request and get the raw body byte slice.\nfunc GetRaw(path string, values url.Values) ([]byte, error) {\n\tconst phpRFC3339 = `Y-m-d\\TH:i:s\\Z`\n\tvalues.Set(\"date_format\", phpRFC3339)\n\tu := &fburl.URL{\n\t\tScheme: \"https\",\n\t\tSubDomain: fburl.DGraph,\n\t\tPath: path,\n\t\tValues: values,\n\t}\n\tresp, err := httpClient().Get(u.String())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"Request for URL %s failed with error %s.\", cleanURL(u.String()), err)\n\t}\n\tdefer resp.Body.Close()\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"Request for URL %s failed because body could not be read \"+\n\t\t\t\t\"with error %s.\",\n\t\t\tcleanURL(u.String()), err)\n\t}\n\tif resp.StatusCode > 399 || resp.StatusCode < 200 {\n\t\tapiError := &errorResponse{Error{Body: b}}\n\t\terr = json.Unmarshal(b, apiError)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\"Parsing error response failed with %s:\\n%s\", err, string(b))\n\t\t}\n\t\treturn nil, &apiError.Error\n\t}\n\treturn b, nil\n}\n\n\/\/ Make a GET Graph API request.\nfunc Get(result interface{}, path string, values ...Values) error {\n\tfinal := url.Values{}\n\tfor _, v := range values {\n\t\tv.Set(final)\n\t}\n\tb, err := GetRaw(path, final)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = json.Unmarshal(b, result)\n\tif err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"Request for path %s with response %s failed with \"+\n\t\t\t\t\"json.Unmarshal error %s.\",\n\t\t\tcleanURL(path), string(b), err)\n\t}\n\treturn nil\n}\n<commit_msg>reckless username change<commit_after>\/\/ Package fbapi provides wrappers to access the Facebook API.\npackage fbapi\n\nimport (\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/daaku\/go.fburl\"\n\t\"github.com\/daaku\/go.httpcontrol\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst redactedStub = \"$1=-- XX -- REDACTED -- XX --\"\n\nvar (\n\tinsecureSSL = flag.Bool(\n\t\t\"fbapi.insecure\", false, \"Skip SSL certificate validation.\")\n\tredact = flag.Bool(\n\t\t\"fbapi.redact\",\n\t\ttrue,\n\t\t\"When true known sensitive information will be stripped from errors.\")\n\ttimeout = flag.Duration(\n\t\t\"fbapi.timeout\",\n\t\t5*time.Second,\n\t\t\"Timeout for http requests.\")\n\tmaxTries = flag.Uint(\n\t\t\"fbapi.max-tries\",\n\t\t3,\n\t\t\"Number of retries for known safe to retry calls.\")\n\tcleanURLRegExp = regexp.MustCompile(\"(access_token|client_secret)=([^&]*)\")\n\thttpClientCache *http.Client\n)\n\n\/\/ An Error from the API.\ntype Error struct {\n\tMessage string `json:\"message\"`\n\tType string `json:\"type\"`\n\tCode int `json:\"code\"`\n\tBody []byte\n}\n\n\/\/ Wrapper for \"error\"\ntype errorResponse struct {\n\tError Error `json:\"error\"`\n}\n\n\/\/ Represents a thing that wants to modify the url.Values.\ntype Values interface {\n\tSet(url.Values)\n}\n\n\/\/ Represents an \"access_token\" for the Facebook API.\ntype Token string\n\nconst (\n\tPublicToken = Token(\"\")\n)\n\n\/\/ Generic Page options for list type queries.\ntype Page struct {\n\tLimit int\n\tOffset int\n}\n\n\/\/ Set the corresponding values for the Page.\nfunc (page Page) Set(values url.Values) {\n\tif page.Limit != 0 {\n\t\tvalues.Set(\"limit\", strconv.Itoa(page.Limit))\n\t}\n\tif page.Offset != 0 {\n\t\tvalues.Set(\"offset\", strconv.Itoa(page.Offset))\n\t}\n}\n\n\/\/ A slice of field names.\ntype Fields []string\n\n\/\/ For selecting fields.\nfunc (fields Fields) Set(values url.Values) {\n\tif len(fields) > 0 {\n\t\tvalues.Set(\"fields\", strings.Join(fields, \",\"))\n\t}\n}\n\n\/\/ Set the token if necessary.\nfunc (token Token) Set(values url.Values) {\n\tif token != PublicToken {\n\t\tvalues.Set(\"access_token\", string(token))\n\t}\n}\n\n\/\/ String representation as defined by the error interface.\nfunc (e *Error) Error() string {\n\treturn fmt.Sprintf(\"API call failed with error body:\\n%s\", string(e.Body))\n}\n\n\/\/ Disable SSL cert, useful when debugging or hitting internal self-signed certs\nfunc httpClient() *http.Client {\n\tif httpClientCache == nil {\n\t\thttpClientCache = &http.Client{\n\t\t\tTransport: &httpcontrol.Control{\n\t\t\t\tTransport: &http.Transport{\n\t\t\t\t\tProxy: http.ProxyFromEnvironment,\n\t\t\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: *insecureSSL},\n\t\t\t\t},\n\t\t\t\tTimeout: *timeout,\n\t\t\t\tMaxTries: *maxTries,\n\t\t\t},\n\t\t}\n\t}\n\treturn httpClientCache\n}\n\n\/\/ remove known sensitive tokens from data\nfunc cleanURL(url string) string {\n\tif *redact {\n\t\treturn cleanURLRegExp.ReplaceAllString(url, redactedStub)\n\t}\n\treturn url\n}\n\n\/\/ Make a GET Graph API request and get the raw body byte slice.\nfunc GetRaw(path string, values url.Values) ([]byte, error) {\n\tconst phpRFC3339 = `Y-m-d\\TH:i:s\\Z`\n\tvalues.Set(\"date_format\", phpRFC3339)\n\tu := &fburl.URL{\n\t\tScheme: \"https\",\n\t\tSubDomain: fburl.DGraph,\n\t\tPath: path,\n\t\tValues: values,\n\t}\n\tresp, err := httpClient().Get(u.String())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"Request for URL %s failed with error %s.\", cleanURL(u.String()), err)\n\t}\n\tdefer resp.Body.Close()\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"Request for URL %s failed because body could not be read \"+\n\t\t\t\t\"with error %s.\",\n\t\t\tcleanURL(u.String()), err)\n\t}\n\tif resp.StatusCode > 399 || resp.StatusCode < 200 {\n\t\tapiError := &errorResponse{Error{Body: b}}\n\t\terr = json.Unmarshal(b, apiError)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\"Parsing error response failed with %s:\\n%s\", err, string(b))\n\t\t}\n\t\treturn nil, &apiError.Error\n\t}\n\treturn b, nil\n}\n\n\/\/ Make a GET Graph API request.\nfunc Get(result interface{}, path string, values ...Values) error {\n\tfinal := url.Values{}\n\tfor _, v := range values {\n\t\tv.Set(final)\n\t}\n\tb, err := GetRaw(path, final)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = json.Unmarshal(b, result)\n\tif err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"Request for path %s with response %s failed with \"+\n\t\t\t\t\"json.Unmarshal error %s.\",\n\t\t\tcleanURL(path), string(b), err)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"net\"\n\t\"strings\"\n)\n\n\/\/ Returns client authentication token from header or url params\nfunc getClientToken(c *gin.Context) string {\n\t\/\/ Try fetching auth token from headers first\n\ttoken := c.Request.Header.Get(\"Token\")\n\n\t\/\/ Try to fetch from url param if blank\n\tif token == \"\" {\n\t\tif len(c.Request.URL.Query()[\"token\"]) > 0 {\n\t\t\ttoken = c.Request.URL.Query()[\"token\"][0]\n\t\t}\n\t}\n\n\treturn token\n}\n\n\/\/ Returns a list of all available services\nfunc renderAvailableServices(c *gin.Context) {\n\t\/\/ Check if authentication is enabled\n\tif options.Auth {\n\t\ttoken := getClientToken(c)\n\n\t\tif token != options.Token {\n\t\t\tc.String(401, \"Invalid token\")\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\t}\n\n\tnames := []string{}\n\tfor _, svc := range services {\n\t\tnames = append(names, svc.Name)\n\t}\n\n\tc.String(200, strings.Join(names, \"\\n\")+\"\\n\")\n}\n\n\/\/ Returns a list of all service environments\nfunc renderServiceEnvironments(c *gin.Context) {\n\t\/\/ Check if authentication is enabled\n\tif options.Auth {\n\t\ttoken := getClientToken(c)\n\n\t\tif token != options.Token {\n\t\t\tc.String(401, \"Invalid token\")\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\t}\n\n\tserviceName := c.Params.ByName(\"service\")\n\tservice, err := getService(serviceName)\n\n\tif err != nil {\n\t\tc.String(400, err.Error()+\"\\n\")\n\t\treturn\n\t}\n\n\tnames := strings.Join(service.EnvironmentNames(), \"\\n\") + \"\\n\"\n\tc.String(200, names)\n}\n\nfunc renderServiceEnvironment(c *gin.Context) {\n\tserviceName := c.Params.ByName(\"service\")\n\tenvName := c.Params.ByName(\"env\")\n\n\tenvironment, err := getEnvironment(serviceName, envName)\n\n\t\/\/ Respond with 400 if service does not exist\n\t\/\/ Todo: maybe respond with 404\n\tif err != nil {\n\t\tc.String(400, err.Error()+\"\\n\")\n\t\treturn\n\t}\n\n\t\/\/ Get remote IP address\n\thost, _, err := net.SplitHostPort(c.Request.RemoteAddr)\n\n\tif err != nil {\n\t\tc.String(400, err.Error()+\"\\n\")\n\t\treturn\n\t}\n\n\t\/\/ Check if environment has allowed hosts. Localhost is allowed.\n\tif host != \"::1\" && len(environment.Hosts) > 0 {\n\t\t\/\/ Reject requests from non-whitelisted hosts\n\t\tif environment.HostEnabled(host) == false {\n\t\t\tc.String(401, \"Restricted\\n\")\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Fetch token from url param or from the header\n\ttoken := getClientToken(c)\n\n\t\/\/ Validate environment token if its set, otherwise check agains global token\n\tif environment.Token != \"\" {\n\t\tif token != environment.Token {\n\t\t\tc.String(401, \"Restricted\\n\")\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif options.Auth && token != options.Token {\n\t\t\tc.String(401, \"Restricted\\n\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tc.String(200, environment.ToString()+\"\\n\")\n}\n\nfunc startServer() {\n\thost := fmt.Sprintf(\"%s:%d\", options.Host, options.Port)\n\tapi := gin.Default()\n\n\tapi.GET(\"\/\", renderAvailableServices)\n\tapi.GET(\"\/:service\", renderServiceEnvironments)\n\tapi.GET(\"\/:service\/:env\", renderServiceEnvironment)\n\n\tif options.Auth {\n\t\tfmt.Println(\"authentication enabled\")\n\t}\n\n\tfmt.Println(\"starting server on\", host)\n\tapi.Run(host)\n}\n<commit_msg>Go fmt<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"strings\"\n\n\t\"github.com\/gin-gonic\/gin\"\n)\n\n\/\/ Returns client authentication token from header or url params\nfunc getClientToken(c *gin.Context) string {\n\t\/\/ Try fetching auth token from headers first\n\ttoken := c.Request.Header.Get(\"Token\")\n\n\t\/\/ Try to fetch from url param if blank\n\tif token == \"\" {\n\t\tif len(c.Request.URL.Query()[\"token\"]) > 0 {\n\t\t\ttoken = c.Request.URL.Query()[\"token\"][0]\n\t\t}\n\t}\n\n\treturn token\n}\n\n\/\/ Returns a list of all available services\nfunc renderAvailableServices(c *gin.Context) {\n\t\/\/ Check if authentication is enabled\n\tif options.Auth {\n\t\ttoken := getClientToken(c)\n\n\t\tif token != options.Token {\n\t\t\tc.String(401, \"Invalid token\")\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\t}\n\n\tnames := []string{}\n\tfor _, svc := range services {\n\t\tnames = append(names, svc.Name)\n\t}\n\n\tc.String(200, strings.Join(names, \"\\n\")+\"\\n\")\n}\n\n\/\/ Returns a list of all service environments\nfunc renderServiceEnvironments(c *gin.Context) {\n\t\/\/ Check if authentication is enabled\n\tif options.Auth {\n\t\ttoken := getClientToken(c)\n\n\t\tif token != options.Token {\n\t\t\tc.String(401, \"Invalid token\")\n\t\t\tc.Abort()\n\t\t\treturn\n\t\t}\n\t}\n\n\tserviceName := c.Params.ByName(\"service\")\n\tservice, err := getService(serviceName)\n\n\tif err != nil {\n\t\tc.String(400, err.Error()+\"\\n\")\n\t\treturn\n\t}\n\n\tnames := strings.Join(service.EnvironmentNames(), \"\\n\") + \"\\n\"\n\tc.String(200, names)\n}\n\nfunc renderServiceEnvironment(c *gin.Context) {\n\tserviceName := c.Params.ByName(\"service\")\n\tenvName := c.Params.ByName(\"env\")\n\n\tenvironment, err := getEnvironment(serviceName, envName)\n\n\t\/\/ Respond with 400 if service does not exist\n\t\/\/ Todo: maybe respond with 404\n\tif err != nil {\n\t\tc.String(400, err.Error()+\"\\n\")\n\t\treturn\n\t}\n\n\t\/\/ Get remote IP address\n\thost, _, err := net.SplitHostPort(c.Request.RemoteAddr)\n\n\tif err != nil {\n\t\tc.String(400, err.Error()+\"\\n\")\n\t\treturn\n\t}\n\n\t\/\/ Check if environment has allowed hosts. Localhost is allowed.\n\tif host != \"::1\" && len(environment.Hosts) > 0 {\n\t\t\/\/ Reject requests from non-whitelisted hosts\n\t\tif environment.HostEnabled(host) == false {\n\t\t\tc.String(401, \"Restricted\\n\")\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Fetch token from url param or from the header\n\ttoken := getClientToken(c)\n\n\t\/\/ Validate environment token if its set, otherwise check agains global token\n\tif environment.Token != \"\" {\n\t\tif token != environment.Token {\n\t\t\tc.String(401, \"Restricted\\n\")\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif options.Auth && token != options.Token {\n\t\t\tc.String(401, \"Restricted\\n\")\n\t\t\treturn\n\t\t}\n\t}\n\n\tc.String(200, environment.ToString()+\"\\n\")\n}\n\nfunc startServer() {\n\thost := fmt.Sprintf(\"%s:%d\", options.Host, options.Port)\n\tapi := gin.Default()\n\n\tapi.GET(\"\/\", renderAvailableServices)\n\tapi.GET(\"\/:service\", renderServiceEnvironments)\n\tapi.GET(\"\/:service\/:env\", renderServiceEnvironment)\n\n\tif options.Auth {\n\t\tfmt.Println(\"authentication enabled\")\n\t}\n\n\tfmt.Println(\"starting server on\", host)\n\tapi.Run(host)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ vim: ts=8 sw=8 noet ai\n\npackage perigee\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\n\/\/ The UnexpectedResponseCodeError structure represents a mismatch in understanding between server and client in terms of response codes.\n\/\/ Most often, this is due to an actual error condition (e.g., getting a 404 for a resource when you expect a 200).\n\/\/ However, it needn't always be the case (e.g., getting a 204 (No Content) response back when a 200 is expected).\ntype UnexpectedResponseCodeError struct {\n\tUrl string\n\tExpected []int\n\tActual int\n\tBody []byte\n}\n\nfunc (err *UnexpectedResponseCodeError) Error() string {\n\treturn fmt.Sprintf(\"Expected HTTP response code %d when accessing URL(%s); got %d instead with the following body:\\n%s\", err.Expected, err.Url, err.Actual, string(err.Body))\n}\n\n\/\/ Request issues an HTTP request, marshaling parameters, and unmarshaling results, as configured in the provided Options parameter.\n\/\/ The Response structure returned, if any, will include accumulated results recovered from the HTTP server.\n\/\/ See the Response structure for more details.\nfunc Request(method string, url string, opts Options) (*Response, error) {\n\tvar body io.Reader\n\tvar response Response\n\n\tclient := opts.CustomClient\n\tif client == nil {\n\t\tclient = new(http.Client)\n\t}\n\n\tcontentType := opts.ContentType\n\tif contentType == \"\" {\n\t\tcontentType = \"application\/json\"\n\t}\n\n\taccept := opts.Accept\n\tif accept == \"\" {\n\t\taccept = \"application\/json\"\n\t}\n\n\tbody = nil\n\tif opts.ReqBody != nil {\n\t\tif contentType == \"application\/json\" {\n\t\t\tbodyText, err := json.Marshal(opts.ReqBody)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbody = strings.NewReader(string(bodyText))\n\t\t\tif opts.DumpReqJson {\n\t\t\t\tlog.Printf(\"Making request:\\n%#v\\n\", string(bodyText))\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ assume opts.ReqBody implements the correct interface\n\t\t\tbody = opts.ReqBody.(io.Reader)\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(method, url, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"Content-Type\", contentType)\n\treq.Header.Add(\"Accept\", accept)\n\n\tif opts.ContentLength > 0 {\n\t\treq.ContentLength = opts.ContentLength\n\t\treq.Header.Add(\"Content-Length\", string(opts.ContentLength))\n\t}\n\n\tif opts.MoreHeaders != nil {\n\t\tfor k, v := range opts.MoreHeaders {\n\t\t\treq.Header.Add(k, v)\n\t\t}\n\t}\n\n\tif opts.SetHeaders != nil {\n\t\terr = opts.SetHeaders(req)\n\t\tif err != nil {\n\t\t\treturn &response, err\n\t\t}\n\t}\n\n\thttpResponse, err := client.Do(req)\n\tif httpResponse != nil {\n\t\tresponse.HttpResponse = *httpResponse\n\t\tresponse.StatusCode = httpResponse.StatusCode\n\t}\n\n\tif err != nil {\n\t\treturn &response, err\n\t}\n\t\/\/ This if-statement is legacy code, preserved for backward compatibility.\n\tif opts.StatusCode != nil {\n\t\t*opts.StatusCode = httpResponse.StatusCode\n\t}\n\n\tacceptableResponseCodes := opts.OkCodes\n\tif len(acceptableResponseCodes) != 0 {\n\t\tif not_in(httpResponse.StatusCode, acceptableResponseCodes) {\n\t\t\tb, _ := ioutil.ReadAll(httpResponse.Body)\n\t\t\thttpResponse.Body.Close()\n\t\t\treturn &response, &UnexpectedResponseCodeError{\n\t\t\t\tUrl: url,\n\t\t\t\tExpected: acceptableResponseCodes,\n\t\t\t\tActual: httpResponse.StatusCode,\n\t\t\t\tBody: b,\n\t\t\t}\n\t\t}\n\t}\n\tif opts.Results != nil {\n\t\tdefer httpResponse.Body.Close()\n\t\tjsonResult, err := ioutil.ReadAll(httpResponse.Body)\n\t\tresponse.JsonResult = jsonResult\n\t\tif err != nil {\n\t\t\treturn &response, err\n\t\t}\n\n\t\terr = json.Unmarshal(jsonResult, opts.Results)\n\t\t\/\/ This if-statement is legacy code, preserved for backward compatibility.\n\t\tif opts.ResponseJson != nil {\n\t\t\t*opts.ResponseJson = jsonResult\n\t\t}\n\t}\n\treturn &response, err\n}\n\n\/\/ not_in returns false if, and only if, the provided needle is _not_\n\/\/ in the given set of integers.\nfunc not_in(needle int, haystack []int) bool {\n\tfor _, straw := range haystack {\n\t\tif needle == straw {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Post makes a POST request against a server using the provided HTTP client.\n\/\/ The url must be a fully-formed URL string.\n\/\/ DEPRECATED. Use Request() instead.\nfunc Post(url string, opts Options) error {\n\tr, err := Request(\"POST\", url, opts)\n\tif opts.Response != nil {\n\t\t*opts.Response = r\n\t}\n\treturn err\n}\n\n\/\/ Get makes a GET request against a server using the provided HTTP client.\n\/\/ The url must be a fully-formed URL string.\n\/\/ DEPRECATED. Use Request() instead.\nfunc Get(url string, opts Options) error {\n\tr, err := Request(\"GET\", url, opts)\n\tif opts.Response != nil {\n\t\t*opts.Response = r\n\t}\n\treturn err\n}\n\n\/\/ Delete makes a DELETE request against a server using the provided HTTP client.\n\/\/ The url must be a fully-formed URL string.\n\/\/ DEPRECATED. Use Request() instead.\nfunc Delete(url string, opts Options) error {\n\tr, err := Request(\"DELETE\", url, opts)\n\tif opts.Response != nil {\n\t\t*opts.Response = r\n\t}\n\treturn err\n}\n\n\/\/ Put makes a PUT request against a server using the provided HTTP client.\n\/\/ The url must be a fully-formed URL string.\n\/\/ DEPRECATED. Use Request() instead.\nfunc Put(url string, opts Options) error {\n\tr, err := Request(\"PUT\", url, opts)\n\tif opts.Response != nil {\n\t\t*opts.Response = r\n\t}\n\treturn err\n}\n\n\/\/ Options describes a set of optional parameters to the various request calls.\n\/\/\n\/\/ The custom client can be used for a variety of purposes beyond selecting encrypted versus unencrypted channels.\n\/\/ Transports can be defined to provide augmented logging, header manipulation, et. al.\n\/\/\n\/\/ If the ReqBody field is provided, it will be embedded as a JSON object.\n\/\/ Otherwise, provide nil.\n\/\/\n\/\/ If JSON output is to be expected from the response,\n\/\/ provide either a pointer to the container structure in Results,\n\/\/ or a pointer to a nil-initialized pointer variable.\n\/\/ The latter method will cause the unmarshaller to allocate the container type for you.\n\/\/ If no response is expected, provide a nil Results value.\n\/\/\n\/\/ The MoreHeaders map, if non-nil or empty, provides a set of headers to add to those\n\/\/ already present in the request. At present, only Accepted and Content-Type are set\n\/\/ by default.\n\/\/\n\/\/ OkCodes provides a set of acceptable, positive responses.\n\/\/\n\/\/ If provided, StatusCode specifies a pointer to an integer, which will receive the\n\/\/ returned HTTP status code, successful or not. DEPRECATED; use the Response.StatusCode field instead for new software.\n\/\/\n\/\/ ResponseJson, if specified, provides a means for returning the raw JSON. This is\n\/\/ most useful for diagnostics. DEPRECATED; use the Response.JsonResult field instead for new software.\n\/\/\n\/\/ DumpReqJson, if set to true, will cause the request to appear to stdout for debugging purposes.\n\/\/ This attribute may be removed at any time in the future; DO NOT use this attribute in production software.\n\/\/\n\/\/ Response, if set, provides a way to communicate the complete set of HTTP response, raw JSON, status code, and\n\/\/ other useful attributes back to the caller. Note that the Request() method returns a Response structure as part\n\/\/ of its public interface; you don't need to set the Response field here to use this structure. The Response field\n\/\/ exists primarily for legacy or deprecated functions.\n\/\/\n\/\/ SetHeaders allows the caller to provide code to set any custom headers programmatically. Typically, this\n\/\/ facility can invoke, e.g., SetBasicAuth() on the request to easily set up authentication.\n\/\/ Any error generated will terminate the request and will propegate back to the caller.\ntype Options struct {\n\tCustomClient *http.Client\n\tReqBody interface{}\n\tResults interface{}\n\tMoreHeaders map[string]string\n\tOkCodes []int\n\tStatusCode *int `DEPRECATED`\n\tDumpReqJson bool `UNSUPPORTED`\n\tResponseJson *[]byte `DEPRECATED`\n\tResponse **Response\n\tContentType string `json:\"Content-Type,omitempty\"`\n\tContentLength int64 `json:\"Content-Length,omitempty\"`\n\tAccept string `json:\"Accept,omitempty\"`\n\tSetHeaders func(r *http.Request) error\n}\n\n\/\/ Response contains return values from the various request calls.\n\/\/\n\/\/ HttpResponse will return the http response from the request call.\n\/\/ Note: HttpResponse.Body is always closed and will not be available from this return value.\n\/\/\n\/\/ StatusCode specifies the returned HTTP status code, successful or not.\n\/\/\n\/\/ If Results is specified in the Options:\n\/\/ - JsonResult will contain the raw return from the request call\n\/\/ This is most useful for diagnostics.\n\/\/ - Result will contain the unmarshalled json either in the Result passed in\n\/\/ or the unmarshaller will allocate the container type for you.\n\ntype Response struct {\n\tHttpResponse http.Response\n\tJsonResult []byte\n\tResults interface{}\n\tStatusCode int\n}\n<commit_msg>Ensuring that bodiless requests aren't sent with Content or Accept headers<commit_after>\/\/ vim: ts=8 sw=8 noet ai\n\npackage perigee\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\n\/\/ The UnexpectedResponseCodeError structure represents a mismatch in understanding between server and client in terms of response codes.\n\/\/ Most often, this is due to an actual error condition (e.g., getting a 404 for a resource when you expect a 200).\n\/\/ However, it needn't always be the case (e.g., getting a 204 (No Content) response back when a 200 is expected).\ntype UnexpectedResponseCodeError struct {\n\tUrl string\n\tExpected []int\n\tActual int\n\tBody []byte\n}\n\nfunc (err *UnexpectedResponseCodeError) Error() string {\n\treturn fmt.Sprintf(\"Expected HTTP response code %d when accessing URL(%s); got %d instead with the following body:\\n%s\", err.Expected, err.Url, err.Actual, string(err.Body))\n}\n\n\/\/ Request issues an HTTP request, marshaling parameters, and unmarshaling results, as configured in the provided Options parameter.\n\/\/ The Response structure returned, if any, will include accumulated results recovered from the HTTP server.\n\/\/ See the Response structure for more details.\nfunc Request(method string, url string, opts Options) (*Response, error) {\n\tvar body io.Reader\n\tvar response Response\n\n\tclient := opts.CustomClient\n\tif client == nil {\n\t\tclient = new(http.Client)\n\t}\n\n\tcontentType := opts.ContentType\n\taccept := opts.Accept\n\n\tbody = nil\n\tif opts.ReqBody != nil {\n\t\tif contentType == \"\" {\n\t\t\tcontentType = \"application\/json\"\n\t\t}\n\t\tif accept == \"\" {\n\t\t\taccept = \"application\/json\"\n\t\t}\n\n\t\tif contentType == \"application\/json\" {\n\t\t\tbodyText, err := json.Marshal(opts.ReqBody)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbody = strings.NewReader(string(bodyText))\n\t\t\tif opts.DumpReqJson {\n\t\t\t\tlog.Printf(\"Making request:\\n%#v\\n\", string(bodyText))\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ assume opts.ReqBody implements the correct interface\n\t\t\tbody = opts.ReqBody.(io.Reader)\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(method, url, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif contentType != \"\" {\n\t\treq.Header.Add(\"Content-Type\", contentType)\n\t}\n\n\tif accept != \"\" {\n\t\treq.Header.Add(\"Accept\", accept)\n\t}\n\n\tif opts.ContentLength > 0 {\n\t\treq.ContentLength = opts.ContentLength\n\t\treq.Header.Add(\"Content-Length\", string(opts.ContentLength))\n\t}\n\n\tif opts.MoreHeaders != nil {\n\t\tfor k, v := range opts.MoreHeaders {\n\t\t\treq.Header.Add(k, v)\n\t\t}\n\t}\n\n\tif opts.SetHeaders != nil {\n\t\terr = opts.SetHeaders(req)\n\t\tif err != nil {\n\t\t\treturn &response, err\n\t\t}\n\t}\n\n\thttpResponse, err := client.Do(req)\n\tif httpResponse != nil {\n\t\tresponse.HttpResponse = *httpResponse\n\t\tresponse.StatusCode = httpResponse.StatusCode\n\t}\n\n\tif err != nil {\n\t\treturn &response, err\n\t}\n\t\/\/ This if-statement is legacy code, preserved for backward compatibility.\n\tif opts.StatusCode != nil {\n\t\t*opts.StatusCode = httpResponse.StatusCode\n\t}\n\n\tacceptableResponseCodes := opts.OkCodes\n\tif len(acceptableResponseCodes) != 0 {\n\t\tif not_in(httpResponse.StatusCode, acceptableResponseCodes) {\n\t\t\tb, _ := ioutil.ReadAll(httpResponse.Body)\n\t\t\thttpResponse.Body.Close()\n\t\t\treturn &response, &UnexpectedResponseCodeError{\n\t\t\t\tUrl: url,\n\t\t\t\tExpected: acceptableResponseCodes,\n\t\t\t\tActual: httpResponse.StatusCode,\n\t\t\t\tBody: b,\n\t\t\t}\n\t\t}\n\t}\n\tif opts.Results != nil {\n\t\tdefer httpResponse.Body.Close()\n\t\tjsonResult, err := ioutil.ReadAll(httpResponse.Body)\n\t\tresponse.JsonResult = jsonResult\n\t\tif err != nil {\n\t\t\treturn &response, err\n\t\t}\n\n\t\terr = json.Unmarshal(jsonResult, opts.Results)\n\t\t\/\/ This if-statement is legacy code, preserved for backward compatibility.\n\t\tif opts.ResponseJson != nil {\n\t\t\t*opts.ResponseJson = jsonResult\n\t\t}\n\t}\n\treturn &response, err\n}\n\n\/\/ not_in returns false if, and only if, the provided needle is _not_\n\/\/ in the given set of integers.\nfunc not_in(needle int, haystack []int) bool {\n\tfor _, straw := range haystack {\n\t\tif needle == straw {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Post makes a POST request against a server using the provided HTTP client.\n\/\/ The url must be a fully-formed URL string.\n\/\/ DEPRECATED. Use Request() instead.\nfunc Post(url string, opts Options) error {\n\tr, err := Request(\"POST\", url, opts)\n\tif opts.Response != nil {\n\t\t*opts.Response = r\n\t}\n\treturn err\n}\n\n\/\/ Get makes a GET request against a server using the provided HTTP client.\n\/\/ The url must be a fully-formed URL string.\n\/\/ DEPRECATED. Use Request() instead.\nfunc Get(url string, opts Options) error {\n\tr, err := Request(\"GET\", url, opts)\n\tif opts.Response != nil {\n\t\t*opts.Response = r\n\t}\n\treturn err\n}\n\n\/\/ Delete makes a DELETE request against a server using the provided HTTP client.\n\/\/ The url must be a fully-formed URL string.\n\/\/ DEPRECATED. Use Request() instead.\nfunc Delete(url string, opts Options) error {\n\tr, err := Request(\"DELETE\", url, opts)\n\tif opts.Response != nil {\n\t\t*opts.Response = r\n\t}\n\treturn err\n}\n\n\/\/ Put makes a PUT request against a server using the provided HTTP client.\n\/\/ The url must be a fully-formed URL string.\n\/\/ DEPRECATED. Use Request() instead.\nfunc Put(url string, opts Options) error {\n\tr, err := Request(\"PUT\", url, opts)\n\tif opts.Response != nil {\n\t\t*opts.Response = r\n\t}\n\treturn err\n}\n\n\/\/ Options describes a set of optional parameters to the various request calls.\n\/\/\n\/\/ The custom client can be used for a variety of purposes beyond selecting encrypted versus unencrypted channels.\n\/\/ Transports can be defined to provide augmented logging, header manipulation, et. al.\n\/\/\n\/\/ If the ReqBody field is provided, it will be embedded as a JSON object.\n\/\/ Otherwise, provide nil.\n\/\/\n\/\/ If JSON output is to be expected from the response,\n\/\/ provide either a pointer to the container structure in Results,\n\/\/ or a pointer to a nil-initialized pointer variable.\n\/\/ The latter method will cause the unmarshaller to allocate the container type for you.\n\/\/ If no response is expected, provide a nil Results value.\n\/\/\n\/\/ The MoreHeaders map, if non-nil or empty, provides a set of headers to add to those\n\/\/ already present in the request. At present, only Accepted and Content-Type are set\n\/\/ by default.\n\/\/\n\/\/ OkCodes provides a set of acceptable, positive responses.\n\/\/\n\/\/ If provided, StatusCode specifies a pointer to an integer, which will receive the\n\/\/ returned HTTP status code, successful or not. DEPRECATED; use the Response.StatusCode field instead for new software.\n\/\/\n\/\/ ResponseJson, if specified, provides a means for returning the raw JSON. This is\n\/\/ most useful for diagnostics. DEPRECATED; use the Response.JsonResult field instead for new software.\n\/\/\n\/\/ DumpReqJson, if set to true, will cause the request to appear to stdout for debugging purposes.\n\/\/ This attribute may be removed at any time in the future; DO NOT use this attribute in production software.\n\/\/\n\/\/ Response, if set, provides a way to communicate the complete set of HTTP response, raw JSON, status code, and\n\/\/ other useful attributes back to the caller. Note that the Request() method returns a Response structure as part\n\/\/ of its public interface; you don't need to set the Response field here to use this structure. The Response field\n\/\/ exists primarily for legacy or deprecated functions.\n\/\/\n\/\/ SetHeaders allows the caller to provide code to set any custom headers programmatically. Typically, this\n\/\/ facility can invoke, e.g., SetBasicAuth() on the request to easily set up authentication.\n\/\/ Any error generated will terminate the request and will propegate back to the caller.\ntype Options struct {\n\tCustomClient *http.Client\n\tReqBody interface{}\n\tResults interface{}\n\tMoreHeaders map[string]string\n\tOkCodes []int\n\tStatusCode *int `DEPRECATED`\n\tDumpReqJson bool `UNSUPPORTED`\n\tResponseJson *[]byte `DEPRECATED`\n\tResponse **Response\n\tContentType string `json:\"Content-Type,omitempty\"`\n\tContentLength int64 `json:\"Content-Length,omitempty\"`\n\tAccept string `json:\"Accept,omitempty\"`\n\tSetHeaders func(r *http.Request) error\n}\n\n\/\/ Response contains return values from the various request calls.\n\/\/\n\/\/ HttpResponse will return the http response from the request call.\n\/\/ Note: HttpResponse.Body is always closed and will not be available from this return value.\n\/\/\n\/\/ StatusCode specifies the returned HTTP status code, successful or not.\n\/\/\n\/\/ If Results is specified in the Options:\n\/\/ - JsonResult will contain the raw return from the request call\n\/\/ This is most useful for diagnostics.\n\/\/ - Result will contain the unmarshalled json either in the Result passed in\n\/\/ or the unmarshaller will allocate the container type for you.\n\ntype Response struct {\n\tHttpResponse http.Response\n\tJsonResult []byte\n\tResults interface{}\n\tStatusCode int\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Don't pass SinceSeconds as this seems to cause logs to sometimes not appear.<commit_after><|endoftext|>"} {"text":"<commit_before>package app\n\nimport (\n\t\"bytes\"\n\t\"image\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"os\"\n\n\t_ \"image\/jpeg\"\n\t_ \"image\/png\"\n\n\tfb \"github.com\/huandu\/facebook\"\n\n\t\"appengine\"\n\t\"appengine\/datastore\"\n\t\"appengine\/urlfetch\"\n)\n\nvar FbApp = fb.New(\"526791527487217\", \"e314e5fc761425d59ea9e2666c63e108\")\nvar aboutParams = fb.Params{\n\t\"method\": fb.GET,\n\t\"relative_url\": \"me\",\n\t\"fields\": \"name,email,gender,age_range,hometown\",\n}\n\nvar photoParams = fb.Params{\n\t\"method\": fb.GET,\n\t\"relative_url\": \"me\/picture?width=320&height=320&redirect=false\",\n}\n\nfunc init() {\n\thttp.HandleFunc(\"\/static\/\", StaticHandler)\n\thttp.HandleFunc(\"\/\", MainHandler)\n\n\tfb.Debug = fb.DEBUG_ALL\n\tFbApp.EnableAppsecretProof = true\n}\n\nfunc StaticHandler(w http.ResponseWriter, r *http.Request) {\n\tpath := \".\" + r.URL.Path\n\n\tif f, err := os.Stat(path); err == nil && !f.IsDir() {\n\t\thttp.ServeFile(w, r, path)\n\t\treturn\n\t}\n\n\thttp.NotFound(w, r)\n}\n\nfunc MainHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch {\n\tcase r.URL.Path != \"\/\":\n\t\thttp.NotFound(w, r)\n\tcase r.Method == \"GET\":\n\t\thandleGet(w, r)\n\tcase r.Method == \"POST\":\n\t\thandlePost(w, r)\n\t}\n\n\treturn\n}\n\nfunc handleGet(w http.ResponseWriter, r *http.Request) {\n\tcontext := appengine.NewContext(r)\n\tdata, err := ioutil.ReadFile(\"index.html\")\n\tcheck(err, context)\n\tw.Write(data)\n}\n\ntype Log struct {\n\tName string\n\tGender string\n\tParty string\n\tEmail string\n\tAgeRange string\n\tHometown string\n}\n\nfunc handlePost(w http.ResponseWriter, r *http.Request) {\n\tcontext := appengine.NewContext(r)\n\n\tr.ParseForm()\n\taccess_token := r.FormValue(\"access_token\")\n\tparty := r.FormValue(\"party\")\n\tcontext.Infof(\"party = %s\", party)\n\n\tsession := FbApp.Session(access_token)\n\tsession.HttpClient = urlfetch.Client(context)\n\terr := session.Validate()\n\tcheck(err, context)\n\n\tresults, err := session.BatchApi(aboutParams, photoParams)\n\tcheck(err, context)\n\n\taboutBatch, err := results[0].Batch()\n\tcheck(err, context)\n\tphotoBatch, err := results[1].Batch()\n\tcheck(err, context)\n\n\taboutResp := aboutBatch.Result\n\tphotoResp := photoBatch.Result\n\n\tSaveAboutUser(&aboutResp, context)\n\tprofilePicture := GetUserPhoto(&photoResp, context)\n\n\timagebytes := addLogo(profilePicture, party, context)\n\tform, mime := CreateImageForm(imagebytes, context, r.Host)\n\n\turl := \"https:\/\/graph.facebook.com\/me\/photos\" +\n\t\t\"?access_token=\" + access_token +\n\t\t\"&appsecret_proof=\" + session.AppsecretProof()\n\t\/\/+ \"&no_story=true\"\n\n\tuploadResquest, _ := http.NewRequest(\"POST\", url, &form)\n\tuploadResquest.Header.Set(\"Content-Type\", mime)\n\tuploadResp, _ := session.Request(uploadResquest)\n\tcheck(err, context)\n\n\tvar photoID string\n\tuploadResp.DecodeField(\"id\", &photoID)\n\tredirectUrl := \"https:\/\/facebook.com\/photo.php?fbid=\" + photoID + \"&makeprofile=1&prof\"\n\thttp.Redirect(w, r, redirectUrl, 303)\n}\n\nfunc SaveAboutUser(aboutResp *fb.Result, context appengine.Context) {\n\tvar log Log\n\taboutResp.Decode(&log)\n\n\tvar ageRange map[string]string\n\taboutResp.DecodeField(\"age_range\", &ageRange)\n\tlog.AgeRange = ageRange[\"min\"]\n\n\t_, err := datastore.Put(context,\n\t\tdatastore.NewIncompleteKey(context, \"log\", nil),\n\t\t&log)\n\tcheck(err, context)\n}\n\nfunc GetUserPhoto(photoResp *fb.Result, context appengine.Context) *image.Image {\n\tvar dataField fb.Result\n\tphotoResp.DecodeField(\"data\", &dataField)\n\n\tvar url string\n\tdataField.DecodeField(\"url\", &url)\n\n\tclient := urlfetch.Client(context)\n\tresp, err := client.Get(url)\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\tcheck(err, context)\n\n\treader := bytes.NewReader(data)\n\tprofilePicture, _, err := image.Decode(reader)\n\tcheck(err, context)\n\n\treturn &profilePicture\n}\n\nfunc CreateImageForm(imageBytes []byte, context appengine.Context, host string) (bytes.Buffer, string) {\n\tvar formBuffer bytes.Buffer\n\tmultiWriter := multipart.NewWriter(&formBuffer)\n\n\timageField, err := multiWriter.CreateFormFile(\"source\", \"election.png\")\n\tcheck(err, context)\n\n\timageBuffer := bytes.NewBuffer(imageBytes)\n\t_, err = io.Copy(imageField, imageBuffer)\n\tcheck(err, context)\n\n\tmessageField, err := multiWriter.CreateFormField(\"caption\")\n\tcheck(err, context)\n\t_, err = messageField.Write([]byte(\"Created at http:\/\/\" + host))\n\tcheck(err, context)\n\n\tmultiWriter.Close()\n\n\treturn formBuffer, multiWriter.FormDataContentType()\n}\n<commit_msg>added privacy url<commit_after>package app\n\nimport (\n\t\"bytes\"\n\t\"image\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"mime\/multipart\"\n\t\"net\/http\"\n\t\"os\"\n\n\t_ \"image\/jpeg\"\n\t_ \"image\/png\"\n\n\tfb \"github.com\/huandu\/facebook\"\n\n\t\"appengine\"\n\t\"appengine\/datastore\"\n\t\"appengine\/urlfetch\"\n)\n\nvar FbApp = fb.New(\"526791527487217\", \"e314e5fc761425d59ea9e2666c63e108\")\nvar aboutParams = fb.Params{\n\t\"method\": fb.GET,\n\t\"relative_url\": \"me\",\n\t\"fields\": \"name,email,gender,age_range,hometown\",\n}\n\nvar photoParams = fb.Params{\n\t\"method\": fb.GET,\n\t\"relative_url\": \"me\/picture?width=320&height=320&redirect=false\",\n}\n\nfunc init() {\n\thttp.HandleFunc(\"\/static\/\", StaticHandler)\n\thttp.HandleFunc(\"\/\", MainHandler)\n\n\tfb.Debug = fb.DEBUG_ALL\n\tFbApp.EnableAppsecretProof = true\n}\n\nfunc StaticHandler(w http.ResponseWriter, r *http.Request) {\n\tpath := \".\" + r.URL.Path\n\n\tif f, err := os.Stat(path); err == nil && !f.IsDir() {\n\t\thttp.ServeFile(w, r, path)\n\t\treturn\n\t}\n\n\thttp.NotFound(w, r)\n}\n\nfunc MainHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch {\n\tcase r.URL.Path == \"\/privacy\":\n\t\tw.Write([]byte(\"Coming Soon\"))\n\tcase r.URL.Path != \"\/\":\n\t\thttp.NotFound(w, r)\n\tcase r.Method == \"GET\":\n\t\thandleGet(w, r)\n\tcase r.Method == \"POST\":\n\t\thandlePost(w, r)\n\t}\n\n\treturn\n}\n\nfunc handleGet(w http.ResponseWriter, r *http.Request) {\n\tcontext := appengine.NewContext(r)\n\tdata, err := ioutil.ReadFile(\"index.html\")\n\tcheck(err, context)\n\tw.Write(data)\n}\n\ntype Log struct {\n\tName string\n\tGender string\n\tParty string\n\tEmail string\n\tAgeRange string\n\tHometown string\n}\n\nfunc handlePost(w http.ResponseWriter, r *http.Request) {\n\tcontext := appengine.NewContext(r)\n\n\tr.ParseForm()\n\taccess_token := r.FormValue(\"access_token\")\n\tparty := r.FormValue(\"party\")\n\tcontext.Infof(\"party = %s\", party)\n\n\tsession := FbApp.Session(access_token)\n\tsession.HttpClient = urlfetch.Client(context)\n\terr := session.Validate()\n\tcheck(err, context)\n\n\tresults, err := session.BatchApi(aboutParams, photoParams)\n\tcheck(err, context)\n\n\taboutBatch, err := results[0].Batch()\n\tcheck(err, context)\n\tphotoBatch, err := results[1].Batch()\n\tcheck(err, context)\n\n\taboutResp := aboutBatch.Result\n\tphotoResp := photoBatch.Result\n\n\tSaveAboutUser(&aboutResp, context)\n\tprofilePicture := GetUserPhoto(&photoResp, context)\n\n\timagebytes := addLogo(profilePicture, party, context)\n\tform, mime := CreateImageForm(imagebytes, context, r.Host)\n\n\turl := \"https:\/\/graph.facebook.com\/me\/photos\" +\n\t\t\"?access_token=\" + access_token +\n\t\t\"&appsecret_proof=\" + session.AppsecretProof()\n\t\/\/+ \"&no_story=true\"\n\n\tuploadResquest, _ := http.NewRequest(\"POST\", url, &form)\n\tuploadResquest.Header.Set(\"Content-Type\", mime)\n\tuploadResp, _ := session.Request(uploadResquest)\n\tcheck(err, context)\n\n\tvar photoID string\n\tuploadResp.DecodeField(\"id\", &photoID)\n\tredirectUrl := \"https:\/\/facebook.com\/photo.php?fbid=\" + photoID + \"&makeprofile=1&prof\"\n\thttp.Redirect(w, r, redirectUrl, 303)\n}\n\nfunc SaveAboutUser(aboutResp *fb.Result, context appengine.Context) {\n\tvar log Log\n\taboutResp.Decode(&log)\n\n\tvar ageRange map[string]string\n\taboutResp.DecodeField(\"age_range\", &ageRange)\n\tlog.AgeRange = ageRange[\"min\"]\n\n\t_, err := datastore.Put(context,\n\t\tdatastore.NewIncompleteKey(context, \"log\", nil),\n\t\t&log)\n\tcheck(err, context)\n}\n\nfunc GetUserPhoto(photoResp *fb.Result, context appengine.Context) *image.Image {\n\tvar dataField fb.Result\n\tphotoResp.DecodeField(\"data\", &dataField)\n\n\tvar url string\n\tdataField.DecodeField(\"url\", &url)\n\n\tclient := urlfetch.Client(context)\n\tresp, err := client.Get(url)\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\tcheck(err, context)\n\n\treader := bytes.NewReader(data)\n\tprofilePicture, _, err := image.Decode(reader)\n\tcheck(err, context)\n\n\treturn &profilePicture\n}\n\nfunc CreateImageForm(imageBytes []byte, context appengine.Context, host string) (bytes.Buffer, string) {\n\tvar formBuffer bytes.Buffer\n\tmultiWriter := multipart.NewWriter(&formBuffer)\n\n\timageField, err := multiWriter.CreateFormFile(\"source\", \"election.png\")\n\tcheck(err, context)\n\n\timageBuffer := bytes.NewBuffer(imageBytes)\n\t_, err = io.Copy(imageField, imageBuffer)\n\tcheck(err, context)\n\n\tmessageField, err := multiWriter.CreateFormField(\"caption\")\n\tcheck(err, context)\n\t_, err = messageField.Write([]byte(\"Created at http:\/\/\" + host))\n\tcheck(err, context)\n\n\tmultiWriter.Close()\n\n\treturn formBuffer, multiWriter.FormDataContentType()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"database\/sql\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"reflect\"\n\t\"regexp\"\n\n\t_ \"github.com\/mattn\/go-sqlite3\"\n\n\t\"github.com\/gorilla\/securecookie\"\n\t\"github.com\/gorilla\/sessions\"\n\t\"github.com\/microcosm-cc\/bluemonday\"\n\t\"github.com\/mt2d2\/forum\/model\"\n\t\"github.com\/russross\/blackfriday\"\n)\n\nfunc convertToMarkdown(markdown string) template.HTML {\n\tunsafe := blackfriday.MarkdownCommon([]byte(markdown))\n\n\tpolicy := bluemonday.UGCPolicy()\n\tpolicy.AllowElements(\"video\", \"audio\", \"source\")\n\tpolicy.AllowAttrs(\"controls\").OnElements(\"video\", \"audio\")\n\tpolicy.AllowAttrs(\"src\").Matching(regexp.MustCompile(`[\\p{L}\\p{N}\\s\\-_',:\\[\\]!\\.\/\\\\\\(\\)&]*`)).Globally()\n\n\thtml := policy.SanitizeBytes(unsafe)\n\treturn template.HTML(html)\n}\n\nfunc isLastElement(x int, list interface{}) bool {\n\tval := reflect.ValueOf(list)\n\tif val.Kind() == reflect.Ptr && !val.IsNil() {\n\t\tval = val.Elem()\n\t}\n\n\treturn x == val.Len()-1\n}\n\ntype breadCrumb struct{ URL, Title string }\n\ntype app struct {\n\ttemplates *template.Template\n\tdb *sql.DB\n\tsessions *sessions.CookieStore\n\tbreadCrumbs []breadCrumb\n}\n\nfunc newApp() *app {\n\tdb, err := sql.Open(\"sqlite3\", *db)\n\tif err != nil {\n\t\tlog.Panicln(err)\n\t}\n\n\tfuncMap := template.FuncMap{\n\t\t\"markDown\": convertToMarkdown,\n\t\t\"last\": isLastElement}\n\n\ttemplates, err := template.New(\"\").Funcs(funcMap).ParseFiles(\n\t\t\"templates\/header.html\",\n\t\t\"templates\/footer.html\",\n\t\t\"templates\/index.html\",\n\t\t\"templates\/forum.html\",\n\t\t\"templates\/topic.html\",\n\t\t\"templates\/addPost.html\",\n\t\t\"templates\/addTopic.html\",\n\t\t\"templates\/register.html\",\n\t\t\"templates\/login.html\",\n\t)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tsessionStore := sessions.NewCookieStore(securecookie.GenerateRandomKey(64), securecookie.GenerateRandomKey(32))\n\n\tbreadCrumbs := make([]breadCrumb, 0, 1)\n\tbreadCrumbs = append(breadCrumbs, breadCrumb{\"\/\", \"Index\"})\n\treturn &app{templates, db, sessionStore, breadCrumbs}\n}\n\nfunc (app *app) destroy() {\n\tapp.db.Close()\n}\n\nfunc (app *app) addBreadCrumb(url, title string) {\n\tapp.breadCrumbs = append(app.breadCrumbs, breadCrumb{url, title})\n}\n\nfunc (app *app) useBreadCrumbs() *[]breadCrumb {\n\tret := app.breadCrumbs\n\tapp.breadCrumbs = app.breadCrumbs[:1]\n\treturn &ret\n}\n\nfunc (app *app) addErrorFlashes(w http.ResponseWriter, r *http.Request, errs []error) {\n\tfor _, err := range errs {\n\t\tapp.addErrorFlash(w, r, err)\n\t}\n}\n\nfunc (app *app) addErrorFlash(w http.ResponseWriter, r *http.Request, error error) {\n\tapp.addFlash(w, r, error.Error(), \"error\")\n}\n\nfunc (app *app) addSuccessFlash(w http.ResponseWriter, r *http.Request, str string) {\n\tapp.addFlash(w, r, str, \"success\")\n}\n\nfunc (app *app) addFlash(w http.ResponseWriter, r *http.Request, content interface{}, key string) {\n\tsession, _ := app.sessions.Get(r, \"forumSession\")\n\tsession.AddFlash(content, key)\n\tsession.Save(r, w)\n}\n\nfunc (app *app) renderTemplate(w http.ResponseWriter, r *http.Request, tmpl string, data map[string]interface{}) {\n\tsession, _ := app.sessions.Get(r, \"forumSession\")\n\n\tdata[\"breadCrumbs\"] = app.useBreadCrumbs()\n\tdata[\"errorFlashes\"] = session.Flashes(\"error\")\n\tdata[\"successFlashes\"] = session.Flashes(\"success\")\n\n\tif userID, ok := session.Values[\"user_id\"].(int); ok {\n\t\tuser, err := model.FindOneUserById(app.db, userID)\n\t\tif err == nil {\n\t\t\tdata[\"user\"] = user\n\t\t}\n\t}\n\n\tsession.Save(r, w)\n\n\terr := app.templates.ExecuteTemplate(w, tmpl+\".html\", data)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n<commit_msg>use go.rice<commit_after>package main\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"reflect\"\n\t\"regexp\"\n\n\t\"github.com\/GeertJohan\/go.rice\"\n\t_ \"github.com\/mattn\/go-sqlite3\"\n\n\t\"github.com\/gorilla\/securecookie\"\n\t\"github.com\/gorilla\/sessions\"\n\t\"github.com\/microcosm-cc\/bluemonday\"\n\t\"github.com\/mt2d2\/forum\/model\"\n\t\"github.com\/russross\/blackfriday\"\n)\n\nfunc convertToMarkdown(markdown string) template.HTML {\n\tunsafe := blackfriday.MarkdownCommon([]byte(markdown))\n\n\tpolicy := bluemonday.UGCPolicy()\n\tpolicy.AllowElements(\"video\", \"audio\", \"source\")\n\tpolicy.AllowAttrs(\"controls\").OnElements(\"video\", \"audio\")\n\tpolicy.AllowAttrs(\"src\").Matching(regexp.MustCompile(`[\\p{L}\\p{N}\\s\\-_',:\\[\\]!\\.\/\\\\\\(\\)&]*`)).Globally()\n\n\thtml := policy.SanitizeBytes(unsafe)\n\treturn template.HTML(html)\n}\n\nfunc isLastElement(x int, list interface{}) bool {\n\tval := reflect.ValueOf(list)\n\tif val.Kind() == reflect.Ptr && !val.IsNil() {\n\t\tval = val.Elem()\n\t}\n\n\treturn x == val.Len()-1\n}\n\ntype breadCrumb struct{ URL, Title string }\n\ntype app struct {\n\ttemplates *template.Template\n\tdb *sql.DB\n\tsessions *sessions.CookieStore\n\tbreadCrumbs []breadCrumb\n}\n\nfunc embedTemplate(box *rice.Box, tplName string) string {\n\ttpl, err := box.String(tplName)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"{{define \\\"%s\\\"}}%s{{end}}\", tplName, tpl)\n}\n\nfunc newApp() *app {\n\tdb, err := sql.Open(\"sqlite3\", *db)\n\tif err != nil {\n\t\tlog.Panicln(err)\n\t}\n\n\tfuncMap := template.FuncMap{\n\t\t\"markDown\": convertToMarkdown,\n\t\t\"last\": isLastElement}\n\n\ttemplateBox := rice.MustFindBox(\"templates\")\n\ttemplates := template.New(\"\").Funcs(funcMap)\n\ttemplates.Parse(embedTemplate(templateBox, \"header.html\"))\n\ttemplates.Parse(embedTemplate(templateBox, \"footer.html\"))\n\ttemplates.Parse(embedTemplate(templateBox, \"index.html\"))\n\ttemplates.Parse(embedTemplate(templateBox, \"forum.html\"))\n\ttemplates.Parse(embedTemplate(templateBox, \"topic.html\"))\n\ttemplates.Parse(embedTemplate(templateBox, \"addPost.html\"))\n\ttemplates.Parse(embedTemplate(templateBox, \"addTopic.html\"))\n\ttemplates.Parse(embedTemplate(templateBox, \"register.html\"))\n\ttemplates.Parse(embedTemplate(templateBox, \"login.html\"))\n\n\tsessionStore := sessions.NewCookieStore(securecookie.GenerateRandomKey(64), securecookie.GenerateRandomKey(32))\n\n\tbreadCrumbs := make([]breadCrumb, 0, 1)\n\tbreadCrumbs = append(breadCrumbs, breadCrumb{\"\/\", \"Index\"})\n\treturn &app{templates, db, sessionStore, breadCrumbs}\n}\n\nfunc (app *app) destroy() {\n\tapp.db.Close()\n}\n\nfunc (app *app) addBreadCrumb(url, title string) {\n\tapp.breadCrumbs = append(app.breadCrumbs, breadCrumb{url, title})\n}\n\nfunc (app *app) useBreadCrumbs() *[]breadCrumb {\n\tret := app.breadCrumbs\n\tapp.breadCrumbs = app.breadCrumbs[:1]\n\treturn &ret\n}\n\nfunc (app *app) addErrorFlashes(w http.ResponseWriter, r *http.Request, errs []error) {\n\tfor _, err := range errs {\n\t\tapp.addErrorFlash(w, r, err)\n\t}\n}\n\nfunc (app *app) addErrorFlash(w http.ResponseWriter, r *http.Request, error error) {\n\tapp.addFlash(w, r, error.Error(), \"error\")\n}\n\nfunc (app *app) addSuccessFlash(w http.ResponseWriter, r *http.Request, str string) {\n\tapp.addFlash(w, r, str, \"success\")\n}\n\nfunc (app *app) addFlash(w http.ResponseWriter, r *http.Request, content interface{}, key string) {\n\tsession, _ := app.sessions.Get(r, \"forumSession\")\n\tsession.AddFlash(content, key)\n\tsession.Save(r, w)\n}\n\nfunc (app *app) renderTemplate(w http.ResponseWriter, r *http.Request, tmpl string, data map[string]interface{}) {\n\tsession, _ := app.sessions.Get(r, \"forumSession\")\n\n\tdata[\"breadCrumbs\"] = app.useBreadCrumbs()\n\tdata[\"errorFlashes\"] = session.Flashes(\"error\")\n\tdata[\"successFlashes\"] = session.Flashes(\"success\")\n\n\tif userID, ok := session.Values[\"user_id\"].(int); ok {\n\t\tuser, err := model.FindOneUserById(app.db, userID)\n\t\tif err == nil {\n\t\t\tdata[\"user\"] = user\n\t\t}\n\t}\n\n\tsession.Save(r, w)\n\n\tw.Header().Set(\"Content-Type\", \"text\/html\")\n\terr := app.templates.ExecuteTemplate(w, tmpl+\".html\", data)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package cli\n\nimport (\n\t\"fmt\"\n\t\"html\/template\"\n\t\"os\"\n\n\t\"io\/ioutil\"\n\n\t\"reflect\"\n\n\t\"github.com\/fatih\/color\"\n\t\"github.com\/olekukonko\/tablewriter\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\ntype App struct {\n\tName string\n\tUsage string\n\tLongDescription string\n\tCopyright string\n\tParsingOrder []ParsingType\n\tCommand *Command\n\tAuthors []Author\n\tHelpCommands map[string]bool\n\tAction func()\n\tConfigFlag string\n\tRemoveColor bool\n\targs []string\n}\n\ntype ParsingType int\n\nconst (\n\tEnvironmentVariables ParsingType = iota\n\tJsonConfig\n\tYamlConfig\n\tTomlConfig\n\tCliFlags\n)\n\nvar ParingTypeStringMap = map[ParsingType]string{\n\tEnvironmentVariables: \"Environment\",\n\tJsonConfig: \"JSON Config\",\n\tYamlConfig: \"YAML Config\",\n\tTomlConfig: \"Toml Config\",\n\tCliFlags: \"CLI Flag\",\n}\n\n\/\/ Create a new application with default values set.\nfunc NewApp() *App {\n\treturn &App{\n\t\tName: \"cli\",\n\t\tAuthors: make([]Author, 0),\n\t\tHelpCommands: map[string]bool{\n\t\t\t\"--help\": true,\n\t\t\t\"-h\": true,\n\t\t\t\"help\": true,\n\t\t},\n\t\tParsingOrder: []ParsingType{\n\t\t\tEnvironmentVariables,\n\t\t\tJsonConfig,\n\t\t\tYamlConfig,\n\t\t\tTomlConfig,\n\t\t\tCliFlags,\n\t\t},\n\t}\n}\n\n\/\/ Run the app. Should be called with:\n\/\/ app := cli.NewApp()\n\/\/ app.Run(os.Args)\nfunc (a *App) Run(args []string) {\n\ta.args = args[1:]\n\n\ta.checkForConfig()\n\ta.parseCommands()\n\n\terr := a.flagSet.Parse(a.args)\n\tif err != nil {\n\t\treturn\n\t}\n}\n\nfunc (a *App) checkForConfig() {\n\n}\n\nfunc (a *App) parseCommands() {\n\tif a.Command == nil {\n\t\tlog.Fatal(\"No command attached to the app!\")\n\t}\n\ta.Command.buildTree(nil)\n\ta.Command.assignArguments(a.args)\n\n\tif helpCommand, isHelp := a.Command.isHelpCommand(a.HelpCommands); isHelp {\n\t\tfmt.Println(\"help.\", helpCommand.Name)\n\t\treturn\n\t}\n\n\tif err := a.Command.parseFlags(); err != nil {\n\t\t\/\/ TODO: Fix me.\n\t\treturn\n\t}\n\n\ta.parseByOrder()\n}\n\nfunc (a *App) parseByOrder() error {\n\tsettingsMap := newMappedSettings()\n\n\tfor _, order := range a.ParsingOrder {\n\t\tswitch order {\n\t\tcase EnvironmentVariables:\n\t\t\tsettingsMap.addArray(a.Command.parseEnvVars())\n\n\t\tcase JsonConfig:\n\n\t\tcase YamlConfig:\n\n\t\tcase TomlConfig:\n\n\t\tcase CliFlags:\n\t\t\tsettingsMap.addArray(a.Command.getSetFlags())\n\t\t}\n\t}\n\tsettingsMap.PrintDuplicates(a.Command.GetActiveCommands())\n\tsettingsMap.PrintDuplicatesStdout(a.RemoveColor)\n\treturn nil\n}\n\ntype mappedSettings struct {\n\tMainMap map[string]map[string][]activeSetting `json:\"main_map\"`\n}\n\nfunc newMappedSettings() *mappedSettings {\n\treturn &mappedSettings{\n\t\tMainMap: make(map[string]map[string][]activeSetting),\n\t}\n}\n\nfunc (m *mappedSettings) addArray(settings []activeSetting) {\n\tfor _, setting := range settings {\n\t\tif m.MainMap[setting.CommandPath] == nil {\n\t\t\tm.MainMap[setting.CommandPath] = make(map[string][]activeSetting)\n\t\t}\n\t\tif m.MainMap[setting.CommandPath][setting.VariableName] == nil {\n\t\t\tm.MainMap[setting.CommandPath][setting.VariableName] = make([]activeSetting, 0)\n\t\t}\n\t\tm.MainMap[setting.CommandPath][setting.VariableName] = append(m.MainMap[setting.CommandPath][setting.VariableName], setting)\n\t}\n}\n\n\/\/ Helper to print duplciates in table format to Stdout.\nfunc (m *mappedSettings) PrintDuplicates(commands []*Command) {\n\ttable := tablewriter.NewWriter(os.Stdout)\n\ttable.SetHeader([]string{\"Command\", \"Variable\", \"Source\", \"Value\", \"Type\", \"Status\"})\n\tfor _, command := range commands {\n\t\texpandedName := command.GetExpandedName()\n\t\tif m.MainMap[expandedName] == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, settings := range m.MainMap[expandedName] {\n\t\t\tlength := len(settings)\n\t\t\tfor i, setting := range settings {\n\t\t\t\tvar status string\n\t\t\t\tif i == length-1 {\n\t\t\t\t\tstatus = \"✔ Used\"\n\t\t\t\t} else {\n\t\t\t\t\tstatus = \"x Ignored\"\n\t\t\t\t}\n\t\t\t\trow := []string{\n\t\t\t\t\texpandedName,\n\t\t\t\t\tsetting.VariableName,\n\t\t\t\t\tParingTypeStringMap[setting.Source],\n\t\t\t\t\tfmt.Sprintf(\"%s\", setting.Value),\n\t\t\t\t\treflect.TypeOf(setting.Value).String(),\n\t\t\t\t\tstatus,\n\t\t\t\t}\n\t\t\t\tif setting.Source == EnvironmentVariables {\n\t\t\t\t\trow[1] += \" (\" + convertNameToOS(setting.VariableName) + \")\"\n\t\t\t\t}\n\t\t\t\ttable.Append(row)\n\t\t\t}\n\t\t}\n\t}\n\ttable.Render()\n}\n\n\/\/ Use a custom formatted string to print duplicates on Stdout.\nfunc (m *mappedSettings) PrintDuplicatesStdout(noColor bool) {\n\tt := template.New(\"duplicates\")\n\tfuncMap := template.FuncMap{\n\t\t\"blue\": color.BlueString,\n\t\t\"red\": color.RedString,\n\t\t\"green\": color.GreenString,\n\t\t\"bold\": color.New(color.Bold).Sprint,\n\t\t\"sourceString\": func(p ParsingType) string {\n\t\t\treturn ParingTypeStringMap[p]\n\t\t},\n\t\t\"plus1\": func(x int) int {\n\t\t\treturn x + 1\n\t\t},\n\t}\n\tif noColor {\n\t\tfuncMap[\"blue\"] = identityString\n\t\tfuncMap[\"red\"] = identityString\n\t\tfuncMap[\"green\"] = identityString\n\t\tfuncMap[\"bold\"] = identityString\n\t}\n\tt.Funcs(funcMap)\n\tt.Parse(`{{ range $command, $variables := . -}}\n{{ blue \"Configuration:\"}} {{ bold $command }}\n\t{{ range $key, $vars := $variables -}}\n\t{{ range $k, $var := $vars }}{{ $length := len $vars -}}\n\t\t{{ if eq $length (plus1 $k) -}}\n\t\t\t{{ green $key }} = {{ green $var.Value }}\n\t\t{{ green \"set from\" }} {{ sourceString $var.Source -}} \n\t\t{{ else -}} \n\t\t\t{{ red $key }} = {{ red $var.Value }}\n\t\t{{ red \"ignored from\" }} {{ sourceString $var.Source -}} \n\t\t{{ end }}\n\t{{ end -}}\n\t{{ end }}\n{{ end }}`)\n\tt.Execute(os.Stdout, m.MainMap)\n}\n\nfunc identityString(s string) string {\n\treturn s\n}\n<commit_msg>more unused flags<commit_after>package cli\n\nimport (\n\t\"fmt\"\n\t\"html\/template\"\n\t\"os\"\n\n\t\"io\/ioutil\"\n\n\t\"reflect\"\n\n\t\"github.com\/fatih\/color\"\n\t\"github.com\/olekukonko\/tablewriter\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\ntype App struct {\n\tName string\n\tUsage string\n\tLongDescription string\n\tCopyright string\n\tParsingOrder []ParsingType\n\tCommand *Command\n\tAuthors []Author\n\tHelpCommands map[string]bool\n\tAction func()\n\tConfigFlag string\n\tRemoveColor bool\n\targs []string\n}\n\ntype ParsingType int\n\nconst (\n\tEnvironmentVariables ParsingType = iota\n\tJsonConfig\n\tYamlConfig\n\tTomlConfig\n\tCliFlags\n)\n\nvar ParingTypeStringMap = map[ParsingType]string{\n\tEnvironmentVariables: \"Environment\",\n\tJsonConfig: \"JSON Config\",\n\tYamlConfig: \"YAML Config\",\n\tTomlConfig: \"Toml Config\",\n\tCliFlags: \"CLI Flag\",\n}\n\n\/\/ Create a new application with default values set.\nfunc NewApp() *App {\n\treturn &App{\n\t\tName: \"cli\",\n\t\tAuthors: make([]Author, 0),\n\t\tHelpCommands: map[string]bool{\n\t\t\t\"--help\": true,\n\t\t\t\"-h\": true,\n\t\t\t\"help\": true,\n\t\t},\n\t\tParsingOrder: []ParsingType{\n\t\t\tEnvironmentVariables,\n\t\t\tJsonConfig,\n\t\t\tYamlConfig,\n\t\t\tTomlConfig,\n\t\t\tCliFlags,\n\t\t},\n\t}\n}\n\n\/\/ Run the app. Should be called with:\n\/\/ app := cli.NewApp()\n\/\/ app.Run(os.Args)\nfunc (a *App) Run(args []string) {\n\ta.args = args[1:]\n\n\ta.checkForConfig()\n\ta.parseCommands()\n\n}\n\nfunc (a *App) checkForConfig() {\n\n}\n\nfunc (a *App) parseCommands() {\n\tif a.Command == nil {\n\t\tlog.Fatal(\"No command attached to the app!\")\n\t}\n\ta.Command.buildTree(nil)\n\ta.Command.assignArguments(a.args)\n\n\tif helpCommand, isHelp := a.Command.isHelpCommand(a.HelpCommands); isHelp {\n\t\tfmt.Println(\"help.\", helpCommand.Name)\n\t\treturn\n\t}\n\n\tif err := a.Command.parseFlags(); err != nil {\n\t\t\/\/ TODO: Fix me.\n\t\treturn\n\t}\n\n\ta.parseByOrder()\n}\n\nfunc (a *App) parseByOrder() error {\n\tsettingsMap := newMappedSettings()\n\n\tfor _, order := range a.ParsingOrder {\n\t\tswitch order {\n\t\tcase EnvironmentVariables:\n\t\t\tsettingsMap.addArray(a.Command.parseEnvVars())\n\n\t\tcase JsonConfig:\n\n\t\tcase YamlConfig:\n\n\t\tcase TomlConfig:\n\n\t\tcase CliFlags:\n\t\t\tsettingsMap.addArray(a.Command.getSetFlags())\n\t\t}\n\t}\n\tsettingsMap.PrintDuplicates(a.Command.GetActiveCommands())\n\tsettingsMap.PrintDuplicatesStdout(a.RemoveColor)\n\treturn nil\n}\n\ntype mappedSettings struct {\n\tMainMap map[string]map[string][]activeSetting `json:\"main_map\"`\n}\n\nfunc newMappedSettings() *mappedSettings {\n\treturn &mappedSettings{\n\t\tMainMap: make(map[string]map[string][]activeSetting),\n\t}\n}\n\nfunc (m *mappedSettings) addArray(settings []activeSetting) {\n\tfor _, setting := range settings {\n\t\tif m.MainMap[setting.CommandPath] == nil {\n\t\t\tm.MainMap[setting.CommandPath] = make(map[string][]activeSetting)\n\t\t}\n\t\tif m.MainMap[setting.CommandPath][setting.VariableName] == nil {\n\t\t\tm.MainMap[setting.CommandPath][setting.VariableName] = make([]activeSetting, 0)\n\t\t}\n\t\tm.MainMap[setting.CommandPath][setting.VariableName] = append(m.MainMap[setting.CommandPath][setting.VariableName], setting)\n\t}\n}\n\n\/\/ Helper to print duplciates in table format to Stdout.\nfunc (m *mappedSettings) PrintDuplicates(commands []*Command) {\n\ttable := tablewriter.NewWriter(os.Stdout)\n\ttable.SetHeader([]string{\"Command\", \"Variable\", \"Source\", \"Value\", \"Type\", \"Status\"})\n\tfor _, command := range commands {\n\t\texpandedName := command.GetExpandedName()\n\t\tif m.MainMap[expandedName] == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, settings := range m.MainMap[expandedName] {\n\t\t\tlength := len(settings)\n\t\t\tfor i, setting := range settings {\n\t\t\t\tvar status string\n\t\t\t\tif i == length-1 {\n\t\t\t\t\tstatus = \"✔ Used\"\n\t\t\t\t} else {\n\t\t\t\t\tstatus = \"x Ignored\"\n\t\t\t\t}\n\t\t\t\trow := []string{\n\t\t\t\t\texpandedName,\n\t\t\t\t\tsetting.VariableName,\n\t\t\t\t\tParingTypeStringMap[setting.Source],\n\t\t\t\t\tfmt.Sprintf(\"%s\", setting.Value),\n\t\t\t\t\treflect.TypeOf(setting.Value).String(),\n\t\t\t\t\tstatus,\n\t\t\t\t}\n\t\t\t\tif setting.Source == EnvironmentVariables {\n\t\t\t\t\trow[1] += \" (\" + convertNameToOS(setting.VariableName) + \")\"\n\t\t\t\t}\n\t\t\t\ttable.Append(row)\n\t\t\t}\n\t\t}\n\t}\n\ttable.Render()\n}\n\n\/\/ Use a custom formatted string to print duplicates on Stdout.\nfunc (m *mappedSettings) PrintDuplicatesStdout(noColor bool) {\n\tt := template.New(\"duplicates\")\n\tfuncMap := template.FuncMap{\n\t\t\"blue\": color.BlueString,\n\t\t\"red\": color.RedString,\n\t\t\"green\": color.GreenString,\n\t\t\"bold\": color.New(color.Bold).Sprint,\n\t\t\"sourceString\": func(p ParsingType) string {\n\t\t\treturn ParingTypeStringMap[p]\n\t\t},\n\t\t\"plus1\": func(x int) int {\n\t\t\treturn x + 1\n\t\t},\n\t}\n\tif noColor {\n\t\tfuncMap[\"blue\"] = identityString\n\t\tfuncMap[\"red\"] = identityString\n\t\tfuncMap[\"green\"] = identityString\n\t\tfuncMap[\"bold\"] = identityString\n\t}\n\tt.Funcs(funcMap)\n\tt.Parse(`{{ range $command, $variables := . -}}\n{{ blue \"Configuration:\"}} {{ bold $command }}\n\t{{ range $key, $vars := $variables -}}\n\t{{ range $k, $var := $vars }}{{ $length := len $vars -}}\n\t\t{{ if eq $length (plus1 $k) -}}\n\t\t\t{{ green $key }} = {{ green $var.Value }}\n\t\t{{ green \"set from\" }} {{ sourceString $var.Source -}} \n\t\t{{ else -}} \n\t\t\t{{ red $key }} = {{ red $var.Value }}\n\t\t{{ red \"ignored from\" }} {{ sourceString $var.Source -}} \n\t\t{{ end }}\n\t{{ end -}}\n\t{{ end }}\n{{ end }}`)\n\tt.Execute(os.Stdout, m.MainMap)\n}\n\nfunc identityString(s string) string {\n\treturn s\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Fredrik Ehnbom\n\/\/ Use of this source code is governed by a 2-clause\n\/\/ BSD-style license that can be found in the LICENSE file.\n\npackage text\n\nimport (\n\t\"testing\"\n)\n\n\/\/ Verified against ST3\nfunc TestRegionIntersects(t *testing.T) {\n\ttype Test struct {\n\t\ta, b Region\n\t\tc bool\n\t}\n\tvar tests = []Test{\n\t\t{Region{10, 20}, Region{25, 35}, false},\n\t\t{Region{25, 35}, Region{10, 20}, false},\n\t\t{Region{10, 25}, Region{20, 35}, true},\n\t\t{Region{20, 35}, Region{10, 25}, true},\n\t\t{Region{10, 25}, Region{15, 20}, true},\n\t\t{Region{15, 20}, Region{10, 25}, true},\n\t\t{Region{5, 10}, Region{10, 23}, false},\n\t\t{Region{5, 10}, Region{5, 10}, true},\n\t\t{Region{0, 0}, Region{0, 0}, true},\n\t\t{Region{1, 1}, Region{1, 1}, true},\n\t\t{Region{23, 24}, Region{10, 23}, false},\n\t}\n\tfor _, test := range tests {\n\t\tif res := test.a.Intersects(test.b); res != test.c {\n\t\t\tt.Errorf(\"Expected %v, but got %v, %v\", test.c, res, test)\n\t\t}\n\t}\n}\n\n\/\/ Verified against ST3\nfunc TestRegionIntersection(t *testing.T) {\n\tvar tests = [][]Region{\n\t\t{{10, 20}, {25, 35}, {0, 0}},\n\t\t{{25, 35}, {10, 20}, {0, 0}},\n\t\t{{10, 25}, {20, 35}, {20, 25}},\n\t\t{{20, 35}, {10, 25}, {20, 25}},\n\t\t{{10, 25}, {15, 20}, {15, 20}},\n\t\t{{15, 20}, {10, 25}, {15, 20}},\n\t\t{{5, 10}, {10, 23}, {0, 0}},\n\t\t{{5, 10}, {5, 10}, {5, 10}},\n\t\t{{1, 1}, {1, 1}, {0, 0}},\n\t}\n\tfor _, test := range tests {\n\t\tif res := test[0].Intersection(test[1]); res != test[2] {\n\t\t\tt.Errorf(\"Expected intersection %v, but got %v, %v\", test[2], res, test)\n\t\t}\n\t}\n}\n\nfunc TestClip(t *testing.T) {\n\ttests := [][]Region{\n\t\t{{10, 20}, {25, 35}, {10, 20}},\n\t\t{{10, 20}, {0, 5}, {10, 20}},\n\t\t{{10, 20}, {0, 11}, {11, 20}},\n\t\t{{10, 20}, {0, 15}, {15, 20}},\n\t\t{{10, 20}, {15, 30}, {10, 15}},\n\t\t{{10, 20}, {20, 30}, {10, 20}},\n\t\t{{10, 20}, {0, 30}, {10, 20}},\n\t\t{{10, 20}, {10, 20}, {10, 20}},\n\t}\n\tfor i := range tests {\n\t\ta := tests[i][0]\n\t\tignoreRegion := tests[i][1]\n\t\ta = a.Clip(ignoreRegion)\n\t\tif a != tests[i][2] {\n\t\t\tt.Errorf(\"Expected %v, got: %v\", tests[i][2], a)\n\t\t}\n\t}\n}\n\n\/\/ Verified against ST3\nfunc TestContains(t *testing.T) {\n\ttype Test struct {\n\t\tr Region\n\t\tpos int\n\t\tc bool\n\t}\n\ttests := []Test{\n\t\t{Region{0, 0}, 0, true},\n\t\t{Region{10, 10}, 10, true},\n\t\t{Region{10, 11}, 10, true},\n\t\t{Region{10, 11}, 11, true},\n\t\t{Region{10, 11}, 12, false},\n\t\t{Region{10, 11}, 9, false},\n\t}\n\tfor _, test := range tests {\n\t\tif res := test.r.Contains(test.pos); res != test.c {\n\t\t\tt.Errorf(\"Expected %v, but got %v, %v, %v\", test.c, res, test.r, test.pos)\n\t\t}\n\t}\n}\n<commit_msg>text: Add ST3 verified TestCover<commit_after>\/\/ Copyright 2013 Fredrik Ehnbom\n\/\/ Use of this source code is governed by a 2-clause\n\/\/ BSD-style license that can be found in the LICENSE file.\n\npackage text\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n)\n\n\/\/ Verified against ST3\nfunc TestRegionIntersects(t *testing.T) {\n\ttype Test struct {\n\t\ta, b Region\n\t\tc bool\n\t}\n\tvar tests = []Test{\n\t\t{Region{10, 20}, Region{25, 35}, false},\n\t\t{Region{25, 35}, Region{10, 20}, false},\n\t\t{Region{10, 25}, Region{20, 35}, true},\n\t\t{Region{20, 35}, Region{10, 25}, true},\n\t\t{Region{10, 25}, Region{15, 20}, true},\n\t\t{Region{15, 20}, Region{10, 25}, true},\n\t\t{Region{5, 10}, Region{10, 23}, false},\n\t\t{Region{5, 10}, Region{5, 10}, true},\n\t\t{Region{0, 0}, Region{0, 0}, true},\n\t\t{Region{1, 1}, Region{1, 1}, true},\n\t\t{Region{23, 24}, Region{10, 23}, false},\n\t}\n\tfor _, test := range tests {\n\t\tif res := test.a.Intersects(test.b); res != test.c {\n\t\t\tt.Errorf(\"Expected %v, but got %v, %v\", test.c, res, test)\n\t\t}\n\t}\n}\n\n\/\/ Verified against ST3\nfunc TestRegionIntersection(t *testing.T) {\n\tvar tests = [][]Region{\n\t\t{{10, 20}, {25, 35}, {0, 0}},\n\t\t{{25, 35}, {10, 20}, {0, 0}},\n\t\t{{10, 25}, {20, 35}, {20, 25}},\n\t\t{{20, 35}, {10, 25}, {20, 25}},\n\t\t{{10, 25}, {15, 20}, {15, 20}},\n\t\t{{15, 20}, {10, 25}, {15, 20}},\n\t\t{{5, 10}, {10, 23}, {0, 0}},\n\t\t{{5, 10}, {5, 10}, {5, 10}},\n\t\t{{1, 1}, {1, 1}, {0, 0}},\n\t}\n\tfor _, test := range tests {\n\t\tif res := test[0].Intersection(test[1]); res != test[2] {\n\t\t\tt.Errorf(\"Expected intersection %v, but got %v, %v\", test[2], res, test)\n\t\t}\n\t}\n}\n\nfunc TestClip(t *testing.T) {\n\ttests := [][]Region{\n\t\t{{10, 20}, {25, 35}, {10, 20}},\n\t\t{{10, 20}, {0, 5}, {10, 20}},\n\t\t{{10, 20}, {0, 11}, {11, 20}},\n\t\t{{10, 20}, {0, 15}, {15, 20}},\n\t\t{{10, 20}, {15, 30}, {10, 15}},\n\t\t{{10, 20}, {20, 30}, {10, 20}},\n\t\t{{10, 20}, {0, 30}, {10, 20}},\n\t\t{{10, 20}, {10, 20}, {10, 20}},\n\t}\n\tfor i := range tests {\n\t\ta := tests[i][0]\n\t\tignoreRegion := tests[i][1]\n\t\ta = a.Clip(ignoreRegion)\n\t\tif a != tests[i][2] {\n\t\t\tt.Errorf(\"Expected %v, got: %v\", tests[i][2], a)\n\t\t}\n\t}\n}\n\n\/\/ Verified against ST3\nfunc TestContains(t *testing.T) {\n\ttype Test struct {\n\t\tr Region\n\t\tpos int\n\t\tc bool\n\t}\n\ttests := []Test{\n\t\t{Region{0, 0}, 0, true},\n\t\t{Region{10, 10}, 10, true},\n\t\t{Region{10, 11}, 10, true},\n\t\t{Region{10, 11}, 11, true},\n\t\t{Region{10, 11}, 12, false},\n\t\t{Region{10, 11}, 9, false},\n\t}\n\tfor _, test := range tests {\n\t\tif res := test.r.Contains(test.pos); res != test.c {\n\t\t\tt.Errorf(\"Expected %v, but got %v, %v, %v\", test.c, res, test.r, test.pos)\n\t\t}\n\t}\n}\n\n\/\/ Verified against ST3\nfunc TestCover(t *testing.T) {\n\ttests := []struct {\n\t\ta, b Region\n\t\tout Region\n\t}{\n\t\t{Region{0, 1}, Region{1, 0}, Region{0, 1}},\n\t\t{Region{1, 0}, Region{0, 1}, Region{1, 0}},\n\t\t{Region{1, 0}, Region{5, 10}, Region{10, 0}},\n\t\t{Region{5, 10}, Region{1, 0}, Region{0, 10}},\n\t}\n\tfor _, test := range tests {\n\t\tif res := test.a.Cover(test.b); !reflect.DeepEqual(res, test.out) {\n\t\t\tt.Errorf(\"Expected %v, but got %v\", test.out, res)\n\t\t}\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package backend\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/compose-spec\/compose-go\/cli\"\n\t\"github.com\/compose-spec\/compose-go\/types\"\n\t\"github.com\/docker\/ecs-plugin\/pkg\/compose\"\n\t\"github.com\/docker\/ecs-plugin\/pkg\/progress\"\n)\n\nfunc (b *Backend) Up(ctx context.Context, options *cli.ProjectOptions) error {\n\tproject, err := cli.ProjectFromOptions(options)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = b.api.CheckRequirements(ctx, b.Region)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcluster, err := b.GetCluster(ctx, project)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttemplate, err := b.Convert(project)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvpc, err := b.GetVPC(ctx, project)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsubNets, err := b.api.GetSubNets(ctx, vpc)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(subNets) < 2 {\n\t\treturn fmt.Errorf(\"VPC %s should have at least 2 associated subnets in different availability zones\", vpc)\n\t}\n\n\tlb, err := b.GetLoadBalancer(ctx, project)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tparameters := map[string]string{\n\t\tParameterClusterName: cluster,\n\t\tParameterVPCId: vpc,\n\t\tParameterSubnet1Id: subNets[0],\n\t\tParameterSubnet2Id: subNets[1],\n\t\tParameterLoadBalancerARN: lb,\n\t}\n\n\tupdate, err := b.api.StackExists(ctx, project.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\toperation := compose.StackCreate\n\tif update {\n\t\toperation = compose.StackUpdate\n\t\tchangeset, err := b.api.CreateChangeSet(ctx, project.Name, template, parameters)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = b.api.UpdateStack(ctx, changeset)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\terr = b.api.CreateStack(ctx, project.Name, template, parameters)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor k := range template.Resources {\n\t\tb.writer.Event(progress.Event{\n\t\t\tID: k,\n\t\t\tStatus: progress.Working,\n\t\t\tStatusText: \"Pending\",\n\t\t})\n\t}\n\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)\n\tgo func() {\n\t\t<-signalChan\n\t\tfmt.Println(\"user interrupted deployment. Deleting stack...\")\n\t\tb.Down(ctx, options)\n\t}()\n\n\terr = b.WaitStackCompletion(ctx, project.Name, operation)\n\t\/\/ update status for external resources (LB and cluster)\n\tloadBalancerName := fmt.Sprintf(\"%.32s\", fmt.Sprintf(\"%sLoadBalancer\", strings.Title(project.Name)))\n\tfor k := range template.Resources {\n\t\tswitch k {\n\t\tcase \"Cluster\":\n\t\t\tif cluster == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase loadBalancerName:\n\t\t\tif lb == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t\tb.writer.Event(progress.Event{\n\t\t\tID: k,\n\t\t\tStatus: progress.Done,\n\t\t\tStatusText: \"\",\n\t\t})\n\t}\n\treturn err\n}\n\nfunc (b Backend) GetVPC(ctx context.Context, project *types.Project) (string, error) {\n\t\/\/check compose file for custom VPC selected\n\tif vpc, ok := project.Extensions[compose.ExtensionVPC]; ok {\n\t\tvpcID := vpc.(string)\n\t\tok, err := b.api.VpcExists(ctx, vpcID)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif !ok {\n\t\t\treturn \"\", fmt.Errorf(\"VPC does not exist: %s\", vpc)\n\t\t}\n\t\treturn vpcID, nil\n\t}\n\tdefaultVPC, err := b.api.GetDefaultVPC(ctx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn defaultVPC, nil\n}\n\nfunc (b Backend) GetLoadBalancer(ctx context.Context, project *types.Project) (string, error) {\n\t\/\/check compose file for custom VPC selected\n\tif ext, ok := project.Extensions[compose.ExtensionLB]; ok {\n\t\tlb := ext.(string)\n\t\tok, err := b.api.LoadBalancerExists(ctx, lb)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif !ok {\n\t\t\treturn \"\", fmt.Errorf(\"load balancer does not exist: %s\", lb)\n\t\t}\n\t\treturn lb, nil\n\t}\n\treturn \"\", nil\n}\n\nfunc (b Backend) GetCluster(ctx context.Context, project *types.Project) (string, error) {\n\t\/\/check compose file for custom VPC selected\n\tif ext, ok := project.Extensions[compose.ExtensionCluster]; ok {\n\t\tcluster := ext.(string)\n\t\tok, err := b.api.ClusterExists(ctx, cluster)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif !ok {\n\t\t\treturn \"\", fmt.Errorf(\"cluster does not exist: %s\", cluster)\n\t\t}\n\t\treturn cluster, nil\n\t}\n\treturn \"\", nil\n}\n<commit_msg>Don't pretend we know resources to be created<commit_after>package backend\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n\n\t\"github.com\/compose-spec\/compose-go\/cli\"\n\t\"github.com\/compose-spec\/compose-go\/types\"\n\t\"github.com\/docker\/ecs-plugin\/pkg\/compose\"\n)\n\nfunc (b *Backend) Up(ctx context.Context, options *cli.ProjectOptions) error {\n\tproject, err := cli.ProjectFromOptions(options)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = b.api.CheckRequirements(ctx, b.Region)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcluster, err := b.GetCluster(ctx, project)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttemplate, err := b.Convert(project)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvpc, err := b.GetVPC(ctx, project)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsubNets, err := b.api.GetSubNets(ctx, vpc)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(subNets) < 2 {\n\t\treturn fmt.Errorf(\"VPC %s should have at least 2 associated subnets in different availability zones\", vpc)\n\t}\n\n\tlb, err := b.GetLoadBalancer(ctx, project)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tparameters := map[string]string{\n\t\tParameterClusterName: cluster,\n\t\tParameterVPCId: vpc,\n\t\tParameterSubnet1Id: subNets[0],\n\t\tParameterSubnet2Id: subNets[1],\n\t\tParameterLoadBalancerARN: lb,\n\t}\n\n\tupdate, err := b.api.StackExists(ctx, project.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\toperation := compose.StackCreate\n\tif update {\n\t\toperation = compose.StackUpdate\n\t\tchangeset, err := b.api.CreateChangeSet(ctx, project.Name, template, parameters)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = b.api.UpdateStack(ctx, changeset)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\terr = b.api.CreateStack(ctx, project.Name, template, parameters)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tsignalChan := make(chan os.Signal, 1)\n\tsignal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)\n\tgo func() {\n\t\t<-signalChan\n\t\tfmt.Println(\"user interrupted deployment. Deleting stack...\")\n\t\tb.Down(ctx, options)\n\t}()\n\n\terr = b.WaitStackCompletion(ctx, project.Name, operation)\n\treturn err\n}\n\nfunc (b Backend) GetVPC(ctx context.Context, project *types.Project) (string, error) {\n\t\/\/check compose file for custom VPC selected\n\tif vpc, ok := project.Extensions[compose.ExtensionVPC]; ok {\n\t\tvpcID := vpc.(string)\n\t\tok, err := b.api.VpcExists(ctx, vpcID)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif !ok {\n\t\t\treturn \"\", fmt.Errorf(\"VPC does not exist: %s\", vpc)\n\t\t}\n\t\treturn vpcID, nil\n\t}\n\tdefaultVPC, err := b.api.GetDefaultVPC(ctx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn defaultVPC, nil\n}\n\nfunc (b Backend) GetLoadBalancer(ctx context.Context, project *types.Project) (string, error) {\n\t\/\/check compose file for custom VPC selected\n\tif ext, ok := project.Extensions[compose.ExtensionLB]; ok {\n\t\tlb := ext.(string)\n\t\tok, err := b.api.LoadBalancerExists(ctx, lb)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif !ok {\n\t\t\treturn \"\", fmt.Errorf(\"load balancer does not exist: %s\", lb)\n\t\t}\n\t\treturn lb, nil\n\t}\n\treturn \"\", nil\n}\n\nfunc (b Backend) GetCluster(ctx context.Context, project *types.Project) (string, error) {\n\t\/\/check compose file for custom VPC selected\n\tif ext, ok := project.Extensions[compose.ExtensionCluster]; ok {\n\t\tcluster := ext.(string)\n\t\tok, err := b.api.ClusterExists(ctx, cluster)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif !ok {\n\t\t\treturn \"\", fmt.Errorf(\"cluster does not exist: %s\", cluster)\n\t\t}\n\t\treturn cluster, nil\n\t}\n\treturn \"\", nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\n\t\"cloud.google.com\/go\/bigquery\"\n\t\"cloud.google.com\/go\/spanner\"\n\t\"golang.org\/x\/sync\/errgroup\"\n\t\"google.golang.org\/api\/googleapi\"\n\t\"google.golang.org\/api\/option\"\n\n\t\"go.chromium.org\/luci\/common\/bq\"\n\t\"go.chromium.org\/luci\/common\/errors\"\n\t\"go.chromium.org\/luci\/common\/logging\"\n\t\"go.chromium.org\/luci\/common\/sync\/parallel\"\n\t\"go.chromium.org\/luci\/server\/auth\"\n\n\t\"go.chromium.org\/luci\/resultdb\/internal\"\n\t\"go.chromium.org\/luci\/resultdb\/internal\/span\"\n\tbqpb \"go.chromium.org\/luci\/resultdb\/proto\/bq\/v1\"\n\tpb \"go.chromium.org\/luci\/resultdb\/proto\/rpc\/v1\"\n)\n\nconst (\n\tmaxInvocationGraphSize = 1000\n\tmaxBatchSize = 500\n)\n\n\/\/ inserter is implemented by bigquery.Inserter.\ntype inserter interface {\n\t\/\/ Put uploads one or more rows to the BigQuery service.\n\tPut(ctx context.Context, src interface{}) error\n}\n\nfunc getLUCIProject(ctx context.Context, invID span.InvocationID) (string, error) {\n\trealm, err := span.ReadInvocationRealm(ctx, span.Client(ctx).Single(), invID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tproject, _, err := internal.ParseRealm(realm)\n\tif err != nil {\n\t\treturn \"\", errors.Annotate(err, \"invocation %q\", invID.Name()).Err()\n\t}\n\treturn project, nil\n}\n\nfunc getBQClient(ctx context.Context, luciProject string, bqExport *pb.BigQueryExport) (*bigquery.Client, error) {\n\ttr, err := auth.GetRPCTransport(ctx, auth.AsProject, auth.WithProject(luciProject), auth.WithScopes(bigquery.Scope))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn bigquery.NewClient(ctx, bqExport.Project, option.WithHTTPClient(&http.Client{\n\t\tTransport: tr,\n\t}))\n}\n\n\/\/ ensureBQTable creates a BQ table if it doesn't exist.\nfunc ensureBQTable(ctx context.Context, client *bigquery.Client, bqExport *pb.BigQueryExport) error {\n\tt := client.Dataset(bqExport.Dataset).Table(bqExport.Table)\n\n\t\/\/ Check the existence of table.\n\t\/\/ TODO(chanli): Cache the check result.\n\t_, err := t.Metadata(ctx)\n\tapiErr, ok := err.(*googleapi.Error)\n\tswitch {\n\tcase ok && apiErr.Code == http.StatusNotFound:\n\t\t\/\/ Table doesn't exist.\n\t\tbreak\n\tcase ok && apiErr.Code == http.StatusForbidden:\n\t\t\/\/ No read table permission.\n\t\treturn permanentInvocationTaskErrTag.Apply(err)\n\tdefault:\n\t\t\/\/ Either no err or the err is not special cases above, simply return.\n\t\treturn err\n\t}\n\n\t\/\/ Table doesn't exist. Create one.\n\terr = t.Create(ctx, nil)\n\tapiErr, ok = err.(*googleapi.Error)\n\tswitch {\n\tcase err == nil:\n\t\tlogging.Infof(ctx, \"Created BigQuery table %s.%s.%s\", bqExport.Project, bqExport.Dataset, bqExport.Table)\n\t\treturn nil\n\tcase ok && apiErr.Code == http.StatusConflict:\n\t\t\/\/ Table just got created. This is fine.\n\t\treturn nil\n\tcase ok && apiErr.Code == http.StatusForbidden:\n\t\t\/\/ No create table permission.\n\t\treturn permanentInvocationTaskErrTag.Apply(err)\n\tdefault:\n\t\treturn err\n\t}\n}\n\ntype testVariantKey struct {\n\ttestID string\n\tvariantHash string\n}\n\n\/\/ queryExoneratedTestVariants reads exonerated test variants matching the predicate.\nfunc queryExoneratedTestVariants(ctx context.Context, txn *spanner.ReadOnlyTransaction, invIDs span.InvocationIDSet) (map[testVariantKey]struct{}, error) {\n\tst := spanner.NewStatement(`\n\t\tSELECT DISTINCT TestId, VariantHash,\n\t\tFROM TestExonerations\n\t\tWHERE InvocationId IN UNNEST(@invIDs)\n\t`)\n\tst.Params[\"invIDs\"] = invIDs\n\n\ttvs := map[testVariantKey]struct{}{}\n\tvar b span.Buffer\n\terr := span.Query(ctx, \"exonerated test variants\", txn, st, func(row *spanner.Row) error {\n\t\tvar key testVariantKey\n\t\tif err := b.FromSpanner(row, &key.testID, &key.variantHash); err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttvs[key] = struct{}{}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn tvs, nil\n}\n\nfunc generateBQRow(inv *pb.Invocation, tr *pb.TestResult, exonerated bool) *bq.Row {\n\treturn &bq.Row{\n\t\tMessage: &bqpb.TestResultRow{\n\t\t\tInvocation: &bqpb.TestResultRow_Invocation{\n\t\t\t\tId: string(span.MustParseInvocationName(inv.Name)),\n\t\t\t\tInterrupted: inv.Interrupted,\n\t\t\t\tTags: inv.Tags,\n\t\t\t},\n\t\t\tResult: tr,\n\t\t\tExoneration: &bqpb.TestResultRow_TestExoneration{\n\t\t\t\tExonerated: exonerated,\n\t\t\t},\n\t\t},\n\t\tInsertID: tr.Name,\n\t}\n}\n\nfunc queryTestResultsStreaming(ctx context.Context, txn *spanner.ReadOnlyTransaction, inv *pb.Invocation, q span.TestResultQuery, exoneratedTestVariants map[testVariantKey]struct{}, maxBatchSize int, batchC chan []*bq.Row) error {\n\trows := make([]*bq.Row, 0, maxBatchSize)\n\terr := span.QueryTestResultsStreaming(ctx, txn, q, func(tr *pb.TestResult, variantHash string) error {\n\t\t_, exonerated := exoneratedTestVariants[testVariantKey{testID: tr.TestId, variantHash: variantHash}]\n\t\trows = append(rows, generateBQRow(inv, tr, exonerated))\n\t\tif len(rows) >= maxBatchSize {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn ctx.Err()\n\t\t\tcase batchC <- rows:\n\t\t\t}\n\t\t\trows = make([]*bq.Row, 0, maxBatchSize)\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(rows) > 0 {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tcase batchC <- rows:\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc batchExportRows(ctx context.Context, ins inserter, batchC chan []*bq.Row) error {\n\treturn parallel.WorkPool(10, func(workC chan<- func() error) {\n\t\tfor rows := range batchC {\n\t\t\trows := rows\n\t\t\tworkC <- func() error {\n\t\t\t\terr := ins.Put(ctx, rows)\n\t\t\t\tif apiErr, ok := err.(*googleapi.Error); ok && apiErr.Code == http.StatusForbidden {\n\t\t\t\t\terr = permanentInvocationTaskErrTag.Apply(err)\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t})\n}\n\n\/\/ exportTestResultsToBigQuery queries test results in Spanner then exports them to BigQuery.\nfunc exportTestResultsToBigQuery(ctx context.Context, ins inserter, invID span.InvocationID, bqExport *pb.BigQueryExport, maxBatchSize int) error {\n\ttxn := span.Client(ctx).ReadOnlyTransaction()\n\tdefer txn.Close()\n\n\tinv, err := span.ReadInvocationFull(ctx, txn, invID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif inv.State != pb.Invocation_FINALIZED {\n\t\treturn errors.Reason(\"%s is not finalized yet\", invID.Name()).Err()\n\t}\n\n\t\/\/ Get the invocation set.\n\tinvIDs, err := span.ReadReachableInvocations(ctx, txn, maxInvocationGraphSize, span.NewInvocationIDSet(invID))\n\tif err != nil {\n\t\tif span.TooManyInvocationsTag.In(err) {\n\t\t\terr = permanentInvocationTaskErrTag.Apply(err)\n\t\t}\n\t\treturn err\n\t}\n\n\texoneratedTestVariants, err := queryExoneratedTestVariants(ctx, txn, invIDs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Query test results and export to BigQuery.\n\tbatchC := make(chan []*bq.Row)\n\n\t\/\/ Batch exports rows to BigQuery.\n\teg, ctx := errgroup.WithContext(ctx)\n\n\teg.Go(func() error {\n\t\treturn batchExportRows(ctx, ins, batchC)\n\t})\n\n\tq := span.TestResultQuery{\n\t\tPredicate: bqExport.GetTestResults().GetPredicate(),\n\t\tInvocationIDs: invIDs,\n\t\tSelectVariantHash: true,\n\t}\n\teg.Go(func() error {\n\t\tdefer close(batchC)\n\t\treturn queryTestResultsStreaming(ctx, txn, inv, q, exoneratedTestVariants, maxBatchSize, batchC)\n\t})\n\n\treturn eg.Wait()\n}\n\n\/\/ exportResultsToBigQuery exports results of an invocation to a BigQuery table.\nfunc exportResultsToBigQuery(ctx context.Context, invID span.InvocationID, bqExport *pb.BigQueryExport) error {\n\tluciProject, err := getLUCIProject(ctx, invID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient, err := getBQClient(ctx, luciProject, bqExport)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer client.Close()\n\n\tif err := ensureBQTable(ctx, client, bqExport); err != nil {\n\t\treturn err\n\t}\n\n\tins := client.Dataset(bqExport.Dataset).Table(bqExport.Table).Inserter()\n\treturn exportTestResultsToBigQuery(ctx, ins, invID, bqExport, maxBatchSize)\n}\n<commit_msg>[resultdb] Trim artifact fetch_url<commit_after>\/\/ Copyright 2019 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\n\t\"cloud.google.com\/go\/bigquery\"\n\t\"cloud.google.com\/go\/spanner\"\n\t\"golang.org\/x\/sync\/errgroup\"\n\t\"google.golang.org\/api\/googleapi\"\n\t\"google.golang.org\/api\/option\"\n\n\t\"go.chromium.org\/luci\/common\/bq\"\n\t\"go.chromium.org\/luci\/common\/errors\"\n\t\"go.chromium.org\/luci\/common\/logging\"\n\t\"go.chromium.org\/luci\/common\/sync\/parallel\"\n\t\"go.chromium.org\/luci\/server\/auth\"\n\n\t\"go.chromium.org\/luci\/resultdb\/internal\"\n\t\"go.chromium.org\/luci\/resultdb\/internal\/span\"\n\tbqpb \"go.chromium.org\/luci\/resultdb\/proto\/bq\/v1\"\n\tpb \"go.chromium.org\/luci\/resultdb\/proto\/rpc\/v1\"\n)\n\nconst (\n\tmaxInvocationGraphSize = 1000\n\tmaxBatchSize = 500\n)\n\n\/\/ inserter is implemented by bigquery.Inserter.\ntype inserter interface {\n\t\/\/ Put uploads one or more rows to the BigQuery service.\n\tPut(ctx context.Context, src interface{}) error\n}\n\nfunc getLUCIProject(ctx context.Context, invID span.InvocationID) (string, error) {\n\trealm, err := span.ReadInvocationRealm(ctx, span.Client(ctx).Single(), invID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tproject, _, err := internal.ParseRealm(realm)\n\tif err != nil {\n\t\treturn \"\", errors.Annotate(err, \"invocation %q\", invID.Name()).Err()\n\t}\n\treturn project, nil\n}\n\nfunc getBQClient(ctx context.Context, luciProject string, bqExport *pb.BigQueryExport) (*bigquery.Client, error) {\n\ttr, err := auth.GetRPCTransport(ctx, auth.AsProject, auth.WithProject(luciProject), auth.WithScopes(bigquery.Scope))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn bigquery.NewClient(ctx, bqExport.Project, option.WithHTTPClient(&http.Client{\n\t\tTransport: tr,\n\t}))\n}\n\n\/\/ ensureBQTable creates a BQ table if it doesn't exist.\nfunc ensureBQTable(ctx context.Context, client *bigquery.Client, bqExport *pb.BigQueryExport) error {\n\tt := client.Dataset(bqExport.Dataset).Table(bqExport.Table)\n\n\t\/\/ Check the existence of table.\n\t\/\/ TODO(chanli): Cache the check result.\n\t_, err := t.Metadata(ctx)\n\tapiErr, ok := err.(*googleapi.Error)\n\tswitch {\n\tcase ok && apiErr.Code == http.StatusNotFound:\n\t\t\/\/ Table doesn't exist.\n\t\tbreak\n\tcase ok && apiErr.Code == http.StatusForbidden:\n\t\t\/\/ No read table permission.\n\t\treturn permanentInvocationTaskErrTag.Apply(err)\n\tdefault:\n\t\t\/\/ Either no err or the err is not special cases above, simply return.\n\t\treturn err\n\t}\n\n\t\/\/ Table doesn't exist. Create one.\n\terr = t.Create(ctx, nil)\n\tapiErr, ok = err.(*googleapi.Error)\n\tswitch {\n\tcase err == nil:\n\t\tlogging.Infof(ctx, \"Created BigQuery table %s.%s.%s\", bqExport.Project, bqExport.Dataset, bqExport.Table)\n\t\treturn nil\n\tcase ok && apiErr.Code == http.StatusConflict:\n\t\t\/\/ Table just got created. This is fine.\n\t\treturn nil\n\tcase ok && apiErr.Code == http.StatusForbidden:\n\t\t\/\/ No create table permission.\n\t\treturn permanentInvocationTaskErrTag.Apply(err)\n\tdefault:\n\t\treturn err\n\t}\n}\n\ntype testVariantKey struct {\n\ttestID string\n\tvariantHash string\n}\n\n\/\/ queryExoneratedTestVariants reads exonerated test variants matching the predicate.\nfunc queryExoneratedTestVariants(ctx context.Context, txn *spanner.ReadOnlyTransaction, invIDs span.InvocationIDSet) (map[testVariantKey]struct{}, error) {\n\tst := spanner.NewStatement(`\n\t\tSELECT DISTINCT TestId, VariantHash,\n\t\tFROM TestExonerations\n\t\tWHERE InvocationId IN UNNEST(@invIDs)\n\t`)\n\tst.Params[\"invIDs\"] = invIDs\n\n\ttvs := map[testVariantKey]struct{}{}\n\tvar b span.Buffer\n\terr := span.Query(ctx, \"exonerated test variants\", txn, st, func(row *spanner.Row) error {\n\t\tvar key testVariantKey\n\t\tif err := b.FromSpanner(row, &key.testID, &key.variantHash); err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttvs[key] = struct{}{}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn tvs, nil\n}\n\nfunc generateBQRow(inv *pb.Invocation, tr *pb.TestResult, exonerated bool) *bq.Row {\n\treturn &bq.Row{\n\t\tMessage: &bqpb.TestResultRow{\n\t\t\tInvocation: &bqpb.TestResultRow_Invocation{\n\t\t\t\tId: string(span.MustParseInvocationName(inv.Name)),\n\t\t\t\tInterrupted: inv.Interrupted,\n\t\t\t\tTags: inv.Tags,\n\t\t\t},\n\t\t\tResult: tr,\n\t\t\tExoneration: &bqpb.TestResultRow_TestExoneration{\n\t\t\t\tExonerated: exonerated,\n\t\t\t},\n\t\t},\n\t\tInsertID: tr.Name,\n\t}\n}\n\nfunc queryTestResultsStreaming(ctx context.Context, txn *spanner.ReadOnlyTransaction, inv *pb.Invocation, q span.TestResultQuery, exoneratedTestVariants map[testVariantKey]struct{}, maxBatchSize int, batchC chan []*bq.Row) error {\n\trows := make([]*bq.Row, 0, maxBatchSize)\n\terr := span.QueryTestResultsStreaming(ctx, txn, q, func(tr *pb.TestResult, variantHash string) error {\n\t\ttrimTestResultForBigQuery(tr)\n\n\t\t_, exonerated := exoneratedTestVariants[testVariantKey{testID: tr.TestId, variantHash: variantHash}]\n\t\trows = append(rows, generateBQRow(inv, tr, exonerated))\n\t\tif len(rows) >= maxBatchSize {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn ctx.Err()\n\t\t\tcase batchC <- rows:\n\t\t\t}\n\t\t\trows = make([]*bq.Row, 0, maxBatchSize)\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(rows) > 0 {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tcase batchC <- rows:\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc batchExportRows(ctx context.Context, ins inserter, batchC chan []*bq.Row) error {\n\treturn parallel.WorkPool(10, func(workC chan<- func() error) {\n\t\tfor rows := range batchC {\n\t\t\trows := rows\n\t\t\tworkC <- func() error {\n\t\t\t\terr := ins.Put(ctx, rows)\n\t\t\t\tif apiErr, ok := err.(*googleapi.Error); ok && apiErr.Code == http.StatusForbidden {\n\t\t\t\t\terr = permanentInvocationTaskErrTag.Apply(err)\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t})\n}\n\n\/\/ exportTestResultsToBigQuery queries test results in Spanner then exports them to BigQuery.\nfunc exportTestResultsToBigQuery(ctx context.Context, ins inserter, invID span.InvocationID, bqExport *pb.BigQueryExport, maxBatchSize int) error {\n\ttxn := span.Client(ctx).ReadOnlyTransaction()\n\tdefer txn.Close()\n\n\tinv, err := span.ReadInvocationFull(ctx, txn, invID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif inv.State != pb.Invocation_FINALIZED {\n\t\treturn errors.Reason(\"%s is not finalized yet\", invID.Name()).Err()\n\t}\n\n\t\/\/ Get the invocation set.\n\tinvIDs, err := span.ReadReachableInvocations(ctx, txn, maxInvocationGraphSize, span.NewInvocationIDSet(invID))\n\tif err != nil {\n\t\tif span.TooManyInvocationsTag.In(err) {\n\t\t\terr = permanentInvocationTaskErrTag.Apply(err)\n\t\t}\n\t\treturn err\n\t}\n\n\texoneratedTestVariants, err := queryExoneratedTestVariants(ctx, txn, invIDs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Query test results and export to BigQuery.\n\tbatchC := make(chan []*bq.Row)\n\n\t\/\/ Batch exports rows to BigQuery.\n\teg, ctx := errgroup.WithContext(ctx)\n\n\teg.Go(func() error {\n\t\treturn batchExportRows(ctx, ins, batchC)\n\t})\n\n\tq := span.TestResultQuery{\n\t\tPredicate: bqExport.GetTestResults().GetPredicate(),\n\t\tInvocationIDs: invIDs,\n\t\tSelectVariantHash: true,\n\t}\n\teg.Go(func() error {\n\t\tdefer close(batchC)\n\t\treturn queryTestResultsStreaming(ctx, txn, inv, q, exoneratedTestVariants, maxBatchSize, batchC)\n\t})\n\n\treturn eg.Wait()\n}\n\n\/\/ exportResultsToBigQuery exports results of an invocation to a BigQuery table.\nfunc exportResultsToBigQuery(ctx context.Context, invID span.InvocationID, bqExport *pb.BigQueryExport) error {\n\tluciProject, err := getLUCIProject(ctx, invID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tclient, err := getBQClient(ctx, luciProject, bqExport)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer client.Close()\n\n\tif err := ensureBQTable(ctx, client, bqExport); err != nil {\n\t\treturn err\n\t}\n\n\tins := client.Dataset(bqExport.Dataset).Table(bqExport.Table).Inserter()\n\treturn exportTestResultsToBigQuery(ctx, ins, invID, bqExport, maxBatchSize)\n}\n\n\/\/ trimTestResultForBigQuery trims fields that should not go to BigQuery.\nfunc trimTestResultForBigQuery(tr *pb.TestResult) {\n\ttrimArts := func(arts []*pb.Artifact) {\n\t\tfor _, a := range arts {\n\t\t\ta.FetchUrl = \"\"\n\t\t}\n\t}\n\ttrimArts(tr.InputArtifacts)\n\ttrimArts(tr.OutputArtifacts)\n}\n<|endoftext|>"} {"text":"<commit_before>package cli_test\n\nimport \"testing\"\n\nfunc TestRunDetached(t *testing.T) {\n\trun(t, []Command{\n\t\tDeployCommand(\"latest\", \"v1\"),\n\t\t{\n\t\t\t\"run -d migration -a acme-inc\",\n\t\t\t\"Ran `migration` on acme-inc as run, detached.\",\n\t\t},\n\t})\n}\n\nfunc TestRunAttached(t *testing.T) {\n\trun(t, []Command{\n\t\tDeployCommand(\"latest\", \"v1\"),\n\t\t{\n\t\t\t\"run migration -a acme-inc\",\n\t\t\t\"Fake output for `migration` on acme-inc\",\n\t\t},\n\t})\n}\n<commit_msg>Disable run tests.<commit_after>package cli_test\n\nimport \"testing\"\n\nfunc testRunDetached(t *testing.T) {\n\trun(t, []Command{\n\t\tDeployCommand(\"latest\", \"v1\"),\n\t\t{\n\t\t\t\"run -d migration -a acme-inc\",\n\t\t\t\"Ran `migration` on acme-inc as run, detached.\",\n\t\t},\n\t})\n}\n\nfunc testRunAttached(t *testing.T) {\n\trun(t, []Command{\n\t\tDeployCommand(\"latest\", \"v1\"),\n\t\t{\n\t\t\t\"run migration -a acme-inc\",\n\t\t\t\"Fake output for `migration` on acme-inc\",\n\t\t},\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package web\n\nimport (\n\t\"github.com\/gin-gonic\/contrib\/gzip\"\n\t\"github.com\/gin-gonic\/contrib\/static\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"monitor\"\n\t\"time\"\n)\n\nvar active_monitor *monitor.Monitor\nvar start_time time.Time\n\nfunc New(m *monitor.Monitor) *gin.Engine {\n\t_ = gzip.Gzip\n\n\tactive_monitor = m\n\n\tgin.SetMode(gin.ReleaseMode)\n\n\tr := gin.Default()\n\tr.Use(gzip.Gzip(gzip.DefaultCompression))\n\tr.LoadHTMLGlob(\"src\/web\/templates\/*\")\n\n\tr.GET(\"\/\", getIndex)\n\tr.GET(\"\/api\/snapshots\", getSnapshots) \/\/, gzip.Gzip(gzip.DefaultCompression))\n\n\tr.Use(static.Serve(\"\/\", static.LocalFile(\"src\/web\/static\/\", false)))\n\n\tstart_time = time.Now()\n\n\treturn r\n}\n\nfunc getIndex(c *gin.Context) {\n\tc.HTML(200, \"index.html\", gin.H{\n\t\t\"StartTime\": start_time,\n\t\t\"Interval\": int64(active_monitor.Interval \/ 1e6),\n\t\t\"Mode\": gin.Mode(),\n\t})\n}\n\nfunc getSnapshots(c *gin.Context) {\n\tnow := time.Now()\n\n\trecent := active_monitor.GetRecentSnapshots().Filter(func(s monitor.Snapshot) bool {\n\t\tres := isTimestampInLast(s.Timestamp, now, 60*time.Second) ||\n\t\t\t(isTimestampInLast(s.Timestamp, now, 5*time.Minute) && isSignificantTimestamp(s.Timestamp, 10*time.Second)) ||\n\t\t\t(isTimestampInLast(s.Timestamp, now, 2*time.Hour) && isSignificantTimestamp(s.Timestamp, 10*time.Minute)) ||\n\t\t\t(isTimestampInLast(s.Timestamp, now, 24*time.Hour) && isSignificantTimestamp(s.Timestamp, 30*time.Minute))\n\n\t\treturn res\n\t})\n\n\tc.JSON(200, recent)\n}\n\nfunc isTimestampInLast(s, now time.Time, dur time.Duration) bool {\n\treturn now.Sub(s) < dur\n}\n\nfunc isSignificantTimestamp(s time.Time, frequency time.Duration) bool {\n\treturn s.UnixNano()%int64(frequency) < int64(active_monitor.Interval)\n}\n<commit_msg>Changing frequencies again<commit_after>package web\n\nimport (\n\t\"github.com\/gin-gonic\/contrib\/gzip\"\n\t\"github.com\/gin-gonic\/contrib\/static\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"monitor\"\n\t\"time\"\n)\n\nvar active_monitor *monitor.Monitor\nvar start_time time.Time\n\nfunc New(m *monitor.Monitor) *gin.Engine {\n\t_ = gzip.Gzip\n\n\tactive_monitor = m\n\n\tgin.SetMode(gin.ReleaseMode)\n\n\tr := gin.Default()\n\tr.Use(gzip.Gzip(gzip.DefaultCompression))\n\tr.LoadHTMLGlob(\"src\/web\/templates\/*\")\n\n\tr.GET(\"\/\", getIndex)\n\tr.GET(\"\/api\/snapshots\", getSnapshots) \/\/, gzip.Gzip(gzip.DefaultCompression))\n\n\tr.Use(static.Serve(\"\/\", static.LocalFile(\"src\/web\/static\/\", false)))\n\n\tstart_time = time.Now()\n\n\treturn r\n}\n\nfunc getIndex(c *gin.Context) {\n\tc.HTML(200, \"index.html\", gin.H{\n\t\t\"StartTime\": start_time,\n\t\t\"Interval\": int64(active_monitor.Interval \/ 1e6),\n\t\t\"Mode\": gin.Mode(),\n\t})\n}\n\nfunc getSnapshots(c *gin.Context) {\n\tnow := time.Now()\n\n\trecent := active_monitor.GetRecentSnapshots().Filter(func(s monitor.Snapshot) bool {\n\t\tres := isTimestampInLast(s.Timestamp, now, 60*time.Second) ||\n\t\t\t(isTimestampInLast(s.Timestamp, now, 5*time.Minute) && isSignificantTimestamp(s.Timestamp, 10*time.Second)) ||\n\t\t\t(isTimestampInLast(s.Timestamp, now, 2*time.Hour) && isSignificantTimestamp(s.Timestamp, 5*time.Minute)) ||\n\t\t\t(isTimestampInLast(s.Timestamp, now, 48*time.Hour) && isSignificantTimestamp(s.Timestamp, 30*time.Minute))\n\n\t\treturn res\n\t})\n\n\tc.JSON(200, recent)\n}\n\nfunc isTimestampInLast(s, now time.Time, dur time.Duration) bool {\n\treturn now.Sub(s) < dur\n}\n\nfunc isSignificantTimestamp(s time.Time, frequency time.Duration) bool {\n\treturn s.UnixNano()%int64(frequency) < int64(active_monitor.Interval)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Паттерн Мост (Bridge)\n\/\/\n\npackage bridge\n\n\/\/ Тип Carer, описывает интерфейс автомобиля\ntype Carer interface {\n\tRase() string \/\/ автомобиль умеет ездить (для этого и нужен двигатель)\n}\n\n\/\/ Тип Enginer, описывает интерфейс двигателя\n\/\/ Каждый двигатель должен его реализовать\ntype Enginer interface {\n\tGetSound() string \/\/ Возвращает звук двигателя\n}\n\n\/\/ Тип Car, реализует автомобиль\ntype Car struct {\n\tengine Enginer\n}\n\n\/\/ Машина едет\nfunc (self *Car) Rase() string {\n\treturn self.engine.GetSound()\n}\n\n\/\/ Тип EngineSuzuki, реализует двигатель Suzuki\ntype EngineSuzuki struct {\n}\n\n\/\/ Метод отвечает за завук двигателя\nfunc (self *EngineSuzuki) GetSound() string {\n\treturn \"SssuuuuZzzuuuuKkiiiii\"\n}\n\n\/\/ Тип EngineHonda, реализует двигатель Honda\ntype EngineHonda struct {\n}\n\n\/\/ Метод отвечает за завук двигателя\nfunc (self *EngineHonda) GetSound() string {\n\treturn \"HhoooNnnnnnnnnDddaaaaaaa\"\n}\n\n\/\/ Тип EngineLada, реализует двигатель АвтоВаза\ntype EngineLada struct {\n}\n\n\/\/ Метод отвечает за завук двигателя\nfunc (self *EngineLada) GetSound() string {\n\treturn \"PhhhhPhhhhPhPhPhPhPh\"\n}\n<commit_msg>Fixed typo<commit_after>\/\/ Паттерн Мост (Bridge)\n\/\/\n\npackage bridge\n\n\/\/ Тип Carer, описывает интерфейс автомобиля\ntype Carer interface {\n\tRase() string \/\/ автомобиль умеет ездить (для этого и нужен двигатель)\n}\n\n\/\/ Тип Enginer, описывает интерфейс двигателя\n\/\/ Каждый двигатель должен его реализовать\ntype Enginer interface {\n\tGetSound() string \/\/ Возвращает звук двигателя\n}\n\n\/\/ Тип Car, реализует автомобиль\ntype Car struct {\n\tengine Enginer\n}\n\n\/\/ Машина едет\nfunc (self *Car) Rase() string {\n\treturn self.engine.GetSound()\n}\n\n\/\/ Тип EngineSuzuki, реализует двигатель Suzuki\ntype EngineSuzuki struct {\n}\n\n\/\/ Метод отвечает за звук двигателя\nfunc (self *EngineSuzuki) GetSound() string {\n\treturn \"SssuuuuZzzuuuuKkiiiii\"\n}\n\n\/\/ Тип EngineHonda, реализует двигатель Honda\ntype EngineHonda struct {\n}\n\n\/\/ Метод отвечает за звук двигателя\nfunc (self *EngineHonda) GetSound() string {\n\treturn \"HhoooNnnnnnnnnDddaaaaaaa\"\n}\n\n\/\/ Тип EngineLada, реализует двигатель АвтоВаза\ntype EngineLada struct {\n}\n\n\/\/ Метод отвечает за звук двигателя\nfunc (self *EngineLada) GetSound() string {\n\treturn \"PhhhhPhhhhPhPhPhPhPh\"\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage misc\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\n\t\"github.com\/coreos\/mantle\/kola\/register\"\n\t\"github.com\/coreos\/mantle\/platform\"\n)\n\nfunc init() {\n\tregister.Register(®ister.Test{\n\t\tRun: VerityVerify,\n\t\tClusterSize: 1,\n\t\tName: \"coreos.verity.verify\",\n\t\tPlatforms: []string{\"qemu\", \"aws\", \"gce\"},\n\t})\n}\n\n\/\/ VerityVerify asserts that the filesystem mounted on \/usr matches the\n\/\/ dm-verity hash that is embedded in the CoreOS kernel.\nfunc VerityVerify(c platform.TestCluster) error {\n\tm := c.Machines()[0]\n\n\t\/\/ extract verity hash from kernel\n\thash, err := m.SSH(\"dd if=\/boot\/coreos\/vmlinuz-a skip=64 count=64 bs=1 2>\/dev\/null\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to extract verity hash from kernel: %v: %v\", hash, err)\n\t}\n\n\t\/\/ find \/usr dev\n\tusrdev, err := m.SSH(\"findmnt -no SOURCE \/usr\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to find device for \/usr: %v: %v\", usrdev, err)\n\t}\n\n\t\/\/ figure out partition size for hash dev offset\n\toffset, err := m.SSH(\"sudo e2size \" + string(usrdev))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to find \/usr partition size: %v: %v\", offset, err)\n\t}\n\n\toffset = bytes.TrimSpace(offset)\n\tveritycmd := fmt.Sprintf(\"sudo veritysetup verify --verbose --hash-offset=%s %s %s %s\", offset, usrdev, usrdev, hash)\n\n\tverify, err := m.SSH(veritycmd)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"verity hash verification on %s failed: %v: %v\", usrdev, verify, err)\n\t}\n\n\treturn nil\n}\n<commit_msg>kola\/tests\/misc: implement coreos.verity.corruption<commit_after>\/\/ Copyright 2016 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage misc\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/coreos\/mantle\/kola\/register\"\n\t\"github.com\/coreos\/mantle\/platform\"\n)\n\nfunc init() {\n\tregister.Register(®ister.Test{\n\t\tRun: VerityVerify,\n\t\tClusterSize: 1,\n\t\tName: \"coreos.verity.verify\",\n\t\tPlatforms: []string{\"qemu\", \"aws\", \"gce\"},\n\t})\n\tregister.Register(®ister.Test{\n\t\tRun: VerityCorruption,\n\t\tClusterSize: 1,\n\t\tName: \"coreos.verity.corruption\",\n\t\tPlatforms: []string{\"qemu\", \"aws\", \"gce\"},\n\t})\n}\n\n\/\/ Verity verification tests.\n\/\/ TODO(mischief): seems like a good candidate for kolet.\n\n\/\/ VerityVerify asserts that the filesystem mounted on \/usr matches the\n\/\/ dm-verity hash that is embedded in the CoreOS kernel.\nfunc VerityVerify(c platform.TestCluster) error {\n\tm := c.Machines()[0]\n\n\t\/\/ extract verity hash from kernel\n\thash, err := m.SSH(\"dd if=\/boot\/coreos\/vmlinuz-a skip=64 count=64 bs=1 2>\/dev\/null\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to extract verity hash from kernel: %v: %v\", hash, err)\n\t}\n\n\t\/\/ find \/usr dev\n\tusrdev, err := m.SSH(\"findmnt -no SOURCE \/usr\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to find device for \/usr: %v: %v\", usrdev, err)\n\t}\n\n\t\/\/ XXX: if the \/usr dev is \/dev\/mapper\/usr, we're on a verity enabled\n\t\/\/ image, so use dmsetup to find the real device.\n\tif strings.TrimSpace(string(usrdev)) == \"\/dev\/mapper\/usr\" {\n\t\tusrdev, err = m.SSH(\"echo -n \/dev\/$(sudo dmsetup info --noheadings -Co blkdevs_used usr)\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to find device for \/usr: %v: %v\", usrdev, err)\n\t\t}\n\t}\n\n\t\/\/ figure out partition size for hash dev offset\n\toffset, err := m.SSH(\"sudo e2size \" + string(usrdev))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to find \/usr partition size: %v: %v\", offset, err)\n\t}\n\n\toffset = bytes.TrimSpace(offset)\n\tveritycmd := fmt.Sprintf(\"sudo veritysetup verify --verbose --hash-offset=%s %s %s %s\", offset, usrdev, usrdev, hash)\n\n\tverify, err := m.SSH(veritycmd)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"verity hash verification on %s failed: %v: %v\", usrdev, verify, err)\n\t}\n\n\treturn nil\n}\n\n\/\/ VerityCorruption asserts that a machine will fail to read a file from a\n\/\/ verify filesystem whose blocks have been modified.\nfunc VerityCorruption(c platform.TestCluster) error {\n\tm := c.Machines()[0]\n\t\/\/ figure out if we are actually using verity\n\tout, err := m.SSH(\"sudo veritysetup status usr\")\n\tif err != nil && bytes.Equal(out, []byte(\"\/dev\/mapper\/usr is inactive.\")) {\n\t\t\/\/ verity not in use, so skip.\n\t\treturn register.Skip\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"failed checking verity status: %s: %v\", out, err)\n\t}\n\n\t\/\/ assert that dm shows verity is in use and the device is valid (V)\n\tout, err = m.SSH(\"sudo dmsetup --target verity status usr\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed checking dmsetup status of usr: %s: %v\", out, err)\n\t}\n\n\tfields := strings.Fields(string(out))\n\tif len(fields) != 4 {\n\t\treturn fmt.Errorf(\"failed checking dmsetup status of usr: not enough fields in output (got %d)\", len(fields))\n\t}\n\n\tif fields[3] != \"V\" {\n\t\treturn fmt.Errorf(\"dmsetup status usr reports verity is not valid!\")\n\t}\n\n\t\/\/ corrupt a file on disk and flush disk caches.\n\t\/\/ try setting NAME=CoreOS to NAME=LulzOS in \/usr\/lib\/os-release\n\n\t\/\/ get usr device, probably vda3\n\tusrdev, err := m.SSH(\"echo \/dev\/$(sudo dmsetup info --noheadings -Co blkdevs_used usr)\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed getting \/usr device from dmsetup: %s: %v\", out, err)\n\t}\n\n\t\/\/ poke bytes into \/usr\/lib\/os-release\n\tout, err = m.SSH(fmt.Sprintf(`echo NAME=LulzOS | sudo dd of=%s seek=$(expr $(sudo debugfs -R \"blocks \/lib\/os-release\" %s 2>\/dev\/null) \\* 4096) bs=1 2>\/dev\/null`, usrdev, usrdev))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed overwriting disk block: %s: %v\", out, err)\n\t}\n\n\t\/\/ make sure we flush everything so cat has to go through to the device backing verity.\n\tout, err = m.SSH(\"sudo \/bin\/sh -c 'sync; echo -n 3 >\/proc\/sys\/vm\/drop_caches'\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed dropping disk caches: %s: %v\", out, err)\n\t}\n\n\t\/\/ read the file back. if we can read it successfully, verity did not do its job.\n\tout, err = m.SSH(\"cat \/usr\/lib\/os-release\")\n\tif err == nil {\n\t\treturn fmt.Errorf(\"verity did not prevent reading a corrupted file!\")\n\t}\n\n\t\/\/ assert that dm shows verity device is now corrupted (C)\n\tout, err = m.SSH(\"sudo dmsetup --target verity status usr\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed checking dmsetup status of usr: %s: %v\", out, err)\n\t}\n\n\tfields = strings.Fields(string(out))\n\tif len(fields) != 4 {\n\t\treturn fmt.Errorf(\"failed checking dmsetup status of usr: not enough fields in output (got %d)\", len(fields))\n\t}\n\n\tif fields[3] != \"C\" {\n\t\treturn fmt.Errorf(\"dmsetup status usr reports verity is valid after corruption!\")\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package bsdiff\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\thumanize \"github.com\/dustin\/go-humanize\"\n\t\"github.com\/itchio\/wharf\/state\"\n)\n\ntype chunk struct {\n\taddOldStart int\n\taddNewStart int\n\taddLength int\n\tcopyStart int\n\tcopyEnd int\n\toffset int\n\teoc bool\n}\n\ntype SendChunkFunc func(c chunk)\n\ntype blockWorkerState struct {\n\tconsumed chan bool\n\twork chan int\n\tchunks chan chunk\n}\n\nfunc (ctx *DiffContext) doPartitioned(obuf []byte, obuflen int, nbuf []byte, nbuflen int, memstats *runtime.MemStats, writeMessage WriteMessageFunc, consumer *state.Consumer) error {\n\tvar err error\n\n\tpartitions := ctx.Partitions\n\tif partitions >= len(obuf)-1 {\n\t\tpartitions = 1\n\t}\n\n\tconsumer.ProgressLabel(fmt.Sprintf(\"Sorting %s...\", humanize.IBytes(uint64(obuflen))))\n\tconsumer.Progress(0.0)\n\n\tstartTime := time.Now()\n\n\tpmemstats := &runtime.MemStats{}\n\truntime.ReadMemStats(pmemstats)\n\toldAlloc := pmemstats.TotalAlloc\n\n\tif ctx.I == nil {\n\t\tctx.I = make([]int, len(obuf))\n\t\tbeforeAlloc := time.Now()\n\t\tfmt.Fprintf(os.Stderr, \"\\nAllocated %d-int I in %s\\n\", len(obuf), time.Since(beforeAlloc))\n\t} else {\n\t\tfor len(ctx.I) < len(obuf) {\n\t\t\tlenBefore := len(ctx.I)\n\t\t\tbeforeAlloc := time.Now()\n\t\t\tctx.I = make([]int, len(obuf))\n\t\t\tfmt.Fprintf(os.Stderr, \"\\nGrown I from %d to %d in %s\\n\", lenBefore, len(ctx.I), time.Since(beforeAlloc))\n\t\t}\n\t}\n\n\tpsa := NewPSA(partitions, obuf, ctx.I)\n\n\truntime.ReadMemStats(pmemstats)\n\tnewAlloc := pmemstats.TotalAlloc\n\tfmt.Fprintf(os.Stderr, \"\\nAlloc difference after PSA: %s. Size of I: %s\\n\", humanize.IBytes(uint64(newAlloc-oldAlloc)), humanize.IBytes(uint64(8*len(psa.I))))\n\n\tif ctx.Stats != nil {\n\t\tctx.Stats.TimeSpentSorting += time.Since(startTime)\n\t}\n\n\tif ctx.MeasureMem {\n\t\truntime.ReadMemStats(memstats)\n\t\tfmt.Fprintf(os.Stderr, \"\\nAllocated bytes after qsufsort: %s (%s total)\", humanize.IBytes(uint64(memstats.Alloc)), humanize.IBytes(uint64(memstats.TotalAlloc)))\n\t}\n\n\tbsdc := &Control{}\n\n\tconsumer.ProgressLabel(fmt.Sprintf(\"Preparing to scan %s...\", humanize.IBytes(uint64(nbuflen))))\n\tconsumer.Progress(0.0)\n\n\tstartTime = time.Now()\n\n\tvar waitConsumeTime int64\n\tvar waitEnqueueTime int64\n\tvar idleWorkerTime int64\n\tvar sendingWorkerTime int64\n\n\tanalyzeBlock := func(nbuflen int, nbuf []byte, offset int, chunks chan chunk) {\n\t\tvar lenf int\n\n\t\t\/\/ Compute the differences, writing ctrl as we go\n\t\tvar scan, pos, length int\n\t\tvar lastscan, lastpos, lastoffset int\n\n\t\tfor scan < nbuflen {\n\t\t\tvar oldscore int\n\t\t\tscan += length\n\n\t\t\tfor scsc := scan; scan < nbuflen; scan++ {\n\t\t\t\tpos, length = psa.search(nbuf[scan:])\n\n\t\t\t\tfor ; scsc < scan+length; scsc++ {\n\t\t\t\t\tif scsc+lastoffset < obuflen &&\n\t\t\t\t\t\tobuf[scsc+lastoffset] == nbuf[scsc] {\n\t\t\t\t\t\toldscore++\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (length == oldscore && length != 0) || length > oldscore+8 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tif scan+lastoffset < obuflen && obuf[scan+lastoffset] == nbuf[scan] {\n\t\t\t\t\toldscore--\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif length != oldscore || scan == nbuflen {\n\t\t\t\tvar s, Sf int\n\t\t\t\tlenf = 0\n\t\t\t\tfor i := int(0); lastscan+i < scan && lastpos+i < obuflen; {\n\t\t\t\t\tif obuf[lastpos+i] == nbuf[lastscan+i] {\n\t\t\t\t\t\ts++\n\t\t\t\t\t}\n\t\t\t\t\ti++\n\t\t\t\t\tif s*2-i > Sf*2-lenf {\n\t\t\t\t\t\tSf = s\n\t\t\t\t\t\tlenf = i\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tlenb := 0\n\t\t\t\tif scan < nbuflen {\n\t\t\t\t\tvar s, Sb int\n\t\t\t\t\tfor i := int(1); (scan >= lastscan+i) && (pos >= i); i++ {\n\t\t\t\t\t\tif obuf[pos-i] == nbuf[scan-i] {\n\t\t\t\t\t\t\ts++\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif s*2-i > Sb*2-lenb {\n\t\t\t\t\t\t\tSb = s\n\t\t\t\t\t\t\tlenb = i\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif lastscan+lenf > scan-lenb {\n\t\t\t\t\toverlap := (lastscan + lenf) - (scan - lenb)\n\t\t\t\t\ts := int(0)\n\t\t\t\t\tSs := int(0)\n\t\t\t\t\tlens := int(0)\n\t\t\t\t\tfor i := int(0); i < overlap; i++ {\n\t\t\t\t\t\tif nbuf[lastscan+lenf-overlap+i] == obuf[lastpos+lenf-overlap+i] {\n\t\t\t\t\t\t\ts++\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif nbuf[scan-lenb+i] == obuf[pos-lenb+i] {\n\t\t\t\t\t\t\ts--\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif s > Ss {\n\t\t\t\t\t\t\tSs = s\n\t\t\t\t\t\t\tlens = i + 1\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tlenf += lens - overlap\n\t\t\t\t\tlenb -= lens\n\t\t\t\t}\n\n\t\t\t\tc := chunk{\n\t\t\t\t\taddOldStart: lastpos,\n\t\t\t\t\taddNewStart: lastscan,\n\t\t\t\t\taddLength: lenf,\n\t\t\t\t\tcopyStart: lastscan + lenf,\n\t\t\t\t\tcopyEnd: scan - lenb,\n\t\t\t\t\toffset: offset,\n\t\t\t\t}\n\n\t\t\t\tif c.addLength > 0 || (c.copyEnd != c.copyStart) {\n\t\t\t\t\t\/\/ if not a no-op, send\n\t\t\t\t\tbeforeSend := time.Now()\n\t\t\t\t\tchunks <- c\n\t\t\t\t\tatomic.AddInt64(&sendingWorkerTime, int64(time.Since(beforeSend)))\n\t\t\t\t}\n\n\t\t\t\tlastscan = scan - lenb\n\t\t\t\tlastpos = pos - lenb\n\t\t\t\tlastoffset = pos - scan\n\t\t\t}\n\t\t}\n\n\t\tbeforeSend := time.Now()\n\t\tchunks <- chunk{eoc: true}\n\t\tatomic.AddInt64(&sendingWorkerTime, int64(time.Since(beforeSend)))\n\t}\n\n\tblockSize := 256 * 1024\n\tnumBlocks := (nbuflen + blockSize - 1) \/ blockSize\n\n\tif numBlocks < partitions {\n\t\tblockSize = nbuflen \/ partitions\n\t\tnumBlocks = (nbuflen + blockSize - 1) \/ blockSize\n\t}\n\n\tnumWorkers := partitions * 8\n\n\t\/\/ fmt.Fprintf(os.Stderr, \"Divvying %s in %d block(s) of %s (with %d workers)\\n\",\n\t\/\/ \thumanize.IBytes(uint64(nbuflen)),\n\t\/\/ \tnumBlocks,\n\t\/\/ \thumanize.IBytes(uint64(blockSize)),\n\t\/\/ \tpartitions,\n\t\/\/ )\n\n\tblockWorkersState := make([]blockWorkerState, numWorkers)\n\n\t\/\/ initialize all channels\n\tfor i := 0; i < numWorkers; i++ {\n\t\tblockWorkersState[i].work = make(chan int, 1)\n\t\tblockWorkersState[i].chunks = make(chan chunk, 256)\n\t\tblockWorkersState[i].consumed = make(chan bool, 1)\n\t\tblockWorkersState[i].consumed <- true\n\t}\n\n\tfor i := 0; i < numWorkers; i++ {\n\t\tgo func(workerState blockWorkerState, workerIndex int) {\n\t\t\tlastWorkTime := time.Now()\n\t\t\tfor blockIndex := range workerState.work {\n\t\t\t\tatomic.AddInt64(&idleWorkerTime, int64(time.Since(lastWorkTime)))\n\n\t\t\t\t\/\/ fmt.Fprintf(os.Stderr, \"\\nWorker %d should analyze block %d\", workerIndex, blockIndex)\n\t\t\t\tboundary := blockSize * blockIndex\n\t\t\t\trealBlockSize := blockSize\n\t\t\t\tif blockIndex == numBlocks-1 {\n\t\t\t\t\trealBlockSize = nbuflen - boundary\n\t\t\t\t}\n\t\t\t\t\/\/ fmt.Fprintf(os.Stderr, \"Analyzing %s block at %d\\n\", humanize.IBytes(uint64(realBlockSize)), i)\n\n\t\t\t\tanalyzeBlock(realBlockSize, nbuf[boundary:boundary+realBlockSize], boundary, workerState.chunks)\n\t\t\t\t\/\/ fmt.Fprintf(os.Stderr, \"\\nWorker %d done analyzing block %d\", workerIndex, blockIndex)\n\n\t\t\t\tlastWorkTime = time.Now()\n\t\t\t}\n\t\t}(blockWorkersState[i], i)\n\t}\n\n\tgo func() {\n\t\tworkerIndex := 0\n\n\t\tfor i := 0; i < numBlocks; i++ {\n\t\t\tbeforeConsume := time.Now()\n\t\t\t<-blockWorkersState[workerIndex].consumed\n\t\t\tatomic.AddInt64(&waitConsumeTime, int64(time.Since(beforeConsume)))\n\n\t\t\tbeforeEnqueue := time.Now()\n\t\t\tblockWorkersState[workerIndex].work <- i\n\t\t\tatomic.AddInt64(&waitEnqueueTime, int64(time.Since(beforeEnqueue)))\n\n\t\t\tworkerIndex = (workerIndex + 1) % numWorkers\n\t\t}\n\n\t\tfor workerIndex := 0; workerIndex < numWorkers; workerIndex++ {\n\t\t\tclose(blockWorkersState[workerIndex].work)\n\t\t}\n\t\t\/\/ fmt.Fprintf(os.Stderr, \"Sent all blockworks\\n\")\n\t}()\n\n\tif ctx.MeasureMem {\n\t\truntime.ReadMemStats(memstats)\n\t\tfmt.Fprintf(os.Stderr, \"\\nAllocated bytes after scan-prepare: %s (%s total)\", humanize.IBytes(uint64(memstats.Alloc)), humanize.IBytes(uint64(memstats.TotalAlloc)))\n\t}\n\n\tvar prevChunk chunk\n\tfirst := true\n\n\tconsumer.ProgressLabel(fmt.Sprintf(\"Scanning %s (%d blocks of %s)...\", humanize.IBytes(uint64(nbuflen)), numBlocks, humanize.IBytes(uint64(blockSize))))\n\n\tworkerIndex := 0\n\tfor blockIndex := 0; blockIndex < numBlocks; blockIndex++ {\n\t\t\/\/ fmt.Fprintf(os.Stderr, \"\\nWaiting on worker %d for block %d\", workerIndex, blockIndex)\n\t\tconsumer.Progress(float64(blockIndex) \/ float64(numBlocks))\n\t\tstate := blockWorkersState[workerIndex]\n\n\t\tfor chunk := range state.chunks {\n\t\t\t\/\/ fmt.Fprintf(os.Stderr, \"\\nFor block %d, received chunk %#v\", blockIndex, chunk)\n\t\t\tif chunk.eoc {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif first {\n\t\t\t\tfirst = false\n\t\t\t} else {\n\t\t\t\tbsdc.Seek = int64(chunk.addOldStart - (prevChunk.addOldStart + prevChunk.addLength))\n\n\t\t\t\t\/\/ fmt.Fprintf(os.Stderr, \"%d bytes add, %d bytes copy\\n\", len(bsdc.Add), len(bsdc.Copy))\n\n\t\t\t\terr := writeMessage(bsdc)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tctx.db.Reset()\n\t\t\tctx.db.Grow(chunk.addLength)\n\n\t\t\taddNewStart := chunk.addNewStart + chunk.offset\n\n\t\t\tfor i := 0; i < chunk.addLength; i++ {\n\t\t\t\tctx.db.WriteByte(nbuf[addNewStart+i] - obuf[chunk.addOldStart+i])\n\t\t\t}\n\n\t\t\tbsdc.Add = ctx.db.Bytes()\n\t\t\tbsdc.Copy = nbuf[chunk.offset+chunk.copyStart : chunk.offset+chunk.copyEnd]\n\n\t\t\tif ctx.Stats != nil && ctx.Stats.BiggestAdd < int64(len(bsdc.Add)) {\n\t\t\t\tctx.Stats.BiggestAdd = int64(len(bsdc.Add))\n\t\t\t}\n\n\t\t\tprevChunk = chunk\n\t\t}\n\n\t\tstate.consumed <- true\n\t\tworkerIndex = (workerIndex + 1) % numWorkers\n\t}\n\n\t\/\/ fmt.Fprintf(os.Stderr, \"%d bytes add, %d bytes copy\\n\", len(bsdc.Add), len(bsdc.Copy))\n\n\tbsdc.Seek = 0\n\terr = writeMessage(bsdc)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ctx.Stats != nil {\n\t\tctx.Stats.TimeSpentScanning += time.Since(startTime)\n\t}\n\n\tif ctx.MeasureMem {\n\t\truntime.ReadMemStats(memstats)\n\t\tconsumer.Debugf(\"\\nAllocated bytes after scan: %s (%s total)\", humanize.IBytes(uint64(memstats.Alloc)), humanize.IBytes(uint64(memstats.TotalAlloc)))\n\t\tfmt.Fprintf(os.Stderr, \"\\nAllocated bytes after scan: %s (%s total)\", humanize.IBytes(uint64(memstats.Alloc)), humanize.IBytes(uint64(memstats.TotalAlloc)))\n\t}\n\n\tfmt.Fprintf(os.Stderr, \"\\nStats: waitConsume %s, waitEnqueue %s, idleWorker %s, sendingWorker %s\\n\",\n\t\ttime.Duration(waitConsumeTime),\n\t\ttime.Duration(waitEnqueueTime),\n\t\ttime.Duration(idleWorkerTime),\n\t\ttime.Duration(sendingWorkerTime),\n\t)\n\n\tbsdc.Reset()\n\tbsdc.Eof = true\n\terr = writeMessage(bsdc)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>up block size to 1MB<commit_after>package bsdiff\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\thumanize \"github.com\/dustin\/go-humanize\"\n\t\"github.com\/itchio\/wharf\/state\"\n)\n\ntype chunk struct {\n\taddOldStart int\n\taddNewStart int\n\taddLength int\n\tcopyStart int\n\tcopyEnd int\n\toffset int\n\teoc bool\n}\n\ntype SendChunkFunc func(c chunk)\n\ntype blockWorkerState struct {\n\tconsumed chan bool\n\twork chan int\n\tchunks chan chunk\n}\n\nfunc (ctx *DiffContext) doPartitioned(obuf []byte, obuflen int, nbuf []byte, nbuflen int, memstats *runtime.MemStats, writeMessage WriteMessageFunc, consumer *state.Consumer) error {\n\tvar err error\n\n\tpartitions := ctx.Partitions\n\tif partitions >= len(obuf)-1 {\n\t\tpartitions = 1\n\t}\n\n\tconsumer.ProgressLabel(fmt.Sprintf(\"Sorting %s...\", humanize.IBytes(uint64(obuflen))))\n\tconsumer.Progress(0.0)\n\n\tstartTime := time.Now()\n\n\tpmemstats := &runtime.MemStats{}\n\truntime.ReadMemStats(pmemstats)\n\toldAlloc := pmemstats.TotalAlloc\n\n\tif ctx.I == nil {\n\t\tctx.I = make([]int, len(obuf))\n\t\tbeforeAlloc := time.Now()\n\t\tfmt.Fprintf(os.Stderr, \"\\nAllocated %d-int I in %s\\n\", len(obuf), time.Since(beforeAlloc))\n\t} else {\n\t\tfor len(ctx.I) < len(obuf) {\n\t\t\tlenBefore := len(ctx.I)\n\t\t\tbeforeAlloc := time.Now()\n\t\t\tctx.I = make([]int, len(obuf))\n\t\t\tfmt.Fprintf(os.Stderr, \"\\nGrown I from %d to %d in %s\\n\", lenBefore, len(ctx.I), time.Since(beforeAlloc))\n\t\t}\n\t}\n\n\tpsa := NewPSA(partitions, obuf, ctx.I)\n\n\truntime.ReadMemStats(pmemstats)\n\tnewAlloc := pmemstats.TotalAlloc\n\tfmt.Fprintf(os.Stderr, \"\\nAlloc difference after PSA: %s. Size of I: %s\\n\", humanize.IBytes(uint64(newAlloc-oldAlloc)), humanize.IBytes(uint64(8*len(psa.I))))\n\n\tif ctx.Stats != nil {\n\t\tctx.Stats.TimeSpentSorting += time.Since(startTime)\n\t}\n\n\tif ctx.MeasureMem {\n\t\truntime.ReadMemStats(memstats)\n\t\tfmt.Fprintf(os.Stderr, \"\\nAllocated bytes after qsufsort: %s (%s total)\", humanize.IBytes(uint64(memstats.Alloc)), humanize.IBytes(uint64(memstats.TotalAlloc)))\n\t}\n\n\tbsdc := &Control{}\n\n\tconsumer.ProgressLabel(fmt.Sprintf(\"Preparing to scan %s...\", humanize.IBytes(uint64(nbuflen))))\n\tconsumer.Progress(0.0)\n\n\tstartTime = time.Now()\n\n\tvar waitConsumeTime int64\n\tvar waitEnqueueTime int64\n\tvar idleWorkerTime int64\n\tvar sendingWorkerTime int64\n\n\tanalyzeBlock := func(nbuflen int, nbuf []byte, offset int, chunks chan chunk) {\n\t\tvar lenf int\n\n\t\t\/\/ Compute the differences, writing ctrl as we go\n\t\tvar scan, pos, length int\n\t\tvar lastscan, lastpos, lastoffset int\n\n\t\tfor scan < nbuflen {\n\t\t\tvar oldscore int\n\t\t\tscan += length\n\n\t\t\tfor scsc := scan; scan < nbuflen; scan++ {\n\t\t\t\tpos, length = psa.search(nbuf[scan:])\n\n\t\t\t\tfor ; scsc < scan+length; scsc++ {\n\t\t\t\t\tif scsc+lastoffset < obuflen &&\n\t\t\t\t\t\tobuf[scsc+lastoffset] == nbuf[scsc] {\n\t\t\t\t\t\toldscore++\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (length == oldscore && length != 0) || length > oldscore+8 {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tif scan+lastoffset < obuflen && obuf[scan+lastoffset] == nbuf[scan] {\n\t\t\t\t\toldscore--\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif length != oldscore || scan == nbuflen {\n\t\t\t\tvar s, Sf int\n\t\t\t\tlenf = 0\n\t\t\t\tfor i := int(0); lastscan+i < scan && lastpos+i < obuflen; {\n\t\t\t\t\tif obuf[lastpos+i] == nbuf[lastscan+i] {\n\t\t\t\t\t\ts++\n\t\t\t\t\t}\n\t\t\t\t\ti++\n\t\t\t\t\tif s*2-i > Sf*2-lenf {\n\t\t\t\t\t\tSf = s\n\t\t\t\t\t\tlenf = i\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tlenb := 0\n\t\t\t\tif scan < nbuflen {\n\t\t\t\t\tvar s, Sb int\n\t\t\t\t\tfor i := int(1); (scan >= lastscan+i) && (pos >= i); i++ {\n\t\t\t\t\t\tif obuf[pos-i] == nbuf[scan-i] {\n\t\t\t\t\t\t\ts++\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif s*2-i > Sb*2-lenb {\n\t\t\t\t\t\t\tSb = s\n\t\t\t\t\t\t\tlenb = i\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif lastscan+lenf > scan-lenb {\n\t\t\t\t\toverlap := (lastscan + lenf) - (scan - lenb)\n\t\t\t\t\ts := int(0)\n\t\t\t\t\tSs := int(0)\n\t\t\t\t\tlens := int(0)\n\t\t\t\t\tfor i := int(0); i < overlap; i++ {\n\t\t\t\t\t\tif nbuf[lastscan+lenf-overlap+i] == obuf[lastpos+lenf-overlap+i] {\n\t\t\t\t\t\t\ts++\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif nbuf[scan-lenb+i] == obuf[pos-lenb+i] {\n\t\t\t\t\t\t\ts--\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif s > Ss {\n\t\t\t\t\t\t\tSs = s\n\t\t\t\t\t\t\tlens = i + 1\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tlenf += lens - overlap\n\t\t\t\t\tlenb -= lens\n\t\t\t\t}\n\n\t\t\t\tc := chunk{\n\t\t\t\t\taddOldStart: lastpos,\n\t\t\t\t\taddNewStart: lastscan,\n\t\t\t\t\taddLength: lenf,\n\t\t\t\t\tcopyStart: lastscan + lenf,\n\t\t\t\t\tcopyEnd: scan - lenb,\n\t\t\t\t\toffset: offset,\n\t\t\t\t}\n\n\t\t\t\tif c.addLength > 0 || (c.copyEnd != c.copyStart) {\n\t\t\t\t\t\/\/ if not a no-op, send\n\t\t\t\t\tbeforeSend := time.Now()\n\t\t\t\t\tchunks <- c\n\t\t\t\t\tatomic.AddInt64(&sendingWorkerTime, int64(time.Since(beforeSend)))\n\t\t\t\t}\n\n\t\t\t\tlastscan = scan - lenb\n\t\t\t\tlastpos = pos - lenb\n\t\t\t\tlastoffset = pos - scan\n\t\t\t}\n\t\t}\n\n\t\tbeforeSend := time.Now()\n\t\tchunks <- chunk{eoc: true}\n\t\tatomic.AddInt64(&sendingWorkerTime, int64(time.Since(beforeSend)))\n\t}\n\n\tblockSize := 1024 * 1024\n\tnumBlocks := (nbuflen + blockSize - 1) \/ blockSize\n\n\tif numBlocks < partitions {\n\t\tblockSize = nbuflen \/ partitions\n\t\tnumBlocks = (nbuflen + blockSize - 1) \/ blockSize\n\t}\n\n\tnumWorkers := partitions * 8\n\n\t\/\/ fmt.Fprintf(os.Stderr, \"Divvying %s in %d block(s) of %s (with %d workers)\\n\",\n\t\/\/ \thumanize.IBytes(uint64(nbuflen)),\n\t\/\/ \tnumBlocks,\n\t\/\/ \thumanize.IBytes(uint64(blockSize)),\n\t\/\/ \tpartitions,\n\t\/\/ )\n\n\tblockWorkersState := make([]blockWorkerState, numWorkers)\n\n\t\/\/ initialize all channels\n\tfor i := 0; i < numWorkers; i++ {\n\t\tblockWorkersState[i].work = make(chan int, 1)\n\t\tblockWorkersState[i].chunks = make(chan chunk, 256)\n\t\tblockWorkersState[i].consumed = make(chan bool, 1)\n\t\tblockWorkersState[i].consumed <- true\n\t}\n\n\tfor i := 0; i < numWorkers; i++ {\n\t\tgo func(workerState blockWorkerState, workerIndex int) {\n\t\t\tlastWorkTime := time.Now()\n\t\t\tfor blockIndex := range workerState.work {\n\t\t\t\tatomic.AddInt64(&idleWorkerTime, int64(time.Since(lastWorkTime)))\n\n\t\t\t\t\/\/ fmt.Fprintf(os.Stderr, \"\\nWorker %d should analyze block %d\", workerIndex, blockIndex)\n\t\t\t\tboundary := blockSize * blockIndex\n\t\t\t\trealBlockSize := blockSize\n\t\t\t\tif blockIndex == numBlocks-1 {\n\t\t\t\t\trealBlockSize = nbuflen - boundary\n\t\t\t\t}\n\t\t\t\t\/\/ fmt.Fprintf(os.Stderr, \"Analyzing %s block at %d\\n\", humanize.IBytes(uint64(realBlockSize)), i)\n\n\t\t\t\tanalyzeBlock(realBlockSize, nbuf[boundary:boundary+realBlockSize], boundary, workerState.chunks)\n\t\t\t\t\/\/ fmt.Fprintf(os.Stderr, \"\\nWorker %d done analyzing block %d\", workerIndex, blockIndex)\n\n\t\t\t\tlastWorkTime = time.Now()\n\t\t\t}\n\t\t}(blockWorkersState[i], i)\n\t}\n\n\tgo func() {\n\t\tworkerIndex := 0\n\n\t\tfor i := 0; i < numBlocks; i++ {\n\t\t\tbeforeConsume := time.Now()\n\t\t\t<-blockWorkersState[workerIndex].consumed\n\t\t\tatomic.AddInt64(&waitConsumeTime, int64(time.Since(beforeConsume)))\n\n\t\t\tbeforeEnqueue := time.Now()\n\t\t\tblockWorkersState[workerIndex].work <- i\n\t\t\tatomic.AddInt64(&waitEnqueueTime, int64(time.Since(beforeEnqueue)))\n\n\t\t\tworkerIndex = (workerIndex + 1) % numWorkers\n\t\t}\n\n\t\tfor workerIndex := 0; workerIndex < numWorkers; workerIndex++ {\n\t\t\tclose(blockWorkersState[workerIndex].work)\n\t\t}\n\t\t\/\/ fmt.Fprintf(os.Stderr, \"Sent all blockworks\\n\")\n\t}()\n\n\tif ctx.MeasureMem {\n\t\truntime.ReadMemStats(memstats)\n\t\tfmt.Fprintf(os.Stderr, \"\\nAllocated bytes after scan-prepare: %s (%s total)\", humanize.IBytes(uint64(memstats.Alloc)), humanize.IBytes(uint64(memstats.TotalAlloc)))\n\t}\n\n\tvar prevChunk chunk\n\tfirst := true\n\n\tconsumer.ProgressLabel(fmt.Sprintf(\"Scanning %s (%d blocks of %s)...\", humanize.IBytes(uint64(nbuflen)), numBlocks, humanize.IBytes(uint64(blockSize))))\n\n\tworkerIndex := 0\n\tfor blockIndex := 0; blockIndex < numBlocks; blockIndex++ {\n\t\t\/\/ fmt.Fprintf(os.Stderr, \"\\nWaiting on worker %d for block %d\", workerIndex, blockIndex)\n\t\tconsumer.Progress(float64(blockIndex) \/ float64(numBlocks))\n\t\tstate := blockWorkersState[workerIndex]\n\n\t\tfor chunk := range state.chunks {\n\t\t\t\/\/ fmt.Fprintf(os.Stderr, \"\\nFor block %d, received chunk %#v\", blockIndex, chunk)\n\t\t\tif chunk.eoc {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif first {\n\t\t\t\tfirst = false\n\t\t\t} else {\n\t\t\t\tbsdc.Seek = int64(chunk.addOldStart - (prevChunk.addOldStart + prevChunk.addLength))\n\n\t\t\t\t\/\/ fmt.Fprintf(os.Stderr, \"%d bytes add, %d bytes copy\\n\", len(bsdc.Add), len(bsdc.Copy))\n\n\t\t\t\terr := writeMessage(bsdc)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tctx.db.Reset()\n\t\t\tctx.db.Grow(chunk.addLength)\n\n\t\t\taddNewStart := chunk.addNewStart + chunk.offset\n\n\t\t\tfor i := 0; i < chunk.addLength; i++ {\n\t\t\t\tctx.db.WriteByte(nbuf[addNewStart+i] - obuf[chunk.addOldStart+i])\n\t\t\t}\n\n\t\t\tbsdc.Add = ctx.db.Bytes()\n\t\t\tbsdc.Copy = nbuf[chunk.offset+chunk.copyStart : chunk.offset+chunk.copyEnd]\n\n\t\t\tif ctx.Stats != nil && ctx.Stats.BiggestAdd < int64(len(bsdc.Add)) {\n\t\t\t\tctx.Stats.BiggestAdd = int64(len(bsdc.Add))\n\t\t\t}\n\n\t\t\tprevChunk = chunk\n\t\t}\n\n\t\tstate.consumed <- true\n\t\tworkerIndex = (workerIndex + 1) % numWorkers\n\t}\n\n\t\/\/ fmt.Fprintf(os.Stderr, \"%d bytes add, %d bytes copy\\n\", len(bsdc.Add), len(bsdc.Copy))\n\n\tbsdc.Seek = 0\n\terr = writeMessage(bsdc)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ctx.Stats != nil {\n\t\tctx.Stats.TimeSpentScanning += time.Since(startTime)\n\t}\n\n\tif ctx.MeasureMem {\n\t\truntime.ReadMemStats(memstats)\n\t\tconsumer.Debugf(\"\\nAllocated bytes after scan: %s (%s total)\", humanize.IBytes(uint64(memstats.Alloc)), humanize.IBytes(uint64(memstats.TotalAlloc)))\n\t\tfmt.Fprintf(os.Stderr, \"\\nAllocated bytes after scan: %s (%s total)\", humanize.IBytes(uint64(memstats.Alloc)), humanize.IBytes(uint64(memstats.TotalAlloc)))\n\t}\n\n\tfmt.Fprintf(os.Stderr, \"\\nStats: waitConsume %s, waitEnqueue %s, idleWorker %s, sendingWorker %s\\n\",\n\t\ttime.Duration(waitConsumeTime),\n\t\ttime.Duration(waitEnqueueTime),\n\t\ttime.Duration(idleWorkerTime),\n\t\ttime.Duration(sendingWorkerTime),\n\t)\n\n\tbsdc.Reset()\n\tbsdc.Eof = true\n\terr = writeMessage(bsdc)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2014, Google Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package protected_objects stores, searches and chains protected objects like keys\n\/\/ and files.\n\npackage protected_objects \n\nimport (\n\t\"container\/list\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/jlmucb\/cloudproxy\/go\/tpm2\"\n)\n\nfunc PrintObject(obj *ObjectMessage) {\n\tfmt.Printf(\"Object %s, epoch %d\\n\", *obj.ObjId.ObjName, *obj.ObjId.ObjEpoch)\n\tfmt.Printf(\"\\ttype %s, status %s, notbefore: %s, notafter: %s\\n\", *obj.ObjType,\n\t\t*obj.ObjStatus, *obj.NotBefore, *obj.NotAfter)\n\tfmt.Printf(\"Object value: %x\\n\", obj.ObjVal)\n}\n\nfunc PrintProtectedObject(obj *ProtectedObjectMessage) {\n\tfmt.Printf(\"Object %s, epoch %d\\n\", *obj.ProtectedObjId.ObjName, *obj.ProtectedObjId.ObjEpoch)\n\tfmt.Printf(\"Object %s, epoch %d\\n\", *obj.ProtectorObjId.ObjName, *obj.ProtectorObjId.ObjEpoch)\n}\n\nfunc PrintNode(obj *NodeMessage) {\n\tfmt.Printf(\"ProtectedObject %s, epoch %d\\n\", *obj.ProtectedObjId.ObjName,\n\t\t *obj.ProtectedObjId.ObjEpoch)\n\tfmt.Printf(\"ProtectorObject %s, epoch %d\\n\", *obj.ProtectorObjId.ObjName,\n\t\t *obj.ProtectorObjId.ObjEpoch)\n}\n\nfunc CreateObject(name string, epoch int32, obj_type *string, status *string, notBefore *time.Time,\n\t\tnotAfter *time.Time, v []byte) (*ObjectMessage, error) {\n\tobj_id := &ObjectIdMessage {\n\t\tObjName: &name,\n\t\tObjEpoch: &epoch,\n\t}\n\tstr_notBefore := notBefore.String()\n\tstr_notAfter := notAfter.String()\n\tobj := &ObjectMessage {\n\t\tObjId: obj_id,\n\t\tObjType: obj_type,\n\t\tObjStatus: status,\n\t\tNotBefore: &str_notBefore,\n\t\tNotAfter: &str_notAfter,\n\t\tObjVal: v,\n\t}\n\treturn obj, nil\n}\n\nfunc AddObject(l *list.List, obj interface{}) error {\n\tl.PushFront(obj)\n\treturn nil\n}\n\nfunc DeleteObject(l *list.List, name string, epoch int32) error {\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\to := e.Value.(*ObjectMessage)\n\t\tif *o.ObjId.ObjName == name && *o.ObjId.ObjEpoch == epoch {\n\t\t\tl.Remove(e)\n\t\t\tbreak;\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc DeleteProtectedObject(l *list.List, name string, epoch int32) error {\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\to := e.Value.(*ProtectedObjectMessage)\n\t\tif *o.ProtectedObjId.ObjName == name && *o.ProtectedObjId.ObjEpoch == epoch {\n\t\t\tl.Remove(e)\n\t\t\tbreak;\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc FindProtectedNodes(l *list.List, name string, epoch int32) (*list.List) {\n\tr := list.New()\n\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\to := e.Value.(*NodeMessage)\n\t\tif epoch != 0 && epoch != *o.ProtectedObjId.ObjEpoch {\n\t\t\tcontinue\n\t\t}\n\t\tif name == *o.ProtectedObjId.ObjName {\n\t\t\tr.PushFront(o)\n\t\t}\n }\n\treturn r\n}\n\nfunc FindProtectorNodes(l *list.List, name string, epoch int32) (*list.List) {\n\tr := list.New()\n\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\to := e.Value.(*NodeMessage)\n\t\tif epoch != 0 && epoch != *o.ProtectorObjId.ObjEpoch {\n\t\t\tcontinue\n\t\t}\n\t\tif name == *o.ProtectorObjId.ObjName {\n\t\t\tr.PushFront(o)\n\t\t}\n }\n\treturn r\n}\n\nfunc FindObject(l *list.List, name string, epoch int32) (*ObjectMessage) {\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\to := e.Value.(*ObjectMessage)\n\t\tif epoch != 0 && epoch != *o.ObjId.ObjEpoch {\n\t\t\tcontinue\n\t\t}\n\t\tif name == *o.ObjId.ObjName {\n\t\t\treturn o\n\t\t}\n }\n\treturn nil\n}\n\nfunc GetLatestEpoch(l *list.List, name string, epoch int32) (*ObjectMessage) {\n\treturn nil\n}\n\nfunc GetEarliestEpoch(l *list.List, name string, epoch int32) (*ObjectMessage) {\n\treturn nil\n}\n\nfunc SaveProtectedObjects(l *list.List, file string) error {\n\tvar po_store ProtectedObjectStoreMessage\n\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\to := e.Value.(*ProtectedObjectMessage)\n\t\tp := new(ProtectedObjectMessage)\n\t\tp.ProtectedObjId.ObjName = o.ProtectedObjId.ObjName\n\t\tp.ProtectedObjId.ObjEpoch = o.ProtectedObjId.ObjEpoch\n\t\tp.ProtectorObjId.ObjName = o.ProtectorObjId.ObjName\n\t\tp.ProtectorObjId.ObjEpoch = o.ProtectorObjId.ObjEpoch\n\t\tp.Blob= o.Blob\n\t\tpo_store.ProtectedObjects = append(po_store.ProtectedObjects, p)\n\t}\n\tb, err := proto.Marshal(&po_store)\n\tif err != nil {\n\t\treturn err\n\t}\n\tioutil.WriteFile(file, b, 0644)\n\treturn nil\n}\n\nfunc SaveNodes(l *list.List, file string) error {\n\tvar node_store NodeStoreMessage\n\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\to := e.Value.(*NodeMessage)\n\t\tp := new(NodeMessage)\n\t\tp.ProtectedObjId.ObjName = o.ProtectedObjId.ObjName\n\t\tp.ProtectedObjId.ObjEpoch = o.ProtectedObjId.ObjEpoch\n\t\tp.ProtectorObjId.ObjName = o.ProtectorObjId.ObjName\n\t\tp.ProtectorObjId.ObjEpoch = o.ProtectorObjId.ObjEpoch\n\t\tnode_store.NodeObjects = append(node_store.NodeObjects, p)\n\t}\n\tb, err := proto.Marshal(&node_store)\n\tif err != nil {\n\t\treturn err\n\t}\n\tioutil.WriteFile(file, b, 0644)\n\treturn nil\n}\n\n\/\/ nil is error return\nfunc SaveObjects(l *list.List, file string) error {\n\tvar o_store ObjectStoreMessage\n\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\to := e.Value.(ObjectMessage)\n\t\tp := new(ObjectMessage)\n\t\tp.ObjId = new(ObjectIdMessage)\n\t\tp.ObjId.ObjName = o.ObjId.ObjName\n\t\tp.ObjId.ObjEpoch = o.ObjId.ObjEpoch\n\t\tp.ObjType = o.ObjType\n\t\tp.ObjStatus = o.ObjStatus\n\t\tp.NotBefore = o.NotBefore\n\t\tp.NotAfter = o.NotAfter\n\t\tp.ObjVal = o.ObjVal\n\t\to_store.Objects = append(o_store.Objects, p)\n\t}\n\tb, err := proto.Marshal(&o_store)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tioutil.WriteFile(file, b, 0644)\n\treturn nil\n}\n\nfunc LoadProtectedObjects(file string) (*list.List) {\n\tvar po_store ProtectedObjectStoreMessage\n\t\n\tbuf, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn nil\n\t}\n\terr = proto.Unmarshal(buf, &po_store)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tl := list.New()\n\tfor _, v := range(po_store.ProtectedObjects) {\n\t\to := new(ProtectedObjectMessage)\n\t\to.ProtectorObjId.ObjName = v.ProtectorObjId.ObjName\n\t\to.ProtectorObjId.ObjEpoch = v.ProtectorObjId.ObjEpoch\n\t\to.ProtectedObjId.ObjName = v.ProtectedObjId.ObjName\n\t\to.ProtectedObjId.ObjEpoch = v.ProtectedObjId.ObjEpoch\n\t\to.Blob = v.Blob\n\t\tl.PushFront(o)\n\t}\n\treturn l\n}\n\nfunc LoadNodes(file string) (*list.List) {\n\tvar node_store NodeStoreMessage\n\n\tbuf, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn nil\n\t}\n\terr = proto.Unmarshal(buf, &node_store)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tl := list.New()\n\tfor _, v := range(node_store.NodeObjects) {\n\t\to := new(NodeMessage)\n\t\to.ProtectedObjId.ObjName = v.ProtectedObjId.ObjName\n\t\to.ProtectedObjId.ObjEpoch = v.ProtectedObjId.ObjEpoch\n\t\to.ProtectorObjId.ObjName = v.ProtectorObjId.ObjName\n\t\to.ProtectorObjId.ObjEpoch = v.ProtectorObjId.ObjEpoch\n\t\tl.PushFront(o)\n\t}\n\treturn l\n}\n\nfunc LoadObjects(file string) (*list.List) {\n\tvar o_store ObjectStoreMessage\n\n\tbuf, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn nil\n\t}\n\terr = proto.Unmarshal(buf, &o_store)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tl := list.New()\n\tfor _, v := range(o_store.Objects) {\n\t\to := new(ObjectMessage)\n\t\to.ObjId = new(ObjectIdMessage)\n\t\to.ObjId.ObjName = v.ObjId.ObjName\n\t\to.ObjId.ObjEpoch = v.ObjId.ObjEpoch\n\n\t\to.ObjType = v.ObjType\n\t\to.ObjStatus = v.ObjStatus\n\t\to.NotBefore = v.NotBefore\n\t\to.NotAfter = v.NotAfter\n\t\to.ObjVal = v.ObjVal\n\t\tl.PushFront(*o)\n\t}\n\treturn l\n}\n\nfunc MakeProtectedObject(obj ObjectMessage, protectorName string, protectorEpoch int32,\n\t\tprotectorKeys []byte) (*ProtectedObjectMessage, error) {\n\tp := new(ProtectedObjectMessage)\n\tp.ProtectedObjId = new(ObjectIdMessage)\n\tp.ProtectorObjId = new(ObjectIdMessage)\n\tp.ProtectedObjId.ObjName = obj.ObjId.ObjName\n\tp.ProtectedObjId.ObjEpoch =obj.ObjId.ObjEpoch\n\tp.ProtectorObjId.ObjName = &protectorName\n\tp.ProtectorObjId.ObjEpoch = &protectorEpoch\n\tunencrypted, err := proto.Marshal(&obj)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Can't make Protected Object\")\n\t}\n\tencrypted, err := tpm2.Protect(protectorKeys, unencrypted)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Can't Protect Object\")\n\t}\n\tp.Blob = encrypted\n\treturn p, nil\n}\n\nfunc RecoverProtectedObject(obj *ProtectedObjectMessage, protectorKeys []byte) (*ObjectMessage, error) {\n\tp := new(ObjectMessage)\n\tunencrypted, err := tpm2.Unprotect(protectorKeys, obj.Blob)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Can't make Unprotect Object\")\n\t}\n\terr = proto.Unmarshal(unencrypted, p)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Can't Unmarshal Object\")\n\t}\n\treturn p, nil\n}\n\n<commit_msg>StoreObjects<commit_after>\/\/ Copyright (c) 2014, Google Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package protected_objects stores, searches and chains protected objects like keys\n\/\/ and files.\n\npackage protected_objects \n\nimport (\n\t\"container\/list\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/jlmucb\/cloudproxy\/go\/tpm2\"\n)\n\nfunc PrintObject(obj *ObjectMessage) {\n\tfmt.Printf(\"Object %s, epoch %d\\n\", *obj.ObjId.ObjName, *obj.ObjId.ObjEpoch)\n\tfmt.Printf(\"\\ttype %s, status %s, notbefore: %s, notafter: %s\\n\", *obj.ObjType,\n\t\t*obj.ObjStatus, *obj.NotBefore, *obj.NotAfter)\n\tfmt.Printf(\"Object value: %x\\n\", obj.ObjVal)\n}\n\nfunc PrintProtectedObject(obj *ProtectedObjectMessage) {\n\tfmt.Printf(\"Object %s, epoch %d\\n\", *obj.ProtectedObjId.ObjName, *obj.ProtectedObjId.ObjEpoch)\n\tfmt.Printf(\"Object %s, epoch %d\\n\", *obj.ProtectorObjId.ObjName, *obj.ProtectorObjId.ObjEpoch)\n}\n\nfunc PrintNode(obj *NodeMessage) {\n\tfmt.Printf(\"ProtectedObject %s, epoch %d\\n\", *obj.ProtectedObjId.ObjName,\n\t\t *obj.ProtectedObjId.ObjEpoch)\n\tfmt.Printf(\"ProtectorObject %s, epoch %d\\n\", *obj.ProtectorObjId.ObjName,\n\t\t *obj.ProtectorObjId.ObjEpoch)\n}\n\nfunc CreateObject(name string, epoch int32, obj_type *string, status *string, notBefore *time.Time,\n\t\tnotAfter *time.Time, v []byte) (*ObjectMessage, error) {\n\tobj_id := &ObjectIdMessage {\n\t\tObjName: &name,\n\t\tObjEpoch: &epoch,\n\t}\n\tstr_notBefore := notBefore.String()\n\tstr_notAfter := notAfter.String()\n\tobj := &ObjectMessage {\n\t\tObjId: obj_id,\n\t\tObjType: obj_type,\n\t\tObjStatus: status,\n\t\tNotBefore: &str_notBefore,\n\t\tNotAfter: &str_notAfter,\n\t\tObjVal: v,\n\t}\n\treturn obj, nil\n}\n\nfunc AddObject(l *list.List, obj interface{}) error {\n\tl.PushFront(obj)\n\treturn nil\n}\n\nfunc DeleteObject(l *list.List, name string, epoch int32) error {\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\to := e.Value.(*ObjectMessage)\n\t\tif *o.ObjId.ObjName == name && *o.ObjId.ObjEpoch == epoch {\n\t\t\tl.Remove(e)\n\t\t\tbreak;\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc DeleteProtectedObject(l *list.List, name string, epoch int32) error {\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\to := e.Value.(*ProtectedObjectMessage)\n\t\tif *o.ProtectedObjId.ObjName == name && *o.ProtectedObjId.ObjEpoch == epoch {\n\t\t\tl.Remove(e)\n\t\t\tbreak;\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc FindProtectedNodes(l *list.List, name string, epoch int32) (*list.List) {\n\tr := list.New()\n\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\to := e.Value.(*NodeMessage)\n\t\tif epoch != 0 && epoch != *o.ProtectedObjId.ObjEpoch {\n\t\t\tcontinue\n\t\t}\n\t\tif name == *o.ProtectedObjId.ObjName {\n\t\t\tr.PushFront(o)\n\t\t}\n }\n\treturn r\n}\n\nfunc FindProtectorNodes(l *list.List, name string, epoch int32) (*list.List) {\n\tr := list.New()\n\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\to := e.Value.(*NodeMessage)\n\t\tif epoch != 0 && epoch != *o.ProtectorObjId.ObjEpoch {\n\t\t\tcontinue\n\t\t}\n\t\tif name == *o.ProtectorObjId.ObjName {\n\t\t\tr.PushFront(o)\n\t\t}\n }\n\treturn r\n}\n\nfunc FindObject(l *list.List, name string, epoch int32) (*ObjectMessage) {\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\to := e.Value.(*ObjectMessage)\n\t\tif epoch != 0 && epoch != *o.ObjId.ObjEpoch {\n\t\t\tcontinue\n\t\t}\n\t\tif name == *o.ObjId.ObjName {\n\t\t\treturn o\n\t\t}\n }\n\treturn nil\n}\n\nfunc GetLatestEpoch(l *list.List, name string, epoch int32) (*ObjectMessage) {\n\treturn nil\n}\n\nfunc GetEarliestEpoch(l *list.List, name string, epoch int32) (*ObjectMessage) {\n\treturn nil\n}\n\nfunc SaveProtectedObjects(l *list.List, file string) error {\n\tvar po_store ProtectedObjectStoreMessage\n\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\to := e.Value.(ProtectedObjectMessage)\n\t\tp := new(ProtectedObjectMessage)\n\t\tp.ProtectedObjId.ObjName = o.ProtectedObjId.ObjName\n\t\tp.ProtectedObjId.ObjEpoch = o.ProtectedObjId.ObjEpoch\n\t\tp.ProtectorObjId.ObjName = o.ProtectorObjId.ObjName\n\t\tp.ProtectorObjId.ObjEpoch = o.ProtectorObjId.ObjEpoch\n\t\tp.Blob= o.Blob\n\t\tpo_store.ProtectedObjects = append(po_store.ProtectedObjects, p)\n\t}\n\tb, err := proto.Marshal(&po_store)\n\tif err != nil {\n\t\treturn err\n\t}\n\tioutil.WriteFile(file, b, 0644)\n\treturn nil\n}\n\nfunc SaveNodes(l *list.List, file string) error {\n\tvar node_store NodeStoreMessage\n\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\to := e.Value.(NodeMessage)\n\t\tp := new(NodeMessage)\n\t\tp.ProtectedObjId.ObjName = o.ProtectedObjId.ObjName\n\t\tp.ProtectedObjId.ObjEpoch = o.ProtectedObjId.ObjEpoch\n\t\tp.ProtectorObjId.ObjName = o.ProtectorObjId.ObjName\n\t\tp.ProtectorObjId.ObjEpoch = o.ProtectorObjId.ObjEpoch\n\t\tnode_store.NodeObjects = append(node_store.NodeObjects, p)\n\t}\n\tb, err := proto.Marshal(&node_store)\n\tif err != nil {\n\t\treturn err\n\t}\n\tioutil.WriteFile(file, b, 0644)\n\treturn nil\n}\n\n\/\/ nil is error return\nfunc SaveObjects(l *list.List, file string) error {\n\tvar o_store ObjectStoreMessage\n\n\tfor e := l.Front(); e != nil; e = e.Next() {\n\t\to := e.Value.(ObjectMessage)\n\t\tp := new(ObjectMessage)\n\t\tp.ObjId = new(ObjectIdMessage)\n\t\tp.ObjId.ObjName = o.ObjId.ObjName\n\t\tp.ObjId.ObjEpoch = o.ObjId.ObjEpoch\n\t\tp.ObjType = o.ObjType\n\t\tp.ObjStatus = o.ObjStatus\n\t\tp.NotBefore = o.NotBefore\n\t\tp.NotAfter = o.NotAfter\n\t\tp.ObjVal = o.ObjVal\n\t\to_store.Objects = append(o_store.Objects, p)\n\t}\n\tb, err := proto.Marshal(&o_store)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tioutil.WriteFile(file, b, 0644)\n\treturn nil\n}\n\nfunc LoadProtectedObjects(file string) (*list.List) {\n\tvar po_store ProtectedObjectStoreMessage\n\n\tbuf, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn nil\n\t}\n\terr = proto.Unmarshal(buf, &po_store)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tl := list.New()\n\tfor _, v := range(po_store.ProtectedObjects) {\n\t\to := new(ProtectedObjectMessage)\n\t\to.ProtectorObjId.ObjName = v.ProtectorObjId.ObjName\n\t\to.ProtectorObjId.ObjEpoch = v.ProtectorObjId.ObjEpoch\n\t\to.ProtectedObjId.ObjName = v.ProtectedObjId.ObjName\n\t\to.ProtectedObjId.ObjEpoch = v.ProtectedObjId.ObjEpoch\n\t\to.Blob = v.Blob\n\t\tl.PushFront(*o)\n\t}\n\treturn l\n}\n\nfunc LoadNodes(file string) (*list.List) {\n\tvar node_store NodeStoreMessage\n\n\tbuf, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn nil\n\t}\n\terr = proto.Unmarshal(buf, &node_store)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tl := list.New()\n\tfor _, v := range(node_store.NodeObjects) {\n\t\to := new(NodeMessage)\n\t\to.ProtectedObjId.ObjName = v.ProtectedObjId.ObjName\n\t\to.ProtectedObjId.ObjEpoch = v.ProtectedObjId.ObjEpoch\n\t\to.ProtectorObjId.ObjName = v.ProtectorObjId.ObjName\n\t\to.ProtectorObjId.ObjEpoch = v.ProtectorObjId.ObjEpoch\n\t\tl.PushFront(*o)\n\t}\n\treturn l\n}\n\nfunc LoadObjects(file string) (*list.List) {\n\tvar o_store ObjectStoreMessage\n\n\tbuf, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\treturn nil\n\t}\n\terr = proto.Unmarshal(buf, &o_store)\n\tif err != nil {\n\t\treturn nil\n\t}\n\tl := list.New()\n\tfor _, v := range(o_store.Objects) {\n\t\to := new(ObjectMessage)\n\t\tPrintObject(v)\n\t\to.ObjId = new(ObjectIdMessage)\n\t\to.ObjId.ObjName = v.ObjId.ObjName\n\t\to.ObjId.ObjEpoch = v.ObjId.ObjEpoch\n\n\t\to.ObjType = v.ObjType\n\t\to.ObjStatus = v.ObjStatus\n\t\to.NotBefore = v.NotBefore\n\t\to.NotAfter = v.NotAfter\n\t\to.ObjVal = v.ObjVal\n\t\tl.PushFront(*o)\n\t}\n\treturn l\n}\n\nfunc MakeProtectedObject(obj ObjectMessage, protectorName string, protectorEpoch int32,\n\t\tprotectorKeys []byte) (*ProtectedObjectMessage, error) {\n\tp := new(ProtectedObjectMessage)\n\tp.ProtectedObjId = new(ObjectIdMessage)\n\tp.ProtectorObjId = new(ObjectIdMessage)\n\tp.ProtectedObjId.ObjName = obj.ObjId.ObjName\n\tp.ProtectedObjId.ObjEpoch =obj.ObjId.ObjEpoch\n\tp.ProtectorObjId.ObjName = &protectorName\n\tp.ProtectorObjId.ObjEpoch = &protectorEpoch\n\tunencrypted, err := proto.Marshal(&obj)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Can't make Protected Object\")\n\t}\n\tencrypted, err := tpm2.Protect(protectorKeys, unencrypted)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Can't Protect Object\")\n\t}\n\tp.Blob = encrypted\n\treturn p, nil\n}\n\nfunc RecoverProtectedObject(obj *ProtectedObjectMessage, protectorKeys []byte) (*ObjectMessage, error) {\n\tp := new(ObjectMessage)\n\tunencrypted, err := tpm2.Unprotect(protectorKeys, obj.Blob)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Can't make Unprotect Object\")\n\t}\n\terr = proto.Unmarshal(unencrypted, p)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Can't Unmarshal Object\")\n\t}\n\treturn p, nil\n}\n\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t. \"koding\/db\/models\"\n\thelper \"koding\/db\/mongodb\/modelhelper\"\n)\n\nvar POST_TYPES = [5]string{\n\t\"JBlogPost\",\n\t\"JDiscussion\",\n\t\"JCodeSnip\",\n\t\"JTutorial\",\n\t\"JStatusUpdate\",\n}\n\nconst LIMIT = 100\n\ntype JPost struct {\n\tTitle string `bson:\"title,omitempty\"`\n\tOpinionCount int `bson:\"opinionCount,omitempty\"`\n}\n\nvar ErrAlreadyMigrated = errors.New(\"already migrated\")\n\nfunc main() {\n\tinitPublisher()\n\tdefer shutdown()\n\n\tfor _, postType := range POST_TYPES {\n\t\tlog.Notice(\"Starting \\\"%s\\\" migration\", postType)\n\t\tm := &Migrator{\n\t\t\tPostType: postType,\n\t\t}\n\t\tif err := initialize(m); err != nil {\n\t\t\tlog.Error(\"An error occured during migration: %s\", err.Error())\n\t\t}\n\t\tGetMigrationCompletedReport(m)\n\t}\n}\n\nfunc initialize(m *Migrator) error {\n\tcount, err := helper.CountPosts(helper.Selector{}, helper.Options{}, m.PostType)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Info(\"Found %v posts\", count)\n\n\treturn migrate(m)\n}\n\nfunc migrate(m *Migrator) error {\n\to := helper.Options{\n\t\tSort: \"meta.createdAt\", \/\/start from the oldest\n\t\tLimit: LIMIT,\n\t\tSkip: m.Index,\n\t}\n\tposts, err := helper.GetSomePosts(helper.Selector{}, o, m.PostType)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ no more post to migrate\n\tif len(posts) == 0 {\n\t\treturn nil\n\t}\n\n\tfor _, post := range posts {\n\t\tm.Id = post.Id.Hex()\n\t\tpost.Id = helper.NewObjectId()\n\t\tm.NewId = post.Id.Hex()\n\t\tm.Index++\n\t\tif err := verifyOrigin(&post); err != nil {\n\t\t\tReportError(m, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := insertNewStatusUpdate(&post, m); err != nil {\n\t\t\tReportError(m, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := migrateTags(&post, m); err != nil {\n\t\t\tReportError(m, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := migrateOrigin(&post); err != nil {\n\t\t\tReportError(m, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tcommenters, err := migrateComments(&post, m)\n\t\tif err != nil {\n\t\t\tReportError(m, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := migrateOpinions(&post, m, commenters); err != nil {\n\t\t\tReportError(m, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tReportSuccess(m)\n\t}\n\treturn migrate(m)\n}\n\nfunc insertNewStatusUpdate(p *Post, m *Migrator) error {\n\tif p.MigrationStatus == \"Completed\" {\n\t\treturn ErrAlreadyMigrated\n\t}\n\texists, err := helper.CheckGroupExistence(p.Group)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !exists {\n\t\treturn fmt.Errorf(\"Group \\\"%s\\\" not found\", p.Group)\n\t}\n\t\/\/ some status update have negative timestamp\n\tif p.Meta.CreatedAt.Unix() < 0 {\n\t\treturn fmt.Errorf(\"got nil timestamp for\")\n\t}\n\tp.Meta.Likes = 0 \/\/ CtF: because we are not migrating activities it is reset\n\n\tif err := migrateCodesnip(p, m); err != nil {\n\t\treturn err\n\t}\n\n\tsu := p.ConvertToStatusUpdate()\n\treturn helper.AddStatusUpdate(su)\n}\n\nfunc migrateCodesnip(p *Post, m *Migrator) error {\n\tif m.PostType != \"JCodeSnip\" {\n\t\treturn nil\n\t}\n\n\tif len(p.Attachments) > 0 {\n\t\tbody, ok := p.Attachments[0][\"content\"]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Codesnip content not found\")\n\t\t}\n\t\tp.Body = fmt.Sprintf(\"`%s`\", body)\n\t}\n\treturn nil\n}\n\nfunc migrateTags(p *Post, m *Migrator) error {\n\ts := helper.Selector{\n\t\t\"sourceId\": helper.GetObjectId(m.Id),\n\t\t\"as\": \"tag\",\n\t\t\"targetName\": \"JTag\",\n\t}\n\trels, err := helper.GetAllRelationships(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Info(\"%v tags found\", len(rels))\n\n\tfor _, r := range rels {\n\t\ttagId := r.TargetId.Hex()\n\t\t\/\/ first check tag existence\n\t\texists, err := helper.CheckTagExistence(tagId)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !exists {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ update tag relationships\n\t\tr.Id = helper.NewObjectId()\n\t\tr.SourceId = p.Id\n\t\tr.SourceName = \"JNewStatusUpdate\"\n\t\tif err := helper.AddRelationship(&r); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsr := swapRelationship(&r, \"post\")\n\t\tif err := helper.AddRelationship(sr); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/append tag to status update body\n\t\tp.Body += fmt.Sprintf(\" |#:JTag:%s|\", tagId)\n\t}\n\t\/\/ if post is tagged\n\tif len(rels) > 0 {\n\t\tsu := p.ConvertToStatusUpdate()\n\t\treturn helper.UpdateStatusUpdate(su)\n\t}\n\n\treturn nil\n}\n\nfunc migrateComments(p *Post, m *Migrator) (map[string]bool, error) {\n\taccounts := make(map[string]bool)\n\t\/\/ get all comments\n\ts := helper.Selector{\n\t\t\"sourceId\": helper.GetObjectId(m.Id),\n\t\t\"as\": \"reply\",\n\t\t\"targetName\": \"JComment\",\n\t}\n\trels, err := helper.GetAllRelationships(s)\n\tif err != nil {\n\t\treturn accounts, err\n\t}\n\tlog.Info(\"%v comments found\", len(rels))\n\t\/\/ posts does not have any comments\n\tif len(rels) == 0 {\n\t\treturn accounts, nil\n\t}\n\n\tcount := 0\n\n\tfor _, r := range rels {\n\t\tcomment, err := helper.GetCommentById(r.TargetId.Hex())\n\t\tif err != nil {\n\t\t\tif err == helper.ErrNotFound {\n\t\t\t\tlog.Info(\"Comment not found - Id: %s\", r.TargetId.Hex())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn accounts, err\n\t\t}\n\n\t\t\/\/ check origin existence\n\t\toriginId := comment.OriginId.Hex()\n\t\toriginExists, err := helper.CheckAccountExistence(originId)\n\t\tif err != nil {\n\t\t\treturn accounts, err\n\t\t}\n\t\tif !originExists {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ add relationship: JNewStatusUpdate -> reply -> JComment\n\t\tr.Id = helper.NewObjectId()\n\t\tr.SourceId = p.Id\n\t\tr.SourceName = \"JNewStatusUpdate\"\n\t\tif err := helper.AddRelationship(&r); err != nil {\n\t\t\treturn accounts, err\n\t\t}\n\n\t\t\/\/ get unique commenters for each post\n\t\tif _, exist := accounts[originId]; !exist {\n\t\t\taccounts[originId] = true\n\t\t\tmigrateCommentOrigin(comment, p)\n\t\t}\n\t\tcount++\n\t}\n\n\tp.RepliesCount = count\n\n\treturn accounts, nil\n}\n\n\/\/ migrateOpinions migrates opinions to comments\n\/\/ JDiscussion opinion JOpinion\n\/\/ JAccount creator JOpinion\nfunc migrateOpinions(p *Post, m *Migrator, commenters map[string]bool) error {\n\tif m.PostType != \"JDiscussion\" && m.PostType != \"JTutorial\" {\n\t\treturn nil\n\t}\n\ts := helper.Selector{\n\t\t\"sourceId\": helper.GetObjectId(m.Id),\n\t\t\"as\": \"opinion\",\n\t\t\"targetName\": \"JOpinion\",\n\t}\n\trels, err := helper.GetAllRelationships(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Info(\"%v opinions found\", len(rels))\n\t\/\/ post does not have any opinion\n\tif len(rels) == 0 {\n\t\treturn nil\n\t}\n\tcount := 0\n\tfor _, r := range rels {\n\t\topinion, err := helper.GetOpinionById(r.TargetId.Hex())\n\t\tif err != nil {\n\t\t\tif err == helper.ErrNotFound {\n\t\t\t\tlog.Info(\"Opinion not found - Id: %s\", r.TargetId.Hex())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\t\/\/ check origin existence\n\t\toriginId := opinion.OriginId.Hex()\n\t\toriginExists, err := helper.CheckAccountExistence(originId)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !originExists {\n\t\t\tcontinue\n\t\t}\n\t\tcomment, err := convertOpinionToComment(opinion, p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif _, exist := commenters[originId]; !exist {\n\t\t\tcommenters[originId] = true\n\t\t\tif err := migrateCommentOrigin(comment, opinion); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tcount++\n\t}\n\tp.RepliesCount += count\n\treturn nil\n}\n\n\/\/ convertOpinionToComment first converts opinion to comment and persists it.\n\/\/ Adds Relationship: JNewStatusUpdate -> reply -> JComment\n\/\/ JAccount -> creator -> JComment\nfunc convertOpinionToComment(opinion *Post, post *Post) (*Comment, error) {\n\tc := &Comment{\n\t\tId: helper.NewObjectId(),\n\t\tBody: opinion.Body,\n\t\tOriginType: opinion.OriginType,\n\t\tOriginId: opinion.OriginId,\n\t\tMeta: opinion.Meta,\n\t}\n\tc.Meta.Likes = 0 \/\/ TODO not sure about it\n\tif err := helper.AddComment(c); err != nil {\n\t\treturn c, err\n\t}\n\n\t\/\/ Add relationship: JNewStatusUpdate -> reply -> JComment\n\tr := &Relationship{\n\t\tId: helper.NewObjectId(),\n\t\tSourceId: post.Id,\n\t\tSourceName: \"JNewStatusUpdate\",\n\t\tTargetId: c.Id,\n\t\tTargetName: \"JComment\",\n\t\tAs: \"reply\",\n\t\tTimeStamp: opinion.Meta.CreatedAt,\n\t}\n\tif err := helper.AddRelationship(r); err != nil {\n\t\treturn c, err\n\t}\n\treturn c, addCommentCreator(c, r)\n}\n\n\/\/ addCommentCreator inserts a new relationship as JAccount -> creator -> JComment\nfunc addCommentCreator(c *Comment, r *Relationship) error {\n\tr.Id = helper.NewObjectId()\n\tr.SourceId = c.OriginId\n\tr.SourceName = \"JAccount\"\n\tr.As = \"creator\"\n\tr.TimeStamp = c.Meta.CreatedAt\n\treturn helper.AddRelationship(r)\n}\n\n\/\/ migrateCommentOrigins inserts commenter and follower relationships\n\/\/ JNewStatusUpdate -> commenter -> JAccount\n\/\/ JNewStatusUpdate -> follower -> JAccount\nfunc migrateCommentOrigin(c *Comment, p *Post) error {\n\tr := &Relationship{\n\t\tId: helper.NewObjectId(),\n\t\tSourceId: p.Id,\n\t\tSourceName: \"JNewStatusUpdate\",\n\t\tTargetId: c.OriginId,\n\t\tTargetName: \"JAccount\",\n\t\tAs: \"commenter\",\n\t\tTimeStamp: c.Meta.CreatedAt,\n\t}\n\tif err := helper.AddRelationship(r); err != nil {\n\t\treturn err\n\t}\n\tr.Id = helper.NewObjectId()\n\tr.As = \"follower\"\n\treturn helper.AddRelationship(r)\n}\n\nfunc migrateOrigin(p *Post) error {\n\tr := &Relationship{\n\t\tId: helper.NewObjectId(),\n\t\tSourceId: p.OriginId,\n\t\tSourceName: \"JAccount\",\n\t\tTargetId: p.Id,\n\t\tTargetName: \"JNewStatusUpdate\",\n\t\tTimeStamp: p.Meta.CreatedAt,\n\t\tAs: \"creator\",\n\t}\n\tif err := helper.AddRelationship(r); err != nil {\n\t\treturn err\n\t}\n\n\tr = swapRelationship(r, \"author\")\n\treturn helper.AddRelationship(r)\n}\n\nfunc verifyOrigin(p *Post) error {\n\toriginId := p.OriginId.Hex()\n\tif originId == \"\" {\n\t\treturn fmt.Errorf(\"Empty origin id\")\n\t}\n\n\tresult, err := helper.CheckAccountExistence(originId)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !result {\n\t\treturn fmt.Errorf(\"Origin not found - %v\", originId)\n\t}\n\n\treturn nil\n}\n\n\/\/ swapTagRelation swaps source and target data of relationships. It is used\n\/\/ for converting bidirectional relationships.\nfunc swapRelationship(r *Relationship, as string) *Relationship {\n\treturn &Relationship{\n\t\tId: helper.NewObjectId(),\n\t\tAs: as,\n\t\tSourceId: r.TargetId,\n\t\tSourceName: r.TargetName,\n\t\tTargetId: r.SourceId,\n\t\tTargetName: r.SourceName,\n\t\tTimeStamp: r.TimeStamp,\n\t\tData: r.Data,\n\t}\n}\n<commit_msg>Migration: migration can be started from the last interruption point<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t. \"koding\/db\/models\"\n\thelper \"koding\/db\/mongodb\/modelhelper\"\n)\n\nvar POST_TYPES = [5]string{\n\t\"JBlogPost\",\n\t\"JDiscussion\",\n\t\"JCodeSnip\",\n\t\"JTutorial\",\n\t\"JStatusUpdate\",\n}\n\nconst LIMIT = 100\n\ntype JPost struct {\n\tTitle string `bson:\"title,omitempty\"`\n\tOpinionCount int `bson:\"opinionCount,omitempty\"`\n}\n\nvar ErrAlreadyMigrated = errors.New(\"already migrated\")\n\nfunc main() {\n\tinitPublisher()\n\tdefer shutdown()\n\n\tfor _, postType := range POST_TYPES {\n\t\tlog.Notice(\"Starting \\\"%s\\\" migration\", postType)\n\t\tm := &Migrator{\n\t\t\tPostType: postType,\n\t\t}\n\t\tif err := initialize(m); err != nil {\n\t\t\tlog.Error(\"An error occured during migration: %s\", err.Error())\n\t\t}\n\t\tGetMigrationCompletedReport(m)\n\t}\n}\n\nfunc initialize(m *Migrator) error {\n\tcount, err := helper.CountPosts(helper.Selector{}, helper.Options{}, m.PostType)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Info(\"Found %v posts\", count)\n\n\treturn migrate(m)\n}\n\nfunc migrate(m *Migrator) error {\n\to := helper.Options{\n\t\tSort: \"meta.createdAt\", \/\/start from the oldest\n\t\tLimit: LIMIT,\n\t\tSkip: m.Index,\n\t}\n\tposts, err := helper.GetSomePosts(helper.Selector{}, o, m.PostType)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ no more post to migrate\n\tif len(posts) == 0 {\n\t\treturn nil\n\t}\n\n\tfor _, post := range posts {\n\t\tm.Id = post.Id.Hex()\n\t\toldPost := post\n\t\tpost.Id = helper.NewObjectId()\n\t\tm.NewId = post.Id.Hex()\n\t\tm.Index++\n\t\tif err := verifyOrigin(&post); err != nil {\n\t\t\tReportError(m, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := insertNewStatusUpdate(&post, m); err != nil {\n\t\t\tReportError(m, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := updatePostStatus(&oldPost, m, \"Started\"); err != nil {\n\t\t\tReportError(m, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := migrateTags(&post, m); err != nil {\n\t\t\tReportError(m, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := migrateOrigin(&post, m); err != nil {\n\t\t\tReportError(m, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tcommenters, err := migrateComments(&post, m)\n\t\tif err != nil {\n\t\t\tReportError(m, err)\n\t\t\tcontinue\n\t\t}\n\t\tif err := migrateOpinions(&post, m, commenters); err != nil {\n\t\t\tReportError(m, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := updatePostStatus(&oldPost, m, \"Completed\"); err != nil {\n\t\t\tReportError(m, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ updates repliesCount\n\t\tsu := post.ConvertToStatusUpdate()\n\t\tsu.MigrationStatus = m.PostType\n\t\tif err := helper.UpdateStatusUpdate(su); err != nil {\n\t\t\tReportError(m, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tReportSuccess(m)\n\t}\n\treturn migrate(m)\n}\n\nfunc updatePostStatus(p *Post, m *Migrator, status string) error {\n\tp.MigrationStatus = status\n\treturn helper.UpdatePost(p, m.PostType)\n}\n\nfunc updateRelationshipStatus(r Relationship, status string) {\n\tr.MigrationStatus = status\n\thelper.UpdateRelationship(&r)\n}\n\nfunc insertNewStatusUpdate(p *Post, m *Migrator) error {\n\tif p.MigrationStatus == \"Completed\" {\n\t\treturn ErrAlreadyMigrated\n\t}\n\t\/\/ it seems post is already migrated with some incomplete relationships\n\tif p.MigrationStatus == \"Started\" {\n\t\tep, err := helper.GetStatusUpdate(helper.Selector{\"slug\": p.Slug})\n\t\tp.Id = ep.Id\n\t\treturn err\n\t}\n\texists, err := helper.CheckGroupExistence(p.Group)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !exists {\n\t\treturn fmt.Errorf(\"Group \\\"%s\\\" not found\", p.Group)\n\t}\n\t\/\/ some status update have negative timestamp\n\tif p.Meta.CreatedAt.Unix() < 0 {\n\t\treturn fmt.Errorf(\"got nil timestamp for\")\n\t}\n\tp.Meta.Likes = 0 \/\/ CtF: because we are not migrating activities it is reset\n\n\tif err := migrateCodesnip(p, m); err != nil {\n\t\treturn err\n\t}\n\n\tsu := p.ConvertToStatusUpdate()\n\treturn helper.AddStatusUpdate(su)\n}\n\nfunc migrateCodesnip(p *Post, m *Migrator) error {\n\tif m.PostType != \"JCodeSnip\" {\n\t\treturn nil\n\t}\n\n\tif len(p.Attachments) > 0 {\n\t\tcodesnip, ok := p.Attachments[0][\"content\"]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Codesnip content not found\")\n\t\t}\n\t\t\/\/ concatenate post body with codesnip\n\t\tp.Body = fmt.Sprintf(\"%s \\n\\n ```%s``` \\n\\n\", p.Body, codesnip)\n\t\tp.Attachments = make([]map[string]interface{}, 0)\n\t}\n\treturn nil\n}\n\nfunc migrateTags(p *Post, m *Migrator) error {\n\ts := helper.Selector{\n\t\t\"sourceId\": helper.GetObjectId(m.Id),\n\t\t\"as\": \"tag\",\n\t\t\"targetName\": \"JTag\",\n\t}\n\trels, err := helper.GetAllRelationships(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Info(\"%v tags found\", len(rels))\n\n\tfor _, r := range rels {\n\t\t\/\/ relationship already migrated\n\t\tif r.MigrationStatus == \"Completed\" {\n\t\t\tcontinue\n\t\t}\n\t\ttagId := r.TargetId.Hex()\n\t\t\/\/ first check tag existence\n\t\texists, err := helper.CheckTagExistence(tagId)\n\t\tif err != nil {\n\t\t\tupdateRelationshipStatus(r, \"Error\")\n\t\t\treturn err\n\t\t}\n\t\tif !exists {\n\t\t\tupdateRelationshipStatus(r, \"Error\")\n\t\t\tcontinue\n\t\t}\n\t\tor := r \/\/ copy old relationship\n\t\t\/\/ update tag relationships\n\t\tr.Id = helper.NewObjectId()\n\t\tr.SourceId = p.Id\n\t\tr.SourceName = \"JNewStatusUpdate\"\n\t\tif err := helper.AddRelationship(&r); err != nil {\n\t\t\tupdateRelationshipStatus(or, \"Error\")\n\t\t\treturn err\n\t\t}\n\t\tsr := swapRelationship(&r, \"post\") \/\/ CtF: leaking relationship\n\t\tif err := helper.AddRelationship(sr); err != nil {\n\t\t\tupdateRelationshipStatus(or, \"Error\")\n\t\t\treturn err\n\t\t}\n\t\t\/\/append tag to status update body\n\t\tp.Body += fmt.Sprintf(\" |#:JTag:%s|\", tagId)\n\t\tupdateRelationshipStatus(or, \"Completed\")\n\t}\n\n\treturn nil\n}\n\nfunc migrateComments(p *Post, m *Migrator) (map[string]bool, error) {\n\taccounts := make(map[string]bool)\n\t\/\/ get all comments\n\ts := helper.Selector{\n\t\t\"sourceId\": helper.GetObjectId(m.Id),\n\t\t\"as\": \"reply\",\n\t\t\"targetName\": \"JComment\",\n\t}\n\trels, err := helper.GetAllRelationships(s)\n\tif err != nil {\n\t\treturn accounts, err\n\t}\n\tlog.Info(\"%v comments found\", len(rels))\n\t\/\/ posts does not have any comments\n\tif len(rels) == 0 {\n\t\treturn accounts, nil\n\t}\n\n\tcount := 0\n\n\tfor _, r := range rels {\n\t\tor := r\n\t\tcomment, err := helper.GetCommentById(r.TargetId.Hex())\n\t\tif err != nil {\n\t\t\tupdateRelationshipStatus(or, \"Error\")\n\t\t\tif err == helper.ErrNotFound {\n\t\t\t\tlog.Info(\"Comment not found - Id: %s\", r.TargetId.Hex())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn accounts, err\n\t\t}\n\n\t\t\/\/ check origin existence\n\t\toriginId := comment.OriginId.Hex()\n\t\toriginExists, err := helper.CheckAccountExistence(originId)\n\t\tif err != nil {\n\t\t\tupdateRelationshipStatus(or, \"Error\")\n\t\t\treturn accounts, err\n\t\t}\n\t\tif !originExists {\n\t\t\tupdateRelationshipStatus(or, \"Error\")\n\t\t\tcontinue\n\t\t}\n\t\tif r.MigrationStatus != \"Completed\" {\n\t\t\t\/\/ add relationship: JNewStatusUpdate -> reply -> JComment\n\t\t\tr.Id = helper.NewObjectId()\n\t\t\tr.SourceId = p.Id\n\t\t\tr.SourceName = \"JNewStatusUpdate\"\n\t\t\tif err := helper.AddRelationship(&r); err != nil {\n\t\t\t\tupdateRelationshipStatus(or, \"Error\")\n\t\t\t\treturn accounts, err\n\t\t\t}\n\n\t\t\t\/\/ get unique commenters for each post\n\t\t\tif _, exist := accounts[originId]; !exist {\n\t\t\t\taccounts[originId] = true\n\t\t\t\tif err := migrateCommentOrigin(comment, p, m); err != nil {\n\t\t\t\t\treturn accounts, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tupdateRelationshipStatus(or, \"Completed\")\n\t\t} else {\n\t\t\taccounts[originId] = true\n\t\t}\n\n\t\tcount++\n\n\t}\n\n\tp.RepliesCount = count\n\n\treturn accounts, nil\n}\n\n\/\/ migrateOpinions migrates opinions to comments\n\/\/ JDiscussion opinion JOpinion\n\/\/ JAccount creator JOpinion\nfunc migrateOpinions(p *Post, m *Migrator, commenters map[string]bool) error {\n\tif m.PostType != \"JDiscussion\" && m.PostType != \"JTutorial\" {\n\t\treturn nil\n\t}\n\ts := helper.Selector{\n\t\t\"sourceId\": helper.GetObjectId(m.Id),\n\t\t\"as\": \"opinion\",\n\t\t\"targetName\": \"JOpinion\",\n\t}\n\trels, err := helper.GetAllRelationships(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Info(\"%v opinions found\", len(rels))\n\t\/\/ post does not have any opinion\n\tif len(rels) == 0 {\n\t\treturn nil\n\t}\n\tcount := 0\n\tfor _, r := range rels {\n\t\tor := r\n\t\topinion, err := helper.GetOpinionById(r.TargetId.Hex())\n\t\tif err != nil {\n\t\t\tupdateRelationshipStatus(or, \"Error\")\n\t\t\tif err == helper.ErrNotFound {\n\t\t\t\tlog.Info(\"Opinion not found - Id: %s\", r.TargetId.Hex())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\t\/\/ check origin existence\n\t\toriginId := opinion.OriginId.Hex()\n\t\toriginExists, err := helper.CheckAccountExistence(originId)\n\t\tif err != nil {\n\t\t\tupdateRelationshipStatus(or, \"Error\")\n\t\t\treturn err\n\t\t}\n\t\tif !originExists {\n\t\t\tcontinue\n\t\t}\n\t\tif r.MigrationStatus != \"Completed\" {\n\t\t\tcomment, err := convertOpinionToComment(opinion, p)\n\t\t\tif err != nil {\n\t\t\t\tupdateRelationshipStatus(or, \"Error\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, exist := commenters[originId]; !exist {\n\t\t\t\tcommenters[originId] = true\n\t\t\t\tif err := migrateCommentOrigin(comment, p, m); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tupdateRelationshipStatus(or, \"Completed\")\n\t\t} else {\n\t\t\tcommenters[originId] = true\n\t\t}\n\n\t\tcount++\n\t}\n\tp.RepliesCount += count\n\treturn nil\n}\n\n\/\/ convertOpinionToComment first converts opinion to comment and persists it.\n\/\/ Adds Relationship: JNewStatusUpdate -> reply -> JComment\n\/\/ JAccount -> creator -> JComment\nfunc convertOpinionToComment(opinion *Post, post *Post) (*Comment, error) {\n\tc := &Comment{\n\t\tId: helper.NewObjectId(),\n\t\tBody: opinion.Body,\n\t\tOriginType: opinion.OriginType,\n\t\tOriginId: opinion.OriginId,\n\t\tMeta: opinion.Meta,\n\t}\n\tc.Meta.Likes = 0 \/\/ TODO not sure about it\n\tif err := helper.AddComment(c); err != nil {\n\t\treturn c, err\n\t}\n\n\t\/\/ Add relationship: JNewStatusUpdate -> reply -> JComment\n\tr := &Relationship{\n\t\tId: helper.NewObjectId(),\n\t\tSourceId: post.Id,\n\t\tSourceName: \"JNewStatusUpdate\",\n\t\tTargetId: c.Id,\n\t\tTargetName: \"JComment\",\n\t\tAs: \"reply\",\n\t\tTimeStamp: opinion.Meta.CreatedAt,\n\t}\n\tif err := helper.AddRelationship(r); err != nil {\n\t\treturn c, err\n\t}\n\treturn c, addCommentCreator(c, r)\n}\n\n\/\/ addCommentCreator inserts a new relationship as JAccount -> creator -> JComment\nfunc addCommentCreator(c *Comment, r *Relationship) error {\n\tr.Id = helper.NewObjectId()\n\tr.SourceId = c.OriginId\n\tr.SourceName = \"JAccount\"\n\tr.As = \"creator\"\n\tr.TimeStamp = c.Meta.CreatedAt\n\treturn helper.AddRelationship(r)\n}\n\n\/\/ migrateCommentOrigins inserts commenter and follower relationships\n\/\/ JNewStatusUpdate -> commenter -> JAccount\n\/\/ JNewStatusUpdate -> follower -> JAccount\nfunc migrateCommentOrigin(c *Comment, p *Post, m *Migrator) error {\n\ts := helper.Selector{\n\t\t\"sourceId\": helper.GetObjectId(m.Id),\n\t\t\"targetId\": c.OriginId,\n\t\t\"as\": \"commenter\",\n\t}\n\tr, err := helper.GetRelationship(s)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"commenter not found\")\n\t}\n\tif r.MigrationStatus != \"Completed\" {\n\t\tor := r\n\t\tr.Id = helper.NewObjectId()\n\t\tr.SourceId = p.Id\n\t\tr.SourceName = \"JNewStatusUpdate\"\n\t\tif err := helper.AddRelationship(&r); err != nil {\n\t\t\tupdateRelationshipStatus(or, \"Error\")\n\t\t\treturn err\n\t\t}\n\t}\n\ts[\"as\"] = \"follower\"\n\tr, err = helper.GetRelationship(s)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"follower not found\")\n\t}\n\n\tif r.MigrationStatus != \"Completed\" {\n\t\tor := r\n\t\tr.Id = helper.NewObjectId()\n\t\tr.SourceId = p.Id\n\t\tr.SourceName = \"JNewStatusUpdate\"\n\t\tif err := helper.AddRelationship(&r); err != nil {\n\t\t\tupdateRelationshipStatus(or, \"Error\")\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc migrateOrigin(p *Post, m *Migrator) error {\n\ts := helper.Selector{\n\t\t\"as\": \"creator\",\n\t\t\"targetId\": helper.GetObjectId(m.Id),\n\t\t\"sourceId\": p.OriginId,\n\t}\n\tr, err := helper.GetRelationship(s)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creator not found\")\n\t}\n\n\tif r.MigrationStatus != \"Completed\" {\n\t\tor := r\n\t\tr.Id = helper.NewObjectId()\n\t\tr.TargetId = p.Id\n\t\tr.TargetName = \"JNewStatusUpdate\"\n\t\tif err := helper.AddRelationship(&r); err != nil {\n\t\t\tupdateRelationshipStatus(or, \"Error\")\n\t\t\treturn err\n\t\t}\n\t\tupdateRelationshipStatus(or, \"Completed\")\n\t}\n\ts = helper.Selector{\n\t\t\"as\": \"author\",\n\t\t\"sourceId\": helper.GetObjectId(m.Id),\n\t\t\"targetId\": p.OriginId,\n\t}\n\tr, err = helper.GetRelationship(s)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"author not found\")\n\t}\n\n\tif r.MigrationStatus != \"Completed\" {\n\t\tor := r\n\t\tr.Id = helper.NewObjectId()\n\t\tr.SourceId = p.Id\n\t\tr.SourceName = \"JNewStatusUpdate\"\n\t\tif err := helper.AddRelationship(&r); err != nil {\n\t\t\tupdateRelationshipStatus(or, \"Error\")\n\t\t\treturn err\n\t\t}\n\t\tupdateRelationshipStatus(or, \"Completed\")\n\t}\n\n\treturn nil\n}\n\nfunc verifyOrigin(p *Post) error {\n\toriginId := p.OriginId.Hex()\n\tif originId == \"\" {\n\t\treturn fmt.Errorf(\"Empty origin id\")\n\t}\n\n\tresult, err := helper.CheckAccountExistence(originId)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !result {\n\t\treturn fmt.Errorf(\"Origin not found - %v\", originId)\n\t}\n\n\treturn nil\n}\n\n\/\/ swapTagRelation swaps source and target data of relationships. It is used\n\/\/ for converting bidirectional relationships.\nfunc swapRelationship(r *Relationship, as string) *Relationship {\n\treturn &Relationship{\n\t\tId: helper.NewObjectId(),\n\t\tAs: as,\n\t\tSourceId: r.TargetId,\n\t\tSourceName: r.TargetName,\n\t\tTargetId: r.SourceId,\n\t\tTargetName: r.SourceName,\n\t\tTimeStamp: r.TimeStamp,\n\t\tData: r.Data,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"strings\"\n \"os\"\n \"strconv\"\n \"github.com\/codegangsta\/cli\"\n \"github.com\/JFrogDev\/artifactory-cli-go\/commands\"\n \"github.com\/JFrogDev\/artifactory-cli-go\/utils\"\n)\n\nvar dryRun bool\nvar url string\nvar user string\nvar password string\nvar props string\nvar recursive bool\nvar flat bool\nvar useRegExp bool\nvar minSplitSize int64\nvar splitCount int\n\nfunc main() {\n defer utils.RemoveTempDir()\n\n app := cli.NewApp()\n app.Name = \"Artifactory CLI\"\n app.Usage = \"See https:\/\/github.com\/JFrogDev\/artifactory-cli-go for usage instructions.\"\n\n app.Commands = []cli.Command{\n {\n Name: \"upload\",\n Flags: GetUploadFlags(),\n Aliases: []string{\"u\"},\n Usage: \"upload <local path> <repo path>\",\n Action: func(c *cli.Context) {\n Upload(c)\n },\n },\n {\n Name: \"download\",\n Flags: GetDownloadFlags(),\n Aliases: []string{\"d\"},\n Usage: \"download <repo path>\",\n Action: func(c *cli.Context) {\n Download(c)\n },\n },\n }\n\n app.Run(os.Args)\n}\n\nfunc GetFlags() []cli.Flag {\n return []cli.Flag{\n cli.StringFlag{\n Name: \"url\",\n Usage: \"[Mandatiry] Artifactory URL\",\n },\n cli.StringFlag{\n Name: \"user\",\n Usage: \"[Optional] Artifactory user\",\n },\n cli.StringFlag{\n Name: \"password\",\n Usage: \"[Optional] Artifactory password\",\n },\n }\n}\n\nfunc GetUploadFlags() []cli.Flag {\n flags := []cli.Flag{\n nil,nil,nil,nil,nil,nil,nil,nil,\n }\n copy(flags[0:3], GetFlags())\n flags[3] = cli.StringFlag{\n Name: \"props\",\n Usage: \"[Optional] List of properties in the form of key1=value1;key2=value2,... to be attached to the uploaded artifacts.\",\n }\n flags[4] = cli.StringFlag{\n Name: \"recursive\",\n Value: \"\",\n Usage: \"[Default: true] Set to false if you do not wish to collect artifacts in sub-folders to be uploaded to Artifactory.\",\n }\n flags[5] = cli.BoolFlag{\n Name: \"flat\",\n Usage: \"[Default: false] If not set to true, and the upload path ends with a slash, files are uploaded according to their file system hierarchy.\",\n }\n flags[6] = cli.BoolFlag{\n Name: \"regexp\",\n Usage: \"[Default: false] Set to true to use a regular expression instead of wildcards expression to collect files to upload.\",\n }\n flags[7] = cli.BoolFlag{\n Name: \"dry-run\",\n Usage: \"[Default: false] Set to true to disable communication with Artifactory.\",\n }\n return flags\n}\n\nfunc GetDownloadFlags() []cli.Flag {\n flags := []cli.Flag{\n nil,nil,nil,nil,nil,nil,nil,nil,\n }\n copy(flags[0:3], GetFlags())\n flags[3] = cli.StringFlag{\n Name: \"props\",\n Usage: \"[Optional] List of properties in the form of key1=value1;key2=value2,... Only artifacts with these properties will be downloaded.\",\n }\n flags[4] = cli.StringFlag{\n Name: \"recursive\",\n Value: \"\",\n Usage: \"[Default: true] Set to false if you do not wish to include the download of artifacts inside sub-folders in Artifactory.\",\n }\n flags[5] = cli.BoolFlag{\n Name: \"flat\",\n Usage: \"[Default: false] Set to true if you do not wish to have the Artifactory repository path structure created locally for your downloaded files.\",\n }\n flags[6] = cli.StringFlag{\n Name: \"min-split\",\n Value: \"\",\n Usage: \"[Default: 5120] Minimum file size in KB to split into ranges when downloading. Set to -1 for no splits.\",\n }\n flags[7] = cli.StringFlag{\n Name: \"split-count\",\n Value: \"\",\n Usage: \"[Default: 3] Number of parts to split a file when downloading. Set to 0 for no splits.\",\n }\n return flags\n}\n\nfunc InitFlags(c *cli.Context) {\n url = GetMandatoryFlag(c, \"url\")\n if !strings.HasSuffix(url, \"\/\") {\n url += \"\/\"\n }\n\n user = c.String(\"user\")\n password = c.String(\"password\")\n props = c.String(\"props\")\n dryRun = c.Bool(\"dry-run\")\n flat = c.Bool(\"flat\")\n useRegExp = c.Bool(\"regexp\")\n var err error\n if c.String(\"min-split\") == \"\" {\n minSplitSize = 5120\n } else {\n minSplitSize, err = strconv.ParseInt(c.String(\"min-split\"), 10, 64)\n if err != nil {\n utils.Exit(\"The '--min-split' option should have a numeric value. Try 'art download --help'.\")\n }\n }\n if c.String(\"split-count\") == \"\" {\n splitCount = 3\n } else {\n splitCount, err = strconv.Atoi(c.String(\"split-count\"))\n if err != nil {\n utils.Exit(\"The '--split-count' option should have a numeric value. Try 'art download --help'.\")\n }\n if splitCount > 15 {\n utils.Exit(\"The '--split-count' option value is limitted to a maximum of 15.\")\n }\n if splitCount < 0 {\n utils.Exit(\"The '--split-count' option cannot have a negative value.\")\n }\n }\n\n if c.String(\"recursive\") == \"\" {\n recursive = true\n } else {\n recursive = c.Bool(\"recursive\")\n }\n}\n\nfunc Download(c *cli.Context) {\n InitFlags(c)\n if len(c.Args()) != 1 {\n utils.Exit(\"Wrong number of arguments. Try 'art download --help'.\")\n }\n pattern := c.Args()[0]\n commands.Download(url, pattern, props, user, password, recursive, flat, dryRun, minSplitSize, splitCount)\n}\n\nfunc Upload(c *cli.Context) {\n InitFlags(c)\n size := len(c.Args())\n if size != 2 {\n utils.Exit(\"Wrong number of arguments. Try 'art upload --help'.\")\n }\n localPath := c.Args()[0]\n targetPath := c.Args()[1]\n commands.Upload(url, localPath, targetPath, recursive, flat, props, user, password, useRegExp, dryRun)\n}\n\n\/\/ Get a CLI flagg. If the flag does not exist, utils.Exit with a message.\nfunc GetMandatoryFlag(c *cli.Context, flag string) string {\n value := c.String(flag)\n if value == \"\" {\n utils.Exit(\"The --\" + flag + \" flag is mandatory\")\n }\n return value\n}<commit_msg>Start of Issue #1 Add the ability to save url,user,pass in a conf file<commit_after>package main\n\nimport (\n \"strings\"\n \"os\"\n \"os\/user\"\n \"strconv\"\n \"github.com\/codegangsta\/cli\"\n \"github.com\/JFrogDev\/artifactory-cli-go\/commands\"\n \"github.com\/JFrogDev\/artifactory-cli-go\/utils\"\n)\n\nvar dryRun bool\nvar url string\nvar username string\nvar password string\nvar props string\nvar recursive bool\nvar flat bool\nvar useRegExp bool\nvar minSplitSize int64\nvar splitCount int\nvar confFile string\n\nfunc main() {\n defer utils.RemoveTempDir()\n\n app := cli.NewApp()\n app.Name = \"Artifactory CLI\"\n app.Usage = \"See https:\/\/github.com\/JFrogDev\/artifactory-cli-go for usage instructions.\"\n\n app.Commands = []cli.Command{\n {\n Name: \"config\",\n Flags: GetFlags(),\n Aliases: []string{\"c\"},\n Usage: \"config\",\n Action: func(c *cli.Context) {\n Config(c)\n },\n },\n {\n Name: \"upload\",\n Flags: GetUploadFlags(),\n Aliases: []string{\"u\"},\n Usage: \"upload <local path> <repo path>\",\n Action: func(c *cli.Context) {\n Upload(c)\n },\n },\n {\n Name: \"download\",\n Flags: GetDownloadFlags(),\n Aliases: []string{\"d\"},\n Usage: \"download <repo path>\",\n Action: func(c *cli.Context) {\n Download(c)\n },\n },\n }\n\n app.Run(os.Args)\n}\n\nfunc GetFlags() []cli.Flag {\n return []cli.Flag{\n cli.StringFlag{\n Name: \"url\",\n Usage: \"[Mandatory] Artifactory URL\",\n },\n cli.StringFlag{\n Name: \"user\",\n Usage: \"[Optional] Artifactory username\",\n },\n cli.StringFlag{\n Name: \"password\",\n Usage: \"[Optional] Artifactory password\",\n },\n }\n}\n\nfunc GetUploadFlags() []cli.Flag {\n flags := []cli.Flag{\n nil,nil,nil,nil,nil,nil,nil,nil,\n }\n copy(flags[0:3], GetFlags())\n flags[3] = cli.StringFlag{\n Name: \"props\",\n Usage: \"[Optional] List of properties in the form of key1=value1;key2=value2,... to be attached to the uploaded artifacts.\",\n }\n flags[4] = cli.StringFlag{\n Name: \"recursive\",\n Value: \"\",\n Usage: \"[Default: true] Set to false if you do not wish to collect artifacts in sub-folders to be uploaded to Artifactory.\",\n }\n flags[5] = cli.BoolFlag{\n Name: \"flat\",\n Usage: \"[Default: false] If not set to true, and the upload path ends with a slash, files are uploaded according to their file system hierarchy.\",\n }\n flags[6] = cli.BoolFlag{\n Name: \"regexp\",\n Usage: \"[Default: false] Set to true to use a regular expression instead of wildcards expression to collect files to upload.\",\n }\n flags[7] = cli.BoolFlag{\n Name: \"dry-run\",\n Usage: \"[Default: false] Set to true to disable communication with Artifactory.\",\n }\n return flags\n}\n\nfunc GetDownloadFlags() []cli.Flag {\n flags := []cli.Flag{\n nil,nil,nil,nil,nil,nil,nil,nil,\n }\n copy(flags[0:3], GetFlags())\n flags[3] = cli.StringFlag{\n Name: \"props\",\n Usage: \"[Optional] List of properties in the form of key1=value1;key2=value2,... Only artifacts with these properties will be downloaded.\",\n }\n flags[4] = cli.StringFlag{\n Name: \"recursive\",\n Value: \"\",\n Usage: \"[Default: true] Set to false if you do not wish to include the download of artifacts inside sub-folders in Artifactory.\",\n }\n flags[5] = cli.BoolFlag{\n Name: \"flat\",\n Usage: \"[Default: false] Set to true if you do not wish to have the Artifactory repository path structure created locally for your downloaded files.\",\n }\n flags[6] = cli.StringFlag{\n Name: \"min-split\",\n Value: \"\",\n Usage: \"[Default: 5120] Minimum file size in KB to split into ranges when downloading. Set to -1 for no splits.\",\n }\n flags[7] = cli.StringFlag{\n Name: \"split-count\",\n Value: \"\",\n Usage: \"[Default: 3] Number of parts to split a file when downloading. Set to 0 for no splits.\",\n }\n return flags\n}\n\nfunc InitFlags(c *cli.Context) {\n url = GetMandatoryFlag(c, \"url\")\n if !strings.HasSuffix(url, \"\/\") {\n url += \"\/\"\n }\n\n username = c.String(\"user\")\n password = c.String(\"password\")\n props = c.String(\"props\")\n dryRun = c.Bool(\"dry-run\")\n flat = c.Bool(\"flat\")\n useRegExp = c.Bool(\"regexp\")\n var err error\n if c.String(\"min-split\") == \"\" {\n minSplitSize = 5120\n } else {\n minSplitSize, err = strconv.ParseInt(c.String(\"min-split\"), 10, 64)\n if err != nil {\n utils.Exit(\"The '--min-split' option should have a numeric value. Try 'art download --help'.\")\n }\n }\n if c.String(\"split-count\") == \"\" {\n splitCount = 3\n } else {\n splitCount, err = strconv.Atoi(c.String(\"split-count\"))\n if err != nil {\n utils.Exit(\"The '--split-count' option should have a numeric value. Try 'art download --help'.\")\n }\n if splitCount > 15 {\n utils.Exit(\"The '--split-count' option value is limitted to a maximum of 15.\")\n }\n if splitCount < 0 {\n utils.Exit(\"The '--split-count' option cannot have a negative value.\")\n }\n }\n\n if c.String(\"recursive\") == \"\" {\n recursive = true\n } else {\n recursive = c.Bool(\"recursive\")\n }\n}\n\nfunc Config(c *cli.Context) {\n usr, err := user.Current()\n utils.CheckError(err)\n confFile = usr.HomeDir + \"\/.jfrog\/cli.conf\"\n println(\"Looking for config file '\" + confFile + \"'\")\n if len(c.Args()) == 0 {\n if !utils.IsPathExists(confFile) {\n println(\"CLI conf file does not exists\")\n } else {\n println(\"CLI conf file content:\")\n \/\/ TODO: Read the flags from the conf and display\n }\n } else {\n key := c.Args()[0]\n val := c.Args()[1]\n println(\"Adding \"+key+\"=\"+val+\" to the CLI conf file\")\n \/\/ TODO: Add or modify the entry in the conf file and create the file if needed\n }\n}\n\nfunc Download(c *cli.Context) {\n InitFlags(c)\n if len(c.Args()) != 1 {\n utils.Exit(\"Wrong number of arguments. Try 'art download --help'.\")\n }\n pattern := c.Args()[0]\n commands.Download(url, pattern, props, username, password, recursive, flat, dryRun, minSplitSize, splitCount)\n}\n\nfunc Upload(c *cli.Context) {\n InitFlags(c)\n size := len(c.Args())\n if size != 2 {\n utils.Exit(\"Wrong number of arguments. Try 'art upload --help'.\")\n }\n localPath := c.Args()[0]\n targetPath := c.Args()[1]\n commands.Upload(url, localPath, targetPath, recursive, flat, props, username, password, useRegExp, dryRun)\n}\n\n\/\/ Get a CLI flagg. If the flag does not exist, utils.Exit with a message.\nfunc GetMandatoryFlag(c *cli.Context, flag string) string {\n value := c.String(flag)\n if value == \"\" {\n utils.Exit(\"The --\" + flag + \" flag is mandatory\")\n }\n return value\n}<|endoftext|>"} {"text":"<commit_before>package model\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/oinume\/lekcije\/server\/bootstrap\"\n\t\"github.com\/oinume\/lekcije\/server\/errors\"\n\t\"github.com\/oinume\/lekcije\/server\/util\"\n)\n\ntype TestHelper struct {\n\tdbURL string\n\tdb *gorm.DB\n\tmCountryService *MCountryService\n}\n\nfunc NewTestHelper() *TestHelper {\n\treturn &TestHelper{}\n}\n\nfunc (h *TestHelper) DB() *gorm.DB {\n\tif h.db != nil {\n\t\treturn h.db\n\t}\n\tbootstrap.CheckCLIEnvVars()\n\th.dbURL = ReplaceToTestDBURL(bootstrap.CLIEnvVars.DBURL())\n\tdb, err := OpenDB(h.dbURL, 1, true \/* TODO: env *\/)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to OpenDB(): err=%v\", err))\n\t}\n\th.db = db\n\treturn db\n}\n\nfunc (h *TestHelper) getDBName(dbURL string) string {\n\tif index := strings.LastIndex(dbURL, \"\/\"); index != -1 {\n\t\treturn dbURL[index+1:]\n\t}\n\treturn \"\"\n}\n\nfunc (h *TestHelper) LoadAllTables(db *gorm.DB) []string {\n\ttype Table struct {\n\t\tName string `gorm:\"column:table_name\"`\n\t}\n\ttables := []Table{}\n\tsql := \"SELECT table_name FROM information_schema.tables WHERE table_schema = ?\"\n\tif err := db.Raw(sql, h.getDBName(h.dbURL)).Scan(&tables).Error; err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to select table names: err=%v\", err))\n\t}\n\n\ttableNames := []string{}\n\tfor _, t := range tables {\n\t\tif t.Name == \"goose_db_version\" {\n\t\t\tcontinue\n\t\t}\n\t\ttableNames = append(tableNames, t.Name)\n\t}\n\treturn tableNames\n}\n\nfunc (h *TestHelper) TruncateAllTables(db *gorm.DB) {\n\tfmt.Printf(\"TruncateAllTables() called!\\n--- stack ---\\n%+v\\n\", errors.NewInternalError().StackTrace())\n\ttables := h.LoadAllTables(db)\n\tfor _, t := range tables {\n\t\tif strings.HasPrefix(t, \"m_\") {\n\t\t\tcontinue\n\t\t}\n\t\tif err := db.Exec(\"TRUNCATE TABLE \" + t).Error; err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Failed to truncate table: table=%v, err=%v\", t, err))\n\t\t}\n\t}\n}\n\nfunc (h *TestHelper) CreateUser(name, email string) *User {\n\tdb := h.DB()\n\tuser, err := NewUserService(db).Create(name, email)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to CreateUser(): err=%v\", err))\n\t}\n\treturn user\n}\n\nfunc (h *TestHelper) CreateRandomUser() *User {\n\tname := util.RandomString(16)\n\treturn h.CreateUser(name, name+\"@example.com\")\n}\n\nfunc (h *TestHelper) CreateUserGoogle(googleID string, userID uint32) *UserGoogle {\n\tuserGoogle := &UserGoogle{\n\t\tGoogleID: googleID,\n\t\tUserID: userID,\n\t}\n\tif err := h.DB().Create(userGoogle).Error; err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to CreateUserGoogle(): %v\", err))\n\t}\n\treturn userGoogle\n}\n\nfunc (h *TestHelper) CreateTeacher(id uint32, name string) *Teacher {\n\tdb := h.DB()\n\tteacher := &Teacher{\n\t\tID: id,\n\t\tName: name,\n\t\tGender: \"female\",\n\t\tBirthday: time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC),\n\t}\n\tif err := NewTeacherService(db).CreateOrUpdate(teacher); err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to CreateTeacher(): err=%v\", err))\n\t}\n\treturn teacher\n}\n\nfunc (h *TestHelper) CreateRandomTeacher() *Teacher {\n\treturn h.CreateTeacher(uint32(util.RandomInt(99999)), util.RandomString(6))\n}\n\nfunc (h *TestHelper) LoadMCountries() *MCountries {\n\tdb := h.DB()\n\t\/\/ TODO: cache\n\tmCountries, err := NewMCountryService(db).LoadAll()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to MCountryService.LoadAll(): err=%v\", err))\n\t}\n\treturn mCountries\n}\n\nfunc (h *TestHelper) CreateFollowingTeacher(userID uint32, teacher *Teacher) *FollowingTeacher {\n\tnow := time.Now()\n\tft, err := NewFollowingTeacherService(h.DB()).FollowTeacher(userID, teacher, now)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to FollowTeacher(): err=%v\", err))\n\t}\n\treturn ft\n}\n<commit_msg>Fix debug flag<commit_after>package model\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/oinume\/lekcije\/server\/bootstrap\"\n\t\"github.com\/oinume\/lekcije\/server\/errors\"\n\t\"github.com\/oinume\/lekcije\/server\/util\"\n)\n\ntype TestHelper struct {\n\tdbURL string\n\tdb *gorm.DB\n\tmCountryService *MCountryService\n}\n\nfunc NewTestHelper() *TestHelper {\n\treturn &TestHelper{}\n}\n\nfunc (h *TestHelper) DB() *gorm.DB {\n\tif h.db != nil {\n\t\treturn h.db\n\t}\n\tbootstrap.CheckCLIEnvVars()\n\th.dbURL = ReplaceToTestDBURL(bootstrap.CLIEnvVars.DBURL())\n\tdb, err := OpenDB(h.dbURL, 1, false \/* TODO: env *\/)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to OpenDB(): err=%v\", err))\n\t}\n\th.db = db\n\treturn db\n}\n\nfunc (h *TestHelper) getDBName(dbURL string) string {\n\tif index := strings.LastIndex(dbURL, \"\/\"); index != -1 {\n\t\treturn dbURL[index+1:]\n\t}\n\treturn \"\"\n}\n\nfunc (h *TestHelper) LoadAllTables(db *gorm.DB) []string {\n\ttype Table struct {\n\t\tName string `gorm:\"column:table_name\"`\n\t}\n\ttables := []Table{}\n\tsql := \"SELECT table_name FROM information_schema.tables WHERE table_schema = ?\"\n\tif err := db.Raw(sql, h.getDBName(h.dbURL)).Scan(&tables).Error; err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to select table names: err=%v\", err))\n\t}\n\n\ttableNames := []string{}\n\tfor _, t := range tables {\n\t\tif t.Name == \"goose_db_version\" {\n\t\t\tcontinue\n\t\t}\n\t\ttableNames = append(tableNames, t.Name)\n\t}\n\treturn tableNames\n}\n\nfunc (h *TestHelper) TruncateAllTables(db *gorm.DB) {\n\tfmt.Printf(\"TruncateAllTables() called!\\n--- stack ---\\n%+v\\n\", errors.NewInternalError().StackTrace())\n\ttables := h.LoadAllTables(db)\n\tfor _, t := range tables {\n\t\tif strings.HasPrefix(t, \"m_\") {\n\t\t\tcontinue\n\t\t}\n\t\tif err := db.Exec(\"TRUNCATE TABLE \" + t).Error; err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Failed to truncate table: table=%v, err=%v\", t, err))\n\t\t}\n\t}\n}\n\nfunc (h *TestHelper) CreateUser(name, email string) *User {\n\tdb := h.DB()\n\tuser, err := NewUserService(db).Create(name, email)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to CreateUser(): err=%v\", err))\n\t}\n\treturn user\n}\n\nfunc (h *TestHelper) CreateRandomUser() *User {\n\tname := util.RandomString(16)\n\treturn h.CreateUser(name, name+\"@example.com\")\n}\n\nfunc (h *TestHelper) CreateUserGoogle(googleID string, userID uint32) *UserGoogle {\n\tuserGoogle := &UserGoogle{\n\t\tGoogleID: googleID,\n\t\tUserID: userID,\n\t}\n\tif err := h.DB().Create(userGoogle).Error; err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to CreateUserGoogle(): %v\", err))\n\t}\n\treturn userGoogle\n}\n\nfunc (h *TestHelper) CreateTeacher(id uint32, name string) *Teacher {\n\tdb := h.DB()\n\tteacher := &Teacher{\n\t\tID: id,\n\t\tName: name,\n\t\tGender: \"female\",\n\t\tBirthday: time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC),\n\t}\n\tif err := NewTeacherService(db).CreateOrUpdate(teacher); err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to CreateTeacher(): err=%v\", err))\n\t}\n\treturn teacher\n}\n\nfunc (h *TestHelper) CreateRandomTeacher() *Teacher {\n\treturn h.CreateTeacher(uint32(util.RandomInt(99999)), util.RandomString(6))\n}\n\nfunc (h *TestHelper) LoadMCountries() *MCountries {\n\tdb := h.DB()\n\t\/\/ TODO: cache\n\tmCountries, err := NewMCountryService(db).LoadAll()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to MCountryService.LoadAll(): err=%v\", err))\n\t}\n\treturn mCountries\n}\n\nfunc (h *TestHelper) CreateFollowingTeacher(userID uint32, teacher *Teacher) *FollowingTeacher {\n\tnow := time.Now()\n\tft, err := NewFollowingTeacherService(h.DB()).FollowTeacher(userID, teacher, now)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to FollowTeacher(): err=%v\", err))\n\t}\n\treturn ft\n}\n<|endoftext|>"} {"text":"<commit_before>package bagman\n\nimport (\n\t\"archive\/tar\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"crypto\/md5\"\n\t\"crypto\/sha256\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\t\"github.com\/APTrust\/bagins\"\n\t\"github.com\/APTrust\/bagman\/fluctus\/models\"\n\t\"github.com\/nu7hatch\/gouuid\"\n\t\"github.com\/rakyll\/magicmime\"\n)\n\n\/\/ magicMime is the MimeMagic database. We want\n\/\/ just one copy of this open at a time.\nvar magicMime *magicmime.Magic\n\n\/\/ Untars the file at the specified tarFilePath and returns a list\n\/\/ of files that were untarred from the archive. Check\n\/\/ result.Error to ensure there were no errors.\n\/\/ tarFilePath is the tarFilePath to the tar file that you want to unpack.\n\/\/ instDomain is the domain name of the institution that owns the bag.\n\/\/ bagName is the name of the tar file, minus the \".tar\" extension.\nfunc Untar(tarFilePath, instDomain, bagName string) (result *TarResult) {\n\n\t\/\/ Set up our result\n\ttarResult := new(TarResult)\n\tabsInputFile, err := filepath.Abs(tarFilePath)\n\tif err != nil {\n\t\ttarResult.ErrorMessage = fmt.Sprintf(\"Before untarring, could not determine \" +\n\t\t\t\"absolute path to downloaded file: %v\", err)\n\t\treturn tarResult\n\t}\n\ttarResult.InputFile = absInputFile\n\n\t\/\/ Open the tar file for reading.\n\tfile, err := os.Open(tarFilePath)\n\tif file != nil {\n\t\tdefer file.Close()\n\t}\n\tif err != nil {\n\t\ttarResult.ErrorMessage = fmt.Sprintf(\"Could not open file %s for untarring: %v\",\n\t\t\ttarFilePath, err)\n\t\treturn tarResult\n\t}\n\n\t\/\/ Record the name of the top-level directory in the tar\n\t\/\/ file. Our spec says that the name of the directory into\n\t\/\/ which the file untars should be the same as the tar file\n\t\/\/ name, minus the .tar extension. So uva-123.tar should\n\t\/\/ untar into a directory called uva-123. This is required\n\t\/\/ so that IntellectualObject and GenericFile identifiers\n\t\/\/ in Fedora can be traced back to the named bag from which\n\t\/\/ they came. Other parts of bagman, such as the file cleanup\n\t\/\/ routines, assume that the untarred directory name will\n\t\/\/ match the tar file name, as the spec demands. When the names\n\t\/\/ do not match, the cleanup routines will not clean up the\n\t\/\/ untarred files, and we'll end up losing a lot of disk space.\n\ttopLevelDir := \"\"\n\n\t\/\/ Untar the file and record the results.\n\ttarReader := tar.NewReader(file)\n\n\tfor {\n\t\theader, err := tarReader.Next();\n\t\tif err != nil && err.Error() == \"EOF\" {\n\t\t\tbreak \/\/ end of archive\n\t\t}\n\t\tif err != nil {\n\t\t\ttarResult.ErrorMessage = fmt.Sprintf(\"Error reading tar file header: %v\", err)\n\t\t\treturn tarResult\n\t\t}\n\n\t\t\/\/ Top-level dir will be the first header entry.\n\t\tif topLevelDir == \"\" {\n\t\t\ttopLevelDir = strings.Replace(header.Name, \"\/\", \"\", 1)\n\t\t\texpectedDir := path.Base(tarFilePath)\n\t\t\tif strings.HasSuffix(expectedDir, \".tar\") {\n\t\t\t\texpectedDir = expectedDir[0:len(expectedDir) - 4]\n\t\t\t}\n\t\t\tif (topLevelDir != expectedDir) {\n\t\t\t\ttarResult.ErrorMessage = fmt.Sprintf(\n\t\t\t\t\t\"Bag '%s' should untar to a folder named '%s', but \" +\n\t\t\t\t\t\t\"it untars to '%s'. Please repackage and re-upload this bag.\",\n\t\t\t\t\tpath.Base(tarFilePath), expectedDir, topLevelDir)\n\t\t\t\treturn tarResult\n\t\t\t}\n\t\t}\n\n\t\toutputPath := filepath.Join(filepath.Dir(absInputFile), header.Name)\n\t\ttarDirectory := strings.Split(header.Name, \"\/\")[0]\n\t\tif tarResult.OutputDir == \"\" {\n\t\t\ttarResult.OutputDir = filepath.Join(filepath.Dir(absInputFile), tarDirectory)\n\t\t}\n\n\t\t\/\/ Make sure the directory that we're about to write into exists.\n\t\terr = os.MkdirAll(filepath.Dir(outputPath), 0755)\n\t\tif err != nil {\n\t\t\ttarResult.ErrorMessage = fmt.Sprintf(\"Could not create destination file '%s' \" +\n\t\t\t\t\"while unpacking tar archive: %v\", outputPath, err)\n\t\t\treturn tarResult\n\t\t}\n\n\t\t\/\/ Copy the file, if it's an actual file. Otherwise, ignore it and record\n\t\t\/\/ a warning. The bag library does not deal with items like symlinks.\n\t\tif header.Typeflag == tar.TypeReg || header.Typeflag == tar.TypeRegA {\n\t\t\tif strings.Contains(header.Name, \"data\/\") {\n\t\t\t\tgenericFile := buildGenericFile(tarReader, filepath.Dir(absInputFile), header.Name,\n\t\t\t\t\theader.Size, header.ModTime)\n\t\t\t\tgenericFile.Identifier = fmt.Sprintf(\"%s\/%s\/%s\", instDomain, bagName, genericFile.Path)\n\t\t\t\tgenericFile.IdentifierAssigned = time.Now()\n\t\t\t\ttarResult.GenericFiles = append(tarResult.GenericFiles, genericFile)\n\t\t\t} else {\n\t\t\t\terr = saveFile(outputPath, tarReader)\n\t\t\t\tif err != nil {\n\t\t\t\t\ttarResult.ErrorMessage = fmt.Sprintf(\"Error copying file from tar archive \" +\n\t\t\t\t\t\t\"to '%s': %v\", outputPath, err)\n\t\t\t\t\treturn tarResult\n\t\t\t\t}\n\t\t\t}\n\n\t\t\toutputRelativePath := strings.Replace(outputPath, tarResult.OutputDir + \"\/\", \"\", 1)\n\t\t\ttarResult.FilesUnpacked = append(tarResult.FilesUnpacked, outputRelativePath)\n\n\t\t} else if header.Typeflag != tar.TypeDir {\n\t\t\ttarResult.Warnings = append(tarResult.Warnings,\n\t\t\t\tfmt.Sprintf(\"Ignoring item %s of type %c because it's neither a file nor a directory\",\n\t\t\t\t\theader.Name, header.Typeflag))\n\t\t}\n\t}\n\tsort.Strings(tarResult.FilesUnpacked)\n\treturn tarResult\n}\n\n\/\/ Reads an untarred bag. The tarFilePath parameter should be a path to\n\/\/ a directory that contains the bag, info and manifest files.\n\/\/ The bag content should be in the data directory under tarFilePath.\n\/\/ Check result.Error to ensure there were no errors.\nfunc ReadBag(tarFilePath string) (result *BagReadResult) {\n\tbagReadResult := new(BagReadResult)\n\tbagReadResult.Path = tarFilePath\n\n\t\/\/ Final param to bagins.ReadBag is the name of the checksum file.\n\t\/\/ That param defaults to manifest-md5.txt, which is what it\n\t\/\/ should be for bags we're fetching from the S3 receiving buckets.\n\tbag, err := bagins.ReadBag(tarFilePath, []string{\"bagit.txt\", \"bag-info.txt\", \"aptrust-info.txt\"}, \"\")\n\tif err != nil {\n\t\tbagReadResult.ErrorMessage = fmt.Sprintf(\"Error unpacking bag: %v\", err)\n\t\treturn bagReadResult\n\t}\n\n\tfileNames, err := bag.ListFiles()\n\tif err!= nil {\n\t\tbagReadResult.ErrorMessage = fmt.Sprintf(\"Could not list bag files: %v\", err)\n\t\treturn bagReadResult\n\t}\n\n\terrMsg := \"\"\n\tbagReadResult.Files = make([]string, len(fileNames))\n\thasBagit := false\n\thasMd5Manifest := false\n\thasDataFiles := false\n\tfor index, fileName := range fileNames {\n\t\tbagReadResult.Files[index] = fileName\n\t\tif fileName == \"bagit.txt\" {\n\t\t\thasBagit = true\n\t\t} else if fileName == \"manifest-md5.txt\" {\n\t\t\thasMd5Manifest = true\n\t\t} else if strings.HasPrefix(fileName, \"data\/\") {\n\t\t\thasDataFiles = true\n\t\t}\n\t}\n\tif !hasBagit { errMsg += \"Bag is missing bagit.txt file. \" }\n\tif !hasMd5Manifest { errMsg += \"Bag is missing manifest-md5.txt file. \" }\n\tif !hasDataFiles { errMsg += \"Bag's data directory is missing or empty. \" }\n\n\textractTags(bag, bagReadResult)\n\n\tchecksumErrors := bag.Manifest.RunChecksums()\n\tif len(checksumErrors) > 0 {\n\t\terrMsg += \"The following checksums could not be verified: \"\n\t\tbagReadResult.ChecksumErrors = make([]error, len(checksumErrors))\n\t\tfor i, err := range checksumErrors {\n\t\t\tbagReadResult.ChecksumErrors[i] = err\n\t\t\terrMsg += err.Error() + \". \"\n\t\t}\n\t}\n\n\tif errMsg != \"\" {\n\t\tbagReadResult.ErrorMessage += fmt.Sprintf(errMsg)\n\t}\n\n\treturn bagReadResult\n}\n\n\/\/ Extract all of the tags from tag files \"bagit.txt\", \"bag-info.txt\",\n\/\/ and \"aptrust-info.txt\", and put those tags into the Tags member\n\/\/ of the BagReadResult structure.\nfunc extractTags(bag *bagins.Bag, bagReadResult *BagReadResult) {\n\ttagFiles := []string{\"bagit.txt\", \"bag-info.txt\", \"aptrust-info.txt\"}\n\taccessRights := \"\"\n\tfor _, file := range tagFiles {\n\t\ttagFile, err := bag.TagFile(file)\n\t\tif err != nil {\n\t\t\tbagReadResult.ErrorMessage = fmt.Sprintf(\"Error reading tags from bag: %v\", err)\n\t\t\treturn\n\t\t}\n\t\ttagFields := tagFile.Data.Fields()\n\n\t\tfor _, tagField := range tagFields {\n\t\t\ttag := Tag{ tagField.Label(), tagField.Value() }\n\t\t\tbagReadResult.Tags = append(bagReadResult.Tags, tag)\n\n\t\t\tlcLabel := strings.ToLower(tag.Label)\n\t\t\tif lcLabel == \"access\" {\n\t\t\t\taccessRights = strings.ToLower(tag.Value)\n\t\t\t} else if accessRights == \"\" && lcLabel == \"rights\" {\n\t\t\t\taccessRights = strings.ToLower(tag.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Make sure access rights are valid, or Fluctus will reject\n\t\/\/ this data when we try to register it.\n\taccessValid := false\n\tfor _, value := range(models.AccessRights) {\n\t\tif accessRights == value {\n\t\t\taccessValid = true\n\t\t}\n\t}\n\tif false == accessValid {\n\t\tbagReadResult.ErrorMessage += fmt.Sprintf(\"Access (rights) value '%s' is not valid. \", accessRights)\n\t}\n}\n\n\/\/ Saves a file from the tar archive to local disk. This function\n\/\/ used to save non-data files (manifests, tag files, etc.)\nfunc saveFile(destination string, tarReader *tar.Reader) (error) {\n\toutputWriter, err := os.OpenFile(destination, os.O_CREATE | os.O_WRONLY, 0644)\n\tif outputWriter != nil {\n\t\tdefer outputWriter.Close()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = io.Copy(outputWriter, tarReader);\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ buildGenericFile saves a data file from the tar archive to disk,\n\/\/ then returns a struct with data we'll need to construct the\n\/\/ GenericFile object in Fedora later.\nfunc buildGenericFile(tarReader *tar.Reader, tarDirectory string, fileName string, size int64, modTime time.Time) (gf *GenericFile) {\n\tgf = &GenericFile{}\n\tgf.Path = fileName[strings.Index(fileName, \"\/data\/\") + 1:len(fileName)]\n\tabsPath, err := filepath.Abs(filepath.Join(tarDirectory, fileName))\n\tif err != nil {\n\t\tgf.ErrorMessage = fmt.Sprintf(\"Path error: %v\", err)\n\t\treturn gf\n\t}\n\tuuid, err := uuid.NewV4()\n\tif err != nil {\n\t\tgf.ErrorMessage = fmt.Sprintf(\"UUID error: %v\", err)\n\t\treturn gf\n\t}\n\tgf.Uuid = uuid.String()\n\tgf.UuidGenerated = time.Now().UTC()\n\tgf.Size = size\n\tgf.Modified = modTime\n\n\t\/\/ Set up a MultiWriter to stream data ONCE to file,\n\t\/\/ md5 and sha256. We don't want to process the stream\n\t\/\/ three separate times.\n\toutputWriter, err := os.OpenFile(absPath, os.O_CREATE | os.O_WRONLY, 0644)\n\tif outputWriter != nil {\n\t\tdefer outputWriter.Close()\n\t}\n\tif err != nil {\n\t\tgf.ErrorMessage = fmt.Sprintf(\"Error opening writing to %s: %v\", absPath, err)\n\t\treturn gf\n\t}\n\tmd5Hash := md5.New()\n\tshaHash := sha256.New()\n\tmultiWriter := io.MultiWriter(md5Hash, shaHash, outputWriter)\n\tio.Copy(multiWriter, tarReader)\n\n\tgf.Md5 = fmt.Sprintf(\"%x\", md5Hash.Sum(nil))\n\tgf.Sha256 = fmt.Sprintf(\"%x\", shaHash.Sum(nil))\n\tgf.Sha256Generated = time.Now().UTC()\n\n\t\/\/ Open the Mime Magic DB only once.\n\tif magicMime == nil {\n\t\tmagicMime, err = magicmime.New()\n\t\tif err != nil {\n\t\t\tgf.ErrorMessage = fmt.Sprintf(\"Error opening MimeMagic database: %v\", err)\n\t\t\treturn gf\n\t\t}\n\t}\n\n\tgf.MimeType = \"application\/binary\"\n\tmimetype, _ := magicMime.TypeByFile(absPath)\n\tif gf.MimeType != \"\" {\n\t\tgf.MimeType = mimetype\n\t}\n\n\treturn gf\n}\n<commit_msg>Fixed problem in setting topLevelDir<commit_after>package bagman\n\nimport (\n\t\"archive\/tar\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"crypto\/md5\"\n\t\"crypto\/sha256\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\t\"github.com\/APTrust\/bagins\"\n\t\"github.com\/APTrust\/bagman\/fluctus\/models\"\n\t\"github.com\/nu7hatch\/gouuid\"\n\t\"github.com\/rakyll\/magicmime\"\n)\n\n\/\/ magicMime is the MimeMagic database. We want\n\/\/ just one copy of this open at a time.\nvar magicMime *magicmime.Magic\n\n\/\/ Untars the file at the specified tarFilePath and returns a list\n\/\/ of files that were untarred from the archive. Check\n\/\/ result.Error to ensure there were no errors.\n\/\/ tarFilePath is the tarFilePath to the tar file that you want to unpack.\n\/\/ instDomain is the domain name of the institution that owns the bag.\n\/\/ bagName is the name of the tar file, minus the \".tar\" extension.\nfunc Untar(tarFilePath, instDomain, bagName string) (result *TarResult) {\n\n\t\/\/ Set up our result\n\ttarResult := new(TarResult)\n\tabsInputFile, err := filepath.Abs(tarFilePath)\n\tif err != nil {\n\t\ttarResult.ErrorMessage = fmt.Sprintf(\"Before untarring, could not determine \" +\n\t\t\t\"absolute path to downloaded file: %v\", err)\n\t\treturn tarResult\n\t}\n\ttarResult.InputFile = absInputFile\n\n\t\/\/ Open the tar file for reading.\n\tfile, err := os.Open(tarFilePath)\n\tif file != nil {\n\t\tdefer file.Close()\n\t}\n\tif err != nil {\n\t\ttarResult.ErrorMessage = fmt.Sprintf(\"Could not open file %s for untarring: %v\",\n\t\t\ttarFilePath, err)\n\t\treturn tarResult\n\t}\n\n\t\/\/ Record the name of the top-level directory in the tar\n\t\/\/ file. Our spec says that the name of the directory into\n\t\/\/ which the file untars should be the same as the tar file\n\t\/\/ name, minus the .tar extension. So uva-123.tar should\n\t\/\/ untar into a directory called uva-123. This is required\n\t\/\/ so that IntellectualObject and GenericFile identifiers\n\t\/\/ in Fedora can be traced back to the named bag from which\n\t\/\/ they came. Other parts of bagman, such as the file cleanup\n\t\/\/ routines, assume that the untarred directory name will\n\t\/\/ match the tar file name, as the spec demands. When the names\n\t\/\/ do not match, the cleanup routines will not clean up the\n\t\/\/ untarred files, and we'll end up losing a lot of disk space.\n\ttopLevelDir := \"\"\n\n\t\/\/ Untar the file and record the results.\n\ttarReader := tar.NewReader(file)\n\n\tfor {\n\t\theader, err := tarReader.Next();\n\t\tif err != nil && err.Error() == \"EOF\" {\n\t\t\tbreak \/\/ end of archive\n\t\t}\n\t\tif err != nil {\n\t\t\ttarResult.ErrorMessage = fmt.Sprintf(\"Error reading tar file header: %v\", err)\n\t\t\treturn tarResult\n\t\t}\n\n\t\t\/\/ Top-level dir will be the first header entry.\n\t\tif header.Typeflag == tar.TypeDir && topLevelDir == \"\" {\n\t\t\ttopLevelDir = strings.Replace(header.Name, \"\/\", \"\", 1)\n\t\t\texpectedDir := path.Base(tarFilePath)\n\t\t\tif strings.HasSuffix(expectedDir, \".tar\") {\n\t\t\t\texpectedDir = expectedDir[0:len(expectedDir) - 4]\n\t\t\t}\n\t\t\tif (topLevelDir != expectedDir) {\n\t\t\t\ttarResult.ErrorMessage = fmt.Sprintf(\n\t\t\t\t\t\"Bag '%s' should untar to a folder named '%s', but \" +\n\t\t\t\t\t\t\"it untars to '%s'. Please repackage and re-upload this bag.\",\n\t\t\t\t\tpath.Base(tarFilePath), expectedDir, topLevelDir)\n\t\t\t\treturn tarResult\n\t\t\t}\n\t\t}\n\n\t\toutputPath := filepath.Join(filepath.Dir(absInputFile), header.Name)\n\t\ttarDirectory := strings.Split(header.Name, \"\/\")[0]\n\t\tif tarResult.OutputDir == \"\" {\n\t\t\ttarResult.OutputDir = filepath.Join(filepath.Dir(absInputFile), tarDirectory)\n\t\t}\n\n\t\t\/\/ Make sure the directory that we're about to write into exists.\n\t\terr = os.MkdirAll(filepath.Dir(outputPath), 0755)\n\t\tif err != nil {\n\t\t\ttarResult.ErrorMessage = fmt.Sprintf(\"Could not create destination file '%s' \" +\n\t\t\t\t\"while unpacking tar archive: %v\", outputPath, err)\n\t\t\treturn tarResult\n\t\t}\n\n\t\t\/\/ Copy the file, if it's an actual file. Otherwise, ignore it and record\n\t\t\/\/ a warning. The bag library does not deal with items like symlinks.\n\t\tif header.Typeflag == tar.TypeReg || header.Typeflag == tar.TypeRegA {\n\t\t\tif strings.Contains(header.Name, \"data\/\") {\n\t\t\t\tgenericFile := buildGenericFile(tarReader, filepath.Dir(absInputFile), header.Name,\n\t\t\t\t\theader.Size, header.ModTime)\n\t\t\t\tgenericFile.Identifier = fmt.Sprintf(\"%s\/%s\/%s\", instDomain, bagName, genericFile.Path)\n\t\t\t\tgenericFile.IdentifierAssigned = time.Now()\n\t\t\t\ttarResult.GenericFiles = append(tarResult.GenericFiles, genericFile)\n\t\t\t} else {\n\t\t\t\terr = saveFile(outputPath, tarReader)\n\t\t\t\tif err != nil {\n\t\t\t\t\ttarResult.ErrorMessage = fmt.Sprintf(\"Error copying file from tar archive \" +\n\t\t\t\t\t\t\"to '%s': %v\", outputPath, err)\n\t\t\t\t\treturn tarResult\n\t\t\t\t}\n\t\t\t}\n\n\t\t\toutputRelativePath := strings.Replace(outputPath, tarResult.OutputDir + \"\/\", \"\", 1)\n\t\t\ttarResult.FilesUnpacked = append(tarResult.FilesUnpacked, outputRelativePath)\n\n\t\t} else if header.Typeflag != tar.TypeDir {\n\t\t\ttarResult.Warnings = append(tarResult.Warnings,\n\t\t\t\tfmt.Sprintf(\"Ignoring item %s of type %c because it's neither a file nor a directory\",\n\t\t\t\t\theader.Name, header.Typeflag))\n\t\t}\n\t}\n\tsort.Strings(tarResult.FilesUnpacked)\n\treturn tarResult\n}\n\n\/\/ Reads an untarred bag. The tarFilePath parameter should be a path to\n\/\/ a directory that contains the bag, info and manifest files.\n\/\/ The bag content should be in the data directory under tarFilePath.\n\/\/ Check result.Error to ensure there were no errors.\nfunc ReadBag(tarFilePath string) (result *BagReadResult) {\n\tbagReadResult := new(BagReadResult)\n\tbagReadResult.Path = tarFilePath\n\n\t\/\/ Final param to bagins.ReadBag is the name of the checksum file.\n\t\/\/ That param defaults to manifest-md5.txt, which is what it\n\t\/\/ should be for bags we're fetching from the S3 receiving buckets.\n\tbag, err := bagins.ReadBag(tarFilePath, []string{\"bagit.txt\", \"bag-info.txt\", \"aptrust-info.txt\"}, \"\")\n\tif err != nil {\n\t\tbagReadResult.ErrorMessage = fmt.Sprintf(\"Error unpacking bag: %v\", err)\n\t\treturn bagReadResult\n\t}\n\n\tfileNames, err := bag.ListFiles()\n\tif err!= nil {\n\t\tbagReadResult.ErrorMessage = fmt.Sprintf(\"Could not list bag files: %v\", err)\n\t\treturn bagReadResult\n\t}\n\n\terrMsg := \"\"\n\tbagReadResult.Files = make([]string, len(fileNames))\n\thasBagit := false\n\thasMd5Manifest := false\n\thasDataFiles := false\n\tfor index, fileName := range fileNames {\n\t\tbagReadResult.Files[index] = fileName\n\t\tif fileName == \"bagit.txt\" {\n\t\t\thasBagit = true\n\t\t} else if fileName == \"manifest-md5.txt\" {\n\t\t\thasMd5Manifest = true\n\t\t} else if strings.HasPrefix(fileName, \"data\/\") {\n\t\t\thasDataFiles = true\n\t\t}\n\t}\n\tif !hasBagit { errMsg += \"Bag is missing bagit.txt file. \" }\n\tif !hasMd5Manifest { errMsg += \"Bag is missing manifest-md5.txt file. \" }\n\tif !hasDataFiles { errMsg += \"Bag's data directory is missing or empty. \" }\n\n\textractTags(bag, bagReadResult)\n\n\tchecksumErrors := bag.Manifest.RunChecksums()\n\tif len(checksumErrors) > 0 {\n\t\terrMsg += \"The following checksums could not be verified: \"\n\t\tbagReadResult.ChecksumErrors = make([]error, len(checksumErrors))\n\t\tfor i, err := range checksumErrors {\n\t\t\tbagReadResult.ChecksumErrors[i] = err\n\t\t\terrMsg += err.Error() + \". \"\n\t\t}\n\t}\n\n\tif errMsg != \"\" {\n\t\tbagReadResult.ErrorMessage += fmt.Sprintf(errMsg)\n\t}\n\n\treturn bagReadResult\n}\n\n\/\/ Extract all of the tags from tag files \"bagit.txt\", \"bag-info.txt\",\n\/\/ and \"aptrust-info.txt\", and put those tags into the Tags member\n\/\/ of the BagReadResult structure.\nfunc extractTags(bag *bagins.Bag, bagReadResult *BagReadResult) {\n\ttagFiles := []string{\"bagit.txt\", \"bag-info.txt\", \"aptrust-info.txt\"}\n\taccessRights := \"\"\n\tfor _, file := range tagFiles {\n\t\ttagFile, err := bag.TagFile(file)\n\t\tif err != nil {\n\t\t\tbagReadResult.ErrorMessage = fmt.Sprintf(\"Error reading tags from bag: %v\", err)\n\t\t\treturn\n\t\t}\n\t\ttagFields := tagFile.Data.Fields()\n\n\t\tfor _, tagField := range tagFields {\n\t\t\ttag := Tag{ tagField.Label(), tagField.Value() }\n\t\t\tbagReadResult.Tags = append(bagReadResult.Tags, tag)\n\n\t\t\tlcLabel := strings.ToLower(tag.Label)\n\t\t\tif lcLabel == \"access\" {\n\t\t\t\taccessRights = strings.ToLower(tag.Value)\n\t\t\t} else if accessRights == \"\" && lcLabel == \"rights\" {\n\t\t\t\taccessRights = strings.ToLower(tag.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Make sure access rights are valid, or Fluctus will reject\n\t\/\/ this data when we try to register it.\n\taccessValid := false\n\tfor _, value := range(models.AccessRights) {\n\t\tif accessRights == value {\n\t\t\taccessValid = true\n\t\t}\n\t}\n\tif false == accessValid {\n\t\tbagReadResult.ErrorMessage += fmt.Sprintf(\"Access (rights) value '%s' is not valid. \", accessRights)\n\t}\n}\n\n\/\/ Saves a file from the tar archive to local disk. This function\n\/\/ used to save non-data files (manifests, tag files, etc.)\nfunc saveFile(destination string, tarReader *tar.Reader) (error) {\n\toutputWriter, err := os.OpenFile(destination, os.O_CREATE | os.O_WRONLY, 0644)\n\tif outputWriter != nil {\n\t\tdefer outputWriter.Close()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = io.Copy(outputWriter, tarReader);\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ buildGenericFile saves a data file from the tar archive to disk,\n\/\/ then returns a struct with data we'll need to construct the\n\/\/ GenericFile object in Fedora later.\nfunc buildGenericFile(tarReader *tar.Reader, tarDirectory string, fileName string, size int64, modTime time.Time) (gf *GenericFile) {\n\tgf = &GenericFile{}\n\tgf.Path = fileName[strings.Index(fileName, \"\/data\/\") + 1:len(fileName)]\n\tabsPath, err := filepath.Abs(filepath.Join(tarDirectory, fileName))\n\tif err != nil {\n\t\tgf.ErrorMessage = fmt.Sprintf(\"Path error: %v\", err)\n\t\treturn gf\n\t}\n\tuuid, err := uuid.NewV4()\n\tif err != nil {\n\t\tgf.ErrorMessage = fmt.Sprintf(\"UUID error: %v\", err)\n\t\treturn gf\n\t}\n\tgf.Uuid = uuid.String()\n\tgf.UuidGenerated = time.Now().UTC()\n\tgf.Size = size\n\tgf.Modified = modTime\n\n\t\/\/ Set up a MultiWriter to stream data ONCE to file,\n\t\/\/ md5 and sha256. We don't want to process the stream\n\t\/\/ three separate times.\n\toutputWriter, err := os.OpenFile(absPath, os.O_CREATE | os.O_WRONLY, 0644)\n\tif outputWriter != nil {\n\t\tdefer outputWriter.Close()\n\t}\n\tif err != nil {\n\t\tgf.ErrorMessage = fmt.Sprintf(\"Error opening writing to %s: %v\", absPath, err)\n\t\treturn gf\n\t}\n\tmd5Hash := md5.New()\n\tshaHash := sha256.New()\n\tmultiWriter := io.MultiWriter(md5Hash, shaHash, outputWriter)\n\tio.Copy(multiWriter, tarReader)\n\n\tgf.Md5 = fmt.Sprintf(\"%x\", md5Hash.Sum(nil))\n\tgf.Sha256 = fmt.Sprintf(\"%x\", shaHash.Sum(nil))\n\tgf.Sha256Generated = time.Now().UTC()\n\n\t\/\/ Open the Mime Magic DB only once.\n\tif magicMime == nil {\n\t\tmagicMime, err = magicmime.New()\n\t\tif err != nil {\n\t\t\tgf.ErrorMessage = fmt.Sprintf(\"Error opening MimeMagic database: %v\", err)\n\t\t\treturn gf\n\t\t}\n\t}\n\n\tgf.MimeType = \"application\/binary\"\n\tmimetype, _ := magicMime.TypeByFile(absPath)\n\tif gf.MimeType != \"\" {\n\t\tgf.MimeType = mimetype\n\t}\n\n\treturn gf\n}\n<|endoftext|>"} {"text":"<commit_before>package storage\n\nimport (\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/dancannon\/gorethink\"\n\t\"github.com\/docker\/notary\/storage\/rethinkdb\"\n)\n\n\/\/ RDBTUFFile is a tuf file record\ntype RDBTUFFile struct {\n\trethinkdb.Timing\n\tGunRoleVersion []interface{} `gorethink:\"gun_role_version\"`\n\tGun string `gorethink:\"gun\"`\n\tRole string `gorethink:\"role\"`\n\tVersion int `gorethink:\"version\"`\n\tSha256 string `gorethink:\"sha256\"`\n\tData []byte `gorethink:\"data\"`\n}\n\n\/\/ TableName returns the table name for the record type\nfunc (r RDBTUFFile) TableName() string {\n\treturn \"tuf_files\"\n}\n\n\/\/ RDBKey is the public key record\ntype RDBKey struct {\n\trethinkdb.Timing\n\tGun string `gorethink:\"gun\"`\n\tRole string `gorethink:\"role\"`\n\tCipher string `gorethink:\"cipher\"`\n\tPublic []byte `gorethink:\"public\"`\n}\n\n\/\/ TableName returns the table name for the record type\nfunc (r RDBKey) TableName() string {\n\treturn \"tuf_keys\"\n}\n\n\/\/ RethinkDB implements a MetaStore against the Rethink Database\ntype RethinkDB struct {\n\tdbName string\n\tsess *gorethink.Session\n}\n\n\/\/ NewRethinkDBStorage initializes a RethinkDB object\nfunc NewRethinkDBStorage(dbName string, sess *gorethink.Session) RethinkDB {\n\treturn RethinkDB{\n\t\tdbName: dbName,\n\t\tsess: sess,\n\t}\n}\n\n\/\/ GetKey returns the cipher and public key for the given GUN and role.\n\/\/ If the GUN+role don't exist, returns an error.\nfunc (rdb RethinkDB) GetKey(gun, role string) (cipher string, public []byte, err error) {\n\tvar key RDBKey\n\tres, err := gorethink.DB(rdb.dbName).Table(key.TableName()).GetAllByIndex(\n\t\trdbGunRoleIdx,\n\t\tgun,\n\t\trole,\n\t).Run(rdb.sess)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tdefer res.Close()\n\terr = res.One(&key)\n\tif err == gorethink.ErrEmptyResult {\n\t\treturn \"\", nil, &ErrNoKey{gun: gun}\n\t}\n\treturn key.Cipher, key.Public, err\n}\n\n\/\/ SetKey sets the cipher and public key for the given GUN and role if\n\/\/ it doesn't already exist. Otherwise an error is returned.\nfunc (rdb RethinkDB) SetKey(gun, role, cipher string, public []byte) error {\n\tnow := time.Now()\n\tkey := RDBKey{\n\t\tTiming: rethinkdb.Timing{\n\t\t\tCreatedAt: now,\n\t\t\tUpdatedAt: now,\n\t\t},\n\t\tGun: gun,\n\t\tRole: role,\n\t\tCipher: cipher,\n\t\tPublic: public,\n\t}\n\t_, err := gorethink.DB(rdb.dbName).Table(key.TableName()).Insert(key).RunWrite(rdb.sess)\n\treturn err\n}\n\n\/\/ UpdateCurrent adds new metadata version for the given GUN if and only\n\/\/ if it's a new role, or the version is greater than the current version\n\/\/ for the role. Otherwise an error is returned.\nfunc (rdb RethinkDB) UpdateCurrent(gun string, update MetaUpdate) error {\n\tnow := time.Now()\n\tchecksum := sha256.Sum256(update.Data)\n\tfile := RDBTUFFile{\n\t\tTiming: rethinkdb.Timing{\n\t\t\tCreatedAt: now,\n\t\t\tUpdatedAt: now,\n\t\t},\n\t\tGunRoleVersion: []interface{}{gun, update.Role, update.Version},\n\t\tGun: gun,\n\t\tRole: update.Role,\n\t\tVersion: update.Version,\n\t\tSha256: hex.EncodeToString(checksum[:]),\n\t\tData: update.Data,\n\t}\n\t_, err := gorethink.DB(rdb.dbName).Table(file.TableName()).Insert(\n\t\tfile,\n\t\tgorethink.InsertOpts{\n\t\t\tConflict: \"error\", \/\/ default but explicit for clarity of intent\n\t\t},\n\t).RunWrite(rdb.sess)\n\tif err != nil && gorethink.IsConflictErr(err) {\n\t\treturn &ErrOldVersion{}\n\t}\n\treturn err\n}\n\n\/\/ UpdateMany adds multiple new metadata for the given GUN. RethinkDB does\n\/\/ not support transactions, therefore we will attempt to insert the timestamp\n\/\/ first as this represents a published version of the repo. If this is successful,\n\/\/ we will insert the remaining roles (in any order). If any of those roles\n\/\/ errors on insert, we will do a best effort rollback, at a minimum attempting\n\/\/ to delete the timestamp so nobody pulls a broken repo.\nfunc (rdb RethinkDB) UpdateMany(gun string, updates []MetaUpdate) error {\n\tfor _, up := range updates {\n\t\tif err := rdb.UpdateCurrent(gun, up); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ GetCurrent returns the modification date and data part of the metadata for\n\/\/ the latest version of the given GUN and role. If there is no data for\n\/\/ the given GUN and role, an error is returned.\nfunc (rdb RethinkDB) GetCurrent(gun, role string) (created *time.Time, data []byte, err error) {\n\tfile := RDBTUFFile{}\n\tres, err := gorethink.DB(rdb.dbName).Table(file.TableName()).GetAllByIndex(\n\t\trdbGunRoleIdx,\n\t\tgun,\n\t\trole,\n\t).OrderBy(gorethink.Desc(\"version\")).Run(rdb.sess)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer res.Close()\n\tif res.IsNil() {\n\t\treturn nil, nil, ErrNotFound{}\n\t}\n\terr = res.One(&file)\n\tif err == gorethink.ErrEmptyResult {\n\t\treturn nil, nil, ErrNotFound{}\n\t}\n\treturn &file.CreatedAt, file.Data, err\n}\n\n\/\/ GetChecksum returns the given TUF role file and creation date for the\n\/\/ GUN with the provided checksum. If the given (gun, role, checksum) are\n\/\/ not found, it returns storage.ErrNotFound\nfunc (rdb RethinkDB) GetChecksum(gun, role, checksum string) (created *time.Time, data []byte, err error) {\n\tvar file RDBTUFFile\n\tres, err := gorethink.DB(rdb.dbName).Table(file.TableName()).GetAllByIndex(\n\t\trdbGunRoleSha256Idx,\n\t\tgun,\n\t\trole,\n\t\tchecksum,\n\t).Run(rdb.sess)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer res.Close()\n\tif res.IsNil() {\n\t\treturn nil, nil, ErrNotFound{}\n\t}\n\terr = res.One(&file)\n\tif err == gorethink.ErrEmptyResult {\n\t\treturn nil, nil, ErrNotFound{}\n\t}\n\treturn &file.CreatedAt, file.Data, err\n}\n\n\/\/ Delete removes all metadata for a given GUN. It does not return an\n\/\/ error if no metadata exists for the given GUN.\nfunc (rdb RethinkDB) Delete(gun string) error {\n\t_, err := gorethink.DB(rdb.dbName).Table(RDBTUFFile{}.TableName()).GetAllByIndex(\n\t\t\"gun\",\n\t\tgun,\n\t).Delete().RunWrite(rdb.sess)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to delete %s from database: %s\", gun, err.Error())\n\t}\n\treturn nil\n}\n\n\/\/ Bootstrap sets up the database and tables\nfunc (rdb RethinkDB) Bootstrap() error {\n\treturn rethinkdb.SetupDB(rdb.sess, rdb.dbName, []rethinkdb.Table{\n\t\ttufFiles,\n\t\tkeys,\n\t})\n}\n\n\/\/ CheckHealth is currently a noop\nfunc (rdb RethinkDB) CheckHealth() error {\n\treturn nil\n}\n<commit_msg>Use explicit string slices for secondary index lookup<commit_after>package storage\n\nimport (\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/dancannon\/gorethink\"\n\t\"github.com\/docker\/notary\/storage\/rethinkdb\"\n)\n\n\/\/ RDBTUFFile is a tuf file record\ntype RDBTUFFile struct {\n\trethinkdb.Timing\n\tGunRoleVersion []interface{} `gorethink:\"gun_role_version\"`\n\tGun string `gorethink:\"gun\"`\n\tRole string `gorethink:\"role\"`\n\tVersion int `gorethink:\"version\"`\n\tSha256 string `gorethink:\"sha256\"`\n\tData []byte `gorethink:\"data\"`\n}\n\n\/\/ TableName returns the table name for the record type\nfunc (r RDBTUFFile) TableName() string {\n\treturn \"tuf_files\"\n}\n\n\/\/ RDBKey is the public key record\ntype RDBKey struct {\n\trethinkdb.Timing\n\tGun string `gorethink:\"gun\"`\n\tRole string `gorethink:\"role\"`\n\tCipher string `gorethink:\"cipher\"`\n\tPublic []byte `gorethink:\"public\"`\n}\n\n\/\/ TableName returns the table name for the record type\nfunc (r RDBKey) TableName() string {\n\treturn \"tuf_keys\"\n}\n\n\/\/ RethinkDB implements a MetaStore against the Rethink Database\ntype RethinkDB struct {\n\tdbName string\n\tsess *gorethink.Session\n}\n\n\/\/ NewRethinkDBStorage initializes a RethinkDB object\nfunc NewRethinkDBStorage(dbName string, sess *gorethink.Session) RethinkDB {\n\treturn RethinkDB{\n\t\tdbName: dbName,\n\t\tsess: sess,\n\t}\n}\n\n\/\/ GetKey returns the cipher and public key for the given GUN and role.\n\/\/ If the GUN+role don't exist, returns an error.\nfunc (rdb RethinkDB) GetKey(gun, role string) (cipher string, public []byte, err error) {\n\tvar key RDBKey\n\tres, err := gorethink.DB(rdb.dbName).Table(key.TableName()).GetAllByIndex(\n\t\trdbGunRoleIdx, []string{gun, role},\n\t).Run(rdb.sess)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tdefer res.Close()\n\terr = res.One(&key)\n\tif err == gorethink.ErrEmptyResult {\n\t\treturn \"\", nil, &ErrNoKey{gun: gun}\n\t}\n\treturn key.Cipher, key.Public, err\n}\n\n\/\/ SetKey sets the cipher and public key for the given GUN and role if\n\/\/ it doesn't already exist. Otherwise an error is returned.\nfunc (rdb RethinkDB) SetKey(gun, role, cipher string, public []byte) error {\n\tnow := time.Now()\n\tkey := RDBKey{\n\t\tTiming: rethinkdb.Timing{\n\t\t\tCreatedAt: now,\n\t\t\tUpdatedAt: now,\n\t\t},\n\t\tGun: gun,\n\t\tRole: role,\n\t\tCipher: cipher,\n\t\tPublic: public,\n\t}\n\t_, err := gorethink.DB(rdb.dbName).Table(key.TableName()).Insert(key).RunWrite(rdb.sess)\n\treturn err\n}\n\n\/\/ UpdateCurrent adds new metadata version for the given GUN if and only\n\/\/ if it's a new role, or the version is greater than the current version\n\/\/ for the role. Otherwise an error is returned.\nfunc (rdb RethinkDB) UpdateCurrent(gun string, update MetaUpdate) error {\n\tnow := time.Now()\n\tchecksum := sha256.Sum256(update.Data)\n\tfile := RDBTUFFile{\n\t\tTiming: rethinkdb.Timing{\n\t\t\tCreatedAt: now,\n\t\t\tUpdatedAt: now,\n\t\t},\n\t\tGunRoleVersion: []interface{}{gun, update.Role, update.Version},\n\t\tGun: gun,\n\t\tRole: update.Role,\n\t\tVersion: update.Version,\n\t\tSha256: hex.EncodeToString(checksum[:]),\n\t\tData: update.Data,\n\t}\n\t_, err := gorethink.DB(rdb.dbName).Table(file.TableName()).Insert(\n\t\tfile,\n\t\tgorethink.InsertOpts{\n\t\t\tConflict: \"error\", \/\/ default but explicit for clarity of intent\n\t\t},\n\t).RunWrite(rdb.sess)\n\tif err != nil && gorethink.IsConflictErr(err) {\n\t\treturn &ErrOldVersion{}\n\t}\n\treturn err\n}\n\n\/\/ UpdateMany adds multiple new metadata for the given GUN. RethinkDB does\n\/\/ not support transactions, therefore we will attempt to insert the timestamp\n\/\/ first as this represents a published version of the repo. If this is successful,\n\/\/ we will insert the remaining roles (in any order). If any of those roles\n\/\/ errors on insert, we will do a best effort rollback, at a minimum attempting\n\/\/ to delete the timestamp so nobody pulls a broken repo.\nfunc (rdb RethinkDB) UpdateMany(gun string, updates []MetaUpdate) error {\n\tfor _, up := range updates {\n\t\tif err := rdb.UpdateCurrent(gun, up); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ GetCurrent returns the modification date and data part of the metadata for\n\/\/ the latest version of the given GUN and role. If there is no data for\n\/\/ the given GUN and role, an error is returned.\nfunc (rdb RethinkDB) GetCurrent(gun, role string) (created *time.Time, data []byte, err error) {\n\tfile := RDBTUFFile{}\n\tres, err := gorethink.DB(rdb.dbName).Table(file.TableName()).GetAllByIndex(\n\t\trdbGunRoleIdx, []string{gun, role},\n\t).OrderBy(gorethink.Desc(\"version\")).Run(rdb.sess)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer res.Close()\n\tif res.IsNil() {\n\t\treturn nil, nil, ErrNotFound{}\n\t}\n\terr = res.One(&file)\n\tif err == gorethink.ErrEmptyResult {\n\t\treturn nil, nil, ErrNotFound{}\n\t}\n\treturn &file.CreatedAt, file.Data, err\n}\n\n\/\/ GetChecksum returns the given TUF role file and creation date for the\n\/\/ GUN with the provided checksum. If the given (gun, role, checksum) are\n\/\/ not found, it returns storage.ErrNotFound\nfunc (rdb RethinkDB) GetChecksum(gun, role, checksum string) (created *time.Time, data []byte, err error) {\n\tvar file RDBTUFFile\n\tres, err := gorethink.DB(rdb.dbName).Table(file.TableName()).GetAllByIndex(\n\t\trdbGunRoleSha256Idx, []string{gun, role, checksum},\n\t).Run(rdb.sess)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer res.Close()\n\tif res.IsNil() {\n\t\treturn nil, nil, ErrNotFound{}\n\t}\n\terr = res.One(&file)\n\tif err == gorethink.ErrEmptyResult {\n\t\treturn nil, nil, ErrNotFound{}\n\t}\n\treturn &file.CreatedAt, file.Data, err\n}\n\n\/\/ Delete removes all metadata for a given GUN. It does not return an\n\/\/ error if no metadata exists for the given GUN.\nfunc (rdb RethinkDB) Delete(gun string) error {\n\t_, err := gorethink.DB(rdb.dbName).Table(RDBTUFFile{}.TableName()).GetAllByIndex(\n\t\t\"gun\", []string{gun},\n\t).Delete().RunWrite(rdb.sess)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to delete %s from database: %s\", gun, err.Error())\n\t}\n\treturn nil\n}\n\n\/\/ Bootstrap sets up the database and tables\nfunc (rdb RethinkDB) Bootstrap() error {\n\treturn rethinkdb.SetupDB(rdb.sess, rdb.dbName, []rethinkdb.Table{\n\t\ttufFiles,\n\t\tkeys,\n\t})\n}\n\n\/\/ CheckHealth is currently a noop\nfunc (rdb RethinkDB) CheckHealth() error {\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 MSolution.IO\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"context\"\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/trackit\/jsonlog\"\n\n\t\"github.com\/trackit\/trackit-server\/aws\/pricings\"\n\t\"github.com\/trackit\/trackit-server\/db\"\n\t\"github.com\/trackit\/trackit-server\/models\"\n)\n\n\/\/ taskFetchPricings fetches the EC2 pricings and saves them in the database\nfunc taskFetchPricings(ctx context.Context) (err error) {\n\tlogger := jsonlog.LoggerFromContextOrDefault(ctx)\n\tres, err := pricings.FetchEc2Pricings(ctx)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to retrieve ec2 pricings\", err.Error())\n\t\treturn\n\t}\n\tserializedPricing, err := json.Marshal(res)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to serialize ec2 pricings\", err.Error())\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tvar tx *sql.Tx\n\tdefer func() {\n\t\tif tx != nil {\n\t\t\tif err != nil {\n\t\t\t\ttx.Rollback()\n\t\t\t} else {\n\t\t\t\ttx.Commit()\n\t\t\t}\n\t\t}\n\t}()\n\tif tx, err = db.Db.BeginTx(ctx, nil); err != nil {\n\t\tlogger.Error(\"Failed to initiate sql transaction\", err.Error())\n\t\treturn\n\t} else {\n\t\tec2PricingDb, _ := models.AwsPricingByProduct(tx, pricings.EC2ServiceCode)\n\t\tif ec2PricingDb == nil {\n\t\t\tec2PricingDb = &models.AwsPricing{\n\t\t\t\tProduct: pricings.EC2ServiceCode,\n\t\t\t}\n\t\t}\n\t\tec2PricingDb.Pricing = serializedPricing\n\t\terr = ec2PricingDb.Save(tx)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"Failed to save ec2 pricings\", err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n<commit_msg>removed debug println<commit_after>\/\/ Copyright 2018 MSolution.IO\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"context\"\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/trackit\/jsonlog\"\n\n\t\"github.com\/trackit\/trackit-server\/aws\/pricings\"\n\t\"github.com\/trackit\/trackit-server\/db\"\n\t\"github.com\/trackit\/trackit-server\/models\"\n)\n\n\/\/ taskFetchPricings fetches the EC2 pricings and saves them in the database\nfunc taskFetchPricings(ctx context.Context) (err error) {\n\tlogger := jsonlog.LoggerFromContextOrDefault(ctx)\n\tres, err := pricings.FetchEc2Pricings(ctx)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to retrieve ec2 pricings\", err.Error())\n\t\treturn\n\t}\n\tserializedPricing, err := json.Marshal(res)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to serialize ec2 pricings\", err.Error())\n\t\treturn\n\t}\n\tvar tx *sql.Tx\n\tdefer func() {\n\t\tif tx != nil {\n\t\t\tif err != nil {\n\t\t\t\ttx.Rollback()\n\t\t\t} else {\n\t\t\t\ttx.Commit()\n\t\t\t}\n\t\t}\n\t}()\n\tif tx, err = db.Db.BeginTx(ctx, nil); err != nil {\n\t\tlogger.Error(\"Failed to initiate sql transaction\", err.Error())\n\t\treturn\n\t} else {\n\t\tec2PricingDb, _ := models.AwsPricingByProduct(tx, pricings.EC2ServiceCode)\n\t\tif ec2PricingDb == nil {\n\t\t\tec2PricingDb = &models.AwsPricing{\n\t\t\t\tProduct: pricings.EC2ServiceCode,\n\t\t\t}\n\t\t}\n\t\tec2PricingDb.Pricing = serializedPricing\n\t\terr = ec2PricingDb.Save(tx)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"Failed to save ec2 pricings\", err.Error())\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ © 2013 the Bits Authors under the MIT license. See AUTHORS for the list of authors.\n\n\/\/ Package bit implements functionality for reading streams of bits from an io.Reader.\npackage bit\n\nimport (\n\t\"io\"\n)\n\n\/\/ Reader provides methods for reading bits.\ntype Reader struct {\n\tin io.Reader\n\tb uint8\n\tn uint\n}\n\nvar mask = [...]uint8{\n\t0x0,\n\t0x1,\n\t0x3,\n\t0x7,\n\t0xF,\n\t0x1F,\n\t0x3F,\n\t0x7F,\n\t0xFF,\n}\n\n\/\/ NewReader returns a new Reader that reads bits the given io.Reader.\nfunc NewReader(r io.Reader) *Reader {\n\treturn &Reader{in: r}\n}\n\n\/\/ Uint8 reads and return n bits, up to 8. It panicks if n is greater than 8.\nfunc (r *Reader) Uint8(n uint) (uint8, error) {\n\tif n > 8 {\n\t\tpanic(\"Too many bits for Uint8\")\n\t}\n\n\tvar vl uint8\n\tfor n > 0 {\n\t\tif r.n == 0 {\n\t\t\tif err := r.nextByte(); err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\n\t\tm := r.n\n\t\tif r.n >= n {\n\t\t\tm = n\n\t\t}\n\n\t\tshift := r.n - m\n\t\tb := (r.b >> shift) & mask[m]\n\t\tvl = (vl << m) | b\n\n\t\tn -= m\n\t\tr.n -= m\n\t}\n\n\treturn vl, nil\n}\n\nfunc (r *Reader) nextByte() error {\n\tvar b [1]uint8\n\tif _, err := io.ReadFull(r.in, b[:]); err != nil {\n\t\treturn err\n\t}\n\tr.b = b[0]\n\tr.n = 8\n\treturn nil\n}\n\n\/\/ Uint64 reads and return n bits, up to 64. It panicks if n is greater than 64.\nfunc (r *Reader) Uint64(n uint) (uint64, error) {\n\tif n > 64 {\n\t\tpanic(\"Too many bits for Uint64\")\n\t}\n\tvar vl uint64\n\tfor n > 0 {\n\t\tm := n\n\t\tif n > 8 {\n\t\t\tm = 8\n\t\t}\n\t\tb, err := r.Uint8(m)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tvl = (vl << m) | uint64(b)\n\t\tn -= m\n\t}\n\treturn vl, nil\n}\n<commit_msg>Define mask before type Reader.<commit_after>\/\/ © 2013 the Bits Authors under the MIT license. See AUTHORS for the list of authors.\n\n\/\/ Package bit implements functionality for reading streams of bits from an io.Reader.\npackage bit\n\nimport (\n\t\"io\"\n)\n\nvar mask = [...]uint8{\n\t0x0,\n\t0x1,\n\t0x3,\n\t0x7,\n\t0xF,\n\t0x1F,\n\t0x3F,\n\t0x7F,\n\t0xFF,\n}\n\n\/\/ Reader provides methods for reading bits.\ntype Reader struct {\n\tin io.Reader\n\tb uint8\n\tn uint\n}\n\n\/\/ NewReader returns a new Reader that reads bits the given io.Reader.\nfunc NewReader(r io.Reader) *Reader {\n\treturn &Reader{in: r}\n}\n\n\/\/ Uint8 reads and return n bits, up to 8. It panicks if n is greater than 8.\nfunc (r *Reader) Uint8(n uint) (uint8, error) {\n\tif n > 8 {\n\t\tpanic(\"Too many bits for Uint8\")\n\t}\n\n\tvar vl uint8\n\tfor n > 0 {\n\t\tif r.n == 0 {\n\t\t\tif err := r.nextByte(); err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\n\t\tm := r.n\n\t\tif r.n >= n {\n\t\t\tm = n\n\t\t}\n\n\t\tshift := r.n - m\n\t\tb := (r.b >> shift) & mask[m]\n\t\tvl = (vl << m) | b\n\n\t\tn -= m\n\t\tr.n -= m\n\t}\n\n\treturn vl, nil\n}\n\nfunc (r *Reader) nextByte() error {\n\tvar b [1]uint8\n\tif _, err := io.ReadFull(r.in, b[:]); err != nil {\n\t\treturn err\n\t}\n\tr.b = b[0]\n\tr.n = 8\n\treturn nil\n}\n\n\/\/ Uint64 reads and return n bits, up to 64. It panicks if n is greater than 64.\nfunc (r *Reader) Uint64(n uint) (uint64, error) {\n\tif n > 64 {\n\t\tpanic(\"Too many bits for Uint64\")\n\t}\n\tvar vl uint64\n\tfor n > 0 {\n\t\tm := n\n\t\tif n > 8 {\n\t\t\tm = 8\n\t\t}\n\t\tb, err := r.Uint8(m)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tvl = (vl << m) | uint64(b)\n\t\tn -= m\n\t}\n\treturn vl, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package bob\n\nimport (\n\t\"github.com\/rafecolton\/bob\/dclient\"\n\t\"github.com\/rafecolton\/bob\/log\"\n\t\"github.com\/rafecolton\/bob\/parser\"\n)\n\nimport (\n\t\"github.com\/deckarep\/golang-set\"\n\t\"github.com\/onsi\/gocleanup\"\n)\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n)\n\n\/*\nA Builder is the struct that actually does the work of moving files around and\nexecuting the commands that do the docker build.\n*\/\ntype Builder struct {\n\tdockerClient dclient.DockerClient\n\tlog.Logger\n\tworkdir string\n\tisRegular bool\n\tnextSubSequence *parser.SubSequence\n\tStderr io.Writer\n\tStdout io.Writer\n\tBuilderfile string\n}\n\n\/*\nSetNextSubSequence sets the next subsequence within bob to be processed. This\nfunction is exported because it is used explicitly in tests, but in Build(), it\nis intended to be used as a helper function.\n*\/\nfunc (bob *Builder) SetNextSubSequence(subSeq *parser.SubSequence) {\n\tbob.nextSubSequence = subSeq\n}\n\n\/*\nNewBuilder returns an instance of a Builder struct. The function exists in\ncase we want to initialize our Builders with something.\n*\/\nfunc NewBuilder(logger log.Logger, shouldBeRegular bool) *Builder {\n\tif logger == nil {\n\t\tlogger = &log.NullLogger{}\n\t}\n\n\tclient, err := dclient.NewDockerClient(logger, shouldBeRegular)\n\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn &Builder{\n\t\tdockerClient: client,\n\t\tLogger: logger,\n\t\tisRegular: shouldBeRegular,\n\t\tStdout: log.NewOutWriter(logger, \"@{!w} -----> @{g}%s@{|}\"),\n\t\tStderr: log.NewOutWriter(logger, \"@{!w} -----> @{r}%s@{|}\"),\n\t}\n}\n\n\/*\nBuild is currently a placeholder function but will eventually be used to do the\nactual work of building.\n*\/\nfunc (bob *Builder) Build(commandSequence *parser.CommandSequence) error {\n\tfor _, seq := range commandSequence.Commands {\n\t\tbob.CleanWorkdir()\n\t\tbob.SetNextSubSequence(seq)\n\t\tbob.Setup()\n\n\t\tworkdir := bob.Workdir()\n\n\t\tbob.Printf(\"Running commands for \\\"%s\\\"\\n\", seq.Metadata.Name)\n\n\t\tvar imageID string\n\t\tvar err error\n\n\t\tfor _, cmd := range seq.SubCommand {\n\t\t\tcmd.Stdout = bob.Stdout\n\t\t\tcmd.Stderr = bob.Stderr\n\t\t\tcmd.Dir = workdir\n\n\t\t\tif cmd.Path == \"docker\" {\n\t\t\t\tpath, err := exec.LookPath(\"docker\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tcmd.Path = path\n\t\t\t}\n\n\t\t\tswitch cmd.Args[1] {\n\t\t\tcase \"build\":\n\t\t\t\tbob.Printf(\"running command %s\\n\", cmd.Args)\n\n\t\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\timageID, err = bob.LatestImageTaggedWithUUID(seq.Metadata.UUID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tcase \"tag\":\n\t\t\t\tcmd.Args[2] = imageID\n\t\t\t\tbob.Printf(\"running command %s\\n\", cmd.Args)\n\n\t\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tcase \"push\":\n\t\t\t\tbob.Printf(\"running command %s\\n\", cmd.Args)\n\n\t\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"oops, looks like the command you're asking me to run is improperly formed: %s\\n\", cmd.Args)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/*\nSetup moves all of the correct files into place in the temporary directory in\norder to perform the docker build.\n*\/\nfunc (bob *Builder) Setup() error {\n\tif bob.nextSubSequence == nil {\n\t\treturn errors.New(\"no command sub sequence set, cannot perform setup\")\n\t}\n\n\tmeta := bob.nextSubSequence.Metadata\n\tfileSet := mapset.NewSet()\n\n\tif len(meta.Included) == 0 {\n\t\tfiles, err := ioutil.ReadDir(bob.Repodir())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, v := range files {\n\t\t\tfileSet.Add(v.Name())\n\t\t}\n\t} else {\n\t\tfor _, v := range meta.Included {\n\t\t\tfileSet.Add(v)\n\t\t}\n\t}\n\n\t\/\/ subtract any excludes from fileSet\n\tfor _, exclude := range meta.Excluded {\n\t\tif fileSet.Contains(exclude) {\n\t\t\tfileSet.Remove(exclude)\n\t\t}\n\t}\n\n\tif fileSet.Contains(\"Dockerfile\") {\n\t\tfileSet.Remove(\"Dockerfile\")\n\t}\n\n\t\/\/ add the Dockerfile\n\tfileSet.Add(meta.Dockerfile)\n\n\tworkdir := bob.Workdir()\n\trepodir := bob.Repodir()\n\n\t\/\/ copy the actual files over\n\tfor file := range fileSet.Iter() {\n\t\tsrc := fmt.Sprintf(\"%s\/%s\", repodir, file)\n\t\tdest := fmt.Sprintf(\"%s\/%s\", workdir, file)\n\n\t\tif file == meta.Dockerfile {\n\t\t\tdest = fmt.Sprintf(\"%s\/%s\", workdir, \"Dockerfile\")\n\t\t}\n\n\t\tfileInfo, err := os.Stat(src)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif fileInfo.IsDir() {\n\t\t\terr = CopyDir(src, dest)\n\t\t} else {\n\t\t\terr = CopyFile(src, dest)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/*\nRepodir is the dir from which we are using files for our docker builds.\n*\/\nfunc (bob *Builder) Repodir() string {\n\tif !bob.isRegular {\n\t\trepoDir := \"spec\/fixtures\/repodir\"\n\t\treturn fmt.Sprintf(\"%s\/%s\", os.ExpandEnv(\"${PWD}\"), repoDir)\n\t}\n\treturn filepath.Dir(bob.Builderfile)\n}\n\n\/*\nWorkdir returns bob's working directory.\n*\/\nfunc (bob *Builder) Workdir() string {\n\treturn bob.workdir\n}\n\nfunc (bob *Builder) generateWorkDir() string {\n\tif !bob.isRegular {\n\t\tspecWorkdir := \"spec\/fixtures\/workdir\"\n\t\treturn fmt.Sprintf(\"%s\/%s\", os.ExpandEnv(\"${PWD}\"), specWorkdir)\n\t}\n\n\ttmp, err := ioutil.TempDir(\"\", \"bob\")\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\tgocleanup.Register(func() {\n\t\tos.RemoveAll(tmp)\n\t})\n\n\treturn tmp\n}\n\n\/*\nCleanWorkdir effectively does a rm -rf and mkdir -p on bob's workdir. Intended\nto be used before using the workdir (i.e. before new command groups).\n*\/\nfunc (bob *Builder) CleanWorkdir() error {\n\tworkdir := bob.generateWorkDir()\n\tbob.workdir = workdir\n\n\tif !bob.isRegular {\n\t\treadme := fmt.Sprintf(\"%s\/README.txt\", workdir)\n\t\tos.RemoveAll(workdir)\n\t\terr := os.MkdirAll(workdir, 0755)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfile, err := os.Create(readme)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer file.Close()\n\n\t\tbytes := []byte(\"This directory tree is used for specs - please do not modify.\\n\")\n\t\tif _, err := file.Write(bytes); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tos.RemoveAll(workdir)\n\terr := os.MkdirAll(workdir, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/*\nLatestImageTaggedWithUUID accepts a uuid and invokes the underlying utility\nDockerClient to determine the id of the most recently created image tagged with\nthe provided uuid.\n*\/\nfunc (bob *Builder) LatestImageTaggedWithUUID(uuid string) (string, error) {\n\tid, err := bob.dockerClient.LatestImageTaggedWithUUID(uuid)\n\tif err != nil {\n\t\tbob.Println(err)\n\t\treturn \"\", err\n\t}\n\n\treturn id, nil\n}\n<commit_msg>[TRIVIAL] adjusting logging output format<commit_after>package bob\n\nimport (\n\t\"github.com\/rafecolton\/bob\/dclient\"\n\t\"github.com\/rafecolton\/bob\/log\"\n\t\"github.com\/rafecolton\/bob\/parser\"\n)\n\nimport (\n\t\"github.com\/deckarep\/golang-set\"\n\t\"github.com\/onsi\/gocleanup\"\n)\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n)\n\n\/*\nA Builder is the struct that actually does the work of moving files around and\nexecuting the commands that do the docker build.\n*\/\ntype Builder struct {\n\tdockerClient dclient.DockerClient\n\tlog.Logger\n\tworkdir string\n\tisRegular bool\n\tnextSubSequence *parser.SubSequence\n\tStderr io.Writer\n\tStdout io.Writer\n\tBuilderfile string\n}\n\n\/*\nSetNextSubSequence sets the next subsequence within bob to be processed. This\nfunction is exported because it is used explicitly in tests, but in Build(), it\nis intended to be used as a helper function.\n*\/\nfunc (bob *Builder) SetNextSubSequence(subSeq *parser.SubSequence) {\n\tbob.nextSubSequence = subSeq\n}\n\n\/*\nNewBuilder returns an instance of a Builder struct. The function exists in\ncase we want to initialize our Builders with something.\n*\/\nfunc NewBuilder(logger log.Logger, shouldBeRegular bool) *Builder {\n\tif logger == nil {\n\t\tlogger = &log.NullLogger{}\n\t}\n\n\tclient, err := dclient.NewDockerClient(logger, shouldBeRegular)\n\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn &Builder{\n\t\tdockerClient: client,\n\t\tLogger: logger,\n\t\tisRegular: shouldBeRegular,\n\t\tStdout: log.NewOutWriter(logger, \"@{!w} -----> @{g}%s@{|}\"),\n\t\tStderr: log.NewOutWriter(logger, \"@{!w} -----> @{r}%s@{|}\"),\n\t}\n}\n\n\/*\nBuild is currently a placeholder function but will eventually be used to do the\nactual work of building.\n*\/\nfunc (bob *Builder) Build(commandSequence *parser.CommandSequence) error {\n\tfor _, seq := range commandSequence.Commands {\n\t\tbob.CleanWorkdir()\n\t\tbob.SetNextSubSequence(seq)\n\t\tbob.Setup()\n\n\t\tworkdir := bob.Workdir()\n\n\t\tbob.Printf(\"Running commands for \\\"%s\\\"\\n\", seq.Metadata.Name)\n\n\t\tvar imageID string\n\t\tvar err error\n\n\t\tfor _, cmd := range seq.SubCommand {\n\t\t\tcmd.Stdout = bob.Stdout\n\t\t\tcmd.Stderr = bob.Stderr\n\t\t\tcmd.Dir = workdir\n\n\t\t\tif cmd.Path == \"docker\" {\n\t\t\t\tpath, err := exec.LookPath(\"docker\")\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tcmd.Path = path\n\t\t\t}\n\n\t\t\tswitch cmd.Args[1] {\n\t\t\tcase \"build\":\n\t\t\t\tbob.Printf(\"running command %s\\n\", cmd.Args)\n\n\t\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\timageID, err = bob.LatestImageTaggedWithUUID(seq.Metadata.UUID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tcase \"tag\":\n\t\t\t\tcmd.Args[2] = imageID\n\t\t\t\tbob.Printf(\"running command %s\\n\", cmd.Args)\n\n\t\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tcase \"push\":\n\t\t\t\tbob.Printf(\"running command %s\\n\", cmd.Args)\n\n\t\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\"oops, looks like the command you're asking me to run is improperly formed: %s\\n\", cmd.Args)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/*\nSetup moves all of the correct files into place in the temporary directory in\norder to perform the docker build.\n*\/\nfunc (bob *Builder) Setup() error {\n\tif bob.nextSubSequence == nil {\n\t\treturn errors.New(\"no command sub sequence set, cannot perform setup\")\n\t}\n\n\tmeta := bob.nextSubSequence.Metadata\n\tfileSet := mapset.NewSet()\n\n\tif len(meta.Included) == 0 {\n\t\tfiles, err := ioutil.ReadDir(bob.Repodir())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, v := range files {\n\t\t\tfileSet.Add(v.Name())\n\t\t}\n\t} else {\n\t\tfor _, v := range meta.Included {\n\t\t\tfileSet.Add(v)\n\t\t}\n\t}\n\n\t\/\/ subtract any excludes from fileSet\n\tfor _, exclude := range meta.Excluded {\n\t\tif fileSet.Contains(exclude) {\n\t\t\tfileSet.Remove(exclude)\n\t\t}\n\t}\n\n\tif fileSet.Contains(\"Dockerfile\") {\n\t\tfileSet.Remove(\"Dockerfile\")\n\t}\n\n\t\/\/ add the Dockerfile\n\tfileSet.Add(meta.Dockerfile)\n\n\tworkdir := bob.Workdir()\n\trepodir := bob.Repodir()\n\n\t\/\/ copy the actual files over\n\tfor file := range fileSet.Iter() {\n\t\tsrc := fmt.Sprintf(\"%s\/%s\", repodir, file)\n\t\tdest := fmt.Sprintf(\"%s\/%s\", workdir, file)\n\n\t\tif file == meta.Dockerfile {\n\t\t\tdest = fmt.Sprintf(\"%s\/%s\", workdir, \"Dockerfile\")\n\t\t}\n\n\t\tfileInfo, err := os.Stat(src)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif fileInfo.IsDir() {\n\t\t\terr = CopyDir(src, dest)\n\t\t} else {\n\t\t\terr = CopyFile(src, dest)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/*\nRepodir is the dir from which we are using files for our docker builds.\n*\/\nfunc (bob *Builder) Repodir() string {\n\tif !bob.isRegular {\n\t\trepoDir := \"spec\/fixtures\/repodir\"\n\t\treturn fmt.Sprintf(\"%s\/%s\", os.ExpandEnv(\"${PWD}\"), repoDir)\n\t}\n\treturn filepath.Dir(bob.Builderfile)\n}\n\n\/*\nWorkdir returns bob's working directory.\n*\/\nfunc (bob *Builder) Workdir() string {\n\treturn bob.workdir\n}\n\nfunc (bob *Builder) generateWorkDir() string {\n\tif !bob.isRegular {\n\t\tspecWorkdir := \"spec\/fixtures\/workdir\"\n\t\treturn fmt.Sprintf(\"%s\/%s\", os.ExpandEnv(\"${PWD}\"), specWorkdir)\n\t}\n\n\ttmp, err := ioutil.TempDir(\"\", \"bob\")\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\tgocleanup.Register(func() {\n\t\tos.RemoveAll(tmp)\n\t})\n\n\treturn tmp\n}\n\n\/*\nCleanWorkdir effectively does a rm -rf and mkdir -p on bob's workdir. Intended\nto be used before using the workdir (i.e. before new command groups).\n*\/\nfunc (bob *Builder) CleanWorkdir() error {\n\tworkdir := bob.generateWorkDir()\n\tbob.workdir = workdir\n\n\tif !bob.isRegular {\n\t\treadme := fmt.Sprintf(\"%s\/README.txt\", workdir)\n\t\tos.RemoveAll(workdir)\n\t\terr := os.MkdirAll(workdir, 0755)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfile, err := os.Create(readme)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer file.Close()\n\n\t\tbytes := []byte(\"This directory tree is used for specs - please do not modify.\\n\")\n\t\tif _, err := file.Write(bytes); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tos.RemoveAll(workdir)\n\terr := os.MkdirAll(workdir, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/*\nLatestImageTaggedWithUUID accepts a uuid and invokes the underlying utility\nDockerClient to determine the id of the most recently created image tagged with\nthe provided uuid.\n*\/\nfunc (bob *Builder) LatestImageTaggedWithUUID(uuid string) (string, error) {\n\tid, err := bob.dockerClient.LatestImageTaggedWithUUID(uuid)\n\tif err != nil {\n\t\tbob.Println(err)\n\t\treturn \"\", err\n\t}\n\n\treturn id, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/go-telegram-bot-api\/telegram-bot-api\"\n)\n\nvar bot *tgbotapi.BotAPI\n\nfunc botRun() error {\n\tvar err error\n\tbot, err = tgbotapi.NewBotAPI(cfg.Bot.Token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbot.Debug = cfg.Bot.Debug\n\n\tlog.Printf(\"Authorized on account %s\", bot.Self.UserName)\n\n\t_, err = bot.SetWebhook(tgbotapi.NewWebhookWithCert(fmt.Sprintf(\"%s%s\/%s\", cfg.HTTP.Host, cfg.HTTP.Port, cfg.Bot.Token), cfg.HTTP.PublicKey))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tupdates := bot.ListenForWebhook(fmt.Sprintf(\"\/%s\", bot.Token))\n\tgo func() {\n\t\tif err := http.ListenAndServeTLS(cfg.HTTP.Port, cfg.HTTP.PublicKey, cfg.HTTP.PrivateKey, nil); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\tfor update := range updates {\n\t\tmsgRouter(update)\n\t}\n\treturn nil\n}\n\nfunc msgRouter(update tgbotapi.Update) error {\n\tswitch {\n\tcase update.Message.IsCommand():\n\t\treturn isCommand(update)\n\tcase update.InlineQuery != nil:\n\t\treturn isInline(update)\n\tcase update.Message.Chat.IsPrivate() || bot.IsMessageToMe(*update.Message):\n\t\treturn isMessage(update)\n\t}\n\treturn nil\n}\n\nfunc isCommand(update tgbotapi.Update) error {\n\tmsg := strings.Trim(update.Message.CommandArguments(), \" \")\n\n\tswitch update.Message.Command() {\n\tcase \"s\":\n\t\tif msg == \"\" {\n\t\t\treturn sendMsg(update, HelpMsg)\n\t\t}\n\t\ttxt, err := search(msg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn sendMsg(update, txt)\n\tcase \"daily\":\n\t\treturn isDaily(update)\n\tdefault:\n\t\treturn sendMsg(update, HelpMsg)\n\t}\n\treturn nil\n}\n\nfunc isMessage(update tgbotapi.Update) error {\n\ttxt, err := search(update.Message.Text)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn sendMsg(update, txt)\n}\n\nfunc isInline(update tgbotapi.Update) error {\n\tfmt.Println(update.InlineQuery)\n\treturn nil\n}\n\nfunc isDaily(update tgbotapi.Update) error {\n\ttxt, err := daily()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn sendMsg(update, txt)\n}\n\nfunc sendMsg(update tgbotapi.Update, txt string) error {\n\tmsg := tgbotapi.NewMessage(update.Message.Chat.ID, txt)\n\tmsg.ParseMode = \"HTML\"\n\tmsg.DisableWebPagePreview = true\n\tif _, err := bot.Send(msg); err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>update<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/go-telegram-bot-api\/telegram-bot-api\"\n)\n\nvar bot *tgbotapi.BotAPI\n\nfunc botRun() error {\n\tvar err error\n\tbot, err = tgbotapi.NewBotAPI(cfg.Bot.Token)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbot.Debug = cfg.Bot.Debug\n\n\tlog.Printf(\"Authorized on account %s\", bot.Self.UserName)\n\n\t_, err = bot.SetWebhook(tgbotapi.NewWebhookWithCert(fmt.Sprintf(\"%s%s\/%s\", cfg.HTTP.Host, cfg.HTTP.Port, cfg.Bot.Token), cfg.HTTP.PublicKey))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tupdates := bot.ListenForWebhook(fmt.Sprintf(\"\/%s\", bot.Token))\n\tgo func() {\n\t\tif err := http.ListenAndServeTLS(cfg.HTTP.Port, cfg.HTTP.PublicKey, cfg.HTTP.PrivateKey, nil); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\tfor update := range updates {\n\t\tmsgRouter(update)\n\t}\n\treturn nil\n}\n\nfunc msgRouter(update tgbotapi.Update) error {\n\tswitch {\n\tcase update.InlineQuery != nil:\n\t\treturn isInline(update)\n\tcase update.Message != nil && update.Message.IsCommand():\n\t\treturn isCommand(update)\n\tcase update.Message != nil && (update.Message.Chat.IsPrivate() || bot.IsMessageToMe(*update.Message)):\n\t\treturn isMessage(update)\n\t}\n\treturn nil\n}\n\nfunc isCommand(update tgbotapi.Update) error {\n\tmsg := strings.Trim(update.Message.CommandArguments(), \" \")\n\n\tswitch update.Message.Command() {\n\tcase \"s\":\n\t\tif msg == \"\" {\n\t\t\treturn sendMsg(update, HelpMsg)\n\t\t}\n\t\ttxt, err := search(msg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn sendMsg(update, txt)\n\tcase \"daily\":\n\t\treturn isDaily(update)\n\tdefault:\n\t\treturn sendMsg(update, HelpMsg)\n\t}\n\treturn nil\n}\n\nfunc isMessage(update tgbotapi.Update) error {\n\ttxt, err := search(update.Message.Text)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn sendMsg(update, txt)\n}\n\nfunc isInline(update tgbotapi.Update) error {\n\tfmt.Println(update.InlineQuery)\n\treturn nil\n}\n\nfunc isDaily(update tgbotapi.Update) error {\n\ttxt, err := daily()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn sendMsg(update, txt)\n}\n\nfunc sendMsg(update tgbotapi.Update, txt string) error {\n\tmsg := tgbotapi.NewMessage(update.Message.Chat.ID, txt)\n\tmsg.ParseMode = \"HTML\"\n\tmsg.DisableWebPagePreview = true\n\tif _, err := bot.Send(msg); err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/textproto\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc (bot *Bot) reduceJoins() {\n\tbot.joins--\n}\n\n\/\/ Bot struct for main config\ntype Bot struct {\n\tserver string\n\tgroupserver string\n\tport string\n\tgroupport string\n\toauth string\n\tnick string\n\tinconn net.Conn\n\tmainconn net.Conn\n\tconnlist []Connection\n\tconnactive bool\n\tgroupconn net.Conn\n\tgroupconnactive bool\n\tjoins int\n\ttoJoin []string\n}\n\n\/\/ NewBot main config\nfunc NewBot() *Bot {\n\treturn &Bot{\n\t\tserver: \"irc.chat.twitch.tv\",\n\t\tgroupserver: \"group.tmi.twitch.tv\",\n\t\tport: \"80\",\n\t\tgroupport: \"443\",\n\t\toauth: \"\",\n\t\tnick: \"\",\n\t\tinconn: nil,\n\t\tmainconn: nil,\n\t\tconnlist: make([]Connection, 0),\n\t\tconnactive: false,\n\t\tgroupconn: nil,\n\t\tgroupconnactive: false,\n\t\tjoins: 0,\n\t}\n}\n\nfunc (bot *Bot) join(channel string) {\n\tfor !bot.connactive {\n\t\tlog.Printf(\"chat connection not active yet\")\n\t\ttime.Sleep(time.Second)\n\t}\n\n\tif bot.joins < 45 {\n\t\tfmt.Fprintf(bot.mainconn, \"JOIN %s\\r\\n\", channel)\n\t\tlog.Printf(\"[chat] joined %s\", channel)\n\t\tbot.joins++\n\t\ttime.AfterFunc(10*time.Second, bot.reduceJoins)\n\t} else {\n\t\tlog.Printf(\"[chat] in queue to join %s\", channel)\n\t\ttime.Sleep(time.Second)\n\t\tbot.join(channel)\n\t}\n}\n\nfunc (bot *Bot) part(channel string) {\n\tfor !bot.connactive {\n\t\tlog.Printf(\"chat connection not active yet\")\n\t\ttime.Sleep(time.Second)\n\t}\n\tfmt.Fprintf(bot.mainconn, \"PART %s\\r\\n\", channel)\n\tlog.Printf(\"[chat] parted %s\", channel)\n}\n\n\/\/ ListenToConnection listen\nfunc (bot *Bot) ListenToConnection(conn net.Conn) {\n\treader := bufio.NewReader(conn)\n\ttp := textproto.NewReader(reader)\n\tfor {\n\t\tline, err := tp.ReadLine()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error reading from chat connection: %s\", err)\n\t\t\tbot.CreateConnection()\n\t\t\tbreak \/\/ break loop on errors\n\t\t}\n\t\tif strings.Contains(line, \"tmi.twitch.tv 001\") {\n\t\t\tbot.connactive = true\n\t\t}\n\t\tif strings.Contains(line, \"PING \") {\n\t\t\tfmt.Fprintf(conn, \"PONG tmi.twitch.tv\\r\\n\")\n\t\t}\n\t\tfmt.Fprintf(bot.inconn, line+\"\\r\\n\")\n\t}\n}\n\n\/\/ ListenToGroupConnection validate connection is running and listen to it\nfunc (bot *Bot) ListenToGroupConnection(conn net.Conn) {\n\treader := bufio.NewReader(conn)\n\ttp := textproto.NewReader(reader)\n\tfor {\n\t\tline, err := tp.ReadLine()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error reading from group connection: %s\", err)\n\t\t\tbot.CreateGroupConnection()\n\t\t\tbreak\n\t\t}\n\t\tif strings.Contains(line, \"tmi.twitch.tv 001\") {\n\t\t\tbot.groupconnactive = true\n\t\t}\n\t\tif strings.Contains(line, \"PING \") {\n\t\t\tfmt.Fprintf(conn, \"PONG tmi.twitch.tv\\r\\n\")\n\t\t}\n\t\tfmt.Fprintf(bot.inconn, line+\"\\r\\n\")\n\t}\n}\n\n\/\/ CreateConnection Add a new connection\nfunc (bot *Bot) CreateConnection() {\n\tconn, err := net.Dial(\"tcp\", bot.server+\":\"+bot.port)\n\tif err != nil {\n\t\tlog.Println(\"unable to connect to chat IRC server \", err)\n\t\tbot.CreateConnection()\n\t\treturn\n\t}\n\tfmt.Fprintf(conn, \"PASS %s\\r\\n\", bot.oauth)\n\tfmt.Fprintf(conn, \"USER %s\\r\\n\", bot.nick)\n\tfmt.Fprintf(conn, \"NICK %s\\r\\n\", bot.nick)\n\tfmt.Fprintf(conn, \"CAP REQ :twitch.tv\/tags\\r\\n\") \/\/ enable ircv3 tags\n\tfmt.Fprintf(conn, \"CAP REQ :twitch.tv\/commands\\r\\n\") \/\/ enable roomstate and such\n\tlog.Printf(\"new connection to chat IRC server %s (%s)\\n\", bot.server, conn.RemoteAddr())\n\n\tconnnection := NewConnection(conn)\n\tbot.connlist = append(bot.connlist, connnection)\n\n\tif len(bot.connlist) == 1 {\n\t\tbot.mainconn = conn\n\t}\n\n\tgo bot.ListenToConnection(conn)\n\n}\n\n\/\/ CreateGroupConnection creates connection to recevie and send whispers\nfunc (bot *Bot) CreateGroupConnection() {\n\tconn, err := net.Dial(\"tcp\", bot.groupserver+\":\"+bot.groupport)\n\tif err != nil {\n\t\tlog.Println(\"unable to connect to group IRC server \", err)\n\t\tbot.CreateGroupConnection()\n\t\treturn\n\t}\n\tfmt.Fprintf(conn, \"PASS %s\\r\\n\", bot.oauth)\n\tfmt.Fprintf(conn, \"USER %s\\r\\n\", bot.nick)\n\tfmt.Fprintf(conn, \"NICK %s\\r\\n\", bot.nick)\n\tfmt.Fprintf(conn, \"CAP REQ :twitch.tv\/tags\\r\\n\") \/\/ enable ircv3 tags\n\tfmt.Fprintf(conn, \"CAP REQ :twitch.tv\/commands\\r\\n\") \/\/ enable roomstate and such\n\tlog.Printf(\"new connection to group IRC server %s (%s)\\n\", bot.groupserver, conn.RemoteAddr())\n\n\tbot.groupconn = conn\n\n\tgo bot.ListenToGroupConnection(conn)\n}\n\n\/\/ shuffle simple array shuffle functino\nfunc shuffle(a []Connection) {\n\tfor i := range a {\n\t\tj := rand.Intn(i + 1)\n\t\ta[i], a[j] = a[j], a[i]\n\t}\n}\n\n\/\/ Message to send a message\nfunc (bot *Bot) Message(message string) {\n\tmessage = strings.TrimSpace(message)\n\tfor !bot.connactive {\n\t\t\/\/ wait for connection to become active\n\t}\n\tshuffle(bot.connlist)\n\tfor i := 0; i < len(bot.connlist); i++ {\n\t\tif bot.connlist[i].messages < 90 {\n\t\t\tbot.connlist[i].Message(message)\n\t\t\treturn\n\t\t}\n\t}\n\t\/\/ open new connection when others too full\n\tlog.Printf(\"opened new connection, total: %d\", len(bot.connlist))\n\tbot.CreateConnection()\n\tbot.Message(message)\n}\n\n\/\/ Whisper to send whispers\nfunc (bot *Bot) Whisper(message string) {\n\tfor !bot.groupconnactive {\n\t\t\/\/ wait for connection to become active\n\t}\n\tfmt.Fprintf(bot.groupconn, \"PRIVMSG #jtv :\"+message+\"\\r\\n\")\n\tlog.Printf(message)\n}\n<commit_msg>styling fix<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/textproto\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc (bot *Bot) reduceJoins() {\n\tbot.joins--\n}\n\n\/\/ Bot struct for main config\ntype Bot struct {\n\tserver string\n\tgroupserver string\n\tport string\n\tgroupport string\n\toauth string\n\tnick string\n\tinconn net.Conn\n\tmainconn net.Conn\n\tconnlist []Connection\n\tconnactive bool\n\tgroupconn net.Conn\n\tgroupconnactive bool\n\tjoins int\n\ttoJoin []string\n}\n\n\/\/ NewBot main config\nfunc NewBot() *Bot {\n\treturn &Bot{\n\t\tserver: \"irc.chat.twitch.tv\",\n\t\tgroupserver: \"group.tmi.twitch.tv\",\n\t\tport: \"80\",\n\t\tgroupport: \"443\",\n\t\toauth: \"\",\n\t\tnick: \"\",\n\t\tinconn: nil,\n\t\tmainconn: nil,\n\t\tconnlist: make([]Connection, 0),\n\t\tconnactive: false,\n\t\tgroupconn: nil,\n\t\tgroupconnactive: false,\n\t\tjoins: 0,\n\t}\n}\n\nfunc (bot *Bot) join(channel string) {\n\tfor !bot.connactive {\n\t\tlog.Printf(\"chat connection not active yet\")\n\t\ttime.Sleep(time.Second)\n\t}\n\n\tif bot.joins < 45 {\n\t\tfmt.Fprintf(bot.mainconn, \"JOIN %s\\r\\n\", channel)\n\t\tlog.Printf(\"[chat] joined %s\", channel)\n\t\tbot.joins++\n\t\ttime.AfterFunc(10*time.Second, bot.reduceJoins)\n\t} else {\n\t\tlog.Printf(\"[chat] in queue to join %s\", channel)\n\t\ttime.Sleep(time.Second)\n\t\tbot.join(channel)\n\t}\n}\n\nfunc (bot *Bot) part(channel string) {\n\tfor !bot.connactive {\n\t\tlog.Printf(\"chat connection not active yet\")\n\t\ttime.Sleep(time.Second)\n\t}\n\tfmt.Fprintf(bot.mainconn, \"PART %s\\r\\n\", channel)\n\tlog.Printf(\"[chat] parted %s\", channel)\n}\n\n\/\/ ListenToConnection listen\nfunc (bot *Bot) ListenToConnection(conn net.Conn) {\n\treader := bufio.NewReader(conn)\n\ttp := textproto.NewReader(reader)\n\tfor {\n\t\tline, err := tp.ReadLine()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error reading from chat connection: %s\", err)\n\t\t\tbot.CreateConnection()\n\t\t\tbreak \/\/ break loop on errors\n\t\t}\n\t\tif strings.Contains(line, \"tmi.twitch.tv 001\") {\n\t\t\tbot.connactive = true\n\t\t}\n\t\tif strings.Contains(line, \"PING \") {\n\t\t\tfmt.Fprintf(conn, \"PONG tmi.twitch.tv\\r\\n\")\n\t\t}\n\t\tfmt.Fprintf(bot.inconn, line+\"\\r\\n\")\n\t}\n}\n\n\/\/ ListenToGroupConnection validate connection is running and listen to it\nfunc (bot *Bot) ListenToGroupConnection(conn net.Conn) {\n\treader := bufio.NewReader(conn)\n\ttp := textproto.NewReader(reader)\n\tfor {\n\t\tline, err := tp.ReadLine()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error reading from group connection: %s\", err)\n\t\t\tbot.CreateGroupConnection()\n\t\t\tbreak\n\t\t}\n\t\tif strings.Contains(line, \"tmi.twitch.tv 001\") {\n\t\t\tbot.groupconnactive = true\n\t\t}\n\t\tif strings.Contains(line, \"PING \") {\n\t\t\tfmt.Fprintf(conn, \"PONG tmi.twitch.tv\\r\\n\")\n\t\t}\n\t\tfmt.Fprintf(bot.inconn, line+\"\\r\\n\")\n\t}\n}\n\n\/\/ CreateConnection Add a new connection\nfunc (bot *Bot) CreateConnection() {\n\tconn, err := net.Dial(\"tcp\", bot.server+\":\"+bot.port)\n\tif err != nil {\n\t\tlog.Println(\"unable to connect to chat IRC server \", err)\n\t\tbot.CreateConnection()\n\t\treturn\n\t}\n\n\tfmt.Fprintf(conn, \"PASS %s\\r\\n\", bot.oauth)\n\tfmt.Fprintf(conn, \"USER %s\\r\\n\", bot.nick)\n\tfmt.Fprintf(conn, \"NICK %s\\r\\n\", bot.nick)\n\tfmt.Fprintf(conn, \"CAP REQ :twitch.tv\/tags\\r\\n\") \/\/ enable ircv3 tags\n\tfmt.Fprintf(conn, \"CAP REQ :twitch.tv\/commands\\r\\n\") \/\/ enable roomstate and such\n\tlog.Printf(\"new connection to chat IRC server %s (%s)\\n\", bot.server, conn.RemoteAddr())\n\n\tconnnection := NewConnection(conn)\n\tbot.connlist = append(bot.connlist, connnection)\n\n\tif len(bot.connlist) == 1 {\n\t\tbot.mainconn = conn\n\t}\n\n\tgo bot.ListenToConnection(conn)\n}\n\n\/\/ CreateGroupConnection creates connection to recevie and send whispers\nfunc (bot *Bot) CreateGroupConnection() {\n\tconn, err := net.Dial(\"tcp\", bot.groupserver+\":\"+bot.groupport)\n\tif err != nil {\n\t\tlog.Println(\"unable to connect to group IRC server \", err)\n\t\tbot.CreateGroupConnection()\n\t\treturn\n\t}\n\tfmt.Fprintf(conn, \"PASS %s\\r\\n\", bot.oauth)\n\tfmt.Fprintf(conn, \"USER %s\\r\\n\", bot.nick)\n\tfmt.Fprintf(conn, \"NICK %s\\r\\n\", bot.nick)\n\tfmt.Fprintf(conn, \"CAP REQ :twitch.tv\/tags\\r\\n\") \/\/ enable ircv3 tags\n\tfmt.Fprintf(conn, \"CAP REQ :twitch.tv\/commands\\r\\n\") \/\/ enable roomstate and such\n\tlog.Printf(\"new connection to group IRC server %s (%s)\\n\", bot.groupserver, conn.RemoteAddr())\n\n\tbot.groupconn = conn\n\n\tgo bot.ListenToGroupConnection(conn)\n}\n\n\/\/ shuffle simple array shuffle functino\nfunc shuffle(a []Connection) {\n\tfor i := range a {\n\t\tj := rand.Intn(i + 1)\n\t\ta[i], a[j] = a[j], a[i]\n\t}\n}\n\n\/\/ Message to send a message\nfunc (bot *Bot) Message(message string) {\n\tmessage = strings.TrimSpace(message)\n\tfor !bot.connactive {\n\t\t\/\/ wait for connection to become active\n\t}\n\tshuffle(bot.connlist)\n\tfor i := 0; i < len(bot.connlist); i++ {\n\t\tif bot.connlist[i].messages < 90 {\n\t\t\tbot.connlist[i].Message(message)\n\t\t\treturn\n\t\t}\n\t}\n\t\/\/ open new connection when others too full\n\tlog.Printf(\"opened new connection, total: %d\", len(bot.connlist))\n\tbot.CreateConnection()\n\tbot.Message(message)\n}\n\n\/\/ Whisper to send whispers\nfunc (bot *Bot) Whisper(message string) {\n\tfor !bot.groupconnactive {\n\t\t\/\/ wait for connection to become active\n\t}\n\tfmt.Fprintf(bot.groupconn, \"PRIVMSG #jtv :\"+message+\"\\r\\n\")\n\tlog.Printf(message)\n}\n<|endoftext|>"} {"text":"<commit_before>package instructions\n\nimport (\n\t\"jvmgo\/jvm\/rtda\"\n\trtc \"jvmgo\/jvm\/rtda\/class\"\n\t\"jvmgo\/util\"\n)\n\n\/\/ Push item from run-time constant pool\ntype ldc struct{ Index8Instruction }\n\nfunc (self *ldc) Execute(frame *rtda.Frame) {\n\t_ldc(frame, self.index)\n}\n\n\/\/ Push item from run-time constant pool (wide index)\ntype ldc_w struct{ Index16Instruction }\n\nfunc (self *ldc_w) Execute(frame *rtda.Frame) {\n\t_ldc(frame, self.index)\n}\n\nfunc _ldc(frame *rtda.Frame, index uint) {\n\tstack := frame.OperandStack()\n\tcp := frame.Method().Class().ConstantPool()\n\tc := cp.GetConstant(index)\n\n\tswitch c.(type) {\n\tcase int32:\n\t\tstack.PushInt(c.(int32))\n\tcase float32:\n\t\tstack.PushFloat(c.(float32))\n\tcase *rtc.ConstantString:\n\t\tkString := c.(*rtc.ConstantString)\n\t\tif kString.JStr() == nil {\n\t\t\tstrObj := rtda.NewJString(kString.GoStr(), frame) \/\/ already interned\n\t\t\tkString.SetJStr(strObj)\n\t\t}\n\t\tstack.PushRef(kString.JStr())\n\tcase *rtc.ConstantClass: \/\/ todo\n\t\tkClass := c.(*rtc.ConstantClass)\n\t\tclassObj := kClass.Class().JClass()\n\t\tstack.PushRef(classObj)\n\tdefault:\n\t\t\/\/ todo\n\t\t\/\/ ref to MethodType or MethodHandle\n\t\tutil.Panicf(\"todo: ldc! %v\\n\", c)\n\t}\n}\n\n\/\/ Push long or double from run-time constant pool (wide index)\ntype ldc2_w struct{ Index16Instruction }\n\nfunc (self *ldc2_w) Execute(frame *rtda.Frame) {\n\tstack := frame.OperandStack()\n\tcp := frame.Method().Class().ConstantPool()\n\tc := cp.GetConstant(self.index)\n\n\tswitch c.(type) {\n\tcase int64:\n\t\tstack.PushLong(c.(int64))\n\tcase float64:\n\t\tstack.PushDouble(c.(float64))\n\t\t\/\/ todo\n\t}\n}\n<commit_msg>code refactor<commit_after>package instructions\n\nimport (\n\t\"jvmgo\/jvm\/rtda\"\n\trtc \"jvmgo\/jvm\/rtda\/class\"\n\t\"jvmgo\/util\"\n)\n\n\/\/ Push item from run-time constant pool\ntype ldc struct{ Index8Instruction }\n\nfunc (self *ldc) Execute(frame *rtda.Frame) {\n\t_ldc(frame, self.index)\n}\n\n\/\/ Push item from run-time constant pool (wide index)\ntype ldc_w struct{ Index16Instruction }\n\nfunc (self *ldc_w) Execute(frame *rtda.Frame) {\n\t_ldc(frame, self.index)\n}\n\nfunc _ldc(frame *rtda.Frame, index uint) {\n\tstack := frame.OperandStack()\n\tcp := frame.Method().Class().ConstantPool()\n\tc := cp.GetConstant(index)\n\n\tswitch c.(type) {\n\tcase int32:\n\t\tstack.PushInt(c.(int32))\n\tcase float32:\n\t\tstack.PushFloat(c.(float32))\n\tcase *rtc.ConstantString:\n\t\tkString := c.(*rtc.ConstantString)\n\t\tif kString.JStr() == nil {\n\t\t\tstrObj := rtda.NewJString(kString.GoStr(), frame) \/\/ already interned\n\t\t\tkString.SetJStr(strObj)\n\t\t}\n\t\tstack.PushRef(kString.JStr())\n\tcase *rtc.ConstantClass:\n\t\tkClass := c.(*rtc.ConstantClass)\n\t\tclassObj := kClass.Class().JClass()\n\t\tstack.PushRef(classObj)\n\tdefault:\n\t\t\/\/ todo\n\t\t\/\/ ref to MethodType or MethodHandle\n\t\tutil.Panicf(\"todo: ldc! %v\", c)\n\t}\n}\n\n\/\/ Push long or double from run-time constant pool (wide index)\ntype ldc2_w struct{ Index16Instruction }\n\nfunc (self *ldc2_w) Execute(frame *rtda.Frame) {\n\tstack := frame.OperandStack()\n\tcp := frame.Method().Class().ConstantPool()\n\tc := cp.GetConstant(self.index)\n\n\tswitch c.(type) {\n\tcase int64:\n\t\tstack.PushLong(c.(int64))\n\tcase float64:\n\t\tstack.PushDouble(c.(float64))\n\tdefault:\n\t\tutil.Panicf(\"ldc2_w! %v\", c)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage release\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"code.gitea.io\/gitea\/models\"\n\t\"code.gitea.io\/gitea\/modules\/git\"\n\t\"code.gitea.io\/gitea\/modules\/log\"\n\t\"code.gitea.io\/gitea\/modules\/notification\"\n\t\"code.gitea.io\/gitea\/modules\/repository\"\n\t\"code.gitea.io\/gitea\/modules\/storage\"\n\t\"code.gitea.io\/gitea\/modules\/timeutil\"\n)\n\nfunc createTag(gitRepo *git.Repository, rel *models.Release) error {\n\t\/\/ Only actual create when publish.\n\tif !rel.IsDraft {\n\t\tif !gitRepo.IsTagExist(rel.TagName) {\n\t\t\tcommit, err := gitRepo.GetCommit(rel.Target)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"GetCommit: %v\", err)\n\t\t\t}\n\n\t\t\t\/\/ Trim '--' prefix to prevent command line argument vulnerability.\n\t\t\trel.TagName = strings.TrimPrefix(rel.TagName, \"--\")\n\t\t\tif err = gitRepo.CreateTag(rel.TagName, commit.ID.String()); err != nil {\n\t\t\t\tif strings.Contains(err.Error(), \"is not a valid tag name\") {\n\t\t\t\t\treturn models.ErrInvalidTagName{\n\t\t\t\t\t\tTagName: rel.TagName,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t\trel.LowerTagName = strings.ToLower(rel.TagName)\n\t\t\t\/\/ Prepare Notify\n\t\t\tif err := rel.LoadAttributes(); err != nil {\n\t\t\t\tlog.Error(\"LoadAttributes: %v\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tnotification.NotifyPushCommits(\n\t\t\t\trel.Publisher, rel.Repo,\n\t\t\t\t&repository.PushUpdateOptions{\n\t\t\t\t\tRefFullName: git.TagPrefix + rel.TagName,\n\t\t\t\t\tOldCommitID: git.EmptySHA,\n\t\t\t\t\tNewCommitID: commit.ID.String(),\n\t\t\t\t}, repository.NewPushCommits())\n\t\t\tnotification.NotifyCreateRef(rel.Publisher, rel.Repo, \"tag\", git.TagPrefix+rel.TagName)\n\t\t\trel.CreatedUnix = timeutil.TimeStampNow()\n\t\t}\n\t\tcommit, err := gitRepo.GetTagCommit(rel.TagName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"GetTagCommit: %v\", err)\n\t\t}\n\n\t\trel.Sha1 = commit.ID.String()\n\t\trel.NumCommits, err = commit.CommitsCount()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"CommitsCount: %v\", err)\n\t\t}\n\n\t\tu, err := models.GetUserByEmail(commit.Author.Email)\n\t\tif err == nil {\n\t\t\trel.PublisherID = u.ID\n\t\t}\n\n\t} else {\n\t\trel.CreatedUnix = timeutil.TimeStampNow()\n\t}\n\treturn nil\n}\n\n\/\/ CreateRelease creates a new release of repository.\nfunc CreateRelease(gitRepo *git.Repository, rel *models.Release, attachmentUUIDs []string) error {\n\tisExist, err := models.IsReleaseExist(rel.RepoID, rel.TagName)\n\tif err != nil {\n\t\treturn err\n\t} else if isExist {\n\t\treturn models.ErrReleaseAlreadyExist{\n\t\t\tTagName: rel.TagName,\n\t\t}\n\t}\n\n\tif err = createTag(gitRepo, rel); err != nil {\n\t\treturn err\n\t}\n\n\trel.LowerTagName = strings.ToLower(rel.TagName)\n\tif err = models.InsertRelease(rel); err != nil {\n\t\treturn err\n\t}\n\n\tif err = models.AddReleaseAttachments(rel.ID, attachmentUUIDs); err != nil {\n\t\treturn err\n\t}\n\n\tif !rel.IsDraft {\n\t\tnotification.NotifyNewRelease(rel)\n\t}\n\n\treturn nil\n}\n\n\/\/ UpdateReleaseOrCreatReleaseFromTag updates information of a release or create release from tag.\nfunc UpdateReleaseOrCreatReleaseFromTag(doer *models.User, gitRepo *git.Repository, rel *models.Release, attachmentUUIDs []string, isCreate bool) (err error) {\n\tif err = createTag(gitRepo, rel); err != nil {\n\t\treturn err\n\t}\n\trel.LowerTagName = strings.ToLower(rel.TagName)\n\n\tif err = models.UpdateRelease(models.DefaultDBContext(), rel); err != nil {\n\t\treturn err\n\t}\n\n\tif err = models.AddReleaseAttachments(rel.ID, attachmentUUIDs); err != nil {\n\t\tlog.Error(\"AddReleaseAttachments: %v\", err)\n\t}\n\n\tif !isCreate {\n\t\tnotification.NotifyUpdateRelease(doer, rel)\n\t\treturn\n\t}\n\n\tif !rel.IsDraft {\n\t\tnotification.NotifyNewRelease(rel)\n\t}\n\n\treturn err\n}\n\n\/\/ DeleteReleaseByID deletes a release and corresponding Git tag by given ID.\nfunc DeleteReleaseByID(id int64, doer *models.User, delTag bool) error {\n\trel, err := models.GetReleaseByID(id)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"GetReleaseByID: %v\", err)\n\t}\n\n\trepo, err := models.GetRepositoryByID(rel.RepoID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"GetRepositoryByID: %v\", err)\n\t}\n\n\tif delTag {\n\t\tif stdout, err := git.NewCommand(\"tag\", \"-d\", rel.TagName).\n\t\t\tSetDescription(fmt.Sprintf(\"DeleteReleaseByID (git tag -d): %d\", rel.ID)).\n\t\t\tRunInDir(repo.RepoPath()); err != nil && !strings.Contains(err.Error(), \"not found\") {\n\t\t\tlog.Error(\"DeleteReleaseByID (git tag -d): %d in %v Failed:\\nStdout: %s\\nError: %v\", rel.ID, repo, stdout, err)\n\t\t\treturn fmt.Errorf(\"git tag -d: %v\", err)\n\t\t}\n\n\t\tif err := models.DeleteReleaseByID(id); err != nil {\n\t\t\treturn fmt.Errorf(\"DeleteReleaseByID: %v\", err)\n\t\t}\n\t} else {\n\t\trel.IsTag = true\n\n\t\tif err = models.UpdateRelease(models.DefaultDBContext(), rel); err != nil {\n\t\t\treturn fmt.Errorf(\"Update: %v\", err)\n\t\t}\n\t}\n\n\trel.Repo = repo\n\tif err = rel.LoadAttributes(); err != nil {\n\t\treturn fmt.Errorf(\"LoadAttributes: %v\", err)\n\t}\n\n\tif err := models.DeleteAttachmentsByRelease(rel.ID); err != nil {\n\t\treturn fmt.Errorf(\"DeleteAttachments: %v\", err)\n\t}\n\n\tfor i := range rel.Attachments {\n\t\tattachment := rel.Attachments[i]\n\t\tif err := storage.Attachments.Delete(attachment.RelativePath()); err != nil {\n\t\t\tlog.Error(\"Delete attachment %s of release %s failed: %v\", attachment.UUID, rel.ID, err)\n\t\t}\n\t}\n\n\tnotification.NotifyDeleteRelease(doer, rel)\n\n\treturn nil\n}\n<commit_msg>Send webhook when tag is removed via Web UI (#14015)<commit_after>\/\/ Copyright 2019 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage release\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"code.gitea.io\/gitea\/models\"\n\t\"code.gitea.io\/gitea\/modules\/git\"\n\t\"code.gitea.io\/gitea\/modules\/log\"\n\t\"code.gitea.io\/gitea\/modules\/notification\"\n\t\"code.gitea.io\/gitea\/modules\/repository\"\n\t\"code.gitea.io\/gitea\/modules\/storage\"\n\t\"code.gitea.io\/gitea\/modules\/timeutil\"\n)\n\nfunc createTag(gitRepo *git.Repository, rel *models.Release) error {\n\t\/\/ Only actual create when publish.\n\tif !rel.IsDraft {\n\t\tif !gitRepo.IsTagExist(rel.TagName) {\n\t\t\tcommit, err := gitRepo.GetCommit(rel.Target)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"GetCommit: %v\", err)\n\t\t\t}\n\n\t\t\t\/\/ Trim '--' prefix to prevent command line argument vulnerability.\n\t\t\trel.TagName = strings.TrimPrefix(rel.TagName, \"--\")\n\t\t\tif err = gitRepo.CreateTag(rel.TagName, commit.ID.String()); err != nil {\n\t\t\t\tif strings.Contains(err.Error(), \"is not a valid tag name\") {\n\t\t\t\t\treturn models.ErrInvalidTagName{\n\t\t\t\t\t\tTagName: rel.TagName,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t\trel.LowerTagName = strings.ToLower(rel.TagName)\n\t\t\t\/\/ Prepare Notify\n\t\t\tif err := rel.LoadAttributes(); err != nil {\n\t\t\t\tlog.Error(\"LoadAttributes: %v\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tnotification.NotifyPushCommits(\n\t\t\t\trel.Publisher, rel.Repo,\n\t\t\t\t&repository.PushUpdateOptions{\n\t\t\t\t\tRefFullName: git.TagPrefix + rel.TagName,\n\t\t\t\t\tOldCommitID: git.EmptySHA,\n\t\t\t\t\tNewCommitID: commit.ID.String(),\n\t\t\t\t}, repository.NewPushCommits())\n\t\t\tnotification.NotifyCreateRef(rel.Publisher, rel.Repo, \"tag\", git.TagPrefix+rel.TagName)\n\t\t\trel.CreatedUnix = timeutil.TimeStampNow()\n\t\t}\n\t\tcommit, err := gitRepo.GetTagCommit(rel.TagName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"GetTagCommit: %v\", err)\n\t\t}\n\n\t\trel.Sha1 = commit.ID.String()\n\t\trel.NumCommits, err = commit.CommitsCount()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"CommitsCount: %v\", err)\n\t\t}\n\n\t\tu, err := models.GetUserByEmail(commit.Author.Email)\n\t\tif err == nil {\n\t\t\trel.PublisherID = u.ID\n\t\t}\n\n\t} else {\n\t\trel.CreatedUnix = timeutil.TimeStampNow()\n\t}\n\treturn nil\n}\n\n\/\/ CreateRelease creates a new release of repository.\nfunc CreateRelease(gitRepo *git.Repository, rel *models.Release, attachmentUUIDs []string) error {\n\tisExist, err := models.IsReleaseExist(rel.RepoID, rel.TagName)\n\tif err != nil {\n\t\treturn err\n\t} else if isExist {\n\t\treturn models.ErrReleaseAlreadyExist{\n\t\t\tTagName: rel.TagName,\n\t\t}\n\t}\n\n\tif err = createTag(gitRepo, rel); err != nil {\n\t\treturn err\n\t}\n\n\trel.LowerTagName = strings.ToLower(rel.TagName)\n\tif err = models.InsertRelease(rel); err != nil {\n\t\treturn err\n\t}\n\n\tif err = models.AddReleaseAttachments(rel.ID, attachmentUUIDs); err != nil {\n\t\treturn err\n\t}\n\n\tif !rel.IsDraft {\n\t\tnotification.NotifyNewRelease(rel)\n\t}\n\n\treturn nil\n}\n\n\/\/ UpdateReleaseOrCreatReleaseFromTag updates information of a release or create release from tag.\nfunc UpdateReleaseOrCreatReleaseFromTag(doer *models.User, gitRepo *git.Repository, rel *models.Release, attachmentUUIDs []string, isCreate bool) (err error) {\n\tif err = createTag(gitRepo, rel); err != nil {\n\t\treturn err\n\t}\n\trel.LowerTagName = strings.ToLower(rel.TagName)\n\n\tif err = models.UpdateRelease(models.DefaultDBContext(), rel); err != nil {\n\t\treturn err\n\t}\n\n\tif err = models.AddReleaseAttachments(rel.ID, attachmentUUIDs); err != nil {\n\t\tlog.Error(\"AddReleaseAttachments: %v\", err)\n\t}\n\n\tif !isCreate {\n\t\tnotification.NotifyUpdateRelease(doer, rel)\n\t\treturn\n\t}\n\n\tif !rel.IsDraft {\n\t\tnotification.NotifyNewRelease(rel)\n\t}\n\n\treturn err\n}\n\n\/\/ DeleteReleaseByID deletes a release and corresponding Git tag by given ID.\nfunc DeleteReleaseByID(id int64, doer *models.User, delTag bool) error {\n\trel, err := models.GetReleaseByID(id)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"GetReleaseByID: %v\", err)\n\t}\n\n\trepo, err := models.GetRepositoryByID(rel.RepoID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"GetRepositoryByID: %v\", err)\n\t}\n\n\tif delTag {\n\t\tif stdout, err := git.NewCommand(\"tag\", \"-d\", rel.TagName).\n\t\t\tSetDescription(fmt.Sprintf(\"DeleteReleaseByID (git tag -d): %d\", rel.ID)).\n\t\t\tRunInDir(repo.RepoPath()); err != nil && !strings.Contains(err.Error(), \"not found\") {\n\t\t\tlog.Error(\"DeleteReleaseByID (git tag -d): %d in %v Failed:\\nStdout: %s\\nError: %v\", rel.ID, repo, stdout, err)\n\t\t\treturn fmt.Errorf(\"git tag -d: %v\", err)\n\t\t}\n\n\t\tnotification.NotifyPushCommits(\n\t\t\tdoer, repo,\n\t\t\t&repository.PushUpdateOptions{\n\t\t\t\tRefFullName: git.TagPrefix + rel.TagName,\n\t\t\t\tOldCommitID: rel.Sha1,\n\t\t\t\tNewCommitID: git.EmptySHA,\n\t\t\t}, repository.NewPushCommits())\n\t\tnotification.NotifyDeleteRef(doer, repo, \"tag\", git.TagPrefix+rel.TagName)\n\n\t\tif err := models.DeleteReleaseByID(id); err != nil {\n\t\t\treturn fmt.Errorf(\"DeleteReleaseByID: %v\", err)\n\t\t}\n\t} else {\n\t\trel.IsTag = true\n\n\t\tif err = models.UpdateRelease(models.DefaultDBContext(), rel); err != nil {\n\t\t\treturn fmt.Errorf(\"Update: %v\", err)\n\t\t}\n\t}\n\n\trel.Repo = repo\n\tif err = rel.LoadAttributes(); err != nil {\n\t\treturn fmt.Errorf(\"LoadAttributes: %v\", err)\n\t}\n\n\tif err := models.DeleteAttachmentsByRelease(rel.ID); err != nil {\n\t\treturn fmt.Errorf(\"DeleteAttachments: %v\", err)\n\t}\n\n\tfor i := range rel.Attachments {\n\t\tattachment := rel.Attachments[i]\n\t\tif err := storage.Attachments.Delete(attachment.RelativePath()); err != nil {\n\t\t\tlog.Error(\"Delete attachment %s of release %s failed: %v\", attachment.UUID, rel.ID, err)\n\t\t}\n\t}\n\n\tnotification.NotifyDeleteRelease(doer, rel)\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package router\n\nimport (\n\t\"encoding\/json\"\n\t\"sync\/atomic\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t\"github.com\/onsi\/ginkgo\/config\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n\t\"github.com\/pivotal-golang\/lager\"\n\t\"github.com\/pivotal-golang\/lager\/lagertest\"\n\n\t\"testing\"\n\n\t\"github.com\/cloudfoundry-incubator\/cf-tcp-router-acceptance-tests\/helpers\"\n\t\"github.com\/cloudfoundry-incubator\/cf-tcp-router\/testutil\"\n\t\"github.com\/cloudfoundry-incubator\/routing-api\"\n\t\"github.com\/cloudfoundry-incubator\/routing-api\/db\"\n\t\"github.com\/cloudfoundry-incubator\/uaa-token-fetcher\"\n)\n\nfunc TestRouter(t *testing.T) {\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"Router Suite\")\n}\n\nconst preallocatedExternalPorts = 100\n\nvar (\n\tsampleReceiverPath string\n\texternalIP string\n\trouterApiConfig helpers.RouterApiConfig\n\tlogger lager.Logger\n\troutingApiClient routing_api.Client\n\texternalPort uint32\n\tbucketSize int\n\tcontainerPort uint32\n)\n\nfunc validateTcpRouteMapping(tcpRouteMapping db.TcpRouteMapping) bool {\n\tif tcpRouteMapping.TcpRoute.RouterGroupGuid == \"\" {\n\t\treturn false\n\t}\n\n\tif tcpRouteMapping.TcpRoute.ExternalPort <= 0 {\n\t\treturn false\n\t}\n\n\tif tcpRouteMapping.HostIP == \"\" {\n\t\treturn false\n\t}\n\n\tif tcpRouteMapping.HostPort <= 0 {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc nextExternalPort() int {\n\tport := int(atomic.AddUint32(&externalPort, uint32(1))) + (GinkgoParallelNode()-1)*bucketSize\n\tlogger.Info(\"next-external-port\", lager.Data{\"ginkgo-parallel-node\": GinkgoParallelNode(), \"externalPort\": port})\n\treturn port\n}\n\nfunc nextContainerPort() int {\n\tport := int(atomic.AddUint32(&containerPort, uint32(1))) + (GinkgoParallelNode()-1)*bucketSize\n\tlogger.Info(\"next-container-port\", lager.Data{\"ginkgo-parallel-node\": GinkgoParallelNode(), \"containerPort\": port})\n\treturn port\n}\n\nvar _ = SynchronizedBeforeSuite(func() []byte {\n\n\tcleanupRoutes()\n\n\tsampleReceiver, err := gexec.Build(\"github.com\/cloudfoundry-incubator\/cf-tcp-router-acceptance-tests\/assets\/tcp-sample-receiver\", \"-race\")\n\tExpect(err).NotTo(HaveOccurred())\n\tpayload, err := json.Marshal(map[string]string{\n\t\t\"sample-receiver\": sampleReceiver,\n\t})\n\n\tExpect(err).NotTo(HaveOccurred())\n\n\treturn payload\n}, func(payload []byte) {\n\tcontext := map[string]string{}\n\n\terr := json.Unmarshal(payload, &context)\n\tExpect(err).NotTo(HaveOccurred())\n\n\tsampleReceiverPath = context[\"sample-receiver\"]\n\texternalIP = testutil.GetExternalIP()\n\trouterApiConfig = helpers.LoadConfig()\n\tlogger = lagertest.NewTestLogger(\"test\")\n\n\troutingApiClient = routing_api.NewClient(routerApiConfig.RoutingApiUrl)\n\toauth := token_fetcher.OAuthConfig{\n\t\tTokenEndpoint: routerApiConfig.OAuth.TokenEndpoint,\n\t\tClientName: routerApiConfig.OAuth.ClientName,\n\t\tClientSecret: routerApiConfig.OAuth.ClientSecret,\n\t\tPort: routerApiConfig.OAuth.Port,\n\t}\n\ttokenFetcher := token_fetcher.NewTokenFetcher(&oauth)\n\ttoken, err := tokenFetcher.FetchToken()\n\tExpect(err).ToNot(HaveOccurred())\n\troutingApiClient.SetToken(token.AccessToken)\n\texternalPort = 59999\n\tcontainerPort = 5000\n\tbucketSize = preallocatedExternalPorts \/ config.GinkgoConfig.ParallelTotal\n})\n\nvar _ = SynchronizedAfterSuite(func() {\n}, func() {\n\tgexec.CleanupBuildArtifacts()\n})\n\nfunc cleanupRoutes() {\n\trouterApiConfig := helpers.LoadConfig()\n\troutingApiClient := routing_api.NewClient(routerApiConfig.RoutingApiUrl)\n\toauth := token_fetcher.OAuthConfig{\n\t\tTokenEndpoint: routerApiConfig.OAuth.TokenEndpoint,\n\t\tClientName: routerApiConfig.OAuth.ClientName,\n\t\tClientSecret: routerApiConfig.OAuth.ClientSecret,\n\t\tPort: routerApiConfig.OAuth.Port,\n\t}\n\ttokenFetcher := token_fetcher.NewTokenFetcher(&oauth)\n\ttoken, err := tokenFetcher.FetchToken()\n\tExpect(err).ToNot(HaveOccurred())\n\troutingApiClient.SetToken(token.AccessToken)\n\n\t\/\/ Cleaning up all the pre-existing routes.\n\ttcpRouteMappings, err := routingApiClient.TcpRouteMappings()\n\tExpect(err).ToNot(HaveOccurred())\n\tdeleteTcpRouteMappings := make([]db.TcpRouteMapping, 0)\n\tfor _, tcpRouteMapping := range tcpRouteMappings {\n\t\tif validateTcpRouteMapping(tcpRouteMapping) {\n\t\t\tdeleteTcpRouteMappings = append(deleteTcpRouteMappings, tcpRouteMapping)\n\t\t}\n\t}\n\terr = routingApiClient.DeleteTcpRouteMappings(deleteTcpRouteMappings)\n\tExpect(err).ToNot(HaveOccurred())\n}\n<commit_msg>Changes to use updated token fetcher.<commit_after>package router\n\nimport (\n\t\"encoding\/json\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t\"github.com\/onsi\/ginkgo\/config\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gexec\"\n\t\"github.com\/pivotal-golang\/clock\"\n\t\"github.com\/pivotal-golang\/lager\"\n\t\"github.com\/pivotal-golang\/lager\/lagertest\"\n\n\t\"testing\"\n\n\t\"github.com\/cloudfoundry-incubator\/cf-tcp-router-acceptance-tests\/helpers\"\n\t\"github.com\/cloudfoundry-incubator\/cf-tcp-router\/testutil\"\n\t\"github.com\/cloudfoundry-incubator\/routing-api\"\n\t\"github.com\/cloudfoundry-incubator\/routing-api\/db\"\n\t\"github.com\/cloudfoundry-incubator\/uaa-token-fetcher\"\n)\n\nfunc TestRouter(t *testing.T) {\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"Router Suite\")\n}\n\nconst preallocatedExternalPorts = 100\n\nvar (\n\tsampleReceiverPath string\n\texternalIP string\n\trouterApiConfig helpers.RouterApiConfig\n\tlogger lager.Logger\n\troutingApiClient routing_api.Client\n\texternalPort uint32\n\tbucketSize int\n\tcontainerPort uint32\n)\n\nfunc validateTcpRouteMapping(tcpRouteMapping db.TcpRouteMapping) bool {\n\tif tcpRouteMapping.TcpRoute.RouterGroupGuid == \"\" {\n\t\treturn false\n\t}\n\n\tif tcpRouteMapping.TcpRoute.ExternalPort <= 0 {\n\t\treturn false\n\t}\n\n\tif tcpRouteMapping.HostIP == \"\" {\n\t\treturn false\n\t}\n\n\tif tcpRouteMapping.HostPort <= 0 {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc nextExternalPort() int {\n\tport := int(atomic.AddUint32(&externalPort, uint32(1))) + (GinkgoParallelNode()-1)*bucketSize\n\tlogger.Info(\"next-external-port\", lager.Data{\"ginkgo-parallel-node\": GinkgoParallelNode(), \"externalPort\": port})\n\treturn port\n}\n\nfunc nextContainerPort() int {\n\tport := int(atomic.AddUint32(&containerPort, uint32(1))) + (GinkgoParallelNode()-1)*bucketSize\n\tlogger.Info(\"next-container-port\", lager.Data{\"ginkgo-parallel-node\": GinkgoParallelNode(), \"containerPort\": port})\n\treturn port\n}\n\nvar _ = SynchronizedBeforeSuite(func() []byte {\n\tcleanupRoutes(lagertest.NewTestLogger(\"cleanup\"))\n\n\tsampleReceiver, err := gexec.Build(\"github.com\/cloudfoundry-incubator\/cf-tcp-router-acceptance-tests\/assets\/tcp-sample-receiver\", \"-race\")\n\tExpect(err).NotTo(HaveOccurred())\n\tpayload, err := json.Marshal(map[string]string{\n\t\t\"sample-receiver\": sampleReceiver,\n\t})\n\n\tExpect(err).NotTo(HaveOccurred())\n\n\treturn payload\n}, func(payload []byte) {\n\tcontext := map[string]string{}\n\n\terr := json.Unmarshal(payload, &context)\n\tExpect(err).NotTo(HaveOccurred())\n\tlogger = lagertest.NewTestLogger(\"test\")\n\tsampleReceiverPath = context[\"sample-receiver\"]\n\texternalIP = testutil.GetExternalIP()\n\trouterApiConfig = helpers.LoadConfig()\n\n\troutingApiClient = routing_api.NewClient(routerApiConfig.RoutingApiUrl)\n\n\ttokenFetcher, err := createTokenFetcher(logger, routerApiConfig)\n\tExpect(err).ToNot(HaveOccurred())\n\n\ttoken, err := tokenFetcher.FetchToken(true)\n\tExpect(err).ToNot(HaveOccurred())\n\troutingApiClient.SetToken(token.AccessToken)\n\texternalPort = 59999\n\tcontainerPort = 5000\n\tbucketSize = preallocatedExternalPorts \/ config.GinkgoConfig.ParallelTotal\n})\n\nvar _ = SynchronizedAfterSuite(func() {\n}, func() {\n\tgexec.CleanupBuildArtifacts()\n})\n\nfunc cleanupRoutes(logger lager.Logger) {\n\trouterApiConfig := helpers.LoadConfig()\n\troutingApiClient := routing_api.NewClient(routerApiConfig.RoutingApiUrl)\n\n\ttokenFetcher, err := createTokenFetcher(logger, routerApiConfig)\n\tExpect(err).ToNot(HaveOccurred())\n\n\ttoken, err := tokenFetcher.FetchToken(true)\n\tExpect(err).ToNot(HaveOccurred())\n\troutingApiClient.SetToken(token.AccessToken)\n\n\t\/\/ Cleaning up all the pre-existing routes.\n\ttcpRouteMappings, err := routingApiClient.TcpRouteMappings()\n\tExpect(err).ToNot(HaveOccurred())\n\tdeleteTcpRouteMappings := make([]db.TcpRouteMapping, 0)\n\tfor _, tcpRouteMapping := range tcpRouteMappings {\n\t\tif validateTcpRouteMapping(tcpRouteMapping) {\n\t\t\tdeleteTcpRouteMappings = append(deleteTcpRouteMappings, tcpRouteMapping)\n\t\t}\n\t}\n\terr = routingApiClient.DeleteTcpRouteMappings(deleteTcpRouteMappings)\n\tExpect(err).ToNot(HaveOccurred())\n}\n\nfunc createTokenFetcher(logger lager.Logger, routerApiConfig helpers.RouterApiConfig) (token_fetcher.TokenFetcher, error) {\n\toauth := token_fetcher.OAuthConfig{\n\t\tTokenEndpoint: routerApiConfig.OAuth.TokenEndpoint,\n\t\tClientName: routerApiConfig.OAuth.ClientName,\n\t\tClientSecret: routerApiConfig.OAuth.ClientSecret,\n\t\tPort: routerApiConfig.OAuth.Port,\n\t}\n\tclock := clock.NewClock()\n\n\tlogger.Debug(\"creating-uaa-token-fetcher\")\n\n\ttokenFetcherConfig := token_fetcher.TokenFetcherConfig{\n\t\tMaxNumberOfRetries: uint32(3),\n\t\tRetryInterval: 5 * time.Second,\n\t\tExpirationBufferTime: int64(30),\n\t}\n\treturn token_fetcher.NewTokenFetcher(logger, &oauth, tokenFetcherConfig, clock)\n}\n<|endoftext|>"} {"text":"<commit_before>package perfTestUtils\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar mockedFs FileSystem = mockFs{}\n\ntype mockFs struct{}\n\nfunc (mockFs) Open(name string) (File, error) { return &mockedFile{}, nil }\nfunc (mockFs) Create(name string) (File, error) {\n\tif strings.Contains(name, \"FAIL\") {\n\t\treturn nil, fmt.Errorf(\"requested mock FAIL!\")\n\t}\n\treturn &mockedFile{}, nil\n}\n\ntype mockedFile struct {\n\tContent []byte\n\tr *strings.Reader\n}\n\nfunc (mockedFile) Readdir(n int) (fi []os.FileInfo, err error) {\n\tif n == -1 {\n\t\treturn make([]os.FileInfo, 10), nil\n\t} else {\n\t\treturn nil, fmt.Errorf(\"Mock dir error!\")\n\t}\n}\nfunc (mockedFile) Close() error { return nil }\n\nfunc (mockedFile) Write(p []byte) (n int, err error) { return io.WriteString(os.Stdout, string(p)) }\nfunc (m *mockedFile) Read(p []byte) (n int, err error) {\n\tif m.r == nil {\n\t\tm.r = strings.NewReader(string(m.Content))\n\t}\n\treturn m.r.Read(p)\n}\n\nfunc TestReadBasePerfFile(t *testing.T) {\n\tbs := &BasePerfStats{\n\t\tBasePeakMemory: 10e6 - 10e3,\n\t}\n\tbma := make([]uint64, 0)\n\tfor i := 0; i < 100; i++ {\n\t\tbma = append(bma, bs.BasePeakMemory+uint64(10e5*rand.Int()))\n\t}\n\tbs.MemoryAudit = bma\n\tbsrt := make(map[string]int64)\n\tbsrt[\"service 1\"] = 3e6\n\tbsrt[\"service 2\"] = 2e6\n\tbsrt[\"service 3\"] = 4e6\n\tbs.BaseServiceResponseTimes = bsrt\n\n\tb, err := json.Marshal(bs)\n\tif err != nil {\n\t\tt.Errorf(\"expected to be nil: %v\\n\", err)\n\t}\n\tt.Logf(\"%s\\n\", b)\n\ttoTest, err := ReadBasePerfFile(bytes.NewReader(b))\n\tassert.Nil(t, err)\n\tassert.NotNil(t, toTest)\n\tassert.IsType(t, new(BasePerfStats), toTest)\n\tassert.Equal(t, 100, len(toTest.MemoryAudit))\n}\n\nfunc TestReadBasePerfFileErrUnmarshal(t *testing.T) {\n\ttoTest, err := ReadBasePerfFile(bytes.NewReader([]byte(\"test\")))\n\tassert.NotNil(t, err)\n\tassert.NotNil(t, toTest)\n\tassert.IsType(t, new(BasePerfStats), toTest)\n\tassert.Equal(t, 0, len(toTest.MemoryAudit))\n\tassert.Equal(t, `invalid character 'e' in literal true (expecting 'r')`, err.Error())\n}\n\nfunc TestCalcPeakMemoryVariancePercentage(t *testing.T) {\n\tvp := CalcPeakMemoryVariancePercentage(100, 110)\n\tassert.Equal(t, float64(10), vp)\n\n\tvp = CalcPeakMemoryVariancePercentage(100, 90)\n\tassert.Equal(t, float64(-10), vp)\n}\n\nfunc BenchmarkCalcPeakMemoryVariancePercentage(t *testing.B) {\n\tfor i := 0; i < t.N; i++ {\n\t\tCalcPeakMemoryVariancePercentage(100, 90)\n\t}\n}\n\nfunc TestCalcAverageResponseTime(t *testing.T) {\n\ttimes := make([]int64, 0)\n\tfor i := int64(200); i >= 0; i-- {\n\t\ttimes = append(times, i*1243)\n\t}\n\tavg := CalcAverageResponseTime(times, 2)\n\tassert.Equal(t, int64(111870), avg)\n}\n\nfunc TestCalcAverageResponseVariancePercentage(t *testing.T) {\n\tvp := CalcAverageResponseVariancePercentage(110, 100)\n\tassert.Equal(t, float64(10), vp)\n\n\tvp = CalcAverageResponseVariancePercentage(90, 100)\n\tassert.Equal(t, float64(-10), vp)\n}\n\nfunc BenchmarkCalcAverageResponseVariancePercentage(t *testing.B) {\n\tfor i := 0; i < t.N; i++ {\n\t\tCalcAverageResponseVariancePercentage(100, 90)\n\t}\n}\n\nfunc TestPopulateBasePerfStats(t *testing.T) {\n\tps := &PerfStats{\n\t\tTestTimeStart: time.Now(),\n\t\tPeakMemory: 10e6,\n\t}\n\n\tbs := &BasePerfStats{\n\t\tBaseServiceResponseTimes: make(map[string]int64),\n\t}\n\n\tpma := make([]uint64, 0)\n\tfor i := 0; i < 100; i++ {\n\t\tpma = append(pma, ps.PeakMemory+uint64(5e5*rand.Int()))\n\t}\n\tps.MemoryAudit = pma\n\n\tps.TestPartitions = []TestPartition{TestPartition{Count: 0, TestName: \"StartUp\"}, TestPartition{Count: 30, TestName: \"service 1\"}, TestPartition{Count: 60, TestName: \"service 2\"}, TestPartition{Count: 90, TestName: \"service 3\"}}\n\n\tpsrt := make(map[string]int64)\n\tpsrt[\"service 1\"] = 3e5\n\tpsrt[\"service 2\"] = 2e5\n\tps.ServiceResponseTimes = psrt\n\n\tpopulateBasePerfStats(ps, bs, false)\n\tassert.Equal(t, bs.BasePeakMemory, ps.PeakMemory)\n\tassert.Equal(t, bs.MemoryAudit, ps.MemoryAudit)\n\tassert.Equal(t, bs.BaseServiceResponseTimes, ps.ServiceResponseTimes)\n\tassert.Equal(t, bs.ModifiedDate, bs.GenerationDate)\n}\n\nfunc TestValidateResponseStatusCode(t *testing.T) {\n\tassert.True(t, ValidateResponseStatusCode(http.StatusOK, http.StatusOK, \"test\"))\n\tassert.False(t, ValidateResponseStatusCode(http.StatusOK, http.StatusInternalServerError, \"test\"))\n}\n\nfunc TestValidateServiceResponseTime(t *testing.T) {\n\tassert.True(t, ValidateServiceResponseTime(10, \"test\"))\n\tassert.False(t, ValidateServiceResponseTime(-10, \"test\"))\n\tassert.False(t, ValidateServiceResponseTime(0, \"test\"))\n}\n\nfunc TestValidatePeakMemoryVariance(t *testing.T) {\n\tassert.True(t, ValidatePeakMemoryVariance(15, 0.1))\n\tassert.False(t, ValidatePeakMemoryVariance(15, 16.5))\n\tassert.True(t, ValidatePeakMemoryVariance(15, 15))\n}\n\nfunc TestValidateAverageServiceResponeTimeVariance(t *testing.T) {\n\tassert.True(t, ValidateAverageServiceResponseTimeVariance(15, 10))\n\tassert.True(t, ValidateAverageServiceResponseTimeVariance(15, 15))\n\tassert.False(t, ValidateAverageServiceResponseTimeVariance(15, 16))\n}\n\nfunc TestGenerateEnvBasePerfOutputFile(t *testing.T) {\n\tps := &PerfStats{\n\t\tTestTimeStart: time.Now(),\n\t\tPeakMemory: 10e6,\n\t}\n\n\tbs := &BasePerfStats{\n\t\tBaseServiceResponseTimes: make(map[string]int64),\n\t}\n\n\tpma := make([]uint64, 0)\n\tfor i := 0; i < 100; i++ {\n\t\tpma = append(pma, ps.PeakMemory+uint64(5e5*rand.Int()))\n\t}\n\tps.MemoryAudit = pma\n\n\tps.TestPartitions = []TestPartition{TestPartition{Count: 0, TestName: \"StartUp\"}, TestPartition{Count: 30, TestName: \"service 1\"}, TestPartition{Count: 60, TestName: \"service 2\"}, TestPartition{Count: 90, TestName: \"service 3\"}}\n\n\tpsrt := make(map[string]int64)\n\tpsrt[\"service 1\"] = 3e5\n\tpsrt[\"service 2\"] = 2e5\n\tps.ServiceResponseTimes = psrt\n\n\twillCallOsExit := false\n\texit := func(i int) { willCallOsExit = true }\n\n\tGenerateEnvBasePerfOutputFile(ps, bs, &Config{ReBaseMemory: true, BaseStatsOutputDir: \"env\", TargetHost: \"localhost\"}, exit, mockedFs)\n\tassert.False(t, willCallOsExit)\n}\n\nfunc TestGenerateEnvBasePerfOutputFileFailCreate(t *testing.T) {\n\tps := &PerfStats{\n\t\tTestTimeStart: time.Now(),\n\t\tPeakMemory: 10e6,\n\t}\n\n\tbs := &BasePerfStats{\n\t\tBaseServiceResponseTimes: make(map[string]int64),\n\t}\n\n\tpma := make([]uint64, 0)\n\tfor i := 0; i < 100; i++ {\n\t\tpma = append(pma, ps.PeakMemory+uint64(5e5*rand.Int()))\n\t}\n\tps.MemoryAudit = pma\n\n\tps.TestPartitions = []TestPartition{TestPartition{Count: 0, TestName: \"StartUp\"}, TestPartition{Count: 30, TestName: \"service 1\"}, TestPartition{Count: 60, TestName: \"service 2\"}, TestPartition{Count: 90, TestName: \"service 3\"}}\n\n\tpsrt := make(map[string]int64)\n\tpsrt[\"service 1\"] = 3e5\n\tpsrt[\"service 2\"] = 2e5\n\tps.ServiceResponseTimes = psrt\n\n\twillCallOsExit := false\n\texit := func(i int) { willCallOsExit = true }\n\n\tGenerateEnvBasePerfOutputFile(ps, bs, &Config{ReBaseMemory: true, BaseStatsOutputDir: \"env\", ExecutionHost: \"FAIL\"}, exit, mockedFs)\n\tassert.True(t, willCallOsExit)\n}\n\nfunc TestValidateBasePerfStat(t *testing.T) {\n\tbs := &BasePerfStats{}\n\tassert.False(t, validateBasePerfStat(bs))\n\n\tbs.BaseServiceResponseTimes = map[string]int64{\"service 1\": 123, \"service 2\": -1}\n\tassert.False(t, validateBasePerfStat(bs))\n\n\tbs.BaseServiceResponseTimes = map[string]int64{\"service 1\": 123, \"service 2\": 321}\n\tbs.BasePeakMemory = 12\n\tbs.GenerationDate = \"aaa\"\n\tbs.ModifiedDate = \"bbb\"\n\tbs.MemoryAudit = []uint64{1, 2, 3}\n\tassert.True(t, validateBasePerfStat(bs))\n}\n<commit_msg>Updated unit tests to reflect skipMemCheck option.<commit_after>package perfTestUtils\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\n\nvar mockedFs FileSystem = mockFs{}\n\ntype mockFs struct{}\n\nfunc (mockFs) Open(name string) (File, error) { return &mockedFile{}, nil }\nfunc (mockFs) Create(name string) (File, error) {\n\tif strings.Contains(name, \"FAIL\") {\n\t\treturn nil, fmt.Errorf(\"requested mock FAIL!\")\n\t}\n\treturn &mockedFile{}, nil\n}\n\ntype mockedFile struct {\n\tContent []byte\n\tr *strings.Reader\n}\n\nfunc (mockedFile) Readdir(n int) (fi []os.FileInfo, err error) {\n\tif n == -1 {\n\t\treturn make([]os.FileInfo, 10), nil\n\t} else {\n\t\treturn nil, fmt.Errorf(\"Mock dir error!\")\n\t}\n}\nfunc (mockedFile) Close() error { return nil }\n\nfunc (mockedFile) Write(p []byte) (n int, err error) { return io.WriteString(os.Stdout, string(p)) }\nfunc (m *mockedFile) Read(p []byte) (n int, err error) {\n\tif m.r == nil {\n\t\tm.r = strings.NewReader(string(m.Content))\n\t}\n\treturn m.r.Read(p)\n}\n\nfunc TestReadBasePerfFile(t *testing.T) {\n\tbs := &BasePerfStats{\n\t\tBasePeakMemory: 10e6 - 10e3,\n\t}\n\tbma := make([]uint64, 0)\n\tfor i := 0; i < 100; i++ {\n\t\tbma = append(bma, bs.BasePeakMemory+uint64(10e5*rand.Int()))\n\t}\n\tbs.MemoryAudit = bma\n\tbsrt := make(map[string]int64)\n\tbsrt[\"service 1\"] = 3e6\n\tbsrt[\"service 2\"] = 2e6\n\tbsrt[\"service 3\"] = 4e6\n\tbs.BaseServiceResponseTimes = bsrt\n\n\tb, err := json.Marshal(bs)\n\tif err != nil {\n\t\tt.Errorf(\"expected to be nil: %v\\n\", err)\n\t}\n\tt.Logf(\"%s\\n\", b)\n\ttoTest, err := ReadBasePerfFile(bytes.NewReader(b))\n\tassert.Nil(t, err)\n\tassert.NotNil(t, toTest)\n\tassert.IsType(t, new(BasePerfStats), toTest)\n\tassert.Equal(t, 100, len(toTest.MemoryAudit))\n}\n\nfunc TestReadBasePerfFileErrUnmarshal(t *testing.T) {\n\ttoTest, err := ReadBasePerfFile(bytes.NewReader([]byte(\"test\")))\n\tassert.NotNil(t, err)\n\tassert.NotNil(t, toTest)\n\tassert.IsType(t, new(BasePerfStats), toTest)\n\tassert.Equal(t, 0, len(toTest.MemoryAudit))\n\tassert.Equal(t, `invalid character 'e' in literal true (expecting 'r')`, err.Error())\n}\n\nfunc TestCalcPeakMemoryVariancePercentage(t *testing.T) {\n\tvp := CalcPeakMemoryVariancePercentage(100, 110)\n\tassert.Equal(t, float64(10), vp)\n\n\tvp = CalcPeakMemoryVariancePercentage(100, 90)\n\tassert.Equal(t, float64(-10), vp)\n}\n\nfunc BenchmarkCalcPeakMemoryVariancePercentage(t *testing.B) {\n\tfor i := 0; i < t.N; i++ {\n\t\tCalcPeakMemoryVariancePercentage(100, 90)\n\t}\n}\n\nfunc TestCalcAverageResponseTime(t *testing.T) {\n\ttimes := make([]int64, 0)\n\tfor i := int64(200); i >= 0; i-- {\n\t\ttimes = append(times, i*1243)\n\t}\n\tavg := CalcAverageResponseTime(times, 2)\n\tassert.Equal(t, int64(111870), avg)\n}\n\nfunc TestCalcAverageResponseVariancePercentage(t *testing.T) {\n\tvp := CalcAverageResponseVariancePercentage(110, 100)\n\tassert.Equal(t, float64(10), vp)\n\n\tvp = CalcAverageResponseVariancePercentage(90, 100)\n\tassert.Equal(t, float64(-10), vp)\n}\n\nfunc BenchmarkCalcAverageResponseVariancePercentage(t *testing.B) {\n\tfor i := 0; i < t.N; i++ {\n\t\tCalcAverageResponseVariancePercentage(100, 90)\n\t}\n}\n\nfunc TestPopulateBasePerfStats(t *testing.T) {\n\tps := &PerfStats{\n\t\tTestTimeStart: time.Now(),\n\t\tPeakMemory: 10e6,\n\t}\n\n\tbs := &BasePerfStats{\n\t\tBaseServiceResponseTimes: make(map[string]int64),\n\t}\n\n\tpma := make([]uint64, 0)\n\tfor i := 0; i < 100; i++ {\n\t\tpma = append(pma, ps.PeakMemory+uint64(5e5*rand.Int()))\n\t}\n\tps.MemoryAudit = pma\n\n\tps.TestPartitions = []TestPartition{TestPartition{Count: 0, TestName: \"StartUp\"}, TestPartition{Count: 30, TestName: \"service 1\"}, TestPartition{Count: 60, TestName: \"service 2\"}, TestPartition{Count: 90, TestName: \"service 3\"}}\n\n\tpsrt := make(map[string]int64)\n\tpsrt[\"service 1\"] = 3e5\n\tpsrt[\"service 2\"] = 2e5\n\tps.ServiceResponseTimes = psrt\n\n\tpopulateBasePerfStats(ps, bs, false)\n\tassert.Equal(t, bs.BasePeakMemory, ps.PeakMemory)\n\tassert.Equal(t, bs.MemoryAudit, ps.MemoryAudit)\n\tassert.Equal(t, bs.BaseServiceResponseTimes, ps.ServiceResponseTimes)\n\tassert.Equal(t, bs.ModifiedDate, bs.GenerationDate)\n}\n\nfunc TestValidateResponseStatusCode(t *testing.T) {\n\tassert.True(t, ValidateResponseStatusCode(http.StatusOK, http.StatusOK, \"test\"))\n\tassert.False(t, ValidateResponseStatusCode(http.StatusOK, http.StatusInternalServerError, \"test\"))\n}\n\nfunc TestValidateServiceResponseTime(t *testing.T) {\n\tassert.True(t, ValidateServiceResponseTime(10, \"test\"))\n\tassert.False(t, ValidateServiceResponseTime(-10, \"test\"))\n\tassert.False(t, ValidateServiceResponseTime(0, \"test\"))\n}\n\nfunc TestValidatePeakMemoryVariance(t *testing.T) {\n\tassert.True(t, ValidatePeakMemoryVariance(15, 0.1))\n\tassert.False(t, ValidatePeakMemoryVariance(15, 16.5))\n\tassert.True(t, ValidatePeakMemoryVariance(15, 15))\n}\n\nfunc TestValidateAverageServiceResponeTimeVariance(t *testing.T) {\n\tassert.True(t, ValidateAverageServiceResponseTimeVariance(15, 10))\n\tassert.True(t, ValidateAverageServiceResponseTimeVariance(15, 15))\n\tassert.False(t, ValidateAverageServiceResponseTimeVariance(15, 16))\n}\n\nfunc TestGenerateEnvBasePerfOutputFile(t *testing.T) {\n\tps := &PerfStats{\n\t\tTestTimeStart: time.Now(),\n\t\tPeakMemory: 10e6,\n\t}\n\n\tbs := &BasePerfStats{\n\t\tBaseServiceResponseTimes: make(map[string]int64),\n\t}\n\n\tpma := make([]uint64, 0)\n\tfor i := 0; i < 100; i++ {\n\t\tpma = append(pma, ps.PeakMemory+uint64(5e5*rand.Int()))\n\t}\n\tps.MemoryAudit = pma\n\n\tps.TestPartitions = []TestPartition{TestPartition{Count: 0, TestName: \"StartUp\"}, TestPartition{Count: 30, TestName: \"service 1\"}, TestPartition{Count: 60, TestName: \"service 2\"}, TestPartition{Count: 90, TestName: \"service 3\"}}\n\n\tpsrt := make(map[string]int64)\n\tpsrt[\"service 1\"] = 3e5\n\tpsrt[\"service 2\"] = 2e5\n\tps.ServiceResponseTimes = psrt\n\n\twillCallOsExit := false\n\texit := func(i int) { willCallOsExit = true }\n\n\tGenerateEnvBasePerfOutputFile(ps, bs, &Config{ReBaseMemory: true, BaseStatsOutputDir: \"env\", TargetHost: \"localhost\"}, exit, mockedFs)\n\tassert.False(t, willCallOsExit)\n}\n\nfunc TestGenerateEnvBasePerfOutputFileFailCreate(t *testing.T) {\n\tps := &PerfStats{\n\t\tTestTimeStart: time.Now(),\n\t\tPeakMemory: 10e6,\n\t}\n\n\tbs := &BasePerfStats{\n\t\tBaseServiceResponseTimes: make(map[string]int64),\n\t}\n\n\tpma := make([]uint64, 0)\n\tfor i := 0; i < 100; i++ {\n\t\tpma = append(pma, ps.PeakMemory+uint64(5e5*rand.Int()))\n\t}\n\tps.MemoryAudit = pma\n\n\tps.TestPartitions = []TestPartition{TestPartition{Count: 0, TestName: \"StartUp\"}, TestPartition{Count: 30, TestName: \"service 1\"}, TestPartition{Count: 60, TestName: \"service 2\"}, TestPartition{Count: 90, TestName: \"service 3\"}}\n\n\tpsrt := make(map[string]int64)\n\tpsrt[\"service 1\"] = 3e5\n\tpsrt[\"service 2\"] = 2e5\n\tps.ServiceResponseTimes = psrt\n\n\twillCallOsExit := false\n\texit := func(i int) { willCallOsExit = true }\n\n\tGenerateEnvBasePerfOutputFile(ps, bs, &Config{ReBaseMemory: true, BaseStatsOutputDir: \"env\", ExecutionHost: \"FAIL\"}, exit, mockedFs)\n\tassert.True(t, willCallOsExit)\n}\n\nfunc TestValidateBasePerfStat(t *testing.T) {\n\tc := &Config{}\n\tc.SetDefaults()\n\n\tbs := &BasePerfStats{}\n\tassert.False(t, validateBasePerfStat(bs, c))\n\n\tbs.BaseServiceResponseTimes = map[string]int64{\"service 1\": 123, \"service 2\": -1}\n\tassert.False(t, validateBasePerfStat(bs, c))\n\n\tbs.BaseServiceResponseTimes = map[string]int64{\"service 1\": 123, \"service 2\": 321}\n\tbs.BasePeakMemory = 12\n\tbs.GenerationDate = \"aaa\"\n\tbs.ModifiedDate = \"bbb\"\n\tbs.MemoryAudit = []uint64{1, 2, 3}\n\tassert.True(t, validateBasePerfStat(bs, c))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017, Oracle and\/or its affiliates. All rights reserved.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/MustWin\/baremetal-sdk-go\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\n\t\"github.com\/oracle\/terraform-provider-baremetal\/client\"\n\t\"github.com\/oracle\/terraform-provider-baremetal\/crud\"\n)\n\nfunc LoadBalancerResource() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: createLoadBalancer,\n\t\tRead: readLoadBalancer,\n\t\tUpdate: updateLoadBalancer,\n\t\tDelete: deleteLoadBalancer,\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\/\/ Required {\n\t\t\t\"compartment_id\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"shape\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"subnet_ids\": {\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t},\n\t\t\t\"display_name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\t\/\/ }\n\t\t\t\/\/ Computed {\n\t\t\t\"id\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"ip_addresses\": {\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tComputed: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t},\n\t\t\t\"state\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"time_created\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc createLoadBalancer(d *schema.ResourceData, m interface{}) (e error) {\n\tsync := &LoadBalancerResourceCrud{}\n\tsync.D = d\n\tsync.Client = m.(client.BareMetalClient)\n\treturn crud.CreateResource(d, sync)\n}\n\nfunc readLoadBalancer(d *schema.ResourceData, m interface{}) (e error) {\n\tsync := &LoadBalancerResourceCrud{}\n\tsync.D = d\n\tsync.Client = m.(client.BareMetalClient)\n\treturn crud.ReadResource(sync)\n}\n\nfunc updateLoadBalancer(d *schema.ResourceData, m interface{}) (e error) {\n\tsync := &LoadBalancerResourceCrud{}\n\tsync.D = d\n\tsync.Client = m.(client.BareMetalClient)\n\treturn crud.UpdateResource(d, sync)\n}\n\nfunc deleteLoadBalancer(d *schema.ResourceData, m interface{}) (e error) {\n\tsync := &LoadBalancerResourceCrud{}\n\tsync.D = d\n\tsync.Client = m.(client.BareMetalClient)\n\treturn crud.DeleteResource(d, sync)\n}\n\n\/\/ LoadBalancerResourceCrud wraps a baremetal.LoadBalancer to support crud\ntype LoadBalancerResourceCrud struct {\n\tcrud.BaseCrud\n\tWorkRequest *baremetal.WorkRequest\n\tResource *baremetal.LoadBalancer\n}\n\n\/\/ ID delegates to the load balancer ID, falling back to the work request ID\nfunc (s *LoadBalancerResourceCrud) ID() string {\n\tlog.Printf(\"[DEBUG] lb.LoadBalancerResourceCrud.ID()\")\n\tlog.Printf(\"[DEBUG] lb.LoadBalancerResourceCrud.ID: Resource: %#v\", s.Resource)\n\tif s.Resource != nil && s.Resource.ID != \"\" {\n\t\tlog.Printf(\"[DEBUG] lb.LoadBalancerResourceCrud.ID: Resource.ID: %#v\", s.Resource.ID)\n\t\treturn s.Resource.ID\n\t}\n\tlog.Printf(\"[DEBUG] lb.LoadBalancerResourceCrud.ID: WorkRequest: %#v\", s.WorkRequest)\n\tif s.WorkRequest != nil {\n\t\tlog.Printf(\"[DEBUG] lb.LoadBalancerResourceCrud.ID: WorkRequest.State: %s\", s.WorkRequest.State)\n\t\tif s.WorkRequest.State == baremetal.WorkRequestSucceeded {\n\t\t\tlog.Printf(\"[DEBUG] lb.LoadBalancerResourceCrud.ID: WorkRequest.LoadBalancerID: %#v\", s.WorkRequest.LoadBalancerID)\n\t\t\treturn s.WorkRequest.LoadBalancerID\n\t\t} else {\n\t\t\tlog.Printf(\"[DEBUG] lb.LoadBalancerResourceCrud.ID: WorkRequest.ID: %s\", s.WorkRequest.ID)\n\t\t\treturn s.WorkRequest.ID\n\t\t}\n\t}\n\tlog.Printf(\"[DEBUG] lb.LoadBalancerResourceCrud.ID: Resource & WorkRequest are nil, returning \\\"\\\"\")\n\treturn \"\"\n}\n\n\/\/ RefreshWorkRequest returns the last updated workRequest\nfunc (s *LoadBalancerResourceCrud) RefreshWorkRequest() (*baremetal.WorkRequest, error) {\n\tif s.WorkRequest == nil {\n\t\treturn nil, nil\n\t}\n\twr, err := s.Client.GetWorkRequest(s.WorkRequest.ID, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts.WorkRequest = wr\n\treturn wr, nil\n}\n\n\/\/ CreatedPending returns the resource states which qualify as \"creating\"\nfunc (s *LoadBalancerResourceCrud) CreatedPending() []string {\n\treturn []string{\n\t\tbaremetal.ResourceWaitingForWorkRequest,\n\t\tbaremetal.ResourceCreating,\n\t}\n}\n\n\/\/ CreatedTarget returns the resource states which qualify as \"created\"\nfunc (s *LoadBalancerResourceCrud) CreatedTarget() []string {\n\treturn []string{\n\t\tbaremetal.ResourceActive,\n\t}\n}\n\n\/\/ DeletedPending returns the resource states which qualify as \"deleting\"\nfunc (s *LoadBalancerResourceCrud) DeletedPending() []string {\n\treturn []string{\n\t\tbaremetal.ResourceWaitingForWorkRequest,\n\t\tbaremetal.ResourceDeleting,\n\t}\n}\n\n\/\/ DeletedTarget returns the resource states which qualify as \"deleted\"\nfunc (s *LoadBalancerResourceCrud) DeletedTarget() []string {\n\treturn []string{baremetal.ResourceDeleted}\n}\n\n\/\/ Create makes a request to create a new load balancer from the resourceData\n\/\/ It should leave the work request set up\nfunc (s *LoadBalancerResourceCrud) Create() (e error) {\n\tsns := []string{}\n\tfor _, v := range s.D.Get(\"subnet_ids\").([]interface{}) {\n\t\tsns = append(sns, v.(string))\n\t}\n\n\topts := &baremetal.CreateOptions{}\n\topts.DisplayName = s.D.Get(\"display_name\").(string)\n\n\tworkReqID, e := s.Client.CreateLoadBalancer(\n\t\tnil,\n\t\tnil,\n\t\ts.D.Get(\"compartment_id\").(string),\n\t\tnil,\n\t\ts.D.Get(\"shape\").(string),\n\t\tsns,\n\t\topts)\n\n\tif e != nil {\n\t\treturn\n\t}\n\ts.WorkRequest, e = s.Client.GetWorkRequest(workReqID, nil)\n\ts.D.Set(\"state\", s.WorkRequest.State)\n\treturn\n}\n\n\/\/ Get makes a request to get the load balancer, populating s.Resource.\nfunc (s *LoadBalancerResourceCrud) Get() (e error) {\n\t\/\/ key: {workRequestID} || {loadBalancerID}\n\tid := s.D.Id()\n\tlog.Printf(\"[DEBUG] lb.LoadBalancerBackendResource.Get: ID: %#v\", id)\n\tif id == \"\" {\n\t\tpanic(fmt.Sprintf(\"LoadBalancer had empty ID: %#v Resource: %#V\", s, s.Resource))\n\t}\n\twr := s.WorkRequest\n\tlog.Printf(\"[DEBUG] lb.LoadBalancerBackendResource.Get: WorkRequest: %#v\", wr)\n\tstate := s.D.Get(\"state\").(string)\n\tlog.Printf(\"[DEBUG] lb.LoadBalancerBackendResource.Get: State: %#v\", state)\n\n\t\/\/ NOTE: if the id is for a work request, refresh its state and loadBalancerID. then refresh the load balancer\n\tif strings.HasPrefix(id, \"ocid1.loadbalancerworkrequest.\") {\n\t\tlog.Printf(\"[DEBUG] lb.LoadBalancerBackendResource.Get: ID is for WorkRequest, refreshing\")\n\t\ts.WorkRequest, e = s.Client.GetWorkRequest(id, nil)\n\t\tlog.Printf(\"[DEBUG] lb.LoadBalancerBackendResource.Get: WorkRequest: %#v\", s.WorkRequest)\n\t\ts.D.Set(\"state\", s.WorkRequest.State)\n\t\tif s.WorkRequest.State == baremetal.WorkRequestSucceeded {\n\t\t\tid = s.WorkRequest.LoadBalancerID\n\t\t\tif id == \"\" {\n\t\t\t\tpanic(fmt.Sprintf(\"WorkRequest had empty LoadBalancerID: %#v\", s.WorkRequest))\n\t\t\t}\n\t\t\ts.D.SetId(id)\n\t\t\t\/\/ unset work request on success\n\t\t\ts.WorkRequest = nil\n\t\t} else {\n\t\t\t\/\/ We do not have a LoadBalancerID, so we short-circuit out\n\t\t\treturn\n\n\t\t}\n\t}\n\n\tif !strings.HasPrefix(id, \"ocid1.loadbalancer.\") {\n\t\tpanic(fmt.Sprintf(\"Cannot request loadbalancer with this ID, expected it to begin with \\\"ocid1.loadbalancer.\\\", but was: %#v\", id))\n\t}\n\tlog.Printf(\"[DEBUG] lb.LoadBalancerBackendResource.Get: ID: %#v\", id)\n\tif id == \"\" {\n\t\tpanic(fmt.Sprintf(\"LoadBalancer had empty ID: %#v Resource: %#V\", s, s.Resource))\n\t}\n\ts.Resource, e = s.Client.GetLoadBalancer(id, nil)\n\n\treturn\n}\n\n\/\/ Update makes a request to update the load balancer\nfunc (s *LoadBalancerResourceCrud) Update() (e error) {\n\topts := &baremetal.UpdateOptions{}\n\tif displayName, ok := s.D.GetOk(\"display_name\"); ok {\n\t\topts.DisplayName = displayName.(string)\n\t}\n\n\tvar workReqID string\n\tworkReqID, e = s.Client.UpdateLoadBalancer(s.D.Id(), opts)\n\tif e != nil {\n\t\treturn\n\t}\n\ts.WorkRequest, e = s.Client.GetWorkRequest(workReqID, nil)\n\treturn\n}\n\n\/\/ SetData populates the resourceData from the model\nfunc (s *LoadBalancerResourceCrud) SetData() {\n\t\/\/ The first time this is called, we haven't actually fetched the resource yet, we just got a work request\n\tif s.Resource != nil && s.Resource.ID != \"\" {\n\t\ts.D.SetId(s.Resource.ID)\n\t\ts.D.Set(\"compartment_id\", s.Resource.CompartmentID)\n\t\ts.D.Set(\"display_name\", s.Resource.DisplayName)\n\t\ts.D.Set(\"shape\", s.Resource.Shape)\n\t\ts.D.Set(\"subnet_ids\", s.Resource.SubnetIDs)\n\t\t\/\/ Computed\n\t\ts.D.Set(\"id\", s.Resource.ID)\n\t\ts.D.Set(\"state\", s.Resource.State)\n\t\ts.D.Set(\"time_created\", s.Resource.TimeCreated.String())\n\t\tip_addresses := make([]string, len(s.Resource.IPAddresses))\n\t\tfor i, ad := range s.Resource.IPAddresses {\n\t\t\tip_addresses[i] = ad.IPAddress\n\t\t}\n\t\ts.D.Set(\"ip_addresses\", ip_addresses)\n\t}\n}\n\n\/\/ Delete makes a request to delete the load balancer\nfunc (s *LoadBalancerResourceCrud) Delete() (e error) {\n\tvar workReqID string\n\tworkReqID, e = s.Client.DeleteLoadBalancer(s.D.Id(), nil)\n\tif e != nil {\n\t\treturn\n\t}\n\ts.WorkRequest, e = s.Client.GetWorkRequest(workReqID, nil)\n\treturn\n}\n<commit_msg>Fix state transitions for load balancers. Closes #84.<commit_after>\/\/ Copyright (c) 2017, Oracle and\/or its affiliates. All rights reserved.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/MustWin\/baremetal-sdk-go\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\n\t\"github.com\/oracle\/terraform-provider-baremetal\/client\"\n\t\"github.com\/oracle\/terraform-provider-baremetal\/crud\"\n)\n\nfunc LoadBalancerResource() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: createLoadBalancer,\n\t\tRead: readLoadBalancer,\n\t\tUpdate: updateLoadBalancer,\n\t\tDelete: deleteLoadBalancer,\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\/\/ Required {\n\t\t\t\"compartment_id\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"shape\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"subnet_ids\": {\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t},\n\t\t\t\"display_name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\t\/\/ }\n\t\t\t\/\/ Computed {\n\t\t\t\"id\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"ip_addresses\": {\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tComputed: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t},\n\t\t\t\"state\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"time_created\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc createLoadBalancer(d *schema.ResourceData, m interface{}) (e error) {\n\tsync := &LoadBalancerResourceCrud{}\n\tsync.D = d\n\tsync.Client = m.(client.BareMetalClient)\n\treturn crud.CreateResource(d, sync)\n}\n\nfunc readLoadBalancer(d *schema.ResourceData, m interface{}) (e error) {\n\tsync := &LoadBalancerResourceCrud{}\n\tsync.D = d\n\tsync.Client = m.(client.BareMetalClient)\n\treturn crud.ReadResource(sync)\n}\n\nfunc updateLoadBalancer(d *schema.ResourceData, m interface{}) (e error) {\n\tsync := &LoadBalancerResourceCrud{}\n\tsync.D = d\n\tsync.Client = m.(client.BareMetalClient)\n\treturn crud.UpdateResource(d, sync)\n}\n\nfunc deleteLoadBalancer(d *schema.ResourceData, m interface{}) (e error) {\n\tsync := &LoadBalancerResourceCrud{}\n\tsync.D = d\n\tsync.Client = m.(client.BareMetalClient)\n\treturn crud.DeleteResource(d, sync)\n}\n\n\/\/ LoadBalancerResourceCrud wraps a baremetal.LoadBalancer to support crud\ntype LoadBalancerResourceCrud struct {\n\tcrud.BaseCrud\n\tWorkRequest *baremetal.WorkRequest\n\tResource *baremetal.LoadBalancer\n}\n\n\/\/ ID delegates to the load balancer ID, falling back to the work request ID\nfunc (s *LoadBalancerResourceCrud) ID() string {\n\tlog.Printf(\"[DEBUG] lb.LoadBalancerResourceCrud.ID()\")\n\tlog.Printf(\"[DEBUG] lb.LoadBalancerResourceCrud.ID: Resource: %#v\", s.Resource)\n\tif s.Resource != nil && s.Resource.ID != \"\" {\n\t\tlog.Printf(\"[DEBUG] lb.LoadBalancerResourceCrud.ID: Resource.ID: %#v\", s.Resource.ID)\n\t\treturn s.Resource.ID\n\t}\n\tlog.Printf(\"[DEBUG] lb.LoadBalancerResourceCrud.ID: WorkRequest: %#v\", s.WorkRequest)\n\tif s.WorkRequest != nil {\n\t\tlog.Printf(\"[DEBUG] lb.LoadBalancerResourceCrud.ID: WorkRequest.State: %s\", s.WorkRequest.State)\n\t\tif s.WorkRequest.State == baremetal.WorkRequestSucceeded {\n\t\t\tlog.Printf(\"[DEBUG] lb.LoadBalancerResourceCrud.ID: WorkRequest.LoadBalancerID: %#v\", s.WorkRequest.LoadBalancerID)\n\t\t\treturn s.WorkRequest.LoadBalancerID\n\t\t} else {\n\t\t\tlog.Printf(\"[DEBUG] lb.LoadBalancerResourceCrud.ID: WorkRequest.ID: %s\", s.WorkRequest.ID)\n\t\t\treturn s.WorkRequest.ID\n\t\t}\n\t}\n\tlog.Printf(\"[DEBUG] lb.LoadBalancerResourceCrud.ID: Resource & WorkRequest are nil, returning \\\"\\\"\")\n\treturn \"\"\n}\n\n\/\/ RefreshWorkRequest returns the last updated workRequest\nfunc (s *LoadBalancerResourceCrud) RefreshWorkRequest() (*baremetal.WorkRequest, error) {\n\tif s.WorkRequest == nil {\n\t\treturn nil, nil\n\t}\n\twr, err := s.Client.GetWorkRequest(s.WorkRequest.ID, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts.WorkRequest = wr\n\treturn wr, nil\n}\n\n\/\/ CreatedPending returns the resource states which qualify as \"creating\"\nfunc (s *LoadBalancerResourceCrud) CreatedPending() []string {\n\treturn []string{\n\t\tbaremetal.ResourceWaitingForWorkRequest,\n\t\tbaremetal.ResourceCreating,\n\t\tbaremetal.WorkRequestAccepted,\n\t\tbaremetal.WorkRequestInProgress,\n\t}\n}\n\n\/\/ CreatedTarget returns the resource states which qualify as \"created\"\nfunc (s *LoadBalancerResourceCrud) CreatedTarget() []string {\n\treturn []string{\n\t\tbaremetal.ResourceActive,\n\t}\n}\n\n\/\/ DeletedPending returns the resource states which qualify as \"deleting\"\nfunc (s *LoadBalancerResourceCrud) DeletedPending() []string {\n\treturn []string{\n\t\tbaremetal.ResourceWaitingForWorkRequest,\n\t\tbaremetal.ResourceDeleting,\n\t\tbaremetal.WorkRequestAccepted,\n\t\tbaremetal.WorkRequestInProgress,\n\t}\n}\n\n\/\/ DeletedTarget returns the resource states which qualify as \"deleted\"\nfunc (s *LoadBalancerResourceCrud) DeletedTarget() []string {\n\treturn []string{baremetal.ResourceDeleted}\n}\n\n\/\/ Create makes a request to create a new load balancer from the resourceData\n\/\/ It should leave the work request set up\nfunc (s *LoadBalancerResourceCrud) Create() (e error) {\n\tsns := []string{}\n\tfor _, v := range s.D.Get(\"subnet_ids\").([]interface{}) {\n\t\tsns = append(sns, v.(string))\n\t}\n\n\topts := &baremetal.CreateOptions{}\n\topts.DisplayName = s.D.Get(\"display_name\").(string)\n\n\tworkReqID, e := s.Client.CreateLoadBalancer(\n\t\tnil,\n\t\tnil,\n\t\ts.D.Get(\"compartment_id\").(string),\n\t\tnil,\n\t\ts.D.Get(\"shape\").(string),\n\t\tsns,\n\t\topts)\n\n\tif e != nil {\n\t\treturn\n\t}\n\ts.WorkRequest, e = s.Client.GetWorkRequest(workReqID, nil)\n\ts.D.Set(\"state\", s.WorkRequest.State)\n\treturn\n}\n\n\/\/ Get makes a request to get the load balancer, populating s.Resource.\nfunc (s *LoadBalancerResourceCrud) Get() (e error) {\n\t\/\/ key: {workRequestID} || {loadBalancerID}\n\tid := s.D.Id()\n\tlog.Printf(\"[DEBUG] lb.LoadBalancerBackendResource.Get: ID: %#v\", id)\n\tif id == \"\" {\n\t\tpanic(fmt.Sprintf(\"LoadBalancer had empty ID: %#v Resource: %#V\", s, s.Resource))\n\t}\n\twr := s.WorkRequest\n\tlog.Printf(\"[DEBUG] lb.LoadBalancerBackendResource.Get: WorkRequest: %#v\", wr)\n\tstate := s.D.Get(\"state\").(string)\n\tlog.Printf(\"[DEBUG] lb.LoadBalancerBackendResource.Get: State: %#v\", state)\n\n\t\/\/ NOTE: if the id is for a work request, refresh its state and loadBalancerID. then refresh the load balancer\n\tif strings.HasPrefix(id, \"ocid1.loadbalancerworkrequest.\") {\n\t\tlog.Printf(\"[DEBUG] lb.LoadBalancerBackendResource.Get: ID is for WorkRequest, refreshing\")\n\t\ts.WorkRequest, e = s.Client.GetWorkRequest(id, nil)\n\t\tlog.Printf(\"[DEBUG] lb.LoadBalancerBackendResource.Get: WorkRequest: %#v\", s.WorkRequest)\n\t\ts.D.Set(\"state\", s.WorkRequest.State)\n\t\tif s.WorkRequest.State == baremetal.WorkRequestSucceeded {\n\t\t\tid = s.WorkRequest.LoadBalancerID\n\t\t\tif id == \"\" {\n\t\t\t\tpanic(fmt.Sprintf(\"WorkRequest had empty LoadBalancerID: %#v\", s.WorkRequest))\n\t\t\t}\n\t\t\ts.D.SetId(id)\n\t\t\t\/\/ unset work request on success\n\t\t\ts.WorkRequest = nil\n\t\t} else {\n\t\t\t\/\/ We do not have a LoadBalancerID, so we short-circuit out\n\t\t\treturn\n\n\t\t}\n\t}\n\n\tif !strings.HasPrefix(id, \"ocid1.loadbalancer.\") {\n\t\tpanic(fmt.Sprintf(\"Cannot request loadbalancer with this ID, expected it to begin with \\\"ocid1.loadbalancer.\\\", but was: %#v\", id))\n\t}\n\tlog.Printf(\"[DEBUG] lb.LoadBalancerBackendResource.Get: ID: %#v\", id)\n\tif id == \"\" {\n\t\tpanic(fmt.Sprintf(\"LoadBalancer had empty ID: %#v Resource: %#V\", s, s.Resource))\n\t}\n\ts.Resource, e = s.Client.GetLoadBalancer(id, nil)\n\n\treturn\n}\n\n\/\/ Update makes a request to update the load balancer\nfunc (s *LoadBalancerResourceCrud) Update() (e error) {\n\topts := &baremetal.UpdateOptions{}\n\tif displayName, ok := s.D.GetOk(\"display_name\"); ok {\n\t\topts.DisplayName = displayName.(string)\n\t}\n\n\tvar workReqID string\n\tworkReqID, e = s.Client.UpdateLoadBalancer(s.D.Id(), opts)\n\tif e != nil {\n\t\treturn\n\t}\n\ts.WorkRequest, e = s.Client.GetWorkRequest(workReqID, nil)\n\treturn\n}\n\n\/\/ SetData populates the resourceData from the model\nfunc (s *LoadBalancerResourceCrud) SetData() {\n\t\/\/ The first time this is called, we haven't actually fetched the resource yet, we just got a work request\n\tif s.Resource != nil && s.Resource.ID != \"\" {\n\t\ts.D.SetId(s.Resource.ID)\n\t\ts.D.Set(\"compartment_id\", s.Resource.CompartmentID)\n\t\ts.D.Set(\"display_name\", s.Resource.DisplayName)\n\t\ts.D.Set(\"shape\", s.Resource.Shape)\n\t\ts.D.Set(\"subnet_ids\", s.Resource.SubnetIDs)\n\t\t\/\/ Computed\n\t\ts.D.Set(\"id\", s.Resource.ID)\n\t\ts.D.Set(\"state\", s.Resource.State)\n\t\ts.D.Set(\"time_created\", s.Resource.TimeCreated.String())\n\t\tip_addresses := make([]string, len(s.Resource.IPAddresses))\n\t\tfor i, ad := range s.Resource.IPAddresses {\n\t\t\tip_addresses[i] = ad.IPAddress\n\t\t}\n\t\ts.D.Set(\"ip_addresses\", ip_addresses)\n\t}\n}\n\n\/\/ Delete makes a request to delete the load balancer\nfunc (s *LoadBalancerResourceCrud) Delete() (e error) {\n\tvar workReqID string\n\tworkReqID, e = s.Client.DeleteLoadBalancer(s.D.Id(), nil)\n\tif e != nil {\n\t\treturn\n\t}\n\ts.WorkRequest, e = s.Client.GetWorkRequest(workReqID, nil)\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux\n\npackage shadowsocks\n\nimport (\n\t\"encoding\/binary\"\n\t\"net\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nconst SO_ORIGINAL_DST = 80\n\nfunc ntohs(in uint16) (out uint16) {\n\tdata := [2]byte{}\n\tbinary.LittleEndian.PutUint16(data[:], in)\n\tif uint16(data[0]) == in&0xff { \/\/ LE\n\t\treturn binary.BigEndian.Uint16(data[:])\n\t} else { \/\/ BE\n\t\treturn in\n\t}\n}\n\nfunc getOrigAddr(conn *net.TCPConn) (*net.TCPAddr, error) {\n\tf, err := conn.File()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfd := f.Fd()\n\tsyscall.SetNonblock(int(fd), true)\n\n\tsaddr := syscall.RawSockaddrInet4{}\n\tsize := unsafe.Sizeof(saddr)\n\t_, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, uintptr(fd), syscall.SOL_IP, SO_ORIGINAL_DST, uintptr(unsafe.Pointer(&saddr)), uintptr(unsafe.Pointer(&size)), 0)\n\tif errno == 0 {\n\t\tres := &net.TCPAddr{\n\t\t\tIP: net.IPv4(saddr.Addr[0], saddr.Addr[1], saddr.Addr[2], saddr.Addr[3]),\n\t\t\tPort: int(ntohs(saddr.Port)),\n\t\t}\n\t\treturn res, nil\n\t}\n\n\tsaddr6 := syscall.RawSockaddrInet6{}\n\tsize6 := unsafe.Sizeof(saddr6)\n\t_, _, errno6 := syscall.Syscall6(syscall.SYS_GETSOCKOPT, uintptr(fd), syscall.SOL_IPV6, SO_ORIGINAL_DST, uintptr(unsafe.Pointer(&saddr6)), uintptr(unsafe.Pointer(&size6)), 0)\n\tif errno6 == 0 {\n\t\tres := &net.TCPAddr{\n\t\t\tIP: net.IP(append([]byte(nil), saddr6.Addr[:]...)),\n\t\t\tPort: int(ntohs(saddr6.Port)),\n\t\t}\n\t\treturn res, nil\n\t}\n\treturn nil, nil\n}\n\nfunc DetectRedir(tconn SSConn) bool {\n\tc := tconn.(PlainConn).TCPConn\n\taddr := c.LocalAddr().(*net.TCPAddr)\n\torig, err := getOrigAddr(c)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif orig.IP.Equal(addr.IP) && orig.Port == addr.Port {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (ctx *ClientContext) HandleRedir(tconn SSConn, buf *SSBuffer) error {\n\treturn nil\n}\n<commit_msg>handle redir<commit_after>\/\/ +build linux\n\npackage shadowsocks\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"net\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nconst SO_ORIGINAL_DST = 80\n\nfunc ntohs(in uint16) (out uint16) {\n\tdata := [2]byte{}\n\tbinary.LittleEndian.PutUint16(data[:], in)\n\tif uint16(data[0]) == in&0xff { \/\/ LE\n\t\treturn binary.BigEndian.Uint16(data[:])\n\t} else { \/\/ BE\n\t\treturn in\n\t}\n}\n\nfunc getOrigAddr(conn *net.TCPConn) (*net.TCPAddr, error) {\n\tf, err := conn.File()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\tfd := f.Fd()\n\tsyscall.SetNonblock(int(fd), true)\n\n\tsaddr := syscall.RawSockaddrInet4{}\n\tsize := unsafe.Sizeof(saddr)\n\t_, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, uintptr(fd), syscall.SOL_IP, SO_ORIGINAL_DST, uintptr(unsafe.Pointer(&saddr)), uintptr(unsafe.Pointer(&size)), 0)\n\tif errno == 0 {\n\t\tres := &net.TCPAddr{\n\t\t\tIP: net.IPv4(saddr.Addr[0], saddr.Addr[1], saddr.Addr[2], saddr.Addr[3]),\n\t\t\tPort: int(ntohs(saddr.Port)),\n\t\t}\n\t\treturn res, nil\n\t}\n\n\tsaddr6 := syscall.RawSockaddrInet6{}\n\tsize6 := unsafe.Sizeof(saddr6)\n\t_, _, errno6 := syscall.Syscall6(syscall.SYS_GETSOCKOPT, uintptr(fd), syscall.SOL_IPV6, SO_ORIGINAL_DST, uintptr(unsafe.Pointer(&saddr6)), uintptr(unsafe.Pointer(&size6)), 0)\n\tif errno6 == 0 {\n\t\tres := &net.TCPAddr{\n\t\t\tIP: net.IP(append([]byte(nil), saddr6.Addr[:]...)),\n\t\t\tPort: int(ntohs(saddr6.Port)),\n\t\t}\n\t\treturn res, nil\n\t}\n\treturn nil, nil\n}\n\nfunc DetectRedir(tconn SSConn) bool {\n\tc := tconn.(PlainConn).TCPConn\n\taddr := c.LocalAddr().(*net.TCPAddr)\n\torig, err := getOrigAddr(c)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif orig == nil || orig.IP.Equal(addr.IP) && orig.Port == addr.Port {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (ctx *ClientContext) HandleRedir(tconn SSConn, buf *SSBuffer) (err error) {\n\trbuf := NewBuffer()\n\taddr, _ := getOrigAddr(tconn.(PlainConn).TCPConn)\n\tif bytes.Equal(addr.IP[:12], v4InV6Prefix) {\n\t\tbuf.buf = buf.buf[:7]\n\t\tbuf.buf[0] = 0x01\n\t\tcopy(buf.buf[1:5], addr.IP[12:])\n\t\tbinary.BigEndian.PutUint16(buf.buf[5:], uint16(addr.Port))\n\t} else {\n\t\tbuf.buf = buf.buf[:19]\n\t\tbuf.buf[0] = 0x04\n\t\tcopy(buf.buf[1:17], addr.IP[:])\n\t\tbinary.BigEndian.PutUint16(buf.buf[17:], uint16(addr.Port))\n\t}\n\n\tvar rconn net.Conn\n\trconn, err = net.Dial(\"tcp\", ctx.serverAddr)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer rconn.Close()\n\trconn.(*net.TCPConn).SetNoDelay(true)\n\ttrconn := PlainConn{rconn.(*net.TCPConn)}\n\twrconn := ctx.cipherFactory.Wrap(trconn)\n\n\tres := make(chan error, 1)\n\trres := make(chan error, 1)\n\tgo Pipe(tconn, wrconn, buf, res)\n\tgo Pipe(wrconn, tconn, rbuf, rres)\n\n\tselect {\n\tcase err = <-res:\n\tcase err = <-rres:\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright 2016 Vastech SA (PTY) LTD\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage report\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/izakmarais\/reporter\/grafana\"\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n)\n\nconst dashJSON = `\n{\"Dashboard\":\n\t{\n\t\t\"Title\":\"My first dashboard\",\n\t\t\"Rows\":\n\t\t[{\"Panels\":\n\t\t\t[{\"Type\":\"singlestat\", \"Id\":1},\n\t\t\t {\"Type\":\"graph\", \"Id\":22}]\n\t\t},\n\t\t{\"Panels\":\n\t\t\t[{\"Type\":\"singlestat\", \"Id\":33}]\n\t\t}]\n\t},\n\"Meta\":\n\t{\"Slug\":\"testDash\"}\n}`\n\ntype mockGrafanaClient struct {\n}\n\nfunc (m mockGrafanaClient) GetDashboard(dashName string) (grafana.Dashboard, error) {\n\treturn grafana.NewDashboard([]byte(dashJSON)), nil\n}\n\nfunc (m mockGrafanaClient) GetPanelPng(p grafana.Panel, dashName string, t grafana.TimeRange) (io.ReadCloser, error) {\n\treturn ioutil.NopCloser(bytes.NewBuffer([]byte(\"Not actually a png\"))), nil\n}\n\nfunc TestReport(t *testing.T) {\n\tConvey(\"When generating a report\", t, func() {\n\t\tvar gClient mockGrafanaClient\n\t\trep := New(gClient, \"testDash\", grafana.TimeRange{\"1453206447000\", \"1453213647000\"})\n\t\tdefer rep.Clean()\n\n\t\tConvey(\"When rendering images\", func() {\n\t\t\tdashboard,_ := gClient.GetDashboard(\"\")\n\t\t\trep.renderPNGsParallel(dashboard)\n\n\t\t\tConvey(\"It should create a temporary folder\", func() {\n\t\t\t\t_, err := os.Stat(rep.tmpDir)\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t})\n\n\t\t\tConvey(\"It should copy the file to the image folder\", func() {\n\t\t\t\t_, err := os.Stat(rep.imgDirPath() + \"\/image1.png\")\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t})\n\n\t\t\tConvey(\"It should create one file per panel\", func() {\n\t\t\t\tf, err := os.Open(rep.imgDirPath())\n\t\t\t\tdefer f.Close()\n\t\t\t\tfiles, err := f.Readdir(0)\n\t\t\t\tSo(files, ShouldHaveLength, 3)\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"When genereting the Tex file\", func() {\n\t\t\tdashboard,_ := gClient.GetDashboard(\"\")\n\t\t\ttemplate_file := \"templates\/default.tex\"\n\t\t\trep.generateTeXFile(dashboard, template_file)\n\t\t\tf, err := os.Open(rep.texPath())\n\t\t\tdefer f.Close()\n\n\t\t\tConvey(\"It should create a file in the temporary folder\", func() {\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t})\n\n\t\t\tConvey(\"The file should contain reference to the template data\", func() {\n\t\t\t\tvar buf bytes.Buffer\n\t\t\t\tio.Copy(&buf, f)\n\t\t\t\ts := string(buf.Bytes())\n\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\tConvey(\"Including the Title\", func() {\n\t\t\t\t\tSo(s, ShouldContainSubstring, \"My first dashboard\")\n\n\t\t\t\t})\n\t\t\t\tConvey(\"and the images\", func() {\n\t\t\t\t\tSo(s, ShouldContainSubstring, \"image1\")\n\t\t\t\t\tSo(s, ShouldContainSubstring, \"image22\")\n\t\t\t\t\tSo(s, ShouldContainSubstring, \"image33\")\n\n\t\t\t\t})\n\t\t\t\tConvey(\"and the time range\", func() {\n\t\t\t\t\tSo(s, ShouldContainSubstring, \"Tue Jan 19 12:27:27 UTC 2016\")\n\t\t\t\t\tSo(s, ShouldContainSubstring, \"Tue Jan 19 14:27:27 UTC 2016\")\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"Clean() should remove the temporary folder\", func() {\n\t\t\trep.Clean()\n\n\t\t\t_, err := os.Stat(rep.tmpDir)\n\t\t\tSo(os.IsNotExist(err), ShouldBeTrue)\n\t\t})\n\t})\n\n}\n<commit_msg>fixed test<commit_after>\/*\n Copyright 2016 Vastech SA (PTY) LTD\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage report\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/izakmarais\/reporter\/grafana\"\n\t. \"github.com\/smartystreets\/goconvey\/convey\"\n)\n\nconst dashJSON = `\n{\"Dashboard\":\n\t{\n\t\t\"Title\":\"My first dashboard\",\n\t\t\"Rows\":\n\t\t[{\"Panels\":\n\t\t\t[{\"Type\":\"singlestat\", \"Id\":1},\n\t\t\t {\"Type\":\"graph\", \"Id\":22}]\n\t\t},\n\t\t{\"Panels\":\n\t\t\t[{\"Type\":\"singlestat\", \"Id\":33}]\n\t\t}]\n\t},\n\"Meta\":\n\t{\"Slug\":\"testDash\"}\n}`\n\ntype mockGrafanaClient struct {\n}\n\nfunc (m mockGrafanaClient) GetDashboard(dashName string) (grafana.Dashboard, error) {\n\treturn grafana.NewDashboard([]byte(dashJSON)), nil\n}\n\nfunc (m mockGrafanaClient) GetPanelPng(p grafana.Panel, dashName string, t grafana.TimeRange) (io.ReadCloser, error) {\n\treturn ioutil.NopCloser(bytes.NewBuffer([]byte(\"Not actually a png\"))), nil\n}\n\nfunc TestReport(t *testing.T) {\n\tConvey(\"When generating a report\", t, func() {\n\t\tvar gClient mockGrafanaClient\n\t\trep := New(gClient, \"testDash\", grafana.TimeRange{\"1453206447000\", \"1453213647000\"}, \"\")\n\t\tdefer rep.Clean()\n\n\t\tConvey(\"When rendering images\", func() {\n\t\t\tdashboard, _ := gClient.GetDashboard(\"\")\n\t\t\trep.renderPNGsParallel(dashboard)\n\n\t\t\tConvey(\"It should create a temporary folder\", func() {\n\t\t\t\t_, err := os.Stat(rep.tmpDir)\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t})\n\n\t\t\tConvey(\"It should copy the file to the image folder\", func() {\n\t\t\t\t_, err := os.Stat(rep.imgDirPath() + \"\/image1.png\")\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t})\n\n\t\t\tConvey(\"It should create one file per panel\", func() {\n\t\t\t\tf, err := os.Open(rep.imgDirPath())\n\t\t\t\tdefer f.Close()\n\t\t\t\tfiles, err := f.Readdir(0)\n\t\t\t\tSo(files, ShouldHaveLength, 3)\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"When genereting the Tex file\", func() {\n\t\t\tdashboard, _ := gClient.GetDashboard(\"\")\n\t\t\trep.generateTeXFile(dashboard)\n\t\t\tf, err := os.Open(rep.texPath())\n\t\t\tdefer f.Close()\n\n\t\t\tConvey(\"It should create a file in the temporary folder\", func() {\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t})\n\n\t\t\tConvey(\"The file should contain reference to the template data\", func() {\n\t\t\t\tvar buf bytes.Buffer\n\t\t\t\tio.Copy(&buf, f)\n\t\t\t\ts := string(buf.Bytes())\n\n\t\t\t\tSo(err, ShouldBeNil)\n\t\t\t\tConvey(\"Including the Title\", func() {\n\t\t\t\t\tSo(s, ShouldContainSubstring, \"My first dashboard\")\n\n\t\t\t\t})\n\t\t\t\tConvey(\"and the images\", func() {\n\t\t\t\t\tSo(s, ShouldContainSubstring, \"image1\")\n\t\t\t\t\tSo(s, ShouldContainSubstring, \"image22\")\n\t\t\t\t\tSo(s, ShouldContainSubstring, \"image33\")\n\n\t\t\t\t})\n\t\t\t\tConvey(\"and the time range\", func() {\n\t\t\t\t\tSo(s, ShouldContainSubstring, \"Tue Jan 19 12:27:27 UTC 2016\")\n\t\t\t\t\tSo(s, ShouldContainSubstring, \"Tue Jan 19 14:27:27 UTC 2016\")\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tConvey(\"Clean() should remove the temporary folder\", func() {\n\t\t\trep.Clean()\n\n\t\t\t_, err := os.Stat(rep.tmpDir)\n\t\t\tSo(os.IsNotExist(err), ShouldBeTrue)\n\t\t})\n\t})\n\n}\n<|endoftext|>"} {"text":"<commit_before>package vsolver\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n\n\t\"github.com\/Masterminds\/semver\"\n)\n\nvar basicResult Result\n\nfunc init() {\n\tsv1, _ := semver.NewVersion(\"1.0.0\")\n\tbasicResult = Result{\n\t\tAttempts: 1,\n\t\tProjects: []ProjectAtom{\n\t\t\tProjectAtom{\n\t\t\t\tName: \"github.com\/sdboyer\/testrepo\",\n\t\t\t\tVersion: Version{\n\t\t\t\t\tType: V_Branch,\n\t\t\t\t\tInfo: \"master\",\n\t\t\t\t\tUnderlying: \"4d59fb584b15a94d7401e356d2875c472d76ef45\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tProjectAtom{\n\t\t\t\tName: \"github.com\/Masterminds\/VCSTestRepo\",\n\t\t\t\tVersion: Version{\n\t\t\t\t\tType: V_Semver,\n\t\t\t\t\tInfo: \"1.0.0\",\n\t\t\t\t\tUnderlying: \"30605f6ac35fcb075ad0bfa9296f90a7d891523e\",\n\t\t\t\t\tSemVer: sv1,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc TestResultCreateVendorTree(t *testing.T) {\n\tr := basicResult\n\tr.SolveFailure = fmt.Errorf(\"dummy error\")\n\n\ttmp := path.Join(os.TempDir(), \"vsolvtest\")\n\tos.RemoveAll(tmp)\n\t\/\/fmt.Println(tmp)\n\n\tsm, err := NewSourceManager(path.Join(tmp, \"cache\"), path.Join(tmp, \"base\"), true, false, dummyAnalyzer{})\n\tif err != nil {\n\t\tt.Errorf(\"NewSourceManager errored unexpectedly: %q\", err)\n\t}\n\n\terr = r.CreateVendorTree(path.Join(tmp, \"export\"), sm)\n\tif err == fmt.Errorf(\"Cannot create vendor tree from failed solution. Failure was dummy error\") {\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected error due to result having solve failure, but no error\")\n\t\t} else {\n\t\t\tt.Errorf(\"Expected error due to result having solve failure, but got %s\", err)\n\t\t}\n\t}\n\n\tr.SolveFailure = nil\n\terr = r.CreateVendorTree(path.Join(tmp, \"export\"), sm)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error while creating vendor tree: %s\", err)\n\t}\n\n\t\/\/ TODO add more checks\n}\n<commit_msg>Add CreateVendorTree benchmark<commit_after>package vsolver\n\nimport (\n\t\"fmt\"\n\t\"go\/build\"\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n\n\t\"github.com\/Masterminds\/semver\"\n)\n\nvar basicResult Result\nvar kub ProjectAtom\n\n\/\/ An analyzer that passes nothing back, but doesn't error. This expressly\n\/\/ creates a situation that shouldn't be able to happen from a general solver\n\/\/ perspective, so it's only useful for particular situations in tests\ntype passthruAnalyzer struct{}\n\nfunc (passthruAnalyzer) GetInfo(ctx build.Context, p ProjectName) (ProjectInfo, error) {\n\treturn ProjectInfo{}, nil\n}\n\nfunc init() {\n\tsv1, _ := semver.NewVersion(\"1.0.0\")\n\tbasicResult = Result{\n\t\tAttempts: 1,\n\t\tProjects: []ProjectAtom{\n\t\t\tProjectAtom{\n\t\t\t\tName: \"github.com\/sdboyer\/testrepo\",\n\t\t\t\tVersion: Version{\n\t\t\t\t\tType: V_Branch,\n\t\t\t\t\tInfo: \"master\",\n\t\t\t\t\tUnderlying: \"4d59fb584b15a94d7401e356d2875c472d76ef45\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tProjectAtom{\n\t\t\t\tName: \"github.com\/Masterminds\/VCSTestRepo\",\n\t\t\t\tVersion: Version{\n\t\t\t\t\tType: V_Semver,\n\t\t\t\t\tInfo: \"1.0.0\",\n\t\t\t\t\tUnderlying: \"30605f6ac35fcb075ad0bfa9296f90a7d891523e\",\n\t\t\t\t\tSemVer: sv1,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t\/\/ just in case something needs punishing, kubernetes is happy to oblige\n\tsv2, _ := semver.NewVersion(\"v1.2.2\")\n\tkub = ProjectAtom{\n\t\tName: \"github.com\/kubernetes\/kubernetes\",\n\t\tVersion: Version{\n\t\t\tType: V_Semver,\n\t\t\tInfo: \"v1.2.2\",\n\t\t\tUnderlying: \"528f879e7d3790ea4287687ef0ab3f2a01cc2718\",\n\t\t\tSemVer: sv2,\n\t\t},\n\t}\n}\n\nfunc TestResultCreateVendorTree(t *testing.T) {\n\tr := basicResult\n\tr.SolveFailure = fmt.Errorf(\"dummy error\")\n\n\ttmp := path.Join(os.TempDir(), \"vsolvtest\")\n\tos.RemoveAll(tmp)\n\n\tsm, err := NewSourceManager(path.Join(tmp, \"cache\"), path.Join(tmp, \"base\"), true, false, passthruAnalyzer{})\n\tif err != nil {\n\t\tt.Errorf(\"NewSourceManager errored unexpectedly: %q\", err)\n\t}\n\n\terr = r.CreateVendorTree(path.Join(tmp, \"export\"), sm)\n\tif err == fmt.Errorf(\"Cannot create vendor tree from failed solution. Failure was dummy error\") {\n\t\tif err == nil {\n\t\t\tt.Errorf(\"Expected error due to result having solve failure, but no error\")\n\t\t} else {\n\t\t\tt.Errorf(\"Expected error due to result having solve failure, but got %s\", err)\n\t\t}\n\t}\n\n\tr.SolveFailure = nil\n\terr = r.CreateVendorTree(path.Join(tmp, \"export\"), sm)\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error while creating vendor tree: %s\", err)\n\t}\n\n\t\/\/ TODO add more checks\n}\n\nfunc BenchmarkCreateVendorTree(b *testing.B) {\n\t\/\/ We're fs-bound here, so restrict to single parallelism\n\tb.SetParallelism(1)\n\n\tr := basicResult\n\ttmp := path.Join(os.TempDir(), \"vsolvtest\")\n\n\tclean := true\n\tsm, err := NewSourceManager(path.Join(tmp, \"cache\"), path.Join(tmp, \"base\"), true, true, passthruAnalyzer{})\n\tif err != nil {\n\t\tb.Errorf(\"NewSourceManager errored unexpectedly: %q\", err)\n\t\tclean = false\n\t}\n\n\t\/\/ Prefetch the projects before timer starts\n\tfor _, pa := range r.Projects {\n\t\t_, err := sm.GetProjectInfo(pa)\n\t\tif err != nil {\n\t\t\tb.Errorf(\"failed getting project info during prefetch: %s\", err)\n\t\t\tclean = false\n\t\t}\n\t}\n\n\tif clean {\n\t\tb.ResetTimer()\n\t\tb.StopTimer()\n\t\texp := path.Join(tmp, \"export\")\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\/\/ Order the loop this way to make it easy to disable final cleanup, to\n\t\t\t\/\/ ease manual inspection\n\t\t\tos.RemoveAll(exp)\n\t\t\tb.StartTimer()\n\t\t\terr = r.CreateVendorTree(exp, sm)\n\t\t\tb.StopTimer()\n\t\t\tif err != nil {\n\t\t\t\tb.Errorf(\"unexpected error after %v iterations: %s\", i, err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tsm.Release()\n\tos.RemoveAll(tmp) \/\/ comment this to leave temp dir behind for inspection\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/koding\/kite\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"log\"\n\t\"github.com\/bitly\/go-nsq\"\n\t\"runtime\"\n \"github.com\/nsabine\/microservices\/controller\/controllerlib\"\n\n)\n\nvar client *kite.Client\nvar k \t *kite.Kite\n\nfunc main() {\n\tfmt.Println(\"Starting Robber\")\n\n\tk = kite.New(\"robber\", \"1.0.0\")\n\tk.Config.Port = 6001\n\tk.Config.DisableAuthentication = true\n\n client = k.NewClient(\"http:\/\/\" + os.Getenv(\"CONTROLLER_SERVICE_HOST\") + \":\" + os.Getenv(\"CONTROLLER_SERVICE_PORT\") + \"\/kite\")\n connected, err := client.DialForever()\n if err != nil {\n k.Log.Fatal(err.Error())\n }\n\n \/\/ Wait until connected\n <-connected\n\n\truntime.GOMAXPROCS(2)\n\twg := &sync.WaitGroup{}\n\twg.Add(2)\n\tgo startKite()\n\tgo startMessaging()\n\twg.Wait()\n}\n\nfunc startKite() {\n\tk.HandleFunc(\"hello\", hello)\n\tfmt.Println(\"Robber starting kite\")\n\tk.Run()\n}\n\nfunc startMessaging() {\n\tfmt.Println(\"Robber configuring NSQ\")\n\n\tconfig := nsq.NewConfig()\n\tq, _ := nsq.NewConsumer(\"tick\", \"ch\", config)\n\tq.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error {\n\t\tlog.Printf(\"Got a message: %s\", string(message.Body[:]))\n\t\ttick()\n\t\treturn nil\n\t}))\n\n\tfmt.Println(\"Trying to connect to NSQ: \" + os.Getenv(\"MESSAGING_SERVICE_HOST\") + \":4150\")\n\n\terr := q.ConnectToNSQD(os.Getenv(\"MESSAGING_SERVICE_HOST\") + \":4150\")\n\tif err != nil {\n\t\tlog.Panic(\"Could not connect\")\n\t}\n\tfmt.Println(\"Robber starting NSQ\")\n}\n\nfunc hello(r *kite.Request) (interface{}, error) {\n\n\tfmt.Println(\"Robber got hello\")\n\n\t\/\/ You can return anything as result, as long as it is JSON marshalable.\n\treturn nil, nil\n}\n\nfunc tick() {\n\tresponse, err := client.Tell(\"update\", &controllerlib.UpdateRequest{\n\t\tId: 1,\n\t\tType: \"robber\",\n\t\tXPos: 1,\n\t\tYPos: 2,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n<commit_msg>fixing syntax<commit_after>package main\n\nimport (\n\t\"github.com\/koding\/kite\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"log\"\n\t\"github.com\/bitly\/go-nsq\"\n\t\"runtime\"\n \"github.com\/nsabine\/microservices\/controller\/controllerlib\"\n\n)\n\nvar client *kite.Client\nvar k \t *kite.Kite\n\nfunc main() {\n\tfmt.Println(\"Starting Robber\")\n\n\tk = kite.New(\"robber\", \"1.0.0\")\n\tk.Config.Port = 6001\n\tk.Config.DisableAuthentication = true\n\n client = k.NewClient(\"http:\/\/\" + os.Getenv(\"CONTROLLER_SERVICE_HOST\") + \":\" + os.Getenv(\"CONTROLLER_SERVICE_PORT\") + \"\/kite\")\n connected, err := client.DialForever()\n if err != nil {\n k.Log.Fatal(err.Error())\n }\n\n \/\/ Wait until connected\n <-connected\n\n\truntime.GOMAXPROCS(2)\n\twg := &sync.WaitGroup{}\n\twg.Add(2)\n\tgo startKite()\n\tgo startMessaging()\n\twg.Wait()\n}\n\nfunc startKite() {\n\tk.HandleFunc(\"hello\", hello)\n\tfmt.Println(\"Robber starting kite\")\n\tk.Run()\n}\n\nfunc startMessaging() {\n\tfmt.Println(\"Robber configuring NSQ\")\n\n\tconfig := nsq.NewConfig()\n\tq, _ := nsq.NewConsumer(\"tick\", \"ch\", config)\n\tq.AddHandler(nsq.HandlerFunc(func(message *nsq.Message) error {\n\t\tlog.Printf(\"Got a message: %s\", string(message.Body[:]))\n\t\ttick()\n\t\treturn nil\n\t}))\n\n\tfmt.Println(\"Trying to connect to NSQ: \" + os.Getenv(\"MESSAGING_SERVICE_HOST\") + \":4150\")\n\n\terr := q.ConnectToNSQD(os.Getenv(\"MESSAGING_SERVICE_HOST\") + \":4150\")\n\tif err != nil {\n\t\tlog.Panic(\"Could not connect\")\n\t}\n\tfmt.Println(\"Robber starting NSQ\")\n}\n\nfunc hello(r *kite.Request) (interface{}, error) {\n\n\tfmt.Println(\"Robber got hello\")\n\n\t\/\/ You can return anything as result, as long as it is JSON marshalable.\n\treturn nil, nil\n}\n\nfunc tick() {\n\t_, err := client.Tell(\"update\", &controllerlib.UpdateRequest{\n\t\tId: 1,\n\t\tType: \"robber\",\n\t\tXPos: 1,\n\t\tYPos: 2,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package tview\n\nimport (\n\t\"github.com\/gdamore\/tcell\"\n)\n\n\/\/ Box implements Primitive with a background and optional elements such as a\n\/\/ border and a title. Most subclasses keep their content contained in the box\n\/\/ but don't necessarily have to.\n\/\/\n\/\/ Note that all classes which subclass from Box will also have access to its\n\/\/ functions.\n\/\/\n\/\/ See https:\/\/github.com\/rivo\/tview\/wiki\/Box for an example.\ntype Box struct {\n\t\/\/ The position of the rect.\n\tx, y, width, height int\n\n\t\/\/ The inner rect reserved for the box's content.\n\tinnerX, innerY, innerWidth, innerHeight int\n\n\t\/\/ Border padding.\n\tpaddingTop, paddingBottom, paddingLeft, paddingRight int\n\n\t\/\/ The box's background color.\n\tbackgroundColor tcell.Color\n\n\t\/\/ Whether or not a border is drawn, reducing the box's space for content by\n\t\/\/ two in width and height.\n\tborder bool\n\n\t\/\/ The color of the border.\n\tborderColor tcell.Color\n\n\t\/\/ The style attributes of the border.\n\tborderAttributes tcell.AttrMask\n\n\t\/\/ The title. Only visible if there is a border, too.\n\ttitle string\n\n\t\/\/ The color of the title.\n\ttitleColor tcell.Color\n\n\t\/\/ The alignment of the title.\n\ttitleAlign int\n\n\t\/\/ Provides a way to find out if this box has focus. We always go through\n\t\/\/ this interface because it may be overridden by implementing classes.\n\tfocus Focusable\n\n\t\/\/ Whether or not this box has focus.\n\thasFocus bool\n\n\t\/\/ An optional capture function which receives a key event and returns the\n\t\/\/ event to be forwarded to the primitive's default input handler (nil if\n\t\/\/ nothing should be forwarded).\n\tinputCapture func(event *tcell.EventKey) *tcell.EventKey\n\n\t\/\/ An optional function which is called before the box is drawn.\n\tdraw func(screen tcell.Screen, x, y, width, height int) (int, int, int, int)\n}\n\n\/\/ NewBox returns a Box without a border.\nfunc NewBox() *Box {\n\tb := &Box{\n\t\twidth: 15,\n\t\theight: 10,\n\t\tinnerX: -1, \/\/ Mark as uninitialized.\n\t\tbackgroundColor: Styles.PrimitiveBackgroundColor,\n\t\tborderColor: Styles.BorderColor,\n\t\ttitleColor: Styles.TitleColor,\n\t\ttitleAlign: AlignCenter,\n\t}\n\tb.focus = b\n\treturn b\n}\n\n\/\/ SetBorderPadding sets the size of the borders around the box content.\nfunc (b *Box) SetBorderPadding(top, bottom, left, right int) *Box {\n\tb.paddingTop, b.paddingBottom, b.paddingLeft, b.paddingRight = top, bottom, left, right\n\treturn b\n}\n\n\/\/ GetRect returns the current position of the rectangle, x, y, width, and\n\/\/ height.\nfunc (b *Box) GetRect() (int, int, int, int) {\n\treturn b.x, b.y, b.width, b.height\n}\n\n\/\/ GetInnerRect returns the position of the inner rectangle (x, y, width,\n\/\/ height), without the border and without any padding. Width and height values\n\/\/ will clamp to 0 and thus never be negative.\nfunc (b *Box) GetInnerRect() (int, int, int, int) {\n\tif b.innerX >= 0 {\n\t\treturn b.innerX, b.innerY, b.innerWidth, b.innerHeight\n\t}\n\tx, y, width, height := b.GetRect()\n\tif b.border {\n\t\tx++\n\t\ty++\n\t\twidth -= 2\n\t\theight -= 2\n\t}\n\tx, y, width, height = x+b.paddingLeft,\n\t\ty+b.paddingTop,\n\t\twidth-b.paddingLeft-b.paddingRight,\n\t\theight-b.paddingTop-b.paddingBottom\n\tif width < 0 {\n\t\twidth = 0\n\t}\n\tif height < 0 {\n\t\theight = 0\n\t}\n\treturn x, y, width, height\n}\n\n\/\/ SetRect sets a new position of the primitive. Note that this has no effect\n\/\/ if this primitive is part of a layout (e.g. Flex, Grid) or if it was added\n\/\/ like this:\n\/\/\n\/\/ application.SetRoot(b, true)\nfunc (b *Box) SetRect(x, y, width, height int) {\n\tb.x = x\n\tb.y = y\n\tb.width = width\n\tb.height = height\n\tb.innerX = -1 \/\/ Mark inner rect as uninitialized.\n}\n\n\/\/ SetDrawFunc sets a callback function which is invoked after the box primitive\n\/\/ has been drawn. This allows you to add a more individual style to the box\n\/\/ (and all primitives which extend it).\n\/\/\n\/\/ The function is provided with the box's dimensions (set via SetRect()). It\n\/\/ must return the box's inner dimensions (x, y, width, height) which will be\n\/\/ returned by GetInnerRect(), used by descendent primitives to draw their own\n\/\/ content.\nfunc (b *Box) SetDrawFunc(handler func(screen tcell.Screen, x, y, width, height int) (int, int, int, int)) *Box {\n\tb.draw = handler\n\treturn b\n}\n\n\/\/ GetDrawFunc returns the callback function which was installed with\n\/\/ SetDrawFunc() or nil if no such function has been installed.\nfunc (b *Box) GetDrawFunc() func(screen tcell.Screen, x, y, width, height int) (int, int, int, int) {\n\treturn b.draw\n}\n\n\/\/ WrapInputHandler wraps an input handler (see InputHandler()) with the\n\/\/ functionality to capture input (see SetInputCapture()) before passing it\n\/\/ on to the provided (default) input handler.\n\/\/\n\/\/ This is only meant to be used by subclassing primitives.\nfunc (b *Box) WrapInputHandler(inputHandler func(*tcell.EventKey, func(p Primitive))) func(*tcell.EventKey, func(p Primitive)) {\n\treturn func(event *tcell.EventKey, setFocus func(p Primitive)) {\n\t\tif b.inputCapture != nil {\n\t\t\tevent = b.inputCapture(event)\n\t\t}\n\t\tif event != nil && inputHandler != nil {\n\t\t\tinputHandler(event, setFocus)\n\t\t}\n\t}\n}\n\n\/\/ InputHandler returns nil.\nfunc (b *Box) InputHandler() func(event *tcell.EventKey, setFocus func(p Primitive)) {\n\treturn b.WrapInputHandler(nil)\n}\n\n\/\/ SetInputCapture installs a function which captures key events before they are\n\/\/ forwarded to the primitive's default key event handler. This function can\n\/\/ then choose to forward that key event (or a different one) to the default\n\/\/ handler by returning it. If nil is returned, the default handler will not\n\/\/ be called.\n\/\/\n\/\/ Providing a nil handler will remove a previously existing handler.\n\/\/\n\/\/ Note that this function will not have an effect on primitives composed of\n\/\/ other primitives, such as Form, Flex, or Grid. Key events are only captured\n\/\/ by the primitives that have focus (e.g. InputField) and only one primitive\n\/\/ can have focus at a time. Composing primitives such as Form pass the focus on\n\/\/ to their contained primitives and thus never receive any key events\n\/\/ themselves. Therefore, they cannot intercept key events.\nfunc (b *Box) SetInputCapture(capture func(event *tcell.EventKey) *tcell.EventKey) *Box {\n\tb.inputCapture = capture\n\treturn b\n}\n\n\/\/ GetInputCapture returns the function installed with SetInputCapture() or nil\n\/\/ if no such function has been installed.\nfunc (b *Box) GetInputCapture() func(event *tcell.EventKey) *tcell.EventKey {\n\treturn b.inputCapture\n}\n\n\/\/ SetBackgroundColor sets the box's background color.\nfunc (b *Box) SetBackgroundColor(color tcell.Color) *Box {\n\tb.backgroundColor = color\n\treturn b\n}\n\n\/\/ SetBorder sets the flag indicating whether or not the box should have a\n\/\/ border.\nfunc (b *Box) SetBorder(show bool) *Box {\n\tb.border = show\n\treturn b\n}\n\n\/\/ SetBorderColor sets the box's border color.\nfunc (b *Box) SetBorderColor(color tcell.Color) *Box {\n\tb.borderColor = color\n\treturn b\n}\n\n\/\/ SetBorderAttributes sets the border's style attributes. You can combine\n\/\/ different attributes using bitmask operations:\n\/\/\n\/\/ box.SetBorderAttributes(tcell.AttrUnderline | tcell.AttrBold)\nfunc (b *Box) SetBorderAttributes(attr tcell.AttrMask) *Box {\n\tb.borderAttributes = attr\n\treturn b\n}\n\n\/\/ SetTitle sets the box's title.\nfunc (b *Box) SetTitle(title string) *Box {\n\tb.title = title\n\treturn b\n}\n\n\/\/ GetTitle returns the box's current title.\nfunc (b *Box) GetTitle() string {\n\treturn b.title\n}\n\n\/\/ SetTitleColor sets the box's title color.\nfunc (b *Box) SetTitleColor(color tcell.Color) *Box {\n\tb.titleColor = color\n\treturn b\n}\n\n\/\/ SetTitleAlign sets the alignment of the title, one of AlignLeft, AlignCenter,\n\/\/ or AlignRight.\nfunc (b *Box) SetTitleAlign(align int) *Box {\n\tb.titleAlign = align\n\treturn b\n}\n\n\/\/ Draw draws this primitive onto the screen.\nfunc (b *Box) Draw(screen tcell.Screen) {\n\t\/\/ Don't draw anything if there is no space.\n\tif b.width <= 0 || b.height <= 0 {\n\t\treturn\n\t}\n\n\tdef := tcell.StyleDefault\n\n\t\/\/ Fill background.\n\tbackground := def.Background(b.backgroundColor)\n\tif b.backgroundColor != tcell.ColorDefault {\n\t\tfor y := b.y; y < b.y+b.height; y++ {\n\t\t\tfor x := b.x; x < b.x+b.width; x++ {\n\t\t\t\tscreen.SetContent(x, y, ' ', nil, background)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Draw border.\n\tif b.border && b.width >= 2 && b.height >= 2 {\n\t\tborder := background.Foreground(b.borderColor) | tcell.Style(b.borderAttributes)\n\t\tvar vertical, horizontal, topLeft, topRight, bottomLeft, bottomRight rune\n\t\tif b.focus.HasFocus() {\n\t\t\thorizontal = Borders.HorizontalFocus\n\t\t\tvertical = Borders.VerticalFocus\n\t\t\ttopLeft = Borders.TopLeftFocus\n\t\t\ttopRight = Borders.TopRightFocus\n\t\t\tbottomLeft = Borders.BottomLeftFocus\n\t\t\tbottomRight = Borders.BottomRightFocus\n\t\t} else {\n\t\t\thorizontal = Borders.Horizontal\n\t\t\tvertical = Borders.Vertical\n\t\t\ttopLeft = Borders.TopLeft\n\t\t\ttopRight = Borders.TopRight\n\t\t\tbottomLeft = Borders.BottomLeft\n\t\t\tbottomRight = Borders.BottomRight\n\t\t}\n\t\tfor x := b.x + 1; x < b.x+b.width-1; x++ {\n\t\t\tscreen.SetContent(x, b.y, horizontal, nil, border)\n\t\t\tscreen.SetContent(x, b.y+b.height-1, horizontal, nil, border)\n\t\t}\n\t\tfor y := b.y + 1; y < b.y+b.height-1; y++ {\n\t\t\tscreen.SetContent(b.x, y, vertical, nil, border)\n\t\t\tscreen.SetContent(b.x+b.width-1, y, vertical, nil, border)\n\t\t}\n\t\tscreen.SetContent(b.x, b.y, topLeft, nil, border)\n\t\tscreen.SetContent(b.x+b.width-1, b.y, topRight, nil, border)\n\t\tscreen.SetContent(b.x, b.y+b.height-1, bottomLeft, nil, border)\n\t\tscreen.SetContent(b.x+b.width-1, b.y+b.height-1, bottomRight, nil, border)\n\n\t\t\/\/ Draw title.\n\t\tif b.title != \"\" && b.width >= 4 {\n\t\t\tprinted, _ := Print(screen, b.title, b.x+1, b.y, b.width-2, b.titleAlign, b.titleColor)\n\t\t\tif len(b.title)-printed > 0 && printed > 0 {\n\t\t\t\t_, _, style, _ := screen.GetContent(b.x+b.width-2, b.y)\n\t\t\t\tfg, _, _ := style.Decompose()\n\t\t\t\tPrint(screen, string(SemigraphicsHorizontalEllipsis), b.x+b.width-2, b.y, 1, AlignLeft, fg)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Call custom draw function.\n\tif b.draw != nil {\n\t\tb.innerX, b.innerY, b.innerWidth, b.innerHeight = b.draw(screen, b.x, b.y, b.width, b.height)\n\t} else {\n\t\t\/\/ Remember the inner rect.\n\t\tb.innerX = -1\n\t\tb.innerX, b.innerY, b.innerWidth, b.innerHeight = b.GetInnerRect()\n\t}\n}\n\n\/\/ Focus is called when this primitive receives focus.\nfunc (b *Box) Focus(delegate func(p Primitive)) {\n\tb.hasFocus = true\n}\n\n\/\/ Blur is called when this primitive loses focus.\nfunc (b *Box) Blur() {\n\tb.hasFocus = false\n}\n\n\/\/ HasFocus returns whether or not this primitive has focus.\nfunc (b *Box) HasFocus() bool {\n\treturn b.hasFocus\n}\n\n\/\/ GetFocusable returns the item's Focusable.\nfunc (b *Box) GetFocusable() Focusable {\n\treturn b.focus\n}\n<commit_msg>Clarifying the role of the Box class better. Resolves #373<commit_after>package tview\n\nimport (\n\t\"github.com\/gdamore\/tcell\"\n)\n\n\/\/ Box implements the Primitive interface with an empty background and optional\n\/\/ elements such as a border and a title. Box itself does not hold any content\n\/\/ but serves as the superclass of all other primitives. Subclasses add their\n\/\/ own content, typically (but not necessarily) keeping their content within the\n\/\/ box's rectangle.\n\/\/\n\/\/ Box provides a number of utility functions available to all primitives.\n\/\/\n\/\/ See https:\/\/github.com\/rivo\/tview\/wiki\/Box for an example.\ntype Box struct {\n\t\/\/ The position of the rect.\n\tx, y, width, height int\n\n\t\/\/ The inner rect reserved for the box's content.\n\tinnerX, innerY, innerWidth, innerHeight int\n\n\t\/\/ Border padding.\n\tpaddingTop, paddingBottom, paddingLeft, paddingRight int\n\n\t\/\/ The box's background color.\n\tbackgroundColor tcell.Color\n\n\t\/\/ Whether or not a border is drawn, reducing the box's space for content by\n\t\/\/ two in width and height.\n\tborder bool\n\n\t\/\/ The color of the border.\n\tborderColor tcell.Color\n\n\t\/\/ The style attributes of the border.\n\tborderAttributes tcell.AttrMask\n\n\t\/\/ The title. Only visible if there is a border, too.\n\ttitle string\n\n\t\/\/ The color of the title.\n\ttitleColor tcell.Color\n\n\t\/\/ The alignment of the title.\n\ttitleAlign int\n\n\t\/\/ Provides a way to find out if this box has focus. We always go through\n\t\/\/ this interface because it may be overridden by implementing classes.\n\tfocus Focusable\n\n\t\/\/ Whether or not this box has focus.\n\thasFocus bool\n\n\t\/\/ An optional capture function which receives a key event and returns the\n\t\/\/ event to be forwarded to the primitive's default input handler (nil if\n\t\/\/ nothing should be forwarded).\n\tinputCapture func(event *tcell.EventKey) *tcell.EventKey\n\n\t\/\/ An optional function which is called before the box is drawn.\n\tdraw func(screen tcell.Screen, x, y, width, height int) (int, int, int, int)\n}\n\n\/\/ NewBox returns a Box without a border.\nfunc NewBox() *Box {\n\tb := &Box{\n\t\twidth: 15,\n\t\theight: 10,\n\t\tinnerX: -1, \/\/ Mark as uninitialized.\n\t\tbackgroundColor: Styles.PrimitiveBackgroundColor,\n\t\tborderColor: Styles.BorderColor,\n\t\ttitleColor: Styles.TitleColor,\n\t\ttitleAlign: AlignCenter,\n\t}\n\tb.focus = b\n\treturn b\n}\n\n\/\/ SetBorderPadding sets the size of the borders around the box content.\nfunc (b *Box) SetBorderPadding(top, bottom, left, right int) *Box {\n\tb.paddingTop, b.paddingBottom, b.paddingLeft, b.paddingRight = top, bottom, left, right\n\treturn b\n}\n\n\/\/ GetRect returns the current position of the rectangle, x, y, width, and\n\/\/ height.\nfunc (b *Box) GetRect() (int, int, int, int) {\n\treturn b.x, b.y, b.width, b.height\n}\n\n\/\/ GetInnerRect returns the position of the inner rectangle (x, y, width,\n\/\/ height), without the border and without any padding. Width and height values\n\/\/ will clamp to 0 and thus never be negative.\nfunc (b *Box) GetInnerRect() (int, int, int, int) {\n\tif b.innerX >= 0 {\n\t\treturn b.innerX, b.innerY, b.innerWidth, b.innerHeight\n\t}\n\tx, y, width, height := b.GetRect()\n\tif b.border {\n\t\tx++\n\t\ty++\n\t\twidth -= 2\n\t\theight -= 2\n\t}\n\tx, y, width, height = x+b.paddingLeft,\n\t\ty+b.paddingTop,\n\t\twidth-b.paddingLeft-b.paddingRight,\n\t\theight-b.paddingTop-b.paddingBottom\n\tif width < 0 {\n\t\twidth = 0\n\t}\n\tif height < 0 {\n\t\theight = 0\n\t}\n\treturn x, y, width, height\n}\n\n\/\/ SetRect sets a new position of the primitive. Note that this has no effect\n\/\/ if this primitive is part of a layout (e.g. Flex, Grid) or if it was added\n\/\/ like this:\n\/\/\n\/\/ application.SetRoot(b, true)\nfunc (b *Box) SetRect(x, y, width, height int) {\n\tb.x = x\n\tb.y = y\n\tb.width = width\n\tb.height = height\n\tb.innerX = -1 \/\/ Mark inner rect as uninitialized.\n}\n\n\/\/ SetDrawFunc sets a callback function which is invoked after the box primitive\n\/\/ has been drawn. This allows you to add a more individual style to the box\n\/\/ (and all primitives which extend it).\n\/\/\n\/\/ The function is provided with the box's dimensions (set via SetRect()). It\n\/\/ must return the box's inner dimensions (x, y, width, height) which will be\n\/\/ returned by GetInnerRect(), used by descendent primitives to draw their own\n\/\/ content.\nfunc (b *Box) SetDrawFunc(handler func(screen tcell.Screen, x, y, width, height int) (int, int, int, int)) *Box {\n\tb.draw = handler\n\treturn b\n}\n\n\/\/ GetDrawFunc returns the callback function which was installed with\n\/\/ SetDrawFunc() or nil if no such function has been installed.\nfunc (b *Box) GetDrawFunc() func(screen tcell.Screen, x, y, width, height int) (int, int, int, int) {\n\treturn b.draw\n}\n\n\/\/ WrapInputHandler wraps an input handler (see InputHandler()) with the\n\/\/ functionality to capture input (see SetInputCapture()) before passing it\n\/\/ on to the provided (default) input handler.\n\/\/\n\/\/ This is only meant to be used by subclassing primitives.\nfunc (b *Box) WrapInputHandler(inputHandler func(*tcell.EventKey, func(p Primitive))) func(*tcell.EventKey, func(p Primitive)) {\n\treturn func(event *tcell.EventKey, setFocus func(p Primitive)) {\n\t\tif b.inputCapture != nil {\n\t\t\tevent = b.inputCapture(event)\n\t\t}\n\t\tif event != nil && inputHandler != nil {\n\t\t\tinputHandler(event, setFocus)\n\t\t}\n\t}\n}\n\n\/\/ InputHandler returns nil.\nfunc (b *Box) InputHandler() func(event *tcell.EventKey, setFocus func(p Primitive)) {\n\treturn b.WrapInputHandler(nil)\n}\n\n\/\/ SetInputCapture installs a function which captures key events before they are\n\/\/ forwarded to the primitive's default key event handler. This function can\n\/\/ then choose to forward that key event (or a different one) to the default\n\/\/ handler by returning it. If nil is returned, the default handler will not\n\/\/ be called.\n\/\/\n\/\/ Providing a nil handler will remove a previously existing handler.\n\/\/\n\/\/ Note that this function will not have an effect on primitives composed of\n\/\/ other primitives, such as Form, Flex, or Grid. Key events are only captured\n\/\/ by the primitives that have focus (e.g. InputField) and only one primitive\n\/\/ can have focus at a time. Composing primitives such as Form pass the focus on\n\/\/ to their contained primitives and thus never receive any key events\n\/\/ themselves. Therefore, they cannot intercept key events.\nfunc (b *Box) SetInputCapture(capture func(event *tcell.EventKey) *tcell.EventKey) *Box {\n\tb.inputCapture = capture\n\treturn b\n}\n\n\/\/ GetInputCapture returns the function installed with SetInputCapture() or nil\n\/\/ if no such function has been installed.\nfunc (b *Box) GetInputCapture() func(event *tcell.EventKey) *tcell.EventKey {\n\treturn b.inputCapture\n}\n\n\/\/ SetBackgroundColor sets the box's background color.\nfunc (b *Box) SetBackgroundColor(color tcell.Color) *Box {\n\tb.backgroundColor = color\n\treturn b\n}\n\n\/\/ SetBorder sets the flag indicating whether or not the box should have a\n\/\/ border.\nfunc (b *Box) SetBorder(show bool) *Box {\n\tb.border = show\n\treturn b\n}\n\n\/\/ SetBorderColor sets the box's border color.\nfunc (b *Box) SetBorderColor(color tcell.Color) *Box {\n\tb.borderColor = color\n\treturn b\n}\n\n\/\/ SetBorderAttributes sets the border's style attributes. You can combine\n\/\/ different attributes using bitmask operations:\n\/\/\n\/\/ box.SetBorderAttributes(tcell.AttrUnderline | tcell.AttrBold)\nfunc (b *Box) SetBorderAttributes(attr tcell.AttrMask) *Box {\n\tb.borderAttributes = attr\n\treturn b\n}\n\n\/\/ SetTitle sets the box's title.\nfunc (b *Box) SetTitle(title string) *Box {\n\tb.title = title\n\treturn b\n}\n\n\/\/ GetTitle returns the box's current title.\nfunc (b *Box) GetTitle() string {\n\treturn b.title\n}\n\n\/\/ SetTitleColor sets the box's title color.\nfunc (b *Box) SetTitleColor(color tcell.Color) *Box {\n\tb.titleColor = color\n\treturn b\n}\n\n\/\/ SetTitleAlign sets the alignment of the title, one of AlignLeft, AlignCenter,\n\/\/ or AlignRight.\nfunc (b *Box) SetTitleAlign(align int) *Box {\n\tb.titleAlign = align\n\treturn b\n}\n\n\/\/ Draw draws this primitive onto the screen.\nfunc (b *Box) Draw(screen tcell.Screen) {\n\t\/\/ Don't draw anything if there is no space.\n\tif b.width <= 0 || b.height <= 0 {\n\t\treturn\n\t}\n\n\tdef := tcell.StyleDefault\n\n\t\/\/ Fill background.\n\tbackground := def.Background(b.backgroundColor)\n\tif b.backgroundColor != tcell.ColorDefault {\n\t\tfor y := b.y; y < b.y+b.height; y++ {\n\t\t\tfor x := b.x; x < b.x+b.width; x++ {\n\t\t\t\tscreen.SetContent(x, y, ' ', nil, background)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Draw border.\n\tif b.border && b.width >= 2 && b.height >= 2 {\n\t\tborder := background.Foreground(b.borderColor) | tcell.Style(b.borderAttributes)\n\t\tvar vertical, horizontal, topLeft, topRight, bottomLeft, bottomRight rune\n\t\tif b.focus.HasFocus() {\n\t\t\thorizontal = Borders.HorizontalFocus\n\t\t\tvertical = Borders.VerticalFocus\n\t\t\ttopLeft = Borders.TopLeftFocus\n\t\t\ttopRight = Borders.TopRightFocus\n\t\t\tbottomLeft = Borders.BottomLeftFocus\n\t\t\tbottomRight = Borders.BottomRightFocus\n\t\t} else {\n\t\t\thorizontal = Borders.Horizontal\n\t\t\tvertical = Borders.Vertical\n\t\t\ttopLeft = Borders.TopLeft\n\t\t\ttopRight = Borders.TopRight\n\t\t\tbottomLeft = Borders.BottomLeft\n\t\t\tbottomRight = Borders.BottomRight\n\t\t}\n\t\tfor x := b.x + 1; x < b.x+b.width-1; x++ {\n\t\t\tscreen.SetContent(x, b.y, horizontal, nil, border)\n\t\t\tscreen.SetContent(x, b.y+b.height-1, horizontal, nil, border)\n\t\t}\n\t\tfor y := b.y + 1; y < b.y+b.height-1; y++ {\n\t\t\tscreen.SetContent(b.x, y, vertical, nil, border)\n\t\t\tscreen.SetContent(b.x+b.width-1, y, vertical, nil, border)\n\t\t}\n\t\tscreen.SetContent(b.x, b.y, topLeft, nil, border)\n\t\tscreen.SetContent(b.x+b.width-1, b.y, topRight, nil, border)\n\t\tscreen.SetContent(b.x, b.y+b.height-1, bottomLeft, nil, border)\n\t\tscreen.SetContent(b.x+b.width-1, b.y+b.height-1, bottomRight, nil, border)\n\n\t\t\/\/ Draw title.\n\t\tif b.title != \"\" && b.width >= 4 {\n\t\t\tprinted, _ := Print(screen, b.title, b.x+1, b.y, b.width-2, b.titleAlign, b.titleColor)\n\t\t\tif len(b.title)-printed > 0 && printed > 0 {\n\t\t\t\t_, _, style, _ := screen.GetContent(b.x+b.width-2, b.y)\n\t\t\t\tfg, _, _ := style.Decompose()\n\t\t\t\tPrint(screen, string(SemigraphicsHorizontalEllipsis), b.x+b.width-2, b.y, 1, AlignLeft, fg)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Call custom draw function.\n\tif b.draw != nil {\n\t\tb.innerX, b.innerY, b.innerWidth, b.innerHeight = b.draw(screen, b.x, b.y, b.width, b.height)\n\t} else {\n\t\t\/\/ Remember the inner rect.\n\t\tb.innerX = -1\n\t\tb.innerX, b.innerY, b.innerWidth, b.innerHeight = b.GetInnerRect()\n\t}\n}\n\n\/\/ Focus is called when this primitive receives focus.\nfunc (b *Box) Focus(delegate func(p Primitive)) {\n\tb.hasFocus = true\n}\n\n\/\/ Blur is called when this primitive loses focus.\nfunc (b *Box) Blur() {\n\tb.hasFocus = false\n}\n\n\/\/ HasFocus returns whether or not this primitive has focus.\nfunc (b *Box) HasFocus() bool {\n\treturn b.hasFocus\n}\n\n\/\/ GetFocusable returns the item's Focusable.\nfunc (b *Box) GetFocusable() Focusable {\n\treturn b.focus\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Factom Foundation\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n)\n\nfunc buy(args []string) error {\n\tos.Args = args\n\n\tvar (\n\t\tamt string = \"0\"\n\t\tserv = flag.String(\"s\", \"localhost:8088\", \"path to the factomclient\")\n\t)\n\tflag.Parse()\n\targs = flag.Args()\n\tif len(args) < 1 {\n\t\treturn man(\"buy\")\n\t}\n\tserver := \"http:\/\/\" + *serv + \"\/v1\/buycredit\"\n\tdata := url.Values{\n\t\t\"to\": {\"wallet\"},\n\t\t\"amount\": {amt},\n\t}\n\n\tresp, err := http.PostForm(server, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\tfmt.Println(string(p))\n\n\treturn nil\n}\n<commit_msg>argument parsing for buy.go<commit_after>\/\/ Copyright 2015 Factom Foundation\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n)\n\nfunc buy(args []string) error {\n\tos.Args = args\n\n\tvar (\n\t\tamt, addr string = \"0\", \"wallet\"\n\t\tserv = flag.String(\"s\", \"localhost:8088\", \"path to the factomclient\")\n\t)\n\tflag.Parse()\n\targs = flag.Args()\n\tif len(args) < 1 {\n\t\treturn man(\"buy\")\n\t}\n\tamt = args[0]\n\tif len(args) > 1 {\n\t\taddr = args[1]\n\t}\n\n\tserver := \"http:\/\/\" + *serv + \"\/v1\/buycredit\"\n\tdata := url.Values{\n\t\t\"to\": {addr},\n\t\t\"amount\": {amt},\n\t}\n\n\tresp, err := http.PostForm(server, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tp, err := ioutil.ReadAll(resp.Body)\n\tresp.Body.Close()\n\tfmt.Println(string(p))\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package bwt\n\nimport (\n\t\"errors\"\n\t\"sort\"\n\n\t\"github.com\/shenwei356\/util\/byteutil\"\n)\n\n\/\/ ErrEndSymbolExisted means you should choose another EndSymbol\nvar ErrEndSymbolExisted = errors.New(\"bwt: end-symbol existed in string\")\n\n\/\/ Transform returns Burrows–Wheeler transform of a byte slice.\n\/\/ See https:\/\/en.wikipedia.org\/wiki\/Burrows%E2%80%93Wheeler_transform\nfunc Transform(s []byte, es byte) ([]byte, [][]byte, error) {\n\tcount := byteutil.CountOfByte(s)\n\tif _, ok := count[es]; ok {\n\t\treturn nil, nil, ErrEndSymbolExisted\n\t}\n\ts = append(s, es)\n\tn := len(s)\n\n\trotations := make([][]byte, n)\n\ti := 0\n\tfor j := 0; j < n; j++ {\n\t\trotations[i] = append(s[n-j:], s[0:n-j]...)\n\t\ti++\n\t}\n\tsort.Sort(byteutil.SliceOfByteSlice(rotations))\n\n\tbwt := make([]byte, n)\n\ti = 0\n\tfor _, t := range rotations {\n\t\tbwt[i] = t[n-1]\n\t\ti++\n\t}\n\treturn bwt, rotations, nil\n}\n\n\/\/ InverseTransform reverses the bwt to original byte slice\nfunc InverseTransform(t []byte, es byte) []byte {\n\tn := len(t)\n\tlines := make([][]byte, n)\n\tfor i := 0; i < n; i++ {\n\t\tlines[i] = make([]byte, n)\n\t}\n\n\tfor i := 0; i < n; i++ {\n\t\tfor j := 0; j < n; j++ {\n\t\t\tlines[j][n-1-i] = t[j]\n\t\t}\n\t\tsort.Sort(byteutil.SliceOfByteSlice(lines))\n\t}\n\n\ts := make([]byte, n-1)\n\tfor _, line := range lines {\n\t\tif line[n-1] == es {\n\t\t\ts = line[0 : n-1]\n\t\t\tbreak\n\t\t}\n\t}\n\treturn s\n}\n\n\/\/ SuffixArray returns the suffix array of s\nfunc SuffixArray(s []byte) []int {\n\tn := len(s)\n\tsuffixMap := make(map[string]int, n)\n\tfor i := 0; i < n; i++ {\n\t\tsuffixMap[string(s[i:])] = i\n\t}\n\tsuffixes := make([]string, n)\n\ti := 0\n\tfor suffix := range suffixMap {\n\t\tsuffixes[i] = suffix\n\t\ti++\n\t}\n\tindice := make([]int, n+1)\n\tindice[0] = n\n\ti = 1\n\tsort.Strings(suffixes)\n\tfor _, suffix := range suffixes {\n\t\tindice[i] = suffixMap[suffix]\n\t\ti++\n\t}\n\treturn indice\n}\n\n\/\/ ErrInvalidSuffixArray means length of sa is not equal to 1+len(s)\nvar ErrInvalidSuffixArray = errors.New(\"bwt: invalid suffix array\")\n\n\/\/ FromSuffixArray compute BWT from sa\nfunc FromSuffixArray(s []byte, sa []int, es byte) ([]byte, error) {\n\tif len(s)+1 != len(sa) || sa[0] != len(s) {\n\t\treturn nil, ErrInvalidSuffixArray\n\t}\n\tbwt := make([]byte, len(sa))\n\tbwt[0] = s[len(s)-1]\n\tfor i := 1; i < len(sa); i++ {\n\t\tif sa[i] == 0 {\n\t\t\tbwt[i] = es\n\t\t} else {\n\t\t\tbwt[i] = s[sa[i]-1]\n\t\t}\n\t}\n\treturn bwt, nil\n}\n<commit_msg>optimize SuffixArray<commit_after>package bwt\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"sort\"\n\n\t\"github.com\/shenwei356\/util\/byteutil\"\n)\n\n\/\/ ErrEndSymbolExisted means you should choose another EndSymbol\nvar ErrEndSymbolExisted = errors.New(\"bwt: end-symbol existed in string\")\n\n\/\/ Transform returns Burrows–Wheeler transform of a byte slice.\n\/\/ See https:\/\/en.wikipedia.org\/wiki\/Burrows%E2%80%93Wheeler_transform\nfunc Transform(s []byte, es byte) ([]byte, [][]byte, error) {\n\tcount := byteutil.CountOfByte(s)\n\tif _, ok := count[es]; ok {\n\t\treturn nil, nil, ErrEndSymbolExisted\n\t}\n\ts = append(s, es)\n\tn := len(s)\n\n\trotations := make([][]byte, n)\n\ti := 0\n\tfor j := 0; j < n; j++ {\n\t\trotations[i] = append(s[n-j:], s[0:n-j]...)\n\t\ti++\n\t}\n\tsort.Sort(byteutil.SliceOfByteSlice(rotations))\n\n\tbwt := make([]byte, n)\n\ti = 0\n\tfor _, t := range rotations {\n\t\tbwt[i] = t[n-1]\n\t\ti++\n\t}\n\treturn bwt, rotations, nil\n}\n\n\/\/ InverseTransform reverses the bwt to original byte slice\nfunc InverseTransform(t []byte, es byte) []byte {\n\tn := len(t)\n\tlines := make([][]byte, n)\n\tfor i := 0; i < n; i++ {\n\t\tlines[i] = make([]byte, n)\n\t}\n\n\tfor i := 0; i < n; i++ {\n\t\tfor j := 0; j < n; j++ {\n\t\t\tlines[j][n-1-i] = t[j]\n\t\t}\n\t\tsort.Sort(byteutil.SliceOfByteSlice(lines))\n\t}\n\n\ts := make([]byte, n-1)\n\tfor _, line := range lines {\n\t\tif line[n-1] == es {\n\t\t\ts = line[0 : n-1]\n\t\t\tbreak\n\t\t}\n\t}\n\treturn s\n}\n\n\/\/ SuffixArray returns the suffix array of s\nfunc SuffixArray(s []byte) []int {\n\tsa := make([]int, len(s)+1)\n\tsa[0] = len(s)\n\n\tfor i := 0; i < len(s); i++ {\n\t\tsa[i+1] = i\n\t}\n\tsort.Slice(sa[1:], func(i, j int) bool {\n\t\treturn bytes.Compare(s[sa[i+1]:], s[sa[j+1]:]) < 0\n\t})\n\treturn sa\n}\n\n\/\/ ErrInvalidSuffixArray means length of sa is not equal to 1+len(s)\nvar ErrInvalidSuffixArray = errors.New(\"bwt: invalid suffix array\")\n\n\/\/ FromSuffixArray compute BWT from sa\nfunc FromSuffixArray(s []byte, sa []int, es byte) ([]byte, error) {\n\tif len(s)+1 != len(sa) || sa[0] != len(s) {\n\t\treturn nil, ErrInvalidSuffixArray\n\t}\n\tbwt := make([]byte, len(sa))\n\tbwt[0] = s[len(s)-1]\n\tfor i := 1; i < len(sa); i++ {\n\t\tif sa[i] == 0 {\n\t\t\tbwt[i] = es\n\t\t} else {\n\t\t\tbwt[i] = s[sa[i]-1]\n\t\t}\n\t}\n\treturn bwt, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package hpcloud\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n)\n\nfunc (a Access) ActivateCDNContainer(container string) error {\n\tclient := &http.Client{}\n\tpath := fmt.Sprintf(\"%s%s\/%s\", CDN_URL, a.TenantID, container)\n\treq, err := http.NewRequest(\"PUT\", path, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Add(\"X-Auth-Token\", a.AuthToken())\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == http.StatusCreated {\n\t\treturn nil\n\t}\n\treturn errors.New(fmt.Sprintf(\"Non-201 status code: %d\", resp.StatusCode))\n}\n\nfunc (a Access) ListCDNEnabledContainers(enabled_only bool) (*CDNContainers, error) {\n\tclient := &http.Client{}\n\tqstring := \"?format=json\"\n\tif enabled_only {\n\t\tqstring = qstring + \"&enabled_only=true\"\n\t}\n\tpath := fmt.Sprintf(\"%s%s%s\", CDN_URL, a.TenantID, qstring)\n\treq, err := http.NewRequest(\"GET\", path, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"X-Auth-Token\", a.AuthToken())\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode == http.StatusOK {\n\t\tc := &CDNContainers{}\n\t\terr = json.Unmarshal(b, c)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn c, nil\n\t}\n\treturn nil, errors.New(fmt.Sprintf(\"Non-200 status code: %d\", resp.StatusCode))\n}\n\nfunc (a Access) UpdateCDNEnabledContainerMetadata(container string, data map[string]string) error {\n\tclient := &http.Client{}\n\tpath := fmt.Sprintf(\"%s%s\/%s\", CDN_URL, a.TenantID, container)\n\treq, err := http.NewRequest(\"POST\", path, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Add(\"X-Auth-Token\", a.AuthToken())\n\tfor key, value := range data {\n\t\treq.Header.Add(key, value)\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == http.StatusAccepted {\n\t\treturn nil\n\t}\n\treturn errors.New(fmt.Sprintf(\"Non-202 status code: %d\", resp.StatusCode))\n\n}\n\nfunc (a Access) RetrieveCDNEnabledContainerMetadata(container string) (*http.Header, error) {\n\tclient := &http.Client{}\n\tpath := fmt.Sprintf(\"%s%s\/%s\", CDN_URL, a.TenantID, container)\n\treq, err := http.NewRequest(\"HEAD\", path, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"X-Auth-Token\", a.AuthToken())\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == http.StatusNoContent {\n\t\treturn &resp.Header, nil\n\t}\n\treturn nil, errors.New(fmt.Sprintf(\"Non-205 status code: %d\", resp.StatusCode))\n}\n\nfunc (a Access) DisableCDNEnabledContainer(container string) {\n\n}\n\nfunc (a Access) DeleteCDNEnabledContainer(container string) {\n\n}\n\ntype CDNContainers []CDNContainer\ntype CDNContainer struct {\n\tName string `json:\"name\"`\n\tCDNEnabled bool `json:\"cdn_enabled\"`\n\tTTL int64 `json:\"ttl\"`\n\tCDNUri string `json:\"x-cdn-uri\"`\n\tSSLCDNUri string `json:\"x-cdn-ssl-uri\"`\n\tLogRetention bool `json:\"log_retention\"`\n}\n<commit_msg>Added the Disabling of CDN containers.<commit_after>package hpcloud\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n)\n\nfunc (a Access) baseCDNRequest(method, container string, StatusCode int) error {\n\tclient := &http.Client{}\n\tpath := fmt.Sprintf(\"%s%s\/%s\", CDN_URL, a.TenantID, container)\n\treq, err := http.NewRequest(method, path, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Add(\"X-Auth-Token\", a.AuthToken())\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == StatusCode {\n\t\treturn nil\n\t}\n\treturn errors.New(fmt.Sprintf(\"Non-%d status code: %d\", StatusCode, resp.StatusCode))\n\n}\n\nfunc (a Access) ActivateCDNContainer(container string) error {\n\treturn a.baseCDNRequest(\"PUT\", container, http.StatusCreated)\n}\n\nfunc (a Access) ListCDNEnabledContainers(enabled_only bool) (*CDNContainers, error) {\n\tclient := &http.Client{}\n\tqstring := \"?format=json\"\n\tif enabled_only {\n\t\tqstring = qstring + \"&enabled_only=true\"\n\t}\n\tpath := fmt.Sprintf(\"%s%s%s\", CDN_URL, a.TenantID, qstring)\n\treq, err := http.NewRequest(\"GET\", path, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"X-Auth-Token\", a.AuthToken())\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode == http.StatusOK {\n\t\tc := &CDNContainers{}\n\t\terr = json.Unmarshal(b, c)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn c, nil\n\t}\n\treturn nil, errors.New(fmt.Sprintf(\"Non-200 status code: %d\", resp.StatusCode))\n}\n\nfunc (a Access) UpdateCDNEnabledContainerMetadata(container string, data map[string]string) error {\n\tclient := &http.Client{}\n\tpath := fmt.Sprintf(\"%s%s\/%s\", CDN_URL, a.TenantID, container)\n\treq, err := http.NewRequest(\"POST\", path, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treq.Header.Add(\"X-Auth-Token\", a.AuthToken())\n\tfor key, value := range data {\n\t\treq.Header.Add(key, value)\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == http.StatusAccepted {\n\t\treturn nil\n\t}\n\treturn errors.New(fmt.Sprintf(\"Non-202 status code: %d\", resp.StatusCode))\n\n}\n\nfunc (a Access) RetrieveCDNEnabledContainerMetadata(container string) (*http.Header, error) {\n\tclient := &http.Client{}\n\tpath := fmt.Sprintf(\"%s%s\/%s\", CDN_URL, a.TenantID, container)\n\treq, err := http.NewRequest(\"HEAD\", path, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"X-Auth-Token\", a.AuthToken())\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode == http.StatusNoContent {\n\t\treturn &resp.Header, nil\n\t}\n\treturn nil, errors.New(fmt.Sprintf(\"Non-205 status code: %d\", resp.StatusCode))\n}\n\nfunc (a Access) DisableCDNEnabledContainer(container string) error {\n\treturn a.UpdateCDNEnabledContainerMetadata(container, map[string]string{\n\t\t\"X-CDN-Enabled\": \"False\",\n\t})\n}\n\nfunc (a Access) DeleteCDNEnabledContainer(container string) {\n\n}\n\ntype CDNContainers []CDNContainer\ntype CDNContainer struct {\n\tName string `json:\"name\"`\n\tCDNEnabled bool `json:\"cdn_enabled\"`\n\tTTL int64 `json:\"ttl\"`\n\tCDNUri string `json:\"x-cdn-uri\"`\n\tSSLCDNUri string `json:\"x-cdn-ssl-uri\"`\n\tLogRetention bool `json:\"log_retention\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package cfg\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\/\/\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc ParseFile(path string) (map[string]string, error) {\n\tvar buffer bytes.Buffer\n\tif _, err := os.Stat(path); err != nil {\n\t\treturn nil, err\n\t}\n\tfl, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer fl.Close()\n\t_, err = io.Copy(&buffer, fl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm, err := parse(&buffer)\n\tif err != nil {\n\t\treturn nil, errors.New(path + \" :: \" + err.Error())\n\t}\n\treturn m, nil\n}\n\nfunc ParseString(val string) (map[string]string, error) {\n\tvar buffer bytes.Buffer\n\t_, err := buffer.WriteString(val)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parse(&buffer)\n}\n\nfunc parse(buf *bytes.Buffer) (map[string]string, error) {\n\tinsideVar := false\n\tvar varName bytes.Buffer\n\tvar varVal bytes.Buffer\n\tb := bufio.NewReader(buf)\n\tlc := 0\n\tmp := make(map[string]string, 0)\n\tfor {\n\t\tl, e := b.ReadString('\\n')\n\t\tif e != nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\t\/\/fmt.Println(\"!RIOT! \", l)\n\t\t\ta := []rune(l)\n\t\t\tif a[0] == '#' && !insideVar {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor i := 0; i < len(a); i++ {\n\t\t\t\t\/\/fmt.Printf(\" %s (%v) \", string(a[i]), lc)\n\t\t\t\tif !insideVar {\n\t\t\t\t\tfmt.Print(insideVar, \" \")\n\t\t\t\t\tswitch a[i] {\n\t\t\t\t\tcase '=':\n\t\t\t\t\t\t\/\/fmt.Print(\" is '=' \")\n\t\t\t\t\t\tif varName.Len() < 1 {\n\t\t\t\t\t\t\treturn nil, errors.New(\"Syntax error at line \" + strconv.Itoa(lc) + \" (\" + strconv.Itoa(i) + \"): \" + l)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tinsideVar = true\n\t\t\t\t\tcase '\\\\', '\\'', '\"', '#', '^', '&':\n\t\t\t\t\t\treturn nil, errors.New(\"Syntax error at line \" + strconv.Itoa(lc) + \" (\" + strconv.Itoa(i) + \"): \" + l)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tvarName.WriteRune(a[i])\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t\/\/fmt.Print(insideVar)\n\t\t\t\t\tif i == len(a)-1 && a[i] == '\\\\' {\n\t\t\t\t\t\tvarVal.WriteRune('\\n')\n\t\t\t\t\t\t\/\/fmt.Print(\"\\n\")\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if i == len(a)-1 {\n\t\t\t\t\t\t\/\/fmt.Print(\"inside var will be cause because EOL\\n\")\n\t\t\t\t\t\tinsideVar = false\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\t\/\/TODO: parse \"\\\" better\n\t\t\t\t\tvarVal.WriteRune(a[i])\n\t\t\t\t}\n\t\t\t}\n\t\t\tlc++\n\t\t\tif !insideVar {\n\t\t\t\tv00 := varName.String()\n\t\t\t\tv01 := varVal.String()\n\t\t\t\t\/\/fmt.Println(\"v00\", v00, \"v01\", v01)\n\t\t\t\tmp[strings.TrimSpace(v00)] = strings.TrimSpace(v01)\n\t\t\t\tvarName.Truncate(0)\n\t\t\t\tvarVal.Truncate(0)\n\t\t\t}\n\t\t}\n\n\t}\n\treturn mp, nil\n}\n<commit_msg>removed fmt<commit_after>package cfg\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\/\/\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc ParseFile(path string) (map[string]string, error) {\n\tvar buffer bytes.Buffer\n\tif _, err := os.Stat(path); err != nil {\n\t\treturn nil, err\n\t}\n\tfl, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer fl.Close()\n\t_, err = io.Copy(&buffer, fl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm, err := parse(&buffer)\n\tif err != nil {\n\t\treturn nil, errors.New(path + \" :: \" + err.Error())\n\t}\n\treturn m, nil\n}\n\nfunc ParseString(val string) (map[string]string, error) {\n\tvar buffer bytes.Buffer\n\t_, err := buffer.WriteString(val)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn parse(&buffer)\n}\n\nfunc parse(buf *bytes.Buffer) (map[string]string, error) {\n\tinsideVar := false\n\tvar varName bytes.Buffer\n\tvar varVal bytes.Buffer\n\tb := bufio.NewReader(buf)\n\tlc := 0\n\tmp := make(map[string]string, 0)\n\tfor {\n\t\tl, e := b.ReadString('\\n')\n\t\tif e != nil {\n\t\t\tbreak\n\t\t} else {\n\t\t\t\/\/fmt.Println(\"!RIOT! \", l)\n\t\t\ta := []rune(l)\n\t\t\tif a[0] == '#' && !insideVar {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor i := 0; i < len(a); i++ {\n\t\t\t\t\/\/fmt.Printf(\" %s (%v) \", string(a[i]), lc)\n\t\t\t\tif !insideVar {\n\t\t\t\t\t\/\/fmt.Print(insideVar, \" \")\n\t\t\t\t\tswitch a[i] {\n\t\t\t\t\tcase '=':\n\t\t\t\t\t\t\/\/fmt.Print(\" is '=' \")\n\t\t\t\t\t\tif varName.Len() < 1 {\n\t\t\t\t\t\t\treturn nil, errors.New(\"Syntax error at line \" + strconv.Itoa(lc) + \" (\" + strconv.Itoa(i) + \"): \" + l)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tinsideVar = true\n\t\t\t\t\tcase '\\\\', '\\'', '\"', '#', '^', '&':\n\t\t\t\t\t\treturn nil, errors.New(\"Syntax error at line \" + strconv.Itoa(lc) + \" (\" + strconv.Itoa(i) + \"): \" + l)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tvarName.WriteRune(a[i])\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\t\/\/fmt.Print(insideVar)\n\t\t\t\t\tif i == len(a)-1 && a[i] == '\\\\' {\n\t\t\t\t\t\tvarVal.WriteRune('\\n')\n\t\t\t\t\t\t\/\/fmt.Print(\"\\n\")\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t} else if i == len(a)-1 {\n\t\t\t\t\t\t\/\/fmt.Print(\"inside var will be cause because EOL\\n\")\n\t\t\t\t\t\tinsideVar = false\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\t\/\/TODO: parse \"\\\" better\n\t\t\t\t\tvarVal.WriteRune(a[i])\n\t\t\t\t}\n\t\t\t}\n\t\t\tlc++\n\t\t\tif !insideVar {\n\t\t\t\tv00 := varName.String()\n\t\t\t\tv01 := varVal.String()\n\t\t\t\t\/\/fmt.Println(\"v00\", v00, \"v01\", v01)\n\t\t\t\tmp[strings.TrimSpace(v00)] = strings.TrimSpace(v01)\n\t\t\t\tvarName.Truncate(0)\n\t\t\t\tvarVal.Truncate(0)\n\t\t\t}\n\t\t}\n\n\t}\n\treturn mp, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package cgp\n\nimport (\n\t\"math\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype CGPFunction func([]float64) float64\ntype EvalFunction func(Individual) float64\ntype RndConstFunction func() float64\n\ntype CGPOptions struct {\n\tPopSize int\n\tNumGenes int\n\tMutationRate float64\n\tNumInputs int\n\tNumOutputs int\n\tMaxArity int\n\tFunctionList []CGPFunction\n\tRandConst RndConstFunction\n\tEvaluator EvalFunction\n\tRand *rand.Rand\n}\n\ntype cgp struct {\n\tOptions CGPOptions\n\tPopulation []Individual\n}\n\nfunc New(options CGPOptions) *cgp {\n\n\tif options.PopSize < 2 {\n\t\tpanic(\"Population size must be at least 2.\")\n\t}\n\tif options.NumGenes < 0 {\n\t\tpanic(\"NumGenes can't be negative.\")\n\t}\n\tif options.MutationRate < 0 || options.MutationRate > 1 {\n\t\tpanic(\"Mutation rate must be between 0 and 1.\")\n\t}\n\tif options.NumInputs < 0 {\n\t\tpanic(\"NumInputs can't be negative.\")\n\t}\n\tif options.NumOutputs < 1 {\n\t\tpanic(\"At least one output is necessary.\")\n\t}\n\tif options.MaxArity < 0 {\n\t\tpanic(\"MaxArity can't be negative.\")\n\t}\n\tif len(options.FunctionList) == 0 {\n\t\tpanic(\"At least one function must be provided.\")\n\t}\n\tif options.RandConst == nil {\n\t\tpanic(\"You must supply a RandConst function.\")\n\t}\n\tif options.Evaluator == nil {\n\t\tpanic(\"You must supply an Evaluator function.\")\n\t}\n\n\tif options.Rand == nil {\n\t\toptions.Rand = rand.New(rand.NewSource(time.Now().UnixNano()))\n\t}\n\n\tresult := &cgp{\n\t\tOptions: options,\n\t\tPopulation: make([]Individual, 1, options.PopSize),\n\t}\n\n\tresult.Population[0] = NewIndividual(&options)\n\n\treturn result\n}\n\nfunc (cgp *cgp) RunGeneration() {\n\t\/\/ Create offspring\n\tcgp.Population = cgp.Population[0:1]\n\tfor i := 1; i < cgp.Options.PopSize; i++ {\n\t\tcgp.Population = append(cgp.Population, cgp.Population[0].Mutate())\n\t}\n\n\t\/\/ Evaluate offspring (in parallel)\n\tvar wg sync.WaitGroup\n\tfor i := 1; i < cgp.Options.PopSize; i++ {\n\t\t\/\/ If the individual computes the same function as the parent, skip\n\t\t\/\/ evaluation and just use the parent's fitness\n\t\tif cgp.Population[i].CacheID() == cgp.Population[0].CacheID() {\n\t\t\tcgp.Population[i].Fitness = cgp.Population[0].Fitness\n\t\t} else {\n\t\t\t\/\/ Individual is different from parent, compute fitness\n\t\t\twg.Add(1)\n\t\t\tgo func(i int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tcgp.Population[i].Fitness = cgp.Options.Evaluator(cgp.Population[i])\n\t\t\t}(i)\n\t\t}\n\t}\n\twg.Wait()\n\n\t\/\/ Replace parent with best offspring\n\tbestFitness := math.Inf(1)\n\tbestIndividual := 0\n\tfor i := 1; i < cgp.Options.PopSize; i++ {\n\t\tif cgp.Population[i].Fitness < bestFitness {\n\t\t\tbestFitness = cgp.Population[i].Fitness\n\t\t\tbestIndividual = i\n\t\t}\n\t}\n\n\tif bestFitness <= cgp.Population[0].Fitness {\n\t\tcgp.Population[0] = cgp.Population[bestIndividual]\n\t}\n}\n<commit_msg>Count the number of evaluations<commit_after>package cgp\n\nimport (\n\t\"math\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype CGPFunction func([]float64) float64\ntype EvalFunction func(Individual) float64\ntype RndConstFunction func() float64\n\ntype CGPOptions struct {\n\tPopSize int\n\tNumGenes int\n\tMutationRate float64\n\tNumInputs int\n\tNumOutputs int\n\tMaxArity int\n\tFunctionList []CGPFunction\n\tRandConst RndConstFunction\n\tEvaluator EvalFunction\n\tRand *rand.Rand\n}\n\ntype cgp struct {\n\tOptions CGPOptions\n\tPopulation []Individual\n\tNumEvaluations int\n}\n\nfunc New(options CGPOptions) *cgp {\n\n\tif options.PopSize < 2 {\n\t\tpanic(\"Population size must be at least 2.\")\n\t}\n\tif options.NumGenes < 0 {\n\t\tpanic(\"NumGenes can't be negative.\")\n\t}\n\tif options.MutationRate < 0 || options.MutationRate > 1 {\n\t\tpanic(\"Mutation rate must be between 0 and 1.\")\n\t}\n\tif options.NumInputs < 0 {\n\t\tpanic(\"NumInputs can't be negative.\")\n\t}\n\tif options.NumOutputs < 1 {\n\t\tpanic(\"At least one output is necessary.\")\n\t}\n\tif options.MaxArity < 0 {\n\t\tpanic(\"MaxArity can't be negative.\")\n\t}\n\tif len(options.FunctionList) == 0 {\n\t\tpanic(\"At least one function must be provided.\")\n\t}\n\tif options.RandConst == nil {\n\t\tpanic(\"You must supply a RandConst function.\")\n\t}\n\tif options.Evaluator == nil {\n\t\tpanic(\"You must supply an Evaluator function.\")\n\t}\n\n\tif options.Rand == nil {\n\t\toptions.Rand = rand.New(rand.NewSource(time.Now().UnixNano()))\n\t}\n\n\tresult := &cgp{\n\t\tOptions: options,\n\t\tPopulation: make([]Individual, 1, options.PopSize),\n\t\tNumEvaluations: 0,\n\t}\n\n\tresult.Population[0] = NewIndividual(&options)\n\n\treturn result\n}\n\nfunc (cgp *cgp) RunGeneration() {\n\t\/\/ Create offspring\n\tcgp.Population = cgp.Population[0:1]\n\tfor i := 1; i < cgp.Options.PopSize; i++ {\n\t\tcgp.Population = append(cgp.Population, cgp.Population[0].Mutate())\n\t}\n\n\t\/\/ Evaluate offspring (in parallel)\n\tvar wg sync.WaitGroup\n\tfor i := 1; i < cgp.Options.PopSize; i++ {\n\t\t\/\/ If the individual computes the same function as the parent, skip\n\t\t\/\/ evaluation and just use the parent's fitness\n\t\tif cgp.Population[i].CacheID() == cgp.Population[0].CacheID() {\n\t\t\tcgp.Population[i].Fitness = cgp.Population[0].Fitness\n\t\t} else {\n\t\t\t\/\/ Individual is different from parent, compute fitness\n\t\t\twg.Add(1)\n\t\t\tcgp.NumEvaluations += 1\n\t\t\tgo func(i int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tcgp.Population[i].Fitness = cgp.Options.Evaluator(cgp.Population[i])\n\t\t\t}(i)\n\t\t}\n\t}\n\twg.Wait()\n\n\t\/\/ Replace parent with best offspring\n\tbestFitness := math.Inf(1)\n\tbestIndividual := 0\n\tfor i := 1; i < cgp.Options.PopSize; i++ {\n\t\tif cgp.Population[i].Fitness < bestFitness {\n\t\t\tbestFitness = cgp.Population[i].Fitness\n\t\t\tbestIndividual = i\n\t\t}\n\t}\n\n\tif bestFitness <= cgp.Population[0].Fitness {\n\t\tcgp.Population[0] = cgp.Population[bestIndividual]\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package pastis\n\nimport (\n\t\"testing\"\n)\n\nfunc Test_Pastis_URLMatch(t *testing.T) {\n\tok, params := Match(Regexp(\"\/hello\/:name\"), \"\/hello\/guregodevo\")\n\texpect(t, ok, true)\n\texpect(t, params[\"name\"], \"guregodevo\")\n\n\tok, params = Match(Regexp(\"\/hello\/:name\/:id\"), \"\/hello\/guregodevo\/1234\")\n\texpect(t, ok, true)\n\texpect(t, params[\"name\"], \"guregodevo\")\n\texpect(t, params[\"id\"], \"1234\")\n\n}\n\nfunc Test_Pastis_RegexpMatch(t *testing.T) {\n\tok, params := Match(Regexp(\"^\/comment\/(?P<id>\\\\d+)$\"), \"\/comment\/123\")\n\texpect(t, ok, true)\n\texpect(t, params[\"id\"], \"123\")\n}\n\nfunc Test_Pastis_ComplexRegexpMatch(t *testing.T) {\n\tok, params := Match(Regexp(\"^\/dashboards\/:dashboardid\/chart\/(?P<chartid>)$\"), \"\/dashboards\/1\/chart\/\")\n\texpect(t, ok, true)\n\t\/\/expect(t, params[\"chartid\"], \"2\")\n\texpect(t, params[\"dashboardid\"], \"1\")\n}\n<commit_msg>Added edge case for URL pattern matching test<commit_after>package pastis\n\nimport (\n\t\"testing\"\n)\n\nfunc Test_Pastis_URLMatch(t *testing.T) {\n\tok, params := Match(Regexp(\"\/hello\/:name\"), \"\/hello\/guregodevo\")\n\texpect(t, ok, true)\n\texpect(t, params[\"name\"], \"guregodevo\")\n\n\tok, params = Match(Regexp(\"\/hello\/:name\/:id\"), \"\/hello\/guregodevo\/1234\")\n\texpect(t, ok, true)\n\texpect(t, params[\"name\"], \"guregodevo\")\n\texpect(t, params[\"id\"], \"1234\")\n\n}\n\nfunc Test_Pastis_RegexpMatch(t *testing.T) {\n\tok, params := Match(Regexp(\"^\/comment\/(?P<id>\\\\d+)$\"), \"\/comment\/123\")\n\texpect(t, ok, true)\n\texpect(t, params[\"id\"], \"123\")\n}\n\nfunc Test_Pastis_ComplexRegexpMatch(t *testing.T) {\n\tregexp := \"^\/dashboards\/:dashboardid\/chart\/(?P<chartid>[0-9]*)$\"\n\tok, params := Match(Regexp(regexp), \"\/dashboards\/1\/chart\/2\")\n\texpect(t, ok, true)\n\texpect(t, params[\"chartid\"], \"2\")\n\texpect(t, params[\"dashboardid\"], \"1\")\n\n\tok, params = Match(Regexp(regexp), \"\/dashboards\/1\/chart\/\")\n\texpect(t, ok, true)\n\texpect(t, params[\"chartid\"], \"\")\n\texpect(t, params[\"dashboardid\"], \"1\")\n}\n\n<|endoftext|>"} {"text":"<commit_before>package cli\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"text\/template\"\n\n\t\"github.com\/armon\/go-radix\"\n)\n\n\/\/ CLI contains the state necessary to run subcommands and parse the\n\/\/ command line arguments.\n\/\/\n\/\/ CLI also supports nested subcommands, such as \"cli foo bar\". To use\n\/\/ nested subcommands, the key in the Commands mapping below contains the\n\/\/ full subcommand. In this example, it would be \"foo bar\".\n\/\/\n\/\/ If you use a CLI with nested subcommands, some semantics change due to\n\/\/ ambiguities:\n\/\/\n\/\/ * We use longest prefix matching to find a matching subcommand. This\n\/\/ means if you register \"foo bar\" and the user executes \"cli foo qux\",\n\/\/ the \"foo\" commmand will be executed with the arg \"qux\". It is up to\n\/\/ you to handle these args. One option is to just return the special\n\/\/ help return code `RunResultHelp` to display help and exit.\n\/\/\n\/\/ * The help flag \"-h\" or \"-help\" will look at all args to determine\n\/\/ the help function. For example: \"otto apps list -h\" will show the\n\/\/ help for \"apps list\" but \"otto apps -h\" will show it for \"apps\".\n\/\/ In the normal CLI, only the first subcommand is used.\n\/\/\n\/\/ * The help flag will list any subcommands that a command takes\n\/\/ as well as the command's help itself. If there are no subcommands,\n\/\/ it will note this. If the CLI itself has no subcommands, this entire\n\/\/ section is omitted.\n\/\/\n\/\/ * Any parent commands that don't exist are automatically created as\n\/\/ no-op commands that just show help for other subcommands. For example,\n\/\/ if you only register \"foo bar\", then \"foo\" is automatically created.\n\/\/\ntype CLI struct {\n\t\/\/ Args is the list of command-line arguments received excluding\n\t\/\/ the name of the app. For example, if the command \".\/cli foo bar\"\n\t\/\/ was invoked, then Args should be []string{\"foo\", \"bar\"}.\n\tArgs []string\n\n\t\/\/ Commands is a mapping of subcommand names to a factory function\n\t\/\/ for creating that Command implementation. If there is a command\n\t\/\/ with a blank string \"\", then it will be used as the default command\n\t\/\/ if no subcommand is specified.\n\t\/\/\n\t\/\/ If the key has a space in it, this will create a nested subcommand.\n\t\/\/ For example, if the key is \"foo bar\", then to access it our CLI\n\t\/\/ must be accessed with \".\/cli foo bar\". See the docs for CLI for\n\t\/\/ notes on how this changes some other behavior of the CLI as well.\n\tCommands map[string]CommandFactory\n\n\t\/\/ Name defines the name of the CLI.\n\tName string\n\n\t\/\/ Version of the CLI.\n\tVersion string\n\n\t\/\/ HelpFunc and HelpWriter are used to output help information, if\n\t\/\/ requested.\n\t\/\/\n\t\/\/ HelpFunc is the function called to generate the generic help\n\t\/\/ text that is shown if help must be shown for the CLI that doesn't\n\t\/\/ pertain to a specific command.\n\t\/\/\n\t\/\/ HelpWriter is the Writer where the help text is outputted to. If\n\t\/\/ not specified, it will default to Stderr.\n\tHelpFunc HelpFunc\n\tHelpWriter io.Writer\n\n\tonce sync.Once\n\tcommandTree *radix.Tree\n\tcommandNested bool\n\tisHelp bool\n\tsubcommand string\n\tsubcommandArgs []string\n\ttopFlags []string\n\n\tisVersion bool\n}\n\n\/\/ NewClI returns a new CLI instance with sensible defaults.\nfunc NewCLI(app, version string) *CLI {\n\treturn &CLI{\n\t\tName: app,\n\t\tVersion: version,\n\t\tHelpFunc: BasicHelpFunc(app),\n\t}\n\n}\n\n\/\/ IsHelp returns whether or not the help flag is present within the\n\/\/ arguments.\nfunc (c *CLI) IsHelp() bool {\n\tc.once.Do(c.init)\n\treturn c.isHelp\n}\n\n\/\/ IsVersion returns whether or not the version flag is present within the\n\/\/ arguments.\nfunc (c *CLI) IsVersion() bool {\n\tc.once.Do(c.init)\n\treturn c.isVersion\n}\n\n\/\/ Run runs the actual CLI based on the arguments given.\nfunc (c *CLI) Run() (int, error) {\n\tc.once.Do(c.init)\n\n\t\/\/ Just show the version and exit if instructed.\n\tif c.IsVersion() && c.Version != \"\" {\n\t\tc.HelpWriter.Write([]byte(c.Version + \"\\n\"))\n\t\treturn 1, nil\n\t}\n\n\t\/\/ If there is an invalid flag, then error\n\tif len(c.topFlags) > 0 {\n\t\tc.HelpWriter.Write([]byte(\n\t\t\t\"Invalid flags before the subcommand. If these flags are for\\n\" +\n\t\t\t\t\"the subcommand, please put them after the subcommand.\\n\\n\"))\n\t\tc.HelpWriter.Write([]byte(c.HelpFunc(c.Commands) + \"\\n\"))\n\t\treturn 1, nil\n\t}\n\n\t\/\/ Attempt to get the factory function for creating the command\n\t\/\/ implementation. If the command is invalid or blank, it is an error.\n\traw, ok := c.commandTree.Get(c.Subcommand())\n\tif !ok {\n\t\tc.HelpWriter.Write([]byte(c.HelpFunc(c.Commands) + \"\\n\"))\n\t\treturn 1, nil\n\t}\n\n\tcommand, err := raw.(CommandFactory)()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t\/\/ If we've been instructed to just print the help, then print it\n\tif c.IsHelp() {\n\t\tc.commandHelp(command)\n\t\treturn 1, nil\n\t}\n\n\tcode := command.Run(c.SubcommandArgs())\n\tif code == RunResultHelp {\n\t\t\/\/ Requesting help\n\t\tc.commandHelp(command)\n\t\treturn 1, nil\n\t}\n\n\treturn code, nil\n}\n\n\/\/ Subcommand returns the subcommand that the CLI would execute. For\n\/\/ example, a CLI from \"--version version --help\" would return a Subcommand\n\/\/ of \"version\"\nfunc (c *CLI) Subcommand() string {\n\tc.once.Do(c.init)\n\treturn c.subcommand\n}\n\n\/\/ SubcommandArgs returns the arguments that will be passed to the\n\/\/ subcommand.\nfunc (c *CLI) SubcommandArgs() []string {\n\tc.once.Do(c.init)\n\treturn c.subcommandArgs\n}\n\nfunc (c *CLI) init() {\n\tif c.HelpFunc == nil {\n\t\tc.HelpFunc = BasicHelpFunc(\"app\")\n\n\t\tif c.Name != \"\" {\n\t\t\tc.HelpFunc = BasicHelpFunc(c.Name)\n\t\t}\n\t}\n\n\tif c.HelpWriter == nil {\n\t\tc.HelpWriter = os.Stderr\n\t}\n\n\t\/\/ Build our command tree\n\tc.commandTree = radix.New()\n\tc.commandNested = false\n\tfor k, v := range c.Commands {\n\t\tk = strings.TrimSpace(k)\n\t\tc.commandTree.Insert(k, v)\n\t\tif strings.ContainsRune(k, ' ') {\n\t\t\tc.commandNested = true\n\t\t}\n\t}\n\n\t\/\/ Go through the key and fill in any missing parent commands\n\tif c.commandNested {\n\t\tvar walkFn radix.WalkFn\n\t\ttoInsert := make(map[string]struct{})\n\t\twalkFn = func(k string, raw interface{}) bool {\n\t\t\tidx := strings.LastIndex(k, \" \")\n\t\t\tif idx == -1 {\n\t\t\t\t\/\/ If there is no space, just ignore top level commands\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\t\/\/ Trim up to that space so we can get the expected parent\n\t\t\tk = k[:idx]\n\t\t\tif _, ok := c.commandTree.Get(k); ok {\n\t\t\t\t\/\/ Yay we have the parent!\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\t\/\/ We're missing the parent, so let's insert this\n\t\t\ttoInsert[k] = struct{}{}\n\n\t\t\t\/\/ Call the walk function recursively so we check this one too\n\t\t\treturn walkFn(k, nil)\n\t\t}\n\n\t\t\/\/ Walk!\n\t\tc.commandTree.Walk(walkFn)\n\n\t\t\/\/ Insert any that we're missing\n\t\tfor k, _ := range toInsert {\n\t\t\tvar f CommandFactory = func() (Command, error) {\n\t\t\t\treturn &MockCommand{\n\t\t\t\t\tHelpText: \"This command is accessed by using one of the subcommands below.\",\n\t\t\t\t\tRunResult: RunResultHelp,\n\t\t\t\t}, nil\n\t\t\t}\n\n\t\t\tc.commandTree.Insert(k, f)\n\t\t}\n\t}\n\n\t\/\/ Process the args\n\tc.processArgs()\n}\n\nfunc (c *CLI) commandHelp(command Command) {\n\t\/\/ Get the template to use\n\ttpl := strings.TrimSpace(defaultHelpTemplate)\n\tif t, ok := command.(CommandHelpTemplate); ok {\n\t\ttpl = t.HelpTemplate()\n\t}\n\tif !strings.HasSuffix(tpl, \"\\n\") {\n\t\ttpl += \"\\n\"\n\t}\n\n\t\/\/ Parse it\n\tt, err := template.New(\"root\").Parse(tpl)\n\tif err != nil {\n\t\tt = template.Must(template.New(\"root\").Parse(fmt.Sprintf(\n\t\t\t\"Internal error! Failed to parse command help template: %s\\n\", err)))\n\t}\n\n\t\/\/ Template data\n\tdata := map[string]interface{}{\n\t\t\"Name\": c.Name,\n\t\t\"Help\": command.Help(),\n\t}\n\n\t\/\/ Build subcommand list if we have it\n\tvar subcommands []map[string]interface{}\n\tif c.commandNested {\n\t\t\/\/ Get the matching keys\n\t\tvar keys []string\n\t\tprefix := c.Subcommand() + \" \"\n\t\tc.commandTree.WalkPrefix(prefix, func(k string, raw interface{}) bool {\n\t\t\tkeys = append(keys, k)\n\t\t\treturn false\n\t\t})\n\n\t\t\/\/ Sort the keys\n\t\tsort.Strings(keys)\n\n\t\t\/\/ Figure out the padding length\n\t\tvar longest int\n\t\tfor _, k := range keys {\n\t\t\tif v := len(k); v > longest {\n\t\t\t\tlongest = v\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Go through and create their structures\n\t\tsubcommands = make([]map[string]interface{}, len(keys))\n\t\tfor i, k := range keys {\n\t\t\traw, ok := c.commandTree.Get(k)\n\t\t\tif !ok {\n\t\t\t\t\/\/ We just checked that it should be here above. If it is\n\t\t\t\t\/\/ isn't, there are serious problems.\n\t\t\t\tpanic(\"value is missing\")\n\t\t\t}\n\n\t\t\t\/\/ Get the command\n\t\t\tsub, err := raw.(CommandFactory)()\n\t\t\tif err != nil {\n\t\t\t\tc.HelpWriter.Write([]byte(fmt.Sprintf(\n\t\t\t\t\t\"Error instantiating %q: %s\", k, err)))\n\t\t\t}\n\n\t\t\t\/\/ Determine some info\n\t\t\tname := strings.TrimPrefix(k, prefix)\n\n\t\t\tsubcommands[i] = map[string]interface{}{\n\t\t\t\t\"Name\": name,\n\t\t\t\t\"NameAligned\": name + strings.Repeat(\" \", longest-len(k)),\n\t\t\t\t\"Help\": sub.Help(),\n\t\t\t\t\"Synopsis\": sub.Synopsis(),\n\t\t\t}\n\t\t}\n\t}\n\tdata[\"Subcommands\"] = subcommands\n\n\t\/\/ Write\n\terr = t.Execute(c.HelpWriter, data)\n\tif err == nil {\n\t\treturn\n\t}\n\n\t\/\/ An error, just output...\n\tc.HelpWriter.Write([]byte(fmt.Sprintf(\n\t\t\"Internal error rendering help: %s\", err)))\n}\n\nfunc (c *CLI) processArgs() {\n\tfor i, arg := range c.Args {\n\t\tif c.subcommand == \"\" {\n\t\t\t\/\/ Check for version and help flags if not in a subcommand\n\t\t\tif arg == \"-v\" || arg == \"-version\" || arg == \"--version\" {\n\t\t\t\tc.isVersion = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif arg == \"-h\" || arg == \"-help\" || arg == \"--help\" {\n\t\t\t\tc.isHelp = true\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif arg != \"\" && arg[0] == '-' {\n\t\t\t\t\/\/ Record the arg...\n\t\t\t\tc.topFlags = append(c.topFlags, arg)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ If we didn't find a subcommand yet and this is the first non-flag\n\t\t\/\/ argument, then this is our subcommand. j\n\t\tif c.subcommand == \"\" && arg != \"\" && arg[0] != '-' {\n\t\t\tc.subcommand = arg\n\t\t\tif c.commandNested {\n\t\t\t\t\/\/ Nested CLI, the subcommand is actually the entire\n\t\t\t\t\/\/ arg list up to a flag that is still a valid subcommand.\n\t\t\t\tk, _, ok := c.commandTree.LongestPrefix(strings.Join(c.Args[i:], \" \"))\n\t\t\t\tif ok {\n\t\t\t\t\tc.subcommand = k\n\t\t\t\t\ti += strings.Count(k, \" \")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ The remaining args the subcommand arguments\n\t\t\tc.subcommandArgs = c.Args[i+1:]\n\t\t}\n\t}\n\n\t\/\/ If we never found a subcommand and support a default command, then\n\t\/\/ switch to using that.\n\tif c.subcommand == \"\" {\n\t\tif _, ok := c.Commands[\"\"]; ok {\n\t\t\targs := c.topFlags\n\t\t\targs = append(args, c.subcommandArgs...)\n\t\t\tc.topFlags = nil\n\t\t\tc.subcommandArgs = args\n\t\t}\n\t}\n}\n\nconst defaultHelpTemplate = `\n{{.Help}}{{if gt (len .Subcommands) 0}}\n\nSubcommands:\n{{ range $value := .Subcommands }}\n {{ $value.NameAligned }} {{ $value.Synopsis }}{{ end }}\n{{ end }}\n`\n<commit_msg>Some options returns exit status 0.<commit_after>package cli\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"text\/template\"\n\n\t\"github.com\/armon\/go-radix\"\n)\n\n\/\/ CLI contains the state necessary to run subcommands and parse the\n\/\/ command line arguments.\n\/\/\n\/\/ CLI also supports nested subcommands, such as \"cli foo bar\". To use\n\/\/ nested subcommands, the key in the Commands mapping below contains the\n\/\/ full subcommand. In this example, it would be \"foo bar\".\n\/\/\n\/\/ If you use a CLI with nested subcommands, some semantics change due to\n\/\/ ambiguities:\n\/\/\n\/\/ * We use longest prefix matching to find a matching subcommand. This\n\/\/ means if you register \"foo bar\" and the user executes \"cli foo qux\",\n\/\/ the \"foo\" commmand will be executed with the arg \"qux\". It is up to\n\/\/ you to handle these args. One option is to just return the special\n\/\/ help return code `RunResultHelp` to display help and exit.\n\/\/\n\/\/ * The help flag \"-h\" or \"-help\" will look at all args to determine\n\/\/ the help function. For example: \"otto apps list -h\" will show the\n\/\/ help for \"apps list\" but \"otto apps -h\" will show it for \"apps\".\n\/\/ In the normal CLI, only the first subcommand is used.\n\/\/\n\/\/ * The help flag will list any subcommands that a command takes\n\/\/ as well as the command's help itself. If there are no subcommands,\n\/\/ it will note this. If the CLI itself has no subcommands, this entire\n\/\/ section is omitted.\n\/\/\n\/\/ * Any parent commands that don't exist are automatically created as\n\/\/ no-op commands that just show help for other subcommands. For example,\n\/\/ if you only register \"foo bar\", then \"foo\" is automatically created.\n\/\/\ntype CLI struct {\n\t\/\/ Args is the list of command-line arguments received excluding\n\t\/\/ the name of the app. For example, if the command \".\/cli foo bar\"\n\t\/\/ was invoked, then Args should be []string{\"foo\", \"bar\"}.\n\tArgs []string\n\n\t\/\/ Commands is a mapping of subcommand names to a factory function\n\t\/\/ for creating that Command implementation. If there is a command\n\t\/\/ with a blank string \"\", then it will be used as the default command\n\t\/\/ if no subcommand is specified.\n\t\/\/\n\t\/\/ If the key has a space in it, this will create a nested subcommand.\n\t\/\/ For example, if the key is \"foo bar\", then to access it our CLI\n\t\/\/ must be accessed with \".\/cli foo bar\". See the docs for CLI for\n\t\/\/ notes on how this changes some other behavior of the CLI as well.\n\tCommands map[string]CommandFactory\n\n\t\/\/ Name defines the name of the CLI.\n\tName string\n\n\t\/\/ Version of the CLI.\n\tVersion string\n\n\t\/\/ HelpFunc and HelpWriter are used to output help information, if\n\t\/\/ requested.\n\t\/\/\n\t\/\/ HelpFunc is the function called to generate the generic help\n\t\/\/ text that is shown if help must be shown for the CLI that doesn't\n\t\/\/ pertain to a specific command.\n\t\/\/\n\t\/\/ HelpWriter is the Writer where the help text is outputted to. If\n\t\/\/ not specified, it will default to Stderr.\n\tHelpFunc HelpFunc\n\tHelpWriter io.Writer\n\n\tonce sync.Once\n\tcommandTree *radix.Tree\n\tcommandNested bool\n\tisHelp bool\n\tsubcommand string\n\tsubcommandArgs []string\n\ttopFlags []string\n\n\tisVersion bool\n}\n\n\/\/ NewClI returns a new CLI instance with sensible defaults.\nfunc NewCLI(app, version string) *CLI {\n\treturn &CLI{\n\t\tName: app,\n\t\tVersion: version,\n\t\tHelpFunc: BasicHelpFunc(app),\n\t}\n\n}\n\n\/\/ IsHelp returns whether or not the help flag is present within the\n\/\/ arguments.\nfunc (c *CLI) IsHelp() bool {\n\tc.once.Do(c.init)\n\treturn c.isHelp\n}\n\n\/\/ IsVersion returns whether or not the version flag is present within the\n\/\/ arguments.\nfunc (c *CLI) IsVersion() bool {\n\tc.once.Do(c.init)\n\treturn c.isVersion\n}\n\n\/\/ Run runs the actual CLI based on the arguments given.\nfunc (c *CLI) Run() (int, error) {\n\tc.once.Do(c.init)\n\n\t\/\/ Just show the version and exit if instructed.\n\tif c.IsVersion() && c.Version != \"\" {\n\t\tc.HelpWriter.Write([]byte(c.Version + \"\\n\"))\n\t\treturn 0, nil\n\t}\n\n\t\/\/ If there is an invalid flag, then error\n\tif len(c.topFlags) > 0 {\n\t\tc.HelpWriter.Write([]byte(\n\t\t\t\"Invalid flags before the subcommand. If these flags are for\\n\" +\n\t\t\t\t\"the subcommand, please put them after the subcommand.\\n\\n\"))\n\t\tc.HelpWriter.Write([]byte(c.HelpFunc(c.Commands) + \"\\n\"))\n\t\treturn 1, nil\n\t}\n\n\t\/\/ Attempt to get the factory function for creating the command\n\t\/\/ implementation. If the command is invalid or blank, it is an error.\n\traw, ok := c.commandTree.Get(c.Subcommand())\n\tif !ok {\n\t\tc.HelpWriter.Write([]byte(c.HelpFunc(c.Commands) + \"\\n\"))\n\t\treturn 1, nil\n\t}\n\n\tcommand, err := raw.(CommandFactory)()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t\/\/ If we've been instructed to just print the help, then print it\n\tif c.IsHelp() {\n\t\tc.commandHelp(command)\n\t\treturn 0, nil\n\t}\n\n\tcode := command.Run(c.SubcommandArgs())\n\tif code == RunResultHelp {\n\t\t\/\/ Requesting help\n\t\tc.commandHelp(command)\n\t\treturn 1, nil\n\t}\n\n\treturn code, nil\n}\n\n\/\/ Subcommand returns the subcommand that the CLI would execute. For\n\/\/ example, a CLI from \"--version version --help\" would return a Subcommand\n\/\/ of \"version\"\nfunc (c *CLI) Subcommand() string {\n\tc.once.Do(c.init)\n\treturn c.subcommand\n}\n\n\/\/ SubcommandArgs returns the arguments that will be passed to the\n\/\/ subcommand.\nfunc (c *CLI) SubcommandArgs() []string {\n\tc.once.Do(c.init)\n\treturn c.subcommandArgs\n}\n\nfunc (c *CLI) init() {\n\tif c.HelpFunc == nil {\n\t\tc.HelpFunc = BasicHelpFunc(\"app\")\n\n\t\tif c.Name != \"\" {\n\t\t\tc.HelpFunc = BasicHelpFunc(c.Name)\n\t\t}\n\t}\n\n\tif c.HelpWriter == nil {\n\t\tc.HelpWriter = os.Stderr\n\t}\n\n\t\/\/ Build our command tree\n\tc.commandTree = radix.New()\n\tc.commandNested = false\n\tfor k, v := range c.Commands {\n\t\tk = strings.TrimSpace(k)\n\t\tc.commandTree.Insert(k, v)\n\t\tif strings.ContainsRune(k, ' ') {\n\t\t\tc.commandNested = true\n\t\t}\n\t}\n\n\t\/\/ Go through the key and fill in any missing parent commands\n\tif c.commandNested {\n\t\tvar walkFn radix.WalkFn\n\t\ttoInsert := make(map[string]struct{})\n\t\twalkFn = func(k string, raw interface{}) bool {\n\t\t\tidx := strings.LastIndex(k, \" \")\n\t\t\tif idx == -1 {\n\t\t\t\t\/\/ If there is no space, just ignore top level commands\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\t\/\/ Trim up to that space so we can get the expected parent\n\t\t\tk = k[:idx]\n\t\t\tif _, ok := c.commandTree.Get(k); ok {\n\t\t\t\t\/\/ Yay we have the parent!\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\t\/\/ We're missing the parent, so let's insert this\n\t\t\ttoInsert[k] = struct{}{}\n\n\t\t\t\/\/ Call the walk function recursively so we check this one too\n\t\t\treturn walkFn(k, nil)\n\t\t}\n\n\t\t\/\/ Walk!\n\t\tc.commandTree.Walk(walkFn)\n\n\t\t\/\/ Insert any that we're missing\n\t\tfor k, _ := range toInsert {\n\t\t\tvar f CommandFactory = func() (Command, error) {\n\t\t\t\treturn &MockCommand{\n\t\t\t\t\tHelpText: \"This command is accessed by using one of the subcommands below.\",\n\t\t\t\t\tRunResult: RunResultHelp,\n\t\t\t\t}, nil\n\t\t\t}\n\n\t\t\tc.commandTree.Insert(k, f)\n\t\t}\n\t}\n\n\t\/\/ Process the args\n\tc.processArgs()\n}\n\nfunc (c *CLI) commandHelp(command Command) {\n\t\/\/ Get the template to use\n\ttpl := strings.TrimSpace(defaultHelpTemplate)\n\tif t, ok := command.(CommandHelpTemplate); ok {\n\t\ttpl = t.HelpTemplate()\n\t}\n\tif !strings.HasSuffix(tpl, \"\\n\") {\n\t\ttpl += \"\\n\"\n\t}\n\n\t\/\/ Parse it\n\tt, err := template.New(\"root\").Parse(tpl)\n\tif err != nil {\n\t\tt = template.Must(template.New(\"root\").Parse(fmt.Sprintf(\n\t\t\t\"Internal error! Failed to parse command help template: %s\\n\", err)))\n\t}\n\n\t\/\/ Template data\n\tdata := map[string]interface{}{\n\t\t\"Name\": c.Name,\n\t\t\"Help\": command.Help(),\n\t}\n\n\t\/\/ Build subcommand list if we have it\n\tvar subcommands []map[string]interface{}\n\tif c.commandNested {\n\t\t\/\/ Get the matching keys\n\t\tvar keys []string\n\t\tprefix := c.Subcommand() + \" \"\n\t\tc.commandTree.WalkPrefix(prefix, func(k string, raw interface{}) bool {\n\t\t\tkeys = append(keys, k)\n\t\t\treturn false\n\t\t})\n\n\t\t\/\/ Sort the keys\n\t\tsort.Strings(keys)\n\n\t\t\/\/ Figure out the padding length\n\t\tvar longest int\n\t\tfor _, k := range keys {\n\t\t\tif v := len(k); v > longest {\n\t\t\t\tlongest = v\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Go through and create their structures\n\t\tsubcommands = make([]map[string]interface{}, len(keys))\n\t\tfor i, k := range keys {\n\t\t\traw, ok := c.commandTree.Get(k)\n\t\t\tif !ok {\n\t\t\t\t\/\/ We just checked that it should be here above. If it is\n\t\t\t\t\/\/ isn't, there are serious problems.\n\t\t\t\tpanic(\"value is missing\")\n\t\t\t}\n\n\t\t\t\/\/ Get the command\n\t\t\tsub, err := raw.(CommandFactory)()\n\t\t\tif err != nil {\n\t\t\t\tc.HelpWriter.Write([]byte(fmt.Sprintf(\n\t\t\t\t\t\"Error instantiating %q: %s\", k, err)))\n\t\t\t}\n\n\t\t\t\/\/ Determine some info\n\t\t\tname := strings.TrimPrefix(k, prefix)\n\n\t\t\tsubcommands[i] = map[string]interface{}{\n\t\t\t\t\"Name\": name,\n\t\t\t\t\"NameAligned\": name + strings.Repeat(\" \", longest-len(k)),\n\t\t\t\t\"Help\": sub.Help(),\n\t\t\t\t\"Synopsis\": sub.Synopsis(),\n\t\t\t}\n\t\t}\n\t}\n\tdata[\"Subcommands\"] = subcommands\n\n\t\/\/ Write\n\terr = t.Execute(c.HelpWriter, data)\n\tif err == nil {\n\t\treturn\n\t}\n\n\t\/\/ An error, just output...\n\tc.HelpWriter.Write([]byte(fmt.Sprintf(\n\t\t\"Internal error rendering help: %s\", err)))\n}\n\nfunc (c *CLI) processArgs() {\n\tfor i, arg := range c.Args {\n\t\tif c.subcommand == \"\" {\n\t\t\t\/\/ Check for version and help flags if not in a subcommand\n\t\t\tif arg == \"-v\" || arg == \"-version\" || arg == \"--version\" {\n\t\t\t\tc.isVersion = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif arg == \"-h\" || arg == \"-help\" || arg == \"--help\" {\n\t\t\t\tc.isHelp = true\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif arg != \"\" && arg[0] == '-' {\n\t\t\t\t\/\/ Record the arg...\n\t\t\t\tc.topFlags = append(c.topFlags, arg)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ If we didn't find a subcommand yet and this is the first non-flag\n\t\t\/\/ argument, then this is our subcommand. j\n\t\tif c.subcommand == \"\" && arg != \"\" && arg[0] != '-' {\n\t\t\tc.subcommand = arg\n\t\t\tif c.commandNested {\n\t\t\t\t\/\/ Nested CLI, the subcommand is actually the entire\n\t\t\t\t\/\/ arg list up to a flag that is still a valid subcommand.\n\t\t\t\tk, _, ok := c.commandTree.LongestPrefix(strings.Join(c.Args[i:], \" \"))\n\t\t\t\tif ok {\n\t\t\t\t\tc.subcommand = k\n\t\t\t\t\ti += strings.Count(k, \" \")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ The remaining args the subcommand arguments\n\t\t\tc.subcommandArgs = c.Args[i+1:]\n\t\t}\n\t}\n\n\t\/\/ If we never found a subcommand and support a default command, then\n\t\/\/ switch to using that.\n\tif c.subcommand == \"\" {\n\t\tif _, ok := c.Commands[\"\"]; ok {\n\t\t\targs := c.topFlags\n\t\t\targs = append(args, c.subcommandArgs...)\n\t\t\tc.topFlags = nil\n\t\t\tc.subcommandArgs = args\n\t\t}\n\t}\n}\n\nconst defaultHelpTemplate = `\n{{.Help}}{{if gt (len .Subcommands) 0}}\n\nSubcommands:\n{{ range $value := .Subcommands }}\n {{ $value.NameAligned }} {{ $value.Synopsis }}{{ end }}\n{{ end }}\n`\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright GoFrame Author(https:\/\/goframe.org). All Rights Reserved.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the MIT License.\n\/\/ If a copy of the MIT was not distributed with gm file,\n\/\/ You can obtain one at https:\/\/github.com\/gogf\/gf.\n\npackage gset_test\n\nimport (\n\t\"fmt\"\n\t\"github.com\/gogf\/gf\/v2\/container\/gset\"\n)\n\nfunc ExampleIntSet_Contains() {\n\tvar set gset.IntSet\n\tset.Add(1)\n\tfmt.Println(set.Contains(1))\n\tfmt.Println(set.Contains(2))\n\n\t\/\/ Output:\n\t\/\/ true\n\t\/\/ false\n}\n<commit_msg>IntSet finish<commit_after>\/\/ Copyright GoFrame Author(https:\/\/goframe.org). All Rights Reserved.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the MIT License.\n\/\/ If a copy of the MIT was not distributed with gm file,\n\/\/ You can obtain one at https:\/\/github.com\/gogf\/gf.\n\npackage gset_test\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/gogf\/gf\/v2\/container\/gset\"\n\t\"github.com\/gogf\/gf\/v2\/frame\/g\"\n)\n\n\/\/ New create and returns a new set, which contains un-repeated items.\n\/\/ The parameter `safe` is used to specify whether using set in concurrent-safety,\n\/\/ which is false in default.\nfunc ExampleNewIntSet() {\n\tintSet := gset.NewIntSet(true)\n\tintSet.Add([]int{1, 2, 3}...)\n\tfmt.Println(intSet.Slice())\n\n\t\/\/ May Output:\n\t\/\/ [2 1 3]\n}\n\n\/\/ NewIntSetFrom returns a new set from `items`.\nfunc ExampleNewFrom() {\n\tintSet := gset.NewIntSetFrom([]int{1, 2, 3}, true)\n\tfmt.Println(intSet.Slice())\n\n\t\/\/ May Output:\n\t\/\/ [2 1 3]\n}\n\n\/\/ Add adds one or multiple items to the set.\nfunc ExampleIntSet_Add() {\n\tintSet := gset.NewIntSetFrom([]int{1, 2, 3}, true)\n\tintSet.Add(1)\n\tfmt.Println(intSet.Slice())\n\tfmt.Println(intSet.AddIfNotExist(1))\n\n\t\/\/ Mya Output:\n\t\/\/ [1 2 3]\n\t\/\/ false\n}\n\n\/\/ AddIfNotExist checks whether item exists in the set,\n\/\/ it adds the item to set and returns true if it does not exists in the set,\n\/\/ or else it does nothing and returns false.\nfunc ExampleIntSet_AddIfNotExist() {\n\tintSet := gset.NewIntSetFrom([]int{1, 2, 3}, true)\n\tintSet.Add(1)\n\tfmt.Println(intSet.Slice())\n\tfmt.Println(intSet.AddIfNotExist(1))\n\n\t\/\/ Mya Output:\n\t\/\/ [1 2 3]\n\t\/\/ false\n}\n\n\/\/ AddIfNotExistFunc checks whether item exists in the set,\n\/\/ it adds the item to set and returns true if it does not exists in the set and function `f` returns true,\n\/\/ or else it does nothing and returns false.\n\/\/ Note that, the function `f` is executed without writing lock.\nfunc ExampleIntSet_AddIfNotExistFunc() {\n\tintSet := gset.NewIntSetFrom([]int{1, 2, 3}, true)\n\tintSet.Add(1)\n\tfmt.Println(intSet.Slice())\n\tfmt.Println(intSet.AddIfNotExistFunc(5, func() bool {\n\t\treturn true\n\t}))\n\n\t\/\/ May Output:\n\t\/\/ [1 2 3]\n\t\/\/ true\n}\n\n\/\/ AddIfNotExistFunc checks whether item exists in the set,\n\/\/ it adds the item to set and returns true if it does not exists in the set and function `f` returns true,\n\/\/ or else it does nothing and returns false.\n\/\/ Note that, the function `f` is executed without writing lock.\nfunc ExampleIntSet_AddIfNotExistFuncLock() {\n\tintSet := gset.NewIntSetFrom([]int{1, 2, 3}, true)\n\tintSet.Add(1)\n\tfmt.Println(intSet.Slice())\n\tfmt.Println(intSet.AddIfNotExistFuncLock(4, func() bool {\n\t\treturn true\n\t}))\n\n\t\/\/ May Output:\n\t\/\/ [1 2 3]\n\t\/\/ true\n}\n\n\/\/ Clear deletes all items of the set.\nfunc ExampleIntSet_Clear() {\n\tintSet := gset.NewIntSetFrom([]int{1, 2, 3}, true)\n\tfmt.Println(intSet.Size())\n\tintSet.Clear()\n\tfmt.Println(intSet.Size())\n\n\t\/\/ Output:\n\t\/\/ 3\n\t\/\/ 0\n}\n\n\/\/ Complement returns a new set which is the complement from `set` to `full`.\n\/\/ Which means, all the items in `newSet` are in `full` and not in `set`.\n\/\/ It returns the difference between `full` and `set` if the given set `full` is not the full set of `set`.\nfunc ExampleIntSet_Complement() {\n\tintSet := gset.NewIntSetFrom([]int{1, 2, 3, 4, 5}, true)\n\ts := gset.NewIntSetFrom([]int{1, 2, 3}, true)\n\tfmt.Println(s.Complement(intSet).Slice())\n\n\t\/\/ May Output:\n\t\/\/ [4 5]\n}\n\n\/\/ Contains checks whether the set contains `item`.\nfunc ExampleIntSet_Contains() {\n\tvar set1 gset.IntSet\n\tset1.Add(1, 4, 5, 6, 7)\n\tfmt.Println(set1.Contains(1))\n\n\tvar set2 gset.IntSet\n\tset2.Add(1, 4, 5, 6, 7)\n\tfmt.Println(set2.Contains(8))\n\n\t\/\/ Output:\n\t\/\/ true\n\t\/\/ false\n}\n\n\/\/ Diff returns a new set which is the difference set from `set` to `other`.\n\/\/ Which means, all the items in `newSet` are in `set` but not in `other`.\nfunc ExampleIntSet_Diff() {\n\ts1 := gset.NewIntSetFrom([]int{1, 2, 3}, true)\n\ts2 := gset.NewIntSetFrom([]int{1, 2, 3, 4}, true)\n\tfmt.Println(s2.Diff(s1).Slice())\n\n\t\/\/ Output:\n\t\/\/ [4]\n}\n\n\/\/ Equal checks whether the two sets equal.\nfunc ExampleIntSet_Equal() {\n\ts1 := gset.NewIntSetFrom([]int{1, 2, 3}, true)\n\ts2 := gset.NewIntSetFrom([]int{1, 2, 3, 4}, true)\n\tfmt.Println(s2.Equal(s1))\n\n\ts3 := gset.NewIntSetFrom([]int{1, 2, 3}, true)\n\ts4 := gset.NewIntSetFrom([]int{1, 2, 3}, true)\n\tfmt.Println(s3.Equal(s4))\n\n\t\/\/ Output:\n\t\/\/ false\n\t\/\/ true\n}\n\n\/\/ Intersect returns a new set which is the intersection from `set` to `other`.\n\/\/ Which means, all the items in `newSet` are in `set` and also in `other`.\nfunc ExampleIntSet_Intersect() {\n\ts1 := gset.NewIntSet(true)\n\ts1.Add([]int{1, 2, 3}...)\n\tvar s2 gset.IntSet\n\ts2.Add([]int{1, 2, 3, 4}...)\n\tfmt.Println(s2.Intersect(s1).Slice())\n\n\t\/\/ May Output:\n\t\/\/ [1 2 3]\n}\n\n\/\/ IsSubsetOf checks whether the current set is a sub-set of `other`\nfunc ExampleIntSet_IsSubsetOf() {\n\ts1 := gset.NewIntSet(true)\n\ts1.Add([]int{1, 2, 3, 4}...)\n\tvar s2 gset.IntSet\n\ts2.Add([]int{1, 2, 4}...)\n\tfmt.Println(s2.IsSubsetOf(s1))\n\n\t\/\/ Output:\n\t\/\/ true\n}\n\n\/\/ Iterator iterates the set readonly with given callback function `f`,\n\/\/ if `f` returns true then continue iterating; or false to stop.\nfunc ExampleIntSet_Iterator() {\n\ts1 := gset.NewIntSet(true)\n\ts1.Add([]int{1, 2, 3, 4}...)\n\ts1.Iterator(func(v int) bool {\n\t\tfmt.Println(\"Iterator\", v)\n\t\treturn true\n\t})\n\t\/\/ May Output:\n\t\/\/ Iterator 2\n\t\/\/ Iterator 3\n\t\/\/ Iterator 1\n\t\/\/ Iterator 4\n}\n\n\/\/ Join joins items with a string `glue`.\nfunc ExampleIntSet_Join() {\n\ts1 := gset.NewIntSet(true)\n\ts1.Add([]int{1, 2, 3, 4}...)\n\tfmt.Println(s1.Join(\",\"))\n\n\t\/\/ May Output:\n\t\/\/ 3,4,1,2\n}\n\n\/\/ LockFunc locks writing with callback function `f`.\nfunc ExampleIntSet_LockFunc() {\n\ts1 := gset.NewIntSet(true)\n\ts1.Add([]int{1, 2}...)\n\ts1.LockFunc(func(m map[int]struct{}) {\n\t\tm[3] = struct{}{}\n\t})\n\tfmt.Println(s1.Slice())\n\n\t\/\/ May Output\n\t\/\/ [2 3 1]\n}\n\n\/\/ MarshalJSON implements the interface MarshalJSON for json.Marshal.\nfunc ExampleIntSet_MarshalJSON() {\n\ttype Student struct {\n\t\tId int\n\t\tName string\n\t\tScores *gset.IntSet\n\t}\n\ts := Student{\n\t\tId: 1,\n\t\tName: \"john\",\n\t\tScores: gset.NewIntSetFrom([]int{100, 99, 98}, true),\n\t}\n\tb, _ := json.Marshal(s)\n\tfmt.Println(string(b))\n\n\t\/\/ May Output:\n\t\/\/ {\"Id\":1,\"Name\":\"john\",\"Scores\":[100,99,98]}\n}\n\n\/\/ Merge adds items from `others` sets into `set`.\nfunc ExampleIntSet_Merge() {\n\ts1 := gset.NewIntSet(true)\n\ts1.Add([]int{1, 2, 3, 4}...)\n\n\ts2 := gset.NewIntSet(true)\n\tfmt.Println(s1.Merge(s2).Slice())\n\n\t\/\/ May Output:\n\t\/\/ [1 2 3 4]\n}\n\n\/\/ Pops randomly pops an item from set.\nfunc ExampleIntSet_Pop() {\n\ts1 := gset.NewIntSet(true)\n\ts1.Add([]int{1, 2, 3, 4}...)\n\n\tfmt.Println(s1.Pop())\n\n\t\/\/ May Output:\n\t\/\/ 1\n}\n\n\/\/ Pops randomly pops `size` items from set.\n\/\/ It returns all items if size == -1.\nfunc ExampleIntSet_Pops() {\n\ts1 := gset.NewIntSet(true)\n\ts1.Add([]int{1, 2, 3, 4}...)\n\tfor _, v := range s1.Pops(2) {\n\t\tfmt.Println(v)\n\t}\n\n\t\/\/ May Output:\n\t\/\/ 1\n\t\/\/ 2\n}\n\n\/\/ RLockFunc locks reading with callback function `f`.\nfunc ExampleIntSet_RLockFunc() {\n\ts1 := gset.NewIntSet(true)\n\ts1.Add([]int{1, 2, 3, 4}...)\n\ts1.RLockFunc(func(m map[int]struct{}) {\n\t\tfmt.Println(m)\n\t})\n\n\t\/\/ Output:\n\t\/\/ map[1:{} 2:{} 3:{} 4:{}]\n}\n\n\/\/ Remove deletes `item` from set.\nfunc ExampleIntSet_Remove() {\n\ts1 := gset.NewIntSet(true)\n\ts1.Add([]int{1, 2, 3, 4}...)\n\ts1.Remove(1)\n\tfmt.Println(s1.Slice())\n\n\t\/\/ Output:\n\t\/\/ [3 4 2]\n}\n\n\/\/ Size returns the size of the set.\nfunc ExampleIntSet_Size() {\n\ts1 := gset.NewIntSet(true)\n\ts1.Add([]int{1, 2, 3, 4}...)\n\tfmt.Println(s1.Size())\n\n\t\/\/ Output:\n\t\/\/ 4\n}\n\n\/\/ Slice returns the a of items of the set as slice.\nfunc ExampleIntSet_Slice() {\n\ts1 := gset.NewIntSet(true)\n\ts1.Add([]int{1, 2, 3, 4}...)\n\tfmt.Println(s1.Slice())\n\n\t\/\/ May Output:\n\t\/\/ [1, 2, 3, 4]\n}\n\n\/\/ String returns items as a string, which implements like json.Marshal does.\nfunc ExampleIntSet_String() {\n\ts1 := gset.NewIntSet(true)\n\ts1.Add([]int{1, 2, 3, 4}...)\n\tfmt.Println(s1.String())\n\n\t\/\/ May Output:\n\t\/\/ [1,2,3,4]\n}\n\n\/\/ Sum sums items. Note: The items should be converted to int type,\n\/\/ or you'd get a result that you unexpected.\nfunc ExampleIntSet_Sum() {\n\ts1 := gset.NewIntSet(true)\n\ts1.Add([]int{1, 2, 3, 4}...)\n\tfmt.Println(s1.Sum())\n\n\t\/\/ Output:\n\t\/\/ 10\n}\n\n\/\/ Union returns a new set which is the union of `set` and `other`.\n\/\/ Which means, all the items in `newSet` are in `set` or in `other`.\nfunc ExampleIntSet_Union() {\n\ts1 := gset.NewIntSet(true)\n\ts1.Add([]int{1, 2, 3, 4}...)\n\ts2 := gset.NewIntSet(true)\n\ts2.Add([]int{1, 2, 4}...)\n\tfmt.Println(s1.Union(s2).Slice())\n\n\t\/\/ May Output:\n\t\/\/ [3 4 1 2]\n}\n\n\/\/ UnmarshalJSON implements the interface UnmarshalJSON for json.Unmarshal.\nfunc ExampleIntSet_UnmarshalJSON() {\n\tb := []byte(`{\"Id\":1,\"Name\":\"john\",\"Scores\":[100,99,98]}`)\n\ttype Student struct {\n\t\tId int\n\t\tName string\n\t\tScores *gset.IntSet\n\t}\n\ts := Student{}\n\tjson.Unmarshal(b, &s)\n\tfmt.Println(s)\n\n\t\/\/ May Output:\n\t\/\/ {1 john [100,99,98]}\n}\n\n\/\/ UnmarshalValue is an interface implement which sets any type of value for set.\nfunc ExampleIntSet_UnmarshalValue() {\n\tb := []byte(`{\"Id\":1,\"Name\":\"john\",\"Scores\":100,99,98}`)\n\ttype Student struct {\n\t\tId int\n\t\tName string\n\t\tScores *gset.IntSet\n\t}\n\ts := Student{}\n\tjson.Unmarshal(b, &s)\n\tfmt.Println(s)\n\n\t\/\/ May Output:\n\t\/\/ {1 john [100,99,98]}\n}\n\n\/\/ Walk applies a user supplied function `f` to every item of set.\nfunc ExampleIntSet_Walk() {\n\tvar (\n\t\tset gset.IntSet\n\t\tnames = g.SliceInt{1, 0}\n\t\tprefix = 10\n\t)\n\tset.Add(names...)\n\t\/\/ Add prefix for given table names.\n\tset.Walk(func(item int) int {\n\t\treturn prefix + item\n\t})\n\tfmt.Println(set.Slice())\n\n\t\/\/ May Output:\n\t\/\/ [12 60]\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/dorsha\/go2oo\/actions\"\n\t\"github.com\/dorsha\/go2oo\/restUtil\"\n)\n\n\/\/ consts for all cli commands\nconst (\n\tgetConfigItems = \"get-config-items\"\n\tgetContentPacks = \"show-content-packs\"\n\ttrigger = \"trigger\"\n)\n\nconst (\n\trestURI = \"\/rest\/latest\/\"\n)\n\nvar (\n\turl = flag.String(\"url\", \"http:\/\/localhost:8080\/oo\", \"The URL of Central (i.e. http:\/\/localhost:8080\/oo)\")\n\tuser = flag.String(\"user\", \"\", \"User name for Central\")\n\tpassword = flag.String(\"password\", \"\", \"Password for Central\")\n\taction = flag.String(\"action\", \"\", \"What do you want to do? Avialble actions: \"+getConfigItems+\",\"+getContentPacks+\",\"+trigger)\n\tuuid = flag.String(\"uuid\", \"\", \"Flow uuid\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\tif len(*action) == 0 {\n\t\tfmt.Println(\"Action must be specified\")\n\t}\n\n\tclient := restUtil.CreateHTTPClient()\n\trestURL := *url + restURI\n\n\tswitch {\n\tcase *action == getConfigItems:\n\t\tfmt.Println(\"Getting configuration items from: \" + *url)\n\t\tci := &actions.ConfigItem{}\n\t\trestUtil.Get(*client, restURL+\"\/config\", &ci.Props, *user, *password)\n\t\tci.HandleResponse()\n\tcase *action == getContentPacks:\n\t\tfmt.Println(\"Getting content packs items from: \" + *url)\n\t\tcps := &actions.ContentPacks{}\n\t\trestUtil.Get(*client, restURL+\"\/content-packs\", cps, *user, *password)\n\t\tcps.HandleResponse()\n\tcase *action == trigger:\n\t\tif len(*uuid) == 0 {\n\t\t\tfmt.Println(\"UUID must be specidied (--uuid)\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Println(\"Triggering flow: \" + *uuid)\n\t\ttrigger := &actions.Trigger{FlowUUID: *uuid}\n\t\ttriggerResp := new(actions.TriggerResponse)\n\t\trestUtil.Post(*client, restURL+\"executions\", trigger, triggerResp, *user, *password)\n\t\ttriggerResp.HandleResponse()\n\t}\n}\n<commit_msg>Fixed typo<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/dorsha\/go2oo\/actions\"\n\t\"github.com\/dorsha\/go2oo\/restUtil\"\n)\n\n\/\/ consts for all cli commands\nconst (\n\tgetConfigItems = \"get-config-items\"\n\tgetContentPacks = \"show-content-packs\"\n\ttrigger = \"trigger\"\n)\n\nconst (\n\trestURI = \"\/rest\/latest\/\"\n)\n\nvar (\n\turl = flag.String(\"url\", \"http:\/\/localhost:8080\/oo\", \"The URL of Central (i.e. http:\/\/localhost:8080\/oo)\")\n\tuser = flag.String(\"user\", \"\", \"User name for Central\")\n\tpassword = flag.String(\"password\", \"\", \"Password for Central\")\n\taction = flag.String(\"action\", \"\", \"What do you want to do? Avialble actions: \"+getConfigItems+\",\"+getContentPacks+\",\"+trigger)\n\tuuid = flag.String(\"uuid\", \"\", \"Flow uuid\")\n)\n\nfunc main() {\n\tflag.Parse()\n\n\tif len(*action) == 0 {\n\t\tfmt.Println(\"Action must be specified\")\n\t}\n\n\tclient := restUtil.CreateHTTPClient()\n\trestURL := *url + restURI\n\n\tswitch {\n\tcase *action == getConfigItems:\n\t\tfmt.Println(\"Getting configuration items from: \" + *url)\n\t\tci := &actions.ConfigItem{}\n\t\trestUtil.Get(*client, restURL+\"\/config\", &ci.Props, *user, *password)\n\t\tci.HandleResponse()\n\tcase *action == getContentPacks:\n\t\tfmt.Println(\"Getting content packs items from: \" + *url)\n\t\tcps := &actions.ContentPacks{}\n\t\trestUtil.Get(*client, restURL+\"\/content-packs\", cps, *user, *password)\n\t\tcps.HandleResponse()\n\tcase *action == trigger:\n\t\tif len(*uuid) == 0 {\n\t\t\tfmt.Println(\"UUID must be specified (--uuid)\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Println(\"Triggering flow: \" + *uuid)\n\t\ttrigger := &actions.Trigger{FlowUUID: *uuid}\n\t\ttriggerResp := new(actions.TriggerResponse)\n\t\trestUtil.Post(*client, restURL+\"executions\", trigger, triggerResp, *user, *password)\n\t\ttriggerResp.HandleResponse()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"runtime\"\n\n\t\"github.com\/Maki-Daisuke\/go-lines\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"golang.org\/x\/text\/encoding\/japanese\"\n\t\"golang.org\/x\/text\/transform\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\n\/\/ Exit codes are int values that represent an exit code for a particular error.\nconst (\n\tExitCodeOK int = 0\n\tExitCodeError int = 1 + iota\n)\n\n\/\/ EventFunc watches logcat line.\n\/\/ if line contains param.trigger, exec param.command.\ntype EventFunc func(param *CLIParameter, line *string, item *LogcatItem)\n\n\/\/ CLI is the command line object\ntype CLI struct {\n\tinStream io.Reader\n\toutStream, errStream io.Writer\n\teventFunc EventFunc\n\teventTrigger *regexp.Regexp\n}\n\n\/\/ CLIParameter represents parameters to execute command.\ntype CLIParameter struct {\n\tformat,\n\ttrigger,\n\tcommand *string\n}\n\nvar (\n\tformatter Formatter\n\tparser Parser\n\twriter io.Writer\n)\n\nfunc init() {\n}\n\n\/\/ Run invokes the CLI with the given arguments.\nfunc (cli *CLI) Run(args []string) int {\n\n\tparam := cli.initParameter(args)\n\terr := cli.verifyParameter(param)\n\tif err != nil {\n\t\tfmt.Fprintln(cli.errStream, err.Error())\n\t\tlog.Debug(err.Error())\n\t\treturn ExitCodeError\n\t}\n\n\t\/\/ let's start\n\tfor line := range lines.Lines(cli.inStream) {\n\t\titem := cli.parseLine(param, line)\n\t\tcli.eventFunc(param, &line, &item)\n\t}\n\n\tlog.Debugf(\"run finished\")\n\treturn ExitCodeOK\n}\n\n\/\/ exec parse and format\nfunc (cli *CLI) parseLine(param *CLIParameter, line string) LogcatItem {\n\titem := parser.Parse(line)\n\tif item == nil {\n\t\treturn nil\n\t}\n\toutput := formatter.Format(*param.format, &item)\n\tfmt.Fprintln(writer, output)\n\treturn item\n}\n\n\/\/ dont execute command.\nfunc (cli *CLI) execCommandNot(param *CLIParameter, line *string, item *LogcatItem) {\n}\n\n\/\/ execute command if\nfunc (cli *CLI) execCommand(param *CLIParameter, line *string, item *LogcatItem) {\n\tif !cli.eventTrigger.MatchString(*line) {\n\t\treturn\n\t}\n\tlog.Debugf(\"--command start: \\\"%s\\\" on \\\"%s\\\"\", *param.command, *line)\n\n\tfor k := range formatMap {\n\t\tos.Setenv(k, (*item)[k])\n\t}\n\n\tvar cmd *exec.Cmd\n\tif runtime.GOOS == \"windows\" {\n\t\tcmd = exec.Command(os.Getenv(\"COMSPEC\"), \"\/c\", *param.command)\n\t} else {\n\t\tcmd = exec.Command(os.Getenv(\"SHELL\"), \"-c\", *param.command)\n\t}\n\tcmd.Stdout = cli.errStream\n\terr := cmd.Start()\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\t\/\/ cmd.Wait()\n\n\tlog.Debugf(\"--command finish: \\\"%s\\\"\", *param.command)\n}\n\nfunc (cli *CLI) initParameter(args []string) *CLIParameter {\n\t\/\/ setup kingpin & parse args\n\tvar (\n\t\tapp = kingpin.New(Name, Message[\"commandDescription\"])\n\t\tformat = app.Arg(\"format\", Message[\"helpFormat\"]).Default(DefaultFormat).String()\n\t\ttrigger = app.Flag(\"on\", Message[\"helpTrigger\"]).Short('o').String()\n\t\tcommand = app.Flag(\"command\", Message[\"helpCommand\"]).Short('c').String()\n\t\tencode = app.Flag(\"encode\", Message[\"helpEncode\"]).String()\n\t\ttoCsv = app.Flag(\"toCsv\", Message[\"helpToCsv\"]).Bool()\n\t)\n\tapp.HelpFlag.Short('h')\n\tapp.Version(Version)\n\tkingpin.MustParse(app.Parse(args[1:]))\n\n\t\/\/ if trigger not exists, not execute anything.\n\tif *trigger == \"\" {\n\t\tcli.eventFunc = cli.execCommandNot\n\t} else {\n\t\tcli.eventFunc = cli.execCommand\n\t\tcli.eventTrigger = regexp.MustCompile(*trigger)\n\t}\n\n\tif *toCsv {\n\t\tformatter = &csvFormatter{}\n\t} else {\n\t\tformatter = &defaultFormatter{}\n\t}\n\n\tif *encode == \"shift-jis\" {\n\t\twriter = transform.NewWriter(cli.outStream, japanese.ShiftJIS.NewEncoder())\n\t} else {\n\t\twriter = cli.outStream\n\t}\n\n\t\/\/ convert format (long => short)\n\tnormarized := formatter.Normarize(*format)\n\tformat = &normarized\n\tparser = &logcatParser{}\n\n\tlog.WithFields(log.Fields{\"format\": *format, \"trigger\": *trigger, \"command\": *command}).Debug(\"Parameter initialized.\")\n\treturn &CLIParameter{\n\t\tformat: format,\n\t\ttrigger: trigger,\n\t\tcommand: command,\n\t}\n}\n\nfunc (cli *CLI) verifyParameter(param *CLIParameter) error {\n\treturn formatter.Verify(*param.format)\n}\n<commit_msg>change flag<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"runtime\"\n\n\t\"github.com\/Maki-Daisuke\/go-lines\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"golang.org\/x\/text\/encoding\/japanese\"\n\t\"golang.org\/x\/text\/transform\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\n\/\/ Exit codes are int values that represent an exit code for a particular error.\nconst (\n\tExitCodeOK int = 0\n\tExitCodeError int = 1 + iota\n)\n\n\/\/ EventFunc watches logcat line.\n\/\/ if line contains param.trigger, exec param.command.\ntype EventFunc func(param *CLIParameter, line *string, item *LogcatItem)\n\n\/\/ CLI is the command line object\ntype CLI struct {\n\tinStream io.Reader\n\toutStream, errStream io.Writer\n\teventFunc EventFunc\n\teventTrigger *regexp.Regexp\n}\n\n\/\/ CLIParameter represents parameters to execute command.\ntype CLIParameter struct {\n\tformat,\n\ttrigger,\n\tcommand *string\n}\n\nvar (\n\tformatter Formatter\n\tparser Parser\n\twriter io.Writer\n)\n\nfunc init() {\n}\n\n\/\/ Run invokes the CLI with the given arguments.\nfunc (cli *CLI) Run(args []string) int {\n\n\tparam := cli.initParameter(args)\n\terr := cli.verifyParameter(param)\n\tif err != nil {\n\t\tfmt.Fprintln(cli.errStream, err.Error())\n\t\tlog.Debug(err.Error())\n\t\treturn ExitCodeError\n\t}\n\n\t\/\/ let's start\n\tfor line := range lines.Lines(cli.inStream) {\n\t\titem := cli.parseLine(param, line)\n\t\tcli.eventFunc(param, &line, &item)\n\t}\n\n\tlog.Debugf(\"run finished\")\n\treturn ExitCodeOK\n}\n\n\/\/ exec parse and format\nfunc (cli *CLI) parseLine(param *CLIParameter, line string) LogcatItem {\n\titem := parser.Parse(line)\n\tif item == nil {\n\t\treturn nil\n\t}\n\toutput := formatter.Format(*param.format, &item)\n\tfmt.Fprintln(writer, output)\n\treturn item\n}\n\n\/\/ dont execute command.\nfunc (cli *CLI) execCommandNot(param *CLIParameter, line *string, item *LogcatItem) {\n}\n\n\/\/ execute command if\nfunc (cli *CLI) execCommand(param *CLIParameter, line *string, item *LogcatItem) {\n\tif !cli.eventTrigger.MatchString(*line) {\n\t\treturn\n\t}\n\tlog.Debugf(\"--command start: \\\"%s\\\" on \\\"%s\\\"\", *param.command, *line)\n\n\tfor k := range formatMap {\n\t\tos.Setenv(k, (*item)[k])\n\t}\n\n\tvar cmd *exec.Cmd\n\tif runtime.GOOS == \"windows\" {\n\t\tcmd = exec.Command(os.Getenv(\"COMSPEC\"), \"\/c\", *param.command)\n\t} else {\n\t\tcmd = exec.Command(os.Getenv(\"SHELL\"), \"-c\", *param.command)\n\t}\n\tcmd.Stdout = cli.errStream\n\terr := cmd.Start()\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\t\/\/ cmd.Wait()\n\n\tlog.Debugf(\"--command finish: \\\"%s\\\"\", *param.command)\n}\n\nfunc (cli *CLI) initParameter(args []string) *CLIParameter {\n\t\/\/ setup kingpin & parse args\n\tvar (\n\t\tapp = kingpin.New(Name, Message[\"commandDescription\"])\n\t\tformat = app.Arg(\"format\", Message[\"helpFormat\"]).Default(DefaultFormat).String()\n\t\ttrigger = app.Flag(\"on\", Message[\"helpTrigger\"]).Short('o').String()\n\t\tcommand = app.Flag(\"command\", Message[\"helpCommand\"]).Short('c').String()\n\t\tencode = app.Flag(\"encode\", Message[\"helpEncode\"]).String()\n\t\ttoCsv = app.Flag(\"to-csv\", Message[\"helpToCsv\"]).Bool()\n\t)\n\tapp.HelpFlag.Short('h')\n\tapp.Version(Version)\n\tkingpin.MustParse(app.Parse(args[1:]))\n\n\t\/\/ if trigger not exists, not execute anything.\n\tif *trigger == \"\" {\n\t\tcli.eventFunc = cli.execCommandNot\n\t} else {\n\t\tcli.eventFunc = cli.execCommand\n\t\tcli.eventTrigger = regexp.MustCompile(*trigger)\n\t}\n\n\tif *toCsv {\n\t\tformatter = &csvFormatter{}\n\t} else {\n\t\tformatter = &defaultFormatter{}\n\t}\n\n\tif *encode == \"shift-jis\" {\n\t\twriter = transform.NewWriter(cli.outStream, japanese.ShiftJIS.NewEncoder())\n\t} else {\n\t\twriter = cli.outStream\n\t}\n\n\t\/\/ convert format (long => short)\n\tnormarized := formatter.Normarize(*format)\n\tformat = &normarized\n\tparser = &logcatParser{}\n\n\tlog.WithFields(log.Fields{\"format\": *format, \"trigger\": *trigger, \"command\": *command}).Debug(\"Parameter initialized.\")\n\treturn &CLIParameter{\n\t\tformat: format,\n\t\ttrigger: trigger,\n\t\tcommand: command,\n\t}\n}\n\nfunc (cli *CLI) verifyParameter(param *CLIParameter) error {\n\treturn formatter.Verify(*param.format)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"os\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\ntype CLI struct {\n\tinstaller Installer\n\tConfig *Config\n\tlogger *Logger\n\tError error\n}\n\ntype PackageFile struct {\n\tPackages []string\n}\n\nconst (\n\tAPP_VERSION string = \"0.1.0\"\n\tDEFAULT_REPOSITORY string = \"github.com\"\n)\n\nfunc NewCli() *CLI {\n\tlogger := NewLogger(os.Stdout, os.Stderr)\n\tc := &CLI{\n\t\tinstaller: NewSalesforceInstaller(logger),\n\t\tlogger: logger,\n\t\tConfig: &Config{},\n\t}\n\treturn c\n}\n\nfunc (c *CLI) Run(args []string) (err error) {\n\tapp := cli.NewApp()\n\tapp.Name = \"spm\"\n\n\tapp.Usage = \"Salesforce Package Manager\"\n\tapp.Version = APP_VERSION\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"install\",\n\t\t\tAliases: []string{\"i\"},\n\t\t\tUsage: \"Install salesforce packages on public remote repository(i.g. github)\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"username, u\",\n\t\t\t\t\tDestination: &c.Config.Username,\n\t\t\t\t\tEnvVar: \"SF_USERNAME\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"password, p\",\n\t\t\t\t\tDestination: &c.Config.Password,\n\t\t\t\t\tEnvVar: \"SF_PASSWORD\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"endpoint, e\",\n\t\t\t\t\tValue: \"login.salesforce.com\",\n\t\t\t\t\tDestination: &c.Config.Endpoint,\n\t\t\t\t\tEnvVar: \"SF_ENDPOINT\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"apiversion\",\n\t\t\t\t\tValue: \"38.0\",\n\t\t\t\t\tDestination: &c.Config.ApiVersion,\n\t\t\t\t\tEnvVar: \"SF_APIVERSION\",\n\t\t\t\t},\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"pollSeconds\",\n\t\t\t\t\tValue: 5,\n\t\t\t\t\tDestination: &c.Config.PollSeconds,\n\t\t\t\t\tEnvVar: \"SF_POLLSECONDS\",\n\t\t\t\t},\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"timeoutSeconds\",\n\t\t\t\t\tValue: 0,\n\t\t\t\t\tDestination: &c.Config.TimeoutSeconds,\n\t\t\t\t\tEnvVar: \"SF_TIMEOUTSECONDS\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"packages, P\",\n\t\t\t\t\tDestination: &c.Config.PackageFile,\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"clone-only\",\n\t\t\t\t\tDestination: &c.Config.IsCloneOnly,\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"directory, -d\",\n\t\t\t\t\tDestination: &c.Config.Directory,\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\turls, err := loadInstallUrls(c.Config.PackageFile, ctx.Args().First())\n\t\t\t\tif err != nil {\n\t\t\t\t\tc.Error = err\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif len(urls) == 0 {\n\t\t\t\t\tc.Error = errors.New(\"Repository not specified\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\terr = c.installer.Initialize(c.Config)\n\t\t\t\tif err != nil {\n\t\t\t\t\tc.Error = err\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tc.Error = c.installer.Install(urls)\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\n\tapp.Run(args)\n\tif c.Error != nil {\n\t\tc.logger.Error(c.Error)\n\t}\n\treturn c.Error\n}\n<commit_msg>Add download subcommand<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"os\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\ntype CLI struct {\n\tinstaller Installer\n\tdownloader Downloader\n\tConfig *Config\n\tlogger *Logger\n\tError error\n}\n\ntype PackageFile struct {\n\tPackages []string\n}\n\nconst (\n\tAPP_VERSION string = \"0.1.0\"\n\tDEFAULT_REPOSITORY string = \"github.com\"\n)\n\nfunc NewCli() *CLI {\n\tlogger := NewLogger(os.Stdout, os.Stderr)\n\tc := &CLI{\n\t\tinstaller: NewSalesforceInstaller(logger),\n\t\tdownloader: NewSalesforceDownloader(logger),\n\t\tlogger: logger,\n\t\tConfig: &Config{},\n\t}\n\treturn c\n}\n\nfunc (c *CLI) Run(args []string) (err error) {\n\tapp := cli.NewApp()\n\tapp.Name = \"spm\"\n\n\tapp.Usage = \"Salesforce Package Manager\"\n\tapp.Version = APP_VERSION\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"install\",\n\t\t\tAliases: []string{\"i\"},\n\t\t\tUsage: \"Install salesforce packages on public remote repository(i.g. github)\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"username, u\",\n\t\t\t\t\tDestination: &c.Config.Username,\n\t\t\t\t\tEnvVar: \"SF_USERNAME\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"password, p\",\n\t\t\t\t\tDestination: &c.Config.Password,\n\t\t\t\t\tEnvVar: \"SF_PASSWORD\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"endpoint, e\",\n\t\t\t\t\tValue: \"login.salesforce.com\",\n\t\t\t\t\tDestination: &c.Config.Endpoint,\n\t\t\t\t\tEnvVar: \"SF_ENDPOINT\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"apiversion\",\n\t\t\t\t\tValue: \"38.0\",\n\t\t\t\t\tDestination: &c.Config.ApiVersion,\n\t\t\t\t\tEnvVar: \"SF_APIVERSION\",\n\t\t\t\t},\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"pollSeconds\",\n\t\t\t\t\tValue: 5,\n\t\t\t\t\tDestination: &c.Config.PollSeconds,\n\t\t\t\t\tEnvVar: \"SF_POLLSECONDS\",\n\t\t\t\t},\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"timeoutSeconds\",\n\t\t\t\t\tValue: 0,\n\t\t\t\t\tDestination: &c.Config.TimeoutSeconds,\n\t\t\t\t\tEnvVar: \"SF_TIMEOUTSECONDS\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"packages, P\",\n\t\t\t\t\tDestination: &c.Config.PackageFile,\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"clone-only\",\n\t\t\t\t\tDestination: &c.Config.IsCloneOnly,\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"directory, d\",\n\t\t\t\t\tDestination: &c.Config.Directory,\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\turls, err := loadInstallUrls(c.Config.PackageFile, ctx.Args().First())\n\t\t\t\tif err != nil {\n\t\t\t\t\tc.Error = err\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif len(urls) == 0 {\n\t\t\t\t\tc.Error = errors.New(\"Repository not specified\")\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\terr = c.installer.Initialize(c.Config)\n\t\t\t\tif err != nil {\n\t\t\t\t\tc.Error = err\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tc.Error = c.installer.Install(urls)\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"clone\",\n\t\t\tAliases: []string{\"c\"},\n\t\t\tUsage: \"Download metadata from salesforce organization\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"username, u\",\n\t\t\t\t\tDestination: &c.Config.Username,\n\t\t\t\t\tEnvVar: \"SF_USERNAME\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"password, p\",\n\t\t\t\t\tDestination: &c.Config.Password,\n\t\t\t\t\tEnvVar: \"SF_PASSWORD\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"endpoint, e\",\n\t\t\t\t\tValue: \"login.salesforce.com\",\n\t\t\t\t\tDestination: &c.Config.Endpoint,\n\t\t\t\t\tEnvVar: \"SF_ENDPOINT\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"apiversion\",\n\t\t\t\t\tValue: \"38.0\",\n\t\t\t\t\tDestination: &c.Config.ApiVersion,\n\t\t\t\t\tEnvVar: \"SF_APIVERSION\",\n\t\t\t\t},\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"pollSeconds\",\n\t\t\t\t\tValue: 5,\n\t\t\t\t\tDestination: &c.Config.PollSeconds,\n\t\t\t\t\tEnvVar: \"SF_POLLSECONDS\",\n\t\t\t\t},\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"timeoutSeconds\",\n\t\t\t\t\tValue: 0,\n\t\t\t\t\tDestination: &c.Config.TimeoutSeconds,\n\t\t\t\t\tEnvVar: \"SF_TIMEOUTSECONDS\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{\n\t\t\t\t\tName: \"package, P\",\n\t\t\t\t\tDestination: &c.Config.PackageFile,\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: func(ctx *cli.Context) error {\n\t\t\t\terr = c.installer.Initialize(c.Config)\n\t\t\t\tif err != nil {\n\t\t\t\t\tc.Error = err\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tc.Error = c.downloader.Download()\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\n\tapp.Run(args)\n\tif c.Error != nil {\n\t\tc.logger.Error(c.Error)\n\t}\n\treturn c.Error\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014-2015 Apptimist, Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build !nocli\n\npackage main\n\nimport (\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"syscall\"\n\n\t\"github.com\/rocky\/go-gnureadline\"\n)\n\nfunc (adm *Adm) CLI() (err error) {\n\tproc, err := os.FindProcess(os.Getpid())\n\tif err != nil {\n\t\treturn\n\t}\n\thome := os.Getenv(\"HOME\")\n\thistory := filepath.Join(home, \".asnadm_history\")\n\tdefer gnureadline.WriteHistory(history)\n\trc := filepath.Join(home, \".asnadmrc\")\n\tprompt := \"asnadm: \"\n\tif s := adm.asn.Debug.String(); s != \"\" {\n\t\tprompt = s + \"# \"\n\t}\n\tif _, err = os.Stat(rc); err == nil {\n\t\tif err = gnureadline.ReadInitFile(rc); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif _, err = os.Stat(history); err == nil {\n\t\tgnureadline.ReadHistory(history)\n\t} else {\n\t\terr = nil\n\t}\n\tgnureadline.StifleHistory(32)\n\tdone := make(Done, 1)\n\tdefer close(done)\n\twinch := make(chan os.Signal, 1)\n\tsignal.Notify(winch, syscall.SIGWINCH)\n\tdefer signal.Stop(winch)\n\tquit := false\n\tline := \"\"\n\tgo func() {\n\t\tfor {\n\t\t\tvar rlerr error\n\t\t\tline = \"\"\n\t\t\tline, rlerr = gnureadline.Readline(prompt, true)\n\t\t\tif rlerr != nil || quit {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\terr = adm.cmdLine(line)\n\t\t\tif err != nil || quit {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tdone <- nil\n\t}()\n\tdefer gnureadline.Rl_reset_terminal(\"\")\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\treturn\n\t\tcase pdu, opened := <-adm.clich:\n\t\t\tif !opened {\n\t\t\t\tquit = true\n\t\t\t\tproc.Signal(os.Kill)\n\t\t\t\t<-done\n\t\t\t\treturn\n\t\t\t} else if line == \"\" {\n\t\t\t\tprintln()\n\t\t\t\tadm.ObjDump(pdu)\n\t\t\t\tgnureadline.Rl_resize_terminal()\n\t\t\t} else {\n\t\t\t\tadm.ObjDump(pdu)\n\t\t\t}\n\t\tcase <-winch:\n\t\t\tgnureadline.Rl_resize_terminal()\n\t\t}\n\t}\n}\n<commit_msg>Print but don't quit cli on error<commit_after>\/\/ Copyright 2014-2015 Apptimist, Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build !nocli\n\npackage main\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"syscall\"\n\n\t\"github.com\/rocky\/go-gnureadline\"\n)\n\nfunc (adm *Adm) CLI() (err error) {\n\tproc, err := os.FindProcess(os.Getpid())\n\tif err != nil {\n\t\treturn\n\t}\n\thome := os.Getenv(\"HOME\")\n\thistory := filepath.Join(home, \".asnadm_history\")\n\tdefer gnureadline.WriteHistory(history)\n\trc := filepath.Join(home, \".asnadmrc\")\n\tprompt := \"asnadm: \"\n\tif s := adm.asn.Debug.String(); s != \"\" {\n\t\tprompt = s + \"# \"\n\t}\n\tif _, err = os.Stat(rc); err == nil {\n\t\tif err = gnureadline.ReadInitFile(rc); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tif _, err = os.Stat(history); err == nil {\n\t\tgnureadline.ReadHistory(history)\n\t} else {\n\t\terr = nil\n\t}\n\tgnureadline.StifleHistory(32)\n\tdone := make(Done, 1)\n\tdefer close(done)\n\twinch := make(chan os.Signal, 1)\n\tsignal.Notify(winch, syscall.SIGWINCH)\n\tdefer signal.Stop(winch)\n\tquit := false\n\tline := \"\"\n\tgo func() {\n\t\tfor {\n\t\t\tvar rlerr error\n\t\t\tline = \"\"\n\t\t\tline, rlerr = gnureadline.Readline(prompt, true)\n\t\t\tif rlerr != nil || quit {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\terr = adm.cmdLine(line)\n\t\t\tif err != nil {\n\t\t\t\tprintln(err.Error())\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tquit = true\n\t\t\t\t} else {\n\t\t\t\t\terr = nil\n\t\t\t\t}\n\t\t\t}\n\t\t\tif quit {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tdone <- nil\n\t}()\n\tdefer gnureadline.Rl_reset_terminal(\"\")\n\tfor !quit {\n\t\tselect {\n\t\tcase <-done:\n\t\t\treturn\n\t\tcase pdu, opened := <-adm.clich:\n\t\t\tif !opened {\n\t\t\t\tquit = true\n\t\t\t\tproc.Signal(os.Kill)\n\t\t\t\t<-done\n\t\t\t} else if line == \"\" {\n\t\t\t\tprintln()\n\t\t\t\tadm.ObjDump(pdu)\n\t\t\t\tgnureadline.Rl_resize_terminal()\n\t\t\t} else {\n\t\t\t\tadm.ObjDump(pdu)\n\t\t\t}\n\t\tcase <-winch:\n\t\t\tgnureadline.Rl_resize_terminal()\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"text\/template\"\n\n\t\"github.com\/k0kubun\/pp\"\n)\n\nconst helpTemplate = `Execute commands via docker-compose\n\nUsage:\n {{ .Name }} COMMAND [args...]\n {{ .Name }} COMMAND -h|--help\n {{ .Name }} [options]\n\nOptions:\n -h, --help Show this\n -v, --version Show {{ .Name }} version\n --debug Debug context and configuration\n\nCommands:\n{{- range $name, $sub := .Substitution }}\n {{ printf $.NameFormat $name }}{{ if ne $sub.Summary \"\" }} # {{ $sub.Summary }}{{ end }}\n{{- end }}\n`\n\n\/\/ CLI is an object holding states\ntype CLI struct {\n\t*Context\n\tConfig *Config\n\tArgs []string\n\tRunInContainer bool\n\n\tStdin io.Reader\n\tStdout io.Writer\n\tStderr io.Writer\n}\n\n\/\/ NewCLI creates a new CLI instance\nfunc NewCLI(ctx *Context, cfg *Config, args []string) *CLI {\n\treturn &CLI{\n\t\tContext: ctx,\n\t\tConfig: cfg,\n\t\tArgs: args[1:],\n\t\tRunInContainer: true,\n\n\t\tStdin: os.Stdin,\n\t\tStdout: os.Stdout,\n\t\tStderr: os.Stderr,\n\t}\n}\n\n\/\/ Run executes commands\nfunc (c *CLI) Run() error {\n\tc.setup()\n\tc.substituteCommand()\n\n\tswitch c.Args[0] {\n\tcase \"-h\", \"--help\", \".help\":\n\t\treturn c.ExecHelp()\n\tcase \"-v\", \"--version\":\n\t\treturn c.ExecVersion()\n\tcase \"--debug\":\n\t\treturn c.ExecDebug()\n\tcase \".sub-help\":\n\t\treturn c.ExecSubHelp()\n\t}\n\n\tif c.RunInContainer {\n\t\treturn c.run()\n\t}\n\n\treturn c.exec(c.Args[0], c.Args[1:]...)\n}\n\nfunc (c *CLI) setup() {\n\tos.Setenv(\"COMPOSE_PROJECT_NAME\", c.Config.ProjectName)\n\tos.Setenv(\"DOCKER_HOST_IP\", c.IP)\n}\n\nfunc (c *CLI) substituteCommand() {\n\tif len(c.Args) == 0 {\n\t\tc.Args = []string{\".help\"}\n\t\treturn\n\t}\n\n\tif s, ok := c.Substitution[c.Args[0]]; ok {\n\t\tc.Args[0] = s.Command\n\t\tc.RunInContainer = s.RunInContainer\n\n\t\tif s.HelpFile != \"\" && len(c.Args) > 1 {\n\t\t\tswitch c.Args[1] {\n\t\t\tcase \"-h\", \"--help\":\n\t\t\t\tc.Args = []string{\".sub-help\", s.HelpFile}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *CLI) exec(name string, args ...string) error {\n\tcmd := exec.Command(name, args...)\n\tif name == \"docker-compose\" {\n\t\tcmd.Dir = c.BaseDir\n\t} else {\n\t\tcmd.Dir = c.RootDir\n\t}\n\tcmd.Stdin = c.Stdin\n\tcmd.Stdout = c.Stdout\n\tcmd.Stderr = c.Stderr\n\treturn cmd.Run()\n}\n\nfunc (c *CLI) run() error {\n\tif err := c.exec(\"docker-compose\", \"up\", \"-d\"); err != nil {\n\t\treturn err\n\t}\n\n\targs := append([]string{\n\t\t\"exec\",\n\t\tc.Config.MainService,\n\t}, c.Args...)\n\n\treturn c.exec(\"docker-compose\", args...)\n}\n\n\/\/ ExecVersion prints version info\nfunc (c *CLI) ExecVersion() error {\n\tfmt.Fprintf(c.Stdout, \"%s (revision %s)\\n\", Version, Revision)\n\treturn nil\n}\n\n\/\/ ExecDebug prints internal state objects\nfunc (c *CLI) ExecDebug() error {\n\tpp.Fprintln(c.Stdout, c.Context)\n\tpp.Fprintln(c.Stdout, c.Config)\n\treturn nil\n}\n\n\/\/ ExecHelp shows help contents\nfunc (c *CLI) ExecHelp() error {\n\tmaxNameLen := 0\n\tfor name := range c.Substitution {\n\t\tif l := len(name); l > maxNameLen {\n\t\t\tmaxNameLen = l\n\t\t}\n\t}\n\n\tfor _, s := range c.Substitution {\n\t\tif s.HelpFile == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\ts.Summary, _ = loadHelpFile(s.HelpFile)\n\t}\n\n\ttmpl := template.Must(template.New(\"help\").Parse(helpTemplate))\n\treturn tmpl.Execute(c.Stderr, map[string]interface{}{\n\t\t\"Substitution\": c.Substitution,\n\t\t\"NameFormat\": fmt.Sprintf(\"%%-%ds\", maxNameLen+1),\n\t\t\"Name\": \"rid\",\n\t})\n}\n\n\/\/ ExecSubHelp shows help contents for a custom sub-command\nfunc (c *CLI) ExecSubHelp() error {\n\t_, description := loadHelpFile(c.Args[1])\n\tfmt.Fprint(c.Stderr, description)\n\treturn nil\n}\n<commit_msg>Remove orphans<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"text\/template\"\n\n\t\"github.com\/k0kubun\/pp\"\n)\n\nconst helpTemplate = `Execute commands via docker-compose\n\nUsage:\n {{ .Name }} COMMAND [args...]\n {{ .Name }} COMMAND -h|--help\n {{ .Name }} [options]\n\nOptions:\n -h, --help Show this\n -v, --version Show {{ .Name }} version\n --debug Debug context and configuration\n\nCommands:\n{{- range $name, $sub := .Substitution }}\n {{ printf $.NameFormat $name }}{{ if ne $sub.Summary \"\" }} # {{ $sub.Summary }}{{ end }}\n{{- end }}\n`\n\n\/\/ CLI is an object holding states\ntype CLI struct {\n\t*Context\n\tConfig *Config\n\tArgs []string\n\tRunInContainer bool\n\n\tStdin io.Reader\n\tStdout io.Writer\n\tStderr io.Writer\n}\n\n\/\/ NewCLI creates a new CLI instance\nfunc NewCLI(ctx *Context, cfg *Config, args []string) *CLI {\n\treturn &CLI{\n\t\tContext: ctx,\n\t\tConfig: cfg,\n\t\tArgs: args[1:],\n\t\tRunInContainer: true,\n\n\t\tStdin: os.Stdin,\n\t\tStdout: os.Stdout,\n\t\tStderr: os.Stderr,\n\t}\n}\n\n\/\/ Run executes commands\nfunc (c *CLI) Run() error {\n\tc.setup()\n\tc.substituteCommand()\n\n\tswitch c.Args[0] {\n\tcase \"-h\", \"--help\", \".help\":\n\t\treturn c.ExecHelp()\n\tcase \"-v\", \"--version\":\n\t\treturn c.ExecVersion()\n\tcase \"--debug\":\n\t\treturn c.ExecDebug()\n\tcase \".sub-help\":\n\t\treturn c.ExecSubHelp()\n\t}\n\n\tif c.RunInContainer {\n\t\treturn c.run()\n\t}\n\n\treturn c.exec(c.Args[0], c.Args[1:]...)\n}\n\nfunc (c *CLI) setup() {\n\tos.Setenv(\"COMPOSE_PROJECT_NAME\", c.Config.ProjectName)\n\tos.Setenv(\"DOCKER_HOST_IP\", c.IP)\n}\n\nfunc (c *CLI) substituteCommand() {\n\tif len(c.Args) == 0 {\n\t\tc.Args = []string{\".help\"}\n\t\treturn\n\t}\n\n\tif s, ok := c.Substitution[c.Args[0]]; ok {\n\t\tc.Args[0] = s.Command\n\t\tc.RunInContainer = s.RunInContainer\n\n\t\tif s.HelpFile != \"\" && len(c.Args) > 1 {\n\t\t\tswitch c.Args[1] {\n\t\t\tcase \"-h\", \"--help\":\n\t\t\t\tc.Args = []string{\".sub-help\", s.HelpFile}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (c *CLI) exec(name string, args ...string) error {\n\tcmd := exec.Command(name, args...)\n\tif name == \"docker-compose\" {\n\t\tcmd.Dir = c.BaseDir\n\t} else {\n\t\tcmd.Dir = c.RootDir\n\t}\n\tcmd.Stdin = c.Stdin\n\tcmd.Stdout = c.Stdout\n\tcmd.Stderr = c.Stderr\n\treturn cmd.Run()\n}\n\nfunc (c *CLI) run() error {\n\tif err := c.exec(\"docker-compose\", \"up\", \"-d\", \"--remove-orphans\"); err != nil {\n\t\treturn err\n\t}\n\n\targs := append([]string{\n\t\t\"exec\",\n\t\tc.Config.MainService,\n\t}, c.Args...)\n\n\treturn c.exec(\"docker-compose\", args...)\n}\n\n\/\/ ExecVersion prints version info\nfunc (c *CLI) ExecVersion() error {\n\tfmt.Fprintf(c.Stdout, \"%s (revision %s)\\n\", Version, Revision)\n\treturn nil\n}\n\n\/\/ ExecDebug prints internal state objects\nfunc (c *CLI) ExecDebug() error {\n\tpp.Fprintln(c.Stdout, c.Context)\n\tpp.Fprintln(c.Stdout, c.Config)\n\treturn nil\n}\n\n\/\/ ExecHelp shows help contents\nfunc (c *CLI) ExecHelp() error {\n\tmaxNameLen := 0\n\tfor name := range c.Substitution {\n\t\tif l := len(name); l > maxNameLen {\n\t\t\tmaxNameLen = l\n\t\t}\n\t}\n\n\tfor _, s := range c.Substitution {\n\t\tif s.HelpFile == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\ts.Summary, _ = loadHelpFile(s.HelpFile)\n\t}\n\n\ttmpl := template.Must(template.New(\"help\").Parse(helpTemplate))\n\treturn tmpl.Execute(c.Stderr, map[string]interface{}{\n\t\t\"Substitution\": c.Substitution,\n\t\t\"NameFormat\": fmt.Sprintf(\"%%-%ds\", maxNameLen+1),\n\t\t\"Name\": \"rid\",\n\t})\n}\n\n\/\/ ExecSubHelp shows help contents for a custom sub-command\nfunc (c *CLI) ExecSubHelp() error {\n\t_, description := loadHelpFile(c.Args[1])\n\tfmt.Fprint(c.Stderr, description)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Let's Encrypt client to go!\n\/\/ CLI application for generating Let's Encrypt certificates using the ACME package.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/urfave\/cli\"\n\t\"github.com\/xenolf\/lego\/acme\"\n)\n\n\/\/ Logger is used to log errors; if nil, the default log.Logger is used.\nvar Logger *log.Logger\n\n\/\/ logger is an helper function to retrieve the available logger\nfunc logger() *log.Logger {\n\tif Logger == nil {\n\t\tLogger = log.New(os.Stderr, \"\", log.LstdFlags)\n\t}\n\treturn Logger\n}\n\nvar gittag string\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"lego\"\n\tapp.Usage = \"Let's Encrypt client written in Go\"\n\n\tversion := \"0.3.1\"\n\tif strings.HasPrefix(gittag, \"v\") {\n\t\tversion = gittag\n\t}\n\n\tapp.Version = version\n\n\tacme.UserAgent = \"lego\/\" + app.Version\n\n\tdefaultPath := \"\"\n\tcwd, err := os.Getwd()\n\tif err == nil {\n\t\tdefaultPath = path.Join(cwd, \".lego\")\n\t}\n\n\tapp.Before = func(c *cli.Context) error {\n\t\tif c.GlobalString(\"path\") == \"\" {\n\t\t\tlogger().Fatal(\"Could not determine current working directory. Please pass --path.\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"run\",\n\t\t\tUsage: \"Register an account, then create and install a certificate\",\n\t\t\tAction: run,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"no-bundle\",\n\t\t\t\t\tUsage: \"Do not create a certificate bundle by adding the issuers certificate to the new certificate.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"must-staple\",\n\t\t\t\t\tUsage: \"Include the OCSP must staple TLS extension in the CSR and generated certificate. Only works if the CSR is generated by lego.\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"revoke\",\n\t\t\tUsage: \"Revoke a certificate\",\n\t\t\tAction: revoke,\n\t\t},\n\t\t{\n\t\t\tName: \"renew\",\n\t\t\tUsage: \"Renew a certificate\",\n\t\t\tAction: renew,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"days\",\n\t\t\t\t\tValue: 0,\n\t\t\t\t\tUsage: \"The number of days left on a certificate to renew it.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"reuse-key\",\n\t\t\t\t\tUsage: \"Used to indicate you want to reuse your current private key for the new certificate.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"no-bundle\",\n\t\t\t\t\tUsage: \"Do not create a certificate bundle by adding the issuers certificate to the new certificate.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"must-staple\",\n\t\t\t\t\tUsage: \"Include the OCSP must staple TLS extension in the CSR and generated certificate. Only works if the CSR is generated by lego.\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"dnshelp\",\n\t\t\tUsage: \"Shows additional help for the --dns global option\",\n\t\t\tAction: dnshelp,\n\t\t},\n\t}\n\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"domains, d\",\n\t\t\tUsage: \"Add domains to the process\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"csr, c\",\n\t\t\tUsage: \"Certificate signing request filename, if an external CSR is to be used\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"server, s\",\n\t\t\tValue: \"https:\/\/acme-v01.api.letsencrypt.org\/directory\",\n\t\t\tUsage: \"CA hostname (and optionally :port). The server certificate must be trusted in order to avoid further modifications to the client.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"email, m\",\n\t\t\tUsage: \"Email used for registration and recovery contact.\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"accept-tos, a\",\n\t\t\tUsage: \"By setting this flag to true you indicate that you accept the current Let's Encrypt terms of service.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"key-type, k\",\n\t\t\tValue: \"rsa2048\",\n\t\t\tUsage: \"Key type to use for private keys. Supported: rsa2048, rsa4096, rsa8192, ec256, ec384\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"path\",\n\t\t\tUsage: \"Directory to use for storing the data\",\n\t\t\tValue: defaultPath,\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"exclude, x\",\n\t\t\tUsage: \"Explicitly disallow solvers by name from being used. Solvers: \\\"http-01\\\", \\\"tls-sni-01\\\".\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"webroot\",\n\t\t\tUsage: \"Set the webroot folder to use for HTTP based challenges to write directly in a file in .well-known\/acme-challenge\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"memcached-host\",\n\t\t\tUsage: \"Set the memcached host(s) to use for HTTP based challenges. Challenges will be written to all specified hosts.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"http\",\n\t\t\tUsage: \"Set the port and interface to use for HTTP based challenges to listen on. Supported: interface:port or :port\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"tls\",\n\t\t\tUsage: \"Set the port and interface to use for TLS based challenges to listen on. Supported: interface:port or :port\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"dns\",\n\t\t\tUsage: \"Solve a DNS challenge using the specified provider. Disables all other challenges. Run 'lego dnshelp' for help on usage.\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"http-timeout\",\n\t\t\tUsage: \"Set the HTTP timeout value to a specific value in seconds. The default is 10 seconds.\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"dns-timeout\",\n\t\t\tUsage: \"Set the DNS timeout value to a specific value in seconds. The default is 10 seconds.\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"dns-resolvers\",\n\t\t\tUsage: \"Set the resolvers to use for performing recursive DNS queries. Supported: host:port. The default is to use Google's DNS resolvers.\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"pem\",\n\t\t\tUsage: \"Generate a .pem file by concatanating the .key and .crt files together.\",\n\t\t},\n\t}\n\n\terr = app.Run(os.Args)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc dnshelp(c *cli.Context) error {\n\tfmt.Printf(\n\t\t`Credentials for DNS providers must be passed through environment variables.\n\nHere is an example bash command using the CloudFlare DNS provider:\n\n $ CLOUDFLARE_EMAIL=foo@bar.com \\\n CLOUDFLARE_API_KEY=b9841238feb177a84330febba8a83208921177bffe733 \\\n lego --dns cloudflare --domains www.example.com --email me@bar.com run\n\n`)\n\n\tw := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\\t', 0)\n\tfmt.Fprintln(w, \"Valid providers and their associated credential environment variables:\")\n\tfmt.Fprintln(w)\n\tfmt.Fprintln(w, \"\\tazure:\\tAZURE_CLIENT_ID, AZURE_CLIENT_SECRET, AZURE_SUBSCRIPTION_ID, AZURE_TENANT_ID, AZURE_RESOURCE_GROUP\")\n\tfmt.Fprintln(w, \"\\tauroradns:\\tAURORA_USER_ID, AURORA_KEY, AURORA_ENDPOINT\")\n\tfmt.Fprintln(w, \"\\tcloudflare:\\tCLOUDFLARE_EMAIL, CLOUDFLARE_API_KEY\")\n\tfmt.Fprintln(w, \"\\tdigitalocean:\\tDO_AUTH_TOKEN\")\n\tfmt.Fprintln(w, \"\\tdnsimple:\\tDNSIMPLE_EMAIL, DNSIMPLE_API_KEY\")\n\tfmt.Fprintln(w, \"\\tdnsmadeeasy:\\tDNSMADEEASY_API_KEY, DNSMADEEASY_API_SECRET\")\n\tfmt.Fprintln(w, \"\\texoscale:\\tEXOSCALE_API_KEY, EXOSCALE_API_SECRET, EXOSCALE_ENDPOINT\")\n\tfmt.Fprintln(w, \"\\tgandi:\\tGANDI_API_KEY\")\n\tfmt.Fprintln(w, \"\\tgcloud:\\tGCE_PROJECT\")\n\tfmt.Fprintln(w, \"\\tlinode:\\tLINODE_API_KEY\")\n\tfmt.Fprintln(w, \"\\tmanual:\\tnone\")\n\tfmt.Fprintln(w, \"\\tnamecheap:\\tNAMECHEAP_API_USER, NAMECHEAP_API_KEY\")\n\tfmt.Fprintln(w, \"\\trackspace:\\tRACKSPACE_USER, RACKSPACE_API_KEY\")\n\tfmt.Fprintln(w, \"\\trfc2136:\\tRFC2136_TSIG_KEY, RFC2136_TSIG_SECRET,\\n\\t\\tRFC2136_TSIG_ALGORITHM, RFC2136_NAMESERVER\")\n\tfmt.Fprintln(w, \"\\troute53:\\tAWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION\")\n\tfmt.Fprintln(w, \"\\tdyn:\\tDYN_CUSTOMER_NAME, DYN_USER_NAME, DYN_PASSWORD\")\n\tfmt.Fprintln(w, \"\\tvultr:\\tVULTR_API_KEY\")\n\tfmt.Fprintln(w, \"\\tovh:\\tOVH_ENDPOINT, OVH_APPLICATION_KEY, OVH_APPLICATION_SECRET, OVH_CONSUMER_KEY\")\n\tfmt.Fprintln(w, \"\\tpdns:\\tPDNS_API_KEY, PDNS_API_URL\")\n\tfmt.Fprintln(w, \"\\tdnspod:\\tDNSPOD_API_KEY\")\n\tw.Flush()\n\n\tfmt.Println(`\nFor a more detailed explanation of a DNS provider's credential variables,\nplease consult their online documentation.`)\n\n\treturn nil\n}\n<commit_msg>Update DNSimple instructions to use new env key (#374)<commit_after>\/\/ Let's Encrypt client to go!\n\/\/ CLI application for generating Let's Encrypt certificates using the ACME package.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/urfave\/cli\"\n\t\"github.com\/xenolf\/lego\/acme\"\n)\n\n\/\/ Logger is used to log errors; if nil, the default log.Logger is used.\nvar Logger *log.Logger\n\n\/\/ logger is an helper function to retrieve the available logger\nfunc logger() *log.Logger {\n\tif Logger == nil {\n\t\tLogger = log.New(os.Stderr, \"\", log.LstdFlags)\n\t}\n\treturn Logger\n}\n\nvar gittag string\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"lego\"\n\tapp.Usage = \"Let's Encrypt client written in Go\"\n\n\tversion := \"0.3.1\"\n\tif strings.HasPrefix(gittag, \"v\") {\n\t\tversion = gittag\n\t}\n\n\tapp.Version = version\n\n\tacme.UserAgent = \"lego\/\" + app.Version\n\n\tdefaultPath := \"\"\n\tcwd, err := os.Getwd()\n\tif err == nil {\n\t\tdefaultPath = path.Join(cwd, \".lego\")\n\t}\n\n\tapp.Before = func(c *cli.Context) error {\n\t\tif c.GlobalString(\"path\") == \"\" {\n\t\t\tlogger().Fatal(\"Could not determine current working directory. Please pass --path.\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"run\",\n\t\t\tUsage: \"Register an account, then create and install a certificate\",\n\t\t\tAction: run,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"no-bundle\",\n\t\t\t\t\tUsage: \"Do not create a certificate bundle by adding the issuers certificate to the new certificate.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"must-staple\",\n\t\t\t\t\tUsage: \"Include the OCSP must staple TLS extension in the CSR and generated certificate. Only works if the CSR is generated by lego.\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"revoke\",\n\t\t\tUsage: \"Revoke a certificate\",\n\t\t\tAction: revoke,\n\t\t},\n\t\t{\n\t\t\tName: \"renew\",\n\t\t\tUsage: \"Renew a certificate\",\n\t\t\tAction: renew,\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.IntFlag{\n\t\t\t\t\tName: \"days\",\n\t\t\t\t\tValue: 0,\n\t\t\t\t\tUsage: \"The number of days left on a certificate to renew it.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"reuse-key\",\n\t\t\t\t\tUsage: \"Used to indicate you want to reuse your current private key for the new certificate.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"no-bundle\",\n\t\t\t\t\tUsage: \"Do not create a certificate bundle by adding the issuers certificate to the new certificate.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{\n\t\t\t\t\tName: \"must-staple\",\n\t\t\t\t\tUsage: \"Include the OCSP must staple TLS extension in the CSR and generated certificate. Only works if the CSR is generated by lego.\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"dnshelp\",\n\t\t\tUsage: \"Shows additional help for the --dns global option\",\n\t\t\tAction: dnshelp,\n\t\t},\n\t}\n\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"domains, d\",\n\t\t\tUsage: \"Add domains to the process\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"csr, c\",\n\t\t\tUsage: \"Certificate signing request filename, if an external CSR is to be used\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"server, s\",\n\t\t\tValue: \"https:\/\/acme-v01.api.letsencrypt.org\/directory\",\n\t\t\tUsage: \"CA hostname (and optionally :port). The server certificate must be trusted in order to avoid further modifications to the client.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"email, m\",\n\t\t\tUsage: \"Email used for registration and recovery contact.\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"accept-tos, a\",\n\t\t\tUsage: \"By setting this flag to true you indicate that you accept the current Let's Encrypt terms of service.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"key-type, k\",\n\t\t\tValue: \"rsa2048\",\n\t\t\tUsage: \"Key type to use for private keys. Supported: rsa2048, rsa4096, rsa8192, ec256, ec384\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"path\",\n\t\t\tUsage: \"Directory to use for storing the data\",\n\t\t\tValue: defaultPath,\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"exclude, x\",\n\t\t\tUsage: \"Explicitly disallow solvers by name from being used. Solvers: \\\"http-01\\\", \\\"tls-sni-01\\\".\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"webroot\",\n\t\t\tUsage: \"Set the webroot folder to use for HTTP based challenges to write directly in a file in .well-known\/acme-challenge\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"memcached-host\",\n\t\t\tUsage: \"Set the memcached host(s) to use for HTTP based challenges. Challenges will be written to all specified hosts.\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"http\",\n\t\t\tUsage: \"Set the port and interface to use for HTTP based challenges to listen on. Supported: interface:port or :port\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"tls\",\n\t\t\tUsage: \"Set the port and interface to use for TLS based challenges to listen on. Supported: interface:port or :port\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"dns\",\n\t\t\tUsage: \"Solve a DNS challenge using the specified provider. Disables all other challenges. Run 'lego dnshelp' for help on usage.\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"http-timeout\",\n\t\t\tUsage: \"Set the HTTP timeout value to a specific value in seconds. The default is 10 seconds.\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"dns-timeout\",\n\t\t\tUsage: \"Set the DNS timeout value to a specific value in seconds. The default is 10 seconds.\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"dns-resolvers\",\n\t\t\tUsage: \"Set the resolvers to use for performing recursive DNS queries. Supported: host:port. The default is to use Google's DNS resolvers.\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"pem\",\n\t\t\tUsage: \"Generate a .pem file by concatanating the .key and .crt files together.\",\n\t\t},\n\t}\n\n\terr = app.Run(os.Args)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc dnshelp(c *cli.Context) error {\n\tfmt.Printf(\n\t\t`Credentials for DNS providers must be passed through environment variables.\n\nHere is an example bash command using the CloudFlare DNS provider:\n\n $ CLOUDFLARE_EMAIL=foo@bar.com \\\n CLOUDFLARE_API_KEY=b9841238feb177a84330febba8a83208921177bffe733 \\\n lego --dns cloudflare --domains www.example.com --email me@bar.com run\n\n`)\n\n\tw := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\\t', 0)\n\tfmt.Fprintln(w, \"Valid providers and their associated credential environment variables:\")\n\tfmt.Fprintln(w)\n\tfmt.Fprintln(w, \"\\tazure:\\tAZURE_CLIENT_ID, AZURE_CLIENT_SECRET, AZURE_SUBSCRIPTION_ID, AZURE_TENANT_ID, AZURE_RESOURCE_GROUP\")\n\tfmt.Fprintln(w, \"\\tauroradns:\\tAURORA_USER_ID, AURORA_KEY, AURORA_ENDPOINT\")\n\tfmt.Fprintln(w, \"\\tcloudflare:\\tCLOUDFLARE_EMAIL, CLOUDFLARE_API_KEY\")\n\tfmt.Fprintln(w, \"\\tdigitalocean:\\tDO_AUTH_TOKEN\")\n\tfmt.Fprintln(w, \"\\tdnsimple:\\tDNSIMPLE_EMAIL, DNSIMPLE_OAUTH_TOKEN\")\n\tfmt.Fprintln(w, \"\\tdnsmadeeasy:\\tDNSMADEEASY_API_KEY, DNSMADEEASY_API_SECRET\")\n\tfmt.Fprintln(w, \"\\texoscale:\\tEXOSCALE_API_KEY, EXOSCALE_API_SECRET, EXOSCALE_ENDPOINT\")\n\tfmt.Fprintln(w, \"\\tgandi:\\tGANDI_API_KEY\")\n\tfmt.Fprintln(w, \"\\tgcloud:\\tGCE_PROJECT\")\n\tfmt.Fprintln(w, \"\\tlinode:\\tLINODE_API_KEY\")\n\tfmt.Fprintln(w, \"\\tmanual:\\tnone\")\n\tfmt.Fprintln(w, \"\\tnamecheap:\\tNAMECHEAP_API_USER, NAMECHEAP_API_KEY\")\n\tfmt.Fprintln(w, \"\\trackspace:\\tRACKSPACE_USER, RACKSPACE_API_KEY\")\n\tfmt.Fprintln(w, \"\\trfc2136:\\tRFC2136_TSIG_KEY, RFC2136_TSIG_SECRET,\\n\\t\\tRFC2136_TSIG_ALGORITHM, RFC2136_NAMESERVER\")\n\tfmt.Fprintln(w, \"\\troute53:\\tAWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION\")\n\tfmt.Fprintln(w, \"\\tdyn:\\tDYN_CUSTOMER_NAME, DYN_USER_NAME, DYN_PASSWORD\")\n\tfmt.Fprintln(w, \"\\tvultr:\\tVULTR_API_KEY\")\n\tfmt.Fprintln(w, \"\\tovh:\\tOVH_ENDPOINT, OVH_APPLICATION_KEY, OVH_APPLICATION_SECRET, OVH_CONSUMER_KEY\")\n\tfmt.Fprintln(w, \"\\tpdns:\\tPDNS_API_KEY, PDNS_API_URL\")\n\tfmt.Fprintln(w, \"\\tdnspod:\\tDNSPOD_API_KEY\")\n\tw.Flush()\n\n\tfmt.Println(`\nFor a more detailed explanation of a DNS provider's credential variables,\nplease consult their online documentation.`)\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/go:build !windows\n\/\/ +build !windows\n\npackage archive\n\nimport (\n\t\"archive\/tar\"\n\t\"errors\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"syscall\"\n\n\t\"github.com\/containers\/storage\/pkg\/idtools\"\n\t\"github.com\/containers\/storage\/pkg\/system\"\n\t\"golang.org\/x\/sys\/unix\"\n)\n\n\/\/ fixVolumePathPrefix does platform specific processing to ensure that if\n\/\/ the path being passed in is not in a volume path format, convert it to one.\nfunc fixVolumePathPrefix(srcPath string) string {\n\treturn srcPath\n}\n\n\/\/ getWalkRoot calculates the root path when performing a TarWithOptions.\n\/\/ We use a separate function as this is platform specific. On Linux, we\n\/\/ can't use filepath.Join(srcPath,include) because this will clean away\n\/\/ a trailing \".\" or \"\/\" which may be important.\nfunc getWalkRoot(srcPath string, include string) string {\n\treturn srcPath + string(filepath.Separator) + include\n}\n\n\/\/ CanonicalTarNameForPath returns platform-specific filepath\n\/\/ to canonical posix-style path for tar archival. p is relative\n\/\/ path.\nfunc CanonicalTarNameForPath(p string) (string, error) {\n\treturn p, nil \/\/ already unix-style\n}\n\n\/\/ chmodTarEntry is used to adjust the file permissions used in tar header based\n\/\/ on the platform the archival is done.\n\nfunc chmodTarEntry(perm os.FileMode) os.FileMode {\n\treturn perm \/\/ noop for unix as golang APIs provide perm bits correctly\n}\n\nfunc setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {\n\ts, ok := stat.(*syscall.Stat_t)\n\n\tif ok {\n\t\t\/\/ Currently go does not fill in the major\/minors\n\t\tif s.Mode&unix.S_IFBLK != 0 ||\n\t\t\ts.Mode&unix.S_IFCHR != 0 {\n\t\t\thdr.Devmajor = int64(major(uint64(s.Rdev))) \/\/ nolint: unconvert\n\t\t\thdr.Devminor = int64(minor(uint64(s.Rdev))) \/\/ nolint: unconvert\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc getInodeFromStat(stat interface{}) (inode uint64, err error) {\n\ts, ok := stat.(*syscall.Stat_t)\n\n\tif ok {\n\t\tinode = s.Ino\n\t}\n\n\treturn\n}\n\nfunc getFileUIDGID(stat interface{}) (idtools.IDPair, error) {\n\ts, ok := stat.(*syscall.Stat_t)\n\n\tif !ok {\n\t\treturn idtools.IDPair{}, errors.New(\"cannot convert stat value to syscall.Stat_t\")\n\t}\n\treturn idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil\n}\n\nfunc major(device uint64) uint64 {\n\treturn (device >> 8) & 0xfff\n}\n\nfunc minor(device uint64) uint64 {\n\treturn (device & 0xff) | ((device >> 12) & 0xfff00)\n}\n\n\/\/ handleTarTypeBlockCharFifo is an OS-specific helper function used by\n\/\/ createTarFile to handle the following types of header: Block; Char; Fifo\nfunc handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {\n\tmode := uint32(hdr.Mode & 07777)\n\tswitch hdr.Typeflag {\n\tcase tar.TypeBlock:\n\t\tmode |= unix.S_IFBLK\n\tcase tar.TypeChar:\n\t\tmode |= unix.S_IFCHR\n\tcase tar.TypeFifo:\n\t\tmode |= unix.S_IFIFO\n\t}\n\n\treturn system.Mknod(path, mode, system.Mkdev(hdr.Devmajor, hdr.Devminor))\n}\n\n\/\/ Hardlink without symlinks\nfunc handleLLink(targetPath, path string) error {\n\t\/\/ Note: on Linux, the link syscall will not follow symlinks.\n\t\/\/ This behavior is implementation-dependent since\n\t\/\/ POSIX.1-2008 so to make it clear that we need non-symlink\n\t\/\/ following here we use the linkat syscall which has a flags\n\t\/\/ field to select symlink following or not.\n\treturn unix.Linkat(unix.AT_FDCWD, targetPath, unix.AT_FDCWD, path, 0)\n}\n<commit_msg>Use \/\/nolint<commit_after>\/\/go:build !windows\n\/\/ +build !windows\n\npackage archive\n\nimport (\n\t\"archive\/tar\"\n\t\"errors\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"syscall\"\n\n\t\"github.com\/containers\/storage\/pkg\/idtools\"\n\t\"github.com\/containers\/storage\/pkg\/system\"\n\t\"golang.org\/x\/sys\/unix\"\n)\n\n\/\/ fixVolumePathPrefix does platform specific processing to ensure that if\n\/\/ the path being passed in is not in a volume path format, convert it to one.\nfunc fixVolumePathPrefix(srcPath string) string {\n\treturn srcPath\n}\n\n\/\/ getWalkRoot calculates the root path when performing a TarWithOptions.\n\/\/ We use a separate function as this is platform specific. On Linux, we\n\/\/ can't use filepath.Join(srcPath,include) because this will clean away\n\/\/ a trailing \".\" or \"\/\" which may be important.\nfunc getWalkRoot(srcPath string, include string) string {\n\treturn srcPath + string(filepath.Separator) + include\n}\n\n\/\/ CanonicalTarNameForPath returns platform-specific filepath\n\/\/ to canonical posix-style path for tar archival. p is relative\n\/\/ path.\nfunc CanonicalTarNameForPath(p string) (string, error) {\n\treturn p, nil \/\/ already unix-style\n}\n\n\/\/ chmodTarEntry is used to adjust the file permissions used in tar header based\n\/\/ on the platform the archival is done.\n\nfunc chmodTarEntry(perm os.FileMode) os.FileMode {\n\treturn perm \/\/ noop for unix as golang APIs provide perm bits correctly\n}\n\nfunc setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {\n\ts, ok := stat.(*syscall.Stat_t)\n\n\tif ok {\n\t\t\/\/ Currently go does not fill in the major\/minors\n\t\tif s.Mode&unix.S_IFBLK != 0 ||\n\t\t\ts.Mode&unix.S_IFCHR != 0 {\n\t\t\thdr.Devmajor = int64(major(uint64(s.Rdev))) \/\/nolint: unconvert\n\t\t\thdr.Devminor = int64(minor(uint64(s.Rdev))) \/\/nolint: unconvert\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc getInodeFromStat(stat interface{}) (inode uint64, err error) {\n\ts, ok := stat.(*syscall.Stat_t)\n\n\tif ok {\n\t\tinode = s.Ino\n\t}\n\n\treturn\n}\n\nfunc getFileUIDGID(stat interface{}) (idtools.IDPair, error) {\n\ts, ok := stat.(*syscall.Stat_t)\n\n\tif !ok {\n\t\treturn idtools.IDPair{}, errors.New(\"cannot convert stat value to syscall.Stat_t\")\n\t}\n\treturn idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil\n}\n\nfunc major(device uint64) uint64 {\n\treturn (device >> 8) & 0xfff\n}\n\nfunc minor(device uint64) uint64 {\n\treturn (device & 0xff) | ((device >> 12) & 0xfff00)\n}\n\n\/\/ handleTarTypeBlockCharFifo is an OS-specific helper function used by\n\/\/ createTarFile to handle the following types of header: Block; Char; Fifo\nfunc handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {\n\tmode := uint32(hdr.Mode & 07777)\n\tswitch hdr.Typeflag {\n\tcase tar.TypeBlock:\n\t\tmode |= unix.S_IFBLK\n\tcase tar.TypeChar:\n\t\tmode |= unix.S_IFCHR\n\tcase tar.TypeFifo:\n\t\tmode |= unix.S_IFIFO\n\t}\n\n\treturn system.Mknod(path, mode, system.Mkdev(hdr.Devmajor, hdr.Devminor))\n}\n\n\/\/ Hardlink without symlinks\nfunc handleLLink(targetPath, path string) error {\n\t\/\/ Note: on Linux, the link syscall will not follow symlinks.\n\t\/\/ This behavior is implementation-dependent since\n\t\/\/ POSIX.1-2008 so to make it clear that we need non-symlink\n\t\/\/ following here we use the linkat syscall which has a flags\n\t\/\/ field to select symlink following or not.\n\treturn unix.Linkat(unix.AT_FDCWD, targetPath, unix.AT_FDCWD, path, 0)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright The Helm Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage loader\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"helm.sh\/helm\/v3\/pkg\/chart\"\n)\n\nvar drivePathPattern = regexp.MustCompile(`^[a-zA-Z]:\/`)\n\n\/\/ FileLoader loads a chart from a file\ntype FileLoader string\n\n\/\/ Load loads a chart\nfunc (l FileLoader) Load() (*chart.Chart, error) {\n\treturn LoadFile(string(l))\n}\n\n\/\/ LoadFile loads from an archive file.\nfunc LoadFile(name string) (*chart.Chart, error) {\n\tif fi, err := os.Stat(name); err != nil {\n\t\treturn nil, err\n\t} else if fi.IsDir() {\n\t\treturn nil, errors.New(\"cannot load a directory\")\n\t}\n\n\traw, err := os.Open(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer raw.Close()\n\n\terr = ensureArchive(name, raw)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc, err := LoadArchive(raw)\n\tif err != nil {\n\t\tif err == gzip.ErrHeader {\n\t\t\treturn nil, fmt.Errorf(\"file '%s' does not appear to be a valid chart file (details: %s)\", name, err)\n\t\t}\n\t}\n\treturn c, err\n}\n\n\/\/ ensureArchive's job is to return an informative error if the file does not appear to be a gzipped archive.\n\/\/\n\/\/ Sometimes users will provide a values.yaml for an argument where a chart is expected. One common occurrence\n\/\/ of this is invoking `helm template values.yaml mychart` which would otherwise produce a confusing error\n\/\/ if we didn't check for this.\nfunc ensureArchive(name string, raw *os.File) error {\n\tdefer raw.Seek(0, 0) \/\/ reset read offset to allow archive loading to proceed.\n\n\t\/\/ Check the file format to give us a chance to provide the user with more actionable feedback.\n\tbuffer := make([]byte, 512)\n\t_, err := raw.Read(buffer)\n\tif err != nil && err != io.EOF {\n\t\treturn fmt.Errorf(\"file '%s' cannot be read: %s\", name, err)\n\t}\n\tif contentType := http.DetectContentType(buffer); contentType != \"application\/x-gzip\" {\n\t\t\/\/ TODO: Is there a way to reliably test if a file content is YAML? ghodss\/yaml accepts a wide\n\t\t\/\/ variety of content (Makefile, .zshrc) as valid YAML without errors.\n\n\t\t\/\/ Wrong content type. Let's check if it's yaml and give an extra hint?\n\t\tif strings.HasSuffix(name, \".yml\") || strings.HasSuffix(name, \".yaml\") {\n\t\t\treturn fmt.Errorf(\"file '%s' seems to be a YAML file, but expected a gzipped archive\", name)\n\t\t}\n\t\treturn fmt.Errorf(\"file '%s' does not appear to be a gzipped archive; got '%s'\", name, contentType)\n\t}\n\treturn nil\n}\n\n\/\/ LoadArchiveFiles reads in files out of an archive into memory. This function\n\/\/ performs important path security checks and should always be used before\n\/\/ expanding a tarball\nfunc LoadArchiveFiles(in io.Reader) ([]*BufferedFile, error) {\n\tunzipped, err := gzip.NewReader(in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer unzipped.Close()\n\n\tfiles := []*BufferedFile{}\n\ttr := tar.NewReader(unzipped)\n\tfor {\n\t\tb := bytes.NewBuffer(nil)\n\t\thd, err := tr.Next()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif hd.FileInfo().IsDir() {\n\t\t\t\/\/ Use this instead of hd.Typeflag because we don't have to do any\n\t\t\t\/\/ inference chasing.\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Archive could contain \\ if generated on Windows\n\t\tdelimiter := \"\/\"\n\t\tif strings.ContainsRune(hd.Name, '\\\\') {\n\t\t\tdelimiter = \"\\\\\"\n\t\t}\n\n\t\tparts := strings.Split(hd.Name, delimiter)\n\t\tn := strings.Join(parts[1:], delimiter)\n\n\t\t\/\/ Normalize the path to the \/ delimiter\n\t\tn = strings.ReplaceAll(n, delimiter, \"\/\")\n\n\t\tif path.IsAbs(n) {\n\t\t\treturn nil, errors.New(\"chart illegally contains absolute paths\")\n\t\t}\n\n\t\tn = path.Clean(n)\n\t\tif n == \".\" {\n\t\t\t\/\/ In this case, the original path was relative when it should have been absolute.\n\t\t\treturn nil, errors.Errorf(\"chart illegally contains content outside the base directory: %q\", hd.Name)\n\t\t}\n\t\tif strings.HasPrefix(n, \"..\") {\n\t\t\treturn nil, errors.New(\"chart illegally references parent directory\")\n\t\t}\n\n\t\t\/\/ In some particularly arcane acts of path creativity, it is possible to intermix\n\t\t\/\/ UNIX and Windows style paths in such a way that you produce a result of the form\n\t\t\/\/ c:\/foo even after all the built-in absolute path checks. So we explicitly check\n\t\t\/\/ for this condition.\n\t\tif drivePathPattern.MatchString(n) {\n\t\t\treturn nil, errors.New(\"chart contains illegally named files\")\n\t\t}\n\n\t\tif parts[0] == \"Chart.yaml\" {\n\t\t\treturn nil, errors.New(\"chart yaml not in base directory\")\n\t\t}\n\n\t\tif _, err := io.Copy(b, tr); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfiles = append(files, &BufferedFile{Name: n, Data: b.Bytes()})\n\t\tb.Reset()\n\t}\n\n\tif len(files) == 0 {\n\t\treturn nil, errors.New(\"no files in chart archive\")\n\t}\n\treturn files, nil\n}\n\n\/\/ LoadArchive loads from a reader containing a compressed tar archive.\nfunc LoadArchive(in io.Reader) (*chart.Chart, error) {\n\tfiles, err := LoadArchiveFiles(in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn LoadFiles(files)\n}\n<commit_msg>fix: ignore pax header files in chart validation<commit_after>\/*\nCopyright The Helm Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage loader\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"helm.sh\/helm\/v3\/pkg\/chart\"\n)\n\nvar drivePathPattern = regexp.MustCompile(`^[a-zA-Z]:\/`)\n\n\/\/ FileLoader loads a chart from a file\ntype FileLoader string\n\n\/\/ Load loads a chart\nfunc (l FileLoader) Load() (*chart.Chart, error) {\n\treturn LoadFile(string(l))\n}\n\n\/\/ LoadFile loads from an archive file.\nfunc LoadFile(name string) (*chart.Chart, error) {\n\tif fi, err := os.Stat(name); err != nil {\n\t\treturn nil, err\n\t} else if fi.IsDir() {\n\t\treturn nil, errors.New(\"cannot load a directory\")\n\t}\n\n\traw, err := os.Open(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer raw.Close()\n\n\terr = ensureArchive(name, raw)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc, err := LoadArchive(raw)\n\tif err != nil {\n\t\tif err == gzip.ErrHeader {\n\t\t\treturn nil, fmt.Errorf(\"file '%s' does not appear to be a valid chart file (details: %s)\", name, err)\n\t\t}\n\t}\n\treturn c, err\n}\n\n\/\/ ensureArchive's job is to return an informative error if the file does not appear to be a gzipped archive.\n\/\/\n\/\/ Sometimes users will provide a values.yaml for an argument where a chart is expected. One common occurrence\n\/\/ of this is invoking `helm template values.yaml mychart` which would otherwise produce a confusing error\n\/\/ if we didn't check for this.\nfunc ensureArchive(name string, raw *os.File) error {\n\tdefer raw.Seek(0, 0) \/\/ reset read offset to allow archive loading to proceed.\n\n\t\/\/ Check the file format to give us a chance to provide the user with more actionable feedback.\n\tbuffer := make([]byte, 512)\n\t_, err := raw.Read(buffer)\n\tif err != nil && err != io.EOF {\n\t\treturn fmt.Errorf(\"file '%s' cannot be read: %s\", name, err)\n\t}\n\tif contentType := http.DetectContentType(buffer); contentType != \"application\/x-gzip\" {\n\t\t\/\/ TODO: Is there a way to reliably test if a file content is YAML? ghodss\/yaml accepts a wide\n\t\t\/\/ variety of content (Makefile, .zshrc) as valid YAML without errors.\n\n\t\t\/\/ Wrong content type. Let's check if it's yaml and give an extra hint?\n\t\tif strings.HasSuffix(name, \".yml\") || strings.HasSuffix(name, \".yaml\") {\n\t\t\treturn fmt.Errorf(\"file '%s' seems to be a YAML file, but expected a gzipped archive\", name)\n\t\t}\n\t\treturn fmt.Errorf(\"file '%s' does not appear to be a gzipped archive; got '%s'\", name, contentType)\n\t}\n\treturn nil\n}\n\n\/\/ LoadArchiveFiles reads in files out of an archive into memory. This function\n\/\/ performs important path security checks and should always be used before\n\/\/ expanding a tarball\nfunc LoadArchiveFiles(in io.Reader) ([]*BufferedFile, error) {\n\tunzipped, err := gzip.NewReader(in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer unzipped.Close()\n\n\tfiles := []*BufferedFile{}\n\ttr := tar.NewReader(unzipped)\n\tfor {\n\t\tb := bytes.NewBuffer(nil)\n\t\thd, err := tr.Next()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif hd.FileInfo().IsDir() {\n\t\t\t\/\/ Use this instead of hd.Typeflag because we don't have to do any\n\t\t\t\/\/ inference chasing.\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch hd.Typeflag {\n\t\t\/\/ We don't want to process these extension header files.\n\t\tcase tar.TypeXGlobalHeader, tar.TypeXHeader:\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Archive could contain \\ if generated on Windows\n\t\tdelimiter := \"\/\"\n\t\tif strings.ContainsRune(hd.Name, '\\\\') {\n\t\t\tdelimiter = \"\\\\\"\n\t\t}\n\n\t\tparts := strings.Split(hd.Name, delimiter)\n\t\tn := strings.Join(parts[1:], delimiter)\n\n\t\t\/\/ Normalize the path to the \/ delimiter\n\t\tn = strings.ReplaceAll(n, delimiter, \"\/\")\n\n\t\tif path.IsAbs(n) {\n\t\t\treturn nil, errors.New(\"chart illegally contains absolute paths\")\n\t\t}\n\n\t\tn = path.Clean(n)\n\t\tif n == \".\" {\n\t\t\t\/\/ In this case, the original path was relative when it should have been absolute.\n\t\t\treturn nil, errors.Errorf(\"chart illegally contains content outside the base directory: %q\", hd.Name)\n\t\t}\n\t\tif strings.HasPrefix(n, \"..\") {\n\t\t\treturn nil, errors.New(\"chart illegally references parent directory\")\n\t\t}\n\n\t\t\/\/ In some particularly arcane acts of path creativity, it is possible to intermix\n\t\t\/\/ UNIX and Windows style paths in such a way that you produce a result of the form\n\t\t\/\/ c:\/foo even after all the built-in absolute path checks. So we explicitly check\n\t\t\/\/ for this condition.\n\t\tif drivePathPattern.MatchString(n) {\n\t\t\treturn nil, errors.New(\"chart contains illegally named files\")\n\t\t}\n\n\t\tif parts[0] == \"Chart.yaml\" {\n\t\t\treturn nil, errors.New(\"chart yaml not in base directory\")\n\t\t}\n\n\t\tif _, err := io.Copy(b, tr); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfiles = append(files, &BufferedFile{Name: n, Data: b.Bytes()})\n\t\tb.Reset()\n\t}\n\n\tif len(files) == 0 {\n\t\treturn nil, errors.New(\"no files in chart archive\")\n\t}\n\treturn files, nil\n}\n\n\/\/ LoadArchive loads from a reader containing a compressed tar archive.\nfunc LoadArchive(in io.Reader) (*chart.Chart, error) {\n\tfiles, err := LoadArchiveFiles(in)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn LoadFiles(files)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build windows\n\n\/*\n * Minio Client (C) 2015 Minio, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this fs except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage fs\n\nimport (\n\t\"path\/filepath\"\n\t\"syscall\"\n\n\t\"github.com\/minio\/minio\/pkg\/iodine\"\n)\n\nfunc normalizePath(path string) (string, error) {\n\tif filepath.VolumeName(path) == \"\" && filepath.HasPrefix(path, \"\\\\\") {\n\t\tpath, err = syscall.FullPath(path)\n\t\tif err != nil {\n\t\t\treturn \"\", iodine.New(err, nil)\n\t\t}\n\t}\n\treturn path, nil\n}\n<commit_msg>Add missing var error for windows, minor change pushing in<commit_after>\/\/ +build windows\n\n\/*\n * Minio Client (C) 2015 Minio, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this fs except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage fs\n\nimport (\n\t\"path\/filepath\"\n\t\"syscall\"\n\n\t\"github.com\/minio\/minio\/pkg\/iodine\"\n)\n\nfunc normalizePath(path string) (string, error) {\n\tvar err error\n\tif filepath.VolumeName(path) == \"\" && filepath.HasPrefix(path, \"\\\\\") {\n\t\tpath, err = syscall.FullPath(path)\n\t\tif err != nil {\n\t\t\treturn \"\", iodine.New(err, nil)\n\t\t}\n\t}\n\treturn path, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\tkcmd \"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/kubectl\/cmd\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/cli\/describe\"\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/util\/clientcmd\"\n)\n\nfunc tab(original string) string {\n\tlines := []string{}\n\tscanner := bufio.NewScanner(strings.NewReader(original))\n\tfor scanner.Scan() {\n\t\tlines = append(lines, \" \"+scanner.Text())\n\t}\n\treturn strings.Join(lines, \"\\n\")\n}\n\nconst (\n\tgetLong = `Display one or many resources.\n\nPossible resources include builds, buildConfigs, services, pods, etc.`\n\n\tgetExample = ` \/\/ List all pods in ps output format.\n $ %[1]s get pods\n\n \/\/ List a single replication controller with specified ID in ps output format.\n $ %[1]s get replicationController 1234-56-7890-234234-456456\n\n \/\/ List a single pod in JSON output format.\n $ %[1]s get -o json pod 1234-56-7890-234234-456456\n\n \/\/ Return only the status value of the specified pod.\n $ %[1]s get -o template pod 1234-56-7890-234234-456456 --template={{.currentState.status}}`\n)\n\n\/\/ NewCmdGet is a wrapper for the Kubernetes cli get command\nfunc NewCmdGet(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tp := describe.NewHumanReadablePrinter(false)\n\tvalidArgs := p.HandledResources()\n\n\tcmd := kcmd.NewCmdGet(f.Factory, out)\n\tcmd.Long = getLong\n\tcmd.Example = fmt.Sprintf(getExample, fullName)\n\tcmd.ValidArgs = validArgs\n\treturn cmd\n}\n\nconst (\n\tupdateLong = `Update a resource by filename or stdin.\n\nJSON and YAML formats are accepted.`\n\n\tupdateExample = ` \/\/ Update a pod using the data in pod.json.\n $ %[1]s update -f pod.json\n\n \/\/ Update a pod based on the JSON passed into stdin.\n $ cat pod.json | %[1]s update -f -\n\n \/\/ Update a pod by downloading it, applying the patch, then updating. Requires apiVersion be specified.\n $ %[1]s update pods my-pod --patch='{ \"apiVersion\": \"v1beta1\", \"desiredState\": { \"manifest\": [{ \"cpu\": 100 }]}}'`\n)\n\n\/\/ NewCmdUpdate is a wrapper for the Kubernetes cli update command\nfunc NewCmdUpdate(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdUpdate(f.Factory, out)\n\tcmd.Long = updateLong\n\tcmd.Example = fmt.Sprintf(updateExample, fullName)\n\treturn cmd\n}\n\nconst (\n\tdeleteLong = `Delete a resource by filename, stdin, resource and ID, or by resources and label selector.\n\nJSON and YAML formats are accepted.\n\nIf both a filename and command line arguments are passed, the command line\narguments are used and the filename is ignored.\n\nNote that the delete command does NOT do resource version checks, so if someone\nsubmits an update to a resource right when you submit a delete, their update\nwill be lost along with the rest of the resource.`\n\n\tdeleteExample = ` \/\/ Delete a pod using the type and ID specified in pod.json.\n $ %[1]s delete -f pod.json\n\n \/\/ Delete a pod based on the type and ID in the JSON passed into stdin.\n $ cat pod.json | %[1]s delete -f -\n\n \/\/ Delete pods and services with label name=myLabel.\n $ %[1]s delete pods,services -l name=myLabel\n\n \/\/ Delete a pod with ID 1234-56-7890-234234-456456.\n $ %[1]s delete pod 1234-56-7890-234234-456456\n\n \/\/ Delete all pods\n $ %[1]s delete pods --all`\n)\n\n\/\/ NewCmdDelete is a wrapper for the Kubernetes cli delete command\nfunc NewCmdDelete(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdDelete(f.Factory, out)\n\tcmd.Long = deleteLong\n\tcmd.Example = fmt.Sprintf(deleteExample, fullName)\n\treturn cmd\n}\n\nconst (\n\tlog_long = `Print the logs for a container in a pod. If the pod has only one container, the container name is optional.`\n\n\tlog_example = ` \/\/ Returns snapshot of ruby-container logs from pod 123456-7890.\n $ %[1]s log 123456-7890 ruby-container\n\n \/\/ Starts streaming of ruby-container logs from pod 123456-7890.\n $ %[1]s log -f 123456-7890 ruby-container`\n)\n\n\/\/ NewCmdLog is a wrapper for the Kubernetes cli log command\nfunc NewCmdLog(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdLog(f.Factory, out)\n\tcmd.Long = log_long\n\tcmd.Example = fmt.Sprintf(log_example, fullName)\n\treturn cmd\n}\n\nconst (\n\tcreateLong = `Create a resource by filename or stdin.\n\nJSON and YAML formats are accepted.`\n\n\tcreateExample = ` \/\/ Create a pod using the data in pod.json.\n $ %[1]s create -f pod.json\n\n \/\/ Create a pod based on the JSON passed into stdin.\n $ cat pod.json | %[1]s create -f -`\n)\n\n\/\/ NewCmdCreate is a wrapper for the Kubernetes cli create command\nfunc NewCmdCreate(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdCreate(f.Factory, out)\n\tcmd.Long = createLong\n\tcmd.Example = fmt.Sprintf(createExample, fullName)\n\treturn cmd\n}\n\nconst (\n\texecLong = `Execute a command in a container.`\n\n\texecExample = ` \/\/ Get output from running 'date' in ruby-container from pod 123456-7890\n $ %[1]s exec -p 123456-7890 -c ruby-container date\n\n \/\/ Switch to raw terminal mode, sends stdin to 'bash' in ruby-container from pod 123456-780 and sends stdout\/stderr from 'bash' back to the client\n $ %[1]s exec -p 123456-7890 -c ruby-container -i -t -- bash -il`\n)\n\n\/\/ NewCmdExec is a wrapper for the Kubernetes cli exec command\nfunc NewCmdExec(fullName string, f *clientcmd.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdExec(f.Factory, cmdIn, cmdOut, cmdErr)\n\tcmd.Long = execLong\n\tcmd.Example = fmt.Sprintf(execExample, fullName)\n\treturn cmd\n}\n\nconst (\n\tportForwardLong = `Forward 1 or more local ports to a pod.`\n\n\tportForwardExample = ` \/\/ Listens on ports 5000 and 6000 locally, forwarding data to\/from ports 5000 and 6000 in the pod\n $ %[1]s port-forward -p mypod 5000 6000\n\n \/\/ Listens on port 8888 locally, forwarding to 5000 in the pod\n $ %[1]s port-forward -p mypod 8888:5000\n\n \/\/ Listens on a random port locally, forwarding to 5000 in the pod\n $ %[1]s port-forward -p mypod :5000\n\n \/\/ Listens on a random port locally, forwarding to 5000 in the pod\n $ %[1]s port-forward -p mypod 0:5000`\n)\n\n\/\/ NewCmdPortForward is a wrapper for the Kubernetes cli port-forward command\nfunc NewCmdPortForward(fullName string, f *clientcmd.Factory) *cobra.Command {\n\tcmd := kcmd.NewCmdPortForward(f.Factory)\n\tcmd.Long = portForwardLong\n\tcmd.Example = fmt.Sprintf(portForwardExample, fullName)\n\treturn cmd\n}\n\nconst (\n\tdescribeLong = `Show details of a specific resource.\n\nThis command joins many API calls together to form a detailed description of a\ngiven resource.`\n\n\tdescribeExample = ` \/\/ Provide details about the ruby-20-centos7 image repository\n $ %[1]s describe imageRepository ruby-20-centos7\n\n \/\/ Provide details about the ruby-sample-build build configuration\n $ %[1]s describe bc ruby-sample-build`\n)\n\n\/\/ NewCmdDescribe is a wrapper for the Kubernetes cli describe command\nfunc NewCmdDescribe(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdDescribe(f.Factory, out)\n\tcmd.Long = describeLong\n\tcmd.Example = fmt.Sprintf(describeExample, fullName)\n\tcmd.ValidArgs = describe.DescribableResources()\n\treturn cmd\n}\n\nconst (\n\tproxyLong = `Run a proxy to the Kubernetes API server.`\n\n\tproxyExample = ` \/\/ Run a proxy to kubernetes apiserver on port 8011, serving static content from .\/local\/www\/\n $ %[1]s proxy --port=8011 --www=.\/local\/www\/\n\n \/\/ Run a proxy to kubernetes apiserver, changing the api prefix to k8s-api\n \/\/ This makes e.g. the pods api available at localhost:8011\/k8s-api\/v1beta1\/pods\/\n $ %[1]s proxy --api-prefix=k8s-api`\n)\n\n\/\/ NewCmdProxy is a wrapper for the Kubernetes cli proxy command\nfunc NewCmdProxy(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdProxy(f.Factory, out)\n\tcmd.Long = proxyLong\n\tcmd.Example = fmt.Sprintf(proxyExample, fullName)\n\treturn cmd\n}\n\nconst (\n\tresizeLong = `Set a new size for a Replication Controller either directly or via its Deployment Configuration.\n\nResize also allows users to specify one or more preconditions for the resize action.\nIf --current-replicas or --resource-version is specified, it is validated before the\nresize is attempted, and it is guaranteed that the precondition holds true when the\nresize is sent to the server.`\n\tresizeExample = `\/\/ Resize replication controller named 'foo' to 3.\n$ %[1]s resize --replicas=3 replicationcontrollers foo\n\n\/\/ If the replication controller named foo's current size is 2, resize foo to 3.\n$ %[1]s resize --current-replicas=2 --replicas=3 replicationcontrollers foo`\n)\n\n\/\/ NewCmdResize is a wrapper for the Kubernetes cli resize command\nfunc NewCmdResize(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdResize(f.Factory, out)\n\tcmd.Long = resizeLong\n\tcmd.Example = fmt.Sprintf(resizeExample, fullName)\n\treturn cmd\n}\n\nconst (\n\tstopLong = `Gracefully shut down a resource by id or filename.\n\nAttempts to shut down and delete a resource that supports graceful termination.\nIf the resource is resizable it will be resized to 0 before deletion.`\n\n\tstopExample = `\/\/ Shut down foo.\n$ %[1]s stop replicationcontroller foo\n\n\/\/ Stop pods and services with label name=myLabel.\n$ %[1]s stop pods,services -l name=myLabel\n\n\/\/ Shut down the service defined in service.json\n$ %[1]s stop -f service.json\n\n\/\/ Shut down all resources in the path\/to\/resources directory\n$ %[1]s stop -f path\/to\/resources`\n)\n\n\/\/ NewCmdStop is a wrapper for the Kubernetes cli stop command\nfunc NewCmdStop(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdStop(f.Factory, out)\n\tcmd.Long = stopLong\n\tcmd.Example = fmt.Sprintf(stopExample, fullName)\n\treturn cmd\n}\n\nconst (\n\tlabelLong = `Update the labels on a resource.\n\nIf --overwrite is true, then existing labels can be overwritten, otherwise attempting to overwrite a label will result in an error.\nIf --resource-version is specified, then updates will use this resource version, otherwise the existing resource-version will be used.`\n\n\tlabelExample = `\/\/ Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n$ %[1]s label pods foo unhealthy=true\n\n\/\/ Update pod 'foo' with the label 'status' and the value 'unhealthy', overwriting any existing value.\n$ %[1]s label --overwrite pods foo status=unhealthy\n\n\/\/ Update all pods in the namespace\n$ %[1]s label pods --all status=unhealthy\n\n\/\/ Update pod 'foo' only if the resource is unchanged from version 1.\n$ %[1]s label pods foo status=unhealthy --resource-version=1\n\n\/\/ Update pod 'foo' by removing a label named 'bar' if it exists.\n\/\/ Does not require the --overwrite flag.\n$ %[1]s label pods foo bar-`\n)\n\n\/\/ NewCmdLabel is a wrapper for the Kubernetes cli label command\nfunc NewCmdLabel(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdLabel(f.Factory, out)\n\tcmd.Long = labelLong\n\tcmd.Example = fmt.Sprintf(labelExample, fullName)\n\treturn cmd\n}\n<commit_msg>Beef up osc label help message<commit_after>package cmd\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\tkcmd \"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/kubectl\/cmd\"\n\tkutil \"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/util\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/cli\/describe\"\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/util\/clientcmd\"\n)\n\nfunc tab(original string) string {\n\tlines := []string{}\n\tscanner := bufio.NewScanner(strings.NewReader(original))\n\tfor scanner.Scan() {\n\t\tlines = append(lines, \" \"+scanner.Text())\n\t}\n\treturn strings.Join(lines, \"\\n\")\n}\n\nconst (\n\tgetLong = `Display one or many resources.\n\nPossible resources include builds, buildConfigs, services, pods, etc.`\n\n\tgetExample = ` \/\/ List all pods in ps output format.\n $ %[1]s get pods\n\n \/\/ List a single replication controller with specified ID in ps output format.\n $ %[1]s get replicationController 1234-56-7890-234234-456456\n\n \/\/ List a single pod in JSON output format.\n $ %[1]s get -o json pod 1234-56-7890-234234-456456\n\n \/\/ Return only the status value of the specified pod.\n $ %[1]s get -o template pod 1234-56-7890-234234-456456 --template={{.currentState.status}}`\n)\n\n\/\/ NewCmdGet is a wrapper for the Kubernetes cli get command\nfunc NewCmdGet(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tp := describe.NewHumanReadablePrinter(false)\n\tvalidArgs := p.HandledResources()\n\n\tcmd := kcmd.NewCmdGet(f.Factory, out)\n\tcmd.Long = getLong\n\tcmd.Example = fmt.Sprintf(getExample, fullName)\n\tcmd.ValidArgs = validArgs\n\treturn cmd\n}\n\nconst (\n\tupdateLong = `Update a resource by filename or stdin.\n\nJSON and YAML formats are accepted.`\n\n\tupdateExample = ` \/\/ Update a pod using the data in pod.json.\n $ %[1]s update -f pod.json\n\n \/\/ Update a pod based on the JSON passed into stdin.\n $ cat pod.json | %[1]s update -f -\n\n \/\/ Update a pod by downloading it, applying the patch, then updating. Requires apiVersion be specified.\n $ %[1]s update pods my-pod --patch='{ \"apiVersion\": \"v1beta1\", \"desiredState\": { \"manifest\": [{ \"cpu\": 100 }]}}'`\n)\n\n\/\/ NewCmdUpdate is a wrapper for the Kubernetes cli update command\nfunc NewCmdUpdate(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdUpdate(f.Factory, out)\n\tcmd.Long = updateLong\n\tcmd.Example = fmt.Sprintf(updateExample, fullName)\n\treturn cmd\n}\n\nconst (\n\tdeleteLong = `Delete a resource by filename, stdin, resource and ID, or by resources and label selector.\n\nJSON and YAML formats are accepted.\n\nIf both a filename and command line arguments are passed, the command line\narguments are used and the filename is ignored.\n\nNote that the delete command does NOT do resource version checks, so if someone\nsubmits an update to a resource right when you submit a delete, their update\nwill be lost along with the rest of the resource.`\n\n\tdeleteExample = ` \/\/ Delete a pod using the type and ID specified in pod.json.\n $ %[1]s delete -f pod.json\n\n \/\/ Delete a pod based on the type and ID in the JSON passed into stdin.\n $ cat pod.json | %[1]s delete -f -\n\n \/\/ Delete pods and services with label name=myLabel.\n $ %[1]s delete pods,services -l name=myLabel\n\n \/\/ Delete a pod with ID 1234-56-7890-234234-456456.\n $ %[1]s delete pod 1234-56-7890-234234-456456\n\n \/\/ Delete all pods\n $ %[1]s delete pods --all`\n)\n\n\/\/ NewCmdDelete is a wrapper for the Kubernetes cli delete command\nfunc NewCmdDelete(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdDelete(f.Factory, out)\n\tcmd.Long = deleteLong\n\tcmd.Example = fmt.Sprintf(deleteExample, fullName)\n\treturn cmd\n}\n\nconst (\n\tlog_long = `Print the logs for a container in a pod. If the pod has only one container, the container name is optional.`\n\n\tlog_example = ` \/\/ Returns snapshot of ruby-container logs from pod 123456-7890.\n $ %[1]s log 123456-7890 ruby-container\n\n \/\/ Starts streaming of ruby-container logs from pod 123456-7890.\n $ %[1]s log -f 123456-7890 ruby-container`\n)\n\n\/\/ NewCmdLog is a wrapper for the Kubernetes cli log command\nfunc NewCmdLog(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdLog(f.Factory, out)\n\tcmd.Long = log_long\n\tcmd.Example = fmt.Sprintf(log_example, fullName)\n\treturn cmd\n}\n\nconst (\n\tcreateLong = `Create a resource by filename or stdin.\n\nJSON and YAML formats are accepted.`\n\n\tcreateExample = ` \/\/ Create a pod using the data in pod.json.\n $ %[1]s create -f pod.json\n\n \/\/ Create a pod based on the JSON passed into stdin.\n $ cat pod.json | %[1]s create -f -`\n)\n\n\/\/ NewCmdCreate is a wrapper for the Kubernetes cli create command\nfunc NewCmdCreate(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdCreate(f.Factory, out)\n\tcmd.Long = createLong\n\tcmd.Example = fmt.Sprintf(createExample, fullName)\n\treturn cmd\n}\n\nconst (\n\texecLong = `Execute a command in a container.`\n\n\texecExample = ` \/\/ Get output from running 'date' in ruby-container from pod 123456-7890\n $ %[1]s exec -p 123456-7890 -c ruby-container date\n\n \/\/ Switch to raw terminal mode, sends stdin to 'bash' in ruby-container from pod 123456-780 and sends stdout\/stderr from 'bash' back to the client\n $ %[1]s exec -p 123456-7890 -c ruby-container -i -t -- bash -il`\n)\n\n\/\/ NewCmdExec is a wrapper for the Kubernetes cli exec command\nfunc NewCmdExec(fullName string, f *clientcmd.Factory, cmdIn io.Reader, cmdOut, cmdErr io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdExec(f.Factory, cmdIn, cmdOut, cmdErr)\n\tcmd.Long = execLong\n\tcmd.Example = fmt.Sprintf(execExample, fullName)\n\treturn cmd\n}\n\nconst (\n\tportForwardLong = `Forward 1 or more local ports to a pod.`\n\n\tportForwardExample = ` \/\/ Listens on ports 5000 and 6000 locally, forwarding data to\/from ports 5000 and 6000 in the pod\n $ %[1]s port-forward -p mypod 5000 6000\n\n \/\/ Listens on port 8888 locally, forwarding to 5000 in the pod\n $ %[1]s port-forward -p mypod 8888:5000\n\n \/\/ Listens on a random port locally, forwarding to 5000 in the pod\n $ %[1]s port-forward -p mypod :5000\n\n \/\/ Listens on a random port locally, forwarding to 5000 in the pod\n $ %[1]s port-forward -p mypod 0:5000`\n)\n\n\/\/ NewCmdPortForward is a wrapper for the Kubernetes cli port-forward command\nfunc NewCmdPortForward(fullName string, f *clientcmd.Factory) *cobra.Command {\n\tcmd := kcmd.NewCmdPortForward(f.Factory)\n\tcmd.Long = portForwardLong\n\tcmd.Example = fmt.Sprintf(portForwardExample, fullName)\n\treturn cmd\n}\n\nconst (\n\tdescribeLong = `Show details of a specific resource.\n\nThis command joins many API calls together to form a detailed description of a\ngiven resource.`\n\n\tdescribeExample = ` \/\/ Provide details about the ruby-20-centos7 image repository\n $ %[1]s describe imageRepository ruby-20-centos7\n\n \/\/ Provide details about the ruby-sample-build build configuration\n $ %[1]s describe bc ruby-sample-build`\n)\n\n\/\/ NewCmdDescribe is a wrapper for the Kubernetes cli describe command\nfunc NewCmdDescribe(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdDescribe(f.Factory, out)\n\tcmd.Long = describeLong\n\tcmd.Example = fmt.Sprintf(describeExample, fullName)\n\tcmd.ValidArgs = describe.DescribableResources()\n\treturn cmd\n}\n\nconst (\n\tproxyLong = `Run a proxy to the Kubernetes API server.`\n\n\tproxyExample = ` \/\/ Run a proxy to kubernetes apiserver on port 8011, serving static content from .\/local\/www\/\n $ %[1]s proxy --port=8011 --www=.\/local\/www\/\n\n \/\/ Run a proxy to kubernetes apiserver, changing the api prefix to k8s-api\n \/\/ This makes e.g. the pods api available at localhost:8011\/k8s-api\/v1beta1\/pods\/\n $ %[1]s proxy --api-prefix=k8s-api`\n)\n\n\/\/ NewCmdProxy is a wrapper for the Kubernetes cli proxy command\nfunc NewCmdProxy(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdProxy(f.Factory, out)\n\tcmd.Long = proxyLong\n\tcmd.Example = fmt.Sprintf(proxyExample, fullName)\n\treturn cmd\n}\n\nconst (\n\tresizeLong = `Set a new size for a Replication Controller either directly or via its Deployment Configuration.\n\nResize also allows users to specify one or more preconditions for the resize action.\nIf --current-replicas or --resource-version is specified, it is validated before the\nresize is attempted, and it is guaranteed that the precondition holds true when the\nresize is sent to the server.`\n\tresizeExample = `\/\/ Resize replication controller named 'foo' to 3.\n$ %[1]s resize --replicas=3 replicationcontrollers foo\n\n\/\/ If the replication controller named foo's current size is 2, resize foo to 3.\n$ %[1]s resize --current-replicas=2 --replicas=3 replicationcontrollers foo`\n)\n\n\/\/ NewCmdResize is a wrapper for the Kubernetes cli resize command\nfunc NewCmdResize(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdResize(f.Factory, out)\n\tcmd.Long = resizeLong\n\tcmd.Example = fmt.Sprintf(resizeExample, fullName)\n\treturn cmd\n}\n\nconst (\n\tstopLong = `Gracefully shut down a resource by id or filename.\n\nAttempts to shut down and delete a resource that supports graceful termination.\nIf the resource is resizable it will be resized to 0 before deletion.`\n\n\tstopExample = `\/\/ Shut down foo.\n$ %[1]s stop replicationcontroller foo\n\n\/\/ Stop pods and services with label name=myLabel.\n$ %[1]s stop pods,services -l name=myLabel\n\n\/\/ Shut down the service defined in service.json\n$ %[1]s stop -f service.json\n\n\/\/ Shut down all resources in the path\/to\/resources directory\n$ %[1]s stop -f path\/to\/resources`\n)\n\n\/\/ NewCmdStop is a wrapper for the Kubernetes cli stop command\nfunc NewCmdStop(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdStop(f.Factory, out)\n\tcmd.Long = stopLong\n\tcmd.Example = fmt.Sprintf(stopExample, fullName)\n\treturn cmd\n}\n\nconst (\n\tlabelLong = `Update the labels on a resource.\n\nA valid label value is consisted of letters and\/or numbers with a max length of %[1]d characters.\nIf --overwrite is true, then existing labels can be overwritten, otherwise attempting to overwrite a label will result in an error.\nIf --resource-version is specified, then updates will use this resource version, otherwise the existing resource-version will be used.`\n\n\tlabelExample = `\/\/ Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n$ %[1]s label pods foo unhealthy=true\n\n\/\/ Update pod 'foo' with the label 'status' and the value 'unhealthy', overwriting any existing value.\n$ %[1]s label --overwrite pods foo status=unhealthy\n\n\/\/ Update all pods in the namespace\n$ %[1]s label pods --all status=unhealthy\n\n\/\/ Update pod 'foo' only if the resource is unchanged from version 1.\n$ %[1]s label pods foo status=unhealthy --resource-version=1\n\n\/\/ Update pod 'foo' by removing a label named 'bar' if it exists.\n\/\/ Does not require the --overwrite flag.\n$ %[1]s label pods foo bar-`\n)\n\n\/\/ NewCmdLabel is a wrapper for the Kubernetes cli label command\nfunc NewCmdLabel(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdLabel(f.Factory, out)\n\tcmd.Long = fmt.Sprintf(labelLong, kutil.LabelValueMaxLength)\n\tcmd.Example = fmt.Sprintf(labelExample, fullName)\n\treturn cmd\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"time\"\n\n\t\"github.com\/hellofresh\/logging-go\"\n\t\"github.com\/kelseyhightower\/envconfig\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ Specification for basic configurations\ntype Specification struct {\n\tPort int `envconfig:\"PORT\"`\n\tDebug bool `envconfig:\"DEBUG\"`\n\tGraceTimeOut int64 `envconfig:\"GRACE_TIMEOUT\"`\n\tMaxIdleConnsPerHost int `envconfig:\"MAX_IDLE_CONNS_PER_HOST\"`\n\tBackendFlushInterval time.Duration `envconfig:\"BACKEND_FLUSH_INTERVAL\"`\n\tCloseIdleConnsPeriod time.Duration `envconfig:\"CLOSE_IDLE_CONNS_PERIOD\"`\n\tLog logging.LogConfig\n\tWeb Web\n\tDatabase Database\n\tStorage Storage\n\tStats Stats\n\tTracing Tracing\n\tTLS TLS\n}\n\n\/\/ Web represents the API configurations\ntype Web struct {\n\tPort int `envconfig:\"API_PORT\"`\n\tReadOnly bool `envconfig:\"API_READONLY\"`\n\tCredentials Credentials\n\tTLS TLS\n}\n\n\/\/ TLS represents the TLS configurations\ntype TLS struct {\n\tPort int `envconfig:\"PORT\"`\n\tCertFile string `envconfig:\"CERT_PATH\"`\n\tKeyFile string `envconfig:\"KEY_PATH\"`\n\tRedirect bool `envconfig:\"REDIRECT\"`\n}\n\n\/\/ IsHTTPS checks if you have https enabled\nfunc (s *TLS) IsHTTPS() bool {\n\treturn s.CertFile != \"\" && s.KeyFile != \"\"\n}\n\n\/\/ Storage holds the configuration for a storage\ntype Storage struct {\n\tDSN string `envconfig:\"STORAGE_DSN\"`\n}\n\n\/\/ Database holds the configuration for a database\ntype Database struct {\n\tDSN string `envconfig:\"DATABASE_DSN\"`\n}\n\n\/\/ Stats holds the configuration for stats\ntype Stats struct {\n\tDSN string `envconfig:\"STATS_DSN\"`\n\tPrefix string `envconfig:\"STATS_PREFIX\"`\n\tIDs string `envconfig:\"STATS_IDS\"`\n\tAutoDiscoverThreshold uint `envconfig:\"STATS_AUTO_DISCOVER_THRESHOLD\"`\n\tAutoDiscoverWhiteList []string `envconfig:\"STATS_AUTO_DISCOVER_WHITE_LIST\"`\n\tErrorsSection string `envconfig:\"STATS_ERRORS_SECTION\"`\n}\n\n\/\/ Credentials represents the credentials that are going to be\n\/\/ used by admin JWT configuration\ntype Credentials struct {\n\t\/\/ Algorithm defines admin JWT signing algorithm.\n\t\/\/ Currently the following algorithms are supported: HS256, HS384, HS512.\n\tAlgorithm string `envconfig:\"ALGORITHM\"`\n\tSecret string `envconfig:\"SECRET\"`\n\tGithub Github\n\tBasic Basic\n}\n\n\/\/ Basic holds the basic users configurations\ntype Basic struct {\n\tUsers []BasicUsersConfig `envconfig:\"BASIC_ORGANIZATIONS\"`\n}\n\n\/\/ BasicUsersConfig represents an user configuration\ntype BasicUsersConfig struct {\n\tUsername string `json:\"username,omitempty\"`\n\tPassword string `json:\"password,omitempty\"`\n}\n\n\/\/ Github holds the github configurations\ntype Github struct {\n\tOrganizations []string `envconfig:\"GITHUB_ORGANIZATIONS\"`\n\tTeams []GitHubTeamConfig `envconfig:\"GITHUB_TEAMS\"`\n}\n\n\/\/ GitHubTeamConfig represents a team configuration\ntype GitHubTeamConfig struct {\n\tOrganizationName string `json:\"organization_name,omitempty\"`\n\tTeamName string `json:\"team_name,omitempty\"`\n}\n\n\/\/ IsConfigured checks if github is enabled\nfunc (auth *Github) IsConfigured() bool {\n\treturn len(auth.Organizations) > 0 ||\n\t\tlen(auth.Teams) > 0\n}\n\n\/\/ GoogleCloudTracing holds the Google Application Default Credentials\ntype GoogleCloudTracing struct {\n\tProjectID string `envconfig:\"TRACING_GC_PROJECT_ID\"`\n\tEmail string `envconfig:\"TRACING_GC_EMAIL\"`\n\tPrivateKey string `envconfig:\"TRACING_GC_PRIVATE_KEY\"`\n\tPrivateKeyID string `envconfig:\"TRACING_GC_PRIVATE_ID\"`\n}\n\n\/\/ AppdashTracing holds the Appdash tracing configuration\ntype AppdashTracing struct {\n\tDSN string `envconfig:\"TRACING_APPDASH_DSN\"`\n\tURL string `envconfig:\"TRACING_APPDASH_URL\"`\n}\n\n\/\/ Tracing represents the distributed tracing configuration\ntype Tracing struct {\n\tGoogleCloudTracing GoogleCloudTracing `mapstructure:\"googleCloud\"`\n\tAppdashTracing AppdashTracing `mapstructure:\"appdash\"`\n}\n\n\/\/ IsGoogleCloudEnabled checks if google cloud is enabled\nfunc (t Tracing) IsGoogleCloudEnabled() bool {\n\treturn len(t.GoogleCloudTracing.Email) > 0 && len(t.GoogleCloudTracing.PrivateKey) > 0 && len(t.GoogleCloudTracing.PrivateKeyID) > 0 && len(t.GoogleCloudTracing.ProjectID) > 0\n}\n\n\/\/ IsAppdashEnabled checks if appdash is enabled\nfunc (t Tracing) IsAppdashEnabled() bool {\n\treturn len(t.AppdashTracing.DSN) > 0\n}\n\nfunc init() {\n\tviper.SetDefault(\"port\", \"8080\")\n\tviper.SetDefault(\"tls.port\", \"8433\")\n\tviper.SetDefault(\"tls.redirect\", true)\n\tviper.SetDefault(\"backendFlushInterval\", \"20ms\")\n\tviper.SetDefault(\"database.dsn\", \"file:\/\/\/etc\/janus\")\n\tviper.SetDefault(\"storage.dsn\", \"memory:\/\/localhost\")\n\tviper.SetDefault(\"web.port\", \"8081\")\n\tviper.SetDefault(\"web.tls.port\", \"8444\")\n\tviper.SetDefault(\"web.tls.redisrect\", true)\n\tviper.SetDefault(\"web.credentials.algorithm\", \"HS256\")\n\tviper.SetDefault(\"web.credentials.username\", \"admin\")\n\tviper.SetDefault(\"web.credentials.password\", \"admin\")\n\tviper.SetDefault(\"stats.dsn\", \"log:\/\/\")\n\tviper.SetDefault(\"stats.errorsSection\", \"error-log\")\n\n\tlogging.InitDefaults(viper.GetViper(), \"log\")\n}\n\n\/\/Load configuration variables\nfunc Load(configFile string) (*Specification, error) {\n\tif configFile != \"\" {\n\t\tviper.SetConfigFile(configFile)\n\t} else {\n\t\tviper.SetConfigName(\"janus\")\n\t\tviper.AddConfigPath(\"\/etc\/janus\")\n\t\tviper.AddConfigPath(\".\")\n\t}\n\n\tif err := viper.ReadInConfig(); err != nil {\n\t\tlog.WithError(err).Warn(\"No config file found\")\n\t\treturn LoadEnv()\n\t}\n\n\tvar config Specification\n\tif err := viper.Unmarshal(&config); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &config, nil\n}\n\n\/\/LoadEnv loads configuration from environment variables\nfunc LoadEnv() (*Specification, error) {\n\tvar config Specification\n\n\tif err := viper.Unmarshal(&config); err != nil {\n\t\treturn nil, err\n\t}\n\n\terr := envconfig.Process(\"\", &config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &config, nil\n}\n<commit_msg>Added defaults<commit_after>package config\n\nimport (\n\t\"time\"\n\n\t\"github.com\/hellofresh\/logging-go\"\n\t\"github.com\/kelseyhightower\/envconfig\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ Specification for basic configurations\ntype Specification struct {\n\tPort int `envconfig:\"PORT\"`\n\tDebug bool `envconfig:\"DEBUG\"`\n\tGraceTimeOut int64 `envconfig:\"GRACE_TIMEOUT\"`\n\tMaxIdleConnsPerHost int `envconfig:\"MAX_IDLE_CONNS_PER_HOST\"`\n\tBackendFlushInterval time.Duration `envconfig:\"BACKEND_FLUSH_INTERVAL\"`\n\tCloseIdleConnsPeriod time.Duration `envconfig:\"CLOSE_IDLE_CONNS_PERIOD\"`\n\tLog logging.LogConfig\n\tWeb Web\n\tDatabase Database\n\tStorage Storage\n\tStats Stats\n\tTracing Tracing\n\tTLS TLS\n}\n\n\/\/ Web represents the API configurations\ntype Web struct {\n\tPort int `envconfig:\"API_PORT\"`\n\tReadOnly bool `envconfig:\"API_READONLY\"`\n\tCredentials Credentials\n\tTLS TLS\n}\n\n\/\/ TLS represents the TLS configurations\ntype TLS struct {\n\tPort int `envconfig:\"PORT\"`\n\tCertFile string `envconfig:\"CERT_PATH\"`\n\tKeyFile string `envconfig:\"KEY_PATH\"`\n\tRedirect bool `envconfig:\"REDIRECT\"`\n}\n\n\/\/ IsHTTPS checks if you have https enabled\nfunc (s *TLS) IsHTTPS() bool {\n\treturn s.CertFile != \"\" && s.KeyFile != \"\"\n}\n\n\/\/ Storage holds the configuration for a storage\ntype Storage struct {\n\tDSN string `envconfig:\"STORAGE_DSN\"`\n}\n\n\/\/ Database holds the configuration for a database\ntype Database struct {\n\tDSN string `envconfig:\"DATABASE_DSN\"`\n}\n\n\/\/ Stats holds the configuration for stats\ntype Stats struct {\n\tDSN string `envconfig:\"STATS_DSN\"`\n\tPrefix string `envconfig:\"STATS_PREFIX\"`\n\tIDs string `envconfig:\"STATS_IDS\"`\n\tAutoDiscoverThreshold uint `envconfig:\"STATS_AUTO_DISCOVER_THRESHOLD\"`\n\tAutoDiscoverWhiteList []string `envconfig:\"STATS_AUTO_DISCOVER_WHITE_LIST\"`\n\tErrorsSection string `envconfig:\"STATS_ERRORS_SECTION\"`\n}\n\n\/\/ Credentials represents the credentials that are going to be\n\/\/ used by admin JWT configuration\ntype Credentials struct {\n\t\/\/ Algorithm defines admin JWT signing algorithm.\n\t\/\/ Currently the following algorithms are supported: HS256, HS384, HS512.\n\tAlgorithm string `envconfig:\"ALGORITHM\"`\n\tSecret string `envconfig:\"SECRET\"`\n\tGithub Github\n\tBasic Basic\n}\n\n\/\/ Basic holds the basic users configurations\ntype Basic struct {\n\tUsers []BasicUsersConfig `envconfig:\"BASIC_ORGANIZATIONS\"`\n}\n\n\/\/ BasicUsersConfig represents an user configuration\ntype BasicUsersConfig struct {\n\tUsername string `json:\"username,omitempty\"`\n\tPassword string `json:\"password,omitempty\"`\n}\n\n\/\/ Github holds the github configurations\ntype Github struct {\n\tOrganizations []string `envconfig:\"GITHUB_ORGANIZATIONS\"`\n\tTeams []GitHubTeamConfig `envconfig:\"GITHUB_TEAMS\"`\n}\n\n\/\/ GitHubTeamConfig represents a team configuration\ntype GitHubTeamConfig struct {\n\tOrganizationName string `json:\"organization_name,omitempty\"`\n\tTeamName string `json:\"team_name,omitempty\"`\n}\n\n\/\/ IsConfigured checks if github is enabled\nfunc (auth *Github) IsConfigured() bool {\n\treturn len(auth.Organizations) > 0 ||\n\t\tlen(auth.Teams) > 0\n}\n\n\/\/ GoogleCloudTracing holds the Google Application Default Credentials\ntype GoogleCloudTracing struct {\n\tProjectID string `envconfig:\"TRACING_GC_PROJECT_ID\"`\n\tEmail string `envconfig:\"TRACING_GC_EMAIL\"`\n\tPrivateKey string `envconfig:\"TRACING_GC_PRIVATE_KEY\"`\n\tPrivateKeyID string `envconfig:\"TRACING_GC_PRIVATE_ID\"`\n}\n\n\/\/ AppdashTracing holds the Appdash tracing configuration\ntype AppdashTracing struct {\n\tDSN string `envconfig:\"TRACING_APPDASH_DSN\"`\n\tURL string `envconfig:\"TRACING_APPDASH_URL\"`\n}\n\n\/\/ Tracing represents the distributed tracing configuration\ntype Tracing struct {\n\tGoogleCloudTracing GoogleCloudTracing `mapstructure:\"googleCloud\"`\n\tAppdashTracing AppdashTracing `mapstructure:\"appdash\"`\n}\n\n\/\/ IsGoogleCloudEnabled checks if google cloud is enabled\nfunc (t Tracing) IsGoogleCloudEnabled() bool {\n\treturn len(t.GoogleCloudTracing.Email) > 0 && len(t.GoogleCloudTracing.PrivateKey) > 0 && len(t.GoogleCloudTracing.PrivateKeyID) > 0 && len(t.GoogleCloudTracing.ProjectID) > 0\n}\n\n\/\/ IsAppdashEnabled checks if appdash is enabled\nfunc (t Tracing) IsAppdashEnabled() bool {\n\treturn len(t.AppdashTracing.DSN) > 0\n}\n\nfunc init() {\n\tviper.SetDefault(\"port\", \"8080\")\n\tviper.SetDefault(\"tls.port\", \"8433\")\n\tviper.SetDefault(\"tls.redirect\", true)\n\tviper.SetDefault(\"backendFlushInterval\", \"20ms\")\n\tviper.SetDefault(\"database.dsn\", \"file:\/\/\/etc\/janus\")\n\tviper.SetDefault(\"storage.dsn\", \"memory:\/\/localhost\")\n\tviper.SetDefault(\"web.port\", \"8081\")\n\tviper.SetDefault(\"web.tls.port\", \"8444\")\n\tviper.SetDefault(\"web.tls.redisrect\", true)\n\tviper.SetDefault(\"web.credentials.algorithm\", \"HS256\")\n\tviper.SetDefault(\"web.credentials.basic.users\", []map[string]string{\n\t\t{\"username\": \"admin\", \"password\": \"admin\"},\n\t})\n\tviper.SetDefault(\"stats.dsn\", \"log:\/\/\")\n\tviper.SetDefault(\"stats.errorsSection\", \"error-log\")\n\n\tlogging.InitDefaults(viper.GetViper(), \"log\")\n}\n\n\/\/Load configuration variables\nfunc Load(configFile string) (*Specification, error) {\n\tif configFile != \"\" {\n\t\tviper.SetConfigFile(configFile)\n\t} else {\n\t\tviper.SetConfigName(\"janus\")\n\t\tviper.AddConfigPath(\"\/etc\/janus\")\n\t\tviper.AddConfigPath(\".\")\n\t}\n\n\tif err := viper.ReadInConfig(); err != nil {\n\t\tlog.WithError(err).Warn(\"No config file found\")\n\t\treturn LoadEnv()\n\t}\n\n\tvar config Specification\n\tif err := viper.Unmarshal(&config); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &config, nil\n}\n\n\/\/LoadEnv loads configuration from environment variables\nfunc LoadEnv() (*Specification, error) {\n\tvar config Specification\n\n\tif err := viper.Unmarshal(&config); err != nil {\n\t\treturn nil, err\n\t}\n\n\terr := envconfig.Process(\"\", &config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &config, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright © 2014–5 Brad Ackerman.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\n*\/\n\npackage dbaccess\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/backerman\/evego\/pkg\/types\"\n)\n\nfunc (db *sqlDb) NumJumpsID(fromSystemID, toSystemID int) (int, error) {\n\t\/\/ This function will be implemented differently depending on the\n\t\/\/ backend database.\n\tif fromSystemID == toSystemID {\n\t\t\/\/ These are the same system.\n\t\treturn 0, nil\n\t}\n\tswitch db.dbType {\n\tcase SQLite:\n\t\tvar numRows int\n\t\terr := db.countJumpsStmt.Get(&numRows, fromSystemID, toSystemID)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\t\/\/ numRows has a header and then one row for each jump in the route.\n\t\t\/\/ If there is no route, we get a header and nothing else.\n\t\t\/\/\n\t\t\/\/ Therefore, if numRows-1 is 0, there is no route; otherwise, the\n\t\t\/\/ route contains numRows-1 jumps.\n\t\tif numRows == 1 {\n\t\t\treturn -1, nil\n\t\t}\n\t\treturn numRows - 1, nil\n\tdefault:\n\t\treturn -1, fmt.Errorf(\"Routing is not supported for this database type.\")\n\t}\n\n}\n\nfunc (db *sqlDb) NumJumps(fromSystem, toSystem *types.SolarSystem) (int, error) {\n\treturn db.NumJumpsID(fromSystem.ID, toSystem.ID)\n}\n<commit_msg>Remove old routing code from pkg\/dbaccess.<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019-2020 Authors of Hubble\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage filters\n\nimport (\n\t\"context\"\n\tflowpb \"github.com\/cilium\/cilium\/api\/v1\/flow\"\n\tv1 \"github.com\/cilium\/cilium\/pkg\/hubble\/api\/v1\"\n)\n\nfunc filterByReplyField(replyParams []bool) FilterFunc {\n\treturn func(ev *v1.Event) bool {\n\t\tif len(replyParams) == 0 {\n\t\t\treturn true\n\t\t}\n\t\tswitch f := ev.Event.(type) {\n\t\tcase v1.Flow:\n\t\t\t\/\/ FIXME: For dropped flows, we handle `is_reply=unknown` as\n\t\t\t\/\/ `is_reply=false`. This is for compatibility with older clients\n\t\t\t\/\/ (such as Hubble UI) which assume this filter applies to the\n\t\t\t\/\/ deprecated `reply` field, where dropped flows always have\n\t\t\t\/\/ `reply=false`.\n\t\t\tif f.GetIsReply() == nil && f.GetVerdict() != flowpb.Verdict_DROPPED {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\treply := f.GetIsReply().GetValue()\n\t\t\tfor _, replyParam := range replyParams {\n\t\t\t\tif reply == replyParam {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n}\n\n\/\/ ReplyFilter implements filtering for reply flows\ntype ReplyFilter struct{}\n\n\/\/ OnBuildFilter builds a reply filter\nfunc (r *ReplyFilter) OnBuildFilter(ctx context.Context, ff *flowpb.FlowFilter) ([]FilterFunc, error) {\n\tvar fs []FilterFunc\n\n\tif ff.GetReply() != nil {\n\t\tfs = append(fs, filterByReplyField(ff.GetReply()))\n\t}\n\n\treturn fs, nil\n}\n<commit_msg>filters: Remove dependency on v1.Flow interface<commit_after>\/\/ Copyright 2019-2020 Authors of Hubble\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage filters\n\nimport (\n\t\"context\"\n\n\tflowpb \"github.com\/cilium\/cilium\/api\/v1\/flow\"\n\tv1 \"github.com\/cilium\/cilium\/pkg\/hubble\/api\/v1\"\n)\n\nfunc filterByReplyField(replyParams []bool) FilterFunc {\n\treturn func(ev *v1.Event) bool {\n\t\tif len(replyParams) == 0 {\n\t\t\treturn true\n\t\t}\n\t\tswitch f := ev.Event.(type) {\n\t\tcase *flowpb.Flow:\n\t\t\t\/\/ FIXME: For dropped flows, we handle `is_reply=unknown` as\n\t\t\t\/\/ `is_reply=false`. This is for compatibility with older clients\n\t\t\t\/\/ (such as Hubble UI) which assume this filter applies to the\n\t\t\t\/\/ deprecated `reply` field, where dropped flows always have\n\t\t\t\/\/ `reply=false`.\n\t\t\tif f.GetIsReply() == nil && f.GetVerdict() != flowpb.Verdict_DROPPED {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\treply := f.GetIsReply().GetValue()\n\t\t\tfor _, replyParam := range replyParams {\n\t\t\t\tif reply == replyParam {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n}\n\n\/\/ ReplyFilter implements filtering for reply flows\ntype ReplyFilter struct{}\n\n\/\/ OnBuildFilter builds a reply filter\nfunc (r *ReplyFilter) OnBuildFilter(ctx context.Context, ff *flowpb.FlowFilter) ([]FilterFunc, error) {\n\tvar fs []FilterFunc\n\n\tif ff.GetReply() != nil {\n\t\tfs = append(fs, filterByReplyField(ff.GetReply()))\n\t}\n\n\treturn fs, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016-2017 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cidrmap\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"unsafe\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/bpf\"\n\t\"github.com\/cilium\/cilium\/pkg\/logging\"\n\t\"github.com\/cilium\/cilium\/pkg\/logging\/logfields\"\n\n\t\"github.com\/sirupsen\/logrus\"\n)\n\nvar log = logging.DefaultLogger\n\nconst (\n\tMapName = \"cilium_cidr_\"\n)\n\n\/\/ CIDRMap refers to an LPM trie map at 'path'.\ntype CIDRMap struct {\n\tpath string\n\tFd int\n\tAddrSize int \/\/ max prefix length in bytes, 4 for IPv4, 16 for IPv6\n\tPrefixlen uint32\n}\n\nconst (\n\tMAX_KEYS = 1024\n\tLPM_MAP_VALUE_SIZE = 1\n)\n\ntype cidrKey struct {\n\tPrefixlen uint32\n\tNet [16]byte\n}\n\nfunc (cm *CIDRMap) cidrKeyInit(cidr net.IPNet) (key cidrKey) {\n\tones, _ := cidr.Mask.Size()\n\tkey.Prefixlen = uint32(ones)\n\t\/\/ IPv4 address can be represented by 16 byte slice in 'cidr.IP',\n\t\/\/ in which case the address is at the end of the slice.\n\tcopy(key.Net[:], cidr.IP[len(cidr.IP)-cm.AddrSize:len(cidr.IP)])\n\treturn\n}\n\nfunc (cm *CIDRMap) keyCidrInit(key cidrKey) (cidr net.IPNet) {\n\tcidr.Mask = net.CIDRMask(int(key.Prefixlen), cm.AddrSize*8)\n\tcidr.IP = make(net.IP, cm.AddrSize)\n\tcopy(cidr.IP[len(cidr.IP)-cm.AddrSize:len(cidr.IP)], key.Net[:])\n\treturn\n}\n\n\/\/ InsertCIDR inserts an entry to 'cm' with key 'cidr'. Value is currently not\n\/\/ used.\nfunc (cm *CIDRMap) InsertCIDR(cidr net.IPNet) error {\n\tkey := cm.cidrKeyInit(cidr)\n\tentry := [LPM_MAP_VALUE_SIZE]byte{}\n\tif cm.Prefixlen != 0 && cm.Prefixlen != key.Prefixlen {\n\t\treturn fmt.Errorf(\"Unable to update element with different prefixlen than map!\")\n\t}\n\treturn bpf.UpdateElement(cm.Fd, unsafe.Pointer(&key), unsafe.Pointer(&entry), 0)\n}\n\n\/\/ DeleteCIDR deletes an entry from 'cm' with key 'cidr'.\nfunc (cm *CIDRMap) DeleteCIDR(cidr net.IPNet) error {\n\tkey := cm.cidrKeyInit(cidr)\n\tif cm.Prefixlen != 0 && cm.Prefixlen != key.Prefixlen {\n\t\treturn fmt.Errorf(\"Unable to delete element with different prefixlen than map!\")\n\t}\n\treturn bpf.DeleteElement(cm.Fd, unsafe.Pointer(&key))\n}\n\n\/\/ CIDRExists returns true if 'cidr' exists in map 'cm'\nfunc (cm *CIDRMap) CIDRExists(cidr net.IPNet) bool {\n\tkey := cm.cidrKeyInit(cidr)\n\tvar entry [LPM_MAP_VALUE_SIZE]byte\n\treturn bpf.LookupElement(cm.Fd, unsafe.Pointer(&key), unsafe.Pointer(&entry)) == nil\n}\n\n\/\/ CIDRNext returns next CIDR entry in map 'cm'\nfunc (cm *CIDRMap) CIDRNext(cidr *net.IPNet) *net.IPNet {\n\tvar key, keyNext cidrKey\n\tif cidr != nil {\n\t\tkey = cm.cidrKeyInit(*cidr)\n\t}\n\terr := bpf.GetNextKey(cm.Fd, unsafe.Pointer(&key), unsafe.Pointer(&keyNext))\n\tif err != nil {\n\t\treturn nil\n\t}\n\tout := cm.keyCidrInit(keyNext)\n\treturn &out\n}\n\n\/\/ CIDRDump walks map 'cm' and dumps all CIDR entries\nfunc (cm *CIDRMap) CIDRDump(to []string) []string {\n\tvar key, keyNext *net.IPNet\n\tfor {\n\t\tkeyNext = cm.CIDRNext(key)\n\t\tif keyNext == nil {\n\t\t\treturn to\n\t\t}\n\t\tkey = keyNext\n\t\tto = append(to, key.String())\n\t}\n}\n\n\/\/ String returns the path of the map.\nfunc (cm *CIDRMap) String() string {\n\tif cm == nil {\n\t\treturn \"\"\n\t}\n\treturn cm.path\n}\n\n\/\/ Close closes the FD of the given CIDRMap\nfunc (cm *CIDRMap) Close() error {\n\tif cm == nil {\n\t\treturn nil\n\t}\n\treturn bpf.ObjClose(cm.Fd)\n}\n\n\/\/ OpenMap opens a new CIDRMap. 'bool' returns 'true' if the map was\n\/\/ created, and 'false' if the map already existed. prefixdyn denotes\n\/\/ whether element's prefixlen can vary and we thus need to use a LPM\n\/\/ trie instead of hash table.\nfunc OpenMap(path string, prefixlen int, prefixdyn bool) (*CIDRMap, bool, error) {\n\treturn OpenMapElems(path, prefixlen, prefixdyn, MAX_KEYS)\n}\n\n\/\/ OpenMapElems is the same as OpenMap only with defined maxelem as argument.\nfunc OpenMapElems(path string, prefixlen int, prefixdyn bool, maxelem uint32) (*CIDRMap, bool, error) {\n\tvar typeMap = bpf.BPF_MAP_TYPE_LPM_TRIE\n\tvar prefix = 0\n\n\tif prefixdyn == false {\n\t\ttypeMap = bpf.BPF_MAP_TYPE_HASH\n\t\tprefix = prefixlen\n\t}\n\tif prefixlen <= 0 {\n\t\treturn nil, false, fmt.Errorf(\"prefixlen must be > 0\")\n\t}\n\tbytes := (prefixlen-1)\/8 + 1\n\tfd, isNewMap, err := bpf.OpenOrCreateMap(\n\t\tpath,\n\t\ttypeMap,\n\t\tuint32(unsafe.Sizeof(uint32(0))+uintptr(bytes)),\n\t\tuint32(LPM_MAP_VALUE_SIZE),\n\t\tmaxelem,\n\t\tbpf.BPF_F_NO_PREALLOC,\n\t)\n\n\tif err != nil {\n\t\tlog.Debug(\"Kernel does not support CIDR maps, using hash table instead.\")\n\t\ttypeMap = bpf.BPF_MAP_TYPE_HASH\n\t\tfd, isNewMap, err = bpf.OpenOrCreateMap(\n\t\t\tpath,\n\t\t\ttypeMap,\n\t\t\tuint32(unsafe.Sizeof(uint32(0))+uintptr(bytes)),\n\t\t\tuint32(LPM_MAP_VALUE_SIZE),\n\t\t\tmaxelem,\n\t\t\tbpf.BPF_F_NO_PREALLOC,\n\t\t)\n\t\tif err != nil {\n\t\t\tscopedLog := log.WithError(err).WithField(logfields.Path, path)\n\t\t\tscopedLog.Warning(\"Failed to create CIDR map\")\n\t\t\treturn nil, false, err\n\t\t}\n\t}\n\n\tm := &CIDRMap{path: path, Fd: fd, AddrSize: bytes, Prefixlen: uint32(prefix)}\n\n\tlog.WithFields(logrus.Fields{\n\t\tlogfields.Path: path,\n\t\t\"fd\": fd,\n\t\t\"LPM\": typeMap == bpf.BPF_MAP_TYPE_LPM_TRIE,\n\t}).Debug(\"Created CIDR map\")\n\n\treturn m, isNewMap, nil\n}\n<commit_msg>cidrmap: Log when inserting\/removing entries<commit_after>\/\/ Copyright 2016-2017 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cidrmap\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"unsafe\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/bpf\"\n\t\"github.com\/cilium\/cilium\/pkg\/logging\"\n\t\"github.com\/cilium\/cilium\/pkg\/logging\/logfields\"\n\n\t\"github.com\/sirupsen\/logrus\"\n)\n\nvar log = logging.DefaultLogger\n\nconst (\n\tMapName = \"cilium_cidr_\"\n)\n\n\/\/ CIDRMap refers to an LPM trie map at 'path'.\ntype CIDRMap struct {\n\tpath string\n\tFd int\n\tAddrSize int \/\/ max prefix length in bytes, 4 for IPv4, 16 for IPv6\n\tPrefixlen uint32\n}\n\nconst (\n\tMAX_KEYS = 1024\n\tLPM_MAP_VALUE_SIZE = 1\n)\n\ntype cidrKey struct {\n\tPrefixlen uint32\n\tNet [16]byte\n}\n\nfunc (cm *CIDRMap) cidrKeyInit(cidr net.IPNet) (key cidrKey) {\n\tones, _ := cidr.Mask.Size()\n\tkey.Prefixlen = uint32(ones)\n\t\/\/ IPv4 address can be represented by 16 byte slice in 'cidr.IP',\n\t\/\/ in which case the address is at the end of the slice.\n\tcopy(key.Net[:], cidr.IP[len(cidr.IP)-cm.AddrSize:len(cidr.IP)])\n\treturn\n}\n\nfunc (cm *CIDRMap) keyCidrInit(key cidrKey) (cidr net.IPNet) {\n\tcidr.Mask = net.CIDRMask(int(key.Prefixlen), cm.AddrSize*8)\n\tcidr.IP = make(net.IP, cm.AddrSize)\n\tcopy(cidr.IP[len(cidr.IP)-cm.AddrSize:len(cidr.IP)], key.Net[:])\n\treturn\n}\n\n\/\/ InsertCIDR inserts an entry to 'cm' with key 'cidr'. Value is currently not\n\/\/ used.\nfunc (cm *CIDRMap) InsertCIDR(cidr net.IPNet) error {\n\tkey := cm.cidrKeyInit(cidr)\n\tentry := [LPM_MAP_VALUE_SIZE]byte{}\n\tif cm.Prefixlen != 0 && cm.Prefixlen != key.Prefixlen {\n\t\treturn fmt.Errorf(\"Unable to update element with different prefixlen than map!\")\n\t}\n\tlog.WithField(logfields.Path, cm.path).Debugf(\"Inserting CIDR entry %s\", cidr.String())\n\treturn bpf.UpdateElement(cm.Fd, unsafe.Pointer(&key), unsafe.Pointer(&entry), 0)\n}\n\n\/\/ DeleteCIDR deletes an entry from 'cm' with key 'cidr'.\nfunc (cm *CIDRMap) DeleteCIDR(cidr net.IPNet) error {\n\tkey := cm.cidrKeyInit(cidr)\n\tif cm.Prefixlen != 0 && cm.Prefixlen != key.Prefixlen {\n\t\treturn fmt.Errorf(\"Unable to delete element with different prefixlen than map!\")\n\t}\n\tlog.WithField(logfields.Path, cm.path).Debugf(\"Removing CIDR entry %s\", cidr.String())\n\treturn bpf.DeleteElement(cm.Fd, unsafe.Pointer(&key))\n}\n\n\/\/ CIDRExists returns true if 'cidr' exists in map 'cm'\nfunc (cm *CIDRMap) CIDRExists(cidr net.IPNet) bool {\n\tkey := cm.cidrKeyInit(cidr)\n\tvar entry [LPM_MAP_VALUE_SIZE]byte\n\treturn bpf.LookupElement(cm.Fd, unsafe.Pointer(&key), unsafe.Pointer(&entry)) == nil\n}\n\n\/\/ CIDRNext returns next CIDR entry in map 'cm'\nfunc (cm *CIDRMap) CIDRNext(cidr *net.IPNet) *net.IPNet {\n\tvar key, keyNext cidrKey\n\tif cidr != nil {\n\t\tkey = cm.cidrKeyInit(*cidr)\n\t}\n\terr := bpf.GetNextKey(cm.Fd, unsafe.Pointer(&key), unsafe.Pointer(&keyNext))\n\tif err != nil {\n\t\treturn nil\n\t}\n\tout := cm.keyCidrInit(keyNext)\n\treturn &out\n}\n\n\/\/ CIDRDump walks map 'cm' and dumps all CIDR entries\nfunc (cm *CIDRMap) CIDRDump(to []string) []string {\n\tvar key, keyNext *net.IPNet\n\tfor {\n\t\tkeyNext = cm.CIDRNext(key)\n\t\tif keyNext == nil {\n\t\t\treturn to\n\t\t}\n\t\tkey = keyNext\n\t\tto = append(to, key.String())\n\t}\n}\n\n\/\/ String returns the path of the map.\nfunc (cm *CIDRMap) String() string {\n\tif cm == nil {\n\t\treturn \"\"\n\t}\n\treturn cm.path\n}\n\n\/\/ Close closes the FD of the given CIDRMap\nfunc (cm *CIDRMap) Close() error {\n\tif cm == nil {\n\t\treturn nil\n\t}\n\treturn bpf.ObjClose(cm.Fd)\n}\n\n\/\/ OpenMap opens a new CIDRMap. 'bool' returns 'true' if the map was\n\/\/ created, and 'false' if the map already existed. prefixdyn denotes\n\/\/ whether element's prefixlen can vary and we thus need to use a LPM\n\/\/ trie instead of hash table.\nfunc OpenMap(path string, prefixlen int, prefixdyn bool) (*CIDRMap, bool, error) {\n\treturn OpenMapElems(path, prefixlen, prefixdyn, MAX_KEYS)\n}\n\n\/\/ OpenMapElems is the same as OpenMap only with defined maxelem as argument.\nfunc OpenMapElems(path string, prefixlen int, prefixdyn bool, maxelem uint32) (*CIDRMap, bool, error) {\n\tvar typeMap = bpf.BPF_MAP_TYPE_LPM_TRIE\n\tvar prefix = 0\n\n\tif prefixdyn == false {\n\t\ttypeMap = bpf.BPF_MAP_TYPE_HASH\n\t\tprefix = prefixlen\n\t}\n\tif prefixlen <= 0 {\n\t\treturn nil, false, fmt.Errorf(\"prefixlen must be > 0\")\n\t}\n\tbytes := (prefixlen-1)\/8 + 1\n\tfd, isNewMap, err := bpf.OpenOrCreateMap(\n\t\tpath,\n\t\ttypeMap,\n\t\tuint32(unsafe.Sizeof(uint32(0))+uintptr(bytes)),\n\t\tuint32(LPM_MAP_VALUE_SIZE),\n\t\tmaxelem,\n\t\tbpf.BPF_F_NO_PREALLOC,\n\t)\n\n\tif err != nil {\n\t\tlog.Debug(\"Kernel does not support CIDR maps, using hash table instead.\")\n\t\ttypeMap = bpf.BPF_MAP_TYPE_HASH\n\t\tfd, isNewMap, err = bpf.OpenOrCreateMap(\n\t\t\tpath,\n\t\t\ttypeMap,\n\t\t\tuint32(unsafe.Sizeof(uint32(0))+uintptr(bytes)),\n\t\t\tuint32(LPM_MAP_VALUE_SIZE),\n\t\t\tmaxelem,\n\t\t\tbpf.BPF_F_NO_PREALLOC,\n\t\t)\n\t\tif err != nil {\n\t\t\tscopedLog := log.WithError(err).WithField(logfields.Path, path)\n\t\t\tscopedLog.Warning(\"Failed to create CIDR map\")\n\t\t\treturn nil, false, err\n\t\t}\n\t}\n\n\tm := &CIDRMap{path: path, Fd: fd, AddrSize: bytes, Prefixlen: uint32(prefix)}\n\n\tlog.WithFields(logrus.Fields{\n\t\tlogfields.Path: path,\n\t\t\"fd\": fd,\n\t\t\"LPM\": typeMap == bpf.BPF_MAP_TYPE_LPM_TRIE,\n\t}).Debug(\"Created CIDR map\")\n\n\treturn m, isNewMap, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package k8s\n\nimport (\n\t\"fmt\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/event\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/util\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/util\/retry\"\n\tmeta \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\tapi \"k8s.io\/client-go\/pkg\/api\/v1\"\n\t\"k8s.io\/kubernetes\/pkg\/kubectl\/resource\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ EndpointsForManifests returns endpoints for specified manifest\nfunc (p *Plugin) EndpointsForManifests(namespace, deployName, targetManifest string, eventLog *event.Log) (map[string]string, error) {\n\tkubeClient, err := p.NewClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thelmKube := p.NewHelmKube(deployName, eventLog)\n\n\tinfos, err := helmKube.BuildUnstructured(namespace, strings.NewReader(targetManifest))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tendpoints := make(map[string]string)\n\n\tfor _, info := range infos {\n\t\tif info.Mapping.GroupVersionKind.Kind == \"Service\" { \/\/ nolint: goconst\n\n\t\t\tendpointsErr := p.addEndpointsFromService(kubeClient, info, endpoints)\n\t\t\tif endpointsErr != nil {\n\t\t\t\treturn nil, endpointsErr\n\t\t\t}\n\t\t}\n\t}\n\n\treturn endpoints, nil\n}\n\n\/\/ addEndpointsFromService searches for the available endpoints in specified service and writes them into provided map\nfunc (p *Plugin) addEndpointsFromService(kubeClient kubernetes.Interface, info *resource.Info, endpoints map[string]string) error {\n\tservice, getErr := kubeClient.CoreV1().Services(info.Namespace).Get(info.Name, meta.GetOptions{})\n\tif getErr != nil {\n\t\treturn getErr\n\t}\n\n\tif service.Spec.Type == api.ServiceTypeNodePort {\n\t\tfor _, port := range service.Spec.Ports {\n\t\t\tsURL := fmt.Sprintf(\"%s:%d\", p.ExternalAddress, port.NodePort)\n\t\t\taddEndpointsForServicePort(port, sURL, endpoints)\n\t\t}\n\t} else if service.Spec.Type == api.ServiceTypeLoadBalancer {\n\t\tingress := service.Status.LoadBalancer.Ingress\n\n\t\t\/\/ wait for LB external IP to be provisioned\n\t\tok := retry.Do(15*time.Minute, 10*time.Second, func() bool {\n\t\t\tservice, getErr = kubeClient.CoreV1().Services(info.Namespace).Get(info.Name, meta.GetOptions{})\n\t\t\tif getErr != nil {\n\t\t\t\tpanic(fmt.Sprintf(\"Error while getting Service %s in namespace %s\", info.Name, info.Namespace))\n\t\t\t}\n\n\t\t\tingress = service.Status.LoadBalancer.Ingress\n\t\t\tif ingress == nil {\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\texternalAddress := \"\"\n\t\t\tfor _, entry := range ingress {\n\t\t\t\tif entry.Hostname != \"\" {\n\t\t\t\t\texternalAddress = entry.Hostname\n\t\t\t\t} else if entry.IP != \"\" {\n\t\t\t\t\texternalAddress = entry.IP\n\t\t\t\t}\n\t\t\t\tif externalAddress == \"\" {\n\t\t\t\t\tpanic(fmt.Sprintf(\"Got empty LoadBalancerIngress for Service %s in namespace %s\", info.Name, info.Namespace))\n\t\t\t\t} else {\n\t\t\t\t\t\/\/ handle only first ingress entry for LB\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tfor _, port := range service.Spec.Ports {\n\t\t\t\tsURL := fmt.Sprintf(\"%s:%d\", externalAddress, port.Port)\n\t\t\t\taddEndpointsForServicePort(port, sURL, endpoints)\n\t\t\t}\n\n\t\t\treturn true\n\t\t})\n\n\t\tif ingress == nil || !ok {\n\t\t\treturn fmt.Errorf(\"unable to get endpoints for Service type LoadBalancer (%s in %s)\", info.Name, info.Name)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc addEndpointsForServicePort(port api.ServicePort, sURL string, endpoints map[string]string) {\n\tif util.StringContainsAny(port.Name, \"https\") {\n\t\tsURL = \"https:\/\/\" + sURL\n\t} else if util.StringContainsAny(port.Name, \"ui\", \"rest\", \"http\", \"grafana\", \"service\") {\n\t\tsURL = \"http:\/\/\" + sURL\n\t}\n\tname := port.Name\n\tif len(name) == 0 {\n\t\tname = port.TargetPort.String()\n\t}\n\tendpoints[name] = sURL\n}\n<commit_msg>Stop retying failed endpoints fetch as it'll be retried during next enforce<commit_after>package k8s\n\nimport (\n\t\"fmt\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/event\"\n\t\"github.com\/Aptomi\/aptomi\/pkg\/util\"\n\tmeta \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\tapi \"k8s.io\/client-go\/pkg\/api\/v1\"\n\t\"k8s.io\/kubernetes\/pkg\/kubectl\/resource\"\n\t\"strings\"\n)\n\n\/\/ EndpointsForManifests returns endpoints for specified manifest\nfunc (p *Plugin) EndpointsForManifests(namespace, deployName, targetManifest string, eventLog *event.Log) (map[string]string, error) {\n\tkubeClient, err := p.NewClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thelmKube := p.NewHelmKube(deployName, eventLog)\n\n\tinfos, err := helmKube.BuildUnstructured(namespace, strings.NewReader(targetManifest))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tendpoints := make(map[string]string)\n\n\tfor _, info := range infos {\n\t\tif info.Mapping.GroupVersionKind.Kind == \"Service\" { \/\/ nolint: goconst\n\n\t\t\tendpointsErr := p.addEndpointsFromService(kubeClient, info, endpoints)\n\t\t\tif endpointsErr != nil {\n\t\t\t\treturn nil, endpointsErr\n\t\t\t}\n\t\t}\n\t}\n\n\treturn endpoints, nil\n}\n\n\/\/ addEndpointsFromService searches for the available endpoints in specified service and writes them into provided map\nfunc (p *Plugin) addEndpointsFromService(kubeClient kubernetes.Interface, info *resource.Info, endpoints map[string]string) error {\n\tservice, getErr := kubeClient.CoreV1().Services(info.Namespace).Get(info.Name, meta.GetOptions{})\n\tif getErr != nil {\n\t\treturn getErr\n\t}\n\n\tif service.Spec.Type == api.ServiceTypeNodePort {\n\t\tfor _, port := range service.Spec.Ports {\n\t\t\tsURL := fmt.Sprintf(\"%s:%d\", p.ExternalAddress, port.NodePort)\n\t\t\taddEndpointsForServicePort(port, sURL, endpoints)\n\t\t}\n\t} else if service.Spec.Type == api.ServiceTypeLoadBalancer {\n\t\tingress := service.Status.LoadBalancer.Ingress\n\n\t\tif ingress == nil {\n\t\t\treturn fmt.Errorf(\"no Ingress for Service type LoadBalancer (%s in %s)\", info.Name, info.Name)\n\t\t}\n\n\t\texternalAddress := \"\"\n\t\tfor _, entry := range ingress {\n\t\t\tif entry.Hostname != \"\" {\n\t\t\t\texternalAddress = entry.Hostname\n\t\t\t} else if entry.IP != \"\" {\n\t\t\t\texternalAddress = entry.IP\n\t\t\t}\n\t\t\tif externalAddress == \"\" {\n\t\t\t\tfmt.Errorf(\"got empty Ingress for Service type LoadBalancer (%s in %s)\", info.Name, info.Namespace)\n\t\t\t} else {\n\t\t\t\t\/\/ handle only first ingress entry for LB\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tfor _, port := range service.Spec.Ports {\n\t\t\tsURL := fmt.Sprintf(\"%s:%d\", externalAddress, port.Port)\n\t\t\taddEndpointsForServicePort(port, sURL, endpoints)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc addEndpointsForServicePort(port api.ServicePort, sURL string, endpoints map[string]string) {\n\tif util.StringContainsAny(port.Name, \"https\") {\n\t\tsURL = \"https:\/\/\" + sURL\n\t} else if util.StringContainsAny(port.Name, \"ui\", \"rest\", \"http\", \"grafana\", \"service\") {\n\t\tsURL = \"http:\/\/\" + sURL\n\t}\n\tname := port.Name\n\tif len(name) == 0 {\n\t\tname = port.TargetPort.String()\n\t}\n\tendpoints[name] = sURL\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage debug\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/serializer\/json\"\n\t\"k8s.io\/client-go\/kubernetes\/scheme\"\n\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/docker\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/graph\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/kubernetes\/manifest\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/runner\/runcontext\"\n)\n\nvar (\n\tdecodeFromYaml = scheme.Codecs.UniversalDeserializer().Decode\n\tencodeAsYaml = func(o runtime.Object) ([]byte, error) {\n\t\ts := json.NewYAMLSerializer(json.DefaultMetaFactory, scheme.Scheme, scheme.Scheme)\n\t\tvar b bytes.Buffer\n\t\tw := bufio.NewWriter(&b)\n\t\tif err := s.Encode(o, w); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tw.Flush()\n\t\treturn b.Bytes(), nil\n\t}\n)\n\n\/\/ ApplyDebuggingTransforms applies language-platform-specific transforms to a list of manifests.\nfunc ApplyDebuggingTransforms(l manifest.ManifestList, builds []graph.Artifact, registries manifest.Registries) (manifest.ManifestList, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tretriever := func(image string) (imageConfiguration, error) {\n\t\tif artifact := findArtifact(image, builds); artifact != nil {\n\t\t\treturn retrieveImageConfiguration(ctx, artifact, registries.InsecureRegistries)\n\t\t}\n\t\treturn imageConfiguration{}, fmt.Errorf(\"no build artifact for %q\", image)\n\t}\n\treturn applyDebuggingTransforms(l, retriever, registries.DebugHelpersRegistry)\n}\n\nfunc applyDebuggingTransforms(l manifest.ManifestList, retriever configurationRetriever, debugHelpersRegistry string) (manifest.ManifestList, error) {\n\tvar updated manifest.ManifestList\n\tfor _, manifest := range l {\n\t\tobj, _, err := decodeFromYaml(manifest, nil, nil)\n\t\tif err != nil {\n\t\t\tlogrus.Debugf(\"Unable to interpret manifest for debugging: %v\\n\", err)\n\t\t} else if transformManifest(obj, retriever, debugHelpersRegistry) {\n\t\t\tmanifest, err = encodeAsYaml(obj)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"marshalling yaml: %w\", err)\n\t\t\t}\n\t\t\tif logrus.IsLevelEnabled(logrus.DebugLevel) {\n\t\t\t\tlogrus.Debugln(\"Applied debugging transform:\\n\", string(manifest))\n\t\t\t}\n\t\t}\n\t\tupdated = append(updated, manifest)\n\t}\n\n\treturn updated, nil\n}\n\n\/\/ findArtifact finds the corresponding artifact for the given image.\n\/\/ If `builds` is empty, then treat all `image` images as a build artifact.\nfunc findArtifact(image string, builds []graph.Artifact) *graph.Artifact {\n\tif len(builds) == 0 {\n\t\tlogrus.Debugf(\"No build artifacts specified: using image as-is %q\", image)\n\t\treturn &graph.Artifact{ImageName: image, Tag: image}\n\t}\n\tfor _, artifact := range builds {\n\t\tif image == artifact.ImageName || image == artifact.Tag {\n\t\t\tlogrus.Debugf(\"Found artifact for image %q\", image)\n\t\t\treturn &artifact\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ retrieveImageConfiguration retrieves the image container configuration for\n\/\/ the given build artifact\nfunc retrieveImageConfiguration(ctx context.Context, artifact *graph.Artifact, insecureRegistries map[string]bool) (imageConfiguration, error) {\n\t\/\/ TODO: use the proper RunContext\n\tapiClient, err := docker.NewAPIClient(&runcontext.RunContext{\n\t\tInsecureRegistries: insecureRegistries,\n\t})\n\tif err != nil {\n\t\treturn imageConfiguration{}, fmt.Errorf(\"could not connect to local docker daemon: %w\", err)\n\t}\n\n\t\/\/ the apiClient will go to the remote registry if local docker daemon is not available\n\tmanifest, err := apiClient.ConfigFile(ctx, artifact.Tag)\n\tif err != nil {\n\t\tlogrus.Debugf(\"Error retrieving image manifest for %v: %v\", artifact.Tag, err)\n\t\treturn imageConfiguration{}, fmt.Errorf(\"retrieving image config for %q: %w\", artifact.Tag, err)\n\t}\n\n\tconfig := manifest.Config\n\tlogrus.Debugf(\"Retrieved local image configuration for %v: %v\", artifact.Tag, config)\n\treturn imageConfiguration{\n\t\tartifact: artifact.ImageName,\n\t\tenv: envAsMap(config.Env),\n\t\tentrypoint: config.Entrypoint,\n\t\targuments: config.Cmd,\n\t\tlabels: config.Labels,\n\t\tworkingDir: config.WorkingDir,\n\t}, nil\n}\n\n\/\/ envAsMap turns an array of environment \"NAME=value\" strings into a map\nfunc envAsMap(env []string) map[string]string {\n\tresult := make(map[string]string)\n\tfor _, pair := range env {\n\t\ts := strings.SplitN(pair, \"=\", 2)\n\t\tresult[s[0]] = s[1]\n\t}\n\treturn result\n}\n<commit_msg>Avoid aliasing in image configuration (#5804)<commit_after>\/*\nCopyright 2019 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage debug\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/serializer\/json\"\n\t\"k8s.io\/client-go\/kubernetes\/scheme\"\n\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/docker\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/graph\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/kubernetes\/manifest\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/runner\/runcontext\"\n)\n\nvar (\n\tdecodeFromYaml = scheme.Codecs.UniversalDeserializer().Decode\n\tencodeAsYaml = func(o runtime.Object) ([]byte, error) {\n\t\ts := json.NewYAMLSerializer(json.DefaultMetaFactory, scheme.Scheme, scheme.Scheme)\n\t\tvar b bytes.Buffer\n\t\tw := bufio.NewWriter(&b)\n\t\tif err := s.Encode(o, w); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tw.Flush()\n\t\treturn b.Bytes(), nil\n\t}\n)\n\n\/\/ ApplyDebuggingTransforms applies language-platform-specific transforms to a list of manifests.\nfunc ApplyDebuggingTransforms(l manifest.ManifestList, builds []graph.Artifact, registries manifest.Registries) (manifest.ManifestList, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tretriever := func(image string) (imageConfiguration, error) {\n\t\tif artifact := findArtifact(image, builds); artifact != nil {\n\t\t\treturn retrieveImageConfiguration(ctx, artifact, registries.InsecureRegistries)\n\t\t}\n\t\treturn imageConfiguration{}, fmt.Errorf(\"no build artifact for %q\", image)\n\t}\n\treturn applyDebuggingTransforms(l, retriever, registries.DebugHelpersRegistry)\n}\n\nfunc applyDebuggingTransforms(l manifest.ManifestList, retriever configurationRetriever, debugHelpersRegistry string) (manifest.ManifestList, error) {\n\tvar updated manifest.ManifestList\n\tfor _, manifest := range l {\n\t\tobj, _, err := decodeFromYaml(manifest, nil, nil)\n\t\tif err != nil {\n\t\t\tlogrus.Debugf(\"Unable to interpret manifest for debugging: %v\\n\", err)\n\t\t} else if transformManifest(obj, retriever, debugHelpersRegistry) {\n\t\t\tmanifest, err = encodeAsYaml(obj)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"marshalling yaml: %w\", err)\n\t\t\t}\n\t\t\tif logrus.IsLevelEnabled(logrus.DebugLevel) {\n\t\t\t\tlogrus.Debugln(\"Applied debugging transform:\\n\", string(manifest))\n\t\t\t}\n\t\t}\n\t\tupdated = append(updated, manifest)\n\t}\n\n\treturn updated, nil\n}\n\n\/\/ findArtifact finds the corresponding artifact for the given image.\n\/\/ If `builds` is empty, then treat all `image` images as a build artifact.\nfunc findArtifact(image string, builds []graph.Artifact) *graph.Artifact {\n\tif len(builds) == 0 {\n\t\tlogrus.Debugf(\"No build artifacts specified: using image as-is %q\", image)\n\t\treturn &graph.Artifact{ImageName: image, Tag: image}\n\t}\n\tfor _, artifact := range builds {\n\t\tif image == artifact.ImageName || image == artifact.Tag {\n\t\t\tlogrus.Debugf(\"Found artifact for image %q\", image)\n\t\t\treturn &artifact\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ retrieveImageConfiguration retrieves the image container configuration for\n\/\/ the given build artifact\nfunc retrieveImageConfiguration(ctx context.Context, artifact *graph.Artifact, insecureRegistries map[string]bool) (imageConfiguration, error) {\n\t\/\/ TODO: use the proper RunContext\n\tapiClient, err := docker.NewAPIClient(&runcontext.RunContext{\n\t\tInsecureRegistries: insecureRegistries,\n\t})\n\tif err != nil {\n\t\treturn imageConfiguration{}, fmt.Errorf(\"could not connect to local docker daemon: %w\", err)\n\t}\n\n\t\/\/ the apiClient will go to the remote registry if local docker daemon is not available\n\tmanifest, err := apiClient.ConfigFile(ctx, artifact.Tag)\n\tif err != nil {\n\t\tlogrus.Debugf(\"Error retrieving image manifest for %v: %v\", artifact.Tag, err)\n\t\treturn imageConfiguration{}, fmt.Errorf(\"retrieving image config for %q: %w\", artifact.Tag, err)\n\t}\n\n\tconfig := manifest.Config\n\tlogrus.Debugf(\"Retrieved local image configuration for %v: %v\", artifact.Tag, config)\n\t\/\/ need to duplicate slices as apiClient caches requests\n\treturn imageConfiguration{\n\t\tartifact: artifact.ImageName,\n\t\tenv: envAsMap(config.Env),\n\t\tentrypoint: dupArray(config.Entrypoint),\n\t\targuments: dupArray(config.Cmd),\n\t\tlabels: dupMap(config.Labels),\n\t\tworkingDir: config.WorkingDir,\n\t}, nil\n}\n\n\/\/ envAsMap turns an array of environment \"NAME=value\" strings into a map\nfunc envAsMap(env []string) map[string]string {\n\tresult := make(map[string]string)\n\tfor _, pair := range env {\n\t\ts := strings.SplitN(pair, \"=\", 2)\n\t\tresult[s[0]] = s[1]\n\t}\n\treturn result\n}\n\nfunc dupArray(s []string) []string {\n\tif len(s) == 0 {\n\t\treturn nil\n\t}\n\tdup := make([]string, len(s))\n\tcopy(dup, s)\n\treturn dup\n}\n\nfunc dupMap(s map[string]string) map[string]string {\n\tif len(s) == 0 {\n\t\treturn nil\n\t}\n\tdup := make(map[string]string, len(s))\n\tfor k, v := range s {\n\t\tdup[k] = v\n\t}\n\treturn dup\n}\n<|endoftext|>"} {"text":"<commit_before>package storage\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestParseCommit(t *testing.T) {\n\tfoo := `tree 40279100b292dd26bfda150adf1c4fd5a4e52ffe\nparent ae51e9d1b987f9086cbc65e694f06759bc62e743\nauthor First Lastname <first.lastname@example.com> 1505935797 -0700\ncommitter Second Lastname <second.lastname@example.com> 1505935797 -0700\n\ndo something very useful to conquer the world\n\nmy\nawesome\n\nbody`\n\n\tcommit, err := parseCommit(bytes.NewBufferString(foo), \"99cc2f794893815dfc69ab1ba3370ef3e7a9fed2\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"99cc2f794893815dfc69ab1ba3370ef3e7a9fed2\", commit.Hash)\n\tassert.Equal(t, \"40279100b292dd26bfda150adf1c4fd5a4e52ffe\", commit.Tree)\n\tassert.Equal(t, \"ae51e9d1b987f9086cbc65e694f06759bc62e743\", commit.Parent)\n\tassert.Equal(t, \"First Lastname\", commit.Author.Name)\n\tassert.Equal(t, \"first.lastname@example.com\", commit.Author.Email)\n\tassert.Equal(t, int64(1505935797), commit.Author.Date.Unix())\n\tassert.Equal(t, \"Second Lastname\", commit.Committer.Name)\n\tassert.Equal(t, \"second.lastname@example.com\", commit.Committer.Email)\n\tassert.Equal(t, int64(1505935797), commit.Committer.Date.Unix())\n\tassert.Equal(t, \"do something very useful to conquer the world\", commit.Message)\n\tassert.Equal(t, \"my\\nawesome\\n\\nbody\", commit.Body)\n}\n<commit_msg>cleanup test<commit_after>package storage\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestParseCommit(t *testing.T) {\n\tfoo := `tree 40279100b292dd26bfda150adf1c4fd5a4e52ffe\nparent ae51e9d1b987f9086cbc65e694f06759bc62e743\nauthor First Lastname <first.lastname@example.com> 1505935797 -0700\ncommitter Second Lastname <second.lastname@example.com> 1505935797 -0700\n\ndo something very useful to conquer the world\n\nmy\nawesome\n\nbody`\n\texpected := Commit{\n\t\tHash: \"99cc2f794893815dfc69ab1ba3370ef3e7a9fed2\",\n\t\tTree: \"40279100b292dd26bfda150adf1c4fd5a4e52ffe\",\n\t\tParent: \"ae51e9d1b987f9086cbc65e694f06759bc62e743\",\n\t\tAuthor: Author{\n\t\t\tName: \"First Lastname\",\n\t\t\tEmail: \"first.lastname@example.com\",\n\t\t\tDate: time.Unix(1505935797, 0),\n\t\t},\n\t\tCommitter: Author{\n\t\t\tName: \"Second Lastname\",\n\t\t\tEmail: \"second.lastname@example.com\",\n\t\t\tDate: time.Unix(1505935797, 0),\n\t\t},\n\t\tMessage: \"do something very useful to conquer the world\",\n\t\tBody: \"my\\nawesome\\n\\nbody\",\n\t}\n\tcommit, err := parseCommit(bytes.NewBufferString(foo), \"99cc2f794893815dfc69ab1ba3370ef3e7a9fed2\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, expected, commit)\n}\n<|endoftext|>"} {"text":"<commit_before>package hash\n\nimport (\n\t\"bufio\"\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"hash\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n)\n\ntype Hash string\n\nvar (\n\tencoder hash.Hash = sha256.New()\n)\n\nfunc File(f *os.File, blockSize int64) (hashes []string, err error) {\n\tstat, err := f.Stat()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\n\t\/\/ calculate number of parts\n\tsize := stat.Size()\n\tparts := (size \/ blockSize) + 1\n\thashes = make([]string, parts)\n\treader := bufio.NewReaderSize(f, int(blockSize))\n\n\tlog.Printf(\"number of parts %d with a size of %d\", parts, size)\n\n\tvar i int64\n\n\tfor i = 0; i < parts; i++ {\n\t\toffset := blockSize * i\n\t\t_, err = f.Seek(offset, 0)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\n\t\thash, err := FilePart(reader, blockSize, offset)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t} else {\n\t\t\thashes[i] = hash\n\t\t}\n\n\t}\n\tlog.Printf(\"Finished\")\n\n\treturn\n}\n\nfunc FilePart(b *bufio.Reader, blockSize int64, offset int64) (hash string, err error) {\n\titerations := blockSize \/ 128\n\tbufferRest := blockSize % 128\n\n\tif bufferRest > 0 {\n\t\titerations++\n\t}\n\n\tfor i := 0; i < int(iterations); i++ {\n\t\tbuffer := make([]byte, 128)\n\n\t\t_, err = b.Read(buffer)\n\n\t\tif err != nil && err != io.EOF {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\n\t\t_, err = encoder.Write(buffer)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\thash = hex.EncodeToString(encoder.Sum(nil))\n\tlog.Printf(\"%s - %s\", hash, err)\n\n\treturn\n}\n<commit_msg>Fix some memory consumption errors<commit_after>package hash\n\nimport (\n\t\"bufio\"\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"hash\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n)\n\ntype Hash string\n\nvar (\n\tencoder hash.Hash = sha256.New()\n)\n\nfunc File(f *os.File, blockSize int64) (hashes []string, err error) {\n\tstat, err := f.Stat()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\n\t\/\/ calculate number of parts\n\tsize := stat.Size()\n\tparts := (size \/ blockSize) + 1\n\thashes = make([]string, parts)\n\treader := bufio.NewReaderSize(f, 128)\n\n\tlog.Printf(\"number of parts %d with a size of %d\", parts, size)\n\n\tvar i int64\n\n\tfor i = 0; i < parts; i++ {\n\t\toffset := blockSize * i\n\t\t_, err = f.Seek(offset, 0)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\n\t\thash, err := FilePart(reader, blockSize, offset)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t} else {\n\t\t\thashes[i] = hash\n\t\t}\n\n\t}\n\tlog.Printf(\"Finished\")\n\n\treturn\n}\n\nfunc FilePart(b *bufio.Reader, blockSize int64, offset int64) (hash string, err error) {\n\titerations := blockSize \/ 128\n\tbufferRest := blockSize % 128\n\n\tif bufferRest > 0 {\n\t\titerations++\n\t}\n\n\tfor i := 0; i < int(iterations); i++ {\n\t\tbuffer := make([]byte, 128)\n\n\t\t_, err = b.Read(buffer)\n\n\t\tif err != nil && err != io.EOF {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\n\t\t_, err = encoder.Write(buffer)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\thash = hex.EncodeToString(encoder.Sum(nil))\n\tlog.Printf(\"%s - %s\", hash, err)\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package activekit\n\nimport \"fmt\"\n\nfunc Promt(promt string) string {\n\tfmt.Print(promt)\n\treturn Input()\n}\n<commit_msg>add variable args to promt func<commit_after>package activekit\n\nimport \"fmt\"\n\nfunc Promt(promt string, vars ...interface{}) string {\n\tfmt.Printf(promt, vars...)\n\treturn Input()\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage homedir\n\nimport (\n\t\"os\"\n\t\"runtime\"\n)\n\n\/\/ HomeDir returns the home directory for the current user\nfunc HomeDir() string {\n\tif runtime.GOOS == \"windows\" {\n\t\tif homeDrive, homePath := os.Getenv(\"HOMEDRIVE\"), os.Getenv(\"HOMEPATH\"); len(homeDrive) > 0 && len(homePath) > 0 {\n\t\t\thomeDir := homeDrive + homePath\n\t\t\tif _, err := os.Stat(homeDir); err == nil {\n\t\t\t\treturn homeDir\n\t\t\t}\n\t\t}\n\t\tif userProfile := os.Getenv(\"USERPROFILE\"); len(userProfile) > 0 {\n\t\t\tif _, err := os.Stat(userProfile); err == nil {\n\t\t\t\treturn userProfile\n\t\t\t}\n\t\t}\n\t}\n\treturn os.Getenv(\"HOME\")\n}\n<commit_msg>Added support for HOME environment variable on Windows<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage homedir\n\nimport (\n\t\"os\"\n\t\"runtime\"\n)\n\n\/\/ HomeDir returns the home directory for the current user\nfunc HomeDir() string {\n\tif runtime.GOOS == \"windows\" {\n\n\t\t\/\/ First prefer the HOME environmental variable\n\t\tif home := os.Getenv(\"HOME\"); len(home) > 0 {\n\t\t\tif _, err := os.Stat(home); err == nil {\n\t\t\t\treturn home\n\t\t\t}\n\t\t}\n\t\tif homeDrive, homePath := os.Getenv(\"HOMEDRIVE\"), os.Getenv(\"HOMEPATH\"); len(homeDrive) > 0 && len(homePath) > 0 {\n\t\t\thomeDir := homeDrive + homePath\n\t\t\tif _, err := os.Stat(homeDir); err == nil {\n\t\t\t\treturn homeDir\n\t\t\t}\n\t\t}\n\t\tif userProfile := os.Getenv(\"USERPROFILE\"); len(userProfile) > 0 {\n\t\t\tif _, err := os.Stat(userProfile); err == nil {\n\t\t\t\treturn userProfile\n\t\t\t}\n\t\t}\n\t}\n\treturn os.Getenv(\"HOME\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The etcd-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage k8sutil\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd-operator\/pkg\/backup\/backupapi\"\n\t\"github.com\/coreos\/etcd-operator\/pkg\/spec\"\n\t\"github.com\/coreos\/etcd-operator\/pkg\/util\/constants\"\n\t\"github.com\/coreos\/etcd-operator\/pkg\/util\/etcdutil\"\n\t\"github.com\/coreos\/etcd-operator\/pkg\/util\/retryutil\"\n\n\t\"k8s.io\/client-go\/kubernetes\"\n\t\"k8s.io\/client-go\/pkg\/api\"\n\tapierrors \"k8s.io\/client-go\/pkg\/api\/errors\"\n\t\"k8s.io\/client-go\/pkg\/api\/meta\"\n\t\"k8s.io\/client-go\/pkg\/api\/meta\/metatypes\"\n\t\"k8s.io\/client-go\/pkg\/api\/unversioned\"\n\t\"k8s.io\/client-go\/pkg\/api\/v1\"\n\t\"k8s.io\/client-go\/pkg\/labels\"\n\t\"k8s.io\/client-go\/pkg\/runtime\"\n\t\"k8s.io\/client-go\/pkg\/runtime\/serializer\"\n\t\"k8s.io\/client-go\/pkg\/util\/intstr\"\n\t\"k8s.io\/client-go\/rest\"\n)\n\nconst (\n\tetcdVolumeMountDir = \"\/var\/etcd\"\n\tdataDir = etcdVolumeMountDir + \"\/data\"\n\tbackupFile = \"\/var\/etcd\/latest.backup\"\n\tetcdVersionAnnotationKey = \"etcd.version\"\n\tannotationPrometheusScrape = \"prometheus.io\/scrape\"\n\tannotationPrometheusPort = \"prometheus.io\/port\"\n)\n\nfunc GetEtcdVersion(pod *v1.Pod) string {\n\treturn pod.Annotations[etcdVersionAnnotationKey]\n}\n\nfunc SetEtcdVersion(pod *v1.Pod, version string) {\n\tpod.Annotations[etcdVersionAnnotationKey] = version\n}\n\nfunc GetPodNames(pods []*v1.Pod) []string {\n\tres := []string{}\n\tfor _, p := range pods {\n\t\tres = append(res, p.Name)\n\t}\n\treturn res\n}\n\nfunc makeRestoreInitContainerSpec(backupAddr, name, token, version string) string {\n\tspec := []v1.Container{\n\t\t{\n\t\t\tName: \"fetch-backup\",\n\t\t\tImage: \"tutum\/curl\",\n\t\t\tCommand: []string{\n\t\t\t\t\"\/bin\/sh\", \"-c\",\n\t\t\t\tfmt.Sprintf(\"curl -o %s %s\", backupFile, backupapi.NewBackupURL(\"http\", backupAddr, version)),\n\t\t\t},\n\t\t\tVolumeMounts: etcdVolumeMounts(),\n\t\t},\n\t\t{\n\t\t\tName: \"restore-datadir\",\n\t\t\tImage: EtcdImageName(version),\n\t\t\tCommand: []string{\n\t\t\t\t\"\/bin\/sh\", \"-c\",\n\t\t\t\tfmt.Sprintf(\"ETCDCTL_API=3 etcdctl snapshot restore %[1]s\"+\n\t\t\t\t\t\" --name %[2]s\"+\n\t\t\t\t\t\" --initial-cluster %[2]s=http:\/\/%[2]s:2380\"+\n\t\t\t\t\t\" --initial-cluster-token %[3]s\"+\n\t\t\t\t\t\" --initial-advertise-peer-urls http:\/\/%[2]s:2380\"+\n\t\t\t\t\t\" --data-dir %[4]s\", backupFile, name, token, dataDir),\n\t\t\t},\n\t\t\tVolumeMounts: etcdVolumeMounts(),\n\t\t},\n\t}\n\tb, err := json.Marshal(spec)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(b)\n}\n\nfunc EtcdImageName(version string) string {\n\treturn fmt.Sprintf(\"quay.io\/coreos\/etcd:v%v\", version)\n}\n\nfunc GetNodePortString(srv *v1.Service) string {\n\treturn fmt.Sprint(srv.Spec.Ports[0].NodePort)\n}\n\nfunc PodWithNodeSelector(p *v1.Pod, ns map[string]string) *v1.Pod {\n\tp.Spec.NodeSelector = ns\n\treturn p\n}\n\nfunc BackupServiceAddr(clusterName string) string {\n\treturn fmt.Sprintf(\"%s:%d\", BackupServiceName(clusterName), constants.DefaultBackupPodHTTPPort)\n}\n\nfunc BackupServiceName(clusterName string) string {\n\treturn fmt.Sprintf(\"%s-backup-sidecar\", clusterName)\n}\n\nfunc CreateMemberService(kubecli kubernetes.Interface, ns string, svc *v1.Service) (*v1.Service, error) {\n\tretSvc, err := kubecli.CoreV1().Services(ns).Create(svc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn retSvc, nil\n}\n\nfunc CreateEtcdService(kubecli kubernetes.Interface, clusterName, ns string, owner metatypes.OwnerReference) (*v1.Service, error) {\n\tsvc := newEtcdServiceManifest(clusterName)\n\taddOwnerRefToObject(svc.GetObjectMeta(), owner)\n\tretSvc, err := kubecli.CoreV1().Services(ns).Create(svc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn retSvc, nil\n}\n\n\/\/ CreateAndWaitPod is a workaround for self hosted and util for testing.\n\/\/ We should eventually get rid of this in critical code path and move it to test util.\nfunc CreateAndWaitPod(kubecli kubernetes.Interface, ns string, pod *v1.Pod, timeout time.Duration) (*v1.Pod, error) {\n\t_, err := kubecli.CoreV1().Pods(ns).Create(pod)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinterval := 3 * time.Second\n\tvar retPod *v1.Pod\n\tretryutil.Retry(interval, int(timeout\/(interval)), func() (bool, error) {\n\t\tretPod, err = kubecli.CoreV1().Pods(ns).Get(pod.Name)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tswitch retPod.Status.Phase {\n\t\tcase v1.PodRunning:\n\t\t\treturn true, nil\n\t\tcase v1.PodPending:\n\t\t\treturn false, nil\n\t\tdefault:\n\t\t\treturn false, fmt.Errorf(\"unexpected pod status.phase: %v\", retPod.Status.Phase)\n\t\t}\n\t})\n\n\treturn retPod, nil\n}\n\nfunc newEtcdServiceManifest(clusterName string) *v1.Service {\n\tlabels := map[string]string{\n\t\t\"app\": \"etcd\",\n\t\t\"etcd_cluster\": clusterName,\n\t}\n\tsvc := &v1.Service{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: clusterName,\n\t\t\tLabels: labels,\n\t\t},\n\t\tSpec: v1.ServiceSpec{\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tName: \"client\",\n\t\t\t\t\tPort: 2379,\n\t\t\t\t\tTargetPort: intstr.FromInt(2379),\n\t\t\t\t\tProtocol: v1.ProtocolTCP,\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: labels,\n\t\t},\n\t}\n\treturn svc\n}\n\n\/\/ TODO: converge the port logic with member ClientAddr() and PeerAddr()\nfunc NewMemberServiceManifest(etcdName, clusterName string, owner metatypes.OwnerReference) *v1.Service {\n\tsvc := &v1.Service{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: etcdName,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"etcd_cluster\": clusterName,\n\t\t\t},\n\t\t\tAnnotations: map[string]string{\n\t\t\t\tannotationPrometheusScrape: \"true\",\n\t\t\t\tannotationPrometheusPort: \"2379\",\n\t\t\t},\n\t\t},\n\t\tSpec: v1.ServiceSpec{\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tName: \"server\",\n\t\t\t\t\tPort: 2380,\n\t\t\t\t\tTargetPort: intstr.FromInt(2380),\n\t\t\t\t\tProtocol: v1.ProtocolTCP,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"client\",\n\t\t\t\t\tPort: 2379,\n\t\t\t\t\tTargetPort: intstr.FromInt(2379),\n\t\t\t\t\tProtocol: v1.ProtocolTCP,\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: map[string]string{\n\t\t\t\t\"app\": \"etcd\",\n\t\t\t\t\"etcd_node\": etcdName,\n\t\t\t\t\"etcd_cluster\": clusterName,\n\t\t\t},\n\t\t},\n\t}\n\taddOwnerRefToObject(svc.GetObjectMeta(), owner)\n\treturn svc\n}\n\nfunc AddRecoveryToPod(pod *v1.Pod, clusterName, name, token string, cs spec.ClusterSpec) {\n\tpod.Annotations[v1.PodInitContainersBetaAnnotationKey] =\n\t\tmakeRestoreInitContainerSpec(BackupServiceAddr(clusterName), name, token, cs.Version)\n}\n\nfunc addOwnerRefToObject(o meta.Object, r metatypes.OwnerReference) {\n\to.SetOwnerReferences(append(o.GetOwnerReferences(), r))\n}\n\nfunc NewEtcdPod(m *etcdutil.Member, initialCluster []string, clusterName, state, token string, cs spec.ClusterSpec, owner metatypes.OwnerReference) *v1.Pod {\n\tcommands := fmt.Sprintf(\"\/usr\/local\/bin\/etcd --data-dir=%s --name=%s --initial-advertise-peer-urls=%s \"+\n\t\t\"--listen-peer-urls=http:\/\/0.0.0.0:2380 --listen-client-urls=http:\/\/0.0.0.0:2379 --advertise-client-urls=%s \"+\n\t\t\"--initial-cluster=%s --initial-cluster-state=%s\",\n\t\tdataDir, m.Name, m.PeerAddr(), m.ClientAddr(), strings.Join(initialCluster, \",\"), state)\n\tif state == \"new\" {\n\t\tcommands = fmt.Sprintf(\"%s --initial-cluster-token=%s\", commands, token)\n\t}\n\tcontainer := containerWithLivenessProbe(etcdContainer(commands, cs.Version), etcdLivenessProbe())\n\tif cs.Pod != nil {\n\t\tcontainer = containerWithRequirements(container, cs.Pod.Resources)\n\t}\n\tpod := &v1.Pod{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: m.Name,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"app\": \"etcd\",\n\t\t\t\t\"etcd_node\": m.Name,\n\t\t\t\t\"etcd_cluster\": clusterName,\n\t\t\t},\n\t\t\tAnnotations: map[string]string{},\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tContainers: []v1.Container{container},\n\t\t\tRestartPolicy: v1.RestartPolicyNever,\n\t\t\tVolumes: []v1.Volume{\n\t\t\t\t{Name: \"etcd-data\", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},\n\t\t\t},\n\t\t},\n\t}\n\n\tSetEtcdVersion(pod, cs.Version)\n\n\tif cs.Pod != nil {\n\t\tif cs.Pod.AntiAffinity {\n\t\t\tpod = PodWithAntiAffinity(pod, clusterName)\n\t\t}\n\n\t\tif len(cs.Pod.NodeSelector) != 0 {\n\t\t\tpod = PodWithNodeSelector(pod, cs.Pod.NodeSelector)\n\t\t}\n\t}\n\taddOwnerRefToObject(pod.GetObjectMeta(), owner)\n\treturn pod\n}\n\nfunc MustNewKubeClient() kubernetes.Interface {\n\tcfg, err := InClusterConfig()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn kubernetes.NewForConfigOrDie(cfg)\n}\n\nfunc InClusterConfig() (*rest.Config, error) {\n\t\/\/ Work around https:\/\/github.com\/kubernetes\/kubernetes\/issues\/40973\n\t\/\/ See https:\/\/github.com\/coreos\/etcd-operator\/issues\/731#issuecomment-283804819\n\tif len(os.Getenv(\"KUBERNETES_SERVICE_HOST\")) == 0 {\n\t\taddrs, err := net.LookupHost(\"kubernetes.default.svc\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tos.Setenv(\"KUBERNETES_SERVICE_HOST\", addrs[0])\n\t}\n\tif len(os.Getenv(\"KUBERNETES_SERVICE_PORT\")) == 0 {\n\t\tos.Setenv(\"KUBERNETES_SERVICE_PORT\", \"443\")\n\t}\n\treturn rest.InClusterConfig()\n}\n\nfunc NewTPRClient() (*rest.RESTClient, error) {\n\tconfig, err := InClusterConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig.GroupVersion = &unversioned.GroupVersion{\n\t\tGroup: spec.TPRGroup,\n\t\tVersion: spec.TPRVersion,\n\t}\n\tconfig.APIPath = \"\/apis\"\n\tconfig.ContentType = runtime.ContentTypeJSON\n\tconfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs}\n\n\trestcli, err := rest.RESTClientFor(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn restcli, nil\n}\n\nfunc IsKubernetesResourceAlreadyExistError(err error) bool {\n\treturn apierrors.IsAlreadyExists(err)\n}\n\nfunc IsKubernetesResourceNotFoundError(err error) bool {\n\treturn apierrors.IsNotFound(err)\n}\n\n\/\/ We are using internal api types for cluster related.\nfunc ClusterListOpt(clusterName string) v1.ListOptions {\n\treturn v1.ListOptions{\n\t\tLabelSelector: labels.SelectorFromSet(newLablesForCluster(clusterName)).String(),\n\t}\n}\n\nfunc newLablesForCluster(clusterName string) map[string]string {\n\treturn map[string]string{\n\t\t\"etcd_cluster\": clusterName,\n\t\t\"app\": \"etcd\",\n\t}\n}\n<commit_msg>*: remove unused GetNodePortString()<commit_after>\/\/ Copyright 2016 The etcd-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage k8sutil\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd-operator\/pkg\/backup\/backupapi\"\n\t\"github.com\/coreos\/etcd-operator\/pkg\/spec\"\n\t\"github.com\/coreos\/etcd-operator\/pkg\/util\/constants\"\n\t\"github.com\/coreos\/etcd-operator\/pkg\/util\/etcdutil\"\n\t\"github.com\/coreos\/etcd-operator\/pkg\/util\/retryutil\"\n\n\t\"k8s.io\/client-go\/kubernetes\"\n\t\"k8s.io\/client-go\/pkg\/api\"\n\tapierrors \"k8s.io\/client-go\/pkg\/api\/errors\"\n\t\"k8s.io\/client-go\/pkg\/api\/meta\"\n\t\"k8s.io\/client-go\/pkg\/api\/meta\/metatypes\"\n\t\"k8s.io\/client-go\/pkg\/api\/unversioned\"\n\t\"k8s.io\/client-go\/pkg\/api\/v1\"\n\t\"k8s.io\/client-go\/pkg\/labels\"\n\t\"k8s.io\/client-go\/pkg\/runtime\"\n\t\"k8s.io\/client-go\/pkg\/runtime\/serializer\"\n\t\"k8s.io\/client-go\/pkg\/util\/intstr\"\n\t\"k8s.io\/client-go\/rest\"\n)\n\nconst (\n\tetcdVolumeMountDir = \"\/var\/etcd\"\n\tdataDir = etcdVolumeMountDir + \"\/data\"\n\tbackupFile = \"\/var\/etcd\/latest.backup\"\n\tetcdVersionAnnotationKey = \"etcd.version\"\n\tannotationPrometheusScrape = \"prometheus.io\/scrape\"\n\tannotationPrometheusPort = \"prometheus.io\/port\"\n)\n\nfunc GetEtcdVersion(pod *v1.Pod) string {\n\treturn pod.Annotations[etcdVersionAnnotationKey]\n}\n\nfunc SetEtcdVersion(pod *v1.Pod, version string) {\n\tpod.Annotations[etcdVersionAnnotationKey] = version\n}\n\nfunc GetPodNames(pods []*v1.Pod) []string {\n\tres := []string{}\n\tfor _, p := range pods {\n\t\tres = append(res, p.Name)\n\t}\n\treturn res\n}\n\nfunc makeRestoreInitContainerSpec(backupAddr, name, token, version string) string {\n\tspec := []v1.Container{\n\t\t{\n\t\t\tName: \"fetch-backup\",\n\t\t\tImage: \"tutum\/curl\",\n\t\t\tCommand: []string{\n\t\t\t\t\"\/bin\/sh\", \"-c\",\n\t\t\t\tfmt.Sprintf(\"curl -o %s %s\", backupFile, backupapi.NewBackupURL(\"http\", backupAddr, version)),\n\t\t\t},\n\t\t\tVolumeMounts: etcdVolumeMounts(),\n\t\t},\n\t\t{\n\t\t\tName: \"restore-datadir\",\n\t\t\tImage: EtcdImageName(version),\n\t\t\tCommand: []string{\n\t\t\t\t\"\/bin\/sh\", \"-c\",\n\t\t\t\tfmt.Sprintf(\"ETCDCTL_API=3 etcdctl snapshot restore %[1]s\"+\n\t\t\t\t\t\" --name %[2]s\"+\n\t\t\t\t\t\" --initial-cluster %[2]s=http:\/\/%[2]s:2380\"+\n\t\t\t\t\t\" --initial-cluster-token %[3]s\"+\n\t\t\t\t\t\" --initial-advertise-peer-urls http:\/\/%[2]s:2380\"+\n\t\t\t\t\t\" --data-dir %[4]s\", backupFile, name, token, dataDir),\n\t\t\t},\n\t\t\tVolumeMounts: etcdVolumeMounts(),\n\t\t},\n\t}\n\tb, err := json.Marshal(spec)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn string(b)\n}\n\nfunc EtcdImageName(version string) string {\n\treturn fmt.Sprintf(\"quay.io\/coreos\/etcd:v%v\", version)\n}\nfunc PodWithNodeSelector(p *v1.Pod, ns map[string]string) *v1.Pod {\n\tp.Spec.NodeSelector = ns\n\treturn p\n}\n\nfunc BackupServiceAddr(clusterName string) string {\n\treturn fmt.Sprintf(\"%s:%d\", BackupServiceName(clusterName), constants.DefaultBackupPodHTTPPort)\n}\n\nfunc BackupServiceName(clusterName string) string {\n\treturn fmt.Sprintf(\"%s-backup-sidecar\", clusterName)\n}\n\nfunc CreateMemberService(kubecli kubernetes.Interface, ns string, svc *v1.Service) (*v1.Service, error) {\n\tretSvc, err := kubecli.CoreV1().Services(ns).Create(svc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn retSvc, nil\n}\n\nfunc CreateEtcdService(kubecli kubernetes.Interface, clusterName, ns string, owner metatypes.OwnerReference) (*v1.Service, error) {\n\tsvc := newEtcdServiceManifest(clusterName)\n\taddOwnerRefToObject(svc.GetObjectMeta(), owner)\n\tretSvc, err := kubecli.CoreV1().Services(ns).Create(svc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn retSvc, nil\n}\n\n\/\/ CreateAndWaitPod is a workaround for self hosted and util for testing.\n\/\/ We should eventually get rid of this in critical code path and move it to test util.\nfunc CreateAndWaitPod(kubecli kubernetes.Interface, ns string, pod *v1.Pod, timeout time.Duration) (*v1.Pod, error) {\n\t_, err := kubecli.CoreV1().Pods(ns).Create(pod)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinterval := 3 * time.Second\n\tvar retPod *v1.Pod\n\tretryutil.Retry(interval, int(timeout\/(interval)), func() (bool, error) {\n\t\tretPod, err = kubecli.CoreV1().Pods(ns).Get(pod.Name)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tswitch retPod.Status.Phase {\n\t\tcase v1.PodRunning:\n\t\t\treturn true, nil\n\t\tcase v1.PodPending:\n\t\t\treturn false, nil\n\t\tdefault:\n\t\t\treturn false, fmt.Errorf(\"unexpected pod status.phase: %v\", retPod.Status.Phase)\n\t\t}\n\t})\n\n\treturn retPod, nil\n}\n\nfunc newEtcdServiceManifest(clusterName string) *v1.Service {\n\tlabels := map[string]string{\n\t\t\"app\": \"etcd\",\n\t\t\"etcd_cluster\": clusterName,\n\t}\n\tsvc := &v1.Service{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: clusterName,\n\t\t\tLabels: labels,\n\t\t},\n\t\tSpec: v1.ServiceSpec{\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tName: \"client\",\n\t\t\t\t\tPort: 2379,\n\t\t\t\t\tTargetPort: intstr.FromInt(2379),\n\t\t\t\t\tProtocol: v1.ProtocolTCP,\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: labels,\n\t\t},\n\t}\n\treturn svc\n}\n\n\/\/ TODO: converge the port logic with member ClientAddr() and PeerAddr()\nfunc NewMemberServiceManifest(etcdName, clusterName string, owner metatypes.OwnerReference) *v1.Service {\n\tsvc := &v1.Service{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: etcdName,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"etcd_cluster\": clusterName,\n\t\t\t},\n\t\t\tAnnotations: map[string]string{\n\t\t\t\tannotationPrometheusScrape: \"true\",\n\t\t\t\tannotationPrometheusPort: \"2379\",\n\t\t\t},\n\t\t},\n\t\tSpec: v1.ServiceSpec{\n\t\t\tPorts: []v1.ServicePort{\n\t\t\t\t{\n\t\t\t\t\tName: \"server\",\n\t\t\t\t\tPort: 2380,\n\t\t\t\t\tTargetPort: intstr.FromInt(2380),\n\t\t\t\t\tProtocol: v1.ProtocolTCP,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"client\",\n\t\t\t\t\tPort: 2379,\n\t\t\t\t\tTargetPort: intstr.FromInt(2379),\n\t\t\t\t\tProtocol: v1.ProtocolTCP,\n\t\t\t\t},\n\t\t\t},\n\t\t\tSelector: map[string]string{\n\t\t\t\t\"app\": \"etcd\",\n\t\t\t\t\"etcd_node\": etcdName,\n\t\t\t\t\"etcd_cluster\": clusterName,\n\t\t\t},\n\t\t},\n\t}\n\taddOwnerRefToObject(svc.GetObjectMeta(), owner)\n\treturn svc\n}\n\nfunc AddRecoveryToPod(pod *v1.Pod, clusterName, name, token string, cs spec.ClusterSpec) {\n\tpod.Annotations[v1.PodInitContainersBetaAnnotationKey] =\n\t\tmakeRestoreInitContainerSpec(BackupServiceAddr(clusterName), name, token, cs.Version)\n}\n\nfunc addOwnerRefToObject(o meta.Object, r metatypes.OwnerReference) {\n\to.SetOwnerReferences(append(o.GetOwnerReferences(), r))\n}\n\nfunc NewEtcdPod(m *etcdutil.Member, initialCluster []string, clusterName, state, token string, cs spec.ClusterSpec, owner metatypes.OwnerReference) *v1.Pod {\n\tcommands := fmt.Sprintf(\"\/usr\/local\/bin\/etcd --data-dir=%s --name=%s --initial-advertise-peer-urls=%s \"+\n\t\t\"--listen-peer-urls=http:\/\/0.0.0.0:2380 --listen-client-urls=http:\/\/0.0.0.0:2379 --advertise-client-urls=%s \"+\n\t\t\"--initial-cluster=%s --initial-cluster-state=%s\",\n\t\tdataDir, m.Name, m.PeerAddr(), m.ClientAddr(), strings.Join(initialCluster, \",\"), state)\n\tif state == \"new\" {\n\t\tcommands = fmt.Sprintf(\"%s --initial-cluster-token=%s\", commands, token)\n\t}\n\tcontainer := containerWithLivenessProbe(etcdContainer(commands, cs.Version), etcdLivenessProbe())\n\tif cs.Pod != nil {\n\t\tcontainer = containerWithRequirements(container, cs.Pod.Resources)\n\t}\n\tpod := &v1.Pod{\n\t\tObjectMeta: v1.ObjectMeta{\n\t\t\tName: m.Name,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"app\": \"etcd\",\n\t\t\t\t\"etcd_node\": m.Name,\n\t\t\t\t\"etcd_cluster\": clusterName,\n\t\t\t},\n\t\t\tAnnotations: map[string]string{},\n\t\t},\n\t\tSpec: v1.PodSpec{\n\t\t\tContainers: []v1.Container{container},\n\t\t\tRestartPolicy: v1.RestartPolicyNever,\n\t\t\tVolumes: []v1.Volume{\n\t\t\t\t{Name: \"etcd-data\", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},\n\t\t\t},\n\t\t},\n\t}\n\n\tSetEtcdVersion(pod, cs.Version)\n\n\tif cs.Pod != nil {\n\t\tif cs.Pod.AntiAffinity {\n\t\t\tpod = PodWithAntiAffinity(pod, clusterName)\n\t\t}\n\n\t\tif len(cs.Pod.NodeSelector) != 0 {\n\t\t\tpod = PodWithNodeSelector(pod, cs.Pod.NodeSelector)\n\t\t}\n\t}\n\taddOwnerRefToObject(pod.GetObjectMeta(), owner)\n\treturn pod\n}\n\nfunc MustNewKubeClient() kubernetes.Interface {\n\tcfg, err := InClusterConfig()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn kubernetes.NewForConfigOrDie(cfg)\n}\n\nfunc InClusterConfig() (*rest.Config, error) {\n\t\/\/ Work around https:\/\/github.com\/kubernetes\/kubernetes\/issues\/40973\n\t\/\/ See https:\/\/github.com\/coreos\/etcd-operator\/issues\/731#issuecomment-283804819\n\tif len(os.Getenv(\"KUBERNETES_SERVICE_HOST\")) == 0 {\n\t\taddrs, err := net.LookupHost(\"kubernetes.default.svc\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tos.Setenv(\"KUBERNETES_SERVICE_HOST\", addrs[0])\n\t}\n\tif len(os.Getenv(\"KUBERNETES_SERVICE_PORT\")) == 0 {\n\t\tos.Setenv(\"KUBERNETES_SERVICE_PORT\", \"443\")\n\t}\n\treturn rest.InClusterConfig()\n}\n\nfunc NewTPRClient() (*rest.RESTClient, error) {\n\tconfig, err := InClusterConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig.GroupVersion = &unversioned.GroupVersion{\n\t\tGroup: spec.TPRGroup,\n\t\tVersion: spec.TPRVersion,\n\t}\n\tconfig.APIPath = \"\/apis\"\n\tconfig.ContentType = runtime.ContentTypeJSON\n\tconfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: api.Codecs}\n\n\trestcli, err := rest.RESTClientFor(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn restcli, nil\n}\n\nfunc IsKubernetesResourceAlreadyExistError(err error) bool {\n\treturn apierrors.IsAlreadyExists(err)\n}\n\nfunc IsKubernetesResourceNotFoundError(err error) bool {\n\treturn apierrors.IsNotFound(err)\n}\n\n\/\/ We are using internal api types for cluster related.\nfunc ClusterListOpt(clusterName string) v1.ListOptions {\n\treturn v1.ListOptions{\n\t\tLabelSelector: labels.SelectorFromSet(newLablesForCluster(clusterName)).String(),\n\t}\n}\n\nfunc newLablesForCluster(clusterName string) map[string]string {\n\treturn map[string]string{\n\t\t\"etcd_cluster\": clusterName,\n\t\t\"app\": \"etcd\",\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cinder\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com\/golang\/glog\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/resource\"\n\t\"k8s.io\/kubernetes\/pkg\/cloudprovider\/providers\/openstack\"\n\t\"k8s.io\/kubernetes\/pkg\/types\"\n\t\"k8s.io\/kubernetes\/pkg\/util\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/exec\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/mount\"\n\t\"k8s.io\/kubernetes\/pkg\/volume\"\n)\n\n\/\/ This is the primary entrypoint for volume plugins.\nfunc ProbeVolumePlugins() []volume.VolumePlugin {\n\treturn []volume.VolumePlugin{&cinderPlugin{nil}}\n}\n\ntype cinderPlugin struct {\n\thost volume.VolumeHost\n}\n\nvar _ volume.VolumePlugin = &cinderPlugin{}\nvar _ volume.PersistentVolumePlugin = &cinderPlugin{}\nvar _ volume.DeletableVolumePlugin = &cinderPlugin{}\nvar _ volume.ProvisionableVolumePlugin = &cinderPlugin{}\n\nconst (\n\tcinderVolumePluginName = \"kubernetes.io\/cinder\"\n)\n\nfunc (plugin *cinderPlugin) Init(host volume.VolumeHost) {\n\tplugin.host = host\n}\n\nfunc (plugin *cinderPlugin) Name() string {\n\treturn cinderVolumePluginName\n}\n\nfunc (plugin *cinderPlugin) CanSupport(spec *volume.Spec) bool {\n\treturn (spec.Volume != nil && spec.Volume.Cinder != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Cinder != nil)\n}\n\nfunc (plugin *cinderPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {\n\treturn []api.PersistentVolumeAccessMode{\n\t\tapi.ReadWriteOnce,\n\t}\n}\n\nfunc (plugin *cinderPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Builder, error) {\n\treturn plugin.newBuilderInternal(spec, pod.UID, &CinderDiskUtil{}, plugin.host.GetMounter())\n}\n\nfunc (plugin *cinderPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Builder, error) {\n\tvar cinder *api.CinderVolumeSource\n\tif spec.Volume != nil && spec.Volume.Cinder != nil {\n\t\tcinder = spec.Volume.Cinder\n\t} else {\n\t\tcinder = spec.PersistentVolume.Spec.Cinder\n\t}\n\n\tpdName := cinder.VolumeID\n\tfsType := cinder.FSType\n\treadOnly := cinder.ReadOnly\n\n\treturn &cinderVolumeBuilder{\n\t\tcinderVolume: &cinderVolume{\n\t\t\tpodUID: podUID,\n\t\t\tvolName: spec.Name(),\n\t\t\tpdName: pdName,\n\t\t\tmounter: mounter,\n\t\t\tmanager: manager,\n\t\t\tplugin: plugin,\n\t\t},\n\t\tfsType: fsType,\n\t\treadOnly: readOnly,\n\t\tblockDeviceMounter: &cinderSafeFormatAndMount{mounter, exec.New()}}, nil\n}\n\nfunc (plugin *cinderPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {\n\treturn plugin.newCleanerInternal(volName, podUID, &CinderDiskUtil{}, plugin.host.GetMounter())\n}\n\nfunc (plugin *cinderPlugin) newCleanerInternal(volName string, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Cleaner, error) {\n\treturn &cinderVolumeCleaner{\n\t\t&cinderVolume{\n\t\t\tpodUID: podUID,\n\t\t\tvolName: volName,\n\t\t\tmanager: manager,\n\t\t\tmounter: mounter,\n\t\t\tplugin: plugin,\n\t\t}}, nil\n}\n\nfunc (plugin *cinderPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {\n\treturn plugin.newDeleterInternal(spec, &CinderDiskUtil{})\n}\n\nfunc (plugin *cinderPlugin) newDeleterInternal(spec *volume.Spec, manager cdManager) (volume.Deleter, error) {\n\tif spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Cinder == nil {\n\t\treturn nil, fmt.Errorf(\"spec.PersistentVolumeSource.Cinder is nil\")\n\t}\n\treturn &cinderVolumeDeleter{\n\t\t&cinderVolume{\n\t\t\tvolName: spec.Name(),\n\t\t\tpdName: spec.PersistentVolume.Spec.Cinder.VolumeID,\n\t\t\tmanager: manager,\n\t\t\tplugin: plugin,\n\t\t}}, nil\n}\n\nfunc (plugin *cinderPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {\n\tif len(options.AccessModes) == 0 {\n\t\toptions.AccessModes = plugin.GetAccessModes()\n\t}\n\treturn plugin.newProvisionerInternal(options, &CinderDiskUtil{})\n}\n\nfunc (plugin *cinderPlugin) newProvisionerInternal(options volume.VolumeOptions, manager cdManager) (volume.Provisioner, error) {\n\treturn &cinderVolumeProvisioner{\n\t\tcinderVolume: &cinderVolume{\n\t\t\tmanager: manager,\n\t\t\tplugin: plugin,\n\t\t},\n\t\toptions: options,\n\t}, nil\n}\n\nfunc (plugin *cinderPlugin) getCloudProvider() (*openstack.OpenStack, error) {\n\tcloud := plugin.host.GetCloudProvider()\n\tif cloud == nil {\n\t\tglog.Errorf(\"Cloud provider not initialized properly\")\n\t\treturn nil, errors.New(\"Cloud provider not initialized properly\")\n\t}\n\n\tos := cloud.(*openstack.OpenStack)\n\tif os == nil {\n\t\treturn nil, errors.New(\"Invalid cloud provider: expected OpenStack\")\n\t}\n\treturn os, nil\n}\n\n\/\/ Abstract interface to PD operations.\ntype cdManager interface {\n\t\/\/ Attaches the disk to the kubelet's host machine.\n\tAttachDisk(builder *cinderVolumeBuilder, globalPDPath string) error\n\t\/\/ Detaches the disk from the kubelet's host machine.\n\tDetachDisk(cleaner *cinderVolumeCleaner) error\n\t\/\/ Creates a volume\n\tCreateVolume(provisioner *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, err error)\n\t\/\/ Deletes a volume\n\tDeleteVolume(deleter *cinderVolumeDeleter) error\n}\n\nvar _ volume.Builder = &cinderVolumeBuilder{}\n\ntype cinderVolumeBuilder struct {\n\t*cinderVolume\n\tfsType string\n\treadOnly bool\n\tblockDeviceMounter mount.Interface\n}\n\n\/\/ cinderPersistentDisk volumes are disk resources provided by C3\n\/\/ that are attached to the kubelet's host machine and exposed to the pod.\ntype cinderVolume struct {\n\tvolName string\n\tpodUID types.UID\n\t\/\/ Unique identifier of the volume, used to find the disk resource in the provider.\n\tpdName string\n\t\/\/ Filesystem type, optional.\n\tfsType string\n\t\/\/ Specifies the partition to mount\n\t\/\/partition string\n\t\/\/ Specifies whether the disk will be attached as read-only.\n\treadOnly bool\n\t\/\/ Utility interface that provides API calls to the provider to attach\/detach disks.\n\tmanager cdManager\n\t\/\/ Mounter interface that provides system calls to mount the global path to the pod local path.\n\tmounter mount.Interface\n\t\/\/ diskMounter provides the interface that is used to mount the actual block device.\n\tblockDeviceMounter mount.Interface\n\tplugin *cinderPlugin\n\tvolume.MetricsNil\n}\n\nfunc detachDiskLogError(cd *cinderVolume) {\n\terr := cd.manager.DetachDisk(&cinderVolumeCleaner{cd})\n\tif err != nil {\n\t\tglog.Warningf(\"Failed to detach disk: %v (%v)\", cd, err)\n\t}\n}\n\nfunc (b *cinderVolumeBuilder) GetAttributes() volume.Attributes {\n\treturn volume.Attributes{\n\t\tReadOnly: b.readOnly,\n\t\tManaged: !b.readOnly,\n\t\tSupportsOwnershipManagement: true,\n\t\tSupportsSELinux: true,\n\t}\n}\n\nfunc (b *cinderVolumeBuilder) SetUp() error {\n\treturn b.SetUpAt(b.GetPath())\n}\n\n\/\/ SetUp attaches the disk and bind mounts to the volume path.\nfunc (b *cinderVolumeBuilder) SetUpAt(dir string) error {\n\t\/\/ TODO: handle failed mounts here.\n\tnotmnt, err := b.mounter.IsLikelyNotMountPoint(dir)\n\tglog.V(4).Infof(\"PersistentDisk set up: %s %v %v\", dir, !notmnt, err)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\tif !notmnt {\n\t\treturn nil\n\t}\n\tglobalPDPath := makeGlobalPDName(b.plugin.host, b.pdName)\n\tif err := b.manager.AttachDisk(b, globalPDPath); err != nil {\n\t\treturn err\n\t}\n\n\toptions := []string{\"bind\"}\n\tif b.readOnly {\n\t\toptions = append(options, \"ro\")\n\t}\n\n\tif err := os.MkdirAll(dir, 0750); err != nil {\n\t\t\/\/ TODO: we should really eject the attach\/detach out into its own control loop.\n\t\tdetachDiskLogError(b.cinderVolume)\n\t\treturn err\n\t}\n\n\t\/\/ Perform a bind mount to the full path to allow duplicate mounts of the same PD.\n\terr = b.mounter.Mount(globalPDPath, dir, \"\", options)\n\tif err != nil {\n\t\tnotmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)\n\t\tif mntErr != nil {\n\t\t\tglog.Errorf(\"IsLikelyNotMountPoint check failed: %v\", mntErr)\n\t\t\treturn err\n\t\t}\n\t\tif !notmnt {\n\t\t\tif mntErr = b.mounter.Unmount(dir); mntErr != nil {\n\t\t\t\tglog.Errorf(\"Failed to unmount: %v\", mntErr)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tnotmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)\n\t\t\tif mntErr != nil {\n\t\t\t\tglog.Errorf(\"IsLikelyNotMountPoint check failed: %v\", mntErr)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !notmnt {\n\t\t\t\t\/\/ This is very odd, we don't expect it. We'll try again next sync loop.\n\t\t\t\tglog.Errorf(\"%s is still mounted, despite call to unmount(). Will try again next sync loop.\", b.GetPath())\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tos.Remove(dir)\n\t\t\/\/ TODO: we should really eject the attach\/detach out into its own control loop.\n\t\tdetachDiskLogError(b.cinderVolume)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc makeGlobalPDName(host volume.VolumeHost, devName string) string {\n\treturn path.Join(host.GetPluginDir(cinderVolumePluginName), \"mounts\", devName)\n}\n\nfunc (cd *cinderVolume) GetPath() string {\n\tname := cinderVolumePluginName\n\treturn cd.plugin.host.GetPodVolumeDir(cd.podUID, util.EscapeQualifiedNameForDisk(name), cd.volName)\n}\n\ntype cinderVolumeCleaner struct {\n\t*cinderVolume\n}\n\nvar _ volume.Cleaner = &cinderVolumeCleaner{}\n\nfunc (c *cinderVolumeCleaner) TearDown() error {\n\treturn c.TearDownAt(c.GetPath())\n}\n\n\/\/ Unmounts the bind mount, and detaches the disk only if the PD\n\/\/ resource was the last reference to that disk on the kubelet.\nfunc (c *cinderVolumeCleaner) TearDownAt(dir string) error {\n\tnotmnt, err := c.mounter.IsLikelyNotMountPoint(dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif notmnt {\n\t\treturn os.Remove(dir)\n\t}\n\trefs, err := mount.GetMountRefs(c.mounter, dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := c.mounter.Unmount(dir); err != nil {\n\t\treturn err\n\t}\n\tglog.Infof(\"successfully unmounted: %s\\n\", dir)\n\n\t\/\/ If refCount is 1, then all bind mounts have been removed, and the\n\t\/\/ remaining reference is the global mount. It is safe to detach.\n\tif len(refs) == 1 {\n\t\tc.pdName = path.Base(refs[0])\n\t\tif err := c.manager.DetachDisk(c); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tnotmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)\n\tif mntErr != nil {\n\t\tglog.Errorf(\"IsLikelyNotMountPoint check failed: %v\", mntErr)\n\t\treturn err\n\t}\n\tif !notmnt {\n\t\tif err := os.Remove(dir); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\ntype cinderVolumeDeleter struct {\n\t*cinderVolume\n}\n\nvar _ volume.Deleter = &cinderVolumeDeleter{}\n\nfunc (r *cinderVolumeDeleter) GetPath() string {\n\tname := cinderVolumePluginName\n\treturn r.plugin.host.GetPodVolumeDir(r.podUID, util.EscapeQualifiedNameForDisk(name), r.volName)\n}\n\nfunc (r *cinderVolumeDeleter) Delete() error {\n\treturn r.manager.DeleteVolume(r)\n}\n\ntype cinderVolumeProvisioner struct {\n\t*cinderVolume\n\toptions volume.VolumeOptions\n}\n\nvar _ volume.Provisioner = &cinderVolumeProvisioner{}\n\nfunc (c *cinderVolumeProvisioner) Provision(pv *api.PersistentVolume) error {\n\tvolumeID, sizeGB, err := c.manager.CreateVolume(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpv.Spec.PersistentVolumeSource.Cinder.VolumeID = volumeID\n\tpv.Spec.Capacity = api.ResourceList{\n\t\tapi.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf(\"%dGi\", sizeGB)),\n\t}\n\treturn nil\n}\n\nfunc (c *cinderVolumeProvisioner) NewPersistentVolumeTemplate() (*api.PersistentVolume, error) {\n\t\/\/ Provide dummy api.PersistentVolume.Spec, it will be filled in\n\t\/\/ cinderVolumeProvisioner.Provision()\n\treturn &api.PersistentVolume{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tGenerateName: \"pv-cinder-\",\n\t\t\tLabels: map[string]string{},\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"kubernetes.io\/createdby\": \"cinder-dynamic-provisioner\",\n\t\t\t},\n\t\t},\n\t\tSpec: api.PersistentVolumeSpec{\n\t\t\tPersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,\n\t\t\tAccessModes: c.options.AccessModes,\n\t\t\tCapacity: api.ResourceList{\n\t\t\t\tapi.ResourceName(api.ResourceStorage): c.options.Capacity,\n\t\t\t},\n\t\t\tPersistentVolumeSource: api.PersistentVolumeSource{\n\t\t\t\tCinder: &api.CinderVolumeSource{\n\t\t\t\t\tVolumeID: \"dummy\",\n\t\t\t\t\tFSType: \"ext4\",\n\t\t\t\t\tReadOnly: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}, nil\n\n}\n<commit_msg>Add support for flex volume. Flex volume adds support for thirdparty(vendor) volumes and custom mounts.<commit_after>\/*\nCopyright 2015 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cinder\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com\/golang\/glog\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/resource\"\n\t\"k8s.io\/kubernetes\/pkg\/cloudprovider\/providers\/openstack\"\n\t\"k8s.io\/kubernetes\/pkg\/types\"\n\t\"k8s.io\/kubernetes\/pkg\/util\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/exec\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/mount\"\n\t\"k8s.io\/kubernetes\/pkg\/volume\"\n)\n\n\/\/ This is the primary entrypoint for volume plugins.\nfunc ProbeVolumePlugins() []volume.VolumePlugin {\n\treturn []volume.VolumePlugin{&cinderPlugin{nil}}\n}\n\ntype cinderPlugin struct {\n\thost volume.VolumeHost\n}\n\nvar _ volume.VolumePlugin = &cinderPlugin{}\nvar _ volume.PersistentVolumePlugin = &cinderPlugin{}\nvar _ volume.DeletableVolumePlugin = &cinderPlugin{}\nvar _ volume.ProvisionableVolumePlugin = &cinderPlugin{}\n\nconst (\n\tcinderVolumePluginName = \"kubernetes.io\/cinder\"\n)\n\nfunc (plugin *cinderPlugin) Init(host volume.VolumeHost) error {\n\tplugin.host = host\n\treturn nil\n}\n\nfunc (plugin *cinderPlugin) Name() string {\n\treturn cinderVolumePluginName\n}\n\nfunc (plugin *cinderPlugin) CanSupport(spec *volume.Spec) bool {\n\treturn (spec.Volume != nil && spec.Volume.Cinder != nil) || (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Cinder != nil)\n}\n\nfunc (plugin *cinderPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {\n\treturn []api.PersistentVolumeAccessMode{\n\t\tapi.ReadWriteOnce,\n\t}\n}\n\nfunc (plugin *cinderPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Builder, error) {\n\treturn plugin.newBuilderInternal(spec, pod.UID, &CinderDiskUtil{}, plugin.host.GetMounter())\n}\n\nfunc (plugin *cinderPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Builder, error) {\n\tvar cinder *api.CinderVolumeSource\n\tif spec.Volume != nil && spec.Volume.Cinder != nil {\n\t\tcinder = spec.Volume.Cinder\n\t} else {\n\t\tcinder = spec.PersistentVolume.Spec.Cinder\n\t}\n\n\tpdName := cinder.VolumeID\n\tfsType := cinder.FSType\n\treadOnly := cinder.ReadOnly\n\n\treturn &cinderVolumeBuilder{\n\t\tcinderVolume: &cinderVolume{\n\t\t\tpodUID: podUID,\n\t\t\tvolName: spec.Name(),\n\t\t\tpdName: pdName,\n\t\t\tmounter: mounter,\n\t\t\tmanager: manager,\n\t\t\tplugin: plugin,\n\t\t},\n\t\tfsType: fsType,\n\t\treadOnly: readOnly,\n\t\tblockDeviceMounter: &cinderSafeFormatAndMount{mounter, exec.New()}}, nil\n}\n\nfunc (plugin *cinderPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {\n\treturn plugin.newCleanerInternal(volName, podUID, &CinderDiskUtil{}, plugin.host.GetMounter())\n}\n\nfunc (plugin *cinderPlugin) newCleanerInternal(volName string, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Cleaner, error) {\n\treturn &cinderVolumeCleaner{\n\t\t&cinderVolume{\n\t\t\tpodUID: podUID,\n\t\t\tvolName: volName,\n\t\t\tmanager: manager,\n\t\t\tmounter: mounter,\n\t\t\tplugin: plugin,\n\t\t}}, nil\n}\n\nfunc (plugin *cinderPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {\n\treturn plugin.newDeleterInternal(spec, &CinderDiskUtil{})\n}\n\nfunc (plugin *cinderPlugin) newDeleterInternal(spec *volume.Spec, manager cdManager) (volume.Deleter, error) {\n\tif spec.PersistentVolume != nil && spec.PersistentVolume.Spec.Cinder == nil {\n\t\treturn nil, fmt.Errorf(\"spec.PersistentVolumeSource.Cinder is nil\")\n\t}\n\treturn &cinderVolumeDeleter{\n\t\t&cinderVolume{\n\t\t\tvolName: spec.Name(),\n\t\t\tpdName: spec.PersistentVolume.Spec.Cinder.VolumeID,\n\t\t\tmanager: manager,\n\t\t\tplugin: plugin,\n\t\t}}, nil\n}\n\nfunc (plugin *cinderPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {\n\tif len(options.AccessModes) == 0 {\n\t\toptions.AccessModes = plugin.GetAccessModes()\n\t}\n\treturn plugin.newProvisionerInternal(options, &CinderDiskUtil{})\n}\n\nfunc (plugin *cinderPlugin) newProvisionerInternal(options volume.VolumeOptions, manager cdManager) (volume.Provisioner, error) {\n\treturn &cinderVolumeProvisioner{\n\t\tcinderVolume: &cinderVolume{\n\t\t\tmanager: manager,\n\t\t\tplugin: plugin,\n\t\t},\n\t\toptions: options,\n\t}, nil\n}\n\nfunc (plugin *cinderPlugin) getCloudProvider() (*openstack.OpenStack, error) {\n\tcloud := plugin.host.GetCloudProvider()\n\tif cloud == nil {\n\t\tglog.Errorf(\"Cloud provider not initialized properly\")\n\t\treturn nil, errors.New(\"Cloud provider not initialized properly\")\n\t}\n\n\tos := cloud.(*openstack.OpenStack)\n\tif os == nil {\n\t\treturn nil, errors.New(\"Invalid cloud provider: expected OpenStack\")\n\t}\n\treturn os, nil\n}\n\n\/\/ Abstract interface to PD operations.\ntype cdManager interface {\n\t\/\/ Attaches the disk to the kubelet's host machine.\n\tAttachDisk(builder *cinderVolumeBuilder, globalPDPath string) error\n\t\/\/ Detaches the disk from the kubelet's host machine.\n\tDetachDisk(cleaner *cinderVolumeCleaner) error\n\t\/\/ Creates a volume\n\tCreateVolume(provisioner *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, err error)\n\t\/\/ Deletes a volume\n\tDeleteVolume(deleter *cinderVolumeDeleter) error\n}\n\nvar _ volume.Builder = &cinderVolumeBuilder{}\n\ntype cinderVolumeBuilder struct {\n\t*cinderVolume\n\tfsType string\n\treadOnly bool\n\tblockDeviceMounter mount.Interface\n}\n\n\/\/ cinderPersistentDisk volumes are disk resources provided by C3\n\/\/ that are attached to the kubelet's host machine and exposed to the pod.\ntype cinderVolume struct {\n\tvolName string\n\tpodUID types.UID\n\t\/\/ Unique identifier of the volume, used to find the disk resource in the provider.\n\tpdName string\n\t\/\/ Filesystem type, optional.\n\tfsType string\n\t\/\/ Specifies the partition to mount\n\t\/\/partition string\n\t\/\/ Specifies whether the disk will be attached as read-only.\n\treadOnly bool\n\t\/\/ Utility interface that provides API calls to the provider to attach\/detach disks.\n\tmanager cdManager\n\t\/\/ Mounter interface that provides system calls to mount the global path to the pod local path.\n\tmounter mount.Interface\n\t\/\/ diskMounter provides the interface that is used to mount the actual block device.\n\tblockDeviceMounter mount.Interface\n\tplugin *cinderPlugin\n\tvolume.MetricsNil\n}\n\nfunc detachDiskLogError(cd *cinderVolume) {\n\terr := cd.manager.DetachDisk(&cinderVolumeCleaner{cd})\n\tif err != nil {\n\t\tglog.Warningf(\"Failed to detach disk: %v (%v)\", cd, err)\n\t}\n}\n\nfunc (b *cinderVolumeBuilder) GetAttributes() volume.Attributes {\n\treturn volume.Attributes{\n\t\tReadOnly: b.readOnly,\n\t\tManaged: !b.readOnly,\n\t\tSupportsOwnershipManagement: true,\n\t\tSupportsSELinux: true,\n\t}\n}\n\nfunc (b *cinderVolumeBuilder) SetUp() error {\n\treturn b.SetUpAt(b.GetPath())\n}\n\n\/\/ SetUp attaches the disk and bind mounts to the volume path.\nfunc (b *cinderVolumeBuilder) SetUpAt(dir string) error {\n\t\/\/ TODO: handle failed mounts here.\n\tnotmnt, err := b.mounter.IsLikelyNotMountPoint(dir)\n\tglog.V(4).Infof(\"PersistentDisk set up: %s %v %v\", dir, !notmnt, err)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn err\n\t}\n\tif !notmnt {\n\t\treturn nil\n\t}\n\tglobalPDPath := makeGlobalPDName(b.plugin.host, b.pdName)\n\tif err := b.manager.AttachDisk(b, globalPDPath); err != nil {\n\t\treturn err\n\t}\n\n\toptions := []string{\"bind\"}\n\tif b.readOnly {\n\t\toptions = append(options, \"ro\")\n\t}\n\n\tif err := os.MkdirAll(dir, 0750); err != nil {\n\t\t\/\/ TODO: we should really eject the attach\/detach out into its own control loop.\n\t\tdetachDiskLogError(b.cinderVolume)\n\t\treturn err\n\t}\n\n\t\/\/ Perform a bind mount to the full path to allow duplicate mounts of the same PD.\n\terr = b.mounter.Mount(globalPDPath, dir, \"\", options)\n\tif err != nil {\n\t\tnotmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)\n\t\tif mntErr != nil {\n\t\t\tglog.Errorf(\"IsLikelyNotMountPoint check failed: %v\", mntErr)\n\t\t\treturn err\n\t\t}\n\t\tif !notmnt {\n\t\t\tif mntErr = b.mounter.Unmount(dir); mntErr != nil {\n\t\t\t\tglog.Errorf(\"Failed to unmount: %v\", mntErr)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tnotmnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)\n\t\t\tif mntErr != nil {\n\t\t\t\tglog.Errorf(\"IsLikelyNotMountPoint check failed: %v\", mntErr)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !notmnt {\n\t\t\t\t\/\/ This is very odd, we don't expect it. We'll try again next sync loop.\n\t\t\t\tglog.Errorf(\"%s is still mounted, despite call to unmount(). Will try again next sync loop.\", b.GetPath())\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tos.Remove(dir)\n\t\t\/\/ TODO: we should really eject the attach\/detach out into its own control loop.\n\t\tdetachDiskLogError(b.cinderVolume)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc makeGlobalPDName(host volume.VolumeHost, devName string) string {\n\treturn path.Join(host.GetPluginDir(cinderVolumePluginName), \"mounts\", devName)\n}\n\nfunc (cd *cinderVolume) GetPath() string {\n\tname := cinderVolumePluginName\n\treturn cd.plugin.host.GetPodVolumeDir(cd.podUID, util.EscapeQualifiedNameForDisk(name), cd.volName)\n}\n\ntype cinderVolumeCleaner struct {\n\t*cinderVolume\n}\n\nvar _ volume.Cleaner = &cinderVolumeCleaner{}\n\nfunc (c *cinderVolumeCleaner) TearDown() error {\n\treturn c.TearDownAt(c.GetPath())\n}\n\n\/\/ Unmounts the bind mount, and detaches the disk only if the PD\n\/\/ resource was the last reference to that disk on the kubelet.\nfunc (c *cinderVolumeCleaner) TearDownAt(dir string) error {\n\tnotmnt, err := c.mounter.IsLikelyNotMountPoint(dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif notmnt {\n\t\treturn os.Remove(dir)\n\t}\n\trefs, err := mount.GetMountRefs(c.mounter, dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := c.mounter.Unmount(dir); err != nil {\n\t\treturn err\n\t}\n\tglog.Infof(\"successfully unmounted: %s\\n\", dir)\n\n\t\/\/ If refCount is 1, then all bind mounts have been removed, and the\n\t\/\/ remaining reference is the global mount. It is safe to detach.\n\tif len(refs) == 1 {\n\t\tc.pdName = path.Base(refs[0])\n\t\tif err := c.manager.DetachDisk(c); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tnotmnt, mntErr := c.mounter.IsLikelyNotMountPoint(dir)\n\tif mntErr != nil {\n\t\tglog.Errorf(\"IsLikelyNotMountPoint check failed: %v\", mntErr)\n\t\treturn err\n\t}\n\tif !notmnt {\n\t\tif err := os.Remove(dir); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\ntype cinderVolumeDeleter struct {\n\t*cinderVolume\n}\n\nvar _ volume.Deleter = &cinderVolumeDeleter{}\n\nfunc (r *cinderVolumeDeleter) GetPath() string {\n\tname := cinderVolumePluginName\n\treturn r.plugin.host.GetPodVolumeDir(r.podUID, util.EscapeQualifiedNameForDisk(name), r.volName)\n}\n\nfunc (r *cinderVolumeDeleter) Delete() error {\n\treturn r.manager.DeleteVolume(r)\n}\n\ntype cinderVolumeProvisioner struct {\n\t*cinderVolume\n\toptions volume.VolumeOptions\n}\n\nvar _ volume.Provisioner = &cinderVolumeProvisioner{}\n\nfunc (c *cinderVolumeProvisioner) Provision(pv *api.PersistentVolume) error {\n\tvolumeID, sizeGB, err := c.manager.CreateVolume(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tpv.Spec.PersistentVolumeSource.Cinder.VolumeID = volumeID\n\tpv.Spec.Capacity = api.ResourceList{\n\t\tapi.ResourceName(api.ResourceStorage): resource.MustParse(fmt.Sprintf(\"%dGi\", sizeGB)),\n\t}\n\treturn nil\n}\n\nfunc (c *cinderVolumeProvisioner) NewPersistentVolumeTemplate() (*api.PersistentVolume, error) {\n\t\/\/ Provide dummy api.PersistentVolume.Spec, it will be filled in\n\t\/\/ cinderVolumeProvisioner.Provision()\n\treturn &api.PersistentVolume{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tGenerateName: \"pv-cinder-\",\n\t\t\tLabels: map[string]string{},\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"kubernetes.io\/createdby\": \"cinder-dynamic-provisioner\",\n\t\t\t},\n\t\t},\n\t\tSpec: api.PersistentVolumeSpec{\n\t\t\tPersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,\n\t\t\tAccessModes: c.options.AccessModes,\n\t\t\tCapacity: api.ResourceList{\n\t\t\t\tapi.ResourceName(api.ResourceStorage): c.options.Capacity,\n\t\t\t},\n\t\t\tPersistentVolumeSource: api.PersistentVolumeSource{\n\t\t\t\tCinder: &api.CinderVolumeSource{\n\t\t\t\t\tVolumeID: \"dummy\",\n\t\t\t\t\tFSType: \"ext4\",\n\t\t\t\t\tReadOnly: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}, nil\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2015 Victor Antonovich <victor@antonovich.me>\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/pflag\"\n\n\t\"bytes\"\n\t\"k8s.io\/kubernetes\/pkg\/util\"\n)\n\nconst (\n\tFLAG_STDERR_THRESH = \"stderrthreshold\"\n\tFLAG_RUN_ONCE = \"once\"\n\tFLAG_DRY_RUN = \"dry-run\"\n\tFLAG_MASTER = \"master\"\n\tFLAG_CONFIG = \"config\"\n\tFLAG_POLL_TIME = \"poll-time\"\n\tFLAG_TEMPLATE = \"template\"\n\tFLAG_HELP_MD = \"help-md\"\n)\n\nfunc newCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"kube-template\",\n\t\tLong: \"Watches Kubernetes for updates, writing output of a series of templates to files.\",\n\t\tRun: runCmd,\n\t}\n\tinitCmd(cmd)\n\treturn cmd\n}\n\nfunc initCmd(cmd *cobra.Command) {\n\t\/\/ Command-related flags set\n\tf := cmd.Flags()\n\tf.Bool(FLAG_DRY_RUN, false, \"don't write template output, dump result to stdout\")\n\tf.Bool(FLAG_RUN_ONCE, false, \"run template processing once and exit\")\n\tf.String(FLAG_MASTER, \"\", fmt.Sprintf(\"Kubernetes API server address (default is %s)\", DEFAULT_MASTER_HOST))\n\tf.DurationP(FLAG_POLL_TIME, \"p\", 15*time.Second, \"Kubernetes API server poll time\")\n\tf.StringVarP(&cfgFile, FLAG_CONFIG, \"c\", \"\", fmt.Sprintf(\"config file (default is .\/%s.(yaml|json))\", CFG_FILE))\n\tf.StringSliceP(FLAG_TEMPLATE, \"t\", nil, `adds a new template to watch on disk in the format\n\t\t'templatePath:outputPath[:command]'. This option is additive\n\t\tand may be specified multiple times for multiple templates`)\n\tf.Bool(FLAG_HELP_MD, false, \"get help in Markdown format\")\n\t\/\/ Merge glog-related flags\n\t\/\/ FIXME probably we shouldn't use k8s utils there\n\tpflag.CommandLine.AddFlagSet(f)\n\tutil.InitFlags()\n\tutil.InitLogs()\n\tdefer util.FlushLogs()\n}\n\nfunc runCmd(cmd *cobra.Command, args []string) {\n\tif f, _ := cmd.Flags().GetBool(FLAG_HELP_MD); f {\n\t\tout := new(bytes.Buffer)\n\t\tcobra.GenMarkdown(cmd, out)\n\t\tfmt.Println(out)\n\t\treturn\n\t}\n\n\tconfig, err := newConfig(cmd)\n\tif err != nil {\n\t\tglog.Fatalf(\"configuration error: %v\", err)\n\t}\n\tif len(config.TemplateDescriptors) == 0 {\n\t\tglog.Fatalf(\"no templates to process (use --help to get configuration options), exiting...\")\n\t}\n\n\t\/\/ Start application\n\tapp, err := newApp(config)\n\tif err != nil {\n\t\tglog.Fatalf(\"can't create application: %v\", err)\n\t}\n\n\tgo app.Start()\n\n\t\/\/ Listen for signals\n\tsignalCh := make(chan os.Signal, 1)\n\tsignal.Notify(signalCh,\n\t\tsyscall.SIGHUP,\n\t\tsyscall.SIGINT,\n\t\tsyscall.SIGTERM,\n\t\tsyscall.SIGQUIT,\n\t)\n\n\t\/\/ Event loop\nEventLoop:\n\tfor {\n\t\tselect {\n\t\tcase signal := <-signalCh:\n\t\t\tglog.V(2).Infof(\"received %v signal, stopping\", signal)\n\t\t\tapp.Stop()\n\t\tcase <-app.doneCh:\n\t\t\tbreak EventLoop\n\t\t}\n\t}\n}\n<commit_msg>Fixed double flags parsing<commit_after>\/\/ Copyright © 2015 Victor Antonovich <victor@antonovich.me>\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/pflag\"\n\n\t\"bytes\"\n\t\"k8s.io\/kubernetes\/pkg\/util\"\n)\n\nconst (\n\tFLAG_RUN_ONCE = \"once\"\n\tFLAG_DRY_RUN = \"dry-run\"\n\tFLAG_MASTER = \"master\"\n\tFLAG_CONFIG = \"config\"\n\tFLAG_POLL_TIME = \"poll-time\"\n\tFLAG_TEMPLATE = \"template\"\n\tFLAG_HELP_MD = \"help-md\"\n)\n\nfunc newCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"kube-template\",\n\t\tLong: \"Watches Kubernetes for updates, writing output of a series of templates to files.\",\n\t\tRun: runCmd,\n\t}\n\tinitCmd(cmd)\n\treturn cmd\n}\n\nfunc initCmd(cmd *cobra.Command) {\n\t\/\/ Command-related flags set\n\tf := cmd.Flags()\n\tf.Bool(FLAG_DRY_RUN, false, \"don't write template output, dump result to stdout\")\n\tf.Bool(FLAG_RUN_ONCE, false, \"run template processing once and exit\")\n\tf.String(FLAG_MASTER, \"\", fmt.Sprintf(\"Kubernetes API server address (default is %s)\", DEFAULT_MASTER_HOST))\n\tf.DurationP(FLAG_POLL_TIME, \"p\", 15*time.Second, \"Kubernetes API server poll time\")\n\tf.StringVarP(&cfgFile, FLAG_CONFIG, \"c\", \"\", fmt.Sprintf(\"config file (default is .\/%s.(yaml|json))\", CFG_FILE))\n\tf.StringSliceP(FLAG_TEMPLATE, \"t\", nil, `adds a new template to watch on disk in the format\n\t\t'templatePath:outputPath[:command]'. This option is additive\n\t\tand may be specified multiple times for multiple templates`)\n\tf.Bool(FLAG_HELP_MD, false, \"get help in Markdown format\")\n\t\/\/ Merge flags\n\tpflag.CommandLine.SetNormalizeFunc(func(_ *pflag.FlagSet, name string) pflag.NormalizedName {\n\t\tif strings.Contains(name, \"_\") {\n\t\t\treturn pflag.NormalizedName(strings.Replace(name, \"_\", \"-\", -1))\n\t\t}\n\t\treturn pflag.NormalizedName(name)\n\t})\n\tpflag.CommandLine.AddFlagSet(f)\n\tpflag.CommandLine.AddGoFlagSet(flag.CommandLine)\n\t\/\/ Init logs\n\t\/\/ FIXME probably we shouldn't use k8s utils there\n\tutil.InitLogs()\n\tdefer util.FlushLogs()\n}\n\nfunc runCmd(cmd *cobra.Command, _ []string) {\n\tif f, _ := cmd.Flags().GetBool(FLAG_HELP_MD); f {\n\t\tout := new(bytes.Buffer)\n\t\tcobra.GenMarkdown(cmd, out)\n\t\tfmt.Println(out)\n\t\treturn\n\t}\n\n\tconfig, err := newConfig(cmd)\n\tif err != nil {\n\t\tglog.Fatalf(\"configuration error: %v\", err)\n\t}\n\tif len(config.TemplateDescriptors) == 0 {\n\t\tglog.Fatalf(\"no templates to process (use --help to get configuration options), exiting...\")\n\t}\n\n\t\/\/ Start application\n\tapp, err := newApp(config)\n\tif err != nil {\n\t\tglog.Fatalf(\"can't create application: %v\", err)\n\t}\n\n\tgo app.Start()\n\n\t\/\/ Listen for signals\n\tsignalCh := make(chan os.Signal, 1)\n\tsignal.Notify(signalCh,\n\t\tsyscall.SIGHUP,\n\t\tsyscall.SIGINT,\n\t\tsyscall.SIGTERM,\n\t\tsyscall.SIGQUIT,\n\t)\n\n\t\/\/ Event loop\nEventLoop:\n\tfor {\n\t\tselect {\n\t\tcase signal := <-signalCh:\n\t\t\tglog.V(2).Infof(\"received %v signal, stopping\", signal)\n\t\t\tapp.Stop()\n\t\tcase <-app.doneCh:\n\t\t\tbreak EventLoop\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ radius commands\npackage main\n\nimport (\n\t\"io\"\n\t\"radiusd\/config\"\n\t\"radiusd\/model\"\n\t\"radiusd\/queue\"\n\t\"radiusd\/radius\"\n\t\"net\"\n)\n\nfunc auth(w io.Writer, req *radius.Packet) {\n\tif e := radius.ValidateAuthRequest(req); e != \"\" {\n\t\tconfig.Log.Printf(\"auth.begin e=\" + e)\n\t\treturn\n\t}\n\n\tuser := string(req.Attrs[radius.UserName].Value)\n\traw := req.Attrs[radius.UserPassword].Value\n\tpass := radius.DecryptPassword(raw, req)\n\n\tif config.Verbose {\n\t\tconfig.Log.Printf(\"auth user=%s pass=%s\", user, pass)\n\t}\n\tlimits, e := model.Auth(user, pass)\n\tif e != nil {\n\t\tconfig.Log.Printf(\"auth.begin e=\" + e.Error())\n\t\treturn\n\t}\n\n\tconns, e := model.Conns(user)\n\tif e != nil {\n\t\tconfig.Log.Printf(\"auth.begin e=\" + e.Error())\n\t\treturn\n\t}\n\tconfig.Log.Printf(\"User conns=%d, max=%d\", conns, limits.SimultaneousUse)\n\t\/*if conns >= state.SimultaneousUse {\n\t\tw.Write(radius.DefaultPacket(req, radius.AccessReject, \"Max conns reached\"))\n\t\treturn\n\t}*\/\n\n\tif limits.Ok {\n\t\treply := []radius.PubAttr{}\n\t\tif limits.DedicatedIP != nil {\n\t\t\treply = append(reply, radius.PubAttr{\n\t\t\t\tType: radius.FramedIPAddress,\n\t\t\t\tValue: net.ParseIP(*limits.DedicatedIP).To4(),\n\t\t\t})\n\t\t}\n\t\tif limits.Ratelimit != nil {\n\t\t\t\/\/ \tMT-Rate-Limit = MikrotikRateLimit\n\t\t\treply = append(reply, radius.VendorAttr{\n\t\t\t\tType: radius.VendorSpecific,\n\t\t\t\tVendorId: radius.MikrotikVendor,\n\t\t\t\tValues: []radius.VendorAttrString{radius.VendorAttrString{\n\t\t\t\t\tType: radius.MikrotikRateLimit,\n\t\t\t\t\tValue: *limits.Ratelimit,\n\t\t\t\t}},\n\t\t\t}.Encode())\n\t\t}\n\n\t\treply = append(reply, radius.PubAttr{Type: radius.PortLimit, Value: radius.EncodeFour(limits.SimultaneousUse-conns)})\n\t\tw.Write(req.Response(radius.AccountingResponse, reply))\n\t\treturn\n\t}\n\n\tw.Write(radius.DefaultPacket(req, radius.AccessReject, \"Invalid user\/pass\"))\n}\n\nfunc acctBegin(w io.Writer, req *radius.Packet) {\n\tif e := radius.ValidateAcctRequest(req); e != \"\" {\n\t\tconfig.Log.Printf(\"WARN: acct.begin err=\" + e)\n\t\treturn\n\t}\n\tif _, there := req.Attrs[radius.FramedIPAddress]; !there {\n\t\tconfig.Log.Printf(\"WARN: acct.begin missing FramedIPAddress\")\n\t\treturn\n\t}\n\n\tuser := string(req.Attrs[radius.UserName].Value)\n\tsess := string(req.Attrs[radius.AcctSessionId].Value)\n\tnasIp := radius.DecodeIP(req.Attrs[radius.NASIPAddress].Value).String()\n\tclientIp := radius.DecodeIP(req.Attrs[radius.CallingStationId].Value).String()\n\tassignedIp := radius.DecodeIP(req.Attrs[radius.FramedIPAddress].Value).String()\n\n\tif config.Verbose {\n\t\tconfig.Log.Printf(\"acct.begin sess=%s for user=%s on nasIP=%s\", sess, user, nasIp)\n\t}\n\treply := []radius.PubAttr{}\n\t_, e := model.Limits(user)\n\tif e != nil {\n\t\tif e == model.ErrNoRows {\n\t\t\tconfig.Log.Printf(\"acct.begin received invalid user=\" + user)\n\t\t\treturn\n\t\t}\n\t\tconfig.Log.Printf(\"acct.begin e=\" + e.Error())\n\t\treturn\n\t}\n\n\tif e := model.SessionAdd(sess, user, nasIp, assignedIp, clientIp); e != nil {\n\t\tconfig.Log.Printf(\"acct.begin e=%s\", e.Error())\n\t\treturn\n\t}\n\tw.Write(req.Response(radius.AccountingResponse, reply))\n}\n\nfunc acctUpdate(w io.Writer, req *radius.Packet) {\n\tif e := radius.ValidateAcctRequest(req); e != \"\" {\n\t\tconfig.Log.Printf(\"acct.update e=\" + e)\n\t\treturn\n\t}\n\n\tsess := model.Session{\n\t\tBytesIn: radius.DecodeFour(req.Attrs[radius.AcctInputOctets].Value),\n\t\tBytesOut: radius.DecodeFour(req.Attrs[radius.AcctOutputOctets].Value),\n\t\tPacketsIn: radius.DecodeFour(req.Attrs[radius.AcctInputPackets].Value),\n\t\tPacketsOut: radius.DecodeFour(req.Attrs[radius.AcctOutputPackets].Value),\n\t\tSessionID: string(req.Attrs[radius.AcctSessionId].Value),\n\t\tSessionTime: radius.DecodeFour(req.Attrs[radius.AcctSessionTime].Value),\n\t\tUser: string(req.Attrs[radius.UserName].Value),\n\t\tNasIP: radius.DecodeIP(req.Attrs[radius.NASIPAddress].Value).String(),\n\t}\n\n\tif config.Verbose {\n\t\tconfig.Log.Printf(\n\t\t\t\"acct.update sess=%s for user=%s on NasIP=%s sessTime=%d octetsIn=%d octetsOut=%d\",\n\t\t\tsess.SessionID, sess.User, sess.NasIP, sess.SessionTime, sess.BytesIn, sess.BytesOut,\n\t\t)\n\t}\n\tif e := model.SessionUpdate(sess); e != nil {\n\t\tconfig.Log.Printf(\"acct.update e=\" + e.Error())\n\t\treturn\n\t}\n\n\tqueue.Queue(sess.User, sess.BytesIn, sess.BytesOut)\n\tw.Write(radius.DefaultPacket(req, radius.AccountingResponse, \"Updated accounting.\"))\n}\n\nfunc acctStop(w io.Writer, req *radius.Packet) {\n\tif e := radius.ValidateAcctRequest(req); e != \"\" {\n\t\tconfig.Log.Printf(\"acct.stop e=\" + e)\n\t\treturn\n\t}\n\tuser := string(req.Attrs[radius.UserName].Value)\n\tsess := string(req.Attrs[radius.AcctSessionId].Value)\n\tnasIp := radius.DecodeIP(req.Attrs[radius.NASIPAddress].Value).String()\n\n\tsessTime := radius.DecodeFour(req.Attrs[radius.AcctSessionTime].Value)\n\toctIn := radius.DecodeFour(req.Attrs[radius.AcctInputOctets].Value)\n\toctOut := radius.DecodeFour(req.Attrs[radius.AcctOutputOctets].Value)\n\n\tif config.Verbose {\n\t\tconfig.Log.Printf(\n\t\t\t\"acct.stop sess=%s for user=%s sessTime=%d octetsIn=%d octetsOut=%d\",\n\t\t\tsess, user, sessTime, octIn, octOut,\n\t\t)\n\t}\n\tif e := model.SessionLog(sess, user, nasIp); e != nil {\n\t\tconfig.Log.Printf(\"acct.update e=\" + e.Error())\n\t\treturn\n\t}\n\tif e := model.SessionRemove(sess, user, nasIp); e != nil {\n\t\tconfig.Log.Printf(\"acct.update e=\" + e.Error())\n\t\treturn\n\t}\n\n\tqueue.Queue(user, octIn, octOut)\n\tw.Write(radius.DefaultPacket(req, radius.AccountingResponse, \"Finished accounting.\"))\n}\n<commit_msg>Cleanup. Remove unneeded logmsg<commit_after>\/\/ radius commands\npackage main\n\nimport (\n\t\"io\"\n\t\"radiusd\/config\"\n\t\"radiusd\/model\"\n\t\"radiusd\/queue\"\n\t\"radiusd\/radius\"\n\t\"net\"\n)\n\nfunc auth(w io.Writer, req *radius.Packet) {\n\tif e := radius.ValidateAuthRequest(req); e != \"\" {\n\t\tconfig.Log.Printf(\"auth.begin e=\" + e)\n\t\treturn\n\t}\n\n\tuser := string(req.Attrs[radius.UserName].Value)\n\traw := req.Attrs[radius.UserPassword].Value\n\tpass := radius.DecryptPassword(raw, req)\n\n\tif config.Verbose {\n\t\tconfig.Log.Printf(\"auth user=%s pass=%s\", user, pass)\n\t}\n\tlimits, e := model.Auth(user, pass)\n\tif e != nil {\n\t\tconfig.Log.Printf(\"auth.begin e=\" + e.Error())\n\t\treturn\n\t}\n\n\tconns, e := model.Conns(user)\n\tif e != nil {\n\t\tconfig.Log.Printf(\"auth.begin e=\" + e.Error())\n\t\treturn\n\t}\n\t\/*if conns >= state.SimultaneousUse {\n\t\tw.Write(radius.DefaultPacket(req, radius.AccessReject, \"Max conns reached\"))\n\t\treturn\n\t}*\/\n\n\tif limits.Ok {\n\t\treply := []radius.PubAttr{}\n\t\tif limits.DedicatedIP != nil {\n\t\t\treply = append(reply, radius.PubAttr{\n\t\t\t\tType: radius.FramedIPAddress,\n\t\t\t\tValue: net.ParseIP(*limits.DedicatedIP).To4(),\n\t\t\t})\n\t\t}\n\t\tif limits.Ratelimit != nil {\n\t\t\t\/\/ \tMT-Rate-Limit = MikrotikRateLimit\n\t\t\treply = append(reply, radius.VendorAttr{\n\t\t\t\tType: radius.VendorSpecific,\n\t\t\t\tVendorId: radius.MikrotikVendor,\n\t\t\t\tValues: []radius.VendorAttrString{radius.VendorAttrString{\n\t\t\t\t\tType: radius.MikrotikRateLimit,\n\t\t\t\t\tValue: *limits.Ratelimit,\n\t\t\t\t}},\n\t\t\t}.Encode())\n\t\t}\n\n\t\treply = append(reply, radius.PubAttr{Type: radius.PortLimit, Value: radius.EncodeFour(limits.SimultaneousUse-conns)})\n\t\tw.Write(req.Response(radius.AccountingResponse, reply))\n\t\treturn\n\t}\n\n\tw.Write(radius.DefaultPacket(req, radius.AccessReject, \"Invalid user\/pass\"))\n}\n\nfunc acctBegin(w io.Writer, req *radius.Packet) {\n\tif e := radius.ValidateAcctRequest(req); e != \"\" {\n\t\tconfig.Log.Printf(\"WARN: acct.begin err=\" + e)\n\t\treturn\n\t}\n\tif _, there := req.Attrs[radius.FramedIPAddress]; !there {\n\t\tconfig.Log.Printf(\"WARN: acct.begin missing FramedIPAddress\")\n\t\treturn\n\t}\n\n\tuser := string(req.Attrs[radius.UserName].Value)\n\tsess := string(req.Attrs[radius.AcctSessionId].Value)\n\tnasIp := radius.DecodeIP(req.Attrs[radius.NASIPAddress].Value).String()\n\tclientIp := radius.DecodeIP(req.Attrs[radius.CallingStationId].Value).String()\n\tassignedIp := radius.DecodeIP(req.Attrs[radius.FramedIPAddress].Value).String()\n\n\tif config.Verbose {\n\t\tconfig.Log.Printf(\"acct.begin sess=%s for user=%s on nasIP=%s\", sess, user, nasIp)\n\t}\n\treply := []radius.PubAttr{}\n\t_, e := model.Limits(user)\n\tif e != nil {\n\t\tif e == model.ErrNoRows {\n\t\t\tconfig.Log.Printf(\"acct.begin received invalid user=\" + user)\n\t\t\treturn\n\t\t}\n\t\tconfig.Log.Printf(\"acct.begin e=\" + e.Error())\n\t\treturn\n\t}\n\n\tif e := model.SessionAdd(sess, user, nasIp, assignedIp, clientIp); e != nil {\n\t\tconfig.Log.Printf(\"acct.begin e=%s\", e.Error())\n\t\treturn\n\t}\n\tw.Write(req.Response(radius.AccountingResponse, reply))\n}\n\nfunc acctUpdate(w io.Writer, req *radius.Packet) {\n\tif e := radius.ValidateAcctRequest(req); e != \"\" {\n\t\tconfig.Log.Printf(\"acct.update e=\" + e)\n\t\treturn\n\t}\n\n\tsess := model.Session{\n\t\tBytesIn: radius.DecodeFour(req.Attrs[radius.AcctInputOctets].Value),\n\t\tBytesOut: radius.DecodeFour(req.Attrs[radius.AcctOutputOctets].Value),\n\t\tPacketsIn: radius.DecodeFour(req.Attrs[radius.AcctInputPackets].Value),\n\t\tPacketsOut: radius.DecodeFour(req.Attrs[radius.AcctOutputPackets].Value),\n\t\tSessionID: string(req.Attrs[radius.AcctSessionId].Value),\n\t\tSessionTime: radius.DecodeFour(req.Attrs[radius.AcctSessionTime].Value),\n\t\tUser: string(req.Attrs[radius.UserName].Value),\n\t\tNasIP: radius.DecodeIP(req.Attrs[radius.NASIPAddress].Value).String(),\n\t}\n\n\tif config.Verbose {\n\t\tconfig.Log.Printf(\n\t\t\t\"acct.update sess=%s for user=%s on NasIP=%s sessTime=%d octetsIn=%d octetsOut=%d\",\n\t\t\tsess.SessionID, sess.User, sess.NasIP, sess.SessionTime, sess.BytesIn, sess.BytesOut,\n\t\t)\n\t}\n\tif e := model.SessionUpdate(sess); e != nil {\n\t\tconfig.Log.Printf(\"acct.update e=\" + e.Error())\n\t\treturn\n\t}\n\n\tqueue.Queue(sess.User, sess.BytesIn, sess.BytesOut)\n\tw.Write(radius.DefaultPacket(req, radius.AccountingResponse, \"Updated accounting.\"))\n}\n\nfunc acctStop(w io.Writer, req *radius.Packet) {\n\tif e := radius.ValidateAcctRequest(req); e != \"\" {\n\t\tconfig.Log.Printf(\"acct.stop e=\" + e)\n\t\treturn\n\t}\n\tuser := string(req.Attrs[radius.UserName].Value)\n\tsess := string(req.Attrs[radius.AcctSessionId].Value)\n\tnasIp := radius.DecodeIP(req.Attrs[radius.NASIPAddress].Value).String()\n\n\tsessTime := radius.DecodeFour(req.Attrs[radius.AcctSessionTime].Value)\n\toctIn := radius.DecodeFour(req.Attrs[radius.AcctInputOctets].Value)\n\toctOut := radius.DecodeFour(req.Attrs[radius.AcctOutputOctets].Value)\n\n\tif config.Verbose {\n\t\tconfig.Log.Printf(\n\t\t\t\"acct.stop sess=%s for user=%s sessTime=%d octetsIn=%d octetsOut=%d\",\n\t\t\tsess, user, sessTime, octIn, octOut,\n\t\t)\n\t}\n\tif e := model.SessionLog(sess, user, nasIp); e != nil {\n\t\tconfig.Log.Printf(\"acct.update e=\" + e.Error())\n\t\treturn\n\t}\n\tif e := model.SessionRemove(sess, user, nasIp); e != nil {\n\t\tconfig.Log.Printf(\"acct.update e=\" + e.Error())\n\t\treturn\n\t}\n\n\tqueue.Queue(user, octIn, octOut)\n\tw.Write(radius.DefaultPacket(req, radius.AccountingResponse, \"Finished accounting.\"))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"os\"\n)\n\nfunc getRequestedStorage(c *cli.Context) *Storage {\n\tstorage, err := NewStorage(os.ExpandEnv(c.GlobalString(\"ctxfile\")))\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn storage\n}\n\nfunc stopContext(c *cli.Context) {\n\tstorage := getRequestedStorage(c)\n\n\tcurrentContext := storage.GetCurrentContext()\n\n\tif currentContext == nil {\n\t\tfmt.Println(\"No current context. Start a context first!\")\n\t\treturn\n\t}\n\n\tcurrentContext.Stop()\n\tstorage.Save()\n}\n\nfunc switchContext(c *cli.Context) {\n\tif len(c.Args()) != 1 {\n\t\tfmt.Println(\"You must provide the id of the context\")\n\t\treturn\n\t}\n\n\tcontextId := c.Args()[0]\n\tstorage := getRequestedStorage(c)\n\n\terr := storage.SwitchContext(contextId)\n\tif err != nil {\n\t\tfmt.Printf(\"%s\", err)\n\t\treturn\n\t}\n\n\tfmt.Printf(\"You're working on %s\", contextId)\n}\n\nfunc info(c *cli.Context) {\n\tstorage := getRequestedStorage(c)\n\tcontext := storage.GetCurrentContext()\n\tfmt.Printf(\"%s\\t%s\", context.Id, context.GetTotalDuration().String())\n}\n\nfunc list(c *cli.Context) {\n\tstorage := getRequestedStorage(c)\n\tfor _, contextId := range storage.GetContextIds() {\n\t\tfmt.Println(contextId)\n\t}\n}\n<commit_msg>Add duration formatting<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/codegangsta\/cli\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n)\n\nfunc getRequestedStorage(c *cli.Context) *Storage {\n\tstorage, err := NewStorage(os.ExpandEnv(c.GlobalString(\"ctxfile\")))\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn storage\n}\n\nfunc stopContext(c *cli.Context) {\n\tstorage := getRequestedStorage(c)\n\n\tcurrentContext := storage.GetCurrentContext()\n\n\tif currentContext == nil {\n\t\tfmt.Println(\"No current context. Start a context first!\")\n\t\treturn\n\t}\n\n\tcurrentContext.Stop()\n\tstorage.Save()\n}\n\nfunc switchContext(c *cli.Context) {\n\tif len(c.Args()) != 1 {\n\t\tfmt.Println(\"You must provide the id of the context\")\n\t\treturn\n\t}\n\n\tcontextId := c.Args()[0]\n\tstorage := getRequestedStorage(c)\n\n\terr := storage.SwitchContext(contextId)\n\tif err != nil {\n\t\tfmt.Printf(\"%s\", err)\n\t\treturn\n\t}\n\n\tfmt.Printf(\"You're working on %s\", contextId)\n}\n\nfunc fmtDuration(duration time.Duration) string {\n\treturn fmt.Sprintf(\"%sm\", strconv.FormatInt(int64(duration\/time.Minute), 10))\n}\n\nfunc info(c *cli.Context) {\n\tstorage := getRequestedStorage(c)\n\tcontext := storage.GetCurrentContext()\n\tfmt.Printf(\"%s %s\", context.Id, fmtDuration(context.GetTotalDuration()))\n}\n\nfunc list(c *cli.Context) {\n\tstorage := getRequestedStorage(c)\n\tfor _, contextId := range storage.GetContextIds() {\n\t\tfmt.Println(contextId)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package container\n\nimport (\n\t\"github.com\/zenoss\/glog\"\n\t\"github.com\/zenoss\/serviced\"\n\t\"github.com\/zenoss\/serviced\/commons\/subprocess\"\n\t\"github.com\/zenoss\/serviced\/dao\"\n\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sort\"\n\t\"strconv\"\n\t\"syscall\"\n\t\"time\"\n)\n\nvar (\n\tErrInvalidCommand = errors.New(\"container: invalid command\")\n\tErrInvalidEndpoint = errors.New(\"container: invalid endpoint\")\n\tErrInvalidTenantID = errors.New(\"container: invalid tenant id\")\n\tErrInvalidServicedID = errors.New(\"container: invalid serviced id\")\n)\n\n\/\/ ControllerOptions are options to be run when starting a new proxy server\ntype ControllerOptions struct {\n\tServicedEndpoint string\n\tService struct {\n\t\tID string \/\/ The uuid of the service to launch\n\t\tTenantID string \/\/ The tentant ID of the service\n\t\tAutorestart bool \/\/ Controller will restart the service if it exits\n\t\tCommand []string \/\/ The command to launch\n\t}\n\tMux struct { \/\/ TCPMUX configuration: RFC 1078\n\t\tEnabled bool \/\/ True if muxing is used\n\t\tPort int \/\/ the TCP port to use\n\t\tTLS bool \/\/ True if TLS is used\n\t\tKeyPEMFile string \/\/ Path to the key file when TLS is used\n\t\tCertPEMFile string \/\/ Path to the cert file when TLS is used\n\t}\n\tLogforwarder struct { \/\/ Logforwarder configuration\n\t\tEnabled bool \/\/ True if enabled\n\t\tPath string \/\/ Path to the logforwarder program\n\t\tConfigFile string \/\/\n\t}\n\tMetric struct {\n\t\tAddress string \/\/ TCP port to host the metric service, :22350\n\t\tRemoteEndoint string \/\/ The url to forward metric queries\n\t}\n}\n\n\/\/ Controller is a object to manage the operations withing a container. For example,\n\/\/ it creates the managed service instance, logstash forwarding, port forwarding, etc.\ntype Controller struct {\n\toptions ControllerOptions\n\tmetricForwarder *MetricForwarder\n\tlogforwarder *subprocess.Instance\n\tlogforwarderExited chan error\n\tclosing chan chan error\n}\n\ntype Closer interface {\n\tClose() error\n}\n\nfunc (c *Controller) Close() error {\n\treturn nil\n}\n\n\/\/ NewController\nfunc NewController(options ControllerOptions) (*Controller, error) {\n\tc := &Controller{\n\t\toptions: options,\n\t}\n\tc.closing = make(chan chan error)\n\n\tif len(options.ServicedEndpoint) <= 0 {\n\t\treturn nil, ErrInvalidEndpoint\n\t}\n\n\tif options.Logforwarder.Enabled {\n\t\t\/\/ make sure we pick up any logfile that was modified within the\n\t\t\/\/ last three years\n\t\t\/\/ TODO: Either expose the 3 years a configurable or get rid of it\n\t\tlogforwarder, exited, err := subprocess.New(time.Second,\n\t\t\toptions.Logforwarder.Path,\n\t\t\t\"-old-files-hours=26280\",\n\t\t\t\"-config\", options.Logforwarder.ConfigFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tc.logforwarder = logforwarder\n\t\tc.logforwarderExited = exited\n\t}\n\n\t\/\/build metric redirect url -- assumes 8444 is port mapped\n\tmetric_redirect := options.Metric.RemoteEndoint\n\tif len(metric_redirect) == 0 {\n\t\tglog.V(1).Infof(\"container.Controller does not have metric forwarding\")\n\t} else {\n\t\tif len(options.Service.TenantID) == 0 {\n\t\t\treturn nil, ErrInvalidTenantID\n\t\t}\n\t\tif len(options.Service.ID) > 0 {\n\t\t\treturn nil, ErrInvalidServicedID\n\t\t}\n\t\tmetric_redirect += \"&controlplane_service_id=\" + options.Service.ID\n\t\tmetric_redirect += \"?controlplane_tenant_id=\" + options.Service.TenantID\n\t\t\/\/build and serve the container metric forwarder\n\t\tforwarder, err := NewMetricForwarder(options.Metric.Address, metric_redirect)\n\t\tif err != nil {\n\t\t\treturn c, err\n\t\t}\n\t\tc.metricForwarder = forwarder\n\t}\n\n\tglog.Infof(\"command: %v [%d]\", options.Service.Command, len(options.Service.Command))\n\tif len(options.Service.Command) < 1 {\n\t\tglog.Errorf(\"Invalid commandif \")\n\t\treturn c, ErrInvalidCommand\n\t}\n\n\treturn c, nil\n}\n\nfunc (c *Controller) Run() (err error) {\n\n\tsigc := make(chan os.Signal, 1)\n\tsignal.Notify(sigc,\n\t\tsyscall.SIGINT,\n\t\tsyscall.SIGTERM,\n\t\tsyscall.SIGQUIT)\n\n\targs := []string{}\n\tif len(c.options.Service.Command) > 1 {\n\t\targs = c.options.Service.Command[1:]\n\t}\n\n\tservice, serviceExited, _ := subprocess.New(time.Second*10, c.options.Service.Command[0], args...)\n\n\tfor {\n\t\tselect {\n\t\tcase sig := <-sigc:\n\t\t\tswitch sig {\n\t\t\tcase syscall.SIGTERM:\n\t\t\t\tc.options.Service.Autorestart = false\n\t\t\tcase syscall.SIGQUIT:\n\t\t\t\tc.options.Service.Autorestart = false\n\t\t\tcase syscall.SIGINT:\n\t\t\t\tc.options.Service.Autorestart = false\n\t\t\t}\n\t\t\tglog.Infof(\"notifying subprocess of signal %v\", sig)\n\t\t\tservice.Notify(sig)\n\t\t\tselect {\n\t\t\tcase <-serviceExited:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\n\t\tcase <-time.After(time.Second * 10):\n\t\t\tc.handleRemotePorts()\n\n\t\tcase <-serviceExited:\n\t\t\tif !c.options.Service.Autorestart {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tglog.Infof(\"restarting service process\")\n\t\t\tservice, serviceExited, _ = subprocess.New(time.Second*10, c.options.Service.Command[0], args...)\n\n\t\t}\n\t}\n\treturn\n}\n\nfunc (c *Controller) handleRemotePorts() {\n\tclient, err := serviced.NewLBClient(c.options.ServicedEndpoint)\n\tif err != nil {\n\t\tglog.Errorf(\"Could not create a client to endpoint: %s, %s\", c.options.ServicedEndpoint, err)\n\t\treturn\n\t}\n\tdefer client.Close()\n\n\tvar endpoints map[string][]*dao.ApplicationEndpoint\n\terr = client.GetServiceEndpoints(c.options.Service.ID, &endpoints)\n\tif err != nil {\n\t\tglog.Errorf(\"Error getting application endpoints for service %s: %s\", c.options.Service.ID, err)\n\t\treturn\n\t}\n\n\tfor key, endpointList := range endpoints {\n\t\tif len(endpointList) <= 0 {\n\t\t\tif proxy, ok := proxies[key]; ok {\n\t\t\t\temptyAddressList := make([]string, 0)\n\t\t\t\tproxy.SetNewAddresses(emptyAddressList)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\taddresses := make([]string, len(endpointList))\n\t\tfor i, endpoint := range endpointList {\n\t\t\tglog.Infof(\"endpoints: %s, %v\", key, *endpoint)\n\t\t\taddresses[i] = fmt.Sprintf(\"%s:%d\", endpoint.HostIp, endpoint.HostPort)\n\t\t}\n\t\tsort.Strings(addresses)\n\n\t\tvar (\n\t\t\tproxy *serviced.Proxy\n\t\t\tok bool\n\t\t)\n\n\t\tif proxy, ok = proxies[key]; !ok {\n\t\t\tglog.Infof(\"Attempting port map for: %s -> %+v\", key, *endpointList[0])\n\n\t\t\t\/\/ setup a new proxy\n\t\t\tlistener, err := net.Listen(\"tcp4\", fmt.Sprintf(\":%d\", endpointList[0].ContainerPort))\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"Could not bind to port: %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tproxy, err = serviced.NewProxy(\n\t\t\t\tfmt.Sprintf(\"%v\", endpointList[0]),\n\t\t\t\tuint16(c.options.Mux.Port),\n\t\t\t\tc.options.Mux.TLS,\n\t\t\t\tlistener)\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"Could not build proxy %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tglog.Infof(\"Success binding port: %s -> %+v\", key, proxy)\n\t\t\tproxies[key] = proxy\n\n\t\t\tif ep := endpointList[0]; ep.VirtualAddress != \"\" {\n\t\t\t\tp := strconv.FormatUint(uint64(ep.ContainerPort), 10)\n\t\t\t\terr := vifs.RegisterVirtualAddress(ep.VirtualAddress, p, ep.Protocol)\n\t\t\t\tif err != nil {\n\t\t\t\t\tglog.Errorf(\"Error creating virtual address: %+v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tproxy.SetNewAddresses(addresses)\n\t}\n\n}\n\nvar (\n\tproxies map[string]*serviced.Proxy\n\tvifs *VIFRegistry\n\tnextip int\n)\n\nfunc init() {\n\tproxies = make(map[string]*serviced.Proxy)\n\tvifs = NewVIFRegistry()\n\tnextip = 1\n}\n<commit_msg>use exec to launch commands<commit_after>package container\n\nimport (\n\t\"github.com\/zenoss\/glog\"\n\t\"github.com\/zenoss\/serviced\"\n\t\"github.com\/zenoss\/serviced\/commons\/subprocess\"\n\t\"github.com\/zenoss\/serviced\/dao\"\n\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\nvar (\n\tErrInvalidCommand = errors.New(\"container: invalid command\")\n\tErrInvalidEndpoint = errors.New(\"container: invalid endpoint\")\n\tErrInvalidTenantID = errors.New(\"container: invalid tenant id\")\n\tErrInvalidServicedID = errors.New(\"container: invalid serviced id\")\n)\n\n\/\/ ControllerOptions are options to be run when starting a new proxy server\ntype ControllerOptions struct {\n\tServicedEndpoint string\n\tService struct {\n\t\tID string \/\/ The uuid of the service to launch\n\t\tTenantID string \/\/ The tentant ID of the service\n\t\tAutorestart bool \/\/ Controller will restart the service if it exits\n\t\tCommand []string \/\/ The command to launch\n\t}\n\tMux struct { \/\/ TCPMUX configuration: RFC 1078\n\t\tEnabled bool \/\/ True if muxing is used\n\t\tPort int \/\/ the TCP port to use\n\t\tTLS bool \/\/ True if TLS is used\n\t\tKeyPEMFile string \/\/ Path to the key file when TLS is used\n\t\tCertPEMFile string \/\/ Path to the cert file when TLS is used\n\t}\n\tLogforwarder struct { \/\/ Logforwarder configuration\n\t\tEnabled bool \/\/ True if enabled\n\t\tPath string \/\/ Path to the logforwarder program\n\t\tConfigFile string \/\/\n\t}\n\tMetric struct {\n\t\tAddress string \/\/ TCP port to host the metric service, :22350\n\t\tRemoteEndoint string \/\/ The url to forward metric queries\n\t}\n}\n\n\/\/ Controller is a object to manage the operations withing a container. For example,\n\/\/ it creates the managed service instance, logstash forwarding, port forwarding, etc.\ntype Controller struct {\n\toptions ControllerOptions\n\tmetricForwarder *MetricForwarder\n\tlogforwarder *subprocess.Instance\n\tlogforwarderExited chan error\n\tclosing chan chan error\n}\n\ntype Closer interface {\n\tClose() error\n}\n\nfunc (c *Controller) Close() error {\n\treturn nil\n}\n\n\/\/ NewController\nfunc NewController(options ControllerOptions) (*Controller, error) {\n\tc := &Controller{\n\t\toptions: options,\n\t}\n\tc.closing = make(chan chan error)\n\n\tif len(options.ServicedEndpoint) <= 0 {\n\t\treturn nil, ErrInvalidEndpoint\n\t}\n\n\tif options.Logforwarder.Enabled {\n\t\t\/\/ make sure we pick up any logfile that was modified within the\n\t\t\/\/ last three years\n\t\t\/\/ TODO: Either expose the 3 years a configurable or get rid of it\n\t\tlogforwarder, exited, err := subprocess.New(time.Second,\n\t\t\toptions.Logforwarder.Path,\n\t\t\t\"-old-files-hours=26280\",\n\t\t\t\"-config\", options.Logforwarder.ConfigFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tc.logforwarder = logforwarder\n\t\tc.logforwarderExited = exited\n\t}\n\n\t\/\/build metric redirect url -- assumes 8444 is port mapped\n\tmetric_redirect := options.Metric.RemoteEndoint\n\tif len(metric_redirect) == 0 {\n\t\tglog.V(1).Infof(\"container.Controller does not have metric forwarding\")\n\t} else {\n\t\tif len(options.Service.TenantID) == 0 {\n\t\t\treturn nil, ErrInvalidTenantID\n\t\t}\n\t\tif len(options.Service.ID) > 0 {\n\t\t\treturn nil, ErrInvalidServicedID\n\t\t}\n\t\tmetric_redirect += \"&controlplane_service_id=\" + options.Service.ID\n\t\tmetric_redirect += \"?controlplane_tenant_id=\" + options.Service.TenantID\n\t\t\/\/build and serve the container metric forwarder\n\t\tforwarder, err := NewMetricForwarder(options.Metric.Address, metric_redirect)\n\t\tif err != nil {\n\t\t\treturn c, err\n\t\t}\n\t\tc.metricForwarder = forwarder\n\t}\n\n\tglog.Infof(\"command: %v [%d]\", options.Service.Command, len(options.Service.Command))\n\tif len(options.Service.Command) < 1 {\n\t\tglog.Errorf(\"Invalid commandif \")\n\t\treturn c, ErrInvalidCommand\n\t}\n\n\treturn c, nil\n}\n\nfunc (c *Controller) Run() (err error) {\n\n\tsigc := make(chan os.Signal, 1)\n\tsignal.Notify(sigc,\n\t\tsyscall.SIGINT,\n\t\tsyscall.SIGTERM,\n\t\tsyscall.SIGQUIT)\n\n\targs := []string{\"-c\", \"exec \" + strings.Join(c.options.Service.Command, \" \")}\n\n\tservice, serviceExited, _ := subprocess.New(time.Second*10, \"\/bin\/sh\", args...)\n\n\tvar restartAfter <-chan time.Time\n\tfor {\n\t\tselect {\n\t\tcase sig := <-sigc:\n\t\t\tswitch sig {\n\t\t\tcase syscall.SIGTERM:\n\t\t\t\tc.options.Service.Autorestart = false\n\t\t\tcase syscall.SIGQUIT:\n\t\t\t\tc.options.Service.Autorestart = false\n\t\t\tcase syscall.SIGINT:\n\t\t\t\tc.options.Service.Autorestart = false\n\t\t\t}\n\t\t\tglog.Infof(\"notifying subprocess of signal %v\", sig)\n\t\t\tservice.Notify(sig)\n\t\t\tselect {\n\t\t\tcase <-serviceExited:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\n\t\tcase <-time.After(time.Second * 10):\n\t\t\tc.handleRemotePorts()\n\n\t\tcase <-serviceExited:\n\t\t\tif !c.options.Service.Autorestart {\n\t\t\t\treturn\n\t\t\t}\n\t\t\trestartAfter = time.After(time.Second * 10)\n\n\t\tcase <-restartAfter:\n\t\t\tif !c.options.Service.Autorestart {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tglog.Infof(\"restarting service process\")\n\t\t\tservice, serviceExited, _ = subprocess.New(time.Second*10, c.options.Service.Command[0], args...)\n\t\t\trestartAfter = nil\n\n\t\t}\n\t}\n\treturn\n}\n\nfunc (c *Controller) handleRemotePorts() {\n\tclient, err := serviced.NewLBClient(c.options.ServicedEndpoint)\n\tif err != nil {\n\t\tglog.Errorf(\"Could not create a client to endpoint: %s, %s\", c.options.ServicedEndpoint, err)\n\t\treturn\n\t}\n\tdefer client.Close()\n\n\tvar endpoints map[string][]*dao.ApplicationEndpoint\n\terr = client.GetServiceEndpoints(c.options.Service.ID, &endpoints)\n\tif err != nil {\n\t\tglog.Errorf(\"Error getting application endpoints for service %s: %s\", c.options.Service.ID, err)\n\t\treturn\n\t}\n\n\tfor key, endpointList := range endpoints {\n\t\tif len(endpointList) <= 0 {\n\t\t\tif proxy, ok := proxies[key]; ok {\n\t\t\t\temptyAddressList := make([]string, 0)\n\t\t\t\tproxy.SetNewAddresses(emptyAddressList)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\taddresses := make([]string, len(endpointList))\n\t\tfor i, endpoint := range endpointList {\n\t\t\tglog.Infof(\"endpoints: %s, %v\", key, *endpoint)\n\t\t\taddresses[i] = fmt.Sprintf(\"%s:%d\", endpoint.HostIp, endpoint.HostPort)\n\t\t}\n\t\tsort.Strings(addresses)\n\n\t\tvar (\n\t\t\tproxy *serviced.Proxy\n\t\t\tok bool\n\t\t)\n\n\t\tif proxy, ok = proxies[key]; !ok {\n\t\t\tglog.Infof(\"Attempting port map for: %s -> %+v\", key, *endpointList[0])\n\n\t\t\t\/\/ setup a new proxy\n\t\t\tlistener, err := net.Listen(\"tcp4\", fmt.Sprintf(\":%d\", endpointList[0].ContainerPort))\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"Could not bind to port: %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tproxy, err = serviced.NewProxy(\n\t\t\t\tfmt.Sprintf(\"%v\", endpointList[0]),\n\t\t\t\tuint16(c.options.Mux.Port),\n\t\t\t\tc.options.Mux.TLS,\n\t\t\t\tlistener)\n\t\t\tif err != nil {\n\t\t\t\tglog.Errorf(\"Could not build proxy %s\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tglog.Infof(\"Success binding port: %s -> %+v\", key, proxy)\n\t\t\tproxies[key] = proxy\n\n\t\t\tif ep := endpointList[0]; ep.VirtualAddress != \"\" {\n\t\t\t\tp := strconv.FormatUint(uint64(ep.ContainerPort), 10)\n\t\t\t\terr := vifs.RegisterVirtualAddress(ep.VirtualAddress, p, ep.Protocol)\n\t\t\t\tif err != nil {\n\t\t\t\t\tglog.Errorf(\"Error creating virtual address: %+v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tproxy.SetNewAddresses(addresses)\n\t}\n\n}\n\nvar (\n\tproxies map[string]*serviced.Proxy\n\tvifs *VIFRegistry\n\tnextip int\n)\n\nfunc init() {\n\tproxies = make(map[string]*serviced.Proxy)\n\tvifs = NewVIFRegistry()\n\tnextip = 1\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\n\tmicrobadger \"github.com\/microscaling\/microbadger\/api\"\n\n\t\"github.com\/microscaling\/microscaling\/api\"\n\t\"github.com\/microscaling\/microscaling\/demand\"\n\t\"github.com\/microscaling\/microscaling\/utils\"\n)\n\n\/\/ LabelConfig is used when we retrieve get image config from the Microscaling server and then\n\/\/ get label config from MicroBadger APIs\ntype LabelConfig struct {\n\tAPIAddress string\n\tKubeConfig string\n\tKubeNamespace string\n}\n\n\/\/ compile-time assert that we implement the right interface\nvar _ Config = (*LabelConfig)(nil)\n\n\/\/ NewLabelConfig gets a new LabelConfig\nfunc NewLabelConfig(APIAddress string) *LabelConfig {\n\treturn &LabelConfig{\n\t\tAPIAddress: APIAddress,\n\t}\n}\n\n\/\/ NewKubeLabelConfig gets a new LabelConfig for Kubernetes\nfunc NewKubeLabelConfig(APIAddress string, KubeConfig string, KubeNamespace string) *LabelConfig {\n\treturn &LabelConfig{\n\t\tAPIAddress: APIAddress,\n\t\tKubeConfig: KubeConfig,\n\t\tKubeNamespace: KubeNamespace,\n\t}\n}\n\n\/\/ GetApps retrieves task config from the server using the API, and then gets scaling parameters from labels using MicroBadger\nfunc (l *LabelConfig) GetApps(userID string) (tasks []*demand.Task, maxContainers int, err error) {\n\ttasks, maxContainers, err = api.GetApps(l.APIAddress, userID)\n\tfor _, task := range tasks {\n\t\tif l.KubeNamespace != \"\" {\n\t\t\ttask.Image, err = l.getImageFromKubeDeployment(task.Name)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"Failed to get image for deployment %s: %v\", task.Image, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tlabels, err := microbadger.GetLabels(task.Image)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to get labels for %s: %v\", task.Image, err)\n\t\t} else {\n\t\t\tparseLabels(task, labels)\n\t\t}\n\t}\n\treturn\n}\n\nfunc parseLabels(task *demand.Task, labels map[string]string) {\n\t\/\/ Make sure there's a lower-case version of all labels (don't overwrite a\n\t\/\/ lower-case one if it's already there)\n\tfor k, v := range labels {\n\t\tkl := strings.ToLower(k)\n\t\tif kl != k {\n\t\t\tif _, ok := labels[kl]; !ok {\n\t\t\t\tlabels[kl] = v\n\t\t\t}\n\t\t}\n\t}\n\n\tif isScalable, ok := labels[\"com.microscaling.is-scalable\"]; ok {\n\t\tif b, err := strconv.ParseBool(isScalable); err == nil {\n\t\t\ttask.IsScalable = b\n\t\t}\n\t}\n\n\tv, err := parseIntLabel(labels, \"com.microscaling.priority\")\n\tif err == nil {\n\t\ttask.Priority = v\n\t}\n\n\tv, err = parseIntLabel(labels, \"com.microscaling.max-delta\")\n\tif err == nil {\n\t\ttask.MaxDelta = v\n\t}\n\n\tv, err = parseIntLabel(labels, \"com.microscaling.min-containers\")\n\tif err == nil {\n\t\ttask.MinContainers = v\n\t}\n\n\tv, err = parseIntLabel(labels, \"com.microscaling.max-containers\")\n\tif err == nil {\n\t\ttask.MaxContainers = v\n\t}\n}\n\nfunc parseIntLabel(labels map[string]string, key string) (intVal int, err error) {\n\tif val, ok := labels[key]; ok {\n\t\tintVal, err = strconv.Atoi(val)\n\t}\n\n\tif err != nil {\n\t\tlog.Infof(\"Ignoring bad value for label %s\", key)\n\t}\n\treturn\n}\n\nfunc (l *LabelConfig) getImageFromKubeDeployment(appName string) (imageName string, err error) {\n\tclientset, err := utils.NewKubeClientset(l.KubeConfig, l.KubeNamespace)\n\tif err != nil {\n\t\tlog.Errorf(\"Error creating Kubernetes clientset: %v\", err)\n\t\treturn\n\t}\n\n\td, err := clientset.Extensions().Deployments(l.KubeNamespace).Get(appName)\n\tif err != nil {\n\t\tlog.Errorf(\"Error getting deployment %s: %v\", appName, err)\n\t\treturn\n\t}\n\n\tpodSpec := d.Spec.Template.Spec\n\tcontainers := len(podSpec.Containers)\n\n\tif containers == 1 {\n\t\timageName = podSpec.Containers[0].Image\n\t\tlog.Debugf(\"Got image %s for deployment %s\", imageName, appName)\n\t} else {\n\t\t\/\/ TODO!! Support pods with multiple containers\n\t\treturn \"\", fmt.Errorf(\"Error expected 1 container per pod but found %d\", containers)\n\t}\n\n\treturn imageName, err\n}\n<commit_msg>Use a separate KubeLabelConfig type for getting labels when using Kubernetes<commit_after>package config\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\n\tmicrobadger \"github.com\/microscaling\/microbadger\/api\"\n\n\t\"github.com\/microscaling\/microscaling\/api\"\n\t\"github.com\/microscaling\/microscaling\/demand\"\n\t\"github.com\/microscaling\/microscaling\/utils\"\n)\n\n\/\/ LabelConfig is used when we retrieve get image config from the Microscaling server and then\n\/\/ get label config from MicroBadger APIs\ntype LabelConfig struct {\n\tAPIAddress string\n}\n\n\/\/ KubeLabelConfig is used when we retrieve get image config from the Microscaling server and then\n\/\/ get label config from MicroBadger APIs\ntype KubeLabelConfig struct {\n\tAPIAddress string\n\tKubeConfig string\n\tKubeNamespace string\n}\n\n\/\/ compile-time assert that we implement the right interface\nvar _ Config = (*LabelConfig)(nil)\n\n\/\/ NewLabelConfig gets a new LabelConfig\nfunc NewLabelConfig(APIAddress string) *LabelConfig {\n\treturn &LabelConfig{\n\t\tAPIAddress: APIAddress,\n\t}\n}\n\n\/\/ NewKubeLabelConfig gets a new KubeLabelConfig\nfunc NewKubeLabelConfig(APIAddress string, KubeConfig string, KubeNamespace string) *KubeLabelConfig {\n\treturn &KubeLabelConfig{\n\t\tAPIAddress: APIAddress,\n\t\tKubeConfig: KubeConfig,\n\t\tKubeNamespace: KubeNamespace,\n\t}\n}\n\n\/\/ GetApps retrieves task config from the server using the API, and then gets scaling parameters from labels using MicroBadger\nfunc (l *LabelConfig) GetApps(userID string) (tasks []*demand.Task, maxContainers int, err error) {\n\ttasks, maxContainers, err = api.GetApps(l.APIAddress, userID)\n\tfor _, task := range tasks {\n\t\tlabels, err := microbadger.GetLabels(task.Image)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to get labels for %s: %v\", task.Image, err)\n\t\t} else {\n\t\t\tparseLabels(task, labels)\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ GetApps retrieves task config from the server using the API, gets the Docker image from the Kubernetes deployments\n\/\/ API and then gets scaling parameters from labels using MicroBadger\nfunc (kl *KubeLabelConfig) GetApps(userID string) (tasks []*demand.Task, maxContainers int, err error) {\n\ttasks, maxContainers, err = api.GetApps(kl.APIAddress, userID)\n\tfor _, task := range tasks {\n\t\ttask.Image, err = kl.getImageFromKubeDeployment(task.Name)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to get image for deployment %s: %v\", task.Image, err)\n\t\t\treturn\n\t\t}\n\n\t\tlabels, err := microbadger.GetLabels(task.Image)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to get labels for %s: %v\", task.Image, err)\n\t\t} else {\n\t\t\tparseLabels(task, labels)\n\t\t}\n\t}\n\treturn\n}\n\nfunc parseLabels(task *demand.Task, labels map[string]string) {\n\t\/\/ Make sure there's a lower-case version of all labels (don't overwrite a\n\t\/\/ lower-case one if it's already there)\n\tfor k, v := range labels {\n\t\tkl := strings.ToLower(k)\n\t\tif kl != k {\n\t\t\tif _, ok := labels[kl]; !ok {\n\t\t\t\tlabels[kl] = v\n\t\t\t}\n\t\t}\n\t}\n\n\tif isScalable, ok := labels[\"com.microscaling.is-scalable\"]; ok {\n\t\tif b, err := strconv.ParseBool(isScalable); err == nil {\n\t\t\ttask.IsScalable = b\n\t\t}\n\t}\n\n\tv, err := parseIntLabel(labels, \"com.microscaling.priority\")\n\tif err == nil {\n\t\ttask.Priority = v\n\t}\n\n\tv, err = parseIntLabel(labels, \"com.microscaling.max-delta\")\n\tif err == nil {\n\t\ttask.MaxDelta = v\n\t}\n\n\tv, err = parseIntLabel(labels, \"com.microscaling.min-containers\")\n\tif err == nil {\n\t\ttask.MinContainers = v\n\t}\n\n\tv, err = parseIntLabel(labels, \"com.microscaling.max-containers\")\n\tif err == nil {\n\t\ttask.MaxContainers = v\n\t}\n}\n\nfunc parseIntLabel(labels map[string]string, key string) (intVal int, err error) {\n\tif val, ok := labels[key]; ok {\n\t\tintVal, err = strconv.Atoi(val)\n\t}\n\n\tif err != nil {\n\t\tlog.Infof(\"Ignoring bad value for label %s\", key)\n\t}\n\treturn\n}\n\nfunc (kl *KubeLabelConfig) getImageFromKubeDeployment(appName string) (imageName string, err error) {\n\tclientset, err := utils.NewKubeClientset(kl.KubeConfig, kl.KubeNamespace)\n\tif err != nil {\n\t\tlog.Errorf(\"Error creating Kubernetes clientset: %v\", err)\n\t\treturn\n\t}\n\n\td, err := clientset.Extensions().Deployments(kl.KubeNamespace).Get(appName)\n\tif err != nil {\n\t\tlog.Errorf(\"Error getting deployment %s: %v\", appName, err)\n\t\treturn\n\t}\n\n\tpodSpec := d.Spec.Template.Spec\n\tcontainers := len(podSpec.Containers)\n\n\tif containers == 1 {\n\t\timageName = podSpec.Containers[0].Image\n\t\tlog.Debugf(\"Got image %s for deployment %s\", imageName, appName)\n\t} else {\n\t\t\/\/ TODO!! Support pods with multiple containers\n\t\treturn \"\", fmt.Errorf(\"Error expected 1 container per pod but found %d\", containers)\n\t}\n\n\treturn imageName, err\n}\n<|endoftext|>"} {"text":"<commit_before>package platform\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/ascii85\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\t\"golang.org\/x\/sys\/windows\"\n)\n\nvar (\n\tuserenv = windows.MustLoadDLL(\"userenv.dll\")\n\tprocCreateProfile = userenv.MustFindProc(\"CreateProfile\")\n\tprocDeleteProfile = userenv.MustFindProc(\"DeleteProfileW\")\n\tprocGetProfilesDirectory = userenv.MustFindProc(\"GetProfilesDirectoryW\")\n)\n\n\/\/ createProfile, creates the profile and home directory of the user identified\n\/\/ by Security Identifier sid.\nfunc createProfile(sid, username string) (string, error) {\n\tconst S_OK = 0x00000000\n\tpsid, err := syscall.UTF16PtrFromString(sid)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tpusername, err := syscall.UTF16PtrFromString(username)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvar pathbuf [260]uint16\n\tr1, _, e1 := syscall.Syscall6(procCreateProfile.Addr(), 4,\n\t\tuintptr(unsafe.Pointer(psid)), \/\/ _In_ LPCWSTR pszUserSid\n\t\tuintptr(unsafe.Pointer(pusername)), \/\/ _In_ LPCWSTR pszUserName\n\t\tuintptr(unsafe.Pointer(&pathbuf[0])), \/\/ _Out_ LPWSTR pszProfilePath\n\t\tuintptr(len(pathbuf)), \/\/ _In_ DWORD cchProfilePath\n\t\t0, \/\/ unused\n\t\t0, \/\/ unused\n\t)\n\tif r1 != S_OK {\n\t\tif e1 == 0 {\n\t\t\treturn \"\", os.NewSyscallError(\"CreateProfile\", syscall.EINVAL)\n\t\t}\n\t\treturn \"\", os.NewSyscallError(\"CreateProfile\", e1)\n\t}\n\tprofilePath := syscall.UTF16ToString(pathbuf[0:])\n\treturn profilePath, nil\n}\n\n\/\/ deleteProfile, deletes the profile and home directory of the user identified\n\/\/ by Security Identifier sid.\nfunc deleteProfile(sid string) error {\n\tpsid, err := syscall.UTF16PtrFromString(sid)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr1, _, e1 := syscall.Syscall(procDeleteProfile.Addr(), 3,\n\t\tuintptr(unsafe.Pointer(psid)), \/\/ _In_ LPCTSTR lpSidString,\n\t\t0, \/\/ _In_opt_ LPCTSTR lpProfilePath,\n\t\t0, \/\/ _In_opt_ LPCTSTR lpComputerName\n\t)\n\tif r1 == 0 {\n\t\tif e1 == 0 {\n\t\t\treturn os.NewSyscallError(\"DeleteProfile\", syscall.EINVAL)\n\t\t}\n\t\treturn os.NewSyscallError(\"DeleteProfile\", e1)\n\t}\n\treturn nil\n}\n\n\/\/ getProfilesDirectory, returns the path to the root directory where user\n\/\/ profiles are stored (typically C:\\Users).\nfunc getProfilesDirectory() (string, error) {\n\tvar buf [syscall.MAX_PATH]uint16\n\tn := uint32(len(buf))\n\tr1, _, e1 := syscall.Syscall(procGetProfilesDirectory.Addr(), 2,\n\t\tuintptr(unsafe.Pointer(&buf[0])), \/\/ _Out_ LPTSTR lpProfilesDir,\n\t\tuintptr(unsafe.Pointer(&n)), \/\/ _Inout_ LPDWORD lpcchSize\n\t\t0,\n\t)\n\tif r1 == 0 {\n\t\tif e1 == 0 {\n\t\t\treturn \"\", os.NewSyscallError(\"GetProfilesDirectory\", syscall.EINVAL)\n\t\t}\n\t\treturn \"\", os.NewSyscallError(\"GetProfilesDirectory\", e1)\n\t}\n\ts := syscall.UTF16ToString(buf[0:])\n\treturn s, nil\n}\n\n\/\/ userHomeDirectory returns the home directory for user username. An error\n\/\/ is returned is the user profiles directory cannot be found or if the home\n\/\/ directory is invalid.\n\/\/\n\/\/ This is a minimal implementation that relies upon Windows naming home\n\/\/ directories after user names (i.e. the home directory of user \"foo\" is\n\/\/ C:\\Users\\foo). This is the typical behavior when creating local users\n\/\/ but is not guaranteed.\n\/\/\n\/\/ A more complete implementation may be possible with the LoadUserProfile\n\/\/ syscall.\nfunc userHomeDirectory(username string) (string, error) {\n\tpath, err := getProfilesDirectory()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thome := filepath.Join(path, username)\n\tfi, err := os.Stat(home) \/\/ safe to use os pkg here, len(home) < MAX_PATH\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif !fi.IsDir() {\n\t\treturn \"\", fmt.Errorf(\"not a directory: %s\", home)\n\t}\n\treturn home, nil\n}\n\nfunc isSpecial(c byte) bool {\n\treturn ('!' <= c && c <= '\/') || (':' <= c && c <= '@') ||\n\t\t('[' <= c && c <= '`') || ('{' <= c && c <= '~')\n}\n\n\/\/ validPassword, checks if password s meets the Windows complexity\n\/\/ requirements defined here:\n\/\/\n\/\/ https:\/\/technet.microsoft.com\/en-us\/library\/hh994562(v=ws.11).aspx\n\/\/\nfunc validPassword(s string) bool {\n\tvar (\n\t\tdigits bool\n\t\tspecial bool\n\t\talphaLow bool\n\t\talphaHigh bool\n\t)\n\tif len(s) < 8 {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(s); i++ {\n\t\tswitch c := s[i]; {\n\t\tcase '0' <= c && c <= '9':\n\t\t\tdigits = true\n\t\tcase 'a' <= c && c <= 'z':\n\t\t\talphaLow = true\n\t\tcase 'A' <= c && c <= 'Z':\n\t\t\talphaHigh = true\n\t\tcase isSpecial(c):\n\t\t\tspecial = true\n\t\t}\n\t}\n\tvar n int\n\tif digits {\n\t\tn++\n\t}\n\tif special {\n\t\tn++\n\t}\n\tif alphaLow {\n\t\tn++\n\t}\n\tif alphaHigh {\n\t\tn++\n\t}\n\treturn n >= 3\n}\n\n\/\/ generatePassword, returns a 14 char ascii85 encoded password.\nfunc generatePassword() (string, error) {\n\tconst Length = 14\n\n\tin := make([]byte, ascii85.MaxEncodedLen(Length))\n\tif _, err := io.ReadFull(rand.Reader, in); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tout := make([]byte, ascii85.MaxEncodedLen(len(in)))\n\tif n := ascii85.Encode(out, in); n < Length {\n\t\treturn \"\", errors.New(\"short password\")\n\t}\n\treturn string(out[:Length]), nil\n}\n\n\/\/ randomPassword, returns a ascii85 encoded 14 char password\n\/\/ if the password is longer than 14 chars NET.exe will ask\n\/\/ for confirmation due to backwards compatibility issues with\n\/\/ Windows prior to Windows 2000.\nfunc randomPassword() (string, error) {\n\tlimit := 100\n\tfor ; limit >= 0; limit-- {\n\t\ts, err := generatePassword()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif validPassword(s) {\n\t\t\treturn s, nil\n\t\t}\n\t}\n\treturn \"\", errors.New(\"failed to generate valid Windows password\")\n}\n\nfunc userExists(name string) bool {\n\t_, _, t, err := syscall.LookupSID(\"\", name)\n\treturn err == nil && t == syscall.SidTypeUser\n}\n\nfunc createUserProfile(username string) error {\n\tif userExists(username) {\n\t\treturn fmt.Errorf(\"user account already exists: %s\", username)\n\t}\n\n\t\/\/ Create local user\n\tpassword, err := randomPassword()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcreateCmd := exec.Command(\"NET.exe\", \"USER\", username, password, \"\/ADD\")\n\tcreateOut, err := createCmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating user (%s): %s\", err, string(createOut))\n\t}\n\n\t\/\/ Add to Administrators group\n\tgroupCmd := exec.Command(\"NET.exe\", \"LOCALGROUP\", \"Administrators\", username, \"\/ADD\")\n\tgroupOut, err := groupCmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error adding user to Administrator group (%s): %s\",\n\t\t\terr, string(groupOut))\n\t}\n\n\tsid, _, _, err := syscall.LookupSID(\"\", username)\n\tif err != nil {\n\t\treturn err\n\t}\n\tssid, err := sid.String()\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = createProfile(ssid, username)\n\treturn err\n}\n\nfunc deleteUserProfile(username string) error {\n\tsid, _, _, err := syscall.LookupSID(\"\", username)\n\tif err != nil {\n\t\treturn err\n\t}\n\tssid, err := sid.String()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := deleteProfile(ssid); err != nil {\n\t\treturn err\n\t}\n\n\tcmd := exec.Command(\"NET.exe\", \"USER\", username, \"\/DELETE\")\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error deleting user (%s): %s\", err, string(out))\n\t}\n\n\treturn nil\n}\n<commit_msg>Lazily load system DLLs on Windows platform<commit_after>package platform\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/ascii85\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\t\"golang.org\/x\/sys\/windows\"\n)\n\nvar (\n\tuserenv = windows.NewLazySystemDLL(\"userenv.dll\")\n\tprocCreateProfile = userenv.NewProc(\"CreateProfile\")\n\tprocDeleteProfile = userenv.NewProc(\"DeleteProfileW\")\n\tprocGetProfilesDirectory = userenv.NewProc(\"GetProfilesDirectoryW\")\n)\n\n\/\/ createProfile, creates the profile and home directory of the user identified\n\/\/ by Security Identifier sid.\nfunc createProfile(sid, username string) (string, error) {\n\tconst S_OK = 0x00000000\n\tif err := procCreateProfile.Find(); err != nil {\n\t\treturn \"\", err\n\t}\n\tpsid, err := syscall.UTF16PtrFromString(sid)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tpusername, err := syscall.UTF16PtrFromString(username)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvar pathbuf [260]uint16\n\tr1, _, e1 := syscall.Syscall6(procCreateProfile.Addr(), 4,\n\t\tuintptr(unsafe.Pointer(psid)), \/\/ _In_ LPCWSTR pszUserSid\n\t\tuintptr(unsafe.Pointer(pusername)), \/\/ _In_ LPCWSTR pszUserName\n\t\tuintptr(unsafe.Pointer(&pathbuf[0])), \/\/ _Out_ LPWSTR pszProfilePath\n\t\tuintptr(len(pathbuf)), \/\/ _In_ DWORD cchProfilePath\n\t\t0, \/\/ unused\n\t\t0, \/\/ unused\n\t)\n\tif r1 != S_OK {\n\t\tif e1 == 0 {\n\t\t\treturn \"\", os.NewSyscallError(\"CreateProfile\", syscall.EINVAL)\n\t\t}\n\t\treturn \"\", os.NewSyscallError(\"CreateProfile\", e1)\n\t}\n\tprofilePath := syscall.UTF16ToString(pathbuf[0:])\n\treturn profilePath, nil\n}\n\n\/\/ deleteProfile, deletes the profile and home directory of the user identified\n\/\/ by Security Identifier sid.\nfunc deleteProfile(sid string) error {\n\tif err := procDeleteProfile.Find(); err != nil {\n\t\treturn err\n\t}\n\tpsid, err := syscall.UTF16PtrFromString(sid)\n\tif err != nil {\n\t\treturn err\n\t}\n\tr1, _, e1 := syscall.Syscall(procDeleteProfile.Addr(), 3,\n\t\tuintptr(unsafe.Pointer(psid)), \/\/ _In_ LPCTSTR lpSidString,\n\t\t0, \/\/ _In_opt_ LPCTSTR lpProfilePath,\n\t\t0, \/\/ _In_opt_ LPCTSTR lpComputerName\n\t)\n\tif r1 == 0 {\n\t\tif e1 == 0 {\n\t\t\treturn os.NewSyscallError(\"DeleteProfile\", syscall.EINVAL)\n\t\t}\n\t\treturn os.NewSyscallError(\"DeleteProfile\", e1)\n\t}\n\treturn nil\n}\n\n\/\/ getProfilesDirectory, returns the path to the root directory where user\n\/\/ profiles are stored (typically C:\\Users).\nfunc getProfilesDirectory() (string, error) {\n\tif err := procGetProfilesDirectory.Find(); err != nil {\n\t\treturn \"\", err\n\t}\n\tvar buf [syscall.MAX_PATH]uint16\n\tn := uint32(len(buf))\n\tr1, _, e1 := syscall.Syscall(procGetProfilesDirectory.Addr(), 2,\n\t\tuintptr(unsafe.Pointer(&buf[0])), \/\/ _Out_ LPTSTR lpProfilesDir,\n\t\tuintptr(unsafe.Pointer(&n)), \/\/ _Inout_ LPDWORD lpcchSize\n\t\t0,\n\t)\n\tif r1 == 0 {\n\t\tif e1 == 0 {\n\t\t\treturn \"\", os.NewSyscallError(\"GetProfilesDirectory\", syscall.EINVAL)\n\t\t}\n\t\treturn \"\", os.NewSyscallError(\"GetProfilesDirectory\", e1)\n\t}\n\ts := syscall.UTF16ToString(buf[0:])\n\treturn s, nil\n}\n\n\/\/ userHomeDirectory returns the home directory for user username. An error\n\/\/ is returned is the user profiles directory cannot be found or if the home\n\/\/ directory is invalid.\n\/\/\n\/\/ This is a minimal implementation that relies upon Windows naming home\n\/\/ directories after user names (i.e. the home directory of user \"foo\" is\n\/\/ C:\\Users\\foo). This is the typical behavior when creating local users\n\/\/ but is not guaranteed.\n\/\/\n\/\/ A more complete implementation may be possible with the LoadUserProfile\n\/\/ syscall.\nfunc userHomeDirectory(username string) (string, error) {\n\tpath, err := getProfilesDirectory()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thome := filepath.Join(path, username)\n\tfi, err := os.Stat(home) \/\/ safe to use os pkg here, len(home) < MAX_PATH\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif !fi.IsDir() {\n\t\treturn \"\", fmt.Errorf(\"not a directory: %s\", home)\n\t}\n\treturn home, nil\n}\n\nfunc isSpecial(c byte) bool {\n\treturn ('!' <= c && c <= '\/') || (':' <= c && c <= '@') ||\n\t\t('[' <= c && c <= '`') || ('{' <= c && c <= '~')\n}\n\n\/\/ validPassword, checks if password s meets the Windows complexity\n\/\/ requirements defined here:\n\/\/\n\/\/ https:\/\/technet.microsoft.com\/en-us\/library\/hh994562(v=ws.11).aspx\n\/\/\nfunc validPassword(s string) bool {\n\tvar (\n\t\tdigits bool\n\t\tspecial bool\n\t\talphaLow bool\n\t\talphaHigh bool\n\t)\n\tif len(s) < 8 {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(s); i++ {\n\t\tswitch c := s[i]; {\n\t\tcase '0' <= c && c <= '9':\n\t\t\tdigits = true\n\t\tcase 'a' <= c && c <= 'z':\n\t\t\talphaLow = true\n\t\tcase 'A' <= c && c <= 'Z':\n\t\t\talphaHigh = true\n\t\tcase isSpecial(c):\n\t\t\tspecial = true\n\t\t}\n\t}\n\tvar n int\n\tif digits {\n\t\tn++\n\t}\n\tif special {\n\t\tn++\n\t}\n\tif alphaLow {\n\t\tn++\n\t}\n\tif alphaHigh {\n\t\tn++\n\t}\n\treturn n >= 3\n}\n\n\/\/ generatePassword, returns a 14 char ascii85 encoded password.\nfunc generatePassword() (string, error) {\n\tconst Length = 14\n\n\tin := make([]byte, ascii85.MaxEncodedLen(Length))\n\tif _, err := io.ReadFull(rand.Reader, in); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tout := make([]byte, ascii85.MaxEncodedLen(len(in)))\n\tif n := ascii85.Encode(out, in); n < Length {\n\t\treturn \"\", errors.New(\"short password\")\n\t}\n\treturn string(out[:Length]), nil\n}\n\n\/\/ randomPassword, returns a ascii85 encoded 14 char password\n\/\/ if the password is longer than 14 chars NET.exe will ask\n\/\/ for confirmation due to backwards compatibility issues with\n\/\/ Windows prior to Windows 2000.\nfunc randomPassword() (string, error) {\n\tlimit := 100\n\tfor ; limit >= 0; limit-- {\n\t\ts, err := generatePassword()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif validPassword(s) {\n\t\t\treturn s, nil\n\t\t}\n\t}\n\treturn \"\", errors.New(\"failed to generate valid Windows password\")\n}\n\nfunc userExists(name string) bool {\n\t_, _, t, err := syscall.LookupSID(\"\", name)\n\treturn err == nil && t == syscall.SidTypeUser\n}\n\nfunc createUserProfile(username string) error {\n\tif userExists(username) {\n\t\treturn fmt.Errorf(\"user account already exists: %s\", username)\n\t}\n\n\t\/\/ Create local user\n\tpassword, err := randomPassword()\n\tif err != nil {\n\t\treturn err\n\t}\n\tcreateCmd := exec.Command(\"NET.exe\", \"USER\", username, password, \"\/ADD\")\n\tcreateOut, err := createCmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating user (%s): %s\", err, string(createOut))\n\t}\n\n\t\/\/ Add to Administrators group\n\tgroupCmd := exec.Command(\"NET.exe\", \"LOCALGROUP\", \"Administrators\", username, \"\/ADD\")\n\tgroupOut, err := groupCmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error adding user to Administrator group (%s): %s\",\n\t\t\terr, string(groupOut))\n\t}\n\n\tsid, _, _, err := syscall.LookupSID(\"\", username)\n\tif err != nil {\n\t\treturn err\n\t}\n\tssid, err := sid.String()\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = createProfile(ssid, username)\n\treturn err\n}\n\nfunc deleteUserProfile(username string) error {\n\tsid, _, _, err := syscall.LookupSID(\"\", username)\n\tif err != nil {\n\t\treturn err\n\t}\n\tssid, err := sid.String()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := deleteProfile(ssid); err != nil {\n\t\treturn err\n\t}\n\n\tcmd := exec.Command(\"NET.exe\", \"USER\", username, \"\/DELETE\")\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error deleting user (%s): %s\", err, string(out))\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package chartcontrib\n\nimport (\n\t\"github.com\/golang\/freetype\/truetype\"\n\t\"golang.org\/x\/image\/math\/fixed\"\n\t\"log\"\n\t\"math\"\n\n\t\"github.com\/wcharczuk\/go-chart\"\n\t\"github.com\/wcharczuk\/go-chart\/drawing\"\n)\n\ntype MyRange struct {\n\t*chart.ContinuousRange\n\tcount int\n\tlinespacing float64\n}\n\nfunc (r MyRange) GetTicks(re chart.Renderer, defaults chart.Style, vf chart.ValueFormatter) []chart.Tick {\n\tlog.Println(\"MyRange.GetTicks called\")\n\tlog.Println(r.GetDelta())\n\tlog.Println(r.GetDomain())\n\tlog.Println(r.GetMin())\n\tlog.Println(r.GetMax())\n\n\tcount := r.count\n\tif count == 0 {\n\t\tlog.Println(\"count is zero\")\n\t\tlog.Println(\"domain\", r.GetDomain())\n\t\tlog.Println(\"font\", defaults.GetFont().Name(truetype.NameIDFontFullName))\n\t\tlog.Println(\"size\", defaults.GetFontSize())\n\t\tlog.Println(\"bounds\", defaults.GetFont().Bounds(fixed.Int26_6(defaults.GetFontSize())))\n\t\tfont_height := float64(defaults.GetFont().Bounds(fixed.Int26_6(defaults.GetFontSize())).Max.Y)\n\t\tlog.Println(\"font height\", font_height)\n\n\t\tlog.Println(float64(r.GetDomain()) \/ float64(font_height))\n\t\tlog.Println(float64(r.GetDomain()) \/ float64(font_height) \/ r.linespacing)\n\t\tcount = int(float64(r.GetDomain()) \/ float64(font_height) \/ r.linespacing)\n\t\tlog.Println(\"count\", count)\n\n\t\textents := drawing.Extents(defaults.GetFont(), defaults.GetFontSize())\n\t\tlog.Println(\"ascent\", extents.Ascent)\n\t\tlog.Println(\"descent\", extents.Descent)\n\t\tlog.Println(\"height\", extents.Height)\n\n\t\tcount = int(float64(r.GetDomain()) \/ (float64(extents.Height) * r.linespacing))\n\t\tlog.Println(\"count\", count)\n\t}\n\n\tsteplength := r.GetDelta() \/ float64(count)\n\tlog.Println(steplength)\n\n\tfactor := math.Pow10(int(math.Log10(steplength)))\n\tlog.Println(factor)\n\n\tnormlength := steplength \/ factor\n\tlog.Println(normlength)\n\n\tvar minindex float64\n\tmindiff := math.Inf(1)\n\tfor _, steptry := range []float64{.1, .2, .5, 1, 2, 2.5, 5, 10} {\n\t\tdiff := math.Abs(normlength - steptry)\n\t\tlog.Println(\"diff\", steptry, diff)\n\t\tif diff < mindiff {\n\t\t\tmindiff = diff\n\t\t\tminindex = steptry\n\t\t}\n\t}\n\tlog.Println(\"min\", minindex)\n\n\tnewsteplength := float64(minindex) * factor\n\tlog.Println(newsteplength)\n\n\tmin := r.GetMin()\n\tmax := r.GetMax()\n\n\tticks := []chart.Tick{}\n\tfor actual := chart.Math.RoundUp(min, newsteplength); actual <= max; actual += newsteplength {\n\t\tvalue := float64(actual)\n\t\tticks = append(ticks, chart.Tick{Value: value, Label: vf(value)})\n\t}\n\n\treturn ticks\n}\n\nfunc ContinuousRangeWithTicksLinespacing(linespacing float64) MyRange {\n\treturn MyRange{ContinuousRange: &chart.ContinuousRange{}, linespacing: linespacing}\n}\n\nfunc ContinuousRangeWithTicksCount(count int) MyRange {\n\treturn MyRange{ContinuousRange: &chart.ContinuousRange{}, count: count}\n}\n<commit_msg>add documentation, remove some debug output<commit_after>package chartcontrib\n\nimport (\n\t\"log\"\n\t\"math\"\n\n\t\"github.com\/wcharczuk\/go-chart\"\n\t\"github.com\/wcharczuk\/go-chart\/drawing\"\n)\n\ntype MyRange struct {\n\t*chart.ContinuousRange\n\tcount int\n\tlinespacing float64\n}\n\nfunc (r MyRange) GetTicks(re chart.Renderer, defaults chart.Style, vf chart.ValueFormatter) []chart.Tick {\n\tcount := r.count\n\tif count == 0 {\n\t\textents := drawing.Extents(defaults.GetFont(), defaults.GetFontSize())\n\t\tlog.Println(\"height\", extents.Height)\n\n\t\tcount = int(float64(r.GetDomain()) \/ (float64(extents.Height) * r.linespacing))\n\t\tlog.Println(\"count\", count)\n\t}\n\n\tsteplength := r.GetDelta() \/ float64(count)\n\tlog.Println(steplength)\n\n\tfactor := math.Pow10(int(math.Log10(steplength)))\n\tlog.Println(factor)\n\n\tnormlength := steplength \/ factor\n\tlog.Println(normlength)\n\n\tvar minindex float64\n\tmindiff := math.Inf(1)\n\tfor _, steptry := range []float64{.1, .2, .5, 1, 2, 2.5, 5, 10} {\n\t\tdiff := math.Abs(normlength - steptry)\n\t\tlog.Println(\"diff\", steptry, diff)\n\t\tif diff < mindiff {\n\t\t\tmindiff = diff\n\t\t\tminindex = steptry\n\t\t}\n\t}\n\tlog.Println(\"min\", minindex)\n\n\tnewsteplength := float64(minindex) * factor\n\tlog.Println(newsteplength)\n\n\tmin := r.GetMin()\n\tmax := r.GetMax()\n\n\tticks := []chart.Tick{}\n\tfor actual := chart.Math.RoundUp(min, newsteplength); actual <= max; actual += newsteplength {\n\t\tvalue := float64(actual)\n\t\tticks = append(ticks, chart.Tick{Value: value, Label: vf(value)})\n\t}\n\n\treturn ticks\n}\n\n\/\/ ContinuousRangeWithTicksLinespacing renders \"nice\" ticks on a YAxis, depending on the linespacing parameter.\n\/\/ the actual linespacing depends on the min\/max values and the height of the axis.\nfunc ContinuousRangeWithTicksLinespacing(linespacing float64) MyRange {\n\treturn MyRange{ContinuousRange: &chart.ContinuousRange{}, linespacing: linespacing}\n}\n\n\/\/ ContinuousRangeWithTicksCount renders \"nice\" ticks on a YAxis, depending on the count parameter.\n\/\/ the actual tick count depends on the min\/max values of the axis.\nfunc ContinuousRangeWithTicksCount(count int) MyRange {\n\treturn MyRange{ContinuousRange: &chart.ContinuousRange{}, count: count}\n}\n<|endoftext|>"} {"text":"<commit_before>package mealplanner\n\nimport (\n\t\"appengine\/datastore\"\n\t\"appengine\"\n\t\"io\"\n\t\"os\"\n\t\"json\"\n\t\"fmt\"\n)\n\ntype backup struct {\n\tDishes []Dish\n\tIngredients []Ingredient\n\tMeasuredIngredients map[string][]MeasuredIngredient\n\tTags map[string][]Word\n\tPairings map[string][]Pairing\n\tMenus []Menu\n}\n\ntype importer struct {\n\tcontext\n\t\/\/ decoded JSON data being imported\n\tjsonData backup\n\t\/\/ mapping from the string-id present in the jsonData\n \/\/ to the actual datastore key of the entity after import\n\t\/\/ only for the case that the string-id isn't a valid key for our\n \/\/ library\n\tfixUpKeys map[string]*datastore.Key\n\t\/\/ an index of tags, (dish|ingredient)key -> tagstr -> tagkey\n \/\/ keyed based on the actual datastore key we will use, not\n \/\/ the string from json\n\tallTags map[string]map[string]*datastore.Key\n\t\/\/ slice of all new tags to be added\n\tnewTags []interface{}\n\t\/\/ slice of the keys for the new tags to be added\n\tnewTagKeys []*datastore.Key\n}\n\nfunc importFile(c *context, file io.Reader) {\n\tdatastore.RunInTransaction(c.c, func(tc appengine.Context) os.Error {\n\t\tdecoder := json.NewDecoder(file)\n\t\tdata := backup{}\n\t\terr := decoder.Decode(&data)\n\t\tcheck(err)\n\t\tworker := &importer{\n\t\t\t\tcontext: *c,\n\t\t\t\tjsonData : data,\n\t\t\t\tfixUpKeys : make(map[string]*datastore.Key),\n\t\t\t\tallTags : make(map[string]map[string]*datastore.Key),\n\t\t\t\tnewTags : make([]interface{}, 0, 100),\n\t\t\t\tnewTagKeys : make([]*datastore.Key, 0, 100),\n\t\t}\n\t\tworker.c = tc\n\t\tworker.doImport()\n\t\treturn nil\n\t}, nil)\n}\n\nfunc (self *importer) doImport() {\n\tself.indexCurrentTags()\n\tself.importIngredients()\n\tself.importDishes()\n\tself.importMeasuredIngredients()\n\tself.importPairings()\n\tself.importMenus()\n\t\/\/ add the tags we collected\n\t_, err := datastore.PutMulti(self.c, self.newTagKeys, self.newTags)\n\tcheck(err)\n}\n\nfunc (self *importer) indexCurrentTags() {\n\t\/\/ build an index of tags, (dish|ingredient)key -> tagstr -> tagkey\n\tquery := self.NewQuery(\"Tags\")\n\titer := query.Run(self.c)\n\tword := &Word{}\n\tfor key, err := iter.Next(word);\n\t\terr == nil;\n\t\tkey, err = iter.Next(word) {\n\t\tparent := key.Parent().Encode()\n\t\tvar m map[string]*datastore.Key\n\t\tif m, ok := self.allTags[parent]; !ok {\n\t\t\tm = make(map[string]*datastore.Key)\n\t\t\tself.allTags[parent] = m\n\t\t}\n\t\tm[word.Word] = key\n\t}\n}\n\nfunc (self *importer) restoreKey(encoded string, parent *datastore.Key) *datastore.Key {\n\tkey, err := datastore.DecodeKey(encoded)\n\tcheck(err)\n\tif newKey, found := self.fixUpKeys[encoded]; found {\n\t\treturn newKey\n\t}\n\tif !self.isInLibrary(key) {\n\t\tnewKey := datastore.NewIncompleteKey(self.c, key.Kind(), parent)\n\t\tself.fixUpKeys[encoded] = newKey\n\t\treturn newKey\n\t}\n\treturn key\n}\n\nfunc (self *importer) importIngredients() {\n\t\/\/ get the previously listed ingredients\n\t\/\/ build an index by name\n\tprevIngredientsByName := self.indexItems(self.NewQuery(\"Ingredient\"),\n\t\t&Ingredient{},\n\t\tfunc(key *datastore.Key, item interface{}) string {\n\t\t\treturn item.(Ingredient).Name\n\t\t})\n\tputItems := make([]interface{}, 0, len(self.jsonData.Ingredients))\n\tputKeys := make([]*datastore.Key, 0, len(self.jsonData.Ingredients))\n\tputIds := make([]string, 0, len(self.jsonData.Ingredients))\n\n\t\/\/ prepare all the ingredients\n\tfor index, _ := range self.jsonData.Ingredients {\n\t\ti := &self.jsonData.Ingredients[index]\n\t\tid := i.Id\n\t\tkey := self.restoreKey(id, self.lid)\n\t\tif key.Incomplete() {\n\t\t\t\/\/ check if we have an item of the same name already\n\t\t\tif ikey, ok := prevIngredientsByName[i.Name]; ok {\n\t\t\t\tself.fixUpKeys[id] = ikey\n\t\t\t\tkey = ikey\n\t\t\t}\n\t\t}\n\t\ti.Id = \"\"\n\t\tputItems = append(putItems, i)\n\t\tputKeys = append(putKeys, key)\n\t\tputIds = append(putIds, id)\n\t}\n\t\/\/ put all the ingredients\n\toutKeys, err := datastore.PutMulti(self.c, putKeys, putItems)\n\tcheck(err)\n \/\/ update the fixUpKeys for any new items\n\tfor index, putKey := range putKeys {\n\t\tif putKey.Incomplete() {\n\t\t\tself.fixUpKeys[putIds[index]] = outKeys[index]\n\t\t}\n\t}\n\n\t\/\/ add tags\n\tself.importTags(putIds, outKeys)\n\t\/\/ update keywords\n\tfor index, _ := range putItems {\n\t\ting := putItems[index].(*Ingredient)\n\t\twords := make(map[string]bool)\n\t\taddWords(ing.Name, words)\n\t\taddWords(ing.Category, words)\n\t\tfor tag, _ := range self.allTags[putIds[index]] {\n\t\t\taddWords(tag, words)\n\t\t}\n\t\tupdateKeywords(self.c, outKeys[index], words)\n\t}\n}\n\nfunc (self *importer) importDishes() {\n\t\/\/ get the previously listed dishes\n\t\/\/ build an index by name\n\tprevDishesByImportId := self.indexItems(self.NewQuery(\"Dish\"), &Dish{},\n\t\tfunc(key *datastore.Key, item interface{}) string {\n\t\t\treturn item.(Dish).Id\n\t\t})\n\t\/\/ lists for dishes being written\n\tcount := len(self.jsonData.Dishes)\n\tputItems := make([]interface{}, 0, count)\n\tputKeys := make([]*datastore.Key, 0, count)\n\tputIds := make([]string, 0, count)\n\n\t\/\/ prepare all the dishes\n\tfor index, _ := range self.jsonData.Dishes {\n\t\ti := &self.jsonData.Dishes[index]\n\t\tid := i.Id\n\t\tkey := self.restoreKey(id, self.lid)\n\t\tif key.Incomplete() {\n\t\t\t\/\/ check if we have an item of the same name already\n\t\t\tif ikey, ok := prevDishesByImportId[id]; ok {\n\t\t\t\tself.fixUpKeys[id] = ikey\n\t\t\t\tkey = ikey\n\t\t\t}\n\t\t}\n\t\tputItems = append(putItems, i)\n\t\tputKeys = append(putKeys, key)\n\t\tputIds = append(putIds, id)\n\t}\n\t\/\/ put all the dishes\n\toutKeys, err := datastore.PutMulti(self.c, putKeys, putItems)\n\tcheck(err)\n \/\/ update the fixUpKeys for any new items\n\tfor index, putKey := range putKeys {\n\t\tif putKey.Incomplete() {\n\t\t\tself.fixUpKeys[putIds[index]] = outKeys[index]\n\t\t}\n\t}\n\n\t\/\/ add tags\n\tself.importTags(putIds, outKeys)\n\t\/\/ update keywords\n\tfor index, _ := range putItems {\n\t\tdish := putItems[index].(*Dish)\n\t\twords := make(map[string]bool)\n\t\taddWords(dish.Name, words)\n\t\taddWords(dish.Source, words)\n\t\tfor tag, _ := range self.allTags[putIds[index]] {\n\t\t\taddWords(tag, words)\n\t\t}\n\t\tupdateKeywords(self.c, outKeys[index], words)\n\t}\n}\n\n\/\/jsonData.MeasuredIngredients map[string][]MeasuredIngredient\nfunc (self *importer) importMeasuredIngredients() {\n\tmiKeyFunc := func (key *datastore.Key, item interface{}) string {\n\t\t\t\treturn key.Parent().Encode() + item.(MeasuredIngredient).Ingredient.Encode();\n\t\t\t}\n\t\/\/ index existing items by their parent dish and the ingredient\n\t\/\/ they reference\n\tprevMIs := self.indexItems(self.NewQuery(\"MeasuredIngredient\"),\n\t\t\t &MeasuredIngredient{}, miKeyFunc)\n\tcount := len(self.jsonData.MeasuredIngredients)\n\tputItems := make([]interface{}, 0, count)\n\tputKeys := make([]*datastore.Key, 0, count)\n\n\tfor dishId, jsonMis := range self.jsonData.MeasuredIngredients {\n\t\tdishKey := self.restoreKey(dishId, self.lid)\n\t\tdishKeyEncoded := dishKey.Encode()\n\t\tfor index, _ := range jsonMis {\n\t\t\tjsonMi := &jsonMis[index]\n\t\t\tmiKey := self.restoreKey(jsonMi.Id, dishKey)\n\t\t\tingKey := self.restoreKey(jsonMi.Ingredient.Encode(), self.lid)\n\t\t\tif miKey.Incomplete() {\n\t\t\t\tmiIndexKey := dishKeyEncoded + ingKey.Encode()\n\t\t\t\tif existingKey, found := prevMIs[miIndexKey]; found {\n\t\t\t\t\tmiKey = existingKey\n\t\t\t\t}\n\t\t\t}\n\t\t\tjsonMi.Ingredient = ingKey\n\t\t\tjsonMi.Id = \"\"\n\t\t\tputItems = append(putItems, jsonMi)\n\t\t\tputKeys = append(putKeys, miKey)\n\t\t}\n\t}\n\tif len(putKeys) > 0 {\n\t\t_, err := datastore.PutMulti(self.c, putKeys, putItems)\n\t\tcheck(err)\n\t}\n}\n\n\/\/jsonData.Pairings map[string][]Pairing\nfunc (self *importer) importPairings() {\n\tpairingKeyFunc := func (key *datastore.Key, item interface{}) string {\n\t\t\t\t\t return key.Parent().Encode() + item.(Pairing).Other.Encode() + item.(Pairing).Description;\n\t\t\t\t }\n\t\/\/ index existing items by their parent dish and the ingredient\n\t\/\/ they reference\n\tprevPairings := self.indexItems(self.NewQuery(\"Pairing\"),\n\t\t\t &Pairing{}, pairingKeyFunc)\n\tcount := len(self.jsonData.Pairings)\n\tputItems := make([]interface{}, 0, count)\n\tputKeys := make([]*datastore.Key, 0, count)\n\n\tfor dishId, jsonPairings := range self.jsonData.Pairings {\n\t\tdishKey := self.restoreKey(dishId, self.lid)\n\t\tdishKeyEncoded := dishKey.Encode()\n\t\tfor index, _ := range jsonPairings {\n\t\t\tjsonPairing := &jsonPairings[index]\n\t\t\tpairingKey := self.restoreKey(jsonPairing.Id, dishKey)\n\t\t\totherKey := self.restoreKey(jsonPairing.Other.Encode(), self.lid)\n\t\t\tpairingIndexKey := dishKeyEncoded + otherKey.Encode() + jsonPairing.Description\n\t\t\t\/\/ add the new pairing if it wasn't already present\n\t\t\tif _, found := prevPairings[pairingIndexKey]; !found {\n\t\t\t\tjsonPairing.Other = otherKey\n\t\t\t\tjsonPairing.Id = \"\"\n\t\t\t\tputItems = append(putItems, jsonPairing)\n\t\t\t\tputKeys = append(putKeys, pairingKey)\n\t\t\t}\n\t\t}\n\t}\n\tif len(putKeys) > 0 {\n\t\t_, err := datastore.PutMulti(self.c, putKeys, putItems)\n\t\tcheck(err)\n\t}\n}\n\nfunc (self *importer) importMenus() {\n\tmenuKeyFunc := func (key *datastore.Key, item interface{}) string {\n\t\t\t\t\t\t return item.(Menu).Name\n\t\t\t\t\t }\n\t\/\/ index existing items by their name\n\tprevMenus := self.indexItems(self.NewQuery(\"Menu\"), &Menu{},\n\t\t\t\t\t\t menuKeyFunc)\n\tcount := len(self.jsonData.Menus)\n\tputItems := make([]interface{}, 0, count)\n\tputKeys := make([]*datastore.Key, 0, count)\n\tfor index, _ := range self.jsonData.Menus {\n\t\tjsonMenu := &self.jsonData.Menus[index]\n\t\tkey := self.restoreKey(jsonMenu.Id, self.lid)\n\t\tif key.Incomplete() {\n\t\t\tif existingKey, found := prevMenus[jsonMenu.Name]; found {\n\t\t\t\tkey = existingKey\n\t\t\t}\n\t\t}\n\t\tfor index, dishKey := range jsonMenu.Dishes {\n\t\t\tjsonMenu.Dishes[index] = self.restoreKey(dishKey.Encode(), self.lid)\n\t\t}\n\t\tjsonMenu.Id = \"\"\n\t\tputItems = append(putItems, jsonMenu)\n\t\tputKeys = append(putKeys, key)\n\t}\n\tif len(putKeys) > 0 {\n\t\t_, err := datastore.PutMulti(self.c, putKeys, putItems)\n\t\tcheck(err)\n\t}\n}\n\n\n\/\/ jsonData.Tags map[string][]Word\n\/\/ take a slice of ids as appeared in json and slice of\n\/\/ keys as now stored for items with tags\n\/\/ add any tags for these items that were in json but not yet stored\nfunc (self *importer) importTags (ids []string, keys []*datastore.Key) {\n\tfor index, parentKey := range keys {\n\t\tparentId := ids[index]\n\t\tif importTags, ok := self.jsonData.Tags[parentId]; ok {\n\t\t\tvar myTags map[string]*datastore.Key\n\t\t\t\/\/ if this item doesn't have a tags collection yet,\n\t\t\t\/\/ add it\n\t\t\tif myTags, ok = self.allTags[parentId]; !ok {\n\t\t\t\tmyTags = make(map[string]*datastore.Key)\n\t\t\t\tself.allTags[parentId] = myTags\n\t\t\t}\n\t\t\t\/\/ go through tags from json\n\t\t\tfor _, tag := range importTags {\n\t\t\t\tif _, found := myTags[tag.Word]; !found {\n\t\t\t\t\t\/\/ this tag doesn't exist yet, add to our list\n\t\t\t\t\tself.newTags = append(self.newTags, &Word{\"\",tag.Word})\n\t\t\t\t\tnewTagKey := datastore.NewIncompleteKey(self.c, \"Tags\", parentKey)\n\t\t\t\t\tself.newTagKeys = append(self.newTagKeys, newTagKey)\n\t\t\t\t\tmyTags[tag.Word] = newTagKey\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (self *importer) indexItems(query *datastore.Query, item interface{},\n\tkeyFunc func (*datastore.Key, interface{}) string ) map[string]*datastore.Key {\n\tindex := make(map[string]*datastore.Key)\n\titer := query.Run(self.c)\n\tfor dataKey, err := iter.Next(item);\n\t\t\terr == nil;\n\t\t\tdataKey, err = iter.Next(item) {\n\t\tindexKey := keyFunc(dataKey, item)\n\t\tif len(indexKey) > 0 {\n\t\t\tindex[indexKey] = dataKey\n\t\t}\n\t}\n\treturn index\n}\n\n<commit_msg>fixing defects when importing items to a non-empty library<commit_after>package mealplanner\n\nimport (\n\t\"appengine\/datastore\"\n\t\"appengine\"\n\t\"io\"\n\t\"os\"\n\t\"json\"\n\t\/\/\"fmt\"\n)\n\ntype backup struct {\n\tDishes []Dish\n\tIngredients []Ingredient\n\tMeasuredIngredients map[string][]MeasuredIngredient\n\tTags map[string][]Word\n\tPairings map[string][]Pairing\n\tMenus []Menu\n}\n\ntype importer struct {\n\tcontext\n\t\/\/ decoded JSON data being imported\n\tjsonData backup\n\t\/\/ mapping from the string-id present in the jsonData\n \/\/ to the actual datastore key of the entity after import\n\t\/\/ only for the case that the string-id isn't a valid key for our\n \/\/ library\n\tfixUpKeys map[string]*datastore.Key\n\t\/\/ an index of tags, (dish|ingredient)key -> tagstr -> dummy\n \/\/ keyed based on the actual datastore key we will use, not\n \/\/ the string from json\n\tallTags map[string]map[string] bool\n\t\/\/ slice of all new tags to be added\n\tnewTags []interface{}\n\t\/\/ slice of the keys for the new tags to be added\n\tnewTagKeys []*datastore.Key\n}\n\nfunc importFile(c *context, file io.Reader) {\n\tdatastore.RunInTransaction(c.c, func(tc appengine.Context) os.Error {\n\t\tdecoder := json.NewDecoder(file)\n\t\tdata := backup{}\n\t\terr := decoder.Decode(&data)\n\t\tcheck(err)\n\t\tworker := &importer{\n\t\t\t\tcontext: *c,\n\t\t\t\tjsonData : data,\n\t\t\t\tfixUpKeys : make(map[string]*datastore.Key),\n\t\t\t\tallTags : make(map[string]map[string] bool),\n\t\t\t\tnewTags : make([]interface{}, 0, 100),\n\t\t\t\tnewTagKeys : make([]*datastore.Key, 0, 100),\n\t\t}\n\t\tworker.c = tc\n\t\tworker.doImport()\n\t\treturn nil\n\t}, nil)\n}\n\nfunc (self *importer) doImport() {\n\t\/\/fmt.Fprintf(self.w, \"indexTags %v\\n\", self.allTags)\n\tself.indexCurrentTags()\n\t\/\/self.debugPrintTags()\n\t\/\/fmt.Fprintf(self.w, \"ingredients\\n\")\n\tself.importIngredients()\n\t\/\/fmt.Fprintf(self.w, \"Dishes\\n\")\n\tself.importDishes()\n\t\/\/self.debugPrintTags()\n\t\/\/fmt.Fprintf(self.w, \"MIs\\n\")\n\tself.importMeasuredIngredients()\n\t\/\/fmt.Fprintf(self.w, \"pairings\\n\")\n\tself.importPairings()\n\t\/\/fmt.Fprintf(self.w, \"menus\\n\")\n\tself.importMenus()\n\t\/\/fmt.Fprintf(self.w, \"tags\\n\")\n\t\/\/ add the tags we collected\n\t_, err := datastore.PutMulti(self.c, self.newTagKeys, self.newTags)\n\tcheck(err)\n}\n\nfunc (self *importer) indexCurrentTags() {\n\t\/\/ build an index of tags, (dish|ingredient)key -> tagstr -> tagkey\n\tquery := self.NewQuery(\"Tags\")\n\titer := query.Run(self.c)\n\tword := &Word{}\n\tfor key, err := iter.Next(word);\n\t\terr == nil;\n\t\tkey, err = iter.Next(word) {\n\t\tparent := key.Parent().Encode()\n\t\tvar m map[string]bool\n\t\tvar found bool\n\t\t\/\/fmt.Fprintf(self.w, \"indexTags parent %v %v\\n\", parent, word.Word)\n\t\tif m, found = self.allTags[parent]; !found {\n\t\t\t\/\/fmt.Fprintf(self.w, \"indexTags make\\n\")\n\t\t\tm = make(map[string]bool)\n\t\t\tself.allTags[parent] = m\n\t\t}\n\t\t\/\/fmt.Fprintf(self.w, \"indexTags m %v\\n\", m)\n\t\tm[word.Word] = true\n\t}\n}\n\nfunc (self *importer) debugPrintTags() {\n\tj, _ := json.MarshalIndent(self.allTags, \"\", \"\\t\")\n\tself.w.Write(j)\n}\n\nfunc (self *importer) restoreKey(encoded string, parent *datastore.Key) *datastore.Key {\n\tkey, err := datastore.DecodeKey(encoded)\n\tcheck(err)\n\tif newKey, found := self.fixUpKeys[encoded]; found {\n\t\treturn newKey\n\t}\n\tif !self.isInLibrary(key) {\n\t\tnewKey := datastore.NewIncompleteKey(self.c, key.Kind(), parent)\n\t\tself.fixUpKeys[encoded] = newKey\n\t\treturn newKey\n\t}\n\treturn key\n}\n\nfunc (self *importer) importIngredients() {\n\t\/\/ get the previously listed ingredients\n\t\/\/ build an index by name\n\tprevIngredientsByName := self.indexItems(self.NewQuery(\"Ingredient\"),\n\t\t&Ingredient{},\n\t\tfunc(key *datastore.Key, item interface{}) string {\n\t\t\treturn item.(*Ingredient).Name\n\t\t})\n\tputItems := make([]interface{}, 0, len(self.jsonData.Ingredients))\n\tputKeys := make([]*datastore.Key, 0, len(self.jsonData.Ingredients))\n\tputIds := make([]string, 0, len(self.jsonData.Ingredients))\n\n\t\/\/ prepare all the ingredients\n\tfor index, _ := range self.jsonData.Ingredients {\n\t\ti := &self.jsonData.Ingredients[index]\n\t\tid := i.Id\n\t\tkey := self.restoreKey(id, self.lid)\n\t\tif key.Incomplete() {\n\t\t\t\/\/ check if we have an item of the same name already\n\t\t\tif ikey, ok := prevIngredientsByName[i.Name]; ok {\n\t\t\t\tself.fixUpKeys[id] = ikey\n\t\t\t\tkey = ikey\n\t\t\t}\n\t\t}\n\t\ti.Id = \"\"\n\t\tputItems = append(putItems, i)\n\t\tputKeys = append(putKeys, key)\n\t\tputIds = append(putIds, id)\n\t}\n\t\/\/ put all the ingredients\n\toutKeys, err := datastore.PutMulti(self.c, putKeys, putItems)\n\tcheck(err)\n \/\/ update the fixUpKeys for any new items\n\tfor index, putKey := range putKeys {\n\t\tif putKey.Incomplete() {\n\t\t\tself.fixUpKeys[putIds[index]] = outKeys[index]\n\t\t}\n\t}\n\n\t\/\/ add tags\n\tself.importTags(putIds, outKeys)\n\t\/\/ update keywords\n\tfor index, _ := range putItems {\n\t\ting := putItems[index].(*Ingredient)\n\t\twords := make(map[string]bool)\n\t\taddWords(ing.Name, words)\n\t\taddWords(ing.Category, words)\n\t\tfor tag, _ := range self.allTags[outKeys[index].Encode()] {\n\t\t\taddWords(tag, words)\n\t\t}\n\t\tupdateKeywords(self.c, outKeys[index], words)\n\t}\n}\n\nfunc (self *importer) importDishes() {\n\t\/\/ get the previously listed dishes\n\t\/\/ build an index by name\n\tprevDishesByImportId := self.indexItems(self.NewQuery(\"Dish\"), &Dish{},\n\t\tfunc(key *datastore.Key, item interface{}) string {\n\t\t\treturn item.(*Dish).Id\n\t\t})\n\t\/\/ lists for dishes being written\n\tcount := len(self.jsonData.Dishes)\n\tputItems := make([]interface{}, 0, count)\n\tputKeys := make([]*datastore.Key, 0, count)\n\tputIds := make([]string, 0, count)\n\n\t\/\/ prepare all the dishes\n\tfor index, _ := range self.jsonData.Dishes {\n\t\ti := &self.jsonData.Dishes[index]\n\t\tid := i.Id\n\t\tkey := self.restoreKey(id, self.lid)\n\t\tif key.Incomplete() {\n\t\t\t\/\/ check if we have an item of the same name already\n\t\t\tif ikey, ok := prevDishesByImportId[id]; ok {\n\t\t\t\tself.fixUpKeys[id] = ikey\n\t\t\t\tkey = ikey\n\t\t\t}\n\t\t}\n\t\tputItems = append(putItems, i)\n\t\tputKeys = append(putKeys, key)\n\t\tputIds = append(putIds, id)\n\t}\n\t\/\/ put all the dishes\n\toutKeys, err := datastore.PutMulti(self.c, putKeys, putItems)\n\tcheck(err)\n \/\/ update the fixUpKeys for any new items\n\tfor index, putKey := range putKeys {\n\t\tif putKey.Incomplete() {\n\t\t\tself.fixUpKeys[putIds[index]] = outKeys[index]\n\t\t}\n\t}\n\n\t\/\/ add tags\n\tself.importTags(putIds, outKeys)\n\t\/\/ update keywords\n\tfor index, _ := range putItems {\n\t\tdish := putItems[index].(*Dish)\n\t\twords := make(map[string]bool)\n\t\taddWords(dish.Name, words)\n\t\taddWords(dish.Source, words)\n\t\tfor tag, _ := range self.allTags[outKeys[index].Encode()] {\n\t\t\taddWords(tag, words)\n\t\t}\n\t\tupdateKeywords(self.c, outKeys[index], words)\n\t}\n}\n\n\/\/jsonData.MeasuredIngredients map[string][]MeasuredIngredient\nfunc (self *importer) importMeasuredIngredients() {\n\tmiKeyFunc := func (key *datastore.Key, item interface{}) string {\n\t\t\t\treturn key.Parent().Encode() + item.(*MeasuredIngredient).Ingredient.Encode();\n\t\t\t}\n\t\/\/ index existing items by their parent dish and the ingredient\n\t\/\/ they reference\n\tprevMIs := self.indexItems(self.NewQuery(\"MeasuredIngredient\"),\n\t\t\t &MeasuredIngredient{}, miKeyFunc)\n\tcount := len(self.jsonData.MeasuredIngredients)\n\tputItems := make([]interface{}, 0, count)\n\tputKeys := make([]*datastore.Key, 0, count)\n\n\tfor dishId, jsonMis := range self.jsonData.MeasuredIngredients {\n\t\tdishKey := self.restoreKey(dishId, self.lid)\n\t\tdishKeyEncoded := dishKey.Encode()\n\t\tfor index, _ := range jsonMis {\n\t\t\tjsonMi := &jsonMis[index]\n\t\t\tmiKey := self.restoreKey(jsonMi.Id, dishKey)\n\t\t\tingKey := self.restoreKey(jsonMi.Ingredient.Encode(), self.lid)\n\t\t\tif miKey.Incomplete() {\n\t\t\t\tmiIndexKey := dishKeyEncoded + ingKey.Encode()\n\t\t\t\tif existingKey, found := prevMIs[miIndexKey]; found {\n\t\t\t\t\tmiKey = existingKey\n\t\t\t\t}\n\t\t\t}\n\t\t\tjsonMi.Ingredient = ingKey\n\t\t\tjsonMi.Id = \"\"\n\t\t\tputItems = append(putItems, jsonMi)\n\t\t\tputKeys = append(putKeys, miKey)\n\t\t}\n\t}\n\tif len(putKeys) > 0 {\n\t\t_, err := datastore.PutMulti(self.c, putKeys, putItems)\n\t\tcheck(err)\n\t}\n}\n\n\/\/jsonData.Pairings map[string][]Pairing\nfunc (self *importer) importPairings() {\n\tpairingKeyFunc := func (key *datastore.Key, item interface{}) string {\n\t\t\t\t\t return key.Parent().Encode() + item.(*Pairing).Other.Encode() + item.(*Pairing).Description;\n\t\t\t\t }\n\t\/\/ index existing items by their parent dish and the ingredient\n\t\/\/ they reference\n\tprevPairings := self.indexItems(self.NewQuery(\"Pairing\"),\n\t\t\t &Pairing{}, pairingKeyFunc)\n\tcount := len(self.jsonData.Pairings)\n\tputItems := make([]interface{}, 0, count)\n\tputKeys := make([]*datastore.Key, 0, count)\n\n\tfor dishId, jsonPairings := range self.jsonData.Pairings {\n\t\tdishKey := self.restoreKey(dishId, self.lid)\n\t\tdishKeyEncoded := dishKey.Encode()\n\t\tfor index, _ := range jsonPairings {\n\t\t\tjsonPairing := &jsonPairings[index]\n\t\t\tpairingKey := self.restoreKey(jsonPairing.Id, dishKey)\n\t\t\totherKey := self.restoreKey(jsonPairing.Other.Encode(), self.lid)\n\t\t\tpairingIndexKey := dishKeyEncoded + otherKey.Encode() + jsonPairing.Description\n\t\t\t\/\/ add the new pairing if it wasn't already present\n\t\t\tif _, found := prevPairings[pairingIndexKey]; !found {\n\t\t\t\tjsonPairing.Other = otherKey\n\t\t\t\tjsonPairing.Id = \"\"\n\t\t\t\tputItems = append(putItems, jsonPairing)\n\t\t\t\tputKeys = append(putKeys, pairingKey)\n\t\t\t}\n\t\t}\n\t}\n\tif len(putKeys) > 0 {\n\t\t_, err := datastore.PutMulti(self.c, putKeys, putItems)\n\t\tcheck(err)\n\t}\n}\n\nfunc (self *importer) importMenus() {\n\tmenuKeyFunc := func (key *datastore.Key, item interface{}) string {\n\t\t\t\t\t\t return item.(*Menu).Name\n\t\t\t\t\t }\n\t\/\/ index existing items by their name\n\tprevMenus := self.indexItems(self.NewQuery(\"Menu\"), &Menu{},\n\t\t\t\t\t\t menuKeyFunc)\n\tcount := len(self.jsonData.Menus)\n\tputItems := make([]interface{}, 0, count)\n\tputKeys := make([]*datastore.Key, 0, count)\n\tfor index, _ := range self.jsonData.Menus {\n\t\tjsonMenu := &self.jsonData.Menus[index]\n\t\tkey := self.restoreKey(jsonMenu.Id, self.lid)\n\t\tif key.Incomplete() {\n\t\t\tif existingKey, found := prevMenus[jsonMenu.Name]; found {\n\t\t\t\tkey = existingKey\n\t\t\t}\n\t\t}\n\t\tfor index, dishKey := range jsonMenu.Dishes {\n\t\t\tjsonMenu.Dishes[index] = self.restoreKey(dishKey.Encode(), self.lid)\n\t\t}\n\t\tjsonMenu.Id = \"\"\n\t\tputItems = append(putItems, jsonMenu)\n\t\tputKeys = append(putKeys, key)\n\t}\n\tif len(putKeys) > 0 {\n\t\t_, err := datastore.PutMulti(self.c, putKeys, putItems)\n\t\tcheck(err)\n\t}\n}\n\n\n\/\/ jsonData.Tags map[string][]Word\n\/\/ take a slice of ids as appeared in json and slice of\n\/\/ keys as now stored for items with tags\n\/\/ add any tags for these items that were in json but not yet stored\nfunc (self *importer) importTags (ids []string, keys []*datastore.Key) {\n\tfor index, parentKey := range keys {\n\t\tparentId := ids[index]\n\t\tdestId := keys[index].Encode()\n\t\tif importTags, ok := self.jsonData.Tags[parentId]; ok {\n\t\t\tvar myTags map[string]bool\n\t\t\t\/\/ if this item doesn't have a tags collection yet,\n\t\t\t\/\/ add it\n\t\t\tvar found bool\n\t\t\tif myTags, found = self.allTags[destId]; !found {\n\t\t\t\tmyTags = make(map[string]bool)\n\t\t\t\tself.allTags[destId] = myTags\n\t\t\t}\n\t\t\t\/\/ go through tags from json\n\t\t\tfor _, tag := range importTags {\n\t\t\t\tif _, found := myTags[tag.Word]; !found {\n\t\t\t\t\t\/\/ this tag doesn't exist yet, add to our list\n\t\t\t\t\tself.newTags = append(self.newTags, &Word{\"\",tag.Word})\n\t\t\t\t\tnewTagKey := datastore.NewIncompleteKey(self.c, \"Tags\", parentKey)\n\t\t\t\t\tself.newTagKeys = append(self.newTagKeys, newTagKey)\n\t\t\t\t\tmyTags[tag.Word] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (self *importer) indexItems(query *datastore.Query, item interface{},\n\tkeyFunc func (*datastore.Key, interface{}) string ) map[string]*datastore.Key {\n\tindex := make(map[string]*datastore.Key)\n\titer := query.Run(self.c)\n\tfor dataKey, err := iter.Next(item);\n\t\t\terr == nil;\n\t\t\tdataKey, err = iter.Next(item) {\n\t\tindexKey := keyFunc(dataKey, item)\n\t\tif len(indexKey) > 0 {\n\t\t\tindex[indexKey] = dataKey\n\t\t}\n\t}\n\treturn index\n}\n\n<|endoftext|>"} {"text":"<commit_before>package resource\n\nimport (\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\ttfjson \"github.com\/hashicorp\/terraform-json\"\n\ttftest \"github.com\/hashicorp\/terraform-plugin-test\/v2\"\n\ttesting \"github.com\/mitchellh\/go-testing-interface\"\n\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/terraform\"\n)\n\nfunc testStepNewConfig(t testing.T, c TestCase, wd *tftest.WorkingDir, step TestStep) error {\n\tt.Helper()\n\n\tspewConf := spew.NewDefaultConfig()\n\tspewConf.SortKeys = true\n\n\tvar idRefreshCheck *terraform.ResourceState\n\tidRefresh := c.IDRefreshName != \"\"\n\n\tif !step.Destroy {\n\t\tvar state *terraform.State\n\t\terr := runProviderCommand(t, func() error {\n\t\t\tstate = getState(t, wd)\n\t\t\treturn nil\n\t\t}, wd, c.ProviderFactories)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := testStepTaint(state, step); err != nil {\n\t\t\tt.Fatalf(\"Error when tainting resources: %s\", err)\n\t\t}\n\t}\n\n\twd.RequireSetConfig(t, step.Config)\n\n\t\/\/ require a refresh before applying\n\t\/\/ failing to do this will result in data sources not being updated\n\terr := runProviderCommand(t, func() error {\n\t\twd.RequireRefresh(t)\n\t\treturn nil\n\t}, wd, c.ProviderFactories)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !step.PlanOnly {\n\t\terr := runProviderCommand(t, func() error {\n\t\t\treturn wd.Apply()\n\t\t}, wd, c.ProviderFactories)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar state *terraform.State\n\t\terr = runProviderCommand(t, func() error {\n\t\t\tstate = getState(t, wd)\n\t\t\treturn nil\n\t\t}, wd, c.ProviderFactories)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif step.Check != nil {\n\t\t\tstate.IsBinaryDrivenTest = true\n\t\t\tif err := step.Check(state); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Test for perpetual diffs by performing a plan, a refresh, and another plan\n\n\t\/\/ do a plan\n\terr = runProviderCommand(t, func() error {\n\t\twd.RequireCreatePlan(t)\n\t\treturn nil\n\t}, wd, c.ProviderFactories)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar plan *tfjson.Plan\n\terr = runProviderCommand(t, func() error {\n\t\tplan = wd.RequireSavedPlan(t)\n\t\treturn nil\n\t}, wd, c.ProviderFactories)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !planIsEmpty(plan) {\n\t\tif step.ExpectNonEmptyPlan {\n\t\t\tt.Log(\"[INFO] Got non-empty plan, as expected\")\n\t\t} else {\n\n\t\t\tt.Fatalf(\"After applying this test step, the plan was not empty. %s\", spewConf.Sdump(plan))\n\t\t}\n\t}\n\n\t\/\/ do a refresh\n\tif !c.PreventPostDestroyRefresh {\n\t\terr := runProviderCommand(t, func() error {\n\t\t\twd.RequireRefresh(t)\n\t\t\treturn nil\n\t\t}, wd, c.ProviderFactories)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ do another plan\n\terr = runProviderCommand(t, func() error {\n\t\twd.RequireCreatePlan(t)\n\t\treturn nil\n\t}, wd, c.ProviderFactories)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = runProviderCommand(t, func() error {\n\t\tplan = wd.RequireSavedPlan(t)\n\t\treturn nil\n\t}, wd, c.ProviderFactories)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ check if plan is empty\n\tif !planIsEmpty(plan) {\n\t\tif step.ExpectNonEmptyPlan {\n\t\t\tt.Log(\"[INFO] Got non-empty plan, as expected\")\n\t\t} else {\n\n\t\t\tt.Fatalf(\"After applying this test step and performing a `terraform refresh`, the plan was not empty. %s\", spewConf.Sdump(plan))\n\t\t}\n\t}\n\n\t\/\/ ID-ONLY REFRESH\n\t\/\/ If we've never checked an id-only refresh and our state isn't\n\t\/\/ empty, find the first resource and test it.\n\tvar state *terraform.State\n\terr = runProviderCommand(t, func() error {\n\t\tstate = getState(t, wd)\n\t\treturn nil\n\t}, wd, c.ProviderFactories)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif idRefresh && idRefreshCheck == nil && !state.Empty() {\n\t\t\/\/ Find the first non-nil resource in the state\n\t\tfor _, m := range state.Modules {\n\t\t\tif len(m.Resources) > 0 {\n\t\t\t\tif v, ok := m.Resources[c.IDRefreshName]; ok {\n\t\t\t\t\tidRefreshCheck = v\n\t\t\t\t}\n\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t\/\/ If we have an instance to check for refreshes, do it\n\t\t\/\/ immediately. We do it in the middle of another test\n\t\t\/\/ because it shouldn't affect the overall state (refresh\n\t\t\/\/ is read-only semantically) and we want to fail early if\n\t\t\/\/ this fails. If refresh isn't read-only, then this will have\n\t\t\/\/ caught a different bug.\n\t\tif idRefreshCheck != nil {\n\t\t\tif err := testIDRefresh(c, t, wd, step, idRefreshCheck); err != nil {\n\t\t\t\tt.Fatalf(\n\t\t\t\t\t\"[ERROR] Test: ID-only test failed: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>Return config errors during TestSteps.<commit_after>package resource\n\nimport (\n\t\"github.com\/davecgh\/go-spew\/spew\"\n\ttfjson \"github.com\/hashicorp\/terraform-json\"\n\ttftest \"github.com\/hashicorp\/terraform-plugin-test\/v2\"\n\ttesting \"github.com\/mitchellh\/go-testing-interface\"\n\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/terraform\"\n)\n\nfunc testStepNewConfig(t testing.T, c TestCase, wd *tftest.WorkingDir, step TestStep) error {\n\tt.Helper()\n\n\tspewConf := spew.NewDefaultConfig()\n\tspewConf.SortKeys = true\n\n\tvar idRefreshCheck *terraform.ResourceState\n\tidRefresh := c.IDRefreshName != \"\"\n\n\tif !step.Destroy {\n\t\tvar state *terraform.State\n\t\terr := runProviderCommand(t, func() error {\n\t\t\tstate = getState(t, wd)\n\t\t\treturn nil\n\t\t}, wd, c.ProviderFactories)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := testStepTaint(state, step); err != nil {\n\t\t\tt.Fatalf(\"Error when tainting resources: %s\", err)\n\t\t}\n\t}\n\n\twd.RequireSetConfig(t, step.Config)\n\n\t\/\/ require a refresh before applying\n\t\/\/ failing to do this will result in data sources not being updated\n\terr := runProviderCommand(t, func() error {\n\t\treturn wd.Refresh()\n\t}, wd, c.ProviderFactories)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !step.PlanOnly {\n\t\terr := runProviderCommand(t, func() error {\n\t\t\treturn wd.Apply()\n\t\t}, wd, c.ProviderFactories)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar state *terraform.State\n\t\terr = runProviderCommand(t, func() error {\n\t\t\tstate = getState(t, wd)\n\t\t\treturn nil\n\t\t}, wd, c.ProviderFactories)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif step.Check != nil {\n\t\t\tstate.IsBinaryDrivenTest = true\n\t\t\tif err := step.Check(state); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Test for perpetual diffs by performing a plan, a refresh, and another plan\n\n\t\/\/ do a plan\n\terr = runProviderCommand(t, func() error {\n\t\treturn wd.CreatePlan()\n\t}, wd, c.ProviderFactories)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar plan *tfjson.Plan\n\terr = runProviderCommand(t, func() error {\n\t\tplan = wd.RequireSavedPlan(t)\n\t\treturn nil\n\t}, wd, c.ProviderFactories)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !planIsEmpty(plan) {\n\t\tif step.ExpectNonEmptyPlan {\n\t\t\tt.Log(\"[INFO] Got non-empty plan, as expected\")\n\t\t} else {\n\n\t\t\tt.Fatalf(\"After applying this test step, the plan was not empty. %s\", spewConf.Sdump(plan))\n\t\t}\n\t}\n\n\t\/\/ do a refresh\n\tif !c.PreventPostDestroyRefresh {\n\t\terr := runProviderCommand(t, func() error {\n\t\t\treturn wd.Refresh()\n\t\t}, wd, c.ProviderFactories)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ do another plan\n\terr = runProviderCommand(t, func() error {\n\t\treturn wd.CreatePlan()\n\t}, wd, c.ProviderFactories)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = runProviderCommand(t, func() error {\n\t\tplan = wd.RequireSavedPlan(t)\n\t\treturn nil\n\t}, wd, c.ProviderFactories)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ check if plan is empty\n\tif !planIsEmpty(plan) {\n\t\tif step.ExpectNonEmptyPlan {\n\t\t\tt.Log(\"[INFO] Got non-empty plan, as expected\")\n\t\t} else {\n\n\t\t\tt.Fatalf(\"After applying this test step and performing a `terraform refresh`, the plan was not empty. %s\", spewConf.Sdump(plan))\n\t\t}\n\t}\n\n\t\/\/ ID-ONLY REFRESH\n\t\/\/ If we've never checked an id-only refresh and our state isn't\n\t\/\/ empty, find the first resource and test it.\n\tvar state *terraform.State\n\terr = runProviderCommand(t, func() error {\n\t\tstate = getState(t, wd)\n\t\treturn nil\n\t}, wd, c.ProviderFactories)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif idRefresh && idRefreshCheck == nil && !state.Empty() {\n\t\t\/\/ Find the first non-nil resource in the state\n\t\tfor _, m := range state.Modules {\n\t\t\tif len(m.Resources) > 0 {\n\t\t\t\tif v, ok := m.Resources[c.IDRefreshName]; ok {\n\t\t\t\t\tidRefreshCheck = v\n\t\t\t\t}\n\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\t\/\/ If we have an instance to check for refreshes, do it\n\t\t\/\/ immediately. We do it in the middle of another test\n\t\t\/\/ because it shouldn't affect the overall state (refresh\n\t\t\/\/ is read-only semantically) and we want to fail early if\n\t\t\/\/ this fails. If refresh isn't read-only, then this will have\n\t\t\/\/ caught a different bug.\n\t\tif idRefreshCheck != nil {\n\t\t\tif err := testIDRefresh(c, t, wd, step, idRefreshCheck); err != nil {\n\t\t\t\tt.Fatalf(\n\t\t\t\t\t\"[ERROR] Test: ID-only test failed: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage lease_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/googlecloudplatform\/gcsfuse\/lease\"\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\nfunc TestFileLeaser(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nconst limitBytes = 17\n\ntype FileLeaserTest struct {\n\tfl *lease.FileLeaser\n}\n\nvar _ SetUpInterface = &FileLeaserTest{}\n\nfunc init() { RegisterTestSuite(&FileLeaserTest{}) }\n\nfunc (t *FileLeaserTest) SetUp(ti *TestInfo) {\n\tt.fl = lease.NewFileLeaser(\"\", limitBytes)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *FileLeaserTest) ReadWriteLeaseInitialState() {\n\tAssertFalse(true, \"TODO\")\n}\n\nfunc (t *FileLeaserTest) ModifyThenObserveReadWriteLease() {\n\tAssertFalse(true, \"TODO\")\n}\n\nfunc (t *FileLeaserTest) DowngradeThenObserve() {\n\tAssertFalse(true, \"TODO\")\n}\n\nfunc (t *FileLeaserTest) DowngradeThenUpgradeThenObserve() {\n\tAssertFalse(true, \"TODO\")\n}\n\nfunc (t *FileLeaserTest) DowngradeAboveCapacity() {\n\tAssertFalse(true, \"TODO\")\n}\n\nfunc (t *FileLeaserTest) WriteCausesEviction() {\n\tAssertFalse(true, \"TODO\")\n}\n\nfunc (t *FileLeaserTest) WriteAtCausesEviction() {\n\tAssertFalse(true, \"TODO\")\n}\n\nfunc (t *FileLeaserTest) TruncateCausesEviction() {\n\tAssertFalse(true, \"TODO\")\n}\n\nfunc (t *FileLeaserTest) EvictionIsLRU() {\n\tAssertFalse(true, \"TODO\")\n}\n\nfunc (t *FileLeaserTest) NothingAvailableToEvict() {\n\tAssertFalse(true, \"TODO\")\n}\n<commit_msg>FileLeaserTest.ReadWriteLeaseInitialState<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage lease_test\n\nimport (\n\t\"io\"\n\t\"testing\"\n\n\t\"github.com\/googlecloudplatform\/gcsfuse\/lease\"\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\nfunc TestFileLeaser(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nconst limitBytes = 17\n\ntype FileLeaserTest struct {\n\tfl *lease.FileLeaser\n}\n\nvar _ SetUpInterface = &FileLeaserTest{}\n\nfunc init() { RegisterTestSuite(&FileLeaserTest{}) }\n\nfunc (t *FileLeaserTest) SetUp(ti *TestInfo) {\n\tt.fl = lease.NewFileLeaser(\"\", limitBytes)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *FileLeaserTest) ReadWriteLeaseInitialState() {\n\trwl := t.fl.NewFile()\n\tbuf := make([]byte, 1024)\n\n\tvar n int\n\tvar off int64\n\tvar err error\n\n\t\/\/ Size\n\tsize, err := rwl.Size()\n\tAssertEq(nil, err)\n\tExpectEq(0, size)\n\n\t\/\/ Seek\n\toff, err = rwl.Seek(0, 2)\n\tAssertEq(nil, err)\n\tExpectEq(0, off)\n\n\t\/\/ Read\n\tn, err = rwl.Read(buf)\n\tExpectEq(io.EOF, err)\n\tExpectEq(0, n)\n\n\t\/\/ ReadAt\n\tn, err = rwl.ReadAt(buf, 0)\n\tExpectEq(io.EOF, err)\n\tExpectEq(0, n)\n}\n\nfunc (t *FileLeaserTest) ModifyThenObserveReadWriteLease() {\n\tAssertFalse(true, \"TODO\")\n}\n\nfunc (t *FileLeaserTest) DowngradeThenObserve() {\n\tAssertFalse(true, \"TODO\")\n}\n\nfunc (t *FileLeaserTest) DowngradeThenUpgradeThenObserve() {\n\tAssertFalse(true, \"TODO\")\n}\n\nfunc (t *FileLeaserTest) DowngradeAboveCapacity() {\n\tAssertFalse(true, \"TODO\")\n}\n\nfunc (t *FileLeaserTest) WriteCausesEviction() {\n\tAssertFalse(true, \"TODO\")\n}\n\nfunc (t *FileLeaserTest) WriteAtCausesEviction() {\n\tAssertFalse(true, \"TODO\")\n}\n\nfunc (t *FileLeaserTest) TruncateCausesEviction() {\n\tAssertFalse(true, \"TODO\")\n}\n\nfunc (t *FileLeaserTest) EvictionIsLRU() {\n\tAssertFalse(true, \"TODO\")\n}\n\nfunc (t *FileLeaserTest) NothingAvailableToEvict() {\n\tAssertFalse(true, \"TODO\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage lease\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n)\n\n\/\/ A read-write wrapper around a file. Unlike a read lease, this cannot be\n\/\/ revoked.\n\/\/\n\/\/ All methods are safe for concurrent access.\ntype ReadWriteLease interface {\n\t\/\/ Methods with semantics matching *os.File.\n\tio.ReadWriteSeeker\n\tio.ReaderAt\n\tio.WriterAt\n\tTruncate(size int64) (err error)\n\n\t\/\/ Return the current size of the underlying file.\n\tSize() (size int64, err error)\n\n\t\/\/ Downgrade to a read lease, releasing any resources pinned by this lease to\n\t\/\/ the pool that may be revoked, as with any read lease. After downgrading,\n\t\/\/ this lease must not be used again.\n\tDowngrade() (rl ReadLease)\n}\n\ntype readWriteLease struct {\n\tmu sync.Mutex\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Dependencies\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ The leaser that issued this lease.\n\tleaser *fileLeaser\n\n\t\/\/ The underlying file, set to nil once downgraded.\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tfile *os.File\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ The cumulative number of bytes we have reported to the leaser using\n\t\/\/ fileLeaser.addReadWriteByteDelta. When the size changes, we report the\n\t\/\/ difference between the new size and this value.\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\treportedSize int64\n}\n\nvar _ ReadWriteLease = &readWriteLease{}\n\n\/\/ size is the size that the leaser has already recorded for us.\nfunc newReadWriteLease(\n\tleaser *fileLeaser,\n\tsize int64,\n\tfile *os.File) (rwl *readWriteLease) {\n\trwl = &readWriteLease{\n\t\tleaser: leaser,\n\t\tfile: file,\n\t\treportedSize: size,\n\t}\n\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Public interface\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Read(p []byte) (n int, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\tn, err = rwl.file.Read(p)\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Write(p []byte) (n int, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\t\/\/ Ensure that we reconcile our size when we're done.\n\tdefer rwl.reconcileSize()\n\n\t\/\/ Call through.\n\tn, err = rwl.file.Write(p)\n\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Seek(\n\toffset int64,\n\twhence int) (off int64, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\toff, err = rwl.file.Seek(offset, whence)\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) ReadAt(p []byte, off int64) (n int, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\tn, err = rwl.file.ReadAt(p, off)\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) WriteAt(p []byte, off int64) (n int, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\t\/\/ Ensure that we reconcile our size when we're done.\n\tdefer rwl.reconcileSize()\n\n\t\/\/ Call through.\n\tn, err = rwl.file.WriteAt(p, off)\n\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Truncate(size int64) (err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\t\/\/ Ensure that we reconcile our size when we're done.\n\tdefer rwl.reconcileSize()\n\n\t\/\/ Call through.\n\terr = rwl.file.Truncate(size)\n\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Size() (size int64, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\tsize, err = rwl.sizeLocked()\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Downgrade() (rl ReadLease) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\t\/\/ Ensure that we will crash if used again.\n\tf := rwl.file\n\trwl.file = nil\n\n\t\/\/ On error, log an error then return a read lease that looks like it was\n\t\/\/ born revoked.\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error downgrading: %v\", err)\n\t\t\trl = &alwaysRevokedReadLease{}\n\t\t}\n\t}()\n\n\t\/\/ Find the current size under the lock.\n\tsize, err := rwl.sizeLocked()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"sizeLocked: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Call the leaser.\n\trl = rwl.leaser.downgrade(rwl, size, f)\n\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ LOCKS_REQUIRED(rwl.mu)\nfunc (rwl *readWriteLease) sizeLocked() (size int64, err error) {\n\t\/\/ Stat the file to get its size.\n\tfi, err := rwl.file.Stat()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Stat: %v\", err)\n\t\treturn\n\t}\n\n\tsize = fi.Size()\n\treturn\n}\n\n\/\/ Notify the leaser if our size has changed. Log errors when we fail to find\n\/\/ our size.\n\/\/\n\/\/ LOCKS_REQUIRED(rwl.mu)\n\/\/ LOCKS_EXCLUDED(rwl.leaser.mu)\nfunc (rwl *readWriteLease) reconcileSize() {\n\tvar err error\n\n\t\/\/ Find our size.\n\tsize, err := rwl.sizeLocked()\n\tif err != nil {\n\t\tlog.Println(\"Error getting size for reconciliation:\", err)\n\t\treturn\n\t}\n\n\t\/\/ Let the leaser know about any change.\n\tdelta := size - rwl.reportedSize\n\tif delta != 0 {\n\t\trwl.leaser.addReadWriteByteDelta(delta)\n\t\trwl.reportedSize = size\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ alwaysRevokedReadLease\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype alwaysRevokedReadLease struct {\n\tsize int64\n}\n\nfunc (rl *alwaysRevokedReadLease) Read(p []byte) (n int, err error) {\n\terr = &RevokedError{}\n\treturn\n}\n\nfunc (rl *alwaysRevokedReadLease) Seek(\n\toffset int64,\n\twhence int) (off int64, err error) {\n\terr = &RevokedError{}\n\treturn\n}\n\nfunc (rl *alwaysRevokedReadLease) ReadAt(\n\tp []byte, off int64) (n int, err error) {\n\terr = &RevokedError{}\n\treturn\n}\n\nfunc (rl *alwaysRevokedReadLease) Size() (size int64) {\n\tsize = rl.size\n\treturn\n}\n\nfunc (rl *alwaysRevokedReadLease) Revoked() (revoked bool) {\n\trevoked = true\n\treturn\n}\n\nfunc (rl *alwaysRevokedReadLease) Upgrade() (rwl ReadWriteLease, err error) {\n\terr = &RevokedError{}\n\treturn\n}\n\nfunc (rl *alwaysRevokedReadLease) Revoke() {\n}\n<commit_msg>Fixed a bug.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage lease\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n)\n\n\/\/ A read-write wrapper around a file. Unlike a read lease, this cannot be\n\/\/ revoked.\n\/\/\n\/\/ All methods are safe for concurrent access.\ntype ReadWriteLease interface {\n\t\/\/ Methods with semantics matching *os.File.\n\tio.ReadWriteSeeker\n\tio.ReaderAt\n\tio.WriterAt\n\tTruncate(size int64) (err error)\n\n\t\/\/ Return the current size of the underlying file.\n\tSize() (size int64, err error)\n\n\t\/\/ Downgrade to a read lease, releasing any resources pinned by this lease to\n\t\/\/ the pool that may be revoked, as with any read lease. After downgrading,\n\t\/\/ this lease must not be used again.\n\tDowngrade() (rl ReadLease)\n}\n\ntype readWriteLease struct {\n\tmu sync.Mutex\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Dependencies\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ The leaser that issued this lease.\n\tleaser *fileLeaser\n\n\t\/\/ The underlying file, set to nil once downgraded.\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tfile *os.File\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Mutable state\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ The cumulative number of bytes we have reported to the leaser using\n\t\/\/ fileLeaser.addReadWriteByteDelta. When the size changes, we report the\n\t\/\/ difference between the new size and this value.\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\treportedSize int64\n}\n\nvar _ ReadWriteLease = &readWriteLease{}\n\n\/\/ size is the size that the leaser has already recorded for us.\nfunc newReadWriteLease(\n\tleaser *fileLeaser,\n\tsize int64,\n\tfile *os.File) (rwl *readWriteLease) {\n\trwl = &readWriteLease{\n\t\tleaser: leaser,\n\t\tfile: file,\n\t\treportedSize: size,\n\t}\n\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Public interface\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Read(p []byte) (n int, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\tn, err = rwl.file.Read(p)\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Write(p []byte) (n int, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\t\/\/ Ensure that we reconcile our size when we're done.\n\tdefer rwl.reconcileSize()\n\n\t\/\/ Call through.\n\tn, err = rwl.file.Write(p)\n\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Seek(\n\toffset int64,\n\twhence int) (off int64, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\toff, err = rwl.file.Seek(offset, whence)\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) ReadAt(p []byte, off int64) (n int, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\tn, err = rwl.file.ReadAt(p, off)\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) WriteAt(p []byte, off int64) (n int, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\t\/\/ Ensure that we reconcile our size when we're done.\n\tdefer rwl.reconcileSize()\n\n\t\/\/ Call through.\n\tn, err = rwl.file.WriteAt(p, off)\n\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Truncate(size int64) (err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\t\/\/ Ensure that we reconcile our size when we're done.\n\tdefer rwl.reconcileSize()\n\n\t\/\/ Call through.\n\terr = rwl.file.Truncate(size)\n\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Size() (size int64, err error) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\tsize, err = rwl.sizeLocked()\n\treturn\n}\n\n\/\/ LOCKS_EXCLUDED(rwl.mu)\nfunc (rwl *readWriteLease) Downgrade() (rl ReadLease) {\n\trwl.mu.Lock()\n\tdefer rwl.mu.Unlock()\n\n\t\/\/ Ensure that we will crash if used again.\n\tdefer func() {\n\t\trwl.file = nil\n\t}()\n\n\t\/\/ On error, log an error then return a read lease that looks like it was\n\t\/\/ born revoked.\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error downgrading: %v\", err)\n\t\t\trl = &alwaysRevokedReadLease{}\n\t\t}\n\t}()\n\n\t\/\/ Find the current size under the lock.\n\tsize, err := rwl.sizeLocked()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"sizeLocked: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Call the leaser.\n\trl = rwl.leaser.downgrade(rwl, size, rwl.file)\n\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ LOCKS_REQUIRED(rwl.mu)\nfunc (rwl *readWriteLease) sizeLocked() (size int64, err error) {\n\t\/\/ Stat the file to get its size.\n\tfi, err := rwl.file.Stat()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Stat: %v\", err)\n\t\treturn\n\t}\n\n\tsize = fi.Size()\n\treturn\n}\n\n\/\/ Notify the leaser if our size has changed. Log errors when we fail to find\n\/\/ our size.\n\/\/\n\/\/ LOCKS_REQUIRED(rwl.mu)\n\/\/ LOCKS_EXCLUDED(rwl.leaser.mu)\nfunc (rwl *readWriteLease) reconcileSize() {\n\tvar err error\n\n\t\/\/ Find our size.\n\tsize, err := rwl.sizeLocked()\n\tif err != nil {\n\t\tlog.Println(\"Error getting size for reconciliation:\", err)\n\t\treturn\n\t}\n\n\t\/\/ Let the leaser know about any change.\n\tdelta := size - rwl.reportedSize\n\tif delta != 0 {\n\t\trwl.leaser.addReadWriteByteDelta(delta)\n\t\trwl.reportedSize = size\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ alwaysRevokedReadLease\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype alwaysRevokedReadLease struct {\n\tsize int64\n}\n\nfunc (rl *alwaysRevokedReadLease) Read(p []byte) (n int, err error) {\n\terr = &RevokedError{}\n\treturn\n}\n\nfunc (rl *alwaysRevokedReadLease) Seek(\n\toffset int64,\n\twhence int) (off int64, err error) {\n\terr = &RevokedError{}\n\treturn\n}\n\nfunc (rl *alwaysRevokedReadLease) ReadAt(\n\tp []byte, off int64) (n int, err error) {\n\terr = &RevokedError{}\n\treturn\n}\n\nfunc (rl *alwaysRevokedReadLease) Size() (size int64) {\n\tsize = rl.size\n\treturn\n}\n\nfunc (rl *alwaysRevokedReadLease) Revoked() (revoked bool) {\n\trevoked = true\n\treturn\n}\n\nfunc (rl *alwaysRevokedReadLease) Upgrade() (rwl ReadWriteLease, err error) {\n\terr = &RevokedError{}\n\treturn\n}\n\nfunc (rl *alwaysRevokedReadLease) Revoke() {\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Sporting Exchange Limited. All rights reserved.\n\/\/ Use of this source code is governed by a free license that can be\n\/\/ found in the LICENSE file.\n\npackage nitro\n\nimport (\n\t\"crypto\/sha1\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"expvar\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"path\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ maxTrips limits the number of data round trips to the Nitro API on behalf of\n\/\/ single call. In the extreme case, a total of 2*maxTrips will be made: two\n\/\/ data round trips, and two corresponding cookie refresh trips.\nconst maxTrips = 2\n\n\/\/ responseTimeout limits the wait time for response of single HTTP round trip.\nconst responseTimeout = 5 * time.Second\n\n\/\/ MaxConnsPerHost limits the number of connections simultaneously established\n\/\/ by Client. The limit exists to help avoid nitro error 446 (Connection limit\n\/\/ to CFE exceeded).\nconst MaxConnsPerHost = 16\n\n\/\/ Nitro-level status codes\nconst (\n\tstatusOK = 0\n\tstatusAuth1 = 354 \/\/ Invalid username or password\n\tstatusAuth2 = 444 \/\/ Session expired or killed. Please login again\n\tstatusAuth3 = 2138 \/\/ Not authorized to execute this command\n)\n\n\/\/ Statistics\nvar (\n\tstatClientRequests = expvar.NewMap(\"nitro.client.Requests\")\n\tstatClientResponses = expvar.NewMap(\"nitro.client.Responses\")\n\tstatClientErrors = expvar.NewMap(\"nitro.client.Errors\")\n\tstatClientMillis = expvar.NewMap(\"nitro.client.Millis\")\n)\n\n\/\/ Client wraps http.Client in order to provide access to the Nitro API.\ntype Client struct {\n\tclient *http.Client\n\tconn connLimit\n\taddr string\n\tauth struct {\n\t\tusername string\n\t\tpassword string\n\t\tcookie cookie\n\t}\n\n\tConfig ConfigService\n\tStat StatService\n}\n\n\/\/ NewClient returns Nitro API client that will request all resources from addr\n\/\/ using the provided HTTP client.\n\/\/\n\/\/ For maximum efficiency, pass custom http.Client with http.Transport's\n\/\/ MaxIdleConnsPerHost set to MaxConnsPerHost. If nil client is passed, such\n\/\/ optimal client will be allocated automatically.\n\/\/\n\/\/ The client remains valid for use despite any errors encountered.\nfunc NewClient(client *http.Client, addr, username, password string) *Client {\n\tif client == nil {\n\t\tclient = &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tMaxIdleConnsPerHost: MaxConnsPerHost,\n\t\t\t},\n\t\t}\n\t}\n\tc := &Client{\n\t\tclient: client,\n\t\tconn: make(connLimit, MaxConnsPerHost),\n\t\taddr: addr,\n\t}\n\tc.Stat.client = c\n\tc.Config.client = c\n\tc.auth.username = username\n\tc.auth.password = password\n\treturn c\n}\n\n\/\/ Close releases all resources held by the Client.\nfunc (c *Client) Close() {\n\tc.client.Transport.(*http.Transport).CloseIdleConnections()\n}\n\nfunc (c *Client) url() string {\n\treturn fmt.Sprintf(\"https:\/\/%s\/nitro\/v1\", c.addr)\n}\n\nfunc (c *Client) do(r response, path string) (err error) {\n\tstatKey := fmt.Sprintf(\"addr=%s path=%s\", c.addr, cleanPath(path))\n\tstart := time.Now()\n\tdefer func() {\n\t\tstatKey += fmt.Sprintf(\" error=%v\", err != nil)\n\t\tstatClientResponses.Add(statKey, 1)\n\t\tstatClientMillis.Add(statKey, time.Since(start).Nanoseconds()\/1e6)\n\t}()\n\n\tvar errors []error\n\n\tfor i := 0; i < maxTrips; i++ {\n\t\tif err := c.auth.cookie.Refresh(c); err != nil {\n\t\t\terrors = append(errors, err)\n\t\t\tbreak\n\t\t}\n\t\treq := newRequest(c.url(), path, c.auth.cookie.Get())\n\t\tif err := c.roundtrip(r, req); err != nil {\n\t\t\terrors = append(errors, err)\n\t\t\tif _, ok := err.(authError); ok {\n\t\t\t\tlog.Printf(\"nitro: attempting cookie refresh due to authentication error: %q\", err)\n\t\t\t\tc.auth.cookie.Reset()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\terrors = nil\n\t\tbreak\n\t}\n\n\tif len(errors) > 0 {\n\t\terr := fmt.Errorf(\"request error: %v\", join(errors))\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *Client) roundtrip(r response, req *http.Request) error {\n\tc.conn.Wait()\n\tdefer c.conn.Done()\n\n\trespChan := make(chan httpResponse, 1)\n\n\tgo func() {\n\t\tresp, err := c.client.Do(req)\n\t\trespChan <- httpResponse{resp, err}\n\t}()\n\n\tvar resp httpResponse\n\tselect {\n\tcase <-time.After(responseTimeout):\n\t\tc.client.Transport.(*http.Transport).CancelRequest(req)\n\t\tstatClientErrors.Add(\"type=httpTimeout\", 1)\n\t\treturn fmt.Errorf(\"Get %s: response timeout\", req.URL)\n\n\tcase resp = <-respChan:\n\t\t\/\/ ok\n\t}\n\n\tif resp.error != nil {\n\t\tstatClientErrors.Add(\"type=httpTransport\", 1)\n\t\treturn resp.error\n\t}\n\n\tdefer func() {\n\t\tioutil.ReadAll(resp.Body)\n\t\tresp.Body.Close()\n\t}()\n\n\tswitch code := resp.StatusCode; code {\n\tdefault:\n\t\terr := fmt.Errorf(\"got http code %d (%s)\", code, http.StatusText(code))\n\t\tstatClientErrors.Add(\"type=httpStatus\", 1)\n\t\treturn err\n\n\tcase http.StatusUnauthorized:\n\t\terr := fmt.Errorf(\"got http code %d (%s)\", code, http.StatusText(code))\n\t\tstatClientErrors.Add(\"type=httpStatus\", 1)\n\t\treturn authError{err}\n\n\tcase http.StatusOK:\n\t\t\/\/ ok\n\n\tcase http.StatusCreated:\n\t\t\/\/ ok\n\t}\n\n\treader := errorChecker{Reader: resp.Body}\n\tif err := json.NewDecoder(&reader).Decode(r); err != nil {\n\t\tif reader.Err {\n\t\t\tstatClientErrors.Add(\"type=httpTransport\", 1)\n\t\t} else {\n\t\t\tstatClientErrors.Add(\"type=JSON\", 1)\n\t\t}\n\t\treturn err\n\t}\n\n\tswitch code := r.errorCode(); code {\n\tdefault:\n\t\terr := fmt.Errorf(\"Get %s: server error: %s (code %d)\",\n\t\t\tresp.Request.URL, r.message(), code)\n\t\tstatClientErrors.Add(\"type=Server\", 1)\n\t\treturn err\n\n\tcase statusAuth1, statusAuth2, statusAuth3:\n\t\terr := fmt.Errorf(\"Get %s: server auth error: %s (code %d)\",\n\t\t\tresp.Request.URL, r.message(), code)\n\t\tstatClientErrors.Add(\"type=ServerAuth\", 1)\n\t\treturn authError{err}\n\n\tcase statusOK:\n\t\t\/\/ ok\n\t}\n\n\treturn nil\n}\n\ntype cookie struct {\n\t*http.Cookie\n\tmu sync.Mutex\n}\n\nfunc (c *cookie) Get() *http.Cookie {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\treturn c.Cookie\n}\n\nfunc (c *cookie) Reset() {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tc.Cookie = nil\n}\n\nfunc (c *cookie) Refresh(client *Client) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif c.Cookie != nil {\n\t\treturn nil\n\t}\n\tcookie, err := newCookie(client)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cookie refresh error: %v\", err)\n\t}\n\tc.Cookie = cookie\n\tlog.Printf(\"nitro: cookie refresh hash=%s\", c)\n\treturn nil\n}\n\n\/\/ String returns a crypto hash of the cookie, which prevents leaking auth\n\/\/ secret via the log file.\nfunc (c *cookie) String() string {\n\tin := []byte(c.Cookie.String())\n\tout := sha1.Sum(in)\n\treturn hex.EncodeToString(out[:])\n}\n\nfunc newCookie(c *Client) (*http.Cookie, error) {\n\tvar r responseSessionID\n\treq := newSessionRequest(c.url(), c.auth.username, c.auth.password)\n\tif err := c.roundtrip(&r, req); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &http.Cookie{Name: \"sessionid\", Value: r.SessionID}, nil\n}\n\nfunc newSessionRequest(url, username, password string) *http.Request {\n\tdumper := &requestDumper{}\n\tclient := &http.Client{Transport: dumper}\n\tclient.PostForm(url, newSessionForm(username, password))\n\treturn dumper.req\n}\n\nfunc newSessionForm(username, password string) url.Values {\n\tvar data struct {\n\t\tLogin struct {\n\t\t\tUsername string `json:\"username\"`\n\t\t\tPassword string `json:\"password\"`\n\t\t} `json:\"login\"`\n\t}\n\tdata.Login.Username = username\n\tdata.Login.Password = password\n\tbuf, err := json.Marshal(data)\n\tif err != nil {\n\t\tlog.Panicf(\"internal error: %v\", err)\n\t}\n\treturn url.Values{\"object\": []string{string(buf)}}\n}\n\nfunc newRequest(url, path string, cookie *http.Cookie) *http.Request {\n\treq, err := http.NewRequest(\"GET\", fmt.Sprintf(\"%s\/%s\", url, path), nil)\n\tif err != nil {\n\t\tlog.Panicf(\"internal error: %v\", err)\n\t}\n\treq.AddCookie(cookie)\n\treturn req\n}\n\n\/\/ cleanPath ensures path p is safe for use as expvar tag value.\nfunc cleanPath(p string) string {\n\tswitch {\n\tdefault:\n\t\treturn p\n\n\tcase strings.HasPrefix(p, \"config\/\"):\n\t\treturn path.Dir(p)\n\t}\n}\n\n\/\/ connLimit is a semaphore used to limit the number of active connections.\ntype connLimit chan bool\n\nfunc (cl connLimit) Wait() { cl <- true }\nfunc (cl connLimit) Done() { <-cl }\n\ntype authError struct {\n\terror\n}\n\ntype httpResponse struct {\n\t*http.Response\n\terror\n}\n\n\/\/ requestDumper is a http.RoundTripper that can be used to intercept\n\/\/ requests created by http.Client.\ntype requestDumper struct {\n\treq *http.Request\n}\n\nfunc (rd *requestDumper) RoundTrip(req *http.Request) (*http.Response, error) {\n\trd.req = req\n\treturn nil, errors.New(\"dummy transport\")\n}\n\n\/\/ join combines multiple errors.\nfunc join(errors []error) error {\n\tvar s []string\n\tfor _, err := range errors {\n\t\ts = append(s, err.Error())\n\t}\n\terr := fmt.Errorf(\"%s\", strings.Join(s, \": \"))\n\treturn err\n}\n\n\/\/ errorChecker sets Err to true if the underlying io.Reader encountered an\n\/\/ error.\ntype errorChecker struct {\n\tio.Reader\n\tErr bool\n}\n\nfunc (r *errorChecker) Read(b []byte) (int, error) {\n\tn, err := r.Reader.Read(b)\n\tif err != nil {\n\t\tr.Err = true\n\t}\n\treturn n, err\n}\n<commit_msg>collect-netscaler: change auth URL<commit_after>\/\/ Copyright 2014 The Sporting Exchange Limited. All rights reserved.\n\/\/ Use of this source code is governed by a free license that can be\n\/\/ found in the LICENSE file.\n\npackage nitro\n\nimport (\n\t\"crypto\/sha1\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"expvar\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"path\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ maxTrips limits the number of data round trips to the Nitro API on behalf of\n\/\/ single call. In the extreme case, a total of 2*maxTrips will be made: two\n\/\/ data round trips, and two corresponding cookie refresh trips.\nconst maxTrips = 2\n\n\/\/ responseTimeout limits the wait time for response of single HTTP round trip.\nconst responseTimeout = 5 * time.Second\n\n\/\/ MaxConnsPerHost limits the number of connections simultaneously established\n\/\/ by Client. The limit exists to help avoid nitro error 446 (Connection limit\n\/\/ to CFE exceeded).\nconst MaxConnsPerHost = 16\n\n\/\/ Nitro-level status codes\nconst (\n\tstatusOK = 0\n\tstatusAuth1 = 354 \/\/ Invalid username or password\n\tstatusAuth2 = 444 \/\/ Session expired or killed. Please login again\n\tstatusAuth3 = 2138 \/\/ Not authorized to execute this command\n)\n\n\/\/ Statistics\nvar (\n\tstatClientRequests = expvar.NewMap(\"nitro.client.Requests\")\n\tstatClientResponses = expvar.NewMap(\"nitro.client.Responses\")\n\tstatClientErrors = expvar.NewMap(\"nitro.client.Errors\")\n\tstatClientMillis = expvar.NewMap(\"nitro.client.Millis\")\n)\n\n\/\/ Client wraps http.Client in order to provide access to the Nitro API.\ntype Client struct {\n\tclient *http.Client\n\tconn connLimit\n\taddr string\n\tauth struct {\n\t\tusername string\n\t\tpassword string\n\t\tcookie cookie\n\t}\n\n\tConfig ConfigService\n\tStat StatService\n}\n\n\/\/ NewClient returns Nitro API client that will request all resources from addr\n\/\/ using the provided HTTP client.\n\/\/\n\/\/ For maximum efficiency, pass custom http.Client with http.Transport's\n\/\/ MaxIdleConnsPerHost set to MaxConnsPerHost. If nil client is passed, such\n\/\/ optimal client will be allocated automatically.\n\/\/\n\/\/ The client remains valid for use despite any errors encountered.\nfunc NewClient(client *http.Client, addr, username, password string) *Client {\n\tif client == nil {\n\t\tclient = &http.Client{\n\t\t\tTransport: &http.Transport{\n\t\t\t\tMaxIdleConnsPerHost: MaxConnsPerHost,\n\t\t\t},\n\t\t}\n\t}\n\tc := &Client{\n\t\tclient: client,\n\t\tconn: make(connLimit, MaxConnsPerHost),\n\t\taddr: addr,\n\t}\n\tc.Stat.client = c\n\tc.Config.client = c\n\tc.auth.username = username\n\tc.auth.password = password\n\treturn c\n}\n\n\/\/ Close releases all resources held by the Client.\nfunc (c *Client) Close() {\n\tc.client.Transport.(*http.Transport).CloseIdleConnections()\n}\n\nfunc (c *Client) url() string {\n\treturn fmt.Sprintf(\"https:\/\/%s\/nitro\/v1\", c.addr)\n}\n\nfunc (c *Client) do(r response, path string) (err error) {\n\tstatKey := fmt.Sprintf(\"addr=%s path=%s\", c.addr, cleanPath(path))\n\tstart := time.Now()\n\tdefer func() {\n\t\tstatKey += fmt.Sprintf(\" error=%v\", err != nil)\n\t\tstatClientResponses.Add(statKey, 1)\n\t\tstatClientMillis.Add(statKey, time.Since(start).Nanoseconds()\/1e6)\n\t}()\n\n\tvar errors []error\n\n\tfor i := 0; i < maxTrips; i++ {\n\t\tif err := c.auth.cookie.Refresh(c); err != nil {\n\t\t\terrors = append(errors, err)\n\t\t\tbreak\n\t\t}\n\t\treq := newRequest(c.url(), path, c.auth.cookie.Get())\n\t\tif err := c.roundtrip(r, req); err != nil {\n\t\t\terrors = append(errors, err)\n\t\t\tif _, ok := err.(authError); ok {\n\t\t\t\tlog.Printf(\"nitro: attempting cookie refresh due to authentication error: %q\", err)\n\t\t\t\tc.auth.cookie.Reset()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\terrors = nil\n\t\tbreak\n\t}\n\n\tif len(errors) > 0 {\n\t\terr := fmt.Errorf(\"request error: %v\", join(errors))\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *Client) roundtrip(r response, req *http.Request) error {\n\tc.conn.Wait()\n\tdefer c.conn.Done()\n\n\trespChan := make(chan httpResponse, 1)\n\n\tgo func() {\n\t\tresp, err := c.client.Do(req)\n\t\trespChan <- httpResponse{resp, err}\n\t}()\n\n\tvar resp httpResponse\n\tselect {\n\tcase <-time.After(responseTimeout):\n\t\tc.client.Transport.(*http.Transport).CancelRequest(req)\n\t\tstatClientErrors.Add(\"type=httpTimeout\", 1)\n\t\treturn fmt.Errorf(\"Get %s: response timeout\", req.URL)\n\n\tcase resp = <-respChan:\n\t\t\/\/ ok\n\t}\n\n\tif resp.error != nil {\n\t\tstatClientErrors.Add(\"type=httpTransport\", 1)\n\t\treturn resp.error\n\t}\n\n\tdefer func() {\n\t\tioutil.ReadAll(resp.Body)\n\t\tresp.Body.Close()\n\t}()\n\n\tswitch code := resp.StatusCode; code {\n\tdefault:\n\t\terr := fmt.Errorf(\"got http code %d (%s)\", code, http.StatusText(code))\n\t\tstatClientErrors.Add(\"type=httpStatus\", 1)\n\t\treturn err\n\n\tcase http.StatusUnauthorized:\n\t\terr := fmt.Errorf(\"got http code %d (%s)\", code, http.StatusText(code))\n\t\tstatClientErrors.Add(\"type=httpStatus\", 1)\n\t\treturn authError{err}\n\n\tcase http.StatusOK:\n\t\t\/\/ ok\n\n\tcase http.StatusCreated:\n\t\t\/\/ ok\n\t}\n\n\treader := errorChecker{Reader: resp.Body}\n\tif err := json.NewDecoder(&reader).Decode(r); err != nil {\n\t\tif reader.Err {\n\t\t\tstatClientErrors.Add(\"type=httpTransport\", 1)\n\t\t} else {\n\t\t\tstatClientErrors.Add(\"type=JSON\", 1)\n\t\t}\n\t\treturn err\n\t}\n\n\tswitch code := r.errorCode(); code {\n\tdefault:\n\t\terr := fmt.Errorf(\"Get %s: server error: %s (code %d)\",\n\t\t\tresp.Request.URL, r.message(), code)\n\t\tstatClientErrors.Add(\"type=Server\", 1)\n\t\treturn err\n\n\tcase statusAuth1, statusAuth2, statusAuth3:\n\t\terr := fmt.Errorf(\"Get %s: server auth error: %s (code %d)\",\n\t\t\tresp.Request.URL, r.message(), code)\n\t\tstatClientErrors.Add(\"type=ServerAuth\", 1)\n\t\treturn authError{err}\n\n\tcase statusOK:\n\t\t\/\/ ok\n\t}\n\n\treturn nil\n}\n\ntype cookie struct {\n\t*http.Cookie\n\tmu sync.Mutex\n}\n\nfunc (c *cookie) Get() *http.Cookie {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\treturn c.Cookie\n}\n\nfunc (c *cookie) Reset() {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tc.Cookie = nil\n}\n\nfunc (c *cookie) Refresh(client *Client) error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif c.Cookie != nil {\n\t\treturn nil\n\t}\n\tcookie, err := newCookie(client)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cookie refresh error: %v\", err)\n\t}\n\tc.Cookie = cookie\n\tlog.Printf(\"nitro: cookie refresh hash=%s\", c)\n\treturn nil\n}\n\n\/\/ String returns a crypto hash of the cookie, which prevents leaking auth\n\/\/ secret via the log file.\nfunc (c *cookie) String() string {\n\tin := []byte(c.Cookie.String())\n\tout := sha1.Sum(in)\n\treturn hex.EncodeToString(out[:])\n}\n\nfunc newCookie(c *Client) (*http.Cookie, error) {\n\tvar r responseSessionID\n\treq := newSessionRequest(c.url()+\"\/config\/login\", c.auth.username, c.auth.password)\n\tif err := c.roundtrip(&r, req); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &http.Cookie{Name: \"sessionid\", Value: r.SessionID}, nil\n}\n\nfunc newSessionRequest(url, username, password string) *http.Request {\n\tdumper := &requestDumper{}\n\tclient := &http.Client{Transport: dumper}\n\tclient.PostForm(url, newSessionForm(username, password))\n\treturn dumper.req\n}\n\nfunc newSessionForm(username, password string) url.Values {\n\tvar data struct {\n\t\tLogin struct {\n\t\t\tUsername string `json:\"username\"`\n\t\t\tPassword string `json:\"password\"`\n\t\t} `json:\"login\"`\n\t}\n\tdata.Login.Username = username\n\tdata.Login.Password = password\n\tbuf, err := json.Marshal(data)\n\tif err != nil {\n\t\tlog.Panicf(\"internal error: %v\", err)\n\t}\n\treturn url.Values{\"object\": []string{string(buf)}}\n}\n\nfunc newRequest(url, path string, cookie *http.Cookie) *http.Request {\n\treq, err := http.NewRequest(\"GET\", fmt.Sprintf(\"%s\/%s\", url, path), nil)\n\tif err != nil {\n\t\tlog.Panicf(\"internal error: %v\", err)\n\t}\n\treq.AddCookie(cookie)\n\treturn req\n}\n\n\/\/ cleanPath ensures path p is safe for use as expvar tag value.\nfunc cleanPath(p string) string {\n\tswitch {\n\tdefault:\n\t\treturn p\n\n\tcase strings.HasPrefix(p, \"config\/\"):\n\t\treturn path.Dir(p)\n\t}\n}\n\n\/\/ connLimit is a semaphore used to limit the number of active connections.\ntype connLimit chan bool\n\nfunc (cl connLimit) Wait() { cl <- true }\nfunc (cl connLimit) Done() { <-cl }\n\ntype authError struct {\n\terror\n}\n\ntype httpResponse struct {\n\t*http.Response\n\terror\n}\n\n\/\/ requestDumper is a http.RoundTripper that can be used to intercept\n\/\/ requests created by http.Client.\ntype requestDumper struct {\n\treq *http.Request\n}\n\nfunc (rd *requestDumper) RoundTrip(req *http.Request) (*http.Response, error) {\n\trd.req = req\n\treturn nil, errors.New(\"dummy transport\")\n}\n\n\/\/ join combines multiple errors.\nfunc join(errors []error) error {\n\tvar s []string\n\tfor _, err := range errors {\n\t\ts = append(s, err.Error())\n\t}\n\terr := fmt.Errorf(\"%s\", strings.Join(s, \": \"))\n\treturn err\n}\n\n\/\/ errorChecker sets Err to true if the underlying io.Reader encountered an\n\/\/ error.\ntype errorChecker struct {\n\tio.Reader\n\tErr bool\n}\n\nfunc (r *errorChecker) Read(b []byte) (int, error) {\n\tn, err := r.Reader.Read(b)\n\tif err != nil {\n\t\tr.Err = true\n\t}\n\treturn n, err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage flushfs\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com\/jacobsa\/fuse\"\n\t\"github.com\/jacobsa\/fuse\/fuseutil\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ Create a file system whose sole contents are a file named \"foo\" and a\n\/\/ directory named \"bar\".\n\/\/\n\/\/ The file may be opened for reading and\/or writing. Its initial contents are\n\/\/ empty. Whenever a flush or fsync is received, the supplied function will be\n\/\/ called with the current contents of the file and its status returned.\n\/\/\n\/\/ The directory cannot be modified.\nfunc NewFileSystem(\n\treportFlush func(string) error,\n\treportFsync func(string) error) (fs fuse.FileSystem, err error) {\n\tfs = &flushFS{\n\t\treportFlush: reportFlush,\n\t\treportFsync: reportFsync,\n\t}\n\n\treturn\n}\n\nconst (\n\tfooID = fuse.RootInodeID + 1 + iota\n\tbarID\n)\n\ntype flushFS struct {\n\tfuseutil.NotImplementedFileSystem\n\treportFlush func(string) error\n\treportFsync func(string) error\n\n\tmu sync.Mutex\n\tfooContents []byte \/\/ GUARDED_BY(mu)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *flushFS) rootAttributes() fuse.InodeAttributes {\n\treturn fuse.InodeAttributes{\n\t\tNlink: 1,\n\t\tMode: 0777 | os.ModeDir,\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *flushFS) fooAttributes() fuse.InodeAttributes {\n\treturn fuse.InodeAttributes{\n\t\tNlink: 1,\n\t\tMode: 0777,\n\t\tSize: uint64(len(fs.fooContents)),\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *flushFS) barAttributes() fuse.InodeAttributes {\n\treturn fuse.InodeAttributes{\n\t\tNlink: 1,\n\t\tMode: 0777 | os.ModeDir,\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ File system methods\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (fs *flushFS) Init(\n\tctx context.Context,\n\treq *fuse.InitRequest) (\n\tresp *fuse.InitResponse, err error) {\n\tresp = &fuse.InitResponse{}\n\treturn\n}\n\nfunc (fs *flushFS) LookUpInode(\n\tctx context.Context,\n\treq *fuse.LookUpInodeRequest) (\n\tresp *fuse.LookUpInodeResponse, err error) {\n\tresp = &fuse.LookUpInodeResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Sanity check.\n\tif req.Parent != fuse.RootInodeID {\n\t\terr = fuse.ENOENT\n\t\treturn\n\t}\n\n\t\/\/ Set up the entry.\n\tswitch req.Name {\n\tcase \"foo\":\n\t\tresp.Entry = fuse.ChildInodeEntry{\n\t\t\tChild: fooID,\n\t\t\tAttributes: fs.fooAttributes(),\n\t\t}\n\n\tcase \"bar\":\n\t\tresp.Entry = fuse.ChildInodeEntry{\n\t\t\tChild: barID,\n\t\t\tAttributes: fs.barAttributes(),\n\t\t}\n\n\tdefault:\n\t\terr = fuse.ENOENT\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (fs *flushFS) GetInodeAttributes(\n\tctx context.Context,\n\treq *fuse.GetInodeAttributesRequest) (\n\tresp *fuse.GetInodeAttributesResponse, err error) {\n\tresp = &fuse.GetInodeAttributesResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\tswitch req.Inode {\n\tcase fuse.RootInodeID:\n\t\tresp.Attributes = fs.rootAttributes()\n\t\treturn\n\n\tcase fooID:\n\t\tresp.Attributes = fs.fooAttributes()\n\t\treturn\n\n\tcase barID:\n\t\tresp.Attributes = fs.barAttributes()\n\t\treturn\n\n\tdefault:\n\t\terr = fuse.ENOENT\n\t\treturn\n\t}\n}\n\nfunc (fs *flushFS) OpenFile(\n\tctx context.Context,\n\treq *fuse.OpenFileRequest) (\n\tresp *fuse.OpenFileResponse, err error) {\n\tresp = &fuse.OpenFileResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Sanity check.\n\tif req.Inode != fooID {\n\t\terr = fuse.ENOSYS\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (fs *flushFS) ReadFile(\n\tctx context.Context,\n\treq *fuse.ReadFileRequest) (\n\tresp *fuse.ReadFileResponse, err error) {\n\tresp = &fuse.ReadFileResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Ensure the offset is in range.\n\tif req.Offset > int64(len(fs.fooContents)) {\n\t\treturn\n\t}\n\n\t\/\/ Read what we can.\n\tresp.Data = make([]byte, req.Size)\n\tcopy(resp.Data, fs.fooContents[req.Offset:])\n\n\treturn\n}\n\nfunc (fs *flushFS) WriteFile(\n\tctx context.Context,\n\treq *fuse.WriteFileRequest) (\n\tresp *fuse.WriteFileResponse, err error) {\n\tresp = &fuse.WriteFileResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Ensure that the contents slice is long enough.\n\tnewLen := int(req.Offset) + len(req.Data)\n\tif len(fs.fooContents) < newLen {\n\t\tpadding := make([]byte, newLen-len(fs.fooContents))\n\t\tfs.fooContents = append(fs.fooContents, padding...)\n\t}\n\n\t\/\/ Copy in the data.\n\tn := copy(fs.fooContents[req.Offset:], req.Data)\n\n\t\/\/ Sanity check.\n\tif n != len(req.Data) {\n\t\tpanic(fmt.Sprintf(\"Unexpected short copy: %v\", n))\n\t}\n\n\treturn\n}\n\nfunc (fs *flushFS) SyncFile(\n\tctx context.Context,\n\treq *fuse.SyncFileRequest) (\n\tresp *fuse.SyncFileResponse, err error) {\n\tresp = &fuse.SyncFileResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\terr = fs.reportFsync(string(fs.fooContents))\n\treturn\n}\n\nfunc (fs *flushFS) FlushFile(\n\tctx context.Context,\n\treq *fuse.FlushFileRequest) (\n\tresp *fuse.FlushFileResponse, err error) {\n\tresp = &fuse.FlushFileResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\terr = fs.reportFlush(string(fs.fooContents))\n\treturn\n}\n\nfunc (fs *flushFS) OpenDir(\n\tctx context.Context,\n\treq *fuse.OpenDirRequest) (\n\tresp *fuse.OpenDirResponse, err error) {\n\tresp = &fuse.OpenDirResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Sanity check.\n\tif req.Inode != barID {\n\t\terr = fuse.ENOSYS\n\t\treturn\n\t}\n\n\treturn\n}\n<commit_msg>Fixed several build errors.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage flushfs\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\n\t\"github.com\/jacobsa\/fuse\"\n\t\"github.com\/jacobsa\/fuse\/fuseops\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ Create a file system whose sole contents are a file named \"foo\" and a\n\/\/ directory named \"bar\".\n\/\/\n\/\/ The file may be opened for reading and\/or writing. Its initial contents are\n\/\/ empty. Whenever a flush or fsync is received, the supplied function will be\n\/\/ called with the current contents of the file and its status returned.\n\/\/\n\/\/ The directory cannot be modified.\nfunc NewFileSystem(\n\treportFlush func(string) error,\n\treportFsync func(string) error) (server fuse.Server, err error) {\n\tserver = &flushFS{\n\t\treportFlush: reportFlush,\n\t\treportFsync: reportFsync,\n\t}\n\n\treturn\n}\n\nconst (\n\tfooID = fuseops.RootInodeID + 1 + iota\n\tbarID\n)\n\ntype flushFS struct {\n\treportFlush func(string) error\n\treportFsync func(string) error\n\n\tmu sync.Mutex\n\tfooContents []byte \/\/ GUARDED_BY(mu)\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *flushFS) rootAttributes() fuseops.InodeAttributes {\n\treturn fuseops.InodeAttributes{\n\t\tNlink: 1,\n\t\tMode: 0777 | os.ModeDir,\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *flushFS) fooAttributes() fuseops.InodeAttributes {\n\treturn fuseops.InodeAttributes{\n\t\tNlink: 1,\n\t\tMode: 0777,\n\t\tSize: uint64(len(fs.fooContents)),\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *flushFS) barAttributes() fuseops.InodeAttributes {\n\treturn fuseops.InodeAttributes{\n\t\tNlink: 1,\n\t\tMode: 0777 | os.ModeDir,\n\t}\n}\n\n\/\/ LOCKS_REQUIRED(fs.mu)\nfunc (fs *flushFS) ServeOps(c *fuse.Connection)\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Op methods\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (fs *flushFS) Init(\n\tctx context.Context,\n\treq *fuse.InitRequest) (\n\tresp *fuse.InitResponse, err error) {\n\tresp = &fuse.InitResponse{}\n\treturn\n}\n\nfunc (fs *flushFS) LookUpInode(\n\tctx context.Context,\n\treq *fuse.LookUpInodeRequest) (\n\tresp *fuse.LookUpInodeResponse, err error) {\n\tresp = &fuse.LookUpInodeResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Sanity check.\n\tif req.Parent != fuseops.RootInodeID {\n\t\terr = fuse.ENOENT\n\t\treturn\n\t}\n\n\t\/\/ Set up the entry.\n\tswitch req.Name {\n\tcase \"foo\":\n\t\tresp.Entry = fuse.ChildInodeEntry{\n\t\t\tChild: fooID,\n\t\t\tAttributes: fs.fooAttributes(),\n\t\t}\n\n\tcase \"bar\":\n\t\tresp.Entry = fuse.ChildInodeEntry{\n\t\t\tChild: barID,\n\t\t\tAttributes: fs.barAttributes(),\n\t\t}\n\n\tdefault:\n\t\terr = fuse.ENOENT\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (fs *flushFS) GetInodeAttributes(\n\tctx context.Context,\n\treq *fuse.GetInodeAttributesRequest) (\n\tresp *fuse.GetInodeAttributesResponse, err error) {\n\tresp = &fuse.GetInodeAttributesResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\tswitch req.Inode {\n\tcase fuseops.RootInodeID:\n\t\tresp.Attributes = fs.rootAttributes()\n\t\treturn\n\n\tcase fooID:\n\t\tresp.Attributes = fs.fooAttributes()\n\t\treturn\n\n\tcase barID:\n\t\tresp.Attributes = fs.barAttributes()\n\t\treturn\n\n\tdefault:\n\t\terr = fuse.ENOENT\n\t\treturn\n\t}\n}\n\nfunc (fs *flushFS) OpenFile(\n\tctx context.Context,\n\treq *fuse.OpenFileRequest) (\n\tresp *fuse.OpenFileResponse, err error) {\n\tresp = &fuse.OpenFileResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Sanity check.\n\tif req.Inode != fooID {\n\t\terr = fuse.ENOSYS\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (fs *flushFS) ReadFile(\n\tctx context.Context,\n\treq *fuse.ReadFileRequest) (\n\tresp *fuse.ReadFileResponse, err error) {\n\tresp = &fuse.ReadFileResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Ensure the offset is in range.\n\tif req.Offset > int64(len(fs.fooContents)) {\n\t\treturn\n\t}\n\n\t\/\/ Read what we can.\n\tresp.Data = make([]byte, req.Size)\n\tcopy(resp.Data, fs.fooContents[req.Offset:])\n\n\treturn\n}\n\nfunc (fs *flushFS) WriteFile(\n\tctx context.Context,\n\treq *fuse.WriteFileRequest) (\n\tresp *fuse.WriteFileResponse, err error) {\n\tresp = &fuse.WriteFileResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Ensure that the contents slice is long enough.\n\tnewLen := int(req.Offset) + len(req.Data)\n\tif len(fs.fooContents) < newLen {\n\t\tpadding := make([]byte, newLen-len(fs.fooContents))\n\t\tfs.fooContents = append(fs.fooContents, padding...)\n\t}\n\n\t\/\/ Copy in the data.\n\tn := copy(fs.fooContents[req.Offset:], req.Data)\n\n\t\/\/ Sanity check.\n\tif n != len(req.Data) {\n\t\tpanic(fmt.Sprintf(\"Unexpected short copy: %v\", n))\n\t}\n\n\treturn\n}\n\nfunc (fs *flushFS) SyncFile(\n\tctx context.Context,\n\treq *fuse.SyncFileRequest) (\n\tresp *fuse.SyncFileResponse, err error) {\n\tresp = &fuse.SyncFileResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\terr = fs.reportFsync(string(fs.fooContents))\n\treturn\n}\n\nfunc (fs *flushFS) FlushFile(\n\tctx context.Context,\n\treq *fuse.FlushFileRequest) (\n\tresp *fuse.FlushFileResponse, err error) {\n\tresp = &fuse.FlushFileResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\terr = fs.reportFlush(string(fs.fooContents))\n\treturn\n}\n\nfunc (fs *flushFS) OpenDir(\n\tctx context.Context,\n\treq *fuse.OpenDirRequest) (\n\tresp *fuse.OpenDirResponse, err error) {\n\tresp = &fuse.OpenDirResponse{}\n\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\n\t\/\/ Sanity check.\n\tif req.Inode != barID {\n\t\terr = fuse.ENOSYS\n\t\treturn\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package plugin\n\nimport (\n\t\"github.com\/layeh\/gopus\"\n\t\"github.com\/layeh\/gumble\/gumble\"\n\t\"github.com\/robertkrimen\/otto\"\n)\n\nfunc (in *Instance) apiAudioPlay(call otto.FunctionCall) otto.Value {\n\tif in.audio.IsPlaying() {\n\t\treturn otto.FalseValue()\n\t}\n\tobj := call.Argument(0).Object()\n\tif obj == nil {\n\t\treturn otto.FalseValue()\n\t}\n\n\tfilenameValue, _ := obj.Get(\"filename\")\n\tcallbackValue, _ := obj.Get(\"callback\")\n\n\tif callbackValue.IsFunction() {\n\t\tin.audio.Done = func() {\n\t\t\tin.audio.Done = nil\n\t\t\tin.callValue(callbackValue)\n\t\t}\n\t}\n\n\tif enc := in.client.AudioEncoder(); enc != nil {\n\t\tenc.SetApplication(gopus.Audio)\n\t}\n\n\tin.audio.Play(filenameValue.String())\n\treturn otto.TrueValue()\n}\n\nfunc (in *Instance) apiAudioNewTarget(call otto.FunctionCall) otto.Value {\n\tid, err := call.Argument(0).ToInteger()\n\tif err != nil {\n\t\treturn otto.UndefinedValue()\n\t}\n\n\ttarget := &gumble.VoiceTarget{}\n\ttarget.SetID(int(id))\n\tvalue, _ := in.state.ToValue(target)\n\treturn value\n}\n\nfunc (in *Instance) apiAudioBitrate(call otto.FunctionCall) otto.Value {\n\tencoder := in.client.AudioEncoder()\n\tvalue, _ := in.state.ToValue(encoder.Bitrate())\n\treturn value\n}\n\nfunc (in *Instance) apiAudioSetBitrate(call otto.FunctionCall) otto.Value {\n\tbitrate, err := call.Argument(0).ToInteger()\n\tif err != nil {\n\t\treturn otto.UndefinedValue()\n\t}\n\tin.client.AudioEncoder().SetBitrate(int(bitrate))\n\treturn otto.UndefinedValue()\n}\n\nfunc (in *Instance) apiAudioVolume(call otto.FunctionCall) otto.Value {\n\tvalue, _ := in.state.ToValue(in.audio.Volume)\n\treturn value\n}\n\nfunc (in *Instance) apiAudioSetVolume(call otto.FunctionCall) otto.Value {\n\tvolume, err := call.Argument(0).ToFloat()\n\tif err != nil {\n\t\treturn otto.UndefinedValue()\n\t}\n\tin.audio.Volume = float32(volume)\n\treturn otto.UndefinedValue()\n}\n\nfunc (in *Instance) apiAudioSetTarget(call otto.FunctionCall) otto.Value {\n\tif len(call.ArgumentList) == 0 {\n\t\tin.client.SetVoiceTarget(nil)\n\t\treturn otto.TrueValue()\n\t}\n\ttarget, err := call.Argument(0).Export()\n\tif err != nil {\n\t\treturn otto.UndefinedValue()\n\t}\n\tvoiceTarget := target.(*gumble.VoiceTarget)\n\tin.client.Send(voiceTarget)\n\tin.client.SetVoiceTarget(voiceTarget)\n\treturn otto.TrueValue()\n}\n\nfunc (in *Instance) apiAudioStop(call otto.FunctionCall) otto.Value {\n\tin.audio.Stop()\n\treturn otto.UndefinedValue()\n}\n\nfunc (in *Instance) apiAudioIsPlaying(call otto.FunctionCall) otto.Value {\n\tif in.audio.IsPlaying() {\n\t\treturn otto.TrueValue()\n\t} else {\n\t\treturn otto.FalseValue()\n\t}\n}\n<commit_msg>change according to gumble_ffmpeg's API<commit_after>package plugin\n\nimport (\n\t\"github.com\/layeh\/gopus\"\n\t\"github.com\/layeh\/gumble\/gumble\"\n\t\"github.com\/robertkrimen\/otto\"\n)\n\nfunc (in *Instance) apiAudioPlay(call otto.FunctionCall) otto.Value {\n\tif in.audio.IsPlaying() {\n\t\treturn otto.FalseValue()\n\t}\n\tobj := call.Argument(0).Object()\n\tif obj == nil {\n\t\treturn otto.FalseValue()\n\t}\n\n\tfilenameValue, _ := obj.Get(\"filename\")\n\tcallbackValue, _ := obj.Get(\"callback\")\n\n\tif enc := in.client.AudioEncoder(); enc != nil {\n\t\tenc.SetApplication(gopus.Audio)\n\t}\n\n\tin.audio.Play(filenameValue.String(), func() {\n\t\tif callbackValue.IsFunction() {\n\t\t\tin.callValue(callbackValue)\n\t\t}\n\t})\n\treturn otto.TrueValue()\n}\n\nfunc (in *Instance) apiAudioNewTarget(call otto.FunctionCall) otto.Value {\n\tid, err := call.Argument(0).ToInteger()\n\tif err != nil {\n\t\treturn otto.UndefinedValue()\n\t}\n\n\ttarget := &gumble.VoiceTarget{}\n\ttarget.SetID(int(id))\n\tvalue, _ := in.state.ToValue(target)\n\treturn value\n}\n\nfunc (in *Instance) apiAudioBitrate(call otto.FunctionCall) otto.Value {\n\tencoder := in.client.AudioEncoder()\n\tvalue, _ := in.state.ToValue(encoder.Bitrate())\n\treturn value\n}\n\nfunc (in *Instance) apiAudioSetBitrate(call otto.FunctionCall) otto.Value {\n\tbitrate, err := call.Argument(0).ToInteger()\n\tif err != nil {\n\t\treturn otto.UndefinedValue()\n\t}\n\tin.client.AudioEncoder().SetBitrate(int(bitrate))\n\treturn otto.UndefinedValue()\n}\n\nfunc (in *Instance) apiAudioVolume(call otto.FunctionCall) otto.Value {\n\tvalue, _ := in.state.ToValue(in.audio.Volume)\n\treturn value\n}\n\nfunc (in *Instance) apiAudioSetVolume(call otto.FunctionCall) otto.Value {\n\tvolume, err := call.Argument(0).ToFloat()\n\tif err != nil {\n\t\treturn otto.UndefinedValue()\n\t}\n\tin.audio.Volume = float32(volume)\n\treturn otto.UndefinedValue()\n}\n\nfunc (in *Instance) apiAudioSetTarget(call otto.FunctionCall) otto.Value {\n\tif len(call.ArgumentList) == 0 {\n\t\tin.client.SetVoiceTarget(nil)\n\t\treturn otto.TrueValue()\n\t}\n\ttarget, err := call.Argument(0).Export()\n\tif err != nil {\n\t\treturn otto.UndefinedValue()\n\t}\n\tvoiceTarget := target.(*gumble.VoiceTarget)\n\tin.client.Send(voiceTarget)\n\tin.client.SetVoiceTarget(voiceTarget)\n\treturn otto.TrueValue()\n}\n\nfunc (in *Instance) apiAudioStop(call otto.FunctionCall) otto.Value {\n\tin.audio.Stop()\n\treturn otto.UndefinedValue()\n}\n\nfunc (in *Instance) apiAudioIsPlaying(call otto.FunctionCall) otto.Value {\n\tif in.audio.IsPlaying() {\n\t\treturn otto.TrueValue()\n\t} else {\n\t\treturn otto.FalseValue()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package raymondhelpers\n\nimport (\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/aymerick\/raymond\"\n\t\"github.com\/grokify\/gotilla\/time\/timeutil\"\n)\n\n\/\/ RegisterAll registers helpers for the Raymond Handlebars template\n\/\/ engine.\nfunc RegisterAll() {\n\tRegisterStringSafe()\n\tRegisterTimeSafe()\n}\n\nfunc RegisterTimeSafe() {\n\traymond.RegisterHelper(\"timeRfc3339\", func(t time.Time) raymond.SafeString {\n\t\treturn raymond.SafeString(t.Format(time.RFC3339))\n\t})\n\traymond.RegisterHelper(\"timeRfc3339ymd\", func(t time.Time) raymond.SafeString {\n\t\treturn raymond.SafeString(t.Format(timeutil.RFC3339YMD))\n\t})\n}\n\nfunc RegisterStringSafe() {\n\traymond.RegisterHelper(\"spaceToHyphen\", func(s string) raymond.SafeString {\n\t\treturn raymond.SafeString(regexp.MustCompile(`[\\s-]+`).ReplaceAllString(s, \"-\"))\n\t})\n\traymond.RegisterHelper(\"spaceToUnderscore\", func(s string) raymond.SafeString {\n\t\treturn raymond.SafeString(regexp.MustCompile(`[\\s_]+`).ReplaceAllString(s, \"_\"))\n\t})\n\traymond.RegisterHelper(\"toLower\", func(s string) raymond.SafeString {\n\t\treturn raymond.SafeString(strings.ToLower(s))\n\t})\n}\n<commit_msg>add defaultUnknown raymondhelper<commit_after>package raymondhelpers\n\nimport (\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/aymerick\/raymond\"\n\t\"github.com\/grokify\/gotilla\/time\/timeutil\"\n)\n\n\/\/ RegisterAll registers helpers for the Raymond Handlebars template\n\/\/ engine.\nfunc RegisterAll() {\n\tRegisterStringSafe()\n\tRegisterTimeSafe()\n}\n\nfunc RegisterTimeSafe() {\n\traymond.RegisterHelper(\"timeRfc3339\", func(t time.Time) raymond.SafeString {\n\t\treturn raymond.SafeString(t.Format(time.RFC3339))\n\t})\n\traymond.RegisterHelper(\"timeRfc3339ymd\", func(t time.Time) raymond.SafeString {\n\t\treturn raymond.SafeString(t.Format(timeutil.RFC3339YMD))\n\t})\n}\n\nfunc RegisterStringSafe() {\n\traymond.RegisterHelper(\"spaceToHyphen\", func(s string) raymond.SafeString {\n\t\treturn raymond.SafeString(regexp.MustCompile(`[\\s-]+`).ReplaceAllString(s, \"-\"))\n\t})\n\traymond.RegisterHelper(\"spaceToUnderscore\", func(s string) raymond.SafeString {\n\t\treturn raymond.SafeString(regexp.MustCompile(`[\\s_]+`).ReplaceAllString(s, \"_\"))\n\t})\n\traymond.RegisterHelper(\"toLower\", func(s string) raymond.SafeString {\n\t\treturn raymond.SafeString(strings.ToLower(s))\n\t})\n\traymond.RegisterHelper(\"defaultUnknown\", func(s string) raymond.SafeString {\n\t\tif len(strings.TrimSpace(s)) == 0 {\n\t\t\treturn raymond.SafeString(\"unknown\")\n\t\t}\n\t\treturn raymond.SafeString(s)\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package nsone\n\nimport (\n\t\"fmt\"\n\t\"github.com\/bobtfish\/go-nsone-api\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"regexp\"\n\t\"strconv\"\n)\n\nfunc monitoringJobResource() *schema.Resource {\n\treturn &schema.Resource{\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\t\"active\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\t\"regions\": &schema.Schema{\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tRequired: true,\n\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"job_type\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"frequency\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\t\"rapid_recheck\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\t\"policy\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tValidateFunc: func(v interface{}, k string) (ws []string, es []error) {\n\t\t\t\t\tvalue := v.(string)\n\t\t\t\t\tif !regexp.MustCompile(`^(all|one|quorum)$`).MatchString(value) {\n\t\t\t\t\t\tes = append(es, fmt.Errorf(\n\t\t\t\t\t\t\t\"only all, one, quorum allowed in %q\", k))\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"notes\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"config\": &schema.Schema{\n\t\t\t\tType: schema.TypeMap,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\t\"notify_delay\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"notify_repeat\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"notify_failback\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"notify_regional\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"notify_list\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"rules\": &schema.Schema{\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"value\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"comparison\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"key\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tCreate: MonitoringJobCreate,\n\t\tRead: MonitoringJobRead,\n\t\tUpdate: MonitoringJobUpdate,\n\t\tDelete: MonitoringJobDelete,\n\t}\n}\n\nfunc monitoringJobToResourceData(d *schema.ResourceData, r *nsone.MonitoringJob) error {\n\td.SetId(r.Id)\n\treturn nil\n}\n\nfunc resourceDataToMonitoringJob(r *nsone.MonitoringJob, d *schema.ResourceData) error {\n\tr.Id = d.Id()\n\tr.Name = d.Get(\"name\").(string)\n\tr.JobType = d.Get(\"job_type\").(string)\n\tr.Active = d.Get(\"active\").(bool)\n\traw_regions := d.Get(\"regions\").([]interface{})\n\tr.Regions = make([]string, len(raw_regions))\n\tfor i, v := range raw_regions {\n\t\tr.Regions[i] = v.(string)\n\t}\n\tr.Frequency = d.Get(\"frequency\").(int)\n\tr.RapidRecheck = d.Get(\"rapid_recheck\").(bool)\n\tvar raw_rules []interface{}\n\tif r := d.Get(\"rules\"); r != nil {\n\t\traw_rules = r.([]interface{})\n\t}\n\tr.Rules = make([]nsone.MonitoringJobRule, len(raw_rules))\n\tfor i, v := range raw_rules {\n\t\trule := v.(map[string]interface{})\n\t\tr.Rules[i] = nsone.MonitoringJobRule{\n\t\t\tValue: rule[\"value\"].(int),\n\t\t\tComparison: rule[\"comparison\"].(string),\n\t\t\tKey: rule[\"key\"].(string),\n\t\t}\n\t}\n\tconfig := make(map[string]interface{})\n\tif raw_config := d.Get(\"config\"); raw_config != nil {\n\t\tfor k, v := range raw_config.(map[string]interface{}) {\n\t\t\tif i, err := strconv.Atoi(v.(string)); err == nil {\n\t\t\t\tconfig[k] = i\n\t\t\t} else {\n\t\t\t\tconfig[k] = v\n\t\t\t}\n\t\t}\n\t}\n\tr.Config = config\n\tr.RegionScope = \"fixed\"\n\tr.Policy = d.Get(\"policy\").(string)\n\tif v, ok := d.GetOk(\"notes\"); ok {\n\t\tr.Notes = v.(string)\n\t}\n\tr.Frequency = d.Get(\"frequency\").(int)\n\tif v, ok := d.GetOk(\"notify_delay\"); ok {\n\t\tr.NotifyDelay = v.(int)\n\t}\n\tif v, ok := d.GetOk(\"notify_repeat\"); ok {\n\t\tr.NotifyRepeat = v.(int)\n\t}\n\tif v, ok := d.GetOk(\"notify_regional\"); ok {\n\t\tr.NotifyRegional = v.(bool)\n\t}\n\tif v, ok := d.GetOk(\"notify_failback\"); ok {\n\t\tr.NotifyFailback = v.(bool)\n\t}\n\tif v, ok := d.GetOk(\"notify_list\"); ok {\n\t\tr.NotifyList = v.(string)\n\t}\n\treturn nil\n}\n\nfunc MonitoringJobCreate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*nsone.APIClient)\n\tmj := nsone.MonitoringJob{}\n\tif err := resourceDataToMonitoringJob(&mj, d); err != nil {\n\t\treturn err\n\t}\n\tif err := client.CreateMonitoringJob(&mj); err != nil {\n\t\treturn err\n\t}\n\treturn monitoringJobToResourceData(d, &mj)\n}\n\nfunc MonitoringJobRead(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*nsone.APIClient)\n\tmj, err := client.GetMonitoringJob(d.Id())\n\tif err != nil {\n\t\treturn err\n\t}\n\tmonitoringJobToResourceData(d, &mj)\n\treturn nil\n}\n\nfunc MonitoringJobDelete(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*nsone.APIClient)\n\terr := client.DeleteMonitoringJob(d.Id())\n\td.SetId(\"\")\n\treturn err\n}\n\nfunc MonitoringJobUpdate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*nsone.APIClient)\n\tmj := nsone.MonitoringJob{\n\t\tId: d.Id(),\n\t}\n\tif err := resourceDataToMonitoringJob(&mj, d); err != nil {\n\t\treturn err\n\t}\n\tif err := client.UpdateMonitoringJob(&mj); err != nil {\n\t\treturn err\n\t}\n\tmonitoringJobToResourceData(d, &mj)\n\treturn nil\n}\n<commit_msg>Pack rules from schema into structs<commit_after>package nsone\n\nimport (\n\t\"fmt\"\n\t\"github.com\/bobtfish\/go-nsone-api\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"regexp\"\n\t\"strconv\"\n)\n\nfunc monitoringJobResource() *schema.Resource {\n\treturn &schema.Resource{\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"id\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\t\"active\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\t\"regions\": &schema.Schema{\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tRequired: true,\n\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"job_type\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"frequency\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\t\"rapid_recheck\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\t\"policy\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tValidateFunc: func(v interface{}, k string) (ws []string, es []error) {\n\t\t\t\t\tvalue := v.(string)\n\t\t\t\t\tif !regexp.MustCompile(`^(all|one|quorum)$`).MatchString(value) {\n\t\t\t\t\t\tes = append(es, fmt.Errorf(\n\t\t\t\t\t\t\t\"only all, one, quorum allowed in %q\", k))\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"notes\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"config\": &schema.Schema{\n\t\t\t\tType: schema.TypeMap,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\t\"notify_delay\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"notify_repeat\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"notify_failback\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"notify_regional\": &schema.Schema{\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"notify_list\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"rules\": &schema.Schema{\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Resource{\n\t\t\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\t\t\"value\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeInt,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"comparison\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"key\": &schema.Schema{\n\t\t\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\t\t\tRequired: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tCreate: MonitoringJobCreate,\n\t\tRead: MonitoringJobRead,\n\t\tUpdate: MonitoringJobUpdate,\n\t\tDelete: MonitoringJobDelete,\n\t}\n}\n\nfunc monitoringJobToResourceData(d *schema.ResourceData, r *nsone.MonitoringJob) error {\n\td.SetId(r.Id)\n\treturn nil\n}\n\nfunc resourceDataToMonitoringJob(r *nsone.MonitoringJob, d *schema.ResourceData) error {\n\tr.Id = d.Id()\n\tr.Name = d.Get(\"name\").(string)\n\tr.JobType = d.Get(\"job_type\").(string)\n\tr.Active = d.Get(\"active\").(bool)\n\traw_regions := d.Get(\"regions\").([]interface{})\n\tr.Regions = make([]string, len(raw_regions))\n\tfor i, v := range raw_regions {\n\t\tr.Regions[i] = v.(string)\n\t}\n\tr.Frequency = d.Get(\"frequency\").(int)\n\tr.RapidRecheck = d.Get(\"rapid_recheck\").(bool)\n\tvar raw_rules []interface{}\n\tif raw_rules := d.Get(\"rules\"); raw_rules != nil {\n\t\tr.Rules = make([]nsone.MonitoringJobRule, len(raw_rules.([]interface{})))\n\t\tfor i, v := range raw_rules.([]interface{}) {\n\t\t\trule := v.(map[string]interface{})\n\t\t\tr.Rules[i] = nsone.MonitoringJobRule{\n\t\t\t\tValue: rule[\"value\"].(int),\n\t\t\t\tComparison: rule[\"comparison\"].(string),\n\t\t\t\tKey: rule[\"key\"].(string),\n\t\t\t}\n\t\t}\n\t} else {\n\t\tr.Rules = make([]nsone.MonitoringJobRule, 0)\n\t}\n\tfor i, v := range raw_rules {\n\t\trule := v.(map[string]interface{})\n\t\tr.Rules[i] = nsone.MonitoringJobRule{\n\t\t\tValue: rule[\"value\"].(int),\n\t\t\tComparison: rule[\"comparison\"].(string),\n\t\t\tKey: rule[\"key\"].(string),\n\t\t}\n\t}\n\tconfig := make(map[string]interface{})\n\tif raw_config := d.Get(\"config\"); raw_config != nil {\n\t\tfor k, v := range raw_config.(map[string]interface{}) {\n\t\t\tif i, err := strconv.Atoi(v.(string)); err == nil {\n\t\t\t\tconfig[k] = i\n\t\t\t} else {\n\t\t\t\tconfig[k] = v\n\t\t\t}\n\t\t}\n\t}\n\tr.Config = config\n\tr.RegionScope = \"fixed\"\n\tr.Policy = d.Get(\"policy\").(string)\n\tif v, ok := d.GetOk(\"notes\"); ok {\n\t\tr.Notes = v.(string)\n\t}\n\tr.Frequency = d.Get(\"frequency\").(int)\n\tif v, ok := d.GetOk(\"notify_delay\"); ok {\n\t\tr.NotifyDelay = v.(int)\n\t}\n\tif v, ok := d.GetOk(\"notify_repeat\"); ok {\n\t\tr.NotifyRepeat = v.(int)\n\t}\n\tif v, ok := d.GetOk(\"notify_regional\"); ok {\n\t\tr.NotifyRegional = v.(bool)\n\t}\n\tif v, ok := d.GetOk(\"notify_failback\"); ok {\n\t\tr.NotifyFailback = v.(bool)\n\t}\n\tif v, ok := d.GetOk(\"notify_list\"); ok {\n\t\tr.NotifyList = v.(string)\n\t}\n\treturn nil\n}\n\nfunc MonitoringJobCreate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*nsone.APIClient)\n\tmj := nsone.MonitoringJob{}\n\tif err := resourceDataToMonitoringJob(&mj, d); err != nil {\n\t\treturn err\n\t}\n\tif err := client.CreateMonitoringJob(&mj); err != nil {\n\t\treturn err\n\t}\n\treturn monitoringJobToResourceData(d, &mj)\n}\n\nfunc MonitoringJobRead(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*nsone.APIClient)\n\tmj, err := client.GetMonitoringJob(d.Id())\n\tif err != nil {\n\t\treturn err\n\t}\n\tmonitoringJobToResourceData(d, &mj)\n\treturn nil\n}\n\nfunc MonitoringJobDelete(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*nsone.APIClient)\n\terr := client.DeleteMonitoringJob(d.Id())\n\td.SetId(\"\")\n\treturn err\n}\n\nfunc MonitoringJobUpdate(d *schema.ResourceData, meta interface{}) error {\n\tclient := meta.(*nsone.APIClient)\n\tmj := nsone.MonitoringJob{\n\t\tId: d.Id(),\n\t}\n\tif err := resourceDataToMonitoringJob(&mj, d); err != nil {\n\t\treturn err\n\t}\n\tif err := client.UpdateMonitoringJob(&mj); err != nil {\n\t\treturn err\n\t}\n\tmonitoringJobToResourceData(d, &mj)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package mcstoreapi\n\nimport (\n\t\"crypto\/tls\"\n\n\t\"path\"\n\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"fmt\"\n\n\t\"github.com\/materials-commons\/gohandy\/ezhttp\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/app\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/app\/flow\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/db\/schema\"\n\t\"github.com\/parnurzeal\/gorequest\"\n\t\"gnd.la\/net\/urlutil\"\n)\n\ntype ServerAPI struct {\n\tagent *gorequest.SuperAgent\n\tclient *ezhttp.EzClient\n}\n\n\/\/ NewServerAPI creates a new ServerAPI\nfunc NewServerAPI() *ServerAPI {\n\treturn &ServerAPI{\n\t\tagent: gorequest.New().TLSClientConfig(&tls.Config{InsecureSkipVerify: true}),\n\t\tclient: MCClient(),\n\t}\n}\n\n\/\/ CreateUploadRequest will request an upload request from the server. If an existing\n\/\/ upload matches the request then server will send the existing upload request.\nfunc (s *ServerAPI) CreateUpload(req CreateUploadRequest) (*CreateUploadResponse, error) {\n\tvar uploadResponse CreateUploadResponse\n\tsc, err := s.client.JSON(&req).JSONPost(Url(\"\/upload\"), &uploadResponse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = HTTPStatusToError(sc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &uploadResponse, nil\n}\n\n\/\/ SendFlowData will send the data for a flow request.\nfunc (s *ServerAPI) SendFlowData(req *flow.Request) (*UploadChunkResponse, error) {\n\tparams := req.ToParamsMap()\n\tsc, err, body := s.client.PostFileBytes(Url(\"\/upload\/chunk\"), \"\/tmp\/test.txt\", \"chunkData\",\n\t\treq.Chunk, params)\n\tswitch {\n\tcase err != nil:\n\t\treturn nil, err\n\tcase sc != 200:\n\t\treturn nil, app.ErrInternal\n\tdefault:\n\t\tvar uploadResp UploadChunkResponse\n\t\tif err := ToJSON(body, &uploadResp); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &uploadResp, nil\n\t}\n}\n\n\/\/ ListUploadRequests will return all the upload requests for a given project ID.\nfunc (s *ServerAPI) ListUploadRequests(projectID string) ([]UploadEntry, error) {\n\tr, body, errs := s.agent.Get(Url(\"\/upload\/\" + projectID)).End()\n\tif err := ToError(r, errs); err != nil {\n\t\treturn nil, err\n\t}\n\tvar entries []UploadEntry\n\terr := ToJSON(body, &entries)\n\treturn entries, err\n}\n\n\/\/ DeleteUploadRequest will delete a given upload request.\nfunc (s *ServerAPI) DeleteUploadRequest(uploadID string) error {\n\tr, _, errs := s.agent.Delete(Url(\"\/upload\/\" + uploadID)).End()\n\treturn ToError(r, errs)\n}\n\n\/\/ This really doesn't belong here as the server code is in a different server. However\n\/\/ it logically belongs here as far as the client is concerned.\n\n\/\/ userLogin contains the user password used to retrieve the users apikey.\ntype userLogin struct {\n\tPassword string `json:\"password\"`\n}\n\n\/\/ GetUserAPIKey will return the users APIKey\nfunc (s *ServerAPI) GetUserAPIKey(username, password string) (apikey string, err error) {\n\tl := userLogin{\n\t\tPassword: password,\n\t}\n\tapiURL := urlutil.MustJoin(MCUrl(), path.Join(\"api\", \"user\", username, \"apikey\"))\n\tr, body, errs := s.agent.Put(apiURL).Send(l).End()\n\tif err := ToError(r, errs); err != nil {\n\t\treturn apikey, err\n\t}\n\n\tvar u schema.User\n\terr = ToJSON(body, &u)\n\treturn u.APIKey, err\n}\n\ntype DirectoryRequest struct {\n\tProjectName string\n\tProjectID string\n\tPath string\n}\n\nfunc (s *ServerAPI) GetDirectory(req DirectoryRequest) (directoryID string, err error) {\n\tvar projectBasedPath string\n\tif projectBasedPath, err = toProjectPath(req.ProjectName, req.Path); err != nil {\n\t\treturn directoryID, err\n\t}\n\n\tgetDirReq := GetDirectoryRequest{\n\t\tPath: projectBasedPath,\n\t\tProjectID: req.ProjectID,\n\t}\n\tr, body, errs := s.agent.Post(Url(\"\/project2\/directory\")).Send(getDirReq).End()\n\tif err = ToError(r, errs); err != nil {\n\t\treturn directoryID, err\n\t}\n\n\tvar dirResponse GetDirectoryResponse\n\tif err = ToJSON(body, &dirResponse); err != nil {\n\t\treturn directoryID, err\n\t}\n\n\treturn dirResponse.DirectoryID, nil\n}\n\ntype ServerDir struct {\n\tID string `json:\"id\"`\n\tType string `json:\"_type\"`\n\tSize int64 `json:\"size\"`\n\tName string `json:\"name\"`\n\tPath string `json:\"path\"`\n\tChecksum string `json:\"checksum\"`\n\tChildren []ServerDir `json:\"children\"`\n}\n\nfunc (s *ServerAPI) GetDirectoryList(projectID, directoryID string) (*ServerDir, error) {\n\tif directoryID == \"\" {\n\t\tdirectoryID = \"top\"\n\t}\n\n\tvar dir ServerDir\n\tapiURL := path.Join(\"\/v2\", \"projects\", projectID, \"directories\", directoryID)\n\tif sc, err := s.client.JSONGet(Url(apiURL), &dir); err != nil {\n\t\treturn nil, err\n\t} else if err = HTTPStatusToError(sc); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &dir, nil\n}\n\nfunc toProjectPath(projectName, path string) (string, error) {\n\ti := strings.Index(path, projectName)\n\tif i == -1 {\n\t\treturn \"\", app.ErrInvalid\n\t}\n\treturn filepath.ToSlash(path[i:]), nil\n}\n\nfunc (s *ServerAPI) CreateProject(req CreateProjectRequest) (*CreateProjectResponse, error) {\n\tvar response CreateProjectResponse\n\tsc, err := s.client.JSON(&req).JSONPost(Url(\"\/projects\"), &response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = HTTPStatusToError(sc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &response, nil\n}\n\nfunc (s *ServerAPI) DownloadFile(projectID, fileID, fpath string) error {\n\tfmt.Println(\"DownloadFile:\", projectID, fileID, fpath)\n\tout, err := os.Create(fpath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer out.Close()\n\n\tfileURL := Url(filepath.Join(\"\/datafiles\/static\", fileID)) + \"&original=true\"\n\tresp, err := http.Get(fileURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar n int64\n\tn, err = io.Copy(out, resp.Body)\n\tfmt.Println(\" wrote bytes:\", n)\n\treturn err\n}\n\ntype ServerFile struct {\n\tID string `json:\"id\"`\n\tChecksum string `json:\"checksum\"`\n\tSize int64 `json:\"size\"`\n}\n\nfunc (s *ServerAPI) GetFileForPath(projectID, fpath string) (*ServerFile, error) {\n\tfilePathArg := struct {\n\t\tFilePath string `json:\"file_path\"`\n\t}{\n\t\tFilePath: fpath,\n\t}\n\n\turlPath := path.Join(\"\/v2\", \"projects\", projectID, \"files_by_path\")\n\tr, body, errs := s.agent.Put(Url(urlPath)).Send(filePathArg).End()\n\tif err := ToError(r, errs); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar f ServerFile\n\tif err := ToJSON(body, &f); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &f, nil\n}\n<commit_msg>Fix building url paths.<commit_after>package mcstoreapi\n\nimport (\n\t\"crypto\/tls\"\n\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"fmt\"\n\n\t\"github.com\/materials-commons\/gohandy\/ezhttp\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/app\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/app\/flow\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/db\/schema\"\n\t\"github.com\/parnurzeal\/gorequest\"\n\t\"gnd.la\/net\/urlutil\"\n)\n\ntype ServerAPI struct {\n\tagent *gorequest.SuperAgent\n\tclient *ezhttp.EzClient\n}\n\n\/\/ NewServerAPI creates a new ServerAPI\nfunc NewServerAPI() *ServerAPI {\n\treturn &ServerAPI{\n\t\tagent: gorequest.New().TLSClientConfig(&tls.Config{InsecureSkipVerify: true}),\n\t\tclient: MCClient(),\n\t}\n}\n\n\/\/ CreateUploadRequest will request an upload request from the server. If an existing\n\/\/ upload matches the request then server will send the existing upload request.\nfunc (s *ServerAPI) CreateUpload(req CreateUploadRequest) (*CreateUploadResponse, error) {\n\tvar uploadResponse CreateUploadResponse\n\tsc, err := s.client.JSON(&req).JSONPost(Url(\"\/upload\"), &uploadResponse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = HTTPStatusToError(sc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &uploadResponse, nil\n}\n\n\/\/ SendFlowData will send the data for a flow request.\nfunc (s *ServerAPI) SendFlowData(req *flow.Request) (*UploadChunkResponse, error) {\n\tparams := req.ToParamsMap()\n\tsc, err, body := s.client.PostFileBytes(Url(\"\/upload\/chunk\"), \"\/tmp\/test.txt\", \"chunkData\",\n\t\treq.Chunk, params)\n\tswitch {\n\tcase err != nil:\n\t\treturn nil, err\n\tcase sc != 200:\n\t\treturn nil, app.ErrInternal\n\tdefault:\n\t\tvar uploadResp UploadChunkResponse\n\t\tif err := ToJSON(body, &uploadResp); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &uploadResp, nil\n\t}\n}\n\n\/\/ ListUploadRequests will return all the upload requests for a given project ID.\nfunc (s *ServerAPI) ListUploadRequests(projectID string) ([]UploadEntry, error) {\n\tr, body, errs := s.agent.Get(Url(\"\/upload\/\" + projectID)).End()\n\tif err := ToError(r, errs); err != nil {\n\t\treturn nil, err\n\t}\n\tvar entries []UploadEntry\n\terr := ToJSON(body, &entries)\n\treturn entries, err\n}\n\n\/\/ DeleteUploadRequest will delete a given upload request.\nfunc (s *ServerAPI) DeleteUploadRequest(uploadID string) error {\n\tr, _, errs := s.agent.Delete(Url(\"\/upload\/\" + uploadID)).End()\n\treturn ToError(r, errs)\n}\n\n\/\/ This really doesn't belong here as the server code is in a different server. However\n\/\/ it logically belongs here as far as the client is concerned.\n\n\/\/ userLogin contains the user password used to retrieve the users apikey.\ntype userLogin struct {\n\tPassword string `json:\"password\"`\n}\n\n\/\/ GetUserAPIKey will return the users APIKey\nfunc (s *ServerAPI) GetUserAPIKey(username, password string) (apikey string, err error) {\n\tl := userLogin{\n\t\tPassword: password,\n\t}\n\tapiURL := urlutil.MustJoin(MCUrl(), \"api\/user\/\"+username+\"\/apikey\")\n\tr, body, errs := s.agent.Put(apiURL).Send(l).End()\n\tif err := ToError(r, errs); err != nil {\n\t\treturn apikey, err\n\t}\n\n\tvar u schema.User\n\terr = ToJSON(body, &u)\n\treturn u.APIKey, err\n}\n\ntype DirectoryRequest struct {\n\tProjectName string\n\tProjectID string\n\tPath string\n}\n\nfunc (s *ServerAPI) GetDirectory(req DirectoryRequest) (directoryID string, err error) {\n\tvar projectBasedPath string\n\tif projectBasedPath, err = toProjectPath(req.ProjectName, req.Path); err != nil {\n\t\treturn directoryID, err\n\t}\n\n\tgetDirReq := GetDirectoryRequest{\n\t\tPath: projectBasedPath,\n\t\tProjectID: req.ProjectID,\n\t}\n\tr, body, errs := s.agent.Post(Url(\"\/project2\/directory\")).Send(getDirReq).End()\n\tif err = ToError(r, errs); err != nil {\n\t\treturn directoryID, err\n\t}\n\n\tvar dirResponse GetDirectoryResponse\n\tif err = ToJSON(body, &dirResponse); err != nil {\n\t\treturn directoryID, err\n\t}\n\n\treturn dirResponse.DirectoryID, nil\n}\n\ntype ServerDir struct {\n\tID string `json:\"id\"`\n\tType string `json:\"_type\"`\n\tSize int64 `json:\"size\"`\n\tName string `json:\"name\"`\n\tPath string `json:\"path\"`\n\tChecksum string `json:\"checksum\"`\n\tChildren []ServerDir `json:\"children\"`\n}\n\nfunc (s *ServerAPI) GetDirectoryList(projectID, directoryID string) (*ServerDir, error) {\n\tif directoryID == \"\" {\n\t\tdirectoryID = \"top\"\n\t}\n\n\tvar dir ServerDir\n\tapiURL := \"\/v2\/projects\/\" + projectID + \"\/directories\/\" + directoryID\n\tif sc, err := s.client.JSONGet(Url(apiURL), &dir); err != nil {\n\t\treturn nil, err\n\t} else if err = HTTPStatusToError(sc); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &dir, nil\n}\n\nfunc toProjectPath(projectName, path string) (string, error) {\n\ti := strings.Index(path, projectName)\n\tif i == -1 {\n\t\treturn \"\", app.ErrInvalid\n\t}\n\treturn filepath.ToSlash(path[i:]), nil\n}\n\nfunc (s *ServerAPI) CreateProject(req CreateProjectRequest) (*CreateProjectResponse, error) {\n\tvar response CreateProjectResponse\n\tsc, err := s.client.JSON(&req).JSONPost(Url(\"\/projects\"), &response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = HTTPStatusToError(sc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &response, nil\n}\n\nfunc (s *ServerAPI) DownloadFile(projectID, fileID, fpath string) error {\n\tfmt.Println(\"DownloadFile:\", projectID, fileID, fpath)\n\tout, err := os.Create(fpath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer out.Close()\n\n\tfileURL := Url(\"\/datafiles\/static\/\"+fileID) + \"&original=true\"\n\tresp, err := http.Get(fileURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar n int64\n\tn, err = io.Copy(out, resp.Body)\n\tfmt.Println(\" wrote bytes:\", n)\n\treturn err\n}\n\ntype ServerFile struct {\n\tID string `json:\"id\"`\n\tChecksum string `json:\"checksum\"`\n\tSize int64 `json:\"size\"`\n}\n\nfunc (s *ServerAPI) GetFileForPath(projectID, fpath string) (*ServerFile, error) {\n\tfilePathArg := struct {\n\t\tFilePath string `json:\"file_path\"`\n\t}{\n\t\tFilePath: fpath,\n\t}\n\n\turlPath := \"\/v2\/projects\/\" + projectID + \"\/files_by_path\"\n\tr, body, errs := s.agent.Put(Url(urlPath)).Send(filePathArg).End()\n\tif err := ToError(r, errs); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar f ServerFile\n\tif err := ToJSON(body, &f); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &f, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright IBM Corp. 2016 All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage noopssinglechain\n\nimport (\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/hyperledger\/fabric\/core\/chaincode\"\n\t\"github.com\/hyperledger\/fabric\/core\/committer\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\"\n\t\"github.com\/hyperledger\/fabric\/core\/util\"\n\t\"github.com\/hyperledger\/fabric\/protos\/common\"\n\t\"github.com\/hyperledger\/fabric\/protos\/orderer\"\n\tputils \"github.com\/hyperledger\/fabric\/protos\/utils\"\n\t\"github.com\/op\/go-logging\"\n\t\"github.com\/spf13\/viper\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\n\t\"fmt\"\n\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\"\n\t\"github.com\/hyperledger\/fabric\/core\/peer\"\n\t\"github.com\/hyperledger\/fabric\/gossip\/gossip\"\n\t\"github.com\/hyperledger\/fabric\/gossip\/integration\"\n\tgossip_proto \"github.com\/hyperledger\/fabric\/gossip\/proto\"\n\t\"github.com\/hyperledger\/fabric\/gossip\/state\"\n\tpb \"github.com\/hyperledger\/fabric\/protos\/peer\"\n)\n\nvar logger *logging.Logger \/\/ package-level logger\n\nfunc init() {\n\tlogger = logging.MustGetLogger(\"committer\")\n\tlogging.SetLevel(logging.DEBUG, logger.Module)\n}\n\n\/\/ DeliverService used to communicate with orderers to obtain\n\/\/ new block and send the to the committer service\ntype DeliverService struct {\n\tclient orderer.AtomicBroadcast_DeliverClient\n\twindowSize uint64\n\tunAcknowledged uint64\n\tcommitter *committer.LedgerCommitter\n\n\tstateProvider state.GossipStateProvider\n\tgossip gossip.Gossip\n\tconn *grpc.ClientConn\n\n\tstopFlag int32\n\tstopChan chan bool\n}\n\n\/\/ StopDeliveryService sends stop to the delivery service reference\nfunc StopDeliveryService(service *DeliverService) {\n\tif service != nil {\n\t\tservice.Stop()\n\t}\n}\n\n\/\/ NewDeliverService construction function to create and initilize\n\/\/ delivery service instance\nfunc NewDeliverService(chainID string, address string, grpcServer *grpc.Server) *DeliverService {\n\tif viper.GetBool(\"peer.committer.enabled\") {\n\t\tlogger.Infof(\"Creating committer for single noops endorser\")\n\n\t\tdeliverService := &DeliverService{\n\t\t\t\/\/ Instance of RawLedger\n\t\t\tcommitter: committer.NewLedgerCommitter(kvledger.GetLedger(chainID)),\n\t\t\twindowSize: 10,\n\t\t\tstopChan: make(chan bool),\n\t\t}\n\n\t\tdeliverService.initStateProvider(address, grpcServer)\n\n\t\treturn deliverService\n\t}\n\tlogger.Infof(\"Committer disabled\")\n\treturn nil\n}\n\nfunc (d *DeliverService) startDeliver() error {\n\tlogger.Info(\"Starting deliver service client\")\n\terr := d.initDeliver()\n\n\tif err != nil {\n\t\tlogger.Errorf(\"Can't initiate deliver protocol [%s]\", err)\n\t\treturn err\n\t}\n\n\theight, err := d.committer.LedgerHeight()\n\tif err != nil {\n\t\tlogger.Errorf(\"Can't get legder height from committer [%s]\", err)\n\t\treturn err\n\t}\n\n\tif height > 0 {\n\t\tlogger.Debugf(\"Starting deliver with block [%d]\", height)\n\t\tif err := d.seekLatestFromCommitter(height); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t} else {\n\t\tlogger.Debug(\"Starting deliver with olders block\")\n\t\tif err := d.seekOldest(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\td.readUntilClose()\n\n\treturn nil\n}\n\nfunc (d *DeliverService) initDeliver() error {\n\topts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithTimeout(3 * time.Second), grpc.WithBlock()}\n\tendpoint := viper.GetString(\"peer.committer.ledger.orderer\")\n\tconn, err := grpc.Dial(endpoint, opts...)\n\tif err != nil {\n\t\tlogger.Errorf(\"Cannot dial to %s, because of %s\", endpoint, err)\n\t\treturn err\n\t}\n\tvar abc orderer.AtomicBroadcast_DeliverClient\n\tabc, err = orderer.NewAtomicBroadcastClient(conn).Deliver(context.TODO())\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to initialize atomic broadcast, due to %s\", err)\n\t\treturn err\n\t}\n\n\t\/\/ Atomic Broadcast Deliver Client\n\td.client = abc\n\td.conn = conn\n\treturn nil\n\n}\n\nfunc (d *DeliverService) stopDeliver() {\n\tif d.conn != nil {\n\t\td.conn.Close()\n\t}\n}\n\nfunc (d *DeliverService) initStateProvider(address string, grpcServer *grpc.Server) error {\n\tbootstrap := viper.GetStringSlice(\"peer.gossip.bootstrap\")\n\tlogger.Debug(\"Initializing state provideer, endpoint = \", address, \" bootstrap set = \", bootstrap)\n\n\tgossip, gossipComm := integration.NewGossipComponent(address, grpcServer, bootstrap...)\n\n\td.gossip = gossip\n\td.stateProvider = state.NewGossipStateProvider(gossip, gossipComm, d.committer)\n\treturn nil\n}\n\n\/\/ Start the delivery service to read the block via delivery\n\/\/ protocol from the orderers\nfunc (d *DeliverService) Start() {\n\tgo d.checkLeaderAndRunDeliver()\n}\n\n\/\/ Stop all service and release resources\nfunc (d *DeliverService) Stop() {\n\tatomic.StoreInt32(&d.stopFlag, 1)\n\td.stopDeliver()\n\td.stopChan <- true\n\td.stateProvider.Stop()\n\td.gossip.Stop()\n}\n\nfunc (d *DeliverService) checkLeaderAndRunDeliver() {\n\n\tisLeader := viper.GetBool(\"peer.gossip.orgLeader\")\n\n\tif isLeader {\n\t\td.startDeliver()\n\t} else {\n\t\t<-d.stopChan\n\t}\n}\n\nfunc (d *DeliverService) seekOldest() error {\n\treturn d.client.Send(&orderer.DeliverUpdate{\n\t\tType: &orderer.DeliverUpdate_Seek{\n\t\t\tSeek: &orderer.SeekInfo{\n\t\t\t\tStart: orderer.SeekInfo_OLDEST,\n\t\t\t\tWindowSize: d.windowSize,\n\t\t\t\tChainID: util.GetTestChainID(),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc (d *DeliverService) seekLatestFromCommitter(height uint64) error {\n\treturn d.client.Send(&orderer.DeliverUpdate{\n\t\tType: &orderer.DeliverUpdate_Seek{\n\t\t\tSeek: &orderer.SeekInfo{\n\t\t\t\tStart: orderer.SeekInfo_SPECIFIED,\n\t\t\t\tWindowSize: d.windowSize,\n\t\t\t\tSpecifiedNumber: height,\n\t\t\t\tChainID: util.GetTestChainID(),\n\t\t\t},\n\t\t},\n\t})\n}\n\n\/\/ Internal function to check whenever we need to finish listening\n\/\/ for new messages to arrive\nfunc (d *DeliverService) isDone() bool {\n\n\treturn atomic.LoadInt32(&d.stopFlag) == 1\n}\n\nfunc isTxValidForVscc(payload *common.Payload, envBytes []byte) error {\n\t\/\/ TODO: Extract the VSCC\/policy from LCCC as soon as this is ready\n\tvscc := \"vscc\"\n\n\tchainName := payload.Header.ChainHeader.ChainID\n\tif chainName == \"\" {\n\t\terr := fmt.Errorf(\"transaction header does not contain an chain ID\")\n\t\tlogger.Errorf(\"%s\", err)\n\t\treturn err\n\t}\n\n\ttxid := \"N\/A\" \/\/ FIXME: is that appropriate?\n\n\t\/\/ build arguments for VSCC invocation\n\t\/\/ args[0] - function name (not used now)\n\t\/\/ args[1] - serialized Envelope\n\targs := [][]byte{[]byte(\"\"), envBytes}\n\n\t\/\/ create VSCC invocation proposal\n\tvsccCis := &pb.ChaincodeInvocationSpec{ChaincodeSpec: &pb.ChaincodeSpec{Type: pb.ChaincodeSpec_GOLANG, ChaincodeID: &pb.ChaincodeID{Name: vscc}, CtorMsg: &pb.ChaincodeInput{Args: args}}}\n\tprop, err := putils.CreateProposalFromCIS(txid, chainName, vsccCis, []byte(\"\"))\n\tif err != nil {\n\t\tlogger.Errorf(\"Cannot create a proposal to invoke VSCC, err %s\\n\", err)\n\t\treturn err\n\t}\n\n\t\/\/ get context for the chaincode execution\n\tvar txsim ledger.TxSimulator\n\tlgr := kvledger.GetLedger(chainName)\n\ttxsim, err = lgr.NewTxSimulator()\n\tif err != nil {\n\t\tlogger.Errorf(\"Cannot obtain tx simulator, err %s\\n\", err)\n\t\treturn err\n\t}\n\tdefer txsim.Done()\n\tctxt := context.WithValue(context.Background(), chaincode.TXSimulatorKey, txsim)\n\n\t\/\/ invoke VSCC\n\t_, _, err = chaincode.ExecuteChaincode(ctxt, chainName, txid, prop, vscc, args)\n\tif err != nil {\n\t\tlogger.Errorf(\"VSCC check failed for transaction, error %s\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (d *DeliverService) readUntilClose() {\n\tfor {\n\t\tmsg, err := d.client.Recv()\n\t\tif err != nil {\n\t\t\tlogger.Warningf(\"Receive error: %s\", err.Error())\n\t\t\tif d.isDone() {\n\t\t\t\t<-d.stopChan\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tswitch t := msg.Type.(type) {\n\t\tcase *orderer.DeliverResponse_Error:\n\t\t\tif t.Error == common.Status_SUCCESS {\n\t\t\t\tlogger.Warning(\"ERROR! Received success in error field\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlogger.Warning(\"Got error \", t)\n\t\tcase *orderer.DeliverResponse_Block:\n\t\t\tseqNum := t.Block.Header.Number\n\t\t\tblock := &common.Block{}\n\t\t\tblock.Header = t.Block.Header\n\t\t\tblock.Metadata = t.Block.Metadata\n\t\t\tblock.Data = &common.BlockData{}\n\t\t\tfor _, d := range t.Block.Data.Data {\n\t\t\t\tif d != nil {\n\t\t\t\t\tif env, err := putils.GetEnvelopeFromBlock(d); err != nil {\n\t\t\t\t\t\tfmt.Printf(\"Error getting tx from block(%s)\\n\", err)\n\t\t\t\t\t} else if env != nil {\n\t\t\t\t\t\t\/\/ validate the transaction: here we check that the transaction\n\t\t\t\t\t\t\/\/ is properly formed, properly signed and that the security\n\t\t\t\t\t\t\/\/ chain binding proposal to endorsements to tx holds. We do\n\t\t\t\t\t\t\/\/ NOT check the validity of endorsements, though. That's a\n\t\t\t\t\t\t\/\/ job for VSCC below\n\t\t\t\t\t\tpayload, _, err := peer.ValidateTransaction(env)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\/\/ TODO: this code needs to receive a bit more attention and discussion:\n\t\t\t\t\t\t\t\/\/ it's not clear what it means if a transaction which causes a failure\n\t\t\t\t\t\t\t\/\/ in validation is just dropped on the floor\n\t\t\t\t\t\t\tlogger.Errorf(\"Invalid transaction, error %s\", err)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\/\/the payload is used to get headers\n\t\t\t\t\t\t\terr = isTxValidForVscc(payload, d)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\/\/ TODO: this code needs to receive a bit more attention and discussion:\n\t\t\t\t\t\t\t\t\/\/ it's not clear what it means if a transaction which causes a failure\n\t\t\t\t\t\t\t\t\/\/ in validation is just dropped on the floor\n\t\t\t\t\t\t\t\tlogger.Errorf(\"isTxValidForVscc returned error %s\", err)\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif t, err := proto.Marshal(env); err == nil {\n\t\t\t\t\t\t\t\tblock.Data.Data = append(block.Data.Data, t)\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tfmt.Printf(\"Cannot marshal transactoins %s\\n\", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlogger.Warning(\"Nil tx from block\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tnumberOfPeers := len(d.gossip.GetPeers())\n\t\t\t\/\/ Create payload with a block received\n\t\t\tpayload := createPayload(seqNum, block)\n\t\t\t\/\/ Use payload to create gossip message\n\t\t\tgossipMsg := createGossipMsg(payload)\n\t\t\tlogger.Debugf(\"Adding payload locally, buffer seqNum = [%d], peers number [%d]\", seqNum, numberOfPeers)\n\t\t\t\/\/ Add payload to local state payloads buffer\n\t\t\td.stateProvider.AddPayload(payload)\n\t\t\t\/\/ Gossip messages with other nodes\n\t\t\tlogger.Debugf(\"Gossiping block [%d], peers number [%d]\", seqNum, numberOfPeers)\n\t\t\td.gossip.Gossip(gossipMsg)\n\n\t\t\td.unAcknowledged++\n\t\t\tif d.unAcknowledged >= d.windowSize\/2 {\n\t\t\t\tlogger.Warningf(\"Sending acknowledgement [%d]\", t.Block.Header.Number)\n\t\t\t\terr = d.client.Send(&orderer.DeliverUpdate{\n\t\t\t\t\tType: &orderer.DeliverUpdate_Acknowledgement{\n\t\t\t\t\t\tAcknowledgement: &orderer.Acknowledgement{\n\t\t\t\t\t\t\tNumber: seqNum,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\td.unAcknowledged = 0\n\t\t\t}\n\t\tdefault:\n\t\t\tlogger.Warning(\"Received unknown: \", t)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc createGossipMsg(payload *gossip_proto.Payload) *gossip_proto.GossipMessage {\n\tgossipMsg := &gossip_proto.GossipMessage{\n\t\tNonce: 0,\n\t\tContent: &gossip_proto.GossipMessage_DataMsg{\n\t\t\tDataMsg: &gossip_proto.DataMessage{\n\t\t\t\tPayload: payload,\n\t\t\t},\n\t\t},\n\t}\n\treturn gossipMsg\n}\n\nfunc createPayload(seqNum uint64, block *common.Block) *gossip_proto.Payload {\n\tmarshaledBlock, _ := proto.Marshal(block)\n\treturn &gossip_proto.Payload{\n\t\tData: marshaledBlock,\n\t\tSeqNum: seqNum,\n\t}\n}\n<commit_msg>[FAB-1275] fix peer process doesn't stop issue<commit_after>\/*\nCopyright IBM Corp. 2016 All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage noopssinglechain\n\nimport (\n\t\"time\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/hyperledger\/fabric\/core\/chaincode\"\n\t\"github.com\/hyperledger\/fabric\/core\/committer\"\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\/kvledger\"\n\t\"github.com\/hyperledger\/fabric\/core\/util\"\n\t\"github.com\/hyperledger\/fabric\/protos\/common\"\n\t\"github.com\/hyperledger\/fabric\/protos\/orderer\"\n\tputils \"github.com\/hyperledger\/fabric\/protos\/utils\"\n\t\"github.com\/op\/go-logging\"\n\t\"github.com\/spf13\/viper\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\n\t\"fmt\"\n\n\t\"github.com\/hyperledger\/fabric\/core\/ledger\"\n\t\"github.com\/hyperledger\/fabric\/core\/peer\"\n\t\"github.com\/hyperledger\/fabric\/gossip\/gossip\"\n\t\"github.com\/hyperledger\/fabric\/gossip\/integration\"\n\tgossip_proto \"github.com\/hyperledger\/fabric\/gossip\/proto\"\n\t\"github.com\/hyperledger\/fabric\/gossip\/state\"\n\tpb \"github.com\/hyperledger\/fabric\/protos\/peer\"\n)\n\nvar logger *logging.Logger \/\/ package-level logger\n\nfunc init() {\n\tlogger = logging.MustGetLogger(\"committer\")\n\tlogging.SetLevel(logging.DEBUG, logger.Module)\n}\n\n\/\/ DeliverService used to communicate with orderers to obtain\n\/\/ new block and send the to the committer service\ntype DeliverService struct {\n\tclient orderer.AtomicBroadcast_DeliverClient\n\twindowSize uint64\n\tunAcknowledged uint64\n\tcommitter *committer.LedgerCommitter\n\n\tstateProvider state.GossipStateProvider\n\tgossip gossip.Gossip\n\tconn *grpc.ClientConn\n}\n\n\/\/ StopDeliveryService sends stop to the delivery service reference\nfunc StopDeliveryService(service *DeliverService) {\n\tif service != nil {\n\t\tservice.Stop()\n\t}\n}\n\n\/\/ NewDeliverService construction function to create and initilize\n\/\/ delivery service instance\nfunc NewDeliverService(chainID string, address string, grpcServer *grpc.Server) *DeliverService {\n\tif viper.GetBool(\"peer.committer.enabled\") {\n\t\tlogger.Infof(\"Creating committer for single noops endorser\")\n\n\t\tdeliverService := &DeliverService{\n\t\t\t\/\/ Instance of RawLedger\n\t\t\tcommitter: committer.NewLedgerCommitter(kvledger.GetLedger(chainID)),\n\t\t\twindowSize: 10,\n\t\t}\n\n\t\tdeliverService.initStateProvider(address, grpcServer)\n\n\t\treturn deliverService\n\t}\n\tlogger.Infof(\"Committer disabled\")\n\treturn nil\n}\n\nfunc (d *DeliverService) startDeliver() error {\n\tlogger.Info(\"Starting deliver service client\")\n\terr := d.initDeliver()\n\n\tif err != nil {\n\t\tlogger.Errorf(\"Can't initiate deliver protocol [%s]\", err)\n\t\treturn err\n\t}\n\n\theight, err := d.committer.LedgerHeight()\n\tif err != nil {\n\t\tlogger.Errorf(\"Can't get legder height from committer [%s]\", err)\n\t\treturn err\n\t}\n\n\tif height > 0 {\n\t\tlogger.Debugf(\"Starting deliver with block [%d]\", height)\n\t\tif err := d.seekLatestFromCommitter(height); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t} else {\n\t\tlogger.Debug(\"Starting deliver with olders block\")\n\t\tif err := d.seekOldest(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\td.readUntilClose()\n\n\treturn nil\n}\n\nfunc (d *DeliverService) initDeliver() error {\n\topts := []grpc.DialOption{grpc.WithInsecure(), grpc.WithTimeout(3 * time.Second), grpc.WithBlock()}\n\tendpoint := viper.GetString(\"peer.committer.ledger.orderer\")\n\tconn, err := grpc.Dial(endpoint, opts...)\n\tif err != nil {\n\t\tlogger.Errorf(\"Cannot dial to %s, because of %s\", endpoint, err)\n\t\treturn err\n\t}\n\tvar abc orderer.AtomicBroadcast_DeliverClient\n\tabc, err = orderer.NewAtomicBroadcastClient(conn).Deliver(context.TODO())\n\tif err != nil {\n\t\tlogger.Errorf(\"Unable to initialize atomic broadcast, due to %s\", err)\n\t\treturn err\n\t}\n\n\t\/\/ Atomic Broadcast Deliver Client\n\td.client = abc\n\td.conn = conn\n\treturn nil\n\n}\n\nfunc (d *DeliverService) stopDeliver() {\n\tif d.conn != nil {\n\t\td.conn.Close()\n\t}\n}\n\nfunc (d *DeliverService) initStateProvider(address string, grpcServer *grpc.Server) error {\n\tbootstrap := viper.GetStringSlice(\"peer.gossip.bootstrap\")\n\tlogger.Debug(\"Initializing state provideer, endpoint = \", address, \" bootstrap set = \", bootstrap)\n\n\tgossip, gossipComm := integration.NewGossipComponent(address, grpcServer, bootstrap...)\n\n\td.gossip = gossip\n\td.stateProvider = state.NewGossipStateProvider(gossip, gossipComm, d.committer)\n\treturn nil\n}\n\n\/\/ Start the delivery service to read the block via delivery\n\/\/ protocol from the orderers\nfunc (d *DeliverService) Start() {\n\tgo d.checkLeaderAndRunDeliver()\n}\n\n\/\/ Stop all service and release resources\nfunc (d *DeliverService) Stop() {\n\td.stopDeliver()\n\td.stateProvider.Stop()\n\td.gossip.Stop()\n}\n\nfunc (d *DeliverService) checkLeaderAndRunDeliver() {\n\n\tisLeader := viper.GetBool(\"peer.gossip.orgLeader\")\n\n\tif isLeader {\n\t\td.startDeliver()\n\t}\n}\n\nfunc (d *DeliverService) seekOldest() error {\n\treturn d.client.Send(&orderer.DeliverUpdate{\n\t\tType: &orderer.DeliverUpdate_Seek{\n\t\t\tSeek: &orderer.SeekInfo{\n\t\t\t\tStart: orderer.SeekInfo_OLDEST,\n\t\t\t\tWindowSize: d.windowSize,\n\t\t\t\tChainID: util.GetTestChainID(),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc (d *DeliverService) seekLatestFromCommitter(height uint64) error {\n\treturn d.client.Send(&orderer.DeliverUpdate{\n\t\tType: &orderer.DeliverUpdate_Seek{\n\t\t\tSeek: &orderer.SeekInfo{\n\t\t\t\tStart: orderer.SeekInfo_SPECIFIED,\n\t\t\t\tWindowSize: d.windowSize,\n\t\t\t\tSpecifiedNumber: height,\n\t\t\t\tChainID: util.GetTestChainID(),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc isTxValidForVscc(payload *common.Payload, envBytes []byte) error {\n\t\/\/ TODO: Extract the VSCC\/policy from LCCC as soon as this is ready\n\tvscc := \"vscc\"\n\n\tchainName := payload.Header.ChainHeader.ChainID\n\tif chainName == \"\" {\n\t\terr := fmt.Errorf(\"transaction header does not contain an chain ID\")\n\t\tlogger.Errorf(\"%s\", err)\n\t\treturn err\n\t}\n\n\ttxid := \"N\/A\" \/\/ FIXME: is that appropriate?\n\n\t\/\/ build arguments for VSCC invocation\n\t\/\/ args[0] - function name (not used now)\n\t\/\/ args[1] - serialized Envelope\n\targs := [][]byte{[]byte(\"\"), envBytes}\n\n\t\/\/ create VSCC invocation proposal\n\tvsccCis := &pb.ChaincodeInvocationSpec{ChaincodeSpec: &pb.ChaincodeSpec{Type: pb.ChaincodeSpec_GOLANG, ChaincodeID: &pb.ChaincodeID{Name: vscc}, CtorMsg: &pb.ChaincodeInput{Args: args}}}\n\tprop, err := putils.CreateProposalFromCIS(txid, chainName, vsccCis, []byte(\"\"))\n\tif err != nil {\n\t\tlogger.Errorf(\"Cannot create a proposal to invoke VSCC, err %s\\n\", err)\n\t\treturn err\n\t}\n\n\t\/\/ get context for the chaincode execution\n\tvar txsim ledger.TxSimulator\n\tlgr := kvledger.GetLedger(chainName)\n\ttxsim, err = lgr.NewTxSimulator()\n\tif err != nil {\n\t\tlogger.Errorf(\"Cannot obtain tx simulator, err %s\\n\", err)\n\t\treturn err\n\t}\n\tdefer txsim.Done()\n\tctxt := context.WithValue(context.Background(), chaincode.TXSimulatorKey, txsim)\n\n\t\/\/ invoke VSCC\n\t_, _, err = chaincode.ExecuteChaincode(ctxt, chainName, txid, prop, vscc, args)\n\tif err != nil {\n\t\tlogger.Errorf(\"VSCC check failed for transaction, error %s\", err)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (d *DeliverService) readUntilClose() {\n\tfor {\n\t\tmsg, err := d.client.Recv()\n\t\tif err != nil {\n\t\t\tlogger.Warningf(\"Receive error: %s\", err.Error())\n\t\t\treturn\n\t\t}\n\t\tswitch t := msg.Type.(type) {\n\t\tcase *orderer.DeliverResponse_Error:\n\t\t\tif t.Error == common.Status_SUCCESS {\n\t\t\t\tlogger.Warning(\"ERROR! Received success in error field\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tlogger.Warning(\"Got error \", t)\n\t\tcase *orderer.DeliverResponse_Block:\n\t\t\tseqNum := t.Block.Header.Number\n\t\t\tblock := &common.Block{}\n\t\t\tblock.Header = t.Block.Header\n\t\t\tblock.Metadata = t.Block.Metadata\n\t\t\tblock.Data = &common.BlockData{}\n\t\t\tfor _, d := range t.Block.Data.Data {\n\t\t\t\tif d != nil {\n\t\t\t\t\tif env, err := putils.GetEnvelopeFromBlock(d); err != nil {\n\t\t\t\t\t\tfmt.Printf(\"Error getting tx from block(%s)\\n\", err)\n\t\t\t\t\t} else if env != nil {\n\t\t\t\t\t\t\/\/ validate the transaction: here we check that the transaction\n\t\t\t\t\t\t\/\/ is properly formed, properly signed and that the security\n\t\t\t\t\t\t\/\/ chain binding proposal to endorsements to tx holds. We do\n\t\t\t\t\t\t\/\/ NOT check the validity of endorsements, though. That's a\n\t\t\t\t\t\t\/\/ job for VSCC below\n\t\t\t\t\t\tpayload, _, err := peer.ValidateTransaction(env)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\/\/ TODO: this code needs to receive a bit more attention and discussion:\n\t\t\t\t\t\t\t\/\/ it's not clear what it means if a transaction which causes a failure\n\t\t\t\t\t\t\t\/\/ in validation is just dropped on the floor\n\t\t\t\t\t\t\tlogger.Errorf(\"Invalid transaction, error %s\", err)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\/\/the payload is used to get headers\n\t\t\t\t\t\t\terr = isTxValidForVscc(payload, d)\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\t\/\/ TODO: this code needs to receive a bit more attention and discussion:\n\t\t\t\t\t\t\t\t\/\/ it's not clear what it means if a transaction which causes a failure\n\t\t\t\t\t\t\t\t\/\/ in validation is just dropped on the floor\n\t\t\t\t\t\t\t\tlogger.Errorf(\"isTxValidForVscc returned error %s\", err)\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif t, err := proto.Marshal(env); err == nil {\n\t\t\t\t\t\t\t\tblock.Data.Data = append(block.Data.Data, t)\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tfmt.Printf(\"Cannot marshal transactoins %s\\n\", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tlogger.Warning(\"Nil tx from block\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tnumberOfPeers := len(d.gossip.GetPeers())\n\t\t\t\/\/ Create payload with a block received\n\t\t\tpayload := createPayload(seqNum, block)\n\t\t\t\/\/ Use payload to create gossip message\n\t\t\tgossipMsg := createGossipMsg(payload)\n\t\t\tlogger.Debugf(\"Adding payload locally, buffer seqNum = [%d], peers number [%d]\", seqNum, numberOfPeers)\n\t\t\t\/\/ Add payload to local state payloads buffer\n\t\t\td.stateProvider.AddPayload(payload)\n\t\t\t\/\/ Gossip messages with other nodes\n\t\t\tlogger.Debugf(\"Gossiping block [%d], peers number [%d]\", seqNum, numberOfPeers)\n\t\t\td.gossip.Gossip(gossipMsg)\n\n\t\t\td.unAcknowledged++\n\t\t\tif d.unAcknowledged >= d.windowSize\/2 {\n\t\t\t\tlogger.Warningf(\"Sending acknowledgement [%d]\", t.Block.Header.Number)\n\t\t\t\terr = d.client.Send(&orderer.DeliverUpdate{\n\t\t\t\t\tType: &orderer.DeliverUpdate_Acknowledgement{\n\t\t\t\t\t\tAcknowledgement: &orderer.Acknowledgement{\n\t\t\t\t\t\t\tNumber: seqNum,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\td.unAcknowledged = 0\n\t\t\t}\n\t\tdefault:\n\t\t\tlogger.Warning(\"Received unknown: \", t)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc createGossipMsg(payload *gossip_proto.Payload) *gossip_proto.GossipMessage {\n\tgossipMsg := &gossip_proto.GossipMessage{\n\t\tNonce: 0,\n\t\tContent: &gossip_proto.GossipMessage_DataMsg{\n\t\t\tDataMsg: &gossip_proto.DataMessage{\n\t\t\t\tPayload: payload,\n\t\t\t},\n\t\t},\n\t}\n\treturn gossipMsg\n}\n\nfunc createPayload(seqNum uint64, block *common.Block) *gossip_proto.Payload {\n\tmarshaledBlock, _ := proto.Marshal(block)\n\treturn &gossip_proto.Payload{\n\t\tData: marshaledBlock,\n\t\tSeqNum: seqNum,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package yalzo\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc ReadCSV(r io.Reader) ([]Todo, []Todo, error) {\n\tscanner := bufio.NewScanner(r)\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\ttodos := make([]Todo, 0, 100)\n\tarchs := make([]Todo, 0, 100)\n\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\titems := strings.Split(line, \",\")\n\t\tarch := false\n\n\t\tno, err := strconv.Atoi(strings.TrimSpace(items[0]))\n\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tif strings.TrimSpace(items[3]) == \"true\" {\n\t\t\tarch = true\n\t\t}\n\n\t\ttodo := &Todo{\n\t\t\tno: no,\n\t\t\tlabel: strings.TrimSpace(items[1]),\n\t\t\ttitle: strings.TrimSpace(items[2]),\n\t\t\tisArchived: arch,\n\t\t}\n\n\t\tif arch {\n\t\t\tarchs = append(archs, (*todo))\n\t\t} else {\n\t\t\ttodos = append(todos, (*todo))\n\t\t}\n\t}\n\treturn todos, archs, nil\n}\n\nfunc SaveCSV(todos []Todo, archs []Todo, r io.Reader) {\n\n}\n<commit_msg>SaveCSVの実装<commit_after>package yalzo\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc ReadCSV(fp *os.File) ([]Todo, []Todo, error) {\n\tscanner := bufio.NewScanner(fp)\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\ttodos := make([]Todo, 0, 100)\n\tarchs := make([]Todo, 0, 100)\n\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\titems := strings.Split(line, \",\")\n\t\tarch := false\n\n\t\tno, err := strconv.Atoi(strings.TrimSpace(items[0]))\n\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tif strings.TrimSpace(items[3]) == \"true\" {\n\t\t\tarch = true\n\t\t}\n\n\t\ttodo := &Todo{\n\t\t\tno: no,\n\t\t\tlabel: strings.TrimSpace(items[1]),\n\t\t\ttitle: strings.TrimSpace(items[2]),\n\t\t\tisArchived: arch,\n\t\t}\n\n\t\tif arch {\n\t\t\tarchs = append(archs, (*todo))\n\t\t} else {\n\t\t\ttodos = append(todos, (*todo))\n\t\t}\n\t}\n\treturn todos, archs, nil\n}\n\nfunc SaveCSV(todos []Todo, archs []Todo, fp *os.File) {\n\tw := bufio.NewWriter(fp)\n\tcsv_list := append(createTodoCSV(todos), createTodoCSV(archs)...)\n\tbuf := bytes.NewBufferString(\"\")\n\tfor i := 0; i < len(csv_list); i++ {\n\t\tbuf.WriteString(csv_list[i])\n\t\t\/\/ if not last offset, append return '\\n' to tail.\n\t\tif i+1 != len(csv_list[i]) {\n\t\t\tbuf.WriteString(\"\\n\")\n\t\t}\n\t}\n}\n\nfunc createTodoCSV(todos []Todo) []string {\n\tcsv := make([]string, 0, len(todos))\n\tfor i := 0; i < len(todos); i++ {\n\t\ttodo := todos[i]\n\t\tbuf := bytes.NewBufferString(strconv.Itoa(todo.no))\n\t\tbuf.WriteString(\",\")\n\t\tbuf.WriteString(todo.label)\n\t\tbuf.WriteString(\",\")\n\t\tbuf.WriteString(todo.title)\n\t\tbuf.WriteString(\",\")\n\t\tif todo.isArchived {\n\t\t\tbuf.WriteString(\"true\")\n\t\t} else {\n\t\t\tbuf.WriteString(\"false\")\n\t\t}\n\t\tcsv[i] = buf.String()\n\t}\n\treturn csv\n}\n<|endoftext|>"} {"text":"<commit_before>package ctx\n\nimport \"sync\"\n\n\/\/ Doner can block until something is done\ntype Doner interface {\n\tDone() <-chan struct{}\n}\n\ntype doneChan <-chan struct{}\n\nfunc (dc doneChan) Done() <-chan struct{} { return dc }\n\n\/\/ Lift takes a chan and wraps it in a Doner\nfunc Lift(c <-chan struct{}) Doner { return doneChan(c) }\n\n\/\/ Tick returns a <-chan whose range ends when the underlying context cancels\nfunc Tick(d Doner) <-chan struct{} {\n\tcq := make(chan struct{})\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-d.Done():\n\t\t\t\tclose(cq)\n\t\t\t\treturn\n\t\t\tcase cq <- struct{}{}:\n\t\t\t}\n\t\t}\n\t}()\n\treturn cq\n}\n\n\/\/ Defer guarantees that a function will be called after a context has cancelled\nfunc Defer(d Doner, cb func()) {\n\tgo func() {\n\t\t<-d.Done()\n\t\tcb()\n\t}()\n}\n\n\/\/ Link ties the lifetime of the Doners to each other. Link returns a channel\n\/\/ that fires if ANY of the constituent Doners have fired.\nfunc Link(doners ...Doner) <-chan struct{} {\n\tc := make(chan struct{})\n\tcancel := func() { close(c) }\n\n\tvar once sync.Once\n\tfor _, d := range doners {\n\t\tDefer(d, func() { once.Do(cancel) })\n\t}\n\n\treturn c\n}\n\n\/\/ Join returns a channel that receives when all constituent Doners have fired\nfunc Join(doners ...Doner) <-chan struct{} {\n\tvar wg sync.WaitGroup\n\twg.Add(len(doners))\n\tfor _, d := range doners {\n\t\tDefer(d, wg.Done)\n\t}\n\n\tcq := make(chan struct{})\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(cq)\n\t}()\n\treturn cq\n}\n\n\/\/ FTick calls a function in a loop until the Doner has fired\nfunc FTick(d Doner, f func()) {\n\tfor _ = range Tick(d) {\n\t\tf()\n\t}\n}\n<commit_msg>Add AsContext and FTimerTick<commit_after>package ctx\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ Doner can block until something is done\ntype Doner interface {\n\tDone() <-chan struct{}\n}\n\ntype doneChan <-chan struct{}\n\nfunc (dc doneChan) Done() <-chan struct{} { return dc }\n\n\/\/ Lift takes a chan and wraps it in a Doner\nfunc Lift(c <-chan struct{}) Doner { return doneChan(c) }\n\n\/\/ AsContext creates a context that fires when the Doner fires\nfunc AsContext(d Doner) context.Context {\n\tc, cancel := context.WithCancel(context.Background())\n\tDefer(d, cancel)\n\treturn c\n}\n\n\/\/ Tick returns a <-chan whose range ends when the underlying context cancels\nfunc Tick(d Doner) <-chan struct{} {\n\tcq := make(chan struct{})\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-d.Done():\n\t\t\t\tclose(cq)\n\t\t\t\treturn\n\t\t\tcase cq <- struct{}{}:\n\t\t\t}\n\t\t}\n\t}()\n\treturn cq\n}\n\n\/\/ Defer guarantees that a function will be called after a context has cancelled\nfunc Defer(d Doner, cb func()) {\n\tgo func() {\n\t\t<-d.Done()\n\t\tcb()\n\t}()\n}\n\n\/\/ Link ties the lifetime of the Doners to each other. Link returns a channel\n\/\/ that fires if ANY of the constituent Doners have fired.\nfunc Link(doners ...Doner) <-chan struct{} {\n\tc := make(chan struct{})\n\tcancel := func() { close(c) }\n\n\tvar once sync.Once\n\tfor _, d := range doners {\n\t\tDefer(d, func() { once.Do(cancel) })\n\t}\n\n\treturn c\n}\n\n\/\/ Join returns a channel that receives when all constituent Doners have fired\nfunc Join(doners ...Doner) <-chan struct{} {\n\tvar wg sync.WaitGroup\n\twg.Add(len(doners))\n\tfor _, d := range doners {\n\t\tDefer(d, wg.Done)\n\t}\n\n\tcq := make(chan struct{})\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(cq)\n\t}()\n\treturn cq\n}\n\n\/\/ FTick calls a function in a loop until the Doner has fired\nfunc FTick(d Doner, f func()) {\n\tfor _ = range Tick(d) {\n\t\tf()\n\t}\n}\n\n\/\/ FTimerTick calls a function repeatedly at a given internval, until the Doner\n\/\/ has fired.\nfunc FTimerTick(d Doner, t time.Duration, f func()) {\n\tfor _ = range time.NewTicker(t).C {\n\t\tselect {\n\t\tcase <-d.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tf()\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package service\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/gomodule\/redigo\/redis\"\n\t\"github.com\/kataras\/iris\/core\/errors\"\n)\n\nvar (\n\t\/\/ ErrRedisClosed an error with message 'Redis is already closed'\n\tErrRedisClosed = errors.New(\"Redis is already closed\")\n\t\/\/ ErrKeyNotFound an error with message 'Key $thekey doesn't found'\n\tErrKeyNotFound = errors.New(\"Key '%s' doesn't found\")\n)\n\n\/\/ Service the Redis service, contains the config and the redis pool\ntype Service struct {\n\t\/\/ Connected is true when the Service has already connected\n\tConnected bool\n\t\/\/ Config the redis config for this redis\n\tConfig *Config\n\tpool *redis.Pool\n}\n\n\/\/ PingPong sends a ping and receives a pong, if no pong received then returns false and filled error\nfunc (r *Service) PingPong() (bool, error) {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\tmsg, err := c.Do(\"PING\")\n\tif err != nil || msg == nil {\n\t\treturn false, err\n\t}\n\treturn (msg == \"PONG\"), nil\n}\n\n\/\/ CloseConnection closes the redis connection\nfunc (r *Service) CloseConnection() error {\n\tif r.pool != nil {\n\t\treturn r.pool.Close()\n\t}\n\treturn ErrRedisClosed\n}\n\n\/\/ Set sets a key-value to the redis store.\n\/\/ The expiration is setted by the MaxAgeSeconds.\nfunc (r *Service) Set(key string, value interface{}, secondsLifetime int64) (err error) {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\tif c.Err() != nil {\n\t\treturn c.Err()\n\t}\n\n\t\/\/ if has expiration, then use the \"EX\" to delete the key automatically.\n\tif secondsLifetime > 0 {\n\t\t_, err = c.Do(\"SETEX\", r.Config.Prefix+key, secondsLifetime, value)\n\t} else {\n\t\t_, err = c.Do(\"SET\", r.Config.Prefix+key, value)\n\t}\n\n\treturn\n}\n\n\/\/ Get returns value, err by its key\n\/\/returns nil and a filled error if something bad happened.\nfunc (r *Service) Get(key string) (interface{}, error) {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\tif err := c.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tredisVal, err := c.Do(\"GET\", r.Config.Prefix+key)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif redisVal == nil {\n\t\treturn nil, ErrKeyNotFound.Format(key)\n\t}\n\treturn redisVal, nil\n}\n\n\/\/ TTL returns the seconds to expire, if the key has expiration and error if action failed.\n\/\/ Read more at: https:\/\/redis.io\/commands\/ttl\nfunc (r *Service) TTL(key string) (seconds int64, hasExpiration bool, found bool) {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\tredisVal, err := c.Do(\"TTL\", r.Config.Prefix+key)\n\tif err != nil {\n\t\treturn -2, false, false\n\t}\n\tseconds = redisVal.(int64)\n\t\/\/ if -1 means the key has unlimited life time.\n\thasExpiration = seconds > -1\n\t\/\/ if -2 means key does not exist.\n\tfound = !(c.Err() != nil || seconds == -2)\n\treturn\n}\n\nfunc (r *Service) updateTTLConn(c redis.Conn, key string, newSecondsLifeTime int64) error {\n\treply, err := c.Do(\"EXPIRE\", r.Config.Prefix+key, newSecondsLifeTime)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ https:\/\/redis.io\/commands\/expire#return-value\n\t\/\/\n\t\/\/ 1 if the timeout was set.\n\t\/\/ 0 if key does not exist.\n\tif hadTTLOrExists, ok := reply.(int); ok {\n\t\tif hadTTLOrExists == 1 {\n\t\t\treturn nil\n\t\t} else if hadTTLOrExists == 0 {\n\t\t\treturn fmt.Errorf(\"unable to update expiration, the key '%s' was stored without ttl\", key)\n\t\t} \/\/ do not check for -1.\n\t}\n\n\treturn nil\n}\n\n\/\/ UpdateTTL will update the ttl of a key.\n\/\/ Using the \"EXPIRE\" command.\n\/\/ Read more at: https:\/\/redis.io\/commands\/expire#refreshing-expires\nfunc (r *Service) UpdateTTL(key string, newSecondsLifeTime int64) error {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\terr := c.Err()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn r.updateTTLConn(c, key, newSecondsLifeTime)\n}\n\n\/\/ UpdateTTLMany like `UpdateTTL` but for all keys starting with that \"prefix\",\n\/\/ it is a bit faster operation if you need to update all sessions keys (although it can be even faster if we used hash but this will limit other features),\n\/\/ look the `sessions\/Database#OnUpdateExpiration` for example.\nfunc (r *Service) UpdateTTLMany(prefix string, newSecondsLifeTime int64) error {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\tif err := c.Err(); err != nil {\n\t\treturn err\n\t}\n\n\tkeys, err := r.getKeysConn(c, prefix)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, key := range keys {\n\t\tif err = r.updateTTLConn(c, key, newSecondsLifeTime); err != nil { \/\/ fail on first error.\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn err\n}\n\n\/\/ GetAll returns all redis entries using the \"SCAN\" command (2.8+).\nfunc (r *Service) GetAll() (interface{}, error) {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\tif err := c.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tredisVal, err := c.Do(\"SCAN\", 0) \/\/ 0 -> cursor\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif redisVal == nil {\n\t\treturn nil, err\n\t}\n\n\treturn redisVal, nil\n}\n\nfunc (r *Service) getKeysConn(c redis.Conn, prefix string) ([]string, error) {\n\tif err := c.Send(\"SCAN\", 0, \"MATCH\", r.Config.Prefix+prefix+\"*\", \"COUNT\", 9999999999); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := c.Flush(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treply, err := c.Receive()\n\tif err != nil || reply == nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ it returns []interface, with two entries, the first one is \"0\" and the second one is a slice of the keys as []interface{uint8....}.\n\n\tif keysInterface, ok := reply.([]interface{}); ok {\n\t\tif len(keysInterface) == 2 {\n\t\t\t\/\/ take the second, it must contain the slice of keys.\n\t\t\tif keysSliceAsBytes, ok := keysInterface[1].([]interface{}); ok {\n\t\t\t\tkeys := make([]string, len(keysSliceAsBytes), len(keysSliceAsBytes))\n\t\t\t\tfor i, k := range keysSliceAsBytes {\n\t\t\t\t\tkeys[i] = fmt.Sprintf(\"%s\", k)[len(r.Config.Prefix):]\n\t\t\t\t}\n\n\t\t\t\treturn keys, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil, nil\n}\n\n\/\/ GetKeys returns all redis keys using the \"SCAN\" with MATCH command.\n\/\/ Read more at: https:\/\/redis.io\/commands\/scan#the-match-option.\nfunc (r *Service) GetKeys(prefix string) ([]string, error) {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\tif err := c.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r.getKeysConn(c, prefix)\n}\n\n\/\/ GetBytes returns value, err by its key\n\/\/ you can use utils.Deserialize((.GetBytes(\"yourkey\"),&theobject{})\n\/\/returns nil and a filled error if something wrong happens\nfunc (r *Service) GetBytes(key string) ([]byte, error) {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\tif err := c.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tredisVal, err := c.Do(\"GET\", r.Config.Prefix+key)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif redisVal == nil {\n\t\treturn nil, ErrKeyNotFound.Format(key)\n\t}\n\n\treturn redis.Bytes(redisVal, err)\n}\n\n\/\/ Delete removes redis entry by specific key\nfunc (r *Service) Delete(key string) error {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\n\t_, err := c.Do(\"DEL\", r.Config.Prefix+key)\n\treturn err\n}\n\nfunc dial(network string, addr string, pass string) (redis.Conn, error) {\n\tif network == \"\" {\n\t\tnetwork = DefaultRedisNetwork\n\t}\n\tif addr == \"\" {\n\t\taddr = DefaultRedisAddr\n\t}\n\tc, err := redis.Dial(network, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif pass != \"\" {\n\t\tif _, err = c.Do(\"AUTH\", pass); err != nil {\n\t\t\tc.Close()\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn c, err\n}\n\n\/\/ Connect connects to the redis, called only once\nfunc (r *Service) Connect() {\n\tc := r.Config\n\n\tif c.IdleTimeout <= 0 {\n\t\tc.IdleTimeout = DefaultRedisIdleTimeout\n\t}\n\n\tif c.Network == \"\" {\n\t\tc.Network = DefaultRedisNetwork\n\t}\n\n\tif c.Addr == \"\" {\n\t\tc.Addr = DefaultRedisAddr\n\t}\n\n\tpool := &redis.Pool{IdleTimeout: DefaultRedisIdleTimeout, MaxIdle: c.MaxIdle, MaxActive: c.MaxActive}\n\tpool.TestOnBorrow = func(c redis.Conn, t time.Time) error {\n\t\t_, err := c.Do(\"PING\")\n\t\treturn err\n\t}\n\n\tif c.Database != \"\" {\n\t\tpool.Dial = func() (redis.Conn, error) {\n\t\t\tred, err := dial(c.Network, c.Addr, c.Password)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif _, err = red.Do(\"SELECT\", c.Database); err != nil {\n\t\t\t\tred.Close()\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn red, err\n\t\t}\n\t} else {\n\t\tpool.Dial = func() (redis.Conn, error) {\n\t\t\treturn dial(c.Network, c.Addr, c.Password)\n\t\t}\n\t}\n\tr.Connected = true\n\tr.pool = pool\n}\n\n\/\/ New returns a Redis service filled by the passed config\n\/\/ to connect call the .Connect().\nfunc New(cfg ...Config) *Service {\n\tc := DefaultConfig()\n\tif len(cfg) > 0 {\n\t\tc = cfg[0]\n\t}\n\tr := &Service{pool: &redis.Pool{}, Config: &c}\n\treturn r\n}\n<commit_msg>session\/redis: fix unused service config var. IdleTimeout witch was replaced by default values<commit_after>package service\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/gomodule\/redigo\/redis\"\n\t\"github.com\/kataras\/iris\/core\/errors\"\n)\n\nvar (\n\t\/\/ ErrRedisClosed an error with message 'Redis is already closed'\n\tErrRedisClosed = errors.New(\"Redis is already closed\")\n\t\/\/ ErrKeyNotFound an error with message 'Key $thekey doesn't found'\n\tErrKeyNotFound = errors.New(\"Key '%s' doesn't found\")\n)\n\n\/\/ Service the Redis service, contains the config and the redis pool\ntype Service struct {\n\t\/\/ Connected is true when the Service has already connected\n\tConnected bool\n\t\/\/ Config the redis config for this redis\n\tConfig *Config\n\tpool *redis.Pool\n}\n\n\/\/ PingPong sends a ping and receives a pong, if no pong received then returns false and filled error\nfunc (r *Service) PingPong() (bool, error) {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\tmsg, err := c.Do(\"PING\")\n\tif err != nil || msg == nil {\n\t\treturn false, err\n\t}\n\treturn (msg == \"PONG\"), nil\n}\n\n\/\/ CloseConnection closes the redis connection\nfunc (r *Service) CloseConnection() error {\n\tif r.pool != nil {\n\t\treturn r.pool.Close()\n\t}\n\treturn ErrRedisClosed\n}\n\n\/\/ Set sets a key-value to the redis store.\n\/\/ The expiration is setted by the MaxAgeSeconds.\nfunc (r *Service) Set(key string, value interface{}, secondsLifetime int64) (err error) {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\tif c.Err() != nil {\n\t\treturn c.Err()\n\t}\n\n\t\/\/ if has expiration, then use the \"EX\" to delete the key automatically.\n\tif secondsLifetime > 0 {\n\t\t_, err = c.Do(\"SETEX\", r.Config.Prefix+key, secondsLifetime, value)\n\t} else {\n\t\t_, err = c.Do(\"SET\", r.Config.Prefix+key, value)\n\t}\n\n\treturn\n}\n\n\/\/ Get returns value, err by its key\n\/\/returns nil and a filled error if something bad happened.\nfunc (r *Service) Get(key string) (interface{}, error) {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\tif err := c.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tredisVal, err := c.Do(\"GET\", r.Config.Prefix+key)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif redisVal == nil {\n\t\treturn nil, ErrKeyNotFound.Format(key)\n\t}\n\treturn redisVal, nil\n}\n\n\/\/ TTL returns the seconds to expire, if the key has expiration and error if action failed.\n\/\/ Read more at: https:\/\/redis.io\/commands\/ttl\nfunc (r *Service) TTL(key string) (seconds int64, hasExpiration bool, found bool) {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\tredisVal, err := c.Do(\"TTL\", r.Config.Prefix+key)\n\tif err != nil {\n\t\treturn -2, false, false\n\t}\n\tseconds = redisVal.(int64)\n\t\/\/ if -1 means the key has unlimited life time.\n\thasExpiration = seconds > -1\n\t\/\/ if -2 means key does not exist.\n\tfound = !(c.Err() != nil || seconds == -2)\n\treturn\n}\n\nfunc (r *Service) updateTTLConn(c redis.Conn, key string, newSecondsLifeTime int64) error {\n\treply, err := c.Do(\"EXPIRE\", r.Config.Prefix+key, newSecondsLifeTime)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ https:\/\/redis.io\/commands\/expire#return-value\n\t\/\/\n\t\/\/ 1 if the timeout was set.\n\t\/\/ 0 if key does not exist.\n\tif hadTTLOrExists, ok := reply.(int); ok {\n\t\tif hadTTLOrExists == 1 {\n\t\t\treturn nil\n\t\t} else if hadTTLOrExists == 0 {\n\t\t\treturn fmt.Errorf(\"unable to update expiration, the key '%s' was stored without ttl\", key)\n\t\t} \/\/ do not check for -1.\n\t}\n\n\treturn nil\n}\n\n\/\/ UpdateTTL will update the ttl of a key.\n\/\/ Using the \"EXPIRE\" command.\n\/\/ Read more at: https:\/\/redis.io\/commands\/expire#refreshing-expires\nfunc (r *Service) UpdateTTL(key string, newSecondsLifeTime int64) error {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\terr := c.Err()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn r.updateTTLConn(c, key, newSecondsLifeTime)\n}\n\n\/\/ UpdateTTLMany like `UpdateTTL` but for all keys starting with that \"prefix\",\n\/\/ it is a bit faster operation if you need to update all sessions keys (although it can be even faster if we used hash but this will limit other features),\n\/\/ look the `sessions\/Database#OnUpdateExpiration` for example.\nfunc (r *Service) UpdateTTLMany(prefix string, newSecondsLifeTime int64) error {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\tif err := c.Err(); err != nil {\n\t\treturn err\n\t}\n\n\tkeys, err := r.getKeysConn(c, prefix)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, key := range keys {\n\t\tif err = r.updateTTLConn(c, key, newSecondsLifeTime); err != nil { \/\/ fail on first error.\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn err\n}\n\n\/\/ GetAll returns all redis entries using the \"SCAN\" command (2.8+).\nfunc (r *Service) GetAll() (interface{}, error) {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\tif err := c.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tredisVal, err := c.Do(\"SCAN\", 0) \/\/ 0 -> cursor\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif redisVal == nil {\n\t\treturn nil, err\n\t}\n\n\treturn redisVal, nil\n}\n\nfunc (r *Service) getKeysConn(c redis.Conn, prefix string) ([]string, error) {\n\tif err := c.Send(\"SCAN\", 0, \"MATCH\", r.Config.Prefix+prefix+\"*\", \"COUNT\", 9999999999); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := c.Flush(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treply, err := c.Receive()\n\tif err != nil || reply == nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ it returns []interface, with two entries, the first one is \"0\" and the second one is a slice of the keys as []interface{uint8....}.\n\n\tif keysInterface, ok := reply.([]interface{}); ok {\n\t\tif len(keysInterface) == 2 {\n\t\t\t\/\/ take the second, it must contain the slice of keys.\n\t\t\tif keysSliceAsBytes, ok := keysInterface[1].([]interface{}); ok {\n\t\t\t\tkeys := make([]string, len(keysSliceAsBytes), len(keysSliceAsBytes))\n\t\t\t\tfor i, k := range keysSliceAsBytes {\n\t\t\t\t\tkeys[i] = fmt.Sprintf(\"%s\", k)[len(r.Config.Prefix):]\n\t\t\t\t}\n\n\t\t\t\treturn keys, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil, nil\n}\n\n\/\/ GetKeys returns all redis keys using the \"SCAN\" with MATCH command.\n\/\/ Read more at: https:\/\/redis.io\/commands\/scan#the-match-option.\nfunc (r *Service) GetKeys(prefix string) ([]string, error) {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\tif err := c.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn r.getKeysConn(c, prefix)\n}\n\n\/\/ GetBytes returns value, err by its key\n\/\/ you can use utils.Deserialize((.GetBytes(\"yourkey\"),&theobject{})\n\/\/returns nil and a filled error if something wrong happens\nfunc (r *Service) GetBytes(key string) ([]byte, error) {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\tif err := c.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tredisVal, err := c.Do(\"GET\", r.Config.Prefix+key)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif redisVal == nil {\n\t\treturn nil, ErrKeyNotFound.Format(key)\n\t}\n\n\treturn redis.Bytes(redisVal, err)\n}\n\n\/\/ Delete removes redis entry by specific key\nfunc (r *Service) Delete(key string) error {\n\tc := r.pool.Get()\n\tdefer c.Close()\n\n\t_, err := c.Do(\"DEL\", r.Config.Prefix+key)\n\treturn err\n}\n\nfunc dial(network string, addr string, pass string) (redis.Conn, error) {\n\tif network == \"\" {\n\t\tnetwork = DefaultRedisNetwork\n\t}\n\tif addr == \"\" {\n\t\taddr = DefaultRedisAddr\n\t}\n\tc, err := redis.Dial(network, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif pass != \"\" {\n\t\tif _, err = c.Do(\"AUTH\", pass); err != nil {\n\t\t\tc.Close()\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn c, err\n}\n\n\/\/ Connect connects to the redis, called only once\nfunc (r *Service) Connect() {\n\tc := r.Config\n\n\tif c.IdleTimeout <= 0 {\n\t\tc.IdleTimeout = DefaultRedisIdleTimeout\n\t}\n\n\tif c.Network == \"\" {\n\t\tc.Network = DefaultRedisNetwork\n\t}\n\n\tif c.Addr == \"\" {\n\t\tc.Addr = DefaultRedisAddr\n\t}\n\n\tpool := &redis.Pool{IdleTimeout: c.IdleTimeout, MaxIdle: c.MaxIdle, MaxActive: c.MaxActive}\n\tpool.TestOnBorrow = func(c redis.Conn, t time.Time) error {\n\t\t_, err := c.Do(\"PING\")\n\t\treturn err\n\t}\n\n\tif c.Database != \"\" {\n\t\tpool.Dial = func() (redis.Conn, error) {\n\t\t\tred, err := dial(c.Network, c.Addr, c.Password)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif _, err = red.Do(\"SELECT\", c.Database); err != nil {\n\t\t\t\tred.Close()\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn red, err\n\t\t}\n\t} else {\n\t\tpool.Dial = func() (redis.Conn, error) {\n\t\t\treturn dial(c.Network, c.Addr, c.Password)\n\t\t}\n\t}\n\tr.Connected = true\n\tr.pool = pool\n}\n\n\/\/ New returns a Redis service filled by the passed config\n\/\/ to connect call the .Connect().\nfunc New(cfg ...Config) *Service {\n\tc := DefaultConfig()\n\tif len(cfg) > 0 {\n\t\tc = cfg[0]\n\t}\n\tr := &Service{pool: &redis.Pool{}, Config: &c}\n\treturn r\n}\n<|endoftext|>"} {"text":"<commit_before>package sso\n\nimport (\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/dependency\/provider\/oauth\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/dependency\/provider\/password\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/dependency\/sso\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/dependency\/userprofile\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/response\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/authinfo\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/authtoken\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/role\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/server\/skydb\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/server\/skyerr\"\n)\n\ntype respHandler struct {\n\tRoleStore role.Store\n\tTokenStore authtoken.Store\n\tAuthInfoStore authinfo.Store\n\tOAuthAuthProvider oauth.Provider\n\tPasswordAuthProvider password.Provider\n\tUserProfileStore userprofile.Store\n\tUserID string\n}\n\nfunc (h respHandler) loginActionResp(oauthAuthInfo sso.AuthInfo) (resp interface{}, err error) {\n\t\/\/ action => login\n\tvar info authinfo.AuthInfo\n\tcreateNewUser, err := h.handleLogin(oauthAuthInfo, &info)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Create or update user profile\n\tvar userProfile userprofile.UserProfile\n\t\/\/ oauthAuthInfo.ProviderUserProfile may contains attributes like \"id\",\n\t\/\/ and it is not allowed to use it in SDK.\n\t\/\/ so here we will save authData as providerUserProfile\n\tproviderUserProfile := oauthAuthInfo.ProviderAuthData\n\tif createNewUser {\n\t\tuserProfile, err = h.UserProfileStore.CreateUserProfile(info.ID, &info, providerUserProfile)\n\t} else {\n\t\tuserProfile, err = h.UserProfileStore.UpdateUserProfile(info.ID, &info, providerUserProfile)\n\t}\n\tif err != nil {\n\t\t\/\/ TODO:\n\t\t\/\/ return proper error\n\t\terr = skyerr.NewError(skyerr.UnexpectedError, \"Unable to save user profile\")\n\t\treturn\n\t}\n\n\t\/\/ Create auth token\n\tvar token authtoken.Token\n\ttoken, err = h.TokenStore.NewToken(info.ID)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif err = h.TokenStore.Put(&token); err != nil {\n\t\tpanic(err)\n\t}\n\n\tresp = response.NewAuthResponse(info, userProfile, token.AccessToken)\n\n\t\/\/ Populate the activity time to user\n\tnow := timeNow()\n\tinfo.LastSeenAt = &now\n\tif err = h.AuthInfoStore.UpdateAuth(&info); err != nil {\n\t\terr = skyerr.MakeError(err)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (h respHandler) linkActionResp(oauthAuthInfo sso.AuthInfo) (resp interface{}, err error) {\n\t\/\/ action => link\n\t\/\/ check if provider user is already linked\n\t_, err = h.OAuthAuthProvider.GetPrincipalByProviderUserID(oauthAuthInfo.ProviderName, oauthAuthInfo.ProviderUserID)\n\tif err == nil {\n\t\terr = skyerr.NewError(skyerr.InvalidArgument, \"user linked to the provider already\")\n\t\treturn resp, err\n\t}\n\n\tif err != skydb.ErrUserNotFound {\n\t\t\/\/ some other error\n\t\treturn resp, err\n\t}\n\n\t\/\/ check if user is already linked\n\t_, err = h.OAuthAuthProvider.GetPrincipalByUserID(oauthAuthInfo.ProviderName, h.UserID)\n\tif err == nil {\n\t\terr = skyerr.NewError(skyerr.InvalidArgument, \"provider account already linked with existing user\")\n\t\treturn resp, err\n\t}\n\n\tif err != skydb.ErrUserNotFound {\n\t\t\/\/ some other error\n\t\treturn resp, err\n\t}\n\n\tvar info authinfo.AuthInfo\n\tif err = h.AuthInfoStore.GetAuth(h.UserID, &info); err != nil {\n\t\terr = skyerr.NewError(skyerr.ResourceNotFound, \"user not found\")\n\t\treturn resp, err\n\t}\n\n\t_, err = h.createPrincipalByOAuthInfo(info.ID, oauthAuthInfo)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\tresp = \"OK\"\n\treturn\n}\n\nfunc (h respHandler) handleLogin(\n\toauthAuthInfo sso.AuthInfo,\n\tinfo *authinfo.AuthInfo,\n) (createNewUser bool, err error) {\n\tnow := timeNow()\n\n\tprincipal, err := h.OAuthAuthProvider.GetPrincipalByProviderUserID(oauthAuthInfo.ProviderName, oauthAuthInfo.ProviderUserID)\n\tif err != nil {\n\t\tif err != skydb.ErrUserNotFound {\n\t\t\treturn\n\t\t}\n\t\terr = nil\n\t}\n\n\tif valid := h.PasswordAuthProvider.IsAuthDataValid(oauthAuthInfo.ProviderAuthData); valid {\n\t\t\/\/ provider authData matches app's authRecordKeys,\n\t\t\/\/ then it starts auto-link procedure.\n\t\t\/\/\n\t\t\/\/ for example, if oauthAuthInfo.ProviderAuthData is {\"email\", \"john.doe@example.com\"},\n\t\t\/\/ it will be a valid authData if authRecordKeys is [[\"username\"], [\"email\"]] or [[\"email\"]]\n\t\t\/\/ so, the oauthAuthInfo.ProviderAuthDat can be used as a password principal authData\n\t\tprincipal, err = h.authLinkUser(oauthAuthInfo)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif principal == nil {\n\t\tcreateNewUser = true\n\t\t\/\/ if there is no existed user\n\t\t\/\/ signup a new user\n\t\t*info = authinfo.NewAuthInfo()\n\t\tinfo.LastLoginAt = &now\n\n\t\t\/\/ Get default roles\n\t\tdefaultRoles, e := h.RoleStore.GetDefaultRoles()\n\t\tif e != nil {\n\t\t\terr = skyerr.NewError(skyerr.InternalQueryInvalid, \"unable to query default roles\")\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Assign default roles\n\t\tinfo.Roles = defaultRoles\n\n\t\t\/\/ Create AuthInfo\n\t\tif e = h.AuthInfoStore.CreateAuth(info); e != nil {\n\t\t\tif e == skydb.ErrUserDuplicated {\n\t\t\t\terr = skyerr.NewError(skyerr.Duplicated, \"user duplicated\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/\/ TODO:\n\t\t\t\/\/ return proper error\n\t\t\terr = skyerr.NewError(skyerr.UnexpectedError, \"Unable to save auth info\")\n\t\t\treturn\n\t\t}\n\n\t\t_, err = h.createPrincipalByOAuthInfo(info.ID, oauthAuthInfo)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\terr = h.createEmptyPasswordPrincipal(info.ID, oauthAuthInfo)\n\t} else {\n\t\tprincipal.AccessTokenResp = oauthAuthInfo.ProviderAccessTokenResp\n\t\tprincipal.UserProfile = oauthAuthInfo.ProviderUserProfile\n\t\tprincipal.UpdatedAt = &now\n\n\t\tif err = h.OAuthAuthProvider.UpdatePrincipal(principal); err != nil {\n\t\t\terr = skyerr.MakeError(err)\n\t\t\treturn\n\t\t}\n\n\t\tif e := h.AuthInfoStore.GetAuth(principal.UserID, info); e != nil {\n\t\t\tif err == skydb.ErrUserNotFound {\n\t\t\t\terr = skyerr.NewError(skyerr.ResourceNotFound, \"User not found\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = skyerr.NewError(skyerr.ResourceNotFound, \"User not found\")\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc (h respHandler) authLinkUser(oauthAuthInfo sso.AuthInfo) (*oauth.Principal, error) {\n\tprincipal := password.Principal{}\n\te := h.PasswordAuthProvider.GetPrincipalByAuthData(oauthAuthInfo.ProviderAuthData, &principal)\n\tif e == nil {\n\t\tuserID := principal.UserID\n\t\t\/\/ link user\n\t\tprincipal, err := h.createPrincipalByOAuthInfo(userID, oauthAuthInfo)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &principal, nil\n\t}\n\n\treturn nil, nil\n}\n\nfunc (h respHandler) createPrincipalByOAuthInfo(userID string, oauthAuthInfo sso.AuthInfo) (oauth.Principal, error) {\n\tnow := timeNow()\n\tprincipal := oauth.NewPrincipal()\n\tprincipal.UserID = userID\n\tprincipal.ProviderName = oauthAuthInfo.ProviderName\n\tprincipal.ProviderUserID = oauthAuthInfo.ProviderUserID\n\tprincipal.AccessTokenResp = oauthAuthInfo.ProviderAccessTokenResp\n\tprincipal.UserProfile = oauthAuthInfo.ProviderUserProfile\n\tprincipal.CreatedAt = &now\n\tprincipal.UpdatedAt = &now\n\terr := h.OAuthAuthProvider.CreatePrincipal(principal)\n\treturn principal, err\n}\n\nfunc (h respHandler) createEmptyPasswordPrincipal(userID string, oauthAuthInfo sso.AuthInfo) error {\n\tif valid := h.PasswordAuthProvider.IsAuthDataValid(oauthAuthInfo.ProviderAuthData); valid {\n\t\t\/\/ if ProviderAuthData mastches authRecordKeys, and it can't be link with current account,\n\t\t\/\/ we also creates an empty password principal for later the user can set password to it\n\t\treturn h.PasswordAuthProvider.CreatePrincipalsByAuthData(userID, \"\", oauthAuthInfo.ProviderAuthData)\n\t}\n\n\treturn nil\n}\n<commit_msg>Should link user only when oauth principal can't be found<commit_after>package sso\n\nimport (\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/dependency\/provider\/oauth\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/dependency\/provider\/password\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/dependency\/sso\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/dependency\/userprofile\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/response\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/authinfo\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/authtoken\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/role\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/server\/skydb\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/server\/skyerr\"\n)\n\ntype respHandler struct {\n\tRoleStore role.Store\n\tTokenStore authtoken.Store\n\tAuthInfoStore authinfo.Store\n\tOAuthAuthProvider oauth.Provider\n\tPasswordAuthProvider password.Provider\n\tUserProfileStore userprofile.Store\n\tUserID string\n}\n\nfunc (h respHandler) loginActionResp(oauthAuthInfo sso.AuthInfo) (resp interface{}, err error) {\n\t\/\/ action => login\n\tvar info authinfo.AuthInfo\n\tcreateNewUser, err := h.handleLogin(oauthAuthInfo, &info)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Create or update user profile\n\tvar userProfile userprofile.UserProfile\n\t\/\/ oauthAuthInfo.ProviderUserProfile may contains attributes like \"id\",\n\t\/\/ and it is not allowed to use it in SDK.\n\t\/\/ so here we will save authData as providerUserProfile\n\tproviderUserProfile := oauthAuthInfo.ProviderAuthData\n\tif createNewUser {\n\t\tuserProfile, err = h.UserProfileStore.CreateUserProfile(info.ID, &info, providerUserProfile)\n\t} else {\n\t\tuserProfile, err = h.UserProfileStore.UpdateUserProfile(info.ID, &info, providerUserProfile)\n\t}\n\tif err != nil {\n\t\t\/\/ TODO:\n\t\t\/\/ return proper error\n\t\terr = skyerr.NewError(skyerr.UnexpectedError, \"Unable to save user profile\")\n\t\treturn\n\t}\n\n\t\/\/ Create auth token\n\tvar token authtoken.Token\n\ttoken, err = h.TokenStore.NewToken(info.ID)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif err = h.TokenStore.Put(&token); err != nil {\n\t\tpanic(err)\n\t}\n\n\tresp = response.NewAuthResponse(info, userProfile, token.AccessToken)\n\n\t\/\/ Populate the activity time to user\n\tnow := timeNow()\n\tinfo.LastSeenAt = &now\n\tif err = h.AuthInfoStore.UpdateAuth(&info); err != nil {\n\t\terr = skyerr.MakeError(err)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc (h respHandler) linkActionResp(oauthAuthInfo sso.AuthInfo) (resp interface{}, err error) {\n\t\/\/ action => link\n\t\/\/ check if provider user is already linked\n\t_, err = h.OAuthAuthProvider.GetPrincipalByProviderUserID(oauthAuthInfo.ProviderName, oauthAuthInfo.ProviderUserID)\n\tif err == nil {\n\t\terr = skyerr.NewError(skyerr.InvalidArgument, \"user linked to the provider already\")\n\t\treturn resp, err\n\t}\n\n\tif err != skydb.ErrUserNotFound {\n\t\t\/\/ some other error\n\t\treturn resp, err\n\t}\n\n\t\/\/ check if user is already linked\n\t_, err = h.OAuthAuthProvider.GetPrincipalByUserID(oauthAuthInfo.ProviderName, h.UserID)\n\tif err == nil {\n\t\terr = skyerr.NewError(skyerr.InvalidArgument, \"provider account already linked with existing user\")\n\t\treturn resp, err\n\t}\n\n\tif err != skydb.ErrUserNotFound {\n\t\t\/\/ some other error\n\t\treturn resp, err\n\t}\n\n\tvar info authinfo.AuthInfo\n\tif err = h.AuthInfoStore.GetAuth(h.UserID, &info); err != nil {\n\t\terr = skyerr.NewError(skyerr.ResourceNotFound, \"user not found\")\n\t\treturn resp, err\n\t}\n\n\t_, err = h.createPrincipalByOAuthInfo(info.ID, oauthAuthInfo)\n\tif err != nil {\n\t\treturn resp, err\n\t}\n\tresp = \"OK\"\n\treturn\n}\n\nfunc (h respHandler) handleLogin(\n\toauthAuthInfo sso.AuthInfo,\n\tinfo *authinfo.AuthInfo,\n) (createNewUser bool, err error) {\n\tprincipal, err := h.findPrincipal(oauthAuthInfo)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tnow := timeNow()\n\tif principal == nil {\n\t\tcreateNewUser = true\n\t\t\/\/ if there is no existed user\n\t\t\/\/ signup a new user\n\t\t*info = authinfo.NewAuthInfo()\n\t\tinfo.LastLoginAt = &now\n\n\t\t\/\/ Get default roles\n\t\tdefaultRoles, e := h.RoleStore.GetDefaultRoles()\n\t\tif e != nil {\n\t\t\terr = skyerr.NewError(skyerr.InternalQueryInvalid, \"unable to query default roles\")\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Assign default roles\n\t\tinfo.Roles = defaultRoles\n\n\t\t\/\/ Create AuthInfo\n\t\tif e = h.AuthInfoStore.CreateAuth(info); e != nil {\n\t\t\tif e == skydb.ErrUserDuplicated {\n\t\t\t\terr = skyerr.NewError(skyerr.Duplicated, \"user duplicated\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\t\/\/ TODO:\n\t\t\t\/\/ return proper error\n\t\t\terr = skyerr.NewError(skyerr.UnexpectedError, \"Unable to save auth info\")\n\t\t\treturn\n\t\t}\n\n\t\t_, err = h.createPrincipalByOAuthInfo(info.ID, oauthAuthInfo)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\terr = h.createEmptyPasswordPrincipal(info.ID, oauthAuthInfo)\n\t} else {\n\t\tprincipal.AccessTokenResp = oauthAuthInfo.ProviderAccessTokenResp\n\t\tprincipal.UserProfile = oauthAuthInfo.ProviderUserProfile\n\t\tprincipal.UpdatedAt = &now\n\n\t\tif err = h.OAuthAuthProvider.UpdatePrincipal(principal); err != nil {\n\t\t\terr = skyerr.MakeError(err)\n\t\t\treturn\n\t\t}\n\n\t\tif e := h.AuthInfoStore.GetAuth(principal.UserID, info); e != nil {\n\t\t\tif err == skydb.ErrUserNotFound {\n\t\t\t\terr = skyerr.NewError(skyerr.ResourceNotFound, \"User not found\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = skyerr.NewError(skyerr.ResourceNotFound, \"User not found\")\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc (h respHandler) findPrincipal(oauthAuthInfo sso.AuthInfo) (*oauth.Principal, error) {\n\t\/\/ find oauth principal from principal_oauth\n\tprincipal, err := h.OAuthAuthProvider.GetPrincipalByProviderUserID(oauthAuthInfo.ProviderName, oauthAuthInfo.ProviderUserID)\n\tif err != nil {\n\t\tif err != skydb.ErrUserNotFound {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\treturn principal, nil\n\t}\n\n\t\/\/ if oauth principal doesn't exist, try to link existed password principal\n\tif valid := h.PasswordAuthProvider.IsAuthDataValid(oauthAuthInfo.ProviderAuthData); valid {\n\t\t\/\/ provider authData matches app's authRecordKeys,\n\t\t\/\/ then it starts auto-link procedure.\n\t\t\/\/\n\t\t\/\/ for example, if oauthAuthInfo.ProviderAuthData is {\"email\", \"john.doe@example.com\"},\n\t\t\/\/ it will be a valid authData if authRecordKeys is [[\"username\"], [\"email\"]] or [[\"email\"]]\n\t\t\/\/ so, the oauthAuthInfo.ProviderAuthDat can be used as a password principal authData\n\t\treturn h.authLinkUser(oauthAuthInfo)\n\t}\n\n\treturn nil, nil\n}\n\nfunc (h respHandler) authLinkUser(oauthAuthInfo sso.AuthInfo) (*oauth.Principal, error) {\n\tpasswordPrincipal := password.Principal{}\n\te := h.PasswordAuthProvider.GetPrincipalByAuthData(oauthAuthInfo.ProviderAuthData, &passwordPrincipal)\n\tif e == nil {\n\t\tuserID := passwordPrincipal.UserID\n\t\t\/\/ link password principal to oauth principal\n\t\toauthPrincipal, err := h.createPrincipalByOAuthInfo(userID, oauthAuthInfo)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn &oauthPrincipal, nil\n\t}\n\n\treturn nil, nil\n}\n\nfunc (h respHandler) createPrincipalByOAuthInfo(userID string, oauthAuthInfo sso.AuthInfo) (oauth.Principal, error) {\n\tnow := timeNow()\n\tprincipal := oauth.NewPrincipal()\n\tprincipal.UserID = userID\n\tprincipal.ProviderName = oauthAuthInfo.ProviderName\n\tprincipal.ProviderUserID = oauthAuthInfo.ProviderUserID\n\tprincipal.AccessTokenResp = oauthAuthInfo.ProviderAccessTokenResp\n\tprincipal.UserProfile = oauthAuthInfo.ProviderUserProfile\n\tprincipal.CreatedAt = &now\n\tprincipal.UpdatedAt = &now\n\terr := h.OAuthAuthProvider.CreatePrincipal(principal)\n\treturn principal, err\n}\n\nfunc (h respHandler) createEmptyPasswordPrincipal(userID string, oauthAuthInfo sso.AuthInfo) error {\n\tif valid := h.PasswordAuthProvider.IsAuthDataValid(oauthAuthInfo.ProviderAuthData); valid {\n\t\t\/\/ if ProviderAuthData mastches authRecordKeys, and it can't be link with current account,\n\t\t\/\/ we also creates an empty password principal for later the user can set password to it\n\t\treturn h.PasswordAuthProvider.CreatePrincipalsByAuthData(userID, \"\", oauthAuthInfo.ProviderAuthData)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package requests\n\nimport (\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/rancher\/rancher\/pkg\/auth\/audit\"\n\t\"github.com\/rancher\/rancher\/pkg\/auth\/providers\"\n\t\"github.com\/rancher\/rancher\/pkg\/auth\/util\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"k8s.io\/apiserver\/pkg\/endpoints\/request\"\n)\n\nfunc NewAuthenticatedFilter(next http.Handler) http.Handler {\n\treturn &authHeaderHandler{\n\t\tnext: next,\n\t}\n}\n\ntype authHeaderHandler struct {\n\tnext http.Handler\n}\n\nfunc (h authHeaderHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\tuserInfo, authed := request.UserFrom(req.Context())\n\t\/\/ checking for system:cattle:error user keeps the old behavior of always returning 401 when authentication fails\n\tif !authed || userInfo.GetName() == \"system:cattle:error\" {\n\t\tutil.ReturnHTTPError(rw, req, 401, ErrMustAuthenticate.Error())\n\t\treturn\n\t}\n\n\t\/\/clean extra that is not part of userInfo\n\tfor header := range req.Header {\n\t\tif strings.HasPrefix(header, \"Impersonate-Extra-\") {\n\t\t\tkey := strings.TrimPrefix(header, \"Impersonate-Extra-\")\n\t\t\tif !providers.IsValidUserExtraAttribute(key) {\n\t\t\t\treq.Header.Del(header)\n\t\t\t}\n\t\t}\n\t}\n\n\treq.Header.Set(\"Impersonate-User\", userInfo.GetName())\n\treq.Header.Del(\"Impersonate-Group\")\n\tfor _, group := range userInfo.GetGroups() {\n\t\treq.Header.Add(\"Impersonate-Group\", group)\n\t}\n\n\tfor key, extras := range userInfo.GetExtra() {\n\t\tfor _, s := range extras {\n\t\t\treq.Header.Add(\"Impersonate-Extra-\"+key, s)\n\t\t}\n\t}\n\n\tlogrus.Tracef(\"Rancher Auth Filter ##headers %v: \", req.Header)\n\n\tauditUser, ok := audit.FromContext(req.Context())\n\tif ok {\n\t\tauditUser.Name = userInfo.GetName()\n\t\tauditUser.Group = userInfo.GetGroups()\n\t}\n\n\th.next.ServeHTTP(rw, req)\n}\n\nfunc NewRequireAuthenticatedFilter(pathPrefix string, ignorePrefix ...string) func(next http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn &authedFilter{\n\t\t\tnext: next,\n\t\t\tpathPrefix: pathPrefix,\n\t\t\tignorePrefix: ignorePrefix,\n\t\t}\n\t}\n}\n\ntype authedFilter struct {\n\tnext http.Handler\n\tpathPrefix string\n\tignorePrefix []string\n}\n\nfunc (h authedFilter) matches(path string) bool {\n\tif strings.HasPrefix(path, h.pathPrefix) {\n\t\tfor _, prefix := range h.ignorePrefix {\n\t\t\tif strings.HasPrefix(path, prefix) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (h authedFilter) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\tif h.matches(req.URL.Path) {\n\t\tuserInfo, authed := request.UserFrom(req.Context())\n\t\t\/\/ checking for system:cattle:error user keeps the old behavior of always returning 401 when authentication fails\n\t\tif !authed || userInfo.GetName() == \"system:cattle:error\" {\n\t\t\tutil.ReturnHTTPError(rw, req, 401, ErrMustAuthenticate.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\th.next.ServeHTTP(rw, req)\n}\n<commit_msg>Do not add empty Impersonate-Extra header<commit_after>package requests\n\nimport (\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/rancher\/rancher\/pkg\/auth\/audit\"\n\t\"github.com\/rancher\/rancher\/pkg\/auth\/providers\"\n\t\"github.com\/rancher\/rancher\/pkg\/auth\/util\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"k8s.io\/apiserver\/pkg\/endpoints\/request\"\n)\n\nfunc NewAuthenticatedFilter(next http.Handler) http.Handler {\n\treturn &authHeaderHandler{\n\t\tnext: next,\n\t}\n}\n\ntype authHeaderHandler struct {\n\tnext http.Handler\n}\n\nfunc (h authHeaderHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\tuserInfo, authed := request.UserFrom(req.Context())\n\t\/\/ checking for system:cattle:error user keeps the old behavior of always returning 401 when authentication fails\n\tif !authed || userInfo.GetName() == \"system:cattle:error\" {\n\t\tutil.ReturnHTTPError(rw, req, 401, ErrMustAuthenticate.Error())\n\t\treturn\n\t}\n\n\t\/\/clean extra that is not part of userInfo\n\tfor header := range req.Header {\n\t\tif strings.HasPrefix(header, \"Impersonate-Extra-\") {\n\t\t\tkey := strings.TrimPrefix(header, \"Impersonate-Extra-\")\n\t\t\tif !providers.IsValidUserExtraAttribute(key) {\n\t\t\t\treq.Header.Del(header)\n\t\t\t}\n\t\t}\n\t}\n\n\treq.Header.Set(\"Impersonate-User\", userInfo.GetName())\n\treq.Header.Del(\"Impersonate-Group\")\n\tfor _, group := range userInfo.GetGroups() {\n\t\treq.Header.Add(\"Impersonate-Group\", group)\n\t}\n\n\tfor key, extras := range userInfo.GetExtra() {\n\t\tfor _, s := range extras {\n\t\t\tif s != \"\" {\n\t\t\t\treq.Header.Add(\"Impersonate-Extra-\"+key, s)\n\t\t\t}\n\t\t}\n\t}\n\n\tlogrus.Tracef(\"Rancher Auth Filter ##headers %v: \", req.Header)\n\n\tauditUser, ok := audit.FromContext(req.Context())\n\tif ok {\n\t\tauditUser.Name = userInfo.GetName()\n\t\tauditUser.Group = userInfo.GetGroups()\n\t}\n\n\th.next.ServeHTTP(rw, req)\n}\n\nfunc NewRequireAuthenticatedFilter(pathPrefix string, ignorePrefix ...string) func(next http.Handler) http.Handler {\n\treturn func(next http.Handler) http.Handler {\n\t\treturn &authedFilter{\n\t\t\tnext: next,\n\t\t\tpathPrefix: pathPrefix,\n\t\t\tignorePrefix: ignorePrefix,\n\t\t}\n\t}\n}\n\ntype authedFilter struct {\n\tnext http.Handler\n\tpathPrefix string\n\tignorePrefix []string\n}\n\nfunc (h authedFilter) matches(path string) bool {\n\tif strings.HasPrefix(path, h.pathPrefix) {\n\t\tfor _, prefix := range h.ignorePrefix {\n\t\t\tif strings.HasPrefix(path, prefix) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (h authedFilter) ServeHTTP(rw http.ResponseWriter, req *http.Request) {\n\tif h.matches(req.URL.Path) {\n\t\tuserInfo, authed := request.UserFrom(req.Context())\n\t\t\/\/ checking for system:cattle:error user keeps the old behavior of always returning 401 when authentication fails\n\t\tif !authed || userInfo.GetName() == \"system:cattle:error\" {\n\t\t\tutil.ReturnHTTPError(rw, req, 401, ErrMustAuthenticate.Error())\n\t\t\treturn\n\t\t}\n\t}\n\n\th.next.ServeHTTP(rw, req)\n}\n<|endoftext|>"} {"text":"<commit_before>package scene\n\nimport (\n\t\"github.com\/pankona\/gomo-simra\/simra\"\n\n\t\"golang.org\/x\/mobile\/asset\"\n)\n\ntype sound struct {\n\tgame *game\n}\n\n\/*\natk_fire.mp3\natk_hammer.mp3\natk_sword.mp3\n\/\/bgm1.mp3\n\/\/bgm2.mp3\n\/\/bgm3.mp3\nenemy_atk.mp3\n\/\/enemy_dead.mp3\n\/\/enemy_spawn.mp3\n\/\/player_dead.mp3\n\/\/recall.mp3\n\/\/select.mp3\n\/\/start_game.mp3\n\/\/summoning.mp3\n*\/\n\nfunc (s *sound) play(assetName string) {\n\ta := simra.NewAudio()\n\tresource, err := asset.Open(assetName)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\ta.Play(resource, false, func(err error) {})\n}\n\nfunc (s *sound) OnEvent(i interface{}) {\n\tc, ok := i.(*command)\n\tif !ok {\n\t\tpanic(\"unexpected command received. fatal.\")\n\t}\n\tswitch c.commandtype {\n\tcase commandSpawn:\n\t\td := c.data.(uniter)\n\t\tif d.IsAlly() {\n\t\t\ts.play(\"summoning.mp3\")\n\t\t} else {\n\t\t\ts.play(\"enemy_spawn.mp3\")\n\t\t}\n\n\tcase commandDead:\n\t\td := c.data.(uniter)\n\t\tif d.IsAlly() {\n\t\t\ts.play(\"player_dead.mp3\")\n\t\t} else {\n\t\t\ts.play(\"enemy_dead.mp3\")\n\t\t}\n\n\tcase commandRecall:\n\t\ts.play(\"summoning.mp3\")\n\n\tcase commandRecalled:\n\t\ts.play(\"recall.mp3\")\n\n\tcase commandUpdateSelection:\n\t\ts.play(\"select.mp3\")\n\n\tcase commandAttack:\n\t\td := c.data.(uniter)\n\t\tswitch d.GetUnitType() {\n\t\tcase \"player1\":\n\t\t\ts.play(\"atk_sword.mp3\")\n\t\tcase \"player2\":\n\t\t\ts.play(\"atk_hammer.mp3\")\n\t\tcase \"player3\":\n\t\t\ts.play(\"atk_fire.mp3\")\n\t\tcase \"enemy1\":\n\t\t\ts.play(\"enemy_atk.mp3\")\n\t\tcase \"enemy2\":\n\t\t\ts.play(\"enemy_atk.mp3\")\n\t\t}\n\t}\n}\n<commit_msg>refactoring. remove unnecessary comment out.<commit_after>package scene\n\nimport (\n\t\"github.com\/pankona\/gomo-simra\/simra\"\n\n\t\"golang.org\/x\/mobile\/asset\"\n)\n\ntype sound struct {\n\tgame *game\n}\n\nfunc (s *sound) play(assetName string) {\n\ta := simra.NewAudio()\n\tresource, err := asset.Open(assetName)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\ta.Play(resource, false, func(err error) {})\n}\n\nfunc (s *sound) OnEvent(i interface{}) {\n\tc, ok := i.(*command)\n\tif !ok {\n\t\tpanic(\"unexpected command received. fatal.\")\n\t}\n\tswitch c.commandtype {\n\tcase commandSpawn:\n\t\td := c.data.(uniter)\n\t\tif d.IsAlly() {\n\t\t\ts.play(\"summoning.mp3\")\n\t\t} else {\n\t\t\ts.play(\"enemy_spawn.mp3\")\n\t\t}\n\n\tcase commandDead:\n\t\td := c.data.(uniter)\n\t\tif d.IsAlly() {\n\t\t\ts.play(\"player_dead.mp3\")\n\t\t} else {\n\t\t\ts.play(\"enemy_dead.mp3\")\n\t\t}\n\n\tcase commandRecall:\n\t\ts.play(\"summoning.mp3\")\n\n\tcase commandRecalled:\n\t\ts.play(\"recall.mp3\")\n\n\tcase commandUpdateSelection:\n\t\ts.play(\"select.mp3\")\n\n\tcase commandAttack:\n\t\td := c.data.(uniter)\n\t\tswitch d.GetUnitType() {\n\t\tcase \"player1\":\n\t\t\ts.play(\"atk_sword.mp3\")\n\t\tcase \"player2\":\n\t\t\ts.play(\"atk_hammer.mp3\")\n\t\tcase \"player3\":\n\t\t\ts.play(\"atk_fire.mp3\")\n\t\tcase \"enemy1\":\n\t\t\ts.play(\"enemy_atk.mp3\")\n\t\tcase \"enemy2\":\n\t\t\ts.play(\"enemy_atk.mp3\")\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package scram\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n)\n\nfunc ExtractProof(mess []byte) ([]byte, error) {\n\tvar b64proof []byte\n\terr := eachToken(mess, ',', func(token []byte) error {\n\t\tk, v := extractKeyValue(token, '=')\n\n\t\tif k[0] == 'p' {\n\t\t\tif len(b64proof) != 0 {\n\t\t\t\treturn WrongClientMessage(\"More then one proof provided\")\n\t\t\t}\n\t\t\tb64proof = v\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\tif len(b64proof) == 0 {\n\t\treturn []byte{}, WrongClientMessage(\"Proof not found\")\n\t}\n\n\tproof := make([]byte, base64.StdEncoding.DecodedLen(len(b64proof)))\n\tif _, err := base64.StdEncoding.Decode(proof, b64proof); err != nil {\n\t\treturn []byte{}, nil\n\t}\n\n\treturn proof, nil\n}\n\nfunc eachToken(mess []byte, sep byte, predicate func(token []byte) error) error {\n\tfor _, token := range bytes.Split(mess, []byte{sep}) {\n\t\tif err := predicate(token); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc validateMessage(mess []byte) error {\n\tif mess[0] != 'y' && mess[0] != 'n' && mess[0] != 'p' {\n\t\treturn WrongClientMessage(\"Wrong start byte\")\n\t}\n\treturn nil\n}\n\nfunc extractKeyValue(token []byte, sep byte) ([]byte, []byte) {\n\tkv := bytes.SplitN(token, []byte{sep}, 2)\n\treturn kv[0], kv[1]\n}\n\nfunc saslDePrep(username []byte) string {\n\t\/\/ TODO implement real logic\n\treturn string(username)\n}\n\nfunc saslPrepare(username string) string {\n\t\/\/panic(\"Not implemented\")\n\treturn username\n}\n\nfunc byteXOR(left, right []byte) []byte {\n\tres := make([]byte, len(left))\n\tfor i := range left {\n\t\tres[i] = left[i] ^ right[i]\n\t}\n\treturn res\n}\n<commit_msg>Added comment for ExtractProof method<commit_after>package scram\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n)\n\n\/\/ Extracts proof from Server First message and Base64 decodes it.\n\/\/ Doens't do any checks except checks that there is at least one\n\/\/ proof, not more than one proof and proof is Base64-encoded\nfunc ExtractProof(mess []byte) ([]byte, error) {\n\tvar b64proof []byte\n\terr := eachToken(mess, ',', func(token []byte) error {\n\t\tk, v := extractKeyValue(token, '=')\n\n\t\tif k[0] == 'p' {\n\t\t\tif len(b64proof) != 0 {\n\t\t\t\treturn WrongClientMessage(\"More then one proof provided\")\n\t\t\t}\n\t\t\tb64proof = v\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\tif len(b64proof) == 0 {\n\t\treturn []byte{}, WrongClientMessage(\"Proof not found\")\n\t}\n\n\tproof := make([]byte, base64.StdEncoding.DecodedLen(len(b64proof)))\n\tif _, err := base64.StdEncoding.Decode(proof, b64proof); err != nil {\n\t\treturn []byte{}, nil\n\t}\n\n\treturn proof, nil\n}\n\nfunc eachToken(mess []byte, sep byte, predicate func(token []byte) error) error {\n\tfor _, token := range bytes.Split(mess, []byte{sep}) {\n\t\tif err := predicate(token); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc validateMessage(mess []byte) error {\n\tif mess[0] != 'y' && mess[0] != 'n' && mess[0] != 'p' {\n\t\treturn WrongClientMessage(\"Wrong start byte\")\n\t}\n\treturn nil\n}\n\nfunc extractKeyValue(token []byte, sep byte) ([]byte, []byte) {\n\tkv := bytes.SplitN(token, []byte{sep}, 2)\n\treturn kv[0], kv[1]\n}\n\nfunc saslDePrep(username []byte) string {\n\t\/\/ TODO implement real logic\n\treturn string(username)\n}\n\nfunc saslPrepare(username string) string {\n\t\/\/panic(\"Not implemented\")\n\treturn username\n}\n\nfunc byteXOR(left, right []byte) []byte {\n\tres := make([]byte, len(left))\n\tfor i := range left {\n\t\tres[i] = left[i] ^ right[i]\n\t}\n\treturn res\n}\n<|endoftext|>"} {"text":"<commit_before>package core\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"time\"\n\n\tinfluxdb \"github.com\/influxdb\/influxdb\/client\"\n\n\t\"..\/utils\"\n)\n\n\/*\n\tContainer's stats\n*\/\ntype Stat struct {\n\tContainerID string\n\tTime time.Time\n\tSizeRootFs uint64\n\tSizeRw uint64\n\tSizeMemory uint64\n\tNetBandwithRX uint64\n\tNetBandwithTX uint64\n\tCPUUsage uint64\n\tRunning bool\n}\n\n\/*\n\tHTTP GET options\n*\/\ntype Options struct {\n\tSince int\n\tBefore int\n\tLimit int\n}\n\nconst (\n\tStatsMeasurements = \"cstats\"\n)\n\n\/*\n\tClient variables\n*\/\nvar (\n\t\/\/ DB\n\tDB *influxdb.Client\n)\n\n\/*\n\tInitialize InfluxDB connection\n*\/\nfunc InitDB() {\n\tvar err error\n\n\t\/\/ Parse InfluxDB server URL\n\tu, err := url.Parse(fmt.Sprintf(\"http:\/\/%s:%d\", DGConfig.DockerGuard.InfluxDB.IP, DGConfig.DockerGuard.InfluxDB.Port))\n\tif err != nil {\n\t\tl.Critical(\"Can't parse InfluxDB config :\", err)\n\t}\n\n\t\/\/ Make InfluxDB config\n\tconf := influxdb.Config{\n\t\tURL: *u,\n\t\tUsername: os.Getenv(\"INFLUX_USER\"),\n\t\tPassword: os.Getenv(\"INFLUX_PWD\"),\n\t}\n\n\t\/\/ Connect to InfluxDB server\n\tDB, err = influxdb.NewClient(conf)\n\tif err != nil {\n\t\tl.Critical(\"Can't connect to InfluxDB:\", err)\n\t}\n\n\t\/\/ Test InfluxDB server connectivity\n\tdur, ver, err := DB.Ping()\n\tif err != nil {\n\t\tl.Critical(\"Can't ping InfluxDB:\", err)\n\t}\n\tl.Verbose(\"Connected to InfluxDB! ping:\", dur, \"\/ version:\", ver)\n\n\t\/\/ Create DB if doesn't exist\n\t_, err = queryDB(DB, \"create database \"+DGConfig.DockerGuard.InfluxDB.DB)\n\tif err != nil {\n\t\tif err.Error() != \"database already exists\" {\n\t\t\tl.Critical(\"Create DB:\", err)\n\t\t}\n\t}\n}\n\n\/*\n\tSend a query to InfluxDB server\n*\/\nfunc queryDB(con *influxdb.Client, cmd string) (res []influxdb.Result, err error) {\n\tq := influxdb.Query{\n\t\tCommand: cmd,\n\t\tDatabase: DGConfig.DockerGuard.InfluxDB.DB,\n\t}\n\tif response, err := con.Query(q); err == nil {\n\t\tif response.Error() != nil {\n\t\t\treturn res, response.Error()\n\t\t}\n\t\tres = response.Results\n\t}\n\treturn\n}\n\n\/*\n\tParse Options\n*\/\nfunc GetOptions(r *http.Request) Options {\n\tvar options Options \/\/ Returned options\n\tvar err error \/\/ Error handling\n\n\t\/\/ Get url parameters\n\toS := r.URL.Query().Get(\"since\")\n\toB := r.URL.Query().Get(\"before\")\n\toL := r.URL.Query().Get(\"limit\")\n\n\t\/\/ Format parameters to int and set options\n\toSInt, err := utils.S2I(oS)\n\tif err != nil {\n\t\toptions.Since = -1\n\t} else {\n\t\toptions.Since = oSInt\n\t}\n\toBInt, err := utils.S2I(oB)\n\tif err != nil {\n\t\toptions.Before = -1\n\t} else {\n\t\toptions.Before = oBInt\n\t}\n\toLInt, err := utils.S2I(oL)\n\tif err != nil {\n\t\toptions.Limit = -1\n\t} else {\n\t\toptions.Limit = oLInt\n\t}\n\n\treturn options\n}\n\n\/*\n\tInsert a stat\n*\/\nfunc (s *Stat) Insert() error {\n\tvar pts = make([]influxdb.Point, 1) \/\/ InfluxDB point\n\tvar err error \/\/ Error handling\n\n\tl.Silly(\"Insert stat:\", s)\n\t\/\/ Make InfluxDB point\n\tpts[0] = influxdb.Point{\n\t\tMeasurement: StatsMeasurements,\n\t\tTags: map[string]string{\n\t\t\t\"containerid\": s.ContainerID,\n\t\t},\n\t\tFields: map[string]interface{}{\n\t\t\t\"sizerootfs\": s.SizeRootFs,\n\t\t\t\"sizerw\": s.SizeRw,\n\t\t\t\"sizememory\": s.SizeMemory,\n\t\t\t\"netbandwithrx\": s.NetBandwithRX,\n\t\t\t\"netbandwithtx\": s.NetBandwithTX,\n\t\t\t\"cpuusage\": s.CPUUsage,\n\t\t\t\"running\": s.Running,\n\t\t},\n\t\tTime: time.Now(),\n\t\tPrecision: \"s\",\n\t}\n\n\t\/\/ InfluxDB batch points\n\tbps := influxdb.BatchPoints{\n\t\tPoints: pts,\n\t\tDatabase: DGConfig.DockerGuard.InfluxDB.DB,\n\t\tRetentionPolicy: \"default\",\n\t}\n\n\t\/\/ Write point in InfluxDB server\n\ttimer := time.Now()\n\t_, err = DB.Write(bps)\n\tif err != nil {\n\t\tl.Error(\"Failed to write in InfluxDB:\", bps, \". Error:\", err)\n\t} else {\n\t\tl.Silly(\"Stat inserted in \", time.Since(timer), \":\", bps)\n\t}\n\n\treturn err\n}\n\n\/*\n\tInsert some stats\n*\/\nfunc InsertStats(stats []Stat) error {\n\tif len(stats) < 1 {\n\t\treturn errors.New(\"len(stats) < 1\")\n\t}\n\n\tvar pts = make([]influxdb.Point, len(stats)) \/\/ InfluxDB point\n\tvar err error \/\/ Error handling\n\n\tl.Silly(\"Insert stats:\", stats)\n\t\/\/ Make InfluxDB points\n\tfor i := 0; i < len(stats); i++ {\n\t\tpts[i] = influxdb.Point{\n\t\t\tMeasurement: StatsMeasurements,\n\t\t\tTags: map[string]string{\n\t\t\t\t\"containerid\": stats[i].ContainerID,\n\t\t\t},\n\t\t\tFields: map[string]interface{}{\n\t\t\t\t\"sizerootfs\": stats[i].SizeRootFs,\n\t\t\t\t\"sizerw\": stats[i].SizeRw,\n\t\t\t\t\"sizememory\": stats[i].SizeMemory,\n\t\t\t\t\"netbandwithrx\": stats[i].NetBandwithRX,\n\t\t\t\t\"netbandwithtx\": stats[i].NetBandwithTX,\n\t\t\t\t\"cpuusage\": stats[i].CPUUsage,\n\t\t\t\t\"running\": stats[i].Running,\n\t\t\t},\n\t\t\tTime: time.Now(),\n\t\t\tPrecision: \"s\",\n\t\t}\n\t}\n\n\t\/\/ InfluxDB batch points\n\tbps := influxdb.BatchPoints{\n\t\tPoints: pts,\n\t\tDatabase: DGConfig.DockerGuard.InfluxDB.DB,\n\t\tRetentionPolicy: \"default\",\n\t}\n\n\t\/\/ Write points in InfluxDB server\n\ttimer := time.Now()\n\t_, err = DB.Write(bps)\n\tif err != nil {\n\t\tl.Error(\"Failed to write in InfluxDB:\", bps, \". Error:\", err)\n\t} else {\n\t\tl.Silly(\"Stat inserted in \", time.Since(timer), \":\", bps)\n\t}\n\n\treturn err\n}\n\n\/*\n\tGet container's last stat\n*\/\nfunc (c *Container) GetLastStat() (Stat, error) {\n\tvar stat Stat \/\/ Returned stat\n\tvar err error \/\/ Error handling\n\n\tquery := `\tSELECT \tlast(cpuusage),\n\t\t\t\t\t\tlast(netbandwithrx),\n\t\t\t\t\t\tlast(netbandwithtx),\n\t\t\t\t\t\tlast(running),\n\t\t\t\t\t\tlast(sizememory),\n\t\t\t\t\t\tlast(sizerootfs),\n\t\t\t\t\t\tlast(sizerw) \n\t\t\t\tFROM cstats\n\t\t\t\tWHERE containerid = '` + c.CID + `'`\n\n\t\/\/ Send query\n\tres, err := queryDB(DB, query)\n\tif err != nil {\n\t\treturn stat, err\n\t}\n\n\t\/\/ Get results\n\tfor _, row := range res[0].Series[0].Values {\n\t\tvar statValues [8]int64\n\t\tif len(row) != 8 {\n\t\t\treturn stat, errors.New(fmt.Sprintf(\"GetLastStat: Wrong stat length: %d != 8\", len(row)))\n\t\t}\n\t\tfor i := 1; i <= 7; i++ {\n\t\t\tif i == 4 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstatValues[i], err = row[i].(json.Number).Int64()\n\t\t\tif err != nil {\n\t\t\t\treturn stat, errors.New(\"GetLastStat: Can't parse value: \" + row[i].(string))\n\t\t\t}\n\t\t}\n\n\t\tstat.ContainerID = c.CID\n\t\tstat.CPUUsage = uint64(statValues[1])\n\t\tstat.NetBandwithRX = uint64(statValues[2])\n\t\tstat.NetBandwithTX = uint64(statValues[3])\n\t\tstat.Running = row[4].(bool)\n\t\tstat.SizeMemory = uint64(statValues[5])\n\t\tstat.SizeRootFs = uint64(statValues[6])\n\t\tstat.SizeRw = uint64(statValues[7])\n\t}\n\n\treturn stat, err\n}\n\n\/*\n\tGet stats by container id\n*\/\nfunc GetStatsByContainerCID(containerCID string, o Options) ([]Stat, error) {\n\tvar stats []Stat \/\/ List of stats to return\n\tvar oS, oB string \/\/ Query options\n\tvar err error \/\/ Error handling\n\n\tquery := `\tSELECT *\n\t\t\t\tFROM cstats\n\t\t\t\tWHERE containerid = '` + containerCID + `'`\n\n\t\/\/ Add options\n\tif o.Since != -1 || o.Before != -1 {\n\t\tif o.Since != -1 && o.Before != -1 {\n\t\t\toS = fmt.Sprintf(\"%d\", o.Since)\n\t\t\toB = fmt.Sprintf(\"%d\", o.Before)\n\t\t} else if o.Since == -1 || o.Before != -1 {\n\t\t\toS = fmt.Sprintf(\"%d\", 0)\n\t\t\toB = fmt.Sprintf(\"%d\", o.Before)\n\t\t} else if o.Since != -1 || o.Before == -1 {\n\t\t\toS = fmt.Sprintf(\"%d\", o.Since)\n\t\t\toB = fmt.Sprintf(\"%d\", 2000000000)\n\t\t}\n\t\tquery += fmt.Sprintf(\" AND time > '%s' AND time < '%s'\", oS, oB)\n\t}\n\tif o.Limit != -1 {\n\t\tquery += fmt.Sprintf(\" LIMIT %d\", o.Limit)\n\t}\n\n\t\/\/ Send query\n\tres, err := queryDB(DB, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Get results\n\tfor _, row := range res[0].Series[0].Values {\n\t\tvar stat Stat\n\t\tvar statValues [8]int64\n\t\tif len(row) != 8 {\n\t\t\treturn nil, errors.New(fmt.Sprintf(\"GetLastStat: Wrong stat length: %d != 8\", len(row)))\n\t\t}\n\t\tfor i := 1; i <= 7; i++ {\n\t\t\tif i == 4 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstatValues[i], err = row[i].(json.Number).Int64()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"GetLastStat: Can't parse value: \" + row[i].(string))\n\t\t\t}\n\t\t}\n\n\t\tstat.Time, _ = time.Parse(time.RFC3339, row[0].(string))\n\t\tstat.ContainerID = containerCID\n\t\tstat.CPUUsage = uint64(statValues[1])\n\t\tstat.NetBandwithRX = uint64(statValues[2])\n\t\tstat.NetBandwithTX = uint64(statValues[3])\n\t\tstat.Running = row[4].(bool)\n\t\tstat.SizeMemory = uint64(statValues[5])\n\t\tstat.SizeRootFs = uint64(statValues[6])\n\t\tstat.SizeRw = uint64(statValues[7])\n\n\t\tstats = append(stats, stat)\n\t}\n\treturn stats, nil\n}\n\n\/*\n\tGet stats by probe name\n*\/\nfunc GetStatsByContainerProbeID(probeName string, o Options) ([]Stat, error) {\n\tvar containers []Container \/\/ List of containers in the probe\n\tvar stats []Stat \/\/ List of stats to return\n\tvar err error \/\/ Error handling\n\n\t\/\/ Get list of containers in the probe\n\tcontainers, err = GetContainersByProbe(probeName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Get stats for each containers\n\tfor _, container := range containers {\n\t\ttmpStats, err := GetStatsByContainerCID(container.CID, o)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, tmpStat := range tmpStats {\n\t\t\tstats = append(stats, tmpStat)\n\t\t}\n\t}\n\n\treturn stats, nil\n}\n\n\/*\n\tGet stats populated by probe name\n*\/\nfunc GetStatsPByContainerProbeID(probeName string, o Options) ([]StatPopulated, error) {\n\tvar containers []Container \/\/ List of containers in the probe\n\tvar statsP []StatPopulated \/\/ List of stats populated to return\n\tvar err error \/\/ Error handling\n\n\t\/\/ Get list of containers in the probe\n\tcontainers, err = GetContainersByProbe(probeName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Get stats for each containers\n\tfor _, container := range containers {\n\t\ttmpStats, err := GetStatsByContainerCID(container.CID, o)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, tmpStat := range tmpStats {\n\t\t\tstatP := StatPopulated{\n\t\t\t\tContainer: container,\n\t\t\t\tTime: tmpStat.Time,\n\t\t\t\tSizeRootFs: tmpStat.SizeRootFs,\n\t\t\t\tSizeRw: tmpStat.SizeRw,\n\t\t\t\tSizeMemory: tmpStat.SizeMemory,\n\t\t\t\tNetBandwithRX: tmpStat.NetBandwithRX,\n\t\t\t\tNetBandwithTX: tmpStat.NetBandwithTX,\n\t\t\t\tCPUUsage: tmpStat.CPUUsage,\n\t\t\t\tRunning: tmpStat.Running,\n\t\t\t}\n\n\t\t\tstatsP = append(statsP, statP)\n\t\t}\n\t}\n\n\treturn statsP, nil\n}\n<commit_msg>Fix since\/before\/limit for \/stats\/probe\/{name}<commit_after>package core\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"time\"\n\n\tinfluxdb \"github.com\/influxdb\/influxdb\/client\"\n\n\t\"..\/utils\"\n)\n\n\/*\n\tContainer's stats\n*\/\ntype Stat struct {\n\tContainerID string\n\tTime time.Time\n\tSizeRootFs uint64\n\tSizeRw uint64\n\tSizeMemory uint64\n\tNetBandwithRX uint64\n\tNetBandwithTX uint64\n\tCPUUsage uint64\n\tRunning bool\n}\n\n\/*\n\tHTTP GET options\n*\/\ntype Options struct {\n\tSince string\n\tBefore string\n\tLimit int\n}\n\nconst (\n\tStatsMeasurements = \"cstats\"\n)\n\n\/*\n\tClient variables\n*\/\nvar (\n\t\/\/ DB\n\tDB *influxdb.Client\n)\n\n\/*\n\tInitialize InfluxDB connection\n*\/\nfunc InitDB() {\n\tvar err error\n\n\t\/\/ Parse InfluxDB server URL\n\tu, err := url.Parse(fmt.Sprintf(\"http:\/\/%s:%d\", DGConfig.DockerGuard.InfluxDB.IP, DGConfig.DockerGuard.InfluxDB.Port))\n\tif err != nil {\n\t\tl.Critical(\"Can't parse InfluxDB config :\", err)\n\t}\n\n\t\/\/ Make InfluxDB config\n\tconf := influxdb.Config{\n\t\tURL: *u,\n\t\tUsername: os.Getenv(\"INFLUX_USER\"),\n\t\tPassword: os.Getenv(\"INFLUX_PWD\"),\n\t}\n\n\t\/\/ Connect to InfluxDB server\n\tDB, err = influxdb.NewClient(conf)\n\tif err != nil {\n\t\tl.Critical(\"Can't connect to InfluxDB:\", err)\n\t}\n\n\t\/\/ Test InfluxDB server connectivity\n\tdur, ver, err := DB.Ping()\n\tif err != nil {\n\t\tl.Critical(\"Can't ping InfluxDB:\", err)\n\t}\n\tl.Verbose(\"Connected to InfluxDB! ping:\", dur, \"\/ version:\", ver)\n\n\t\/\/ Create DB if doesn't exist\n\t_, err = queryDB(DB, \"create database \"+DGConfig.DockerGuard.InfluxDB.DB)\n\tif err != nil {\n\t\tif err.Error() != \"database already exists\" {\n\t\t\tl.Critical(\"Create DB:\", err)\n\t\t}\n\t}\n}\n\n\/*\n\tSend a query to InfluxDB server\n*\/\nfunc queryDB(con *influxdb.Client, cmd string) (res []influxdb.Result, err error) {\n\tq := influxdb.Query{\n\t\tCommand: cmd,\n\t\tDatabase: DGConfig.DockerGuard.InfluxDB.DB,\n\t}\n\tif response, err := con.Query(q); err == nil {\n\t\tif response.Error() != nil {\n\t\t\treturn res, response.Error()\n\t\t}\n\t\tres = response.Results\n\t}\n\treturn\n}\n\n\/*\n\tParse Options\n*\/\nfunc GetOptions(r *http.Request) Options {\n\tvar options Options \/\/ Returned options\n\tvar err error \/\/ Error handling\n\n\t\/\/ Get url parameters\n\toS := r.URL.Query().Get(\"since\")\n\toB := r.URL.Query().Get(\"before\")\n\toL := r.URL.Query().Get(\"limit\")\n\n\t\/\/ Set options\n\toptions.Since = oS\n\toptions.Before = oB\n\toLInt, err := utils.S2I(oL)\n\tif err != nil {\n\t\toptions.Limit = -1\n\t} else {\n\t\toptions.Limit = oLInt\n\t}\n\n\treturn options\n}\n\n\/*\n\tInsert a stat\n*\/\nfunc (s *Stat) Insert() error {\n\tvar pts = make([]influxdb.Point, 1) \/\/ InfluxDB point\n\tvar err error \/\/ Error handling\n\n\tl.Silly(\"Insert stat:\", s)\n\t\/\/ Make InfluxDB point\n\tpts[0] = influxdb.Point{\n\t\tMeasurement: StatsMeasurements,\n\t\tTags: map[string]string{\n\t\t\t\"containerid\": s.ContainerID,\n\t\t},\n\t\tFields: map[string]interface{}{\n\t\t\t\"sizerootfs\": s.SizeRootFs,\n\t\t\t\"sizerw\": s.SizeRw,\n\t\t\t\"sizememory\": s.SizeMemory,\n\t\t\t\"netbandwithrx\": s.NetBandwithRX,\n\t\t\t\"netbandwithtx\": s.NetBandwithTX,\n\t\t\t\"cpuusage\": s.CPUUsage,\n\t\t\t\"running\": s.Running,\n\t\t},\n\t\tTime: time.Now(),\n\t\tPrecision: \"s\",\n\t}\n\n\t\/\/ InfluxDB batch points\n\tbps := influxdb.BatchPoints{\n\t\tPoints: pts,\n\t\tDatabase: DGConfig.DockerGuard.InfluxDB.DB,\n\t\tRetentionPolicy: \"default\",\n\t}\n\n\t\/\/ Write point in InfluxDB server\n\ttimer := time.Now()\n\t_, err = DB.Write(bps)\n\tif err != nil {\n\t\tl.Error(\"Failed to write in InfluxDB:\", bps, \". Error:\", err)\n\t} else {\n\t\tl.Silly(\"Stat inserted in \", time.Since(timer), \":\", bps)\n\t}\n\n\treturn err\n}\n\n\/*\n\tInsert some stats\n*\/\nfunc InsertStats(stats []Stat) error {\n\tif len(stats) < 1 {\n\t\treturn errors.New(\"len(stats) < 1\")\n\t}\n\n\tvar pts = make([]influxdb.Point, len(stats)) \/\/ InfluxDB point\n\tvar err error \/\/ Error handling\n\n\tl.Silly(\"Insert stats:\", stats)\n\t\/\/ Make InfluxDB points\n\tfor i := 0; i < len(stats); i++ {\n\t\tpts[i] = influxdb.Point{\n\t\t\tMeasurement: StatsMeasurements,\n\t\t\tTags: map[string]string{\n\t\t\t\t\"containerid\": stats[i].ContainerID,\n\t\t\t},\n\t\t\tFields: map[string]interface{}{\n\t\t\t\t\"sizerootfs\": stats[i].SizeRootFs,\n\t\t\t\t\"sizerw\": stats[i].SizeRw,\n\t\t\t\t\"sizememory\": stats[i].SizeMemory,\n\t\t\t\t\"netbandwithrx\": stats[i].NetBandwithRX,\n\t\t\t\t\"netbandwithtx\": stats[i].NetBandwithTX,\n\t\t\t\t\"cpuusage\": stats[i].CPUUsage,\n\t\t\t\t\"running\": stats[i].Running,\n\t\t\t},\n\t\t\tTime: time.Now(),\n\t\t\tPrecision: \"s\",\n\t\t}\n\t}\n\n\t\/\/ InfluxDB batch points\n\tbps := influxdb.BatchPoints{\n\t\tPoints: pts,\n\t\tDatabase: DGConfig.DockerGuard.InfluxDB.DB,\n\t\tRetentionPolicy: \"default\",\n\t}\n\n\t\/\/ Write points in InfluxDB server\n\ttimer := time.Now()\n\t_, err = DB.Write(bps)\n\tif err != nil {\n\t\tl.Error(\"Failed to write in InfluxDB:\", bps, \". Error:\", err)\n\t} else {\n\t\tl.Silly(\"Stat inserted in \", time.Since(timer), \":\", bps)\n\t}\n\n\treturn err\n}\n\n\/*\n\tGet container's last stat\n*\/\nfunc (c *Container) GetLastStat() (Stat, error) {\n\tvar stat Stat \/\/ Returned stat\n\tvar err error \/\/ Error handling\n\n\tquery := `\tSELECT \tlast(cpuusage),\n\t\t\t\t\t\tlast(netbandwithrx),\n\t\t\t\t\t\tlast(netbandwithtx),\n\t\t\t\t\t\tlast(running),\n\t\t\t\t\t\tlast(sizememory),\n\t\t\t\t\t\tlast(sizerootfs),\n\t\t\t\t\t\tlast(sizerw) \n\t\t\t\tFROM cstats\n\t\t\t\tWHERE containerid = '` + c.CID + `'`\n\n\t\/\/ Send query\n\tres, err := queryDB(DB, query)\n\tif err != nil {\n\t\treturn stat, err\n\t}\n\n\t\/\/ Get results\n\tfor _, row := range res[0].Series[0].Values {\n\t\tvar statValues [8]int64\n\t\tif len(row) != 8 {\n\t\t\treturn stat, errors.New(fmt.Sprintf(\"GetLastStat: Wrong stat length: %d != 8\", len(row)))\n\t\t}\n\t\tfor i := 1; i <= 7; i++ {\n\t\t\tif i == 4 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstatValues[i], err = row[i].(json.Number).Int64()\n\t\t\tif err != nil {\n\t\t\t\treturn stat, errors.New(\"GetLastStat: Can't parse value: \" + row[i].(string))\n\t\t\t}\n\t\t}\n\n\t\tstat.ContainerID = c.CID\n\t\tstat.CPUUsage = uint64(statValues[1])\n\t\tstat.NetBandwithRX = uint64(statValues[2])\n\t\tstat.NetBandwithTX = uint64(statValues[3])\n\t\tstat.Running = row[4].(bool)\n\t\tstat.SizeMemory = uint64(statValues[5])\n\t\tstat.SizeRootFs = uint64(statValues[6])\n\t\tstat.SizeRw = uint64(statValues[7])\n\t}\n\n\treturn stat, err\n}\n\n\/*\n\tGet stats by container id\n*\/\nfunc GetStatsByContainerCID(containerCID string, o Options) ([]Stat, error) {\n\tvar stats []Stat \/\/ List of stats to return\n\tvar query string \/\/ InfluxDB query\n\tvar oS, oB string \/\/ Query options\n\tvar err error \/\/ Error handling\n\n\tvar sinceT, beforeT time.Time\n\tvar betweenDuration time.Duration\n\tvar groupByTime int\n\n\t\/\/ Check limitations\n\tif o.Limit > 90000 {\n\t\treturn nil, errors.New(fmt.Sprintf(\"limit is to damn high! (%d)\", o.Limit))\n\t}\n\n\t\/\/ Make InfluxDB query\n\tif o.Limit == -1 {\n\t\tquery = `\tSELECT *\n\t\t\t\t\tFROM cstats\n\t\t\t\t\tWHERE time < now()\n\t\t\t\t\tAND containerid = '` + containerCID + `'`\n\t} else {\n\t\tquery = `\tSELECT\tmean(cpuusage) as cpuusage,\n\t\t\t\t\t\t\tmean(netbandwithrx) as netbandwithrx,\n\t\t\t\t\t\t\tmean(netbandwithtx) as netbandwithtx,\n\t\t\t\t\t\t\tmean(running) as running,\n\t\t\t\t\t\t\tmean(sizememory) as sizememory,\n\t\t\t\t\t\t\tmean(sizerootfs) as sizerootfs,\n\t\t\t\t\t\t\tmean(sizerw) as sizerw\n\t\t\t\t\tFROM cstats\n\t\t\t\t\tWHERE time < now()\n\t\t\t\t\tAND containerid = '` + containerCID + `'`\n\t}\n\n\t\/\/ Add options\n\tif o.Since != \"\" || o.Before != \"\" {\n\t\tif o.Since != \"\" && o.Before != \"\" {\n\t\t\toS = \"'\" + o.Since + \"'\"\n\t\t\toB = \"'\" + o.Before + \"'\"\n\t\t\tif o.Limit != -1 {\n\t\t\t\tsinceT, err = time.Parse(time.RFC3339, o.Since)\n\t\t\t\tbeforeT, err = time.Parse(time.RFC3339, o.Before)\n\t\t\t}\n\t\t} else if o.Since == \"\" || o.Before != \"\" {\n\t\t\toS = \"now() - 1d\"\n\t\t\toB = \"'\" + o.Before + \"'\"\n\t\t\tif o.Limit != -1 {\n\t\t\t\tsinceT = time.Now().Add(time.Hour * (-24))\n\t\t\t\tbeforeT, err = time.Parse(time.RFC3339, o.Before)\n\t\t\t}\n\t\t} else if o.Since != \"\" || o.Before == \"\" {\n\t\t\toS = \"'\" + o.Since + \"'\"\n\t\t\toB = \"now()\"\n\t\t\tif o.Limit != -1 {\n\t\t\t\tsinceT, err = time.Parse(time.RFC3339, o.Since)\n\t\t\t\tbeforeT = time.Now()\n\t\t\t}\n\t\t}\n\t} else {\n\t\toS = \"now() - 1d\"\n\t\toB = \"now()\"\n\t\tif o.Limit != -1 {\n\t\t\tsinceT = time.Now().Add(time.Hour * (-24))\n\t\t\tbeforeT = time.Now()\n\t\t}\n\t}\n\tquery += fmt.Sprintf(\" AND time > %s AND time < %s \", oS, oB)\n\n\t\/\/ If limit is defined, calculate the interval\n\tif o.Limit != -1 {\n\t\tbetweenDuration = beforeT.Sub(sinceT)\n\t\tgroupByTime = int(float64(betweenDuration.Seconds()) \/ float64(o.Limit) * 1000)\n\t\tquery += fmt.Sprintf(\" GROUP BY time(%dms)\", groupByTime)\n\t}\n\n\t\/\/ Send query\n\tl.Debug(\"GetStatsByContainerCID: InfluxDB query:\", query)\n\tres, err := queryDB(DB, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Check if not found\n\tif len(res) < 1 || len(res[0].Series) < 1 {\n\t\treturn nil, errors.New(\"GetStatsByContainerCID: Not found\")\n\t}\n\n\t\/\/ Get results\n\tfor _, row := range res[0].Series[0].Values {\n\t\tvar stat Stat\n\t\tvar statValues [8]float64\n\t\tif len(row) != 8 {\n\t\t\treturn nil, errors.New(fmt.Sprintf(\"GetStatsByContainerCID: Wrong stat length: %d != 8\", len(row)))\n\t\t}\n\t\tfor i := 1; i <= 7; i++ {\n\t\t\tif i == 4 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif row[i] == nil {\n\t\t\t\tstatValues[i] = 0\n\t\t\t} else {\n\t\t\t\tstatValues[i], err = row[i].(json.Number).Float64()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, errors.New(\"GetStatsByContainerCID: Can't parse value: \" + fmt.Sprintf(\"%s\", row[i]))\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t\tstat.Time, _ = time.Parse(time.RFC3339, row[0].(string))\n\t\tstat.ContainerID = containerCID\n\t\tstat.CPUUsage = uint64(statValues[1])\n\t\tstat.NetBandwithRX = uint64(statValues[2])\n\t\tstat.NetBandwithTX = uint64(statValues[3])\n\t\tif row[4] == nil || o.Limit != -1 {\n\t\t\tstat.Running = false\n\t\t} else {\n\t\t\tstat.Running = row[4].(bool)\n\t\t}\n\t\tstat.SizeMemory = uint64(statValues[5])\n\t\tstat.SizeRootFs = uint64(statValues[6])\n\t\tstat.SizeRw = uint64(statValues[7])\n\n\t\tstats = append(stats, stat)\n\t}\n\n\t\/\/ Fix stats limit\n\tif o.Limit != -1 {\n\t\tstats = stats[(len(stats) - o.Limit):]\n\t}\n\n\treturn stats, nil\n}\n\n\/*\n\tGet stats by probe name\n*\/\nfunc GetStatsByContainerProbeID(probeName string, o Options) ([]Stat, error) {\n\tvar containers []Container \/\/ List of containers in the probe\n\tvar stats []Stat \/\/ List of stats to return\n\tvar err error \/\/ Error handling\n\n\t\/\/ Get list of containers in the probe\n\tcontainers, err = GetContainersByProbe(probeName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Get stats for each containers\n\tfor _, container := range containers {\n\t\ttmpStats, err := GetStatsByContainerCID(container.CID, o)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, tmpStat := range tmpStats {\n\t\t\tstats = append(stats, tmpStat)\n\t\t}\n\t}\n\n\treturn stats, nil\n}\n\n\/*\n\tGet stats populated by probe name\n*\/\nfunc GetStatsPByContainerProbeID(probeName string, o Options) ([]StatPopulated, error) {\n\tvar containers []Container \/\/ List of containers in the probe\n\tvar statsP []StatPopulated \/\/ List of stats populated to return\n\tvar err error \/\/ Error handling\n\n\t\/\/ Get list of containers in the probe\n\tcontainers, err = GetContainersByProbe(probeName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Get stats for each containers\n\tfor _, container := range containers {\n\t\ttmpStats, err := GetStatsByContainerCID(container.CID, o)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, tmpStat := range tmpStats {\n\t\t\tstatP := StatPopulated{\n\t\t\t\tContainer: container,\n\t\t\t\tTime: tmpStat.Time,\n\t\t\t\tSizeRootFs: tmpStat.SizeRootFs,\n\t\t\t\tSizeRw: tmpStat.SizeRw,\n\t\t\t\tSizeMemory: tmpStat.SizeMemory,\n\t\t\t\tNetBandwithRX: tmpStat.NetBandwithRX,\n\t\t\t\tNetBandwithTX: tmpStat.NetBandwithTX,\n\t\t\t\tCPUUsage: tmpStat.CPUUsage,\n\t\t\t\tRunning: tmpStat.Running,\n\t\t\t}\n\n\t\t\tstatsP = append(statsP, statP)\n\t\t}\n\t}\n\n\treturn statsP, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package status\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/mgutz\/ansi\"\n)\n\ntype Status int\n\nconst (\n\tOk = iota\n\tNok\n\tUnknown\n)\n\nconst (\n\tOkString = \"ok\"\n\tNokString = \"not ok\"\n\tUnknownString = \"unknown\"\n)\n\nfunc (s Status) String() string {\n\tvar out string\n\tswitch s {\n\tcase Ok:\n\t\tout = OkString\n\tcase Nok:\n\t\tout = NokString\n\tcase Unknown:\n\t\tout = UnknownString\n\t}\n\treturn out\n}\n\nfunc FromString(in string) (Status, error) {\n\tswitch strings.ToLower(in) {\n\tcase OkString:\n\t\treturn Ok, nil\n\tcase NokString:\n\t\treturn Nok, nil\n\tcase UnknownString:\n\t\treturn Unknown, nil\n\tdefault:\n\t\treturn Unknown, errors.New(fmt.Sprintf(\"String '%s' is not a valid status\", in))\n\t}\n}\n\nfunc (s Status) ToInt() int {\n\treturn int(s)\n}\n\nfunc (s Status) Colorize(in string) string {\n\tvar out string\n\tswitch s {\n\tcase Ok:\n\t\tout = ansi.Color(in, \"green\")\n\tcase Nok:\n\t\tout = ansi.Color(in, \"red+b\")\n\tcase Unknown:\n\t\tout = ansi.Color(in, \"cyan+b\")\n\t}\n\treturn out\n}\n\nfunc BoolAsStatus(ok bool) Status {\n\tif ok {\n\t\treturn Ok\n\t} else {\n\t\treturn Nok\n\t}\n}\n\nfunc (s Status) ToBool() bool {\n\tif s == Nok {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (s *Status) UnmarshalYAML(func(interface{}) error) {\n\treturn func(b interface{}) error {\n\t\tst, err := FromString(b.(string))\n\t\ts = &st\n\t\treturn err\n\t}\n}\n<commit_msg>improved un\/-marshaling of rules (does work)<commit_after>package status\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/mgutz\/ansi\"\n)\n\ntype Status int\n\nconst (\n\tOk = iota\n\tNok\n\tUnknown\n)\n\nconst (\n\tOkString = \"ok\"\n\tNokString = \"not ok\"\n\tUnknownString = \"unknown\"\n)\n\nfunc (s Status) String() string {\n\tvar out string\n\tswitch s {\n\tcase Ok:\n\t\tout = OkString\n\tcase Nok:\n\t\tout = NokString\n\tcase Unknown:\n\t\tout = UnknownString\n\t}\n\treturn out\n}\n\nfunc FromString(in string) (Status, error) {\n\tswitch strings.ToLower(in) {\n\tcase OkString:\n\t\treturn Ok, nil\n\tcase NokString:\n\t\treturn Nok, nil\n\tcase UnknownString:\n\t\treturn Unknown, nil\n\tdefault:\n\t\treturn Unknown, errors.New(fmt.Sprintf(\"String '%s' is not a valid status\", in))\n\t}\n}\n\nfunc (s Status) ToInt() int {\n\treturn int(s)\n}\n\nfunc (s Status) Colorize(in string) string {\n\tvar out string\n\tswitch s {\n\tcase Ok:\n\t\tout = ansi.Color(in, \"green\")\n\tcase Nok:\n\t\tout = ansi.Color(in, \"red+b\")\n\tcase Unknown:\n\t\tout = ansi.Color(in, \"cyan+b\")\n\t}\n\treturn out\n}\n\nfunc BoolAsStatus(ok bool) Status {\n\tif ok {\n\t\treturn Ok\n\t} else {\n\t\treturn Nok\n\t}\n}\n\nfunc (s Status) ToBool() bool {\n\tif s == Nok {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (s *Status) UnmarshalYAML(unmarshal func(interface{}) error) error {\n\tvar aux string\n\tif err := unmarshal(&aux); err != nil {\n\t\treturn err\n\t}\n\tst, err := FromString(aux)\n\ts = &st\n\treturn err\n}\n\nfunc (s Status) MarshalYAML() (interface{}, error) {\n\treturn s.String(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage log5go is a simple, powerful logging framework for Go, loosely based on Java's log4j.\nLoggers support log4j's basic log levels out of the box: TRACE, DEBUG, INFO, WARN, ERROR,\nand FATAL, but additional levels can be integrated into the framework while still respecting\nthe level hierarchy. log5go can log to the console or to one or more files and supports\nlog file archiving and rotation.\n\nBasics\n\nLoggers are configured using a builder pattern starting with NewLog(LogLevel) and\nterminating either with Build() or BuildAndRegister(string). The former simply constructs a\nnew logger and hands it back to the caller. The latter registers the logger internally\nusing the caller's source path and supplied key. This results in package-safe loggers\nthat can be statically retrieved in other parts of the package using GetLog(string).\n\nExamples\n\nThe following example creates a file logger and registers it with the name \"db\":\n\n log, err := log5go.Log(log5go.LogDebug).ToFile(\"\/var\/log\", \"myprog_db.log\").Register(\"db\")\n\nAll package local code will be able to retrieve the same logger by calling:\n\n log, err := log5go.GetLog(\"db\")\n\nThis allows logging to be unobtrusive in code, since any package-local code can easily\nobtain the desired logger without the need to create a global variable.\n\nThe following example creates a file logger with a log rotation scheme:\n\n log, err := log5go.Log(log5go.LogAll).ToFile(\"\/var\/log\", \"myprog.log\").WithRotation(log5go.RollDaily, 7).Build()\n\nIn this example, the logger will archive the log file daily at midnight, maintaining a maximum\nof 7 archived log files. (A timestamp is appended to the name of each log file and an attempt is\nmade to delete the file that was created 8 days ago.)\n*\/\npackage log5go\n\n\/\/ Package version info\nconst VERSION = \"0.5.0\"\n<commit_msg>version bump<commit_after>\/*\nPackage log5go is a simple, powerful logging framework for Go, loosely based on Java's log4j.\nLoggers support log4j's basic log levels out of the box: TRACE, DEBUG, INFO, WARN, ERROR,\nand FATAL, but additional levels can be integrated into the framework while still respecting\nthe level hierarchy. log5go can log to the console or to one or more files and supports\nlog file archiving and rotation.\n\nBasics\n\nLoggers are configured using a builder pattern starting with NewLog(LogLevel) and\nterminating either with Build() or BuildAndRegister(string). The former simply constructs a\nnew logger and hands it back to the caller. The latter registers the logger internally\nusing the caller's source path and supplied key. This results in package-safe loggers\nthat can be statically retrieved in other parts of the package using GetLog(string).\n\nExamples\n\nThe following example creates a file logger and registers it with the name \"db\":\n\n log, err := log5go.Log(log5go.LogDebug).ToFile(\"\/var\/log\", \"myprog_db.log\").Register(\"db\")\n\nAll package local code will be able to retrieve the same logger by calling:\n\n log, err := log5go.GetLog(\"db\")\n\nThis allows logging to be unobtrusive in code, since any package-local code can easily\nobtain the desired logger without the need to create a global variable.\n\nThe following example creates a file logger with a log rotation scheme:\n\n log, err := log5go.Log(log5go.LogAll).ToFile(\"\/var\/log\", \"myprog.log\").WithRotation(log5go.RollDaily, 7).Build()\n\nIn this example, the logger will archive the log file daily at midnight, maintaining a maximum\nof 7 archived log files. (A timestamp is appended to the name of each log file and an attempt is\nmade to delete the file that was created 8 days ago.)\n*\/\npackage log5go\n\n\/\/ Package version info\nconst VERSION = \"0.6.0\"\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage toil provides simple functionality for managing toilers (i.e., workers).\n\nUsage\n\nTo use, create one or more types that implement the toil.Toiler interface. For example:\n\n\ttype myToiler struct{}\n\t\n\tfunc newMyToiler() {\n\t\n\t\ttoiler := myToiler{}\n\t\n\t\treturn &toiler\n\t}\n\t\n\tfunc (toiler *myToiler) Toil() {\n\t\t\/\/@TODO: Do work here.\n\t}\n\nThen create a toil.Group. For example:\n\n\tvar (\n\t\tToilerGroup = toil.NewGroup()\n\t)\n\nThen register one of more toilers (i.e., types that implement the toil.Toiler interface)\nwith the toiler group. For example:\n\n\ttoiler := newMyToiler()\n\n\tToilerGroup.Register(toiler)\n\nThen, you can call the Toil method of the toiler group in place like main(). For example:\n\n\tfunc main() {\n\t\n\t\t\/\/ ...\n\t\n\t\t\/\/ Calling the Toil() method on the toiler group\n\t\t\/\/ will cause it to call the Toil() method of\n\t\t\/\/ each toiler registered with it.\n\t\t\/\/\n\t\t\/\/ Thus causing each of those toilers registered\n\t\t\/\/ with it to start doing its work (whatever that\n\t\t\/\/ happens to be) all at the same time, simultaneously.\n\t\t\/\/\n\t\t\/\/ This will block until all the toilers registered\n\t\t\/\/ in this toiler group's Toil() methods finishes\n\t\t\/\/ (either because it returned gracefully or because\n\t\t\/\/ it panic()ed).\n\t\tToilerGroup.Toil()\n\t\n\t\t\/\/ ...\n\t\n\t}\n\nObservers\n\nA toiler's Toil method can finish in one of two ways. Either it will return gracefully, or\nit will panic().\n\nThe toiler group is OK with either.\n\nBut also, the toiler group provides the toiler with a convenient way of being notified\nof each case.\n\nIf a toiler also has a Terminated() method, then the toiler group will call the toiler's\nTerminated() method when the toiler's Toil() method has returned gracefully. For example:\n\n\ttype myToiler struct{}\n\t\n\tfunc newMyToiler() {\n\t\n\t\ttoiler := myToiler{}\n\t\n\t\treturn &toiler\n\t}\n\t\n\tfunc (toiler *myToiler) Toil() {\n\t\t\/\/@TODO: Do work here.\n\t}\n\t\n\tfunc (toiler *myToiler) Terminated() {\n\t\t\/\/@TODO: Do something with this notification.\n\t}\n\nIf a toiler also has a Recovered() method, then the toiler group will call the toiler's\nRecovered() method when the toiler's Toil() method has panic()ed. For example:\n\n\ttype myToiler struct{}\n\t\n\tfunc newMyToiler() {\n\t\n\t\ttoiler := myToiler{}\n\t\n\t\treturn &toiler\n\t}\n\t\n\tfunc (toiler *myToiler) Toil() {\n\t\t\/\/@TODO: Do work here.\n\t}\n\t\n\tfunc (toiler *myToiler) Recovered() {\n\t\t\/\/@TODO: Do something with this notification.\n\t}\n\nAnd of course, a toiler can take advantage of both of these notifications and have\nboth a Recovered() and Terminated() method. For example:\n\n\ttype myToiler struct{}\n\t\n\tfunc newMyToiler() {\n\t\n\t\ttoiler := myToiler{}\n\t\n\t\treturn &toiler\n\t}\n\t\n\tfunc (toiler *myToiler) Toil() {\n\t\t\/\/@TODO: Do work here.\n\t}\n\t\n\tfunc (toiler *myToiler) Recovered() {\n\t\t\/\/@TODO: Do something with this notification.\n\t}\n\t\n\tfunc (toiler *myToiler) Terminated() {\n\t\t\/\/@TODO: Do something with this notification.\n\t}\n\n*\/\npackage toil\n<commit_msg>updated docs<commit_after>\/*\nPackage toil provides simple functionality for managing toilers (i.e., workers).\n\nUsage\n\nTo use, create one or more types that implement the toil.Toiler interface. For example:\n\n\ttype awesomeToiler struct{}\n\t\n\tfunc newAwesomeToiler() {\n\t\n\t\ttoiler := awesomeToiler{}\n\t\n\t\treturn &toiler\n\t}\n\t\n\tfunc (toiler *awesomeToiler) Toil() {\n\t\t\/\/@TODO: Do work here.\n\t\t\/\/\n\t\t\/\/ And me this block (i.e., not return)\n\t\t\/\/ until the work is done.\n\t}\n\nThen create a toil.Group. For example:\n\n\tvar (\n\t\tToilerGroup = toil.NewGroup()\n\t)\n\nThen register one of more toilers (i.e., types that implement the toil.Toiler interface)\nwith the toiler group. For example:\n\n\ttoiler := newAwesomeToiler()\n\n\tToilerGroup.Register(toiler)\n\nThen, you can call the Toil method of the toiler group in place like main(). For example:\n\n\tfunc main() {\n\t\n\t\t\/\/ ...\n\t\n\t\t\/\/ Calling the Toil() method on the toiler group\n\t\t\/\/ will cause it to call the Toil() method of\n\t\t\/\/ each toiler registered with it.\n\t\t\/\/\n\t\t\/\/ Thus causing each of those toilers registered\n\t\t\/\/ with it to start doing its work (whatever that\n\t\t\/\/ happens to be) all at the same time, simultaneously.\n\t\t\/\/\n\t\t\/\/ This will block until all the toilers registered\n\t\t\/\/ in this toiler group's Toil() methods finishes\n\t\t\/\/ (either because it returned gracefully or because\n\t\t\/\/ it panic()ed).\n\t\tToilerGroup.Toil()\n\t\n\t\t\/\/ ...\n\t\n\t}\n\nObservers\n\nA toiler's Toil method can finish in one of two ways. Either it will return gracefully, or\nit will panic().\n\nThe toiler group is OK with either.\n\nBut also, the toiler group provides the toiler with a convenient way of being notified\nof each case.\n\nIf a toiler also has a Terminated() method, then the toiler group will call the toiler's\nTerminated() method when the toiler's Toil() method has returned gracefully. For example:\n\n\ttype awesomeToiler struct{}\n\t\n\tfunc newAwesomeToiler() {\n\t\n\t\ttoiler := awesomeToiler{}\n\t\n\t\treturn &toiler\n\t}\n\t\n\tfunc (toiler *awesomeToiler) Toil() {\n\t\t\/\/@TODO: Do work here.\n\t}\n\t\n\tfunc (toiler *awesomeToiler) Terminated() {\n\t\t\/\/@TODO: Do something with this notification.\n\t}\n\nIf a toiler also has a Recovered() method, then the toiler group will call the toiler's\nRecovered() method when the toiler's Toil() method has panic()ed. For example:\n\n\ttype awesomeToiler struct{}\n\t\n\tfunc newAwesomeToiler() {\n\t\n\t\ttoiler := awesomeToiler{}\n\t\n\t\treturn &toiler\n\t}\n\t\n\tfunc (toiler *awesomeToiler) Toil() {\n\t\t\/\/@TODO: Do work here.\n\t}\n\t\n\tfunc (toiler *awesomeToiler) Recovered() {\n\t\t\/\/@TODO: Do something with this notification.\n\t}\n\nAnd of course, a toiler can take advantage of both of these notifications and have\nboth a Recovered() and Terminated() method. For example:\n\n\ttype awesomeToiler struct{}\n\t\n\tfunc newAwesomeToiler() {\n\t\n\t\ttoiler := awesomeToiler{}\n\t\n\t\treturn &toiler\n\t}\n\t\n\tfunc (toiler *awesomeToiler) Toil() {\n\t\t\/\/@TODO: Do work here.\n\t}\n\t\n\tfunc (toiler *awesomeToiler) Recovered() {\n\t\t\/\/@TODO: Do something with this notification.\n\t}\n\t\n\tfunc (toiler *awesomeToiler) Terminated() {\n\t\t\/\/@TODO: Do something with this notification.\n\t}\n\n*\/\npackage toil\n<|endoftext|>"} {"text":"<commit_before>\/**\n * Copyright 2014 Paul Querna\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n *\/\n\n\/\/ Package otp implements both HOTP and TOTP based\n\/\/ one time passcodes in a Google Authenticator compatible manner.\n\/\/\n\/\/ When adding a TOTP for a user, you must store the \"secret\" value\n\/\/ persistently. It is reocmmend to store the in an encrypted field in your\n\/\/ datastore. Due to how TOTP works, it is not possible to store a hash\n\/\/ for the secret value like you would a password.\n\/\/\n\/\/ To enroll a user, you must first generate an OTP for them. Google\n\/\/ Authenticator supports using a QR code as an enrollment method:\n\/\/\n\/\/\timport (\n\/\/\t\t\"github.com\/pquerna\/otp\/totp\"\n\/\/\n\/\/\t\t\"bytes\"\n\/\/\t\t\"image\/png\"\n\/\/\t)\n\/\/\n\/\/\tkey, err := totp.Generate(totp.GenerateOpts{\n\/\/\t\t\tIssuer: \"Example.com\",\n\/\/\t\t\tAccountName: \"alice@example.com\"\n\/\/\t})\n\/\/\n\/\/\t\/\/ Convert TOTP key into a QR code encoded as a PNG image.\n\/\/\tvar buf bytes.Buffer\n\/\/\timg, err := key.Image(200, 200)\n\/\/\tpng.Encode(&buf, img)\n\/\/\n\/\/\t\/\/ display the QR code to the user.\n\/\/\tdisplay(buf.Bytes())\n\/\/\n\/\/\t\/\/ Now Validate that the user's successfully added the passcode.\n\/\/\tpasscode := promptForPasscode()\n\/\/\tvalid := totp.Validate(passcode, key.Secret())\n\/\/\n\/\/\tif valid {\n\/\/\t\t\/\/ User successfully used their TOTP, save it to your backend!\n\/\/\t\tstoreSecret(\"alice@example.com\", key.Secret())\n\/\/\t}\n\/\/\n\/\/ Validating a TOTP passcode is very easy, just prompt the user for a passcode\n\/\/ and retrieve the associated user's previously stored secret.\n\/\/\timport \"github.com\/pquerna\/otp\/totp\"\n\/\/\n\/\/\tpasscode := promptForPasscode()\n\/\/\tsecret := getSecret(\"alice@example.com\")\n\/\/\n\/\/\tvalid := totp.Validate(passcode, secret)\n\/\/\n\/\/\tif valid {\n\/\/\t\t\/\/ Success! continue login process.\n\/\/\t}\npackage otp\n<commit_msg>Fix doc spelling error.<commit_after>\/**\n * Copyright 2014 Paul Querna\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n *\/\n\n\/\/ Package otp implements both HOTP and TOTP based\n\/\/ one time passcodes in a Google Authenticator compatible manner.\n\/\/\n\/\/ When adding a TOTP for a user, you must store the \"secret\" value\n\/\/ persistently. It is recommend to store the in an encrypted field in your\n\/\/ datastore. Due to how TOTP works, it is not possible to store a hash\n\/\/ for the secret value like you would a password.\n\/\/\n\/\/ To enroll a user, you must first generate an OTP for them. Google\n\/\/ Authenticator supports using a QR code as an enrollment method:\n\/\/\n\/\/\timport (\n\/\/\t\t\"github.com\/pquerna\/otp\/totp\"\n\/\/\n\/\/\t\t\"bytes\"\n\/\/\t\t\"image\/png\"\n\/\/\t)\n\/\/\n\/\/\tkey, err := totp.Generate(totp.GenerateOpts{\n\/\/\t\t\tIssuer: \"Example.com\",\n\/\/\t\t\tAccountName: \"alice@example.com\"\n\/\/\t})\n\/\/\n\/\/\t\/\/ Convert TOTP key into a QR code encoded as a PNG image.\n\/\/\tvar buf bytes.Buffer\n\/\/\timg, err := key.Image(200, 200)\n\/\/\tpng.Encode(&buf, img)\n\/\/\n\/\/\t\/\/ display the QR code to the user.\n\/\/\tdisplay(buf.Bytes())\n\/\/\n\/\/\t\/\/ Now Validate that the user's successfully added the passcode.\n\/\/\tpasscode := promptForPasscode()\n\/\/\tvalid := totp.Validate(passcode, key.Secret())\n\/\/\n\/\/\tif valid {\n\/\/\t\t\/\/ User successfully used their TOTP, save it to your backend!\n\/\/\t\tstoreSecret(\"alice@example.com\", key.Secret())\n\/\/\t}\n\/\/\n\/\/ Validating a TOTP passcode is very easy, just prompt the user for a passcode\n\/\/ and retrieve the associated user's previously stored secret.\n\/\/\timport \"github.com\/pquerna\/otp\/totp\"\n\/\/\n\/\/\tpasscode := promptForPasscode()\n\/\/\tsecret := getSecret(\"alice@example.com\")\n\/\/\n\/\/\tvalid := totp.Validate(passcode, secret)\n\/\/\n\/\/\tif valid {\n\/\/\t\t\/\/ Success! continue login process.\n\/\/\t}\npackage otp\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage junos provides automation for Junos (Juniper Networks) devices.\n\nEstablishing A Session\n\nTo connect to a Junos device, the process is fairly straightforward.\n\n jnpr := junos.NewSession(host, user, password)\n defer jnpr.Close()\n\nCompare Rollback Configurations\n\nIf you want to view the difference between the current configuration and a rollback\none, then you can use the RollbackDiff() function.\n\n diff, err := jnpr.RollbackDiff(3)\n if err != nil {\n fmt.Println(err)\n }\n fmt.Println(diff)\n\nThis will output exactly how it does on the CLI when you \"| compare.\"\n\nRolling Back to a Previous State\n\nYou can also rollback to a previous state, or the \"rescue\" configuration by using\nthe RollbackConfig() function:\n\n err := jnpr.RollbackConfig(3)\n if err != nil {\n fmt.Println(err)\n }\n\n \/\/ Create a rescue config from the active configuration.\n jnpr.Rescue(\"save\")\n\n \/\/ You can also delete a rescue config.\n jnpr.Rescue(\"delete\")\n\n \/\/ Rollback to the \"rescue\" configuration.\n err := jnpr.RollbackConfig(\"rescue\")\n if err != nil {\n fmt.Println(err)\n }\n\nDevice Configuration\n\nWhen configuring a device, it is good practice to lock the configuration database,\nload the config, commit the configuration, and then unlock the configuration database.\n\nYou can do this with the following functions:\n\n Lock(), Commit(), Unlock()\n\nThere are multiple ways to commit a configuration as well:\n\n \/\/ Commit the configuration as normal\n Commit()\n\n \/\/ Check the configuration for any syntax errors (NOTE: you must still issue a Commit())\n CommitCheck()\n\n \/\/ Commit at a later time, i.e. 4:30 PM\n CommitAt(\"16:30:00\")\n\n \/\/ Rollback configuration if a Commit() is not issued within the given <minutes>.\n CommitConfirmed(15)\n\nYou can configure the Junos device by uploading a local file, or pulling from an\nFTP\/HTTP server. The LoadConfig() function takes three arguments:\n\n filename or URL, format, and commit-on-load\n\nIf you specify a URL, it must be in the following format:\n\n ftp:\/\/user@password:path-to-file\n http:\/\/user@password\/path-to-file\n\nThe format of the commands within the file must be one of the following types:\n\n set\n \/\/ system name-server 1.1.1.1\n\n text\n \/\/ system {\n \/\/ name-server 1.1.1.1;\n \/\/ }\n\n xml\n \/\/ <system>\n \/\/ <name-server>\n \/\/ <name>1.1.1.1<\/name>\n \/\/ <\/name-server>\n \/\/ <\/system>\n\nIf the third option is \"true\" then after the configuration is loaded, a commit\nwill be issued. If set to \"false,\" you will have to commit the configuration\nusing the Commit() function.\n\n jnpr.Lock()\n err := jnpr.LoadConfig(\"path-to-file.txt\", \"set\", true)\n if err != nil {\n fmt.Println(err)\n }\n jnpr.Unlock()\n\nYou don't have to use Lock() and Unlock() if you wish, but if by chance someone\nelse tries to edit the device configuration at the same time, there can be conflics\nand most likely an error will be returned.\n*\/\npackage junos\n<commit_msg>Updated documentation<commit_after>\/*\nPackage junos provides automation for Junos (Juniper Networks) devices.\n\nEstablishing A Session\n\nTo connect to a Junos device, the process is fairly straightforward.\n\n jnpr := junos.NewSession(host, user, password)\n defer jnpr.Close()\n\nCompare Rollback Configurations\n\nIf you want to view the difference between the current configuration and a rollback\none, then you can use the RollbackDiff() function.\n\n diff, err := jnpr.RollbackDiff(3)\n if err != nil {\n fmt.Println(err)\n }\n fmt.Println(diff)\n\nThis will output exactly how it does on the CLI when you \"| compare.\"\n\nRolling Back to a Previous State\n\nYou can also rollback to a previous state, or the \"rescue\" configuration by using\nthe RollbackConfig() function:\n\n err := jnpr.RollbackConfig(3)\n if err != nil {\n fmt.Println(err)\n }\n\n \/\/ Create a rescue config from the active configuration.\n jnpr.Rescue(\"save\")\n\n \/\/ You can also delete a rescue config.\n jnpr.Rescue(\"delete\")\n\n \/\/ Rollback to the \"rescue\" configuration.\n err := jnpr.RollbackConfig(\"rescue\")\n if err != nil {\n fmt.Println(err)\n }\n\nDevice Configuration\n\nWhen configuring a device, it is good practice to lock the configuration database,\nload the config, commit the configuration, and then unlock the configuration database.\n\nYou can do this with the following functions:\n\n Lock(), Commit(), Unlock()\n\nThere are multiple ways to commit a configuration as well:\n\n \/\/ Commit the configuration as normal\n Commit()\n\n \/\/ Check the configuration for any syntax errors (NOTE: you must still issue a Commit())\n CommitCheck()\n\n \/\/ Commit at a later time, i.e. 4:30 PM\n CommitAt(\"16:30:00\")\n\n \/\/ Rollback configuration if a Commit() is not issued within the given <minutes>.\n CommitConfirm(15)\n\nYou can configure the Junos device by uploading a local file, or pulling from an\nFTP\/HTTP server. The LoadConfig() function takes three arguments:\n\n filename or URL, format, and commit-on-load\n\nIf you specify a URL, it must be in the following format:\n\n ftp:\/\/user@password:path-to-file\n http:\/\/user@password\/path-to-file\n\nThe format of the commands within the file must be one of the following types:\n\n set\n \/\/ system name-server 1.1.1.1\n\n text\n \/\/ system {\n \/\/ name-server 1.1.1.1;\n \/\/ }\n\n xml\n \/\/ <system>\n \/\/ <name-server>\n \/\/ <name>1.1.1.1<\/name>\n \/\/ <\/name-server>\n \/\/ <\/system>\n\nIf the third option is \"true\" then after the configuration is loaded, a commit\nwill be issued. If set to \"false,\" you will have to commit the configuration\nusing the Commit() function.\n\n jnpr.Lock()\n err := jnpr.LoadConfig(\"path-to-file.txt\", \"set\", true)\n if err != nil {\n fmt.Println(err)\n }\n jnpr.Unlock()\n\nYou don't have to use Lock() and Unlock() if you wish, but if by chance someone\nelse tries to edit the device configuration at the same time, there can be conflics\nand most likely an error will be returned.\n*\/\npackage junos\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage junos allows you to run commands on and configure Junos devices.\n\nEstablishing a session\n\tjnpr := junos.NewSession(host, user, password)\n\nLocking the configuration\n\terr := jnpr.Lock()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\nCommiting the configuration\n\terr = jnpr.Commit()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n \nUnlocking the configuration\n\terr = jnpr.Unlock()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\nCompare the current configuration to a rollback config.\n\tdiff, err := jnpr.RollbackDiff(3)\n\tif err != nil {\n\t\tfmt.Printf(\"Error: %s\", err)\n\t}\n\tfmt.Println(diff)\n \nThe output from `RollbackDiff()` will be exactly as it is running the \"| compare\" command on the CLI:\n\n [edit forwarding-options helpers bootp server 192.168.10.2]\n - routing-instance srx-vr;\n [edit forwarding-options helpers bootp server 192.168.10.3]\n - routing-instance srx-vr;\n [edit security address-book global]\n address server1 { ... }\n + address dc-console 192.168.20.15\/32;\n + address dc-laptop 192.168.22.7\/32;\n [edit security zones security-zone vendors interfaces]\n reth0.1000 { ... }\n + reth0.520 {\n + host-inbound-traffic {\n + system-services {\n + dhcp;\n + ping;\n + }\n + }\n + }\n\nRollback to an older configuration.\n\terr := jnpr.RollbackConfig(2)\n\tif err != nil {\n\t\tfmt.Printf(\"Error: %s\", err)\n\t}\n\nRun operational mode commands, such as \"show.\"\n\toutput, err := jnpr.Command(\"show version\", \"text\")\n\tif err != nil {\n\t\tfmt.Printf(\"Error: %s\", err)\n\t}\n\tfmt.Println(output)\n\nWhen you specify \"text,\" the output will be just like it is on the CLI:\n\n node0:\n --------------------------------------------------------------------------\n Hostname: firewall-1\n Model: srx240h2\n JUNOS Software Release [12.1X47-D10.4]\n\n node1:\n --------------------------------------------------------------------------\n Hostname: firewall-2\n Model: srx240h2\n JUNOS Software Release [12.1X47-D10.4]\n\n*\/\npackage junos<commit_msg>Updated documentation<commit_after>\/*\nPackage junos allows you to run commands on and configure Junos devices.\n\nEstablishing a session\n\tjnpr := junos.NewSession(host, user, password)\n\nLocking the configuration\n\terr := jnpr.Lock()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\nCommiting the configuration\n\terr = jnpr.Commit()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n \nUnlocking the configuration\n\terr = jnpr.Unlock()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\nCompare the current configuration to a rollback config.\n\tdiff, err := jnpr.RollbackDiff(3)\n\tif err != nil {\n\t\tfmt.Printf(\"Error: %s\", err)\n\t}\n\tfmt.Println(diff)\n \nThe output from RollbackDiff() will be exactly as it is running the \"| compare\" command on the CLI:\n\n [edit forwarding-options helpers bootp server 192.168.10.2]\n - routing-instance srx-vr;\n [edit forwarding-options helpers bootp server 192.168.10.3]\n - routing-instance srx-vr;\n [edit security address-book global]\n address server1 { ... }\n + address dc-console 192.168.20.15\/32;\n + address dc-laptop 192.168.22.7\/32;\n [edit security zones security-zone vendors interfaces]\n reth0.1000 { ... }\n + reth0.520 {\n + host-inbound-traffic {\n + system-services {\n + dhcp;\n + ping;\n + }\n + }\n + }\n\nRollback to an older configuration.\n\terr := jnpr.RollbackConfig(2)\n\tif err != nil {\n\t\tfmt.Printf(\"Error: %s\", err)\n\t}\n\nRun operational mode commands, such as \"show.\"\n\toutput, err := jnpr.Command(\"show version\", \"text\")\n\tif err != nil {\n\t\tfmt.Printf(\"Error: %s\", err)\n\t}\n\tfmt.Println(output)\n\nWhen you specify \"text,\" the output will be formatted exactly as it is on the CLI:\n\n node0:\n --------------------------------------------------------------------------\n Hostname: firewall-1\n Model: srx240h2\n JUNOS Software Release [12.1X47-D10.4]\n\n node1:\n --------------------------------------------------------------------------\n Hostname: firewall-2\n Model: srx240h2\n JUNOS Software Release [12.1X47-D10.4]\n\n*\/\npackage junos<|endoftext|>"} {"text":"<commit_before>\/\/ Package chapi provides clients and data structures for working with the Companies House API.\npackage chapi\n\n\/\/ TODO(js) Documentation - write a package-level quickstart\/overview\/tutorial.\n<commit_msg>Commit to kick-off Travis and Codecov<commit_after>\/\/ Package chapi provides clients and data structures for working with the Companies House API.\npackage chapi\n\n\/\/ TODO(js) Documentation - write a package-level quickstart\/overview\/tutorial.\n\/\/\n<|endoftext|>"} {"text":"<commit_before>\/\/ cfitsio is a wrapper around the CFITSIO library.\npackage cfitsio\n\n\/\/ EOF\n<commit_msg>doc: streamlining<commit_after>\/\/ Package cfitsio is a wrapper around the CFITSIO library.\npackage cfitsio\n\n\/\/ EOF\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package ebiten provides graphics and input API to develop a 2D game.\n\/\/\n\/\/ You can start the game by calling the function RunGame.\n\/\/\n\/\/ \/\/ Game implements ebiten.Game interface.\n\/\/ type Game struct{}\n\/\/\n\/\/ \/\/ Update proceeds the game state.\n\/\/ \/\/ Update is called every tick (1\/60 [s] by default).\n\/\/ func (g *Game) Update(screen *ebiten.Image) error {\n\/\/ \/\/ Write your game's logical update.\n\/\/ return nil\n\/\/ }\n\/\/\n\/\/ \/\/ Draw draws the game screen.\n\/\/ \/\/ Draw is called every frame (typically 1\/60[s] for 60Hz display).\n\/\/ func (g *Game) Draw(screen *ebiten.Image) error {\n\/\/ \/\/ Write your game's rendering.\n\/\/ }\n\/\/\n\/\/ \/\/ Layout takes the outside size (e.g., the window size) and returns the (logical) screen size.\n\/\/ \/\/ If you don't have to adjust the screen size with the outside size, just return a fixed size.\n\/\/ func (g *Game) Layout(outsideWidth, outsideHeight int) (screenWidth, screenHeight int) {\n\/\/ return 320, 240\n\/\/ }\n\/\/\n\/\/ func main() {\n\/\/ game := &Game{}\n\/\/ \/\/ Sepcify the window size as you like. Here, a doulbed size is specified.\n\/\/ ebiten.SetWindowSize(640, 480)\n\/\/ ebiten.SetWindowTitle(\"Your game's title\")\n\/\/ \/\/ Call ebiten.RunGame to start your game loop.\n\/\/ if err := ebiten.RunGame(game); err != nil {\n\/\/ log.Fatal(err)\n\/\/ }\n\/\/ }\n\/\/\n\/\/ For backward compatibility, you can use a shorthand style Run.\n\/\/\n\/\/ \/\/ update proceeds the game state.\n\/\/ \/\/ update is called every frame (1\/60 [s]).\n\/\/ func update(screen *ebiten.Image) error {\n\/\/\n\/\/ \/\/ Write your game's logical update.\n\/\/\n\/\/ if ebiten.IsDrawingSkipped() {\n\/\/ \/\/ When the game is running slowly, the rendering result\n\/\/ \/\/ will not be adopted.\n\/\/ return nil\n\/\/ }\n\/\/\n\/\/ \/\/ Write your game's rendering.\n\/\/\n\/\/ return nil\n\/\/ }\n\/\/\n\/\/ func main() {\n\/\/ \/\/ Call ebiten.Run to start your game loop.\n\/\/ if err := ebiten.Run(update, 320, 240, 2, \"Your game's title\"); err != nil {\n\/\/ log.Fatal(err)\n\/\/ }\n\/\/ }\n\/\/\n\/\/ In the API document, 'the main thread' means the goroutine in init(), main() and their callees without 'go'\n\/\/ statement. It is assured that 'the main thread' runs on the OS main thread. There are some Ebiten functions that\n\/\/ must be called on the main thread under some conditions (typically, before ebiten.RunGame is called).\n\/\/\n\/\/ Environment variables\n\/\/\n\/\/ `EBITEN_SCREENSHOT_KEY` environment variable specifies the key\n\/\/ to take a screenshot. For example, if you run your game with\n\/\/ `EBITEN_SCREENSHOT_KEY=q`, you can take a game screen's screenshot\n\/\/ by pressing Q key. This works only on desktops.\n\/\/\n\/\/ `EBITEN_INTERNAL_IMAGES_KEY` environment variable specifies the key\n\/\/ to dump all the internal images. This is valid only when the build tag\n\/\/ 'ebitendebug' is specified. This works only on desktops.\n\/\/\n\/\/ Build tags\n\/\/\n\/\/ `ebitendebug` outputs a log of graphics commands. This is useful to know what happens in Ebiten. In general, the\n\/\/ number of graphics commands affects the performance of your game.\n\/\/\n\/\/ `ebitengl` forces to use OpenGL in any environments.\npackage ebiten\n<commit_msg>docs: Draw function's signature was wrong (again)<commit_after>\/\/ Copyright 2014 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package ebiten provides graphics and input API to develop a 2D game.\n\/\/\n\/\/ You can start the game by calling the function RunGame.\n\/\/\n\/\/ \/\/ Game implements ebiten.Game interface.\n\/\/ type Game struct{}\n\/\/\n\/\/ \/\/ Update proceeds the game state.\n\/\/ \/\/ Update is called every tick (1\/60 [s] by default).\n\/\/ func (g *Game) Update(screen *ebiten.Image) error {\n\/\/ \/\/ Write your game's logical update.\n\/\/ return nil\n\/\/ }\n\/\/\n\/\/ \/\/ Draw draws the game screen.\n\/\/ \/\/ Draw is called every frame (typically 1\/60[s] for 60Hz display).\n\/\/ func (g *Game) Draw(screen *ebiten.Image) {\n\/\/ \/\/ Write your game's rendering.\n\/\/ }\n\/\/\n\/\/ \/\/ Layout takes the outside size (e.g., the window size) and returns the (logical) screen size.\n\/\/ \/\/ If you don't have to adjust the screen size with the outside size, just return a fixed size.\n\/\/ func (g *Game) Layout(outsideWidth, outsideHeight int) (screenWidth, screenHeight int) {\n\/\/ return 320, 240\n\/\/ }\n\/\/\n\/\/ func main() {\n\/\/ game := &Game{}\n\/\/ \/\/ Sepcify the window size as you like. Here, a doulbed size is specified.\n\/\/ ebiten.SetWindowSize(640, 480)\n\/\/ ebiten.SetWindowTitle(\"Your game's title\")\n\/\/ \/\/ Call ebiten.RunGame to start your game loop.\n\/\/ if err := ebiten.RunGame(game); err != nil {\n\/\/ log.Fatal(err)\n\/\/ }\n\/\/ }\n\/\/\n\/\/ For backward compatibility, you can use a shorthand style Run.\n\/\/\n\/\/ \/\/ update proceeds the game state.\n\/\/ \/\/ update is called every frame (1\/60 [s]).\n\/\/ func update(screen *ebiten.Image) error {\n\/\/\n\/\/ \/\/ Write your game's logical update.\n\/\/\n\/\/ if ebiten.IsDrawingSkipped() {\n\/\/ \/\/ When the game is running slowly, the rendering result\n\/\/ \/\/ will not be adopted.\n\/\/ return nil\n\/\/ }\n\/\/\n\/\/ \/\/ Write your game's rendering.\n\/\/\n\/\/ return nil\n\/\/ }\n\/\/\n\/\/ func main() {\n\/\/ \/\/ Call ebiten.Run to start your game loop.\n\/\/ if err := ebiten.Run(update, 320, 240, 2, \"Your game's title\"); err != nil {\n\/\/ log.Fatal(err)\n\/\/ }\n\/\/ }\n\/\/\n\/\/ In the API document, 'the main thread' means the goroutine in init(), main() and their callees without 'go'\n\/\/ statement. It is assured that 'the main thread' runs on the OS main thread. There are some Ebiten functions that\n\/\/ must be called on the main thread under some conditions (typically, before ebiten.RunGame is called).\n\/\/\n\/\/ Environment variables\n\/\/\n\/\/ `EBITEN_SCREENSHOT_KEY` environment variable specifies the key\n\/\/ to take a screenshot. For example, if you run your game with\n\/\/ `EBITEN_SCREENSHOT_KEY=q`, you can take a game screen's screenshot\n\/\/ by pressing Q key. This works only on desktops.\n\/\/\n\/\/ `EBITEN_INTERNAL_IMAGES_KEY` environment variable specifies the key\n\/\/ to dump all the internal images. This is valid only when the build tag\n\/\/ 'ebitendebug' is specified. This works only on desktops.\n\/\/\n\/\/ Build tags\n\/\/\n\/\/ `ebitendebug` outputs a log of graphics commands. This is useful to know what happens in Ebiten. In general, the\n\/\/ number of graphics commands affects the performance of your game.\n\/\/\n\/\/ `ebitengl` forces to use OpenGL in any environments.\npackage ebiten\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage junos provides automation for Junos (Juniper Networks) devices, as\nwell as interaction with Junos Space.\n\nEstablishing A Session\n\nTo connect to a Junos device, the process is fairly straightforward.\n\n jnpr := junos.NewSession(host, user, password)\n defer jnpr.Close()\n\nViewing The Configuration\n\nTo View the entire configuration, use the keyword \"full\" for the second\nargument. If anything else outside of \"full\" is specified, it will return\nthe configuration of that section only. So \"security\" would return everything\nunder the \"security\" stanza.\n\n \/\/ Output format can be \"text\" or \"xml\"\n config, err := jnpr.GetConfig(\"full\", \"text\")\n if err != nil {\n fmt.Println(err)\n }\n fmt.Println(config)\n\n \/\/ Viewing only a certain part of the configuration\n routing, err := jnpr.GetConfig(\"routing-instances\", \"text\")\n if err != nil {\n fmt.Println(err)\n }\n fmt.Println(routing)\n\nCompare Rollback Configurations\n\nIf you want to view the difference between the current configuration and a rollback\none, then you can use the ConfigDiff() function.\n\n diff, err := jnpr.ConfigDiff(3)\n if err != nil {\n fmt.Println(err)\n }\n fmt.Println(diff)\n\nThis will output exactly how it does on the CLI when you \"| compare.\"\n\nRolling Back to a Previous State\n\nYou can also rollback to a previous state, or the \"rescue\" configuration by using\nthe RollbackConfig() function:\n\n err := jnpr.RollbackConfig(3)\n if err != nil {\n fmt.Println(err)\n }\n\n \/\/ Create a rescue config from the active configuration.\n jnpr.Rescue(\"save\")\n\n \/\/ You can also delete a rescue config.\n jnpr.Rescue(\"delete\")\n\n \/\/ Rollback to the \"rescue\" configuration.\n err := jnpr.RollbackConfig(\"rescue\")\n if err != nil {\n fmt.Println(err)\n }\n\nDevice Configuration\n\nWhen configuring a device, it is good practice to lock the configuration database,\nload the config, commit the configuration, and then unlock the configuration database.\n\nYou can do this with the following functions:\n\n Lock(), Commit(), Unlock()\n\nThere are multiple ways to commit a configuration as well:\n\n \/\/ Commit the configuration as normal\n Commit()\n\n \/\/ Check the configuration for any syntax errors (NOTE: you must still issue a Commit())\n CommitCheck()\n\n \/\/ Commit at a later time, i.e. 4:30 PM\n CommitAt(\"16:30:00\")\n\n \/\/ Rollback configuration if a Commit() is not issued within the given <minutes>.\n CommitConfirm(15)\n\nYou can configure the Junos device by uploading a local file, or pulling from an\nFTP\/HTTP server. The LoadConfig() function takes three arguments:\n\n filename or URL, format, and commit-on-load\n\nIf you specify a URL, it must be in the following format:\n\n ftp:\/\/<username>:<password>@hostname\/pathname\/file-name\n http:\/\/<username>:<password>@hostname\/pathname\/file-name\n\n Note: The default value for the FTP path variable is the user’s home directory. Thus,\n by default the file path to the configuration file is relative to the user directory.\n To specify an absolute path when using FTP, start the path with the characters %2F;\n for example: ftp:\/\/username:password@hostname\/%2Fpath\/filename.\n\nThe format of the commands within the file must be one of the following types:\n\n set\n \/\/ system name-server 1.1.1.1\n\n text\n \/\/ system {\n \/\/ name-server 1.1.1.1;\n \/\/ }\n\n xml\n \/\/ <system>\n \/\/ <name-server>\n \/\/ <name>1.1.1.1<\/name>\n \/\/ <\/name-server>\n \/\/ <\/system>\n\nIf the third option is \"true\" then after the configuration is loaded, a commit\nwill be issued. If set to \"false,\" you will have to commit the configuration\nusing the Commit() function.\n\n jnpr.Lock()\n err := jnpr.LoadConfig(\"path-to-file.txt\", \"set\", true)\n if err != nil {\n fmt.Println(err)\n }\n jnpr.Unlock()\n\nYou don't have to use Lock() and Unlock() if you wish, but if by chance someone\nelse tries to edit the device configuration at the same time, there can be conflics\nand most likely an error will be returned.\n\nRunning Commands\n\nYou can run operational mode commands such as \"show\" and \"request\" by using the\nCommand() function. Output formats can be \"text\" or \"xml.\"\n\n \/\/ Results returned in text format\n output, err := jnpr.Command(\"show chassis hardware\", \"text\")\n\n \/\/ Results returned in XML format\n output, err := jnpr.Command(\"show chassis hardware\", \"xml\")\n\nViewing Platform and Software Information\n\nWhen you call the Facts() function, it prints out the platform and software information:\n\n jnpr.Facts()\n\n \/\/ Returns output similar to the following\n node0\n --------------------------------------------------------------------------\n Hostname: firewall-1\n Model: SRX240H2\n Version: 12.1X47-D10.4\n\n node1\n --------------------------------------------------------------------------\n Hostname: firewall-1\n Model: SRX240H2\n Version: 12.1X47-D10.4\n \nConnecting and Viewing Device Information from Junos Space\n\n \/\/ Establish a connection to a Junos Space server.\n\tspace, err := junos.NewServer(\"space.company.com\", \"admin\", \"juniper123\")\n if err != nil {\n fmt.Println(err)\n }\n \n \/\/ Get the list of devices.\n d, err := space.Devices()\n if err != nil {\n fmt.Println(err)\n }\n \n \/\/ Iterate over our device list and display some information about them.\n for _, device := range d.Devices {\n fmt.Printf(\"Name: %s, IP Address: %s, Platform: %s\\n\", device.Name, device.IP, device.Platform)\n }\n*\/\npackage junos\n<commit_msg>Ran gofmt<commit_after>\/*\nPackage junos provides automation for Junos (Juniper Networks) devices, as\nwell as interaction with Junos Space.\n\nEstablishing A Session\n\nTo connect to a Junos device, the process is fairly straightforward.\n\n jnpr := junos.NewSession(host, user, password)\n defer jnpr.Close()\n\nViewing The Configuration\n\nTo View the entire configuration, use the keyword \"full\" for the second\nargument. If anything else outside of \"full\" is specified, it will return\nthe configuration of that section only. So \"security\" would return everything\nunder the \"security\" stanza.\n\n \/\/ Output format can be \"text\" or \"xml\"\n config, err := jnpr.GetConfig(\"full\", \"text\")\n if err != nil {\n fmt.Println(err)\n }\n fmt.Println(config)\n\n \/\/ Viewing only a certain part of the configuration\n routing, err := jnpr.GetConfig(\"routing-instances\", \"text\")\n if err != nil {\n fmt.Println(err)\n }\n fmt.Println(routing)\n\nCompare Rollback Configurations\n\nIf you want to view the difference between the current configuration and a rollback\none, then you can use the ConfigDiff() function.\n\n diff, err := jnpr.ConfigDiff(3)\n if err != nil {\n fmt.Println(err)\n }\n fmt.Println(diff)\n\nThis will output exactly how it does on the CLI when you \"| compare.\"\n\nRolling Back to a Previous State\n\nYou can also rollback to a previous state, or the \"rescue\" configuration by using\nthe RollbackConfig() function:\n\n err := jnpr.RollbackConfig(3)\n if err != nil {\n fmt.Println(err)\n }\n\n \/\/ Create a rescue config from the active configuration.\n jnpr.Rescue(\"save\")\n\n \/\/ You can also delete a rescue config.\n jnpr.Rescue(\"delete\")\n\n \/\/ Rollback to the \"rescue\" configuration.\n err := jnpr.RollbackConfig(\"rescue\")\n if err != nil {\n fmt.Println(err)\n }\n\nDevice Configuration\n\nWhen configuring a device, it is good practice to lock the configuration database,\nload the config, commit the configuration, and then unlock the configuration database.\n\nYou can do this with the following functions:\n\n Lock(), Commit(), Unlock()\n\nThere are multiple ways to commit a configuration as well:\n\n \/\/ Commit the configuration as normal\n Commit()\n\n \/\/ Check the configuration for any syntax errors (NOTE: you must still issue a Commit())\n CommitCheck()\n\n \/\/ Commit at a later time, i.e. 4:30 PM\n CommitAt(\"16:30:00\")\n\n \/\/ Rollback configuration if a Commit() is not issued within the given <minutes>.\n CommitConfirm(15)\n\nYou can configure the Junos device by uploading a local file, or pulling from an\nFTP\/HTTP server. The LoadConfig() function takes three arguments:\n\n filename or URL, format, and commit-on-load\n\nIf you specify a URL, it must be in the following format:\n\n ftp:\/\/<username>:<password>@hostname\/pathname\/file-name\n http:\/\/<username>:<password>@hostname\/pathname\/file-name\n\n Note: The default value for the FTP path variable is the user’s home directory. Thus,\n by default the file path to the configuration file is relative to the user directory.\n To specify an absolute path when using FTP, start the path with the characters %2F;\n for example: ftp:\/\/username:password@hostname\/%2Fpath\/filename.\n\nThe format of the commands within the file must be one of the following types:\n\n set\n \/\/ system name-server 1.1.1.1\n\n text\n \/\/ system {\n \/\/ name-server 1.1.1.1;\n \/\/ }\n\n xml\n \/\/ <system>\n \/\/ <name-server>\n \/\/ <name>1.1.1.1<\/name>\n \/\/ <\/name-server>\n \/\/ <\/system>\n\nIf the third option is \"true\" then after the configuration is loaded, a commit\nwill be issued. If set to \"false,\" you will have to commit the configuration\nusing the Commit() function.\n\n jnpr.Lock()\n err := jnpr.LoadConfig(\"path-to-file.txt\", \"set\", true)\n if err != nil {\n fmt.Println(err)\n }\n jnpr.Unlock()\n\nYou don't have to use Lock() and Unlock() if you wish, but if by chance someone\nelse tries to edit the device configuration at the same time, there can be conflics\nand most likely an error will be returned.\n\nRunning Commands\n\nYou can run operational mode commands such as \"show\" and \"request\" by using the\nCommand() function. Output formats can be \"text\" or \"xml.\"\n\n \/\/ Results returned in text format\n output, err := jnpr.Command(\"show chassis hardware\", \"text\")\n\n \/\/ Results returned in XML format\n output, err := jnpr.Command(\"show chassis hardware\", \"xml\")\n\nViewing Platform and Software Information\n\nWhen you call the Facts() function, it prints out the platform and software information:\n\n jnpr.Facts()\n\n \/\/ Returns output similar to the following\n node0\n --------------------------------------------------------------------------\n Hostname: firewall-1\n Model: SRX240H2\n Version: 12.1X47-D10.4\n\n node1\n --------------------------------------------------------------------------\n Hostname: firewall-1\n Model: SRX240H2\n Version: 12.1X47-D10.4\n\nConnecting and Viewing Device Information from Junos Space\n\n \/\/ Establish a connection to a Junos Space server.\n\tspace, err := junos.NewServer(\"space.company.com\", \"admin\", \"juniper123\")\n if err != nil {\n fmt.Println(err)\n }\n\n \/\/ Get the list of devices.\n d, err := space.Devices()\n if err != nil {\n fmt.Println(err)\n }\n\n \/\/ Iterate over our device list and display some information about them.\n for _, device := range d.Devices {\n fmt.Printf(\"Name: %s, IP Address: %s, Platform: %s\\n\", device.Name, device.IP, device.Platform)\n }\n*\/\npackage junos\n<|endoftext|>"} {"text":"<commit_before>\/\/ This file is part of graze\/golang-service\n\/\/\n\/\/ Copyright (c) 2016 Nature Delivered Ltd. <https:\/\/www.graze.com>\n\/\/\n\/\/ For the full copyright and license information, please view the LICENSE\n\/\/ file that was distributed with this source code.\n\/\/\n\/\/ license: https:\/\/github.com\/graze\/golang-service\/blob\/master\/LICENSE\n\/\/ link: https:\/\/github.com\/graze\/golang-service\n\n\/*\nPackage golang-service is a set of packages that provide many tools for helping create services in golang\n\ngolang-service contains the following packages:\n\nThe logging package provides a set of http.Handler logging handlers to write specific logs about requests\n\nThe logging\/handlers package provides a set of handlers that read from ENV variables to create the logging handlers\n\nThe nettest package provides a set of helpers for use when testing networks\n*\/\npackage golangservice\n\nimport (\n _ \"github.com\/graze\/golang-service\/logging\"\n _ \"github.com\/graze\/golang-service\/handlers\"\n _ \"github.com\/graze\/golang-service\/nettest\"\n)\n<commit_msg>update doc.go<commit_after>\/\/ This file is part of graze\/golang-service\n\/\/\n\/\/ Copyright (c) 2016 Nature Delivered Ltd. <https:\/\/www.graze.com>\n\/\/\n\/\/ For the full copyright and license information, please view the LICENSE\n\/\/ file that was distributed with this source code.\n\/\/\n\/\/ license: https:\/\/github.com\/graze\/golang-service\/blob\/master\/LICENSE\n\/\/ link: https:\/\/github.com\/graze\/golang-service\n\n\/*\ngolang-service is a set of packages to help with creating services using golang for logging and testing\n\ngolang-service contains the following packages:\n\nThe logging package provides a set of http.Handler logging handlers to write specific logs about requests\n\nThe handlers package provides a set of handlers that read from ENV variables to create the logging handlers\n\nThe nettest package provides a set of helpers for use when testing networks\n*\/\npackage golangservice\n\nimport (\n _ \"github.com\/graze\/golang-service\/logging\"\n _ \"github.com\/graze\/golang-service\/handlers\"\n _ \"github.com\/graze\/golang-service\/nettest\"\n)\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The rspace Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Doc is a simple document printer that produces the doc comments\n\/\/ for its argument symbols, using a more Go-like UI than godoc.\n\/\/ It can also search for symbols by looking in all packages, for instance:\n\/\/\tdoc isupper\n\/\/ will find unicode.IsUpper.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/build\"\n\t\"go\/parser\"\n\t\"go\/printer\"\n\t\"go\/token\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unicode\/utf8\"\n)\n\nconst usageDoc = `Find documentation for names.\nusage:\n\tdoc pkg.name # doc io.Writer\n\tdoc pkg name # doc fmt Printf\n\tdoc name # doc isupper: finds unicode.IsUpper\npkg is the last component of any package, e.g. fmt, parser\nname is the name of an exported symbol; case is ignored in matches.\n`\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, usageDoc)\n\tos.Exit(2)\n}\n\nfunc main() {\n\tflag.Parse()\n\tvar pkg, name string\n\tswitch flag.NArg() {\n\tcase 1:\n\t\tif strings.Contains(flag.Arg(0), \".\") {\n\t\t\tpkg, name = split(flag.Arg(0))\n\t\t} else {\n\t\t\tname = flag.Arg(0)\n\t\t}\n\tcase 2:\n\t\tpkg, name = flag.Arg(0), flag.Arg(1)\n\tdefault:\n\t\tusage()\n\t}\n\tfor _, path := range paths(pkg) {\n\t\tlookInDirectory(path, name)\n\t}\n}\n\nfunc split(arg string) (pkg, name string) {\n\tstr := strings.Split(arg, \".\")\n\tif len(str) != 2 {\n\t\tusage()\n\t}\n\treturn str[0], str[1]\n}\n\nfunc paths(pkg string) []string {\n\tgoroot := os.Getenv(\"GOROOT\")\n\tif goroot == \"\" {\n\t\tfmt.Fprintf(os.Stderr, \"doc: $GOROOT not set\\n\")\n\t\tos.Exit(2)\n\t}\n\tpkgs := pathsFor(goroot, pkg)\n\tgopath := os.Getenv(\"GOPATH\")\n\tif gopath != \"\" {\n\t\tfor _, root := range splitGopath(gopath) {\n\t\t\tpkgs = append(pkgs, pathsFor(root, pkg)...)\n\t\t}\n\t}\n\treturn pkgs\n}\n\nfunc splitGopath(gopath string) []string {\n\t\/\/ TODO: Assumes Unix.\n\treturn strings.Split(gopath, \":\")\n}\n\n\/\/ pathsFor recursively walks the tree looking for possible directories for the package:\n\/\/ those whose basename is pkg.\nfunc pathsFor(root, pkg string) []string {\n\troot = path.Join(root, \"src\")\n\tpkgPaths := make([]string, 0, 10)\n\tvisit := func(pathName string, f os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ One package per directory. Ignore the files themselves.\n\t\tif !f.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ No .hg or other dot nonsense please.\n\t\tif strings.Contains(pathName, \"\/.\") { \/\/ TODO: Unix-specific?\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\t\/\/ Is the last element of the path correct\n\t\tif pkg == \"\" || path.Base(pathName) == pkg {\n\t\t\tpkgPaths = append(pkgPaths, pathName)\n\t\t}\n\t\treturn nil\n\t}\n\n\tfilepath.Walk(root, visit)\n\treturn pkgPaths\n}\n\n\/\/ lookInDirectory looks in the package (if any) in the directory for the named exported identifier.\nfunc lookInDirectory(directory, name string) {\n\tpkg, err := build.Default.ImportDir(directory, 0)\n\tif err != nil {\n\t\t\/\/ If it's just that there are no go source files, that's fine.\n\t\tif _, nogo := err.(*build.NoGoError); nogo {\n\t\t\treturn\n\t\t}\n\t\t\/\/ Non-fatal: we are doing a recursive walk and there may be other directories.\n\t\treturn\n\t}\n\tvar fileNames []string\n\tfileNames = append(fileNames, pkg.GoFiles...)\n\tprefixDirectory(directory, fileNames)\n\tdoPackage(fileNames, name)\n}\n\n\/\/ prefixDirectory places the directory name on the beginning of each name in the list.\nfunc prefixDirectory(directory string, names []string) {\n\tif directory != \".\" {\n\t\tfor i, name := range names {\n\t\t\tnames[i] = filepath.Join(directory, name)\n\t\t}\n\t}\n}\n\n\/\/ File is a wrapper for the state of a file used in the parser.\n\/\/ The parse tree walkers are all methods of this type.\ntype File struct {\n\tfset *token.FileSet\n\tname string\n\tident string\n\tfile *ast.File\n\tcomments ast.CommentMap\n}\n\n\/\/ doPackage analyzes the single package constructed from the named files, looking for\n\/\/ the definition of ident.\nfunc doPackage(fileNames []string, ident string) {\n\tvar files []*File\n\tvar astFiles []*ast.File\n\tfs := token.NewFileSet()\n\tfor _, name := range fileNames {\n\t\tf, err := os.Open(name)\n\t\tif err != nil {\n\t\t\t\/\/ Warn but continue to next package.\n\t\t\tfmt.Fprintf(os.Stderr, \"%s: %s\", name, err)\n\t\t\treturn\n\t\t}\n\t\tdefer f.Close()\n\t\tdata, err := ioutil.ReadAll(f)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s: %s\", name, err)\n\t\t\treturn\n\t\t}\n\t\tparsedFile, err := parser.ParseFile(fs, name, bytes.NewReader(data), parser.ParseComments)\n\t\tif err != nil {\n\t\t\t\/\/ Noisy - just ignore.\n\t\t\t\/\/ fmt.Fprintf(os.Stderr, \"%s: %s\", name, err)\n\t\t\treturn\n\t\t}\n\t\tthisFile := &File{\n\t\t\tfset: fs,\n\t\t\tname: name,\n\t\t\tident: ident,\n\t\t\tfile: parsedFile,\n\t\t\tcomments: ast.NewCommentMap(fs, parsedFile, parsedFile.Comments),\n\t\t}\n\t\tfiles = append(files, thisFile)\n\t\tastFiles = append(astFiles, parsedFile)\n\t}\n\tfor _, file := range files {\n\t\tast.Walk(file, file.file)\n\t}\n}\n\n\/\/ Visit implements the ast.Visitor interface.\nfunc (f *File) Visit(node ast.Node) ast.Visitor {\n\tswitch n := node.(type) {\n\tcase *ast.GenDecl:\n\t\t\/\/ Variables, constants, types.\n\t\tfor _, spec := range n.Specs {\n\t\t\tswitch spec := spec.(type) {\n\t\t\tcase *ast.ValueSpec:\n\t\t\t\tfor _, ident := range spec.Names {\n\t\t\t\t\tif equal(ident.Name, f.ident) {\n\t\t\t\t\t\tf.printNode(n, n.Doc)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase *ast.TypeSpec:\n\t\t\t\tif equal(spec.Name.Name, f.ident) {\n\t\t\t\t\tf.printNode(n, n.Doc)\n\t\t\t\t}\n\t\t\tcase *ast.ImportSpec:\n\t\t\t\tcontinue \/\/ Don't care.\n\t\t\t}\n\t\t}\n\tcase *ast.FuncDecl:\n\t\t\/\/ Methods, top-level functions.\n\t\tif equal(n.Name.Name, f.ident) {\n\t\t\tn.Body = nil \/\/ Do not print the function body.\n\t\t\tf.printNode(n, n.Doc)\n\t\t}\n\t}\n\treturn f\n}\n\nfunc equal(n1, n2 string) bool {\n\t\/\/ n1 must be exported.\n\tr, _ := utf8.DecodeRuneInString(n1)\n\tif !unicode.IsUpper(r) {\n\t\treturn false\n\t}\n\treturn strings.ToLower(n1) == strings.ToLower(n2)\n}\n\nfunc (f *File) printNode(node ast.Node, comments *ast.CommentGroup) {\n\tcommentedNode := printer.CommentedNode{Node: node}\n\tif comments != nil {\n\t\tcommentedNode.Comments = []*ast.CommentGroup{comments}\n\t}\n\tvar b bytes.Buffer\n\tprinter.Fprint(&b, f.fset, &commentedNode)\n\tposn := f.fset.Position(node.Pos())\n\tfmt.Printf(\"%s:%d:\\n%s\\n\\n\", posn.Filename, posn.Line, b.Bytes())\n}\n<commit_msg>doc: tweak usage doc<commit_after>\/\/ Copyright 2013 The rspace Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Doc is a simple document printer that produces the doc comments\n\/\/ for its argument symbols, using a more Go-like UI than godoc.\n\/\/ It can also search for symbols by looking in all packages, for instance:\n\/\/\tdoc isupper\n\/\/ will find unicode.IsUpper.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/build\"\n\t\"go\/parser\"\n\t\"go\/printer\"\n\t\"go\/token\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unicode\/utf8\"\n)\n\nconst usageDoc = `Find documentation for names.\nusage:\n\tdoc pkg.name # \"doc io.Writer\"\n\tdoc pkg name # \"doc fmt Printf\"\n\tdoc name # \"doc isupper\" finds unicode.IsUpper\npkg is the last component of any package, e.g. fmt, parser\nname is the name of an exported symbol; case is ignored in matches.\n`\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, usageDoc)\n\tos.Exit(2)\n}\n\nfunc main() {\n\tflag.Parse()\n\tvar pkg, name string\n\tswitch flag.NArg() {\n\tcase 1:\n\t\tif strings.Contains(flag.Arg(0), \".\") {\n\t\t\tpkg, name = split(flag.Arg(0))\n\t\t} else {\n\t\t\tname = flag.Arg(0)\n\t\t}\n\tcase 2:\n\t\tpkg, name = flag.Arg(0), flag.Arg(1)\n\tdefault:\n\t\tusage()\n\t}\n\tfor _, path := range paths(pkg) {\n\t\tlookInDirectory(path, name)\n\t}\n}\n\nfunc split(arg string) (pkg, name string) {\n\tstr := strings.Split(arg, \".\")\n\tif len(str) != 2 {\n\t\tusage()\n\t}\n\treturn str[0], str[1]\n}\n\nfunc paths(pkg string) []string {\n\tgoroot := os.Getenv(\"GOROOT\")\n\tif goroot == \"\" {\n\t\tfmt.Fprintf(os.Stderr, \"doc: $GOROOT not set\\n\")\n\t\tos.Exit(2)\n\t}\n\tpkgs := pathsFor(goroot, pkg)\n\tgopath := os.Getenv(\"GOPATH\")\n\tif gopath != \"\" {\n\t\tfor _, root := range splitGopath(gopath) {\n\t\t\tpkgs = append(pkgs, pathsFor(root, pkg)...)\n\t\t}\n\t}\n\treturn pkgs\n}\n\nfunc splitGopath(gopath string) []string {\n\t\/\/ TODO: Assumes Unix.\n\treturn strings.Split(gopath, \":\")\n}\n\n\/\/ pathsFor recursively walks the tree looking for possible directories for the package:\n\/\/ those whose basename is pkg.\nfunc pathsFor(root, pkg string) []string {\n\troot = path.Join(root, \"src\")\n\tpkgPaths := make([]string, 0, 10)\n\tvisit := func(pathName string, f os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ One package per directory. Ignore the files themselves.\n\t\tif !f.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ No .hg or other dot nonsense please.\n\t\tif strings.Contains(pathName, \"\/.\") { \/\/ TODO: Unix-specific?\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\t\/\/ Is the last element of the path correct\n\t\tif pkg == \"\" || path.Base(pathName) == pkg {\n\t\t\tpkgPaths = append(pkgPaths, pathName)\n\t\t}\n\t\treturn nil\n\t}\n\n\tfilepath.Walk(root, visit)\n\treturn pkgPaths\n}\n\n\/\/ lookInDirectory looks in the package (if any) in the directory for the named exported identifier.\nfunc lookInDirectory(directory, name string) {\n\tpkg, err := build.Default.ImportDir(directory, 0)\n\tif err != nil {\n\t\t\/\/ If it's just that there are no go source files, that's fine.\n\t\tif _, nogo := err.(*build.NoGoError); nogo {\n\t\t\treturn\n\t\t}\n\t\t\/\/ Non-fatal: we are doing a recursive walk and there may be other directories.\n\t\treturn\n\t}\n\tvar fileNames []string\n\tfileNames = append(fileNames, pkg.GoFiles...)\n\tprefixDirectory(directory, fileNames)\n\tdoPackage(fileNames, name)\n}\n\n\/\/ prefixDirectory places the directory name on the beginning of each name in the list.\nfunc prefixDirectory(directory string, names []string) {\n\tif directory != \".\" {\n\t\tfor i, name := range names {\n\t\t\tnames[i] = filepath.Join(directory, name)\n\t\t}\n\t}\n}\n\n\/\/ File is a wrapper for the state of a file used in the parser.\n\/\/ The parse tree walkers are all methods of this type.\ntype File struct {\n\tfset *token.FileSet\n\tname string\n\tident string\n\tfile *ast.File\n\tcomments ast.CommentMap\n}\n\n\/\/ doPackage analyzes the single package constructed from the named files, looking for\n\/\/ the definition of ident.\nfunc doPackage(fileNames []string, ident string) {\n\tvar files []*File\n\tvar astFiles []*ast.File\n\tfs := token.NewFileSet()\n\tfor _, name := range fileNames {\n\t\tf, err := os.Open(name)\n\t\tif err != nil {\n\t\t\t\/\/ Warn but continue to next package.\n\t\t\tfmt.Fprintf(os.Stderr, \"%s: %s\", name, err)\n\t\t\treturn\n\t\t}\n\t\tdefer f.Close()\n\t\tdata, err := ioutil.ReadAll(f)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s: %s\", name, err)\n\t\t\treturn\n\t\t}\n\t\tparsedFile, err := parser.ParseFile(fs, name, bytes.NewReader(data), parser.ParseComments)\n\t\tif err != nil {\n\t\t\t\/\/ Noisy - just ignore.\n\t\t\t\/\/ fmt.Fprintf(os.Stderr, \"%s: %s\", name, err)\n\t\t\treturn\n\t\t}\n\t\tthisFile := &File{\n\t\t\tfset: fs,\n\t\t\tname: name,\n\t\t\tident: ident,\n\t\t\tfile: parsedFile,\n\t\t\tcomments: ast.NewCommentMap(fs, parsedFile, parsedFile.Comments),\n\t\t}\n\t\tfiles = append(files, thisFile)\n\t\tastFiles = append(astFiles, parsedFile)\n\t}\n\tfor _, file := range files {\n\t\tast.Walk(file, file.file)\n\t}\n}\n\n\/\/ Visit implements the ast.Visitor interface.\nfunc (f *File) Visit(node ast.Node) ast.Visitor {\n\tswitch n := node.(type) {\n\tcase *ast.GenDecl:\n\t\t\/\/ Variables, constants, types.\n\t\tfor _, spec := range n.Specs {\n\t\t\tswitch spec := spec.(type) {\n\t\t\tcase *ast.ValueSpec:\n\t\t\t\tfor _, ident := range spec.Names {\n\t\t\t\t\tif equal(ident.Name, f.ident) {\n\t\t\t\t\t\tf.printNode(n, n.Doc)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase *ast.TypeSpec:\n\t\t\t\tif equal(spec.Name.Name, f.ident) {\n\t\t\t\t\tf.printNode(n, n.Doc)\n\t\t\t\t}\n\t\t\tcase *ast.ImportSpec:\n\t\t\t\tcontinue \/\/ Don't care.\n\t\t\t}\n\t\t}\n\tcase *ast.FuncDecl:\n\t\t\/\/ Methods, top-level functions.\n\t\tif equal(n.Name.Name, f.ident) {\n\t\t\tn.Body = nil \/\/ Do not print the function body.\n\t\t\tf.printNode(n, n.Doc)\n\t\t}\n\t}\n\treturn f\n}\n\nfunc equal(n1, n2 string) bool {\n\t\/\/ n1 must be exported.\n\tr, _ := utf8.DecodeRuneInString(n1)\n\tif !unicode.IsUpper(r) {\n\t\treturn false\n\t}\n\treturn strings.ToLower(n1) == strings.ToLower(n2)\n}\n\nfunc (f *File) printNode(node ast.Node, comments *ast.CommentGroup) {\n\tcommentedNode := printer.CommentedNode{Node: node}\n\tif comments != nil {\n\t\tcommentedNode.Comments = []*ast.CommentGroup{comments}\n\t}\n\tvar b bytes.Buffer\n\tprinter.Fprint(&b, f.fset, &commentedNode)\n\tposn := f.fset.Position(node.Pos())\n\tfmt.Printf(\"%s:%d:\\n%s\\n\\n\", posn.Filename, posn.Line, b.Bytes())\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Hanayo is the Ripple front-end web server.\npackage main\n\n\/\/ version is the current version of hanayo\nconst version = \"v1.3.6\"\n<commit_msg>⬆️ v1.3.7 ⬆️<commit_after>\/\/ Hanayo is the Ripple front-end web server.\npackage main\n\n\/\/ version is the current version of hanayo\nconst version = \"v1.3.7\"\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package uax29 provides implementations of Unicode text segmentation: https:\/\/unicode.org\/reports\/tr29\/\n\/\/ See subpackages for words, sentences and graphemes.\npackage uax29\n<commit_msg>Doc update<commit_after>\/\/ Package uax29 provides Unicode text segmentation (UAX #29) for words, sentences and graphemes.\n\/\/ See respective sub-packages for those implementations.\n\/\/\n\/\/ This top-level package provides base functionality & types.\n\/\/\n\/\/ For more information on the UAX #29 spec: https:\/\/unicode.org\/reports\/tr29\/\npackage uax29\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package hstspreload has 4 parts:\n\/\/\n\/\/ - A Go package with functions to check HSTS preload requirements.\n\/\/\n\/\/ - The `hstspreload` command line tool, which can be installed with:\n\/\/\n\/\/ go get github.com\/chromium\/hstspreload\/cmd\/hstspreload\n\/\/\n\/\/ - The `transport_security_state_static_generate.go` script, which can\n\/\/ be installed with:\n\/\/\n\/\/ go get github.com\/chromium\/hstspreload\/cmd\/transport_security_state_static_generate\n\/\/\n\/\/ - Source code for hstspreload.appspot.com\npackage hstspreload\n<commit_msg>Update main package doc string.<commit_after>\/\/ Package hstspreload has 5 parts:\n\/\/\n\/\/ - The `hstspreload` package with functions to check HSTS preload requirements.\n\/\/\n\/\/ - The `chromiumpreload` package, to query Chromium preload list state.\n\/\/\n\/\/ - The `hstspreload` command line tool.\n\/\/\n\/\/ - The `transport_security_state_static_generate` script.\n\/\/\n\/\/ - Source code for hstspreload.appspot.com\npackage hstspreload\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package gangliamr provides metrics backed by Ganglia.\n\/\/\n\/\/ The underlying in-memory metrics are used from:\n\/\/ http:\/\/godoc.org\/github.com\/daaku\/go.metrics. Application code should use\n\/\/ the interfaces defined in that package in order to not be Ganglia specific.\n\/\/\n\/\/ The underlying Ganglia library is:\n\/\/ http:\/\/godoc.org\/github.com\/daaku\/go.ganglia\/gmetric.\n\/\/\n\/\/ A handful of metrics types are provided, and they all have a similar form.\n\/\/ The \"name\" property is always required, all other metadata properties are\n\/\/ optional. The metric instances are also automatically created upon\n\/\/ registration.\n\/\/\n\/\/ The common set of properties for the metrics are:\n\/\/\n\/\/ \/\/ The name is used as the file name, and also the title unless one is\n\/\/ \/\/ explicitly provided. This property is required.\n\/\/ Name string\n\/\/\n\/\/ \/\/ The title is for human consumption and is shown atop the graph.\n\/\/ Title string\n\/\/\n\/\/ \/\/ The units are shown in the graph to provide context to the numbers.\n\/\/ \/\/ The default value varies based on the metric type.\n\/\/ Units string\n\/\/\n\/\/ \/\/ Descriptions serve as documentation.\n\/\/ Description string\n\/\/\n\/\/ \/\/ The groups ensure your metric is kept alongside sibling metrics.\n\/\/ Groups []string\npackage gangliamr\n<commit_msg>name => Name in doc<commit_after>\/\/ Package gangliamr provides metrics backed by Ganglia.\n\/\/\n\/\/ The underlying in-memory metrics are used from:\n\/\/ http:\/\/godoc.org\/github.com\/daaku\/go.metrics. Application code should use\n\/\/ the interfaces defined in that package in order to not be Ganglia specific.\n\/\/\n\/\/ The underlying Ganglia library is:\n\/\/ http:\/\/godoc.org\/github.com\/daaku\/go.ganglia\/gmetric.\n\/\/\n\/\/ A handful of metrics types are provided, and they all have a similar form.\n\/\/ The \"Name\" property is always required, all other metadata properties are\n\/\/ optional. The metric instances are also automatically created upon\n\/\/ registration.\n\/\/\n\/\/ The common set of properties for the metrics are:\n\/\/\n\/\/ \/\/ The name is used as the file name, and also the title unless one is\n\/\/ \/\/ explicitly provided. This property is required.\n\/\/ Name string\n\/\/\n\/\/ \/\/ The title is for human consumption and is shown atop the graph.\n\/\/ Title string\n\/\/\n\/\/ \/\/ The units are shown in the graph to provide context to the numbers.\n\/\/ \/\/ The default value varies based on the metric type.\n\/\/ Units string\n\/\/\n\/\/ \/\/ Descriptions serve as documentation.\n\/\/ Description string\n\/\/\n\/\/ \/\/ The groups ensure your metric is kept alongside sibling metrics.\n\/\/ Groups []string\npackage gangliamr\n<|endoftext|>"} {"text":"<commit_before>\/*\n\tPackage color extends fmt.Printf with verbs for terminal color highlighting. All it does is replace the verbs with the appropriate terminal escape sequence.\n\n\tHighlight verbs:\n\n\t\t%h[attrs]\t\tuses the attrs to highlight the following text\n\t\t%r\t\t\tan abbreviation for %h[reset]\n\n\tattrs is a + separated list of Colors (e.g. fgRed) or Attributes (e.g. bold).\n\n\tMultiple highlight verbs do not reset preceeding verbs, they add onto them.\n\tFor example, if you set the foreground to green in the first verb, then set the background to red in the second, any text following the second will have a green foreground and a red background.\n\n\tThe syntax reference is included in REFERENCE.md.\n\n\tStandard Colors:\n\t\t\/\/ \"panic:\" with a red foreground then normal \"rip\"\n\t\tcolor.Printf(\"%h[fgRed]panic:%r rip\\n\")\n\n\t\t\/\/ \"panic:\" with a brightRed background then normal \"rip\"\n\t\tcolor.Printf(\"%h[bgBrightRed]panic:%r rip\\n\")\n\n\tAttributes:\n\t\t\/\/ bold \"panic:\" then normal \"rip\"\n\t\tcolor.Printf(\"%h[bold]panic:%r rip\\n\")\n\n\t\t\/\/ underlined \"panic:\" with then normal \"rip\"\n\t\tcolor.Printf(\"%h[underline]panic:%r rip\\n\")\n\n\t256 Colors:\n\t\t\/\/ \"panic:\" with a green foreground then normal \"rip\"\n\t\tcolor.Printf(\"%h[fg2]panic:%r rip\\n\")\n\n\t\t\/\/ \"panic:\" with a bright green background then normal \"rip\"\n\t\tcolor.Printf(\"%h[bg10]panic:%r rip\\n\")\n\n\tMixing Colors and Attributes:\n\t\t\/\/ bolded \"panic:\" with a green foreground then normal \"rip\"\n\t\tcolor.Printf(\"%h[fgGreen+bold]panic:%r rip\\n\")\n\n\t\t\/\/ underlined \"panic:\" with a bright black background then normal \"rip\"\n\t\tcolor.Printf(\"%h[bg8+underline]panic:%r rip\\n\")\n\n\tHow does reset behave?\n\t\t\/\/ bolded \"panic:\" with a blue foreground\n\t\t\/\/ then bolded \"rip\" with a green foreground and bright black background\n\t\tcolor.Printf(\"%h[fgBlue+bold]panic: %h[bg8]rip\\n\")\n\n\t\t\/\/ bolded \"hi\" with a green foreground and bright black background\n\t\tfmt.Printf(\"hi\")\n\n\t\t\/\/ finally resets the highlighting\n\t\tcolor.Printf(\"%r\")\n\n\tlog.Logger wrapper:\n\t\tlogger := color.NewLogger(os.Stderr, \"\", 0)\n\n\t\t\/\/ prints hi in red\n\t\tlogger.Printf(\"%h[fgRed]hi%r\")\n\n\t\t\/\/ prints hi in red and then exits\n\t\tlogger.Fatalf(\"%h[fgRed]hi%r\")\n\n*\/\npackage color\n<commit_msg>link to ref<commit_after>\/*\n\tPackage color extends fmt.Printf with verbs for terminal color highlighting. All it does is replace the verbs with the appropriate terminal escape sequence.\n\n\tHighlight verbs:\n\n\t\t%h[attrs]\t\tuses the attrs to highlight the following text\n\t\t%r\t\t\tan abbreviation for %h[reset]\n\n\tattrs is a + separated list of Colors (e.g. fgRed) or Attributes (e.g. bold).\n\n\tMultiple highlight verbs do not reset preceeding verbs, they add onto them.\n\tFor example, if you set the foreground to green in the first verb, then set the background to red in the second, any text following the second will have a green foreground and a red background.\n\n\tThe syntax reference is here: https:\/\/github.com\/nhooyr\/color\/blob\/master\/REFERENCE.md<Paste>\n\n\tStandard Colors:\n\t\t\/\/ \"panic:\" with a red foreground then normal \"rip\"\n\t\tcolor.Printf(\"%h[fgRed]panic:%r rip\\n\")\n\n\t\t\/\/ \"panic:\" with a brightRed background then normal \"rip\"\n\t\tcolor.Printf(\"%h[bgBrightRed]panic:%r rip\\n\")\n\n\tAttributes:\n\t\t\/\/ bold \"panic:\" then normal \"rip\"\n\t\tcolor.Printf(\"%h[bold]panic:%r rip\\n\")\n\n\t\t\/\/ underlined \"panic:\" with then normal \"rip\"\n\t\tcolor.Printf(\"%h[underline]panic:%r rip\\n\")\n\n\t256 Colors:\n\t\t\/\/ \"panic:\" with a green foreground then normal \"rip\"\n\t\tcolor.Printf(\"%h[fg2]panic:%r rip\\n\")\n\n\t\t\/\/ \"panic:\" with a bright green background then normal \"rip\"\n\t\tcolor.Printf(\"%h[bg10]panic:%r rip\\n\")\n\n\tMixing Colors and Attributes:\n\t\t\/\/ bolded \"panic:\" with a green foreground then normal \"rip\"\n\t\tcolor.Printf(\"%h[fgGreen+bold]panic:%r rip\\n\")\n\n\t\t\/\/ underlined \"panic:\" with a bright black background then normal \"rip\"\n\t\tcolor.Printf(\"%h[bg8+underline]panic:%r rip\\n\")\n\n\tHow does reset behave?\n\t\t\/\/ bolded \"panic:\" with a blue foreground\n\t\t\/\/ then bolded \"rip\" with a green foreground and bright black background\n\t\tcolor.Printf(\"%h[fgBlue+bold]panic: %h[bg8]rip\\n\")\n\n\t\t\/\/ bolded \"hi\" with a green foreground and bright black background\n\t\tfmt.Printf(\"hi\")\n\n\t\t\/\/ finally resets the highlighting\n\t\tcolor.Printf(\"%r\")\n\n\tlog.Logger wrapper:\n\t\tlogger := color.NewLogger(os.Stderr, \"\", 0)\n\n\t\t\/\/ prints hi in red\n\t\tlogger.Printf(\"%h[fgRed]hi%r\")\n\n\t\t\/\/ prints hi in red and then exits\n\t\tlogger.Fatalf(\"%h[fgRed]hi%r\")\n\n*\/\npackage color\n<|endoftext|>"} {"text":"<commit_before>package rrstorage\n\nimport (\n\t\"bytes\"\n\t\"github.com\/cheggaaa\/pb\"\n\t\"crypto\/hmac\"\n\t\"crypto\/sha1\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"github.com\/songtianyi\/rrframework\/logs\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\ntype UfileStorage struct {\n\tPublicKey string\n\tPrivateKey string\n\tBucketName string\n}\n\nconst (\n\tEXPIRE = 3600\n\tSUFFIX = \".ufile.ucloud.cn\"\n\tMAX_PUT_SIZE = 50 * (1 << 20)\n\tMAX_GET_SIZE = 50 * (1 << 20)\n\tPARTIAL_SIZE = 4 * (1 << 20)\n)\n\nfunc CreateUfileStorage(pub, pri, bun string) StorageWrapper {\n\ts := &UfileStorage{\n\t\tPublicKey: pub,\n\t\tPrivateKey: pri,\n\t\tBucketName: bun,\n\t}\n\treturn s\n}\n\nfunc (s *UfileStorage) signheader(method, ctype, bucket, filename string) string {\n\tdata := method + \"\\n\"\n\tdata += \"\\n\" \/\/Content-MD5 empty\n\tdata += ctype + \"\\n\" \/\/Content-Type\n\tdata += \"\\n\" \/\/Date empty\n\tdata += \"\/\" + bucket + \"\/\" + filename\n\n\th := hmac.New(sha1.New, []byte(s.PrivateKey))\n\th.Write([]byte(data))\n\treturn base64.StdEncoding.EncodeToString(h.Sum(nil))\n}\n\ntype initResponse struct {\n\tUploadId string\n\tBlkSize int\n\tBucket string\n\tKey string\n}\n\nfunc (s *UfileStorage) initiateMultipartUpload(filename string) (*initResponse, error) {\n\tsign := s.signheader(\"POST\", \"application\/octet-stream\", s.BucketName, filename)\n\n\tauth := \"UCloud\" + \" \" + s.PublicKey + \":\" + sign\n\tclient := &http.Client{}\n\turl := \"http:\/\/\" + s.BucketName + SUFFIX + \"\/\" + filename + \"?uploads\"\n\treq, err := http.NewRequest(\"POST\", url, nil)\n\n\treq.Header.Add(\"Authorization\", auth)\n\treq.Header.Add(\"Content-Type\", \"application\/octet-stream\")\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"initiateMultipartUpload failed, %s\", string(body))\n\t}\n\tvar res initResponse\n\tif err := json.Unmarshal(body, &res); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &res, nil\n}\n\ntype uploadResponse struct {\n\tPartNumber int\n}\n\nfunc (s *UfileStorage) uploadPart(content []byte, info *initResponse, partNum int) (*uploadResponse, string, error) {\n\tsign := s.signheader(\"PUT\", \"application\/octet-stream\", info.Bucket, info.Key)\n\n\tauth := \"UCloud\" + \" \" + s.PublicKey + \":\" + sign\n\tclient := &http.Client{}\n\turl := \"http:\/\/\" + info.Bucket + SUFFIX + \"\/\" + info.Key + \"?uploadId=\" + info.UploadId + \"&partNumber=\" + strconv.Itoa(partNum)\n\treq, err := http.NewRequest(\"PUT\", url, bytes.NewReader(content))\n\n\treq.Header.Add(\"Authorization\", auth)\n\treq.Header.Add(\"Content-Type\", \"application\/octet-stream\")\n\treq.Header.Add(\"Content-Length\", strconv.Itoa(info.BlkSize))\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn nil, \"\", fmt.Errorf(\"uploadPart failed, %s\", string(body))\n\t}\n\tvar res uploadResponse\n\tif err := json.Unmarshal(body, &res); err != nil {\n\t\treturn nil, \"\", err\n\t}\n\treturn &res, resp.Header.Get(\"ETag\"), nil\n}\n\ntype finishResponse struct {\n\tBucket string\n\tKey string\n\tFileSize int\n}\n\nfunc (s *UfileStorage) finishMultipartUpload(info *initResponse, etags string) (*finishResponse, error) {\n\tsign := s.signheader(\"POST\", \"text\/plain\", info.Bucket, info.Key)\n\n\tauth := \"UCloud\" + \" \" + s.PublicKey + \":\" + sign\n\tclient := &http.Client{}\n\turl := \"http:\/\/\" + info.Bucket + SUFFIX + \"\/\" + info.Key + \"?uploadId=\" + info.UploadId + \"&newKey=\" + info.Key\n\treq, err := http.NewRequest(\"POST\", url, strings.NewReader(etags))\n\n\treq.Header.Add(\"Authorization\", auth)\n\treq.Header.Add(\"Content-Length\", strconv.Itoa(len(etags)))\n\treq.Header.Add(\"Content-Type\", \"text\/plain\")\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"finishMultipartUpload failed, %s\", string(body))\n\t}\n\tvar res finishResponse\n\tif err := json.Unmarshal(body, &res); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &res, nil\n}\n\nfunc (s *UfileStorage) put(content []byte, filename string) error {\n\t\/\/ sign\n\tsign := s.signheader(\"PUT\", \"application\/octet-stream\", s.BucketName, filename)\n\tauth := \"UCloud\" + \" \" + s.PublicKey + \":\" + sign\n\tclient := &http.Client{}\n\turl := \"http:\/\/\" + s.BucketName + SUFFIX + \"\/\" + filename\n\treq, err := http.NewRequest(\"PUT\", url, bytes.NewReader(content))\n\n\treq.Header.Add(\"Authorization\", auth)\n\treq.Header.Add(\"Content-Type\", \"application\/octet-stream\")\n\treq.Header.Add(\"Content-Length\", strconv.Itoa(len(content)))\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode != 200 {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tdefer resp.Body.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn fmt.Errorf(\"put file failed, %s\", string(body))\n\t}\n\treturn nil\n}\n\nfunc (s *UfileStorage) Save(content []byte, filename string) error {\n\n\tsize := len(content)\n\tif size > MAX_PUT_SIZE {\n\t\t\/\/ > 50M\n\t\tinitRes, err := s.initiateMultipartUpload(filename)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnum := size \/ initRes.BlkSize\n\t\tbar := pb.StartNew(num+1)\n\t\tetags := make([]string, 0)\n\t\tvar wg sync.WaitGroup\n\t\tfor i := 0; i < num; i++ {\n\t\t\twg.Add(1)\n\t\t\tgo func(j int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tpart := content[j*initRes.BlkSize : (j+1)*initRes.BlkSize]\n\t\t\t\t_, etag, err := s.uploadPart(part, initRes, j)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogs.Error(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tetags = append(etags, etag)\n\t\t\t\tbar.Increment()\n\t\t\t}(i)\n\t\t}\n\t\t\/\/ TODO concurrency limit\n\t\t\/\/ TODO capture error\n\t\twg.Wait()\n\t\tif num*initRes.BlkSize < size {\n\t\t\t\/\/ remaining part\n\t\t\tpart := content[num*initRes.BlkSize:]\n\t\t\t_, etag, err := s.uploadPart(part, initRes, num)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tetags = append(etags, etag)\n\t\t\tbar.Increment()\n\t\t}\n\t\t_, err = s.finishMultipartUpload(initRes, strings.Join(etags, \",\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbar.Finish()\n\n\t} else {\n\t\treturn s.put(content, filename)\n\t}\n\treturn nil\n}\n\ntype fileItem struct {\n\tBucketName string\n\tFileName string\n\tHash string\n\tMimeType string\n\tSize int\n\tCreateTime int\n\tModifyTime int\n}\n\ntype fileList struct {\n\tBucketName string\n\tBucketId string\n\tNextMarker string\n\tDataSet []fileItem\n}\n\nfunc (s *UfileStorage) PrefixFileList(prefix string) (*fileList, error) {\n\t\/\/ sign\n\tsign := s.signheader(\"GET\", \"\", s.BucketName, \"\")\n\tauth := \"UCloud\" + \" \" + s.PublicKey + \":\" + sign\n\tclient := &http.Client{}\n\turl := \"http:\/\/\" + s.BucketName + SUFFIX + \"\/?list&prefix=\" + prefix\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\n\treq.Header.Add(\"Authorization\", auth)\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"PrefixFileList failed, %s\", string(body))\n\t}\n\tvar res fileList\n\tif err := json.Unmarshal(body, &res); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &res, nil\n}\n\nfunc (s *UfileStorage) getFile(filename, brange string) ([]byte, int, error) {\n\t\/\/ sign\n\tsign := s.signheader(\"GET\", \"\", s.BucketName, filename)\n\tauth := \"UCloud\" + \" \" + s.PublicKey + \":\" + sign\n\tclient := &http.Client{}\n\turl := \"http:\/\/\" + s.BucketName + SUFFIX + \"\/\" + filename\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\n\treq.Header.Add(\"Authorization\", auth)\n\treq.Header.Add(\"Range\", brange)\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\tif resp.StatusCode != 206 && resp.StatusCode != 200 {\n\t\treturn nil, 0, fmt.Errorf(\"getFile failed, %s\", string(body))\n\t}\n\tsize := 0\n\tif resp.StatusCode == 200 {\n\t\t\/\/ complete\n\t\tsize, _ = strconv.Atoi(resp.Header.Get(\"Content-Length\"))\n\t} else if resp.StatusCode == 206 {\n\t\t\/\/ partial\n\t\tsize, _ = strconv.Atoi(strings.Split(resp.Header.Get(\"Content-Range\"), \"\/\")[1])\n\t}\n\treturn body, size, nil\n}\n\nfunc (s *UfileStorage) Fetch(filename string) ([]byte, error) {\n\tb, size, err := s.getFile(filename, \"bytes=0-\"+strconv.Itoa(MAX_GET_SIZE-1))\n\tif err != nil {\n\t\treturn b, err\n\t}\n\tlb := len(b)\n\tif lb == size {\n\t\t\/\/ downloaded\n\t\treturn b, nil\n\t}\n\t\/\/ partial\n\tsize -= lb\n\tnum := size \/ PARTIAL_SIZE\n\tbar := pb.StartNew(num+1)\n\t\/\/ TODO concurrency\n\tfor i := 0; i <= num; i++ {\n\t\tbrange := \"bytes=\"\n\t\tbrange += strconv.Itoa(i*PARTIAL_SIZE+lb) + \"-\"\n\t\tbrange += strconv.Itoa((i+1)*PARTIAL_SIZE + lb - 1)\n\t\tbp, _, err := s.getFile(filename, brange)\n\t\tif err != nil {\n\t\t\tlogs.Error(err)\n\t\t\tcontinue\n\t\t}\n\t\tb = append(b, bp...)\n\t\tbar.Increment()\n\t}\n\tbar.Finish()\n\treturn b, nil\n}\n<commit_msg>add ufile uploading concurrency limit<commit_after>package rrstorage\n\nimport (\n\t\"bytes\"\n\t\"github.com\/cheggaaa\/pb\"\n\t\"crypto\/hmac\"\n\t\"crypto\/sha1\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"github.com\/songtianyi\/rrframework\/logs\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\ntype UfileStorage struct {\n\tPublicKey string\n\tPrivateKey string\n\tBucketName string\n\n\tusema chan struct{}\t\/\/ uploading concurrency limit\n}\n\nconst (\n\tEXPIRE = 3600\n\tSUFFIX = \".ufile.ucloud.cn\"\n\tMAX_PUT_SIZE = 50 * (1 << 20)\n\tMAX_GET_SIZE = 50 * (1 << 20)\n\tPARTIAL_SIZE = 4 * (1 << 20)\n)\n\nfunc CreateUfileStorage(pub, pri, bun string, ucl int) StorageWrapper {\n\ts := &UfileStorage{\n\t\tPublicKey: pub,\n\t\tPrivateKey: pri,\n\t\tBucketName: bun,\n\t\tusema: make(chan struct{}, ucl),\n\t}\n\treturn s\n}\n\nfunc (s *UfileStorage) signheader(method, ctype, bucket, filename string) string {\n\tdata := method + \"\\n\"\n\tdata += \"\\n\" \/\/Content-MD5 empty\n\tdata += ctype + \"\\n\" \/\/Content-Type\n\tdata += \"\\n\" \/\/Date empty\n\tdata += \"\/\" + bucket + \"\/\" + filename\n\n\th := hmac.New(sha1.New, []byte(s.PrivateKey))\n\th.Write([]byte(data))\n\treturn base64.StdEncoding.EncodeToString(h.Sum(nil))\n}\n\ntype initResponse struct {\n\tUploadId string\n\tBlkSize int\n\tBucket string\n\tKey string\n}\n\nfunc (s *UfileStorage) initiateMultipartUpload(filename string) (*initResponse, error) {\n\tsign := s.signheader(\"POST\", \"application\/octet-stream\", s.BucketName, filename)\n\n\tauth := \"UCloud\" + \" \" + s.PublicKey + \":\" + sign\n\tclient := &http.Client{}\n\turl := \"http:\/\/\" + s.BucketName + SUFFIX + \"\/\" + filename + \"?uploads\"\n\treq, err := http.NewRequest(\"POST\", url, nil)\n\n\treq.Header.Add(\"Authorization\", auth)\n\treq.Header.Add(\"Content-Type\", \"application\/octet-stream\")\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"initiateMultipartUpload failed, %s\", string(body))\n\t}\n\tvar res initResponse\n\tif err := json.Unmarshal(body, &res); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &res, nil\n}\n\ntype uploadResponse struct {\n\tPartNumber int\n}\n\nfunc (s *UfileStorage) uploadPart(content []byte, info *initResponse, partNum int) (*uploadResponse, string, error) {\n\tsign := s.signheader(\"PUT\", \"application\/octet-stream\", info.Bucket, info.Key)\n\n\tauth := \"UCloud\" + \" \" + s.PublicKey + \":\" + sign\n\tclient := &http.Client{}\n\turl := \"http:\/\/\" + info.Bucket + SUFFIX + \"\/\" + info.Key + \"?uploadId=\" + info.UploadId + \"&partNumber=\" + strconv.Itoa(partNum)\n\treq, err := http.NewRequest(\"PUT\", url, bytes.NewReader(content))\n\n\treq.Header.Add(\"Authorization\", auth)\n\treq.Header.Add(\"Content-Type\", \"application\/octet-stream\")\n\treq.Header.Add(\"Content-Length\", strconv.Itoa(info.BlkSize))\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn nil, \"\", fmt.Errorf(\"uploadPart failed, %s\", string(body))\n\t}\n\tvar res uploadResponse\n\tif err := json.Unmarshal(body, &res); err != nil {\n\t\treturn nil, \"\", err\n\t}\n\treturn &res, resp.Header.Get(\"ETag\"), nil\n}\n\ntype finishResponse struct {\n\tBucket string\n\tKey string\n\tFileSize int\n}\n\nfunc (s *UfileStorage) finishMultipartUpload(info *initResponse, etags string) (*finishResponse, error) {\n\tsign := s.signheader(\"POST\", \"text\/plain\", info.Bucket, info.Key)\n\n\tauth := \"UCloud\" + \" \" + s.PublicKey + \":\" + sign\n\tclient := &http.Client{}\n\turl := \"http:\/\/\" + info.Bucket + SUFFIX + \"\/\" + info.Key + \"?uploadId=\" + info.UploadId + \"&newKey=\" + info.Key\n\treq, err := http.NewRequest(\"POST\", url, strings.NewReader(etags))\n\n\treq.Header.Add(\"Authorization\", auth)\n\treq.Header.Add(\"Content-Length\", strconv.Itoa(len(etags)))\n\treq.Header.Add(\"Content-Type\", \"text\/plain\")\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"finishMultipartUpload failed, %s\", string(body))\n\t}\n\tvar res finishResponse\n\tif err := json.Unmarshal(body, &res); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &res, nil\n}\n\nfunc (s *UfileStorage) put(content []byte, filename string) error {\n\t\/\/ sign\n\tsign := s.signheader(\"PUT\", \"application\/octet-stream\", s.BucketName, filename)\n\tauth := \"UCloud\" + \" \" + s.PublicKey + \":\" + sign\n\tclient := &http.Client{}\n\turl := \"http:\/\/\" + s.BucketName + SUFFIX + \"\/\" + filename\n\treq, err := http.NewRequest(\"PUT\", url, bytes.NewReader(content))\n\n\treq.Header.Add(\"Authorization\", auth)\n\treq.Header.Add(\"Content-Type\", \"application\/octet-stream\")\n\treq.Header.Add(\"Content-Length\", strconv.Itoa(len(content)))\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode != 200 {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tdefer resp.Body.Close()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn fmt.Errorf(\"put file failed, %s\", string(body))\n\t}\n\treturn nil\n}\n\nfunc (s *UfileStorage) Save(content []byte, filename string) error {\n\n\tsize := len(content)\n\tif size > MAX_PUT_SIZE {\n\t\t\/\/ > 50M\n\t\tinitRes, err := s.initiateMultipartUpload(filename)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnum := size \/ initRes.BlkSize\n\t\tbar := pb.StartNew(num+1)\n\t\tetags := make([]string, 0)\n\t\tvar wg sync.WaitGroup\n\t\tfor i := 0; i < num; i++ {\n\t\t\ts.usema <- struct{}{}\n\t\t\twg.Add(1)\n\t\t\tgo func(j int) {\n\t\t\t\tdefer func() {\n\t\t\t\t\twg.Done()\n\t\t\t\t\t<-s.usema\n\t\t\t\t}()\n\t\t\t\tpart := content[j*initRes.BlkSize : (j+1)*initRes.BlkSize]\n\t\t\t\t_, etag, err := s.uploadPart(part, initRes, j)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogs.Error(err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tetags = append(etags, etag)\n\t\t\t\tbar.Increment()\n\t\t\t}(i)\n\t\t}\n\t\t\/\/ TODO capture error\n\t\twg.Wait()\n\t\tif num*initRes.BlkSize < size {\n\t\t\t\/\/ remaining part\n\t\t\tpart := content[num*initRes.BlkSize:]\n\t\t\t_, etag, err := s.uploadPart(part, initRes, num)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tetags = append(etags, etag)\n\t\t\tbar.Increment()\n\t\t}\n\t\t_, err = s.finishMultipartUpload(initRes, strings.Join(etags, \",\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tbar.Finish()\n\n\t} else {\n\t\treturn s.put(content, filename)\n\t}\n\treturn nil\n}\n\ntype fileItem struct {\n\tBucketName string\n\tFileName string\n\tHash string\n\tMimeType string\n\tSize int\n\tCreateTime int\n\tModifyTime int\n}\n\ntype fileList struct {\n\tBucketName string\n\tBucketId string\n\tNextMarker string\n\tDataSet []fileItem\n}\n\nfunc (s *UfileStorage) PrefixFileList(prefix string) (*fileList, error) {\n\t\/\/ sign\n\tsign := s.signheader(\"GET\", \"\", s.BucketName, \"\")\n\tauth := \"UCloud\" + \" \" + s.PublicKey + \":\" + sign\n\tclient := &http.Client{}\n\turl := \"http:\/\/\" + s.BucketName + SUFFIX + \"\/?list&prefix=\" + prefix\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\n\treq.Header.Add(\"Authorization\", auth)\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"PrefixFileList failed, %s\", string(body))\n\t}\n\tvar res fileList\n\tif err := json.Unmarshal(body, &res); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &res, nil\n}\n\nfunc (s *UfileStorage) getFile(filename, brange string) ([]byte, int, error) {\n\t\/\/ sign\n\tsign := s.signheader(\"GET\", \"\", s.BucketName, filename)\n\tauth := \"UCloud\" + \" \" + s.PublicKey + \":\" + sign\n\tclient := &http.Client{}\n\turl := \"http:\/\/\" + s.BucketName + SUFFIX + \"\/\" + filename\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\n\treq.Header.Add(\"Authorization\", auth)\n\treq.Header.Add(\"Range\", brange)\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tdefer resp.Body.Close()\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\tif resp.StatusCode != 206 && resp.StatusCode != 200 {\n\t\treturn nil, 0, fmt.Errorf(\"getFile failed, %s\", string(body))\n\t}\n\tsize := 0\n\tif resp.StatusCode == 200 {\n\t\t\/\/ complete\n\t\tsize, _ = strconv.Atoi(resp.Header.Get(\"Content-Length\"))\n\t} else if resp.StatusCode == 206 {\n\t\t\/\/ partial\n\t\tsize, _ = strconv.Atoi(strings.Split(resp.Header.Get(\"Content-Range\"), \"\/\")[1])\n\t}\n\treturn body, size, nil\n}\n\nfunc (s *UfileStorage) Fetch(filename string) ([]byte, error) {\n\tb, size, err := s.getFile(filename, \"bytes=0-\"+strconv.Itoa(MAX_GET_SIZE-1))\n\tif err != nil {\n\t\treturn b, err\n\t}\n\tlb := len(b)\n\tif lb == size {\n\t\t\/\/ downloaded\n\t\treturn b, nil\n\t}\n\t\/\/ partial\n\tsize -= lb\n\tnum := size \/ PARTIAL_SIZE\n\tbar := pb.StartNew(num+1)\n\t\/\/ TODO concurrency\n\tfor i := 0; i <= num; i++ {\n\t\tbrange := \"bytes=\"\n\t\tbrange += strconv.Itoa(i*PARTIAL_SIZE+lb) + \"-\"\n\t\tbrange += strconv.Itoa((i+1)*PARTIAL_SIZE + lb - 1)\n\t\tbp, _, err := s.getFile(filename, brange)\n\t\tif err != nil {\n\t\t\tlogs.Error(err)\n\t\t\tcontinue\n\t\t}\n\t\tb = append(b, bp...)\n\t\tbar.Increment()\n\t}\n\tbar.Finish()\n\treturn b, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package sdl\n\n\/*\n#include \"sdl_wrapper.h\"\n\nstatic inline int _SDL_GetSystemRAM()\n{\n#if (SDL_VERSION_ATLEAST(2,0,1))\n return SDL_GetSystemRAM();\n#else\n return 0;\n#endif\n}\n\nstatic inline SDL_bool _SDL_HasAVX()\n{\n#if (SDL_VERSION_ATLEAST(2,0,2))\n return SDL_HasAVX();\n#else\n return SDL_FALSE;\n#endif\n}\n\n*\/\nimport \"C\"\n\nconst CACHELINE_SIZE = C.SDL_CACHELINE_SIZE\n\n\/\/ GetCPUCount (https:\/\/wiki.libsdl.org\/SDL_GetCPUCount)\nfunc GetCPUCount() int {\n\treturn int(C.SDL_GetCPUCount())\n}\n\n\/\/ GetCPUCacheLineSize (https:\/\/wiki.libsdl.org\/SDL_GetCPUCacheLineSize)\nfunc GetCPUCacheLineSize() int {\n\treturn int(C.SDL_GetCPUCacheLineSize())\n}\n\n\/\/ HasRDTSC (https:\/\/wiki.libsdl.org\/SDL_HasRDTSC)\nfunc HasRDTSC() bool {\n\treturn C.SDL_HasRDTSC() > 0\n}\n\n\/\/ HasAltiVec (https:\/\/wiki.libsdl.org\/SDL_HasAltiVec)\nfunc HasAltiVec() bool {\n\treturn C.SDL_HasAltiVec() > 0\n}\n\n\/\/ HasMMX (https:\/\/wiki.libsdl.org\/SDL_HasMMX)\nfunc HasMMX() bool {\n\treturn C.SDL_HasMMX() > 0\n}\n\n\/\/ Has3DNow (https:\/\/wiki.libsdl.org\/SDL_Has3DNow)\nfunc Has3DNow() bool {\n\treturn C.SDL_Has3DNow() > 0\n}\n\n\/\/ HasSSE (https:\/\/wiki.libsdl.org\/SDL_HasSSE)\nfunc HasSSE() bool {\n\treturn C.SDL_HasSSE() > 0\n}\n\n\/\/ HasSSE2 (https:\/\/wiki.libsdl.org\/SDL_HasSSE2)\nfunc HasSSE2() bool {\n\treturn C.SDL_HasSSE2() > 0\n}\n\n\/\/ HasSSE3 (https:\/\/wiki.libsdl.org\/SDL_HasSSE3)\nfunc HasSSE3() bool {\n\treturn C.SDL_HasSSE3() > 0\n}\n\n\/\/ HasSSE41 (https:\/\/wiki.libsdl.org\/SDL_HasSSE41)\nfunc HasSSE41() bool {\n\treturn C.SDL_HasSSE41() > 0\n}\n\n\/\/ HasSSE42 (https:\/\/wiki.libsdl.org\/SDL_HasSSE42)\nfunc HasSSE42() bool {\n\treturn C.SDL_HasSSE42() > 0\n}\n\n\/\/ GetSystemRAM (https:\/\/wiki.libsdl.org\/SDL_GetSystemRAM)\nfunc GetSystemRAM() int {\n\treturn int(C._SDL_GetSystemRAM())\n}\n\nfunc HasAVX() bool {\n\treturn C._SDL_HasAVX() > 0\n}\n<commit_msg>sdl: cpuinfo: add warning message to SDL_GetSystemRAM() and SDL_HasAVX() if used on older SDL2 versions<commit_after>package sdl\n\n\/*\n#include \"sdl_wrapper.h\"\n\n#if !(SDL_VERSION_ATLEAST(2,0,1))\n#pragma message(\"SDL_GetSystemRAM is not supported before SDL 2.0.1\")\nstatic inline int SDL_GetSystemRAM()\n{\n\treturn -1;\n}\n#endif\n\n#if !(SDL_VERSION_ATLEAST(2,0,2))\n#pragma message(\"SDL_HasAVX is not supported before SDL 2.0.2\")\nstatic inline SDL_bool SDL_HasAVX()\n{\n\treturn SDL_FALSE;\n}\n#endif\n\n*\/\nimport \"C\"\n\nconst CACHELINE_SIZE = C.SDL_CACHELINE_SIZE\n\n\/\/ GetCPUCount (https:\/\/wiki.libsdl.org\/SDL_GetCPUCount)\nfunc GetCPUCount() int {\n\treturn int(C.SDL_GetCPUCount())\n}\n\n\/\/ GetCPUCacheLineSize (https:\/\/wiki.libsdl.org\/SDL_GetCPUCacheLineSize)\nfunc GetCPUCacheLineSize() int {\n\treturn int(C.SDL_GetCPUCacheLineSize())\n}\n\n\/\/ HasRDTSC (https:\/\/wiki.libsdl.org\/SDL_HasRDTSC)\nfunc HasRDTSC() bool {\n\treturn C.SDL_HasRDTSC() > 0\n}\n\n\/\/ HasAltiVec (https:\/\/wiki.libsdl.org\/SDL_HasAltiVec)\nfunc HasAltiVec() bool {\n\treturn C.SDL_HasAltiVec() > 0\n}\n\n\/\/ HasMMX (https:\/\/wiki.libsdl.org\/SDL_HasMMX)\nfunc HasMMX() bool {\n\treturn C.SDL_HasMMX() > 0\n}\n\n\/\/ Has3DNow (https:\/\/wiki.libsdl.org\/SDL_Has3DNow)\nfunc Has3DNow() bool {\n\treturn C.SDL_Has3DNow() > 0\n}\n\n\/\/ HasSSE (https:\/\/wiki.libsdl.org\/SDL_HasSSE)\nfunc HasSSE() bool {\n\treturn C.SDL_HasSSE() > 0\n}\n\n\/\/ HasSSE2 (https:\/\/wiki.libsdl.org\/SDL_HasSSE2)\nfunc HasSSE2() bool {\n\treturn C.SDL_HasSSE2() > 0\n}\n\n\/\/ HasSSE3 (https:\/\/wiki.libsdl.org\/SDL_HasSSE3)\nfunc HasSSE3() bool {\n\treturn C.SDL_HasSSE3() > 0\n}\n\n\/\/ HasSSE41 (https:\/\/wiki.libsdl.org\/SDL_HasSSE41)\nfunc HasSSE41() bool {\n\treturn C.SDL_HasSSE41() > 0\n}\n\n\/\/ HasSSE42 (https:\/\/wiki.libsdl.org\/SDL_HasSSE42)\nfunc HasSSE42() bool {\n\treturn C.SDL_HasSSE42() > 0\n}\n\n\/\/ GetSystemRAM (https:\/\/wiki.libsdl.org\/SDL_GetSystemRAM)\nfunc GetSystemRAM() int {\n\treturn int(C.SDL_GetSystemRAM())\n}\n\nfunc HasAVX() bool {\n\treturn C.SDL_HasAVX() > 0\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2017, 2018 Damon Revoe. All rights reserved.\n\/\/ Use of this source code is governed by the MIT\n\/\/ license, which can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ configureEnv is a cache of environment variables that is\n\/\/ reused when configuring multiple packages.\ntype configureEnv struct {\n\tenvSansPkgConfigPath []string\n\torigPkgConfigPath string \/\/ original value of PKG_CONFIG_PATH\n\tbuildDir string\n\tpkgBuildDir map[string]string\n}\n\nconst pkgConfigPathVarName = \"PKG_CONFIG_PATH\"\n\nfunc dropEnvVar(env []string, i int) []string {\n\tif i < len(env)-1 {\n\t\tenv[i] = env[len(env)-1]\n\t}\n\treturn env[:len(env)-1]\n}\n\nfunc prepareConfigureEnv(buildDir string) *configureEnv {\n\tce := &configureEnv{\n\t\tos.Environ(),\n\t\t\"\",\n\t\tbuildDir,\n\t\tmap[string]string{}}\n\n\tfor i, v := range ce.envSansPkgConfigPath {\n\t\tif strings.HasPrefix(v, pkgConfigPathVarName+\"=\") {\n\t\t\tce.envSansPkgConfigPath = dropEnvVar(\n\t\t\t\tce.envSansPkgConfigPath, i)\n\t\t\tce.origPkgConfigPath = strings.SplitN(v, \"=\", 2)[1]\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn ce\n}\n\nfunc (ce *configureEnv) addPackageBuildDir(pkgName string) {\n\tce.pkgBuildDir[pkgName] = path.Join(ce.buildDir, pkgName)\n}\n\nfunc (ce *configureEnv) makeEnv(pd *packageDefinition) []string {\n\tvar configuredPackagePathames []string\n\n\tfor _, dep := range pd.allRequired {\n\t\tdepBuildDir, found := ce.pkgBuildDir[dep.PackageName]\n\t\tif found {\n\t\t\tconfiguredPackagePathames = append(\n\t\t\t\tconfiguredPackagePathames, depBuildDir)\n\t\t}\n\t}\n\n\tpkgConfigPath := strings.Join(configuredPackagePathames, \":\")\n\n\tif len(pkgConfigPath) == 0 {\n\t\tif len(ce.origPkgConfigPath) == 0 {\n\t\t\treturn ce.envSansPkgConfigPath\n\t\t}\n\t\tpkgConfigPath = ce.origPkgConfigPath\n\t} else if len(ce.origPkgConfigPath) > 0 {\n\t\tpkgConfigPath += \":\"\n\t\tpkgConfigPath += ce.origPkgConfigPath\n\t}\n\n\treturn append(ce.envSansPkgConfigPath,\n\t\tpkgConfigPathVarName+\"=\"+pkgConfigPath)\n}\n\nfunc configurePackage(installDir, pkgRootDir string, pd *packageDefinition,\n\tcfgEnv *configureEnv, conftab *Conftab) error {\n\tfmt.Println(\"[configure] \" + pd.PackageName)\n\n\tpkgBuildDir := path.Join(cfgEnv.buildDir, pd.PackageName)\n\n\tconfigurePathname, err := filepath.Rel(pkgBuildDir,\n\t\tpath.Join(pkgRootDir, pd.PackageName, \"configure\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = os.MkdirAll(pkgBuildDir, os.FileMode(0775))\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tconfigureArgs := conftab.getConfigureArgs(pd.PackageName)\n\tconfigureArgs = append(configureArgs, \"--quiet\", \"--prefix=\"+installDir)\n\n\tconfigureCmd := exec.Command(configurePathname, configureArgs...)\n\tconfigureCmd.Dir = pkgBuildDir\n\tconfigureCmd.Stdout = os.Stdout\n\tconfigureCmd.Stderr = os.Stderr\n\tconfigureCmd.Env = cfgEnv.makeEnv(pd)\n\tif err := configureCmd.Run(); err != nil {\n\t\treturn errors.New(configurePathname + \": \" + err.Error())\n\t}\n\n\treturn nil\n}\n\nfunc configurePackages(args []string) error {\n\tws, err := loadWorkspace()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpi, err := readPackageDefinitions(ws.wp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuildDir := ws.buildDir()\n\n\tcfgEnv := prepareConfigureEnv(buildDir)\n\n\tif configuredPackageDirs, err := ioutil.ReadDir(buildDir); err == nil {\n\t\t\/\/ Register packages that already exist\n\t\t\/\/ in the build directory.\n\t\tfor _, dir := range configuredPackageDirs {\n\t\t\tcfgEnv.addPackageBuildDir(dir.Name())\n\t\t}\n\t}\n\n\tvar selection packageDefinitionList\n\n\tif len(args) > 0 {\n\t\tselection, err = packageRangesToFlatSelection(pi, args)\n\t} else {\n\t\tselection, err = readPackageSelection(pi, ws.absPrivateDir)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, pd := range selection {\n\t\tcfgEnv.addPackageBuildDir(pd.PackageName)\n\t}\n\n\tconftab, err := readConftab(\n\t\tpath.Join(ws.absPrivateDir, conftabFilename))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinstallDir := ws.installDir()\n\tpkgRootDir := ws.generatedPkgRootDir()\n\n\tfor _, pd := range selection {\n\t\terr := configurePackage(installDir, pkgRootDir, pd,\n\t\t\tcfgEnv, conftab)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ configureCmd represents the configure command\nvar configureCmd = &cobra.Command{\n\tUse: \"configure [package_range...]\",\n\tShort: \"Configure all selected packages \" +\n\t\t\"or the specified package range\",\n\tRun: func(_ *cobra.Command, args []string) {\n\t\tif err := configurePackages(args); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t},\n}\n\nfunc init() {\n\trootCmd.AddCommand(configureCmd)\n\n\tconfigureCmd.Flags().SortFlags = false\n\taddQuietFlag(configureCmd)\n\taddWorkspaceDirFlag(configureCmd)\n}\n<commit_msg>Use absolute pathname for configure script<commit_after>\/\/ Copyright (C) 2017, 2018 Damon Revoe. All rights reserved.\n\/\/ Use of this source code is governed by the MIT\n\/\/ license, which can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ configureEnv is a cache of environment variables that is\n\/\/ reused when configuring multiple packages.\ntype configureEnv struct {\n\tenvSansPkgConfigPath []string\n\torigPkgConfigPath string \/\/ original value of PKG_CONFIG_PATH\n\tbuildDir string\n\tpkgBuildDir map[string]string\n}\n\nconst pkgConfigPathVarName = \"PKG_CONFIG_PATH\"\n\nfunc dropEnvVar(env []string, i int) []string {\n\tif i < len(env)-1 {\n\t\tenv[i] = env[len(env)-1]\n\t}\n\treturn env[:len(env)-1]\n}\n\nfunc prepareConfigureEnv(buildDir string) *configureEnv {\n\tce := &configureEnv{\n\t\tos.Environ(),\n\t\t\"\",\n\t\tbuildDir,\n\t\tmap[string]string{}}\n\n\tfor i, v := range ce.envSansPkgConfigPath {\n\t\tif strings.HasPrefix(v, pkgConfigPathVarName+\"=\") {\n\t\t\tce.envSansPkgConfigPath = dropEnvVar(\n\t\t\t\tce.envSansPkgConfigPath, i)\n\t\t\tce.origPkgConfigPath = strings.SplitN(v, \"=\", 2)[1]\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn ce\n}\n\nfunc (ce *configureEnv) addPackageBuildDir(pkgName string) {\n\tce.pkgBuildDir[pkgName] = path.Join(ce.buildDir, pkgName)\n}\n\nfunc (ce *configureEnv) makeEnv(pd *packageDefinition) []string {\n\tvar configuredPackagePathames []string\n\n\tfor _, dep := range pd.allRequired {\n\t\tdepBuildDir, found := ce.pkgBuildDir[dep.PackageName]\n\t\tif found {\n\t\t\tconfiguredPackagePathames = append(\n\t\t\t\tconfiguredPackagePathames, depBuildDir)\n\t\t}\n\t}\n\n\tpkgConfigPath := strings.Join(configuredPackagePathames, \":\")\n\n\tif len(pkgConfigPath) == 0 {\n\t\tif len(ce.origPkgConfigPath) == 0 {\n\t\t\treturn ce.envSansPkgConfigPath\n\t\t}\n\t\tpkgConfigPath = ce.origPkgConfigPath\n\t} else if len(ce.origPkgConfigPath) > 0 {\n\t\tpkgConfigPath += \":\"\n\t\tpkgConfigPath += ce.origPkgConfigPath\n\t}\n\n\treturn append(ce.envSansPkgConfigPath,\n\t\tpkgConfigPathVarName+\"=\"+pkgConfigPath)\n}\n\nfunc configurePackage(installDir, pkgRootDir string, pd *packageDefinition,\n\tcfgEnv *configureEnv, conftab *Conftab) error {\n\tfmt.Println(\"[configure] \" + pd.PackageName)\n\n\tconfigurePathname := path.Join(pkgRootDir, pd.PackageName, \"configure\")\n\n\tpkgBuildDir := path.Join(cfgEnv.buildDir, pd.PackageName)\n\n\terr := os.MkdirAll(pkgBuildDir, os.FileMode(0775))\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\tconfigureArgs := conftab.getConfigureArgs(pd.PackageName)\n\tconfigureArgs = append(configureArgs, \"--quiet\", \"--prefix=\"+installDir)\n\n\tconfigureCmd := exec.Command(configurePathname, configureArgs...)\n\tconfigureCmd.Dir = pkgBuildDir\n\tconfigureCmd.Stdout = os.Stdout\n\tconfigureCmd.Stderr = os.Stderr\n\tconfigureCmd.Env = cfgEnv.makeEnv(pd)\n\tif err := configureCmd.Run(); err != nil {\n\t\treturn errors.New(configurePathname + \": \" + err.Error())\n\t}\n\n\treturn nil\n}\n\nfunc configurePackages(args []string) error {\n\tws, err := loadWorkspace()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpi, err := readPackageDefinitions(ws.wp)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuildDir := ws.buildDir()\n\n\tcfgEnv := prepareConfigureEnv(buildDir)\n\n\tif configuredPackageDirs, err := ioutil.ReadDir(buildDir); err == nil {\n\t\t\/\/ Register packages that already exist\n\t\t\/\/ in the build directory.\n\t\tfor _, dir := range configuredPackageDirs {\n\t\t\tcfgEnv.addPackageBuildDir(dir.Name())\n\t\t}\n\t}\n\n\tvar selection packageDefinitionList\n\n\tif len(args) > 0 {\n\t\tselection, err = packageRangesToFlatSelection(pi, args)\n\t} else {\n\t\tselection, err = readPackageSelection(pi, ws.absPrivateDir)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, pd := range selection {\n\t\tcfgEnv.addPackageBuildDir(pd.PackageName)\n\t}\n\n\tconftab, err := readConftab(\n\t\tpath.Join(ws.absPrivateDir, conftabFilename))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinstallDir := ws.installDir()\n\tpkgRootDir := ws.generatedPkgRootDir()\n\n\tfor _, pd := range selection {\n\t\terr := configurePackage(installDir, pkgRootDir, pd,\n\t\t\tcfgEnv, conftab)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ configureCmd represents the configure command\nvar configureCmd = &cobra.Command{\n\tUse: \"configure [package_range...]\",\n\tShort: \"Configure all selected packages \" +\n\t\t\"or the specified package range\",\n\tRun: func(_ *cobra.Command, args []string) {\n\t\tif err := configurePackages(args); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t},\n}\n\nfunc init() {\n\trootCmd.AddCommand(configureCmd)\n\n\tconfigureCmd.Flags().SortFlags = false\n\taddQuietFlag(configureCmd)\n\taddWorkspaceDirFlag(configureCmd)\n}\n<|endoftext|>"} {"text":"<commit_before>package goftp\n\nimport (\n\t\"bufio\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar REGEX_PWD_PATH *regexp.Regexp = regexp.MustCompile(`\\\"(.*)\\\"`)\n\ntype FTP struct {\n\tconn net.Conn\n\n\taddr string\n\n\tdebug bool\n\ttlsconfig *tls.Config\n\n\treader *bufio.Reader\n\twriter *bufio.Writer\n}\n\nfunc (ftp *FTP) Close() {\n\tftp.conn.Close()\n}\n\ntype WalkFunc func(path string, info os.FileMode, err error) error\ntype RetrFunc func(r io.Reader) error\n\nfunc parseLine(line string) (perm string, t string, filename string) {\n\tfor _, v := range strings.Split(line, \";\") {\n\t\tv2 := strings.Split(v, \"=\")\n\n\t\tswitch v2[0] {\n\t\tcase \"perm\":\n\t\t\tperm = v2[1]\n\t\tcase \"type\":\n\t\t\tt = v2[1]\n\t\tdefault:\n\t\t\tfilename = v[1 : len(v)-2]\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ walks recursively through path and call walkfunc for each file\nfunc (ftp *FTP) Walk(path string, walkFn WalkFunc) (err error) {\n\t\/*\n\t\tif err = walkFn(path, os.ModeDir, nil); err != nil {\n\t\t\tif err == filepath.SkipDir {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t*\/\n\tif ftp.debug {\n\t\tlog.Printf(\"Walking: '%s'\\n\", path)\n\t}\n\n\tvar lines []string\n\n\tif lines, err = ftp.List(path); err != nil {\n\t\treturn\n\t}\n\n\tfor _, line := range lines {\n\t\t_, t, subpath := parseLine(line)\n\n\t\tswitch t {\n\t\tcase \"dir\":\n\t\t\tif subpath == \".\" {\n\t\t\t} else if subpath == \"..\" {\n\t\t\t} else {\n\t\t\t\tif err = ftp.Walk(path+subpath+\"\/\", walkFn); err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"file\":\n\t\t\tif err = walkFn(path+subpath, os.FileMode(0), nil); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ send quit to the server and close the connection\nfunc (ftp *FTP) Quit() (err error) {\n\tif _, err := ftp.cmd(\"221\", \"QUIT\"); err != nil {\n\t\treturn err\n\t}\n\n\tftp.conn.Close()\n\tftp.conn = nil\n\n\treturn nil\n}\n\n\/\/ will send a NOOP (no operation) to the server\nfunc (ftp *FTP) Noop() (err error) {\n\t_, err = ftp.cmd(\"200\", \"NOOP\")\n\treturn\n}\n\n\/\/ private function to send command and compare return code with expects\nfunc (ftp *FTP) cmd(expects string, command string, args ...interface{}) (line string, err error) {\n\tif err = ftp.send(command, args...); err != nil {\n\t\treturn\n\t}\n\n\tif line, err = ftp.receive(); err != nil {\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(line, expects) {\n\t\terr = errors.New(line)\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ rename file\nfunc (ftp *FTP) Rename(from string, to string) (err error) {\n\tif _, err = ftp.cmd(\"350\", \"RNFR %s\", from); err != nil {\n\t\treturn\n\t}\n\n\tif _, err = ftp.cmd(\"250\", \"RNTO %s\", to); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ make directory\nfunc (ftp *FTP) Mkd(path string) error {\n\t_, err := ftp.cmd(\"257\", \"MKD %s\", path)\n\treturn err\n}\n\n\/\/ get current path\nfunc (ftp *FTP) Pwd() (path string, err error) {\n\tvar line string\n\tif line, err = ftp.cmd(\"257\", \"PWD\"); err != nil {\n\t\treturn\n\t}\n\n\tres := REGEX_PWD_PATH.FindAllStringSubmatch(line[4:], -1)\n\n\tpath = res[0][1]\n\treturn\n}\n\n\/\/ change current path\nfunc (ftp *FTP) Cwd(path string) (err error) {\n\t_, err = ftp.cmd(\"250\", \"CWD %s\", path)\n\treturn\n}\n\n\/\/ delete file\nfunc (ftp *FTP) Dele(path string) (err error) {\n\tif err = ftp.send(\"DELE %s\", path); err != nil {\n\t\treturn\n\t}\n\n\tvar line string\n\tif line, err = ftp.receive(); err != nil {\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(line, \"250\") {\n\t\treturn errors.New(line)\n\t}\n\n\treturn\n}\n\n\/\/ secures the ftp connection by using TLS\nfunc (ftp *FTP) AuthTLS(config tls.Config) error {\n\tif _, err := ftp.cmd(\"234\", \"AUTH TLS\"); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ wrap tls on existing connection\n\tftp.tlsconfig = &config\n\n\tftp.conn = tls.Client(ftp.conn, &config)\n\tftp.writer = bufio.NewWriter(ftp.conn)\n\tftp.reader = bufio.NewReader(ftp.conn)\n\n\tif _, err := ftp.cmd(\"200\", \"PBSZ 0\"); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := ftp.cmd(\"200\", \"PROT P\"); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\nfunc (ftp *FTP) ReadAndDiscard() (int, error) {\n\tvar i int\n\tvar err error\n\tbuffer_size:=ftp.reader.Buffered()\n\tfor i = 0; i < buffer_size; i++ {\t\t\n\t\tif _, err=ftp.reader.ReadByte(); err != nil {\n\t\t\treturn i,err\n\t\t}\n }\n\treturn i,err\n}\n\n\/\/ change transfer type\nfunc (ftp *FTP) Type(t string) error {\n\t_, err := ftp.cmd(\"200\", \"TYPE %s\", t)\n\treturn err\n}\n\nfunc (ftp *FTP) receiveLine() (string, error) {\n\tline, err := ftp.reader.ReadString('\\n')\n\n\tif ftp.debug {\n\t\tlog.Printf(\"< %s\", line)\n\t}\n\n\treturn line, err\n}\n\nfunc (ftp *FTP) receive() (string, error) {\n\tline, err := ftp.receiveLine()\n\n\tif err != nil {\n\t\treturn line, err\n\t}\n\n\tif (len(line) >= 4) && (line[3] == '-') {\n\t\tnextLine := \"\"\n\t\t\/\/ This is a continuation of output line\n\t\tnextLine, err = ftp.receive()\n\t\tline = line + nextLine\n\t}\n\n\treturn line, err\n}\n\nfunc (ftp *FTP) send(command string, arguments ...interface{}) error {\n\tif ftp.debug {\n\t\tlog.Printf(\"> %s\", fmt.Sprintf(command, arguments...))\n\t}\n\n\tcommand = fmt.Sprintf(command, arguments...)\n\tcommand += \"\\r\\n\"\n\n\tif _, err := ftp.writer.WriteString(command); err != nil {\n\t\treturn err\n\t}\n\n\tif err := ftp.writer.Flush(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ enables passive data connection and returns port number\nfunc (ftp *FTP) Pasv() (port int, err error) {\n\tvar line string\n\tif line, err = ftp.cmd(\"227\", \"PASV\"); err != nil {\n\t\treturn\n\t}\n\n\tre, err := regexp.Compile(`\\((.*)\\)`)\n\n\tres := re.FindAllStringSubmatch(line, -1)\n\n\ts := strings.Split(res[0][1], \",\")\n\n\tl1, _ := strconv.Atoi(s[len(s)-2])\n\tl2, _ := strconv.Atoi(s[len(s)-1])\n\n\tport = l1<<8 + l2\n\n\treturn\n}\n\n\/\/ open new data connection\nfunc (ftp *FTP) newConnection(port int) (conn net.Conn, err error) {\n\taddr := fmt.Sprintf(\"%s:%d\", strings.Split(ftp.addr, \":\")[0], port)\n\n\tif ftp.debug {\n\t\tlog.Printf(\"Connecting to %s\\n\", addr)\n\t}\n\n\tif conn, err = net.Dial(\"tcp\", addr); err != nil {\n\t\treturn\n\t}\n\n\tif ftp.tlsconfig != nil {\n\t\tconn = tls.Client(conn, ftp.tlsconfig)\n\t}\n\n\treturn\n}\n\n\/\/ upload file\nfunc (ftp *FTP) Stor(path string, r io.Reader) (err error) {\n\tif err = ftp.Type(\"I\"); err != nil {\n\t\treturn\n\t}\n\n\tvar port int\n\tif port, err = ftp.Pasv(); err != nil {\n\t\treturn\n\t}\n\n\tif err = ftp.send(\"STOR %s\", path); err != nil {\n\t\treturn\n\t}\n\n\tvar pconn net.Conn\n\tif pconn, err = ftp.newConnection(port); err != nil {\n\t\treturn\n\t}\n\n\tvar line string\n\tif line, err = ftp.receive(); err != nil {\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(line, \"150\") {\n\t\terr = errors.New(line)\n\t\treturn\n\t}\n\n\tif _, err = io.Copy(pconn, r); err != nil {\n\t\treturn\n\t}\n\n\tpconn.Close()\n\n\tif line, err = ftp.receive(); err != nil {\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(line, \"226\") {\n\t\terr = errors.New(line)\n\t\treturn\n\t}\n\n\treturn\n\n}\n\n\/\/ retrieves file\nfunc (ftp *FTP) Retr(path string, retrFn RetrFunc) (s string, err error) {\n\tif err = ftp.Type(\"I\"); err != nil {\n\t\treturn\n\t}\n\n\tvar port int\n\tif port, err = ftp.Pasv(); err != nil {\n\t\treturn\n\t}\n\n\tif err = ftp.send(\"RETR %s\", path); err != nil {\n\t\treturn\n\t}\n\n\tvar pconn net.Conn\n\tif pconn, err = ftp.newConnection(port); err != nil {\n\t\treturn\n\t}\n\n\tvar line string\n\tif line, err = ftp.receive(); err != nil {\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(line, \"150\") {\n\t\terr = errors.New(line)\n\t\treturn\n\t}\n\n\tif err = retrFn(pconn); err != nil {\n\t\treturn\n\t}\n\n\tpconn.Close()\n\n\tif line, err = ftp.receive(); err != nil {\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(line, \"226\") {\n\t\terr = errors.New(line)\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ list the path (or current directory)\nfunc (ftp *FTP) List(path string) (files []string, err error) {\n\tif err = ftp.Type(\"A\"); err != nil {\n\t\treturn\n\t}\n\n\tvar port int\n\tif port, err = ftp.Pasv(); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ check if MLSD works\n\tif err = ftp.send(\"MLSD %s\", path); err != nil {\n\t}\n\n\tvar pconn net.Conn\n\tif pconn, err = ftp.newConnection(port); err != nil {\n\t\treturn\n\t}\n\n\tvar line string\n\tif line, err = ftp.receive(); err != nil {\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(line, \"150\") {\n\t\t\/\/ MLSD failed, lets try LIST\n\t\tif err = ftp.send(\"LIST %s\", path); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif line, err = ftp.receive(); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif !strings.HasPrefix(line, \"150\") {\n\t\t\t\/\/ Really list is not working here\n\t\t\terr = errors.New(line)\n\t\t\treturn\n\t\t}\n\t}\n\n\treader := bufio.NewReader(pconn)\n\n\tfor {\n\t\tline, err = reader.ReadString('\\n')\n\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tfiles = append(files, string(line))\n\t}\n\n\tpconn.Close()\n\n\tif line, err = ftp.receive(); err != nil {\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(line, \"226\") {\n\t\terr = errors.New(line)\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ login to the server\nfunc (ftp *FTP) Login(username string, password string) (err error) {\n\tif _, err = ftp.cmd(\"331\", \"USER %s\", username); err != nil {\n\t\tif strings.HasPrefix(err.Error(), \"230\") {\n\t\t\t\/\/ Ok, probably anonymous server\n\t\t\t\/\/ but login was fine, so return no error\n\t\t\terr = nil\n\t\t} else {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif _, err = ftp.cmd(\"230\", \"PASS %s\", password); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ connect to server, debug is OFF\nfunc Connect(addr string) (*FTP, error) {\n\tvar err error\n\tvar conn net.Conn\n\n\tif conn, err = net.Dial(\"tcp\", addr); err != nil {\n\t\treturn nil, err\n\t}\n\n\twriter := bufio.NewWriter(conn)\n\treader := bufio.NewReader(conn)\n\n\treader.ReadString('\\n')\n\n\treturn &FTP{conn: conn, addr: addr, reader: reader, writer: writer, debug: false}, nil\n}\n\n\/\/ connect to server, debug is ON\nfunc ConnectDbg(addr string) (*FTP, error) {\n\tvar err error\n\tvar conn net.Conn\n\n\tif conn, err = net.Dial(\"tcp\", addr); err != nil {\n\t\treturn nil, err\n\t}\n\n\twriter := bufio.NewWriter(conn)\n\treader := bufio.NewReader(conn)\n\n\tvar line string\n\n\tline, err = reader.ReadString('\\n')\n\n\tlog.Print(line)\n\n\treturn &FTP{conn: conn, addr: addr, reader: reader, writer: writer, debug: true}, nil\n}\n<commit_msg>still not work<commit_after>package goftp\n\nimport (\n\t\"bufio\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar REGEX_PWD_PATH *regexp.Regexp = regexp.MustCompile(`\\\"(.*)\\\"`)\n\ntype FTP struct {\n\tconn net.Conn\n\n\taddr string\n\n\tdebug bool\n\ttlsconfig *tls.Config\n\n\treader *bufio.Reader\n\twriter *bufio.Writer\n}\n\nfunc (ftp *FTP) Close() {\n\tftp.conn.Close()\n}\n\ntype WalkFunc func(path string, info os.FileMode, err error) error\ntype RetrFunc func(r io.Reader) error\n\nfunc parseLine(line string) (perm string, t string, filename string) {\n\tfor _, v := range strings.Split(line, \";\") {\n\t\tv2 := strings.Split(v, \"=\")\n\n\t\tswitch v2[0] {\n\t\tcase \"perm\":\n\t\t\tperm = v2[1]\n\t\tcase \"type\":\n\t\t\tt = v2[1]\n\t\tdefault:\n\t\t\tfilename = v[1 : len(v)-2]\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ walks recursively through path and call walkfunc for each file\nfunc (ftp *FTP) Walk(path string, walkFn WalkFunc) (err error) {\n\t\/*\n\t\tif err = walkFn(path, os.ModeDir, nil); err != nil {\n\t\t\tif err == filepath.SkipDir {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t*\/\n\tif ftp.debug {\n\t\tlog.Printf(\"Walking: '%s'\\n\", path)\n\t}\n\n\tvar lines []string\n\n\tif lines, err = ftp.List(path); err != nil {\n\t\treturn\n\t}\n\n\tfor _, line := range lines {\n\t\t_, t, subpath := parseLine(line)\n\n\t\tswitch t {\n\t\tcase \"dir\":\n\t\t\tif subpath == \".\" {\n\t\t\t} else if subpath == \"..\" {\n\t\t\t} else {\n\t\t\t\tif err = ftp.Walk(path+subpath+\"\/\", walkFn); err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"file\":\n\t\t\tif err = walkFn(path+subpath, os.FileMode(0), nil); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ send quit to the server and close the connection\nfunc (ftp *FTP) Quit() (err error) {\n\tif _, err := ftp.cmd(\"221\", \"QUIT\"); err != nil {\n\t\treturn err\n\t}\n\n\tftp.conn.Close()\n\tftp.conn = nil\n\n\treturn nil\n}\n\n\/\/ will send a NOOP (no operation) to the server\nfunc (ftp *FTP) Noop() (err error) {\n\t_, err = ftp.cmd(\"200\", \"NOOP\")\n\treturn\n}\n\n\/\/ private function to send command and compare return code with expects\nfunc (ftp *FTP) cmd(expects string, command string, args ...interface{}) (line string, err error) {\n\tif err = ftp.send(command, args...); err != nil {\n\t\treturn\n\t}\n\n\tif line, err = ftp.receive(); err != nil {\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(line, expects) {\n\t\terr = errors.New(line)\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ rename file\nfunc (ftp *FTP) Rename(from string, to string) (err error) {\n\tif _, err = ftp.cmd(\"350\", \"RNFR %s\", from); err != nil {\n\t\treturn\n\t}\n\n\tif _, err = ftp.cmd(\"250\", \"RNTO %s\", to); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ make directory\nfunc (ftp *FTP) Mkd(path string) error {\n\t_, err := ftp.cmd(\"257\", \"MKD %s\", path)\n\treturn err\n}\n\n\/\/ get current path\nfunc (ftp *FTP) Pwd() (path string, err error) {\n\tvar line string\n\tif line, err = ftp.cmd(\"257\", \"PWD\"); err != nil {\n\t\treturn\n\t}\n\n\tres := REGEX_PWD_PATH.FindAllStringSubmatch(line[4:], -1)\n\n\tpath = res[0][1]\n\treturn\n}\n\n\/\/ change current path\nfunc (ftp *FTP) Cwd(path string) (err error) {\n\t_, err = ftp.cmd(\"250\", \"CWD %s\", path)\n\treturn\n}\n\n\/\/ delete file\nfunc (ftp *FTP) Dele(path string) (err error) {\n\tif err = ftp.send(\"DELE %s\", path); err != nil {\n\t\treturn\n\t}\n\n\tvar line string\n\tif line, err = ftp.receive(); err != nil {\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(line, \"250\") {\n\t\treturn errors.New(line)\n\t}\n\n\treturn\n}\n\n\/\/ secures the ftp connection by using TLS\nfunc (ftp *FTP) AuthTLS(config tls.Config) error {\n\tif _, err := ftp.cmd(\"234\", \"AUTH TLS\"); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ wrap tls on existing connection\n\tftp.tlsconfig = &config\n\n\tftp.conn = tls.Client(ftp.conn, &config)\n\tftp.writer = bufio.NewWriter(ftp.conn)\n\tftp.reader = bufio.NewReader(ftp.conn)\n\n\tif _, err := ftp.cmd(\"200\", \"PBSZ 0\"); err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := ftp.cmd(\"200\", \"PROT P\"); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ read all the buffered bytes and return\nfunc (ftp *FTP) ReadAndDiscard() (int, error) {\n\tfmt.Println(\"svuoto\")\n\tvar i int\n\tvar err error\n\tbuffer_size := ftp.reader.Buffered()\n\tfor i = 0; i < buffer_size; i++ {\n\t\tif _, err = ftp.reader.ReadByte(); err != nil {\n\t\t\treturn i, err\n\t\t}\n\t}\n\treturn i, err\n}\n\n\/\/ change transfer type\nfunc (ftp *FTP) Type(t string) error {\n\t_, err := ftp.cmd(\"200\", \"TYPE %s\", t)\n\treturn err\n}\n\nfunc (ftp *FTP) receiveLine() (string, error) {\n\tline, err := ftp.reader.ReadString('\\n')\n\n\tif ftp.debug {\n\t\tlog.Printf(\"< %s\", line)\n\t}\n\n\treturn line, err\n}\n\nfunc (ftp *FTP) receive() (string, error) {\n\tline, err := ftp.receiveLine()\n\n\tif err != nil {\n\t\treturn line, err\n\t}\n\n\tif (len(line) >= 4) && (line[3] == '-') {\n\t\tnextLine := \"\"\n\t\t\/\/ This is a continuation of output line\n\t\tnextLine, err = ftp.receive()\n\t\tline = line + nextLine\n\t}\n\n\treturn line, err\n}\n\nfunc (ftp *FTP) send(command string, arguments ...interface{}) error {\n\tif ftp.debug {\n\t\tlog.Printf(\"> %s\", fmt.Sprintf(command, arguments...))\n\t}\n\n\tcommand = fmt.Sprintf(command, arguments...)\n\tcommand += \"\\r\\n\"\n\n\tif _, err := ftp.writer.WriteString(command); err != nil {\n\t\treturn err\n\t}\n\n\tif err := ftp.writer.Flush(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ enables passive data connection and returns port number\nfunc (ftp *FTP) Pasv() (port int, err error) {\n\tvar line string\n\tif line, err = ftp.cmd(\"227\", \"PASV\"); err != nil {\n\t\treturn\n\t}\n\n\tre, err := regexp.Compile(`\\((.*)\\)`)\n\n\tres := re.FindAllStringSubmatch(line, -1)\n\n\ts := strings.Split(res[0][1], \",\")\n\n\tl1, _ := strconv.Atoi(s[len(s)-2])\n\tl2, _ := strconv.Atoi(s[len(s)-1])\n\n\tport = l1<<8 + l2\n\n\treturn\n}\n\n\/\/ open new data connection\nfunc (ftp *FTP) newConnection(port int) (conn net.Conn, err error) {\n\taddr := fmt.Sprintf(\"%s:%d\", strings.Split(ftp.addr, \":\")[0], port)\n\n\tif ftp.debug {\n\t\tlog.Printf(\"Connecting to %s\\n\", addr)\n\t}\n\n\tif conn, err = net.Dial(\"tcp\", addr); err != nil {\n\t\treturn\n\t}\n\n\tif ftp.tlsconfig != nil {\n\t\tconn = tls.Client(conn, ftp.tlsconfig)\n\t}\n\n\treturn\n}\n\n\/\/ upload file\nfunc (ftp *FTP) Stor(path string, r io.Reader) (err error) {\n\tif err = ftp.Type(\"I\"); err != nil {\n\t\treturn\n\t}\n\n\tvar port int\n\tif port, err = ftp.Pasv(); err != nil {\n\t\treturn\n\t}\n\n\tif err = ftp.send(\"STOR %s\", path); err != nil {\n\t\treturn\n\t}\n\n\tvar pconn net.Conn\n\tif pconn, err = ftp.newConnection(port); err != nil {\n\t\treturn\n\t}\n\n\tvar line string\n\tif line, err = ftp.receive(); err != nil {\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(line, \"150\") {\n\t\terr = errors.New(line)\n\t\treturn\n\t}\n\n\tif _, err = io.Copy(pconn, r); err != nil {\n\t\treturn\n\t}\n\n\tpconn.Close()\n\n\tif line, err = ftp.receive(); err != nil {\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(line, \"226\") {\n\t\terr = errors.New(line)\n\t\treturn\n\t}\n\n\treturn\n\n}\n\n\/\/ retrieves file\nfunc (ftp *FTP) Retr(path string, retrFn RetrFunc) (s string, err error) {\n\tif err = ftp.Type(\"I\"); err != nil {\n\t\treturn\n\t}\n\n\tvar port int\n\tif port, err = ftp.Pasv(); err != nil {\n\t\treturn\n\t}\n\n\tif err = ftp.send(\"RETR %s\", path); err != nil {\n\t\treturn\n\t}\n\n\tvar pconn net.Conn\n\tif pconn, err = ftp.newConnection(port); err != nil {\n\t\treturn\n\t}\n\n\tvar line string\n\tif line, err = ftp.receive(); err != nil {\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(line, \"150\") {\n\t\terr = errors.New(line)\n\t\treturn\n\t}\n\n\tif err = retrFn(pconn); err != nil {\n\t\treturn\n\t}\n\n\tpconn.Close()\n\n\tif line, err = ftp.receive(); err != nil {\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(line, \"226\") {\n\t\terr = errors.New(line)\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ list the path (or current directory)\nfunc (ftp *FTP) List(path string) (files []string, err error) {\n\tif err = ftp.Type(\"A\"); err != nil {\n\t\treturn\n\t}\n\n\tvar port int\n\tif port, err = ftp.Pasv(); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ check if MLSD works\n\tif err = ftp.send(\"MLSD %s\", path); err != nil {\n\t}\n\n\tvar pconn net.Conn\n\tif pconn, err = ftp.newConnection(port); err != nil {\n\t\treturn\n\t}\n\n\tvar line string\n\tif line, err = ftp.receive(); err != nil {\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(line, \"150\") {\n\t\t\/\/ MLSD failed, lets try LIST\n\t\tif err = ftp.send(\"LIST %s\", path); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif line, err = ftp.receive(); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif !strings.HasPrefix(line, \"150\") {\n\t\t\t\/\/ Really list is not working here\n\t\t\terr = errors.New(line)\n\t\t\treturn\n\t\t}\n\t}\n\n\treader := bufio.NewReader(pconn)\n\n\tfor {\n\t\tline, err = reader.ReadString('\\n')\n\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tfiles = append(files, string(line))\n\t}\n\n\tpconn.Close()\n\n\tif line, err = ftp.receive(); err != nil {\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(line, \"226\") {\n\t\terr = errors.New(line)\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ login on server with strange login behavior\nfunc (ftp *FTP) SmartLogin(username string, password string) (err error) {\n\t\/\/ Maybe the server has some useless words to say. Make him talk\n\terr = ftp.Noop()\n\tif err != nil && strings.HasPrefix(err.Error(), \"220\") {\n\t\t\/\/ Maybe with another Noop the server will ask us to login?\n\t\terr = ftp.Noop()\n\t\tif err != nil && strings.HasPrefix(err.Error(), \"530\") {\n\t\t\t\/\/ ok, let's login\n\n\t\t\t\/\/ftp.Login(username, password)\n\n\t\t\tif _, err = ftp.cmd(\"530\", \"USER %s\", username); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, err = ftp.cmd(\"230\", \"PASS %s\", password); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t}\n\t\/\/ Nothing strange... let's try a normal login\n\treturn ftp.Login(username, password)\n}\n\n\/\/ login to the server\nfunc (ftp *FTP) Login(username string, password string) (err error) {\n\tif _, err = ftp.cmd(\"331\", \"USER %s\", username); err != nil {\n\t\tif strings.HasPrefix(err.Error(), \"230\") {\n\t\t\t\/\/ Ok, probably anonymous server\n\t\t\t\/\/ but login was fine, so return no error\n\t\t\terr = nil\n\t\t} else {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif _, err = ftp.cmd(\"230\", \"PASS %s\", password); err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\n\/\/ connect to server, debug is OFF\nfunc Connect(addr string) (*FTP, error) {\n\tvar err error\n\tvar conn net.Conn\n\n\tif conn, err = net.Dial(\"tcp\", addr); err != nil {\n\t\treturn nil, err\n\t}\n\n\twriter := bufio.NewWriter(conn)\n\treader := bufio.NewReader(conn)\n\n\treader.ReadString('\\n')\n\n\treturn &FTP{conn: conn, addr: addr, reader: reader, writer: writer, debug: false}, nil\n}\n\n\/\/ connect to server, debug is ON\nfunc ConnectDbg(addr string) (*FTP, error) {\n\tvar err error\n\tvar conn net.Conn\n\n\tif conn, err = net.Dial(\"tcp\", addr); err != nil {\n\t\treturn nil, err\n\t}\n\n\twriter := bufio.NewWriter(conn)\n\treader := bufio.NewReader(conn)\n\n\tvar line string\n\n\tline, err = reader.ReadString('\\n')\n\n\tlog.Print(line)\n\n\treturn &FTP{conn: conn, addr: addr, reader: reader, writer: writer, debug: true}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nconst (\n\tansiEraseDisplay = \"\\033[2J\"\n\tansiResetCursor = \"\\033[H\"\n\tcarriageReturn = \"\\015\"\n\tdefaultPrompt = \">> \"\n)\n\nvar originalSttyState bytes.Buffer\nvar winRows uint16\nvar winCols uint16\n\ntype winsize struct {\n\trows, cols, xpixel, ypixel uint16\n}\n\nfunc getWinsize() winsize {\n\tws := winsize{}\n\tsyscall.Syscall(syscall.SYS_IOCTL,\n\t\tuintptr(0), uintptr(syscall.TIOCGWINSZ),\n\t\tuintptr(unsafe.Pointer(&ws)))\n\treturn ws\n}\n\n\/\/ TODO: This is wrong: stdin should be the TTY\nfunc getSttyState(state *bytes.Buffer) (err error) {\n\tcmd := exec.Command(\"stty\", \"-g\")\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = state\n\treturn cmd.Run()\n}\n\n\/\/ TODO: This is wrong: stdin and stdout should be the TTY\nfunc setSttyState(state *bytes.Buffer) (err error) {\n\tcmd := exec.Command(\"stty\", state.String())\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\treturn cmd.Run()\n}\n\nfunc NewTTY() (t *TTY, err error) {\n\tfh, err := os.OpenFile(\"\/dev\/tty\", os.O_RDWR, 0666)\n\tif err != nil {\n\t\treturn\n\t}\n\tt = &TTY{fh, defaultPrompt}\n\treturn\n}\n\ntype TTY struct {\n\t*os.File\n\tprompt string\n}\n\n\/\/ Clears the screen and sets the cursor to first row, first column\nfunc (t *TTY) resetScreen() {\n\tfmt.Fprint(t.File, ansiEraseDisplay+ansiResetCursor)\n}\n\n\/\/ Print prompt with `in`\nfunc (t *TTY) printPrompt(in []byte) {\n\tfmt.Fprintf(t.File, t.prompt+\"%s\", in)\n}\n\n\/\/ Positions the cursor after the prompt and `inlen` colums to the right\nfunc (t *TTY) cursorAfterPrompt(inlen int) {\n\tt.setCursorPos(0, len(t.prompt)+inlen)\n}\n\n\/\/ Sets the cursor to `line` and `col`\nfunc (t *TTY) setCursorPos(line int, col int) {\n\tfmt.Fprintf(t.File, \"\\033[%d;%dH\", line+1, col+1)\n}\n\nfunc init() {\n\tws := getWinsize()\n\twinRows = ws.rows\n\twinCols = ws.cols\n}\n\nfunc main() {\n\terr := getSttyState(&originalSttyState)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t\/\/ TODO: this needs to be run when the process is interrupted\n\tdefer setSttyState(&originalSttyState)\n\n\tsetSttyState(bytes.NewBufferString(\"cbreak\"))\n\tsetSttyState(bytes.NewBufferString(\"-echo\"))\n\n\tcmdTemplate := \"ag {{}}\"\n\tplaceholder := \"{{}}\"\n\n\ttty, err := NewTTY()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tprinter := NewPrinter(tty, int(winCols), int(winRows)-3)\n\n\trunner := &Runner{\n\t\tprinter: printer,\n\t\ttemplate: cmdTemplate,\n\t\tplaceholder: placeholder,\n\t\tbuf: new(bytes.Buffer),\n\t}\n\n\t\/\/ TODO: Clean this up. This is a mess.\n\tvar input []byte = make([]byte, 0)\n\tvar b []byte = make([]byte, 1)\n\n\tfor {\n\t\ttty.resetScreen()\n\t\ttty.printPrompt(input[:len(input)])\n\n\t\tif len(input) > 0 {\n\t\t\trunner.killCurrent()\n\n\t\t\tfmt.Fprintf(tty, \"\\n\")\n\n\t\t\tgo func() {\n\t\t\t\trunner.runWithInput(input[:len(input)])\n\t\t\t\ttty.cursorAfterPrompt(len(input))\n\t\t\t}()\n\t\t}\n\n\t\tos.Stdin.Read(b)\n\t\tswitch b[0] {\n\t\tcase 127:\n\t\t\t\/\/ Backspace\n\t\t\tif len(input) > 1 {\n\t\t\t\tinput = input[:len(input)-1]\n\t\t\t} else if len(input) == 1 {\n\t\t\t\tinput = nil\n\t\t\t}\n\t\tcase 4, 10, 13:\n\t\t\t\/\/ Ctrl-D, line feed, carriage return\n\t\t\trunner.writeCmdStdout(os.Stdout)\n\t\t\treturn\n\t\tdefault:\n\t\t\t\/\/ TODO: Default is wrong here. Only append printable characters to\n\t\t\t\/\/ input\n\t\t\tinput = append(input, b...)\n\t\t}\n\t}\n}\n<commit_msg>Reset screen before printing final output<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nconst (\n\tansiEraseDisplay = \"\\033[2J\"\n\tansiResetCursor = \"\\033[H\"\n\tcarriageReturn = \"\\015\"\n\tdefaultPrompt = \">> \"\n)\n\nvar originalSttyState bytes.Buffer\nvar winRows uint16\nvar winCols uint16\n\ntype winsize struct {\n\trows, cols, xpixel, ypixel uint16\n}\n\nfunc getWinsize() winsize {\n\tws := winsize{}\n\tsyscall.Syscall(syscall.SYS_IOCTL,\n\t\tuintptr(0), uintptr(syscall.TIOCGWINSZ),\n\t\tuintptr(unsafe.Pointer(&ws)))\n\treturn ws\n}\n\n\/\/ TODO: This is wrong: stdin should be the TTY\nfunc getSttyState(state *bytes.Buffer) (err error) {\n\tcmd := exec.Command(\"stty\", \"-g\")\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = state\n\treturn cmd.Run()\n}\n\n\/\/ TODO: This is wrong: stdin and stdout should be the TTY\nfunc setSttyState(state *bytes.Buffer) (err error) {\n\tcmd := exec.Command(\"stty\", state.String())\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\treturn cmd.Run()\n}\n\nfunc NewTTY() (t *TTY, err error) {\n\tfh, err := os.OpenFile(\"\/dev\/tty\", os.O_RDWR, 0666)\n\tif err != nil {\n\t\treturn\n\t}\n\tt = &TTY{fh, defaultPrompt}\n\treturn\n}\n\ntype TTY struct {\n\t*os.File\n\tprompt string\n}\n\n\/\/ Clears the screen and sets the cursor to first row, first column\nfunc (t *TTY) resetScreen() {\n\tfmt.Fprint(t.File, ansiEraseDisplay+ansiResetCursor)\n}\n\n\/\/ Print prompt with `in`\nfunc (t *TTY) printPrompt(in []byte) {\n\tfmt.Fprintf(t.File, t.prompt+\"%s\", in)\n}\n\n\/\/ Positions the cursor after the prompt and `inlen` colums to the right\nfunc (t *TTY) cursorAfterPrompt(inlen int) {\n\tt.setCursorPos(0, len(t.prompt)+inlen)\n}\n\n\/\/ Sets the cursor to `line` and `col`\nfunc (t *TTY) setCursorPos(line int, col int) {\n\tfmt.Fprintf(t.File, \"\\033[%d;%dH\", line+1, col+1)\n}\n\nfunc init() {\n\tws := getWinsize()\n\twinRows = ws.rows\n\twinCols = ws.cols\n}\n\nfunc main() {\n\terr := getSttyState(&originalSttyState)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t\/\/ TODO: this needs to be run when the process is interrupted\n\tdefer setSttyState(&originalSttyState)\n\n\tsetSttyState(bytes.NewBufferString(\"cbreak\"))\n\tsetSttyState(bytes.NewBufferString(\"-echo\"))\n\n\tcmdTemplate := \"ag {{}}\"\n\tplaceholder := \"{{}}\"\n\n\ttty, err := NewTTY()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tprinter := NewPrinter(tty, int(winCols), int(winRows)-3)\n\n\trunner := &Runner{\n\t\tprinter: printer,\n\t\ttemplate: cmdTemplate,\n\t\tplaceholder: placeholder,\n\t\tbuf: new(bytes.Buffer),\n\t}\n\n\t\/\/ TODO: Clean this up. This is a mess.\n\tvar input []byte = make([]byte, 0)\n\tvar b []byte = make([]byte, 1)\n\n\tfor {\n\t\ttty.resetScreen()\n\t\ttty.printPrompt(input[:len(input)])\n\n\t\tif len(input) > 0 {\n\t\t\trunner.killCurrent()\n\n\t\t\tfmt.Fprintf(tty, \"\\n\")\n\n\t\t\tgo func() {\n\t\t\t\trunner.runWithInput(input[:len(input)])\n\t\t\t\ttty.cursorAfterPrompt(len(input))\n\t\t\t}()\n\t\t}\n\n\t\tos.Stdin.Read(b)\n\t\tswitch b[0] {\n\t\tcase 127:\n\t\t\t\/\/ Backspace\n\t\t\tif len(input) > 1 {\n\t\t\t\tinput = input[:len(input)-1]\n\t\t\t} else if len(input) == 1 {\n\t\t\t\tinput = nil\n\t\t\t}\n\t\tcase 4, 10, 13:\n\t\t\t\/\/ Ctrl-D, line feed, carriage return\n\t\t\ttty.resetScreen()\n\t\t\trunner.writeCmdStdout(os.Stdout)\n\t\t\treturn\n\t\tdefault:\n\t\t\t\/\/ TODO: Default is wrong here. Only append printable characters to\n\t\t\t\/\/ input\n\t\t\tinput = append(input, b...)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package sus\n\nimport(\n\t`github.com\/qedus\/nds`\n\t`golang.org\/x\/net\/context`\n\t`google.golang.org\/appengine\/datastore`\n)\n\n\/\/ Creates and configures a store that stores entities in Google AppEngines memcache and datastore.\n\/\/ github.com\/qedus\/nds is used for strongly consistent automatic caching.\nfunc NewGaeStore(ctx context.Context, kind string, idf IdFactory, vf VersionFactory) Store {\n\tgetKey := func(ctx context.Context, id string) *datastore.Key {\n\t\treturn datastore.NewKey(ctx, kind, id, 0, nil)\n\t}\n\n\tgetMulti := func(ids []string) (vs []Version, err error) {\n\t\tcount := len(ids)\n\t\tvs = make([]Version, count, count)\n\t\tks := make([]*datastore.Key, count, count)\n\t\tfor i := 0; i < count; i++ {\n\t\t\tvs[i] = vf()\n\t\t\tks[i] = getKey(ctx, ids[i])\n\t\t}\n\t\terr = nds.GetMulti(ctx, ks, vs)\n\t\treturn\n\t}\n\n\tputMulti := func(ids []string, vs []Version) (err error) {\n\t\tcount := len(ids)\n\t\tks := make([]*datastore.Key, count, count)\n\t\tfor i := 0; i < count; i++ {\n\t\t\tks[i] = getKey(ctx, ids[i])\n\t\t}\n\t\t_, err = nds.PutMulti(ctx, ks, vs)\n\t\treturn\n\t}\n\n\tdelMulti := func(ids []string) error {\n\t\tcount := len(ids)\n\t\tks := make([]*datastore.Key, count, count)\n\t\tfor i := 0; i < count; i++ {\n\t\t\tks[i] = getKey(ctx, ids[i])\n\t\t}\n\t\treturn nds.DeleteMulti(ctx, ks)\n\t}\n\n\tisNonExtantError := func(err error) bool {\n\t\treturn err == datastore.ErrNoSuchEntity\n\t}\n\n\trit := func(tran Transaction) error {\n\t\treturn nds.RunInTransaction(ctx, func(ctx context.Context)error{return tran()}, &datastore.TransactionOptions{XG:true})\n\t}\n\n\treturn NewStore(getMulti, putMulti, delMulti, idf, vf, isNonExtantError,rit)\n}<commit_msg>no need to pass in ctx<commit_after>package sus\n\nimport(\n\t`github.com\/qedus\/nds`\n\t`golang.org\/x\/net\/context`\n\t`google.golang.org\/appengine\/datastore`\n)\n\n\/\/ Creates and configures a store that stores entities in Google AppEngines memcache and datastore.\n\/\/ github.com\/qedus\/nds is used for strongly consistent automatic caching.\nfunc NewGaeStore(kind string, idf IdFactory, vf VersionFactory) Store {\n\tvar tranCtx context.Context\n\n\tgetKey := func(ctx context.Context, id string) *datastore.Key {\n\t\treturn datastore.NewKey(ctx, kind, id, 0, nil)\n\t}\n\n\tgetMulti := func(ids []string) (vs []Version, err error) {\n\t\tcount := len(ids)\n\t\tvs = make([]Version, count, count)\n\t\tks := make([]*datastore.Key, count, count)\n\t\tfor i := 0; i < count; i++ {\n\t\t\tvs[i] = vf()\n\t\t\tks[i] = getKey(tranCtx, ids[i])\n\t\t}\n\t\terr = nds.GetMulti(tranCtx, ks, vs)\n\t\treturn\n\t}\n\n\tputMulti := func(ids []string, vs []Version) (err error) {\n\t\tcount := len(ids)\n\t\tks := make([]*datastore.Key, count, count)\n\t\tfor i := 0; i < count; i++ {\n\t\t\tks[i] = getKey(tranCtx, ids[i])\n\t\t}\n\t\t_, err = nds.PutMulti(tranCtx, ks, vs)\n\t\treturn\n\t}\n\n\tdelMulti := func(ids []string) error {\n\t\tcount := len(ids)\n\t\tks := make([]*datastore.Key, count, count)\n\t\tfor i := 0; i < count; i++ {\n\t\t\tks[i] = getKey(tranCtx, ids[i])\n\t\t}\n\t\treturn nds.DeleteMulti(tranCtx, ks)\n\t}\n\n\tisNonExtantError := func(err error) bool {\n\t\treturn err == datastore.ErrNoSuchEntity\n\t}\n\n\trit := func(tran Transaction) error {\n\t\treturn nds.RunInTransaction(context.Background(), func(ctx context.Context)error{\n\t\t\ttranCtx = ctx\n\t\t\treturn tran()\n\t\t}, &datastore.TransactionOptions{XG:true})\n\t}\n\n\treturn NewStore(getMulti, putMulti, delMulti, idf, vf, isNonExtantError,rit)\n}<|endoftext|>"} {"text":"<commit_before>package content\n\nimport \"net\/http\"\n\n\/\/ Item should only be embedded into content type structs.\ntype Item struct {\n\tID int `json:\"id\"`\n\tSlug string `json:\"slug\"`\n\tTimestamp int64 `json:\"timestamp\"`\n\tUpdated int64 `json:\"updated\"`\n}\n\n\/\/ Time partially implements the Sortable interface\nfunc (i Item) Time() int64 {\n\treturn i.Timestamp\n}\n\n\/\/ Touch partially implements the Sortable interface\nfunc (i Item) Touch() int64 {\n\treturn i.Updated\n}\n\n\/\/ ItemID partially implements the Sortable interface\nfunc (i Item) ItemID() int {\n\treturn i.ID\n}\n\n\/\/ SetSlug sets the item's slug for its URL\nfunc (i *Item) SetSlug(slug string) {\n\ti.Slug = slug\n}\n\n\/\/ SetItemID sets the Item's ID field\nfunc (i *Item) SetItemID(id int) {\n\ti.ID = id\n}\n\n\/\/ BeforeSave is a no-op to ensure structs which embed Item implement Hookable\nfunc (i Item) BeforeSave(req *http.Request) error {\n\treturn nil\n}\n\n\/\/ AfterSave is a no-op to ensure structs which embed Item implement Hookable\nfunc (i Item) AfterSave(req *http.Request) error {\n\treturn nil\n}\n\n\/\/ BeforeDelete is a no-op to ensure structs which embed Item implement Hookable\nfunc (i Item) BeforeDelete(req *http.Request) error {\n\treturn nil\n}\n\n\/\/ AfterDelete is a no-op to ensure structs which embed Item implement Hookable\nfunc (i Item) AfterDelete(req *http.Request) error {\n\treturn nil\n}\n\n\/\/ BeforeReject is a no-op to ensure structs which embed Item implement Hookable\nfunc (i Item) BeforeReject(req *http.Request) error {\n\treturn nil\n}\n\n\/\/ AfterReject is a no-op to ensure structs which embed Item implement Hookable\nfunc (i Item) AfterReject(req *http.Request) error {\n\treturn nil\n}\n\n\/\/ Sluggable makes a struct locatable by URL with it's own path\n\/\/ As an Item implementing Sluggable, slugs may overlap. If this is an issue,\n\/\/ make your content struct (or one which imbeds Item) implement Sluggable\n\/\/ and it will override the slug created by Item's SetSlug with your struct's\ntype Sluggable interface {\n\tSetSlug(string)\n}\n\n\/\/ Identifiable enables a struct to have its ID set. Typically this is done\n\/\/ to set an ID to -1 indicating it is new for DB inserts, since by default\n\/\/ a newly initialized struct would have an ID of 0, the int zero-value, and\n\/\/ BoltDB's starting key per bucket is 0, thus overwriting the first record.\ntype Identifiable interface {\n\tSetItemID(int)\n}\n\n\/\/ Hookable provides our user with an easy way to intercept or add functionality\n\/\/ to the different lifecycles\/events a struct may encounter. Item implements\n\/\/ Hookable with no-ops so our user can override only whichever ones necessary.\ntype Hookable interface {\n\tBeforeSave(req *http.Request) error\n\tAfterSave(req *http.Request) error\n\n\tBeforeDelete(req *http.Request) error\n\tAfterDelete(req *http.Request) error\n\n\tBeforeApprove(req *http.Request) error\n\tAfterApprove(req *http.Request) error\n\n\tBeforeReject(req *http.Request) error\n\tAfterReject(req *http.Request) error\n}\n<commit_msg>testing revert Item to pointer in method reciever for hooks -- interface methods of emedded types may not be promoted to outer type if it is not same pointer\/value type<commit_after>package content\n\nimport \"net\/http\"\n\n\/\/ Item should only be embedded into content type structs.\ntype Item struct {\n\tID int `json:\"id\"`\n\tSlug string `json:\"slug\"`\n\tTimestamp int64 `json:\"timestamp\"`\n\tUpdated int64 `json:\"updated\"`\n}\n\n\/\/ Time partially implements the Sortable interface\nfunc (i Item) Time() int64 {\n\treturn i.Timestamp\n}\n\n\/\/ Touch partially implements the Sortable interface\nfunc (i Item) Touch() int64 {\n\treturn i.Updated\n}\n\n\/\/ ItemID partially implements the Sortable interface\nfunc (i Item) ItemID() int {\n\treturn i.ID\n}\n\n\/\/ SetSlug sets the item's slug for its URL\nfunc (i *Item) SetSlug(slug string) {\n\ti.Slug = slug\n}\n\n\/\/ SetItemID sets the Item's ID field\nfunc (i *Item) SetItemID(id int) {\n\ti.ID = id\n}\n\n\/\/ BeforeSave is a no-op to ensure structs which embed Item implement Hookable\nfunc (i *Item) BeforeSave(req *http.Request) error {\n\treturn nil\n}\n\n\/\/ AfterSave is a no-op to ensure structs which embed Item implement Hookable\nfunc (i *Item) AfterSave(req *http.Request) error {\n\treturn nil\n}\n\n\/\/ BeforeDelete is a no-op to ensure structs which embed Item implement Hookable\nfunc (i *Item) BeforeDelete(req *http.Request) error {\n\treturn nil\n}\n\n\/\/ AfterDelete is a no-op to ensure structs which embed Item implement Hookable\nfunc (i *Item) AfterDelete(req *http.Request) error {\n\treturn nil\n}\n\n\/\/ BeforeReject is a no-op to ensure structs which embed Item implement Hookable\nfunc (i *Item) BeforeReject(req *http.Request) error {\n\treturn nil\n}\n\n\/\/ AfterReject is a no-op to ensure structs which embed Item implement Hookable\nfunc (i *Item) AfterReject(req *http.Request) error {\n\treturn nil\n}\n\n\/\/ Sluggable makes a struct locatable by URL with it's own path\n\/\/ As an Item implementing Sluggable, slugs may overlap. If this is an issue,\n\/\/ make your content struct (or one which imbeds Item) implement Sluggable\n\/\/ and it will override the slug created by Item's SetSlug with your struct's\ntype Sluggable interface {\n\tSetSlug(string)\n}\n\n\/\/ Identifiable enables a struct to have its ID set. Typically this is done\n\/\/ to set an ID to -1 indicating it is new for DB inserts, since by default\n\/\/ a newly initialized struct would have an ID of 0, the int zero-value, and\n\/\/ BoltDB's starting key per bucket is 0, thus overwriting the first record.\ntype Identifiable interface {\n\tSetItemID(int)\n}\n\n\/\/ Hookable provides our user with an easy way to intercept or add functionality\n\/\/ to the different lifecycles\/events a struct may encounter. Item implements\n\/\/ Hookable with no-ops so our user can override only whichever ones necessary.\ntype Hookable interface {\n\tBeforeSave(req *http.Request) error\n\tAfterSave(req *http.Request) error\n\n\tBeforeDelete(req *http.Request) error\n\tAfterDelete(req *http.Request) error\n\n\tBeforeApprove(req *http.Request) error\n\tAfterApprove(req *http.Request) error\n\n\tBeforeReject(req *http.Request) error\n\tAfterReject(req *http.Request) error\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n)\n\ntype item struct {\n\tid string\n\tcategory string\n\ttitle string\n\turl string\n}\n\nfunc ScrapeMuusikoidenNet() {\n\tdoc, err := goquery.NewDocument(\"https:\/\/muusikoiden.net\/tori\/?category=0\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tidFromURL, _ := regexp.Compile(\"\/(\\\\d+)$\")\n\n\tdoc.Find(\"td.tori_title\").Each(func(i int, titleContainer *goquery.Selection) {\n\t\t\/\/ TODO \"Myydään\" ääkköset are mangled\n\t\tcategory := titleContainer.Find(\"b\").Text()\n\t\tlink := titleContainer.Find(\"a\")\n\t\ttitle := link.Text()\n\t\turl, _ := link.Attr(\"href\")\n\t\tid := idFromURL.FindStringSubmatch(url)[1]\n\t\titem := item{id, category, title, url}\n\t\tfmt.Printf(\"%s\\n\", item)\n\t})\n}\n\nfunc main() {\n\tScrapeMuusikoidenNet()\n}\n<commit_msg>Oh god<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n)\n\ntype item struct {\n\tid string\n\tcategory string\n\ttitle string\n\turl string\n}\n\nfunc ScrapeMuusikoidenNet() {\n\tdoc, err := goquery.NewDocument(\"https:\/\/muusikoiden.net\/tori\/?category=0\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tidFromURL, _ := regexp.Compile(\"\/(\\\\d+)$\")\n\n\telements := doc.Find(\"td.tori_title\")\n\n\titems := make([]item, elements.Length())\n\n\telements.Each(func(i int, titleContainer *goquery.Selection) {\n\t\t\/\/ TODO \"Myydään\" ääkköset are mangled\n\t\tcategory := titleContainer.Find(\"b\").Text()\n\t\tlink := titleContainer.Find(\"a\")\n\t\ttitle := link.Text()\n\t\turl, _ := link.Attr(\"href\")\n\t\tid := idFromURL.FindStringSubmatch(url)[1]\n\t\titem := item{id, category, title, url}\n\t\titems[i] = item\n\t})\n\n\tfmt.Printf(\"%s\\n\", items)\n}\n\nfunc main() {\n\tScrapeMuusikoidenNet()\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2014 Google Inc. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage election\n\nimport (\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\ntype slowService struct {\n\tt *testing.T\n\ton bool\n\t\/\/ We explicitly have no lock to prove that\n\t\/\/ Start and Stop are not called concurrently.\n\tchanges chan<- bool\n\twg sync.WaitGroup\n}\n\nfunc (s *slowService) Validate(d, c Master) {\n\t\/\/ noop\n}\n\nfunc (s *slowService) Start() {\n\tif s.on {\n\t\ts.t.Errorf(\"started already on service\")\n\t}\n\tdefer s.wg.Add(1)\n\ttime.Sleep(2 * time.Millisecond)\n\ts.on = true\n\ts.changes <- true\n}\n\nfunc (s *slowService) Stop() {\n\tif !s.on {\n\t\ts.t.Errorf(\"stopped already off service\")\n\t}\n\tdefer s.wg.Done()\n\ttime.Sleep(2 * time.Millisecond)\n\ts.on = false\n\ts.changes <- false\n\n}\n\nfunc Test(t *testing.T) {\n\tm := NewFake()\n\tchanges := make(chan bool, 1500)\n\ts := &slowService{t: t, changes: changes}\n\tgo Notify(m, \"\", \"me\", s)\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tfor i := 0; i < 500; i++ {\n\t\t\tfor _, key := range []string{\"me\", \"notme\", \"alsonotme\"} {\n\t\t\t\tm.ChangeMaster(Master(key))\n\t\t\t}\n\t\t}\n\t\tclose(done)\n\t}()\n\n\t<-done\n\ttime.Sleep(8 * time.Millisecond)\n\tch := make(chan struct{})\n\tgo func() {\n\t\tdefer close(ch)\n\t\ts.wg.Wait()\n\t}()\n\tselect {\n\tcase <-time.After(1 * time.Second):\n\t\tt.Fatalf(\"timed out waiting for slow service to catch up\")\n\tcase <-ch: \/\/ expected\n\t}\n\n\tclose(changes)\n\n\tchangeList := []bool{}\n\tfor {\n\t\tchange, ok := <-changes\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tchangeList = append(changeList, change)\n\t}\n\n\tif len(changeList) > 1000 {\n\t\tt.Errorf(\"unexpected number of changes: %v\", len(changeList))\n\t}\n}\n<commit_msg>better fix for flaky test failures caused by writes to a closed chan<commit_after>\/*\nCopyright 2014 Google Inc. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage election\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\ntype slowService struct {\n\tt *testing.T\n\ton bool\n\t\/\/ We explicitly have no lock to prove that\n\t\/\/ Start and Stop are not called concurrently.\n\tchanges chan<- bool\n\tdone <-chan struct{}\n}\n\nfunc (s *slowService) Validate(d, c Master) {\n\t\/\/ noop\n}\n\nfunc (s *slowService) Start() {\n\tselect {\n\tcase <-s.done:\n\t\treturn \/\/ avoid writing to closed changes chan\n\tdefault:\n\t}\n\tif s.on {\n\t\ts.t.Errorf(\"started already on service\")\n\t}\n\ttime.Sleep(2 * time.Millisecond)\n\ts.on = true\n\ts.changes <- true\n}\n\nfunc (s *slowService) Stop() {\n\tselect {\n\tcase <-s.done:\n\t\treturn \/\/ avoid writing to closed changes chan\n\tdefault:\n\t}\n\tif !s.on {\n\t\ts.t.Errorf(\"stopped already off service\")\n\t}\n\ttime.Sleep(2 * time.Millisecond)\n\ts.on = false\n\ts.changes <- false\n}\n\nfunc Test(t *testing.T) {\n\tm := NewFake()\n\tchanges := make(chan bool, 1500)\n\tdone := make(chan struct{})\n\ts := &slowService{t: t, changes: changes, done: done}\n\tgo Notify(m, \"\", \"me\", s)\n\n\tgo func() {\n\t\tfor i := 0; i < 500; i++ {\n\t\t\tfor _, key := range []string{\"me\", \"notme\", \"alsonotme\"} {\n\t\t\t\tm.ChangeMaster(Master(key))\n\t\t\t}\n\t\t}\n\t\tclose(done)\n\t}()\n\n\t<-done\n\ttime.Sleep(8 * time.Millisecond)\n\n\tclose(changes)\n\n\tchangeList := []bool{}\n\tfor {\n\t\tchange, ok := <-changes\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tchangeList = append(changeList, change)\n\t}\n\n\tif len(changeList) > 1000 {\n\t\tt.Errorf(\"unexpected number of changes: %v\", len(changeList))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage client\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\tclientapi \"github.com\/cilium\/cilium\/api\/v1\/health\/client\"\n\t\"github.com\/cilium\/cilium\/api\/v1\/health\/models\"\n\t\"github.com\/cilium\/cilium\/pkg\/health\/defaults\"\n\n\truntime_client \"github.com\/go-openapi\/runtime\/client\"\n\t\"github.com\/go-openapi\/strfmt\"\n)\n\n\/\/ Client is a client for cilium health\ntype Client struct {\n\tclientapi.CiliumHealth\n}\n\nfunc configureTransport(tr *http.Transport, proto, addr string) *http.Transport {\n\tif tr == nil {\n\t\ttr = &http.Transport{}\n\t}\n\n\tif proto == \"unix\" {\n\t\t\/\/ No need for compression in local communications.\n\t\ttr.DisableCompression = true\n\t\ttr.Dial = func(_, _ string) (net.Conn, error) {\n\t\t\treturn net.Dial(proto, addr)\n\t\t}\n\t} else {\n\t\ttr.Proxy = http.ProxyFromEnvironment\n\t\ttr.Dial = (&net.Dialer{}).Dial\n\t}\n\n\treturn tr\n}\n\n\/\/ NewDefaultClient creates a client with default parameters connecting to UNIX domain socket.\nfunc NewDefaultClient() (*Client, error) {\n\treturn NewClient(\"\")\n}\n\n\/\/ NewClient creates a client for the given `host`.\nfunc NewClient(host string) (*Client, error) {\n\tif host == \"\" {\n\t\t\/\/ Check if environment variable points to socket\n\t\te := os.Getenv(defaults.SockPathEnv)\n\t\tif e == \"\" {\n\t\t\t\/\/ If unset, fall back to default value\n\t\t\te = defaults.SockPath\n\t\t}\n\t\thost = \"unix:\/\/\" + e\n\t}\n\ttmp := strings.SplitN(host, \":\/\/\", 2)\n\tif len(tmp) != 2 {\n\t\treturn nil, fmt.Errorf(\"invalid host format '%s'\", host)\n\t}\n\n\tswitch tmp[0] {\n\tcase \"tcp\":\n\t\tif _, err := url.Parse(\"tcp:\/\/\" + tmp[1]); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thost = \"http:\/\/\" + tmp[1]\n\tcase \"unix\":\n\t\thost = tmp[1]\n\t}\n\n\ttransport := configureTransport(nil, tmp[0], host)\n\thttpClient := &http.Client{Transport: transport}\n\tclientTrans := runtime_client.NewWithClient(tmp[1], clientapi.DefaultBasePath,\n\t\tclientapi.DefaultSchemes, httpClient)\n\treturn &Client{*clientapi.New(clientTrans, strfmt.Default)}, nil\n}\n\n\/\/ Hint tries to improve the error message displayed to the user.\nfunc Hint(err error) error {\n\tif err == nil {\n\t\treturn err\n\t}\n\te, _ := url.PathUnescape(err.Error())\n\tif strings.Contains(err.Error(), defaults.SockPath) {\n\t\treturn fmt.Errorf(\"%s\\nIs the agent running?\", e)\n\t}\n\treturn fmt.Errorf(\"%s\", e)\n}\n\nfunc connectivityStatusHealthy(cs *models.ConnectivityStatus) bool {\n\treturn cs != nil && cs.Status == \"\"\n}\n\nfunc formatConnectivityStatus(w io.Writer, cs *models.ConnectivityStatus, path, indent string) {\n\tstatus := cs.Status\n\tif connectivityStatusHealthy(cs) {\n\t\tlatency := time.Duration(cs.Latency)\n\t\tstatus = fmt.Sprintf(\"OK, RTT=%s\", latency)\n\t}\n\tfmt.Fprintf(w, \"%s%s:\\t%s\\n\", indent, path, status)\n}\n\nfunc formatPathStatus(w io.Writer, name string, cp *models.PathStatus, indent string, verbose bool) {\n\tif cp == nil {\n\t\tif verbose {\n\t\t\tfmt.Fprintf(w, \"%s%s connectivity:\\tnil\\n\", indent, name)\n\t\t}\n\t\treturn\n\t}\n\tfmt.Fprintf(w, \"%s%s connectivity to %s:\\n\", indent, name, cp.IP)\n\tindent = fmt.Sprintf(\"%s \", indent)\n\n\tif cp.Icmp != nil {\n\t\tformatConnectivityStatus(w, cp.Icmp, \"ICMP\", indent)\n\t}\n\tif cp.HTTP != nil {\n\t\tformatConnectivityStatus(w, cp.HTTP, \"HTTP via L3\", indent)\n\t}\n}\n\nfunc pathIsHealthy(cp *models.PathStatus) bool {\n\tif cp == nil {\n\t\treturn false\n\t}\n\n\tstatuses := []*models.ConnectivityStatus{\n\t\tcp.Icmp,\n\t\tcp.HTTP,\n\t}\n\tfor _, status := range statuses {\n\t\tif !connectivityStatusHealthy(status) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc nodeIsHealthy(node *models.NodeStatus) bool {\n\treturn pathIsHealthy(node.Host.PrimaryAddress) &&\n\t\t(node.Endpoint == nil || pathIsHealthy(node.Endpoint))\n}\n\nfunc nodeIsLocalhost(node *models.NodeStatus, self *models.SelfStatus) bool {\n\treturn self != nil && node.Name == self.Name\n}\n\nfunc formatNodeStatus(w io.Writer, node *models.NodeStatus, printAll, succinct, verbose, localhost bool) {\n\tlocalStr := \"\"\n\tif localhost {\n\t\tlocalStr = \" (localhost)\"\n\t}\n\tif succinct {\n\t\tif printAll || !nodeIsHealthy(node) {\n\t\t\tfmt.Fprintf(w, \" %s%s\\t%s\\t%t\\t%t\\n\", node.Name,\n\t\t\t\tlocalStr, node.Host.PrimaryAddress.IP,\n\t\t\t\tpathIsHealthy(node.Host.PrimaryAddress),\n\t\t\t\tpathIsHealthy(node.Endpoint))\n\t\t}\n\t} else {\n\t\tfmt.Fprintf(w, \" %s%s:\\n\", node.Name, localStr)\n\t\tformatPathStatus(w, \"Host\", node.Host.PrimaryAddress, \" \", verbose)\n\t\tif verbose && len(node.Host.SecondaryAddresses) > 0 {\n\t\t\tfor _, addr := range node.Host.SecondaryAddresses {\n\t\t\t\tformatPathStatus(w, \"Secondary\", addr, \" \", verbose)\n\t\t\t}\n\t\t}\n\t\tformatPathStatus(w, \"Endpoint\", node.Endpoint, \" \", verbose)\n\t}\n}\n\n\/\/ FormatHealthStatusResponse writes a HealthStatusResponse as a string to the\n\/\/ writer.\n\/\/\n\/\/ 'printAll', if true, causes all nodes to be printed regardless of status\n\/\/ 'succinct', if true, causes node health to be output as one line per node\n\/\/ 'verbose', if true, overrides 'succinct' and prints all information\n\/\/ 'maxLines', if nonzero, determines the maximum number of lines to print\nfunc FormatHealthStatusResponse(w io.Writer, sr *models.HealthStatusResponse, printAll, succinct, verbose bool, maxLines int) {\n\tvar (\n\t\thealthy int\n\t\tlocalhost *models.NodeStatus\n\t)\n\tfor _, node := range sr.Nodes {\n\t\tif nodeIsHealthy(node) {\n\t\t\thealthy++\n\t\t}\n\t\tif nodeIsLocalhost(node, sr.Local) {\n\t\t\tlocalhost = node\n\t\t}\n\t}\n\tif succinct {\n\t\tfmt.Fprintf(w, \"Cluster health:\\t%d\/%d reachable\\t(%s)\\n\",\n\t\t\thealthy, len(sr.Nodes), sr.Timestamp)\n\t\tif printAll || healthy < len(sr.Nodes) {\n\t\t\tfmt.Fprintf(w, \" Name\\tIP\\tReachable\\tEndpoints reachable\\n\")\n\t\t}\n\t} else {\n\t\tfmt.Fprintf(w, \"Probe time:\\t%s\\n\", sr.Timestamp)\n\t\tfmt.Fprintf(w, \"Nodes:\\n\")\n\t}\n\n\tif localhost != nil {\n\t\tformatNodeStatus(w, localhost, printAll, succinct, verbose, true)\n\t\tmaxLines--\n\t}\n\n\tnodes := sr.Nodes\n\tsort.Slice(nodes, func(i, j int) bool {\n\t\treturn strings.Compare(nodes[i].Name, nodes[j].Name) < 0\n\t})\n\tfor n, node := range nodes {\n\t\tif maxLines > 0 && n > maxLines {\n\t\t\tbreak\n\t\t}\n\t\tif node == localhost {\n\t\t\tcontinue\n\t\t}\n\t\tformatNodeStatus(w, node, printAll, succinct, verbose, false)\n\t}\n\tif maxLines > 0 && len(sr.Nodes)-healthy > maxLines {\n\t\tfmt.Fprintf(w, \" ...\")\n\t}\n}\n\n\/\/ GetAndFormatHealthStatus fetches the health status from the cilium-health\n\/\/ daemon via the default channel and formats its output as a string to the\n\/\/ writer.\n\/\/\n\/\/ 'succinct', 'verbose' and 'maxLines' are handled the same as in\n\/\/ FormatHealthStatusResponse().\nfunc GetAndFormatHealthStatus(w io.Writer, succinct, verbose bool, maxLines int) {\n\tclient, err := NewClient(\"\")\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Cluster health:\\t\\t\\tClient error: %s\\n\", err)\n\t\treturn\n\t}\n\thr, err := client.Connectivity.GetStatus(nil)\n\tif err != nil {\n\t\t\/\/ The regular `cilium status` output will print the reason why.\n\t\tfmt.Fprintf(w, \"Cluster health:\\t\\t\\tWarning\\tcilium-health daemon unreachable\\n\")\n\t\treturn\n\t}\n\tFormatHealthStatusResponse(w, hr.Payload, verbose, succinct, verbose, maxLines)\n}\n<commit_msg>pkg\/health\/client: PathIsHealthy as a public function<commit_after>\/\/ Copyright 2017 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage client\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\tclientapi \"github.com\/cilium\/cilium\/api\/v1\/health\/client\"\n\t\"github.com\/cilium\/cilium\/api\/v1\/health\/models\"\n\t\"github.com\/cilium\/cilium\/pkg\/health\/defaults\"\n\n\truntime_client \"github.com\/go-openapi\/runtime\/client\"\n\t\"github.com\/go-openapi\/strfmt\"\n)\n\n\/\/ Client is a client for cilium health\ntype Client struct {\n\tclientapi.CiliumHealth\n}\n\nfunc configureTransport(tr *http.Transport, proto, addr string) *http.Transport {\n\tif tr == nil {\n\t\ttr = &http.Transport{}\n\t}\n\n\tif proto == \"unix\" {\n\t\t\/\/ No need for compression in local communications.\n\t\ttr.DisableCompression = true\n\t\ttr.Dial = func(_, _ string) (net.Conn, error) {\n\t\t\treturn net.Dial(proto, addr)\n\t\t}\n\t} else {\n\t\ttr.Proxy = http.ProxyFromEnvironment\n\t\ttr.Dial = (&net.Dialer{}).Dial\n\t}\n\n\treturn tr\n}\n\n\/\/ NewDefaultClient creates a client with default parameters connecting to UNIX domain socket.\nfunc NewDefaultClient() (*Client, error) {\n\treturn NewClient(\"\")\n}\n\n\/\/ NewClient creates a client for the given `host`.\nfunc NewClient(host string) (*Client, error) {\n\tif host == \"\" {\n\t\t\/\/ Check if environment variable points to socket\n\t\te := os.Getenv(defaults.SockPathEnv)\n\t\tif e == \"\" {\n\t\t\t\/\/ If unset, fall back to default value\n\t\t\te = defaults.SockPath\n\t\t}\n\t\thost = \"unix:\/\/\" + e\n\t}\n\ttmp := strings.SplitN(host, \":\/\/\", 2)\n\tif len(tmp) != 2 {\n\t\treturn nil, fmt.Errorf(\"invalid host format '%s'\", host)\n\t}\n\n\tswitch tmp[0] {\n\tcase \"tcp\":\n\t\tif _, err := url.Parse(\"tcp:\/\/\" + tmp[1]); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\thost = \"http:\/\/\" + tmp[1]\n\tcase \"unix\":\n\t\thost = tmp[1]\n\t}\n\n\ttransport := configureTransport(nil, tmp[0], host)\n\thttpClient := &http.Client{Transport: transport}\n\tclientTrans := runtime_client.NewWithClient(tmp[1], clientapi.DefaultBasePath,\n\t\tclientapi.DefaultSchemes, httpClient)\n\treturn &Client{*clientapi.New(clientTrans, strfmt.Default)}, nil\n}\n\n\/\/ Hint tries to improve the error message displayed to the user.\nfunc Hint(err error) error {\n\tif err == nil {\n\t\treturn err\n\t}\n\te, _ := url.PathUnescape(err.Error())\n\tif strings.Contains(err.Error(), defaults.SockPath) {\n\t\treturn fmt.Errorf(\"%s\\nIs the agent running?\", e)\n\t}\n\treturn fmt.Errorf(\"%s\", e)\n}\n\nfunc connectivityStatusHealthy(cs *models.ConnectivityStatus) bool {\n\treturn cs != nil && cs.Status == \"\"\n}\n\nfunc formatConnectivityStatus(w io.Writer, cs *models.ConnectivityStatus, path, indent string) {\n\tstatus := cs.Status\n\tif connectivityStatusHealthy(cs) {\n\t\tlatency := time.Duration(cs.Latency)\n\t\tstatus = fmt.Sprintf(\"OK, RTT=%s\", latency)\n\t}\n\tfmt.Fprintf(w, \"%s%s:\\t%s\\n\", indent, path, status)\n}\n\nfunc formatPathStatus(w io.Writer, name string, cp *models.PathStatus, indent string, verbose bool) {\n\tif cp == nil {\n\t\tif verbose {\n\t\t\tfmt.Fprintf(w, \"%s%s connectivity:\\tnil\\n\", indent, name)\n\t\t}\n\t\treturn\n\t}\n\tfmt.Fprintf(w, \"%s%s connectivity to %s:\\n\", indent, name, cp.IP)\n\tindent = fmt.Sprintf(\"%s \", indent)\n\n\tif cp.Icmp != nil {\n\t\tformatConnectivityStatus(w, cp.Icmp, \"ICMP\", indent)\n\t}\n\tif cp.HTTP != nil {\n\t\tformatConnectivityStatus(w, cp.HTTP, \"HTTP via L3\", indent)\n\t}\n}\n\n\/\/ PathIsHealthy checks whether ICMP and TCP(HTTP) connectivity to the given\n\/\/ path is available.\nfunc PathIsHealthy(cp *models.PathStatus) bool {\n\tif cp == nil {\n\t\treturn false\n\t}\n\n\tstatuses := []*models.ConnectivityStatus{\n\t\tcp.Icmp,\n\t\tcp.HTTP,\n\t}\n\tfor _, status := range statuses {\n\t\tif !connectivityStatusHealthy(status) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc nodeIsHealthy(node *models.NodeStatus) bool {\n\treturn PathIsHealthy(node.Host.PrimaryAddress) &&\n\t\t(node.Endpoint == nil || PathIsHealthy(node.Endpoint))\n}\n\nfunc nodeIsLocalhost(node *models.NodeStatus, self *models.SelfStatus) bool {\n\treturn self != nil && node.Name == self.Name\n}\n\nfunc formatNodeStatus(w io.Writer, node *models.NodeStatus, printAll, succinct, verbose, localhost bool) {\n\tlocalStr := \"\"\n\tif localhost {\n\t\tlocalStr = \" (localhost)\"\n\t}\n\tif succinct {\n\t\tif printAll || !nodeIsHealthy(node) {\n\t\t\tfmt.Fprintf(w, \" %s%s\\t%s\\t%t\\t%t\\n\", node.Name,\n\t\t\t\tlocalStr, node.Host.PrimaryAddress.IP,\n\t\t\t\tPathIsHealthy(node.Host.PrimaryAddress),\n\t\t\t\tPathIsHealthy(node.Endpoint))\n\t\t}\n\t} else {\n\t\tfmt.Fprintf(w, \" %s%s:\\n\", node.Name, localStr)\n\t\tformatPathStatus(w, \"Host\", node.Host.PrimaryAddress, \" \", verbose)\n\t\tif verbose && len(node.Host.SecondaryAddresses) > 0 {\n\t\t\tfor _, addr := range node.Host.SecondaryAddresses {\n\t\t\t\tformatPathStatus(w, \"Secondary\", addr, \" \", verbose)\n\t\t\t}\n\t\t}\n\t\tformatPathStatus(w, \"Endpoint\", node.Endpoint, \" \", verbose)\n\t}\n}\n\n\/\/ FormatHealthStatusResponse writes a HealthStatusResponse as a string to the\n\/\/ writer.\n\/\/\n\/\/ 'printAll', if true, causes all nodes to be printed regardless of status\n\/\/ 'succinct', if true, causes node health to be output as one line per node\n\/\/ 'verbose', if true, overrides 'succinct' and prints all information\n\/\/ 'maxLines', if nonzero, determines the maximum number of lines to print\nfunc FormatHealthStatusResponse(w io.Writer, sr *models.HealthStatusResponse, printAll, succinct, verbose bool, maxLines int) {\n\tvar (\n\t\thealthy int\n\t\tlocalhost *models.NodeStatus\n\t)\n\tfor _, node := range sr.Nodes {\n\t\tif nodeIsHealthy(node) {\n\t\t\thealthy++\n\t\t}\n\t\tif nodeIsLocalhost(node, sr.Local) {\n\t\t\tlocalhost = node\n\t\t}\n\t}\n\tif succinct {\n\t\tfmt.Fprintf(w, \"Cluster health:\\t%d\/%d reachable\\t(%s)\\n\",\n\t\t\thealthy, len(sr.Nodes), sr.Timestamp)\n\t\tif printAll || healthy < len(sr.Nodes) {\n\t\t\tfmt.Fprintf(w, \" Name\\tIP\\tReachable\\tEndpoints reachable\\n\")\n\t\t}\n\t} else {\n\t\tfmt.Fprintf(w, \"Probe time:\\t%s\\n\", sr.Timestamp)\n\t\tfmt.Fprintf(w, \"Nodes:\\n\")\n\t}\n\n\tif localhost != nil {\n\t\tformatNodeStatus(w, localhost, printAll, succinct, verbose, true)\n\t\tmaxLines--\n\t}\n\n\tnodes := sr.Nodes\n\tsort.Slice(nodes, func(i, j int) bool {\n\t\treturn strings.Compare(nodes[i].Name, nodes[j].Name) < 0\n\t})\n\tfor n, node := range nodes {\n\t\tif maxLines > 0 && n > maxLines {\n\t\t\tbreak\n\t\t}\n\t\tif node == localhost {\n\t\t\tcontinue\n\t\t}\n\t\tformatNodeStatus(w, node, printAll, succinct, verbose, false)\n\t}\n\tif maxLines > 0 && len(sr.Nodes)-healthy > maxLines {\n\t\tfmt.Fprintf(w, \" ...\")\n\t}\n}\n\n\/\/ GetAndFormatHealthStatus fetches the health status from the cilium-health\n\/\/ daemon via the default channel and formats its output as a string to the\n\/\/ writer.\n\/\/\n\/\/ 'succinct', 'verbose' and 'maxLines' are handled the same as in\n\/\/ FormatHealthStatusResponse().\nfunc GetAndFormatHealthStatus(w io.Writer, succinct, verbose bool, maxLines int) {\n\tclient, err := NewClient(\"\")\n\tif err != nil {\n\t\tfmt.Fprintf(w, \"Cluster health:\\t\\t\\tClient error: %s\\n\", err)\n\t\treturn\n\t}\n\thr, err := client.Connectivity.GetStatus(nil)\n\tif err != nil {\n\t\t\/\/ The regular `cilium status` output will print the reason why.\n\t\tfmt.Fprintf(w, \"Cluster health:\\t\\t\\tWarning\\tcilium-health daemon unreachable\\n\")\n\t\treturn\n\t}\n\tFormatHealthStatusResponse(w, hr.Payload, verbose, succinct, verbose, maxLines)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2022 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage planner\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"reflect\"\n\n\tplanv1alpha1 \"github.com\/GoogleContainerTools\/kpt\/pkg\/api\/plan\/v1alpha1\"\n\t\"github.com\/GoogleContainerTools\/kpt\/pkg\/live\"\n\t\"github.com\/GoogleContainerTools\/kpt\/pkg\/status\"\n\t\"github.com\/GoogleContainerTools\/kpt\/thirdparty\/cli-utils\/apply\"\n\tapierrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/meta\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/client-go\/dynamic\"\n\t\"k8s.io\/kubectl\/pkg\/cmd\/util\"\n\t\"sigs.k8s.io\/cli-utils\/pkg\/apply\/event\"\n\t\"sigs.k8s.io\/cli-utils\/pkg\/common\"\n\t\"sigs.k8s.io\/cli-utils\/pkg\/inventory\"\n\t\"sigs.k8s.io\/cli-utils\/pkg\/object\"\n)\n\nfunc NewClusterPlanner(f util.Factory) (*ClusterPlanner, error) {\n\tfetcher, err := NewResourceFetcher(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinvClient, err := inventory.NewClient(f, live.WrapInventoryObj, live.InvToUnstructuredFunc, inventory.StatusPolicyNone)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstatusPoller, err := status.NewStatusPoller(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tapplier, err := apply.NewApplierBuilder().\n\t\tWithFactory(f).\n\t\tWithInventoryClient(invClient).\n\t\tWithStatusPoller(statusPoller).\n\t\tBuild()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ClusterPlanner{\n\t\tapplier: applier,\n\t\tresourceFetcher: fetcher,\n\t}, nil\n}\n\ntype Applier interface {\n\tRun(ctx context.Context, invInfo inventory.Info, objects object.UnstructuredSet, options apply.ApplierOptions) <-chan event.Event\n}\n\ntype ResourceFetcher interface {\n\tFetchResource(ctx context.Context, id object.ObjMetadata) (*unstructured.Unstructured, bool, error)\n}\n\ntype ClusterPlanner struct {\n\tapplier Applier\n\tresourceFetcher ResourceFetcher\n}\n\ntype Options struct {\n\tServerSideOptions common.ServerSideOptions\n}\n\nfunc (r *ClusterPlanner) BuildPlan(ctx context.Context, inv inventory.Info, objects []*unstructured.Unstructured, o Options) (*planv1alpha1.Plan, error) {\n\tactions, err := r.dryRunForPlan(ctx, inv, objects, o)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &planv1alpha1.Plan{\n\t\tResourceMeta: planv1alpha1.ResourceMeta,\n\t\tSpec: planv1alpha1.PlanSpec{\n\t\t\tActions: actions,\n\t\t},\n\t}, nil\n}\n\nfunc (r *ClusterPlanner) dryRunForPlan(ctx context.Context, inv inventory.Info,\n\tobjects []*unstructured.Unstructured, o Options) ([]planv1alpha1.Action, error) {\n\n\teventCh := r.applier.Run(ctx, inv, objects, apply.ApplierOptions{\n\t\tDryRunStrategy: common.DryRunServer,\n\t\tServerSideOptions: o.ServerSideOptions,\n\t})\n\n\tvar actions []planv1alpha1.Action\n\tvar err error\n\tfor e := range eventCh {\n\t\tif e.Type == event.InitType {\n\t\t\t\/\/ This event includes all resources that will be applied, pruned or deleted, so\n\t\t\t\/\/ we make sure we fetch all the resources from the cluster.\n\t\t\t\/\/ TODO: See if we can update the actuation library to provide the pre-actuation\n\t\t\t\/\/ versions of the resources as part of the regular run. This solution is not great\n\t\t\t\/\/ as fetching all resources will take time.\n\t\t\ta, err := r.fetchResources(ctx, e)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tactions = a\n\t\t}\n\t\tif e.Type == event.ErrorType {\n\t\t\t\/\/ Update the err variable here, but wait for the channel to close\n\t\t\t\/\/ before we return from the function.\n\t\t\t\/\/ Since ErrorEvents are considered fatal, there should only be sent\n\t\t\t\/\/ and it will be followed by the channel being closed.\n\t\t\terr = e.ErrorEvent.Err\n\t\t}\n\t\t\/\/ For the Apply, Prune and Delete event types, we just capture the result\n\t\t\/\/ of the dry-run operation for the specific resource.\n\t\tswitch e.Type {\n\t\tcase event.ApplyType:\n\t\t\tid := e.ApplyEvent.Identifier\n\t\t\tindex := indexForIdentifier(id, actions)\n\t\t\ta := actions[index]\n\t\t\tactions[index] = handleApplyEvent(e, a)\n\t\tcase event.PruneType:\n\t\t\tid := e.PruneEvent.Identifier\n\t\t\tindex := indexForIdentifier(id, actions)\n\t\t\ta := actions[index]\n\t\t\tactions[index] = handlePruneEvent(e, a)\n\t\t\/\/ Prune and Delete are essentially the same thing, but the actuation\n\t\t\/\/ library return Prune events when resources are deleted by omission\n\t\t\/\/ during apply, and Delete events from the destroyer. Supporting both\n\t\t\/\/ here for completeness.\n\t\tcase event.DeleteType:\n\t\t\tid := e.DeleteEvent.Identifier\n\t\t\tindex := indexForIdentifier(id, actions)\n\t\t\ta := actions[index]\n\t\t\tactions[index] = handleDeleteEvent(e, a)\n\t\t}\n\t}\n\treturn actions, err\n}\n\nfunc handleApplyEvent(e event.Event, a planv1alpha1.Action) planv1alpha1.Action {\n\tif e.ApplyEvent.Error != nil {\n\t\ta.Type = planv1alpha1.Error\n\t\ta.Error = e.ApplyEvent.Error.Error()\n\t} else {\n\t\tswitch e.ApplyEvent.Operation {\n\t\tcase event.Unchanged:\n\t\t\ta.Type = planv1alpha1.Skip\n\t\tcase event.ServersideApplied:\n\t\t\ta.After = e.ApplyEvent.Resource\n\t\t\tif a.Before != nil {\n\t\t\t\t\/\/ TODO: Unclear if we should diff the full resources here. It doesn't work\n\t\t\t\t\/\/ well with client-side apply as the managedFields property shows up as\n\t\t\t\t\/\/ changes. It also means there is a race with controllers that might change\n\t\t\t\t\/\/ the status of resources.\n\t\t\t\tif reflect.DeepEqual(a.Before, a.After) {\n\t\t\t\t\ta.Type = planv1alpha1.Unchanged\n\t\t\t\t} else {\n\t\t\t\t\ta.Type = planv1alpha1.Update\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\ta.Type = planv1alpha1.Create\n\t\t\t}\n\t\t}\n\t}\n\treturn a\n}\n\nfunc handlePruneEvent(e event.Event, a planv1alpha1.Action) planv1alpha1.Action {\n\tif e.PruneEvent.Error != nil {\n\t\ta.Type = planv1alpha1.Error\n\t\ta.Error = e.PruneEvent.Error.Error()\n\t} else {\n\t\tswitch e.PruneEvent.Operation {\n\t\tcase event.Pruned:\n\t\t\ta.Type = planv1alpha1.Delete\n\t\t\/\/ Lifecycle directives can cause resources to remain in the\n\t\t\/\/ live state even if they would normally be pruned.\n\t\t\/\/ TODO: Handle reason for skipped resources that has recently\n\t\t\/\/ been added to the actuation library.\n\t\tcase event.PruneSkipped:\n\t\t\ta.Type = planv1alpha1.Skip\n\t\t}\n\t}\n\treturn a\n}\n\nfunc handleDeleteEvent(e event.Event, a planv1alpha1.Action) planv1alpha1.Action {\n\tif e.DeleteEvent.Error != nil {\n\t\ta.Type = planv1alpha1.Error\n\t\ta.Error = e.DeleteEvent.Error.Error()\n\t} else {\n\t\tswitch e.DeleteEvent.Operation {\n\t\tcase event.Deleted:\n\t\t\ta.Type = planv1alpha1.Delete\n\t\tcase event.DeleteSkipped:\n\t\t\ta.Type = planv1alpha1.Skip\n\t\t}\n\t}\n\treturn a\n}\n\nfunc (r *ClusterPlanner) fetchResources(ctx context.Context, e event.Event) ([]planv1alpha1.Action, error) {\n\tvar actions []planv1alpha1.Action\n\tfor _, ag := range e.InitEvent.ActionGroups {\n\t\t\/\/ We only care about the Apply, Prune and Delete actions.\n\t\tif !(ag.Action == event.ApplyAction || ag.Action == event.PruneAction || ag.Action == event.DeleteAction) {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, id := range ag.Identifiers {\n\t\t\tu, _, err := r.resourceFetcher.FetchResource(ctx, id)\n\t\t\t\/\/ If the type doesn't exist in the cluster, then the resource itself doesn't exist.\n\t\t\tif err != nil && !meta.IsNoMatchError(err) {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tactions = append(actions, planv1alpha1.Action{\n\t\t\t\tGroup: id.GroupKind.Group,\n\t\t\t\tKind: id.GroupKind.Kind,\n\t\t\t\tName: id.Name,\n\t\t\t\tNamespace: id.Namespace,\n\t\t\t\tBefore: u,\n\t\t\t})\n\t\t}\n\t}\n\treturn actions, nil\n}\n\ntype resourceFetcher struct {\n\tdynamicClient dynamic.Interface\n\tmapper meta.RESTMapper\n}\n\nfunc NewResourceFetcher(f util.Factory) (ResourceFetcher, error) {\n\tdc, err := f.DynamicClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmapper, err := f.ToRESTMapper()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resourceFetcher{\n\t\tdynamicClient: dc,\n\t\tmapper: mapper,\n\t}, nil\n}\n\nfunc (rf *resourceFetcher) FetchResource(ctx context.Context, id object.ObjMetadata) (*unstructured.Unstructured, bool, error) {\n\tmapping, err := rf.mapper.RESTMapping(id.GroupKind)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tvar r dynamic.ResourceInterface\n\tif mapping.Scope == meta.RESTScopeRoot {\n\t\tr = rf.dynamicClient.Resource(mapping.Resource)\n\t} else {\n\t\tr = rf.dynamicClient.Resource(mapping.Resource).Namespace(id.Namespace)\n\t}\n\tu, err := r.Get(ctx, id.Name, metav1.GetOptions{})\n\tif err != nil && !apierrors.IsNotFound(err) {\n\t\treturn nil, false, err\n\t}\n\n\tif apierrors.IsNotFound(err) {\n\t\treturn nil, false, nil\n\t}\n\treturn u, true, nil\n}\n\nfunc indexForIdentifier(id object.ObjMetadata, actions []planv1alpha1.Action) int {\n\tfor i := range actions {\n\t\ta := actions[i]\n\t\tif a.Group == id.GroupKind.Group &&\n\t\t\ta.Kind == id.GroupKind.Kind &&\n\t\t\ta.Name == id.Name &&\n\t\t\ta.Namespace == id.Namespace {\n\t\t\treturn i\n\t\t}\n\t}\n\tpanic(fmt.Errorf(\"unknown identifier %s\", id.String()))\n}\n<commit_msg>chore: fix lint whitespace failure (#3171)<commit_after>\/\/ Copyright 2022 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage planner\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"reflect\"\n\n\tplanv1alpha1 \"github.com\/GoogleContainerTools\/kpt\/pkg\/api\/plan\/v1alpha1\"\n\t\"github.com\/GoogleContainerTools\/kpt\/pkg\/live\"\n\t\"github.com\/GoogleContainerTools\/kpt\/pkg\/status\"\n\t\"github.com\/GoogleContainerTools\/kpt\/thirdparty\/cli-utils\/apply\"\n\tapierrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/meta\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/client-go\/dynamic\"\n\t\"k8s.io\/kubectl\/pkg\/cmd\/util\"\n\t\"sigs.k8s.io\/cli-utils\/pkg\/apply\/event\"\n\t\"sigs.k8s.io\/cli-utils\/pkg\/common\"\n\t\"sigs.k8s.io\/cli-utils\/pkg\/inventory\"\n\t\"sigs.k8s.io\/cli-utils\/pkg\/object\"\n)\n\nfunc NewClusterPlanner(f util.Factory) (*ClusterPlanner, error) {\n\tfetcher, err := NewResourceFetcher(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinvClient, err := inventory.NewClient(f, live.WrapInventoryObj, live.InvToUnstructuredFunc, inventory.StatusPolicyNone)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstatusPoller, err := status.NewStatusPoller(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tapplier, err := apply.NewApplierBuilder().\n\t\tWithFactory(f).\n\t\tWithInventoryClient(invClient).\n\t\tWithStatusPoller(statusPoller).\n\t\tBuild()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &ClusterPlanner{\n\t\tapplier: applier,\n\t\tresourceFetcher: fetcher,\n\t}, nil\n}\n\ntype Applier interface {\n\tRun(ctx context.Context, invInfo inventory.Info, objects object.UnstructuredSet, options apply.ApplierOptions) <-chan event.Event\n}\n\ntype ResourceFetcher interface {\n\tFetchResource(ctx context.Context, id object.ObjMetadata) (*unstructured.Unstructured, bool, error)\n}\n\ntype ClusterPlanner struct {\n\tapplier Applier\n\tresourceFetcher ResourceFetcher\n}\n\ntype Options struct {\n\tServerSideOptions common.ServerSideOptions\n}\n\nfunc (r *ClusterPlanner) BuildPlan(ctx context.Context, inv inventory.Info, objects []*unstructured.Unstructured, o Options) (*planv1alpha1.Plan, error) {\n\tactions, err := r.dryRunForPlan(ctx, inv, objects, o)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &planv1alpha1.Plan{\n\t\tResourceMeta: planv1alpha1.ResourceMeta,\n\t\tSpec: planv1alpha1.PlanSpec{\n\t\t\tActions: actions,\n\t\t},\n\t}, nil\n}\n\nfunc (r *ClusterPlanner) dryRunForPlan(\n\tctx context.Context,\n\tinv inventory.Info,\n\tobjects []*unstructured.Unstructured,\n\to Options,\n) ([]planv1alpha1.Action, error) {\n\teventCh := r.applier.Run(ctx, inv, objects, apply.ApplierOptions{\n\t\tDryRunStrategy: common.DryRunServer,\n\t\tServerSideOptions: o.ServerSideOptions,\n\t})\n\n\tvar actions []planv1alpha1.Action\n\tvar err error\n\tfor e := range eventCh {\n\t\tif e.Type == event.InitType {\n\t\t\t\/\/ This event includes all resources that will be applied, pruned or deleted, so\n\t\t\t\/\/ we make sure we fetch all the resources from the cluster.\n\t\t\t\/\/ TODO: See if we can update the actuation library to provide the pre-actuation\n\t\t\t\/\/ versions of the resources as part of the regular run. This solution is not great\n\t\t\t\/\/ as fetching all resources will take time.\n\t\t\ta, err := r.fetchResources(ctx, e)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tactions = a\n\t\t}\n\t\tif e.Type == event.ErrorType {\n\t\t\t\/\/ Update the err variable here, but wait for the channel to close\n\t\t\t\/\/ before we return from the function.\n\t\t\t\/\/ Since ErrorEvents are considered fatal, there should only be sent\n\t\t\t\/\/ and it will be followed by the channel being closed.\n\t\t\terr = e.ErrorEvent.Err\n\t\t}\n\t\t\/\/ For the Apply, Prune and Delete event types, we just capture the result\n\t\t\/\/ of the dry-run operation for the specific resource.\n\t\tswitch e.Type {\n\t\tcase event.ApplyType:\n\t\t\tid := e.ApplyEvent.Identifier\n\t\t\tindex := indexForIdentifier(id, actions)\n\t\t\ta := actions[index]\n\t\t\tactions[index] = handleApplyEvent(e, a)\n\t\tcase event.PruneType:\n\t\t\tid := e.PruneEvent.Identifier\n\t\t\tindex := indexForIdentifier(id, actions)\n\t\t\ta := actions[index]\n\t\t\tactions[index] = handlePruneEvent(e, a)\n\t\t\/\/ Prune and Delete are essentially the same thing, but the actuation\n\t\t\/\/ library return Prune events when resources are deleted by omission\n\t\t\/\/ during apply, and Delete events from the destroyer. Supporting both\n\t\t\/\/ here for completeness.\n\t\tcase event.DeleteType:\n\t\t\tid := e.DeleteEvent.Identifier\n\t\t\tindex := indexForIdentifier(id, actions)\n\t\t\ta := actions[index]\n\t\t\tactions[index] = handleDeleteEvent(e, a)\n\t\t}\n\t}\n\treturn actions, err\n}\n\nfunc handleApplyEvent(e event.Event, a planv1alpha1.Action) planv1alpha1.Action {\n\tif e.ApplyEvent.Error != nil {\n\t\ta.Type = planv1alpha1.Error\n\t\ta.Error = e.ApplyEvent.Error.Error()\n\t} else {\n\t\tswitch e.ApplyEvent.Operation {\n\t\tcase event.Unchanged:\n\t\t\ta.Type = planv1alpha1.Skip\n\t\tcase event.ServersideApplied:\n\t\t\ta.After = e.ApplyEvent.Resource\n\t\t\tif a.Before != nil {\n\t\t\t\t\/\/ TODO: Unclear if we should diff the full resources here. It doesn't work\n\t\t\t\t\/\/ well with client-side apply as the managedFields property shows up as\n\t\t\t\t\/\/ changes. It also means there is a race with controllers that might change\n\t\t\t\t\/\/ the status of resources.\n\t\t\t\tif reflect.DeepEqual(a.Before, a.After) {\n\t\t\t\t\ta.Type = planv1alpha1.Unchanged\n\t\t\t\t} else {\n\t\t\t\t\ta.Type = planv1alpha1.Update\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\ta.Type = planv1alpha1.Create\n\t\t\t}\n\t\t}\n\t}\n\treturn a\n}\n\nfunc handlePruneEvent(e event.Event, a planv1alpha1.Action) planv1alpha1.Action {\n\tif e.PruneEvent.Error != nil {\n\t\ta.Type = planv1alpha1.Error\n\t\ta.Error = e.PruneEvent.Error.Error()\n\t} else {\n\t\tswitch e.PruneEvent.Operation {\n\t\tcase event.Pruned:\n\t\t\ta.Type = planv1alpha1.Delete\n\t\t\/\/ Lifecycle directives can cause resources to remain in the\n\t\t\/\/ live state even if they would normally be pruned.\n\t\t\/\/ TODO: Handle reason for skipped resources that has recently\n\t\t\/\/ been added to the actuation library.\n\t\tcase event.PruneSkipped:\n\t\t\ta.Type = planv1alpha1.Skip\n\t\t}\n\t}\n\treturn a\n}\n\nfunc handleDeleteEvent(e event.Event, a planv1alpha1.Action) planv1alpha1.Action {\n\tif e.DeleteEvent.Error != nil {\n\t\ta.Type = planv1alpha1.Error\n\t\ta.Error = e.DeleteEvent.Error.Error()\n\t} else {\n\t\tswitch e.DeleteEvent.Operation {\n\t\tcase event.Deleted:\n\t\t\ta.Type = planv1alpha1.Delete\n\t\tcase event.DeleteSkipped:\n\t\t\ta.Type = planv1alpha1.Skip\n\t\t}\n\t}\n\treturn a\n}\n\nfunc (r *ClusterPlanner) fetchResources(ctx context.Context, e event.Event) ([]planv1alpha1.Action, error) {\n\tvar actions []planv1alpha1.Action\n\tfor _, ag := range e.InitEvent.ActionGroups {\n\t\t\/\/ We only care about the Apply, Prune and Delete actions.\n\t\tif !(ag.Action == event.ApplyAction || ag.Action == event.PruneAction || ag.Action == event.DeleteAction) {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, id := range ag.Identifiers {\n\t\t\tu, _, err := r.resourceFetcher.FetchResource(ctx, id)\n\t\t\t\/\/ If the type doesn't exist in the cluster, then the resource itself doesn't exist.\n\t\t\tif err != nil && !meta.IsNoMatchError(err) {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tactions = append(actions, planv1alpha1.Action{\n\t\t\t\tGroup: id.GroupKind.Group,\n\t\t\t\tKind: id.GroupKind.Kind,\n\t\t\t\tName: id.Name,\n\t\t\t\tNamespace: id.Namespace,\n\t\t\t\tBefore: u,\n\t\t\t})\n\t\t}\n\t}\n\treturn actions, nil\n}\n\ntype resourceFetcher struct {\n\tdynamicClient dynamic.Interface\n\tmapper meta.RESTMapper\n}\n\nfunc NewResourceFetcher(f util.Factory) (ResourceFetcher, error) {\n\tdc, err := f.DynamicClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmapper, err := f.ToRESTMapper()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resourceFetcher{\n\t\tdynamicClient: dc,\n\t\tmapper: mapper,\n\t}, nil\n}\n\nfunc (rf *resourceFetcher) FetchResource(ctx context.Context, id object.ObjMetadata) (*unstructured.Unstructured, bool, error) {\n\tmapping, err := rf.mapper.RESTMapping(id.GroupKind)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tvar r dynamic.ResourceInterface\n\tif mapping.Scope == meta.RESTScopeRoot {\n\t\tr = rf.dynamicClient.Resource(mapping.Resource)\n\t} else {\n\t\tr = rf.dynamicClient.Resource(mapping.Resource).Namespace(id.Namespace)\n\t}\n\tu, err := r.Get(ctx, id.Name, metav1.GetOptions{})\n\tif err != nil && !apierrors.IsNotFound(err) {\n\t\treturn nil, false, err\n\t}\n\n\tif apierrors.IsNotFound(err) {\n\t\treturn nil, false, nil\n\t}\n\treturn u, true, nil\n}\n\nfunc indexForIdentifier(id object.ObjMetadata, actions []planv1alpha1.Action) int {\n\tfor i := range actions {\n\t\ta := actions[i]\n\t\tif a.Group == id.GroupKind.Group &&\n\t\t\ta.Kind == id.GroupKind.Kind &&\n\t\t\ta.Name == id.Name &&\n\t\t\ta.Namespace == id.Namespace {\n\t\t\treturn i\n\t\t}\n\t}\n\tpanic(fmt.Errorf(\"unknown identifier %s\", id.String()))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 zebra project\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/coreswitch\/component\"\n\t\"github.com\/coreswitch\/zebra\/pkg\/packet\/bgp\"\n\t\"github.com\/coreswitch\/zebra\/pkg\/server\/module\"\n)\n\nfunc main() {\n\tfmt.Println(\"Starting bgpd\")\n\tmodule.Init()\n\n\tserver := bgp.NewServer(65100)\n\n\tserverComponent := &bgp.ServerComponent{\n\t\tServer: server,\n\t}\n\tgrpcComponent := &bgp.GrpcComponent{\n\t\tServer: server,\n\t}\n\n\tsystemMap := component.SystemMap{\n\t\t\"server\": serverComponent,\n\t\t\"grpc\": component.ComponentWith(grpcComponent, \"server\"),\n\t}\n\tsystemMap.Start()\n\n\terr := server.RouterIdSet(\"192.168.55.1\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\terr = server.NeighborAdd(\"192.168.55.2\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\terr = server.NeighborRemoteAsSet(\"192.168.55.2\", 65520)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\terr = server.NeighborAfiSafiSet(\"192.168.55.2\", bgp.AFI_IP, bgp.SAFI_UNICAST)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\terr = server.NeighborAfiSafiSet(\"192.168.55.2\", bgp.AFI_IP6, bgp.SAFI_UNICAST)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tch := make(chan struct{})\n\t<-ch\n}\n<commit_msg>Remove old bgpd's main.<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gofer\n\nimport (\n\t\"fmt\"\n\t\"syscall\"\n\n\t\"gvisor.googlesource.com\/gvisor\/pkg\/p9\"\n\t\"gvisor.googlesource.com\/gvisor\/pkg\/sentry\/context\"\n\t\"gvisor.googlesource.com\/gvisor\/pkg\/sentry\/device\"\n\t\"gvisor.googlesource.com\/gvisor\/pkg\/sentry\/fs\"\n\t\"gvisor.googlesource.com\/gvisor\/pkg\/syserror\"\n\t\"gvisor.googlesource.com\/gvisor\/pkg\/tcpip\/transport\/unix\"\n)\n\n\/\/ Lookup loads an Inode at name into a Dirent based on the session's cache\n\/\/ policy.\nfunc (i *inodeOperations) Lookup(ctx context.Context, dir *fs.Inode, name string) (*fs.Dirent, error) {\n\tif i.session().cachePolicy != cacheNone {\n\t\t\/\/ Check to see if we have readdirCache that indicates the\n\t\t\/\/ child does not exist. Avoid holding readdirMu longer than\n\t\t\/\/ we need to.\n\t\ti.readdirMu.Lock()\n\t\tif i.readdirCache != nil && !i.readdirCache.Contains(name) {\n\t\t\t\/\/ No such child. Return a negative dirent.\n\t\t\ti.readdirMu.Unlock()\n\t\t\treturn fs.NewNegativeDirent(name), nil\n\t\t}\n\t\ti.readdirMu.Unlock()\n\t}\n\n\t\/\/ Get a p9.File for name.\n\tqids, newFile, mask, p9attr, err := i.fileState.file.walkGetAttr(ctx, []string{name})\n\tif err != nil {\n\t\tif err == syscall.ENOENT {\n\t\t\tif i.session().cachePolicy != cacheNone {\n\t\t\t\t\/\/ Return a negative Dirent. It will stay cached until something\n\t\t\t\t\/\/ is created over it.\n\t\t\t\treturn fs.NewNegativeDirent(name), nil\n\t\t\t}\n\t\t\treturn nil, syserror.ENOENT\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t\/\/ Construct the Inode operations.\n\tsattr, node := newInodeOperations(ctx, i.fileState.s, newFile, qids[0], mask, p9attr)\n\n\t\/\/ Construct a positive Dirent.\n\treturn fs.NewDirent(fs.NewInode(node, dir.MountSource, sattr), name), nil\n}\n\n\/\/ Creates a new Inode at name and returns its File based on the session's cache policy.\n\/\/\n\/\/ Ownership is currently ignored.\nfunc (i *inodeOperations) Create(ctx context.Context, dir *fs.Inode, name string, flags fs.FileFlags, perm fs.FilePermissions) (*fs.File, error) {\n\t\/\/ Create replaces the directory fid with the newly created\/opened\n\t\/\/ file, so clone this directory so it doesn't change out from under\n\t\/\/ this node.\n\t_, newFile, err := i.fileState.file.walk(ctx, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Map the FileFlags to p9 OpenFlags.\n\tvar openFlags p9.OpenFlags\n\tswitch {\n\tcase flags.Read && flags.Write:\n\t\topenFlags = p9.ReadWrite\n\tcase flags.Read:\n\t\topenFlags = p9.ReadOnly\n\tcase flags.Write:\n\t\topenFlags = p9.WriteOnly\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Create called with unknown or unset open flags: %v\", flags))\n\t}\n\n\towner := fs.FileOwnerFromContext(ctx)\n\thostFile, err := newFile.create(ctx, name, openFlags, p9.FileMode(perm.LinuxMode()), p9.UID(owner.UID), p9.GID(owner.GID))\n\tif err != nil {\n\t\t\/\/ Could not create the file.\n\t\treturn nil, err\n\t}\n\n\ti.touchModificationTime(ctx)\n\n\t\/\/ Get the attributes of the file.\n\tqid, mask, p9attr, err := getattr(ctx, newFile)\n\tif err != nil {\n\t\tnewFile.close(ctx)\n\t\treturn nil, err\n\t}\n\n\t\/\/ Get an unopened p9.File for the file we created so that it can be\n\t\/\/ cloned and re-opened multiple times after creation.\n\t_, unopened, err := i.fileState.file.walk(ctx, []string{name})\n\tif err != nil {\n\t\tnewFile.close(ctx)\n\t\treturn nil, err\n\t}\n\n\t\/\/ Construct the InodeOperations.\n\tsattr, iops := newInodeOperations(ctx, i.fileState.s, unopened, qid, mask, p9attr)\n\n\t\/\/ Construct the positive Dirent.\n\td := fs.NewDirent(fs.NewInode(iops, dir.MountSource, sattr), name)\n\tdefer d.DecRef()\n\n\t\/\/ Construct the new file, caching the handles if allowed.\n\th := &handles{\n\t\tFile: newFile,\n\t\tHost: hostFile,\n\t}\n\tif isFileCachable(iops.session(), d.Inode) {\n\t\tiops.fileState.setHandlesForCachedIO(flags, h)\n\t}\n\treturn NewFile(ctx, d, flags, iops, h), nil\n}\n\n\/\/ CreateLink uses Create to create a symlink between oldname and newname.\nfunc (i *inodeOperations) CreateLink(ctx context.Context, dir *fs.Inode, oldname string, newname string) error {\n\towner := fs.FileOwnerFromContext(ctx)\n\tif _, err := i.fileState.file.symlink(ctx, oldname, newname, p9.UID(owner.UID), p9.GID(owner.GID)); err != nil {\n\t\treturn err\n\t}\n\ti.touchModificationTime(ctx)\n\treturn nil\n}\n\n\/\/ CreateHardLink implements InodeOperations.CreateHardLink.\nfunc (i *inodeOperations) CreateHardLink(ctx context.Context, _ *fs.Inode, target *fs.Inode, newName string) error {\n\ttargetOpts, ok := target.InodeOperations.(*inodeOperations)\n\tif !ok {\n\t\treturn syscall.EXDEV\n\t}\n\n\tif err := i.fileState.file.link(ctx, &targetOpts.fileState.file, newName); err != nil {\n\t\treturn err\n\t}\n\t\/\/ TODO: Don't increase link count because we can't properly accounts for links\n\t\/\/ with gofers.\n\ti.touchModificationTime(ctx)\n\treturn nil\n}\n\n\/\/ CreateDirectory uses Create to create a directory named s under inodeOperations.\nfunc (i *inodeOperations) CreateDirectory(ctx context.Context, dir *fs.Inode, s string, perm fs.FilePermissions) error {\n\towner := fs.FileOwnerFromContext(ctx)\n\tif _, err := i.fileState.file.mkdir(ctx, s, p9.FileMode(perm.LinuxMode()), p9.UID(owner.UID), p9.GID(owner.GID)); err != nil {\n\t\treturn err\n\t}\n\tif i.session().cachePolicy == cacheAll {\n\t\t\/\/ Increase link count.\n\t\ti.cachingInodeOps.IncLinks(ctx)\n\n\t\t\/\/ Invalidate readdir cache.\n\t\ti.markDirectoryDirty()\n\t}\n\treturn nil\n}\n\n\/\/ Bind implements InodeOperations.\nfunc (i *inodeOperations) Bind(ctx context.Context, dir *fs.Inode, name string, ep unix.BoundEndpoint, perm fs.FilePermissions) error {\n\tif i.session().endpoints == nil {\n\t\treturn syscall.EOPNOTSUPP\n\t}\n\n\t\/\/ Create replaces the directory fid with the newly created\/opened\n\t\/\/ file, so clone this directory so it doesn't change out from under\n\t\/\/ this node.\n\t_, newFile, err := i.fileState.file.walk(ctx, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Stabilize the endpoint map while creation is in progress.\n\tunlock := i.session().endpoints.lock()\n\tdefer unlock()\n\n\t\/\/ Create a regular file in the gofer and then mark it as a socket by\n\t\/\/ adding this inode key in the 'endpoints' map.\n\towner := fs.FileOwnerFromContext(ctx)\n\thostFile, err := newFile.create(ctx, name, p9.ReadWrite, p9.FileMode(perm.LinuxMode()), p9.UID(owner.UID), p9.GID(owner.GID))\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ We're not going to use this file.\n\thostFile.Close()\n\n\ti.touchModificationTime(ctx)\n\n\t\/\/ Get the attributes of the file to create inode key.\n\tqid, _, attr, err := getattr(ctx, newFile)\n\tif err != nil {\n\t\tnewFile.close(ctx)\n\t\treturn err\n\t}\n\n\tkey := device.MultiDeviceKey{\n\t\tDevice: attr.RDev,\n\t\tSecondaryDevice: i.session().connID,\n\t\tInode: qid.Path,\n\t}\n\ti.session().endpoints.add(key, ep)\n\n\treturn nil\n}\n\n\/\/ CreateFifo implements fs.InodeOperations.CreateFifo. Gofer nodes do not support the\n\/\/ creation of fifos and always returns EOPNOTSUPP.\nfunc (*inodeOperations) CreateFifo(context.Context, *fs.Inode, string, fs.FilePermissions) error {\n\treturn syscall.EOPNOTSUPP\n}\n\n\/\/ Remove implements InodeOperations.Remove.\nfunc (i *inodeOperations) Remove(ctx context.Context, dir *fs.Inode, name string) error {\n\tvar key device.MultiDeviceKey\n\tremoveSocket := false\n\tif i.session().endpoints != nil {\n\t\t\/\/ Find out if file being deleted is a socket that needs to be\n\t\t\/\/ removed from endpoint map.\n\t\tif d, err := i.Lookup(ctx, dir, name); err == nil {\n\t\t\tdefer d.DecRef()\n\t\t\tif fs.IsSocket(d.Inode.StableAttr) {\n\t\t\t\tchild := d.Inode.InodeOperations.(*inodeOperations)\n\t\t\t\tkey = child.fileState.key\n\t\t\t\tremoveSocket = true\n\n\t\t\t\t\/\/ Stabilize the endpoint map while deletion is in progress.\n\t\t\t\tunlock := i.session().endpoints.lock()\n\t\t\t\tdefer unlock()\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := i.fileState.file.unlinkAt(ctx, name, 0); err != nil {\n\t\treturn err\n\t}\n\tif removeSocket {\n\t\ti.session().endpoints.remove(key)\n\t}\n\ti.touchModificationTime(ctx)\n\n\treturn nil\n}\n\n\/\/ Remove implements InodeOperations.RemoveDirectory.\nfunc (i *inodeOperations) RemoveDirectory(ctx context.Context, dir *fs.Inode, name string) error {\n\t\/\/ 0x200 = AT_REMOVEDIR.\n\tif err := i.fileState.file.unlinkAt(ctx, name, 0x200); err != nil {\n\t\treturn err\n\t}\n\tif i.session().cachePolicy == cacheAll {\n\t\t\/\/ Decrease link count and updates atime.\n\t\ti.cachingInodeOps.DecLinks(ctx)\n\n\t\t\/\/ Invalidate readdir cache.\n\t\ti.markDirectoryDirty()\n\t}\n\treturn nil\n}\n\n\/\/ Rename renames this node.\nfunc (i *inodeOperations) Rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string) error {\n\t\/\/ Unwrap the new parent to a *inodeOperations.\n\tnewParentInodeOperations, ok := newParent.InodeOperations.(*inodeOperations)\n\tif !ok {\n\t\treturn syscall.EXDEV\n\t}\n\n\t\/\/ Unwrap the old parent to a *inodeOperations.\n\toldParentInodeOperations, ok := oldParent.InodeOperations.(*inodeOperations)\n\tif !ok {\n\t\treturn syscall.EXDEV\n\t}\n\n\t\/\/ Do the rename.\n\tif err := i.fileState.file.rename(ctx, newParentInodeOperations.fileState.file, newName); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Update cached state.\n\tif i.session().cachePolicy == cacheAll {\n\t\t\/\/ Is the renamed entity a directory? Fix link counts.\n\t\tif fs.IsDir(i.fileState.sattr) {\n\t\t\toldParentInodeOperations.cachingInodeOps.DecLinks(ctx)\n\t\t\tnewParentInodeOperations.cachingInodeOps.IncLinks(ctx)\n\t\t}\n\n\t\t\/\/ Mark old directory dirty.\n\t\toldParentInodeOperations.markDirectoryDirty()\n\t\tif oldParent != newParent {\n\t\t\t\/\/ Mark new directory dirty.\n\t\t\tnewParentInodeOperations.markDirectoryDirty()\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (i *inodeOperations) touchModificationTime(ctx context.Context) {\n\tif i.session().cachePolicy == cacheAll {\n\t\ti.cachingInodeOps.TouchModificationTime(ctx)\n\n\t\t\/\/ Invalidate readdir cache.\n\t\ti.markDirectoryDirty()\n\t}\n}\n\n\/\/ markDirectoryDirty marks any cached data dirty for this directory. This is necessary in order\n\/\/ to ensure that this node does not retain stale state throughout its lifetime across multiple\n\/\/ open directory handles.\n\/\/\n\/\/ Currently this means invalidating any readdir caches.\nfunc (i *inodeOperations) markDirectoryDirty() {\n\ti.readdirMu.Lock()\n\tdefer i.readdirMu.Unlock()\n\ti.readdirCache = nil\n}\n<commit_msg>Increment link count in CreateHardlink<commit_after>\/\/ Copyright 2018 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gofer\n\nimport (\n\t\"fmt\"\n\t\"syscall\"\n\n\t\"gvisor.googlesource.com\/gvisor\/pkg\/p9\"\n\t\"gvisor.googlesource.com\/gvisor\/pkg\/sentry\/context\"\n\t\"gvisor.googlesource.com\/gvisor\/pkg\/sentry\/device\"\n\t\"gvisor.googlesource.com\/gvisor\/pkg\/sentry\/fs\"\n\t\"gvisor.googlesource.com\/gvisor\/pkg\/syserror\"\n\t\"gvisor.googlesource.com\/gvisor\/pkg\/tcpip\/transport\/unix\"\n)\n\n\/\/ Lookup loads an Inode at name into a Dirent based on the session's cache\n\/\/ policy.\nfunc (i *inodeOperations) Lookup(ctx context.Context, dir *fs.Inode, name string) (*fs.Dirent, error) {\n\tif i.session().cachePolicy != cacheNone {\n\t\t\/\/ Check to see if we have readdirCache that indicates the\n\t\t\/\/ child does not exist. Avoid holding readdirMu longer than\n\t\t\/\/ we need to.\n\t\ti.readdirMu.Lock()\n\t\tif i.readdirCache != nil && !i.readdirCache.Contains(name) {\n\t\t\t\/\/ No such child. Return a negative dirent.\n\t\t\ti.readdirMu.Unlock()\n\t\t\treturn fs.NewNegativeDirent(name), nil\n\t\t}\n\t\ti.readdirMu.Unlock()\n\t}\n\n\t\/\/ Get a p9.File for name.\n\tqids, newFile, mask, p9attr, err := i.fileState.file.walkGetAttr(ctx, []string{name})\n\tif err != nil {\n\t\tif err == syscall.ENOENT {\n\t\t\tif i.session().cachePolicy != cacheNone {\n\t\t\t\t\/\/ Return a negative Dirent. It will stay cached until something\n\t\t\t\t\/\/ is created over it.\n\t\t\t\treturn fs.NewNegativeDirent(name), nil\n\t\t\t}\n\t\t\treturn nil, syserror.ENOENT\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t\/\/ Construct the Inode operations.\n\tsattr, node := newInodeOperations(ctx, i.fileState.s, newFile, qids[0], mask, p9attr)\n\n\t\/\/ Construct a positive Dirent.\n\treturn fs.NewDirent(fs.NewInode(node, dir.MountSource, sattr), name), nil\n}\n\n\/\/ Creates a new Inode at name and returns its File based on the session's cache policy.\n\/\/\n\/\/ Ownership is currently ignored.\nfunc (i *inodeOperations) Create(ctx context.Context, dir *fs.Inode, name string, flags fs.FileFlags, perm fs.FilePermissions) (*fs.File, error) {\n\t\/\/ Create replaces the directory fid with the newly created\/opened\n\t\/\/ file, so clone this directory so it doesn't change out from under\n\t\/\/ this node.\n\t_, newFile, err := i.fileState.file.walk(ctx, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Map the FileFlags to p9 OpenFlags.\n\tvar openFlags p9.OpenFlags\n\tswitch {\n\tcase flags.Read && flags.Write:\n\t\topenFlags = p9.ReadWrite\n\tcase flags.Read:\n\t\topenFlags = p9.ReadOnly\n\tcase flags.Write:\n\t\topenFlags = p9.WriteOnly\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Create called with unknown or unset open flags: %v\", flags))\n\t}\n\n\towner := fs.FileOwnerFromContext(ctx)\n\thostFile, err := newFile.create(ctx, name, openFlags, p9.FileMode(perm.LinuxMode()), p9.UID(owner.UID), p9.GID(owner.GID))\n\tif err != nil {\n\t\t\/\/ Could not create the file.\n\t\treturn nil, err\n\t}\n\n\ti.touchModificationTime(ctx)\n\n\t\/\/ Get the attributes of the file.\n\tqid, mask, p9attr, err := getattr(ctx, newFile)\n\tif err != nil {\n\t\tnewFile.close(ctx)\n\t\treturn nil, err\n\t}\n\n\t\/\/ Get an unopened p9.File for the file we created so that it can be\n\t\/\/ cloned and re-opened multiple times after creation.\n\t_, unopened, err := i.fileState.file.walk(ctx, []string{name})\n\tif err != nil {\n\t\tnewFile.close(ctx)\n\t\treturn nil, err\n\t}\n\n\t\/\/ Construct the InodeOperations.\n\tsattr, iops := newInodeOperations(ctx, i.fileState.s, unopened, qid, mask, p9attr)\n\n\t\/\/ Construct the positive Dirent.\n\td := fs.NewDirent(fs.NewInode(iops, dir.MountSource, sattr), name)\n\tdefer d.DecRef()\n\n\t\/\/ Construct the new file, caching the handles if allowed.\n\th := &handles{\n\t\tFile: newFile,\n\t\tHost: hostFile,\n\t}\n\tif isFileCachable(iops.session(), d.Inode) {\n\t\tiops.fileState.setHandlesForCachedIO(flags, h)\n\t}\n\treturn NewFile(ctx, d, flags, iops, h), nil\n}\n\n\/\/ CreateLink uses Create to create a symlink between oldname and newname.\nfunc (i *inodeOperations) CreateLink(ctx context.Context, dir *fs.Inode, oldname string, newname string) error {\n\towner := fs.FileOwnerFromContext(ctx)\n\tif _, err := i.fileState.file.symlink(ctx, oldname, newname, p9.UID(owner.UID), p9.GID(owner.GID)); err != nil {\n\t\treturn err\n\t}\n\ti.touchModificationTime(ctx)\n\treturn nil\n}\n\n\/\/ CreateHardLink implements InodeOperations.CreateHardLink.\nfunc (i *inodeOperations) CreateHardLink(ctx context.Context, _ *fs.Inode, target *fs.Inode, newName string) error {\n\ttargetOpts, ok := target.InodeOperations.(*inodeOperations)\n\tif !ok {\n\t\treturn syscall.EXDEV\n\t}\n\n\tif err := i.fileState.file.link(ctx, &targetOpts.fileState.file, newName); err != nil {\n\t\treturn err\n\t}\n\tif i.session().cachePolicy == cacheAll {\n\t\t\/\/ Increase link count.\n\t\ttargetOpts.cachingInodeOps.IncLinks(ctx)\n\t}\n\ti.touchModificationTime(ctx)\n\treturn nil\n}\n\n\/\/ CreateDirectory uses Create to create a directory named s under inodeOperations.\nfunc (i *inodeOperations) CreateDirectory(ctx context.Context, dir *fs.Inode, s string, perm fs.FilePermissions) error {\n\towner := fs.FileOwnerFromContext(ctx)\n\tif _, err := i.fileState.file.mkdir(ctx, s, p9.FileMode(perm.LinuxMode()), p9.UID(owner.UID), p9.GID(owner.GID)); err != nil {\n\t\treturn err\n\t}\n\tif i.session().cachePolicy == cacheAll {\n\t\t\/\/ Increase link count.\n\t\ti.cachingInodeOps.IncLinks(ctx)\n\n\t\t\/\/ Invalidate readdir cache.\n\t\ti.markDirectoryDirty()\n\t}\n\treturn nil\n}\n\n\/\/ Bind implements InodeOperations.\nfunc (i *inodeOperations) Bind(ctx context.Context, dir *fs.Inode, name string, ep unix.BoundEndpoint, perm fs.FilePermissions) error {\n\tif i.session().endpoints == nil {\n\t\treturn syscall.EOPNOTSUPP\n\t}\n\n\t\/\/ Create replaces the directory fid with the newly created\/opened\n\t\/\/ file, so clone this directory so it doesn't change out from under\n\t\/\/ this node.\n\t_, newFile, err := i.fileState.file.walk(ctx, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Stabilize the endpoint map while creation is in progress.\n\tunlock := i.session().endpoints.lock()\n\tdefer unlock()\n\n\t\/\/ Create a regular file in the gofer and then mark it as a socket by\n\t\/\/ adding this inode key in the 'endpoints' map.\n\towner := fs.FileOwnerFromContext(ctx)\n\thostFile, err := newFile.create(ctx, name, p9.ReadWrite, p9.FileMode(perm.LinuxMode()), p9.UID(owner.UID), p9.GID(owner.GID))\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ We're not going to use this file.\n\thostFile.Close()\n\n\ti.touchModificationTime(ctx)\n\n\t\/\/ Get the attributes of the file to create inode key.\n\tqid, _, attr, err := getattr(ctx, newFile)\n\tif err != nil {\n\t\tnewFile.close(ctx)\n\t\treturn err\n\t}\n\n\tkey := device.MultiDeviceKey{\n\t\tDevice: attr.RDev,\n\t\tSecondaryDevice: i.session().connID,\n\t\tInode: qid.Path,\n\t}\n\ti.session().endpoints.add(key, ep)\n\n\treturn nil\n}\n\n\/\/ CreateFifo implements fs.InodeOperations.CreateFifo. Gofer nodes do not support the\n\/\/ creation of fifos and always returns EOPNOTSUPP.\nfunc (*inodeOperations) CreateFifo(context.Context, *fs.Inode, string, fs.FilePermissions) error {\n\treturn syscall.EOPNOTSUPP\n}\n\n\/\/ Remove implements InodeOperations.Remove.\nfunc (i *inodeOperations) Remove(ctx context.Context, dir *fs.Inode, name string) error {\n\tvar key device.MultiDeviceKey\n\tremoveSocket := false\n\tif i.session().endpoints != nil {\n\t\t\/\/ Find out if file being deleted is a socket that needs to be\n\t\t\/\/ removed from endpoint map.\n\t\tif d, err := i.Lookup(ctx, dir, name); err == nil {\n\t\t\tdefer d.DecRef()\n\t\t\tif fs.IsSocket(d.Inode.StableAttr) {\n\t\t\t\tchild := d.Inode.InodeOperations.(*inodeOperations)\n\t\t\t\tkey = child.fileState.key\n\t\t\t\tremoveSocket = true\n\n\t\t\t\t\/\/ Stabilize the endpoint map while deletion is in progress.\n\t\t\t\tunlock := i.session().endpoints.lock()\n\t\t\t\tdefer unlock()\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := i.fileState.file.unlinkAt(ctx, name, 0); err != nil {\n\t\treturn err\n\t}\n\tif removeSocket {\n\t\ti.session().endpoints.remove(key)\n\t}\n\ti.touchModificationTime(ctx)\n\n\treturn nil\n}\n\n\/\/ Remove implements InodeOperations.RemoveDirectory.\nfunc (i *inodeOperations) RemoveDirectory(ctx context.Context, dir *fs.Inode, name string) error {\n\t\/\/ 0x200 = AT_REMOVEDIR.\n\tif err := i.fileState.file.unlinkAt(ctx, name, 0x200); err != nil {\n\t\treturn err\n\t}\n\tif i.session().cachePolicy == cacheAll {\n\t\t\/\/ Decrease link count and updates atime.\n\t\ti.cachingInodeOps.DecLinks(ctx)\n\n\t\t\/\/ Invalidate readdir cache.\n\t\ti.markDirectoryDirty()\n\t}\n\treturn nil\n}\n\n\/\/ Rename renames this node.\nfunc (i *inodeOperations) Rename(ctx context.Context, oldParent *fs.Inode, oldName string, newParent *fs.Inode, newName string) error {\n\t\/\/ Unwrap the new parent to a *inodeOperations.\n\tnewParentInodeOperations, ok := newParent.InodeOperations.(*inodeOperations)\n\tif !ok {\n\t\treturn syscall.EXDEV\n\t}\n\n\t\/\/ Unwrap the old parent to a *inodeOperations.\n\toldParentInodeOperations, ok := oldParent.InodeOperations.(*inodeOperations)\n\tif !ok {\n\t\treturn syscall.EXDEV\n\t}\n\n\t\/\/ Do the rename.\n\tif err := i.fileState.file.rename(ctx, newParentInodeOperations.fileState.file, newName); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Update cached state.\n\tif i.session().cachePolicy == cacheAll {\n\t\t\/\/ Is the renamed entity a directory? Fix link counts.\n\t\tif fs.IsDir(i.fileState.sattr) {\n\t\t\toldParentInodeOperations.cachingInodeOps.DecLinks(ctx)\n\t\t\tnewParentInodeOperations.cachingInodeOps.IncLinks(ctx)\n\t\t}\n\n\t\t\/\/ Mark old directory dirty.\n\t\toldParentInodeOperations.markDirectoryDirty()\n\t\tif oldParent != newParent {\n\t\t\t\/\/ Mark new directory dirty.\n\t\t\tnewParentInodeOperations.markDirectoryDirty()\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (i *inodeOperations) touchModificationTime(ctx context.Context) {\n\tif i.session().cachePolicy == cacheAll {\n\t\ti.cachingInodeOps.TouchModificationTime(ctx)\n\n\t\t\/\/ Invalidate readdir cache.\n\t\ti.markDirectoryDirty()\n\t}\n}\n\n\/\/ markDirectoryDirty marks any cached data dirty for this directory. This is necessary in order\n\/\/ to ensure that this node does not retain stale state throughout its lifetime across multiple\n\/\/ open directory handles.\n\/\/\n\/\/ Currently this means invalidating any readdir caches.\nfunc (i *inodeOperations) markDirectoryDirty() {\n\ti.readdirMu.Lock()\n\tdefer i.readdirMu.Unlock()\n\ti.readdirCache = nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage event\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/config\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/event\/proto\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/schema\/latest\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/version\"\n\t\"github.com\/golang\/protobuf\/ptypes\"\n\t\"github.com\/pkg\/errors\"\n\t\"google.golang.org\/grpc\"\n)\n\nconst (\n\tNotStarted = \"Not Started\"\n\tInProgress = \"In Progress\"\n\tComplete = \"Complete\"\n\tFailed = \"Failed\"\n)\n\nvar (\n\tev *eventHandler\n\tonce sync.Once\n\tpluginMode bool\n\n\tcli proto.SkaffoldServiceClient \/\/ for plugin RPC connections\n)\n\ntype eventHandler struct {\n\teventLog []proto.LogEntry\n\tlogLock sync.Mutex\n\n\tstate *proto.State\n\tstateLock sync.Mutex\n\n\tlisteners []chan proto.LogEntry\n}\n\nfunc (ev *eventHandler) RegisterListener(listener chan proto.LogEntry) {\n\tev.listeners = append(ev.listeners, listener)\n}\n\nfunc (ev *eventHandler) logEvent(entry proto.LogEntry) {\n\tev.logLock.Lock()\n\n\tfor _, c := range ev.listeners {\n\t\tc <- entry\n\t}\n\tev.eventLog = append(ev.eventLog, entry)\n\n\tev.logLock.Unlock()\n}\n\nfunc (ev *eventHandler) forEachEvent(callback func(*proto.LogEntry) error) error {\n\tc := make(chan proto.LogEntry)\n\n\tev.logLock.Lock()\n\n\tfor _, entry := range ev.eventLog {\n\t\tif err := callback(&entry); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tev.RegisterListener(c)\n\n\tev.logLock.Unlock()\n\n\tfor {\n\t\tentry := <-c\n\t\tif err := callback(&entry); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\n\/\/ InitializeState instantiates the global state of the skaffold runner, as well as the event log.\n\/\/ It returns a shutdown callback for tearing down the grpc server, which the runner is responsible for calling.\n\/\/ This function can only be called once.\nfunc InitializeState(build *latest.BuildConfig, deploy *latest.DeployConfig, opts *config.SkaffoldOptions) (func() error, error) {\n\tvar err error\n\tserverShutdown := func() error { return nil }\n\tonce.Do(func() {\n\t\tbuilds := map[string]string{}\n\t\tdeploys := map[string]string{}\n\t\tif build != nil {\n\t\t\tfor _, a := range build.Artifacts {\n\t\t\t\tbuilds[a.ImageName] = NotStarted\n\t\t\t\tdeploys[a.ImageName] = NotStarted\n\t\t\t}\n\t\t}\n\n\t\tev = &eventHandler{\n\t\t\tstate: &proto.State{\n\t\t\t\tBuildState: &proto.BuildState{\n\t\t\t\t\tArtifacts: builds,\n\t\t\t\t},\n\t\t\t\tDeployState: &proto.DeployState{\n\t\t\t\t\tStatus: NotStarted,\n\t\t\t\t},\n\t\t\t\tForwardedPorts: make(map[string]*proto.PortEvent),\n\t\t\t},\n\t\t}\n\n\t\tif opts.EnableRPC {\n\t\t\tserverShutdown, err = newStatusServer(opts.RPCPort)\n\t\t\tif err != nil {\n\t\t\t\terr = errors.Wrap(err, \"creating status server\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n\treturn serverShutdown, err\n}\n\nfunc SetupRPCClient(opts *config.SkaffoldOptions) error {\n\tpluginMode = true\n\tconn, err := grpc.Dial(fmt.Sprintf(\":%d\", opts.RPCPort), grpc.WithInsecure())\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"opening gRPC connection to remote skaffold process\")\n\t}\n\tcli = proto.NewSkaffoldServiceClient(conn)\n\treturn nil\n}\n\nfunc Handle(event *proto.Event) {\n\tif pluginMode {\n\t\tgo cli.Handle(context.Background(), event)\n\t} else {\n\t\tgo handle(event)\n\t}\n}\n\nfunc handle(event *proto.Event) {\n\tlogEntry := &proto.LogEntry{\n\t\tTimestamp: ptypes.TimestampNow(),\n\t\tEvent: event,\n\t}\n\n\tswitch e := event.GetEventType().(type) {\n\tcase *proto.Event_BuildEvent:\n\t\tbe := e.BuildEvent\n\t\tev.stateLock.Lock()\n\t\tev.state.BuildState.Artifacts[be.Artifact] = be.Status\n\t\tev.stateLock.Unlock()\n\t\tswitch be.Status {\n\t\tcase InProgress:\n\t\t\tlogEntry.Entry = fmt.Sprintf(\"Build started for artifact %s\", be.Artifact)\n\t\tcase Complete:\n\t\t\tlogEntry.Entry = fmt.Sprintf(\"Build completed for artifact %s\", be.Artifact)\n\t\tcase Failed:\n\t\t\tlogEntry.Entry = fmt.Sprintf(\"Build failed for artifact %s\", be.Artifact)\n\t\t\t\/\/ logEntry.Err = be.Err\n\t\tdefault:\n\t\t}\n\tcase *proto.Event_DeployEvent:\n\t\tde := e.DeployEvent\n\t\tev.stateLock.Lock()\n\t\tev.state.DeployState.Status = de.Status\n\t\tev.stateLock.Unlock()\n\t\tswitch de.Status {\n\t\tcase InProgress:\n\t\t\tlogEntry.Entry = \"Deploy started\"\n\t\tcase Complete:\n\t\t\tlogEntry.Entry = \"Deploy complete\"\n\t\tcase Failed:\n\t\t\tlogEntry.Entry = \"Deploy failed\"\n\t\t\t\/\/ logEntry.Err = de.Err\n\t\tdefault:\n\t\t}\n\tcase *proto.Event_PortEvent:\n\t\tpe := e.PortEvent\n\t\tev.stateLock.Lock()\n\t\tev.state.ForwardedPorts[pe.ContainerName] = pe\n\t\tev.stateLock.Unlock()\n\t\tlogEntry.Entry = fmt.Sprintf(\"Forwarding container %s to local port %d\", pe.ContainerName, pe.LocalPort)\n\tdefault:\n\t\treturn\n\t}\n\n\tev.logEvent(*logEntry)\n}\n\nfunc LogSkaffoldMetadata(info *version.Info) {\n\tev.logEvent(proto.LogEntry{\n\t\tTimestamp: ptypes.TimestampNow(),\n\t\tEvent: &proto.Event{\n\t\t\tEventType: &proto.Event_MetaEvent{\n\t\t\t\tMetaEvent: &proto.MetaEvent{\n\t\t\t\t\tEntry: fmt.Sprintf(\"Starting Skaffold: %+v\", info),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n}\n<commit_msg>Don’t block while sending old events<commit_after>\/*\nCopyright 2019 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage event\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/config\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/event\/proto\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/schema\/latest\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/version\"\n\t\"github.com\/golang\/protobuf\/ptypes\"\n\t\"github.com\/pkg\/errors\"\n\t\"google.golang.org\/grpc\"\n)\n\nconst (\n\tNotStarted = \"Not Started\"\n\tInProgress = \"In Progress\"\n\tComplete = \"Complete\"\n\tFailed = \"Failed\"\n)\n\nvar (\n\tev *eventHandler\n\tonce sync.Once\n\tpluginMode bool\n\n\tcli proto.SkaffoldServiceClient \/\/ for plugin RPC connections\n)\n\ntype eventHandler struct {\n\teventLog []proto.LogEntry\n\tlogLock sync.Mutex\n\n\tstate *proto.State\n\tstateLock sync.Mutex\n\n\tlisteners []chan proto.LogEntry\n}\n\nfunc (ev *eventHandler) RegisterListener(listener chan proto.LogEntry) {\n\tev.listeners = append(ev.listeners, listener)\n}\n\nfunc (ev *eventHandler) logEvent(entry proto.LogEntry) {\n\tev.logLock.Lock()\n\n\tfor _, c := range ev.listeners {\n\t\tc <- entry\n\t}\n\tev.eventLog = append(ev.eventLog, entry)\n\n\tev.logLock.Unlock()\n}\n\nfunc (ev *eventHandler) forEachEvent(callback func(*proto.LogEntry) error) error {\n\tc := make(chan proto.LogEntry)\n\n\tev.logLock.Lock()\n\n\toldEvents := make([]proto.LogEntry, len(ev.eventLog))\n\tcopy(oldEvents, ev.eventLog)\n\tev.RegisterListener(c)\n\n\tev.logLock.Unlock()\n\n\tfor _, entry := range oldEvents {\n\t\tif err := callback(&entry); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor {\n\t\tentry := <-c\n\t\tif err := callback(&entry); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\n\/\/ InitializeState instantiates the global state of the skaffold runner, as well as the event log.\n\/\/ It returns a shutdown callback for tearing down the grpc server, which the runner is responsible for calling.\n\/\/ This function can only be called once.\nfunc InitializeState(build *latest.BuildConfig, deploy *latest.DeployConfig, opts *config.SkaffoldOptions) (func() error, error) {\n\tvar err error\n\tserverShutdown := func() error { return nil }\n\tonce.Do(func() {\n\t\tbuilds := map[string]string{}\n\t\tdeploys := map[string]string{}\n\t\tif build != nil {\n\t\t\tfor _, a := range build.Artifacts {\n\t\t\t\tbuilds[a.ImageName] = NotStarted\n\t\t\t\tdeploys[a.ImageName] = NotStarted\n\t\t\t}\n\t\t}\n\n\t\tev = &eventHandler{\n\t\t\tstate: &proto.State{\n\t\t\t\tBuildState: &proto.BuildState{\n\t\t\t\t\tArtifacts: builds,\n\t\t\t\t},\n\t\t\t\tDeployState: &proto.DeployState{\n\t\t\t\t\tStatus: NotStarted,\n\t\t\t\t},\n\t\t\t\tForwardedPorts: make(map[string]*proto.PortEvent),\n\t\t\t},\n\t\t}\n\n\t\tif opts.EnableRPC {\n\t\t\tserverShutdown, err = newStatusServer(opts.RPCPort)\n\t\t\tif err != nil {\n\t\t\t\terr = errors.Wrap(err, \"creating status server\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n\treturn serverShutdown, err\n}\n\nfunc SetupRPCClient(opts *config.SkaffoldOptions) error {\n\tpluginMode = true\n\tconn, err := grpc.Dial(fmt.Sprintf(\":%d\", opts.RPCPort), grpc.WithInsecure())\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"opening gRPC connection to remote skaffold process\")\n\t}\n\tcli = proto.NewSkaffoldServiceClient(conn)\n\treturn nil\n}\n\nfunc Handle(event *proto.Event) {\n\tif pluginMode {\n\t\tgo cli.Handle(context.Background(), event)\n\t} else {\n\t\tgo handle(event)\n\t}\n}\n\nfunc handle(event *proto.Event) {\n\tlogEntry := &proto.LogEntry{\n\t\tTimestamp: ptypes.TimestampNow(),\n\t\tEvent: event,\n\t}\n\n\tswitch e := event.GetEventType().(type) {\n\tcase *proto.Event_BuildEvent:\n\t\tbe := e.BuildEvent\n\t\tev.stateLock.Lock()\n\t\tev.state.BuildState.Artifacts[be.Artifact] = be.Status\n\t\tev.stateLock.Unlock()\n\t\tswitch be.Status {\n\t\tcase InProgress:\n\t\t\tlogEntry.Entry = fmt.Sprintf(\"Build started for artifact %s\", be.Artifact)\n\t\tcase Complete:\n\t\t\tlogEntry.Entry = fmt.Sprintf(\"Build completed for artifact %s\", be.Artifact)\n\t\tcase Failed:\n\t\t\tlogEntry.Entry = fmt.Sprintf(\"Build failed for artifact %s\", be.Artifact)\n\t\t\t\/\/ logEntry.Err = be.Err\n\t\tdefault:\n\t\t}\n\tcase *proto.Event_DeployEvent:\n\t\tde := e.DeployEvent\n\t\tev.stateLock.Lock()\n\t\tev.state.DeployState.Status = de.Status\n\t\tev.stateLock.Unlock()\n\t\tswitch de.Status {\n\t\tcase InProgress:\n\t\t\tlogEntry.Entry = \"Deploy started\"\n\t\tcase Complete:\n\t\t\tlogEntry.Entry = \"Deploy complete\"\n\t\tcase Failed:\n\t\t\tlogEntry.Entry = \"Deploy failed\"\n\t\t\t\/\/ logEntry.Err = de.Err\n\t\tdefault:\n\t\t}\n\tcase *proto.Event_PortEvent:\n\t\tpe := e.PortEvent\n\t\tev.stateLock.Lock()\n\t\tev.state.ForwardedPorts[pe.ContainerName] = pe\n\t\tev.stateLock.Unlock()\n\t\tlogEntry.Entry = fmt.Sprintf(\"Forwarding container %s to local port %d\", pe.ContainerName, pe.LocalPort)\n\tdefault:\n\t\treturn\n\t}\n\n\tev.logEvent(*logEntry)\n}\n\nfunc LogSkaffoldMetadata(info *version.Info) {\n\tev.logEvent(proto.LogEntry{\n\t\tTimestamp: ptypes.TimestampNow(),\n\t\tEvent: &proto.Event{\n\t\t\tEventType: &proto.Event_MetaEvent{\n\t\t\t\tMetaEvent: &proto.MetaEvent{\n\t\t\t\t\tEntry: fmt.Sprintf(\"Starting Skaffold: %+v\", info),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 Couchbase, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ \t\thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage zap\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"math\"\n\t\"reflect\"\n\t\"sort\"\n\n\t\"github.com\/blevesearch\/bleve\/index\"\n\t\"github.com\/blevesearch\/bleve\/index\/scorch\/segment\"\n\t\"github.com\/blevesearch\/bleve\/size\"\n\t\"github.com\/golang\/snappy\"\n)\n\nvar reflectStaticSizedocValueReader int\n\nfunc init() {\n\tvar dvi docValueReader\n\treflectStaticSizedocValueReader = int(reflect.TypeOf(dvi).Size())\n}\n\ntype docNumTermsVisitor func(docNum uint64, terms []byte) error\n\ntype docVisitState struct {\n\tdvrs map[uint16]*docValueReader\n\tsegment *Segment\n}\n\ntype docValueReader struct {\n\tfield string\n\tcurChunkNum uint64\n\tchunkOffsets []uint64\n\tdvDataLoc uint64\n\tcurChunkHeader []MetaData\n\tcurChunkData []byte \/\/ compressed data cache\n\tuncompressed []byte \/\/ temp buf for snappy decompression\n}\n\nfunc (di *docValueReader) size() int {\n\treturn reflectStaticSizedocValueReader + size.SizeOfPtr +\n\t\tlen(di.field) +\n\t\tlen(di.chunkOffsets)*size.SizeOfUint64 +\n\t\tlen(di.curChunkHeader)*reflectStaticSizeMetaData +\n\t\tlen(di.curChunkData)\n}\n\nfunc (di *docValueReader) cloneInto(rv *docValueReader) *docValueReader {\n\tif rv == nil {\n\t\trv = &docValueReader{}\n\t}\n\n\trv.field = di.field\n\trv.curChunkNum = math.MaxUint64\n\trv.chunkOffsets = di.chunkOffsets \/\/ immutable, so it's sharable\n\trv.dvDataLoc = di.dvDataLoc\n\trv.curChunkHeader = nil\n\trv.curChunkData = nil\n\trv.uncompressed = nil\n\n\treturn rv\n}\n\nfunc (di *docValueReader) fieldName() string {\n\treturn di.field\n}\n\nfunc (di *docValueReader) curChunkNumber() uint64 {\n\treturn di.curChunkNum\n}\n\nfunc (s *SegmentBase) loadFieldDocValueReader(field string,\n\tfieldDvLocStart, fieldDvLocEnd uint64) (*docValueReader, error) {\n\t\/\/ get the docValue offset for the given fields\n\tif fieldDvLocStart == fieldNotUninverted {\n\t\treturn nil, fmt.Errorf(\"loadFieldDocValueReader: \"+\n\t\t\t\"no docValues found for field: %s\", field)\n\t}\n\n\t\/\/ read the number of chunks, and chunk offsets position\n\tvar numChunks, chunkOffsetsPosition uint64\n\n\tif fieldDvLocEnd-fieldDvLocStart > 16 {\n\t\tnumChunks = binary.BigEndian.Uint64(s.mem[fieldDvLocEnd-8 : fieldDvLocEnd])\n\t\t\/\/ read the length of chunk offsets\n\t\tchunkOffsetsLen := binary.BigEndian.Uint64(s.mem[fieldDvLocEnd-16 : fieldDvLocEnd-8])\n\t\t\/\/ acquire position of chunk offsets\n\t\tchunkOffsetsPosition = (fieldDvLocEnd - 16) - chunkOffsetsLen\n\t}\n\n\tfdvIter := &docValueReader{\n\t\tcurChunkNum: math.MaxUint64,\n\t\tfield: field,\n\t\tchunkOffsets: make([]uint64, int(numChunks)),\n\t}\n\n\t\/\/ read the chunk offsets\n\tvar offset uint64\n\tfor i := 0; i < int(numChunks); i++ {\n\t\tloc, read := binary.Uvarint(s.mem[chunkOffsetsPosition+offset : chunkOffsetsPosition+offset+binary.MaxVarintLen64])\n\t\tif read <= 0 {\n\t\t\treturn nil, fmt.Errorf(\"corrupted chunk offset during segment load\")\n\t\t}\n\t\tfdvIter.chunkOffsets[i] = loc\n\t\toffset += uint64(read)\n\t}\n\n\t\/\/ set the data offset\n\tfdvIter.dvDataLoc = fieldDvLocStart\n\n\treturn fdvIter, nil\n}\n\nfunc (di *docValueReader) loadDvChunk(chunkNumber uint64, s *SegmentBase) error {\n\t\/\/ advance to the chunk where the docValues\n\t\/\/ reside for the given docNum\n\tdestChunkDataLoc, curChunkEnd := di.dvDataLoc, di.dvDataLoc\n\tstart, end := readChunkBoundary(int(chunkNumber), di.chunkOffsets)\n\tif start >= end {\n\t\tdi.curChunkHeader = di.curChunkHeader[:0]\n\t\tdi.curChunkData = nil\n\t\tdi.curChunkNum = chunkNumber\n\t\tdi.uncompressed = di.uncompressed[:0]\n\t\treturn nil\n\t}\n\n\tdestChunkDataLoc += start\n\tcurChunkEnd += end\n\n\t\/\/ read the number of docs reside in the chunk\n\tnumDocs, read := binary.Uvarint(s.mem[destChunkDataLoc : destChunkDataLoc+binary.MaxVarintLen64])\n\tif read <= 0 {\n\t\treturn fmt.Errorf(\"failed to read the chunk\")\n\t}\n\tchunkMetaLoc := destChunkDataLoc + uint64(read)\n\n\toffset := uint64(0)\n\tdi.curChunkHeader = make([]MetaData, int(numDocs))\n\tfor i := 0; i < int(numDocs); i++ {\n\t\tdi.curChunkHeader[i].DocNum, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64])\n\t\toffset += uint64(read)\n\t\tdi.curChunkHeader[i].DocDvOffset, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64])\n\t\toffset += uint64(read)\n\t}\n\n\tcompressedDataLoc := chunkMetaLoc + offset\n\tdataLength := curChunkEnd - compressedDataLoc\n\tdi.curChunkData = s.mem[compressedDataLoc : compressedDataLoc+dataLength]\n\tdi.curChunkNum = chunkNumber\n\tdi.uncompressed = di.uncompressed[:0]\n\treturn nil\n}\n\nfunc (di *docValueReader) iterateAllDocValues(s *SegmentBase, visitor docNumTermsVisitor) error {\n\tfor i := 0; i < len(di.chunkOffsets); i++ {\n\t\terr := di.loadDvChunk(uint64(i), s)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif di.curChunkData == nil || len(di.curChunkHeader) <= 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ uncompress the already loaded data\n\t\tuncompressed, err := snappy.Decode(di.uncompressed[:cap(di.uncompressed)], di.curChunkData)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdi.uncompressed = uncompressed\n\n\t\tstart := uint64(0)\n\t\tfor _, entry := range di.curChunkHeader {\n\t\t\terr = visitor(entry.DocNum, uncompressed[start:entry.DocDvOffset])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tstart = entry.DocDvOffset\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (di *docValueReader) visitDocValues(docNum uint64,\n\tvisitor index.DocumentFieldTermVisitor) error {\n\t\/\/ binary search the term locations for the docNum\n\tstart, end := di.getDocValueLocs(docNum)\n\tif start == math.MaxUint64 || end == math.MaxUint64 || start == end {\n\t\treturn nil\n\t}\n\n\tvar uncompressed []byte\n\tvar err error\n\t\/\/ use the uncompressed copy if available\n\tif len(di.uncompressed) > 0 {\n\t\tuncompressed = di.uncompressed\n\t} else {\n\t\t\/\/ uncompress the already loaded data\n\t\tuncompressed, err = snappy.Decode(di.uncompressed[:cap(di.uncompressed)], di.curChunkData)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdi.uncompressed = uncompressed\n\t}\n\n\t\/\/ pick the terms for the given docNum\n\tuncompressed = uncompressed[start:end]\n\tfor {\n\t\ti := bytes.Index(uncompressed, termSeparatorSplitSlice)\n\t\tif i < 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tvisitor(di.field, uncompressed[0:i])\n\t\tuncompressed = uncompressed[i+1:]\n\t}\n\n\treturn nil\n}\n\nfunc (di *docValueReader) getDocValueLocs(docNum uint64) (uint64, uint64) {\n\ti := sort.Search(len(di.curChunkHeader), func(i int) bool {\n\t\treturn di.curChunkHeader[i].DocNum >= docNum\n\t})\n\tif i < len(di.curChunkHeader) && di.curChunkHeader[i].DocNum == docNum {\n\t\treturn ReadDocValueBoundary(i, di.curChunkHeader)\n\t}\n\treturn math.MaxUint64, math.MaxUint64\n}\n\n\/\/ VisitDocumentFieldTerms is an implementation of the\n\/\/ DocumentFieldTermVisitable interface\nfunc (s *Segment) VisitDocumentFieldTerms(localDocNum uint64, fields []string,\n\tvisitor index.DocumentFieldTermVisitor, dvsIn segment.DocVisitState) (\n\tsegment.DocVisitState, error) {\n\tdvs, ok := dvsIn.(*docVisitState)\n\tif !ok || dvs == nil {\n\t\tdvs = &docVisitState{}\n\t} else {\n\t\tif dvs.segment != s {\n\t\t\tdvs.segment = s\n\t\t\tdvs.dvrs = nil\n\t\t}\n\t}\n\n\tvar fieldIDPlus1 uint16\n\tif dvs.dvrs == nil {\n\t\tdvs.dvrs = make(map[uint16]*docValueReader, len(fields))\n\t\tfor _, field := range fields {\n\t\t\tif fieldIDPlus1, ok = s.fieldsMap[field]; !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfieldID := fieldIDPlus1 - 1\n\t\t\tif dvIter, exists := s.fieldDvReaders[fieldID]; exists &&\n\t\t\t\tdvIter != nil {\n\t\t\t\tdvs.dvrs[fieldID] = dvIter.cloneInto(dvs.dvrs[fieldID])\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ find the chunkNumber where the docValues are stored\n\tdocInChunk := localDocNum \/ uint64(s.chunkFactor)\n\tvar dvr *docValueReader\n\tfor _, field := range fields {\n\t\tif fieldIDPlus1, ok = s.fieldsMap[field]; !ok {\n\t\t\tcontinue\n\t\t}\n\t\tfieldID := fieldIDPlus1 - 1\n\t\tif dvr, ok = dvs.dvrs[fieldID]; ok && dvr != nil {\n\t\t\t\/\/ check if the chunk is already loaded\n\t\t\tif docInChunk != dvr.curChunkNumber() {\n\t\t\t\terr := dvr.loadDvChunk(docInChunk, &s.SegmentBase)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn dvs, err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t_ = dvr.visitDocValues(localDocNum, visitor)\n\t\t}\n\t}\n\treturn dvs, nil\n}\n\n\/\/ VisitableDocValueFields returns the list of fields with\n\/\/ persisted doc value terms ready to be visitable using the\n\/\/ VisitDocumentFieldTerms method.\nfunc (s *Segment) VisitableDocValueFields() ([]string, error) {\n\treturn s.fieldDvNames, nil\n}\n<commit_msg>scorch zap optimize docValueReader curChunkHeader slice reuse<commit_after>\/\/ Copyright (c) 2017 Couchbase, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ \t\thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage zap\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"math\"\n\t\"reflect\"\n\t\"sort\"\n\n\t\"github.com\/blevesearch\/bleve\/index\"\n\t\"github.com\/blevesearch\/bleve\/index\/scorch\/segment\"\n\t\"github.com\/blevesearch\/bleve\/size\"\n\t\"github.com\/golang\/snappy\"\n)\n\nvar reflectStaticSizedocValueReader int\n\nfunc init() {\n\tvar dvi docValueReader\n\treflectStaticSizedocValueReader = int(reflect.TypeOf(dvi).Size())\n}\n\ntype docNumTermsVisitor func(docNum uint64, terms []byte) error\n\ntype docVisitState struct {\n\tdvrs map[uint16]*docValueReader\n\tsegment *Segment\n}\n\ntype docValueReader struct {\n\tfield string\n\tcurChunkNum uint64\n\tchunkOffsets []uint64\n\tdvDataLoc uint64\n\tcurChunkHeader []MetaData\n\tcurChunkData []byte \/\/ compressed data cache\n\tuncompressed []byte \/\/ temp buf for snappy decompression\n}\n\nfunc (di *docValueReader) size() int {\n\treturn reflectStaticSizedocValueReader + size.SizeOfPtr +\n\t\tlen(di.field) +\n\t\tlen(di.chunkOffsets)*size.SizeOfUint64 +\n\t\tlen(di.curChunkHeader)*reflectStaticSizeMetaData +\n\t\tlen(di.curChunkData)\n}\n\nfunc (di *docValueReader) cloneInto(rv *docValueReader) *docValueReader {\n\tif rv == nil {\n\t\trv = &docValueReader{}\n\t}\n\n\trv.field = di.field\n\trv.curChunkNum = math.MaxUint64\n\trv.chunkOffsets = di.chunkOffsets \/\/ immutable, so it's sharable\n\trv.dvDataLoc = di.dvDataLoc\n\trv.curChunkHeader = rv.curChunkHeader[:0]\n\trv.curChunkData = nil\n\trv.uncompressed = rv.uncompressed[:0]\n\n\treturn rv\n}\n\nfunc (di *docValueReader) fieldName() string {\n\treturn di.field\n}\n\nfunc (di *docValueReader) curChunkNumber() uint64 {\n\treturn di.curChunkNum\n}\n\nfunc (s *SegmentBase) loadFieldDocValueReader(field string,\n\tfieldDvLocStart, fieldDvLocEnd uint64) (*docValueReader, error) {\n\t\/\/ get the docValue offset for the given fields\n\tif fieldDvLocStart == fieldNotUninverted {\n\t\treturn nil, fmt.Errorf(\"loadFieldDocValueReader: \"+\n\t\t\t\"no docValues found for field: %s\", field)\n\t}\n\n\t\/\/ read the number of chunks, and chunk offsets position\n\tvar numChunks, chunkOffsetsPosition uint64\n\n\tif fieldDvLocEnd-fieldDvLocStart > 16 {\n\t\tnumChunks = binary.BigEndian.Uint64(s.mem[fieldDvLocEnd-8 : fieldDvLocEnd])\n\t\t\/\/ read the length of chunk offsets\n\t\tchunkOffsetsLen := binary.BigEndian.Uint64(s.mem[fieldDvLocEnd-16 : fieldDvLocEnd-8])\n\t\t\/\/ acquire position of chunk offsets\n\t\tchunkOffsetsPosition = (fieldDvLocEnd - 16) - chunkOffsetsLen\n\t}\n\n\tfdvIter := &docValueReader{\n\t\tcurChunkNum: math.MaxUint64,\n\t\tfield: field,\n\t\tchunkOffsets: make([]uint64, int(numChunks)),\n\t}\n\n\t\/\/ read the chunk offsets\n\tvar offset uint64\n\tfor i := 0; i < int(numChunks); i++ {\n\t\tloc, read := binary.Uvarint(s.mem[chunkOffsetsPosition+offset : chunkOffsetsPosition+offset+binary.MaxVarintLen64])\n\t\tif read <= 0 {\n\t\t\treturn nil, fmt.Errorf(\"corrupted chunk offset during segment load\")\n\t\t}\n\t\tfdvIter.chunkOffsets[i] = loc\n\t\toffset += uint64(read)\n\t}\n\n\t\/\/ set the data offset\n\tfdvIter.dvDataLoc = fieldDvLocStart\n\n\treturn fdvIter, nil\n}\n\nfunc (di *docValueReader) loadDvChunk(chunkNumber uint64, s *SegmentBase) error {\n\t\/\/ advance to the chunk where the docValues\n\t\/\/ reside for the given docNum\n\tdestChunkDataLoc, curChunkEnd := di.dvDataLoc, di.dvDataLoc\n\tstart, end := readChunkBoundary(int(chunkNumber), di.chunkOffsets)\n\tif start >= end {\n\t\tdi.curChunkHeader = di.curChunkHeader[:0]\n\t\tdi.curChunkData = nil\n\t\tdi.curChunkNum = chunkNumber\n\t\tdi.uncompressed = di.uncompressed[:0]\n\t\treturn nil\n\t}\n\n\tdestChunkDataLoc += start\n\tcurChunkEnd += end\n\n\t\/\/ read the number of docs reside in the chunk\n\tnumDocs, read := binary.Uvarint(s.mem[destChunkDataLoc : destChunkDataLoc+binary.MaxVarintLen64])\n\tif read <= 0 {\n\t\treturn fmt.Errorf(\"failed to read the chunk\")\n\t}\n\tchunkMetaLoc := destChunkDataLoc + uint64(read)\n\n\toffset := uint64(0)\n\tif cap(di.curChunkHeader) < int(numDocs) {\n\t\tdi.curChunkHeader = make([]MetaData, int(numDocs))\n\t} else {\n\t\tdi.curChunkHeader = di.curChunkHeader[:int(numDocs)]\n\t}\n\tfor i := 0; i < int(numDocs); i++ {\n\t\tdi.curChunkHeader[i].DocNum, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64])\n\t\toffset += uint64(read)\n\t\tdi.curChunkHeader[i].DocDvOffset, read = binary.Uvarint(s.mem[chunkMetaLoc+offset : chunkMetaLoc+offset+binary.MaxVarintLen64])\n\t\toffset += uint64(read)\n\t}\n\n\tcompressedDataLoc := chunkMetaLoc + offset\n\tdataLength := curChunkEnd - compressedDataLoc\n\tdi.curChunkData = s.mem[compressedDataLoc : compressedDataLoc+dataLength]\n\tdi.curChunkNum = chunkNumber\n\tdi.uncompressed = di.uncompressed[:0]\n\treturn nil\n}\n\nfunc (di *docValueReader) iterateAllDocValues(s *SegmentBase, visitor docNumTermsVisitor) error {\n\tfor i := 0; i < len(di.chunkOffsets); i++ {\n\t\terr := di.loadDvChunk(uint64(i), s)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif di.curChunkData == nil || len(di.curChunkHeader) <= 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ uncompress the already loaded data\n\t\tuncompressed, err := snappy.Decode(di.uncompressed[:cap(di.uncompressed)], di.curChunkData)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdi.uncompressed = uncompressed\n\n\t\tstart := uint64(0)\n\t\tfor _, entry := range di.curChunkHeader {\n\t\t\terr = visitor(entry.DocNum, uncompressed[start:entry.DocDvOffset])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tstart = entry.DocDvOffset\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (di *docValueReader) visitDocValues(docNum uint64,\n\tvisitor index.DocumentFieldTermVisitor) error {\n\t\/\/ binary search the term locations for the docNum\n\tstart, end := di.getDocValueLocs(docNum)\n\tif start == math.MaxUint64 || end == math.MaxUint64 || start == end {\n\t\treturn nil\n\t}\n\n\tvar uncompressed []byte\n\tvar err error\n\t\/\/ use the uncompressed copy if available\n\tif len(di.uncompressed) > 0 {\n\t\tuncompressed = di.uncompressed\n\t} else {\n\t\t\/\/ uncompress the already loaded data\n\t\tuncompressed, err = snappy.Decode(di.uncompressed[:cap(di.uncompressed)], di.curChunkData)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdi.uncompressed = uncompressed\n\t}\n\n\t\/\/ pick the terms for the given docNum\n\tuncompressed = uncompressed[start:end]\n\tfor {\n\t\ti := bytes.Index(uncompressed, termSeparatorSplitSlice)\n\t\tif i < 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tvisitor(di.field, uncompressed[0:i])\n\t\tuncompressed = uncompressed[i+1:]\n\t}\n\n\treturn nil\n}\n\nfunc (di *docValueReader) getDocValueLocs(docNum uint64) (uint64, uint64) {\n\ti := sort.Search(len(di.curChunkHeader), func(i int) bool {\n\t\treturn di.curChunkHeader[i].DocNum >= docNum\n\t})\n\tif i < len(di.curChunkHeader) && di.curChunkHeader[i].DocNum == docNum {\n\t\treturn ReadDocValueBoundary(i, di.curChunkHeader)\n\t}\n\treturn math.MaxUint64, math.MaxUint64\n}\n\n\/\/ VisitDocumentFieldTerms is an implementation of the\n\/\/ DocumentFieldTermVisitable interface\nfunc (s *Segment) VisitDocumentFieldTerms(localDocNum uint64, fields []string,\n\tvisitor index.DocumentFieldTermVisitor, dvsIn segment.DocVisitState) (\n\tsegment.DocVisitState, error) {\n\tdvs, ok := dvsIn.(*docVisitState)\n\tif !ok || dvs == nil {\n\t\tdvs = &docVisitState{}\n\t} else {\n\t\tif dvs.segment != s {\n\t\t\tdvs.segment = s\n\t\t\tdvs.dvrs = nil\n\t\t}\n\t}\n\n\tvar fieldIDPlus1 uint16\n\tif dvs.dvrs == nil {\n\t\tdvs.dvrs = make(map[uint16]*docValueReader, len(fields))\n\t\tfor _, field := range fields {\n\t\t\tif fieldIDPlus1, ok = s.fieldsMap[field]; !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfieldID := fieldIDPlus1 - 1\n\t\t\tif dvIter, exists := s.fieldDvReaders[fieldID]; exists &&\n\t\t\t\tdvIter != nil {\n\t\t\t\tdvs.dvrs[fieldID] = dvIter.cloneInto(dvs.dvrs[fieldID])\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ find the chunkNumber where the docValues are stored\n\tdocInChunk := localDocNum \/ uint64(s.chunkFactor)\n\tvar dvr *docValueReader\n\tfor _, field := range fields {\n\t\tif fieldIDPlus1, ok = s.fieldsMap[field]; !ok {\n\t\t\tcontinue\n\t\t}\n\t\tfieldID := fieldIDPlus1 - 1\n\t\tif dvr, ok = dvs.dvrs[fieldID]; ok && dvr != nil {\n\t\t\t\/\/ check if the chunk is already loaded\n\t\t\tif docInChunk != dvr.curChunkNumber() {\n\t\t\t\terr := dvr.loadDvChunk(docInChunk, &s.SegmentBase)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn dvs, err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t_ = dvr.visitDocValues(localDocNum, visitor)\n\t\t}\n\t}\n\treturn dvs, nil\n}\n\n\/\/ VisitableDocValueFields returns the list of fields with\n\/\/ persisted doc value terms ready to be visitable using the\n\/\/ VisitDocumentFieldTerms method.\nfunc (s *Segment) VisitableDocValueFields() ([]string, error) {\n\treturn s.fieldDvNames, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package transporter\n\nimport (\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/compose\/transporter\/pkg\/adaptor\"\n\t\"github.com\/compose\/transporter\/pkg\/events\"\n\t\"github.com\/compose\/transporter\/pkg\/state\"\n)\n\n\/\/ VERSION the library\nconst (\n\tVERSION = \"0.0.2\"\n)\n\n\/\/ A Pipeline is a the end to end description of a transporter data flow.\n\/\/ including the source, sink, and all the transformers along the way\ntype Pipeline struct {\n\tsource *Node\n\temitter events.Emitter\n\tsessionStore state.SessionStore\n\tmetricsTicker *time.Ticker\n\n\t\/\/ Err is the fatal error that was sent from the adaptor\n\t\/\/ that caused us to stop this process. If this is nil, then\n\t\/\/ the transporter is running\n\tErr error\n\tsessionTicker *time.Ticker\n}\n\n\/\/ NewDefaultPipeline returns a new Transporter Pipeline with the given node tree, and\n\/\/ uses the events.HttpPostEmitter to deliver metrics.\n\/\/ eg.\n\/\/ source :=\n\/\/ \ttransporter.NewNode(\"source\", \"mongo\", adaptor.Config{\"uri\": \"mongodb:\/\/localhost\/\", \"namespace\": \"boom.foo\", \"debug\": false, \"tail\": true}).\n\/\/ \t \tAdd(transporter.NewNode(\"out\", \"file\", adaptor.Config{\"uri\": \"stdout:\/\/\"}))\n\/\/ pipeline, err := transporter.NewDefaultPipeline(source, events.Api{URI: \"http:\/\/localhost\/endpoint\"}, 1*time.Second)\n\/\/ if err != nil {\n\/\/ \t fmt.Println(err)\n\/\/ \t os.Exit(1)\n\/\/ }\n\/\/ pipeline.Run()\nfunc NewDefaultPipeline(source *Node, uri, key, pid string, interval time.Duration) (*Pipeline, error) {\n\temitter := events.NewHTTPPostEmitter(uri, key, pid)\n\tsessionStore := state.NewFilestore(pid, \"\/tmp\/transporter.state\")\n\treturn NewPipeline(source, emitter, interval, sessionStore, 10*time.Second)\n}\n\n\/\/ NewPipeline creates a new Transporter Pipeline using the given tree of nodes, and Event Emitter\n\/\/ eg.\n\/\/ source :=\n\/\/ \ttransporter.NewNode(\"source\", \"mongo\", adaptor.Config{\"uri\": \"mongodb:\/\/localhost\/\", \"namespace\": \"boom.foo\", \"debug\": false, \"tail\": true}).\n\/\/ \t \tAdd(transporter.NewNode(\"out\", \"file\", adaptor.Config{\"uri\": \"stdout:\/\/\"}))\n\/\/ pipeline, err := transporter.NewPipeline(source, events.NewNoopEmitter(), 1*time.Second, state.NewFilestore(pid, \"\/tmp\/transporter.state\"), 10*time.Second)\n\/\/ if err != nil {\n\/\/ \t fmt.Println(err)\n\/\/ \t os.Exit(1)\n\/\/ }\n\/\/ pipeline.Run()\nfunc NewPipeline(source *Node, emitter events.Emitter, interval time.Duration, sessionStore state.SessionStore, sessionInterval time.Duration) (*Pipeline, error) {\n\tpipeline := &Pipeline{\n\t\tsource: source,\n\t\temitter: emitter,\n\t\tmetricsTicker: time.NewTicker(interval),\n\t}\n\n\tif sessionStore != nil {\n\t\tpipeline.sessionStore = sessionStore\n\t\tpipeline.sessionTicker = time.NewTicker(sessionInterval)\n\t}\n\n\t\/\/ init the pipeline\n\terr := pipeline.source.Init(interval)\n\tif err != nil {\n\t\treturn pipeline, err\n\t}\n\n\t\/\/ init the emitter with the right chan\n\tpipeline.emitter.Init(source.pipe.Event)\n\n\t\/\/ start the emitters\n\tgo pipeline.startErrorListener(source.pipe.Err)\n\tgo pipeline.startMetricsGatherer()\n\tif sessionStore != nil {\n\t\tgo pipeline.startStateSaver()\n\t}\n\tpipeline.emitter.Start()\n\n\tpipeline.initState()\n\n\treturn pipeline, nil\n}\n\nfunc (pipeline *Pipeline) String() string {\n\tout := pipeline.source.String()\n\treturn out\n}\n\n\/\/ Stop sends a stop signal to the emitter and all the nodes, whether they are running or not.\n\/\/ the node's database adaptors are expected to clean up after themselves, and stop will block until\n\/\/ all nodes have stopped successfully\nfunc (pipeline *Pipeline) Stop() {\n\tpipeline.source.Stop()\n\tpipeline.emitter.Stop()\n\tif pipeline.sessionStore != nil {\n\t\tpipeline.sessionTicker.Stop()\n\t}\n\tpipeline.metricsTicker.Stop()\n}\n\n\/\/ Run the pipeline\nfunc (pipeline *Pipeline) Run() error {\n\tendpoints := pipeline.source.Endpoints()\n\t\/\/ send a boot event\n\tpipeline.source.pipe.Event <- events.NewBootEvent(time.Now().Unix(), VERSION, endpoints)\n\n\t\/\/ start the source\n\terr := pipeline.source.Start()\n\tif err != nil && pipeline.Err == nil {\n\t\tpipeline.Err = err \/\/ only set it if it hasn't been set already.\n\t}\n\n\t\/\/ pipeline has stopped, emit one last round of metrics and send the exit event\n\tpipeline.emitMetrics()\n\tpipeline.setState()\n\tpipeline.source.pipe.Event <- events.NewExitEvent(time.Now().Unix(), VERSION, endpoints)\n\n\t\/\/ the source has exited, stop all the other nodes\n\tpipeline.Stop()\n\n\treturn pipeline.Err\n}\n\n\/\/ start error listener consumes all the events on the pipe's Err channel, and stops the pipeline\n\/\/ when it receives one\nfunc (pipeline *Pipeline) startErrorListener(cherr chan error) {\n\tfor err := range cherr {\n\t\tif aerr, ok := err.(adaptor.Error); ok {\n\t\t\tpipeline.source.pipe.Event <- events.NewErrorEvent(time.Now().Unix(), aerr.Path, aerr.Record, aerr.Error())\n\t\t\tif aerr.Lvl == adaptor.ERROR || aerr.Lvl == adaptor.CRITICAL {\n\t\t\t\tlog.Println(aerr)\n\t\t\t}\n\t\t} else {\n\t\t\tif pipeline.Err == nil {\n\t\t\t\tpipeline.Err = err\n\t\t\t}\n\t\t\tpipeline.Stop()\n\t\t}\n\t}\n}\n\nfunc (pipeline *Pipeline) startMetricsGatherer() {\n\tfor _ = range pipeline.metricsTicker.C {\n\t\tpipeline.emitMetrics()\n\t}\n}\n\n\/\/ emit the metrics\nfunc (pipeline *Pipeline) emitMetrics() {\n\n\tfrontier := make([]*Node, 1)\n\tfrontier[0] = pipeline.source\n\n\tfor {\n\t\t\/\/ pop the first item\n\t\tnode := frontier[0]\n\t\tfrontier = frontier[1:]\n\n\t\t\/\/ do something with the node\n\t\tpipeline.source.pipe.Event <- events.NewMetricsEvent(time.Now().Unix(), node.Path(), node.pipe.MessageCount)\n\n\t\t\/\/ add this nodes children to the frontier\n\t\tfor _, child := range node.Children {\n\t\t\tfrontier = append(frontier, child)\n\t\t}\n\n\t\t\/\/ if we're empty\n\t\tif len(frontier) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (pipeline *Pipeline) startStateSaver() {\n\tfor _ = range pipeline.sessionTicker.C {\n\t\tpipeline.setState()\n\t}\n}\n\nfunc (pipeline *Pipeline) setState() {\n\tfrontier := make([]*Node, 1)\n\tfrontier[0] = pipeline.source\n\n\tfor {\n\t\t\/\/ pop the first item\n\t\tnode := frontier[0]\n\t\tfrontier = frontier[1:]\n\n\t\t\/\/ do something with the node\n\t\tif node.Type != \"transformer\" {\n\t\t\tpipeline.sessionStore.Set(node.Path(), &state.MsgState{Msg: node.pipe.LastMsg, Extra: node.pipe.ExtraState})\n\t\t}\n\n\t\t\/\/ add this nodes children to the frontier\n\t\tfor _, child := range node.Children {\n\t\t\tfrontier = append(frontier, child)\n\t\t}\n\n\t\t\/\/ if we're empty\n\t\tif len(frontier) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (pipeline *Pipeline) initState() {\n\tfrontier := make([]*Node, 1)\n\tfrontier[0] = pipeline.source\n\n\tfor {\n\t\t\/\/ pop the first item\n\t\tnode := frontier[0]\n\t\tfrontier = frontier[1:]\n\n\t\t\/\/ do something with the node\n\t\tif node.Type != \"transformer\" {\n\t\t\tnodeState, _ := pipeline.sessionStore.Get(node.Path())\n\t\t\tif nodeState != nil {\n\t\t\t\tnode.pipe.LastMsg = nodeState.Msg\n\t\t\t\tnode.pipe.ExtraState = nodeState.Extra\n\t\t\t}\n\t\t}\n\n\t\t\/\/ add this nodes children to the frontier\n\t\tfor _, child := range node.Children {\n\t\t\tfrontier = append(frontier, child)\n\t\t}\n\n\t\t\/\/ if we're empty\n\t\tif len(frontier) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n}\n<commit_msg>dont store state unless LastMsg isnt nil<commit_after>package transporter\n\nimport (\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/compose\/transporter\/pkg\/adaptor\"\n\t\"github.com\/compose\/transporter\/pkg\/events\"\n\t\"github.com\/compose\/transporter\/pkg\/state\"\n)\n\n\/\/ VERSION the library\nconst (\n\tVERSION = \"0.0.2\"\n)\n\n\/\/ A Pipeline is a the end to end description of a transporter data flow.\n\/\/ including the source, sink, and all the transformers along the way\ntype Pipeline struct {\n\tsource *Node\n\temitter events.Emitter\n\tsessionStore state.SessionStore\n\tmetricsTicker *time.Ticker\n\n\t\/\/ Err is the fatal error that was sent from the adaptor\n\t\/\/ that caused us to stop this process. If this is nil, then\n\t\/\/ the transporter is running\n\tErr error\n\tsessionTicker *time.Ticker\n}\n\n\/\/ NewDefaultPipeline returns a new Transporter Pipeline with the given node tree, and\n\/\/ uses the events.HttpPostEmitter to deliver metrics.\n\/\/ eg.\n\/\/ source :=\n\/\/ \ttransporter.NewNode(\"source\", \"mongo\", adaptor.Config{\"uri\": \"mongodb:\/\/localhost\/\", \"namespace\": \"boom.foo\", \"debug\": false, \"tail\": true}).\n\/\/ \t \tAdd(transporter.NewNode(\"out\", \"file\", adaptor.Config{\"uri\": \"stdout:\/\/\"}))\n\/\/ pipeline, err := transporter.NewDefaultPipeline(source, events.Api{URI: \"http:\/\/localhost\/endpoint\"}, 1*time.Second)\n\/\/ if err != nil {\n\/\/ \t fmt.Println(err)\n\/\/ \t os.Exit(1)\n\/\/ }\n\/\/ pipeline.Run()\nfunc NewDefaultPipeline(source *Node, uri, key, pid string, interval time.Duration) (*Pipeline, error) {\n\temitter := events.NewHTTPPostEmitter(uri, key, pid)\n\tsessionStore := state.NewFilestore(pid, \"\/tmp\/transporter.state\")\n\treturn NewPipeline(source, emitter, interval, sessionStore, 10*time.Second)\n}\n\n\/\/ NewPipeline creates a new Transporter Pipeline using the given tree of nodes, and Event Emitter\n\/\/ eg.\n\/\/ source :=\n\/\/ \ttransporter.NewNode(\"source\", \"mongo\", adaptor.Config{\"uri\": \"mongodb:\/\/localhost\/\", \"namespace\": \"boom.foo\", \"debug\": false, \"tail\": true}).\n\/\/ \t \tAdd(transporter.NewNode(\"out\", \"file\", adaptor.Config{\"uri\": \"stdout:\/\/\"}))\n\/\/ pipeline, err := transporter.NewPipeline(source, events.NewNoopEmitter(), 1*time.Second, state.NewFilestore(pid, \"\/tmp\/transporter.state\"), 10*time.Second)\n\/\/ if err != nil {\n\/\/ \t fmt.Println(err)\n\/\/ \t os.Exit(1)\n\/\/ }\n\/\/ pipeline.Run()\nfunc NewPipeline(source *Node, emitter events.Emitter, interval time.Duration, sessionStore state.SessionStore, sessionInterval time.Duration) (*Pipeline, error) {\n\tpipeline := &Pipeline{\n\t\tsource: source,\n\t\temitter: emitter,\n\t\tmetricsTicker: time.NewTicker(interval),\n\t}\n\n\tif sessionStore != nil {\n\t\tpipeline.sessionStore = sessionStore\n\t\tpipeline.sessionTicker = time.NewTicker(sessionInterval)\n\t}\n\n\t\/\/ init the pipeline\n\terr := pipeline.source.Init(interval)\n\tif err != nil {\n\t\treturn pipeline, err\n\t}\n\n\t\/\/ init the emitter with the right chan\n\tpipeline.emitter.Init(source.pipe.Event)\n\n\t\/\/ start the emitters\n\tgo pipeline.startErrorListener(source.pipe.Err)\n\tgo pipeline.startMetricsGatherer()\n\tif sessionStore != nil {\n\t\tgo pipeline.startStateSaver()\n\t}\n\tpipeline.emitter.Start()\n\n\tpipeline.initState()\n\n\treturn pipeline, nil\n}\n\nfunc (pipeline *Pipeline) String() string {\n\tout := pipeline.source.String()\n\treturn out\n}\n\n\/\/ Stop sends a stop signal to the emitter and all the nodes, whether they are running or not.\n\/\/ the node's database adaptors are expected to clean up after themselves, and stop will block until\n\/\/ all nodes have stopped successfully\nfunc (pipeline *Pipeline) Stop() {\n\tpipeline.source.Stop()\n\tpipeline.emitter.Stop()\n\tif pipeline.sessionStore != nil {\n\t\tpipeline.sessionTicker.Stop()\n\t}\n\tpipeline.metricsTicker.Stop()\n}\n\n\/\/ Run the pipeline\nfunc (pipeline *Pipeline) Run() error {\n\tendpoints := pipeline.source.Endpoints()\n\t\/\/ send a boot event\n\tpipeline.source.pipe.Event <- events.NewBootEvent(time.Now().Unix(), VERSION, endpoints)\n\n\t\/\/ start the source\n\terr := pipeline.source.Start()\n\tif err != nil && pipeline.Err == nil {\n\t\tpipeline.Err = err \/\/ only set it if it hasn't been set already.\n\t}\n\n\t\/\/ pipeline has stopped, emit one last round of metrics and send the exit event\n\tpipeline.emitMetrics()\n\tpipeline.setState()\n\tpipeline.source.pipe.Event <- events.NewExitEvent(time.Now().Unix(), VERSION, endpoints)\n\n\t\/\/ the source has exited, stop all the other nodes\n\tpipeline.Stop()\n\n\treturn pipeline.Err\n}\n\n\/\/ start error listener consumes all the events on the pipe's Err channel, and stops the pipeline\n\/\/ when it receives one\nfunc (pipeline *Pipeline) startErrorListener(cherr chan error) {\n\tfor err := range cherr {\n\t\tif aerr, ok := err.(adaptor.Error); ok {\n\t\t\tpipeline.source.pipe.Event <- events.NewErrorEvent(time.Now().Unix(), aerr.Path, aerr.Record, aerr.Error())\n\t\t\tif aerr.Lvl == adaptor.ERROR || aerr.Lvl == adaptor.CRITICAL {\n\t\t\t\tlog.Println(aerr)\n\t\t\t}\n\t\t} else {\n\t\t\tif pipeline.Err == nil {\n\t\t\t\tpipeline.Err = err\n\t\t\t}\n\t\t\tpipeline.Stop()\n\t\t}\n\t}\n}\n\nfunc (pipeline *Pipeline) startMetricsGatherer() {\n\tfor _ = range pipeline.metricsTicker.C {\n\t\tpipeline.emitMetrics()\n\t}\n}\n\n\/\/ emit the metrics\nfunc (pipeline *Pipeline) emitMetrics() {\n\n\tfrontier := make([]*Node, 1)\n\tfrontier[0] = pipeline.source\n\n\tfor {\n\t\t\/\/ pop the first item\n\t\tnode := frontier[0]\n\t\tfrontier = frontier[1:]\n\n\t\t\/\/ do something with the node\n\t\tpipeline.source.pipe.Event <- events.NewMetricsEvent(time.Now().Unix(), node.Path(), node.pipe.MessageCount)\n\n\t\t\/\/ add this nodes children to the frontier\n\t\tfor _, child := range node.Children {\n\t\t\tfrontier = append(frontier, child)\n\t\t}\n\n\t\t\/\/ if we're empty\n\t\tif len(frontier) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (pipeline *Pipeline) startStateSaver() {\n\tfor _ = range pipeline.sessionTicker.C {\n\t\tpipeline.setState()\n\t}\n}\n\nfunc (pipeline *Pipeline) setState() {\n\tfrontier := make([]*Node, 1)\n\tfrontier[0] = pipeline.source\n\n\tfor {\n\t\t\/\/ pop the first item\n\t\tnode := frontier[0]\n\t\tfrontier = frontier[1:]\n\n\t\t\/\/ do something with the node\n\t\tif node.Type != \"transformer\" && node.pipe.LastMsg != nil {\n\t\t\tpipeline.sessionStore.Set(node.Path(), &state.MsgState{Msg: node.pipe.LastMsg, Extra: node.pipe.ExtraState})\n\t\t}\n\n\t\t\/\/ add this nodes children to the frontier\n\t\tfor _, child := range node.Children {\n\t\t\tfrontier = append(frontier, child)\n\t\t}\n\n\t\t\/\/ if we're empty\n\t\tif len(frontier) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (pipeline *Pipeline) initState() {\n\tfrontier := make([]*Node, 1)\n\tfrontier[0] = pipeline.source\n\n\tfor {\n\t\t\/\/ pop the first item\n\t\tnode := frontier[0]\n\t\tfrontier = frontier[1:]\n\n\t\t\/\/ do something with the node\n\t\tif node.Type != \"transformer\" {\n\t\t\tnodeState, _ := pipeline.sessionStore.Get(node.Path())\n\t\t\tif nodeState != nil {\n\t\t\t\tnode.pipe.LastMsg = nodeState.Msg\n\t\t\t\tnode.pipe.ExtraState = nodeState.Extra\n\t\t\t}\n\t\t}\n\n\t\t\/\/ add this nodes children to the frontier\n\t\tfor _, child := range node.Children {\n\t\t\tfrontier = append(frontier, child)\n\t\t}\n\n\t\t\/\/ if we're empty\n\t\tif len(frontier) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package relay\n\nimport (\n\t\"time\"\n\n\t\"gopkg.in\/dedis\/onet.v1\/log\"\n\t\"strconv\"\n)\n\n\/*\nThis first timeout happens after a short delay. Clients will not be considered disconnected yet,\nbut if we use UDP, it can mean that a client missed a broadcast, and we re-sent the message.\nIf the round was *not* done, we do another timeout (Phase 2), and then, clients\/trustees will be considered\nonline if they didn't answer by that time.\n*\/\nfunc (p *PriFiLibRelayInstance) checkIfRoundHasEndedAfterTimeOut_Phase1(roundID int32) {\n\n\ttime.Sleep(TIMEOUT_PHASE_1)\n\n\tif !p.relayState.currentDCNetRound.isStillInRound(roundID) {\n\t\treturn \/\/everything went well, it's great !\n\t}\n\n\tif p.stateMachine.State() == \"SHUTDOWN\" {\n\t\treturn \/\/nothing to ensure in that case\n\t}\n\n\tlog.Error(\"waitAndCheckIfClientsSentData : We seem to be stuck in round\", roundID, \". Phase 1 timeout.\")\n\n\tmissingClientCiphers, missingTrusteesCiphers := p.relayState.bufferManager.MissingCiphersForCurrentRound()\n\n\t\/\/If we're using UDP, client might have missed the broadcast, re-sending\n\tif p.relayState.UseUDP {\n\t\tfor clientID := range missingClientCiphers {\n\t\t\tlog.Error(\"Relay : Client \" + strconv.Itoa(clientID) + \" didn't sent us is cipher for round \" + strconv.Itoa(int(roundID)) + \". Phase 1 timeout. Re-sending...\")\n\t\t\textraInfo := \"(client \" + strconv.Itoa(clientID) + \", round \" + strconv.Itoa(int(p.relayState.currentDCNetRound.CurrentRound())) + \")\"\n\t\t\tp.messageSender.SendToClientWithLog(clientID, p.relayState.currentDCNetRound.GetDataAlreadySent(), extraInfo)\n\t\t}\n\t}\n\n\tif len(missingClientCiphers) > 0 || len(missingTrusteesCiphers) > 0 {\n\t\t\/\/if we're not done (we miss data), wait another timeout, after which clients\/trustees will be considered offline\n\t\tgo p.checkIfRoundHasEndedAfterTimeOut_Phase2(roundID)\n\t}\n\n\t\/\/this shouldn't happen frequently (it means that the timeout 1 was fired, but the round finished almost at the same time)\n}\n\n\/*\nThis second timeout happens after a longer delay. Clients and trustees will be considered offline if they haven't send data yet\n*\/\nfunc (p *PriFiLibRelayInstance) checkIfRoundHasEndedAfterTimeOut_Phase2(roundID int32) {\n\n\ttime.Sleep(TIMEOUT_PHASE_2)\n\n\tif !p.relayState.currentDCNetRound.isStillInRound(roundID) {\n\t\t\/\/everything went well, it's great !\n\t\treturn\n\t}\n\n\tif p.stateMachine.State() == \"SHUTDOWN\" {\n\t\t\/\/nothing to ensure in that case\n\t\treturn\n\t}\n\n\tlog.Error(\"waitAndCheckIfClientsSentData : We seem to be stuck in round\", roundID, \". Phase 2 timeout.\")\n\n\tlog.Lvl3(\"Stopping experiment, if any.\")\n\toutput := make([]string, 1)\n\toutput[0] = \"aborted-round-\" + strconv.Itoa(int(roundID))\n\tp.relayState.ExperimentResultChannel <- output\n\n\tmissingClientCiphers, missingTrusteesCiphers := p.relayState.bufferManager.MissingCiphersForCurrentRound()\n\tp.relayState.timeoutHandler(missingClientCiphers, missingTrusteesCiphers)\n}\n<commit_msg>When timeout2 happens, sends also the partial simulation results<commit_after>package relay\n\nimport (\n\t\"time\"\n\n\t\"gopkg.in\/dedis\/onet.v1\/log\"\n\t\"strconv\"\n)\n\n\/*\nThis first timeout happens after a short delay. Clients will not be considered disconnected yet,\nbut if we use UDP, it can mean that a client missed a broadcast, and we re-sent the message.\nIf the round was *not* done, we do another timeout (Phase 2), and then, clients\/trustees will be considered\nonline if they didn't answer by that time.\n*\/\nfunc (p *PriFiLibRelayInstance) checkIfRoundHasEndedAfterTimeOut_Phase1(roundID int32) {\n\n\ttime.Sleep(TIMEOUT_PHASE_1)\n\n\tif !p.relayState.currentDCNetRound.isStillInRound(roundID) {\n\t\treturn \/\/everything went well, it's great !\n\t}\n\n\tif p.stateMachine.State() == \"SHUTDOWN\" {\n\t\treturn \/\/nothing to ensure in that case\n\t}\n\n\tlog.Error(\"waitAndCheckIfClientsSentData : We seem to be stuck in round\", roundID, \". Phase 1 timeout.\")\n\n\tmissingClientCiphers, missingTrusteesCiphers := p.relayState.bufferManager.MissingCiphersForCurrentRound()\n\n\t\/\/If we're using UDP, client might have missed the broadcast, re-sending\n\tif p.relayState.UseUDP {\n\t\tfor clientID := range missingClientCiphers {\n\t\t\tlog.Error(\"Relay : Client \" + strconv.Itoa(clientID) + \" didn't sent us is cipher for round \" + strconv.Itoa(int(roundID)) + \". Phase 1 timeout. Re-sending...\")\n\t\t\textraInfo := \"(client \" + strconv.Itoa(clientID) + \", round \" + strconv.Itoa(int(p.relayState.currentDCNetRound.CurrentRound())) + \")\"\n\t\t\tp.messageSender.SendToClientWithLog(clientID, p.relayState.currentDCNetRound.GetDataAlreadySent(), extraInfo)\n\t\t}\n\t}\n\n\tif len(missingClientCiphers) > 0 || len(missingTrusteesCiphers) > 0 {\n\t\t\/\/if we're not done (we miss data), wait another timeout, after which clients\/trustees will be considered offline\n\t\tgo p.checkIfRoundHasEndedAfterTimeOut_Phase2(roundID)\n\t}\n\n\t\/\/this shouldn't happen frequently (it means that the timeout 1 was fired, but the round finished almost at the same time)\n}\n\n\/*\nThis second timeout happens after a longer delay. Clients and trustees will be considered offline if they haven't send data yet\n*\/\nfunc (p *PriFiLibRelayInstance) checkIfRoundHasEndedAfterTimeOut_Phase2(roundID int32) {\n\n\ttime.Sleep(TIMEOUT_PHASE_2)\n\n\tif !p.relayState.currentDCNetRound.isStillInRound(roundID) {\n\t\t\/\/everything went well, it's great !\n\t\treturn\n\t}\n\n\tif p.stateMachine.State() == \"SHUTDOWN\" {\n\t\t\/\/nothing to ensure in that case\n\t\treturn\n\t}\n\n\tlog.Error(\"waitAndCheckIfClientsSentData : We seem to be stuck in round\", roundID, \". Phase 2 timeout.\")\n\n\tlog.Lvl3(\"Stopping experiment, if any.\")\n\toutput := p.relayState.ExperimentResultData\n\toutput = append(output, \"!!aborted-round-\"+strconv.Itoa(int(roundID)))\n\tp.relayState.ExperimentResultChannel <- output\n\n\tmissingClientCiphers, missingTrusteesCiphers := p.relayState.bufferManager.MissingCiphersForCurrentRound()\n\tp.relayState.timeoutHandler(missingClientCiphers, missingTrusteesCiphers)\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"testing\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/codeartifact\"\n\t\"github.com\/hashicorp\/go-multierror\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/acctest\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/terraform\"\n)\n\nfunc init() {\n\tresource.AddTestSweepers(\"aws_codeartifact_repository\", &resource.Sweeper{\n\t\tName: \"aws_codeartifact_repository\",\n\t\tF: testSweepCodeArtifactRepositories,\n\t})\n}\n\nfunc testSweepCodeArtifactRepositories(region string) error {\n\tclient, err := sharedClientForRegion(region)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting client: %w\", err)\n\t}\n\tconn := client.(*AWSClient).codeartifactconn\n\tinput := &codeartifact.ListRepositoriesInput{}\n\tvar sweeperErrs *multierror.Error\n\n\terr = conn.ListRepositoriesPages(input, func(page *codeartifact.ListRepositoriesOutput, lastPage bool) bool {\n\t\tfor _, repositoryPtr := range page.Repositories {\n\t\t\tif repositoryPtr == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\trepository := aws.StringValue(repositoryPtr.Name)\n\t\t\tinput := &codeartifact.DeleteRepositoryInput{\n\t\t\t\tRepository: repositoryPtr.Name,\n\t\t\t\tDomain: repositoryPtr.DomainName,\n\t\t\t\tDomainOwner: repositoryPtr.DomainOwner,\n\t\t\t}\n\n\t\t\tlog.Printf(\"[INFO] Deleting CodeArtifact Repository: %s\", repository)\n\n\t\t\t_, err := conn.DeleteRepository(input)\n\n\t\t\tif err != nil {\n\t\t\t\tsweeperErr := fmt.Errorf(\"error deleting CodeArtifact Repository (%s): %w\", repository, err)\n\t\t\t\tlog.Printf(\"[ERROR] %s\", sweeperErr)\n\t\t\t\tsweeperErrs = multierror.Append(sweeperErrs, sweeperErr)\n\t\t\t}\n\t\t}\n\n\t\treturn !lastPage\n\t})\n\n\tif testSweepSkipSweepError(err) {\n\t\tlog.Printf(\"[WARN] Skipping CodeArtifact Repository sweep for %s: %s\", region, err)\n\t\treturn nil\n\t}\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error listing CodeArtifact Repositories: %w\", err)\n\t}\n\n\treturn sweeperErrs.ErrorOrNil()\n}\n\nfunc TestAccAWSCodeArtifactRepository_basic(t *testing.T) {\n\trName := acctest.RandomWithPrefix(\"tf-acc-test\")\n\tresourceName := \"aws_codeartifact_repository.test\"\n\n\tresource.ParallelTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSCodeArtifactRepositoryDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccAWSCodeArtifactRepositoryBasicConfig(rName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSCodeArtifactRepositoryExists(resourceName),\n\t\t\t\t\ttestAccCheckResourceAttrRegionalARN(resourceName, \"arn\", \"codeartifact\", fmt.Sprintf(\"repository\/%s\/%s\", rName, rName)),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"repository\", rName),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"domain\", rName),\n\t\t\t\t\tresource.TestCheckResourceAttrPair(resourceName, \"domain_owner\", \"aws_codeartifact_domain\", \"owner\"),\n\t\t\t\t\tresource.TestCheckResourceAttrPair(resourceName, \"administrator_account\", \"aws_codeartifact_domain\", \"owner\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"description\", \"\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"upstream.#\", \"0\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"external_connections.#\", \"0\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceName: resourceName,\n\t\t\t\tImportState: true,\n\t\t\t\tImportStateVerify: true,\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccAWSCodeArtifactRepository_owner(t *testing.T) {\n\trName := acctest.RandomWithPrefix(\"tf-acc-test\")\n\tresourceName := \"aws_codeartifact_repository.test\"\n\n\tresource.ParallelTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSCodeArtifactRepositoryDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccAWSCodeArtifactRepositoryOwnerConfig(rName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSCodeArtifactRepositoryExists(resourceName),\n\t\t\t\t\ttestAccCheckResourceAttrRegionalARN(resourceName, \"arn\", \"codeartifact\", fmt.Sprintf(\"repository\/%s\/%s\", rName, rName)),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"repository\", rName),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"domain\", rName),\n\t\t\t\t\tresource.TestCheckResourceAttrPair(resourceName, \"domain_owner\", \"aws_codeartifact_domain\", \"owner\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"description\", \"\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"upstream.#\", \"0\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"external_connections.#\", \"0\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceName: resourceName,\n\t\t\t\tImportState: true,\n\t\t\t\tImportStateVerify: true,\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccAWSCodeArtifactRepository_description(t *testing.T) {\n\trName := acctest.RandomWithPrefix(\"tf-acc-test\")\n\tresourceName := \"aws_codeartifact_repository.test\"\n\n\tresource.ParallelTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSCodeArtifactRepositoryDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccAWSCodeArtifactRepositoryDescConfig(rName, \"desc\"),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSCodeArtifactRepositoryExists(resourceName),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"description\", \"desc\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceName: resourceName,\n\t\t\t\tImportState: true,\n\t\t\t\tImportStateVerify: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccAWSCodeArtifactRepositoryDescConfig(rName, \"desc2\"),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSCodeArtifactRepositoryExists(resourceName),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"description\", \"desc2\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccAWSCodeArtifactRepository_upstreams(t *testing.T) {\n\trName := acctest.RandomWithPrefix(\"tf-acc-test\")\n\tresourceName := \"aws_codeartifact_repository.test\"\n\n\tresource.ParallelTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSCodeArtifactRepositoryDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccAWSCodeArtifactRepositoryUpstreamsConfig1(rName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSCodeArtifactRepositoryExists(resourceName),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"upstream.#\", \"1\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"upstream.0.repository_name\", fmt.Sprintf(\"%s-upstream1\", rName)),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceName: resourceName,\n\t\t\t\tImportState: true,\n\t\t\t\tImportStateVerify: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccAWSCodeArtifactRepositoryUpstreamsConfig2(rName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSCodeArtifactRepositoryExists(resourceName),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"upstream.#\", \"2\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"upstream.0.repository_name\", fmt.Sprintf(\"%s-upstream1\", rName)),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"upstream.1.repository_name\", fmt.Sprintf(\"%s-upstream2\", rName)),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccAWSCodeArtifactRepositoryUpstreamsConfig1(rName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSCodeArtifactRepositoryExists(resourceName),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"upstream.#\", \"1\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"upstream.0.repository_name\", fmt.Sprintf(\"%s-upstream1\", rName)),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccAWSCodeArtifactRepository_disappears(t *testing.T) {\n\trName := acctest.RandomWithPrefix(\"tf-acc-test\")\n\tresourceName := \"aws_codeartifact_repository.test\"\n\n\tresource.ParallelTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSCodeArtifactRepositoryDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccAWSCodeArtifactRepositoryBasicConfig(rName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSCodeArtifactRepositoryExists(resourceName),\n\t\t\t\t\ttestAccCheckResourceDisappears(testAccProvider, resourceAwsCodeArtifactRepository(), resourceName),\n\t\t\t\t),\n\t\t\t\tExpectNonEmptyPlan: true,\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc testAccCheckAWSCodeArtifactRepositoryExists(n string) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"no CodeArtifact repository set\")\n\t\t}\n\n\t\tconn := testAccProvider.Meta().(*AWSClient).codeartifactconn\n\t\towner, domain, repo, err := decodeCodeArtifactRepositoryID(rs.Primary.ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = conn.DescribeRepository(&codeartifact.DescribeRepositoryInput{\n\t\t\tRepository: aws.String(repo),\n\t\t\tDomain: aws.String(domain),\n\t\t\tDomainOwner: aws.String(owner),\n\t\t})\n\n\t\treturn err\n\t}\n}\n\nfunc testAccCheckAWSCodeArtifactRepositoryDestroy(s *terraform.State) error {\n\tfor _, rs := range s.RootModule().Resources {\n\t\tif rs.Type != \"aws_codeartifact_repository\" {\n\t\t\tcontinue\n\t\t}\n\n\t\towner, domain, repo, err := decodeCodeArtifactRepositoryID(rs.Primary.ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconn := testAccProvider.Meta().(*AWSClient).codeartifactconn\n\t\tresp, err := conn.DescribeRepository(&codeartifact.DescribeRepositoryInput{\n\t\t\tRepository: aws.String(repo),\n\t\t\tDomain: aws.String(domain),\n\t\t\tDomainOwner: aws.String(owner),\n\t\t})\n\n\t\tif err == nil {\n\t\t\tif aws.StringValue(resp.Repository.Name) == repo &&\n\t\t\t\taws.StringValue(resp.Repository.DomainName) == domain &&\n\t\t\t\taws.StringValue(resp.Repository.DomainOwner) == owner {\n\t\t\t\treturn fmt.Errorf(\"CodeArtifact Repository %s in Domain %s still exists\", repo, domain)\n\t\t\t}\n\t\t}\n\n\t\tif isAWSErr(err, codeartifact.ErrCodeResourceNotFoundException, \"\") {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc testAccAWSCodeArtifactRepositoryBasicConfig(rName string) string {\n\treturn fmt.Sprintf(`\nresource \"aws_kms_key\" \"test\" {\n description = %[1]q\n deletion_window_in_days = 7\n}\n\nresource \"aws_codeartifact_domain\" \"test\" {\n domain = %[1]q\n encryption_key = aws_kms_key.test.arn\n}\n\nresource \"aws_codeartifact_repository\" \"test\" {\n repository = %[1]q\n domain = aws_codeartifact_domain.test.domain\n}\n`, rName)\n}\n\nfunc testAccAWSCodeArtifactRepositoryOwnerConfig(rName string) string {\n\treturn fmt.Sprintf(`\nresource \"aws_kms_key\" \"test\" {\n description = %[1]q\n deletion_window_in_days = 7\n}\n\nresource \"aws_codeartifact_domain\" \"test\" {\n domain = %[1]q\n encryption_key = aws_kms_key.test.arn\n}\n\nresource \"aws_codeartifact_repository\" \"test\" {\n repository = %[1]q\n domain = aws_codeartifact_domain.test.domain\n domain_owner = aws_codeartifact_domain.test.owner\n}\n`, rName)\n}\n\nfunc testAccAWSCodeArtifactRepositoryDescConfig(rName, desc string) string {\n\treturn fmt.Sprintf(`\nresource \"aws_kms_key\" \"test\" {\n description = %[1]q\n deletion_window_in_days = 7\n}\n\nresource \"aws_codeartifact_domain\" \"test\" {\n domain = %[1]q\n encryption_key = aws_kms_key.test.arn\n}\n\nresource \"aws_codeartifact_repository\" \"test\" {\n repository = %[1]q\n domain = aws_codeartifact_domain.test.domain\n description = %[2]q\n}\n`, rName, desc)\n}\n\nfunc testAccAWSCodeArtifactRepositoryUpstreamsConfig1(rName string) string {\n\treturn fmt.Sprintf(`\nresource \"aws_kms_key\" \"test\" {\n description = %[1]q\n deletion_window_in_days = 7\n}\n\nresource \"aws_codeartifact_domain\" \"test\" {\n domain = %[1]q\n encryption_key = aws_kms_key.test.arn\n}\n\nresource \"aws_codeartifact_repository\" \"upstream1\" {\n repository = \"%[1]s-upstream1\"\n domain = aws_codeartifact_domain.test.domain\n}\n\nresource \"aws_codeartifact_repository\" \"test\" {\n repository = %[1]q\n domain = aws_codeartifact_domain.test.domain\n\n upstream {\n repository_name = aws_codeartifact_repository.upstream1.repository\n }\n}\n`, rName)\n}\n\nfunc testAccAWSCodeArtifactRepositoryUpstreamsConfig2(rName string) string {\n\treturn fmt.Sprintf(`\nresource \"aws_kms_key\" \"test\" {\n description = %[1]q\n deletion_window_in_days = 7\n}\n\nresource \"aws_codeartifact_domain\" \"test\" {\n domain = %[1]q\n encryption_key = aws_kms_key.test.arn\n}\n\nresource \"aws_codeartifact_repository\" \"upstream1\" {\n repository = \"%[1]s-upstream1\"\n domain = aws_codeartifact_domain.test.domain\n}\n\nresource \"aws_codeartifact_repository\" \"upstream2\" {\n repository = \"%[1]s-upstream2\"\n domain = aws_codeartifact_domain.test.domain\n}\n\nresource \"aws_codeartifact_repository\" \"test\" {\n repository = %[1]q\n domain = aws_codeartifact_domain.test.domain\n\n upstream {\n repository_name = aws_codeartifact_repository.upstream1.repository\n }\n\n upstream {\n repository_name = aws_codeartifact_repository.upstream2.repository\n }\n}\n`, rName)\n}\n<commit_msg>add owner test<commit_after>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"testing\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/codeartifact\"\n\t\"github.com\/hashicorp\/go-multierror\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/acctest\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/terraform\"\n)\n\nfunc init() {\n\tresource.AddTestSweepers(\"aws_codeartifact_repository\", &resource.Sweeper{\n\t\tName: \"aws_codeartifact_repository\",\n\t\tF: testSweepCodeArtifactRepositories,\n\t})\n}\n\nfunc testSweepCodeArtifactRepositories(region string) error {\n\tclient, err := sharedClientForRegion(region)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting client: %w\", err)\n\t}\n\tconn := client.(*AWSClient).codeartifactconn\n\tinput := &codeartifact.ListRepositoriesInput{}\n\tvar sweeperErrs *multierror.Error\n\n\terr = conn.ListRepositoriesPages(input, func(page *codeartifact.ListRepositoriesOutput, lastPage bool) bool {\n\t\tfor _, repositoryPtr := range page.Repositories {\n\t\t\tif repositoryPtr == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\trepository := aws.StringValue(repositoryPtr.Name)\n\t\t\tinput := &codeartifact.DeleteRepositoryInput{\n\t\t\t\tRepository: repositoryPtr.Name,\n\t\t\t\tDomain: repositoryPtr.DomainName,\n\t\t\t\tDomainOwner: repositoryPtr.DomainOwner,\n\t\t\t}\n\n\t\t\tlog.Printf(\"[INFO] Deleting CodeArtifact Repository: %s\", repository)\n\n\t\t\t_, err := conn.DeleteRepository(input)\n\n\t\t\tif err != nil {\n\t\t\t\tsweeperErr := fmt.Errorf(\"error deleting CodeArtifact Repository (%s): %w\", repository, err)\n\t\t\t\tlog.Printf(\"[ERROR] %s\", sweeperErr)\n\t\t\t\tsweeperErrs = multierror.Append(sweeperErrs, sweeperErr)\n\t\t\t}\n\t\t}\n\n\t\treturn !lastPage\n\t})\n\n\tif testSweepSkipSweepError(err) {\n\t\tlog.Printf(\"[WARN] Skipping CodeArtifact Repository sweep for %s: %s\", region, err)\n\t\treturn nil\n\t}\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error listing CodeArtifact Repositories: %w\", err)\n\t}\n\n\treturn sweeperErrs.ErrorOrNil()\n}\n\nfunc TestAccAWSCodeArtifactRepository_basic(t *testing.T) {\n\trName := acctest.RandomWithPrefix(\"tf-acc-test\")\n\tresourceName := \"aws_codeartifact_repository.test\"\n\n\tresource.ParallelTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSCodeArtifactRepositoryDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccAWSCodeArtifactRepositoryBasicConfig(rName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSCodeArtifactRepositoryExists(resourceName),\n\t\t\t\t\ttestAccCheckResourceAttrRegionalARN(resourceName, \"arn\", \"codeartifact\", fmt.Sprintf(\"repository\/%s\/%s\", rName, rName)),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"repository\", rName),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"domain\", rName),\n\t\t\t\t\tresource.TestCheckResourceAttrPair(resourceName, \"domain_owner\", \"aws_codeartifact_domain.test\", \"owner\"),\n\t\t\t\t\tresource.TestCheckResourceAttrPair(resourceName, \"administrator_account\", \"aws_codeartifact_domain.test\", \"owner\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"description\", \"\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"upstream.#\", \"0\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"external_connections.#\", \"0\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceName: resourceName,\n\t\t\t\tImportState: true,\n\t\t\t\tImportStateVerify: true,\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccAWSCodeArtifactRepository_owner(t *testing.T) {\n\trName := acctest.RandomWithPrefix(\"tf-acc-test\")\n\tresourceName := \"aws_codeartifact_repository.test\"\n\n\tresource.ParallelTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSCodeArtifactRepositoryDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccAWSCodeArtifactRepositoryOwnerConfig(rName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSCodeArtifactRepositoryExists(resourceName),\n\t\t\t\t\ttestAccCheckResourceAttrRegionalARN(resourceName, \"arn\", \"codeartifact\", fmt.Sprintf(\"repository\/%s\/%s\", rName, rName)),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"repository\", rName),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"domain\", rName),\n\t\t\t\t\tresource.TestCheckResourceAttrPair(resourceName, \"domain_owner\", \"aws_codeartifact_domain.test\", \"owner\"),\n\t\t\t\t\tresource.TestCheckResourceAttrPair(resourceName, \"administrator_account\", \"aws_codeartifact_domain.test\", \"owner\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"description\", \"\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"upstream.#\", \"0\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"external_connections.#\", \"0\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceName: resourceName,\n\t\t\t\tImportState: true,\n\t\t\t\tImportStateVerify: true,\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccAWSCodeArtifactRepository_description(t *testing.T) {\n\trName := acctest.RandomWithPrefix(\"tf-acc-test\")\n\tresourceName := \"aws_codeartifact_repository.test\"\n\n\tresource.ParallelTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSCodeArtifactRepositoryDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccAWSCodeArtifactRepositoryDescConfig(rName, \"desc\"),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSCodeArtifactRepositoryExists(resourceName),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"description\", \"desc\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceName: resourceName,\n\t\t\t\tImportState: true,\n\t\t\t\tImportStateVerify: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccAWSCodeArtifactRepositoryDescConfig(rName, \"desc2\"),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSCodeArtifactRepositoryExists(resourceName),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"description\", \"desc2\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccAWSCodeArtifactRepository_upstreams(t *testing.T) {\n\trName := acctest.RandomWithPrefix(\"tf-acc-test\")\n\tresourceName := \"aws_codeartifact_repository.test\"\n\n\tresource.ParallelTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSCodeArtifactRepositoryDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccAWSCodeArtifactRepositoryUpstreamsConfig1(rName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSCodeArtifactRepositoryExists(resourceName),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"upstream.#\", \"1\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"upstream.0.repository_name\", fmt.Sprintf(\"%s-upstream1\", rName)),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceName: resourceName,\n\t\t\t\tImportState: true,\n\t\t\t\tImportStateVerify: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccAWSCodeArtifactRepositoryUpstreamsConfig2(rName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSCodeArtifactRepositoryExists(resourceName),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"upstream.#\", \"2\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"upstream.0.repository_name\", fmt.Sprintf(\"%s-upstream1\", rName)),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"upstream.1.repository_name\", fmt.Sprintf(\"%s-upstream2\", rName)),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccAWSCodeArtifactRepositoryUpstreamsConfig1(rName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSCodeArtifactRepositoryExists(resourceName),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"upstream.#\", \"1\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(resourceName, \"upstream.0.repository_name\", fmt.Sprintf(\"%s-upstream1\", rName)),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccAWSCodeArtifactRepository_disappears(t *testing.T) {\n\trName := acctest.RandomWithPrefix(\"tf-acc-test\")\n\tresourceName := \"aws_codeartifact_repository.test\"\n\n\tresource.ParallelTest(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSCodeArtifactRepositoryDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccAWSCodeArtifactRepositoryBasicConfig(rName),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckAWSCodeArtifactRepositoryExists(resourceName),\n\t\t\t\t\ttestAccCheckResourceDisappears(testAccProvider, resourceAwsCodeArtifactRepository(), resourceName),\n\t\t\t\t),\n\t\t\t\tExpectNonEmptyPlan: true,\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc testAccCheckAWSCodeArtifactRepositoryExists(n string) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"no CodeArtifact repository set\")\n\t\t}\n\n\t\tconn := testAccProvider.Meta().(*AWSClient).codeartifactconn\n\t\towner, domain, repo, err := decodeCodeArtifactRepositoryID(rs.Primary.ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = conn.DescribeRepository(&codeartifact.DescribeRepositoryInput{\n\t\t\tRepository: aws.String(repo),\n\t\t\tDomain: aws.String(domain),\n\t\t\tDomainOwner: aws.String(owner),\n\t\t})\n\n\t\treturn err\n\t}\n}\n\nfunc testAccCheckAWSCodeArtifactRepositoryDestroy(s *terraform.State) error {\n\tfor _, rs := range s.RootModule().Resources {\n\t\tif rs.Type != \"aws_codeartifact_repository\" {\n\t\t\tcontinue\n\t\t}\n\n\t\towner, domain, repo, err := decodeCodeArtifactRepositoryID(rs.Primary.ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconn := testAccProvider.Meta().(*AWSClient).codeartifactconn\n\t\tresp, err := conn.DescribeRepository(&codeartifact.DescribeRepositoryInput{\n\t\t\tRepository: aws.String(repo),\n\t\t\tDomain: aws.String(domain),\n\t\t\tDomainOwner: aws.String(owner),\n\t\t})\n\n\t\tif err == nil {\n\t\t\tif aws.StringValue(resp.Repository.Name) == repo &&\n\t\t\t\taws.StringValue(resp.Repository.DomainName) == domain &&\n\t\t\t\taws.StringValue(resp.Repository.DomainOwner) == owner {\n\t\t\t\treturn fmt.Errorf(\"CodeArtifact Repository %s in Domain %s still exists\", repo, domain)\n\t\t\t}\n\t\t}\n\n\t\tif isAWSErr(err, codeartifact.ErrCodeResourceNotFoundException, \"\") {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc testAccAWSCodeArtifactRepositoryBasicConfig(rName string) string {\n\treturn fmt.Sprintf(`\nresource \"aws_kms_key\" \"test\" {\n description = %[1]q\n deletion_window_in_days = 7\n}\n\nresource \"aws_codeartifact_domain\" \"test\" {\n domain = %[1]q\n encryption_key = aws_kms_key.test.arn\n}\n\nresource \"aws_codeartifact_repository\" \"test\" {\n repository = %[1]q\n domain = aws_codeartifact_domain.test.domain\n}\n`, rName)\n}\n\nfunc testAccAWSCodeArtifactRepositoryOwnerConfig(rName string) string {\n\treturn fmt.Sprintf(`\nresource \"aws_kms_key\" \"test\" {\n description = %[1]q\n deletion_window_in_days = 7\n}\n\nresource \"aws_codeartifact_domain\" \"test\" {\n domain = %[1]q\n encryption_key = aws_kms_key.test.arn\n}\n\nresource \"aws_codeartifact_repository\" \"test\" {\n repository = %[1]q\n domain = aws_codeartifact_domain.test.domain\n domain_owner = aws_codeartifact_domain.test.owner\n}\n`, rName)\n}\n\nfunc testAccAWSCodeArtifactRepositoryDescConfig(rName, desc string) string {\n\treturn fmt.Sprintf(`\nresource \"aws_kms_key\" \"test\" {\n description = %[1]q\n deletion_window_in_days = 7\n}\n\nresource \"aws_codeartifact_domain\" \"test\" {\n domain = %[1]q\n encryption_key = aws_kms_key.test.arn\n}\n\nresource \"aws_codeartifact_repository\" \"test\" {\n repository = %[1]q\n domain = aws_codeartifact_domain.test.domain\n description = %[2]q\n}\n`, rName, desc)\n}\n\nfunc testAccAWSCodeArtifactRepositoryUpstreamsConfig1(rName string) string {\n\treturn fmt.Sprintf(`\nresource \"aws_kms_key\" \"test\" {\n description = %[1]q\n deletion_window_in_days = 7\n}\n\nresource \"aws_codeartifact_domain\" \"test\" {\n domain = %[1]q\n encryption_key = aws_kms_key.test.arn\n}\n\nresource \"aws_codeartifact_repository\" \"upstream1\" {\n repository = \"%[1]s-upstream1\"\n domain = aws_codeartifact_domain.test.domain\n}\n\nresource \"aws_codeartifact_repository\" \"test\" {\n repository = %[1]q\n domain = aws_codeartifact_domain.test.domain\n\n upstream {\n repository_name = aws_codeartifact_repository.upstream1.repository\n }\n}\n`, rName)\n}\n\nfunc testAccAWSCodeArtifactRepositoryUpstreamsConfig2(rName string) string {\n\treturn fmt.Sprintf(`\nresource \"aws_kms_key\" \"test\" {\n description = %[1]q\n deletion_window_in_days = 7\n}\n\nresource \"aws_codeartifact_domain\" \"test\" {\n domain = %[1]q\n encryption_key = aws_kms_key.test.arn\n}\n\nresource \"aws_codeartifact_repository\" \"upstream1\" {\n repository = \"%[1]s-upstream1\"\n domain = aws_codeartifact_domain.test.domain\n}\n\nresource \"aws_codeartifact_repository\" \"upstream2\" {\n repository = \"%[1]s-upstream2\"\n domain = aws_codeartifact_domain.test.domain\n}\n\nresource \"aws_codeartifact_repository\" \"test\" {\n repository = %[1]q\n domain = aws_codeartifact_domain.test.domain\n\n upstream {\n repository_name = aws_codeartifact_repository.upstream1.repository\n }\n\n upstream {\n repository_name = aws_codeartifact_repository.upstream2.repository\n }\n}\n`, rName)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2021 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage wrappers\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"net\/http\"\n\t\"syscall\"\n\n\t\"github.com\/jacobsa\/fuse\/fuseops\"\n\t\"github.com\/jacobsa\/fuse\/fuseutil\"\n\t\"google.golang.org\/api\/googleapi\"\n)\n\nfunc errno(err error) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Use existing FS errno\n\tvar errno syscall.Errno\n\tif errors.As(err, &errno) {\n\t\treturn errno\n\t}\n\n\t\/\/ FS op is interrupted\n\tif errors.Is(err, context.Canceled) {\n\t\treturn syscall.EINTR\n\t}\n\n\t\/\/ Translate API errors into an FS errno\n\tvar apiErr *googleapi.Error\n\tif errors.As(err, &apiErr) {\n\t\tswitch apiErr.Code {\n\t\tcase http.StatusForbidden:\n\t\t\treturn syscall.EACCES\n\t\t}\n\t}\n\n\t\/\/ Unknown errors\n\treturn syscall.EIO\n}\n\n\/\/ WithErrorMapping wraps a FileSystem, processing the returned errors, and\n\/\/ mapping them into syscall.Errno that can be understood by FUSE.\nfunc WithErrorMapping(wrapped fuseutil.FileSystem) fuseutil.FileSystem {\n\treturn &errorMapping{wrapped: wrapped}\n}\n\ntype errorMapping struct {\n\twrapped fuseutil.FileSystem\n}\n\nfunc (fs *errorMapping) Destroy() {\n\tfs.wrapped.Destroy()\n}\n\nfunc (fs *errorMapping) StatFS(\n\tctx context.Context,\n\top *fuseops.StatFSOp) error {\n\terr := fs.wrapped.StatFS(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) LookUpInode(\n\tctx context.Context,\n\top *fuseops.LookUpInodeOp) error {\n\terr := fs.wrapped.LookUpInode(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) GetInodeAttributes(\n\tctx context.Context,\n\top *fuseops.GetInodeAttributesOp) error {\n\terr := fs.wrapped.GetInodeAttributes(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) SetInodeAttributes(\n\tctx context.Context,\n\top *fuseops.SetInodeAttributesOp) error {\n\terr := fs.wrapped.SetInodeAttributes(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) ForgetInode(\n\tctx context.Context,\n\top *fuseops.ForgetInodeOp) error {\n\terr := fs.wrapped.ForgetInode(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) MkDir(\n\tctx context.Context,\n\top *fuseops.MkDirOp) error {\n\terr := fs.wrapped.MkDir(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) MkNode(\n\tctx context.Context,\n\top *fuseops.MkNodeOp) error {\n\terr := fs.wrapped.MkNode(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) CreateFile(\n\tctx context.Context,\n\top *fuseops.CreateFileOp) error {\n\terr := fs.wrapped.CreateFile(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) CreateLink(\n\tctx context.Context,\n\top *fuseops.CreateLinkOp) error {\n\terr := fs.wrapped.CreateLink(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) CreateSymlink(\n\tctx context.Context,\n\top *fuseops.CreateSymlinkOp) error {\n\terr := fs.wrapped.CreateSymlink(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) Rename(\n\tctx context.Context,\n\top *fuseops.RenameOp) error {\n\terr := fs.wrapped.Rename(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) RmDir(\n\tctx context.Context,\n\top *fuseops.RmDirOp) error {\n\terr := fs.wrapped.RmDir(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) Unlink(\n\tctx context.Context,\n\top *fuseops.UnlinkOp) error {\n\terr := fs.wrapped.Unlink(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) OpenDir(\n\tctx context.Context,\n\top *fuseops.OpenDirOp) error {\n\terr := fs.wrapped.OpenDir(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) ReadDir(\n\tctx context.Context,\n\top *fuseops.ReadDirOp) error {\n\terr := fs.wrapped.ReadDir(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) ReleaseDirHandle(\n\tctx context.Context,\n\top *fuseops.ReleaseDirHandleOp) error {\n\terr := fs.wrapped.ReleaseDirHandle(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) OpenFile(\n\tctx context.Context,\n\top *fuseops.OpenFileOp) error {\n\terr := fs.wrapped.OpenFile(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) ReadFile(\n\tctx context.Context,\n\top *fuseops.ReadFileOp) error {\n\terr := fs.wrapped.ReadFile(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) WriteFile(\n\tctx context.Context,\n\top *fuseops.WriteFileOp) error {\n\terr := fs.wrapped.WriteFile(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) SyncFile(\n\tctx context.Context,\n\top *fuseops.SyncFileOp) error {\n\terr := fs.wrapped.SyncFile(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) FlushFile(\n\tctx context.Context,\n\top *fuseops.FlushFileOp) error {\n\terr := fs.wrapped.FlushFile(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) ReleaseFileHandle(\n\tctx context.Context,\n\top *fuseops.ReleaseFileHandleOp) error {\n\terr := fs.wrapped.ReleaseFileHandle(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) ReadSymlink(\n\tctx context.Context,\n\top *fuseops.ReadSymlinkOp) error {\n\terr := fs.wrapped.ReadSymlink(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) RemoveXattr(\n\tctx context.Context,\n\top *fuseops.RemoveXattrOp) error {\n\terr := fs.wrapped.RemoveXattr(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) GetXattr(\n\tctx context.Context,\n\top *fuseops.GetXattrOp) error {\n\terr := fs.wrapped.GetXattr(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) ListXattr(\n\tctx context.Context,\n\top *fuseops.ListXattrOp) error {\n\terr := fs.wrapped.ListXattr(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) SetXattr(\n\tctx context.Context,\n\top *fuseops.SetXattrOp) error {\n\terr := fs.wrapped.SetXattr(ctx, op)\n\treturn errno(err)\n}\n\nfunc (fs *errorMapping) Fallocate(\n\tctx context.Context,\n\top *fuseops.FallocateOp) error {\n\terr := fs.wrapped.Fallocate(ctx, op)\n\treturn errno(err)\n}\n<commit_msg>Log file system op errors before they are converted into an errno<commit_after>\/\/ Copyright 2021 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage wrappers\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"log\"\n\t\"net\/http\"\n\t\"syscall\"\n\n\t\"github.com\/googlecloudplatform\/gcsfuse\/internal\/logger\"\n\t\"github.com\/jacobsa\/fuse\/fuseops\"\n\t\"github.com\/jacobsa\/fuse\/fuseutil\"\n\t\"google.golang.org\/api\/googleapi\"\n)\n\nfunc errno(err error) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Use existing em errno\n\tvar errno syscall.Errno\n\tif errors.As(err, &errno) {\n\t\treturn errno\n\t}\n\n\t\/\/ em op is interrupted\n\tif errors.Is(err, context.Canceled) {\n\t\treturn syscall.EINTR\n\t}\n\n\t\/\/ Translate API errors into an em errno\n\tvar apiErr *googleapi.Error\n\tif errors.As(err, &apiErr) {\n\t\tswitch apiErr.Code {\n\t\tcase http.StatusForbidden:\n\t\t\treturn syscall.EACCES\n\t\t}\n\t}\n\n\t\/\/ Unknown errors\n\treturn syscall.EIO\n}\n\n\/\/ WithErrorMapping wraps a FileSystem, processing the returned errors, and\n\/\/ mapping them into syscall.Errno that can be understood by FUSE.\nfunc WithErrorMapping(wrapped fuseutil.FileSystem) fuseutil.FileSystem {\n\treturn &errorMapping{\n\t\twrapped: wrapped,\n\t\tlogger: logger.NewError(\"\"),\n\t}\n}\n\ntype errorMapping struct {\n\twrapped fuseutil.FileSystem\n\tlogger *log.Logger\n}\n\nfunc (em *errorMapping) mapError(op string, err error) error {\n\tfsErr := errno(err)\n\tem.logger.Printf(\"%s: %v: %v\", op, fsErr, err)\n\treturn fsErr\n}\n\nfunc (em *errorMapping) Destroy() {\n\tem.wrapped.Destroy()\n}\n\nfunc (em *errorMapping) StatFS(\n\tctx context.Context,\n\top *fuseops.StatFSOp) error {\n\terr := em.wrapped.StatFS(ctx, op)\n\treturn em.mapError(\"StatFS\", err)\n}\n\nfunc (em *errorMapping) LookUpInode(\n\tctx context.Context,\n\top *fuseops.LookUpInodeOp) error {\n\terr := em.wrapped.LookUpInode(ctx, op)\n\treturn em.mapError(\"LookUpInode\", err)\n}\n\nfunc (em *errorMapping) GetInodeAttributes(\n\tctx context.Context,\n\top *fuseops.GetInodeAttributesOp) error {\n\terr := em.wrapped.GetInodeAttributes(ctx, op)\n\treturn em.mapError(\"GetInodeAttributes\", err)\n}\n\nfunc (em *errorMapping) SetInodeAttributes(\n\tctx context.Context,\n\top *fuseops.SetInodeAttributesOp) error {\n\terr := em.wrapped.SetInodeAttributes(ctx, op)\n\treturn em.mapError(\"SetInodeAttributes\", err)\n}\n\nfunc (em *errorMapping) ForgetInode(\n\tctx context.Context,\n\top *fuseops.ForgetInodeOp) error {\n\terr := em.wrapped.ForgetInode(ctx, op)\n\treturn em.mapError(\"ForgetInode\", err)\n}\n\nfunc (em *errorMapping) MkDir(\n\tctx context.Context,\n\top *fuseops.MkDirOp) error {\n\terr := em.wrapped.MkDir(ctx, op)\n\treturn em.mapError(\"MkDir\", err)\n}\n\nfunc (em *errorMapping) MkNode(\n\tctx context.Context,\n\top *fuseops.MkNodeOp) error {\n\terr := em.wrapped.MkNode(ctx, op)\n\treturn em.mapError(\"MkNode\", err)\n}\n\nfunc (em *errorMapping) CreateFile(\n\tctx context.Context,\n\top *fuseops.CreateFileOp) error {\n\terr := em.wrapped.CreateFile(ctx, op)\n\treturn em.mapError(\"CreateFile\", err)\n}\n\nfunc (em *errorMapping) CreateLink(\n\tctx context.Context,\n\top *fuseops.CreateLinkOp) error {\n\terr := em.wrapped.CreateLink(ctx, op)\n\treturn em.mapError(\"CreateLink\", err)\n}\n\nfunc (em *errorMapping) CreateSymlink(\n\tctx context.Context,\n\top *fuseops.CreateSymlinkOp) error {\n\terr := em.wrapped.CreateSymlink(ctx, op)\n\treturn em.mapError(\"CreateSymlink\", err)\n}\n\nfunc (em *errorMapping) Rename(\n\tctx context.Context,\n\top *fuseops.RenameOp) error {\n\terr := em.wrapped.Rename(ctx, op)\n\treturn em.mapError(\"Rename\", err)\n}\n\nfunc (em *errorMapping) RmDir(\n\tctx context.Context,\n\top *fuseops.RmDirOp) error {\n\terr := em.wrapped.RmDir(ctx, op)\n\treturn em.mapError(\"RmDir\", err)\n}\n\nfunc (em *errorMapping) Unlink(\n\tctx context.Context,\n\top *fuseops.UnlinkOp) error {\n\terr := em.wrapped.Unlink(ctx, op)\n\treturn em.mapError(\"Unlink\", err)\n}\n\nfunc (em *errorMapping) OpenDir(\n\tctx context.Context,\n\top *fuseops.OpenDirOp) error {\n\terr := em.wrapped.OpenDir(ctx, op)\n\treturn em.mapError(\"OpenDir\", err)\n}\n\nfunc (em *errorMapping) ReadDir(\n\tctx context.Context,\n\top *fuseops.ReadDirOp) error {\n\terr := em.wrapped.ReadDir(ctx, op)\n\treturn em.mapError(\"ReadDir\", err)\n}\n\nfunc (em *errorMapping) ReleaseDirHandle(\n\tctx context.Context,\n\top *fuseops.ReleaseDirHandleOp) error {\n\terr := em.wrapped.ReleaseDirHandle(ctx, op)\n\treturn em.mapError(\"ReleaseDirHandle\", err)\n}\n\nfunc (em *errorMapping) OpenFile(\n\tctx context.Context,\n\top *fuseops.OpenFileOp) error {\n\terr := em.wrapped.OpenFile(ctx, op)\n\treturn em.mapError(\"OpenFile\", err)\n}\n\nfunc (em *errorMapping) ReadFile(\n\tctx context.Context,\n\top *fuseops.ReadFileOp) error {\n\terr := em.wrapped.ReadFile(ctx, op)\n\treturn em.mapError(\"ReadFile\", err)\n}\n\nfunc (em *errorMapping) WriteFile(\n\tctx context.Context,\n\top *fuseops.WriteFileOp) error {\n\terr := em.wrapped.WriteFile(ctx, op)\n\treturn em.mapError(\"WriteFile\", err)\n}\n\nfunc (em *errorMapping) SyncFile(\n\tctx context.Context,\n\top *fuseops.SyncFileOp) error {\n\terr := em.wrapped.SyncFile(ctx, op)\n\treturn em.mapError(\"SyncFile\", err)\n}\n\nfunc (em *errorMapping) FlushFile(\n\tctx context.Context,\n\top *fuseops.FlushFileOp) error {\n\terr := em.wrapped.FlushFile(ctx, op)\n\treturn em.mapError(\"FlushFile\", err)\n}\n\nfunc (em *errorMapping) ReleaseFileHandle(\n\tctx context.Context,\n\top *fuseops.ReleaseFileHandleOp) error {\n\terr := em.wrapped.ReleaseFileHandle(ctx, op)\n\treturn em.mapError(\"ReleaseFileHandle\", err)\n}\n\nfunc (em *errorMapping) ReadSymlink(\n\tctx context.Context,\n\top *fuseops.ReadSymlinkOp) error {\n\terr := em.wrapped.ReadSymlink(ctx, op)\n\treturn em.mapError(\"ReadSymlink\", err)\n}\n\nfunc (em *errorMapping) RemoveXattr(\n\tctx context.Context,\n\top *fuseops.RemoveXattrOp) error {\n\terr := em.wrapped.RemoveXattr(ctx, op)\n\treturn em.mapError(\"RemoveXattr\", err)\n}\n\nfunc (em *errorMapping) GetXattr(\n\tctx context.Context,\n\top *fuseops.GetXattrOp) error {\n\terr := em.wrapped.GetXattr(ctx, op)\n\treturn em.mapError(\"GetXattr\", err)\n}\n\nfunc (em *errorMapping) ListXattr(\n\tctx context.Context,\n\top *fuseops.ListXattrOp) error {\n\terr := em.wrapped.ListXattr(ctx, op)\n\treturn em.mapError(\"ListXattr\", err)\n}\n\nfunc (em *errorMapping) SetXattr(\n\tctx context.Context,\n\top *fuseops.SetXattrOp) error {\n\terr := em.wrapped.SetXattr(ctx, op)\n\treturn em.mapError(\"SetXattr\", err)\n}\n\nfunc (em *errorMapping) Fallocate(\n\tctx context.Context,\n\top *fuseops.FallocateOp) error {\n\terr := em.wrapped.Fallocate(ctx, op)\n\treturn em.mapError(\"Fallocate\", err)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage ssl\n\nimport (\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/sha1\"\n\t\"crypto\/x509\"\n\t\"crypto\/x509\/pkix\"\n\t\"encoding\/hex\"\n\t\"encoding\/pem\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/big\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\n\t\"k8s.io\/ingress\/core\/pkg\/ingress\"\n)\n\n\/\/ AddOrUpdateCertAndKey creates a .pem file wth the cert and the key with the specified name\nfunc AddOrUpdateCertAndKey(name string, cert, key, ca []byte) (*ingress.SSLCert, error) {\n\tpemName := fmt.Sprintf(\"%v.pem\", name)\n\tpemFileName := fmt.Sprintf(\"%v\/%v\", ingress.DefaultSSLDirectory, pemName)\n\n\ttempPemFile, err := ioutil.TempFile(ingress.DefaultSSLDirectory, pemName)\n\n\tglog.V(3).Infof(\"Creating temp file %v for Keypair: %v\", tempPemFile.Name(), pemName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not create temp pem file %v: %v\", pemFileName, err)\n\t}\n\n\t_, err = tempPemFile.Write(cert)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not write to pem file %v: %v\", tempPemFile.Name(), err)\n\t}\n\t_, err = tempPemFile.Write([]byte(\"\\n\"))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not write to pem file %v: %v\", tempPemFile.Name(), err)\n\t}\n\t_, err = tempPemFile.Write(key)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not write to pem file %v: %v\", tempPemFile.Name(), err)\n\t}\n\n\terr = tempPemFile.Close()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not close temp pem file %v: %v\", tempPemFile.Name(), err)\n\t}\n\n\tpemCerts, err := ioutil.ReadFile(tempPemFile.Name())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpemBlock, _ := pem.Decode(pemCerts)\n\tif pemBlock == nil {\n\t\treturn nil, fmt.Errorf(\"No valid PEM formatted block found\")\n\t}\n\n\t\/\/ If the file does not start with 'BEGIN CERTIFICATE' it's invalid and must not be used.\n\tif pemBlock.Type != \"CERTIFICATE\" {\n\t\t_ = os.Remove(tempPemFile.Name())\n\t\treturn nil, fmt.Errorf(\"Certificate %v contains invalid data, and must be created with 'kubectl create secret tls'\", name)\n\t}\n\n\tpemCert, err := x509.ParseCertificate(pemBlock.Bytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcn := []string{pemCert.Subject.CommonName}\n\tif len(pemCert.DNSNames) > 0 {\n\t\tcn = append(cn, pemCert.DNSNames...)\n\t}\n\n\terr = os.Rename(tempPemFile.Name(), pemFileName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not move temp pem file %v to destination %v: %v\", tempPemFile.Name(), pemFileName, err)\n\t}\n\n\tif len(ca) > 0 {\n\t\tbundle := x509.NewCertPool()\n\t\tbundle.AppendCertsFromPEM(ca)\n\t\topts := x509.VerifyOptions{\n\t\t\tRoots: bundle,\n\t\t}\n\n\t\t_, err := pemCert.Verify(opts)\n\t\tif err != nil {\n\t\t\toe := fmt.Sprintf(\"failed to verify certificate chain: \\n\\t%s\\n\", err)\n\t\t\treturn nil, errors.New(oe)\n\t\t}\n\n\t\tcaFile, err := os.OpenFile(pemFileName, os.O_RDWR|os.O_APPEND, 0600)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Could not open file %v for writing additional CA chains: %v\", pemFileName, err)\n\t\t}\n\n\t\tdefer caFile.Close()\n\t\t_, err = caFile.Write([]byte(\"\\n\"))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not append CA to cert file %v: %v\", pemFileName, err)\n\t\t}\n\t\tcaFile.Write(ca)\n\t\tcaFile.Write([]byte(\"\\n\"))\n\n\t\treturn &ingress.SSLCert{\n\t\t\tCAFileName: pemFileName,\n\t\t\tPemFileName: pemFileName,\n\t\t\tPemSHA: PemSHA1(pemFileName),\n\t\t\tCN: cn,\n\t\t}, nil\n\t}\n\n\treturn &ingress.SSLCert{\n\t\tPemFileName: pemFileName,\n\t\tPemSHA: PemSHA1(pemFileName),\n\t\tCN: cn,\n\t}, nil\n}\n\n\/\/ AddCertAuth creates a .pem file with the specified CAs to be used in Cert Authentication\n\/\/ If it's already exists, it's clobbered.\nfunc AddCertAuth(name string, ca []byte) (*ingress.SSLCert, error) {\n\n\tcaName := fmt.Sprintf(\"ca-%v.pem\", name)\n\tcaFileName := fmt.Sprintf(\"%v\/%v\", ingress.DefaultSSLDirectory, caName)\n\n\tpemCABlock, _ := pem.Decode(ca)\n\tif pemCABlock == nil {\n\t\treturn nil, fmt.Errorf(\"No valid PEM formatted block found\")\n\t}\n\t\/\/ If the first certificate does not start with 'BEGIN CERTIFICATE' it's invalid and must not be used.\n\tif pemCABlock.Type != \"CERTIFICATE\" {\n\t\treturn nil, fmt.Errorf(\"CA File %v contains invalid data, and must be created only with PEM formated certificates\", name)\n\t}\n\n\t_, err := x509.ParseCertificate(pemCABlock.Bytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = ioutil.WriteFile(caFileName, ca, 0644)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not write CA file %v: %v\", caFileName, err)\n\t}\n\n\tglog.V(3).Infof(\"Created CA Certificate for authentication: %v\", caFileName)\n\treturn &ingress.SSLCert{\n\t\tCAFileName: caFileName,\n\t\tPemFileName: caFileName,\n\t\tPemSHA: PemSHA1(caFileName),\n\t}, nil\n}\n\n\/\/ SearchDHParamFile iterates all the secrets mounted inside the \/etc\/nginx-ssl directory\n\/\/ in order to find a file with the name dhparam.pem. If such file exists it will\n\/\/ returns the path. If not it just returns an empty string\nfunc SearchDHParamFile(baseDir string) string {\n\tfiles, _ := ioutil.ReadDir(baseDir)\n\tfor _, file := range files {\n\t\tif !file.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tdhPath := fmt.Sprintf(\"%v\/%v\/dhparam.pem\", baseDir, file.Name())\n\t\tif _, err := os.Stat(dhPath); err == nil {\n\t\t\tglog.Infof(\"using file '%v' for parameter ssl_dhparam\", dhPath)\n\t\t\treturn dhPath\n\t\t}\n\t}\n\n\tglog.Warning(\"no file dhparam.pem found in secrets\")\n\treturn \"\"\n}\n\n\/\/ PemSHA1 returns the SHA1 of a pem file. This is used to\n\/\/ reload NGINX in case a secret with a SSL certificate changed.\nfunc PemSHA1(filename string) string {\n\thasher := sha1.New()\n\ts, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\thasher.Write(s)\n\treturn hex.EncodeToString(hasher.Sum(nil))\n}\n\n\/\/ GetFakeSSLCert creates a Self Signed Certificate\n\/\/ Based in the code https:\/\/golang.org\/src\/crypto\/tls\/generate_cert.go\nfunc GetFakeSSLCert() ([]byte, []byte) {\n\n\tvar priv interface{}\n\tvar err error\n\n\tpriv, err = rsa.GenerateKey(rand.Reader, 2048)\n\n\tif err != nil {\n\t\tglog.Fatalf(\"failed to generate fake private key: %s\", err)\n\t}\n\n\tnotBefore := time.Now()\n\t\/\/ This certificate is valid for 365 days\n\tnotAfter := notBefore.Add(365 * 24 * time.Hour)\n\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\n\tif err != nil {\n\t\tglog.Fatalf(\"failed to generate fake serial number: %s\", err)\n\t}\n\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"Acme Co\"},\n\t\t\tCommonName: \"Kubernetes Ingress Controller Fake Certificate\",\n\t\t},\n\t\tNotBefore: notBefore,\n\t\tNotAfter: notAfter,\n\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t\tDNSNames: []string{\"ingress.local\"},\n\t}\n\tderBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.(*rsa.PrivateKey).PublicKey, priv)\n\tif err != nil {\n\t\tglog.Fatalf(\"Failed to create fake certificate: %s\", err)\n\t}\n\n\tcert := pem.EncodeToMemory(&pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes})\n\n\tkey := pem.EncodeToMemory(&pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(priv.(*rsa.PrivateKey))})\n\n\treturn cert, key\n}\n<commit_msg>Temporary PEM Files cleanup<commit_after>\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage ssl\n\nimport (\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/sha1\"\n\t\"crypto\/x509\"\n\t\"crypto\/x509\/pkix\"\n\t\"encoding\/hex\"\n\t\"encoding\/pem\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/big\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\n\t\"k8s.io\/ingress\/core\/pkg\/ingress\"\n)\n\n\/\/ AddOrUpdateCertAndKey creates a .pem file wth the cert and the key with the specified name\nfunc AddOrUpdateCertAndKey(name string, cert, key, ca []byte) (*ingress.SSLCert, error) {\n\tpemName := fmt.Sprintf(\"%v.pem\", name)\n\tpemFileName := fmt.Sprintf(\"%v\/%v\", ingress.DefaultSSLDirectory, pemName)\n\n\ttempPemFile, err := ioutil.TempFile(ingress.DefaultSSLDirectory, pemName)\n\n\tglog.V(3).Infof(\"Creating temp file %v for Keypair: %v\", tempPemFile.Name(), pemName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not create temp pem file %v: %v\", pemFileName, err)\n\t}\n\n\t_, err = tempPemFile.Write(cert)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not write to pem file %v: %v\", tempPemFile.Name(), err)\n\t}\n\t_, err = tempPemFile.Write([]byte(\"\\n\"))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not write to pem file %v: %v\", tempPemFile.Name(), err)\n\t}\n\t_, err = tempPemFile.Write(key)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not write to pem file %v: %v\", tempPemFile.Name(), err)\n\t}\n\n\terr = tempPemFile.Close()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not close temp pem file %v: %v\", tempPemFile.Name(), err)\n\t}\n\n\tpemCerts, err := ioutil.ReadFile(tempPemFile.Name())\n\tif err != nil {\n\t\t_ = os.Remove(tempPemFile.Name())\n\t\treturn nil, err\n\t}\n\n\tpemBlock, _ := pem.Decode(pemCerts)\n\tif pemBlock == nil {\n\t\t_ = os.Remove(tempPemFile.Name())\n\t\treturn nil, fmt.Errorf(\"No valid PEM formatted block found\")\n\t}\n\n\t\/\/ If the file does not start with 'BEGIN CERTIFICATE' it's invalid and must not be used.\n\tif pemBlock.Type != \"CERTIFICATE\" {\n\t\t_ = os.Remove(tempPemFile.Name())\n\t\treturn nil, fmt.Errorf(\"Certificate %v contains invalid data, and must be created with 'kubectl create secret tls'\", name)\n\t}\n\n\tpemCert, err := x509.ParseCertificate(pemBlock.Bytes)\n\tif err != nil {\n\t\t_ = os.Remove(tempPemFile.Name())\n\t\treturn nil, err\n\t}\n\n\tcn := []string{pemCert.Subject.CommonName}\n\tif len(pemCert.DNSNames) > 0 {\n\t\tcn = append(cn, pemCert.DNSNames...)\n\t}\n\n\terr = os.Rename(tempPemFile.Name(), pemFileName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not move temp pem file %v to destination %v: %v\", tempPemFile.Name(), pemFileName, err)\n\t}\n\n\tif len(ca) > 0 {\n\t\tbundle := x509.NewCertPool()\n\t\tbundle.AppendCertsFromPEM(ca)\n\t\topts := x509.VerifyOptions{\n\t\t\tRoots: bundle,\n\t\t}\n\n\t\t_, err := pemCert.Verify(opts)\n\t\tif err != nil {\n\t\t\toe := fmt.Sprintf(\"failed to verify certificate chain: \\n\\t%s\\n\", err)\n\t\t\treturn nil, errors.New(oe)\n\t\t}\n\n\t\tcaFile, err := os.OpenFile(pemFileName, os.O_RDWR|os.O_APPEND, 0600)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Could not open file %v for writing additional CA chains: %v\", pemFileName, err)\n\t\t}\n\n\t\tdefer caFile.Close()\n\t\t_, err = caFile.Write([]byte(\"\\n\"))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not append CA to cert file %v: %v\", pemFileName, err)\n\t\t}\n\t\tcaFile.Write(ca)\n\t\tcaFile.Write([]byte(\"\\n\"))\n\n\t\treturn &ingress.SSLCert{\n\t\t\tCAFileName: pemFileName,\n\t\t\tPemFileName: pemFileName,\n\t\t\tPemSHA: PemSHA1(pemFileName),\n\t\t\tCN: cn,\n\t\t}, nil\n\t}\n\n\treturn &ingress.SSLCert{\n\t\tPemFileName: pemFileName,\n\t\tPemSHA: PemSHA1(pemFileName),\n\t\tCN: cn,\n\t}, nil\n}\n\n\/\/ AddCertAuth creates a .pem file with the specified CAs to be used in Cert Authentication\n\/\/ If it's already exists, it's clobbered.\nfunc AddCertAuth(name string, ca []byte) (*ingress.SSLCert, error) {\n\n\tcaName := fmt.Sprintf(\"ca-%v.pem\", name)\n\tcaFileName := fmt.Sprintf(\"%v\/%v\", ingress.DefaultSSLDirectory, caName)\n\n\tpemCABlock, _ := pem.Decode(ca)\n\tif pemCABlock == nil {\n\t\treturn nil, fmt.Errorf(\"No valid PEM formatted block found\")\n\t}\n\t\/\/ If the first certificate does not start with 'BEGIN CERTIFICATE' it's invalid and must not be used.\n\tif pemCABlock.Type != \"CERTIFICATE\" {\n\t\treturn nil, fmt.Errorf(\"CA File %v contains invalid data, and must be created only with PEM formated certificates\", name)\n\t}\n\n\t_, err := x509.ParseCertificate(pemCABlock.Bytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = ioutil.WriteFile(caFileName, ca, 0644)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not write CA file %v: %v\", caFileName, err)\n\t}\n\n\tglog.V(3).Infof(\"Created CA Certificate for authentication: %v\", caFileName)\n\treturn &ingress.SSLCert{\n\t\tCAFileName: caFileName,\n\t\tPemFileName: caFileName,\n\t\tPemSHA: PemSHA1(caFileName),\n\t}, nil\n}\n\n\/\/ SearchDHParamFile iterates all the secrets mounted inside the \/etc\/nginx-ssl directory\n\/\/ in order to find a file with the name dhparam.pem. If such file exists it will\n\/\/ returns the path. If not it just returns an empty string\nfunc SearchDHParamFile(baseDir string) string {\n\tfiles, _ := ioutil.ReadDir(baseDir)\n\tfor _, file := range files {\n\t\tif !file.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tdhPath := fmt.Sprintf(\"%v\/%v\/dhparam.pem\", baseDir, file.Name())\n\t\tif _, err := os.Stat(dhPath); err == nil {\n\t\t\tglog.Infof(\"using file '%v' for parameter ssl_dhparam\", dhPath)\n\t\t\treturn dhPath\n\t\t}\n\t}\n\n\tglog.Warning(\"no file dhparam.pem found in secrets\")\n\treturn \"\"\n}\n\n\/\/ PemSHA1 returns the SHA1 of a pem file. This is used to\n\/\/ reload NGINX in case a secret with a SSL certificate changed.\nfunc PemSHA1(filename string) string {\n\thasher := sha1.New()\n\ts, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\thasher.Write(s)\n\treturn hex.EncodeToString(hasher.Sum(nil))\n}\n\n\/\/ GetFakeSSLCert creates a Self Signed Certificate\n\/\/ Based in the code https:\/\/golang.org\/src\/crypto\/tls\/generate_cert.go\nfunc GetFakeSSLCert() ([]byte, []byte) {\n\n\tvar priv interface{}\n\tvar err error\n\n\tpriv, err = rsa.GenerateKey(rand.Reader, 2048)\n\n\tif err != nil {\n\t\tglog.Fatalf(\"failed to generate fake private key: %s\", err)\n\t}\n\n\tnotBefore := time.Now()\n\t\/\/ This certificate is valid for 365 days\n\tnotAfter := notBefore.Add(365 * 24 * time.Hour)\n\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\n\tif err != nil {\n\t\tglog.Fatalf(\"failed to generate fake serial number: %s\", err)\n\t}\n\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"Acme Co\"},\n\t\t\tCommonName: \"Kubernetes Ingress Controller Fake Certificate\",\n\t\t},\n\t\tNotBefore: notBefore,\n\t\tNotAfter: notAfter,\n\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t\tDNSNames: []string{\"ingress.local\"},\n\t}\n\tderBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.(*rsa.PrivateKey).PublicKey, priv)\n\tif err != nil {\n\t\tglog.Fatalf(\"Failed to create fake certificate: %s\", err)\n\t}\n\n\tcert := pem.EncodeToMemory(&pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes})\n\n\tkey := pem.EncodeToMemory(&pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(priv.(*rsa.PrivateKey))})\n\n\treturn cert, key\n}\n<|endoftext|>"} {"text":"<commit_before>package plist\n\nimport (\n\t\"testing\"\n\t\"testing\/quick\"\n)\n\nfunc TestCFData(t *testing.T) {\n\tf := func(data []byte) []byte { return data }\n\tg := func(data []byte) []byte {\n\t\tcfData := convertBytesToCFData(data)\n\t\tdefer cfRelease(cfTypeRef(cfData))\n\t\treturn convertCFDataToBytes(cfData)\n\t}\n\tif err := quick.CheckEqual(f, g, nil); err != nil {\n\t\tt.Error(err)\n\t}\n}\n<commit_msg>Add a test for CFStrings<commit_after>package plist\n\nimport (\n\t\"testing\"\n\t\"testing\/quick\"\n)\n\nfunc TestCFData(t *testing.T) {\n\tf := func(data []byte) []byte { return data }\n\tg := func(data []byte) []byte {\n\t\tcfData := convertBytesToCFData(data)\n\t\tif cfData == nil {\n\t\t\tt.Fatal(\"CFDataRef is NULL\")\n\t\t}\n\t\tdefer cfRelease(cfTypeRef(cfData))\n\t\treturn convertCFDataToBytes(cfData)\n\t}\n\tif err := quick.CheckEqual(f, g, nil); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestCFString(t *testing.T) {\n\t\/\/ because the generator for string produces invalid strings,\n\t\/\/ lets generate []runes instead and convert those to strings in the function\n\tf := func(runes []rune) string { return string(runes) }\n\tg := func(runes []rune) string {\n\t\tcfStr := convertStringToCFString(string(runes))\n\t\tif cfStr == nil {\n\t\t\tt.Fatal(\"CFStringRef is NULL (%#v)\", runes)\n\t\t}\n\t\tdefer cfRelease(cfTypeRef(cfStr))\n\t\treturn convertCFStringToString(cfStr)\n\t}\n\tif err := quick.CheckEqual(f, g, nil); err != nil {\n\t\tt.Error(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package peerstore\n\nimport (\n\t\"context\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\tpeer \"github.com\/libp2p\/go-libp2p-peer\"\n\taddr \"github.com\/libp2p\/go-libp2p-peerstore\/addr\"\n\tma \"github.com\/multiformats\/go-multiaddr\"\n)\n\nconst (\n\n\t\/\/ TempAddrTTL is the ttl used for a short lived address\n\tTempAddrTTL = time.Second * 10\n\n\t\/\/ ProviderAddrTTL is the TTL of an address we've received from a provider.\n\t\/\/ This is also a temporary address, but lasts longer. After this expires,\n\t\/\/ the records we return will require an extra lookup.\n\tProviderAddrTTL = time.Minute * 10\n\n\t\/\/ RecentlyConnectedAddrTTL is used when we recently connected to a peer.\n\t\/\/ It means that we are reasonably certain of the peer's address.\n\tRecentlyConnectedAddrTTL = time.Minute * 10\n\n\t\/\/ OwnObservedAddrTTL is used for our own external addresses observed by peers.\n\tOwnObservedAddrTTL = time.Minute * 10\n\n\t\/\/ PermanentAddrTTL is the ttl for a \"permanent address\" (e.g. bootstrap nodes)\n\t\/\/ if we haven't shipped you an update to ipfs in 356 days\n\t\/\/ we probably arent running the same bootstrap nodes...\n\tPermanentAddrTTL = time.Hour * 24 * 356\n\n\t\/\/ ConnectedAddrTTL is the ttl used for the addresses of a peer to whom\n\t\/\/ we're connected directly. This is basically permanent, as we will\n\t\/\/ clear them + re-add under a TempAddrTTL after disconnecting.\n\tConnectedAddrTTL = PermanentAddrTTL\n)\n\ntype expiringAddr struct {\n\tAddr ma.Multiaddr\n\tTTL time.Time\n}\n\nfunc (e *expiringAddr) ExpiredBy(t time.Time) bool {\n\treturn t.After(e.TTL)\n}\n\ntype addrSet map[string]expiringAddr\n\n\/\/ AddrManager manages addresses.\n\/\/ The zero-value is ready to be used.\ntype AddrManager struct {\n\taddrmu sync.Mutex \/\/ guards addrs\n\taddrs map[peer.ID]addrSet\n\n\taddrSubs map[peer.ID][]*addrSub\n}\n\n\/\/ ensures the AddrManager is initialized.\n\/\/ So we can use the zero value.\nfunc (mgr *AddrManager) init() {\n\tif mgr.addrs == nil {\n\t\tmgr.addrs = make(map[peer.ID]addrSet)\n\t}\n\tif mgr.addrSubs == nil {\n\t\tmgr.addrSubs = make(map[peer.ID][]*addrSub)\n\t}\n}\n\nfunc (mgr *AddrManager) Peers() []peer.ID {\n\tmgr.addrmu.Lock()\n\tdefer mgr.addrmu.Unlock()\n\tif mgr.addrs == nil {\n\t\treturn nil\n\t}\n\n\tpids := make([]peer.ID, 0, len(mgr.addrs))\n\tfor pid := range mgr.addrs {\n\t\tpids = append(pids, pid)\n\t}\n\treturn pids\n}\n\n\/\/ AddAddr calls AddAddrs(p, []ma.Multiaddr{addr}, ttl)\nfunc (mgr *AddrManager) AddAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {\n\tmgr.AddAddrs(p, []ma.Multiaddr{addr}, ttl)\n}\n\n\/\/ AddAddrs gives AddrManager addresses to use, with a given ttl\n\/\/ (time-to-live), after which the address is no longer valid.\n\/\/ If the manager has a longer TTL, the operation is a no-op for that address\nfunc (mgr *AddrManager) AddAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {\n\tmgr.addrmu.Lock()\n\tdefer mgr.addrmu.Unlock()\n\n\t\/\/ if ttl is zero, exit. nothing to do.\n\tif ttl <= 0 {\n\t\treturn\n\t}\n\n\t\/\/ so zero value can be used\n\tmgr.init()\n\n\tamap, found := mgr.addrs[p]\n\tif !found {\n\t\tamap = make(addrSet)\n\t\tmgr.addrs[p] = amap\n\t}\n\n\tsubs := mgr.addrSubs[p]\n\n\t\/\/ only expand ttls\n\texp := time.Now().Add(ttl)\n\tfor _, addr := range addrs {\n\t\tif addr == nil {\n\t\t\tlog.Warningf(\"was passed nil multiaddr for %s\", p)\n\t\t\tcontinue\n\t\t}\n\n\t\taddrstr := string(addr.Bytes())\n\t\ta, found := amap[addrstr]\n\t\tif !found || exp.After(a.TTL) {\n\t\t\tamap[addrstr] = expiringAddr{Addr: addr, TTL: exp}\n\n\t\t\tfor _, sub := range subs {\n\t\t\t\tsub.pubAddr(addr)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ SetAddr calls mgr.SetAddrs(p, addr, ttl)\nfunc (mgr *AddrManager) SetAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {\n\tmgr.SetAddrs(p, []ma.Multiaddr{addr}, ttl)\n}\n\n\/\/ SetAddrs sets the ttl on addresses. This clears any TTL there previously.\n\/\/ This is used when we receive the best estimate of the validity of an address.\nfunc (mgr *AddrManager) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {\n\tmgr.addrmu.Lock()\n\tdefer mgr.addrmu.Unlock()\n\n\t\/\/ so zero value can be used\n\tmgr.init()\n\n\tamap, found := mgr.addrs[p]\n\tif !found {\n\t\tamap = make(addrSet)\n\t\tmgr.addrs[p] = amap\n\t}\n\n\tsubs := mgr.addrSubs[p]\n\n\texp := time.Now().Add(ttl)\n\tfor _, addr := range addrs {\n\t\tif addr == nil {\n\t\t\tlog.Warningf(\"was passed nil multiaddr for %s\", p)\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ re-set all of them for new ttl.\n\t\taddrs := string(addr.Bytes())\n\n\t\tif ttl > 0 {\n\t\t\tamap[addrs] = expiringAddr{Addr: addr, TTL: exp}\n\n\t\t\tfor _, sub := range subs {\n\t\t\t\tsub.pubAddr(addr)\n\t\t\t}\n\t\t} else {\n\t\t\tdelete(amap, addrs)\n\t\t}\n\t}\n}\n\n\/\/ Addresses returns all known (and valid) addresses for a given\nfunc (mgr *AddrManager) Addrs(p peer.ID) []ma.Multiaddr {\n\tmgr.addrmu.Lock()\n\tdefer mgr.addrmu.Unlock()\n\n\t\/\/ not initialized? nothing to give.\n\tif mgr.addrs == nil {\n\t\treturn nil\n\t}\n\n\tmaddrs, found := mgr.addrs[p]\n\tif !found {\n\t\treturn nil\n\t}\n\n\tnow := time.Now()\n\tgood := make([]ma.Multiaddr, 0, len(maddrs))\n\tvar expired []string\n\tfor s, m := range maddrs {\n\t\tif m.ExpiredBy(now) {\n\t\t\texpired = append(expired, s)\n\t\t} else {\n\t\t\tgood = append(good, m.Addr)\n\t\t}\n\t}\n\n\t\/\/ clean up the expired ones.\n\tfor _, s := range expired {\n\t\tdelete(maddrs, s)\n\t}\n\tif len(maddrs) == 0 {\n\t\tdelete(mgr.addrs, p)\n\t}\n\treturn good\n}\n\n\/\/ ClearAddresses removes all previously stored addresses\nfunc (mgr *AddrManager) ClearAddrs(p peer.ID) {\n\tmgr.addrmu.Lock()\n\tdefer mgr.addrmu.Unlock()\n\tmgr.init()\n\n\tdelete(mgr.addrs, p)\n}\n\nfunc (mgr *AddrManager) removeSub(p peer.ID, s *addrSub) {\n\tmgr.addrmu.Lock()\n\tdefer mgr.addrmu.Unlock()\n\tsubs := mgr.addrSubs[p]\n\tif len(subs) == 1 {\n\t\tif subs[0] != s {\n\t\t\treturn\n\t\t}\n\t\tdelete(mgr.addrSubs, p)\n\t\treturn\n\t}\n\tfor i, v := range subs {\n\t\tif v == s {\n\t\t\tsubs[i] = subs[len(subs)-1]\n\t\t\tsubs[len(subs)-1] = nil\n\t\t\tmgr.addrSubs[p] = subs[:len(subs)-1]\n\t\t\treturn\n\t\t}\n\t}\n}\n\ntype addrSub struct {\n\tpubch chan ma.Multiaddr\n\tlk sync.Mutex\n\tbuffer []ma.Multiaddr\n\tctx context.Context\n}\n\nfunc (s *addrSub) pubAddr(a ma.Multiaddr) {\n\tselect {\n\tcase s.pubch <- a:\n\tcase <-s.ctx.Done():\n\t}\n}\n\nfunc (mgr *AddrManager) AddrStream(ctx context.Context, p peer.ID) <-chan ma.Multiaddr {\n\tmgr.addrmu.Lock()\n\tdefer mgr.addrmu.Unlock()\n\tmgr.init()\n\n\tsub := &addrSub{pubch: make(chan ma.Multiaddr), ctx: ctx}\n\n\tout := make(chan ma.Multiaddr)\n\n\tmgr.addrSubs[p] = append(mgr.addrSubs[p], sub)\n\n\tbaseaddrset := mgr.addrs[p]\n\tvar initial []ma.Multiaddr\n\tfor _, a := range baseaddrset {\n\t\tinitial = append(initial, a.Addr)\n\t}\n\n\tsort.Sort(addr.AddrList(initial))\n\n\tgo func(buffer []ma.Multiaddr) {\n\t\tdefer close(out)\n\n\t\tsent := make(map[string]bool)\n\t\tvar outch chan ma.Multiaddr\n\n\t\tfor _, a := range buffer {\n\t\t\tsent[a.String()] = true\n\t\t}\n\n\t\tvar next ma.Multiaddr\n\t\tif len(buffer) > 0 {\n\t\t\tnext = buffer[0]\n\t\t\tbuffer = buffer[1:]\n\t\t\toutch = out\n\t\t}\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase outch <- next:\n\t\t\t\tif len(buffer) > 0 {\n\t\t\t\t\tnext = buffer[0]\n\t\t\t\t\tbuffer = buffer[1:]\n\t\t\t\t} else {\n\t\t\t\t\toutch = nil\n\t\t\t\t\tnext = nil\n\t\t\t\t}\n\t\t\tcase naddr := <-sub.pubch:\n\t\t\t\tif sent[naddr.String()] {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tsent[naddr.String()] = true\n\t\t\t\tif next == nil {\n\t\t\t\t\tnext = naddr\n\t\t\t\t\toutch = out\n\t\t\t\t} else {\n\t\t\t\t\tbuffer = append(buffer, naddr)\n\t\t\t\t}\n\t\t\tcase <-ctx.Done():\n\t\t\t\tmgr.removeSub(p, sub)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t}(initial)\n\n\treturn out\n}\n<commit_msg>preallocate AddrStream buffer<commit_after>package peerstore\n\nimport (\n\t\"context\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\tpeer \"github.com\/libp2p\/go-libp2p-peer\"\n\taddr \"github.com\/libp2p\/go-libp2p-peerstore\/addr\"\n\tma \"github.com\/multiformats\/go-multiaddr\"\n)\n\nconst (\n\n\t\/\/ TempAddrTTL is the ttl used for a short lived address\n\tTempAddrTTL = time.Second * 10\n\n\t\/\/ ProviderAddrTTL is the TTL of an address we've received from a provider.\n\t\/\/ This is also a temporary address, but lasts longer. After this expires,\n\t\/\/ the records we return will require an extra lookup.\n\tProviderAddrTTL = time.Minute * 10\n\n\t\/\/ RecentlyConnectedAddrTTL is used when we recently connected to a peer.\n\t\/\/ It means that we are reasonably certain of the peer's address.\n\tRecentlyConnectedAddrTTL = time.Minute * 10\n\n\t\/\/ OwnObservedAddrTTL is used for our own external addresses observed by peers.\n\tOwnObservedAddrTTL = time.Minute * 10\n\n\t\/\/ PermanentAddrTTL is the ttl for a \"permanent address\" (e.g. bootstrap nodes)\n\t\/\/ if we haven't shipped you an update to ipfs in 356 days\n\t\/\/ we probably arent running the same bootstrap nodes...\n\tPermanentAddrTTL = time.Hour * 24 * 356\n\n\t\/\/ ConnectedAddrTTL is the ttl used for the addresses of a peer to whom\n\t\/\/ we're connected directly. This is basically permanent, as we will\n\t\/\/ clear them + re-add under a TempAddrTTL after disconnecting.\n\tConnectedAddrTTL = PermanentAddrTTL\n)\n\ntype expiringAddr struct {\n\tAddr ma.Multiaddr\n\tTTL time.Time\n}\n\nfunc (e *expiringAddr) ExpiredBy(t time.Time) bool {\n\treturn t.After(e.TTL)\n}\n\ntype addrSet map[string]expiringAddr\n\n\/\/ AddrManager manages addresses.\n\/\/ The zero-value is ready to be used.\ntype AddrManager struct {\n\taddrmu sync.Mutex \/\/ guards addrs\n\taddrs map[peer.ID]addrSet\n\n\taddrSubs map[peer.ID][]*addrSub\n}\n\n\/\/ ensures the AddrManager is initialized.\n\/\/ So we can use the zero value.\nfunc (mgr *AddrManager) init() {\n\tif mgr.addrs == nil {\n\t\tmgr.addrs = make(map[peer.ID]addrSet)\n\t}\n\tif mgr.addrSubs == nil {\n\t\tmgr.addrSubs = make(map[peer.ID][]*addrSub)\n\t}\n}\n\nfunc (mgr *AddrManager) Peers() []peer.ID {\n\tmgr.addrmu.Lock()\n\tdefer mgr.addrmu.Unlock()\n\tif mgr.addrs == nil {\n\t\treturn nil\n\t}\n\n\tpids := make([]peer.ID, 0, len(mgr.addrs))\n\tfor pid := range mgr.addrs {\n\t\tpids = append(pids, pid)\n\t}\n\treturn pids\n}\n\n\/\/ AddAddr calls AddAddrs(p, []ma.Multiaddr{addr}, ttl)\nfunc (mgr *AddrManager) AddAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {\n\tmgr.AddAddrs(p, []ma.Multiaddr{addr}, ttl)\n}\n\n\/\/ AddAddrs gives AddrManager addresses to use, with a given ttl\n\/\/ (time-to-live), after which the address is no longer valid.\n\/\/ If the manager has a longer TTL, the operation is a no-op for that address\nfunc (mgr *AddrManager) AddAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {\n\tmgr.addrmu.Lock()\n\tdefer mgr.addrmu.Unlock()\n\n\t\/\/ if ttl is zero, exit. nothing to do.\n\tif ttl <= 0 {\n\t\treturn\n\t}\n\n\t\/\/ so zero value can be used\n\tmgr.init()\n\n\tamap, found := mgr.addrs[p]\n\tif !found {\n\t\tamap = make(addrSet)\n\t\tmgr.addrs[p] = amap\n\t}\n\n\tsubs := mgr.addrSubs[p]\n\n\t\/\/ only expand ttls\n\texp := time.Now().Add(ttl)\n\tfor _, addr := range addrs {\n\t\tif addr == nil {\n\t\t\tlog.Warningf(\"was passed nil multiaddr for %s\", p)\n\t\t\tcontinue\n\t\t}\n\n\t\taddrstr := string(addr.Bytes())\n\t\ta, found := amap[addrstr]\n\t\tif !found || exp.After(a.TTL) {\n\t\t\tamap[addrstr] = expiringAddr{Addr: addr, TTL: exp}\n\n\t\t\tfor _, sub := range subs {\n\t\t\t\tsub.pubAddr(addr)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ SetAddr calls mgr.SetAddrs(p, addr, ttl)\nfunc (mgr *AddrManager) SetAddr(p peer.ID, addr ma.Multiaddr, ttl time.Duration) {\n\tmgr.SetAddrs(p, []ma.Multiaddr{addr}, ttl)\n}\n\n\/\/ SetAddrs sets the ttl on addresses. This clears any TTL there previously.\n\/\/ This is used when we receive the best estimate of the validity of an address.\nfunc (mgr *AddrManager) SetAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration) {\n\tmgr.addrmu.Lock()\n\tdefer mgr.addrmu.Unlock()\n\n\t\/\/ so zero value can be used\n\tmgr.init()\n\n\tamap, found := mgr.addrs[p]\n\tif !found {\n\t\tamap = make(addrSet)\n\t\tmgr.addrs[p] = amap\n\t}\n\n\tsubs := mgr.addrSubs[p]\n\n\texp := time.Now().Add(ttl)\n\tfor _, addr := range addrs {\n\t\tif addr == nil {\n\t\t\tlog.Warningf(\"was passed nil multiaddr for %s\", p)\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ re-set all of them for new ttl.\n\t\taddrs := string(addr.Bytes())\n\n\t\tif ttl > 0 {\n\t\t\tamap[addrs] = expiringAddr{Addr: addr, TTL: exp}\n\n\t\t\tfor _, sub := range subs {\n\t\t\t\tsub.pubAddr(addr)\n\t\t\t}\n\t\t} else {\n\t\t\tdelete(amap, addrs)\n\t\t}\n\t}\n}\n\n\/\/ Addresses returns all known (and valid) addresses for a given\nfunc (mgr *AddrManager) Addrs(p peer.ID) []ma.Multiaddr {\n\tmgr.addrmu.Lock()\n\tdefer mgr.addrmu.Unlock()\n\n\t\/\/ not initialized? nothing to give.\n\tif mgr.addrs == nil {\n\t\treturn nil\n\t}\n\n\tmaddrs, found := mgr.addrs[p]\n\tif !found {\n\t\treturn nil\n\t}\n\n\tnow := time.Now()\n\tgood := make([]ma.Multiaddr, 0, len(maddrs))\n\tvar expired []string\n\tfor s, m := range maddrs {\n\t\tif m.ExpiredBy(now) {\n\t\t\texpired = append(expired, s)\n\t\t} else {\n\t\t\tgood = append(good, m.Addr)\n\t\t}\n\t}\n\n\t\/\/ clean up the expired ones.\n\tfor _, s := range expired {\n\t\tdelete(maddrs, s)\n\t}\n\tif len(maddrs) == 0 {\n\t\tdelete(mgr.addrs, p)\n\t}\n\treturn good\n}\n\n\/\/ ClearAddresses removes all previously stored addresses\nfunc (mgr *AddrManager) ClearAddrs(p peer.ID) {\n\tmgr.addrmu.Lock()\n\tdefer mgr.addrmu.Unlock()\n\tmgr.init()\n\n\tdelete(mgr.addrs, p)\n}\n\nfunc (mgr *AddrManager) removeSub(p peer.ID, s *addrSub) {\n\tmgr.addrmu.Lock()\n\tdefer mgr.addrmu.Unlock()\n\tsubs := mgr.addrSubs[p]\n\tif len(subs) == 1 {\n\t\tif subs[0] != s {\n\t\t\treturn\n\t\t}\n\t\tdelete(mgr.addrSubs, p)\n\t\treturn\n\t}\n\tfor i, v := range subs {\n\t\tif v == s {\n\t\t\tsubs[i] = subs[len(subs)-1]\n\t\t\tsubs[len(subs)-1] = nil\n\t\t\tmgr.addrSubs[p] = subs[:len(subs)-1]\n\t\t\treturn\n\t\t}\n\t}\n}\n\ntype addrSub struct {\n\tpubch chan ma.Multiaddr\n\tlk sync.Mutex\n\tbuffer []ma.Multiaddr\n\tctx context.Context\n}\n\nfunc (s *addrSub) pubAddr(a ma.Multiaddr) {\n\tselect {\n\tcase s.pubch <- a:\n\tcase <-s.ctx.Done():\n\t}\n}\n\nfunc (mgr *AddrManager) AddrStream(ctx context.Context, p peer.ID) <-chan ma.Multiaddr {\n\tmgr.addrmu.Lock()\n\tdefer mgr.addrmu.Unlock()\n\tmgr.init()\n\n\tsub := &addrSub{pubch: make(chan ma.Multiaddr), ctx: ctx}\n\n\tout := make(chan ma.Multiaddr)\n\n\tmgr.addrSubs[p] = append(mgr.addrSubs[p], sub)\n\n\tbaseaddrset := mgr.addrs[p]\n\tinitial := make([]ma.Multiaddr, 0, len(baseaddrset))\n\tfor _, a := range baseaddrset {\n\t\tinitial = append(initial, a.Addr)\n\t}\n\n\tsort.Sort(addr.AddrList(initial))\n\n\tgo func(buffer []ma.Multiaddr) {\n\t\tdefer close(out)\n\n\t\tsent := make(map[string]bool, len(buffer))\n\t\tvar outch chan ma.Multiaddr\n\n\t\tfor _, a := range buffer {\n\t\t\tsent[a.String()] = true\n\t\t}\n\n\t\tvar next ma.Multiaddr\n\t\tif len(buffer) > 0 {\n\t\t\tnext = buffer[0]\n\t\t\tbuffer = buffer[1:]\n\t\t\toutch = out\n\t\t}\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase outch <- next:\n\t\t\t\tif len(buffer) > 0 {\n\t\t\t\t\tnext = buffer[0]\n\t\t\t\t\tbuffer = buffer[1:]\n\t\t\t\t} else {\n\t\t\t\t\toutch = nil\n\t\t\t\t\tnext = nil\n\t\t\t\t}\n\t\t\tcase naddr := <-sub.pubch:\n\t\t\t\tif sent[naddr.String()] {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tsent[naddr.String()] = true\n\t\t\t\tif next == nil {\n\t\t\t\t\tnext = naddr\n\t\t\t\t\toutch = out\n\t\t\t\t} else {\n\t\t\t\t\tbuffer = append(buffer, naddr)\n\t\t\t\t}\n\t\t\tcase <-ctx.Done():\n\t\t\t\tmgr.removeSub(p, sub)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t}(initial)\n\n\treturn out\n}\n<|endoftext|>"} {"text":"<commit_before>package scheduler\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/jhunt\/go-log\"\n\n\t\"github.com\/shieldproject\/shield\/db\"\n)\n\nvar next = 0\n\ntype Chore struct {\n\tID string\n\tTaskUUID string\n\tEncryption string\n\n\tDo func(chore Chore)\n\n\tStdout chan string\n\tStderr chan string\n\tExit chan int\n\tCancel chan bool\n}\n\nfunc NewChore(id string, do func(Chore)) Chore {\n\tnext += 1\n\treturn Chore{\n\t\tID: fmt.Sprintf(\"%s-%08d\", time.Now().Format(\"20060102-150405\"), next),\n\t\tTaskUUID: id,\n\t\tDo: do,\n\n\t\tStdout: make(chan string),\n\t\tStderr: make(chan string),\n\t\tExit: make(chan int),\n\t\tCancel: make(chan bool),\n\t}\n}\n\nfunc (chore Chore) String() string {\n\treturn fmt.Sprintf(\"chore %s\", chore.ID)\n}\n\nfunc (chore Chore) Infof(msg string, args ...interface{}) {\n\tlog.Debugf(chore.String()+\": stdout: \"+msg, args...)\n\tchore.Stdout <- fmt.Sprintf(msg+\"\\n\", args...)\n}\n\nfunc (chore Chore) Errorf(msg string, args ...interface{}) {\n\tlog.Debugf(chore.String()+\": stderr: \"+msg, args...)\n\tchore.Stderr <- fmt.Sprintf(msg+\"\\n\", args...)\n}\n\nfunc (chore Chore) UnixExit(rc int) {\n\tdefer func() {\n\t\trecover()\n\t}()\n\n\tchore.Exit <- rc\n\tclose(chore.Exit)\n\tlog.Debugf(\"%s: exiting %d\", chore, rc)\n}\n\nfunc (w *Worker) Execute(chore Chore) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Errorf(\"%s: %s\", chore, err)\n\t\t\tw.db.UpdateTaskLog(chore.TaskUUID, fmt.Sprintf(\"\\n\\nERROR: %s\\n\\n\", err))\n\n\t\t\tlog.Errorf(\"%s: FAILING task '%s' in database\", chore, chore.TaskUUID)\n\t\t\tw.db.FailTask(chore.TaskUUID, time.Now())\n\t\t}\n\t}()\n\n\tvar wait sync.WaitGroup\n\n\tw.Reserve(chore.TaskUUID)\n\tdefer w.Release()\n\n\tlog.Infof(\"%s: %s executing chore for task '%s'\", chore, w, chore.TaskUUID)\n\tw.db.StartTask(chore.TaskUUID, time.Now())\n\n\tlog.Debugf(\"%s: spinning up [stderr] goroutine to watch chore stderr and update the task log...\", chore)\n\twait.Add(1)\n\tgo func() {\n\t\tfor s := range chore.Stderr {\n\t\t\tw.db.UpdateTaskLog(chore.TaskUUID, s)\n\t\t}\n\t\tlog.Debugf(\"%s: no more standard error; [stderr] goroutine shutting down...\", chore)\n\t\twait.Done()\n\t}()\n\n\tlog.Debugf(\"%s: spinning up [stdout] goroutine to watch chore stdout and accumulate the output...\", chore)\n\toutput := \"\"\n\twait.Add(1)\n\tgo func() {\n\t\tfor s := range chore.Stdout {\n\t\t\toutput += s\n\t\t}\n\t\tlog.Debugf(\"%s: no more standard output; [stdout] goroutine shutting down...\", chore)\n\t\twait.Done()\n\t}()\n\n\tlog.Debugf(\"%s: spinning up [exit] goroutine to watch chore exit status and remember it...\", chore)\n\trc := 0\n\twait.Add(1)\n\tgo func() {\n\t\trc = <-chore.Exit\n\t\tlog.Debugf(\"%s: rc %d noted; [exit] goroutine shutting down...\", chore, rc)\n\t\twait.Done()\n\t}()\n\n\tlog.Debugf(\"%s: spinning up [main] goroutine to execute chore `do' function...\", chore)\n\twait.Add(1)\n\tgo func() {\n\t\tchore.Do(chore)\n\t\tlog.Debugf(\"%s: chore execution complete; [main] goroutine shutting down...\", chore)\n\n\t\tchore.UnixExit(0) \/* catch-all *\/\n\t\tclose(chore.Stderr)\n\t\tclose(chore.Stdout)\n\t\twait.Done()\n\t}()\n\n\tlog.Debugf(\"%s: waiting for chore to complete...\", chore)\n\twait.Wait()\n\tw.db.UpdateTaskLog(chore.TaskUUID, \"\\n\\n------\\n\")\n\n\ttask, err := w.db.GetTask(chore.TaskUUID)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"failed to retrieve task '%s' from database: %s\", chore.TaskUUID, err))\n\t}\n\n\tswitch task.Op {\n\tcase db.BackupOperation:\n\t\toutput = strings.TrimSpace(output)\n\t\tw.db.UpdateTaskLog(task.UUID, fmt.Sprintf(\"BACKUP: `%s`\\n\", output))\n\n\t\tif rc != 0 {\n\t\t\tlog.Debugf(\"%s: FAILING task '%s' in database\", chore, chore.TaskUUID)\n\t\t\tw.db.FailTask(chore.TaskUUID, time.Now())\n\t\t\treturn\n\t\t}\n\n\t\tlog.Infof(\"%s: parsing output of %s operation, '%s'\", chore, task.Op, output)\n\t\tvar v struct {\n\t\t\tKey string `json:\"key\"`\n\t\t\tSize int64 `json:\"archive_size\"`\n\t\t\tCompression string `json:\"compression\"`\n\t\t}\n\t\terr := json.Unmarshal([]byte(output), &v)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"failed to unmarshal output [%s] from %s operation: %s\", output, task.Op, err))\n\t\t}\n\n\t\tif v.Key == \"\" {\n\t\t\tpanic(fmt.Errorf(\"%s: no restore key detected in %s operation output\", chore, task.Op))\n\t\t}\n\n\t\tif v.Compression == \"\" {\n\t\t\t\/* older shield-pipes will always bzip2; and if they aren't\n\t\t\t reporting their compression type, it's gotta be bzip2 *\/\n\t\t\tv.Compression = \"bzip2\"\n\t\t}\n\n\t\tw.db.UpdateTaskLog(task.UUID, fmt.Sprintf(\"BACKUP: restore key = %s\\n\", v.Key))\n\t\tw.db.UpdateTaskLog(task.UUID, fmt.Sprintf(\"BACKUP: archive size = %d bytes\\n\", v.Size))\n\t\tw.db.UpdateTaskLog(task.UUID, fmt.Sprintf(\"BACKUP: compression = %s\\n\", v.Compression))\n\n\t\tlog.Infof(\"%s: restore key for this %s operation is '%s'\", chore, task.Op, v.Key)\n\t\t_, err = w.db.CreateTaskArchive(task.UUID, task.ArchiveUUID, v.Key, time.Now(),\n\t\t\tchore.Encryption, v.Compression, v.Size, task.TenantUUID)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"failed to create task archive database record '%s': %s\", task.ArchiveUUID, err))\n\t\t}\n\n\t\tw.db.UpdateTaskLog(task.UUID, \"\\nBACKUP: recalculating cloud storage usage statistics...\\n\")\n\t\tstore, err := w.db.GetStore(task.StoreUUID)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"%s: failed to retrieve store from the database: %s\", chore, err)\n\t\t\tw.db.UpdateTaskLog(task.UUID, \"WARNING: store usage statistics were NOT updated...\\n\")\n\n\t\t} else {\n\t\t\tstore.StorageUsed += v.Size\n\t\t\tstore.ArchiveCount += 1\n\t\t\tstore.DailyIncrease += v.Size\n\t\t\terr := w.db.UpdateStore(store)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"%s: failed to update store in the database: %s\", chore, err)\n\t\t\t\tw.db.UpdateTaskLog(task.UUID, \"WARNING: store usage statistics were NOT updated...\\n\")\n\t\t\t} else {\n\t\t\t\tw.db.UpdateTaskLog(task.UUID, \" ... store usage statistics updated.\\n\")\n\t\t\t}\n\t\t}\n\n\t\ttenant, err := w.db.GetTenant(task.TenantUUID)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"%s: failed to retrieve tenant from the database: %s\", chore, err)\n\t\t\tw.db.UpdateTaskLog(task.UUID, \"WARNING: tenant usage statistics were not updated...\\n\")\n\n\t\t} else {\n\t\t\ttenant.StorageUsed += v.Size\n\t\t\ttenant.ArchiveCount += 1\n\t\t\ttenant.DailyIncrease += v.Size\n\t\t\t_, err := w.db.UpdateTenant(tenant)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"%s: failed to update tenant in the database: %s\", chore, err)\n\t\t\t\tw.db.UpdateTaskLog(task.UUID, \"WARNING: tenant usage statistics were not updated...\\n\")\n\t\t\t} else {\n\t\t\t\tw.db.UpdateTaskLog(task.UUID, \" ... tenant usage statistics updated.\\n\")\n\t\t\t}\n\t\t}\n\t\tw.db.UpdateTaskLog(task.UUID, \"\\n\\n\")\n\n\tcase db.PurgeOperation:\n\t\tif rc != 0 {\n\t\t\tlog.Debugf(\"%s: FAILING task '%s' in database\", chore, chore.TaskUUID)\n\t\t\tw.db.UpdateTaskLog(task.UUID, \"\\nPURGE: operation failed; keeping archive metadata intact.\\n\")\n\t\t\tw.db.UpdateTaskLog(task.UUID, \"PURGE: will try again later...\\n\")\n\t\t\tw.db.FailTask(chore.TaskUUID, time.Now())\n\t\t\treturn\n\t\t}\n\n\t\tlog.Infof(\"%s: purged archive '%s' from storage\", chore, task.ArchiveUUID)\n\t\terr = w.db.PurgeArchive(task.ArchiveUUID)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"%s: failed to purge the archive record from the database: %s\", chore, err))\n\t\t}\n\n\t\tw.db.UpdateTaskLog(task.UUID, \"\\nPURGE: recalculating cloud storage usage statistics...\\n\")\n\t\tarchive, err := w.db.GetArchive(task.ArchiveUUID)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"%s: failed to retrieve archive record from the database: %s\", chore, err))\n\t\t}\n\n\t\tstore, err := w.db.GetStore(task.StoreUUID)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"%s: failed to retrieve store from the database: %s\", chore, err)\n\t\t\tw.db.UpdateTaskLog(task.UUID, \"WARNING: store usage statistics were NOT updated...\\n\")\n\n\t\t} else {\n\t\t\tstore.StorageUsed -= archive.Size\n\t\t\tstore.ArchiveCount -= 1\n\t\t\tstore.DailyIncrease -= archive.Size\n\t\t\terr := w.db.UpdateStore(store)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"%s: failed to update store in the database: %s\", chore, err)\n\t\t\t\tw.db.UpdateTaskLog(task.UUID, \"WARNING: store usage statistics were NOT updated...\\n\")\n\t\t\t} else {\n\t\t\t\tw.db.UpdateTaskLog(task.UUID, \" ... store usage statistics updated.\\n\")\n\t\t\t}\n\t\t}\n\n\t\ttenant, err := w.db.GetTenant(task.TenantUUID)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"%s: failed to retrieve tenant from the database: %s\", chore, err)\n\t\t\tw.db.UpdateTaskLog(task.UUID, \"WARNING: tenant usage statistics were not updated...\\n\")\n\n\t\t} else {\n\t\t\ttenant.StorageUsed -= archive.Size\n\t\t\ttenant.ArchiveCount -= 1\n\t\t\ttenant.DailyIncrease -= archive.Size\n\t\t\t_, err := w.db.UpdateTenant(tenant)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"%s: failed to update tenant in the database: %s\", chore, err)\n\t\t\t\tw.db.UpdateTaskLog(task.UUID, \"WARNING: tenant usage statistics were not updated...\\n\")\n\t\t\t} else {\n\t\t\t\tw.db.UpdateTaskLog(task.UUID, \" ... tenant usage statistics updated.\\n\")\n\t\t\t}\n\t\t}\n\t\tw.db.UpdateTaskLog(task.UUID, \"\\n\\n\")\n\n\tcase db.TestStoreOperation:\n\t\tvar v struct {\n\t\t\tHealthy bool `json:\"healthy\"`\n\t\t}\n\n\t\tstore, err := w.db.GetStore(task.StoreUUID)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"failed to retrieve store '%s' from database: %s\", task.StoreUUID, err))\n\t\t}\n\t\tif store == nil {\n\t\t\tpanic(fmt.Errorf(\"store '%s' not found in database\", task.StoreUUID))\n\t\t}\n\n\t\tif rc == 0 {\n\t\t\terr = json.Unmarshal([]byte(output), &v)\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Errorf(\"failed to unmarshal output [%s] from %s operation: %s\", output, task.Op, err))\n\t\t\t}\n\t\t\tif v.Healthy {\n\t\t\t\tif store.Healthy != v.Healthy {\n\t\t\t\t\tw.db.UpdateTaskLog(task.UUID, \"\\nTEST-STORE: marking storage system as HEALTHY (recovery).\\n\")\n\t\t\t\t} else {\n\t\t\t\t\tw.db.UpdateTaskLog(task.UUID, \"\\nTEST-STORE: storage is still HEALTHY.\\n\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tw.db.UpdateTaskLog(task.UUID, \"\\nTEST-STORE: marking storage system as UNHEALTHY.\\n\")\n\t\t\t}\n\t\t\tstore.Healthy = v.Healthy\n\n\t\t} else {\n\t\t\tstore.Healthy = false\n\t\t\tw.db.UpdateTaskLog(task.UUID, \"\\nTEST-STORE: marking storage system as UNHEALTHY.\\n\")\n\t\t}\n\n\t\terr = w.db.UpdateStore(store)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"failed to update store '%s' record in database: %s\", task.StoreUUID, err))\n\t\t}\n\n\t\tif rc != 0 {\n\t\t\tlog.Debugf(\"%s: FAILING task '%s' in database\", chore, chore.TaskUUID)\n\t\t\tw.db.FailTask(chore.TaskUUID, time.Now())\n\t\t\treturn\n\t\t}\n\n\tcase db.AgentStatusOperation:\n\t\tif rc != 0 {\n\t\t\tlog.Debugf(\"%s: FAILING task '%s' in database\", chore, chore.TaskUUID)\n\t\t\tw.db.FailTask(chore.TaskUUID, time.Now())\n\t\t\treturn\n\t\t}\n\n\t\tvar v struct {\n\t\t\tName string `json:\"name\"`\n\t\t\tVersion string `json:\"version\"`\n\t\t\tHealth string `json:\"health\"`\n\t\t}\n\n\t\terr = json.Unmarshal([]byte(output), &v)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"failed to unmarshal output [%s] from %s operation: %s\", output, task.Op, err))\n\t\t}\n\n\t\tagents, err := w.db.GetAllAgents(&db.AgentFilter{Address: task.Agent})\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"failed to retrieve agent '%s' from database: %s\", task.Agent, err))\n\t\t}\n\t\tif len(agents) != 1 {\n\t\t\tpanic(fmt.Errorf(\"found %d agent records for address '%s' (expected 1)\", len(agents), task.Agent))\n\t\t}\n\n\t\tagent := agents[0]\n\t\tagent.Name = v.Name\n\t\tagent.Version = v.Version\n\t\tagent.Status = v.Health\n\t\tagent.RawMeta = output\n\t\tagent.LastCheckedAt = time.Now().Unix()\n\t\terr = w.db.UpdateAgent(agent)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"failed to update agent '%s' record in database: %s\", task.Agent, err))\n\t\t}\n\t}\n\n\tlog.Debugf(\"%s: completing task '%s' in database\", chore, chore.TaskUUID)\n\tw.db.CompleteTask(chore.TaskUUID, time.Now())\n}\n<commit_msg>Update Agent Status on failed agent-status tasks<commit_after>package scheduler\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/jhunt\/go-log\"\n\n\t\"github.com\/shieldproject\/shield\/db\"\n)\n\nvar next = 0\n\ntype Chore struct {\n\tID string\n\tTaskUUID string\n\tEncryption string\n\n\tDo func(chore Chore)\n\n\tStdout chan string\n\tStderr chan string\n\tExit chan int\n\tCancel chan bool\n}\n\nfunc NewChore(id string, do func(Chore)) Chore {\n\tnext += 1\n\treturn Chore{\n\t\tID: fmt.Sprintf(\"%s-%08d\", time.Now().Format(\"20060102-150405\"), next),\n\t\tTaskUUID: id,\n\t\tDo: do,\n\n\t\tStdout: make(chan string),\n\t\tStderr: make(chan string),\n\t\tExit: make(chan int),\n\t\tCancel: make(chan bool),\n\t}\n}\n\nfunc (chore Chore) String() string {\n\treturn fmt.Sprintf(\"chore %s\", chore.ID)\n}\n\nfunc (chore Chore) Infof(msg string, args ...interface{}) {\n\tlog.Debugf(chore.String()+\": stdout: \"+msg, args...)\n\tchore.Stdout <- fmt.Sprintf(msg+\"\\n\", args...)\n}\n\nfunc (chore Chore) Errorf(msg string, args ...interface{}) {\n\tlog.Debugf(chore.String()+\": stderr: \"+msg, args...)\n\tchore.Stderr <- fmt.Sprintf(msg+\"\\n\", args...)\n}\n\nfunc (chore Chore) UnixExit(rc int) {\n\tdefer func() {\n\t\trecover()\n\t}()\n\n\tchore.Exit <- rc\n\tclose(chore.Exit)\n\tlog.Debugf(\"%s: exiting %d\", chore, rc)\n}\n\nfunc (w *Worker) Execute(chore Chore) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Errorf(\"%s: %s\", chore, err)\n\t\t\tw.db.UpdateTaskLog(chore.TaskUUID, fmt.Sprintf(\"\\n\\nERROR: %s\\n\\n\", err))\n\n\t\t\tlog.Errorf(\"%s: FAILING task '%s' in database\", chore, chore.TaskUUID)\n\t\t\tw.db.FailTask(chore.TaskUUID, time.Now())\n\t\t}\n\t}()\n\n\tvar wait sync.WaitGroup\n\n\tw.Reserve(chore.TaskUUID)\n\tdefer w.Release()\n\n\tlog.Infof(\"%s: %s executing chore for task '%s'\", chore, w, chore.TaskUUID)\n\tw.db.StartTask(chore.TaskUUID, time.Now())\n\n\tlog.Debugf(\"%s: spinning up [stderr] goroutine to watch chore stderr and update the task log...\", chore)\n\twait.Add(1)\n\tgo func() {\n\t\tfor s := range chore.Stderr {\n\t\t\tw.db.UpdateTaskLog(chore.TaskUUID, s)\n\t\t}\n\t\tlog.Debugf(\"%s: no more standard error; [stderr] goroutine shutting down...\", chore)\n\t\twait.Done()\n\t}()\n\n\tlog.Debugf(\"%s: spinning up [stdout] goroutine to watch chore stdout and accumulate the output...\", chore)\n\toutput := \"\"\n\twait.Add(1)\n\tgo func() {\n\t\tfor s := range chore.Stdout {\n\t\t\toutput += s\n\t\t}\n\t\tlog.Debugf(\"%s: no more standard output; [stdout] goroutine shutting down...\", chore)\n\t\twait.Done()\n\t}()\n\n\tlog.Debugf(\"%s: spinning up [exit] goroutine to watch chore exit status and remember it...\", chore)\n\trc := 0\n\twait.Add(1)\n\tgo func() {\n\t\trc = <-chore.Exit\n\t\tlog.Debugf(\"%s: rc %d noted; [exit] goroutine shutting down...\", chore, rc)\n\t\twait.Done()\n\t}()\n\n\tlog.Debugf(\"%s: spinning up [main] goroutine to execute chore `do' function...\", chore)\n\twait.Add(1)\n\tgo func() {\n\t\tchore.Do(chore)\n\t\tlog.Debugf(\"%s: chore execution complete; [main] goroutine shutting down...\", chore)\n\n\t\tchore.UnixExit(0) \/* catch-all *\/\n\t\tclose(chore.Stderr)\n\t\tclose(chore.Stdout)\n\t\twait.Done()\n\t}()\n\n\tlog.Debugf(\"%s: waiting for chore to complete...\", chore)\n\twait.Wait()\n\tw.db.UpdateTaskLog(chore.TaskUUID, \"\\n\\n------\\n\")\n\n\ttask, err := w.db.GetTask(chore.TaskUUID)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"failed to retrieve task '%s' from database: %s\", chore.TaskUUID, err))\n\t}\n\n\tswitch task.Op {\n\tcase db.BackupOperation:\n\t\toutput = strings.TrimSpace(output)\n\t\tw.db.UpdateTaskLog(task.UUID, fmt.Sprintf(\"BACKUP: `%s`\\n\", output))\n\n\t\tif rc != 0 {\n\t\t\tlog.Debugf(\"%s: FAILING task '%s' in database\", chore, chore.TaskUUID)\n\t\t\tw.db.FailTask(chore.TaskUUID, time.Now())\n\t\t\treturn\n\t\t}\n\n\t\tlog.Infof(\"%s: parsing output of %s operation, '%s'\", chore, task.Op, output)\n\t\tvar v struct {\n\t\t\tKey string `json:\"key\"`\n\t\t\tSize int64 `json:\"archive_size\"`\n\t\t\tCompression string `json:\"compression\"`\n\t\t}\n\t\terr := json.Unmarshal([]byte(output), &v)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"failed to unmarshal output [%s] from %s operation: %s\", output, task.Op, err))\n\t\t}\n\n\t\tif v.Key == \"\" {\n\t\t\tpanic(fmt.Errorf(\"%s: no restore key detected in %s operation output\", chore, task.Op))\n\t\t}\n\n\t\tif v.Compression == \"\" {\n\t\t\t\/* older shield-pipes will always bzip2; and if they aren't\n\t\t\t reporting their compression type, it's gotta be bzip2 *\/\n\t\t\tv.Compression = \"bzip2\"\n\t\t}\n\n\t\tw.db.UpdateTaskLog(task.UUID, fmt.Sprintf(\"BACKUP: restore key = %s\\n\", v.Key))\n\t\tw.db.UpdateTaskLog(task.UUID, fmt.Sprintf(\"BACKUP: archive size = %d bytes\\n\", v.Size))\n\t\tw.db.UpdateTaskLog(task.UUID, fmt.Sprintf(\"BACKUP: compression = %s\\n\", v.Compression))\n\n\t\tlog.Infof(\"%s: restore key for this %s operation is '%s'\", chore, task.Op, v.Key)\n\t\t_, err = w.db.CreateTaskArchive(task.UUID, task.ArchiveUUID, v.Key, time.Now(),\n\t\t\tchore.Encryption, v.Compression, v.Size, task.TenantUUID)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"failed to create task archive database record '%s': %s\", task.ArchiveUUID, err))\n\t\t}\n\n\t\tw.db.UpdateTaskLog(task.UUID, \"\\nBACKUP: recalculating cloud storage usage statistics...\\n\")\n\t\tstore, err := w.db.GetStore(task.StoreUUID)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"%s: failed to retrieve store from the database: %s\", chore, err)\n\t\t\tw.db.UpdateTaskLog(task.UUID, \"WARNING: store usage statistics were NOT updated...\\n\")\n\n\t\t} else {\n\t\t\tstore.StorageUsed += v.Size\n\t\t\tstore.ArchiveCount += 1\n\t\t\tstore.DailyIncrease += v.Size\n\t\t\terr := w.db.UpdateStore(store)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"%s: failed to update store in the database: %s\", chore, err)\n\t\t\t\tw.db.UpdateTaskLog(task.UUID, \"WARNING: store usage statistics were NOT updated...\\n\")\n\t\t\t} else {\n\t\t\t\tw.db.UpdateTaskLog(task.UUID, \" ... store usage statistics updated.\\n\")\n\t\t\t}\n\t\t}\n\n\t\ttenant, err := w.db.GetTenant(task.TenantUUID)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"%s: failed to retrieve tenant from the database: %s\", chore, err)\n\t\t\tw.db.UpdateTaskLog(task.UUID, \"WARNING: tenant usage statistics were not updated...\\n\")\n\n\t\t} else {\n\t\t\ttenant.StorageUsed += v.Size\n\t\t\ttenant.ArchiveCount += 1\n\t\t\ttenant.DailyIncrease += v.Size\n\t\t\t_, err := w.db.UpdateTenant(tenant)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"%s: failed to update tenant in the database: %s\", chore, err)\n\t\t\t\tw.db.UpdateTaskLog(task.UUID, \"WARNING: tenant usage statistics were not updated...\\n\")\n\t\t\t} else {\n\t\t\t\tw.db.UpdateTaskLog(task.UUID, \" ... tenant usage statistics updated.\\n\")\n\t\t\t}\n\t\t}\n\t\tw.db.UpdateTaskLog(task.UUID, \"\\n\\n\")\n\n\tcase db.PurgeOperation:\n\t\tif rc != 0 {\n\t\t\tlog.Debugf(\"%s: FAILING task '%s' in database\", chore, chore.TaskUUID)\n\t\t\tw.db.UpdateTaskLog(task.UUID, \"\\nPURGE: operation failed; keeping archive metadata intact.\\n\")\n\t\t\tw.db.UpdateTaskLog(task.UUID, \"PURGE: will try again later...\\n\")\n\t\t\tw.db.FailTask(chore.TaskUUID, time.Now())\n\t\t\treturn\n\t\t}\n\n\t\tlog.Infof(\"%s: purged archive '%s' from storage\", chore, task.ArchiveUUID)\n\t\terr = w.db.PurgeArchive(task.ArchiveUUID)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"%s: failed to purge the archive record from the database: %s\", chore, err))\n\t\t}\n\n\t\tw.db.UpdateTaskLog(task.UUID, \"\\nPURGE: recalculating cloud storage usage statistics...\\n\")\n\t\tarchive, err := w.db.GetArchive(task.ArchiveUUID)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"%s: failed to retrieve archive record from the database: %s\", chore, err))\n\t\t}\n\n\t\tstore, err := w.db.GetStore(task.StoreUUID)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"%s: failed to retrieve store from the database: %s\", chore, err)\n\t\t\tw.db.UpdateTaskLog(task.UUID, \"WARNING: store usage statistics were NOT updated...\\n\")\n\n\t\t} else {\n\t\t\tstore.StorageUsed -= archive.Size\n\t\t\tstore.ArchiveCount -= 1\n\t\t\tstore.DailyIncrease -= archive.Size\n\t\t\terr := w.db.UpdateStore(store)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"%s: failed to update store in the database: %s\", chore, err)\n\t\t\t\tw.db.UpdateTaskLog(task.UUID, \"WARNING: store usage statistics were NOT updated...\\n\")\n\t\t\t} else {\n\t\t\t\tw.db.UpdateTaskLog(task.UUID, \" ... store usage statistics updated.\\n\")\n\t\t\t}\n\t\t}\n\n\t\ttenant, err := w.db.GetTenant(task.TenantUUID)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"%s: failed to retrieve tenant from the database: %s\", chore, err)\n\t\t\tw.db.UpdateTaskLog(task.UUID, \"WARNING: tenant usage statistics were not updated...\\n\")\n\n\t\t} else {\n\t\t\ttenant.StorageUsed -= archive.Size\n\t\t\ttenant.ArchiveCount -= 1\n\t\t\ttenant.DailyIncrease -= archive.Size\n\t\t\t_, err := w.db.UpdateTenant(tenant)\n\t\t\tif err != nil {\n\t\t\t\tlog.Errorf(\"%s: failed to update tenant in the database: %s\", chore, err)\n\t\t\t\tw.db.UpdateTaskLog(task.UUID, \"WARNING: tenant usage statistics were not updated...\\n\")\n\t\t\t} else {\n\t\t\t\tw.db.UpdateTaskLog(task.UUID, \" ... tenant usage statistics updated.\\n\")\n\t\t\t}\n\t\t}\n\t\tw.db.UpdateTaskLog(task.UUID, \"\\n\\n\")\n\n\tcase db.TestStoreOperation:\n\t\tvar v struct {\n\t\t\tHealthy bool `json:\"healthy\"`\n\t\t}\n\n\t\tstore, err := w.db.GetStore(task.StoreUUID)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"failed to retrieve store '%s' from database: %s\", task.StoreUUID, err))\n\t\t}\n\t\tif store == nil {\n\t\t\tpanic(fmt.Errorf(\"store '%s' not found in database\", task.StoreUUID))\n\t\t}\n\n\t\tif rc == 0 {\n\t\t\terr = json.Unmarshal([]byte(output), &v)\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Errorf(\"failed to unmarshal output [%s] from %s operation: %s\", output, task.Op, err))\n\t\t\t}\n\t\t\tif v.Healthy {\n\t\t\t\tif store.Healthy != v.Healthy {\n\t\t\t\t\tw.db.UpdateTaskLog(task.UUID, \"\\nTEST-STORE: marking storage system as HEALTHY (recovery).\\n\")\n\t\t\t\t} else {\n\t\t\t\t\tw.db.UpdateTaskLog(task.UUID, \"\\nTEST-STORE: storage is still HEALTHY.\\n\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tw.db.UpdateTaskLog(task.UUID, \"\\nTEST-STORE: marking storage system as UNHEALTHY.\\n\")\n\t\t\t}\n\t\t\tstore.Healthy = v.Healthy\n\n\t\t} else {\n\t\t\tstore.Healthy = false\n\t\t\tw.db.UpdateTaskLog(task.UUID, \"\\nTEST-STORE: marking storage system as UNHEALTHY.\\n\")\n\t\t}\n\n\t\terr = w.db.UpdateStore(store)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Errorf(\"failed to update store '%s' record in database: %s\", task.StoreUUID, err))\n\t\t}\n\n\t\tif rc != 0 {\n\t\t\tlog.Debugf(\"%s: FAILING task '%s' in database\", chore, chore.TaskUUID)\n\t\t\tw.db.FailTask(chore.TaskUUID, time.Now())\n\t\t\treturn\n\t\t}\n\n\tcase db.AgentStatusOperation:\n\t\tagent, err := w.db.GetAgentByAddress(task.Agent)\n\t\tif err != nil {\n\t\t\tlog.Debugf(\"%s: FAILING task '%s' in database\", chore, chore.TaskUUID)\n\t\t\tw.db.FailTask(chore.TaskUUID, time.Now())\n\t\t\tpanic(fmt.Errorf(\"failed to retrieve agent '%s' from database: %s\", task.Agent, err))\n\t\t}\n\t\tif agent == nil {\n\t\t\tlog.Debugf(\"%s: FAILING task '%s' in database\", chore, chore.TaskUUID)\n\t\t\tw.db.FailTask(chore.TaskUUID, time.Now())\n\t\t\tpanic(fmt.Errorf(\"failed to retrieve agent '%s' from database: no such agent\", task.Agent))\n\t\t}\n\n\t\tif rc == 0 {\n\t\t\tvar v struct {\n\t\t\t\tName string `json:\"name\"`\n\t\t\t\tVersion string `json:\"version\"`\n\t\t\t\tHealth string `json:\"health\"`\n\t\t\t}\n\n\t\t\terr = json.Unmarshal([]byte(output), &v)\n\t\t\tif err != nil {\n\t\t\t\tlog.Debugf(\"%s: FAILING task '%s' in database\", chore, chore.TaskUUID)\n\t\t\t\tw.db.FailTask(chore.TaskUUID, time.Now())\n\t\t\t\tpanic(fmt.Errorf(\"failed to unmarshal output [%s] from %s operation: %s\", output, task.Op, err))\n\t\t\t}\n\n\t\t\tagent.Name = v.Name\n\t\t\tagent.Version = v.Version\n\t\t\tagent.Status = v.Health\n\t\t\tagent.RawMeta = output\n\t\t\tagent.LastCheckedAt = time.Now().Unix()\n\t\t} else {\n\t\t\tagent.Status = \"error\"\n\t\t\tagent.LastCheckedAt = time.Now().Unix()\n\t\t}\n\t\terr = w.db.UpdateAgent(agent)\n\t\tif err != nil {\n\t\t\tlog.Debugf(\"%s: FAILING task '%s' in database\", chore, chore.TaskUUID)\n\t\t\tw.db.FailTask(chore.TaskUUID, time.Now())\n\t\t\tpanic(fmt.Errorf(\"failed to update agent '%s' record in database: %s\", task.Agent, err))\n\t\t}\n\n\t\tif rc != 0 {\n\t\t\tlog.Debugf(\"%s: FAILING task '%s' in database\", chore, chore.TaskUUID)\n\t\t\tw.db.FailTask(chore.TaskUUID, time.Now())\n\t\t\treturn\n\t\t}\n\t}\n\n\tlog.Debugf(\"%s: completing task '%s' in database\", chore, chore.TaskUUID)\n\tw.db.CompleteTask(chore.TaskUUID, time.Now())\n}\n<|endoftext|>"} {"text":"<commit_before>package core\n\nimport (\n\t\"errors\"\n\t\"io\"\n\n\t\"github.com\/toorop\/gopenstack\/objectstorage\/v1\"\n)\n\n\/\/ DiskStore represents a physical disk store\ntype openstackStore struct {\n\tRegion string\n\tContainer string\n}\n\n\/\/ newOpenstackStore check object storage and return a new openstackStore\nfunc newOpenstackStore() (*openstackStore, error) {\n\tosPath := objectstorageV1.NewOsPathFromPath(Cfg.GetStoreSource())\n\tif !osPath.IsContainer() {\n\t\treturn nil, errors.New(\"path \" + Cfg.GetStoreDriver() + \" is not a path to a valid openstack container\")\n\t}\n\t\/\/ container exists ?\n\tcontainer := &objectstorageV1.Container{\n\t\tRegion: osPath.Region,\n\t\tName: osPath.Container,\n\t}\n\terr := container.Put(&objectstorageV1.ContainerRequestParameters{\n\t\tIfNoneMatch: true,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstore := &openstackStore{\n\t\tRegion: osPath.Region,\n\t\tContainer: osPath.Container,\n\t}\n\treturn store, nil\n}\n\n\/\/ Put save key value in store\nfunc (s *openstackStore) Put(key string, reader io.Reader) error {\n\tif key == \"\" {\n\t\treturn errors.New(\"store.Put: key is empty\")\n\t}\n\tobject := objectstorageV1.Object{\n\t\tName: key,\n\t\tRegion: s.Region,\n\t\tContainer: s.Container,\n\t\tRawData: reader,\n\t}\n\treturn object.Put(&objectstorageV1.ObjectRequestParameters{\n\t\tIfNoneMatch: true,\n\t})\n}\n\n\/\/ Get returns io.Reader corresponding to key\nfunc (s *openstackStore) Get(key string) (io.Reader, error) {\n\tif key == \"\" {\n\t\treturn nil, errors.New(\"store.Get: key is empty\")\n\t}\n\tobject := objectstorageV1.Object{\n\t\tName: key,\n\t\tRegion: s.Region,\n\t\tContainer: s.Container,\n\t}\n\terr := object.Get(nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn object.RawData, nil\n}\n\n\/\/ Del\nfunc (s *openstackStore) Del(key string) error {\n\tif key == \"\" {\n\t\treturn errors.New(\"store.Del: key is empty\")\n\t}\n\tobject := objectstorageV1.Object{\n\t\tName: key,\n\t\tRegion: s.Region,\n\t\tContainer: s.Container,\n\t}\n\treturn object.Delete(false)\n}\n<commit_msg>no error on store.Del if object is missing<commit_after>package core\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"strings\"\n\n\t\"github.com\/toorop\/gopenstack\/objectstorage\/v1\"\n)\n\n\/\/ DiskStore represents a physical disk store\ntype openstackStore struct {\n\tRegion string\n\tContainer string\n}\n\n\/\/ newOpenstackStore check object storage and return a new openstackStore\nfunc newOpenstackStore() (*openstackStore, error) {\n\tosPath := objectstorageV1.NewOsPathFromPath(Cfg.GetStoreSource())\n\tif !osPath.IsContainer() {\n\t\treturn nil, errors.New(\"path \" + Cfg.GetStoreDriver() + \" is not a path to a valid openstack container\")\n\t}\n\t\/\/ container exists ?\n\tcontainer := &objectstorageV1.Container{\n\t\tRegion: osPath.Region,\n\t\tName: osPath.Container,\n\t}\n\terr := container.Put(&objectstorageV1.ContainerRequestParameters{\n\t\tIfNoneMatch: true,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstore := &openstackStore{\n\t\tRegion: osPath.Region,\n\t\tContainer: osPath.Container,\n\t}\n\treturn store, nil\n}\n\n\/\/ Put save key value in store\nfunc (s *openstackStore) Put(key string, reader io.Reader) error {\n\tif key == \"\" {\n\t\treturn errors.New(\"store.Put: key is empty\")\n\t}\n\tobject := objectstorageV1.Object{\n\t\tName: key,\n\t\tRegion: s.Region,\n\t\tContainer: s.Container,\n\t\tRawData: reader,\n\t}\n\treturn object.Put(&objectstorageV1.ObjectRequestParameters{\n\t\tIfNoneMatch: true,\n\t})\n}\n\n\/\/ Get returns io.Reader corresponding to key\nfunc (s *openstackStore) Get(key string) (io.Reader, error) {\n\tif key == \"\" {\n\t\treturn nil, errors.New(\"store.Get: key is empty\")\n\t}\n\tobject := objectstorageV1.Object{\n\t\tName: key,\n\t\tRegion: s.Region,\n\t\tContainer: s.Container,\n\t}\n\terr := object.Get(nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn object.RawData, nil\n}\n\n\/\/ Del\nfunc (s *openstackStore) Del(key string) error {\n\tif key == \"\" {\n\t\treturn errors.New(\"store.Del: key is empty\")\n\t}\n\tobject := objectstorageV1.Object{\n\t\tName: key,\n\t\tRegion: s.Region,\n\t\tContainer: s.Container,\n\t}\n\terr := object.Delete(false)\n\tif err != nil && strings.HasPrefix(\"404 Not Found\", err.Error()) {\n\t\terr = nil\n\t}\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package vcs\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"text\/template\"\n\n\t\"github.com\/hashicorp\/errwrap\"\n)\n\nvar PostCheckoutTmpl = template.Must(template.New(\"name\").Parse(`#!\/bin\/sh\nsourceclock start\n`))\n\nvar PrepCommitTmpl = template.Must(template.New(\"name\").Parse(`#!\/bin\/sh\nprintf \"$(cat $1)[$(sourceclock split)]\" > \"$1\"\n`))\n\nvar PostCommitTmpl = template.Must(template.New(\"name\").Parse(`#!\/bin\/sh\nsourceclock lap\n`))\n\ntype Git struct {\n\tdir string\n}\n\nfunc NewGit(dir string) *Git {\n\treturn &Git{\n\t\tdir: filepath.Join(dir, \".git\"),\n\t}\n}\n\nfunc (g *Git) Name() string { return \"git\" }\nfunc (g *Git) Supported() bool {\n\tfi, err := os.Stat(g.dir)\n\tif err != nil || !fi.IsDir() {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (g *Git) Hook() error {\n\thpath := filepath.Join(g.dir, \"hooks\")\n\n\t\/\/post checkout: start()\n\tpostchf, err := os.Create(filepath.Join(hpath, \"post-checkout\"))\n\tif err != nil {\n\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Failed to create post-checkout '%s': {{err}}\", postchf.Name()), err)\n\t}\n\n\terr = postchf.Chmod(0766)\n\tif err != nil {\n\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Failed to make post-checkout file '%s' executable: {{err}}\", hpath), err)\n\t}\n\n\terr = PostCheckoutTmpl.Execute(postchf, struct{}{})\n\tif err != nil {\n\t\treturn errwrap.Wrapf(\"Failed to run post-checkout template: {{err}}\", err)\n\t}\n\n\t\/\/prepare commit msg: split()\n\tprepcof, err := os.Create(filepath.Join(hpath, \"prepare-commit-msg\"))\n\tif err != nil {\n\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Failed to create prepare-commit-msg '%s': {{err}}\", postchf.Name()), err)\n\t}\n\n\terr = prepcof.Chmod(0766)\n\tif err != nil {\n\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Failed to make prepare-commit-msg file '%s' executable: {{err}}\", hpath), err)\n\t}\n\n\terr = PrepCommitTmpl.Execute(prepcof, struct{}{})\n\tif err != nil {\n\t\treturn errwrap.Wrapf(\"Failed to run post-commit template: {{err}}\", err)\n\t}\n\n\t\/\/post commit: lap()\n\tpostcof, err := os.Create(filepath.Join(hpath, \"post-commit\"))\n\tif err != nil {\n\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Failed to create post-commit '%s': {{err}}\", postchf.Name()), err)\n\t}\n\n\terr = postcof.Chmod(0766)\n\tif err != nil {\n\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Failed to make post-commit file '%s' executable: {{err}}\", hpath), err)\n\t}\n\n\terr = PostCommitTmpl.Execute(postcof, struct{}{})\n\tif err != nil {\n\t\treturn errwrap.Wrapf(\"Failed to run post-commit template: {{err}}\", err)\n\t}\n\n\treturn nil\n}\n<commit_msg>added todos [1m30s]<commit_after>package vcs\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"text\/template\"\n\n\t\"github.com\/hashicorp\/errwrap\"\n)\n\nvar PostCheckoutTmpl = template.Must(template.New(\"name\").Parse(`#!\/bin\/sh\n# @todo handle checkout files endge case\n\nsourceclock start\n`))\n\nvar PrepCommitTmpl = template.Must(template.New(\"name\").Parse(`#!\/bin\/sh\n# @todo handle merge\/rebase kind of commits\n\nprintf \"$(cat $1) [$(sourceclock split)]\" > \"$1\"\n`))\n\nvar PostCommitTmpl = template.Must(template.New(\"name\").Parse(`#!\/bin\/sh\n# @todo handle merge\/rebase kind of commits\n\nsourceclock lap\n`))\n\ntype Git struct {\n\tdir string\n}\n\nfunc NewGit(dir string) *Git {\n\treturn &Git{\n\t\tdir: filepath.Join(dir, \".git\"),\n\t}\n}\n\nfunc (g *Git) Name() string { return \"git\" }\nfunc (g *Git) Supported() bool {\n\tfi, err := os.Stat(g.dir)\n\tif err != nil || !fi.IsDir() {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (g *Git) Hook() error {\n\thpath := filepath.Join(g.dir, \"hooks\")\n\n\t\/\/post checkout: start()\n\tpostchf, err := os.Create(filepath.Join(hpath, \"post-checkout\"))\n\tif err != nil {\n\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Failed to create post-checkout '%s': {{err}}\", postchf.Name()), err)\n\t}\n\n\terr = postchf.Chmod(0766)\n\tif err != nil {\n\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Failed to make post-checkout file '%s' executable: {{err}}\", hpath), err)\n\t}\n\n\terr = PostCheckoutTmpl.Execute(postchf, struct{}{})\n\tif err != nil {\n\t\treturn errwrap.Wrapf(\"Failed to run post-checkout template: {{err}}\", err)\n\t}\n\n\t\/\/prepare commit msg: split()\n\tprepcof, err := os.Create(filepath.Join(hpath, \"prepare-commit-msg\"))\n\tif err != nil {\n\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Failed to create prepare-commit-msg '%s': {{err}}\", postchf.Name()), err)\n\t}\n\n\terr = prepcof.Chmod(0766)\n\tif err != nil {\n\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Failed to make prepare-commit-msg file '%s' executable: {{err}}\", hpath), err)\n\t}\n\n\terr = PrepCommitTmpl.Execute(prepcof, struct{}{})\n\tif err != nil {\n\t\treturn errwrap.Wrapf(\"Failed to run post-commit template: {{err}}\", err)\n\t}\n\n\t\/\/post commit: lap()\n\tpostcof, err := os.Create(filepath.Join(hpath, \"post-commit\"))\n\tif err != nil {\n\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Failed to create post-commit '%s': {{err}}\", postchf.Name()), err)\n\t}\n\n\terr = postcof.Chmod(0766)\n\tif err != nil {\n\t\treturn errwrap.Wrapf(fmt.Sprintf(\"Failed to make post-commit file '%s' executable: {{err}}\", hpath), err)\n\t}\n\n\terr = PostCommitTmpl.Execute(postcof, struct{}{})\n\tif err != nil {\n\t\treturn errwrap.Wrapf(\"Failed to run post-commit template: {{err}}\", err)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage docker\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"strings\"\n\n\tdocker \"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/tsuru\/tsuru\/app\/image\"\n\t\"github.com\/tsuru\/tsuru\/builder\"\n\ttsuruErrors \"github.com\/tsuru\/tsuru\/errors\"\n\t\"github.com\/tsuru\/tsuru\/log\"\n\t\"github.com\/tsuru\/tsuru\/net\"\n\t\"github.com\/tsuru\/tsuru\/provision\"\n\t\"github.com\/tsuru\/tsuru\/provision\/dockercommon\"\n\t\"github.com\/tsuru\/tsuru\/safe\"\n)\n\nvar _ builder.Builder = &dockerBuilder{}\n\nfunc (b *dockerBuilder) PlatformAdd(opts builder.PlatformOptions) error {\n\treturn b.buildPlatform(opts.Name, opts.Args, opts.Output, opts.Input)\n}\n\nfunc (b *dockerBuilder) PlatformUpdate(opts builder.PlatformOptions) error {\n\treturn b.buildPlatform(opts.Name, opts.Args, opts.Output, opts.Input)\n}\n\nfunc (b *dockerBuilder) buildPlatform(name string, args map[string]string, w io.Writer, r io.Reader) error {\n\tvar inputStream io.Reader\n\tvar dockerfileURL string\n\tif r != nil {\n\t\tdata, err := ioutil.ReadAll(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar buf bytes.Buffer\n\t\twriter := tar.NewWriter(&buf)\n\t\twriter.WriteHeader(&tar.Header{\n\t\t\tName: \"Dockerfile\",\n\t\t\tMode: 0644,\n\t\t\tSize: int64(len(data)),\n\t\t})\n\t\twriter.Write(data)\n\t\twriter.Close()\n\t\tinputStream = &buf\n\t} else {\n\t\tdockerfileURL = args[\"dockerfile\"]\n\t\tif dockerfileURL == \"\" {\n\t\t\treturn errors.New(\"Dockerfile is required\")\n\t\t}\n\t\tif _, err := url.ParseRequestURI(dockerfileURL); err != nil {\n\t\t\treturn errors.New(\"Dockerfile parameter must be a URL\")\n\t\t}\n\t}\n\timageName := image.PlatformImageName(name)\n\tclient, err := getDockerClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\tclient.HTTPClient = net.Dial5FullUnlimitedClient\n\tbuildOptions := docker.BuildImageOptions{\n\t\tName: imageName,\n\t\tPull: true,\n\t\tNoCache: true,\n\t\tRmTmpContainer: true,\n\t\tRemote: dockerfileURL,\n\t\tInputStream: inputStream,\n\t\tOutputStream: w,\n\t\tInactivityTimeout: net.StreamInactivityTimeout,\n\t\tRawJSONStream: true,\n\t}\n\terr = client.BuildImage(buildOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\tparts := strings.Split(imageName, \":\")\n\tvar tag string\n\tif len(parts) > 2 {\n\t\timageName = strings.Join(parts[:len(parts)-1], \":\")\n\t\ttag = parts[len(parts)-1]\n\t} else if len(parts) > 1 {\n\t\timageName = parts[0]\n\t\ttag = parts[1]\n\t} else {\n\t\timageName = parts[0]\n\t\ttag = \"latest\"\n\t}\n\tvar buf safe.Buffer\n\tpushOpts := docker.PushImageOptions{\n\t\tName: imageName,\n\t\tTag: tag,\n\t\tOutputStream: &buf,\n\t\tInactivityTimeout: net.StreamInactivityTimeout,\n\t\tRawJSONStream: true,\n\t}\n\terr = client.PushImage(pushOpts, dockercommon.RegistryAuthConfig())\n\tif err != nil {\n\t\tlog.Errorf(\"[docker] Failed to push image %q (%s): %s\", name, err, buf.String())\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc getDockerClient() (*docker.Client, error) {\n\tprovisioners, err := provision.Registry()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar client *docker.Client\n\tmultiErr := tsuruErrors.NewMultiError()\n\tfor _, p := range provisioners {\n\t\tif provisioner, ok := p.(provision.BuilderDeploy); ok {\n\t\t\tclient, err = provisioner.GetDockerClient(nil)\n\t\t\tif err != nil {\n\t\t\t\tmultiErr.Add(err)\n\t\t\t} else if client != nil {\n\t\t\t\treturn client, nil\n\t\t\t}\n\t\t}\n\t}\n\tif multiErr.Len() > 0 {\n\t\treturn nil, multiErr\n\t}\n\treturn nil, errors.New(\"No Docker nodes available\")\n}\n\nfunc (b *dockerBuilder) PlatformRemove(name string) error {\n\tclient, err := getDockerClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\timg, err := client.InspectImage(image.PlatformImageName(name))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = client.RemoveImage(img.ID)\n\tif err != nil && err == docker.ErrNoSuchImage {\n\t\tlog.Errorf(\"error removing image %s from Docker: no such image\", name)\n\t\treturn nil\n\t}\n\treturn err\n}\n<commit_msg>builder\/docker: change only timeout in httpclient<commit_after>\/\/ Copyright 2017 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage docker\n\nimport (\n\t\"archive\/tar\"\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"strings\"\n\n\tdocker \"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/tsuru\/tsuru\/app\/image\"\n\t\"github.com\/tsuru\/tsuru\/builder\"\n\ttsuruErrors \"github.com\/tsuru\/tsuru\/errors\"\n\t\"github.com\/tsuru\/tsuru\/log\"\n\t\"github.com\/tsuru\/tsuru\/net\"\n\t\"github.com\/tsuru\/tsuru\/provision\"\n\t\"github.com\/tsuru\/tsuru\/provision\/dockercommon\"\n\t\"github.com\/tsuru\/tsuru\/safe\"\n)\n\nvar _ builder.Builder = &dockerBuilder{}\n\nfunc (b *dockerBuilder) PlatformAdd(opts builder.PlatformOptions) error {\n\treturn b.buildPlatform(opts.Name, opts.Args, opts.Output, opts.Input)\n}\n\nfunc (b *dockerBuilder) PlatformUpdate(opts builder.PlatformOptions) error {\n\treturn b.buildPlatform(opts.Name, opts.Args, opts.Output, opts.Input)\n}\n\nfunc (b *dockerBuilder) buildPlatform(name string, args map[string]string, w io.Writer, r io.Reader) error {\n\tvar inputStream io.Reader\n\tvar dockerfileURL string\n\tif r != nil {\n\t\tdata, err := ioutil.ReadAll(r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tvar buf bytes.Buffer\n\t\twriter := tar.NewWriter(&buf)\n\t\twriter.WriteHeader(&tar.Header{\n\t\t\tName: \"Dockerfile\",\n\t\t\tMode: 0644,\n\t\t\tSize: int64(len(data)),\n\t\t})\n\t\twriter.Write(data)\n\t\twriter.Close()\n\t\tinputStream = &buf\n\t} else {\n\t\tdockerfileURL = args[\"dockerfile\"]\n\t\tif dockerfileURL == \"\" {\n\t\t\treturn errors.New(\"Dockerfile is required\")\n\t\t}\n\t\tif _, err := url.ParseRequestURI(dockerfileURL); err != nil {\n\t\t\treturn errors.New(\"Dockerfile parameter must be a URL\")\n\t\t}\n\t}\n\timageName := image.PlatformImageName(name)\n\tclient, err := getDockerClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\tclient.HTTPClient.Timeout = 0\n\tbuildOptions := docker.BuildImageOptions{\n\t\tName: imageName,\n\t\tPull: true,\n\t\tNoCache: true,\n\t\tRmTmpContainer: true,\n\t\tRemote: dockerfileURL,\n\t\tInputStream: inputStream,\n\t\tOutputStream: w,\n\t\tInactivityTimeout: net.StreamInactivityTimeout,\n\t\tRawJSONStream: true,\n\t}\n\terr = client.BuildImage(buildOptions)\n\tif err != nil {\n\t\treturn err\n\t}\n\tparts := strings.Split(imageName, \":\")\n\tvar tag string\n\tif len(parts) > 2 {\n\t\timageName = strings.Join(parts[:len(parts)-1], \":\")\n\t\ttag = parts[len(parts)-1]\n\t} else if len(parts) > 1 {\n\t\timageName = parts[0]\n\t\ttag = parts[1]\n\t} else {\n\t\timageName = parts[0]\n\t\ttag = \"latest\"\n\t}\n\tvar buf safe.Buffer\n\tpushOpts := docker.PushImageOptions{\n\t\tName: imageName,\n\t\tTag: tag,\n\t\tOutputStream: &buf,\n\t\tInactivityTimeout: net.StreamInactivityTimeout,\n\t\tRawJSONStream: true,\n\t}\n\terr = client.PushImage(pushOpts, dockercommon.RegistryAuthConfig())\n\tif err != nil {\n\t\tlog.Errorf(\"[docker] Failed to push image %q (%s): %s\", name, err, buf.String())\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc getDockerClient() (*docker.Client, error) {\n\tprovisioners, err := provision.Registry()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar client *docker.Client\n\tmultiErr := tsuruErrors.NewMultiError()\n\tfor _, p := range provisioners {\n\t\tif provisioner, ok := p.(provision.BuilderDeploy); ok {\n\t\t\tclient, err = provisioner.GetDockerClient(nil)\n\t\t\tif err != nil {\n\t\t\t\tmultiErr.Add(err)\n\t\t\t} else if client != nil {\n\t\t\t\treturn client, nil\n\t\t\t}\n\t\t}\n\t}\n\tif multiErr.Len() > 0 {\n\t\treturn nil, multiErr\n\t}\n\treturn nil, errors.New(\"No Docker nodes available\")\n}\n\nfunc (b *dockerBuilder) PlatformRemove(name string) error {\n\tclient, err := getDockerClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\timg, err := client.InspectImage(image.PlatformImageName(name))\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = client.RemoveImage(img.ID)\n\tif err != nil && err == docker.ErrNoSuchImage {\n\t\tlog.Errorf(\"error removing image %s from Docker: no such image\", name)\n\t\treturn nil\n\t}\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ The amazonebs package contains a packer.Builder implementation that\n\/\/ builds OMIs for Outscale OAPI.\n\/\/\n\/\/ In general, there are two types of OMIs that can be created: ebs-backed or\n\/\/ instance-store. This builder _only_ builds ebs-backed images.\npackage bsu\n\nimport (\n\t\"crypto\/tls\"\n\t\"log\"\n\t\"net\/http\"\n\n\tosccommon \"github.com\/hashicorp\/packer\/builder\/osc\/common\"\n\t\"github.com\/hashicorp\/packer\/common\"\n\t\"github.com\/hashicorp\/packer\/helper\/config\"\n\t\"github.com\/hashicorp\/packer\/helper\/multistep\"\n\t\"github.com\/hashicorp\/packer\/packer\"\n\t\"github.com\/hashicorp\/packer\/template\/interpolate\"\n\t\"github.com\/outscale\/osc-go\/oapi\"\n)\n\n\/\/ The unique ID for this builder\nconst BuilderId = \"oapi.outscale.bsu\"\n\ntype Config struct {\n\tcommon.PackerConfig `mapstructure:\",squash\"`\n\tosccommon.AccessConfig `mapstructure:\",squash\"`\n\tosccommon.OMIConfig `mapstructure:\",squash\"`\n\tosccommon.BlockDevices `mapstructure:\",squash\"`\n\tosccommon.RunConfig `mapstructure:\",squash\"`\n\tVolumeRunTags osccommon.TagMap `mapstructure:\"run_volume_tags\"`\n\n\tctx interpolate.Context\n}\n\ntype Builder struct {\n\tconfig Config\n\trunner multistep.Runner\n}\n\nfunc (b *Builder) Prepare(raws ...interface{}) ([]string, error) {\n\tb.config.ctx.Funcs = osccommon.TemplateFuncs\n\terr := config.Decode(&b.config, &config.DecodeOpts{\n\t\tInterpolate: true,\n\t\tInterpolateContext: &b.config.ctx,\n\t\tInterpolateFilter: &interpolate.RenderFilter{\n\t\t\tExclude: []string{\n\t\t\t\t\"omi_description\",\n\t\t\t\t\"run_tags\",\n\t\t\t\t\"run_volume_tags\",\n\t\t\t\t\"spot_tags\",\n\t\t\t\t\"snapshot_tags\",\n\t\t\t\t\"tags\",\n\t\t\t},\n\t\t},\n\t}, raws...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif b.config.PackerConfig.PackerForce {\n\t\tb.config.OMIForceDeregister = true\n\t}\n\n\t\/\/ Accumulate any errors\n\tvar errs *packer.MultiError\n\terrs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...)\n\terrs = packer.MultiErrorAppend(errs,\n\t\tb.config.OMIConfig.Prepare(&b.config.AccessConfig, &b.config.ctx)...)\n\terrs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(&b.config.ctx)...)\n\terrs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...)\n\n\tif errs != nil && len(errs.Errors) > 0 {\n\t\treturn nil, errs\n\t}\n\n\tpacker.LogSecretFilter.Set(b.config.AccessKey, b.config.SecretKey, b.config.Token)\n\treturn nil, nil\n}\n\nfunc (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {\n\tclientConfig, err := b.config.Config()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tskipClient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t},\n\t}\n\n\toapiconn := oapi.NewClient(clientConfig, skipClient)\n\n\t\/\/ Setup the state bag and initial state for the steps\n\tstate := new(multistep.BasicStateBag)\n\tstate.Put(\"config\", &b.config)\n\tstate.Put(\"oapi\", oapiconn)\n\tstate.Put(\"clientConfig\", clientConfig)\n\tstate.Put(\"hook\", hook)\n\tstate.Put(\"ui\", ui)\n\n\tsteps := []multistep.Step{\n\t\t&osccommon.StepPreValidate{\n\t\t\tDestOmiName: b.config.OMIName,\n\t\t\tForceDeregister: b.config.OMIForceDeregister,\n\t\t},\n\t}\n\n\tb.runner = common.NewRunner(steps, b.config.PackerConfig, ui)\n\tb.runner.Run(state)\n\n\t\/\/ If there was an error, return that\n\tif rawErr, ok := state.GetOk(\"error\"); ok {\n\t\treturn nil, rawErr.(error)\n\t}\n\n\treturn nil, nil\n}\n\nfunc (b *Builder) Cancel() {\n\tif b.runner != nil {\n\t\tlog.Println(\"Cancelling the step runner...\")\n\t\tb.runner.Cancel()\n\t}\n}\n<commit_msg>feature: bsu, run implementation add steps before createOMI step<commit_after>\/\/ The amazonebs package contains a packer.Builder implementation that\n\/\/ builds OMIs for Outscale OAPI.\n\/\/\n\/\/ In general, there are two types of OMIs that can be created: ebs-backed or\n\/\/ instance-store. This builder _only_ builds ebs-backed images.\npackage bsu\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\n\tosccommon \"github.com\/hashicorp\/packer\/builder\/osc\/common\"\n\t\"github.com\/hashicorp\/packer\/common\"\n\t\"github.com\/hashicorp\/packer\/helper\/communicator\"\n\t\"github.com\/hashicorp\/packer\/helper\/config\"\n\t\"github.com\/hashicorp\/packer\/helper\/multistep\"\n\t\"github.com\/hashicorp\/packer\/packer\"\n\t\"github.com\/hashicorp\/packer\/template\/interpolate\"\n\t\"github.com\/outscale\/osc-go\/oapi\"\n)\n\n\/\/ The unique ID for this builder\nconst BuilderId = \"oapi.outscale.bsu\"\n\ntype Config struct {\n\tcommon.PackerConfig `mapstructure:\",squash\"`\n\tosccommon.AccessConfig `mapstructure:\",squash\"`\n\tosccommon.OMIConfig `mapstructure:\",squash\"`\n\tosccommon.BlockDevices `mapstructure:\",squash\"`\n\tosccommon.RunConfig `mapstructure:\",squash\"`\n\tVolumeRunTags osccommon.TagMap `mapstructure:\"run_volume_tags\"`\n\n\tctx interpolate.Context\n}\n\ntype Builder struct {\n\tconfig Config\n\trunner multistep.Runner\n}\n\nfunc (b *Builder) Prepare(raws ...interface{}) ([]string, error) {\n\tb.config.ctx.Funcs = osccommon.TemplateFuncs\n\terr := config.Decode(&b.config, &config.DecodeOpts{\n\t\tInterpolate: true,\n\t\tInterpolateContext: &b.config.ctx,\n\t\tInterpolateFilter: &interpolate.RenderFilter{\n\t\t\tExclude: []string{\n\t\t\t\t\"omi_description\",\n\t\t\t\t\"run_tags\",\n\t\t\t\t\"run_volume_tags\",\n\t\t\t\t\"spot_tags\",\n\t\t\t\t\"snapshot_tags\",\n\t\t\t\t\"tags\",\n\t\t\t},\n\t\t},\n\t}, raws...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif b.config.PackerConfig.PackerForce {\n\t\tb.config.OMIForceDeregister = true\n\t}\n\n\t\/\/ Accumulate any errors\n\tvar errs *packer.MultiError\n\terrs = packer.MultiErrorAppend(errs, b.config.AccessConfig.Prepare(&b.config.ctx)...)\n\terrs = packer.MultiErrorAppend(errs,\n\t\tb.config.OMIConfig.Prepare(&b.config.AccessConfig, &b.config.ctx)...)\n\terrs = packer.MultiErrorAppend(errs, b.config.BlockDevices.Prepare(&b.config.ctx)...)\n\terrs = packer.MultiErrorAppend(errs, b.config.RunConfig.Prepare(&b.config.ctx)...)\n\n\tif errs != nil && len(errs.Errors) > 0 {\n\t\treturn nil, errs\n\t}\n\n\tpacker.LogSecretFilter.Set(b.config.AccessKey, b.config.SecretKey, b.config.Token)\n\treturn nil, nil\n}\n\nfunc (b *Builder) Run(ui packer.Ui, hook packer.Hook, cache packer.Cache) (packer.Artifact, error) {\n\tclientConfig, err := b.config.Config()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tskipClient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t},\n\t}\n\n\toapiconn := oapi.NewClient(clientConfig, skipClient)\n\n\t\/\/ Setup the state bag and initial state for the steps\n\tstate := new(multistep.BasicStateBag)\n\tstate.Put(\"config\", &b.config)\n\tstate.Put(\"oapi\", oapiconn)\n\tstate.Put(\"clientConfig\", clientConfig)\n\tstate.Put(\"hook\", hook)\n\tstate.Put(\"ui\", ui)\n\n\tsteps := []multistep.Step{\n\t\t&osccommon.StepPreValidate{\n\t\t\tDestOmiName: b.config.OMIName,\n\t\t\tForceDeregister: b.config.OMIForceDeregister,\n\t\t},\n\t\t&osccommon.StepSourceOMIInfo{\n\t\t\tSourceOmi: b.config.SourceOmi,\n\t\t\tEnableOMISriovNetSupport: b.config.OMISriovNetSupport,\n\t\t\tEnableOMIENASupport: b.config.OMIENASupport,\n\t\t\tOmiFilters: b.config.SourceOmiFilter,\n\t\t\tOMIVirtType: b.config.OMIVirtType, \/\/TODO: Remove if it is not used\n\t\t},\n\t\t&osccommon.StepNetworkInfo{\n\t\t\tNetId: b.config.NetId,\n\t\t\tNetFilter: b.config.NetFilter,\n\t\t\tSecurityGroupIds: b.config.SecurityGroupIds,\n\t\t\tSecurityGroupFilter: b.config.SecurityGroupFilter,\n\t\t\tSubnetId: b.config.SubnetId,\n\t\t\tSubnetFilter: b.config.SubnetFilter,\n\t\t\tSubregionName: b.config.Subregion,\n\t\t},\n\t\t&osccommon.StepKeyPair{\n\t\t\tDebug: b.config.PackerDebug,\n\t\t\tComm: &b.config.RunConfig.Comm,\n\t\t\tDebugKeyPath: fmt.Sprintf(\"oapi_%s\", b.config.PackerBuildName),\n\t\t},\n\t\t&osccommon.StepSecurityGroup{\n\t\t\tSecurityGroupFilter: b.config.SecurityGroupFilter,\n\t\t\tSecurityGroupIds: b.config.SecurityGroupIds,\n\t\t\tCommConfig: &b.config.RunConfig.Comm,\n\t\t\tTemporarySGSourceCidr: b.config.TemporarySGSourceCidr,\n\t\t},\n\t\t&osccommon.StepCleanupVolumes{\n\t\t\tBlockDevices: b.config.BlockDevices,\n\t\t},\n\t\t&osccommon.StepRunSourceVm{\n\t\t\tAssociatePublicIpAddress: b.config.AssociatePublicIpAddress,\n\t\t\tBlockDevices: b.config.BlockDevices,\n\t\t\tComm: &b.config.RunConfig.Comm,\n\t\t\tCtx: b.config.ctx,\n\t\t\tDebug: b.config.PackerDebug,\n\t\t\tBsuOptimized: b.config.BsuOptimized,\n\t\t\tEnableT2Unlimited: b.config.EnableT2Unlimited,\n\t\t\tExpectedRootDevice: \"ebs\", \/\/ should it be bsu\n\t\t\tIamVmProfile: b.config.IamVmProfile,\n\t\t\tVmInitiatedShutdownBehavior: b.config.VmInitiatedShutdownBehavior,\n\t\t\tVmType: b.config.VmType,\n\t\t\tIsRestricted: false,\n\t\t\tSourceOMI: b.config.SourceOmi,\n\t\t\tTags: b.config.RunTags,\n\t\t\tUserData: b.config.UserData,\n\t\t\tUserDataFile: b.config.UserDataFile,\n\t\t\tVolumeTags: b.config.VolumeRunTags,\n\t\t},\n\t\t&osccommon.StepGetPassword{\n\t\t\tDebug: b.config.PackerDebug,\n\t\t\tComm: &b.config.RunConfig.Comm,\n\t\t\tTimeout: b.config.WindowsPasswordTimeout,\n\t\t\tBuildName: b.config.PackerBuildName,\n\t\t},\n\t\t&communicator.StepConnect{\n\t\t\tConfig: &b.config.RunConfig.Comm,\n\t\t\tHost: osccommon.SSHHost(\n\t\t\t\toapiconn,\n\t\t\t\tb.config.Comm.SSHInterface),\n\t\t\tSSHConfig: b.config.RunConfig.Comm.SSHConfigFunc(),\n\t\t},\n\t\t&common.StepProvision{},\n\t\t&common.StepCleanupTempKeys{\n\t\t\tComm: &b.config.RunConfig.Comm,\n\t\t},\n\t\t&osccommon.StepStopBSUBackedVm{\n\t\t\tSkip: false,\n\t\t\tDisableStopVm: b.config.DisableStopVm,\n\t\t},\n\t\t&osccommon.StepUpdateBSUBackedVm{\n\t\t\tEnableAMISriovNetSupport: b.config.OMISriovNetSupport,\n\t\t\tEnableAMIENASupport: b.config.OMIENASupport,\n\t\t},\n\t\t&osccommon.StepDeregisterOMI{\n\t\t\tAccessConfig: &b.config.AccessConfig,\n\t\t\tForceDeregister: b.config.OMIForceDeregister,\n\t\t\tForceDeleteSnapshot: b.config.OMIForceDeleteSnapshot,\n\t\t\tOMIName: b.config.OMIName,\n\t\t\tRegions: b.config.OMIRegions,\n\t\t},\n\t}\n\n\tb.runner = common.NewRunner(steps, b.config.PackerConfig, ui)\n\tb.runner.Run(state)\n\n\t\/\/ If there was an error, return that\n\tif rawErr, ok := state.GetOk(\"error\"); ok {\n\t\treturn nil, rawErr.(error)\n\t}\n\n\treturn nil, nil\n}\n\nfunc (b *Builder) Cancel() {\n\tif b.runner != nil {\n\t\tlog.Println(\"Cancelling the step runner...\")\n\t\tb.runner.Cancel()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nconst Version = \"0.1.2\"\n<commit_msg>feat(version): bumo<commit_after>package main\n\nconst Version = \"0.1.3\"\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016-2020 The Libsacloud Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage libsacloud\n\n\/\/ Version バージョン\nconst Version = \"2.1.5\"\n<commit_msg>Bump to v2.1.6<commit_after>\/\/ Copyright 2016-2020 The Libsacloud Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage libsacloud\n\n\/\/ Version バージョン\nconst Version = \"2.1.6\"\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"github.com\/tsaikd\/KDGoLib\/version\"\n\nfunc init() {\n\tversion.VERSION = \"1.0.1\"\n}\n<commit_msg>1.0.2<commit_after>package main\n\nimport \"github.com\/tsaikd\/KDGoLib\/version\"\n\nfunc init() {\n\tversion.VERSION = \"1.0.2\"\n}\n<|endoftext|>"} {"text":"<commit_before>package superast\n\nimport (\n\t\"go\/ast\"\n\t\"go\/token\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\tDefault = iota\n\tIfBody\n\tIfElse\n\tFuncBody\n)\n\ntype AST struct {\n\tcurID int\n\tRootBlock *block\n\tnodeStack []ast.Node\n\tstmtsStack []*[]stmt\n\tfset *token.FileSet\n\tpos token.Pos\n}\n\nfunc NewAST(fset *token.FileSet) *AST {\n\ta := &AST{\n\t\tfset: fset,\n\t}\n\ta.RootBlock = &block{\n\t\tid: a.newID(),\n\t\tStmts: make([]stmt, 0),\n\t}\n\ta.pushStmts(&a.RootBlock.Stmts)\n\treturn a\n}\n\nfunc (a *AST) newID() id {\n\ti := a.curID\n\ta.curID++\n\treturn id{ID: i}\n}\n\nfunc (a *AST) newPos(p token.Pos) pos {\n\tposition := a.fset.Position(p)\n\treturn pos{Line: position.Line, Col: position.Column}\n}\n\nfunc (a *AST) nodePos(n ast.Node) pos {\n\treturn a.newPos(n.Pos())\n}\n\nfunc (a *AST) curPos() pos {\n\treturn a.newPos(a.pos)\n}\n\nfunc (a *AST) pushNode(node ast.Node) {\n\ta.nodeStack = append(a.nodeStack, node)\n}\n\nfunc (a *AST) curNode() ast.Node {\n\treturn a.nodeStack[len(a.nodeStack)-1]\n}\n\nfunc (a *AST) popNode() {\n\ta.nodeStack = a.nodeStack[:len(a.nodeStack)-1]\n}\n\nfunc (a *AST) pushStmts(stmts *[]stmt) {\n\ta.stmtsStack = append(a.stmtsStack, stmts)\n}\n\nfunc (a *AST) curStmts() *[]stmt {\n\treturn a.stmtsStack[len(a.stmtsStack)-1]\n}\n\nfunc (a *AST) addStmt(s stmt) {\n\tcurStmts := a.curStmts()\n\t*curStmts = append(*curStmts, s)\n}\n\nfunc (a *AST) popStmts() {\n\ta.stmtsStack = a.stmtsStack[:len(a.stmtsStack)-1]\n}\n\nfunc exprString(x ast.Expr) string {\n\tswitch t := x.(type) {\n\tcase *ast.Ident:\n\t\treturn t.Name\n\tcase *ast.BasicLit:\n\t\treturn t.Value\n\tcase *ast.SelectorExpr:\n\t\treturn exprString(t.X) + \".\" + t.Sel.Name\n\tcase *ast.StarExpr:\n\t\treturn exprString(t.X)\n\t}\n\treturn \"\"\n}\n\nfunc exprValue(x ast.Expr) value {\n\tswitch t := x.(type) {\n\tcase *ast.BasicLit:\n\t\tswitch t.Kind {\n\t\tcase token.INT:\n\t\t\ti, _ := strconv.ParseInt(t.Value, 10, 0)\n\t\t\treturn i\n\t\tcase token.FLOAT:\n\t\t\tf, _ := strconv.ParseFloat(t.Value, 64)\n\t\t\treturn f\n\t\tcase token.CHAR:\n\t\t\tr, _, _, _ := strconv.UnquoteChar(t.Value, '\\'')\n\t\t\treturn r\n\t\tcase token.STRING:\n\t\t\ts, _ := strconv.Unquote(t.Value)\n\t\t\treturn s\n\t\t}\n\t\treturn t.Value\n\t}\n\treturn nil\n}\n\nfunc exprType(x ast.Expr) *dataType {\n\tif s := exprString(x); s != \"\" {\n\t\treturn &dataType{\n\t\t\tName: s,\n\t\t}\n\t}\n\tswitch t := x.(type) {\n\tcase *ast.ArrayType:\n\t\treturn &dataType{\n\t\t\tName: \"vector\",\n\t\t\tSubType: exprType(t.Elt),\n\t\t}\n\t}\n\treturn nil\n}\n\ntype namedType struct {\n\tvName string\n\tdType *dataType\n\tnode ast.Node\n}\n\nfunc flattenNames(baseType ast.Expr, names []*ast.Ident) []namedType {\n\tt := exprType(baseType)\n\tif len(names) == 0 {\n\t\treturn []namedType{\n\t\t\t{vName: \"\", dType: t},\n\t\t}\n\t}\n\tvar types []namedType\n\tfor _, n := range names {\n\t\ttypes = append(types, namedType{\n\t\t\tvName: n.Name,\n\t\t\tdType: t,\n\t\t\tnode: n,\n\t\t})\n\t}\n\treturn types\n}\n\nfunc flattenFieldList(fieldList *ast.FieldList) []namedType {\n\tif fieldList == nil {\n\t\treturn nil\n\t}\n\tvar types []namedType\n\tfor _, f := range fieldList.List {\n\t\tfor _, t := range flattenNames(f.Type, f.Names) {\n\t\t\ttypes = append(types, t)\n\t\t}\n\t}\n\treturn types\n}\n\nvar basicLitName = map[token.Token]string{\n\ttoken.INT: \"int\",\n\ttoken.FLOAT: \"double\",\n\ttoken.CHAR: \"char\",\n\ttoken.STRING: \"string\",\n}\n\nvar zeroValues = map[string]value{\n\t\"int\": new(int),\n\t\"double\": new(float64),\n\t\"char\": new(rune),\n\t\"string\": new(string),\n}\n\nfunc (a *AST) parseExpr(expr ast.Expr) expr {\n\tswitch x := expr.(type) {\n\tcase *ast.Ident:\n\t\treturn &identifier{\n\t\t\tid: a.newID(),\n\t\t\tpos: a.nodePos(x),\n\t\t\tType: \"identifier\",\n\t\t\tValue: x.Name,\n\t\t}\n\tcase *ast.BasicLit:\n\t\tlType, _ := basicLitName[x.Kind]\n\t\treturn &identifier{\n\t\t\tid: a.newID(),\n\t\t\tpos: a.nodePos(x),\n\t\t\tType: lType,\n\t\t\tValue: exprValue(x),\n\t\t}\n\tcase *ast.UnaryExpr:\n\t\treturn &unary{\n\t\t\tid: a.newID(),\n\t\t\tpos: a.nodePos(x),\n\t\t\tType: x.Op.String(),\n\t\t\tExpr: a.parseExpr(x.X),\n\t\t}\n\tcase *ast.CallExpr:\n\t\tcall := &funcCall{\n\t\t\tid: a.newID(),\n\t\t\tpos: a.nodePos(x),\n\t\t\tType: \"function-call\",\n\t\t\tName: exprString(x.Fun),\n\t\t}\n\t\tfor _, e := range x.Args {\n\t\t\tcall.Args = append(call.Args, a.parseExpr(e))\n\t\t}\n\t\treturn call\n\tcase *ast.BinaryExpr:\n\t\treturn &binary{\n\t\t\tid: a.newID(),\n\t\t\tpos: a.nodePos(x),\n\t\t\tType: x.Op.String(),\n\t\t\tLeft: a.parseExpr(x.X),\n\t\t\tRight: a.parseExpr(x.Y),\n\t\t}\n\tcase *ast.ParenExpr:\n\t\treturn a.parseExpr(x.X)\n\tdefault:\n\t\tlog.Printf(\"Unknown expression: %#v\", x)\n\t}\n\treturn nil\n}\n\nfunc (a *AST) assignIdToDataType(dType *dataType) *dataType {\n\tif dType == nil {\n\t\treturn nil\n\t}\n\tdTypeCopy := *dType\n\tdTypeCopy.id = a.newID()\n\tif dTypeCopy.SubType != nil {\n\t\tdTypeCopy.SubType = a.assignIdToDataType(dTypeCopy.SubType)\n\t}\n\treturn &dTypeCopy\n}\n\nfunc (a *AST) Visit(node ast.Node) ast.Visitor {\n\tif node == nil {\n\t\tswitch a.curNode().(type) {\n\t\tcase *ast.BlockStmt:\n\t\t\ta.popStmts()\n\t\t}\n\t\ta.popNode()\n\t\treturn nil\n\t}\n\ta.pos = node.Pos()\n\tlog.Printf(\"%s%#v\", strings.Repeat(\" \", len(a.nodeStack)), node)\n\tswitch x := node.(type) {\n\tcase *ast.TypeSpec:\n\t\tn := \"\"\n\t\tif x.Name != nil {\n\t\t\tn = exprString(x.Name)\n\t\t}\n\t\tswitch t := x.Type.(type) {\n\t\tcase *ast.StructType:\n\t\t\tdecl := &structDecl{\n\t\t\t\tid: a.newID(),\n\t\t\t\tpos: a.curPos(),\n\t\t\t\tType: \"struct-declaration\",\n\t\t\t\tName: n,\n\t\t\t}\n\t\t\tfor _, f := range flattenFieldList(t.Fields) {\n\t\t\t\tattr := varDecl{\n\t\t\t\t\tid: a.newID(),\n\t\t\t\t\tpos: a.curPos(),\n\t\t\t\t\tType: \"variable-declaration\",\n\t\t\t\t\tName: f.vName,\n\t\t\t\t\tDataType: a.assignIdToDataType(f.dType),\n\t\t\t\t}\n\t\t\t\tdecl.Attrs = append(decl.Attrs, attr)\n\t\t\t}\n\t\t\ta.addStmt(decl)\n\t\t}\n\t\treturn nil\n\tcase *ast.BasicLit:\n\t\tlit := a.parseExpr(x)\n\t\ta.addStmt(lit)\n\t\treturn nil\n\tcase *ast.UnaryExpr:\n\t\tunary := a.parseExpr(x)\n\t\ta.addStmt(unary)\n\t\treturn nil\n\tcase *ast.CallExpr:\n\t\tcall := a.parseExpr(x)\n\t\ta.addStmt(call)\n\t\treturn nil\n\tcase *ast.FuncDecl:\n\t\tname := x.Name.Name\n\t\tvar retType *dataType\n\t\tresults := flattenFieldList(x.Type.Results)\n\t\tswitch len(results) {\n\t\tcase 1:\n\t\t\tretType = results[0].dType\n\t\t}\n\t\tfn := &funcDecl{\n\t\t\tid: a.newID(),\n\t\t\tpos: a.nodePos(x),\n\t\t\tType: \"function-declaration\",\n\t\t\tName: name,\n\t\t\tRetType: a.assignIdToDataType(retType),\n\t\t\tBlock: &block{\n\t\t\t\tid: a.newID(),\n\t\t\t\tStmts: make([]stmt, 0),\n\t\t\t},\n\t\t}\n\t\tfor _, f := range flattenFieldList(x.Type.Params) {\n\t\t\tparam := varDecl{\n\t\t\t\tid: a.newID(),\n\t\t\t\tpos: a.nodePos(f.node),\n\t\t\t\tType: \"variable-declaration\",\n\t\t\t\tName: f.vName,\n\t\t\t\tDataType: a.assignIdToDataType(f.dType),\n\t\t\t}\n\t\t\tfn.Params = append(fn.Params, param)\n\t\t}\n\t\ta.addStmt(fn)\n\t\ta.pushStmts(&fn.Block.Stmts)\n\tcase *ast.DeclStmt:\n\t\tgd, _ := x.Decl.(*ast.GenDecl)\n\t\tfor _, spec := range gd.Specs {\n\t\t\ts, ok := spec.(*ast.ValueSpec)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor i, t := range flattenNames(s.Type, s.Names) {\n\t\t\t\tvType := t.dType.Name\n\t\t\t\tv, _ := zeroValues[vType]\n\t\t\t\tif s.Values != nil {\n\t\t\t\t\tv = exprValue(s.Values[i])\n\t\t\t\t}\n\t\t\t\tdecl := &varDecl{\n\t\t\t\t\tid: a.newID(),\n\t\t\t\t\tpos: a.curPos(),\n\t\t\t\t\tType: \"variable-declaration\",\n\t\t\t\t\tName: t.vName,\n\t\t\t\t\tDataType: a.assignIdToDataType(t.dType),\n\t\t\t\t\tInit: &identifier{\n\t\t\t\t\t\tid: a.newID(),\n\t\t\t\t\t\tpos: a.curPos(),\n\t\t\t\t\t\tType: vType,\n\t\t\t\t\t\tValue: v,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\ta.addStmt(decl)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\tcase *ast.AssignStmt:\n\t\tfor i, l := range x.Lhs {\n\t\t\tr := x.Rhs[i]\n\t\t\tvar t string\n\t\t\tswitch rx := r.(type) {\n\t\t\tcase *ast.BasicLit:\n\t\t\t\tt, _ = basicLitName[rx.Kind]\n\t\t\tcase *ast.CompositeLit:\n\t\t\t\tt = exprString(rx.Type)\n\t\t\t}\n\t\t\tvar s stmt\n\t\t\tif x.Tok == token.DEFINE {\n\t\t\t\ts = &varDecl{\n\t\t\t\t\tid: a.newID(),\n\t\t\t\t\tpos: a.curPos(),\n\t\t\t\t\tType: \"variable-declaration\",\n\t\t\t\t\tName: exprString(l),\n\t\t\t\t\tDataType: &dataType{\n\t\t\t\t\t\tid: a.newID(),\n\t\t\t\t\t\tName: t,\n\t\t\t\t\t},\n\t\t\t\t\tInit: a.parseExpr(r),\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\ts = &binary{\n\t\t\t\t\tid: a.newID(),\n\t\t\t\t\tpos: a.curPos(),\n\t\t\t\t\tType: x.Tok.String(),\n\t\t\t\t\tLeft: a.parseExpr(l),\n\t\t\t\t\tRight: a.parseExpr(r),\n\t\t\t\t}\n\t\t\t}\n\t\t\ta.addStmt(s)\n\t\t}\n\t\treturn nil\n\tcase *ast.ReturnStmt:\n\t\tret := &retStmt{\n\t\t\tid: a.newID(),\n\t\t\tpos: a.nodePos(x),\n\t\t\tType: \"return\",\n\t\t}\n\t\tif len(x.Results) > 0 {\n\t\t\tret.Expr = a.parseExpr(x.Results[0])\n\t\t}\n\t\ta.addStmt(ret)\n\t\treturn nil\n\tcase *ast.IfStmt:\n\t\tcond := &conditional{\n\t\t\tid: a.newID(),\n\t\t\tpos: a.nodePos(x),\n\t\t\tType: \"conditional\",\n\t\t\tCond: a.parseExpr(x.Cond),\n\t\t\tThen: &block{\n\t\t\t\tid: a.newID(),\n\t\t\t\tStmts: make([]stmt, 0),\n\t\t\t},\n\t\t}\n\t\ta.addStmt(cond)\n\t\tif x.Else != nil {\n\t\t\tcond.Else = &block{\n\t\t\t\tid: a.newID(),\n\t\t\t\tStmts: make([]stmt, 0),\n\t\t\t}\n\t\t\ta.pushStmts(&cond.Else.Stmts)\n\t\t\ta.pushNode(node)\n\t\t}\n\t\ta.pushStmts(&cond.Then.Stmts)\n\tcase *ast.File:\n\tcase *ast.BlockStmt:\n\tcase *ast.ExprStmt:\n\tcase *ast.GenDecl:\n\tdefault:\n\t\treturn nil\n\t}\n\ta.pushNode(node)\n\treturn a\n}\n<commit_msg>Simplify var naming<commit_after>package superast\n\nimport (\n\t\"go\/ast\"\n\t\"go\/token\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\tDefault = iota\n\tIfBody\n\tIfElse\n\tFuncBody\n)\n\ntype AST struct {\n\tcurID int\n\tRootBlock *block\n\tnodeStack []ast.Node\n\tstmtsStack []*[]stmt\n\tfset *token.FileSet\n\tpos token.Pos\n}\n\nfunc NewAST(fset *token.FileSet) *AST {\n\ta := &AST{\n\t\tfset: fset,\n\t}\n\ta.RootBlock = &block{\n\t\tid: a.newID(),\n\t\tStmts: make([]stmt, 0),\n\t}\n\ta.pushStmts(&a.RootBlock.Stmts)\n\treturn a\n}\n\nfunc (a *AST) newID() id {\n\ti := a.curID\n\ta.curID++\n\treturn id{ID: i}\n}\n\nfunc (a *AST) newPos(p token.Pos) pos {\n\tposition := a.fset.Position(p)\n\treturn pos{Line: position.Line, Col: position.Column}\n}\n\nfunc (a *AST) nodePos(n ast.Node) pos {\n\treturn a.newPos(n.Pos())\n}\n\nfunc (a *AST) curPos() pos {\n\treturn a.newPos(a.pos)\n}\n\nfunc (a *AST) pushNode(node ast.Node) {\n\ta.nodeStack = append(a.nodeStack, node)\n}\n\nfunc (a *AST) curNode() ast.Node {\n\treturn a.nodeStack[len(a.nodeStack)-1]\n}\n\nfunc (a *AST) popNode() {\n\ta.nodeStack = a.nodeStack[:len(a.nodeStack)-1]\n}\n\nfunc (a *AST) pushStmts(stmts *[]stmt) {\n\ta.stmtsStack = append(a.stmtsStack, stmts)\n}\n\nfunc (a *AST) curStmts() *[]stmt {\n\treturn a.stmtsStack[len(a.stmtsStack)-1]\n}\n\nfunc (a *AST) addStmt(s stmt) {\n\tcurStmts := a.curStmts()\n\t*curStmts = append(*curStmts, s)\n}\n\nfunc (a *AST) popStmts() {\n\ta.stmtsStack = a.stmtsStack[:len(a.stmtsStack)-1]\n}\n\nfunc exprString(x ast.Expr) string {\n\tswitch t := x.(type) {\n\tcase *ast.Ident:\n\t\treturn t.Name\n\tcase *ast.BasicLit:\n\t\treturn t.Value\n\tcase *ast.SelectorExpr:\n\t\treturn exprString(t.X) + \".\" + t.Sel.Name\n\tcase *ast.StarExpr:\n\t\treturn exprString(t.X)\n\t}\n\treturn \"\"\n}\n\nfunc exprValue(x ast.Expr) value {\n\tswitch t := x.(type) {\n\tcase *ast.BasicLit:\n\t\tswitch t.Kind {\n\t\tcase token.INT:\n\t\t\ti, _ := strconv.ParseInt(t.Value, 10, 0)\n\t\t\treturn i\n\t\tcase token.FLOAT:\n\t\t\tf, _ := strconv.ParseFloat(t.Value, 64)\n\t\t\treturn f\n\t\tcase token.CHAR:\n\t\t\tr, _, _, _ := strconv.UnquoteChar(t.Value, '\\'')\n\t\t\treturn r\n\t\tcase token.STRING:\n\t\t\ts, _ := strconv.Unquote(t.Value)\n\t\t\treturn s\n\t\t}\n\t\treturn t.Value\n\t}\n\treturn nil\n}\n\nfunc exprType(x ast.Expr) *dataType {\n\tif s := exprString(x); s != \"\" {\n\t\treturn &dataType{\n\t\t\tName: s,\n\t\t}\n\t}\n\tswitch t := x.(type) {\n\tcase *ast.ArrayType:\n\t\treturn &dataType{\n\t\t\tName: \"vector\",\n\t\t\tSubType: exprType(t.Elt),\n\t\t}\n\t}\n\treturn nil\n}\n\ntype namedType struct {\n\tvName string\n\tdType *dataType\n\tnode ast.Node\n}\n\nfunc flattenNames(baseType ast.Expr, names []*ast.Ident) []namedType {\n\tt := exprType(baseType)\n\tif len(names) == 0 {\n\t\treturn []namedType{\n\t\t\t{vName: \"\", dType: t},\n\t\t}\n\t}\n\tvar types []namedType\n\tfor _, n := range names {\n\t\ttypes = append(types, namedType{\n\t\t\tvName: n.Name,\n\t\t\tdType: t,\n\t\t\tnode: n,\n\t\t})\n\t}\n\treturn types\n}\n\nfunc flattenFieldList(fieldList *ast.FieldList) []namedType {\n\tif fieldList == nil {\n\t\treturn nil\n\t}\n\tvar types []namedType\n\tfor _, f := range fieldList.List {\n\t\tfor _, t := range flattenNames(f.Type, f.Names) {\n\t\t\ttypes = append(types, t)\n\t\t}\n\t}\n\treturn types\n}\n\nvar basicLitName = map[token.Token]string{\n\ttoken.INT: \"int\",\n\ttoken.FLOAT: \"double\",\n\ttoken.CHAR: \"char\",\n\ttoken.STRING: \"string\",\n}\n\nvar zeroValues = map[string]value{\n\t\"int\": new(int),\n\t\"double\": new(float64),\n\t\"char\": new(rune),\n\t\"string\": new(string),\n}\n\nfunc (a *AST) parseExpr(expr ast.Expr) expr {\n\tswitch x := expr.(type) {\n\tcase *ast.Ident:\n\t\treturn &identifier{\n\t\t\tid: a.newID(),\n\t\t\tpos: a.nodePos(x),\n\t\t\tType: \"identifier\",\n\t\t\tValue: x.Name,\n\t\t}\n\tcase *ast.BasicLit:\n\t\tlType, _ := basicLitName[x.Kind]\n\t\treturn &identifier{\n\t\t\tid: a.newID(),\n\t\t\tpos: a.nodePos(x),\n\t\t\tType: lType,\n\t\t\tValue: exprValue(x),\n\t\t}\n\tcase *ast.UnaryExpr:\n\t\treturn &unary{\n\t\t\tid: a.newID(),\n\t\t\tpos: a.nodePos(x),\n\t\t\tType: x.Op.String(),\n\t\t\tExpr: a.parseExpr(x.X),\n\t\t}\n\tcase *ast.CallExpr:\n\t\tcall := &funcCall{\n\t\t\tid: a.newID(),\n\t\t\tpos: a.nodePos(x),\n\t\t\tType: \"function-call\",\n\t\t\tName: exprString(x.Fun),\n\t\t}\n\t\tfor _, e := range x.Args {\n\t\t\tcall.Args = append(call.Args, a.parseExpr(e))\n\t\t}\n\t\treturn call\n\tcase *ast.BinaryExpr:\n\t\treturn &binary{\n\t\t\tid: a.newID(),\n\t\t\tpos: a.nodePos(x),\n\t\t\tType: x.Op.String(),\n\t\t\tLeft: a.parseExpr(x.X),\n\t\t\tRight: a.parseExpr(x.Y),\n\t\t}\n\tcase *ast.ParenExpr:\n\t\treturn a.parseExpr(x.X)\n\tdefault:\n\t\tlog.Printf(\"Unknown expression: %#v\", x)\n\t}\n\treturn nil\n}\n\nfunc (a *AST) assignIdToDataType(dType *dataType) *dataType {\n\tif dType == nil {\n\t\treturn nil\n\t}\n\tdTypeCopy := *dType\n\tdTypeCopy.id = a.newID()\n\tif dTypeCopy.SubType != nil {\n\t\tdTypeCopy.SubType = a.assignIdToDataType(dTypeCopy.SubType)\n\t}\n\treturn &dTypeCopy\n}\n\nfunc (a *AST) Visit(node ast.Node) ast.Visitor {\n\tif node == nil {\n\t\tswitch a.curNode().(type) {\n\t\tcase *ast.BlockStmt:\n\t\t\ta.popStmts()\n\t\t}\n\t\ta.popNode()\n\t\treturn nil\n\t}\n\ta.pos = node.Pos()\n\tlog.Printf(\"%s%#v\", strings.Repeat(\" \", len(a.nodeStack)), node)\n\tswitch x := node.(type) {\n\tcase *ast.TypeSpec:\n\t\tn := \"\"\n\t\tif x.Name != nil {\n\t\t\tn = exprString(x.Name)\n\t\t}\n\t\tswitch t := x.Type.(type) {\n\t\tcase *ast.StructType:\n\t\t\td := &structDecl{\n\t\t\t\tid: a.newID(),\n\t\t\t\tpos: a.curPos(),\n\t\t\t\tType: \"struct-declaration\",\n\t\t\t\tName: n,\n\t\t\t}\n\t\t\tfor _, f := range flattenFieldList(t.Fields) {\n\t\t\t\tattr := varDecl{\n\t\t\t\t\tid: a.newID(),\n\t\t\t\t\tpos: a.curPos(),\n\t\t\t\t\tType: \"variable-declaration\",\n\t\t\t\t\tName: f.vName,\n\t\t\t\t\tDataType: a.assignIdToDataType(f.dType),\n\t\t\t\t}\n\t\t\t\td.Attrs = append(d.Attrs, attr)\n\t\t\t}\n\t\t\ta.addStmt(d)\n\t\t}\n\t\treturn nil\n\tcase *ast.BasicLit:\n\t\tl := a.parseExpr(x)\n\t\ta.addStmt(l)\n\t\treturn nil\n\tcase *ast.UnaryExpr:\n\t\tu := a.parseExpr(x)\n\t\ta.addStmt(u)\n\t\treturn nil\n\tcase *ast.CallExpr:\n\t\tc := a.parseExpr(x)\n\t\ta.addStmt(c)\n\t\treturn nil\n\tcase *ast.FuncDecl:\n\t\tname := x.Name.Name\n\t\tvar retType *dataType\n\t\tresults := flattenFieldList(x.Type.Results)\n\t\tswitch len(results) {\n\t\tcase 1:\n\t\t\tretType = results[0].dType\n\t\t}\n\t\td := &funcDecl{\n\t\t\tid: a.newID(),\n\t\t\tpos: a.nodePos(x),\n\t\t\tType: \"function-declaration\",\n\t\t\tName: name,\n\t\t\tRetType: a.assignIdToDataType(retType),\n\t\t\tBlock: &block{\n\t\t\t\tid: a.newID(),\n\t\t\t\tStmts: make([]stmt, 0),\n\t\t\t},\n\t\t}\n\t\tfor _, f := range flattenFieldList(x.Type.Params) {\n\t\t\tparam := varDecl{\n\t\t\t\tid: a.newID(),\n\t\t\t\tpos: a.nodePos(f.node),\n\t\t\t\tType: \"variable-declaration\",\n\t\t\t\tName: f.vName,\n\t\t\t\tDataType: a.assignIdToDataType(f.dType),\n\t\t\t}\n\t\t\td.Params = append(d.Params, param)\n\t\t}\n\t\ta.addStmt(d)\n\t\ta.pushStmts(&d.Block.Stmts)\n\tcase *ast.DeclStmt:\n\t\tgd, _ := x.Decl.(*ast.GenDecl)\n\t\tfor _, spec := range gd.Specs {\n\t\t\ts, ok := spec.(*ast.ValueSpec)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor i, t := range flattenNames(s.Type, s.Names) {\n\t\t\t\tvType := t.dType.Name\n\t\t\t\tv, _ := zeroValues[vType]\n\t\t\t\tif s.Values != nil {\n\t\t\t\t\tv = exprValue(s.Values[i])\n\t\t\t\t}\n\t\t\t\td := &varDecl{\n\t\t\t\t\tid: a.newID(),\n\t\t\t\t\tpos: a.curPos(),\n\t\t\t\t\tType: \"variable-declaration\",\n\t\t\t\t\tName: t.vName,\n\t\t\t\t\tDataType: a.assignIdToDataType(t.dType),\n\t\t\t\t\tInit: &identifier{\n\t\t\t\t\t\tid: a.newID(),\n\t\t\t\t\t\tpos: a.curPos(),\n\t\t\t\t\t\tType: vType,\n\t\t\t\t\t\tValue: v,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\ta.addStmt(d)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\tcase *ast.AssignStmt:\n\t\tfor i, l := range x.Lhs {\n\t\t\tr := x.Rhs[i]\n\t\t\tvar t string\n\t\t\tswitch rx := r.(type) {\n\t\t\tcase *ast.BasicLit:\n\t\t\t\tt, _ = basicLitName[rx.Kind]\n\t\t\tcase *ast.CompositeLit:\n\t\t\t\tt = exprString(rx.Type)\n\t\t\t}\n\t\t\tvar s stmt\n\t\t\tif x.Tok == token.DEFINE {\n\t\t\t\ts = &varDecl{\n\t\t\t\t\tid: a.newID(),\n\t\t\t\t\tpos: a.curPos(),\n\t\t\t\t\tType: \"variable-declaration\",\n\t\t\t\t\tName: exprString(l),\n\t\t\t\t\tDataType: &dataType{\n\t\t\t\t\t\tid: a.newID(),\n\t\t\t\t\t\tName: t,\n\t\t\t\t\t},\n\t\t\t\t\tInit: a.parseExpr(r),\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\ts = &binary{\n\t\t\t\t\tid: a.newID(),\n\t\t\t\t\tpos: a.curPos(),\n\t\t\t\t\tType: x.Tok.String(),\n\t\t\t\t\tLeft: a.parseExpr(l),\n\t\t\t\t\tRight: a.parseExpr(r),\n\t\t\t\t}\n\t\t\t}\n\t\t\ta.addStmt(s)\n\t\t}\n\t\treturn nil\n\tcase *ast.ReturnStmt:\n\t\tr := &retStmt{\n\t\t\tid: a.newID(),\n\t\t\tpos: a.nodePos(x),\n\t\t\tType: \"return\",\n\t\t}\n\t\tif len(x.Results) > 0 {\n\t\t\tr.Expr = a.parseExpr(x.Results[0])\n\t\t}\n\t\ta.addStmt(r)\n\t\treturn nil\n\tcase *ast.IfStmt:\n\t\tc := &conditional{\n\t\t\tid: a.newID(),\n\t\t\tpos: a.nodePos(x),\n\t\t\tType: \"conditional\",\n\t\t\tCond: a.parseExpr(x.Cond),\n\t\t\tThen: &block{\n\t\t\t\tid: a.newID(),\n\t\t\t\tStmts: make([]stmt, 0),\n\t\t\t},\n\t\t}\n\t\ta.addStmt(c)\n\t\tif x.Else != nil {\n\t\t\tc.Else = &block{\n\t\t\t\tid: a.newID(),\n\t\t\t\tStmts: make([]stmt, 0),\n\t\t\t}\n\t\t\ta.pushStmts(&c.Else.Stmts)\n\t\t\ta.pushNode(node)\n\t\t}\n\t\ta.pushStmts(&c.Then.Stmts)\n\tcase *ast.File:\n\tcase *ast.BlockStmt:\n\tcase *ast.ExprStmt:\n\tcase *ast.GenDecl:\n\tdefault:\n\t\treturn nil\n\t}\n\ta.pushNode(node)\n\treturn a\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package youtube provides loading audio from video files for given youtube channels\npackage youtube\n\nimport (\n\t\"context\"\n\t\"encoding\/xml\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\tlog \"github.com\/go-pkgz\/lgr\"\n\t\"github.com\/google\/uuid\"\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/umputun\/feed-master\/app\/feed\"\n\t\"github.com\/umputun\/feed-master\/app\/youtube\/channel\"\n)\n\n\/\/go:generate moq -out mocks\/downloader.go -pkg mocks -skip-ensure -fmt goimports . DownloaderService\n\/\/go:generate moq -out mocks\/channel.go -pkg mocks -skip-ensure -fmt goimports . ChannelService\n\/\/go:generate moq -out mocks\/store.go -pkg mocks -skip-ensure -fmt goimports . StoreService\n\n\/\/ Service loads audio from youtube channels\ntype Service struct {\n\tChannels []ChannelInfo\n\tDownloader DownloaderService\n\tChannelService ChannelService\n\tStore StoreService\n\tCheckDuration time.Duration\n\tRSSFileStore RSSFileStore\n\tKeepPerChannel int\n\tRootURL string\n}\n\n\/\/ ChannelInfo is a pait of channel ID and name\ntype ChannelInfo struct {\n\tName string\n\tID string\n}\n\n\/\/ DownloaderService is an interface for downloading audio from youtube\ntype DownloaderService interface {\n\tGet(ctx context.Context, id string, fname string) (file string, err error)\n}\n\n\/\/ ChannelService is an interface for getting channel entries, i.e. the list of videos\ntype ChannelService interface {\n\tGet(ctx context.Context, chanID string) ([]channel.Entry, error)\n}\n\n\/\/ StoreService is an interface for storing and loading metadata about downloaded audio\ntype StoreService interface {\n\tSave(entry channel.Entry) (bool, error)\n\tLoad(channelID string, max int) ([]channel.Entry, error)\n\tExist(entry channel.Entry) (bool, error)\n\tRemoveOld(channelID string, keep int) ([]string, error)\n}\n\n\/\/ Do is a blocking function that downloads audio from youtube channels and updates metadata\nfunc (s *Service) Do(ctx context.Context) error {\n\tlog.Printf(\"[INFO] Starting youtube service\")\n\ttick := time.NewTicker(s.CheckDuration)\n\tdefer tick.Stop()\n\n\tif err := s.procChannels(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to process channels\")\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tcase <-tick.C:\n\t\t\tif err := s.procChannels(ctx); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"failed to process channels\")\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ RSSFeed generates RSS feed for given channel\nfunc (s *Service) RSSFeed(cinfo ChannelInfo) (string, error) {\n\tentries, err := s.Store.Load(cinfo.ID, s.KeepPerChannel)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to get channel entries\")\n\t}\n\n\tif len(entries) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\titems := []feed.Item{}\n\tfor _, entry := range entries {\n\n\t\tfileURL := s.RootURL + \"\/\" + path.Base(entry.File)\n\n\t\tvar fileSize int\n\t\tif fileInfo, fiErr := os.Stat(entry.File); fiErr != nil {\n\t\t\tlog.Printf(\"[WARN] failed to get file size for %s: %v\", entry.File, fiErr)\n\t\t} else {\n\t\t\tfileSize = int(fileInfo.Size())\n\t\t}\n\n\t\titems = append(items, feed.Item{\n\t\t\tTitle: entry.Title,\n\t\t\tDescription: entry.Media.Description,\n\t\t\tLink: entry.Link.Href,\n\t\t\tPubDate: entry.Published.Format(time.RFC822Z),\n\t\t\tGUID: entry.ChannelID + \"::\" + entry.VideoID,\n\t\t\tAuthor: entry.Author.Name,\n\t\t\tEnclosure: feed.Enclosure{\n\t\t\t\tURL: fileURL,\n\t\t\t\tType: \"audio\/mpeg\",\n\t\t\t\tLength: fileSize,\n\t\t\t},\n\t\t\tDT: time.Now(),\n\t\t})\n\t}\n\n\trss := feed.Rss2{\n\t\tVersion: \"2.0\",\n\t\tItemList: items,\n\t\tTitle: entries[0].Author.Name,\n\t\tDescription: \"generated by feed-master\",\n\t\tLink: entries[0].Author.URI,\n\t\tPubDate: items[0].PubDate,\n\t\tLastBuildDate: time.Now().Format(time.RFC822Z),\n\t}\n\n\tb, err := xml.MarshalIndent(&rss, \"\", \" \")\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to marshal rss\")\n\t}\n\n\treturn string(b), nil\n}\n\nfunc (s *Service) procChannels(ctx context.Context) error {\n\tfor _, chanInfo := range s.Channels {\n\t\tentries, err := s.ChannelService.Get(ctx, chanInfo.ID)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[WARN] failed to get channel entries for %s: %s\", chanInfo.ID, err)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Printf(\"[INFO] got %d entries for %s, limit to %d\", len(entries), chanInfo.Name, s.KeepPerChannel)\n\t\tchanged := false\n\t\tfor i, entry := range entries {\n\t\t\tif i >= s.KeepPerChannel {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\texists, exErr := s.Store.Exist(entry)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(exErr, \"failed to check if entry %s exists\", entry.VideoID)\n\t\t\t}\n\t\t\tif exists {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Printf(\"[INFO] new entry %s, %s, %s\", entry.VideoID, entry.Title, chanInfo.Name)\n\t\t\tfile, downErr := s.Downloader.Get(ctx, entry.VideoID, uuid.New().String())\n\t\t\tif downErr != nil {\n\t\t\t\tlog.Printf(\"[WARN] failed to download %s: %s\", entry.VideoID, downErr)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Printf(\"[DEBUG] downloaded %s (%s) to %s, channel: %+v\", entry.VideoID, entry.Title, file, chanInfo)\n\t\t\tentry.File = file\n\t\t\tok, saveErr := s.Store.Save(entry)\n\t\t\tif saveErr != nil {\n\t\t\t\treturn errors.Wrapf(saveErr, \"failed to save entry %+v\", entry)\n\t\t\t}\n\t\t\tif !ok {\n\t\t\t\tlog.Printf(\"[WARN] attempt to save dup entry %+v\", entry)\n\t\t\t}\n\t\t\tchanged = true\n\t\t\tlog.Printf(\"[INFO] saved %s (%s) to %s, channel: %+v\", entry.VideoID, entry.Title, file, chanInfo)\n\t\t}\n\n\t\tif changed { \/\/ save rss feed to fs if there are new entries\n\t\t\trss, rssErr := s.RSSFeed(chanInfo)\n\t\t\tif rssErr != nil {\n\t\t\t\tlog.Printf(\"[WARN] failed to generate rss for %s: %s\", chanInfo.Name, rssErr)\n\t\t\t} else {\n\t\t\t\tif err := s.RSSFileStore.Save(chanInfo.ID, rss); err != nil {\n\t\t\t\t\tlog.Printf(\"[WARN] failed to save rss for %s: %s\", chanInfo.Name, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ remove old entries and files\n\t\tfiles, rmErr := s.Store.RemoveOld(chanInfo.ID, s.KeepPerChannel)\n\t\tif rmErr != nil {\n\t\t\treturn errors.Wrapf(rmErr, \"failed to remove old meta data for %s\", chanInfo.ID)\n\t\t}\n\t\tfor _, f := range files {\n\t\t\tif e := os.Remove(f); e != nil {\n\t\t\t\tlog.Printf(\"[WARN] failed to remove file %s: %s\", f, e)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Printf(\"[INFO] removed %s for %s (%s)\", f, chanInfo.ID, chanInfo.Name)\n\t\t}\n\t}\n\tlog.Printf(\"[DEBUG] processed channels completed, total %d\", len(s.Channels))\n\treturn nil\n}\n<commit_msg>keep extra episode<commit_after>\/\/ Package youtube provides loading audio from video files for given youtube channels\npackage youtube\n\nimport (\n\t\"context\"\n\t\"encoding\/xml\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n\n\tlog \"github.com\/go-pkgz\/lgr\"\n\t\"github.com\/google\/uuid\"\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/umputun\/feed-master\/app\/feed\"\n\t\"github.com\/umputun\/feed-master\/app\/youtube\/channel\"\n)\n\n\/\/go:generate moq -out mocks\/downloader.go -pkg mocks -skip-ensure -fmt goimports . DownloaderService\n\/\/go:generate moq -out mocks\/channel.go -pkg mocks -skip-ensure -fmt goimports . ChannelService\n\/\/go:generate moq -out mocks\/store.go -pkg mocks -skip-ensure -fmt goimports . StoreService\n\n\/\/ Service loads audio from youtube channels\ntype Service struct {\n\tChannels []ChannelInfo\n\tDownloader DownloaderService\n\tChannelService ChannelService\n\tStore StoreService\n\tCheckDuration time.Duration\n\tRSSFileStore RSSFileStore\n\tKeepPerChannel int\n\tRootURL string\n}\n\n\/\/ ChannelInfo is a pait of channel ID and name\ntype ChannelInfo struct {\n\tName string\n\tID string\n}\n\n\/\/ DownloaderService is an interface for downloading audio from youtube\ntype DownloaderService interface {\n\tGet(ctx context.Context, id string, fname string) (file string, err error)\n}\n\n\/\/ ChannelService is an interface for getting channel entries, i.e. the list of videos\ntype ChannelService interface {\n\tGet(ctx context.Context, chanID string) ([]channel.Entry, error)\n}\n\n\/\/ StoreService is an interface for storing and loading metadata about downloaded audio\ntype StoreService interface {\n\tSave(entry channel.Entry) (bool, error)\n\tLoad(channelID string, max int) ([]channel.Entry, error)\n\tExist(entry channel.Entry) (bool, error)\n\tRemoveOld(channelID string, keep int) ([]string, error)\n}\n\n\/\/ Do is a blocking function that downloads audio from youtube channels and updates metadata\nfunc (s *Service) Do(ctx context.Context) error {\n\tlog.Printf(\"[INFO] Starting youtube service\")\n\ttick := time.NewTicker(s.CheckDuration)\n\tdefer tick.Stop()\n\n\tif err := s.procChannels(ctx); err != nil {\n\t\treturn errors.Wrap(err, \"failed to process channels\")\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tcase <-tick.C:\n\t\t\tif err := s.procChannels(ctx); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"failed to process channels\")\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ RSSFeed generates RSS feed for given channel\nfunc (s *Service) RSSFeed(cinfo ChannelInfo) (string, error) {\n\tentries, err := s.Store.Load(cinfo.ID, s.KeepPerChannel)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to get channel entries\")\n\t}\n\n\tif len(entries) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\titems := []feed.Item{}\n\tfor _, entry := range entries {\n\n\t\tfileURL := s.RootURL + \"\/\" + path.Base(entry.File)\n\n\t\tvar fileSize int\n\t\tif fileInfo, fiErr := os.Stat(entry.File); fiErr != nil {\n\t\t\tlog.Printf(\"[WARN] failed to get file size for %s: %v\", entry.File, fiErr)\n\t\t} else {\n\t\t\tfileSize = int(fileInfo.Size())\n\t\t}\n\n\t\titems = append(items, feed.Item{\n\t\t\tTitle: entry.Title,\n\t\t\tDescription: entry.Media.Description,\n\t\t\tLink: entry.Link.Href,\n\t\t\tPubDate: entry.Published.Format(time.RFC822Z),\n\t\t\tGUID: entry.ChannelID + \"::\" + entry.VideoID,\n\t\t\tAuthor: entry.Author.Name,\n\t\t\tEnclosure: feed.Enclosure{\n\t\t\t\tURL: fileURL,\n\t\t\t\tType: \"audio\/mpeg\",\n\t\t\t\tLength: fileSize,\n\t\t\t},\n\t\t\tDT: time.Now(),\n\t\t})\n\t}\n\n\trss := feed.Rss2{\n\t\tVersion: \"2.0\",\n\t\tItemList: items,\n\t\tTitle: entries[0].Author.Name,\n\t\tDescription: \"generated by feed-master\",\n\t\tLink: entries[0].Author.URI,\n\t\tPubDate: items[0].PubDate,\n\t\tLastBuildDate: time.Now().Format(time.RFC822Z),\n\t}\n\n\tb, err := xml.MarshalIndent(&rss, \"\", \" \")\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to marshal rss\")\n\t}\n\n\treturn string(b), nil\n}\n\nfunc (s *Service) procChannels(ctx context.Context) error {\n\tfor _, chanInfo := range s.Channels {\n\t\tentries, err := s.ChannelService.Get(ctx, chanInfo.ID)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[WARN] failed to get channel entries for %s: %s\", chanInfo.ID, err)\n\t\t\tcontinue\n\t\t}\n\t\tlog.Printf(\"[INFO] got %d entries for %s, limit to %d\", len(entries), chanInfo.Name, s.KeepPerChannel)\n\t\tchanged := false\n\t\tfor i, entry := range entries {\n\t\t\tif i >= s.KeepPerChannel {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\texists, exErr := s.Store.Exist(entry)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrapf(exErr, \"failed to check if entry %s exists\", entry.VideoID)\n\t\t\t}\n\t\t\tif exists {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Printf(\"[INFO] new entry %s, %s, %s\", entry.VideoID, entry.Title, chanInfo.Name)\n\t\t\tfile, downErr := s.Downloader.Get(ctx, entry.VideoID, uuid.New().String())\n\t\t\tif downErr != nil {\n\t\t\t\tlog.Printf(\"[WARN] failed to download %s: %s\", entry.VideoID, downErr)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Printf(\"[DEBUG] downloaded %s (%s) to %s, channel: %+v\", entry.VideoID, entry.Title, file, chanInfo)\n\t\t\tentry.File = file\n\t\t\tok, saveErr := s.Store.Save(entry)\n\t\t\tif saveErr != nil {\n\t\t\t\treturn errors.Wrapf(saveErr, \"failed to save entry %+v\", entry)\n\t\t\t}\n\t\t\tif !ok {\n\t\t\t\tlog.Printf(\"[WARN] attempt to save dup entry %+v\", entry)\n\t\t\t}\n\t\t\tchanged = true\n\t\t\tlog.Printf(\"[INFO] saved %s (%s) to %s, channel: %+v\", entry.VideoID, entry.Title, file, chanInfo)\n\t\t}\n\n\t\tif changed { \/\/ save rss feed to fs if there are new entries\n\t\t\trss, rssErr := s.RSSFeed(chanInfo)\n\t\t\tif rssErr != nil {\n\t\t\t\tlog.Printf(\"[WARN] failed to generate rss for %s: %s\", chanInfo.Name, rssErr)\n\t\t\t} else {\n\t\t\t\tif err := s.RSSFileStore.Save(chanInfo.ID, rss); err != nil {\n\t\t\t\t\tlog.Printf(\"[WARN] failed to save rss for %s: %s\", chanInfo.Name, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ remove old entries and files\n\t\tfiles, rmErr := s.Store.RemoveOld(chanInfo.ID, s.KeepPerChannel+1)\n\t\tif rmErr != nil {\n\t\t\treturn errors.Wrapf(rmErr, \"failed to remove old meta data for %s\", chanInfo.ID)\n\t\t}\n\t\tfor _, f := range files {\n\t\t\tif e := os.Remove(f); e != nil {\n\t\t\t\tlog.Printf(\"[WARN] failed to remove file %s: %s\", f, e)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlog.Printf(\"[INFO] removed %s for %s (%s)\", f, chanInfo.ID, chanInfo.Name)\n\t\t}\n\t}\n\tlog.Printf(\"[DEBUG] processed channels completed, total %d\", len(s.Channels))\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>added some simple rest server<commit_after><|endoftext|>"} {"text":"<commit_before>package db\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/chaosxu\/nerv\/lib\/model\"\n)\n\n\/\/ Define callbacks for deleting,support cascade deleting\nfunc init() {\n\tgorm.DefaultCallback.Delete().Replace(\"gorm:begin_transaction\", beginTransactionCallback)\n\tgorm.DefaultCallback.Delete().Replace(\"gorm:before_delete\", beforeDeleteCallback)\n\tgorm.DefaultCallback.Delete().After(\"gorm:before_delete\").Replace(\"chaosxu:before_delete_associations\", beforeDeleteAssociationsCallback)\n\tgorm.DefaultCallback.Delete().Replace(\"gorm:delete\", deleteCallback)\n\t\/\/gorm.DefaultCallback.Delete().After(\"gorm:delete\").Replace(\"chaosxu:after_delete_associations\", afterDeleteAssociationsCallback)\n\tgorm.DefaultCallback.Delete().Replace(\"gorm:after_delete\", afterDeleteCallback)\n\tgorm.DefaultCallback.Delete().Replace(\"gorm:commit_or_rollback_transaction\", commitOrRollbackTransactionCallback)\n}\n\nfunc beginTransactionCallback(scope *gorm.Scope) {\n\tscope.Begin()\n}\n\n\/\/ beforeDeleteCallback will invoke `BeforeDelete` method before deleting\nfunc beforeDeleteCallback(scope *gorm.Scope) {\n\tif !scope.HasError() {\n\t\tscope.CallMethod(\"BeforeDelete\")\n\t}\n}\n\n\/\/ deleteCallback used to delete data from database or set deleted_at to current time (when using with soft delete)\nfunc deleteCallback(scope *gorm.Scope) {\n\tlogCodeLine()\n\n\tif !scope.HasError() {\n\t\tvar extraOption string\n\t\tif str, ok := scope.Get(\"gorm:delete_option\"); ok {\n\t\t\textraOption = fmt.Sprint(str)\n\t\t}\n\n\t\tif !scope.Search.Unscoped && scope.HasColumn(\"DeletedAt\") {\n\t\t\tscope.Raw(fmt.Sprintf(\n\t\t\t\t\"UPDATE %v SET deleted_at=%v%v%v\",\n\t\t\t\tscope.QuotedTableName(),\n\t\t\t\tscope.AddToVars(gorm.NowFunc()),\n\t\t\t\taddExtraSpaceIfExist(scope.CombinedConditionSql()),\n\t\t\t\taddExtraSpaceIfExist(extraOption),\n\t\t\t)).Exec()\n\t\t} else {\n\t\t\tscope.Raw(fmt.Sprintf(\n\t\t\t\t\"DELETE FROM %v%v%v\",\n\t\t\t\tscope.QuotedTableName(),\n\t\t\t\taddExtraSpaceIfExist(scope.CombinedConditionSql()),\n\t\t\t\taddExtraSpaceIfExist(extraOption),\n\t\t\t)).Exec()\n\t\t}\n\t}\n}\n\n\/\/ afterDeleteCallback will invoke `AfterDelete` method after deleting\nfunc afterDeleteCallback(scope *gorm.Scope) {\n\tif !scope.HasError() {\n\t\tscope.CallMethod(\"AfterDelete\")\n\t}\n}\n\nfunc commitOrRollbackTransactionCallback(scope *gorm.Scope) {\n\tscope.CommitOrRollback()\n}\n\n\/\/cascade deleting\nfunc beforeDeleteAssociationsCallback(scope *gorm.Scope) {\n\tlogCodeLine()\n\t\/\/TBD config gorm:delete_associations\n\t\/\/if !scope.shouldDeleteAssociations() {\n\t\/\/\treturn\n\t\/\/}\n\tfor _, field := range scope.Fields() {\n\n\t\tif relationship := field.Relationship; relationship != nil && relationship.Kind == \"has_many\" {\n\t\t\t\/\/TBD:Now only support one foreign field and unit type\n\t\t\tforeignValue := scope.IndirectValue().FieldByName(relationship.AssociationForeignFieldNames[0]).Uint()\n\t\t\t\/\/fmt.Println(foreignValue)\n\t\t\tsql := fmt.Sprintf(\"%s = ?\", relationship.ForeignDBNames[0])\n\t\t\t\/\/fmt.Println(sql)\n\t\t\tclass := field.Field.Type().Elem()\n\t\t\tfmt.Println(class.Name())\n\t\t\tif err := DB.Unscoped().Delete(model.Models[class.Name()].Type, sql, foreignValue).Error; err != nil {\n\t\t\t\tscope.Err(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc afterDeleteAssociationsCallback(scope *gorm.Scope) {\n\tlogCodeLine()\n\t\/\/if !scope.shouldSaveAssociations() {\n\t\/\/\treturn\n\t\/\/}\n\t\/\/for _, field := range scope.Fields() {\n\t\/\/\tif scope.changeableField(field) && !field.IsBlank && !field.IsIgnored {\n\t\/\/\t\tif relationship := field.Relationship; relationship != nil &&\n\t\/\/\t\t\t\t(relationship.Kind == \"has_one\" || relationship.Kind == \"has_many\" || relationship.Kind == \"many_to_many\") {\n\t\/\/\t\t\tvalue := field.Field\n\t\/\/\n\t\/\/\t\t\tswitch value.Kind() {\n\t\/\/\t\t\tcase reflect.Slice:\n\t\/\/\t\t\t\tfor i := 0; i < value.Len(); i++ {\n\t\/\/\t\t\t\t\tnewDB := scope.NewDB()\n\t\/\/\t\t\t\t\telem := value.Index(i).Addr().Interface()\n\t\/\/\t\t\t\t\tnewScope := newDB.NewScope(elem)\n\t\/\/\n\t\/\/\t\t\t\t\tif relationship.JoinTableHandler == nil && len(relationship.ForeignFieldNames) != 0 {\n\t\/\/\t\t\t\t\t\tfor idx, fieldName := range relationship.ForeignFieldNames {\n\t\/\/\t\t\t\t\t\t\tassociationForeignName := relationship.AssociationForeignDBNames[idx]\n\t\/\/\t\t\t\t\t\t\tif f, ok := scope.FieldByName(associationForeignName); ok {\n\t\/\/\t\t\t\t\t\t\t\tscope.Err(newScope.SetColumn(fieldName, f.Field.Interface()))\n\t\/\/\t\t\t\t\t\t\t}\n\t\/\/\t\t\t\t\t\t}\n\t\/\/\t\t\t\t\t}\n\t\/\/\n\t\/\/\t\t\t\t\tif relationship.PolymorphicType != \"\" {\n\t\/\/\t\t\t\t\t\tscope.Err(newScope.SetColumn(relationship.PolymorphicType, scope.TableName()))\n\t\/\/\t\t\t\t\t}\n\t\/\/\n\t\/\/\t\t\t\t\tscope.Err(newDB.Save(elem).Error)\n\t\/\/\n\t\/\/\t\t\t\t\tif joinTableHandler := relationship.JoinTableHandler; joinTableHandler != nil {\n\t\/\/\t\t\t\t\t\tscope.Err(joinTableHandler.Add(joinTableHandler, newDB, scope.Value, newScope.Value))\n\t\/\/\t\t\t\t\t}\n\t\/\/\t\t\t\t}\n\t\/\/\t\t\tdefault:\n\t\/\/\t\t\t\telem := value.Addr().Interface()\n\t\/\/\t\t\t\tnewScope := scope.New(elem)\n\t\/\/\t\t\t\tif len(relationship.ForeignFieldNames) != 0 {\n\t\/\/\t\t\t\t\tfor idx, fieldName := range relationship.ForeignFieldNames {\n\t\/\/\t\t\t\t\t\tassociationForeignName := relationship.AssociationForeignDBNames[idx]\n\t\/\/\t\t\t\t\t\tif f, ok := scope.FieldByName(associationForeignName); ok {\n\t\/\/\t\t\t\t\t\t\tscope.Err(newScope.SetColumn(fieldName, f.Field.Interface()))\n\t\/\/\t\t\t\t\t\t}\n\t\/\/\t\t\t\t\t}\n\t\/\/\t\t\t\t}\n\t\/\/\n\t\/\/\t\t\t\tif relationship.PolymorphicType != \"\" {\n\t\/\/\t\t\t\t\tscope.Err(newScope.SetColumn(relationship.PolymorphicType, scope.TableName()))\n\t\/\/\t\t\t\t}\n\t\/\/\t\t\t\tscope.Err(scope.NewDB().Save(elem).Error)\n\t\/\/\t\t\t}\n\t\/\/\t\t}\n\t\/\/\t}\n}\n<commit_msg>Fix delete<commit_after>package db\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/chaosxu\/nerv\/lib\/model\"\n)\n\n\/\/ Define callbacks for deleting,support cascade deleting\nfunc init() {\n\tgorm.DefaultCallback.Delete().Replace(\"gorm:begin_transaction\", beginTransactionCallback)\n\tgorm.DefaultCallback.Delete().Replace(\"gorm:before_delete\", beforeDeleteCallback)\n\tgorm.DefaultCallback.Delete().After(\"gorm:before_delete\").Replace(\"chaosxu:before_delete_associations\", beforeDeleteAssociationsCallback)\n\tgorm.DefaultCallback.Delete().Replace(\"gorm:delete\", deleteCallback)\n\tgorm.DefaultCallback.Delete().After(\"gorm:delete\").Replace(\"chaosxu:after_delete_associations\", afterDeleteAssociationsCallback)\n\tgorm.DefaultCallback.Delete().Replace(\"gorm:after_delete\", afterDeleteCallback)\n\tgorm.DefaultCallback.Delete().Replace(\"gorm:commit_or_rollback_transaction\", commitOrRollbackTransactionCallback)\n}\n\nfunc beginTransactionCallback(scope *gorm.Scope) {\n\tscope.Begin()\n}\n\n\/\/ beforeDeleteCallback will invoke `BeforeDelete` method before deleting\nfunc beforeDeleteCallback(scope *gorm.Scope) {\n\tif !scope.HasError() {\n\t\tscope.CallMethod(\"BeforeDelete\")\n\t}\n}\n\n\/\/ deleteCallback used to delete data from database or set deleted_at to current time (when using with soft delete)\nfunc deleteCallback(scope *gorm.Scope) {\n\tlogCodeLine()\n\n\tif !scope.HasError() {\n\t\tvar extraOption string\n\t\tif str, ok := scope.Get(\"gorm:delete_option\"); ok {\n\t\t\textraOption = fmt.Sprint(str)\n\t\t}\n\n\t\tif !scope.Search.Unscoped && scope.HasColumn(\"DeletedAt\") {\n\t\t\tscope.Raw(fmt.Sprintf(\n\t\t\t\t\"UPDATE %v SET deleted_at=%v%v%v\",\n\t\t\t\tscope.QuotedTableName(),\n\t\t\t\tscope.AddToVars(gorm.NowFunc()),\n\t\t\t\taddExtraSpaceIfExist(scope.CombinedConditionSql()),\n\t\t\t\taddExtraSpaceIfExist(extraOption),\n\t\t\t)).Exec()\n\t\t} else {\n\t\t\tscope.Raw(fmt.Sprintf(\n\t\t\t\t\"DELETE FROM %v%v%v\",\n\t\t\t\tscope.QuotedTableName(),\n\t\t\t\taddExtraSpaceIfExist(scope.CombinedConditionSql()),\n\t\t\t\taddExtraSpaceIfExist(extraOption),\n\t\t\t)).Exec()\n\t\t}\n\t}\n}\n\n\/\/ afterDeleteCallback will invoke `AfterDelete` method after deleting\nfunc afterDeleteCallback(scope *gorm.Scope) {\n\tif !scope.HasError() {\n\t\tscope.CallMethod(\"AfterDelete\")\n\t}\n}\n\nfunc commitOrRollbackTransactionCallback(scope *gorm.Scope) {\n\tscope.CommitOrRollback()\n}\n\n\/\/cascade deleting\nfunc beforeDeleteAssociationsCallback(scope *gorm.Scope) {\n\tlogCodeLine()\n\t\/\/TBD config gorm:delete_associations\n\t\/\/if !scope.shouldDeleteAssociations() {\n\t\/\/\treturn\n\t\/\/}\n\tfor _, field := range scope.Fields() {\n\n\t\tif relationship := field.Relationship; relationship != nil && relationship.Kind == \"has_many\" {\n\t\t\t\/\/TBD:Now only support one foreign field and unit type\n\t\t\tforeignValue := scope.IndirectValue().FieldByName(relationship.AssociationForeignFieldNames[0]).Uint()\n\t\t\t\/\/fmt.Println(foreignValue)\n\t\t\tsql := fmt.Sprintf(\"%s = ?\", relationship.ForeignDBNames[0])\n\t\t\t\/\/fmt.Println(sql)\n\t\t\tclass := field.Field.Type().Elem()\n\t\t\tfmt.Println(class.Name())\n\t\t\tif err := DB.Unscoped().Delete(model.Models[class.Name()].Type, sql, foreignValue).Error; err != nil {\n\t\t\t\tscope.Err(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc afterDeleteAssociationsCallback(scope *gorm.Scope) {\n\tlogCodeLine()\n\t\/\/if !scope.shouldSaveAssociations() {\n\t\/\/\treturn\n\t\/\/}\n\t\/\/for _, field := range scope.Fields() {\n\t\/\/\tif scope.changeableField(field) && !field.IsBlank && !field.IsIgnored {\n\t\/\/\t\tif relationship := field.Relationship; relationship != nil &&\n\t\/\/\t\t\t\t(relationship.Kind == \"has_one\" || relationship.Kind == \"has_many\" || relationship.Kind == \"many_to_many\") {\n\t\/\/\t\t\tvalue := field.Field\n\t\/\/\n\t\/\/\t\t\tswitch value.Kind() {\n\t\/\/\t\t\tcase reflect.Slice:\n\t\/\/\t\t\t\tfor i := 0; i < value.Len(); i++ {\n\t\/\/\t\t\t\t\tnewDB := scope.NewDB()\n\t\/\/\t\t\t\t\telem := value.Index(i).Addr().Interface()\n\t\/\/\t\t\t\t\tnewScope := newDB.NewScope(elem)\n\t\/\/\n\t\/\/\t\t\t\t\tif relationship.JoinTableHandler == nil && len(relationship.ForeignFieldNames) != 0 {\n\t\/\/\t\t\t\t\t\tfor idx, fieldName := range relationship.ForeignFieldNames {\n\t\/\/\t\t\t\t\t\t\tassociationForeignName := relationship.AssociationForeignDBNames[idx]\n\t\/\/\t\t\t\t\t\t\tif f, ok := scope.FieldByName(associationForeignName); ok {\n\t\/\/\t\t\t\t\t\t\t\tscope.Err(newScope.SetColumn(fieldName, f.Field.Interface()))\n\t\/\/\t\t\t\t\t\t\t}\n\t\/\/\t\t\t\t\t\t}\n\t\/\/\t\t\t\t\t}\n\t\/\/\n\t\/\/\t\t\t\t\tif relationship.PolymorphicType != \"\" {\n\t\/\/\t\t\t\t\t\tscope.Err(newScope.SetColumn(relationship.PolymorphicType, scope.TableName()))\n\t\/\/\t\t\t\t\t}\n\t\/\/\n\t\/\/\t\t\t\t\tscope.Err(newDB.Save(elem).Error)\n\t\/\/\n\t\/\/\t\t\t\t\tif joinTableHandler := relationship.JoinTableHandler; joinTableHandler != nil {\n\t\/\/\t\t\t\t\t\tscope.Err(joinTableHandler.Add(joinTableHandler, newDB, scope.Value, newScope.Value))\n\t\/\/\t\t\t\t\t}\n\t\/\/\t\t\t\t}\n\t\/\/\t\t\tdefault:\n\t\/\/\t\t\t\telem := value.Addr().Interface()\n\t\/\/\t\t\t\tnewScope := scope.New(elem)\n\t\/\/\t\t\t\tif len(relationship.ForeignFieldNames) != 0 {\n\t\/\/\t\t\t\t\tfor idx, fieldName := range relationship.ForeignFieldNames {\n\t\/\/\t\t\t\t\t\tassociationForeignName := relationship.AssociationForeignDBNames[idx]\n\t\/\/\t\t\t\t\t\tif f, ok := scope.FieldByName(associationForeignName); ok {\n\t\/\/\t\t\t\t\t\t\tscope.Err(newScope.SetColumn(fieldName, f.Field.Interface()))\n\t\/\/\t\t\t\t\t\t}\n\t\/\/\t\t\t\t\t}\n\t\/\/\t\t\t\t}\n\t\/\/\n\t\/\/\t\t\t\tif relationship.PolymorphicType != \"\" {\n\t\/\/\t\t\t\t\tscope.Err(newScope.SetColumn(relationship.PolymorphicType, scope.TableName()))\n\t\/\/\t\t\t\t}\n\t\/\/\t\t\t\tscope.Err(scope.NewDB().Save(elem).Error)\n\t\/\/\t\t\t}\n\t\/\/\t\t}\n\t\/\/\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Factom Foundation\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\/\/\t\"crypto\/sha256\"\n\t\/\/\t\"encoding\/hex\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/FactomProject\/cli\"\n\t\"github.com\/FactomProject\/factom\"\n)\n\nvar get = func() *fctCmd {\n\tcmd := new(fctCmd)\n\tcmd.helpMsg = \"factom-cli get head|dblock|height|chain|eblock|entry|firstentry\"\n\tcmd.description = \"get Block or Entry data from factomd\"\n\tcmd.execFunc = func(args []string) {\n\t\tos.Args = args\n\t\tflag.Parse()\n\t\targs = flag.Args()\n\n\t\tc := cli.New()\n\t\tc.Handle(\"head\", getHead)\n\t\tc.Handle(\"height\", getHeight)\n\t\tc.Handle(\"dblock\", getDBlock)\n\t\tc.Handle(\"chainhead\", getChainHead)\n\t\tc.Handle(\"eblock\", getEBlock)\n\t\tc.Handle(\"entry\", getEntry)\n\t\tc.Handle(\"firstentry\", getFirstEntry)\n\t\tc.HandleDefaultFunc(func(args []string) {\n\t\t\tfmt.Println(cmd.helpMsg)\n\t\t})\n\t\tc.Execute(args)\n\t}\n\thelp.Add(\"get\", cmd)\n\treturn cmd\n}()\n\nvar getHead = func() *fctCmd {\n\tcmd := new(fctCmd)\n\tcmd.helpMsg = \"factom-cli get head\"\n\tcmd.description = \"Get the keymr of the last completed directory block\"\n\tcmd.execFunc = func(args []string) {\n\t\thead, err := factom.GetDBlockHead()\n\t\tif err != nil {\n\t\t\terrorln(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(head.KeyMR)\n\t}\n\thelp.Add(\"get head\", cmd)\n\treturn cmd\n}()\n\nvar getHeight = func() *fctCmd {\n\tcmd := new(fctCmd)\n\tcmd.helpMsg = \"factom-cli get height\"\n\tcmd.description = \"Get the current directory block height\"\n\tcmd.execFunc = func(args []string) {\n\t\theight, err := factom.GetDBlockHeight()\n\t\tif err != nil {\n\t\t\terrorln(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(height)\n\t}\n\thelp.Add(\"get height\", cmd)\n\treturn cmd\n}()\n\nvar getDBlock = func() *fctCmd {\n\tcmd := new(fctCmd)\n\tcmd.helpMsg = \"factom-cli get dblock KEYMR\"\n\tcmd.description = \"Get dblock contents by merkle root\"\n\tcmd.execFunc = func(args []string) {\n\t\tos.Args = args\n\t\tflag.Parse()\n\t\targs = flag.Args()\n\t\tif len(args) < 1 {\n\t\t\tfmt.Println(cmd.helpMsg)\n\t\t\treturn\n\t\t}\n\n\t\tkeymr := args[0]\n\t\tdblock, err := factom.GetDBlock(keymr)\n\t\tif err != nil {\n\t\t\terrorln(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(dblock)\n\t}\n\thelp.Add(\"get dblock\", cmd)\n\treturn cmd\n}()\n\nvar getChainHead = func() *fctCmd {\n\tcmd := new(fctCmd)\n\tcmd.helpMsg = \"factom-cli get chainhead CHAINID\"\n\tcmd.description = \"Get ebhead by chainid\"\n\tcmd.execFunc = func(args []string) {\n\t\tos.Args = args\n\t\tflag.Parse()\n\t\targs = flag.Args()\n\t\tif len(args) < 1 {\n\t\t\tfmt.Println(cmd.helpMsg)\n\t\t\treturn\n\t\t}\n\n\t\tchainid := args[0]\n\t\tchain, err := factom.GetChainHead(chainid)\n\t\tif err != nil {\n\t\t\terrorln(err)\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Println(chain.ChainHead)\n\t}\n\thelp.Add(\"get chainhead\", cmd)\n\treturn cmd\n}()\n\nvar getEBlock = func() *fctCmd {\n\tcmd := new(fctCmd)\n\tcmd.helpMsg = \"factom-cli get eblock KEYMR\"\n\tcmd.description = \"Get eblock by merkle root\"\n\tcmd.execFunc = func(args []string) {\n\t\tos.Args = args\n\t\tflag.Parse()\n\t\targs = flag.Args()\n\t\tif len(args) < 1 {\n\t\t\tfmt.Println(cmd.helpMsg)\n\t\t\treturn\n\t\t}\n\n\t\tkeymr := args[0]\n\t\teblock, err := factom.GetEBlock(keymr)\n\t\tif err != nil {\n\t\t\terrorln(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(eblock)\n\t}\n\thelp.Add(\"get eblock\", cmd)\n\treturn cmd\n}()\n\nvar getEntry = func() *fctCmd {\n\tcmd := new(fctCmd)\n\tcmd.helpMsg = \"factom-cli get entry HASH\"\n\tcmd.description = \"Get entry by hash\"\n\tcmd.execFunc = func(args []string) {\n\t\tos.Args = args\n\t\tflag.Parse()\n\t\targs = flag.Args()\n\t\tif len(args) < 1 {\n\t\t\tfmt.Println(cmd.helpMsg)\n\t\t\treturn\n\t\t}\n\n\t\thash := args[0]\n\t\tentry, err := factom.GetEntry(hash)\n\t\tif err != nil {\n\t\t\terrorln(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(entry)\n\t}\n\thelp.Add(\"get entry\", cmd)\n\treturn cmd\n}()\n\nvar getFirstEntry = func() *fctCmd {\n\tcmd := new(fctCmd)\n\tcmd.helpMsg = \"factom-cli get firstentry CHAINID\"\n\tcmd.description = \"Get the first entry from a chain\"\n\tcmd.execFunc = func(args []string) {\n\t\tos.Args = args\n\t\tflag.Parse()\n\t\targs = flag.Args()\n\t\tif len(args) < 1 {\n\t\t\tfmt.Println(cmd.helpMsg)\n\t\t\treturn\n\t\t}\n\n\t\tchainid := args[0]\n\t\tentry, err := factom.GetFirstEntry(chainid)\n\t\tif err != nil {\n\t\t\terrorln(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(entry)\n\t}\n\thelp.Add(\"get firstentry\", cmd)\n\treturn cmd\n}()\n<commit_msg>help msg for get<commit_after>\/\/ Copyright 2016 Factom Foundation\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\/\/\t\"crypto\/sha256\"\n\t\/\/\t\"encoding\/hex\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/FactomProject\/cli\"\n\t\"github.com\/FactomProject\/factom\"\n)\n\nvar get = func() *fctCmd {\n\tcmd := new(fctCmd)\n\tcmd.helpMsg = \"factom-cli get head|dblock|height|chainhead|eblock|entry|firstentry\"\n\tcmd.description = \"get Block or Entry data from factomd\"\n\tcmd.execFunc = func(args []string) {\n\t\tos.Args = args\n\t\tflag.Parse()\n\t\targs = flag.Args()\n\n\t\tc := cli.New()\n\t\tc.Handle(\"head\", getHead)\n\t\tc.Handle(\"height\", getHeight)\n\t\tc.Handle(\"dblock\", getDBlock)\n\t\tc.Handle(\"chainhead\", getChainHead)\n\t\tc.Handle(\"eblock\", getEBlock)\n\t\tc.Handle(\"entry\", getEntry)\n\t\tc.Handle(\"firstentry\", getFirstEntry)\n\t\tc.HandleDefaultFunc(func(args []string) {\n\t\t\tfmt.Println(cmd.helpMsg)\n\t\t})\n\t\tc.Execute(args)\n\t}\n\thelp.Add(\"get\", cmd)\n\treturn cmd\n}()\n\nvar getHead = func() *fctCmd {\n\tcmd := new(fctCmd)\n\tcmd.helpMsg = \"factom-cli get head\"\n\tcmd.description = \"Get the keymr of the last completed directory block\"\n\tcmd.execFunc = func(args []string) {\n\t\thead, err := factom.GetDBlockHead()\n\t\tif err != nil {\n\t\t\terrorln(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(head.KeyMR)\n\t}\n\thelp.Add(\"get head\", cmd)\n\treturn cmd\n}()\n\nvar getHeight = func() *fctCmd {\n\tcmd := new(fctCmd)\n\tcmd.helpMsg = \"factom-cli get height\"\n\tcmd.description = \"Get the current directory block height\"\n\tcmd.execFunc = func(args []string) {\n\t\theight, err := factom.GetDBlockHeight()\n\t\tif err != nil {\n\t\t\terrorln(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(height)\n\t}\n\thelp.Add(\"get height\", cmd)\n\treturn cmd\n}()\n\nvar getDBlock = func() *fctCmd {\n\tcmd := new(fctCmd)\n\tcmd.helpMsg = \"factom-cli get dblock KEYMR\"\n\tcmd.description = \"Get dblock contents by merkle root\"\n\tcmd.execFunc = func(args []string) {\n\t\tos.Args = args\n\t\tflag.Parse()\n\t\targs = flag.Args()\n\t\tif len(args) < 1 {\n\t\t\tfmt.Println(cmd.helpMsg)\n\t\t\treturn\n\t\t}\n\n\t\tkeymr := args[0]\n\t\tdblock, err := factom.GetDBlock(keymr)\n\t\tif err != nil {\n\t\t\terrorln(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(dblock)\n\t}\n\thelp.Add(\"get dblock\", cmd)\n\treturn cmd\n}()\n\nvar getChainHead = func() *fctCmd {\n\tcmd := new(fctCmd)\n\tcmd.helpMsg = \"factom-cli get chainhead CHAINID\"\n\tcmd.description = \"Get ebhead by chainid\"\n\tcmd.execFunc = func(args []string) {\n\t\tos.Args = args\n\t\tflag.Parse()\n\t\targs = flag.Args()\n\t\tif len(args) < 1 {\n\t\t\tfmt.Println(cmd.helpMsg)\n\t\t\treturn\n\t\t}\n\n\t\tchainid := args[0]\n\t\tchain, err := factom.GetChainHead(chainid)\n\t\tif err != nil {\n\t\t\terrorln(err)\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Println(chain.ChainHead)\n\t}\n\thelp.Add(\"get chainhead\", cmd)\n\treturn cmd\n}()\n\nvar getEBlock = func() *fctCmd {\n\tcmd := new(fctCmd)\n\tcmd.helpMsg = \"factom-cli get eblock KEYMR\"\n\tcmd.description = \"Get eblock by merkle root\"\n\tcmd.execFunc = func(args []string) {\n\t\tos.Args = args\n\t\tflag.Parse()\n\t\targs = flag.Args()\n\t\tif len(args) < 1 {\n\t\t\tfmt.Println(cmd.helpMsg)\n\t\t\treturn\n\t\t}\n\n\t\tkeymr := args[0]\n\t\teblock, err := factom.GetEBlock(keymr)\n\t\tif err != nil {\n\t\t\terrorln(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(eblock)\n\t}\n\thelp.Add(\"get eblock\", cmd)\n\treturn cmd\n}()\n\nvar getEntry = func() *fctCmd {\n\tcmd := new(fctCmd)\n\tcmd.helpMsg = \"factom-cli get entry HASH\"\n\tcmd.description = \"Get entry by hash\"\n\tcmd.execFunc = func(args []string) {\n\t\tos.Args = args\n\t\tflag.Parse()\n\t\targs = flag.Args()\n\t\tif len(args) < 1 {\n\t\t\tfmt.Println(cmd.helpMsg)\n\t\t\treturn\n\t\t}\n\n\t\thash := args[0]\n\t\tentry, err := factom.GetEntry(hash)\n\t\tif err != nil {\n\t\t\terrorln(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(entry)\n\t}\n\thelp.Add(\"get entry\", cmd)\n\treturn cmd\n}()\n\nvar getFirstEntry = func() *fctCmd {\n\tcmd := new(fctCmd)\n\tcmd.helpMsg = \"factom-cli get firstentry CHAINID\"\n\tcmd.description = \"Get the first entry from a chain\"\n\tcmd.execFunc = func(args []string) {\n\t\tos.Args = args\n\t\tflag.Parse()\n\t\targs = flag.Args()\n\t\tif len(args) < 1 {\n\t\t\tfmt.Println(cmd.helpMsg)\n\t\t\treturn\n\t\t}\n\n\t\tchainid := args[0]\n\t\tentry, err := factom.GetFirstEntry(chainid)\n\t\tif err != nil {\n\t\t\terrorln(err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(entry)\n\t}\n\thelp.Add(\"get firstentry\", cmd)\n\treturn cmd\n}()\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The etcd-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage v1beta2\n\nimport metav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\nconst (\n\tBackupStorageTypeS3 BackupStorageType = \"S3\"\n\n\tAWSSecretCredentialsFileName = \"credentials\"\n\tAWSSecretConfigFileName = \"config\"\n)\n\ntype BackupStorageType string\n\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\n\/\/ EtcdBackupList is a list of EtcdBackup.\ntype EtcdBackupList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\tItems []EtcdBackup `json:\"items\"`\n}\n\n\/\/ +genclient\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\n\/\/ EtcdBackup represents a Kubernetes EtcdBackup Custom Resource.\ntype EtcdBackup struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ObjectMeta `json:\"metadata\"`\n\tSpec BackupSpec `json:\"spec\"`\n\tStatus BackupCRStatus `json:\"status,omitempty\"`\n}\n\n\/\/ BackupSpec contains a backup specification for an etcd cluster.\ntype BackupSpec struct {\n\t\/\/ EtcdEndpoints specifies the endpoints of an etcd cluster.\n\t\/\/ When multiple endpoints are given, the backup operator retrieves\n\t\/\/ the backup from the endpoint that has the most up-to-date state.\n\t\/\/ The given endpoints must belong to the same etcd cluster.\n\tEtcdEndpoints []string `json:\"etcdEndpoints,omitempty\"`\n\t\/\/ StorageType is the etcd backup storage type.\n\t\/\/ We need this field because CRD doesn't support validation against invalid fields\n\t\/\/ and we cannot verify invalid backup storage source.\n\tStorageType BackupStorageType `json:\"storageType\"`\n\t\/\/ BackupSource is the backup storage source.\n\tBackupSource `json:\",inline\"`\n\t\/\/ ClientTLSSecret is the secret containing the etcd TLS client certs and\n\t\/\/ must contain the following data items:\n\t\/\/ data:\n\t\/\/ \"etcd-client.crt\": <pem-encoded-cert>\n\t\/\/ \"etcd-client.key\": <pem-encoded-key>\n\t\/\/ \"etcd-client-ca.crt\": <pem-encoded-ca-cert>\n\tClientTLSSecret string `json:\"clientTLSSecret,omitempty\"`\n}\n\n\/\/ BackupSource contains the supported backup sources.\ntype BackupSource struct {\n\t\/\/ S3 defines the S3 backup source spec.\n\tS3 *S3BackupSource `json:\"s3,omitempty\"`\n}\n\n\/\/ BackupCRStatus represents the status of the EtcdBackup Custom Resource.\ntype BackupCRStatus struct {\n\t\/\/ Succeeded indicates if the backup has Succeeded.\n\tSucceeded bool `json:\"succeeded\"`\n\t\/\/ Reason indicates the reason for any backup related failures.\n\tReason string `json:\"Reason,omitempty\"`\n}\n\n\/\/ S3BackupSource provides the spec how to store backups on S3.\ntype S3BackupSource struct {\n\t\/\/ Path is the full s3 path where the backup is saved.\n\t\/\/ The format of the path must be: \"<s3-bucket-name>\/<path-to-backup-file>\"\n\t\/\/ e.g: \"mybucket\/etcd.backup\"\n\tPath string `json:\"path\"`\n\n\t\/\/ The name of the secret object that stores the AWS credential and config files.\n\t\/\/ The file name of the credential MUST be 'credentials'.\n\t\/\/ The file name of the config MUST be 'config'.\n\t\/\/ The profile to use in both files will be 'default'.\n\t\/\/\n\t\/\/ AWSSecret overwrites the default etcd operator wide AWS credential and config.\n\tAWSSecret string `json:\"awsSecret\"`\n}\n<commit_msg>apis: add EtcdVersion and EtcdRevision to BackupCRStatus (#1788)<commit_after>\/\/ Copyright 2017 The etcd-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage v1beta2\n\nimport metav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\nconst (\n\tBackupStorageTypeS3 BackupStorageType = \"S3\"\n\n\tAWSSecretCredentialsFileName = \"credentials\"\n\tAWSSecretConfigFileName = \"config\"\n)\n\ntype BackupStorageType string\n\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\n\/\/ EtcdBackupList is a list of EtcdBackup.\ntype EtcdBackupList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\tItems []EtcdBackup `json:\"items\"`\n}\n\n\/\/ +genclient\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\n\/\/ EtcdBackup represents a Kubernetes EtcdBackup Custom Resource.\ntype EtcdBackup struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ObjectMeta `json:\"metadata\"`\n\tSpec BackupSpec `json:\"spec\"`\n\tStatus BackupCRStatus `json:\"status,omitempty\"`\n}\n\n\/\/ BackupSpec contains a backup specification for an etcd cluster.\ntype BackupSpec struct {\n\t\/\/ EtcdEndpoints specifies the endpoints of an etcd cluster.\n\t\/\/ When multiple endpoints are given, the backup operator retrieves\n\t\/\/ the backup from the endpoint that has the most up-to-date state.\n\t\/\/ The given endpoints must belong to the same etcd cluster.\n\tEtcdEndpoints []string `json:\"etcdEndpoints,omitempty\"`\n\t\/\/ StorageType is the etcd backup storage type.\n\t\/\/ We need this field because CRD doesn't support validation against invalid fields\n\t\/\/ and we cannot verify invalid backup storage source.\n\tStorageType BackupStorageType `json:\"storageType\"`\n\t\/\/ BackupSource is the backup storage source.\n\tBackupSource `json:\",inline\"`\n\t\/\/ ClientTLSSecret is the secret containing the etcd TLS client certs and\n\t\/\/ must contain the following data items:\n\t\/\/ data:\n\t\/\/ \"etcd-client.crt\": <pem-encoded-cert>\n\t\/\/ \"etcd-client.key\": <pem-encoded-key>\n\t\/\/ \"etcd-client-ca.crt\": <pem-encoded-ca-cert>\n\tClientTLSSecret string `json:\"clientTLSSecret,omitempty\"`\n}\n\n\/\/ BackupSource contains the supported backup sources.\ntype BackupSource struct {\n\t\/\/ S3 defines the S3 backup source spec.\n\tS3 *S3BackupSource `json:\"s3,omitempty\"`\n}\n\n\/\/ BackupCRStatus represents the status of the EtcdBackup Custom Resource.\ntype BackupCRStatus struct {\n\t\/\/ Succeeded indicates if the backup has Succeeded.\n\tSucceeded bool `json:\"succeeded\"`\n\t\/\/ Reason indicates the reason for any backup related failures.\n\tReason string `json:\"Reason,omitempty\"`\n\t\/\/ EtcdVersion is the version of the backup etcd server.\n\tEtcdVersion string `json:\"etcdVersion,omitempty\"`\n\t\/\/ EtcdRevision is the revision of etcd's KV store where the backup is performed on.\n\tEtcdRevision string `json:\"etcdRevision,omitempty\"`\n}\n\n\/\/ S3BackupSource provides the spec how to store backups on S3.\ntype S3BackupSource struct {\n\t\/\/ Path is the full s3 path where the backup is saved.\n\t\/\/ The format of the path must be: \"<s3-bucket-name>\/<path-to-backup-file>\"\n\t\/\/ e.g: \"mybucket\/etcd.backup\"\n\tPath string `json:\"path\"`\n\n\t\/\/ The name of the secret object that stores the AWS credential and config files.\n\t\/\/ The file name of the credential MUST be 'credentials'.\n\t\/\/ The file name of the config MUST be 'config'.\n\t\/\/ The profile to use in both files will be 'default'.\n\t\/\/\n\t\/\/ AWSSecret overwrites the default etcd operator wide AWS credential and config.\n\tAWSSecret string `json:\"awsSecret\"`\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n\tThis is the server package.\n\tThe purpouse of this package is to map a connection to each player(who is online) so we have a comunication chanel.\n\n*\/\npackage server\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/fzzy\/sockjs-go\/sockjs\"\n\t\"log\"\n\t\"net\/http\"\n\t\"warcluster\/db_manager\"\n\t\"warcluster\/entities\"\n)\n\nvar HOST string \/\/Server scope constant that keeps the server host address.\nvar PORT int \/\/Server scope constant that keeps the server port number.\nvar IS_RUNNING bool \/\/Server scope variable that represents the is active flag.\n\nvar sessions *sockjs.SessionPool = sockjs.NewSessionPool() \/\/This is the SockJs sessions pull (a list of all the currently active client's sessions).\n\n\/*This function goes trough all the procedurs needed for the werver to be initialized.\n1.Create an empty connections pool\n2.Starts the listening foe messages loop.*\/\nfunc Start(host string, port int) error {\n\tlog.Print(\"Server is starting...\")\n\tif IS_RUNNING {\n\t\treturn errors.New(\"Server is already started!\")\n\t} else {\n\t\tHOST = host\n\t\tPORT = port\n\t\tIS_RUNNING = true\n\t}\n\tlog.Println(\"Server is up and running!\")\n\tmux := sockjs.NewServeMux(http.DefaultServeMux)\n\tconf := sockjs.NewConfig()\n\n\thttp.HandleFunc(\"\/console\", staticHandler)\n\thttp.Handle(\"\/static\", http.FileServer(http.Dir(\".\/static\")))\n\tmux.Handle(\"\/universe\", handler, conf)\n\n\tif err := http.ListenAndServe(fmt.Sprintf(\"%v:%v\", HOST, PORT), mux); err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\treturn Stop()\n}\n\n\/\/Die biatch and get the fuck out.\nfunc Stop() error {\n\tlog.Println(\"Server is shutting down...\")\n\tif !IS_RUNNING {\n\t\terr := errors.New(\"Server is already stopped!\")\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\n\tIS_RUNNING = false\n\tlog.Println(\"Server has stopped.\")\n\treturn nil\n}\n\n\/\/Stop + Start = Restart\nfunc Restart() {\n\tStop()\n\tStart(HOST, PORT)\n}\n\n\/\/Returns the HTML page needed to display the debug page (server \"chat\" window).\nfunc staticHandler(w http.ResponseWriter, r *http.Request) {\n\thttp.ServeFile(w, r, \".\/static\/index.html\")\n}\n\n\/*This function is called from the message handler to parse the first message for every new connection.\nIt check for existing user in the DB and logs him if the password is correct.\nIf the user is new he is initiated and a new home planet nad solar system are generated.*\/\nfunc login(session sockjs.Session) (*Client, error) {\n\tnickname, player := authenticate(session)\n\n\tclient := &Client{\n\t\tSession: session,\n\t\tNickname: nickname,\n\t\tPlayer: player,\n\t}\n\n\thome_planet_entity, _ := db_manager.GetEntity(client.Player.HomePlanet)\n\thome_planet := home_planet_entity.(entities.Planet)\n\tsession.Send([]byte(fmt.Sprintf(\"{\\\"Command\\\": \\\"login_success\\\", \\\"Username\\\": \\\"%s\\\", \\\"Position\\\": [%d, %d] }\",\n\t\tclient.Nickname, home_planet.GetCoords()[0], home_planet.GetCoords()[1])))\n\treturn client, nil\n}\n\n\/*On the first rescived message from each connection the server will call the handler.\nSo it can complete the following actions:\n1.Adding a new session to the session pool.\n2.Call the login func to validate the connection\n3.If the connection is valid enters \"while true\" state and uses ParseRequest to parse the requests. Shocking right?!?!*\/\nfunc handler(session sockjs.Session) {\n\tsessions.Add(session)\n\tdefer sessions.Remove(session)\n\n\tif client, err := login(session); err == nil {\n\t\tfor {\n\t\t\tmessage := session.Receive()\n\t\t\tif message == nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif request, err := UnmarshalRequest(message, client); err == nil {\n\t\t\t\tif action, err := ParseRequest(request); err == nil {\n\t\t\t\t\tif err := action(request); err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(\"Error in server.main.handler:\", err.Error())\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Println(\"Error in server.main.handler:\", err.Error())\n\t\t\t}\n\t\t}\n\t} else {\n\t\tsession.End()\n\t}\n}\n<commit_msg>Make parsing requests a little bit more safe<commit_after>\/*\n\tThis is the server package.\n\tThe purpouse of this package is to map a connection to each player(who is online) so we have a comunication chanel.\n\n*\/\npackage server\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/fzzy\/sockjs-go\/sockjs\"\n\t\"log\"\n\t\"net\/http\"\n\t\"warcluster\/db_manager\"\n\t\"warcluster\/entities\"\n)\n\nvar HOST string \/\/Server scope constant that keeps the server host address.\nvar PORT int \/\/Server scope constant that keeps the server port number.\nvar IS_RUNNING bool \/\/Server scope variable that represents the is active flag.\n\nvar sessions *sockjs.SessionPool = sockjs.NewSessionPool() \/\/This is the SockJs sessions pull (a list of all the currently active client's sessions).\n\n\/*This function goes trough all the procedurs needed for the werver to be initialized.\n1.Create an empty connections pool\n2.Starts the listening foe messages loop.*\/\nfunc Start(host string, port int) error {\n\tlog.Print(\"Server is starting...\")\n\tif IS_RUNNING {\n\t\treturn errors.New(\"Server is already started!\")\n\t} else {\n\t\tHOST = host\n\t\tPORT = port\n\t\tIS_RUNNING = true\n\t}\n\tlog.Println(\"Server is up and running!\")\n\tmux := sockjs.NewServeMux(http.DefaultServeMux)\n\tconf := sockjs.NewConfig()\n\n\thttp.HandleFunc(\"\/console\", staticHandler)\n\thttp.Handle(\"\/static\", http.FileServer(http.Dir(\".\/static\")))\n\tmux.Handle(\"\/universe\", handler, conf)\n\n\tif err := http.ListenAndServe(fmt.Sprintf(\"%v:%v\", HOST, PORT), mux); err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\treturn Stop()\n}\n\n\/\/Die biatch and get the fuck out.\nfunc Stop() error {\n\tlog.Println(\"Server is shutting down...\")\n\tif !IS_RUNNING {\n\t\terr := errors.New(\"Server is already stopped!\")\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\n\tIS_RUNNING = false\n\tlog.Println(\"Server has stopped.\")\n\treturn nil\n}\n\n\/\/Stop + Start = Restart\nfunc Restart() {\n\tStop()\n\tStart(HOST, PORT)\n}\n\n\/\/Returns the HTML page needed to display the debug page (server \"chat\" window).\nfunc staticHandler(w http.ResponseWriter, r *http.Request) {\n\thttp.ServeFile(w, r, \".\/static\/index.html\")\n}\n\n\/*This function is called from the message handler to parse the first message for every new connection.\nIt check for existing user in the DB and logs him if the password is correct.\nIf the user is new he is initiated and a new home planet nad solar system are generated.*\/\nfunc login(session sockjs.Session) (*Client, error) {\n\tnickname, player := authenticate(session)\n\n\tclient := &Client{\n\t\tSession: session,\n\t\tNickname: nickname,\n\t\tPlayer: player,\n\t}\n\n\thome_planet_entity, _ := db_manager.GetEntity(client.Player.HomePlanet)\n\thome_planet := home_planet_entity.(entities.Planet)\n\tsession.Send([]byte(fmt.Sprintf(\"{\\\"Command\\\": \\\"login_success\\\", \\\"Username\\\": \\\"%s\\\", \\\"Position\\\": [%d, %d] }\",\n\t\tclient.Nickname, home_planet.GetCoords()[0], home_planet.GetCoords()[1])))\n\treturn client, nil\n}\n\n\/*On the first rescived message from each connection the server will call the handler.\nSo it can complete the following actions:\n1.Adding a new session to the session pool.\n2.Call the login func to validate the connection\n3.If the connection is valid enters \"while true\" state and uses ParseRequest to parse the requests. Shocking right?!?!*\/\nfunc handler(session sockjs.Session) {\n\tsessions.Add(session)\n\tdefer sessions.Remove(session)\n\tdefer func() {\n\t\tif panicked := recover(); panicked != nil {\n\t\t\treturn\n\t\t}\n\t}()\n\n\tif client, err := login(session); err == nil {\n\t\tfor {\n\t\t\tmessage := session.Receive()\n\t\t\tif message == nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif request, err := UnmarshalRequest(message, client); err == nil {\n\t\t\t\tif action, err := ParseRequest(request); err == nil {\n\t\t\t\t\tif err := action(request); err != nil {\n\t\t\t\t\t\tlog.Println(err)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tlog.Println(\"Error in server.main.handler:\", err.Error())\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Println(\"Error in server.main.handler:\", err.Error())\n\t\t\t}\n\t\t}\n\t} else {\n\t\tsession.End()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage namespace\n\nimport (\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/client\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/client\/cache\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/controller\/framework\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/fields\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/labels\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/runtime\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/util\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/watch\"\n)\n\n\/\/ NamespaceManager is responsible for performing actions dependent upon a namespace phase\ntype NamespaceManager struct {\n\tcontroller *framework.Controller\n\tStopEverything chan struct{}\n}\n\n\/\/ NewNamespaceManager creates a new NamespaceManager\nfunc NewNamespaceManager(kubeClient client.Interface, resyncPeriod time.Duration) *NamespaceManager {\n\t_, controller := framework.NewInformer(\n\t\t&cache.ListWatch{\n\t\t\tListFunc: func() (runtime.Object, error) {\n\t\t\t\treturn kubeClient.Namespaces().List(labels.Everything(), fields.Everything())\n\t\t\t},\n\t\t\tWatchFunc: func(resourceVersion string) (watch.Interface, error) {\n\t\t\t\treturn kubeClient.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)\n\t\t\t},\n\t\t},\n\t\t&api.Namespace{},\n\t\tresyncPeriod,\n\t\tframework.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\tnamespace := obj.(*api.Namespace)\n\t\t\t\tsyncNamespace(kubeClient, *namespace)\n\t\t\t},\n\t\t\tUpdateFunc: func(oldObj, newObj interface{}) {\n\t\t\t\tnamespace := newObj.(*api.Namespace)\n\t\t\t\tsyncNamespace(kubeClient, *namespace)\n\t\t\t},\n\t\t},\n\t)\n\n\treturn &NamespaceManager{\n\t\tcontroller: controller,\n\t}\n}\n\n\/\/ Run begins observing the system. It starts a goroutine and returns immediately.\nfunc (nm *NamespaceManager) Run() {\n\tif nm.StopEverything == nil {\n\t\tnm.StopEverything = make(chan struct{})\n\t\tgo nm.controller.Run(nm.StopEverything)\n\t}\n}\n\n\/\/ Stop gracefully shutsdown this controller\nfunc (nm *NamespaceManager) Stop() {\n\tif nm.StopEverything != nil {\n\t\tclose(nm.StopEverything)\n\t\tnm.StopEverything = nil\n\t}\n}\n\n\/\/ finalized returns true if the spec.finalizers is empty list\nfunc finalized(namespace api.Namespace) bool {\n\treturn len(namespace.Spec.Finalizers) == 0\n}\n\n\/\/ finalize will finalize the namespace for kubernetes\nfunc finalize(kubeClient client.Interface, namespace api.Namespace) (*api.Namespace, error) {\n\tnamespaceFinalize := api.Namespace{}\n\tnamespaceFinalize.ObjectMeta = namespace.ObjectMeta\n\tnamespaceFinalize.Spec = namespace.Spec\n\tfinalizerSet := util.NewStringSet()\n\tfor i := range namespace.Spec.Finalizers {\n\t\tif namespace.Spec.Finalizers[i] != api.FinalizerKubernetes {\n\t\t\tfinalizerSet.Insert(string(namespace.Spec.Finalizers[i]))\n\t\t}\n\t}\n\tnamespaceFinalize.Spec.Finalizers = make([]api.FinalizerName, 0, len(finalizerSet))\n\tfor _, value := range finalizerSet.List() {\n\t\tnamespaceFinalize.Spec.Finalizers = append(namespaceFinalize.Spec.Finalizers, api.FinalizerName(value))\n\t}\n\treturn kubeClient.Namespaces().Finalize(&namespaceFinalize)\n}\n\n\/\/ deleteAllContent will delete all content known to the system in a namespace\nfunc deleteAllContent(kubeClient client.Interface, namespace string) (err error) {\n\terr = deleteServiceAccounts(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = deleteServices(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = deleteReplicationControllers(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = deletePods(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = deleteSecrets(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = deletePersistentVolumeClaims(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = deleteLimitRanges(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = deleteResourceQuotas(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = deleteEvents(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ syncNamespace makes namespace life-cycle decisions\nfunc syncNamespace(kubeClient client.Interface, namespace api.Namespace) (err error) {\n\tif namespace.DeletionTimestamp == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ if there is a deletion timestamp, and the status is not terminating, then update status\n\tif !namespace.DeletionTimestamp.IsZero() && namespace.Status.Phase != api.NamespaceTerminating {\n\t\tnewNamespace := api.Namespace{}\n\t\tnewNamespace.ObjectMeta = namespace.ObjectMeta\n\t\tnewNamespace.Status = namespace.Status\n\t\tnewNamespace.Status.Phase = api.NamespaceTerminating\n\t\tresult, err := kubeClient.Namespaces().Status(&newNamespace)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ work with the latest copy so we can proceed to clean up right away without another interval\n\t\tnamespace = *result\n\t}\n\n\t\/\/ if the namespace is already finalized, delete it\n\tif finalized(namespace) {\n\t\terr = kubeClient.Namespaces().Delete(namespace.Name)\n\t\treturn err\n\t}\n\n\t\/\/ there may still be content for us to remove\n\terr = deleteAllContent(kubeClient, namespace.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ we have removed content, so mark it finalized by us\n\tresult, err := finalize(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ now check if all finalizers have reported that we delete now\n\tif finalized(*result) {\n\t\terr = kubeClient.Namespaces().Delete(namespace.Name)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc deleteLimitRanges(kubeClient client.Interface, ns string) error {\n\titems, err := kubeClient.LimitRanges(ns).List(labels.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range items.Items {\n\t\terr := kubeClient.LimitRanges(ns).Delete(items.Items[i].Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deleteResourceQuotas(kubeClient client.Interface, ns string) error {\n\tresourceQuotas, err := kubeClient.ResourceQuotas(ns).List(labels.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range resourceQuotas.Items {\n\t\terr := kubeClient.ResourceQuotas(ns).Delete(resourceQuotas.Items[i].Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deleteServiceAccounts(kubeClient client.Interface, ns string) error {\n\titems, err := kubeClient.ServiceAccounts(ns).List(labels.Everything(), fields.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range items.Items {\n\t\terr := kubeClient.ServiceAccounts(ns).Delete(items.Items[i].Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deleteServices(kubeClient client.Interface, ns string) error {\n\titems, err := kubeClient.Services(ns).List(labels.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range items.Items {\n\t\terr := kubeClient.Services(ns).Delete(items.Items[i].Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deleteReplicationControllers(kubeClient client.Interface, ns string) error {\n\titems, err := kubeClient.ReplicationControllers(ns).List(labels.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range items.Items {\n\t\terr := kubeClient.ReplicationControllers(ns).Delete(items.Items[i].Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deletePods(kubeClient client.Interface, ns string) error {\n\titems, err := kubeClient.Pods(ns).List(labels.Everything(), fields.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range items.Items {\n\t\terr := kubeClient.Pods(ns).Delete(items.Items[i].Name, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deleteEvents(kubeClient client.Interface, ns string) error {\n\titems, err := kubeClient.Events(ns).List(labels.Everything(), fields.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range items.Items {\n\t\terr := kubeClient.Events(ns).Delete(items.Items[i].Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deleteSecrets(kubeClient client.Interface, ns string) error {\n\titems, err := kubeClient.Secrets(ns).List(labels.Everything(), fields.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range items.Items {\n\t\terr := kubeClient.Secrets(ns).Delete(items.Items[i].Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deletePersistentVolumeClaims(kubeClient client.Interface, ns string) error {\n\titems, err := kubeClient.PersistentVolumeClaims(ns).List(labels.Everything(), fields.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range items.Items {\n\t\terr := kubeClient.PersistentVolumeClaims(ns).Delete(items.Items[i].Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Namespace controller must wait for terminating resources<commit_after>\/*\nCopyright 2015 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage namespace\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/api\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/client\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/client\/cache\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/controller\/framework\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/fields\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/labels\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/runtime\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/util\"\n\t\"github.com\/GoogleCloudPlatform\/kubernetes\/pkg\/watch\"\n)\n\n\/\/ NamespaceManager is responsible for performing actions dependent upon a namespace phase\ntype NamespaceManager struct {\n\tcontroller *framework.Controller\n\tStopEverything chan struct{}\n}\n\n\/\/ NewNamespaceManager creates a new NamespaceManager\nfunc NewNamespaceManager(kubeClient client.Interface, resyncPeriod time.Duration) *NamespaceManager {\n\t_, controller := framework.NewInformer(\n\t\t&cache.ListWatch{\n\t\t\tListFunc: func() (runtime.Object, error) {\n\t\t\t\treturn kubeClient.Namespaces().List(labels.Everything(), fields.Everything())\n\t\t\t},\n\t\t\tWatchFunc: func(resourceVersion string) (watch.Interface, error) {\n\t\t\t\treturn kubeClient.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)\n\t\t\t},\n\t\t},\n\t\t&api.Namespace{},\n\t\tresyncPeriod,\n\t\tframework.ResourceEventHandlerFuncs{\n\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\tnamespace := obj.(*api.Namespace)\n\t\t\t\tsyncNamespace(kubeClient, *namespace)\n\t\t\t},\n\t\t\tUpdateFunc: func(oldObj, newObj interface{}) {\n\t\t\t\tnamespace := newObj.(*api.Namespace)\n\t\t\t\tsyncNamespace(kubeClient, *namespace)\n\t\t\t},\n\t\t},\n\t)\n\n\treturn &NamespaceManager{\n\t\tcontroller: controller,\n\t}\n}\n\n\/\/ Run begins observing the system. It starts a goroutine and returns immediately.\nfunc (nm *NamespaceManager) Run() {\n\tif nm.StopEverything == nil {\n\t\tnm.StopEverything = make(chan struct{})\n\t\tgo nm.controller.Run(nm.StopEverything)\n\t}\n}\n\n\/\/ Stop gracefully shutsdown this controller\nfunc (nm *NamespaceManager) Stop() {\n\tif nm.StopEverything != nil {\n\t\tclose(nm.StopEverything)\n\t\tnm.StopEverything = nil\n\t}\n}\n\n\/\/ finalized returns true if the spec.finalizers is empty list\nfunc finalized(namespace api.Namespace) bool {\n\treturn len(namespace.Spec.Finalizers) == 0\n}\n\n\/\/ finalize will finalize the namespace for kubernetes\nfunc finalize(kubeClient client.Interface, namespace api.Namespace) (*api.Namespace, error) {\n\tnamespaceFinalize := api.Namespace{}\n\tnamespaceFinalize.ObjectMeta = namespace.ObjectMeta\n\tnamespaceFinalize.Spec = namespace.Spec\n\tfinalizerSet := util.NewStringSet()\n\tfor i := range namespace.Spec.Finalizers {\n\t\tif namespace.Spec.Finalizers[i] != api.FinalizerKubernetes {\n\t\t\tfinalizerSet.Insert(string(namespace.Spec.Finalizers[i]))\n\t\t}\n\t}\n\tnamespaceFinalize.Spec.Finalizers = make([]api.FinalizerName, 0, len(finalizerSet))\n\tfor _, value := range finalizerSet.List() {\n\t\tnamespaceFinalize.Spec.Finalizers = append(namespaceFinalize.Spec.Finalizers, api.FinalizerName(value))\n\t}\n\treturn kubeClient.Namespaces().Finalize(&namespaceFinalize)\n}\n\n\/\/ deleteAllContent will delete all content known to the system in a namespace\nfunc deleteAllContent(kubeClient client.Interface, namespace string) (err error) {\n\terr = deleteServiceAccounts(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = deleteServices(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = deleteReplicationControllers(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\testimate, err := deletePods(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = deleteSecrets(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = deletePersistentVolumeClaims(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = deleteLimitRanges(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = deleteResourceQuotas(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = deleteEvents(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif estimate > 0 {\n\t\treturn fmt.Errorf(\"some resources are being gracefully deleted, estimate %d seconds\", estimate)\n\t}\n\treturn nil\n}\n\n\/\/ syncNamespace makes namespace life-cycle decisions\nfunc syncNamespace(kubeClient client.Interface, namespace api.Namespace) (err error) {\n\tif namespace.DeletionTimestamp == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ if there is a deletion timestamp, and the status is not terminating, then update status\n\tif !namespace.DeletionTimestamp.IsZero() && namespace.Status.Phase != api.NamespaceTerminating {\n\t\tnewNamespace := api.Namespace{}\n\t\tnewNamespace.ObjectMeta = namespace.ObjectMeta\n\t\tnewNamespace.Status = namespace.Status\n\t\tnewNamespace.Status.Phase = api.NamespaceTerminating\n\t\tresult, err := kubeClient.Namespaces().Status(&newNamespace)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ work with the latest copy so we can proceed to clean up right away without another interval\n\t\tnamespace = *result\n\t}\n\n\t\/\/ if the namespace is already finalized, delete it\n\tif finalized(namespace) {\n\t\terr = kubeClient.Namespaces().Delete(namespace.Name)\n\t\treturn err\n\t}\n\n\t\/\/ there may still be content for us to remove\n\terr = deleteAllContent(kubeClient, namespace.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ we have removed content, so mark it finalized by us\n\tresult, err := finalize(kubeClient, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ now check if all finalizers have reported that we delete now\n\tif finalized(*result) {\n\t\terr = kubeClient.Namespaces().Delete(namespace.Name)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc deleteLimitRanges(kubeClient client.Interface, ns string) error {\n\titems, err := kubeClient.LimitRanges(ns).List(labels.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range items.Items {\n\t\terr := kubeClient.LimitRanges(ns).Delete(items.Items[i].Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deleteResourceQuotas(kubeClient client.Interface, ns string) error {\n\tresourceQuotas, err := kubeClient.ResourceQuotas(ns).List(labels.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range resourceQuotas.Items {\n\t\terr := kubeClient.ResourceQuotas(ns).Delete(resourceQuotas.Items[i].Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deleteServiceAccounts(kubeClient client.Interface, ns string) error {\n\titems, err := kubeClient.ServiceAccounts(ns).List(labels.Everything(), fields.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range items.Items {\n\t\terr := kubeClient.ServiceAccounts(ns).Delete(items.Items[i].Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deleteServices(kubeClient client.Interface, ns string) error {\n\titems, err := kubeClient.Services(ns).List(labels.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range items.Items {\n\t\terr := kubeClient.Services(ns).Delete(items.Items[i].Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deleteReplicationControllers(kubeClient client.Interface, ns string) error {\n\titems, err := kubeClient.ReplicationControllers(ns).List(labels.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range items.Items {\n\t\terr := kubeClient.ReplicationControllers(ns).Delete(items.Items[i].Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deletePods(kubeClient client.Interface, ns string) (int64, error) {\n\titems, err := kubeClient.Pods(ns).List(labels.Everything(), fields.Everything())\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\testimate := int64(0)\n\tfor i := range items.Items {\n\t\tif items.Items[i].Spec.TerminationGracePeriodSeconds != nil {\n\t\t\tgrace := *items.Items[i].Spec.TerminationGracePeriodSeconds\n\t\t\tif grace > estimate {\n\t\t\t\testimate = grace\n\t\t\t}\n\t\t}\n\t\terr := kubeClient.Pods(ns).Delete(items.Items[i].Name, nil)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn estimate, nil\n}\n\nfunc deleteEvents(kubeClient client.Interface, ns string) error {\n\titems, err := kubeClient.Events(ns).List(labels.Everything(), fields.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range items.Items {\n\t\terr := kubeClient.Events(ns).Delete(items.Items[i].Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deleteSecrets(kubeClient client.Interface, ns string) error {\n\titems, err := kubeClient.Secrets(ns).List(labels.Everything(), fields.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range items.Items {\n\t\terr := kubeClient.Secrets(ns).Delete(items.Items[i].Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc deletePersistentVolumeClaims(kubeClient client.Interface, ns string) error {\n\titems, err := kubeClient.PersistentVolumeClaims(ns).List(labels.Everything(), fields.Everything())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := range items.Items {\n\t\terr := kubeClient.PersistentVolumeClaims(ns).Delete(items.Items[i].Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage etcd\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/coreos\/go-etcd\/etcd\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/latest\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/rest\/resttest\"\n\t\"k8s.io\/kubernetes\/pkg\/expapi\"\n\t\"k8s.io\/kubernetes\/pkg\/fields\"\n\t\"k8s.io\/kubernetes\/pkg\/labels\"\n\tetcdgeneric \"k8s.io\/kubernetes\/pkg\/registry\/generic\/etcd\"\n\t\"k8s.io\/kubernetes\/pkg\/registry\/registrytest\"\n\t\"k8s.io\/kubernetes\/pkg\/runtime\"\n\tetcdstorage \"k8s.io\/kubernetes\/pkg\/storage\/etcd\"\n\t\"k8s.io\/kubernetes\/pkg\/tools\"\n\t\"k8s.io\/kubernetes\/pkg\/tools\/etcdtest\"\n)\n\nconst (\n\tPASS = iota\n\tFAIL\n)\n\nfunc newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) {\n\tetcdStorage, fakeClient := registrytest.NewEtcdStorage(t)\n\treturn NewREST(etcdStorage), fakeClient\n}\n\n\/\/ createController is a helper function that returns a controller with the updated resource version.\nfunc createController(storage *REST, dc expapi.Daemon, t *testing.T) (expapi.Daemon, error) {\n\tctx := api.WithNamespace(api.NewContext(), dc.Namespace)\n\tobj, err := storage.Create(ctx, &dc)\n\tif err != nil {\n\t\tt.Errorf(\"Failed to create controller, %v\", err)\n\t}\n\tnewDc := obj.(*expapi.Daemon)\n\treturn *newDc, nil\n}\n\nfunc validNewDaemon() *expapi.Daemon {\n\treturn &expapi.Daemon{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: \"foo\",\n\t\t\tNamespace: api.NamespaceDefault,\n\t\t},\n\t\tSpec: expapi.DaemonSpec{\n\t\t\tSelector: map[string]string{\"a\": \"b\"},\n\t\t\tTemplate: &api.PodTemplateSpec{\n\t\t\t\tObjectMeta: api.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\"a\": \"b\"},\n\t\t\t\t},\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tContainers: []api.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"test\",\n\t\t\t\t\t\t\tImage: \"test_image\",\n\t\t\t\t\t\t\tImagePullPolicy: api.PullIfNotPresent,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tRestartPolicy: api.RestartPolicyAlways,\n\t\t\t\t\tDNSPolicy: api.DNSClusterFirst,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nvar validDaemon = *validNewDaemon()\n\nfunc TestCreate(t *testing.T) {\n\tstorage, fakeClient := newStorage(t)\n\ttest := resttest.New(t, storage, fakeClient.SetError)\n\tcontroller := validNewDaemon()\n\tcontroller.ObjectMeta = api.ObjectMeta{}\n\ttest.TestCreate(\n\t\t\/\/ valid\n\t\tcontroller,\n\t\tfunc(ctx api.Context, obj runtime.Object) error {\n\t\t\treturn registrytest.SetObject(fakeClient, storage.KeyFunc, ctx, obj)\n\t\t},\n\t\tfunc(ctx api.Context, obj runtime.Object) (runtime.Object, error) {\n\t\t\treturn registrytest.GetObject(fakeClient, storage.KeyFunc, storage.NewFunc, ctx, obj)\n\t\t},\n\t\t\/\/ invalid (invalid selector)\n\t\t&expapi.Daemon{\n\t\t\tSpec: expapi.DaemonSpec{\n\t\t\t\tSelector: map[string]string{},\n\t\t\t\tTemplate: validDaemon.Spec.Template,\n\t\t\t},\n\t\t},\n\t)\n}\n\nfunc TestUpdate(t *testing.T) {\n\tstorage, fakeClient := newStorage(t)\n\ttest := resttest.New(t, storage, fakeClient.SetError)\n\ttest.TestUpdate(\n\t\t\/\/ valid\n\t\tvalidNewDaemon(),\n\t\tfunc(ctx api.Context, obj runtime.Object) error {\n\t\t\treturn registrytest.SetObject(fakeClient, storage.KeyFunc, ctx, obj)\n\t\t},\n\t\tfunc(resourceVersion uint64) {\n\t\t\tregistrytest.SetResourceVersion(fakeClient, resourceVersion)\n\t\t},\n\t\tfunc(ctx api.Context, obj runtime.Object) (runtime.Object, error) {\n\t\t\treturn registrytest.GetObject(fakeClient, storage.KeyFunc, storage.NewFunc, ctx, obj)\n\t\t},\n\t\t\/\/ updateFunc\n\t\tfunc(obj runtime.Object) runtime.Object {\n\t\t\tobject := obj.(*expapi.Daemon)\n\t\t\tobject.Spec.Template.Spec.NodeSelector = map[string]string{\"c\": \"d\"}\n\t\t\treturn object\n\t\t},\n\t\t\/\/ invalid updateFunc\n\t\tfunc(obj runtime.Object) runtime.Object {\n\t\t\tobject := obj.(*expapi.Daemon)\n\t\t\tobject.UID = \"newUID\"\n\t\t\treturn object\n\t\t},\n\t\tfunc(obj runtime.Object) runtime.Object {\n\t\t\tobject := obj.(*expapi.Daemon)\n\t\t\tobject.Name = \"\"\n\t\t\treturn object\n\t\t},\n\t\tfunc(obj runtime.Object) runtime.Object {\n\t\t\tobject := obj.(*expapi.Daemon)\n\t\t\tobject.Spec.Template.Spec.RestartPolicy = api.RestartPolicyOnFailure\n\t\t\treturn object\n\t\t},\n\t\tfunc(obj runtime.Object) runtime.Object {\n\t\t\tobject := obj.(*expapi.Daemon)\n\t\t\tobject.Spec.Selector = map[string]string{}\n\t\t\treturn object\n\t\t},\n\t)\n}\n\nfunc TestEtcdGetController(t *testing.T) {\n\tstorage, fakeClient := newStorage(t)\n\ttest := resttest.New(t, storage, fakeClient.SetError)\n\ttest.TestGet(validNewDaemon())\n}\n\nfunc TestEtcdListControllers(t *testing.T) {\n\tstorage, fakeClient := newStorage(t)\n\ttest := resttest.New(t, storage, fakeClient.SetError)\n\tkey := etcdtest.AddPrefix(storage.KeyRootFunc(test.TestContext()))\n\ttest.TestList(\n\t\tvalidNewDaemon(),\n\t\tfunc(objects []runtime.Object) []runtime.Object {\n\t\t\treturn registrytest.SetObjectsForKey(fakeClient, key, objects)\n\t\t},\n\t\tfunc(resourceVersion uint64) {\n\t\t\tregistrytest.SetResourceVersion(fakeClient, resourceVersion)\n\t\t})\n}\n\nfunc TestEtcdDeleteController(t *testing.T) {\n\tctx := api.NewDefaultContext()\n\tstorage, fakeClient := newStorage(t)\n\tkey, err := storage.KeyFunc(ctx, validDaemon.Name)\n\tkey = etcdtest.AddPrefix(key)\n\n\tfakeClient.Set(key, runtime.EncodeOrDie(latest.Codec, validNewDaemon()), 0)\n\tobj, err := storage.Delete(ctx, validDaemon.Name, nil)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\tif status, ok := obj.(*api.Status); !ok {\n\t\tt.Errorf(\"Expected status of delete, got %#v\", status)\n\t} else if status.Status != api.StatusSuccess {\n\t\tt.Errorf(\"Expected success, got %#v\", status.Status)\n\t}\n\tif len(fakeClient.DeletedKeys) != 1 {\n\t\tt.Errorf(\"Expected 1 delete, found %#v\", fakeClient.DeletedKeys)\n\t}\n\tif fakeClient.DeletedKeys[0] != key {\n\t\tt.Errorf(\"Unexpected key: %s, expected %s\", fakeClient.DeletedKeys[0], key)\n\t}\n}\n\nfunc TestEtcdWatchController(t *testing.T) {\n\tctx := api.NewDefaultContext()\n\tstorage, fakeClient := newStorage(t)\n\twatching, err := storage.Watch(ctx,\n\t\tlabels.Everything(),\n\t\tfields.Everything(),\n\t\t\"1\",\n\t)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tfakeClient.WaitForWatchCompletion()\n\n\tselect {\n\tcase _, ok := <-watching.ResultChan():\n\t\tif !ok {\n\t\t\tt.Errorf(\"watching channel should be open\")\n\t\t}\n\tdefault:\n\t}\n\tfakeClient.WatchInjectError <- nil\n\tif _, ok := <-watching.ResultChan(); ok {\n\t\tt.Errorf(\"watching channel should be closed\")\n\t}\n\twatching.Stop()\n}\n\n\/\/ Tests that we can watch for the creation of daemon controllers with specified labels.\nfunc TestEtcdWatchControllersMatch(t *testing.T) {\n\tctx := api.WithNamespace(api.NewDefaultContext(), validDaemon.Namespace)\n\tstorage, fakeClient := newStorage(t)\n\tfakeClient.ExpectNotFoundGet(etcdgeneric.NamespaceKeyRootFunc(ctx, \"\/registry\/pods\"))\n\n\twatching, err := storage.Watch(ctx,\n\t\tlabels.SelectorFromSet(validDaemon.Spec.Selector),\n\t\tfields.Everything(),\n\t\t\"1\",\n\t)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tfakeClient.WaitForWatchCompletion()\n\n\t\/\/ The watcher above is waiting for these Labels, on receiving them it should\n\t\/\/ apply the ControllerStatus decorator, which lists pods, causing a query against\n\t\/\/ the \/registry\/pods endpoint of the etcd client.\n\tcontroller := &expapi.Daemon{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: \"foo\",\n\t\t\tLabels: validDaemon.Spec.Selector,\n\t\t\tNamespace: \"default\",\n\t\t},\n\t}\n\tcontrollerBytes, _ := latest.Codec.Encode(controller)\n\tfakeClient.WatchResponse <- &etcd.Response{\n\t\tAction: \"create\",\n\t\tNode: &etcd.Node{\n\t\t\tValue: string(controllerBytes),\n\t\t},\n\t}\n\tselect {\n\tcase _, ok := <-watching.ResultChan():\n\t\tif !ok {\n\t\t\tt.Errorf(\"watching channel should be open\")\n\t\t}\n\tcase <-time.After(time.Millisecond * 100):\n\t\tt.Error(\"unexpected timeout from result channel\")\n\t}\n\twatching.Stop()\n}\n\n\/\/ Tests that we can watch for daemon controllers with specified fields.\nfunc TestEtcdWatchControllersFields(t *testing.T) {\n\tctx := api.WithNamespace(api.NewDefaultContext(), validDaemon.Namespace)\n\tstorage, fakeClient := newStorage(t)\n\tfakeClient.ExpectNotFoundGet(etcdgeneric.NamespaceKeyRootFunc(ctx, \"\/registry\/pods\"))\n\n\ttestFieldMap := map[int][]fields.Set{\n\t\tPASS: {\n\t\t\t{\"metadata.name\": \"foo\"},\n\t\t},\n\t\tFAIL: {\n\t\t\t{\"metadata.name\": \"bar\"},\n\t\t\t{\"name\": \"foo\"},\n\t\t},\n\t}\n\ttestEtcdActions := []string{\n\t\tetcdstorage.EtcdCreate,\n\t\tetcdstorage.EtcdSet,\n\t\tetcdstorage.EtcdCAS,\n\t\tetcdstorage.EtcdDelete}\n\n\tcontroller := &expapi.Daemon{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: \"foo\",\n\t\t\tLabels: validDaemon.Spec.Selector,\n\t\t\tNamespace: \"default\",\n\t\t},\n\t\tStatus: expapi.DaemonStatus{\n\t\t\tCurrentNumberScheduled: 2,\n\t\t\tNumberMisscheduled: 1,\n\t\t\tDesiredNumberScheduled: 4,\n\t\t},\n\t}\n\tcontrollerBytes, _ := latest.Codec.Encode(controller)\n\n\tfor expectedResult, fieldSet := range testFieldMap {\n\t\tfor _, field := range fieldSet {\n\t\t\tfor _, action := range testEtcdActions {\n\t\t\t\twatching, err := storage.Watch(ctx,\n\t\t\t\t\tlabels.Everything(),\n\t\t\t\t\tfield.AsSelector(),\n\t\t\t\t\t\"1\",\n\t\t\t\t)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t\t\t}\n\t\t\t\tvar prevNode *etcd.Node = nil\n\t\t\t\tnode := &etcd.Node{\n\t\t\t\t\tValue: string(controllerBytes),\n\t\t\t\t}\n\t\t\t\tif action == etcdstorage.EtcdDelete {\n\t\t\t\t\tprevNode = node\n\t\t\t\t}\n\t\t\t\tfakeClient.WaitForWatchCompletion()\n\t\t\t\tfakeClient.WatchResponse <- &etcd.Response{\n\t\t\t\t\tAction: action,\n\t\t\t\t\tNode: node,\n\t\t\t\t\tPrevNode: prevNode,\n\t\t\t\t}\n\n\t\t\t\tselect {\n\t\t\t\tcase r, ok := <-watching.ResultChan():\n\t\t\t\t\tif expectedResult == FAIL {\n\t\t\t\t\t\tt.Errorf(\"Unexpected result from channel %#v\", r)\n\t\t\t\t\t}\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tt.Errorf(\"watching channel should be open\")\n\t\t\t\t\t}\n\t\t\t\tcase <-time.After(time.Millisecond * 100):\n\t\t\t\t\tif expectedResult == PASS {\n\t\t\t\t\t\tt.Error(\"unexpected timeout from result channel\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\twatching.Stop()\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestEtcdWatchControllersNotMatch(t *testing.T) {\n\tctx := api.NewDefaultContext()\n\tstorage, fakeClient := newStorage(t)\n\tfakeClient.ExpectNotFoundGet(etcdgeneric.NamespaceKeyRootFunc(ctx, \"\/registry\/pods\"))\n\n\twatching, err := storage.Watch(ctx,\n\t\tlabels.SelectorFromSet(labels.Set{\"name\": \"foo\"}),\n\t\tfields.Everything(),\n\t\t\"1\",\n\t)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tfakeClient.WaitForWatchCompletion()\n\n\tcontroller := &expapi.Daemon{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: \"bar\",\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"name\": \"bar\",\n\t\t\t},\n\t\t},\n\t}\n\tcontrollerBytes, _ := latest.Codec.Encode(controller)\n\tfakeClient.WatchResponse <- &etcd.Response{\n\t\tAction: \"create\",\n\t\tNode: &etcd.Node{\n\t\t\tValue: string(controllerBytes),\n\t\t},\n\t}\n\n\tselect {\n\tcase <-watching.ResultChan():\n\t\tt.Error(\"unexpected result from result channel\")\n\tcase <-time.After(time.Millisecond * 100):\n\t\t\/\/ expected case\n\t}\n}\n\nfunc TestDelete(t *testing.T) {\n\tctx := api.NewDefaultContext()\n\tstorage, fakeClient := newStorage(t)\n\ttest := resttest.New(t, storage, fakeClient.SetError)\n\tkey, _ := storage.KeyFunc(ctx, validDaemon.Name)\n\tkey = etcdtest.AddPrefix(key)\n\n\tcreateFn := func() runtime.Object {\n\t\tdc := validNewDaemon()\n\t\tdc.ResourceVersion = \"1\"\n\t\tfakeClient.Data[key] = tools.EtcdResponseWithError{\n\t\t\tR: &etcd.Response{\n\t\t\t\tNode: &etcd.Node{\n\t\t\t\t\tValue: runtime.EncodeOrDie(latest.Codec, dc),\n\t\t\t\t\tModifiedIndex: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\treturn dc\n\t}\n\tgracefulSetFn := func() bool {\n\t\t\/\/ If the controller is still around after trying to delete either the delete\n\t\t\/\/ failed, or we're deleting it gracefully.\n\t\tif fakeClient.Data[key].R.Node != nil {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\ttest.TestDelete(createFn, gracefulSetFn)\n}\n<commit_msg>Use testapi.Codec in daemon etcd tests<commit_after>\/*\nCopyright 2015 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage etcd\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/coreos\/go-etcd\/etcd\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/rest\/resttest\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/testapi\"\n\t\"k8s.io\/kubernetes\/pkg\/expapi\"\n\t\"k8s.io\/kubernetes\/pkg\/fields\"\n\t\"k8s.io\/kubernetes\/pkg\/labels\"\n\tetcdgeneric \"k8s.io\/kubernetes\/pkg\/registry\/generic\/etcd\"\n\t\"k8s.io\/kubernetes\/pkg\/registry\/registrytest\"\n\t\"k8s.io\/kubernetes\/pkg\/runtime\"\n\tetcdstorage \"k8s.io\/kubernetes\/pkg\/storage\/etcd\"\n\t\"k8s.io\/kubernetes\/pkg\/tools\"\n\t\"k8s.io\/kubernetes\/pkg\/tools\/etcdtest\"\n)\n\nconst (\n\tPASS = iota\n\tFAIL\n)\n\nfunc newStorage(t *testing.T) (*REST, *tools.FakeEtcdClient) {\n\tetcdStorage, fakeClient := registrytest.NewEtcdStorage(t)\n\treturn NewREST(etcdStorage), fakeClient\n}\n\n\/\/ createController is a helper function that returns a controller with the updated resource version.\nfunc createController(storage *REST, dc expapi.Daemon, t *testing.T) (expapi.Daemon, error) {\n\tctx := api.WithNamespace(api.NewContext(), dc.Namespace)\n\tobj, err := storage.Create(ctx, &dc)\n\tif err != nil {\n\t\tt.Errorf(\"Failed to create controller, %v\", err)\n\t}\n\tnewDc := obj.(*expapi.Daemon)\n\treturn *newDc, nil\n}\n\nfunc validNewDaemon() *expapi.Daemon {\n\treturn &expapi.Daemon{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: \"foo\",\n\t\t\tNamespace: api.NamespaceDefault,\n\t\t},\n\t\tSpec: expapi.DaemonSpec{\n\t\t\tSelector: map[string]string{\"a\": \"b\"},\n\t\t\tTemplate: &api.PodTemplateSpec{\n\t\t\t\tObjectMeta: api.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\"a\": \"b\"},\n\t\t\t\t},\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tContainers: []api.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"test\",\n\t\t\t\t\t\t\tImage: \"test_image\",\n\t\t\t\t\t\t\tImagePullPolicy: api.PullIfNotPresent,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tRestartPolicy: api.RestartPolicyAlways,\n\t\t\t\t\tDNSPolicy: api.DNSClusterFirst,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nvar validDaemon = *validNewDaemon()\n\nfunc TestCreate(t *testing.T) {\n\tstorage, fakeClient := newStorage(t)\n\ttest := resttest.New(t, storage, fakeClient.SetError)\n\tcontroller := validNewDaemon()\n\tcontroller.ObjectMeta = api.ObjectMeta{}\n\ttest.TestCreate(\n\t\t\/\/ valid\n\t\tcontroller,\n\t\tfunc(ctx api.Context, obj runtime.Object) error {\n\t\t\treturn registrytest.SetObject(fakeClient, storage.KeyFunc, ctx, obj)\n\t\t},\n\t\tfunc(ctx api.Context, obj runtime.Object) (runtime.Object, error) {\n\t\t\treturn registrytest.GetObject(fakeClient, storage.KeyFunc, storage.NewFunc, ctx, obj)\n\t\t},\n\t\t\/\/ invalid (invalid selector)\n\t\t&expapi.Daemon{\n\t\t\tSpec: expapi.DaemonSpec{\n\t\t\t\tSelector: map[string]string{},\n\t\t\t\tTemplate: validDaemon.Spec.Template,\n\t\t\t},\n\t\t},\n\t)\n}\n\nfunc TestUpdate(t *testing.T) {\n\tstorage, fakeClient := newStorage(t)\n\ttest := resttest.New(t, storage, fakeClient.SetError)\n\ttest.TestUpdate(\n\t\t\/\/ valid\n\t\tvalidNewDaemon(),\n\t\tfunc(ctx api.Context, obj runtime.Object) error {\n\t\t\treturn registrytest.SetObject(fakeClient, storage.KeyFunc, ctx, obj)\n\t\t},\n\t\tfunc(resourceVersion uint64) {\n\t\t\tregistrytest.SetResourceVersion(fakeClient, resourceVersion)\n\t\t},\n\t\tfunc(ctx api.Context, obj runtime.Object) (runtime.Object, error) {\n\t\t\treturn registrytest.GetObject(fakeClient, storage.KeyFunc, storage.NewFunc, ctx, obj)\n\t\t},\n\t\t\/\/ updateFunc\n\t\tfunc(obj runtime.Object) runtime.Object {\n\t\t\tobject := obj.(*expapi.Daemon)\n\t\t\tobject.Spec.Template.Spec.NodeSelector = map[string]string{\"c\": \"d\"}\n\t\t\treturn object\n\t\t},\n\t\t\/\/ invalid updateFunc\n\t\tfunc(obj runtime.Object) runtime.Object {\n\t\t\tobject := obj.(*expapi.Daemon)\n\t\t\tobject.UID = \"newUID\"\n\t\t\treturn object\n\t\t},\n\t\tfunc(obj runtime.Object) runtime.Object {\n\t\t\tobject := obj.(*expapi.Daemon)\n\t\t\tobject.Name = \"\"\n\t\t\treturn object\n\t\t},\n\t\tfunc(obj runtime.Object) runtime.Object {\n\t\t\tobject := obj.(*expapi.Daemon)\n\t\t\tobject.Spec.Template.Spec.RestartPolicy = api.RestartPolicyOnFailure\n\t\t\treturn object\n\t\t},\n\t\tfunc(obj runtime.Object) runtime.Object {\n\t\t\tobject := obj.(*expapi.Daemon)\n\t\t\tobject.Spec.Selector = map[string]string{}\n\t\t\treturn object\n\t\t},\n\t)\n}\n\nfunc TestEtcdGetController(t *testing.T) {\n\tstorage, fakeClient := newStorage(t)\n\ttest := resttest.New(t, storage, fakeClient.SetError)\n\ttest.TestGet(validNewDaemon())\n}\n\nfunc TestEtcdListControllers(t *testing.T) {\n\tstorage, fakeClient := newStorage(t)\n\ttest := resttest.New(t, storage, fakeClient.SetError)\n\tkey := etcdtest.AddPrefix(storage.KeyRootFunc(test.TestContext()))\n\ttest.TestList(\n\t\tvalidNewDaemon(),\n\t\tfunc(objects []runtime.Object) []runtime.Object {\n\t\t\treturn registrytest.SetObjectsForKey(fakeClient, key, objects)\n\t\t},\n\t\tfunc(resourceVersion uint64) {\n\t\t\tregistrytest.SetResourceVersion(fakeClient, resourceVersion)\n\t\t})\n}\n\nfunc TestEtcdDeleteController(t *testing.T) {\n\tctx := api.NewDefaultContext()\n\tstorage, fakeClient := newStorage(t)\n\tkey, err := storage.KeyFunc(ctx, validDaemon.Name)\n\tkey = etcdtest.AddPrefix(key)\n\n\tfakeClient.Set(key, runtime.EncodeOrDie(testapi.Codec(), validNewDaemon()), 0)\n\tobj, err := storage.Delete(ctx, validDaemon.Name, nil)\n\tif err != nil {\n\t\tt.Errorf(\"unexpected error: %v\", err)\n\t}\n\tif status, ok := obj.(*api.Status); !ok {\n\t\tt.Errorf(\"Expected status of delete, got %#v\", status)\n\t} else if status.Status != api.StatusSuccess {\n\t\tt.Errorf(\"Expected success, got %#v\", status.Status)\n\t}\n\tif len(fakeClient.DeletedKeys) != 1 {\n\t\tt.Errorf(\"Expected 1 delete, found %#v\", fakeClient.DeletedKeys)\n\t}\n\tif fakeClient.DeletedKeys[0] != key {\n\t\tt.Errorf(\"Unexpected key: %s, expected %s\", fakeClient.DeletedKeys[0], key)\n\t}\n}\n\nfunc TestEtcdWatchController(t *testing.T) {\n\tctx := api.NewDefaultContext()\n\tstorage, fakeClient := newStorage(t)\n\twatching, err := storage.Watch(ctx,\n\t\tlabels.Everything(),\n\t\tfields.Everything(),\n\t\t\"1\",\n\t)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tfakeClient.WaitForWatchCompletion()\n\n\tselect {\n\tcase _, ok := <-watching.ResultChan():\n\t\tif !ok {\n\t\t\tt.Errorf(\"watching channel should be open\")\n\t\t}\n\tdefault:\n\t}\n\tfakeClient.WatchInjectError <- nil\n\tif _, ok := <-watching.ResultChan(); ok {\n\t\tt.Errorf(\"watching channel should be closed\")\n\t}\n\twatching.Stop()\n}\n\n\/\/ Tests that we can watch for the creation of daemon controllers with specified labels.\nfunc TestEtcdWatchControllersMatch(t *testing.T) {\n\tctx := api.WithNamespace(api.NewDefaultContext(), validDaemon.Namespace)\n\tstorage, fakeClient := newStorage(t)\n\tfakeClient.ExpectNotFoundGet(etcdgeneric.NamespaceKeyRootFunc(ctx, \"\/registry\/pods\"))\n\n\twatching, err := storage.Watch(ctx,\n\t\tlabels.SelectorFromSet(validDaemon.Spec.Selector),\n\t\tfields.Everything(),\n\t\t\"1\",\n\t)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tfakeClient.WaitForWatchCompletion()\n\n\t\/\/ The watcher above is waiting for these Labels, on receiving them it should\n\t\/\/ apply the ControllerStatus decorator, which lists pods, causing a query against\n\t\/\/ the \/registry\/pods endpoint of the etcd client.\n\tcontroller := &expapi.Daemon{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: \"foo\",\n\t\t\tLabels: validDaemon.Spec.Selector,\n\t\t\tNamespace: \"default\",\n\t\t},\n\t}\n\tcontrollerBytes, _ := testapi.Codec().Encode(controller)\n\tfakeClient.WatchResponse <- &etcd.Response{\n\t\tAction: \"create\",\n\t\tNode: &etcd.Node{\n\t\t\tValue: string(controllerBytes),\n\t\t},\n\t}\n\tselect {\n\tcase _, ok := <-watching.ResultChan():\n\t\tif !ok {\n\t\t\tt.Errorf(\"watching channel should be open\")\n\t\t}\n\tcase <-time.After(time.Millisecond * 100):\n\t\tt.Error(\"unexpected timeout from result channel\")\n\t}\n\twatching.Stop()\n}\n\n\/\/ Tests that we can watch for daemon controllers with specified fields.\nfunc TestEtcdWatchControllersFields(t *testing.T) {\n\tctx := api.WithNamespace(api.NewDefaultContext(), validDaemon.Namespace)\n\tstorage, fakeClient := newStorage(t)\n\tfakeClient.ExpectNotFoundGet(etcdgeneric.NamespaceKeyRootFunc(ctx, \"\/registry\/pods\"))\n\n\ttestFieldMap := map[int][]fields.Set{\n\t\tPASS: {\n\t\t\t{\"metadata.name\": \"foo\"},\n\t\t},\n\t\tFAIL: {\n\t\t\t{\"metadata.name\": \"bar\"},\n\t\t\t{\"name\": \"foo\"},\n\t\t},\n\t}\n\ttestEtcdActions := []string{\n\t\tetcdstorage.EtcdCreate,\n\t\tetcdstorage.EtcdSet,\n\t\tetcdstorage.EtcdCAS,\n\t\tetcdstorage.EtcdDelete}\n\n\tcontroller := &expapi.Daemon{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: \"foo\",\n\t\t\tLabels: validDaemon.Spec.Selector,\n\t\t\tNamespace: \"default\",\n\t\t},\n\t\tStatus: expapi.DaemonStatus{\n\t\t\tCurrentNumberScheduled: 2,\n\t\t\tNumberMisscheduled: 1,\n\t\t\tDesiredNumberScheduled: 4,\n\t\t},\n\t}\n\tcontrollerBytes, _ := testapi.Codec().Encode(controller)\n\n\tfor expectedResult, fieldSet := range testFieldMap {\n\t\tfor _, field := range fieldSet {\n\t\t\tfor _, action := range testEtcdActions {\n\t\t\t\twatching, err := storage.Watch(ctx,\n\t\t\t\t\tlabels.Everything(),\n\t\t\t\t\tfield.AsSelector(),\n\t\t\t\t\t\"1\",\n\t\t\t\t)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t\t\t\t}\n\t\t\t\tvar prevNode *etcd.Node = nil\n\t\t\t\tnode := &etcd.Node{\n\t\t\t\t\tValue: string(controllerBytes),\n\t\t\t\t}\n\t\t\t\tif action == etcdstorage.EtcdDelete {\n\t\t\t\t\tprevNode = node\n\t\t\t\t}\n\t\t\t\tfakeClient.WaitForWatchCompletion()\n\t\t\t\tfakeClient.WatchResponse <- &etcd.Response{\n\t\t\t\t\tAction: action,\n\t\t\t\t\tNode: node,\n\t\t\t\t\tPrevNode: prevNode,\n\t\t\t\t}\n\n\t\t\t\tselect {\n\t\t\t\tcase r, ok := <-watching.ResultChan():\n\t\t\t\t\tif expectedResult == FAIL {\n\t\t\t\t\t\tt.Errorf(\"Unexpected result from channel %#v\", r)\n\t\t\t\t\t}\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tt.Errorf(\"watching channel should be open\")\n\t\t\t\t\t}\n\t\t\t\tcase <-time.After(time.Millisecond * 100):\n\t\t\t\t\tif expectedResult == PASS {\n\t\t\t\t\t\tt.Error(\"unexpected timeout from result channel\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\twatching.Stop()\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestEtcdWatchControllersNotMatch(t *testing.T) {\n\tctx := api.NewDefaultContext()\n\tstorage, fakeClient := newStorage(t)\n\tfakeClient.ExpectNotFoundGet(etcdgeneric.NamespaceKeyRootFunc(ctx, \"\/registry\/pods\"))\n\n\twatching, err := storage.Watch(ctx,\n\t\tlabels.SelectorFromSet(labels.Set{\"name\": \"foo\"}),\n\t\tfields.Everything(),\n\t\t\"1\",\n\t)\n\tif err != nil {\n\t\tt.Fatalf(\"unexpected error: %v\", err)\n\t}\n\tfakeClient.WaitForWatchCompletion()\n\n\tcontroller := &expapi.Daemon{\n\t\tObjectMeta: api.ObjectMeta{\n\t\t\tName: \"bar\",\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"name\": \"bar\",\n\t\t\t},\n\t\t},\n\t}\n\tcontrollerBytes, _ := testapi.Codec().Encode(controller)\n\tfakeClient.WatchResponse <- &etcd.Response{\n\t\tAction: \"create\",\n\t\tNode: &etcd.Node{\n\t\t\tValue: string(controllerBytes),\n\t\t},\n\t}\n\n\tselect {\n\tcase <-watching.ResultChan():\n\t\tt.Error(\"unexpected result from result channel\")\n\tcase <-time.After(time.Millisecond * 100):\n\t\t\/\/ expected case\n\t}\n}\n\nfunc TestDelete(t *testing.T) {\n\tctx := api.NewDefaultContext()\n\tstorage, fakeClient := newStorage(t)\n\ttest := resttest.New(t, storage, fakeClient.SetError)\n\tkey, _ := storage.KeyFunc(ctx, validDaemon.Name)\n\tkey = etcdtest.AddPrefix(key)\n\n\tcreateFn := func() runtime.Object {\n\t\tdc := validNewDaemon()\n\t\tdc.ResourceVersion = \"1\"\n\t\tfakeClient.Data[key] = tools.EtcdResponseWithError{\n\t\t\tR: &etcd.Response{\n\t\t\t\tNode: &etcd.Node{\n\t\t\t\t\tValue: runtime.EncodeOrDie(testapi.Codec(), dc),\n\t\t\t\t\tModifiedIndex: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\treturn dc\n\t}\n\tgracefulSetFn := func() bool {\n\t\t\/\/ If the controller is still around after trying to delete either the delete\n\t\t\/\/ failed, or we're deleting it gracefully.\n\t\tif fakeClient.Data[key].R.Node != nil {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\ttest.TestDelete(createFn, gracefulSetFn)\n}\n<|endoftext|>"} {"text":"<commit_before>package class\n\nfunc createVtable(class *Class) {\n\tsuperVtable := getSuperVtable(class)\n\tnewVirtualMethodCount := countNewVirtualMethod(class)\n\n\tnewVtable := make([]*Method, len(superVtable), len(superVtable) + newVirtualMethodCount)\n\tcopy(newVtable, superVtable)\n\n\tfor _, m := range class.methods {\n\t\tif isVirtualMethod(m) {\n\t\t\tif i := search(superVtable, m); i > -1 {\n\t\t\t\tnewVtable[i] = m \/\/ override\n\t\t\t} else {\n\t\t\t\tnewVtable = append(newVtable, m)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc getSuperVtable(class *Class) []*Method {\n\tif class.superClass != nil {\n\t\treturn class.superClass.vtable\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc countNewVirtualMethod(class *Class) int {\n\tsuperVtable := getSuperVtable(class)\n\n\tcount := 0\n\tfor _, m := range class.methods {\n\t\tif isVirtualMethod(m) && search(superVtable, m) < 0 {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}\n\nfunc isVirtualMethod(method *Method) bool {\n\treturn !method.IsStatic() &&\n\t\t!method.IsFinal() &&\n\t\t!method.IsPrivate() &&\n\t\tmethod.Name() != constructorName\n}\n\nfunc search(vtable []*Method, m *Method) int {\n\tfor i, vm := range vtable {\n\t\tif vm.name == m.name && vm.descriptor == m.descriptor {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n<commit_msg>code refactor<commit_after>package class\n\nfunc createVtable(class *Class) {\n\tsuperVtable := getSuperVtable(class)\n\tnewVirtualMethodCount := countNewVirtualMethod(class)\n\n\tnewCap := len(superVtable) + newVirtualMethodCount\n\tnewVtable := make([]*Method, len(superVtable), newCap)\n\tcopy(newVtable, superVtable)\n\n\tfor _, m := range class.methods {\n\t\tif isVirtualMethod(m) {\n\t\t\tif i := search(superVtable, m); i > -1 {\n\t\t\t\tnewVtable[i] = m \/\/ override\n\t\t\t} else {\n\t\t\t\tnewVtable = append(newVtable, m)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc getSuperVtable(class *Class) []*Method {\n\tif class.superClass != nil {\n\t\treturn class.superClass.vtable\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc countNewVirtualMethod(class *Class) int {\n\tsuperVtable := getSuperVtable(class)\n\n\tcount := 0\n\tfor _, m := range class.methods {\n\t\tif isVirtualMethod(m) && search(superVtable, m) < 0 {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}\n\nfunc isVirtualMethod(method *Method) bool {\n\treturn !method.IsStatic() &&\n\t\t!method.IsFinal() &&\n\t\t!method.IsPrivate() &&\n\t\tmethod.Name() != constructorName\n}\n\nfunc search(vtable []*Method, m *Method) int {\n\tfor i, vm := range vtable {\n\t\tif vm.name == m.name && vm.descriptor == m.descriptor {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"fmt\"\n\t\"net\/smtp\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst (\n\tADDR = \"127.0.0.1:9026\"\n\tNAME = \"mx.test.server\"\n)\n\nfunc with(t *testing.T, f func(Server)) {\n\ts, err := Listen(ADDR, NAME)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(time.Millisecond)\n\tf(s)\n\n\ts.Close()\n}\n\nfunc TestSenderRecipientBodyAndQuit(t *testing.T) {\n\tassert := assert.New(t)\n\n\twith(t, func(s Server) {\n\t\tc, err := smtp.Dial(ADDR)\n\t\tassert.Nil(err)\n\n\t\tassert.Nil(c.Mail(\"sender@example.org\"))\n\t\tassert.Nil(c.Rcpt(\"recipient@example.net\"))\n\n\t\twc, err := c.Data()\n\t\tassert.Nil(err)\n\t\t_, err = fmt.Fprintf(wc, \"This is the email body\")\n\t\tassert.Nil(err)\n\t\tassert.Nil(wc.Close())\n\n\t\tcalled := make(chan struct{})\n\t\ts.Handle(func(msg Message) {\n\t\t\tassert.Equal(\"sender@example.org\", msg.Sender)\n\t\t\tassert.Equal(1, len(msg.Recipients))\n\t\t\tassert.Equal(\"recipient@example.net\", msg.Recipients[0])\n\t\t\tassert.Equal(\"This is the email body\\n\", msg.Data)\n\t\t\tclose(called)\n\t\t})\n\n\t\tassert.Nil(c.Quit())\n\n\t\tselect {\n\t\tcase <-called:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Log(\"timed out\")\n\t\t\tt.Fail()\n\t\t}\n\t})\n}\n\nfunc TestSenderRecipientBodyAndQuitWithReset(t *testing.T) {\n\tassert := assert.New(t)\n\n\twith(t, func(s Server) {\n\t\tc, err := smtp.Dial(ADDR)\n\t\tassert.Nil(err)\n\n\t\tassert.Nil(c.Mail(\"sender@example.org\"))\n\t\tassert.Nil(c.Rcpt(\"recipient@example.net\"))\n\n\t\twc, err := c.Data()\n\t\tassert.Nil(err)\n\t\t_, err = fmt.Fprintf(wc, \"This is the email body\")\n\t\tassert.Nil(err)\n\t\tassert.Nil(wc.Close())\n\n\t\tassert.Nil(c.Reset())\n\n\t\tassert.Nil(c.Mail(\"sender2@example.org\"))\n\t\tassert.Nil(c.Rcpt(\"recipient2@example.net\"))\n\n\t\twc, err = c.Data()\n\t\tassert.Nil(err)\n\t\t_, err = fmt.Fprintf(wc, \"This is the email body2\")\n\t\tassert.Nil(err)\n\t\tassert.Nil(wc.Close())\n\n\t\tcalled := make(chan struct{})\n\t\ts.Handle(func(msg Message) {\n\t\t\tassert.Equal(\"sender2@example.org\", msg.Sender)\n\t\t\tassert.Equal(1, len(msg.Recipients))\n\t\t\tassert.Equal(\"recipient2@example.net\", msg.Recipients[0])\n\t\t\tassert.Equal(\"This is the email body2\\n\", msg.Data)\n\t\t\tclose(called)\n\t\t})\n\n\t\tcalled2 := make(chan struct{})\n\t\ts.Handle(func(msg Message) {\n\t\t\tassert.Equal(\"sender2@example.org\", msg.Sender)\n\t\t\tassert.Equal(1, len(msg.Recipients))\n\t\t\tassert.Equal(\"recipient2@example.net\", msg.Recipients[0])\n\t\t\tassert.Equal(\"This is the email body2\\n\", msg.Data)\n\t\t\tclose(called2)\n\t\t})\n\n\t\tassert.Nil(c.Quit())\n\n\t\tselect {\n\t\tcase <-called:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Log(\"timed out\")\n\t\t\tt.Fail()\n\t\t}\n\n\t\tselect {\n\t\tcase <-called2:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Log(\"timed out2\")\n\t\t\tt.Fail()\n\t\t}\n\t})\n}\n\nfunc TestVerify(t *testing.T) {\n\tassert := assert.New(t)\n\n\twith(t, func(s Server) {\n\t\tc, err := smtp.Dial(ADDR)\n\t\tassert.Nil(err)\n\n\t\tassert.Equal(c.Verify(\"sender@example.org\").Error(), \"502 Command not implemented%!(EXTRA []interface {}=[])\")\n\n\t\tassert.Nil(c.Quit())\n\t})\n}\n<commit_msg>Add test sending to multiple recipients<commit_after>package server\n\nimport (\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"fmt\"\n\t\"net\/smtp\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst (\n\tADDR = \"127.0.0.1:9026\"\n\tNAME = \"mx.test.server\"\n)\n\nfunc with(t *testing.T, f func(Server)) {\n\ts, err := Listen(ADDR, NAME)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(time.Millisecond)\n\tf(s)\n\n\ts.Close()\n}\n\nfunc TestSenderRecipientBodyAndQuit(t *testing.T) {\n\tassert := assert.New(t)\n\n\twith(t, func(s Server) {\n\t\tc, err := smtp.Dial(ADDR)\n\t\tassert.Nil(err)\n\n\t\tassert.Nil(c.Mail(\"sender@example.org\"))\n\t\tassert.Nil(c.Rcpt(\"recipient@example.net\"))\n\n\t\twc, err := c.Data()\n\t\tassert.Nil(err)\n\t\t_, err = fmt.Fprintf(wc, \"This is the email body\")\n\t\tassert.Nil(err)\n\t\tassert.Nil(wc.Close())\n\n\t\tcalled := make(chan struct{})\n\t\ts.Handle(func(msg Message) {\n\t\t\tassert.Equal(\"sender@example.org\", msg.Sender)\n\t\t\tassert.Equal(1, len(msg.Recipients))\n\t\t\tassert.Equal(\"recipient@example.net\", msg.Recipients[0])\n\t\t\tassert.Equal(\"This is the email body\\n\", msg.Data)\n\t\t\tclose(called)\n\t\t})\n\n\t\tassert.Nil(c.Quit())\n\n\t\tselect {\n\t\tcase <-called:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Log(\"timed out\")\n\t\t\tt.Fail()\n\t\t}\n\t})\n}\n\nfunc TestMessageToMultipleRecipients(t *testing.T) {\n\tassert := assert.New(t)\n\n\twith(t, func(s Server) {\n\t\tc, err := smtp.Dial(ADDR)\n\t\tassert.Nil(err)\n\n\t\tassert.Nil(c.Mail(\"sender@example.org\"))\n\t\tassert.Nil(c.Rcpt(\"recipient1@example.net\"))\n\t\tassert.Nil(c.Rcpt(\"recipient2@example.net\"))\n\t\tassert.Nil(c.Rcpt(\"recipient3@example.net\"))\n\n\t\twc, err := c.Data()\n\t\tassert.Nil(err)\n\t\t_, err = fmt.Fprintf(wc, \"This is the email body\")\n\t\tassert.Nil(err)\n\t\tassert.Nil(wc.Close())\n\n\t\tcalled := make(chan struct{})\n\t\ts.Handle(func(msg Message) {\n\t\t\tassert.Equal(\"sender@example.org\", msg.Sender)\n\t\t\tassert.Equal(3, len(msg.Recipients))\n\t\t\tassert.Equal(\"recipient1@example.net\", msg.Recipients[0])\n\t\t\tassert.Equal(\"recipient2@example.net\", msg.Recipients[1])\n\t\t\tassert.Equal(\"recipient3@example.net\", msg.Recipients[2])\n\t\t\tassert.Equal(\"This is the email body\\n\", msg.Data)\n\t\t\tclose(called)\n\t\t})\n\n\t\tassert.Nil(c.Quit())\n\n\t\tselect {\n\t\tcase <-called:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Log(\"timed out\")\n\t\t\tt.Fail()\n\t\t}\n\t})\n}\n\nfunc TestSenderRecipientBodyAndQuitWithReset(t *testing.T) {\n\tassert := assert.New(t)\n\n\twith(t, func(s Server) {\n\t\tc, err := smtp.Dial(ADDR)\n\t\tassert.Nil(err)\n\n\t\tassert.Nil(c.Mail(\"sender@example.org\"))\n\t\tassert.Nil(c.Rcpt(\"recipient@example.net\"))\n\n\t\twc, err := c.Data()\n\t\tassert.Nil(err)\n\t\t_, err = fmt.Fprintf(wc, \"This is the email body\")\n\t\tassert.Nil(err)\n\t\tassert.Nil(wc.Close())\n\n\t\tassert.Nil(c.Reset())\n\n\t\tassert.Nil(c.Mail(\"sender2@example.org\"))\n\t\tassert.Nil(c.Rcpt(\"recipient2@example.net\"))\n\n\t\twc, err = c.Data()\n\t\tassert.Nil(err)\n\t\t_, err = fmt.Fprintf(wc, \"This is the email body2\")\n\t\tassert.Nil(err)\n\t\tassert.Nil(wc.Close())\n\n\t\tcalled := make(chan struct{})\n\t\ts.Handle(func(msg Message) {\n\t\t\tassert.Equal(\"sender2@example.org\", msg.Sender)\n\t\t\tassert.Equal(1, len(msg.Recipients))\n\t\t\tassert.Equal(\"recipient2@example.net\", msg.Recipients[0])\n\t\t\tassert.Equal(\"This is the email body2\\n\", msg.Data)\n\t\t\tclose(called)\n\t\t})\n\n\t\tcalled2 := make(chan struct{})\n\t\ts.Handle(func(msg Message) {\n\t\t\tassert.Equal(\"sender2@example.org\", msg.Sender)\n\t\t\tassert.Equal(1, len(msg.Recipients))\n\t\t\tassert.Equal(\"recipient2@example.net\", msg.Recipients[0])\n\t\t\tassert.Equal(\"This is the email body2\\n\", msg.Data)\n\t\t\tclose(called2)\n\t\t})\n\n\t\tassert.Nil(c.Quit())\n\n\t\tselect {\n\t\tcase <-called:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Log(\"timed out\")\n\t\t\tt.Fail()\n\t\t}\n\n\t\tselect {\n\t\tcase <-called2:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Log(\"timed out2\")\n\t\t\tt.Fail()\n\t\t}\n\t})\n}\n\nfunc TestVerify(t *testing.T) {\n\tassert := assert.New(t)\n\n\twith(t, func(s Server) {\n\t\tc, err := smtp.Dial(ADDR)\n\t\tassert.Nil(err)\n\n\t\tassert.Equal(c.Verify(\"sender@example.org\").Error(), \"502 Command not implemented%!(EXTRA []interface {}=[])\")\n\n\t\tassert.Nil(c.Quit())\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"fmt\"\n\t\"net\/smtp\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst (\n\tADDR = \"127.0.0.1:9026\"\n\tNAME = \"mx.test.server\"\n)\n\nfunc with(t *testing.T, f func(Server)) {\n\ts, err := Listen(ADDR, NAME)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(time.Millisecond)\n\tf(s)\n\n\ts.Close()\n}\n\nfunc TestConnect(t *testing.T) {\n\tassert := assert.New(t)\n\n\twith(t, func(s Server) {\n\t\t_, err := smtp.Dial(ADDR)\n\t\tassert.Nil(err)\n\t})\n}\n\nfunc TestSender(t *testing.T) {\n\tassert := assert.New(t)\n\n\twith(t, func(s Server) {\n\t\tc, err := smtp.Dial(ADDR)\n\t\tassert.Nil(err)\n\n\t\tassert.Nil(c.Mail(\"sender@example.org\"))\n\t})\n}\n\nfunc TestSenderAndRecipient(t *testing.T) {\n\tassert := assert.New(t)\n\n\twith(t, func(s Server) {\n\t\tc, err := smtp.Dial(ADDR)\n\t\tassert.Nil(err)\n\n\t\tassert.Nil(c.Mail(\"sender@example.org\"))\n\t\tassert.Nil(c.Rcpt(\"recipient@example.net\"))\n\t})\n}\n\nfunc TestSenderRecipientAndBody(t *testing.T) {\n\tassert := assert.New(t)\n\n\twith(t, func(s Server) {\n\t\tc, err := smtp.Dial(ADDR)\n\t\tassert.Nil(err)\n\n\t\tassert.Nil(c.Mail(\"sender@example.org\"))\n\t\tassert.Nil(c.Rcpt(\"recipient@example.net\"))\n\n\t\twc, err := c.Data()\n\t\tassert.Nil(err)\n\t\t_, err = fmt.Fprintf(wc, \"This is the email body\")\n\t\tassert.Nil(err)\n\t\tassert.Nil(wc.Close())\n\t})\n}\n\nfunc TestSenderRecipientBodyAndQuit(t *testing.T) {\n\tassert := assert.New(t)\n\n\twith(t, func(s Server) {\n\t\tc, err := smtp.Dial(ADDR)\n\t\tassert.Nil(err)\n\n\t\tassert.Nil(c.Mail(\"sender@example.org\"))\n\t\tassert.Nil(c.Rcpt(\"recipient@example.net\"))\n\n\t\twc, err := c.Data()\n\t\tassert.Nil(err)\n\t\t_, err = fmt.Fprintf(wc, \"This is the email body\")\n\t\tassert.Nil(err)\n\t\tassert.Nil(wc.Close())\n\n\t\tcalled := make(chan struct{})\n\t\ts.Handle(func(msg Message) {\n\t\t\tassert.Equal(\"sender@example.org\", msg.Sender)\n\t\t\tassert.Equal(1, len(msg.Recipients))\n\t\t\tassert.Equal(\"recipient@example.net\", msg.Recipients[0])\n\t\t\tassert.Equal(\"This is the email body\\n\", msg.Data)\n\t\t\tclose(called)\n\t\t})\n\n\t\tassert.Nil(c.Quit())\n\n\t\tselect {\n\t\tcase <-called:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Log(\"timed out\")\n\t\t\tt.Fail()\n\t\t}\n\t})\n}\n\nfunc TestSenderRecipientBodyAndQuitWithReset(t *testing.T) {\n\tassert := assert.New(t)\n\n\twith(t, func(s Server) {\n\t\tc, err := smtp.Dial(ADDR)\n\t\tassert.Nil(err)\n\n\t\tassert.Nil(c.Mail(\"sender@example.org\"))\n\t\tassert.Nil(c.Rcpt(\"recipient@example.net\"))\n\n\t\twc, err := c.Data()\n\t\tassert.Nil(err)\n\t\t_, err = fmt.Fprintf(wc, \"This is the email body\")\n\t\tassert.Nil(err)\n\t\tassert.Nil(wc.Close())\n\n\t\tassert.Nil(c.Reset())\n\n\t\tassert.Nil(c.Mail(\"sender2@example.org\"))\n\t\tassert.Nil(c.Rcpt(\"recipient2@example.net\"))\n\n\t\twc, err = c.Data()\n\t\tassert.Nil(err)\n\t\t_, err = fmt.Fprintf(wc, \"This is the email body2\")\n\t\tassert.Nil(err)\n\t\tassert.Nil(wc.Close())\n\n\t\tcalled := make(chan struct{})\n\t\ts.Handle(func(msg Message) {\n\t\t\tassert.Equal(\"sender2@example.org\", msg.Sender)\n\t\t\tassert.Equal(1, len(msg.Recipients))\n\t\t\tassert.Equal(\"recipient2@example.net\", msg.Recipients[0])\n\t\t\tassert.Equal(\"This is the email body2\\n\", msg.Data)\n\t\t\tclose(called)\n\t\t})\n\n\t\tcalled2 := make(chan struct{})\n\t\ts.Handle(func(msg Message) {\n\t\t\tassert.Equal(\"sender2@example.org\", msg.Sender)\n\t\t\tassert.Equal(1, len(msg.Recipients))\n\t\t\tassert.Equal(\"recipient2@example.net\", msg.Recipients[0])\n\t\t\tassert.Equal(\"This is the email body2\\n\", msg.Data)\n\t\t\tclose(called2)\n\t\t})\n\n\t\tassert.Nil(c.Quit())\n\n\t\tselect {\n\t\tcase <-called:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Log(\"timed out\")\n\t\t\tt.Fail()\n\t\t}\n\n\t\tselect {\n\t\tcase <-called2:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Log(\"timed out2\")\n\t\t\tt.Fail()\n\t\t}\n\t})\n}\n\nfunc TestVerify(t *testing.T) {\n\tassert := assert.New(t)\n\n\twith(t, func(s Server) {\n\t\tc, err := smtp.Dial(ADDR)\n\t\tassert.Nil(err)\n\n\t\tassert.Equal(c.Verify(\"sender@example.org\").Error(), \"502 Command not implemented%!(EXTRA []interface {}=[])\")\n\n\t\tassert.Nil(c.Quit())\n\t})\n}\n<commit_msg>Delete pointless tests<commit_after>package server\n\nimport (\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"fmt\"\n\t\"net\/smtp\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst (\n\tADDR = \"127.0.0.1:9026\"\n\tNAME = \"mx.test.server\"\n)\n\nfunc with(t *testing.T, f func(Server)) {\n\ts, err := Listen(ADDR, NAME)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttime.Sleep(time.Millisecond)\n\tf(s)\n\n\ts.Close()\n}\n\nfunc TestSenderRecipientBodyAndQuit(t *testing.T) {\n\tassert := assert.New(t)\n\n\twith(t, func(s Server) {\n\t\tc, err := smtp.Dial(ADDR)\n\t\tassert.Nil(err)\n\n\t\tassert.Nil(c.Mail(\"sender@example.org\"))\n\t\tassert.Nil(c.Rcpt(\"recipient@example.net\"))\n\n\t\twc, err := c.Data()\n\t\tassert.Nil(err)\n\t\t_, err = fmt.Fprintf(wc, \"This is the email body\")\n\t\tassert.Nil(err)\n\t\tassert.Nil(wc.Close())\n\n\t\tcalled := make(chan struct{})\n\t\ts.Handle(func(msg Message) {\n\t\t\tassert.Equal(\"sender@example.org\", msg.Sender)\n\t\t\tassert.Equal(1, len(msg.Recipients))\n\t\t\tassert.Equal(\"recipient@example.net\", msg.Recipients[0])\n\t\t\tassert.Equal(\"This is the email body\\n\", msg.Data)\n\t\t\tclose(called)\n\t\t})\n\n\t\tassert.Nil(c.Quit())\n\n\t\tselect {\n\t\tcase <-called:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Log(\"timed out\")\n\t\t\tt.Fail()\n\t\t}\n\t})\n}\n\nfunc TestSenderRecipientBodyAndQuitWithReset(t *testing.T) {\n\tassert := assert.New(t)\n\n\twith(t, func(s Server) {\n\t\tc, err := smtp.Dial(ADDR)\n\t\tassert.Nil(err)\n\n\t\tassert.Nil(c.Mail(\"sender@example.org\"))\n\t\tassert.Nil(c.Rcpt(\"recipient@example.net\"))\n\n\t\twc, err := c.Data()\n\t\tassert.Nil(err)\n\t\t_, err = fmt.Fprintf(wc, \"This is the email body\")\n\t\tassert.Nil(err)\n\t\tassert.Nil(wc.Close())\n\n\t\tassert.Nil(c.Reset())\n\n\t\tassert.Nil(c.Mail(\"sender2@example.org\"))\n\t\tassert.Nil(c.Rcpt(\"recipient2@example.net\"))\n\n\t\twc, err = c.Data()\n\t\tassert.Nil(err)\n\t\t_, err = fmt.Fprintf(wc, \"This is the email body2\")\n\t\tassert.Nil(err)\n\t\tassert.Nil(wc.Close())\n\n\t\tcalled := make(chan struct{})\n\t\ts.Handle(func(msg Message) {\n\t\t\tassert.Equal(\"sender2@example.org\", msg.Sender)\n\t\t\tassert.Equal(1, len(msg.Recipients))\n\t\t\tassert.Equal(\"recipient2@example.net\", msg.Recipients[0])\n\t\t\tassert.Equal(\"This is the email body2\\n\", msg.Data)\n\t\t\tclose(called)\n\t\t})\n\n\t\tcalled2 := make(chan struct{})\n\t\ts.Handle(func(msg Message) {\n\t\t\tassert.Equal(\"sender2@example.org\", msg.Sender)\n\t\t\tassert.Equal(1, len(msg.Recipients))\n\t\t\tassert.Equal(\"recipient2@example.net\", msg.Recipients[0])\n\t\t\tassert.Equal(\"This is the email body2\\n\", msg.Data)\n\t\t\tclose(called2)\n\t\t})\n\n\t\tassert.Nil(c.Quit())\n\n\t\tselect {\n\t\tcase <-called:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Log(\"timed out\")\n\t\t\tt.Fail()\n\t\t}\n\n\t\tselect {\n\t\tcase <-called2:\n\t\tcase <-time.After(time.Second):\n\t\t\tt.Log(\"timed out2\")\n\t\t\tt.Fail()\n\t\t}\n\t})\n}\n\nfunc TestVerify(t *testing.T) {\n\tassert := assert.New(t)\n\n\twith(t, func(s Server) {\n\t\tc, err := smtp.Dial(ADDR)\n\t\tassert.Nil(err)\n\n\t\tassert.Equal(c.Verify(\"sender@example.org\").Error(), \"502 Command not implemented%!(EXTRA []interface {}=[])\")\n\n\t\tassert.Nil(c.Quit())\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package cnmidori_test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com\/northbright\/cnmidori\"\n\t\"github.com\/northbright\/pathhelper\"\n)\n\nconst (\n\tsettingsStr string = `\n{\n \"redis-servers\":[\n {\"name\":\"user\", \"addr\":\"localhost:6379\", \"password\":\"123456\"},\n {\"name\":\"data\", \"addr\":\"localhost:6380\", \"password\":\"123456\"}\n ]\n}\n\n `\n)\n\nfunc ExampleNewServer() {\n\tserverRoot, _ := pathhelper.GetCurrentExecDir()\n\n\tsettingsFile := path.Join(serverRoot, \"settings.json\")\n\tif err := ioutil.WriteFile(settingsFile, []byte(settingsStr), 0755); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ioutil.WriteFile() error: %v\\n\", err)\n\t\treturn\n\t}\n\n\tserver, err := cnmidori.NewServer(settingsFile)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"NewServer(%v) error: %v\\n\", settingsFile, err)\n\t\treturn\n\t}\n\tfmt.Fprintf(os.Stderr, \"NewServer() OK. server = %v\\n\", server)\n\t\/\/ Output:\n}\n<commit_msg>Remove blank line in JSON string.<commit_after>package cnmidori_test\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com\/northbright\/cnmidori\"\n\t\"github.com\/northbright\/pathhelper\"\n)\n\nconst (\n\tsettingsStr string = `\n{\n \"redis-servers\":[\n {\"name\":\"user\", \"addr\":\"localhost:6379\", \"password\":\"123456\"},\n {\"name\":\"data\", \"addr\":\"localhost:6380\", \"password\":\"123456\"}\n ]\n}`\n)\n\nfunc ExampleNewServer() {\n\tserverRoot, _ := pathhelper.GetCurrentExecDir()\n\n\tsettingsFile := path.Join(serverRoot, \"settings.json\")\n\tif err := ioutil.WriteFile(settingsFile, []byte(settingsStr), 0755); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ioutil.WriteFile() error: %v\\n\", err)\n\t\treturn\n\t}\n\n\tserver, err := cnmidori.NewServer(settingsFile)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"NewServer(%v) error: %v\\n\", settingsFile, err)\n\t\treturn\n\t}\n\tfmt.Fprintf(os.Stderr, \"NewServer() OK. server = %v\\n\", server)\n\t\/\/ Output:\n}\n<|endoftext|>"} {"text":"<commit_before>package lfs\n\nimport (\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/docker\/docker\/api\/types\/container\"\n\t\"github.com\/docker\/docker\/api\/types\/network\"\n\t\"github.com\/docker\/docker\/client\"\n\t\"github.com\/docker\/go-connections\/nat\"\n\tgit \"github.com\/libgit2\/git2go\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ git lfs track\n\/\/ Since the git-lfs devs discourage using git-lfs in go projects we're just\n\/\/ calling the git-lfs CLI.\nfunc Track(filename, repositoryLocation string) (string, error) {\n\n\tcmd := exec.Command(\"git-lfs\", \"track\", filename)\n\tcmd.Dir = repositoryLocation\n\tout, err := cmd.Output()\n\n\t\/\/ wait to ensure .gitattributes file is up to date.\n\t\/\/ a monument to all my sins.\n\ttime.Sleep(2 * time.Second)\n\n\toutput := string(out)\n\toutput = strings.TrimRight(output, \"\\n\")\n\n\treturn output, err\n}\n\n\/\/ Adds and commits data found in path\nfunc AddAndCommitData(path, msg string) (string, error) {\n\n\tchanges, repositoryLocation, err := AddData(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar commitId string\n\tif changes {\n\t\tcommitId, err = commit(repositoryLocation, msg)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn commitId, nil\n}\n\n\/\/ Add a file to the index\nfunc Add(filename string) error {\n\t\/\/ Open repository at path.\n\trepo, _, err := openContainingRepository(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Check if the file has been changed and commit it if it has.\n\tchanged, err := fileChanged(repo, filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif changed {\n\t\terr := addToIndex(repo, filename)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc openContainingRepository(filename string) (repo *git.Repository,\n\trepositoryLocation string, err error) {\n\n\t\/\/ Strip path from filename\n\tpath := filepath.Dir(filename)\n\tpath, err = filepath.Abs(path)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\t\/\/ Open repository at path.\n\treturn openRepository(path)\n\n}\n\n\/\/ Add and commit file\nfunc AddAndCommit(filename, msg string) (string, error) {\n\t\/\/ Open repository at path.\n\trepo, repositoryLocation, err := openContainingRepository(filename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Check if the file has been changed and commit it if it has.\n\tchanged, err := fileChanged(repo, filename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar commitId string\n\n\tif changed {\n\t\terr := addToIndex(repo, filename)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tcommitId, err = commit(repositoryLocation, msg)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t} else {\n\t\treturn \"\", errors.New(\"No changes. Nothing to commit\")\n\t}\n\n\treturn commitId, nil\n}\n\n\/\/ Add the data at the path to the index. Will return true if there's anything\n\/\/ to be committed.\nfunc AddData(path string) (changes bool, repositoryLocation string, err error) {\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn false, \"\", err\n\t}\n\tdefer os.Chdir(wd)\n\n\trepo, repositoryLocation, err := openRepository(path)\n\tif err != nil {\n\t\treturn false, \"\", err\n\t}\n\n\tos.Chdir(repositoryLocation)\n\n\tdataPath, err := filepath.Rel(repositoryLocation, path)\n\tif err != nil {\n\t\treturn false, \"\", err\n\t}\n\n\t\/\/ ensure git-lfs tracks all files recursively by adding ** pattern, see\n\t\/\/ git PATTERN FORMAT description for more details.\n\tdataPattern := \"\" + dataPath + \"\/**\"\n\n\tgitAttr := \".gitattributes\"\n\n\t\/\/ if pattern already exists don't rerun the track command\n\tb, err := ioutil.ReadFile(gitAttr)\n\tif err != nil {\n\t\tpe := err.(*os.PathError)\n\t\tif pe.Err.Error() != \"no such file or directory\" {\n\t\t\treturn false, \"\", err\n\t\t}\n\t}\n\n\tif !strings.Contains(string(b), dataPattern) {\n\t\toutput, err := Track(dataPattern, repositoryLocation)\n\t\tif err != nil {\n\t\t\treturn false, \"\", errors.Wrap(err, \"Could not track files using git-lfs: \"+output)\n\t\t}\n\t}\n\n\tchanged, err := fileChanged(repo, gitAttr)\n\tif err != nil {\n\t\treturn false, \"\", err\n\t}\n\n\tif changed {\n\t\terr := addToIndex(repo, gitAttr)\n\t\tif err != nil {\n\t\t\treturn false, \"\", err\n\t\t}\n\t\tchanges = changed\n\t}\n\n\tchanged = false\n\n\terr = filepath.Walk(dataPath, func(path string, info os.FileInfo, err error) error {\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tchangedFile, err := fileChanged(repo, path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ One file is changed we can return\n\t\tif changedFile {\n\t\t\tchanged = true\n\t\t\treturn nil\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn false, \"\", err\n\t}\n\tif changed {\n\t\toutput, err := add(dataPath, repositoryLocation)\n\t\tif err != nil {\n\t\t\treturn false, \"\", errors.Wrap(err, \"Could not add files \"+output)\n\t\t}\n\t}\n\tchanges = changed\n\treturn changes, repositoryLocation, nil\n\n}\n\n\/\/ Add a path to the index.\nfunc addToIndex(repo *git.Repository, path string) error {\n\n\tindex, err := repo.Index()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = index.AddByPath(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = index.WriteTree()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = index.Write()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn err\n}\n\n\/\/ Removes the last directory in a path and returns it.\nfunc popLastDirectory(path string) string {\n\n\t\/\/ split the path into a list of dirs \/a\/b\/c --> [a,b,c] then remove\n\t\/\/ the last one and create a new path --> \/a\/b\n\tlist := strings.Split(path, \"\/\")\n\tlist = list[0 : len(list)-1]\n\tpath = \"\/\" + filepath.Join(list...)\n\treturn path\n}\n\n\/\/ Returns true if file is new, modified or deleted.\nfunc fileChanged(repo *git.Repository, path string) (bool, error) {\n\tstatus, err := repo.StatusFile(path)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif status == git.StatusWtNew || status == git.StatusWtModified ||\n\t\tstatus == git.StatusWtDeleted {\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}\n\n\/\/ Commits staged changes.\nfunc commit(path, msg string) (string, error) {\n\n\trepo, err := git.OpenRepository(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tindex, err := repo.Index()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttreeId, err := index.WriteTree()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttree, err := repo.LookupTree(treeId)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = index.Write()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar sig = &git.Signature{\n\t\t\"walrus\",\n\t\t\"walrus@github.com\/fjukstad\/walrus\",\n\t\ttime.Now(),\n\t}\n\n\tvar commitId *git.Oid\n\n\tcurrentBranch, err := repo.Head()\n\tif err != nil {\n\t\tcommitId, err = repo.CreateCommit(\"HEAD\", sig, sig, msg, tree)\n\t} else {\n\t\tcurrentTip, err := repo.LookupCommit(currentBranch.Target())\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tcommitId, err = repo.CreateCommit(\"HEAD\", sig, sig, msg, tree, currentTip)\n\t}\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = index.Write()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn commitId.String(), nil\n}\n\n\/\/ Will try to open a git repository located at the given path. If it is not\n\/\/ found it will traverse the directory tree outwards until i either finds a\n\/\/ repository or hits the root. If no repository is found it will initialize one\n\/\/ in the current working directory.\nfunc openRepository(path string) (repo *git.Repository, repositoryPath string, err error) {\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\tfor {\n\t\trepo, err = git.OpenRepository(path)\n\t\tif err != nil {\n\t\t\tpath = popLastDirectory(path)\n\t\t\t\/\/ Root hit\n\t\t\tif path == \"\/\" {\n\t\t\t\tpath = wd\n\t\t\t\tlog.Println(\"Output directory is not in a git repository. Creating one in \" + path)\n\t\t\t\trepo, err = git.InitRepository(wd, false)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, \"\", errors.Wrap(err, \"Could not initialize git repository\")\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn repo, path, nil\n}\n\n\/\/ git add\n\/\/ To speed up dev time for the prototype, use the exec pkg not git2go package\n\/\/ to add files. Future versions will get rid of this hacky way of doing things\n\/\/ by creating the blobs, softlinks etc. but that's for later!\nfunc add(path, repositoryLocation string) (string, error) {\n\tcmd := exec.Command(\"git\", \"add\", path)\n\tcmd.Dir = repositoryLocation\n\tout, err := cmd.Output()\n\toutput := string(out)\n\toutput = strings.TrimRight(output, \"\\n\")\n\treturn output, err\n}\n\n\/\/ Starts a git-lfs server in a Docker container\nfunc StartServer(mountDir string) error {\n\n\tc, err := client.NewEnvClient()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not create Docker client\")\n\t}\n\n\timage := \"fjukstad\/lfs-server\"\n\t_, err = c.ImagePull(context.Background(), image,\n\t\ttypes.ImagePullOptions{})\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not pull iamge\")\n\t}\n\n\thostPath, err := filepath.Abs(mountDir)\n\tif err != nil {\n\t\treturn errors.Wrap(err,\n\t\t\t\"Could not create absolute git-lfs directory path\")\n\t}\n\n\tbind := hostPath + \":\/lfs\"\n\n\tps := make(nat.PortSet)\n\tps[\"9999\/tcp\"] = struct{}{}\n\n\tpm := make(nat.PortMap)\n\tpm[\"9999\/tcp\"] = []nat.PortBinding{nat.PortBinding{\"0.0.0.0\", \"9999\"}}\n\n\tresp, err := c.ContainerCreate(context.Background(),\n\t\t&container.Config{Image: image,\n\t\t\tExposedPorts: ps},\n\t\t&container.HostConfig{\n\t\t\tBinds: []string{bind},\n\t\t\tPortBindings: pm},\n\t\t&network.NetworkingConfig{},\n\t\t\"git-lfs-server\")\n\n\tif err != nil || resp.ID == \" \" {\n\t\treturn errors.Wrap(err, \"Could not create git-lfs server container\")\n\t}\n\n\tcontainerId := resp.ID\n\n\terr = c.ContainerStart(context.Background(), containerId,\n\t\ttypes.ContainerStartOptions{})\n\treturn err\n\n}\n\n\/\/ Get the head id of the repository found at hostpath\nfunc GetHead(hostpath string) (string, error) {\n\n\trepo, _, err := openRepository(hostpath)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not open repository\")\n\t}\n\n\tref, err := repo.Head()\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not get head\")\n\t}\n\n\thead := ref.Target()\n\n\treturn head.String(), nil\n}\n\nfunc PrintDiff(path, id string) (string, error) {\n\tpath, err := filepath.Abs(path)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not get absolute path of output directory\")\n\t}\n\n\trepo, _, err := openRepository(path)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not open repository\")\n\t}\n\n\tindex, err := repo.Index()\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not get head\")\n\t}\n\n\toid, err := git.NewOid(id)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not create oid for id \"+id)\n\t}\n\n\tcommit, err := repo.LookupCommit(oid)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not lookup commit id \"+id)\n\t}\n\n\toldTree, err := commit.Tree()\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not lookup tree\")\n\t}\n\n\tdiff, err := repo.DiffTreeToIndex(oldTree, index, &git.DiffOptions{})\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not diff tree to index\")\n\t}\n\n\tstats, err := diff.Stats()\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not get diff stats\")\n\t}\n\n\treturn stats.String(git.DiffStatsFull, 80)\n}\n\nfunc Reset(path, id string) error {\n\tpath, err := filepath.Abs(path)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not get absolute path of output directory\")\n\t}\n\n\trepo, _, err := openRepository(path)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not open repository\")\n\t}\n\n\toid, err := git.NewOid(id)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not create oid for id \"+id)\n\t}\n\n\tcommit, err := repo.LookupCommit(oid)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not lookup commit id \"+id)\n\t}\n\n\terr = repo.ResetToCommit(commit, git.ResetMixed, &git.CheckoutOpts{})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not reset to id \"+id)\n\t}\n\treturn nil\n\n}\n<commit_msg>rm newlines<commit_after>package lfs\n\nimport (\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/docker\/docker\/api\/types\/container\"\n\t\"github.com\/docker\/docker\/api\/types\/network\"\n\t\"github.com\/docker\/docker\/client\"\n\t\"github.com\/docker\/go-connections\/nat\"\n\tgit \"github.com\/libgit2\/git2go\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ git lfs track\n\/\/ Since the git-lfs devs discourage using git-lfs in go projects we're just\n\/\/ calling the git-lfs CLI.\nfunc Track(filename, repositoryLocation string) (string, error) {\n\n\tcmd := exec.Command(\"git-lfs\", \"track\", filename)\n\tcmd.Dir = repositoryLocation\n\tout, err := cmd.Output()\n\n\t\/\/ wait to ensure .gitattributes file is up to date.\n\t\/\/ a monument to all my sins.\n\ttime.Sleep(2 * time.Second)\n\n\toutput := string(out)\n\toutput = strings.TrimRight(output, \"\\n\")\n\n\treturn output, err\n}\n\n\/\/ Adds and commits data found in path\nfunc AddAndCommitData(path, msg string) (string, error) {\n\n\tchanges, repositoryLocation, err := AddData(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar commitId string\n\tif changes {\n\t\tcommitId, err = commit(repositoryLocation, msg)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn commitId, nil\n}\n\n\/\/ Add a file to the index\nfunc Add(filename string) error {\n\t\/\/ Open repository at path.\n\trepo, _, err := openContainingRepository(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Check if the file has been changed and commit it if it has.\n\tchanged, err := fileChanged(repo, filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif changed {\n\t\terr := addToIndex(repo, filename)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc openContainingRepository(filename string) (repo *git.Repository,\n\trepositoryLocation string, err error) {\n\n\t\/\/ Strip path from filename\n\tpath := filepath.Dir(filename)\n\tpath, err = filepath.Abs(path)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\t\/\/ Open repository at path.\n\treturn openRepository(path)\n}\n\n\/\/ Add and commit file\nfunc AddAndCommit(filename, msg string) (string, error) {\n\t\/\/ Open repository at path.\n\trepo, repositoryLocation, err := openContainingRepository(filename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Check if the file has been changed and commit it if it has.\n\tchanged, err := fileChanged(repo, filename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar commitId string\n\n\tif changed {\n\t\terr := addToIndex(repo, filename)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tcommitId, err = commit(repositoryLocation, msg)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t} else {\n\t\treturn \"\", errors.New(\"No changes. Nothing to commit\")\n\t}\n\n\treturn commitId, nil\n}\n\n\/\/ Add the data at the path to the index. Will return true if there's anything\n\/\/ to be committed.\nfunc AddData(path string) (changes bool, repositoryLocation string, err error) {\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn false, \"\", err\n\t}\n\tdefer os.Chdir(wd)\n\n\trepo, repositoryLocation, err := openRepository(path)\n\tif err != nil {\n\t\treturn false, \"\", err\n\t}\n\n\tos.Chdir(repositoryLocation)\n\n\tdataPath, err := filepath.Rel(repositoryLocation, path)\n\tif err != nil {\n\t\treturn false, \"\", err\n\t}\n\n\t\/\/ ensure git-lfs tracks all files recursively by adding ** pattern, see\n\t\/\/ git PATTERN FORMAT description for more details.\n\tdataPattern := \"\" + dataPath + \"\/**\"\n\n\tgitAttr := \".gitattributes\"\n\n\t\/\/ if pattern already exists don't rerun the track command\n\tb, err := ioutil.ReadFile(gitAttr)\n\tif err != nil {\n\t\tpe := err.(*os.PathError)\n\t\tif pe.Err.Error() != \"no such file or directory\" {\n\t\t\treturn false, \"\", err\n\t\t}\n\t}\n\n\tif !strings.Contains(string(b), dataPattern) {\n\t\toutput, err := Track(dataPattern, repositoryLocation)\n\t\tif err != nil {\n\t\t\treturn false, \"\", errors.Wrap(err, \"Could not track files using git-lfs: \"+output)\n\t\t}\n\t}\n\n\tchanged, err := fileChanged(repo, gitAttr)\n\tif err != nil {\n\t\treturn false, \"\", err\n\t}\n\n\tif changed {\n\t\terr := addToIndex(repo, gitAttr)\n\t\tif err != nil {\n\t\t\treturn false, \"\", err\n\t\t}\n\t\tchanges = changed\n\t}\n\n\tchanged = false\n\n\terr = filepath.Walk(dataPath, func(path string, info os.FileInfo, err error) error {\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tchangedFile, err := fileChanged(repo, path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ One file is changed we can return\n\t\tif changedFile {\n\t\t\tchanged = true\n\t\t\treturn nil\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn false, \"\", err\n\t}\n\tif changed {\n\t\toutput, err := add(dataPath, repositoryLocation)\n\t\tif err != nil {\n\t\t\treturn false, \"\", errors.Wrap(err, \"Could not add files \"+output)\n\t\t}\n\t}\n\tchanges = changed\n\n\treturn changes, repositoryLocation, nil\n}\n\n\/\/ Add a path to the index.\nfunc addToIndex(repo *git.Repository, path string) error {\n\n\tindex, err := repo.Index()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = index.AddByPath(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = index.WriteTree()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = index.Write()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn err\n}\n\n\/\/ Removes the last directory in a path and returns it.\nfunc popLastDirectory(path string) string {\n\n\t\/\/ split the path into a list of dirs \/a\/b\/c --> [a,b,c] then remove\n\t\/\/ the last one and create a new path --> \/a\/b\n\tlist := strings.Split(path, \"\/\")\n\tlist = list[0 : len(list)-1]\n\tpath = \"\/\" + filepath.Join(list...)\n\treturn path\n}\n\n\/\/ Returns true if file is new, modified or deleted.\nfunc fileChanged(repo *git.Repository, path string) (bool, error) {\n\tstatus, err := repo.StatusFile(path)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif status == git.StatusWtNew || status == git.StatusWtModified ||\n\t\tstatus == git.StatusWtDeleted {\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}\n\n\/\/ Commits staged changes.\nfunc commit(path, msg string) (string, error) {\n\n\trepo, err := git.OpenRepository(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tindex, err := repo.Index()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttreeId, err := index.WriteTree()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttree, err := repo.LookupTree(treeId)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = index.Write()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar sig = &git.Signature{\n\t\t\"walrus\",\n\t\t\"walrus@github.com\/fjukstad\/walrus\",\n\t\ttime.Now(),\n\t}\n\n\tvar commitId *git.Oid\n\n\tcurrentBranch, err := repo.Head()\n\tif err != nil {\n\t\tcommitId, err = repo.CreateCommit(\"HEAD\", sig, sig, msg, tree)\n\t} else {\n\t\tcurrentTip, err := repo.LookupCommit(currentBranch.Target())\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tcommitId, err = repo.CreateCommit(\"HEAD\", sig, sig, msg, tree, currentTip)\n\t}\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = index.Write()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn commitId.String(), nil\n}\n\n\/\/ Will try to open a git repository located at the given path. If it is not\n\/\/ found it will traverse the directory tree outwards until i either finds a\n\/\/ repository or hits the root. If no repository is found it will initialize one\n\/\/ in the current working directory.\nfunc openRepository(path string) (repo *git.Repository, repositoryPath string, err error) {\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\tfor {\n\t\trepo, err = git.OpenRepository(path)\n\t\tif err != nil {\n\t\t\tpath = popLastDirectory(path)\n\t\t\t\/\/ Root hit\n\t\t\tif path == \"\/\" {\n\t\t\t\tpath = wd\n\t\t\t\tlog.Println(\"Output directory is not in a git repository. Creating one in \" + path)\n\t\t\t\trepo, err = git.InitRepository(wd, false)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, \"\", errors.Wrap(err, \"Could not initialize git repository\")\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn repo, path, nil\n}\n\n\/\/ git add\n\/\/ To speed up dev time for the prototype, use the exec pkg not git2go package\n\/\/ to add files. Future versions will get rid of this hacky way of doing things\n\/\/ by creating the blobs, softlinks etc. but that's for later!\nfunc add(path, repositoryLocation string) (string, error) {\n\tcmd := exec.Command(\"git\", \"add\", path)\n\tcmd.Dir = repositoryLocation\n\tout, err := cmd.Output()\n\toutput := string(out)\n\toutput = strings.TrimRight(output, \"\\n\")\n\treturn output, err\n}\n\n\/\/ Starts a git-lfs server in a Docker container\nfunc StartServer(mountDir string) error {\n\n\tc, err := client.NewEnvClient()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not create Docker client\")\n\t}\n\n\timage := \"fjukstad\/lfs-server\"\n\t_, err = c.ImagePull(context.Background(), image,\n\t\ttypes.ImagePullOptions{})\n\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not pull iamge\")\n\t}\n\n\thostPath, err := filepath.Abs(mountDir)\n\tif err != nil {\n\t\treturn errors.Wrap(err,\n\t\t\t\"Could not create absolute git-lfs directory path\")\n\t}\n\n\tbind := hostPath + \":\/lfs\"\n\n\tps := make(nat.PortSet)\n\tps[\"9999\/tcp\"] = struct{}{}\n\n\tpm := make(nat.PortMap)\n\tpm[\"9999\/tcp\"] = []nat.PortBinding{nat.PortBinding{\"0.0.0.0\", \"9999\"}}\n\n\tresp, err := c.ContainerCreate(context.Background(),\n\t\t&container.Config{Image: image,\n\t\t\tExposedPorts: ps},\n\t\t&container.HostConfig{\n\t\t\tBinds: []string{bind},\n\t\t\tPortBindings: pm},\n\t\t&network.NetworkingConfig{},\n\t\t\"git-lfs-server\")\n\n\tif err != nil || resp.ID == \" \" {\n\t\treturn errors.Wrap(err, \"Could not create git-lfs server container\")\n\t}\n\n\tcontainerId := resp.ID\n\n\terr = c.ContainerStart(context.Background(), containerId,\n\t\ttypes.ContainerStartOptions{})\n\treturn err\n}\n\n\/\/ Get the head id of the repository found at hostpath\nfunc GetHead(hostpath string) (string, error) {\n\n\trepo, _, err := openRepository(hostpath)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not open repository\")\n\t}\n\n\tref, err := repo.Head()\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not get head\")\n\t}\n\n\thead := ref.Target()\n\n\treturn head.String(), nil\n}\n\nfunc PrintDiff(path, id string) (string, error) {\n\tpath, err := filepath.Abs(path)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not get absolute path of output directory\")\n\t}\n\n\trepo, _, err := openRepository(path)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not open repository\")\n\t}\n\n\tindex, err := repo.Index()\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not get head\")\n\t}\n\n\toid, err := git.NewOid(id)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not create oid for id \"+id)\n\t}\n\n\tcommit, err := repo.LookupCommit(oid)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not lookup commit id \"+id)\n\t}\n\n\toldTree, err := commit.Tree()\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not lookup tree\")\n\t}\n\n\tdiff, err := repo.DiffTreeToIndex(oldTree, index, &git.DiffOptions{})\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not diff tree to index\")\n\t}\n\n\tstats, err := diff.Stats()\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Could not get diff stats\")\n\t}\n\n\treturn stats.String(git.DiffStatsFull, 80)\n}\n\nfunc Reset(path, id string) error {\n\tpath, err := filepath.Abs(path)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not get absolute path of output directory\")\n\t}\n\n\trepo, _, err := openRepository(path)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not open repository\")\n\t}\n\n\toid, err := git.NewOid(id)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not create oid for id \"+id)\n\t}\n\n\tcommit, err := repo.LookupCommit(oid)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not lookup commit id \"+id)\n\t}\n\n\terr = repo.ResetToCommit(commit, git.ResetMixed, &git.CheckoutOpts{})\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Could not reset to id \"+id)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package keyboard\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\t\"gobot.io\/x\/gobot\"\n)\n\nconst (\n\t\/\/ Key board event\n\tKey = \"key\"\n)\n\n\/\/ Driver is gobot software device to the keyboard\ntype Driver struct {\n\tname string\n\tconnect func(*Driver) (err error)\n\tlisten func(*Driver)\n\tstdin *os.File\n\tgobot.Eventer\n}\n\n\/\/ NewDriver returns a new keyboard Driver.\n\/\/\nfunc NewDriver() *Driver {\n\tk := &Driver{\n\t\tname: \"Keyboard\",\n\t\tconnect: func(k *Driver) (err error) {\n\t\t\tif err := configure(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tk.stdin = os.Stdin\n\t\t\treturn\n\t\t},\n\t\tlisten: func(k *Driver) {\n\t\t\tctrlc := bytes{3}\n\n\t\t\tfor {\n\t\t\t\tvar keybuf bytes\n\t\t\t\tk.stdin.Read(keybuf[0:3])\n\n\t\t\t\tif keybuf == ctrlc {\n\t\t\t\t\tproc, err := os.FindProcess(os.Getpid())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t\t}\n\n\t\t\t\t\tproc.Signal(os.Interrupt)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tk.Publish(Key, Parse(keybuf))\n\n\t\t\t}\n\t\t},\n\t\tEventer: gobot.NewEventer(),\n\t}\n\n\tk.AddEvent(Key)\n\n\treturn k\n}\n\n\/\/ Name returns the Driver Name\nfunc (k *Driver) Name() string { return k.name }\n\n\/\/ SetName sets the Driver Name\nfunc (k *Driver) SetName(n string) { k.name = n }\n\n\/\/ Connection returns the Driver Connection\nfunc (k *Driver) Connection() gobot.Connection { return nil }\n\n\/\/ Start initializes keyboard by grabbing key events as they come in and\n\/\/ publishing each as a key event\nfunc (k *Driver) Start() (err error) {\n\tif err = k.connect(k); err != nil {\n\t\treturn err\n\t}\n\n\tgo k.listen(k)\n\n\treturn\n}\n\n\/\/ Halt stops keyboard driver\nfunc (k *Driver) Halt() (err error) {\n\tif originalState != \"\" {\n\t\treturn restore()\n\t}\n\treturn\n}\n<commit_msg>keyboard: use new improved default namer to avoid API conflicts<commit_after>package keyboard\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\t\"gobot.io\/x\/gobot\"\n)\n\nconst (\n\t\/\/ Key board event\n\tKey = \"key\"\n)\n\n\/\/ Driver is gobot software device to the keyboard\ntype Driver struct {\n\tname string\n\tconnect func(*Driver) (err error)\n\tlisten func(*Driver)\n\tstdin *os.File\n\tgobot.Eventer\n}\n\n\/\/ NewDriver returns a new keyboard Driver.\n\/\/\nfunc NewDriver() *Driver {\n\tk := &Driver{\n\t\tname: gobot.DefaultName(\"Keyboard\"),\n\t\tconnect: func(k *Driver) (err error) {\n\t\t\tif err := configure(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tk.stdin = os.Stdin\n\t\t\treturn\n\t\t},\n\t\tlisten: func(k *Driver) {\n\t\t\tctrlc := bytes{3}\n\n\t\t\tfor {\n\t\t\t\tvar keybuf bytes\n\t\t\t\tk.stdin.Read(keybuf[0:3])\n\n\t\t\t\tif keybuf == ctrlc {\n\t\t\t\t\tproc, err := os.FindProcess(os.Getpid())\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t\t}\n\n\t\t\t\t\tproc.Signal(os.Interrupt)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\tk.Publish(Key, Parse(keybuf))\n\n\t\t\t}\n\t\t},\n\t\tEventer: gobot.NewEventer(),\n\t}\n\n\tk.AddEvent(Key)\n\n\treturn k\n}\n\n\/\/ Name returns the Driver Name\nfunc (k *Driver) Name() string { return k.name }\n\n\/\/ SetName sets the Driver Name\nfunc (k *Driver) SetName(n string) { k.name = n }\n\n\/\/ Connection returns the Driver Connection\nfunc (k *Driver) Connection() gobot.Connection { return nil }\n\n\/\/ Start initializes keyboard by grabbing key events as they come in and\n\/\/ publishing each as a key event\nfunc (k *Driver) Start() (err error) {\n\tif err = k.connect(k); err != nil {\n\t\treturn err\n\t}\n\n\tgo k.listen(k)\n\n\treturn\n}\n\n\/\/ Halt stops keyboard driver\nfunc (k *Driver) Halt() (err error) {\n\tif originalState != \"\" {\n\t\treturn restore()\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"golang.org\/x\/net\/html\"\n\t\"net\/http\"\n)\n\n\/\/sequence:\n\/\/ Type: ElementNode\n\/\/ Data: h2\n\/\/ Attr.Key: id\n\/\/ Attr.Val: pkg-overview\n\nvar requiredNodePath *html.Node\n\nfunc init() {\n\n\t\/\/n := html.Node{}\n\trequiredNodePath.Type = html.ElementNode\n\trequiredNodePath.Data = \"h2\"\n\ta := html.Attribute{}\n\ta.Key = \"id\"\n\ta.Val = \"pkg-overview\"\n\trequiredNodePath.Attr = append(requiredNodePath.Attr, a)\n\n\t\/\/requiredNodePath = append(requiredNodePath, n)\n\n}\n\nfunc main() {\n\turl := `https:\/\/godoc.org\/golang.org\/x\/oauth2`\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tn, err := html.Parse(resp.Body)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tFindMatchingNode(n, requiredNodePath, isMatchingNode)\n\n}\n\nfunc isMatchingNode(n *html.Node, matchWith *html.Node) bool {\n\tif n.Type == matchWith.Type && n.Data == matchWith.Data {\n\t\tmatchCnt := 0\n\t\tfor _, a := range matchWith.Attr {\n\t\t\tfor _, w := range n.Attr {\n\t\t\t\tif a.Key == w.Key && a.Val == w.Val {\n\t\t\t\t\tmatchCnt++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif matchCnt != len(matchWith.Attr) {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n\n}\n\ntype WalkFunc func(*html.Node, *html.Node) bool\n\nfunc FindMatchingNode(root *html.Node, match *html.Node, walkF WalkFunc) {\n\tvar f func(*html.Node)\n\tf = func(n *html.Node) {\n\t\tif walkF(n, match) {\n\t\t\tfmt.Println(nodeTypeText(n.Type), n.Data, n.Attr)\n\t\t}\n\n\t\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\t\tf(c)\n\t\t}\n\t}\n\tf(root)\n}\n\nfunc nodeTypeText(nodeType html.NodeType) string {\n\tswitch nodeType {\n\tcase html.ErrorNode:\n\t\treturn \"ErrorNode\"\n\tcase html.TextNode:\n\t\treturn \"TextNode\"\n\tcase html.DocumentNode:\n\t\treturn \"DocumentNode\"\n\tcase html.ElementNode:\n\t\treturn \"ElementNode\"\n\tcase html.CommentNode:\n\t\treturn \"CommentNode\"\n\tcase html.DoctypeNode:\n\t\treturn \"DoctypeNode\"\n\t}\n\treturn \"\"\n\n}\n<commit_msg>html parsing get desired section related wip<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"golang.org\/x\/net\/html\"\n\t\"net\/http\"\n)\n\n\/\/sequence:\n\/\/ Type: ElementNode\n\/\/ Data: h2\n\/\/ Attr.Key: id\n\/\/ Attr.Val: pkg-overview\n\nvar requiredNodePath []*html.Node\n\nfunc init() {\n\t\/\/TODO: these additions are not reflecting\n\tn := html.Node{}\n\tn.Type = html.ElementNode\n\tn.Data = \"div\"\n\ta := html.Attribute{}\n\ta.Key = \"class\"\n\ta.Val = \"post-footer-line post-footer-line-1\"\n\tn.Attr = append(n.Attr, a)\n\trequiredNodePath = append(requiredNodePath, &n)\n\n\tn = html.Node{}\n\tn.Type = html.ElementNode\n\tn.Data = \"div\"\n\ta = html.Attribute{}\n\ta.Key = \"class\"\n\ta.Val = \"post-author vcard\"\n\tn.Attr = append(n.Attr, a)\n\trequiredNodePath = append(requiredNodePath, &n)\n\n\tn = html.Node{}\n\tn.Type = html.ElementNode\n\tn.Data = \"span\"\n\ta = html.Attribute{}\n\ta.Key = \"itemprop\"\n\ta.Val = \"name\"\n\tn.Attr = append(n.Attr, a)\n\trequiredNodePath = append(requiredNodePath, &n)\n\n\tn = html.Node{}\n\tn.Type = html.TextNode\n\trequiredNodePath = append(requiredNodePath, &n)\n}\n\nfunc main() {\n\t\/\/url := `https:\/\/godoc.org\/golang.org\/x\/oauth2`\n\t\/\/url := `https:\/\/socketloop.com\/tutorials\/golang-read-file`\n\turl := `http:\/\/goblog.qwest.io\/2017\/09\/protobuf-for-go-quick-reference.html`\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tn, err := html.Parse(resp.Body)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tFindMatchingNode(n, requiredNodePath, isMatchingNode)\n\n}\n\nfunc isMatchingNode(n *html.Node, matchWith *html.Node) bool {\n\tif n.Type == matchWith.Type && n.Data == matchWith.Data { \/\/ TODO: check only on non blank fields\n\t\tmatchCnt := 0\n\t\tfor _, a := range matchWith.Attr {\n\t\t\tfor _, w := range n.Attr {\n\t\t\t\tif a.Key == w.Key && a.Val == w.Val {\n\t\t\t\t\tmatchCnt++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif matchCnt != len(matchWith.Attr) {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}\n\ntype WalkFunc func(*html.Node, *html.Node) bool\n\nfunc FindMatchingNode(root *html.Node, match []*html.Node, walkF WalkFunc) {\n\tfor _, m:= range match {\n\t\tfmt.Println(nodeTypeText(m.Type), m.Data, m.Attr)\n\t\tvar f func(*html.Node)*html.Node\n\t\tf = func(n *html.Node) *html.Node {\n\t\t\t\/\/fmt.Println(nodeTypeText(n.Type), n.Data, n.Attr)\n\t\t\tif walkF(n, m) {\n\t\t\t\t\/\/fmt.Println(nodeTypeText(n.Type), n.Data, n.Attr)\n\t\t\t\treturn n\n\t\t\t}\n\n\t\t\tfor c := n.FirstChild; c != nil; c = c.NextSibling {\n\t\t\t\tf(c)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\tf(root)\n\t}\n}\n\nfunc nodeTypeText(nodeType html.NodeType) string {\n\tswitch nodeType {\n\tcase html.ErrorNode:\n\t\treturn \"ErrorNode\"\n\tcase html.TextNode:\n\t\treturn \"TextNode\"\n\tcase html.DocumentNode:\n\t\treturn \"DocumentNode\"\n\tcase html.ElementNode:\n\t\treturn \"ElementNode\"\n\tcase html.CommentNode:\n\t\treturn \"CommentNode\"\n\tcase html.DoctypeNode:\n\t\treturn \"DoctypeNode\"\n\t}\n\treturn \"\"\n\n}\n<|endoftext|>"} {"text":"<commit_before>package ansibleGen\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n)\n\ntype testArgs struct {\n\tName string\n\tCustomRoles string\n\tGalaxyRoles string\n}\n\nvar testProjectArgs = testArgs{}\n\nfunc testProject() AnsibleProject {\n\ttestProjectArgs.Name = \"my_test_name\"\n\ttestProjectArgs.CustomRoles = \"crole1,crole2\"\n\ttestProjectArgs.GalaxyRoles = \"grole1,grole2,grole3\"\n\treturn *NewAnsibleProject(testProjectArgs.Name, testProjectArgs.CustomRoles, testProjectArgs.GalaxyRoles)\n}\n\nfunc Test_NewAnsibleProject(t *testing.T) {\n\n\tap := testProject()\n\tapType := reflect.TypeOf(ap).Kind()\n\tif apType != reflect.Struct {\n\t\tt.Errorf(\"NewAnsibleProject didn't return an struct\")\n\t}\n\n}\n\nfunc Test_ProjectHasAName(t *testing.T) {\n\tproject := testProject()\n\tif project.Name != testProjectArgs.Name {\n\t\tt.Errorf(\"Project has a name, wanted %s, got %s\", project.Name, testProjectArgs.Name)\n\t}\n}\n\nfunc Test_ProjectHasAnArrayOfRoles(t *testing.T) {\n\tproject := testProject()\n\twant := len(splitRoles(testProjectArgs.CustomRoles))\n\tif got := len(project.CustomRoles); got != want {\n\t\tt.Errorf(\"Project has custom roles, wanted %d, got %d\", want, got)\n\t}\n\tprojectCustomRoleType := reflect.TypeOf(project.CustomRoles)\n\troleType := reflect.TypeOf([]AnsibleRole{})\n\tif projectCustomRoleType != roleType {\n\t\tt.Errorf(\"Project has %d custom roles of type %s, expected of type %s\", want, roleType, projectCustomRoleType)\n\t}\n}\n\nfunc Test_ProjectHasGalaxyRoles(t *testing.T) {\n\tproject := testProject()\n\twant := len(splitRoles(testProjectArgs.GalaxyRoles))\n\tif got := len(project.GalaxyRoles); got != want {\n\t\tt.Errorf(\"Project has Galaxy roles, wanted %d, got %d\", want, got)\n\t}\n}\n\nfunc Test_SplitRoles(t *testing.T) {\n\ttype args struct {\n\t\troles string\n\t}\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant int\n\t}{\n\t\t{\"Split two roles\", args{\"role1,role2\"}, 2},\n\t\t{\"Only one role\", args{\"role\"}, 1},\n\t\t{\"No role\", args{\"\"}, 0},\n\t\t{\"Three roles\", args{\"role1, role2, role3\"}, 3},\n\t}\n\tfor _, tt := range tests {\n\t\tif got := len(splitRoles(tt.args.roles)); got != tt.want {\n\t\t\tt.Errorf(\"%q. splitRoles() = %d, want %d\", tt.name, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc Test_SplitCustomRoles(t *testing.T) {\n\ttype args struct {\n\t\troles string\n\t}\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant int\n\t}{\n\t\t{\"Split two roles\", args{\"role1,role2\"}, 2},\n\t\t{\"Only one role\", args{\"role\"}, 1},\n\t\t{\"No role\", args{\"\"}, 0},\n\t\t{\"Three roles\", args{\"role1, role2, role3\"}, 3},\n\t}\n\tfor _, tt := range tests {\n\t\tif got := len(splitCustomRoles(tt.args.roles)); got != tt.want {\n\t\t\tt.Errorf(\"%q. splitCustomRoles() = %d, want %d\", tt.name, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc Test_getProjectTreeTemplate(t *testing.T) {\n\tprojectName := \"a_project_name\"\n\ttree := getProjectTreeTemplate(projectName)\n\tif tree.Name != projectName {\n\t\tt.Errorf(\"The tree structure does not have the project name\")\n\t}\n\tif tree.Folders[0].Name != \"group_vars\" {\n\t\tt.Error(\"The tree structure is not correct\")\n\t}\n}\n\nfunc Test_ProjectHasATreeStructure(t *testing.T) {\n\tp := testProject()\n\tif len(p.TreeStructure.Folders) == 0 {\n\t\tt.Error(\"Tree structure for the project is empty\")\n\t}\n}\n\nfunc Test_ProjectAddsRoles(t *testing.T) {\n\tp := testProject()\n\trolesIndex := p.rolesFolderIndex(\"roles\")\n\tif len(p.TreeStructure.Folders[rolesIndex].Folders) != 2 {\n\t\tt.Error(\"Project does not have the roles in the tree structure\")\n\t}\n}\n<commit_msg>Better test correpondence<commit_after>package ansibleGen\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n)\n\ntype testArgs struct {\n\tName string\n\tCustomRoles string\n\tGalaxyRoles string\n}\n\nvar testProjectArgs = testArgs{}\n\nfunc testProject() AnsibleProject {\n\ttestProjectArgs.Name = \"my_test_name\"\n\ttestProjectArgs.CustomRoles = \"crole1,crole2\"\n\ttestProjectArgs.GalaxyRoles = \"grole1,grole2,grole3\"\n\treturn *NewAnsibleProject(testProjectArgs.Name, testProjectArgs.CustomRoles, testProjectArgs.GalaxyRoles)\n}\n\nfunc Test_NewAnsibleProject(t *testing.T) {\n\n\tap := testProject()\n\tapType := reflect.TypeOf(ap).Kind()\n\tif apType != reflect.Struct {\n\t\tt.Errorf(\"NewAnsibleProject didn't return an struct\")\n\t}\n\n}\n\nfunc Test_ProjectHasAName(t *testing.T) {\n\tproject := testProject()\n\tif project.Name != testProjectArgs.Name {\n\t\tt.Errorf(\"Project has a name, wanted %s, got %s\", project.Name, testProjectArgs.Name)\n\t}\n}\n\nfunc Test_ProjectHasAnArrayOfRoles(t *testing.T) {\n\tproject := testProject()\n\twant := len(splitRoles(testProjectArgs.CustomRoles))\n\tif got := len(project.CustomRoles); got != want {\n\t\tt.Errorf(\"Project has custom roles, wanted %d, got %d\", want, got)\n\t}\n\tprojectCustomRoleType := reflect.TypeOf(project.CustomRoles)\n\troleType := reflect.TypeOf([]AnsibleRole{})\n\tif projectCustomRoleType != roleType {\n\t\tt.Errorf(\"Project has %d custom roles of type %s, expected of type %s\", want, roleType, projectCustomRoleType)\n\t}\n}\n\nfunc Test_ProjectHasGalaxyRoles(t *testing.T) {\n\tproject := testProject()\n\twant := len(splitRoles(testProjectArgs.GalaxyRoles))\n\tif got := len(project.GalaxyRoles); got != want {\n\t\tt.Errorf(\"Project has Galaxy roles, wanted %d, got %d\", want, got)\n\t}\n}\n\nfunc Test_SplitRoles(t *testing.T) {\n\ttype args struct {\n\t\troles string\n\t}\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant int\n\t}{\n\t\t{\"Split two roles\", args{\"role1,role2\"}, 2},\n\t\t{\"Only one role\", args{\"role\"}, 1},\n\t\t{\"No role\", args{\"\"}, 0},\n\t\t{\"Three roles\", args{\"role1, role2, role3\"}, 3},\n\t}\n\tfor _, tt := range tests {\n\t\tif got := len(splitRoles(tt.args.roles)); got != tt.want {\n\t\t\tt.Errorf(\"%q. splitRoles() = %d, want %d\", tt.name, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc Test_SplitCustomRoles(t *testing.T) {\n\ttype args struct {\n\t\troles string\n\t}\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twant int\n\t}{\n\t\t{\"Split two roles\", args{\"role1,role2\"}, 2},\n\t\t{\"Only one role\", args{\"role\"}, 1},\n\t\t{\"No role\", args{\"\"}, 0},\n\t\t{\"Three roles\", args{\"role1, role2, role3\"}, 3},\n\t}\n\tfor _, tt := range tests {\n\t\tif got := len(splitCustomRoles(tt.args.roles)); got != tt.want {\n\t\t\tt.Errorf(\"%q. splitCustomRoles() = %d, want %d\", tt.name, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc Test_getProjectTreeTemplate(t *testing.T) {\n\tprojectName := \"a_project_name\"\n\ttree := getProjectTreeTemplate(projectName)\n\tif tree.Name != projectName {\n\t\tt.Errorf(\"The tree structure does not have the project name\")\n\t}\n\tif tree.Folders[0].Name != \"group_vars\" {\n\t\tt.Error(\"The tree structure is not correct\")\n\t}\n}\n\nfunc Test_ProjectHasATreeStructure(t *testing.T) {\n\tp := testProject()\n\tif len(p.TreeStructure.Folders) == 0 {\n\t\tt.Error(\"Tree structure for the project is empty\")\n\t}\n}\n\nfunc Test_ProjectAddsRoles(t *testing.T) {\n\tp := testProject()\n\trolesIndex := p.rolesFolderIndex(\"roles\")\n\tif len(p.TreeStructure.Folders[rolesIndex].Folders) != len(splitRoles(testProjectArgs.CustomRoles)) {\n\t\tt.Error(\"Project does not have the roles in the tree structure\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package sqlpaxos\n\nimport \"net\"\nimport \"fmt\"\nimport \"net\/rpc\"\nimport \"log\"\nimport \"time\"\nimport \"paxos\"\nimport \"sync\"\nimport \"os\"\nimport \"syscall\"\nimport \"encoding\/gob\"\nimport \"math\/rand\"\nimport \"strconv\"\nimport \"math\"\nimport \"barista\"\nimport \"encoding\/json\"\nimport \"logger\"\nimport \"db\"\n\nconst Debug=0\n\nfunc DPrintf(format string, a ...interface{}) (n int, err error) {\n if Debug > 0 {\n log.Printf(format, a...)\n }\n return\n}\n\ntype LastSeen struct {\n RequestId int \n Reply interface{}\n}\n\ntype SQLPaxos struct {\n mu sync.Mutex\n l net.Listener\n me int\n dead bool \/\/ for testing\n unreliable bool \/\/ for testing\n px *paxos.Paxos\n\n \/\/ Your definitions here.\n ops map[int]Op \/\/ log of operations\n replies map[int]interface{} \/\/ the replies for this sequence number\n done map[int]bool \/\/ true if we can delete the data for this sequence number\n data map[string]string \/\/ the database\n lastSeen map[int64]LastSeen \/\/ the last request\/reply for this client\n connections map[int64]*db.DBManager \/\/ connections per client. Limited to a single connection per client\n next int \/\/ the next sequence number to be executed\n logger *logger.Logger \/\/ logger to write paxos log to file\n}\n\nfunc (sp *SQLPaxos) execute(op Op) interface{} {\n \n testing := false\n if testing {\n args := op.Args\n reply := ExecReply{}\n \n \/\/ @TODO remove this\n if op.NoOp {\n return reply\n }\n\n \/\/ @TODO remove get & put\n key := args.(ExecArgs).Key\n if args.(ExecArgs).Type == Put {\n \/\/ execute the put\n\n prevValue, ok := sp.data[key]\n if ok {\n reply.Value = prevValue\n } else {\n reply.Value = \"\"\n }\n\n if args.(ExecArgs).DoHash {\n sp.data[key] = strconv.Itoa(int(hash(reply.Value + args.(ExecArgs).Value)))\n } else {\n sp.data[key] = args.(ExecArgs).Value\n }\n\n reply.Err = OK\n\n } else if args.(ExecArgs).Type == Get {\n \/\/ execute the get\n\n value, ok := sp.data[key]\n if ok {\n reply.Value = value\n reply.Err = OK \n } else {\n reply.Value = \"\"\n reply.Err = ErrNoKey\n }\n } \n\n return reply\n\n } else {\n \/\/ not testing\n\n \/\/ write op to file\n err := sp.WriteToLog(op)\n if err != nil {\n \/\/ log something\n }\n\n switch {\n case op.Type == Open:\n return sp.OpenHelper(op.Args.(OpenArgs), op.SeqNum)\n case op.Type == Close:\n return sp.CloseHelper(op.Args.(CloseArgs), op.SeqNum)\n case op.Type == Execute:\n return sp.ExecuteHelper(op.Args.(ExecArgs), op.SeqNum)\n }\n }\n return nil\n}\n\nfunc (sp *SQLPaxos) WriteToLog(op Op) error {\n b, err := json.Marshal(op)\n if err != nil {\n return err\n }\n return sp.logger.WriteToLog(b)\n}\n\n\nfunc (sp *SQLPaxos) ExecuteHelper(args ExecArgs, seqnum int) ExecReply {\n rows, columns, err := sp.UpdateDatabase(args.ClientId, args.Query, args.QueryParams, seqnum)\n if err != OK {\n \/\/ log something\n return ExecReply{Err:err}\n }\n\n tuples := []*barista.Tuple{}\n for _, row := range rows {\n tuple := barista.Tuple{Cells: &row}\n tuples = append(tuples, &tuple)\n }\n \n result_set := new(barista.ResultSet)\n \/\/result_set.Con = con. @TODO: this will not be populating this\n result_set.Tuples = &tuples\n result_set.FieldNames = &columns\n return ExecReply{Result:result_set, Err:OK}\n}\n\nfunc (sp *SQLPaxos) OpenHelper(args OpenArgs, seqnum int) OpenReply {\n reply := OpenReply{}\n _, ok := sp.connections[args.ClientId]\n if ok {\n reply.Err = ConnAlreadyOpen\n } else {\n manager := new(db.DBManager)\n reply.Err = errorToErr(manager.OpenConnection(args.User, args.Password, args.Database))\n sp.connections[args.ClientId] = manager\n }\n _, _, err := sp.UpdateDatabase(args.ClientId, \"\", nil, seqnum)\n if err != OK {\n \/\/ log something\n }\n return reply\n}\n\nfunc errorToErr(error error) Err {\n if error != nil {\n return Err(error.Error())\n } else {\n return OK\n }\n}\n\nfunc (sp *SQLPaxos) CloseHelper(args CloseArgs, seqnum int) CloseReply {\n _, _, err := sp.UpdateDatabase(args.ClientId, \"\", nil, seqnum)\n reply := CloseReply{}\n _, ok := sp.connections[args.ClientId]\n if !ok {\n reply.Err = ConnAlreadyClosed\n } else {\n reply.Err = errorToErr(sp.connections[args.ClientId].CloseConnection())\n delete(sp.connections, args.ClientId) \/\/only delete on successful close?\n }\n if err != OK {\n \/\/ log something\n }\n return reply\n}\n\nfunc (sp *SQLPaxos) convertQueryParams(query_params [][]byte) []interface{} {\n params := make([]interface{}, len(query_params))\n for i, param := range query_params {\n params[i] = param\n }\n return params\n}\n\n\n\/\/ note that NoOps don't update the state table\nfunc (sp *SQLPaxos) UpdateDatabase(clientId int64, query string, query_params [][]byte, seqnum int) ([][][]byte, []string, Err) {\n tx, err := sp.connections[clientId].BeginTxn()\n \n rows := make([][][]byte, 0)\n columns := make([]string, 0)\n\n if err != nil || tx == nil {\n return rows, columns, errorToErr(err)\n }\n\n if query != \"\" {\n params := sp.convertQueryParams(query_params)\n rows, columns, err = sp.connections[clientId].QueryTxn(tx, query, params...)\n }\n\n update := \"UPDATE SQLPaxosLog SET lastSeqNum=\" + strconv.Itoa(seqnum) + \";\"\n _, errUpdate := sp.connections[clientId].ExecTxn(tx, update, nil)\n if errUpdate != nil {\n fmt.Println(\"Error updating SQLPaxosLog: \", errUpdate)\n }\n\n errEnd := sp.connections[clientId].EndTxn(tx)\n if errEnd != nil {\n fmt.Println(\"Error committing txn: \", errEnd)\n }\n\n return rows, columns, errorToErr(err)\n}\n\nfunc (sp *SQLPaxos) fillHoles(next int, seq int) interface{} {\n \n var reply interface{}\n\n \/\/ make sure there are no holes in the log before our operation\n for i := next; i <= seq; i++ {\n nwaits := 0\n for !sp.dead {\n\tif _, ok := sp.ops[i]; ok || sp.next > i {\n \t break\n }\n\n decided, v_i := sp.px.Status(i)\n if decided {\n \/\/ the operation in slot i has been decided\n sp.ops[i] = v_i.(Op)\n break\n } else {\n nwaits++\n sp.mu.Unlock()\n if nwaits == 5 || nwaits == 10 {\n \/\/ propose a no-op\n sp.px.Start(i, Op{NoOp: true})\n } else if nwaits > 10 {\n time.Sleep(100 * time.Millisecond)\n } else {\n time.Sleep(10 * time.Millisecond)\n }\n sp.mu.Lock()\n }\n }\n\n if i == sp.next {\n \/\/ the operation at slot i is next to be executed\n\tr, executed := sp.checkIfExecuted(sp.ops[i])\n if executed {\n \t sp.replies[i] = r\n\t} else {\n\t r := sp.execute(sp.ops[i])\n\t sp.replies[i] = r\n\t sp.lastSeen[getOpClientId(sp.ops[i])] = LastSeen{ RequestId: getOpRequestId(sp.ops[i]), Reply: r }\n\t}\n sp.next++\n }\n\n if i == seq {\n reply = sp.replies[i]\n }\n }\n\n return reply\n} \n\nfunc getOpClientId(op Op) int64 {\n switch {\n case op.Type == Open:\n return op.Args.(OpenArgs).ClientId;\n case op.Type == Close:\n return op.Args.(CloseArgs).ClientId;\n case op.Type == Execute:\n return op.Args.(ExecArgs).ClientId;\n }\n return -1;\n}\n\nfunc getOpRequestId(op Op) int {\n switch {\n case op.Type == Open:\n return op.Args.(OpenArgs).RequestId;\n case op.Type == Close:\n return op.Args.(CloseArgs).RequestId;\n case op.Type == Execute:\n return op.Args.(ExecArgs).RequestId;\n }\n return -1;\n}\n\n\/\/ @TODO: update to support multiple types of operations\nfunc (sp *SQLPaxos) checkIfExecuted(op Op) (interface{}, bool) {\n \/\/ need some casting here\n lastSeen, ok := sp.lastSeen[getOpClientId(op)]\n if ok {\n if lastSeen.RequestId == getOpRequestId(op) {\n return lastSeen.Reply, true\n } else if lastSeen.RequestId > getOpRequestId(op) {\n return nil, true \/\/ empty reply since this is an old request\n }\n }\n\n return nil, false\n}\n\nfunc (sp *SQLPaxos) reserveSlot(op Op) int {\n\n \/\/ propose this operation for slot seq\n seq := sp.px.Max() + 1\n v := op\n sp.px.Start(seq, v)\n\n nwaits := 0\n for !sp.dead {\n decided, v_a := sp.px.Status(seq)\n if decided && v_a != nil && getOpClientId(v_a.(Op)) == getOpClientId(v) && \n getOpRequestId(v_a.(Op)) == getOpRequestId(v) {\n \/\/ we successfully claimed this slot for our operation\n if _, ok := sp.ops[seq]; !ok {\n\t v.SeqNum = seq\n sp.ops[seq] = v\n }\n break\n } else if decided {\n \/\/ another proposer got this slot, so try to get our operation in a new slot\n seq = int(math.Max(float64(sp.px.Max() + 1), float64(seq + 1)))\n sp.px.Start(seq, v)\n nwaits = 0\n } else {\n nwaits++\n \tsp.mu.Unlock()\n if nwaits == 5 || nwaits == 10 {\n \/\/ re-propose our operation\n sp.px.Start(seq, v)\n \t} else if nwaits > 10 {\n time.Sleep(100 * time.Millisecond)\n } else {\n time.Sleep(10 * time.Millisecond)\n \t}\n \tsp.mu.Lock()\n }\n }\n v.SeqNum = seq \/\/ update sequence number\n return seq\n}\n\nfunc (sp *SQLPaxos) freeMemory(seq int) {\n\n sp.done[seq] = true\n minNotDone := seq + 1\n for i := seq; i >= 0; i-- {\n _, ok := sp.ops[i]\n if ok {\n if done, ok := sp.done[i]; ok && done || sp.ops[i].NoOp {\n delete(sp.ops, i)\n delete(sp.replies, i)\n delete(sp.done, i)\n } else {\n minNotDone = i\n }\n }\n }\n\n sp.px.Done(minNotDone - 1)\n}\n\n\/\/@Make it work for multiple types of arguments\nfunc (sp *SQLPaxos) commit(op Op) interface{} {\n\n sp.mu.Lock()\n defer sp.mu.Unlock()\n\n \/\/ first check if this request has already been executed\n reply, ok := sp.checkIfExecuted(op)\n if ok {\n return reply\n }\n\n \/\/ reserve a slot in the paxos log for this operation\n seq := sp.reserveSlot(op)\n\n next := sp.next\n if next > seq {\n \/\/ our operation has already been executed\n reply = sp.replies[seq]\n } else {\n \/\/ fill holes in the log and execute our operation\n reply = sp.fillHoles(next, seq)\n }\n\n \/\/ delete un-needed log entries to free up memory\n sp.freeMemory(seq)\n\n return reply\n}\n\nfunc (sp *SQLPaxos) ExecuteSQL(args *ExecArgs, reply *ExecReply) error {\n \/\/ execute this operation and store the response in r\n op := Op{Type:Execute, Args: *args}\n r := sp.commit(op)\n\n if r != nil {\n reply.Result = r.(ExecReply).Result\n reply.Value = r.(ExecReply).Value\n reply.Err = r.(ExecReply).Err\n }\n\n return nil\n}\n\n\/\/ open the connection to the database\nfunc (sp *SQLPaxos) Open(args *OpenArgs, reply *OpenReply) error {\n \/\/ execute this operation and store the response in r\n op := Op{Type:Open, Args: *args}\n r := sp.commit(op)\n\n if r != nil {\n reply.Err = r.(OpenReply).Err\n }\n\n return nil\n}\n\n\/\/ close the connection to the database\nfunc (sp *SQLPaxos) Close(args *CloseArgs, reply *CloseReply) error {\n \/\/ execute this operation and store the response in r\n op := Op{Type:Close, Args: *args}\n r := sp.commit(op)\n\n if r != nil {\n reply.Err = r.(CloseReply).Err\n }\n\n return nil\n}\n\n\/\/ tell the server to shut itself down.\nfunc (sp *SQLPaxos) kill() {\n sp.dead = true\n sp.l.Close()\n sp.px.Kill()\n}\n\n\/\/\n\/\/ servers[] contains the ports of the set of\n\/\/ servers that will cooperate via Paxos to\n\/\/ form the fault-tolerant key\/value service.\n\/\/ me is the index of the current server in servers[].\n\/\/\nfunc StartServer(servers []string, me int) *SQLPaxos {\n \/\/ call gob.Register on structures you want\n \/\/ Go's RPC library to marshall\/unmarshall.\n gob.Register(Op{})\n gob.Register(ExecArgs{})\n\n sp := new(SQLPaxos)\n sp.me = me\n\n \/\/ Your initialization code here.\n sp.ops = make(map[int]Op)\n sp.data = make(map[string]string)\n sp.replies = make(map[int]interface{})\n sp.done = make(map[int]bool)\n sp.lastSeen = make(map[int64]LastSeen)\n sp.next = 0\n sp.connections = make(map[int64]*db.DBManager)\n sp.logger = logger.Make(\"sqlpaxos_log.txt\")\n \n rpcs := rpc.NewServer()\n rpcs.Register(sp)\n\n sp.px = paxos.Make(servers, me, rpcs)\n\n \/\/os.Remove(servers[me]) \/\/ only needed for \"unix\"\n \/\/l, e := net.Listen(\"unix\", servers[me]);\n l, e := net.Listen(\"tcp\", servers[me]);\n if e != nil {\n log.Fatal(\"listen error: \", e);\n }\n sp.l = l\n\n\n \/\/ please do not change any of the following code,\n \/\/ or do anything to subvert it.\n\n go func() {\n for sp.dead == false {\n conn, err := sp.l.Accept()\n if err == nil && sp.dead == false {\n if sp.unreliable && (rand.Int63() % 1000) < 100 {\n \/\/ discard the request.\n conn.Close()\n } else if sp.unreliable && (rand.Int63() % 1000) < 200 {\n \/\/ process the request but force discard of reply.\n \/\/c1 := conn.(*net.UnixConn)\n\t c1 := conn.(*net.TCPConn)\n f, _ := c1.File()\n err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)\n if err != nil {\n fmt.Printf(\"shutdown: %v\\n\", err)\n }\n go rpcs.ServeConn(conn)\n } else {\n go rpcs.ServeConn(conn)\n }\n } else if err == nil {\n conn.Close()\n }\n if err != nil && sp.dead == false {\n fmt.Printf(\"SQLPaxos(%v) accept: %v\\n\", me, err.Error())\n\tsp.kill()\n }\n }\n }()\n\n return sp\n}\n\n<commit_msg>fixes<commit_after>package sqlpaxos\n\nimport \"net\"\nimport \"fmt\"\nimport \"net\/rpc\"\nimport \"log\"\nimport \"time\"\nimport \"paxos\"\nimport \"sync\"\nimport \"os\"\nimport \"syscall\"\nimport \"encoding\/gob\"\nimport \"math\/rand\"\nimport \"strconv\"\nimport \"math\"\nimport \"barista\"\nimport \"encoding\/json\"\nimport \"logger\"\nimport \"db\"\n\nconst Debug=0\n\nfunc DPrintf(format string, a ...interface{}) (n int, err error) {\n if Debug > 0 {\n log.Printf(format, a...)\n }\n return\n}\n\ntype LastSeen struct {\n RequestId int \n Reply interface{}\n}\n\ntype SQLPaxos struct {\n mu sync.Mutex\n l net.Listener\n me int\n dead bool \/\/ for testing\n unreliable bool \/\/ for testing\n px *paxos.Paxos\n\n \/\/ Your definitions here.\n ops map[int]Op \/\/ log of operations\n replies map[int]interface{} \/\/ the replies for this sequence number\n done map[int]bool \/\/ true if we can delete the data for this sequence number\n data map[string]string \/\/ the database\n lastSeen map[int64]LastSeen \/\/ the last request\/reply for this client\n connections map[int64]*db.DBManager \/\/ connections per client. Limited to a single connection per client\n next int \/\/ the next sequence number to be executed\n logger *logger.Logger \/\/ logger to write paxos log to file\n}\n\nfunc (sp *SQLPaxos) execute(op Op) interface{} {\n \n testing := false\n if testing {\n args := op.Args\n reply := ExecReply{}\n \n \/\/ @TODO remove this\n if op.NoOp {\n return reply\n }\n\n \/\/ @TODO remove get & put\n key := args.(ExecArgs).Key\n if args.(ExecArgs).Type == Put {\n \/\/ execute the put\n\n prevValue, ok := sp.data[key]\n if ok {\n reply.Value = prevValue\n } else {\n reply.Value = \"\"\n }\n\n if args.(ExecArgs).DoHash {\n sp.data[key] = strconv.Itoa(int(hash(reply.Value + args.(ExecArgs).Value)))\n } else {\n sp.data[key] = args.(ExecArgs).Value\n }\n\n reply.Err = OK\n\n } else if args.(ExecArgs).Type == Get {\n \/\/ execute the get\n\n value, ok := sp.data[key]\n if ok {\n reply.Value = value\n reply.Err = OK \n } else {\n reply.Value = \"\"\n reply.Err = ErrNoKey\n }\n } \n\n return reply\n\n } else {\n \/\/ not testing\n\n \/\/ write op to file\n err := sp.WriteToLog(op)\n if err != nil {\n \/\/ log something\n }\n\n switch {\n case op.Type == Open:\n return sp.OpenHelper(op.Args.(OpenArgs), op.SeqNum)\n case op.Type == Close:\n return sp.CloseHelper(op.Args.(CloseArgs), op.SeqNum)\n case op.Type == Execute:\n return sp.ExecuteHelper(op.Args.(ExecArgs), op.SeqNum)\n }\n }\n return nil\n}\n\nfunc (sp *SQLPaxos) WriteToLog(op Op) error {\n b, err := json.Marshal(op)\n if err != nil {\n return err\n }\n return sp.logger.WriteToLog(b)\n}\n\n\nfunc (sp *SQLPaxos) ExecuteHelper(args ExecArgs, seqnum int) ExecReply {\n rows, columns, err := sp.UpdateDatabase(args.ClientId, args.Query, args.QueryParams, seqnum)\n if err != OK {\n \/\/ log something\n return ExecReply{Err:err}\n }\n\n tuples := []*barista.Tuple{}\n for _, row := range rows {\n tuple := barista.Tuple{Cells: &row}\n tuples = append(tuples, &tuple)\n }\n \n result_set := new(barista.ResultSet)\n \/\/result_set.Con = con. @TODO: this will not be populating this\n result_set.Tuples = &tuples\n result_set.FieldNames = &columns\n return ExecReply{Result:result_set, Err:OK}\n}\n\nfunc (sp *SQLPaxos) OpenHelper(args OpenArgs, seqnum int) OpenReply {\n reply := OpenReply{}\n _, ok := sp.connections[args.ClientId]\n if ok {\n reply.Err = ConnAlreadyOpen\n } else {\n manager := new(db.DBManager)\n reply.Err = errorToErr(manager.OpenConnection(args.User, args.Password, args.Database))\n sp.connections[args.ClientId] = manager\n }\n _, _, err := sp.UpdateDatabase(args.ClientId, \"\", nil, seqnum)\n if err != OK {\n \/\/ log something\n }\n return reply\n}\n\nfunc errorToErr(error error) Err {\n if error != nil {\n return Err(error.Error())\n } else {\n return OK\n }\n}\n\nfunc (sp *SQLPaxos) CloseHelper(args CloseArgs, seqnum int) CloseReply {\n _, _, err := sp.UpdateDatabase(args.ClientId, \"\", nil, seqnum)\n reply := CloseReply{}\n _, ok := sp.connections[args.ClientId]\n if !ok {\n reply.Err = ConnAlreadyClosed\n } else {\n reply.Err = errorToErr(sp.connections[args.ClientId].CloseConnection())\n delete(sp.connections, args.ClientId) \/\/only delete on successful close?\n }\n if err != OK {\n \/\/ log something\n }\n return reply\n}\n\nfunc (sp *SQLPaxos) convertQueryParams(query_params [][]byte) []interface{} {\n params := make([]interface{}, len(query_params))\n for i, param := range query_params {\n params[i] = param\n }\n return params\n}\n\n\n\/\/ note that NoOps don't update the state table\nfunc (sp *SQLPaxos) UpdateDatabase(clientId int64, query string, query_params [][]byte, seqnum int) ([][][]byte, []string, Err) {\n tx, err := sp.connections[clientId].BeginTxn()\n \n rows := make([][][]byte, 0)\n columns := make([]string, 0)\n\n if err != nil || tx == nil {\n return rows, columns, errorToErr(err)\n }\n\n if query != \"\" {\n params := sp.convertQueryParams(query_params)\n rows, columns, err = sp.connections[clientId].QueryTxn(tx, query, params...)\n }\n\n update := \"UPDATE SQLPaxosLog SET lastSeqNum=\" + strconv.Itoa(seqnum) + \";\"\n _, errUpdate := sp.connections[clientId].ExecTxn(tx, update, nil)\n if errUpdate != nil {\n fmt.Println(\"Error updating SQLPaxosLog: \", errUpdate)\n }\n\n errEnd := sp.connections[clientId].EndTxn(tx)\n if errEnd != nil {\n fmt.Println(\"Error committing txn: \", errEnd)\n }\n\n return rows, columns, errorToErr(err)\n}\n\nfunc (sp *SQLPaxos) fillHoles(next int, seq int) interface{} {\n \n var reply interface{}\n\n \/\/ make sure there are no holes in the log before our operation\n for i := next; i <= seq; i++ {\n nwaits := 0\n for !sp.dead {\n\tif _, ok := sp.ops[i]; ok || sp.next > i {\n \t break\n }\n\n decided, v_i := sp.px.Status(i)\n if decided {\n \/\/ the operation in slot i has been decided\n sp.ops[i] = v_i.(Op)\n break\n } else {\n nwaits++\n sp.mu.Unlock()\n if nwaits == 5 || nwaits == 10 {\n \/\/ propose a no-op\n sp.px.Start(i, Op{NoOp: true})\n } else if nwaits > 10 {\n time.Sleep(100 * time.Millisecond)\n } else {\n time.Sleep(10 * time.Millisecond)\n }\n sp.mu.Lock()\n }\n }\n\n if i == sp.next {\n \/\/ the operation at slot i is next to be executed\n\tr, executed := sp.checkIfExecuted(sp.ops[i])\n if executed {\n \t sp.replies[i] = r\n\t} else {\n\t r := sp.execute(sp.ops[i])\n\t sp.replies[i] = r\n\t sp.lastSeen[getOpClientId(sp.ops[i])] = LastSeen{ RequestId: getOpRequestId(sp.ops[i]), Reply: r }\n\t}\n sp.next++\n }\n\n if i == seq {\n reply = sp.replies[i]\n }\n }\n\n return reply\n} \n\nfunc getOpClientId(op Op) int64 {\n switch {\n case op.Type == Open:\n return op.Args.(OpenArgs).ClientId;\n case op.Type == Close:\n return op.Args.(CloseArgs).ClientId;\n case op.Type == Execute:\n return op.Args.(ExecArgs).ClientId;\n }\n return -1;\n}\n\nfunc getOpRequestId(op Op) int {\n switch {\n case op.Type == Open:\n return op.Args.(OpenArgs).RequestId;\n case op.Type == Close:\n return op.Args.(CloseArgs).RequestId;\n case op.Type == Execute:\n return op.Args.(ExecArgs).RequestId;\n }\n return -1;\n}\n\n\/\/ @TODO: update to support multiple types of operations\nfunc (sp *SQLPaxos) checkIfExecuted(op Op) (interface{}, bool) {\n \/\/ need some casting here\n lastSeen, ok := sp.lastSeen[getOpClientId(op)]\n if ok {\n if lastSeen.RequestId == getOpRequestId(op) {\n return lastSeen.Reply, true\n } else if lastSeen.RequestId > getOpRequestId(op) {\n return nil, true \/\/ empty reply since this is an old request\n }\n }\n\n return nil, false\n}\n\nfunc (sp *SQLPaxos) reserveSlot(op Op) int {\n\n \/\/ propose this operation for slot seq\n seq := sp.px.Max() + 1\n v := op\n sp.px.Start(seq, v)\n\n nwaits := 0\n for !sp.dead {\n decided, v_a := sp.px.Status(seq)\n if decided && v_a != nil && getOpClientId(v_a.(Op)) == getOpClientId(v) && \n getOpRequestId(v_a.(Op)) == getOpRequestId(v) {\n \/\/ we successfully claimed this slot for our operation\n if _, ok := sp.ops[seq]; !ok {\n\t v.SeqNum = seq\n sp.ops[seq] = v\n }\n break\n } else if decided {\n \/\/ another proposer got this slot, so try to get our operation in a new slot\n seq = int(math.Max(float64(sp.px.Max() + 1), float64(seq + 1)))\n sp.px.Start(seq, v)\n nwaits = 0\n } else {\n nwaits++\n \tsp.mu.Unlock()\n if nwaits == 5 || nwaits == 10 {\n \/\/ re-propose our operation\n sp.px.Start(seq, v)\n \t} else if nwaits > 10 {\n time.Sleep(100 * time.Millisecond)\n } else {\n time.Sleep(10 * time.Millisecond)\n \t}\n \tsp.mu.Lock()\n }\n }\n v.SeqNum = seq \/\/ update sequence number\n return seq\n}\n\nfunc (sp *SQLPaxos) freeMemory(seq int) {\n\n sp.done[seq] = true\n minNotDone := seq + 1\n for i := seq; i >= 0; i-- {\n _, ok := sp.ops[i]\n if ok {\n if done, ok := sp.done[i]; ok && done || sp.ops[i].NoOp {\n delete(sp.ops, i)\n delete(sp.replies, i)\n delete(sp.done, i)\n } else {\n minNotDone = i\n }\n }\n }\n\n sp.px.Done(minNotDone - 1)\n}\n\n\/\/@Make it work for multiple types of arguments\nfunc (sp *SQLPaxos) commit(op Op) interface{} {\n\n sp.mu.Lock()\n defer sp.mu.Unlock()\n\n \/\/ first check if this request has already been executed\n reply, ok := sp.checkIfExecuted(op)\n if ok {\n return reply\n }\n\n \/\/ reserve a slot in the paxos log for this operation\n seq := sp.reserveSlot(op)\n\n next := sp.next\n if next > seq {\n \/\/ our operation has already been executed\n reply = sp.replies[seq]\n } else {\n \/\/ fill holes in the log and execute our operation\n reply = sp.fillHoles(next, seq)\n }\n\n \/\/ delete un-needed log entries to free up memory\n sp.freeMemory(seq)\n\n return reply\n}\n\nfunc (sp *SQLPaxos) ExecuteSQL(args *ExecArgs, reply *ExecReply) error {\n \/\/ execute this operation and store the response in r\n op := Op{Type:Execute, Args: *args}\n r := sp.commit(op)\n\n if r != nil {\n reply.Result = r.(ExecReply).Result\n reply.Value = r.(ExecReply).Value\n reply.Err = r.(ExecReply).Err\n }\n\n return nil\n}\n\n\/\/ open the connection to the database\nfunc (sp *SQLPaxos) Open(args *OpenArgs, reply *OpenReply) error {\n \/\/ execute this operation and store the response in r\n op := Op{Type:Open, Args: *args}\n r := sp.commit(op)\n\n if r != nil {\n reply.Err = r.(OpenReply).Err\n }\n\n return nil\n}\n\n\/\/ close the connection to the database\nfunc (sp *SQLPaxos) Close(args *CloseArgs, reply *CloseReply) error {\n \/\/ execute this operation and store the response in r\n op := Op{Type:Close, Args: *args}\n r := sp.commit(op)\n\n if r != nil {\n reply.Err = r.(CloseReply).Err\n }\n\n return nil\n}\n\n\/\/ tell the server to shut itself down.\nfunc (sp *SQLPaxos) kill() {\n sp.dead = true\n sp.l.Close()\n sp.px.Kill()\n}\n\n\/\/\n\/\/ servers[] contains the ports of the set of\n\/\/ servers that will cooperate via Paxos to\n\/\/ form the fault-tolerant key\/value service.\n\/\/ me is the index of the current server in servers[].\n\/\/\nfunc StartServer(servers []string, me int) *SQLPaxos {\n \/\/ call gob.Register on structures you want\n \/\/ Go's RPC library to marshall\/unmarshall.\n gob.Register(Op{})\n gob.Register(ExecArgs{})\n\n sp := new(SQLPaxos)\n sp.me = me\n\n \/\/ Your initialization code here.\n sp.ops = make(map[int]Op)\n sp.data = make(map[string]string)\n sp.replies = make(map[int]interface{})\n sp.done = make(map[int]bool)\n sp.lastSeen = make(map[int64]LastSeen)\n sp.next = 0\n sp.connections = make(map[int64]*db.DBManager)\n sp.logger = logger.Make(\"sqlpaxos_log.txt\")\n \n rpcs := rpc.NewServer()\n rpcs.Register(sp)\n\n sp.px = paxos.Make(servers, me, rpcs)\n\n os.Remove(servers[me]) \/\/ only needed for \"unix\"\n l, e := net.Listen(\"unix\", servers[me]);\n \/\/l, e := net.Listen(\"tcp\", servers[me]);\n if e != nil {\n log.Fatal(\"listen error: \", e);\n }\n sp.l = l\n\n\n \/\/ please do not change any of the following code,\n \/\/ or do anything to subvert it.\n\n go func() {\n for sp.dead == false {\n conn, err := sp.l.Accept()\n if err == nil && sp.dead == false {\n if sp.unreliable && (rand.Int63() % 1000) < 100 {\n \/\/ discard the request.\n conn.Close()\n } else if sp.unreliable && (rand.Int63() % 1000) < 200 {\n \/\/ process the request but force discard of reply.\n c1 := conn.(*net.UnixConn)\n\t \/\/c1 := conn.(*net.TCPConn)\n f, _ := c1.File()\n err := syscall.Shutdown(int(f.Fd()), syscall.SHUT_WR)\n if err != nil {\n fmt.Printf(\"shutdown: %v\\n\", err)\n }\n go rpcs.ServeConn(conn)\n } else {\n go rpcs.ServeConn(conn)\n }\n } else if err == nil {\n conn.Close()\n }\n if err != nil && sp.dead == false {\n fmt.Printf(\"SQLPaxos(%v) accept: %v\\n\", me, err.Error())\n\tsp.kill()\n }\n }\n }()\n\n return sp\n}\n\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n)\n\nconst VERSION = \"v0.0.2\"\n\nvar logger = log.New(os.Stderr, \"\", 0)\n\ntype OriginConfig struct {\n\tAddr string\n\tDirect bool\n}\n\nfunc main() {\n\tport := flag.Int(\"p\", 0, \"\")\n\tip := flag.String(\"i\", \"127.0.0.1\", \"\")\n\toriginPort := flag.Int(\"P\", 0, \"\")\n\toriginHost := flag.String(\"H\", \"\", \"\")\n\toriginDirect := flag.Bool(\"D\", false, \"\")\n\tcertPath := flag.String(\"c\", \"\", \"\")\n\tkeyPath := flag.String(\"k\", \"\", \"\")\n\tlogFormat := flag.String(\"l\", \"default\", \"\")\n\tversion := flag.Bool(\"version\", false, \"\")\n\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s [OPTIONS]\\n\\n\", os.Args[0])\n\t\tfmt.Println(\"Options:\")\n\t\tfmt.Println(\" -p: Port (Default: 443)\")\n\t\tfmt.Println(\" -i: IP Address (Default: 127.0.0.1)\")\n\t\tfmt.Println(\" -P: Origin port\")\n\t\tfmt.Println(\" -H: Origin host\")\n\t\tfmt.Println(\" -D: Use HTTP\/2 direct mode to connect origin\")\n\t\tfmt.Println(\" -c: Certificate file\")\n\t\tfmt.Println(\" -k: Certificate key file\")\n\t\tfmt.Println(\" -l: Log format (default or json, Default: default)\")\n\t\tfmt.Println(\" --version: Display version information and exit.\")\n\t\tfmt.Println(\" --help: Display this help and exit.\")\n\t\tos.Exit(1)\n\t}\n\n\tflag.Parse()\n\n\tif *version {\n\t\tlogger.Printf(\"h2analyzer %s\\n\", VERSION)\n\t\tos.Exit(0)\n\t}\n\n\tif *port == 0 {\n\t\t*port = 443\n\t}\n\taddr := fmt.Sprintf(\"%s:%d\", *ip, *port)\n\n\tif *originPort == 0 {\n\t\tlogger.Fatalln(\"Origin port is not specified\")\n\t}\n\tif *originHost == \"\" {\n\t\tlogger.Fatalln(\"Origin host is not specified\")\n\t}\n\toriginConfig := OriginConfig{\n\t\tAddr: fmt.Sprintf(\"%s:%d\", *originHost, *originPort),\n\t\tDirect: *originDirect,\n\t}\n\n\tvar formatter Formatter\n\tif *logFormat == \"json\" {\n\t\tformatter = JSONFormatter\n\t} else {\n\t\tformatter = DefaultFormatter\n\t}\n\n\tcert, err := tls.LoadX509KeyPair(*certPath, *keyPath)\n\tif err != nil {\n\t\tlogger.Fatalln(\"Invalid certificate file\")\n\t}\n\n\tconfig := &tls.Config{}\n\tconfig.Certificates = []tls.Certificate{cert}\n\tconfig.NextProtos = append(config.NextProtos, \"h2\", \"h2-16\", \"h2-15\", \"h2-14\")\n\n\tserver, err := tls.Listen(\"tcp\", addr, config)\n\tif err != nil {\n\t\tlogger.Fatalf(\"Could not bind address - %s\\n\", addr)\n\t}\n\n\tdefer server.Close()\n\n\tfor {\n\t\tremoteConn, err := server.Accept()\n\t\tif err != nil {\n\t\t\tlogger.Printf(\"Unable to accept: %s\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tgo handlePeer(remoteConn, originConfig, formatter)\n\t}\n}\n\nfunc handlePeer(remoteConn net.Conn, originConfig OriginConfig, formatter Formatter) {\n\tvar originConn net.Conn\n\tvar err error\n\n\tdefer remoteConn.Close()\n\n\tdumper := NewFrameDumper(remoteConn.RemoteAddr(), formatter)\n\tdefer dumper.Close()\n\n\tremoteCh, remoteErrCh := handleConnection(remoteConn)\n\n\tselect {\n\tcase chunk := <-remoteCh:\n\t\tconnState := remoteConn.(*tls.Conn).ConnectionState()\n\t\tdumper.DumpConnectionState(connState)\n\n\t\tconfig := &tls.Config{}\n\t\tconfig.NextProtos = append(config.NextProtos, connState.NegotiatedProtocol)\n\t\tconfig.CipherSuites = []uint16{connState.CipherSuite}\n\t\tconfig.ServerName = connState.ServerName\n\t\tconfig.InsecureSkipVerify = true\n\n\t\tif originConfig.Direct {\n\t\t\toriginConn, err = net.Dial(\"tcp\", originConfig.Addr)\n\t\t} else {\n\t\t\tdialer := new(net.Dialer)\n\t\t\toriginConn, err = tls.DialWithDialer(dialer, \"tcp\", originConfig.Addr, config)\n\t\t}\n\t\tif err != nil {\n\t\t\tlogger.Printf(\"Unable to connect to the origin: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tdefer originConn.Close()\n\n\t\t_, err = originConn.Write(chunk)\n\t\tif err != nil {\n\t\t\tlogger.Printf(\"Unable to write data to the origin: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tdumper.DumpFrame(chunk, true)\n\n\tcase err := <-remoteErrCh:\n\t\tif err != io.EOF {\n\t\t\tlogger.Printf(\"Connection error: %s\", err)\n\t\t}\n\t\treturn\n\t}\n\n\toriginCh, originErrCh := handleConnection(originConn)\n\n\tfor {\n\t\tselect {\n\t\tcase chunk := <-remoteCh:\n\t\t\t_, err := originConn.Write(chunk)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Printf(\"Unable to write data to the origin: %s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdumper.DumpFrame(chunk, true)\n\n\t\tcase err := <-remoteErrCh:\n\t\t\tif err != io.EOF {\n\t\t\t\tlogger.Printf(\"Connection error: %s\", err)\n\t\t\t}\n\t\t\treturn\n\n\t\tcase chunk := <-originCh:\n\t\t\t_, err := remoteConn.Write(chunk)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Printf(\"Unable to write data to the connection: %s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdumper.DumpFrame(chunk, false)\n\n\t\tcase err := <-originErrCh:\n\t\t\tif err != io.EOF {\n\t\t\t\tlogger.Printf(\"Origin error: %s\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc handleConnection(conn net.Conn) (<-chan []byte, <-chan error) {\n\tdataCh := make(chan []byte)\n\terrCh := make(chan error, 1)\n\n\tgo func() {\n\t\tfor {\n\t\t\tbuf := make([]byte, 16384)\n\n\t\t\tn, err := conn.Read(buf)\n\t\t\tif err != nil {\n\t\t\t\terrCh <- err\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tdataCh <- buf[:n]\n\t\t}\n\t}()\n\n\treturn dataCh, errCh\n}\n<commit_msg>Fix command name<commit_after>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n)\n\nconst VERSION = \"v0.0.2\"\n\nvar logger = log.New(os.Stderr, \"\", 0)\n\ntype OriginConfig struct {\n\tAddr string\n\tDirect bool\n}\n\nfunc main() {\n\tport := flag.Int(\"p\", 0, \"\")\n\tip := flag.String(\"i\", \"127.0.0.1\", \"\")\n\toriginPort := flag.Int(\"P\", 0, \"\")\n\toriginHost := flag.String(\"H\", \"\", \"\")\n\toriginDirect := flag.Bool(\"D\", false, \"\")\n\tcertPath := flag.String(\"c\", \"\", \"\")\n\tkeyPath := flag.String(\"k\", \"\", \"\")\n\tlogFormat := flag.String(\"l\", \"default\", \"\")\n\tversion := flag.Bool(\"version\", false, \"\")\n\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s [OPTIONS]\\n\\n\", os.Args[0])\n\t\tfmt.Println(\"Options:\")\n\t\tfmt.Println(\" -p: Port (Default: 443)\")\n\t\tfmt.Println(\" -i: IP Address (Default: 127.0.0.1)\")\n\t\tfmt.Println(\" -P: Origin port\")\n\t\tfmt.Println(\" -H: Origin host\")\n\t\tfmt.Println(\" -D: Use HTTP\/2 direct mode to connect origin\")\n\t\tfmt.Println(\" -c: Certificate file\")\n\t\tfmt.Println(\" -k: Certificate key file\")\n\t\tfmt.Println(\" -l: Log format (default or json, Default: default)\")\n\t\tfmt.Println(\" --version: Display version information and exit.\")\n\t\tfmt.Println(\" --help: Display this help and exit.\")\n\t\tos.Exit(1)\n\t}\n\n\tflag.Parse()\n\n\tif *version {\n\t\tlogger.Printf(\"h2a %s\\n\", VERSION)\n\t\tos.Exit(0)\n\t}\n\n\tif *port == 0 {\n\t\t*port = 443\n\t}\n\taddr := fmt.Sprintf(\"%s:%d\", *ip, *port)\n\n\tif *originPort == 0 {\n\t\tlogger.Fatalln(\"Origin port is not specified\")\n\t}\n\tif *originHost == \"\" {\n\t\tlogger.Fatalln(\"Origin host is not specified\")\n\t}\n\toriginConfig := OriginConfig{\n\t\tAddr: fmt.Sprintf(\"%s:%d\", *originHost, *originPort),\n\t\tDirect: *originDirect,\n\t}\n\n\tvar formatter Formatter\n\tif *logFormat == \"json\" {\n\t\tformatter = JSONFormatter\n\t} else {\n\t\tformatter = DefaultFormatter\n\t}\n\n\tcert, err := tls.LoadX509KeyPair(*certPath, *keyPath)\n\tif err != nil {\n\t\tlogger.Fatalln(\"Invalid certificate file\")\n\t}\n\n\tconfig := &tls.Config{}\n\tconfig.Certificates = []tls.Certificate{cert}\n\tconfig.NextProtos = append(config.NextProtos, \"h2\", \"h2-16\", \"h2-15\", \"h2-14\")\n\n\tserver, err := tls.Listen(\"tcp\", addr, config)\n\tif err != nil {\n\t\tlogger.Fatalf(\"Could not bind address - %s\\n\", addr)\n\t}\n\n\tdefer server.Close()\n\n\tfor {\n\t\tremoteConn, err := server.Accept()\n\t\tif err != nil {\n\t\t\tlogger.Printf(\"Unable to accept: %s\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tgo handlePeer(remoteConn, originConfig, formatter)\n\t}\n}\n\nfunc handlePeer(remoteConn net.Conn, originConfig OriginConfig, formatter Formatter) {\n\tvar originConn net.Conn\n\tvar err error\n\n\tdefer remoteConn.Close()\n\n\tdumper := NewFrameDumper(remoteConn.RemoteAddr(), formatter)\n\tdefer dumper.Close()\n\n\tremoteCh, remoteErrCh := handleConnection(remoteConn)\n\n\tselect {\n\tcase chunk := <-remoteCh:\n\t\tconnState := remoteConn.(*tls.Conn).ConnectionState()\n\t\tdumper.DumpConnectionState(connState)\n\n\t\tconfig := &tls.Config{}\n\t\tconfig.NextProtos = append(config.NextProtos, connState.NegotiatedProtocol)\n\t\tconfig.CipherSuites = []uint16{connState.CipherSuite}\n\t\tconfig.ServerName = connState.ServerName\n\t\tconfig.InsecureSkipVerify = true\n\n\t\tif originConfig.Direct {\n\t\t\toriginConn, err = net.Dial(\"tcp\", originConfig.Addr)\n\t\t} else {\n\t\t\tdialer := new(net.Dialer)\n\t\t\toriginConn, err = tls.DialWithDialer(dialer, \"tcp\", originConfig.Addr, config)\n\t\t}\n\t\tif err != nil {\n\t\t\tlogger.Printf(\"Unable to connect to the origin: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tdefer originConn.Close()\n\n\t\t_, err = originConn.Write(chunk)\n\t\tif err != nil {\n\t\t\tlogger.Printf(\"Unable to write data to the origin: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tdumper.DumpFrame(chunk, true)\n\n\tcase err := <-remoteErrCh:\n\t\tif err != io.EOF {\n\t\t\tlogger.Printf(\"Connection error: %s\", err)\n\t\t}\n\t\treturn\n\t}\n\n\toriginCh, originErrCh := handleConnection(originConn)\n\n\tfor {\n\t\tselect {\n\t\tcase chunk := <-remoteCh:\n\t\t\t_, err := originConn.Write(chunk)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Printf(\"Unable to write data to the origin: %s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdumper.DumpFrame(chunk, true)\n\n\t\tcase err := <-remoteErrCh:\n\t\t\tif err != io.EOF {\n\t\t\t\tlogger.Printf(\"Connection error: %s\", err)\n\t\t\t}\n\t\t\treturn\n\n\t\tcase chunk := <-originCh:\n\t\t\t_, err := remoteConn.Write(chunk)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Printf(\"Unable to write data to the connection: %s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdumper.DumpFrame(chunk, false)\n\n\t\tcase err := <-originErrCh:\n\t\t\tif err != io.EOF {\n\t\t\t\tlogger.Printf(\"Origin error: %s\", err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc handleConnection(conn net.Conn) (<-chan []byte, <-chan error) {\n\tdataCh := make(chan []byte)\n\terrCh := make(chan error, 1)\n\n\tgo func() {\n\t\tfor {\n\t\t\tbuf := make([]byte, 16384)\n\n\t\t\tn, err := conn.Read(buf)\n\t\t\tif err != nil {\n\t\t\t\terrCh <- err\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tdataCh <- buf[:n]\n\t\t}\n\t}()\n\n\treturn dataCh, errCh\n}\n<|endoftext|>"} {"text":"<commit_before>package cfitsio\n\n\/\/ #include \"go-cfitsio.h\"\nimport \"C\"\nimport \"unsafe\"\n\ntype HduType int\n\nconst (\n\tImageHdu HduType = C.IMAGE_HDU \/\/ Primary Array or IMAGE HDU\n\tAsciiTbl HduType = C.ASCII_TBL \/\/ ASCII table HDU\n\tBinaryTbl HduType = C.BINARY_TBL \/\/ Binary table HDU\n\tAnyHdy HduType = C.ANY_HDU \/\/ matches any HDU type\n)\n\n\/\/ MoveAbsHdu moves to a different HDU in the file\nfunc (f *File) MoveAbsHdu(hdu int) (HduType, error) {\n\tc_hdu := C.int(hdu)\n\tc_htype := C.int(0)\n\tc_status := C.int(0)\n\n\tC.fits_movabs_hdu(f.c, c_hdu, &c_htype, &c_status)\n\tif c_status > 0 {\n\t\treturn HduType(c_htype), to_err(c_status)\n\t}\n\n\treturn HduType(c_htype), nil\n}\n\n\/\/ MoveRelHdu moves to a different HDU in the file\nfunc (f *File) MoveRelHdu(n int) (HduType, error) {\n\tc_n := C.int(n)\n\tc_htype := C.int(0)\n\tc_status := C.int(0)\n\n\tC.fits_movrel_hdu(f.c, c_n, &c_htype, &c_status)\n\tif c_status > 0 {\n\t\treturn HduType(c_htype), to_err(c_status)\n\t}\n\n\treturn HduType(c_htype), nil\n}\n\n\/\/ MoveNamHdu moves to a different HDU in the file\nfunc (f *File) MoveNamHdu(hdu HduType, extname string, extvers int) error {\n\tc_hdu := C.int(hdu)\n\tc_name := C.CString(extname)\n\tdefer C.free(unsafe.Pointer(c_name))\n\tc_vers := C.int(extvers)\n\tc_status := C.int(0)\n\n\tC.fits_movnam_hdu(f.c, c_hdu, c_name, c_vers, &c_status)\n\tif c_status > 0 {\n\t\treturn to_err(c_status)\n\t}\n\n\treturn nil\n}\n\n\/\/ NumHdus returns the total number of HDUs in the FITS file.\n\/\/ This returns the number of completely defined HDUs in the file. If a new HDU has just been added to the FITS file, then that last HDU will only be counted if it has been closed, or if data has been written to the HDU. The current HDU remains unchanged by this routine.\nfunc (f *File) NumHdus() (int, error) {\n\tc_n := C.int(0)\n\tc_status := C.int(0)\n\tC.fits_get_num_hdus(f.c, &c_n, &c_status)\n\tif c_status > 0 {\n\t\treturn 0, to_err(c_status)\n\t}\n\n\treturn int(c_n), nil\n}\n\n\/\/ HduNum returns the number of the current HDU (CHDU) in the FITS file (where the primary array = 1). This function returns the HDU number rather than a status value.\nfunc (f *File) HduNum() int {\n\n\tc_n := C.int(0)\n\tC.fits_get_hdu_num(f.c, &c_n)\n\treturn int(c_n)\n}\n\n\/\/ HduType returns the type of the current HDU in the FITS file. The possible values for hdutype are: IMAGE_HDU, ASCII_TBL, or BINARY_TBL.\nfunc (f *File) HduType() (HduType, error) {\n\tc_hdu := C.int(0)\n\tc_status := C.int(0)\n\tC.fits_get_hdu_type(f.c, &c_hdu, &c_status)\n\tif c_status > 0 {\n\t\treturn 0, to_err(c_status)\n\t}\n\n\treturn HduType(c_hdu), nil\n}\n\n\/\/ EOF\n<commit_msg>hdu: wrap fits_copy_file<commit_after>package cfitsio\n\n\/\/ #include \"go-cfitsio.h\"\nimport \"C\"\nimport \"unsafe\"\n\ntype HduType int\n\nconst (\n\tImageHdu HduType = C.IMAGE_HDU \/\/ Primary Array or IMAGE HDU\n\tAsciiTbl HduType = C.ASCII_TBL \/\/ ASCII table HDU\n\tBinaryTbl HduType = C.BINARY_TBL \/\/ Binary table HDU\n\tAnyHdy HduType = C.ANY_HDU \/\/ matches any HDU type\n)\n\n\/\/ MoveAbsHdu moves to a different HDU in the file\nfunc (f *File) MoveAbsHdu(hdu int) (HduType, error) {\n\tc_hdu := C.int(hdu)\n\tc_htype := C.int(0)\n\tc_status := C.int(0)\n\n\tC.fits_movabs_hdu(f.c, c_hdu, &c_htype, &c_status)\n\tif c_status > 0 {\n\t\treturn HduType(c_htype), to_err(c_status)\n\t}\n\n\treturn HduType(c_htype), nil\n}\n\n\/\/ MoveRelHdu moves to a different HDU in the file\nfunc (f *File) MoveRelHdu(n int) (HduType, error) {\n\tc_n := C.int(n)\n\tc_htype := C.int(0)\n\tc_status := C.int(0)\n\n\tC.fits_movrel_hdu(f.c, c_n, &c_htype, &c_status)\n\tif c_status > 0 {\n\t\treturn HduType(c_htype), to_err(c_status)\n\t}\n\n\treturn HduType(c_htype), nil\n}\n\n\/\/ MoveNamHdu moves to a different HDU in the file\nfunc (f *File) MoveNamHdu(hdu HduType, extname string, extvers int) error {\n\tc_hdu := C.int(hdu)\n\tc_name := C.CString(extname)\n\tdefer C.free(unsafe.Pointer(c_name))\n\tc_vers := C.int(extvers)\n\tc_status := C.int(0)\n\n\tC.fits_movnam_hdu(f.c, c_hdu, c_name, c_vers, &c_status)\n\tif c_status > 0 {\n\t\treturn to_err(c_status)\n\t}\n\n\treturn nil\n}\n\n\/\/ NumHdus returns the total number of HDUs in the FITS file.\n\/\/ This returns the number of completely defined HDUs in the file. If a new HDU has just been added to the FITS file, then that last HDU will only be counted if it has been closed, or if data has been written to the HDU. The current HDU remains unchanged by this routine.\nfunc (f *File) NumHdus() (int, error) {\n\tc_n := C.int(0)\n\tc_status := C.int(0)\n\tC.fits_get_num_hdus(f.c, &c_n, &c_status)\n\tif c_status > 0 {\n\t\treturn 0, to_err(c_status)\n\t}\n\n\treturn int(c_n), nil\n}\n\n\/\/ HduNum returns the number of the current HDU (CHDU) in the FITS file (where the primary array = 1). This function returns the HDU number rather than a status value.\nfunc (f *File) HduNum() int {\n\n\tc_n := C.int(0)\n\tC.fits_get_hdu_num(f.c, &c_n)\n\treturn int(c_n)\n}\n\n\/\/ HduType returns the type of the current HDU in the FITS file. The possible values for hdutype are: IMAGE_HDU, ASCII_TBL, or BINARY_TBL.\nfunc (f *File) HduType() (HduType, error) {\n\tc_hdu := C.int(0)\n\tc_status := C.int(0)\n\tC.fits_get_hdu_type(f.c, &c_hdu, &c_status)\n\tif c_status > 0 {\n\t\treturn 0, to_err(c_status)\n\t}\n\n\treturn HduType(c_hdu), nil\n}\n\n\/\/ Copy all or part of the HDUs in the FITS file associated with infptr and append them to the end of the FITS file associated with outfptr. If 'previous' is true, then any HDUs preceding the current HDU in the input file will be copied to the output file. Similarly, 'current' and 'following' determine whether the current HDU, and\/or any following HDUs in the input file will be copied to the output file. Thus, if all 3 parameters are true, then the entire input file will be copied. On exit, the current HDU in the input file will be unchanged, and the last HDU in the output file will be the current HDU.\nfunc (f *File) Copy(out *File, previous, current, following bool) error {\n\tc_previous := C.int(0)\n\tif previous {\n\t\tc_previous = C.int(1)\n\t}\n\tc_current := C.int(0)\n\tif current {\n\t\tc_current = C.int(1)\n\t}\n\tc_following := C.int(0)\n\tif following {\n\t\tc_following = C.int(1)\n\t}\n\tc_status := C.int(0)\n\tC.fits_copy_file(f.c, out.c, c_previous, c_current, c_following, &c_status)\n\tif c_status > 0 {\n\t\treturn to_err(c_status)\n\t}\n\treturn nil\n}\n\n\/\/ EOF\n<|endoftext|>"} {"text":"<commit_before>package model_test\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/crezam\/actions-on-google-golang\/internal\/test\"\n\t\"github.com\/crezam\/actions-on-google-golang\/model\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestRequestParsing(t *testing.T) {\n\n\tvar req model.ApiAiRequest\n\n\tfile, _ := os.Open(\".\/data\/sample_request1.json\")\n\tdec := json.NewDecoder(file)\n\n\terr := dec.Decode(&req)\n\n\t\/\/ test if any issues decoding file\n\ttest.Ok(t, err)\n\n\t\/\/ assert correct parsing\n\ttest.Equals(t, \"209eefa7-adb5-4d03-a8b9-9f7ae68a0c11\", req.Id)\n\n\texpectedTimestamp, _ := time.Parse(time.RFC3339Nano, \"2016-10-10T07:41:40.098Z\")\n\ttest.Equals(t, expectedTimestamp, req.Timestamp)\n\n\ttest.Equals(t, \"Hi, my name is Sam!\", req.Result.ResolvedQuery)\n\ttest.Equals(t, \"agent\", req.Result.Source)\n\ttest.Equals(t, \"greetings\", req.Result.Action)\n\ttest.Equals(t, false, req.Result.ActionIncomplete)\n\ttest.Equals(t, \"Sam\", req.Result.Parameters[\"user_name\"])\n\ttest.Equals(t, \"\", req.Result.Parameters[\"school\"])\n\n\ttest.Equals(t, \"greetings\", req.Result.Contexts[0].Name)\n\ttest.Equals(t, \"Sam\", req.Result.Contexts[0].Parameters[\"user_name\"])\n\ttest.Equals(t, \"Sam!\", req.Result.Contexts[0].Parameters[\"user_name.original\"])\n\n\ttest.Equals(t, \"373a354b-c15a-4a60-ac9d-a9f2aee76cb4\", req.Result.Metadata.IntentID)\n\ttest.Equals(t, \"true\", req.Result.Metadata.WebhookUsed)\n\ttest.Equals(t, \"greetings\", req.Result.Metadata.IntentName)\n\n\ttest.Equals(t, \"Nice to meet you, Sam!\", req.Result.Fulfillment.Speech)\n\n\ttest.Equals(t, float64(1), req.Result.Score)\n\n\ttest.Equals(t, \"...\", req.OriginalRequest.Data.User.UserID)\n\ttest.Equals(t, \"Sam\", req.OriginalRequest.Data.User.Profile.DisplayName)\n\ttest.Equals(t, \"Sam\", req.OriginalRequest.Data.User.Profile.GivenName)\n\ttest.Equals(t, \"Johnson\", req.OriginalRequest.Data.User.Profile.FamilyName)\n\n\ttest.Equals(t, \"...\", req.OriginalRequest.Data.User.AccessToken)\n\n\ttest.Equals(t, 123.456, req.OriginalRequest.Device.Location.Coordinates.latitude)\n\n\n\n\n\n\n}\n<commit_msg>Fix test, incorrect nested field<commit_after>package model_test\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/crezam\/actions-on-google-golang\/internal\/test\"\n\t\"github.com\/crezam\/actions-on-google-golang\/model\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestRequestParsing(t *testing.T) {\n\n\tvar req model.ApiAiRequest\n\n\tfile, _ := os.Open(\".\/data\/sample_request1.json\")\n\tdec := json.NewDecoder(file)\n\n\terr := dec.Decode(&req)\n\n\t\/\/ test if any issues decoding file\n\ttest.Ok(t, err)\n\n\t\/\/ assert correct parsing\n\ttest.Equals(t, \"209eefa7-adb5-4d03-a8b9-9f7ae68a0c11\", req.Id)\n\n\texpectedTimestamp, _ := time.Parse(time.RFC3339Nano, \"2016-10-10T07:41:40.098Z\")\n\ttest.Equals(t, expectedTimestamp, req.Timestamp)\n\n\ttest.Equals(t, \"Hi, my name is Sam!\", req.Result.ResolvedQuery)\n\ttest.Equals(t, \"agent\", req.Result.Source)\n\ttest.Equals(t, \"greetings\", req.Result.Action)\n\ttest.Equals(t, false, req.Result.ActionIncomplete)\n\ttest.Equals(t, \"Sam\", req.Result.Parameters[\"user_name\"])\n\ttest.Equals(t, \"\", req.Result.Parameters[\"school\"])\n\n\ttest.Equals(t, \"greetings\", req.Result.Contexts[0].Name)\n\ttest.Equals(t, \"Sam\", req.Result.Contexts[0].Parameters[\"user_name\"])\n\ttest.Equals(t, \"Sam!\", req.Result.Contexts[0].Parameters[\"user_name.original\"])\n\n\ttest.Equals(t, \"373a354b-c15a-4a60-ac9d-a9f2aee76cb4\", req.Result.Metadata.IntentID)\n\ttest.Equals(t, \"true\", req.Result.Metadata.WebhookUsed)\n\ttest.Equals(t, \"greetings\", req.Result.Metadata.IntentName)\n\n\ttest.Equals(t, \"Nice to meet you, Sam!\", req.Result.Fulfillment.Speech)\n\n\ttest.Equals(t, float64(1), req.Result.Score)\n\n\ttest.Equals(t, \"...\", req.OriginalRequest.Data.User.UserID)\n\ttest.Equals(t, \"Sam\", req.OriginalRequest.Data.User.Profile.DisplayName)\n\ttest.Equals(t, \"Sam\", req.OriginalRequest.Data.User.Profile.GivenName)\n\ttest.Equals(t, \"Johnson\", req.OriginalRequest.Data.User.Profile.FamilyName)\n\n\ttest.Equals(t, \"...\", req.OriginalRequest.Data.User.AccessToken)\n\n\ttest.Equals(t, 123.456, req.OriginalRequest.Data.Device.Location.Coordinates.Latitude)\n\n\n\n\n\n\n}\n<|endoftext|>"} {"text":"<commit_before>package models\n\nimport (\n\t\"bytes\"\n\t\"time\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"encoding\/base64\"\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/astaxie\/beego\"\n\t\"github.com\/astaxie\/beego\/logs\"\n\t\"github.com\/astaxie\/beego\/orm\"\n\t\"github.com\/lifei6671\/mindoc\/conf\"\n\t\"github.com\/lifei6671\/mindoc\/converter\"\n\t\"github.com\/lifei6671\/mindoc\/utils\"\n\t\"github.com\/russross\/blackfriday\"\n)\n\ntype BookResult struct {\n\tBookId int `json:\"book_id\"`\n\tBookName string `json:\"book_name\"`\n\tIdentify string `json:\"identify\"`\n\tOrderIndex int `json:\"order_index\"`\n\tDescription string `json:\"description\"`\n\tPublisher string `json:\"publisher\"`\n\tPrivatelyOwned int `json:\"privately_owned\"`\n\tPrivateToken string `json:\"private_token\"`\n\tDocCount int `json:\"doc_count\"`\n\tCommentStatus string `json:\"comment_status\"`\n\tCommentCount int `json:\"comment_count\"`\n\tCreateTime time.Time `json:\"create_time\"`\n\tCreateName string `json:\"create_name\"`\n\tModifyTime time.Time `json:\"modify_time\"`\n\tCover string `json:\"cover\"`\n\tTheme string `json:\"theme\"`\n\tLabel string `json:\"label\"`\n\tMemberId int `json:\"member_id\"`\n\tEditor string `json:\"editor\"`\n\tAutoRelease bool `json:\"auto_release\"`\n\n\tRelationshipId int `json:\"relationship_id\"`\n\tRoleId int `json:\"role_id\"`\n\tRoleName string `json:\"role_name\"`\n\tStatus int\n\n\tLastModifyText string `json:\"last_modify_text\"`\n\tIsDisplayComment bool `json:\"is_display_comment\"`\n}\n\nfunc NewBookResult() *BookResult {\n\treturn &BookResult{}\n}\n\n\/\/ 根据项目标识查询项目以及指定用户权限的信息.\nfunc (m *BookResult) FindByIdentify(identify string, member_id int) (*BookResult, error) {\n\tif identify == \"\" || member_id <= 0 {\n\t\treturn m, ErrInvalidParameter\n\t}\n\to := orm.NewOrm()\n\n\tbook := NewBook()\n\n\terr := o.QueryTable(book.TableNameWithPrefix()).Filter(\"identify\", identify).One(book)\n\n\tif err != nil {\n\t\treturn m, err\n\t}\n\n\trelationship := NewRelationship()\n\n\terr = o.QueryTable(relationship.TableNameWithPrefix()).Filter(\"book_id\", book.BookId).Filter(\"member_id\", member_id).One(relationship)\n\n\tif err != nil {\n\t\treturn m, err\n\t}\n\tvar relationship2 Relationship\n\n\terr = o.QueryTable(relationship.TableNameWithPrefix()).Filter(\"book_id\", book.BookId).Filter(\"role_id\", 0).One(&relationship2)\n\n\tif err != nil {\n\t\tlogs.Error(\"根据项目标识查询项目以及指定用户权限的信息 => \", err)\n\t\treturn m, ErrPermissionDenied\n\t}\n\n\tmember, err := NewMember().Find(relationship2.MemberId)\n\tif err != nil {\n\t\treturn m, err\n\t}\n\n\tm = NewBookResult().ToBookResult(*book)\n\n\tm.CreateName = member.Account\n\tm.MemberId = relationship.MemberId\n\tm.RoleId = relationship.RoleId\n\tm.RelationshipId = relationship.RelationshipId\n\n\tif m.RoleId == conf.BookFounder {\n\t\tm.RoleName = \"创始人\"\n\t} else if m.RoleId == conf.BookAdmin {\n\t\tm.RoleName = \"管理员\"\n\t} else if m.RoleId == conf.BookEditor {\n\t\tm.RoleName = \"编辑者\"\n\t} else if m.RoleId == conf.BookObserver {\n\t\tm.RoleName = \"观察者\"\n\t}\n\n\tdoc := NewDocument()\n\n\terr = o.QueryTable(doc.TableNameWithPrefix()).Filter(\"book_id\", book.BookId).OrderBy(\"modify_time\").One(doc)\n\n\tif err == nil {\n\t\tmember2 := NewMember()\n\t\tmember2.Find(doc.ModifyAt)\n\n\t\tm.LastModifyText = member2.Account + \" 于 \" + doc.ModifyTime.Format(\"2006-01-02 15:04:05\")\n\t}\n\n\treturn m, nil\n}\n\nfunc (m *BookResult) FindToPager(pageIndex, pageSize int) (books []*BookResult, totalCount int, err error) {\n\to := orm.NewOrm()\n\n\tcount, err := o.QueryTable(NewBook().TableNameWithPrefix()).Count()\n\n\tif err != nil {\n\t\treturn\n\t}\n\ttotalCount = int(count)\n\n\tsql := `SELECT\n\t\t\tbook.*,rel.relationship_id,rel.role_id,m.account AS create_name\n\t\tFROM md_books AS book\n\t\t\tLEFT JOIN md_relationship AS rel ON rel.book_id = book.book_id AND rel.role_id = 0\n\t\t\tLEFT JOIN md_members AS m ON rel.member_id = m.member_id\n\t\tORDER BY book.order_index DESC ,book.book_id DESC LIMIT ?,?`\n\n\toffset := (pageIndex - 1) * pageSize\n\n\t_, err = o.Raw(sql, offset, pageSize).QueryRows(&books)\n\n\treturn\n}\n\n\/\/实体转换\nfunc (m *BookResult) ToBookResult(book Book) *BookResult {\n\n\tm.BookId = book.BookId\n\tm.BookName = book.BookName\n\tm.Identify = book.Identify\n\tm.OrderIndex = book.OrderIndex\n\tm.Description = strings.Replace(book.Description, \"\\r\\n\", \"<br\/>\", -1)\n\tm.PrivatelyOwned = book.PrivatelyOwned\n\tm.PrivateToken = book.PrivateToken\n\tm.DocCount = book.DocCount\n\tm.CommentStatus = book.CommentStatus\n\tm.CommentCount = book.CommentCount\n\tm.CreateTime = book.CreateTime\n\tm.ModifyTime = book.ModifyTime\n\tm.Cover = book.Cover\n\tm.Label = book.Label\n\tm.Status = book.Status\n\tm.Editor = book.Editor\n\tm.Theme = book.Theme\n\tm.AutoRelease = book.AutoRelease == 1\n\tm.Publisher = book.Publisher\n\n\tif book.Theme == \"\" {\n\t\tm.Theme = \"default\"\n\t}\n\tif book.Editor == \"\" {\n\t\tm.Editor = \"markdown\"\n\t}\n\treturn m\n}\n\nfunc (m *BookResult) Converter(sessionId string) (ConvertBookResult, error) {\n\n\tconvertBookResult := ConvertBookResult{}\n\n\toutputPath := filepath.Join(conf.WorkingDirectory,\"uploads\",\"books\", strconv.Itoa(m.BookId))\n\tviewPath := beego.BConfig.WebConfig.ViewsPath\n\n\tpdfpath := filepath.Join(outputPath, \"book.pdf\")\n\tepubpath := filepath.Join(outputPath, \"book.epub\")\n\tmobipath := filepath.Join(outputPath, \"book.mobi\")\n\tdocxpath := filepath.Join(outputPath, \"book.docx\")\n\n\t\/\/先将转换的文件储存到临时目录\n\ttempOutputPath := filepath.Join(os.TempDir(),\"sessionId\") \/\/filepath.Abs(filepath.Join(\"cache\", sessionId))\n\n\tos.MkdirAll(outputPath, 0766)\n\tos.MkdirAll(tempOutputPath, 0766)\n\n\n\tif utils.FileExists(pdfpath) && utils.FileExists(epubpath) && utils.FileExists(mobipath) && utils.FileExists(docxpath) {\n\t\tconvertBookResult.EpubPath = epubpath\n\t\tconvertBookResult.MobiPath = mobipath\n\t\tconvertBookResult.PDFPath = pdfpath\n\t\tconvertBookResult.WordPath = docxpath\n\t\treturn convertBookResult, nil\n\t}\n\n\n\tdocs, err := NewDocument().FindListByBookId(m.BookId)\n\tif err != nil {\n\t\treturn convertBookResult, err\n\t}\n\n\ttocList := make([]converter.Toc, 0)\n\n\tfor _, item := range docs {\n\t\tif item.ParentId == 0 {\n\t\t\ttoc := converter.Toc{\n\t\t\t\tId: item.DocumentId,\n\t\t\t\tLink: strconv.Itoa(item.DocumentId) + \".html\",\n\t\t\t\tPid: item.ParentId,\n\t\t\t\tTitle: item.DocumentName,\n\t\t\t}\n\n\t\t\ttocList = append(tocList, toc)\n\t\t}\n\t}\n\tfor _, item := range docs {\n\t\tif item.ParentId != 0 {\n\t\t\ttoc := converter.Toc{\n\t\t\t\tId: item.DocumentId,\n\t\t\t\tLink: strconv.Itoa(item.DocumentId) + \".html\",\n\t\t\t\tPid: item.ParentId,\n\t\t\t\tTitle: item.DocumentName,\n\t\t\t}\n\t\t\ttocList = append(tocList, toc)\n\t\t}\n\t}\n\n\tebookConfig := converter.Config{\n\t\tCharset: \"utf-8\",\n\t\tCover: m.Cover,\n\t\tTimestamp: time.Now().Format(\"2006-01-02 15:04:05\"),\n\t\tDescription: string(blackfriday.MarkdownBasic([]byte(m.Description))),\n\t\tFooter: \"<p style='color:#8E8E8E;font-size:12px;'>本文档使用 <a href='https:\/\/www.iminho.me' style='text-decoration:none;color:#1abc9c;font-weight:bold;'>MinDoc<\/a> 构建 <span style='float:right'>- _PAGENUM_ -<\/span><\/p>\",\n\t\tHeader: \"<p style='color:#8E8E8E;font-size:12px;'>_SECTION_<\/p>\",\n\t\tIdentifier: \"\",\n\t\tLanguage: \"zh-CN\",\n\t\tCreator: m.CreateName,\n\t\tPublisher: m.Publisher,\n\t\tContributor: m.Publisher,\n\t\tTitle: m.BookName,\n\t\tFormat: []string{\"epub\", \"mobi\", \"pdf\", \"docx\"},\n\t\tFontSize: \"14\",\n\t\tPaperSize: \"a4\",\n\t\tMarginLeft: \"72\",\n\t\tMarginRight: \"72\",\n\t\tMarginTop: \"72\",\n\t\tMarginBottom: \"72\",\n\t\tToc: tocList,\n\t\tMore: []string{},\n\t}\n\n\tif tempOutputPath, err = filepath.Abs(tempOutputPath); err != nil {\n\t\tbeego.Error(\"导出目录配置错误:\" + err.Error())\n\t\treturn convertBookResult, err\n\t}\n\n\tfor _, item := range docs {\n\t\tname := strconv.Itoa(item.DocumentId)\n\t\tfpath := filepath.Join(tempOutputPath, name+\".html\")\n\n\t\tf, err := os.OpenFile(fpath, os.O_CREATE|os.O_RDWR, 0777)\n\t\tif err != nil {\n\t\t\treturn convertBookResult, err\n\t\t}\n\t\tvar buf bytes.Buffer\n\n\t\tif err := beego.ExecuteViewPathTemplate(&buf, \"document\/export.tpl\", viewPath, map[string]interface{}{\"Model\": m, \"Lists\": item, \"BaseUrl\": conf.BaseUrl}); err != nil {\n\t\t\treturn convertBookResult, err\n\t\t}\n\t\thtml := buf.String()\n\n\t\tif err != nil {\n\n\t\t\tf.Close()\n\t\t\treturn convertBookResult, err\n\t\t}\n\n\t\tbufio := bytes.NewReader(buf.Bytes())\n\n\t\tdoc, err := goquery.NewDocumentFromReader(bufio)\n\t\tdoc.Find(\"img\").Each(func(i int, contentSelection *goquery.Selection) {\n\t\t\tif src, ok := contentSelection.Attr(\"src\"); ok && strings.HasPrefix(src, \"\/\") {\n\t\t\t\t\/\/contentSelection.SetAttr(\"src\", baseUrl + src)\n\t\t\t\tspath := filepath.Join(conf.WorkingDirectory, src)\n\n\t\t\t\tif ff, e := ioutil.ReadFile(spath); e == nil {\n\n\t\t\t\t\tencodeString := base64.StdEncoding.EncodeToString(ff)\n\n\t\t\t\t\tsrc = \"data:image\/\" + filepath.Ext(src) + \";base64,\" + encodeString\n\n\t\t\t\t\tcontentSelection.SetAttr(\"src\", src)\n\t\t\t\t}\n\n\t\t\t}\n\t\t})\n\n\t\thtml, err = doc.Html()\n\t\tif err != nil {\n\t\t\tf.Close()\n\t\t\treturn convertBookResult, err\n\t\t}\n\n\t\t\/\/ html = strings.Replace(html, \"<img src=\\\"\/uploads\", \"<img src=\\\"\" + c.BaseUrl() + \"\/uploads\", -1)\n\n\t\tf.WriteString(html)\n\t\tf.Close()\n\t}\n\teBookConverter := &converter.Converter{\n\t\tBasePath: tempOutputPath,\n\t\tConfig: ebookConfig,\n\t\tDebug: true,\n\t}\n\n\tif err := eBookConverter.Convert(); err != nil {\n\t\tbeego.Error(\"转换文件错误:\" + m.BookName + \" => \" + err.Error())\n\t\treturn convertBookResult, err\n\t}\n\tbeego.Info(\"文档转换完成:\" + m.BookName)\n\tdefer func(p string) {\n\t\tos.RemoveAll(p)\n\t}(tempOutputPath)\n\n\tutils.CopyFile(mobipath, filepath.Join(tempOutputPath, \"output\", \"book.mobi\"))\n\tutils.CopyFile(pdfpath, filepath.Join(tempOutputPath, \"output\", \"book.pdf\"))\n\tutils.CopyFile(epubpath, filepath.Join(tempOutputPath, \"output\", \"book.epub\"))\n\tutils.CopyFile(docxpath, filepath.Join(tempOutputPath, \"output\", \"book.docx\"))\n\n\tconvertBookResult.MobiPath = mobipath\n\tconvertBookResult.PDFPath = pdfpath\n\tconvertBookResult.EpubPath = epubpath\n\tconvertBookResult.WordPath = docxpath\n\n\treturn convertBookResult, nil\n}\n<commit_msg>修复导出目录路径BUG<commit_after>package models\n\nimport (\n\t\"bytes\"\n\t\"time\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"encoding\/base64\"\n\t\"github.com\/PuerkitoBio\/goquery\"\n\t\"github.com\/astaxie\/beego\"\n\t\"github.com\/astaxie\/beego\/logs\"\n\t\"github.com\/astaxie\/beego\/orm\"\n\t\"github.com\/lifei6671\/mindoc\/conf\"\n\t\"github.com\/lifei6671\/mindoc\/converter\"\n\t\"github.com\/lifei6671\/mindoc\/utils\"\n\t\"github.com\/russross\/blackfriday\"\n)\n\ntype BookResult struct {\n\tBookId int `json:\"book_id\"`\n\tBookName string `json:\"book_name\"`\n\tIdentify string `json:\"identify\"`\n\tOrderIndex int `json:\"order_index\"`\n\tDescription string `json:\"description\"`\n\tPublisher string `json:\"publisher\"`\n\tPrivatelyOwned int `json:\"privately_owned\"`\n\tPrivateToken string `json:\"private_token\"`\n\tDocCount int `json:\"doc_count\"`\n\tCommentStatus string `json:\"comment_status\"`\n\tCommentCount int `json:\"comment_count\"`\n\tCreateTime time.Time `json:\"create_time\"`\n\tCreateName string `json:\"create_name\"`\n\tModifyTime time.Time `json:\"modify_time\"`\n\tCover string `json:\"cover\"`\n\tTheme string `json:\"theme\"`\n\tLabel string `json:\"label\"`\n\tMemberId int `json:\"member_id\"`\n\tEditor string `json:\"editor\"`\n\tAutoRelease bool `json:\"auto_release\"`\n\n\tRelationshipId int `json:\"relationship_id\"`\n\tRoleId int `json:\"role_id\"`\n\tRoleName string `json:\"role_name\"`\n\tStatus int\n\n\tLastModifyText string `json:\"last_modify_text\"`\n\tIsDisplayComment bool `json:\"is_display_comment\"`\n}\n\nfunc NewBookResult() *BookResult {\n\treturn &BookResult{}\n}\n\n\/\/ 根据项目标识查询项目以及指定用户权限的信息.\nfunc (m *BookResult) FindByIdentify(identify string, member_id int) (*BookResult, error) {\n\tif identify == \"\" || member_id <= 0 {\n\t\treturn m, ErrInvalidParameter\n\t}\n\to := orm.NewOrm()\n\n\tbook := NewBook()\n\n\terr := o.QueryTable(book.TableNameWithPrefix()).Filter(\"identify\", identify).One(book)\n\n\tif err != nil {\n\t\treturn m, err\n\t}\n\n\trelationship := NewRelationship()\n\n\terr = o.QueryTable(relationship.TableNameWithPrefix()).Filter(\"book_id\", book.BookId).Filter(\"member_id\", member_id).One(relationship)\n\n\tif err != nil {\n\t\treturn m, err\n\t}\n\tvar relationship2 Relationship\n\n\terr = o.QueryTable(relationship.TableNameWithPrefix()).Filter(\"book_id\", book.BookId).Filter(\"role_id\", 0).One(&relationship2)\n\n\tif err != nil {\n\t\tlogs.Error(\"根据项目标识查询项目以及指定用户权限的信息 => \", err)\n\t\treturn m, ErrPermissionDenied\n\t}\n\n\tmember, err := NewMember().Find(relationship2.MemberId)\n\tif err != nil {\n\t\treturn m, err\n\t}\n\n\tm = NewBookResult().ToBookResult(*book)\n\n\tm.CreateName = member.Account\n\tm.MemberId = relationship.MemberId\n\tm.RoleId = relationship.RoleId\n\tm.RelationshipId = relationship.RelationshipId\n\n\tif m.RoleId == conf.BookFounder {\n\t\tm.RoleName = \"创始人\"\n\t} else if m.RoleId == conf.BookAdmin {\n\t\tm.RoleName = \"管理员\"\n\t} else if m.RoleId == conf.BookEditor {\n\t\tm.RoleName = \"编辑者\"\n\t} else if m.RoleId == conf.BookObserver {\n\t\tm.RoleName = \"观察者\"\n\t}\n\n\tdoc := NewDocument()\n\n\terr = o.QueryTable(doc.TableNameWithPrefix()).Filter(\"book_id\", book.BookId).OrderBy(\"modify_time\").One(doc)\n\n\tif err == nil {\n\t\tmember2 := NewMember()\n\t\tmember2.Find(doc.ModifyAt)\n\n\t\tm.LastModifyText = member2.Account + \" 于 \" + doc.ModifyTime.Format(\"2006-01-02 15:04:05\")\n\t}\n\n\treturn m, nil\n}\n\nfunc (m *BookResult) FindToPager(pageIndex, pageSize int) (books []*BookResult, totalCount int, err error) {\n\to := orm.NewOrm()\n\n\tcount, err := o.QueryTable(NewBook().TableNameWithPrefix()).Count()\n\n\tif err != nil {\n\t\treturn\n\t}\n\ttotalCount = int(count)\n\n\tsql := `SELECT\n\t\t\tbook.*,rel.relationship_id,rel.role_id,m.account AS create_name\n\t\tFROM md_books AS book\n\t\t\tLEFT JOIN md_relationship AS rel ON rel.book_id = book.book_id AND rel.role_id = 0\n\t\t\tLEFT JOIN md_members AS m ON rel.member_id = m.member_id\n\t\tORDER BY book.order_index DESC ,book.book_id DESC LIMIT ?,?`\n\n\toffset := (pageIndex - 1) * pageSize\n\n\t_, err = o.Raw(sql, offset, pageSize).QueryRows(&books)\n\n\treturn\n}\n\n\/\/实体转换\nfunc (m *BookResult) ToBookResult(book Book) *BookResult {\n\n\tm.BookId = book.BookId\n\tm.BookName = book.BookName\n\tm.Identify = book.Identify\n\tm.OrderIndex = book.OrderIndex\n\tm.Description = strings.Replace(book.Description, \"\\r\\n\", \"<br\/>\", -1)\n\tm.PrivatelyOwned = book.PrivatelyOwned\n\tm.PrivateToken = book.PrivateToken\n\tm.DocCount = book.DocCount\n\tm.CommentStatus = book.CommentStatus\n\tm.CommentCount = book.CommentCount\n\tm.CreateTime = book.CreateTime\n\tm.ModifyTime = book.ModifyTime\n\tm.Cover = book.Cover\n\tm.Label = book.Label\n\tm.Status = book.Status\n\tm.Editor = book.Editor\n\tm.Theme = book.Theme\n\tm.AutoRelease = book.AutoRelease == 1\n\tm.Publisher = book.Publisher\n\n\tif book.Theme == \"\" {\n\t\tm.Theme = \"default\"\n\t}\n\tif book.Editor == \"\" {\n\t\tm.Editor = \"markdown\"\n\t}\n\treturn m\n}\n\nfunc (m *BookResult) Converter(sessionId string) (ConvertBookResult, error) {\n\n\tconvertBookResult := ConvertBookResult{}\n\n\toutputPath := filepath.Join(conf.WorkingDirectory,\"uploads\",\"books\", strconv.Itoa(m.BookId))\n\tviewPath := beego.BConfig.WebConfig.ViewsPath\n\n\tpdfpath := filepath.Join(outputPath, \"book.pdf\")\n\tepubpath := filepath.Join(outputPath, \"book.epub\")\n\tmobipath := filepath.Join(outputPath, \"book.mobi\")\n\tdocxpath := filepath.Join(outputPath, \"book.docx\")\n\n\t\/\/先将转换的文件储存到临时目录\n\ttempOutputPath := filepath.Join(os.TempDir(),sessionId) \/\/filepath.Abs(filepath.Join(\"cache\", sessionId))\n\n\tos.MkdirAll(outputPath, 0766)\n\tos.MkdirAll(tempOutputPath, 0766)\n\n\tdefer func(p string) {\n\t\tos.RemoveAll(p)\n\t}(tempOutputPath)\n\t\n\tif utils.FileExists(pdfpath) && utils.FileExists(epubpath) && utils.FileExists(mobipath) && utils.FileExists(docxpath) {\n\t\tconvertBookResult.EpubPath = epubpath\n\t\tconvertBookResult.MobiPath = mobipath\n\t\tconvertBookResult.PDFPath = pdfpath\n\t\tconvertBookResult.WordPath = docxpath\n\t\treturn convertBookResult, nil\n\t}\n\n\n\tdocs, err := NewDocument().FindListByBookId(m.BookId)\n\tif err != nil {\n\t\treturn convertBookResult, err\n\t}\n\n\ttocList := make([]converter.Toc, 0)\n\n\tfor _, item := range docs {\n\t\tif item.ParentId == 0 {\n\t\t\ttoc := converter.Toc{\n\t\t\t\tId: item.DocumentId,\n\t\t\t\tLink: strconv.Itoa(item.DocumentId) + \".html\",\n\t\t\t\tPid: item.ParentId,\n\t\t\t\tTitle: item.DocumentName,\n\t\t\t}\n\n\t\t\ttocList = append(tocList, toc)\n\t\t}\n\t}\n\tfor _, item := range docs {\n\t\tif item.ParentId != 0 {\n\t\t\ttoc := converter.Toc{\n\t\t\t\tId: item.DocumentId,\n\t\t\t\tLink: strconv.Itoa(item.DocumentId) + \".html\",\n\t\t\t\tPid: item.ParentId,\n\t\t\t\tTitle: item.DocumentName,\n\t\t\t}\n\t\t\ttocList = append(tocList, toc)\n\t\t}\n\t}\n\n\tebookConfig := converter.Config{\n\t\tCharset: \"utf-8\",\n\t\tCover: m.Cover,\n\t\tTimestamp: time.Now().Format(\"2006-01-02 15:04:05\"),\n\t\tDescription: string(blackfriday.MarkdownBasic([]byte(m.Description))),\n\t\tFooter: \"<p style='color:#8E8E8E;font-size:12px;'>本文档使用 <a href='https:\/\/www.iminho.me' style='text-decoration:none;color:#1abc9c;font-weight:bold;'>MinDoc<\/a> 构建 <span style='float:right'>- _PAGENUM_ -<\/span><\/p>\",\n\t\tHeader: \"<p style='color:#8E8E8E;font-size:12px;'>_SECTION_<\/p>\",\n\t\tIdentifier: \"\",\n\t\tLanguage: \"zh-CN\",\n\t\tCreator: m.CreateName,\n\t\tPublisher: m.Publisher,\n\t\tContributor: m.Publisher,\n\t\tTitle: m.BookName,\n\t\tFormat: []string{\"epub\", \"mobi\", \"pdf\", \"docx\"},\n\t\tFontSize: \"14\",\n\t\tPaperSize: \"a4\",\n\t\tMarginLeft: \"72\",\n\t\tMarginRight: \"72\",\n\t\tMarginTop: \"72\",\n\t\tMarginBottom: \"72\",\n\t\tToc: tocList,\n\t\tMore: []string{},\n\t}\n\n\tif tempOutputPath, err = filepath.Abs(tempOutputPath); err != nil {\n\t\tbeego.Error(\"导出目录配置错误:\" + err.Error())\n\t\treturn convertBookResult, err\n\t}\n\n\tfor _, item := range docs {\n\t\tname := strconv.Itoa(item.DocumentId)\n\t\tfpath := filepath.Join(tempOutputPath, name+\".html\")\n\n\t\tf, err := os.OpenFile(fpath, os.O_CREATE|os.O_RDWR, 0777)\n\t\tif err != nil {\n\t\t\treturn convertBookResult, err\n\t\t}\n\t\tvar buf bytes.Buffer\n\n\t\tif err := beego.ExecuteViewPathTemplate(&buf, \"document\/export.tpl\", viewPath, map[string]interface{}{\"Model\": m, \"Lists\": item, \"BaseUrl\": conf.BaseUrl}); err != nil {\n\t\t\treturn convertBookResult, err\n\t\t}\n\t\thtml := buf.String()\n\n\t\tif err != nil {\n\n\t\t\tf.Close()\n\t\t\treturn convertBookResult, err\n\t\t}\n\n\t\tbufio := bytes.NewReader(buf.Bytes())\n\n\t\tdoc, err := goquery.NewDocumentFromReader(bufio)\n\t\tdoc.Find(\"img\").Each(func(i int, contentSelection *goquery.Selection) {\n\t\t\tif src, ok := contentSelection.Attr(\"src\"); ok && strings.HasPrefix(src, \"\/\") {\n\t\t\t\t\/\/contentSelection.SetAttr(\"src\", baseUrl + src)\n\t\t\t\tspath := filepath.Join(conf.WorkingDirectory, src)\n\n\t\t\t\tif ff, e := ioutil.ReadFile(spath); e == nil {\n\n\t\t\t\t\tencodeString := base64.StdEncoding.EncodeToString(ff)\n\n\t\t\t\t\tsrc = \"data:image\/\" + filepath.Ext(src) + \";base64,\" + encodeString\n\n\t\t\t\t\tcontentSelection.SetAttr(\"src\", src)\n\t\t\t\t}\n\n\t\t\t}\n\t\t})\n\n\t\thtml, err = doc.Html()\n\t\tif err != nil {\n\t\t\tf.Close()\n\t\t\treturn convertBookResult, err\n\t\t}\n\n\t\t\/\/ html = strings.Replace(html, \"<img src=\\\"\/uploads\", \"<img src=\\\"\" + c.BaseUrl() + \"\/uploads\", -1)\n\n\t\tf.WriteString(html)\n\t\tf.Close()\n\t}\n\teBookConverter := &converter.Converter{\n\t\tBasePath: tempOutputPath,\n\t\tConfig: ebookConfig,\n\t\tDebug: true,\n\t}\n\n\tif err := eBookConverter.Convert(); err != nil {\n\t\tbeego.Error(\"转换文件错误:\" + m.BookName + \" => \" + err.Error())\n\t\treturn convertBookResult, err\n\t}\n\tbeego.Info(\"文档转换完成:\" + m.BookName)\n\n\n\tutils.CopyFile(mobipath, filepath.Join(tempOutputPath, \"output\", \"book.mobi\"))\n\tutils.CopyFile(pdfpath, filepath.Join(tempOutputPath, \"output\", \"book.pdf\"))\n\tutils.CopyFile(epubpath, filepath.Join(tempOutputPath, \"output\", \"book.epub\"))\n\tutils.CopyFile(docxpath, filepath.Join(tempOutputPath, \"output\", \"book.docx\"))\n\n\tconvertBookResult.MobiPath = mobipath\n\tconvertBookResult.PDFPath = pdfpath\n\tconvertBookResult.EpubPath = epubpath\n\tconvertBookResult.WordPath = docxpath\n\n\treturn convertBookResult, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package terraform\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\n\t\"github.com\/hashicorp\/errwrap\"\n\t\"github.com\/hashicorp\/terraform\/dag\"\n)\n\n\/\/ ContextGraphWalker is the GraphWalker implementation used with the\n\/\/ Context struct to walk and evaluate the graph.\ntype ContextGraphWalker struct {\n\tNullGraphWalker\n\n\t\/\/ Configurable values\n\tContext *Context\n\tOperation walkOperation\n\n\t\/\/ Outputs, do not set these. Do not read these while the graph\n\t\/\/ is being walked.\n\tValidationWarnings []string\n\tValidationErrors []error\n\n\terrorLock sync.Mutex\n\tonce sync.Once\n\tcontexts map[string]*BuiltinEvalContext\n\tcontextLock sync.Mutex\n\tinterpolaterVars map[string]map[string]string\n\tinterpolaterVarLock sync.Mutex\n\tproviderCache map[string]ResourceProvider\n\tproviderConfigCache map[string]*ResourceConfig\n\tproviderLock sync.Mutex\n\tprovisionerCache map[string]ResourceProvisioner\n\tprovisionerLock sync.Mutex\n}\n\nfunc (w *ContextGraphWalker) EnterPath(path []string) EvalContext {\n\tw.once.Do(w.init)\n\n\tw.contextLock.Lock()\n\tdefer w.contextLock.Unlock()\n\n\t\/\/ If we already have a context for this path cached, use that\n\tkey := PathCacheKey(path)\n\tif ctx, ok := w.contexts[key]; ok {\n\t\treturn ctx\n\t}\n\n\t\/\/ Setup the variables for this interpolater\n\tvariables := make(map[string]string)\n\tif len(path) <= 1 {\n\t\tfor k, v := range w.Context.variables {\n\t\t\tvariables[k] = v\n\t\t}\n\t}\n\tw.interpolaterVarLock.Lock()\n\tif m, ok := w.interpolaterVars[key]; ok {\n\t\tfor k, v := range m {\n\t\t\tvariables[k] = v\n\t\t}\n\t}\n\tw.interpolaterVars[key] = variables\n\tw.interpolaterVarLock.Unlock()\n\n\tctx := &BuiltinEvalContext{\n\t\tPathValue: path,\n\t\tHooks: w.Context.hooks,\n\t\tInputValue: w.Context.uiInput,\n\t\tProviders: w.Context.providers,\n\t\tProviderCache: w.providerCache,\n\t\tProviderConfigCache: w.providerConfigCache,\n\t\tProviderInputConfig: w.Context.providerInputConfig,\n\t\tProviderLock: &w.providerLock,\n\t\tProvisioners: w.Context.provisioners,\n\t\tProvisionerCache: w.provisionerCache,\n\t\tProvisionerLock: &w.provisionerLock,\n\t\tDiffValue: w.Context.diff,\n\t\tDiffLock: &w.Context.diffLock,\n\t\tStateValue: w.Context.state,\n\t\tStateLock: &w.Context.stateLock,\n\t\tInterpolater: &Interpolater{\n\t\t\tOperation: w.Operation,\n\t\t\tModule: w.Context.module,\n\t\t\tState: w.Context.state,\n\t\t\tStateLock: &w.Context.stateLock,\n\t\t\tVariables: variables,\n\t\t},\n\t\tInterpolaterVars: w.interpolaterVars,\n\t\tInterpolaterVarLock: &w.interpolaterVarLock,\n\t}\n\n\tw.contexts[key] = ctx\n\treturn ctx\n}\n\nfunc (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode {\n\tlog.Printf(\"[TRACE] Entering eval tree: %s\", dag.VertexName(v))\n\n\t\/\/ Acquire a lock on the semaphore\n\tw.Context.parallelSem.Acquire()\n\n\t\/\/ We want to filter the evaluation tree to only include operations\n\t\/\/ that belong in this operation.\n\treturn EvalFilter(n, EvalNodeFilterOp(w.Operation))\n}\n\nfunc (w *ContextGraphWalker) ExitEvalTree(\n\tv dag.Vertex, output interface{}, err error) error {\n\tlog.Printf(\"[TRACE] Exiting eval tree: %s\", dag.VertexName(v))\n\n\t\/\/ Release the semaphore\n\tw.Context.parallelSem.Release()\n\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Acquire the lock because anything is going to require a lock.\n\tw.errorLock.Lock()\n\tdefer w.errorLock.Unlock()\n\n\t\/\/ Try to get a validation error out of it. If its not a validation\n\t\/\/ error, then just record the normal error.\n\tverr, ok := err.(*EvalValidateError)\n\tif !ok {\n\t\treturn err\n\t}\n\n\tfor _, msg := range verr.Warnings {\n\t\tw.ValidationWarnings = append(\n\t\t\tw.ValidationWarnings,\n\t\t\tfmt.Sprintf(\"%s: %s\", dag.VertexName(v), msg))\n\t}\n\tfor _, e := range verr.Errors {\n\t\tw.ValidationErrors = append(\n\t\t\tw.ValidationErrors,\n\t\t\terrwrap.Wrapf(fmt.Sprintf(\"%s: {{err}}\", dag.VertexName(v)), e))\n\t}\n\n\treturn nil\n}\n\nfunc (w *ContextGraphWalker) init() {\n\tw.contexts = make(map[string]*BuiltinEvalContext, 5)\n\tw.providerCache = make(map[string]ResourceProvider, 5)\n\tw.providerConfigCache = make(map[string]*ResourceConfig, 5)\n\tw.provisionerCache = make(map[string]ResourceProvisioner, 5)\n\tw.interpolaterVars = make(map[string]map[string]string, 5)\n}\n<commit_msg>core: log eval tree operations<commit_after>package terraform\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\n\t\"github.com\/hashicorp\/errwrap\"\n\t\"github.com\/hashicorp\/terraform\/dag\"\n)\n\n\/\/ ContextGraphWalker is the GraphWalker implementation used with the\n\/\/ Context struct to walk and evaluate the graph.\ntype ContextGraphWalker struct {\n\tNullGraphWalker\n\n\t\/\/ Configurable values\n\tContext *Context\n\tOperation walkOperation\n\n\t\/\/ Outputs, do not set these. Do not read these while the graph\n\t\/\/ is being walked.\n\tValidationWarnings []string\n\tValidationErrors []error\n\n\terrorLock sync.Mutex\n\tonce sync.Once\n\tcontexts map[string]*BuiltinEvalContext\n\tcontextLock sync.Mutex\n\tinterpolaterVars map[string]map[string]string\n\tinterpolaterVarLock sync.Mutex\n\tproviderCache map[string]ResourceProvider\n\tproviderConfigCache map[string]*ResourceConfig\n\tproviderLock sync.Mutex\n\tprovisionerCache map[string]ResourceProvisioner\n\tprovisionerLock sync.Mutex\n}\n\nfunc (w *ContextGraphWalker) EnterPath(path []string) EvalContext {\n\tw.once.Do(w.init)\n\n\tw.contextLock.Lock()\n\tdefer w.contextLock.Unlock()\n\n\t\/\/ If we already have a context for this path cached, use that\n\tkey := PathCacheKey(path)\n\tif ctx, ok := w.contexts[key]; ok {\n\t\treturn ctx\n\t}\n\n\t\/\/ Setup the variables for this interpolater\n\tvariables := make(map[string]string)\n\tif len(path) <= 1 {\n\t\tfor k, v := range w.Context.variables {\n\t\t\tvariables[k] = v\n\t\t}\n\t}\n\tw.interpolaterVarLock.Lock()\n\tif m, ok := w.interpolaterVars[key]; ok {\n\t\tfor k, v := range m {\n\t\t\tvariables[k] = v\n\t\t}\n\t}\n\tw.interpolaterVars[key] = variables\n\tw.interpolaterVarLock.Unlock()\n\n\tctx := &BuiltinEvalContext{\n\t\tPathValue: path,\n\t\tHooks: w.Context.hooks,\n\t\tInputValue: w.Context.uiInput,\n\t\tProviders: w.Context.providers,\n\t\tProviderCache: w.providerCache,\n\t\tProviderConfigCache: w.providerConfigCache,\n\t\tProviderInputConfig: w.Context.providerInputConfig,\n\t\tProviderLock: &w.providerLock,\n\t\tProvisioners: w.Context.provisioners,\n\t\tProvisionerCache: w.provisionerCache,\n\t\tProvisionerLock: &w.provisionerLock,\n\t\tDiffValue: w.Context.diff,\n\t\tDiffLock: &w.Context.diffLock,\n\t\tStateValue: w.Context.state,\n\t\tStateLock: &w.Context.stateLock,\n\t\tInterpolater: &Interpolater{\n\t\t\tOperation: w.Operation,\n\t\t\tModule: w.Context.module,\n\t\t\tState: w.Context.state,\n\t\t\tStateLock: &w.Context.stateLock,\n\t\t\tVariables: variables,\n\t\t},\n\t\tInterpolaterVars: w.interpolaterVars,\n\t\tInterpolaterVarLock: &w.interpolaterVarLock,\n\t}\n\n\tw.contexts[key] = ctx\n\treturn ctx\n}\n\nfunc (w *ContextGraphWalker) EnterEvalTree(v dag.Vertex, n EvalNode) EvalNode {\n\tlog.Printf(\"[TRACE] [%s] Entering eval tree: %s\",\n\t\tw.Operation, dag.VertexName(v))\n\n\t\/\/ Acquire a lock on the semaphore\n\tw.Context.parallelSem.Acquire()\n\n\t\/\/ We want to filter the evaluation tree to only include operations\n\t\/\/ that belong in this operation.\n\treturn EvalFilter(n, EvalNodeFilterOp(w.Operation))\n}\n\nfunc (w *ContextGraphWalker) ExitEvalTree(\n\tv dag.Vertex, output interface{}, err error) error {\n\tlog.Printf(\"[TRACE] [%s] Exiting eval tree: %s\",\n\t\tw.Operation, dag.VertexName(v))\n\n\t\/\/ Release the semaphore\n\tw.Context.parallelSem.Release()\n\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Acquire the lock because anything is going to require a lock.\n\tw.errorLock.Lock()\n\tdefer w.errorLock.Unlock()\n\n\t\/\/ Try to get a validation error out of it. If its not a validation\n\t\/\/ error, then just record the normal error.\n\tverr, ok := err.(*EvalValidateError)\n\tif !ok {\n\t\treturn err\n\t}\n\n\tfor _, msg := range verr.Warnings {\n\t\tw.ValidationWarnings = append(\n\t\t\tw.ValidationWarnings,\n\t\t\tfmt.Sprintf(\"%s: %s\", dag.VertexName(v), msg))\n\t}\n\tfor _, e := range verr.Errors {\n\t\tw.ValidationErrors = append(\n\t\t\tw.ValidationErrors,\n\t\t\terrwrap.Wrapf(fmt.Sprintf(\"%s: {{err}}\", dag.VertexName(v)), e))\n\t}\n\n\treturn nil\n}\n\nfunc (w *ContextGraphWalker) init() {\n\tw.contexts = make(map[string]*BuiltinEvalContext, 5)\n\tw.providerCache = make(map[string]ResourceProvider, 5)\n\tw.providerConfigCache = make(map[string]*ResourceConfig, 5)\n\tw.provisionerCache = make(map[string]ResourceProvisioner, 5)\n\tw.interpolaterVars = make(map[string]map[string]string, 5)\n}\n<|endoftext|>"} {"text":"<commit_before>package shard\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"sync\"\n\n\t\"github.com\/unigraph\/rdb\"\n)\n\nvar ShardNameFn = func(i uint) string { return fmt.Sprintf(\"%03d\", i) }\n\ntype Shard struct {\n\tdbs []*rdb.DB\n}\n\nfunc Open(opts *rdb.Options, name string, shardsNum uint) (*Shard, error) {\n\tif err := checkValid(name, shardsNum); err != nil {\n\t\treturn nil, err\n\t}\n\ts := &Shard{}\n\tfor i := uint(0); i < shardsNum; i++ {\n\t\tsName := filepath.Join(name, ShardNameFn(i))\n\t\tdb, err := rdb.OpenDb(opts, sName)\n\t\tif err != nil {\n\t\t\ts.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\ts.dbs = append(s.dbs, db)\n\t}\n\treturn s, nil\n}\n\nfunc OpenForReadOnly(opts *rdb.Options, name string, shardsNum uint, errorIfLogFileExist bool) (*Shard, error) {\n\tif err := checkValid(name, shardsNum); err != nil {\n\t\treturn nil, err\n\t}\n\ts := &Shard{}\n\tfor i := uint(0); i < shardsNum; i++ {\n\t\tsName := filepath.Join(name, ShardNameFn(i))\n\t\tdb, err := rdb.OpenDbForReadOnly(opts, sName, errorIfLogFileExist)\n\t\tif err != nil {\n\t\t\ts.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\ts.dbs = append(s.dbs, db)\n\t}\n\treturn s, nil\n}\n\ntype errors []error\n\nfunc (e errors) Error() string {\n\tres := \"\"\n\tfor _, err := range e {\n\t\tres += err.Error()\n\t}\n\treturn res\n}\n\nfunc (s *Shard) Flush(opts *rdb.FlushOptions) error {\n\twg := sync.WaitGroup{}\n\twg.Add(len(s.dbs))\n\terr := errors(nil)\n\tl := sync.RWMutex{}\n\tfor _, db := range s.dbs {\n\t\tgo func(db *rdb.DB) {\n\t\t\tif e := db.Flush(opts); e != nil {\n\t\t\t\tl.Lock()\n\t\t\t\terr = append(err, e)\n\t\t\t\tl.Unlock()\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(db)\n\t}\n\twg.Wait()\n\tif len(err) == 0 {\n\t\treturn nil\n\t}\n\treturn err\n}\n\nfunc (s *Shard) CompactRange(r rdb.Range) {\n\twg := sync.WaitGroup{}\n\twg.Add(len(s.dbs))\n\tfor _, db := range s.dbs {\n\t\tgo func(db *rdb.DB) {\n\t\t\tdb.CompactRange(r)\n\t\t\twg.Done()\n\t\t}(db)\n\t}\n\twg.Wait()\n}\n\nfunc (s *Shard) DBs() []*rdb.DB {\n\treturn append([]*rdb.DB(nil), s.dbs...)\n}\n\nfunc (s *Shard) Close() {\n\twg := sync.WaitGroup{}\n\twg.Add(len(s.dbs))\n\tfor _, db := range s.dbs {\n\t\tgo func(db *rdb.DB) {\n\t\t\tdb.Close()\n\t\t\twg.Done()\n\t\t}(db)\n\t}\n\twg.Wait()\n}\n\nfunc GetShardNum(name string) uint {\n\tif files, err := ioutil.ReadDir(name); os.IsNotExist(err) {\n\t\treturn 0\n\t} else {\n\t\tshards := map[string]bool{}\n\t\tfor _, file := range files {\n\t\t\tif matched, _ := regexp.MatchString(`\\d{3}`, file.Name()); matched {\n\t\t\t\tshards[file.Name()] = true\n\t\t\t}\n\t\t}\n\t\treturn uint(len(shards))\n\t}\n}\n\nfunc checkValid(name string, shardsNum uint) error {\n\tif shardsNum == 0 || shardsNum > 999 {\n\t\treturn fmt.Errorf(\"Number of shards has to be bigger than 0 and lower than 1000\")\n\t}\n\tfiles, err := ioutil.ReadDir(name)\n\tif os.IsNotExist(err) { \/\/ does not exists, let's create empty\n\t\treturn os.Mkdir(name, 0700)\n\t} else if err != nil { \/\/ some other error related to ReadDir\n\t\treturn err\n\t} else { \/\/ exists, let's check the content\n\t\tshards := map[string]bool{}\n\t\tfor _, file := range files {\n\t\t\tif matched, _ := regexp.MatchString(`\\d{3}`, file.Name()); matched {\n\t\t\t\tshards[file.Name()] = true\n\t\t\t}\n\t\t}\n\t\tif len(shards) != 0 {\n\t\t\tif uint(len(shards)) != shardsNum {\n\t\t\t\treturn fmt.Errorf(\"Wrong number of shards provided (%v)\", len(shards))\n\t\t\t}\n\t\t\tfor i := uint(0); i < shardsNum; i++ {\n\t\t\t\tsName := ShardNameFn(i)\n\t\t\t\tif !shards[sName] {\n\t\t\t\t\treturn fmt.Errorf(\"Wrong number of shards provided (%v)\", len(shards))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>update<commit_after>package shard\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"sync\"\n\n\t\"github.com\/unigraph\/rdb\"\n)\n\nvar ShardNameFn = func(i uint) string { return fmt.Sprintf(\"%03d\", i) }\n\ntype Shard struct {\n\tdbs []*rdb.DB\n}\n\nfunc Open(opts *rdb.Options, name string, shardsNum uint) (*Shard, error) {\n\tif err := checkValid(name, shardsNum); err != nil {\n\t\treturn nil, err\n\t}\n\ts := &Shard{}\n\tfor i := uint(0); i < shardsNum; i++ {\n\t\tsName := filepath.Join(name, ShardNameFn(i))\n\t\tdb, err := rdb.OpenDb(opts, sName)\n\t\tif err != nil {\n\t\t\ts.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\ts.dbs = append(s.dbs, db)\n\t}\n\treturn s, nil\n}\n\nfunc OpenForReadOnly(opts *rdb.Options, name string, shardsNum uint, errorIfLogFileExist bool) (*Shard, error) {\n\tif err := checkValid(name, shardsNum); err != nil {\n\t\treturn nil, err\n\t}\n\ts := &Shard{}\n\tfor i := uint(0); i < shardsNum; i++ {\n\t\tsName := filepath.Join(name, ShardNameFn(i))\n\t\tdb, err := rdb.OpenDbForReadOnly(opts, sName, errorIfLogFileExist)\n\t\tif err != nil {\n\t\t\ts.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\ts.dbs = append(s.dbs, db)\n\t}\n\treturn s, nil\n}\n\ntype errors []error\n\nfunc (e errors) Error() string {\n\tres := \"\"\n\tfor _, err := range e {\n\t\tres += err.Error()\n\t}\n\treturn res\n}\n\nfunc (s *Shard) Flush(opts *rdb.FlushOptions) error {\n\twg := sync.WaitGroup{}\n\twg.Add(len(s.dbs))\n\terr := errors(nil)\n\tl := sync.RWMutex{}\n\tfor _, db := range s.dbs {\n\t\tgo func(db *rdb.DB) {\n\t\t\tif e := db.Flush(opts); e != nil {\n\t\t\t\tl.Lock()\n\t\t\t\terr = append(err, e)\n\t\t\t\tl.Unlock()\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(db)\n\t}\n\twg.Wait()\n\tif len(err) == 0 {\n\t\treturn nil\n\t}\n\treturn err\n}\n\nfunc (s *Shard) CompactRange(r rdb.Range) {\n\twg := sync.WaitGroup{}\n\twg.Add(len(s.dbs))\n\tfor _, db := range s.dbs {\n\t\tgo func(db *rdb.DB) {\n\t\t\tdb.CompactRange(r)\n\t\t\twg.Done()\n\t\t}(db)\n\t}\n\twg.Wait()\n}\n\nfunc (s *Shard) DBs() []*rdb.DB {\n\treturn append([]*rdb.DB(nil), s.dbs...)\n}\n\nfunc (s *Shard) Close() {\n\twg := sync.WaitGroup{}\n\twg.Add(len(s.dbs))\n\tfor _, db := range s.dbs {\n\t\tgo func(db *rdb.DB) {\n\t\t\tdb.Close()\n\t\t\twg.Done()\n\t\t}(db)\n\t}\n\twg.Wait()\n}\n\nfunc GetShardNum(name string) uint {\n\tif files, err := ioutil.ReadDir(name); os.IsNotExist(err) {\n\t\treturn 0\n\t} else {\n\t\tshards := map[string]bool{}\n\t\tfor _, file := range files {\n\t\t\tshards[file.Name()] = true\n\t\t}\n\t\ti := 0\n\t\tfor shards[ShardNameFn(uint(i))] {\n\t\t\ti++\n\t\t}\n\t\tif len(shards) != i {\n\t\t\treturn 0\n\t\t}\n\t\treturn uint(i)\n\t}\n}\n\nfunc checkValid(name string, shardsNum uint) error {\n\tif shardsNum == 0 || shardsNum > 999 {\n\t\treturn fmt.Errorf(\"Number of shards has to be bigger than 0 and lower than 1000\")\n\t}\n\tfiles, err := ioutil.ReadDir(name)\n\tif os.IsNotExist(err) { \/\/ does not exists, let's create empty\n\t\treturn os.Mkdir(name, 0700)\n\t} else if err != nil { \/\/ some other error related to ReadDir\n\t\treturn err\n\t} else { \/\/ exists, let's check the content\n\t\tshards := map[string]bool{}\n\t\tfor _, file := range files {\n\t\t\tif matched, _ := regexp.MatchString(`\\d{3}`, file.Name()); matched {\n\t\t\t\tshards[file.Name()] = true\n\t\t\t}\n\t\t}\n\t\tif len(shards) != 0 {\n\t\t\tif uint(len(shards)) != shardsNum {\n\t\t\t\treturn fmt.Errorf(\"Wrong number of shards provided (%v)\", len(shards))\n\t\t\t}\n\t\t\tfor i := uint(0); i < shardsNum; i++ {\n\t\t\t\tsName := ShardNameFn(i)\n\t\t\t\tif !shards[sName] {\n\t\t\t\t\treturn fmt.Errorf(\"Wrong number of shards provided (%v)\", len(shards))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package logger\n\nimport (\n\t\"io\"\n\t\"os\"\n\n\tlog \"github.com\/inconshreveable\/log15\"\n\t\"github.com\/xgfone\/go-tools\/log\/handler\"\n)\n\ntype closingHandler struct {\n\tio.WriteCloser\n\tlog.Handler\n}\n\nfunc (h *closingHandler) Close() error {\n\treturn h.WriteCloser.Close()\n}\n\nfunc TimedRotatingFileHandler(fmtr log.Format, filename string, backupCount, interval int) (h log.Handler, err error) {\n\tdefer func() {\n\t\tif _err := recover(); _err != nil {\n\t\t\terr = _err.(error)\n\t\t\treturn\n\t\t}\n\t}()\n\n\t_h := handler.NewTimedRotatingFile(filename)\n\t_h.SetBackupCount(backupCount).SetInterval(interval)\n\n\treturn closingHandler{_h, log.StreamHandler(_h, fmtr)}, nil\n}\n\ntype Logger struct {\n\tlog.Logger\n}\n\nfunc NewLogger(level, filepath string) (logger *Logger, err error) {\n\t\/\/ Logger := log.New(os.Stderr, \"app\", log.LstdFlags|log.Lshortfile)\n\n\tvar lvl log.Lvl\n\tif _level, _err := log.LvlFromString(level); _err != nil {\n\t\terr = _err\n\t\treturn\n\t} else {\n\t\tlvl = _level\n\t}\n\n\tvar handler log.Handler\n\tif filepath == \"\" {\n\t\thandler = log.StreamHandler(os.Stderr, log.LogfmtFormat())\n\t} else {\n\t\thandler, err = TimedRotatingFileHandler(log.LogfmtFormat(), filepath, 31, 1)\n\t\t\/\/ handler, err = log.FileHandler(filepath, log.LogfmtFormat())\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\t\/\/ handler = log.SyncHandler(handler)\n\n\t\/\/shandler := log.CallerFuncHandler(handler)\n\tshandler := log.CallerFileHandler(handler)\n\tchandler := log.CallerStackHandler(\"%v\", handler)\n\n\thandlers := log.MultiHandler(\n\t\tlog.LvlFilterHandler(log.LvlCrit, chandler),\n\t\tlog.LvlFilterHandler(lvl, shandler),\n\t)\n\n\t_logger := log.New()\n\t_logger.SetHandler(handlers)\n\n\tlogger = &Logger{Logger: _logger}\n\n\treturn\n}\n<commit_msg>Remove go logger<commit_after><|endoftext|>"} {"text":"<commit_before>package chartplugin\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"image\/color\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/gonum\/plot\"\n\t\"github.com\/gonum\/plot\/plotter\"\n\t\"github.com\/gonum\/plot\/plotutil\"\n\t\"github.com\/gonum\/plot\/vg\"\n\t\"github.com\/iopred\/bruxism\"\n\t\"github.com\/iopred\/discordgo\"\n)\n\ntype chartPlugin struct {\n\tbruxism.SimplePlugin\n\n\tcooldown map[string]time.Time\n}\n\nvar randomDirection = []string{\n\t\"up\",\n\t\"down\",\n\t\"flat\",\n}\n\nvar randomY = []string{\n\t\"interest\",\n\t\"care\",\n\t\"success\",\n\t\"fail\",\n\t\"happiness\",\n\t\"sadness\",\n\t\"money\",\n}\n\nvar randomX = []string{\n\t\"time\",\n\t\"releases\",\n\t\"days\",\n\t\"years\",\n}\n\nfunc (p *chartPlugin) random(list []string) string {\n\treturn list[rand.Intn(len(list))]\n}\n\nfunc (p *chartPlugin) randomChart(service bruxism.Service) string {\n\tticks := \"\"\n\tif service.Name() == bruxism.DiscordServiceName {\n\t\tticks = \"`\"\n\t}\n\n\treturn fmt.Sprintf(\"%s%schart %s %s, %s%s\", ticks, service.CommandPrefix(), p.random(randomDirection), p.random(randomY), p.random(randomX), ticks)\n}\n\nfunc (p *chartPlugin) helpFunc(bot *bruxism.Bot, service bruxism.Service, message bruxism.Message, detailed bool) []string {\n\thelp := bruxism.CommandHelp(service, \"chart\", \"<up|down|flat> <vertical message>, <horizontal message>\", \"Creates a chart trending in the desired direction.\")\n\n\tif detailed {\n\t\thelp = append(help, []string{\n\t\t\t\"Examples:\",\n\t\t\tbruxism.CommandHelp(service, \"chart\", \"down interest, time\", \"Creates a chart showing declining interest over time\")[0],\n\t\t}...)\n\t}\n\n\treturn help\n}\n\nfunc (p *chartPlugin) messageFunc(bot *bruxism.Bot, service bruxism.Service, message bruxism.Message) {\n\tif service.IsMe(message) {\n\t\treturn\n\t}\n\n\tif !bruxism.MatchesCommand(service, \"chart\", message) {\n\t\treturn\n\t}\n\n\tcooldown := p.cooldown[message.UserID()]\n\tif cooldown.After(time.Now()) {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Sorry %s, you need to wait %s before creating another chart.\", message.UserName(), humanize.Time(cooldown)))\n\t\treturn\n\t}\n\tp.cooldown[message.UserID()] = time.Now().Add(10 * time.Second)\n\n\tquery, parts := bruxism.ParseCommand(service, message)\n\tif len(parts) == 0 {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Invalid chart eg: %s\", p.randomChart(service)))\n\t\treturn\n\t}\n\n\tstart, end := 0.5, 0.5\n\n\tswitch parts[0] {\n\tcase \"up\":\n\t\tstart, end = 0, 1\n\tcase \"down\":\n\t\tstart, end = 1, 0\n\tcase \"flat\":\n\tcase \"straight\":\n\tdefault:\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Invalid chart direction. eg: %s\", p.randomChart(service)))\n\t\treturn\n\t}\n\n\taxes := strings.Split(query[len(parts[0]):], \",\")\n\tif len(axes) != 2 {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Invalid chart axis labels eg: %s\", p.randomChart(service)))\n\t\treturn\n\t}\n\n\tpl, err := plot.New()\n\tif err != nil {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Error making chart, sorry! eg: %s\", p.randomChart(service)))\n\t\treturn\n\t}\n\n\tservice.Typing(message.Channel())\n\n\tpl.Y.Label.Text = axes[0]\n\tpl.X.Label.Text = axes[1]\n\n\tnum := 5 + rand.Intn(15)\n\n\tstart *= float64(num)\n\tend *= float64(num)\n\n\tpts := make(plotter.XYs, num)\n\tfor i := range pts {\n\t\tpts[i].X = float64(i) + rand.Float64()*0.5 - 0.2\n\t\tpts[i].Y = start + float64(end-start)\/float64(num-1)*float64(i) + rand.Float64()*0.5 - 0.25\n\t}\n\n\tpl.X.Tick.Label.Color = color.Transparent\n\tpl.Y.Tick.Label.Color = color.Transparent\n\n\tpl.X.Min = -0.5\n\tpl.X.Max = float64(num) + 0.5\n\n\tpl.Y.Min = -0.5\n\tpl.Y.Max = float64(num) + 0.5\n\n\tlpLine, lpPoints, err := plotter.NewLinePoints(pts)\n\tif err != nil {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Sorry %s, there was a problem creating your chart.\", message.UserName()))\n\t}\n\tlpLine.Color = plotutil.Color(rand.Int())\n\tlpLine.Width = vg.Points(1 + 0.5*rand.Float64())\n\tlpLine.Dashes = plotutil.Dashes(rand.Int())\n\tlpPoints.Shape = plotutil.Shape(rand.Int())\n\tlpPoints.Color = lpLine.Color\n\n\tpl.Add(lpLine, lpPoints)\n\n\tw, err := pl.WriterTo(320, 240, \"png\")\n\tif err != nil {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Sorry %s, there was a problem creating your chart.\", message.UserName()))\n\t\treturn\n\t}\n\n\tb := &bytes.Buffer{}\n\tw.WriteTo(b)\n\n\tgo func() {\n\t\tif service.Name() == bruxism.DiscordServiceName {\n\t\t\tdiscord := service.(*bruxism.Discord)\n\t\t\tp, err := discord.UserChannelPermissions(message.UserID(), message.Channel())\n\t\t\tif err == nil && p&discordgo.PermissionAttachFiles == discordgo.PermissionAttachFiles {\n\t\t\t\tservice.SendFile(message.Channel(), \"chart.png\", b)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\turl, err := bot.UploadToImgur(b, \"chart.png\")\n\t\tif err != nil {\n\t\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Sorry %s, there was a problem uploading the chart to imgur.\", message.UserName()))\n\t\t\tlog.Println(\"Error uploading chart: \", err)\n\t\t\treturn\n\t\t}\n\n\t\tif service.Name() == bruxism.DiscordServiceName {\n\t\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Here's your chart <@%s>: %s\", message.UserID(), url))\n\t\t} else {\n\t\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Here's your chart %s: %s\", message.UserName(), url))\n\t\t}\n\t}()\n}\n\n\/\/ New will create a new chart plugin.\nfunc New() bruxism.Plugin {\n\tp := &chartPlugin{\n\t\tSimplePlugin: *bruxism.NewSimplePlugin(\"Chart\"),\n\t\tcooldown: map[string]time.Time{},\n\t}\n\tp.MessageFunc = p.messageFunc\n\tp.HelpFunc = p.helpFunc\n\treturn p\n}\n<commit_msg>Update gonum.<commit_after>package chartplugin\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"image\/color\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/dustin\/go-humanize\"\n\t\"gonum.org\/v1\/plot\"\n\t\"gonum.org\/v1\/plot\/plotter\"\n\t\"gonum.org\/v1\/plot\/plotutil\"\n\t\"gonum.org\/v1\/plot\/vg\"\n\t\"github.com\/iopred\/bruxism\"\n\t\"github.com\/iopred\/discordgo\"\n)\n\ntype chartPlugin struct {\n\tbruxism.SimplePlugin\n\n\tcooldown map[string]time.Time\n}\n\nvar randomDirection = []string{\n\t\"up\",\n\t\"down\",\n\t\"flat\",\n}\n\nvar randomY = []string{\n\t\"interest\",\n\t\"care\",\n\t\"success\",\n\t\"fail\",\n\t\"happiness\",\n\t\"sadness\",\n\t\"money\",\n}\n\nvar randomX = []string{\n\t\"time\",\n\t\"releases\",\n\t\"days\",\n\t\"years\",\n}\n\nfunc (p *chartPlugin) random(list []string) string {\n\treturn list[rand.Intn(len(list))]\n}\n\nfunc (p *chartPlugin) randomChart(service bruxism.Service) string {\n\tticks := \"\"\n\tif service.Name() == bruxism.DiscordServiceName {\n\t\tticks = \"`\"\n\t}\n\n\treturn fmt.Sprintf(\"%s%schart %s %s, %s%s\", ticks, service.CommandPrefix(), p.random(randomDirection), p.random(randomY), p.random(randomX), ticks)\n}\n\nfunc (p *chartPlugin) helpFunc(bot *bruxism.Bot, service bruxism.Service, message bruxism.Message, detailed bool) []string {\n\thelp := bruxism.CommandHelp(service, \"chart\", \"<up|down|flat> <vertical message>, <horizontal message>\", \"Creates a chart trending in the desired direction.\")\n\n\tif detailed {\n\t\thelp = append(help, []string{\n\t\t\t\"Examples:\",\n\t\t\tbruxism.CommandHelp(service, \"chart\", \"down interest, time\", \"Creates a chart showing declining interest over time\")[0],\n\t\t}...)\n\t}\n\n\treturn help\n}\n\nfunc (p *chartPlugin) messageFunc(bot *bruxism.Bot, service bruxism.Service, message bruxism.Message) {\n\tif service.IsMe(message) {\n\t\treturn\n\t}\n\n\tif !bruxism.MatchesCommand(service, \"chart\", message) {\n\t\treturn\n\t}\n\n\tcooldown := p.cooldown[message.UserID()]\n\tif cooldown.After(time.Now()) {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Sorry %s, you need to wait %s before creating another chart.\", message.UserName(), humanize.Time(cooldown)))\n\t\treturn\n\t}\n\tp.cooldown[message.UserID()] = time.Now().Add(10 * time.Second)\n\n\tquery, parts := bruxism.ParseCommand(service, message)\n\tif len(parts) == 0 {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Invalid chart eg: %s\", p.randomChart(service)))\n\t\treturn\n\t}\n\n\tstart, end := 0.5, 0.5\n\n\tswitch parts[0] {\n\tcase \"up\":\n\t\tstart, end = 0, 1\n\tcase \"down\":\n\t\tstart, end = 1, 0\n\tcase \"flat\":\n\tcase \"straight\":\n\tdefault:\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Invalid chart direction. eg: %s\", p.randomChart(service)))\n\t\treturn\n\t}\n\n\taxes := strings.Split(query[len(parts[0]):], \",\")\n\tif len(axes) != 2 {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Invalid chart axis labels eg: %s\", p.randomChart(service)))\n\t\treturn\n\t}\n\n\tpl, err := plot.New()\n\tif err != nil {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Error making chart, sorry! eg: %s\", p.randomChart(service)))\n\t\treturn\n\t}\n\n\tservice.Typing(message.Channel())\n\n\tpl.Y.Label.Text = axes[0]\n\tpl.X.Label.Text = axes[1]\n\n\tnum := 5 + rand.Intn(15)\n\n\tstart *= float64(num)\n\tend *= float64(num)\n\n\tpts := make(plotter.XYs, num)\n\tfor i := range pts {\n\t\tpts[i].X = float64(i) + rand.Float64()*0.5 - 0.2\n\t\tpts[i].Y = start + float64(end-start)\/float64(num-1)*float64(i) + rand.Float64()*0.5 - 0.25\n\t}\n\n\tpl.X.Tick.Label.Color = color.Transparent\n\tpl.Y.Tick.Label.Color = color.Transparent\n\n\tpl.X.Min = -0.5\n\tpl.X.Max = float64(num) + 0.5\n\n\tpl.Y.Min = -0.5\n\tpl.Y.Max = float64(num) + 0.5\n\n\tlpLine, lpPoints, err := plotter.NewLinePoints(pts)\n\tif err != nil {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Sorry %s, there was a problem creating your chart.\", message.UserName()))\n\t}\n\tlpLine.Color = plotutil.Color(rand.Int())\n\tlpLine.Width = vg.Points(1 + 0.5*rand.Float64())\n\tlpLine.Dashes = plotutil.Dashes(rand.Int())\n\tlpPoints.Shape = plotutil.Shape(rand.Int())\n\tlpPoints.Color = lpLine.Color\n\n\tpl.Add(lpLine, lpPoints)\n\n\tw, err := pl.WriterTo(320, 240, \"png\")\n\tif err != nil {\n\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Sorry %s, there was a problem creating your chart.\", message.UserName()))\n\t\treturn\n\t}\n\n\tb := &bytes.Buffer{}\n\tw.WriteTo(b)\n\n\tgo func() {\n\t\tif service.Name() == bruxism.DiscordServiceName {\n\t\t\tdiscord := service.(*bruxism.Discord)\n\t\t\tp, err := discord.UserChannelPermissions(message.UserID(), message.Channel())\n\t\t\tif err == nil && p&discordgo.PermissionAttachFiles == discordgo.PermissionAttachFiles {\n\t\t\t\tservice.SendFile(message.Channel(), \"chart.png\", b)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\turl, err := bot.UploadToImgur(b, \"chart.png\")\n\t\tif err != nil {\n\t\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Sorry %s, there was a problem uploading the chart to imgur.\", message.UserName()))\n\t\t\tlog.Println(\"Error uploading chart: \", err)\n\t\t\treturn\n\t\t}\n\n\t\tif service.Name() == bruxism.DiscordServiceName {\n\t\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Here's your chart <@%s>: %s\", message.UserID(), url))\n\t\t} else {\n\t\t\tservice.SendMessage(message.Channel(), fmt.Sprintf(\"Here's your chart %s: %s\", message.UserName(), url))\n\t\t}\n\t}()\n}\n\n\/\/ New will create a new chart plugin.\nfunc New() bruxism.Plugin {\n\tp := &chartPlugin{\n\t\tSimplePlugin: *bruxism.NewSimplePlugin(\"Chart\"),\n\t\tcooldown: map[string]time.Time{},\n\t}\n\tp.MessageFunc = p.messageFunc\n\tp.HelpFunc = p.helpFunc\n\treturn p\n}\n<|endoftext|>"} {"text":"<commit_before>package parse \/\/ import \"github.com\/tdewolff\/parse\"\n\nimport (\n\t\"errors\"\n\t\"io\"\n)\n\n\/\/ MinBuf and MaxBuf are the initial and maximal internal buffer size.\nvar MinBuf = 1024\nvar MaxBuf = 1048576 \/\/ upper limit 1MB\n\n\/\/ ErrBufferExceeded is returned when the internal buffer exceeds 4096 bytes, a string or comment must thus be smaller than 4kB!\nvar ErrBufferExceeded = errors.New(\"max buffer exceeded\")\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ ShiftBuffer is a buffered reader that allows peeking forward and shifting, taking an io.Reader.\ntype ShiftBuffer struct {\n\tr io.Reader\n\terr error\n\n\tbuf []byte\n\tpos int\n\tend int\n}\n\n\/\/ NewShiftBufferReader returns a new ShiftBuffer for a given io.Reader.\nfunc NewShiftBuffer(r io.Reader) *ShiftBuffer {\n\t\/\/ If reader has the bytes in memory already, use that instead!\n\tif buffer, ok := r.(interface {\n\t\tBytes() []byte\n\t}); ok {\n\t\treturn &ShiftBuffer{\n\t\t\terr: io.EOF,\n\t\t\tbuf: buffer.Bytes(),\n\t\t}\n\t}\n\tz := &ShiftBuffer{\n\t\tr: r,\n\t\tbuf: make([]byte, 0, MinBuf),\n\t}\n\tz.Peek(0)\n\treturn z\n}\n\n\/\/ Err returns the error.\nfunc (z *ShiftBuffer) Err() error {\n\tif z.err == io.EOF && z.end < len(z.buf) {\n\t\treturn nil\n\t}\n\treturn z.err\n}\n\n\/\/ IsEOF returns true when it has encountered EOF and thus loaded the last buffer in memory.\nfunc (z *ShiftBuffer) IsEOF() bool {\n\treturn z.err == io.EOF\n}\n\n\/\/ Move advances the 0 position of read.\nfunc (z *ShiftBuffer) Move(n int) {\n\tz.end += n\n}\n\n\/\/ MoveTo sets the 0 position of read.\nfunc (z *ShiftBuffer) MoveTo(n int) {\n\tz.end = z.pos + n\n}\n\n\/\/ Pos returns the 0 position of read.\nfunc (z *ShiftBuffer) Pos() int {\n\treturn z.end - z.pos\n}\n\n\/\/ Peek returns the ith byte and possible does a reallocation\nfunc (z *ShiftBuffer) Peek(i int) byte {\n\tend := z.end + i\n\tif end >= len(z.buf) {\n\t\tif z.err != nil {\n\t\t\treturn 0\n\t\t}\n\n\t\t\/\/ reallocate a new buffer (possibly larger)\n\t\tc := cap(z.buf)\n\t\td := end - z.pos\n\t\tvar buf1 []byte\n\t\tif 2*d > c {\n\t\t\tif 2*c > MaxBuf {\n\t\t\t\tz.err = ErrBufferExceeded\n\t\t\t\treturn 0\n\t\t\t}\n\t\t\tbuf1 = make([]byte, d, 2*c)\n\t\t} else {\n\t\t\tbuf1 = z.buf[:d]\n\t\t}\n\t\tcopy(buf1, z.buf[z.pos:end])\n\n\t\t\/\/ Read in to fill the buffer till capacity\n\t\tvar n int\n\t\tn, z.err = z.r.Read(buf1[d:cap(buf1)])\n\t\tend -= z.pos\n\t\tz.end -= z.pos\n\t\tz.pos, z.buf = 0, buf1[:d+n]\n\t\tif n == 0 {\n\t\t\treturn 0\n\t\t}\n\t}\n\treturn z.buf[end]\n}\n\n\/\/ PeekRune returns the rune of the ith byte.\nfunc (z *ShiftBuffer) PeekRune(i int) rune {\n\t\/\/ from unicode\/utf8\n\tc := z.Peek(i)\n\tif c < 0xC0 {\n\t\treturn rune(c)\n\t} else if c < 0xE0 {\n\t\treturn rune(c&0x1F)<<6 | rune(z.Peek(i+1)&0x3F)\n\t} else if c < 0xF0 {\n\t\treturn rune(c&0x0F)<<12 | rune(z.Peek(i+1)&0x3F)<<6 | rune(z.Peek(i+2)&0x3F)\n\t} else {\n\t\treturn rune(c&0x07)<<18 | rune(z.Peek(i+1)&0x3F)<<12 | rune(z.Peek(i+2)&0x3F)<<6 | rune(z.Peek(i+3)&0x3F)\n\t}\n}\n\n\/\/ Bytes returns the bytes of the current selection.\nfunc (z *ShiftBuffer) Bytes() []byte {\n\treturn z.buf[z.pos:z.end]\n}\n\n\/\/ Shift returns the bytes of the current selection and advances the position.\nfunc (z *ShiftBuffer) Shift() []byte {\n\tb := z.buf[z.pos:z.end]\n\tz.pos = z.end\n\treturn b\n}\n<commit_msg>Skip functionality<commit_after>package parse \/\/ import \"github.com\/tdewolff\/parse\"\n\nimport (\n\t\"errors\"\n\t\"io\"\n)\n\n\/\/ MinBuf and MaxBuf are the initial and maximal internal buffer size.\nvar MinBuf = 1024\nvar MaxBuf = 1048576 \/\/ upper limit 1MB\n\n\/\/ ErrBufferExceeded is returned when the internal buffer exceeds 4096 bytes, a string or comment must thus be smaller than 4kB!\nvar ErrBufferExceeded = errors.New(\"max buffer exceeded\")\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ ShiftBuffer is a buffered reader that allows peeking forward and shifting, taking an io.Reader.\ntype ShiftBuffer struct {\n\tr io.Reader\n\terr error\n\n\tbuf []byte\n\tpos int\n\tend int\n}\n\n\/\/ NewShiftBufferReader returns a new ShiftBuffer for a given io.Reader.\nfunc NewShiftBuffer(r io.Reader) *ShiftBuffer {\n\t\/\/ If reader has the bytes in memory already, use that instead!\n\tif buffer, ok := r.(interface {\n\t\tBytes() []byte\n\t}); ok {\n\t\treturn &ShiftBuffer{\n\t\t\terr: io.EOF,\n\t\t\tbuf: buffer.Bytes(),\n\t\t}\n\t}\n\tz := &ShiftBuffer{\n\t\tr: r,\n\t\tbuf: make([]byte, 0, MinBuf),\n\t}\n\tz.Peek(0)\n\treturn z\n}\n\n\/\/ Err returns the error.\nfunc (z *ShiftBuffer) Err() error {\n\tif z.err == io.EOF && z.end < len(z.buf) {\n\t\treturn nil\n\t}\n\treturn z.err\n}\n\n\/\/ IsEOF returns true when it has encountered EOF and thus loaded the last buffer in memory.\nfunc (z *ShiftBuffer) IsEOF() bool {\n\treturn z.err == io.EOF\n}\n\n\/\/ Move advances the 0 position of read.\nfunc (z *ShiftBuffer) Move(n int) {\n\tz.end += n\n}\n\n\/\/ MoveTo sets the 0 position of read.\nfunc (z *ShiftBuffer) MoveTo(n int) {\n\tz.end = z.pos + n\n}\n\n\/\/ Pos returns the 0 position of read.\nfunc (z *ShiftBuffer) Pos() int {\n\treturn z.end - z.pos\n}\n\n\/\/ Peek returns the ith byte and possible does a reallocation\nfunc (z *ShiftBuffer) Peek(i int) byte {\n\tend := z.end + i\n\tif end >= len(z.buf) {\n\t\tif z.err != nil {\n\t\t\treturn 0\n\t\t}\n\n\t\t\/\/ reallocate a new buffer (possibly larger)\n\t\tc := cap(z.buf)\n\t\td := end - z.pos\n\t\tvar buf1 []byte\n\t\tif 2*d > c {\n\t\t\tif 2*c > MaxBuf {\n\t\t\t\tz.err = ErrBufferExceeded\n\t\t\t\treturn 0\n\t\t\t}\n\t\t\tbuf1 = make([]byte, d, 2*c)\n\t\t} else {\n\t\t\tbuf1 = z.buf[:d]\n\t\t}\n\t\tcopy(buf1, z.buf[z.pos:end])\n\n\t\t\/\/ Read in to fill the buffer till capacity\n\t\tvar n int\n\t\tn, z.err = z.r.Read(buf1[d:cap(buf1)])\n\t\tend -= z.pos\n\t\tz.end -= z.pos\n\t\tz.pos, z.buf = 0, buf1[:d+n]\n\t\tif n == 0 {\n\t\t\treturn 0\n\t\t}\n\t}\n\treturn z.buf[end]\n}\n\n\/\/ PeekRune returns the rune of the ith byte.\nfunc (z *ShiftBuffer) PeekRune(i int) rune {\n\t\/\/ from unicode\/utf8\n\tc := z.Peek(i)\n\tif c < 0xC0 {\n\t\treturn rune(c)\n\t} else if c < 0xE0 {\n\t\treturn rune(c&0x1F)<<6 | rune(z.Peek(i+1)&0x3F)\n\t} else if c < 0xF0 {\n\t\treturn rune(c&0x0F)<<12 | rune(z.Peek(i+1)&0x3F)<<6 | rune(z.Peek(i+2)&0x3F)\n\t} else {\n\t\treturn rune(c&0x07)<<18 | rune(z.Peek(i+1)&0x3F)<<12 | rune(z.Peek(i+2)&0x3F)<<6 | rune(z.Peek(i+3)&0x3F)\n\t}\n}\n\n\/\/ Bytes returns the bytes of the current selection.\nfunc (z *ShiftBuffer) Bytes() []byte {\n\treturn z.buf[z.pos:z.end]\n}\n\n\/\/ Shift returns the bytes of the current selection and collapses the position.\nfunc (z *ShiftBuffer) Shift() []byte {\n\tb := z.buf[z.pos:z.end]\n\tz.pos = z.end\n\treturn b\n}\n\n\/\/ Skip collapses the position.\nfunc (z *ShiftBuffer) Skip() {\n\tz.pos = z.end\n}\n<|endoftext|>"} {"text":"<commit_before>package idp\n\nimport (\n\t\"crypto\/rsa\"\n\t\"net\/http\"\n\t\"time\"\n\n\tjwt \"github.com\/dgrijalva\/jwt-go\"\n\t\"github.com\/gorilla\/sessions\"\n\thclient \"github.com\/ory-am\/hydra\/client\"\n\thjwk \"github.com\/ory-am\/hydra\/jwk\"\n\thoauth2 \"github.com\/ory-am\/hydra\/oauth2\"\n\thydra \"github.com\/ory-am\/hydra\/sdk\"\n\t\"github.com\/patrickmn\/go-cache\"\n)\n\nconst (\n\tVerifyPublicKey = \"VerifyPublic\"\n\tConsentPrivateKey = \"ConsentPrivate\"\n)\n\nfunc ClientInfoKey(clientID string) string {\n\treturn \"ClientInfo:\" + clientID\n}\n\nvar encryptionkey = \"something-very-secret\"\n\n\/\/ Identity Provider's options\ntype IDPConfig struct {\n\t\/\/ Client id issued by Hydra\n\tClientID string `yaml:\"client_id\"`\n\n\t\/\/ Client secret issued by Hydra\n\tClientSecret string `yaml:\"client_secret\"`\n\n\t\/\/ Hydra's address\n\tClusterURL string `yaml:\"hydra_address\"`\n\n\t\/\/ Expiration time of internal key cache\n\tKeyCacheExpiration time.Duration `yaml:\"key_cache_expiration\"`\n\n\t\/\/ Expiration time of internal clientid cache\n\tClientCacheExpiration time.Duration `yaml:\"client_cache_expiration\"`\n\n\t\/\/ Internal cache cleanup interval\n\tCacheCleanupInterval time.Duration `yaml:\"cache_cleanup_interval\"`\n\n\t\/\/ Expiration time of internal clientid cache\n\tChallengeExpiration time.Duration `yaml:\"challenge_expiration\"`\n\n\t\/\/ Gorilla sessions Store for storing the Challenge.\n\tChallengeStore sessions.Store\n}\n\n\/\/ Identity Provider helper\ntype IDP struct {\n\tconfig *IDPConfig\n\n\t\/\/ Communication with Hydra\n\thc *hydra.Client\n\n\t\/\/ Http client for communicating with Hydra\n\tclient *http.Client\n\n\t\/\/ Cache for all private and public keys\n\tcache *cache.Cache\n\n\t\/\/ Prepared cookie options for creating and deleting cookies\n\tcreateChallengeCookieOptions *sessions.Options\n\tdeleteChallengeCookieOptions *sessions.Options\n}\n\n\/\/ Create the Identity Provider helper\nfunc NewIDP(config *IDPConfig) *IDP {\n\tvar idp = new(IDP)\n\tidp.config = config\n\n\tidp.cache = cache.New(config.KeyCacheExpiration, config.CacheCleanupInterval)\n\tidp.cache.OnEvicted(func(key string, value interface{}) { idp.refreshCache(key) })\n\n\tidp.createChallengeCookieOptions = new(sessions.Options)\n\tidp.createChallengeCookieOptions.Path = \"\/\"\n\tidp.createChallengeCookieOptions.MaxAge = int(config.ChallengeExpiration.Seconds())\n\tidp.createChallengeCookieOptions.Secure = true \/\/ Send only via https\n\tidp.createChallengeCookieOptions.HttpOnly = false\n\n\tidp.deleteChallengeCookieOptions = new(sessions.Options)\n\tidp.deleteChallengeCookieOptions.Path = \"\/\"\n\tidp.deleteChallengeCookieOptions.MaxAge = -1 \/\/ Mark for deletion\n\tidp.deleteChallengeCookieOptions.Secure = true \/\/ Send only via https\n\tidp.deleteChallengeCookieOptions.HttpOnly = false\n\n\treturn idp\n}\n\nfunc (idp *IDP) cacheConsentKey() error {\n\tconsentKey, err := idp.downloadConsentKey()\n\n\tduration := cache.DefaultExpiration\n\tif err != nil {\n\t\t\/\/ re-cache the result even if there's an error, but\n\t\t\/\/ do it with a shorter timeout. This will ensure we\n\t\t\/\/ try to refresh the key once that timeout expires,\n\t\t\/\/ otherwise we'll _never_ refresh the key again.\n\t\tduration = idp.config.CacheCleanupInterval\n\t}\n\n\tidp.cache.Set(ConsentPrivateKey, consentKey, duration)\n\treturn err\n}\n\nfunc (idp *IDP) cacheVerificationKey() error {\n\tverifyKey, err := idp.downloadVerificationKey()\n\n\tduration := cache.DefaultExpiration\n\tif err != nil {\n\t\t\/\/ re-cache the result even if there's an error, but\n\t\t\/\/ do it with a shorter timeout. This will ensure we\n\t\t\/\/ try to refresh the key once that timeout expires,\n\t\t\/\/ otherwise we'll _never_ refresh the key again.\n\t\tduration = idp.config.CacheCleanupInterval\n\t}\n\n\tidp.cache.Set(VerifyPublicKey, verifyKey, duration)\n\treturn err\n}\n\n\/\/ Called when any key expires\nfunc (idp *IDP) refreshCache(key string) {\n\tswitch key {\n\tcase VerifyPublicKey:\n\t\tidp.cacheVerificationKey()\n\t\treturn\n\n\tcase ConsentPrivateKey:\n\t\tidp.cacheConsentKey()\n\t\treturn\n\n\tdefault:\n\t\t\/\/ Will get here for client IDs.\n\t\t\/\/ Fine to just let them expire, the next request from that\n\t\t\/\/ client will trigger a refresh\n\t\treturn\n\t}\n}\n\n\/\/ Downloads the hydra's public key\nfunc (idp *IDP) downloadVerificationKey() (*rsa.PublicKey, error) {\n\n\tjwk, err := idp.hc.JWK.GetKey(hoauth2.ConsentChallengeKey, \"public\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trsaKey, ok := hjwk.First(jwk.Keys).Key.(*rsa.PublicKey)\n\tif !ok {\n\t\treturn nil, ErrorBadPublicKey\n\t}\n\n\treturn rsaKey, nil\n}\n\n\/\/ Downloads the private key used for signing the consent\nfunc (idp *IDP) downloadConsentKey() (*rsa.PrivateKey, error) {\n\tjwk, err := idp.hc.JWK.GetKey(hoauth2.ConsentEndpointKey, \"private\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trsaKey, ok := hjwk.First(jwk.Keys).Key.(*rsa.PrivateKey)\n\tif !ok {\n\t\treturn nil, ErrorBadPrivateKey\n\t}\n\n\treturn rsaKey, nil\n}\n\n\/\/ Connect to Hydra\nfunc (idp *IDP) Connect(verifyTLS bool) error {\n\tvar err error\n\tif verifyTLS {\n\t\tidp.hc, err = hydra.Connect(\n\t\t\thydra.ClientID(idp.config.ClientID),\n\t\t\thydra.ClientSecret(idp.config.ClientSecret),\n\t\t\thydra.ClusterURL(idp.config.ClusterURL),\n\t\t)\n\t} else {\n\t\tidp.hc, err = hydra.Connect(\n\t\t\thydra.ClientID(idp.config.ClientID),\n\t\t\thydra.ClientSecret(idp.config.ClientSecret),\n\t\t\thydra.ClusterURL(idp.config.ClusterURL),\n\t\t\thydra.SkipTLSVerify(),\n\t\t)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = idp.cacheVerificationKey()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = idp.cacheConsentKey()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Parse and verify the challenge JWT\nfunc (idp *IDP) getChallengeToken(challengeString string) (*jwt.Token, error) {\n\ttoken, err := jwt.Parse(challengeString, func(token *jwt.Token) (interface{}, error) {\n\t\t_, ok := token.Method.(*jwt.SigningMethodRSA)\n\t\tif !ok {\n\t\t\treturn nil, ErrorBadSigningMethod\n\t\t}\n\n\t\treturn idp.getVerificationKey()\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !token.Valid {\n\t\treturn nil, ErrorInvalidToken\n\t}\n\n\treturn token, nil\n}\n\nfunc (idp *IDP) getConsentKey() (*rsa.PrivateKey, error) {\n\tdata, ok := idp.cache.Get(ConsentPrivateKey)\n\tif !ok {\n\t\treturn nil, ErrorNotInCache\n\t}\n\n\tkey, ok := data.(*rsa.PrivateKey)\n\tif !ok {\n\t\treturn nil, ErrorBadKey\n\t}\n\n\treturn key, nil\n}\n\nfunc (idp *IDP) getVerificationKey() (*rsa.PublicKey, error) {\n\tdata, ok := idp.cache.Get(VerifyPublicKey)\n\tif !ok {\n\t\treturn nil, ErrorNotInCache\n\t}\n\n\tkey, ok := data.(*rsa.PublicKey)\n\tif !ok {\n\t\treturn nil, ErrorBadKey\n\t}\n\n\treturn key, nil\n}\n\nfunc (idp *IDP) getClient(clientID string) (*hclient.Client, error) {\n\tclientKey := ClientInfoKey(clientID)\n\tdata, ok := idp.cache.Get(clientKey)\n\tif ok {\n\t\tif data != nil {\n\t\t\tclient := data.(*hclient.Client)\n\t\t\treturn client, nil\n\t\t}\n\t\treturn nil, ErrorNoSuchClient\n\t}\n\n\tclient, err := idp.hc.Client.GetClient(clientID)\n\tif err != nil {\n\t\t\/\/ Either the client isn't registered in hydra, or maybe hydra is\n\t\t\/\/ having some problem. Either way, ensure we don't hit hydra again\n\t\t\/\/ for this client if someone (maybe an attacker) retries quickly.\n\t\tidp.cache.Set(clientKey, nil, idp.config.ClientCacheExpiration)\n\t\treturn nil, err\n\t}\n\n\tc := client.(*hclient.Client)\n\tidp.cache.Set(clientKey, client, idp.config.ClientCacheExpiration)\n\treturn c, nil\n}\n\n\/\/ Create a new Challenge. The request will contain all the necessary information from Hydra, passed in the URL.\nfunc (idp *IDP) NewChallenge(r *http.Request, user string) (challenge *Challenge, err error) {\n\ttokenStr := r.FormValue(\"challenge\")\n\tif tokenStr == \"\" {\n\t\t\/\/ No challenge token\n\t\terr = ErrorBadRequest\n\t\treturn\n\t}\n\n\ttoken, err := idp.getChallengeToken(tokenStr)\n\tif err != nil {\n\t\t\/\/ Most probably, token can't be verified or parsed\n\t\treturn\n\t}\n\tclaims := token.Claims.(jwt.MapClaims)\n\n\tchallenge = new(Challenge)\n\tchallenge.Expires = time.Unix(int64(claims[\"exp\"].(float64)), 0)\n\tif challenge.Expires.Before(time.Now()) {\n\t\tchallenge = nil\n\t\terr = ErrorChallengeExpired\n\t\treturn\n\t}\n\n\t\/\/ Get data from the challenge jwt\n\tchallenge.Client, err = idp.getClient(claims[\"aud\"].(string))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tchallenge.Redirect = claims[\"redir\"].(string)\n\tchallenge.User = user\n\tchallenge.idp = idp\n\n\tscopes := claims[\"scp\"].([]interface{})\n\tchallenge.Scopes = make([]string, len(scopes), len(scopes))\n\tfor i, scope := range scopes {\n\t\tchallenge.Scopes[i] = scope.(string)\n\t}\n\n\treturn\n}\n\n\/\/ Get the Challenge from a cookie, using Gorilla sessions\nfunc (idp *IDP) GetChallenge(r *http.Request) (*Challenge, error) {\n\tsession, err := idp.config.ChallengeStore.Get(r, SessionCookieName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tchallenge, ok := session.Values[SessionCookieName].(*Challenge)\n\tif !ok {\n\t\treturn nil, ErrorBadChallengeCookie\n\t}\n\n\tif challenge.Expires.Before(time.Now()) {\n\t\treturn nil, ErrorChallengeExpired\n\t}\n\n\tchallenge.idp = idp\n\n\treturn challenge, nil\n}\n\n\/\/ Closes connection to Hydra, cleans cache etc.\nfunc (idp *IDP) Close() {\n\tidp.client = nil\n\n\t\/\/ Removes all keys from the cache\n\tidp.cache.Flush()\n}\n<commit_msg>Fixed changes in Hydra's SDK<commit_after>package idp\n\nimport (\n\t\"crypto\/rsa\"\n\t\"net\/http\"\n\t\"time\"\n\n\tjwt \"github.com\/dgrijalva\/jwt-go\"\n\t\"github.com\/gorilla\/sessions\"\n\thclient \"github.com\/ory-am\/hydra\/client\"\n\thjwk \"github.com\/ory-am\/hydra\/jwk\"\n\thoauth2 \"github.com\/ory-am\/hydra\/oauth2\"\n\thydra \"github.com\/ory-am\/hydra\/sdk\"\n\t\"github.com\/patrickmn\/go-cache\"\n)\n\nconst (\n\tVerifyPublicKey = \"VerifyPublic\"\n\tConsentPrivateKey = \"ConsentPrivate\"\n)\n\nfunc ClientInfoKey(clientID string) string {\n\treturn \"ClientInfo:\" + clientID\n}\n\nvar encryptionkey = \"something-very-secret\"\n\n\/\/ Identity Provider's options\ntype IDPConfig struct {\n\t\/\/ Client id issued by Hydra\n\tClientID string `yaml:\"client_id\"`\n\n\t\/\/ Client secret issued by Hydra\n\tClientSecret string `yaml:\"client_secret\"`\n\n\t\/\/ Hydra's address\n\tClusterURL string `yaml:\"hydra_address\"`\n\n\t\/\/ Expiration time of internal key cache\n\tKeyCacheExpiration time.Duration `yaml:\"key_cache_expiration\"`\n\n\t\/\/ Expiration time of internal clientid cache\n\tClientCacheExpiration time.Duration `yaml:\"client_cache_expiration\"`\n\n\t\/\/ Internal cache cleanup interval\n\tCacheCleanupInterval time.Duration `yaml:\"cache_cleanup_interval\"`\n\n\t\/\/ Expiration time of internal clientid cache\n\tChallengeExpiration time.Duration `yaml:\"challenge_expiration\"`\n\n\t\/\/ Gorilla sessions Store for storing the Challenge.\n\tChallengeStore sessions.Store\n}\n\n\/\/ Identity Provider helper\ntype IDP struct {\n\tconfig *IDPConfig\n\n\t\/\/ Communication with Hydra\n\thc *hydra.Client\n\n\t\/\/ Http client for communicating with Hydra\n\tclient *http.Client\n\n\t\/\/ Cache for all private and public keys\n\tcache *cache.Cache\n\n\t\/\/ Prepared cookie options for creating and deleting cookies\n\tcreateChallengeCookieOptions *sessions.Options\n\tdeleteChallengeCookieOptions *sessions.Options\n}\n\n\/\/ Create the Identity Provider helper\nfunc NewIDP(config *IDPConfig) *IDP {\n\tvar idp = new(IDP)\n\tidp.config = config\n\n\tidp.cache = cache.New(config.KeyCacheExpiration, config.CacheCleanupInterval)\n\tidp.cache.OnEvicted(func(key string, value interface{}) { idp.refreshCache(key) })\n\n\tidp.createChallengeCookieOptions = new(sessions.Options)\n\tidp.createChallengeCookieOptions.Path = \"\/\"\n\tidp.createChallengeCookieOptions.MaxAge = int(config.ChallengeExpiration.Seconds())\n\tidp.createChallengeCookieOptions.Secure = true \/\/ Send only via https\n\tidp.createChallengeCookieOptions.HttpOnly = false\n\n\tidp.deleteChallengeCookieOptions = new(sessions.Options)\n\tidp.deleteChallengeCookieOptions.Path = \"\/\"\n\tidp.deleteChallengeCookieOptions.MaxAge = -1 \/\/ Mark for deletion\n\tidp.deleteChallengeCookieOptions.Secure = true \/\/ Send only via https\n\tidp.deleteChallengeCookieOptions.HttpOnly = false\n\n\treturn idp\n}\n\nfunc (idp *IDP) cacheConsentKey() error {\n\tconsentKey, err := idp.downloadConsentKey()\n\n\tduration := cache.DefaultExpiration\n\tif err != nil {\n\t\t\/\/ re-cache the result even if there's an error, but\n\t\t\/\/ do it with a shorter timeout. This will ensure we\n\t\t\/\/ try to refresh the key once that timeout expires,\n\t\t\/\/ otherwise we'll _never_ refresh the key again.\n\t\tduration = idp.config.CacheCleanupInterval\n\t}\n\n\tidp.cache.Set(ConsentPrivateKey, consentKey, duration)\n\treturn err\n}\n\nfunc (idp *IDP) cacheVerificationKey() error {\n\tverifyKey, err := idp.downloadVerificationKey()\n\n\tduration := cache.DefaultExpiration\n\tif err != nil {\n\t\t\/\/ re-cache the result even if there's an error, but\n\t\t\/\/ do it with a shorter timeout. This will ensure we\n\t\t\/\/ try to refresh the key once that timeout expires,\n\t\t\/\/ otherwise we'll _never_ refresh the key again.\n\t\tduration = idp.config.CacheCleanupInterval\n\t}\n\n\tidp.cache.Set(VerifyPublicKey, verifyKey, duration)\n\treturn err\n}\n\n\/\/ Called when any key expires\nfunc (idp *IDP) refreshCache(key string) {\n\tswitch key {\n\tcase VerifyPublicKey:\n\t\tidp.cacheVerificationKey()\n\t\treturn\n\n\tcase ConsentPrivateKey:\n\t\tidp.cacheConsentKey()\n\t\treturn\n\n\tdefault:\n\t\t\/\/ Will get here for client IDs.\n\t\t\/\/ Fine to just let them expire, the next request from that\n\t\t\/\/ client will trigger a refresh\n\t\treturn\n\t}\n}\n\n\/\/ Downloads the hydra's public key\nfunc (idp *IDP) downloadVerificationKey() (*rsa.PublicKey, error) {\n\n\tjwk, err := idp.hc.JSONWebKeys.GetKey(hoauth2.ConsentChallengeKey, \"public\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trsaKey, ok := hjwk.First(jwk.Keys).Key.(*rsa.PublicKey)\n\tif !ok {\n\t\treturn nil, ErrorBadPublicKey\n\t}\n\n\treturn rsaKey, nil\n}\n\n\/\/ Downloads the private key used for signing the consent\nfunc (idp *IDP) downloadConsentKey() (*rsa.PrivateKey, error) {\n\tjwk, err := idp.hc.JSONWebKeys.GetKey(hoauth2.ConsentEndpointKey, \"private\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trsaKey, ok := hjwk.First(jwk.Keys).Key.(*rsa.PrivateKey)\n\tif !ok {\n\t\treturn nil, ErrorBadPrivateKey\n\t}\n\n\treturn rsaKey, nil\n}\n\n\/\/ Connect to Hydra\nfunc (idp *IDP) Connect(verifyTLS bool) error {\n\tvar err error\n\tif verifyTLS {\n\t\tidp.hc, err = hydra.Connect(\n\t\t\thydra.ClientID(idp.config.ClientID),\n\t\t\thydra.ClientSecret(idp.config.ClientSecret),\n\t\t\thydra.ClusterURL(idp.config.ClusterURL),\n\t\t)\n\t} else {\n\t\tidp.hc, err = hydra.Connect(\n\t\t\thydra.ClientID(idp.config.ClientID),\n\t\t\thydra.ClientSecret(idp.config.ClientSecret),\n\t\t\thydra.ClusterURL(idp.config.ClusterURL),\n\t\t\thydra.SkipTLSVerify(),\n\t\t)\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = idp.cacheVerificationKey()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = idp.cacheConsentKey()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Parse and verify the challenge JWT\nfunc (idp *IDP) getChallengeToken(challengeString string) (*jwt.Token, error) {\n\ttoken, err := jwt.Parse(challengeString, func(token *jwt.Token) (interface{}, error) {\n\t\t_, ok := token.Method.(*jwt.SigningMethodRSA)\n\t\tif !ok {\n\t\t\treturn nil, ErrorBadSigningMethod\n\t\t}\n\n\t\treturn idp.getVerificationKey()\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !token.Valid {\n\t\treturn nil, ErrorInvalidToken\n\t}\n\n\treturn token, nil\n}\n\nfunc (idp *IDP) getConsentKey() (*rsa.PrivateKey, error) {\n\tdata, ok := idp.cache.Get(ConsentPrivateKey)\n\tif !ok {\n\t\treturn nil, ErrorNotInCache\n\t}\n\n\tkey, ok := data.(*rsa.PrivateKey)\n\tif !ok {\n\t\treturn nil, ErrorBadKey\n\t}\n\n\treturn key, nil\n}\n\nfunc (idp *IDP) getVerificationKey() (*rsa.PublicKey, error) {\n\tdata, ok := idp.cache.Get(VerifyPublicKey)\n\tif !ok {\n\t\treturn nil, ErrorNotInCache\n\t}\n\n\tkey, ok := data.(*rsa.PublicKey)\n\tif !ok {\n\t\treturn nil, ErrorBadKey\n\t}\n\n\treturn key, nil\n}\n\nfunc (idp *IDP) getClient(clientID string) (*hclient.Client, error) {\n\tclientKey := ClientInfoKey(clientID)\n\tdata, ok := idp.cache.Get(clientKey)\n\tif ok {\n\t\tif data != nil {\n\t\t\tclient := data.(*hclient.Client)\n\t\t\treturn client, nil\n\t\t}\n\t\treturn nil, ErrorNoSuchClient\n\t}\n\n\tclient, err := idp.hc.Clients.GetClient(clientID)\n\tif err != nil {\n\t\t\/\/ Either the client isn't registered in hydra, or maybe hydra is\n\t\t\/\/ having some problem. Either way, ensure we don't hit hydra again\n\t\t\/\/ for this client if someone (maybe an attacker) retries quickly.\n\t\tidp.cache.Set(clientKey, nil, idp.config.ClientCacheExpiration)\n\t\treturn nil, err\n\t}\n\n\tc := client.(*hclient.Client)\n\tidp.cache.Set(clientKey, client, idp.config.ClientCacheExpiration)\n\treturn c, nil\n}\n\n\/\/ Create a new Challenge. The request will contain all the necessary information from Hydra, passed in the URL.\nfunc (idp *IDP) NewChallenge(r *http.Request, user string) (challenge *Challenge, err error) {\n\ttokenStr := r.FormValue(\"challenge\")\n\tif tokenStr == \"\" {\n\t\t\/\/ No challenge token\n\t\terr = ErrorBadRequest\n\t\treturn\n\t}\n\n\ttoken, err := idp.getChallengeToken(tokenStr)\n\tif err != nil {\n\t\t\/\/ Most probably, token can't be verified or parsed\n\t\treturn\n\t}\n\tclaims := token.Claims.(jwt.MapClaims)\n\n\tchallenge = new(Challenge)\n\tchallenge.Expires = time.Unix(int64(claims[\"exp\"].(float64)), 0)\n\tif challenge.Expires.Before(time.Now()) {\n\t\tchallenge = nil\n\t\terr = ErrorChallengeExpired\n\t\treturn\n\t}\n\n\t\/\/ Get data from the challenge jwt\n\tchallenge.Client, err = idp.getClient(claims[\"aud\"].(string))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tchallenge.Redirect = claims[\"redir\"].(string)\n\tchallenge.User = user\n\tchallenge.idp = idp\n\n\tscopes := claims[\"scp\"].([]interface{})\n\tchallenge.Scopes = make([]string, len(scopes), len(scopes))\n\tfor i, scope := range scopes {\n\t\tchallenge.Scopes[i] = scope.(string)\n\t}\n\n\treturn\n}\n\n\/\/ Get the Challenge from a cookie, using Gorilla sessions\nfunc (idp *IDP) GetChallenge(r *http.Request) (*Challenge, error) {\n\tsession, err := idp.config.ChallengeStore.Get(r, SessionCookieName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tchallenge, ok := session.Values[SessionCookieName].(*Challenge)\n\tif !ok {\n\t\treturn nil, ErrorBadChallengeCookie\n\t}\n\n\tif challenge.Expires.Before(time.Now()) {\n\t\treturn nil, ErrorChallengeExpired\n\t}\n\n\tchallenge.idp = idp\n\n\treturn challenge, nil\n}\n\n\/\/ Closes connection to Hydra, cleans cache etc.\nfunc (idp *IDP) Close() {\n\tidp.client = nil\n\n\t\/\/ Removes all keys from the cache\n\tidp.cache.Flush()\n}\n<|endoftext|>"} {"text":"<commit_before>package e2e\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"github.com\/blang\/semver\/v4\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t\"github.com\/onsi\/ginkgo\/extensions\/table\"\n\t. \"github.com\/onsi\/gomega\"\n\toperatorsv1alpha1 \"github.com\/operator-framework\/api\/pkg\/operators\/v1alpha1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/client\"\n\n\t\"github.com\/operator-framework\/operator-lifecycle-manager\/test\/e2e\/ctx\"\n)\n\nvar missingAPI = `{\"apiVersion\":\"verticalpodautoscalers.autoscaling.k8s.io\/v1\",\"kind\":\"VerticalPodAutoscaler\",\"metadata\":{\"name\":\"my.thing\",\"namespace\":\"foo\"}}`\n\nvar _ = Describe(\"Not found APIs\", func() {\n\tBeforeEach(func() {\n\t\tcsv := newCSV(\"test-csv\", testNamespace, \"\", semver.Version{}, nil, nil, nil)\n\t\tExpect(ctx.Ctx().Client().Create(context.TODO(), &csv)).To(Succeed())\n\t})\n\tAfterEach(func() {\n\t\tTearDown(testNamespace)\n\t})\n\n\tWhen(\"objects with APIs that are not on-cluster are created in the installplan\", func() {\n\t\t\/\/ each entry is an installplan with a deprecated resource\n\t\ttype payload struct {\n\t\t\tname string\n\t\t\tIP *operatorsv1alpha1.InstallPlan\n\t\t\terrMessage string\n\t\t}\n\n\t\ttableEntries := []table.TableEntry{\n\t\t\ttable.Entry(\"contains an entry with a missing API not found on cluster \", payload{\n\t\t\t\tname: \"installplan contains a missing API\",\n\t\t\t\tIP: &operatorsv1alpha1.InstallPlan{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tNamespace: *namespace, \/\/ this is necessary due to ginkgo table semantics, see https:\/\/github.com\/onsi\/ginkgo\/issues\/378\n\t\t\t\t\t\tName: \"test-plan-api\",\n\t\t\t\t\t},\n\t\t\t\t\tSpec: operatorsv1alpha1.InstallPlanSpec{\n\t\t\t\t\t\tApproval: operatorsv1alpha1.ApprovalAutomatic,\n\t\t\t\t\t\tApproved: true,\n\t\t\t\t\t\tClusterServiceVersionNames: []string{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\terrMessage: \"api-server resource not found installing VerticalPodAutoscaler my.thing: GroupVersionKind \" +\n\t\t\t\t\t\"verticalpodautoscalers.autoscaling.k8s.io\/v1, Kind=VerticalPodAutoscaler not found on the cluster\",\n\t\t\t}),\n\t\t}\n\n\t\ttable.DescribeTable(\"the ip enters a failed state with a helpful error message\", func(tt payload) {\n\t\t\tExpect(ctx.Ctx().Client().Create(context.Background(), tt.IP)).To(Succeed())\n\n\t\t\ttt.IP.Status = operatorsv1alpha1.InstallPlanStatus{\n\t\t\t\tPhase: operatorsv1alpha1.InstallPlanPhaseInstalling,\n\t\t\t\tCatalogSources: []string{},\n\t\t\t\tPlan: []*operatorsv1alpha1.Step{\n\t\t\t\t\t{\n\t\t\t\t\t\tResolving: \"test-csv\",\n\t\t\t\t\t\tStatus: operatorsv1alpha1.StepStatusUnknown,\n\t\t\t\t\t\tResource: operatorsv1alpha1.StepResource{\n\t\t\t\t\t\t\tName: \"my.thing\",\n\t\t\t\t\t\t\tGroup: \"verticalpodautoscalers.autoscaling.k8s.io\",\n\t\t\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\t\t\tKind: \"VerticalPodAutoscaler\",\n\t\t\t\t\t\t\tManifest: missingAPI,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tExpect(ctx.Ctx().Client().Status().Update(context.Background(), tt.IP)).To(Succeed(), \"failed to update the resource\")\n\n\t\t\t\/\/ The IP sits in the Installing phase with the GVK missing error\n\t\t\tEventually(func() (*operatorsv1alpha1.InstallPlan, error) {\n\t\t\t\treturn tt.IP, ctx.Ctx().Client().Get(context.Background(), client.ObjectKeyFromObject(tt.IP), tt.IP)\n\t\t\t}).Should(And(HavePhase(operatorsv1alpha1.InstallPlanPhaseInstalling)), HaveMessage(tt.errMessage))\n\n\t\t\t\/\/ Eventually the IP fails with the GVK missing error, after installplan retries, which is by default 1 minute.\n\t\t\tEventually(func() (*operatorsv1alpha1.InstallPlan, error) {\n\t\t\t\treturn tt.IP, ctx.Ctx().Client().Get(context.Background(), client.ObjectKeyFromObject(tt.IP), tt.IP)\n\t\t\t}, 2*time.Minute).Should(And(HavePhase(operatorsv1alpha1.InstallPlanPhaseFailed)), HaveMessage(tt.errMessage))\n\t\t}, tableEntries...)\n\t})\n})\n<commit_msg>Move deprecated e2e to one namespace per spec (#2707)<commit_after>package e2e\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/blang\/semver\/v4\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\toperatorsv1 \"github.com\/operator-framework\/api\/pkg\/operators\/v1\"\n\toperatorsv1alpha1 \"github.com\/operator-framework\/api\/pkg\/operators\/v1alpha1\"\n\t\"github.com\/operator-framework\/operator-lifecycle-manager\/test\/e2e\/ctx\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"sigs.k8s.io\/controller-runtime\/pkg\/client\"\n)\n\nvar missingAPI = `{\"apiVersion\":\"verticalpodautoscalers.autoscaling.k8s.io\/v1\",\"kind\":\"VerticalPodAutoscaler\",\"metadata\":{\"name\":\"my.thing\",\"namespace\":\"foo\"}}`\n\nvar _ = Describe(\"Not found APIs\", func() {\n\n\tvar ns corev1.Namespace\n\n\tBeforeEach(func() {\n\t\tnamespaceName := genName(\"deprecated-e2e-\")\n\t\tog := operatorsv1.OperatorGroup{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: fmt.Sprintf(\"%s-operatorgroup\", namespaceName),\n\t\t\t\tNamespace: namespaceName,\n\t\t\t},\n\t\t}\n\t\tns = SetupGeneratedTestNamespaceWithOperatorGroup(namespaceName, og)\n\n\t\tcsv := newCSV(\"test-csv\", ns.GetName(), \"\", semver.Version{}, nil, nil, nil)\n\t\tExpect(ctx.Ctx().Client().Create(context.TODO(), &csv)).To(Succeed())\n\t})\n\n\tAfterEach(func() {\n\t\tTeardownNamespace(ns.GetName())\n\t})\n\n\tContext(\"objects with APIs that are not on-cluster are created in the installplan\", func() {\n\t\tWhen(\"installplan contains a missing API\", func() {\n\t\t\tIt(\"the ip enters a failed state with a helpful error message\", func() {\n\t\t\t\tip := &operatorsv1alpha1.InstallPlan{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"test-plan-api\",\n\t\t\t\t\t\tNamespace: ns.GetName(),\n\t\t\t\t\t},\n\t\t\t\t\tSpec: operatorsv1alpha1.InstallPlanSpec{\n\t\t\t\t\t\tApproval: operatorsv1alpha1.ApprovalAutomatic,\n\t\t\t\t\t\tApproved: true,\n\t\t\t\t\t\tClusterServiceVersionNames: []string{},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tExpect(ctx.Ctx().Client().Create(context.Background(), ip)).To(Succeed())\n\n\t\t\t\tip.Status = operatorsv1alpha1.InstallPlanStatus{\n\t\t\t\t\tPhase: operatorsv1alpha1.InstallPlanPhaseInstalling,\n\t\t\t\t\tCatalogSources: []string{},\n\t\t\t\t\tPlan: []*operatorsv1alpha1.Step{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tResolving: \"test-csv\",\n\t\t\t\t\t\t\tStatus: operatorsv1alpha1.StepStatusUnknown,\n\t\t\t\t\t\t\tResource: operatorsv1alpha1.StepResource{\n\t\t\t\t\t\t\t\tName: \"my.thing\",\n\t\t\t\t\t\t\t\tGroup: \"verticalpodautoscalers.autoscaling.k8s.io\",\n\t\t\t\t\t\t\t\tVersion: \"v1\",\n\t\t\t\t\t\t\t\tKind: \"VerticalPodAutoscaler\",\n\t\t\t\t\t\t\t\tManifest: missingAPI,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tExpect(ctx.Ctx().Client().Status().Update(context.Background(), ip)).To(Succeed(), \"failed to update the resource\")\n\n\t\t\t\terrMessage := \"api-server resource not found installing VerticalPodAutoscaler my.thing: GroupVersionKind \" +\n\t\t\t\t\t\"verticalpodautoscalers.autoscaling.k8s.io\/v1, Kind=VerticalPodAutoscaler not found on the cluster\"\n\t\t\t\t\/\/ The IP sits in the Installing phase with the GVK missing error\n\t\t\t\tEventually(func() (*operatorsv1alpha1.InstallPlan, error) {\n\t\t\t\t\treturn ip, ctx.Ctx().Client().Get(context.Background(), client.ObjectKeyFromObject(ip), ip)\n\t\t\t\t}).Should(And(HavePhase(operatorsv1alpha1.InstallPlanPhaseInstalling)), HaveMessage(errMessage))\n\n\t\t\t\t\/\/ Eventually the IP fails with the GVK missing error, after installplan retries, which is by default 1 minute.\n\t\t\t\tEventually(func() (*operatorsv1alpha1.InstallPlan, error) {\n\t\t\t\t\treturn ip, ctx.Ctx().Client().Get(context.Background(), client.ObjectKeyFromObject(ip), ip)\n\t\t\t\t}, 2*time.Minute).Should(And(HavePhase(operatorsv1alpha1.InstallPlanPhaseFailed)), HaveMessage(errMessage))\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package aranGO\n\nimport (\n \"testing\"\n \"github.com\/stretchr\/testify\/assert\"\n)\n\n\/\/ Configure to start testing\nvar(\n TestCollection = \"apps\"\n TestDoc DocTest\n TestDbName = \"goleat\"\n TestUsername = \"\"\n TestPassword = \"\"\n verbose = false\n TestServer = \"http:\/\/localhost:8529\"\n s *Session\n)\n\n\/\/ document to test\ntype DocTest struct {\n Document \/\/ arango Document to save id, key, rev\n}\n\nfunc TestSimple(t *testing.T){\n \/\/ connect\n s ,err := Connect(TestServer, TestUsername, TestPassword, verbose)\n assert.Nil(t,err)\n\n db := s.DB(TestDbName)\n assert.NotNil(t,db)\n\n c := db.Col(TestCollection)\n assert.NotNil(t,c)\n\n \/\/ Any\n err = c.Any(&TestDoc)\n assert.Equal(t,TestDoc.Error,false)\n assert.Nil(t,err)\n\n \/\/ Example\n cur, err := c.Example(map[string]interface{}{},0,10)\n assert.Equal(t,TestDoc.Error,false)\n assert.Nil(t,err)\n assert.NotNil(t,cur)\n\n \/\/ need to add new functions!\n}\n<commit_msg>Added `save` and `FetchOne` tests<commit_after>package aranGO\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\n\/\/ Configure to start testing\nvar (\n\tTestCollection = \"apps\"\n\tTestDoc DocTest\n\tTestDbName = \"goleat\"\n\tTestUsername = \"\"\n\tTestPassword = \"\"\n\tverbose = false\n\tTestServer = \"http:\/\/localhost:8529\"\n\ts *Session\n)\n\n\/\/ document to test\ntype DocTest struct {\n\tDocument \/\/ arango Document to save id, key, rev\n\tText string\n}\n\nfunc TestSimple(t *testing.T) {\n\t\/\/ connect\n\ts, err := Connect(TestServer, TestUsername, TestPassword, verbose)\n\tassert.Nil(t, err)\n\n\t\/\/ Create the db\n\ts.CreateDB(TestDbName, nil)\n\n\tdb := s.DB(TestDbName)\n\tassert.NotNil(t, db)\n\n\tc := db.Col(TestCollection)\n\tassert.NotNil(t, c)\n\n\t\/\/ Any\n\terr = c.Any(&TestDoc)\n\tassert.Equal(t, TestDoc.Error, false)\n\tassert.Nil(t, err)\n\n\t\/\/ Save\n\tvar saveTestDoc DocTest\n\tsaveTestDoc.Text = \"Stringy string\"\n\terr = c.Save(saveTestDoc)\n\tassert.Nil(t, err)\n\n\t\/\/ Example\n\tcur, err := c.Example(map[string]interface{}{}, 0, 10)\n\tassert.Equal(t, TestDoc.Error, false)\n\tassert.Nil(t, err)\n\tassert.NotNil(t, cur)\n\n\tvar newTestDoc DocTest\n\tmoreFiles := cur.FetchOne(newTestDoc)\n\tassert.Equal(t, moreFiles, false)\n\tassert.Equal(t, saveTestDoc, newTestDoc)\n\n\t\/\/ need to add new functions!\n\n}\n<|endoftext|>"} {"text":"<commit_before>package ofp10\n\nimport \"github.com\/kopwei\/goof\/protocols\/ofpgeneral\"\n\n\/\/enum ofp_stats_types {\nconst (\n\t\/* Description of this OpenFlow switch.\n\t * The request body is empty.\n\t * The reply body is struct ofp_desc_stats. *\/\n\tOfpStatsTypeDesc = iota\n\n\t\/* Individual flow statistics.\n\t * The request body is struct ofp_flow_stats_request.\n\t * The reply body is an array of struct ofp_flow_stats. *\/\n\tOfpStatsTypeFlow\n\n\t\/* Aggregate flow statistics.\n\t * The request body is struct ofp_aggregate_stats_request.\n\t * The reply body is struct ofp_aggregate_stats_reply. *\/\n\tOfpStatsTypeAggregate\n\n\t\/* Flow table statistics.\n\t * The request body is empty.\n\t * The reply body is an array of struct ofp_table_stats. *\/\n\tOfpStatsTypeTable\n\n\t\/* Physical port statistics.\n\t * The request body is struct ofp_port_stats_request.\n\t * The reply body is an array of struct ofp_port_stats. *\/\n\tOfpStatsTypePort\n\n\t\/* Queue statistics for a port\n\t * The request body defines the port\n\t * The reply body is an array of struct ofp_queue_stats *\/\n\tOfpStatsTypeQueue\n\n\t\/* Vendor extension.\n\t * The request and reply bodies begin with a 32-bit vendor ID, which takes\n\t * the same form as in \"struct ofp_vendor_header\". The request and reply\n\t * bodies are otherwise vendor-defined. *\/\n\tOfpStatsTypeVendor = 0xffff\n)\n\n\/\/ enum ofp_stats_reply_flags {\nconst (\n\tOfpStatsReplyMore = iota \/* More replies to follow. *\/\n)\n\nconst (\n\tdescStrLen = 256\n\tserialNumLen = 32\n)\n\n\/\/ OfpStatsReqMsg represents the structure of Stats request msg\ntype OfpStatsReqMsg struct {\n\tHeader ofpgeneral.OfpHeader\n\tType uint16 \/* One of the OFPST_* constants. *\/\n\tFlags uint16 \/* OFPSF_REQ_* flags (none yet defined). *\/\n\tBody []byte \/* Body of the request. *\/\n}\n\n\/\/ OfpStatsReplyMsg represents the structure of stats reply msg\ntype OfpStatsReplyMsg struct {\n\tHeader ofpgeneral.OfpHeader\n\tType uint16 \/* One of the OFPST_* constants. *\/\n\tFlags uint16 \/* OFPSF_REPLY_* flags. *\/\n\tBody []byte \/* Body of the reply. *\/\n}\n\n\/\/ OfpDescStats represents the structure of descriptive stats\ntype OfpDescStats struct {\n\tManufacurerDesc [descStrLen]byte \/* Manufacturer description. *\/\n\tHwDesc [descStrLen]byte \/* Hardware description. *\/\n\tSwDesc [descStrLen]byte \/* Software description. *\/\n\tSerialNum [serialNumLen]byte \/* Serial number. *\/\n\tDatapathDesc [descStrLen]byte \/* Human readable description of datapath. *\/\n}\n\n\/\/ OfpFlowStatsReq represents the structure body for ofp_stats_request of type OFPST_FLOW.\ntype OfpFlowStatsReq struct {\n\tMatch OfpMatch \/\/ Fields to match.\n\tTableID uint8 \/\/ ID of table to read (from ofp_table_stats), 0xff for all tables or 0xfe for emergency.\n\t\/\/uint8_t pad; \/* Align to 32 bits. *\/\n\tOutPort uint16 \/\/ Require matching entries to include this as an output port. A value of OFPP_NONE indicates no restriction.\n}\n\n\/\/ OfpFlowStats represents the structure body of reply to OFPST_FLOW request.\ntype OfpFlowStats struct {\n\tLength uint16 \/* Length of this entry. *\/\n\tTableID uint8 \/* ID of table flow came from. *\/\n\t\/\/uint8_t pad;\n\tMatch OfpMatch \/* Description of fields. *\/\n\tDurationSec uint32 \/* Time flow has been alive in seconds. *\/\n\tDurationNanoSec uint32 \/* Time flow has been alive in nanoseconds beyond\n\t duration_sec. *\/\n\tPriority uint16 \/* Priority of the entry. Only meaningful\n\t when this is not an exact-match entry. *\/\n\tIdleTimeout uint16 \/* Number of seconds idle before expiration. *\/\n\tHardTimeout uint16 \/* Number of seconds before expiration. *\/\n\t\/\/uint8_t pad2[6]; \/* Align to 64-bits. *\/\n\tCookie uint64 \/* Opaque controller-issued identifier. *\/\n\tPacketCount uint64 \/* Number of packets in flow. *\/\n\tByteCount uint64 \/* Number of bytes in flow. *\/\n\tActions []OfpActionHeader \/* Actions. *\/\n}\n\n\/\/ OfpAggStatsRequest represents the structure body for ofp_stats_request of type OFPST_AGGREGATE.\ntype OfpAggStatsRequest struct {\n\tMatch OfpMatch \/* Fields to match. *\/\n\tTableID uint8 \/* ID of table to read (from ofp_table_stats) 0xff for all tables or 0xfe for emergency. *\/\n\t\/\/uint8_t pad; \/* Align to 32 bits. *\/\n\tOutPort uint16 \/* Require matching entries to include this as an output port. A value of OFPP_NONE\n\t indicates no restriction. *\/\n}\n\n\/\/ OfpAggStatsReply represents the structure body of reply to OFPST_AGGREGATE request. *\/\ntype OfpAggStatsReply struct {\n\tPacketCount uint64 \/* Number of packets in flows. *\/\n\tByteCount uint64 \/* Number of bytes in flows. *\/\n\tFlowCount uint32 \/* Number of flows. *\/\n\t\/\/uint8_t pad[4]; \/* Align to 64 bits. *\/\n}\n\n\/\/ OfpTableStats represents the structure body of reply to OFPST_TABLE request.\ntype OfpTableStats struct {\n\tTableID uint8 \/\/ Identifier of table. Lower numbered tables are consulted first.\n\t\/\/uint8_t pad[3]; \/* Align to 32-bits. *\/\n\tName [32]byte\n\tWildCards uint32 \/* Bitmap of OFPFW_* wildcards that are supported by the table. *\/\n\tMaxEntries uint32 \/* Max number of entries supported. *\/\n\tActiveCount uint32 \/* Number of active entries. *\/\n\tLookupCount uint64 \/* Number of packets looked up in table. *\/\n\tMatchedCount uint64 \/* Number of packets that hit table. *\/\n}\n\n\/\/ OfpPortStatsRequest represents structure body for ofp_stats_request of type OFPST_PORT.\ntype OfpPortStatsRequest struct {\n\t\/\/ PortNo is the OFPST_PORT message must request statistics\n\t\/\/ either for a single port (specified in\n\t\/\/ port_no) or for all ports (if port_no ==\n\t\/\/ OFPP_NONE).\n\tPortNo uint16\n\t\/\/uint8_t pad[6];\n}\n\n\/\/ OfpPortStats represents the structure body of reply to OFPST_PORT request. If a counter is unsupported, set\n\/\/ the field to all ones.\ntype OfpPortStats struct {\n\tPortNo uint16\n\t\/\/uint8_t pad[6]; \/* Align to 64-bits. *\/\n\tRxPackets uint64 \/* Number of received packets. *\/\n\tTxPackets uint64 \/* Number of transmitted packets. *\/\n\tRxBytes uint64 \/* Number of received bytes. *\/\n\tTxBytes uint64 \/* Number of transmitted bytes. *\/\n\tRxDropped uint64 \/* Number of packets dropped by RX. *\/\n\tTxDropped uint64 \/* Number of packets dropped by TX. *\/\n\tRxErrors uint64 \/* Number of receive errors. This is a super-set\n\t of more specific receive errors and should be\n\t greater than or equal to the sum of all\n\t rx_*_err values. *\/\n\tTxErrors uint64 \/* Number of transmit errors. This is a super-set\n\t of more specific transmit errors and should be\n\t greater than or equal to the sum of all\n\t tx_*_err values (none currently defined.) *\/\n\tRxFrameErr uint64 \/* Number of frame alignment errors. *\/\n\tRxOverErr uint64 \/* Number of packets with RX overrun. *\/\n\tRxCrcErr uint64 \/* Number of CRC errors. *\/\n\tCollisions uint64 \/* Number of collisions. *\/\n}\n\n\/\/ OfpVendorHeader represents the header structure of Vendor extension.\ntype OfpVendorHeader struct {\n\tHeader ofpgeneral.OfpHeader \/* Type OFPT_VENDOR. *\/\n\tVendor uint32 \/* Vendor ID:\n\t * - MSB 0: low-order bytes are IEEE OUI.\n\t * - MSB != 0: defined by OpenFlow\n\t * consortium. *\/\n\t\/* Vendor-defined arbitrary additional data. *\/\n}\n\n\/\/ OfpQueueStatsReq represents the ofp queue stats query structure\ntype OfpQueueStatsReq struct {\n\tPortNo uint16 \/* All ports if OFPT_ALL. *\/\n\t\/\/uint8_t pad[2]; \/* Align to 32-bits. *\/\n\tQueueID uint32 \/* All queues if OFPQ_ALL. *\/\n}\n\n\/\/ OfpQueueStats represents the queue stats info\ntype OfpQueueStats struct {\n\tPortNo uint16\n\t\/\/uint8_t pad[2]; \/* Align to 32-bits. *\/\n\tQueueID uint32 \/* Queue i.d *\/\n\tTxBytes uint64 \/* Number of transmitted bytes. *\/\n\tTxPackets uint64 \/* Number of transmitted packets. *\/\n\tTxErrors uint64 \/* Number of packets dropped due to overrun. *\/\n}\n<commit_msg>Fixed the name conflict<commit_after>package ofp10\n\nimport \"github.com\/kopwei\/goof\/protocols\/ofpgeneral\"\n\n\/\/enum ofp_stats_types {\nconst (\n\t\/* Description of this OpenFlow switch.\n\t * The request body is empty.\n\t * The reply body is struct ofp_desc_stats. *\/\n\tOfpStatsTypeDesc = iota\n\n\t\/* Individual flow statistics.\n\t * The request body is struct ofp_flow_stats_request.\n\t * The reply body is an array of struct ofp_flow_stats. *\/\n\tOfpStatsTypeFlow\n\n\t\/* Aggregate flow statistics.\n\t * The request body is struct ofp_aggregate_stats_request.\n\t * The reply body is struct ofp_aggregate_stats_reply. *\/\n\tOfpStatsTypeAggregate\n\n\t\/* Flow table statistics.\n\t * The request body is empty.\n\t * The reply body is an array of struct ofp_table_stats. *\/\n\tOfpStatsTypeTable\n\n\t\/* Physical port statistics.\n\t * The request body is struct ofp_port_stats_request.\n\t * The reply body is an array of struct ofp_port_stats. *\/\n\tOfpStatsTypePort\n\n\t\/* Queue statistics for a port\n\t * The request body defines the port\n\t * The reply body is an array of struct ofp_queue_stats *\/\n\tOfpStatsTypeQueue\n\n\t\/* Vendor extension.\n\t * The request and reply bodies begin with a 32-bit vendor ID, which takes\n\t * the same form as in \"struct ofp_vendor_header\". The request and reply\n\t * bodies are otherwise vendor-defined. *\/\n\tOfpStatsTypeVendor = 0xffff\n)\n\n\/\/ enum ofp_stats_reply_flags {\nconst (\n\tOfpStatsReplyMore = iota \/* More replies to follow. *\/\n)\n\nconst (\n\tdescStrLen = 256\n\tserialNumLen = 32\n)\n\n\/\/ OfpStatsReqMsg represents the structure of Stats request msg\ntype OfpStatsReqMsg struct {\n\tHeader ofpgeneral.OfpHeader\n\tType uint16 \/* One of the OFPST_* constants. *\/\n\tFlags uint16 \/* OFPSF_REQ_* flags (none yet defined). *\/\n\tBody []byte \/* Body of the request. *\/\n}\n\n\/\/ OfpStatsReplyMsg represents the structure of stats reply msg\ntype OfpStatsReplyMsg struct {\n\tHeader ofpgeneral.OfpHeader\n\tType uint16 \/* One of the OFPST_* constants. *\/\n\tFlags uint16 \/* OFPSF_REPLY_* flags. *\/\n\tBody []byte \/* Body of the reply. *\/\n}\n\n\/\/ OfpDescStats represents the structure of descriptive stats\ntype OfpDescStats struct {\n\tManufacurerDesc [descStrLen]byte \/* Manufacturer description. *\/\n\tHwDesc [descStrLen]byte \/* Hardware description. *\/\n\tSwDesc [descStrLen]byte \/* Software description. *\/\n\tSerialNum [serialNumLen]byte \/* Serial number. *\/\n\tDatapathDesc [descStrLen]byte \/* Human readable description of datapath. *\/\n}\n\n\/\/ OfpFlowStatsReq represents the structure body for ofp_stats_request of type OFPST_FLOW.\ntype OfpFlowStatsReq struct {\n\tMatch OfpMatch \/\/ Fields to match.\n\tTableID uint8 \/\/ ID of table to read (from ofp_table_stats), 0xff for all tables or 0xfe for emergency.\n\t\/\/uint8_t pad; \/* Align to 32 bits. *\/\n\tOutPort uint16 \/\/ Require matching entries to include this as an output port. A value of OFPP_NONE indicates no restriction.\n}\n\n\/\/ OfpFlowStats represents the structure body of reply to OFPST_FLOW request.\ntype OfpFlowStats struct {\n\tLength uint16 \/* Length of this entry. *\/\n\tTableID uint8 \/* ID of table flow came from. *\/\n\t\/\/uint8_t pad;\n\tMatch OfpMatch \/* Description of fields. *\/\n\tDurationSec uint32 \/* Time flow has been alive in seconds. *\/\n\tDurationNanoSec uint32 \/* Time flow has been alive in nanoseconds beyond\n\t duration_sec. *\/\n\tPriority uint16 \/* Priority of the entry. Only meaningful\n\t when this is not an exact-match entry. *\/\n\tIdleTimeout uint16 \/* Number of seconds idle before expiration. *\/\n\tHardTimeout uint16 \/* Number of seconds before expiration. *\/\n\t\/\/uint8_t pad2[6]; \/* Align to 64-bits. *\/\n\tCookie uint64 \/* Opaque controller-issued identifier. *\/\n\tPacketCount uint64 \/* Number of packets in flow. *\/\n\tByteCount uint64 \/* Number of bytes in flow. *\/\n\tActions []OfpActionHeader \/* Actions. *\/\n}\n\n\/\/ OfpAggStatsRequest represents the structure body for ofp_stats_request of type OFPST_AGGREGATE.\ntype OfpAggStatsRequest struct {\n\tMatch OfpMatch \/* Fields to match. *\/\n\tTableID uint8 \/* ID of table to read (from ofp_table_stats) 0xff for all tables or 0xfe for emergency. *\/\n\t\/\/uint8_t pad; \/* Align to 32 bits. *\/\n\tOutPort uint16 \/* Require matching entries to include this as an output port. A value of OFPP_NONE\n\t indicates no restriction. *\/\n}\n\n\/\/ OfpAggStatsReply represents the structure body of reply to OFPST_AGGREGATE request. *\/\ntype OfpAggStatsReply struct {\n\tPacketCount uint64 \/* Number of packets in flows. *\/\n\tByteCount uint64 \/* Number of bytes in flows. *\/\n\tFlowCount uint32 \/* Number of flows. *\/\n\t\/\/uint8_t pad[4]; \/* Align to 64 bits. *\/\n}\n\n\/\/ OfpTableStats represents the structure body of reply to OFPST_TABLE request.\ntype OfpTableStats struct {\n\tTableID uint8 \/\/ Identifier of table. Lower numbered tables are consulted first.\n\t\/\/uint8_t pad[3]; \/* Align to 32-bits. *\/\n\tName [32]byte\n\tWildCards uint32 \/* Bitmap of OFPFW_* wildcards that are supported by the table. *\/\n\tMaxEntries uint32 \/* Max number of entries supported. *\/\n\tActiveCount uint32 \/* Number of active entries. *\/\n\tLookupCount uint64 \/* Number of packets looked up in table. *\/\n\tMatchedCount uint64 \/* Number of packets that hit table. *\/\n}\n\n\/\/ OfpPortStatsRequest represents structure body for ofp_stats_request of type OFPST_PORT.\ntype OfpPortStatsRequest struct {\n\t\/\/ PortNo is the OFPST_PORT message must request statistics\n\t\/\/ either for a single port (specified in\n\t\/\/ port_no) or for all ports (if port_no ==\n\t\/\/ OFPP_NONE).\n\tPortNo uint16\n\t\/\/uint8_t pad[6];\n}\n\n\/\/ OfpPortStats represents the structure body of reply to OFPST_PORT request. If a counter is unsupported, set\n\/\/ the field to all ones.\ntype OfpPortStats struct {\n\tPortNo uint16\n\t\/\/uint8_t pad[6]; \/* Align to 64-bits. *\/\n\tRxPackets uint64 \/* Number of received packets. *\/\n\tTxPackets uint64 \/* Number of transmitted packets. *\/\n\tRxBytes uint64 \/* Number of received bytes. *\/\n\tTxBytes uint64 \/* Number of transmitted bytes. *\/\n\tRxDropped uint64 \/* Number of packets dropped by RX. *\/\n\tTxDropped uint64 \/* Number of packets dropped by TX. *\/\n\tRxErrors uint64 \/* Number of receive errors. This is a super-set\n\t of more specific receive errors and should be\n\t greater than or equal to the sum of all\n\t rx_*_err values. *\/\n\tTxErrors uint64 \/* Number of transmit errors. This is a super-set\n\t of more specific transmit errors and should be\n\t greater than or equal to the sum of all\n\t tx_*_err values (none currently defined.) *\/\n\tRxFrameErr uint64 \/* Number of frame alignment errors. *\/\n\tRxOverErr uint64 \/* Number of packets with RX overrun. *\/\n\tRxCrcErr uint64 \/* Number of CRC errors. *\/\n\tCollisions uint64 \/* Number of collisions. *\/\n}\n\n\/\/ OfpVendorHeader represents the header structure of Vendor extension.\ntype OfpVendorHeader struct {\n\tHeader ofpgeneral.OfpHeader \/* Type OFPT_VENDOR. *\/\n\tVendor uint32 \/* Vendor ID:\n\t * - MSB 0: low-order bytes are IEEE OUI.\n\t * - MSB != 0: defined by OpenFlow\n\t * consortium. *\/\n\t\/* Vendor-defined arbitrary additional data. *\/\n}\n\n\/\/ OfpQueueStatsReq represents the ofp queue stats query structure\ntype OfpQueueStatsReq struct {\n\tPortNo uint16 \/* All ports if OFPT_ALL. *\/\n\t\/\/uint8_t pad[2]; \/* Align to 32-bits. *\/\n\tQueueID uint32 \/* All queues if OFPQ_ALL. *\/\n}\n\n\/\/ OfpQueueStatsInfo represents the queue stats info\ntype OfpQueueStatsInfo struct {\n\tPortNo uint16\n\t\/\/uint8_t pad[2]; \/* Align to 32-bits. *\/\n\tQueueID uint32 \/* Queue i.d *\/\n\tTxBytes uint64 \/* Number of transmitted bytes. *\/\n\tTxPackets uint64 \/* Number of transmitted packets. *\/\n\tTxErrors uint64 \/* Number of packets dropped due to overrun. *\/\n}\n<|endoftext|>"} {"text":"<commit_before>package algorithm_test\n\nimport (\n\t. \"github.com\/onsi\/ginkgo\/extensions\/table\"\n)\n\nvar _ = DescribeTable(\"Input resolving\",\n\t(Example).Run,\n\n\tEntry(\"can fan-in\", Example{\n\t\tDB: DB{\n\t\t\t\/\/ pass a and b\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t\t{Job: \"simple-b\", BuildID: 2, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\n\t\t\t\/\/ pass a but not b\n\t\t\t{Job: \"simple-a\", BuildID: 3, Resource: \"resource-x\", Version: \"rxv2\", CheckOrder: 2},\n\t\t},\n\n\t\tInputs: Inputs{\n\t\t\t{\n\t\t\t\tName: \"resource-x\",\n\t\t\t\tResource: \"resource-x\",\n\t\t\t\tPassed: []string{\"simple-a\", \"simple-b\"},\n\t\t\t},\n\t\t},\n\n\t\t\/\/ no v2 as it hasn't passed b\n\t\tResult: Result{\"resource-x\": \"rxv1\"},\n\t}),\n\n\tEntry(\"propagates resources together\", Example{\n\t\tDB: DB{\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-y\", Version: \"ryv1\", CheckOrder: 1},\n\t\t},\n\n\t\tInputs: Inputs{\n\t\t\t{Name: \"resource-x\", Resource: \"resource-x\", Passed: []string{\"simple-a\"}},\n\t\t\t{Name: \"resource-y\", Resource: \"resource-y\", Passed: []string{\"simple-a\"}},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"resource-x\": \"rxv1\",\n\t\t\t\"resource-y\": \"ryv1\",\n\t\t},\n\t}),\n\n\tEntry(\"correlates inputs by build, allowing resources to skip jobs\", Example{\n\t\tDB: DB{\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-y\", Version: \"ryv1\", CheckOrder: 1},\n\n\t\t\t{Job: \"fan-in\", BuildID: 3, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\n\t\t\t{Job: \"simple-a\", BuildID: 4, Resource: \"resource-x\", Version: \"rxv2\", CheckOrder: 2},\n\t\t\t{Job: \"simple-a\", BuildID: 4, Resource: \"resource-y\", Version: \"ryv2\", CheckOrder: 2},\n\t\t},\n\n\t\tInputs: Inputs{\n\t\t\t{Name: \"resource-x\", Resource: \"resource-x\", Passed: []string{\"simple-a\", \"fan-in\"}},\n\t\t\t{Name: \"resource-y\", Resource: \"resource-y\", Passed: []string{\"simple-a\"}},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"resource-x\": \"rxv1\",\n\n\t\t\t\/\/ not ryv2, as it didn't make it through build relating simple-a to fan-in\n\t\t\t\"resource-y\": \"ryv1\",\n\t\t},\n\t}),\n\n\tEntry(\"finds only versions that passed through together\", Example{\n\t\tDB: DB{\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-y\", Version: \"ryv1\", CheckOrder: 1},\n\t\t\t{Job: \"simple-b\", BuildID: 2, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t\t{Job: \"simple-b\", BuildID: 2, Resource: \"resource-y\", Version: \"ryv1\", CheckOrder: 1},\n\n\t\t\t{Job: \"simple-a\", BuildID: 3, Resource: \"resource-x\", Version: \"rxv3\", CheckOrder: 2},\n\t\t\t{Job: \"simple-a\", BuildID: 3, Resource: \"resource-y\", Version: \"ryv3\", CheckOrder: 2},\n\t\t\t{Job: \"simple-b\", BuildID: 4, Resource: \"resource-x\", Version: \"rxv3\", CheckOrder: 2},\n\t\t\t{Job: \"simple-b\", BuildID: 4, Resource: \"resource-y\", Version: \"ryv3\", CheckOrder: 2},\n\n\t\t\t{Job: \"simple-a\", BuildID: 3, Resource: \"resource-x\", Version: \"rxv2\", CheckOrder: 1},\n\t\t\t{Job: \"simple-a\", BuildID: 3, Resource: \"resource-y\", Version: \"ryv4\", CheckOrder: 1},\n\n\t\t\t{Job: \"simple-b\", BuildID: 4, Resource: \"resource-x\", Version: \"rxv4\", CheckOrder: 1},\n\t\t\t{Job: \"simple-b\", BuildID: 4, Resource: \"resource-y\", Version: \"rxv4\", CheckOrder: 1},\n\n\t\t\t{Job: \"simple-b\", BuildID: 5, Resource: \"resource-x\", Version: \"rxv4\", CheckOrder: 1},\n\t\t\t{Job: \"simple-b\", BuildID: 5, Resource: \"resource-y\", Version: \"rxv2\", CheckOrder: 1},\n\t\t},\n\n\t\tInputs: Inputs{\n\t\t\t{Name: \"resource-x\", Resource: \"resource-x\", Passed: []string{\"simple-a\", \"simple-b\"}},\n\t\t\t{Name: \"resource-y\", Resource: \"resource-y\", Passed: []string{\"simple-a\", \"simple-b\"}},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"resource-x\": \"rxv3\",\n\t\t\t\"resource-y\": \"ryv3\",\n\t\t},\n\t}),\n\n\tEntry(\"can collect distinct versions of resources without correlating by job\", Example{\n\t\tDB: DB{\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t\t{Job: \"simple-b\", BuildID: 2, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t\t{Job: \"simple-b\", BuildID: 2, Resource: \"resource-x\", Version: \"rxv2\", CheckOrder: 2},\n\t\t},\n\n\t\tInputs: Inputs{\n\t\t\t{Name: \"simple-a-resource-x\", Resource: \"resource-x\", Passed: []string{\"simple-a\"}},\n\t\t\t{Name: \"simple-b-resource-x\", Resource: \"resource-x\", Passed: []string{\"simple-b\"}},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"simple-a-resource-x\": \"rxv1\",\n\t\t\t\"simple-b-resource-x\": \"rxv2\",\n\t\t},\n\t}),\n\n\tEntry(\"resolves passed constraints with common jobs\", Example{\n\t\tDB: DB{\n\t\t\t{Job: \"shared-job\", BuildID: 1, Resource: \"resource-1\", Version: \"r1-common-to-shared-and-j1\", CheckOrder: 1},\n\t\t\t{Job: \"shared-job\", BuildID: 1, Resource: \"resource-2\", Version: \"r2-common-to-shared-and-j2\", CheckOrder: 1},\n\t\t\t{Job: \"job-1\", BuildID: 2, Resource: \"resource-1\", Version: \"r1-common-to-shared-and-j1\", CheckOrder: 1},\n\t\t\t{Job: \"job-2\", BuildID: 3, Resource: \"resource-2\", Version: \"r2-common-to-shared-and-j2\", CheckOrder: 1},\n\t\t},\n\n\t\tInputs: Inputs{\n\t\t\t{\n\t\t\t\tName: \"input-1\",\n\t\t\t\tResource: \"resource-1\",\n\t\t\t\tPassed: []string{\"shared-job\", \"job-1\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"input-2\",\n\t\t\t\tResource: \"resource-2\",\n\t\t\t\tPassed: []string{\"shared-job\", \"job-2\"},\n\t\t\t},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"input-1\": \"r1-common-to-shared-and-j1\",\n\t\t\t\"input-2\": \"r2-common-to-shared-and-j2\",\n\t\t},\n\t}),\n\n\tEntry(\"resolves passed constraints with common jobs, skipping versions that are not common to builds of all jobs\", Example{\n\t\tDB: DB{\n\t\t\t{Job: \"shared-job\", BuildID: 1, Resource: \"resource-1\", Version: \"r1-common-to-shared-and-j1\", CheckOrder: 1},\n\t\t\t{Job: \"shared-job\", BuildID: 1, Resource: \"resource-2\", Version: \"r2-common-to-shared-and-j2\", CheckOrder: 1},\n\t\t\t{Job: \"job-1\", BuildID: 2, Resource: \"resource-1\", Version: \"r1-common-to-shared-and-j1\", CheckOrder: 1},\n\t\t\t{Job: \"job-2\", BuildID: 3, Resource: \"resource-2\", Version: \"r2-common-to-shared-and-j2\", CheckOrder: 1},\n\n\t\t\t{Job: \"shared-job\", BuildID: 4, Resource: \"resource-1\", Version: \"new-r1-common-to-shared-and-j1\", CheckOrder: 2},\n\t\t\t{Job: \"shared-job\", BuildID: 4, Resource: \"resource-2\", Version: \"new-r2-common-to-shared-and-j2\", CheckOrder: 2},\n\t\t\t{Job: \"job-1\", BuildID: 5, Resource: \"resource-1\", Version: \"new-r1-common-to-shared-and-j1\", CheckOrder: 2},\n\t\t},\n\n\t\tInputs: Inputs{\n\t\t\t{\n\t\t\t\tName: \"input-1\",\n\t\t\t\tResource: \"resource-1\",\n\t\t\t\tPassed: []string{\"shared-job\", \"job-1\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"input-2\",\n\t\t\t\tResource: \"resource-2\",\n\t\t\t\tPassed: []string{\"shared-job\", \"job-2\"},\n\t\t\t},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"input-1\": \"r1-common-to-shared-and-j1\",\n\t\t\t\"input-2\": \"r2-common-to-shared-and-j2\",\n\t\t},\n\t}),\n\n\tEntry(\"finds the latest version for inputs with no passed constraints\", Example{\n\t\tDB: DB{\n\t\t\t\/\/ build outputs\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-y\", Version: \"ryv1\", CheckOrder: 1},\n\n\t\t\t\/\/ the versions themselves\n\t\t\t\/\/ note: normally there's one of these for each version, including ones\n\t\t\t\/\/ that appear as outputs\n\t\t\t{Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t\t{Resource: \"resource-y\", Version: \"ryv2\", CheckOrder: 2},\n\t\t\t{Resource: \"resource-x\", Version: \"rxv2\", CheckOrder: 2},\n\t\t\t{Resource: \"resource-y\", Version: \"ryv3\", CheckOrder: 3},\n\t\t\t{Resource: \"resource-x\", Version: \"rxv3\", CheckOrder: 3},\n\t\t\t{Resource: \"resource-y\", Version: \"ryv4\", CheckOrder: 4},\n\t\t\t{Resource: \"resource-x\", Version: \"rxv4\", CheckOrder: 4},\n\t\t\t{Resource: \"resource-y\", Version: \"ryv5\", CheckOrder: 5},\n\t\t\t{Resource: \"resource-x\", Version: \"rxv5\", CheckOrder: 5},\n\t\t},\n\n\t\tInputs: Inputs{\n\t\t\t{\n\t\t\t\tName: \"resource-x\",\n\t\t\t\tResource: \"resource-x\",\n\t\t\t\tPassed: []string{\"simple-a\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"resource-x-unconstrained\",\n\t\t\t\tResource: \"resource-x\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"resource-y-unconstrained\",\n\t\t\t\tResource: \"resource-y\",\n\t\t\t},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"resource-x\": \"rxv1\",\n\t\t\t\"resource-x-unconstrained\": \"rxv5\",\n\t\t\t\"resource-y-unconstrained\": \"ryv5\",\n\t\t},\n\t}),\n\n\tEntry(\"check orders take precedence over version ID\", Example{\n\t\tDB: DB{\n\t\t\t{Resource: \"resource-x\", Version: \"rxv2\", CheckOrder: 2},\n\t\t\t{Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t},\n\n\t\tInputs: Inputs{\n\t\t\t{Name: \"resource-x\", Resource: \"resource-x\"},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"resource-x\": \"rxv2\",\n\t\t},\n\t}),\n\n\tEntry(\"bosh memory leak regression test\", Example{\n\t\tLoadDB: \"testdata\/bosh-versions.json\",\n\n\t\tInputs: Inputs{\n\t\t\t{\n\t\t\t\tName: \"bosh-src\",\n\t\t\t\tResource: \"bosh-src\",\n\t\t\t\tPassed: []string{\n\t\t\t\t\t\"unit-1.9\",\n\t\t\t\t\t\"unit-2.1\",\n\t\t\t\t\t\"integration-2.1-mysql\",\n\t\t\t\t\t\"integration-1.9-postgres\",\n\t\t\t\t\t\"integration-2.1-postgres\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"bosh-load-tests\",\n\t\t\t\tResource: \"bosh-load-tests\",\n\t\t\t},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"bosh-src\": \"imported-r88v9814\",\n\t\t\t\"bosh-load-tests\": \"imported-r89v7204\",\n\t\t},\n\t}),\n\n\tEntry(\"concourse deploy high cpi regression test\", Example{\n\t\tLoadDB: \"testdata\/concourse-versions-high-cpu-deploy.json\",\n\n\t\tInputs: Inputs{\n\t\t\t{\n\t\t\t\tName: \"concourse\",\n\t\t\t\tResource: \"concourse\",\n\t\t\t\tPassed: []string{\n\t\t\t\t\t\"testflight\",\n\t\t\t\t\t\"bin-testflight\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"version\",\n\t\t\t\tResource: \"version\",\n\t\t\t\tPassed: []string{\n\t\t\t\t\t\"testflight\",\n\t\t\t\t\t\"bin-testflight\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"candidate-release\",\n\t\t\t\tResource: \"candidate-release\",\n\t\t\t\tPassed: []string{\n\t\t\t\t\t\"testflight\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"garden-linux-release\",\n\t\t\t\tResource: \"garden-linux\",\n\t\t\t\tPassed: []string{\n\t\t\t\t\t\"testflight\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"bin-rc\",\n\t\t\t\tResource: \"bin-rc\",\n\t\t\t\tPassed: []string{\n\t\t\t\t\t\"bin-testflight\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"bosh-stemcell\",\n\t\t\t\tResource: \"aws-stemcell\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"deployments\",\n\t\t\t\tResource: \"deployments\",\n\t\t\t},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"candidate-release\": \"imported-r238v448886\",\n\t\t\t\"deployments\": \"imported-r45v448469\",\n\t\t\t\"bosh-stemcell\": \"imported-r48v443997\",\n\t\t\t\"bin-rc\": \"imported-r765v448889\",\n\t\t\t\"garden-linux-release\": \"imported-r17v443811\",\n\t\t\t\"version\": \"imported-r12v448884\",\n\t\t\t\"concourse\": \"imported-r62v448881\",\n\t\t},\n\t}),\n)\n<commit_msg>fix typo<commit_after>package algorithm_test\n\nimport (\n\t. \"github.com\/onsi\/ginkgo\/extensions\/table\"\n)\n\nvar _ = DescribeTable(\"Input resolving\",\n\t(Example).Run,\n\n\tEntry(\"can fan-in\", Example{\n\t\tDB: DB{\n\t\t\t\/\/ pass a and b\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t\t{Job: \"simple-b\", BuildID: 2, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\n\t\t\t\/\/ pass a but not b\n\t\t\t{Job: \"simple-a\", BuildID: 3, Resource: \"resource-x\", Version: \"rxv2\", CheckOrder: 2},\n\t\t},\n\n\t\tInputs: Inputs{\n\t\t\t{\n\t\t\t\tName: \"resource-x\",\n\t\t\t\tResource: \"resource-x\",\n\t\t\t\tPassed: []string{\"simple-a\", \"simple-b\"},\n\t\t\t},\n\t\t},\n\n\t\t\/\/ no v2 as it hasn't passed b\n\t\tResult: Result{\"resource-x\": \"rxv1\"},\n\t}),\n\n\tEntry(\"propagates resources together\", Example{\n\t\tDB: DB{\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-y\", Version: \"ryv1\", CheckOrder: 1},\n\t\t},\n\n\t\tInputs: Inputs{\n\t\t\t{Name: \"resource-x\", Resource: \"resource-x\", Passed: []string{\"simple-a\"}},\n\t\t\t{Name: \"resource-y\", Resource: \"resource-y\", Passed: []string{\"simple-a\"}},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"resource-x\": \"rxv1\",\n\t\t\t\"resource-y\": \"ryv1\",\n\t\t},\n\t}),\n\n\tEntry(\"correlates inputs by build, allowing resources to skip jobs\", Example{\n\t\tDB: DB{\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-y\", Version: \"ryv1\", CheckOrder: 1},\n\n\t\t\t{Job: \"fan-in\", BuildID: 3, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\n\t\t\t{Job: \"simple-a\", BuildID: 4, Resource: \"resource-x\", Version: \"rxv2\", CheckOrder: 2},\n\t\t\t{Job: \"simple-a\", BuildID: 4, Resource: \"resource-y\", Version: \"ryv2\", CheckOrder: 2},\n\t\t},\n\n\t\tInputs: Inputs{\n\t\t\t{Name: \"resource-x\", Resource: \"resource-x\", Passed: []string{\"simple-a\", \"fan-in\"}},\n\t\t\t{Name: \"resource-y\", Resource: \"resource-y\", Passed: []string{\"simple-a\"}},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"resource-x\": \"rxv1\",\n\n\t\t\t\/\/ not ryv2, as it didn't make it through build relating simple-a to fan-in\n\t\t\t\"resource-y\": \"ryv1\",\n\t\t},\n\t}),\n\n\tEntry(\"finds only versions that passed through together\", Example{\n\t\tDB: DB{\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-y\", Version: \"ryv1\", CheckOrder: 1},\n\t\t\t{Job: \"simple-b\", BuildID: 2, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t\t{Job: \"simple-b\", BuildID: 2, Resource: \"resource-y\", Version: \"ryv1\", CheckOrder: 1},\n\n\t\t\t{Job: \"simple-a\", BuildID: 3, Resource: \"resource-x\", Version: \"rxv3\", CheckOrder: 2},\n\t\t\t{Job: \"simple-a\", BuildID: 3, Resource: \"resource-y\", Version: \"ryv3\", CheckOrder: 2},\n\t\t\t{Job: \"simple-b\", BuildID: 4, Resource: \"resource-x\", Version: \"rxv3\", CheckOrder: 2},\n\t\t\t{Job: \"simple-b\", BuildID: 4, Resource: \"resource-y\", Version: \"ryv3\", CheckOrder: 2},\n\n\t\t\t{Job: \"simple-a\", BuildID: 3, Resource: \"resource-x\", Version: \"rxv2\", CheckOrder: 1},\n\t\t\t{Job: \"simple-a\", BuildID: 3, Resource: \"resource-y\", Version: \"ryv4\", CheckOrder: 1},\n\n\t\t\t{Job: \"simple-b\", BuildID: 4, Resource: \"resource-x\", Version: \"rxv4\", CheckOrder: 1},\n\t\t\t{Job: \"simple-b\", BuildID: 4, Resource: \"resource-y\", Version: \"rxv4\", CheckOrder: 1},\n\n\t\t\t{Job: \"simple-b\", BuildID: 5, Resource: \"resource-x\", Version: \"rxv4\", CheckOrder: 1},\n\t\t\t{Job: \"simple-b\", BuildID: 5, Resource: \"resource-y\", Version: \"rxv2\", CheckOrder: 1},\n\t\t},\n\n\t\tInputs: Inputs{\n\t\t\t{Name: \"resource-x\", Resource: \"resource-x\", Passed: []string{\"simple-a\", \"simple-b\"}},\n\t\t\t{Name: \"resource-y\", Resource: \"resource-y\", Passed: []string{\"simple-a\", \"simple-b\"}},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"resource-x\": \"rxv3\",\n\t\t\t\"resource-y\": \"ryv3\",\n\t\t},\n\t}),\n\n\tEntry(\"can collect distinct versions of resources without correlating by job\", Example{\n\t\tDB: DB{\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t\t{Job: \"simple-b\", BuildID: 2, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t\t{Job: \"simple-b\", BuildID: 2, Resource: \"resource-x\", Version: \"rxv2\", CheckOrder: 2},\n\t\t},\n\n\t\tInputs: Inputs{\n\t\t\t{Name: \"simple-a-resource-x\", Resource: \"resource-x\", Passed: []string{\"simple-a\"}},\n\t\t\t{Name: \"simple-b-resource-x\", Resource: \"resource-x\", Passed: []string{\"simple-b\"}},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"simple-a-resource-x\": \"rxv1\",\n\t\t\t\"simple-b-resource-x\": \"rxv2\",\n\t\t},\n\t}),\n\n\tEntry(\"resolves passed constraints with common jobs\", Example{\n\t\tDB: DB{\n\t\t\t{Job: \"shared-job\", BuildID: 1, Resource: \"resource-1\", Version: \"r1-common-to-shared-and-j1\", CheckOrder: 1},\n\t\t\t{Job: \"shared-job\", BuildID: 1, Resource: \"resource-2\", Version: \"r2-common-to-shared-and-j2\", CheckOrder: 1},\n\t\t\t{Job: \"job-1\", BuildID: 2, Resource: \"resource-1\", Version: \"r1-common-to-shared-and-j1\", CheckOrder: 1},\n\t\t\t{Job: \"job-2\", BuildID: 3, Resource: \"resource-2\", Version: \"r2-common-to-shared-and-j2\", CheckOrder: 1},\n\t\t},\n\n\t\tInputs: Inputs{\n\t\t\t{\n\t\t\t\tName: \"input-1\",\n\t\t\t\tResource: \"resource-1\",\n\t\t\t\tPassed: []string{\"shared-job\", \"job-1\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"input-2\",\n\t\t\t\tResource: \"resource-2\",\n\t\t\t\tPassed: []string{\"shared-job\", \"job-2\"},\n\t\t\t},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"input-1\": \"r1-common-to-shared-and-j1\",\n\t\t\t\"input-2\": \"r2-common-to-shared-and-j2\",\n\t\t},\n\t}),\n\n\tEntry(\"resolves passed constraints with common jobs, skipping versions that are not common to builds of all jobs\", Example{\n\t\tDB: DB{\n\t\t\t{Job: \"shared-job\", BuildID: 1, Resource: \"resource-1\", Version: \"r1-common-to-shared-and-j1\", CheckOrder: 1},\n\t\t\t{Job: \"shared-job\", BuildID: 1, Resource: \"resource-2\", Version: \"r2-common-to-shared-and-j2\", CheckOrder: 1},\n\t\t\t{Job: \"job-1\", BuildID: 2, Resource: \"resource-1\", Version: \"r1-common-to-shared-and-j1\", CheckOrder: 1},\n\t\t\t{Job: \"job-2\", BuildID: 3, Resource: \"resource-2\", Version: \"r2-common-to-shared-and-j2\", CheckOrder: 1},\n\n\t\t\t{Job: \"shared-job\", BuildID: 4, Resource: \"resource-1\", Version: \"new-r1-common-to-shared-and-j1\", CheckOrder: 2},\n\t\t\t{Job: \"shared-job\", BuildID: 4, Resource: \"resource-2\", Version: \"new-r2-common-to-shared-and-j2\", CheckOrder: 2},\n\t\t\t{Job: \"job-1\", BuildID: 5, Resource: \"resource-1\", Version: \"new-r1-common-to-shared-and-j1\", CheckOrder: 2},\n\t\t},\n\n\t\tInputs: Inputs{\n\t\t\t{\n\t\t\t\tName: \"input-1\",\n\t\t\t\tResource: \"resource-1\",\n\t\t\t\tPassed: []string{\"shared-job\", \"job-1\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"input-2\",\n\t\t\t\tResource: \"resource-2\",\n\t\t\t\tPassed: []string{\"shared-job\", \"job-2\"},\n\t\t\t},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"input-1\": \"r1-common-to-shared-and-j1\",\n\t\t\t\"input-2\": \"r2-common-to-shared-and-j2\",\n\t\t},\n\t}),\n\n\tEntry(\"finds the latest version for inputs with no passed constraints\", Example{\n\t\tDB: DB{\n\t\t\t\/\/ build outputs\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t\t{Job: \"simple-a\", BuildID: 1, Resource: \"resource-y\", Version: \"ryv1\", CheckOrder: 1},\n\n\t\t\t\/\/ the versions themselves\n\t\t\t\/\/ note: normally there's one of these for each version, including ones\n\t\t\t\/\/ that appear as outputs\n\t\t\t{Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t\t{Resource: \"resource-y\", Version: \"ryv2\", CheckOrder: 2},\n\t\t\t{Resource: \"resource-x\", Version: \"rxv2\", CheckOrder: 2},\n\t\t\t{Resource: \"resource-y\", Version: \"ryv3\", CheckOrder: 3},\n\t\t\t{Resource: \"resource-x\", Version: \"rxv3\", CheckOrder: 3},\n\t\t\t{Resource: \"resource-y\", Version: \"ryv4\", CheckOrder: 4},\n\t\t\t{Resource: \"resource-x\", Version: \"rxv4\", CheckOrder: 4},\n\t\t\t{Resource: \"resource-y\", Version: \"ryv5\", CheckOrder: 5},\n\t\t\t{Resource: \"resource-x\", Version: \"rxv5\", CheckOrder: 5},\n\t\t},\n\n\t\tInputs: Inputs{\n\t\t\t{\n\t\t\t\tName: \"resource-x\",\n\t\t\t\tResource: \"resource-x\",\n\t\t\t\tPassed: []string{\"simple-a\"},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"resource-x-unconstrained\",\n\t\t\t\tResource: \"resource-x\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"resource-y-unconstrained\",\n\t\t\t\tResource: \"resource-y\",\n\t\t\t},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"resource-x\": \"rxv1\",\n\t\t\t\"resource-x-unconstrained\": \"rxv5\",\n\t\t\t\"resource-y-unconstrained\": \"ryv5\",\n\t\t},\n\t}),\n\n\tEntry(\"check orders take precedence over version ID\", Example{\n\t\tDB: DB{\n\t\t\t{Resource: \"resource-x\", Version: \"rxv2\", CheckOrder: 2},\n\t\t\t{Resource: \"resource-x\", Version: \"rxv1\", CheckOrder: 1},\n\t\t},\n\n\t\tInputs: Inputs{\n\t\t\t{Name: \"resource-x\", Resource: \"resource-x\"},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"resource-x\": \"rxv2\",\n\t\t},\n\t}),\n\n\tEntry(\"bosh memory leak regression test\", Example{\n\t\tLoadDB: \"testdata\/bosh-versions.json\",\n\n\t\tInputs: Inputs{\n\t\t\t{\n\t\t\t\tName: \"bosh-src\",\n\t\t\t\tResource: \"bosh-src\",\n\t\t\t\tPassed: []string{\n\t\t\t\t\t\"unit-1.9\",\n\t\t\t\t\t\"unit-2.1\",\n\t\t\t\t\t\"integration-2.1-mysql\",\n\t\t\t\t\t\"integration-1.9-postgres\",\n\t\t\t\t\t\"integration-2.1-postgres\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"bosh-load-tests\",\n\t\t\t\tResource: \"bosh-load-tests\",\n\t\t\t},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"bosh-src\": \"imported-r88v9814\",\n\t\t\t\"bosh-load-tests\": \"imported-r89v7204\",\n\t\t},\n\t}),\n\n\tEntry(\"concourse deploy high cpu regression test\", Example{\n\t\tLoadDB: \"testdata\/concourse-versions-high-cpu-deploy.json\",\n\n\t\tInputs: Inputs{\n\t\t\t{\n\t\t\t\tName: \"concourse\",\n\t\t\t\tResource: \"concourse\",\n\t\t\t\tPassed: []string{\n\t\t\t\t\t\"testflight\",\n\t\t\t\t\t\"bin-testflight\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"version\",\n\t\t\t\tResource: \"version\",\n\t\t\t\tPassed: []string{\n\t\t\t\t\t\"testflight\",\n\t\t\t\t\t\"bin-testflight\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"candidate-release\",\n\t\t\t\tResource: \"candidate-release\",\n\t\t\t\tPassed: []string{\n\t\t\t\t\t\"testflight\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"garden-linux-release\",\n\t\t\t\tResource: \"garden-linux\",\n\t\t\t\tPassed: []string{\n\t\t\t\t\t\"testflight\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"bin-rc\",\n\t\t\t\tResource: \"bin-rc\",\n\t\t\t\tPassed: []string{\n\t\t\t\t\t\"bin-testflight\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"bosh-stemcell\",\n\t\t\t\tResource: \"aws-stemcell\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"deployments\",\n\t\t\t\tResource: \"deployments\",\n\t\t\t},\n\t\t},\n\n\t\tResult: Result{\n\t\t\t\"candidate-release\": \"imported-r238v448886\",\n\t\t\t\"deployments\": \"imported-r45v448469\",\n\t\t\t\"bosh-stemcell\": \"imported-r48v443997\",\n\t\t\t\"bin-rc\": \"imported-r765v448889\",\n\t\t\t\"garden-linux-release\": \"imported-r17v443811\",\n\t\t\t\"version\": \"imported-r12v448884\",\n\t\t\t\"concourse\": \"imported-r62v448881\",\n\t\t},\n\t}),\n)\n<|endoftext|>"} {"text":"<commit_before>package critters\n\ntype Slimupation uint64\n\nconst (\n\tSlimupationMage Slimupation = iota\n\tSlimupationBrute\n\tSlimupationSticky\n\n\tslimupationCount\n)\n\nvar slimupationInfo = [slimupationCount]struct {\n\ttitle string\n\tflavor string\n}{\n\tSlimupationMage: {\n\t\ttitle: \"mage\",\n\t\tflavor: \"with magical properties.\"},\n\tSlimupationBrute: {\n\t\ttitle: \"brute\",\n\t\tflavor: \"that can bench press a whole hero.\"},\n\tSlimupationSticky: {\n\t\ttitle: \"citizen\",\n\t\tflavor: \"that's a little stickier than most.\"},\n}\n<commit_msg>whoops, please kill me<commit_after>package critter\n\ntype Slimupation uint64\n\nconst (\n\tSlimupationMage Slimupation = iota\n\tSlimupationBrute\n\tSlimupationSticky\n\n\tslimupationCount\n)\n\nvar slimupationInfo = [slimupationCount]struct {\n\ttitle string\n\tflavor string\n}{\n\tSlimupationMage: {\n\t\ttitle: \"mage\",\n\t\tflavor: \"with magical properties.\"},\n\tSlimupationBrute: {\n\t\ttitle: \"brute\",\n\t\tflavor: \"that can bench press a whole hero.\"},\n\tSlimupationSticky: {\n\t\ttitle: \"citizen\",\n\t\tflavor: \"that's a little stickier than most.\"},\n}\n<|endoftext|>"} {"text":"<commit_before>package cron\n\nimport (\n\t\"fmt\"\n\t\"github.com\/Cepave\/alarm\/g\"\n\t\"github.com\/Cepave\/common\/model\"\n\t\"github.com\/Cepave\/common\/utils\"\n)\n\nfunc BuildCommonSMSContent(event *model.Event) string {\n\treturn fmt.Sprintf(\n\t\t\"[P%d][%s][%s][][%s %s %s %s %s%s%s][O%d %s]\",\n\t\tevent.Priority(),\n\t\tevent.Status,\n\t\tevent.Endpoint,\n\t\tevent.Note(),\n\t\tevent.Func(),\n\t\tevent.Metric(),\n\t\tutils.SortedTags(event.PushedTags),\n\t\tutils.ReadableFloat(event.LeftValue),\n\t\tevent.Operator(),\n\t\tutils.ReadableFloat(event.RightValue()),\n\t\tevent.CurrentStep,\n\t\tevent.FormattedTime(),\n\t)\n}\n\nfunc BuildCommonMailContent(event *model.Event) string {\n\tlink := g.Link(event)\n\ttdtl := `style=\"border: 1px solid #ccc; background: #FFF4F4;\"`\n\ttdtr := `style=\"border: 1px solid #ccc; border-left: none;\"`\n\ttdl := `style=\"border: 1px solid #ccc; border-top: none; background: #FFF4F4;\"`\n\ttdr := `style=\"border: 1px solid #ccc; border-top: none; border-left: none;\"`\n\treturn fmt.Sprintf(\n\t\t`<html><head><meta charset=\"utf-8\"><\/head>\n\t\t<body>\n\t\t\t<table border=\"0\" cellpadding=\"5\" cellspacing=\"0\">\n <tr>\n <td %s >%s<\/td>\n <td %s >%d<\/td><\/tr>\n <tr>\n <td %s>Endpoint:<\/td>\n <td %s>%s<\/td>\n <\/tr>\n <tr>\n <td %s>Metric:<\/td>\n <td %s>%s<\/td>\n <\/tr>\n <tr>\n <td %s>Tags:<\/td>\n <td %s>%s<\/td>\n <\/tr>\n <tr>\n <td %s>%s<\/td>\n <td %s>%s%s%s<\/td>\n <\/tr>\n <tr>\n <td %s>Note:<\/td>\n <td %s>%s<\/td>\n <\/tr>\n <tr>\n <td %s>Max:<\/td>\n <td %s>%d<\/td>\n <\/tr>\n <tr>\n <td %s>Current:<\/td>\n <td %s>%d<\/td>\n <\/tr>\n <tr>\n <td %s>Timesramp:<\/td>\n <td %s>%s<\/td>\n <\/tr>\n <\/table>\n\t\t\t<br>\n\t\t\t<a href=\"%s\">%s<\/a>\n\t\t<\/body><\/html>`,\n\n\t\ttdtl, event.Status, tdtr, event.Priority(),\n\t\ttdl, tdr, event.Endpoint,\n\t\ttdl, tdr, event.Metric(),\n\t\ttdl, tdr, utils.SortedTags(event.PushedTags),\n\t\ttdl, event.Func(), tdr, utils.ReadableFloat(event.LeftValue), event.Operator(),\tutils.ReadableFloat(event.RightValue()),\n\t\ttdl, tdr, event.Note(),\n\t\ttdl, tdr, event.MaxStep(),\n\t\ttdl, tdr, event.CurrentStep,\n\t\ttdl, tdr, event.FormattedTime(),\n\t\tlink,\n\t\tlink,\n\t)\n}\n\nfunc BuildCommonQQContent(event *model.Event) string {\n\tlink := g.Link(event)\n\treturn fmt.Sprintf(\n\t\t\"%s\\r\\nP%d\\r\\nEndpoint:%s\\r\\nMetric:%s\\r\\nTags:%s\\r\\n%s: %s%s%s\\r\\nNote:%s\\r\\nMax:%d, Current:%d\\r\\nTimestamp:%s\\r\\n%s\\r\\n\",\n\t\tevent.Status,\n\t\tevent.Priority(),\n\t\tevent.Endpoint,\n\t\tevent.Metric(),\n\t\tutils.SortedTags(event.PushedTags),\n\t\tevent.Func(),\n\t\tutils.ReadableFloat(event.LeftValue),\n\t\tevent.Operator(),\n\t\tutils.ReadableFloat(event.RightValue()),\n\t\tevent.Note(),\n\t\tevent.MaxStep(),\n\t\tevent.CurrentStep,\n\t\tevent.FormattedTime(),\n\t\tlink,\n\t)\n}\n\nfunc GenerateSmsContent(event *model.Event) string {\n\treturn BuildCommonSMSContent(event)\n}\n\nfunc GenerateMailContent(event *model.Event) string {\n\treturn BuildCommonMailContent(event)\n}\n\nfunc GenerateQQContent(event *model.Event) string {\n\treturn BuildCommonQQContent(event)\n}\n\nfunc GenerateServerchanContent(event *model.Event) string {\n\treturn BuildCommonQQContent(event)\n}<commit_msg>Fix mail content in sometime can't see table.<commit_after>package cron\n\nimport (\n\t\"fmt\"\n\t\"github.com\/Cepave\/alarm\/g\"\n\t\"github.com\/Cepave\/common\/model\"\n\t\"github.com\/Cepave\/common\/utils\"\n)\n\nfunc BuildCommonSMSContent(event *model.Event) string {\n\treturn fmt.Sprintf(\n\t\t\"[P%d][%s][%s][][%s %s %s %s %s%s%s][O%d %s]\",\n\t\tevent.Priority(),\n\t\tevent.Status,\n\t\tevent.Endpoint,\n\t\tevent.Note(),\n\t\tevent.Func(),\n\t\tevent.Metric(),\n\t\tutils.SortedTags(event.PushedTags),\n\t\tutils.ReadableFloat(event.LeftValue),\n\t\tevent.Operator(),\n\t\tutils.ReadableFloat(event.RightValue()),\n\t\tevent.CurrentStep,\n\t\tevent.FormattedTime(),\n\t)\n}\n\nfunc BuildCommonMailContent(event *model.Event) string {\n\tlink := g.Link(event)\n\ttdtl := `style=\"border: 1px solid #ccc; background: #FFF4F4;\"`\n\ttdtr := `style=\"border: 1px solid #ccc; border-left: none;\"`\n\ttdl := `style=\"border: 1px solid #ccc; border-top: none; background: #FFF4F4;\"`\n\ttdr := `style=\"border: 1px solid #ccc; border-top: none; border-left: none;\"`\n\treturn fmt.Sprintf(\n\t\t`<html><head><meta charset=\"utf-8\"><\/head>\n\t\t<body>\n\t\t\t<table border=\"0\" cellpadding=\"5\" cellspacing=\"0\">\n <tr>\n <td %s >%s <\/td>\n <td %s >%d <\/td><\/tr>\n <tr>\n <td %s>Endpoint:<\/td>\n <td %s>%s <\/td>\n <\/tr>\n <tr>\n <td %s>Metric:<\/td>\n <td %s>%s <\/td>\n <\/tr>\n <tr>\n <td %s>Tags:<\/td>\n <td %s>%s <\/td>\n <\/tr>\n <tr>\n <td %s>%s<\/td>\n <td %s>%s%s%s <\/td>\n <\/tr>\n <tr>\n <td %s>Note:<\/td>\n <td %s>%s <\/td>\n <\/tr>\n <tr>\n <td %s>Max:<\/td>\n <td %s>%d <\/td>\n <\/tr>\n <tr>\n <td %s>Current:<\/td>\n <td %s>%d <\/td>\n <\/tr>\n <tr>\n <td %s>Timesramp:<\/td>\n <td %s>%s <\/td>\n <\/tr>\n <\/table>\n\t\t\t<br>\n\t\t\t<a href=\"%s\">%s<\/a>\n\t\t<\/body><\/html>`,\n\n\t\ttdtl, event.Status, tdtr, event.Priority(),\n\t\ttdl, tdr, event.Endpoint,\n\t\ttdl, tdr, event.Metric(),\n\t\ttdl, tdr, utils.SortedTags(event.PushedTags),\n\t\ttdl, event.Func(), tdr, utils.ReadableFloat(event.LeftValue), event.Operator(),\tutils.ReadableFloat(event.RightValue()),\n\t\ttdl, tdr, event.Note(),\n\t\ttdl, tdr, event.MaxStep(),\n\t\ttdl, tdr, event.CurrentStep,\n\t\ttdl, tdr, event.FormattedTime(),\n\t\tlink,\n\t\tlink,\n\t)\n}\n\nfunc BuildCommonQQContent(event *model.Event) string {\n\tlink := g.Link(event)\n\treturn fmt.Sprintf(\n\t\t\"%s\\r\\nP%d\\r\\nEndpoint:%s\\r\\nMetric:%s\\r\\nTags:%s\\r\\n%s: %s%s%s\\r\\nNote:%s\\r\\nMax:%d, Current:%d\\r\\nTimestamp:%s\\r\\n%s\\r\\n\",\n\t\tevent.Status,\n\t\tevent.Priority(),\n\t\tevent.Endpoint,\n\t\tevent.Metric(),\n\t\tutils.SortedTags(event.PushedTags),\n\t\tevent.Func(),\n\t\tutils.ReadableFloat(event.LeftValue),\n\t\tevent.Operator(),\n\t\tutils.ReadableFloat(event.RightValue()),\n\t\tevent.Note(),\n\t\tevent.MaxStep(),\n\t\tevent.CurrentStep,\n\t\tevent.FormattedTime(),\n\t\tlink,\n\t)\n}\n\nfunc GenerateSmsContent(event *model.Event) string {\n\treturn BuildCommonSMSContent(event)\n}\n\nfunc GenerateMailContent(event *model.Event) string {\n\treturn BuildCommonMailContent(event)\n}\n\nfunc GenerateQQContent(event *model.Event) string {\n\treturn BuildCommonQQContent(event)\n}\n\nfunc GenerateServerchanContent(event *model.Event) string {\n\treturn BuildCommonQQContent(event)\n}\n<|endoftext|>"} {"text":"<commit_before>package shadowsocks\n\nimport (\n\t\"sync\"\n\n\t\"v2ray.com\/core\/app\"\n\t\"v2ray.com\/core\/common\/buf\"\n\t\"v2ray.com\/core\/common\/bufio\"\n\t\"v2ray.com\/core\/common\/log\"\n\tv2net \"v2ray.com\/core\/common\/net\"\n\t\"v2ray.com\/core\/common\/protocol\"\n\t\"v2ray.com\/core\/common\/retry\"\n\t\"v2ray.com\/core\/proxy\"\n\t\"v2ray.com\/core\/transport\/internet\"\n\t\"v2ray.com\/core\/transport\/ray\"\n)\n\ntype Client struct {\n\tserverPicker protocol.ServerPicker\n\tmeta *proxy.OutboundHandlerMeta\n}\n\nfunc NewClient(config *ClientConfig, space app.Space, meta *proxy.OutboundHandlerMeta) (*Client, error) {\n\tserverList := protocol.NewServerList()\n\tfor _, rec := range config.Server {\n\t\tserverList.AddServer(protocol.NewServerSpecFromPB(*rec))\n\t}\n\tclient := &Client{\n\t\tserverPicker: protocol.NewRoundRobinServerPicker(serverList),\n\t\tmeta: meta,\n\t}\n\n\treturn client, nil\n}\n\nfunc (v *Client) Dispatch(destination v2net.Destination, payload *buf.Buffer, ray ray.OutboundRay) {\n\tdefer payload.Release()\n\tdefer ray.OutboundInput().Release()\n\tdefer ray.OutboundOutput().Close()\n\n\tnetwork := destination.Network\n\n\tvar server *protocol.ServerSpec\n\tvar conn internet.Connection\n\n\terr := retry.ExponentialBackoff(5, 100).On(func() error {\n\t\tserver = v.serverPicker.PickServer()\n\t\tdest := server.Destination()\n\t\tdest.Network = network\n\t\trawConn, err := internet.Dial(v.meta.Address, dest, v.meta.GetDialerOptions())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconn = rawConn\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Warning(\"Shadowsocks|Client: Failed to find an available destination:\", err)\n\t\treturn\n\t}\n\tlog.Info(\"Shadowsocks|Client: Tunneling request to \", destination, \" via \", server.Destination())\n\n\tconn.SetReusable(false)\n\n\trequest := &protocol.RequestHeader{\n\t\tVersion: Version,\n\t\tAddress: destination.Address,\n\t\tPort: destination.Port,\n\t}\n\tif destination.Network == v2net.Network_TCP {\n\t\trequest.Command = protocol.RequestCommandTCP\n\t} else {\n\t\trequest.Command = protocol.RequestCommandUDP\n\t}\n\n\tuser := server.PickUser()\n\trawAccount, err := user.GetTypedAccount()\n\tif err != nil {\n\t\tlog.Warning(\"Shadowsocks|Client: Failed to get a valid user account: \", err)\n\t\treturn\n\t}\n\taccount := rawAccount.(*ShadowsocksAccount)\n\trequest.User = user\n\n\tif account.OneTimeAuth == Account_Auto || account.OneTimeAuth == Account_Enabled {\n\t\trequest.Option |= RequestOptionOneTimeAuth\n\t}\n\n\tif request.Command == protocol.RequestCommandTCP {\n\t\tbufferedWriter := bufio.NewWriter(conn)\n\t\tdefer bufferedWriter.Release()\n\n\t\tbodyWriter, err := WriteTCPRequest(request, bufferedWriter)\n\t\tdefer bodyWriter.Release()\n\n\t\tif err != nil {\n\t\t\tlog.Info(\"Shadowsock|Client: Failed to write request: \", err)\n\t\t\treturn\n\t\t}\n\n\t\tif err := bodyWriter.Write(payload); err != nil {\n\t\t\tlog.Info(\"Shadowsocks|Client: Failed to write payload: \", err)\n\t\t\treturn\n\t\t}\n\n\t\tvar responseMutex sync.Mutex\n\t\tresponseMutex.Lock()\n\n\t\tgo func() {\n\t\t\tdefer responseMutex.Unlock()\n\n\t\t\tresponseReader, err := ReadTCPResponse(user, conn)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warning(\"Shadowsocks|Client: Failed to read response: \", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif err := buf.PipeUntilEOF(responseReader, ray.OutboundOutput()); err != nil {\n\t\t\t\tlog.Info(\"Shadowsocks|Client: Failed to transport all TCP response: \", err)\n\t\t\t}\n\t\t}()\n\n\t\tbufferedWriter.SetCached(false)\n\t\tif err := buf.PipeUntilEOF(ray.OutboundInput(), bodyWriter); err != nil {\n\t\t\tlog.Info(\"Shadowsocks|Client: Failed to trasnport all TCP request: \", err)\n\t\t}\n\n\t\tresponseMutex.Lock()\n\t}\n\n\tif request.Command == protocol.RequestCommandUDP {\n\t\ttimedReader := v2net.NewTimeOutReader(16, conn)\n\t\tvar responseMutex sync.Mutex\n\t\tresponseMutex.Lock()\n\n\t\tgo func() {\n\t\t\tdefer responseMutex.Unlock()\n\n\t\t\treader := &UDPReader{\n\t\t\t\tReader: timedReader,\n\t\t\t\tUser: user,\n\t\t\t}\n\n\t\t\tif err := buf.PipeUntilEOF(reader, ray.OutboundOutput()); err != nil {\n\t\t\t\tlog.Info(\"Shadowsocks|Client: Failed to transport all UDP response: \", err)\n\t\t\t}\n\t\t}()\n\n\t\twriter := &UDPWriter{\n\t\t\tWriter: conn,\n\t\t\tRequest: request,\n\t\t}\n\t\tif !payload.IsEmpty() {\n\t\t\tif err := writer.Write(payload); err != nil {\n\t\t\t\tlog.Info(\"Shadowsocks|Client: Failed to write payload: \", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err := buf.PipeUntilEOF(ray.OutboundInput(), writer); err != nil {\n\t\t\tlog.Info(\"Shadowsocks|Client: Failed to transport all UDP request: \", err)\n\t\t}\n\n\t\tresponseMutex.Lock()\n\t}\n}\n\ntype ClientFactory struct{}\n\nfunc (v *ClientFactory) StreamCapability() v2net.NetworkList {\n\treturn v2net.NetworkList{\n\t\tNetwork: []v2net.Network{v2net.Network_TCP, v2net.Network_RawTCP},\n\t}\n}\n\nfunc (v *ClientFactory) Create(space app.Space, rawConfig interface{}, meta *proxy.OutboundHandlerMeta) (proxy.OutboundHandler, error) {\n\treturn NewClient(rawConfig.(*ClientConfig), space, meta)\n}\n<commit_msg>Fix shadowsocks client handling proxied connection<commit_after>package shadowsocks\n\nimport (\n\t\"sync\"\n\n\t\"v2ray.com\/core\/app\"\n\t\"v2ray.com\/core\/common\/buf\"\n\t\"v2ray.com\/core\/common\/bufio\"\n\t\"v2ray.com\/core\/common\/log\"\n\tv2net \"v2ray.com\/core\/common\/net\"\n\t\"v2ray.com\/core\/common\/protocol\"\n\t\"v2ray.com\/core\/common\/retry\"\n\t\"v2ray.com\/core\/proxy\"\n\t\"v2ray.com\/core\/transport\/internet\"\n\t\"v2ray.com\/core\/transport\/ray\"\n)\n\ntype Client struct {\n\tserverPicker protocol.ServerPicker\n\tmeta *proxy.OutboundHandlerMeta\n}\n\nfunc NewClient(config *ClientConfig, space app.Space, meta *proxy.OutboundHandlerMeta) (*Client, error) {\n\tserverList := protocol.NewServerList()\n\tfor _, rec := range config.Server {\n\t\tserverList.AddServer(protocol.NewServerSpecFromPB(*rec))\n\t}\n\tclient := &Client{\n\t\tserverPicker: protocol.NewRoundRobinServerPicker(serverList),\n\t\tmeta: meta,\n\t}\n\n\treturn client, nil\n}\n\nfunc (v *Client) Dispatch(destination v2net.Destination, payload *buf.Buffer, ray ray.OutboundRay) {\n\tdefer payload.Release()\n\tdefer ray.OutboundInput().Release()\n\tdefer ray.OutboundOutput().Close()\n\n\tnetwork := destination.Network\n\n\tvar server *protocol.ServerSpec\n\tvar conn internet.Connection\n\n\terr := retry.ExponentialBackoff(5, 100).On(func() error {\n\t\tserver = v.serverPicker.PickServer()\n\t\tdest := server.Destination()\n\t\tdest.Network = network\n\t\trawConn, err := internet.Dial(v.meta.Address, dest, v.meta.GetDialerOptions())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tconn = rawConn\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Warning(\"Shadowsocks|Client: Failed to find an available destination:\", err)\n\t\treturn\n\t}\n\tlog.Info(\"Shadowsocks|Client: Tunneling request to \", destination, \" via \", server.Destination())\n\n\tconn.SetReusable(false)\n\n\trequest := &protocol.RequestHeader{\n\t\tVersion: Version,\n\t\tAddress: destination.Address,\n\t\tPort: destination.Port,\n\t}\n\tif destination.Network == v2net.Network_TCP {\n\t\trequest.Command = protocol.RequestCommandTCP\n\t} else {\n\t\trequest.Command = protocol.RequestCommandUDP\n\t}\n\n\tuser := server.PickUser()\n\trawAccount, err := user.GetTypedAccount()\n\tif err != nil {\n\t\tlog.Warning(\"Shadowsocks|Client: Failed to get a valid user account: \", err)\n\t\treturn\n\t}\n\taccount := rawAccount.(*ShadowsocksAccount)\n\trequest.User = user\n\n\tif account.OneTimeAuth == Account_Auto || account.OneTimeAuth == Account_Enabled {\n\t\trequest.Option |= RequestOptionOneTimeAuth\n\t}\n\n\tif request.Command == protocol.RequestCommandTCP {\n\t\tbufferedWriter := bufio.NewWriter(conn)\n\t\tdefer bufferedWriter.Release()\n\n\t\tbodyWriter, err := WriteTCPRequest(request, bufferedWriter)\n\t\tdefer bodyWriter.Release()\n\n\t\tif err != nil {\n\t\t\tlog.Info(\"Shadowsock|Client: Failed to write request: \", err)\n\t\t\treturn\n\t\t}\n\n\t\tif !payload.IsEmpty() {\n\t\t\tif err := bodyWriter.Write(payload); err != nil {\n\t\t\t\tlog.Info(\"Shadowsocks|Client: Failed to write payload: \", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tvar responseMutex sync.Mutex\n\t\tresponseMutex.Lock()\n\n\t\tgo func() {\n\t\t\tdefer responseMutex.Unlock()\n\n\t\t\tresponseReader, err := ReadTCPResponse(user, conn)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warning(\"Shadowsocks|Client: Failed to read response: \", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif err := buf.PipeUntilEOF(responseReader, ray.OutboundOutput()); err != nil {\n\t\t\t\tlog.Info(\"Shadowsocks|Client: Failed to transport all TCP response: \", err)\n\t\t\t}\n\t\t}()\n\n\t\tbufferedWriter.SetCached(false)\n\t\tif err := buf.PipeUntilEOF(ray.OutboundInput(), bodyWriter); err != nil {\n\t\t\tlog.Info(\"Shadowsocks|Client: Failed to trasnport all TCP request: \", err)\n\t\t}\n\n\t\tresponseMutex.Lock()\n\t}\n\n\tif request.Command == protocol.RequestCommandUDP {\n\t\ttimedReader := v2net.NewTimeOutReader(16, conn)\n\t\tvar responseMutex sync.Mutex\n\t\tresponseMutex.Lock()\n\n\t\tgo func() {\n\t\t\tdefer responseMutex.Unlock()\n\n\t\t\treader := &UDPReader{\n\t\t\t\tReader: timedReader,\n\t\t\t\tUser: user,\n\t\t\t}\n\n\t\t\tif err := buf.PipeUntilEOF(reader, ray.OutboundOutput()); err != nil {\n\t\t\t\tlog.Info(\"Shadowsocks|Client: Failed to transport all UDP response: \", err)\n\t\t\t}\n\t\t}()\n\n\t\twriter := &UDPWriter{\n\t\t\tWriter: conn,\n\t\t\tRequest: request,\n\t\t}\n\t\tif !payload.IsEmpty() {\n\t\t\tif err := writer.Write(payload); err != nil {\n\t\t\t\tlog.Info(\"Shadowsocks|Client: Failed to write payload: \", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err := buf.PipeUntilEOF(ray.OutboundInput(), writer); err != nil {\n\t\t\tlog.Info(\"Shadowsocks|Client: Failed to transport all UDP request: \", err)\n\t\t}\n\n\t\tresponseMutex.Lock()\n\t}\n}\n\ntype ClientFactory struct{}\n\nfunc (v *ClientFactory) StreamCapability() v2net.NetworkList {\n\treturn v2net.NetworkList{\n\t\tNetwork: []v2net.Network{v2net.Network_TCP, v2net.Network_RawTCP},\n\t}\n}\n\nfunc (v *ClientFactory) Create(space app.Space, rawConfig interface{}, meta *proxy.OutboundHandlerMeta) (proxy.OutboundHandler, error) {\n\treturn NewClient(rawConfig.(*ClientConfig), space, meta)\n}\n<|endoftext|>"} {"text":"<commit_before>package plugins\n\nimport (\n\t\"strings\"\n\n\t\"fmt\"\n\t\"time\"\n\n\t\"bytes\"\n\n\t\"github.com\/Seklfreak\/Robyul2\/cache\"\n\t\"github.com\/Seklfreak\/Robyul2\/helpers\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/bwmarrin\/discordgo\"\n)\n\ntype dmAction func(args []string, in *discordgo.Message, out **discordgo.MessageSend) (next dmAction)\n\ntype DM struct{}\n\nconst (\n\tDMReceiveChannelIDKey = \"dm:receive:channel-id\"\n)\n\nfunc (dm *DM) Commands() []string {\n\treturn []string{\n\t\t\"dm\",\n\t\t\"dms\",\n\t}\n}\n\nfunc (dm *DM) Init(session *discordgo.Session) {\n\n}\n\nfunc (dm *DM) Uninit(session *discordgo.Session) {\n\n}\n\nfunc (dm *DM) Action(command string, content string, msg *discordgo.Message, session *discordgo.Session) {\n\tdefer helpers.Recover()\n\n\tsession.ChannelTyping(msg.ChannelID)\n\n\tvar result *discordgo.MessageSend\n\targs := strings.Fields(content)\n\n\taction := dm.actionStart\n\tfor action != nil {\n\t\taction = action(args, msg, &result)\n\t}\n}\n\nfunc (dm *DM) actionStart(args []string, in *discordgo.Message, out **discordgo.MessageSend) dmAction {\n\tcache.GetSession().ChannelTyping(in.ChannelID)\n\n\tif len(args) < 1 {\n\t\t*out = dm.newMsg(\"bot.arguments.too-few\")\n\t\treturn dm.actionFinish\n\t}\n\n\tswitch args[0] {\n\tcase \"send\":\n\t\treturn dm.actionSend\n\tcase \"receive\":\n\t\treturn dm.actionReceive\n\t}\n\n\t*out = dm.newMsg(\"bot.arguments.invalid\")\n\treturn dm.actionFinish\n}\n\nfunc (dm *DM) actionSend(args []string, in *discordgo.Message, out **discordgo.MessageSend) dmAction {\n\tif !helpers.IsRobyulMod(in.Author.ID) {\n\t\t*out = dm.newMsg(\"robyulmod.no_permission\")\n\t\treturn dm.actionFinish\n\t}\n\n\tif !(len(args) >= 3 || (len(args) >= 2 && len(in.Attachments) > 0)) {\n\t\t*out = dm.newMsg(\"bot.arguments.too-few\")\n\t\treturn dm.actionFinish\n\t}\n\n\ttargetUser, err := helpers.GetUserFromMention(args[1])\n\tif err != nil {\n\t\t*out = dm.newMsg(\"bot.arguments.invalid\")\n\t\treturn dm.actionFinish\n\t}\n\n\tdmChannel, err := cache.GetSession().UserChannelCreate(targetUser.ID)\n\thelpers.Relax(err)\n\n\tparts := strings.Split(in.Content, args[1])\n\tif len(parts) < 2 {\n\t\t*out = dm.newMsg(\"bot.arguments.too-few\")\n\t\treturn dm.actionFinish\n\t}\n\tdmMessage := strings.TrimSpace(strings.Join(parts[1:], args[1]))\n\n\tdmMessageSend := &discordgo.MessageSend{\n\t\tContent: dmMessage,\n\t}\n\tvar dmAttachmentUrl string\n\tif len(in.Attachments) > 0 {\n\t\tdmAttachmentUrl = in.Attachments[0].URL\n\t\tdmFile := helpers.NetGet(dmAttachmentUrl)\n\t\tdmMessageSend.File = &discordgo.File{Name: in.Attachments[0].Filename, Reader: bytes.NewReader(dmFile)}\n\t}\n\n\t_, err = helpers.SendComplex(dmChannel.ID, dmMessageSend)\n\tif err != nil {\n\t\tif errD, ok := err.(*discordgo.RESTError); ok && errD.Message.Code == discordgo.ErrCodeCannotSendMessagesToThisUser {\n\t\t\t*out = dm.newMsg(\"plugins.dm.send-error-cannot-dm\")\n\t\t\treturn dm.actionFinish\n\t\t}\n\t}\n\thelpers.Relax(err)\n\tdm.logger().WithField(\"RecipientUserID\", args[1]).WithField(\"AuthorUserID\", in.Author.ID).\n\t\tInfo(\"send a DM: \" + dmMessage + \" Attachment: \" + dmAttachmentUrl)\n\n\t*out = dm.newMsg(helpers.GetTextF(\"plugins.dm.send-success\", targetUser.Username))\n\treturn dm.actionFinish\n}\n\nfunc (dm *DM) actionReceive(args []string, in *discordgo.Message, out **discordgo.MessageSend) dmAction {\n\tif !helpers.IsRobyulMod(in.Author.ID) {\n\t\t*out = dm.newMsg(\"robyulmod.no_permission\")\n\t\treturn dm.actionFinish\n\t}\n\n\tvar err error\n\tvar targetChannel *discordgo.Channel\n\tif len(args) >= 2 {\n\t\ttargetChannel, err = helpers.GetChannelFromMention(in, args[1])\n\t\thelpers.Relax(err)\n\t}\n\n\tif targetChannel != nil && targetChannel.ID != \"\" {\n\t\terr = helpers.SetBotConfigString(DMReceiveChannelIDKey, targetChannel.ID)\n\t} else {\n\t\terr = helpers.SetBotConfigString(DMReceiveChannelIDKey, \"\")\n\t}\n\n\t*out = dm.newMsg(\"plugins.dm.receive-success\")\n\treturn dm.actionFinish\n}\n\nfunc (dm *DM) actionFinish(args []string, in *discordgo.Message, out **discordgo.MessageSend) dmAction {\n\t_, err := helpers.SendComplex(in.ChannelID, *out)\n\thelpers.Relax(err)\n\n\treturn nil\n}\n\nfunc (dm *DM) newMsg(content string) *discordgo.MessageSend {\n\treturn &discordgo.MessageSend{Content: helpers.GetText(content)}\n}\n\nfunc (dm *DM) logger() *logrus.Entry {\n\treturn cache.GetLogger().WithField(\"module\", \"dm\")\n}\n\nfunc (dm *DM) OnMessage(content string, msg *discordgo.Message, session *discordgo.Session) {\n\tif msg.Author.Bot == true {\n\t\treturn\n\t}\n\n\tchannel, err := helpers.GetChannel(msg.ChannelID)\n\thelpers.Relax(err)\n\n\tif channel.Type != discordgo.ChannelTypeDM {\n\t\treturn\n\t}\n\n\tdmChannelID, _ := helpers.GetBotConfigString(DMReceiveChannelIDKey)\n\tif dmChannelID != \"\" {\n\t\terr = dm.repostDM(dmChannelID, msg)\n\t\thelpers.RelaxLog(err)\n\t}\n}\n\nfunc (dm *DM) repostDM(channelID string, message *discordgo.Message) (err error) {\n\treceived, err := message.Timestamp.Parse()\n\tif err != nil {\n\t\treceived = time.Now()\n\t}\n\n\tchannel, err := helpers.GetChannel(channelID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontent := message.Content\n\tfor _, attachment := range message.Attachments {\n\t\tcontent += \"\\n\" + attachment.URL\n\t}\n\n\tembed := &discordgo.MessageEmbed{\n\t\tAuthor: &discordgo.MessageEmbedAuthor{\n\t\t\tName: fmt.Sprintf(\"@%s DM'd Robyul:\", message.Author.Username),\n\t\t},\n\t\tDescription: content,\n\t\tColor: 0x0FADED,\n\t\tFooter: &discordgo.MessageEmbedFooter{\n\t\t\tText: fmt.Sprintf(\"User ID: %s | Received at %s\",\n\t\t\t\tmessage.Author.ID, received.Format(time.ANSIC)),\n\t\t},\n\t\tFields: []*discordgo.MessageEmbedField{\n\t\t\t{\n\t\t\t\tName: \"Reply:\",\n\t\t\t\tValue: fmt.Sprintf(\"`%sdm send %s <your message>`\",\n\t\t\t\t\thelpers.GetPrefixForServer(channel.GuildID), message.Author.ID),\n\t\t\t\tInline: false,\n\t\t\t},\n\t\t},\n\t}\n\tif message.Author.Avatar != \"\" {\n\t\tembed.Author.IconURL = message.Author.AvatarURL(\"128\")\n\t}\n\n\t_, err = helpers.SendEmbed(channel.ID, embed)\n\treturn err\n}\n\nfunc (dm *DM) OnGuildMemberAdd(member *discordgo.Member, session *discordgo.Session) {\n\n}\n\nfunc (dm *DM) OnGuildMemberRemove(member *discordgo.Member, session *discordgo.Session) {\n\n}\n\nfunc (dm *DM) OnMessageDelete(msg *discordgo.MessageDelete, session *discordgo.Session) {\n\n}\n\nfunc (dm *DM) OnReactionAdd(reaction *discordgo.MessageReactionAdd, session *discordgo.Session) {\n\n}\n\nfunc (dm *DM) OnReactionRemove(reaction *discordgo.MessageReactionRemove, session *discordgo.Session) {\n\n}\n\nfunc (dm *DM) OnGuildBanAdd(user *discordgo.GuildBanAdd, session *discordgo.Session) {\n\n}\n\nfunc (dm *DM) OnGuildBanRemove(user *discordgo.GuildBanRemove, session *discordgo.Session) {\n\n}\n<commit_msg>[dm] don't post DMs with empty content<commit_after>package plugins\n\nimport (\n\t\"strings\"\n\n\t\"fmt\"\n\t\"time\"\n\n\t\"bytes\"\n\n\t\"github.com\/Seklfreak\/Robyul2\/cache\"\n\t\"github.com\/Seklfreak\/Robyul2\/helpers\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/bwmarrin\/discordgo\"\n)\n\ntype dmAction func(args []string, in *discordgo.Message, out **discordgo.MessageSend) (next dmAction)\n\ntype DM struct{}\n\nconst (\n\tDMReceiveChannelIDKey = \"dm:receive:channel-id\"\n)\n\nfunc (dm *DM) Commands() []string {\n\treturn []string{\n\t\t\"dm\",\n\t\t\"dms\",\n\t}\n}\n\nfunc (dm *DM) Init(session *discordgo.Session) {\n\n}\n\nfunc (dm *DM) Uninit(session *discordgo.Session) {\n\n}\n\nfunc (dm *DM) Action(command string, content string, msg *discordgo.Message, session *discordgo.Session) {\n\tdefer helpers.Recover()\n\n\tsession.ChannelTyping(msg.ChannelID)\n\n\tvar result *discordgo.MessageSend\n\targs := strings.Fields(content)\n\n\taction := dm.actionStart\n\tfor action != nil {\n\t\taction = action(args, msg, &result)\n\t}\n}\n\nfunc (dm *DM) actionStart(args []string, in *discordgo.Message, out **discordgo.MessageSend) dmAction {\n\tcache.GetSession().ChannelTyping(in.ChannelID)\n\n\tif len(args) < 1 {\n\t\t*out = dm.newMsg(\"bot.arguments.too-few\")\n\t\treturn dm.actionFinish\n\t}\n\n\tswitch args[0] {\n\tcase \"send\":\n\t\treturn dm.actionSend\n\tcase \"receive\":\n\t\treturn dm.actionReceive\n\t}\n\n\t*out = dm.newMsg(\"bot.arguments.invalid\")\n\treturn dm.actionFinish\n}\n\nfunc (dm *DM) actionSend(args []string, in *discordgo.Message, out **discordgo.MessageSend) dmAction {\n\tif !helpers.IsRobyulMod(in.Author.ID) {\n\t\t*out = dm.newMsg(\"robyulmod.no_permission\")\n\t\treturn dm.actionFinish\n\t}\n\n\tif !(len(args) >= 3 || (len(args) >= 2 && len(in.Attachments) > 0)) {\n\t\t*out = dm.newMsg(\"bot.arguments.too-few\")\n\t\treturn dm.actionFinish\n\t}\n\n\ttargetUser, err := helpers.GetUserFromMention(args[1])\n\tif err != nil {\n\t\t*out = dm.newMsg(\"bot.arguments.invalid\")\n\t\treturn dm.actionFinish\n\t}\n\n\tdmChannel, err := cache.GetSession().UserChannelCreate(targetUser.ID)\n\thelpers.Relax(err)\n\n\tparts := strings.Split(in.Content, args[1])\n\tif len(parts) < 2 {\n\t\t*out = dm.newMsg(\"bot.arguments.too-few\")\n\t\treturn dm.actionFinish\n\t}\n\tdmMessage := strings.TrimSpace(strings.Join(parts[1:], args[1]))\n\n\tdmMessageSend := &discordgo.MessageSend{\n\t\tContent: dmMessage,\n\t}\n\tvar dmAttachmentUrl string\n\tif len(in.Attachments) > 0 {\n\t\tdmAttachmentUrl = in.Attachments[0].URL\n\t\tdmFile := helpers.NetGet(dmAttachmentUrl)\n\t\tdmMessageSend.File = &discordgo.File{Name: in.Attachments[0].Filename, Reader: bytes.NewReader(dmFile)}\n\t}\n\n\t_, err = helpers.SendComplex(dmChannel.ID, dmMessageSend)\n\tif err != nil {\n\t\tif errD, ok := err.(*discordgo.RESTError); ok && errD.Message.Code == discordgo.ErrCodeCannotSendMessagesToThisUser {\n\t\t\t*out = dm.newMsg(\"plugins.dm.send-error-cannot-dm\")\n\t\t\treturn dm.actionFinish\n\t\t}\n\t}\n\thelpers.Relax(err)\n\tdm.logger().WithField(\"RecipientUserID\", args[1]).WithField(\"AuthorUserID\", in.Author.ID).\n\t\tInfo(\"send a DM: \" + dmMessage + \" Attachment: \" + dmAttachmentUrl)\n\n\t*out = dm.newMsg(helpers.GetTextF(\"plugins.dm.send-success\", targetUser.Username))\n\treturn dm.actionFinish\n}\n\nfunc (dm *DM) actionReceive(args []string, in *discordgo.Message, out **discordgo.MessageSend) dmAction {\n\tif !helpers.IsRobyulMod(in.Author.ID) {\n\t\t*out = dm.newMsg(\"robyulmod.no_permission\")\n\t\treturn dm.actionFinish\n\t}\n\n\tvar err error\n\tvar targetChannel *discordgo.Channel\n\tif len(args) >= 2 {\n\t\ttargetChannel, err = helpers.GetChannelFromMention(in, args[1])\n\t\thelpers.Relax(err)\n\t}\n\n\tif targetChannel != nil && targetChannel.ID != \"\" {\n\t\terr = helpers.SetBotConfigString(DMReceiveChannelIDKey, targetChannel.ID)\n\t} else {\n\t\terr = helpers.SetBotConfigString(DMReceiveChannelIDKey, \"\")\n\t}\n\n\t*out = dm.newMsg(\"plugins.dm.receive-success\")\n\treturn dm.actionFinish\n}\n\nfunc (dm *DM) actionFinish(args []string, in *discordgo.Message, out **discordgo.MessageSend) dmAction {\n\t_, err := helpers.SendComplex(in.ChannelID, *out)\n\thelpers.Relax(err)\n\n\treturn nil\n}\n\nfunc (dm *DM) newMsg(content string) *discordgo.MessageSend {\n\treturn &discordgo.MessageSend{Content: helpers.GetText(content)}\n}\n\nfunc (dm *DM) logger() *logrus.Entry {\n\treturn cache.GetLogger().WithField(\"module\", \"dm\")\n}\n\nfunc (dm *DM) OnMessage(content string, msg *discordgo.Message, session *discordgo.Session) {\n\tif msg.Author.Bot == true {\n\t\treturn\n\t}\n\n\tchannel, err := helpers.GetChannel(msg.ChannelID)\n\thelpers.Relax(err)\n\n\tif channel.Type != discordgo.ChannelTypeDM {\n\t\treturn\n\t}\n\n\tdmChannelID, _ := helpers.GetBotConfigString(DMReceiveChannelIDKey)\n\tif dmChannelID != \"\" {\n\t\terr = dm.repostDM(dmChannelID, msg)\n\t\thelpers.RelaxLog(err)\n\t}\n}\n\nfunc (dm *DM) repostDM(channelID string, message *discordgo.Message) (err error) {\n\treceived, err := message.Timestamp.Parse()\n\tif err != nil {\n\t\treceived = time.Now()\n\t}\n\n\tchannel, err := helpers.GetChannel(channelID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcontent := message.Content\n\tfor _, attachment := range message.Attachments {\n\t\tcontent += \"\\n\" + attachment.URL\n\t}\n\tcontent = strings.TrimSpace(content)\n\n\tif content == \"\" {\n\t\treturn nil\n\t}\n\n\tembed := &discordgo.MessageEmbed{\n\t\tAuthor: &discordgo.MessageEmbedAuthor{\n\t\t\tName: fmt.Sprintf(\"@%s DM'd Robyul:\", message.Author.Username),\n\t\t},\n\t\tDescription: content,\n\t\tColor: 0x0FADED,\n\t\tFooter: &discordgo.MessageEmbedFooter{\n\t\t\tText: fmt.Sprintf(\"User ID: %s | Received at %s\",\n\t\t\t\tmessage.Author.ID, received.Format(time.ANSIC)),\n\t\t},\n\t\tFields: []*discordgo.MessageEmbedField{\n\t\t\t{\n\t\t\t\tName: \"Reply:\",\n\t\t\t\tValue: fmt.Sprintf(\"`%sdm send %s <your message>`\",\n\t\t\t\t\thelpers.GetPrefixForServer(channel.GuildID), message.Author.ID),\n\t\t\t\tInline: false,\n\t\t\t},\n\t\t},\n\t}\n\tif message.Author.Avatar != \"\" {\n\t\tembed.Author.IconURL = message.Author.AvatarURL(\"128\")\n\t}\n\n\t_, err = helpers.SendEmbed(channel.ID, embed)\n\treturn err\n}\n\nfunc (dm *DM) OnGuildMemberAdd(member *discordgo.Member, session *discordgo.Session) {\n\n}\n\nfunc (dm *DM) OnGuildMemberRemove(member *discordgo.Member, session *discordgo.Session) {\n\n}\n\nfunc (dm *DM) OnMessageDelete(msg *discordgo.MessageDelete, session *discordgo.Session) {\n\n}\n\nfunc (dm *DM) OnReactionAdd(reaction *discordgo.MessageReactionAdd, session *discordgo.Session) {\n\n}\n\nfunc (dm *DM) OnReactionRemove(reaction *discordgo.MessageReactionRemove, session *discordgo.Session) {\n\n}\n\nfunc (dm *DM) OnGuildBanAdd(user *discordgo.GuildBanAdd, session *discordgo.Session) {\n\n}\n\nfunc (dm *DM) OnGuildBanRemove(user *discordgo.GuildBanRemove, session *discordgo.Session) {\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\n\/\/ $ glacier us-east-1 job inventory <vault> <topic> <description>\n\/\/ $ glacier us-east-1 job archive <vault> <archive> <topic> <description>\n\/\/ $ glacier us-east-1 job list <vault>\n\/\/ $ glacier us-east-1 job describe <vault> <job>\n\/\/ $ glacier us-east-1 job get inventory <vault> <job>\n\/\/ $ glacier us-east-1 job get archive <vault> <job> <file>\n\nfunc job(args []string) {\n\tif len(args) < 1 {\n\t\tfmt.Println(\"no job command\")\n\t\tos.Exit(1)\n\t}\n\tcommand := args[0]\n\targs = args[1:]\n\n\tswitch command {\n\tcase \"inventory\":\n\t\tif len(args) < 1 {\n\t\t\tfmt.Println(\"no vault\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tvault := args[0]\n\t\targs = args[1:]\n\n\t\tvar description, topic string\n\t\tif len(args) > 0 {\n\t\t\ttopic = args[0]\n\t\t}\n\t\tif len(args) > 1 {\n\t\t\tdescription = args[1]\n\t\t}\n\n\t\tjobId, err := connection.InitiateInventoryJob(vault, topic, description)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Println(jobId)\n\n\tcase \"archive\":\n\t\tif len(args) < 2 {\n\t\t\tfmt.Println(\"no vault\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tvault := args[0]\n\t\tarchive := args[1]\n\t\targs = args[2:]\n\n\t\tvar description, topic string\n\t\tif len(args) > 0 {\n\t\t\ttopic = args[0]\n\t\t}\n\t\tif len(args) > 1 {\n\t\t\tdescription = args[1]\n\t\t}\n\n\t\tjobId, err := connection.InitiateRetrievalJob(vault, archive, topic, description)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Println(jobId)\n\n\tcase \"list\":\n\t\tif len(args) < 1 {\n\t\t\tfmt.Println(\"no vault\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tvault := args[0]\n\n\t\tjobs, _, err := connection.ListJobs(vault, \"\", \"\", \"\", 0)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfor _, v := range jobs {\n\t\t\tfmt.Println(\"Action:\", v.Action)\n\t\t\tif v.Action == \"ArchiveRetrieval\" {\n\t\t\t\tfmt.Println(\"Archive ID:\", v.ArchiveId)\n\t\t\t\tfmt.Println(\"Archive Size:\", v.ArchiveSizeInBytes, prettySize(v.ArchiveSizeInBytes))\n\t\t\t}\n\t\t\tfmt.Println(\"Completed:\", v.Completed)\n\t\t\tif v.Completed {\n\t\t\t\tfmt.Println(\"Completion Date:\", v.CompletionDate)\n\t\t\t}\n\t\t\tfmt.Println(\"Creation Date:\", v.CreationDate)\n\t\t\tif v.Action == \"InventoryRetrieval\" { \/\/ only know size when completed?\n\t\t\t\tfmt.Println(\"Invenotry Size:\", v.InventorySizeInBytes, prettySize(uint64(v.InventorySizeInBytes)))\n\t\t\t}\n\t\t\tfmt.Println(\"Job Description:\", v.JobDescription)\n\t\t\tfmt.Println(\"Job ID:\", v.JobId)\n\t\t\tif v.Action == \"ArchiveRetrieval\" {\n\t\t\t\tfmt.Println(\"SHA256 Tree Hash:\", v.SHA256TreeHash)\n\t\t\t}\n\t\t\tfmt.Println(\"SNS Topic:\", v.SNSTopic)\n\t\t\tfmt.Println(\"Status Code:\", v.StatusCode)\n\t\t\tfmt.Println(\"Status Message:\", v.StatusMessage)\n\t\t\tfmt.Println(\"Vault ARN:\", v.VaultARN)\n\t\t\tfmt.Println()\n\t\t}\n\n\tcase \"describe\":\n\t\tif len(args) < 2 {\n\t\t\tfmt.Println(\"no vault and\/or job id\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tvault := args[0]\n\t\tjobId := args[1]\n\n\t\tjob, err := connection.DescribeJob(vault, jobId)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfmt.Println(\"Action:\", job.Action)\n\t\tif job.Action == \"ArchiveRetrieval\" {\n\t\t\tfmt.Println(\"Archive ID:\", job.ArchiveId)\n\t\t\tfmt.Println(\"Archive Size:\", job.ArchiveSizeInBytes, prettySize(job.ArchiveSizeInBytes))\n\t\t}\n\t\tfmt.Println(\"Completed:\", job.Completed)\n\t\tif job.Completed {\n\t\t\tfmt.Println(\"Completion Date:\", job.CompletionDate)\n\t\t}\n\t\tfmt.Println(\"Creation Date:\", job.CreationDate)\n\t\tif job.Action == \"InventoryRetrieval\" { \/\/ only know size when completed?\n\t\t\tfmt.Println(\"Invenotry Size:\", job.InventorySizeInBytes, prettySize(uint64(job.InventorySizeInBytes)))\n\t\t}\n\t\tfmt.Println(\"Job Description:\", job.JobDescription)\n\t\tfmt.Println(\"Job ID:\", job.JobId)\n\t\tif job.Action == \"ArchiveRetrieval\" {\n\t\t\tfmt.Println(\"SHA256 Tree Hash:\", job.SHA256TreeHash)\n\t\t}\n\t\tfmt.Println(\"SNS Topic:\", job.SNSTopic)\n\t\tfmt.Println(\"Status Code:\", job.StatusCode)\n\t\tfmt.Println(\"Status Message:\", job.StatusMessage)\n\t\tfmt.Println(\"Vault ARN:\", job.VaultARN)\n\n\tcase \"get\":\n\t\tif len(args) < 1 {\n\t\t\tfmt.Println(\"no job sub command\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tsubCommand := args[0]\n\t\targs = args[1:]\n\n\t\tswitch subCommand {\n\t\tcase \"inventory\":\n\t\t\tif len(args) < 2 {\n\t\t\t\tfmt.Println(\"no vault and\/or job id\")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tvault := args[0]\n\t\t\tjob := args[1]\n\n\t\t\tinventory, err := connection.GetInventoryJob(vault, job)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\tfmt.Println(\"Vault ARN:\", inventory.VaultARN)\n\t\t\tfmt.Println(\"Inventory Date:\", inventory.InventoryDate)\n\t\t\tfor _, v := range inventory.ArchiveList {\n\t\t\t\tfmt.Println(\"Archive ID:\", v.ArchiveId)\n\t\t\t\tfmt.Println(\"Archive Description:\", v.ArchiveDescription)\n\t\t\t\tfmt.Println(\"Creation Date:\", v.CreationDate)\n\t\t\t\tfmt.Println(\"Size:\", v.Size)\n\t\t\t\tfmt.Println(\"SHA256 Tree Hash:\", v.SHA256TreeHash)\n\t\t\t\tfmt.Println()\n\t\t\t}\n\n\t\tcase \"archive\":\n\t\t\t\/\/ TODO retrieve parts and handle errors\n\t\t\tif len(args) < 3 {\n\t\t\t\tfmt.Println(\"no vault, job id, and\/or output file\")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tvault := args[0]\n\t\t\tjob := args[1]\n\t\t\tfileName := args[2]\n\n\t\t\tfile, err := os.Create(fileName)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tdefer file.Close()\n\n\t\t\tarchive, err := connection.GetRetrievalJob(vault, job, 0, 0)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tdefer archive.Close()\n\n\t\t\t_, err = io.Copy(file, archive)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\tdefault:\n\t\t\tfmt.Println(\"unknown job sub command:\", subCommand)\n\t\t\tos.Exit(1)\n\t\t}\n\tdefault:\n\t\tfmt.Println(\"unknown job command:\", command)\n\t\tos.Exit(1)\n\t}\n}\n<commit_msg>Avoid printing useless information.<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\n\/\/ $ glacier us-east-1 job inventory <vault> <topic> <description>\n\/\/ $ glacier us-east-1 job archive <vault> <archive> <topic> <description>\n\/\/ $ glacier us-east-1 job list <vault>\n\/\/ $ glacier us-east-1 job describe <vault> <job>\n\/\/ $ glacier us-east-1 job get inventory <vault> <job>\n\/\/ $ glacier us-east-1 job get archive <vault> <job> <file>\n\nfunc job(args []string) {\n\tif len(args) < 1 {\n\t\tfmt.Println(\"no job command\")\n\t\tos.Exit(1)\n\t}\n\tcommand := args[0]\n\targs = args[1:]\n\n\tswitch command {\n\tcase \"inventory\":\n\t\tif len(args) < 1 {\n\t\t\tfmt.Println(\"no vault\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tvault := args[0]\n\t\targs = args[1:]\n\n\t\tvar description, topic string\n\t\tif len(args) > 0 {\n\t\t\ttopic = args[0]\n\t\t}\n\t\tif len(args) > 1 {\n\t\t\tdescription = args[1]\n\t\t}\n\n\t\tjobId, err := connection.InitiateInventoryJob(vault, topic, description)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Println(jobId)\n\n\tcase \"archive\":\n\t\tif len(args) < 2 {\n\t\t\tfmt.Println(\"no vault\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tvault := args[0]\n\t\tarchive := args[1]\n\t\targs = args[2:]\n\n\t\tvar description, topic string\n\t\tif len(args) > 0 {\n\t\t\ttopic = args[0]\n\t\t}\n\t\tif len(args) > 1 {\n\t\t\tdescription = args[1]\n\t\t}\n\n\t\tjobId, err := connection.InitiateRetrievalJob(vault, archive, topic, description)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfmt.Println(jobId)\n\n\tcase \"list\":\n\t\tif len(args) < 1 {\n\t\t\tfmt.Println(\"no vault\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tvault := args[0]\n\n\t\tjobs, _, err := connection.ListJobs(vault, \"\", \"\", \"\", 0)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfor _, v := range jobs {\n\t\t\tfmt.Println(\"Action:\", v.Action)\n\t\t\tif v.Action == \"ArchiveRetrieval\" {\n\t\t\t\tfmt.Println(\"Archive ID:\", v.ArchiveId)\n\t\t\t\tfmt.Println(\"Archive Size:\", v.ArchiveSizeInBytes, prettySize(v.ArchiveSizeInBytes))\n\t\t\t}\n\t\t\tfmt.Println(\"Completed:\", v.Completed)\n\t\t\tif v.Completed {\n\t\t\t\tfmt.Println(\"Completion Date:\", v.CompletionDate)\n\t\t\t}\n\t\t\tfmt.Println(\"Creation Date:\", v.CreationDate)\n\t\t\tif v.Completed && v.Action == \"InventoryRetrieval\" {\n\t\t\t\tfmt.Println(\"Invenotry Size:\", v.InventorySizeInBytes, prettySize(uint64(v.InventorySizeInBytes)))\n\t\t\t}\n\t\t\tfmt.Println(\"Job Description:\", v.JobDescription)\n\t\t\tfmt.Println(\"Job ID:\", v.JobId)\n\t\t\tif v.Action == \"ArchiveRetrieval\" {\n\t\t\t\tfmt.Println(\"SHA256 Tree Hash:\", v.SHA256TreeHash)\n\t\t\t}\n\t\t\tfmt.Println(\"SNS Topic:\", v.SNSTopic)\n\t\t\tfmt.Println(\"Status Code:\", v.StatusCode)\n\t\t\tfmt.Println(\"Status Message:\", v.StatusMessage)\n\t\t\tfmt.Println(\"Vault ARN:\", v.VaultARN)\n\t\t\tfmt.Println()\n\t\t}\n\n\tcase \"describe\":\n\t\tif len(args) < 2 {\n\t\t\tfmt.Println(\"no vault and\/or job id\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tvault := args[0]\n\t\tjobId := args[1]\n\n\t\tjob, err := connection.DescribeJob(vault, jobId)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tfmt.Println(\"Action:\", job.Action)\n\t\tif job.Action == \"ArchiveRetrieval\" {\n\t\t\tfmt.Println(\"Archive ID:\", job.ArchiveId)\n\t\t\tfmt.Println(\"Archive Size:\", job.ArchiveSizeInBytes, prettySize(job.ArchiveSizeInBytes))\n\t\t}\n\t\tfmt.Println(\"Completed:\", job.Completed)\n\t\tif job.Completed {\n\t\t\tfmt.Println(\"Completion Date:\", job.CompletionDate)\n\t\t}\n\t\tfmt.Println(\"Creation Date:\", job.CreationDate)\n\t\tif job.Completed && job.Action == \"InventoryRetrieval\" {\n\t\t\tfmt.Println(\"Invenotry Size:\", job.InventorySizeInBytes, prettySize(uint64(job.InventorySizeInBytes)))\n\t\t}\n\t\tfmt.Println(\"Job Description:\", job.JobDescription)\n\t\tfmt.Println(\"Job ID:\", job.JobId)\n\t\tif job.Action == \"ArchiveRetrieval\" {\n\t\t\tfmt.Println(\"SHA256 Tree Hash:\", job.SHA256TreeHash)\n\t\t}\n\t\tfmt.Println(\"SNS Topic:\", job.SNSTopic)\n\t\tfmt.Println(\"Status Code:\", job.StatusCode)\n\t\tfmt.Println(\"Status Message:\", job.StatusMessage)\n\t\tfmt.Println(\"Vault ARN:\", job.VaultARN)\n\n\tcase \"get\":\n\t\tif len(args) < 1 {\n\t\t\tfmt.Println(\"no job sub command\")\n\t\t\tos.Exit(1)\n\t\t}\n\t\tsubCommand := args[0]\n\t\targs = args[1:]\n\n\t\tswitch subCommand {\n\t\tcase \"inventory\":\n\t\t\tif len(args) < 2 {\n\t\t\t\tfmt.Println(\"no vault and\/or job id\")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tvault := args[0]\n\t\t\tjob := args[1]\n\n\t\t\tinventory, err := connection.GetInventoryJob(vault, job)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\n\t\t\tfmt.Println(\"Vault ARN:\", inventory.VaultARN)\n\t\t\tfmt.Println(\"Inventory Date:\", inventory.InventoryDate)\n\t\t\tfor _, v := range inventory.ArchiveList {\n\t\t\t\tfmt.Println(\"Archive ID:\", v.ArchiveId)\n\t\t\t\tfmt.Println(\"Archive Description:\", v.ArchiveDescription)\n\t\t\t\tfmt.Println(\"Creation Date:\", v.CreationDate)\n\t\t\t\tfmt.Println(\"Size:\", v.Size)\n\t\t\t\tfmt.Println(\"SHA256 Tree Hash:\", v.SHA256TreeHash)\n\t\t\t\tfmt.Println()\n\t\t\t}\n\n\t\tcase \"archive\":\n\t\t\t\/\/ TODO retrieve parts and handle errors\n\t\t\tif len(args) < 3 {\n\t\t\t\tfmt.Println(\"no vault, job id, and\/or output file\")\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tvault := args[0]\n\t\t\tjob := args[1]\n\t\t\tfileName := args[2]\n\n\t\t\tfile, err := os.Create(fileName)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tdefer file.Close()\n\n\t\t\tarchive, err := connection.GetRetrievalJob(vault, job, 0, 0)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tdefer archive.Close()\n\n\t\t\t_, err = io.Copy(file, archive)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\tdefault:\n\t\t\tfmt.Println(\"unknown job sub command:\", subCommand)\n\t\t\tos.Exit(1)\n\t\t}\n\tdefault:\n\t\tfmt.Println(\"unknown job command:\", command)\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"code.google.com\/p\/gosshold\/ssh\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"launchpad.net\/goamz\/aws\"\n\t\"launchpad.net\/goamz\/ec2\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nvar ErrNoInstancesFound = errors.New(\"No instances found; run provisioner first\")\n\nconst AfterDeployHookScript = \".moltar-after-deploy\"\n\ntype Job struct {\n\tregion aws.Region\n\tenv string\n\tcluster string\n\tproject string\n\tpackageNames []string\n\tinstances []*ec2.Instance\n\tinstanceSshClients map[*ec2.Instance]*ssh.ClientConn\n\tinstanceLoggers map[*ec2.Instance]*log.Logger\n\toutput io.Writer\n\tlogger *log.Logger\n\tinstallVersionRev uint64\n\tshouldOutputAnsiEscapes bool\n}\n\nfunc getInstancesTagged(ec2client *ec2.EC2, project string, env string, cluster string, packageName string) (instances []*ec2.Instance, err error) {\n\tinstanceFilter := ec2.NewFilter()\n\tinstanceFilter.Add(\"instance-state-name\", \"running\")\n\tinstanceFilter.Add(\"tag:Project\", project)\n\tqueryEnv := env\n\tif env == \"\" {\n\t\tqueryEnv = \"*\"\n\t}\n\tinstanceFilter.Add(\"tag:Environment\", queryEnv)\n\tif cluster != \"\" {\n\t\tinstanceFilter.Add(\"tag:Cluster\", cluster)\n\t}\n\n\tif packageName != \"\" {\n\t\tinstanceFilter.Add(\"tag:Packages\", \"*|\"+packageName+\"|*\")\n\t}\n\n\tinstancesResp, err := ec2client.Instances(nil, instanceFilter)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tinstances = make([]*ec2.Instance, 0, 20)\n\tfor _, res := range instancesResp.Reservations {\n\t\tfor _, inst := range res.Instances {\n\t\t\tnewInst := inst\n\t\t\tinstances = append(instances, &newInst)\n\t\t}\n\t}\n\n\treturn instances, nil\n}\n\nfunc NewJob(awsConf AWSConf, env string, cluster string, project string, packageNames []string, output io.Writer, shouldOutputAnsiEscapes bool) (job *Job, err error) {\n\te := ec2.New(awsConf.Auth, awsConf.Region)\n\n\tvar searchPackageNames []string\n\tif len(packageNames) == 0 {\n\t\tsearchPackageNames = []string{\"\"}\n\t} else {\n\t\tsearchPackageNames = packageNames[:]\n\t}\n\n\tinstancesSet := map[string]*ec2.Instance{}\n\tfor _, packageName := range searchPackageNames {\n\t\tinstances, err := getInstancesTagged(e, project, env, cluster, packageName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, instance := range instances {\n\t\t\tinstancesSet[instance.InstanceId] = instance\n\t\t}\n\t}\n\n\tinstances := make([]*ec2.Instance, 0, len(instancesSet))\n\tfor _, instance := range instancesSet {\n\t\tinstances = append(instances, instance)\n\t}\n\n\tif len(instances) == 0 {\n\t\treturn nil, ErrNoInstancesFound\n\t}\n\n\tlogger := log.New(output, \"\", 0)\n\n\treturn &Job{region: awsConf.Region, env: env, cluster: cluster,\n\t\tproject: project, packageNames: packageNames, instances: instances,\n\t\tinstanceSshClients: make(map[*ec2.Instance]*ssh.ClientConn),\n\t\tinstanceLoggers: make(map[*ec2.Instance]*log.Logger),\n\t\toutput: output, logger: logger,\n\t\tshouldOutputAnsiEscapes: shouldOutputAnsiEscapes}, nil\n}\n\nfunc (self *Job) Exec(cmd string) (errs []error) {\n\terrChan := make(chan error, len(self.instances))\n\terrs = make([]error, 0, len(self.instances))\n\n\tfor _, instance := range self.instances {\n\t\tgo func(inst ec2.Instance) {\n\t\t\tconn, err := self.sshClient(&inst)\n\t\t\tif err != nil {\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlogger := self.instanceLogger(&inst)\n\t\t\t_, returnChan, err := sshRunOutLogger(conn, cmd, logger, nil)\n\t\t\tif err == nil {\n\t\t\t\terr = <-returnChan\n\t\t\t} else {\n\t\t\t\terrChan <- err\n\t\t\t}\n\t\t\terrChan <- err\n\t\t}(*instance)\n\t}\n\tstartStdinRead()\n\n\tfor _ = range self.instances {\n\t\tif err := <-errChan; err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (self *Job) ExecList(cmds []string) (errs []error) {\n\tfor _, cmd := range cmds {\n\t\tfmt.Printf(\"\\n%s\\n\\n\", cmd)\n\t\terrs = self.Exec(cmd)\n\t\tif len(errs) > 0 {\n\t\t\treturn\n\t\t}\n\t}\n\treturn []error{}\n}\n\nfunc (self *Job) Deploy() (errs []error) {\n\terrs = self.ExecList([]string{\n\t\t\"sudo apt-get update -qq\",\n\t\t\"sudo DEBIAN_FRONTEND=noninteractive apt-get install -qy '\" +\n\t\t\tstrings.Join(self.packageNames, \"' '\") + \"'\",\n\t\t\"sudo DEBIAN_FRONTEND=noninteractive apt-get autoremove -yq\",\n\t\t\"sudo apt-get clean -yq\",\n\t})\n\tif len(errs) > 0 {\n\t\treturn\n\t}\n\n\tif _, err := os.Stat(AfterDeployHookScript); err != nil {\n\t\treturn\n\t}\n\n\tprepareExec()\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn\n\t}\n\tsyscall.Exec(path.Join(pwd, AfterDeployHookScript),\n\t\t[]string{AfterDeployHookScript},\n\t\tappend(os.Environ(), \"ENV=\"+self.env))\n\n\treturn\n}\n\nfunc (self *Job) Ssh(criteria string, sshArgs []string) (err error) {\n\tsshPath, err := exec.LookPath(\"ssh\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar instance *ec2.Instance\n\tmatches := self.instances\n\n\tif criteria != \"-1\" {\n\t\tif criteria != \"\" {\n\t\t\tmatches = make([]*ec2.Instance, 0, len(self.instances))\n\t\t\tfor _, instance = range self.instances {\n\t\t\t\tif matchCriteria(instance, criteria) {\n\t\t\t\t\tinstanceLogName(instance)\n\t\t\t\t\tmatches = append(matches, instance)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(matches) == 0 {\n\t\t\tself.logger.Fatalf(\"Instance '%s' not found\\n\", criteria)\n\t\t} else if len(matches) > 1 {\n\t\t\tself.logger.Printf(\"Multiple matches for '%s' found:\\n\", criteria)\n\t\t\tself.printInstances(matches)\n\t\t\tself.logger.Fatal(\"\")\n\t\t}\n\t}\n\n\tinstance = matches[0]\n\n\texecArgs := []string{\"ssh\"}\n\tkeyFile := self.keyFile()\n\tif keyFile != \"\" {\n\t\texecArgs = append(execArgs, \"-i\", keyFile)\n\t}\n\n\texecArgs = append(execArgs,\n\t\tfmt.Sprintf(\"%s@%s\", self.sshUserName(instance), instance.DNSName))\n\texecArgs = append(execArgs, sshArgs...)\n\n\tfPrintShellCommand(self.output, \"\", execArgs)\n\tfmt.Fprintln(self.output, \"\")\n\n\tprepareExec()\n\terr = syscall.Exec(sshPath, execArgs, os.Environ())\n\treturn\n}\n\nfunc (self *Job) Scp(args []string) (err error) {\n\tscpPath, err := exec.LookPath(\"scp\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefaultArgs := []string{\"-q\"}\n\tkeyFile := self.keyFile()\n\tif keyFile != \"\" {\n\t\tdefaultArgs = append(defaultArgs, []string{\n\t\t\t\"-i\", keyFile,\n\t\t}...)\n\t}\n\tscpArgs := make([]string, len(defaultArgs)+len(args))\n\tcopy(scpArgs, defaultArgs)\n\tcopy(scpArgs[len(defaultArgs):], args)\n\n\tvar dstIndex = -1\n\tfor i, arg := range scpArgs {\n\t\tif arg[0] == ':' {\n\t\t\tdstIndex = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif dstIndex == -1 {\n\t\tdstIndex = len(scpArgs)\n\t\tscpArgs = append(scpArgs, \":\")\n\t}\n\n\terrChan := make(chan error, len(self.instances))\n\n\tfor _, instance := range self.instances {\n\t\tgo func(instance *ec2.Instance) {\n\t\t\tvar err error\n\t\t\targs := make([]string, len(scpArgs))\n\t\t\tcopy(args, scpArgs)\n\n\t\t\tlogger := self.instanceLogger(instance)\n\t\t\targs[dstIndex] = fmt.Sprintf(\"%s@%s%s\",\n\t\t\t\tself.sshUserName(instance), instance.DNSName, args[dstIndex])\n\n\t\t\tfPrintShellCommand(self.output, \"scp\", args)\n\n\t\t\tcmd := exec.Command(scpPath, args...)\n\t\t\toutPipeRead, outPipeWrite, err := os.Pipe()\n\t\t\tif err != nil {\n\t\t\t\tlogger.Printf(\"error creating pipe: %s\\n\", err)\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcmd.Stdout = outPipeWrite\n\t\t\tcmd.Stderr = outPipeWrite\n\n\t\t\terr = cmd.Start()\n\t\t\tif err != nil {\n\t\t\t\tlogger.Printf(\"error starting scp: %s\\n\", err)\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\toutPipeWrite.Close()\n\t\t\tstdoutReader := bufio.NewReader(outPipeRead)\n\t\t\tfor {\n\t\t\t\tin, err := stdoutReader.ReadString('\\n')\n\t\t\t\tif (err == io.EOF && in != \"\") || err == nil {\n\t\t\t\t\tlogger.Print(in)\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\terr = cmd.Wait()\n\t\t\toutPipeRead.Close()\n\t\t\tif err != nil {\n\t\t\t\tlogger.Printf(\"error running scp: %s\\n\", err)\n\t\t\t}\n\t\t\terrChan <- err\n\t\t}(instance)\n\t}\n\n\tvar scpErr error\n\tfor _ = range self.instances {\n\t\tscpErr = <-errChan\n\t\tif err == nil && scpErr != nil {\n\t\t\terr = errors.New(\"at least one scp failed\")\n\t\t}\n\t}\n\treturn\n}\n\nfunc (self *Job) List() (err error) {\n\tself.printInstances(self.instances)\n\treturn nil\n}\n\nfunc (self *Job) Hostname(instanceName string) (err error) {\n\tfor _, instance := range self.instances {\n\t\tif instanceLogName(instance) == instanceName {\n\t\t\tfmt.Fprintln(self.output, instance.DNSName)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn errors.New(instanceName + \" not found\")\n}\n\n\/\/\/ Subtasks\n\nfunc (self *Job) sshClient(i *ec2.Instance) (conn *ssh.ClientConn, err error) {\n\tconn = self.instanceSshClients[i]\n\tif conn == nil {\n\t\tconn, err = self.sshDial(i)\n\t\tif err == nil {\n\t\t\tself.instanceSshClients[i] = conn\n\t\t}\n\t}\n\treturn\n}\n\nfunc (self *Job) instanceLogger(i *ec2.Instance) (logger *log.Logger) {\n\tlogger = self.instanceLoggers[i]\n\tif logger == nil {\n\t\tprefix := instanceLogName(i)\n\t\tif self.shouldOutputAnsiEscapes {\n\t\t\tprefix = \"\\033[1m\" + prefix + \"\\033[0m\"\n\t\t}\n\t\tlogger = log.New(self.output, prefix+\" \", 0)\n\t\tself.instanceLoggers[i] = logger\n\t}\n\treturn\n}\n\nfunc (self *Job) keyFile() (path string) {\n\tfileName := self.project\n\tif len(self.packageNames) > 0 {\n\t\tfileName += fmt.Sprintf(\"-%s\", self.packageNames[0])\n\t}\n\tpath = fmt.Sprintf(os.ExpandEnv(\"${HOME}\/Google Drive\/%s Ops\/Keys\/%s.pem\"),\n\t\tself.project, fileName)\n\n\tif _, err := os.Stat(path); err == nil {\n\t\treturn path\n\t} else {\n\t\treturn \"\"\n\t}\n}\n\nfunc (self *Job) sshUserName(_ *ec2.Instance) (userName string) {\n\t\/\/ TODO: be more clever about this\n\treturn \"ubuntu\"\n}\n\nfunc (self *Job) sshDial(i *ec2.Instance) (conn *ssh.ClientConn, err error) {\n\tconn, err = sshDial(i.DNSName+\":22\", self.sshUserName(i), self.keyFile())\n\treturn\n}\n\nfunc (self *Job) printInstances(instances []*ec2.Instance) {\n\tfields := make([][]string, len(instances))\n\tfor i, instance := range instances {\n\t\tfields[i] = []string{instance.InstanceId, instanceLogName(instance),\n\t\t\tinstance.DNSName}\n\t}\n\tfmt.Fprint(self.output, formatTable(fields))\n}\n\nfunc instanceLogName(i *ec2.Instance) string {\n\tfor _, tag := range i.Tags {\n\t\tif tag.Key == \"Name\" && tag.Value != \"\" {\n\t\t\treturn tag.Value\n\t\t}\n\t}\n\treturn i.InstanceId\n}\n\nfunc fPrintShellCommand(w io.Writer, n string, cmd []string) {\n\tif n != \"\" {\n\t\tfmt.Fprintf(w, \"%s \", n)\n\t}\n\tfor i, cmdPart := range cmd {\n\t\t\/\/ TODO: this escaping will work most of the time, but isn't that great\n\t\tif strings.ContainsAny(cmdPart, \" $\") {\n\t\t\tfmt.Fprintf(w, \"'%s'\", cmdPart)\n\t\t} else {\n\t\t\tfmt.Fprint(w, cmdPart)\n\t\t}\n\t\tif i < (len(cmd) - 1) {\n\t\t\tfmt.Fprint(w, \" \")\n\t\t}\n\t}\n\tfmt.Fprint(w, \"\\n\")\n}\n\nfunc matchCriteria(instance *ec2.Instance, criteria string) bool {\n\tvar found bool\n\tfor _, value := range strings.Split(criteria, \"\/\") {\n\t\tfound = false\n\t\tfor _, tag := range instance.Tags {\n\t\t\tif strings.Contains(tag.Value, value) {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !strings.Contains(instance.InstanceId, value) && !strings.Contains(instance.PrivateDNSName, value) && !strings.Contains(instance.DNSName, value) && found == false {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc prepareExec() {\n\t\/* There appears to be a bug with goamz where some fds are left open, and\n\t * just closing them causes a crash. If we ask all fds > 2 to close on\n\t * exec, all is well.\n\t *\/\n\tvar rlimit syscall.Rlimit\n\terr := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tmaxFds := int(rlimit.Cur)\n\tfor fd := 3; fd < maxFds; fd++ {\n\t\tsyscall.CloseOnExec(fd)\n\t}\n}\n<commit_msg>Package matching is now \"all instances\" rather than \"any instances\"<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"code.google.com\/p\/gosshold\/ssh\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"launchpad.net\/goamz\/aws\"\n\t\"launchpad.net\/goamz\/ec2\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nvar ErrNoInstancesFound = errors.New(\"No instances found; run provisioner first\")\n\nconst AfterDeployHookScript = \".moltar-after-deploy\"\n\ntype Job struct {\n\tregion aws.Region\n\tenv string\n\tcluster string\n\tproject string\n\tpackageNames []string\n\tinstances []*ec2.Instance\n\tinstanceSshClients map[*ec2.Instance]*ssh.ClientConn\n\tinstanceLoggers map[*ec2.Instance]*log.Logger\n\toutput io.Writer\n\tlogger *log.Logger\n\tinstallVersionRev uint64\n\tshouldOutputAnsiEscapes bool\n}\n\nfunc getInstancesTagged(ec2client *ec2.EC2, project string, env string, cluster string, packageName string) (instances []*ec2.Instance, err error) {\n\tinstanceFilter := ec2.NewFilter()\n\tinstanceFilter.Add(\"instance-state-name\", \"running\")\n\tinstanceFilter.Add(\"tag:Project\", project)\n\tqueryEnv := env\n\tif env == \"\" {\n\t\tqueryEnv = \"*\"\n\t}\n\tinstanceFilter.Add(\"tag:Environment\", queryEnv)\n\tif cluster != \"\" {\n\t\tinstanceFilter.Add(\"tag:Cluster\", cluster)\n\t}\n\n\tif packageName != \"\" {\n\t\tinstanceFilter.Add(\"tag:Packages\", \"*|\"+packageName+\"|*\")\n\t}\n\n\tinstancesResp, err := ec2client.Instances(nil, instanceFilter)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tinstances = make([]*ec2.Instance, 0, 20)\n\tfor _, res := range instancesResp.Reservations {\n\t\tfor _, inst := range res.Instances {\n\t\t\tnewInst := inst\n\t\t\tinstances = append(instances, &newInst)\n\t\t}\n\t}\n\n\treturn instances, nil\n}\n\nfunc NewJob(awsConf AWSConf, env string, cluster string, project string, packageNames []string, output io.Writer, shouldOutputAnsiEscapes bool) (job *Job, err error) {\n\te := ec2.New(awsConf.Auth, awsConf.Region)\n\n\tvar searchPackageNames []string\n\tif len(packageNames) == 0 {\n\t\tsearchPackageNames = []string{\"\"}\n\t} else {\n\t\tsearchPackageNames = packageNames[:]\n\t}\n\n\tinstancesSet := map[string]*ec2.Instance{}\n\tinstancesCount := map[string]int{}\n\tfor _, packageName := range searchPackageNames {\n\t\tinstances, err := getInstancesTagged(e, project, env, cluster, packageName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, instance := range instances {\n\t\t\tinstancesSet[instance.InstanceId] = instance\n\t\t\tinstancesCount[instance.InstanceId] += 1\n\t\t}\n\t}\n\n\tinstances := make([]*ec2.Instance, 0, len(instancesSet))\n\tfor _, instance := range instancesSet {\n\t\tif instancesCount[instance.InstanceId] == len(searchPackageNames) {\n\t\t\tinstances = append(instances, instance)\n\t\t}\n\t}\n\n\tif len(instances) == 0 {\n\t\treturn nil, ErrNoInstancesFound\n\t}\n\n\tlogger := log.New(output, \"\", 0)\n\n\treturn &Job{region: awsConf.Region, env: env, cluster: cluster,\n\t\tproject: project, packageNames: packageNames, instances: instances,\n\t\tinstanceSshClients: make(map[*ec2.Instance]*ssh.ClientConn),\n\t\tinstanceLoggers: make(map[*ec2.Instance]*log.Logger),\n\t\toutput: output, logger: logger,\n\t\tshouldOutputAnsiEscapes: shouldOutputAnsiEscapes}, nil\n}\n\nfunc (self *Job) Exec(cmd string) (errs []error) {\n\terrChan := make(chan error, len(self.instances))\n\terrs = make([]error, 0, len(self.instances))\n\n\tfor _, instance := range self.instances {\n\t\tgo func(inst ec2.Instance) {\n\t\t\tconn, err := self.sshClient(&inst)\n\t\t\tif err != nil {\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tlogger := self.instanceLogger(&inst)\n\t\t\t_, returnChan, err := sshRunOutLogger(conn, cmd, logger, nil)\n\t\t\tif err == nil {\n\t\t\t\terr = <-returnChan\n\t\t\t} else {\n\t\t\t\terrChan <- err\n\t\t\t}\n\t\t\terrChan <- err\n\t\t}(*instance)\n\t}\n\tstartStdinRead()\n\n\tfor _ = range self.instances {\n\t\tif err := <-errChan; err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\treturn\n}\n\nfunc (self *Job) ExecList(cmds []string) (errs []error) {\n\tfor _, cmd := range cmds {\n\t\tfmt.Printf(\"\\n%s\\n\\n\", cmd)\n\t\terrs = self.Exec(cmd)\n\t\tif len(errs) > 0 {\n\t\t\treturn\n\t\t}\n\t}\n\treturn []error{}\n}\n\nfunc (self *Job) Deploy() (errs []error) {\n\terrs = self.ExecList([]string{\n\t\t\"sudo apt-get update -qq\",\n\t\t\"sudo DEBIAN_FRONTEND=noninteractive apt-get install -qy '\" +\n\t\t\tstrings.Join(self.packageNames, \"' '\") + \"'\",\n\t\t\"sudo DEBIAN_FRONTEND=noninteractive apt-get autoremove -yq\",\n\t\t\"sudo apt-get clean -yq\",\n\t})\n\tif len(errs) > 0 {\n\t\treturn\n\t}\n\n\tif _, err := os.Stat(AfterDeployHookScript); err != nil {\n\t\treturn\n\t}\n\n\tprepareExec()\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn\n\t}\n\tsyscall.Exec(path.Join(pwd, AfterDeployHookScript),\n\t\t[]string{AfterDeployHookScript},\n\t\tappend(os.Environ(), \"ENV=\"+self.env))\n\n\treturn\n}\n\nfunc (self *Job) Ssh(criteria string, sshArgs []string) (err error) {\n\tsshPath, err := exec.LookPath(\"ssh\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar instance *ec2.Instance\n\tmatches := self.instances\n\n\tif criteria != \"-1\" {\n\t\tif criteria != \"\" {\n\t\t\tmatches = make([]*ec2.Instance, 0, len(self.instances))\n\t\t\tfor _, instance = range self.instances {\n\t\t\t\tif matchCriteria(instance, criteria) {\n\t\t\t\t\tinstanceLogName(instance)\n\t\t\t\t\tmatches = append(matches, instance)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif len(matches) == 0 {\n\t\t\tself.logger.Fatalf(\"Instance '%s' not found\\n\", criteria)\n\t\t} else if len(matches) > 1 {\n\t\t\tself.logger.Printf(\"Multiple matches for '%s' found:\\n\", criteria)\n\t\t\tself.printInstances(matches)\n\t\t\tself.logger.Fatal(\"\")\n\t\t}\n\t}\n\n\tinstance = matches[0]\n\n\texecArgs := []string{\"ssh\"}\n\tkeyFile := self.keyFile()\n\tif keyFile != \"\" {\n\t\texecArgs = append(execArgs, \"-i\", keyFile)\n\t}\n\n\texecArgs = append(execArgs,\n\t\tfmt.Sprintf(\"%s@%s\", self.sshUserName(instance), instance.DNSName))\n\texecArgs = append(execArgs, sshArgs...)\n\n\tfPrintShellCommand(self.output, \"\", execArgs)\n\tfmt.Fprintln(self.output, \"\")\n\n\tprepareExec()\n\terr = syscall.Exec(sshPath, execArgs, os.Environ())\n\treturn\n}\n\nfunc (self *Job) Scp(args []string) (err error) {\n\tscpPath, err := exec.LookPath(\"scp\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefaultArgs := []string{\"-q\"}\n\tkeyFile := self.keyFile()\n\tif keyFile != \"\" {\n\t\tdefaultArgs = append(defaultArgs, []string{\n\t\t\t\"-i\", keyFile,\n\t\t}...)\n\t}\n\tscpArgs := make([]string, len(defaultArgs)+len(args))\n\tcopy(scpArgs, defaultArgs)\n\tcopy(scpArgs[len(defaultArgs):], args)\n\n\tvar dstIndex = -1\n\tfor i, arg := range scpArgs {\n\t\tif arg[0] == ':' {\n\t\t\tdstIndex = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif dstIndex == -1 {\n\t\tdstIndex = len(scpArgs)\n\t\tscpArgs = append(scpArgs, \":\")\n\t}\n\n\terrChan := make(chan error, len(self.instances))\n\n\tfor _, instance := range self.instances {\n\t\tgo func(instance *ec2.Instance) {\n\t\t\tvar err error\n\t\t\targs := make([]string, len(scpArgs))\n\t\t\tcopy(args, scpArgs)\n\n\t\t\tlogger := self.instanceLogger(instance)\n\t\t\targs[dstIndex] = fmt.Sprintf(\"%s@%s%s\",\n\t\t\t\tself.sshUserName(instance), instance.DNSName, args[dstIndex])\n\n\t\t\tfPrintShellCommand(self.output, \"scp\", args)\n\n\t\t\tcmd := exec.Command(scpPath, args...)\n\t\t\toutPipeRead, outPipeWrite, err := os.Pipe()\n\t\t\tif err != nil {\n\t\t\t\tlogger.Printf(\"error creating pipe: %s\\n\", err)\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tcmd.Stdout = outPipeWrite\n\t\t\tcmd.Stderr = outPipeWrite\n\n\t\t\terr = cmd.Start()\n\t\t\tif err != nil {\n\t\t\t\tlogger.Printf(\"error starting scp: %s\\n\", err)\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\toutPipeWrite.Close()\n\t\t\tstdoutReader := bufio.NewReader(outPipeRead)\n\t\t\tfor {\n\t\t\t\tin, err := stdoutReader.ReadString('\\n')\n\t\t\t\tif (err == io.EOF && in != \"\") || err == nil {\n\t\t\t\t\tlogger.Print(in)\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\terr = cmd.Wait()\n\t\t\toutPipeRead.Close()\n\t\t\tif err != nil {\n\t\t\t\tlogger.Printf(\"error running scp: %s\\n\", err)\n\t\t\t}\n\t\t\terrChan <- err\n\t\t}(instance)\n\t}\n\n\tvar scpErr error\n\tfor _ = range self.instances {\n\t\tscpErr = <-errChan\n\t\tif err == nil && scpErr != nil {\n\t\t\terr = errors.New(\"at least one scp failed\")\n\t\t}\n\t}\n\treturn\n}\n\nfunc (self *Job) List() (err error) {\n\tself.printInstances(self.instances)\n\treturn nil\n}\n\nfunc (self *Job) Hostname(instanceName string) (err error) {\n\tfor _, instance := range self.instances {\n\t\tif instanceLogName(instance) == instanceName {\n\t\t\tfmt.Fprintln(self.output, instance.DNSName)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn errors.New(instanceName + \" not found\")\n}\n\n\/\/\/ Subtasks\n\nfunc (self *Job) sshClient(i *ec2.Instance) (conn *ssh.ClientConn, err error) {\n\tconn = self.instanceSshClients[i]\n\tif conn == nil {\n\t\tconn, err = self.sshDial(i)\n\t\tif err == nil {\n\t\t\tself.instanceSshClients[i] = conn\n\t\t}\n\t}\n\treturn\n}\n\nfunc (self *Job) instanceLogger(i *ec2.Instance) (logger *log.Logger) {\n\tlogger = self.instanceLoggers[i]\n\tif logger == nil {\n\t\tprefix := instanceLogName(i)\n\t\tif self.shouldOutputAnsiEscapes {\n\t\t\tprefix = \"\\033[1m\" + prefix + \"\\033[0m\"\n\t\t}\n\t\tlogger = log.New(self.output, prefix+\" \", 0)\n\t\tself.instanceLoggers[i] = logger\n\t}\n\treturn\n}\n\nfunc (self *Job) keyFile() (path string) {\n\tfileName := self.project\n\tif len(self.packageNames) > 0 {\n\t\tfileName += fmt.Sprintf(\"-%s\", self.packageNames[0])\n\t}\n\tpath = fmt.Sprintf(os.ExpandEnv(\"${HOME}\/Google Drive\/%s Ops\/Keys\/%s.pem\"),\n\t\tself.project, fileName)\n\n\tif _, err := os.Stat(path); err == nil {\n\t\treturn path\n\t} else {\n\t\treturn \"\"\n\t}\n}\n\nfunc (self *Job) sshUserName(_ *ec2.Instance) (userName string) {\n\t\/\/ TODO: be more clever about this\n\treturn \"ubuntu\"\n}\n\nfunc (self *Job) sshDial(i *ec2.Instance) (conn *ssh.ClientConn, err error) {\n\tconn, err = sshDial(i.DNSName+\":22\", self.sshUserName(i), self.keyFile())\n\treturn\n}\n\nfunc (self *Job) printInstances(instances []*ec2.Instance) {\n\tfields := make([][]string, len(instances))\n\tfor i, instance := range instances {\n\t\tfields[i] = []string{instance.InstanceId, instanceLogName(instance),\n\t\t\tinstance.DNSName}\n\t}\n\tfmt.Fprint(self.output, formatTable(fields))\n}\n\nfunc instanceLogName(i *ec2.Instance) string {\n\tfor _, tag := range i.Tags {\n\t\tif tag.Key == \"Name\" && tag.Value != \"\" {\n\t\t\treturn tag.Value\n\t\t}\n\t}\n\treturn i.InstanceId\n}\n\nfunc fPrintShellCommand(w io.Writer, n string, cmd []string) {\n\tif n != \"\" {\n\t\tfmt.Fprintf(w, \"%s \", n)\n\t}\n\tfor i, cmdPart := range cmd {\n\t\t\/\/ TODO: this escaping will work most of the time, but isn't that great\n\t\tif strings.ContainsAny(cmdPart, \" $\") {\n\t\t\tfmt.Fprintf(w, \"'%s'\", cmdPart)\n\t\t} else {\n\t\t\tfmt.Fprint(w, cmdPart)\n\t\t}\n\t\tif i < (len(cmd) - 1) {\n\t\t\tfmt.Fprint(w, \" \")\n\t\t}\n\t}\n\tfmt.Fprint(w, \"\\n\")\n}\n\nfunc matchCriteria(instance *ec2.Instance, criteria string) bool {\n\tvar found bool\n\tfor _, value := range strings.Split(criteria, \"\/\") {\n\t\tfound = false\n\t\tfor _, tag := range instance.Tags {\n\t\t\tif strings.Contains(tag.Value, value) {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !strings.Contains(instance.InstanceId, value) && !strings.Contains(instance.PrivateDNSName, value) && !strings.Contains(instance.DNSName, value) && found == false {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc prepareExec() {\n\t\/* There appears to be a bug with goamz where some fds are left open, and\n\t * just closing them causes a crash. If we ask all fds > 2 to close on\n\t * exec, all is well.\n\t *\/\n\tvar rlimit syscall.Rlimit\n\terr := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tmaxFds := int(rlimit.Cur)\n\tfor fd := 3; fd < maxFds; fd++ {\n\t\tsyscall.CloseOnExec(fd)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013-2014 Aerospike, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage aerospike\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"hash\"\n\n\t\"github.com\/aerospike\/aerospike-client-go\/pkg\/ripemd160\"\n\t. \"github.com\/aerospike\/aerospike-client-go\/types\"\n\tParticleType \"github.com\/aerospike\/aerospike-client-go\/types\/particle_type\"\n\tBuffer \"github.com\/aerospike\/aerospike-client-go\/utils\/buffer\"\n)\n\n\/\/ Key is the unique record identifier. Records can be identified using a specified namespace,\n\/\/ an optional set name, and a user defined key which must be unique within a set.\n\/\/ Records can also be identified by namespace\/digest which is the combination used\n\/\/ on the server.\ntype Key struct {\n\t\/\/ namespace. Equivalent to database name.\n\tnamespace string\n\n\t\/\/ Optional set name. Equivalent to database table.\n\tsetName string\n\n\t\/\/ Unique server hash value generated from set name and user key.\n\tdigest []byte\n\n\t\/\/ Original user key. This key is immediately converted to a hash digest.\n\t\/\/ This key is not used or returned by the server by default. If the user key needs\n\t\/\/ to persist on the server, use one of the following methods:\n\t\/\/\n\t\/\/ Set \"WritePolicy.sendKey\" to true. In this case, the key will be sent to the server for storage on writes\n\t\/\/ and retrieved on multi-record scans and queries.\n\t\/\/ Explicitly store and retrieve the key in a bin.\n\tuserKey Value\n}\n\n\/\/ Namespace returns key's namespace.\nfunc (ky *Key) Namespace() string {\n\treturn ky.namespace\n}\n\n\/\/ SetName returns key's set name.\nfunc (ky *Key) SetName() string {\n\treturn ky.setName\n}\n\n\/\/ Value returns key's value.\nfunc (ky *Key) Value() Value {\n\treturn ky.userKey\n}\n\n\/\/ Digest returns key digest.\nfunc (ky *Key) Digest() []byte {\n\treturn ky.digest\n}\n\n\/\/ Equals uses key digests to compare key equality.\nfunc (ky *Key) Equals(other *Key) bool {\n\treturn bytes.Equal(ky.digest, other.digest)\n}\n\n\/\/ String implements Stringer interface and returns string representation of key.\nfunc (ky *Key) String() string {\n\tif ky.userKey != nil {\n\t\treturn fmt.Sprintf(\"%s:%s:%s:%v\", ky.namespace, ky.setName, ky.userKey.String(), Buffer.BytesToHexString(ky.digest))\n\t}\n\treturn fmt.Sprintf(\"%s:%s::%v\", ky.namespace, ky.setName, Buffer.BytesToHexString(ky.digest))\n}\n\n\/\/ NewKey initializes a key from namespace, optional set name and user key.\n\/\/ The set name and user defined key are converted to a digest before sending to the server.\n\/\/ The server handles record identifiers by digest only.\nfunc NewKey(namespace string, setName string, key interface{}) (newKey *Key, err error) {\n\tnewKey = &Key{\n\t\tnamespace: namespace,\n\t\tsetName: setName,\n\t\tuserKey: NewValue(key),\n\t}\n\n\tnewKey.digest, err = computeDigest(&newKey.setName, NewValue(key))\n\n\treturn newKey, err\n}\n\n\/\/ Generate unique server hash value from set name, key type and user defined key.\n\/\/ The hash function is RIPEMD-160 (a 160 bit hash).\nfunc computeDigest(setName *string, key Value) ([]byte, error) {\n\tkeyType := key.GetType()\n\n\tif keyType == ParticleType.NULL {\n\t\treturn nil, NewAerospikeError(PARAMETER_ERROR, \"Invalid key: nil\")\n\t}\n\n\t\/\/ retrieve hash from hash pool\n\th := hashPool.Get().(hash.Hash)\n\th.Reset()\n\n\t\/\/ write will not fail; no error checking necessary\n\th.Write([]byte(*setName))\n\th.Write([]byte{byte(keyType)})\n\th.Write(key.getBytes())\n\n\t\/\/ put hash object back to the pool\n\thashPool.Put(h)\n\n\treturn h.Sum(nil), nil\n}\n\n\/\/ hash pool\nvar hashPool *Pool\n\nfunc init() {\n\thashPool = NewPool(1024)\n\thashPool.New = func() interface{} {\n\t\treturn ripemd160.New()\n\t}\n}\n<commit_msg>fixed minor bug<commit_after>\/\/ Copyright 2013-2014 Aerospike, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage aerospike\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"hash\"\n\n\t\"github.com\/aerospike\/aerospike-client-go\/pkg\/ripemd160\"\n\t. \"github.com\/aerospike\/aerospike-client-go\/types\"\n\tParticleType \"github.com\/aerospike\/aerospike-client-go\/types\/particle_type\"\n\tBuffer \"github.com\/aerospike\/aerospike-client-go\/utils\/buffer\"\n)\n\n\/\/ Key is the unique record identifier. Records can be identified using a specified namespace,\n\/\/ an optional set name, and a user defined key which must be unique within a set.\n\/\/ Records can also be identified by namespace\/digest which is the combination used\n\/\/ on the server.\ntype Key struct {\n\t\/\/ namespace. Equivalent to database name.\n\tnamespace string\n\n\t\/\/ Optional set name. Equivalent to database table.\n\tsetName string\n\n\t\/\/ Unique server hash value generated from set name and user key.\n\tdigest []byte\n\n\t\/\/ Original user key. This key is immediately converted to a hash digest.\n\t\/\/ This key is not used or returned by the server by default. If the user key needs\n\t\/\/ to persist on the server, use one of the following methods:\n\t\/\/\n\t\/\/ Set \"WritePolicy.sendKey\" to true. In this case, the key will be sent to the server for storage on writes\n\t\/\/ and retrieved on multi-record scans and queries.\n\t\/\/ Explicitly store and retrieve the key in a bin.\n\tuserKey Value\n}\n\n\/\/ Namespace returns key's namespace.\nfunc (ky *Key) Namespace() string {\n\treturn ky.namespace\n}\n\n\/\/ SetName returns key's set name.\nfunc (ky *Key) SetName() string {\n\treturn ky.setName\n}\n\n\/\/ Value returns key's value.\nfunc (ky *Key) Value() Value {\n\treturn ky.userKey\n}\n\n\/\/ Digest returns key digest.\nfunc (ky *Key) Digest() []byte {\n\treturn ky.digest\n}\n\n\/\/ Equals uses key digests to compare key equality.\nfunc (ky *Key) Equals(other *Key) bool {\n\treturn bytes.Equal(ky.digest, other.digest)\n}\n\n\/\/ String implements Stringer interface and returns string representation of key.\nfunc (ky *Key) String() string {\n\tif ky.userKey != nil {\n\t\treturn fmt.Sprintf(\"%s:%s:%s:%v\", ky.namespace, ky.setName, ky.userKey.String(), Buffer.BytesToHexString(ky.digest))\n\t}\n\treturn fmt.Sprintf(\"%s:%s::%v\", ky.namespace, ky.setName, Buffer.BytesToHexString(ky.digest))\n}\n\n\/\/ NewKey initializes a key from namespace, optional set name and user key.\n\/\/ The set name and user defined key are converted to a digest before sending to the server.\n\/\/ The server handles record identifiers by digest only.\nfunc NewKey(namespace string, setName string, key interface{}) (newKey *Key, err error) {\n\tnewKey = &Key{\n\t\tnamespace: namespace,\n\t\tsetName: setName,\n\t\tuserKey: NewValue(key),\n\t}\n\n\tnewKey.digest, err = computeDigest(&newKey.setName, NewValue(key))\n\n\treturn newKey, err\n}\n\n\/\/ Generate unique server hash value from set name, key type and user defined key.\n\/\/ The hash function is RIPEMD-160 (a 160 bit hash).\nfunc computeDigest(setName *string, key Value) ([]byte, error) {\n\tkeyType := key.GetType()\n\n\tif keyType == ParticleType.NULL {\n\t\treturn nil, NewAerospikeError(PARAMETER_ERROR, \"Invalid key: nil\")\n\t}\n\n\t\/\/ retrieve hash from hash pool\n\th := hashPool.Get().(hash.Hash)\n\th.Reset()\n\n\t\/\/ write will not fail; no error checking necessary\n\th.Write([]byte(*setName))\n\th.Write([]byte{byte(keyType)})\n\th.Write(key.getBytes())\n\n\tres := h.Sum(nil)\n\n\t\/\/ put hash object back to the pool\n\thashPool.Put(h)\n\n\treturn res, nil\n}\n\n\/\/ hash pool\nvar hashPool *Pool\n\nfunc init() {\n\thashPool = NewPool(4096)\n\thashPool.New = func() interface{} {\n\t\treturn ripemd160.New()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package ari\n\nconst (\n\t\/\/ ApplicationKey is the key kind for ARI Application resources.\n\tApplicationKey = \"application\"\n\n\t\/\/ BridgeKey is the key kind for the ARI Bridge resources.\n\tBridgeKey = \"bridge\"\n\n\t\/\/ ChannelKey is the key kind for the ARI Channel resource\n\tChannelKey = \"channel\"\n)\n\n\/\/ Key identifies a unique resource in the system\ntype Key struct {\n\t\/\/ Kind indicates the type of resource the Key points to. e.g., \"channel\",\n\t\/\/ \"bridge\", etc.\n\tKind string `json:\"kind\"`\n\n\t\/\/ ID indicates the unique identifier of the resource\n\tID string `json:\"id\"`\n\n\t\/\/ Node indicates the unique identifier of the Asterisk node on which the\n\t\/\/ resource exists or will be created\n\tNode string `json:\"node,omitempty\"`\n\n\t\/\/ Dialog indicates a named scope of the resource, for receiving events\n\tDialog string `json:\"dialog,omitempty\"`\n}\n\n\/\/ KeyOptionFunc is a functional argument alias for providing options for ARI keys\ntype KeyOptionFunc func(Key)\n\n\/\/ WithDialog sets the given dialog identifier on the key.\nfunc WithDialog(dialog string) KeyOptionFunc {\n\treturn func(key Key) {\n\t\tkey.Dialog = dialog\n\t}\n}\n\n\/\/ WithNode sets the given node identifier on the key.\nfunc WithNode(node string) KeyOptionFunc {\n\treturn func(key Key) {\n\t\tkey.Node = node\n\t}\n}\n\n\/\/ NewKey builds a new key given the kind, identifier, and any optional arguments.\nfunc NewKey(kind string, id string, opts ...KeyOptionFunc) *Key {\n\tk := Key{\n\t\tKind: kind,\n\t\tID: id,\n\t}\n\tfor _, o := range opts {\n\t\to(k)\n\t}\n\n\treturn &k\n}\n<commit_msg>ari.key - WithApp and Key helpers<commit_after>package ari\n\nconst (\n\t\/\/ ApplicationKey is the key kind for ARI Application resources.\n\tApplicationKey = \"application\"\n\n\t\/\/ BridgeKey is the key kind for the ARI Bridge resources.\n\tBridgeKey = \"bridge\"\n\n\t\/\/ ChannelKey is the key kind for the ARI Channel resource\n\tChannelKey = \"channel\"\n)\n\n\/\/ Key identifies a unique resource in the system\ntype Key struct {\n\t\/\/ Kind indicates the type of resource the Key points to. e.g., \"channel\",\n\t\/\/ \"bridge\", etc.\n\tKind string `json:\"kind\"`\n\n\t\/\/ ID indicates the unique identifier of the resource\n\tID string `json:\"id\"`\n\n\t\/\/ Node indicates the unique identifier of the Asterisk node on which the\n\t\/\/ resource exists or will be created\n\tNode string `json:\"node,omitempty\"`\n\n\t\/\/ Dialog indicates a named scope of the resource, for receiving events\n\tDialog string `json:\"dialog,omitempty\"`\n\n\t\/\/ App indiciates the ARI application that this key is bound to.\n\tApp string `json:\"app,omitempty\"`\n}\n\n\/\/ KeyOptionFunc is a functional argument alias for providing options for ARI keys\ntype KeyOptionFunc func(Key)\n\n\/\/ WithDialog sets the given dialog identifier on the key.\nfunc WithDialog(dialog string) KeyOptionFunc {\n\treturn func(key Key) {\n\t\tkey.Dialog = dialog\n\t}\n}\n\n\/\/ WithNode sets the given node identifier on the key.\nfunc WithNode(node string) KeyOptionFunc {\n\treturn func(key Key) {\n\t\tkey.Node = node\n\t}\n}\n\n\/\/ WithApp sets the given node identifier on the key.\nfunc WithApp(app string) KeyOptionFunc {\n\treturn func(key Key) {\n\t\tkey.App = app\n\t}\n}\n\n\/\/ WithParent copies the partial key fields Node, Application, Dialog from the parent key\nfunc WithParent(parent *Key) KeyOptionFunc {\n\treturn func(key Key) {\n\t\tkey.Node = parent.Node\n\t\tkey.Dialog = parent.Dialog\n\t\tkey.App = parent.App\n\t}\n}\n\n\/\/ NewKey builds a new key given the kind, identifier, and any optional arguments.\nfunc NewKey(kind string, id string, opts ...KeyOptionFunc) *Key {\n\tk := Key{\n\t\tKind: kind,\n\t\tID: id,\n\t}\n\tfor _, o := range opts {\n\t\to(k)\n\t}\n\n\treturn &k\n}\n\n\/\/ AppKey returns a key that is bound to the given application.\nfunc AppKey(app string) *Key {\n\treturn NewKey(\"\", \"\", WithApp(app))\n}\n\n\/\/ DialogKey returns a key that is bound to the given dialog.\nfunc DialogKey(dialog string) *Key {\n\treturn NewKey(\"\", \"\", WithDialog(dialog))\n}\n\n\/\/ NodeKey returns a key that is bound to the given application and node\nfunc NodeKey(app, node string) *Key {\n\treturn NewKey(\"\", \"\", WithApp(app), WithNode(node))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 go-dockerclient authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage docker\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/docker\/docker\/api\/types\/swarm\"\n)\n\n\/\/ NoSuchService is the error returned when a given service does not exist.\ntype NoSuchService struct {\n\tID string\n\tErr error\n}\n\nfunc (err *NoSuchService) Error() string {\n\tif err.Err != nil {\n\t\treturn err.Err.Error()\n\t}\n\treturn \"No such service: \" + err.ID\n}\n\n\/\/ CreateServiceOptions specify parameters to the CreateService function.\n\/\/\n\/\/ See https:\/\/goo.gl\/KrVjHz for more details.\ntype CreateServiceOptions struct {\n\tAuth AuthConfiguration `qs:\"-\"`\n\tswarm.ServiceSpec\n\tContext context.Context\n}\n\n\/\/ CreateService creates a new service, returning the service instance\n\/\/ or an error in case of failure.\n\/\/\n\/\/ See https:\/\/goo.gl\/KrVjHz for more details.\nfunc (c *Client) CreateService(opts CreateServiceOptions) (*swarm.Service, error) {\n\theaders, err := headersWithAuth(opts.Auth)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpath := \"\/services\/create?\" + queryString(opts)\n\tresp, err := c.do(http.MethodPost, path, doOptions{\n\t\theaders: headers,\n\t\tdata: opts.ServiceSpec,\n\t\tforceJSON: true,\n\t\tcontext: opts.Context,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tvar service swarm.Service\n\tif err := json.NewDecoder(resp.Body).Decode(&service); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &service, nil\n}\n\n\/\/ RemoveServiceOptions encapsulates options to remove a service.\n\/\/\n\/\/ See https:\/\/goo.gl\/Tqrtya for more details.\ntype RemoveServiceOptions struct {\n\tID string `qs:\"-\"`\n\tContext context.Context\n}\n\n\/\/ RemoveService removes a service, returning an error in case of failure.\n\/\/\n\/\/ See https:\/\/goo.gl\/Tqrtya for more details.\nfunc (c *Client) RemoveService(opts RemoveServiceOptions) error {\n\tpath := \"\/services\/\" + opts.ID\n\tresp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context})\n\tif err != nil {\n\t\tif e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {\n\t\t\treturn &NoSuchService{ID: opts.ID}\n\t\t}\n\t\treturn err\n\t}\n\tresp.Body.Close()\n\treturn nil\n}\n\n\/\/ UpdateServiceOptions specify parameters to the UpdateService function.\n\/\/\n\/\/ See https:\/\/goo.gl\/wu3MmS for more details.\ntype UpdateServiceOptions struct {\n\tAuth AuthConfiguration `qs:\"-\"`\n\tswarm.ServiceSpec `qs:\"-\"`\n\tContext context.Context\n\tVersion uint64\n\tRollback string\n}\n\n\/\/ UpdateService updates the service at ID with the options\n\/\/\n\/\/ See https:\/\/goo.gl\/wu3MmS for more details.\nfunc (c *Client) UpdateService(id string, opts UpdateServiceOptions) error {\n\theaders, err := headersWithAuth(opts.Auth)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := c.do(http.MethodPost, \"\/services\/\"+id+\"\/update?\"+queryString(opts), doOptions{\n\t\theaders: headers,\n\t\tdata: opts.ServiceSpec,\n\t\tforceJSON: true,\n\t\tcontext: opts.Context,\n\t})\n\tif err != nil {\n\t\tif e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {\n\t\t\treturn &NoSuchService{ID: id}\n\t\t}\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\treturn nil\n}\n\n\/\/ InspectService returns information about a service by its ID.\n\/\/\n\/\/ See https:\/\/goo.gl\/dHmr75 for more details.\nfunc (c *Client) InspectService(id string) (*swarm.Service, error) {\n\tpath := \"\/services\/\" + id\n\tresp, err := c.do(http.MethodGet, path, doOptions{})\n\tif err != nil {\n\t\tif e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {\n\t\t\treturn nil, &NoSuchService{ID: id}\n\t\t}\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tvar service swarm.Service\n\tif err := json.NewDecoder(resp.Body).Decode(&service); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &service, nil\n}\n\n\/\/ ListServicesOptions specify parameters to the ListServices function.\n\/\/\n\/\/ See https:\/\/goo.gl\/DwvNMd for more details.\ntype ListServicesOptions struct {\n\tFilters map[string][]string\n\tContext context.Context\n}\n\n\/\/ ListServices returns a slice of services matching the given criteria.\n\/\/\n\/\/ See https:\/\/goo.gl\/DwvNMd for more details.\nfunc (c *Client) ListServices(opts ListServicesOptions) ([]swarm.Service, error) {\n\tpath := \"\/services?\" + queryString(opts)\n\tresp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tvar services []swarm.Service\n\tif err := json.NewDecoder(resp.Body).Decode(&services); err != nil {\n\t\treturn nil, err\n\t}\n\treturn services, nil\n}\n\n\/\/ LogsServiceOptions represents the set of options used when getting logs from a\n\/\/ service.\ntype LogsServiceOptions struct {\n\tContext context.Context\n\tService string `qs:\"-\"`\n\tOutputStream io.Writer `qs:\"-\"`\n\tErrorStream io.Writer `qs:\"-\"`\n\tInactivityTimeout time.Duration `qs:\"-\"`\n\tTail string\n\tSince int64\n\n\t\/\/ Use raw terminal? Usually true when the container contains a TTY.\n\tRawTerminal bool `qs:\"-\"`\n\tFollow bool\n\tStdout bool\n\tStderr bool\n\tTimestamps bool\n\tDetails bool\n}\n\n\/\/ GetServiceLogs gets stdout and stderr logs from the specified service.\n\/\/\n\/\/ When LogsServiceOptions.RawTerminal is set to false, go-dockerclient will multiplex\n\/\/ the streams and send the containers stdout to LogsServiceOptions.OutputStream, and\n\/\/ stderr to LogsServiceOptions.ErrorStream.\n\/\/\n\/\/ When LogsServiceOptions.RawTerminal is true, callers will get the raw stream on\n\/\/ LogsServiceOptions.OutputStream.\nfunc (c *Client) GetServiceLogs(opts LogsServiceOptions) error {\n\tif opts.Service == \"\" {\n\t\treturn &NoSuchService{ID: opts.Service}\n\t}\n\tif opts.Tail == \"\" {\n\t\topts.Tail = \"all\"\n\t}\n\tpath := \"\/services\/\" + opts.Service + \"\/logs?\" + queryString(opts)\n\treturn c.stream(http.MethodGet, path, streamOptions{\n\t\tsetRawTerminal: opts.RawTerminal,\n\t\tstdout: opts.OutputStream,\n\t\tstderr: opts.ErrorStream,\n\t\tinactivityTimeout: opts.InactivityTimeout,\n\t\tcontext: opts.Context,\n\t})\n}\n<commit_msg>swarm_service: add Status to ListServicesOptions<commit_after>\/\/ Copyright 2016 go-dockerclient authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage docker\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/docker\/docker\/api\/types\/swarm\"\n)\n\n\/\/ NoSuchService is the error returned when a given service does not exist.\ntype NoSuchService struct {\n\tID string\n\tErr error\n}\n\nfunc (err *NoSuchService) Error() string {\n\tif err.Err != nil {\n\t\treturn err.Err.Error()\n\t}\n\treturn \"No such service: \" + err.ID\n}\n\n\/\/ CreateServiceOptions specify parameters to the CreateService function.\n\/\/\n\/\/ See https:\/\/goo.gl\/KrVjHz for more details.\ntype CreateServiceOptions struct {\n\tAuth AuthConfiguration `qs:\"-\"`\n\tswarm.ServiceSpec\n\tContext context.Context\n}\n\n\/\/ CreateService creates a new service, returning the service instance\n\/\/ or an error in case of failure.\n\/\/\n\/\/ See https:\/\/goo.gl\/KrVjHz for more details.\nfunc (c *Client) CreateService(opts CreateServiceOptions) (*swarm.Service, error) {\n\theaders, err := headersWithAuth(opts.Auth)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpath := \"\/services\/create?\" + queryString(opts)\n\tresp, err := c.do(http.MethodPost, path, doOptions{\n\t\theaders: headers,\n\t\tdata: opts.ServiceSpec,\n\t\tforceJSON: true,\n\t\tcontext: opts.Context,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tvar service swarm.Service\n\tif err := json.NewDecoder(resp.Body).Decode(&service); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &service, nil\n}\n\n\/\/ RemoveServiceOptions encapsulates options to remove a service.\n\/\/\n\/\/ See https:\/\/goo.gl\/Tqrtya for more details.\ntype RemoveServiceOptions struct {\n\tID string `qs:\"-\"`\n\tContext context.Context\n}\n\n\/\/ RemoveService removes a service, returning an error in case of failure.\n\/\/\n\/\/ See https:\/\/goo.gl\/Tqrtya for more details.\nfunc (c *Client) RemoveService(opts RemoveServiceOptions) error {\n\tpath := \"\/services\/\" + opts.ID\n\tresp, err := c.do(http.MethodDelete, path, doOptions{context: opts.Context})\n\tif err != nil {\n\t\tif e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {\n\t\t\treturn &NoSuchService{ID: opts.ID}\n\t\t}\n\t\treturn err\n\t}\n\tresp.Body.Close()\n\treturn nil\n}\n\n\/\/ UpdateServiceOptions specify parameters to the UpdateService function.\n\/\/\n\/\/ See https:\/\/goo.gl\/wu3MmS for more details.\ntype UpdateServiceOptions struct {\n\tAuth AuthConfiguration `qs:\"-\"`\n\tswarm.ServiceSpec `qs:\"-\"`\n\tContext context.Context\n\tVersion uint64\n\tRollback string\n}\n\n\/\/ UpdateService updates the service at ID with the options\n\/\/\n\/\/ See https:\/\/goo.gl\/wu3MmS for more details.\nfunc (c *Client) UpdateService(id string, opts UpdateServiceOptions) error {\n\theaders, err := headersWithAuth(opts.Auth)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := c.do(http.MethodPost, \"\/services\/\"+id+\"\/update?\"+queryString(opts), doOptions{\n\t\theaders: headers,\n\t\tdata: opts.ServiceSpec,\n\t\tforceJSON: true,\n\t\tcontext: opts.Context,\n\t})\n\tif err != nil {\n\t\tif e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {\n\t\t\treturn &NoSuchService{ID: id}\n\t\t}\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\treturn nil\n}\n\n\/\/ InspectService returns information about a service by its ID.\n\/\/\n\/\/ See https:\/\/goo.gl\/dHmr75 for more details.\nfunc (c *Client) InspectService(id string) (*swarm.Service, error) {\n\tpath := \"\/services\/\" + id\n\tresp, err := c.do(http.MethodGet, path, doOptions{})\n\tif err != nil {\n\t\tif e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {\n\t\t\treturn nil, &NoSuchService{ID: id}\n\t\t}\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tvar service swarm.Service\n\tif err := json.NewDecoder(resp.Body).Decode(&service); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &service, nil\n}\n\n\/\/ ListServicesOptions specify parameters to the ListServices function.\n\/\/\n\/\/ See https:\/\/goo.gl\/DwvNMd for more details.\ntype ListServicesOptions struct {\n\tFilters map[string][]string\n\tStatus bool\n\tContext context.Context\n}\n\n\/\/ ListServices returns a slice of services matching the given criteria.\n\/\/\n\/\/ See https:\/\/goo.gl\/DwvNMd for more details.\nfunc (c *Client) ListServices(opts ListServicesOptions) ([]swarm.Service, error) {\n\tpath := \"\/services?\" + queryString(opts)\n\tresp, err := c.do(http.MethodGet, path, doOptions{context: opts.Context})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tvar services []swarm.Service\n\tif err := json.NewDecoder(resp.Body).Decode(&services); err != nil {\n\t\treturn nil, err\n\t}\n\treturn services, nil\n}\n\n\/\/ LogsServiceOptions represents the set of options used when getting logs from a\n\/\/ service.\ntype LogsServiceOptions struct {\n\tContext context.Context\n\tService string `qs:\"-\"`\n\tOutputStream io.Writer `qs:\"-\"`\n\tErrorStream io.Writer `qs:\"-\"`\n\tInactivityTimeout time.Duration `qs:\"-\"`\n\tTail string\n\tSince int64\n\n\t\/\/ Use raw terminal? Usually true when the container contains a TTY.\n\tRawTerminal bool `qs:\"-\"`\n\tFollow bool\n\tStdout bool\n\tStderr bool\n\tTimestamps bool\n\tDetails bool\n}\n\n\/\/ GetServiceLogs gets stdout and stderr logs from the specified service.\n\/\/\n\/\/ When LogsServiceOptions.RawTerminal is set to false, go-dockerclient will multiplex\n\/\/ the streams and send the containers stdout to LogsServiceOptions.OutputStream, and\n\/\/ stderr to LogsServiceOptions.ErrorStream.\n\/\/\n\/\/ When LogsServiceOptions.RawTerminal is true, callers will get the raw stream on\n\/\/ LogsServiceOptions.OutputStream.\nfunc (c *Client) GetServiceLogs(opts LogsServiceOptions) error {\n\tif opts.Service == \"\" {\n\t\treturn &NoSuchService{ID: opts.Service}\n\t}\n\tif opts.Tail == \"\" {\n\t\topts.Tail = \"all\"\n\t}\n\tpath := \"\/services\/\" + opts.Service + \"\/logs?\" + queryString(opts)\n\treturn c.stream(http.MethodGet, path, streamOptions{\n\t\tsetRawTerminal: opts.RawTerminal,\n\t\tstdout: opts.OutputStream,\n\t\tstderr: opts.ErrorStream,\n\t\tinactivityTimeout: opts.InactivityTimeout,\n\t\tcontext: opts.Context,\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package rest\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n)\n\ntype responseLogRecord struct {\n\tStatusCode int\n\tResponseTime *time.Duration\n\tHttpMethod string\n\tRequestURI string\n}\n\nfunc (self *ResourceHandler) logResponseRecord(record *responseLogRecord) {\n\tif self.EnableLogAsJson {\n\t\tb, err := json.Marshal(record)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tself.Logger.Printf(\"%s\", b)\n\t} else {\n\t\tself.Logger.Printf(\"%d %v %s %s\",\n\t\t\trecord.StatusCode,\n\t\t\trecord.ResponseTime,\n\t\t\trecord.HttpMethod,\n\t\t\trecord.RequestURI,\n\t\t)\n\t}\n}\n\nfunc (self *ResourceHandler) logWrapper(h http.HandlerFunc) http.HandlerFunc {\n\n\t\/\/ set a default Logger\n\tif self.Logger == nil {\n\t\tself.Logger = log.New(os.Stderr, \"\", log.LstdFlags)\n\t}\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\t\/\/ call the handler\n\t\th(w, r)\n\n\t\tself.logResponseRecord(&responseLogRecord{\n\t\t\tself.env.getVar(r, \"statusCode\").(int),\n\t\t\tself.env.getVar(r, \"elapsedTime\").(*time.Duration),\n\t\t\tr.Method,\n\t\t\tr.URL.RequestURI(),\n\t\t})\n\t}\n}\n<commit_msg>Color logging<commit_after>package rest\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n)\n\ntype responseLogRecord struct {\n\tStatusCode int\n\tResponseTime *time.Duration\n\tHttpMethod string\n\tRequestURI string\n}\n\nfunc (self *ResourceHandler) logResponseRecord(record *responseLogRecord) {\n\tif self.EnableLogAsJson {\n\t\tb, err := json.Marshal(record)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tself.Logger.Printf(\"%s\", b)\n\t} else {\n\t\tstatusCodeColor := \"0;32\"\n\t\tif record.StatusCode >= 400 && record.StatusCode < 500 {\n\t\t\tstatusCodeColor = \"1;33\"\n\t\t} else if record.StatusCode >= 500 {\n\t\t\tstatusCodeColor = \"0;31\"\n\t\t}\n\t\tself.Logger.Printf(\"\\033[%sm%d\\033[0m \\033[36;1m%.2fms\\033[0m %s %s\",\n\t\t\tstatusCodeColor,\n\t\t\trecord.StatusCode,\n\t\t\tfloat64(record.ResponseTime.Nanoseconds()\/1e4)\/100.0,\n\t\t\trecord.HttpMethod,\n\t\t\trecord.RequestURI,\n\t\t)\n\t}\n}\n\nfunc (self *ResourceHandler) logWrapper(h http.HandlerFunc) http.HandlerFunc {\n\n\t\/\/ set a default Logger\n\tif self.Logger == nil {\n\t\tself.Logger = log.New(os.Stderr, \"\", log.LstdFlags)\n\t}\n\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\n\t\t\/\/ call the handler\n\t\th(w, r)\n\n\t\tself.logResponseRecord(&responseLogRecord{\n\t\t\tself.env.getVar(r, \"statusCode\").(int),\n\t\t\tself.env.getVar(r, \"elapsedTime\").(*time.Duration),\n\t\t\tr.Method,\n\t\t\tr.URL.RequestURI(),\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package log provide an easy to use logging package that supports level-based and asynchronized logging.\n\/\/ It's designed to be used as a drop-in replacement of the standard log package\npackage log\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"time\"\n)\n\nconst (\n\tLOG_LEVEL_TRACE = 0\n\tLOG_LEVEL_DEBUG = 1\n\tLOG_LEVEL_INFO = 2\n\tLOG_LEVEL_WARN = 3\n\tLOG_LEVEL_ERROR = 4\n\tLOG_LEVEL_FATAL = 5\n)\n\ntype HTTPLogWriter struct {\n\turl string\n}\n\ntype LogMessage struct {\n\tdata []byte\n}\n\nconst DEFAULT_QUEUE_SIZE = 100\n\ntype AsyncLogWriter struct {\n\tw io.Writer\n\tqueue chan LogMessage\n\tclosing bool\n\tclosed chan int\n}\n\nfunc NewAsyncLogWriter(w io.Writer, n int) *AsyncLogWriter {\n\tif n <= 0 {\n\t\tn = DEFAULT_QUEUE_SIZE\n\t}\n\tqueue := make(chan LogMessage, n)\n\n\taw := &AsyncLogWriter{\n\t\tqueue: queue,\n\t\tw: w,\n\t\tclosing: false,\n\t\tclosed: make(chan int),\n\t}\n\n\tgo func(w *AsyncLogWriter) {\n\t\tfor !w.closing {\n\t\t\t\/\/ process all queued messages\n\t\t\tfor msg := range w.queue {\n\t\t\t\t_, err := w.w.Write(msg.data)\n\t\t\t\tif err != nil {\n\t\t\t\t\t\/\/ the writer failed to write the message somehow,\n\t\t\t\t\t\/\/ we just discard the message here, but other implementations\n\t\t\t\t\t\/\/ might try to resend the message\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tw.closed <- 1 \/\/ all messages are processed. ready to close\n\t}(aw)\n\n\treturn aw\n}\n\nfunc (w *AsyncLogWriter) Close() {\n\tw.closing = true\n\t<-w.closed\n}\n\nfunc (w *AsyncLogWriter) Write(data []byte) (n int, err error) {\n\tw.queue <- LogMessage{data: data}\n\treturn len(data), nil\n}\n\ntype LogFormatter interface {\n\tFormat(t time.Time, level int, message string) string\n}\n\ntype Logger struct {\n\tlevel int\n\tpath string\n\tfname string\n\twriter io.Writer\n\twriteCloser io.WriteCloser\n\tformatter LogFormatter\n}\n\nfunc (w *HTTPLogWriter) Write(data []byte) (n int, err error) {\n\tresp, err := http.Post(w.url, \"html\/text\", bytes.NewReader(data))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer resp.Body.Close()\n\treturn len(data), err\n}\n\n\/\/ DefaultLogFormatter format log message in this format: \"INFO: 2006-01-02T15:04:05 (UTC): log message...\"\ntype DefaultLogFormatter struct {\n}\n\nfunc (f *DefaultLogFormatter) Format(t time.Time, level int, message string) string {\n\ttimeStr := t.UTC().Format(\"2006-01-02T15:04:05 (MST)\")\n\treturn fmt.Sprintf(\"%s: %s: %s\\n\", loglevel2string(level), timeStr, message)\n}\n\n\/\/ New creates a new logger with the given writer\nfunc New(w io.Writer, loglevel int) *Logger {\n\treturn &Logger{\n\t\tlevel: loglevel,\n\t\twriter: w,\n\t\tformatter: &DefaultLogFormatter{},\n\t}\n}\n\n\/\/ NewHTTPLogger creates a logger that sends log to a http server\nfunc NewHTTPLogger(url string, loglevel int) *Logger {\n\treturn &Logger{\n\t\tlevel: loglevel,\n\t\twriter: NewAsyncLogWriter(&HTTPLogWriter{url: url}, DEFAULT_QUEUE_SIZE),\n\t\tformatter: &DefaultLogFormatter{},\n\t}\n}\n\n\/\/ NewFileLogger creates a new logger which writes logs to the specified logpath and filename\nfunc NewFileLogger(logpath string, fname string, loglevel int) *Logger {\n\n\t\/\/ create the log directory if not exists\n\terr := os.MkdirAll(logpath, 0750)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ use program name as log filename\n\tif fname == \"\" {\n\t\tfname = path.Base(os.Args[0])\n\t}\n\tfilepath := fmt.Sprintf(\"%s\/%s.log\", logpath, fname)\n\n\t\/\/ open the log file\n\tfile, err := os.OpenFile(filepath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0640)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn &Logger{\n\t\tlevel: loglevel,\n\t\tpath: logpath,\n\t\tfname: fname,\n\t\twriteCloser: file,\n\t\twriter: file,\n\t\tformatter: &DefaultLogFormatter{},\n\t}\n}\n\n\/\/ SetLogLevel sets the current log level of the logger\nfunc (logger *Logger) SetLogLevel(level int) {\n\tlogger.level = level\n}\n\n\/\/ Close closes the writer of the logger.\nfunc (logger *Logger) Close() {\n\tif logger.writeCloser != nil {\n\t\tlogger.writeCloser.Close()\n\t}\n}\n\n\/\/ Writer returns current writer of the logger.\nfunc (logger *Logger) Writer() io.Writer {\n\treturn logger.writer\n}\n\n\/\/ Print logs a formatted message at LOG_LEVEL_INFO level\nfunc (logger *Logger) Print(v ...interface{}) {\n\ts := fmt.Sprint(v...)\n\tmsg := logger.formatter.Format(time.Now(), logger.level, s)\n\tif logger.Writer() != nil {\n\t\tlogger.Writer().Write([]byte(msg))\n\t}\n}\n\n\/\/ Println logs a formatted message at LOG_LEVEL_INFO level\nfunc (logger *Logger) Println(v ...interface{}) {\n\ts := fmt.Sprintln(v...)\n\tmsg := logger.formatter.Format(time.Now(), logger.level, s)\n\tif logger.Writer() != nil {\n\t\tlogger.Writer().Write([]byte(msg))\n\t}\n}\n\n\/\/ Println logs a formatted message at LOG_LEVEL_INFO level\nfunc (logger *Logger) Printf(format string, v ...interface{}) {\n\ts := fmt.Sprintf(format, v...)\n\tmsg := logger.formatter.Format(time.Now(), logger.level, s)\n\tif logger.Writer() != nil {\n\t\tlogger.Writer().Write([]byte(msg))\n\t}\n}\n\n\/\/ Log logs a formatted message at the given log level\nfunc (logger *Logger) Log(loglevel int, v ...interface{}) {\n\tif loglevel >= logger.level {\n\t\ts := fmt.Sprint(v...)\n\t\tmsg := logger.formatter.Format(time.Now(), loglevel, s)\n\t\tif logger.Writer() != nil {\n\t\t\tlogger.Writer().Write([]byte(msg))\n\t\t}\n\t}\n}\n\n\/\/ Logf logs a formatted message at the given log level\nfunc (logger *Logger) Logf(loglevel int, format string, v ...interface{}) {\n\tif loglevel >= logger.level {\n\t\ts := fmt.Sprintf(format, v...)\n\t\tmsg := logger.formatter.Format(time.Now(), loglevel, s)\n\t\tif logger.Writer() != nil {\n\t\t\tlogger.Writer().Write([]byte(msg))\n\t\t}\n\t}\n}\n\n\/\/ Logln logs a formatted message at the given log level\nfunc (logger *Logger) Logln(loglevel int, v ...interface{}) {\n\tif loglevel >= logger.level {\n\t\ts := fmt.Sprintln(v...)\n\t\tmsg := logger.formatter.Format(time.Now(), loglevel, s)\n\t\tif logger.Writer() != nil {\n\t\t\tlogger.Writer().Write([]byte(msg))\n\t\t}\n\t}\n}\n\n\/\/ Trace logs a formatted message at log level: LOG_LEVEL_TRACE\nfunc (logger *Logger) Trace(v ...interface{}) {\n\tlogger.Log(LOG_LEVEL_TRACE, v...)\n}\n\n\/\/ Tracef logs a formatted message at log level: LOG_LEVEL_TRACE\nfunc (logger *Logger) Tracef(fmt string, v ...interface{}) {\n\tlogger.Logf(LOG_LEVEL_TRACE, fmt, v...)\n}\n\n\/\/ Tracef logs a formatted message at log level: LOG_LEVEL_TRACE\nfunc (logger *Logger) Traceln(v ...interface{}) {\n\tlogger.Logln(LOG_LEVEL_TRACE, v...)\n}\n\n\/\/ Debug logs a formatted message at log level: LOG_LEVEL_DEBUG\nfunc (logger *Logger) Debug(v ...interface{}) {\n\tlogger.Log(LOG_LEVEL_DEBUG, v...)\n}\n\n\/\/ Debugf logs a formatted message at log level: LOG_LEVEL_DEBUG\nfunc (logger *Logger) Debugf(format string, v ...interface{}) {\n\tlogger.Logf(LOG_LEVEL_DEBUG, format, v...)\n}\n\n\/\/ Debugln logs a formatted message at log level: LOG_LEVEL_DEBUG\nfunc (logger *Logger) Debugln(v ...interface{}) {\n\tlogger.Logln(LOG_LEVEL_DEBUG, v...)\n}\n\n\/\/ Info logs a formatted message at log level: LOG_LEVEL_INFO\nfunc (logger *Logger) Info(v ...interface{}) {\n\tlogger.Log(LOG_LEVEL_INFO, v...)\n}\n\n\/\/ Infof logs a formatted message at log level: LOG_LEVEL_INFO\nfunc (logger *Logger) Infof(format string, v ...interface{}) {\n\tlogger.Logf(LOG_LEVEL_INFO, format, v...)\n}\n\n\/\/ Infoln logs a formatted message at log level: LOG_LEVEL_INFO\nfunc (logger *Logger) Infoln(v ...interface{}) {\n\tlogger.Logln(LOG_LEVEL_INFO, v...)\n}\n\n\/\/ Warn logs a formatted message at log level: LOG_LEVEL_WARN\nfunc (logger *Logger) Warn(v ...interface{}) {\n\tlogger.Log(LOG_LEVEL_WARN, v...)\n}\n\n\/\/ Warnf logs a formatted message at log level: LOG_LEVEL_WARN\nfunc (logger *Logger) Warnf(format string, v ...interface{}) {\n\tlogger.Logf(LOG_LEVEL_WARN, format, v...)\n}\n\n\/\/ Warnln logs a formatted message at log level: LOG_LEVEL_WARN\nfunc (logger *Logger) Warnln(v ...interface{}) {\n\tlogger.Logln(LOG_LEVEL_WARN, v...)\n}\n\n\/\/ Error logs a formatted message at log level: LOG_LEVEL_ERROR\nfunc (logger *Logger) Error(v ...interface{}) {\n\tlogger.Log(LOG_LEVEL_ERROR, v...)\n}\n\n\/\/ Errorf logs a formatted message at log level: LOG_LEVEL_ERROR\nfunc (logger *Logger) Errorf(format string, v ...interface{}) {\n\tlogger.Logf(LOG_LEVEL_ERROR, format, v...)\n}\n\n\/\/ Errorln logs a formatted message at log level: LOG_LEVEL_ERROR\nfunc (logger *Logger) Errorln(v ...interface{}) {\n\tlogger.Logln(LOG_LEVEL_ERROR, v...)\n}\n\n\/\/ Fatal logs a formatted message at log level: LOG_LEVEL_FATAL then calls os.Exit(1)\nfunc (logger *Logger) Fatal(v ...interface{}) {\n\tlogger.Log(LOG_LEVEL_FATAL, v...)\n\tos.Exit(1)\n}\n\n\/\/ Fatalf logs a formatted message at log level: LOG_LEVEL_FATAL then calls os.Exit(1)\nfunc (logger *Logger) Fatalf(format string, v ...interface{}) {\n\tlogger.Logf(LOG_LEVEL_FATAL, format, v...)\n\tos.Exit(1)\n}\n\n\/\/ Panic logs a formatted message at log level: LOG_LEVEL_FATAL then calls os.Exit(1)\nfunc (logger *Logger) Fatalln(v ...interface{}) {\n\tlogger.Logln(LOG_LEVEL_FATAL, v...)\n\tos.Exit(1)\n}\n\n\/\/ Panic logs a message at log level: LOG_LEVEL_FATAL then calls panic()\nfunc (logger *Logger) Panic(v ...interface{}) {\n\tlogger.Log(LOG_LEVEL_FATAL, v...)\n\tpanic(nil)\n}\n\n\/\/ Panicf logs a formatted message at log level: LOG_LEVEL_FATAL then calls panic()\nfunc (logger *Logger) Panicf(format string, v ...interface{}) {\n\tlogger.Logf(LOG_LEVEL_FATAL, format, v...)\n\tpanic(nil)\n}\n\n\/\/ Panicln logs a formatted message at log level: LOG_LEVEL_FATAL then calls panic()\nfunc (logger *Logger) Panicln(v ...interface{}) {\n\tlogger.Logln(LOG_LEVEL_FATAL, v...)\n\tpanic(nil)\n}\n\nfunc loglevel2string(level int) string {\n\tswitch level {\n\tcase LOG_LEVEL_TRACE:\n\t\treturn \"TRACE\"\n\tcase LOG_LEVEL_DEBUG:\n\t\treturn \"DEBUG\"\n\tcase LOG_LEVEL_INFO:\n\t\treturn \"INFO\"\n\tcase LOG_LEVEL_WARN:\n\t\treturn \"WARN\"\n\tcase LOG_LEVEL_ERROR:\n\t\treturn \"ERROR\"\n\tcase LOG_LEVEL_FATAL:\n\t\treturn \"FATAL\"\n\tdefault:\n\t\treturn \"Unknown\"\n\t}\n}\n<commit_msg>Add String2LogLevel and LogLevel2String functions<commit_after>\/\/ Package log provide an easy to use logging package that supports level-based and asynchronized logging.\n\/\/ It's designed to be used as a drop-in replacement of the standard log package\npackage log\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tLOG_LEVEL_TRACE = 0\n\tLOG_LEVEL_DEBUG = 1\n\tLOG_LEVEL_INFO = 2\n\tLOG_LEVEL_WARN = 3\n\tLOG_LEVEL_ERROR = 4\n\tLOG_LEVEL_FATAL = 5\n)\n\ntype HTTPLogWriter struct {\n\turl string\n}\n\ntype LogMessage struct {\n\tdata []byte\n}\n\nconst DEFAULT_QUEUE_SIZE = 100\n\ntype AsyncLogWriter struct {\n\tw io.Writer\n\tqueue chan LogMessage\n\tclosing bool\n\tclosed chan int\n}\n\nfunc NewAsyncLogWriter(w io.Writer, n int) *AsyncLogWriter {\n\tif n <= 0 {\n\t\tn = DEFAULT_QUEUE_SIZE\n\t}\n\tqueue := make(chan LogMessage, n)\n\n\taw := &AsyncLogWriter{\n\t\tqueue: queue,\n\t\tw: w,\n\t\tclosing: false,\n\t\tclosed: make(chan int),\n\t}\n\n\tgo func(w *AsyncLogWriter) {\n\t\tfor !w.closing {\n\t\t\t\/\/ process all queued messages\n\t\t\tfor msg := range w.queue {\n\t\t\t\t_, err := w.w.Write(msg.data)\n\t\t\t\tif err != nil {\n\t\t\t\t\t\/\/ the writer failed to write the message somehow,\n\t\t\t\t\t\/\/ we just discard the message here, but other implementations\n\t\t\t\t\t\/\/ might try to resend the message\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tw.closed <- 1 \/\/ all messages are processed. ready to close\n\t}(aw)\n\n\treturn aw\n}\n\nfunc (w *AsyncLogWriter) Close() {\n\tw.closing = true\n\t<-w.closed\n}\n\nfunc (w *AsyncLogWriter) Write(data []byte) (n int, err error) {\n\tw.queue <- LogMessage{data: data}\n\treturn len(data), nil\n}\n\ntype LogFormatter interface {\n\tFormat(t time.Time, level int, message string) string\n}\n\ntype Logger struct {\n\tlevel int\n\tpath string\n\tfname string\n\twriter io.Writer\n\twriteCloser io.WriteCloser\n\tformatter LogFormatter\n}\n\nfunc (w *HTTPLogWriter) Write(data []byte) (n int, err error) {\n\tresp, err := http.Post(w.url, \"html\/text\", bytes.NewReader(data))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer resp.Body.Close()\n\treturn len(data), err\n}\n\n\/\/ DefaultLogFormatter format log message in this format: \"INFO: 2006-01-02T15:04:05 (UTC): log message...\"\ntype DefaultLogFormatter struct {\n}\n\nfunc (f *DefaultLogFormatter) Format(t time.Time, level int, message string) string {\n\ttimeStr := t.UTC().Format(\"2006-01-02T15:04:05 (MST)\")\n\treturn fmt.Sprintf(\"%s: %s: %s\\n\", LogLevel2String(level), timeStr, message)\n}\n\n\/\/ New creates a new logger with the given writer\nfunc New(w io.Writer, loglevel int) *Logger {\n\treturn &Logger{\n\t\tlevel: loglevel,\n\t\twriter: w,\n\t\tformatter: &DefaultLogFormatter{},\n\t}\n}\n\n\/\/ NewHTTPLogger creates a logger that sends log to a http server\nfunc NewHTTPLogger(url string, loglevel int) *Logger {\n\treturn &Logger{\n\t\tlevel: loglevel,\n\t\twriter: NewAsyncLogWriter(&HTTPLogWriter{url: url}, DEFAULT_QUEUE_SIZE),\n\t\tformatter: &DefaultLogFormatter{},\n\t}\n}\n\n\/\/ NewFileLogger creates a new logger which writes logs to the specified logpath and filename\nfunc NewFileLogger(logpath string, fname string, loglevel int) *Logger {\n\n\t\/\/ create the log directory if not exists\n\terr := os.MkdirAll(logpath, 0750)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ use program name as log filename\n\tif fname == \"\" {\n\t\tfname = path.Base(os.Args[0])\n\t}\n\tfilepath := fmt.Sprintf(\"%s\/%s.log\", logpath, fname)\n\n\t\/\/ open the log file\n\tfile, err := os.OpenFile(filepath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0640)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn &Logger{\n\t\tlevel: loglevel,\n\t\tpath: logpath,\n\t\tfname: fname,\n\t\twriteCloser: file,\n\t\twriter: file,\n\t\tformatter: &DefaultLogFormatter{},\n\t}\n}\n\n\/\/ SetLogLevel sets the current log level of the logger\nfunc (logger *Logger) SetLogLevel(level int) {\n\tlogger.level = level\n}\n\n\/\/ Close closes the writer of the logger.\nfunc (logger *Logger) Close() {\n\tif logger.writeCloser != nil {\n\t\tlogger.writeCloser.Close()\n\t}\n}\n\n\/\/ Writer returns current writer of the logger.\nfunc (logger *Logger) Writer() io.Writer {\n\treturn logger.writer\n}\n\n\/\/ Print logs a formatted message at LOG_LEVEL_INFO level\nfunc (logger *Logger) Print(v ...interface{}) {\n\ts := fmt.Sprint(v...)\n\tmsg := logger.formatter.Format(time.Now(), logger.level, s)\n\tif logger.Writer() != nil {\n\t\tlogger.Writer().Write([]byte(msg))\n\t}\n}\n\n\/\/ Println logs a formatted message at LOG_LEVEL_INFO level\nfunc (logger *Logger) Println(v ...interface{}) {\n\ts := fmt.Sprintln(v...)\n\tmsg := logger.formatter.Format(time.Now(), logger.level, s)\n\tif logger.Writer() != nil {\n\t\tlogger.Writer().Write([]byte(msg))\n\t}\n}\n\n\/\/ Println logs a formatted message at LOG_LEVEL_INFO level\nfunc (logger *Logger) Printf(format string, v ...interface{}) {\n\ts := fmt.Sprintf(format, v...)\n\tmsg := logger.formatter.Format(time.Now(), logger.level, s)\n\tif logger.Writer() != nil {\n\t\tlogger.Writer().Write([]byte(msg))\n\t}\n}\n\n\/\/ Log logs a formatted message at the given log level\nfunc (logger *Logger) Log(loglevel int, v ...interface{}) {\n\tif loglevel >= logger.level {\n\t\ts := fmt.Sprint(v...)\n\t\tmsg := logger.formatter.Format(time.Now(), loglevel, s)\n\t\tif logger.Writer() != nil {\n\t\t\tlogger.Writer().Write([]byte(msg))\n\t\t}\n\t}\n}\n\n\/\/ Logf logs a formatted message at the given log level\nfunc (logger *Logger) Logf(loglevel int, format string, v ...interface{}) {\n\tif loglevel >= logger.level {\n\t\ts := fmt.Sprintf(format, v...)\n\t\tmsg := logger.formatter.Format(time.Now(), loglevel, s)\n\t\tif logger.Writer() != nil {\n\t\t\tlogger.Writer().Write([]byte(msg))\n\t\t}\n\t}\n}\n\n\/\/ Logln logs a formatted message at the given log level\nfunc (logger *Logger) Logln(loglevel int, v ...interface{}) {\n\tif loglevel >= logger.level {\n\t\ts := fmt.Sprintln(v...)\n\t\tmsg := logger.formatter.Format(time.Now(), loglevel, s)\n\t\tif logger.Writer() != nil {\n\t\t\tlogger.Writer().Write([]byte(msg))\n\t\t}\n\t}\n}\n\n\/\/ Trace logs a formatted message at log level: LOG_LEVEL_TRACE\nfunc (logger *Logger) Trace(v ...interface{}) {\n\tlogger.Log(LOG_LEVEL_TRACE, v...)\n}\n\n\/\/ Tracef logs a formatted message at log level: LOG_LEVEL_TRACE\nfunc (logger *Logger) Tracef(fmt string, v ...interface{}) {\n\tlogger.Logf(LOG_LEVEL_TRACE, fmt, v...)\n}\n\n\/\/ Tracef logs a formatted message at log level: LOG_LEVEL_TRACE\nfunc (logger *Logger) Traceln(v ...interface{}) {\n\tlogger.Logln(LOG_LEVEL_TRACE, v...)\n}\n\n\/\/ Debug logs a formatted message at log level: LOG_LEVEL_DEBUG\nfunc (logger *Logger) Debug(v ...interface{}) {\n\tlogger.Log(LOG_LEVEL_DEBUG, v...)\n}\n\n\/\/ Debugf logs a formatted message at log level: LOG_LEVEL_DEBUG\nfunc (logger *Logger) Debugf(format string, v ...interface{}) {\n\tlogger.Logf(LOG_LEVEL_DEBUG, format, v...)\n}\n\n\/\/ Debugln logs a formatted message at log level: LOG_LEVEL_DEBUG\nfunc (logger *Logger) Debugln(v ...interface{}) {\n\tlogger.Logln(LOG_LEVEL_DEBUG, v...)\n}\n\n\/\/ Info logs a formatted message at log level: LOG_LEVEL_INFO\nfunc (logger *Logger) Info(v ...interface{}) {\n\tlogger.Log(LOG_LEVEL_INFO, v...)\n}\n\n\/\/ Infof logs a formatted message at log level: LOG_LEVEL_INFO\nfunc (logger *Logger) Infof(format string, v ...interface{}) {\n\tlogger.Logf(LOG_LEVEL_INFO, format, v...)\n}\n\n\/\/ Infoln logs a formatted message at log level: LOG_LEVEL_INFO\nfunc (logger *Logger) Infoln(v ...interface{}) {\n\tlogger.Logln(LOG_LEVEL_INFO, v...)\n}\n\n\/\/ Warn logs a formatted message at log level: LOG_LEVEL_WARN\nfunc (logger *Logger) Warn(v ...interface{}) {\n\tlogger.Log(LOG_LEVEL_WARN, v...)\n}\n\n\/\/ Warnf logs a formatted message at log level: LOG_LEVEL_WARN\nfunc (logger *Logger) Warnf(format string, v ...interface{}) {\n\tlogger.Logf(LOG_LEVEL_WARN, format, v...)\n}\n\n\/\/ Warnln logs a formatted message at log level: LOG_LEVEL_WARN\nfunc (logger *Logger) Warnln(v ...interface{}) {\n\tlogger.Logln(LOG_LEVEL_WARN, v...)\n}\n\n\/\/ Error logs a formatted message at log level: LOG_LEVEL_ERROR\nfunc (logger *Logger) Error(v ...interface{}) {\n\tlogger.Log(LOG_LEVEL_ERROR, v...)\n}\n\n\/\/ Errorf logs a formatted message at log level: LOG_LEVEL_ERROR\nfunc (logger *Logger) Errorf(format string, v ...interface{}) {\n\tlogger.Logf(LOG_LEVEL_ERROR, format, v...)\n}\n\n\/\/ Errorln logs a formatted message at log level: LOG_LEVEL_ERROR\nfunc (logger *Logger) Errorln(v ...interface{}) {\n\tlogger.Logln(LOG_LEVEL_ERROR, v...)\n}\n\n\/\/ Fatal logs a formatted message at log level: LOG_LEVEL_FATAL then calls os.Exit(1)\nfunc (logger *Logger) Fatal(v ...interface{}) {\n\tlogger.Log(LOG_LEVEL_FATAL, v...)\n\tos.Exit(1)\n}\n\n\/\/ Fatalf logs a formatted message at log level: LOG_LEVEL_FATAL then calls os.Exit(1)\nfunc (logger *Logger) Fatalf(format string, v ...interface{}) {\n\tlogger.Logf(LOG_LEVEL_FATAL, format, v...)\n\tos.Exit(1)\n}\n\n\/\/ Panic logs a formatted message at log level: LOG_LEVEL_FATAL then calls os.Exit(1)\nfunc (logger *Logger) Fatalln(v ...interface{}) {\n\tlogger.Logln(LOG_LEVEL_FATAL, v...)\n\tos.Exit(1)\n}\n\n\/\/ Panic logs a message at log level: LOG_LEVEL_FATAL then calls panic()\nfunc (logger *Logger) Panic(v ...interface{}) {\n\tlogger.Log(LOG_LEVEL_FATAL, v...)\n\tpanic(nil)\n}\n\n\/\/ Panicf logs a formatted message at log level: LOG_LEVEL_FATAL then calls panic()\nfunc (logger *Logger) Panicf(format string, v ...interface{}) {\n\tlogger.Logf(LOG_LEVEL_FATAL, format, v...)\n\tpanic(nil)\n}\n\n\/\/ Panicln logs a formatted message at log level: LOG_LEVEL_FATAL then calls panic()\nfunc (logger *Logger) Panicln(v ...interface{}) {\n\tlogger.Logln(LOG_LEVEL_FATAL, v...)\n\tpanic(nil)\n}\n\nfunc LogLevel2String(level int) string {\n\tswitch level {\n\tcase LOG_LEVEL_TRACE:\n\t\treturn \"TRACE\"\n\tcase LOG_LEVEL_DEBUG:\n\t\treturn \"DEBUG\"\n\tcase LOG_LEVEL_INFO:\n\t\treturn \"INFO\"\n\tcase LOG_LEVEL_WARN:\n\t\treturn \"WARN\"\n\tcase LOG_LEVEL_ERROR:\n\t\treturn \"ERROR\"\n\tcase LOG_LEVEL_FATAL:\n\t\treturn \"FATAL\"\n\tdefault:\n\t\treturn \"Unknown\"\n\t}\n}\n\nfunc String2LogLevel(str string) int {\n\tstr = strings.ToUpper(str)\n\tswitch str {\n\tcase \"TRACE\":\n\t\treturn LOG_LEVEL_TRACE\n\tcase \"DEBUG\":\n\t\treturn LOG_LEVEL_DEBUG\n\tcase \"INFO\":\n\t\treturn LOG_LEVEL_INFO\n\tcase \"WARN\":\n\t\treturn LOG_LEVEL_WARN\n\tcase \"ERROR\":\n\t\treturn LOG_LEVEL_WARN\n\tcase \"FATAL\":\n\t\treturn LOG_LEVEL_FATAL\n\tdefault:\n\t\treturn -1\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright (c) 2014, YU HengChun\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice, this\n list of conditions and the following disclaimer in the documentation and\/or\n other materials provided with the distribution.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n*\/\n\n\/\/ few code fork go log.\npackage log\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tLdate = 1 << iota\n\tLtime\n\tLmicroseconds\n\tLlongfile\n\tLshortfile\n\tLstdFlags = Ldate | Ltime\n)\n\n\/\/ dont change order\nconst (\n\tLZero = -iota\n\tLFatal\n\tLPanic \/\/ recover panic\n\tLAlert\n\tLError\n\tLReport\n\tLNotify\n\tLInfo\n\tLDebug\n\tnr_levels\n)\n\nvar levelsName [-nr_levels - 1]string\n\nfunc init() {\n\tlevelsName[-LFatal-1] = \"[F]\"\n\tlevelsName[-LPanic-1] = \"[P]\"\n\tlevelsName[-LAlert-1] = \"[A]\"\n\tlevelsName[-LError-1] = \"[E]\"\n\tlevelsName[-LReport-1] = \"[R]\"\n\tlevelsName[-LNotify-1] = \"[N]\"\n\tlevelsName[-LInfo-1] = \"[I]\"\n\tlevelsName[-LDebug-1] = \"[D]\"\n}\n\n\/\/ dont change order\nconst (\n\tMODE_EQUAL = -iota - 100 \/\/ equal level mode\n\tMODE_NONE_NAME \/\/ dont write default level name\n\tMODE_DONT_EXIT \/\/ dont exec os.Exit when Fatal\n\tMODE_DONT_PANIC\n\tMODE_RECOVER\n)\n\ntype nullWriter struct{}\n\nfunc (f *nullWriter) Write(p []byte) (int, error) {\n\treturn len(p), nil\n}\n\ntype BaseLogger interface {\n\tFatal(v ...interface{})\n\tFatalf(format string, v ...interface{})\n\tPanic(v ...interface{})\n\tPanicf(format string, v ...interface{})\n\tAlert(v ...interface{})\n\tAlertf(format string, v ...interface{})\n\tError(v ...interface{})\n\tErrorf(format string, v ...interface{})\n\tReport(v ...interface{})\n\tReportf(format string, v ...interface{})\n\tNotify(v ...interface{})\n\tNotifyf(format string, v ...interface{})\n\tInfo(v ...interface{})\n\tInfof(format string, v ...interface{})\n\tDebug(v ...interface{})\n\tDebugf(format string, v ...interface{})\n}\n\ntype Logger interface {\n\tBaseLogger\n\tio.ReaderFrom\n\tWrite([]byte) (int, error)\n\tPrint(v ...interface{})\n\tPrintf(format string, v ...interface{})\n\tOutput(calldepth int, s string, optionLevel ...int) error\n}\n\nvar _ Logger = &logger{}\n\ntype logger struct {\n\tmu sync.Mutex \/\/ ensures atomic writes; protects the following fields\n\tprefix string \/\/ prefix to write at beginning of each line\n\tflag int \/\/ properties\n\tout io.Writer \/\/ destination for output\n\tbuf []byte \/\/ for accumulating text to write\n\n\tlevel int\n\tequal bool\n\tnoneName bool\n\tdontExit bool\n\tdontPanic bool\n\trecover bool\n}\n\n\/\/ Cheap integer to fixed-width decimal ASCII. Give a negative width to avoid zero-padding.\n\/\/ Knows the buffer has capacity.\nfunc itoa(buf *[]byte, i int, wid int) {\n\tvar u uint = uint(i)\n\tif u == 0 && wid <= 1 {\n\t\t*buf = append(*buf, '0')\n\t\treturn\n\t}\n\n\t\/\/ Assemble decimal in reverse order.\n\tvar b [32]byte\n\tbp := len(b)\n\tfor ; u > 0 || wid > 0; u \/= 10 {\n\t\tbp--\n\t\twid--\n\t\tb[bp] = byte(u%10) + '0'\n\t}\n\t*buf = append(*buf, b[bp:]...)\n}\n\nfunc (l *logger) formatHeader(buf *[]byte, t time.Time, file string, line, level int) {\n\tif len(l.prefix) != 0 {\n\t\t*buf = append(*buf, l.prefix...)\n\t\tif len(l.prefix) != 0 {\n\t\t\t*buf = append(*buf, ' ')\n\t\t}\n\t}\n\n\tif !l.noneName && level > nr_levels && level <= 0 {\n\t\t*buf = append(*buf, levelsName[-level]...)\n\t\t*buf = append(*buf, ' ')\n\t}\n\n\tif l.flag&(Ldate|Ltime|Lmicroseconds) != 0 {\n\t\tif l.flag&Ldate != 0 {\n\t\t\tyear, month, day := t.Date()\n\t\t\titoa(buf, year, 4)\n\t\t\t*buf = append(*buf, '-')\n\t\t\titoa(buf, int(month), 2)\n\t\t\t*buf = append(*buf, '-')\n\t\t\titoa(buf, day, 2)\n\t\t\t*buf = append(*buf, ' ')\n\t\t}\n\t\tif l.flag&(Ltime|Lmicroseconds) != 0 {\n\t\t\thour, min, sec := t.Clock()\n\t\t\titoa(buf, hour, 2)\n\t\t\t*buf = append(*buf, ':')\n\t\t\titoa(buf, min, 2)\n\t\t\t*buf = append(*buf, ':')\n\t\t\titoa(buf, sec, 2)\n\t\t\tif l.flag&Lmicroseconds != 0 {\n\t\t\t\t*buf = append(*buf, '.')\n\t\t\t\titoa(buf, t.Nanosecond()\/1e3, 6)\n\t\t\t}\n\t\t\t*buf = append(*buf, ' ')\n\t\t}\n\t}\n\tif l.flag&(Lshortfile|Llongfile) != 0 {\n\t\tif l.flag&Lshortfile != 0 {\n\t\t\tshort := file\n\t\t\tfor i := len(file) - 1; i > 0; i-- {\n\t\t\t\tif file[i] == '\/' {\n\t\t\t\t\tshort = file[i+1:]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tfile = short\n\t\t}\n\t\t*buf = append(*buf, '<')\n\t\t*buf = append(*buf, file...)\n\t\t*buf = append(*buf, ':')\n\t\titoa(buf, line, -1)\n\t\t*buf = append(*buf, `> `...)\n\t}\n}\n\nfunc printf(format string, v []interface{}) string {\n\tif len(format) == 0 {\n\t\treturn fmt.Sprint(v...)\n\t} else {\n\t\treturn fmt.Sprintf(format, v...)\n\t}\n}\n\nfunc (l *logger) Output(calldepth int, s string, optionLevel ...int) (err error) {\n\tlevel := LZero\n\tif len(optionLevel) != 0 {\n\t\tlevel = optionLevel[0]\n\t}\n\tif level == LZero || l.equal && level == l.level || !l.equal && level > l.level {\n\t\tlevel++\n\t} else {\n\t\treturn\n\t}\n\tnow := time.Now() \/\/ get this early.\n\tvar file string\n\tvar line int\n\tif len(s) != 0 {\n\t\ts = strconv.Quote(s)\n\t}\n\tif l.flag&(Lshortfile|Llongfile) != 0 {\n\t\tvar ok bool\n\t\t_, file, line, ok = runtime.Caller(calldepth)\n\t\tif !ok {\n\t\t\tfile = \"???\"\n\t\t\tline = 0\n\t\t}\n\t}\n\tl.mu.Lock()\n\tdefer func() {\n\t\tl.mu.Unlock()\n\t\tif l.recover {\n\t\t\t_ = recover() \/\/ ignore panic\n\t\t}\n\t}()\n\n\tl.buf = l.buf[:0]\n\n\tl.formatHeader(&l.buf, now, file, line, level)\n\n\tl.buf = append(l.buf, s...)\n\tif len(s) > 0 && s[len(s)-1] != '\\n' {\n\t\tl.buf = append(l.buf, '\\n')\n\t}\n\tn := 0\n\tcount := 0\n\tfor len(l.buf) != 0 && (err == nil || err == io.ErrShortWrite) && count < 10 {\n\t\tn, err = l.out.Write(l.buf)\n\t\tl.buf = l.buf[n:]\n\t\tif err == io.ErrShortWrite {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn\n}\n\nfunc (l *logger) ReadFrom(src Reader) (n int64, err error) {\n\tl.mu.Lock()\n\tdefer func() {\n\t\tl.mu.Unlock()\n\t\tif l.recover {\n\t\t\t_ = recover() \/\/ ignore panic\n\t\t}\n\t}()\n\treturn io.Copy(l.out, src)\n}\n\nfunc (l *logger) Write(p []byte) (n int, err error) {\n\tl.mu.Lock()\n\tdefer func() {\n\t\tl.mu.Unlock()\n\t\tif l.recover {\n\t\t\t_ = recover() \/\/ ignore panic\n\t\t}\n\t}()\n\twBytes := 0\n\tcount := 0\n\tfor len(p) != 0 && (err == nil || err == io.ErrShortWrite) && count < 10 {\n\t\twBytes, err = l.out.Write(p)\n\t\tn += wBytes\n\t\tp = p[n:]\n\t\tif err == io.ErrShortWrite {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn\n}\n\nfunc (l *logger) Print(v ...interface{}) {\n\tl.Output(2, printf(\"\", v), LZero)\n}\n\nfunc (l *logger) Printf(format string, v ...interface{}) {\n\tl.Output(2, printf(format, v), LZero)\n}\n\nfunc (l *logger) Debug(v ...interface{}) {\n\tl.Output(2, printf(\"\", v), LDebug)\n}\n\nfunc (l *logger) Debugf(format string, v ...interface{}) {\n\tl.Output(2, printf(format, v), LDebug)\n}\n\nfunc (l *logger) Info(v ...interface{}) {\n\tl.Output(2, printf(\"\", v), LInfo)\n}\n\nfunc (l *logger) Infof(format string, v ...interface{}) {\n\tl.Output(2, printf(format, v), LInfo)\n}\n\nfunc (l *logger) Notify(v ...interface{}) {\n\tl.Output(2, printf(\"\", v), LNotify)\n}\n\nfunc (l *logger) Notifyf(format string, v ...interface{}) {\n\tl.Output(2, printf(format, v), LNotify)\n}\n\nfunc (l *logger) Report(v ...interface{}) {\n\tl.Output(2, printf(\"\", v), LReport)\n}\n\nfunc (l *logger) Reportf(format string, v ...interface{}) {\n\tl.Output(2, printf(format, v), LReport)\n}\n\nfunc (l *logger) Error(v ...interface{}) {\n\tl.Output(2, printf(\"\", v), LError)\n}\n\nfunc (l *logger) Errorf(format string, v ...interface{}) {\n\tl.Output(2, printf(format, v), LError)\n}\n\nfunc (l *logger) Alert(v ...interface{}) {\n\tl.Output(2, printf(\"\", v), LAlert)\n}\n\nfunc (l *logger) Alertf(format string, v ...interface{}) {\n\tl.Output(2, printf(format, v), LAlert)\n}\n\nfunc (l *logger) Panic(v ...interface{}) {\n\tl.Output(2, printf(\"\", v), LPanic)\n\tif !l.dontPanic {\n\t\tpanic(v)\n\t}\n}\n\nfunc (l *logger) Panicf(format string, v ...interface{}) {\n\tl.Output(2, printf(format, v), LPanic)\n\tif !l.dontPanic {\n\t\tpanic(v)\n\t}\n}\n\nfunc (l *logger) Fatal(v ...interface{}) {\n\tl.Output(2, printf(\"\", v), LFatal)\n\tif !l.dontExit {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc (l *logger) Fatalf(format string, v ...interface{}) {\n\tl.Output(2, printf(format, v), LFatal)\n\tif !l.dontExit {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc New(writer io.Writer, prefix string, flags ...int) Logger {\n\tret := new(logger)\n\tret.level = nr_levels\n\thasflags := false\n\tfor _, flag := range flags {\n\t\tswitch flag {\n\t\tcase MODE_EQUAL:\n\t\t\tret.equal = true\n\t\tcase MODE_NONE_NAME:\n\t\t\tret.noneName = true\n\t\tcase MODE_DONT_EXIT:\n\t\t\tret.dontExit = true\n\t\tcase MODE_DONT_PANIC:\n\t\t\tret.dontPanic = true\n\t\tcase MODE_RECOVER:\n\t\t\tret.recover = true\n\t\tdefault:\n\n\t\t\tif flag >= 0 {\n\t\t\t\thasflags = true\n\t\t\t\tret.flag = ret.flag | flag\n\t\t\t} else {\n\t\t\t\tret.level = flag\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ defaults to LstdFlags.\n\tif !hasflags {\n\t\tret.flag = LstdFlags\n\t}\n\n\tif ret.level <= nr_levels {\n\t\tret.level = nr_levels + 1\n\t}\n\n\tret.prefix = prefix\n\tret.out = writer\n\treturn ret\n}\n<commit_msg>fix ReadFrom<commit_after>\/*\n Copyright (c) 2014, YU HengChun\n All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification,\n are permitted provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice, this\n list of conditions and the following disclaimer in the documentation and\/or\n other materials provided with the distribution.\n\n THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\n ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n*\/\n\n\/\/ few code fork go log.\npackage log\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tLdate = 1 << iota\n\tLtime\n\tLmicroseconds\n\tLlongfile\n\tLshortfile\n\tLstdFlags = Ldate | Ltime\n)\n\n\/\/ dont change order\nconst (\n\tLZero = -iota\n\tLFatal\n\tLPanic \/\/ recover panic\n\tLAlert\n\tLError\n\tLReport\n\tLNotify\n\tLInfo\n\tLDebug\n\tnr_levels\n)\n\nvar levelsName [-nr_levels - 1]string\n\nfunc init() {\n\tlevelsName[-LFatal-1] = \"[F]\"\n\tlevelsName[-LPanic-1] = \"[P]\"\n\tlevelsName[-LAlert-1] = \"[A]\"\n\tlevelsName[-LError-1] = \"[E]\"\n\tlevelsName[-LReport-1] = \"[R]\"\n\tlevelsName[-LNotify-1] = \"[N]\"\n\tlevelsName[-LInfo-1] = \"[I]\"\n\tlevelsName[-LDebug-1] = \"[D]\"\n}\n\n\/\/ dont change order\nconst (\n\tMODE_EQUAL = -iota - 100 \/\/ equal level mode\n\tMODE_NONE_NAME \/\/ dont write default level name\n\tMODE_DONT_EXIT \/\/ dont exec os.Exit when Fatal\n\tMODE_DONT_PANIC\n\tMODE_RECOVER\n)\n\ntype nullWriter struct{}\n\nfunc (f *nullWriter) Write(p []byte) (int, error) {\n\treturn len(p), nil\n}\n\ntype BaseLogger interface {\n\tFatal(v ...interface{})\n\tFatalf(format string, v ...interface{})\n\tPanic(v ...interface{})\n\tPanicf(format string, v ...interface{})\n\tAlert(v ...interface{})\n\tAlertf(format string, v ...interface{})\n\tError(v ...interface{})\n\tErrorf(format string, v ...interface{})\n\tReport(v ...interface{})\n\tReportf(format string, v ...interface{})\n\tNotify(v ...interface{})\n\tNotifyf(format string, v ...interface{})\n\tInfo(v ...interface{})\n\tInfof(format string, v ...interface{})\n\tDebug(v ...interface{})\n\tDebugf(format string, v ...interface{})\n}\n\ntype Logger interface {\n\tBaseLogger\n\tio.ReaderFrom\n\tWrite([]byte) (int, error)\n\tPrint(v ...interface{})\n\tPrintf(format string, v ...interface{})\n\tOutput(calldepth int, s string, optionLevel ...int) error\n}\n\nvar _ Logger = &logger{}\n\ntype logger struct {\n\tmu sync.Mutex \/\/ ensures atomic writes; protects the following fields\n\tprefix string \/\/ prefix to write at beginning of each line\n\tflag int \/\/ properties\n\tout io.Writer \/\/ destination for output\n\tbuf []byte \/\/ for accumulating text to write\n\n\tlevel int\n\tequal bool\n\tnoneName bool\n\tdontExit bool\n\tdontPanic bool\n\trecover bool\n}\n\n\/\/ Cheap integer to fixed-width decimal ASCII. Give a negative width to avoid zero-padding.\n\/\/ Knows the buffer has capacity.\nfunc itoa(buf *[]byte, i int, wid int) {\n\tvar u uint = uint(i)\n\tif u == 0 && wid <= 1 {\n\t\t*buf = append(*buf, '0')\n\t\treturn\n\t}\n\n\t\/\/ Assemble decimal in reverse order.\n\tvar b [32]byte\n\tbp := len(b)\n\tfor ; u > 0 || wid > 0; u \/= 10 {\n\t\tbp--\n\t\twid--\n\t\tb[bp] = byte(u%10) + '0'\n\t}\n\t*buf = append(*buf, b[bp:]...)\n}\n\nfunc (l *logger) formatHeader(buf *[]byte, t time.Time, file string, line, level int) {\n\tif len(l.prefix) != 0 {\n\t\t*buf = append(*buf, l.prefix...)\n\t\tif len(l.prefix) != 0 {\n\t\t\t*buf = append(*buf, ' ')\n\t\t}\n\t}\n\n\tif !l.noneName && level > nr_levels && level <= 0 {\n\t\t*buf = append(*buf, levelsName[-level]...)\n\t\t*buf = append(*buf, ' ')\n\t}\n\n\tif l.flag&(Ldate|Ltime|Lmicroseconds) != 0 {\n\t\tif l.flag&Ldate != 0 {\n\t\t\tyear, month, day := t.Date()\n\t\t\titoa(buf, year, 4)\n\t\t\t*buf = append(*buf, '-')\n\t\t\titoa(buf, int(month), 2)\n\t\t\t*buf = append(*buf, '-')\n\t\t\titoa(buf, day, 2)\n\t\t\t*buf = append(*buf, ' ')\n\t\t}\n\t\tif l.flag&(Ltime|Lmicroseconds) != 0 {\n\t\t\thour, min, sec := t.Clock()\n\t\t\titoa(buf, hour, 2)\n\t\t\t*buf = append(*buf, ':')\n\t\t\titoa(buf, min, 2)\n\t\t\t*buf = append(*buf, ':')\n\t\t\titoa(buf, sec, 2)\n\t\t\tif l.flag&Lmicroseconds != 0 {\n\t\t\t\t*buf = append(*buf, '.')\n\t\t\t\titoa(buf, t.Nanosecond()\/1e3, 6)\n\t\t\t}\n\t\t\t*buf = append(*buf, ' ')\n\t\t}\n\t}\n\tif l.flag&(Lshortfile|Llongfile) != 0 {\n\t\tif l.flag&Lshortfile != 0 {\n\t\t\tshort := file\n\t\t\tfor i := len(file) - 1; i > 0; i-- {\n\t\t\t\tif file[i] == '\/' {\n\t\t\t\t\tshort = file[i+1:]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tfile = short\n\t\t}\n\t\t*buf = append(*buf, '<')\n\t\t*buf = append(*buf, file...)\n\t\t*buf = append(*buf, ':')\n\t\titoa(buf, line, -1)\n\t\t*buf = append(*buf, `> `...)\n\t}\n}\n\nfunc printf(format string, v []interface{}) string {\n\tif len(format) == 0 {\n\t\treturn fmt.Sprint(v...)\n\t} else {\n\t\treturn fmt.Sprintf(format, v...)\n\t}\n}\n\nfunc (l *logger) Output(calldepth int, s string, optionLevel ...int) (err error) {\n\tlevel := LZero\n\tif len(optionLevel) != 0 {\n\t\tlevel = optionLevel[0]\n\t}\n\tif level == LZero || l.equal && level == l.level || !l.equal && level > l.level {\n\t\tlevel++\n\t} else {\n\t\treturn\n\t}\n\tnow := time.Now() \/\/ get this early.\n\tvar file string\n\tvar line int\n\tif len(s) != 0 {\n\t\ts = strconv.Quote(s)\n\t}\n\tif l.flag&(Lshortfile|Llongfile) != 0 {\n\t\tvar ok bool\n\t\t_, file, line, ok = runtime.Caller(calldepth)\n\t\tif !ok {\n\t\t\tfile = \"???\"\n\t\t\tline = 0\n\t\t}\n\t}\n\tl.mu.Lock()\n\tdefer func() {\n\t\tl.mu.Unlock()\n\t\tif l.recover {\n\t\t\t_ = recover() \/\/ ignore panic\n\t\t}\n\t}()\n\n\tl.buf = l.buf[:0]\n\n\tl.formatHeader(&l.buf, now, file, line, level)\n\n\tl.buf = append(l.buf, s...)\n\tif len(s) > 0 && s[len(s)-1] != '\\n' {\n\t\tl.buf = append(l.buf, '\\n')\n\t}\n\tn := 0\n\tcount := 0\n\tfor len(l.buf) != 0 && (err == nil || err == io.ErrShortWrite) && count < 10 {\n\t\tn, err = l.out.Write(l.buf)\n\t\tl.buf = l.buf[n:]\n\t\tif err == io.ErrShortWrite {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn\n}\n\nfunc (l *logger) ReadFrom(src io.Reader) (n int64, err error) {\n\tl.mu.Lock()\n\tdefer func() {\n\t\tl.mu.Unlock()\n\t\tif l.recover {\n\t\t\t_ = recover() \/\/ ignore panic\n\t\t}\n\t}()\n\treturn io.Copy(l.out, src)\n}\n\nfunc (l *logger) Write(p []byte) (n int, err error) {\n\tl.mu.Lock()\n\tdefer func() {\n\t\tl.mu.Unlock()\n\t\tif l.recover {\n\t\t\t_ = recover() \/\/ ignore panic\n\t\t}\n\t}()\n\twBytes := 0\n\tcount := 0\n\tfor len(p) != 0 && (err == nil || err == io.ErrShortWrite) && count < 10 {\n\t\twBytes, err = l.out.Write(p)\n\t\tn += wBytes\n\t\tp = p[n:]\n\t\tif err == io.ErrShortWrite {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn\n}\n\nfunc (l *logger) Print(v ...interface{}) {\n\tl.Output(2, printf(\"\", v), LZero)\n}\n\nfunc (l *logger) Printf(format string, v ...interface{}) {\n\tl.Output(2, printf(format, v), LZero)\n}\n\nfunc (l *logger) Debug(v ...interface{}) {\n\tl.Output(2, printf(\"\", v), LDebug)\n}\n\nfunc (l *logger) Debugf(format string, v ...interface{}) {\n\tl.Output(2, printf(format, v), LDebug)\n}\n\nfunc (l *logger) Info(v ...interface{}) {\n\tl.Output(2, printf(\"\", v), LInfo)\n}\n\nfunc (l *logger) Infof(format string, v ...interface{}) {\n\tl.Output(2, printf(format, v), LInfo)\n}\n\nfunc (l *logger) Notify(v ...interface{}) {\n\tl.Output(2, printf(\"\", v), LNotify)\n}\n\nfunc (l *logger) Notifyf(format string, v ...interface{}) {\n\tl.Output(2, printf(format, v), LNotify)\n}\n\nfunc (l *logger) Report(v ...interface{}) {\n\tl.Output(2, printf(\"\", v), LReport)\n}\n\nfunc (l *logger) Reportf(format string, v ...interface{}) {\n\tl.Output(2, printf(format, v), LReport)\n}\n\nfunc (l *logger) Error(v ...interface{}) {\n\tl.Output(2, printf(\"\", v), LError)\n}\n\nfunc (l *logger) Errorf(format string, v ...interface{}) {\n\tl.Output(2, printf(format, v), LError)\n}\n\nfunc (l *logger) Alert(v ...interface{}) {\n\tl.Output(2, printf(\"\", v), LAlert)\n}\n\nfunc (l *logger) Alertf(format string, v ...interface{}) {\n\tl.Output(2, printf(format, v), LAlert)\n}\n\nfunc (l *logger) Panic(v ...interface{}) {\n\tl.Output(2, printf(\"\", v), LPanic)\n\tif !l.dontPanic {\n\t\tpanic(v)\n\t}\n}\n\nfunc (l *logger) Panicf(format string, v ...interface{}) {\n\tl.Output(2, printf(format, v), LPanic)\n\tif !l.dontPanic {\n\t\tpanic(v)\n\t}\n}\n\nfunc (l *logger) Fatal(v ...interface{}) {\n\tl.Output(2, printf(\"\", v), LFatal)\n\tif !l.dontExit {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc (l *logger) Fatalf(format string, v ...interface{}) {\n\tl.Output(2, printf(format, v), LFatal)\n\tif !l.dontExit {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc New(writer io.Writer, prefix string, flags ...int) Logger {\n\tret := new(logger)\n\tret.level = nr_levels\n\thasflags := false\n\tfor _, flag := range flags {\n\t\tswitch flag {\n\t\tcase MODE_EQUAL:\n\t\t\tret.equal = true\n\t\tcase MODE_NONE_NAME:\n\t\t\tret.noneName = true\n\t\tcase MODE_DONT_EXIT:\n\t\t\tret.dontExit = true\n\t\tcase MODE_DONT_PANIC:\n\t\t\tret.dontPanic = true\n\t\tcase MODE_RECOVER:\n\t\t\tret.recover = true\n\t\tdefault:\n\n\t\t\tif flag >= 0 {\n\t\t\t\thasflags = true\n\t\t\t\tret.flag = ret.flag | flag\n\t\t\t} else {\n\t\t\t\tret.level = flag\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ defaults to LstdFlags.\n\tif !hasflags {\n\t\tret.flag = LstdFlags\n\t}\n\n\tif ret.level <= nr_levels {\n\t\tret.level = nr_levels + 1\n\t}\n\n\tret.prefix = prefix\n\tret.out = writer\n\treturn ret\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/bgentry\/heroku-go\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n)\n\nvar (\n\tlines int\n\tsource string\n\tdyno string\n)\n\nvar cmdLog = &Command{\n\tRun: runLog,\n\tUsage: \"log [-n <lines>] [-s <source>] [-d <dyno>]\",\n\tNeedsApp: true,\n\tCategory: \"app\",\n\tShort: \"stream app log lines\",\n\tLong: `\nLog prints the streaming application log.\n\nOptions:\n\n -n <N> print at most N log lines\n -s <source> filter log source\n -d <dyno> filter dyno or process type\n\nExamples:\n\n $ hk log\n 2013-10-17T00:17:35.066089+00:00 app[web.1]: Completed 302 Found in 0ms\n 2013-10-17T00:17:35.079095+00:00 heroku[router]: at=info method=GET path=\/ host=www.heroku.com fwd=\"1.2.3.4\" dyno=web.1 connect=1ms service=6ms status=302 bytes=95\n 2013-10-17T00:17:35.505389+00:00 heroku[nginx]: 1.2.3.4 - - [17\/Oct\/2013:00:17:35 +0000] \"GET \/ HTTP\/1.1\" 301 5 \"-\" \"Amazon Route 53 Health Check Service\" www.heroku.com\n\t\t...\n\n $ hk log -n 2 -s app -d web\n\t\t2013-10-17T00:17:34.288521+00:00 app[web.1]: Completed 200 OK in 10ms (Views: 10.0ms)\n 2013-10-17T00:17:33.918946+00:00 heroku[web.5]: Started GET \"\/\" for 1.2.3.4 at 2013-10-17 00:17:32 +0000\n\t\t2013-10-17T00:17:34.667654+00:00 heroku[router]: at=info method=GET path=\/ host=www.heroku.com fwd=\"1.2.3.4\" dyno=web.5 connect=3ms service=8ms status=301 bytes=0\n 2013-10-17T00:17:35.079095+00:00 heroku[router]: at=info method=GET path=\/ host=www.heroku.com fwd=\"1.2.3.4\" dyno=web.1 connect=1ms service=6ms status=302 bytes=95\n\t\t...\n\n $ hk log -d web.5\n 2013-10-17T00:17:33.918946+00:00 app[web.5]: Started GET \"\/\" for 1.2.3.4 at 2013-10-17 00:17:32 +0000\n 2013-10-17T00:17:33.918658+00:00 app[web.5]: Processing by PagesController#root as HTML\n\t\t...\n`,\n}\n\nfunc init() {\n\tcmdLog.Flag.IntVar(&lines, \"n\", -1, \"max number of log lines to request\")\n\tcmdLog.Flag.StringVar(&source, \"s\", \"\", \"only display logs from the given source\")\n\tcmdLog.Flag.StringVar(&dyno, \"d\", \"\", \"only display logs from the given dyno or process type\")\n}\n\nfunc runLog(cmd *Command, args []string) {\n\topts := heroku.LogSessionCreateOpts{}\n\tif dyno != \"\" {\n\t\topts.Dyno = &dyno\n\t}\n\tif source != \"\" {\n\t\topts.Source = &source\n\t}\n\n\tif lines != -1 {\n\t\topts.Lines = &lines\n\t} else {\n\t\ttailopt := true\n\t\tlineopt := 10\n\t\topts.Tail = &tailopt\n\t\topts.Lines = &lineopt\n\t}\n\n\tsession, err := client.LogSessionCreate(mustApp(), opts)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tresp, err := http.Get(session.LogplexURL)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif resp.StatusCode\/100 != 2 {\n\t\tif resp.StatusCode\/100 == 4 {\n\t\t\tlog.Fatal(\"Unauthorized\")\n\t\t} else {\n\t\t\tlog.Fatal(\"Unexpected error: \" + resp.Status)\n\t\t}\n\t}\n\n\tif _, err = io.Copy(os.Stdout, resp.Body); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tresp.Body.Close()\n}\n<commit_msg>Scan lines<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/bgentry\/heroku-go\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n)\n\nvar (\n\tlines int\n\tsource string\n\tdyno string\n)\n\nvar cmdLog = &Command{\n\tRun: runLog,\n\tUsage: \"log [-n <lines>] [-s <source>] [-d <dyno>]\",\n\tNeedsApp: true,\n\tCategory: \"app\",\n\tShort: \"stream app log lines\",\n\tLong: `\nLog prints the streaming application log.\n\nOptions:\n\n -n <N> print at most N log lines\n -s <source> filter log source\n -d <dyno> filter dyno or process type\n\nExamples:\n\n $ hk log\n 2013-10-17T00:17:35.066089+00:00 app[web.1]: Completed 302 Found in 0ms\n 2013-10-17T00:17:35.079095+00:00 heroku[router]: at=info method=GET path=\/ host=www.heroku.com fwd=\"1.2.3.4\" dyno=web.1 connect=1ms service=6ms status=302 bytes=95\n 2013-10-17T00:17:35.505389+00:00 heroku[nginx]: 1.2.3.4 - - [17\/Oct\/2013:00:17:35 +0000] \"GET \/ HTTP\/1.1\" 301 5 \"-\" \"Amazon Route 53 Health Check Service\" www.heroku.com\n\t\t...\n\n $ hk log -n 2 -s app -d web\n\t\t2013-10-17T00:17:34.288521+00:00 app[web.1]: Completed 200 OK in 10ms (Views: 10.0ms)\n 2013-10-17T00:17:33.918946+00:00 heroku[web.5]: Started GET \"\/\" for 1.2.3.4 at 2013-10-17 00:17:32 +0000\n\t\t2013-10-17T00:17:34.667654+00:00 heroku[router]: at=info method=GET path=\/ host=www.heroku.com fwd=\"1.2.3.4\" dyno=web.5 connect=3ms service=8ms status=301 bytes=0\n 2013-10-17T00:17:35.079095+00:00 heroku[router]: at=info method=GET path=\/ host=www.heroku.com fwd=\"1.2.3.4\" dyno=web.1 connect=1ms service=6ms status=302 bytes=95\n\t\t...\n\n $ hk log -d web.5\n 2013-10-17T00:17:33.918946+00:00 app[web.5]: Started GET \"\/\" for 1.2.3.4 at 2013-10-17 00:17:32 +0000\n 2013-10-17T00:17:33.918658+00:00 app[web.5]: Processing by PagesController#root as HTML\n\t\t...\n`,\n}\n\nfunc init() {\n\tcmdLog.Flag.IntVar(&lines, \"n\", -1, \"max number of log lines to request\")\n\tcmdLog.Flag.StringVar(&source, \"s\", \"\", \"only display logs from the given source\")\n\tcmdLog.Flag.StringVar(&dyno, \"d\", \"\", \"only display logs from the given dyno or process type\")\n}\n\nfunc runLog(cmd *Command, args []string) {\n\topts := heroku.LogSessionCreateOpts{}\n\tif dyno != \"\" {\n\t\topts.Dyno = &dyno\n\t}\n\tif source != \"\" {\n\t\topts.Source = &source\n\t}\n\n\tif lines != -1 {\n\t\topts.Lines = &lines\n\t} else {\n\t\ttailopt := true\n\t\tlineopt := 10\n\t\topts.Tail = &tailopt\n\t\topts.Lines = &lineopt\n\t}\n\n\tsession, err := client.LogSessionCreate(mustApp(), opts)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tresp, err := http.Get(session.LogplexURL)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif resp.StatusCode\/100 != 2 {\n\t\tif resp.StatusCode\/100 == 4 {\n\t\t\tlog.Fatal(\"Unauthorized\")\n\t\t} else {\n\t\t\tlog.Fatal(\"Unexpected error: \" + resp.Status)\n\t\t}\n\t}\n\n\tscanner := bufio.NewScanner(resp.Body)\n\tscanner.Split(bufio.ScanLines)\n\n\tfor scanner.Scan() {\n\t\tif _, err = fmt.Fprintln(os.Stdout, scanner.Text()); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tresp.Body.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package log provides a helpful wrapper around the standard log package.\n\/\/\n\/\/ Anticipated basic usage:\n\/\/ log.Infof(\"This is an info level message\")\n\/\/ log.Warnf(\"This is a warn level message\")\n\/\/ log.Errorf(\"This is an error level message\")\n\/\/ log.V(5, \"This is info level, but will only show up if --verbosity >= 5\")\n\/\/ log.Panicf(\"This message is error level, and also becomes a panic()\")\n\/\/ log.Fatalf(\"This message is fatal level, and os.Exit(1) follows immediately\")\npackage log\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n)\n\nconst (\n\t\/\/ Kept separate because these may diverge.\n\t\/\/ Call depth when using the default calls to the Root Logger.\n\trootDepth = 3\n\n\t\/\/ Call depth when directly calling log functions on a Logger.\n\tlogDepth = 3\n)\n\nvar (\n\tVerbosity = flag.Int(\"verbosity\", 0, \"Logging verbosity level. Higher means more logs.\")\n\tRoot *Logger\n)\n\n\/\/ The rewriter type allows us to change the destination of written data without\n\/\/ rebuilding the actual log.Logger objects used.\ntype rewriter struct {\n\tw *io.Writer\n}\n\nfunc (w *rewriter) Write(p []byte) (int, error) {\n\treturn (*w.w).Write(p)\n}\n\nfunc init() {\n\tRoot = New(\"\")\n}\n\n\/\/ Logger provides an individually configurable logging instance.\ntype Logger struct {\n\tname string\n\n\t\/\/ Verbosity indicates how \"loud\" this logger is.\n\t\/\/ It defaults to the Verbosity flag.\n\tVerbosity *int\n\n\ti, w, e, f *log.Logger\n\n\t\/\/ Info is where all INFO-level messages get written.\n\tInfo io.Writer\n\n\t\/\/ Warn is where all WARN-level messages get written.\n\tWarn io.Writer\n\n\t\/\/ Error is where all ERROR-level messages (including Panic) get written.\n\tError io.Writer\n\n\t\/\/ Fatal is where all FATAL-level messages get written.\n\tFatal io.Writer\n\n\t\/\/ Exit is the function to call after logging a Fatal message.\n\t\/\/ If nil, is not called.\n\tExit func()\n}\n\n\/\/ New returns a new Logger with the given name.\nfunc New(name string) *Logger {\n\tl := &Logger{\n\t\tname: name,\n\t\tVerbosity: Verbosity,\n\t\tInfo: os.Stderr,\n\t\tWarn: os.Stderr,\n\t\tError: os.Stderr,\n\t\tFatal: os.Stderr,\n\t\tExit: func() { os.Exit(1) },\n\t}\n\tflags := log.Ldate | log.Ltime | log.Lshortfile\n\tl.i = log.New(&rewriter{&l.Info}, \"I\", flags)\n\tl.w = log.New(&rewriter{&l.Warn}, \"W\", flags)\n\tl.e = log.New(&rewriter{&l.Error}, \"E\", flags)\n\tl.f = log.New(&rewriter{&l.Fatal}, \"F\", flags)\n\treturn l\n}\n\nfunc (l *Logger) Name() string {\n\treturn l.name\n}\n\n\/\/ SetVerbosity is a convenience method to set the logging verbosity to a constant.\nfunc (l *Logger) SetVerbosity(v int) {\n\tl.Verbosity = &v\n}\n\n\/\/ Formats the message and writes it to the given logger.\n\/\/ Returns the formatted message.\n\/\/ If there is an error writing to the given logger, writes a description\n\/\/ including the given message to the base logger.\nfunc write(l *log.Logger, depth int, name, format string, v ...interface{}) string {\n\tmsg := fmt.Sprintf(format, v...)\n\tif err := l.Output(depth, msg); err != nil {\n\t\tlog.Printf(\"Failed to write to %s logger: %v.\\n Message: %s\", name, err, msg)\n\t}\n\treturn msg\n}\n\n\/\/ LoudEnough returns whether the verbosity is high enough to include messages of the given level.\nfunc (l *Logger) LoudEnough(level int) bool {\n\treturn level <= *l.Verbosity\n}\n\n\/\/ LoudEnough returns whether the verbosity on the root logger is high enough to include messages of the given level.\nfunc LoudEnough(level int) bool {\n\treturn Root.LoudEnough(level)\n}\n\n\/\/ V writes log messages at INFO level, but only if the configured verbosity is equal or greater than the provided level.\nfunc (l *Logger) V(level int, format string, v ...interface{}) {\n\tif l.LoudEnough(level) {\n\t\twrite(l.i, logDepth, l.name+\" info\", format, v...)\n\t}\n}\n\n\/\/ V writes log messages at INFO level to the root logger, but only if the configured verbosity is equal or greater than the provided level.\nfunc V(level int, format string, v ...interface{}) {\n\tif Root.LoudEnough(level) {\n\t\twrite(Root.i, rootDepth, Root.name+\" info\", format, v...)\n\t}\n}\n\n\/\/ Infof writes log messages at INFO level.\nfunc (l *Logger) Infof(format string, v ...interface{}) {\n\twrite(l.i, logDepth, l.name+\" info\", format, v...)\n}\n\n\/\/ Infof writes log messages at INFO level to the root logger.\nfunc Infof(format string, v ...interface{}) {\n\twrite(Root.i, rootDepth, Root.name+\" info\", format, v...)\n}\n\n\/\/ Printf is synonymous with Infof.\n\/\/ It exists for compatibility with the basic log package.\nfunc (l *Logger) Printf(format string, v ...interface{}) {\n\twrite(l.i, logDepth, l.name+\" info\", format, v...)\n}\n\n\/\/ Printf is synonymous with Infof.\n\/\/ It exists for compatibility with the basic log package.\nfunc Printf(format string, v ...interface{}) {\n\twrite(Root.i, rootDepth, Root.name+\" info\", format, v...)\n}\n\n\/\/ Warnf writes log messages at WARN level.\nfunc (l *Logger) Warnf(format string, v ...interface{}) {\n\twrite(l.w, logDepth, l.name+\" warn\", format, v...)\n}\n\n\/\/ Warnf writes log messages at WARN level to the root logger.\nfunc Warnf(format string, v ...interface{}) {\n\twrite(Root.w, rootDepth, Root.name+\" warn\", format, v...)\n}\n\n\/\/ Errorf writes log messages at ERROR level.\nfunc (l *Logger) Errorf(format string, v ...interface{}) {\n\twrite(l.e, logDepth, l.name+\" error\", format, v...)\n}\n\n\/\/ Errorf writes log messages at ERROR level to the root logger.\nfunc Errorf(format string, v ...interface{}) {\n\twrite(Root.e, rootDepth, Root.name+\" error\", format, v...)\n}\n\n\/\/ Panicf writes log messages at ERROR level, and then panics.\n\/\/ The panic parameter is an error with the formatted message.\nfunc (l *Logger) Panicf(format string, v ...interface{}) {\n\tpanic(errors.New(write(l.e, logDepth, l.name+\" error\", format, v...)))\n}\n\n\/\/ Panicf writes log messages at ERROR level to the root logger, and then panics.\n\/\/ The panic parameter is an error with the formatted message.\nfunc Panicf(format string, v ...interface{}) {\n\tpanic(errors.New(write(Root.e, rootDepth, Root.name+\" error\", format, v...)))\n}\n\n\/\/ Fatalf writes log messages at FATAL level, and then calls Exit.\nfunc (l *Logger) Fatalf(format string, v ...interface{}) {\n\twrite(l.f, logDepth, l.name+\" fatal\", format, v...)\n\tif l.Exit != nil {\n\t\tl.Exit()\n\t}\n}\n\n\/\/ Fatalf writes log messages at FATAL level to the root logger, and then calls Exit.\nfunc Fatalf(format string, v ...interface{}) {\n\twrite(Root.f, rootDepth, Root.name+\" fatal\", format, v...)\n\tif Root.Exit != nil {\n\t\tRoot.Exit()\n\t}\n}\n<commit_msg>Replace *log.Logger with an interface.<commit_after>\/\/ Package log provides a helpful wrapper around the standard log package.\n\/\/\n\/\/ Anticipated basic usage:\n\/\/ log.Infof(\"This is an info level message\")\n\/\/ log.Warnf(\"This is a warn level message\")\n\/\/ log.Errorf(\"This is an error level message\")\n\/\/ log.V(5, \"This is info level, but will only show up if --verbosity >= 5\")\n\/\/ log.Panicf(\"This message is error level, and also becomes a panic()\")\n\/\/ log.Fatalf(\"This message is fatal level, and os.Exit(1) follows immediately\")\npackage log\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n)\n\nconst (\n\t\/\/ Kept separate because these may diverge.\n\t\/\/ Call depth when using the default calls to the Root Logger.\n\trootDepth = 3\n\n\t\/\/ Call depth when directly calling log functions on a Logger.\n\tlogDepth = 3\n)\n\nvar (\n\tVerbosity = flag.Int(\"verbosity\", 0, \"Logging verbosity level. Higher means more logs.\")\n\tRoot *Logger\n)\n\n\/\/ The rewriter type allows us to change the destination of written data without\n\/\/ rebuilding the actual log.Logger objects used.\ntype rewriter struct {\n\tw *io.Writer\n}\n\nfunc (w *rewriter) Write(p []byte) (int, error) {\n\treturn (*w.w).Write(p)\n}\n\nfunc init() {\n\tRoot = New(\"\")\n}\n\n\/\/ Logable is the interface required for writing data to the next lower level.\ntype Logable interface {\n\t\/\/ Output a log message. See log.Logger.Output for details.\n\tOutput(calldepth int, s string) error\n}\n\n\/\/ Logger provides an individually configurable logging instance.\ntype Logger struct {\n\tname string\n\n\t\/\/ Verbosity indicates how \"loud\" this logger is.\n\t\/\/ It defaults to the Verbosity flag.\n\tVerbosity *int\n\n\ti, w, e, f Logable\n\n\t\/\/ Info is where all INFO-level messages get written.\n\tInfo io.Writer\n\n\t\/\/ Warn is where all WARN-level messages get written.\n\tWarn io.Writer\n\n\t\/\/ Error is where all ERROR-level messages (including Panic) get written.\n\tError io.Writer\n\n\t\/\/ Fatal is where all FATAL-level messages get written.\n\tFatal io.Writer\n\n\t\/\/ Exit is the function to call after logging a Fatal message.\n\t\/\/ If nil, is not called.\n\tExit func()\n}\n\n\/\/ New returns a new Logger with the given name.\nfunc New(name string) *Logger {\n\tl := &Logger{\n\t\tname: name,\n\t\tVerbosity: Verbosity,\n\t\tInfo: os.Stderr,\n\t\tWarn: os.Stderr,\n\t\tError: os.Stderr,\n\t\tFatal: os.Stderr,\n\t\tExit: func() { os.Exit(1) },\n\t}\n\tflags := log.Ldate | log.Ltime | log.Lshortfile\n\tl.i = log.New(&rewriter{&l.Info}, \"I\", flags)\n\tl.w = log.New(&rewriter{&l.Warn}, \"W\", flags)\n\tl.e = log.New(&rewriter{&l.Error}, \"E\", flags)\n\tl.f = log.New(&rewriter{&l.Fatal}, \"F\", flags)\n\treturn l\n}\n\nfunc (l *Logger) Name() string {\n\treturn l.name\n}\n\n\/\/ SetVerbosity is a convenience method to set the logging verbosity to a constant.\nfunc (l *Logger) SetVerbosity(v int) {\n\tl.Verbosity = &v\n}\n\n\/\/ Formats the message and writes it to the given logger.\n\/\/ Returns the formatted message.\n\/\/ If there is an error writing to the given logger, writes a description\n\/\/ including the given message to the base logger.\nfunc write(l Logable, depth int, name, format string, v ...interface{}) string {\n\tmsg := fmt.Sprintf(format, v...)\n\tif err := l.Output(depth, msg); err != nil {\n\t\tlog.Printf(\"Failed to write to %s logger: %v.\\n Message: %s\", name, err, msg)\n\t}\n\treturn msg\n}\n\n\/\/ LoudEnough returns whether the verbosity is high enough to include messages of the given level.\nfunc (l *Logger) LoudEnough(level int) bool {\n\treturn level <= *l.Verbosity\n}\n\n\/\/ LoudEnough returns whether the verbosity on the root logger is high enough to include messages of the given level.\nfunc LoudEnough(level int) bool {\n\treturn Root.LoudEnough(level)\n}\n\n\/\/ V writes log messages at INFO level, but only if the configured verbosity is equal or greater than the provided level.\nfunc (l *Logger) V(level int, format string, v ...interface{}) {\n\tif l.LoudEnough(level) {\n\t\twrite(l.i, logDepth, l.name+\" info\", format, v...)\n\t}\n}\n\n\/\/ V writes log messages at INFO level to the root logger, but only if the configured verbosity is equal or greater than the provided level.\nfunc V(level int, format string, v ...interface{}) {\n\tif Root.LoudEnough(level) {\n\t\twrite(Root.i, rootDepth, Root.name+\" info\", format, v...)\n\t}\n}\n\n\/\/ Infof writes log messages at INFO level.\nfunc (l *Logger) Infof(format string, v ...interface{}) {\n\twrite(l.i, logDepth, l.name+\" info\", format, v...)\n}\n\n\/\/ Infof writes log messages at INFO level to the root logger.\nfunc Infof(format string, v ...interface{}) {\n\twrite(Root.i, rootDepth, Root.name+\" info\", format, v...)\n}\n\n\/\/ Printf is synonymous with Infof.\n\/\/ It exists for compatibility with the basic log package.\nfunc (l *Logger) Printf(format string, v ...interface{}) {\n\twrite(l.i, logDepth, l.name+\" info\", format, v...)\n}\n\n\/\/ Printf is synonymous with Infof.\n\/\/ It exists for compatibility with the basic log package.\nfunc Printf(format string, v ...interface{}) {\n\twrite(Root.i, rootDepth, Root.name+\" info\", format, v...)\n}\n\n\/\/ Warnf writes log messages at WARN level.\nfunc (l *Logger) Warnf(format string, v ...interface{}) {\n\twrite(l.w, logDepth, l.name+\" warn\", format, v...)\n}\n\n\/\/ Warnf writes log messages at WARN level to the root logger.\nfunc Warnf(format string, v ...interface{}) {\n\twrite(Root.w, rootDepth, Root.name+\" warn\", format, v...)\n}\n\n\/\/ Errorf writes log messages at ERROR level.\nfunc (l *Logger) Errorf(format string, v ...interface{}) {\n\twrite(l.e, logDepth, l.name+\" error\", format, v...)\n}\n\n\/\/ Errorf writes log messages at ERROR level to the root logger.\nfunc Errorf(format string, v ...interface{}) {\n\twrite(Root.e, rootDepth, Root.name+\" error\", format, v...)\n}\n\n\/\/ Panicf writes log messages at ERROR level, and then panics.\n\/\/ The panic parameter is an error with the formatted message.\nfunc (l *Logger) Panicf(format string, v ...interface{}) {\n\tpanic(errors.New(write(l.e, logDepth, l.name+\" error\", format, v...)))\n}\n\n\/\/ Panicf writes log messages at ERROR level to the root logger, and then panics.\n\/\/ The panic parameter is an error with the formatted message.\nfunc Panicf(format string, v ...interface{}) {\n\tpanic(errors.New(write(Root.e, rootDepth, Root.name+\" error\", format, v...)))\n}\n\n\/\/ Fatalf writes log messages at FATAL level, and then calls Exit.\nfunc (l *Logger) Fatalf(format string, v ...interface{}) {\n\twrite(l.f, logDepth, l.name+\" fatal\", format, v...)\n\tif l.Exit != nil {\n\t\tl.Exit()\n\t}\n}\n\n\/\/ Fatalf writes log messages at FATAL level to the root logger, and then calls Exit.\nfunc Fatalf(format string, v ...interface{}) {\n\twrite(Root.f, rootDepth, Root.name+\" fatal\", format, v...)\n\tif Root.Exit != nil {\n\t\tRoot.Exit()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package css \/\/ import \"github.com\/tdewolff\/minify\/css\"\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/tdewolff\/minify\"\n\t\"github.com\/tdewolff\/test\"\n)\n\nfunc TestCSS(t *testing.T) {\n\tvar cssTests = []struct {\n\t\tcss string\n\t\texpected string\n\t}{\n\t\t{\"i { key: value; key2: value; }\", \"i{key:value;key2:value}\"},\n\t\t{\".cla .ss > #id { x:y; }\", \".cla .ss>#id{x:y}\"},\n\t\t{\".cla[id ^= L] { x:y; }\", \".cla[id^=L]{x:y}\"},\n\t\t{\"area:focus { outline : 0;}\", \"area:focus{outline:0}\"},\n\t\t{\"@import 'file';\", \"@import 'file'\"},\n\t\t{\"@font-face { x:y; }\", \"@font-face{x:y}\"},\n\n\t\t{\"input[type=\\\"radio\\\"]{x:y}\", \"input[type=radio]{x:y}\"},\n\t\t{\"DIV{margin:1em}\", \"div{margin:1em}\"},\n\t\t{\".CLASS{margin:1em}\", \".CLASS{margin:1em}\"},\n\t\t{\"@MEDIA all{}\", \"@media all{}\"},\n\t\t{\"@media only screen and (max-width : 800px){}\", \"@media only screen and (max-width:800px){}\"},\n\t\t{\"@media (-webkit-min-device-pixel-ratio:1.5),(min-resolution:1.5dppx){}\", \"@media(-webkit-min-device-pixel-ratio:1.5),(min-resolution:1.5dppx){}\"},\n\t\t{\"[class^=icon-] i[class^=icon-],i[class*=\\\" icon-\\\"]{x:y}\", \"[class^=icon-] i[class^=icon-],i[class*=\\\" icon-\\\"]{x:y}\"},\n\t\t{\"html{line-height:1;}html{line-height:1;}\", \"html{line-height:1}html{line-height:1}\"},\n\t\t{\".clearfix { *zoom: 1; }\", \".clearfix{*zoom:1}\"},\n\t\t{\"a { b: 1\", \"a{b:1}\"},\n\n\t\t\/\/ coverage\n\t\t{\"a, b + c { x:y; }\", \"a,b+c{x:y}\"},\n\n\t\t\/\/ go-fuzz\n\t\t{\"input[type=\\\"\\x00\\\"] { a: b\\n}.a{}\", \"input[type=\\\"\\x00\\\"] { a: b\\n}.a{}\"},\n\t\t{\"a{a:)'''\", \"a{a:)'''}\"},\n\t}\n\n\tm := minify.New()\n\tfor _, tt := range cssTests {\n\t\tb := &bytes.Buffer{}\n\t\tassert.Nil(t, Minify(m, \"text\/css\", b, bytes.NewBufferString(tt.css)), \"Minify must not return error in \"+tt.css)\n\t\tassert.Equal(t, tt.expected, b.String(), \"Minify must give expected result in \"+tt.css)\n\t}\n}\n\nfunc TestCSSInline(t *testing.T) {\n\tvar cssTests = []struct {\n\t\tcss string\n\t\texpected string\n\t}{\n\t\t{\"\/*comment*\/\", \"\"},\n\t\t{\";\", \"\"},\n\t\t{\"empty:\", \"empty:\"},\n\t\t{\"key: value;\", \"key:value\"},\n\t\t{\"margin: 0 1; padding: 0 1;\", \"margin:0 1;padding:0 1\"},\n\t\t{\"color: #FF0000;\", \"color:red\"},\n\t\t{\"color: #000000;\", \"color:#000\"},\n\t\t{\"color: black;\", \"color:#000\"},\n\t\t{\"color: rgb(255,255,255);\", \"color:#fff\"},\n\t\t{\"color: rgb(100%,100%,100%);\", \"color:#fff\"},\n\t\t{\"color: rgba(255,0,0,1);\", \"color:red\"},\n\t\t{\"color: rgba(255,0,0,2);\", \"color:red\"},\n\t\t{\"color: rgba(255,0,0,0.5);\", \"color:rgba(255,0,0,.5)\"},\n\t\t{\"color: rgba(255,0,0,-1);\", \"color:transparent\"},\n\t\t{\"color: hsl(0,100%,50%);\", \"color:red\"},\n\t\t{\"color: hsla(1,2%,3%,1);\", \"color:#080807\"},\n\t\t{\"color: hsla(1,2%,3%,0);\", \"color:transparent\"},\n\t\t{\"color: hsl(48,100%,50%);\", \"color:#fc0\"},\n\t\t{\"font-weight: bold; font-weight: normal;\", \"font-weight:700;font-weight:400\"},\n\t\t{\"font: bold \\\"Times new Roman\\\",\\\"Sans-Serif\\\";\", \"font:700 times new roman,\\\"sans-serif\\\"\"},\n\t\t{\"outline: none;\", \"outline:0\"},\n\t\t{\"outline: none !important;\", \"outline:0!important\"},\n\t\t{\"border-left: none;\", \"border-left:0\"},\n\t\t{\"margin: 1 1 1 1;\", \"margin:1\"},\n\t\t{\"margin: 1 2 1 2;\", \"margin:1 2\"},\n\t\t{\"margin: 1 2 3 2;\", \"margin:1 2 3\"},\n\t\t{\"margin: 1 2 3 4;\", \"margin:1 2 3 4\"},\n\t\t{\"margin: 1 1 1 a;\", \"margin:1 1 1 a\"},\n\t\t{\"margin: 1 1 1 1 !important;\", \"margin:1!important\"},\n\t\t{\"padding:.2em .4em .2em\", \"padding:.2em .4em\"},\n\t\t{\"margin: 0em;\", \"margin:0\"},\n\t\t{\"font-family:'Arial', 'Times New Roman';\", \"font-family:arial,times new roman\"},\n\t\t{\"background:url('http:\/\/domain.com\/image.png');\", \"background:url(http:\/\/domain.com\/image.png)\"},\n\t\t{\"filter: progid : DXImageTransform.Microsoft.BasicImage(rotation=1);\", \"filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=1)\"},\n\t\t{\"filter: progid:DXImageTransform.Microsoft.Alpha(Opacity=0);\", \"filter:alpha(opacity=0)\"},\n\t\t{\"content: \\\"a\\\\\\nb\\\";\", \"content:\\\"ab\\\"\"},\n\t\t{\"content: \\\"a\\\\\\r\\nb\\\\\\r\\nc\\\";\", \"content:\\\"abc\\\"\"},\n\t\t{\"content: \\\"\\\";\", \"content:\\\"\\\"\"},\n\n\t\t{\"font:27px\/13px arial,sans-serif\", \"font:27px\/13px arial,sans-serif\"},\n\t\t{\"text-decoration: none !important\", \"text-decoration:none!important\"},\n\t\t{\"color:#fff\", \"color:#fff\"},\n\t\t{\"border:2px rgb(255,255,255);\", \"border:2px #fff\"},\n\t\t{\"margin:-1px\", \"margin:-1px\"},\n\t\t{\"margin:+1px\", \"margin:1px\"},\n\t\t{\"margin:0.5em\", \"margin:.5em\"},\n\t\t{\"margin:-0.5em\", \"margin:-.5em\"},\n\t\t{\"margin:05em\", \"margin:5em\"},\n\t\t{\"margin:.50em\", \"margin:.5em\"},\n\t\t{\"margin:5.0em\", \"margin:5em\"},\n\t\t{\"color:#c0c0c0\", \"color:silver\"},\n\t\t{\"-ms-filter: \\\"progid:DXImageTransform.Microsoft.Alpha(Opacity=80)\\\";\", \"-ms-filter:\\\"alpha(opacity=80)\\\"\"},\n\t\t{\"filter: progid:DXImageTransform.Microsoft.Alpha(Opacity = 80);\", \"filter:alpha(opacity=80)\"},\n\t\t{\"MARGIN:1EM\", \"margin:1em\"},\n\t\t{\"color:CYAN\", \"color:cyan\"},\n\t\t{\"background:URL(x.PNG);\", \"background:url(x.PNG)\"},\n\t\t{\"background:url(\/*nocomment*\/)\", \"background:url(\/*nocomment*\/)\"},\n\t\t{\"background:url(data:,text)\", \"background:url(data:,text)\"},\n\t\t{\"background:url('data:text\/xml; version = 2.0,content')\", \"background:url(data:text\/xml;version=2.0,content)\"},\n\t\t{\"background:url('data:\\\\'\\\",text')\", \"background:url('data:\\\\'\\\",text')\"},\n\t\t{\"margin:0 0 18px 0;\", \"margin:0 0 18px\"},\n\t\t{\"background:none\", \"background:0 0\"},\n\t\t{\"background:none 1 1\", \"background:none 1 1\"},\n\t\t{\"z-index:1000\", \"z-index:1000\"},\n\n\t\t\/\/ coverage\n\t\t{\"margin: 1 1;\", \"margin:1\"},\n\t\t{\"margin: 1 2;\", \"margin:1 2\"},\n\t\t{\"margin: 1 1 1;\", \"margin:1\"},\n\t\t{\"margin: 1 2 1;\", \"margin:1 2\"},\n\t\t{\"margin: 1 2 3;\", \"margin:1 2 3\"},\n\t\t{\"margin: 0%;\", \"margin:0\"},\n\t\t{\"color: rgb(255,64,64);\", \"color:#ff4040\"},\n\t\t{\"color: rgb(256,-34,2342435);\", \"color:#f0f\"},\n\t\t{\"color: rgb(120%,-45%,234234234%);\", \"color:#f0f\"},\n\t\t{\"color: rgb(0, 1, ident);\", \"color:rgb(0,1,ident)\"},\n\t\t{\"color: rgb(ident);\", \"color:rgb(ident)\"},\n\t\t{\"margin: rgb(ident);\", \"margin:rgb(ident)\"},\n\t\t{\"filter: progid:b().c.Alpha(rgba(x));\", \"filter:progid:b().c.Alpha(rgba(x))\"},\n\n\t\t\/\/ go-fuzz\n\t\t{\"FONT-FAMILY: ru\\\"\", \"font-family:ru\\\"\"},\n\t}\n\n\tm := minify.New()\n\tfor _, tt := range cssTests {\n\t\tr := bytes.NewBufferString(tt.css)\n\t\tw := &bytes.Buffer{}\n\t\tassert.Nil(t, Minify(m, \"text\/css;inline=1\", w, r), \"Minify must not return error in \"+tt.css)\n\t\tassert.Equal(t, tt.expected, w.String(), \"Minify must give expected result in \"+tt.css)\n\t}\n}\n\nfunc TestCSSInlineMediatype(t *testing.T) {\n\tcss := `color:red`\n\tm := minify.New()\n\n\tr := bytes.NewBufferString(css)\n\tw := &bytes.Buffer{}\n\tassert.Nil(t, Minify(m, \"text\/css ; inline = 1\", w, r), \"Minify must not return error in \"+css)\n\tassert.Equal(t, css, w.String(), \"Minify must give expected result in \"+css)\n}\n\nfunc TestReaderErrors(t *testing.T) {\n\tm := minify.New()\n\tr := test.NewErrorReader(0)\n\tw := &bytes.Buffer{}\n\tassert.Equal(t, test.ErrPlain, Minify(m, \"text\/css\", w, r), \"Minify must return error at first read\")\n}\n\nfunc TestWriterErrors(t *testing.T) {\n\tvar errorTests = []struct {\n\t\tcss string\n\t\tn []int\n\t}{\n\t\t{`@import 'file'`, []int{0, 2}},\n\t\t{`@media all{}`, []int{0, 2, 3, 4}},\n\t\t{`a[id^=\"L\"]{margin:2in!important;color:red}`, []int{0, 4, 6, 7, 8, 9, 10, 11}},\n\t\t{`a{color:rgb(255,0,0)}`, []int{4}},\n\t\t{`a{color:rgb(255,255,255)}`, []int{4}},\n\t\t{`a{color:hsl(0,100%,50%)}`, []int{4}},\n\t\t{`a{color:hsl(360,100%,100%)}`, []int{4}},\n\t\t{`a{color:f(arg)}`, []int{4}},\n\t\t{`<!--`, []int{0}},\n\t}\n\n\tm := minify.New()\n\tfor _, tt := range errorTests {\n\t\tfor _, n := range tt.n {\n\t\t\tr := bytes.NewBufferString(tt.css)\n\t\t\tw := test.NewErrorWriter(n)\n\t\t\tassert.Equal(t, test.ErrPlain, Minify(m, \"text\/css\", w, r), \"Minify must return error in \"+tt.css+\" at write \"+strconv.FormatInt(int64(n), 10))\n\t\t}\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc ExampleMinify() {\n\tm := minify.New()\n\tm.AddFunc(\"text\/css\", Minify)\n\n\tif err := m.Minify(\"text\/css\", os.Stdout, os.Stdin); err != nil {\n\t\tfmt.Println(\"minify.Minify:\", err)\n\t}\n}\n<commit_msg>Finally fixed!<commit_after>package css \/\/ import \"github.com\/tdewolff\/minify\/css\"\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/tdewolff\/minify\"\n\t\"github.com\/tdewolff\/test\"\n)\n\nfunc TestCSS(t *testing.T) {\n\tvar cssTests = []struct {\n\t\tcss string\n\t\texpected string\n\t}{\n\t\t{\"i { key: value; key2: value; }\", \"i{key:value;key2:value}\"},\n\t\t{\".cla .ss > #id { x:y; }\", \".cla .ss>#id{x:y}\"},\n\t\t{\".cla[id ^= L] { x:y; }\", \".cla[id^=L]{x:y}\"},\n\t\t{\"area:focus { outline : 0;}\", \"area:focus{outline:0}\"},\n\t\t{\"@import 'file';\", \"@import 'file'\"},\n\t\t{\"@font-face { x:y; }\", \"@font-face{x:y}\"},\n\n\t\t{\"input[type=\\\"radio\\\"]{x:y}\", \"input[type=radio]{x:y}\"},\n\t\t{\"DIV{margin:1em}\", \"div{margin:1em}\"},\n\t\t{\".CLASS{margin:1em}\", \".CLASS{margin:1em}\"},\n\t\t{\"@MEDIA all{}\", \"@media all{}\"},\n\t\t{\"@media only screen and (max-width : 800px){}\", \"@media only screen and (max-width:800px){}\"},\n\t\t{\"@media (-webkit-min-device-pixel-ratio:1.5),(min-resolution:1.5dppx){}\", \"@media(-webkit-min-device-pixel-ratio:1.5),(min-resolution:1.5dppx){}\"},\n\t\t{\"[class^=icon-] i[class^=icon-],i[class*=\\\" icon-\\\"]{x:y}\", \"[class^=icon-] i[class^=icon-],i[class*=\\\" icon-\\\"]{x:y}\"},\n\t\t{\"html{line-height:1;}html{line-height:1;}\", \"html{line-height:1}html{line-height:1}\"},\n\t\t{\".clearfix { *zoom: 1; }\", \".clearfix{*zoom:1}\"},\n\t\t{\"a { b: 1\", \"a{b:1}\"},\n\n\t\t\/\/ coverage\n\t\t{\"a, b + c { x:y; }\", \"a,b+c{x:y}\"},\n\n\t\t\/\/ go-fuzz\n\t\t{\"input[type=\\\"\\x00\\\"] { a: b\\n}.a{}\", \"input[type=\\\"\\x00\\\"]{a:b}.a{}\"},\n\t\t{\"a{a:)'''\", \"a{a:)'''}\"},\n\t}\n\n\tm := minify.New()\n\tfor _, tt := range cssTests {\n\t\tb := &bytes.Buffer{}\n\t\tassert.Nil(t, Minify(m, \"text\/css\", b, bytes.NewBufferString(tt.css)), \"Minify must not return error in \"+tt.css)\n\t\tassert.Equal(t, tt.expected, b.String(), \"Minify must give expected result in \"+tt.css)\n\t}\n}\n\nfunc TestCSSInline(t *testing.T) {\n\tvar cssTests = []struct {\n\t\tcss string\n\t\texpected string\n\t}{\n\t\t{\"\/*comment*\/\", \"\"},\n\t\t{\";\", \"\"},\n\t\t{\"empty:\", \"empty:\"},\n\t\t{\"key: value;\", \"key:value\"},\n\t\t{\"margin: 0 1; padding: 0 1;\", \"margin:0 1;padding:0 1\"},\n\t\t{\"color: #FF0000;\", \"color:red\"},\n\t\t{\"color: #000000;\", \"color:#000\"},\n\t\t{\"color: black;\", \"color:#000\"},\n\t\t{\"color: rgb(255,255,255);\", \"color:#fff\"},\n\t\t{\"color: rgb(100%,100%,100%);\", \"color:#fff\"},\n\t\t{\"color: rgba(255,0,0,1);\", \"color:red\"},\n\t\t{\"color: rgba(255,0,0,2);\", \"color:red\"},\n\t\t{\"color: rgba(255,0,0,0.5);\", \"color:rgba(255,0,0,.5)\"},\n\t\t{\"color: rgba(255,0,0,-1);\", \"color:transparent\"},\n\t\t{\"color: hsl(0,100%,50%);\", \"color:red\"},\n\t\t{\"color: hsla(1,2%,3%,1);\", \"color:#080807\"},\n\t\t{\"color: hsla(1,2%,3%,0);\", \"color:transparent\"},\n\t\t{\"color: hsl(48,100%,50%);\", \"color:#fc0\"},\n\t\t{\"font-weight: bold; font-weight: normal;\", \"font-weight:700;font-weight:400\"},\n\t\t{\"font: bold \\\"Times new Roman\\\",\\\"Sans-Serif\\\";\", \"font:700 times new roman,\\\"sans-serif\\\"\"},\n\t\t{\"outline: none;\", \"outline:0\"},\n\t\t{\"outline: none !important;\", \"outline:0!important\"},\n\t\t{\"border-left: none;\", \"border-left:0\"},\n\t\t{\"margin: 1 1 1 1;\", \"margin:1\"},\n\t\t{\"margin: 1 2 1 2;\", \"margin:1 2\"},\n\t\t{\"margin: 1 2 3 2;\", \"margin:1 2 3\"},\n\t\t{\"margin: 1 2 3 4;\", \"margin:1 2 3 4\"},\n\t\t{\"margin: 1 1 1 a;\", \"margin:1 1 1 a\"},\n\t\t{\"margin: 1 1 1 1 !important;\", \"margin:1!important\"},\n\t\t{\"padding:.2em .4em .2em\", \"padding:.2em .4em\"},\n\t\t{\"margin: 0em;\", \"margin:0\"},\n\t\t{\"font-family:'Arial', 'Times New Roman';\", \"font-family:arial,times new roman\"},\n\t\t{\"background:url('http:\/\/domain.com\/image.png');\", \"background:url(http:\/\/domain.com\/image.png)\"},\n\t\t{\"filter: progid : DXImageTransform.Microsoft.BasicImage(rotation=1);\", \"filter:progid:DXImageTransform.Microsoft.BasicImage(rotation=1)\"},\n\t\t{\"filter: progid:DXImageTransform.Microsoft.Alpha(Opacity=0);\", \"filter:alpha(opacity=0)\"},\n\t\t{\"content: \\\"a\\\\\\nb\\\";\", \"content:\\\"ab\\\"\"},\n\t\t{\"content: \\\"a\\\\\\r\\nb\\\\\\r\\nc\\\";\", \"content:\\\"abc\\\"\"},\n\t\t{\"content: \\\"\\\";\", \"content:\\\"\\\"\"},\n\n\t\t{\"font:27px\/13px arial,sans-serif\", \"font:27px\/13px arial,sans-serif\"},\n\t\t{\"text-decoration: none !important\", \"text-decoration:none!important\"},\n\t\t{\"color:#fff\", \"color:#fff\"},\n\t\t{\"border:2px rgb(255,255,255);\", \"border:2px #fff\"},\n\t\t{\"margin:-1px\", \"margin:-1px\"},\n\t\t{\"margin:+1px\", \"margin:1px\"},\n\t\t{\"margin:0.5em\", \"margin:.5em\"},\n\t\t{\"margin:-0.5em\", \"margin:-.5em\"},\n\t\t{\"margin:05em\", \"margin:5em\"},\n\t\t{\"margin:.50em\", \"margin:.5em\"},\n\t\t{\"margin:5.0em\", \"margin:5em\"},\n\t\t{\"color:#c0c0c0\", \"color:silver\"},\n\t\t{\"-ms-filter: \\\"progid:DXImageTransform.Microsoft.Alpha(Opacity=80)\\\";\", \"-ms-filter:\\\"alpha(opacity=80)\\\"\"},\n\t\t{\"filter: progid:DXImageTransform.Microsoft.Alpha(Opacity = 80);\", \"filter:alpha(opacity=80)\"},\n\t\t{\"MARGIN:1EM\", \"margin:1em\"},\n\t\t{\"color:CYAN\", \"color:cyan\"},\n\t\t{\"background:URL(x.PNG);\", \"background:url(x.PNG)\"},\n\t\t{\"background:url(\/*nocomment*\/)\", \"background:url(\/*nocomment*\/)\"},\n\t\t{\"background:url(data:,text)\", \"background:url(data:,text)\"},\n\t\t{\"background:url('data:text\/xml; version = 2.0,content')\", \"background:url(data:text\/xml;version=2.0,content)\"},\n\t\t{\"background:url('data:\\\\'\\\",text')\", \"background:url('data:\\\\'\\\",text')\"},\n\t\t{\"margin:0 0 18px 0;\", \"margin:0 0 18px\"},\n\t\t{\"background:none\", \"background:0 0\"},\n\t\t{\"background:none 1 1\", \"background:none 1 1\"},\n\t\t{\"z-index:1000\", \"z-index:1000\"},\n\n\t\t\/\/ coverage\n\t\t{\"margin: 1 1;\", \"margin:1\"},\n\t\t{\"margin: 1 2;\", \"margin:1 2\"},\n\t\t{\"margin: 1 1 1;\", \"margin:1\"},\n\t\t{\"margin: 1 2 1;\", \"margin:1 2\"},\n\t\t{\"margin: 1 2 3;\", \"margin:1 2 3\"},\n\t\t{\"margin: 0%;\", \"margin:0\"},\n\t\t{\"color: rgb(255,64,64);\", \"color:#ff4040\"},\n\t\t{\"color: rgb(256,-34,2342435);\", \"color:#f0f\"},\n\t\t{\"color: rgb(120%,-45%,234234234%);\", \"color:#f0f\"},\n\t\t{\"color: rgb(0, 1, ident);\", \"color:rgb(0,1,ident)\"},\n\t\t{\"color: rgb(ident);\", \"color:rgb(ident)\"},\n\t\t{\"margin: rgb(ident);\", \"margin:rgb(ident)\"},\n\t\t{\"filter: progid:b().c.Alpha(rgba(x));\", \"filter:progid:b().c.Alpha(rgba(x))\"},\n\n\t\t\/\/ go-fuzz\n\t\t{\"FONT-FAMILY: ru\\\"\", \"font-family:ru\\\"\"},\n\t}\n\n\tm := minify.New()\n\tfor _, tt := range cssTests {\n\t\tr := bytes.NewBufferString(tt.css)\n\t\tw := &bytes.Buffer{}\n\t\tassert.Nil(t, Minify(m, \"text\/css;inline=1\", w, r), \"Minify must not return error in \"+tt.css)\n\t\tassert.Equal(t, tt.expected, w.String(), \"Minify must give expected result in \"+tt.css)\n\t}\n}\n\nfunc TestCSSInlineMediatype(t *testing.T) {\n\tcss := `color:red`\n\tm := minify.New()\n\n\tr := bytes.NewBufferString(css)\n\tw := &bytes.Buffer{}\n\tassert.Nil(t, Minify(m, \"text\/css ; inline = 1\", w, r), \"Minify must not return error in \"+css)\n\tassert.Equal(t, css, w.String(), \"Minify must give expected result in \"+css)\n}\n\nfunc TestReaderErrors(t *testing.T) {\n\tm := minify.New()\n\tr := test.NewErrorReader(0)\n\tw := &bytes.Buffer{}\n\tassert.Equal(t, test.ErrPlain, Minify(m, \"text\/css\", w, r), \"Minify must return error at first read\")\n}\n\nfunc TestWriterErrors(t *testing.T) {\n\tvar errorTests = []struct {\n\t\tcss string\n\t\tn []int\n\t}{\n\t\t{`@import 'file'`, []int{0, 2}},\n\t\t{`@media all{}`, []int{0, 2, 3, 4}},\n\t\t{`a[id^=\"L\"]{margin:2in!important;color:red}`, []int{0, 4, 6, 7, 8, 9, 10, 11}},\n\t\t{`a{color:rgb(255,0,0)}`, []int{4}},\n\t\t{`a{color:rgb(255,255,255)}`, []int{4}},\n\t\t{`a{color:hsl(0,100%,50%)}`, []int{4}},\n\t\t{`a{color:hsl(360,100%,100%)}`, []int{4}},\n\t\t{`a{color:f(arg)}`, []int{4}},\n\t\t{`<!--`, []int{0}},\n\t}\n\n\tm := minify.New()\n\tfor _, tt := range errorTests {\n\t\tfor _, n := range tt.n {\n\t\t\tr := bytes.NewBufferString(tt.css)\n\t\t\tw := test.NewErrorWriter(n)\n\t\t\tassert.Equal(t, test.ErrPlain, Minify(m, \"text\/css\", w, r), \"Minify must return error in \"+tt.css+\" at write \"+strconv.FormatInt(int64(n), 10))\n\t\t}\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc ExampleMinify() {\n\tm := minify.New()\n\tm.AddFunc(\"text\/css\", Minify)\n\n\tif err := m.Minify(\"text\/css\", os.Stdout, os.Stdin); err != nil {\n\t\tfmt.Println(\"minify.Minify:\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package crawler\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\n\t\"golang.org\/x\/net\/html\"\n\t\"golang.org\/x\/net\/html\/atom\"\n)\n\ntype miniHTML struct {\n}\n\nfunc (m *miniHTML) removeNode(node *html.Node) error {\n\tnode.Parent.RemoveChild(node)\n\n\treturn nil\n}\n\nfunc (m *miniHTML) parseChildren(node *html.Node) error {\n\tfor it := node.FirstChild; it != nil; {\n\t\tcurrentNode := it\n\t\tit = it.NextSibling\n\t\terr := m.parseNode(currentNode)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (m *miniHTML) parseElements(node *html.Node) error {\n\tswitch node.DataAtom {\n\tcase atom.Script:\n\t\treturn m.removeNode(node)\n\tcase atom.Style:\n\t\treturn m.removeNode(node)\n\tcase atom.Form:\n\t\treturn m.removeNode(node)\n\tcase atom.Button:\n\t\treturn m.removeNode(node)\n\tdefault:\n\t}\n\n\treturn m.parseChildren(node)\n}\n\nfunc (m *miniHTML) parseNode(node *html.Node) error {\n\tswitch node.Type {\n\tcase html.ErrorNode:\n\t\treturn errors.New(\"Found error node in html\")\n\tcase html.DocumentNode: \/\/ +children -attr (first node)\n\t\treturn m.parseChildren(node)\n\tcase html.ElementNode: \/\/ +children +attr\n\t\treturn m.parseElements(node)\n\tcase html.TextNode: \/\/ -children -attr\n\t\treturn nil\n\tcase html.DoctypeNode: \/\/ ignore\n\t\treturn nil\n\tcase html.CommentNode:\n\t\tnode.Parent.RemoveChild(node)\n\t\treturn nil\n\tdefault:\n\t\treturn errors.New(\"Unknown node type on html\")\n\t}\n}\n\n\/\/ Minification - start minification body\nfunc Minification(body []byte) ([]byte, error) {\n\tstartNode, err := html.Parse(bytes.NewReader(body))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparser := miniHTML{}\n\terr = parser.parseNode(startNode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar buf bytes.Buffer\n\tw := bufio.NewWriter(&buf)\n\terr = html.Render(w, startNode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tw.Flush()\n\n\treturn buf.Bytes(), nil\n}\n<commit_msg>расширил количество случаев, которые обрабатывает минификатор html<commit_after>package crawler\n\nimport (\n\t\"errors\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/html\"\n\t\"golang.org\/x\/net\/html\/atom\"\n)\n\n\/\/ Minification - struct with functions for minimize html\ntype Minification struct {\n}\n\nfunc (m *Minification) removeNode(node *html.Node) error {\n\tnode.Parent.RemoveChild(node)\n\n\treturn nil\n}\n\nfunc (m *Minification) parseChildren(node *html.Node) error {\n\tfor it := node.FirstChild; it != nil; {\n\t\tcurrentNode := it\n\t\tit = it.NextSibling\n\t\terr := m.Run(currentNode)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (m *Minification) parseElements(node *html.Node) error {\n\tswitch node.DataAtom {\n\tcase atom.Script:\n\t\treturn m.removeNode(node)\n\tcase atom.Style:\n\t\treturn m.removeNode(node)\n\tcase atom.Form:\n\t\treturn m.removeNode(node)\n\tcase atom.Button:\n\t\treturn m.removeNode(node)\n\tcase atom.Time:\n\t\treturn m.removeNode(node)\n\t}\n\n\tlen := len(node.Attr)\n\tif len != 0 {\n\t\tattr := node.Attr\n\t\ti := 0\n\t\tj := 0\n\t\tfor ; i != len; i++ {\n\t\t\tswitch strings.ToLower(attr[i].Key) {\n\t\t\tcase \"id\":\n\t\t\tcase \"style\":\n\t\t\tcase \"onclick\":\n\t\t\tcase \"target\":\n\t\t\tcase \"title\":\n\t\t\tcase \"class\":\n\t\t\tcase \"width\":\n\t\t\tcase \"height\":\n\t\t\tcase \"alt\":\n\t\t\tcase \"disabled\":\n\t\t\tdefault:\n\t\t\t\tif i != j {\n\t\t\t\t\tattr[j] = attr[i]\n\t\t\t\t}\n\t\t\t\tj++\n\t\t\t}\n\t\t}\n\t\tif i != j {\n\t\t\tnode.Attr = attr[:j]\n\t\t}\n\t}\n\n\treturn m.parseChildren(node)\n}\n\n\/\/ Run - start minification node\nfunc (m *Minification) Run(node *html.Node) error {\n\tswitch node.Type {\n\tcase html.DocumentNode: \/\/ +children -attr (first node)\n\t\treturn m.parseChildren(node)\n\tcase html.ElementNode: \/\/ +children +attr\n\t\treturn m.parseElements(node)\n\tcase html.TextNode: \/\/ -children -attr\n\t\treturn nil\n\tcase html.DoctypeNode: \/\/ ignore\n\t\treturn nil\n\tcase html.CommentNode: \/\/ remove\n\t\treturn m.removeNode(node)\n\tdefault:\n\t\treturn errors.New(\"minification.Minification.Run: unexpected node type\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"koding\/tools\/utils\"\n)\n\nfunc TestReplaceEnv(t *testing.T) {\n\ttests := []struct {\n\t\tEnvironment string\n\t\tProvEnv string\n\t\tProvVariable string\n\t\tExp string\n\t\tExpNoManaged string\n\t}{\n\t\t{\n\t\t\t\/\/ 0 \/\/\n\t\t\tEnvironment: \"sandbox\",\n\t\t\tProvEnv: \"sandbox\",\n\t\t\tProvVariable: \"https:\/\/koding.com\/sandbox\/version.txt\",\n\t\t\tExp: \"https:\/\/koding.com\/development\/version.txt\",\n\t\t\tExpNoManaged: \"https:\/\/koding.com\/development\/version.txt\",\n\t\t},\n\t\t{\n\t\t\t\/\/ 1 \/\/\n\t\t\tEnvironment: \"production\",\n\t\t\tProvEnv: \"managed\",\n\t\t\tProvVariable: \"https:\/\/koding.com\/production\/version.txt\",\n\t\t\tExp: \"https:\/\/koding.com\/managed\/version.txt\",\n\t\t\tExpNoManaged: \"https:\/\/koding.com\/production\/version.txt\",\n\t\t},\n\t\t{\n\t\t\t\/\/ 2 \/\/\n\t\t\tEnvironment: \"production\",\n\t\t\tProvEnv: \"devmanaged\",\n\t\t\tProvVariable: \"https:\/\/koding.com\/production\/version.txt\",\n\t\t\tExp: \"https:\/\/koding.com\/devmanaged\/version.txt\",\n\t\t\tExpNoManaged: \"https:\/\/koding.com\/development\/version.txt\",\n\t\t},\n\t\t{\n\t\t\t\/\/ 3 \/\/\n\t\t\tEnvironment: \"default\",\n\t\t\tProvEnv: \"sandbox\",\n\t\t\tProvVariable: \"https:\/\/koding.com\/default\/version.txt\",\n\t\t\tExp: \"https:\/\/koding.com\/development\/version.txt\",\n\t\t\tExpNoManaged: \"https:\/\/koding.com\/development\/version.txt\",\n\t\t},\n\t\t{\n\t\t\t\/\/ 4 \/\/\n\t\t\tEnvironment: \"production\",\n\t\t\tProvEnv: \"production\",\n\t\t\tProvVariable: \"https:\/\/koding.com\/production\/version.txt\",\n\t\t\tExp: \"https:\/\/koding.com\/production\/version.txt\",\n\t\t\tExpNoManaged: \"https:\/\/koding.com\/production\/version.txt\",\n\t\t},\n\t\t{\n\t\t\t\/\/ 5 \/\/\n\t\t\tEnvironment: \"development\",\n\t\t\tProvEnv: \"devmanaged\",\n\t\t\tProvVariable: \"https:\/\/koding.com\/development\/version.txt\",\n\t\t\tExp: \"https:\/\/koding.com\/devmanaged\/version.txt\",\n\t\t\tExpNoManaged: \"https:\/\/koding.com\/development\/version.txt\",\n\t\t},\n\t\t{\n\t\t\t\/\/ 6 \/\/\n\t\t\tEnvironment: \"default\",\n\t\t\tProvEnv: \"devmanaged\",\n\t\t\tProvVariable: \"https:\/\/koding.com\/default\/version.txt\",\n\t\t\tExp: \"https:\/\/koding.com\/devmanaged\/version.txt\",\n\t\t\tExpNoManaged: \"https:\/\/koding.com\/development\/version.txt\",\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\tt.Run(fmt.Sprintf(\"test_no_%d\", i), func(t *testing.T) {\n\t\t\tprovVariable := NewEndpoint(test.ProvVariable)\n\t\t\texp := NewEndpoint(test.Exp)\n\t\t\texpNoManaged := NewEndpoint(test.ExpNoManaged)\n\n\t\t\t\/\/ Temporarily replace buildin environment. This also means that you\n\t\t\t\/\/ should not run these test in parallel!\n\t\t\tvar envcopy = environment\n\t\t\tenvironment = test.Environment\n\t\t\tdefer func() {\n\t\t\t\tenvironment = envcopy\n\t\t\t}()\n\n\t\t\tif e := ReplaceEnv(provVariable, test.ProvEnv); !e.Equal(exp) {\n\t\t\t\tt.Fatalf(\"want string = %#v; got %#v\", test.Exp, e)\n\t\t\t}\n\n\t\t\tif e := ReplaceEnv(provVariable, RmManaged(test.ProvEnv)); !e.Equal(expNoManaged) {\n\t\t\t\tt.Fatalf(\"want string = %#v; got %#v\", test.Exp, e)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestEndpointEqual(t *testing.T) {\n\tgood := mustURL(\"http:\/\/127.0.0.1:56789\/kite\")\n\tbad := mustURL(\"http:\/\/127.0.0.1\")\n\n\tcases := map[string]struct {\n\t\tlhs *Endpoint\n\t\trhs *Endpoint\n\t\tok bool\n\t}{\n\t\t\"empty endpoints\": {\n\t\t\t&Endpoint{},\n\t\t\t&Endpoint{},\n\t\t\ttrue,\n\t\t},\n\t\t\"public private match\": {\n\t\t\t&Endpoint{\n\t\t\t\tPublic: good,\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\t&Endpoint{\n\t\t\t\tPublic: good,\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\ttrue,\n\t\t},\n\t\t\"public match\": {\n\t\t\t&Endpoint{\n\t\t\t\tPublic: good,\n\t\t\t},\n\t\t\t&Endpoint{\n\t\t\t\tPublic: good,\n\t\t\t},\n\t\t\ttrue,\n\t\t},\n\t\t\"private match\": {\n\t\t\t&Endpoint{\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\t&Endpoint{\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\ttrue,\n\t\t},\n\t\t\"public private no match\": {\n\t\t\t&Endpoint{\n\t\t\t\tPublic: good,\n\t\t\t\tPrivate: bad,\n\t\t\t},\n\t\t\t&Endpoint{\n\t\t\t\tPublic: bad,\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\tfalse,\n\t\t},\n\t\t\"public no match\": {\n\t\t\t&Endpoint{\n\t\t\t\tPublic: good,\n\t\t\t},\n\t\t\t&Endpoint{\n\t\t\t\tPublic: bad,\n\t\t\t},\n\t\t\tfalse,\n\t\t},\n\t\t\"private no match\": {\n\t\t\t&Endpoint{\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\t&Endpoint{\n\t\t\t\tPrivate: bad,\n\t\t\t},\n\t\t\tfalse,\n\t\t},\n\t\t\"public no match private match\": {\n\t\t\t&Endpoint{\n\t\t\t\tPublic: good,\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\t&Endpoint{\n\t\t\t\tPublic: bad,\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\tfalse,\n\t\t},\n\t\t\"private no match public match\": {\n\t\t\t&Endpoint{\n\t\t\t\tPublic: good,\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\t&Endpoint{\n\t\t\t\tPublic: bad,\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\tfalse,\n\t\t},\n\t}\n\n\tfor name, cas := range cases {\n\t\t\/\/ capture range variable here\n\t\tcas := cas\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tif ok := cas.lhs.Equal(cas.rhs); ok != cas.ok {\n\t\t\t\tt.Fatalf(\"got %t, want %t\", ok, cas.ok)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestURLCopy(t *testing.T) {\n\tcases := map[string]*URL{\n\t\t\"nil url\": nil,\n\t\t\"nil underlying url\": {URL: nil},\n\t\t\"simple url\": {URL: &url.URL{Scheme: \"http\", Host: \"example.com\"}},\n\t\t\"url with user\": {URL: &url.URL{Scheme: \"http\", Host: \"example.com\", User: url.UserPassword(\"user\", \"pass\")}},\n\t}\n\n\tfor name, u := range cases {\n\t\t\/\/ capture range variable here\n\t\tu := u\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tuCopy := u.Copy()\n\n\t\t\tif u.IsNil() {\n\t\t\t\tif uCopy != nil {\n\t\t\t\t\tt.Errorf(\"want uCopy to be nil; got %#v\", uCopy)\n\t\t\t\t}\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmodifyURL(uCopy)\n\n\t\t\tif reflect.DeepEqual(uCopy, u) {\n\t\t\t\tt.Errorf(\"want %#v != %#v\", uCopy, u)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc modifyURL(u *URL) {\n\tif u.IsNil() {\n\t\treturn\n\t}\n\n\tu.URL.Host = utils.RandomString()\n\n\tif u.URL.User != nil {\n\t\tu.URL.User = url.UserPassword(utils.RandomString(), \"\")\n\t}\n}\n\nfunc TestEndpointCopy(t *testing.T) {\n\turl := &URL{URL: &url.URL{Scheme: \"http\", Host: \"example.com\", User: url.UserPassword(\"user\", \"pass\")}}\n\n\tcases := map[string]*Endpoint{\n\t\t\"nil endpoint\": nil,\n\t\t\"nil underlying urls\": {Private: nil, Public: nil},\n\t\t\"private-only endpoint\": {Private: url},\n\t\t\"public-only endpoint\": {Public: url},\n\t\t\"endpoint\": {Private: url, Public: url},\n\t}\n\n\tfor name, e := range cases {\n\t\t\/\/ capture range variable here\n\t\te := e\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\teCopy := e.Copy()\n\n\t\t\tif e.IsNil() {\n\t\t\t\tif eCopy != nil {\n\t\t\t\t\tt.Errorf(\"want eCopy to be nil; got %#v\", eCopy)\n\t\t\t\t}\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmodifyEndpoint(eCopy)\n\n\t\t\tif reflect.DeepEqual(eCopy, e) {\n\t\t\t\tt.Errorf(\"want %#v != %#v\", eCopy, e)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc modifyEndpoint(e *Endpoint) {\n\tif e.IsNil() {\n\t\treturn\n\t}\n\n\tif !e.Public.IsNil() {\n\t\tmodifyURL(e.Public)\n\t}\n\n\tif !e.Private.IsNil() {\n\t\tmodifyURL(e.Private)\n\t}\n}\n<commit_msg>kites\/config: fix test<commit_after>package config\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"koding\/tools\/utils\"\n)\n\nfunc TestReplaceEnv(t *testing.T) {\n\ttests := []struct {\n\t\tEnvironment string\n\t\tProvEnv string\n\t\tProvVariable string\n\t\tExp string\n\t\tExpNoManaged string\n\t}{\n\t\t{\n\t\t\t\/\/ 0 \/\/\n\t\t\tEnvironment: \"sandbox\",\n\t\t\tProvEnv: \"sandbox\",\n\t\t\tProvVariable: \"https:\/\/koding.com\/sandbox\/version.txt\",\n\t\t\tExp: \"https:\/\/koding.com\/development\/version.txt\",\n\t\t\tExpNoManaged: \"https:\/\/koding.com\/development\/version.txt\",\n\t\t},\n\t\t{\n\t\t\t\/\/ 1 \/\/\n\t\t\tEnvironment: \"production\",\n\t\t\tProvEnv: \"managed\",\n\t\t\tProvVariable: \"https:\/\/koding.com\/production\/version.txt\",\n\t\t\tExp: \"https:\/\/koding.com\/managed\/version.txt\",\n\t\t\tExpNoManaged: \"https:\/\/koding.com\/production\/version.txt\",\n\t\t},\n\t\t{\n\t\t\t\/\/ 2 \/\/\n\t\t\tEnvironment: \"production\",\n\t\t\tProvEnv: \"devmanaged\",\n\t\t\tProvVariable: \"https:\/\/koding.com\/production\/version.txt\",\n\t\t\tExp: \"https:\/\/koding.com\/devmanaged\/version.txt\",\n\t\t\tExpNoManaged: \"https:\/\/koding.com\/development\/version.txt\",\n\t\t},\n\t\t{\n\t\t\t\/\/ 3 \/\/\n\t\t\tEnvironment: \"default\",\n\t\t\tProvEnv: \"sandbox\",\n\t\t\tProvVariable: \"https:\/\/koding.com\/default\/version.txt\",\n\t\t\tExp: \"https:\/\/koding.com\/development\/version.txt\",\n\t\t\tExpNoManaged: \"https:\/\/koding.com\/development\/version.txt\",\n\t\t},\n\t\t{\n\t\t\t\/\/ 4 \/\/\n\t\t\tEnvironment: \"production\",\n\t\t\tProvEnv: \"production\",\n\t\t\tProvVariable: \"https:\/\/koding.com\/production\/version.txt\",\n\t\t\tExp: \"https:\/\/koding.com\/production\/version.txt\",\n\t\t\tExpNoManaged: \"https:\/\/koding.com\/production\/version.txt\",\n\t\t},\n\t\t{\n\t\t\t\/\/ 5 \/\/\n\t\t\tEnvironment: \"development\",\n\t\t\tProvEnv: \"devmanaged\",\n\t\t\tProvVariable: \"https:\/\/koding.com\/development\/version.txt\",\n\t\t\tExp: \"https:\/\/koding.com\/devmanaged\/version.txt\",\n\t\t\tExpNoManaged: \"https:\/\/koding.com\/development\/version.txt\",\n\t\t},\n\t\t{\n\t\t\t\/\/ 6 \/\/\n\t\t\tEnvironment: \"default\",\n\t\t\tProvEnv: \"devmanaged\",\n\t\t\tProvVariable: \"https:\/\/koding.com\/default\/version.txt\",\n\t\t\tExp: \"https:\/\/koding.com\/devmanaged\/version.txt\",\n\t\t\tExpNoManaged: \"https:\/\/koding.com\/development\/version.txt\",\n\t\t},\n\t}\n\n\tfor i, test := range tests {\n\t\tt.Run(fmt.Sprintf(\"test_no_%d\", i), func(t *testing.T) {\n\t\t\tprovVariable := NewEndpoint(test.ProvVariable)\n\t\t\texp := NewEndpoint(test.Exp)\n\t\t\texpNoManaged := NewEndpoint(test.ExpNoManaged)\n\n\t\t\tif e := ReplaceCustomEnv(provVariable, test.Environment, test.ProvEnv); !e.Equal(exp) {\n\t\t\t\tt.Fatalf(\"want %s; got %s\", test.Exp, e)\n\t\t\t}\n\n\t\t\tif e := ReplaceCustomEnv(provVariable, test.Environment, RmManaged(test.ProvEnv)); !e.Equal(expNoManaged) {\n\t\t\t\tt.Fatalf(\"want %s; got %s\", test.Exp, e)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestEndpointEqual(t *testing.T) {\n\tgood := mustURL(\"http:\/\/127.0.0.1:56789\/kite\")\n\tbad := mustURL(\"http:\/\/127.0.0.1\")\n\n\tcases := map[string]struct {\n\t\tlhs *Endpoint\n\t\trhs *Endpoint\n\t\tok bool\n\t}{\n\t\t\"empty endpoints\": {\n\t\t\t&Endpoint{},\n\t\t\t&Endpoint{},\n\t\t\ttrue,\n\t\t},\n\t\t\"public private match\": {\n\t\t\t&Endpoint{\n\t\t\t\tPublic: good,\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\t&Endpoint{\n\t\t\t\tPublic: good,\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\ttrue,\n\t\t},\n\t\t\"public match\": {\n\t\t\t&Endpoint{\n\t\t\t\tPublic: good,\n\t\t\t},\n\t\t\t&Endpoint{\n\t\t\t\tPublic: good,\n\t\t\t},\n\t\t\ttrue,\n\t\t},\n\t\t\"private match\": {\n\t\t\t&Endpoint{\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\t&Endpoint{\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\ttrue,\n\t\t},\n\t\t\"public private no match\": {\n\t\t\t&Endpoint{\n\t\t\t\tPublic: good,\n\t\t\t\tPrivate: bad,\n\t\t\t},\n\t\t\t&Endpoint{\n\t\t\t\tPublic: bad,\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\tfalse,\n\t\t},\n\t\t\"public no match\": {\n\t\t\t&Endpoint{\n\t\t\t\tPublic: good,\n\t\t\t},\n\t\t\t&Endpoint{\n\t\t\t\tPublic: bad,\n\t\t\t},\n\t\t\tfalse,\n\t\t},\n\t\t\"private no match\": {\n\t\t\t&Endpoint{\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\t&Endpoint{\n\t\t\t\tPrivate: bad,\n\t\t\t},\n\t\t\tfalse,\n\t\t},\n\t\t\"public no match private match\": {\n\t\t\t&Endpoint{\n\t\t\t\tPublic: good,\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\t&Endpoint{\n\t\t\t\tPublic: bad,\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\tfalse,\n\t\t},\n\t\t\"private no match public match\": {\n\t\t\t&Endpoint{\n\t\t\t\tPublic: good,\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\t&Endpoint{\n\t\t\t\tPublic: bad,\n\t\t\t\tPrivate: good,\n\t\t\t},\n\t\t\tfalse,\n\t\t},\n\t}\n\n\tfor name, cas := range cases {\n\t\t\/\/ capture range variable here\n\t\tcas := cas\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tif ok := cas.lhs.Equal(cas.rhs); ok != cas.ok {\n\t\t\t\tt.Fatalf(\"got %t, want %t\", ok, cas.ok)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestURLCopy(t *testing.T) {\n\tcases := map[string]*URL{\n\t\t\"nil url\": nil,\n\t\t\"nil underlying url\": {URL: nil},\n\t\t\"simple url\": {URL: &url.URL{Scheme: \"http\", Host: \"example.com\"}},\n\t\t\"url with user\": {URL: &url.URL{Scheme: \"http\", Host: \"example.com\", User: url.UserPassword(\"user\", \"pass\")}},\n\t}\n\n\tfor name, u := range cases {\n\t\t\/\/ capture range variable here\n\t\tu := u\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tuCopy := u.Copy()\n\n\t\t\tif u.IsNil() {\n\t\t\t\tif uCopy != nil {\n\t\t\t\t\tt.Errorf(\"want uCopy to be nil; got %#v\", uCopy)\n\t\t\t\t}\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmodifyURL(uCopy)\n\n\t\t\tif reflect.DeepEqual(uCopy, u) {\n\t\t\t\tt.Errorf(\"want %#v != %#v\", uCopy, u)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc modifyURL(u *URL) {\n\tif u.IsNil() {\n\t\treturn\n\t}\n\n\tu.URL.Host = utils.RandomString()\n\n\tif u.URL.User != nil {\n\t\tu.URL.User = url.UserPassword(utils.RandomString(), \"\")\n\t}\n}\n\nfunc TestEndpointCopy(t *testing.T) {\n\turl := &URL{URL: &url.URL{Scheme: \"http\", Host: \"example.com\", User: url.UserPassword(\"user\", \"pass\")}}\n\n\tcases := map[string]*Endpoint{\n\t\t\"nil endpoint\": nil,\n\t\t\"nil underlying urls\": {Private: nil, Public: nil},\n\t\t\"private-only endpoint\": {Private: url},\n\t\t\"public-only endpoint\": {Public: url},\n\t\t\"endpoint\": {Private: url, Public: url},\n\t}\n\n\tfor name, e := range cases {\n\t\t\/\/ capture range variable here\n\t\te := e\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\teCopy := e.Copy()\n\n\t\t\tif e.IsNil() {\n\t\t\t\tif eCopy != nil {\n\t\t\t\t\tt.Errorf(\"want eCopy to be nil; got %#v\", eCopy)\n\t\t\t\t}\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmodifyEndpoint(eCopy)\n\n\t\t\tif reflect.DeepEqual(eCopy, e) {\n\t\t\t\tt.Errorf(\"want %#v != %#v\", eCopy, e)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc modifyEndpoint(e *Endpoint) {\n\tif e.IsNil() {\n\t\treturn\n\t}\n\n\tif !e.Public.IsNil() {\n\t\tmodifyURL(e.Public)\n\t}\n\n\tif !e.Private.IsNil() {\n\t\tmodifyURL(e.Private)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package models\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"socialapi\/config\"\n\t\"socialapi\/request\"\n\t\"time\"\n\n\t\"github.com\/VerbalExpressions\/GoVerbalExpressions\"\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/koding\/bongo\"\n)\n\nvar mentionRegex = verbalexpressions.New().\n\tFind(\"@\").\n\tBeginCapture().\n\tWord().\n\tEndCapture().\n\tRegex()\n\ntype ChannelMessage struct {\n\t\/\/ unique identifier of the channel message\n\tId int64 `json:\"id,string\"`\n\n\t\/\/ Body of the mesage\n\tBody string `json:\"body\"`\n\n\t\/\/ Generated Slug for body\n\tSlug string `json:\"slug\" sql:\"NOT NULL;TYPE:VARCHAR(100);\"`\n\n\t\/\/ type of the message\n\tTypeConstant string `json:\"typeConstant\" sql:\"NOT NULL;TYPE:VARCHAR(100);\"`\n\n\t\/\/ Creator of the channel message\n\tAccountId int64 `json:\"accountId,string\" sql:\"NOT NULL\"`\n\n\t\/\/ in which channel this message is created\n\tInitialChannelId int64 `json:\"initialChannelId,string\" sql:\"NOT NULL\"`\n\n\t\/\/ holds troll, unsafe, etc\n\tMetaBits int16 `json:\"-\"`\n\n\t\/\/ Creation date of the message\n\tCreatedAt time.Time `json:\"createdAt\" sql:\"DEFAULT:CURRENT_TIMESTAMP\"`\n\n\t\/\/ Modification date of the message\n\tUpdatedAt time.Time `json:\"updatedAt\" sql:\"DEFAULT:CURRENT_TIMESTAMP\"`\n\n\t\/\/ Deletion date of the channel message\n\tDeletedAt time.Time `json:\"deletedAt\"`\n\n\t\/\/ Extra data storage\n\tPayload gorm.Hstore `json:\"payload,omitempty\"`\n}\n\nfunc (c *ChannelMessage) BeforeCreate() {\n\tif res, err := c.isExemptContent(); err == nil && res {\n\t\tc.MetaBits = updateTrollModeBit(c.MetaBits)\n\t}\n\n\tc.DeletedAt = ZeroDate()\n}\n\nfunc (c *ChannelMessage) BeforeUpdate() {\n\tif res, err := c.isExemptContent(); err == nil && res {\n\t\tc.MetaBits = updateTrollModeBit(c.MetaBits)\n\t}\n\n\tc.DeletedAt = ZeroDate()\n}\n\nfunc (c *ChannelMessage) AfterCreate() {\n\tbongo.B.AfterCreate(c)\n}\n\nfunc (c *ChannelMessage) AfterUpdate() {\n\tbongo.B.AfterUpdate(c)\n}\n\nfunc (c ChannelMessage) AfterDelete() {\n\tbongo.B.AfterDelete(c)\n}\n\nfunc (c ChannelMessage) GetId() int64 {\n\treturn c.Id\n}\n\nfunc (c ChannelMessage) TableName() string {\n\treturn \"api.channel_message\"\n}\n\nconst (\n\tChannelMessage_TYPE_POST = \"post\"\n\tChannelMessage_TYPE_REPLY = \"reply\"\n\tChannelMessage_TYPE_JOIN = \"join\"\n\tChannelMessage_TYPE_LEAVE = \"leave\"\n\tChannelMessage_TYPE_CHAT = \"chat\"\n\tChannelMessage_TYPE_PRIVATE_MESSAGE = \"privatemessage\"\n)\n\nfunc NewChannelMessage() *ChannelMessage {\n\treturn &ChannelMessage{}\n}\n\nfunc (c *ChannelMessage) ById(id int64) error {\n\treturn bongo.B.ById(c, id)\n}\n\nfunc (c *ChannelMessage) One(q *bongo.Query) error {\n\treturn bongo.B.One(c, c, q)\n}\n\nfunc (c *ChannelMessage) Some(data interface{}, q *bongo.Query) error {\n\treturn bongo.B.Some(c, data, q)\n}\n\nfunc (c *ChannelMessage) UpdateMulti(rest ...map[string]interface{}) error {\n\treturn bongo.B.UpdateMulti(c, rest...)\n}\n\nfunc (c *ChannelMessage) CountWithQuery(q *bongo.Query) (int, error) {\n\treturn bongo.B.CountWithQuery(c, q)\n}\n\nfunc (c *ChannelMessage) isExemptContent() (bool, error) {\n\t\/\/ set meta bits if only message is post or a reply\n\tif c.TypeConstant != ChannelMessage_TYPE_POST &&\n\t\tc.TypeConstant != ChannelMessage_TYPE_REPLY {\n\t\treturn false, nil\n\t}\n\n\tif c.AccountId == 0 && c.Id != 0 {\n\t\tif err := c.ById(c.Id); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t} else {\n\t\treturn false, fmt.Errorf(\"Couldnt find accountId from content %+v\", c)\n\t}\n\n\taccount, err := FetchAccountFromCache(c.AccountId)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif account == nil {\n\t\treturn false, fmt.Errorf(\"Account is nil, accountId:%d\", c.AccountId)\n\t}\n\n\tif account.IsTroll {\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}\n\nfunc bodyLenCheck(body string) error {\n\tif len(body) < config.Get().Limits.MessageBodyMinLen {\n\t\treturn fmt.Errorf(\"Message Body Length should be greater than %d, yours is %d \", config.Get().Limits.MessageBodyMinLen, len(body))\n\t}\n\n\treturn nil\n}\n\n\/\/ todo create a new message while updating the channel_message and delete other\n\/\/ cases, since deletion is a soft delete, old instances will still be there\nfunc (c *ChannelMessage) Update() error {\n\tif err := bodyLenCheck(c.Body); err != nil {\n\t\treturn err\n\t}\n\t\/\/ only update body\n\terr := bongo.B.UpdatePartial(c,\n\t\tmap[string]interface{}{\n\t\t\t\"body\": c.Body,\n\t\t},\n\t)\n\treturn err\n}\n\nfunc (c *ChannelMessage) Create() error {\n\tif err := bodyLenCheck(c.Body); err != nil {\n\t\treturn err\n\t}\n\n\tvar err error\n\tc, err = Slugify(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn bongo.B.Create(c)\n}\n\n\/\/ CreateRaw creates a new channel message without effected by auto generated createdAt\n\/\/ and updatedAt values\nfunc (c *ChannelMessage) CreateRaw() error {\n\tinsertSql := \"INSERT INTO \" +\n\t\tc.TableName() +\n\t\t` (\"body\",\"slug\",\"type_constant\",\"account_id\",\"initial_channel_id\",` +\n\t\t`\"created_at\",\"updated_at\",\"deleted_at\",\"payload\") ` +\n\t\t\"VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9) \" +\n\t\t\"RETURNING ID\"\n\n\treturn bongo.B.DB.CommonDB().QueryRow(insertSql, c.Body, c.Slug, c.TypeConstant, c.AccountId, c.InitialChannelId,\n\t\tc.CreatedAt, c.UpdatedAt, c.DeletedAt, c.Payload).Scan(&c.Id)\n}\n\n\/\/ UpdateBodyRaw updates message body without effecting createdAt\/UpdatedAt\n\/\/ timestamps\nfunc (c *ChannelMessage) UpdateBodyRaw() error {\n\tupdateSql := fmt.Sprintf(\"UPDATE %s SET body=? WHERE id=?\", c.TableName())\n\n\treturn bongo.B.DB.Exec(updateSql, c.Body, c.Id).Error\n}\n\nfunc (c *ChannelMessage) Delete() error {\n\treturn bongo.B.Delete(c)\n}\n\nfunc (c *ChannelMessage) FetchByIds(ids []int64) ([]ChannelMessage, error) {\n\tvar messages []ChannelMessage\n\n\tif len(ids) == 0 {\n\t\treturn messages, nil\n\t}\n\n\tif err := bongo.B.FetchByIds(c, &messages, ids); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn messages, nil\n}\n\nfunc (c *ChannelMessage) BuildMessages(query *request.Query, messages []ChannelMessage) ([]*ChannelMessageContainer, error) {\n\tcontainers := make([]*ChannelMessageContainer, len(messages))\n\tif len(containers) == 0 {\n\t\treturn containers, nil\n\t}\n\n\tfor i, message := range messages {\n\t\td := NewChannelMessage()\n\t\t*d = message\n\t\tdata, err := d.BuildMessage(query)\n\t\tif err != nil {\n\t\t\treturn containers, err\n\t\t}\n\t\tcontainers[i] = data\n\t}\n\n\treturn containers, nil\n}\n\nfunc (c *ChannelMessage) BuildMessage(query *request.Query) (*ChannelMessageContainer, error) {\n\tcmc, err := c.FetchRelatives(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmr := NewMessageReply()\n\tmr.MessageId = c.Id\n\tq := query\n\tq.Limit = 3\n\treplies, err := mr.List(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trepliesCount, err := mr.Count()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcmc.RepliesCount = repliesCount\n\n\tcmc.IsFollowed, err = c.CheckIsMessageFollowed(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpopulatedChannelMessagesReplies := make([]*ChannelMessageContainer, len(replies))\n\tfor rl := 0; rl < len(replies); rl++ {\n\t\tcmrc, err := replies[rl].FetchRelatives(query)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpopulatedChannelMessagesReplies[rl] = cmrc\n\t}\n\n\tcmc.Replies = populatedChannelMessagesReplies\n\treturn cmc, nil\n}\n\nfunc (c *ChannelMessage) CheckIsMessageFollowed(query *request.Query) (bool, error) {\n\tchannel := NewChannel()\n\tif err := channel.FetchPinnedActivityChannel(query.AccountId, query.GroupName); err != nil {\n\t\tif err == bongo.RecordNotFound {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\n\tcml := NewChannelMessageList()\n\tq := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"channel_id\": channel.Id,\n\t\t\t\"message_id\": c.Id,\n\t\t},\n\t}\n\tif err := cml.One(q); err != nil {\n\t\tif err == bongo.RecordNotFound {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n\nfunc (c *ChannelMessage) BuildEmptyMessageContainer() (*ChannelMessageContainer, error) {\n\tif c.Id == 0 {\n\t\treturn nil, errors.New(\"Channel message id is not set\")\n\t}\n\tcontainer := NewChannelMessageContainer()\n\tcontainer.Message = c\n\n\toldId, err := FetchAccountOldIdByIdFromCache(c.AccountId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontainer.AccountOldId = oldId\n\n\tinteractionContainer := NewInteractionContainer()\n\tinteractionContainer.ActorsPreview = make([]string, 0)\n\tinteractionContainer.IsInteracted = false\n\tinteractionContainer.ActorsCount = 0\n\n\tcontainer.Interactions = make(map[string]*InteractionContainer)\n\tcontainer.Interactions[\"like\"] = interactionContainer\n\n\treturn container, nil\n}\n\nfunc (c *ChannelMessage) FetchRelatives(query *request.Query) (*ChannelMessageContainer, error) {\n\tcontainer, err := c.BuildEmptyMessageContainer()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ti := NewInteraction()\n\ti.MessageId = c.Id\n\n\t\/\/ get preview\n\tquery.Type = \"like\"\n\tquery.Limit = 3\n\tinteractorIds, err := i.List(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toldIds, err := FetchOldIdsByAccountIds(interactorIds)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinteractionContainer := NewInteractionContainer()\n\tinteractionContainer.ActorsPreview = oldIds\n\n\t\/\/ check if the current user is interacted in this thread\n\tisInteracted, err := i.IsInteracted(query.AccountId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinteractionContainer.IsInteracted = isInteracted\n\n\t\/\/ fetch interaction count\n\tcount, err := i.Count(query.Type)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinteractionContainer.ActorsCount = count\n\n\tcontainer.Interactions[\"like\"] = interactionContainer\n\treturn container, nil\n}\n\nfunc generateMessageListQuery(channelId int64, q *request.Query) *bongo.Query {\n\tmessageType := q.Type\n\tif messageType == \"\" {\n\t\tmessageType = ChannelMessage_TYPE_POST\n\t}\n\n\treturn &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"account_id\": q.AccountId,\n\t\t\t\"initial_channel_id\": channelId,\n\t\t\t\"type_constant\": messageType,\n\t\t},\n\t\tPagination: *bongo.NewPagination(q.Limit, q.Skip),\n\t\tSort: map[string]string{\n\t\t\t\"created_at\": \"DESC\",\n\t\t},\n\t}\n}\n\nfunc (c *ChannelMessage) FetchMessagesByChannelId(channelId int64, q *request.Query) ([]ChannelMessage, error) {\n\tquery := generateMessageListQuery(channelId, q)\n\n\tvar messages []ChannelMessage\n\tif err := c.Some(&messages, query); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif messages == nil {\n\t\treturn make([]ChannelMessage, 0), nil\n\t}\n\treturn messages, nil\n}\n\nfunc (c *ChannelMessage) GetMentionedUsernames() []string {\n\tflattened := make([]string, 0)\n\n\tres := mentionRegex.FindAllStringSubmatch(c.Body, -1)\n\tif len(res) == 0 {\n\t\treturn flattened\n\t}\n\n\tparticipants := map[string]struct{}{}\n\t\/\/ remove duplicate mentions\n\tfor _, ele := range res {\n\t\tparticipants[ele[1]] = struct{}{}\n\t}\n\n\tfor participant := range participants {\n\t\tflattened = append(flattened, participant)\n\t}\n\n\treturn flattened\n}\n\nfunc (c *ChannelMessage) FetchTotalMessageCount(q *request.Query) (int, error) {\n\tquery := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"account_id\": q.AccountId,\n\t\t\t\"type_constant\": q.Type,\n\t\t},\n\t\tPagination: *bongo.NewPagination(q.Limit, q.Skip),\n\t}\n\n\treturn c.CountWithQuery(query)\n}\n\nfunc (c *ChannelMessage) FetchMessageIds(q *request.Query) ([]int64, error) {\n\tquery := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"account_id\": q.AccountId,\n\t\t\t\"type_constant\": q.Type,\n\t\t},\n\t\tPluck: \"id\",\n\t\tPagination: *bongo.NewPagination(q.Limit, q.Skip),\n\t\tSort: map[string]string{\n\t\t\t\"created_at\": \"DESC\",\n\t\t},\n\t}\n\n\tvar messageIds []int64\n\tif err := c.Some(&messageIds, query); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif messageIds == nil {\n\t\treturn make([]int64, 0), nil\n\t}\n\n\treturn messageIds, nil\n}\n\n\/\/ BySlug fetchs channel message by its slug\n\/\/ checks if message is in the channel or not\nfunc (c *ChannelMessage) BySlug(query *request.Query) error {\n\tif query.Slug == \"\" {\n\t\treturn errors.New(\"slug is not set\")\n\t}\n\n\t\/\/ fetch message itself\n\tq := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"slug\": query.Slug,\n\t\t},\n\t}\n\n\tif err := c.One(q); err != nil {\n\t\treturn nil\n\t}\n\n\t\/\/ fetch channel by group name\n\tquery.Name = query.GroupName\n\tquery.Type = Channel_TYPE_GROUP\n\tch := NewChannel()\n\tchannel, err := ch.ByName(query)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif channel.Id == 0 {\n\t\treturn errors.New(\"channel is not found\")\n\t}\n\n\t\/\/ check if message is in the channel\n\tcml := NewChannelMessageList()\n\tres, err := cml.IsInChannel(c.Id, channel.Id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ if message is not in the channel\n\tif !res {\n\t\treturn bongo.RecordNotFound\n\t}\n\n\treturn nil\n}\n<commit_msg>Sitemap: Fix error return value of BySlug method<commit_after>package models\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"socialapi\/config\"\n\t\"socialapi\/request\"\n\t\"time\"\n\n\t\"github.com\/VerbalExpressions\/GoVerbalExpressions\"\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/koding\/bongo\"\n)\n\nvar mentionRegex = verbalexpressions.New().\n\tFind(\"@\").\n\tBeginCapture().\n\tWord().\n\tEndCapture().\n\tRegex()\n\ntype ChannelMessage struct {\n\t\/\/ unique identifier of the channel message\n\tId int64 `json:\"id,string\"`\n\n\t\/\/ Body of the mesage\n\tBody string `json:\"body\"`\n\n\t\/\/ Generated Slug for body\n\tSlug string `json:\"slug\" sql:\"NOT NULL;TYPE:VARCHAR(100);\"`\n\n\t\/\/ type of the message\n\tTypeConstant string `json:\"typeConstant\" sql:\"NOT NULL;TYPE:VARCHAR(100);\"`\n\n\t\/\/ Creator of the channel message\n\tAccountId int64 `json:\"accountId,string\" sql:\"NOT NULL\"`\n\n\t\/\/ in which channel this message is created\n\tInitialChannelId int64 `json:\"initialChannelId,string\" sql:\"NOT NULL\"`\n\n\t\/\/ holds troll, unsafe, etc\n\tMetaBits int16 `json:\"-\"`\n\n\t\/\/ Creation date of the message\n\tCreatedAt time.Time `json:\"createdAt\" sql:\"DEFAULT:CURRENT_TIMESTAMP\"`\n\n\t\/\/ Modification date of the message\n\tUpdatedAt time.Time `json:\"updatedAt\" sql:\"DEFAULT:CURRENT_TIMESTAMP\"`\n\n\t\/\/ Deletion date of the channel message\n\tDeletedAt time.Time `json:\"deletedAt\"`\n\n\t\/\/ Extra data storage\n\tPayload gorm.Hstore `json:\"payload,omitempty\"`\n}\n\nfunc (c *ChannelMessage) BeforeCreate() {\n\tif res, err := c.isExemptContent(); err == nil && res {\n\t\tc.MetaBits = updateTrollModeBit(c.MetaBits)\n\t}\n\n\tc.DeletedAt = ZeroDate()\n}\n\nfunc (c *ChannelMessage) BeforeUpdate() {\n\tif res, err := c.isExemptContent(); err == nil && res {\n\t\tc.MetaBits = updateTrollModeBit(c.MetaBits)\n\t}\n\n\tc.DeletedAt = ZeroDate()\n}\n\nfunc (c *ChannelMessage) AfterCreate() {\n\tbongo.B.AfterCreate(c)\n}\n\nfunc (c *ChannelMessage) AfterUpdate() {\n\tbongo.B.AfterUpdate(c)\n}\n\nfunc (c ChannelMessage) AfterDelete() {\n\tbongo.B.AfterDelete(c)\n}\n\nfunc (c ChannelMessage) GetId() int64 {\n\treturn c.Id\n}\n\nfunc (c ChannelMessage) TableName() string {\n\treturn \"api.channel_message\"\n}\n\nconst (\n\tChannelMessage_TYPE_POST = \"post\"\n\tChannelMessage_TYPE_REPLY = \"reply\"\n\tChannelMessage_TYPE_JOIN = \"join\"\n\tChannelMessage_TYPE_LEAVE = \"leave\"\n\tChannelMessage_TYPE_CHAT = \"chat\"\n\tChannelMessage_TYPE_PRIVATE_MESSAGE = \"privatemessage\"\n)\n\nfunc NewChannelMessage() *ChannelMessage {\n\treturn &ChannelMessage{}\n}\n\nfunc (c *ChannelMessage) ById(id int64) error {\n\treturn bongo.B.ById(c, id)\n}\n\nfunc (c *ChannelMessage) One(q *bongo.Query) error {\n\treturn bongo.B.One(c, c, q)\n}\n\nfunc (c *ChannelMessage) Some(data interface{}, q *bongo.Query) error {\n\treturn bongo.B.Some(c, data, q)\n}\n\nfunc (c *ChannelMessage) UpdateMulti(rest ...map[string]interface{}) error {\n\treturn bongo.B.UpdateMulti(c, rest...)\n}\n\nfunc (c *ChannelMessage) CountWithQuery(q *bongo.Query) (int, error) {\n\treturn bongo.B.CountWithQuery(c, q)\n}\n\nfunc (c *ChannelMessage) isExemptContent() (bool, error) {\n\t\/\/ set meta bits if only message is post or a reply\n\tif c.TypeConstant != ChannelMessage_TYPE_POST &&\n\t\tc.TypeConstant != ChannelMessage_TYPE_REPLY {\n\t\treturn false, nil\n\t}\n\n\tif c.AccountId == 0 && c.Id != 0 {\n\t\tif err := c.ById(c.Id); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t} else {\n\t\treturn false, fmt.Errorf(\"Couldnt find accountId from content %+v\", c)\n\t}\n\n\taccount, err := FetchAccountFromCache(c.AccountId)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tif account == nil {\n\t\treturn false, fmt.Errorf(\"Account is nil, accountId:%d\", c.AccountId)\n\t}\n\n\tif account.IsTroll {\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}\n\nfunc bodyLenCheck(body string) error {\n\tif len(body) < config.Get().Limits.MessageBodyMinLen {\n\t\treturn fmt.Errorf(\"Message Body Length should be greater than %d, yours is %d \", config.Get().Limits.MessageBodyMinLen, len(body))\n\t}\n\n\treturn nil\n}\n\n\/\/ todo create a new message while updating the channel_message and delete other\n\/\/ cases, since deletion is a soft delete, old instances will still be there\nfunc (c *ChannelMessage) Update() error {\n\tif err := bodyLenCheck(c.Body); err != nil {\n\t\treturn err\n\t}\n\t\/\/ only update body\n\terr := bongo.B.UpdatePartial(c,\n\t\tmap[string]interface{}{\n\t\t\t\"body\": c.Body,\n\t\t},\n\t)\n\treturn err\n}\n\nfunc (c *ChannelMessage) Create() error {\n\tif err := bodyLenCheck(c.Body); err != nil {\n\t\treturn err\n\t}\n\n\tvar err error\n\tc, err = Slugify(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn bongo.B.Create(c)\n}\n\n\/\/ CreateRaw creates a new channel message without effected by auto generated createdAt\n\/\/ and updatedAt values\nfunc (c *ChannelMessage) CreateRaw() error {\n\tinsertSql := \"INSERT INTO \" +\n\t\tc.TableName() +\n\t\t` (\"body\",\"slug\",\"type_constant\",\"account_id\",\"initial_channel_id\",` +\n\t\t`\"created_at\",\"updated_at\",\"deleted_at\",\"payload\") ` +\n\t\t\"VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9) \" +\n\t\t\"RETURNING ID\"\n\n\treturn bongo.B.DB.CommonDB().QueryRow(insertSql, c.Body, c.Slug, c.TypeConstant, c.AccountId, c.InitialChannelId,\n\t\tc.CreatedAt, c.UpdatedAt, c.DeletedAt, c.Payload).Scan(&c.Id)\n}\n\n\/\/ UpdateBodyRaw updates message body without effecting createdAt\/UpdatedAt\n\/\/ timestamps\nfunc (c *ChannelMessage) UpdateBodyRaw() error {\n\tupdateSql := fmt.Sprintf(\"UPDATE %s SET body=? WHERE id=?\", c.TableName())\n\n\treturn bongo.B.DB.Exec(updateSql, c.Body, c.Id).Error\n}\n\nfunc (c *ChannelMessage) Delete() error {\n\treturn bongo.B.Delete(c)\n}\n\nfunc (c *ChannelMessage) FetchByIds(ids []int64) ([]ChannelMessage, error) {\n\tvar messages []ChannelMessage\n\n\tif len(ids) == 0 {\n\t\treturn messages, nil\n\t}\n\n\tif err := bongo.B.FetchByIds(c, &messages, ids); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn messages, nil\n}\n\nfunc (c *ChannelMessage) BuildMessages(query *request.Query, messages []ChannelMessage) ([]*ChannelMessageContainer, error) {\n\tcontainers := make([]*ChannelMessageContainer, len(messages))\n\tif len(containers) == 0 {\n\t\treturn containers, nil\n\t}\n\n\tfor i, message := range messages {\n\t\td := NewChannelMessage()\n\t\t*d = message\n\t\tdata, err := d.BuildMessage(query)\n\t\tif err != nil {\n\t\t\treturn containers, err\n\t\t}\n\t\tcontainers[i] = data\n\t}\n\n\treturn containers, nil\n}\n\nfunc (c *ChannelMessage) BuildMessage(query *request.Query) (*ChannelMessageContainer, error) {\n\tcmc, err := c.FetchRelatives(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmr := NewMessageReply()\n\tmr.MessageId = c.Id\n\tq := query\n\tq.Limit = 3\n\treplies, err := mr.List(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trepliesCount, err := mr.Count()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcmc.RepliesCount = repliesCount\n\n\tcmc.IsFollowed, err = c.CheckIsMessageFollowed(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpopulatedChannelMessagesReplies := make([]*ChannelMessageContainer, len(replies))\n\tfor rl := 0; rl < len(replies); rl++ {\n\t\tcmrc, err := replies[rl].FetchRelatives(query)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tpopulatedChannelMessagesReplies[rl] = cmrc\n\t}\n\n\tcmc.Replies = populatedChannelMessagesReplies\n\treturn cmc, nil\n}\n\nfunc (c *ChannelMessage) CheckIsMessageFollowed(query *request.Query) (bool, error) {\n\tchannel := NewChannel()\n\tif err := channel.FetchPinnedActivityChannel(query.AccountId, query.GroupName); err != nil {\n\t\tif err == bongo.RecordNotFound {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, err\n\t}\n\n\tcml := NewChannelMessageList()\n\tq := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"channel_id\": channel.Id,\n\t\t\t\"message_id\": c.Id,\n\t\t},\n\t}\n\tif err := cml.One(q); err != nil {\n\t\tif err == bongo.RecordNotFound {\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n\nfunc (c *ChannelMessage) BuildEmptyMessageContainer() (*ChannelMessageContainer, error) {\n\tif c.Id == 0 {\n\t\treturn nil, errors.New(\"Channel message id is not set\")\n\t}\n\tcontainer := NewChannelMessageContainer()\n\tcontainer.Message = c\n\n\toldId, err := FetchAccountOldIdByIdFromCache(c.AccountId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontainer.AccountOldId = oldId\n\n\tinteractionContainer := NewInteractionContainer()\n\tinteractionContainer.ActorsPreview = make([]string, 0)\n\tinteractionContainer.IsInteracted = false\n\tinteractionContainer.ActorsCount = 0\n\n\tcontainer.Interactions = make(map[string]*InteractionContainer)\n\tcontainer.Interactions[\"like\"] = interactionContainer\n\n\treturn container, nil\n}\n\nfunc (c *ChannelMessage) FetchRelatives(query *request.Query) (*ChannelMessageContainer, error) {\n\tcontainer, err := c.BuildEmptyMessageContainer()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ti := NewInteraction()\n\ti.MessageId = c.Id\n\n\t\/\/ get preview\n\tquery.Type = \"like\"\n\tquery.Limit = 3\n\tinteractorIds, err := i.List(query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toldIds, err := FetchOldIdsByAccountIds(interactorIds)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinteractionContainer := NewInteractionContainer()\n\tinteractionContainer.ActorsPreview = oldIds\n\n\t\/\/ check if the current user is interacted in this thread\n\tisInteracted, err := i.IsInteracted(query.AccountId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinteractionContainer.IsInteracted = isInteracted\n\n\t\/\/ fetch interaction count\n\tcount, err := i.Count(query.Type)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinteractionContainer.ActorsCount = count\n\n\tcontainer.Interactions[\"like\"] = interactionContainer\n\treturn container, nil\n}\n\nfunc generateMessageListQuery(channelId int64, q *request.Query) *bongo.Query {\n\tmessageType := q.Type\n\tif messageType == \"\" {\n\t\tmessageType = ChannelMessage_TYPE_POST\n\t}\n\n\treturn &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"account_id\": q.AccountId,\n\t\t\t\"initial_channel_id\": channelId,\n\t\t\t\"type_constant\": messageType,\n\t\t},\n\t\tPagination: *bongo.NewPagination(q.Limit, q.Skip),\n\t\tSort: map[string]string{\n\t\t\t\"created_at\": \"DESC\",\n\t\t},\n\t}\n}\n\nfunc (c *ChannelMessage) FetchMessagesByChannelId(channelId int64, q *request.Query) ([]ChannelMessage, error) {\n\tquery := generateMessageListQuery(channelId, q)\n\n\tvar messages []ChannelMessage\n\tif err := c.Some(&messages, query); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif messages == nil {\n\t\treturn make([]ChannelMessage, 0), nil\n\t}\n\treturn messages, nil\n}\n\nfunc (c *ChannelMessage) GetMentionedUsernames() []string {\n\tflattened := make([]string, 0)\n\n\tres := mentionRegex.FindAllStringSubmatch(c.Body, -1)\n\tif len(res) == 0 {\n\t\treturn flattened\n\t}\n\n\tparticipants := map[string]struct{}{}\n\t\/\/ remove duplicate mentions\n\tfor _, ele := range res {\n\t\tparticipants[ele[1]] = struct{}{}\n\t}\n\n\tfor participant := range participants {\n\t\tflattened = append(flattened, participant)\n\t}\n\n\treturn flattened\n}\n\nfunc (c *ChannelMessage) FetchTotalMessageCount(q *request.Query) (int, error) {\n\tquery := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"account_id\": q.AccountId,\n\t\t\t\"type_constant\": q.Type,\n\t\t},\n\t\tPagination: *bongo.NewPagination(q.Limit, q.Skip),\n\t}\n\n\treturn c.CountWithQuery(query)\n}\n\nfunc (c *ChannelMessage) FetchMessageIds(q *request.Query) ([]int64, error) {\n\tquery := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"account_id\": q.AccountId,\n\t\t\t\"type_constant\": q.Type,\n\t\t},\n\t\tPluck: \"id\",\n\t\tPagination: *bongo.NewPagination(q.Limit, q.Skip),\n\t\tSort: map[string]string{\n\t\t\t\"created_at\": \"DESC\",\n\t\t},\n\t}\n\n\tvar messageIds []int64\n\tif err := c.Some(&messageIds, query); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif messageIds == nil {\n\t\treturn make([]int64, 0), nil\n\t}\n\n\treturn messageIds, nil\n}\n\n\/\/ BySlug fetchs channel message by its slug\n\/\/ checks if message is in the channel or not\nfunc (c *ChannelMessage) BySlug(query *request.Query) error {\n\tif query.Slug == \"\" {\n\t\treturn errors.New(\"slug is not set\")\n\t}\n\n\t\/\/ fetch message itself\n\tq := &bongo.Query{\n\t\tSelector: map[string]interface{}{\n\t\t\t\"slug\": query.Slug,\n\t\t},\n\t}\n\n\tif err := c.One(q); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ fetch channel by group name\n\tquery.Name = query.GroupName\n\tquery.Type = Channel_TYPE_GROUP\n\tch := NewChannel()\n\tchannel, err := ch.ByName(query)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif channel.Id == 0 {\n\t\treturn errors.New(\"channel is not found\")\n\t}\n\n\t\/\/ check if message is in the channel\n\tcml := NewChannelMessageList()\n\tres, err := cml.IsInChannel(c.Id, channel.Id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ if message is not in the channel\n\tif !res {\n\t\treturn bongo.RecordNotFound\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package golog\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/chapsuk\/golog\/syslog\"\n)\n\n\/\/ SyslogWriter write logs to syslog\ntype SyslogWriter struct {\n\tWriter io.WriteCloser\n\ttimeout time.Duration\n\tmu sync.Mutex\n\tdone chan struct{}\n}\n\n\/\/ NewSyslogWriter return new SyslogWriter instance with concurrent writer to syslog\nfunc NewSyslogWriter(network, addr, tag string, timeout time.Duration) *SyslogWriter {\n\ts := &SyslogWriter{\n\t\ttimeout: timeout,\n\t\tdone: make(chan struct{}),\n\t}\n\tw, err := syslog.Dial(network, addr, syslog.LOG_USER, tag)\n\tif err != nil {\n\t\ts.Writer = os.Stdout\n\t\tstd.Errorf(\"error connecting to syslog: %s\", err.Error())\n\n\t\tgo func() {\n\t\t\tt := time.NewTicker(s.timeout)\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-t.C:\n\t\t\t\t\tw, err := syslog.Dial(network, addr, syslog.LOG_USER, tag)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tstd.Errorf(\"error connecting to syslog: %s\", err.Error())\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tt.Stop()\n\t\t\t\t\ts.mu.Lock()\n\t\t\t\t\ts.Writer = w\n\t\t\t\t\ts.mu.Unlock()\n\t\t\t\t\tbreak\n\t\t\t\tcase <-s.done:\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\treturn s\n\t}\n\n\ts.Writer = w\n\treturn s\n}\n\n\/\/ Write writes data to syslog\nfunc (w *SyslogWriter) Write(p []byte) (int, error) {\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\tif sl, ok := w.Writer.(*syslog.Writer); ok {\n\t\terr := sl.SetWriteDeadLine(time.Now().Add(w.timeout))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn w.Writer.Write(p)\n}\n\n\/\/ Close close connection\nfunc (w *SyslogWriter) Close() {\n\tw.done <- struct{}{}\n\tw.Writer.Close()\n}\n<commit_msg>Fixed break from syslog connection loop (#3)<commit_after>package golog\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/chapsuk\/golog\/syslog\"\n)\n\n\/\/ SyslogWriter write logs to syslog\ntype SyslogWriter struct {\n\tWriter io.WriteCloser\n\ttimeout time.Duration\n\tmu sync.Mutex\n\tdone chan struct{}\n}\n\n\/\/ NewSyslogWriter return new SyslogWriter instance with concurrent writer to syslog\nfunc NewSyslogWriter(network, addr, tag string, timeout time.Duration) *SyslogWriter {\n\ts := &SyslogWriter{\n\t\ttimeout: timeout,\n\t\tdone: make(chan struct{}),\n\t}\n\tw, err := syslog.Dial(network, addr, syslog.LOG_USER, tag)\n\tif err != nil {\n\t\ts.Writer = os.Stdout\n\t\tstd.Errorf(\"error connecting to syslog: %s\", err.Error())\n\n\t\tgo func() {\n\t\t\tt := time.NewTicker(s.timeout)\n\t\t\tdefer t.Stop()\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-t.C:\n\t\t\t\t\tw, err := syslog.Dial(network, addr, syslog.LOG_USER, tag)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tstd.Errorf(\"error connecting to syslog: %s\", err.Error())\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\ts.mu.Lock()\n\t\t\t\t\ts.Writer = w\n\t\t\t\t\ts.mu.Unlock()\n\t\t\t\t\treturn\n\t\t\t\tcase <-s.done:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\treturn s\n\t}\n\n\ts.Writer = w\n\treturn s\n}\n\n\/\/ Write writes data to syslog\nfunc (w *SyslogWriter) Write(p []byte) (int, error) {\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\tif sl, ok := w.Writer.(*syslog.Writer); ok {\n\t\terr := sl.SetWriteDeadLine(time.Now().Add(w.timeout))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\treturn w.Writer.Write(p)\n}\n\n\/\/ Close close connection\nfunc (w *SyslogWriter) Close() {\n\tw.done <- struct{}{}\n\tw.Writer.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package keepalivego\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"syscall\"\n\n\t\"github.com\/hnakamur\/ltsvlog\"\n\t\"github.com\/mqliang\/libipvs\"\n)\n\ntype LVS struct {\n\tipvs libipvs.IPVSHandle\n}\n\ntype Config struct {\n\tLogFile string \"yaml:`logfile`\"\n\tEnableDebugLog bool \"yaml:`enable_debug_log`\"\n\tVrrp []ConfigVrrp \"yaml:`vrrp`\"\n\tLvs []ConfigLvs \"yaml:`lvs`\"\n}\n\ntype ConfigVrrp struct {\n\tVrid int \"yaml:`vrid`\"\n\tPriority int \"yaml:`priority`\"\n\tAddress string \"yaml:`address`\"\n}\n\ntype ConfigLvs struct {\n\tName string \"yaml:`name`\"\n\tPort uint16 \"yaml:`port`\"\n\tAddress string \"yaml:`address`\"\n\tSchedule string \"yaml:`schedule`\"\n\tType string \"yaml:`type`\"\n\tServers []ConfigServer \"yaml:`servers`\"\n}\n\ntype ConfigServer struct {\n\tPort uint16 \"yaml:`port`\"\n\tAddress string \"yaml:`address`\"\n\tWeight uint32 \"yaml:`weight`\"\n}\n\nfunc New() (*LVS, error) {\n\tipvs, err := libipvs.New()\n\tif err != nil {\n\t\treturn nil, ltsvlog.WrapErr(err, func(err error) error {\n\t\t\treturn fmt.Errorf(\"failed to create libipvs handler, err=%v\", err)\n\t\t}).Stack(\"\")\n\t}\n\n\treturn &LVS{ipvs: ipvs}, nil\n}\n\nfunc (l *LVS) ReloadConfig(config *Config) error {\n\tipvsServices, err := l.ipvs.ListServices()\n\tif err != nil {\n\t\treturn ltsvlog.WrapErr(err, func(err error) error {\n\t\t\treturn fmt.Errorf(\"failed to list ipvs services, err=%v\", err)\n\t\t}).Stack(\"\")\n\t}\n\n\t\/\/ 不要な設定を削除\n\tfor _, ipvsService := range ipvsServices {\n\t\tvar serviceConf ConfigLvs\n\t\texist := false\n\t\tfor _, serviceConf := range config.Lvs {\n\t\t\tif ipvsService.Address.Equal(net.ParseIP(serviceConf.Address)) {\n\t\t\t\texist = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif exist {\n\t\t\tipvsDests, err := l.ipvs.ListDestinations(ipvsService)\n\t\t\tif err != nil {\n\t\t\t\treturn ltsvlog.WrapErr(err, func(err error) error {\n\t\t\t\t\treturn fmt.Errorf(\"failed to list ipvs destinations, err=%v\", err)\n\t\t\t\t}).Stack(\"\")\n\t\t\t}\n\t\t\tfor _, ipvsDest := range ipvsDests {\n\t\t\t\texist = false\n\t\t\t\tfor _, server := range serviceConf.Servers {\n\t\t\t\t\tif ipvsDest.Address.Equal(net.ParseIP(server.Address)) {\n\t\t\t\t\t\texist = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !exist {\n\t\t\t\t\terr := l.ipvs.DelDestination(ipvsService, ipvsDest)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn ltsvlog.WrapErr(err, func(err error) error {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"faild delete ipvs destination, err=%v\", err)\n\t\t\t\t\t\t}).Stack(\"\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\terr := l.ipvs.DelService(ipvsService)\n\t\t\tif err != nil {\n\t\t\t\treturn ltsvlog.WrapErr(err, func(err error) error {\n\t\t\t\t\treturn fmt.Errorf(\"faild delete ipvs service, err=%s\", err)\n\t\t\t\t}).Stringer(\"serviceAddress\", ipvsService.Address).Stack(\"\")\n\t\t\t}\n\t\t\tltsvlog.Logger.Info().String(\"msg\", \"deleted ipvs service\").Stringer(\"serviceAddress\", ipvsService.Address).Log()\n\t\t}\n\t}\n\n\t\/\/ 設定追加 更新\n\tfor _, serviceConf := range config.Lvs {\n\t\tipAddr := net.ParseIP(serviceConf.Address)\n\t\tvar ipvsService *libipvs.Service\n\t\texist := false\n\t\tfor _, ipvsService = range ipvsServices {\n\t\t\tif ipvsService.Address.Equal(ipAddr) {\n\t\t\t\texist = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tfamily := libipvs.AddressFamily(ipAddressFamily(ipAddr))\n\t\tservice := libipvs.Service{\n\t\t\tAddress: ipAddr,\n\t\t\tAddressFamily: family,\n\t\t\tProtocol: libipvs.Protocol(syscall.IPPROTO_TCP),\n\t\t\tPort: serviceConf.Port,\n\t\t\tSchedName: serviceConf.Schedule,\n\t\t}\n\t\tif !exist {\n\t\t\tif err := l.ipvs.NewService(&service); err != nil {\n\t\t\t\treturn ltsvlog.WrapErr(err, func(err error) error {\n\t\t\t\t\treturn fmt.Errorf(\"faild create ipvs service, err=%s\", err)\n\t\t\t\t}).String(\"address\", serviceConf.Address).\n\t\t\t\t\tUint16(\"port\", serviceConf.Port).\n\t\t\t\t\tString(\"schedule\", serviceConf.Schedule).Stack(\"\")\n\t\t\t}\n\t\t\tipvsService = &service\n\t\t} else {\n\t\t\tif err := l.ipvs.UpdateService(&service); err != nil {\n\t\t\t\treturn ltsvlog.WrapErr(err, func(err error) error {\n\t\t\t\t\treturn fmt.Errorf(\"faild update ipvs service, err=%s\", err)\n\t\t\t\t}).String(\"address\", serviceConf.Address).\n\t\t\t\t\tUint16(\"port\", serviceConf.Port).\n\t\t\t\t\tString(\"schedule\", serviceConf.Schedule).Stack(\"\")\n\t\t\t}\n\t\t}\n\n\t\tipvsDests, err := l.ipvs.ListDestinations(ipvsService)\n\t\tif err != nil {\n\t\t\treturn ltsvlog.WrapErr(err, func(err error) error {\n\t\t\t\treturn fmt.Errorf(\"failed to list ipvs destinations, err=%v\", err)\n\t\t\t}).Stack(\"\")\n\t\t}\n\n\t\tfor _, server := range serviceConf.Servers {\n\t\t\tipAddr := net.ParseIP(server.Address)\n\t\t\texist = false\n\t\t\tfor _, ipvsDest := range ipvsDests {\n\t\t\t\tif ipvsDest.Address.Equal(ipAddr) {\n\t\t\t\t\texist = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tvar fwd libipvs.FwdMethod\n\t\t\tswitch serviceConf.Type {\n\t\t\tcase \"dr\":\n\t\t\t\tfwd = libipvs.IP_VS_CONN_F_DROUTE\n\t\t\tcase \"nat\":\n\t\t\t\tfallthrough\n\t\t\tdefault:\n\t\t\t\tfwd = libipvs.IP_VS_CONN_F_MASQ\n\t\t\t}\n\t\t\tfamily := libipvs.AddressFamily(ipAddressFamily(ipAddr))\n\t\t\tdest := libipvs.Destination{\n\t\t\t\tAddress: ipAddr,\n\t\t\t\tAddressFamily: family,\n\t\t\t\tPort: server.Port,\n\t\t\t\tFwdMethod: fwd,\n\t\t\t\tWeight: server.Weight,\n\t\t\t}\n\t\t\tif exist {\n\t\t\t\terr := l.ipvs.UpdateDestination(ipvsService, &dest)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn ltsvlog.WrapErr(err, func(err error) error {\n\t\t\t\t\t\treturn fmt.Errorf(\"faild create ipvs destination, err=%s\", err)\n\t\t\t\t\t}).String(\"address\", server.Address).\n\t\t\t\t\t\tUint16(\"port\", server.Port).\n\t\t\t\t\t\tString(\"fwdMethod\", serviceConf.Type).\n\t\t\t\t\t\tUint32(\"weight\", server.Weight).Stack(\"\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terr := l.ipvs.NewDestination(ipvsService, &dest)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn ltsvlog.WrapErr(err, func(err error) error {\n\t\t\t\t\t\treturn fmt.Errorf(\"faild create ipvs destination, err=%s\", err)\n\t\t\t\t\t}).String(\"address\", server.Address).\n\t\t\t\t\t\tUint16(\"port\", server.Port).\n\t\t\t\t\t\tString(\"fwdMethod\", serviceConf.Type).\n\t\t\t\t\t\tUint32(\"weight\", server.Weight).Stack(\"\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc ipAddressFamily(ip net.IP) int {\n\tif ip.To4() != nil {\n\t\treturn syscall.AF_INET\n\t} else {\n\t\treturn syscall.AF_INET6\n\t}\n}\n<commit_msg>Change abbreviations to upper case<commit_after>package keepalivego\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"syscall\"\n\n\t\"github.com\/hnakamur\/ltsvlog\"\n\t\"github.com\/mqliang\/libipvs\"\n)\n\ntype LVS struct {\n\tipvs libipvs.IPVSHandle\n}\n\ntype Config struct {\n\tLogFile string \"yaml:`logfile`\"\n\tEnableDebugLog bool \"yaml:`enable_debug_log`\"\n\tVRRP []ConfigVRRP \"yaml:`vrrp`\"\n\tLVS []ConfigLVS \"yaml:`lvs`\"\n}\n\ntype ConfigVRRP struct {\n\tVRID int \"yaml:`vrid`\"\n\tPriority int \"yaml:`priority`\"\n\tAddress string \"yaml:`address`\"\n}\n\ntype ConfigLVS struct {\n\tName string \"yaml:`name`\"\n\tPort uint16 \"yaml:`port`\"\n\tAddress string \"yaml:`address`\"\n\tSchedule string \"yaml:`schedule`\"\n\tType string \"yaml:`type`\"\n\tServers []ConfigServer \"yaml:`servers`\"\n}\n\ntype ConfigServer struct {\n\tPort uint16 \"yaml:`port`\"\n\tAddress string \"yaml:`address`\"\n\tWeight uint32 \"yaml:`weight`\"\n}\n\nfunc New() (*LVS, error) {\n\tipvs, err := libipvs.New()\n\tif err != nil {\n\t\treturn nil, ltsvlog.WrapErr(err, func(err error) error {\n\t\t\treturn fmt.Errorf(\"failed to create libipvs handler, err=%v\", err)\n\t\t}).Stack(\"\")\n\t}\n\n\treturn &LVS{ipvs: ipvs}, nil\n}\n\nfunc (l *LVS) ReloadConfig(config *Config) error {\n\tipvsServices, err := l.ipvs.ListServices()\n\tif err != nil {\n\t\treturn ltsvlog.WrapErr(err, func(err error) error {\n\t\t\treturn fmt.Errorf(\"failed to list ipvs services, err=%v\", err)\n\t\t}).Stack(\"\")\n\t}\n\n\t\/\/ 不要な設定を削除\n\tfor _, ipvsService := range ipvsServices {\n\t\tvar serviceConf ConfigLVS\n\t\texist := false\n\t\tfor _, serviceConf := range config.LVS {\n\t\t\tif ipvsService.Address.Equal(net.ParseIP(serviceConf.Address)) {\n\t\t\t\texist = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif exist {\n\t\t\tipvsDests, err := l.ipvs.ListDestinations(ipvsService)\n\t\t\tif err != nil {\n\t\t\t\treturn ltsvlog.WrapErr(err, func(err error) error {\n\t\t\t\t\treturn fmt.Errorf(\"failed to list ipvs destinations, err=%v\", err)\n\t\t\t\t}).Stack(\"\")\n\t\t\t}\n\t\t\tfor _, ipvsDest := range ipvsDests {\n\t\t\t\texist = false\n\t\t\t\tfor _, server := range serviceConf.Servers {\n\t\t\t\t\tif ipvsDest.Address.Equal(net.ParseIP(server.Address)) {\n\t\t\t\t\t\texist = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !exist {\n\t\t\t\t\terr := l.ipvs.DelDestination(ipvsService, ipvsDest)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn ltsvlog.WrapErr(err, func(err error) error {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"faild delete ipvs destination, err=%v\", err)\n\t\t\t\t\t\t}).Stack(\"\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\terr := l.ipvs.DelService(ipvsService)\n\t\t\tif err != nil {\n\t\t\t\treturn ltsvlog.WrapErr(err, func(err error) error {\n\t\t\t\t\treturn fmt.Errorf(\"faild delete ipvs service, err=%s\", err)\n\t\t\t\t}).Stringer(\"serviceAddress\", ipvsService.Address).Stack(\"\")\n\t\t\t}\n\t\t\tltsvlog.Logger.Info().String(\"msg\", \"deleted ipvs service\").Stringer(\"serviceAddress\", ipvsService.Address).Log()\n\t\t}\n\t}\n\n\t\/\/ 設定追加 更新\n\tfor _, serviceConf := range config.LVS {\n\t\tipAddr := net.ParseIP(serviceConf.Address)\n\t\tvar ipvsService *libipvs.Service\n\t\texist := false\n\t\tfor _, ipvsService = range ipvsServices {\n\t\t\tif ipvsService.Address.Equal(ipAddr) {\n\t\t\t\texist = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tfamily := libipvs.AddressFamily(ipAddressFamily(ipAddr))\n\t\tservice := libipvs.Service{\n\t\t\tAddress: ipAddr,\n\t\t\tAddressFamily: family,\n\t\t\tProtocol: libipvs.Protocol(syscall.IPPROTO_TCP),\n\t\t\tPort: serviceConf.Port,\n\t\t\tSchedName: serviceConf.Schedule,\n\t\t}\n\t\tif !exist {\n\t\t\tif err := l.ipvs.NewService(&service); err != nil {\n\t\t\t\treturn ltsvlog.WrapErr(err, func(err error) error {\n\t\t\t\t\treturn fmt.Errorf(\"faild create ipvs service, err=%s\", err)\n\t\t\t\t}).String(\"address\", serviceConf.Address).\n\t\t\t\t\tUint16(\"port\", serviceConf.Port).\n\t\t\t\t\tString(\"schedule\", serviceConf.Schedule).Stack(\"\")\n\t\t\t}\n\t\t\tipvsService = &service\n\t\t} else {\n\t\t\tif err := l.ipvs.UpdateService(&service); err != nil {\n\t\t\t\treturn ltsvlog.WrapErr(err, func(err error) error {\n\t\t\t\t\treturn fmt.Errorf(\"faild update ipvs service, err=%s\", err)\n\t\t\t\t}).String(\"address\", serviceConf.Address).\n\t\t\t\t\tUint16(\"port\", serviceConf.Port).\n\t\t\t\t\tString(\"schedule\", serviceConf.Schedule).Stack(\"\")\n\t\t\t}\n\t\t}\n\n\t\tipvsDests, err := l.ipvs.ListDestinations(ipvsService)\n\t\tif err != nil {\n\t\t\treturn ltsvlog.WrapErr(err, func(err error) error {\n\t\t\t\treturn fmt.Errorf(\"failed to list ipvs destinations, err=%v\", err)\n\t\t\t}).Stack(\"\")\n\t\t}\n\n\t\tfor _, server := range serviceConf.Servers {\n\t\t\tipAddr := net.ParseIP(server.Address)\n\t\t\texist = false\n\t\t\tfor _, ipvsDest := range ipvsDests {\n\t\t\t\tif ipvsDest.Address.Equal(ipAddr) {\n\t\t\t\t\texist = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tvar fwd libipvs.FwdMethod\n\t\t\tswitch serviceConf.Type {\n\t\t\tcase \"dr\":\n\t\t\t\tfwd = libipvs.IP_VS_CONN_F_DROUTE\n\t\t\tcase \"nat\":\n\t\t\t\tfallthrough\n\t\t\tdefault:\n\t\t\t\tfwd = libipvs.IP_VS_CONN_F_MASQ\n\t\t\t}\n\t\t\tfamily := libipvs.AddressFamily(ipAddressFamily(ipAddr))\n\t\t\tdest := libipvs.Destination{\n\t\t\t\tAddress: ipAddr,\n\t\t\t\tAddressFamily: family,\n\t\t\t\tPort: server.Port,\n\t\t\t\tFwdMethod: fwd,\n\t\t\t\tWeight: server.Weight,\n\t\t\t}\n\t\t\tif exist {\n\t\t\t\terr := l.ipvs.UpdateDestination(ipvsService, &dest)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn ltsvlog.WrapErr(err, func(err error) error {\n\t\t\t\t\t\treturn fmt.Errorf(\"faild create ipvs destination, err=%s\", err)\n\t\t\t\t\t}).String(\"address\", server.Address).\n\t\t\t\t\t\tUint16(\"port\", server.Port).\n\t\t\t\t\t\tString(\"fwdMethod\", serviceConf.Type).\n\t\t\t\t\t\tUint32(\"weight\", server.Weight).Stack(\"\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terr := l.ipvs.NewDestination(ipvsService, &dest)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn ltsvlog.WrapErr(err, func(err error) error {\n\t\t\t\t\t\treturn fmt.Errorf(\"faild create ipvs destination, err=%s\", err)\n\t\t\t\t\t}).String(\"address\", server.Address).\n\t\t\t\t\t\tUint16(\"port\", server.Port).\n\t\t\t\t\t\tString(\"fwdMethod\", serviceConf.Type).\n\t\t\t\t\t\tUint32(\"weight\", server.Weight).Stack(\"\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc ipAddressFamily(ip net.IP) int {\n\tif ip.To4() != nil {\n\t\treturn syscall.AF_INET\n\t} else {\n\t\treturn syscall.AF_INET6\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"code.google.com\/p\/go9p\/p\"\n\t\"code.google.com\/p\/go9p\/p\/srv\"\n\t\"strings\"\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"unicode\/utf8\"\n)\n\ntype M9Player struct {\n\tplaylist []string\n\tposition int\n\tqueue []string\n\n\tplayer *M9Play\n\tsong *string\n\n\tactions chan func()\n}\n\ntype M9Play struct {\n\tproc *os.Process\n\tkilled bool\n}\n\nfunc (player *M9Play) Kill() {\n\tplayer.killed = true\n\tplayer.proc.Signal(os.Interrupt)\n}\n\nvar m9 *M9Player\n\nfunc (m9 *M9Player) spawn(song string) {\n\tpath, err := exec.LookPath(\"m9play\")\n\tif err != nil {\n\t\tfmt.Printf(\"couldn't find m9play: %s\\n\", err)\n\t\treturn\n\t}\n\tproc, err := os.StartProcess(path, []string{\"m9play\", song}, new(os.ProcAttr))\n\tif err != nil {\n\t\tfmt.Printf(\"couldn't spawn player: %s\\n\", err)\n\t\treturn\n\t}\n\tplayer := M9Play{proc, false}\n\tm9.player = &player\n\tm9.song = &song\n\tevents <- \"Play \" + song\n\tgo func() {\n\t\tproc.Wait()\n\t\tif player.killed {\n\t\t\treturn\n\t\t}\n\t\tif len(m9.queue) == 0 && len(m9.playlist) > 0 {\n\t\t\tm9.position = (m9.position + 1) % len(m9.playlist)\n\t\t}\n\t\tm9.song = nil\n\t\tm9.player = nil\n\t\tm9.Play(\"\")\n\t}()\n}\n\nfunc (m9 *M9Player) state() string {\n\tvar s string\n\tif m9.player == nil {\n\t\ts = \"Stop\"\n\t} else {\n\t\ts = \"Play\"\n\t}\n\n\tif m9.song != nil {\n\t\treturn s + \" \" + *m9.song\n\t} else if len(m9.queue) > 0 {\n\t\treturn s + \" \" + m9.queue[0]\n\t} else if len(m9.playlist) > 0 {\n\t\treturn s + \" \" + m9.playlist[m9.position]\n\t}\n\treturn s\n}\n\nfunc (m9 *M9Player) Add(song string) {\n\tm9.playlist = append(m9.playlist, song)\n}\n\nfunc (m9 *M9Player) Clear() {\n\tm9.playlist = make([]string, 0)\n}\n\nfunc (m9 *M9Player) Enqueue(song string) {\n\tm9.queue = append(m9.queue, song)\n}\n\nfunc (m9 *M9Player) Play(song string) {\n\tplayer := m9.player\n\tif player != nil {\n\t\t\/* already playing; stop the current player first *\/\n\t\tm9.player = nil\n\t\tplayer.Kill()\n\t}\n\tif song != \"\" {\n\t\tm9.spawn(song)\n\t} else {\n\t\tif len(m9.queue) > 0 {\n\t\t\tm9.spawn(m9.queue[0])\n\t\t\tm9.queue = m9.queue[1:]\n\t\t} else if len(m9.playlist) > 0 {\n\t\t\tm9.spawn(m9.playlist[m9.position])\n\t\t}\n\t}\n}\n\nfunc (m9 *M9Player) Skip(n int) {\n\tif len(m9.playlist) == 0 {\n\t\treturn\n\t}\n\tm9.position += n\n\tm9.position %= len(m9.playlist)\n\tif m9.position < 0 {\n\t\tm9.position += len(m9.playlist)\n\t}\n\tif m9.player != nil {\n\t\tm9.Play(\"\")\n\t} else {\n\t\tevents <- m9.state()\n\t}\n}\n\nfunc (m9 *M9Player) Stop() {\n\tplayer := m9.player\n\tm9.player = nil\n\tif player != nil {\n\t\tplayer.Kill()\n\t}\n\tevents <- m9.state()\n}\n\nfunc play(song string) {\n\tfmt.Printf(\"play: %s\\n\", song)\n\tm9.Play(song)\n}\n\nfunc skip(amount string) error {\n\tif amount == \"\" {\n\t\tm9.Skip(1)\n\t\treturn nil\n\t}\n\ti, err := strconv.Atoi(amount)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm9.Skip(i)\n\treturn nil\n}\n\nfunc stop() {\n\tfmt.Printf(\"stop:\\n\")\n\tm9.Stop()\n}\n\n\n\n\n\nvar events chan string\nvar register chan chan string\n\nfunc eventer() {\n\tlisteners := make([]chan string, 0)\n\tfor {\n\t\tselect {\n\t\tcase ev := <- events:\n\t\t\tfor i := range(listeners) {\n\t\t\t\tlisteners[i] <- ev\n\t\t\t}\n\t\t\tlisteners = make([]chan string, 0)\n\n\t\tcase l := <- register:\n\t\t\tlisteners = append(listeners, l)\n\t\t}\n\t}\n}\n\nfunc waitForEvent() string {\n\tc := make(chan string)\n\tregister <- c\n\tev := <- c\n\treturn ev\n}\n\nvar net = flag.String(\"net\", \"unix\", \"network type\")\nvar addr = flag.String(\"addr\", \"\/tmp\/ns.sqweek.:0\/m9u\", \"network address\")\n\ntype CtlFile struct {\n\tsrv.File\n}\ntype SongListFile struct {\n\tsrv.File\n\trd map[*srv.Fid] []byte\n\twr map[*srv.Fid] *PartialLine\n\tSongAdded func(string)\n}\ntype ListFile struct {\n\tSongListFile\n}\ntype QueueFile struct {\n\tSongListFile\n}\ntype EventFile struct {\n\tsrv.File\n}\n\nfunc (*CtlFile) Write(fid *srv.FFid, b []byte, offset uint64) (n int, err error) {\n\tcmd := string(b)\n\tif strings.HasPrefix(cmd, \"play\") {\n\t\tplay(strings.Trim(cmd[4:], \" \\n\"))\n\t} else if strings.HasPrefix(cmd, \"skip\") {\n\t\terr = skip(strings.Trim(cmd[4:], \" \\n\"))\n\t} else if strings.HasPrefix(cmd, \"stop\") {\n\t\tstop()\n\t} else {\n\t\terr = m9err(\"\/ctl\", \"write\", \"syntax error\")\n\t}\n\tif err == nil {\n\t\tn = len(b)\n\t}\n\treturn n, err\n}\n\nfunc mkbuf(lst []string) []byte {\n\tbuflen := 0\n\tfor i := range(lst) {\n\t\tbuflen += 1 + len([]byte(lst[i]))\n\t}\n\tbuf := make([]byte, buflen)\n\tj := 0\n\tfor i := range(lst) {\n\t\tj += copy(buf[j:], lst[i])\n\t\tbuf[j] = '\\n'\n\t\tj++\n\t}\n\treturn buf\n}\n\ntype PartialLine struct {\n\tleftover []byte\n}\n\nfunc (part *PartialLine) append(bytes []byte) string {\n\tif part.leftover == nil {\n\t\treturn string(bytes)\n\t}\n\tleft := part.leftover\n\tpart.leftover = nil\n\treturn string(left) + string(bytes)\n}\n\nfunc (lstfile *ListFile) Open(fid *srv.FFid, mode uint8) error {\n\tif mode & 3 == p.OWRITE || mode & 3 == p.ORDWR {\n\t\tlstfile.wr[fid.Fid] = new(PartialLine)\n\t\tif mode & p.OTRUNC != 0 {\n\t\t\tm9.Clear()\n\t\t}\n\t}\n\tif mode & 3 == p.OREAD || mode & 3 == p.ORDWR {\n\t\tlstfile.rd[fid.Fid] = mkbuf(m9.playlist)\n\t}\n\treturn nil\n}\n\ntype M9Error struct {\n\tfile string\n\top string\n\tmsg string\n}\n\nfunc (e *M9Error) Error() string {\n\treturn e.file + \": \" + e.op + \": \" + e.msg\n}\n\nfunc m9err(file string, op string, msg string) *M9Error {\n\terr := new(M9Error)\n\terr.file = file\n\terr.op = op\n\terr.msg = msg\n\treturn err\n}\n\nfunc (slf *SongListFile) Write(fid *srv.FFid, b []byte, offset uint64) (int, error) {\n\tprefix, ok := slf.wr[fid.Fid]\n\tif !ok {\n\t\treturn 0, m9err(fid.F.Name, \"write\", \"bad state\")\n\t}\n\ti := 0\n\tfor {\n\t\tj := bytes.IndexByte(b[i:], '\\n')\n\t\tif j == -1 {\n\t\t\tbreak\n\t\t}\n\t\tsong := prefix.append(b[i:i+j])\n\t\tslf.SongAdded(song)\n\t\t\/\/m9.Add(song)\n\t\ti += j+1\n\t}\n\tif i < len(b) {\n\t\tprefix.leftover = b[i:]\n\t}\n\treturn len(b), nil\n}\n\nfunc min(a uint64, b uint64) uint64 {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc (slf *SongListFile) Read(fid *srv.FFid, b []byte, offset uint64) (int, error) {\n\tbuf, ok := slf.rd[fid.Fid]\n\tif !ok {\n\t\treturn 0, m9err(fid.F.Name, \"read\", \"bad state\")\n\t}\n\tremaining := uint64(len(buf)) - offset\n\tn := min(remaining, uint64(len(b)))\n\tcopy(b, buf[offset:offset + n])\n\treturn int(n), nil\n}\n\nfunc (slf *SongListFile) Clunk(fid *srv.FFid) error {\n\tdelete(slf.rd, fid.Fid)\n\tdelete(slf.wr, fid.Fid)\n\treturn nil\n}\n\n\nfunc (qf *QueueFile) Open(fid *srv.FFid, mode uint8) error {\n\tif mode & 3 == p.OWRITE || mode & 3 == p.ORDWR {\n\t\tqf.wr[fid.Fid] = new(PartialLine)\n\t}\n\tif mode & 3 == p.OREAD || mode & 3 == p.ORDWR {\n\t\tqf.rd[fid.Fid] = mkbuf(m9.queue)\n\t}\n\treturn nil\n}\n\n\nfunc (*EventFile) Read(fid *srv.FFid, b []byte, offset uint64) (int, error) {\n\tvar ev string\n\tif offset == 0 {\n\t\tev = m9.state()\n\t} else {\n\t\tev = waitForEvent()\n\t}\n\tbuf := []byte(ev)\n\tfor len(buf) > len(b) - 1 {\n\t\t_, size := utf8.DecodeLastRune(buf)\n\t\tbuf = buf[:len(buf)-size]\n\t}\n\tcopy(b[:len(buf)], buf)\n\tb[len(buf)] = byte('\\n')\n\treturn len(buf)+1, nil\n}\n\nfunc (slf *SongListFile) init(f func(string)) {\n\tslf.wr = make(map[*srv.Fid]*PartialLine)\n\tslf.rd = make(map[*srv.Fid] []byte)\n\tslf.SongAdded = f\n}\n\nfunc main() {\n\tvar err error\n\tflag.Parse()\n\n\tuid := p.OsUsers.Uid2User(os.Geteuid())\n\tgid := p.OsUsers.Gid2Group(os.Getegid())\n\tfmt.Printf(\"uid = %d gid = %d\\n\", os.Geteuid(), os.Getegid())\n\n\tevents = make(chan string)\n\tregister = make(chan chan string)\n\n\tm9 = new(M9Player)\n\n\tgo eventer()\n\n\troot := new(srv.File)\n\troot.Add(nil, \"\/\", uid, gid, p.DMDIR|0555, nil)\n\tctl := new(CtlFile)\n\tctl.Add(root, \"ctl\", uid, gid, 0644, ctl)\n\tlist := new(ListFile)\n\tlist.init(func(song string) {m9.Add(song)})\n\tlist.Add(root, \"list\", uid, gid, 0644, list)\n\tqueue := new(QueueFile)\n\tqueue.init(func(song string) {m9.Enqueue(song)})\n\tqueue.Add(root, \"queue\", uid, gid, 0644, queue)\n\tevent := new(EventFile)\n\tevent.Add(root, \"event\", uid, gid, 0444, event)\n\n\ts := srv.NewFileSrv(root)\n\ts.Start(s)\n\terr = s.StartNetListener(*net, *addr)\n\tif err != nil {\n\t\tfmt.Printf(\"Error: %s\\n\", err)\n\t}\n\n\treturn\n}\n\n<commit_msg>Fix state when skipping while stopped. Remove debugging.<commit_after>package main\n\nimport (\n\t\"code.google.com\/p\/go9p\/p\"\n\t\"code.google.com\/p\/go9p\/p\/srv\"\n\t\"strings\"\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"unicode\/utf8\"\n)\n\ntype M9Player struct {\n\tplaylist []string\n\tposition int\n\tqueue []string\n\n\tplayer *M9Play\n\tsong *string\n\n\tactions chan func()\n}\n\ntype M9Play struct {\n\tSong string\n\tproc *os.Process\n\tkilled bool\n}\n\nfunc (player *M9Play) Kill() {\n\tplayer.killed = true\n\tplayer.proc.Signal(os.Interrupt)\n}\n\nvar m9 *M9Player\n\nfunc (m9 *M9Player) spawn(song string) {\n\tpath, err := exec.LookPath(\"m9play\")\n\tif err != nil {\n\t\tfmt.Printf(\"couldn't find m9play: %s\\n\", err)\n\t\treturn\n\t}\n\tproc, err := os.StartProcess(path, []string{\"m9play\", song}, new(os.ProcAttr))\n\tif err != nil {\n\t\tfmt.Printf(\"couldn't spawn player: %s\\n\", err)\n\t\treturn\n\t}\n\tplayer := M9Play{song, proc, false}\n\tm9.player = &player\n\tevents <- \"Play \" + song\n\tgo func() {\n\t\tproc.Wait()\n\t\tif player.killed {\n\t\t\treturn\n\t\t}\n\t\tif len(m9.queue) == 0 && len(m9.playlist) > 0 {\n\t\t\tm9.position = (m9.position + 1) % len(m9.playlist)\n\t\t}\n\t\tm9.player = nil\n\t\tm9.Play(\"\")\n\t}()\n}\n\nfunc (m9 *M9Player) state() string {\n\tplayer := m9.player\n\tif player != nil {\n\t\treturn \"Play \" + player.Song\n\t} else {\n\t\tif len(m9.queue) > 0 {\n\t\t\treturn \"Stop \" + m9.queue[0]\n\t\t} else if len(m9.playlist) > 0 {\n\t\t\treturn \"Stop \" + m9.playlist[m9.position]\n\t\t}\n\t\treturn \"Stop\"\n\t}\n}\n\nfunc (m9 *M9Player) Add(song string) {\n\tm9.playlist = append(m9.playlist, song)\n}\n\nfunc (m9 *M9Player) Clear() {\n\tm9.playlist = make([]string, 0)\n}\n\nfunc (m9 *M9Player) Enqueue(song string) {\n\tm9.queue = append(m9.queue, song)\n}\n\nfunc (m9 *M9Player) Play(song string) {\n\tplayer := m9.player\n\tif player != nil {\n\t\t\/* already playing; stop the current player first *\/\n\t\tm9.player = nil\n\t\tplayer.Kill()\n\t}\n\tif song != \"\" {\n\t\tm9.spawn(song)\n\t} else {\n\t\tif len(m9.queue) > 0 {\n\t\t\tm9.spawn(m9.queue[0])\n\t\t\tm9.queue = m9.queue[1:]\n\t\t} else if len(m9.playlist) > 0 {\n\t\t\tm9.spawn(m9.playlist[m9.position])\n\t\t}\n\t}\n}\n\nfunc (m9 *M9Player) Skip(n int) {\n\tif len(m9.playlist) == 0 {\n\t\treturn\n\t}\n\tm9.position += n\n\tm9.position %= len(m9.playlist)\n\tif m9.position < 0 {\n\t\tm9.position += len(m9.playlist)\n\t}\n\tif m9.player != nil {\n\t\tm9.Play(\"\")\n\t} else {\n\t\tevents <- m9.state()\n\t}\n}\n\nfunc (m9 *M9Player) Stop() {\n\tplayer := m9.player\n\tm9.player = nil\n\tif player != nil {\n\t\tplayer.Kill()\n\t}\n\tevents <- m9.state()\n}\n\nfunc play(song string) {\n\tm9.Play(song)\n}\n\nfunc skip(amount string) error {\n\tif amount == \"\" {\n\t\tm9.Skip(1)\n\t\treturn nil\n\t}\n\ti, err := strconv.Atoi(amount)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm9.Skip(i)\n\treturn nil\n}\n\nfunc stop() {\n\tm9.Stop()\n}\n\n\n\n\n\nvar events chan string\nvar register chan chan string\n\nfunc eventer() {\n\tlisteners := make([]chan string, 0)\n\tfor {\n\t\tselect {\n\t\tcase ev := <- events:\n\t\t\tfor i := range(listeners) {\n\t\t\t\tlisteners[i] <- ev\n\t\t\t}\n\t\t\tlisteners = make([]chan string, 0)\n\n\t\tcase l := <- register:\n\t\t\tlisteners = append(listeners, l)\n\t\t}\n\t}\n}\n\nfunc waitForEvent() string {\n\tc := make(chan string)\n\tregister <- c\n\tev := <- c\n\treturn ev\n}\n\nvar net = flag.String(\"net\", \"unix\", \"network type\")\nvar addr = flag.String(\"addr\", \"\/tmp\/ns.sqweek.:0\/m9u\", \"network address\")\n\ntype CtlFile struct {\n\tsrv.File\n}\ntype SongListFile struct {\n\tsrv.File\n\trd map[*srv.Fid] []byte\n\twr map[*srv.Fid] *PartialLine\n\tSongAdded func(string)\n}\ntype ListFile struct {\n\tSongListFile\n}\ntype QueueFile struct {\n\tSongListFile\n}\ntype EventFile struct {\n\tsrv.File\n}\n\nfunc (*CtlFile) Write(fid *srv.FFid, b []byte, offset uint64) (n int, err error) {\n\tcmd := string(b)\n\tif strings.HasPrefix(cmd, \"play\") {\n\t\tplay(strings.Trim(cmd[4:], \" \\n\"))\n\t} else if strings.HasPrefix(cmd, \"skip\") {\n\t\terr = skip(strings.Trim(cmd[4:], \" \\n\"))\n\t} else if strings.HasPrefix(cmd, \"stop\") {\n\t\tstop()\n\t} else {\n\t\terr = m9err(\"\/ctl\", \"write\", \"syntax error\")\n\t}\n\tif err == nil {\n\t\tn = len(b)\n\t}\n\treturn n, err\n}\n\nfunc mkbuf(lst []string) []byte {\n\tbuflen := 0\n\tfor i := range(lst) {\n\t\tbuflen += 1 + len([]byte(lst[i]))\n\t}\n\tbuf := make([]byte, buflen)\n\tj := 0\n\tfor i := range(lst) {\n\t\tj += copy(buf[j:], lst[i])\n\t\tbuf[j] = '\\n'\n\t\tj++\n\t}\n\treturn buf\n}\n\ntype PartialLine struct {\n\tleftover []byte\n}\n\nfunc (part *PartialLine) append(bytes []byte) string {\n\tif part.leftover == nil {\n\t\treturn string(bytes)\n\t}\n\tleft := part.leftover\n\tpart.leftover = nil\n\treturn string(left) + string(bytes)\n}\n\nfunc (lstfile *ListFile) Open(fid *srv.FFid, mode uint8) error {\n\tif mode & 3 == p.OWRITE || mode & 3 == p.ORDWR {\n\t\tlstfile.wr[fid.Fid] = new(PartialLine)\n\t\tif mode & p.OTRUNC != 0 {\n\t\t\tm9.Clear()\n\t\t}\n\t}\n\tif mode & 3 == p.OREAD || mode & 3 == p.ORDWR {\n\t\tlstfile.rd[fid.Fid] = mkbuf(m9.playlist)\n\t}\n\treturn nil\n}\n\ntype M9Error struct {\n\tfile string\n\top string\n\tmsg string\n}\n\nfunc (e *M9Error) Error() string {\n\treturn e.file + \": \" + e.op + \": \" + e.msg\n}\n\nfunc m9err(file string, op string, msg string) *M9Error {\n\terr := new(M9Error)\n\terr.file = file\n\terr.op = op\n\terr.msg = msg\n\treturn err\n}\n\nfunc (slf *SongListFile) Write(fid *srv.FFid, b []byte, offset uint64) (int, error) {\n\tprefix, ok := slf.wr[fid.Fid]\n\tif !ok {\n\t\treturn 0, m9err(fid.F.Name, \"write\", \"bad state\")\n\t}\n\ti := 0\n\tfor {\n\t\tj := bytes.IndexByte(b[i:], '\\n')\n\t\tif j == -1 {\n\t\t\tbreak\n\t\t}\n\t\tsong := prefix.append(b[i:i+j])\n\t\tslf.SongAdded(song)\n\t\t\/\/m9.Add(song)\n\t\ti += j+1\n\t}\n\tif i < len(b) {\n\t\tprefix.leftover = b[i:]\n\t}\n\treturn len(b), nil\n}\n\nfunc min(a uint64, b uint64) uint64 {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc (slf *SongListFile) Read(fid *srv.FFid, b []byte, offset uint64) (int, error) {\n\tbuf, ok := slf.rd[fid.Fid]\n\tif !ok {\n\t\treturn 0, m9err(fid.F.Name, \"read\", \"bad state\")\n\t}\n\tremaining := uint64(len(buf)) - offset\n\tn := min(remaining, uint64(len(b)))\n\tcopy(b, buf[offset:offset + n])\n\treturn int(n), nil\n}\n\nfunc (slf *SongListFile) Clunk(fid *srv.FFid) error {\n\tdelete(slf.rd, fid.Fid)\n\tdelete(slf.wr, fid.Fid)\n\treturn nil\n}\n\n\nfunc (qf *QueueFile) Open(fid *srv.FFid, mode uint8) error {\n\tif mode & 3 == p.OWRITE || mode & 3 == p.ORDWR {\n\t\tqf.wr[fid.Fid] = new(PartialLine)\n\t}\n\tif mode & 3 == p.OREAD || mode & 3 == p.ORDWR {\n\t\tqf.rd[fid.Fid] = mkbuf(m9.queue)\n\t}\n\treturn nil\n}\n\n\nfunc (*EventFile) Read(fid *srv.FFid, b []byte, offset uint64) (int, error) {\n\tvar ev string\n\tif offset == 0 {\n\t\tev = m9.state()\n\t} else {\n\t\tev = waitForEvent()\n\t}\n\tbuf := []byte(ev)\n\tfor len(buf) > len(b) - 1 {\n\t\t_, size := utf8.DecodeLastRune(buf)\n\t\tbuf = buf[:len(buf)-size]\n\t}\n\tcopy(b[:len(buf)], buf)\n\tb[len(buf)] = byte('\\n')\n\treturn len(buf)+1, nil\n}\n\nfunc (slf *SongListFile) init(f func(string)) {\n\tslf.wr = make(map[*srv.Fid]*PartialLine)\n\tslf.rd = make(map[*srv.Fid] []byte)\n\tslf.SongAdded = f\n}\n\nfunc main() {\n\tvar err error\n\tflag.Parse()\n\n\tuid := p.OsUsers.Uid2User(os.Geteuid())\n\tgid := p.OsUsers.Gid2Group(os.Getegid())\n\tfmt.Printf(\"uid = %d gid = %d\\n\", os.Geteuid(), os.Getegid())\n\n\tevents = make(chan string)\n\tregister = make(chan chan string)\n\n\tm9 = new(M9Player)\n\n\tgo eventer()\n\n\troot := new(srv.File)\n\troot.Add(nil, \"\/\", uid, gid, p.DMDIR|0555, nil)\n\tctl := new(CtlFile)\n\tctl.Add(root, \"ctl\", uid, gid, 0644, ctl)\n\tlist := new(ListFile)\n\tlist.init(func(song string) {m9.Add(song)})\n\tlist.Add(root, \"list\", uid, gid, 0644, list)\n\tqueue := new(QueueFile)\n\tqueue.init(func(song string) {m9.Enqueue(song)})\n\tqueue.Add(root, \"queue\", uid, gid, 0644, queue)\n\tevent := new(EventFile)\n\tevent.Add(root, \"event\", uid, gid, 0444, event)\n\n\ts := srv.NewFileSrv(root)\n\ts.Start(s)\n\terr = s.StartNetListener(*net, *addr)\n\tif err != nil {\n\t\tfmt.Printf(\"Error: %s\\n\", err)\n\t}\n\n\treturn\n}\n\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage maas\n\nimport (\n\t\"github.com\/juju\/gomaasapi\"\n\tjc \"github.com\/juju\/testing\/checkers\"\n\tgc \"gopkg.in\/check.v1\"\n\n\t\"github.com\/juju\/juju\/environs\/config\"\n\t\"github.com\/juju\/juju\/feature\"\n\tcoretesting \"github.com\/juju\/juju\/testing\"\n)\n\ntype maas2Suite struct {\n\tbaseProviderSuite\n}\n\nfunc (suite *maas2Suite) SetUpTest(c *gc.C) {\n\tsuite.baseProviderSuite.SetUpTest(c)\n\tsuite.SetFeatureFlags(feature.MAAS2)\n}\n\nfunc (suite *maas2Suite) injectController(controller gomaasapi.Controller) {\n\tmockGetController := func(maasServer, apiKey string) (gomaasapi.Controller, error) {\n\t\treturn controller, nil\n\t}\n\tsuite.PatchValue(&GetMAAS2Controller, mockGetController)\n}\n\nfunc (suite *maas2Suite) makeEnviron(c *gc.C, controller gomaasapi.Controller) *maasEnviron {\n\tif controller != nil {\n\t\tsuite.injectController(controller)\n\t}\n\ttestAttrs := coretesting.Attrs{}\n\tfor k, v := range maasEnvAttrs {\n\t\ttestAttrs[k] = v\n\t}\n\ttestAttrs[\"maas-server\"] = \"http:\/\/any-old-junk.invalid\/\"\n\tattrs := coretesting.FakeConfig().Merge(testAttrs)\n\tcfg, err := config.New(config.NoDefaults, attrs)\n\tc.Assert(err, jc.ErrorIsNil)\n\tenv, err := NewEnviron(cfg)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(env, gc.NotNil)\n\treturn env\n}\n\ntype fakeController struct {\n\tgomaasapi.Controller\n\tbootResources []gomaasapi.BootResource\n\tbootResourcesError error\n\tmachines []gomaasapi.Machine\n\tmachinesError error\n\tmachinesArgsCheck func(gomaasapi.MachinesArgs)\n\tzones []gomaasapi.Zone\n\tzonesError error\n\tspaces []gomaasapi.Space\n\tspacesError error\n\tfiles []gomaasapi.File\n\tfilesPrefix string\n\tfilesError error\n\tgetFileFilename string\n\taddFileArgs gomaasapi.AddFileArgs\n\treleaseMachinesErrors []error\n\treleaseMachinesArgs []gomaasapi.ReleaseMachinesArgs\n}\n\nfunc (c *fakeController) Machines(args gomaasapi.MachinesArgs) ([]gomaasapi.Machine, error) {\n\tif c.machinesArgsCheck != nil {\n\t\tc.machinesArgsCheck(args)\n\t}\n\tif c.machinesError != nil {\n\t\treturn nil, c.machinesError\n\t}\n\treturn c.machines, nil\n}\n\nfunc (c *fakeController) BootResources() ([]gomaasapi.BootResource, error) {\n\tif c.bootResourcesError != nil {\n\t\treturn nil, c.bootResourcesError\n\t}\n\treturn c.bootResources, nil\n}\n\nfunc (c *fakeController) Zones() ([]gomaasapi.Zone, error) {\n\tif c.zonesError != nil {\n\t\treturn nil, c.zonesError\n\t}\n\treturn c.zones, nil\n}\n\nfunc (c *fakeController) Spaces() ([]gomaasapi.Space, error) {\n\tif c.spacesError != nil {\n\t\treturn nil, c.spacesError\n\t}\n\treturn c.spaces, nil\n}\n\nfunc (c *fakeController) Files(prefix string) ([]gomaasapi.File, error) {\n\tc.filesPrefix = prefix\n\tif c.filesError != nil {\n\t\treturn nil, c.filesError\n\t}\n\treturn c.files, nil\n}\n\nfunc (c *fakeController) GetFile(filename string) (gomaasapi.File, error) {\n\tc.getFileFilename = filename\n\tif c.filesError != nil {\n\t\treturn nil, c.filesError\n\t}\n\t\/\/ Try to find the file by name (needed for testing RemoveAll)\n\tfor _, file := range c.files {\n\t\tif file.Filename() == filename {\n\t\t\treturn file, nil\n\t\t}\n\t}\n\t\/\/ Otherwise just use the first one.\n\treturn c.files[0], nil\n}\n\nfunc (c *fakeController) AddFile(args gomaasapi.AddFileArgs) error {\n\tc.addFileArgs = args\n\treturn c.filesError\n}\n\nfunc (c *fakeController) ReleaseMachines(args gomaasapi.ReleaseMachinesArgs) error {\n\tif c.releaseMachinesErrors == nil {\n\t\treturn nil\n\t}\n\tc.releaseMachinesArgs = append(c.releaseMachinesArgs, args)\n\terr := c.releaseMachinesErrors[0]\n\tc.releaseMachinesErrors = c.releaseMachinesErrors[1:]\n\treturn err\n}\n\ntype fakeBootResource struct {\n\tgomaasapi.BootResource\n\tname string\n\tarchitecture string\n}\n\nfunc (r *fakeBootResource) Name() string {\n\treturn r.name\n}\n\nfunc (r *fakeBootResource) Architecture() string {\n\treturn r.architecture\n}\n\ntype fakeMachine struct {\n\tgomaasapi.Machine\n\tzoneName string\n\thostname string\n\tsystemID string\n\tipAddresses []string\n\tstatusName string\n\tstatusMessage string\n\tcpuCount int\n\tmemory int\n\tarchitecture string\n}\n\nfunc (m *fakeMachine) CPUCount() int {\n\treturn m.cpuCount\n}\n\nfunc (m *fakeMachine) Memory() int {\n\treturn m.memory\n}\n\nfunc (m *fakeMachine) Architecture() string {\n\treturn m.architecture\n}\n\nfunc (m *fakeMachine) SystemID() string {\n\treturn m.systemID\n}\n\nfunc (m *fakeMachine) Hostname() string {\n\treturn m.hostname\n}\n\nfunc (m *fakeMachine) IPAddresses() []string {\n\treturn m.ipAddresses\n}\n\nfunc (m *fakeMachine) StatusName() string {\n\treturn m.statusName\n}\n\nfunc (m *fakeMachine) StatusMessage() string {\n\treturn m.statusMessage\n}\n\nfunc (m *fakeMachine) Zone() gomaasapi.Zone {\n\treturn fakeZone{name: m.zoneName}\n}\n\ntype fakeZone struct {\n\tgomaasapi.Zone\n\tname string\n}\n\nfunc (z fakeZone) Name() string {\n\treturn z.name\n}\n\ntype fakeSpace struct {\n\tgomaasapi.Space\n\tname string\n\tid int\n\tsubnets []gomaasapi.Subnet\n}\n\nfunc (s fakeSpace) Name() string {\n\treturn s.name\n}\n\nfunc (s fakeSpace) ID() int {\n\treturn s.id\n}\n\nfunc (s fakeSpace) Subnets() []gomaasapi.Subnet {\n\treturn s.subnets\n}\n\ntype fakeSubnet struct {\n\tgomaasapi.Subnet\n\tid int\n\tvlanVid int\n\tcidr string\n}\n\nfunc (s fakeSubnet) ID() int {\n\treturn s.id\n}\n\nfunc (s fakeSubnet) CIDR() string {\n\treturn s.cidr\n}\n\nfunc (s fakeSubnet) VLAN() gomaasapi.VLAN {\n\treturn fakeVLAN{vid: s.vlanVid}\n}\n\ntype fakeVLAN struct {\n\tgomaasapi.VLAN\n\tvid int\n}\n\nfunc (v fakeVLAN) VID() int {\n\treturn v.vid\n}\n\ntype fakeFile struct {\n\tgomaasapi.File\n\tname string\n\turl string\n\tcontents []byte\n\tdeleted bool\n\terror error\n}\n\nfunc (f *fakeFile) Filename() string {\n\treturn f.name\n}\n\nfunc (f *fakeFile) AnonymousURL() string {\n\treturn f.url\n}\n\nfunc (f *fakeFile) Delete() error {\n\tf.deleted = true\n\treturn f.error\n}\n\nfunc (f *fakeFile) ReadAll() ([]byte, error) {\n\tif f.error != nil {\n\t\treturn nil, f.error\n\t}\n\treturn f.contents, nil\n}\n<commit_msg>Always record args, even if returning errors<commit_after>\/\/ Copyright 2016 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage maas\n\nimport (\n\t\"github.com\/juju\/gomaasapi\"\n\tjc \"github.com\/juju\/testing\/checkers\"\n\tgc \"gopkg.in\/check.v1\"\n\n\t\"github.com\/juju\/juju\/environs\/config\"\n\t\"github.com\/juju\/juju\/feature\"\n\tcoretesting \"github.com\/juju\/juju\/testing\"\n)\n\ntype maas2Suite struct {\n\tbaseProviderSuite\n}\n\nfunc (suite *maas2Suite) SetUpTest(c *gc.C) {\n\tsuite.baseProviderSuite.SetUpTest(c)\n\tsuite.SetFeatureFlags(feature.MAAS2)\n}\n\nfunc (suite *maas2Suite) injectController(controller gomaasapi.Controller) {\n\tmockGetController := func(maasServer, apiKey string) (gomaasapi.Controller, error) {\n\t\treturn controller, nil\n\t}\n\tsuite.PatchValue(&GetMAAS2Controller, mockGetController)\n}\n\nfunc (suite *maas2Suite) makeEnviron(c *gc.C, controller gomaasapi.Controller) *maasEnviron {\n\tif controller != nil {\n\t\tsuite.injectController(controller)\n\t}\n\ttestAttrs := coretesting.Attrs{}\n\tfor k, v := range maasEnvAttrs {\n\t\ttestAttrs[k] = v\n\t}\n\ttestAttrs[\"maas-server\"] = \"http:\/\/any-old-junk.invalid\/\"\n\tattrs := coretesting.FakeConfig().Merge(testAttrs)\n\tcfg, err := config.New(config.NoDefaults, attrs)\n\tc.Assert(err, jc.ErrorIsNil)\n\tenv, err := NewEnviron(cfg)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(env, gc.NotNil)\n\treturn env\n}\n\ntype fakeController struct {\n\tgomaasapi.Controller\n\tbootResources []gomaasapi.BootResource\n\tbootResourcesError error\n\tmachines []gomaasapi.Machine\n\tmachinesError error\n\tmachinesArgsCheck func(gomaasapi.MachinesArgs)\n\tzones []gomaasapi.Zone\n\tzonesError error\n\tspaces []gomaasapi.Space\n\tspacesError error\n\tfiles []gomaasapi.File\n\tfilesPrefix string\n\tfilesError error\n\tgetFileFilename string\n\taddFileArgs gomaasapi.AddFileArgs\n\treleaseMachinesErrors []error\n\treleaseMachinesArgs []gomaasapi.ReleaseMachinesArgs\n}\n\nfunc (c *fakeController) Machines(args gomaasapi.MachinesArgs) ([]gomaasapi.Machine, error) {\n\tif c.machinesArgsCheck != nil {\n\t\tc.machinesArgsCheck(args)\n\t}\n\tif c.machinesError != nil {\n\t\treturn nil, c.machinesError\n\t}\n\treturn c.machines, nil\n}\n\nfunc (c *fakeController) BootResources() ([]gomaasapi.BootResource, error) {\n\tif c.bootResourcesError != nil {\n\t\treturn nil, c.bootResourcesError\n\t}\n\treturn c.bootResources, nil\n}\n\nfunc (c *fakeController) Zones() ([]gomaasapi.Zone, error) {\n\tif c.zonesError != nil {\n\t\treturn nil, c.zonesError\n\t}\n\treturn c.zones, nil\n}\n\nfunc (c *fakeController) Spaces() ([]gomaasapi.Space, error) {\n\tif c.spacesError != nil {\n\t\treturn nil, c.spacesError\n\t}\n\treturn c.spaces, nil\n}\n\nfunc (c *fakeController) Files(prefix string) ([]gomaasapi.File, error) {\n\tc.filesPrefix = prefix\n\tif c.filesError != nil {\n\t\treturn nil, c.filesError\n\t}\n\treturn c.files, nil\n}\n\nfunc (c *fakeController) GetFile(filename string) (gomaasapi.File, error) {\n\tc.getFileFilename = filename\n\tif c.filesError != nil {\n\t\treturn nil, c.filesError\n\t}\n\t\/\/ Try to find the file by name (needed for testing RemoveAll)\n\tfor _, file := range c.files {\n\t\tif file.Filename() == filename {\n\t\t\treturn file, nil\n\t\t}\n\t}\n\t\/\/ Otherwise just use the first one.\n\treturn c.files[0], nil\n}\n\nfunc (c *fakeController) AddFile(args gomaasapi.AddFileArgs) error {\n\tc.addFileArgs = args\n\treturn c.filesError\n}\n\nfunc (c *fakeController) ReleaseMachines(args gomaasapi.ReleaseMachinesArgs) error {\n\tc.releaseMachinesArgs = append(c.releaseMachinesArgs, args)\n\tif c.releaseMachinesErrors == nil {\n\t\treturn nil\n\t}\n\terr := c.releaseMachinesErrors[0]\n\tc.releaseMachinesErrors = c.releaseMachinesErrors[1:]\n\treturn err\n}\n\ntype fakeBootResource struct {\n\tgomaasapi.BootResource\n\tname string\n\tarchitecture string\n}\n\nfunc (r *fakeBootResource) Name() string {\n\treturn r.name\n}\n\nfunc (r *fakeBootResource) Architecture() string {\n\treturn r.architecture\n}\n\ntype fakeMachine struct {\n\tgomaasapi.Machine\n\tzoneName string\n\thostname string\n\tsystemID string\n\tipAddresses []string\n\tstatusName string\n\tstatusMessage string\n\tcpuCount int\n\tmemory int\n\tarchitecture string\n}\n\nfunc (m *fakeMachine) CPUCount() int {\n\treturn m.cpuCount\n}\n\nfunc (m *fakeMachine) Memory() int {\n\treturn m.memory\n}\n\nfunc (m *fakeMachine) Architecture() string {\n\treturn m.architecture\n}\n\nfunc (m *fakeMachine) SystemID() string {\n\treturn m.systemID\n}\n\nfunc (m *fakeMachine) Hostname() string {\n\treturn m.hostname\n}\n\nfunc (m *fakeMachine) IPAddresses() []string {\n\treturn m.ipAddresses\n}\n\nfunc (m *fakeMachine) StatusName() string {\n\treturn m.statusName\n}\n\nfunc (m *fakeMachine) StatusMessage() string {\n\treturn m.statusMessage\n}\n\nfunc (m *fakeMachine) Zone() gomaasapi.Zone {\n\treturn fakeZone{name: m.zoneName}\n}\n\ntype fakeZone struct {\n\tgomaasapi.Zone\n\tname string\n}\n\nfunc (z fakeZone) Name() string {\n\treturn z.name\n}\n\ntype fakeSpace struct {\n\tgomaasapi.Space\n\tname string\n\tid int\n\tsubnets []gomaasapi.Subnet\n}\n\nfunc (s fakeSpace) Name() string {\n\treturn s.name\n}\n\nfunc (s fakeSpace) ID() int {\n\treturn s.id\n}\n\nfunc (s fakeSpace) Subnets() []gomaasapi.Subnet {\n\treturn s.subnets\n}\n\ntype fakeSubnet struct {\n\tgomaasapi.Subnet\n\tid int\n\tvlanVid int\n\tcidr string\n}\n\nfunc (s fakeSubnet) ID() int {\n\treturn s.id\n}\n\nfunc (s fakeSubnet) CIDR() string {\n\treturn s.cidr\n}\n\nfunc (s fakeSubnet) VLAN() gomaasapi.VLAN {\n\treturn fakeVLAN{vid: s.vlanVid}\n}\n\ntype fakeVLAN struct {\n\tgomaasapi.VLAN\n\tvid int\n}\n\nfunc (v fakeVLAN) VID() int {\n\treturn v.vid\n}\n\ntype fakeFile struct {\n\tgomaasapi.File\n\tname string\n\turl string\n\tcontents []byte\n\tdeleted bool\n\terror error\n}\n\nfunc (f *fakeFile) Filename() string {\n\treturn f.name\n}\n\nfunc (f *fakeFile) AnonymousURL() string {\n\treturn f.url\n}\n\nfunc (f *fakeFile) Delete() error {\n\tf.deleted = true\n\treturn f.error\n}\n\nfunc (f *fakeFile) ReadAll() ([]byte, error) {\n\tif f.error != nil {\n\t\treturn nil, f.error\n\t}\n\treturn f.contents, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Finds tsuru applications which deploys count % 20 == 0 |||| this is wrong! if count is 30 % 20 will be 10 and the app still needs a flatten!\n\/\/ and flatten their filesystems in order to avoid aufs performance bottlenecks.\npackage docker\n\nimport (\n\t\"bytes\"\n\t\"github.com\/dotcloud\/docker\"\n\tdcli \"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/globocom\/tsuru\/log\"\n\t\"github.com\/globocom\/tsuru\/provision\"\n)\n\nfunc needsFlatten(a provision.App) bool {\n\tdeploys := a.GetDeploys()\n\tif deploys != 0 && deploys%20 == 0 {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc flatten(imageID string) error {\n\tconfig := docker.Config{\n\t\tImage: imageID,\n\t\tCmd: []string{\"\/bin\/bash\"},\n\t\tAttachStdin: false,\n\t\tAttachStdout: false,\n\t\tAttachStderr: false,\n\t}\n\t_, c, err := dockerCluster().CreateContainer(&config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuf := &bytes.Buffer{}\n\tif err := dockerCluster().ExportContainer(c.ID, buf); err != nil {\n\t\tlog.Printf(\"Flatten: Caugh error while exporting container %s: %s\", c.ID, err.Error())\n\t\treturn err\n\t}\n\topts := dcli.ImportImageOptions{Repository: imageID, Source: \"-\"}\n\tif err := dockerCluster().ImportImage(opts, buf); err != nil {\n\t\tlog.Printf(\"Flatten: Caugh error while importing image from container %s: %s\", c.ID, err.Error())\n\t\treturn err\n\t}\n\tif err := dockerCluster().RemoveContainer(c.ID); err != nil {\n\t\tlog.Printf(\"Flatten: Caugh error while removing container %s: %s\", c.ID, err.Error())\n\t}\n\tremoveFromRegistry(imageID)\n\treturn nil\n}\n\n\/\/ Flatten finds the images that need to be flattened and export\/import\n\/\/ them in order to flatten them and logs errors when they happen.\nfunc Flatten(a provision.App) {\n\tif needsFlatten(a) {\n\t\timage := getImage(a)\n log.Printf(\"Flatten: attempting to flatten image %s.\", image)\n\t\tif err := flatten(image); err != nil {\n\t\t\tlog.Printf(\"Flatten: Caugh error while flattening image %s: %s\", image, err.Error())\n\t\t}\n log.Printf(\"Flatten: successfully flattened image %s.\", image)\n\t}\n}\n<commit_msg>provision\/docker\/flatten: formatting<commit_after>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Finds tsuru applications which deploys count % 20 == 0 |||| this is wrong! if count is 30 % 20 will be 10 and the app still needs a flatten!\n\/\/ and flatten their filesystems in order to avoid aufs performance bottlenecks.\npackage docker\n\nimport (\n\t\"bytes\"\n\t\"github.com\/dotcloud\/docker\"\n\tdcli \"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/globocom\/tsuru\/log\"\n\t\"github.com\/globocom\/tsuru\/provision\"\n)\n\nfunc needsFlatten(a provision.App) bool {\n\tdeploys := a.GetDeploys()\n\tif deploys != 0 && deploys%20 == 0 {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc flatten(imageID string) error {\n\tconfig := docker.Config{\n\t\tImage: imageID,\n\t\tCmd: []string{\"\/bin\/bash\"},\n\t\tAttachStdin: false,\n\t\tAttachStdout: false,\n\t\tAttachStderr: false,\n\t}\n\t_, c, err := dockerCluster().CreateContainer(&config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuf := &bytes.Buffer{}\n\tif err := dockerCluster().ExportContainer(c.ID, buf); err != nil {\n\t\tlog.Printf(\"Flatten: Caugh error while exporting container %s: %s\", c.ID, err.Error())\n\t\treturn err\n\t}\n\topts := dcli.ImportImageOptions{Repository: imageID, Source: \"-\"}\n\tif err := dockerCluster().ImportImage(opts, buf); err != nil {\n\t\tlog.Printf(\"Flatten: Caugh error while importing image from container %s: %s\", c.ID, err.Error())\n\t\treturn err\n\t}\n\tif err := dockerCluster().RemoveContainer(c.ID); err != nil {\n\t\tlog.Printf(\"Flatten: Caugh error while removing container %s: %s\", c.ID, err.Error())\n\t}\n\tremoveFromRegistry(imageID)\n\treturn nil\n}\n\n\/\/ Flatten finds the images that need to be flattened and export\/import\n\/\/ them in order to flatten them and logs errors when they happen.\nfunc Flatten(a provision.App) {\n\tif needsFlatten(a) {\n\t\timage := getImage(a)\n\t\tlog.Printf(\"Flatten: attempting to flatten image %s.\", image)\n\t\tif err := flatten(image); err != nil {\n\t\t\tlog.Printf(\"Flatten: Caugh error while flattening image %s: %s\", image, err.Error())\n\t\t}\n\t\tlog.Printf(\"Flatten: successfully flattened image %s.\", image)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage provision\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestRegisterAndGetProvisioner(t *testing.T) {\n\tvar p Provisioner\n\tRegister(\"my-provisioner\", p)\n\tgot, err := Get(\"my-provisioner\")\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error when getting provisioner: %q\", err)\n\t}\n\tif !reflect.DeepEqual(p, got) {\n\t\tt.Errorf(\"Get: Want %#v. Got %#v.\", p, got)\n\t}\n\t_, err = Get(\"unknown-provisioner\")\n\tif err == nil {\n\t\tt.Errorf(\"Expected non-nil error when getting unknown provisioner, got <nil>.\")\n\t}\n\texpectedMessage := `Unknown provisioner: \"unknown-provisioner\".`\n\tif err.Error() != expectedMessage {\n\t\tt.Errorf(\"Expected error %q. Got %q.\", expectedMessage, err.Error())\n\t}\n}\n\nfunc TestRegistry(t *testing.T) {\n\tvar p1, p2 Provisioner\n\tRegister(\"my-provisioner\", p1)\n\tRegister(\"your-provisioner\", p2)\n\tprovisioners := Registry()\n\talt1 := []Provisioner{p1, p2}\n\talt2 := []Provisioner{p2, p1}\n\tif !reflect.DeepEqual(provisioners, alt1) && !reflect.DeepEqual(provisioners, alt2) {\n\t\tt.Errorf(\"Registry(): Expected %#v. Got %#v.\", alt1, provisioners)\n\t}\n}\n\nfunc TestError(t *testing.T) {\n\terrs := []*Error{\n\t\t{Reason: \"something\", Err: errors.New(\"went wrong\")},\n\t\t{Reason: \"something went wrong\"},\n\t}\n\texpected := []string{\"went wrong: something\", \"something went wrong\"}\n\tfor i := range errs {\n\t\tif errs[i].Error() != expected[i] {\n\t\t\tt.Errorf(\"Error.Error(): want %q. Got %q.\", expected[i], errs[i].Error())\n\t\t}\n\t}\n}\n\nfunc TestErrorImplementsError(t *testing.T) {\n\tvar _ error = &Error{}\n}\n\nfunc TestStatusString(t *testing.T) {\n\tvar s Status = \"pending\"\n\tgot := s.String()\n\tif got != \"pending\" {\n\t\tt.Errorf(\"Status.String(). want \\\"pending\\\". Got %q.\", got)\n\t}\n}\n<commit_msg>provision: fix year in the license banner<commit_after>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage provision\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestRegisterAndGetProvisioner(t *testing.T) {\n\tvar p Provisioner\n\tRegister(\"my-provisioner\", p)\n\tgot, err := Get(\"my-provisioner\")\n\tif err != nil {\n\t\tt.Fatalf(\"Got unexpected error when getting provisioner: %q\", err)\n\t}\n\tif !reflect.DeepEqual(p, got) {\n\t\tt.Errorf(\"Get: Want %#v. Got %#v.\", p, got)\n\t}\n\t_, err = Get(\"unknown-provisioner\")\n\tif err == nil {\n\t\tt.Errorf(\"Expected non-nil error when getting unknown provisioner, got <nil>.\")\n\t}\n\texpectedMessage := `Unknown provisioner: \"unknown-provisioner\".`\n\tif err.Error() != expectedMessage {\n\t\tt.Errorf(\"Expected error %q. Got %q.\", expectedMessage, err.Error())\n\t}\n}\n\nfunc TestRegistry(t *testing.T) {\n\tvar p1, p2 Provisioner\n\tRegister(\"my-provisioner\", p1)\n\tRegister(\"your-provisioner\", p2)\n\tprovisioners := Registry()\n\talt1 := []Provisioner{p1, p2}\n\talt2 := []Provisioner{p2, p1}\n\tif !reflect.DeepEqual(provisioners, alt1) && !reflect.DeepEqual(provisioners, alt2) {\n\t\tt.Errorf(\"Registry(): Expected %#v. Got %#v.\", alt1, provisioners)\n\t}\n}\n\nfunc TestError(t *testing.T) {\n\terrs := []*Error{\n\t\t{Reason: \"something\", Err: errors.New(\"went wrong\")},\n\t\t{Reason: \"something went wrong\"},\n\t}\n\texpected := []string{\"went wrong: something\", \"something went wrong\"}\n\tfor i := range errs {\n\t\tif errs[i].Error() != expected[i] {\n\t\t\tt.Errorf(\"Error.Error(): want %q. Got %q.\", expected[i], errs[i].Error())\n\t\t}\n\t}\n}\n\nfunc TestErrorImplementsError(t *testing.T) {\n\tvar _ error = &Error{}\n}\n\nfunc TestStatusString(t *testing.T) {\n\tvar s Status = \"pending\"\n\tgot := s.String()\n\tif got != \"pending\" {\n\t\tt.Errorf(\"Status.String(). want \\\"pending\\\". Got %q.\", got)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package postgresmodule\n\n\/\/using latest https:\/\/hub.docker.com\/_\/postgres\/ image for testing\n\nimport (\n\t\"certificate\"\n)\n\nfunc (db *DB) InsertCertificate(cert *certificate.Certificate) error {\n\n\tvar ubuntu_valid, mozilla_valid, msft_valid, apple_valid bool\n\n\t\/\/TODO: iter through truststores and check above booleans.\n\n\t_, err := db.Exec(`INSERT INTO certificates( sha1_fingerprint, sha256_fingerprint,\n\tissuer, subject, version, is_ca, valid_not_before, valid_not_after,\n\tfirst_seen, last_seen, is_ubuntu_valid, is_mozilla_valid, is_microsoft_valid, \n\tis_apple_valid, x509_basicConstraints, x509_crlDistPoints, x509_extendedKeyUsage\n\tx509_authorityKeyIdentifier, x509_subjectKeyIdentifier, x509_keyUsage, x509_subjectAltName,\n\tsignature_algo, parent_id, raw_cert ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11\n\t$12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24 )`,\n\t\tcert.Hashes.SHA1, cert.Hashes.SHA256, cert.Issuer.CommonName, cert.Subject.CommonName,\n\t\tcert.Version, cert.CA, cert.Validity.NotBefore, cert.Validity.NotAfter, cert.FirstSeenTimestamp,\n\t\tcert.LastSeenTimestamp, ubuntu_valid, mozilla_valid, msft_valid, apple_valid,\n\t\tcert.X509v3BasicConstraints, cert.X509v3Extensions.CRLDistributionPoints,\n\t\tcert.X509v3Extensions.ExtendedKeyUsage, cert.X509v3Extensions.AuthorityKeyId,\n\t\tcert.X509v3Extensions.SubjectKeyId, cert.X509v3Extensions.KeyUsage,\n\t\tcert.X509v3Extensions.SubjectAlternativeName, cert.SignatureAlgorithm,\n\t\tcert.ParentSignature \/*TODO put whole raw cert into certificate struct*\/)\n\n\treturn err\n}\n\n\/\/certificate DB schema ( not finalised )\n\/\/CREATE TABLE certificates (\n\/\/\tid \tserial primary key,\n\/\/\tsha1_fingerprint \tbytea NOT NULL,\n\/\/ sha256_fingerprint bytea NOT NULL,\n\/\/\tserial_number \tvarchar NULL,\n\/\/\tissuer_id \tint4 NULL,\n\/\/\tversion \tint2 NULL,\n\/\/\tsubject \tvarchar NULL,\n\/\/\tissuer \tvarchar NULL,\n\/\/\tis_ca \tint2 NULL,\n\/\/\tis_self_signed \tbool NULL,\n\/\/\tnot_valid_before \ttimestamp NULL,\n\/\/\tnot_valid_after \ttimestamp NULL,\n\/\/ first_seen\t\t\t\t\ttimestamp NULL,\n\/\/ last_seen\t\t\t\t\ttimestamp NULL,\n\/\/\tis_valid \tbool NULL,\n\/\/\tvalidation_error \tvarchar NULL,\n\/\/\tis_ubuntu_valid \tbool NULL,\n\/\/\tis_mozilla_valid \tbool NULL,\n\/\/\tis_windows_valid \tbool NULL,\n\/\/\tis_apple_valid \tbool NULL,\n\/\/\tx509_basicConstraints \tvarchar NULL,\n\/\/\tx509_crlDistributionPoints \tvarchar NULL,\n\/\/\tx509_extendedKeyUsage \tvarchar NULL,\n\/\/\tx509_authorityKeyIdentifier\tvarchar NULL,\n\/\/\tx509_subjectKeyIdentifier \tvarchar NULL,\n\/\/\tx509_keyUsage \tvarchar NULL,\n\/\/\tx509_certificatePolicies \tvarchar NULL,\n\/\/\tx509_authorityInfoAccess \tvarchar NULL,\n\/\/\tx509_subjectAltName \tvarchar NULL,\n\/\/\tx509_nsCertType \tvarchar NULL,\n\/\/\tx509_nsComment \tvarchar NULL,\n\/\/\tx509_policyConstraints \tvarchar NULL,\n\/\/\tx509_issuerAltName \tvarchar NULL,\n\/\/\tsignature_algo \tvarchar NULL,\n\/\/ parent_id\t\t\t\t\tnumeric NULL,\n\/\/\tdepth \tint4 NULL,\n\/\/\tpublic_key_id \tint4 NULL,\n\/\/\tpublic_key_type \tvarchar NULL,\n\/\/\tin_openssl_root_store \tbool NULL,\n\/\/\tin_mozilla_root_store \tbool NULL,\n\/\/\tin_windows_root_store \tbool NULL,\n\/\/\tin_apple_root_store \tbool NULL,\n\/\/\tis_revoked \tbool NULL,\n\/\/\trevoked_at \ttimestamp NULL,\n\/\/\traw_cert\t\t\t\t\tvarchar NOT NULL\n\/\/);\n\nfunc (db *DB) UpdateCertLastSeen(cert *certificate.Certificate) error {\n\n\t_, err := db.Exec(\"UPDATE certificates SET last_seen=$1 WHERE sha1_fingerprint=$2\", cert.LastSeenTimestamp, cert.Hashes.SHA1)\n\treturn err\n}\n\nfunc (db *DB) GetCertwithFingerprint(sha1 string) (*certificate.Certificate, error) {\n\n\tvar ubuntu_valid, mozilla_valid, msft_valid, apple_valid bool\n\n\trow := db.QueryRow(`SELECT sha256_fingerprint,\n\t\tissuer, subject, version, is_ca, valid_not_before, valid_not_after,\n\t\tfirst_seen, last_seen, is_ubuntu_valid, is_mozilla_valid, is_microsoft_valid, \n\t\tis_apple_valid, x509_basicConstraints, x509_crlDistPoints, x509_extendedKeyUsage\n\t\tx509_authorityKeyIdentifier, x509_subjectKeyIdentifier, x509_keyUsage, x509_subjectAltName,\n\t\tsignature_algo, parent_id, raw_cert\n\t\tFROM certificates\n\t\tWHERE sha1_fingerprint=$1`, sha1)\n\n\tcert := &certificate.Certificate{}\n\n\terr := row.Scan(&cert.Hashes.SHA256, &cert.Issuer.CommonName, &cert.Subject.CommonName,\n\t\t&cert.Version, &cert.CA, &cert.Validity.NotBefore, &cert.Validity.NotAfter, &cert.FirstSeenTimestamp,\n\t\t&cert.LastSeenTimestamp, &ubuntu_valid, &mozilla_valid, &msft_valid, &apple_valid,\n\t\t&cert.X509v3BasicConstraints, &cert.X509v3Extensions.CRLDistributionPoints,\n\t\t&cert.X509v3Extensions.ExtendedKeyUsage, &cert.X509v3Extensions.AuthorityKeyId,\n\t\t&cert.X509v3Extensions.SubjectKeyId, &cert.X509v3Extensions.KeyUsage,\n\t\t&cert.X509v3Extensions.SubjectAlternativeName, &cert.SignatureAlgorithm,\n\t\t&cert.ParentSignature)\n\n\t\/\/TODO: parse boolean and recreate truststore validity\n\t\/\/may have to think of another way to store that.\n\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn cert, nil\n\t}\n\n}\n<commit_msg>removed fields from scheme<commit_after>package postgresmodule\n\n\/\/using latest https:\/\/hub.docker.com\/_\/postgres\/ image for testing\n\nimport (\n\t\"certificate\"\n)\n\nfunc (db *DB) InsertCertificate(cert *certificate.Certificate) error {\n\n\tvar ubuntu_valid, mozilla_valid, msft_valid, apple_valid bool\n\n\t\/\/TODO: iter through truststores and check above booleans.\n\n\t_, err := db.Exec(`INSERT INTO certificates( sha1_fingerprint, sha256_fingerprint,\n\tissuer, subject, version, is_ca, valid_not_before, valid_not_after,\n\tfirst_seen, last_seen, is_ubuntu_valid, is_mozilla_valid, is_microsoft_valid, \n\tis_apple_valid, x509_basicConstraints, x509_crlDistPoints, x509_extendedKeyUsage\n\tx509_authorityKeyIdentifier, x509_subjectKeyIdentifier, x509_keyUsage, x509_subjectAltName,\n\tsignature_algo, parent_id, raw_cert ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11\n\t$12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24 )`,\n\t\tcert.Hashes.SHA1, cert.Hashes.SHA256, cert.Issuer.CommonName, cert.Subject.CommonName,\n\t\tcert.Version, cert.CA, cert.Validity.NotBefore, cert.Validity.NotAfter, cert.FirstSeenTimestamp,\n\t\tcert.LastSeenTimestamp, ubuntu_valid, mozilla_valid, msft_valid, apple_valid,\n\t\tcert.X509v3BasicConstraints, cert.X509v3Extensions.CRLDistributionPoints,\n\t\tcert.X509v3Extensions.ExtendedKeyUsage, cert.X509v3Extensions.AuthorityKeyId,\n\t\tcert.X509v3Extensions.SubjectKeyId, cert.X509v3Extensions.KeyUsage,\n\t\tcert.X509v3Extensions.SubjectAlternativeName, cert.SignatureAlgorithm,\n\t\tcert.ParentSignature \/*TODO put whole raw cert into certificate struct*\/)\n\n\treturn err\n}\n\n\/\/certificate DB schema ( not finalised )\n\/\/CREATE TABLE certificates (\n\/\/\tid \tserial primary key,\n\/\/\tsha1_fingerprint \tbytea NOT NULL,\n\/\/ sha256_fingerprint bytea NOT NULL,\n\/\/\tserial_number \tvarchar NULL,\n\/\/\tissuer_id \tint4 NULL,\n\/\/\tversion \tint2 NULL,\n\/\/\tsubject \tvarchar NULL,\n\/\/\tissuer \tvarchar NULL,\n\/\/\tis_ca \tint2 NULL,\n\/\/\tis_self_signed \tbool NULL,\n\/\/\tnot_valid_before \ttimestamp NULL,\n\/\/\tnot_valid_after \ttimestamp NULL,\n\/\/ first_seen\t\t\t\t\ttimestamp NULL,\n\/\/ last_seen\t\t\t\t\ttimestamp NULL,\n\/\/\tis_valid \tbool NULL,\n\/\/\tvalidation_error \tvarchar NULL,\n\/\/\tis_ubuntu_valid \tbool NULL,\n\/\/\tis_mozilla_valid \tbool NULL,\n\/\/\tis_windows_valid \tbool NULL,\n\/\/\tis_apple_valid \tbool NULL,\n\/\/\tx509_basicConstraints \tvarchar NULL,\n\/\/\tx509_crlDistributionPoints \tvarchar NULL,\n\/\/\tx509_extendedKeyUsage \tvarchar NULL,\n\/\/\tx509_authorityKeyIdentifier\tvarchar NULL,\n\/\/\tx509_subjectKeyIdentifier \tvarchar NULL,\n\/\/\tx509_keyUsage \tvarchar NULL,\n\/\/\tx509_certificatePolicies \tvarchar NULL,\n\/\/\tx509_authorityInfoAccess \tvarchar NULL,\n\/\/\tx509_subjectAltName \tvarchar NULL,\n\/\/\tx509_issuerAltName \tvarchar NULL,\n\/\/\tsignature_algo \tvarchar NULL,\n\/\/ parent_id\t\t\t\t\tnumeric NULL,\n\/\/\tin_openssl_root_store \tbool NULL,\n\/\/\tin_mozilla_root_store \tbool NULL,\n\/\/\tin_windows_root_store \tbool NULL,\n\/\/\tin_apple_root_store \tbool NULL,\n\/\/\tis_revoked \tbool NULL,\n\/\/\trevoked_at \ttimestamp NULL,\n\/\/\traw_cert\t\t\t\t\tvarchar NOT NULL\n\/\/);\n\nfunc (db *DB) UpdateCertLastSeen(cert *certificate.Certificate) error {\n\n\t_, err := db.Exec(\"UPDATE certificates SET last_seen=$1 WHERE sha1_fingerprint=$2\", cert.LastSeenTimestamp, cert.Hashes.SHA1)\n\treturn err\n}\n\nfunc (db *DB) GetCertwithFingerprint(sha1 string) (*certificate.Certificate, error) {\n\n\tvar ubuntu_valid, mozilla_valid, msft_valid, apple_valid bool\n\n\trow := db.QueryRow(`SELECT sha256_fingerprint,\n\t\tissuer, subject, version, is_ca, valid_not_before, valid_not_after,\n\t\tfirst_seen, last_seen, is_ubuntu_valid, is_mozilla_valid, is_microsoft_valid, \n\t\tis_apple_valid, x509_basicConstraints, x509_crlDistPoints, x509_extendedKeyUsage\n\t\tx509_authorityKeyIdentifier, x509_subjectKeyIdentifier, x509_keyUsage, x509_subjectAltName,\n\t\tsignature_algo, parent_id, raw_cert\n\t\tFROM certificates\n\t\tWHERE sha1_fingerprint=$1`, sha1)\n\n\tcert := &certificate.Certificate{}\n\n\terr := row.Scan(&cert.Hashes.SHA256, &cert.Issuer.CommonName, &cert.Subject.CommonName,\n\t\t&cert.Version, &cert.CA, &cert.Validity.NotBefore, &cert.Validity.NotAfter, &cert.FirstSeenTimestamp,\n\t\t&cert.LastSeenTimestamp, &ubuntu_valid, &mozilla_valid, &msft_valid, &apple_valid,\n\t\t&cert.X509v3BasicConstraints, &cert.X509v3Extensions.CRLDistributionPoints,\n\t\t&cert.X509v3Extensions.ExtendedKeyUsage, &cert.X509v3Extensions.AuthorityKeyId,\n\t\t&cert.X509v3Extensions.SubjectKeyId, &cert.X509v3Extensions.KeyUsage,\n\t\t&cert.X509v3Extensions.SubjectAlternativeName, &cert.SignatureAlgorithm,\n\t\t&cert.ParentSignature)\n\n\t\/\/TODO: parse boolean and recreate truststore validity\n\t\/\/may have to think of another way to store that.\n\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn cert, nil\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package event\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/jbenet\/go-detect-race\"\n)\n\ntype EventA struct{}\ntype EventB int\n\nfunc getN() int {\n\tn := 50000\n\tif detectrace.WithRace() {\n\t\tn = 1000\n\t}\n\treturn n\n}\n\nfunc (EventA) String() string {\n\treturn \"Oh, Hello\"\n}\n\nfunc TestEmit(t *testing.T) {\n\tbus := NewBus()\n\tevents := make(chan EventA)\n\tcancel, err := bus.Subscribe(events)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tgo func() {\n\t\tdefer cancel()\n\t\t<-events\n\t}()\n\n\tem, err := bus.Emitter(new(EventA))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer em.Close()\n\n\tem.Emit(EventA{})\n}\n\nfunc TestSub(t *testing.T) {\n\tbus := NewBus()\n\tevents := make(chan EventB)\n\tcancel, err := bus.Subscribe(events)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar event EventB\n\n\tvar wait sync.WaitGroup\n\twait.Add(1)\n\n\tgo func() {\n\t\tdefer cancel()\n\t\tevent = <-events\n\t\twait.Done()\n\t}()\n\n\tem, err := bus.Emitter(new(EventB))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer em.Close()\n\n\tem.Emit(EventB(7))\n\twait.Wait()\n\n\tif event != 7 {\n\t\tt.Error(\"got wrong event\")\n\t}\n}\n\nfunc TestEmitNoSubNoBlock(t *testing.T) {\n\tbus := NewBus()\n\n\tem, err := bus.Emitter(new(EventA))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer em.Close()\n\n\tem.Emit(EventA{})\n}\n\nfunc TestEmitOnClosed(t *testing.T) {\n\tbus := NewBus()\n\n\tem, err := bus.Emitter(new(EventA))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tem.Close()\n\n\tdefer func() {\n\t\tr := recover()\n\t\tif r == nil {\n\t\t\tt.Errorf(\"expected panic\")\n\t\t}\n\t\tif r.(string) != \"emitter is closed\" {\n\t\t\tt.Error(\"unexpected message\")\n\t\t}\n\t}()\n\n\tem.Emit(EventA{})\n}\n\nfunc TestClosingRaces(t *testing.T) {\n\tsubs := getN()\n\temits := getN()\n\n\tvar wg sync.WaitGroup\n\tvar lk sync.RWMutex\n\tlk.Lock()\n\n\twg.Add(subs + emits)\n\n\tb := NewBus()\n\n\tfor i := 0; i < subs; i++ {\n\t\tgo func() {\n\t\t\tlk.RLock()\n\t\t\tdefer lk.RUnlock()\n\n\t\t\tcancel, _ := b.Subscribe(make(chan EventA))\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\tcancel()\n\n\t\t\twg.Done()\n\t\t}()\n\t}\n\tfor i := 0; i < emits; i++ {\n\t\tgo func() {\n\t\t\tlk.RLock()\n\t\t\tdefer lk.RUnlock()\n\n\t\t\temit, _ := b.Emitter(new(EventA))\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\temit.Close()\n\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\ttime.Sleep(10 * time.Millisecond)\n\tlk.Unlock() \/\/ start everything\n\n\twg.Wait()\n\n\tif len(b.nodes) != 0 {\n\t\tt.Error(\"expected no nodes\")\n\t}\n}\n\nfunc TestSubMany(t *testing.T) {\n\tbus := NewBus()\n\n\tvar r int32\n\n\tn := getN()\n\tvar wait sync.WaitGroup\n\tvar ready sync.WaitGroup\n\twait.Add(n)\n\tready.Add(n)\n\n\tfor i := 0; i < n; i++ {\n\t\tgo func() {\n\t\t\tevents := make(chan EventB)\n\t\t\tcancel, err := bus.Subscribe(events)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdefer cancel()\n\n\t\t\tready.Done()\n\t\t\tatomic.AddInt32(&r, int32(<-events))\n\t\t\twait.Done()\n\t\t}()\n\t}\n\n\tem, err := bus.Emitter(new(EventB))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer em.Close()\n\n\tready.Wait()\n\n\tem.Emit(EventB(7))\n\twait.Wait()\n\n\tif int(r) != 7*n {\n\t\tt.Error(\"got wrong result\")\n\t}\n}\n\nfunc TestSubType(t *testing.T) {\n\tbus := NewBus()\n\tevents := make(chan fmt.Stringer)\n\tcancel, err := bus.Subscribe(events, ForceSubType(new(EventA)))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar event fmt.Stringer\n\n\tvar wait sync.WaitGroup\n\twait.Add(1)\n\n\tgo func() {\n\t\tdefer cancel()\n\t\tevent = <-events\n\t\twait.Done()\n\t}()\n\n\tem, err := bus.Emitter(new(EventA))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer em.Close()\n\n\tem.Emit(EventA{})\n\twait.Wait()\n\n\tif event.String() != \"Oh, Hello\" {\n\t\tt.Error(\"didn't get the correct message\")\n\t}\n}\n\nfunc TestNonStateful(t *testing.T) {\n\tbus := NewBus()\n\tem, err := bus.Emitter(new(EventB))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer em.Close()\n\n\teventsA := make(chan EventB, 1)\n\tcancelS, err := bus.Subscribe(eventsA)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cancelS()\n\n\tselect {\n\tcase <-eventsA:\n\t\tt.Fatal(\"didn't expect to get an event\")\n\tdefault:\n\t}\n\n\tem.Emit(EventB(1))\n\n\tselect {\n\tcase e := <-eventsA:\n\t\tif e != 1 {\n\t\t\tt.Fatal(\"got wrong event\")\n\t\t}\n\tdefault:\n\t\tt.Fatal(\"expected to get an event\")\n\t}\n\n\teventsB := make(chan EventB, 1)\n\tcancelS2, err := bus.Subscribe(eventsB)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cancelS2()\n\n\tselect {\n\tcase <-eventsA:\n\t\tt.Fatal(\"didn't expect to get an event\")\n\tdefault:\n\t}\n}\n\nfunc TestStateful(t *testing.T) {\n\tbus := NewBus()\n\tem, err := bus.Emitter(new(EventB), Stateful)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer em.Close()\n\n\tem.Emit(EventB(2))\n\n\teventsA := make(chan EventB, 1)\n\tcancelS, err := bus.Subscribe(eventsA)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cancelS()\n\n\tif <-eventsA != 2 {\n\t\tt.Fatal(\"got wrong event\")\n\t}\n}\n\nfunc testMany(t testing.TB, subs, emits, msgs int, stateful bool) {\n\tif detectrace.WithRace() && subs+emits > 5000 {\n\t\tt.SkipNow()\n\t}\n\n\tbus := NewBus()\n\n\tvar r int64\n\n\tvar wait sync.WaitGroup\n\tvar ready sync.WaitGroup\n\twait.Add(subs + emits)\n\tready.Add(subs)\n\n\tfor i := 0; i < subs; i++ {\n\t\tgo func() {\n\t\t\tevents := make(chan EventB)\n\t\t\tcancel, err := bus.Subscribe(events)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdefer cancel()\n\n\t\t\tready.Done()\n\t\t\tfor i := 0; i < emits*msgs; i++ {\n\t\t\t\tatomic.AddInt64(&r, int64(<-events))\n\t\t\t}\n\t\t\twait.Done()\n\t\t}()\n\t}\n\n\tfor i := 0; i < emits; i++ {\n\t\tgo func() {\n\t\t\tem, err := bus.Emitter(new(EventB), func(settings interface{}) error {\n\t\t\t\tsettings.(*emitterSettings).makeStateful = stateful\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdefer em.Close()\n\n\t\t\tready.Wait()\n\n\t\t\tfor i := 0; i < msgs; i++ {\n\t\t\t\tem.Emit(EventB(97))\n\t\t\t}\n\n\t\t\twait.Done()\n\t\t}()\n\t}\n\n\twait.Wait()\n\n\tif int(r) != 97*subs*emits*msgs {\n\t\tt.Fatal(\"got wrong result\")\n\t}\n}\n\nfunc TestBothMany(t *testing.T) {\n\ttestMany(t, 10000, 100, 10, false)\n}\n\nfunc BenchmarkSubs(b *testing.B) {\n\tb.ReportAllocs()\n\ttestMany(b, b.N, 100, 100, false)\n}\n\nfunc BenchmarkEmits(b *testing.B) {\n\tb.ReportAllocs()\n\ttestMany(b, 100, b.N, 100, false)\n}\n\nfunc BenchmarkMsgs(b *testing.B) {\n\tb.ReportAllocs()\n\ttestMany(b, 100, 100, b.N, false)\n}\n\nfunc BenchmarkOneToMany(b *testing.B) {\n\tb.ReportAllocs()\n\ttestMany(b, b.N, 1, 100, false)\n}\n\nfunc BenchmarkManyToOne(b *testing.B) {\n\tb.ReportAllocs()\n\ttestMany(b, 1, b.N, 100, false)\n}\n\nfunc BenchmarkMs1e2m4(b *testing.B) {\n\tb.N = 1000000\n\tb.ReportAllocs()\n\ttestMany(b, 10, 100, 10000, false)\n}\n\nfunc BenchmarkMs1e0m6(b *testing.B) {\n\tb.N = 10000000\n\tb.ReportAllocs()\n\ttestMany(b, 10, 1, 1000000, false)\n}\n\nfunc BenchmarkMs0e0m6(b *testing.B) {\n\tb.N = 1000000\n\tb.ReportAllocs()\n\ttestMany(b, 1, 1, 1000000, false)\n}\n\nfunc BenchmarkStatefulMs1e0m6(b *testing.B) {\n\tb.N = 10000000\n\tb.ReportAllocs()\n\ttestMany(b, 10, 1, 1000000, true)\n}\n\nfunc BenchmarkStatefulMs0e0m6(b *testing.B) {\n\tb.N = 1000000\n\tb.ReportAllocs()\n\ttestMany(b, 1, 1, 1000000, true)\n}\n\nfunc BenchmarkMs0e6m0(b *testing.B) {\n\tb.N = 1000000\n\tb.ReportAllocs()\n\ttestMany(b, 1, 1000000, 1, false)\n}\n\nfunc BenchmarkMs6e0m0(b *testing.B) {\n\tb.N = 1000000\n\tb.ReportAllocs()\n\ttestMany(b, 1000000, 1, 1, false)\n}\n<commit_msg>fix compile error in tests.<commit_after>package event\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/jbenet\/go-detect-race\"\n)\n\ntype EventA struct{}\ntype EventB int\n\nfunc getN() int {\n\tn := 50000\n\tif detectrace.WithRace() {\n\t\tn = 1000\n\t}\n\treturn n\n}\n\nfunc (EventA) String() string {\n\treturn \"Oh, Hello\"\n}\n\nfunc TestEmit(t *testing.T) {\n\tbus := NewBus()\n\tevents := make(chan EventA)\n\tcancel, err := bus.Subscribe(events)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tgo func() {\n\t\tdefer cancel()\n\t\t<-events\n\t}()\n\n\tem, err := bus.Emitter(new(EventA))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer em.Close()\n\n\tem.Emit(EventA{})\n}\n\nfunc TestSub(t *testing.T) {\n\tbus := NewBus()\n\tevents := make(chan EventB)\n\tcancel, err := bus.Subscribe(events)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar event EventB\n\n\tvar wait sync.WaitGroup\n\twait.Add(1)\n\n\tgo func() {\n\t\tdefer cancel()\n\t\tevent = <-events\n\t\twait.Done()\n\t}()\n\n\tem, err := bus.Emitter(new(EventB))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer em.Close()\n\n\tem.Emit(EventB(7))\n\twait.Wait()\n\n\tif event != 7 {\n\t\tt.Error(\"got wrong event\")\n\t}\n}\n\nfunc TestEmitNoSubNoBlock(t *testing.T) {\n\tbus := NewBus()\n\n\tem, err := bus.Emitter(new(EventA))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer em.Close()\n\n\tem.Emit(EventA{})\n}\n\nfunc TestEmitOnClosed(t *testing.T) {\n\tbus := NewBus()\n\n\tem, err := bus.Emitter(new(EventA))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tem.Close()\n\n\tdefer func() {\n\t\tr := recover()\n\t\tif r == nil {\n\t\t\tt.Errorf(\"expected panic\")\n\t\t}\n\t\tif r.(string) != \"emitter is closed\" {\n\t\t\tt.Error(\"unexpected message\")\n\t\t}\n\t}()\n\n\tem.Emit(EventA{})\n}\n\nfunc TestClosingRaces(t *testing.T) {\n\tsubs := getN()\n\temits := getN()\n\n\tvar wg sync.WaitGroup\n\tvar lk sync.RWMutex\n\tlk.Lock()\n\n\twg.Add(subs + emits)\n\n\tb := NewBus()\n\n\tfor i := 0; i < subs; i++ {\n\t\tgo func() {\n\t\t\tlk.RLock()\n\t\t\tdefer lk.RUnlock()\n\n\t\t\tcancel, _ := b.Subscribe(make(chan EventA))\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\tcancel()\n\n\t\t\twg.Done()\n\t\t}()\n\t}\n\tfor i := 0; i < emits; i++ {\n\t\tgo func() {\n\t\t\tlk.RLock()\n\t\t\tdefer lk.RUnlock()\n\n\t\t\temit, _ := b.Emitter(new(EventA))\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\temit.Close()\n\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\ttime.Sleep(10 * time.Millisecond)\n\tlk.Unlock() \/\/ start everything\n\n\twg.Wait()\n\n\tif len(b.(*basicBus).nodes) != 0 {\n\t\tt.Error(\"expected no nodes\")\n\t}\n}\n\nfunc TestSubMany(t *testing.T) {\n\tbus := NewBus()\n\n\tvar r int32\n\n\tn := getN()\n\tvar wait sync.WaitGroup\n\tvar ready sync.WaitGroup\n\twait.Add(n)\n\tready.Add(n)\n\n\tfor i := 0; i < n; i++ {\n\t\tgo func() {\n\t\t\tevents := make(chan EventB)\n\t\t\tcancel, err := bus.Subscribe(events)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdefer cancel()\n\n\t\t\tready.Done()\n\t\t\tatomic.AddInt32(&r, int32(<-events))\n\t\t\twait.Done()\n\t\t}()\n\t}\n\n\tem, err := bus.Emitter(new(EventB))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer em.Close()\n\n\tready.Wait()\n\n\tem.Emit(EventB(7))\n\twait.Wait()\n\n\tif int(r) != 7*n {\n\t\tt.Error(\"got wrong result\")\n\t}\n}\n\nfunc TestSubType(t *testing.T) {\n\tbus := NewBus()\n\tevents := make(chan fmt.Stringer)\n\tcancel, err := bus.Subscribe(events, ForceSubType(new(EventA)))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar event fmt.Stringer\n\n\tvar wait sync.WaitGroup\n\twait.Add(1)\n\n\tgo func() {\n\t\tdefer cancel()\n\t\tevent = <-events\n\t\twait.Done()\n\t}()\n\n\tem, err := bus.Emitter(new(EventA))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer em.Close()\n\n\tem.Emit(EventA{})\n\twait.Wait()\n\n\tif event.String() != \"Oh, Hello\" {\n\t\tt.Error(\"didn't get the correct message\")\n\t}\n}\n\nfunc TestNonStateful(t *testing.T) {\n\tbus := NewBus()\n\tem, err := bus.Emitter(new(EventB))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer em.Close()\n\n\teventsA := make(chan EventB, 1)\n\tcancelS, err := bus.Subscribe(eventsA)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cancelS()\n\n\tselect {\n\tcase <-eventsA:\n\t\tt.Fatal(\"didn't expect to get an event\")\n\tdefault:\n\t}\n\n\tem.Emit(EventB(1))\n\n\tselect {\n\tcase e := <-eventsA:\n\t\tif e != 1 {\n\t\t\tt.Fatal(\"got wrong event\")\n\t\t}\n\tdefault:\n\t\tt.Fatal(\"expected to get an event\")\n\t}\n\n\teventsB := make(chan EventB, 1)\n\tcancelS2, err := bus.Subscribe(eventsB)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cancelS2()\n\n\tselect {\n\tcase <-eventsA:\n\t\tt.Fatal(\"didn't expect to get an event\")\n\tdefault:\n\t}\n}\n\nfunc TestStateful(t *testing.T) {\n\tbus := NewBus()\n\tem, err := bus.Emitter(new(EventB), Stateful)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer em.Close()\n\n\tem.Emit(EventB(2))\n\n\teventsA := make(chan EventB, 1)\n\tcancelS, err := bus.Subscribe(eventsA)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cancelS()\n\n\tif <-eventsA != 2 {\n\t\tt.Fatal(\"got wrong event\")\n\t}\n}\n\nfunc testMany(t testing.TB, subs, emits, msgs int, stateful bool) {\n\tif detectrace.WithRace() && subs+emits > 5000 {\n\t\tt.SkipNow()\n\t}\n\n\tbus := NewBus()\n\n\tvar r int64\n\n\tvar wait sync.WaitGroup\n\tvar ready sync.WaitGroup\n\twait.Add(subs + emits)\n\tready.Add(subs)\n\n\tfor i := 0; i < subs; i++ {\n\t\tgo func() {\n\t\t\tevents := make(chan EventB)\n\t\t\tcancel, err := bus.Subscribe(events)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdefer cancel()\n\n\t\t\tready.Done()\n\t\t\tfor i := 0; i < emits*msgs; i++ {\n\t\t\t\tatomic.AddInt64(&r, int64(<-events))\n\t\t\t}\n\t\t\twait.Done()\n\t\t}()\n\t}\n\n\tfor i := 0; i < emits; i++ {\n\t\tgo func() {\n\t\t\tem, err := bus.Emitter(new(EventB), func(settings interface{}) error {\n\t\t\t\tsettings.(*emitterSettings).makeStateful = stateful\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tdefer em.Close()\n\n\t\t\tready.Wait()\n\n\t\t\tfor i := 0; i < msgs; i++ {\n\t\t\t\tem.Emit(EventB(97))\n\t\t\t}\n\n\t\t\twait.Done()\n\t\t}()\n\t}\n\n\twait.Wait()\n\n\tif int(r) != 97*subs*emits*msgs {\n\t\tt.Fatal(\"got wrong result\")\n\t}\n}\n\nfunc TestBothMany(t *testing.T) {\n\ttestMany(t, 10000, 100, 10, false)\n}\n\nfunc BenchmarkSubs(b *testing.B) {\n\tb.ReportAllocs()\n\ttestMany(b, b.N, 100, 100, false)\n}\n\nfunc BenchmarkEmits(b *testing.B) {\n\tb.ReportAllocs()\n\ttestMany(b, 100, b.N, 100, false)\n}\n\nfunc BenchmarkMsgs(b *testing.B) {\n\tb.ReportAllocs()\n\ttestMany(b, 100, 100, b.N, false)\n}\n\nfunc BenchmarkOneToMany(b *testing.B) {\n\tb.ReportAllocs()\n\ttestMany(b, b.N, 1, 100, false)\n}\n\nfunc BenchmarkManyToOne(b *testing.B) {\n\tb.ReportAllocs()\n\ttestMany(b, 1, b.N, 100, false)\n}\n\nfunc BenchmarkMs1e2m4(b *testing.B) {\n\tb.N = 1000000\n\tb.ReportAllocs()\n\ttestMany(b, 10, 100, 10000, false)\n}\n\nfunc BenchmarkMs1e0m6(b *testing.B) {\n\tb.N = 10000000\n\tb.ReportAllocs()\n\ttestMany(b, 10, 1, 1000000, false)\n}\n\nfunc BenchmarkMs0e0m6(b *testing.B) {\n\tb.N = 1000000\n\tb.ReportAllocs()\n\ttestMany(b, 1, 1, 1000000, false)\n}\n\nfunc BenchmarkStatefulMs1e0m6(b *testing.B) {\n\tb.N = 10000000\n\tb.ReportAllocs()\n\ttestMany(b, 10, 1, 1000000, true)\n}\n\nfunc BenchmarkStatefulMs0e0m6(b *testing.B) {\n\tb.N = 1000000\n\tb.ReportAllocs()\n\ttestMany(b, 1, 1, 1000000, true)\n}\n\nfunc BenchmarkMs0e6m0(b *testing.B) {\n\tb.N = 1000000\n\tb.ReportAllocs()\n\ttestMany(b, 1, 1000000, 1, false)\n}\n\nfunc BenchmarkMs6e0m0(b *testing.B) {\n\tb.N = 1000000\n\tb.ReportAllocs()\n\ttestMany(b, 1000000, 1, 1, false)\n}\n<|endoftext|>"} {"text":"<commit_before>package daemon\n\nimport (\n\t\"encoding\/json\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\n\tma \"github.com\/jbenet\/go-multiaddr\"\n)\n\n\/\/SendCommand connects to the address on the network with a timeout and encodes the connection into JSON\nfunc SendCommand(command *Command, server string) error {\n\n\tmaddr, err := ma.NewMultiaddr(server)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnetwork, host, err := maddr.DialArgs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconn, err := net.Dial(network, host)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tenc := json.NewEncoder(conn)\n\terr = enc.Encode(command)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tio.Copy(os.Stdout, conn)\n\n\treturn nil\n}\n<commit_msg>this import snuck in<commit_after>package daemon\n\nimport (\n\t\"encoding\/json\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\n\tma \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-multiaddr\"\n)\n\n\/\/SendCommand connects to the address on the network with a timeout and encodes the connection into JSON\nfunc SendCommand(command *Command, server string) error {\n\n\tmaddr, err := ma.NewMultiaddr(server)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnetwork, host, err := maddr.DialArgs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconn, err := net.Dial(network, host)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tenc := json.NewEncoder(conn)\n\terr = enc.Encode(command)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tio.Copy(os.Stdout, conn)\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"gitsync\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"os\/user\"\n\t\"time\"\n)\n\nfunc FanIn(inChannels ...chan gitsync.GitChange) (target chan gitsync.GitChange) {\n\ttarget = make(chan gitsync.GitChange)\n\n\tfor _, c := range inChannels {\n\t\tgo func(in chan gitsync.GitChange) {\n\t\t\tfor {\n\t\t\t\tnewVal, stillOpen := <-in\n\t\t\t\tif !stillOpen {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\ttarget <- newVal\n\t\t\t}\n\t\t}(c)\n\t}\n\n\treturn\n}\n\nfunc FanOut(source chan gitsync.GitChange, outChannels ...chan gitsync.GitChange) {\n\tgo func() {\n\t\tfor {\n\t\t\tnewVal, stillOpen := <-source\n\t\t\tif !stillOpen {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, out := range outChannels {\n\t\t\t\tout <- newVal\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc Clone(source chan gitsync.GitChange) (duplicate chan gitsync.GitChange) {\n\tduplicate = make(chan gitsync.GitChange)\n\tFanOut(source, duplicate)\n\treturn\n}\n\nfunc RecieveChanges(changes chan gitsync.GitChange) {\n\tfor {\n\t\tselect {\n\t\tcase change, ok := <-changes:\n\t\t\tif !ok {\n\t\t\t\tlog.Printf(\"Exiting Loop\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tlog.Printf(\"saw %+v\", change)\n\t\t}\n\t}\n}\n\nfunc main() {\n\tlog.Printf(\"Starting\")\n\n\t\/\/ Start changes handler\n\tvar (\n\t\tusername = flag.String(\"user\", \"\", \"Username to report when sending changes to the network\")\n\t\tgroupIP = flag.String(\"ip\", gitsync.IP4MulticastAddr.IP.String(), \"Multicast IP to connect to\")\n\t\tgroupPort = flag.Int(\"port\", gitsync.IP4MulticastAddr.Port, \"Port to use for network IO\")\n\t)\n\tflag.Parse()\n\n\tvar (\n\t\terr error\n\t\tdirName = flag.Args()[0] \/\/ directories to watch\n\t\tnetName string \/\/ name to report to network\n\t\tgroupAddr *net.UDPAddr \/\/ network address to connect to\n\n\t\t\/\/ channels to move change messages around\n\t\tlocalChanges = make(chan gitsync.GitChange, 128)\n\t\tlocalChangesDup = make(chan gitsync.GitChange, 128)\n\t\tremoteChanges = make(chan gitsync.GitChange, 128)\n\t\ttoRemoteChanges = make(chan gitsync.GitChange, 128)\n\t)\n\n\t\/\/ get the user's name\n\tif *username != \"\" {\n\t\tnetName = *username\n\t} else if user, err := user.Current(); err == nil {\n\t\tnetName = user.Username\n\t} else {\n\t\tlog.Fatalf(\"Cannot get username: %v\", err)\n\t}\n\n\tif groupAddr, err = net.ResolveUDPAddr(\"udp\", fmt.Sprintf(\"%s:%d\", *groupIP, *groupPort)); err != nil {\n\t\tlog.Fatalf(\"Cannot resolve address %s: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ start directory poller\n\trepo, err := gitsync.NewCliRepo(dirName)\n\tif err != nil {\n\t\tlog.Fatalf(\"Cannot open repo: %s\", err)\n\t\tos.Exit(1)\n\t}\n\tgo gitsync.PollDirectory(dirName, repo, localChanges, 1*time.Second)\n\n\t\/\/ start network listener\n\tFanOut(localChanges, localChangesDup, toRemoteChanges)\n\tgo gitsync.NetIO(netName, groupAddr, remoteChanges, toRemoteChanges)\n\n\tchanges := FanIn(localChangesDup, remoteChanges)\n\tgo RecieveChanges(changes)\n\n\ts := make(chan os.Signal, 1)\n\tsignal.Notify(s, os.Kill)\n\t<-s\n\n\tlog.Printf(\"Exiting\")\n}\n<commit_msg>Error log on missing arg<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"gitsync\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"os\/user\"\n\t\"time\"\n)\n\nfunc FanIn(inChannels ...chan gitsync.GitChange) (target chan gitsync.GitChange) {\n\ttarget = make(chan gitsync.GitChange)\n\n\tfor _, c := range inChannels {\n\t\tgo func(in chan gitsync.GitChange) {\n\t\t\tfor {\n\t\t\t\tnewVal, stillOpen := <-in\n\t\t\t\tif !stillOpen {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\ttarget <- newVal\n\t\t\t}\n\t\t}(c)\n\t}\n\n\treturn\n}\n\nfunc FanOut(source chan gitsync.GitChange, outChannels ...chan gitsync.GitChange) {\n\tgo func() {\n\t\tfor {\n\t\t\tnewVal, stillOpen := <-source\n\t\t\tif !stillOpen {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, out := range outChannels {\n\t\t\t\tout <- newVal\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc Clone(source chan gitsync.GitChange) (duplicate chan gitsync.GitChange) {\n\tduplicate = make(chan gitsync.GitChange)\n\tFanOut(source, duplicate)\n\treturn\n}\n\nfunc RecieveChanges(changes chan gitsync.GitChange) {\n\tfor {\n\t\tselect {\n\t\tcase change, ok := <-changes:\n\t\t\tif !ok {\n\t\t\t\tlog.Printf(\"Exiting Loop\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tlog.Printf(\"saw %+v\", change)\n\t\t}\n\t}\n}\n\nfunc main() {\n\tif len(flag.Args()) == 0 {\n\t\tlog.Fatalf(\"No Git directory supplied\")\n\t}\n\n\tlog.Printf(\"Starting\")\n\n\t\/\/ Start changes handler\n\tvar (\n\t\tusername = flag.String(\"user\", \"\", \"Username to report when sending changes to the network\")\n\t\tgroupIP = flag.String(\"ip\", gitsync.IP4MulticastAddr.IP.String(), \"Multicast IP to connect to\")\n\t\tgroupPort = flag.Int(\"port\", gitsync.IP4MulticastAddr.Port, \"Port to use for network IO\")\n\t)\n\tflag.Parse()\n\n\tvar (\n\t\terr error\n\t\tdirName = flag.Args()[0] \/\/ directories to watch\n\t\tnetName string \/\/ name to report to network\n\t\tgroupAddr *net.UDPAddr \/\/ network address to connect to\n\n\t\t\/\/ channels to move change messages around\n\t\tlocalChanges = make(chan gitsync.GitChange, 128)\n\t\tlocalChangesDup = make(chan gitsync.GitChange, 128)\n\t\tremoteChanges = make(chan gitsync.GitChange, 128)\n\t\ttoRemoteChanges = make(chan gitsync.GitChange, 128)\n\t)\n\n\t\/\/ get the user's name\n\tif *username != \"\" {\n\t\tnetName = *username\n\t} else if user, err := user.Current(); err == nil {\n\t\tnetName = user.Username\n\t} else {\n\t\tlog.Fatalf(\"Cannot get username: %v\", err)\n\t}\n\n\tif groupAddr, err = net.ResolveUDPAddr(\"udp\", fmt.Sprintf(\"%s:%d\", *groupIP, *groupPort)); err != nil {\n\t\tlog.Fatalf(\"Cannot resolve address %s: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ start directory poller\n\trepo, err := gitsync.NewCliRepo(dirName)\n\tif err != nil {\n\t\tlog.Fatalf(\"Cannot open repo: %s\", err)\n\t\tos.Exit(1)\n\t}\n\tgo gitsync.PollDirectory(dirName, repo, localChanges, 1*time.Second)\n\n\t\/\/ start network listener\n\tFanOut(localChanges, localChangesDup, toRemoteChanges)\n\tgo gitsync.NetIO(netName, groupAddr, remoteChanges, toRemoteChanges)\n\n\tchanges := FanIn(localChangesDup, remoteChanges)\n\tgo RecieveChanges(changes)\n\n\ts := make(chan os.Signal, 1)\n\tsignal.Notify(s, os.Kill)\n\t<-s\n\n\tlog.Printf(\"Exiting\")\n}\n<|endoftext|>"} {"text":"<commit_before>package datasource\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar (\n\tinvalidCsv string\n\tvalidCsv string\n)\n\nfunc defaultValidatorOptions() Validator {\n\treturn csv.Validator{\n\t\tColSeparator: ';',\n\t\tMaxErrors: 10,\n\t}\n}\n\nfunc TestMain(m *testing.M) {\n\tinvalidCsvBytes, err := ioutil.ReadFile(\"..\/testdata\/csv\/invalid.csv\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tinvalidCsv = string(invalidCsvBytes)\n\n\tvalidCsvBytes, err := ioutil.ReadFile(\"..\/testdata\/csv\/valid.csv\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvalidCsv = string(validCsvBytes)\n\n\tos.Exit(m.Run())\n}\n\nfunc TestValidateCSVPositive(t *testing.T) {\n\tr := strings.NewReader(validCsv)\n\n\tresults, err := ValidateCSV(r, defaultValidatorOptions())\n\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif len(results) > 0 {\n\t\tt.Error(\"Expected no errors, got \", results)\n\t}\n}\n\nfunc TestValidateCSVNegative(t *testing.T) {\n\tr := strings.NewReader(invalidCsv)\n\n\tresults, err := ValidateCSV(r, defaultValidatorOptions())\n\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\texpected := []CsvError{\n\t\t{Row: 2, Column: 2, Message: \"Fields may not contain newlines!\"},\n\t\t{Row: 3, Column: 2, Message: \"Fields may not contain the quoting character (\\\")!\"},\n\t\t{Row: 17, Column: 1, Message: \"Fields may not contain the column separator (;)!\"},\n\t}\n\n\tif len(results) != len(expected) {\n\t\tt.Error(\"Expected \", len(expected), \" errors, got \", len(results), results)\n\t\treturn\n\t}\n\n\tfor index := range results {\n\t\tif actual, expected := results[index], expected[index]; actual != expected {\n\t\t\tt.Error(\"Expected \", expected, \" got \", actual)\n\t\t}\n\t}\n}\n<commit_msg>fix csv test<commit_after>package datasource\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar (\n\tinvalidCsv string\n\tvalidCsv string\n)\n\nfunc defaultValidatorOptions() Validator {\n\treturn Validator{\n\t\tColSeparator: ';',\n\t\tMaxErrors: 10,\n\t}\n}\n\nfunc TestMain(m *testing.M) {\n\tinvalidCsvBytes, err := ioutil.ReadFile(\"..\/testdata\/csv\/invalid.csv\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tinvalidCsv = string(invalidCsvBytes)\n\n\tvalidCsvBytes, err := ioutil.ReadFile(\"..\/testdata\/csv\/valid.csv\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvalidCsv = string(validCsvBytes)\n\n\tos.Exit(m.Run())\n}\n\nfunc TestValidateCSVPositive(t *testing.T) {\n\tr := strings.NewReader(validCsv)\n\n\tresults, err := ValidateCSV(r, defaultValidatorOptions())\n\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif len(results) > 0 {\n\t\tt.Error(\"Expected no errors, got \", results)\n\t}\n}\n\nfunc TestValidateCSVNegative(t *testing.T) {\n\tr := strings.NewReader(invalidCsv)\n\n\tresults, err := ValidateCSV(r, defaultValidatorOptions())\n\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\texpected := []CsvError{\n\t\t{Row: 2, Column: 2, Message: \"Fields may not contain newlines!\"},\n\t\t{Row: 3, Column: 2, Message: \"Fields may not contain the quoting character (\\\")!\"},\n\t\t{Row: 17, Column: 1, Message: \"Fields may not contain the column separator (;)!\"},\n\t}\n\n\tif len(results) != len(expected) {\n\t\tt.Error(\"Expected \", len(expected), \" errors, got \", len(results), results)\n\t\treturn\n\t}\n\n\tfor index := range results {\n\t\tif actual, expected := results[index], expected[index]; actual != expected {\n\t\t\tt.Error(\"Expected \", expected, \" got \", actual)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package gosteno\n\ntype TestingSink struct {\n\tRecords []*Record\n}\n\nfunc EnterTestMode() {\n\ttestSink := NewTestingSink()\n\tstenoConfig := Config{\n\t\tSinks: []Sink{testSink},\n\t}\n\tInit(&stenoConfig)\n}\n\nfunc NewTestingSink() *TestingSink {\n\treturn &TestingSink{\n\t\tRecords: make([]*Record, 0, 10),\n\t}\n}\n\nfunc (tSink *TestingSink) AddRecord(record *Record) {\n\ttSink.Records = append(tSink.Records, record)\n}\n\nfunc (tSink *TestingSink) Flush() {\n\n}\n\nfunc (tSink *TestingSink) SetCodec(codec Codec) {\n\n}\n\nfunc (tSink *TestingSink) GetCodec() Codec {\n\treturn nil\n}\n<commit_msg>singletons beget singletons<commit_after>package gosteno\n\ntype TestingSink struct {\n\tRecords []*Record\n}\n\nvar theGlobalTestSink *TestingSink\n\nfunc EnterTestMode() {\n\ttheGlobalTestSink = NewTestingSink()\n\tstenoConfig := Config{\n\t\tSinks: []Sink{theGlobalTestSink},\n\t}\n\tInit(&stenoConfig)\n}\n\nfunc GetMeTheGlobalTestSink() *TestingSink {\n\treturn theGlobalTestSink\n}\n\nfunc NewTestingSink() *TestingSink {\n\treturn &TestingSink{\n\t\tRecords: make([]*Record, 0, 10),\n\t}\n}\n\nfunc (tSink *TestingSink) AddRecord(record *Record) {\n\ttSink.Records = append(tSink.Records, record)\n}\n\nfunc (tSink *TestingSink) Flush() {\n\n}\n\nfunc (tSink *TestingSink) SetCodec(codec Codec) {\n\n}\n\nfunc (tSink *TestingSink) GetCodec() Codec {\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package definitions\n\ntype Action struct {\n \/\/ TODO: harmonize with actions_definition_spec.md\n Name string `json:\"name\" yaml:\"name\" toml:\"name\"`\n Services []string `json:\"services\" yaml:\"services\" toml:\"services\"`\n Chains []string `json:\"chains,omitempty\" yaml:\"chains,omitempty\" toml:\"chains,omitempty\"`\n Steps []string `json:\"steps\" yaml:\"steps\" toml:\"steps\"`\n Environment map[string]string `json:\"environment,omitempty\" yaml:\"environment,omitempty\" toml:\"environment,omitempty\"`\n\n \/\/ Used internally\n lastRan string\n}<commit_msg>add maintainer, location, and machine info to actions definition<commit_after>package definitions\n\ntype Action struct {\n \/\/ TODO: harmonize with actions_definition_spec.md\n Name string `json:\"name\" yaml:\"name\" toml:\"name\"`\n Services []string `json:\"services\" yaml:\"services\" toml:\"services\"`\n Chains []string `json:\"chains,omitempty\" yaml:\"chains,omitempty\" toml:\"chains,omitempty\"`\n Steps []string `json:\"steps\" yaml:\"steps\" toml:\"steps\"`\n Environment map[string]string `json:\"environment,omitempty\" yaml:\"environment,omitempty\" toml:\"environment,omitempty\"`\n\n Maintainer *Maintainer `json:\"maintainer,omitempty\" yaml:\"maintainer,omitempty\" toml:\"maintainer,omitempty\"`\n Location *Location `json:\"location,omitempty\" yaml:\"location,omitempty\" toml:\"location,omitempty\"`\n Machine *Machine `json:\"machine,omitempty\" yaml:\"machine,omitempty\" toml:\"machine,omitempty\"`\n Operations *ServiceOperation `json:\"operations\" yaml:\"operations\" toml:\"operations\"`\n}<|endoftext|>"} {"text":"<commit_before>package departure\n\nimport (\n\t\"time\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"encoding\/json\"\n)\n\ntype direction struct {\n\tStatus string\n\tRoutes []struct {\n\t\tLegs []struct {\n\t\t\tSteps [] struct {\n\t\t\t\tTransitDetails struct {\n\t\t\t\t\tDepartureTime struct {\n\t\t\t\t\t\tValue int64\n\t\t\t\t\t} `json:\"departure_time\"`\n\t\t\t\t\tLine struct {\n\t\t\t\t\t\tShortName string `json:\"short_name\"`\n\t\t\t\t\t}\n\t\t\t\t} `json:\"transit_details\"`\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (d *direction) getDepartureTime(lineName string) (time.Time, error) {\n\tif d.Status != \"OK\" {\n\t\treturn time.Time{}, fmt.Errorf(\"direction status was not OK but %s\", d.Status)\n\t}\n\n\tfor _, route := range d.Routes {\n\t\tif len(route.Legs) == 1 && len(route.Legs[0].Steps) == 1 && route.Legs[0].Steps[0].TransitDetails.Line.ShortName == lineName {\n\t\t\tdepartureTime := time.Unix(route.Legs[0].Steps[0].TransitDetails.DepartureTime.Value, 0)\n\n\t\t\treturn departureTime, nil\n\t\t}\n\t}\n\n\treturn time.Time{}, fmt.Errorf(\"No route found for line %s\", lineName)\n}\n\nfunc GetDepartureTime(origin, destination, apiKey, transitMode, lineName string) (time.Time, error) {\n\turl := createURL(origin, destination, apiKey, transitMode)\n\tvar direction direction\n\n\tgetJson(url, &direction)\n\n\tdepTime, err := direction.getDepartureTime(lineName)\n\n\treturn depTime, err\n}\n\nfunc createURL(origin, destination, apiKey, transitMode string) string {\n\tbaseUrl := \"https:\/\/maps.googleapis.com\/maps\/api\/directions\/json\"\n\turl := fmt.Sprintf(\"%s?origin=%s&destination=%s&key=%s&mode=transit&transit_mode=%s&language=en\", baseUrl, origin, destination, apiKey, transitMode)\n\n\treturn url\n}\n\nfunc getJson(url string, target interface{}) error {\n\tr, err := http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Body.Close()\n\n\treturn json.NewDecoder(r.Body).Decode(target)\n}\n<commit_msg>Add alternatives flag to queries<commit_after>package departure\n\nimport (\n\t\"time\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"encoding\/json\"\n)\n\ntype direction struct {\n\tStatus string\n\tRoutes []struct {\n\t\tLegs []struct {\n\t\t\tSteps [] struct {\n\t\t\t\tTransitDetails struct {\n\t\t\t\t\tDepartureTime struct {\n\t\t\t\t\t\tValue int64\n\t\t\t\t\t} `json:\"departure_time\"`\n\t\t\t\t\tLine struct {\n\t\t\t\t\t\tShortName string `json:\"short_name\"`\n\t\t\t\t\t}\n\t\t\t\t} `json:\"transit_details\"`\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (d *direction) getDepartureTime(lineName string) (time.Time, error) {\n\tif d.Status != \"OK\" {\n\t\treturn time.Time{}, fmt.Errorf(\"direction status was not OK but %s\", d.Status)\n\t}\n\n\tfor _, route := range d.Routes {\n\t\tif len(route.Legs) == 1 && len(route.Legs[0].Steps) == 1 && route.Legs[0].Steps[0].TransitDetails.Line.ShortName == lineName {\n\t\t\tdepartureTime := time.Unix(route.Legs[0].Steps[0].TransitDetails.DepartureTime.Value, 0)\n\n\t\t\treturn departureTime, nil\n\t\t}\n\t}\n\n\treturn time.Time{}, fmt.Errorf(\"No route found for line %s\", lineName)\n}\n\nfunc GetDepartureTime(origin, destination, apiKey, transitMode, lineName string) (time.Time, error) {\n\turl := createURL(origin, destination, apiKey, transitMode)\n\tvar direction direction\n\n\tgetJson(url, &direction)\n\n\tdepTime, err := direction.getDepartureTime(lineName)\n\n\treturn depTime, err\n}\n\nfunc createURL(origin, destination, apiKey, transitMode string) string {\n\tbaseUrl := \"https:\/\/maps.googleapis.com\/maps\/api\/directions\/json\"\n\turl := fmt.Sprintf(\"%s?origin=%s&destination=%s&key=%s&mode=transit&transit_mode=%s&language=en&alternatives=true\", baseUrl, origin, destination, apiKey, transitMode)\n\n\treturn url\n}\n\nfunc getJson(url string, target interface{}) error {\n\tr, err := http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Body.Close()\n\n\treturn json.NewDecoder(r.Body).Decode(target)\n}\n<|endoftext|>"} {"text":"<commit_before>package mcore\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/mabetle\/mcore\/mcon\"\n\t\"os\"\n\t\"strings\"\n)\n\nconst (\n\tNEW_LINE_BYTE = byte(10)\n)\n\n\/\/ ReadLine from os.Stdio\nfunc ReadLine() string {\n\tr := bufio.NewReader(os.Stdin)\n\tresult, err := r.ReadString(NEW_LINE_BYTE)\n\tif err != nil {\n\t\tmcon.PrintfRed(\"Error:%s\\n\", err)\n\t}\n\tresult = strings.TrimSuffix(result, \"\\n\")\n\tresult = strings.TrimSuffix(result, \"\\r\")\n\treturn result\n}\n\n\/\/ ReadArgs\nfunc ReadArgs() []string {\n\ts := ReadLine()\n\treturn ParseStringToArgs(s)\n}\n\n\/\/ ReadLineWithMsg\nfunc ReadLineWithMsg(msgs ...interface{}) string {\n\tmsg := fmt.Sprint(msgs...)\n\tif !String(msg).IsEndWith(\":\") {\n\t\tmsg = msg + \":\"\n\t}\n\tmcon.PrintGreen(msg)\n\treturn ReadLine()\n}\n\nfunc ReadNotBlankLine() (result string) {\n\tfor {\n\t\tresult = ReadLine()\n\t\tif String(result).IsBlank() {\n\t\t\tmcon.PrintRed(\"input blank line, try again:\")\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}\n\nfunc ReadNotBlankLineWithMsg(msgs ...interface{}) string {\n\tmsg := fmt.Sprint(msgs...)\n\tif !String(msg).IsEndWith(\":\") {\n\t\tmsg = msg + \":\"\n\t}\n\tfmt.Print(msg)\n\treturn ReadNotBlankLine()\n}\n\n\/\/ ReadBool\nfunc ReadBool(dft bool, msg ...interface{}) bool {\n\tv := ReadLineWithMsg(fmt.Sprint(msg...))\n\tif String(v).IsBlank() {\n\t\treturn dft\n\t}\n\treturn String(v).ToBool()\n}\n<commit_msg>fix read int<commit_after>package mcore\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/mabetle\/mcore\/mcon\"\n\t\"os\"\n\t\"strings\"\n)\n\nconst (\n\tNEW_LINE_BYTE = byte(10)\n)\n\n\/\/ ReadLine from os.Stdio\nfunc ReadLine() string {\n\tr := bufio.NewReader(os.Stdin)\n\tresult, err := r.ReadString(NEW_LINE_BYTE)\n\tif err != nil {\n\t\tmcon.PrintfRed(\"Error:%s\\n\", err)\n\t}\n\tresult = strings.TrimSuffix(result, \"\\n\")\n\tresult = strings.TrimSuffix(result, \"\\r\")\n\treturn result\n}\n\n\/\/ ReadArgs\nfunc ReadArgs() []string {\n\ts := ReadLine()\n\treturn ParseStringToArgs(s)\n}\n\n\/\/ ReadLineWithMsg\nfunc ReadLineWithMsg(msgs ...interface{}) string {\n\tmsg := fmt.Sprint(msgs...)\n\tif !String(msg).IsEndWith(\":\") {\n\t\tmsg = msg + \":\"\n\t}\n\tmcon.PrintGreen(msg)\n\treturn ReadLine()\n}\n\nfunc ReadNotBlankLine() (result string) {\n\tfor {\n\t\tresult = ReadLine()\n\t\tif String(result).IsBlank() {\n\t\t\tmcon.PrintRed(\"input blank line, try again:\")\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}\n\nfunc ReadNotBlankLineWithMsg(msgs ...interface{}) string {\n\tmsg := fmt.Sprint(msgs...)\n\tif !String(msg).IsEndWith(\":\") {\n\t\tmsg = msg + \":\"\n\t}\n\tfmt.Print(msg)\n\treturn ReadNotBlankLine()\n}\n\n\/\/ ReadInt\nfunc ReadInt(msg ...interface{}) int {\n\tv := ReadLineWithMsg(msg...)\n\tn, err := StrToInt(v)\n\tif err != nil {\n\t\treturn ReadInt(\"Wrong int format,try again:\")\n\t}\n\treturn n\n}\n\nfunc ReadNotZeroInt(msg ...interface{}) int {\n\tv := ReadInt(msg...)\n\tif v == 0 {\n\t\treturn ReadNotZeroInt(\"Input not zero int, try again:\")\n\t}\n\treturn v\n}\n\n\/\/ ReadBool\nfunc ReadBool(dft bool, msg ...interface{}) bool {\n\tv := ReadLineWithMsg(fmt.Sprint(msg...))\n\tif String(v).IsBlank() {\n\t\treturn dft\n\t}\n\treturn String(v).ToBool()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"io\/ioutil\"\n\n\t\"bufio\"\n\t\"os\"\n\n\t\"strings\"\n\n\t\"os\/exec\"\n\n\tr \"github.com\/dancannon\/gorethink\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/app\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/db\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/db\/schema\"\n\t\"gopkg.in\/olivere\/elastic.v2\"\n)\n\nvar mappings string = `\n{\n\t\"mappings\": {\n\t \"files\": {\n\t \"properties\": {\n\t \"project_id\": {\n\t \"type\": \"string\",\n\t \"index\": \"not_analyzed\"\n\t },\n\t \"project\": {\n\t \t\t\"type\": \"string\",\n\t \t\t\"index\": \"not_analyzed\"\n\t },\n\t \"datadir_id\": {\n\t \"type\": \"string\",\n\t \"index\": \"not_analyzed\"\n\t },\n\t \"id\": {\n\t \"type\": \"string\",\n\t \"index\": \"not_analyzed\"\n\t },\n\t \"usesid\": {\n\t \"type\": \"string\",\n\t \"index\": \"not_analyzed\"\n\t },\n\t \"name\": {\n\t \t \"type\": \"string\",\n\t \"index\": \"not_analyzed\"\n\t }\n\t }\n\t },\n\t \"projects\": {\n\t \t\"properties\": {\n\t \t\t\"id\": {\n\t \t\"type\": \"string\",\n\t \t\"index\": \"not_analyzed\"\n\t },\n\t \"datadir\": {\n\t \t\"type\": \"string\",\n\t \t\"index\": \"not_analyzed\"\n\t }\n\t \t}\n\t },\n\t \"samples\": {\n\t \t\"properties\":{\n\t \t \"id\": {\n\t \t\"type\": \"string\",\n\t \t\"index\": \"not_analyzed\"\n\t }\n\t }\n\t },\n\t \"processes\": {\n\t \t\"properties\":{\n\t \t\t\"id\": {\n\t \t\"type\": \"string\",\n\t \t\"index\": \"not_analyzed\"\n\t }\n\t }\n\t },\n\t\t\"users\": {\n\t\t\t\"properties\":{\n\t \t\t\"id\": {\n\t \t\"type\": \"string\",\n\t \t\"index\": \"not_analyzed\"\n\t }\n\t }\n\t }\n\t}\n}\n`\n\nvar tikableMediaTypes map[string]bool = map[string]bool{\n\t\"application\/msword\": true,\n\t\"application\/pdf\": true,\n\t\"application\/rtf\": true,\n\t\"application\/vnd.ms-excel\": true,\n\t\"application\/vnd.ms-office\": true,\n\t\"application\/vnd.ms-powerpoint\": true,\n\t\"application\/vnd.ms-powerpoint.presentation.macroEnabled.12\": true,\n\t\"application\/vnd.openxmlformats-officedocument.presentationml.presentation\": true,\n\t\"application\/vnd.openxmlformats-officedocument.spreadsheetml.sheet\": true,\n\t\"application\/vnd.openxmlformats-officedocument.wordprocessingml.document\": true,\n\t\"application\/vnd.sealedmedia.softseal.pdf\": true,\n\t\"text\/plain; charset=utf-8\": true,\n}\n\n\/\/var onlyHeader map[string]bool = map[string]bool{\n\/\/\t\"application\/vnd.ms-excel\": true,\n\/\/\t\"application\/vnd.openxmlformats-officedocument.spreadsheetml.sheet\": true,\n\/\/}\n\nfunc main() {\n\tclient, err := elastic.NewClient()\n\tif err != nil {\n\t\tpanic(\"Unable to connect to elasticsearch\")\n\t}\n\n\tsession := db.RSessionMust()\n\n\tcreateIndex(client)\n\tloadFiles(client, session)\n\tloadUsers(client, session)\n\tloadProjects(client, session)\n\n}\n\nfunc createIndex(client *elastic.Client) {\n\texists, err := client.IndexExists(\"mc\").Do()\n\tif err != nil {\n\t\tpanic(\"Failed checking index existence\")\n\t}\n\n\tif exists {\n\t\tclient.DeleteIndex(\"mc\").Do()\n\t}\n\n\tcreateStatus, err := client.CreateIndex(\"mc\").Body(mappings).Do()\n\tif err != nil {\n\t\tfmt.Println(\"Failed creating index: \", err)\n\t\tos.Exit(1)\n\t}\n\tif !createStatus.Acknowledged {\n\t\tfmt.Println(\"Index create not acknowledged\")\n\t}\n}\n\nfunc loadFiles(client *elastic.Client, session *r.Session) {\n\trenameDirPath := func(row r.Term) interface{} {\n\t\treturn row.Merge(map[string]interface{}{\n\t\t\t\"right\": map[string]interface{}{\n\t\t\t\t\"path\": row.Field(\"right\").Field(\"name\"),\n\t\t\t},\n\t\t})\n\t}\n\n\tvar _ = renameDirPath\n\n\tres, err := r.Table(\"projects\").Pluck(\"id\").\n\t\tEqJoin(\"id\", r.Table(\"project2datafile\"), r.EqJoinOpts{Index: \"project_id\"}).Zip().\n\t\tEqJoin(\"datafile_id\", r.Table(\"datadir2datafile\"), r.EqJoinOpts{Index: \"datafile_id\"}).Zip().\n\t\tEqJoin(\"datadir_id\", r.Table(\"datadirs\")).\n\t\tMap(renameDirPath).\n\t\tZip().\n\t\tEqJoin(\"datafile_id\", r.Table(\"datafiles\")).Zip().\n\t\tRun(session)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unable to query database for files: %s\", err))\n\t}\n\tdefer res.Close()\n\n\tvar df schema.File\n\tcount := 0\n\tmaxCount := 10\n\tbulkReq := client.Bulk()\n\tfor res.Next(&df) {\n\t\treadContents(&df)\n\t\tif count < maxCount {\n\t\t\tindexReq := elastic.NewBulkIndexRequest().Index(\"mc\").Type(\"files\").Id(df.ID).Doc(df)\n\t\t\tbulkReq = bulkReq.Add(indexReq)\n\t\t\tcount++\n\t\t} else {\n\t\t\tcount = 0\n\t\t\tresp, err := bulkReq.Do()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"bulkreq failed: %s\\n\", err)\n\t\t\t\tfmt.Printf(\"%#v\\n\", resp)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tif count != 0 {\n\t\tbulkReq.Do()\n\t}\n}\n\nfunc loadUsers(client *elastic.Client, session *r.Session) {\n\tres, err := r.Table(\"users\").Run(session)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unable to query database for users: %s\", err))\n\t}\n\tdefer res.Close()\n\n\tvar u schema.User\n\tcount := 0\n\tmaxCount := 1000\n\tbulkReq := client.Bulk()\n\tfor res.Next(&u) {\n\t\tif count < maxCount {\n\t\t\tindexReq := elastic.NewBulkIndexRequest().Index(\"mc\").Type(\"users\").Id(u.ID).Doc(u)\n\t\t\tbulkReq = bulkReq.Add(indexReq)\n\t\t\tcount++\n\t\t} else {\n\t\t\tcount = 0\n\t\t\tresp, err := bulkReq.Do()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"bulkreq failed: %s\\n\", err)\n\t\t\t\tfmt.Printf(\"%#v\\n\", resp)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tif count != 0 {\n\t\tbulkReq.Do()\n\t}\n}\n\nfunc loadProjects(client *elastic.Client, session *r.Session) {\n\tres, err := r.Table(\"projects\").Run(session)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unable to query database for projects: %s\", err))\n\t}\n\tdefer res.Close()\n\n\tvar p schema.Project\n\tcount := 0\n\tmaxCount := 100\n\tbulkReq := client.Bulk()\n\tfor res.Next(&p) {\n\t\tif count < maxCount {\n\t\t\tindexReq := elastic.NewBulkIndexRequest().Index(\"mc\").Type(\"projects\").Id(p.ID).Doc(p)\n\t\t\tbulkReq = bulkReq.Add(indexReq)\n\t\t\tcount++\n\t\t} else {\n\t\t\tcount = 0\n\t\t\tresp, err := bulkReq.Do()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"bulkreq failed: %s\\n\", err)\n\t\t\t\tfmt.Printf(\"%#v\\n\", resp)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tif count != 0 {\n\t\tbulkReq.Do()\n\t}\n}\n\nconst twoMeg = 2 * 1024 * 1024\n\nfunc readContents(file *schema.File) {\n\tswitch file.MediaType.Mime {\n\tcase \"text\/csv\":\n\t\t\/\/fmt.Println(\"Reading csv file: \", file.ID, file.Name, file.Size)\n\t\tif contents, err := readCSVLines(file.ID); err == nil {\n\t\t\tfile.Contents = string(contents)\n\t\t}\n\tcase \"text\/plain\":\n\t\tif file.Size > twoMeg {\n\t\t\treturn\n\t\t}\n\t\t\/\/fmt.Println(\"Reading text file: \", file.ID, file.Name, file.Size)\n\t\tif contents, err := ioutil.ReadFile(app.MCDir.FilePath(file.ID)); err == nil {\n\t\t\tfile.Contents = string(contents)\n\t\t}\n\tdefault:\n\t\tif _, ok := tikableMediaTypes[file.MediaType.Mime]; ok {\n\t\t\tif contents := extractUsingTika(file); contents != \"\" {\n\t\t\t\tfile.Contents = contents\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc readCSVLines(fileID string) (string, error) {\n\tif file, err := os.Open(app.MCDir.FilePath(fileID)); err == nil {\n\t\tdefer file.Close()\n\n\t\tscanner := bufio.NewScanner(file)\n\t\tfor scanner.Scan() {\n\t\t\ttext := scanner.Text()\n\t\t\tif text != \"\" && !strings.HasPrefix(text, \"#\") {\n\t\t\t\treturn text, nil\n\t\t\t}\n\t\t}\n\t\treturn \"\", errors.New(\"No data\")\n\t} else {\n\t\treturn \"\", err\n\t}\n}\n\nfunc extractUsingTika(file *schema.File) string {\n\tif file.Size > twoMeg {\n\t\treturn \"\"\n\t}\n\n\tout, err := exec.Command(\"tika.sh\", \"--text\", app.MCDir.FilePath(file.ID)).Output()\n\tif err != nil {\n\t\tfmt.Println(\"Tika failed for:\", file.Name, file.ID, file.MediaType.Mime)\n\t\tfmt.Println(\"exec failed:\", err)\n\t\treturn \"\"\n\t}\n\n\treturn string(out)\n}\n<commit_msg>analyze file name field.<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"io\/ioutil\"\n\n\t\"bufio\"\n\t\"os\"\n\n\t\"strings\"\n\n\t\"os\/exec\"\n\n\tr \"github.com\/dancannon\/gorethink\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/app\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/db\"\n\t\"github.com\/materials-commons\/mcstore\/pkg\/db\/schema\"\n\t\"gopkg.in\/olivere\/elastic.v2\"\n)\n\nvar mappings string = `\n{\n\t\"mappings\": {\n\t \"files\": {\n\t \"properties\": {\n\t \"project_id\": {\n\t \"type\": \"string\",\n\t \"index\": \"not_analyzed\"\n\t },\n\t \"project\": {\n\t \t\t\"type\": \"string\",\n\t \t\t\"index\": \"not_analyzed\"\n\t },\n\t \"datadir_id\": {\n\t \"type\": \"string\",\n\t \"index\": \"not_analyzed\"\n\t },\n\t \"id\": {\n\t \"type\": \"string\",\n\t \"index\": \"not_analyzed\"\n\t },\n\t \"usesid\": {\n\t \"type\": \"string\",\n\t \"index\": \"not_analyzed\"\n\t }\n\t }\n\t },\n\t \"projects\": {\n\t \t\"properties\": {\n\t \t\t\"id\": {\n\t \t\"type\": \"string\",\n\t \t\"index\": \"not_analyzed\"\n\t },\n\t \"datadir\": {\n\t \t\"type\": \"string\",\n\t \t\"index\": \"not_analyzed\"\n\t }\n\t \t}\n\t },\n\t \"samples\": {\n\t \t\"properties\":{\n\t \t \"id\": {\n\t \t\"type\": \"string\",\n\t \t\"index\": \"not_analyzed\"\n\t }\n\t }\n\t },\n\t \"processes\": {\n\t \t\"properties\":{\n\t \t\t\"id\": {\n\t \t\"type\": \"string\",\n\t \t\"index\": \"not_analyzed\"\n\t }\n\t }\n\t },\n\t\t\"users\": {\n\t\t\t\"properties\":{\n\t \t\t\"id\": {\n\t \t\"type\": \"string\",\n\t \t\"index\": \"not_analyzed\"\n\t }\n\t }\n\t }\n\t}\n}\n`\n\nvar tikableMediaTypes map[string]bool = map[string]bool{\n\t\"application\/msword\": true,\n\t\"application\/pdf\": true,\n\t\"application\/rtf\": true,\n\t\"application\/vnd.ms-excel\": true,\n\t\"application\/vnd.ms-office\": true,\n\t\"application\/vnd.ms-powerpoint\": true,\n\t\"application\/vnd.ms-powerpoint.presentation.macroEnabled.12\": true,\n\t\"application\/vnd.openxmlformats-officedocument.presentationml.presentation\": true,\n\t\"application\/vnd.openxmlformats-officedocument.spreadsheetml.sheet\": true,\n\t\"application\/vnd.openxmlformats-officedocument.wordprocessingml.document\": true,\n\t\"application\/vnd.sealedmedia.softseal.pdf\": true,\n\t\"text\/plain; charset=utf-8\": true,\n}\n\n\/\/var onlyHeader map[string]bool = map[string]bool{\n\/\/\t\"application\/vnd.ms-excel\": true,\n\/\/\t\"application\/vnd.openxmlformats-officedocument.spreadsheetml.sheet\": true,\n\/\/}\n\nfunc main() {\n\tclient, err := elastic.NewClient()\n\tif err != nil {\n\t\tpanic(\"Unable to connect to elasticsearch\")\n\t}\n\n\tsession := db.RSessionMust()\n\n\tcreateIndex(client)\n\tloadFiles(client, session)\n\tloadUsers(client, session)\n\tloadProjects(client, session)\n\n}\n\nfunc createIndex(client *elastic.Client) {\n\texists, err := client.IndexExists(\"mc\").Do()\n\tif err != nil {\n\t\tpanic(\"Failed checking index existence\")\n\t}\n\n\tif exists {\n\t\tclient.DeleteIndex(\"mc\").Do()\n\t}\n\n\tcreateStatus, err := client.CreateIndex(\"mc\").Body(mappings).Do()\n\tif err != nil {\n\t\tfmt.Println(\"Failed creating index: \", err)\n\t\tos.Exit(1)\n\t}\n\tif !createStatus.Acknowledged {\n\t\tfmt.Println(\"Index create not acknowledged\")\n\t}\n}\n\nfunc loadFiles(client *elastic.Client, session *r.Session) {\n\trenameDirPath := func(row r.Term) interface{} {\n\t\treturn row.Merge(map[string]interface{}{\n\t\t\t\"right\": map[string]interface{}{\n\t\t\t\t\"path\": row.Field(\"right\").Field(\"name\"),\n\t\t\t},\n\t\t})\n\t}\n\n\tvar _ = renameDirPath\n\n\tres, err := r.Table(\"projects\").Pluck(\"id\").\n\t\tEqJoin(\"id\", r.Table(\"project2datafile\"), r.EqJoinOpts{Index: \"project_id\"}).Zip().\n\t\tEqJoin(\"datafile_id\", r.Table(\"datadir2datafile\"), r.EqJoinOpts{Index: \"datafile_id\"}).Zip().\n\t\tEqJoin(\"datadir_id\", r.Table(\"datadirs\")).\n\t\tMap(renameDirPath).\n\t\tZip().\n\t\tEqJoin(\"datafile_id\", r.Table(\"datafiles\")).Zip().\n\t\tRun(session)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unable to query database for files: %s\", err))\n\t}\n\tdefer res.Close()\n\n\tvar df schema.File\n\tcount := 0\n\tmaxCount := 10\n\tbulkReq := client.Bulk()\n\tfor res.Next(&df) {\n\t\treadContents(&df)\n\t\tif count < maxCount {\n\t\t\tindexReq := elastic.NewBulkIndexRequest().Index(\"mc\").Type(\"files\").Id(df.ID).Doc(df)\n\t\t\tbulkReq = bulkReq.Add(indexReq)\n\t\t\tcount++\n\t\t} else {\n\t\t\tcount = 0\n\t\t\tresp, err := bulkReq.Do()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"bulkreq failed: %s\\n\", err)\n\t\t\t\tfmt.Printf(\"%#v\\n\", resp)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tif count != 0 {\n\t\tbulkReq.Do()\n\t}\n}\n\nfunc loadUsers(client *elastic.Client, session *r.Session) {\n\tres, err := r.Table(\"users\").Run(session)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unable to query database for users: %s\", err))\n\t}\n\tdefer res.Close()\n\n\tvar u schema.User\n\tcount := 0\n\tmaxCount := 1000\n\tbulkReq := client.Bulk()\n\tfor res.Next(&u) {\n\t\tif count < maxCount {\n\t\t\tindexReq := elastic.NewBulkIndexRequest().Index(\"mc\").Type(\"users\").Id(u.ID).Doc(u)\n\t\t\tbulkReq = bulkReq.Add(indexReq)\n\t\t\tcount++\n\t\t} else {\n\t\t\tcount = 0\n\t\t\tresp, err := bulkReq.Do()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"bulkreq failed: %s\\n\", err)\n\t\t\t\tfmt.Printf(\"%#v\\n\", resp)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tif count != 0 {\n\t\tbulkReq.Do()\n\t}\n}\n\nfunc loadProjects(client *elastic.Client, session *r.Session) {\n\tres, err := r.Table(\"projects\").Run(session)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unable to query database for projects: %s\", err))\n\t}\n\tdefer res.Close()\n\n\tvar p schema.Project\n\tcount := 0\n\tmaxCount := 100\n\tbulkReq := client.Bulk()\n\tfor res.Next(&p) {\n\t\tif count < maxCount {\n\t\t\tindexReq := elastic.NewBulkIndexRequest().Index(\"mc\").Type(\"projects\").Id(p.ID).Doc(p)\n\t\t\tbulkReq = bulkReq.Add(indexReq)\n\t\t\tcount++\n\t\t} else {\n\t\t\tcount = 0\n\t\t\tresp, err := bulkReq.Do()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"bulkreq failed: %s\\n\", err)\n\t\t\t\tfmt.Printf(\"%#v\\n\", resp)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tif count != 0 {\n\t\tbulkReq.Do()\n\t}\n}\n\nconst twoMeg = 2 * 1024 * 1024\n\nfunc readContents(file *schema.File) {\n\tswitch file.MediaType.Mime {\n\tcase \"text\/csv\":\n\t\t\/\/fmt.Println(\"Reading csv file: \", file.ID, file.Name, file.Size)\n\t\tif contents, err := readCSVLines(file.ID); err == nil {\n\t\t\tfile.Contents = string(contents)\n\t\t}\n\tcase \"text\/plain\":\n\t\tif file.Size > twoMeg {\n\t\t\treturn\n\t\t}\n\t\t\/\/fmt.Println(\"Reading text file: \", file.ID, file.Name, file.Size)\n\t\tif contents, err := ioutil.ReadFile(app.MCDir.FilePath(file.ID)); err == nil {\n\t\t\tfile.Contents = string(contents)\n\t\t}\n\tdefault:\n\t\tif _, ok := tikableMediaTypes[file.MediaType.Mime]; ok {\n\t\t\tif contents := extractUsingTika(file); contents != \"\" {\n\t\t\t\tfile.Contents = contents\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc readCSVLines(fileID string) (string, error) {\n\tif file, err := os.Open(app.MCDir.FilePath(fileID)); err == nil {\n\t\tdefer file.Close()\n\n\t\tscanner := bufio.NewScanner(file)\n\t\tfor scanner.Scan() {\n\t\t\ttext := scanner.Text()\n\t\t\tif text != \"\" && !strings.HasPrefix(text, \"#\") {\n\t\t\t\treturn text, nil\n\t\t\t}\n\t\t}\n\t\treturn \"\", errors.New(\"No data\")\n\t} else {\n\t\treturn \"\", err\n\t}\n}\n\nfunc extractUsingTika(file *schema.File) string {\n\tif file.Size > twoMeg {\n\t\treturn \"\"\n\t}\n\n\tout, err := exec.Command(\"tika.sh\", \"--text\", app.MCDir.FilePath(file.ID)).Output()\n\tif err != nil {\n\t\tfmt.Println(\"Tika failed for:\", file.Name, file.ID, file.MediaType.Mime)\n\t\tfmt.Println(\"exec failed:\", err)\n\t\treturn \"\"\n\t}\n\n\treturn string(out)\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>add test for struct field tag<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>apparently not everyone takes french<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>fixed deep search<commit_after><|endoftext|>"} {"text":"<commit_before>package patcher_test\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/itchio\/headway\/united\"\n\t\"github.com\/itchio\/screw\"\n\t\"github.com\/itchio\/wharf\/wsync\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\n\t\"github.com\/itchio\/wharf\/pwr\"\n\t\"github.com\/itchio\/wharf\/pwr\/bowl\"\n\t\"github.com\/itchio\/wharf\/pwr\/patcher\"\n\t\"github.com\/itchio\/wharf\/pwr\/rediff\"\n\t\"github.com\/itchio\/wharf\/wtest\"\n\n\t\"github.com\/itchio\/headway\/state\"\n\t\"github.com\/itchio\/lake\/pools\/fspool\"\n\t\"github.com\/itchio\/lake\/tlc\"\n\t\"github.com\/itchio\/savior\/seeksource\"\n\n\t_ \"github.com\/itchio\/wharf\/compressors\/cbrotli\"\n\t_ \"github.com\/itchio\/wharf\/decompressors\/cbrotli\"\n)\n\nfunc Test_Naive(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"patcher-noop\")\n\twtest.Must(t, err)\n\tdefer screw.RemoveAll(dir)\n\n\tv1 := filepath.Join(dir, \"v1\")\n\twtest.MakeTestDir(t, v1, wtest.TestDirSettings{\n\t\tEntries: []wtest.TestDirEntry{\n\t\t\t{Path: \"subdir\/file-1\", Seed: 0x1, Size: wtest.BlockSize*220 + 14},\n\t\t\t{Path: \"file-1\", Seed: 0x2},\n\t\t\t{Path: \"dir2\/file-2\", Seed: 0x3},\n\t\t\t{Path: \"dir3\/gone\", Seed: 0x4},\n\t\t},\n\t})\n\n\tv2 := filepath.Join(dir, \"v2\")\n\twtest.MakeTestDir(t, v2, wtest.TestDirSettings{\n\t\tEntries: []wtest.TestDirEntry{\n\t\t\t{Path: \"subdir\/file-1\", Seed: 0x1, Size: wtest.BlockSize*260 + 14, Bsmods: []wtest.Bsmod{\n\t\t\t\t{Interval: wtest.BlockSize\/2 + 3, Delta: 0x4},\n\t\t\t\t{Interval: wtest.BlockSize\/3 + 7, Delta: 0x18},\n\t\t\t}, Swaperoos: []wtest.Swaperoo{\n\t\t\t\t{OldStart: 0, NewStart: wtest.BlockSize * 210, Size: wtest.BlockSize * 10},\n\t\t\t\t{OldStart: 40, NewStart: wtest.BlockSize*10 + 8, Size: wtest.BlockSize * 40},\n\t\t\t}},\n\t\t\t{Path: \"file-1\", Seed: 0x2},\n\t\t\t{Path: \"dir2\/file-2\", Seed: 0x3},\n\t\t},\n\t})\n\n\tpatchBuffer := new(bytes.Buffer)\n\toptimizedPatchBuffer := new(bytes.Buffer)\n\tvar sourceHashes []wsync.BlockHash\n\tconsumer := &state.Consumer{\n\t\tOnMessage: func(level string, message string) {\n\t\t\tt.Logf(\"[%s] %s\", level, message)\n\t\t},\n\t}\n\n\t{\n\t\tcompression := &pwr.CompressionSettings{}\n\t\tcompression.Algorithm = pwr.CompressionAlgorithm_BROTLI\n\t\tcompression.Quality = 1\n\n\t\ttargetContainer, err := tlc.WalkAny(v1, &tlc.WalkOpts{})\n\t\twtest.Must(t, err)\n\n\t\tsourceContainer, err := tlc.WalkAny(v2, &tlc.WalkOpts{})\n\t\twtest.Must(t, err)\n\n\t\t\/\/ Sign!\n\t\tt.Logf(\"Signing %s\", sourceContainer.Stats())\n\t\tsourceHashes, err = pwr.ComputeSignature(context.Background(), sourceContainer, fspool.New(sourceContainer, v2), consumer)\n\t\twtest.Must(t, err)\n\n\t\ttargetPool := fspool.New(targetContainer, v1)\n\t\ttargetSignature, err := pwr.ComputeSignature(context.Background(), targetContainer, targetPool, consumer)\n\t\twtest.Must(t, err)\n\n\t\tpool := fspool.New(sourceContainer, v2)\n\n\t\t\/\/ Diff!\n\t\tt.Logf(\"Diffing (%s)...\", compression)\n\t\tdctx := pwr.DiffContext{\n\t\t\tCompression: compression,\n\t\t\tConsumer: consumer,\n\n\t\t\tSourceContainer: sourceContainer,\n\t\t\tPool: pool,\n\n\t\t\tTargetContainer: targetContainer,\n\t\t\tTargetSignature: targetSignature,\n\t\t}\n\n\t\twtest.Must(t, dctx.WritePatch(context.Background(), patchBuffer, ioutil.Discard))\n\n\t\t\/\/ Rediff!\n\t\tt.Logf(\"Rediffing...\")\n\t\trc, err := rediff.NewContext(rediff.Params{\n\t\t\tConsumer: consumer,\n\t\t\tPatchReader: seeksource.FromBytes(patchBuffer.Bytes()),\n\t\t})\n\t\twtest.Must(t, err)\n\n\t\twtest.Must(t, rc.Optimize(rediff.OptimizeParams{\n\t\t\tTargetPool: targetPool,\n\t\t\tSourcePool: pool,\n\t\t\tPatchWriter: optimizedPatchBuffer,\n\t\t}))\n\t}\n\n\t\/\/ Patch!\n\ttryPatchNoSaves := func(t *testing.T, patchBytes []byte) {\n\t\tconsumer := &state.Consumer{\n\t\t\tOnMessage: func(level string, message string) {\n\t\t\t\tt.Logf(\"[%s] %s\", level, message)\n\t\t\t},\n\t\t}\n\n\t\tout := filepath.Join(dir, \"out\")\n\t\tdefer screw.RemoveAll(out)\n\n\t\tpatchReader := seeksource.FromBytes(patchBytes)\n\n\t\tp, err := patcher.New(patchReader, consumer)\n\t\twtest.Must(t, err)\n\n\t\ttargetPool := fspool.New(p.GetTargetContainer(), v1)\n\n\t\tb, err := bowl.NewFreshBowl(bowl.FreshBowlParams{\n\t\t\tSourceContainer: p.GetSourceContainer(),\n\t\t\tTargetContainer: p.GetTargetContainer(),\n\t\t\tTargetPool: targetPool,\n\t\t\tOutputFolder: out,\n\t\t})\n\t\twtest.Must(t, err)\n\n\t\terr = p.Resume(nil, targetPool, b)\n\t\twtest.Must(t, err)\n\n\t\t\/\/ Validate!\n\t\tsigInfo := &pwr.SignatureInfo{\n\t\t\tContainer: p.GetSourceContainer(),\n\t\t\tHashes: sourceHashes,\n\t\t}\n\t\twtest.Must(t, pwr.AssertValid(out, sigInfo))\n\t\twtest.Must(t, pwr.AssertNoGhosts(out, sigInfo))\n\n\t\tt.Logf(\"Patch applies cleanly!\")\n\t}\n\n\ttryPatchWithSaves := func(t *testing.T, patchBytes []byte) {\n\t\tconsumer := &state.Consumer{\n\t\t\tOnMessage: func(level string, message string) {\n\t\t\t\tt.Logf(\"[%s] %s\", level, message)\n\t\t\t},\n\t\t}\n\n\t\tout := filepath.Join(dir, \"out\")\n\t\tdefer screw.RemoveAll(out)\n\n\t\tpatchReader := seeksource.FromBytes(patchBytes)\n\n\t\tp, err := patcher.New(patchReader, consumer)\n\t\twtest.Must(t, err)\n\n\t\tvar checkpoint *patcher.Checkpoint\n\t\tp.SetSaveConsumer(&patcherSaveConsumer{\n\t\t\tshouldSave: func() bool {\n\t\t\t\treturn true\n\t\t\t},\n\t\t\tsave: func(c *patcher.Checkpoint) (patcher.AfterSaveAction, error) {\n\t\t\t\tcheckpoint = c\n\t\t\t\treturn patcher.AfterSaveStop, nil\n\t\t\t},\n\t\t})\n\n\t\ttargetPool := fspool.New(p.GetTargetContainer(), v1)\n\n\t\tb, err := bowl.NewFreshBowl(bowl.FreshBowlParams{\n\t\t\tSourceContainer: p.GetSourceContainer(),\n\t\t\tTargetContainer: p.GetTargetContainer(),\n\t\t\tTargetPool: targetPool,\n\t\t\tOutputFolder: out,\n\t\t})\n\t\twtest.Must(t, err)\n\n\t\tnumCheckpoints := 0\n\t\tfor {\n\t\t\tc := checkpoint\n\t\t\tcheckpoint = nil\n\t\t\tt.Logf(\"Resuming patcher - has checkpoint: %v\", c != nil)\n\t\t\terr = p.Resume(c, targetPool, b)\n\t\t\tif errors.Cause(err) == patcher.ErrStop {\n\t\t\t\tt.Logf(\"Patcher returned ErrStop\")\n\n\t\t\t\tif checkpoint == nil {\n\t\t\t\t\twtest.Must(t, errors.New(\"patcher stopped but nil checkpoint\"))\n\t\t\t\t}\n\t\t\t\tnumCheckpoints++\n\n\t\t\t\tcheckpointBuf := new(bytes.Buffer)\n\t\t\t\tenc := gob.NewEncoder(checkpointBuf)\n\t\t\t\twtest.Must(t, enc.Encode(checkpoint))\n\n\t\t\t\tt.Logf(\"Got %s checkpoint @ %.2f%% of the patch\", united.FormatBytes(int64(checkpointBuf.Len())), p.Progress()*100.0)\n\n\t\t\t\tcheckpoint = &patcher.Checkpoint{}\n\t\t\t\tdec := gob.NewDecoder(bytes.NewReader(checkpointBuf.Bytes()))\n\t\t\t\twtest.Must(t, dec.Decode(checkpoint))\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\twtest.Must(t, err)\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Validate!\n\t\twtest.Must(t, pwr.AssertValid(out, &pwr.SignatureInfo{\n\t\t\tContainer: p.GetSourceContainer(),\n\t\t\tHashes: sourceHashes,\n\t\t}))\n\n\t\tt.Logf(\"Patch applies cleanly!\")\n\n\t\tt.Logf(\"Had %d checkpoints total\", numCheckpoints)\n\t\tassert.True(t, numCheckpoints > 0, \"had at least one checkpoint\")\n\t}\n\n\ttryPatch := func(kind string, patchBytes []byte) {\n\t\tt.Run(fmt.Sprintf(\"%s-no-saves\", kind), func(t *testing.T) {\n\t\t\tt.Logf(\"Applying %s %s patch (%d bytes), no saves\", united.FormatBytes(int64(len(patchBytes))), kind, len(patchBytes))\n\t\t\ttryPatchNoSaves(t, patchBytes)\n\t\t})\n\n\t\tt.Run(fmt.Sprintf(\"%s-with-saves\", kind), func(t *testing.T) {\n\t\t\tt.Logf(\"Applying %s %s patch (%d bytes) with saves\", united.FormatBytes(int64(len(patchBytes))), kind, len(patchBytes))\n\t\t\ttryPatchWithSaves(t, patchBytes)\n\t\t})\n\t}\n\n\ttryPatch(\"simple\", patchBuffer.Bytes())\n\ttryPatch(\"optimized\", optimizedPatchBuffer.Bytes())\n}\n\n\/\/\n\ntype patcherSaveConsumer struct {\n\tshouldSave func() bool\n\tsave func(checkpoint *patcher.Checkpoint) (patcher.AfterSaveAction, error)\n}\n\nvar _ patcher.SaveConsumer = (*patcherSaveConsumer)(nil)\n\nfunc (psc *patcherSaveConsumer) ShouldSave() bool {\n\treturn psc.shouldSave()\n}\n\nfunc (psc *patcherSaveConsumer) Save(checkpoint *patcher.Checkpoint) (patcher.AfterSaveAction, error) {\n\treturn psc.save(checkpoint)\n}\n<commit_msg>Add tests for sourceIndexWhitelist<commit_after>package patcher_test\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/itchio\/headway\/united\"\n\t\"github.com\/itchio\/lake\"\n\t\"github.com\/itchio\/screw\"\n\t\"github.com\/itchio\/wharf\/wsync\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\n\t\"github.com\/itchio\/wharf\/pwr\"\n\t\"github.com\/itchio\/wharf\/pwr\/bowl\"\n\t\"github.com\/itchio\/wharf\/pwr\/patcher\"\n\t\"github.com\/itchio\/wharf\/pwr\/rediff\"\n\t\"github.com\/itchio\/wharf\/wtest\"\n\n\t\"github.com\/itchio\/headway\/state\"\n\t\"github.com\/itchio\/lake\/pools\/fspool\"\n\t\"github.com\/itchio\/lake\/tlc\"\n\t\"github.com\/itchio\/savior\/seeksource\"\n\n\t_ \"github.com\/itchio\/wharf\/compressors\/cbrotli\"\n\t_ \"github.com\/itchio\/wharf\/decompressors\/cbrotli\"\n)\n\nfunc Test_Naive(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"patcher-noop\")\n\twtest.Must(t, err)\n\tdefer screw.RemoveAll(dir)\n\n\tv1 := filepath.Join(dir, \"v1\")\n\twtest.MakeTestDir(t, v1, wtest.TestDirSettings{\n\t\tEntries: []wtest.TestDirEntry{\n\t\t\t{Path: \"subdir\/file-1\", Seed: 0x1, Size: wtest.BlockSize*220 + 14},\n\t\t\t{Path: \"file-1\", Seed: 0x2},\n\t\t\t{Path: \"dir2\/file-2\", Seed: 0x3},\n\t\t\t{Path: \"dir3\/gone\", Seed: 0x4},\n\t\t},\n\t})\n\n\tv2 := filepath.Join(dir, \"v2\")\n\twtest.MakeTestDir(t, v2, wtest.TestDirSettings{\n\t\tEntries: []wtest.TestDirEntry{\n\t\t\t{Path: \"subdir\/file-1\", Seed: 0x1, Size: wtest.BlockSize*260 + 14, Bsmods: []wtest.Bsmod{\n\t\t\t\t{Interval: wtest.BlockSize\/2 + 3, Delta: 0x4},\n\t\t\t\t{Interval: wtest.BlockSize\/3 + 7, Delta: 0x18},\n\t\t\t}, Swaperoos: []wtest.Swaperoo{\n\t\t\t\t{OldStart: 0, NewStart: wtest.BlockSize * 210, Size: wtest.BlockSize * 10},\n\t\t\t\t{OldStart: 40, NewStart: wtest.BlockSize*10 + 8, Size: wtest.BlockSize * 40},\n\t\t\t}},\n\t\t\t{Path: \"file-1\", Seed: 0x2},\n\t\t\t{Path: \"dir2\/file-2\", Seed: 0x3},\n\t\t},\n\t})\n\n\tpatchBuffer := new(bytes.Buffer)\n\toptimizedPatchBuffer := new(bytes.Buffer)\n\tvar sourceHashes []wsync.BlockHash\n\tconsumer := &state.Consumer{\n\t\tOnMessage: func(level string, message string) {\n\t\t\tt.Logf(\"[%s] %s\", level, message)\n\t\t},\n\t}\n\n\t{\n\t\tcompression := &pwr.CompressionSettings{}\n\t\tcompression.Algorithm = pwr.CompressionAlgorithm_BROTLI\n\t\tcompression.Quality = 1\n\n\t\ttargetContainer, err := tlc.WalkAny(v1, &tlc.WalkOpts{})\n\t\twtest.Must(t, err)\n\n\t\tsourceContainer, err := tlc.WalkAny(v2, &tlc.WalkOpts{})\n\t\twtest.Must(t, err)\n\n\t\t\/\/ Sign!\n\t\tt.Logf(\"Signing %s\", sourceContainer.Stats())\n\t\tsourceHashes, err = pwr.ComputeSignature(context.Background(), sourceContainer, fspool.New(sourceContainer, v2), consumer)\n\t\twtest.Must(t, err)\n\n\t\ttargetPool := fspool.New(targetContainer, v1)\n\t\ttargetSignature, err := pwr.ComputeSignature(context.Background(), targetContainer, targetPool, consumer)\n\t\twtest.Must(t, err)\n\n\t\tpool := fspool.New(sourceContainer, v2)\n\n\t\t\/\/ Diff!\n\t\tt.Logf(\"Diffing (%s)...\", compression)\n\t\tdctx := pwr.DiffContext{\n\t\t\tCompression: compression,\n\t\t\tConsumer: consumer,\n\n\t\t\tSourceContainer: sourceContainer,\n\t\t\tPool: pool,\n\n\t\t\tTargetContainer: targetContainer,\n\t\t\tTargetSignature: targetSignature,\n\t\t}\n\n\t\twtest.Must(t, dctx.WritePatch(context.Background(), patchBuffer, ioutil.Discard))\n\n\t\t\/\/ Rediff!\n\t\tt.Logf(\"Rediffing...\")\n\t\trc, err := rediff.NewContext(rediff.Params{\n\t\t\tConsumer: consumer,\n\t\t\tPatchReader: seeksource.FromBytes(patchBuffer.Bytes()),\n\t\t})\n\t\twtest.Must(t, err)\n\n\t\twtest.Must(t, rc.Optimize(rediff.OptimizeParams{\n\t\t\tTargetPool: targetPool,\n\t\t\tSourcePool: pool,\n\t\t\tPatchWriter: optimizedPatchBuffer,\n\t\t}))\n\t}\n\n\t\/\/ Patch!\n\ttryPatchNoSaves := func(t *testing.T, patchBytes []byte) {\n\t\tconsumer := &state.Consumer{\n\t\t\tOnMessage: func(level string, message string) {\n\t\t\t\tt.Logf(\"[%s] %s\", level, message)\n\t\t\t},\n\t\t}\n\n\t\tout := filepath.Join(dir, \"out\")\n\t\tdefer screw.RemoveAll(out)\n\n\t\tpatchReader := seeksource.FromBytes(patchBytes)\n\n\t\tp, err := patcher.New(patchReader, consumer)\n\t\twtest.Must(t, err)\n\n\t\ttargetPool := fspool.New(p.GetTargetContainer(), v1)\n\n\t\tb, err := bowl.NewFreshBowl(bowl.FreshBowlParams{\n\t\t\tSourceContainer: p.GetSourceContainer(),\n\t\t\tTargetContainer: p.GetTargetContainer(),\n\t\t\tTargetPool: targetPool,\n\t\t\tOutputFolder: out,\n\t\t})\n\t\twtest.Must(t, err)\n\n\t\terr = p.Resume(nil, targetPool, b)\n\t\twtest.Must(t, err)\n\n\t\t\/\/ Validate!\n\t\tsigInfo := &pwr.SignatureInfo{\n\t\t\tContainer: p.GetSourceContainer(),\n\t\t\tHashes: sourceHashes,\n\t\t}\n\t\twtest.Must(t, pwr.AssertValid(out, sigInfo))\n\t\twtest.Must(t, pwr.AssertNoGhosts(out, sigInfo))\n\n\t\tt.Logf(\"Patch applies cleanly!\")\n\t}\n\n\ttryPatchSkip := func(t *testing.T, patchBytes []byte, all bool) {\n\t\tconsumer := &state.Consumer{\n\t\t\tOnMessage: func(level string, message string) {\n\t\t\t\tt.Logf(\"[%s] %s\", level, message)\n\t\t\t},\n\t\t}\n\n\t\tout := filepath.Join(dir, \"out\")\n\t\tdefer screw.RemoveAll(out)\n\n\t\tpatchReader := seeksource.FromBytes(patchBytes)\n\n\t\tp, err := patcher.New(patchReader, consumer)\n\t\twtest.Must(t, err)\n\n\t\tvar targetPool lake.Pool = &explodingPool{}\n\n\t\tsourceIndexWhitelist := make(map[int64]bool)\n\t\tif !all {\n\t\t\tfor i := int64(0); i < int64(len(p.GetSourceContainer().Files)); i += 2 {\n\t\t\t\tsourceIndexWhitelist[i] = true\n\t\t\t}\n\t\t\ttargetPool = fspool.New(p.GetTargetContainer(), v1)\n\t\t}\n\t\tp.SetSourceIndexWhitelist(sourceIndexWhitelist)\n\n\t\tb, err := bowl.NewFreshBowl(bowl.FreshBowlParams{\n\t\t\tSourceContainer: p.GetSourceContainer(),\n\t\t\tTargetContainer: p.GetTargetContainer(),\n\t\t\tTargetPool: targetPool,\n\t\t\tOutputFolder: out,\n\t\t})\n\t\twtest.Must(t, err)\n\n\t\terr = p.Resume(nil, targetPool, b)\n\t\twtest.Must(t, err)\n\n\t\t\/\/ Validate!\n\t\tsigInfo := &pwr.SignatureInfo{\n\t\t\tContainer: p.GetSourceContainer(),\n\t\t\tHashes: sourceHashes,\n\t\t}\n\t\terr = pwr.AssertValid(out, sigInfo)\n\t\tassert.Error(t, err)\n\n\t\tt.Logf(\"Partially applied!\")\n\t}\n\n\ttryPatchWithSaves := func(t *testing.T, patchBytes []byte) {\n\t\tconsumer := &state.Consumer{\n\t\t\tOnMessage: func(level string, message string) {\n\t\t\t\tt.Logf(\"[%s] %s\", level, message)\n\t\t\t},\n\t\t}\n\n\t\tout := filepath.Join(dir, \"out\")\n\t\tdefer screw.RemoveAll(out)\n\n\t\tpatchReader := seeksource.FromBytes(patchBytes)\n\n\t\tp, err := patcher.New(patchReader, consumer)\n\t\twtest.Must(t, err)\n\n\t\tvar checkpoint *patcher.Checkpoint\n\t\tp.SetSaveConsumer(&patcherSaveConsumer{\n\t\t\tshouldSave: func() bool {\n\t\t\t\treturn true\n\t\t\t},\n\t\t\tsave: func(c *patcher.Checkpoint) (patcher.AfterSaveAction, error) {\n\t\t\t\tcheckpoint = c\n\t\t\t\treturn patcher.AfterSaveStop, nil\n\t\t\t},\n\t\t})\n\n\t\ttargetPool := fspool.New(p.GetTargetContainer(), v1)\n\n\t\tb, err := bowl.NewFreshBowl(bowl.FreshBowlParams{\n\t\t\tSourceContainer: p.GetSourceContainer(),\n\t\t\tTargetContainer: p.GetTargetContainer(),\n\t\t\tTargetPool: targetPool,\n\t\t\tOutputFolder: out,\n\t\t})\n\t\twtest.Must(t, err)\n\n\t\tnumCheckpoints := 0\n\t\tfor {\n\t\t\tc := checkpoint\n\t\t\tcheckpoint = nil\n\t\t\tt.Logf(\"Resuming patcher - has checkpoint: %v\", c != nil)\n\t\t\terr = p.Resume(c, targetPool, b)\n\t\t\tif errors.Cause(err) == patcher.ErrStop {\n\t\t\t\tt.Logf(\"Patcher returned ErrStop\")\n\n\t\t\t\tif checkpoint == nil {\n\t\t\t\t\twtest.Must(t, errors.New(\"patcher stopped but nil checkpoint\"))\n\t\t\t\t}\n\t\t\t\tnumCheckpoints++\n\n\t\t\t\tcheckpointBuf := new(bytes.Buffer)\n\t\t\t\tenc := gob.NewEncoder(checkpointBuf)\n\t\t\t\twtest.Must(t, enc.Encode(checkpoint))\n\n\t\t\t\tt.Logf(\"Got %s checkpoint @ %.2f%% of the patch\", united.FormatBytes(int64(checkpointBuf.Len())), p.Progress()*100.0)\n\n\t\t\t\tcheckpoint = &patcher.Checkpoint{}\n\t\t\t\tdec := gob.NewDecoder(bytes.NewReader(checkpointBuf.Bytes()))\n\t\t\t\twtest.Must(t, dec.Decode(checkpoint))\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\twtest.Must(t, err)\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ Validate!\n\t\twtest.Must(t, pwr.AssertValid(out, &pwr.SignatureInfo{\n\t\t\tContainer: p.GetSourceContainer(),\n\t\t\tHashes: sourceHashes,\n\t\t}))\n\n\t\tt.Logf(\"Patch applies cleanly!\")\n\n\t\tt.Logf(\"Had %d checkpoints total\", numCheckpoints)\n\t\tassert.True(t, numCheckpoints > 0, \"had at least one checkpoint\")\n\t}\n\n\ttryPatch := func(kind string, patchBytes []byte) {\n\t\tt.Run(fmt.Sprintf(\"%s-no-saves\", kind), func(t *testing.T) {\n\t\t\tt.Logf(\"Applying %s %s patch (%d bytes), no saves\", united.FormatBytes(int64(len(patchBytes))), kind, len(patchBytes))\n\t\t\ttryPatchNoSaves(t, patchBytes)\n\t\t})\n\n\t\tt.Run(fmt.Sprintf(\"%s-with-saves\", kind), func(t *testing.T) {\n\t\t\tt.Logf(\"Applying %s %s patch (%d bytes) with saves\", united.FormatBytes(int64(len(patchBytes))), kind, len(patchBytes))\n\t\t\ttryPatchWithSaves(t, patchBytes)\n\t\t})\n\n\t\tt.Run(fmt.Sprintf(\"%s-skip-all\", kind), func(t *testing.T) {\n\t\t\tt.Logf(\"Applying %s %s patch (%d bytes) by skipping all entries\", united.FormatBytes(int64(len(patchBytes))), kind, len(patchBytes))\n\t\t\ttryPatchSkip(t, patchBytes, true)\n\t\t})\n\n\t\tt.Run(fmt.Sprintf(\"%s-skip-some\", kind), func(t *testing.T) {\n\t\t\tt.Logf(\"Applying %s %s patch (%d bytes) by skipping some entries\", united.FormatBytes(int64(len(patchBytes))), kind, len(patchBytes))\n\t\t\ttryPatchSkip(t, patchBytes, false)\n\t\t})\n\t}\n\n\ttryPatch(\"simple\", patchBuffer.Bytes())\n\ttryPatch(\"optimized\", optimizedPatchBuffer.Bytes())\n}\n\n\/\/\n\ntype patcherSaveConsumer struct {\n\tshouldSave func() bool\n\tsave func(checkpoint *patcher.Checkpoint) (patcher.AfterSaveAction, error)\n}\n\nvar _ patcher.SaveConsumer = (*patcherSaveConsumer)(nil)\n\nfunc (psc *patcherSaveConsumer) ShouldSave() bool {\n\treturn psc.shouldSave()\n}\n\nfunc (psc *patcherSaveConsumer) Save(checkpoint *patcher.Checkpoint) (patcher.AfterSaveAction, error) {\n\treturn psc.save(checkpoint)\n}\n\n\/\/\n\ntype explodingPool struct{}\n\nvar _ lake.Pool = (*explodingPool)(nil)\n\nfunc (ep *explodingPool) GetSize(fileIndex int64) int64 {\n\tpanic(\"pool exploded\")\n}\nfunc (ep *explodingPool) GetReader(fileIndex int64) (io.Reader, error) {\n\tpanic(\"pool exploded\")\n}\nfunc (ep *explodingPool) GetReadSeeker(fileIndex int64) (io.ReadSeeker, error) {\n\tpanic(\"pool exploded\")\n}\nfunc (ep *explodingPool) Close() error {\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package workflow\n\nimport (\n\t\"github.com\/go-gorp\/gorp\"\n\n\t\"github.com\/ovh\/cds\/engine\/api\/event\"\n\t\"github.com\/ovh\/cds\/sdk\"\n\t\"github.com\/ovh\/cds\/sdk\/log\"\n)\n\n\/\/ GetWorkflowRunEventData read channel to get elements to push\nfunc GetWorkflowRunEventData(cError <-chan error, cEvent <-chan interface{}) ([]sdk.WorkflowRun, []sdk.WorkflowNodeRun, []sdk.WorkflowNodeJobRun, error) {\n\twrs := []sdk.WorkflowRun{}\n\twnrs := []sdk.WorkflowNodeRun{}\n\twnjrs := []sdk.WorkflowNodeJobRun{}\n\n\tfor {\n\t\tselect {\n\t\tcase e, has := <-cError:\n\t\t\tlog.Info(\"GetWorkflowRunEventData> cError has: %t err:%v\", has, e)\n\t\t\tif !has {\n\t\t\t\treturn wrs, wnrs, wnjrs, e\n\t\t\t}\n\t\t\tif e != nil {\n\t\t\t\treturn nil, nil, nil, e\n\t\t\t}\n\t\tcase w, has := <-cEvent:\n\t\t\tlog.Info(\"GetWorkflowRunEventData> cEvent has: %t\", has)\n\t\t\tif !has {\n\t\t\t\treturn wrs, wnrs, wnjrs, nil\n\t\t\t}\n\t\t\tswitch x := w.(type) {\n\t\t\tcase sdk.WorkflowNodeJobRun:\n\t\t\t\twnjrs = append(wnjrs, x)\n\t\t\tcase sdk.WorkflowNodeRun:\n\t\t\t\twnrs = append(wnrs, x)\n\t\t\tcase sdk.WorkflowRun:\n\t\t\t\twrs = append(wrs, x)\n\t\t\tdefault:\n\t\t\t\tlog.Warning(\"GetWorkflowRunEventData> unknown type %T\", w)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ SendEvent Send event on workflow run\nfunc SendEvent(db gorp.SqlExecutor, wrs []sdk.WorkflowRun, wnrs []sdk.WorkflowNodeRun, wnjrs []sdk.WorkflowNodeJobRun, key string) {\n\tfor _, wr := range wrs {\n\t\tevent.PublishWorkflowRun(wr, key)\n\t}\n\tfor _, wnr := range wnrs {\n\t\twr, errWR := LoadRunByID(db, wnr.WorkflowRunID, false)\n\t\tif errWR != nil {\n\t\t\tlog.Warning(\"SendEvent> Cannot load workflow run %d: %s\", wnr.WorkflowRunID, errWR)\n\t\t\tcontinue\n\t\t}\n\t\tevent.PublishWorkflowNodeRun(wnr, *wr, key)\n\t}\n\tfor _, wnjr := range wnjrs {\n\t\tevent.PublishWorkflowNodeJobRun(wnjr)\n\t}\n}\n<commit_msg>fix(api): do not return if channels are not closed (#1645)<commit_after>package workflow\n\nimport (\n\t\"github.com\/go-gorp\/gorp\"\n\n\t\"github.com\/ovh\/cds\/engine\/api\/event\"\n\t\"github.com\/ovh\/cds\/sdk\"\n\t\"github.com\/ovh\/cds\/sdk\/log\"\n)\n\n\/\/ GetWorkflowRunEventData read channel to get elements to push\nfunc GetWorkflowRunEventData(cError <-chan error, cEvent <-chan interface{}) ([]sdk.WorkflowRun, []sdk.WorkflowNodeRun, []sdk.WorkflowNodeJobRun, error) {\n\twrs := []sdk.WorkflowRun{}\n\twnrs := []sdk.WorkflowNodeRun{}\n\twnjrs := []sdk.WorkflowNodeJobRun{}\n\tvar err error\n\n\tfor {\n\t\tselect {\n\t\tcase e, has := <-cError:\n\t\t\tlog.Info(\"GetWorkflowRunEventData> cError has: %t err:%v\", has, e)\n\t\t\terr = sdk.WrapError(e, \"GetWorkflowRunEventData> Error received\")\n\t\t\tif !has {\n\t\t\t\treturn wrs, wnrs, wnjrs, err\n\t\t\t}\n\t\tcase w, has := <-cEvent:\n\t\t\tlog.Info(\"GetWorkflowRunEventData> cEvent has: %t\", has)\n\t\t\tif !has {\n\t\t\t\treturn wrs, wnrs, wnjrs, err\n\t\t\t}\n\t\t\tswitch x := w.(type) {\n\t\t\tcase sdk.WorkflowNodeJobRun:\n\t\t\t\twnjrs = append(wnjrs, x)\n\t\t\tcase sdk.WorkflowNodeRun:\n\t\t\t\twnrs = append(wnrs, x)\n\t\t\tcase sdk.WorkflowRun:\n\t\t\t\twrs = append(wrs, x)\n\t\t\tdefault:\n\t\t\t\tlog.Warning(\"GetWorkflowRunEventData> unknown type %T\", w)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ SendEvent Send event on workflow run\nfunc SendEvent(db gorp.SqlExecutor, wrs []sdk.WorkflowRun, wnrs []sdk.WorkflowNodeRun, wnjrs []sdk.WorkflowNodeJobRun, key string) {\n\tfor _, wr := range wrs {\n\t\tevent.PublishWorkflowRun(wr, key)\n\t}\n\tfor _, wnr := range wnrs {\n\t\twr, errWR := LoadRunByID(db, wnr.WorkflowRunID, false)\n\t\tif errWR != nil {\n\t\t\tlog.Warning(\"SendEvent> Cannot load workflow run %d: %s\", wnr.WorkflowRunID, errWR)\n\t\t\tcontinue\n\t\t}\n\t\tevent.PublishWorkflowNodeRun(wnr, *wr, key)\n\t}\n\tfor _, wnjr := range wnjrs {\n\t\tevent.PublishWorkflowNodeJobRun(wnjr)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Auto commit 02\/10\/2012 21:00:03<commit_after><|endoftext|>"} {"text":"<commit_before>package model\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/oinume\/lekcije\/server\/config\"\n\t\"github.com\/oinume\/lekcije\/server\/util\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestUpdateLessons(t *testing.T) {\n\ta := assert.New(t)\n\tr := require.New(t)\n\n\tteacherID := uint32(util.RandomInt(999999))\n\tdatetime := time.Date(2016, 10, 1, 14, 30, 0, 0, config.LocalTimezone())\n\tlessons := createLessons(teacherID, datetime, \"Reserved\", 5)\n\n\taffected, err := lessonService.UpdateLessons(lessons)\n\tr.NoError(err)\n\ta.Equal(int64(5), affected)\n\tfor _, l := range lessons {\n\t\ta.NotEqual(uint64(0), l.ID)\n\t\tlogs, err := lessonStatusLogService.FindAllByLessonID(l.ID)\n\t\tr.NoError(err)\n\t\ta.Equal(1, len(logs))\n\t}\n\n\tfoundLessons, err := lessonService.FindLessons(teacherID, datetime, datetime)\n\tr.NoError(err)\n\ta.Equal(len(lessons), len(foundLessons))\n\tfor i := range lessons {\n\t\t\/\/ TODO: custom enum type\n\t\ta.Equal(strings.ToLower(lessons[i].Status), strings.ToLower(foundLessons[i].Status))\n\t}\n}\n\nfunc TestUpdateLessonsOverwrite(t *testing.T) {\n\ta := assert.New(t)\n\tr := require.New(t)\n\n\tteacherID := uint32(util.RandomInt(999999))\n\tdatetime := time.Date(2016, 10, 1, 14, 30, 0, 0, config.LocalTimezone())\n\tlessons := createLessons(teacherID, datetime, \"Available\", 5)\n\taffected, err := lessonService.UpdateLessons(lessons)\n\tr.NoError(err)\n\ta.EqualValues(len(lessons), affected)\n\n\ttime.Sleep(1 * time.Second)\n\tlessons[0].Status = \"Reserved\"\n\taffected, err = lessonService.UpdateLessons(lessons)\n\tr.NoError(err)\n\ta.EqualValues(1, affected)\n\n\tfoundLessons, err := lessonService.FindLessons(teacherID, datetime, datetime)\n\tr.NoError(err)\n\ta.Equal(strings.ToLower(foundLessons[0].Status), \"reserved\")\n\n\tlogs, err := lessonStatusLogService.FindAllByLessonID(foundLessons[0].ID)\n\tr.NoError(err)\n\ta.Equal(2, len(logs))\n}\n\nfunc TestGetNewAvailableLessons1(t *testing.T) {\n\ta := assert.New(t)\n\n\tdatetime := time.Date(2016, 10, 1, 14, 30, 0, 0, config.LocalTimezone())\n\tlessons1 := createLessons(1, datetime, \"Reserved\", 3)\n\tlessons2 := createLessons(1, datetime, \"Reserved\", 3)\n\tlessons2[1].Status = \"Available\"\n\t\/\/ Test GetNewAvailableLessons returns a lesson when new lesson is \"Available\"\n\tavailableLessons := lessonService.GetNewAvailableLessons(lessons1, lessons2)\n\ta.Equal(1, len(availableLessons))\n\ta.Equal(datetime.Add(1*time.Hour), availableLessons[0].Datetime)\n}\n\nfunc TestGetNewAvailableLessons2(t *testing.T) {\n\ta := assert.New(t)\n\n\tdatetime := time.Date(2016, 10, 1, 14, 30, 0, 0, config.LocalTimezone())\n\tlessons1 := createLessons(1, datetime, \"Reserved\", 3)\n\tlessons2 := createLessons(1, datetime, \"Reserved\", 3)\n\tlessons1[0].Status = \"Available\"\n\tlessons2[0].Status = \"Available\"\n\t\/\/ Test GetNewAvailableLessons returns nothing when both lessons are \"Available\"\n\tavailableLessons := lessonService.GetNewAvailableLessons(lessons1, lessons2)\n\ta.Equal(0, len(availableLessons))\n}\n\nfunc createLessons(teacherID uint32, baseDatetime time.Time, status string, length int) []*Lesson {\n\tlessons := make([]*Lesson, length)\n\tnow := time.Now().UTC()\n\tfor i := range lessons {\n\t\tlessons[i] = &Lesson{\n\t\t\tTeacherID: teacherID,\n\t\t\tDatetime: baseDatetime.Add(time.Duration(i) * time.Hour),\n\t\t\tStatus: status,\n\t\t\tCreatedAt: now,\n\t\t\tUpdatedAt: now,\n\t\t}\n\t}\n\treturn lessons\n}\n<commit_msg>Add test case<commit_after>package model\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/oinume\/lekcije\/server\/config\"\n\t\"github.com\/oinume\/lekcije\/server\/util\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestUpdateLessons(t *testing.T) {\n\ta := assert.New(t)\n\tr := require.New(t)\n\n\tteacherID := uint32(util.RandomInt(999999))\n\tdatetime := time.Date(2016, 10, 1, 14, 30, 0, 0, config.LocalTimezone())\n\tlessons := createLessons(teacherID, datetime, \"Reserved\", 5)\n\n\taffected, err := lessonService.UpdateLessons(lessons)\n\tr.NoError(err)\n\ta.Equal(int64(5), affected)\n\tfor _, l := range lessons {\n\t\ta.NotEqual(uint64(0), l.ID)\n\t\tlogs, err := lessonStatusLogService.FindAllByLessonID(l.ID)\n\t\tr.NoError(err)\n\t\ta.Equal(1, len(logs))\n\t}\n\n\tfoundLessons, err := lessonService.FindLessons(teacherID, datetime, datetime)\n\tr.NoError(err)\n\ta.Equal(len(lessons), len(foundLessons))\n\tfor i := range lessons {\n\t\t\/\/ TODO: custom enum type\n\t\ta.Equal(strings.ToLower(lessons[i].Status), strings.ToLower(foundLessons[i].Status))\n\t}\n}\n\nfunc TestUpdateLessonsOverwrite(t *testing.T) {\n\ta := assert.New(t)\n\tr := require.New(t)\n\n\tteacherID := uint32(util.RandomInt(999999))\n\tdatetime := time.Date(2016, 10, 1, 14, 30, 0, 0, config.LocalTimezone())\n\tlessons := createLessons(teacherID, datetime, \"Available\", 5)\n\taffected, err := lessonService.UpdateLessons(lessons)\n\tr.NoError(err)\n\ta.EqualValues(len(lessons), affected)\n\n\ttime.Sleep(1 * time.Second)\n\tlessons[0].Status = \"Reserved\"\n\taffected, err = lessonService.UpdateLessons(lessons)\n\tr.NoError(err)\n\ta.EqualValues(1, affected)\n\n\tfoundLessons, err := lessonService.FindLessons(teacherID, datetime, datetime)\n\tr.NoError(err)\n\ta.Equal(strings.ToLower(foundLessons[0].Status), \"reserved\")\n\n\tlogs, err := lessonStatusLogService.FindAllByLessonID(foundLessons[0].ID)\n\tr.NoError(err)\n\ta.Equal(2, len(logs))\n}\n\nfunc TestUpdateLessonsNoChange(t *testing.T) {\n\ta := assert.New(t)\n\tr := require.New(t)\n\n\tteacherID := uint32(util.RandomInt(999999))\n\tdatetime := time.Date(2016, 10, 1, 14, 30, 0, 0, config.LocalTimezone())\n\tlessons := createLessons(teacherID, datetime, \"Available\", 5)\n\taffected, err := lessonService.UpdateLessons(lessons)\n\tr.NoError(err)\n\ta.EqualValues(len(lessons), affected)\n\n\taffected, err = lessonService.UpdateLessons(lessons)\n\tr.NoError(err)\n\ta.EqualValues(0, affected)\n\n\tfoundLessons, err := lessonService.FindLessons(teacherID, datetime, datetime)\n\tr.NoError(err)\n\ta.Equal(strings.ToLower(foundLessons[0].Status), \"available\")\n\n\tlogs, err := lessonStatusLogService.FindAllByLessonID(foundLessons[0].ID)\n\tr.NoError(err)\n\ta.Equal(1, len(logs))\n}\n\nfunc TestGetNewAvailableLessons1(t *testing.T) {\n\ta := assert.New(t)\n\n\tdatetime := time.Date(2016, 10, 1, 14, 30, 0, 0, config.LocalTimezone())\n\tlessons1 := createLessons(1, datetime, \"Reserved\", 3)\n\tlessons2 := createLessons(1, datetime, \"Reserved\", 3)\n\tlessons2[1].Status = \"Available\"\n\t\/\/ Test GetNewAvailableLessons returns a lesson when new lesson is \"Available\"\n\tavailableLessons := lessonService.GetNewAvailableLessons(lessons1, lessons2)\n\ta.Equal(1, len(availableLessons))\n\ta.Equal(datetime.Add(1*time.Hour), availableLessons[0].Datetime)\n}\n\nfunc TestGetNewAvailableLessons2(t *testing.T) {\n\ta := assert.New(t)\n\n\tdatetime := time.Date(2016, 10, 1, 14, 30, 0, 0, config.LocalTimezone())\n\tlessons1 := createLessons(1, datetime, \"Reserved\", 3)\n\tlessons2 := createLessons(1, datetime, \"Reserved\", 3)\n\tlessons1[0].Status = \"Available\"\n\tlessons2[0].Status = \"Available\"\n\t\/\/ Test GetNewAvailableLessons returns nothing when both lessons are \"Available\"\n\tavailableLessons := lessonService.GetNewAvailableLessons(lessons1, lessons2)\n\ta.Equal(0, len(availableLessons))\n}\n\nfunc createLessons(teacherID uint32, baseDatetime time.Time, status string, length int) []*Lesson {\n\tlessons := make([]*Lesson, length)\n\tnow := time.Now().UTC()\n\tfor i := range lessons {\n\t\tlessons[i] = &Lesson{\n\t\t\tTeacherID: teacherID,\n\t\t\tDatetime: baseDatetime.Add(time.Duration(i) * time.Hour),\n\t\t\tStatus: status,\n\t\t\tCreatedAt: now,\n\t\t\tUpdatedAt: now,\n\t\t}\n\t}\n\treturn lessons\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Fix error code and argument order (#36)<commit_after><|endoftext|>"} {"text":"<commit_before>package activitypub\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestCreateActivityNew(t *testing.T) {\n\tvar testValue = ObjectID(\"test\")\n\tvar now time.Time\n\n\tc := CreateActivityNew(testValue, nil, nil)\n\tnow = time.Now()\n\tif c.Activity.ID != testValue {\n\t\tt.Errorf(\"Activity Id '%v' different than expected '%v'\", c.Activity.ID, testValue)\n\t}\n\tif c.Activity.Type != CreateType {\n\t\tt.Errorf(\"Activity Type '%v' different than expected '%v'\", c.Activity.Type, CreateType)\n\t}\n\tif now.Sub(c.Published).Round(time.Millisecond) != 0 {\n\t\tt.Errorf(\"Published time '%v' different than expected '%v'\", c.Published, now)\n\t}\n\n\ttestValue = ObjectID(\"my:note\")\n\tn := ObjectNew(\"my:note\", NoteType)\n\tb := PersonNew(\"bob\")\n\n\tc1 := CreateActivityNew(testValue, *b, n)\n\tnow = time.Now()\n\tif c1.Activity.ID != testValue {\n\t\tt.Errorf(\"Activity Id '%v' different than expected '%v'\", c1.Activity.ID, testValue)\n\t}\n\tif c1.Activity.Type != CreateType {\n\t\tt.Errorf(\"Activity Type '%v' different than expected '%v'\", c1.Activity.Type, CreateType)\n\t}\n\tif now.Sub(c.Published).Round(time.Millisecond) != 0 {\n\t\tt.Errorf(\"Published time '%v' different than expected '%v'\", c1.Published, now)\n\t}\n\tif !reflect.DeepEqual(c1.Activity.Actor.Object().ID, b.Object().ID) {\n\t\tt.Errorf(\"Actor \\n'%#v'\\n\\n different than expected \\n\\n'%#v'\", c1.Activity.Actor.Object().ID, b.Object().ID)\n\t}\n\tif !reflect.DeepEqual(c1.Activity.Object.(apObject), n) {\n\t\tt.Errorf(\"Object \\n'%#v'\\n\\n different than expected \\n\\n'%#v'\\n\", c1.Activity.Object, n)\n\t}\n}\n<commit_msg>Adding more assertions for CreateActivity tests<commit_after>package activitypub\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestCreateActivityNew(t *testing.T) {\n\tvar testValue = ObjectID(\"test\")\n\tvar now time.Time\n\n\tc := CreateActivityNew(testValue, nil, nil)\n\tnow = time.Now()\n\tif c.Activity.ID != testValue {\n\t\tt.Errorf(\"Activity Id '%v' different than expected '%v'\", c.Activity.ID, testValue)\n\t}\n\tif c.Activity.Type != CreateType {\n\t\tt.Errorf(\"Activity Type '%v' different than expected '%v'\", c.Activity.Type, CreateType)\n\t}\n\tif now.Sub(c.Published).Round(time.Millisecond) != 0 {\n\t\tt.Errorf(\"Published time '%v' different than expected '%v'\", c.Published, now)\n\t}\n\n\ttestValue = ObjectID(\"my:note\")\n\tn := ObjectNew(\"my:note\", NoteType)\n\tb := PersonNew(\"bob\")\n\n\tc1 := CreateActivityNew(testValue, *b, n)\n\tnow = time.Now()\n\tif c1.Activity.ID != testValue {\n\t\tt.Errorf(\"Activity Id '%v' different than expected '%v'\", c1.Activity.ID, testValue)\n\t}\n\tif c1.Activity.Type != CreateType {\n\t\tt.Errorf(\"Activity Type '%v' different than expected '%v'\", c1.Activity.Type, CreateType)\n\t}\n\tif now.Sub(c.Published).Round(time.Millisecond) != 0 {\n\t\tt.Errorf(\"Published time '%v' different than expected '%v'\", c1.Published, now)\n\t}\n\tif c1.Activity.Actor.Object().ID != b.ID {\n\t\tt.Errorf(\"Actor ID %q different than expected %q\", c1.Activity.Actor.Object().ID, b.ID)\n\t}\n\tif !reflect.DeepEqual(c1.Activity.Actor.Object(), b.Object()) {\n\t\tt.Errorf(\"Actor %#v different than expected %#v\", c1.Activity.Actor.Object(), b.Object())\n\t}\n\tif !reflect.DeepEqual(c1.Activity.Actor, *b) {\n\t\tt.Errorf(\"Actor %#v\\n\\n different than expected\\n\\n %#v\", c1.Activity.Actor, *b)\n\t}\n\tif c1.Activity.Object.Object().ID != n.ID {\n\t\tt.Errorf(\"Object %q different than expected %q\", c1.Activity.Object.Object().ID, n.ID)\n\t}\n\tif !reflect.DeepEqual(c1.Activity.Object.Object(), n.Object()) {\n\t\tt.Errorf(\"Object %#v different than expected %#v\", c1.Activity.Object.Object(), n.Object())\n\t}\n\tif !reflect.DeepEqual(c1.Activity.Object, n) {\n\t\tt.Errorf(\"Object %#v different than expected %#v\", c1.Activity.Object, n)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package server_test\n\nimport (\n\t\"context\"\n\n\t\"github.com\/cri-o\/cri-o\/internal\/oci\"\n\t\"github.com\/cri-o\/cri-o\/server\/cri\/types\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\tspecs \"github.com\/opencontainers\/runtime-spec\/specs-go\"\n)\n\n\/\/ The actual test suite\nvar _ = t.Describe(\"ListPodSandbox\", func() {\n\t\/\/ Prepare the sut\n\tBeforeEach(func() {\n\t\tbeforeEach()\n\t\tsetupSUT()\n\t})\n\n\tAfterEach(afterEach)\n\n\tt.Describe(\"ListPodSandbox\", func() {\n\t\tIt(\"should succeed\", func() {\n\t\t\t\/\/ Given\n\t\t\tExpect(sut.AddSandbox(testSandbox)).To(BeNil())\n\t\t\ttestContainer.SetState(&oci.ContainerState{\n\t\t\t\tState: specs.State{Status: oci.ContainerStateRunning},\n\t\t\t})\n\t\t\ttestSandbox.SetCreated()\n\t\t\tExpect(testSandbox.SetInfraContainer(testContainer)).To(BeNil())\n\n\t\t\t\/\/ When\n\t\t\tresponse, err := sut.ListPodSandbox(context.Background(),\n\t\t\t\t&types.ListPodSandboxRequest{})\n\n\t\t\t\/\/ Then\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(response).NotTo(BeNil())\n\t\t\tExpect(len(response.Items)).To(BeEquivalentTo(1))\n\t\t})\n\n\t\tIt(\"should succeed without infra container\", func() {\n\t\t\t\/\/ Given\n\t\t\tExpect(sut.AddSandbox(testSandbox)).To(BeNil())\n\t\t\ttestSandbox.SetCreated()\n\n\t\t\t\/\/ When\n\t\t\tresponse, err := sut.ListPodSandbox(context.Background(),\n\t\t\t\t&types.ListPodSandboxRequest{})\n\n\t\t\t\/\/ Then\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(response).NotTo(BeNil())\n\t\t\t\/\/ the sandbox is created, and even though it has no infra container, it should be displayed\n\t\t\tExpect(len(response.Items)).To(Equal(1))\n\t\t})\n\n\t\tIt(\"should skip not created sandboxes\", func() {\n\t\t\t\/\/ Given\n\t\t\tExpect(sut.AddSandbox(testSandbox)).To(BeNil())\n\t\t\tExpect(testSandbox.SetInfraContainer(testContainer)).To(BeNil())\n\n\t\t\t\/\/ When\n\t\t\tresponse, err := sut.ListPodSandbox(context.Background(),\n\t\t\t\t&types.ListPodSandboxRequest{})\n\n\t\t\t\/\/ Then\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(response).NotTo(BeNil())\n\t\t\tExpect(len(response.Items)).To(BeZero())\n\t\t})\n\n\t\tIt(\"should succeed with filter\", func() {\n\t\t\t\/\/ Given\n\t\t\tmockDirs(testManifest)\n\t\t\tcreateDummyState()\n\t\t\tExpect(sut.LoadSandbox(context.Background(), sandboxID)).To(BeNil())\n\n\t\t\t\/\/ When\n\t\t\tresponse, err := sut.ListPodSandbox(context.Background(),\n\t\t\t\t&types.ListPodSandboxRequest{Filter: &types.PodSandboxFilter{\n\t\t\t\t\tID: sandboxID,\n\t\t\t\t}})\n\n\t\t\t\/\/ Then\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(response).NotTo(BeNil())\n\t\t\tExpect(len(response.Items)).To(BeEquivalentTo(1))\n\t\t})\n\n\t\tIt(\"should succeed with filter for state\", func() {\n\t\t\t\/\/ Given\n\t\t\tmockDirs(testManifest)\n\t\t\tcreateDummyState()\n\t\t\tExpect(sut.LoadSandbox(context.Background(), sandboxID)).To(BeNil())\n\n\t\t\t\/\/ When\n\t\t\tresponse, err := sut.ListPodSandbox(context.Background(),\n\t\t\t\t&types.ListPodSandboxRequest{Filter: &types.PodSandboxFilter{\n\t\t\t\t\tID: sandboxID,\n\t\t\t\t\tState: &types.PodSandboxStateValue{\n\t\t\t\t\t\tState: types.PodSandboxStateSandboxReady,\n\t\t\t\t\t},\n\t\t\t\t}})\n\n\t\t\t\/\/ Then\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(response).NotTo(BeNil())\n\t\t\tExpect(len(response.Items)).To(BeZero())\n\t\t})\n\n\t\tIt(\"should succeed with filter for label\", func() {\n\t\t\t\/\/ Given\n\t\t\tmockDirs(testManifest)\n\t\t\tcreateDummyState()\n\t\t\tExpect(sut.LoadSandbox(context.Background(), sandboxID)).To(BeNil())\n\n\t\t\t\/\/ When\n\t\t\tresponse, err := sut.ListPodSandbox(context.Background(),\n\t\t\t\t&types.ListPodSandboxRequest{Filter: &types.PodSandboxFilter{\n\t\t\t\t\tID: sandboxID,\n\t\t\t\t\tLabelSelector: map[string]string{\"label\": \"value\"},\n\t\t\t\t}})\n\n\t\t\t\/\/ Then\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(response).NotTo(BeNil())\n\t\t\tExpect(len(response.Items)).To(BeZero())\n\t\t})\n\n\t\tIt(\"should succeed with filter but when not finding id\", func() {\n\t\t\t\/\/ Given\n\t\t\tExpect(sut.AddSandbox(testSandbox)).To(BeNil())\n\n\t\t\t\/\/ When\n\t\t\tresponse, err := sut.ListPodSandbox(context.Background(),\n\t\t\t\t&types.ListPodSandboxRequest{Filter: &types.PodSandboxFilter{\n\t\t\t\t\tID: sandboxID,\n\t\t\t\t}})\n\n\t\t\t\/\/ Then\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(response).NotTo(BeNil())\n\t\t\tExpect(len(response.Items)).To(BeZero())\n\t\t})\n\t})\n})\n<commit_msg>Fix unit tests<commit_after>package server_test\n\nimport (\n\t\"context\"\n\n\t\"github.com\/cri-o\/cri-o\/internal\/oci\"\n\t\"github.com\/cri-o\/cri-o\/server\/cri\/types\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\tspecs \"github.com\/opencontainers\/runtime-spec\/specs-go\"\n)\n\n\/\/ The actual test suite\nvar _ = t.Describe(\"ListPodSandbox\", func() {\n\t\/\/ Prepare the sut\n\tBeforeEach(func() {\n\t\tbeforeEach()\n\t\tsetupSUT()\n\t})\n\n\tAfterEach(afterEach)\n\n\tt.Describe(\"ListPodSandbox\", func() {\n\t\tIt(\"should succeed\", func() {\n\t\t\t\/\/ Given\n\t\t\tExpect(sut.AddSandbox(testSandbox)).To(BeNil())\n\t\t\ttestContainer.SetState(&oci.ContainerState{\n\t\t\t\tState: specs.State{Status: oci.ContainerStateRunning},\n\t\t\t})\n\t\t\ttestSandbox.SetCreated()\n\t\t\tExpect(testSandbox.SetInfraContainer(testContainer)).To(BeNil())\n\n\t\t\t\/\/ When\n\t\t\tresponse, err := sut.ListPodSandbox(context.Background(),\n\t\t\t\t&types.ListPodSandboxRequest{})\n\n\t\t\t\/\/ Then\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(response).NotTo(BeNil())\n\t\t\tExpect(len(response.Items)).To(BeEquivalentTo(1))\n\t\t})\n\n\t\tIt(\"should succeed without infra container\", func() {\n\t\t\t\/\/ Given\n\t\t\tExpect(sut.AddSandbox(testSandbox)).To(BeNil())\n\t\t\ttestSandbox.SetCreated()\n\n\t\t\t\/\/ When\n\t\t\tresponse, err := sut.ListPodSandbox(context.Background(),\n\t\t\t\t&types.ListPodSandboxRequest{})\n\n\t\t\t\/\/ Then\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(response).NotTo(BeNil())\n\t\t\t\/\/ the sandbox is created, and even though it has no infra container, it should be displayed\n\t\t\tExpect(len(response.Items)).To(Equal(1))\n\t\t})\n\n\t\tIt(\"should skip not created sandboxes\", func() {\n\t\t\t\/\/ Given\n\t\t\tExpect(sut.AddSandbox(testSandbox)).To(BeNil())\n\t\t\tExpect(testSandbox.SetInfraContainer(testContainer)).To(BeNil())\n\n\t\t\t\/\/ When\n\t\t\tresponse, err := sut.ListPodSandbox(context.Background(),\n\t\t\t\t&types.ListPodSandboxRequest{})\n\n\t\t\t\/\/ Then\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(response).NotTo(BeNil())\n\t\t\tExpect(len(response.Items)).To(BeZero())\n\t\t})\n\n\t\tIt(\"should succeed with filter\", func() {\n\t\t\t\/\/ Given\n\t\t\tmockDirs(testManifest)\n\t\t\tcreateDummyState()\n\t\t\t_, err := sut.LoadSandbox(context.Background(), sandboxID)\n\t\t\tExpect(err).To(BeNil())\n\n\t\t\t\/\/ When\n\t\t\tresponse, err := sut.ListPodSandbox(context.Background(),\n\t\t\t\t&types.ListPodSandboxRequest{Filter: &types.PodSandboxFilter{\n\t\t\t\t\tID: sandboxID,\n\t\t\t\t}})\n\n\t\t\t\/\/ Then\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(response).NotTo(BeNil())\n\t\t\tExpect(len(response.Items)).To(BeEquivalentTo(1))\n\t\t})\n\n\t\tIt(\"should succeed with filter for state\", func() {\n\t\t\t\/\/ Given\n\t\t\tmockDirs(testManifest)\n\t\t\tcreateDummyState()\n\t\t\t_, err := sut.LoadSandbox(context.Background(), sandboxID)\n\t\t\tExpect(err).To(BeNil())\n\n\t\t\t\/\/ When\n\t\t\tresponse, err := sut.ListPodSandbox(context.Background(),\n\t\t\t\t&types.ListPodSandboxRequest{Filter: &types.PodSandboxFilter{\n\t\t\t\t\tID: sandboxID,\n\t\t\t\t\tState: &types.PodSandboxStateValue{\n\t\t\t\t\t\tState: types.PodSandboxStateSandboxReady,\n\t\t\t\t\t},\n\t\t\t\t}})\n\n\t\t\t\/\/ Then\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(response).NotTo(BeNil())\n\t\t\tExpect(len(response.Items)).To(BeZero())\n\t\t})\n\n\t\tIt(\"should succeed with filter for label\", func() {\n\t\t\t\/\/ Given\n\t\t\tmockDirs(testManifest)\n\t\t\tcreateDummyState()\n\t\t\t_, err := sut.LoadSandbox(context.Background(), sandboxID)\n\t\t\tExpect(err).To(BeNil())\n\n\t\t\t\/\/ When\n\t\t\tresponse, err := sut.ListPodSandbox(context.Background(),\n\t\t\t\t&types.ListPodSandboxRequest{Filter: &types.PodSandboxFilter{\n\t\t\t\t\tID: sandboxID,\n\t\t\t\t\tLabelSelector: map[string]string{\"label\": \"value\"},\n\t\t\t\t}})\n\n\t\t\t\/\/ Then\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(response).NotTo(BeNil())\n\t\t\tExpect(len(response.Items)).To(BeZero())\n\t\t})\n\n\t\tIt(\"should succeed with filter but when not finding id\", func() {\n\t\t\t\/\/ Given\n\t\t\tExpect(sut.AddSandbox(testSandbox)).To(BeNil())\n\n\t\t\t\/\/ When\n\t\t\tresponse, err := sut.ListPodSandbox(context.Background(),\n\t\t\t\t&types.ListPodSandboxRequest{Filter: &types.PodSandboxFilter{\n\t\t\t\t\tID: sandboxID,\n\t\t\t\t}})\n\n\t\t\t\/\/ Then\n\t\t\tExpect(err).To(BeNil())\n\t\t\tExpect(response).NotTo(BeNil())\n\t\t\tExpect(len(response.Items)).To(BeZero())\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package detector\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/golang\/glog\"\n\tmesos \"github.com\/mesos\/mesos-go\/mesosproto\"\n\t\"github.com\/mesos\/mesos-go\/upid\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tmesosHttpClientTimeout = 10 * time.Second \/\/TODO(jdef) configurable via fiag?\n\tmesosLeaderSyncInterval = 30 * time.Second \/\/TODO(jdef) configurable via fiag?\n\tdefaultMesosMasterPort = 5050\n)\n\ntype Standalone struct {\n\tch chan *mesos.MasterInfo\n\tclient *http.Client\n\ttr *http.Transport\n\tpollOnce sync.Once\n\tinitial *mesos.MasterInfo\n\tdone chan struct{}\n\tcancelOnce sync.Once\n}\n\n\/\/ Create a new stand alone master detector.\nfunc NewStandalone(mi *mesos.MasterInfo) *Standalone {\n\tlog.V(2).Infof(\"creating new standalone detector for %+v\", mi)\n\tch := make(chan *mesos.MasterInfo)\n\ttr := &http.Transport{}\n\treturn &Standalone{\n\t\tch: ch,\n\t\tclient: &http.Client{\n\t\t\tTransport: tr,\n\t\t\tTimeout: mesosHttpClientTimeout,\n\t\t},\n\t\ttr: tr,\n\t\tinitial: mi,\n\t\tdone: make(chan struct{}),\n\t}\n}\n\nfunc (s *Standalone) String() string {\n\treturn fmt.Sprintf(\"{initial: %+v}\", s.initial)\n}\n\n\/\/ Detecting the new master.\nfunc (s *Standalone) Detect(o MasterChanged) error {\n\tlog.V(2).Info(\"Detect()\")\n\ts.pollOnce.Do(func() {\n\t\tlog.V(1).Info(\"spinning up asyc master detector poller\")\n\t\tgo s.poller()\n\t})\n\tif o != nil {\n\t\tlog.V(1).Info(\"spawning asyc master detector listener\")\n\t\tgo func() {\n\t\t\tlog.V(2).Infof(\"waiting for polled to send updates\")\n\t\tpollWaiter:\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase mi, ok := <-s.ch:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tbreak pollWaiter\n\t\t\t\t\t}\n\t\t\t\t\tlog.V(1).Infof(\"detected master change: %+v\", mi)\n\t\t\t\t\to.Notify(mi)\n\t\t\t\tcase <-s.done:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\to.Notify(nil)\n\t\t}()\n\t} else {\n\t\tlog.Warningf(\"detect called with a nil master change listener\")\n\t}\n\treturn nil\n}\n\nfunc (s *Standalone) Done() <-chan struct{} {\n\treturn s.done\n}\n\nfunc (s *Standalone) Cancel() {\n\ts.cancelOnce.Do(func() { close(s.done) })\n}\n\n\/\/ poll for changes to master leadership\nfunc (s *Standalone) poller() {\n\tif s.initial == nil {\n\t\tlog.Warningf(\"aborting master poller since initial master info is nil\")\n\t\treturn\n\t}\n\t\/\/TODO(jdef) we could attempt to unpack IP addres if Host=\"\"\n\tif s.initial.Hostname != nil && len(*s.initial.Hostname) == 0 {\n\t\tlog.Warningf(\"aborted mater poller since initial master info has no host\")\n\t\treturn\n\t}\n\taddr := *s.initial.Hostname\n\tport := uint32(defaultMesosMasterPort)\n\tif s.initial.Port != nil && *s.initial.Port != 0 {\n\t\tport = *s.initial.Port\n\t}\n\taddr = net.JoinHostPort(addr, strconv.Itoa(int(port)))\n\tlog.V(1).Infof(\"polling for master leadership at '%v'\", addr)\n\tvar lastpid *upid.UPID\n\tfor {\n\t\tstartedAt := time.Now()\n\t\tctx, cancel := context.WithTimeout(context.Background(), mesosLeaderSyncInterval)\n\t\tif pid, err := s.fetchPid(ctx, addr); err == nil {\n\t\t\tif !pid.Equal(lastpid) {\n\t\t\t\tlog.V(2).Infof(\"detected leadership change from '%v' to '%v'\", lastpid, pid)\n\t\t\t\tlastpid = pid\n\t\t\t\telapsed := time.Now().Sub(startedAt)\n\t\t\t\tmi := CreateMasterInfo(pid)\n\t\t\t\tselect {\n\t\t\t\tcase s.ch <- mi:\n\t\t\t\t\t\/\/ noop\n\t\t\t\tcase <-time.After(mesosLeaderSyncInterval - elapsed):\n\t\t\t\t\t\/\/ no one heard the master change, oh well - poll again\n\t\t\t\t\tcancel()\n\t\t\t\t\tcontinue\n\t\t\t\tcase <-s.done:\n\t\t\t\t\tcancel()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.V(2).Infof(\"no change to master leadership: '%v'\", lastpid)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Warning(err)\n\t\t}\n\t\tremaining := mesosLeaderSyncInterval - time.Now().Sub(startedAt)\n\t\tlog.V(3).Infof(\"master leader poller sleeping for %v\", remaining)\n\t\ttime.Sleep(remaining)\n\t\tcancel()\n\t}\n}\n\nfunc (s *Standalone) fetchPid(ctx context.Context, address string) (*upid.UPID, error) {\n\t\/\/TODO(jdef) need better address parsing, for now assume host, or host:port format\n\t\/\/TODO(jdef) need SSL support\n\turi := fmt.Sprintf(\"http:\/\/%s\/state.json\", address)\n\treq, err := http.NewRequest(\"GET\", uri, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar pid *upid.UPID\n\terr = s.httpDo(ctx, req, func(res *http.Response, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer res.Body.Close()\n\t\tif res.StatusCode != 200 {\n\t\t\treturn fmt.Errorf(\"HTTP request failed with code %d: %v\", res.StatusCode, res.Status)\n\t\t}\n\t\tblob, err1 := ioutil.ReadAll(res.Body)\n\t\tif err1 != nil {\n\t\t\treturn err1\n\t\t}\n\t\tlog.V(3).Infof(\"Got mesos state, content length %v\", len(blob))\n\t\ttype State struct {\n\t\t\tLeader string `json:\"leader\"` \/\/ ex: master(1)@10.22.211.18:5050\n\t\t}\n\t\tstate := &State{}\n\t\terr = json.Unmarshal(blob, state)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpid, err = upid.Parse(state.Leader)\n\t\treturn err\n\t})\n\treturn pid, err\n}\n\ntype responseHandler func(*http.Response, error) error\n\n\/\/ hacked from https:\/\/blog.golang.org\/context\nfunc (s *Standalone) httpDo(ctx context.Context, req *http.Request, f responseHandler) error {\n\t\/\/ Run the HTTP request in a goroutine and pass the response to f.\n\tch := make(chan error, 1)\n\tgo func() { ch <- f(s.client.Do(req)) }()\n\tselect {\n\tcase <-ctx.Done():\n\t\ts.tr.CancelRequest(req)\n\t\t<-ch \/\/ Wait for f to return.\n\t\treturn ctx.Err()\n\tcase err := <-ch:\n\t\treturn err\n\t}\n}\n<commit_msg>added docs for standalone poller<commit_after>package detector\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/golang\/glog\"\n\tmesos \"github.com\/mesos\/mesos-go\/mesosproto\"\n\t\"github.com\/mesos\/mesos-go\/upid\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\tmesosHttpClientTimeout = 10 * time.Second \/\/TODO(jdef) configurable via fiag?\n\tmesosLeaderSyncInterval = 30 * time.Second \/\/TODO(jdef) configurable via fiag?\n\tdefaultMesosMasterPort = 5050\n)\n\ntype Standalone struct {\n\tch chan *mesos.MasterInfo\n\tclient *http.Client\n\ttr *http.Transport\n\tpollOnce sync.Once\n\tinitial *mesos.MasterInfo\n\tdone chan struct{}\n\tcancelOnce sync.Once\n}\n\n\/\/ Create a new stand alone master detector.\nfunc NewStandalone(mi *mesos.MasterInfo) *Standalone {\n\tlog.V(2).Infof(\"creating new standalone detector for %+v\", mi)\n\tch := make(chan *mesos.MasterInfo)\n\ttr := &http.Transport{}\n\treturn &Standalone{\n\t\tch: ch,\n\t\tclient: &http.Client{\n\t\t\tTransport: tr,\n\t\t\tTimeout: mesosHttpClientTimeout,\n\t\t},\n\t\ttr: tr,\n\t\tinitial: mi,\n\t\tdone: make(chan struct{}),\n\t}\n}\n\nfunc (s *Standalone) String() string {\n\treturn fmt.Sprintf(\"{initial: %+v}\", s.initial)\n}\n\n\/\/ Detecting the new master.\nfunc (s *Standalone) Detect(o MasterChanged) error {\n\tlog.V(2).Info(\"Detect()\")\n\ts.pollOnce.Do(func() {\n\t\tlog.V(1).Info(\"spinning up asyc master detector poller\")\n\t\tgo s.poller()\n\t})\n\tif o != nil {\n\t\tlog.V(1).Info(\"spawning asyc master detector listener\")\n\t\tgo func() {\n\t\t\tlog.V(2).Infof(\"waiting for polled to send updates\")\n\t\tpollWaiter:\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase mi, ok := <-s.ch:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tbreak pollWaiter\n\t\t\t\t\t}\n\t\t\t\t\tlog.V(1).Infof(\"detected master change: %+v\", mi)\n\t\t\t\t\to.Notify(mi)\n\t\t\t\tcase <-s.done:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\to.Notify(nil)\n\t\t}()\n\t} else {\n\t\tlog.Warningf(\"detect called with a nil master change listener\")\n\t}\n\treturn nil\n}\n\nfunc (s *Standalone) Done() <-chan struct{} {\n\treturn s.done\n}\n\nfunc (s *Standalone) Cancel() {\n\ts.cancelOnce.Do(func() { close(s.done) })\n}\n\n\/\/ poll for changes to master leadership via current leader's \/state.json endpoint.\n\/\/ we start with the `initial` leader, aborting if none was specified. thereafter,\n\/\/ the `leader` property of the state.json is used to identify the next leader that\n\/\/ should be polled.\n\/\/\n\/\/ TODO(jdef) somehow determine all masters in cluster from the state.json?\n\/\/\nfunc (s *Standalone) poller() {\n\tif s.initial == nil {\n\t\tlog.Warningf(\"aborting master poller since initial master info is nil\")\n\t\treturn\n\t}\n\t\/\/TODO(jdef) we could attempt to unpack IP addres if Host=\"\"\n\tif s.initial.Hostname != nil && len(*s.initial.Hostname) == 0 {\n\t\tlog.Warningf(\"aborted mater poller since initial master info has no host\")\n\t\treturn\n\t}\n\taddr := *s.initial.Hostname\n\tport := uint32(defaultMesosMasterPort)\n\tif s.initial.Port != nil && *s.initial.Port != 0 {\n\t\tport = *s.initial.Port\n\t}\n\taddr = net.JoinHostPort(addr, strconv.Itoa(int(port)))\n\tlog.V(1).Infof(\"polling for master leadership at '%v'\", addr)\n\tvar lastpid *upid.UPID\n\tfor {\n\t\tstartedAt := time.Now()\n\t\tctx, cancel := context.WithTimeout(context.Background(), mesosLeaderSyncInterval)\n\t\tif pid, err := s.fetchPid(ctx, addr); err == nil {\n\t\t\tif !pid.Equal(lastpid) {\n\t\t\t\tlog.V(2).Infof(\"detected leadership change from '%v' to '%v'\", lastpid, pid)\n\t\t\t\tlastpid = pid\n\t\t\t\telapsed := time.Now().Sub(startedAt)\n\t\t\t\tmi := CreateMasterInfo(pid)\n\t\t\t\tselect {\n\t\t\t\tcase s.ch <- mi:\n\t\t\t\t\t\/\/ noop\n\t\t\t\tcase <-time.After(mesosLeaderSyncInterval - elapsed):\n\t\t\t\t\t\/\/ no one heard the master change, oh well - poll again\n\t\t\t\t\tcancel()\n\t\t\t\t\tcontinue\n\t\t\t\tcase <-s.done:\n\t\t\t\t\tcancel()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.V(2).Infof(\"no change to master leadership: '%v'\", lastpid)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Warning(err)\n\t\t}\n\t\tremaining := mesosLeaderSyncInterval - time.Now().Sub(startedAt)\n\t\tlog.V(3).Infof(\"master leader poller sleeping for %v\", remaining)\n\t\ttime.Sleep(remaining)\n\t\tcancel()\n\t}\n}\n\nfunc (s *Standalone) fetchPid(ctx context.Context, address string) (*upid.UPID, error) {\n\t\/\/TODO(jdef) need better address parsing, for now assume host, or host:port format\n\t\/\/TODO(jdef) need SSL support\n\turi := fmt.Sprintf(\"http:\/\/%s\/state.json\", address)\n\treq, err := http.NewRequest(\"GET\", uri, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar pid *upid.UPID\n\terr = s.httpDo(ctx, req, func(res *http.Response, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer res.Body.Close()\n\t\tif res.StatusCode != 200 {\n\t\t\treturn fmt.Errorf(\"HTTP request failed with code %d: %v\", res.StatusCode, res.Status)\n\t\t}\n\t\tblob, err1 := ioutil.ReadAll(res.Body)\n\t\tif err1 != nil {\n\t\t\treturn err1\n\t\t}\n\t\tlog.V(3).Infof(\"Got mesos state, content length %v\", len(blob))\n\t\ttype State struct {\n\t\t\tLeader string `json:\"leader\"` \/\/ ex: master(1)@10.22.211.18:5050\n\t\t}\n\t\tstate := &State{}\n\t\terr = json.Unmarshal(blob, state)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpid, err = upid.Parse(state.Leader)\n\t\treturn err\n\t})\n\treturn pid, err\n}\n\ntype responseHandler func(*http.Response, error) error\n\n\/\/ hacked from https:\/\/blog.golang.org\/context\nfunc (s *Standalone) httpDo(ctx context.Context, req *http.Request, f responseHandler) error {\n\t\/\/ Run the HTTP request in a goroutine and pass the response to f.\n\tch := make(chan error, 1)\n\tgo func() { ch <- f(s.client.Do(req)) }()\n\tselect {\n\tcase <-ctx.Done():\n\t\ts.tr.CancelRequest(req)\n\t\t<-ch \/\/ Wait for f to return.\n\t\treturn ctx.Err()\n\tcase err := <-ch:\n\t\treturn err\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2021 Google LLC. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package client provides client support for interacting with a serverless\n\/\/ log.\n\/\/\n\/\/ See the \/cmd\/client package in this repo for an example of using this.\npackage client\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/google\/trillian-examples\/formats\/log\"\n\t\"github.com\/google\/trillian-examples\/serverless\/api\"\n\t\"github.com\/google\/trillian-examples\/serverless\/api\/layout\"\n\t\"github.com\/google\/trillian\/merkle\"\n\t\"github.com\/google\/trillian\/merkle\/compact\"\n\t\"github.com\/google\/trillian\/merkle\/hashers\"\n\t\"github.com\/google\/trillian\/merkle\/logverifier\"\n\t\"golang.org\/x\/mod\/sumdb\/note\"\n)\n\n\/\/ Fetcher is the signature of a function which can retrieve arbitrary files from\n\/\/ a log's data storage, via whatever appropriate mechanism.\n\/\/ The path parameter is relative to the root of the log storage.\ntype Fetcher func(path string) ([]byte, error)\n\nfunc fetchCheckpointAndParse(f Fetcher, v note.Verifier) (*log.Checkpoint, []byte, error) {\n\tcpRaw, err := f(layout.CheckpointPath)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tn, err := note.Open(cpRaw, note.VerifierList(v))\n\tif err != nil {\n\t\tglog.Exitf(\"failed to open Checkpoint: %q\", err)\n\t}\n\tcp := log.Checkpoint{}\n\tif _, err := cp.Unmarshal([]byte(n.Text)); err != nil {\n\t\tglog.V(1).Infof(\"Bad checkpoint: %q\", cpRaw)\n\t\treturn nil, nil, fmt.Errorf(\"failed to unmarshal checkpoint: %w\", err)\n\t}\n\treturn &cp, cpRaw, nil\n}\n\n\/\/ ProofBuilder knows how to build inclusion and consistency proofs from tiles.\n\/\/ Since the tiles commit only to immutable nodes, the job of building proofs is slightly\n\/\/ more complex as proofs can touch \"ephemeral\" nodes, so these need to be synthesized.\ntype ProofBuilder struct {\n\tcp log.Checkpoint\n\tnodeCache nodeCache\n\th compact.HashFn\n}\n\n\/\/ NewProofBuilder creates a new ProofBuilder object for a given tree size.\n\/\/ The returned ProofBuilder can be re-used for proofs related to a given tree size, but\n\/\/ it is not thread-safe and should not be accessed concurrently.\nfunc NewProofBuilder(cp log.Checkpoint, h compact.HashFn, f Fetcher) (*ProofBuilder, error) {\n\ttf := newTileFetcher(f, cp.Size)\n\tpb := &ProofBuilder{\n\t\tcp: cp,\n\t\tnodeCache: newNodeCache(tf, cp.Size),\n\t\th: h,\n\t}\n\n\thashes, err := FetchRangeNodes(cp.Size, tf)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to fetch range nodes: %w\", err)\n\t}\n\t\/\/ Create a compact range which represents the state of the log.\n\tr, err := (&compact.RangeFactory{Hash: h}).NewRange(0, cp.Size, hashes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Recreate the root hash so that:\n\t\/\/ a) we validate the self-integrity of the log state, and\n\t\/\/ b) we calculate (and cache) and ephemeral nodes present in the tree,\n\t\/\/ this is important since they could be required by proofs.\n\tsr, err := r.GetRootHash(pb.nodeCache.SetEphemeralNode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !bytes.Equal(cp.Hash, sr) {\n\t\treturn nil, fmt.Errorf(\"invalid checkpoint hash %x, expected %x\", cp.Hash, sr)\n\t}\n\treturn pb, nil\n}\n\n\/\/ InclusionProof constructs an inclusion proof for the leaf at index in a tree of\n\/\/ the given size.\n\/\/ This function uses the passed-in function to retrieve tiles containing any log tree\n\/\/ nodes necessary to build the proof.\nfunc (pb *ProofBuilder) InclusionProof(index uint64) ([][]byte, error) {\n\tnodes, err := merkle.CalcInclusionProofNodeAddresses(int64(pb.cp.Size), int64(index), int64(pb.cp.Size))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to calculate inclusion proof node list: %w\", err)\n\t}\n\n\tret := make([][]byte, 0)\n\t\/\/ TODO(al) parallelise this.\n\tfor _, n := range nodes {\n\t\th, err := pb.nodeCache.GetNode(n.ID)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to get node (%v): %w\", n.ID, err)\n\t\t}\n\t\tret = append(ret, h)\n\t}\n\treturn ret, nil\n}\n\n\/\/ ConsistencyProof constructs a consistency proof between the two passed in tree sizes.\n\/\/ This function uses the passed-in function to retrieve tiles containing any log tree\n\/\/ nodes necessary to build the proof.\nfunc (pb *ProofBuilder) ConsistencyProof(smaller, larger uint64) ([][]byte, error) {\n\tnodes, err := merkle.CalcConsistencyProofNodeAddresses(int64(smaller), int64(larger), int64(pb.cp.Size))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to calculate consistency proof node list: %w\", err)\n\t}\n\n\thashes := make([][]byte, 0)\n\t\/\/ TODO(al) parallelise this.\n\tfor _, n := range nodes {\n\t\th, err := pb.nodeCache.GetNode(n.ID)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to get node (%v): %w\", n.ID, err)\n\t\t}\n\t\thashes = append(hashes, h)\n\t}\n\treturn hashes, nil\n}\n\n\/\/ FetchRangeNodes returns the set of nodes representing the compact range covering\n\/\/ a log of size s.\nfunc FetchRangeNodes(s uint64, gt GetTileFunc) ([][]byte, error) {\n\tnc := newNodeCache(gt, s)\n\tnIDs := compact.RangeNodes(0, s)\n\tret := make([][]byte, len(nIDs))\n\tfor i, n := range nIDs {\n\t\th, err := nc.GetNode(n)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tret[i] = h\n\t}\n\treturn ret, nil\n}\n\n\/\/ FetchLeafHashes fetches N consecutive leaf hashes starting with the leaf at index first.\nfunc FetchLeafHashes(f Fetcher, first, N, logSize uint64) ([][]byte, error) {\n\tnc := newNodeCache(newTileFetcher(f, logSize), logSize)\n\tret := make([][]byte, N, 0)\n\tfor i, seq := uint64(0), first; i < N; i, seq = i+1, seq+1 {\n\t\tnID := compact.NodeID{Level: 0, Index: seq}\n\t\th, err := nc.GetNode(nID)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to fetch node %v: %v\", nID, err)\n\t\t}\n\t\tret = append(ret, h)\n\t}\n\treturn ret, nil\n}\n\n\/\/ nodeCache hides the tiles abstraction away, and improves\n\/\/ performance by caching tiles it's seen.\n\/\/ Not threadsafe, and intended to be only used throughout the course\n\/\/ of a single request.\ntype nodeCache struct {\n\tlogSize uint64\n\tephemeral map[compact.NodeID][]byte\n\ttiles map[tileKey]api.Tile\n\tgetTile GetTileFunc\n}\n\n\/\/ GetTileFunc is the signature of a function which knows how to fetch a\n\/\/ specific tile.\ntype GetTileFunc func(level, index uint64) (*api.Tile, error)\n\n\/\/ tileKey is used as a key in nodeCache's tile map.\ntype tileKey struct {\n\ttileLevel uint64\n\ttileIndex uint64\n}\n\n\/\/ newNodeCache creates a new nodeCache instance for a given log size.\nfunc newNodeCache(f GetTileFunc, logSize uint64) nodeCache {\n\treturn nodeCache{\n\t\tlogSize: logSize,\n\t\tephemeral: make(map[compact.NodeID][]byte),\n\t\ttiles: make(map[tileKey]api.Tile),\n\t\tgetTile: f,\n\t}\n}\n\n\/\/ SetEphemeralNode stored a derived \"ephemeral\" tree node.\nfunc (n *nodeCache) SetEphemeralNode(id compact.NodeID, h []byte) {\n\tn.ephemeral[id] = h\n}\n\n\/\/ GetNode returns the internal log tree node hash for the specified node ID.\n\/\/ A previously set ephemeral node will be returned if id matches, otherwise\n\/\/ the tile containing the requested node will be fetched and cached, and the\n\/\/ node hash returned.\nfunc (n *nodeCache) GetNode(id compact.NodeID) ([]byte, error) {\n\t\/\/ First check for ephemeral nodes:\n\tif e := n.ephemeral[id]; len(e) != 0 {\n\t\treturn e, nil\n\t}\n\t\/\/ Otherwise look in fetched tiles:\n\ttileLevel, tileIndex, nodeLevel, nodeIndex := layout.NodeCoordsToTileAddress(uint64(id.Level), uint64(id.Index))\n\ttKey := tileKey{tileLevel, tileIndex}\n\tt, ok := n.tiles[tKey]\n\tif !ok {\n\t\ttile, err := n.getTile(tileLevel, tileIndex)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to fetch tile: %w\", err)\n\t\t}\n\t\tt = *tile\n\t\tn.tiles[tKey] = *tile\n\t}\n\tnode := t.Nodes[api.TileNodeKey(nodeLevel, nodeIndex)]\n\tif node == nil {\n\t\treturn nil, fmt.Errorf(\"node %v (tile coords [%d,%d]\/[%d,%d]) unknown\", id, tileLevel, tileIndex, nodeLevel, nodeIndex)\n\t}\n\treturn node, nil\n}\n\n\/\/ newTileFetcher returns a GetTileFunc based on the passed in Fetcher and log size.\nfunc newTileFetcher(f Fetcher, logSize uint64) GetTileFunc {\n\treturn func(level, index uint64) (*api.Tile, error) {\n\t\ttileSize := layout.PartialTileSize(level, index, logSize)\n\t\tp := filepath.Join(layout.TilePath(\"\", level, index, tileSize))\n\t\tt, err := f(p)\n\t\tif err != nil {\n\t\t\tif !errors.Is(err, os.ErrNotExist) {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to read tile at %q: %w\", p, err)\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar tile api.Tile\n\t\tif err := tile.UnmarshalText(t); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse tile: %w\", err)\n\t\t}\n\t\treturn &tile, nil\n\t}\n}\n\n\/\/ LookupIndex fetches the leafhash->seq mapping file from the log, and returns\n\/\/ its parsed contents.\nfunc LookupIndex(f Fetcher, lh []byte) (uint64, error) {\n\tp := filepath.Join(layout.LeafPath(\"\", lh))\n\tsRaw, err := f(p)\n\tif err != nil {\n\t\tif errors.Is(err, os.ErrNotExist) {\n\t\t\treturn 0, fmt.Errorf(\"leafhash unknown (%w)\", err)\n\t\t}\n\t\treturn 0, fmt.Errorf(\"failed to fetch leafhash->seq file: %w\", err)\n\t}\n\treturn strconv.ParseUint(string(sRaw), 16, 64)\n}\n\n\/\/ LogStateTracker represents a client-side view of a target log's state.\n\/\/ This tracker handles verification that updates to the tracked log state are\n\/\/ consistent with previously seen states.\ntype LogStateTracker struct {\n\tHasher hashers.LogHasher\n\tVerifier logverifier.LogVerifier\n\tFetcher Fetcher\n\n\t\/\/ LatestConsistentRaw holds the raw bytes of the latest proven-consistent\n\t\/\/ LogState seen by this tracker.\n\tLatestConsistentRaw []byte\n\n\t\/\/ LatestConsistent is the deserialised form of LatestConsistentRaw\n\tLatestConsistent log.Checkpoint\n\tCpSigVerifier note.Verifier\n}\n\n\/\/ NewLogStateTracker creates a newly initialised tracker.\n\/\/ If a serialised LogState representation is provided then this is used as the\n\/\/ initial tracked state, otherwise a log state is fetched from the target log.\nfunc NewLogStateTracker(f Fetcher, h hashers.LogHasher, checkpointRaw []byte, nV note.Verifier) (LogStateTracker, error) {\n\n\tret := LogStateTracker{\n\t\tFetcher: f,\n\t\tHasher: h,\n\t\tVerifier: logverifier.New(h),\n\t\tLatestConsistent: log.Checkpoint{},\n\t\tCpSigVerifier: nV,\n\t}\n\tif len(checkpointRaw) > 0 {\n\t\tret.LatestConsistentRaw = checkpointRaw\n\t\tif _, err := ret.LatestConsistent.Unmarshal(checkpointRaw); err != nil {\n\t\t\treturn ret, err\n\t\t}\n\t\treturn ret, nil\n\t}\n\treturn ret, ret.Update()\n}\n\n\/\/ ErrInconsistency should be returned when there has been an error proving consistency\n\/\/ between log states.\n\/\/ The raw log state representations are included as-returned by the target log, this\n\/\/ ensures that evidence of inconsistent log updates are available to the caller of\n\/\/ the method(s) returning this error.\ntype ErrInconsistency struct {\n\tSmallerRaw []byte\n\tLargerRaw []byte\n\tProof [][]byte\n\n\tWrapped error\n}\n\nfunc (e ErrInconsistency) Unwrap() error {\n\treturn e.Wrapped\n}\n\nfunc (e ErrInconsistency) Error() string {\n\treturn fmt.Sprintf(\"log consistency check failed: %s\", e.Wrapped)\n}\n\n\/\/ Update attempts to update the local view of the target log's state.\n\/\/ If a more recent logstate is found, this method will attempt to prove\n\/\/ that it is consistent with the local state before updating the tracker's\n\/\/ view.\nfunc (lst *LogStateTracker) Update() error {\n\tc, cRaw, err := fetchCheckpointAndParse(lst.Fetcher, lst.CpSigVerifier)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif lst.LatestConsistent.Size > 0 {\n\t\tif c.Size > lst.LatestConsistent.Size {\n\t\t\tbuilder, err := NewProofBuilder(*c, lst.Hasher.HashChildren, lst.Fetcher)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to create proof builder: %w\", err)\n\t\t\t}\n\t\t\tp, err := builder.ConsistencyProof(lst.LatestConsistent.Size, c.Size)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tglog.V(1).Infof(\"Built consistency proof %x\", p)\n\t\t\tif err := lst.Verifier.VerifyConsistencyProof(int64(lst.LatestConsistent.Size), int64(c.Size), lst.LatestConsistent.Hash, c.Hash, p); err != nil {\n\t\t\t\treturn ErrInconsistency{\n\t\t\t\t\tSmallerRaw: lst.LatestConsistentRaw,\n\t\t\t\t\tLargerRaw: cRaw,\n\t\t\t\t\tProof: p,\n\t\t\t\t\tWrapped: err,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tlst.LatestConsistentRaw, lst.LatestConsistent = cRaw, *c\n\treturn nil\n}\n<commit_msg>Don't exit on a bad checkpoint<commit_after>\/\/ Copyright 2021 Google LLC. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package client provides client support for interacting with a serverless\n\/\/ log.\n\/\/\n\/\/ See the \/cmd\/client package in this repo for an example of using this.\npackage client\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/google\/trillian-examples\/formats\/log\"\n\t\"github.com\/google\/trillian-examples\/serverless\/api\"\n\t\"github.com\/google\/trillian-examples\/serverless\/api\/layout\"\n\t\"github.com\/google\/trillian\/merkle\"\n\t\"github.com\/google\/trillian\/merkle\/compact\"\n\t\"github.com\/google\/trillian\/merkle\/hashers\"\n\t\"github.com\/google\/trillian\/merkle\/logverifier\"\n\t\"golang.org\/x\/mod\/sumdb\/note\"\n)\n\n\/\/ Fetcher is the signature of a function which can retrieve arbitrary files from\n\/\/ a log's data storage, via whatever appropriate mechanism.\n\/\/ The path parameter is relative to the root of the log storage.\ntype Fetcher func(path string) ([]byte, error)\n\nfunc fetchCheckpointAndParse(f Fetcher, v note.Verifier) (*log.Checkpoint, []byte, error) {\n\tcpRaw, err := f(layout.CheckpointPath)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tn, err := note.Open(cpRaw, note.VerifierList(v))\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"failed to open Checkpoint: %v\", err)\n\t}\n\tcp := log.Checkpoint{}\n\tif _, err := cp.Unmarshal([]byte(n.Text)); err != nil {\n\t\tglog.V(1).Infof(\"Bad checkpoint: %q\", cpRaw)\n\t\treturn nil, nil, fmt.Errorf(\"failed to unmarshal checkpoint: %w\", err)\n\t}\n\treturn &cp, cpRaw, nil\n}\n\n\/\/ ProofBuilder knows how to build inclusion and consistency proofs from tiles.\n\/\/ Since the tiles commit only to immutable nodes, the job of building proofs is slightly\n\/\/ more complex as proofs can touch \"ephemeral\" nodes, so these need to be synthesized.\ntype ProofBuilder struct {\n\tcp log.Checkpoint\n\tnodeCache nodeCache\n\th compact.HashFn\n}\n\n\/\/ NewProofBuilder creates a new ProofBuilder object for a given tree size.\n\/\/ The returned ProofBuilder can be re-used for proofs related to a given tree size, but\n\/\/ it is not thread-safe and should not be accessed concurrently.\nfunc NewProofBuilder(cp log.Checkpoint, h compact.HashFn, f Fetcher) (*ProofBuilder, error) {\n\ttf := newTileFetcher(f, cp.Size)\n\tpb := &ProofBuilder{\n\t\tcp: cp,\n\t\tnodeCache: newNodeCache(tf, cp.Size),\n\t\th: h,\n\t}\n\n\thashes, err := FetchRangeNodes(cp.Size, tf)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to fetch range nodes: %w\", err)\n\t}\n\t\/\/ Create a compact range which represents the state of the log.\n\tr, err := (&compact.RangeFactory{Hash: h}).NewRange(0, cp.Size, hashes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Recreate the root hash so that:\n\t\/\/ a) we validate the self-integrity of the log state, and\n\t\/\/ b) we calculate (and cache) and ephemeral nodes present in the tree,\n\t\/\/ this is important since they could be required by proofs.\n\tsr, err := r.GetRootHash(pb.nodeCache.SetEphemeralNode)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !bytes.Equal(cp.Hash, sr) {\n\t\treturn nil, fmt.Errorf(\"invalid checkpoint hash %x, expected %x\", cp.Hash, sr)\n\t}\n\treturn pb, nil\n}\n\n\/\/ InclusionProof constructs an inclusion proof for the leaf at index in a tree of\n\/\/ the given size.\n\/\/ This function uses the passed-in function to retrieve tiles containing any log tree\n\/\/ nodes necessary to build the proof.\nfunc (pb *ProofBuilder) InclusionProof(index uint64) ([][]byte, error) {\n\tnodes, err := merkle.CalcInclusionProofNodeAddresses(int64(pb.cp.Size), int64(index), int64(pb.cp.Size))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to calculate inclusion proof node list: %w\", err)\n\t}\n\n\tret := make([][]byte, 0)\n\t\/\/ TODO(al) parallelise this.\n\tfor _, n := range nodes {\n\t\th, err := pb.nodeCache.GetNode(n.ID)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to get node (%v): %w\", n.ID, err)\n\t\t}\n\t\tret = append(ret, h)\n\t}\n\treturn ret, nil\n}\n\n\/\/ ConsistencyProof constructs a consistency proof between the two passed in tree sizes.\n\/\/ This function uses the passed-in function to retrieve tiles containing any log tree\n\/\/ nodes necessary to build the proof.\nfunc (pb *ProofBuilder) ConsistencyProof(smaller, larger uint64) ([][]byte, error) {\n\tnodes, err := merkle.CalcConsistencyProofNodeAddresses(int64(smaller), int64(larger), int64(pb.cp.Size))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to calculate consistency proof node list: %w\", err)\n\t}\n\n\thashes := make([][]byte, 0)\n\t\/\/ TODO(al) parallelise this.\n\tfor _, n := range nodes {\n\t\th, err := pb.nodeCache.GetNode(n.ID)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to get node (%v): %w\", n.ID, err)\n\t\t}\n\t\thashes = append(hashes, h)\n\t}\n\treturn hashes, nil\n}\n\n\/\/ FetchRangeNodes returns the set of nodes representing the compact range covering\n\/\/ a log of size s.\nfunc FetchRangeNodes(s uint64, gt GetTileFunc) ([][]byte, error) {\n\tnc := newNodeCache(gt, s)\n\tnIDs := compact.RangeNodes(0, s)\n\tret := make([][]byte, len(nIDs))\n\tfor i, n := range nIDs {\n\t\th, err := nc.GetNode(n)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tret[i] = h\n\t}\n\treturn ret, nil\n}\n\n\/\/ FetchLeafHashes fetches N consecutive leaf hashes starting with the leaf at index first.\nfunc FetchLeafHashes(f Fetcher, first, N, logSize uint64) ([][]byte, error) {\n\tnc := newNodeCache(newTileFetcher(f, logSize), logSize)\n\tret := make([][]byte, N, 0)\n\tfor i, seq := uint64(0), first; i < N; i, seq = i+1, seq+1 {\n\t\tnID := compact.NodeID{Level: 0, Index: seq}\n\t\th, err := nc.GetNode(nID)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to fetch node %v: %v\", nID, err)\n\t\t}\n\t\tret = append(ret, h)\n\t}\n\treturn ret, nil\n}\n\n\/\/ nodeCache hides the tiles abstraction away, and improves\n\/\/ performance by caching tiles it's seen.\n\/\/ Not threadsafe, and intended to be only used throughout the course\n\/\/ of a single request.\ntype nodeCache struct {\n\tlogSize uint64\n\tephemeral map[compact.NodeID][]byte\n\ttiles map[tileKey]api.Tile\n\tgetTile GetTileFunc\n}\n\n\/\/ GetTileFunc is the signature of a function which knows how to fetch a\n\/\/ specific tile.\ntype GetTileFunc func(level, index uint64) (*api.Tile, error)\n\n\/\/ tileKey is used as a key in nodeCache's tile map.\ntype tileKey struct {\n\ttileLevel uint64\n\ttileIndex uint64\n}\n\n\/\/ newNodeCache creates a new nodeCache instance for a given log size.\nfunc newNodeCache(f GetTileFunc, logSize uint64) nodeCache {\n\treturn nodeCache{\n\t\tlogSize: logSize,\n\t\tephemeral: make(map[compact.NodeID][]byte),\n\t\ttiles: make(map[tileKey]api.Tile),\n\t\tgetTile: f,\n\t}\n}\n\n\/\/ SetEphemeralNode stored a derived \"ephemeral\" tree node.\nfunc (n *nodeCache) SetEphemeralNode(id compact.NodeID, h []byte) {\n\tn.ephemeral[id] = h\n}\n\n\/\/ GetNode returns the internal log tree node hash for the specified node ID.\n\/\/ A previously set ephemeral node will be returned if id matches, otherwise\n\/\/ the tile containing the requested node will be fetched and cached, and the\n\/\/ node hash returned.\nfunc (n *nodeCache) GetNode(id compact.NodeID) ([]byte, error) {\n\t\/\/ First check for ephemeral nodes:\n\tif e := n.ephemeral[id]; len(e) != 0 {\n\t\treturn e, nil\n\t}\n\t\/\/ Otherwise look in fetched tiles:\n\ttileLevel, tileIndex, nodeLevel, nodeIndex := layout.NodeCoordsToTileAddress(uint64(id.Level), uint64(id.Index))\n\ttKey := tileKey{tileLevel, tileIndex}\n\tt, ok := n.tiles[tKey]\n\tif !ok {\n\t\ttile, err := n.getTile(tileLevel, tileIndex)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to fetch tile: %w\", err)\n\t\t}\n\t\tt = *tile\n\t\tn.tiles[tKey] = *tile\n\t}\n\tnode := t.Nodes[api.TileNodeKey(nodeLevel, nodeIndex)]\n\tif node == nil {\n\t\treturn nil, fmt.Errorf(\"node %v (tile coords [%d,%d]\/[%d,%d]) unknown\", id, tileLevel, tileIndex, nodeLevel, nodeIndex)\n\t}\n\treturn node, nil\n}\n\n\/\/ newTileFetcher returns a GetTileFunc based on the passed in Fetcher and log size.\nfunc newTileFetcher(f Fetcher, logSize uint64) GetTileFunc {\n\treturn func(level, index uint64) (*api.Tile, error) {\n\t\ttileSize := layout.PartialTileSize(level, index, logSize)\n\t\tp := filepath.Join(layout.TilePath(\"\", level, index, tileSize))\n\t\tt, err := f(p)\n\t\tif err != nil {\n\t\t\tif !errors.Is(err, os.ErrNotExist) {\n\t\t\t\treturn nil, fmt.Errorf(\"failed to read tile at %q: %w\", p, err)\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvar tile api.Tile\n\t\tif err := tile.UnmarshalText(t); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse tile: %w\", err)\n\t\t}\n\t\treturn &tile, nil\n\t}\n}\n\n\/\/ LookupIndex fetches the leafhash->seq mapping file from the log, and returns\n\/\/ its parsed contents.\nfunc LookupIndex(f Fetcher, lh []byte) (uint64, error) {\n\tp := filepath.Join(layout.LeafPath(\"\", lh))\n\tsRaw, err := f(p)\n\tif err != nil {\n\t\tif errors.Is(err, os.ErrNotExist) {\n\t\t\treturn 0, fmt.Errorf(\"leafhash unknown (%w)\", err)\n\t\t}\n\t\treturn 0, fmt.Errorf(\"failed to fetch leafhash->seq file: %w\", err)\n\t}\n\treturn strconv.ParseUint(string(sRaw), 16, 64)\n}\n\n\/\/ LogStateTracker represents a client-side view of a target log's state.\n\/\/ This tracker handles verification that updates to the tracked log state are\n\/\/ consistent with previously seen states.\ntype LogStateTracker struct {\n\tHasher hashers.LogHasher\n\tVerifier logverifier.LogVerifier\n\tFetcher Fetcher\n\n\t\/\/ LatestConsistentRaw holds the raw bytes of the latest proven-consistent\n\t\/\/ LogState seen by this tracker.\n\tLatestConsistentRaw []byte\n\n\t\/\/ LatestConsistent is the deserialised form of LatestConsistentRaw\n\tLatestConsistent log.Checkpoint\n\tCpSigVerifier note.Verifier\n}\n\n\/\/ NewLogStateTracker creates a newly initialised tracker.\n\/\/ If a serialised LogState representation is provided then this is used as the\n\/\/ initial tracked state, otherwise a log state is fetched from the target log.\nfunc NewLogStateTracker(f Fetcher, h hashers.LogHasher, checkpointRaw []byte, nV note.Verifier) (LogStateTracker, error) {\n\n\tret := LogStateTracker{\n\t\tFetcher: f,\n\t\tHasher: h,\n\t\tVerifier: logverifier.New(h),\n\t\tLatestConsistent: log.Checkpoint{},\n\t\tCpSigVerifier: nV,\n\t}\n\tif len(checkpointRaw) > 0 {\n\t\tret.LatestConsistentRaw = checkpointRaw\n\t\tif _, err := ret.LatestConsistent.Unmarshal(checkpointRaw); err != nil {\n\t\t\treturn ret, err\n\t\t}\n\t\treturn ret, nil\n\t}\n\treturn ret, ret.Update()\n}\n\n\/\/ ErrInconsistency should be returned when there has been an error proving consistency\n\/\/ between log states.\n\/\/ The raw log state representations are included as-returned by the target log, this\n\/\/ ensures that evidence of inconsistent log updates are available to the caller of\n\/\/ the method(s) returning this error.\ntype ErrInconsistency struct {\n\tSmallerRaw []byte\n\tLargerRaw []byte\n\tProof [][]byte\n\n\tWrapped error\n}\n\nfunc (e ErrInconsistency) Unwrap() error {\n\treturn e.Wrapped\n}\n\nfunc (e ErrInconsistency) Error() string {\n\treturn fmt.Sprintf(\"log consistency check failed: %s\", e.Wrapped)\n}\n\n\/\/ Update attempts to update the local view of the target log's state.\n\/\/ If a more recent logstate is found, this method will attempt to prove\n\/\/ that it is consistent with the local state before updating the tracker's\n\/\/ view.\nfunc (lst *LogStateTracker) Update() error {\n\tc, cRaw, err := fetchCheckpointAndParse(lst.Fetcher, lst.CpSigVerifier)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif lst.LatestConsistent.Size > 0 {\n\t\tif c.Size > lst.LatestConsistent.Size {\n\t\t\tbuilder, err := NewProofBuilder(*c, lst.Hasher.HashChildren, lst.Fetcher)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to create proof builder: %w\", err)\n\t\t\t}\n\t\t\tp, err := builder.ConsistencyProof(lst.LatestConsistent.Size, c.Size)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tglog.V(1).Infof(\"Built consistency proof %x\", p)\n\t\t\tif err := lst.Verifier.VerifyConsistencyProof(int64(lst.LatestConsistent.Size), int64(c.Size), lst.LatestConsistent.Hash, c.Hash, p); err != nil {\n\t\t\t\treturn ErrInconsistency{\n\t\t\t\t\tSmallerRaw: lst.LatestConsistentRaw,\n\t\t\t\t\tLargerRaw: cRaw,\n\t\t\t\t\tProof: p,\n\t\t\t\t\tWrapped: err,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tlst.LatestConsistentRaw, lst.LatestConsistent = cRaw, *c\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package vtrace_test\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"veyron.io\/veyron\/veyron2\/context\"\n\t\"veyron.io\/veyron\/veyron2\/ipc\"\n\t\"veyron.io\/veyron\/veyron2\/ipc\/stream\"\n\t\"veyron.io\/veyron\/veyron2\/naming\"\n\t\"veyron.io\/veyron\/veyron2\/security\"\n\t\"veyron.io\/veyron\/veyron2\/vlog\"\n\t\"veyron.io\/veyron\/veyron2\/vtrace\"\n\n\t_ \"veyron.io\/veyron\/veyron\/lib\/tcp\"\n\tiipc \"veyron.io\/veyron\/veyron\/runtimes\/google\/ipc\"\n\t\"veyron.io\/veyron\/veyron\/runtimes\/google\/ipc\/stream\/manager\"\n\ttnaming \"veyron.io\/veyron\/veyron\/runtimes\/google\/testing\/mocks\/naming\"\n\ttruntime \"veyron.io\/veyron\/veyron\/runtimes\/google\/testing\/mocks\/runtime\"\n\tivtrace \"veyron.io\/veyron\/veyron\/runtimes\/google\/vtrace\"\n)\n\n\/\/ We need a special way to create contexts for tests. We\n\/\/ can't create a real runtime in the runtime implementation\n\/\/ so we use a fake one that panics if used. The runtime\n\/\/ implementation should not ever use the Runtime from a context.\nfunc testContext() context.T {\n\treturn iipc.InternalNewContext(&truntime.PanicRuntime{})\n}\n\nfunc TestNewFromContext(t *testing.T) {\n\tc0 := testContext()\n\tc1, s1 := ivtrace.WithNewSpan(c0, \"s1\")\n\tc2, s2 := ivtrace.WithNewSpan(c1, \"s2\")\n\tc3, s3 := ivtrace.WithNewSpan(c2, \"s3\")\n\texpected := map[context.T]vtrace.Span{\n\t\tc0: nil,\n\t\tc1: s1,\n\t\tc2: s2,\n\t\tc3: s3,\n\t}\n\tfor ctx, expectedSpan := range expected {\n\t\tif s := ivtrace.FromContext(ctx); s != expectedSpan {\n\t\t\tt.Errorf(\"Wrong span for ctx %v. Got %v, want %v\", c0, s, expectedSpan)\n\t\t}\n\t}\n}\n\ntype fakeAuthorizer int\n\nfunc (fakeAuthorizer) Authorize(security.Context) error {\n\treturn nil\n}\n\ntype testServer struct {\n\tsm stream.Manager\n\tns naming.Namespace\n\tname string\n\tchild string\n\tstop func() error\n\tforceCollect bool\n}\n\nfunc (c *testServer) Run(ctx ipc.ServerContext) error {\n\tif c.forceCollect {\n\t\tivtrace.FromContext(ctx).Trace().ForceCollect()\n\t}\n\n\tclient, err := iipc.InternalNewClient(c.sm, c.ns)\n\tif err != nil {\n\t\tvlog.Error(err)\n\t\treturn err\n\t}\n\n\tivtrace.FromContext(ctx).Annotate(c.name + \"-begin\")\n\n\tif c.child != \"\" {\n\t\tvar call ipc.Call\n\t\tif call, err = client.StartCall(ctx, c.child, \"Run\", []interface{}{}); err != nil {\n\t\t\tvlog.Error(err)\n\t\t\treturn err\n\t\t}\n\t\tvar outerr error\n\t\tif err = call.Finish(&outerr); err != nil {\n\t\t\tvlog.Error(err)\n\t\t\treturn err\n\t\t}\n\t\tif outerr != nil {\n\t\t\tvlog.Error(outerr)\n\t\t\treturn outerr\n\t\t}\n\t}\n\tivtrace.FromContext(ctx).Annotate(c.name + \"-end\")\n\n\treturn nil\n}\n\nfunc makeTestServer(ns naming.Namespace, name, child string, forceCollect bool) (*testServer, error) {\n\tsm := manager.InternalNew(naming.FixedRoutingID(0x111111111))\n\tctx := testContext()\n\ts, err := iipc.InternalNewServer(ctx, sm, ns, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif _, err := s.Listen(ipc.ListenSpec{Protocol: \"tcp\", Address: \"127.0.0.1:0\"}); err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &testServer{\n\t\tsm: sm,\n\t\tns: ns,\n\t\tname: name,\n\t\tchild: child,\n\t\tstop: s.Stop,\n\t\tforceCollect: forceCollect,\n\t}\n\n\tif err := s.Serve(name, c, fakeAuthorizer(0)); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}\n\nfunc summary(span *vtrace.SpanRecord) string {\n\tsummary := span.Name\n\tif len(span.Annotations) > 0 {\n\t\tmsgs := []string{}\n\t\tfor _, annotation := range span.Annotations {\n\t\t\tmsgs = append(msgs, annotation.Message)\n\t\t}\n\t\tsummary += \": \" + strings.Join(msgs, \", \")\n\t}\n\treturn summary\n}\n\nfunc expectSequence(t *testing.T, trace vtrace.TraceRecord, expectedSpans []string) {\n\t\/\/ It's okay to have additional spans - someone may have inserted\n\t\/\/ additional spans for more debugging.\n\tif got, want := len(trace.Spans), len(expectedSpans); got < want {\n\t\tt.Errorf(\"Found %d spans, want %d\", got, want)\n\t}\n\n\tspans := map[string]*vtrace.SpanRecord{}\n\tsummaries := []string{}\n\tfor i := range trace.Spans {\n\t\tspan := &trace.Spans[i]\n\n\t\t\/\/ All spans should have a start.\n\t\tif span.Start == 0 {\n\t\t\tt.Errorf(\"span missing start: %#v\", span)\n\t\t}\n\t\t\/\/ All spans except the root should have an end.\n\t\tif span.Name != \"\" && span.End == 0 {\n\t\t\tt.Errorf(\"span missing end: %#v\", span)\n\t\t\tif span.Start >= span.End {\n\t\t\t\tt.Errorf(\"span end should be after start: %#v\", span)\n\t\t\t}\n\t\t}\n\n\t\tsummary := summary(span)\n\t\tsummaries = append(summaries, summary)\n\t\tspans[summary] = span\n\t}\n\n\tfor i := range expectedSpans {\n\t\tchild, ok := spans[expectedSpans[i]]\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected span %s not found in %#v\", expectedSpans[i], summaries)\n\t\t\tcontinue\n\t\t}\n\t\tif i == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tparent, ok := spans[expectedSpans[i-1]]\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected span %s not found in %#v\", expectedSpans[i-1], summaries)\n\t\t\tcontinue\n\t\t}\n\t\tif child.Parent != parent.ID {\n\t\t\tt.Errorf(\"%v should be a child of %v, but it's not.\", child, parent)\n\t\t}\n\t}\n}\n\nfunc runCallChain(t *testing.T, ctx context.T, force1, force2 bool) {\n\tsm := manager.InternalNew(naming.FixedRoutingID(0x555555555))\n\tns := tnaming.NewSimpleNamespace()\n\n\tclient, err := iipc.InternalNewClient(sm, ns)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tc1, err := makeTestServer(ns, \"c1\", \"c2\", force1)\n\tif err != nil {\n\t\tt.Fatal(\"Can't start server:\", err)\n\t}\n\tdefer c1.stop()\n\n\tc2, err := makeTestServer(ns, \"c2\", \"\", force2)\n\tif err != nil {\n\t\tt.Fatal(\"Can't start server:\", err)\n\t}\n\tdefer c2.stop()\n\n\tcall, err := client.StartCall(ctx, \"c1\", \"Run\", []interface{}{})\n\tif err != nil {\n\t\tt.Fatal(\"can't call: \", err)\n\t}\n\tvar outerr error\n\tif err = call.Finish(&outerr); err != nil {\n\t\tt.Error(err)\n\t}\n\tif outerr != nil {\n\t\tt.Error(outerr)\n\t}\n}\n\n\/\/ TestCancellationPropagation tests that cancellation propogates along an\n\/\/ RPC call chain without user intervention.\nfunc TestTraceAcrossRPCs(t *testing.T) {\n\tctx, span := ivtrace.WithNewSpan(testContext(), \"\")\n\tspan.Trace().ForceCollect()\n\tspan.Annotate(\"c0-begin\")\n\n\trunCallChain(t, ctx, false, false)\n\n\tspan.Annotate(\"c0-end\")\n\n\texpectedSpans := []string{\n\t\t\": c0-begin, c0-end\",\n\t\t\"<client>\\\"c1\\\".Run\",\n\t\t\"\\\"\\\".Run: c1-begin, c1-end\",\n\t\t\"<client>\\\"c2\\\".Run\",\n\t\t\"\\\"\\\".Run: c2-begin, c2-end\",\n\t}\n\texpectSequence(t, span.Trace().Record(), expectedSpans)\n}\n\n\/\/ TestCancellationPropagationLateForce tests that cancellation propogates along an\n\/\/ RPC call chain when tracing is initiated by someone deep in the call chain.\nfunc TestTraceAcrossRPCsLateForce(t *testing.T) {\n\tctx, span := ivtrace.WithNewSpan(testContext(), \"\")\n\tspan.Annotate(\"c0-begin\")\n\n\trunCallChain(t, ctx, false, true)\n\n\tspan.Annotate(\"c0-end\")\n\n\texpectedSpans := []string{\n\t\t\": c0-end\",\n\t\t\"<client>\\\"c1\\\".Run\",\n\t\t\"\\\"\\\".Run: c1-end\",\n\t\t\"<client>\\\"c2\\\".Run\",\n\t\t\"\\\"\\\".Run: c2-begin, c2-end\",\n\t}\n\texpectSequence(t, span.Trace().Record(), expectedSpans)\n}\n<commit_msg>veyron\/runtimes\/google\/vtrace: Fix flaky vtrace test.<commit_after>package vtrace_test\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"veyron.io\/veyron\/veyron2\/context\"\n\t\"veyron.io\/veyron\/veyron2\/ipc\"\n\t\"veyron.io\/veyron\/veyron2\/ipc\/stream\"\n\t\"veyron.io\/veyron\/veyron2\/naming\"\n\t\"veyron.io\/veyron\/veyron2\/security\"\n\t\"veyron.io\/veyron\/veyron2\/vlog\"\n\t\"veyron.io\/veyron\/veyron2\/vtrace\"\n\n\t_ \"veyron.io\/veyron\/veyron\/lib\/tcp\"\n\tiipc \"veyron.io\/veyron\/veyron\/runtimes\/google\/ipc\"\n\t\"veyron.io\/veyron\/veyron\/runtimes\/google\/ipc\/stream\/manager\"\n\ttnaming \"veyron.io\/veyron\/veyron\/runtimes\/google\/testing\/mocks\/naming\"\n\ttruntime \"veyron.io\/veyron\/veyron\/runtimes\/google\/testing\/mocks\/runtime\"\n\tivtrace \"veyron.io\/veyron\/veyron\/runtimes\/google\/vtrace\"\n)\n\n\/\/ We need a special way to create contexts for tests. We\n\/\/ can't create a real runtime in the runtime implementation\n\/\/ so we use a fake one that panics if used. The runtime\n\/\/ implementation should not ever use the Runtime from a context.\nfunc testContext() context.T {\n\treturn iipc.InternalNewContext(&truntime.PanicRuntime{})\n}\n\nfunc TestNewFromContext(t *testing.T) {\n\tc0 := testContext()\n\tc1, s1 := ivtrace.WithNewSpan(c0, \"s1\")\n\tc2, s2 := ivtrace.WithNewSpan(c1, \"s2\")\n\tc3, s3 := ivtrace.WithNewSpan(c2, \"s3\")\n\texpected := map[context.T]vtrace.Span{\n\t\tc0: nil,\n\t\tc1: s1,\n\t\tc2: s2,\n\t\tc3: s3,\n\t}\n\tfor ctx, expectedSpan := range expected {\n\t\tif s := ivtrace.FromContext(ctx); s != expectedSpan {\n\t\t\tt.Errorf(\"Wrong span for ctx %v. Got %v, want %v\", c0, s, expectedSpan)\n\t\t}\n\t}\n}\n\ntype fakeAuthorizer int\n\nfunc (fakeAuthorizer) Authorize(security.Context) error {\n\treturn nil\n}\n\ntype testServer struct {\n\tsm stream.Manager\n\tns naming.Namespace\n\tname string\n\tchild string\n\tstop func() error\n\tforceCollect bool\n}\n\nfunc (c *testServer) Run(ctx ipc.ServerContext) error {\n\tif c.forceCollect {\n\t\tivtrace.FromContext(ctx).Trace().ForceCollect()\n\t}\n\n\tclient, err := iipc.InternalNewClient(c.sm, c.ns)\n\tif err != nil {\n\t\tvlog.Error(err)\n\t\treturn err\n\t}\n\n\tivtrace.FromContext(ctx).Annotate(c.name + \"-begin\")\n\n\tif c.child != \"\" {\n\t\tvar call ipc.Call\n\t\tif call, err = client.StartCall(ctx, c.child, \"Run\", []interface{}{}); err != nil {\n\t\t\tvlog.Error(err)\n\t\t\treturn err\n\t\t}\n\t\tvar outerr error\n\t\tif err = call.Finish(&outerr); err != nil {\n\t\t\tvlog.Error(err)\n\t\t\treturn err\n\t\t}\n\t\tif outerr != nil {\n\t\t\tvlog.Error(outerr)\n\t\t\treturn outerr\n\t\t}\n\t}\n\tivtrace.FromContext(ctx).Annotate(c.name + \"-end\")\n\n\treturn nil\n}\n\nfunc makeTestServer(ns naming.Namespace, name, child string, forceCollect bool) (*testServer, error) {\n\tsm := manager.InternalNew(naming.FixedRoutingID(0x111111111))\n\tctx := testContext()\n\ts, err := iipc.InternalNewServer(ctx, sm, ns, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif _, err := s.Listen(ipc.ListenSpec{Protocol: \"tcp\", Address: \"127.0.0.1:0\"}); err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &testServer{\n\t\tsm: sm,\n\t\tns: ns,\n\t\tname: name,\n\t\tchild: child,\n\t\tstop: s.Stop,\n\t\tforceCollect: forceCollect,\n\t}\n\n\tif err := s.Serve(name, c, fakeAuthorizer(0)); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn c, nil\n}\n\nfunc summary(span *vtrace.SpanRecord) string {\n\tsummary := span.Name\n\tif len(span.Annotations) > 0 {\n\t\tmsgs := []string{}\n\t\tfor _, annotation := range span.Annotations {\n\t\t\tmsgs = append(msgs, annotation.Message)\n\t\t}\n\t\tsummary += \": \" + strings.Join(msgs, \", \")\n\t}\n\treturn summary\n}\n\nfunc traceString(trace *vtrace.TraceRecord) string {\n\tvar b bytes.Buffer\n\tvtrace.FormatTrace(&b, trace, nil)\n\treturn b.String()\n}\n\nfunc expectSequence(t *testing.T, trace vtrace.TraceRecord, expectedSpans []string) {\n\t\/\/ It's okay to have additional spans - someone may have inserted\n\t\/\/ additional spans for more debugging.\n\tif got, want := len(trace.Spans), len(expectedSpans); got < want {\n\t\tt.Errorf(\"Found %d spans, want %d\", got, want)\n\t}\n\n\tspans := map[string]*vtrace.SpanRecord{}\n\tsummaries := []string{}\n\tfor i := range trace.Spans {\n\t\tspan := &trace.Spans[i]\n\n\t\t\/\/ All spans should have a start.\n\t\tif span.Start == 0 {\n\t\t\tt.Errorf(\"span missing start: %x, %s\", span.ID[12:], traceString(&trace))\n\t\t}\n\t\t\/\/ All spans except the root should have a valid end.\n\t\t\/\/ TODO(mattr): For now I'm also skipping connectFlow and\n\t\t\/\/ vc.HandshakeDialedVC spans because the ws endpoints are\n\t\t\/\/ currently non-deterministic in terms of whether they fail\n\t\t\/\/ before the test ends or not. In the future it will be\n\t\t\/\/ configurable whether we listen on ws or not and then we should\n\t\t\/\/ adjust the test to not listen and remove this check.\n\t\tif span.Name != \"\" &&\n\t\t\tspan.Name != \"<client>connectFlow\" &&\n\t\t\tspan.Name != \"vc.HandshakeDialedVC\" {\n\t\t\tif span.End == 0 {\n\t\t\t\tt.Errorf(\"span missing end: %x, %s\", span.ID[12:], traceString(&trace))\n\t\t\t} else if span.Start >= span.End {\n\t\t\t\tt.Errorf(\"span end should be after start: %x, %s\", span.ID[12:], traceString(&trace))\n\t\t\t}\n\t\t}\n\n\t\tsummary := summary(span)\n\t\tsummaries = append(summaries, summary)\n\t\tspans[summary] = span\n\t}\n\n\tfor i := range expectedSpans {\n\t\tchild, ok := spans[expectedSpans[i]]\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected span %s not found in %#v\", expectedSpans[i], summaries)\n\t\t\tcontinue\n\t\t}\n\t\tif i == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tparent, ok := spans[expectedSpans[i-1]]\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected span %s not found in %#v\", expectedSpans[i-1], summaries)\n\t\t\tcontinue\n\t\t}\n\t\tif child.Parent != parent.ID {\n\t\t\tt.Errorf(\"%v should be a child of %v, but it's not.\", child, parent)\n\t\t}\n\t}\n}\n\nfunc runCallChain(t *testing.T, ctx context.T, force1, force2 bool) {\n\tsm := manager.InternalNew(naming.FixedRoutingID(0x555555555))\n\tns := tnaming.NewSimpleNamespace()\n\n\tclient, err := iipc.InternalNewClient(sm, ns)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tc1, err := makeTestServer(ns, \"c1\", \"c2\", force1)\n\tif err != nil {\n\t\tt.Fatal(\"Can't start server:\", err)\n\t}\n\tdefer c1.stop()\n\n\tc2, err := makeTestServer(ns, \"c2\", \"\", force2)\n\tif err != nil {\n\t\tt.Fatal(\"Can't start server:\", err)\n\t}\n\tdefer c2.stop()\n\n\tcall, err := client.StartCall(ctx, \"c1\", \"Run\", []interface{}{})\n\tif err != nil {\n\t\tt.Fatal(\"can't call: \", err)\n\t}\n\tvar outerr error\n\tif err = call.Finish(&outerr); err != nil {\n\t\tt.Error(err)\n\t}\n\tif outerr != nil {\n\t\tt.Error(outerr)\n\t}\n}\n\n\/\/ TestCancellationPropagation tests that cancellation propogates along an\n\/\/ RPC call chain without user intervention.\nfunc TestTraceAcrossRPCs(t *testing.T) {\n\tctx, span := ivtrace.WithNewSpan(testContext(), \"\")\n\tspan.Trace().ForceCollect()\n\tspan.Annotate(\"c0-begin\")\n\n\trunCallChain(t, ctx, false, false)\n\n\tspan.Annotate(\"c0-end\")\n\n\texpectedSpans := []string{\n\t\t\": c0-begin, c0-end\",\n\t\t\"<client>\\\"c1\\\".Run\",\n\t\t\"\\\"\\\".Run: c1-begin, c1-end\",\n\t\t\"<client>\\\"c2\\\".Run\",\n\t\t\"\\\"\\\".Run: c2-begin, c2-end\",\n\t}\n\texpectSequence(t, span.Trace().Record(), expectedSpans)\n}\n\n\/\/ TestCancellationPropagationLateForce tests that cancellation propogates along an\n\/\/ RPC call chain when tracing is initiated by someone deep in the call chain.\nfunc TestTraceAcrossRPCsLateForce(t *testing.T) {\n\tctx, span := ivtrace.WithNewSpan(testContext(), \"\")\n\tspan.Annotate(\"c0-begin\")\n\n\trunCallChain(t, ctx, false, true)\n\n\tspan.Annotate(\"c0-end\")\n\n\texpectedSpans := []string{\n\t\t\": c0-end\",\n\t\t\"<client>\\\"c1\\\".Run\",\n\t\t\"\\\"\\\".Run: c1-end\",\n\t\t\"<client>\\\"c2\\\".Run\",\n\t\t\"\\\"\\\".Run: c2-begin, c2-end\",\n\t}\n\texpectSequence(t, span.Trace().Record(), expectedSpans)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Copyright © 2015-2018 Aeneas Rekkas <aeneas+oss@aeneas.io>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * @author\t\tAeneas Rekkas <aeneas+oss@aeneas.io>\n * @copyright \t2015-2018 Aeneas Rekkas <aeneas+oss@aeneas.io>\n * @license \tApache-2.0\n *\/\n\npackage cli\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jmoiron\/sqlx\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/viper\"\n\n\t\"github.com\/ory\/hydra\/client\"\n\t\"github.com\/ory\/hydra\/config\"\n\t\"github.com\/ory\/hydra\/consent\"\n\t\"github.com\/ory\/hydra\/jwk\"\n\t\"github.com\/ory\/hydra\/oauth2\"\n\t\"github.com\/ory\/hydra\/pkg\"\n)\n\ntype MigrateHandler struct {\n\tc *config.Config\n}\n\nfunc newMigrateHandler(c *config.Config) *MigrateHandler {\n\treturn &MigrateHandler{c: c}\n}\n\ntype schemaCreator interface {\n\tCreateSchemas() (int, error)\n}\n\nfunc (h *MigrateHandler) connectToSql(dsn string) (*sqlx.DB, error) {\n\tvar db *sqlx.DB\n\n\tu, err := url.Parse(dsn)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"Could not parse DATABASE_URL: %s\", err)\n\t}\n\n\tif err := pkg.Retry(h.c.GetLogger(), time.Second*15, time.Minute*2, func() error {\n\t\tif u.Scheme == \"mysql\" {\n\t\t\tdsn = strings.Replace(dsn, \"mysql:\/\/\", \"\", -1)\n\t\t}\n\n\t\tif db, err = sqlx.Open(u.Scheme, dsn); err != nil {\n\t\t\treturn errors.Errorf(\"Could not connect to SQL: %s\", err)\n\t\t} else if err := db.Ping(); err != nil {\n\t\t\treturn errors.Errorf(\"Could not connect to SQL: %s\", err)\n\t\t}\n\n\t\treturn nil\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}\n\nfunc getDBUrl(cmd *cobra.Command, args []string, position int) (dburl string) {\n\tif readFromEnv, _ := cmd.Flags().GetBool(\"read-from-env\"); readFromEnv {\n\t\tif len(viper.GetString(\"DATABASE_URL\")) == 0 {\n\t\t\tfmt.Println(cmd.UsageString())\n\t\t\tfmt.Println(\"\")\n\t\t\tfmt.Println(\"When using flag -e, environment variable DATABASE_URL must be set\")\n\t\t\treturn\n\t\t}\n\t\tdburl = viper.GetString(\"DATABASE_URL\")\n\t} else {\n\t\tif len(args) < position {\n\t\t\tfmt.Println(cmd.UsageString())\n\t\t\treturn\n\t\t}\n\t\tdburl = args[position]\n\t}\n\tif dburl == \"\" {\n\t\tfmt.Println(cmd.UsageString())\n\t\treturn\n\t}\n\treturn\n}\n\nfunc (h *MigrateHandler) MigrateSecret(cmd *cobra.Command, args []string) {\n\tdburl := getDBUrl(cmd, args, 0)\n\tif dburl == \"\" {\n\t\treturn\n\t}\n\n\tdb, err := h.connectToSql(dburl)\n\tif err != nil {\n\t\tfmt.Printf(\"An error occurred while connecting to SQL: %s\", err)\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\n\toldSecret := viper.GetString(\"OLD_SYSTEM_SECRET\")\n\tnewSecret := viper.GetString(\"NEW_SYSTEM_SECRET\")\n\n\tif len(oldSecret) == 0 {\n\t\tfmt.Println(\"You did not specify the old system secret, please set environment variable OLD_SYSTEM_SECRET.\")\n\t\tos.Exit(1)\n\t\treturn\n\t} else if len(newSecret) == 0 {\n\t\tfmt.Println(\"You did not specify the old system secret, please set environment variable NEW_SYSTEM_SECRET.\")\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\n\tfmt.Println(\"Rotating encryption keys for JSON Web Key storage...\")\n\tmanager := jwk.NewSQLManager(db, []byte(oldSecret))\n\tif err := manager.RotateKeys(&jwk.AEAD{Key: []byte(newSecret)}); err != nil {\n\t\tfmt.Printf(\"Error \\\"%s\\\" occurred while trying to rotate the JSON Web Key storage. All changes have been reverted.\", err)\n\t}\n\tfmt.Println(\"Rotating encryption keys for JSON Web Key storage completed successfully!\")\n\tfmt.Printf(`You may now run ORY Hydra with the new system secret. If you wish that old OAuth 2.0 Access and Refres\ntokens stay valid, please set environment variable ROTATED_SYSTEM_SECRET to the new secret:\n\nROTATED_SYSTEM_SECRET=%s hydra serve ...\n\nIf you wish that OAuth 2.0 Access and Refresh Tokens issued with the old secret are revoked, simply omit environment variable\nROTATED_SYSTEM_SECRET. This will NOT affect OpenID Connect ID Tokens!\n`, newSecret)\n}\n\nfunc (h *MigrateHandler) MigrateSQL(cmd *cobra.Command, args []string) {\n\tdburl := getDBUrl(cmd, args, 0)\n\tif dburl == \"\" {\n\t\treturn\n\t}\n\n\tdb, err := h.connectToSql(dburl)\n\tif err != nil {\n\t\tfmt.Printf(\"An error occurred while connecting to SQL: %s\", err)\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\n\tif err := h.runMigrateSQL(db); err != nil {\n\t\tfmt.Printf(\"An error occurred while running the migrations: %s\", err)\n\t\tos.Exit(1)\n\t\treturn\n\t}\n\tfmt.Println(\"Migration successful!\")\n}\n\nfunc (h *MigrateHandler) runMigrateSQL(db *sqlx.DB) error {\n\tvar total int\n\tfor k, m := range map[string]schemaCreator{\n\t\t\"client\": &client.SQLManager{DB: db},\n\t\t\"oauth2\": &oauth2.FositeSQLStore{DB: db},\n\t\t\"jwk\": &jwk.SQLManager{DB: db},\n\t\t\"consent\": consent.NewSQLManager(db, nil, nil),\n\t} {\n\t\tfmt.Printf(\"Applying `%s` SQL migrations...\\n\", k)\n\t\tif num, err := m.CreateSchemas(); err != nil {\n\t\t\treturn errors.Wrapf(err, \"Could not apply `%s` SQL migrations\", k)\n\t\t} else {\n\t\t\tfmt.Printf(\"Applied %d `%s` SQL migrations.\\n\", num, k)\n\t\t\ttotal += num\n\t\t}\n\t}\n\n\tfmt.Printf(\"Migration successful! Applied a total of %d SQL migrations.\\n\", total)\n\treturn nil\n}\n<commit_msg>cli: Improve migrate error messages<commit_after>\/*\n * Copyright © 2015-2018 Aeneas Rekkas <aeneas+oss@aeneas.io>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\n * @author\t\tAeneas Rekkas <aeneas+oss@aeneas.io>\n * @copyright \t2015-2018 Aeneas Rekkas <aeneas+oss@aeneas.io>\n * @license \tApache-2.0\n *\/\n\npackage cli\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com\/ory\/x\/cmdx\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/jmoiron\/sqlx\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/viper\"\n\n\t\"github.com\/ory\/hydra\/client\"\n\t\"github.com\/ory\/hydra\/config\"\n\t\"github.com\/ory\/hydra\/consent\"\n\t\"github.com\/ory\/hydra\/jwk\"\n\t\"github.com\/ory\/hydra\/oauth2\"\n\t\"github.com\/ory\/hydra\/pkg\"\n)\n\ntype MigrateHandler struct {\n\tc *config.Config\n}\n\nfunc newMigrateHandler(c *config.Config) *MigrateHandler {\n\treturn &MigrateHandler{c: c}\n}\n\ntype schemaCreator interface {\n\tCreateSchemas() (int, error)\n}\n\nfunc (h *MigrateHandler) connectToSql(dsn string) (*sqlx.DB, error) {\n\tvar db *sqlx.DB\n\n\tu, err := url.Parse(dsn)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"could not parse DATABASE_URL: %s\", err)\n\t}\n\n\tif err := pkg.Retry(h.c.GetLogger(), time.Second*15, time.Minute*2, func() error {\n\t\tif u.Scheme == \"mysql\" {\n\t\t\tdsn = strings.Replace(dsn, \"mysql:\/\/\", \"\", -1)\n\t\t}\n\n\t\tif db, err = sqlx.Open(u.Scheme, dsn); err != nil {\n\t\t\treturn errors.Errorf(\"could not connect to SQL: %s\", err)\n\t\t} else if err := db.Ping(); err != nil {\n\t\t\treturn errors.Errorf(\"could not connect to SQL: %s\", err)\n\t\t}\n\n\t\treturn nil\n\t}); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn db, nil\n}\n\nfunc getDBUrl(cmd *cobra.Command, args []string, position int) (dburl string) {\n\tif readFromEnv, _ := cmd.Flags().GetBool(\"read-from-env\"); readFromEnv {\n\t\tif len(viper.GetString(\"DATABASE_URL\")) == 0 {\n\t\t\tfmt.Println(cmd.UsageString())\n\t\t\tfmt.Println(\"\")\n\t\t\tfmt.Println(\"When using flag -e, environment variable DATABASE_URL must be set\")\n\t\t\treturn\n\t\t}\n\t\tdburl = viper.GetString(\"DATABASE_URL\")\n\t} else {\n\t\tif len(args) < position {\n\t\t\tfmt.Println(cmd.UsageString())\n\t\t\treturn\n\t\t}\n\t\tdburl = args[position]\n\t}\n\tif dburl == \"\" {\n\t\tfmt.Println(cmd.UsageString())\n\t\treturn\n\t}\n\treturn\n}\n\nfunc (h *MigrateHandler) MigrateSecret(cmd *cobra.Command, args []string) {\n\tdburl := getDBUrl(cmd, args, 0)\n\tif dburl == \"\" {\n\t\treturn\n\t}\n\n\tdb, err := h.connectToSql(dburl)\n\tcmdx.Must(err, \"An error occurred while connecting to SQL: %s\", err)\n\n\toldSecret := viper.GetString(\"OLD_SYSTEM_SECRET\")\n\tnewSecret := viper.GetString(\"NEW_SYSTEM_SECRET\")\n\n\tif len(oldSecret) != 32 {\n\t\tcmdx.Fatalf(\"Value of environment variable OLD_SYSTEM_SECRET has to be exactly 32 characters long but got: %d\", len(oldSecret))\n\t}\n\n\tif len(newSecret) != 32 {\n\t\tcmdx.Fatalf(\"Value of environment variable NEW_SYSTEM_SECRET has to be exactly 32 characters long but got: %d\", len(oldSecret))\n\t}\n\n\tfmt.Println(\"Rotating encryption keys for JSON Web Key storage...\")\n\n\tmanager := jwk.NewSQLManager(db, []byte(oldSecret))\n\terr = manager.RotateKeys(context.TODO(), &jwk.AEAD{Key: []byte(newSecret)})\n\tcmdx.Must(err, \"Unable to rotate JSON Web Keys: %s\\nAll changes have been rolled back.\", err)\n\n\tfmt.Println(\"Rotating encryption keys for JSON Web Key storage completed successfully!\")\n\tfmt.Printf(`You may now run ORY Hydra with the new system secret. If you wish that old OAuth 2.0 Access and Refres\ntokens stay valid, please set environment variable ROTATED_SYSTEM_SECRET to the new secret:\n\nROTATED_SYSTEM_SECRET=%s hydra serve ...\n\nIf you wish that OAuth 2.0 Access and Refresh Tokens issued with the old secret are revoked, simply omit environment variable\nROTATED_SYSTEM_SECRET. This will NOT affect OpenID Connect ID Tokens!\n`, newSecret)\n}\n\nfunc (h *MigrateHandler) MigrateSQL(cmd *cobra.Command, args []string) {\n\tdburl := getDBUrl(cmd, args, 0)\n\tif dburl == \"\" {\n\t\treturn\n\t}\n\n\tdb, err := h.connectToSql(dburl)\n\tcmdx.Must(err, \"An error occurred while connecting to SQL: %s\", err)\n\n\terr = h.runMigrateSQL(db)\n\tcmdx.Must(err, \"An error occurred while running the migrations: %s\", err)\n\n\tfmt.Println(\"Migration successful!\")\n}\n\nfunc (h *MigrateHandler) runMigrateSQL(db *sqlx.DB) error {\n\tvar total int\n\tfor k, m := range map[string]schemaCreator{\n\t\t\"client\": &client.SQLManager{DB: db},\n\t\t\"oauth2\": &oauth2.FositeSQLStore{DB: db},\n\t\t\"jwk\": &jwk.SQLManager{DB: db},\n\t\t\"consent\": consent.NewSQLManager(db, nil, nil),\n\t} {\n\t\tfmt.Printf(\"Applying `%s` SQL migrations...\\n\", k)\n\t\tif num, err := m.CreateSchemas(); err != nil {\n\t\t\treturn errors.Wrapf(err, \"could not apply %s SQL migrations\", k)\n\t\t} else {\n\t\t\tfmt.Printf(\"Applied %d `%s` SQL migrations.\\n\", num, k)\n\t\t\ttotal += num\n\t\t}\n\t}\n\n\tfmt.Printf(\"Migration successful! Applied a total of %d SQL migrations.\\n\", total)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Command deployctl implements single-command operator's interface to manage\n\/\/ deployments running under deploy-registry and deploy-agent\npackage main\n\nimport (\n\t\"archive\/tar\"\n\t\"compress\/gzip\"\n\t\"crypto\/sha256\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/crypto\/ssh\"\n\t\"golang.org\/x\/crypto\/ssh\/agent\"\n\n\t\"github.com\/artyom\/autoflags\"\n\t\"github.com\/artyom\/deploy-tools\/internal\/shared\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/pkg\/sftp\"\n)\n\nfunc main() {\n\targs := &runArgs{\n\t\tAddr: os.Getenv(\"DEPLOYCTL_ADDR\"),\n\t\tFp: os.Getenv(\"DEPLOYCTL_FINGERPRINT\"),\n\t}\n\tfs := flag.NewFlagSet(\"deployctl\", flag.ExitOnError)\n\tfs.Usage = usageFunc(fs.PrintDefaults)\n\tautoflags.DefineFlagSet(fs, args)\n\tfs.Parse(os.Args[1:])\n\tif err := args.Validate(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\\n\", err)\n\t\tfs.Usage()\n\t\tos.Exit(2)\n\t}\n\tif len(fs.Args()) == 0 {\n\t\tfs.Usage()\n\t\tos.Exit(2)\n\t}\n\tif err := dispatch(args.Addr, args.Fp, fs.Args()); err != nil {\n\t\tif err == errFlagParseError {\n\t\t\tos.Exit(2)\n\t\t}\n\t\tif _, ok := err.(*ssh.ExitError); !ok { \/\/ don't write \"Process exited with status 1\"\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t}\n\t\tos.Exit(1)\n\t}\n}\n\nfunc dispatch(addr, fingerprint string, rawArgs []string) error {\n\tif len(rawArgs) == 0 {\n\t\treturn errors.New(\"nothing to do\")\n\t}\n\tcmd, args := rawArgs[0], rawArgs[1:]\n\tswitch cmd {\n\tcase \"components\", \"configurations\":\n\t\treturn proxyCommand(addr, fingerprint, rawArgs)\n\tcase \"addver\":\n\t\tval := &shared.ArgsAddVersionByFile{}\n\t\tif err := parseArgs(cmd, val, os.Stderr, args); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn uploadAndUpdate(addr, fingerprint, val)\n\t}\n\tval, err := validatorForCommand(cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := parseArgs(cmd, val, os.Stderr, args); err != nil {\n\t\treturn err\n\t}\n\treturn proxyCommand(addr, fingerprint, rawArgs)\n}\n\nfunc uploadAndUpdate(addr, fingerprint string, args *shared.ArgsAddVersionByFile) error {\n\tsrc, err := os.Open(args.File)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer src.Close()\n\tclient, cancel, err := dialSSH(addr, fingerprint)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer cancel()\n\tsftpconn, err := sftp.NewClient(client)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer sftpconn.Close()\n\tdst, err := sftpconn.Create(\"upload\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer dst.Close()\n\tpr, pw := io.Pipe()\n\tdefer pw.Close()\n\tdefer pr.Close()\n\tverifyErr := make(chan error)\n\tgo func() {\n\t\terr := decodeArchive(pr)\n\t\tpr.CloseWithError(err)\n\t\tverifyErr <- err\n\t}()\n\th := sha256.New()\n\ttr := io.TeeReader(src, h)\n\tif _, err := io.Copy(io.MultiWriter(pw, dst), tr); err != nil {\n\t\treturn errors.WithMessage(err, \"upload failure\")\n\t}\n\tif err := dst.Close(); err != nil {\n\t\treturn err\n\t}\n\t\/\/ decodeArchive buffers data, so it may not fail during io.Copy to\n\t\/\/ MultiWriter above if data is copied faster than decoded; ensure we\n\t\/\/ check decode result\n\tswitch err := <-verifyErr; err {\n\tcase io.EOF:\n\tdefault:\n\t\treturn errors.WithMessage(err, \"file validation failed\")\n\t}\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer session.Close()\n\tsession.Stdout = os.Stdout\n\tsession.Stderr = os.Stderr\n\treturn session.Run(fmt.Sprintf(\"addver -name=%q -version=%q -hash=%x\",\n\t\targs.Name, args.Version, h.Sum(nil)))\n}\n\nfunc dialSSH(addr, fingerprint string) (client *ssh.Client, closeFunc func(), err error) {\n\tagentConn, err := net.Dial(\"unix\", os.Getenv(\"SSH_AUTH_SOCK\"))\n\tif err != nil {\n\t\treturn nil, nil, errors.WithMessage(err, \"cannot connect to ssh-agent, check if SSH_AUTH_SOCK is set\")\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tagentConn.Close()\n\t\t}\n\t}()\n\tsshAgent := agent.NewClient(agentConn)\n\tvar signers []ssh.Signer\n\tsigners, err = sshAgent.Signers()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tconfig := &ssh.ClientConfig{\n\t\tUser: os.Getenv(\"USER\"),\n\t\tAuth: []ssh.AuthMethod{ssh.PublicKeys(signers...)},\n\t\tTimeout: 30 * time.Second,\n\t\tHostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {\n\t\t\tif hostFp := ssh.FingerprintSHA256(key); hostFp != fingerprint {\n\t\t\t\treturn errors.Errorf(\"host key fingerprint mismatch: %v\", hostFp)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\tclient, err = ssh.Dial(\"tcp\", addr, config)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tcloseFunc = func() { client.Close(); agentConn.Close() }\n\treturn client, closeFunc, nil\n}\n\nfunc proxyCommand(addr, fingerprint string, args []string) error {\n\tclient, cancel, err := dialSSH(addr, fingerprint)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer cancel()\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer session.Close()\n\tsession.Stdout = os.Stdout\n\tsession.Stderr = os.Stderr\n\treturn session.Run(strings.Join(args, \" \"))\n}\n\ntype validator interface {\n\tValidate() error\n}\n\nfunc validatorForCommand(name string) (validator, error) {\n\tswitch name {\n\tcase \"delver\":\n\t\treturn &shared.ArgsDelVersion{}, nil\n\tcase \"delcomp\":\n\t\treturn &shared.ArgsDelComponent{}, nil\n\tcase \"addconf\":\n\t\treturn &shared.ArgsAddConfiguration{}, nil\n\tcase \"delconf\":\n\t\treturn &shared.ArgsDelConfiguration{}, nil\n\tcase \"changeconf\":\n\t\treturn &shared.ArgsUpdateConfiguration{}, nil\n\tcase \"showconf\":\n\t\treturn &shared.ArgsShowConfiguration{}, nil\n\tcase \"showcomp\":\n\t\treturn &shared.ArgsShowComponent{}, nil\n\t}\n\treturn nil, errors.Errorf(\"unknown command: %q\", name)\n}\n\n\/\/ errFlagParseError is a sentinel error value used to determine whether error\n\/\/ originates from flagset that already reported error to stderr so its\n\/\/ reporting can be omitted\nvar errFlagParseError = errors.New(\"flag parse error\")\n\n\/\/ parseArgs defines new flag set with flags from argStruct that writes its\n\/\/ errors to w, then calls flag set Parse method on provided raw arguments and\n\/\/ calls Validate() method on provided argStruct. If parseArgs returns\n\/\/ errFlagParseError, it means that flag set already reported error to w.\nfunc parseArgs(command string, argStruct validator, w io.Writer, raw []string) error {\n\tfs := flag.NewFlagSet(command, flag.ContinueOnError)\n\tfs.SetOutput(w)\n\tautoflags.DefineFlagSet(fs, argStruct)\n\tif err := fs.Parse(raw); err != nil {\n\t\treturn errFlagParseError\n\t}\n\treturn argStruct.Validate()\n}\n\ntype runArgs struct {\n\tAddr string `flag:\"addr,$DEPLOYCTL_ADDR, registry host address (host:port)\"`\n\tFp string `flag:\"fp,$DEPLOYCTL_FINGERPRINT, sha256 host key fingerprint (sha256:...)\"`\n}\n\nfunc (a *runArgs) Validate() error {\n\tif a.Addr == \"\" || a.Fp == \"\" {\n\t\treturn errors.New(\"both addr and fp should be set\")\n\t}\n\treturn nil\n}\n\nfunc usageFunc(printDefaults func()) func() {\n\treturn func() {\n\t\tfmt.Fprintln(os.Stderr, \"Usage: deployctl [flags] subcommand [subcommand flags]\")\n\t\tprintDefaults()\n\t\tfmt.Fprintln(os.Stderr, \"\\nSubcommands:\\n\")\n\t\tfmt.Fprintln(os.Stderr, strings.TrimSpace(shared.CommandsListing))\n\t}\n}\n\n\/\/ decodeArchive reads and unpacks rd as tar.gz stream, discarding data and\n\/\/ returning first error it encounters\nfunc decodeArchive(rd io.Reader) error {\n\tgr, err := gzip.NewReader(rd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer gr.Close()\n\ttr := tar.NewReader(gr)\n\tfor {\n\t\tif _, err := tr.Next(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := io.Copy(ioutil.Discard, tr); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n<commit_msg>deployctl: make go vet happy<commit_after>\/\/ Command deployctl implements single-command operator's interface to manage\n\/\/ deployments running under deploy-registry and deploy-agent\npackage main\n\nimport (\n\t\"archive\/tar\"\n\t\"compress\/gzip\"\n\t\"crypto\/sha256\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/crypto\/ssh\"\n\t\"golang.org\/x\/crypto\/ssh\/agent\"\n\n\t\"github.com\/artyom\/autoflags\"\n\t\"github.com\/artyom\/deploy-tools\/internal\/shared\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/pkg\/sftp\"\n)\n\nfunc main() {\n\targs := &runArgs{\n\t\tAddr: os.Getenv(\"DEPLOYCTL_ADDR\"),\n\t\tFp: os.Getenv(\"DEPLOYCTL_FINGERPRINT\"),\n\t}\n\tfs := flag.NewFlagSet(\"deployctl\", flag.ExitOnError)\n\tfs.Usage = usageFunc(fs.PrintDefaults)\n\tautoflags.DefineFlagSet(fs, args)\n\tfs.Parse(os.Args[1:])\n\tif err := args.Validate(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\\n\", err)\n\t\tfs.Usage()\n\t\tos.Exit(2)\n\t}\n\tif len(fs.Args()) == 0 {\n\t\tfs.Usage()\n\t\tos.Exit(2)\n\t}\n\tif err := dispatch(args.Addr, args.Fp, fs.Args()); err != nil {\n\t\tif err == errFlagParseError {\n\t\t\tos.Exit(2)\n\t\t}\n\t\tif _, ok := err.(*ssh.ExitError); !ok { \/\/ don't write \"Process exited with status 1\"\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t}\n\t\tos.Exit(1)\n\t}\n}\n\nfunc dispatch(addr, fingerprint string, rawArgs []string) error {\n\tif len(rawArgs) == 0 {\n\t\treturn errors.New(\"nothing to do\")\n\t}\n\tcmd, args := rawArgs[0], rawArgs[1:]\n\tswitch cmd {\n\tcase \"components\", \"configurations\":\n\t\treturn proxyCommand(addr, fingerprint, rawArgs)\n\tcase \"addver\":\n\t\tval := &shared.ArgsAddVersionByFile{}\n\t\tif err := parseArgs(cmd, val, os.Stderr, args); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn uploadAndUpdate(addr, fingerprint, val)\n\t}\n\tval, err := validatorForCommand(cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := parseArgs(cmd, val, os.Stderr, args); err != nil {\n\t\treturn err\n\t}\n\treturn proxyCommand(addr, fingerprint, rawArgs)\n}\n\nfunc uploadAndUpdate(addr, fingerprint string, args *shared.ArgsAddVersionByFile) error {\n\tsrc, err := os.Open(args.File)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer src.Close()\n\tclient, cancel, err := dialSSH(addr, fingerprint)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer cancel()\n\tsftpconn, err := sftp.NewClient(client)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer sftpconn.Close()\n\tdst, err := sftpconn.Create(\"upload\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer dst.Close()\n\tpr, pw := io.Pipe()\n\tdefer pw.Close()\n\tdefer pr.Close()\n\tverifyErr := make(chan error)\n\tgo func() {\n\t\terr := decodeArchive(pr)\n\t\tpr.CloseWithError(err)\n\t\tverifyErr <- err\n\t}()\n\th := sha256.New()\n\ttr := io.TeeReader(src, h)\n\tif _, err := io.Copy(io.MultiWriter(pw, dst), tr); err != nil {\n\t\treturn errors.WithMessage(err, \"upload failure\")\n\t}\n\tif err := dst.Close(); err != nil {\n\t\treturn err\n\t}\n\t\/\/ decodeArchive buffers data, so it may not fail during io.Copy to\n\t\/\/ MultiWriter above if data is copied faster than decoded; ensure we\n\t\/\/ check decode result\n\tswitch err := <-verifyErr; err {\n\tcase io.EOF:\n\tdefault:\n\t\treturn errors.WithMessage(err, \"file validation failed\")\n\t}\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer session.Close()\n\tsession.Stdout = os.Stdout\n\tsession.Stderr = os.Stderr\n\treturn session.Run(fmt.Sprintf(\"addver -name=%q -version=%q -hash=%x\",\n\t\targs.Name, args.Version, h.Sum(nil)))\n}\n\nfunc dialSSH(addr, fingerprint string) (client *ssh.Client, closeFunc func(), err error) {\n\tagentConn, err := net.Dial(\"unix\", os.Getenv(\"SSH_AUTH_SOCK\"))\n\tif err != nil {\n\t\treturn nil, nil, errors.WithMessage(err, \"cannot connect to ssh-agent, check if SSH_AUTH_SOCK is set\")\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tagentConn.Close()\n\t\t}\n\t}()\n\tsshAgent := agent.NewClient(agentConn)\n\tvar signers []ssh.Signer\n\tsigners, err = sshAgent.Signers()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tconfig := &ssh.ClientConfig{\n\t\tUser: os.Getenv(\"USER\"),\n\t\tAuth: []ssh.AuthMethod{ssh.PublicKeys(signers...)},\n\t\tTimeout: 30 * time.Second,\n\t\tHostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {\n\t\t\tif hostFp := ssh.FingerprintSHA256(key); hostFp != fingerprint {\n\t\t\t\treturn errors.Errorf(\"host key fingerprint mismatch: %v\", hostFp)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n\tclient, err = ssh.Dial(\"tcp\", addr, config)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tcloseFunc = func() { client.Close(); agentConn.Close() }\n\treturn client, closeFunc, nil\n}\n\nfunc proxyCommand(addr, fingerprint string, args []string) error {\n\tclient, cancel, err := dialSSH(addr, fingerprint)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer cancel()\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer session.Close()\n\tsession.Stdout = os.Stdout\n\tsession.Stderr = os.Stderr\n\treturn session.Run(strings.Join(args, \" \"))\n}\n\ntype validator interface {\n\tValidate() error\n}\n\nfunc validatorForCommand(name string) (validator, error) {\n\tswitch name {\n\tcase \"delver\":\n\t\treturn &shared.ArgsDelVersion{}, nil\n\tcase \"delcomp\":\n\t\treturn &shared.ArgsDelComponent{}, nil\n\tcase \"addconf\":\n\t\treturn &shared.ArgsAddConfiguration{}, nil\n\tcase \"delconf\":\n\t\treturn &shared.ArgsDelConfiguration{}, nil\n\tcase \"changeconf\":\n\t\treturn &shared.ArgsUpdateConfiguration{}, nil\n\tcase \"showconf\":\n\t\treturn &shared.ArgsShowConfiguration{}, nil\n\tcase \"showcomp\":\n\t\treturn &shared.ArgsShowComponent{}, nil\n\t}\n\treturn nil, errors.Errorf(\"unknown command: %q\", name)\n}\n\n\/\/ errFlagParseError is a sentinel error value used to determine whether error\n\/\/ originates from flagset that already reported error to stderr so its\n\/\/ reporting can be omitted\nvar errFlagParseError = errors.New(\"flag parse error\")\n\n\/\/ parseArgs defines new flag set with flags from argStruct that writes its\n\/\/ errors to w, then calls flag set Parse method on provided raw arguments and\n\/\/ calls Validate() method on provided argStruct. If parseArgs returns\n\/\/ errFlagParseError, it means that flag set already reported error to w.\nfunc parseArgs(command string, argStruct validator, w io.Writer, raw []string) error {\n\tfs := flag.NewFlagSet(command, flag.ContinueOnError)\n\tfs.SetOutput(w)\n\tautoflags.DefineFlagSet(fs, argStruct)\n\tif err := fs.Parse(raw); err != nil {\n\t\treturn errFlagParseError\n\t}\n\treturn argStruct.Validate()\n}\n\ntype runArgs struct {\n\tAddr string `flag:\"addr,$DEPLOYCTL_ADDR, registry host address (host:port)\"`\n\tFp string `flag:\"fp,$DEPLOYCTL_FINGERPRINT, sha256 host key fingerprint (sha256:...)\"`\n}\n\nfunc (a *runArgs) Validate() error {\n\tif a.Addr == \"\" || a.Fp == \"\" {\n\t\treturn errors.New(\"both addr and fp should be set\")\n\t}\n\treturn nil\n}\n\nfunc usageFunc(printDefaults func()) func() {\n\treturn func() {\n\t\tfmt.Fprintln(os.Stderr, \"Usage: deployctl [flags] subcommand [subcommand flags]\")\n\t\tprintDefaults()\n\t\tfmt.Fprintln(os.Stderr, \"\\nSubcommands:\")\n\t\tfmt.Fprintln(os.Stderr)\n\t\tfmt.Fprintln(os.Stderr, strings.TrimSpace(shared.CommandsListing))\n\t}\n}\n\n\/\/ decodeArchive reads and unpacks rd as tar.gz stream, discarding data and\n\/\/ returning first error it encounters\nfunc decodeArchive(rd io.Reader) error {\n\tgr, err := gzip.NewReader(rd)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer gr.Close()\n\ttr := tar.NewReader(gr)\n\tfor {\n\t\tif _, err := tr.Next(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := io.Copy(ioutil.Discard, tr); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/msoap\/html2data\"\n)\n\nfunc main() {\n\ttexts, err := html2data.GetData(os.Args[1], map[string]string{\"one\": os.Args[2]})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, text := range texts[\"one\"] {\n\t\tfmt.Println(text)\n\t}\n}\n<commit_msg>Fixed out if text not found<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/msoap\/html2data\"\n)\n\nfunc main() {\n\ttexts, err := html2data.GetData(os.Args[1], map[string]string{\"one\": os.Args[2]})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif textOne, ok := texts[\"one\"]; ok {\n\t\tfor _, text := range textOne {\n\t\t\tfmt.Println(text)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ mailbox_sender - sender worker.\n\/*\nUsage:\n\n\tmailbox_sender [flags]\n\nThe flags are:\n\n\t`-cid`: Campaign id\n\t`-g`: User groups\n\t`-p`: HTML file path\n\t`-t`: Mail Subject\n\t`-d`: Dry run all but not to send mail\n\t`-uid`: User ID\n\n`-uid`, `-g` can't use together\n\nExample:\n\n\tmailbox_sender -cid cbc6eb46 -g testuser -p .\/email_1.html -t \"#1 New Paper!\" -d\n\n*\/\npackage main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/md5\"\n\t\"database\/sql\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/url\"\n\t\"os\"\n\t\"regexp\"\n\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t\"github.com\/toomore\/mailbox\/campaign\"\n\t\"github.com\/toomore\/mailbox\/mails\"\n\t\"github.com\/toomore\/mailbox\/utils\"\n)\n\nvar (\n\tcid = flag.String(\"cid\", \"\", \"campaign ID\")\n\tdryRun = flag.Bool(\"d\", false, \"Dry run\")\n\treplaceLink = flag.Bool(\"rl\", true, \"Replace A tag links\")\n\tgroups = flag.String(\"g\", \"\", \"User groups\")\n\tpath = flag.String(\"p\", \"\", \"HTML file path\")\n\tsubject = flag.String(\"t\", \"\", \"mail subject\")\n\tuid = flag.String(\"uid\", \"\", \"User ID\")\n\tareg = regexp.MustCompile(`href=\"(http[s]?:\/\/[a-zA-z0-9\/\\.:?=,-]+)\"`)\n)\n\nfunc replaceReader(html *[]byte, cid string, seed string, uid string) {\n\tdata := url.Values{}\n\tdata.Set(\"c\", cid)\n\tdata.Set(\"u\", uid)\n\thm := campaign.MakeMacSeed(seed, data)\n\t*html = bytes.Replace(\n\t\t*html,\n\t\t[]byte(\"{{READER}}\"),\n\t\t[]byte(fmt.Sprintf(\"https:\/\/%s\/read\/%x?%s\", os.Getenv(\"mailbox_web_site\"), hm, data.Encode())),\n\t\t1)\n}\n\nfunc replaceFname(html *[]byte, fname string) {\n\t*html = bytes.Replace(*html, []byte(\"{{FNAME}}\"), []byte(fname), -1)\n}\n\nfunc replaceATag(html *[]byte, allATags []linksData, cid string, seed string, uid string) {\n\tfor _, v := range allATags {\n\t\tdata := url.Values{}\n\t\tdata.Set(\"c\", cid)\n\t\tdata.Set(\"u\", uid)\n\t\tdata.Set(\"l\", v.linkID)\n\t\tdata.Set(\"t\", \"a\")\n\t\thm := campaign.MakeMacSeed(seed, data)\n\n\t\t*html = bytes.Replace(*html, v.url,\n\t\t\t[]byte(fmt.Sprintf(\"https:\/\/%s\/door\/%x?%s\", os.Getenv(\"mailbox_web_site\"), hm, data.Encode())), -1)\n\t}\n}\n\ntype linksData struct {\n\tmd5h string\n\tlinkID string\n\turl []byte\n}\n\nfunc filterATags(body []byte) []linksData {\n\tallATags := areg.FindAllSubmatch(body, -1)\n\tresult := make([]linksData, len(allATags))\n\tfor i, v := range allATags {\n\t\tmd5h := md5.New()\n\t\tmd5h.Write(v[1])\n\t\tmd5hstr := fmt.Sprintf(\"%x\", md5h.Sum(nil))\n\t\tlinkID := fmt.Sprintf(\"%s\", utils.GenSeed())\n\t\t_, err := utils.GetConn().Query(`INSERT INTO links(id,cid,url,urlhash) VALUES(?,?,?,?)`, linkID, *cid, v[1], md5hstr)\n\t\tif err != nil {\n\t\t\trows, _ := utils.GetConn().Query(`SELECT id FROM links WHERE cid=? AND urlhash=?`, *cid, md5hstr)\n\t\t\tfor rows.Next() {\n\t\t\t\trows.Scan(&linkID)\n\t\t\t}\n\t\t}\n\t\tresult[i].md5h = md5hstr\n\t\tresult[i].linkID = linkID\n\t\tresult[i].url = v[1]\n\t}\n\treturn result\n}\n\nfunc main() {\n\tflag.Parse()\n\tfile, err := os.Open(*path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tbody, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tseed := campaign.GetSeed(*cid)\n\tvar rows *sql.Rows\n\tif *uid != \"\" {\n\t\trows, err = utils.GetConn().Query(`SELECT id,email,f_name,l_name FROM user WHERE alive=1 AND id=?`, *uid)\n\t} else {\n\t\trows, err = utils.GetConn().Query(`SELECT id,email,f_name,l_name FROM user WHERE alive=1 AND groups=?`, *groups)\n\t}\n\tdefer rows.Close()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar allATags []linksData\n\tif *replaceLink {\n\t\tallATags = filterATags(body)\n\t}\n\n\tvar count int\n\tfor rows.Next() {\n\t\tvar (\n\t\t\temail string\n\t\t\tfname string\n\t\t\tlname string\n\t\t\tmsg []byte\n\t\t\tno string\n\t\t)\n\t\trows.Scan(&no, &email, &fname, &lname)\n\n\t\tmsg = body\n\t\tif *replaceLink {\n\t\t\treplaceATag(&msg, allATags, *cid, seed, *uid)\n\t\t}\n\t\treplaceFname(&msg, fname)\n\t\treplaceReader(&msg, *cid, seed, no)\n\t\tparams := mails.GenParams(\n\t\t\tfmt.Sprintf(\"%s %s <%s>\", fname, lname, email),\n\t\t\tstring(msg),\n\t\t\t*subject)\n\t\tif *dryRun {\n\t\t\tlog.Printf(\"%s\\n\", msg)\n\t\t\tfor i, v := range allATags {\n\t\t\t\tfmt.Printf(\"%d => [%s] %s\\n\", i, v.linkID, v.url)\n\t\t\t}\n\t\t} else {\n\t\t\tmails.Send(params)\n\t\t}\n\t\tcount++\n\t}\n\tif *uid != \"\" {\n\t\tlog.Printf(\"\\n cid: %s, uid: %s, count: %d\\n Subject: `%s`\\n\", *cid, *uid, count, *subject)\n\t} else {\n\t\tlog.Printf(\"\\n cid: %s, groups: %s, count: %d\\n Subject: `%s`\\n\", *cid, *groups, count, *subject)\n\t}\n}\n<commit_msg>Fixed lost data<commit_after>\/\/ mailbox_sender - sender worker.\n\/*\nUsage:\n\n\tmailbox_sender [flags]\n\nThe flags are:\n\n\t`-cid`: Campaign id\n\t`-g`: User groups\n\t`-p`: HTML file path\n\t`-t`: Mail Subject\n\t`-d`: Dry run all but not to send mail\n\t`-uid`: User ID\n\n`-uid`, `-g` can't use together\n\nExample:\n\n\tmailbox_sender -cid cbc6eb46 -g testuser -p .\/email_1.html -t \"#1 New Paper!\" -d\n\n*\/\npackage main\n\nimport (\n\t\"bytes\"\n\t\"crypto\/md5\"\n\t\"database\/sql\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/url\"\n\t\"os\"\n\t\"regexp\"\n\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t\"github.com\/toomore\/mailbox\/campaign\"\n\t\"github.com\/toomore\/mailbox\/mails\"\n\t\"github.com\/toomore\/mailbox\/utils\"\n)\n\nvar (\n\tcid = flag.String(\"cid\", \"\", \"campaign ID\")\n\tdryRun = flag.Bool(\"d\", false, \"Dry run\")\n\treplaceLink = flag.Bool(\"rl\", true, \"Replace A tag links\")\n\tgroups = flag.String(\"g\", \"\", \"User groups\")\n\tpath = flag.String(\"p\", \"\", \"HTML file path\")\n\tsubject = flag.String(\"t\", \"\", \"mail subject\")\n\tuid = flag.String(\"uid\", \"\", \"User ID\")\n\tareg = regexp.MustCompile(`href=\"(http[s]?:\/\/[a-zA-z0-9\/\\.:?=,-@]+)\"`)\n)\n\nfunc replaceReader(html *[]byte, cid string, seed string, uid string) {\n\tdata := url.Values{}\n\tdata.Set(\"c\", cid)\n\tdata.Set(\"u\", uid)\n\thm := campaign.MakeMacSeed(seed, data)\n\t*html = bytes.Replace(\n\t\t*html,\n\t\t[]byte(\"{{READER}}\"),\n\t\t[]byte(fmt.Sprintf(\"https:\/\/%s\/read\/%x?%s\", os.Getenv(\"mailbox_web_site\"), hm, data.Encode())),\n\t\t1)\n}\n\nfunc replaceFname(html *[]byte, fname string) {\n\t*html = bytes.Replace(*html, []byte(\"{{FNAME}}\"), []byte(fname), -1)\n}\n\nfunc replaceATag(html *[]byte, allATags []linksData, cid string, seed string, uid string) {\n\tfor _, v := range allATags {\n\t\tdata := url.Values{}\n\t\tdata.Set(\"c\", cid)\n\t\tdata.Set(\"u\", uid)\n\t\tdata.Set(\"l\", v.linkID)\n\t\tdata.Set(\"t\", \"a\")\n\t\thm := campaign.MakeMacSeed(seed, data)\n\n\t\t*html = bytes.Replace(*html, v.url,\n\t\t\t[]byte(fmt.Sprintf(\"https:\/\/%s\/door\/%x?%s\", os.Getenv(\"mailbox_web_site\"), hm, data.Encode())), -1)\n\t}\n}\n\ntype linksData struct {\n\tmd5h string\n\tlinkID string\n\turl []byte\n}\n\nfunc filterATags(body []byte) []linksData {\n\tallATags := areg.FindAllSubmatch(body, -1)\n\tresult := make([]linksData, len(allATags))\n\tfor i, v := range allATags {\n\t\tmd5h := md5.New()\n\t\tmd5h.Write(v[1])\n\t\tmd5hstr := fmt.Sprintf(\"%x\", md5h.Sum(nil))\n\t\tlinkID := fmt.Sprintf(\"%s\", utils.GenSeed())\n\t\t_, err := utils.GetConn().Query(`INSERT INTO links(id,cid,url,urlhash) VALUES(?,?,?,?)`, linkID, *cid, v[1], md5hstr)\n\t\tif err != nil {\n\t\t\trows, _ := utils.GetConn().Query(`SELECT id FROM links WHERE cid=? AND urlhash=?`, *cid, md5hstr)\n\t\t\tfor rows.Next() {\n\t\t\t\trows.Scan(&linkID)\n\t\t\t}\n\t\t}\n\t\tresult[i].md5h = md5hstr\n\t\tresult[i].linkID = linkID\n\t\tresult[i].url = v[1]\n\t}\n\treturn result\n}\n\nfunc main() {\n\tflag.Parse()\n\tfile, err := os.Open(*path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tbody, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tseed := campaign.GetSeed(*cid)\n\tvar rows *sql.Rows\n\tif *uid != \"\" {\n\t\trows, err = utils.GetConn().Query(`SELECT id,email,f_name,l_name FROM user WHERE alive=1 AND id=?`, *uid)\n\t} else {\n\t\trows, err = utils.GetConn().Query(`SELECT id,email,f_name,l_name FROM user WHERE alive=1 AND groups=?`, *groups)\n\t}\n\tdefer rows.Close()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar allATags []linksData\n\tif *replaceLink {\n\t\tallATags = filterATags(body)\n\t}\n\n\tvar count int\n\tfor rows.Next() {\n\t\tvar (\n\t\t\temail string\n\t\t\tfname string\n\t\t\tlname string\n\t\t\tmsg []byte\n\t\t\tno string\n\t\t)\n\t\trows.Scan(&no, &email, &fname, &lname)\n\n\t\tmsg = body\n\t\tif *replaceLink {\n\t\t\treplaceATag(&msg, allATags, *cid, seed, no)\n\t\t}\n\t\treplaceFname(&msg, fname)\n\t\treplaceReader(&msg, *cid, seed, no)\n\t\tparams := mails.GenParams(\n\t\t\tfmt.Sprintf(\"%s %s <%s>\", fname, lname, email),\n\t\t\tstring(msg),\n\t\t\t*subject)\n\t\tif *dryRun {\n\t\t\tlog.Printf(\"%s\\n\", msg)\n\t\t\tfor i, v := range allATags {\n\t\t\t\tfmt.Printf(\"%d => [%s] %s\\n\", i, v.linkID, v.url)\n\t\t\t}\n\t\t} else {\n\t\t\tmails.Send(params)\n\t\t}\n\t\tcount++\n\t}\n\tif *uid != \"\" {\n\t\tlog.Printf(\"\\n cid: %s, uid: %s, count: %d\\n Subject: `%s`\\n\", *cid, *uid, count, *subject)\n\t} else {\n\t\tlog.Printf(\"\\n cid: %s, groups: %s, count: %d\\n Subject: `%s`\\n\", *cid, *groups, count, *subject)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"testing\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\nfunc TestCmdToot(t *testing.T) {\n\ttoot := \"\"\n\ttestWithServer(\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tswitch r.URL.Path {\n\t\t\tcase \"\/api\/v1\/statuses\":\n\t\t\t\ttoot = r.FormValue(\"status\")\n\t\t\t\tfmt.Fprintln(w, `{\"ID\": 2345}`)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\t\treturn\n\t\t},\n\t\tfunc(app *cli.App) {\n\t\t\tapp.Run([]string{\"mstdn\", \"toot\", \"foo\"})\n\t\t},\n\t)\n\tif toot != \"foo\" {\n\t\tt.Fatalf(\"want %q, got %q\", \"foo\", toot)\n\t}\n}\n<commit_msg>Fix test json property name again<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"testing\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\nfunc TestCmdToot(t *testing.T) {\n\ttoot := \"\"\n\ttestWithServer(\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tswitch r.URL.Path {\n\t\t\tcase \"\/api\/v1\/statuses\":\n\t\t\t\ttoot = r.FormValue(\"status\")\n\t\t\t\tfmt.Fprintln(w, `{\"id\": 2345}`)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\t\treturn\n\t\t},\n\t\tfunc(app *cli.App) {\n\t\t\tapp.Run([]string{\"mstdn\", \"toot\", \"foo\"})\n\t\t},\n\t)\n\tif toot != \"foo\" {\n\t\tt.Fatalf(\"want %q, got %q\", \"foo\", toot)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package graphite\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/influxdb\/influxdb\/models\"\n)\n\nvar (\n\tdefaultTemplate *template\n\tMinDate = time.Date(1901, 12, 13, 0, 0, 0, 0, time.UTC)\n\tMaxDate = time.Date(2038, 1, 19, 0, 0, 0, 0, time.UTC)\n)\n\nfunc init() {\n\tvar err error\n\tdefaultTemplate, err = NewTemplate(\"measurement*\", nil, DefaultSeparator)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ Parser encapsulates a Graphite Parser.\ntype Parser struct {\n\tmatcher *matcher\n\ttags models.Tags\n}\n\n\/\/ Options are configurable values that can be provided to a Parser\ntype Options struct {\n\tSeparator string\n\tTemplates []string\n\tDefaultTags models.Tags\n}\n\n\/\/ NewParserWithOptions returns a graphite parser using the given options\nfunc NewParserWithOptions(options Options) (*Parser, error) {\n\n\tmatcher := newMatcher()\n\tmatcher.AddDefaultTemplate(defaultTemplate)\n\n\tfor _, pattern := range options.Templates {\n\n\t\ttemplate := pattern\n\t\tfilter := \"\"\n\t\t\/\/ Format is [filter] <template> [tag1=value1,tag2=value2]\n\t\tparts := strings.Fields(pattern)\n\t\tif len(parts) >= 2 {\n\t\t\tif strings.Contains(parts[1], \"=\") {\n\t\t\t\ttemplate = parts[0]\n\t\t\t} else {\n\t\t\t\tfilter = parts[0]\n\t\t\t\ttemplate = parts[1]\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Parse out the default tags specific to this template\n\t\ttags := models.Tags{}\n\t\tif strings.Contains(parts[len(parts)-1], \"=\") {\n\t\t\ttagStrs := strings.Split(parts[len(parts)-1], \",\")\n\t\t\tfor _, kv := range tagStrs {\n\t\t\t\tparts := strings.Split(kv, \"=\")\n\t\t\t\ttags[parts[0]] = parts[1]\n\t\t\t}\n\t\t}\n\n\t\ttmpl, err := NewTemplate(template, tags, options.Separator)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmatcher.Add(filter, tmpl)\n\t}\n\treturn &Parser{matcher: matcher, tags: options.DefaultTags}, nil\n}\n\n\/\/ NewParser returns a GraphiteParser instance.\nfunc NewParser(templates []string, defaultTags models.Tags) (*Parser, error) {\n\treturn NewParserWithOptions(\n\t\tOptions{\n\t\t\tTemplates: templates,\n\t\t\tDefaultTags: defaultTags,\n\t\t\tSeparator: DefaultSeparator,\n\t\t})\n}\n\n\/\/ Parse performs Graphite parsing of a single line.\nfunc (p *Parser) Parse(line string) (models.Point, error) {\n\t\/\/ Break into 3 fields (name, value, timestamp).\n\tfields := strings.Fields(line)\n\tif len(fields) != 2 && len(fields) != 3 {\n\t\treturn nil, fmt.Errorf(\"received %q which doesn't have required fields\", line)\n\t}\n\n\t\/\/ decode the name and tags\n\tmatcher := p.matcher.Match(fields[0])\n\tmeasurement, tags, field := matcher.Apply(fields[0])\n\n\t\/\/ Could not extract measurement, use the raw value\n\tif measurement == \"\" {\n\t\tmeasurement = fields[0]\n\t}\n\n\t\/\/ Parse value.\n\tv, err := strconv.ParseFloat(fields[1], 64)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(`field \"%s\" value: %s`, fields[0], err)\n\t}\n\n\tif field != \"\" {\n\t\tfieldValues := map[string]interface{}{field: v}\n\t} else {\n\t\tfieldValues := map[string]interface{}{\"value\": v}\n\t}\n\n\t\/\/ If no 3rd field, use now as timestamp\n\ttimestamp := time.Now().UTC()\n\n\tif len(fields) == 3 {\n\t\t\/\/ Parse timestamp.\n\t\tunixTime, err := strconv.ParseFloat(fields[2], 64)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(`field \"%s\" time: %s`, fields[0], err)\n\t\t}\n\n\t\t\/\/ -1 is a special value that gets converted to current UTC time\n\t\t\/\/ See https:\/\/github.com\/graphite-project\/carbon\/issues\/54\n\t\tif unixTime != float64(-1) {\n\t\t\t\/\/ Check if we have fractional seconds\n\t\t\ttimestamp = time.Unix(int64(unixTime), int64((unixTime-math.Floor(unixTime))*float64(time.Second)))\n\t\t\tif timestamp.Before(MinDate) || timestamp.After(MaxDate) {\n\t\t\t\treturn nil, fmt.Errorf(\"timestamp out of range\")\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Set the default tags on the point if they are not already set\n\tfor k, v := range p.tags {\n\t\tif _, ok := tags[k]; !ok {\n\t\t\ttags[k] = v\n\t\t}\n\t}\n\tpoint := models.NewPoint(measurement, tags, fieldValues, timestamp)\n\n\treturn point, nil\n}\n\n\/\/ template represents a pattern and tags to map a graphite metric string to a influxdb Point\ntype template struct {\n\ttags []string\n\tdefaultTags models.Tags\n\tgreedyMeasurement bool\n\tseparator string\n}\n\nfunc NewTemplate(pattern string, defaultTags models.Tags, separator string) (*template, error) {\n\ttags := strings.Split(pattern, \".\")\n\thasMeasurement := false\n\ttemplate := &template{tags: tags, defaultTags: defaultTags, separator: separator}\n\n\tfor _, tag := range tags {\n\t\tif strings.HasPrefix(tag, \"measurement\") {\n\t\t\thasMeasurement = true\n\t\t}\n\t\tif tag == \"measurement*\" {\n\t\t\ttemplate.greedyMeasurement = true\n\t\t}\n\t}\n\n\tif !hasMeasurement {\n\t\treturn nil, fmt.Errorf(\"no measurement specified for template. %q\", pattern)\n\t}\n\n\treturn template, nil\n}\n\n\/\/ Apply extracts the template fields form the given line and returns the measurement\n\/\/ name and tags\nfunc (t *template) Apply(line string) (string, map[string]string, string) {\n\tfields := strings.Split(line, \".\")\n\tvar (\n\t\tmeasurement []string\n\t\ttags = make(map[string]string)\n\t\tfield string\n\t)\n\n\t\/\/ Set any default tags\n\tfor k, v := range t.defaultTags {\n\t\ttags[k] = v\n\t}\n\n\tfor i, tag := range t.tags {\n\t\tif i >= len(fields) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif tag == \"measurement\" {\n\t\t\tmeasurement = append(measurement, fields[i])\n\t\t} else if tag == \"field\" {\n\t\t\tif field != \"\" {\n\t\t\t\treturn nil, fmt.Errorf(`template can only have one field defined: %s`, line)\n\t\t\t}\n\t\t\tfield = field\n\t\t} else if tag == \"measurement*\" {\n\t\t\tmeasurement = append(measurement, fields[i:]...)\n\t\t\tbreak\n\t\t} else if tag != \"\" {\n\t\t\ttags[tag] = fields[i]\n\t\t}\n\t}\n\n\treturn strings.Join(measurement, t.separator), tags, field\n}\n\n\/\/ matcher determines which template should be applied to a given metric\n\/\/ based on a filter tree.\ntype matcher struct {\n\troot *node\n\tdefaultTemplate *template\n}\n\nfunc newMatcher() *matcher {\n\treturn &matcher{\n\t\troot: &node{},\n\t}\n}\n\n\/\/ Add inserts the template in the filter tree based the given filter\nfunc (m *matcher) Add(filter string, template *template) {\n\tif filter == \"\" {\n\t\tm.AddDefaultTemplate(template)\n\t\treturn\n\t}\n\tm.root.Insert(filter, template)\n}\n\nfunc (m *matcher) AddDefaultTemplate(template *template) {\n\tm.defaultTemplate = template\n}\n\n\/\/ Match returns the template that matches the given graphite line\nfunc (m *matcher) Match(line string) *template {\n\ttmpl := m.root.Search(line)\n\tif tmpl != nil {\n\t\treturn tmpl\n\t}\n\n\treturn m.defaultTemplate\n}\n\n\/\/ node is an item in a sorted k-ary tree. Each child is sorted by its value.\n\/\/ The special value of \"*\", is always last.\ntype node struct {\n\tvalue string\n\tchildren nodes\n\ttemplate *template\n}\n\nfunc (n *node) insert(values []string, template *template) {\n\t\/\/ Add the end, set the template\n\tif len(values) == 0 {\n\t\tn.template = template\n\t\treturn\n\t}\n\n\t\/\/ See if the the current element already exists in the tree. If so, insert the\n\t\/\/ into that sub-tree\n\tfor _, v := range n.children {\n\t\tif v.value == values[0] {\n\t\t\tv.insert(values[1:], template)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ New element, add it to the tree and sort the children\n\tnewNode := &node{value: values[0]}\n\tn.children = append(n.children, newNode)\n\tsort.Sort(&n.children)\n\n\t\/\/ Now insert the rest of the tree into the new element\n\tnewNode.insert(values[1:], template)\n}\n\n\/\/ Insert inserts the given string template into the tree. The filter string is separated\n\/\/ on \".\" and each part is used as the path in the tree.\nfunc (n *node) Insert(filter string, template *template) {\n\tn.insert(strings.Split(filter, \".\"), template)\n}\n\nfunc (n *node) search(lineParts []string) *template {\n\t\/\/ Nothing to search\n\tif len(lineParts) == 0 || len(n.children) == 0 {\n\t\treturn n.template\n\t}\n\n\t\/\/ If last element is a wildcard, don't include in this search since it's sorted\n\t\/\/ to the end but lexicographically it would not always be and sort.Search assumes\n\t\/\/ the slice is sorted.\n\tlength := len(n.children)\n\tif n.children[length-1].value == \"*\" {\n\t\tlength -= 1\n\t}\n\n\t\/\/ Find the index of child with an exact match\n\ti := sort.Search(length, func(i int) bool {\n\t\treturn n.children[i].value >= lineParts[0]\n\t})\n\n\t\/\/ Found an exact match, so search that child sub-tree\n\tif i < len(n.children) && n.children[i].value == lineParts[0] {\n\t\treturn n.children[i].search(lineParts[1:])\n\t}\n\t\/\/ Not an exact match, see if we have a wildcard child to search\n\tif n.children[len(n.children)-1].value == \"*\" {\n\t\treturn n.children[len(n.children)-1].search(lineParts[1:])\n\t}\n\treturn n.template\n}\n\nfunc (n *node) Search(line string) *template {\n\treturn n.search(strings.Split(line, \".\"))\n}\n\ntype nodes []*node\n\n\/\/ Less returns a boolean indicating whether the filter at position j\n\/\/ is less than the filter at position k. Filters are order by string\n\/\/ comparison of each component parts. A wildcard value \"*\" is never\n\/\/ less than a non-wildcard value.\n\/\/\n\/\/ For example, the filters:\n\/\/ \"*.*\"\n\/\/ \"servers.*\"\n\/\/ \"servers.localhost\"\n\/\/ \"*.localhost\"\n\/\/\n\/\/ Would be sorted as:\n\/\/ \"servers.localhost\"\n\/\/ \"servers.*\"\n\/\/ \"*.localhost\"\n\/\/ \"*.*\"\nfunc (n *nodes) Less(j, k int) bool {\n\tif (*n)[j].value == \"*\" && (*n)[k].value != \"*\" {\n\t\treturn false\n\t}\n\n\tif (*n)[j].value != \"*\" && (*n)[k].value == \"*\" {\n\t\treturn true\n\t}\n\n\treturn (*n)[j].value < (*n)[k].value\n}\n\nfunc (n *nodes) Swap(i, j int) { (*n)[i], (*n)[j] = (*n)[j], (*n)[i] }\nfunc (n *nodes) Len() int { return len(*n) }\n<commit_msg>comment out tests for now..<commit_after>package graphite\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/influxdb\/influxdb\/models\"\n)\n\nvar (\n\tdefaultTemplate *template\n\tMinDate = time.Date(1901, 12, 13, 0, 0, 0, 0, time.UTC)\n\tMaxDate = time.Date(2038, 1, 19, 0, 0, 0, 0, time.UTC)\n)\n\nfunc init() {\n\tvar err error\n\tdefaultTemplate, err = NewTemplate(\"measurement*\", nil, DefaultSeparator)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ Parser encapsulates a Graphite Parser.\ntype Parser struct {\n\tmatcher *matcher\n\ttags models.Tags\n}\n\n\/\/ Options are configurable values that can be provided to a Parser\ntype Options struct {\n\tSeparator string\n\tTemplates []string\n\tDefaultTags models.Tags\n}\n\n\/\/ NewParserWithOptions returns a graphite parser using the given options\nfunc NewParserWithOptions(options Options) (*Parser, error) {\n\n\tmatcher := newMatcher()\n\tmatcher.AddDefaultTemplate(defaultTemplate)\n\n\tfor _, pattern := range options.Templates {\n\n\t\ttemplate := pattern\n\t\tfilter := \"\"\n\t\t\/\/ Format is [filter] <template> [tag1=value1,tag2=value2]\n\t\tparts := strings.Fields(pattern)\n\t\tif len(parts) >= 2 {\n\t\t\tif strings.Contains(parts[1], \"=\") {\n\t\t\t\ttemplate = parts[0]\n\t\t\t} else {\n\t\t\t\tfilter = parts[0]\n\t\t\t\ttemplate = parts[1]\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Parse out the default tags specific to this template\n\t\ttags := models.Tags{}\n\t\tif strings.Contains(parts[len(parts)-1], \"=\") {\n\t\t\ttagStrs := strings.Split(parts[len(parts)-1], \",\")\n\t\t\tfor _, kv := range tagStrs {\n\t\t\t\tparts := strings.Split(kv, \"=\")\n\t\t\t\ttags[parts[0]] = parts[1]\n\t\t\t}\n\t\t}\n\n\t\ttmpl, err := NewTemplate(template, tags, options.Separator)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmatcher.Add(filter, tmpl)\n\t}\n\treturn &Parser{matcher: matcher, tags: options.DefaultTags}, nil\n}\n\n\/\/ NewParser returns a GraphiteParser instance.\nfunc NewParser(templates []string, defaultTags models.Tags) (*Parser, error) {\n\treturn NewParserWithOptions(\n\t\tOptions{\n\t\t\tTemplates: templates,\n\t\t\tDefaultTags: defaultTags,\n\t\t\tSeparator: DefaultSeparator,\n\t\t})\n}\n\n\/\/ Parse performs Graphite parsing of a single line.\nfunc (p *Parser) Parse(line string) (models.Point, error) {\n\t\/\/ Break into 3 fields (name, value, timestamp).\n\tfields := strings.Fields(line)\n\tif len(fields) != 2 && len(fields) != 3 {\n\t\treturn nil, fmt.Errorf(\"received %q which doesn't have required fields\", line)\n\t}\n\n\t\/\/ decode the name and tags\n\tmatcher := p.matcher.Match(fields[0])\n\tmeasurement, tags, field := matcher.Apply(fields[0])\n\n\t\/\/ Could not extract measurement, use the raw value\n\tif measurement == \"\" {\n\t\tmeasurement = fields[0]\n\t}\n\n\t\/\/ Parse value.\n\tv, err := strconv.ParseFloat(fields[1], 64)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(`field \"%s\" value: %s`, fields[0], err)\n\t}\n\n\tif field != \"\" {\n\t\tfieldValues := map[string]interface{}{field: v}\n\t} else {\n\t\tfieldValues := map[string]interface{}{\"value\": v}\n\t}\n\n\t\/\/ If no 3rd field, use now as timestamp\n\ttimestamp := time.Now().UTC()\n\n\tif len(fields) == 3 {\n\t\t\/\/ Parse timestamp.\n\t\tunixTime, err := strconv.ParseFloat(fields[2], 64)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(`field \"%s\" time: %s`, fields[0], err)\n\t\t}\n\n\t\t\/\/ -1 is a special value that gets converted to current UTC time\n\t\t\/\/ See https:\/\/github.com\/graphite-project\/carbon\/issues\/54\n\t\tif unixTime != float64(-1) {\n\t\t\t\/\/ Check if we have fractional seconds\n\t\t\ttimestamp = time.Unix(int64(unixTime), int64((unixTime-math.Floor(unixTime))*float64(time.Second)))\n\t\t\tif timestamp.Before(MinDate) || timestamp.After(MaxDate) {\n\t\t\t\treturn nil, fmt.Errorf(\"timestamp out of range\")\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Set the default tags on the point if they are not already set\n\tfor k, v := range p.tags {\n\t\tif _, ok := tags[k]; !ok {\n\t\t\ttags[k] = v\n\t\t}\n\t}\n\tpoint := models.NewPoint(measurement, tags, fieldValues, timestamp)\n\n\treturn point, nil\n}\n\n\/\/ template represents a pattern and tags to map a graphite metric string to a influxdb Point\ntype template struct {\n\ttags []string\n\tdefaultTags models.Tags\n\tgreedyMeasurement bool\n\tseparator string\n}\n\nfunc NewTemplate(pattern string, defaultTags models.Tags, separator string) (*template, error) {\n\ttags := strings.Split(pattern, \".\")\n\thasMeasurement := false\n\ttemplate := &template{tags: tags, defaultTags: defaultTags, separator: separator}\n\n\tfor _, tag := range tags {\n\t\tif strings.HasPrefix(tag, \"measurement\") {\n\t\t\thasMeasurement = true\n\t\t}\n\t\tif tag == \"measurement*\" {\n\t\t\ttemplate.greedyMeasurement = true\n\t\t}\n\t}\n\n\tif !hasMeasurement {\n\t\treturn nil, fmt.Errorf(\"no measurement specified for template. %q\", pattern)\n\t}\n\n\treturn template, nil\n}\n\n\/\/ Apply extracts the template fields form the given line and returns the measurement\n\/\/ name and tags\nfunc (t *template) Apply(line string) (string, map[string]string, string) {\n\tfields := strings.Split(line, \".\")\n\tvar (\n\t\tmeasurement []string\n\t\ttags = make(map[string]string)\n\t\tfield string\n\t)\n\n\t\/\/ Set any default tags\n\tfor k, v := range t.defaultTags {\n\t\ttags[k] = v\n\t}\n\n\tfor i, tag := range t.tags {\n\t\tif i >= len(fields) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif tag == \"measurement\" {\n\t\t\tmeasurement = append(measurement, fields[i])\n\t\t} else if tag == \"field\" {\n \/\/ FIXME: cant add error here..\n \/\/if len(field) == 0 {\n \/\/ \/\/ FIXME - more informative error message\n\t\t\t\/\/\treturn nil, fmt.Errorf(\"template can only have one field defined\")\n\t\t\t\/\/}\n\t\t\tfield = field\n\t\t} else if tag == \"measurement*\" {\n\t\t\tmeasurement = append(measurement, fields[i:]...)\n\t\t\tbreak\n\t\t} else if tag != \"\" {\n\t\t\ttags[tag] = fields[i]\n\t\t}\n\t}\n\n\treturn strings.Join(measurement, t.separator), tags, field\n}\n\n\/\/ matcher determines which template should be applied to a given metric\n\/\/ based on a filter tree.\ntype matcher struct {\n\troot *node\n\tdefaultTemplate *template\n}\n\nfunc newMatcher() *matcher {\n\treturn &matcher{\n\t\troot: &node{},\n\t}\n}\n\n\/\/ Add inserts the template in the filter tree based the given filter\nfunc (m *matcher) Add(filter string, template *template) {\n\tif filter == \"\" {\n\t\tm.AddDefaultTemplate(template)\n\t\treturn\n\t}\n\tm.root.Insert(filter, template)\n}\n\nfunc (m *matcher) AddDefaultTemplate(template *template) {\n\tm.defaultTemplate = template\n}\n\n\/\/ Match returns the template that matches the given graphite line\nfunc (m *matcher) Match(line string) *template {\n\ttmpl := m.root.Search(line)\n\tif tmpl != nil {\n\t\treturn tmpl\n\t}\n\n\treturn m.defaultTemplate\n}\n\n\/\/ node is an item in a sorted k-ary tree. Each child is sorted by its value.\n\/\/ The special value of \"*\", is always last.\ntype node struct {\n\tvalue string\n\tchildren nodes\n\ttemplate *template\n}\n\nfunc (n *node) insert(values []string, template *template) {\n\t\/\/ Add the end, set the template\n\tif len(values) == 0 {\n\t\tn.template = template\n\t\treturn\n\t}\n\n\t\/\/ See if the the current element already exists in the tree. If so, insert the\n\t\/\/ into that sub-tree\n\tfor _, v := range n.children {\n\t\tif v.value == values[0] {\n\t\t\tv.insert(values[1:], template)\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ New element, add it to the tree and sort the children\n\tnewNode := &node{value: values[0]}\n\tn.children = append(n.children, newNode)\n\tsort.Sort(&n.children)\n\n\t\/\/ Now insert the rest of the tree into the new element\n\tnewNode.insert(values[1:], template)\n}\n\n\/\/ Insert inserts the given string template into the tree. The filter string is separated\n\/\/ on \".\" and each part is used as the path in the tree.\nfunc (n *node) Insert(filter string, template *template) {\n\tn.insert(strings.Split(filter, \".\"), template)\n}\n\nfunc (n *node) search(lineParts []string) *template {\n\t\/\/ Nothing to search\n\tif len(lineParts) == 0 || len(n.children) == 0 {\n\t\treturn n.template\n\t}\n\n\t\/\/ If last element is a wildcard, don't include in this search since it's sorted\n\t\/\/ to the end but lexicographically it would not always be and sort.Search assumes\n\t\/\/ the slice is sorted.\n\tlength := len(n.children)\n\tif n.children[length-1].value == \"*\" {\n\t\tlength -= 1\n\t}\n\n\t\/\/ Find the index of child with an exact match\n\ti := sort.Search(length, func(i int) bool {\n\t\treturn n.children[i].value >= lineParts[0]\n\t})\n\n\t\/\/ Found an exact match, so search that child sub-tree\n\tif i < len(n.children) && n.children[i].value == lineParts[0] {\n\t\treturn n.children[i].search(lineParts[1:])\n\t}\n\t\/\/ Not an exact match, see if we have a wildcard child to search\n\tif n.children[len(n.children)-1].value == \"*\" {\n\t\treturn n.children[len(n.children)-1].search(lineParts[1:])\n\t}\n\treturn n.template\n}\n\nfunc (n *node) Search(line string) *template {\n\treturn n.search(strings.Split(line, \".\"))\n}\n\ntype nodes []*node\n\n\/\/ Less returns a boolean indicating whether the filter at position j\n\/\/ is less than the filter at position k. Filters are order by string\n\/\/ comparison of each component parts. A wildcard value \"*\" is never\n\/\/ less than a non-wildcard value.\n\/\/\n\/\/ For example, the filters:\n\/\/ \"*.*\"\n\/\/ \"servers.*\"\n\/\/ \"servers.localhost\"\n\/\/ \"*.localhost\"\n\/\/\n\/\/ Would be sorted as:\n\/\/ \"servers.localhost\"\n\/\/ \"servers.*\"\n\/\/ \"*.localhost\"\n\/\/ \"*.*\"\nfunc (n *nodes) Less(j, k int) bool {\n\tif (*n)[j].value == \"*\" && (*n)[k].value != \"*\" {\n\t\treturn false\n\t}\n\n\tif (*n)[j].value != \"*\" && (*n)[k].value == \"*\" {\n\t\treturn true\n\t}\n\n\treturn (*n)[j].value < (*n)[k].value\n}\n\nfunc (n *nodes) Swap(i, j int) { (*n)[i], (*n)[j] = (*n)[j], (*n)[i] }\nfunc (n *nodes) Len() int { return len(*n) }\n<|endoftext|>"} {"text":"<commit_before>package sessions\n\nimport (\n\t\"github.com\/codegangsta\/martini\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n)\n\nfunc BenchmarkNoSessionsMiddleware(b *testing.B) {\n\tm := testMartini()\n\tm.Get(\"\/foo\", func() string {\n\t\treturn \"Foo\"\n\t})\n\n\trecorder := httptest.NewRecorder()\n\tr, _ := http.NewRequest(\"GET\", \"\/foo\", nil)\n\n\tb.ResetTimer()\n\tfor n := 0; n < b.N; n++ {\n\t\tm.ServeHTTP(recorder, r)\n\t}\n}\n\nfunc BenchmarkSessionsNoWrites(b *testing.B) {\n\tm := testMartini()\n\tstore := NewCookieStore([]byte(\"secret123\"))\n\tm.Use(Sessions(\"my_session\", store))\n\tm.Get(\"\/foo\", func() string {\n\t\treturn \"Foo\"\n\t})\n\n\trecorder := httptest.NewRecorder()\n\tr, _ := http.NewRequest(\"GET\", \"\/foo\", nil)\n\n\tb.ResetTimer()\n\tfor n := 0; n < b.N; n++ {\n\t\tm.ServeHTTP(recorder, r)\n\t}\n}\n\nfunc BenchmarkSessionsWithWrite(b *testing.B) {\n\tm := testMartini()\n\tstore := NewCookieStore([]byte(\"secret123\"))\n\tm.Use(Sessions(\"my_session\", store))\n\tm.Get(\"\/foo\", func(s Session) string {\n\t\ts.Set(\"foo\", \"bar\")\n\t\treturn \"Foo\"\n\t})\n\n\trecorder := httptest.NewRecorder()\n\tr, _ := http.NewRequest(\"GET\", \"\/foo\", nil)\n\n\tb.ResetTimer()\n\tfor n := 0; n < b.N; n++ {\n\t\tm.ServeHTTP(recorder, r)\n\t}\n}\n\nfunc BenchmarkSessionsWithRead(b *testing.B) {\n\tm := testMartini()\n\tstore := NewCookieStore([]byte(\"secret123\"))\n\tm.Use(Sessions(\"my_session\", store))\n\tm.Get(\"\/foo\", func(s Session) string {\n\t\ts.Get(\"foo\")\n\t\treturn \"Foo\"\n\t})\n\n\trecorder := httptest.NewRecorder()\n\tr, _ := http.NewRequest(\"GET\", \"\/foo\", nil)\n\n\tb.ResetTimer()\n\tfor n := 0; n < b.N; n++ {\n\t\tm.ServeHTTP(recorder, r)\n\t}\n}\n\nfunc testMartini() *martini.ClassicMartini {\n\tm := martini.Classic()\n\tm.Handlers()\n\tm.Map(log.New(ioutil.Discard, \"\", 0))\n\treturn m\n}\n<commit_msg>JMS #38: Cleaned up benchmarks<commit_after>package sessions\n\nimport (\n\t\"github.com\/codegangsta\/martini\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n)\n\nfunc BenchmarkNoSessionsMiddleware(b *testing.B) {\n\tm := testMartini()\n\tm.Get(\"\/foo\", func() string {\n\t\treturn \"Foo\"\n\t})\n\n\trecorder := httptest.NewRecorder()\n\tr, _ := http.NewRequest(\"GET\", \"\/foo\", nil)\n\n\tb.ResetTimer()\n\tfor n := 0; n < b.N; n++ {\n\t\tm.ServeHTTP(recorder, r)\n\t}\n}\n\nfunc BenchmarkSessionsNoWrites(b *testing.B) {\n\tm := testMartini()\n\tstore := NewCookieStore([]byte(\"secret123\"))\n\tm.Use(Sessions(\"my_session\", store))\n\tm.Get(\"\/foo\", func() string {\n\t\treturn \"Foo\"\n\t})\n\n\trecorder := httptest.NewRecorder()\n\tr, _ := http.NewRequest(\"GET\", \"\/foo\", nil)\n\n\tb.ResetTimer()\n\tfor n := 0; n < b.N; n++ {\n\t\tm.ServeHTTP(recorder, r)\n\t}\n}\n\nfunc BenchmarkSessionsWithWrite(b *testing.B) {\n\tm := testMartini()\n\tstore := NewCookieStore([]byte(\"secret123\"))\n\tm.Use(Sessions(\"my_session\", store))\n\tm.Get(\"\/foo\", func(s Session) string {\n\t\ts.Set(\"foo\", \"bar\")\n\t\treturn \"Foo\"\n\t})\n\n\trecorder := httptest.NewRecorder()\n\tr, _ := http.NewRequest(\"GET\", \"\/foo\", nil)\n\n\tb.ResetTimer()\n\tfor n := 0; n < b.N; n++ {\n\t\tm.ServeHTTP(recorder, r)\n\t}\n}\n\nfunc BenchmarkSessionsWithRead(b *testing.B) {\n\tm := testMartini()\n\tstore := NewCookieStore([]byte(\"secret123\"))\n\tm.Use(Sessions(\"my_session\", store))\n\tm.Get(\"\/foo\", func(s Session) string {\n\t\ts.Get(\"foo\")\n\t\treturn \"Foo\"\n\t})\n\n\trecorder := httptest.NewRecorder()\n\tr, _ := http.NewRequest(\"GET\", \"\/foo\", nil)\n\n\tb.ResetTimer()\n\tfor n := 0; n < b.N; n++ {\n\t\tm.ServeHTTP(recorder, r)\n\t}\n}\n\nfunc testMartini() *martini.ClassicMartini {\n\tm := martini.Classic()\n\tm.Handlers()\n\treturn m\n}\n<|endoftext|>"} {"text":"<commit_before>package authentication\n\nimport (\n\t\"crypto\/rsa\"\n\t\"crypto\/sha1\"\n\t\"crypto\/x509\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"github.com\/ONSdigital\/go-launch-a-survey\/settings\"\n\t\"github.com\/ONSdigital\/go-launch-a-survey\/surveys\"\n\t\"github.com\/satori\/go.uuid\"\n\t\"gopkg.in\/square\/go-jose.v2\"\n\t\"gopkg.in\/square\/go-jose.v2\/jwt\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"time\"\n\t\"net\/http\"\n\t\"gopkg.in\/square\/go-jose.v2\/json\"\n\n\t\"math\/rand\"\n\t\"encoding\/base64\"\n\t\"log\"\n\t\"bytes\"\n\t\"strings\"\n\t\"path\"\n)\n\n\/\/ KeyLoadError describes an error that can occur during key loading\ntype KeyLoadError struct {\n\t\/\/ Op is the operation which caused the error, such as\n\t\/\/ \"read\", \"parse\" or \"cast\".\n\tOp string\n\n\t\/\/ Err is a description of the error that occurred during the operation.\n\tErr string\n}\n\nfunc (e *KeyLoadError) Error() string {\n\tif e == nil {\n\t\treturn \"<nil>\"\n\t}\n\treturn e.Op + \": \" + e.Err\n}\n\n\/\/ PublicKeyResult is a wrapper for the public key and the kid that identifies it\ntype PublicKeyResult struct {\n\tkey *rsa.PublicKey\n\tkid string\n}\n\n\/\/ PrivateKeyResult is a wrapper for the private key and the kid that identifies it\ntype PrivateKeyResult struct {\n\tkey *rsa.PrivateKey\n\tkid string\n}\n\nfunc loadEncryptionKey() (*PublicKeyResult, *KeyLoadError) {\n\tencryptionKeyPath := settings.Get(\"JWT_ENCRYPTION_KEY_PATH\")\n\n\tkeyData, err := ioutil.ReadFile(encryptionKeyPath)\n\tif err != nil {\n\t\treturn nil, &KeyLoadError{Op: \"read\", Err: \"Failed to read encryption key from file: \" + encryptionKeyPath}\n\t}\n\n\tblock, _ := pem.Decode(keyData)\n\tpub, err := x509.ParsePKIXPublicKey(block.Bytes)\n\tif err != nil {\n\t\treturn nil, &KeyLoadError{Op: \"parse\", Err: \"Failed to parse encryption key PEM\"}\n\t}\n\n\tkid := fmt.Sprintf(\"%x\", sha1.Sum(keyData))\n\n\tpublicKey, ok := pub.(*rsa.PublicKey)\n\tif !ok {\n\t\treturn nil, &KeyLoadError{Op: \"cast\", Err: \"Failed to cast key to rsa.PublicKey\"}\n\t}\n\n\treturn &PublicKeyResult{publicKey, kid}, nil\n}\n\nfunc loadSigningKey() (*PrivateKeyResult, *KeyLoadError) {\n\tsigningKeyPath := settings.Get(\"JWT_SIGNING_KEY_PATH\")\n\tkeyData, err := ioutil.ReadFile(signingKeyPath)\n\tif err != nil {\n\t\treturn nil, &KeyLoadError{Op: \"read\", Err: \"Failed to read signing key from file: \" + signingKeyPath}\n\t}\n\n\tblock, _ := pem.Decode(keyData)\n\tprivateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)\n\tif err != nil {\n\t\treturn nil, &KeyLoadError{Op: \"parse\", Err: \"Failed to parse signing key from PEM\"}\n\t}\n\n\tPublicKey, err := x509.MarshalPKIXPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn nil, &KeyLoadError{Op: \"marshal\", Err: \"Failed to marshal public key\"}\n\t}\n\n\tpubBytes := pem.EncodeToMemory(&pem.Block{\n\t\tType: \"PUBLIC KEY\",\n\t\tBytes: PublicKey,\n\t})\n\tkid := fmt.Sprintf(\"%x\", sha1.Sum(pubBytes))\n\n\treturn &PrivateKeyResult{privateKey, kid}, nil\n}\n\n\/\/ EqClaims is a representation of the set of values needed when generating a valid token\ntype EqClaims struct {\n\tjwt.Claims\n\tUserID string `json:\"user_id\"`\n\tEqID string `json:\"eq_id\"`\n\tPeriodID string `json:\"period_id\"`\n\tPeriodStr string `json:\"period_str\"`\n\tCollectionExerciseSid string `json:\"collection_exercise_sid\"`\n\tRuRef string `json:\"ru_ref\"`\n\tRuName string `json:\"ru_name\"`\n\tRefPStartDate string `json:\"ref_p_start_date\"` \/\/ iso_8601_date\n\tRefPEndDate string `json:\"ref_p_end_date,omitempty\"` \/\/ iso_8601_date\n\tFormType string `json:\"form_type\"`\n\tSurveyURL string `json:\"survey_url,omitempty\"`\n\tReturnBy string `json:\"return_by\"`\n\tTradAs string `json:\"trad_as,omitempty\"`\n\tEmploymentDate string `json:\"employment_date,omitempty\"` \/\/ iso_8601_date\n\tRegionCode string `json:\"region_code,omitempty\"`\n\tLanguageCode string `json:\"language_code,omitempty\"`\n\tVariantFlags variantFlags `json:\"variant_flags,omitempty\"`\n\tTxID string `json:\"tx_id,omitempty\"`\n\tRoles []string `json:\"roles,omitempty\"`\n\tCaseID string `json:\"case_id,omitempty\"`\n\tCaseRef string `json:\"case_ref,omitempty\"`\n\tAccountServiceURL string `json:\"account_service_url,omitempty\"`\n}\n\ntype variantFlags struct {\n\tSexualIdentity bool `json:\"sexual_identity,omitempty\"`\n}\n\n\/\/ QuestionnaireSchema is a minimal representation of a questionnaire schema used for extracting the eq_id and form_type\ntype QuestionnaireSchema struct {\n\tEqID string `json:\"eq_id\"`\n\tFormType string `json:\"form_type\"`\n}\n\n\/\/ Generates a random string of a defined length\nfunc randomStringGen() (rs string) {\n\tsize := 6 \/\/change the length of the generated random string\n\trb := make([]byte, size)\n\t_, err := rand.Read(rb)\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\trandomString := base64.URLEncoding.EncodeToString(rb)\n\n\treturn randomString\n}\n\nfunc generateDefaultClaims(accountURL string) (claims EqClaims) {\n\tdefaultClaims := EqClaims{\n\t\tUserID: \"UNKNOWN\",\n\t\tPeriodID: \"201605\",\n\t\tPeriodStr: \"May 2017\",\n\t\tCollectionExerciseSid: randomStringGen(),\n\t\tRuRef: \"12346789012A\",\n\t\tRuName: \"ESSENTIAL ENTERPRISE LTD.\",\n\t\tRefPStartDate: \"2016-05-01\",\n\t\tRefPEndDate: \"2016-05-31\",\n\t\tReturnBy: \"2016-06-12\",\n\t\tTradAs: \"ESSENTIAL ENTERPRISE LTD.\",\n\t\tEmploymentDate: \"2016-06-10\",\n\t\tRegionCode: \"GB-ENG\",\n\t\tLanguageCode: \"en\",\n\t\tTxID: uuid.NewV4().String(),\n\t\tCaseID: uuid.NewV4().String(),\n\t\tCaseRef: \"1000000000000001\",\n\t\tAccountServiceURL: accountURL,\n\t\tVariantFlags: variantFlags{\n\t\t\tSexualIdentity: true,\n\t\t},\n\t\tRoles: []string{\"dumper\"},\n\t}\n\treturn defaultClaims\n}\n\nfunc generateClaimsFromPost(postValues url.Values) (claims EqClaims) {\n\tpostClaims := EqClaims{\n\t\tUserID: postValues.Get(\"user_id\"),\n\t\tPeriodID: postValues.Get(\"period_id\"),\n\t\tPeriodStr: postValues.Get(\"period_str\"),\n\t\tCollectionExerciseSid: postValues.Get(\"collection_exercise_sid\"),\n\t\tRuRef: postValues.Get(\"ru_ref\"),\n\t\tRuName: postValues.Get(\"ru_name\"),\n\t\tRefPStartDate: postValues.Get(\"ref_p_start_date\"),\n\t\tRefPEndDate: postValues.Get(\"ref_p_end_date\"),\n\t\tReturnBy: postValues.Get(\"return_by\"),\n\t\tTradAs: postValues.Get(\"trad_as\"),\n\t\tEmploymentDate: postValues.Get(\"employment_date\"),\n\t\tRegionCode: postValues.Get(\"region_code\"),\n\t\tLanguageCode: postValues.Get(\"language_code\"),\n\t\tTxID: uuid.NewV4().String(),\n\t\tVariantFlags: variantFlags{\n\t\t\tSexualIdentity: postValues.Get(\"sexual_identity\") == \"true\",\n\t\t},\n\t\tRoles: []string{postValues.Get(\"roles\")},\n\t\tCaseID: uuid.NewV4().String(),\n\t\tCaseRef: postValues.Get(\"case_ref\"),\n\t\tAccountServiceURL: postValues.Get(\"account_url\"),\n\t}\n\n\treturn postClaims\n}\n\n\/\/ GenerateJwtClaims creates a jwtClaim needed to generate a token\nfunc GenerateJwtClaims() (jwtClaims jwt.Claims) {\n\tissued := time.Now()\n\texpires := issued.Add(time.Minute * 10) \/\/ TODO: Support custom exp: r.PostForm.Get(\"exp\")\n\n\tjwtClaims = jwt.Claims{\n\t\tIssuedAt: jwt.NewNumericDate(issued),\n\t\tExpiry: jwt.NewNumericDate(expires),\n\t\tID: uuid.NewV4().String(),\n\t}\n\n\treturn jwtClaims\n}\n\nfunc launcherSchemaFromURL(url string) (launcherSchema surveys.LauncherSchema, error string) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn launcherSchema, fmt.Sprintf(\"Failed to load Schema from %s\", url)\n\t}\n\n\tresponseBody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvalidationError := validateSchema(responseBody)\n\tif validationError != \"\" {\n\t\treturn launcherSchema, validationError\n\t}\n\n\tvar schema QuestionnaireSchema\n\tif err := json.Unmarshal(responseBody, &schema); err != nil {\n\t\tpanic(err)\n\t}\n\n\tcacheBust := \"\"\n\tif !strings.Contains(url, \"?\") {\n\t\tcacheBust = \"?bust=\" + time.Now().Format(\"20060102150405\")\n\t}\n\n\tlauncherSchema = surveys.LauncherSchema{\n\t\tEqID: schema.EqID,\n\t\tFormType: schema.FormType,\n\t\tURL: url + cacheBust,\n\t}\n\n\treturn launcherSchema, \"\"\n}\n\nfunc validateSchema(payload []byte) (error string) {\n\tif settings.Get(\"SCHEMA_VALIDATOR_URL\") == \"\" {\n\t\tvalidateURL := path.Join(settings.Get(\"SCHEMA_VALIDATOR_URL\"), \"validate\")\n\t\tresp, err := http.Post(validateURL, \"application\/json\", bytes.NewBuffer(payload))\n\t\tif err != nil {\n\t\t\treturn err.Error()\n\t\t}\n\n\t\tresponseBody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn err.Error()\n\t\t}\n\n\t\tif resp.StatusCode != 200 {\n\t\t\treturn string(responseBody)\n\t\t}\n\t}\n\n\treturn \"\"\n}\n\nfunc addSchemaToClaims(claims *EqClaims, LauncherSchema surveys.LauncherSchema) () {\n\tclaims.EqID = LauncherSchema.EqID\n\tclaims.FormType = LauncherSchema.FormType\n\tclaims.SurveyURL = LauncherSchema.URL\n}\n\n\/\/ TokenError describes an error that can occur during JWT generation\ntype TokenError struct {\n\t\/\/ Err is a description of the error that occurred.\n\tDesc string\n\n\t\/\/ From is optionally the original error from which this one was caused.\n\tFrom error\n}\n\nfunc (e *TokenError) Error() string {\n\tif e == nil {\n\t\treturn \"<nil>\"\n\t}\n\terr := e.Desc\n\tif e.From != nil {\n\t\terr += \" (\" + e.From.Error() + \")\"\n\t}\n\treturn err\n}\n\n\/\/ generateTokenFromClaims creates a token though encryption using the private and public keys\nfunc generateTokenFromClaims(cl EqClaims) (string, *TokenError) {\n\tprivateKeyResult, keyErr := loadSigningKey()\n\tif keyErr != nil {\n\t\treturn \"\", &TokenError{Desc: \"Error loading signing key\", From: keyErr}\n\t}\n\n\tpublicKeyResult, keyErr := loadEncryptionKey()\n\tif keyErr != nil {\n\t\treturn \"\", &TokenError{Desc: \"Error loading encryption key\", From: keyErr}\n\t}\n\n\topts := jose.SignerOptions{}\n\topts.WithType(\"JWT\")\n\topts.WithHeader(\"kid\", privateKeyResult.kid)\n\n\tsigner, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: privateKeyResult.key}, &opts)\n\tif err != nil {\n\t\treturn \"\", &TokenError{Desc: \"Error creating JWT signer\", From: err}\n\t}\n\n\tencryptor, err := jose.NewEncrypter(\n\t\tjose.A256GCM,\n\t\tjose.Recipient{Algorithm: jose.RSA_OAEP, Key: publicKeyResult.key, KeyID: publicKeyResult.kid},\n\t\t(&jose.EncrypterOptions{}).WithType(\"JWT\").WithContentType(\"JWT\"))\n\n\tif err != nil {\n\t\treturn \"\", &TokenError{Desc: \"Error creating JWT signer\", From: err}\n\t}\n\n\ttoken, err := jwt.SignedAndEncrypted(signer, encryptor).Claims(cl).CompactSerialize()\n\n\tif err != nil {\n\t\treturn \"\", &TokenError{Desc: \"Error signing and encrypting JWT\", From: err}\n\t}\n\n\tlog.Println(\"Created signed\/encrypted JWT:\", token)\n\n\treturn token, nil\n}\n\n\/\/ GenerateTokenFromDefaults coverts a set of DEFAULT values into a JWT\nfunc GenerateTokenFromDefaults(url string, accountURL string) (token string, error string) {\n\tclaims := EqClaims{}\n\tclaims = generateDefaultClaims(accountURL)\n\n\tjwtClaims := GenerateJwtClaims()\n\tclaims.Claims = jwtClaims\n\n\tlauncherSchema, validationError := launcherSchemaFromURL(url)\n\tif validationError != \"\" {\n\t\treturn \"\", validationError\n\t}\n\taddSchemaToClaims(&claims, launcherSchema)\n\n\ttoken, tokenError := generateTokenFromClaims(claims)\n\tif tokenError != nil {\n\t\treturn token, fmt.Sprintf(\"GenerateTokenFromDefaults failed err: %v\", tokenError)\n\t}\n\n\treturn token, \"\"\n}\n\n\/\/ GenerateTokenFromPost coverts a set of POST values into a JWT\nfunc GenerateTokenFromPost(postValues url.Values) (string, string) {\n\tlog.Println(\"POST received: \", postValues)\n\n\tclaims := EqClaims{}\n\tclaims = generateClaimsFromPost(postValues)\n\n\tjwtClaims := GenerateJwtClaims()\n\tclaims.Claims = jwtClaims\n\n\tschema := postValues.Get(\"schema\")\n\tlauncherSchema := surveys.FindSurveyByName(schema)\n\taddSchemaToClaims(&claims, launcherSchema)\n\n\ttoken, error := generateTokenFromClaims(claims)\n\treturn token, fmt.Sprintf(\"GenerateTokenFromPost failed err: %v\", error)\n}\n<commit_msg>Pass empty error string from GenerateTokenFromPost if no error<commit_after>package authentication\n\nimport (\n\t\"crypto\/rsa\"\n\t\"crypto\/sha1\"\n\t\"crypto\/x509\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"github.com\/ONSdigital\/go-launch-a-survey\/settings\"\n\t\"github.com\/ONSdigital\/go-launch-a-survey\/surveys\"\n\t\"github.com\/satori\/go.uuid\"\n\t\"gopkg.in\/square\/go-jose.v2\"\n\t\"gopkg.in\/square\/go-jose.v2\/jwt\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"time\"\n\t\"net\/http\"\n\t\"gopkg.in\/square\/go-jose.v2\/json\"\n\n\t\"math\/rand\"\n\t\"encoding\/base64\"\n\t\"log\"\n\t\"bytes\"\n\t\"strings\"\n\t\"path\"\n)\n\n\/\/ KeyLoadError describes an error that can occur during key loading\ntype KeyLoadError struct {\n\t\/\/ Op is the operation which caused the error, such as\n\t\/\/ \"read\", \"parse\" or \"cast\".\n\tOp string\n\n\t\/\/ Err is a description of the error that occurred during the operation.\n\tErr string\n}\n\nfunc (e *KeyLoadError) Error() string {\n\tif e == nil {\n\t\treturn \"<nil>\"\n\t}\n\treturn e.Op + \": \" + e.Err\n}\n\n\/\/ PublicKeyResult is a wrapper for the public key and the kid that identifies it\ntype PublicKeyResult struct {\n\tkey *rsa.PublicKey\n\tkid string\n}\n\n\/\/ PrivateKeyResult is a wrapper for the private key and the kid that identifies it\ntype PrivateKeyResult struct {\n\tkey *rsa.PrivateKey\n\tkid string\n}\n\nfunc loadEncryptionKey() (*PublicKeyResult, *KeyLoadError) {\n\tencryptionKeyPath := settings.Get(\"JWT_ENCRYPTION_KEY_PATH\")\n\n\tkeyData, err := ioutil.ReadFile(encryptionKeyPath)\n\tif err != nil {\n\t\treturn nil, &KeyLoadError{Op: \"read\", Err: \"Failed to read encryption key from file: \" + encryptionKeyPath}\n\t}\n\n\tblock, _ := pem.Decode(keyData)\n\tpub, err := x509.ParsePKIXPublicKey(block.Bytes)\n\tif err != nil {\n\t\treturn nil, &KeyLoadError{Op: \"parse\", Err: \"Failed to parse encryption key PEM\"}\n\t}\n\n\tkid := fmt.Sprintf(\"%x\", sha1.Sum(keyData))\n\n\tpublicKey, ok := pub.(*rsa.PublicKey)\n\tif !ok {\n\t\treturn nil, &KeyLoadError{Op: \"cast\", Err: \"Failed to cast key to rsa.PublicKey\"}\n\t}\n\n\treturn &PublicKeyResult{publicKey, kid}, nil\n}\n\nfunc loadSigningKey() (*PrivateKeyResult, *KeyLoadError) {\n\tsigningKeyPath := settings.Get(\"JWT_SIGNING_KEY_PATH\")\n\tkeyData, err := ioutil.ReadFile(signingKeyPath)\n\tif err != nil {\n\t\treturn nil, &KeyLoadError{Op: \"read\", Err: \"Failed to read signing key from file: \" + signingKeyPath}\n\t}\n\n\tblock, _ := pem.Decode(keyData)\n\tprivateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)\n\tif err != nil {\n\t\treturn nil, &KeyLoadError{Op: \"parse\", Err: \"Failed to parse signing key from PEM\"}\n\t}\n\n\tPublicKey, err := x509.MarshalPKIXPublicKey(&privateKey.PublicKey)\n\tif err != nil {\n\t\treturn nil, &KeyLoadError{Op: \"marshal\", Err: \"Failed to marshal public key\"}\n\t}\n\n\tpubBytes := pem.EncodeToMemory(&pem.Block{\n\t\tType: \"PUBLIC KEY\",\n\t\tBytes: PublicKey,\n\t})\n\tkid := fmt.Sprintf(\"%x\", sha1.Sum(pubBytes))\n\n\treturn &PrivateKeyResult{privateKey, kid}, nil\n}\n\n\/\/ EqClaims is a representation of the set of values needed when generating a valid token\ntype EqClaims struct {\n\tjwt.Claims\n\tUserID string `json:\"user_id\"`\n\tEqID string `json:\"eq_id\"`\n\tPeriodID string `json:\"period_id\"`\n\tPeriodStr string `json:\"period_str\"`\n\tCollectionExerciseSid string `json:\"collection_exercise_sid\"`\n\tRuRef string `json:\"ru_ref\"`\n\tRuName string `json:\"ru_name\"`\n\tRefPStartDate string `json:\"ref_p_start_date\"` \/\/ iso_8601_date\n\tRefPEndDate string `json:\"ref_p_end_date,omitempty\"` \/\/ iso_8601_date\n\tFormType string `json:\"form_type\"`\n\tSurveyURL string `json:\"survey_url,omitempty\"`\n\tReturnBy string `json:\"return_by\"`\n\tTradAs string `json:\"trad_as,omitempty\"`\n\tEmploymentDate string `json:\"employment_date,omitempty\"` \/\/ iso_8601_date\n\tRegionCode string `json:\"region_code,omitempty\"`\n\tLanguageCode string `json:\"language_code,omitempty\"`\n\tVariantFlags variantFlags `json:\"variant_flags,omitempty\"`\n\tTxID string `json:\"tx_id,omitempty\"`\n\tRoles []string `json:\"roles,omitempty\"`\n\tCaseID string `json:\"case_id,omitempty\"`\n\tCaseRef string `json:\"case_ref,omitempty\"`\n\tAccountServiceURL string `json:\"account_service_url,omitempty\"`\n}\n\ntype variantFlags struct {\n\tSexualIdentity bool `json:\"sexual_identity,omitempty\"`\n}\n\n\/\/ QuestionnaireSchema is a minimal representation of a questionnaire schema used for extracting the eq_id and form_type\ntype QuestionnaireSchema struct {\n\tEqID string `json:\"eq_id\"`\n\tFormType string `json:\"form_type\"`\n}\n\n\/\/ Generates a random string of a defined length\nfunc randomStringGen() (rs string) {\n\tsize := 6 \/\/change the length of the generated random string\n\trb := make([]byte, size)\n\t_, err := rand.Read(rb)\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\trandomString := base64.URLEncoding.EncodeToString(rb)\n\n\treturn randomString\n}\n\nfunc generateDefaultClaims(accountURL string) (claims EqClaims) {\n\tdefaultClaims := EqClaims{\n\t\tUserID: \"UNKNOWN\",\n\t\tPeriodID: \"201605\",\n\t\tPeriodStr: \"May 2017\",\n\t\tCollectionExerciseSid: randomStringGen(),\n\t\tRuRef: \"12346789012A\",\n\t\tRuName: \"ESSENTIAL ENTERPRISE LTD.\",\n\t\tRefPStartDate: \"2016-05-01\",\n\t\tRefPEndDate: \"2016-05-31\",\n\t\tReturnBy: \"2016-06-12\",\n\t\tTradAs: \"ESSENTIAL ENTERPRISE LTD.\",\n\t\tEmploymentDate: \"2016-06-10\",\n\t\tRegionCode: \"GB-ENG\",\n\t\tLanguageCode: \"en\",\n\t\tTxID: uuid.NewV4().String(),\n\t\tCaseID: uuid.NewV4().String(),\n\t\tCaseRef: \"1000000000000001\",\n\t\tAccountServiceURL: accountURL,\n\t\tVariantFlags: variantFlags{\n\t\t\tSexualIdentity: true,\n\t\t},\n\t\tRoles: []string{\"dumper\"},\n\t}\n\treturn defaultClaims\n}\n\nfunc generateClaimsFromPost(postValues url.Values) (claims EqClaims) {\n\tpostClaims := EqClaims{\n\t\tUserID: postValues.Get(\"user_id\"),\n\t\tPeriodID: postValues.Get(\"period_id\"),\n\t\tPeriodStr: postValues.Get(\"period_str\"),\n\t\tCollectionExerciseSid: postValues.Get(\"collection_exercise_sid\"),\n\t\tRuRef: postValues.Get(\"ru_ref\"),\n\t\tRuName: postValues.Get(\"ru_name\"),\n\t\tRefPStartDate: postValues.Get(\"ref_p_start_date\"),\n\t\tRefPEndDate: postValues.Get(\"ref_p_end_date\"),\n\t\tReturnBy: postValues.Get(\"return_by\"),\n\t\tTradAs: postValues.Get(\"trad_as\"),\n\t\tEmploymentDate: postValues.Get(\"employment_date\"),\n\t\tRegionCode: postValues.Get(\"region_code\"),\n\t\tLanguageCode: postValues.Get(\"language_code\"),\n\t\tTxID: uuid.NewV4().String(),\n\t\tVariantFlags: variantFlags{\n\t\t\tSexualIdentity: postValues.Get(\"sexual_identity\") == \"true\",\n\t\t},\n\t\tRoles: []string{postValues.Get(\"roles\")},\n\t\tCaseID: uuid.NewV4().String(),\n\t\tCaseRef: postValues.Get(\"case_ref\"),\n\t\tAccountServiceURL: postValues.Get(\"account_url\"),\n\t}\n\n\treturn postClaims\n}\n\n\/\/ GenerateJwtClaims creates a jwtClaim needed to generate a token\nfunc GenerateJwtClaims() (jwtClaims jwt.Claims) {\n\tissued := time.Now()\n\texpires := issued.Add(time.Minute * 10) \/\/ TODO: Support custom exp: r.PostForm.Get(\"exp\")\n\n\tjwtClaims = jwt.Claims{\n\t\tIssuedAt: jwt.NewNumericDate(issued),\n\t\tExpiry: jwt.NewNumericDate(expires),\n\t\tID: uuid.NewV4().String(),\n\t}\n\n\treturn jwtClaims\n}\n\nfunc launcherSchemaFromURL(url string) (launcherSchema surveys.LauncherSchema, error string) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif resp.StatusCode != 200 {\n\t\treturn launcherSchema, fmt.Sprintf(\"Failed to load Schema from %s\", url)\n\t}\n\n\tresponseBody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvalidationError := validateSchema(responseBody)\n\tif validationError != \"\" {\n\t\treturn launcherSchema, validationError\n\t}\n\n\tvar schema QuestionnaireSchema\n\tif err := json.Unmarshal(responseBody, &schema); err != nil {\n\t\tpanic(err)\n\t}\n\n\tcacheBust := \"\"\n\tif !strings.Contains(url, \"?\") {\n\t\tcacheBust = \"?bust=\" + time.Now().Format(\"20060102150405\")\n\t}\n\n\tlauncherSchema = surveys.LauncherSchema{\n\t\tEqID: schema.EqID,\n\t\tFormType: schema.FormType,\n\t\tURL: url + cacheBust,\n\t}\n\n\treturn launcherSchema, \"\"\n}\n\nfunc validateSchema(payload []byte) (error string) {\n\tif settings.Get(\"SCHEMA_VALIDATOR_URL\") == \"\" {\n\t\tvalidateURL := path.Join(settings.Get(\"SCHEMA_VALIDATOR_URL\"), \"validate\")\n\t\tresp, err := http.Post(validateURL, \"application\/json\", bytes.NewBuffer(payload))\n\t\tif err != nil {\n\t\t\treturn err.Error()\n\t\t}\n\n\t\tresponseBody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn err.Error()\n\t\t}\n\n\t\tif resp.StatusCode != 200 {\n\t\t\treturn string(responseBody)\n\t\t}\n\t}\n\n\treturn \"\"\n}\n\nfunc addSchemaToClaims(claims *EqClaims, LauncherSchema surveys.LauncherSchema) () {\n\tclaims.EqID = LauncherSchema.EqID\n\tclaims.FormType = LauncherSchema.FormType\n\tclaims.SurveyURL = LauncherSchema.URL\n}\n\n\/\/ TokenError describes an error that can occur during JWT generation\ntype TokenError struct {\n\t\/\/ Err is a description of the error that occurred.\n\tDesc string\n\n\t\/\/ From is optionally the original error from which this one was caused.\n\tFrom error\n}\n\nfunc (e *TokenError) Error() string {\n\tif e == nil {\n\t\treturn \"<nil>\"\n\t}\n\terr := e.Desc\n\tif e.From != nil {\n\t\terr += \" (\" + e.From.Error() + \")\"\n\t}\n\treturn err\n}\n\n\/\/ generateTokenFromClaims creates a token though encryption using the private and public keys\nfunc generateTokenFromClaims(cl EqClaims) (string, *TokenError) {\n\tprivateKeyResult, keyErr := loadSigningKey()\n\tif keyErr != nil {\n\t\treturn \"\", &TokenError{Desc: \"Error loading signing key\", From: keyErr}\n\t}\n\n\tpublicKeyResult, keyErr := loadEncryptionKey()\n\tif keyErr != nil {\n\t\treturn \"\", &TokenError{Desc: \"Error loading encryption key\", From: keyErr}\n\t}\n\n\topts := jose.SignerOptions{}\n\topts.WithType(\"JWT\")\n\topts.WithHeader(\"kid\", privateKeyResult.kid)\n\n\tsigner, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.RS256, Key: privateKeyResult.key}, &opts)\n\tif err != nil {\n\t\treturn \"\", &TokenError{Desc: \"Error creating JWT signer\", From: err}\n\t}\n\n\tencryptor, err := jose.NewEncrypter(\n\t\tjose.A256GCM,\n\t\tjose.Recipient{Algorithm: jose.RSA_OAEP, Key: publicKeyResult.key, KeyID: publicKeyResult.kid},\n\t\t(&jose.EncrypterOptions{}).WithType(\"JWT\").WithContentType(\"JWT\"))\n\n\tif err != nil {\n\t\treturn \"\", &TokenError{Desc: \"Error creating JWT signer\", From: err}\n\t}\n\n\ttoken, err := jwt.SignedAndEncrypted(signer, encryptor).Claims(cl).CompactSerialize()\n\n\tif err != nil {\n\t\treturn \"\", &TokenError{Desc: \"Error signing and encrypting JWT\", From: err}\n\t}\n\n\tlog.Println(\"Created signed\/encrypted JWT:\", token)\n\n\treturn token, nil\n}\n\n\/\/ GenerateTokenFromDefaults coverts a set of DEFAULT values into a JWT\nfunc GenerateTokenFromDefaults(url string, accountURL string) (token string, error string) {\n\tclaims := EqClaims{}\n\tclaims = generateDefaultClaims(accountURL)\n\n\tjwtClaims := GenerateJwtClaims()\n\tclaims.Claims = jwtClaims\n\n\tlauncherSchema, validationError := launcherSchemaFromURL(url)\n\tif validationError != \"\" {\n\t\treturn \"\", validationError\n\t}\n\taddSchemaToClaims(&claims, launcherSchema)\n\n\ttoken, tokenError := generateTokenFromClaims(claims)\n\tif tokenError != nil {\n\t\treturn token, fmt.Sprintf(\"GenerateTokenFromDefaults failed err: %v\", tokenError)\n\t}\n\n\treturn token, \"\"\n}\n\n\/\/ GenerateTokenFromPost coverts a set of POST values into a JWT\nfunc GenerateTokenFromPost(postValues url.Values) (string, string) {\n\tlog.Println(\"POST received: \", postValues)\n\n\tclaims := EqClaims{}\n\tclaims = generateClaimsFromPost(postValues)\n\n\tjwtClaims := GenerateJwtClaims()\n\tclaims.Claims = jwtClaims\n\n\tschema := postValues.Get(\"schema\")\n\tlauncherSchema := surveys.FindSurveyByName(schema)\n\taddSchemaToClaims(&claims, launcherSchema)\n\n\ttoken, tokenError := generateTokenFromClaims(claims)\n\tif tokenError != nil {\n\t\treturn token, fmt.Sprintf(\"GenerateTokenFromPost failed err: %v\", tokenError)\n\t}\n\n\treturn token, \"\"\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>cc_proxy: Provide console path from hypervisor getPodConsole()<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>Some attempted splitting...<commit_after><|endoftext|>"} {"text":"<commit_before>package testdata\n\nfunc fmt () {\n\t\/\/ test\n\t\/\/ test line\n\t\/\/ test line\n\t\/\/ test line\n\t\/\/ test line\n\t\/\/ test line\n\t\/\/ test line\n\t\/\/ test line\n\nprintln(\n\t\t\"hello, gofmt test\" )\n\/\/comment\n}\n\n\ntype s struct { A int }\nfunc (s s) String() { return \"s\" }\n<commit_msg>Apply suggestions from code review<commit_after>package testdata\n\nfunc fmt () {\n\t\/\/ test\n\t\/\/ test line\n\t\/\/ test line\n\t\/\/ test line\n\t\/\/ test line\n\t\/\/ test line\n\t\/\/ test line\n\t\/\/ test line\n\nprintln(\n\t\t\"hello, gofmt test\" )\n\/\/comment\n}\n\ntype s struct{ A int }\n\nfunc (s s) String() { return \"s\" }\n<|endoftext|>"} {"text":"<commit_before>package log\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/bahadley\/ssim\/system\"\n)\n\nvar (\n\tTrace *log.Logger\n\tInfo *log.Logger\n\tWarning *log.Logger\n\tError *log.Logger\n)\n\nfunc init() {\n\ttraceOut := ioutil.Discard\n\tif system.Trace() {\n\t\ttraceOut = os.Stdout\n\t}\n\n\tTrace = log.New(traceOut,\n\t\t\"TRACE: \",\n\t\tlog.Ldate|log.Lmicroseconds|log.Lshortfile)\n\n\tInfo = log.New(os.Stdout,\n\t\t\"INFO: \",\n\t\tlog.Ldate|log.Ltime)\n\n\tWarning = log.New(os.Stdout,\n\t\t\"WARNING: \",\n\t\tlog.Ldate|log.Ltime|log.Lshortfile)\n\n\tError = log.New(os.Stderr,\n\t\t\"ERROR: \",\n\t\tlog.Ldate|log.Ltime|log.Lshortfile)\n}\n<commit_msg>Switched INFO and WARNING messages to stderr<commit_after>package log\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com\/bahadley\/ssim\/system\"\n)\n\nvar (\n\tTrace *log.Logger\n\tInfo *log.Logger\n\tWarning *log.Logger\n\tError *log.Logger\n)\n\nfunc init() {\n\ttraceOut := ioutil.Discard\n\tif system.Trace() {\n\t\ttraceOut = os.Stdout\n\t}\n\n\tTrace = log.New(traceOut,\n\t\t\"TRACE: \",\n\t\tlog.Ldate|log.Lmicroseconds|log.Lshortfile)\n\n\tInfo = log.New(os.Stderr,\n\t\t\"INFO: \",\n\t\tlog.Ldate|log.Ltime)\n\n\tWarning = log.New(os.Stderr,\n\t\t\"WARNING: \",\n\t\tlog.Ldate|log.Ltime|log.Lshortfile)\n\n\tError = log.New(os.Stderr,\n\t\t\"ERROR: \",\n\t\tlog.Ldate|log.Ltime|log.Lshortfile)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage log\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/url\"\n\t\"os\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\n\/\/ setSyslogFormatter is nil if the target architecture does not support syslog.\nvar setSyslogFormatter func(logger, string, string) error\n\n\/\/ setEventlogFormatter is nil if the target OS does not support Eventlog (i.e., is not Windows).\nvar setEventlogFormatter func(logger, string, bool) error\n\nfunc setJSONFormatter() {\n\torigLogger.Formatter = &logrus.JSONFormatter{}\n}\n\ntype loggerSettings struct {\n\tlevel string\n\tformat string\n}\n\nfunc (s *loggerSettings) apply(ctx *kingpin.ParseContext) error {\n\terr := baseLogger.SetLevel(s.level)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = baseLogger.SetFormat(s.format)\n\treturn err\n}\n\n\/\/ AddFlags adds the flags used by this package to the Kingpin application.\n\/\/ To use the default Kingpin application, call AddFlags(kingpin.CommandLine)\nfunc AddFlags(a *kingpin.Application) {\n\ts := loggerSettings{}\n\tkingpin.Flag(\"log.level\", \"Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]\").\n\t\tDefault(origLogger.Level.String()).\n\t\tStringVar(&s.level)\n\tdefaultFormat := url.URL{Scheme: \"logger\", Opaque: \"stderr\"}\n\tkingpin.Flag(\"log.format\", `Set the log target and format. Example: \"logger:syslog?appname=bob&local=7\" or \"logger:stdout?json=true\"`).\n\t\tDefault(defaultFormat.String()).\n\t\tStringVar(&s.format)\n\ta.Action(s.apply)\n}\n\n\/\/ Logger is the interface for loggers used in the Prometheus components.\ntype Logger interface {\n\tDebug(...interface{})\n\tDebugln(...interface{})\n\tDebugf(string, ...interface{})\n\n\tInfo(...interface{})\n\tInfoln(...interface{})\n\tInfof(string, ...interface{})\n\n\tWarn(...interface{})\n\tWarnln(...interface{})\n\tWarnf(string, ...interface{})\n\n\tError(...interface{})\n\tErrorln(...interface{})\n\tErrorf(string, ...interface{})\n\n\tFatal(...interface{})\n\tFatalln(...interface{})\n\tFatalf(string, ...interface{})\n\n\tWith(key string, value interface{}) Logger\n\n\tSetFormat(string) error\n\tSetLevel(string) error\n}\n\ntype logger struct {\n\tentry *logrus.Entry\n}\n\nfunc (l logger) With(key string, value interface{}) Logger {\n\treturn logger{l.entry.WithField(key, value)}\n}\n\n\/\/ Debug logs a message at level Debug on the standard logger.\nfunc (l logger) Debug(args ...interface{}) {\n\tl.sourced().Debug(args...)\n}\n\n\/\/ Debug logs a message at level Debug on the standard logger.\nfunc (l logger) Debugln(args ...interface{}) {\n\tl.sourced().Debugln(args...)\n}\n\n\/\/ Debugf logs a message at level Debug on the standard logger.\nfunc (l logger) Debugf(format string, args ...interface{}) {\n\tl.sourced().Debugf(format, args...)\n}\n\n\/\/ Info logs a message at level Info on the standard logger.\nfunc (l logger) Info(args ...interface{}) {\n\tl.sourced().Info(args...)\n}\n\n\/\/ Info logs a message at level Info on the standard logger.\nfunc (l logger) Infoln(args ...interface{}) {\n\tl.sourced().Infoln(args...)\n}\n\n\/\/ Infof logs a message at level Info on the standard logger.\nfunc (l logger) Infof(format string, args ...interface{}) {\n\tl.sourced().Infof(format, args...)\n}\n\n\/\/ Warn logs a message at level Warn on the standard logger.\nfunc (l logger) Warn(args ...interface{}) {\n\tl.sourced().Warn(args...)\n}\n\n\/\/ Warn logs a message at level Warn on the standard logger.\nfunc (l logger) Warnln(args ...interface{}) {\n\tl.sourced().Warnln(args...)\n}\n\n\/\/ Warnf logs a message at level Warn on the standard logger.\nfunc (l logger) Warnf(format string, args ...interface{}) {\n\tl.sourced().Warnf(format, args...)\n}\n\n\/\/ Error logs a message at level Error on the standard logger.\nfunc (l logger) Error(args ...interface{}) {\n\tl.sourced().Error(args...)\n}\n\n\/\/ Error logs a message at level Error on the standard logger.\nfunc (l logger) Errorln(args ...interface{}) {\n\tl.sourced().Errorln(args...)\n}\n\n\/\/ Errorf logs a message at level Error on the standard logger.\nfunc (l logger) Errorf(format string, args ...interface{}) {\n\tl.sourced().Errorf(format, args...)\n}\n\n\/\/ Fatal logs a message at level Fatal on the standard logger.\nfunc (l logger) Fatal(args ...interface{}) {\n\tl.sourced().Fatal(args...)\n}\n\n\/\/ Fatal logs a message at level Fatal on the standard logger.\nfunc (l logger) Fatalln(args ...interface{}) {\n\tl.sourced().Fatalln(args...)\n}\n\n\/\/ Fatalf logs a message at level Fatal on the standard logger.\nfunc (l logger) Fatalf(format string, args ...interface{}) {\n\tl.sourced().Fatalf(format, args...)\n}\n\nfunc (l logger) SetLevel(level string) error {\n\tlvl, err := logrus.ParseLevel(level)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tl.entry.Logger.Level = lvl\n\treturn nil\n}\n\nfunc (l logger) SetFormat(format string) error {\n\tu, err := url.Parse(format)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif u.Scheme != \"logger\" {\n\t\treturn fmt.Errorf(\"invalid scheme %s\", u.Scheme)\n\t}\n\tjsonq := u.Query().Get(\"json\")\n\tif jsonq == \"true\" {\n\t\tsetJSONFormatter()\n\t}\n\n\tswitch u.Opaque {\n\tcase \"syslog\":\n\t\tif setSyslogFormatter == nil {\n\t\t\treturn fmt.Errorf(\"system does not support syslog\")\n\t\t}\n\t\tappname := u.Query().Get(\"appname\")\n\t\tfacility := u.Query().Get(\"local\")\n\t\treturn setSyslogFormatter(l, appname, facility)\n\tcase \"eventlog\":\n\t\tif setEventlogFormatter == nil {\n\t\t\treturn fmt.Errorf(\"system does not support eventlog\")\n\t\t}\n\t\tname := u.Query().Get(\"name\")\n\t\tdebugAsInfo := false\n\t\tdebugAsInfoRaw := u.Query().Get(\"debugAsInfo\")\n\t\tif parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil {\n\t\t\tdebugAsInfo = parsedDebugAsInfo\n\t\t}\n\t\treturn setEventlogFormatter(l, name, debugAsInfo)\n\tcase \"stdout\":\n\t\tl.entry.Logger.Out = os.Stdout\n\tcase \"stderr\":\n\t\tl.entry.Logger.Out = os.Stderr\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported logger %q\", u.Opaque)\n\t}\n\treturn nil\n}\n\n\/\/ sourced adds a source field to the logger that contains\n\/\/ the file name and line where the logging happened.\nfunc (l logger) sourced() *logrus.Entry {\n\t_, file, line, ok := runtime.Caller(2)\n\tif !ok {\n\t\tfile = \"<???>\"\n\t\tline = 1\n\t} else {\n\t\tslash := strings.LastIndex(file, \"\/\")\n\t\tfile = file[slash+1:]\n\t}\n\treturn l.entry.WithField(\"source\", fmt.Sprintf(\"%s:%d\", file, line))\n}\n\nvar origLogger = logrus.New()\nvar baseLogger = logger{entry: logrus.NewEntry(origLogger)}\n\n\/\/ Base returns the default Logger logging to\nfunc Base() Logger {\n\treturn baseLogger\n}\n\n\/\/ NewLogger returns a new Logger logging to out.\nfunc NewLogger(w io.Writer) Logger {\n\tl := logrus.New()\n\tl.Out = w\n\treturn logger{entry: logrus.NewEntry(l)}\n}\n\n\/\/ NewNopLogger returns a logger that discards all log messages.\nfunc NewNopLogger() Logger {\n\tl := logrus.New()\n\tl.Out = ioutil.Discard\n\treturn logger{entry: logrus.NewEntry(l)}\n}\n\n\/\/ With adds a field to the logger.\nfunc With(key string, value interface{}) Logger {\n\treturn baseLogger.With(key, value)\n}\n\n\/\/ Debug logs a message at level Debug on the standard logger.\nfunc Debug(args ...interface{}) {\n\tbaseLogger.sourced().Debug(args...)\n}\n\n\/\/ Debugln logs a message at level Debug on the standard logger.\nfunc Debugln(args ...interface{}) {\n\tbaseLogger.sourced().Debugln(args...)\n}\n\n\/\/ Debugf logs a message at level Debug on the standard logger.\nfunc Debugf(format string, args ...interface{}) {\n\tbaseLogger.sourced().Debugf(format, args...)\n}\n\n\/\/ Info logs a message at level Info on the standard logger.\nfunc Info(args ...interface{}) {\n\tbaseLogger.sourced().Info(args...)\n}\n\n\/\/ Infoln logs a message at level Info on the standard logger.\nfunc Infoln(args ...interface{}) {\n\tbaseLogger.sourced().Infoln(args...)\n}\n\n\/\/ Infof logs a message at level Info on the standard logger.\nfunc Infof(format string, args ...interface{}) {\n\tbaseLogger.sourced().Infof(format, args...)\n}\n\n\/\/ Warn logs a message at level Warn on the standard logger.\nfunc Warn(args ...interface{}) {\n\tbaseLogger.sourced().Warn(args...)\n}\n\n\/\/ Warnln logs a message at level Warn on the standard logger.\nfunc Warnln(args ...interface{}) {\n\tbaseLogger.sourced().Warnln(args...)\n}\n\n\/\/ Warnf logs a message at level Warn on the standard logger.\nfunc Warnf(format string, args ...interface{}) {\n\tbaseLogger.sourced().Warnf(format, args...)\n}\n\n\/\/ Error logs a message at level Error on the standard logger.\nfunc Error(args ...interface{}) {\n\tbaseLogger.sourced().Error(args...)\n}\n\n\/\/ Errorln logs a message at level Error on the standard logger.\nfunc Errorln(args ...interface{}) {\n\tbaseLogger.sourced().Errorln(args...)\n}\n\n\/\/ Errorf logs a message at level Error on the standard logger.\nfunc Errorf(format string, args ...interface{}) {\n\tbaseLogger.sourced().Errorf(format, args...)\n}\n\n\/\/ Fatal logs a message at level Fatal on the standard logger.\nfunc Fatal(args ...interface{}) {\n\tbaseLogger.sourced().Fatal(args...)\n}\n\n\/\/ Fatalln logs a message at level Fatal on the standard logger.\nfunc Fatalln(args ...interface{}) {\n\tbaseLogger.sourced().Fatalln(args...)\n}\n\n\/\/ Fatalf logs a message at level Fatal on the standard logger.\nfunc Fatalf(format string, args ...interface{}) {\n\tbaseLogger.sourced().Fatalf(format, args...)\n}\n\n\/\/ AddHook adds hook to Prometheus' original logger.\nfunc AddHook(hook logrus.Hook) {\n\torigLogger.Hooks.Add(hook)\n}\n\ntype errorLogWriter struct{}\n\nfunc (errorLogWriter) Write(b []byte) (int, error) {\n\tbaseLogger.sourced().Error(string(b))\n\treturn len(b), nil\n}\n\n\/\/ NewErrorLogger returns a log.Logger that is meant to be used\n\/\/ in the ErrorLog field of an http.Server to log HTTP server errors.\nfunc NewErrorLogger() *log.Logger {\n\treturn log.New(&errorLogWriter{}, \"\", 0)\n}\n<commit_msg>AddFlags was only usable on default Kingpin application<commit_after>\/\/ Copyright 2015 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage log\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/url\"\n\t\"os\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\t\"gopkg.in\/alecthomas\/kingpin.v2\"\n)\n\n\/\/ setSyslogFormatter is nil if the target architecture does not support syslog.\nvar setSyslogFormatter func(logger, string, string) error\n\n\/\/ setEventlogFormatter is nil if the target OS does not support Eventlog (i.e., is not Windows).\nvar setEventlogFormatter func(logger, string, bool) error\n\nfunc setJSONFormatter() {\n\torigLogger.Formatter = &logrus.JSONFormatter{}\n}\n\ntype loggerSettings struct {\n\tlevel string\n\tformat string\n}\n\nfunc (s *loggerSettings) apply(ctx *kingpin.ParseContext) error {\n\terr := baseLogger.SetLevel(s.level)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = baseLogger.SetFormat(s.format)\n\treturn err\n}\n\n\/\/ AddFlags adds the flags used by this package to the Kingpin application.\n\/\/ To use the default Kingpin application, call AddFlags(kingpin.CommandLine)\nfunc AddFlags(a *kingpin.Application) {\n\ts := loggerSettings{}\n\ta.Flag(\"log.level\", \"Only log messages with the given severity or above. Valid levels: [debug, info, warn, error, fatal]\").\n\t\tDefault(origLogger.Level.String()).\n\t\tStringVar(&s.level)\n\tdefaultFormat := url.URL{Scheme: \"logger\", Opaque: \"stderr\"}\n\ta.Flag(\"log.format\", `Set the log target and format. Example: \"logger:syslog?appname=bob&local=7\" or \"logger:stdout?json=true\"`).\n\t\tDefault(defaultFormat.String()).\n\t\tStringVar(&s.format)\n\ta.Action(s.apply)\n}\n\n\/\/ Logger is the interface for loggers used in the Prometheus components.\ntype Logger interface {\n\tDebug(...interface{})\n\tDebugln(...interface{})\n\tDebugf(string, ...interface{})\n\n\tInfo(...interface{})\n\tInfoln(...interface{})\n\tInfof(string, ...interface{})\n\n\tWarn(...interface{})\n\tWarnln(...interface{})\n\tWarnf(string, ...interface{})\n\n\tError(...interface{})\n\tErrorln(...interface{})\n\tErrorf(string, ...interface{})\n\n\tFatal(...interface{})\n\tFatalln(...interface{})\n\tFatalf(string, ...interface{})\n\n\tWith(key string, value interface{}) Logger\n\n\tSetFormat(string) error\n\tSetLevel(string) error\n}\n\ntype logger struct {\n\tentry *logrus.Entry\n}\n\nfunc (l logger) With(key string, value interface{}) Logger {\n\treturn logger{l.entry.WithField(key, value)}\n}\n\n\/\/ Debug logs a message at level Debug on the standard logger.\nfunc (l logger) Debug(args ...interface{}) {\n\tl.sourced().Debug(args...)\n}\n\n\/\/ Debug logs a message at level Debug on the standard logger.\nfunc (l logger) Debugln(args ...interface{}) {\n\tl.sourced().Debugln(args...)\n}\n\n\/\/ Debugf logs a message at level Debug on the standard logger.\nfunc (l logger) Debugf(format string, args ...interface{}) {\n\tl.sourced().Debugf(format, args...)\n}\n\n\/\/ Info logs a message at level Info on the standard logger.\nfunc (l logger) Info(args ...interface{}) {\n\tl.sourced().Info(args...)\n}\n\n\/\/ Info logs a message at level Info on the standard logger.\nfunc (l logger) Infoln(args ...interface{}) {\n\tl.sourced().Infoln(args...)\n}\n\n\/\/ Infof logs a message at level Info on the standard logger.\nfunc (l logger) Infof(format string, args ...interface{}) {\n\tl.sourced().Infof(format, args...)\n}\n\n\/\/ Warn logs a message at level Warn on the standard logger.\nfunc (l logger) Warn(args ...interface{}) {\n\tl.sourced().Warn(args...)\n}\n\n\/\/ Warn logs a message at level Warn on the standard logger.\nfunc (l logger) Warnln(args ...interface{}) {\n\tl.sourced().Warnln(args...)\n}\n\n\/\/ Warnf logs a message at level Warn on the standard logger.\nfunc (l logger) Warnf(format string, args ...interface{}) {\n\tl.sourced().Warnf(format, args...)\n}\n\n\/\/ Error logs a message at level Error on the standard logger.\nfunc (l logger) Error(args ...interface{}) {\n\tl.sourced().Error(args...)\n}\n\n\/\/ Error logs a message at level Error on the standard logger.\nfunc (l logger) Errorln(args ...interface{}) {\n\tl.sourced().Errorln(args...)\n}\n\n\/\/ Errorf logs a message at level Error on the standard logger.\nfunc (l logger) Errorf(format string, args ...interface{}) {\n\tl.sourced().Errorf(format, args...)\n}\n\n\/\/ Fatal logs a message at level Fatal on the standard logger.\nfunc (l logger) Fatal(args ...interface{}) {\n\tl.sourced().Fatal(args...)\n}\n\n\/\/ Fatal logs a message at level Fatal on the standard logger.\nfunc (l logger) Fatalln(args ...interface{}) {\n\tl.sourced().Fatalln(args...)\n}\n\n\/\/ Fatalf logs a message at level Fatal on the standard logger.\nfunc (l logger) Fatalf(format string, args ...interface{}) {\n\tl.sourced().Fatalf(format, args...)\n}\n\nfunc (l logger) SetLevel(level string) error {\n\tlvl, err := logrus.ParseLevel(level)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tl.entry.Logger.Level = lvl\n\treturn nil\n}\n\nfunc (l logger) SetFormat(format string) error {\n\tu, err := url.Parse(format)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif u.Scheme != \"logger\" {\n\t\treturn fmt.Errorf(\"invalid scheme %s\", u.Scheme)\n\t}\n\tjsonq := u.Query().Get(\"json\")\n\tif jsonq == \"true\" {\n\t\tsetJSONFormatter()\n\t}\n\n\tswitch u.Opaque {\n\tcase \"syslog\":\n\t\tif setSyslogFormatter == nil {\n\t\t\treturn fmt.Errorf(\"system does not support syslog\")\n\t\t}\n\t\tappname := u.Query().Get(\"appname\")\n\t\tfacility := u.Query().Get(\"local\")\n\t\treturn setSyslogFormatter(l, appname, facility)\n\tcase \"eventlog\":\n\t\tif setEventlogFormatter == nil {\n\t\t\treturn fmt.Errorf(\"system does not support eventlog\")\n\t\t}\n\t\tname := u.Query().Get(\"name\")\n\t\tdebugAsInfo := false\n\t\tdebugAsInfoRaw := u.Query().Get(\"debugAsInfo\")\n\t\tif parsedDebugAsInfo, err := strconv.ParseBool(debugAsInfoRaw); err == nil {\n\t\t\tdebugAsInfo = parsedDebugAsInfo\n\t\t}\n\t\treturn setEventlogFormatter(l, name, debugAsInfo)\n\tcase \"stdout\":\n\t\tl.entry.Logger.Out = os.Stdout\n\tcase \"stderr\":\n\t\tl.entry.Logger.Out = os.Stderr\n\tdefault:\n\t\treturn fmt.Errorf(\"unsupported logger %q\", u.Opaque)\n\t}\n\treturn nil\n}\n\n\/\/ sourced adds a source field to the logger that contains\n\/\/ the file name and line where the logging happened.\nfunc (l logger) sourced() *logrus.Entry {\n\t_, file, line, ok := runtime.Caller(2)\n\tif !ok {\n\t\tfile = \"<???>\"\n\t\tline = 1\n\t} else {\n\t\tslash := strings.LastIndex(file, \"\/\")\n\t\tfile = file[slash+1:]\n\t}\n\treturn l.entry.WithField(\"source\", fmt.Sprintf(\"%s:%d\", file, line))\n}\n\nvar origLogger = logrus.New()\nvar baseLogger = logger{entry: logrus.NewEntry(origLogger)}\n\n\/\/ Base returns the default Logger logging to\nfunc Base() Logger {\n\treturn baseLogger\n}\n\n\/\/ NewLogger returns a new Logger logging to out.\nfunc NewLogger(w io.Writer) Logger {\n\tl := logrus.New()\n\tl.Out = w\n\treturn logger{entry: logrus.NewEntry(l)}\n}\n\n\/\/ NewNopLogger returns a logger that discards all log messages.\nfunc NewNopLogger() Logger {\n\tl := logrus.New()\n\tl.Out = ioutil.Discard\n\treturn logger{entry: logrus.NewEntry(l)}\n}\n\n\/\/ With adds a field to the logger.\nfunc With(key string, value interface{}) Logger {\n\treturn baseLogger.With(key, value)\n}\n\n\/\/ Debug logs a message at level Debug on the standard logger.\nfunc Debug(args ...interface{}) {\n\tbaseLogger.sourced().Debug(args...)\n}\n\n\/\/ Debugln logs a message at level Debug on the standard logger.\nfunc Debugln(args ...interface{}) {\n\tbaseLogger.sourced().Debugln(args...)\n}\n\n\/\/ Debugf logs a message at level Debug on the standard logger.\nfunc Debugf(format string, args ...interface{}) {\n\tbaseLogger.sourced().Debugf(format, args...)\n}\n\n\/\/ Info logs a message at level Info on the standard logger.\nfunc Info(args ...interface{}) {\n\tbaseLogger.sourced().Info(args...)\n}\n\n\/\/ Infoln logs a message at level Info on the standard logger.\nfunc Infoln(args ...interface{}) {\n\tbaseLogger.sourced().Infoln(args...)\n}\n\n\/\/ Infof logs a message at level Info on the standard logger.\nfunc Infof(format string, args ...interface{}) {\n\tbaseLogger.sourced().Infof(format, args...)\n}\n\n\/\/ Warn logs a message at level Warn on the standard logger.\nfunc Warn(args ...interface{}) {\n\tbaseLogger.sourced().Warn(args...)\n}\n\n\/\/ Warnln logs a message at level Warn on the standard logger.\nfunc Warnln(args ...interface{}) {\n\tbaseLogger.sourced().Warnln(args...)\n}\n\n\/\/ Warnf logs a message at level Warn on the standard logger.\nfunc Warnf(format string, args ...interface{}) {\n\tbaseLogger.sourced().Warnf(format, args...)\n}\n\n\/\/ Error logs a message at level Error on the standard logger.\nfunc Error(args ...interface{}) {\n\tbaseLogger.sourced().Error(args...)\n}\n\n\/\/ Errorln logs a message at level Error on the standard logger.\nfunc Errorln(args ...interface{}) {\n\tbaseLogger.sourced().Errorln(args...)\n}\n\n\/\/ Errorf logs a message at level Error on the standard logger.\nfunc Errorf(format string, args ...interface{}) {\n\tbaseLogger.sourced().Errorf(format, args...)\n}\n\n\/\/ Fatal logs a message at level Fatal on the standard logger.\nfunc Fatal(args ...interface{}) {\n\tbaseLogger.sourced().Fatal(args...)\n}\n\n\/\/ Fatalln logs a message at level Fatal on the standard logger.\nfunc Fatalln(args ...interface{}) {\n\tbaseLogger.sourced().Fatalln(args...)\n}\n\n\/\/ Fatalf logs a message at level Fatal on the standard logger.\nfunc Fatalf(format string, args ...interface{}) {\n\tbaseLogger.sourced().Fatalf(format, args...)\n}\n\n\/\/ AddHook adds hook to Prometheus' original logger.\nfunc AddHook(hook logrus.Hook) {\n\torigLogger.Hooks.Add(hook)\n}\n\ntype errorLogWriter struct{}\n\nfunc (errorLogWriter) Write(b []byte) (int, error) {\n\tbaseLogger.sourced().Error(string(b))\n\treturn len(b), nil\n}\n\n\/\/ NewErrorLogger returns a log.Logger that is meant to be used\n\/\/ in the ErrorLog field of an http.Server to log HTTP server errors.\nfunc NewErrorLogger() *log.Logger {\n\treturn log.New(&errorLogWriter{}, \"\", 0)\n}\n<|endoftext|>"} {"text":"<commit_before>package robo\n\nimport (\n\t\"net\/http\"\n)\n\n\/\/ Objects implementing the Handler interface are capable of serving\n\/\/ HTTP requests. It is expected to follow the same core conventions as\n\/\/ the \"net\/http\" equivalent.\ntype Handler interface {\n\tServeRoboHTTP(w ResponseWriter, r *Request)\n}\n\n\/\/ The HandlerFunc type serves as an adaptor to turn plain functions into\n\/\/ an implementation of the Handler interface.\ntype HandlerFunc func(w ResponseWriter, r *Request)\n\nfunc (h HandlerFunc) ServeRoboHTTP(w ResponseWriter, r *Request) {\n\th(w, r)\n}\n\n\/\/ The httpHandler type adds a ServeRoboHTTP method to implementations of\n\/\/ the http.Handler interface.\ntype httpHandler struct {\n\th http.Handler\n}\n\nfunc (h httpHandler) ServeRoboHTTP(w ResponseWriter, r *Request) {\n\th.h.ServeHTTP(w, r.Request)\n}\n\n\/\/ The ResponseWriter type mirrors http.ResponseWriter.\ntype ResponseWriter interface {\n\thttp.ResponseWriter\n}\n\n\/\/ The Request type extends an http.Request instance with additional\n\/\/ functionality.\ntype Request struct {\n\t*http.Request\n\n\t\/\/ named URL parameters for this request and route\n\tparams map[string]string\n\n\t\/\/ reference to the queue\n\tqueue *queue\n}\n\n\/\/ Next yields execution to the next matching handler, if there is one,\n\/\/ blocking until said handler has returned.\nfunc (r *Request) Next(w ResponseWriter) {\n\tr.queue.serveNext(w, r.Request)\n}\n\n\/\/ Param returns the value of a named URL parameter.\nfunc (r *Request) Param(name string) string {\n\tif r.params != nil {\n\t\treturn r.params[name]\n\t}\n\treturn \"\"\n}\n\n\/\/ Mux is a HTTP router. It multiplexes incoming requests to different\n\/\/ handlers based on user-provided rules on methods and paths.\n\/\/\n\/\/ The zero value for a Mux is a Mux without any registered handlers,\n\/\/ ready to use.\ntype Mux struct {\n\troutes []route\n}\n\n\/\/ NewMux returns a new Mux instance.\nfunc NewMux() *Mux {\n\treturn new(Mux)\n}\n\n\/\/ Any registers a new set of handlers listening to all requests for\n\/\/ the specified URL pattern.\nfunc (m *Mux) Any(pattern string, handlers ...interface{}) {\n\tm.add(\"\", pattern, handlers)\n}\n\n\/\/ Get registers a new set of handlers listening to GET requests for\n\/\/ the specified URL pattern.\nfunc (m *Mux) Get(pattern string, handlers ...interface{}) {\n\tm.add(\"GET\", pattern, handlers)\n}\n\n\/\/ Head registers a new set of handlers listening to HEAD requests for\n\/\/ the specified URL pattern.\nfunc (m *Mux) Head(pattern string, handlers ...interface{}) {\n\tm.add(\"HEAD\", pattern, handlers)\n}\n\n\/\/ Post registers a new set of handlers listening to POST requests for\n\/\/ the specified URL pattern.\nfunc (m *Mux) Post(pattern string, handlers ...interface{}) {\n\tm.add(\"POST\", pattern, handlers)\n}\n\n\/\/ Put registers a new set of handlers listening to PUT requests for\n\/\/ the specified URL pattern.\nfunc (m *Mux) Put(pattern string, handlers ...interface{}) {\n\tm.add(\"PUT\", pattern, handlers)\n}\n\n\/\/ Patch registers a new set of handlers listening to PATCH requests for\n\/\/ the specified URL pattern.\nfunc (m *Mux) Patch(pattern string, handlers ...interface{}) {\n\tm.add(\"PATCH\", pattern, handlers)\n}\n\n\/\/ Delete registers a new set of handlers listening to DELETE requests for\n\/\/ the specified URL pattern.\nfunc (m *Mux) Delete(pattern string, handlers ...interface{}) {\n\tm.add(\"DELETE\", pattern, handlers)\n}\n\n\/\/ Options registers a new set of handlers listening to OPTIONS requests for\n\/\/ the specified URL pattern.\nfunc (m *Mux) Options(pattern string, handlers ...interface{}) {\n\tm.add(\"OPTIONS\", pattern, handlers)\n}\n\n\/\/ add registers a set of handlers for the given HTTP method (\"\" matching\n\/\/ any method) and URL pattern.\nfunc (m *Mux) add(method, pattern string, handlers ...interface{}) {\n\tif len(handlers) == 0 {\n\t\tpanic(\"no handlers provided\")\n\t}\n\n\t\/\/ validate the provided set of handlers\n\tclean := make([]Handler, 0, len(handlers))\n\n\tfor _, h := range handlers {\n\t\tswitch h := h.(type) {\n\t\tcase Handler:\n\t\t\tclean = append(clean, h)\n\t\tcase func(w ResponseWriter, r *Request):\n\t\t\tclean = append(clean, HandlerFunc(h))\n\t\tcase http.Handler:\n\t\t\tclean = append(clean, httpHandler{h})\n\t\tcase func(w http.ResponseWriter, r *http.Request):\n\t\t\tclean = append(clean, httpHandler{http.HandlerFunc(h)})\n\t\tdefault:\n\t\t\tpanic(\"not a valid handler\")\n\t\t}\n\t}\n\n\tm.routes = append(m.routes, newRoute(method, pattern, clean))\n}\n\n\/\/ newRoute initializes a new route.\nfunc newRoute(method, rawPattern string, handlers []Handler) route {\n\tpattern, err := compilePattern(rawPattern)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn route{method, pattern, handlers}\n}\n\n\/\/ ServeRoboHTTP dispatches the request to matching routes registered with\n\/\/ the Mux instance.\nfunc (m *Mux) ServeRoboHTTP(w ResponseWriter, r *Request) {\n\tq := queue{nil, nil, m.routes}\n\tq.serveNext(w, r.Request)\n}\n\n\/\/ ServeHTTP dispatches the request to matching routes registered with\n\/\/ the Mux instance.\nfunc (m *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tm.ServeRoboHTTP(w, &Request{Request: r})\n}\n\n\/\/ The route type describes a registered route.\ntype route struct {\n\tmethod string\n\tpattern pattern\n\thandlers []Handler\n}\n\n\/\/ check tests whether the route matches a provided method and path. The\n\/\/ parameter map will always be non-nil when the first is true.\nfunc (r *route) check(method, path string) (bool, map[string]string) {\n\tif r.method != method && r.method != \"\" {\n\t\treturn false, nil\n\t}\n\n\tok, list := r.pattern.match(path, nil)\n\tif !ok {\n\t\treturn false, nil\n\t}\n\n\t\/\/ only build the parameter map if we have to\n\tif len(list) > 0 {\n\t\tparams := make(map[string]string)\n\t\tfor i := 0; i < len(list); i += 2 {\n\t\t\tparams[list[i]] = list[i+1]\n\t\t}\n\t\treturn true, params\n\t}\n\n\treturn true, nil\n}\n\n\/\/ The queue type holds the routing state of an incoming request.\ntype queue struct {\n\t\/\/ remaining handlers, and parameter map, for the current route\n\thandlers []Handler\n\tparams map[string]string\n\n\t\/\/ remaining routes to be tested\n\troutes []route\n}\n\n\/\/ ServeNext attempts to serve an HTTP request using the next matching\n\/\/ route\/handler in the queue.\nfunc (q *queue) serveNext(w ResponseWriter, hr *http.Request) {\n\t\/\/ does the current route still have handlers left?\n\tif len(q.handlers) > 0 {\n\t\th := q.handlers[0]\n\t\tq.handlers = q.handlers[1:]\n\n\t\th.ServeRoboHTTP(w, &Request{hr, q.params, q})\n\t\treturn\n\t}\n\n\t\/\/ look for the next matching route\n\tfor len(q.routes) > 0 {\n\t\tr := q.routes[0]\n\t\tq.routes = q.routes[1:]\n\n\t\t\/\/ does this route match the request at hand?\n\t\tok, params := r.check(hr.Method, hr.URL.Path)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tq.handlers = r.handlers[1:]\n\t\tq.params = params\n\n\t\t\/\/ invoke the route's first handler\n\t\tr.handlers[0].ServeRoboHTTP(w, &Request{hr, q.params, q})\n\t\treturn\n\t}\n\n\t\/\/ when we run out of routes, send a 404 message\n\thttp.Error(w, \"Not found.\\n\", 404)\n}\n<commit_msg>Fix Mux handler registration<commit_after>package robo\n\nimport (\n\t\"net\/http\"\n)\n\n\/\/ Objects implementing the Handler interface are capable of serving\n\/\/ HTTP requests. It is expected to follow the same core conventions as\n\/\/ the \"net\/http\" equivalent.\ntype Handler interface {\n\tServeRoboHTTP(w ResponseWriter, r *Request)\n}\n\n\/\/ The HandlerFunc type serves as an adaptor to turn plain functions into\n\/\/ an implementation of the Handler interface.\ntype HandlerFunc func(w ResponseWriter, r *Request)\n\nfunc (h HandlerFunc) ServeRoboHTTP(w ResponseWriter, r *Request) {\n\th(w, r)\n}\n\n\/\/ The httpHandler type adds a ServeRoboHTTP method to implementations of\n\/\/ the http.Handler interface.\ntype httpHandler struct {\n\th http.Handler\n}\n\nfunc (h httpHandler) ServeRoboHTTP(w ResponseWriter, r *Request) {\n\th.h.ServeHTTP(w, r.Request)\n}\n\n\/\/ The ResponseWriter type mirrors http.ResponseWriter.\ntype ResponseWriter interface {\n\thttp.ResponseWriter\n}\n\n\/\/ The Request type extends an http.Request instance with additional\n\/\/ functionality.\ntype Request struct {\n\t*http.Request\n\n\t\/\/ named URL parameters for this request and route\n\tparams map[string]string\n\n\t\/\/ reference to the queue\n\tqueue *queue\n}\n\n\/\/ Next yields execution to the next matching handler, if there is one,\n\/\/ blocking until said handler has returned.\nfunc (r *Request) Next(w ResponseWriter) {\n\tr.queue.serveNext(w, r.Request)\n}\n\n\/\/ Param returns the value of a named URL parameter.\nfunc (r *Request) Param(name string) string {\n\tif r.params != nil {\n\t\treturn r.params[name]\n\t}\n\treturn \"\"\n}\n\n\/\/ Mux is a HTTP router. It multiplexes incoming requests to different\n\/\/ handlers based on user-provided rules on methods and paths.\n\/\/\n\/\/ The zero value for a Mux is a Mux without any registered handlers,\n\/\/ ready to use.\ntype Mux struct {\n\troutes []route\n}\n\n\/\/ NewMux returns a new Mux instance.\nfunc NewMux() *Mux {\n\treturn new(Mux)\n}\n\n\/\/ Any registers a new set of handlers listening to all requests for\n\/\/ the specified URL pattern.\nfunc (m *Mux) Any(pattern string, handlers ...interface{}) {\n\tm.add(\"\", pattern, handlers...)\n}\n\n\/\/ Get registers a new set of handlers listening to GET requests for\n\/\/ the specified URL pattern.\nfunc (m *Mux) Get(pattern string, handlers ...interface{}) {\n\tm.add(\"GET\", pattern, handlers...)\n}\n\n\/\/ Head registers a new set of handlers listening to HEAD requests for\n\/\/ the specified URL pattern.\nfunc (m *Mux) Head(pattern string, handlers ...interface{}) {\n\tm.add(\"HEAD\", pattern, handlers...)\n}\n\n\/\/ Post registers a new set of handlers listening to POST requests for\n\/\/ the specified URL pattern.\nfunc (m *Mux) Post(pattern string, handlers ...interface{}) {\n\tm.add(\"POST\", pattern, handlers...)\n}\n\n\/\/ Put registers a new set of handlers listening to PUT requests for\n\/\/ the specified URL pattern.\nfunc (m *Mux) Put(pattern string, handlers ...interface{}) {\n\tm.add(\"PUT\", pattern, handlers...)\n}\n\n\/\/ Patch registers a new set of handlers listening to PATCH requests for\n\/\/ the specified URL pattern.\nfunc (m *Mux) Patch(pattern string, handlers ...interface{}) {\n\tm.add(\"PATCH\", pattern, handlers...)\n}\n\n\/\/ Delete registers a new set of handlers listening to DELETE requests for\n\/\/ the specified URL pattern.\nfunc (m *Mux) Delete(pattern string, handlers ...interface{}) {\n\tm.add(\"DELETE\", pattern, handlers...)\n}\n\n\/\/ Options registers a new set of handlers listening to OPTIONS requests for\n\/\/ the specified URL pattern.\nfunc (m *Mux) Options(pattern string, handlers ...interface{}) {\n\tm.add(\"OPTIONS\", pattern, handlers...)\n}\n\n\/\/ add registers a set of handlers for the given HTTP method (\"\" matching\n\/\/ any method) and URL pattern.\nfunc (m *Mux) add(method, pattern string, handlers ...interface{}) {\n\tif len(handlers) == 0 {\n\t\tpanic(\"no handlers provided\")\n\t}\n\n\t\/\/ validate the provided set of handlers\n\tclean := make([]Handler, 0, len(handlers))\n\n\tfor _, h := range handlers {\n\t\tswitch h := h.(type) {\n\t\tcase Handler:\n\t\t\tclean = append(clean, h)\n\t\tcase func(w ResponseWriter, r *Request):\n\t\t\tclean = append(clean, HandlerFunc(h))\n\t\tcase http.Handler:\n\t\t\tclean = append(clean, httpHandler{h})\n\t\tcase func(w http.ResponseWriter, r *http.Request):\n\t\t\tclean = append(clean, httpHandler{http.HandlerFunc(h)})\n\t\tdefault:\n\t\t\tpanic(\"not a valid handler\")\n\t\t}\n\t}\n\n\tm.routes = append(m.routes, newRoute(method, pattern, clean))\n}\n\n\/\/ newRoute initializes a new route.\nfunc newRoute(method, rawPattern string, handlers []Handler) route {\n\tpattern, err := compilePattern(rawPattern)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn route{method, pattern, handlers}\n}\n\n\/\/ ServeRoboHTTP dispatches the request to matching routes registered with\n\/\/ the Mux instance.\nfunc (m *Mux) ServeRoboHTTP(w ResponseWriter, r *Request) {\n\tq := queue{nil, nil, m.routes}\n\tq.serveNext(w, r.Request)\n}\n\n\/\/ ServeHTTP dispatches the request to matching routes registered with\n\/\/ the Mux instance.\nfunc (m *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tm.ServeRoboHTTP(w, &Request{Request: r})\n}\n\n\/\/ The route type describes a registered route.\ntype route struct {\n\tmethod string\n\tpattern pattern\n\thandlers []Handler\n}\n\n\/\/ check tests whether the route matches a provided method and path. The\n\/\/ parameter map will always be non-nil when the first is true.\nfunc (r *route) check(method, path string) (bool, map[string]string) {\n\tif r.method != method && r.method != \"\" {\n\t\treturn false, nil\n\t}\n\n\tok, list := r.pattern.match(path, nil)\n\tif !ok {\n\t\treturn false, nil\n\t}\n\n\t\/\/ only build the parameter map if we have to\n\tif len(list) > 0 {\n\t\tparams := make(map[string]string)\n\t\tfor i := 0; i < len(list); i += 2 {\n\t\t\tparams[list[i]] = list[i+1]\n\t\t}\n\t\treturn true, params\n\t}\n\n\treturn true, nil\n}\n\n\/\/ The queue type holds the routing state of an incoming request.\ntype queue struct {\n\t\/\/ remaining handlers, and parameter map, for the current route\n\thandlers []Handler\n\tparams map[string]string\n\n\t\/\/ remaining routes to be tested\n\troutes []route\n}\n\n\/\/ ServeNext attempts to serve an HTTP request using the next matching\n\/\/ route\/handler in the queue.\nfunc (q *queue) serveNext(w ResponseWriter, hr *http.Request) {\n\t\/\/ does the current route still have handlers left?\n\tif len(q.handlers) > 0 {\n\t\th := q.handlers[0]\n\t\tq.handlers = q.handlers[1:]\n\n\t\th.ServeRoboHTTP(w, &Request{hr, q.params, q})\n\t\treturn\n\t}\n\n\t\/\/ look for the next matching route\n\tfor len(q.routes) > 0 {\n\t\tr := q.routes[0]\n\t\tq.routes = q.routes[1:]\n\n\t\t\/\/ does this route match the request at hand?\n\t\tok, params := r.check(hr.Method, hr.URL.Path)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tq.handlers = r.handlers[1:]\n\t\tq.params = params\n\n\t\t\/\/ invoke the route's first handler\n\t\tr.handlers[0].ServeRoboHTTP(w, &Request{hr, q.params, q})\n\t\treturn\n\t}\n\n\t\/\/ when we run out of routes, send a 404 message\n\thttp.Error(w, \"Not found.\\n\", 404)\n}\n<|endoftext|>"} {"text":"<commit_before>package log\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"bitbucket.org\/mundipagg\/boletoapi\/config\"\n\t\"github.com\/mundipagg\/goseq\"\n)\n\nvar logger *goseq.Logger\n\n\/\/ Operation a operacao usada na API\nvar Operation string\n\n\/\/ Recipient o nome do banco\nvar Recipient string\n\n\/\/ Log struct com os elemtos do log\ntype Log struct {\n\tOperation string\n\tRecipient string\n\tNossoNumero uint\n\tlogger *goseq.Logger\n}\n\n\/\/Install instala o \"servico\" de log do SEQ\nfunc Install() error {\n\t_logger, err := goseq.GetLogger(config.Get().SEQUrl, config.Get().SEQAPIKey, 150)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_logger.SetDefaultProperties(map[string]interface{}{\n\t\t\"Application\": config.Get().ApplicationName,\n\t\t\"Environment\": config.Get().Environment,\n\t\t\"Domain\": config.Get().SEQDomain,\n\t})\n\tlogger = _logger\n\treturn nil\n}\n\nfunc formatter(message string) string {\n\treturn \"[{Application}: {Operation}] - {MessageType} \" + message\n}\n\n\/\/CreateLog cria uma nova instancia do Log\nfunc CreateLog() *Log {\n\treturn &Log{\n\t\tlogger: logger,\n\t}\n}\n\n\/\/ Request loga o request para algum banco\nfunc (l Log) Request(content interface{}, url string, headers http.Header) {\n\tgo (func() {\n\t\tprops := l.defaultProperties(\"Request\", content)\n\t\tprops.AddProperty(\"Headers\", headers)\n\t\tprops.AddProperty(\"URL\", url)\n\t\tmsg := formatter(\"to {Recipient} ({URL})\")\n\n\t\tl.logger.Information(msg, props)\n\t})()\n}\n\n\/\/ Response loga o response para algum banco\nfunc (l Log) Response(content interface{}, url string) {\n\tgo (func() {\n\t\tprops := l.defaultProperties(\"Response\", content)\n\t\tprops.AddProperty(\"URL\", url)\n\t\tmsg := formatter(\"from {Recipient} ({URL})\")\n\n\t\tl.logger.Information(msg, props)\n\t})()\n}\n\n\/\/Info loga mensagem do level INFO\nfunc Info(msg string) {\n\tgo logger.Information(msg, goseq.NewProperties())\n}\n\n\/\/Warn loga mensagem do leve Warning\nfunc (l Log) Warn(content interface{}, msg string) {\n\tgo (func() {\n\t\tprops := l.defaultProperties(\"Warning\", content)\n\t\tm := formatter(msg)\n\n\t\tl.logger.Warning(m, props)\n\t})()\n}\n\n\/\/ Fatal loga erros da aplicação\nfunc (l Log) Fatal(content interface{}, msg string) {\n\tgo (func() {\n\t\tprops := l.defaultProperties(\"Error\", content)\n\t\tm := formatter(msg)\n\n\t\tl.logger.Fatal(m, props)\n\t})()\n}\n\nfunc (l Log) defaultProperties(messageType string, content interface{}) goseq.Properties {\n\tprops := goseq.NewProperties()\n\tprops.AddProperty(\"MessageType\", messageType)\n\tprops.AddProperty(\"Content\", content)\n\tprops.AddProperty(\"Recipient\", l.Recipient)\n\tprops.AddProperty(\"Operation\", l.Operation)\n\tprops.AddProperty(\"NossoNumero\", l.NossoNumero)\n\treturn props\n}\n\n\/\/Close fecha a conexao com o SEQ\nfunc Close() {\n\tfmt.Println(\"Closing SEQ Connection\")\n\tlogger.Close()\n}\n<commit_msg>:construction: muda o metodo de log para receber um mapa[string]string<commit_after>package log\n\nimport (\n\t\"fmt\"\n\n\t\"bitbucket.org\/mundipagg\/boletoapi\/config\"\n\t\"github.com\/mundipagg\/goseq\"\n)\n\nvar logger *goseq.Logger\n\n\/\/ Operation a operacao usada na API\nvar Operation string\n\n\/\/ Recipient o nome do banco\nvar Recipient string\n\n\/\/ Log struct com os elemtos do log\ntype Log struct {\n\tOperation string\n\tRecipient string\n\tNossoNumero uint\n\tlogger *goseq.Logger\n}\n\n\/\/Install instala o \"servico\" de log do SEQ\nfunc Install() error {\n\t_logger, err := goseq.GetLogger(config.Get().SEQUrl, config.Get().SEQAPIKey, 150)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_logger.SetDefaultProperties(map[string]interface{}{\n\t\t\"Application\": config.Get().ApplicationName,\n\t\t\"Environment\": config.Get().Environment,\n\t\t\"Domain\": config.Get().SEQDomain,\n\t})\n\tlogger = _logger\n\treturn nil\n}\n\nfunc formatter(message string) string {\n\treturn \"[{Application}: {Operation}] - {MessageType} \" + message\n}\n\n\/\/CreateLog cria uma nova instancia do Log\nfunc CreateLog() *Log {\n\treturn &Log{\n\t\tlogger: logger,\n\t}\n}\n\n\/\/ Request loga o request para algum banco\nfunc (l Log) Request(content interface{}, url string, headers map[string]string) {\n\tgo (func() {\n\t\tprops := l.defaultProperties(\"Request\", content)\n\t\tprops.AddProperty(\"Headers\", headers)\n\t\tprops.AddProperty(\"URL\", url)\n\t\tmsg := formatter(\"to {Recipient} ({URL})\")\n\n\t\tl.logger.Information(msg, props)\n\t})()\n}\n\n\/\/ Response loga o response para algum banco\nfunc (l Log) Response(content interface{}, url string) {\n\tgo (func() {\n\t\tprops := l.defaultProperties(\"Response\", content)\n\t\tprops.AddProperty(\"URL\", url)\n\t\tmsg := formatter(\"from {Recipient} ({URL})\")\n\n\t\tl.logger.Information(msg, props)\n\t})()\n}\n\n\/\/Info loga mensagem do level INFO\nfunc Info(msg string) {\n\tgo logger.Information(msg, goseq.NewProperties())\n}\n\n\/\/Warn loga mensagem do leve Warning\nfunc (l Log) Warn(content interface{}, msg string) {\n\tgo (func() {\n\t\tprops := l.defaultProperties(\"Warning\", content)\n\t\tm := formatter(msg)\n\n\t\tl.logger.Warning(m, props)\n\t})()\n}\n\n\/\/ Fatal loga erros da aplicação\nfunc (l Log) Fatal(content interface{}, msg string) {\n\tgo (func() {\n\t\tprops := l.defaultProperties(\"Error\", content)\n\t\tm := formatter(msg)\n\n\t\tl.logger.Fatal(m, props)\n\t})()\n}\n\nfunc (l Log) defaultProperties(messageType string, content interface{}) goseq.Properties {\n\tprops := goseq.NewProperties()\n\tprops.AddProperty(\"MessageType\", messageType)\n\tprops.AddProperty(\"Content\", content)\n\tprops.AddProperty(\"Recipient\", l.Recipient)\n\tprops.AddProperty(\"Operation\", l.Operation)\n\tprops.AddProperty(\"NossoNumero\", l.NossoNumero)\n\treturn props\n}\n\n\/\/Close fecha a conexao com o SEQ\nfunc Close() {\n\tfmt.Println(\"Closing SEQ Connection\")\n\tlogger.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package httpmux\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\n\tmuxpath \"github.com\/gogolfing\/httpmux\/path\"\n)\n\ntype variablesKey int\n\nconst variablesKeyValue variablesKey = 1\n\ntype Mux struct {\n\troot *Route\n\n\tAllowTrailingSlashes bool\n\n\tMethodNotAllowedHandler http.Handler\n\tNotFoundHandler http.Handler\n}\n\nfunc New() *Mux {\n\treturn &Mux{\n\t\troot: newRootRoute(),\n\t}\n}\n\nfunc (m *Mux) HandleFunc(path string, handlerFunc http.HandlerFunc, methods ...string) *Route {\n\treturn m.Handle(path, http.HandlerFunc(handlerFunc), methods...)\n}\n\nfunc (m *Mux) Handle(path string, handler http.Handler, methods ...string) *Route {\n\treturn m.SubRoute(path).Handle(handler, methods...)\n}\n\nfunc (m *Mux) Root() *Route {\n\treturn m.SubRoute(muxpath.Slash)\n}\n\nfunc (m *Mux) SubRoute(path string) *Route {\n\treturn m.root.SubRoute(path)\n}\n\nfunc (m *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\thandler, vars, err := m.root.findHandler(r, m.getFoundMatcher())\n\tif err != nil {\n\t\tm.serveError(w, r, err)\n\t\treturn\n\t}\n\tr = m.mapVariables(r, vars)\n\thandler.ServeHTTP(w, r)\n}\n\nfunc (m *Mux) getFoundMatcher() foundMatcher {\n\tif m.AllowTrailingSlashes {\n\t\treturn stringFoundMatcher(muxpath.Slash)\n\t}\n\treturn stringFoundMatcher(\"\")\n}\n\nfunc (m *Mux) serveError(w http.ResponseWriter, r *http.Request, err error) {\n\thandler := m.getErrorHandler(err)\n\tif handler == nil {\n\t\treturn\n\t}\n\thandler.ServeHTTP(w, r)\n}\n\nfunc (m *Mux) getErrorHandler(err error) http.Handler {\n\tif handler, ok := err.(ErrMethodNotAllowed); ok {\n\t\tif m.MethodNotAllowedHandler != nil {\n\t\t\treturn m.MethodNotAllowedHandler\n\t\t}\n\t\treturn handler\n\t}\n\tif err == ErrNotFound {\n\t\tif m.NotFoundHandler != nil {\n\t\t\treturn m.NotFoundHandler\n\t\t}\n\t\treturn ErrNotFound\n\t}\n\treturn nil\n}\n\nfunc (m *Mux) mapVariables(r *http.Request, vars []*Variable) *http.Request {\n\tif len(vars) == 0 {\n\t\treturn r\n\t}\n\n\tctx := context.WithValue(r.Context(), variablesKeyValue, vars)\n\tfor _, v := range vars {\n\t\tctx = context.WithValue(ctx, v.Name, v.Value)\n\t}\n\treturn r.WithContext(ctx)\n}\n\nfunc VariablesFrom(c context.Context) []*Variable {\n\tvars, _ := c.Value(variablesKeyValue).([]*Variable)\n\treturn vars\n}\n\nfunc VariableFrom(c context.Context, name string) *Variable {\n\tv, _ := VariableFromOk(c, name)\n\treturn v\n}\n\nfunc VariableFromOk(c context.Context, name string) (*Variable, bool) {\n\tvalue, ok := c.Value(VarName(name)).(string)\n\tif !ok {\n\t\treturn nil, ok\n\t}\n\treturn &Variable{VarName(name), value}, ok\n}\n<commit_msg>Update Mux.Root() to return internal root Route<commit_after>package httpmux\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\n\tmuxpath \"github.com\/gogolfing\/httpmux\/path\"\n)\n\ntype variablesKey int\n\nconst variablesKeyValue variablesKey = 1\n\ntype Mux struct {\n\troot *Route\n\n\tAllowTrailingSlashes bool\n\n\tMethodNotAllowedHandler http.Handler\n\tNotFoundHandler http.Handler\n}\n\nfunc New() *Mux {\n\treturn &Mux{\n\t\troot: newRootRoute(),\n\t}\n}\n\nfunc (m *Mux) HandleFunc(path string, handlerFunc http.HandlerFunc, methods ...string) *Route {\n\treturn m.Handle(path, http.HandlerFunc(handlerFunc), methods...)\n}\n\nfunc (m *Mux) Handle(path string, handler http.Handler, methods ...string) *Route {\n\treturn m.SubRoute(path).Handle(handler, methods...)\n}\n\nfunc (m *Mux) Root() *Route {\n\treturn m.root\n}\n\nfunc (m *Mux) SubRoute(path string) *Route {\n\treturn m.root.SubRoute(path)\n}\n\nfunc (m *Mux) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\thandler, vars, err := m.root.findHandler(r, m.getFoundMatcher())\n\tif err != nil {\n\t\tm.serveError(w, r, err)\n\t\treturn\n\t}\n\tr = m.mapVariables(r, vars)\n\thandler.ServeHTTP(w, r)\n}\n\nfunc (m *Mux) getFoundMatcher() foundMatcher {\n\tif m.AllowTrailingSlashes {\n\t\treturn stringFoundMatcher(muxpath.Slash)\n\t}\n\treturn stringFoundMatcher(\"\")\n}\n\nfunc (m *Mux) serveError(w http.ResponseWriter, r *http.Request, err error) {\n\thandler := m.getErrorHandler(err)\n\tif handler == nil {\n\t\treturn\n\t}\n\thandler.ServeHTTP(w, r)\n}\n\nfunc (m *Mux) getErrorHandler(err error) http.Handler {\n\tif handler, ok := err.(ErrMethodNotAllowed); ok {\n\t\tif m.MethodNotAllowedHandler != nil {\n\t\t\treturn m.MethodNotAllowedHandler\n\t\t}\n\t\treturn handler\n\t}\n\tif err == ErrNotFound {\n\t\tif m.NotFoundHandler != nil {\n\t\t\treturn m.NotFoundHandler\n\t\t}\n\t\treturn ErrNotFound\n\t}\n\treturn nil\n}\n\nfunc (m *Mux) mapVariables(r *http.Request, vars []*Variable) *http.Request {\n\tif len(vars) == 0 {\n\t\treturn r\n\t}\n\n\tctx := context.WithValue(r.Context(), variablesKeyValue, vars)\n\tfor _, v := range vars {\n\t\tctx = context.WithValue(ctx, v.Name, v.Value)\n\t}\n\treturn r.WithContext(ctx)\n}\n\nfunc VariablesFrom(c context.Context) []*Variable {\n\tvars, _ := c.Value(variablesKeyValue).([]*Variable)\n\treturn vars\n}\n\nfunc VariableFrom(c context.Context, name string) *Variable {\n\tv, _ := VariableFromOk(c, name)\n\treturn v\n}\n\nfunc VariableFromOk(c context.Context, name string) (*Variable, bool) {\n\tvalue, ok := c.Value(VarName(name)).(string)\n\tif !ok {\n\t\treturn nil, ok\n\t}\n\treturn &Variable{VarName(name), value}, ok\n}\n<|endoftext|>"} {"text":"<commit_before>package httpclient\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httptrace\"\n\t\"net\/http\/httputil\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n)\n\n\/\/ A transport that prints request and response\n\ntype LoggingTransport struct {\n\tt *http.Transport\n\trequestBody bool\n\tresponseBody bool\n\ttiming bool\n}\n\nfunc (lt *LoggingTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {\n\tdreq, _ := httputil.DumpRequest(req, lt.requestBody)\n\t\/\/fmt.Println(\"REQUEST:\", strconv.Quote(string(dreq)))\n\tfmt.Println(\"REQUEST:\", string(dreq))\n\tfmt.Println(\"\")\n\n\tvar startTime time.Time\n\tvar elapsed time.Duration\n\n\tif lt.timing {\n\t\tstartTime = time.Now()\n\t}\n\n\tresp, err = lt.t.RoundTrip(req)\n\n\tif lt.timing {\n\t\telapsed = time.Since(startTime)\n\t}\n\n\tif err != nil {\n\t\tif lt.requestBody {\n\t\t\t\/\/ don't print the body twice\n\t\t\tdreq, _ = httputil.DumpRequest(req, false)\n\t\t}\n\t\tfmt.Println(\"ERROR:\", err, \"REQUEST:\", strconv.Quote(string(dreq)))\n\t}\n\tif resp != nil {\n\t\tdresp, _ := httputil.DumpResponse(resp, lt.responseBody)\n\t\tfmt.Println(\"RESPONSE:\", string(dresp))\n\t}\n\n\tif elapsed > 0 {\n\t\tfmt.Println(\"ELAPSED TIME:\", elapsed.Round(time.Millisecond))\n\t}\n\n\tfmt.Println(\"\")\n\treturn\n}\n\nfunc (lt *LoggingTransport) CancelRequest(req *http.Request) {\n\tdreq, _ := httputil.DumpRequest(req, false)\n\tfmt.Println(\"CANCEL REQUEST:\", strconv.Quote(string(dreq)))\n\tlt.t.CancelRequest(req)\n}\n\n\/\/ Enable logging requests\/response headers\n\/\/\n\/\/ if requestBody == true, also log request body\n\/\/ if responseBody == true, also log response body\n\/\/ if timing == true, also log elapsed time\nfunc StartLogging(requestBody, responseBody, timing bool) {\n\thttp.DefaultTransport = &LoggingTransport{&http.Transport{}, requestBody, responseBody, timing}\n}\n\n\/\/ Disable logging requests\/responses\nfunc StopLogging() {\n\thttp.DefaultTransport = &http.Transport{}\n}\n\n\/\/ Wrap input transport into a LoggingTransport\nfunc LoggedTransport(t *http.Transport, requestBody, responseBody, timing bool) http.RoundTripper {\n\treturn &LoggingTransport{t, requestBody, responseBody, timing}\n}\n\n\/\/ A Reader that \"logs\" progress\n\ntype ProgressReader struct {\n\tr io.Reader\n\tc [1]byte\n\tthreshold int\n\tcurr int\n}\n\nfunc NewProgressReader(r io.Reader, c byte, threshold int) *ProgressReader {\n\tif c == 0 {\n\t\tc = '.'\n\t}\n\tif threshold <= 0 {\n\t\tthreshold = 10240\n\t}\n\tp := &ProgressReader{r: r, c: [1]byte{c}, threshold: threshold, curr: 0}\n\treturn p\n}\n\nfunc (p *ProgressReader) Read(b []byte) (int, error) {\n\tn, err := p.r.Read(b)\n\n\tp.curr += n\n\n\tif err == io.EOF {\n\t\tos.Stdout.Write([]byte{'\\n'})\n\t\tos.Stdout.Sync()\n\t} else if p.curr >= p.threshold {\n\t\tp.curr -= p.threshold\n\n\t\tos.Stdout.Write(p.c[:])\n\t\tos.Stdout.Sync()\n\t}\n\n\treturn n, err\n}\n\nfunc (p *ProgressReader) Close() error {\n\tif rc, ok := p.r.(io.ReadCloser); ok {\n\t\treturn rc.Close()\n\t} else {\n\t\treturn nil\n\t}\n}\n\n\/\/ A Logger that can be disabled\n\ntype DebugLog bool\n\nfunc (d DebugLog) Println(args ...interface{}) {\n\tif d {\n\t\tlog.Println(args...)\n\t}\n}\n\nfunc (d DebugLog) Printf(fmt string, args ...interface{}) {\n\tif d {\n\t\tlog.Printf(fmt, args...)\n\t}\n}\n\n\/\/ A ClientTrace implementation that collects request time\n\ntype RequestTrace struct {\n\tDNS time.Duration\n\tConnect time.Duration\n\tConnected bool\n\tTLSHandshake time.Duration\n\tRequest time.Duration\n\tWait time.Duration\n\tResponse time.Duration\n\n\tstartTime time.Time\n}\n\nfunc (r *RequestTrace) Reset() {\n\tr.DNS = 0\n\tr.Connect = 0\n\tr.Connected = false\n\tr.TLSHandshake = 0\n\tr.Request = 0\n\tr.Wait = 0\n}\n\nfunc (r *RequestTrace) NewClientTrace(trace bool) *httptrace.ClientTrace {\n\treturn &httptrace.ClientTrace{\n\t\tGotConn: func(info httptrace.GotConnInfo) {\n\t\t\tr.Connected = info.WasIdle\n\n\t\t\tif trace {\n\t\t\t\tlog.Println(\"GotConn\", info.Conn.RemoteAddr(), \"reused:\", info.Reused, \"wasIdle:\", info.WasIdle)\n\t\t\t}\n\t\t},\n\n\t\tConnectStart: func(network, addr string) {\n\t\t\tr.startTime = time.Now()\n\n\t\t\tif trace {\n\t\t\t\tlog.Println(\"ConnectStart\", network, addr)\n\t\t\t}\n\t\t},\n\n\t\tConnectDone: func(network, addr string, err error) {\n\t\t\tr.Connect = time.Since(r.startTime)\n\t\t\tr.startTime = time.Time{}\n\n\t\t\tif trace {\n\t\t\t\tlog.Println(\"ConnectDone\", network, addr, err)\n\t\t\t}\n\t\t},\n\n\t\tDNSStart: func(info httptrace.DNSStartInfo) {\n\t\t\tr.startTime = time.Now()\n\n\t\t\tif trace {\n\t\t\t\tlog.Println(\"DNSStart\", info.Host)\n\t\t\t}\n\t\t},\n\n\t\tDNSDone: func(info httptrace.DNSDoneInfo) {\n\t\t\tr.DNS = time.Since(r.startTime)\n\t\t\tr.startTime = time.Time{}\n\n\t\t\tif trace {\n\t\t\t\tlog.Println(\"DNSDone\", info.Addrs)\n\t\t\t}\n\t\t},\n\n\t\tTLSHandshakeStart: func() {\n\t\t\tr.startTime = time.Now()\n\n\t\t\tif trace {\n\t\t\t\tlog.Println(\"TLSHandshakeStart\")\n\t\t\t}\n\t\t},\n\n\t\tTLSHandshakeDone: func(state tls.ConnectionState, err error) {\n\t\t\tr.TLSHandshake = time.Since(r.startTime)\n\t\t\tr.startTime = time.Time{}\n\n\t\t\tif trace {\n\t\t\t\tlog.Println(\"TLSHandshakeDone\", err)\n\t\t\t}\n\t\t},\n\n\t\tWroteHeaderField: func(string, []string) {\n\t\t\tif r.startTime.IsZero() {\n\t\t\t\tr.startTime = time.Now()\n\n\t\t\t\tif trace {\n\t\t\t\t\tlog.Println(\"WroteHeader\")\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\n\t\tWroteRequest: func(info httptrace.WroteRequestInfo) {\n\t\t\tr.Request = time.Since(r.startTime)\n\t\t\tr.startTime = time.Now()\n\n\t\t\tif trace {\n\t\t\t\tlog.Println(\"WroteRequest\")\n\t\t\t}\n\t\t},\n\n\t\tGotFirstResponseByte: func() {\n\t\t\tr.Wait = time.Since(r.startTime)\n\t\t\tr.startTime = time.Now()\n\n\t\t\tif trace {\n\t\t\t\tlog.Println(\"GotFirstResponseByte\")\n\t\t\t}\n\t\t},\n\t}\n}\n\n\/\/ Call this after the response has been received to set the Response duration\n\/\/ (there is no callback for this)\n\nfunc (r *RequestTrace) Done() {\n\tr.Response = time.Since(r.startTime)\n\tr.startTime = time.Now()\n}\n<commit_msg>need to reset Response<commit_after>package httpclient\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httptrace\"\n\t\"net\/http\/httputil\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n)\n\n\/\/ A transport that prints request and response\n\ntype LoggingTransport struct {\n\tt *http.Transport\n\trequestBody bool\n\tresponseBody bool\n\ttiming bool\n}\n\nfunc (lt *LoggingTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {\n\tdreq, _ := httputil.DumpRequest(req, lt.requestBody)\n\t\/\/fmt.Println(\"REQUEST:\", strconv.Quote(string(dreq)))\n\tfmt.Println(\"REQUEST:\", string(dreq))\n\tfmt.Println(\"\")\n\n\tvar startTime time.Time\n\tvar elapsed time.Duration\n\n\tif lt.timing {\n\t\tstartTime = time.Now()\n\t}\n\n\tresp, err = lt.t.RoundTrip(req)\n\n\tif lt.timing {\n\t\telapsed = time.Since(startTime)\n\t}\n\n\tif err != nil {\n\t\tif lt.requestBody {\n\t\t\t\/\/ don't print the body twice\n\t\t\tdreq, _ = httputil.DumpRequest(req, false)\n\t\t}\n\t\tfmt.Println(\"ERROR:\", err, \"REQUEST:\", strconv.Quote(string(dreq)))\n\t}\n\tif resp != nil {\n\t\tdresp, _ := httputil.DumpResponse(resp, lt.responseBody)\n\t\tfmt.Println(\"RESPONSE:\", string(dresp))\n\t}\n\n\tif elapsed > 0 {\n\t\tfmt.Println(\"ELAPSED TIME:\", elapsed.Round(time.Millisecond))\n\t}\n\n\tfmt.Println(\"\")\n\treturn\n}\n\nfunc (lt *LoggingTransport) CancelRequest(req *http.Request) {\n\tdreq, _ := httputil.DumpRequest(req, false)\n\tfmt.Println(\"CANCEL REQUEST:\", strconv.Quote(string(dreq)))\n\tlt.t.CancelRequest(req)\n}\n\n\/\/ Enable logging requests\/response headers\n\/\/\n\/\/ if requestBody == true, also log request body\n\/\/ if responseBody == true, also log response body\n\/\/ if timing == true, also log elapsed time\nfunc StartLogging(requestBody, responseBody, timing bool) {\n\thttp.DefaultTransport = &LoggingTransport{&http.Transport{}, requestBody, responseBody, timing}\n}\n\n\/\/ Disable logging requests\/responses\nfunc StopLogging() {\n\thttp.DefaultTransport = &http.Transport{}\n}\n\n\/\/ Wrap input transport into a LoggingTransport\nfunc LoggedTransport(t *http.Transport, requestBody, responseBody, timing bool) http.RoundTripper {\n\treturn &LoggingTransport{t, requestBody, responseBody, timing}\n}\n\n\/\/ A Reader that \"logs\" progress\n\ntype ProgressReader struct {\n\tr io.Reader\n\tc [1]byte\n\tthreshold int\n\tcurr int\n}\n\nfunc NewProgressReader(r io.Reader, c byte, threshold int) *ProgressReader {\n\tif c == 0 {\n\t\tc = '.'\n\t}\n\tif threshold <= 0 {\n\t\tthreshold = 10240\n\t}\n\tp := &ProgressReader{r: r, c: [1]byte{c}, threshold: threshold, curr: 0}\n\treturn p\n}\n\nfunc (p *ProgressReader) Read(b []byte) (int, error) {\n\tn, err := p.r.Read(b)\n\n\tp.curr += n\n\n\tif err == io.EOF {\n\t\tos.Stdout.Write([]byte{'\\n'})\n\t\tos.Stdout.Sync()\n\t} else if p.curr >= p.threshold {\n\t\tp.curr -= p.threshold\n\n\t\tos.Stdout.Write(p.c[:])\n\t\tos.Stdout.Sync()\n\t}\n\n\treturn n, err\n}\n\nfunc (p *ProgressReader) Close() error {\n\tif rc, ok := p.r.(io.ReadCloser); ok {\n\t\treturn rc.Close()\n\t} else {\n\t\treturn nil\n\t}\n}\n\n\/\/ A Logger that can be disabled\n\ntype DebugLog bool\n\nfunc (d DebugLog) Println(args ...interface{}) {\n\tif d {\n\t\tlog.Println(args...)\n\t}\n}\n\nfunc (d DebugLog) Printf(fmt string, args ...interface{}) {\n\tif d {\n\t\tlog.Printf(fmt, args...)\n\t}\n}\n\n\/\/ A ClientTrace implementation that collects request time\n\ntype RequestTrace struct {\n\tDNS time.Duration\n\tConnect time.Duration\n\tConnected bool\n\tTLSHandshake time.Duration\n\tRequest time.Duration\n\tWait time.Duration\n\tResponse time.Duration\n\n\tstartTime time.Time\n}\n\nfunc (r *RequestTrace) Reset() {\n\tr.DNS = 0\n\tr.Connect = 0\n\tr.Connected = false\n\tr.TLSHandshake = 0\n\tr.Request = 0\n\tr.Wait = 0\n\tr.Response = 0\n}\n\nfunc (r *RequestTrace) NewClientTrace(trace bool) *httptrace.ClientTrace {\n\treturn &httptrace.ClientTrace{\n\t\tGotConn: func(info httptrace.GotConnInfo) {\n\t\t\tr.Connected = info.WasIdle\n\n\t\t\tif trace {\n\t\t\t\tlog.Println(\"GotConn\", info.Conn.RemoteAddr(), \"reused:\", info.Reused, \"wasIdle:\", info.WasIdle)\n\t\t\t}\n\t\t},\n\n\t\tConnectStart: func(network, addr string) {\n\t\t\tr.startTime = time.Now()\n\n\t\t\tif trace {\n\t\t\t\tlog.Println(\"ConnectStart\", network, addr)\n\t\t\t}\n\t\t},\n\n\t\tConnectDone: func(network, addr string, err error) {\n\t\t\tr.Connect = time.Since(r.startTime)\n\t\t\tr.startTime = time.Time{}\n\n\t\t\tif trace {\n\t\t\t\tlog.Println(\"ConnectDone\", network, addr, err)\n\t\t\t}\n\t\t},\n\n\t\tDNSStart: func(info httptrace.DNSStartInfo) {\n\t\t\tr.startTime = time.Now()\n\n\t\t\tif trace {\n\t\t\t\tlog.Println(\"DNSStart\", info.Host)\n\t\t\t}\n\t\t},\n\n\t\tDNSDone: func(info httptrace.DNSDoneInfo) {\n\t\t\tr.DNS = time.Since(r.startTime)\n\t\t\tr.startTime = time.Time{}\n\n\t\t\tif trace {\n\t\t\t\tlog.Println(\"DNSDone\", info.Addrs)\n\t\t\t}\n\t\t},\n\n\t\tTLSHandshakeStart: func() {\n\t\t\tr.startTime = time.Now()\n\n\t\t\tif trace {\n\t\t\t\tlog.Println(\"TLSHandshakeStart\")\n\t\t\t}\n\t\t},\n\n\t\tTLSHandshakeDone: func(state tls.ConnectionState, err error) {\n\t\t\tr.TLSHandshake = time.Since(r.startTime)\n\t\t\tr.startTime = time.Time{}\n\n\t\t\tif trace {\n\t\t\t\tlog.Println(\"TLSHandshakeDone\", err)\n\t\t\t}\n\t\t},\n\n\t\tWroteHeaderField: func(string, []string) {\n\t\t\tif r.startTime.IsZero() {\n\t\t\t\tr.startTime = time.Now()\n\n\t\t\t\tif trace {\n\t\t\t\t\tlog.Println(\"WroteHeader\")\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\n\t\tWroteRequest: func(info httptrace.WroteRequestInfo) {\n\t\t\tr.Request = time.Since(r.startTime)\n\t\t\tr.startTime = time.Now()\n\n\t\t\tif trace {\n\t\t\t\tlog.Println(\"WroteRequest\")\n\t\t\t}\n\t\t},\n\n\t\tGotFirstResponseByte: func() {\n\t\t\tr.Wait = time.Since(r.startTime)\n\t\t\tr.startTime = time.Now()\n\n\t\t\tif trace {\n\t\t\t\tlog.Println(\"GotFirstResponseByte\")\n\t\t\t}\n\t\t},\n\t}\n}\n\n\/\/ Call this after the response has been received to set the Response duration\n\/\/ (there is no callback for this)\n\nfunc (r *RequestTrace) Done() {\n\tr.Response = time.Since(r.startTime)\n\tr.startTime = time.Now()\n}\n<|endoftext|>"} {"text":"<commit_before>package jsoniter\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unsafe\"\n)\n\nvar typeDecoders = map[string]ValDecoder{}\nvar fieldDecoders = map[string]ValDecoder{}\nvar typeEncoders = map[string]ValEncoder{}\nvar fieldEncoders = map[string]ValEncoder{}\nvar extensions = []Extension{}\n\ntype StructDescriptor struct {\n\tType reflect.Type\n\tFields []*Binding\n}\n\nfunc (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding {\n\tfor _, binding := range structDescriptor.Fields {\n\t\tif binding.Field.Name == fieldName {\n\t\t\treturn binding\n\t\t}\n\t}\n\treturn nil\n}\n\ntype Binding struct {\n\tField *reflect.StructField\n\tFromNames []string\n\tToNames []string\n\tEncoder ValEncoder\n\tDecoder ValDecoder\n}\n\ntype Extension interface {\n\tUpdateStructDescriptor(structDescriptor *StructDescriptor)\n\tCreateDecoder(typ reflect.Type) ValDecoder\n\tCreateEncoder(typ reflect.Type) ValEncoder\n\tDecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder\n\tDecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder\n}\n\ntype DummyExtension struct {\n}\n\nfunc (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {\n}\n\nfunc (extension *DummyExtension) CreateDecoder(typ reflect.Type) ValDecoder {\n\treturn nil\n}\n\nfunc (extension *DummyExtension) CreateEncoder(typ reflect.Type) ValEncoder {\n\treturn nil\n}\n\nfunc (extension *DummyExtension) DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder {\n\treturn decoder\n}\n\nfunc (extension *DummyExtension) DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder {\n\treturn encoder\n}\n\ntype funcDecoder struct {\n\tfun DecoderFunc\n}\n\nfunc (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {\n\tdecoder.fun(ptr, iter)\n}\n\ntype funcEncoder struct {\n\tfun EncoderFunc\n\tisEmptyFunc func(ptr unsafe.Pointer) bool\n}\n\nfunc (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {\n\tencoder.fun(ptr, stream)\n}\n\nfunc (encoder *funcEncoder) EncodeInterface(val interface{}, stream *Stream) {\n\tWriteToStream(val, stream, encoder)\n}\n\nfunc (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool {\n\tif encoder.isEmptyFunc == nil {\n\t\treturn false\n\t}\n\treturn encoder.isEmptyFunc(ptr)\n}\n\nfunc RegisterTypeDecoderFunc(typ string, fun DecoderFunc) {\n\ttypeDecoders[typ] = &funcDecoder{fun}\n}\n\nfunc RegisterTypeDecoder(typ string, decoder ValDecoder) {\n\ttypeDecoders[typ] = decoder\n}\n\nfunc RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) {\n\tRegisterFieldDecoder(typ, field, &funcDecoder{fun})\n}\n\nfunc RegisterFieldDecoder(typ string, field string, decoder ValDecoder) {\n\tfieldDecoders[fmt.Sprintf(\"%s\/%s\", typ, field)] = decoder\n}\n\nfunc RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {\n\ttypeEncoders[typ] = &funcEncoder{fun, isEmptyFunc}\n}\n\nfunc RegisterTypeEncoder(typ string, encoder ValEncoder) {\n\ttypeEncoders[typ] = encoder\n}\n\nfunc RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {\n\tRegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc})\n}\n\nfunc RegisterFieldEncoder(typ string, field string, encoder ValEncoder) {\n\tfieldEncoders[fmt.Sprintf(\"%s\/%s\", typ, field)] = encoder\n}\n\nfunc RegisterExtension(extension Extension) {\n\textensions = append(extensions, extension)\n}\n\nfunc getTypeDecoderFromExtension(typ reflect.Type) ValDecoder {\n\tdecoder := _getTypeDecoderFromExtension(typ)\n\tif decoder != nil {\n\t\tfor _, extension := range extensions {\n\t\t\tdecoder = extension.DecorateDecoder(typ, decoder)\n\t\t}\n\t}\n\treturn decoder\n}\nfunc _getTypeDecoderFromExtension(typ reflect.Type) ValDecoder {\n\tfor _, extension := range extensions {\n\t\tdecoder := extension.CreateDecoder(typ)\n\t\tif decoder != nil {\n\t\t\treturn decoder\n\t\t}\n\t}\n\ttypeName := typ.String()\n\tdecoder := typeDecoders[typeName]\n\tif decoder != nil {\n\t\treturn decoder\n\t}\n\tif typ.Kind() == reflect.Ptr {\n\t\tdecoder := typeDecoders[typ.Elem().String()]\n\t\tif decoder != nil {\n\t\t\treturn &optionalDecoder{typ.Elem(), decoder}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getTypeEncoderFromExtension(typ reflect.Type) ValEncoder {\n\tencoder := _getTypeEncoderFromExtension(typ)\n\tif encoder != nil {\n\t\tfor _, extension := range extensions {\n\t\t\tencoder = extension.DecorateEncoder(typ, encoder)\n\t\t}\n\t}\n\treturn encoder\n}\n\nfunc _getTypeEncoderFromExtension(typ reflect.Type) ValEncoder {\n\tfor _, extension := range extensions {\n\t\tencoder := extension.CreateEncoder(typ)\n\t\tif encoder != nil {\n\t\t\treturn encoder\n\t\t}\n\t}\n\ttypeName := typ.String()\n\tencoder := typeEncoders[typeName]\n\tif encoder != nil {\n\t\treturn encoder\n\t}\n\tif typ.Kind() == reflect.Ptr {\n\t\tencoder := typeEncoders[typ.Elem().String()]\n\t\tif encoder != nil {\n\t\t\treturn &optionalEncoder{encoder}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, error) {\n\theadAnonymousBindings := []*Binding{}\n\ttailAnonymousBindings := []*Binding{}\n\tbindings := []*Binding{}\n\tfor i := 0; i < typ.NumField(); i++ {\n\t\tfield := typ.Field(i)\n\t\tif field.Anonymous {\n\t\t\tif field.Type.Kind() == reflect.Struct {\n\t\t\t\tstructDescriptor, err := describeStruct(cfg, field.Type)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tfor _, binding := range structDescriptor.Fields {\n\t\t\t\t\tbinding.Encoder = &structFieldEncoder{&field, binding.Encoder, false}\n\t\t\t\t\tbinding.Decoder = &structFieldDecoder{&field, binding.Decoder}\n\t\t\t\t\tif field.Offset == 0 {\n\t\t\t\t\t\theadAnonymousBindings = append(headAnonymousBindings, binding)\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttailAnonymousBindings = append(tailAnonymousBindings, binding)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if field.Type.Kind() == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct {\n\t\t\t\tstructDescriptor, err := describeStruct(cfg, field.Type.Elem())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tfor _, binding := range structDescriptor.Fields {\n\t\t\t\t\tbinding.Encoder = &optionalEncoder{binding.Encoder}\n\t\t\t\t\tbinding.Encoder = &structFieldEncoder{&field, binding.Encoder, false}\n\t\t\t\t\tbinding.Decoder = &optionalDecoder{field.Type, binding.Decoder}\n\t\t\t\t\tbinding.Decoder = &structFieldDecoder{&field, binding.Decoder}\n\t\t\t\t\tif field.Offset == 0 {\n\t\t\t\t\t\theadAnonymousBindings = append(headAnonymousBindings, binding)\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttailAnonymousBindings = append(tailAnonymousBindings, binding)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\ttagParts := strings.Split(field.Tag.Get(\"json\"), \",\")\n\t\t\tfieldNames := calcFieldNames(field.Name, tagParts[0], string(field.Tag))\n\t\t\tfieldCacheKey := fmt.Sprintf(\"%s\/%s\", typ.String(), field.Name)\n\t\t\tdecoder := fieldDecoders[fieldCacheKey]\n\t\t\tif decoder == nil {\n\t\t\t\tvar err error\n\t\t\t\tdecoder, err = decoderOfType(cfg, field.Type)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tencoder := fieldEncoders[fieldCacheKey]\n\t\t\tif encoder == nil {\n\t\t\t\tvar err error\n\t\t\t\tencoder, err = encoderOfType(cfg, field.Type)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\t\/\/ map is stored as pointer in the struct\n\t\t\t\tif field.Type.Kind() == reflect.Map {\n\t\t\t\t\tencoder = &optionalEncoder{encoder}\n\t\t\t\t}\n\t\t\t}\n\t\t\tbinding := &Binding{\n\t\t\t\tField: &field,\n\t\t\t\tFromNames: fieldNames,\n\t\t\t\tToNames: fieldNames,\n\t\t\t\tDecoder: decoder,\n\t\t\t\tEncoder: encoder,\n\t\t\t}\n\t\t\tbindings = append(bindings, binding)\n\t\t}\n\t}\n\tstructDescriptor := &StructDescriptor{\n\t\tType: typ,\n\t\tFields: bindings,\n\t}\n\tfor _, extension := range extensions {\n\t\textension.UpdateStructDescriptor(structDescriptor)\n\t}\n\tfor _, binding := range structDescriptor.Fields {\n\t\tshouldOmitEmpty := false\n\t\ttagParts := strings.Split(binding.Field.Tag.Get(\"json\"), \",\")\n\t\tfor _, tagPart := range tagParts[1:] {\n\t\t\tif tagPart == \"omitempty\" {\n\t\t\t\tshouldOmitEmpty = true\n\t\t\t} else if tagPart == \"string\" {\n\t\t\t\tif binding.Field.Type.Kind() == reflect.String {\n\t\t\t\t\tbinding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg}\n\t\t\t\t\tbinding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg}\n\t\t\t\t} else {\n\t\t\t\t\tbinding.Decoder = &stringModeNumberDecoder{binding.Decoder}\n\t\t\t\t\tbinding.Encoder = &stringModeNumberEncoder{binding.Encoder}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tbinding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder}\n\t\tbinding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty}\n\t}\n\t\/\/ insert anonymous bindings to the head\n\tstructDescriptor.Fields = append(headAnonymousBindings, structDescriptor.Fields...)\n\tstructDescriptor.Fields = append(structDescriptor.Fields, tailAnonymousBindings...)\n\treturn structDescriptor, nil\n}\n\nfunc calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string {\n\t\/\/ ignore?\n\tif wholeTag == \"-\" {\n\t\treturn []string{}\n\t}\n\t\/\/ rename?\n\tvar fieldNames []string\n\tif tagProvidedFieldName == \"\" {\n\t\tfieldNames = []string{originalFieldName}\n\t} else {\n\t\tfieldNames = []string{tagProvidedFieldName}\n\t}\n\t\/\/ private?\n\tisNotExported := unicode.IsLower(rune(originalFieldName[0]))\n\tif isNotExported {\n\t\tfieldNames = []string{}\n\t}\n\treturn fieldNames\n}\n<commit_msg>#80 fix embedded builtins<commit_after>package jsoniter\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unsafe\"\n)\n\nvar typeDecoders = map[string]ValDecoder{}\nvar fieldDecoders = map[string]ValDecoder{}\nvar typeEncoders = map[string]ValEncoder{}\nvar fieldEncoders = map[string]ValEncoder{}\nvar extensions = []Extension{}\n\ntype StructDescriptor struct {\n\tType reflect.Type\n\tFields []*Binding\n}\n\nfunc (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding {\n\tfor _, binding := range structDescriptor.Fields {\n\t\tif binding.Field.Name == fieldName {\n\t\t\treturn binding\n\t\t}\n\t}\n\treturn nil\n}\n\ntype Binding struct {\n\tField *reflect.StructField\n\tFromNames []string\n\tToNames []string\n\tEncoder ValEncoder\n\tDecoder ValDecoder\n}\n\ntype Extension interface {\n\tUpdateStructDescriptor(structDescriptor *StructDescriptor)\n\tCreateDecoder(typ reflect.Type) ValDecoder\n\tCreateEncoder(typ reflect.Type) ValEncoder\n\tDecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder\n\tDecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder\n}\n\ntype DummyExtension struct {\n}\n\nfunc (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {\n}\n\nfunc (extension *DummyExtension) CreateDecoder(typ reflect.Type) ValDecoder {\n\treturn nil\n}\n\nfunc (extension *DummyExtension) CreateEncoder(typ reflect.Type) ValEncoder {\n\treturn nil\n}\n\nfunc (extension *DummyExtension) DecorateDecoder(typ reflect.Type, decoder ValDecoder) ValDecoder {\n\treturn decoder\n}\n\nfunc (extension *DummyExtension) DecorateEncoder(typ reflect.Type, encoder ValEncoder) ValEncoder {\n\treturn encoder\n}\n\ntype funcDecoder struct {\n\tfun DecoderFunc\n}\n\nfunc (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {\n\tdecoder.fun(ptr, iter)\n}\n\ntype funcEncoder struct {\n\tfun EncoderFunc\n\tisEmptyFunc func(ptr unsafe.Pointer) bool\n}\n\nfunc (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {\n\tencoder.fun(ptr, stream)\n}\n\nfunc (encoder *funcEncoder) EncodeInterface(val interface{}, stream *Stream) {\n\tWriteToStream(val, stream, encoder)\n}\n\nfunc (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool {\n\tif encoder.isEmptyFunc == nil {\n\t\treturn false\n\t}\n\treturn encoder.isEmptyFunc(ptr)\n}\n\nfunc RegisterTypeDecoderFunc(typ string, fun DecoderFunc) {\n\ttypeDecoders[typ] = &funcDecoder{fun}\n}\n\nfunc RegisterTypeDecoder(typ string, decoder ValDecoder) {\n\ttypeDecoders[typ] = decoder\n}\n\nfunc RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) {\n\tRegisterFieldDecoder(typ, field, &funcDecoder{fun})\n}\n\nfunc RegisterFieldDecoder(typ string, field string, decoder ValDecoder) {\n\tfieldDecoders[fmt.Sprintf(\"%s\/%s\", typ, field)] = decoder\n}\n\nfunc RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {\n\ttypeEncoders[typ] = &funcEncoder{fun, isEmptyFunc}\n}\n\nfunc RegisterTypeEncoder(typ string, encoder ValEncoder) {\n\ttypeEncoders[typ] = encoder\n}\n\nfunc RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {\n\tRegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc})\n}\n\nfunc RegisterFieldEncoder(typ string, field string, encoder ValEncoder) {\n\tfieldEncoders[fmt.Sprintf(\"%s\/%s\", typ, field)] = encoder\n}\n\nfunc RegisterExtension(extension Extension) {\n\textensions = append(extensions, extension)\n}\n\nfunc getTypeDecoderFromExtension(typ reflect.Type) ValDecoder {\n\tdecoder := _getTypeDecoderFromExtension(typ)\n\tif decoder != nil {\n\t\tfor _, extension := range extensions {\n\t\t\tdecoder = extension.DecorateDecoder(typ, decoder)\n\t\t}\n\t}\n\treturn decoder\n}\nfunc _getTypeDecoderFromExtension(typ reflect.Type) ValDecoder {\n\tfor _, extension := range extensions {\n\t\tdecoder := extension.CreateDecoder(typ)\n\t\tif decoder != nil {\n\t\t\treturn decoder\n\t\t}\n\t}\n\ttypeName := typ.String()\n\tdecoder := typeDecoders[typeName]\n\tif decoder != nil {\n\t\treturn decoder\n\t}\n\tif typ.Kind() == reflect.Ptr {\n\t\tdecoder := typeDecoders[typ.Elem().String()]\n\t\tif decoder != nil {\n\t\t\treturn &optionalDecoder{typ.Elem(), decoder}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getTypeEncoderFromExtension(typ reflect.Type) ValEncoder {\n\tencoder := _getTypeEncoderFromExtension(typ)\n\tif encoder != nil {\n\t\tfor _, extension := range extensions {\n\t\t\tencoder = extension.DecorateEncoder(typ, encoder)\n\t\t}\n\t}\n\treturn encoder\n}\n\nfunc _getTypeEncoderFromExtension(typ reflect.Type) ValEncoder {\n\tfor _, extension := range extensions {\n\t\tencoder := extension.CreateEncoder(typ)\n\t\tif encoder != nil {\n\t\t\treturn encoder\n\t\t}\n\t}\n\ttypeName := typ.String()\n\tencoder := typeEncoders[typeName]\n\tif encoder != nil {\n\t\treturn encoder\n\t}\n\tif typ.Kind() == reflect.Ptr {\n\t\tencoder := typeEncoders[typ.Elem().String()]\n\t\tif encoder != nil {\n\t\t\treturn &optionalEncoder{encoder}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, error) {\n\theadAnonymousBindings := []*Binding{}\n\ttailAnonymousBindings := []*Binding{}\n\tbindings := []*Binding{}\n\tfor i := 0; i < typ.NumField(); i++ {\n\t\tfield := typ.Field(i)\n\t\tif field.Anonymous {\n\t\t\tif field.Type.Kind() == reflect.Struct {\n\t\t\t\tstructDescriptor, err := describeStruct(cfg, field.Type)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tfor _, binding := range structDescriptor.Fields {\n\t\t\t\t\tbinding.Encoder = &structFieldEncoder{&field, binding.Encoder, false}\n\t\t\t\t\tbinding.Decoder = &structFieldDecoder{&field, binding.Decoder}\n\t\t\t\t\tif field.Offset == 0 {\n\t\t\t\t\t\theadAnonymousBindings = append(headAnonymousBindings, binding)\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttailAnonymousBindings = append(tailAnonymousBindings, binding)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t} else if field.Type.Kind() == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct {\n\t\t\t\tstructDescriptor, err := describeStruct(cfg, field.Type.Elem())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tfor _, binding := range structDescriptor.Fields {\n\t\t\t\t\tbinding.Encoder = &optionalEncoder{binding.Encoder}\n\t\t\t\t\tbinding.Encoder = &structFieldEncoder{&field, binding.Encoder, false}\n\t\t\t\t\tbinding.Decoder = &optionalDecoder{field.Type, binding.Decoder}\n\t\t\t\t\tbinding.Decoder = &structFieldDecoder{&field, binding.Decoder}\n\t\t\t\t\tif field.Offset == 0 {\n\t\t\t\t\t\theadAnonymousBindings = append(headAnonymousBindings, binding)\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttailAnonymousBindings = append(tailAnonymousBindings, binding)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\ttagParts := strings.Split(field.Tag.Get(\"json\"), \",\")\n\t\tfieldNames := calcFieldNames(field.Name, tagParts[0], string(field.Tag))\n\t\tfieldCacheKey := fmt.Sprintf(\"%s\/%s\", typ.String(), field.Name)\n\t\tdecoder := fieldDecoders[fieldCacheKey]\n\t\tif decoder == nil {\n\t\t\tvar err error\n\t\t\tdecoder, err = decoderOfType(cfg, field.Type)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tencoder := fieldEncoders[fieldCacheKey]\n\t\tif encoder == nil {\n\t\t\tvar err error\n\t\t\tencoder, err = encoderOfType(cfg, field.Type)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t\/\/ map is stored as pointer in the struct\n\t\t\tif field.Type.Kind() == reflect.Map {\n\t\t\t\tencoder = &optionalEncoder{encoder}\n\t\t\t}\n\t\t}\n\t\tbinding := &Binding{\n\t\t\tField: &field,\n\t\t\tFromNames: fieldNames,\n\t\t\tToNames: fieldNames,\n\t\t\tDecoder: decoder,\n\t\t\tEncoder: encoder,\n\t\t}\n\t\tbindings = append(bindings, binding)\n\t}\n\treturn createStructDescriptor(cfg, typ, bindings, headAnonymousBindings, tailAnonymousBindings), nil\n}\nfunc createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Binding, headAnonymousBindings []*Binding, tailAnonymousBindings []*Binding) *StructDescriptor {\n\tstructDescriptor := &StructDescriptor{\n\t\tType: typ,\n\t\tFields: bindings,\n\t}\n\tfor _, extension := range extensions {\n\t\textension.UpdateStructDescriptor(structDescriptor)\n\t}\n\tprocessTags(structDescriptor, cfg)\n\t\/\/ insert anonymous bindings to the head\n\tstructDescriptor.Fields = append(headAnonymousBindings, structDescriptor.Fields...)\n\tstructDescriptor.Fields = append(structDescriptor.Fields, tailAnonymousBindings...)\n\treturn structDescriptor\n}\n\nfunc processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) {\n\tfor _, binding := range structDescriptor.Fields {\n\t\tshouldOmitEmpty := false\n\t\ttagParts := strings.Split(binding.Field.Tag.Get(\"json\"), \",\")\n\t\tfor _, tagPart := range tagParts[1:] {\n\t\t\tif tagPart == \"omitempty\" {\n\t\t\t\tshouldOmitEmpty = true\n\t\t\t} else if tagPart == \"string\" {\n\t\t\t\tif binding.Field.Type.Kind() == reflect.String {\n\t\t\t\t\tbinding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg}\n\t\t\t\t\tbinding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg}\n\t\t\t\t} else {\n\t\t\t\t\tbinding.Decoder = &stringModeNumberDecoder{binding.Decoder}\n\t\t\t\t\tbinding.Encoder = &stringModeNumberEncoder{binding.Encoder}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tbinding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder}\n\t\tbinding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty}\n\t}\n}\n\nfunc calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string {\n\t\/\/ ignore?\n\tif wholeTag == \"-\" {\n\t\treturn []string{}\n\t}\n\t\/\/ rename?\n\tvar fieldNames []string\n\tif tagProvidedFieldName == \"\" {\n\t\tfieldNames = []string{originalFieldName}\n\t} else {\n\t\tfieldNames = []string{tagProvidedFieldName}\n\t}\n\t\/\/ private?\n\tisNotExported := unicode.IsLower(rune(originalFieldName[0]))\n\tif isNotExported {\n\t\tfieldNames = []string{}\n\t}\n\treturn fieldNames\n}\n<|endoftext|>"} {"text":"<commit_before>package logger\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\trouter \"gopkg.in\/Clever\/kayvee-go.v5\/router\"\n)\n\nfunc TestMockLoggerImplementsKayveeLogger(t *testing.T) {\n\tassert.Implements(t, (*KayveeLogger)(nil), &MockRouteCountLogger{}, \"*MockRouteCountLogger should implement KayveeLogger\")\n}\n\nfunc TestRouteCountsWithMockLogger(t *testing.T) {\n\troutes := map[string](router.Rule){\n\t\t\"rule-one\": router.Rule{\n\t\t\tMatchers: router.RuleMatchers{\n\t\t\t\t\"foo\": []string{\"bar\", \"baz\"},\n\t\t\t},\n\t\t\tOutput: router.RuleOutput{\n\t\t\t\t\"out\": \"#-%{foo}-\",\n\t\t\t},\n\t\t},\n\t\t\"rule-two\": router.Rule{\n\t\t\tMatchers: router.RuleMatchers{\n\t\t\t\t\"abc\": []string{\"def\"},\n\t\t\t},\n\t\t\tOutput: router.RuleOutput{\n\t\t\t\t\"more\": \"x\",\n\t\t\t},\n\t\t},\n\t}\n\ttestRouter := router.NewFromRoutes(routes)\n\n\tmockLogger := NewMockCountLogger(\"testing\")\n\tmockLogger.logger.logRouter = testRouter\n\n\tdata0 := map[string]interface{}{\n\t\t\"wrong\": \"stuff\",\n\t}\n\texpected0 := map[string]int{}\n\tmockLogger.InfoD(\"log0\", data0)\n\tactual0 := mockLogger.RuleCounts()\n\tassert.Equal(t, expected0, actual0)\n\n\tdata1 := map[string]interface{}{\n\t\t\"foo\": \"bar\",\n\t}\n\texpected1 := map[string]int{\n\t\t\"rule-one\": 1,\n\t}\n\tmockLogger.InfoD(\"log1\", data1)\n\tactual1 := mockLogger.RuleCounts()\n\tassert.Equal(t, expected1, actual1)\n\n\tdata2 := map[string]interface{}{\n\t\t\"foo\": \"bar\",\n\t\t\"abc\": \"def\",\n\t}\n\texpected2 := map[string]int{\n\t\t\"rule-one\": 2,\n\t\t\"rule-two\": 1,\n\t}\n\tmockLogger.InfoD(\"log2\", data2)\n\tactual2 := mockLogger.RuleCounts()\n\tassert.Equal(t, expected2, actual2)\n}\n<commit_msg>Updated unit test to work after rebase<commit_after>package logger\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\trouter \"gopkg.in\/Clever\/kayvee-go.v5\/router\"\n)\n\nfunc TestMockLoggerImplementsKayveeLogger(t *testing.T) {\n\tassert.Implements(t, (*KayveeLogger)(nil), &MockRouteCountLogger{}, \"*MockRouteCountLogger should implement KayveeLogger\")\n}\n\nfunc TestRouteCountsWithMockLogger(t *testing.T) {\n\troutes := map[string](router.Rule){\n\t\t\"rule-one\": router.Rule{\n\t\t\tMatchers: router.RuleMatchers{\n\t\t\t\t\"foo\": []string{\"bar\", \"baz\"},\n\t\t\t},\n\t\t\tOutput: router.RuleOutput{\n\t\t\t\t\"out\": \"#-%{foo}-\",\n\t\t\t},\n\t\t},\n\t\t\"rule-two\": router.Rule{\n\t\t\tMatchers: router.RuleMatchers{\n\t\t\t\t\"abc\": []string{\"def\"},\n\t\t\t},\n\t\t\tOutput: router.RuleOutput{\n\t\t\t\t\"more\": \"x\",\n\t\t\t},\n\t\t},\n\t}\n\ttestRouter, err := router.NewFromRoutes(routes)\n\tassert.NoError(t, err)\n\n\tmockLogger := NewMockCountLogger(\"testing\")\n\tmockLogger.logger.logRouter = testRouter\n\n\tdata0 := map[string]interface{}{\n\t\t\"wrong\": \"stuff\",\n\t}\n\texpected0 := map[string]int{}\n\tmockLogger.InfoD(\"log0\", data0)\n\tactual0 := mockLogger.RuleCounts()\n\tassert.Equal(t, expected0, actual0)\n\n\tdata1 := map[string]interface{}{\n\t\t\"foo\": \"bar\",\n\t}\n\texpected1 := map[string]int{\n\t\t\"rule-one\": 1,\n\t}\n\tmockLogger.InfoD(\"log1\", data1)\n\tactual1 := mockLogger.RuleCounts()\n\tassert.Equal(t, expected1, actual1)\n\n\tdata2 := map[string]interface{}{\n\t\t\"foo\": \"bar\",\n\t\t\"abc\": \"def\",\n\t}\n\texpected2 := map[string]int{\n\t\t\"rule-one\": 2,\n\t\t\"rule-two\": 1,\n\t}\n\tmockLogger.InfoD(\"log2\", data2)\n\tactual2 := mockLogger.RuleCounts()\n\tassert.Equal(t, expected2, actual2)\n}\n<|endoftext|>"} {"text":"<commit_before>package adapter\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/gliderlabs\/logspout\/router\"\n)\n\nconst (\n\tlogglyAddr = \"https:\/\/logs-01.loggly.com\"\n\tlogglyEventEndpoint = \"\/bulk\"\n\tlogglyTagsHeader = \"X-LOGGLY-TAG\"\n\tflushTimeout = 10 * time.Second\n)\n\n\/\/ Adapter satisfies the router.LogAdapter interface by providing Stream which\n\/\/ passes all messages to loggly.\ntype Adapter struct {\n\ttoken string\n\tclient *http.Client\n\ttags string\n\tlog *log.Logger\n\tqueue chan logglyMessage\n\tm sync.Mutex\n\tbufferSize int\n}\n\n\/\/ New returns an Adapter that receives messages from logspout. Additionally,\n\/\/ it launches a goroutine to buffer and flush messages to loggly.\nfunc New(logglyToken, tags string, bufferSize int) *Adapter {\n\n\tadapter := &Adapter{\n\t\tclient: http.DefaultClient,\n\t\tbufferSize: bufferSize,\n\t\tlog: log.New(os.Stdout, \"logspout-loggly\", log.LstdFlags),\n\t\tqueue: make(chan logglyMessage),\n\t\ttoken: logglyToken,\n\t\ttags: tags,\n\t}\n\n\tgo adapter.readQueue()\n\n\treturn adapter\n}\n\n\/\/ Stream satisfies the router.LogAdapter interface and passes all messages to\n\/\/ Loggly\nfunc (l *Adapter) Stream(logstream chan *router.Message) {\n\tfor m := range logstream {\n\t\tl.queue <- logglyMessage{\n\t\t\tMessage: m.Data,\n\t\t\tContainerName: m.Container.Name,\n\t\t\tContainerID: m.Container.ID,\n\t\t\tContainerImage: m.Container.Config.Image,\n\t\t\tContainerHostname: m.Container.Config.Hostname,\n\t\t}\n\t}\n}\n\nfunc (l *Adapter) readQueue() {\n\tbuffer := l.newBuffer()\n\n\ttimeout := time.NewTimer(flushTimeout)\n\n\tfor {\n\t\tselect {\n\t\tcase msg := <-l.queue:\n\t\t\tif len(buffer) == cap(buffer) {\n\t\t\t\ttimeout.Stop()\n\t\t\t\tl.flushBuffer(buffer)\n\t\t\t\tbuffer = l.newBuffer()\n\t\t\t\ttimeout.Reset(flushTimeout)\n\t\t\t}\n\n\t\t\tbuffer = append(buffer, msg)\n\n\t\tcase <-timeout.C:\n\t\t\tif len(buffer) > 0 {\n\t\t\t\tl.flushBuffer(buffer)\n\t\t\t\tbuffer = l.newBuffer()\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (l *Adapter) newBuffer() []logglyMessage {\n\treturn make([]logglyMessage, 0, l.bufferSize)\n}\n\nfunc (l *Adapter) flushBuffer(buffer []logglyMessage) {\n\tvar data bytes.Buffer\n\n\tfor _, msg := range buffer {\n\t\tj, _ := json.Marshal(msg)\n\t\tdata.Write(j)\n\t\tdata.WriteString(\"\\n\")\n\t}\n\n\treq, _ := http.NewRequest(\n\t\t\"POST\",\n\t\tfmt.Sprintf(\"%s%s\/%s\", logglyAddr, logglyEventEndpoint, l.token),\n\t\t&data,\n\t)\n\n\tgo l.sendRequestToLoggly(req)\n}\n\nfunc (l *Adapter) sendRequestToLoggly(req *http.Request) {\n\tl.m.Lock()\n\tdefer l.m.Unlock()\n\tresp, err := l.client.Do(req)\n\tdefer resp.Body.Close()\n\n\tif err != nil {\n\t\tl.log.Println(\n\t\t\tfmt.Errorf(\n\t\t\t\t\"error from client: %s\",\n\t\t\t\terr.Error(),\n\t\t\t),\n\t\t)\n\t\treturn\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tl.log.Println(\n\t\t\tfmt.Errorf(\n\t\t\t\t\"received a non 200 status code when sending message to loggly: %s\",\n\t\t\t\terr.Error(),\n\t\t\t),\n\t\t)\n\t}\n}\n<commit_msg>remove mutex protecting http.Client<commit_after>package adapter\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/gliderlabs\/logspout\/router\"\n)\n\nconst (\n\tlogglyAddr = \"https:\/\/logs-01.loggly.com\"\n\tlogglyEventEndpoint = \"\/bulk\"\n\tlogglyTagsHeader = \"X-LOGGLY-TAG\"\n\tflushTimeout = 10 * time.Second\n)\n\n\/\/ Adapter satisfies the router.LogAdapter interface by providing Stream which\n\/\/ passes all messages to loggly.\ntype Adapter struct {\n\ttoken string\n\tclient *http.Client\n\ttags string\n\tlog *log.Logger\n\tqueue chan logglyMessage\n\tbufferSize int\n}\n\n\/\/ New returns an Adapter that receives messages from logspout. Additionally,\n\/\/ it launches a goroutine to buffer and flush messages to loggly.\nfunc New(logglyToken, tags string, bufferSize int) *Adapter {\n\n\tadapter := &Adapter{\n\t\tclient: http.DefaultClient,\n\t\tbufferSize: bufferSize,\n\t\tlog: log.New(os.Stdout, \"logspout-loggly\", log.LstdFlags),\n\t\tqueue: make(chan logglyMessage),\n\t\ttoken: logglyToken,\n\t\ttags: tags,\n\t}\n\n\tgo adapter.readQueue()\n\n\treturn adapter\n}\n\n\/\/ Stream satisfies the router.LogAdapter interface and passes all messages to\n\/\/ Loggly\nfunc (l *Adapter) Stream(logstream chan *router.Message) {\n\tfor m := range logstream {\n\t\tl.queue <- logglyMessage{\n\t\t\tMessage: m.Data,\n\t\t\tContainerName: m.Container.Name,\n\t\t\tContainerID: m.Container.ID,\n\t\t\tContainerImage: m.Container.Config.Image,\n\t\t\tContainerHostname: m.Container.Config.Hostname,\n\t\t}\n\t}\n}\n\nfunc (l *Adapter) readQueue() {\n\tbuffer := l.newBuffer()\n\n\ttimeout := time.NewTimer(flushTimeout)\n\n\tfor {\n\t\tselect {\n\t\tcase msg := <-l.queue:\n\t\t\tif len(buffer) == cap(buffer) {\n\t\t\t\ttimeout.Stop()\n\t\t\t\tl.flushBuffer(buffer)\n\t\t\t\tbuffer = l.newBuffer()\n\t\t\t\ttimeout.Reset(flushTimeout)\n\t\t\t}\n\n\t\t\tbuffer = append(buffer, msg)\n\n\t\tcase <-timeout.C:\n\t\t\tif len(buffer) > 0 {\n\t\t\t\tl.flushBuffer(buffer)\n\t\t\t\tbuffer = l.newBuffer()\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (l *Adapter) newBuffer() []logglyMessage {\n\treturn make([]logglyMessage, 0, l.bufferSize)\n}\n\nfunc (l *Adapter) flushBuffer(buffer []logglyMessage) {\n\tvar data bytes.Buffer\n\n\tfor _, msg := range buffer {\n\t\tj, _ := json.Marshal(msg)\n\t\tdata.Write(j)\n\t\tdata.WriteString(\"\\n\")\n\t}\n\n\treq, _ := http.NewRequest(\n\t\t\"POST\",\n\t\tfmt.Sprintf(\"%s%s\/%s\", logglyAddr, logglyEventEndpoint, l.token),\n\t\t&data,\n\t)\n\n\tgo l.sendRequestToLoggly(req)\n}\n\nfunc (l *Adapter) sendRequestToLoggly(req *http.Request) {\n\tresp, err := l.client.Do(req)\n\tdefer resp.Body.Close()\n\n\tif err != nil {\n\t\tl.log.Println(\n\t\t\tfmt.Errorf(\n\t\t\t\t\"error from client: %s\",\n\t\t\t\terr.Error(),\n\t\t\t),\n\t\t)\n\t\treturn\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tl.log.Println(\n\t\t\tfmt.Errorf(\n\t\t\t\t\"received a non 200 status code when sending message to loggly: %s\",\n\t\t\t\terr.Error(),\n\t\t\t),\n\t\t)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package isolated\n\nimport (\n\t\"code.cloudfoundry.org\/cli\/integration\/helpers\"\n\t\"fmt\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gbytes\"\n\t. \"github.com\/onsi\/gomega\/gexec\"\n\t\"net\/http\"\n)\n\nvar _ = Describe(\"Logs Command\", func() {\n\tDescribe(\"help\", func() {\n\t\tIt(\"displays command usage to output\", func() {\n\t\t\tsession := helpers.CF(\"logs\", \"--help\")\n\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\t\t\tEventually(session).Should(Say(\"logs - Tail or show recent logs for an app\"))\n\t\t\tEventually(session).Should(Say(\"USAGE:\"))\n\t\t\tEventually(session).Should(Say(\"cf logs APP_NAME\"))\n\t\t\tEventually(session).Should(Say(\"OPTIONS:\"))\n\t\t\tEventually(session).Should(Say(\"--recent\\\\s+Dump recent logs instead of tailing\"))\n\t\t\tEventually(session).Should(Say(\"SEE ALSO:\"))\n\t\t\tEventually(session).Should(Say(\"app, apps, ssh\"))\n\t\t})\n\t})\n\n\tContext(\"when the environment is not setup correctly\", func() {\n\t\tContext(\"when no API endpoint is set\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.UnsetAPI()\n\t\t\t})\n\t\t\tIt(\"fails with no API endpoint message\", func() {\n\t\t\t\tsession := helpers.CF(\"logs\", \"dora\")\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t\tExpect(session.Out).To(Say(\"FAILED\"))\n\t\t\t\tExpect(session.Out).To(Say(\"No API endpoint set. Use 'cf login' or 'cf api' to target an endpoint\"))\n\t\t\t})\n\t\t})\n\t\tContext(\"not logged in\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.LogoutCF()\n\t\t\t})\n\t\t\tIt(\"fails with not logged in message\", func() {\n\t\t\t\tsession := helpers.CF(\"logs\", \"dora\")\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t\tExpect(session.Out).To(Say(\"FAILED\"))\n\t\t\t\tExpect(session.Out).To(Say(\"Not logged in. Use 'cf login' to log in.\")) \/\/ TODO change to ERR\n\t\t\t})\n\t\t})\n\t\tContext(\"when no org is targeted\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.LogoutCF()\n\t\t\t\thelpers.LoginCF() \/\/ uses the \"cf auth\" command, which loses the targeted org and space (cf login does not)\n\t\t\t})\n\t\t\tIt(\"fails with no org or space targeted message\", func() {\n\t\t\t\tsession := helpers.CF(\"logs\", \"dora\")\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t\tExpect(session.Out).To(Say(\"FAILED\"))\n\t\t\t\tExpect(session.Out).To(Say(\"No org and space targeted, use 'cf target -o ORG -s SPACE' to target an org and space\"))\n\t\t\t\t\/\/ TODO change to ERR above\n\t\t\t})\n\t\t})\n\t\tContext(\"when no space is targeted\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.LogoutCF()\n\t\t\t\thelpers.LoginCF() \/\/ uses the \"cf auth\" command, which loses the targeted org and space (cf login does not)\n\t\t\t\thelpers.TargetOrg(ReadOnlyOrg)\n\t\t\t})\n\t\t\tIt(\"fails with no space targeted message\", func() {\n\t\t\t\tsession := helpers.CF(\"logs\", \"dora\")\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t\tExpect(session.Out).To(Say(\"FAILED\"))\n\t\t\t\tExpect(session.Out).To(Say(\"No space targeted, use 'cf target -s' to target a space.\"))\n\t\t\t\t\/\/ TODO change to err above\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when the environment is set up correctly\", func() {\n\t\tvar (\n\t\t\torgName string\n\t\t\tspaceName string\n\t\t)\n\t\tBeforeEach(func() {\n\t\t\t\/\/helpers.RunIfExperimental(\"the logs command refactor is still experimental\")\n\t\t\torgName = helpers.NewOrgName()\n\t\t\tspaceName = helpers.PrefixedRandomName(\"SPACE\")\n\t\t\tsetupCF(orgName, spaceName)\n\t\t})\n\t\tContext(\"when input is invalid\", func() {\n\t\t\tContext(\"because no app name is provided\", func() {\n\t\t\t\tIt(\"gives an incorrect usage message\", func() {\n\t\t\t\t\tsession := helpers.CF(\"logs\")\n\t\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t\t\tExpect(session.Err).To(Say(\"Incorrect Usage: the required argument `APP_NAME` was not provided\"))\n\t\t\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\t\t\t\t\tEventually(session).Should(Say(\"logs - Tail or show recent logs for an app\"))\n\t\t\t\t\tEventually(session).Should(Say(\"USAGE:\"))\n\t\t\t\t\tEventually(session).Should(Say(\"cf logs APP_NAME\"))\n\t\t\t\t\tEventually(session).Should(Say(\"OPTIONS:\"))\n\t\t\t\t\tEventually(session).Should(Say(\"--recent\\\\s+Dump recent logs instead of tailing\"))\n\t\t\t\t\tEventually(session).Should(Say(\"SEE ALSO:\"))\n\t\t\t\t\tEventually(session).Should(Say(\"app, apps, ssh\"))\n\t\t\t\t})\n\t\t\t})\n\t\t\tContext(\"because the app does not exist\", func() {\n\t\t\t\tIt(\"fails with an app not found message\", func() {\n\t\t\t\t\tsession := helpers.CF(\"logs\", \"dora\")\n\t\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\t\tEventually(session).Should(Say(\"App dora not found\"))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t\tContext(\"when the specified app exists\", func() {\n\t\t\tvar appName string\n\t\t\tBeforeEach(func() {\n\t\t\t\tappName = helpers.PrefixedRandomName(\"app\")\n\t\t\t\thelpers.WithHelloWorldApp(func(appDir string) {\n\t\t\t\t\tEventually(helpers.CF(\"push\", appName, \"-p\", appDir, \"-b\", \"staticfile_buildpack\", \"-u\", \"http\")).Should(Exit(0))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"without the --recent flag\", func() {\n\t\t\t\tIt(\"streams logs out to the screen\", func() {\n\t\t\t\t\tsession := helpers.CF(\"logs\", appName)\n\t\t\t\t\tdefer session.Kill()\n\n\t\t\t\t\tuserName, _ := helpers.GetCredentials()\n\t\t\t\t\tEventually(session).Should(Say(\"Connected, tailing logs for app %s in org %s \/ space %s as %s...\", appName, orgName, spaceName, userName))\n\n\t\t\t\t\tresponse, err := http.Get(fmt.Sprintf(\"http:\/\/%s.%s\", appName, defaultSharedDomain()))\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(http.StatusOK))\n\t\t\t\t\tEventually(session).Should(Say(\"%s \\\\[APP\/PROC\/WEB\/0\\\\]OUT .*? \\\"GET \/ HTTP\/1.1\\\" 200 11\", helpers.ISO8601Regex))\n\t\t\t\t})\n\t\t\t})\n\t\t\tContext(\"with the --recent flag\", func() {\n\t\t\t\tIt(\"displays the most recent logs and closes the stream\", func() {\n\t\t\t\t\tsession := helpers.CF(\"logs\", appName, \"--recent\")\n\t\t\t\t\tuserName, _ := helpers.GetCredentials()\n\t\t\t\t\tEventually(session).Should(Say(\"Connected, dumping recent logs for app %s in org %s \/ space %s as %s...\", appName, orgName, spaceName, userName))\n\t\t\t\t\tEventually(session).Should(Say(\"%s \\\\[API\/0\\\\]\\\\s+OUT Created app with guid %s\", helpers.ISO8601Regex, helpers.GUIDRegex))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<commit_msg>skip logs test for now<commit_after>package isolated\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"code.cloudfoundry.org\/cli\/integration\/helpers\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gbytes\"\n\t. \"github.com\/onsi\/gomega\/gexec\"\n)\n\nvar _ = Describe(\"Logs Command\", func() {\n\tBeforeEach(func() {\n\t\thelpers.RunIfExperimental(\"logs command refactor is still experimental\")\n\t})\n\n\tDescribe(\"help\", func() {\n\t\tIt(\"displays command usage to output\", func() {\n\t\t\tsession := helpers.CF(\"logs\", \"--help\")\n\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\t\t\tEventually(session).Should(Say(\"logs - Tail or show recent logs for an app\"))\n\t\t\tEventually(session).Should(Say(\"USAGE:\"))\n\t\t\tEventually(session).Should(Say(\"cf logs APP_NAME\"))\n\t\t\tEventually(session).Should(Say(\"OPTIONS:\"))\n\t\t\tEventually(session).Should(Say(\"--recent\\\\s+Dump recent logs instead of tailing\"))\n\t\t\tEventually(session).Should(Say(\"SEE ALSO:\"))\n\t\t\tEventually(session).Should(Say(\"app, apps, ssh\"))\n\t\t})\n\t})\n\n\tContext(\"when the environment is not setup correctly\", func() {\n\t\tContext(\"when no API endpoint is set\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.UnsetAPI()\n\t\t\t})\n\t\t\tIt(\"fails with no API endpoint message\", func() {\n\t\t\t\tsession := helpers.CF(\"logs\", \"dora\")\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t\tExpect(session.Out).To(Say(\"FAILED\"))\n\t\t\t\tExpect(session.Out).To(Say(\"No API endpoint set. Use 'cf login' or 'cf api' to target an endpoint\"))\n\t\t\t})\n\t\t})\n\t\tContext(\"not logged in\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.LogoutCF()\n\t\t\t})\n\t\t\tIt(\"fails with not logged in message\", func() {\n\t\t\t\tsession := helpers.CF(\"logs\", \"dora\")\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t\tExpect(session.Out).To(Say(\"FAILED\"))\n\t\t\t\tExpect(session.Out).To(Say(\"Not logged in. Use 'cf login' to log in.\")) \/\/ TODO change to ERR\n\t\t\t})\n\t\t})\n\t\tContext(\"when no org is targeted\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.LogoutCF()\n\t\t\t\thelpers.LoginCF() \/\/ uses the \"cf auth\" command, which loses the targeted org and space (cf login does not)\n\t\t\t})\n\t\t\tIt(\"fails with no org or space targeted message\", func() {\n\t\t\t\tsession := helpers.CF(\"logs\", \"dora\")\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t\tExpect(session.Out).To(Say(\"FAILED\"))\n\t\t\t\tExpect(session.Out).To(Say(\"No org and space targeted, use 'cf target -o ORG -s SPACE' to target an org and space\"))\n\t\t\t\t\/\/ TODO change to ERR above\n\t\t\t})\n\t\t})\n\t\tContext(\"when no space is targeted\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\thelpers.LogoutCF()\n\t\t\t\thelpers.LoginCF() \/\/ uses the \"cf auth\" command, which loses the targeted org and space (cf login does not)\n\t\t\t\thelpers.TargetOrg(ReadOnlyOrg)\n\t\t\t})\n\t\t\tIt(\"fails with no space targeted message\", func() {\n\t\t\t\tsession := helpers.CF(\"logs\", \"dora\")\n\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t\tExpect(session.Out).To(Say(\"FAILED\"))\n\t\t\t\tExpect(session.Out).To(Say(\"No space targeted, use 'cf target -s' to target a space.\"))\n\t\t\t\t\/\/ TODO change to err above\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"when the environment is set up correctly\", func() {\n\t\tvar (\n\t\t\torgName string\n\t\t\tspaceName string\n\t\t)\n\t\tBeforeEach(func() {\n\t\t\t\/\/helpers.RunIfExperimental(\"the logs command refactor is still experimental\")\n\t\t\torgName = helpers.NewOrgName()\n\t\t\tspaceName = helpers.PrefixedRandomName(\"SPACE\")\n\t\t\tsetupCF(orgName, spaceName)\n\t\t})\n\t\tContext(\"when input is invalid\", func() {\n\t\t\tContext(\"because no app name is provided\", func() {\n\t\t\t\tIt(\"gives an incorrect usage message\", func() {\n\t\t\t\t\tsession := helpers.CF(\"logs\")\n\t\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t\t\tExpect(session.Err).To(Say(\"Incorrect Usage: the required argument `APP_NAME` was not provided\"))\n\t\t\t\t\tEventually(session).Should(Say(\"NAME:\"))\n\t\t\t\t\tEventually(session).Should(Say(\"logs - Tail or show recent logs for an app\"))\n\t\t\t\t\tEventually(session).Should(Say(\"USAGE:\"))\n\t\t\t\t\tEventually(session).Should(Say(\"cf logs APP_NAME\"))\n\t\t\t\t\tEventually(session).Should(Say(\"OPTIONS:\"))\n\t\t\t\t\tEventually(session).Should(Say(\"--recent\\\\s+Dump recent logs instead of tailing\"))\n\t\t\t\t\tEventually(session).Should(Say(\"SEE ALSO:\"))\n\t\t\t\t\tEventually(session).Should(Say(\"app, apps, ssh\"))\n\t\t\t\t})\n\t\t\t})\n\t\t\tContext(\"because the app does not exist\", func() {\n\t\t\t\tIt(\"fails with an app not found message\", func() {\n\t\t\t\t\tsession := helpers.CF(\"logs\", \"dora\")\n\t\t\t\t\tEventually(session).Should(Exit(1))\n\t\t\t\t\tEventually(session).Should(Say(\"FAILED\"))\n\t\t\t\t\tEventually(session).Should(Say(\"App dora not found\"))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t\tContext(\"when the specified app exists\", func() {\n\t\t\tvar appName string\n\t\t\tBeforeEach(func() {\n\t\t\t\tappName = helpers.PrefixedRandomName(\"app\")\n\t\t\t\thelpers.WithHelloWorldApp(func(appDir string) {\n\t\t\t\t\tEventually(helpers.CF(\"push\", appName, \"-p\", appDir, \"-b\", \"staticfile_buildpack\", \"-u\", \"http\")).Should(Exit(0))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"without the --recent flag\", func() {\n\t\t\t\tIt(\"streams logs out to the screen\", func() {\n\t\t\t\t\tsession := helpers.CF(\"logs\", appName)\n\t\t\t\t\tdefer session.Kill()\n\n\t\t\t\t\tuserName, _ := helpers.GetCredentials()\n\t\t\t\t\tEventually(session).Should(Say(\"Connected, tailing logs for app %s in org %s \/ space %s as %s...\", appName, orgName, spaceName, userName))\n\n\t\t\t\t\tresponse, err := http.Get(fmt.Sprintf(\"http:\/\/%s.%s\", appName, defaultSharedDomain()))\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(http.StatusOK))\n\t\t\t\t\tEventually(session).Should(Say(\"%s \\\\[APP\/PROC\/WEB\/0\\\\]OUT .*? \\\"GET \/ HTTP\/1.1\\\" 200 11\", helpers.ISO8601Regex))\n\t\t\t\t})\n\t\t\t})\n\t\t\tContext(\"with the --recent flag\", func() {\n\t\t\t\tIt(\"displays the most recent logs and closes the stream\", func() {\n\t\t\t\t\tsession := helpers.CF(\"logs\", appName, \"--recent\")\n\t\t\t\t\tuserName, _ := helpers.GetCredentials()\n\t\t\t\t\tEventually(session).Should(Say(\"Connected, dumping recent logs for app %s in org %s \/ space %s as %s...\", appName, orgName, spaceName, userName))\n\t\t\t\t\tEventually(session).Should(Say(\"%s \\\\[API\/0\\\\]\\\\s+OUT Created app with guid %s\", helpers.ISO8601Regex, helpers.GUIDRegex))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage opengl\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/affine\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/graphics\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/web\"\n)\n\n\/\/ arrayBufferLayoutPart is a part of an array buffer layout.\ntype arrayBufferLayoutPart struct {\n\t\/\/ TODO: This struct should belong to a program and know it.\n\tname string\n\tnum int\n}\n\n\/\/ arrayBufferLayout is an array buffer layout.\n\/\/\n\/\/ An array buffer in OpenGL is a buffer representing vertices and\n\/\/ is passed to a vertex shader.\ntype arrayBufferLayout struct {\n\tparts []arrayBufferLayoutPart\n\ttotal int\n}\n\n\/\/ totalBytes returns the size in bytes for one element of the array buffer.\nfunc (a *arrayBufferLayout) totalBytes() int {\n\tif a.total != 0 {\n\t\treturn a.total\n\t}\n\tt := 0\n\tfor _, p := range a.parts {\n\t\tt += float.SizeInBytes() * p.num\n\t}\n\ta.total = t\n\treturn a.total\n}\n\n\/\/ newArrayBuffer creates OpenGL's buffer object for the array buffer.\nfunc (a *arrayBufferLayout) newArrayBuffer(context *context) buffer {\n\treturn context.newArrayBuffer(a.totalBytes() * graphics.IndicesNum)\n}\n\n\/\/ enable binds the array buffer the given program to use the array buffer.\nfunc (a *arrayBufferLayout) enable(context *context, program program) {\n\tfor _, p := range a.parts {\n\t\tcontext.enableVertexAttribArray(program, p.name)\n\t}\n\ttotal := a.totalBytes()\n\toffset := 0\n\tfor _, p := range a.parts {\n\t\tcontext.vertexAttribPointer(program, p.name, p.num, float, total, offset)\n\t\toffset += float.SizeInBytes() * p.num\n\t}\n}\n\n\/\/ disable stops using the array buffer.\nfunc (a *arrayBufferLayout) disable(context *context, program program) {\n\t\/\/ TODO: Disabling should be done in reversed order?\n\tfor _, p := range a.parts {\n\t\tcontext.disableVertexAttribArray(program, p.name)\n\t}\n}\n\n\/\/ theArrayBufferLayout is the array buffer layout for Ebiten.\nvar theArrayBufferLayout = arrayBufferLayout{\n\t\/\/ Note that GL_MAX_VERTEX_ATTRIBS is at least 16.\n\tparts: []arrayBufferLayoutPart{\n\t\t{\n\t\t\tname: \"vertex\",\n\t\t\tnum: 2,\n\t\t},\n\t\t{\n\t\t\tname: \"tex\",\n\t\t\tnum: 2,\n\t\t},\n\t\t{\n\t\t\tname: \"tex_region\",\n\t\t\tnum: 4,\n\t\t},\n\t\t{\n\t\t\tname: \"color_scale\",\n\t\t\tnum: 4,\n\t\t},\n\t},\n}\n\nfunc init() {\n\tvertexFloatNum := theArrayBufferLayout.totalBytes() \/ float.SizeInBytes()\n\tif graphics.VertexFloatNum != vertexFloatNum {\n\t\tpanic(fmt.Sprintf(\"vertex float num must be %d but %d\", graphics.VertexFloatNum, vertexFloatNum))\n\t}\n}\n\n\/\/ openGLState is a state for\ntype openGLState struct {\n\t\/\/ arrayBuffer is OpenGL's array buffer (vertices data).\n\tarrayBuffer buffer\n\n\t\/\/ elementArrayBuffer is OpenGL's element array buffer (indices data).\n\telementArrayBuffer buffer\n\n\t\/\/ program is OpenGL's program for rendering a texture.\n\tprogram program\n\n\tlastProgram program\n\tlastViewportWidth int\n\tlastViewportHeight int\n\tlastColorMatrix []float32\n\tlastColorMatrixTranslation []float32\n\tlastSourceWidth int\n\tlastSourceHeight int\n\tlastFilter *graphics.Filter\n\tlastAddress *graphics.Address\n\n\tsource *Image\n\tdestination *Image\n}\n\nvar (\n\tzeroBuffer buffer\n\tzeroProgram program\n)\n\nconst (\n\tmaxTriangles = graphics.IndicesNum \/ 3\n\tmaxQuads = maxTriangles \/ 2\n)\n\n\/\/ reset resets or initializes the OpenGL state.\nfunc (s *openGLState) reset(context *context) error {\n\tif err := context.reset(); err != nil {\n\t\treturn err\n\t}\n\n\ts.lastProgram = zeroProgram\n\ts.lastViewportWidth = 0\n\ts.lastViewportHeight = 0\n\ts.lastColorMatrix = nil\n\ts.lastColorMatrixTranslation = nil\n\ts.lastSourceWidth = 0\n\ts.lastSourceHeight = 0\n\ts.lastFilter = nil\n\ts.lastAddress = nil\n\n\t\/\/ When context lost happens, deleting programs or buffers is not necessary.\n\t\/\/ However, it is not assumed that reset is called only when context lost happens.\n\t\/\/ Let's delete them explicitly.\n\tif s.program != zeroProgram {\n\t\tcontext.deleteProgram(s.program)\n\t}\n\n\t\/\/ On browsers (at least Chrome), buffers are already detached from the context\n\t\/\/ and must not be deleted by DeleteBuffer.\n\tif !web.IsBrowser() {\n\t\tif s.arrayBuffer != zeroBuffer {\n\t\t\tcontext.deleteBuffer(s.arrayBuffer)\n\t\t}\n\t\tif s.elementArrayBuffer != zeroBuffer {\n\t\t\tcontext.deleteBuffer(s.elementArrayBuffer)\n\t\t}\n\t}\n\n\tshaderVertexModelviewNative, err := context.newShader(vertexShader, shaderStr(shaderVertexModelview))\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"graphics: shader compiling error:\\n%s\", err))\n\t}\n\tdefer context.deleteShader(shaderVertexModelviewNative)\n\n\tshaderFragmentColorMatrixNative, err := context.newShader(fragmentShader, shaderStr(shaderFragmentColorMatrix))\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"graphics: shader compiling error:\\n%s\", err))\n\t}\n\tdefer context.deleteShader(shaderFragmentColorMatrixNative)\n\n\ts.program, err = context.newProgram([]shader{\n\t\tshaderVertexModelviewNative,\n\t\tshaderFragmentColorMatrixNative,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.arrayBuffer = theArrayBufferLayout.newArrayBuffer(context)\n\n\t\/\/ Note that the indices passed to NewElementArrayBuffer is not under GC management\n\t\/\/ in opengl package due to unsafe-way.\n\t\/\/ See NewElementArrayBuffer in context_mobile.go.\n\ts.elementArrayBuffer = context.newElementArrayBuffer(graphics.IndicesNum * 2)\n\n\treturn nil\n}\n\n\/\/ areSameFloat32Array returns a boolean indicating if a and b are deeply equal.\nfunc areSameFloat32Array(a, b []float32) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ useProgram uses the program (programTexture).\nfunc (d *Driver) useProgram(mode graphics.CompositeMode, colorM *affine.ColorM, filter graphics.Filter, address graphics.Address) error {\n\tdestination := d.state.destination\n\tif destination == nil {\n\t\tpanic(\"destination image is not set\")\n\t}\n\tsource := d.state.source\n\tif source == nil {\n\t\tpanic(\"source image is not set\")\n\t}\n\n\tif err := destination.setViewport(); err != nil {\n\t\treturn err\n\t}\n\tdstW := destination.width\n\tsrcW, srcH := source.width, source.height\n\n\td.context.blendFunc(mode)\n\n\tprogram := d.state.program\n\tif d.state.lastProgram != program {\n\t\td.context.useProgram(program)\n\t\tif d.state.lastProgram != zeroProgram {\n\t\t\ttheArrayBufferLayout.disable(&d.context, d.state.lastProgram)\n\t\t}\n\t\ttheArrayBufferLayout.enable(&d.context, program)\n\n\t\tif d.state.lastProgram == zeroProgram {\n\t\t\td.context.bindBuffer(arrayBuffer, d.state.arrayBuffer)\n\t\t\td.context.bindBuffer(elementArrayBuffer, d.state.elementArrayBuffer)\n\t\t\td.context.uniformInt(program, \"texture\", 0)\n\t\t}\n\n\t\td.state.lastProgram = program\n\t\td.state.lastViewportWidth = 0\n\t\td.state.lastViewportHeight = 0\n\t\td.state.lastColorMatrix = nil\n\t\td.state.lastColorMatrixTranslation = nil\n\t\td.state.lastSourceWidth = 0\n\t\td.state.lastSourceHeight = 0\n\t}\n\n\tvw := destination.framebuffer.width\n\tvh := destination.framebuffer.height\n\tif d.state.lastViewportWidth != vw || d.state.lastSourceHeight != vh {\n\t\td.context.uniformFloats(program, \"viewport_size\", []float32{float32(vw), float32(vh)})\n\t\td.state.lastViewportWidth = vw\n\t\td.state.lastViewportHeight = vh\n\t}\n\n\tesBody, esTranslate := colorM.UnsafeElements()\n\n\tif !areSameFloat32Array(d.state.lastColorMatrix, esBody) {\n\t\td.context.uniformFloats(program, \"color_matrix_body\", esBody)\n\t\t\/\/ ColorM's elements are immutable. It's OK to hold the reference without copying.\n\t\td.state.lastColorMatrix = esBody\n\t}\n\tif !areSameFloat32Array(d.state.lastColorMatrixTranslation, esTranslate) {\n\t\td.context.uniformFloats(program, \"color_matrix_translation\", esTranslate)\n\t\t\/\/ ColorM's elements are immutable. It's OK to hold the reference without copying.\n\t\td.state.lastColorMatrixTranslation = esTranslate\n\t}\n\n\tsw := graphics.NextPowerOf2Int(srcW)\n\tsh := graphics.NextPowerOf2Int(srcH)\n\n\tif d.state.lastSourceWidth != sw || d.state.lastSourceHeight != sh {\n\t\td.context.uniformFloats(program, \"source_size\", []float32{float32(sw), float32(sh)})\n\t\td.state.lastSourceWidth = sw\n\t\td.state.lastSourceHeight = sh\n\t}\n\n\tif d.state.lastFilter == nil || *d.state.lastFilter != filter {\n\t\td.context.uniformInt(program, \"filter\", int(filter))\n\t\td.state.lastFilter = &filter\n\t}\n\tif d.state.lastAddress == nil || *d.state.lastAddress != address {\n\t\td.context.uniformInt(program, \"address\", int(address))\n\t\td.state.lastAddress = &address\n\t}\n\n\tif filter == graphics.FilterScreen {\n\t\tscale := float32(dstW) \/ float32(srcW)\n\t\td.context.uniformFloat(program, \"scale\", scale)\n\t}\n\n\t\/\/ We don't have to call gl.ActiveTexture here: GL_TEXTURE0 is the default active texture\n\t\/\/ See also: https:\/\/www.opengl.org\/sdk\/docs\/man2\/xhtml\/glActiveTexture.xml\n\td.context.bindTexture(source.textureNative)\n\n\td.state.source = nil\n\td.state.destination = nil\n\treturn nil\n}\n<commit_msg>graphicsdriver\/opengl: Bug fix: misspelling on variables<commit_after>\/\/ Copyright 2014 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage opengl\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/affine\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/graphics\"\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/web\"\n)\n\n\/\/ arrayBufferLayoutPart is a part of an array buffer layout.\ntype arrayBufferLayoutPart struct {\n\t\/\/ TODO: This struct should belong to a program and know it.\n\tname string\n\tnum int\n}\n\n\/\/ arrayBufferLayout is an array buffer layout.\n\/\/\n\/\/ An array buffer in OpenGL is a buffer representing vertices and\n\/\/ is passed to a vertex shader.\ntype arrayBufferLayout struct {\n\tparts []arrayBufferLayoutPart\n\ttotal int\n}\n\n\/\/ totalBytes returns the size in bytes for one element of the array buffer.\nfunc (a *arrayBufferLayout) totalBytes() int {\n\tif a.total != 0 {\n\t\treturn a.total\n\t}\n\tt := 0\n\tfor _, p := range a.parts {\n\t\tt += float.SizeInBytes() * p.num\n\t}\n\ta.total = t\n\treturn a.total\n}\n\n\/\/ newArrayBuffer creates OpenGL's buffer object for the array buffer.\nfunc (a *arrayBufferLayout) newArrayBuffer(context *context) buffer {\n\treturn context.newArrayBuffer(a.totalBytes() * graphics.IndicesNum)\n}\n\n\/\/ enable binds the array buffer the given program to use the array buffer.\nfunc (a *arrayBufferLayout) enable(context *context, program program) {\n\tfor _, p := range a.parts {\n\t\tcontext.enableVertexAttribArray(program, p.name)\n\t}\n\ttotal := a.totalBytes()\n\toffset := 0\n\tfor _, p := range a.parts {\n\t\tcontext.vertexAttribPointer(program, p.name, p.num, float, total, offset)\n\t\toffset += float.SizeInBytes() * p.num\n\t}\n}\n\n\/\/ disable stops using the array buffer.\nfunc (a *arrayBufferLayout) disable(context *context, program program) {\n\t\/\/ TODO: Disabling should be done in reversed order?\n\tfor _, p := range a.parts {\n\t\tcontext.disableVertexAttribArray(program, p.name)\n\t}\n}\n\n\/\/ theArrayBufferLayout is the array buffer layout for Ebiten.\nvar theArrayBufferLayout = arrayBufferLayout{\n\t\/\/ Note that GL_MAX_VERTEX_ATTRIBS is at least 16.\n\tparts: []arrayBufferLayoutPart{\n\t\t{\n\t\t\tname: \"vertex\",\n\t\t\tnum: 2,\n\t\t},\n\t\t{\n\t\t\tname: \"tex\",\n\t\t\tnum: 2,\n\t\t},\n\t\t{\n\t\t\tname: \"tex_region\",\n\t\t\tnum: 4,\n\t\t},\n\t\t{\n\t\t\tname: \"color_scale\",\n\t\t\tnum: 4,\n\t\t},\n\t},\n}\n\nfunc init() {\n\tvertexFloatNum := theArrayBufferLayout.totalBytes() \/ float.SizeInBytes()\n\tif graphics.VertexFloatNum != vertexFloatNum {\n\t\tpanic(fmt.Sprintf(\"vertex float num must be %d but %d\", graphics.VertexFloatNum, vertexFloatNum))\n\t}\n}\n\n\/\/ openGLState is a state for\ntype openGLState struct {\n\t\/\/ arrayBuffer is OpenGL's array buffer (vertices data).\n\tarrayBuffer buffer\n\n\t\/\/ elementArrayBuffer is OpenGL's element array buffer (indices data).\n\telementArrayBuffer buffer\n\n\t\/\/ program is OpenGL's program for rendering a texture.\n\tprogram program\n\n\tlastProgram program\n\tlastViewportWidth int\n\tlastViewportHeight int\n\tlastColorMatrix []float32\n\tlastColorMatrixTranslation []float32\n\tlastSourceWidth int\n\tlastSourceHeight int\n\tlastFilter *graphics.Filter\n\tlastAddress *graphics.Address\n\n\tsource *Image\n\tdestination *Image\n}\n\nvar (\n\tzeroBuffer buffer\n\tzeroProgram program\n)\n\nconst (\n\tmaxTriangles = graphics.IndicesNum \/ 3\n\tmaxQuads = maxTriangles \/ 2\n)\n\n\/\/ reset resets or initializes the OpenGL state.\nfunc (s *openGLState) reset(context *context) error {\n\tif err := context.reset(); err != nil {\n\t\treturn err\n\t}\n\n\ts.lastProgram = zeroProgram\n\ts.lastViewportWidth = 0\n\ts.lastViewportHeight = 0\n\ts.lastColorMatrix = nil\n\ts.lastColorMatrixTranslation = nil\n\ts.lastSourceWidth = 0\n\ts.lastSourceHeight = 0\n\ts.lastFilter = nil\n\ts.lastAddress = nil\n\n\t\/\/ When context lost happens, deleting programs or buffers is not necessary.\n\t\/\/ However, it is not assumed that reset is called only when context lost happens.\n\t\/\/ Let's delete them explicitly.\n\tif s.program != zeroProgram {\n\t\tcontext.deleteProgram(s.program)\n\t}\n\n\t\/\/ On browsers (at least Chrome), buffers are already detached from the context\n\t\/\/ and must not be deleted by DeleteBuffer.\n\tif !web.IsBrowser() {\n\t\tif s.arrayBuffer != zeroBuffer {\n\t\t\tcontext.deleteBuffer(s.arrayBuffer)\n\t\t}\n\t\tif s.elementArrayBuffer != zeroBuffer {\n\t\t\tcontext.deleteBuffer(s.elementArrayBuffer)\n\t\t}\n\t}\n\n\tshaderVertexModelviewNative, err := context.newShader(vertexShader, shaderStr(shaderVertexModelview))\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"graphics: shader compiling error:\\n%s\", err))\n\t}\n\tdefer context.deleteShader(shaderVertexModelviewNative)\n\n\tshaderFragmentColorMatrixNative, err := context.newShader(fragmentShader, shaderStr(shaderFragmentColorMatrix))\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"graphics: shader compiling error:\\n%s\", err))\n\t}\n\tdefer context.deleteShader(shaderFragmentColorMatrixNative)\n\n\ts.program, err = context.newProgram([]shader{\n\t\tshaderVertexModelviewNative,\n\t\tshaderFragmentColorMatrixNative,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.arrayBuffer = theArrayBufferLayout.newArrayBuffer(context)\n\n\t\/\/ Note that the indices passed to NewElementArrayBuffer is not under GC management\n\t\/\/ in opengl package due to unsafe-way.\n\t\/\/ See NewElementArrayBuffer in context_mobile.go.\n\ts.elementArrayBuffer = context.newElementArrayBuffer(graphics.IndicesNum * 2)\n\n\treturn nil\n}\n\n\/\/ areSameFloat32Array returns a boolean indicating if a and b are deeply equal.\nfunc areSameFloat32Array(a, b []float32) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(a); i++ {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ useProgram uses the program (programTexture).\nfunc (d *Driver) useProgram(mode graphics.CompositeMode, colorM *affine.ColorM, filter graphics.Filter, address graphics.Address) error {\n\tdestination := d.state.destination\n\tif destination == nil {\n\t\tpanic(\"destination image is not set\")\n\t}\n\tsource := d.state.source\n\tif source == nil {\n\t\tpanic(\"source image is not set\")\n\t}\n\n\tif err := destination.setViewport(); err != nil {\n\t\treturn err\n\t}\n\tdstW := destination.width\n\tsrcW, srcH := source.width, source.height\n\n\td.context.blendFunc(mode)\n\n\tprogram := d.state.program\n\tif d.state.lastProgram != program {\n\t\td.context.useProgram(program)\n\t\tif d.state.lastProgram != zeroProgram {\n\t\t\ttheArrayBufferLayout.disable(&d.context, d.state.lastProgram)\n\t\t}\n\t\ttheArrayBufferLayout.enable(&d.context, program)\n\n\t\tif d.state.lastProgram == zeroProgram {\n\t\t\td.context.bindBuffer(arrayBuffer, d.state.arrayBuffer)\n\t\t\td.context.bindBuffer(elementArrayBuffer, d.state.elementArrayBuffer)\n\t\t\td.context.uniformInt(program, \"texture\", 0)\n\t\t}\n\n\t\td.state.lastProgram = program\n\t\td.state.lastViewportWidth = 0\n\t\td.state.lastViewportHeight = 0\n\t\td.state.lastColorMatrix = nil\n\t\td.state.lastColorMatrixTranslation = nil\n\t\td.state.lastSourceWidth = 0\n\t\td.state.lastSourceHeight = 0\n\t}\n\n\tvw := destination.framebuffer.width\n\tvh := destination.framebuffer.height\n\tif d.state.lastViewportWidth != vw || d.state.lastViewportHeight != vh {\n\t\td.context.uniformFloats(program, \"viewport_size\", []float32{float32(vw), float32(vh)})\n\t\td.state.lastViewportWidth = vw\n\t\td.state.lastViewportHeight = vh\n\t}\n\n\tesBody, esTranslate := colorM.UnsafeElements()\n\n\tif !areSameFloat32Array(d.state.lastColorMatrix, esBody) {\n\t\td.context.uniformFloats(program, \"color_matrix_body\", esBody)\n\t\t\/\/ ColorM's elements are immutable. It's OK to hold the reference without copying.\n\t\td.state.lastColorMatrix = esBody\n\t}\n\tif !areSameFloat32Array(d.state.lastColorMatrixTranslation, esTranslate) {\n\t\td.context.uniformFloats(program, \"color_matrix_translation\", esTranslate)\n\t\t\/\/ ColorM's elements are immutable. It's OK to hold the reference without copying.\n\t\td.state.lastColorMatrixTranslation = esTranslate\n\t}\n\n\tsw := graphics.NextPowerOf2Int(srcW)\n\tsh := graphics.NextPowerOf2Int(srcH)\n\n\tif d.state.lastSourceWidth != sw || d.state.lastSourceHeight != sh {\n\t\td.context.uniformFloats(program, \"source_size\", []float32{float32(sw), float32(sh)})\n\t\td.state.lastSourceWidth = sw\n\t\td.state.lastSourceHeight = sh\n\t}\n\n\tif d.state.lastFilter == nil || *d.state.lastFilter != filter {\n\t\td.context.uniformInt(program, \"filter\", int(filter))\n\t\td.state.lastFilter = &filter\n\t}\n\tif d.state.lastAddress == nil || *d.state.lastAddress != address {\n\t\td.context.uniformInt(program, \"address\", int(address))\n\t\td.state.lastAddress = &address\n\t}\n\n\tif filter == graphics.FilterScreen {\n\t\tscale := float32(dstW) \/ float32(srcW)\n\t\td.context.uniformFloat(program, \"scale\", scale)\n\t}\n\n\t\/\/ We don't have to call gl.ActiveTexture here: GL_TEXTURE0 is the default active texture\n\t\/\/ See also: https:\/\/www.opengl.org\/sdk\/docs\/man2\/xhtml\/glActiveTexture.xml\n\td.context.bindTexture(source.textureNative)\n\n\td.state.source = nil\n\td.state.destination = nil\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package providercache\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tgetter \"github.com\/hashicorp\/go-getter\"\n\n\t\"github.com\/hashicorp\/terraform\/internal\/copy\"\n\t\"github.com\/hashicorp\/terraform\/internal\/getproviders\"\n\t\"github.com\/hashicorp\/terraform\/internal\/httpclient\"\n)\n\n\/\/ We borrow the \"unpack a zip file into a target directory\" logic from\n\/\/ go-getter, even though we're not otherwise using go-getter here.\n\/\/ (We don't need the same flexibility as we have for modules, because\n\/\/ providers _always_ come from provider registries, which have a very\n\/\/ specific protocol and set of expectations.)\nvar unzip = getter.ZipDecompressor{}\n\nfunc installFromHTTPURL(ctx context.Context, meta getproviders.PackageMeta, targetDir string, allowedHashes []getproviders.Hash) (*getproviders.PackageAuthenticationResult, error) {\n\turl := meta.Location.String()\n\n\t\/\/ When we're installing from an HTTP URL we expect the URL to refer to\n\t\/\/ a zip file. We'll fetch that into a temporary file here and then\n\t\/\/ delegate to installFromLocalArchive below to actually extract it.\n\t\/\/ (We're not using go-getter here because its HTTP getter has a bunch\n\t\/\/ of extraneous functionality we don't need or want, like indirection\n\t\/\/ through X-Terraform-Get header, attempting partial fetches for\n\t\/\/ files that already exist, etc.)\n\n\thttpClient := httpclient.New()\n\treq, err := http.NewRequestWithContext(ctx, \"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid provider download request: %s\", err)\n\t}\n\tresp, err := httpClient.Do(req)\n\tif err != nil {\n\t\tif ctx.Err() == context.Canceled {\n\t\t\t\/\/ \"context canceled\" is not a user-friendly error message,\n\t\t\t\/\/ so we'll return a more appropriate one here.\n\t\t\treturn nil, fmt.Errorf(\"provider download was interrupted\")\n\t\t}\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"unsuccessful request to %s: %s\", url, resp.Status)\n\t}\n\n\tf, err := ioutil.TempFile(\"\", \"terraform-provider\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to open temporary file to download from %s\", url)\n\t}\n\tdefer f.Close()\n\tdefer os.Remove(f.Name())\n\n\t\/\/ We'll borrow go-getter's \"cancelable copy\" implementation here so that\n\t\/\/ the download can potentially be interrupted partway through.\n\tn, err := getter.Copy(ctx, f, resp.Body)\n\tif err == nil && n < resp.ContentLength {\n\t\terr = fmt.Errorf(\"incorrect response size: expected %d bytes, but got %d bytes\", resp.ContentLength, n)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tarchiveFilename := f.Name()\n\tlocalLocation := getproviders.PackageLocalArchive(archiveFilename)\n\n\tvar authResult *getproviders.PackageAuthenticationResult\n\tif meta.Authentication != nil {\n\t\tif authResult, err = meta.Authentication.AuthenticatePackage(localLocation); err != nil {\n\t\t\treturn authResult, err\n\t\t}\n\t}\n\n\t\/\/ We can now delegate to installFromLocalArchive for extraction. To do so,\n\t\/\/ we construct a new package meta description using the local archive\n\t\/\/ path as the location, and skipping authentication. installFromLocalMeta\n\t\/\/ is responsible for verifying that the archive matches the allowedHashes,\n\t\/\/ though.\n\tlocalMeta := getproviders.PackageMeta{\n\t\tProvider: meta.Provider,\n\t\tVersion: meta.Version,\n\t\tProtocolVersions: meta.ProtocolVersions,\n\t\tTargetPlatform: meta.TargetPlatform,\n\t\tFilename: meta.Filename,\n\t\tLocation: localLocation,\n\t\tAuthentication: nil,\n\t}\n\tif _, err := installFromLocalArchive(ctx, localMeta, targetDir, allowedHashes); err != nil {\n\t\treturn nil, err\n\t}\n\treturn authResult, nil\n}\n\nfunc installFromLocalArchive(ctx context.Context, meta getproviders.PackageMeta, targetDir string, allowedHashes []getproviders.Hash) (*getproviders.PackageAuthenticationResult, error) {\n\tvar authResult *getproviders.PackageAuthenticationResult\n\tif meta.Authentication != nil {\n\t\tvar err error\n\t\tif authResult, err = meta.Authentication.AuthenticatePackage(meta.Location); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(allowedHashes) > 0 {\n\t\tif matches, err := meta.MatchesAnyHash(allowedHashes); err != nil {\n\t\t\treturn authResult, fmt.Errorf(\n\t\t\t\t\"failed to calculate checksum for %s %s package at %s: %s\",\n\t\t\t\tmeta.Provider, meta.Version, meta.Location, err,\n\t\t\t)\n\t\t} else if !matches {\n\t\t\treturn authResult, fmt.Errorf(\n\t\t\t\t\"the current package for %s %s doesn't match any of the checksums previously recorded in the dependency lock file; for more information: https:\/\/www.terraform.io\/language\/provider-checksum-verification\",\n\t\t\t\tmeta.Provider, meta.Version,\n\t\t\t)\n\t\t}\n\t}\n\n\tfilename := meta.Location.String()\n\n\terr := unzip.Decompress(targetDir, filename, true, 0000)\n\tif err != nil {\n\t\treturn authResult, err\n\t}\n\n\treturn authResult, nil\n}\n\n\/\/ installFromLocalDir is the implementation of both installing a package from\n\/\/ a local directory source _and_ of linking a package from another cache\n\/\/ in LinkFromOtherCache, because they both do fundamentally the same\n\/\/ operation: symlink if possible, or deep-copy otherwise.\nfunc installFromLocalDir(ctx context.Context, meta getproviders.PackageMeta, targetDir string, allowedHashes []getproviders.Hash) (*getproviders.PackageAuthenticationResult, error) {\n\tsourceDir := meta.Location.String()\n\n\tabsNew, err := filepath.Abs(targetDir)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to make target path %s absolute: %s\", targetDir, err)\n\t}\n\tabsCurrent, err := filepath.Abs(sourceDir)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to make source path %s absolute: %s\", sourceDir, err)\n\t}\n\n\t\/\/ Before we do anything else, we'll do a quick check to make sure that\n\t\/\/ these two paths are not pointing at the same physical directory on\n\t\/\/ disk. This compares the files by their OS-level device and directory\n\t\/\/ entry identifiers, not by their virtual filesystem paths.\n\tif same, err := copy.SameFile(absNew, absCurrent); same {\n\t\treturn nil, fmt.Errorf(\"cannot install existing provider directory %s to itself\", targetDir)\n\t} else if err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to determine if %s and %s are the same: %s\", sourceDir, targetDir, err)\n\t}\n\n\tvar authResult *getproviders.PackageAuthenticationResult\n\tif meta.Authentication != nil {\n\t\t\/\/ (we have this here for completeness but note that local filesystem\n\t\t\/\/ mirrors typically don't include enough information for package\n\t\t\/\/ authentication and so we'll rarely get in here in practice.)\n\t\tvar err error\n\t\tif authResult, err = meta.Authentication.AuthenticatePackage(meta.Location); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ If the caller provided at least one hash in allowedHashes then at\n\t\/\/ least one of those hashes ought to match. However, for local directories\n\t\/\/ in particular we can't actually verify the legacy \"zh:\" hash scheme\n\t\/\/ because it requires access to the original .zip archive, and so as a\n\t\/\/ measure of pragmatism we'll treat a set of hashes where all are \"zh:\"\n\t\/\/ the same as no hashes at all, and let anything pass. This is definitely\n\t\/\/ non-ideal but accepted for two reasons:\n\t\/\/ - Packages we find on local disk can be considered a little more trusted\n\t\/\/ than packages coming from over the network, because we assume that\n\t\/\/ they were either placed intentionally by an operator or they were\n\t\/\/ automatically installed by a previous network operation that would've\n\t\/\/ itself verified the hashes.\n\t\/\/ - Our installer makes a concerted effort to record at least one new-style\n\t\/\/ hash for each lock entry, so we should very rarely end up in this\n\t\/\/ situation anyway.\n\tsuitableHashCount := 0\n\tfor _, hash := range allowedHashes {\n\t\tif !hash.HasScheme(getproviders.HashSchemeZip) {\n\t\t\tsuitableHashCount++\n\t\t}\n\t}\n\tif suitableHashCount > 0 {\n\t\tif matches, err := meta.MatchesAnyHash(allowedHashes); err != nil {\n\t\t\treturn authResult, fmt.Errorf(\n\t\t\t\t\"failed to calculate checksum for %s %s package at %s: %s\",\n\t\t\t\tmeta.Provider, meta.Version, meta.Location, err,\n\t\t\t)\n\t\t} else if !matches {\n\t\t\treturn authResult, fmt.Errorf(\n\t\t\t\t\"the local package for %s %s doesn't match any of the checksums previously recorded in the dependency lock file (this might be because the available checksums are for packages targeting different platforms); for more information: https:\/\/www.terraform.io\/language\/provider-checksum-verification\",\n\t\t\t\tmeta.Provider, meta.Version,\n\t\t\t)\n\t\t}\n\t}\n\n\t\/\/ Delete anything that's already present at this path first.\n\terr = os.RemoveAll(targetDir)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn nil, fmt.Errorf(\"failed to remove existing %s before linking it to %s: %s\", sourceDir, targetDir, err)\n\t}\n\n\t\/\/ We'll prefer to create a symlink if possible, but we'll fall back to\n\t\/\/ a recursive copy if symlink creation fails. It could fail for a number\n\t\/\/ of reasons, including being on Windows 8 without administrator\n\t\/\/ privileges or being on a legacy filesystem like FAT that has no way\n\t\/\/ to represent a symlink. (Generalized symlink support for Windows was\n\t\/\/ introduced in a Windows 10 minor update.)\n\t\/\/\n\t\/\/ We use an absolute path for the symlink to reduce the risk of it being\n\t\/\/ broken by moving things around later, since the source directory is\n\t\/\/ likely to be a shared directory independent on any particular target\n\t\/\/ and thus we can't assume that they will move around together.\n\tlinkTarget := absCurrent\n\n\tparentDir := filepath.Dir(absNew)\n\terr = os.MkdirAll(parentDir, 0755)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create parent directories leading to %s: %s\", targetDir, err)\n\t}\n\n\terr = os.Symlink(linkTarget, absNew)\n\tif err == nil {\n\t\t\/\/ Success, then!\n\t\treturn nil, nil\n\t}\n\n\t\/\/ If we get down here then symlinking failed and we need a deep copy\n\t\/\/ instead. To make a copy, we first need to create the target directory,\n\t\/\/ which would otherwise be a symlink.\n\terr = os.Mkdir(absNew, 0755)\n\tif err != nil && os.IsExist(err) {\n\t\treturn nil, fmt.Errorf(\"failed to create directory %s: %s\", absNew, err)\n\t}\n\terr = copy.CopyDir(absNew, absCurrent)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to either symlink or copy %s to %s: %s\", absCurrent, absNew, err)\n\t}\n\n\t\/\/ If we got here then apparently our copy succeeded, so we're done.\n\treturn nil, nil\n}\n<commit_msg>providercache: include host in provider installation error (#31524)<commit_after>package providercache\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tgetter \"github.com\/hashicorp\/go-getter\"\n\n\t\"github.com\/hashicorp\/terraform\/internal\/copy\"\n\t\"github.com\/hashicorp\/terraform\/internal\/getproviders\"\n\t\"github.com\/hashicorp\/terraform\/internal\/httpclient\"\n)\n\n\/\/ We borrow the \"unpack a zip file into a target directory\" logic from\n\/\/ go-getter, even though we're not otherwise using go-getter here.\n\/\/ (We don't need the same flexibility as we have for modules, because\n\/\/ providers _always_ come from provider registries, which have a very\n\/\/ specific protocol and set of expectations.)\nvar unzip = getter.ZipDecompressor{}\n\nfunc installFromHTTPURL(ctx context.Context, meta getproviders.PackageMeta, targetDir string, allowedHashes []getproviders.Hash) (*getproviders.PackageAuthenticationResult, error) {\n\turl := meta.Location.String()\n\n\t\/\/ When we're installing from an HTTP URL we expect the URL to refer to\n\t\/\/ a zip file. We'll fetch that into a temporary file here and then\n\t\/\/ delegate to installFromLocalArchive below to actually extract it.\n\t\/\/ (We're not using go-getter here because its HTTP getter has a bunch\n\t\/\/ of extraneous functionality we don't need or want, like indirection\n\t\/\/ through X-Terraform-Get header, attempting partial fetches for\n\t\/\/ files that already exist, etc.)\n\n\thttpClient := httpclient.New()\n\treq, err := http.NewRequestWithContext(ctx, \"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid provider download request: %s\", err)\n\t}\n\tresp, err := httpClient.Do(req)\n\tif err != nil {\n\t\tif ctx.Err() == context.Canceled {\n\t\t\t\/\/ \"context canceled\" is not a user-friendly error message,\n\t\t\t\/\/ so we'll return a more appropriate one here.\n\t\t\treturn nil, fmt.Errorf(\"provider download was interrupted\")\n\t\t}\n\t\treturn nil, fmt.Errorf(\"%s: %w\", getproviders.HostFromRequest(req), err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"unsuccessful request to %s: %s\", url, resp.Status)\n\t}\n\n\tf, err := ioutil.TempFile(\"\", \"terraform-provider\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to open temporary file to download from %s\", url)\n\t}\n\tdefer f.Close()\n\tdefer os.Remove(f.Name())\n\n\t\/\/ We'll borrow go-getter's \"cancelable copy\" implementation here so that\n\t\/\/ the download can potentially be interrupted partway through.\n\tn, err := getter.Copy(ctx, f, resp.Body)\n\tif err == nil && n < resp.ContentLength {\n\t\terr = fmt.Errorf(\"incorrect response size: expected %d bytes, but got %d bytes\", resp.ContentLength, n)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tarchiveFilename := f.Name()\n\tlocalLocation := getproviders.PackageLocalArchive(archiveFilename)\n\n\tvar authResult *getproviders.PackageAuthenticationResult\n\tif meta.Authentication != nil {\n\t\tif authResult, err = meta.Authentication.AuthenticatePackage(localLocation); err != nil {\n\t\t\treturn authResult, err\n\t\t}\n\t}\n\n\t\/\/ We can now delegate to installFromLocalArchive for extraction. To do so,\n\t\/\/ we construct a new package meta description using the local archive\n\t\/\/ path as the location, and skipping authentication. installFromLocalMeta\n\t\/\/ is responsible for verifying that the archive matches the allowedHashes,\n\t\/\/ though.\n\tlocalMeta := getproviders.PackageMeta{\n\t\tProvider: meta.Provider,\n\t\tVersion: meta.Version,\n\t\tProtocolVersions: meta.ProtocolVersions,\n\t\tTargetPlatform: meta.TargetPlatform,\n\t\tFilename: meta.Filename,\n\t\tLocation: localLocation,\n\t\tAuthentication: nil,\n\t}\n\tif _, err := installFromLocalArchive(ctx, localMeta, targetDir, allowedHashes); err != nil {\n\t\treturn nil, err\n\t}\n\treturn authResult, nil\n}\n\nfunc installFromLocalArchive(ctx context.Context, meta getproviders.PackageMeta, targetDir string, allowedHashes []getproviders.Hash) (*getproviders.PackageAuthenticationResult, error) {\n\tvar authResult *getproviders.PackageAuthenticationResult\n\tif meta.Authentication != nil {\n\t\tvar err error\n\t\tif authResult, err = meta.Authentication.AuthenticatePackage(meta.Location); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif len(allowedHashes) > 0 {\n\t\tif matches, err := meta.MatchesAnyHash(allowedHashes); err != nil {\n\t\t\treturn authResult, fmt.Errorf(\n\t\t\t\t\"failed to calculate checksum for %s %s package at %s: %s\",\n\t\t\t\tmeta.Provider, meta.Version, meta.Location, err,\n\t\t\t)\n\t\t} else if !matches {\n\t\t\treturn authResult, fmt.Errorf(\n\t\t\t\t\"the current package for %s %s doesn't match any of the checksums previously recorded in the dependency lock file; for more information: https:\/\/www.terraform.io\/language\/provider-checksum-verification\",\n\t\t\t\tmeta.Provider, meta.Version,\n\t\t\t)\n\t\t}\n\t}\n\n\tfilename := meta.Location.String()\n\n\terr := unzip.Decompress(targetDir, filename, true, 0000)\n\tif err != nil {\n\t\treturn authResult, err\n\t}\n\n\treturn authResult, nil\n}\n\n\/\/ installFromLocalDir is the implementation of both installing a package from\n\/\/ a local directory source _and_ of linking a package from another cache\n\/\/ in LinkFromOtherCache, because they both do fundamentally the same\n\/\/ operation: symlink if possible, or deep-copy otherwise.\nfunc installFromLocalDir(ctx context.Context, meta getproviders.PackageMeta, targetDir string, allowedHashes []getproviders.Hash) (*getproviders.PackageAuthenticationResult, error) {\n\tsourceDir := meta.Location.String()\n\n\tabsNew, err := filepath.Abs(targetDir)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to make target path %s absolute: %s\", targetDir, err)\n\t}\n\tabsCurrent, err := filepath.Abs(sourceDir)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to make source path %s absolute: %s\", sourceDir, err)\n\t}\n\n\t\/\/ Before we do anything else, we'll do a quick check to make sure that\n\t\/\/ these two paths are not pointing at the same physical directory on\n\t\/\/ disk. This compares the files by their OS-level device and directory\n\t\/\/ entry identifiers, not by their virtual filesystem paths.\n\tif same, err := copy.SameFile(absNew, absCurrent); same {\n\t\treturn nil, fmt.Errorf(\"cannot install existing provider directory %s to itself\", targetDir)\n\t} else if err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to determine if %s and %s are the same: %s\", sourceDir, targetDir, err)\n\t}\n\n\tvar authResult *getproviders.PackageAuthenticationResult\n\tif meta.Authentication != nil {\n\t\t\/\/ (we have this here for completeness but note that local filesystem\n\t\t\/\/ mirrors typically don't include enough information for package\n\t\t\/\/ authentication and so we'll rarely get in here in practice.)\n\t\tvar err error\n\t\tif authResult, err = meta.Authentication.AuthenticatePackage(meta.Location); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t\/\/ If the caller provided at least one hash in allowedHashes then at\n\t\/\/ least one of those hashes ought to match. However, for local directories\n\t\/\/ in particular we can't actually verify the legacy \"zh:\" hash scheme\n\t\/\/ because it requires access to the original .zip archive, and so as a\n\t\/\/ measure of pragmatism we'll treat a set of hashes where all are \"zh:\"\n\t\/\/ the same as no hashes at all, and let anything pass. This is definitely\n\t\/\/ non-ideal but accepted for two reasons:\n\t\/\/ - Packages we find on local disk can be considered a little more trusted\n\t\/\/ than packages coming from over the network, because we assume that\n\t\/\/ they were either placed intentionally by an operator or they were\n\t\/\/ automatically installed by a previous network operation that would've\n\t\/\/ itself verified the hashes.\n\t\/\/ - Our installer makes a concerted effort to record at least one new-style\n\t\/\/ hash for each lock entry, so we should very rarely end up in this\n\t\/\/ situation anyway.\n\tsuitableHashCount := 0\n\tfor _, hash := range allowedHashes {\n\t\tif !hash.HasScheme(getproviders.HashSchemeZip) {\n\t\t\tsuitableHashCount++\n\t\t}\n\t}\n\tif suitableHashCount > 0 {\n\t\tif matches, err := meta.MatchesAnyHash(allowedHashes); err != nil {\n\t\t\treturn authResult, fmt.Errorf(\n\t\t\t\t\"failed to calculate checksum for %s %s package at %s: %s\",\n\t\t\t\tmeta.Provider, meta.Version, meta.Location, err,\n\t\t\t)\n\t\t} else if !matches {\n\t\t\treturn authResult, fmt.Errorf(\n\t\t\t\t\"the local package for %s %s doesn't match any of the checksums previously recorded in the dependency lock file (this might be because the available checksums are for packages targeting different platforms); for more information: https:\/\/www.terraform.io\/language\/provider-checksum-verification\",\n\t\t\t\tmeta.Provider, meta.Version,\n\t\t\t)\n\t\t}\n\t}\n\n\t\/\/ Delete anything that's already present at this path first.\n\terr = os.RemoveAll(targetDir)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn nil, fmt.Errorf(\"failed to remove existing %s before linking it to %s: %s\", sourceDir, targetDir, err)\n\t}\n\n\t\/\/ We'll prefer to create a symlink if possible, but we'll fall back to\n\t\/\/ a recursive copy if symlink creation fails. It could fail for a number\n\t\/\/ of reasons, including being on Windows 8 without administrator\n\t\/\/ privileges or being on a legacy filesystem like FAT that has no way\n\t\/\/ to represent a symlink. (Generalized symlink support for Windows was\n\t\/\/ introduced in a Windows 10 minor update.)\n\t\/\/\n\t\/\/ We use an absolute path for the symlink to reduce the risk of it being\n\t\/\/ broken by moving things around later, since the source directory is\n\t\/\/ likely to be a shared directory independent on any particular target\n\t\/\/ and thus we can't assume that they will move around together.\n\tlinkTarget := absCurrent\n\n\tparentDir := filepath.Dir(absNew)\n\terr = os.MkdirAll(parentDir, 0755)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create parent directories leading to %s: %s\", targetDir, err)\n\t}\n\n\terr = os.Symlink(linkTarget, absNew)\n\tif err == nil {\n\t\t\/\/ Success, then!\n\t\treturn nil, nil\n\t}\n\n\t\/\/ If we get down here then symlinking failed and we need a deep copy\n\t\/\/ instead. To make a copy, we first need to create the target directory,\n\t\/\/ which would otherwise be a symlink.\n\terr = os.Mkdir(absNew, 0755)\n\tif err != nil && os.IsExist(err) {\n\t\treturn nil, fmt.Errorf(\"failed to create directory %s: %s\", absNew, err)\n\t}\n\terr = copy.CopyDir(absNew, absCurrent)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to either symlink or copy %s to %s: %s\", absCurrent, absNew, err)\n\t}\n\n\t\/\/ If we got here then apparently our copy succeeded, so we're done.\n\treturn nil, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package trackermanager\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/cenkalti\/rain\/internal\/blocklist\"\n\t\"github.com\/cenkalti\/rain\/internal\/tracker\"\n\t\"github.com\/cenkalti\/rain\/internal\/tracker\/httptracker\"\n\t\"github.com\/cenkalti\/rain\/internal\/tracker\/udptracker\"\n)\n\ntype TrackerManager struct {\n\thttpTransport *http.Transport\n\tudpTransport *udptracker.Transport\n}\n\nfunc New(bl *blocklist.Blocklist) *TrackerManager {\n\tm := &TrackerManager{\n\t\thttpTransport: new(http.Transport),\n\t\tudpTransport: udptracker.NewTransport(bl),\n\t}\n\tm.httpTransport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {\n\t\tip, port, err := tracker.ResolveHost(ctx, addr, bl)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar d net.Dialer\n\t\ttaddr := &net.TCPAddr{IP: ip, Port: port}\n\t\treturn d.DialContext(ctx, network, taddr.String())\n\t}\n\treturn m\n}\n\nfunc (m *TrackerManager) Get(s string, httpTimeout time.Duration, httpUserAgent string) (tracker.Tracker, error) {\n\tu, err := url.Parse(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tswitch u.Scheme {\n\tcase \"http\", \"https\":\n\t\ttr := httptracker.New(s, u, httpTimeout, m.httpTransport, httpUserAgent)\n\t\treturn tr, nil\n\tcase \"udp\":\n\t\ttr := udptracker.New(s, u, m.udpTransport)\n\t\treturn tr, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported tracker scheme: %s\", u.Scheme)\n\t}\n}\n<commit_msg>disable http\/2 in tracker client<commit_after>package trackermanager\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/cenkalti\/rain\/internal\/blocklist\"\n\t\"github.com\/cenkalti\/rain\/internal\/tracker\"\n\t\"github.com\/cenkalti\/rain\/internal\/tracker\/httptracker\"\n\t\"github.com\/cenkalti\/rain\/internal\/tracker\/udptracker\"\n)\n\ntype TrackerManager struct {\n\thttpTransport *http.Transport\n\tudpTransport *udptracker.Transport\n}\n\nfunc New(bl *blocklist.Blocklist) *TrackerManager {\n\tm := &TrackerManager{\n\t\thttpTransport: &http.Transport{\n\t\t\t\/\/ Setting TLSNextProto to non-nil map disables HTTP\/2 support.\n\t\t\tTLSNextProto: make(map[string]func(string, *tls.Conn) http.RoundTripper),\n\t\t},\n\t\tudpTransport: udptracker.NewTransport(bl),\n\t}\n\tm.httpTransport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {\n\t\tip, port, err := tracker.ResolveHost(ctx, addr, bl)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tvar d net.Dialer\n\t\ttaddr := &net.TCPAddr{IP: ip, Port: port}\n\t\treturn d.DialContext(ctx, network, taddr.String())\n\t}\n\treturn m\n}\n\nfunc (m *TrackerManager) Get(s string, httpTimeout time.Duration, httpUserAgent string) (tracker.Tracker, error) {\n\tu, err := url.Parse(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tswitch u.Scheme {\n\tcase \"http\", \"https\":\n\t\ttr := httptracker.New(s, u, httpTimeout, m.httpTransport, httpUserAgent)\n\t\treturn tr, nil\n\tcase \"udp\":\n\t\ttr := udptracker.New(s, u, m.udpTransport)\n\t\treturn tr, nil\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported tracker scheme: %s\", u.Scheme)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright 2016 - 2018 Crunchy Data Solutions, Inc.\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n)\n\nconst REPORT = \"\/report\/index.html\"\n\nfunc init() {\n\tlog.SetOutput(os.Stdout)\n\tlog.Println(\"BadgerServer starting..\")\n}\n\nfunc main() {\n\thttp.HandleFunc(\"\/api\/badgergenerate\", BadgerGenerate)\n\thttp.Handle(\"\/static\/\", http.StripPrefix(\"\/static\", http.FileServer(http.Dir(\"\/report\"))))\n\tlog.Fatal(http.ListenAndServe(\":10000\", nil))\n}\n\n\/\/ BadgerGenerate perform a pgbadger to create the HTML output file\nfunc BadgerGenerate(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"Generating report..\")\n\n\tvar cmd *exec.Cmd\n\tcmd = exec.Command(\"badger-generate.sh\")\n\tvar out bytes.Buffer\n\tvar stderr bytes.Buffer\n\tcmd.Stdout = &out\n\tcmd.Stderr = &stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error running badger-generate: %s\", err)\n\t\treturn\n\t}\n\n\tlog.Println(\"Report generated. Redirecting..\")\n\thttp.Redirect(w, r, \"\/static\", 301)\n}\n<commit_msg>Add handler for \/ in pgbadger http server<commit_after>\/*\n Copyright 2016 - 2018 Crunchy Data Solutions, Inc.\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n)\n\nconst REPORT = \"\/report\/index.html\"\n\nfunc init() {\n\tlog.SetOutput(os.Stdout)\n\tlog.Println(\"BadgerServer starting..\")\n}\n\nfunc main() {\n\thttp.HandleFunc(\"\/api\/badgergenerate\", BadgerGenerate)\n\thttp.Handle(\"\/static\/\", http.StripPrefix(\"\/static\", http.FileServer(http.Dir(\"\/report\"))))\n\thttp.HandleFunc(\"\/\", RootPathRedirect)\n\tlog.Fatal(http.ListenAndServe(\":10000\", nil))\n}\n\n\/\/ BadgerGenerate perform a pgbadger to create the HTML output file\nfunc BadgerGenerate(w http.ResponseWriter, r *http.Request) {\n\tlog.Println(\"Generating report..\")\n\n\tvar cmd *exec.Cmd\n\tcmd = exec.Command(\"badger-generate.sh\")\n\tvar out bytes.Buffer\n\tvar stderr bytes.Buffer\n\tcmd.Stdout = &out\n\tcmd.Stderr = &stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error running badger-generate: %s\", err)\n\t\treturn\n\t}\n\n\tlog.Println(\"Report generated. Redirecting..\")\n\thttp.Redirect(w, r, \"\/static\", 301)\n}\n\nfunc RootPathRedirect(w http.ResponseWriter, r *http.Request) {\n\tredirect_url := \"\/static\/\"\n\tif _, err := os.Stat(REPORT); os.IsNotExist(err) {\n\t\tredirect_url = \"\/api\/badgergenerate\"\n\t}\n\thttp.Redirect(w, r, redirect_url, 302)\n}\n<|endoftext|>"} {"text":"<commit_before>package lifecycle\n\nimport (\n\t\"github.com\/lxc\/lxd\/lxd\/operations\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n\t\"github.com\/lxc\/lxd\/shared\/version\"\n)\n\n\/\/ Internal copy of the instance interface.\ntype instance interface {\n\tName() string\n\tProject() string\n\tOperation() *operations.Operation\n}\n\n\/\/ InstanceAction represents a lifecycle event action for instances.\ntype InstanceAction string\n\n\/\/ All supported lifecycle events for instances.\nconst (\n\tInstanceCreated = InstanceAction(api.EventLifecycleInstanceCreated)\n\tInstanceStarted = InstanceAction(api.EventLifecycleInstanceStarted)\n\tInstanceStopped = InstanceAction(api.EventLifecycleInstanceStopped)\n\tInstanceShutdown = InstanceAction(api.EventLifecycleInstanceShutdown)\n\tInstanceRestarted = InstanceAction(api.EventLifecycleInstanceRestarted)\n\tInstancePaused = InstanceAction(api.EventLifecycleInstancePaused)\n\tInstanceResumed = InstanceAction(api.EventLifecycleInstanceResumed)\n\tInstanceRestored = InstanceAction(api.EventLifecycleInstanceRestored)\n\tInstanceDeleted = InstanceAction(api.EventLifecycleInstanceDeleted)\n\tInstanceRenamed = InstanceAction(api.EventLifecycleInstanceRenamed)\n\tInstanceUpdated = InstanceAction(api.EventLifecycleInstanceUpdated)\n\tInstanceExec = InstanceAction(api.EventLifecycleInstanceExec)\n\tInstanceConsole = InstanceAction(api.EventLifecycleInstanceConsole)\n\tInstanceConsoleRetrieved = InstanceAction(api.EventLifecycleInstanceConsoleRetrieved)\n\tInstanceConsoleReset = InstanceAction(api.EventLifecycleInstanceConsoleReset)\n\tInstanceFileRetrieved = InstanceAction(api.EventLifecycleInstanceFileRetrieved)\n\tInstanceFilePushed = InstanceAction(api.EventLifecycleInstanceFilePushed)\n\tInstanceFileDeleted = InstanceAction(api.EventLifecycleInstanceFileDeleted)\n)\n\n\/\/ Event creates the lifecycle event for an action on an instance.\nfunc (a InstanceAction) Event(inst instance, ctx map[string]any) api.EventLifecycle {\n\turl := api.NewURL().Path(version.APIVersion, \"instances\", inst.Name()).Project(inst.Project())\n\n\tvar requestor *api.EventLifecycleRequestor\n\tif inst.Operation() != nil {\n\t\trequestor = inst.Operation().Requestor()\n\t}\n\n\treturn api.EventLifecycle{\n\t\tAction: string(a),\n\t\tSource: url.String(),\n\t\tContext: ctx,\n\t\tRequestor: requestor,\n\t}\n}\n<commit_msg>lxd\/lifecycle: Add InstanceReady lifecycle event<commit_after>package lifecycle\n\nimport (\n\t\"github.com\/lxc\/lxd\/lxd\/operations\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n\t\"github.com\/lxc\/lxd\/shared\/version\"\n)\n\n\/\/ Internal copy of the instance interface.\ntype instance interface {\n\tName() string\n\tProject() string\n\tOperation() *operations.Operation\n}\n\n\/\/ InstanceAction represents a lifecycle event action for instances.\ntype InstanceAction string\n\n\/\/ All supported lifecycle events for instances.\nconst (\n\tInstanceCreated = InstanceAction(api.EventLifecycleInstanceCreated)\n\tInstanceStarted = InstanceAction(api.EventLifecycleInstanceStarted)\n\tInstanceStopped = InstanceAction(api.EventLifecycleInstanceStopped)\n\tInstanceShutdown = InstanceAction(api.EventLifecycleInstanceShutdown)\n\tInstanceRestarted = InstanceAction(api.EventLifecycleInstanceRestarted)\n\tInstancePaused = InstanceAction(api.EventLifecycleInstancePaused)\n\tInstanceReady = InstanceAction(api.EventLifecycleInstanceReady)\n\tInstanceResumed = InstanceAction(api.EventLifecycleInstanceResumed)\n\tInstanceRestored = InstanceAction(api.EventLifecycleInstanceRestored)\n\tInstanceDeleted = InstanceAction(api.EventLifecycleInstanceDeleted)\n\tInstanceRenamed = InstanceAction(api.EventLifecycleInstanceRenamed)\n\tInstanceUpdated = InstanceAction(api.EventLifecycleInstanceUpdated)\n\tInstanceExec = InstanceAction(api.EventLifecycleInstanceExec)\n\tInstanceConsole = InstanceAction(api.EventLifecycleInstanceConsole)\n\tInstanceConsoleRetrieved = InstanceAction(api.EventLifecycleInstanceConsoleRetrieved)\n\tInstanceConsoleReset = InstanceAction(api.EventLifecycleInstanceConsoleReset)\n\tInstanceFileRetrieved = InstanceAction(api.EventLifecycleInstanceFileRetrieved)\n\tInstanceFilePushed = InstanceAction(api.EventLifecycleInstanceFilePushed)\n\tInstanceFileDeleted = InstanceAction(api.EventLifecycleInstanceFileDeleted)\n)\n\n\/\/ Event creates the lifecycle event for an action on an instance.\nfunc (a InstanceAction) Event(inst instance, ctx map[string]any) api.EventLifecycle {\n\turl := api.NewURL().Path(version.APIVersion, \"instances\", inst.Name()).Project(inst.Project())\n\n\tvar requestor *api.EventLifecycleRequestor\n\tif inst.Operation() != nil {\n\t\trequestor = inst.Operation().Requestor()\n\t}\n\n\treturn api.EventLifecycle{\n\t\tAction: string(a),\n\t\tSource: url.String(),\n\t\tContext: ctx,\n\t\tRequestor: requestor,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ (c) 2019-2020, Ava Labs, Inc. All rights reserved.\n\/\/ See the file LICENSE for licensing terms.\n\npackage validators\n\nimport (\n\t\"math\"\n\t\"testing\"\n\n\t\"github.com\/ava-labs\/gecko\/ids\"\n)\n\nfunc TestSamplerSample(t *testing.T) {\n\tvdr0 := GenerateRandomValidator(1)\n\tvdr1 := GenerateRandomValidator(math.MaxInt64 - 1)\n\n\ts := NewSet()\n\ts.Add(vdr0)\n\n\tif sampled := s.Sample(1); len(sampled) != 1 {\n\t\tt.Fatalf(\"Should have sampled 1 validator\")\n\t} else if !sampled[0].ID().Equals(vdr0.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr0\")\n\t} else if s.Len() != 1 {\n\t\tt.Fatalf(\"Wrong size\")\n\t}\n\n\ts.Add(vdr1)\n\n\tif sampled := s.Sample(1); len(sampled) != 1 {\n\t\tt.Fatalf(\"Should have sampled 1 validator\")\n\t} else if !sampled[0].ID().Equals(vdr1.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr1\")\n\t} else if s.Len() != 2 {\n\t\tt.Fatalf(\"Wrong size\")\n\t}\n\n\tif sampled := s.Sample(2); len(sampled) != 2 {\n\t\tt.Fatalf(\"Should have sampled 2 validators\")\n\t} else if !sampled[1].ID().Equals(vdr0.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr0\")\n\t} else if !sampled[0].ID().Equals(vdr1.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr1\")\n\t}\n\n\tif sampled := s.Sample(3); len(sampled) != 2 {\n\t\tt.Fatalf(\"Should have sampled 2 validators\")\n\t} else if !sampled[1].ID().Equals(vdr0.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr0\")\n\t} else if !sampled[0].ID().Equals(vdr1.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr1\")\n\t}\n\n\tif list := s.List(); len(list) != 2 {\n\t\tt.Fatalf(\"Should have returned 2 validators\")\n\t} else if !list[0].ID().Equals(vdr0.ID()) {\n\t\tt.Fatalf(\"Should have returned vdr0\")\n\t} else if !list[1].ID().Equals(vdr1.ID()) {\n\t\tt.Fatalf(\"Should have returned vdr1\")\n\t}\n}\n\nfunc TestSamplerDuplicate(t *testing.T) {\n\tvdr0 := GenerateRandomValidator(1)\n\tvdr1_0 := GenerateRandomValidator(math.MaxInt64 - 1)\n\tvdr1_1 := NewValidator(vdr1_0.ID(), 0)\n\n\ts := NewSet()\n\ts.Add(vdr0)\n\ts.Add(vdr1_0)\n\n\tif sampled := s.Sample(1); len(sampled) != 1 {\n\t\tt.Fatalf(\"Should have sampled 1 validator\")\n\t} else if !sampled[0].ID().Equals(vdr1_0.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr1\")\n\t}\n\n\ts.Add(vdr1_1)\n\n\tif sampled := s.Sample(1); len(sampled) != 1 {\n\t\tt.Fatalf(\"Should have sampled 1 validator\")\n\t} else if !sampled[0].ID().Equals(vdr0.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr0\")\n\t}\n\n\tif sampled := s.Sample(2); len(sampled) != 1 {\n\t\tt.Fatalf(\"Should have only sampled 1 validator\")\n\t} else if !sampled[0].ID().Equals(vdr0.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr0\")\n\t}\n\n\ts.Remove(vdr1_1.ID())\n\n\tif sampled := s.Sample(2); len(sampled) != 1 {\n\t\tt.Fatalf(\"Should have only sampled 1 validator\")\n\t} else if !sampled[0].ID().Equals(vdr0.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr0\")\n\t}\n}\n\nfunc TestSamplerSimple(t *testing.T) {\n\tvdr := GenerateRandomValidator(1)\n\n\ts := NewSet()\n\ts.Add(vdr)\n\n\tif sampled := s.Sample(1); len(sampled) != 1 {\n\t\tt.Fatalf(\"Should have sampled 1 validator\")\n\t}\n}\n\nfunc TestSamplerContains(t *testing.T) {\n\tvdr := GenerateRandomValidator(1)\n\n\ts := NewSet()\n\ts.Add(vdr)\n\n\tif !s.Contains(vdr.ID()) {\n\t\tt.Fatalf(\"Should have contained validator\")\n\t}\n\n\ts.Remove(vdr.ID())\n\n\tif s.Contains(vdr.ID()) {\n\t\tt.Fatalf(\"Shouldn't have contained validator\")\n\t}\n}\n\nfunc TestSamplerString(t *testing.T) {\n\tvdr0 := NewValidator(ids.ShortEmpty, 1)\n\tvdr1 := NewValidator(\n\t\tids.NewShortID([20]byte{\n\t\t\t0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t\t\t0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t\t}),\n\t\tmath.MaxInt64-1,\n\t)\n\n\ts := NewSet()\n\ts.Add(vdr0)\n\ts.Add(vdr1)\n\n\texpected := \"Validator Set: (Size = 2)\\n\" +\n\t\t\" Validator[0]: 111111111111111111116DBWJs, 1\\n\" +\n\t\t\" Validator[1]: QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 9223372036854775806\"\n\tif str := s.String(); str != expected {\n\t\tt.Fatalf(\"Got:\\n%s\\nExpected:\\n%s\", str, expected)\n\t}\n}\n<commit_msg>snow: Add test for validators.Set.Set<commit_after>\/\/ (c) 2019-2020, Ava Labs, Inc. All rights reserved.\n\/\/ See the file LICENSE for licensing terms.\n\npackage validators\n\nimport (\n\t\"math\"\n\t\"testing\"\n\n\t\"github.com\/ava-labs\/gecko\/ids\"\n)\n\nfunc TestSetSet(t *testing.T) {\n\tvdr0 := NewValidator(ids.ShortEmpty, 1)\n\tvdr1_0 := NewValidator(ids.NewShortID([20]byte{0xFF}), 1)\n\t\/\/ Should replace vdr1_0, because later additions replace earlier ones\n\tvdr1_1 := NewValidator(ids.NewShortID([20]byte{0xFF}), math.MaxInt64-1)\n\t\/\/ Should be discarded, because it has a weight of 0\n\tvdr2 := NewValidator(ids.NewShortID([20]byte{0xAA}), 0)\n\n\ts := NewSet()\n\ts.Set([]Validator{vdr0, vdr1_0, vdr1_1, vdr2})\n\n\tif !s.Contains(vdr0.ID()) {\n\t\tt.Fatal(\"Should have contained vdr0\", vdr0.ID())\n\t}\n\tif !s.Contains(vdr1_0.ID()) {\n\t\tt.Fatal(\"Should have contained vdr1\", vdr1_0.ID())\n\t}\n\tif sampled := s.Sample(1); !sampled[0].ID().Equals(vdr1_0.ID()) {\n\t\tt.Fatal(\"Should have sampled vdr1\")\n\t}\n\tif len := s.Len(); len != 2 {\n\t\tt.Fatalf(\"Got size %d, expected 2\", len)\n\t}\n}\n\nfunc TestSamplerSample(t *testing.T) {\n\tvdr0 := GenerateRandomValidator(1)\n\tvdr1 := GenerateRandomValidator(math.MaxInt64 - 1)\n\n\ts := NewSet()\n\ts.Add(vdr0)\n\n\tif sampled := s.Sample(1); len(sampled) != 1 {\n\t\tt.Fatalf(\"Should have sampled 1 validator\")\n\t} else if !sampled[0].ID().Equals(vdr0.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr0\")\n\t} else if s.Len() != 1 {\n\t\tt.Fatalf(\"Wrong size\")\n\t}\n\n\ts.Add(vdr1)\n\n\tif sampled := s.Sample(1); len(sampled) != 1 {\n\t\tt.Fatalf(\"Should have sampled 1 validator\")\n\t} else if !sampled[0].ID().Equals(vdr1.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr1\")\n\t} else if s.Len() != 2 {\n\t\tt.Fatalf(\"Wrong size\")\n\t}\n\n\tif sampled := s.Sample(2); len(sampled) != 2 {\n\t\tt.Fatalf(\"Should have sampled 2 validators\")\n\t} else if !sampled[1].ID().Equals(vdr0.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr0\")\n\t} else if !sampled[0].ID().Equals(vdr1.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr1\")\n\t}\n\n\tif sampled := s.Sample(3); len(sampled) != 2 {\n\t\tt.Fatalf(\"Should have sampled 2 validators\")\n\t} else if !sampled[1].ID().Equals(vdr0.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr0\")\n\t} else if !sampled[0].ID().Equals(vdr1.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr1\")\n\t}\n\n\tif list := s.List(); len(list) != 2 {\n\t\tt.Fatalf(\"Should have returned 2 validators\")\n\t} else if !list[0].ID().Equals(vdr0.ID()) {\n\t\tt.Fatalf(\"Should have returned vdr0\")\n\t} else if !list[1].ID().Equals(vdr1.ID()) {\n\t\tt.Fatalf(\"Should have returned vdr1\")\n\t}\n}\n\nfunc TestSamplerDuplicate(t *testing.T) {\n\tvdr0 := GenerateRandomValidator(1)\n\tvdr1_0 := GenerateRandomValidator(math.MaxInt64 - 1)\n\tvdr1_1 := NewValidator(vdr1_0.ID(), 0)\n\n\ts := NewSet()\n\ts.Add(vdr0)\n\ts.Add(vdr1_0)\n\n\tif sampled := s.Sample(1); len(sampled) != 1 {\n\t\tt.Fatalf(\"Should have sampled 1 validator\")\n\t} else if !sampled[0].ID().Equals(vdr1_0.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr1\")\n\t}\n\n\ts.Add(vdr1_1)\n\n\tif sampled := s.Sample(1); len(sampled) != 1 {\n\t\tt.Fatalf(\"Should have sampled 1 validator\")\n\t} else if !sampled[0].ID().Equals(vdr0.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr0\")\n\t}\n\n\tif sampled := s.Sample(2); len(sampled) != 1 {\n\t\tt.Fatalf(\"Should have only sampled 1 validator\")\n\t} else if !sampled[0].ID().Equals(vdr0.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr0\")\n\t}\n\n\ts.Remove(vdr1_1.ID())\n\n\tif sampled := s.Sample(2); len(sampled) != 1 {\n\t\tt.Fatalf(\"Should have only sampled 1 validator\")\n\t} else if !sampled[0].ID().Equals(vdr0.ID()) {\n\t\tt.Fatalf(\"Should have sampled vdr0\")\n\t}\n}\n\nfunc TestSamplerSimple(t *testing.T) {\n\tvdr := GenerateRandomValidator(1)\n\n\ts := NewSet()\n\ts.Add(vdr)\n\n\tif sampled := s.Sample(1); len(sampled) != 1 {\n\t\tt.Fatalf(\"Should have sampled 1 validator\")\n\t}\n}\n\nfunc TestSamplerContains(t *testing.T) {\n\tvdr := GenerateRandomValidator(1)\n\n\ts := NewSet()\n\ts.Add(vdr)\n\n\tif !s.Contains(vdr.ID()) {\n\t\tt.Fatalf(\"Should have contained validator\")\n\t}\n\n\ts.Remove(vdr.ID())\n\n\tif s.Contains(vdr.ID()) {\n\t\tt.Fatalf(\"Shouldn't have contained validator\")\n\t}\n}\n\nfunc TestSamplerString(t *testing.T) {\n\tvdr0 := NewValidator(ids.ShortEmpty, 1)\n\tvdr1 := NewValidator(\n\t\tids.NewShortID([20]byte{\n\t\t\t0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t\t\t0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,\n\t\t}),\n\t\tmath.MaxInt64-1,\n\t)\n\n\ts := NewSet()\n\ts.Add(vdr0)\n\ts.Add(vdr1)\n\n\texpected := \"Validator Set: (Size = 2)\\n\" +\n\t\t\" Validator[0]: 111111111111111111116DBWJs, 1\\n\" +\n\t\t\" Validator[1]: QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 9223372036854775806\"\n\tif str := s.String(); str != expected {\n\t\tt.Fatalf(\"Got:\\n%s\\nExpected:\\n%s\", str, expected)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"fmt\"\nimport \"time\"\nimport \"bitbucket.org\/davidwallace\/go-opc\/colorutils\"\nimport \"github.com\/davecheney\/profile\"\n\nfunc main() {\n\n\tdefer profile.Start(profile.CPUProfile).Stop()\n\n\tconst N_PIXELS = 1000\n\tvar array = make([]byte, N_PIXELS*3)\n\n\tvar pct, r, g, b, t float64\n\tvar last_print = float64(time.Now().UnixNano()) \/ 1.0e9\n\tvar frames = 0\n\tvar start_time = last_print\n\tt = start_time\n\tfor t < start_time+5 {\n\t\tt = float64(time.Now().UnixNano()) \/ 1.0e9\n\t\tframes += 1\n\t\tif t > last_print+1 {\n\t\t\tlast_print = t\n\t\t\tfmt.Printf(\"%f ms (%d fps)\\n\", 1000.0\/float64(frames), frames)\n\t\t\tframes = 0\n\t\t}\n\t\tfor ii := 0; ii < N_PIXELS; ii++ {\n\t\t\tpct = float64(ii) \/ N_PIXELS\n\n\t\t\tr = pct\n\t\t\tg = pct\n\t\t\tb = pct\n\n\t\t\tarray[ii*3+0] = colorutils.FloatToByte(r)\n\t\t\tarray[ii*3+1] = colorutils.FloatToByte(g)\n\t\t\tarray[ii*3+2] = colorutils.FloatToByte(b)\n\t\t}\n\n\t\t\/\/for ii, v := range array {\n\t\t\/\/ fmt.Printf(\"array[%d] = %d\\n\", ii, v)\n\t\t\/\/}\n\t}\n\n}\n<commit_msg>Read from layout json file<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\t\"github.com\/davecheney\/profile\"\n\t\"bitbucket.org\/davidwallace\/go-opc\/colorutils\"\n)\n\n\/\/ read locations from JSON file into a slice of floats\nfunc readLocations(fn string) []float64 {\n\tlocations := make([]float64, 0)\n\tvar file *os.File\n\tvar err error\n\tif file, err = os.Open(fn); err != nil {\n\t\tpanic(fmt.Sprintf(\"could not open layout file: %s\", fn))\n\t}\n\tdefer file.Close()\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tif len(line) == 0 || line[0] == '[' || line[0] == ']' {\n\t\t\tcontinue\n\t\t}\n\t\tline = strings.Split(line, \"[\")[1]\n\t\tline = strings.Split(line, \"]\")[0]\n\t\tcoordStrings := strings.Split(line, \", \")\n\t\tvar x, y, z float64\n\t\tx, err = strconv.ParseFloat(coordStrings[0], 64)\n\t\ty, err = strconv.ParseFloat(coordStrings[1], 64)\n\t\tz, err = strconv.ParseFloat(coordStrings[2], 64)\n\t\tlocations = append(locations, x, y, z)\n\t}\n\tfmt.Printf(\"Read %v pixel locations from %s\\n\", len(locations), fn)\n\treturn locations\n}\n\nfunc main() {\n\tdefer profile.Start(profile.CPUProfile).Stop()\n\n\tpath := \"circle.json\"\n\n\tLOCATIONS := readLocations(path)\n\tN_PIXELS := len(LOCATIONS) \/ 3\n\tVALUES := make([]byte, N_PIXELS*3)\n\n\t\/\/ fill in values over and over\n\tvar pct, r, g, b, t float64\n\tvar last_print = float64(time.Now().UnixNano()) \/ 1.0e9\n\tvar frames = 0\n\tvar start_time = last_print\n\tt = start_time\n\tfor t < start_time+5 {\n\t\tt = float64(time.Now().UnixNano()) \/ 1.0e9\n\t\tframes += 1\n\t\tif t > last_print+1 {\n\t\t\tlast_print = t\n\t\t\tfmt.Printf(\"%f ms (%d fps)\\n\", 1000.0\/float64(frames), frames)\n\t\t\tframes = 0\n\t\t}\n\t\tfor ii := 0; ii < N_PIXELS; ii++ {\n\t\t\tpct = float64(ii) \/ float64(N_PIXELS)\n\n\t\t\tr = pct\n\t\t\tg = pct\n\t\t\tb = pct\n\n\t\t\tVALUES[ii*3+0] = colorutils.FloatToByte(r)\n\t\t\tVALUES[ii*3+1] = colorutils.FloatToByte(g)\n\t\t\tVALUES[ii*3+2] = colorutils.FloatToByte(b)\n\t\t}\n\n\t\t\/\/for ii, v := range VALUES {\n\t\t\/\/ fmt.Printf(\"VALUES[%d] = %d\\n\", ii, v)\n\t\t\/\/}\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\ntype Ops struct {\n\tConfig string\n\tRoot string\n\tType string\n\tDelay int\n\tVerbose bool\n\tVersion bool\n}\n<commit_msg>update Opt struct<commit_after>package main\n\n\/\/ Ops structure\ntype Ops struct {\n\tConfig string\n\tRoot string\n\tType string\n\tAddr string\n\tDelay int\n\tLoglevel string\n\tVersion bool\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage cmd\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"code.gitea.io\/git\"\n\t\"code.gitea.io\/gitea\/models\"\n\t\"code.gitea.io\/gitea\/modules\/log\"\n\t\"code.gitea.io\/gitea\/modules\/private\"\n\t\"code.gitea.io\/gitea\/modules\/setting\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\nvar (\n\t\/\/ CmdHook represents the available hooks sub-command.\n\tCmdHook = cli.Command{\n\t\tName: \"hook\",\n\t\tUsage: \"Delegate commands to corresponding Git hooks\",\n\t\tDescription: \"This should only be called by Git\",\n\t\tFlags: []cli.Flag{\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"config, c\",\n\t\t\t\tValue: \"custom\/conf\/app.ini\",\n\t\t\t\tUsage: \"Custom configuration file path\",\n\t\t\t},\n\t\t},\n\t\tSubcommands: []cli.Command{\n\t\t\tsubcmdHookPreReceive,\n\t\t\tsubcmdHookUpdate,\n\t\t\tsubcmdHookPostReceive,\n\t\t},\n\t}\n\n\tsubcmdHookPreReceive = cli.Command{\n\t\tName: \"pre-receive\",\n\t\tUsage: \"Delegate pre-receive Git hook\",\n\t\tDescription: \"This command should only be called by Git\",\n\t\tAction: runHookPreReceive,\n\t}\n\tsubcmdHookUpdate = cli.Command{\n\t\tName: \"update\",\n\t\tUsage: \"Delegate update Git hook\",\n\t\tDescription: \"This command should only be called by Git\",\n\t\tAction: runHookUpdate,\n\t}\n\tsubcmdHookPostReceive = cli.Command{\n\t\tName: \"post-receive\",\n\t\tUsage: \"Delegate post-receive Git hook\",\n\t\tDescription: \"This command should only be called by Git\",\n\t\tAction: runHookPostReceive,\n\t}\n)\n\nfunc hookSetup(logPath string) {\n\tsetting.NewContext()\n\tlog.NewGitLogger(filepath.Join(setting.LogRootPath, logPath))\n\tmodels.LoadConfigs()\n}\n\nfunc runHookPreReceive(c *cli.Context) error {\n\tif len(os.Getenv(\"SSH_ORIGINAL_COMMAND\")) == 0 {\n\t\treturn nil\n\t}\n\n\tif c.IsSet(\"config\") {\n\t\tsetting.CustomConf = c.String(\"config\")\n\t} else if c.GlobalIsSet(\"config\") {\n\t\tsetting.CustomConf = c.GlobalString(\"config\")\n\t}\n\n\thookSetup(\"hooks\/pre-receive.log\")\n\n\t\/\/ the environment setted on serv command\n\trepoID, _ := strconv.ParseInt(os.Getenv(models.ProtectedBranchRepoID), 10, 64)\n\tisWiki := (os.Getenv(models.EnvRepoIsWiki) == \"true\")\n\tusername := os.Getenv(models.EnvRepoUsername)\n\treponame := os.Getenv(models.EnvRepoName)\n\tuserIDStr := os.Getenv(models.EnvPusherID)\n\trepoPath := models.RepoPath(username, reponame)\n\n\tbuf := bytes.NewBuffer(nil)\n\tscanner := bufio.NewScanner(os.Stdin)\n\tfor scanner.Scan() {\n\t\tbuf.Write(scanner.Bytes())\n\t\tbuf.WriteByte('\\n')\n\n\t\t\/\/ TODO: support news feeds for wiki\n\t\tif isWiki {\n\t\t\tcontinue\n\t\t}\n\n\t\tfields := bytes.Fields(scanner.Bytes())\n\t\tif len(fields) != 3 {\n\t\t\tcontinue\n\t\t}\n\n\t\toldCommitID := string(fields[0])\n\t\tnewCommitID := string(fields[1])\n\t\trefFullName := string(fields[2])\n\n\t\tbranchName := strings.TrimPrefix(refFullName, git.BranchPrefix)\n\t\tprotectBranch, err := private.GetProtectedBranchBy(repoID, branchName)\n\t\tif err != nil {\n\t\t\tlog.GitLogger.Fatal(2, \"retrieve protected branches information failed\")\n\t\t}\n\n\t\tif protectBranch != nil && protectBranch.IsProtected() {\n\t\t\t\/\/ detect force push\n\t\t\tif git.EmptySHA != oldCommitID {\n\t\t\t\toutput, err := git.NewCommand(\"rev-list\", \"--max-count=1\", oldCommitID, \"^\"+newCommitID).RunInDir(repoPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfail(\"Internal error\", \"Fail to detect force push: %v\", err)\n\t\t\t\t} else if len(output) > 0 {\n\t\t\t\t\tfail(fmt.Sprintf(\"branch %s is protected from force push\", branchName), \"\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ check and deletion\n\t\t\tif newCommitID == git.EmptySHA {\n\t\t\t\tfail(fmt.Sprintf(\"branch %s is protected from deletion\", branchName), \"\")\n\t\t\t} else {\n\t\t\t\tuserID, _ := strconv.ParseInt(userIDStr, 10, 64)\n\t\t\t\tcanPush, err := private.CanUserPush(protectBranch.ID, userID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfail(\"Internal error\", \"Fail to detect user can push: %v\", err)\n\t\t\t\t} else if !canPush {\n\t\t\t\t\tfail(fmt.Sprintf(\"protected branch %s can not be pushed to\", branchName), \"\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc runHookUpdate(c *cli.Context) error {\n\tif len(os.Getenv(\"SSH_ORIGINAL_COMMAND\")) == 0 {\n\t\treturn nil\n\t}\n\n\tif c.IsSet(\"config\") {\n\t\tsetting.CustomConf = c.String(\"config\")\n\t} else if c.GlobalIsSet(\"config\") {\n\t\tsetting.CustomConf = c.GlobalString(\"config\")\n\t}\n\n\thookSetup(\"hooks\/update.log\")\n\n\treturn nil\n}\n\nfunc runHookPostReceive(c *cli.Context) error {\n\tif len(os.Getenv(\"SSH_ORIGINAL_COMMAND\")) == 0 {\n\t\treturn nil\n\t}\n\n\tif c.IsSet(\"config\") {\n\t\tsetting.CustomConf = c.String(\"config\")\n\t} else if c.GlobalIsSet(\"config\") {\n\t\tsetting.CustomConf = c.GlobalString(\"config\")\n\t}\n\n\thookSetup(\"hooks\/post-receive.log\")\n\n\t\/\/ the environment setted on serv command\n\trepoID, _ := strconv.ParseInt(os.Getenv(models.ProtectedBranchRepoID), 10, 64)\n\trepoUser := os.Getenv(models.EnvRepoUsername)\n\tisWiki := (os.Getenv(models.EnvRepoIsWiki) == \"true\")\n\trepoName := os.Getenv(models.EnvRepoName)\n\tpusherID, _ := strconv.ParseInt(os.Getenv(models.EnvPusherID), 10, 64)\n\tpusherName := os.Getenv(models.EnvPusherName)\n\n\tbuf := bytes.NewBuffer(nil)\n\tscanner := bufio.NewScanner(os.Stdin)\n\tfor scanner.Scan() {\n\t\tbuf.Write(scanner.Bytes())\n\t\tbuf.WriteByte('\\n')\n\n\t\t\/\/ TODO: support news feeds for wiki\n\t\tif isWiki {\n\t\t\tcontinue\n\t\t}\n\n\t\tfields := bytes.Fields(scanner.Bytes())\n\t\tif len(fields) != 3 {\n\t\t\tcontinue\n\t\t}\n\n\t\toldCommitID := string(fields[0])\n\t\tnewCommitID := string(fields[1])\n\t\trefFullName := string(fields[2])\n\n\t\tif err := private.PushUpdate(models.PushUpdateOptions{\n\t\t\tRefFullName: refFullName,\n\t\t\tOldCommitID: oldCommitID,\n\t\t\tNewCommitID: newCommitID,\n\t\t\tPusherID: pusherID,\n\t\t\tPusherName: pusherName,\n\t\t\tRepoUserName: repoUser,\n\t\t\tRepoName: repoName,\n\t\t}); err != nil {\n\t\t\tlog.GitLogger.Error(2, \"Update: %v\", err)\n\t\t}\n\n\t\tif strings.HasPrefix(refFullName, git.BranchPrefix) {\n\t\t\tbranch := strings.TrimPrefix(refFullName, git.BranchPrefix)\n\t\t\trepo, pullRequestAllowed, err := private.GetRepository(repoID)\n\t\t\tif err != nil {\n\t\t\t\tlog.GitLogger.Error(2, \"get repo: %v\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !pullRequestAllowed {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tbaseRepo := repo\n\t\t\tif repo.IsFork {\n\t\t\t\tbaseRepo = repo.BaseRepo\n\t\t\t}\n\n\t\t\tif !repo.IsFork && branch == baseRepo.DefaultBranch {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tpr, err := private.ActivePullRequest(baseRepo.ID, repo.ID, baseRepo.DefaultBranch, branch)\n\t\t\tif err != nil {\n\t\t\t\tlog.GitLogger.Error(2, \"get active pr: %v\", err)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tfmt.Fprintln(os.Stderr, \"\")\n\t\t\tif pr == nil {\n\t\t\t\tif repo.IsFork {\n\t\t\t\t\tbranch = fmt.Sprintf(\"%s:%s\", repo.OwnerName, branch)\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Create a new pull request for '%s':\\n\", branch)\n\t\t\t\tfmt.Fprintf(os.Stderr, \" %s\/compare\/%s...%s\\n\", baseRepo.HTMLURL(), url.QueryEscape(baseRepo.DefaultBranch), url.QueryEscape(branch))\n\t\t\t} else {\n\t\t\t\tfmt.Fprint(os.Stderr, \"Visit the existing pull request:\\n\")\n\t\t\t\tfmt.Fprintf(os.Stderr, \" %s\/pulls\/%d\\n\", baseRepo.HTMLURL(), pr.Index)\n\t\t\t}\n\t\t\tfmt.Fprintln(os.Stderr, \"\")\n\t\t}\n\n\t}\n\n\treturn nil\n}\n<commit_msg>fix showing pull request link when delete a branch (#5166)<commit_after>\/\/ Copyright 2017 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage cmd\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"code.gitea.io\/git\"\n\t\"code.gitea.io\/gitea\/models\"\n\t\"code.gitea.io\/gitea\/modules\/log\"\n\t\"code.gitea.io\/gitea\/modules\/private\"\n\t\"code.gitea.io\/gitea\/modules\/setting\"\n\n\t\"github.com\/urfave\/cli\"\n)\n\nvar (\n\t\/\/ CmdHook represents the available hooks sub-command.\n\tCmdHook = cli.Command{\n\t\tName: \"hook\",\n\t\tUsage: \"Delegate commands to corresponding Git hooks\",\n\t\tDescription: \"This should only be called by Git\",\n\t\tFlags: []cli.Flag{\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"config, c\",\n\t\t\t\tValue: \"custom\/conf\/app.ini\",\n\t\t\t\tUsage: \"Custom configuration file path\",\n\t\t\t},\n\t\t},\n\t\tSubcommands: []cli.Command{\n\t\t\tsubcmdHookPreReceive,\n\t\t\tsubcmdHookUpdate,\n\t\t\tsubcmdHookPostReceive,\n\t\t},\n\t}\n\n\tsubcmdHookPreReceive = cli.Command{\n\t\tName: \"pre-receive\",\n\t\tUsage: \"Delegate pre-receive Git hook\",\n\t\tDescription: \"This command should only be called by Git\",\n\t\tAction: runHookPreReceive,\n\t}\n\tsubcmdHookUpdate = cli.Command{\n\t\tName: \"update\",\n\t\tUsage: \"Delegate update Git hook\",\n\t\tDescription: \"This command should only be called by Git\",\n\t\tAction: runHookUpdate,\n\t}\n\tsubcmdHookPostReceive = cli.Command{\n\t\tName: \"post-receive\",\n\t\tUsage: \"Delegate post-receive Git hook\",\n\t\tDescription: \"This command should only be called by Git\",\n\t\tAction: runHookPostReceive,\n\t}\n)\n\nfunc hookSetup(logPath string) {\n\tsetting.NewContext()\n\tlog.NewGitLogger(filepath.Join(setting.LogRootPath, logPath))\n\tmodels.LoadConfigs()\n}\n\nfunc runHookPreReceive(c *cli.Context) error {\n\tif len(os.Getenv(\"SSH_ORIGINAL_COMMAND\")) == 0 {\n\t\treturn nil\n\t}\n\n\tif c.IsSet(\"config\") {\n\t\tsetting.CustomConf = c.String(\"config\")\n\t} else if c.GlobalIsSet(\"config\") {\n\t\tsetting.CustomConf = c.GlobalString(\"config\")\n\t}\n\n\thookSetup(\"hooks\/pre-receive.log\")\n\n\t\/\/ the environment setted on serv command\n\trepoID, _ := strconv.ParseInt(os.Getenv(models.ProtectedBranchRepoID), 10, 64)\n\tisWiki := (os.Getenv(models.EnvRepoIsWiki) == \"true\")\n\tusername := os.Getenv(models.EnvRepoUsername)\n\treponame := os.Getenv(models.EnvRepoName)\n\tuserIDStr := os.Getenv(models.EnvPusherID)\n\trepoPath := models.RepoPath(username, reponame)\n\n\tbuf := bytes.NewBuffer(nil)\n\tscanner := bufio.NewScanner(os.Stdin)\n\tfor scanner.Scan() {\n\t\tbuf.Write(scanner.Bytes())\n\t\tbuf.WriteByte('\\n')\n\n\t\t\/\/ TODO: support news feeds for wiki\n\t\tif isWiki {\n\t\t\tcontinue\n\t\t}\n\n\t\tfields := bytes.Fields(scanner.Bytes())\n\t\tif len(fields) != 3 {\n\t\t\tcontinue\n\t\t}\n\n\t\toldCommitID := string(fields[0])\n\t\tnewCommitID := string(fields[1])\n\t\trefFullName := string(fields[2])\n\n\t\tbranchName := strings.TrimPrefix(refFullName, git.BranchPrefix)\n\t\tprotectBranch, err := private.GetProtectedBranchBy(repoID, branchName)\n\t\tif err != nil {\n\t\t\tlog.GitLogger.Fatal(2, \"retrieve protected branches information failed\")\n\t\t}\n\n\t\tif protectBranch != nil && protectBranch.IsProtected() {\n\t\t\t\/\/ detect force push\n\t\t\tif git.EmptySHA != oldCommitID {\n\t\t\t\toutput, err := git.NewCommand(\"rev-list\", \"--max-count=1\", oldCommitID, \"^\"+newCommitID).RunInDir(repoPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfail(\"Internal error\", \"Fail to detect force push: %v\", err)\n\t\t\t\t} else if len(output) > 0 {\n\t\t\t\t\tfail(fmt.Sprintf(\"branch %s is protected from force push\", branchName), \"\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ check and deletion\n\t\t\tif newCommitID == git.EmptySHA {\n\t\t\t\tfail(fmt.Sprintf(\"branch %s is protected from deletion\", branchName), \"\")\n\t\t\t} else {\n\t\t\t\tuserID, _ := strconv.ParseInt(userIDStr, 10, 64)\n\t\t\t\tcanPush, err := private.CanUserPush(protectBranch.ID, userID)\n\t\t\t\tif err != nil {\n\t\t\t\t\tfail(\"Internal error\", \"Fail to detect user can push: %v\", err)\n\t\t\t\t} else if !canPush {\n\t\t\t\t\tfail(fmt.Sprintf(\"protected branch %s can not be pushed to\", branchName), \"\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc runHookUpdate(c *cli.Context) error {\n\tif len(os.Getenv(\"SSH_ORIGINAL_COMMAND\")) == 0 {\n\t\treturn nil\n\t}\n\n\tif c.IsSet(\"config\") {\n\t\tsetting.CustomConf = c.String(\"config\")\n\t} else if c.GlobalIsSet(\"config\") {\n\t\tsetting.CustomConf = c.GlobalString(\"config\")\n\t}\n\n\thookSetup(\"hooks\/update.log\")\n\n\treturn nil\n}\n\nfunc runHookPostReceive(c *cli.Context) error {\n\tif len(os.Getenv(\"SSH_ORIGINAL_COMMAND\")) == 0 {\n\t\treturn nil\n\t}\n\n\tif c.IsSet(\"config\") {\n\t\tsetting.CustomConf = c.String(\"config\")\n\t} else if c.GlobalIsSet(\"config\") {\n\t\tsetting.CustomConf = c.GlobalString(\"config\")\n\t}\n\n\thookSetup(\"hooks\/post-receive.log\")\n\n\t\/\/ the environment setted on serv command\n\trepoID, _ := strconv.ParseInt(os.Getenv(models.ProtectedBranchRepoID), 10, 64)\n\trepoUser := os.Getenv(models.EnvRepoUsername)\n\tisWiki := (os.Getenv(models.EnvRepoIsWiki) == \"true\")\n\trepoName := os.Getenv(models.EnvRepoName)\n\tpusherID, _ := strconv.ParseInt(os.Getenv(models.EnvPusherID), 10, 64)\n\tpusherName := os.Getenv(models.EnvPusherName)\n\n\tbuf := bytes.NewBuffer(nil)\n\tscanner := bufio.NewScanner(os.Stdin)\n\tfor scanner.Scan() {\n\t\tbuf.Write(scanner.Bytes())\n\t\tbuf.WriteByte('\\n')\n\n\t\t\/\/ TODO: support news feeds for wiki\n\t\tif isWiki {\n\t\t\tcontinue\n\t\t}\n\n\t\tfields := bytes.Fields(scanner.Bytes())\n\t\tif len(fields) != 3 {\n\t\t\tcontinue\n\t\t}\n\n\t\toldCommitID := string(fields[0])\n\t\tnewCommitID := string(fields[1])\n\t\trefFullName := string(fields[2])\n\n\t\tif err := private.PushUpdate(models.PushUpdateOptions{\n\t\t\tRefFullName: refFullName,\n\t\t\tOldCommitID: oldCommitID,\n\t\t\tNewCommitID: newCommitID,\n\t\t\tPusherID: pusherID,\n\t\t\tPusherName: pusherName,\n\t\t\tRepoUserName: repoUser,\n\t\t\tRepoName: repoName,\n\t\t}); err != nil {\n\t\t\tlog.GitLogger.Error(2, \"Update: %v\", err)\n\t\t}\n\n\t\tif newCommitID != git.EmptySHA && strings.HasPrefix(refFullName, git.BranchPrefix) {\n\t\t\tbranch := strings.TrimPrefix(refFullName, git.BranchPrefix)\n\t\t\trepo, pullRequestAllowed, err := private.GetRepository(repoID)\n\t\t\tif err != nil {\n\t\t\t\tlog.GitLogger.Error(2, \"get repo: %v\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !pullRequestAllowed {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tbaseRepo := repo\n\t\t\tif repo.IsFork {\n\t\t\t\tbaseRepo = repo.BaseRepo\n\t\t\t}\n\n\t\t\tif !repo.IsFork && branch == baseRepo.DefaultBranch {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tpr, err := private.ActivePullRequest(baseRepo.ID, repo.ID, baseRepo.DefaultBranch, branch)\n\t\t\tif err != nil {\n\t\t\t\tlog.GitLogger.Error(2, \"get active pr: %v\", err)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tfmt.Fprintln(os.Stderr, \"\")\n\t\t\tif pr == nil {\n\t\t\t\tif repo.IsFork {\n\t\t\t\t\tbranch = fmt.Sprintf(\"%s:%s\", repo.OwnerName, branch)\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Create a new pull request for '%s':\\n\", branch)\n\t\t\t\tfmt.Fprintf(os.Stderr, \" %s\/compare\/%s...%s\\n\", baseRepo.HTMLURL(), url.QueryEscape(baseRepo.DefaultBranch), url.QueryEscape(branch))\n\t\t\t} else {\n\t\t\t\tfmt.Fprint(os.Stderr, \"Visit the existing pull request:\\n\")\n\t\t\t\tfmt.Fprintf(os.Stderr, \" %s\/pulls\/%d\\n\", baseRepo.HTMLURL(), pr.Index)\n\t\t\t}\n\t\t\tfmt.Fprintln(os.Stderr, \"\")\n\t\t}\n\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package manifest\n\nimport (\n\t\"code.cloudfoundry.org\/cli\/plugin\/models\"\n\t\"github.com\/cloudfoundry-incubator\/candiedyaml\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Manifest\", func() {\n\n\tContext(\"For a manifest with no applications section\", func() {\n\n\t\tinput := map[string]interface{}{\n\t\t\t\"host\": \"bob\",\n\t\t\t\"routes\": []interface{}{\n\t\t\t\tmap[interface{}]interface{}{\"route\": \"example.com\"},\n\t\t\t\tmap[interface{}]interface{}{\"route\": \"www.example.com\/foo\"},\n\t\t\t\tmap[interface{}]interface{}{\"route\": \"tcp-example.com:1234\"},\n\t\t\t},\n\t\t}\n\t\tm := &Manifest{}\n\n\t\tContext(\"the getAppMaps function\", func() {\n\t\t\tappMaps, err := m.getAppMaps(input)\n\t\t\tIt(\"does not error\", func() {\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t})\n\n\t\t\tIt(\"should return one entry\", func() {\n\t\t\t\tExpect(len(appMaps)).To(Equal(1))\n\t\t\t})\n\n\t\t\tIt(\"should return global properties\", func() {\n\t\t\t\tExpect(appMaps).To(Equal([]map[string]interface{}{input}))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"the parseRoutes function\", func() {\n\t\t\terrs := []error{}\n\t\t\trouteStuff := parseRoutes(input, &errs)\n\n\t\t\tIt(\"does not error\", func() {\n\t\t\t\tExpect(len(errs)).To(Equal(0))\n\t\t\t})\n\n\t\t\tIt(\"should return three routes\", func() {\n\t\t\t\tExpect(len(routeStuff)).To(Equal(3))\n\t\t\t})\n\n\t\t\tIt(\"should return global properties\", func() {\n\t\t\t\t\/\/ We're only testing for domain because of limitations in the route struct\n\t\t\t\tExpect(routeStuff[0].Domain.Name).To(Equal(\"example.com\"))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"For a manifest with an applications section\", func() {\n\t\tapplicationsContents := []interface{}{map[string]string{\n\t\t\t\"fred\": \"hello\",\n\t\t}}\n\t\tinput := map[string]interface{}{\n\t\t\t\"applications\": applicationsContents,\n\t\t\t\"host\": \"bob\",\n\t\t}\n\n\t\tm := &Manifest{}\n\t\tappMaps, err := m.getAppMaps(input)\n\n\t\tContext(\"the AppMaps function\", func() {\n\t\t\tIt(\"does not error\", func() {\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t})\n\n\t\t\tIt(\"should not alter what gets passed in\", func() {\n\n\t\t\t\tExpect(input[\"applications\"]).To(Equal(applicationsContents))\n\t\t\t\t\/\/ Make sure this doesn't change what's passed in\n\t\t\t\tExpect(input[\"applications\"]).To(Equal(applicationsContents))\n\n\t\t\t})\n\n\t\t\tIt(\"should return one entry\", func() {\n\t\t\t\tExpect(len(appMaps)).To(Equal(1))\n\t\t\t})\n\n\t\t\tIt(\"should merge global properties with application-level properties\", func() {\n\n\t\t\t\tExpect(appMaps[0][\"host\"]).To(Equal(\"bob\"))\n\t\t\t\tExpect(appMaps[0][\"fred\"]).To(Equal(\"hello\"))\n\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"For a manifest with two applications in the applications section\", func() {\n\t\tapplicationsContents := []interface{}{map[string]string{\n\t\t\t\"fred\": \"hello\",\n\t\t},\n\t\t\tmap[string]string{\n\t\t\t\t\"george\": \"goodbye\",\n\t\t\t}}\n\t\tinput := map[string]interface{}{\n\t\t\t\"applications\": applicationsContents,\n\t\t\t\"host\": \"bob\",\n\t\t}\n\n\t\tm := &Manifest{}\n\t\tappMaps, err := m.getAppMaps(input)\n\n\t\tContext(\"the AppMaps function\", func() {\n\t\t\tIt(\"does not error\", func() {\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t})\n\n\t\t\tIt(\"should not alter what gets passed in\", func() {\n\n\t\t\t\tExpect(input[\"applications\"]).To(Equal(applicationsContents))\n\t\t\t\t\/\/ Make sure this doesn't change what's passed in\n\t\t\t\tExpect(input[\"applications\"]).To(Equal(applicationsContents))\n\n\t\t\t})\n\n\t\t\tIt(\"should return two entry\", func() {\n\t\t\t\tExpect(len(appMaps)).To(Equal(2))\n\t\t\t})\n\n\t\t\tIt(\"should merge global properties with application-level properties\", func() {\n\n\t\t\t\tExpect(appMaps[0][\"host\"]).To(Equal(\"bob\"))\n\t\t\t\tExpect(appMaps[0][\"fred\"]).To(Equal(\"hello\"))\n\t\t\t\tExpect(appMaps[0][\"george\"]).To(BeNil())\n\n\t\t\t\tExpect(appMaps[1][\"host\"]).To(Equal(\"bob\"))\n\t\t\t\tExpect(appMaps[1][\"george\"]).To(Equal(\"goodbye\"))\n\t\t\t\tExpect(appMaps[1][\"fred\"]).To(BeNil())\n\n\t\t\t})\n\t\t})\n\t})\n\n})\n\nvar _ = Describe(\"CloneWithExclude\", func() {\n\n\tContext(\"When the map contains some values and excludeKey exists\", func() {\n\n\t\tinput := map[string]interface{}{\n\t\t\t\"one\": 1,\n\t\t\t\"two\": 2138,\n\t\t\t\"three\": 1908,\n\t\t}\n\n\t\texcludeKey := \"two\"\n\n\t\tactual := cloneWithExclude(input, excludeKey)\n\n\t\tIt(\"should return a new map without the excludeKey\", func() {\n\n\t\t\texpected := map[string]interface{}{\n\t\t\t\t\"one\": 1,\n\t\t\t\t\"three\": 1908,\n\t\t\t}\n\n\t\t\tExpect(actual).To(Equal(expected))\n\t\t})\n\n\t\tIt(\"should not alter the original map\", func() {\n\t\t\tExpect(input[\"two\"]).To(Equal(2138))\n\t\t})\n\t})\n\n\tContext(\"When the map contains some values and excludeKey does not exist\", func() {\n\t\tIt(\"should return a new map with the same contents as the original\", func() {\n\t\t\tinput := map[string]interface{}{\n\t\t\t\t\"one\": 1,\n\t\t\t\t\"two\": 2138,\n\t\t\t\t\"three\": 1908,\n\t\t\t}\n\n\t\t\texcludeKey := \"four\"\n\n\t\t\tactual := cloneWithExclude(input, excludeKey)\n\n\t\t\tExpect(actual).To(Equal(input))\n\t\t})\n\t})\n\n\tContext(\"When the map contains a key that includes the excludeKey\", func() {\n\t\tIt(\"should return a new map with the same contents as the original\", func() {\n\t\t\tinput := map[string]interface{}{\n\t\t\t\t\"one\": 1,\n\t\t\t\t\"two\": 2138,\n\t\t\t\t\"threefour\": 1908,\n\t\t\t}\n\n\t\t\texcludeKey := \"four\"\n\n\t\t\tactual := cloneWithExclude(input, excludeKey)\n\n\t\t\tExpect(actual).To(Equal(input))\n\t\t})\n\t})\n\n\tContext(\"When the map is empty\", func() {\n\t\tIt(\"should return a new empty map\", func() {\n\t\t\tinput := map[string]interface{}{}\n\n\t\t\texcludeKey := \"one\"\n\n\t\t\tactual := cloneWithExclude(input, excludeKey)\n\n\t\t\tExpect(actual).To(Equal(input))\n\t\t})\n\t})\n\n\tContext(\"when the manifest contains a different app name\", func() {\n\t\tmanifest := manifestFromYamlString(`---\n\t name: bar\n\t host: foo`)\n\n\t\tIt(\"Returns nil\", func() {\n\t\t\tExpect(manifest.GetAppParams(\"appname\", \"domain\")).To(BeNil())\n\t\t})\n\n\t\tContext(\"when the manifest contain a host but no app name\", func() {\n\t\t\tmanifest := manifestFromYamlString(`---\nhost: foo`)\n\n\t\t\tIt(\"Returns params that contain the host\", func() {\n\n\t\t\t\tvar hostNames []string\n\n\t\t\t\tfor _, route := range manifest.GetAppParams(\"foo\", \"something.com\").Routes {\n\t\t\t\t\thostNames = append(hostNames, route.Host)\n\t\t\t\t}\n\n\t\t\t\tExpect(hostNames).To(ContainElement(\"foo\"))\n\t\t\t})\n\t\t})\n\n\t\tDescribe(\"Route Lister\", func() {\n\t\t\tIt(\"returns a list of Routes from the manifest\", func() {\n\t\t\t\tmanifest := manifestFromYamlString(`---\nname: foo\nhosts:\n - host1\n - host2\ndomains:\n - example.com\n - example.net`)\n\n\t\t\t\tparams := manifest.GetAppParams(\"foo\", \"example.com\")\n\n\t\t\t\tExpect(params).ToNot(BeNil())\n\t\t\t\tExpect(params.Routes).ToNot(BeNil())\n\n\t\t\t\troutes := params.Routes\n\t\t\t\tExpect(routes).To(ConsistOf(\n\t\t\t\t\tplugin_models.GetApp_RouteSummary{Host: \"host1\", Domain: plugin_models.GetApp_DomainFields{Name: \"example.com\"}},\n\t\t\t\t\tplugin_models.GetApp_RouteSummary{Host: \"host1\", Domain: plugin_models.GetApp_DomainFields{Name: \"example.net\"}},\n\t\t\t\t\tplugin_models.GetApp_RouteSummary{Host: \"host2\", Domain: plugin_models.GetApp_DomainFields{Name: \"example.com\"}},\n\t\t\t\t\tplugin_models.GetApp_RouteSummary{Host: \"host2\", Domain: plugin_models.GetApp_DomainFields{Name: \"example.net\"}},\n\t\t\t\t))\n\t\t\t})\n\n\t\t\tContext(\"when app has just hosts, no domains\", func() {\n\t\t\t\tIt(\"returns Application\", func() {\n\t\t\t\t\tmanifest := manifestFromYamlString(`---\nname: foo\nhosts:\n - host1\n - host2`)\n\n\t\t\t\t\tparams := manifest.GetAppParams(\"foo\", \"example.com\")\n\t\t\t\t\tExpect(params).ToNot(BeNil())\n\t\t\t\t\tExpect(params.Routes).ToNot(BeNil())\n\n\t\t\t\t\troutes := params.Routes\n\t\t\t\t\tExpect(routes).To(ConsistOf(\n\t\t\t\t\t\tplugin_models.GetApp_RouteSummary{Host: \"host1\", Domain: plugin_models.GetApp_DomainFields{Name: \"example.com\"}},\n\t\t\t\t\t\tplugin_models.GetApp_RouteSummary{Host: \"host2\", Domain: plugin_models.GetApp_DomainFields{Name: \"example.com\"}},\n\t\t\t\t\t))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tPContext(\"when app has just routes, no hosts or domains\", func() {\n\t\t\t\tIt(\"returns those routes\", func() {\n\t\t\t\t\tmanifest := manifestFromYamlString(`---\nname: foo\nroutes:\n - route1.domain1\n - route2.domain2`)\n\n\t\t\t\t\tparams := manifest.GetAppParams(\"foo\", \"example.com\")\n\n\t\t\t\t\tExpect(params).To(ConsistOf(\n\t\t\t\t\t\tplugin_models.GetApp_RouteSummary{Host: \"route1\", Domain: plugin_models.GetApp_DomainFields{Name: \"domain1\"}},\n\t\t\t\t\t\tplugin_models.GetApp_RouteSummary{Host: \"route2\", Domain: plugin_models.GetApp_DomainFields{Name: \"domain2\"}},\n\t\t\t\t\t))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when no matching application\", func() {\n\t\t\t\tIt(\"returns nil\", func() {\n\t\t\t\t\tmanifest := manifestFromYamlString(``)\n\n\t\t\t\t\tExpect(manifest.GetAppParams(\"foo\", \"example.com\")).To(BeNil())\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t})\n\n\tContext(\"when the manifest contains multiple apps with 1 matching\", func() {\n\t\tmanifest := manifestFromYamlString(`---\napplications:\n - name: bar\n host: barhost\n - name: foo\n hosts:\n - host1\n - host2\n domains:\n - example1.com\n - example2.com`)\n\t\tIt(\"Returns the correct app\", func() {\n\n\t\t\tvar hostNames []string\n\t\t\tvar domainNames []string\n\n\t\t\tappParams := manifest.GetAppParams(\"foo\", \"\")\n\t\t\tExpect(appParams).ToNot(BeNil())\n\n\t\t\troutes := appParams.Routes\n\t\t\tExpect(routes).ToNot(BeNil())\n\t\t\tfor _, route := range routes {\n\t\t\t\thostNames = append(hostNames, route.Host)\n\t\t\t\tdomainNames = append(domainNames, route.Domain.Name)\n\t\t\t}\n\n\t\t\thostNames = deDuplicate(hostNames)\n\t\t\tdomainNames = deDuplicate(domainNames)\n\n\t\t\tExpect(manifest.GetAppParams(\"foo\", \"\").Name).To(Equal(\"foo\"))\n\t\t\tExpect(hostNames).To(ConsistOf(\"host1\", \"host2\"))\n\t\t\tExpect(domainNames).To(ConsistOf(\"example1.com\", \"example2.com\"))\n\t\t})\n\t})\n})\n\nfunc deDuplicate(ary []string) []string {\n\tif ary == nil {\n\t\treturn nil\n\t}\n\n\tm := make(map[string]bool)\n\tfor _, v := range ary {\n\t\tm[v] = true\n\t}\n\n\tnewAry := []string{}\n\tfor _, val := range ary {\n\t\tif m[val] {\n\t\t\tnewAry = append(newAry, val)\n\t\t\tm[val] = false\n\t\t}\n\t}\n\treturn newAry\n}\n\nfunc manifestFromYamlString(yamlString string) *Manifest {\n\tyamlMap := make(map[string]interface{})\n\tcandiedyaml.Unmarshal([]byte(yamlString), &yamlMap)\n\treturn &Manifest{Data: yamlMap}\n}\n<commit_msg>shorter and stronger test for host with no app name<commit_after>package manifest\n\nimport (\n\t\"code.cloudfoundry.org\/cli\/plugin\/models\"\n\t\"github.com\/cloudfoundry-incubator\/candiedyaml\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Manifest\", func() {\n\n\tContext(\"For a manifest with no applications section\", func() {\n\n\t\tinput := map[string]interface{}{\n\t\t\t\"host\": \"bob\",\n\t\t\t\"routes\": []interface{}{\n\t\t\t\tmap[interface{}]interface{}{\"route\": \"example.com\"},\n\t\t\t\tmap[interface{}]interface{}{\"route\": \"www.example.com\/foo\"},\n\t\t\t\tmap[interface{}]interface{}{\"route\": \"tcp-example.com:1234\"},\n\t\t\t},\n\t\t}\n\t\tm := &Manifest{}\n\n\t\tContext(\"the getAppMaps function\", func() {\n\t\t\tappMaps, err := m.getAppMaps(input)\n\t\t\tIt(\"does not error\", func() {\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t})\n\n\t\t\tIt(\"should return one entry\", func() {\n\t\t\t\tExpect(len(appMaps)).To(Equal(1))\n\t\t\t})\n\n\t\t\tIt(\"should return global properties\", func() {\n\t\t\t\tExpect(appMaps).To(Equal([]map[string]interface{}{input}))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"the parseRoutes function\", func() {\n\t\t\terrs := []error{}\n\t\t\trouteStuff := parseRoutes(input, &errs)\n\n\t\t\tIt(\"does not error\", func() {\n\t\t\t\tExpect(len(errs)).To(Equal(0))\n\t\t\t})\n\n\t\t\tIt(\"should return three routes\", func() {\n\t\t\t\tExpect(len(routeStuff)).To(Equal(3))\n\t\t\t})\n\n\t\t\tIt(\"should return global properties\", func() {\n\t\t\t\t\/\/ We're only testing for domain because of limitations in the route struct\n\t\t\t\tExpect(routeStuff[0].Domain.Name).To(Equal(\"example.com\"))\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"For a manifest with an applications section\", func() {\n\t\tapplicationsContents := []interface{}{map[string]string{\n\t\t\t\"fred\": \"hello\",\n\t\t}}\n\t\tinput := map[string]interface{}{\n\t\t\t\"applications\": applicationsContents,\n\t\t\t\"host\": \"bob\",\n\t\t}\n\n\t\tm := &Manifest{}\n\t\tappMaps, err := m.getAppMaps(input)\n\n\t\tContext(\"the AppMaps function\", func() {\n\t\t\tIt(\"does not error\", func() {\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t})\n\n\t\t\tIt(\"should not alter what gets passed in\", func() {\n\n\t\t\t\tExpect(input[\"applications\"]).To(Equal(applicationsContents))\n\t\t\t\t\/\/ Make sure this doesn't change what's passed in\n\t\t\t\tExpect(input[\"applications\"]).To(Equal(applicationsContents))\n\n\t\t\t})\n\n\t\t\tIt(\"should return one entry\", func() {\n\t\t\t\tExpect(len(appMaps)).To(Equal(1))\n\t\t\t})\n\n\t\t\tIt(\"should merge global properties with application-level properties\", func() {\n\n\t\t\t\tExpect(appMaps[0][\"host\"]).To(Equal(\"bob\"))\n\t\t\t\tExpect(appMaps[0][\"fred\"]).To(Equal(\"hello\"))\n\n\t\t\t})\n\t\t})\n\t})\n\n\tContext(\"For a manifest with two applications in the applications section\", func() {\n\t\tapplicationsContents := []interface{}{map[string]string{\n\t\t\t\"fred\": \"hello\",\n\t\t},\n\t\t\tmap[string]string{\n\t\t\t\t\"george\": \"goodbye\",\n\t\t\t}}\n\t\tinput := map[string]interface{}{\n\t\t\t\"applications\": applicationsContents,\n\t\t\t\"host\": \"bob\",\n\t\t}\n\n\t\tm := &Manifest{}\n\t\tappMaps, err := m.getAppMaps(input)\n\n\t\tContext(\"the AppMaps function\", func() {\n\t\t\tIt(\"does not error\", func() {\n\t\t\t\tExpect(err).To(BeNil())\n\t\t\t})\n\n\t\t\tIt(\"should not alter what gets passed in\", func() {\n\n\t\t\t\tExpect(input[\"applications\"]).To(Equal(applicationsContents))\n\t\t\t\t\/\/ Make sure this doesn't change what's passed in\n\t\t\t\tExpect(input[\"applications\"]).To(Equal(applicationsContents))\n\n\t\t\t})\n\n\t\t\tIt(\"should return two entry\", func() {\n\t\t\t\tExpect(len(appMaps)).To(Equal(2))\n\t\t\t})\n\n\t\t\tIt(\"should merge global properties with application-level properties\", func() {\n\n\t\t\t\tExpect(appMaps[0][\"host\"]).To(Equal(\"bob\"))\n\t\t\t\tExpect(appMaps[0][\"fred\"]).To(Equal(\"hello\"))\n\t\t\t\tExpect(appMaps[0][\"george\"]).To(BeNil())\n\n\t\t\t\tExpect(appMaps[1][\"host\"]).To(Equal(\"bob\"))\n\t\t\t\tExpect(appMaps[1][\"george\"]).To(Equal(\"goodbye\"))\n\t\t\t\tExpect(appMaps[1][\"fred\"]).To(BeNil())\n\n\t\t\t})\n\t\t})\n\t})\n\n})\n\nvar _ = Describe(\"CloneWithExclude\", func() {\n\n\tContext(\"When the map contains some values and excludeKey exists\", func() {\n\n\t\tinput := map[string]interface{}{\n\t\t\t\"one\": 1,\n\t\t\t\"two\": 2138,\n\t\t\t\"three\": 1908,\n\t\t}\n\n\t\texcludeKey := \"two\"\n\n\t\tactual := cloneWithExclude(input, excludeKey)\n\n\t\tIt(\"should return a new map without the excludeKey\", func() {\n\n\t\t\texpected := map[string]interface{}{\n\t\t\t\t\"one\": 1,\n\t\t\t\t\"three\": 1908,\n\t\t\t}\n\n\t\t\tExpect(actual).To(Equal(expected))\n\t\t})\n\n\t\tIt(\"should not alter the original map\", func() {\n\t\t\tExpect(input[\"two\"]).To(Equal(2138))\n\t\t})\n\t})\n\n\tContext(\"When the map contains some values and excludeKey does not exist\", func() {\n\t\tIt(\"should return a new map with the same contents as the original\", func() {\n\t\t\tinput := map[string]interface{}{\n\t\t\t\t\"one\": 1,\n\t\t\t\t\"two\": 2138,\n\t\t\t\t\"three\": 1908,\n\t\t\t}\n\n\t\t\texcludeKey := \"four\"\n\n\t\t\tactual := cloneWithExclude(input, excludeKey)\n\n\t\t\tExpect(actual).To(Equal(input))\n\t\t})\n\t})\n\n\tContext(\"When the map contains a key that includes the excludeKey\", func() {\n\t\tIt(\"should return a new map with the same contents as the original\", func() {\n\t\t\tinput := map[string]interface{}{\n\t\t\t\t\"one\": 1,\n\t\t\t\t\"two\": 2138,\n\t\t\t\t\"threefour\": 1908,\n\t\t\t}\n\n\t\t\texcludeKey := \"four\"\n\n\t\t\tactual := cloneWithExclude(input, excludeKey)\n\n\t\t\tExpect(actual).To(Equal(input))\n\t\t})\n\t})\n\n\tContext(\"When the map is empty\", func() {\n\t\tIt(\"should return a new empty map\", func() {\n\t\t\tinput := map[string]interface{}{}\n\n\t\t\texcludeKey := \"one\"\n\n\t\t\tactual := cloneWithExclude(input, excludeKey)\n\n\t\t\tExpect(actual).To(Equal(input))\n\t\t})\n\t})\n\n\tContext(\"when the manifest contains a different app name\", func() {\n\t\tmanifest := manifestFromYamlString(`---\n\t name: bar\n\t host: foo`)\n\n\t\tIt(\"Returns nil\", func() {\n\t\t\tExpect(manifest.GetAppParams(\"appname\", \"domain\")).To(BeNil())\n\t\t})\n\n\t\tContext(\"when the manifest contain a host but no app name\", func() {\n\t\t\tmanifest := manifestFromYamlString(`---\nhost: foo`)\n\n\t\t\tIt(\"Returns params that contain the host\", func() {\n\n\t\t\t\troutes := manifest.GetAppParams(\"foo\", \"something.com\").Routes\n\t\t\t\tExpect(routes).To(ConsistOf(\n\t\t\t\t\tplugin_models.GetApp_RouteSummary{Host: \"foo\", Domain: plugin_models.GetApp_DomainFields{Name: \"something.com\"}},\n\t\t\t\t))\n\t\t\t})\n\t\t})\n\n\t\tDescribe(\"Route Lister\", func() {\n\t\t\tIt(\"returns a list of Routes from the manifest\", func() {\n\t\t\t\tmanifest := manifestFromYamlString(`---\nname: foo\nhosts:\n - host1\n - host2\ndomains:\n - example.com\n - example.net`)\n\n\t\t\t\tparams := manifest.GetAppParams(\"foo\", \"example.com\")\n\n\t\t\t\tExpect(params).ToNot(BeNil())\n\t\t\t\tExpect(params.Routes).ToNot(BeNil())\n\n\t\t\t\troutes := params.Routes\n\t\t\t\tExpect(routes).To(ConsistOf(\n\t\t\t\t\tplugin_models.GetApp_RouteSummary{Host: \"host1\", Domain: plugin_models.GetApp_DomainFields{Name: \"example.com\"}},\n\t\t\t\t\tplugin_models.GetApp_RouteSummary{Host: \"host1\", Domain: plugin_models.GetApp_DomainFields{Name: \"example.net\"}},\n\t\t\t\t\tplugin_models.GetApp_RouteSummary{Host: \"host2\", Domain: plugin_models.GetApp_DomainFields{Name: \"example.com\"}},\n\t\t\t\t\tplugin_models.GetApp_RouteSummary{Host: \"host2\", Domain: plugin_models.GetApp_DomainFields{Name: \"example.net\"}},\n\t\t\t\t))\n\t\t\t})\n\n\t\t\tContext(\"when app has just hosts, no domains\", func() {\n\t\t\t\tIt(\"returns Application\", func() {\n\t\t\t\t\tmanifest := manifestFromYamlString(`---\nname: foo\nhosts:\n - host1\n - host2`)\n\n\t\t\t\t\tparams := manifest.GetAppParams(\"foo\", \"example.com\")\n\t\t\t\t\tExpect(params).ToNot(BeNil())\n\t\t\t\t\tExpect(params.Routes).ToNot(BeNil())\n\n\t\t\t\t\troutes := params.Routes\n\t\t\t\t\tExpect(routes).To(ConsistOf(\n\t\t\t\t\t\tplugin_models.GetApp_RouteSummary{Host: \"host1\", Domain: plugin_models.GetApp_DomainFields{Name: \"example.com\"}},\n\t\t\t\t\t\tplugin_models.GetApp_RouteSummary{Host: \"host2\", Domain: plugin_models.GetApp_DomainFields{Name: \"example.com\"}},\n\t\t\t\t\t))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tPContext(\"when app has just routes, no hosts or domains\", func() {\n\t\t\t\tIt(\"returns those routes\", func() {\n\t\t\t\t\tmanifest := manifestFromYamlString(`---\nname: foo\nroutes:\n - route1.domain1\n - route2.domain2`)\n\n\t\t\t\t\tparams := manifest.GetAppParams(\"foo\", \"example.com\")\n\n\t\t\t\t\tExpect(params).To(ConsistOf(\n\t\t\t\t\t\tplugin_models.GetApp_RouteSummary{Host: \"route1\", Domain: plugin_models.GetApp_DomainFields{Name: \"domain1\"}},\n\t\t\t\t\t\tplugin_models.GetApp_RouteSummary{Host: \"route2\", Domain: plugin_models.GetApp_DomainFields{Name: \"domain2\"}},\n\t\t\t\t\t))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when no matching application\", func() {\n\t\t\t\tIt(\"returns nil\", func() {\n\t\t\t\t\tmanifest := manifestFromYamlString(``)\n\n\t\t\t\t\tExpect(manifest.GetAppParams(\"foo\", \"example.com\")).To(BeNil())\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t})\n\n\tContext(\"when the manifest contains multiple apps with 1 matching\", func() {\n\t\tmanifest := manifestFromYamlString(`---\napplications:\n - name: bar\n host: barhost\n - name: foo\n hosts:\n - host1\n - host2\n domains:\n - example1.com\n - example2.com`)\n\t\tIt(\"Returns the correct app\", func() {\n\n\t\t\tvar hostNames []string\n\t\t\tvar domainNames []string\n\n\t\t\tappParams := manifest.GetAppParams(\"foo\", \"\")\n\t\t\tExpect(appParams).ToNot(BeNil())\n\n\t\t\troutes := appParams.Routes\n\t\t\tExpect(routes).ToNot(BeNil())\n\t\t\tfor _, route := range routes {\n\t\t\t\thostNames = append(hostNames, route.Host)\n\t\t\t\tdomainNames = append(domainNames, route.Domain.Name)\n\t\t\t}\n\n\t\t\thostNames = deDuplicate(hostNames)\n\t\t\tdomainNames = deDuplicate(domainNames)\n\n\t\t\tExpect(manifest.GetAppParams(\"foo\", \"\").Name).To(Equal(\"foo\"))\n\t\t\tExpect(hostNames).To(ConsistOf(\"host1\", \"host2\"))\n\t\t\tExpect(domainNames).To(ConsistOf(\"example1.com\", \"example2.com\"))\n\t\t})\n\t})\n})\n\nfunc deDuplicate(ary []string) []string {\n\tif ary == nil {\n\t\treturn nil\n\t}\n\n\tm := make(map[string]bool)\n\tfor _, v := range ary {\n\t\tm[v] = true\n\t}\n\n\tnewAry := []string{}\n\tfor _, val := range ary {\n\t\tif m[val] {\n\t\t\tnewAry = append(newAry, val)\n\t\t\tm[val] = false\n\t\t}\n\t}\n\treturn newAry\n}\n\nfunc manifestFromYamlString(yamlString string) *Manifest {\n\tyamlMap := make(map[string]interface{})\n\tcandiedyaml.Unmarshal([]byte(yamlString), &yamlMap)\n\treturn &Manifest{Data: yamlMap}\n}\n<|endoftext|>"} {"text":"<commit_before>package cluster\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/zalando\/postgres-operator\/pkg\/spec\"\n\t\"github.com\/zalando\/postgres-operator\/pkg\/util\"\n\tv1 \"k8s.io\/api\/core\/v1\"\n)\n\n\/\/ VersionMap Map of version numbers\nvar VersionMap = map[string]int{\n\t\"9.5\": 90500,\n\t\"9.6\": 90600,\n\t\"10\": 100000,\n\t\"11\": 110000,\n\t\"12\": 120000,\n\t\"13\": 130000,\n\t\"14\": 140000,\n}\n\n\/\/ IsBiggerPostgresVersion Compare two Postgres version numbers\nfunc IsBiggerPostgresVersion(old string, new string) bool {\n\toldN := VersionMap[old]\n\tnewN := VersionMap[new]\n\treturn newN > oldN\n}\n\n\/\/ GetDesiredMajorVersionAsInt Convert string to comparable integer of PG version\nfunc (c *Cluster) GetDesiredMajorVersionAsInt() int {\n\treturn VersionMap[c.GetDesiredMajorVersion()]\n}\n\n\/\/ GetDesiredMajorVersion returns major version to use, incl. potential auto upgrade\nfunc (c *Cluster) GetDesiredMajorVersion() string {\n\n\tif c.Config.OpConfig.MajorVersionUpgradeMode == \"full\" {\n\t\t\/\/ e.g. current is 9.6, minimal is 11 allowing 11 to 14 clusters, everything below is upgraded\n\t\tif IsBiggerPostgresVersion(c.Spec.PgVersion, c.Config.OpConfig.MinimalMajorVersion) {\n\t\t\tc.logger.Infof(\"overwriting configured major version %s to %s\", c.Spec.PgVersion, c.Config.OpConfig.TargetMajorVersion)\n\t\t\treturn c.Config.OpConfig.TargetMajorVersion\n\t\t}\n\t}\n\n\treturn c.Spec.PgVersion\n}\n\nfunc (c *Cluster) isUpgradeAllowedForTeam(owningTeam string) bool {\n\tallowedTeams := c.OpConfig.MajorVersionUpgradeTeamAllowList\n\n\tif len(allowedTeams) == 0 {\n\t\treturn false\n\t}\n\n\treturn util.SliceContains(allowedTeams, owningTeam)\n}\n\n\/*\n Execute upgrade when mode is set to manual or full or when the owning team is allowed for upgrade (and mode is \"off\").\n\n Manual upgrade means, it is triggered by the user via manifest version change\n Full upgrade means, operator also determines the minimal version used accross all clusters and upgrades violators.\n*\/\nfunc (c *Cluster) majorVersionUpgrade() error {\n\n\tif c.OpConfig.MajorVersionUpgradeMode == \"off\" && !c.isUpgradeAllowedForTeam(c.Spec.TeamID) {\n\t\treturn nil\n\t}\n\n\tdesiredVersion := c.GetDesiredMajorVersionAsInt()\n\n\tif c.currentMajorVersion >= desiredVersion {\n\t\tc.logger.Infof(\"cluster version up to date. current: %d, min desired: %d\", c.currentMajorVersion, desiredVersion)\n\t\treturn nil\n\t}\n\n\tpods, err := c.listPods()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tallRunning := true\n\n\tvar masterPod *v1.Pod\n\n\tfor _, pod := range pods {\n\t\tps, _ := c.patroni.GetMemberData(&pod)\n\n\t\tif ps.State != \"running\" {\n\t\t\tallRunning = false\n\t\t\tc.logger.Infof(\"identified non running pod, potentially skipping major version upgrade\")\n\t\t}\n\n\t\tif ps.Role == \"master\" {\n\t\t\tmasterPod = &pod\n\t\t\tc.currentMajorVersion = ps.ServerVersion\n\t\t}\n\t}\n\n\tnumberOfPods := len(pods)\n\tif allRunning && masterPod != nil {\n\t\tc.logger.Infof(\"healthy cluster ready to upgrade, current: %d desired: %d\", c.currentMajorVersion, desiredVersion)\n\t\tif c.currentMajorVersion < desiredVersion {\n\t\t\tpodName := &spec.NamespacedName{Namespace: masterPod.Namespace, Name: masterPod.Name}\n\t\t\tc.logger.Infof(\"triggering major version upgrade on pod %s of %d pods\", masterPod.Name, numberOfPods)\n\t\t\tc.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, \"Major Version Upgrade\", \"Starting major version upgrade on pod %s of %d pods\", masterPod.Name, numberOfPods)\n\t\t\tupgradeCommand := fmt.Sprintf(\"\/usr\/bin\/python3 \/scripts\/inplace_upgrade.py %d 2>&1 | tee last_upgrade.log\", numberOfPods)\n\n\t\t\tresult, err := c.ExecCommand(podName, \"\/bin\/su\", \"postgres\", \"-c\", upgradeCommand)\n\t\t\tif err != nil {\n\t\t\t\tc.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, \"Major Version Upgrade\", \"Upgrade from %d to %d FAILED: %v\", c.currentMajorVersion, desiredVersion, err)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tc.logger.Infof(\"upgrade action triggered and command completed: %s\", result[:50])\n\t\t\tc.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, \"Major Version Upgrade\", \"Upgrade from %d to %d finished\", c.currentMajorVersion, desiredVersion)\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>make sure upgrade script runs on the master (#1715)<commit_after>package cluster\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/zalando\/postgres-operator\/pkg\/spec\"\n\t\"github.com\/zalando\/postgres-operator\/pkg\/util\"\n\tv1 \"k8s.io\/api\/core\/v1\"\n)\n\n\/\/ VersionMap Map of version numbers\nvar VersionMap = map[string]int{\n\t\"9.5\": 90500,\n\t\"9.6\": 90600,\n\t\"10\": 100000,\n\t\"11\": 110000,\n\t\"12\": 120000,\n\t\"13\": 130000,\n\t\"14\": 140000,\n}\n\n\/\/ IsBiggerPostgresVersion Compare two Postgres version numbers\nfunc IsBiggerPostgresVersion(old string, new string) bool {\n\toldN := VersionMap[old]\n\tnewN := VersionMap[new]\n\treturn newN > oldN\n}\n\n\/\/ GetDesiredMajorVersionAsInt Convert string to comparable integer of PG version\nfunc (c *Cluster) GetDesiredMajorVersionAsInt() int {\n\treturn VersionMap[c.GetDesiredMajorVersion()]\n}\n\n\/\/ GetDesiredMajorVersion returns major version to use, incl. potential auto upgrade\nfunc (c *Cluster) GetDesiredMajorVersion() string {\n\n\tif c.Config.OpConfig.MajorVersionUpgradeMode == \"full\" {\n\t\t\/\/ e.g. current is 9.6, minimal is 11 allowing 11 to 14 clusters, everything below is upgraded\n\t\tif IsBiggerPostgresVersion(c.Spec.PgVersion, c.Config.OpConfig.MinimalMajorVersion) {\n\t\t\tc.logger.Infof(\"overwriting configured major version %s to %s\", c.Spec.PgVersion, c.Config.OpConfig.TargetMajorVersion)\n\t\t\treturn c.Config.OpConfig.TargetMajorVersion\n\t\t}\n\t}\n\n\treturn c.Spec.PgVersion\n}\n\nfunc (c *Cluster) isUpgradeAllowedForTeam(owningTeam string) bool {\n\tallowedTeams := c.OpConfig.MajorVersionUpgradeTeamAllowList\n\n\tif len(allowedTeams) == 0 {\n\t\treturn false\n\t}\n\n\treturn util.SliceContains(allowedTeams, owningTeam)\n}\n\n\/*\n Execute upgrade when mode is set to manual or full or when the owning team is allowed for upgrade (and mode is \"off\").\n\n Manual upgrade means, it is triggered by the user via manifest version change\n Full upgrade means, operator also determines the minimal version used accross all clusters and upgrades violators.\n*\/\nfunc (c *Cluster) majorVersionUpgrade() error {\n\n\tif c.OpConfig.MajorVersionUpgradeMode == \"off\" && !c.isUpgradeAllowedForTeam(c.Spec.TeamID) {\n\t\treturn nil\n\t}\n\n\tdesiredVersion := c.GetDesiredMajorVersionAsInt()\n\n\tif c.currentMajorVersion >= desiredVersion {\n\t\tc.logger.Infof(\"cluster version up to date. current: %d, min desired: %d\", c.currentMajorVersion, desiredVersion)\n\t\treturn nil\n\t}\n\n\tpods, err := c.listPods()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tallRunning := true\n\n\tvar masterPod *v1.Pod\n\n\tfor i, pod := range pods {\n\t\tps, _ := c.patroni.GetMemberData(&pod)\n\n\t\tif ps.State != \"running\" {\n\t\t\tallRunning = false\n\t\t\tc.logger.Infof(\"identified non running pod, potentially skipping major version upgrade\")\n\t\t}\n\n\t\tif ps.Role == \"master\" {\n\t\t\tmasterPod = &pods[i]\n\t\t\tc.currentMajorVersion = ps.ServerVersion\n\t\t}\n\t}\n\n\tnumberOfPods := len(pods)\n\tif allRunning && masterPod != nil {\n\t\tc.logger.Infof(\"healthy cluster ready to upgrade, current: %d desired: %d\", c.currentMajorVersion, desiredVersion)\n\t\tif c.currentMajorVersion < desiredVersion {\n\t\t\tpodName := &spec.NamespacedName{Namespace: masterPod.Namespace, Name: masterPod.Name}\n\t\t\tc.logger.Infof(\"triggering major version upgrade on pod %s of %d pods\", masterPod.Name, numberOfPods)\n\t\t\tc.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, \"Major Version Upgrade\", \"Starting major version upgrade on pod %s of %d pods\", masterPod.Name, numberOfPods)\n\t\t\tupgradeCommand := fmt.Sprintf(\"\/usr\/bin\/python3 \/scripts\/inplace_upgrade.py %d 2>&1 | tee last_upgrade.log\", numberOfPods)\n\n\t\t\tresult, err := c.ExecCommand(podName, \"\/bin\/su\", \"postgres\", \"-c\", upgradeCommand)\n\t\t\tif err != nil {\n\t\t\t\tc.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, \"Major Version Upgrade\", \"Upgrade from %d to %d FAILED: %v\", c.currentMajorVersion, desiredVersion, err)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tc.logger.Infof(\"upgrade action triggered and command completed: %s\", result[:100])\n\t\t\tc.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, \"Major Version Upgrade\", \"Upgrade from %d to %d finished\", c.currentMajorVersion, desiredVersion)\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package oauth2\n\nimport (\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"regexp\"\n\t\"testing\"\n\n\t. \"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/tarent\/loginsrv\/model\"\n)\n\nvar gitlabTestUserResponse = `{\n\t\"id\": 1,\n\t\"username\": \"john_smith\",\n\t\"email\": \"john@example.com\",\n\t\"name\": \"John Smith\",\n\t\"state\": \"active\",\n\t\"avatar_url\": \"http:\/\/localhost:3000\/uploads\/user\/avatar\/1\/index.jpg\",\n\t\"web_url\": \"http:\/\/localhost:3000\/john_smith\",\n\t\"created_at\": \"2012-05-23T08:00:58Z\",\n\t\"bio\": null,\n\t\"location\": null,\n\t\"public_email\": \"john@example.com\",\n\t\"skype\": \"\",\n\t\"linkedin\": \"\",\n\t\"twitter\": \"\",\n\t\"website_url\": \"\",\n\t\"organization\": \"\",\n\t\"last_sign_in_at\": \"2012-06-01T11:41:01Z\",\n\t\"confirmed_at\": \"2012-05-23T09:05:22Z\",\n\t\"theme_id\": 1,\n\t\"last_activity_on\": \"2012-05-23\",\n\t\"color_scheme_id\": 2,\n\t\"projects_limit\": 100,\n\t\"current_sign_in_at\": \"2012-06-02T06:36:55Z\",\n\t\"identities\": [\n\t {\"provider\": \"github\", \"extern_uid\": \"2435223452345\"},\n\t {\"provider\": \"bitbucket\", \"extern_uid\": \"john_smith\"},\n\t {\"provider\": \"google_oauth2\", \"extern_uid\": \"8776128412476123468721346\"}\n\t],\n\t\"can_create_group\": true,\n\t\"can_create_project\": true,\n\t\"two_factor_enabled\": true,\n\t\"external\": false,\n\t\"private_profile\": false\n }`\n\nvar gitlabTestGroupsResponse = `[\n\t{\n\t \"id\": 1,\n\t \"web_url\": \"https:\/\/gitlab.com\/groups\/example\",\n\t \"name\": \"example\",\n\t \"path\": \"example\",\n\t \"description\": \"\",\n\t \"visibility\": \"private\",\n\t \"lfs_enabled\": true,\n\t \"avatar_url\": null,\n\t \"request_access_enabled\": true,\n\t \"full_name\": \"example\",\n\t \"full_path\": \"example\",\n\t \"parent_id\": null,\n\t \"ldap_cn\": null,\n\t \"ldap_access\": null\n\t},\n\t{\n\t\t\"id\": 2,\n\t\t\"web_url\": \"https:\/\/gitlab.com\/groups\/example\/subgroup\",\n\t\t\"name\": \"subgroup\",\n\t\t\"path\": \"subgroup\",\n\t\t\"description\": \"\",\n\t\t\"visibility\": \"private\",\n\t\t\"lfs_enabled\": true,\n\t\t\"avatar_url\": null,\n\t\t\"request_access_enabled\": true,\n\t\t\"full_name\": \"example \/ subgroup\",\n\t\t\"full_path\": \"example\/subgroup\",\n\t\t\"parent_id\": null,\n\t\t\"ldap_cn\": null,\n\t\t\"ldap_access\": null\n\t}\n]`\n\nfunc Test_Gitlab_getUserInfo(t *testing.T) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == \"\/user\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestUserResponse))\n\t\t} else if r.URL.Path == \"\/groups\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestGroupsResponse))\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\tgitlabAPI = server.URL\n\n\tu, rawJSON, err := providerGitlab.GetUserInfo(TokenInfo{AccessToken: \"secret\"})\n\tNoError(t, err)\n\tEqual(t, \"john_smith\", u.Sub)\n\tEqual(t, \"john@example.com\", u.Email)\n\tEqual(t, \"John Smith\", u.Name)\n\tEqual(t, []string{\"example\", \"example\/subgroup\"}, u.Groups)\n\tEqual(t, `{\"user\":`+gitlabTestUserResponse+`,\"groups\":`+gitlabTestGroupsResponse+`}`, rawJSON)\n}\n\nfunc Test_Gitlab_getUserInfo_NoServer(t *testing.T) {\n\tgitlabAPI = \"http:\/\/localhost\"\n\n\tu, rawJSON, err := providerGitlab.GetUserInfo(TokenInfo{AccessToken: \"secret\"})\n\tEqual(t, model.UserInfo{}, u)\n\tEmpty(t, rawJSON)\n\tError(t, err)\n\tRegexp(t, regexp.MustCompile(`connection refused$`), err.Error())\n}\n\nfunc Test_Gitlab_getUserInfo_UserContentTypeNegative(t *testing.T) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == \"\/user\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"text\/plain; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestUserResponse))\n\t\t} else if r.URL.Path == \"\/groups\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestGroupsResponse))\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\tgitlabAPI = server.URL\n\n\tu, rawJSON, err := providerGitlab.GetUserInfo(TokenInfo{AccessToken: \"secret\"})\n\tEqual(t, model.UserInfo{}, u)\n\tEmpty(t, rawJSON)\n\tError(t, err)\n\tRegexp(t, regexp.MustCompile(`^wrong content-type on gitlab get user info`), err.Error())\n}\n\nfunc Test_Gitlab_getUserInfo_GroupsContentTypeNegative(t *testing.T) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == \"\/user\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestUserResponse))\n\t\t} else if r.URL.Path == \"\/groups\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"text\/plain; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestGroupsResponse))\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\tgitlabAPI = server.URL\n\n\tu, rawJSON, err := providerGitlab.GetUserInfo(TokenInfo{AccessToken: \"secret\"})\n\tEqual(t, model.UserInfo{}, u)\n\tEmpty(t, rawJSON)\n\tError(t, err)\n\tRegexp(t, regexp.MustCompile(`^wrong content-type on gitlab get groups info`), err.Error())\n}\n\nfunc Test_Gitlab_getUserInfo_UserStatusCodeNegative(t *testing.T) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == \"\/user\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tw.Write([]byte(gitlabTestUserResponse))\n\t\t} else if r.URL.Path == \"\/groups\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestGroupsResponse))\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\tgitlabAPI = server.URL\n\n\tu, rawJSON, err := providerGitlab.GetUserInfo(TokenInfo{AccessToken: \"secret\"})\n\tEqual(t, model.UserInfo{}, u)\n\tEmpty(t, rawJSON)\n\tError(t, err)\n\tRegexp(t, regexp.MustCompile(`^got http status [0-9]{3} on gitlab get user info`), err.Error())\n}\n\nfunc Test_Gitlab_getUserInfo_GroupsStatusCodeNegative(t *testing.T) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == \"\/user\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestUserResponse))\n\t\t} else if r.URL.Path == \"\/groups\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tw.Write([]byte(gitlabTestGroupsResponse))\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\tgitlabAPI = server.URL\n\n\tu, rawJSON, err := providerGitlab.GetUserInfo(TokenInfo{AccessToken: \"secret\"})\n\tEqual(t, model.UserInfo{}, u)\n\tEmpty(t, rawJSON)\n\tError(t, err)\n\tRegexp(t, regexp.MustCompile(`^got http status [0-9]{3} on gitlab get groups info`), err.Error())\n}\n\nfunc Test_Gitlab_getUserInfo_UserJSONNegative(t *testing.T) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == \"\/user\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(\"[]\"))\n\t\t} else if r.URL.Path == \"\/groups\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestGroupsResponse))\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\tgitlabAPI = server.URL\n\n\tu, rawJSON, err := providerGitlab.GetUserInfo(TokenInfo{AccessToken: \"secret\"})\n\tEqual(t, model.UserInfo{}, u)\n\tEmpty(t, rawJSON)\n\tError(t, err)\n\tRegexp(t, regexp.MustCompile(`^error parsing gitlab get user info`), err.Error())\n}\n\nfunc Test_Gitlab_getUserInfo_GroupsJSONNegative(t *testing.T) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == \"\/user\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestUserResponse))\n\t\t} else if r.URL.Path == \"\/groups\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(\"{}\"))\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\tgitlabAPI = server.URL\n\n\tu, rawJSON, err := providerGitlab.GetUserInfo(TokenInfo{AccessToken: \"secret\"})\n\tEqual(t, model.UserInfo{}, u)\n\tEmpty(t, rawJSON)\n\tError(t, err)\n\tRegexp(t, regexp.MustCompile(`^error parsing gitlab get groups info`), err.Error())\n}\n<commit_msg>test: fix Test_Gitlab_getUserInfo_NoServer on Windows<commit_after>package oauth2\n\nimport (\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"regexp\"\n\t\"testing\"\n\n\t. \"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/tarent\/loginsrv\/model\"\n)\n\nvar gitlabTestUserResponse = `{\n\t\"id\": 1,\n\t\"username\": \"john_smith\",\n\t\"email\": \"john@example.com\",\n\t\"name\": \"John Smith\",\n\t\"state\": \"active\",\n\t\"avatar_url\": \"http:\/\/localhost:3000\/uploads\/user\/avatar\/1\/index.jpg\",\n\t\"web_url\": \"http:\/\/localhost:3000\/john_smith\",\n\t\"created_at\": \"2012-05-23T08:00:58Z\",\n\t\"bio\": null,\n\t\"location\": null,\n\t\"public_email\": \"john@example.com\",\n\t\"skype\": \"\",\n\t\"linkedin\": \"\",\n\t\"twitter\": \"\",\n\t\"website_url\": \"\",\n\t\"organization\": \"\",\n\t\"last_sign_in_at\": \"2012-06-01T11:41:01Z\",\n\t\"confirmed_at\": \"2012-05-23T09:05:22Z\",\n\t\"theme_id\": 1,\n\t\"last_activity_on\": \"2012-05-23\",\n\t\"color_scheme_id\": 2,\n\t\"projects_limit\": 100,\n\t\"current_sign_in_at\": \"2012-06-02T06:36:55Z\",\n\t\"identities\": [\n\t {\"provider\": \"github\", \"extern_uid\": \"2435223452345\"},\n\t {\"provider\": \"bitbucket\", \"extern_uid\": \"john_smith\"},\n\t {\"provider\": \"google_oauth2\", \"extern_uid\": \"8776128412476123468721346\"}\n\t],\n\t\"can_create_group\": true,\n\t\"can_create_project\": true,\n\t\"two_factor_enabled\": true,\n\t\"external\": false,\n\t\"private_profile\": false\n }`\n\nvar gitlabTestGroupsResponse = `[\n\t{\n\t \"id\": 1,\n\t \"web_url\": \"https:\/\/gitlab.com\/groups\/example\",\n\t \"name\": \"example\",\n\t \"path\": \"example\",\n\t \"description\": \"\",\n\t \"visibility\": \"private\",\n\t \"lfs_enabled\": true,\n\t \"avatar_url\": null,\n\t \"request_access_enabled\": true,\n\t \"full_name\": \"example\",\n\t \"full_path\": \"example\",\n\t \"parent_id\": null,\n\t \"ldap_cn\": null,\n\t \"ldap_access\": null\n\t},\n\t{\n\t\t\"id\": 2,\n\t\t\"web_url\": \"https:\/\/gitlab.com\/groups\/example\/subgroup\",\n\t\t\"name\": \"subgroup\",\n\t\t\"path\": \"subgroup\",\n\t\t\"description\": \"\",\n\t\t\"visibility\": \"private\",\n\t\t\"lfs_enabled\": true,\n\t\t\"avatar_url\": null,\n\t\t\"request_access_enabled\": true,\n\t\t\"full_name\": \"example \/ subgroup\",\n\t\t\"full_path\": \"example\/subgroup\",\n\t\t\"parent_id\": null,\n\t\t\"ldap_cn\": null,\n\t\t\"ldap_access\": null\n\t}\n]`\n\nfunc Test_Gitlab_getUserInfo(t *testing.T) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == \"\/user\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestUserResponse))\n\t\t} else if r.URL.Path == \"\/groups\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestGroupsResponse))\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\tgitlabAPI = server.URL\n\n\tu, rawJSON, err := providerGitlab.GetUserInfo(TokenInfo{AccessToken: \"secret\"})\n\tNoError(t, err)\n\tEqual(t, \"john_smith\", u.Sub)\n\tEqual(t, \"john@example.com\", u.Email)\n\tEqual(t, \"John Smith\", u.Name)\n\tEqual(t, []string{\"example\", \"example\/subgroup\"}, u.Groups)\n\tEqual(t, `{\"user\":`+gitlabTestUserResponse+`,\"groups\":`+gitlabTestGroupsResponse+`}`, rawJSON)\n}\n\nfunc Test_Gitlab_getUserInfo_NoServer(t *testing.T) {\n\tgitlabAPI = \"http:\/\/localhost:1234\"\n\n\tu, rawJSON, err := providerGitlab.GetUserInfo(TokenInfo{AccessToken: \"secret\"})\n\tEqual(t, model.UserInfo{}, u)\n\tEmpty(t, rawJSON)\n\tError(t, err)\n\tRegexp(t, regexp.MustCompile(`(connection refused$)|(actively refused it.$)`), err.Error())\n}\n\nfunc Test_Gitlab_getUserInfo_UserContentTypeNegative(t *testing.T) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == \"\/user\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"text\/plain; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestUserResponse))\n\t\t} else if r.URL.Path == \"\/groups\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestGroupsResponse))\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\tgitlabAPI = server.URL\n\n\tu, rawJSON, err := providerGitlab.GetUserInfo(TokenInfo{AccessToken: \"secret\"})\n\tEqual(t, model.UserInfo{}, u)\n\tEmpty(t, rawJSON)\n\tError(t, err)\n\tRegexp(t, regexp.MustCompile(`^wrong content-type on gitlab get user info`), err.Error())\n}\n\nfunc Test_Gitlab_getUserInfo_GroupsContentTypeNegative(t *testing.T) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == \"\/user\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestUserResponse))\n\t\t} else if r.URL.Path == \"\/groups\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"text\/plain; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestGroupsResponse))\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\tgitlabAPI = server.URL\n\n\tu, rawJSON, err := providerGitlab.GetUserInfo(TokenInfo{AccessToken: \"secret\"})\n\tEqual(t, model.UserInfo{}, u)\n\tEmpty(t, rawJSON)\n\tError(t, err)\n\tRegexp(t, regexp.MustCompile(`^wrong content-type on gitlab get groups info`), err.Error())\n}\n\nfunc Test_Gitlab_getUserInfo_UserStatusCodeNegative(t *testing.T) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == \"\/user\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tw.Write([]byte(gitlabTestUserResponse))\n\t\t} else if r.URL.Path == \"\/groups\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestGroupsResponse))\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\tgitlabAPI = server.URL\n\n\tu, rawJSON, err := providerGitlab.GetUserInfo(TokenInfo{AccessToken: \"secret\"})\n\tEqual(t, model.UserInfo{}, u)\n\tEmpty(t, rawJSON)\n\tError(t, err)\n\tRegexp(t, regexp.MustCompile(`^got http status [0-9]{3} on gitlab get user info`), err.Error())\n}\n\nfunc Test_Gitlab_getUserInfo_GroupsStatusCodeNegative(t *testing.T) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == \"\/user\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestUserResponse))\n\t\t} else if r.URL.Path == \"\/groups\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tw.Write([]byte(gitlabTestGroupsResponse))\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\tgitlabAPI = server.URL\n\n\tu, rawJSON, err := providerGitlab.GetUserInfo(TokenInfo{AccessToken: \"secret\"})\n\tEqual(t, model.UserInfo{}, u)\n\tEmpty(t, rawJSON)\n\tError(t, err)\n\tRegexp(t, regexp.MustCompile(`^got http status [0-9]{3} on gitlab get groups info`), err.Error())\n}\n\nfunc Test_Gitlab_getUserInfo_UserJSONNegative(t *testing.T) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == \"\/user\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(\"[]\"))\n\t\t} else if r.URL.Path == \"\/groups\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestGroupsResponse))\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\tgitlabAPI = server.URL\n\n\tu, rawJSON, err := providerGitlab.GetUserInfo(TokenInfo{AccessToken: \"secret\"})\n\tEqual(t, model.UserInfo{}, u)\n\tEmpty(t, rawJSON)\n\tError(t, err)\n\tRegexp(t, regexp.MustCompile(`^error parsing gitlab get user info`), err.Error())\n}\n\nfunc Test_Gitlab_getUserInfo_GroupsJSONNegative(t *testing.T) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path == \"\/user\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(gitlabTestUserResponse))\n\t\t} else if r.URL.Path == \"\/groups\" {\n\t\t\tEqual(t, \"secret\", r.FormValue(\"access_token\"))\n\t\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\t\tw.Write([]byte(\"{}\"))\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\tgitlabAPI = server.URL\n\n\tu, rawJSON, err := providerGitlab.GetUserInfo(TokenInfo{AccessToken: \"secret\"})\n\tEqual(t, model.UserInfo{}, u)\n\tEmpty(t, rawJSON)\n\tError(t, err)\n\tRegexp(t, regexp.MustCompile(`^error parsing gitlab get groups info`), err.Error())\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage route\n\nimport (\n\t\"context\"\n\n\tkubeclient \"knative.dev\/pkg\/client\/injection\/kube\/client\"\n\tserviceinformer \"knative.dev\/pkg\/client\/injection\/kube\/informers\/core\/v1\/service\"\n\tservingclient \"knative.dev\/serving\/pkg\/client\/injection\/client\"\n\tcertificateinformer \"knative.dev\/serving\/pkg\/client\/injection\/informers\/networking\/v1alpha1\/certificate\"\n\tingressinformer \"knative.dev\/serving\/pkg\/client\/injection\/informers\/networking\/v1alpha1\/ingress\"\n\tconfigurationinformer \"knative.dev\/serving\/pkg\/client\/injection\/informers\/serving\/v1\/configuration\"\n\trevisioninformer \"knative.dev\/serving\/pkg\/client\/injection\/informers\/serving\/v1\/revision\"\n\trouteinformer \"knative.dev\/serving\/pkg\/client\/injection\/informers\/serving\/v1\/route\"\n\troutereconciler \"knative.dev\/serving\/pkg\/client\/injection\/reconciler\/serving\/v1\/route\"\n\n\t\"k8s.io\/client-go\/tools\/cache\"\n\t\"knative.dev\/pkg\/configmap\"\n\t\"knative.dev\/pkg\/controller\"\n\t\"knative.dev\/pkg\/logging\"\n\t\"knative.dev\/pkg\/system\"\n\t\"knative.dev\/pkg\/tracker\"\n\tv1 \"knative.dev\/serving\/pkg\/apis\/serving\/v1\"\n\t\"knative.dev\/serving\/pkg\/network\"\n\tservingreconciler \"knative.dev\/serving\/pkg\/reconciler\"\n\t\"knative.dev\/serving\/pkg\/reconciler\/route\/config\"\n)\n\nconst controllerAgentName = \"route-controller\"\n\n\/\/ NewController initializes the controller and is called by the generated code\n\/\/ Registers eventhandlers to enqueue events\nfunc NewController(\n\tctx context.Context,\n\tcmw configmap.Watcher,\n) *controller.Impl {\n\treturn newControllerWithClock(ctx, cmw, system.RealClock{})\n}\n\ntype reconcilerOption func(*Reconciler)\n\nfunc newControllerWithClock(\n\tctx context.Context,\n\tcmw configmap.Watcher,\n\tclock system.Clock,\n\topts ...reconcilerOption,\n) *controller.Impl {\n\tctx = servingreconciler.AnnotateLoggerWithName(ctx, controllerAgentName)\n\tlogger := logging.FromContext(ctx)\n\tserviceInformer := serviceinformer.Get(ctx)\n\trouteInformer := routeinformer.Get(ctx)\n\tconfigInformer := configurationinformer.Get(ctx)\n\trevisionInformer := revisioninformer.Get(ctx)\n\tingressInformer := ingressinformer.Get(ctx)\n\tcertificateInformer := certificateinformer.Get(ctx)\n\n\t\/\/ No need to lock domainConfigMutex yet since the informers that can modify\n\t\/\/ domainConfig haven't started yet.\n\tc := &Reconciler{\n\t\tkubeclient: kubeclient.Get(ctx),\n\t\tclient: servingclient.Get(ctx),\n\t\tconfigurationLister: configInformer.Lister(),\n\t\trevisionLister: revisionInformer.Lister(),\n\t\tserviceLister: serviceInformer.Lister(),\n\t\tingressLister: ingressInformer.Lister(),\n\t\tcertificateLister: certificateInformer.Lister(),\n\t\tclock: clock,\n\t}\n\timpl := routereconciler.NewImpl(ctx, c, func(impl *controller.Impl) controller.Options {\n\t\tconfigsToResync := []interface{}{\n\t\t\t&network.Config{},\n\t\t\t&config.Domain{},\n\t\t}\n\t\tresync := configmap.TypeFilter(configsToResync...)(func(string, interface{}) {\n\t\t\timpl.GlobalResync(routeInformer.Informer())\n\t\t})\n\t\tconfigStore := config.NewStore(logging.WithLogger(ctx, logger.Named(\"config-store\")), resync)\n\t\tconfigStore.WatchConfigs(cmw)\n\t\treturn controller.Options{ConfigStore: configStore}\n\t})\n\n\tlogger.Info(\"Setting up event handlers\")\n\trouteInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue))\n\n\tserviceInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{\n\t\tFilterFunc: controller.FilterGroupKind(v1.Kind(\"Route\")),\n\t\tHandler: controller.HandleAll(impl.EnqueueControllerOf),\n\t})\n\n\tingressInformer.Informer().AddEventHandler(controller.HandleAll(impl.EnqueueControllerOf))\n\n\tc.tracker = tracker.New(impl.EnqueueKey, controller.GetTrackerLease(ctx))\n\n\tconfigInformer.Informer().AddEventHandler(controller.HandleAll(\n\t\t\/\/ Call the tracker's OnChanged method, but we've seen the objects\n\t\t\/\/ coming through this path missing TypeMeta, so ensure it is properly\n\t\t\/\/ populated.\n\t\tcontroller.EnsureTypeMeta(\n\t\t\tc.tracker.OnChanged,\n\t\t\tv1.SchemeGroupVersion.WithKind(\"Configuration\"),\n\t\t),\n\t))\n\n\trevisionInformer.Informer().AddEventHandler(controller.HandleAll(\n\t\t\/\/ Call the tracker's OnChanged method, but we've seen the objects\n\t\t\/\/ coming through this path missing TypeMeta, so ensure it is properly\n\t\t\/\/ populated.\n\t\tcontroller.EnsureTypeMeta(\n\t\t\tc.tracker.OnChanged,\n\t\t\tv1.SchemeGroupVersion.WithKind(\"Revision\"),\n\t\t),\n\t))\n\n\tcertificateInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{\n\t\tFilterFunc: controller.FilterGroupKind(v1.Kind(\"Route\")),\n\t\tHandler: controller.HandleAll(impl.EnqueueControllerOf),\n\t})\n\n\tfor _, opt := range opts {\n\t\topt(c)\n\t}\n\treturn impl\n}\n<commit_msg>Remove outdated comment from route controller. (#7688)<commit_after>\/*\nCopyright 2019 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage route\n\nimport (\n\t\"context\"\n\n\tkubeclient \"knative.dev\/pkg\/client\/injection\/kube\/client\"\n\tserviceinformer \"knative.dev\/pkg\/client\/injection\/kube\/informers\/core\/v1\/service\"\n\tservingclient \"knative.dev\/serving\/pkg\/client\/injection\/client\"\n\tcertificateinformer \"knative.dev\/serving\/pkg\/client\/injection\/informers\/networking\/v1alpha1\/certificate\"\n\tingressinformer \"knative.dev\/serving\/pkg\/client\/injection\/informers\/networking\/v1alpha1\/ingress\"\n\tconfigurationinformer \"knative.dev\/serving\/pkg\/client\/injection\/informers\/serving\/v1\/configuration\"\n\trevisioninformer \"knative.dev\/serving\/pkg\/client\/injection\/informers\/serving\/v1\/revision\"\n\trouteinformer \"knative.dev\/serving\/pkg\/client\/injection\/informers\/serving\/v1\/route\"\n\troutereconciler \"knative.dev\/serving\/pkg\/client\/injection\/reconciler\/serving\/v1\/route\"\n\n\t\"k8s.io\/client-go\/tools\/cache\"\n\t\"knative.dev\/pkg\/configmap\"\n\t\"knative.dev\/pkg\/controller\"\n\t\"knative.dev\/pkg\/logging\"\n\t\"knative.dev\/pkg\/system\"\n\t\"knative.dev\/pkg\/tracker\"\n\tv1 \"knative.dev\/serving\/pkg\/apis\/serving\/v1\"\n\t\"knative.dev\/serving\/pkg\/network\"\n\tservingreconciler \"knative.dev\/serving\/pkg\/reconciler\"\n\t\"knative.dev\/serving\/pkg\/reconciler\/route\/config\"\n)\n\nconst controllerAgentName = \"route-controller\"\n\n\/\/ NewController initializes the controller and is called by the generated code\n\/\/ Registers eventhandlers to enqueue events\nfunc NewController(\n\tctx context.Context,\n\tcmw configmap.Watcher,\n) *controller.Impl {\n\treturn newControllerWithClock(ctx, cmw, system.RealClock{})\n}\n\ntype reconcilerOption func(*Reconciler)\n\nfunc newControllerWithClock(\n\tctx context.Context,\n\tcmw configmap.Watcher,\n\tclock system.Clock,\n\topts ...reconcilerOption,\n) *controller.Impl {\n\tctx = servingreconciler.AnnotateLoggerWithName(ctx, controllerAgentName)\n\tlogger := logging.FromContext(ctx)\n\tserviceInformer := serviceinformer.Get(ctx)\n\trouteInformer := routeinformer.Get(ctx)\n\tconfigInformer := configurationinformer.Get(ctx)\n\trevisionInformer := revisioninformer.Get(ctx)\n\tingressInformer := ingressinformer.Get(ctx)\n\tcertificateInformer := certificateinformer.Get(ctx)\n\n\tc := &Reconciler{\n\t\tkubeclient: kubeclient.Get(ctx),\n\t\tclient: servingclient.Get(ctx),\n\t\tconfigurationLister: configInformer.Lister(),\n\t\trevisionLister: revisionInformer.Lister(),\n\t\tserviceLister: serviceInformer.Lister(),\n\t\tingressLister: ingressInformer.Lister(),\n\t\tcertificateLister: certificateInformer.Lister(),\n\t\tclock: clock,\n\t}\n\timpl := routereconciler.NewImpl(ctx, c, func(impl *controller.Impl) controller.Options {\n\t\tconfigsToResync := []interface{}{\n\t\t\t&network.Config{},\n\t\t\t&config.Domain{},\n\t\t}\n\t\tresync := configmap.TypeFilter(configsToResync...)(func(string, interface{}) {\n\t\t\timpl.GlobalResync(routeInformer.Informer())\n\t\t})\n\t\tconfigStore := config.NewStore(logging.WithLogger(ctx, logger.Named(\"config-store\")), resync)\n\t\tconfigStore.WatchConfigs(cmw)\n\t\treturn controller.Options{ConfigStore: configStore}\n\t})\n\n\tlogger.Info(\"Setting up event handlers\")\n\trouteInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue))\n\n\tserviceInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{\n\t\tFilterFunc: controller.FilterGroupKind(v1.Kind(\"Route\")),\n\t\tHandler: controller.HandleAll(impl.EnqueueControllerOf),\n\t})\n\n\tingressInformer.Informer().AddEventHandler(controller.HandleAll(impl.EnqueueControllerOf))\n\n\tc.tracker = tracker.New(impl.EnqueueKey, controller.GetTrackerLease(ctx))\n\n\tconfigInformer.Informer().AddEventHandler(controller.HandleAll(\n\t\t\/\/ Call the tracker's OnChanged method, but we've seen the objects\n\t\t\/\/ coming through this path missing TypeMeta, so ensure it is properly\n\t\t\/\/ populated.\n\t\tcontroller.EnsureTypeMeta(\n\t\t\tc.tracker.OnChanged,\n\t\t\tv1.SchemeGroupVersion.WithKind(\"Configuration\"),\n\t\t),\n\t))\n\n\trevisionInformer.Informer().AddEventHandler(controller.HandleAll(\n\t\t\/\/ Call the tracker's OnChanged method, but we've seen the objects\n\t\t\/\/ coming through this path missing TypeMeta, so ensure it is properly\n\t\t\/\/ populated.\n\t\tcontroller.EnsureTypeMeta(\n\t\t\tc.tracker.OnChanged,\n\t\t\tv1.SchemeGroupVersion.WithKind(\"Revision\"),\n\t\t),\n\t))\n\n\tcertificateInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{\n\t\tFilterFunc: controller.FilterGroupKind(v1.Kind(\"Route\")),\n\t\tHandler: controller.HandleAll(impl.EnqueueControllerOf),\n\t})\n\n\tfor _, opt := range opts {\n\t\topt(c)\n\t}\n\treturn impl\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage ebtables\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"k8s.io\/kubernetes\/pkg\/util\/exec\"\n)\n\nfunc testEnsureChain(t *testing.T) {\n\tfcmd := exec.FakeCmd{\n\t\tCombinedOutputScript: []exec.FakeCombinedOutputAction{\n\t\t\t\/\/ Does not Exists\n\t\t\tfunc() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} },\n\t\t\t\/\/ Success\n\t\t\tfunc() ([]byte, error) { return []byte{}, nil },\n\t\t\t\/\/ Exists\n\t\t\tfunc() ([]byte, error) { return nil, nil },\n\t\t\t\/\/ Does not Exists\n\t\t\tfunc() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} },\n\t\t\t\/\/ Fail to create chain\n\t\t\tfunc() ([]byte, error) { return nil, &exec.FakeExitError{Status: 2} },\n\t\t},\n\t}\n\tfexec := exec.FakeExec{\n\t\tCommandScript: []exec.FakeCommandAction{\n\t\t\tfunc(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },\n\t\t\tfunc(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },\n\t\t\tfunc(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },\n\t\t\tfunc(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },\n\t\t\tfunc(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },\n\t\t},\n\t}\n\n\trunner := New(&fexec)\n\texists, err := runner.EnsureChain(TableFilter, \"TEST-CHAIN\")\n\tif exists {\n\t\tt.Errorf(\"expected exists = false\")\n\t}\n\tif err != nil {\n\t\tt.Errorf(\"expected err = nil\")\n\t}\n\n\texists, err = runner.EnsureChain(TableFilter, \"TEST-CHAIN\")\n\tif !exists {\n\t\tt.Errorf(\"expected exists = true\")\n\t}\n\tif err != nil {\n\t\tt.Errorf(\"expected err = nil\")\n\t}\n\n\texists, err = runner.EnsureChain(TableFilter, \"TEST-CHAIN\")\n\tif exists {\n\t\tt.Errorf(\"expected exists = false\")\n\t}\n\terrStr := \"Failed to ensure TEST-CHAIN chain: exit 2, output:\"\n\tif err == nil || !strings.Contains(err.Error(), errStr) {\n\t\tt.Errorf(\"expected error: %q\", errStr)\n\t}\n}\n\nfunc testEnsureRule(t *testing.T) {\n\tfcmd := exec.FakeCmd{\n\t\tCombinedOutputScript: []exec.FakeCombinedOutputAction{\n\t\t\t\/\/ Exists\n\t\t\tfunc() ([]byte, error) {\n\t\t\t\treturn []byte(`Bridge table: filter\n\nBridge chain: OUTPUT, entries: 4, policy: ACCEPT\n-j TEST\n`), nil\n\t\t\t},\n\t\t\t\/\/ Does not Exists.\n\t\t\tfunc() ([]byte, error) {\n\t\t\t\treturn []byte(`Bridge table: filter\n\nBridge chain: TEST, entries: 0, policy: ACCEPT`), nil\n\t\t\t},\n\t\t\t\/\/ Fail to create\n\t\t\tfunc() ([]byte, error) { return nil, &exec.FakeExitError{Status: 2} },\n\t\t},\n\t}\n\tfexec := exec.FakeExec{\n\t\tCommandScript: []exec.FakeCommandAction{\n\t\t\tfunc(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },\n\t\t\tfunc(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },\n\t\t\tfunc(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },\n\t\t},\n\t}\n\n\trunner := New(&fexec)\n\n\texists, err := runner.EnsureRule(Append, TableFilter, ChainOutput, \"-j\", \"TEST\")\n\tif !exists {\n\t\tt.Errorf(\"expected exists = true\")\n\t}\n\tif err != nil {\n\t\tt.Errorf(\"expected err = nil\")\n\t}\n\n\texists, err = runner.EnsureRule(Append, TableFilter, ChainOutput, \"-j\", \"NEXT-TEST\")\n\tif exists {\n\t\tt.Errorf(\"expected exists = false\")\n\t}\n\terrStr := \"Failed to ensure rule: exist 2, output: \"\n\tif err == nil || err.Error() != errStr {\n\t\tt.Errorf(\"expected error: %q\", errStr)\n\t}\n}\n<commit_msg>Fix ebtables_test.go to actually get run, and to pass<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage ebtables\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"k8s.io\/kubernetes\/pkg\/util\/exec\"\n)\n\nfunc TestEnsureChain(t *testing.T) {\n\tfcmd := exec.FakeCmd{\n\t\tCombinedOutputScript: []exec.FakeCombinedOutputAction{\n\t\t\t\/\/ Does not Exists\n\t\t\tfunc() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} },\n\t\t\t\/\/ Success\n\t\t\tfunc() ([]byte, error) { return []byte{}, nil },\n\t\t\t\/\/ Exists\n\t\t\tfunc() ([]byte, error) { return nil, nil },\n\t\t\t\/\/ Does not Exists\n\t\t\tfunc() ([]byte, error) { return nil, &exec.FakeExitError{Status: 1} },\n\t\t\t\/\/ Fail to create chain\n\t\t\tfunc() ([]byte, error) { return nil, &exec.FakeExitError{Status: 2} },\n\t\t},\n\t}\n\tfexec := exec.FakeExec{\n\t\tCommandScript: []exec.FakeCommandAction{\n\t\t\tfunc(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },\n\t\t\tfunc(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },\n\t\t\tfunc(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },\n\t\t\tfunc(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },\n\t\t\tfunc(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },\n\t\t},\n\t}\n\n\trunner := New(&fexec)\n\texists, err := runner.EnsureChain(TableFilter, \"TEST-CHAIN\")\n\tif exists {\n\t\tt.Errorf(\"expected exists = false\")\n\t}\n\tif err != nil {\n\t\tt.Errorf(\"expected err = nil\")\n\t}\n\n\texists, err = runner.EnsureChain(TableFilter, \"TEST-CHAIN\")\n\tif !exists {\n\t\tt.Errorf(\"expected exists = true\")\n\t}\n\tif err != nil {\n\t\tt.Errorf(\"expected err = nil\")\n\t}\n\n\texists, err = runner.EnsureChain(TableFilter, \"TEST-CHAIN\")\n\tif exists {\n\t\tt.Errorf(\"expected exists = false\")\n\t}\n\terrStr := \"Failed to ensure TEST-CHAIN chain: exit 2, output:\"\n\tif err == nil || !strings.Contains(err.Error(), errStr) {\n\t\tt.Errorf(\"expected error: %q\", errStr)\n\t}\n}\n\nfunc TestEnsureRule(t *testing.T) {\n\tfcmd := exec.FakeCmd{\n\t\tCombinedOutputScript: []exec.FakeCombinedOutputAction{\n\t\t\t\/\/ Exists\n\t\t\tfunc() ([]byte, error) {\n\t\t\t\treturn []byte(`Bridge table: filter\n\nBridge chain: OUTPUT, entries: 4, policy: ACCEPT\n-j TEST\n`), nil\n\t\t\t},\n\t\t\t\/\/ Does not Exists.\n\t\t\tfunc() ([]byte, error) {\n\t\t\t\treturn []byte(`Bridge table: filter\n\nBridge chain: TEST, entries: 0, policy: ACCEPT`), nil\n\t\t\t},\n\t\t\t\/\/ Fail to create\n\t\t\tfunc() ([]byte, error) { return nil, &exec.FakeExitError{Status: 2} },\n\t\t},\n\t}\n\tfexec := exec.FakeExec{\n\t\tCommandScript: []exec.FakeCommandAction{\n\t\t\tfunc(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },\n\t\t\tfunc(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },\n\t\t\tfunc(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },\n\t\t},\n\t}\n\n\trunner := New(&fexec)\n\n\texists, err := runner.EnsureRule(Append, TableFilter, ChainOutput, \"-j\", \"TEST\")\n\tif !exists {\n\t\tt.Errorf(\"expected exists = true\")\n\t}\n\tif err != nil {\n\t\tt.Errorf(\"expected err = nil\")\n\t}\n\n\texists, err = runner.EnsureRule(Append, TableFilter, ChainOutput, \"-j\", \"NEXT-TEST\")\n\tif exists {\n\t\tt.Errorf(\"expected exists = false\")\n\t}\n\terrStr := \"Failed to ensure rule: exit 2, output: \"\n\tif err == nil || err.Error() != errStr {\n\t\tt.Errorf(\"expected error: %q\", errStr)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package opentracing\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\tbasictracer \"github.com\/opentracing\/basictracer-go\"\n\topentracing \"github.com\/opentracing\/opentracing-go\"\n\t\"sourcegraph.com\/sourcegraph\/appdash\"\n)\n\nvar _ opentracing.Tracer = NewTracer(nil) \/\/ Compile time check.\n\n\/\/ Options defines options for a Tracer.\ntype Options struct {\n\t\/\/ ShouldSample is a function that allows deterministic sampling of a trace\n\t\/\/ using the randomly generated Trace ID. The decision is made when a new Trace\n\t\/\/ is created and is propagated to all of the trace's spans. For example,\n\t\/\/\n\t\/\/ func(traceID int64) { return traceID % 128 == 0 }\n\t\/\/\n\t\/\/ samples 1 in every 128 traces, approximately.\n\tShouldSample func(traceID int64) bool\n\n\t\/\/ Verbose determines whether errors are logged to stdout only once or all\n\t\/\/ the time. By default, Verbose is false so only the first error is logged\n\t\/\/ and the rest are silenced.\n\tVerbose bool\n\n\t\/\/ Logger is used to log critical errors that can't be collected by the\n\t\/\/ Appdash Collector.\n\tLogger *log.Logger\n}\n\nfunc newLogger() *log.Logger {\n\treturn log.New(os.Stderr, \"opentracing: \", log.LstdFlags)\n}\n\n\/\/ DefaultOptions creates an Option with a sampling function that always return\n\/\/ true and a logger that logs errors to stderr.\nfunc DefaultOptions() Options {\n\treturn Options{\n\t\tShouldSample: func(_ int64) bool { return true },\n\t\tLogger: newLogger(),\n\t}\n}\n\n\/\/ NewTracer creates a new opentracing.Tracer implementation that reports\n\/\/ spans to an Appdash collector.\n\/\/\n\/\/ The Tracer created by NewTracer reports all spans by default. If you want to\n\/\/ sample 1 in every N spans, see NewTracerWithOptions. Spans are written to\n\/\/ the underlying collector when Finish() is called on the span. It is\n\/\/ possible to buffer and write span on a time interval using appdash.ChunkedCollector.\n\/\/\n\/\/ For example:\n\/\/\n\/\/ collector := appdash.NewLocalCollector(myAppdashStore)\n\/\/ chunkedCollector := appdash.NewChunkedCollector(collector)\n\/\/\n\/\/ tracer := NewTracer(chunkedCollector)\n\/\/\n\/\/ If writing traces to a remote Appdash collector, an appdash.RemoteCollector would\n\/\/ be needed, for example:\n\/\/\n\/\/ collector := appdash.NewRemoteCollector(\"localhost:8700\")\n\/\/ tracer := NewTracer(collector)\n\/\/\n\/\/ will record all spans to a collector server on localhost:8700.\nfunc NewTracer(c appdash.Collector) opentracing.Tracer {\n\treturn NewTracerWithOptions(c, DefaultOptions())\n}\n\n\/\/ NewTracerWithOptions creates a new opentracing.Tracer that records spans to\n\/\/ the given appdash.Collector.\nfunc NewTracerWithOptions(c appdash.Collector, options Options) opentracing.Tracer {\n\topts := basictracer.DefaultOptions()\n\topts.ShouldSample = options.ShouldSample\n\topts.Recorder = NewRecorder(c, options)\n\treturn basictracer.NewWithOptions(opts)\n}\n<commit_msg>updates to match correct types<commit_after>package opentracing\n\nimport (\n\t\"log\"\n\t\"os\"\n\n\tbasictracer \"github.com\/opentracing\/basictracer-go\"\n\topentracing \"github.com\/opentracing\/opentracing-go\"\n\t\"sourcegraph.com\/sourcegraph\/appdash\"\n)\n\nvar _ opentracing.Tracer = NewTracer(nil) \/\/ Compile time check.\n\n\/\/ Options defines options for a Tracer.\ntype Options struct {\n\t\/\/ ShouldSample is a function that allows deterministic sampling of a trace\n\t\/\/ using the randomly generated Trace ID. The decision is made when a new Trace\n\t\/\/ is created and is propagated to all of the trace's spans. For example,\n\t\/\/\n\t\/\/ func(traceID int64) { return traceID % 128 == 0 }\n\t\/\/\n\t\/\/ samples 1 in every 128 traces, approximately.\n\tShouldSample func(traceID uint64) bool\n\n\t\/\/ Verbose determines whether errors are logged to stdout only once or all\n\t\/\/ the time. By default, Verbose is false so only the first error is logged\n\t\/\/ and the rest are silenced.\n\tVerbose bool\n\n\t\/\/ Logger is used to log critical errors that can't be collected by the\n\t\/\/ Appdash Collector.\n\tLogger *log.Logger\n}\n\nfunc newLogger() *log.Logger {\n\treturn log.New(os.Stderr, \"opentracing: \", log.LstdFlags)\n}\n\n\/\/ DefaultOptions creates an Option with a sampling function that always return\n\/\/ true and a logger that logs errors to stderr.\nfunc DefaultOptions() Options {\n\treturn Options{\n\t\tShouldSample: func(_ uint64) bool { return true },\n\t\tLogger: newLogger(),\n\t}\n}\n\n\/\/ NewTracer creates a new opentracing.Tracer implementation that reports\n\/\/ spans to an Appdash collector.\n\/\/\n\/\/ The Tracer created by NewTracer reports all spans by default. If you want to\n\/\/ sample 1 in every N spans, see NewTracerWithOptions. Spans are written to\n\/\/ the underlying collector when Finish() is called on the span. It is\n\/\/ possible to buffer and write span on a time interval using appdash.ChunkedCollector.\n\/\/\n\/\/ For example:\n\/\/\n\/\/ collector := appdash.NewLocalCollector(myAppdashStore)\n\/\/ chunkedCollector := appdash.NewChunkedCollector(collector)\n\/\/\n\/\/ tracer := NewTracer(chunkedCollector)\n\/\/\n\/\/ If writing traces to a remote Appdash collector, an appdash.RemoteCollector would\n\/\/ be needed, for example:\n\/\/\n\/\/ collector := appdash.NewRemoteCollector(\"localhost:8700\")\n\/\/ tracer := NewTracer(collector)\n\/\/\n\/\/ will record all spans to a collector server on localhost:8700.\nfunc NewTracer(c appdash.Collector) opentracing.Tracer {\n\treturn NewTracerWithOptions(c, DefaultOptions())\n}\n\n\/\/ NewTracerWithOptions creates a new opentracing.Tracer that records spans to\n\/\/ the given appdash.Collector.\nfunc NewTracerWithOptions(c appdash.Collector, options Options) opentracing.Tracer {\n\topts := basictracer.DefaultOptions()\n\topts.ShouldSample = options.ShouldSample\n\topts.Recorder = NewRecorder(c, options)\n\treturn basictracer.NewWithOptions(opts)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2016 NAME HERE <EMAIL ADDRESS>\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/viper\"\n)\n\nvar cfgFile string\n\n\/\/ This represents the base command when called without any subcommands\nvar RootCmd = &cobra.Command{\n\tUse: \"kafkactl\",\n\tShort: \"A simple REST client for the scheduler remote API\",\n\tLong: `Kafkactl is a command line tool written in GO for controlling kafka scheduler\nthat runs on top of Apache Mesos.`,\n}\n\n\/\/ Execute adds all child commands to the root command sets flags appropriately.\n\/\/ This is called by main.main(). It only needs to happen once to the rootCmd.\nfunc Execute() {\n\tif err := RootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(-1)\n\t}\n}\n\nfunc init() {\n\tcobra.OnInitialize(initConfig)\n\n\t\/\/ Here you will define your flags and configuration settings.\n\t\/\/ Cobra supports Persistent Flags, which, if defined here,\n\t\/\/ will be global for your application.\n\n\tRootCmd.PersistentFlags().StringVar(&cfgFile, \"config\", \"\", \"config file (default is $HOME\/.kafkactl.yaml)\")\n\t\/\/ Cobra also supports local flags, which will only run\n\t\/\/ when this action is called directly.\n\tRootCmd.Flags().BoolP(\"toggle\", \"t\", false, \"Help message for toggle\")\n}\n\n\/\/ initConfig reads in config file and ENV variables if set.\nfunc initConfig() {\n\tif cfgFile != \"\" { \/\/ enable ability to specify config file via flag\n\t\tviper.SetConfigFile(cfgFile)\n\t}\n\n\tviper.SetConfigName(\".kafkactl\") \/\/ name of config file (without extension)\n\tviper.AddConfigPath(\"$HOME\") \/\/ adding home directory as first search path\n\tviper.AutomaticEnv() \/\/ read in environment variables that match\n\n\t\/\/ If a config file is found, read it in.\n\tif err := viper.ReadInConfig(); err == nil {\n\t\tfmt.Println(\"Using config file:\", viper.ConfigFileUsed())\n\t}\n}\n<commit_msg>root-cmd: Api persistent flags added<commit_after>\/\/ Copyright © 2016 NAME HERE <EMAIL ADDRESS>\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/viper\"\n)\n\nvar cfgFile string\nvar apiURL string\n\n\/\/ This represents the base command when called without any subcommands\nvar RootCmd = &cobra.Command{\n\tUse: \"kafkactl\",\n\tShort: \"A simple REST client for the scheduler remote API\",\n\tLong: `Kafkactl is a command line tool written in GO for controlling kafka scheduler\nthat runs on top of Apache Mesos.`,\n}\n\n\/\/ Execute adds all child commands to the root command sets flags appropriately.\n\/\/ This is called by main.main(). It only needs to happen once to the rootCmd.\nfunc Execute() {\n\tif err := RootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(-1)\n\t}\n}\n\nfunc init() {\n\tcobra.OnInitialize(initConfig)\n\n\t\/\/ Here you will define your flags and configuration settings.\n\t\/\/ Cobra supports Persistent Flags, which, if defined here,\n\t\/\/ will be global for your application.\n\n\tRootCmd.PersistentFlags().StringVar(&cfgFile, \"config\", \"\", \"config file (default is $HOME\/.kafkactl.yaml)\")\n\t\/\/ Cobra also supports local flags, which will only run\n\t\/\/ when this action is called directly.\n\tRootCmd.Flags().BoolP(\"toggle\", \"t\", false, \"Help message for toggle\")\n\n\tRootCmd.PersistentFlags().StringVar(&apiURL, \"api\", os.Getenv(\"KAFKA_API\"), \"Kafka scheduler api url\")\n}\n\n\/\/ initConfig reads in config file and ENV variables if set.\nfunc initConfig() {\n\tif cfgFile != \"\" { \/\/ enable ability to specify config file via flag\n\t\tviper.SetConfigFile(cfgFile)\n\t}\n\n\tviper.SetConfigName(\".kafkactl\") \/\/ name of config file (without extension)\n\tviper.AddConfigPath(\"$HOME\") \/\/ adding home directory as first search path\n\tviper.AutomaticEnv() \/\/ read in environment variables that match\n\n\t\/\/ If a config file is found, read it in.\n\tif err := viper.ReadInConfig(); err == nil {\n\t\tfmt.Println(\"Using config file:\", viper.ConfigFileUsed())\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Gogs Authors. All rights reserved.\n\/\/ Copyright 2016 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.gitea.io\/gitea\/models\"\n\tasymkey_model \"code.gitea.io\/gitea\/models\/asymkey\"\n\t\"code.gitea.io\/gitea\/models\/perm\"\n\t\"code.gitea.io\/gitea\/modules\/git\"\n\t\"code.gitea.io\/gitea\/modules\/json\"\n\t\"code.gitea.io\/gitea\/modules\/log\"\n\t\"code.gitea.io\/gitea\/modules\/pprof\"\n\t\"code.gitea.io\/gitea\/modules\/private\"\n\t\"code.gitea.io\/gitea\/modules\/setting\"\n\t\"code.gitea.io\/gitea\/services\/lfs\"\n\n\t\"github.com\/golang-jwt\/jwt\/v4\"\n\t\"github.com\/kballard\/go-shellquote\"\n\t\"github.com\/urfave\/cli\"\n)\n\nconst (\n\tlfsAuthenticateVerb = \"git-lfs-authenticate\"\n)\n\n\/\/ CmdServ represents the available serv sub-command.\nvar CmdServ = cli.Command{\n\tName: \"serv\",\n\tUsage: \"This command should only be called by SSH shell\",\n\tDescription: `Serv provide access auth for repositories`,\n\tAction: runServ,\n\tFlags: []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"enable-pprof\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"debug\",\n\t\t},\n\t},\n}\n\nfunc setup(logPath string, debug bool) {\n\t_ = log.DelLogger(\"console\")\n\tif debug {\n\t\t_ = log.NewLogger(1000, \"console\", \"console\", `{\"level\":\"trace\",\"stacktracelevel\":\"NONE\",\"stderr\":true}`)\n\t} else {\n\t\t_ = log.NewLogger(1000, \"console\", \"console\", `{\"level\":\"fatal\",\"stacktracelevel\":\"NONE\",\"stderr\":true}`)\n\t}\n\tsetting.LoadFromExisting()\n\tif debug {\n\t\tsetting.RunMode = \"dev\"\n\t}\n}\n\nvar (\n\tallowedCommands = map[string]perm.AccessMode{\n\t\t\"git-upload-pack\": perm.AccessModeRead,\n\t\t\"git-upload-archive\": perm.AccessModeRead,\n\t\t\"git-receive-pack\": perm.AccessModeWrite,\n\t\tlfsAuthenticateVerb: perm.AccessModeNone,\n\t}\n\talphaDashDotPattern = regexp.MustCompile(`[^\\w-\\.]`)\n)\n\nfunc fail(userMessage, logMessage string, args ...interface{}) error {\n\t\/\/ There appears to be a chance to cause a zombie process and failure to read the Exit status\n\t\/\/ if nothing is outputted on stdout.\n\tfmt.Fprintln(os.Stdout, \"\")\n\tfmt.Fprintln(os.Stderr, \"Gitea:\", userMessage)\n\n\tif len(logMessage) > 0 {\n\t\tif !setting.IsProd {\n\t\t\tfmt.Fprintf(os.Stderr, logMessage+\"\\n\", args...)\n\t\t}\n\t}\n\tctx, cancel := installSignals()\n\tdefer cancel()\n\n\tif len(logMessage) > 0 {\n\t\t_ = private.SSHLog(ctx, true, fmt.Sprintf(logMessage+\": \", args...))\n\t}\n\treturn cli.NewExitError(\"\", 1)\n}\n\nfunc runServ(c *cli.Context) error {\n\tctx, cancel := installSignals()\n\tdefer cancel()\n\n\t\/\/ FIXME: This needs to internationalised\n\tsetup(\"serv.log\", c.Bool(\"debug\"))\n\n\tif setting.SSH.Disabled {\n\t\tprintln(\"Gitea: SSH has been disabled\")\n\t\treturn nil\n\t}\n\n\tif len(c.Args()) < 1 {\n\t\tif err := cli.ShowSubcommandHelp(c); err != nil {\n\t\t\tfmt.Printf(\"error showing subcommand help: %v\\n\", err)\n\t\t}\n\t\treturn nil\n\t}\n\n\tkeys := strings.Split(c.Args()[0], \"-\")\n\tif len(keys) != 2 || keys[0] != \"key\" {\n\t\treturn fail(\"Key ID format error\", \"Invalid key argument: %s\", c.Args()[0])\n\t}\n\tkeyID, err := strconv.ParseInt(keys[1], 10, 64)\n\tif err != nil {\n\t\treturn fail(\"Key ID format error\", \"Invalid key argument: %s\", c.Args()[1])\n\t}\n\n\tcmd := os.Getenv(\"SSH_ORIGINAL_COMMAND\")\n\tif len(cmd) == 0 {\n\t\tkey, user, err := private.ServNoCommand(ctx, keyID)\n\t\tif err != nil {\n\t\t\treturn fail(\"Internal error\", \"Failed to check provided key: %v\", err)\n\t\t}\n\t\tswitch key.Type {\n\t\tcase asymkey_model.KeyTypeDeploy:\n\t\t\tprintln(\"Hi there! You've successfully authenticated with the deploy key named \" + key.Name + \", but Gitea does not provide shell access.\")\n\t\tcase asymkey_model.KeyTypePrincipal:\n\t\t\tprintln(\"Hi there! You've successfully authenticated with the principal \" + key.Content + \", but Gitea does not provide shell access.\")\n\t\tdefault:\n\t\t\tprintln(\"Hi there, \" + user.Name + \"! You've successfully authenticated with the key named \" + key.Name + \", but Gitea does not provide shell access.\")\n\t\t}\n\t\tprintln(\"If this is unexpected, please log in with password and setup Gitea under another user.\")\n\t\treturn nil\n\t} else if c.Bool(\"debug\") {\n\t\tlog.Debug(\"SSH_ORIGINAL_COMMAND: %s\", os.Getenv(\"SSH_ORIGINAL_COMMAND\"))\n\t}\n\n\twords, err := shellquote.Split(cmd)\n\tif err != nil {\n\t\treturn fail(\"Error parsing arguments\", \"Failed to parse arguments: %v\", err)\n\t}\n\n\tif len(words) < 2 {\n\t\tif git.CheckGitVersionAtLeast(\"2.29\") == nil {\n\t\t\t\/\/ for AGit Flow\n\t\t\tif cmd == \"ssh_info\" {\n\t\t\t\tfmt.Print(`{\"type\":\"gitea\",\"version\":1}`)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn fail(\"Too few arguments\", \"Too few arguments in cmd: %s\", cmd)\n\t}\n\n\tverb := words[0]\n\trepoPath := words[1]\n\tif repoPath[0] == '\/' {\n\t\trepoPath = repoPath[1:]\n\t}\n\n\tvar lfsVerb string\n\tif verb == lfsAuthenticateVerb {\n\t\tif !setting.LFS.StartServer {\n\t\t\treturn fail(\"Unknown git command\", \"LFS authentication request over SSH denied, LFS support is disabled\")\n\t\t}\n\n\t\tif len(words) > 2 {\n\t\t\tlfsVerb = words[2]\n\t\t}\n\t}\n\n\t\/\/ LowerCase and trim the repoPath as that's how they are stored.\n\trepoPath = strings.ToLower(strings.TrimSpace(repoPath))\n\n\trr := strings.SplitN(repoPath, \"\/\", 2)\n\tif len(rr) != 2 {\n\t\treturn fail(\"Invalid repository path\", \"Invalid repository path: %v\", repoPath)\n\t}\n\n\tusername := strings.ToLower(rr[0])\n\treponame := strings.ToLower(strings.TrimSuffix(rr[1], \".git\"))\n\n\tif alphaDashDotPattern.MatchString(reponame) {\n\t\treturn fail(\"Invalid repo name\", \"Invalid repo name: %s\", reponame)\n\t}\n\n\tif c.Bool(\"enable-pprof\") {\n\t\tif err := os.MkdirAll(setting.PprofDataPath, os.ModePerm); err != nil {\n\t\t\treturn fail(\"Error while trying to create PPROF_DATA_PATH\", \"Error while trying to create PPROF_DATA_PATH: %v\", err)\n\t\t}\n\n\t\tstopCPUProfiler, err := pprof.DumpCPUProfileForUsername(setting.PprofDataPath, username)\n\t\tif err != nil {\n\t\t\treturn fail(\"Internal Server Error\", \"Unable to start CPU profile: %v\", err)\n\t\t}\n\t\tdefer func() {\n\t\t\tstopCPUProfiler()\n\t\t\terr := pprof.DumpMemProfileForUsername(setting.PprofDataPath, username)\n\t\t\tif err != nil {\n\t\t\t\t_ = fail(\"Internal Server Error\", \"Unable to dump Mem Profile: %v\", err)\n\t\t\t}\n\t\t}()\n\t}\n\n\trequestedMode, has := allowedCommands[verb]\n\tif !has {\n\t\treturn fail(\"Unknown git command\", \"Unknown git command %s\", verb)\n\t}\n\n\tif verb == lfsAuthenticateVerb {\n\t\tif lfsVerb == \"upload\" {\n\t\t\trequestedMode = perm.AccessModeWrite\n\t\t} else if lfsVerb == \"download\" {\n\t\t\trequestedMode = perm.AccessModeRead\n\t\t} else {\n\t\t\treturn fail(\"Unknown LFS verb\", \"Unknown lfs verb %s\", lfsVerb)\n\t\t}\n\t}\n\n\tresults, err := private.ServCommand(ctx, keyID, username, reponame, requestedMode, verb, lfsVerb)\n\tif err != nil {\n\t\tif private.IsErrServCommand(err) {\n\t\t\terrServCommand := err.(private.ErrServCommand)\n\t\t\tif errServCommand.StatusCode != http.StatusInternalServerError {\n\t\t\t\treturn fail(\"Unauthorized\", \"%s\", errServCommand.Error())\n\t\t\t}\n\t\t\treturn fail(\"Internal Server Error\", \"%s\", errServCommand.Error())\n\t\t}\n\t\treturn fail(\"Internal Server Error\", \"%s\", err.Error())\n\t}\n\tos.Setenv(models.EnvRepoIsWiki, strconv.FormatBool(results.IsWiki))\n\tos.Setenv(models.EnvRepoName, results.RepoName)\n\tos.Setenv(models.EnvRepoUsername, results.OwnerName)\n\tos.Setenv(models.EnvPusherName, results.UserName)\n\tos.Setenv(models.EnvPusherEmail, results.UserEmail)\n\tos.Setenv(models.EnvPusherID, strconv.FormatInt(results.UserID, 10))\n\tos.Setenv(models.EnvRepoID, strconv.FormatInt(results.RepoID, 10))\n\tos.Setenv(models.EnvPRID, fmt.Sprintf(\"%d\", 0))\n\tos.Setenv(models.EnvDeployKeyID, fmt.Sprintf(\"%d\", results.DeployKeyID))\n\tos.Setenv(models.EnvKeyID, fmt.Sprintf(\"%d\", results.KeyID))\n\tos.Setenv(models.EnvAppURL, setting.AppURL)\n\n\t\/\/ LFS token authentication\n\tif verb == lfsAuthenticateVerb {\n\t\turl := fmt.Sprintf(\"%s%s\/%s.git\/info\/lfs\", setting.AppURL, url.PathEscape(results.OwnerName), url.PathEscape(results.RepoName))\n\n\t\tnow := time.Now()\n\t\tclaims := lfs.Claims{\n\t\t\tRegisteredClaims: jwt.RegisteredClaims{\n\t\t\t\tExpiresAt: jwt.NewNumericDate(now.Add(setting.LFS.HTTPAuthExpiry)),\n\t\t\t\tNotBefore: jwt.NewNumericDate(now),\n\t\t\t},\n\t\t\tRepoID: results.RepoID,\n\t\t\tOp: lfsVerb,\n\t\t\tUserID: results.UserID,\n\t\t}\n\t\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\t\t\/\/ Sign and get the complete encoded token as a string using the secret\n\t\ttokenString, err := token.SignedString(setting.LFS.JWTSecretBytes)\n\t\tif err != nil {\n\t\t\treturn fail(\"Internal error\", \"Failed to sign JWT token: %v\", err)\n\t\t}\n\n\t\ttokenAuthentication := &models.LFSTokenResponse{\n\t\t\tHeader: make(map[string]string),\n\t\t\tHref: url,\n\t\t}\n\t\ttokenAuthentication.Header[\"Authorization\"] = fmt.Sprintf(\"Bearer %s\", tokenString)\n\n\t\tenc := json.NewEncoder(os.Stdout)\n\t\terr = enc.Encode(tokenAuthentication)\n\t\tif err != nil {\n\t\t\treturn fail(\"Internal error\", \"Failed to encode LFS json response: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\n\t\/\/ Special handle for Windows.\n\tif setting.IsWindows {\n\t\tverb = strings.Replace(verb, \"-\", \" \", 1)\n\t}\n\n\tvar gitcmd *exec.Cmd\n\tverbs := strings.Split(verb, \" \")\n\tif len(verbs) == 2 {\n\t\tgitcmd = exec.CommandContext(ctx, verbs[0], verbs[1], repoPath)\n\t} else {\n\t\tgitcmd = exec.CommandContext(ctx, verb, repoPath)\n\t}\n\n\tgitcmd.Dir = setting.RepoRootPath\n\tgitcmd.Stdout = os.Stdout\n\tgitcmd.Stdin = os.Stdin\n\tgitcmd.Stderr = os.Stderr\n\tif err = gitcmd.Run(); err != nil {\n\t\treturn fail(\"Internal error\", \"Failed to execute git command: %v\", err)\n\t}\n\n\t\/\/ Update user key activity.\n\tif results.KeyID > 0 {\n\t\tif err = private.UpdatePublicKeyInRepo(ctx, results.KeyID, results.RepoID); err != nil {\n\t\t\treturn fail(\"Internal error\", \"UpdatePublicKeyInRepo: %v\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>Warn on SSH connection for incorrect configuration (#19317)<commit_after>\/\/ Copyright 2014 The Gogs Authors. All rights reserved.\n\/\/ Copyright 2016 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"code.gitea.io\/gitea\/models\"\n\tasymkey_model \"code.gitea.io\/gitea\/models\/asymkey\"\n\t\"code.gitea.io\/gitea\/models\/perm\"\n\t\"code.gitea.io\/gitea\/modules\/git\"\n\t\"code.gitea.io\/gitea\/modules\/json\"\n\t\"code.gitea.io\/gitea\/modules\/log\"\n\t\"code.gitea.io\/gitea\/modules\/pprof\"\n\t\"code.gitea.io\/gitea\/modules\/private\"\n\t\"code.gitea.io\/gitea\/modules\/setting\"\n\t\"code.gitea.io\/gitea\/services\/lfs\"\n\n\t\"github.com\/golang-jwt\/jwt\/v4\"\n\t\"github.com\/kballard\/go-shellquote\"\n\t\"github.com\/urfave\/cli\"\n)\n\nconst (\n\tlfsAuthenticateVerb = \"git-lfs-authenticate\"\n)\n\n\/\/ CmdServ represents the available serv sub-command.\nvar CmdServ = cli.Command{\n\tName: \"serv\",\n\tUsage: \"This command should only be called by SSH shell\",\n\tDescription: `Serv provide access auth for repositories`,\n\tAction: runServ,\n\tFlags: []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"enable-pprof\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"debug\",\n\t\t},\n\t},\n}\n\nfunc setup(logPath string, debug bool) {\n\t_ = log.DelLogger(\"console\")\n\tif debug {\n\t\t_ = log.NewLogger(1000, \"console\", \"console\", `{\"level\":\"trace\",\"stacktracelevel\":\"NONE\",\"stderr\":true}`)\n\t} else {\n\t\t_ = log.NewLogger(1000, \"console\", \"console\", `{\"level\":\"fatal\",\"stacktracelevel\":\"NONE\",\"stderr\":true}`)\n\t}\n\tsetting.LoadFromExisting()\n\tif debug {\n\t\tsetting.RunMode = \"dev\"\n\t}\n}\n\nvar (\n\tallowedCommands = map[string]perm.AccessMode{\n\t\t\"git-upload-pack\": perm.AccessModeRead,\n\t\t\"git-upload-archive\": perm.AccessModeRead,\n\t\t\"git-receive-pack\": perm.AccessModeWrite,\n\t\tlfsAuthenticateVerb: perm.AccessModeNone,\n\t}\n\talphaDashDotPattern = regexp.MustCompile(`[^\\w-\\.]`)\n)\n\nfunc fail(userMessage, logMessage string, args ...interface{}) error {\n\t\/\/ There appears to be a chance to cause a zombie process and failure to read the Exit status\n\t\/\/ if nothing is outputted on stdout.\n\tfmt.Fprintln(os.Stdout, \"\")\n\tfmt.Fprintln(os.Stderr, \"Gitea:\", userMessage)\n\n\tif len(logMessage) > 0 {\n\t\tif !setting.IsProd {\n\t\t\tfmt.Fprintf(os.Stderr, logMessage+\"\\n\", args...)\n\t\t}\n\t}\n\tctx, cancel := installSignals()\n\tdefer cancel()\n\n\tif len(logMessage) > 0 {\n\t\t_ = private.SSHLog(ctx, true, fmt.Sprintf(logMessage+\": \", args...))\n\t}\n\treturn cli.NewExitError(\"\", 1)\n}\n\nfunc runServ(c *cli.Context) error {\n\tctx, cancel := installSignals()\n\tdefer cancel()\n\n\t\/\/ FIXME: This needs to internationalised\n\tsetup(\"serv.log\", c.Bool(\"debug\"))\n\n\tif setting.SSH.Disabled {\n\t\tprintln(\"Gitea: SSH has been disabled\")\n\t\treturn nil\n\t}\n\n\tif len(c.Args()) < 1 {\n\t\tif err := cli.ShowSubcommandHelp(c); err != nil {\n\t\t\tfmt.Printf(\"error showing subcommand help: %v\\n\", err)\n\t\t}\n\t\treturn nil\n\t}\n\n\tkeys := strings.Split(c.Args()[0], \"-\")\n\tif len(keys) != 2 || keys[0] != \"key\" {\n\t\treturn fail(\"Key ID format error\", \"Invalid key argument: %s\", c.Args()[0])\n\t}\n\tkeyID, err := strconv.ParseInt(keys[1], 10, 64)\n\tif err != nil {\n\t\treturn fail(\"Key ID format error\", \"Invalid key argument: %s\", c.Args()[1])\n\t}\n\n\tcmd := os.Getenv(\"SSH_ORIGINAL_COMMAND\")\n\tif len(cmd) == 0 {\n\t\tkey, user, err := private.ServNoCommand(ctx, keyID)\n\t\tif err != nil {\n\t\t\treturn fail(\"Internal error\", \"Failed to check provided key: %v\", err)\n\t\t}\n\t\tswitch key.Type {\n\t\tcase asymkey_model.KeyTypeDeploy:\n\t\t\tprintln(\"Hi there! You've successfully authenticated with the deploy key named \" + key.Name + \", but Gitea does not provide shell access.\")\n\t\tcase asymkey_model.KeyTypePrincipal:\n\t\t\tprintln(\"Hi there! You've successfully authenticated with the principal \" + key.Content + \", but Gitea does not provide shell access.\")\n\t\tdefault:\n\t\t\tprintln(\"Hi there, \" + user.Name + \"! You've successfully authenticated with the key named \" + key.Name + \", but Gitea does not provide shell access.\")\n\t\t}\n\t\tprintln(\"If this is unexpected, please log in with password and setup Gitea under another user.\")\n\t\treturn nil\n\t} else if c.Bool(\"debug\") {\n\t\tlog.Debug(\"SSH_ORIGINAL_COMMAND: %s\", os.Getenv(\"SSH_ORIGINAL_COMMAND\"))\n\t}\n\n\twords, err := shellquote.Split(cmd)\n\tif err != nil {\n\t\treturn fail(\"Error parsing arguments\", \"Failed to parse arguments: %v\", err)\n\t}\n\n\tif len(words) < 2 {\n\t\tif git.CheckGitVersionAtLeast(\"2.29\") == nil {\n\t\t\t\/\/ for AGit Flow\n\t\t\tif cmd == \"ssh_info\" {\n\t\t\t\tfmt.Print(`{\"type\":\"gitea\",\"version\":1}`)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn fail(\"Too few arguments\", \"Too few arguments in cmd: %s\", cmd)\n\t}\n\n\tverb := words[0]\n\trepoPath := words[1]\n\tif repoPath[0] == '\/' {\n\t\trepoPath = repoPath[1:]\n\t}\n\n\tvar lfsVerb string\n\tif verb == lfsAuthenticateVerb {\n\t\tif !setting.LFS.StartServer {\n\t\t\treturn fail(\"Unknown git command\", \"LFS authentication request over SSH denied, LFS support is disabled\")\n\t\t}\n\n\t\tif len(words) > 2 {\n\t\t\tlfsVerb = words[2]\n\t\t}\n\t}\n\n\t\/\/ LowerCase and trim the repoPath as that's how they are stored.\n\trepoPath = strings.ToLower(strings.TrimSpace(repoPath))\n\n\trr := strings.SplitN(repoPath, \"\/\", 2)\n\tif len(rr) != 2 {\n\t\treturn fail(\"Invalid repository path\", \"Invalid repository path: %v\", repoPath)\n\t}\n\n\tusername := strings.ToLower(rr[0])\n\treponame := strings.ToLower(strings.TrimSuffix(rr[1], \".git\"))\n\n\tif alphaDashDotPattern.MatchString(reponame) {\n\t\treturn fail(\"Invalid repo name\", \"Invalid repo name: %s\", reponame)\n\t}\n\n\tif c.Bool(\"enable-pprof\") {\n\t\tif err := os.MkdirAll(setting.PprofDataPath, os.ModePerm); err != nil {\n\t\t\treturn fail(\"Error while trying to create PPROF_DATA_PATH\", \"Error while trying to create PPROF_DATA_PATH: %v\", err)\n\t\t}\n\n\t\tstopCPUProfiler, err := pprof.DumpCPUProfileForUsername(setting.PprofDataPath, username)\n\t\tif err != nil {\n\t\t\treturn fail(\"Internal Server Error\", \"Unable to start CPU profile: %v\", err)\n\t\t}\n\t\tdefer func() {\n\t\t\tstopCPUProfiler()\n\t\t\terr := pprof.DumpMemProfileForUsername(setting.PprofDataPath, username)\n\t\t\tif err != nil {\n\t\t\t\t_ = fail(\"Internal Server Error\", \"Unable to dump Mem Profile: %v\", err)\n\t\t\t}\n\t\t}()\n\t}\n\n\trequestedMode, has := allowedCommands[verb]\n\tif !has {\n\t\treturn fail(\"Unknown git command\", \"Unknown git command %s\", verb)\n\t}\n\n\tif verb == lfsAuthenticateVerb {\n\t\tif lfsVerb == \"upload\" {\n\t\t\trequestedMode = perm.AccessModeWrite\n\t\t} else if lfsVerb == \"download\" {\n\t\t\trequestedMode = perm.AccessModeRead\n\t\t} else {\n\t\t\treturn fail(\"Unknown LFS verb\", \"Unknown lfs verb %s\", lfsVerb)\n\t\t}\n\t}\n\n\tresults, err := private.ServCommand(ctx, keyID, username, reponame, requestedMode, verb, lfsVerb)\n\tif err != nil {\n\t\tif private.IsErrServCommand(err) {\n\t\t\terrServCommand := err.(private.ErrServCommand)\n\t\t\tif errServCommand.StatusCode != http.StatusInternalServerError {\n\t\t\t\treturn fail(\"Unauthorized\", \"%s\", errServCommand.Error())\n\t\t\t}\n\t\t\treturn fail(\"Internal Server Error\", \"%s\", errServCommand.Error())\n\t\t}\n\t\treturn fail(\"Internal Server Error\", \"%s\", err.Error())\n\t}\n\tos.Setenv(models.EnvRepoIsWiki, strconv.FormatBool(results.IsWiki))\n\tos.Setenv(models.EnvRepoName, results.RepoName)\n\tos.Setenv(models.EnvRepoUsername, results.OwnerName)\n\tos.Setenv(models.EnvPusherName, results.UserName)\n\tos.Setenv(models.EnvPusherEmail, results.UserEmail)\n\tos.Setenv(models.EnvPusherID, strconv.FormatInt(results.UserID, 10))\n\tos.Setenv(models.EnvRepoID, strconv.FormatInt(results.RepoID, 10))\n\tos.Setenv(models.EnvPRID, fmt.Sprintf(\"%d\", 0))\n\tos.Setenv(models.EnvDeployKeyID, fmt.Sprintf(\"%d\", results.DeployKeyID))\n\tos.Setenv(models.EnvKeyID, fmt.Sprintf(\"%d\", results.KeyID))\n\tos.Setenv(models.EnvAppURL, setting.AppURL)\n\n\t\/\/ LFS token authentication\n\tif verb == lfsAuthenticateVerb {\n\t\turl := fmt.Sprintf(\"%s%s\/%s.git\/info\/lfs\", setting.AppURL, url.PathEscape(results.OwnerName), url.PathEscape(results.RepoName))\n\n\t\tnow := time.Now()\n\t\tclaims := lfs.Claims{\n\t\t\tRegisteredClaims: jwt.RegisteredClaims{\n\t\t\t\tExpiresAt: jwt.NewNumericDate(now.Add(setting.LFS.HTTPAuthExpiry)),\n\t\t\t\tNotBefore: jwt.NewNumericDate(now),\n\t\t\t},\n\t\t\tRepoID: results.RepoID,\n\t\t\tOp: lfsVerb,\n\t\t\tUserID: results.UserID,\n\t\t}\n\t\ttoken := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)\n\n\t\t\/\/ Sign and get the complete encoded token as a string using the secret\n\t\ttokenString, err := token.SignedString(setting.LFS.JWTSecretBytes)\n\t\tif err != nil {\n\t\t\treturn fail(\"Internal error\", \"Failed to sign JWT token: %v\", err)\n\t\t}\n\n\t\ttokenAuthentication := &models.LFSTokenResponse{\n\t\t\tHeader: make(map[string]string),\n\t\t\tHref: url,\n\t\t}\n\t\ttokenAuthentication.Header[\"Authorization\"] = fmt.Sprintf(\"Bearer %s\", tokenString)\n\n\t\tenc := json.NewEncoder(os.Stdout)\n\t\terr = enc.Encode(tokenAuthentication)\n\t\tif err != nil {\n\t\t\treturn fail(\"Internal error\", \"Failed to encode LFS json response: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\n\t\/\/ Special handle for Windows.\n\tif setting.IsWindows {\n\t\tverb = strings.Replace(verb, \"-\", \" \", 1)\n\t}\n\n\tvar gitcmd *exec.Cmd\n\tverbs := strings.Split(verb, \" \")\n\tif len(verbs) == 2 {\n\t\tgitcmd = exec.CommandContext(ctx, verbs[0], verbs[1], repoPath)\n\t} else {\n\t\tgitcmd = exec.CommandContext(ctx, verb, repoPath)\n\t}\n\n\t\/\/ Check if setting.RepoRootPath exists. It could be the case that it doesn't exist, this can happen when\n\t\/\/ `[repository]` `ROOT` is a relative path and $GITEA_WORK_DIR isn't passed to the SSH connection.\n\tif _, err := os.Stat(setting.RepoRootPath); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn fail(\"Incorrect configuration.\",\n\t\t\t\t\"Directory `[repository]` `ROOT` was not found, please check if $GITEA_WORK_DIR is passed to the SSH connection or make `[repository]` `ROOT` an absolute value.\")\n\t\t}\n\t}\n\n\tgitcmd.Dir = setting.RepoRootPath\n\tgitcmd.Stdout = os.Stdout\n\tgitcmd.Stdin = os.Stdin\n\tgitcmd.Stderr = os.Stderr\n\tif err = gitcmd.Run(); err != nil {\n\t\treturn fail(\"Internal error\", \"Failed to execute git command: %v\", err)\n\t}\n\n\t\/\/ Update user key activity.\n\tif results.KeyID > 0 {\n\t\tif err = private.UpdatePublicKeyInRepo(ctx, results.KeyID, results.RepoID); err != nil {\n\t\t\treturn fail(\"Internal error\", \"UpdatePublicKeyInRepo: %v\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Flesh out the help documentation for show<commit_after><|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"syscall\"\n)\n\n\/\/ `direnv exec DIR <COMMAND> ...`\nvar CmdExec = &Cmd{\n\tName: \"exec\",\n\tDesc: \"Executes a command after loading the first .envrc found in DIR\",\n\tArgs: []string{\"DIR\", \"COMMAND\", \"[...ARGS]\"},\n\tAction: actionWithConfig(func(env Env, args []string, config *Config) (err error) {\n\t\tvar (\n\t\t\tbackupDiff *EnvDiff\n\t\t\tnewEnv Env\n\t\t\trcPath string\n\t\t\tcommand string\n\t\t)\n\n\t\tif len(args) < 2 {\n\t\t\treturn fmt.Errorf(\"missing DIR and COMMAND arguments\")\n\t\t}\n\n\t\trcPath = filepath.Clean(args[1])\n\t\tfi, err := os.Stat(rcPath)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif fi.IsDir() {\n\t\t\tif len(args) < 3 {\n\t\t\t\treturn fmt.Errorf(\"missing COMMAND argument\")\n\t\t\t}\n\t\t\tcommand = args[2]\n\t\t\targs = args[2:]\n\t\t} else {\n\t\t\tcommand = rcPath\n\t\t\trcPath = filepath.Dir(rcPath)\n\t\t\targs = args[1:]\n\t\t}\n\n\t\trc := FindRC(rcPath, config)\n\n\t\t\/\/ Restore pristine environment if needed\n\t\tif backupDiff, err = config.EnvDiff(); err == nil {\n\t\t\tenv = backupDiff.Reverse().Patch(env)\n\t\t}\n\t\tenv.CleanContext()\n\n\t\t\/\/ Load the rc\n\t\tif rc != nil {\n\t\t\tif newEnv, err = rc.Load(config, env); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tnewEnv = env\n\t\t}\n\n\t\tcommand, err = lookPath(command, newEnv[\"PATH\"])\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\terr = syscall.Exec(command, args, newEnv.ToGoEnv())\n\t\treturn\n\t}),\n}\n<commit_msg>direnv exec: improve the error message<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"syscall\"\n)\n\n\/\/ `direnv exec DIR <COMMAND> ...`\nvar CmdExec = &Cmd{\n\tName: \"exec\",\n\tDesc: \"Executes a command after loading the first .envrc found in DIR\",\n\tArgs: []string{\"DIR\", \"COMMAND\", \"[...ARGS]\"},\n\tAction: actionWithConfig(func(env Env, args []string, config *Config) (err error) {\n\t\tvar (\n\t\t\tbackupDiff *EnvDiff\n\t\t\tnewEnv Env\n\t\t\trcPath string\n\t\t\tcommand string\n\t\t)\n\n\t\tif len(args) < 2 {\n\t\t\treturn fmt.Errorf(\"missing DIR and COMMAND arguments\")\n\t\t}\n\n\t\trcPath = filepath.Clean(args[1])\n\t\tfi, err := os.Stat(rcPath)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif fi.IsDir() {\n\t\t\tif len(args) < 3 {\n\t\t\t\treturn fmt.Errorf(\"missing COMMAND argument\")\n\t\t\t}\n\t\t\tcommand = args[2]\n\t\t\targs = args[2:]\n\t\t} else {\n\t\t\tcommand = rcPath\n\t\t\trcPath = filepath.Dir(rcPath)\n\t\t\targs = args[1:]\n\t\t}\n\n\t\trc := FindRC(rcPath, config)\n\n\t\t\/\/ Restore pristine environment if needed\n\t\tif backupDiff, err = config.EnvDiff(); err == nil {\n\t\t\tenv = backupDiff.Reverse().Patch(env)\n\t\t}\n\t\tenv.CleanContext()\n\n\t\t\/\/ Load the rc\n\t\tif rc != nil {\n\t\t\tif newEnv, err = rc.Load(config, env); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tnewEnv = env\n\t\t}\n\n\t\tvar commandPath string\n\t\tcommandPath, err = lookPath(command, newEnv[\"PATH\"])\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"command '%s' not found on PATH '%s'\", command, newEnv[\"PATH\"])\n\t\t\treturn\n\t\t}\n\n\t\terr = syscall.Exec(commandPath, args, newEnv.ToGoEnv())\n\t\treturn\n\t}),\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"html\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/user\"\n\t\"regexp\"\n\n\t\"github.com\/imwally\/pinboard\"\n)\n\nvar (\n\toptions = flag.NewFlagSet(\"\", flag.ExitOnError)\n\tprivFlag = options.Bool(\"private\", false, \"private bookmark\")\n\treadFlag = options.Bool(\"readlater\", false, \"read later bookmark\")\n\tlongFlag = options.Bool(\"l\", false, \"display long format\")\n\textFlag = options.String(\"text\", \"\", \"longer description of bookmark\")\n\ttagFlag = options.String(\"tag\", \"\", \"tags for bookmark\")\n\ttitleFlag = options.String(\"title\", \"\", \"title of the bookmark\")\n\n\ttoken string\n)\n\nvar usage = `Usage: pin\n pin rm URL\n pin add URL [OPTION]\n pin ls [OPTION]\n\nOptions:\n -title title of bookmark being added\n -tag space delimited tags \n -private mark bookmark as private\n -readlater mark bookmark as read later\n -text longer description of bookmark\n -l long format for ls\n`\n\n\/\/ Number of bookmarks to display.\nconst COUNT int = 50\n\n\/\/ Piped is a helper function to check for piped input. It will return\n\/\/ input, true if data was piped.\nfunc Piped() (string, bool) {\n\tfi, err := os.Stdin.Stat()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"pin: %s\", err)\n\t\treturn \"\", false\n\t}\n\n\tisPipe := (fi.Mode() & os.ModeNamedPipe) == os.ModeNamedPipe\n\tif isPipe {\n\t\tread := bufio.NewReader(os.Stdin)\n\t\tline, _, err := read.ReadLine()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\treturn string(line), true\n\t}\n\n\treturn \"\", false\n}\n\n\/\/ PageTitle returns the title from an HTML page.\nfunc PageTitle(url string) (title string, err error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tre := regexp.MustCompile(\"<title>(.*?)<\/title>\")\n\n\treturn html.UnescapeString(string(re.FindSubmatch(body)[1])), nil\n}\n\n\/\/ Add checks flag values and encodes the GET URL for adding a bookmark.\nfunc Add(p pinboard.Post) {\n\n\tvar args []string\n\n\t\/\/ Check if URL is piped in or first argument. Optional tags\n\t\/\/ should follow the URL.\n\tif url, ok := Piped(); ok {\n\t\tp.URL = url\n\t\targs = flag.Args()[1:]\n\t} else {\n\t\tp.URL = flag.Args()[1]\n\t\targs = flag.Args()[2:]\n\t}\n\n\t\/\/ Parse flags after the URL.\n\toptions.Parse(args)\n\n\tif *titleFlag != \"\" {\n\t\tp.Description = *titleFlag\n\t} else {\n\t\t\/\/ Use page title if title flag is not supplied.\n\t\ttitle, err := PageTitle(p.URL)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"pin: couldn't get title: %s\\n\", err)\n\t\t\treturn\n\t\t} else {\n\t\t\tp.Description = title\n\t\t}\n\t}\n\n\tif *privFlag {\n\t\tp.Shared = \"no\"\n\t}\n\n\tif *readFlag {\n\t\tp.Toread = \"yes\"\n\t}\n\n\tp.Extended = *extFlag\n\tp.Tags = *tagFlag\n\n\tp.Encode()\n\terr := p.Add()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n\n\/\/ Delete will delete the URL specified.\nfunc Delete(p pinboard.Post) {\n\n\t\/\/ Check if URL is piped in or first argument.\n\tif url, ok := Piped(); ok {\n\t\tp.URL = url\n\t} else {\n\t\tp.URL = flag.Args()[1]\n\t}\n\n\tp.Encode()\n\terr := p.Delete()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n\n\/\/ Show will list the most recent bookmarks. The -tag and -readlater\n\/\/ flags can be used to filter results.\nfunc Show(p pinboard.Post) {\n\n\targs := flag.Args()[1:]\n\toptions.Parse(args)\n\n\tif *tagFlag != \"\" {\n\t\tp.Tag = *tagFlag\n\t}\n\tif *readFlag {\n\t\tp.Toread = \"yes\"\n\t}\n\n\tp.Count = COUNT\n\tp.Encode()\n\n\trecent := p.ShowRecent()\n\n\tif *longFlag {\n\t\tfor _, v := range recent.Posts {\n\t\t\tvar shared, unread string\n\t\t\tif v.Shared == \"no\" {\n\t\t\t\tshared = \"[*]\"\n\t\t\t}\n\t\t\tif v.Toread == \"yes\" {\n\t\t\t\tunread = \"[#]\"\n\t\t\t}\n\t\t\tfmt.Println(unread + shared + v.Description)\n\t\t\tfmt.Println(v.Href)\n\t\t\tif v.Extended != \"\" {\n\t\t\t\tfmt.Println(v.Extended)\n\t\t\t}\n\t\t\tfmt.Println(v.Tags, \"\\n\")\n\t\t}\n\t} else {\n\t\tfor _, v := range recent.Posts {\n\t\t\tfmt.Println(v.Href)\n\t\t}\n\t}\n}\n\n\/\/ runCmd takes a command string, initialises a new pinboard post and\n\/\/ runs the command.\nfunc runCmd(cmd string) {\n\tvar p pinboard.Post\n\tp.Token = token\n\n\tif cmd == \"help\" {\n\t\tfmt.Printf(\"%s\", usage)\n\t}\n\n\tif cmd == \"ls\" {\n\t\tShow(p)\n\t}\n\n\tif cmd == \"add\" {\n\t\tAdd(p)\n\t}\n\n\tif cmd == \"rm\" {\n\t\tDelete(p)\n\t}\n}\n\n\/\/ start takes a slice of commands, parses flag arguments and runs the\n\/\/ command if it's found.\nfunc start(cmds []string) {\n\tflag.Parse()\n\tif flag.NArg() < 1 {\n\t\tfmt.Fprintf(os.Stderr, \"pin: no command is given.\\n\")\n\t\treturn\n\t}\n\n\tcmdName := flag.Arg(0)\n\n\tvar found bool\n\tfor _, cmd := range cmds {\n\t\tif cmdName == cmd {\n\t\t\trunCmd(cmd)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif !found {\n\t\tfmt.Fprintf(os.Stderr, \"pin: command %s not found.\\n\", cmdName)\n\t\treturn\n\t}\n}\n\n\/\/ TokenIsSet will check to make sure an authentication token is set before\n\/\/ making any API calls.\nfunc TokenIsSet() bool {\n\tif token == \"\" {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc init() {\n\tu, err := user.Current()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tcontent, err := ioutil.ReadFile(u.HomeDir + \"\/.pinboard\")\n\tif err != nil {\n\t\tfmt.Println(\"No authorization token found. Please add your authorization token to ~\/.pinboard\")\n\t}\n\n\ttoken = string(content)\n}\n\nfunc main() {\n\tif !TokenIsSet() {\n\t\treturn\n\t}\n\n\tcmds := []string{\"help\", \"add\", \"rm\", \"ls\"}\n\n\tstart(cmds)\n}\n<commit_msg>Changes recommended by golint.<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"html\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/user\"\n\t\"regexp\"\n\n\t\"github.com\/imwally\/pinboard\"\n)\n\nvar (\n\toptions = flag.NewFlagSet(\"\", flag.ExitOnError)\n\tprivFlag = options.Bool(\"private\", false, \"private bookmark\")\n\treadFlag = options.Bool(\"readlater\", false, \"read later bookmark\")\n\tlongFlag = options.Bool(\"l\", false, \"display long format\")\n\textFlag = options.String(\"text\", \"\", \"longer description of bookmark\")\n\ttagFlag = options.String(\"tag\", \"\", \"tags for bookmark\")\n\ttitleFlag = options.String(\"title\", \"\", \"title of the bookmark\")\n\n\ttoken string\n)\n\nvar usage = `Usage: pin\n pin rm URL\n pin add URL [OPTION]\n pin ls [OPTION]\n\nOptions:\n -title title of bookmark being added\n -tag space delimited tags \n -private mark bookmark as private\n -readlater mark bookmark as read later\n -text longer description of bookmark\n -l long format for ls\n`\n\n\/\/ COUNT is the number of bookmarks to display.\nconst COUNT int = 50\n\n\/\/ Piped is a helper function to check for piped input. It will return\n\/\/ input, true if data was piped.\nfunc Piped() (string, bool) {\n\tfi, err := os.Stdin.Stat()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"pin: %s\", err)\n\t\treturn \"\", false\n\t}\n\n\tisPipe := (fi.Mode() & os.ModeNamedPipe) == os.ModeNamedPipe\n\tif isPipe {\n\t\tread := bufio.NewReader(os.Stdin)\n\t\tline, _, err := read.ReadLine()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\treturn string(line), true\n\t}\n\n\treturn \"\", false\n}\n\n\/\/ PageTitle returns the title from an HTML page.\nfunc PageTitle(url string) (title string, err error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tre := regexp.MustCompile(\"<title>(.*?)<\/title>\")\n\n\treturn html.UnescapeString(string(re.FindSubmatch(body)[1])), nil\n}\n\n\/\/ Add checks flag values and encodes the GET URL for adding a bookmark.\nfunc Add(p pinboard.Post) {\n\n\tvar args []string\n\n\t\/\/ Check if URL is piped in or first argument. Optional tags\n\t\/\/ should follow the URL.\n\tif url, ok := Piped(); ok {\n\t\tp.URL = url\n\t\targs = flag.Args()[1:]\n\t} else {\n\t\tp.URL = flag.Args()[1]\n\t\targs = flag.Args()[2:]\n\t}\n\n\t\/\/ Parse flags after the URL.\n\toptions.Parse(args)\n\n\tif *titleFlag != \"\" {\n\t\tp.Description = *titleFlag\n\t} else {\n\t\t\/\/ Use page title if title flag is not supplied.\n\t\ttitle, err := PageTitle(p.URL)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"pin: couldn't get title: %s\\n\", err)\n\t\t\treturn\n\t\t}\n\n\t\tp.Description = title\n\t}\n\n\tif *privFlag {\n\t\tp.Shared = \"no\"\n\t}\n\n\tif *readFlag {\n\t\tp.Toread = \"yes\"\n\t}\n\n\tp.Extended = *extFlag\n\tp.Tags = *tagFlag\n\n\tp.Encode()\n\terr := p.Add()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n\n\/\/ Delete will delete the URL specified.\nfunc Delete(p pinboard.Post) {\n\n\t\/\/ Check if URL is piped in or first argument.\n\tif url, ok := Piped(); ok {\n\t\tp.URL = url\n\t} else {\n\t\tp.URL = flag.Args()[1]\n\t}\n\n\tp.Encode()\n\terr := p.Delete()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n\n\/\/ Show will list the most recent bookmarks. The -tag and -readlater\n\/\/ flags can be used to filter results.\nfunc Show(p pinboard.Post) {\n\n\targs := flag.Args()[1:]\n\toptions.Parse(args)\n\n\tif *tagFlag != \"\" {\n\t\tp.Tag = *tagFlag\n\t}\n\tif *readFlag {\n\t\tp.Toread = \"yes\"\n\t}\n\n\tp.Count = COUNT\n\tp.Encode()\n\n\trecent := p.ShowRecent()\n\n\tif *longFlag {\n\t\tfor _, v := range recent.Posts {\n\t\t\tvar shared, unread string\n\t\t\tif v.Shared == \"no\" {\n\t\t\t\tshared = \"[*]\"\n\t\t\t}\n\t\t\tif v.Toread == \"yes\" {\n\t\t\t\tunread = \"[#]\"\n\t\t\t}\n\t\t\tfmt.Println(unread + shared + v.Description)\n\t\t\tfmt.Println(v.Href)\n\t\t\tif v.Extended != \"\" {\n\t\t\t\tfmt.Println(v.Extended)\n\t\t\t}\n\t\t\tfmt.Println(v.Tags, \"\\n\")\n\t\t}\n\t} else {\n\t\tfor _, v := range recent.Posts {\n\t\t\tfmt.Println(v.Href)\n\t\t}\n\t}\n}\n\n\/\/ runCmd takes a command string, initialises a new pinboard post and\n\/\/ runs the command.\nfunc runCmd(cmd string) {\n\tvar p pinboard.Post\n\tp.Token = token\n\n\tif cmd == \"help\" {\n\t\tfmt.Printf(\"%s\", usage)\n\t}\n\n\tif cmd == \"ls\" {\n\t\tShow(p)\n\t}\n\n\tif cmd == \"add\" {\n\t\tAdd(p)\n\t}\n\n\tif cmd == \"rm\" {\n\t\tDelete(p)\n\t}\n}\n\n\/\/ start takes a slice of commands, parses flag arguments and runs the\n\/\/ command if it's found.\nfunc start(cmds []string) {\n\tflag.Parse()\n\tif flag.NArg() < 1 {\n\t\tfmt.Fprintf(os.Stderr, \"pin: no command is given.\\n\")\n\t\treturn\n\t}\n\n\tcmdName := flag.Arg(0)\n\n\tvar found bool\n\tfor _, cmd := range cmds {\n\t\tif cmdName == cmd {\n\t\t\trunCmd(cmd)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif !found {\n\t\tfmt.Fprintf(os.Stderr, \"pin: command %s not found.\\n\", cmdName)\n\t\treturn\n\t}\n}\n\n\/\/ TokenIsSet will check to make sure an authentication token is set before\n\/\/ making any API calls.\nfunc TokenIsSet() bool {\n\tif token == \"\" {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc init() {\n\tu, err := user.Current()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tcontent, err := ioutil.ReadFile(u.HomeDir + \"\/.pinboard\")\n\tif err != nil {\n\t\tfmt.Println(\"No authorization token found. Please add your authorization token to ~\/.pinboard\")\n\t}\n\n\ttoken = string(content)\n}\n\nfunc main() {\n\tif !TokenIsSet() {\n\t\treturn\n\t}\n\n\tcmds := []string{\"help\", \"add\", \"rm\", \"ls\"}\n\n\tstart(cmds)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage etcdserver\n\nimport (\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/Godeps\/_workspace\/src\/github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/coreos\/etcd\/pkg\/runtime\"\n)\n\nvar (\n\t\/\/ TODO: with label in v3?\n\tproposeDurations = prometheus.NewSummary(prometheus.SummaryOpts{\n\t\tNamespace: \"etcd\",\n\t\tSubsystem: \"server\",\n\t\tName: \"proposal_durations_milliseconds\",\n\t\tHelp: \"The latency distributions of committing proposal.\",\n\t})\n\tproposePending = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: \"etcd\",\n\t\tSubsystem: \"server\",\n\t\tName: \"pending_proposal_total\",\n\t\tHelp: \"The total number of pending proposals.\",\n\t})\n\t\/\/ This is number of proposal failed in client's view.\n\t\/\/ The proposal might be later got committed in raft.\n\tproposeFailed = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: \"etcd\",\n\t\tSubsystem: \"server\",\n\t\tName: \"proposal_failed_total\",\n\t\tHelp: \"The total number of failed proposals.\",\n\t})\n\n\tfileDescriptorUsed = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: \"etcd\",\n\t\tSubsystem: \"server\",\n\t\tName: \"file_descriptors_used_totol\",\n\t\tHelp: \"The total number of file descriptors used.\",\n\t})\n)\n\nfunc init() {\n\tprometheus.MustRegister(proposeDurations)\n\tprometheus.MustRegister(proposePending)\n\tprometheus.MustRegister(proposeFailed)\n\tprometheus.MustRegister(fileDescriptorUsed)\n}\n\nfunc monitorFileDescriptor(done <-chan struct{}) {\n\tticker := time.NewTicker(5 * time.Second)\n\tdefer ticker.Stop()\n\tfor {\n\t\tused, err := runtime.FDUsage()\n\t\tif err != nil {\n\t\t\tplog.Errorf(\"cannot monitor file descriptor usage (%v)\", err)\n\t\t\treturn\n\t\t}\n\t\tfileDescriptorUsed.Set(float64(used))\n\t\tlimit, err := runtime.FDLimit()\n\t\tif err != nil {\n\t\t\tplog.Errorf(\"cannot monitor file descriptor usage (%v)\", err)\n\t\t\treturn\n\t\t}\n\t\tif used >= limit\/5*4 {\n\t\t\tplog.Warningf(\"80%% of the file descriptor limit is used [used = %d, limit = %d]\", used, limit)\n\t\t}\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\tcase <-done:\n\t\t\treturn\n\t\t}\n\t}\n}\n<commit_msg>etcdserver: fix typo in metrics.go<commit_after>\/\/ Copyright 2015 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage etcdserver\n\nimport (\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/Godeps\/_workspace\/src\/github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/coreos\/etcd\/pkg\/runtime\"\n)\n\nvar (\n\t\/\/ TODO: with label in v3?\n\tproposeDurations = prometheus.NewSummary(prometheus.SummaryOpts{\n\t\tNamespace: \"etcd\",\n\t\tSubsystem: \"server\",\n\t\tName: \"proposal_durations_milliseconds\",\n\t\tHelp: \"The latency distributions of committing proposal.\",\n\t})\n\tproposePending = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: \"etcd\",\n\t\tSubsystem: \"server\",\n\t\tName: \"pending_proposal_total\",\n\t\tHelp: \"The total number of pending proposals.\",\n\t})\n\t\/\/ This is number of proposal failed in client's view.\n\t\/\/ The proposal might be later got committed in raft.\n\tproposeFailed = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: \"etcd\",\n\t\tSubsystem: \"server\",\n\t\tName: \"proposal_failed_total\",\n\t\tHelp: \"The total number of failed proposals.\",\n\t})\n\n\tfileDescriptorUsed = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: \"etcd\",\n\t\tSubsystem: \"server\",\n\t\tName: \"file_descriptors_used_total\",\n\t\tHelp: \"The total number of file descriptors used.\",\n\t})\n)\n\nfunc init() {\n\tprometheus.MustRegister(proposeDurations)\n\tprometheus.MustRegister(proposePending)\n\tprometheus.MustRegister(proposeFailed)\n\tprometheus.MustRegister(fileDescriptorUsed)\n}\n\nfunc monitorFileDescriptor(done <-chan struct{}) {\n\tticker := time.NewTicker(5 * time.Second)\n\tdefer ticker.Stop()\n\tfor {\n\t\tused, err := runtime.FDUsage()\n\t\tif err != nil {\n\t\t\tplog.Errorf(\"cannot monitor file descriptor usage (%v)\", err)\n\t\t\treturn\n\t\t}\n\t\tfileDescriptorUsed.Set(float64(used))\n\t\tlimit, err := runtime.FDLimit()\n\t\tif err != nil {\n\t\t\tplog.Errorf(\"cannot monitor file descriptor usage (%v)\", err)\n\t\t\treturn\n\t\t}\n\t\tif used >= limit\/5*4 {\n\t\t\tplog.Warningf(\"80%% of the file descriptor limit is used [used = %d, limit = %d]\", used, limit)\n\t\t}\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\tcase <-done:\n\t\t\treturn\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>apply goimports<commit_after><|endoftext|>"} {"text":"<commit_before>package messagerouter\n\nimport (\n\t\"fmt\"\n\t\"github.com\/nlopes\/slack\"\n\t\"strings\"\n)\n\ntype ResponseWriter interface {\n\tWrite(string) error\n\tWriteChannel(string, string) error\n\tWriteError(string) error\n}\n\ntype SlackResponseWriter struct {\n\trtm *slack.RTM\n\tmsg *slack.Msg\n}\n\nfunc NewSlackResponseWriter(msg *slack.Msg, rtm *slack.RTM) *SlackResponseWriter {\n\tw := SlackResponseWriter{}\n\tw.msg = msg\n\tw.rtm = rtm\n\n\treturn &w\n}\n\n\/\/Msg sets the SlackResponseWriter Message\nfunc (w *SlackResponseWriter) Msg(msg *slack.Msg) {\n\tw.msg = msg\n}\n\n\/\/Msg sets the SlackResponseWriter RTM\nfunc (w *SlackResponseWriter) Rtm(rtm *slack.RTM) {\n\tw.rtm = rtm\n}\n\n\/\/ WriteChannel sends message to particular channel\nfunc (w *SlackResponseWriter) WriteChannel(channel string, text string) error {\n\tw.rtm.SendMessage(w.rtm.NewOutgoingMessage(text, w.msg.Channel))\n\treturn nil\n}\n\n\/\/ Write writes to Slack\nfunc (w *SlackResponseWriter) Write(text string) error {\n\tw.rtm.SendMessage(w.rtm.NewOutgoingMessage(text, w.msg.Channel))\n\n\treturn nil\n}\n\n\/\/ WriteError writes an error to Slack\nfunc (w *SlackResponseWriter) WriteError(errText string) error {\n\tlines := strings.Split(errText, \"\\n\")\n\tfor _, value := range lines {\n\t\tw.Write(\":poop: \" + value + \" :poop:\")\n\t}\n\treturn nil\n}\n\n\/\/CLIResponseWriter handles writing to the command line\ntype CLIResponseWriter struct {\n}\n\n\/\/ Write writes to CLI, prints channel it would go to if via Slack\nfunc (w *CLIResponseWriter) WriteChannel(channel string, text string) error {\n\tfmt.Println(\">> \"+channel, text)\n\n\treturn nil\n}\n\n\/\/ Write writes to CLI\nfunc (w *CLIResponseWriter) Write(text string) error {\n\tfmt.Println(\">> \", text)\n\n\treturn nil\n}\n\n\/\/ WriteError writes error to CLI\nfunc (w *CLIResponseWriter) WriteError(errText string) error {\n\treturn w.Write(\":poop: \" + errText + \" :poop:\")\n}\n<commit_msg>Fix WriteChannel to actually send to the slack chan<commit_after>package messagerouter\n\nimport (\n\t\"fmt\"\n\t\"github.com\/nlopes\/slack\"\n\t\"strings\"\n)\n\ntype ResponseWriter interface {\n\tWrite(string) error\n\tWriteChannel(string, string) error\n\tWriteError(string) error\n}\n\ntype SlackResponseWriter struct {\n\trtm *slack.RTM\n\tmsg *slack.Msg\n}\n\nfunc NewSlackResponseWriter(msg *slack.Msg, rtm *slack.RTM) *SlackResponseWriter {\n\tw := SlackResponseWriter{}\n\tw.msg = msg\n\tw.rtm = rtm\n\n\treturn &w\n}\n\n\/\/Msg sets the SlackResponseWriter Message\nfunc (w *SlackResponseWriter) Msg(msg *slack.Msg) {\n\tw.msg = msg\n}\n\n\/\/Msg sets the SlackResponseWriter RTM\nfunc (w *SlackResponseWriter) Rtm(rtm *slack.RTM) {\n\tw.rtm = rtm\n}\n\n\/\/ WriteChannel sends message to particular channel\nfunc (w *SlackResponseWriter) WriteChannel(channel string, text string) error {\n\tw.rtm.SendMessage(w.rtm.NewOutgoingMessage(text, channel))\n\treturn nil\n}\n\n\/\/ Write writes to Slack\nfunc (w *SlackResponseWriter) Write(text string) error {\n\tw.rtm.SendMessage(w.rtm.NewOutgoingMessage(text, w.msg.Channel))\n\n\treturn nil\n}\n\n\/\/ WriteError writes an error to Slack\nfunc (w *SlackResponseWriter) WriteError(errText string) error {\n\tlines := strings.Split(errText, \"\\n\")\n\tfor _, value := range lines {\n\t\tw.Write(\":poop: \" + value + \" :poop:\")\n\t}\n\treturn nil\n}\n\n\/\/CLIResponseWriter handles writing to the command line\ntype CLIResponseWriter struct {\n}\n\n\/\/ Write writes to CLI, prints channel it would go to if via Slack\nfunc (w *CLIResponseWriter) WriteChannel(channel string, text string) error {\n\tfmt.Println(\">> \"+channel, text)\n\n\treturn nil\n}\n\n\/\/ Write writes to CLI\nfunc (w *CLIResponseWriter) Write(text string) error {\n\tfmt.Println(\">> \", text)\n\n\treturn nil\n}\n\n\/\/ WriteError writes error to CLI\nfunc (w *CLIResponseWriter) WriteError(errText string) error {\n\treturn w.Write(\":poop: \" + errText + \" :poop:\")\n}\n<|endoftext|>"} {"text":"<commit_before>package controllers\n\nimport (\n\t\"bytes\"\n\t\"github.com\/airdispatch\/dpl\"\n\t\"github.com\/robfig\/revel\"\n\t\"html\/template\"\n\t\"melange\/app\/models\"\n\t\"melange\/app\/routes\"\n\t\"melange\/mailserver\"\n\t\"net\/http\"\n\t\"time\"\n)\n\ntype Dispatch struct {\n\tGorpController\n}\n\nfunc (c Dispatch) Init() revel.Result {\n\tif c.Session[\"user\"] == \"\" {\n\t\treturn c.Redirect(routes.App.Login())\n\t}\n\n\t\/\/ Function to load user apps\n\tapps, err := models.GetUserApps(c.Txn, c.Session)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tc.RenderArgs[\"apps\"] = apps\n\n\tmailserver.InitRouter()\n\n\tc.RenderArgs[\"news\"] = func(m dpl.Message) template.HTML {\n\t\treturn \"hello\"\n\t}\n\n\treturn nil\n}\n\nfunc (d Dispatch) Dashboard() revel.Result {\n\t\/\/ Download all recents from subscribed, download all recents from alerts, sort them chronologically\n\tu, err := d.Txn.Get(&models.User{}, GetUserId(d.Session))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tid, err := models.UserIdentities(u.(*models.User), d.Txn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(id) == 0 {\n\t\tpanic(\"Not enough IDs\")\n\t}\n\n\tmsgs, err := mailserver.Messages(mailserver.LookupRouter,\n\t\td.Txn,\n\t\tid[0],\n\t\tu.(*models.User),\n\t\ttrue, true, true,\n\t\ttime.Now().Add(-7*24*time.Hour).Unix())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar apps []*models.UserApp\n\t_, err = d.Txn.Select(&apps, \"select * from dispatch_app where userid = $1\", GetUserId(d.Session))\n\tvar loadedApps []*dpl.PluginInstance = make([]*dpl.PluginInstance, len(apps))\n\tfor i, v := range apps {\n\t\tresp, err := http.Get(v.AppURL)\n\t\tif err != nil {\n\t\t\t\/\/ Couldn't load that application\n\t\t\tresp.Body.Close()\n\t\t\tcontinue\n\t\t}\n\t\to, err := dpl.ParseDPLStream(resp.Body)\n\t\tif err != nil {\n\t\t\t\/\/ Couldn't parse that application\n\t\t\tresp.Body.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\tplugin := o.CreateInstance(&PluginHost{\n\t\t\tUser: u.(*models.User),\n\t\t\tTxn: d.Txn,\n\t\t\tR: mailserver.LookupRouter,\n\t\t}, nil)\n\t\tloadedApps[i] = plugin\n\t\tresp.Body.Close()\n\t}\n\n\ttype DisplayMessage struct {\n\t\tmailserver.MelangeMessage\n\t\tDisplay template.HTML\n\t}\n\trecents := make([]DisplayMessage, 0)\n\tfor _, v := range msgs {\n\t\trendered := false\n\t\tfor _, a := range loadedApps {\n\t\t\tfor _, t := range a.Tag {\n\t\t\t\tif t.FeedAction != \"\" {\n\t\t\t\t\tmatch := true\n\t\t\t\t\tfor _, f := range t.Fields {\n\t\t\t\t\t\tif !v.Has(f.Name) && !f.Optional {\n\t\t\t\t\t\t\tmatch = false\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif match {\n\t\t\t\t\t\t\/\/ Do Something\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif !rendered {\n\t\t\tt, _ := template.New(\"\").Parse(`{{ range .Components }}<p><strong>{{ .Key }}<\/strong> {{ .String }}<\/p>{{ end }}`)\n\t\t\tvar b bytes.Buffer\n\t\t\tt.Execute(&b, v)\n\t\t\trecents = append(recents, DisplayMessage{v, template.HTML(b.String())})\n\t\t}\n\t}\n\n\treturn d.Render(recents)\n}\n\nfunc (d Dispatch) Profile() revel.Result {\n\tuser, err := d.Txn.Get(&models.User{}, GetUserId(d.Session))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tid, err := models.UserIdentities(user.(*models.User), d.Txn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tmessages, err := mailserver.Messages(mailserver.LookupRouter,\n\t\td.Txn,\n\t\tid[0],\n\t\tuser.(*models.User),\n\t\tfalse, false, true,\n\t\ttime.Now().Add(-7*24*time.Hour).Unix())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn d.Render(messages, user)\n}\n\nfunc (d Dispatch) All() revel.Result {\n\tu, err := d.Txn.Get(&models.User{}, GetUserId(d.Session))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tid, err := models.UserIdentities(u.(*models.User), d.Txn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(id) == 0 {\n\t\tpanic(\"Not enough IDs\")\n\t}\n\n\trecents, err := mailserver.Messages(mailserver.LookupRouter,\n\t\td.Txn,\n\t\tid[0],\n\t\tu.(*models.User),\n\t\ttrue, true, true,\n\t\t0)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn d.Render(recents)\n}\n\nfunc (d Dispatch) Applications() revel.Result {\n\treturn d.Render()\n}\n\nfunc (d Dispatch) AddApplication(url string) revel.Result {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\td.Flash.Error(\"Unable to download application via supplied url.\")\n\t\treturn d.Redirect(routes.Dispatch.Applications())\n\t}\n\tdefer resp.Body.Close()\n\n\tplugin, err := dpl.ParseDPLStream(resp.Body)\n\tif err != nil {\n\t\td.Flash.Error(\"Plugin is not valid.\")\n\t\treturn d.Redirect(routes.Dispatch.Applications())\n\t}\n\n\tin := &models.UserApp{\n\t\tUserId: GetUserId(d.Session),\n\t\tAppURL: url,\n\t\tName: plugin.Name,\n\t\tPath: plugin.Path,\n\t}\n\terr = d.Txn.Insert(in)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn d.Redirect(routes.Loader.LoadAppDefault(plugin.Name))\n}\n\nfunc (d Dispatch) UninstallApplication(app string) revel.Result {\n\tu := GetUserId(d.Session)\n\n\tvar apps []*models.UserApp\n\t_, err := d.Txn.Select(&apps, \"select * from dispatch_app where userid = $1 and UPPER(name) = UPPER($2)\", u, app)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(apps) != 1 {\n\t\tpanic(len(apps))\n\t}\n\n\ttoDelete := apps[0]\n\tcount, err := d.Txn.Delete(toDelete)\n\tif err != nil || count != 1 {\n\t\tpanic(err)\n\t}\n\n\treturn d.Redirect(routes.Dispatch.Applications())\n}\n\nfunc (c Dispatch) Account() revel.Result {\n\tif c.Session[\"user\"] == \"\" {\n\t\treturn c.Redirect(routes.App.Login())\n\t}\n\tvar users []*models.User\n\n\t_, err := c.Txn.Select(&users, \"select * from dispatch_user where userid = $1\", GetUserId(c.Session))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tuser := users[0]\n\n\tvar subscriptions []*models.UserSubscription\n\t_, err = c.Txn.Select(&subscriptions, \"select * from dispatch_subscription where userid = $1\", user.UserId)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar identities []*models.Identity\n\t_, err = c.Txn.Select(&identities, \"select * from dispatch_identity where userid = $1\", user.UserId)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn c.Render(user, subscriptions, identities)\n}\n\nfunc (c Dispatch) AddSubscription(address string) revel.Result {\n\t\/\/ TODO: Verify the Address Somehow...\n\tmailserver.InitRouter()\n\t_, err := mailserver.LookupRouter.LookupAlias(address)\n\tif err != nil {\n\t\tc.Flash.Error(\"Unable to find that address. It has not been added.\")\n\t\treturn c.Redirect(routes.Dispatch.Account())\n\t}\n\n\tuser := GetUserId(c.Session)\n\tnewSub := &models.UserSubscription{\n\t\tUserId: user,\n\t\tAddress: address,\n\t}\n\n\terr = c.Txn.Insert(newSub)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn c.Redirect(routes.Dispatch.Account())\n}\n\nfunc (d Dispatch) RemoveSubscription(id int) revel.Result {\n\tu := GetUserId(d.Session)\n\n\tvar apps []*models.UserSubscription\n\t_, err := d.Txn.Select(&apps, \"select * from dispatch_subscription where subscriptionid = $1 and userid = $2\", id, u)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(apps) != 1 {\n\t\tpanic(len(apps))\n\t}\n\n\ttoDelete := apps[0]\n\tcount, err := d.Txn.Delete(toDelete)\n\tif err != nil || count != 1 {\n\t\tpanic(err)\n\t}\n\n\treturn d.Redirect(routes.Dispatch.Account())\n}\n\nfunc (d Dispatch) RegisterIdentity(id int) revel.Result {\n\tu := GetUserId(d.Session)\n\n\tvar ids []*models.Identity\n\t_, err := d.Txn.Select(&ids, \"select * from dispatch_identity where identityid = $1 and userid = $2\", id, u)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(ids) != 1 {\n\t\tpanic(len(ids))\n\t}\n\n\ttoRegister := ids[0]\n\n\tmailserver.InitRouter()\n\tdid, err := toRegister.ToDispatch()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdid.SetLocation(revel.Config.StringDefault(\"server.location\", \"\"))\n\n\terr = mailserver.RegistrationRouter.Register(did, toRegister.Alias)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn d.Redirect(routes.Dispatch.Account())\n}\n\nfunc (d Dispatch) ProcessAccount(name string, username string, password1 string, password2 string, password string) revel.Result {\n\tu, err := d.Txn.Get(&models.User{}, GetUserId(d.Session))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tuser := u.(*models.User)\n\tuser.Name = name\n\n\tif user.VerifyPassword(password) {\n\t\tif password1 == password2 {\n\t\t\tuser.UpdatePassword(password1)\n\t\t} else {\n\t\t\td.Flash.Error(\"New passwords do not match.\")\n\t\t}\n\t} else {\n\t\tif password != \"\" {\n\t\t\td.Flash.Error(\"Current Password is not correct. Did not update username or password.\")\n\t\t}\n\t}\n\n\tuser.Save(d.Txn)\n\treturn d.Redirect(routes.Dispatch.Account())\n}\n<commit_msg>Fixed Error with Response Object<commit_after>package controllers\n\nimport (\n\t\"bytes\"\n\t\"github.com\/airdispatch\/dpl\"\n\t\"github.com\/robfig\/revel\"\n\t\"html\/template\"\n\t\"melange\/app\/models\"\n\t\"melange\/app\/routes\"\n\t\"melange\/mailserver\"\n\t\"net\/http\"\n\t\"time\"\n)\n\ntype Dispatch struct {\n\tGorpController\n}\n\nfunc (c Dispatch) Init() revel.Result {\n\tif c.Session[\"user\"] == \"\" {\n\t\treturn c.Redirect(routes.App.Login())\n\t}\n\n\t\/\/ Function to load user apps\n\tapps, err := models.GetUserApps(c.Txn, c.Session)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tc.RenderArgs[\"apps\"] = apps\n\n\tmailserver.InitRouter()\n\n\tc.RenderArgs[\"news\"] = func(m dpl.Message) template.HTML {\n\t\treturn \"hello\"\n\t}\n\n\treturn nil\n}\n\nfunc (d Dispatch) Dashboard() revel.Result {\n\t\/\/ Download all recents from subscribed, download all recents from alerts, sort them chronologically\n\tu, err := d.Txn.Get(&models.User{}, GetUserId(d.Session))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tid, err := models.UserIdentities(u.(*models.User), d.Txn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(id) == 0 {\n\t\tpanic(\"Not enough IDs\")\n\t}\n\n\tmsgs, err := mailserver.Messages(mailserver.LookupRouter,\n\t\td.Txn,\n\t\tid[0],\n\t\tu.(*models.User),\n\t\ttrue, true, true,\n\t\ttime.Now().Add(-7*24*time.Hour).Unix())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar apps []*models.UserApp\n\t_, err = d.Txn.Select(&apps, \"select * from dispatch_app where userid = $1\", GetUserId(d.Session))\n\tvar loadedApps []*dpl.PluginInstance = make([]*dpl.PluginInstance, len(apps))\n\tfor i, v := range apps {\n\t\tresp, err := http.Get(v.AppURL)\n\t\tif err != nil {\n\t\t\t\/\/ Couldn't load that application\n\t\t\tcontinue\n\t\t}\n\t\to, err := dpl.ParseDPLStream(resp.Body)\n\t\tif err != nil {\n\t\t\t\/\/ Couldn't parse that application\n\t\t\tresp.Body.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\tplugin := o.CreateInstance(&PluginHost{\n\t\t\tUser: u.(*models.User),\n\t\t\tTxn: d.Txn,\n\t\t\tR: mailserver.LookupRouter,\n\t\t}, nil)\n\t\tloadedApps[i] = plugin\n\t\tresp.Body.Close()\n\t}\n\n\ttype DisplayMessage struct {\n\t\tmailserver.MelangeMessage\n\t\tDisplay template.HTML\n\t}\n\trecents := make([]DisplayMessage, 0)\n\tfor _, v := range msgs {\n\t\trendered := false\n\t\tfor _, a := range loadedApps {\n\t\t\tfor _, t := range a.Tag {\n\t\t\t\tif t.FeedAction != \"\" {\n\t\t\t\t\tmatch := true\n\t\t\t\t\tfor _, f := range t.Fields {\n\t\t\t\t\t\tif !v.Has(f.Name) && !f.Optional {\n\t\t\t\t\t\t\tmatch = false\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif match {\n\t\t\t\t\t\t\/\/ Do Something\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif !rendered {\n\t\t\tt, _ := template.New(\"\").Parse(`{{ range .Components }}<p><strong>{{ .Key }}<\/strong> {{ .String }}<\/p>{{ end }}`)\n\t\t\tvar b bytes.Buffer\n\t\t\tt.Execute(&b, v)\n\t\t\trecents = append(recents, DisplayMessage{v, template.HTML(b.String())})\n\t\t}\n\t}\n\n\treturn d.Render(recents)\n}\n\nfunc (d Dispatch) Profile() revel.Result {\n\tuser, err := d.Txn.Get(&models.User{}, GetUserId(d.Session))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tid, err := models.UserIdentities(user.(*models.User), d.Txn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tmessages, err := mailserver.Messages(mailserver.LookupRouter,\n\t\td.Txn,\n\t\tid[0],\n\t\tuser.(*models.User),\n\t\tfalse, false, true,\n\t\ttime.Now().Add(-7*24*time.Hour).Unix())\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn d.Render(messages, user)\n}\n\nfunc (d Dispatch) All() revel.Result {\n\tu, err := d.Txn.Get(&models.User{}, GetUserId(d.Session))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tid, err := models.UserIdentities(u.(*models.User), d.Txn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(id) == 0 {\n\t\tpanic(\"Not enough IDs\")\n\t}\n\n\trecents, err := mailserver.Messages(mailserver.LookupRouter,\n\t\td.Txn,\n\t\tid[0],\n\t\tu.(*models.User),\n\t\ttrue, true, true,\n\t\t0)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn d.Render(recents)\n}\n\nfunc (d Dispatch) Applications() revel.Result {\n\treturn d.Render()\n}\n\nfunc (d Dispatch) AddApplication(url string) revel.Result {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\td.Flash.Error(\"Unable to download application via supplied url.\")\n\t\treturn d.Redirect(routes.Dispatch.Applications())\n\t}\n\tdefer resp.Body.Close()\n\n\tplugin, err := dpl.ParseDPLStream(resp.Body)\n\tif err != nil {\n\t\td.Flash.Error(\"Plugin is not valid.\")\n\t\treturn d.Redirect(routes.Dispatch.Applications())\n\t}\n\n\tin := &models.UserApp{\n\t\tUserId: GetUserId(d.Session),\n\t\tAppURL: url,\n\t\tName: plugin.Name,\n\t\tPath: plugin.Path,\n\t}\n\terr = d.Txn.Insert(in)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn d.Redirect(routes.Loader.LoadAppDefault(plugin.Name))\n}\n\nfunc (d Dispatch) UninstallApplication(app string) revel.Result {\n\tu := GetUserId(d.Session)\n\n\tvar apps []*models.UserApp\n\t_, err := d.Txn.Select(&apps, \"select * from dispatch_app where userid = $1 and UPPER(name) = UPPER($2)\", u, app)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(apps) != 1 {\n\t\tpanic(len(apps))\n\t}\n\n\ttoDelete := apps[0]\n\tcount, err := d.Txn.Delete(toDelete)\n\tif err != nil || count != 1 {\n\t\tpanic(err)\n\t}\n\n\treturn d.Redirect(routes.Dispatch.Applications())\n}\n\nfunc (c Dispatch) Account() revel.Result {\n\tif c.Session[\"user\"] == \"\" {\n\t\treturn c.Redirect(routes.App.Login())\n\t}\n\tvar users []*models.User\n\n\t_, err := c.Txn.Select(&users, \"select * from dispatch_user where userid = $1\", GetUserId(c.Session))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tuser := users[0]\n\n\tvar subscriptions []*models.UserSubscription\n\t_, err = c.Txn.Select(&subscriptions, \"select * from dispatch_subscription where userid = $1\", user.UserId)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar identities []*models.Identity\n\t_, err = c.Txn.Select(&identities, \"select * from dispatch_identity where userid = $1\", user.UserId)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn c.Render(user, subscriptions, identities)\n}\n\nfunc (c Dispatch) AddSubscription(address string) revel.Result {\n\t\/\/ TODO: Verify the Address Somehow...\n\tmailserver.InitRouter()\n\t_, err := mailserver.LookupRouter.LookupAlias(address)\n\tif err != nil {\n\t\tc.Flash.Error(\"Unable to find that address. It has not been added.\")\n\t\treturn c.Redirect(routes.Dispatch.Account())\n\t}\n\n\tuser := GetUserId(c.Session)\n\tnewSub := &models.UserSubscription{\n\t\tUserId: user,\n\t\tAddress: address,\n\t}\n\n\terr = c.Txn.Insert(newSub)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn c.Redirect(routes.Dispatch.Account())\n}\n\nfunc (d Dispatch) RemoveSubscription(id int) revel.Result {\n\tu := GetUserId(d.Session)\n\n\tvar apps []*models.UserSubscription\n\t_, err := d.Txn.Select(&apps, \"select * from dispatch_subscription where subscriptionid = $1 and userid = $2\", id, u)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(apps) != 1 {\n\t\tpanic(len(apps))\n\t}\n\n\ttoDelete := apps[0]\n\tcount, err := d.Txn.Delete(toDelete)\n\tif err != nil || count != 1 {\n\t\tpanic(err)\n\t}\n\n\treturn d.Redirect(routes.Dispatch.Account())\n}\n\nfunc (d Dispatch) RegisterIdentity(id int) revel.Result {\n\tu := GetUserId(d.Session)\n\n\tvar ids []*models.Identity\n\t_, err := d.Txn.Select(&ids, \"select * from dispatch_identity where identityid = $1 and userid = $2\", id, u)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif len(ids) != 1 {\n\t\tpanic(len(ids))\n\t}\n\n\ttoRegister := ids[0]\n\n\tmailserver.InitRouter()\n\tdid, err := toRegister.ToDispatch()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdid.SetLocation(revel.Config.StringDefault(\"server.location\", \"\"))\n\n\terr = mailserver.RegistrationRouter.Register(did, toRegister.Alias)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn d.Redirect(routes.Dispatch.Account())\n}\n\nfunc (d Dispatch) ProcessAccount(name string, username string, password1 string, password2 string, password string) revel.Result {\n\tu, err := d.Txn.Get(&models.User{}, GetUserId(d.Session))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tuser := u.(*models.User)\n\tuser.Name = name\n\n\tif user.VerifyPassword(password) {\n\t\tif password1 == password2 {\n\t\t\tuser.UpdatePassword(password1)\n\t\t} else {\n\t\t\td.Flash.Error(\"New passwords do not match.\")\n\t\t}\n\t} else {\n\t\tif password != \"\" {\n\t\t\td.Flash.Error(\"Current Password is not correct. Did not update username or password.\")\n\t\t}\n\t}\n\n\tuser.Save(d.Txn)\n\treturn d.Redirect(routes.Dispatch.Account())\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 <chaishushan{AT}gmail.com>. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage webp\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n)\n\nvar (\n\t_ color.Color = (*RGBColor)(nil)\n\t_ image.Image = (*RGBImage)(nil)\n)\n\nvar RGBModel color.Model = color.ModelFunc(rgbModel)\n\ntype RGBColor struct {\n\tR, G, B uint8\n}\n\nfunc (c RGBColor) RGBA() (r, g, b, a uint32) {\n\tr = uint32(c.R)\n\tr |= r << 8\n\tg = uint32(c.G)\n\tg |= g << 8\n\tb = uint32(c.B)\n\tb |= b << 8\n\ta = 0xffff\n\treturn\n}\n\nfunc rgbModel(c color.Color) color.Color {\n\tif _, ok := c.(RGBColor); ok {\n\t\treturn c\n\t}\n\tr, g, b, _ := c.RGBA()\n\treturn RGBColor{R: uint8(r >> 8), G: uint8(g >> 8), B: uint8(b >> 8)}\n}\n\ntype RGBImage struct {\n\tPix []uint8\n\tStride int\n\tRect image.Rectangle\n}\n\nfunc (p *RGBImage) ColorModel() color.Model { return color.RGBAModel }\n\nfunc (p *RGBImage) Bounds() image.Rectangle { return p.Rect }\n\nfunc (p *RGBImage) At(x, y int) color.Color {\n\treturn p.RGBAt(x, y)\n}\n\nfunc (p *RGBImage) RGBAt(x, y int) RGBColor {\n\tif !(image.Point{x, y}.In(p.Rect)) {\n\t\treturn RGBColor{}\n\t}\n\ti := p.PixOffset(x, y)\n\treturn RGBColor{\n\t\tR: p.Pix[i+0],\n\t\tG: p.Pix[i+1],\n\t\tB: p.Pix[i+2],\n\t}\n}\n\n\/\/ PixOffset returns the index of the first element of Pix that corresponds to\n\/\/ the pixel at (x, y).\nfunc (p *RGBImage) PixOffset(x, y int) int {\n\treturn (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*3\n}\n\nfunc (p *RGBImage) Set(x, y int, c color.Color) {\n\tif !(image.Point{x, y}.In(p.Rect)) {\n\t\treturn\n\t}\n\ti := p.PixOffset(x, y)\n\tc1 := RGBModel.Convert(c).(RGBColor)\n\tp.Pix[i+0] = c1.R\n\tp.Pix[i+1] = c1.G\n\tp.Pix[i+2] = c1.B\n\treturn\n}\n\nfunc (p *RGBImage) SetRGB(x, y int, c RGBColor) {\n\tif !(image.Point{x, y}.In(p.Rect)) {\n\t\treturn\n\t}\n\ti := p.PixOffset(x, y)\n\tp.Pix[i+0] = c.R\n\tp.Pix[i+1] = c.G\n\tp.Pix[i+2] = c.B\n\treturn\n}\n\n\/\/ SubImage returns an image representing the portion of the image p visible\n\/\/ through r. The returned value shares pixels with the original image.\nfunc (p *RGBImage) SubImage(r image.Rectangle) image.Image {\n\tr = r.Intersect(p.Rect)\n\t\/\/ If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside\n\t\/\/ either r1 or r2 if the intersection is empty. Without explicitly checking for\n\t\/\/ this, the Pix[i:] expression below can panic.\n\tif r.Empty() {\n\t\treturn &RGBImage{}\n\t}\n\ti := p.PixOffset(r.Min.X, r.Min.Y)\n\treturn &RGBImage{\n\t\tPix: p.Pix[i:],\n\t\tStride: p.Stride,\n\t\tRect: r,\n\t}\n}\n\n\/\/ Opaque scans the entire image and reports whether it is fully opaque.\nfunc (p *RGBImage) Opaque() bool {\n\treturn true\n}\n\n\/\/ NewRGBImage returns a new RGBImage with the given bounds.\nfunc NewRGBImage(r image.Rectangle) *RGBImage {\n\tw, h := r.Dx(), r.Dy()\n\tpix := make([]uint8, 3*w*h)\n\treturn &RGBImage{\n\t\tPix: pix,\n\t\tStride: 3 * w,\n\t\tRect: r,\n\t}\n}\n\nfunc NewRGBImageFrom(m image.Image) *RGBImage {\n\tif m, ok := m.(*RGBImage); ok {\n\t\treturn m\n\t}\n\n\t\/\/ convert to RGBImage\n\tb := m.Bounds()\n\trgb := NewRGBImage(b)\n\tfor y := b.Min.Y; y < b.Max.Y; y++ {\n\t\tfor x := b.Min.X; x < b.Max.X; x++ {\n\t\t\tpr, pg, pb, _ := m.At(x, y).RGBA()\n\t\t\trgb.SetRGB(x, y, RGBColor{\n\t\t\t\tR: uint8(pr >> 8),\n\t\t\t\tG: uint8(pg >> 8),\n\t\t\t\tB: uint8(pb >> 8),\n\t\t\t})\n\t\t}\n\t}\n\treturn rgb\n}\n<commit_msg>sync RGBImage type, see https:\/\/github.com\/chai2010\/image<commit_after>\/\/ Copyright 2014 <chaishushan{AT}gmail.com>. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage webp\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n)\n\nvar (\n\t_ RGBImager = (*RGBImage)(nil)\n\t_ image.Image = (*RGBImage)(nil)\n)\n\ntype RGBImager interface {\n\timage.Image\n\tRGBAt(x, y int) [3]uint8\n\tSetRGB(x, y int, c [3]uint8)\n}\n\ntype RGBImage struct {\n\tPix []uint8\n\tStride int\n\tRect image.Rectangle\n}\n\nfunc (p *RGBImage) ColorModel() color.Model { return color.RGBAModel }\n\nfunc (p *RGBImage) Bounds() image.Rectangle { return p.Rect }\n\nfunc (p *RGBImage) At(x, y int) color.Color {\n\tif !(image.Point{x, y}.In(p.Rect)) {\n\t\treturn color.RGBA{}\n\t}\n\ti := p.PixOffset(x, y)\n\treturn color.RGBA{\n\t\tR: p.Pix[i+0],\n\t\tG: p.Pix[i+1],\n\t\tB: p.Pix[i+2],\n\t\tA: 0xff,\n\t}\n}\n\nfunc (p *RGBImage) RGBAt(x, y int) [3]uint8 {\n\tif !(image.Point{x, y}.In(p.Rect)) {\n\t\treturn [3]uint8{}\n\t}\n\ti := p.PixOffset(x, y)\n\treturn [3]uint8{\n\t\tp.Pix[i+0],\n\t\tp.Pix[i+1],\n\t\tp.Pix[i+2],\n\t}\n}\n\n\/\/ PixOffset returns the index of the first element of Pix that corresponds to\n\/\/ the pixel at (x, y).\nfunc (p *RGBImage) PixOffset(x, y int) int {\n\treturn (y-p.Rect.Min.Y)*p.Stride + (x-p.Rect.Min.X)*3\n}\n\nfunc (p *RGBImage) Set(x, y int, c color.Color) {\n\tif !(image.Point{x, y}.In(p.Rect)) {\n\t\treturn\n\t}\n\ti := p.PixOffset(x, y)\n\tc1 := color.RGBAModel.Convert(c).(color.RGBA)\n\tp.Pix[i+0] = c1.R\n\tp.Pix[i+1] = c1.G\n\tp.Pix[i+2] = c1.B\n\treturn\n}\n\nfunc (p *RGBImage) SetRGB(x, y int, c [3]uint8) {\n\tif !(image.Point{x, y}.In(p.Rect)) {\n\t\treturn\n\t}\n\ti := p.PixOffset(x, y)\n\tp.Pix[i+0] = c[0]\n\tp.Pix[i+1] = c[1]\n\tp.Pix[i+2] = c[2]\n\treturn\n}\n\n\/\/ SubImage returns an image representing the portion of the image p visible\n\/\/ through r. The returned value shares pixels with the original image.\nfunc (p *RGBImage) SubImage(r image.Rectangle) image.Image {\n\tr = r.Intersect(p.Rect)\n\t\/\/ If r1 and r2 are Rectangles, r1.Intersect(r2) is not guaranteed to be inside\n\t\/\/ either r1 or r2 if the intersection is empty. Without explicitly checking for\n\t\/\/ this, the Pix[i:] expression below can panic.\n\tif r.Empty() {\n\t\treturn &RGBImage{}\n\t}\n\ti := p.PixOffset(r.Min.X, r.Min.Y)\n\treturn &RGBImage{\n\t\tPix: p.Pix[i:],\n\t\tStride: p.Stride,\n\t\tRect: r,\n\t}\n}\n\n\/\/ Opaque scans the entire image and reports whether it is fully opaque.\nfunc (p *RGBImage) Opaque() bool {\n\treturn true\n}\n\n\/\/ NewRGBImage returns a new RGBImage with the given bounds.\nfunc NewRGBImage(r image.Rectangle) *RGBImage {\n\tw, h := r.Dx(), r.Dy()\n\tpix := make([]uint8, 3*w*h)\n\treturn &RGBImage{\n\t\tPix: pix,\n\t\tStride: 3 * w,\n\t\tRect: r,\n\t}\n}\n\nfunc NewRGBImageFrom(m image.Image) *RGBImage {\n\tif m, ok := m.(*RGBImage); ok {\n\t\treturn m\n\t}\n\n\t\/\/ convert to RGBImage\n\tb := m.Bounds()\n\trgb := NewRGBImage(b)\n\tfor y := b.Min.Y; y < b.Max.Y; y++ {\n\t\tfor x := b.Min.X; x < b.Max.X; x++ {\n\t\t\tpr, pg, pb, _ := m.At(x, y).RGBA()\n\t\t\trgb.SetRGB(x, y, [3]uint8{\n\t\t\t\tuint8(pr >> 8),\n\t\t\t\tuint8(pg >> 8),\n\t\t\t\tuint8(pb >> 8),\n\t\t\t})\n\t\t}\n\t}\n\treturn rgb\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto\/md5\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"text\/template\"\n\t\"time\"\n\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/rancher\/go-rancher-metadata\/metadata\"\n\t\"gopkg.in\/pipe.v2\"\n)\n\nconst (\n\tDEFAULT_TEMPLATE_DIR = \"\/etc\/rancher-meta-template\/templates\"\n)\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\nfunc createTemplateCtx(meta *metadata.Client) (interface{}, error) {\n\n\tservices, err := meta.GetServices()\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"get services\")\n\t}\n\tcontainers, err := meta.GetContainers()\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"get containers\")\n\t}\n\n\tcontainersW := make([]ContainerWrap, 0)\n\tfor _, container := range containers {\n\t\tcw := ContainerWrap{container}\n\t\tcontainersW = append(containersW, cw)\n\t}\n\n\tservicesW := make([]ServiceWrap, 0)\n\tfor _, service := range services {\n\t\tsw := ServiceWrap{service}\n\t\tservicesW = append(servicesW, sw)\n\t}\n\n\tctx := map[string]interface{}{\n\t\t\"Services\": servicesW,\n\t\t\"Containers\": containersW,\n\t}\n\n\treturn ctx, nil\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\nfunc computeMd5(filePath string) (string, error) {\n\tif _, err := os.Stat(filePath); err != nil {\n\t\treturn \"\", nil\n\t}\n\n\tfile, err := os.Open(filePath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer file.Close()\n\n\thash := md5.New()\n\tif _, err := io.Copy(hash, file); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"%x\", hash.Sum(nil)), nil\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\nfunc appendCommandPipe(cmd Command, pipes []pipe.Pipe) []pipe.Pipe {\n\tif cmd.Cmd != \"\" {\n\t\tif cmd.Args != nil {\n\t\t\treturn append(pipes, pipe.Exec(cmd.Cmd, cmd.Args...))\n\t\t}\n\t\treturn append(pipes, pipe.Exec(cmd.Cmd))\n\t}\n\n\treturn pipes\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\nfunc processTemplateSet(meta *metadata.Client, set TemplateSet) error {\n\n\tif _, err := os.Stat(set.TemplatePath); err != nil {\n\t\tprintWarning(\"template path %q is not available: skip\", set.TemplatePath)\n\t\treturn nil\n\t}\n\n\tbuf, err := ioutil.ReadFile(set.TemplatePath)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"read template file\")\n\t}\n\n\ttempl := template.New(set.Name).Funcs(newFuncMap())\n\ttmpl, err := templ.Parse(string(buf))\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"parse template\")\n\t}\n\n\tlastMd5 := \"\"\n\tif _, err = os.Stat(set.DestinationPath); err == nil {\n\t\tlmd5, err := computeMd5(set.DestinationPath)\n\t\tif err != nil {\n\t\t\treturn errors.Annotate(err, \"get last md5 hash\")\n\t\t}\n\t\tlastMd5 = lmd5\n\t}\n\n\tctx, err := createTemplateCtx(meta)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"create template context\")\n\t}\n\n\tvar newBuf bytes.Buffer\n\twr := bufio.NewWriter(&newBuf)\n\tif err := tmpl.Execute(wr, ctx); err != nil {\n\t\treturn errors.Annotate(err, \"execute template\")\n\t}\n\n\tif err := wr.Flush(); err != nil {\n\t\treturn errors.Annotate(err, \"flush tmpl writer\")\n\t}\n\n\thash := md5.New()\n\thash.Write(newBuf.Bytes())\n\tcurrentMd5 := fmt.Sprintf(\"%x\", hash.Sum(nil))\n\n\tif lastMd5 == currentMd5 {\n\t\treturn nil\n\t}\n\n\tif lastMd5 == \"\" {\n\t\tprintInfo(\"create output file\")\n\t} else {\n\t\tprintInfo(\"output file needs refresh\")\n\t\tprintInfo(\"last md5 sum is %q\", lastMd5)\n\t\tprintInfo(\"current md5 sum is %q\", currentMd5)\n\t}\n\n\tf, err := os.Create(set.DestinationPath)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"create destination file\")\n\t}\n\tdefer f.Close()\n\n\tw := bufio.NewWriter(f)\n\tif _, err := w.Write(newBuf.Bytes()); err != nil {\n\t\treturn errors.Annotate(err, \"write to output\")\n\t}\n\n\tif err := w.Flush(); err != nil {\n\t\treturn errors.Annotate(err, \"flush out writer\")\n\t}\n\n\tprintInfo(\"process check & run\")\n\n\tpipes := make([]pipe.Pipe, 0)\n\tpipes = appendCommandPipe(set.Check, pipes)\n\tpipes = appendCommandPipe(set.Run, pipes)\n\n\tscript := pipe.Script(pipes...)\n\tif output, err := pipe.CombinedOutput(script); err != nil {\n\t\tprintInfo(string(output))\n\t\treturn errors.Annotate(err, \"check & run\")\n\t}\n\n\treturn nil\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\nfunc processTemplates(cnf *Config) error {\n\n\tapiURL := fmt.Sprintf(\"%s%s\", cnf.Host, cnf.Prefix)\n\tmeta := metadata.NewClient(apiURL)\n\n\tprintInfo(\"connect rancher metadata url: %q\", apiURL)\n\n\t\/\/expand template paths\n\tprintDebug(\"expand template paths\")\n\tfor idx, set := range cnf.Sets {\n\t\tif !path.IsAbs(set.TemplatePath) {\n\t\t\tcnf.Sets[idx].TemplatePath = path.Join(DEFAULT_TEMPLATE_DIR, set.TemplatePath)\n\t\t}\n\t}\n\n\tversion := \"init\"\n\tfor {\n\t\tnewVersion, err := meta.GetVersion()\n\t\tif err != nil {\n\t\t\treturn errors.Annotate(err, \"get version\")\n\t\t}\n\n\t\tif newVersion == version {\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\tversion = newVersion\n\t\tprintInfo(\"metadata changed - refresh config\")\n\n\t\tfor _, set := range cnf.Sets {\n\t\t\tif err := processTemplateSet(meta, set); err != nil {\n\t\t\t\treturn errors.Annotate(err, \"process template set\")\n\t\t\t}\n\t\t}\n\n\t\ttime.Sleep(time.Duration(cnf.Repeat) * time.Second)\n\t}\n\n\treturn nil\n}\n<commit_msg>autopush@1447082325<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto\/md5\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"text\/template\"\n\t\"time\"\n\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/rancher\/go-rancher-metadata\/metadata\"\n\t\"gopkg.in\/pipe.v2\"\n)\n\nconst (\n\tDEFAULT_TEMPLATE_DIR = \"\/etc\/rancher-meta-template\/templates\"\n)\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\nfunc createTemplateCtx(meta *metadata.Client) (interface{}, error) {\n\n\tservices, err := meta.GetServices()\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"get services\")\n\t}\n\tcontainers, err := meta.GetContainers()\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"get containers\")\n\t}\n\n\tcontainersW := make([]ContainerWrap, 0)\n\tfor _, container := range containers {\n\t\tcw := ContainerWrap{container}\n\t\tcontainersW = append(containersW, cw)\n\t}\n\n\tservicesW := make([]ServiceWrap, 0)\n\tfor _, service := range services {\n\t\tsw := ServiceWrap{service}\n\t\tservicesW = append(servicesW, sw)\n\t}\n\n\tctx := map[string]interface{}{\n\t\t\"Services\": servicesW,\n\t\t\"Containers\": containersW,\n\t}\n\n\treturn ctx, nil\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\nfunc computeMd5(filePath string) (string, error) {\n\tif _, err := os.Stat(filePath); err != nil {\n\t\treturn \"\", nil\n\t}\n\n\tfile, err := os.Open(filePath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer file.Close()\n\n\thash := md5.New()\n\tif _, err := io.Copy(hash, file); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"%x\", hash.Sum(nil)), nil\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\nfunc appendCommandPipe(cmd Command, pipes []pipe.Pipe) []pipe.Pipe {\n\tif cmd.Cmd != \"\" {\n\t\tif cmd.Args != nil {\n\t\t\treturn append(pipes, pipe.Exec(cmd.Cmd, cmd.Args...))\n\t\t}\n\t\treturn append(pipes, pipe.Exec(cmd.Cmd))\n\t}\n\n\treturn pipes\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\nfunc processTemplateSet(meta *metadata.Client, set TemplateSet) error {\n\n\tif _, err := os.Stat(set.TemplatePath); err != nil {\n\t\tprintWarning(\"template path %q is not available: skip\", set.TemplatePath)\n\t\treturn nil\n\t}\n\n\tbuf, err := ioutil.ReadFile(set.TemplatePath)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"read template file\")\n\t}\n\n\ttempl := template.New(set.Name).Funcs(newFuncMap())\n\ttmpl, err := templ.Parse(string(buf))\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"parse template\")\n\t}\n\n\tlastMd5 := \"\"\n\tif _, err = os.Stat(set.DestinationPath); err == nil {\n\t\tlmd5, err := computeMd5(set.DestinationPath)\n\t\tif err != nil {\n\t\t\treturn errors.Annotate(err, \"get last md5 hash\")\n\t\t}\n\t\tlastMd5 = lmd5\n\t}\n\n\tctx, err := createTemplateCtx(meta)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"create template context\")\n\t}\n\n\tvar newBuf bytes.Buffer\n\twr := bufio.NewWriter(&newBuf)\n\tif err := tmpl.Execute(wr, ctx); err != nil {\n\t\treturn errors.Annotate(err, \"execute template\")\n\t}\n\n\tif err := wr.Flush(); err != nil {\n\t\treturn errors.Annotate(err, \"flush tmpl writer\")\n\t}\n\n\thash := md5.New()\n\thash.Write(newBuf.Bytes())\n\tcurrentMd5 := fmt.Sprintf(\"%x\", hash.Sum(nil))\n\n\tif lastMd5 == currentMd5 {\n\t\treturn nil\n\t}\n\n\tif lastMd5 == \"\" {\n\t\tprintInfo(\"create output file\")\n\t} else {\n\t\tprintInfo(\"last md5 sum is %q\", lastMd5)\n\t\tprintInfo(\"current md5 sum is %q\", currentMd5)\n\t\tprintInfo(\"output file needs refresh\")\n\t}\n\n\tf, err := os.Create(set.DestinationPath)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"create destination file\")\n\t}\n\tdefer f.Close()\n\n\tw := bufio.NewWriter(f)\n\tif _, err := w.Write(newBuf.Bytes()); err != nil {\n\t\treturn errors.Annotate(err, \"write to output\")\n\t}\n\n\tif err := w.Flush(); err != nil {\n\t\treturn errors.Annotate(err, \"flush out writer\")\n\t}\n\n\tprintInfo(\"process check & run\")\n\n\tpipes := make([]pipe.Pipe, 0)\n\tpipes = appendCommandPipe(set.Check, pipes)\n\tpipes = appendCommandPipe(set.Run, pipes)\n\n\tscript := pipe.Script(pipes...)\n\tif output, err := pipe.CombinedOutput(script); err != nil {\n\t\tprintInfo(string(output))\n\t\treturn errors.Annotate(err, \"check & run\")\n\t}\n\n\treturn nil\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\nfunc processTemplates(cnf *Config) error {\n\n\tapiURL := fmt.Sprintf(\"%s%s\", cnf.Host, cnf.Prefix)\n\tmeta := metadata.NewClient(apiURL)\n\n\tprintInfo(\"connect rancher metadata url: %q\", apiURL)\n\n\t\/\/expand template paths\n\tprintDebug(\"expand template paths\")\n\tfor idx, set := range cnf.Sets {\n\t\tif !path.IsAbs(set.TemplatePath) {\n\t\t\tcnf.Sets[idx].TemplatePath = path.Join(DEFAULT_TEMPLATE_DIR, set.TemplatePath)\n\t\t}\n\t}\n\n\tversion := \"init\"\n\tfor {\n\t\tnewVersion, err := meta.GetVersion()\n\t\tif err != nil {\n\t\t\treturn errors.Annotate(err, \"get version\")\n\t\t}\n\n\t\tif newVersion == version {\n\t\t\ttime.Sleep(5 * time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\tversion = newVersion\n\t\tprintInfo(\"metadata changed - refresh config\")\n\n\t\tfor _, set := range cnf.Sets {\n\t\t\tif err := processTemplateSet(meta, set); err != nil {\n\t\t\t\treturn errors.Annotate(err, \"process template set\")\n\t\t\t}\n\t\t}\n\n\t\ttime.Sleep(time.Duration(cnf.Repeat) * time.Second)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/pprof\"\n\t\"os\"\n\t\"time\"\n\n\t\"cloud.google.com\/go\/pubsub\"\n\tcloudtrace \"cloud.google.com\/go\/trace\"\n\t\"code.cloudfoundry.org\/clock\"\n\t\"code.cloudfoundry.org\/lager\"\n\t\"github.com\/google\/go-github\/github\"\n\tflags \"github.com\/jessevdk\/go-flags\"\n\t\"github.com\/pivotal-cf\/paraphernalia\/operate\/admin\"\n\t\"github.com\/pivotal-cf\/paraphernalia\/secure\/tlsconfig\"\n\t\"github.com\/pivotal-cf\/paraphernalia\/serve\/grpcrunner\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/grouper\"\n\t\"github.com\/tedsuo\/ifrit\/sigmon\"\n\t\"golang.org\/x\/net\/trace\"\n\t\"golang.org\/x\/oauth2\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/credentials\"\n\n\t\"cred-alert\/config\"\n\t\"cred-alert\/crypto\"\n\t\"cred-alert\/db\"\n\t\"cred-alert\/db\/migrations\"\n\t\"cred-alert\/gitclient\"\n\t\"cred-alert\/metrics\"\n\t\"cred-alert\/notifications\"\n\t\"cred-alert\/queue\"\n\t\"cred-alert\/revok\"\n\t\"cred-alert\/revok\/stats\"\n\t\"cred-alert\/revokpb\"\n\t\"cred-alert\/search\"\n\t\"cred-alert\/sniff\"\n\t\"rolodex\/rolodexpb\"\n)\n\nvar info = admin.ServiceInfo{\n\tName: \"revok\",\n\tDescription: \"A service which fetches new Git commits and scans them for credentials.\",\n\tTeam: \"PCF Security Enablement\",\n}\n\nfunc main() {\n\tvar cfg *config.WorkerConfig\n\tvar flagOpts config.WorkerOpts\n\n\tlogger := lager.NewLogger(\"revok-worker\")\n\tlogger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))\n\n\tlogger.Info(\"starting\")\n\n\t_, err := flags.Parse(&flagOpts)\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n\n\tbs, err := ioutil.ReadFile(string(flagOpts.ConfigFile))\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-open-config-file\", err)\n\t\tos.Exit(1)\n\t}\n\n\tcfg, err = config.LoadWorkerConfig(bs)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-load-config-file\", err)\n\t\tos.Exit(1)\n\t}\n\n\terrs := cfg.Validate()\n\tif errs != nil {\n\t\tfor _, err := range errs {\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\tos.Exit(1)\n\t}\n\n\tif cfg.Metrics.SentryDSN != \"\" {\n\t\tlogger.RegisterSink(revok.NewSentrySink(cfg.Metrics.SentryDSN, cfg.Metrics.Environment))\n\t}\n\n\tworkdir := cfg.WorkDir\n\t_, err = os.Lstat(workdir)\n\tif err != nil {\n\t\tlog.Fatalf(\"workdir error: %s\", err)\n\t}\n\n\tdbCertificate, dbCaCertPool := loadCerts(\n\t\tcfg.MySQL.CertificatePath,\n\t\tcfg.MySQL.PrivateKeyPath,\n\t\tcfg.MySQL.PrivateKeyPassphrase,\n\t\tcfg.MySQL.CACertificatePath,\n\t)\n\n\tdbURI := db.NewDSN(\n\t\tcfg.MySQL.Username,\n\t\tcfg.MySQL.Password,\n\t\tcfg.MySQL.DBName,\n\t\tcfg.MySQL.Hostname,\n\t\tint(cfg.MySQL.Port),\n\t\tcfg.MySQL.ServerName,\n\t\tdbCertificate,\n\t\tdbCaCertPool,\n\t)\n\n\tdatabase, err := migrations.LockDBAndMigrate(logger, \"mysql\", dbURI)\n\tif err != nil {\n\t\tlog.Fatalf(\"db error: %s\", err)\n\t}\n\n\tdatabase.LogMode(false)\n\n\tclk := clock.NewClock()\n\n\tcloneMsgCh := make(chan revok.CloneMsg)\n\n\tscanRepository := db.NewScanRepository(database, clk)\n\trepositoryRepository := db.NewRepositoryRepository(database)\n\tfetchRepository := db.NewFetchRepository(database)\n\tcredentialRepository := db.NewCredentialRepository(database)\n\tbranchRepository := db.NewBranchRepository(database)\n\n\temitter := metrics.BuildEmitter(cfg.Metrics.DatadogAPIKey, cfg.Metrics.Environment)\n\tgitClient := gitclient.New(cfg.GitHub.PrivateKeyPath, cfg.GitHub.PublicKeyPath)\n\trepoWhitelist := notifications.BuildWhitelist(cfg.Whitelist...)\n\tformatter := notifications.NewSlackNotificationFormatter()\n\n\tslackHTTPClient := &http.Client{\n\t\tTimeout: 3 * time.Second,\n\t}\n\tnotifier := notifications.NewSlackNotifier(clk, slackHTTPClient, formatter)\n\n\tcertificate, caCertPool := loadCerts(\n\t\tcfg.Identity.CertificatePath,\n\t\tcfg.Identity.PrivateKeyPath,\n\t\tcfg.Identity.PrivateKeyPassphrase,\n\t\tcfg.Identity.CACertificatePath,\n\t)\n\n\trolodexServerAddr := fmt.Sprintf(\"%s:%d\", cfg.Rolodex.ServerAddress, cfg.Rolodex.ServerPort)\n\n\ttlsConfig := tlsconfig.Build(\n\t\ttlsconfig.WithPivotalDefaults(),\n\t\ttlsconfig.WithIdentity(certificate),\n\t)\n\n\ttransportCreds := credentials.NewTLS(tlsConfig.Client(tlsconfig.WithAuthority(caCertPool)))\n\n\ttraceClient, err := cloudtrace.NewClient(context.Background(), cfg.Trace.ProjectName)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-create-trace-client\", err)\n\t}\n\n\tconn, err := grpc.Dial(\n\t\trolodexServerAddr,\n\t\tgrpc.WithDialer(keepAliveDial),\n\t\tgrpc.WithTransportCredentials(transportCreds),\n\t\tgrpc.WithUnaryInterceptor(cloudtrace.GRPCClientInterceptor()),\n\t)\n\n\trolodexClient := rolodexpb.NewRolodexClient(conn)\n\n\tteamURLs := notifications.NewTeamURLs(\n\t\tcfg.Slack.DefaultURL,\n\t\tcfg.Slack.DefaultChannel,\n\t\tcfg.Slack.TeamURLs,\n\t)\n\n\taddressBook := notifications.NewRolodex(\n\t\trolodexClient,\n\t\tteamURLs,\n\t)\n\n\trouter := notifications.NewRouter(\n\t\tnotifier,\n\t\taddressBook,\n\t\trepoWhitelist,\n\t)\n\n\tsniffer := sniff.NewDefaultSniffer()\n\tscanner := revok.NewScanner(\n\t\tgitClient,\n\t\trepositoryRepository,\n\t\tscanRepository,\n\t\tcredentialRepository,\n\t\tsniffer,\n\t)\n\n\tnotificationComposer := revok.NewNotificationComposer(\n\t\trepositoryRepository,\n\t\trouter,\n\t\tscanner,\n\t)\n\n\tchangeFetcher := revok.NewChangeFetcher(\n\t\tlogger,\n\t\tgitClient,\n\t\tnotificationComposer,\n\t\trepositoryRepository,\n\t\tfetchRepository,\n\t\temitter,\n\t)\n\n\tchangeScheduleRunner := revok.NewScheduleRunner(logger)\n\n\tchangeScheduler := revok.NewChangeScheduler(\n\t\tlogger,\n\t\trepositoryRepository,\n\t\tchangeScheduleRunner,\n\t\tchangeFetcher,\n\t)\n\n\tcloner := revok.NewCloner(\n\t\tlogger,\n\t\tworkdir,\n\t\tcloneMsgCh,\n\t\tgitClient,\n\t\trepositoryRepository,\n\t\tnotificationComposer,\n\t\temitter,\n\t\tchangeScheduler,\n\t)\n\n\tdirscanUpdater := revok.NewRescanner(\n\t\tlogger,\n\t\tscanRepository,\n\t\tcredentialRepository,\n\t\tscanner,\n\t\trouter,\n\t\temitter,\n\t)\n\n\tstatsReporter := stats.NewReporter(\n\t\tlogger,\n\t\tclk,\n\t\t60*time.Second,\n\t\tdb.NewStatsRepository(database),\n\t\temitter,\n\t)\n\n\theadCredentialCounter := revok.NewHeadCredentialCounter(\n\t\tlogger,\n\t\tbranchRepository,\n\t\trepositoryRepository,\n\t\tclk,\n\t\tcfg.CredentialCounterInterval,\n\t\tgitClient,\n\t\tsniffer,\n\t)\n\n\tdebug := admin.Runner(\n\t\t\"6060\",\n\t\tadmin.WithInfo(info),\n\t\tadmin.WithUptime(),\n\t)\n\n\tmembers := []grouper.Member{\n\t\t{\"cloner\", cloner},\n\t\t{\"dirscan-updater\", dirscanUpdater},\n\t\t{\"stats-reporter\", statsReporter},\n\t\t{\"head-credential-counter\", headCredentialCounter},\n\t\t{\"change-schedule-runner\", changeScheduleRunner},\n\t\t{\"debug\", debug},\n\t}\n\n\tlooper := gitclient.NewLooper()\n\tsearcher := search.NewSearcher(repositoryRepository, looper)\n\thandler := revok.NewServer(logger, searcher, repositoryRepository, branchRepository)\n\n\tserverTls := tlsConfig.Server(tlsconfig.WithClientAuthentication(caCertPool))\n\n\tgrpcServer := grpcrunner.New(\n\t\tlogger,\n\t\tfmt.Sprintf(\"%s:%d\", cfg.API.BindIP, cfg.API.BindPort),\n\t\tfunc(server *grpc.Server) {\n\t\t\trevokpb.RegisterRevokServer(server, handler)\n\t\t},\n\t\tgrpc.Creds(credentials.NewTLS(serverTls)),\n\t)\n\n\tmembers = append(members, grouper.Member{\n\t\tName: \"grpc-server\",\n\t\tRunner: grpcServer,\n\t})\n\n\tpubSubClient, err := pubsub.NewClient(context.Background(), cfg.PubSub.ProjectName)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed\", err)\n\t\tos.Exit(1)\n\t}\n\n\tsubscription := pubSubClient.Subscription(cfg.PubSub.FetchHint.Subscription)\n\n\tpublicKey, err := crypto.ReadRSAPublicKey(cfg.PubSub.PublicKeyPath)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed\", err)\n\t\tos.Exit(1)\n\t}\n\tpushEventProcessor := queue.NewPushEventProcessor(\n\t\tchangeFetcher,\n\t\tcrypto.NewRSAVerifier(publicKey),\n\t\temitter,\n\t\tclk,\n\t)\n\n\tmembers = append(members, grouper.Member{\n\t\tName: \"github-hint-handler\",\n\t\tRunner: queue.NewPubSubSubscriber(logger, subscription, pushEventProcessor, emitter, traceClient),\n\t})\n\n\tif cfg.GitHub.AccessToken != \"\" {\n\t\tgithubHTTPClient := &http.Client{\n\t\t\tTimeout: 30 * time.Second,\n\t\t\tTransport: &oauth2.Transport{\n\t\t\t\tSource: oauth2.StaticTokenSource(\n\t\t\t\t\t&oauth2.Token{AccessToken: cfg.GitHub.AccessToken},\n\t\t\t\t),\n\t\t\t\tBase: &http.Transport{\n\t\t\t\t\tDisableKeepAlives: true,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tghClient := revok.NewGitHubClient(github.NewClient(githubHTTPClient))\n\n\t\trepoDiscoverer := revok.NewRepoDiscoverer(\n\t\t\tlogger,\n\t\t\tworkdir,\n\t\t\tcloneMsgCh,\n\t\t\tghClient,\n\t\t\tclk,\n\t\t\tcfg.RepositoryDiscovery.Interval,\n\t\t\tcfg.RepositoryDiscovery.Organizations,\n\t\t\tcfg.RepositoryDiscovery.Users,\n\t\t\trepositoryRepository,\n\t\t)\n\n\t\tmembers = append(members, grouper.Member{\n\t\t\tName: \"repo-discoverer\",\n\t\t\tRunner: repoDiscoverer,\n\t\t})\n\t}\n\n\tstartupTasks := []grouper.Member{\n\t\t{\n\t\t\tName: \"schedule-fetches\",\n\t\t\tRunner: changeScheduler,\n\t\t},\n\t}\n\n\tsystem := []grouper.Member{\n\t\t{\n\t\t\tName: \"servers\",\n\t\t\tRunner: grouper.NewParallel(os.Interrupt, members),\n\t\t},\n\t\t{\n\t\t\tName: \"startup-tasks\",\n\t\t\tRunner: grouper.NewParallel(os.Interrupt, startupTasks),\n\t\t},\n\t}\n\n\trunner := sigmon.New(grouper.NewOrdered(os.Interrupt, system))\n\n\terr = <-ifrit.Invoke(runner).Wait()\n\tif err != nil {\n\t\tlog.Fatalf(\"failed-to-start: %s\", err)\n\t}\n}\n\nfunc loadCerts(certificatePath, privateKeyPath, privateKeyPassphrase, caCertificatePath string) (tls.Certificate, *x509.CertPool) {\n\tcertificate, err := config.LoadCertificate(\n\t\tcertificatePath,\n\t\tprivateKeyPath,\n\t\tprivateKeyPassphrase,\n\t)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tcaCertPool, err := config.LoadCertificatePool(caCertificatePath)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\treturn certificate, caCertPool\n}\n\nfunc keepAliveDial(addr string, timeout time.Duration) (net.Conn, error) {\n\td := net.Dialer{\n\t\tTimeout: timeout,\n\t\tKeepAlive: 60 * time.Second,\n\t}\n\treturn d.Dial(\"tcp\", addr)\n}\n\nfunc debugHandler() http.Handler {\n\tdebugRouter := http.NewServeMux()\n\tdebugRouter.Handle(\"\/debug\/pprof\/\", http.HandlerFunc(pprof.Index))\n\tdebugRouter.Handle(\"\/debug\/pprof\/cmdline\", http.HandlerFunc(pprof.Cmdline))\n\tdebugRouter.Handle(\"\/debug\/pprof\/profile\", http.HandlerFunc(pprof.Profile))\n\tdebugRouter.Handle(\"\/debug\/pprof\/symbol\", http.HandlerFunc(pprof.Symbol))\n\tdebugRouter.Handle(\"\/debug\/pprof\/trace\", http.HandlerFunc(pprof.Trace))\n\n\tdebugRouter.HandleFunc(\"\/debug\/requests\", func(w http.ResponseWriter, req *http.Request) {\n\t\tany, sensitive := trace.AuthRequest(req)\n\t\tif !any {\n\t\t\thttp.Error(w, \"not allowed\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"text\/html; charset=utf-8\")\n\t\ttrace.Render(w, req, sensitive)\n\t})\n\n\tdebugRouter.HandleFunc(\"\/debug\/events\", func(w http.ResponseWriter, req *http.Request) {\n\t\tany, sensitive := trace.AuthRequest(req)\n\t\tif !any {\n\t\t\thttp.Error(w, \"not allowed\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\t\tw.Header().Set(\"Content-Type\", \"text\/html; charset=utf-8\")\n\t\ttrace.RenderEvents(w, req, sensitive)\n\t})\n\n\treturn debugRouter\n}\n<commit_msg>remove unused function<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"cloud.google.com\/go\/pubsub\"\n\tcloudtrace \"cloud.google.com\/go\/trace\"\n\t\"code.cloudfoundry.org\/clock\"\n\t\"code.cloudfoundry.org\/lager\"\n\t\"github.com\/google\/go-github\/github\"\n\tflags \"github.com\/jessevdk\/go-flags\"\n\t\"github.com\/pivotal-cf\/paraphernalia\/operate\/admin\"\n\t\"github.com\/pivotal-cf\/paraphernalia\/secure\/tlsconfig\"\n\t\"github.com\/pivotal-cf\/paraphernalia\/serve\/grpcrunner\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/grouper\"\n\t\"github.com\/tedsuo\/ifrit\/sigmon\"\n\t\"golang.org\/x\/oauth2\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/credentials\"\n\n\t\"cred-alert\/config\"\n\t\"cred-alert\/crypto\"\n\t\"cred-alert\/db\"\n\t\"cred-alert\/db\/migrations\"\n\t\"cred-alert\/gitclient\"\n\t\"cred-alert\/metrics\"\n\t\"cred-alert\/notifications\"\n\t\"cred-alert\/queue\"\n\t\"cred-alert\/revok\"\n\t\"cred-alert\/revok\/stats\"\n\t\"cred-alert\/revokpb\"\n\t\"cred-alert\/search\"\n\t\"cred-alert\/sniff\"\n\t\"rolodex\/rolodexpb\"\n)\n\nvar info = admin.ServiceInfo{\n\tName: \"revok\",\n\tDescription: \"A service which fetches new Git commits and scans them for credentials.\",\n\tTeam: \"PCF Security Enablement\",\n}\n\nfunc main() {\n\tvar cfg *config.WorkerConfig\n\tvar flagOpts config.WorkerOpts\n\n\tlogger := lager.NewLogger(\"revok-worker\")\n\tlogger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.DEBUG))\n\n\tlogger.Info(\"starting\")\n\n\t_, err := flags.Parse(&flagOpts)\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n\n\tbs, err := ioutil.ReadFile(string(flagOpts.ConfigFile))\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-open-config-file\", err)\n\t\tos.Exit(1)\n\t}\n\n\tcfg, err = config.LoadWorkerConfig(bs)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-load-config-file\", err)\n\t\tos.Exit(1)\n\t}\n\n\terrs := cfg.Validate()\n\tif errs != nil {\n\t\tfor _, err := range errs {\n\t\t\tfmt.Println(err.Error())\n\t\t}\n\t\tos.Exit(1)\n\t}\n\n\tif cfg.Metrics.SentryDSN != \"\" {\n\t\tlogger.RegisterSink(revok.NewSentrySink(cfg.Metrics.SentryDSN, cfg.Metrics.Environment))\n\t}\n\n\tworkdir := cfg.WorkDir\n\t_, err = os.Lstat(workdir)\n\tif err != nil {\n\t\tlog.Fatalf(\"workdir error: %s\", err)\n\t}\n\n\tdbCertificate, dbCaCertPool := loadCerts(\n\t\tcfg.MySQL.CertificatePath,\n\t\tcfg.MySQL.PrivateKeyPath,\n\t\tcfg.MySQL.PrivateKeyPassphrase,\n\t\tcfg.MySQL.CACertificatePath,\n\t)\n\n\tdbURI := db.NewDSN(\n\t\tcfg.MySQL.Username,\n\t\tcfg.MySQL.Password,\n\t\tcfg.MySQL.DBName,\n\t\tcfg.MySQL.Hostname,\n\t\tint(cfg.MySQL.Port),\n\t\tcfg.MySQL.ServerName,\n\t\tdbCertificate,\n\t\tdbCaCertPool,\n\t)\n\n\tdatabase, err := migrations.LockDBAndMigrate(logger, \"mysql\", dbURI)\n\tif err != nil {\n\t\tlog.Fatalf(\"db error: %s\", err)\n\t}\n\n\tdatabase.LogMode(false)\n\n\tclk := clock.NewClock()\n\n\tcloneMsgCh := make(chan revok.CloneMsg)\n\n\tscanRepository := db.NewScanRepository(database, clk)\n\trepositoryRepository := db.NewRepositoryRepository(database)\n\tfetchRepository := db.NewFetchRepository(database)\n\tcredentialRepository := db.NewCredentialRepository(database)\n\tbranchRepository := db.NewBranchRepository(database)\n\n\temitter := metrics.BuildEmitter(cfg.Metrics.DatadogAPIKey, cfg.Metrics.Environment)\n\tgitClient := gitclient.New(cfg.GitHub.PrivateKeyPath, cfg.GitHub.PublicKeyPath)\n\trepoWhitelist := notifications.BuildWhitelist(cfg.Whitelist...)\n\tformatter := notifications.NewSlackNotificationFormatter()\n\n\tslackHTTPClient := &http.Client{\n\t\tTimeout: 3 * time.Second,\n\t}\n\tnotifier := notifications.NewSlackNotifier(clk, slackHTTPClient, formatter)\n\n\tcertificate, caCertPool := loadCerts(\n\t\tcfg.Identity.CertificatePath,\n\t\tcfg.Identity.PrivateKeyPath,\n\t\tcfg.Identity.PrivateKeyPassphrase,\n\t\tcfg.Identity.CACertificatePath,\n\t)\n\n\trolodexServerAddr := fmt.Sprintf(\"%s:%d\", cfg.Rolodex.ServerAddress, cfg.Rolodex.ServerPort)\n\n\ttlsConfig := tlsconfig.Build(\n\t\ttlsconfig.WithPivotalDefaults(),\n\t\ttlsconfig.WithIdentity(certificate),\n\t)\n\n\ttransportCreds := credentials.NewTLS(tlsConfig.Client(tlsconfig.WithAuthority(caCertPool)))\n\n\ttraceClient, err := cloudtrace.NewClient(context.Background(), cfg.Trace.ProjectName)\n\tif err != nil {\n\t\tlogger.Error(\"failed-to-create-trace-client\", err)\n\t}\n\n\tconn, err := grpc.Dial(\n\t\trolodexServerAddr,\n\t\tgrpc.WithDialer(keepAliveDial),\n\t\tgrpc.WithTransportCredentials(transportCreds),\n\t\tgrpc.WithUnaryInterceptor(cloudtrace.GRPCClientInterceptor()),\n\t)\n\n\trolodexClient := rolodexpb.NewRolodexClient(conn)\n\n\tteamURLs := notifications.NewTeamURLs(\n\t\tcfg.Slack.DefaultURL,\n\t\tcfg.Slack.DefaultChannel,\n\t\tcfg.Slack.TeamURLs,\n\t)\n\n\taddressBook := notifications.NewRolodex(\n\t\trolodexClient,\n\t\tteamURLs,\n\t)\n\n\trouter := notifications.NewRouter(\n\t\tnotifier,\n\t\taddressBook,\n\t\trepoWhitelist,\n\t)\n\n\tsniffer := sniff.NewDefaultSniffer()\n\tscanner := revok.NewScanner(\n\t\tgitClient,\n\t\trepositoryRepository,\n\t\tscanRepository,\n\t\tcredentialRepository,\n\t\tsniffer,\n\t)\n\n\tnotificationComposer := revok.NewNotificationComposer(\n\t\trepositoryRepository,\n\t\trouter,\n\t\tscanner,\n\t)\n\n\tchangeFetcher := revok.NewChangeFetcher(\n\t\tlogger,\n\t\tgitClient,\n\t\tnotificationComposer,\n\t\trepositoryRepository,\n\t\tfetchRepository,\n\t\temitter,\n\t)\n\n\tchangeScheduleRunner := revok.NewScheduleRunner(logger)\n\n\tchangeScheduler := revok.NewChangeScheduler(\n\t\tlogger,\n\t\trepositoryRepository,\n\t\tchangeScheduleRunner,\n\t\tchangeFetcher,\n\t)\n\n\tcloner := revok.NewCloner(\n\t\tlogger,\n\t\tworkdir,\n\t\tcloneMsgCh,\n\t\tgitClient,\n\t\trepositoryRepository,\n\t\tnotificationComposer,\n\t\temitter,\n\t\tchangeScheduler,\n\t)\n\n\tdirscanUpdater := revok.NewRescanner(\n\t\tlogger,\n\t\tscanRepository,\n\t\tcredentialRepository,\n\t\tscanner,\n\t\trouter,\n\t\temitter,\n\t)\n\n\tstatsReporter := stats.NewReporter(\n\t\tlogger,\n\t\tclk,\n\t\t60*time.Second,\n\t\tdb.NewStatsRepository(database),\n\t\temitter,\n\t)\n\n\theadCredentialCounter := revok.NewHeadCredentialCounter(\n\t\tlogger,\n\t\tbranchRepository,\n\t\trepositoryRepository,\n\t\tclk,\n\t\tcfg.CredentialCounterInterval,\n\t\tgitClient,\n\t\tsniffer,\n\t)\n\n\tdebug := admin.Runner(\n\t\t\"6060\",\n\t\tadmin.WithInfo(info),\n\t\tadmin.WithUptime(),\n\t)\n\n\tmembers := []grouper.Member{\n\t\t{\"cloner\", cloner},\n\t\t{\"dirscan-updater\", dirscanUpdater},\n\t\t{\"stats-reporter\", statsReporter},\n\t\t{\"head-credential-counter\", headCredentialCounter},\n\t\t{\"change-schedule-runner\", changeScheduleRunner},\n\t\t{\"debug\", debug},\n\t}\n\n\tlooper := gitclient.NewLooper()\n\tsearcher := search.NewSearcher(repositoryRepository, looper)\n\thandler := revok.NewServer(logger, searcher, repositoryRepository, branchRepository)\n\n\tserverTls := tlsConfig.Server(tlsconfig.WithClientAuthentication(caCertPool))\n\n\tgrpcServer := grpcrunner.New(\n\t\tlogger,\n\t\tfmt.Sprintf(\"%s:%d\", cfg.API.BindIP, cfg.API.BindPort),\n\t\tfunc(server *grpc.Server) {\n\t\t\trevokpb.RegisterRevokServer(server, handler)\n\t\t},\n\t\tgrpc.Creds(credentials.NewTLS(serverTls)),\n\t)\n\n\tmembers = append(members, grouper.Member{\n\t\tName: \"grpc-server\",\n\t\tRunner: grpcServer,\n\t})\n\n\tpubSubClient, err := pubsub.NewClient(context.Background(), cfg.PubSub.ProjectName)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed\", err)\n\t\tos.Exit(1)\n\t}\n\n\tsubscription := pubSubClient.Subscription(cfg.PubSub.FetchHint.Subscription)\n\n\tpublicKey, err := crypto.ReadRSAPublicKey(cfg.PubSub.PublicKeyPath)\n\tif err != nil {\n\t\tlogger.Fatal(\"failed\", err)\n\t\tos.Exit(1)\n\t}\n\tpushEventProcessor := queue.NewPushEventProcessor(\n\t\tchangeFetcher,\n\t\tcrypto.NewRSAVerifier(publicKey),\n\t\temitter,\n\t\tclk,\n\t)\n\n\tmembers = append(members, grouper.Member{\n\t\tName: \"github-hint-handler\",\n\t\tRunner: queue.NewPubSubSubscriber(logger, subscription, pushEventProcessor, emitter, traceClient),\n\t})\n\n\tif cfg.GitHub.AccessToken != \"\" {\n\t\tgithubHTTPClient := &http.Client{\n\t\t\tTimeout: 30 * time.Second,\n\t\t\tTransport: &oauth2.Transport{\n\t\t\t\tSource: oauth2.StaticTokenSource(\n\t\t\t\t\t&oauth2.Token{AccessToken: cfg.GitHub.AccessToken},\n\t\t\t\t),\n\t\t\t\tBase: &http.Transport{\n\t\t\t\t\tDisableKeepAlives: true,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tghClient := revok.NewGitHubClient(github.NewClient(githubHTTPClient))\n\n\t\trepoDiscoverer := revok.NewRepoDiscoverer(\n\t\t\tlogger,\n\t\t\tworkdir,\n\t\t\tcloneMsgCh,\n\t\t\tghClient,\n\t\t\tclk,\n\t\t\tcfg.RepositoryDiscovery.Interval,\n\t\t\tcfg.RepositoryDiscovery.Organizations,\n\t\t\tcfg.RepositoryDiscovery.Users,\n\t\t\trepositoryRepository,\n\t\t)\n\n\t\tmembers = append(members, grouper.Member{\n\t\t\tName: \"repo-discoverer\",\n\t\t\tRunner: repoDiscoverer,\n\t\t})\n\t}\n\n\tstartupTasks := []grouper.Member{\n\t\t{\n\t\t\tName: \"schedule-fetches\",\n\t\t\tRunner: changeScheduler,\n\t\t},\n\t}\n\n\tsystem := []grouper.Member{\n\t\t{\n\t\t\tName: \"servers\",\n\t\t\tRunner: grouper.NewParallel(os.Interrupt, members),\n\t\t},\n\t\t{\n\t\t\tName: \"startup-tasks\",\n\t\t\tRunner: grouper.NewParallel(os.Interrupt, startupTasks),\n\t\t},\n\t}\n\n\trunner := sigmon.New(grouper.NewOrdered(os.Interrupt, system))\n\n\terr = <-ifrit.Invoke(runner).Wait()\n\tif err != nil {\n\t\tlog.Fatalf(\"failed-to-start: %s\", err)\n\t}\n}\n\nfunc loadCerts(certificatePath, privateKeyPath, privateKeyPassphrase, caCertificatePath string) (tls.Certificate, *x509.CertPool) {\n\tcertificate, err := config.LoadCertificate(\n\t\tcertificatePath,\n\t\tprivateKeyPath,\n\t\tprivateKeyPassphrase,\n\t)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tcaCertPool, err := config.LoadCertificatePool(caCertificatePath)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\treturn certificate, caCertPool\n}\n\nfunc keepAliveDial(addr string, timeout time.Duration) (net.Conn, error) {\n\td := net.Dialer{\n\t\tTimeout: timeout,\n\t\tKeepAlive: 60 * time.Second,\n\t}\n\treturn d.Dial(\"tcp\", addr)\n}\n<|endoftext|>"} {"text":"<commit_before>package test\n\nimport (\n\t_ \"github.com\/boredomist\/gob\/parse\"\n\t_ \"os\"\n\t\"testing\"\n)\n\nvar tests = []string{\"convert.b\", \"copy.b\", \"lower.b\", \"snide.b\"}\n\nfunc TestExampleDummy(t *testing.T) {\n}\n\n\/\/ TODO: parser is not quite ready for this, and I'd rather not have every test fail\n\/\/ func TestExamples(t *testing.T) {\n\/\/ \tfor _, test := range tests {\n\/\/ \t\tif file, err := os.Open(test); err != nil {\n\/\/ \t\t\tt.Errorf(\"failed to open test: %s\", err)\n\/\/ \t\t} else {\n\/\/ \t\t\tp := parse.NewParser(test, file)\n\/\/ \t\t\tif _, err := p.Parse(); err != nil {\n\/\/ \t\t\t\tt.Errorf(\"%s failed: %v\", test, err)\n\/\/ \t\t\t}\n\/\/ \t\t}\n\/\/ \t}\n\/\/ }\n<commit_msg>Uncomment (currently failing) tests<commit_after>package test\n\nimport (\n\t\"github.com\/boredomist\/gob\/parse\"\n\t\"os\"\n\t\"testing\"\n)\n\nvar tests = []string{\"convert.b\", \"copy.b\", \"lower.b\", \"snide.b\"}\n\nfunc TestExampleDummy(t *testing.T) {\n}\n\n\/\/ TODO: parser is not quite ready for this, and I'd rather not have every test fail\nfunc TestExamples(t *testing.T) {\n\tfor _, test := range tests {\n\t\tif file, err := os.Open(test); err != nil {\n\t\t\tt.Errorf(\"failed to open test: %s\", err)\n\t\t} else {\n\t\t\tp := parse.NewParser(test, file)\n\t\t\tif _, err := p.Parse(); err != nil {\n\t\t\t\tt.Errorf(\"%s failed: %v\", test, err)\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package disk\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/xephonhq\/xephon-k\/pkg\/storage\/disk\"\n\t\/\/\"encoding\/binary\"\n\t\"fmt\"\n\t\"encoding\/binary\"\n)\n\nconst (\n\tmagicnumber uint64 = 0x786570686F6E2D6B\n)\n\n\/\/ writing series to disk without any compression and then read it out\ntype fileHeader struct {\n\tversion uint8\n\ttimeCompression uint8\n\tvalueCompression uint8\n}\n\n\/\/ NOTE: must pass a pointer of buffer\nfunc (header *fileHeader) write(buf *bytes.Buffer) {\n\tb := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(b, magicnumber)\n\tbuf.Write(b)\n\tbuf.WriteByte(header.version)\n\tbuf.WriteByte(header.timeCompression)\n\tbuf.WriteByte(header.valueCompression)\n}\n\nfunc TestMagicNumber(t *testing.T) {\n\tvar str = \"xephon-k\"\n\tt.Log(len([]byte(str))) \/\/ 8 byte, uint64\n\tt.Log([]byte(str))\n\t\/\/ [120 101 112 104 111 110 45 107]\n\t\/\/ 78 65 70 68 6F 6E 2D 6B\n\tt.Logf(\"% X\", []byte(str))\n\tt.Logf(\"%X\", []byte(str))\n\tt.Log([]byte(str)[0])\n\n\t\/\/ convert the magic number into binary\n\tt.Log(magicnumber)\n\tb := make([]byte, 10)\n\t\/\/ FIXME: it takes 9 byte instead of 8 byte to write a uint64 http:\/\/stackoverflow.com\/questions\/17289898\/does-a-uint64-take-8-bytes-storage\n\tt.Log(binary.PutUvarint(b, magicnumber)) \/\/ 9 instead of 8\n\tt.Log(b)\n\tv, err := binary.ReadUvarint(bytes.NewReader(b))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Log(v)\n\t\/\/ this use 8 byte\n\tbinary.BigEndian.PutUint64(b, magicnumber)\n\tt.Log(b)\n\tt.Log(binary.BigEndian.Uint64(b))\n\n\t\/\/ Uvarint would use less than 8 byte for small value\n\tt.Log(binary.PutUvarint(b, 1)) \/\/ 1\n\tt.Log(binary.PutUvarint(b, 256)) \/\/ 2\n}\n\nfunc TestNoCompress_Header(t *testing.T) {\n\theader := fileHeader{version: 1, timeCompression: disk.CompressionNone, valueCompression: disk.CompressionNone}\n\t\/\/header := fileHeader{version: 1, timeCompression: disk.CompressionGzip, valueCompression: disk.CompressionZlib}\n\ttmpfile, err := ioutil.TempFile(\"\", \"xephon-no-compress\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/defer os.Remove(tmpfile.Name())\n\n\tvar buf bytes.Buffer\n\t\/\/ TODO: Endianness problem https:\/\/github.com\/xephonhq\/xephon-k\/issues\/34\n\t\/\/ but it seems for single uint8, this is not a problem\n\t\/\/binary.Write(&buf, binary.LittleEndian, header.version)\n\t\/\/binary.Write(&buf, binary.LittleEndian, header.timeCompression)\n\t\/\/binary.Write(&buf, binary.LittleEndian, header.valueCompression)\n\n\theader.write(&buf)\n\n\tn, err := tmpfile.Write(buf.Bytes())\n\tt.Logf(\"written %d bytes\\n\", n)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttmpfile.Close()\n\n\t\/\/ read stuff back\n\tf, err := os.Open(tmpfile.Name())\n\treadBuf := make([]byte, 11)\n\tn, err = f.Read(readBuf)\n\tt.Logf(\"read %d bytes\\n\", n)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tf.Close()\n\t\/\/ convert to header\n\tnewHeader := fileHeader{}\n\tif binary.BigEndian.Uint64(readBuf[:8]) != magicnumber {\n\t\tt.Fatal(\"magic number does not match!\")\n\t} else {\n\t\tt.Log(\"magic number match\")\n\t}\n\tnewHeader.version = uint8(readBuf[8])\n\tnewHeader.timeCompression = uint8(readBuf[9])\n\tnewHeader.valueCompression = uint8(readBuf[10])\n\tfmt.Printf(\"version %d, time compression %d, value compression %d\\n\",\n\t\tnewHeader.version, newHeader.timeCompression, newHeader.valueCompression)\n}\n\nfunc TestNoCompress_Block(t *testing.T) {\n\n}\n<commit_msg>[play][disk] Init block writer #32<commit_after>package disk\n\nimport (\n\t\"bytes\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/xephonhq\/xephon-k\/pkg\/storage\/disk\"\n\t\/\/\"encoding\/binary\"\n\t\"fmt\"\n\t\"encoding\/binary\"\n\t\"io\"\n\t\"bufio\"\n\t\"github.com\/xephonhq\/xephon-k\/pkg\/common\"\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\tmagicnumber uint64 = 0x786570686F6E2D6B\n)\n\n\/\/ writing series to disk without any compression and then read it out\ntype fileHeader struct {\n\tversion uint8\n\ttimeCompression uint8\n\tvalueCompression uint8\n}\n\n\/\/ NOTE: must pass a pointer of buffer\nfunc (header *fileHeader) write(buf *bytes.Buffer) {\n\tb := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(b, magicnumber)\n\tbuf.Write(b)\n\tbuf.WriteByte(header.version)\n\tbuf.WriteByte(header.timeCompression)\n\tbuf.WriteByte(header.valueCompression)\n}\n\nfunc (header *fileHeader) Bytes() []byte {\n\tvar buf bytes.Buffer\n\tb := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(b, magicnumber)\n\tbuf.Write(b)\n\tbuf.WriteByte(header.version)\n\tbuf.WriteByte(header.timeCompression)\n\tbuf.WriteByte(header.valueCompression)\n\treturn buf.Bytes()\n}\n\n\/*\n\n file\n\n | magic | version | time compression | value compression | blocks | indexes | footer | magic |\n\n footer\n\n | offset of indexes |\n\n blocks\n | b1 | b2 | b3|\n\n block\n | t1, t2, ... | v1, v2, .... |\n\n indexes\n | num indexes | i1 | i2 ... |\n\n index\n | len | tags | num blocks |b1 offset | b1 size | b1 count | b2 .... |\n *\/\ntype blockWriter struct {\n\theader fileHeader\n\toriginalWriter io.WriteCloser\n\tw *bufio.Writer\n\tn int64\n}\n\nconst intBlock byte = 1\nconst doubleBlock byte = 2\n\ntype indexEntry struct {\n\tblockType byte\n\toffset int64\n\tsize int64\n}\n\nfunc NewBlockWriter(w io.WriteCloser) *blockWriter {\n\treturn &blockWriter{\n\t\toriginalWriter: w,\n\t\tw: bufio.NewWriter(w),\n\t\tn: 0,\n\t}\n}\n\nfunc (w *blockWriter) WriteIntSeries(series *common.IntSeries) error {\n\tn := 0\n\t\/\/ write header if it does not exists\n\tif w.n == 0 {\n\t\thbits, err := w.w.Write(w.header.Bytes())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tn += hbits\n\t}\n\t\/\/ write timestamps and values separately\n\tvar tBuf bytes.Buffer\n\tvar vBuf bytes.Buffer\n\tb := make([]byte, 10)\n\tfor i := 0; i < len(series.Points); i++ {\n\t\twritten := binary.PutVarint(b, series.Points[i].T)\n\t\ttBuf.Write(b[:written])\n\t\twritten = binary.PutVarint(b, series.Points[i].V)\n\t\tvBuf.Write(b[:written])\n\t}\n\ttbits, err := w.w.Write(tBuf.Bytes())\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"fail writing time \")\n\t}\n\tn += tbits\n\tvbits, err := w.w.Write(vBuf.Bytes())\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"fail writing value\")\n\t}\n\tn += vbits\n\t\/\/ TODO: add the index\n\n\tw.n += int64(n)\n\treturn nil\n}\n\nfunc (w *blockWriter) WriteIndex() error {\n\t\/\/ TODO: implementation\n\treturn nil\n}\n\nfunc (w *blockWriter) Flush() error {\n\tif err := w.w.Flush(); err != nil {\n\t\treturn err\n\t}\n\n\tif f, ok := w.originalWriter.(*os.File); ok {\n\t\tif err := f.Sync(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (w *blockWriter) Close() error {\n\tif err := w.Flush(); err != nil {\n\t\treturn err\n\t}\n\treturn w.originalWriter.Close()\n}\n\nfunc TestMagicNumber(t *testing.T) {\n\tvar str = \"xephon-k\"\n\tt.Log(len([]byte(str))) \/\/ 8 byte, uint64\n\tt.Log([]byte(str))\n\t\/\/ [120 101 112 104 111 110 45 107]\n\t\/\/ 78 65 70 68 6F 6E 2D 6B\n\tt.Logf(\"% X\", []byte(str))\n\tt.Logf(\"%X\", []byte(str))\n\tt.Log([]byte(str)[0])\n\n\t\/\/ convert the magic number into binary\n\tt.Log(magicnumber)\n\tb := make([]byte, 10)\n\t\/\/ FIXME: it takes 9 byte instead of 8 byte to write a uint64 http:\/\/stackoverflow.com\/questions\/17289898\/does-a-uint64-take-8-bytes-storage\n\tt.Log(binary.PutUvarint(b, magicnumber)) \/\/ 9 instead of 8\n\tt.Log(b)\n\tv, err := binary.ReadUvarint(bytes.NewReader(b))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tt.Log(v)\n\t\/\/ this use 8 byte\n\tbinary.BigEndian.PutUint64(b, magicnumber)\n\tt.Log(b)\n\tt.Log(binary.BigEndian.Uint64(b))\n\n\t\/\/ Uvarint would use less than 8 byte for small value\n\tt.Log(binary.PutUvarint(b, 1)) \/\/ 1\n\tt.Log(binary.PutUvarint(b, 256)) \/\/ 2\n}\n\nfunc TestNoCompress_Header(t *testing.T) {\n\theader := fileHeader{version: 1, timeCompression: disk.CompressionNone, valueCompression: disk.CompressionNone}\n\t\/\/header := fileHeader{version: 1, timeCompression: disk.CompressionGzip, valueCompression: disk.CompressionZlib}\n\ttmpfile, err := ioutil.TempFile(\"\", \"xephon-no-compress\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdefer os.Remove(tmpfile.Name())\n\n\tvar buf bytes.Buffer\n\t\/\/ TODO: Endianness problem https:\/\/github.com\/xephonhq\/xephon-k\/issues\/34\n\t\/\/ but it seems for single uint8, this is not a problem\n\t\/\/binary.Write(&buf, binary.LittleEndian, header.version)\n\t\/\/binary.Write(&buf, binary.LittleEndian, header.timeCompression)\n\t\/\/binary.Write(&buf, binary.LittleEndian, header.valueCompression)\n\n\theader.write(&buf)\n\n\tn, err := tmpfile.Write(buf.Bytes())\n\tt.Logf(\"written %d bytes\\n\", n)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttmpfile.Close()\n\n\t\/\/ read stuff back\n\tf, err := os.Open(tmpfile.Name())\n\treadBuf := make([]byte, 11)\n\tn, err = f.Read(readBuf)\n\tt.Logf(\"read %d bytes\\n\", n)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tf.Close()\n\t\/\/ convert to header\n\tnewHeader := fileHeader{}\n\tif binary.BigEndian.Uint64(readBuf[:8]) != magicnumber {\n\t\tt.Fatal(\"magic number does not match!\")\n\t} else {\n\t\tt.Log(\"magic number match\")\n\t}\n\tnewHeader.version = uint8(readBuf[8])\n\tnewHeader.timeCompression = uint8(readBuf[9])\n\tnewHeader.valueCompression = uint8(readBuf[10])\n\tfmt.Printf(\"version %d, time compression %d, value compression %d\\n\",\n\t\tnewHeader.version, newHeader.timeCompression, newHeader.valueCompression)\n}\n\nfunc TestNoCompress_Block(t *testing.T) {\n\ttmpfile, err := ioutil.TempFile(\"\", \"xephon-no-compress\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tdefer os.Remove(tmpfile.Name())\n\n\tw := NewBlockWriter(tmpfile)\n\ts := common.NewIntSeries(\"s\")\n\ts.Points = []common.IntPoint{{T: 1359788400000, V: 1}, {T: 1359788500000, V: 2}}\n\tw.WriteIntSeries(s)\n\tt.Log(w.n)\n\tw.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage atomic\n\nimport \"unsafe\"\n\n\/\/go:nosplit\n\/\/go:noinline\nfunc Load(ptr *uint32) uint32 {\n\treturn *ptr\n}\n\n\/\/go:nosplit\n\/\/go:noinline\nfunc Loadp(ptr unsafe.Pointer) unsafe.Pointer {\n\treturn *(*unsafe.Pointer)(ptr)\n}\n\n\/\/go:nosplit\n\/\/go:noinline\nfunc Load64(ptr *uint64) uint64 {\n\treturn *ptr\n}\n\n\/\/go:nosplit\n\/\/go:noinline\nfunc LoadAcq(ptr *uint32) uint32 {\n\treturn *ptr\n}\n\n\/\/go:noinline\n\/\/go:nosplit\nfunc Store(ptr *uint32, val uint32) {\n\t*ptr = val\n}\n\n\/\/go:noinline\n\/\/go:nosplit\nfunc Store64(ptr *uint64, val uint64) {\n\t*ptr = val\n}\n\n\/\/ NO go:noescape annotation; see atomic_pointer.go.\n\/\/go:noinline\n\/\/go:nosplit\nfunc StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) {\n\t*(*uintptr)(ptr) = uintptr(val)\n}\n\n\/\/go:noinline\n\/\/go:nosplit\nfunc StoreRel(ptr *uint32, val uint32) {\n\t*ptr = val\n}\n\n\/\/go:noescape\nfunc And8(ptr *uint8, val uint8)\n\n\/\/go:noescape\nfunc Or8(ptr *uint8, val uint8)\n\n\/\/ NOTE: Do not add atomicxor8 (XOR is not idempotent).\n\n\/\/go:noescape\nfunc Xadd(ptr *uint32, delta int32) uint32\n\n\/\/go:noescape\nfunc Xadd64(ptr *uint64, delta int64) uint64\n\n\/\/go:noescape\nfunc Xadduintptr(ptr *uintptr, delta uintptr) uintptr\n\n\/\/go:noescape\nfunc Xchg(ptr *uint32, new uint32) uint32\n\n\/\/go:noescape\nfunc Xchg64(ptr *uint64, new uint64) uint64\n\n\/\/go:noescape\nfunc Xchguintptr(ptr *uintptr, new uintptr) uintptr\n\n\/\/go:noescape\nfunc Cas64(ptr *uint64, old, new uint64) bool\n\n\/\/go:noescape\nfunc CasRel(ptr *uint32, old, new uint32) bool\n<commit_msg>runtime\/internal\/atomic: fix s390x's StorepNoWB implementation<commit_after>\/\/ Copyright 2016 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage atomic\n\nimport \"unsafe\"\n\n\/\/go:nosplit\n\/\/go:noinline\nfunc Load(ptr *uint32) uint32 {\n\treturn *ptr\n}\n\n\/\/go:nosplit\n\/\/go:noinline\nfunc Loadp(ptr unsafe.Pointer) unsafe.Pointer {\n\treturn *(*unsafe.Pointer)(ptr)\n}\n\n\/\/go:nosplit\n\/\/go:noinline\nfunc Load64(ptr *uint64) uint64 {\n\treturn *ptr\n}\n\n\/\/go:nosplit\n\/\/go:noinline\nfunc LoadAcq(ptr *uint32) uint32 {\n\treturn *ptr\n}\n\n\/\/go:noinline\n\/\/go:nosplit\nfunc Store(ptr *uint32, val uint32) {\n\t*ptr = val\n}\n\n\/\/go:noinline\n\/\/go:nosplit\nfunc Store64(ptr *uint64, val uint64) {\n\t*ptr = val\n}\n\n\/\/go:notinheap\ntype noWB struct{}\n\n\/\/ NO go:noescape annotation; see atomic_pointer.go.\n\/\/go:noinline\n\/\/go:nosplit\nfunc StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer) {\n\t*(**noWB)(ptr) = (*noWB)(val)\n}\n\n\/\/go:noinline\n\/\/go:nosplit\nfunc StoreRel(ptr *uint32, val uint32) {\n\t*ptr = val\n}\n\n\/\/go:noescape\nfunc And8(ptr *uint8, val uint8)\n\n\/\/go:noescape\nfunc Or8(ptr *uint8, val uint8)\n\n\/\/ NOTE: Do not add atomicxor8 (XOR is not idempotent).\n\n\/\/go:noescape\nfunc Xadd(ptr *uint32, delta int32) uint32\n\n\/\/go:noescape\nfunc Xadd64(ptr *uint64, delta int64) uint64\n\n\/\/go:noescape\nfunc Xadduintptr(ptr *uintptr, delta uintptr) uintptr\n\n\/\/go:noescape\nfunc Xchg(ptr *uint32, new uint32) uint32\n\n\/\/go:noescape\nfunc Xchg64(ptr *uint64, new uint64) uint64\n\n\/\/go:noescape\nfunc Xchguintptr(ptr *uintptr, new uintptr) uintptr\n\n\/\/go:noescape\nfunc Cas64(ptr *uint64, old, new uint64) bool\n\n\/\/go:noescape\nfunc CasRel(ptr *uint32, old, new uint32) bool\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Create the lock file, if we need to<commit_after><|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"text\/template\"\n\t\"time\"\n)\n\ntype work struct {\n\tsrc, dst string\n\tt *template.Template\n\tinfo os.FileInfo\n}\n\ntype build struct {\n\tPackage string\n\tDistros []string\n\tVersions []version\n}\n\ntype version struct {\n\tVersion, Revision string\n\tStable bool\n}\n\ntype cfg struct {\n\tversion\n\tDistroName, Arch, DebArch, Package string\n}\n\nvar (\n\tarchitectures = []string{\"amd64\", \"arm\", \"arm64\"}\n\tserverDistros = []string{\"xenial\"}\n\tallDistros = []string{\"xenial\", \"jessie\", \"precise\", \"sid\", \"stretch\", \"trusty\",\n\t\t\"utopic\", \"vivid\", \"wheezy\", \"wily\", \"yakkety\"}\n\n\tbuiltins = map[string]interface{}{\n\t\t\"date\": func() string {\n\t\t\treturn time.Now().Format(time.RFC1123Z)\n\t\t},\n\t}\n\n\tkeepTmp = flag.Bool(\"keep_tmp\", false, \"keep tmp dir after build\")\n)\n\nfunc runCommand(pwd string, command string, cmdArgs ...string) error {\n\tcmd := exec.Command(command, cmdArgs...)\n\tif len(pwd) != 0 {\n\t\tcmd.Dir = pwd\n\t}\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c cfg) run() error {\n\tlog.Printf(\"!!!!!!!!! doing: %#v\", c)\n\tvar w []work\n\n\tsrcdir := filepath.Join(c.DistroName, c.Package)\n\tdstdir, err := ioutil.TempDir(os.TempDir(), \"debs\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !*keepTmp {\n\t\tdefer os.RemoveAll(dstdir)\n\t}\n\n\t\/\/ allow base package dir to by a symlink so we can reuse packages\n\t\/\/ that don't change between distros\n\trealSrcdir, err := filepath.EvalSymlinks(srcdir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := filepath.Walk(realSrcdir, func(srcfile string, f os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdstfile := filepath.Join(dstdir, srcfile[len(realSrcdir):])\n\t\tif dstfile == dstdir {\n\t\t\treturn nil\n\t\t}\n\t\tif f.IsDir() {\n\t\t\tlog.Printf(dstfile)\n\t\t\treturn os.Mkdir(dstfile, f.Mode())\n\t\t}\n\t\tt, err := template.\n\t\t\tNew(\"\").\n\t\t\tFuncs(builtins).\n\t\t\tOption(\"missingkey=error\").\n\t\t\tParseFiles(srcfile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tw = append(w, work{\n\t\t\tsrc: srcfile,\n\t\t\tdst: dstfile,\n\t\t\tt: t.Templates()[0],\n\t\t\tinfo: f,\n\t\t})\n\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, w := range w {\n\t\tlog.Printf(\"w: %#v\", w)\n\t\tif err := func() error {\n\t\t\tf, err := os.OpenFile(w.dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer f.Close()\n\n\t\t\tif err := w.t.Execute(f, c); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := os.Chmod(w.dst, w.info.Mode()); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = runCommand(dstdir, \"dpkg-buildpackage\", \"-us\", \"-uc\", \"-b\", \"-a\"+c.DebArch)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdstParts := []string{\"bin\"}\n\tif c.Stable {\n\t\tdstParts = append(dstParts, \"stable\")\n\t} else {\n\t\tdstParts = append(dstParts, \"unstable\")\n\t}\n\tdstParts = append(dstParts, c.DistroName)\n\n\tdstPath := filepath.Join(dstParts...)\n\tos.MkdirAll(dstPath, 0777)\n\n\tfileName := fmt.Sprintf(\"%s_%s-%s_%s.deb\", c.Package, c.Version, c.Revision, c.DebArch)\n\terr = runCommand(\"\", \"mv\", filepath.Join(\"\/tmp\", fileName), dstPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc walkBuilds(builds []build, f func(pkg, distro, arch string, v version) error) error {\n\tfor _, a := range architectures {\n\t\tfor _, b := range builds {\n\t\t\tfor _, d := range b.Distros {\n\t\t\t\tfor _, v := range b.Versions {\n\t\t\t\t\tif err := f(b.Package, d, a, v); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tbuilds := []build{\n\t\t{\n\t\t\tPackage: \"kubectl\",\n\t\t\tDistros: allDistros,\n\t\t\tVersions: []version{\n\t\t\t\t{\n\t\t\t\t\tVersion: \"1.4.0\",\n\t\t\t\t\tRevision: \"00\",\n\t\t\t\t\tStable: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tVersion: \"1.4.0\",\n\t\t\t\t\tRevision: \"00\",\n\t\t\t\t\tStable: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tPackage: \"kubelet\",\n\t\t\tDistros: serverDistros,\n\t\t\tVersions: []version{\n\t\t\t\t{\n\t\t\t\t\tVersion: \"1.4.0\",\n\t\t\t\t\tRevision: \"00\",\n\t\t\t\t\tStable: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tVersion: \"1.4.0\",\n\t\t\t\t\tRevision: \"00\",\n\t\t\t\t\tStable: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tPackage: \"kubernetes-cni\",\n\t\t\tDistros: serverDistros,\n\t\t\tVersions: []version{\n\t\t\t\t{\n\t\t\t\t\tVersion: \"0.3.0.1-07a8a2\",\n\t\t\t\t\tRevision: \"00\",\n\t\t\t\t\tStable: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tVersion: \"0.3.0.1-07a8a2\",\n\t\t\t\t\tRevision: \"00\",\n\t\t\t\t\tStable: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tPackage: \"kubeadm\",\n\t\t\tDistros: serverDistros,\n\t\t\tVersions: []version{\n\t\t\t\t{\n\t\t\t\t\tVersion: \"1.5.0-alpha.0-1403-gc19e08e\",\n\t\t\t\t\tRevision: \"00\",\n\t\t\t\t\tStable: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tVersion: \"1.5.0-alpha.0-1403-gc19e08e\",\n\t\t\t\t\tRevision: \"00\",\n\t\t\t\t\tStable: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif err := walkBuilds(builds, func(pkg, distro, arch string, v version) error {\n\t\tc := cfg{\n\t\t\tPackage: pkg,\n\t\t\tversion: v,\n\t\t\tDistroName: distro,\n\t\t\tArch: arch,\n\t\t}\n\t\tif c.Arch == \"arm\" {\n\t\t\tc.DebArch = \"armhf\"\n\t\t} else {\n\t\t\tc.DebArch = c.Arch\n\t\t}\n\t\treturn c.run()\n\t}); err != nil {\n\t\tlog.Fatalf(\"err: %v\", err)\n\t}\n}\n<commit_msg>Bump kubeadm for the version that has arm and arm64 builds<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"text\/template\"\n\t\"time\"\n)\n\ntype work struct {\n\tsrc, dst string\n\tt *template.Template\n\tinfo os.FileInfo\n}\n\ntype build struct {\n\tPackage string\n\tDistros []string\n\tVersions []version\n}\n\ntype version struct {\n\tVersion, Revision string\n\tStable bool\n}\n\ntype cfg struct {\n\tversion\n\tDistroName, Arch, DebArch, Package string\n}\n\nvar (\n\tarchitectures = []string{\"amd64\", \"arm\", \"arm64\"}\n\tserverDistros = []string{\"xenial\"}\n\tallDistros = []string{\"xenial\", \"jessie\", \"precise\", \"sid\", \"stretch\", \"trusty\",\n\t\t\"utopic\", \"vivid\", \"wheezy\", \"wily\", \"yakkety\"}\n\n\tbuiltins = map[string]interface{}{\n\t\t\"date\": func() string {\n\t\t\treturn time.Now().Format(time.RFC1123Z)\n\t\t},\n\t}\n\n\tkeepTmp = flag.Bool(\"keep_tmp\", false, \"keep tmp dir after build\")\n)\n\nfunc runCommand(pwd string, command string, cmdArgs ...string) error {\n\tcmd := exec.Command(command, cmdArgs...)\n\tif len(pwd) != 0 {\n\t\tcmd.Dir = pwd\n\t}\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c cfg) run() error {\n\tlog.Printf(\"!!!!!!!!! doing: %#v\", c)\n\tvar w []work\n\n\tsrcdir := filepath.Join(c.DistroName, c.Package)\n\tdstdir, err := ioutil.TempDir(os.TempDir(), \"debs\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !*keepTmp {\n\t\tdefer os.RemoveAll(dstdir)\n\t}\n\n\t\/\/ allow base package dir to by a symlink so we can reuse packages\n\t\/\/ that don't change between distros\n\trealSrcdir, err := filepath.EvalSymlinks(srcdir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := filepath.Walk(realSrcdir, func(srcfile string, f os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdstfile := filepath.Join(dstdir, srcfile[len(realSrcdir):])\n\t\tif dstfile == dstdir {\n\t\t\treturn nil\n\t\t}\n\t\tif f.IsDir() {\n\t\t\tlog.Printf(dstfile)\n\t\t\treturn os.Mkdir(dstfile, f.Mode())\n\t\t}\n\t\tt, err := template.\n\t\t\tNew(\"\").\n\t\t\tFuncs(builtins).\n\t\t\tOption(\"missingkey=error\").\n\t\t\tParseFiles(srcfile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tw = append(w, work{\n\t\t\tsrc: srcfile,\n\t\t\tdst: dstfile,\n\t\t\tt: t.Templates()[0],\n\t\t\tinfo: f,\n\t\t})\n\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, w := range w {\n\t\tlog.Printf(\"w: %#v\", w)\n\t\tif err := func() error {\n\t\t\tf, err := os.OpenFile(w.dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer f.Close()\n\n\t\t\tif err := w.t.Execute(f, c); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := os.Chmod(w.dst, w.info.Mode()); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = runCommand(dstdir, \"dpkg-buildpackage\", \"-us\", \"-uc\", \"-b\", \"-a\"+c.DebArch)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdstParts := []string{\"bin\"}\n\tif c.Stable {\n\t\tdstParts = append(dstParts, \"stable\")\n\t} else {\n\t\tdstParts = append(dstParts, \"unstable\")\n\t}\n\tdstParts = append(dstParts, c.DistroName)\n\n\tdstPath := filepath.Join(dstParts...)\n\tos.MkdirAll(dstPath, 0777)\n\n\tfileName := fmt.Sprintf(\"%s_%s-%s_%s.deb\", c.Package, c.Version, c.Revision, c.DebArch)\n\terr = runCommand(\"\", \"mv\", filepath.Join(\"\/tmp\", fileName), dstPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc walkBuilds(builds []build, f func(pkg, distro, arch string, v version) error) error {\n\tfor _, a := range architectures {\n\t\tfor _, b := range builds {\n\t\t\tfor _, d := range b.Distros {\n\t\t\t\tfor _, v := range b.Versions {\n\t\t\t\t\tif err := f(b.Package, d, a, v); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tbuilds := []build{\n\t\t{\n\t\t\tPackage: \"kubectl\",\n\t\t\tDistros: allDistros,\n\t\t\tVersions: []version{\n\t\t\t\t{\n\t\t\t\t\tVersion: \"1.4.0\",\n\t\t\t\t\tRevision: \"00\",\n\t\t\t\t\tStable: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tVersion: \"1.4.0\",\n\t\t\t\t\tRevision: \"00\",\n\t\t\t\t\tStable: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tPackage: \"kubelet\",\n\t\t\tDistros: serverDistros,\n\t\t\tVersions: []version{\n\t\t\t\t{\n\t\t\t\t\tVersion: \"1.4.0\",\n\t\t\t\t\tRevision: \"00\",\n\t\t\t\t\tStable: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tVersion: \"1.4.0\",\n\t\t\t\t\tRevision: \"00\",\n\t\t\t\t\tStable: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tPackage: \"kubernetes-cni\",\n\t\t\tDistros: serverDistros,\n\t\t\tVersions: []version{\n\t\t\t\t{\n\t\t\t\t\tVersion: \"0.3.0.1-07a8a2\",\n\t\t\t\t\tRevision: \"00\",\n\t\t\t\t\tStable: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tVersion: \"0.3.0.1-07a8a2\",\n\t\t\t\t\tRevision: \"00\",\n\t\t\t\t\tStable: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tPackage: \"kubeadm\",\n\t\t\tDistros: serverDistros,\n\t\t\tVersions: []version{\n\t\t\t\t{\n\t\t\t\t\tVersion: \"1.5.0-alpha.0-1495-g1e7fa1f\",\n\t\t\t\t\tRevision: \"00\",\n\t\t\t\t\tStable: true,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tVersion: \"1.5.0-alpha.0-1495-g1e7fa1f\",\n\t\t\t\t\tRevision: \"00\",\n\t\t\t\t\tStable: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif err := walkBuilds(builds, func(pkg, distro, arch string, v version) error {\n\t\tc := cfg{\n\t\t\tPackage: pkg,\n\t\t\tversion: v,\n\t\t\tDistroName: distro,\n\t\t\tArch: arch,\n\t\t}\n\t\tif c.Arch == \"arm\" {\n\t\t\tc.DebArch = \"armhf\"\n\t\t} else {\n\t\t\tc.DebArch = c.Arch\n\t\t}\n\t\treturn c.run()\n\t}); err != nil {\n\t\tlog.Fatalf(\"err: %v\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package apd\n\nimport (\n\t\"fmt\"\n\t\"math\/big\"\n\t\"testing\"\n)\n\nfunc (d *Decimal) GoString() string {\n\treturn fmt.Sprintf(`{Coeff: %s, Exponent: %d, MaxExponent: %d, MinExponent: %d, Precision: %d}`, d.Coeff.String(), d.Exponent, d.MaxExponent, d.MinExponent, d.Precision)\n}\n\nfunc TestNewFromString(t *testing.T) {\n\ttests := []struct {\n\t\ts string\n\t\tout string\n\t}{\n\t\t{s: \"0\"},\n\t\t{s: \"0.0\"},\n\t\t{s: \"00.0\", out: \"0.0\"},\n\t\t{s: \"0.00\"},\n\t\t{s: \"00.00\", out: \"0.00\"},\n\t\t{s: \"1\"},\n\t\t{s: \"1.0\"},\n\t\t{s: \"0.1\"},\n\t\t{s: \".1\", out: \"0.1\"},\n\t\t{s: \"01.10\", out: \"1.10\"},\n\t\t{s: \"123456.789\"},\n\t\t{s: \"-123\"},\n\t\t{s: \"1e1\", out: \"10\"},\n\t\t{s: \"1e-1\", out: \"0.1\"},\n\t\t{s: \"0.1e1\", out: \"1\"},\n\t\t{s: \"0.10e1\", out: \"1.0\"},\n\t\t{s: \"0.1e-1\", out: \"0.01\"},\n\t\t{s: \"1e10\", out: \"10000000000\"},\n\t\t{s: \"1e-10\", out: \"0.0000000001\"},\n\t\t{s: \"0.1e10\", out: \"1000000000\"},\n\t\t{s: \"0.1e-10\", out: \"0.00000000001\"},\n\t}\n\tfor _, tc := range tests {\n\t\tt.Run(tc.s, func(t *testing.T) {\n\t\t\td := newDecimal(t, tc.s)\n\t\t\texpect := tc.out\n\t\t\tif expect == \"\" {\n\t\t\t\texpect = tc.s\n\t\t\t}\n\t\t\ts := d.String()\n\t\t\tif s != expect {\n\t\t\t\tt.Errorf(\"expected: %s, got %s\", expect, s)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc newDecimal(t *testing.T, s string) *Decimal {\n\td, err := NewFromString(s)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn d\n}\n\nfunc TestUpscale(t *testing.T) {\n\ttests := []struct {\n\t\tx, y *Decimal\n\t\ta, b *big.Int\n\t\ts int32\n\t}{\n\t\t{x: New(1, 0), y: New(100, -1), a: big.NewInt(10), b: big.NewInt(100), s: -1},\n\t\t{x: New(1, 0), y: New(10, -1), a: big.NewInt(10), b: big.NewInt(10), s: -1},\n\t\t{x: New(1, 0), y: New(10, 0), a: big.NewInt(1), b: big.NewInt(10), s: 0},\n\t\t{x: New(1, 1), y: New(1, 0), a: big.NewInt(10), b: big.NewInt(1), s: 0},\n\t\t{x: New(10, -2), y: New(1, -1), a: big.NewInt(10), b: big.NewInt(10), s: -2},\n\t\t{x: New(1, -2), y: New(100, 1), a: big.NewInt(1), b: big.NewInt(100000), s: -2},\n\t}\n\tfor _, tc := range tests {\n\t\tt.Run(fmt.Sprintf(\"%s, %s\", tc.x, tc.y), func(t *testing.T) {\n\t\t\ta, b, s, err := upscale(tc.x, tc.y)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif a.Cmp(tc.a) != 0 {\n\t\t\t\tt.Errorf(\"a: expected %s, got %s\", tc.a, a)\n\t\t\t}\n\t\t\tif b.Cmp(tc.b) != 0 {\n\t\t\t\tt.Errorf(\"b: expected %s, got %s\", tc.b, b)\n\t\t\t}\n\t\t\tif s != tc.s {\n\t\t\t\tt.Errorf(\"s: expected %d, got %d\", tc.s, s)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestAdd(t *testing.T) {\n\ttests := []struct {\n\t\tx, y string\n\t\tr string\n\t}{\n\t\t{x: \"1\", y: \"10\", r: \"11\"},\n\t\t{x: \"1\", y: \"1e1\", r: \"11\"},\n\t\t{x: \"1e1\", y: \"1\", r: \"11\"},\n\t\t{x: \".1e1\", y: \"100e-1\", r: \"11.0\"},\n\t}\n\tfor _, tc := range tests {\n\t\tt.Run(fmt.Sprintf(\"%s, %s\", tc.x, tc.y), func(t *testing.T) {\n\t\t\tx := newDecimal(t, tc.x)\n\t\t\ty := newDecimal(t, tc.y)\n\t\t\td := new(Decimal)\n\t\t\terr := d.Add(x, y)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\ts := d.String()\n\t\t\tif s != tc.r {\n\t\t\t\tt.Fatalf(\"expected: %s, got: %s\", tc.r, s)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCmp(t *testing.T) {\n\ttests := []struct {\n\t\tx, y string\n\t\tc int\n\t}{\n\t\t{x: \"1\", y: \"10\", c: -1},\n\t\t{x: \"1\", y: \"1e1\", c: -1},\n\t\t{x: \"1e1\", y: \"1\", c: 1},\n\t\t{x: \".1e1\", y: \"100e-1\", c: -1},\n\n\t\t{x: \".1e1\", y: \"100e-2\", c: 0},\n\t\t{x: \"1\", y: \".1e1\", c: 0},\n\t\t{x: \"1\", y: \"1\", c: 0},\n\t}\n\tfor _, tc := range tests {\n\t\tt.Run(fmt.Sprintf(\"%s, %s\", tc.x, tc.y), func(t *testing.T) {\n\t\t\tx := newDecimal(t, tc.x)\n\t\t\ty := newDecimal(t, tc.y)\n\t\t\tc, err := x.Cmp(y)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif c != tc.c {\n\t\t\t\tt.Fatalf(\"expected: %d, got: %d\", tc.c, c)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestModf(t *testing.T) {\n\ttests := []struct {\n\t\tx string\n\t\ti string\n\t\tf string\n\t}{\n\t\t{x: \"1\", i: \"1\", f: \"0\"},\n\t\t{x: \"1.0\", i: \"1\", f: \"0.0\"},\n\t\t{x: \"1.0e1\", i: \"10\", f: \"0\"},\n\t\t{x: \"1.0e2\", i: \"100\", f: \"0\"},\n\t\t{x: \"1.0e-1\", i: \"0\", f: \"0.10\"},\n\t\t{x: \"1.0e-2\", i: \"0\", f: \"0.010\"},\n\t\t{x: \"1234.56\", i: \"1234\", f: \"0.56\"},\n\t\t{x: \"1234.56e2\", i: \"123456\", f: \"0\"},\n\t\t{x: \"1234.56e4\", i: \"12345600\", f: \"0\"},\n\t\t{x: \"1234.56e-2\", i: \"12\", f: \"0.3456\"},\n\t\t{x: \"1234.56e-4\", i: \"0\", f: \"0.123456\"},\n\t\t{x: \"1234.56e-6\", i: \"0\", f: \"0.00123456\"},\n\t\t{x: \"123456e-8\", i: \"0\", f: \"0.00123456\"},\n\t\t{x: \".123456e8\", i: \"12345600\", f: \"0\"},\n\n\t\t{x: \"-1\", i: \"-1\", f: \"0\"},\n\t\t{x: \"-1.0\", i: \"-1\", f: \"0.0\"},\n\t\t{x: \"-1.0e1\", i: \"-10\", f: \"0\"},\n\t\t{x: \"-1.0e2\", i: \"-100\", f: \"0\"},\n\t\t{x: \"-1.0e-1\", i: \"0\", f: \"-0.10\"},\n\t\t{x: \"-1.0e-2\", i: \"0\", f: \"-0.010\"},\n\t\t{x: \"-1234.56\", i: \"-1234\", f: \"-0.56\"},\n\t\t{x: \"-1234.56e2\", i: \"-123456\", f: \"0\"},\n\t\t{x: \"-1234.56e4\", i: \"-12345600\", f: \"0\"},\n\t\t{x: \"-1234.56e-2\", i: \"-12\", f: \"-0.3456\"},\n\t\t{x: \"-1234.56e-4\", i: \"0\", f: \"-0.123456\"},\n\t\t{x: \"-1234.56e-6\", i: \"0\", f: \"-0.00123456\"},\n\t\t{x: \"-123456e-8\", i: \"0\", f: \"-0.00123456\"},\n\t\t{x: \"-.123456e8\", i: \"-12345600\", f: \"0\"},\n\t}\n\tfor _, tc := range tests {\n\t\tt.Run(tc.x, func(t *testing.T) {\n\t\t\tx := newDecimal(t, tc.x)\n\t\t\tinteg, frac := new(Decimal), new(Decimal)\n\t\t\tx.Modf(integ, frac)\n\t\t\tif tc.i != integ.String() {\n\t\t\t\tt.Fatalf(\"integ: expected: %s, got: %s\", tc.i, integ)\n\t\t\t}\n\t\t\tif tc.f != frac.String() {\n\t\t\t\tt.Fatalf(\"frac: expected: %s, got: %s\", tc.f, frac)\n\t\t\t}\n\t\t\ta := new(Decimal)\n\t\t\tif err := a.Add(integ, frac); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif c, err := a.Cmp(x); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t} else if c != 0 {\n\t\t\t\tt.Fatalf(\"%s != %s\", a, x)\n\t\t\t}\n\t\t\tif integ.Exponent < 0 {\n\t\t\t\tt.Fatal(integ.Exponent)\n\t\t\t}\n\t\t\tif frac.Exponent > 0 {\n\t\t\t\tt.Fatal(frac.Exponent)\n\t\t\t}\n\t\t})\n\t}\n}\n<commit_msg>add todo<commit_after>package apd\n\nimport (\n\t\"fmt\"\n\t\"math\/big\"\n\t\"testing\"\n)\n\n\/\/ TODO(mjibson): audit functions to guarantee they still function correctly\n\/\/ if the receiver and args are the same variable.\n\nfunc (d *Decimal) GoString() string {\n\treturn fmt.Sprintf(`{Coeff: %s, Exponent: %d, MaxExponent: %d, MinExponent: %d, Precision: %d}`, d.Coeff.String(), d.Exponent, d.MaxExponent, d.MinExponent, d.Precision)\n}\n\nfunc TestNewFromString(t *testing.T) {\n\ttests := []struct {\n\t\ts string\n\t\tout string\n\t}{\n\t\t{s: \"0\"},\n\t\t{s: \"0.0\"},\n\t\t{s: \"00.0\", out: \"0.0\"},\n\t\t{s: \"0.00\"},\n\t\t{s: \"00.00\", out: \"0.00\"},\n\t\t{s: \"1\"},\n\t\t{s: \"1.0\"},\n\t\t{s: \"0.1\"},\n\t\t{s: \".1\", out: \"0.1\"},\n\t\t{s: \"01.10\", out: \"1.10\"},\n\t\t{s: \"123456.789\"},\n\t\t{s: \"-123\"},\n\t\t{s: \"1e1\", out: \"10\"},\n\t\t{s: \"1e-1\", out: \"0.1\"},\n\t\t{s: \"0.1e1\", out: \"1\"},\n\t\t{s: \"0.10e1\", out: \"1.0\"},\n\t\t{s: \"0.1e-1\", out: \"0.01\"},\n\t\t{s: \"1e10\", out: \"10000000000\"},\n\t\t{s: \"1e-10\", out: \"0.0000000001\"},\n\t\t{s: \"0.1e10\", out: \"1000000000\"},\n\t\t{s: \"0.1e-10\", out: \"0.00000000001\"},\n\t}\n\tfor _, tc := range tests {\n\t\tt.Run(tc.s, func(t *testing.T) {\n\t\t\td := newDecimal(t, tc.s)\n\t\t\texpect := tc.out\n\t\t\tif expect == \"\" {\n\t\t\t\texpect = tc.s\n\t\t\t}\n\t\t\ts := d.String()\n\t\t\tif s != expect {\n\t\t\t\tt.Errorf(\"expected: %s, got %s\", expect, s)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc newDecimal(t *testing.T, s string) *Decimal {\n\td, err := NewFromString(s)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn d\n}\n\nfunc TestUpscale(t *testing.T) {\n\ttests := []struct {\n\t\tx, y *Decimal\n\t\ta, b *big.Int\n\t\ts int32\n\t}{\n\t\t{x: New(1, 0), y: New(100, -1), a: big.NewInt(10), b: big.NewInt(100), s: -1},\n\t\t{x: New(1, 0), y: New(10, -1), a: big.NewInt(10), b: big.NewInt(10), s: -1},\n\t\t{x: New(1, 0), y: New(10, 0), a: big.NewInt(1), b: big.NewInt(10), s: 0},\n\t\t{x: New(1, 1), y: New(1, 0), a: big.NewInt(10), b: big.NewInt(1), s: 0},\n\t\t{x: New(10, -2), y: New(1, -1), a: big.NewInt(10), b: big.NewInt(10), s: -2},\n\t\t{x: New(1, -2), y: New(100, 1), a: big.NewInt(1), b: big.NewInt(100000), s: -2},\n\t}\n\tfor _, tc := range tests {\n\t\tt.Run(fmt.Sprintf(\"%s, %s\", tc.x, tc.y), func(t *testing.T) {\n\t\t\ta, b, s, err := upscale(tc.x, tc.y)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif a.Cmp(tc.a) != 0 {\n\t\t\t\tt.Errorf(\"a: expected %s, got %s\", tc.a, a)\n\t\t\t}\n\t\t\tif b.Cmp(tc.b) != 0 {\n\t\t\t\tt.Errorf(\"b: expected %s, got %s\", tc.b, b)\n\t\t\t}\n\t\t\tif s != tc.s {\n\t\t\t\tt.Errorf(\"s: expected %d, got %d\", tc.s, s)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestAdd(t *testing.T) {\n\ttests := []struct {\n\t\tx, y string\n\t\tr string\n\t}{\n\t\t{x: \"1\", y: \"10\", r: \"11\"},\n\t\t{x: \"1\", y: \"1e1\", r: \"11\"},\n\t\t{x: \"1e1\", y: \"1\", r: \"11\"},\n\t\t{x: \".1e1\", y: \"100e-1\", r: \"11.0\"},\n\t}\n\tfor _, tc := range tests {\n\t\tt.Run(fmt.Sprintf(\"%s, %s\", tc.x, tc.y), func(t *testing.T) {\n\t\t\tx := newDecimal(t, tc.x)\n\t\t\ty := newDecimal(t, tc.y)\n\t\t\td := new(Decimal)\n\t\t\terr := d.Add(x, y)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\ts := d.String()\n\t\t\tif s != tc.r {\n\t\t\t\tt.Fatalf(\"expected: %s, got: %s\", tc.r, s)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCmp(t *testing.T) {\n\ttests := []struct {\n\t\tx, y string\n\t\tc int\n\t}{\n\t\t{x: \"1\", y: \"10\", c: -1},\n\t\t{x: \"1\", y: \"1e1\", c: -1},\n\t\t{x: \"1e1\", y: \"1\", c: 1},\n\t\t{x: \".1e1\", y: \"100e-1\", c: -1},\n\n\t\t{x: \".1e1\", y: \"100e-2\", c: 0},\n\t\t{x: \"1\", y: \".1e1\", c: 0},\n\t\t{x: \"1\", y: \"1\", c: 0},\n\t}\n\tfor _, tc := range tests {\n\t\tt.Run(fmt.Sprintf(\"%s, %s\", tc.x, tc.y), func(t *testing.T) {\n\t\t\tx := newDecimal(t, tc.x)\n\t\t\ty := newDecimal(t, tc.y)\n\t\t\tc, err := x.Cmp(y)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif c != tc.c {\n\t\t\t\tt.Fatalf(\"expected: %d, got: %d\", tc.c, c)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestModf(t *testing.T) {\n\ttests := []struct {\n\t\tx string\n\t\ti string\n\t\tf string\n\t}{\n\t\t{x: \"1\", i: \"1\", f: \"0\"},\n\t\t{x: \"1.0\", i: \"1\", f: \"0.0\"},\n\t\t{x: \"1.0e1\", i: \"10\", f: \"0\"},\n\t\t{x: \"1.0e2\", i: \"100\", f: \"0\"},\n\t\t{x: \"1.0e-1\", i: \"0\", f: \"0.10\"},\n\t\t{x: \"1.0e-2\", i: \"0\", f: \"0.010\"},\n\t\t{x: \"1234.56\", i: \"1234\", f: \"0.56\"},\n\t\t{x: \"1234.56e2\", i: \"123456\", f: \"0\"},\n\t\t{x: \"1234.56e4\", i: \"12345600\", f: \"0\"},\n\t\t{x: \"1234.56e-2\", i: \"12\", f: \"0.3456\"},\n\t\t{x: \"1234.56e-4\", i: \"0\", f: \"0.123456\"},\n\t\t{x: \"1234.56e-6\", i: \"0\", f: \"0.00123456\"},\n\t\t{x: \"123456e-8\", i: \"0\", f: \"0.00123456\"},\n\t\t{x: \".123456e8\", i: \"12345600\", f: \"0\"},\n\n\t\t{x: \"-1\", i: \"-1\", f: \"0\"},\n\t\t{x: \"-1.0\", i: \"-1\", f: \"0.0\"},\n\t\t{x: \"-1.0e1\", i: \"-10\", f: \"0\"},\n\t\t{x: \"-1.0e2\", i: \"-100\", f: \"0\"},\n\t\t{x: \"-1.0e-1\", i: \"0\", f: \"-0.10\"},\n\t\t{x: \"-1.0e-2\", i: \"0\", f: \"-0.010\"},\n\t\t{x: \"-1234.56\", i: \"-1234\", f: \"-0.56\"},\n\t\t{x: \"-1234.56e2\", i: \"-123456\", f: \"0\"},\n\t\t{x: \"-1234.56e4\", i: \"-12345600\", f: \"0\"},\n\t\t{x: \"-1234.56e-2\", i: \"-12\", f: \"-0.3456\"},\n\t\t{x: \"-1234.56e-4\", i: \"0\", f: \"-0.123456\"},\n\t\t{x: \"-1234.56e-6\", i: \"0\", f: \"-0.00123456\"},\n\t\t{x: \"-123456e-8\", i: \"0\", f: \"-0.00123456\"},\n\t\t{x: \"-.123456e8\", i: \"-12345600\", f: \"0\"},\n\t}\n\tfor _, tc := range tests {\n\t\tt.Run(tc.x, func(t *testing.T) {\n\t\t\tx := newDecimal(t, tc.x)\n\t\t\tinteg, frac := new(Decimal), new(Decimal)\n\t\t\tx.Modf(integ, frac)\n\t\t\tif tc.i != integ.String() {\n\t\t\t\tt.Fatalf(\"integ: expected: %s, got: %s\", tc.i, integ)\n\t\t\t}\n\t\t\tif tc.f != frac.String() {\n\t\t\t\tt.Fatalf(\"frac: expected: %s, got: %s\", tc.f, frac)\n\t\t\t}\n\t\t\ta := new(Decimal)\n\t\t\tif err := a.Add(integ, frac); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif c, err := a.Cmp(x); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t} else if c != 0 {\n\t\t\t\tt.Fatalf(\"%s != %s\", a, x)\n\t\t\t}\n\t\t\tif integ.Exponent < 0 {\n\t\t\t\tt.Fatal(integ.Exponent)\n\t\t\t}\n\t\t\tif frac.Exponent > 0 {\n\t\t\t\tt.Fatal(frac.Exponent)\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package define\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/containers\/image\/v5\/manifest\"\n\t\"github.com\/containers\/storage\/pkg\/archive\"\n\t\"github.com\/containers\/storage\/pkg\/chrootarchive\"\n\t\"github.com\/containers\/storage\/pkg\/ioutils\"\n\tv1 \"github.com\/opencontainers\/image-spec\/specs-go\/v1\"\n\t\"github.com\/opencontainers\/runtime-spec\/specs-go\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\nconst (\n\t\/\/ Package is the name of this package, used in help output and to\n\t\/\/ identify working containers.\n\tPackage = \"buildah\"\n\t\/\/ Version for the Package. Bump version in contrib\/rpm\/buildah.spec\n\t\/\/ too.\n\tVersion = \"1.20.1-dev\"\n\n\t\/\/ DefaultRuntime if containers.conf fails.\n\tDefaultRuntime = \"runc\"\n\n\tDefaultCNIPluginPath = \"\/usr\/libexec\/cni:\/opt\/cni\/bin\"\n\t\/\/ DefaultCNIConfigDir is the default location of CNI configuration files.\n\tDefaultCNIConfigDir = \"\/etc\/cni\/net.d\"\n\n\t\/\/ OCIv1ImageManifest is the MIME type of an OCIv1 image manifest,\n\t\/\/ suitable for specifying as a value of the PreferredManifestType\n\t\/\/ member of a CommitOptions structure. It is also the default.\n\tOCIv1ImageManifest = v1.MediaTypeImageManifest\n\t\/\/ Dockerv2ImageManifest is the MIME type of a Docker v2s2 image\n\t\/\/ manifest, suitable for specifying as a value of the\n\t\/\/ PreferredManifestType member of a CommitOptions structure.\n\tDockerv2ImageManifest = manifest.DockerV2Schema2MediaType\n\n\t\/\/ OCI used to define the \"oci\" image format\n\tOCI = \"oci\"\n\t\/\/ DOCKER used to define the \"docker\" image format\n\tDOCKER = \"docker\"\n)\n\nvar (\n\t\/\/ DefaultCapabilities is the list of capabilities which we grant by\n\t\/\/ default to containers which are running under UID 0.\n\tDefaultCapabilities = []string{\n\t\t\"CAP_AUDIT_WRITE\",\n\t\t\"CAP_CHOWN\",\n\t\t\"CAP_DAC_OVERRIDE\",\n\t\t\"CAP_FOWNER\",\n\t\t\"CAP_FSETID\",\n\t\t\"CAP_KILL\",\n\t\t\"CAP_MKNOD\",\n\t\t\"CAP_NET_BIND_SERVICE\",\n\t\t\"CAP_SETFCAP\",\n\t\t\"CAP_SETGID\",\n\t\t\"CAP_SETPCAP\",\n\t\t\"CAP_SETUID\",\n\t\t\"CAP_SYS_CHROOT\",\n\t}\n\t\/\/ DefaultNetworkSysctl is the list of Kernel parameters which we\n\t\/\/ grant by default to containers which are running under UID 0.\n\tDefaultNetworkSysctl = map[string]string{\n\t\t\"net.ipv4.ping_group_range\": \"0 0\",\n\t}\n\n\tGzip = archive.Gzip\n\tBzip2 = archive.Bzip2\n\tXz = archive.Xz\n\tZstd = archive.Zstd\n\tUncompressed = archive.Uncompressed\n)\n\n\/\/ IDMappingOptions controls how we set up UID\/GID mapping when we set up a\n\/\/ user namespace.\ntype IDMappingOptions struct {\n\tHostUIDMapping bool\n\tHostGIDMapping bool\n\tUIDMap []specs.LinuxIDMapping\n\tGIDMap []specs.LinuxIDMapping\n}\n\n\/\/ TempDirForURL checks if the passed-in string looks like a URL or -. If it is,\n\/\/ TempDirForURL creates a temporary directory, arranges for its contents to be\n\/\/ the contents of that URL, and returns the temporary directory's path, along\n\/\/ with the name of a subdirectory which should be used as the build context\n\/\/ (which may be empty or \".\"). Removal of the temporary directory is the\n\/\/ responsibility of the caller. If the string doesn't look like a URL,\n\/\/ TempDirForURL returns empty strings and a nil error code.\nfunc TempDirForURL(dir, prefix, url string) (name string, subdir string, err error) {\n\tif !strings.HasPrefix(url, \"http:\/\/\") &&\n\t\t!strings.HasPrefix(url, \"https:\/\/\") &&\n\t\t!strings.HasPrefix(url, \"git:\/\/\") &&\n\t\t!strings.HasPrefix(url, \"github.com\/\") &&\n\t\turl != \"-\" {\n\t\treturn \"\", \"\", nil\n\t}\n\tname, err = ioutil.TempDir(dir, prefix)\n\tif err != nil {\n\t\treturn \"\", \"\", errors.Wrapf(err, \"error creating temporary directory for %q\", url)\n\t}\n\tif strings.HasPrefix(url, \"git:\/\/\") || strings.HasSuffix(url, \".git\") {\n\t\terr = cloneToDirectory(url, name)\n\t\tif err != nil {\n\t\t\tif err2 := os.RemoveAll(name); err2 != nil {\n\t\t\t\tlogrus.Debugf(\"error removing temporary directory %q: %v\", name, err2)\n\t\t\t}\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\treturn name, \"\", nil\n\t}\n\tif strings.HasPrefix(url, \"github.com\/\") {\n\t\tghurl := url\n\t\turl = fmt.Sprintf(\"https:\/\/%s\/archive\/master.tar.gz\", ghurl)\n\t\tlogrus.Debugf(\"resolving url %q to %q\", ghurl, url)\n\t\tsubdir = path.Base(ghurl) + \"-master\"\n\t}\n\tif strings.HasPrefix(url, \"http:\/\/\") || strings.HasPrefix(url, \"https:\/\/\") {\n\t\terr = downloadToDirectory(url, name)\n\t\tif err != nil {\n\t\t\tif err2 := os.RemoveAll(name); err2 != nil {\n\t\t\t\tlogrus.Debugf(\"error removing temporary directory %q: %v\", name, err2)\n\t\t\t}\n\t\t\treturn \"\", subdir, err\n\t\t}\n\t\treturn name, subdir, nil\n\t}\n\tif url == \"-\" {\n\t\terr = stdinToDirectory(name)\n\t\tif err != nil {\n\t\t\tif err2 := os.RemoveAll(name); err2 != nil {\n\t\t\t\tlogrus.Debugf(\"error removing temporary directory %q: %v\", name, err2)\n\t\t\t}\n\t\t\treturn \"\", subdir, err\n\t\t}\n\t\tlogrus.Debugf(\"Build context is at %q\", name)\n\t\treturn name, subdir, nil\n\t}\n\tlogrus.Debugf(\"don't know how to retrieve %q\", url)\n\tif err2 := os.Remove(name); err2 != nil {\n\t\tlogrus.Debugf(\"error removing temporary directory %q: %v\", name, err2)\n\t}\n\treturn \"\", \"\", errors.Errorf(\"unreachable code reached\")\n}\n\nfunc cloneToDirectory(url, dir string) error {\n\tif !strings.HasPrefix(url, \"git:\/\/\") && !strings.HasSuffix(url, \".git\") {\n\t\turl = \"git:\/\/\" + url\n\t}\n\tgitBranch := strings.Split(url, \"#\")\n\tvar cmd *exec.Cmd\n\tif len(gitBranch) < 2 {\n\t\tlogrus.Debugf(\"cloning %q to %q\", url, dir)\n\t\tcmd = exec.Command(\"git\", \"clone\", url, dir)\n\t} else {\n\t\tlogrus.Debugf(\"cloning repo %q and branch %q to %q\", gitBranch[0], gitBranch[1], dir)\n\t\tcmd = exec.Command(\"git\", \"clone\", \"-b\", gitBranch[1], gitBranch[0], dir)\n\t}\n\treturn cmd.Run()\n}\n\nfunc downloadToDirectory(url, dir string) error {\n\tlogrus.Debugf(\"extracting %q to %q\", url, dir)\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.ContentLength == 0 {\n\t\treturn errors.Errorf(\"no contents in %q\", url)\n\t}\n\tif err := chrootarchive.Untar(resp.Body, dir, nil); err != nil {\n\t\tresp1, err := http.Get(url)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer resp1.Body.Close()\n\t\tbody, err := ioutil.ReadAll(resp1.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdockerfile := filepath.Join(dir, \"Dockerfile\")\n\t\t\/\/ Assume this is a Dockerfile\n\t\tif err := ioutils.AtomicWriteFile(dockerfile, body, 0600); err != nil {\n\t\t\treturn errors.Wrapf(err, \"Failed to write %q to %q\", url, dockerfile)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc stdinToDirectory(dir string) error {\n\tlogrus.Debugf(\"extracting stdin to %q\", dir)\n\tr := bufio.NewReader(os.Stdin)\n\tb, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed to read from stdin\")\n\t}\n\treader := bytes.NewReader(b)\n\tif err := chrootarchive.Untar(reader, dir, nil); err != nil {\n\t\tdockerfile := filepath.Join(dir, \"Dockerfile\")\n\t\t\/\/ Assume this is a Dockerfile\n\t\tif err := ioutils.AtomicWriteFile(dockerfile, b, 0600); err != nil {\n\t\t\treturn errors.Wrapf(err, \"Failed to write bytes to %q\", dockerfile)\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>[NO TESTS NEEDED] Use --recurse-modules when building git context<commit_after>package define\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/containers\/image\/v5\/manifest\"\n\t\"github.com\/containers\/storage\/pkg\/archive\"\n\t\"github.com\/containers\/storage\/pkg\/chrootarchive\"\n\t\"github.com\/containers\/storage\/pkg\/ioutils\"\n\tv1 \"github.com\/opencontainers\/image-spec\/specs-go\/v1\"\n\t\"github.com\/opencontainers\/runtime-spec\/specs-go\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\nconst (\n\t\/\/ Package is the name of this package, used in help output and to\n\t\/\/ identify working containers.\n\tPackage = \"buildah\"\n\t\/\/ Version for the Package. Bump version in contrib\/rpm\/buildah.spec\n\t\/\/ too.\n\tVersion = \"1.20.1-dev\"\n\n\t\/\/ DefaultRuntime if containers.conf fails.\n\tDefaultRuntime = \"runc\"\n\n\tDefaultCNIPluginPath = \"\/usr\/libexec\/cni:\/opt\/cni\/bin\"\n\t\/\/ DefaultCNIConfigDir is the default location of CNI configuration files.\n\tDefaultCNIConfigDir = \"\/etc\/cni\/net.d\"\n\n\t\/\/ OCIv1ImageManifest is the MIME type of an OCIv1 image manifest,\n\t\/\/ suitable for specifying as a value of the PreferredManifestType\n\t\/\/ member of a CommitOptions structure. It is also the default.\n\tOCIv1ImageManifest = v1.MediaTypeImageManifest\n\t\/\/ Dockerv2ImageManifest is the MIME type of a Docker v2s2 image\n\t\/\/ manifest, suitable for specifying as a value of the\n\t\/\/ PreferredManifestType member of a CommitOptions structure.\n\tDockerv2ImageManifest = manifest.DockerV2Schema2MediaType\n\n\t\/\/ OCI used to define the \"oci\" image format\n\tOCI = \"oci\"\n\t\/\/ DOCKER used to define the \"docker\" image format\n\tDOCKER = \"docker\"\n)\n\nvar (\n\t\/\/ DefaultCapabilities is the list of capabilities which we grant by\n\t\/\/ default to containers which are running under UID 0.\n\tDefaultCapabilities = []string{\n\t\t\"CAP_AUDIT_WRITE\",\n\t\t\"CAP_CHOWN\",\n\t\t\"CAP_DAC_OVERRIDE\",\n\t\t\"CAP_FOWNER\",\n\t\t\"CAP_FSETID\",\n\t\t\"CAP_KILL\",\n\t\t\"CAP_MKNOD\",\n\t\t\"CAP_NET_BIND_SERVICE\",\n\t\t\"CAP_SETFCAP\",\n\t\t\"CAP_SETGID\",\n\t\t\"CAP_SETPCAP\",\n\t\t\"CAP_SETUID\",\n\t\t\"CAP_SYS_CHROOT\",\n\t}\n\t\/\/ DefaultNetworkSysctl is the list of Kernel parameters which we\n\t\/\/ grant by default to containers which are running under UID 0.\n\tDefaultNetworkSysctl = map[string]string{\n\t\t\"net.ipv4.ping_group_range\": \"0 0\",\n\t}\n\n\tGzip = archive.Gzip\n\tBzip2 = archive.Bzip2\n\tXz = archive.Xz\n\tZstd = archive.Zstd\n\tUncompressed = archive.Uncompressed\n)\n\n\/\/ IDMappingOptions controls how we set up UID\/GID mapping when we set up a\n\/\/ user namespace.\ntype IDMappingOptions struct {\n\tHostUIDMapping bool\n\tHostGIDMapping bool\n\tUIDMap []specs.LinuxIDMapping\n\tGIDMap []specs.LinuxIDMapping\n}\n\n\/\/ TempDirForURL checks if the passed-in string looks like a URL or -. If it is,\n\/\/ TempDirForURL creates a temporary directory, arranges for its contents to be\n\/\/ the contents of that URL, and returns the temporary directory's path, along\n\/\/ with the name of a subdirectory which should be used as the build context\n\/\/ (which may be empty or \".\"). Removal of the temporary directory is the\n\/\/ responsibility of the caller. If the string doesn't look like a URL,\n\/\/ TempDirForURL returns empty strings and a nil error code.\nfunc TempDirForURL(dir, prefix, url string) (name string, subdir string, err error) {\n\tif !strings.HasPrefix(url, \"http:\/\/\") &&\n\t\t!strings.HasPrefix(url, \"https:\/\/\") &&\n\t\t!strings.HasPrefix(url, \"git:\/\/\") &&\n\t\t!strings.HasPrefix(url, \"github.com\/\") &&\n\t\turl != \"-\" {\n\t\treturn \"\", \"\", nil\n\t}\n\tname, err = ioutil.TempDir(dir, prefix)\n\tif err != nil {\n\t\treturn \"\", \"\", errors.Wrapf(err, \"error creating temporary directory for %q\", url)\n\t}\n\tif strings.HasPrefix(url, \"git:\/\/\") || strings.HasSuffix(url, \".git\") {\n\t\terr = cloneToDirectory(url, name)\n\t\tif err != nil {\n\t\t\tif err2 := os.RemoveAll(name); err2 != nil {\n\t\t\t\tlogrus.Debugf(\"error removing temporary directory %q: %v\", name, err2)\n\t\t\t}\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\treturn name, \"\", nil\n\t}\n\tif strings.HasPrefix(url, \"github.com\/\") {\n\t\tghurl := url\n\t\turl = fmt.Sprintf(\"https:\/\/%s\/archive\/master.tar.gz\", ghurl)\n\t\tlogrus.Debugf(\"resolving url %q to %q\", ghurl, url)\n\t\tsubdir = path.Base(ghurl) + \"-master\"\n\t}\n\tif strings.HasPrefix(url, \"http:\/\/\") || strings.HasPrefix(url, \"https:\/\/\") {\n\t\terr = downloadToDirectory(url, name)\n\t\tif err != nil {\n\t\t\tif err2 := os.RemoveAll(name); err2 != nil {\n\t\t\t\tlogrus.Debugf(\"error removing temporary directory %q: %v\", name, err2)\n\t\t\t}\n\t\t\treturn \"\", subdir, err\n\t\t}\n\t\treturn name, subdir, nil\n\t}\n\tif url == \"-\" {\n\t\terr = stdinToDirectory(name)\n\t\tif err != nil {\n\t\t\tif err2 := os.RemoveAll(name); err2 != nil {\n\t\t\t\tlogrus.Debugf(\"error removing temporary directory %q: %v\", name, err2)\n\t\t\t}\n\t\t\treturn \"\", subdir, err\n\t\t}\n\t\tlogrus.Debugf(\"Build context is at %q\", name)\n\t\treturn name, subdir, nil\n\t}\n\tlogrus.Debugf(\"don't know how to retrieve %q\", url)\n\tif err2 := os.Remove(name); err2 != nil {\n\t\tlogrus.Debugf(\"error removing temporary directory %q: %v\", name, err2)\n\t}\n\treturn \"\", \"\", errors.Errorf(\"unreachable code reached\")\n}\n\nfunc cloneToDirectory(url, dir string) error {\n\tif !strings.HasPrefix(url, \"git:\/\/\") && !strings.HasSuffix(url, \".git\") {\n\t\turl = \"git:\/\/\" + url\n\t}\n\tgitBranch := strings.Split(url, \"#\")\n\tvar cmd *exec.Cmd\n\tif len(gitBranch) < 2 {\n\t\tlogrus.Debugf(\"cloning %q to %q\", url, dir)\n\t\tcmd = exec.Command(\"git\", \"clone\", url, dir)\n\t} else {\n\t\tlogrus.Debugf(\"cloning repo %q and branch %q to %q\", gitBranch[0], gitBranch[1], dir)\n\t\tcmd = exec.Command(\"git\", \"clone\", \"--recurse-submodules\", \"-b\", gitBranch[1], gitBranch[0], dir)\n\t}\n\treturn cmd.Run()\n}\n\nfunc downloadToDirectory(url, dir string) error {\n\tlogrus.Debugf(\"extracting %q to %q\", url, dir)\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.ContentLength == 0 {\n\t\treturn errors.Errorf(\"no contents in %q\", url)\n\t}\n\tif err := chrootarchive.Untar(resp.Body, dir, nil); err != nil {\n\t\tresp1, err := http.Get(url)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer resp1.Body.Close()\n\t\tbody, err := ioutil.ReadAll(resp1.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdockerfile := filepath.Join(dir, \"Dockerfile\")\n\t\t\/\/ Assume this is a Dockerfile\n\t\tif err := ioutils.AtomicWriteFile(dockerfile, body, 0600); err != nil {\n\t\t\treturn errors.Wrapf(err, \"Failed to write %q to %q\", url, dockerfile)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc stdinToDirectory(dir string) error {\n\tlogrus.Debugf(\"extracting stdin to %q\", dir)\n\tr := bufio.NewReader(os.Stdin)\n\tb, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed to read from stdin\")\n\t}\n\treader := bytes.NewReader(b)\n\tif err := chrootarchive.Untar(reader, dir, nil); err != nil {\n\t\tdockerfile := filepath.Join(dir, \"Dockerfile\")\n\t\t\/\/ Assume this is a Dockerfile\n\t\tif err := ioutils.AtomicWriteFile(dockerfile, b, 0600); err != nil {\n\t\t\treturn errors.Wrapf(err, \"Failed to write bytes to %q\", dockerfile)\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build appengine\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/hsluo\/slack-bot\"\n\n\t\"google.golang.org\/appengine\"\n\tl \"google.golang.org\/appengine\/log\"\n\t\"google.golang.org\/appengine\/urlfetch\"\n)\n\ntype task struct {\n\tcontext context.Context\n\turl string\n\tdata url.Values\n}\n\nvar (\n\tbotId, atId string\n\tloc *time.Location\n\toutgoing chan task\n)\n\nfunc handleHook(rw http.ResponseWriter, req *http.Request) {\n\tif req.Method != \"POST\" {\n\t\treturn\n\t}\n\n\ttoken := req.PostFormValue(\"token\")\n\tif token != credentials.HookToken {\n\t\treturn\n\t}\n\n\treplyHook(req)\n}\n\nfunc replyHook(req *http.Request) {\n\tc := appengine.NewContext(req)\n\tl.Infof(c, \"%v\", req.Form)\n\n\tchannel := req.PostFormValue(\"channel_id\")\n\ttext := req.PostFormValue(\"text\")\n\tuser_id := req.PostFormValue(\"user_id\")\n\n\tclient := urlfetch.Client(c)\n\tdata := url.Values{\"channel\": {channel}}\n\n\tif strings.Contains(text, \"commit\") {\n\t\tdata.Add(\"text\", WhatTheCommit(client))\n\t\toutgoing <- task{context: c, url: slack.ChatPostMessageApi, data: data}\n\t} else if strings.Contains(text, bot.User) ||\n\t\tstrings.Contains(text, bot.UserId) {\n\t\td1 := url.Values{\"channel\": {channel}, \"text\": {\"稍等\"}}\n\t\toutgoing <- task{context: c, url: slack.ChatPostMessageApi, data: d1}\n\n\t\ttext := codeWithAt(user_id)\n\t\td2 := url.Values{\"channel\": {channel}, \"text\": {text}}\n\t\toutgoing <- task{context: c, url: slack.ChatPostMessageApi, data: d2}\n\t} else if strings.Contains(text, \"谢谢\") {\n\t\tdata.Add(\"text\", \"不客气 :blush:\")\n\t\toutgoing <- task{context: c, url: slack.ChatPostMessageApi, data: data}\n\t} else {\n\t\tif rand.Intn(2) > 0 {\n\t\t\tdata.Add(\"text\", \"呵呵\")\n\t\t} else {\n\t\t\tdata.Add(\"text\", \"嘻嘻\")\n\t\t}\n\t\toutgoing <- task{context: c, url: slack.ChatPostMessageApi, data: data}\n\t}\n}\n\nfunc worker(outgoing chan task) {\n\tfor task := range outgoing {\n\t\t_, err := bot.WithClient(urlfetch.Client(task.context)).PostForm(task.url, task.data)\n\t\tif err != nil {\n\t\t\tl.Errorf(task.context, \"%s\\n%v\", err, task.data)\n\t\t}\n\t}\n}\n\nfunc standUpAlert(rw http.ResponseWriter, req *http.Request) {\n\tc := appengine.NewContext(req)\n\turl := credentials.SlackbotUrl\n\tif url == \"\" {\n\t\tl.Errorf(c, \"no slackbot URL provided\")\n\t\treturn\n\t}\n\turl = fmt.Sprintf(\"%s&channel=%%23%s\", url, \"general\")\n\tclient := urlfetch.Client(c)\n\tclient.Post(url, \"text\/plain\", strings.NewReader(\"stand up\"))\n}\n\nfunc logglyAlert(rw http.ResponseWriter, req *http.Request) {\n\tc := appengine.NewContext(req)\n\n\tattachment, err := NewAttachment(req)\n\tif err != nil {\n\t\tl.Errorf(c, \"%s\", err)\n\t\treturn\n\t}\n\n\tbytes, err := json.Marshal([]slack.Attachment{attachment})\n\tif err != nil {\n\t\tl.Errorf(c, \"%s\", err)\n\t\treturn\n\t}\n\tdata := url.Values{}\n\tdata.Add(\"channel\", \"#loggly\")\n\tdata.Add(\"attachments\", string(bytes))\n\tdata.Add(\"as_user\", \"false\")\n\toutgoing <- task{context: c, url: slack.ChatPostMessageApi, data: data}\n}\n\nfunc replyCommit(rw http.ResponseWriter, req *http.Request) {\n\tfmt.Fprintln(rw, WhatTheCommit(urlfetch.Client(appengine.NewContext(req))))\n}\n\nvar (\n\tdomain string\n\tlogglyClient *LogglyClient\n)\n\nfunc logglySearch(rw http.ResponseWriter, req *http.Request) {\n\tctx := appengine.NewContext(req)\n\tif logglyClient == nil {\n\t\tdomain = os.Getenv(\"LOGGLY_DOMAIN\")\n\t\tlogglyClient = &LogglyClient{\n\t\t\tusername: os.Getenv(\"LOGGLY_USERNAME\"),\n\t\t\tpassword: os.Getenv(\"LOGGLY_PASSWORD\"),\n\t\t}\n\t}\n\tlogglyClient.client = urlfetch.Client(ctx)\n\n\tapi := fmt.Sprintf(\"http:\/\/%s.loggly.com\/apiv2\/search?%s\",\n\t\tdomain,\n\t\turl.Values{\n\t\t\t\"q\": {`syslog.severity:\"Error\" OR syslog.severity:\"Warning\" OR json.status:>=500`},\n\t\t\t\"from\": {\"-10m\"},\n\t\t\t\"order\": {\"asc\"},\n\t\t}.Encode())\n\trsidResp := make(map[string]interface{})\n\tlogglyClient.Request(api).UnmarshallJson(&rsidResp)\n\n\trsid := rsidResp[\"rsid\"].(map[string]interface{})[\"id\"].(string)\n\tapi = fmt.Sprintf(\"http:\/\/%s.loggly.com\/apiv2\/events?rsid=%s\", domain, rsid)\n\tsearchResult := SearchResult{}\n\tlogglyClient.Request(api).UnmarshallJson(&searchResult)\n\tl.Infof(ctx, \"rsid=%v events=%v\", rsid, searchResult.TotalEvents)\n\n\tif searchResult.TotalEvents == 0 {\n\t\treturn\n\t}\n\n\tevents := make([]string, 0)\n\tfor _, e := range searchResult.Events {\n\t\tvar text string\n\t\tif v, ok := e.Event[\"json\"]; ok {\n\t\t\tb, _ := json.MarshalIndent(v, \"\", \" \")\n\t\t\ttext = fmt.Sprintf(\"```\\n%s\\n```\", string(b))\n\t\t} else {\n\t\t\ttext = e.Logmsg\n\t\t\tif strings.Contains(e.Logmsg, \"#012\") {\n\t\t\t\ttext = fmtHit(e.Logmsg)\n\t\t\t}\n\t\t\tt := time.Unix(e.Timestamp\/1000, 0).In(loc)\n\t\t\ttext = fmt.Sprintf(\"*%v*\\n%s\", t, text)\n\t\t}\n\t\tevents = append(events, text)\n\t}\n\tdata := url.Values{}\n\tdata.Add(\"channel\", \"#loggly\")\n\tdata.Add(\"text\", strings.Join(events, \"\\n\"+strings.Repeat(\"=\", 100)+\"\\n\"))\n\tdata.Add(\"as_user\", \"false\")\n\toutgoing <- task{context: ctx, url: slack.ChatPostMessageApi, data: data}\n}\n\nfunc init() {\n\tlog.Println(\"appengine init\")\n\toutgoing = make(chan task)\n\tgo worker(outgoing)\n\n\thttp.HandleFunc(\"\/hook\", handleHook)\n\thttp.HandleFunc(\"\/alerts\/standup\", standUpAlert)\n\t\/\/http.HandleFunc(\"\/loggly\", logglyAlert)\n\thttp.HandleFunc(\"\/loggly\/search\", logglySearch)\n\thttp.HandleFunc(\"\/cmds\/whatthecommit\",\n\t\tslack.ValidateCommand(http.HandlerFunc(replyCommit), credentials.Commands))\n}\n\nfunc main() {}\n<commit_msg>fix api in task<commit_after>\/\/ +build appengine\n\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/hsluo\/slack-bot\"\n\n\t\"google.golang.org\/appengine\"\n\tl \"google.golang.org\/appengine\/log\"\n\t\"google.golang.org\/appengine\/urlfetch\"\n)\n\ntype task struct {\n\tcontext context.Context\n\tmethod string\n\tdata url.Values\n}\n\nvar (\n\tbotId, atId string\n\tloc *time.Location\n\toutgoing chan task\n)\n\nfunc handleHook(rw http.ResponseWriter, req *http.Request) {\n\tif req.Method != \"POST\" {\n\t\treturn\n\t}\n\n\ttoken := req.PostFormValue(\"token\")\n\tif token != credentials.HookToken {\n\t\treturn\n\t}\n\n\treplyHook(req)\n}\n\nfunc replyHook(req *http.Request) {\n\tc := appengine.NewContext(req)\n\tl.Infof(c, \"%v\", req.Form)\n\n\tchannel := req.PostFormValue(\"channel_id\")\n\ttext := req.PostFormValue(\"text\")\n\tuser_id := req.PostFormValue(\"user_id\")\n\n\tclient := urlfetch.Client(c)\n\tdata := url.Values{\"channel\": {channel}}\n\n\tif strings.Contains(text, \"commit\") {\n\t\tdata.Add(\"text\", WhatTheCommit(client))\n\t\toutgoing <- task{context: c, method: \"chat.postMessage\", data: data}\n\t} else if strings.Contains(text, bot.User) ||\n\t\tstrings.Contains(text, bot.UserId) {\n\t\td1 := url.Values{\"channel\": {channel}, \"text\": {\"稍等\"}}\n\t\toutgoing <- task{context: c, method: \"chat.postMessage\", data: d1}\n\n\t\ttext := codeWithAt(user_id)\n\t\td2 := url.Values{\"channel\": {channel}, \"text\": {text}}\n\t\toutgoing <- task{context: c, method: \"chat.postMessage\", data: d2}\n\t} else if strings.Contains(text, \"谢谢\") {\n\t\tdata.Add(\"text\", \"不客气 :blush:\")\n\t\toutgoing <- task{context: c, method: \"chat.postMessage\", data: data}\n\t} else {\n\t\tif rand.Intn(2) > 0 {\n\t\t\tdata.Add(\"text\", \"呵呵\")\n\t\t} else {\n\t\t\tdata.Add(\"text\", \"嘻嘻\")\n\t\t}\n\t\toutgoing <- task{context: c, method: \"chat.postMessage\", data: data}\n\t}\n}\n\nfunc worker(outgoing chan task) {\n\tfor task := range outgoing {\n\t\t_, err := bot.WithClient(urlfetch.Client(task.context)).PostForm(task.method, task.data)\n\t\tif err != nil {\n\t\t\tl.Errorf(task.context, \"%s\\n%v\", err, task.data)\n\t\t}\n\t}\n}\n\nfunc standUpAlert(rw http.ResponseWriter, req *http.Request) {\n\tc := appengine.NewContext(req)\n\turl := credentials.SlackbotUrl\n\tif url == \"\" {\n\t\tl.Errorf(c, \"no slackbot URL provided\")\n\t\treturn\n\t}\n\turl = fmt.Sprintf(\"%s&channel=%%23%s\", url, \"general\")\n\tclient := urlfetch.Client(c)\n\tclient.Post(url, \"text\/plain\", strings.NewReader(\"stand up\"))\n}\n\nfunc logglyAlert(rw http.ResponseWriter, req *http.Request) {\n\tc := appengine.NewContext(req)\n\n\tattachment, err := NewAttachment(req)\n\tif err != nil {\n\t\tl.Errorf(c, \"%s\", err)\n\t\treturn\n\t}\n\n\tbytes, err := json.Marshal([]slack.Attachment{attachment})\n\tif err != nil {\n\t\tl.Errorf(c, \"%s\", err)\n\t\treturn\n\t}\n\tdata := url.Values{}\n\tdata.Add(\"channel\", \"#loggly\")\n\tdata.Add(\"attachments\", string(bytes))\n\tdata.Add(\"as_user\", \"false\")\n\toutgoing <- task{context: c, method: \"chat.postMessage\", data: data}\n}\n\nfunc replyCommit(rw http.ResponseWriter, req *http.Request) {\n\tfmt.Fprintln(rw, WhatTheCommit(urlfetch.Client(appengine.NewContext(req))))\n}\n\nvar (\n\tdomain string\n\tlogglyClient *LogglyClient\n)\n\nfunc logglySearch(rw http.ResponseWriter, req *http.Request) {\n\tctx := appengine.NewContext(req)\n\tif logglyClient == nil {\n\t\tdomain = os.Getenv(\"LOGGLY_DOMAIN\")\n\t\tlogglyClient = &LogglyClient{\n\t\t\tusername: os.Getenv(\"LOGGLY_USERNAME\"),\n\t\t\tpassword: os.Getenv(\"LOGGLY_PASSWORD\"),\n\t\t}\n\t}\n\tlogglyClient.client = urlfetch.Client(ctx)\n\n\tapi := fmt.Sprintf(\"http:\/\/%s.loggly.com\/apiv2\/search?%s\",\n\t\tdomain,\n\t\turl.Values{\n\t\t\t\"q\": {`syslog.severity:\"Error\" OR syslog.severity:\"Warning\" OR json.status:>=500`},\n\t\t\t\"from\": {\"-10m\"},\n\t\t\t\"order\": {\"asc\"},\n\t\t}.Encode())\n\trsidResp := make(map[string]interface{})\n\tlogglyClient.Request(api).UnmarshallJson(&rsidResp)\n\n\trsid := rsidResp[\"rsid\"].(map[string]interface{})[\"id\"].(string)\n\tapi = fmt.Sprintf(\"http:\/\/%s.loggly.com\/apiv2\/events?rsid=%s\", domain, rsid)\n\tsearchResult := SearchResult{}\n\tlogglyClient.Request(api).UnmarshallJson(&searchResult)\n\tl.Infof(ctx, \"rsid=%v events=%v\", rsid, searchResult.TotalEvents)\n\n\tif searchResult.TotalEvents == 0 {\n\t\treturn\n\t}\n\n\tevents := make([]string, 0)\n\tfor _, e := range searchResult.Events {\n\t\tvar text string\n\t\tif v, ok := e.Event[\"json\"]; ok {\n\t\t\tb, _ := json.MarshalIndent(v, \"\", \" \")\n\t\t\ttext = fmt.Sprintf(\"```\\n%s\\n```\", string(b))\n\t\t} else {\n\t\t\ttext = e.Logmsg\n\t\t\tif strings.Contains(e.Logmsg, \"#012\") {\n\t\t\t\ttext = fmtHit(e.Logmsg)\n\t\t\t}\n\t\t\tt := time.Unix(e.Timestamp\/1000, 0).In(loc)\n\t\t\ttext = fmt.Sprintf(\"*%v*\\n%s\", t, text)\n\t\t}\n\t\tevents = append(events, text)\n\t}\n\tdata := url.Values{}\n\tdata.Add(\"channel\", \"#loggly\")\n\tdata.Add(\"text\", strings.Join(events, \"\\n\"+strings.Repeat(\"=\", 100)+\"\\n\"))\n\tdata.Add(\"as_user\", \"false\")\n\toutgoing <- task{context: ctx, method: \"chat.postMessage\", data: data}\n}\n\nfunc init() {\n\tlog.Println(\"appengine init\")\n\toutgoing = make(chan task)\n\tgo worker(outgoing)\n\n\thttp.HandleFunc(\"\/hook\", handleHook)\n\thttp.HandleFunc(\"\/alerts\/standup\", standUpAlert)\n\t\/\/http.HandleFunc(\"\/loggly\", logglyAlert)\n\thttp.HandleFunc(\"\/loggly\/search\", logglySearch)\n\thttp.HandleFunc(\"\/cmds\/whatthecommit\",\n\t\tslack.ValidateCommand(http.HandlerFunc(replyCommit), credentials.Commands))\n}\n\nfunc main() {}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tATOM_XLMNS = \"http:\/\/www.w3.org\/2005\/Atom\"\n)\n\ntype rss struct {\n\tname xml.Name `xml:\"rss\"`\n\tXmlns string `xml:\"xmlns:atom,attr\"`\n\tVersion string `xml:\"version,attr\"`\n\tChannel RssChannel `xml:\"channel\"`\n}\n\ntype RssChannel struct {\n\tTitle string `xml:\"title\"`\n\tLink string `xml:\"link\"`\n\tDescription string `xml:\"description\"`\n\tLanguage string `xml:\"language\"`\n\tAtomLink struct {\n\t\tHref string `xml:\"href,attr\"`\n\t\tRel string `xml:\"rel,attr\"`\n\t\tType string `xml:\"type,attr\"`\n\t} `xml:\"atom:link\"`\n\tNewzNab struct {\n\t\tOffset int `xml:\"offset,attr\"`\n\t\tTotal int `xml:\"total,attr\"`\n\t} `xml:\"newznab:response\"`\n\tItems []RssItem `xml:\"item\"`\n}\n\ntype RssItem struct {\n\tTitle string `xml:\"title\"`\n\tLink string `xml:\"link\"`\n\tDescription string `xml:\"description\"`\n\tCategory string `xml:\"category\"`\n\tPubDate string `xml:\"pubDate\"`\n\tEnclosure struct {\n\t\tUrl string `xml:\"url,attr\"`\n\t\tLength int64 `xml:\"length,attr\"`\n\t\tType string `xml:\"type,attr\"`\n\t} `xml:\"enclosure\"`\n\tGuid struct {\n\t\tPerma string `xml:\"isPermalink,attr\"`\n\t\tGuid string `xml:\",innerxml\"`\n\t} `xml:\"guid\"`\n}\n\nfunc genrss(ctx *context, res http.ResponseWriter, req *http.Request) {\n\treq.ParseForm()\n\tvar searchQuery string\n\t\/\/var category string\n\tvar max int\n\tif q, ok := req.Form[\"q\"]; ok {\n\t\tif q[0] == \"\" {\n\t\t\tsearchQuery = \"*\"\n\t\t} else {\n\t\t\tsearchQuery = q[0]\n\t\t}\n\t} else {\n\t\tswitch req.URL.Path {\n\t\tcase \"\/rss\":\n\t\t\tsearchQuery = \"*\"\n\t\t}\n\t}\n\t\/*\n\t\tcategory = req.FormValue(\"cat\")\n\t\tcategoryName := \"All\"\n\t\tswitch category {\n\t\tcase \"anime\":\n\t\t\tcategoryName = \"Anime\"\n\t\tdefault:\n\t\t\tcategory = \"\"\n\t\t}\n\t*\/\n\tif n, err := strconv.Atoi(req.FormValue(\"max\")); err == nil {\n\t\tmax = n\n\t} else {\n\t\tmax = 50\n\t}\n\tif searchQuery == \"\" {\n\t\tif f, err := ctx.HtmlDir.Open(\"\/home.html\"); err == nil {\n\t\t\tdefer f.Close()\n\t\t\tif fi, err := f.Stat(); err != nil {\n\t\t\t\tpanic(\"Failed to stat index.html file.\")\n\t\t\t} else {\n\t\t\t\tmod := fi.ModTime()\n\t\t\t\thttp.ServeContent(res, req, \"\/index.html\", mod, f)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tres.Header().Set(\"Content-Type\", \"text\/html\")\n\t\tsResults, _ := searchBackend(ctx, searchQuery, 0, max, true)\n\t\tprotocol := req.URL.Scheme + \":\/\/\"\n\t\tif protocol == \":\/\/\" {\n\t\t\tif useSSL := req.Header.Get(\"X-SSL\"); useSSL == \"true\" {\n\t\t\t\tprotocol = \"https:\/\/\"\n\t\t\t} else {\n\t\t\t\tprotocol = \"http:\/\/\"\n\t\t\t}\n\t\t}\n\t\thostname := req.Host\n\t\tfeed := rss{\n\t\t\tXmlns: ATOM_XLMNS,\n\t\t\tVersion: \"2.0\",\n\t\t\tChannel: RssChannel{\n\t\t\t\tTitle: searchQuery + \" — Animezb\",\n\t\t\t\tLink: protocol + hostname,\n\t\t\t\tDescription: \"Usenet Indexer for Japanese Media\",\n\t\t\t\tLanguage: \"en-us\",\n\t\t\t},\n\t\t}\n\t\tfeed.Channel.AtomLink.Href = protocol + hostname + req.URL.String()\n\t\tfeed.Channel.AtomLink.Rel = \"self\"\n\t\tfeed.Channel.AtomLink.Type = \"application\/rss+xml\"\n\t\tfeed.Channel.Items = make([]RssItem, len(sResults))\n\t\tfeed.Channel.NewzNab.Offset = 0\n\t\tfeed.Channel.NewzNab.Total = len(sResults)\n\n\t\tfor idx, res := range sResults {\n\t\t\tvar postCat string\n\t\t\tswitch res.Group {\n\t\t\tcase \"alt.binaries.anime\", \"alt.binaries.multimedia.anime\", \"alt.binaries.multimedia.anime.repost\", \"alt.binaries.multimedia.anime.highspeed\":\n\t\t\t\tpostCat = \"Anime\"\n\t\t\tdefault:\n\t\t\t\tpostCat = \"Anime\"\n\t\t\t}\n\t\t\tpDate, _ := time.Parse(time.UnixDate, res.Date)\n\t\t\titem := RssItem{\n\t\t\t\tTitle: res.Name,\n\t\t\t\tLink: protocol + hostname + \"\/nzb\/\" + res.UploadId,\n\t\t\t\tDescription: formatRssDesc(res),\n\t\t\t\tCategory: postCat,\n\t\t\t\tPubDate: pDate.Format(time.RFC1123Z),\n\t\t\t}\n\t\t\titem.Enclosure.Url = protocol + hostname + \"\/nzb\/\" + res.UploadId + \"\/\" + strings.Replace(url.QueryEscape(res.Name), \"+\", \"%20\", -1) + \".nzb\"\n\t\t\titem.Enclosure.Length = res.Bytes\n\t\t\titem.Enclosure.Type = \"application\/x-nzb\"\n\t\t\titem.Guid.Guid = protocol + hostname + \"\/nzb\/\" + res.UploadId\n\t\t\titem.Guid.Perma = \"false\"\n\t\t\tfeed.Channel.Items[idx] = item\n\t\t}\n\t\tif output, err := xml.Marshal(feed); err == nil {\n\t\t\tres.Header().Set(\"Content-Type\", \"text\/xml; charset=utf-8\")\n\t\t\tres.WriteHeader(200)\n\t\t\tres.Write([]byte(xml.Header))\n\t\t\tres.Write(output)\n\t\t} else {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\nfunc formatRssDesc(sr searchResult) string {\n\tformat := `<i>Age<\/i>: %s<br \/><i>Size<\/i>: %s<br \/><i>Parts<\/i>: %s<br \/><i>Files<\/i>: %s<br \/><i>Subject<\/i>: %s`\n\treturn fmt.Sprintf(format, sr.Age, sr.Size, sr.Completion, sr.ExtTypes, sr.Subject)\n}\n<commit_msg>Add Newznab xmlns<commit_after>package main\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tATOM_XMLNS = \"http:\/\/www.w3.org\/2005\/Atom\"\n\tNEWZNAB_XMLNS = \"http:\/\/www.newznab.com\/DTD\/2010\/feeds\/attributes\/\"\n)\n\ntype rss struct {\n\tname xml.Name `xml:\"rss\"`\n\tXmlnsAtom string `xml:\"xmlns:atom,attr\"`\n\tXmlnsNewzNab string `xml:\"xmlns:newznab,attr\"`\n\tVersion string `xml:\"version,attr\"`\n\tChannel RssChannel `xml:\"channel\"`\n}\n\ntype RssChannel struct {\n\tTitle string `xml:\"title\"`\n\tLink string `xml:\"link\"`\n\tDescription string `xml:\"description\"`\n\tLanguage string `xml:\"language\"`\n\tAtomLink struct {\n\t\tHref string `xml:\"href,attr\"`\n\t\tRel string `xml:\"rel,attr\"`\n\t\tType string `xml:\"type,attr\"`\n\t} `xml:\"atom:link\"`\n\tNewzNab struct {\n\t\tOffset int `xml:\"offset,attr\"`\n\t\tTotal int `xml:\"total,attr\"`\n\t} `xml:\"newznab:response\"`\n\tItems []RssItem `xml:\"item\"`\n}\n\ntype RssItem struct {\n\tTitle string `xml:\"title\"`\n\tLink string `xml:\"link\"`\n\tDescription string `xml:\"description\"`\n\tCategory string `xml:\"category\"`\n\tPubDate string `xml:\"pubDate\"`\n\tEnclosure struct {\n\t\tUrl string `xml:\"url,attr\"`\n\t\tLength int64 `xml:\"length,attr\"`\n\t\tType string `xml:\"type,attr\"`\n\t} `xml:\"enclosure\"`\n\tGuid struct {\n\t\tPerma string `xml:\"isPermalink,attr\"`\n\t\tGuid string `xml:\",innerxml\"`\n\t} `xml:\"guid\"`\n}\n\nfunc genrss(ctx *context, res http.ResponseWriter, req *http.Request) {\n\treq.ParseForm()\n\tvar searchQuery string\n\t\/\/var category string\n\tvar max int\n\tif q, ok := req.Form[\"q\"]; ok {\n\t\tif q[0] == \"\" {\n\t\t\tsearchQuery = \"*\"\n\t\t} else {\n\t\t\tsearchQuery = q[0]\n\t\t}\n\t} else {\n\t\tswitch req.URL.Path {\n\t\tcase \"\/rss\":\n\t\t\tsearchQuery = \"*\"\n\t\t}\n\t}\n\t\/*\n\t\tcategory = req.FormValue(\"cat\")\n\t\tcategoryName := \"All\"\n\t\tswitch category {\n\t\tcase \"anime\":\n\t\t\tcategoryName = \"Anime\"\n\t\tdefault:\n\t\t\tcategory = \"\"\n\t\t}\n\t*\/\n\tif n, err := strconv.Atoi(req.FormValue(\"max\")); err == nil {\n\t\tmax = n\n\t} else {\n\t\tmax = 50\n\t}\n\tif searchQuery == \"\" {\n\t\tif f, err := ctx.HtmlDir.Open(\"\/home.html\"); err == nil {\n\t\t\tdefer f.Close()\n\t\t\tif fi, err := f.Stat(); err != nil {\n\t\t\t\tpanic(\"Failed to stat index.html file.\")\n\t\t\t} else {\n\t\t\t\tmod := fi.ModTime()\n\t\t\t\thttp.ServeContent(res, req, \"\/index.html\", mod, f)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tres.Header().Set(\"Content-Type\", \"text\/html\")\n\t\tsResults, _ := searchBackend(ctx, searchQuery, 0, max, true)\n\t\tprotocol := req.URL.Scheme + \":\/\/\"\n\t\tif protocol == \":\/\/\" {\n\t\t\tif useSSL := req.Header.Get(\"X-SSL\"); useSSL == \"true\" {\n\t\t\t\tprotocol = \"https:\/\/\"\n\t\t\t} else {\n\t\t\t\tprotocol = \"http:\/\/\"\n\t\t\t}\n\t\t}\n\t\thostname := req.Host\n\t\tfeed := rss{\n\t\t\tXmlnsAtom: ATOM_XMLNS,\n\t\t\tXmlnsNewzNab: NEWZNAB_XMLNS,\n\t\t\tVersion: \"2.0\",\n\t\t\tChannel: RssChannel{\n\t\t\t\tTitle: searchQuery + \" — Animezb\",\n\t\t\t\tLink: protocol + hostname,\n\t\t\t\tDescription: \"Usenet Indexer for Japanese Media\",\n\t\t\t\tLanguage: \"en-us\",\n\t\t\t},\n\t\t}\n\t\tfeed.Channel.AtomLink.Href = protocol + hostname + req.URL.String()\n\t\tfeed.Channel.AtomLink.Rel = \"self\"\n\t\tfeed.Channel.AtomLink.Type = \"application\/rss+xml\"\n\t\tfeed.Channel.Items = make([]RssItem, len(sResults))\n\t\tfeed.Channel.NewzNab.Offset = 0\n\t\tfeed.Channel.NewzNab.Total = len(sResults)\n\n\t\tfor idx, res := range sResults {\n\t\t\tvar postCat string\n\t\t\tswitch res.Group {\n\t\t\tcase \"alt.binaries.anime\", \"alt.binaries.multimedia.anime\", \"alt.binaries.multimedia.anime.repost\", \"alt.binaries.multimedia.anime.highspeed\":\n\t\t\t\tpostCat = \"Anime\"\n\t\t\tdefault:\n\t\t\t\tpostCat = \"Anime\"\n\t\t\t}\n\t\t\tpDate, _ := time.Parse(time.UnixDate, res.Date)\n\t\t\titem := RssItem{\n\t\t\t\tTitle: res.Name,\n\t\t\t\tLink: protocol + hostname + \"\/nzb\/\" + res.UploadId,\n\t\t\t\tDescription: formatRssDesc(res),\n\t\t\t\tCategory: postCat,\n\t\t\t\tPubDate: pDate.Format(time.RFC1123Z),\n\t\t\t}\n\t\t\titem.Enclosure.Url = protocol + hostname + \"\/nzb\/\" + res.UploadId + \"\/\" + strings.Replace(url.QueryEscape(res.Name), \"+\", \"%20\", -1) + \".nzb\"\n\t\t\titem.Enclosure.Length = res.Bytes\n\t\t\titem.Enclosure.Type = \"application\/x-nzb\"\n\t\t\titem.Guid.Guid = protocol + hostname + \"\/nzb\/\" + res.UploadId\n\t\t\titem.Guid.Perma = \"false\"\n\t\t\tfeed.Channel.Items[idx] = item\n\t\t}\n\t\tif output, err := xml.Marshal(feed); err == nil {\n\t\t\tres.Header().Set(\"Content-Type\", \"text\/xml; charset=utf-8\")\n\t\t\tres.WriteHeader(200)\n\t\t\tres.Write([]byte(xml.Header))\n\t\t\tres.Write(output)\n\t\t} else {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\nfunc formatRssDesc(sr searchResult) string {\n\tformat := `<i>Age<\/i>: %s<br \/><i>Size<\/i>: %s<br \/><i>Parts<\/i>: %s<br \/><i>Files<\/i>: %s<br \/><i>Subject<\/i>: %s`\n\treturn fmt.Sprintf(format, sr.Age, sr.Size, sr.Completion, sr.ExtTypes, sr.Subject)\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Change database format<commit_after><|endoftext|>"} {"text":"<commit_before>package event\n\nimport \"database\/sql\"\n\nvar myDB *sql.DB\nvar prepStmts struct {\n\tlookup *sql.Stmt\n\tinsert *sql.Stmt\n}\n\nfunc CreateDB(db *sql.DB) error {\n\tmyDB = db\n\n\t_, err := myDB.Exec(`\n CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\";\n CREATE TABLE IF NOT EXISTS events (\n id UUID PRIMARY KEY,\n event_type VARCHAR(64),\n context VARCHAR(64),\n original_account_id VARCHAR(64),\n created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n data JSON DEFAULT '{}'::json)\n `)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = prepareStatements()\n\n\treturn err\n}\n\nfunc list() ([]Event, error) {\n\trows, err := myDB.Query(\"SELECT * FROM events\")\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tevents := []Event{}\n\n\tfor rows.Next() {\n\t\tvar e Event\n\t\tvar data string\n\n\t\terr := rows.Scan(&e.ID, &e.EventType, &e.Context, &e.OriginalAccountID, &e.CreatedAt, &data)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\te.Data = data\n\n\t\tevents = append(events, e)\n\t}\n\n\terr = rows.Err()\n\treturn events, err\n}\n\nfunc lookup(id string) (Event, error) {\n\tvar e Event\n\tvar data string\n\n\terr := prepStmts.lookup.QueryRow(id).Scan(&e.ID, &e.EventType, &e.Context, &e.OriginalAccountID, &e.CreatedAt, &data)\n\tif err != nil {\n\t\treturn Event{}, err\n\t}\n\te.Data = data\n\n\treturn e, err\n}\n\nfunc (e Event) insert() error {\n\t_, err := prepStmts.insert.Exec(e.EventType, e.Context, e.OriginalAccountID)\n\n\treturn err\n}\n\nfunc prepareStatements() error {\n\tvar err error\n\n\tprepStmts.lookup, err = myDB.Prepare(`SELECT * FROM events\n WHERE id = $1\n `)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprepStmts.insert, err = myDB.Prepare(`INSERT INTO events\n ( id,\n event_type,\n context,\n original_account_id\n )\n VALUES ( uuid_generate_v4(), $1, $2, $3 )\n `)\n\n\treturn err\n}\n<commit_msg>Fill in the Event id properly on creation<commit_after>package event\n\nimport \"database\/sql\"\n\nvar myDB *sql.DB\nvar prepStmts struct {\n\tlookup *sql.Stmt\n\tinsert *sql.Stmt\n}\n\nfunc CreateDB(db *sql.DB) error {\n\tmyDB = db\n\n\t_, err := myDB.Exec(`\n CREATE EXTENSION IF NOT EXISTS \"uuid-ossp\";\n CREATE TABLE IF NOT EXISTS events (\n id UUID PRIMARY KEY NOT NULL,\n event_type VARCHAR(64),\n context VARCHAR(64),\n original_account_id VARCHAR(64),\n created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n data JSON DEFAULT '{}'::json)\n `)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = prepareStatements()\n\n\treturn err\n}\n\nfunc list() ([]Event, error) {\n\trows, err := myDB.Query(\"SELECT * FROM events\")\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rows.Close()\n\n\tevents := []Event{}\n\n\tfor rows.Next() {\n\t\tvar e Event\n\t\tvar data string\n\n\t\terr := rows.Scan(&e.ID, &e.EventType, &e.Context, &e.OriginalAccountID, &e.CreatedAt, &data)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\te.Data = data\n\n\t\tevents = append(events, e)\n\t}\n\n\terr = rows.Err()\n\treturn events, err\n}\n\nfunc lookup(id string) (Event, error) {\n\tvar e Event\n\tvar data string\n\n\terr := prepStmts.lookup.QueryRow(id).Scan(&e.ID, &e.EventType, &e.Context, &e.OriginalAccountID, &e.CreatedAt, &data)\n\tif err != nil {\n\t\treturn Event{}, err\n\t}\n\te.Data = data\n\n\treturn e, err\n}\n\nfunc (e *Event) insert() error {\n\terr := prepStmts.insert.QueryRow(e.EventType, e.Context, e.OriginalAccountID).Scan(&e.ID)\n\n\treturn err\n}\n\nfunc prepareStatements() error {\n\tvar err error\n\n\tprepStmts.lookup, err = myDB.Prepare(`SELECT * FROM events\n WHERE id = $1\n `)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprepStmts.insert, err = myDB.Prepare(`INSERT INTO events\n\t\t( id,\n\t\t\tevent_type,\n\t\t\tcontext,\n\t\t\toriginal_account_id\n\t\t)\n\t\tVALUES ( uuid_generate_v4(), $1, $2, $3 )\n\t\tRETURNING id\n `)\n\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/ghodss\/yaml\"\n\t\"github.com\/loadimpact\/speedboat\/api\"\n\t\"github.com\/loadimpact\/speedboat\/js\"\n\t\"github.com\/loadimpact\/speedboat\/lib\"\n\t\"github.com\/loadimpact\/speedboat\/simple\"\n\t\"github.com\/loadimpact\/speedboat\/stats\"\n\t\"github.com\/loadimpact\/speedboat\/stats\/influxdb\"\n\t\"github.com\/loadimpact\/speedboat\/ui\"\n\t\"gopkg.in\/urfave\/cli.v1\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n)\n\nconst (\n\tTypeAuto = \"auto\"\n\tTypeURL = \"url\"\n\tTypeJS = \"js\"\n)\n\nvar (\n\tErrUnknownType = errors.New(\"Unable to infer type from argument; specify with -t\/--type\")\n\tErrInvalidType = errors.New(\"Invalid type specified, see --help\")\n)\n\nvar commandRun = cli.Command{\n\tName: \"run\",\n\tUsage: \"Starts running a load test\",\n\tArgsUsage: \"url|filename\",\n\tFlags: []cli.Flag{\n\t\tcli.Int64Flag{\n\t\t\tName: \"vus, u\",\n\t\t\tUsage: \"virtual users to simulate\",\n\t\t\tValue: 10,\n\t\t},\n\t\tcli.Int64Flag{\n\t\t\tName: \"max, m\",\n\t\t\tUsage: \"max number of virtual users, if more than --vus\",\n\t\t},\n\t\tcli.DurationFlag{\n\t\t\tName: \"duration, d\",\n\t\t\tUsage: \"test duration, 0 to run until cancelled\",\n\t\t\tValue: 10 * time.Second,\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"run, r\",\n\t\t\tUsage: \"start test immediately\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"type, t\",\n\t\t\tUsage: \"input type, one of: auto, url, js\",\n\t\t\tValue: \"auto\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"quit, q\",\n\t\t\tUsage: \"quit immediately on test completion\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"quit-on-taint\",\n\t\t\tUsage: \"quit immediately if the test gets tainted\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"out, o\",\n\t\t\tUsage: \"output metrics to an external data store\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"config, c\",\n\t\t\tUsage: \"read additional config files\",\n\t\t},\n\t},\n\tAction: actionRun,\n\tDescription: `Run starts a load test.\n\n This is the main entry point to Speedboat, and will do two things:\n \n - Construct an Engine and provide it with a Runner, depending on the first\n argument and the --type flag, which is used to execute the test.\n \n - Start an a web server on the address specified by the global --address\n flag, which serves a web interface and a REST API for remote control.\n \n For ease of use, you may also pass initial status parameters (vus, max,\n duration) to 'run', which will be applied through a normal API call.`,\n}\n\nvar commandInspect = cli.Command{\n\tName: \"inspect\",\n\tAliases: []string{\"i\"},\n\tUsage: \"Merges and prints test configuration\",\n\tArgsUsage: \"url|filename\",\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"type, t\",\n\t\t\tUsage: \"input type, one of: auto, url, js\",\n\t\t\tValue: \"auto\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"config, c\",\n\t\t\tUsage: \"read additional config files\",\n\t\t},\n\t},\n\tAction: actionInspect,\n}\n\nfunc guessType(filename string) string {\n\tswitch {\n\tcase strings.Contains(filename, \":\/\/\"):\n\t\treturn TypeURL\n\tcase strings.HasSuffix(filename, \".js\"):\n\t\treturn TypeJS\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\nfunc makeRunner(filename, t string, opts *lib.Options) (lib.Runner, error) {\n\tif t == TypeAuto {\n\t\tt = guessType(filename)\n\t}\n\n\tswitch t {\n\tcase \"\":\n\t\treturn nil, ErrUnknownType\n\tcase TypeURL:\n\t\treturn simple.New(filename)\n\tcase TypeJS:\n\t\trt, err := js.New()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\texports, err := rt.Load(filename)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := rt.ExtractOptions(exports, opts); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn js.NewRunner(rt, exports)\n\tdefault:\n\t\treturn nil, ErrInvalidType\n\t}\n}\n\nfunc parseCollectorString(s string) (t string, u *url.URL, err error) {\n\tparts := strings.SplitN(s, \"=\", 2)\n\tif len(parts) != 2 {\n\t\treturn \"\", nil, errors.New(\"Malformed output; must be in the form 'type=url'\")\n\t}\n\n\tu, err = url.Parse(parts[1])\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\treturn parts[0], u, nil\n}\n\nfunc makeCollector(s string) (stats.Collector, error) {\n\tt, u, err := parseCollectorString(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch t {\n\tcase \"influxdb\":\n\t\treturn influxdb.New(u)\n\tdefault:\n\t\treturn nil, errors.New(\"Unknown output type: \" + t)\n\t}\n}\n\nfunc actionRun(cc *cli.Context) error {\n\twg := sync.WaitGroup{}\n\n\targs := cc.Args()\n\tif len(args) != 1 {\n\t\treturn cli.NewExitError(\"Wrong number of arguments!\", 1)\n\t}\n\n\t\/\/ Collect CLI arguments, most (not all) relating to options.\n\taddr := cc.GlobalString(\"address\")\n\tout := cc.String(\"out\")\n\topts := lib.Options{\n\t\tRun: cliBool(cc, \"run\"),\n\t\tVUs: cliInt64(cc, \"vus\"),\n\t\tVUsMax: cliInt64(cc, \"vus-max\"),\n\t\tDuration: cliDuration(cc, \"duration\"),\n\t\tQuit: cliBool(cc, \"quit\"),\n\t\tQuitOnTaint: cliBool(cc, \"quit-on-taint\"),\n\t}\n\n\t\/\/ Make the Runner, extract script-defined options.\n\tfilename := args[0]\n\trunnerType := cc.String(\"type\")\n\trunnerOpts := lib.Options{}\n\trunner, err := makeRunner(filename, runnerType, &runnerOpts)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Couldn't create a runner\")\n\t\treturn err\n\t}\n\topts = opts.Apply(runnerOpts)\n\n\t\/\/ Read config files.\n\tfor _, filename := range cc.StringSlice(\"config\") {\n\t\tdata, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t}\n\n\t\tvar configOpts lib.Options\n\t\tif err := yaml.Unmarshal(data, &configOpts); err != nil {\n\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t}\n\t\topts = opts.Apply(configOpts)\n\t}\n\n\t\/\/ CLI options have defaults, which are set as invalid, but have potentially nonzero values.\n\t\/\/ Flipping the Valid flag for all invalid options thus applies all defaults.\n\topts = opts.SetAllValid(true)\n\n\t\/\/ Make the metric collector, if requested.\n\tvar collector stats.Collector\n\tcollectorString := \"-\"\n\tif out != \"\" {\n\t\tc, err := makeCollector(out)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"Couldn't create output\")\n\t\t\treturn err\n\t\t}\n\t\tcollector = c\n\t\tcollectorString = fmt.Sprint(collector)\n\t}\n\n\t\/\/ Make the Engine\n\tengine, err := lib.NewEngine(runner)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Couldn't create the engine\")\n\t\treturn err\n\t}\n\tengineC, engineCancel := context.WithCancel(context.Background())\n\tengine.Collector = collector\n\n\t\/\/ Make the API Server\n\tsrv := &api.Server{\n\t\tEngine: engine,\n\t\tInfo: lib.Info{Version: cc.App.Version},\n\t}\n\tsrvC, srvCancel := context.WithCancel(context.Background())\n\n\t\/\/ Run the engine and API server in the background\n\twg.Add(2)\n\tgo func() {\n\t\tdefer func() {\n\t\t\tlog.Debug(\"Engine terminated\")\n\t\t\twg.Done()\n\t\t}()\n\t\tlog.Debug(\"Starting engine...\")\n\t\tif err := engine.Run(engineC, opts); err != nil {\n\t\t\tlog.WithError(err).Error(\"Engine Error\")\n\t\t}\n\t\tengineCancel()\n\t}()\n\tgo func() {\n\t\tdefer func() {\n\t\t\tlog.Debug(\"API Server terminated\")\n\t\t\twg.Done()\n\t\t}()\n\t\tlog.WithField(\"addr\", addr).Debug(\"API Server starting...\")\n\t\tsrv.Run(srvC, addr)\n\t\tsrvCancel()\n\t}()\n\n\t\/\/ Print the banner!\n\tfmt.Printf(\"Welcome to Speedboat v%s!\\n\", cc.App.Version)\n\tfmt.Printf(\"\\n\")\n\tfmt.Printf(\" execution: local\\n\")\n\tfmt.Printf(\" output: %s\\n\", collectorString)\n\tfmt.Printf(\" script: %s\\n\", filename)\n\tfmt.Printf(\" ↳ duration: %s\\n\", opts.Duration.String)\n\tfmt.Printf(\" ↳ vus: %d, max: %d\\n\", opts.VUs.Int64, opts.VUsMax.Int64)\n\tfmt.Printf(\"\\n\")\n\tfmt.Printf(\" web ui: http:\/\/%s\/\\n\", addr)\n\tfmt.Printf(\"\\n\")\n\n\tprogressBar := ui.ProgressBar{Width: 60}\n\tfmt.Printf(\" starting %s -- \/ --\\r\", progressBar.String())\n\n\t\/\/ Wait for a signal or timeout before shutting down\n\tsignals := make(chan os.Signal)\n\tsignal.Notify(signals, os.Interrupt, syscall.SIGTERM)\n\tticker := time.NewTicker(10 * time.Millisecond)\n\nloop:\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tstatusString := \"running\"\n\t\t\tif !engine.Status.Running.Bool {\n\t\t\t\tif engine.IsRunning() {\n\t\t\t\t\tstatusString = \"paused\"\n\t\t\t\t} else {\n\t\t\t\t\tstatusString = \"stopping\"\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tatTime := time.Duration(engine.Status.AtTime.Int64)\n\t\t\ttotalTime, finite := engine.TotalTime()\n\t\t\tprogress := 0.0\n\t\t\tif finite {\n\t\t\t\tprogress = float64(atTime) \/ float64(totalTime)\n\t\t\t}\n\n\t\t\tprogressBar.Progress = progress\n\t\t\tfmt.Printf(\"%10s %s %10s \/ %s\\r\",\n\t\t\t\tstatusString,\n\t\t\t\tprogressBar.String(),\n\t\t\t\tatTime-(atTime%(100*time.Millisecond)),\n\t\t\t\ttotalTime-(totalTime%(100*time.Millisecond)),\n\t\t\t)\n\t\tcase <-srvC.Done():\n\t\t\tlog.Debug(\"API server terminated; shutting down...\")\n\t\t\tbreak loop\n\t\tcase <-engineC.Done():\n\t\t\tlog.Debug(\"Engine terminated; shutting down...\")\n\t\t\tbreak loop\n\t\tcase sig := <-signals:\n\t\t\tlog.WithField(\"signal\", sig).Debug(\"Signal received; shutting down...\")\n\t\t\tbreak loop\n\t\t}\n\t}\n\n\t\/\/ Shut down the API server and engine.\n\tsrvCancel()\n\tengineCancel()\n\twg.Wait()\n\n\t\/\/ Test done, leave that status as the final progress bar!\n\tatTime := time.Duration(engine.Status.AtTime.Int64)\n\tprogressBar.Progress = 1.0\n\tfmt.Printf(\" done %s %10s \/ %s\\n\",\n\t\tprogressBar.String(),\n\t\tatTime-(atTime%(100*time.Millisecond)),\n\t\tatTime-(atTime%(100*time.Millisecond)),\n\t)\n\tfmt.Printf(\"\\n\")\n\n\t\/\/ Sort and print metrics.\n\tmetrics := make(map[string]*stats.Metric, len(engine.Metrics))\n\tmetricNames := make([]string, 0, len(engine.Metrics))\n\tfor m, _ := range engine.Metrics {\n\t\tmetrics[m.Name] = m\n\t\tmetricNames = append(metricNames, m.Name)\n\t}\n\tsort.Strings(metricNames)\n\n\tfor _, name := range metricNames {\n\t\tm := metrics[name]\n\t\tm.Sample = engine.Metrics[m].Format()\n\t\tval := metrics[name].Humanize()\n\t\tif val == \"0\" {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Printf(\" ✣ %s: %s\\n\", name, val)\n\t}\n\n\tif engine.Status.Tainted.Bool {\n\t\treturn cli.NewExitError(\"\", 99)\n\t}\n\treturn nil\n}\n\nfunc actionInspect(cc *cli.Context) error {\n\targs := cc.Args()\n\tif len(args) != 1 {\n\t\treturn cli.NewExitError(\"Wrong number of arguments!\", 1)\n\t}\n\tfilename := args[0]\n\n\tt := cc.String(\"type\")\n\tif t == TypeAuto {\n\t\tt = guessType(filename)\n\t}\n\n\tvar opts lib.Options\n\tswitch t {\n\tcase TypeJS:\n\t\tr, err := js.New()\n\t\tif err != nil {\n\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t}\n\n\t\texports, err := r.Load(filename)\n\t\tif err != nil {\n\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t}\n\t\tif err := r.ExtractOptions(exports, &opts); err != nil {\n\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t}\n\t}\n\n\tfor _, filename := range cc.StringSlice(\"config\") {\n\t\tdata, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t}\n\n\t\tvar configOpts lib.Options\n\t\tif err := yaml.Unmarshal(data, &configOpts); err != nil {\n\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t}\n\t\topts = opts.Apply(configOpts)\n\t}\n\n\treturn dumpYAML(opts)\n}\n<commit_msg>[feat] Groups and Checks in the UI<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/ghodss\/yaml\"\n\t\"github.com\/loadimpact\/speedboat\/api\"\n\t\"github.com\/loadimpact\/speedboat\/js\"\n\t\"github.com\/loadimpact\/speedboat\/lib\"\n\t\"github.com\/loadimpact\/speedboat\/simple\"\n\t\"github.com\/loadimpact\/speedboat\/stats\"\n\t\"github.com\/loadimpact\/speedboat\/stats\/influxdb\"\n\t\"github.com\/loadimpact\/speedboat\/ui\"\n\t\"gopkg.in\/urfave\/cli.v1\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n)\n\nconst (\n\tTypeAuto = \"auto\"\n\tTypeURL = \"url\"\n\tTypeJS = \"js\"\n)\n\nvar (\n\tErrUnknownType = errors.New(\"Unable to infer type from argument; specify with -t\/--type\")\n\tErrInvalidType = errors.New(\"Invalid type specified, see --help\")\n)\n\nvar commandRun = cli.Command{\n\tName: \"run\",\n\tUsage: \"Starts running a load test\",\n\tArgsUsage: \"url|filename\",\n\tFlags: []cli.Flag{\n\t\tcli.Int64Flag{\n\t\t\tName: \"vus, u\",\n\t\t\tUsage: \"virtual users to simulate\",\n\t\t\tValue: 10,\n\t\t},\n\t\tcli.Int64Flag{\n\t\t\tName: \"max, m\",\n\t\t\tUsage: \"max number of virtual users, if more than --vus\",\n\t\t},\n\t\tcli.DurationFlag{\n\t\t\tName: \"duration, d\",\n\t\t\tUsage: \"test duration, 0 to run until cancelled\",\n\t\t\tValue: 10 * time.Second,\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"run, r\",\n\t\t\tUsage: \"start test immediately\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"type, t\",\n\t\t\tUsage: \"input type, one of: auto, url, js\",\n\t\t\tValue: \"auto\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"quit, q\",\n\t\t\tUsage: \"quit immediately on test completion\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"quit-on-taint\",\n\t\t\tUsage: \"quit immediately if the test gets tainted\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"out, o\",\n\t\t\tUsage: \"output metrics to an external data store\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"config, c\",\n\t\t\tUsage: \"read additional config files\",\n\t\t},\n\t},\n\tAction: actionRun,\n\tDescription: `Run starts a load test.\n\n This is the main entry point to Speedboat, and will do two things:\n \n - Construct an Engine and provide it with a Runner, depending on the first\n argument and the --type flag, which is used to execute the test.\n \n - Start an a web server on the address specified by the global --address\n flag, which serves a web interface and a REST API for remote control.\n \n For ease of use, you may also pass initial status parameters (vus, max,\n duration) to 'run', which will be applied through a normal API call.`,\n}\n\nvar commandInspect = cli.Command{\n\tName: \"inspect\",\n\tAliases: []string{\"i\"},\n\tUsage: \"Merges and prints test configuration\",\n\tArgsUsage: \"url|filename\",\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"type, t\",\n\t\t\tUsage: \"input type, one of: auto, url, js\",\n\t\t\tValue: \"auto\",\n\t\t},\n\t\tcli.StringSliceFlag{\n\t\t\tName: \"config, c\",\n\t\t\tUsage: \"read additional config files\",\n\t\t},\n\t},\n\tAction: actionInspect,\n}\n\nfunc guessType(filename string) string {\n\tswitch {\n\tcase strings.Contains(filename, \":\/\/\"):\n\t\treturn TypeURL\n\tcase strings.HasSuffix(filename, \".js\"):\n\t\treturn TypeJS\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\nfunc makeRunner(filename, t string, opts *lib.Options) (lib.Runner, error) {\n\tif t == TypeAuto {\n\t\tt = guessType(filename)\n\t}\n\n\tswitch t {\n\tcase \"\":\n\t\treturn nil, ErrUnknownType\n\tcase TypeURL:\n\t\treturn simple.New(filename)\n\tcase TypeJS:\n\t\trt, err := js.New()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\texports, err := rt.Load(filename)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := rt.ExtractOptions(exports, opts); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn js.NewRunner(rt, exports)\n\tdefault:\n\t\treturn nil, ErrInvalidType\n\t}\n}\n\nfunc parseCollectorString(s string) (t string, u *url.URL, err error) {\n\tparts := strings.SplitN(s, \"=\", 2)\n\tif len(parts) != 2 {\n\t\treturn \"\", nil, errors.New(\"Malformed output; must be in the form 'type=url'\")\n\t}\n\n\tu, err = url.Parse(parts[1])\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\treturn parts[0], u, nil\n}\n\nfunc makeCollector(s string) (stats.Collector, error) {\n\tt, u, err := parseCollectorString(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch t {\n\tcase \"influxdb\":\n\t\treturn influxdb.New(u)\n\tdefault:\n\t\treturn nil, errors.New(\"Unknown output type: \" + t)\n\t}\n}\n\nfunc actionRun(cc *cli.Context) error {\n\twg := sync.WaitGroup{}\n\n\targs := cc.Args()\n\tif len(args) != 1 {\n\t\treturn cli.NewExitError(\"Wrong number of arguments!\", 1)\n\t}\n\n\t\/\/ Collect CLI arguments, most (not all) relating to options.\n\taddr := cc.GlobalString(\"address\")\n\tout := cc.String(\"out\")\n\topts := lib.Options{\n\t\tRun: cliBool(cc, \"run\"),\n\t\tVUs: cliInt64(cc, \"vus\"),\n\t\tVUsMax: cliInt64(cc, \"vus-max\"),\n\t\tDuration: cliDuration(cc, \"duration\"),\n\t\tQuit: cliBool(cc, \"quit\"),\n\t\tQuitOnTaint: cliBool(cc, \"quit-on-taint\"),\n\t}\n\n\t\/\/ Make the Runner, extract script-defined options.\n\tfilename := args[0]\n\trunnerType := cc.String(\"type\")\n\trunnerOpts := lib.Options{}\n\trunner, err := makeRunner(filename, runnerType, &runnerOpts)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Couldn't create a runner\")\n\t\treturn err\n\t}\n\topts = opts.Apply(runnerOpts)\n\n\t\/\/ Read config files.\n\tfor _, filename := range cc.StringSlice(\"config\") {\n\t\tdata, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t}\n\n\t\tvar configOpts lib.Options\n\t\tif err := yaml.Unmarshal(data, &configOpts); err != nil {\n\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t}\n\t\topts = opts.Apply(configOpts)\n\t}\n\n\t\/\/ CLI options have defaults, which are set as invalid, but have potentially nonzero values.\n\t\/\/ Flipping the Valid flag for all invalid options thus applies all defaults.\n\topts = opts.SetAllValid(true)\n\n\t\/\/ Make the metric collector, if requested.\n\tvar collector stats.Collector\n\tcollectorString := \"-\"\n\tif out != \"\" {\n\t\tc, err := makeCollector(out)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"Couldn't create output\")\n\t\t\treturn err\n\t\t}\n\t\tcollector = c\n\t\tcollectorString = fmt.Sprint(collector)\n\t}\n\n\t\/\/ Make the Engine\n\tengine, err := lib.NewEngine(runner)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Couldn't create the engine\")\n\t\treturn err\n\t}\n\tengineC, engineCancel := context.WithCancel(context.Background())\n\tengine.Collector = collector\n\n\t\/\/ Make the API Server\n\tsrv := &api.Server{\n\t\tEngine: engine,\n\t\tInfo: lib.Info{Version: cc.App.Version},\n\t}\n\tsrvC, srvCancel := context.WithCancel(context.Background())\n\n\t\/\/ Run the engine and API server in the background\n\twg.Add(2)\n\tgo func() {\n\t\tdefer func() {\n\t\t\tlog.Debug(\"Engine terminated\")\n\t\t\twg.Done()\n\t\t}()\n\t\tlog.Debug(\"Starting engine...\")\n\t\tif err := engine.Run(engineC, opts); err != nil {\n\t\t\tlog.WithError(err).Error(\"Engine Error\")\n\t\t}\n\t\tengineCancel()\n\t}()\n\tgo func() {\n\t\tdefer func() {\n\t\t\tlog.Debug(\"API Server terminated\")\n\t\t\twg.Done()\n\t\t}()\n\t\tlog.WithField(\"addr\", addr).Debug(\"API Server starting...\")\n\t\tsrv.Run(srvC, addr)\n\t\tsrvCancel()\n\t}()\n\n\t\/\/ Print the banner!\n\tfmt.Printf(\"Welcome to Speedboat v%s!\\n\", cc.App.Version)\n\tfmt.Printf(\"\\n\")\n\tfmt.Printf(\" execution: local\\n\")\n\tfmt.Printf(\" output: %s\\n\", collectorString)\n\tfmt.Printf(\" script: %s\\n\", filename)\n\tfmt.Printf(\" ↳ duration: %s\\n\", opts.Duration.String)\n\tfmt.Printf(\" ↳ vus: %d, max: %d\\n\", opts.VUs.Int64, opts.VUsMax.Int64)\n\tfmt.Printf(\"\\n\")\n\tfmt.Printf(\" web ui: http:\/\/%s\/\\n\", addr)\n\tfmt.Printf(\"\\n\")\n\n\tprogressBar := ui.ProgressBar{Width: 60}\n\tfmt.Printf(\" starting %s -- \/ --\\r\", progressBar.String())\n\n\t\/\/ Wait for a signal or timeout before shutting down\n\tsignals := make(chan os.Signal)\n\tsignal.Notify(signals, os.Interrupt, syscall.SIGTERM)\n\tticker := time.NewTicker(10 * time.Millisecond)\n\nloop:\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tstatusString := \"running\"\n\t\t\tif !engine.Status.Running.Bool {\n\t\t\t\tif engine.IsRunning() {\n\t\t\t\t\tstatusString = \"paused\"\n\t\t\t\t} else {\n\t\t\t\t\tstatusString = \"stopping\"\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tatTime := time.Duration(engine.Status.AtTime.Int64)\n\t\t\ttotalTime, finite := engine.TotalTime()\n\t\t\tprogress := 0.0\n\t\t\tif finite {\n\t\t\t\tprogress = float64(atTime) \/ float64(totalTime)\n\t\t\t}\n\n\t\t\tprogressBar.Progress = progress\n\t\t\tfmt.Printf(\"%10s %s %10s \/ %s\\r\",\n\t\t\t\tstatusString,\n\t\t\t\tprogressBar.String(),\n\t\t\t\tatTime-(atTime%(100*time.Millisecond)),\n\t\t\t\ttotalTime-(totalTime%(100*time.Millisecond)),\n\t\t\t)\n\t\tcase <-srvC.Done():\n\t\t\tlog.Debug(\"API server terminated; shutting down...\")\n\t\t\tbreak loop\n\t\tcase <-engineC.Done():\n\t\t\tlog.Debug(\"Engine terminated; shutting down...\")\n\t\t\tbreak loop\n\t\tcase sig := <-signals:\n\t\t\tlog.WithField(\"signal\", sig).Debug(\"Signal received; shutting down...\")\n\t\t\tbreak loop\n\t\t}\n\t}\n\n\t\/\/ Shut down the API server and engine.\n\tsrvCancel()\n\tengineCancel()\n\twg.Wait()\n\n\t\/\/ Test done, leave that status as the final progress bar!\n\tatTime := time.Duration(engine.Status.AtTime.Int64)\n\tprogressBar.Progress = 1.0\n\tfmt.Printf(\" done %s %10s \/ %s\\n\",\n\t\tprogressBar.String(),\n\t\tatTime-(atTime%(100*time.Millisecond)),\n\t\tatTime-(atTime%(100*time.Millisecond)),\n\t)\n\tfmt.Printf(\"\\n\")\n\n\t\/\/ Print groups.\n\tvar printGroup func(g *lib.Group, level int)\n\tprintGroup = func(g *lib.Group, level int) {\n\t\tindent := strings.Repeat(\" \", level)\n\n\t\tif g.Name != \"\" && g.Parent != nil {\n\t\t\tfmt.Printf(\"%s█ %s\\n\", indent, g.Name)\n\t\t}\n\n\t\tif len(g.Checks) > 0 {\n\t\t\tif g.Name != \"\" && g.Parent != nil {\n\t\t\t\tfmt.Printf(\"\\n\")\n\t\t\t}\n\t\t\tfor _, check := range g.Checks {\n\t\t\t\ticon := \"✓\"\n\t\t\t\tif check.Fails > 0 {\n\t\t\t\t\ticon = \"✗\"\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"%s %s %2.2f%% - %s\\n\",\n\t\t\t\t\tindent,\n\t\t\t\t\ticon,\n\t\t\t\t\t100*(float64(check.Passes)\/float64(check.Passes+check.Fails)),\n\t\t\t\t\tcheck.Name,\n\t\t\t\t)\n\t\t\t}\n\t\t\tfmt.Printf(\"\\n\")\n\t\t}\n\t\tif len(g.Groups) > 0 {\n\t\t\tif g.Name != \"\" && g.Parent != nil && len(g.Checks) > 0 {\n\t\t\t\tfmt.Printf(\"\\n\")\n\t\t\t}\n\t\t\tfor _, g := range g.Groups {\n\t\t\t\tprintGroup(g, level+1)\n\t\t\t}\n\t\t}\n\t}\n\n\tgroups := engine.Runner.GetGroups()\n\tfor _, g := range groups {\n\t\tif g.Parent != nil {\n\t\t\tcontinue\n\t\t}\n\t\tprintGroup(g, 1)\n\t}\n\n\t\/\/ Sort and print metrics.\n\tmetrics := make(map[string]*stats.Metric, len(engine.Metrics))\n\tmetricNames := make([]string, 0, len(engine.Metrics))\n\tfor m, _ := range engine.Metrics {\n\t\tmetrics[m.Name] = m\n\t\tmetricNames = append(metricNames, m.Name)\n\t}\n\tsort.Strings(metricNames)\n\n\tfor _, name := range metricNames {\n\t\tm := metrics[name]\n\t\tm.Sample = engine.Metrics[m].Format()\n\t\tval := metrics[name].Humanize()\n\t\tif val == \"0\" {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Printf(\" ✣ %s: %s\\n\", name, val)\n\t}\n\n\tif engine.Status.Tainted.Bool {\n\t\treturn cli.NewExitError(\"\", 99)\n\t}\n\treturn nil\n}\n\nfunc actionInspect(cc *cli.Context) error {\n\targs := cc.Args()\n\tif len(args) != 1 {\n\t\treturn cli.NewExitError(\"Wrong number of arguments!\", 1)\n\t}\n\tfilename := args[0]\n\n\tt := cc.String(\"type\")\n\tif t == TypeAuto {\n\t\tt = guessType(filename)\n\t}\n\n\tvar opts lib.Options\n\tswitch t {\n\tcase TypeJS:\n\t\tr, err := js.New()\n\t\tif err != nil {\n\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t}\n\n\t\texports, err := r.Load(filename)\n\t\tif err != nil {\n\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t}\n\t\tif err := r.ExtractOptions(exports, &opts); err != nil {\n\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t}\n\t}\n\n\tfor _, filename := range cc.StringSlice(\"config\") {\n\t\tdata, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t}\n\n\t\tvar configOpts lib.Options\n\t\tif err := yaml.Unmarshal(data, &configOpts); err != nil {\n\t\t\treturn cli.NewExitError(err.Error(), 1)\n\t\t}\n\t\topts = opts.Apply(configOpts)\n\t}\n\n\treturn dumpYAML(opts)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 Netflix Inc\n\/\/ Author: Colin McIntosh (colin@netflix.com)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Copyright 2018 Google Inc.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Portions of this file including TargetState and its receivers (excluding modifications) are from\n\/\/ https:\/\/github.com\/openconfig\/gnmi\/blob\/89b2bf29312cda887da916d0f3a32c1624b7935f\/cmd\/gnmi_collector\/gnmi_collector.go\n\npackage connections\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/openconfig\/gnmi\/cache\"\n\t\"github.com\/openconfig\/gnmi\/client\"\n\tgnmiclient \"github.com\/openconfig\/gnmi\/client\/gnmi\"\n\tgnmipb \"github.com\/openconfig\/gnmi\/proto\/gnmi\"\n\ttargetpb \"github.com\/openconfig\/gnmi\/proto\/target\"\n\t\"golang.org\/x\/sync\/semaphore\"\n\t\"stash.corp.netflix.com\/ocnas\/gnmi-gateway\/gateway\/configuration\"\n)\n\n\/\/ Container for some of the targetCache TargetState data. It is created once\n\/\/ for every device and used as a closure parameter by ProtoHandler.\ntype TargetState struct {\n\tconfig *configuration.GatewayConfig\n\tname string\n\ttargetCache *cache.Target\n\t\/\/ connected status is set to true when the first gnmi notification is received.\n\t\/\/ it gets reset to false when disconnect call back of ReconnectClient is called.\n\tconnected bool\n\tconnecting bool\n\tclient *client.ReconnectClient\n\tstopped bool\n\ttarget *targetpb.Target\n\trequest *gnmipb.SubscribeRequest\n}\n\nfunc (t *TargetState) Equal(other *targetpb.Target) bool {\n\tif len(t.target.Addresses) != len(other.Addresses) {\n\t\treturn false\n\t}\n\tfor i, addr := range t.target.Addresses {\n\t\tif other.Addresses[i] != addr {\n\t\t\treturn false\n\t\t}\n\t}\n\tif t.target.Credentials.Username != other.Credentials.Username {\n\t\treturn false\n\t}\n\tif t.target.Credentials.Password != other.Credentials.Password {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (t *TargetState) connect() {\n\tt.connecting = true\n\tt.config.Log.Info().Msgf(\"Connecting to target %s\", t.name)\n\tquery, err := client.NewQuery(t.request)\n\tif err != nil {\n\t\tt.config.Log.Error().Msgf(\"NewQuery(%s): %v\", t.request.String(), err)\n\t\treturn\n\t}\n\tquery.Addrs = t.target.Addresses\n\n\tif t.target.Credentials != nil {\n\t\tquery.Credentials = &client.Credentials{\n\t\t\tUsername: t.target.Credentials.Username,\n\t\t\tPassword: t.target.Credentials.Password,\n\t\t}\n\t}\n\n\t\/\/ TLS is always enabled for a targetCache.\n\tquery.TLS = &tls.Config{\n\t\t\/\/ Today, we assume that we should not verify the certificate from the targetCache.\n\t\tInsecureSkipVerify: true,\n\t}\n\n\tquery.Target = t.name\n\tquery.Timeout = t.config.TargetDialTimeout\n\n\tquery.ProtoHandler = t.handleUpdate\n\n\tif err := query.Validate(); err != nil {\n\t\tt.config.Log.Error().Err(err).Msgf(\"query.Validate(): %v\", err)\n\t\treturn\n\t}\n\tt.client = client.Reconnect(&client.BaseClient{}, t.disconnected, nil)\n\t\/\/ Subscribe blocks until .Close() is called\n\tif err := t.client.Subscribe(context.Background(), query, gnmiclient.Type); err != nil {\n\t\tt.config.Log.Error().Err(err).Msgf(\"Subscribe failed for targetCache %q: %v\", t.name, err)\n\t}\n}\n\nfunc (t *TargetState) connectWithLock(connectionSlot *semaphore.Weighted, lock *semaphore.Weighted) {\n\tvar connectionSlotAcquired = false\n\tvar connectionLockAcquired = false\n\tfor !t.stopped {\n\t\tif !connectionSlotAcquired {\n\t\t\tconnectionSlotAcquired = connectionSlot.TryAcquire(1)\n\t\t}\n\t\tif connectionSlotAcquired {\n\t\t\tif !connectionLockAcquired {\n\t\t\t\tconnectionLockAcquired = lock.TryAcquire(1)\n\t\t\t}\n\t\t\tif connectionLockAcquired {\n\t\t\t\tt.connect()\n\t\t\t}\n\t\t}\n\t}\n\tif connectionSlotAcquired {\n\t\tconnectionSlot.Release(1)\n\t}\n\tif connectionLockAcquired {\n\t\tlock.Release(1)\n\t}\n}\n\n\/\/ Disconnect from the target or stop trying to connect.\nfunc (t *TargetState) disconnect() error {\n\tt.stopped = true\n\treturn t.client.Close() \/\/ this will disconnect and reset the cache via the disconnect callback\n}\n\n\/\/ Callback for gNMI client to signal that it has disconnected.\nfunc (t *TargetState) disconnected() {\n\tt.connected = false\n\tt.targetCache.Disconnect()\n\tt.targetCache.Reset()\n}\n\nfunc (t *TargetState) reconnect() error {\n\treturn t.client.Close()\n}\n\n\/\/ handleUpdate parses a protobuf message received from the targetCache. This implementation handles only\n\/\/ gNMI SubscribeResponse messages. When the message is an Update, the GnmiUpdate method of the\n\/\/ cache.Target is called to generate an update. If the message is a sync_response, then targetCache is\n\/\/ marked as synchronised.\nfunc (t *TargetState) handleUpdate(msg proto.Message) error {\n\t\/\/fmt.Printf(\"%+v\\n\", msg)\n\tif !t.connected {\n\t\tt.targetCache.Connect()\n\t\tt.connected = true\n\t}\n\tresp, ok := msg.(*gnmipb.SubscribeResponse)\n\tif !ok {\n\t\treturn fmt.Errorf(\"failed to type assert message %#v\", msg)\n\t}\n\tswitch v := resp.Response.(type) {\n\tcase *gnmipb.SubscribeResponse_Update:\n\t\t\/\/ Gracefully handle gNMI implementations that do not set Prefix.Target in their\n\t\t\/\/ SubscribeResponse Updates.\n\t\tif v.Update.GetPrefix() == nil {\n\t\t\tv.Update.Prefix = &gnmipb.Path{}\n\t\t}\n\t\tif v.Update.Prefix.Target == \"\" {\n\t\t\tv.Update.Prefix.Target = t.name\n\t\t}\n\t\tif err := t.rejectUpdate(v.Update); err != nil {\n\t\t\t\/\/t.config.Log.Warn().Msgf(\"Update rejected: %t: %+v\", err, v.Update)\n\t\t\treturn nil\n\t\t}\n\t\terr := t.targetCache.GnmiUpdate(v.Update)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"targetCache cache update error: %t: %+v\", err, v.Update)\n\t\t}\n\tcase *gnmipb.SubscribeResponse_SyncResponse:\n\t\tt.config.Log.Debug().Msgf(\"Target is synced: %s\", t.name)\n\t\tt.targetCache.Sync()\n\tcase *gnmipb.SubscribeResponse_Error:\n\t\treturn fmt.Errorf(\"error in response: %s\", v)\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown response %T: %t\", v, v)\n\t}\n\treturn nil\n}\n\nfunc (t *TargetState) rejectUpdate(notification *gnmipb.Notification) error {\n\tfor _, update := range notification.GetUpdate() {\n\t\tpath := update.GetPath().GetElem()\n\t\tif len(path) >= 2 {\n\t\t\tif path[0].Name == \"interfaces\" && path[1].Name == \"interface\" {\n\t\t\t\tif value, exists := path[1].Key[\"name\"]; exists {\n\t\t\t\t\tif value == \"interface\" {\n\t\t\t\t\t\treturn errors.New(\"bug for Arista interface path\") \/\/ Arista BUG #??????????\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif path[0].Name == \"network-instances\" && path[1].Name == \"network-instance\" {\n\t\t\t\tif value, exists := path[1].Key[\"name\"]; exists {\n\t\t\t\t\tif value == \"network-instance\" {\n\t\t\t\t\t\treturn errors.New(\"bug for Arista isis adjacency path\") \/\/ Arista BUG #??????????\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif path[0].Name == \"netconf-state\" {\n\t\t\t\treturn errors.New(\"bug for netconf-state path\")\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Improved documentation for how TargetState works.<commit_after>\/\/ Copyright 2020 Netflix Inc\n\/\/ Author: Colin McIntosh (colin@netflix.com)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Copyright 2018 Google Inc.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Portions of this file including TargetState and its receivers (excluding modifications) are from\n\/\/ https:\/\/github.com\/openconfig\/gnmi\/blob\/89b2bf29312cda887da916d0f3a32c1624b7935f\/cmd\/gnmi_collector\/gnmi_collector.go\n\n\/\/\npackage connections\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/openconfig\/gnmi\/cache\"\n\t\"github.com\/openconfig\/gnmi\/client\"\n\tgnmiclient \"github.com\/openconfig\/gnmi\/client\/gnmi\"\n\tgnmipb \"github.com\/openconfig\/gnmi\/proto\/gnmi\"\n\ttargetpb \"github.com\/openconfig\/gnmi\/proto\/target\"\n\t\"golang.org\/x\/sync\/semaphore\"\n\t\"stash.corp.netflix.com\/ocnas\/gnmi-gateway\/gateway\/configuration\"\n)\n\n\/\/ TargetState makes the calls to connect a target, tracks any associated connection state, and is the container for\n\/\/ the target's cache data. It is created once for every device and used as a closure parameter by ProtoHandler.\ntype TargetState struct {\n\tconfig *configuration.GatewayConfig\n\tname string\n\ttargetCache *cache.Target\n\t\/\/ connected status is set to true when the first gnmi notification is received.\n\t\/\/ it gets reset to false when disconnect call back of ReconnectClient is called.\n\tconnected bool\n\t\/\/ connecting status is used to signal that some of the connection process has been started and\n\t\/\/ full reconnection is necessary if the target configuration changes\n\tconnecting bool\n\tclient *client.ReconnectClient\n\t\/\/ stopped status signals that .disconnect() has been called we no longer want to connect to this target so we\n\t\/\/ should stop trying to connect and release any locks that are being held\n\tstopped bool\n\ttarget *targetpb.Target\n\trequest *gnmipb.SubscribeRequest\n}\n\nfunc (t *TargetState) Equal(other *targetpb.Target) bool {\n\tif len(t.target.Addresses) != len(other.Addresses) {\n\t\treturn false\n\t}\n\tfor i, addr := range t.target.Addresses {\n\t\tif other.Addresses[i] != addr {\n\t\t\treturn false\n\t\t}\n\t}\n\tif t.target.Credentials.Username != other.Credentials.Username {\n\t\treturn false\n\t}\n\tif t.target.Credentials.Password != other.Credentials.Password {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (t *TargetState) connect() {\n\tt.connecting = true\n\tt.config.Log.Info().Msgf(\"Connecting to target %s\", t.name)\n\tquery, err := client.NewQuery(t.request)\n\tif err != nil {\n\t\tt.config.Log.Error().Msgf(\"NewQuery(%s): %v\", t.request.String(), err)\n\t\treturn\n\t}\n\tquery.Addrs = t.target.Addresses\n\n\tif t.target.Credentials != nil {\n\t\tquery.Credentials = &client.Credentials{\n\t\t\tUsername: t.target.Credentials.Username,\n\t\t\tPassword: t.target.Credentials.Password,\n\t\t}\n\t}\n\n\t\/\/ TLS is always enabled for a targetCache.\n\tquery.TLS = &tls.Config{\n\t\t\/\/ Today, we assume that we should not verify the certificate from the targetCache.\n\t\tInsecureSkipVerify: true,\n\t}\n\n\tquery.Target = t.name\n\tquery.Timeout = t.config.TargetDialTimeout\n\n\tquery.ProtoHandler = t.handleUpdate\n\n\tif err := query.Validate(); err != nil {\n\t\tt.config.Log.Error().Err(err).Msgf(\"query.Validate(): %v\", err)\n\t\treturn\n\t}\n\tt.client = client.Reconnect(&client.BaseClient{}, t.disconnected, nil)\n\t\/\/ Subscribe blocks until .Close() is called\n\tif err := t.client.Subscribe(context.Background(), query, gnmiclient.Type); err != nil {\n\t\tt.config.Log.Error().Err(err).Msgf(\"Subscribe failed for targetCache %q: %v\", t.name, err)\n\t}\n}\n\n\/\/ Attempt to acquire a connection slot. After a connection slot is acquired attempt to grab the lock for the target.\n\/\/ After the lock for the target is acquired connect to the target. If TargetState.disconnect() is called\n\/\/ all attempts and connections are aborted.\nfunc (t *TargetState) connectWithLock(connectionSlot *semaphore.Weighted, lock *semaphore.Weighted) {\n\tvar connectionSlotAcquired = false\n\tvar connectionLockAcquired = false\n\tfor !t.stopped {\n\t\tif !connectionSlotAcquired {\n\t\t\tconnectionSlotAcquired = connectionSlot.TryAcquire(1)\n\t\t}\n\t\tif connectionSlotAcquired {\n\t\t\tif !connectionLockAcquired {\n\t\t\t\tconnectionLockAcquired = lock.TryAcquire(1)\n\t\t\t}\n\t\t\tif connectionLockAcquired {\n\t\t\t\tt.connect()\n\t\t\t}\n\t\t}\n\t}\n\tif connectionSlotAcquired {\n\t\tconnectionSlot.Release(1)\n\t}\n\tif connectionLockAcquired {\n\t\tlock.Release(1)\n\t}\n}\n\n\/\/ Disconnect from the target or stop trying to connect.\nfunc (t *TargetState) disconnect() error {\n\tt.stopped = true\n\treturn t.client.Close() \/\/ this will disconnect and reset the cache via the disconnect callback\n}\n\n\/\/ Callback for gNMI client to signal that it has disconnected.\nfunc (t *TargetState) disconnected() {\n\tt.connected = false\n\tt.targetCache.Disconnect()\n\tt.targetCache.Reset()\n}\n\nfunc (t *TargetState) reconnect() error {\n\treturn t.client.Close()\n}\n\n\/\/ handleUpdate parses a protobuf message received from the targetCache. This implementation handles only\n\/\/ gNMI SubscribeResponse messages. When the message is an Update, the GnmiUpdate method of the\n\/\/ cache.Target is called to generate an update. If the message is a sync_response, then targetCache is\n\/\/ marked as synchronised.\nfunc (t *TargetState) handleUpdate(msg proto.Message) error {\n\t\/\/fmt.Printf(\"%+v\\n\", msg)\n\tif !t.connected {\n\t\tt.targetCache.Connect()\n\t\tt.connected = true\n\t}\n\tresp, ok := msg.(*gnmipb.SubscribeResponse)\n\tif !ok {\n\t\treturn fmt.Errorf(\"failed to type assert message %#v\", msg)\n\t}\n\tswitch v := resp.Response.(type) {\n\tcase *gnmipb.SubscribeResponse_Update:\n\t\t\/\/ Gracefully handle gNMI implementations that do not set Prefix.Target in their\n\t\t\/\/ SubscribeResponse Updates.\n\t\tif v.Update.GetPrefix() == nil {\n\t\t\tv.Update.Prefix = &gnmipb.Path{}\n\t\t}\n\t\tif v.Update.Prefix.Target == \"\" {\n\t\t\tv.Update.Prefix.Target = t.name\n\t\t}\n\t\tif err := t.rejectUpdate(v.Update); err != nil {\n\t\t\t\/\/t.config.Log.Warn().Msgf(\"Update rejected: %t: %+v\", err, v.Update)\n\t\t\treturn nil\n\t\t}\n\t\terr := t.targetCache.GnmiUpdate(v.Update)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"targetCache cache update error: %t: %+v\", err, v.Update)\n\t\t}\n\tcase *gnmipb.SubscribeResponse_SyncResponse:\n\t\tt.config.Log.Debug().Msgf(\"Target is synced: %s\", t.name)\n\t\tt.targetCache.Sync()\n\tcase *gnmipb.SubscribeResponse_Error:\n\t\treturn fmt.Errorf(\"error in response: %s\", v)\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown response %T: %t\", v, v)\n\t}\n\treturn nil\n}\n\nfunc (t *TargetState) rejectUpdate(notification *gnmipb.Notification) error {\n\tfor _, update := range notification.GetUpdate() {\n\t\tpath := update.GetPath().GetElem()\n\t\tif len(path) >= 2 {\n\t\t\tif path[0].Name == \"interfaces\" && path[1].Name == \"interface\" {\n\t\t\t\tif value, exists := path[1].Key[\"name\"]; exists {\n\t\t\t\t\tif value == \"interface\" {\n\t\t\t\t\t\treturn errors.New(\"bug for Arista interface path\") \/\/ Arista BUG #??????????\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif path[0].Name == \"network-instances\" && path[1].Name == \"network-instance\" {\n\t\t\t\tif value, exists := path[1].Key[\"name\"]; exists {\n\t\t\t\t\tif value == \"network-instance\" {\n\t\t\t\t\t\treturn errors.New(\"bug for Arista isis adjacency path\") \/\/ Arista BUG #??????????\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif path[0].Name == \"netconf-state\" {\n\t\t\t\treturn errors.New(\"bug for netconf-state path\")\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package application_test\n\nimport (\n\t\"time\"\n\n\t. \"github.com\/cloudfoundry-incubator\/cf-test-helpers\/cf\"\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/generator\"\n\tacceptanceTestHelpers \"github.com\/cloudfoundry-incubator\/cf-test-helpers\/helpers\"\n\tgatsHelpers \"github.com\/cloudfoundry\/cli-acceptance-tests\/helpers\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gexec\"\n)\n\nvar _ = Describe(\"Env\", func() {\n\tconst (\n\t\tassertionTimeout = 10 * time.Second\n\t\tappTimeout = 2 * time.Minute\n\t\tcopyAppTimeout = 3 * time.Minute\n\t)\n\n\tvar (\n\t\tcontext *acceptanceTestHelpers.ConfiguredContext\n\t\tenv *acceptanceTestHelpers.Environment\n\t)\n\n\tconfig := acceptanceTestHelpers.LoadConfig()\n\n\tBeforeEach(func() {\n\t\tcontext = acceptanceTestHelpers.NewContext(config)\n\t\tenv = acceptanceTestHelpers.NewEnvironment(context)\n\n\t\tenv.Setup()\n\t\tAsUser(context.AdminUserContext(), 30*time.Second, func() {\n\t\t\tenvVarGroups := Cf(\"ssevg\", `{}`).Wait(assertionTimeout)\n\t\t\tExpect(envVarGroups).To(Exit(0))\n\n\t\t\tenvVarGroups = Cf(\"srevg\", `{}`).Wait(assertionTimeout)\n\t\t\tExpect(envVarGroups).To(Exit(0))\n\t\t})\n\t})\n\n\tAfterEach(func() {\n\t\tAsUser(context.AdminUserContext(), 30*time.Second, func() {\n\t\t\tenvVarGroups := Cf(\"ssevg\", `{}`).Wait(assertionTimeout)\n\t\t\tExpect(envVarGroups).To(Exit(0))\n\n\t\t\tenvVarGroups = Cf(\"srevg\", `{}`).Wait(assertionTimeout)\n\t\t\tExpect(envVarGroups).To(Exit(0))\n\t\t\tenv.Teardown()\n\t\t})\n\t})\n\n\tIt(\"returns ann applications running, staging, system provided and user defined environment variables\", func() {\n\t\tAsUser(context.AdminUserContext(), 60*time.Second, func() {\n\t\t\tssevgResult := Cf(\"ssevg\", `{\"name\":\"staging-val\"}`).Wait(assertionTimeout)\n\t\t\tExpect(ssevgResult).To(Exit(0))\n\t\t\tsrevgResult := Cf(\"srevg\", `{\"name\":\"running-val\"}`).Wait(assertionTimeout)\n\t\t\tExpect(srevgResult).To(Exit(0))\n\n\t\t\tspace := context.RegularUserContext().Space\n\t\t\torg := context.RegularUserContext().Org\n\n\t\t\ttarget := Cf(\"target\", \"-o\", org, \"-s\", space).Wait(assertionTimeout)\n\t\t\tExpect(target.ExitCode()).To(Equal(0))\n\n\t\t\tappName := generator.RandomName()\n\t\t\tapp := Cf(\"push\", appName, \"-p\", gatsHelpers.NewAssets().ServiceBroker).Wait(appTimeout)\n\t\t\tExpect(app).To(Exit(0))\n\n\t\t\tsetEnvResult := Cf(\"set-env\", appName, \"set-env-key\", \"set-env-val\").Wait(assertionTimeout)\n\t\t\tExpect(setEnvResult).To(Exit(0))\n\n\t\t\tenvResult := Cf(\"env\", appName).Wait(assertionTimeout)\n\t\t\tExpect(envResult).To(Exit(0))\n\n\t\t\toutput := envResult.Out.Contents()\n\t\t\tExpect(output).To(ContainSubstring(\"\\\"VCAP_APPLICATION\\\": {\"))\n\t\t\tExpect(output).To(ContainSubstring(\"User-Provided:\\nset-env-key: set-env-val\"))\n\t\t\tExpect(output).To(ContainSubstring(\"Running Environment Variable Groups:\\nname: running-val\"))\n\t\t\tExpect(output).To(ContainSubstring(\"Staging Environment Variable Groups:\\nname: staging-val\"))\n\t\t})\n\t})\n})\n<commit_msg>randomize the env variables so parallel runs won't conflict<commit_after>package application_test\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t. \"github.com\/cloudfoundry-incubator\/cf-test-helpers\/cf\"\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/generator\"\n\tacceptanceTestHelpers \"github.com\/cloudfoundry-incubator\/cf-test-helpers\/helpers\"\n\tgatsHelpers \"github.com\/cloudfoundry\/cli-acceptance-tests\/helpers\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gexec\"\n)\n\nvar _ = Describe(\"Env\", func() {\n\tconst (\n\t\tassertionTimeout = 10 * time.Second\n\t\tappTimeout = 2 * time.Minute\n\t\tcopyAppTimeout = 3 * time.Minute\n\t)\n\n\tvar (\n\t\tcontext *acceptanceTestHelpers.ConfiguredContext\n\t\tenv *acceptanceTestHelpers.Environment\n\t)\n\n\tconfig := acceptanceTestHelpers.LoadConfig()\n\n\tBeforeEach(func() {\n\t\tcontext = acceptanceTestHelpers.NewContext(config)\n\t\tenv = acceptanceTestHelpers.NewEnvironment(context)\n\n\t\tenv.Setup()\n\t\tAsUser(context.AdminUserContext(), 30*time.Second, func() {\n\t\t\tenvVarGroups := Cf(\"set-staging-environment-variable-group\", `{}`).Wait(assertionTimeout)\n\t\t\tExpect(envVarGroups).To(Exit(0))\n\n\t\t\tenvVarGroups = Cf(\"set-running-environment-variable-group\", `{}`).Wait(assertionTimeout)\n\t\t\tExpect(envVarGroups).To(Exit(0))\n\t\t})\n\t})\n\n\tAfterEach(func() {\n\t\tAsUser(context.AdminUserContext(), 30*time.Second, func() {\n\t\t\tenvVarGroups := Cf(\"set-staging-environment-variable-group\", `{}`).Wait(assertionTimeout)\n\t\t\tExpect(envVarGroups).To(Exit(0))\n\n\t\t\tenvVarGroups = Cf(\"set-running-environment-variable-group\", `{}`).Wait(assertionTimeout)\n\t\t\tExpect(envVarGroups).To(Exit(0))\n\t\t\tenv.Teardown()\n\t\t})\n\t})\n\n\tIt(\"returns ann applications running, staging, system provided and user defined environment variables\", func() {\n\t\tAsUser(context.AdminUserContext(), 60*time.Second, func() {\n\t\t\tstagingVal := fmt.Sprintf(\"staging-val-%d\", time.Now().Nanosecond())\n\t\t\trunningVal := fmt.Sprintf(\"running-val-%d\", time.Now().Nanosecond())\n\t\t\tsetEnvVal := fmt.Sprintf(\"set-env-val-%d\", time.Now().Nanosecond())\n\n\t\t\tssevgResult := Cf(\"set-staging-environment-variable-group\", fmt.Sprintf(`{\"name\":\"%s\"}`, stagingVal)).Wait(assertionTimeout)\n\t\t\tExpect(ssevgResult).To(Exit(0))\n\t\t\tsrevgResult := Cf(\"set-running-environment-variable-group\", fmt.Sprintf(`{\"name\":\"%s\"}`, runningVal)).Wait(assertionTimeout)\n\t\t\tExpect(srevgResult).To(Exit(0))\n\n\t\t\tspace := context.RegularUserContext().Space\n\t\t\torg := context.RegularUserContext().Org\n\n\t\t\ttarget := Cf(\"target\", \"-o\", org, \"-s\", space).Wait(assertionTimeout)\n\t\t\tExpect(target.ExitCode()).To(Equal(0))\n\n\t\t\tappName := generator.RandomName()\n\t\t\tapp := Cf(\"push\", appName, \"-p\", gatsHelpers.NewAssets().ServiceBroker).Wait(appTimeout)\n\t\t\tExpect(app).To(Exit(0))\n\n\t\t\tsetEnvResult := Cf(\"set-env\", appName, \"set-env-key\", setEnvVal).Wait(assertionTimeout)\n\t\t\tExpect(setEnvResult).To(Exit(0))\n\n\t\t\tenvResult := Cf(\"env\", appName).Wait(assertionTimeout)\n\t\t\tExpect(envResult).To(Exit(0))\n\n\t\t\toutput := envResult.Out.Contents()\n\t\t\tExpect(output).To(ContainSubstring(\"\\\"VCAP_APPLICATION\\\": {\"))\n\t\t\tExpect(output).To(ContainSubstring(\"User-Provided:\\nset-env-key: set-env-val\"))\n\t\t\tExpect(output).To(ContainSubstring(\"Running Environment Variable Groups:\\nname: running-val\"))\n\t\t\tExpect(output).To(ContainSubstring(\"Staging Environment Variable Groups:\\nname: staging-val\"))\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gcsproxy_test\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/googlecloudplatform\/gcsfuse\/gcsproxy\"\n\t\"github.com\/googlecloudplatform\/gcsfuse\/lease\"\n\t\"github.com\/googlecloudplatform\/gcsfuse\/timeutil\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\/gcsfake\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\/gcsutil\"\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\nfunc TestIntegration(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nconst fileLeaserLimit = 1 << 10\n\ntype IntegrationTest struct {\n\tctx context.Context\n\tbucket gcs.Bucket\n\tleaser lease.FileLeaser\n\tclock timeutil.SimulatedClock\n\n\tmo *checkingMutableObject\n}\n\nvar _ SetUpInterface = &IntegrationTest{}\nvar _ TearDownInterface = &IntegrationTest{}\n\nfunc init() { RegisterTestSuite(&IntegrationTest{}) }\n\nfunc (t *IntegrationTest) SetUp(ti *TestInfo) {\n\tt.ctx = ti.Ctx\n\tt.bucket = gcsfake.NewFakeBucket(&t.clock, \"some_bucket\")\n\tt.leaser = lease.NewFileLeaser(\"\", fileLeaserLimit)\n\n\t\/\/ Set up a fixed, non-zero time.\n\tt.clock.SetTime(time.Date(2012, 8, 15, 22, 56, 0, 0, time.Local))\n}\n\nfunc (t *IntegrationTest) TearDown() {\n\tif t.mo != nil {\n\t\tt.mo.Destroy()\n\t}\n}\n\nfunc (t *IntegrationTest) create(o *gcs.Object) {\n\t\/\/ Ensure invariants are checked.\n\tt.mo = &checkingMutableObject{\n\t\tctx: t.ctx,\n\t\twrapped: gcsproxy.NewMutableObject(\n\t\t\to,\n\t\t\tt.bucket,\n\t\t\tt.leaser,\n\t\t\t&t.clock),\n\t}\n}\n\n\/\/ Return the object generation, or -1 if non-existent. Panic on error.\nfunc (t *IntegrationTest) objectGeneration(name string) (gen int64) {\n\t\/\/ Stat.\n\treq := &gcs.StatObjectRequest{Name: name}\n\to, err := t.bucket.StatObject(t.ctx, req)\n\n\tif _, ok := err.(*gcs.NotFoundError); ok {\n\t\tgen = -1\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ Check the result.\n\tif o.Generation > math.MaxInt64 {\n\t\tpanic(fmt.Sprintf(\"Out of range: %v\", o.Generation))\n\t}\n\n\tgen = o.Generation\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *IntegrationTest) ReadThenSync() {\n\t\/\/ Create.\n\to, err := gcsutil.CreateObject(t.ctx, t.bucket, \"foo\", \"taco\")\n\tAssertEq(nil, err)\n\n\tt.create(o)\n\n\t\/\/ Read the contents.\n\tbuf := make([]byte, 1024)\n\tn, err := t.mo.ReadAt(buf, 0)\n\n\tAssertThat(err, AnyOf(io.EOF, nil))\n\tExpectEq(len(\"taco\"), n)\n\tExpectEq(\"taco\", string(buf[:n]))\n\n\t\/\/ Sync doesn't need to do anything.\n\terr = t.mo.Sync()\n\tExpectEq(nil, err)\n\n\tExpectEq(o.Generation, t.mo.SourceGeneration())\n\tExpectEq(o.Generation, t.objectGeneration(\"foo\"))\n}\n\nfunc (t *IntegrationTest) WriteThenSync() {\n\t\/\/ Create.\n\to, err := gcsutil.CreateObject(t.ctx, t.bucket, \"foo\", \"taco\")\n\tAssertEq(nil, err)\n\n\tt.create(o)\n\n\t\/\/ Overwrite the first byte.\n\tn, err := t.mo.WriteAt([]byte(\"p\"), 0)\n\n\tAssertEq(nil, err)\n\tExpectEq(1, n)\n\n\t\/\/ Sync should save out the new generation.\n\terr = t.mo.Sync()\n\tExpectEq(nil, err)\n\n\tExpectNe(o.Generation, t.mo.SourceGeneration())\n\tExpectEq(t.objectGeneration(\"foo\"), t.mo.SourceGeneration())\n\n\tcontents, err := gcsutil.ReadObject(t.ctx, t.bucket, \"foo\")\n\tAssertEq(nil, err)\n\tExpectEq(\"paco\", contents)\n}\n\nfunc (t *IntegrationTest) TruncateThenSync() {\n\t\/\/ Create.\n\to, err := gcsutil.CreateObject(t.ctx, t.bucket, \"foo\", \"taco\")\n\tAssertEq(nil, err)\n\n\tt.create(o)\n\n\t\/\/ Truncate.\n\terr = t.mo.Truncate(2)\n\tAssertEq(nil, err)\n\n\t\/\/ Sync should save out the new generation.\n\terr = t.mo.Sync()\n\tExpectEq(nil, err)\n\n\tExpectNe(o.Generation, t.mo.SourceGeneration())\n\tExpectEq(t.objectGeneration(\"foo\"), t.mo.SourceGeneration())\n\n\tcontents, err := gcsutil.ReadObject(t.ctx, t.bucket, \"foo\")\n\tAssertEq(nil, err)\n\tExpectEq(\"ta\", contents)\n}\n\nfunc (t *IntegrationTest) Stat_Clean() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *IntegrationTest) Stat_Dirty() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *IntegrationTest) SmallerThanLeaserLimit() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *IntegrationTest) LargerThanLeaserLimit() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *IntegrationTest) BackingObjectHasBeenDeleted_BeforeReading() {\n\t\/\/ Create an object to obtain a record, then delete it.\n\tcreateTime := t.clock.Now()\n\to, err := gcsutil.CreateObject(t.ctx, t.bucket, \"foo\", \"taco\")\n\tAssertEq(nil, err)\n\tt.clock.AdvanceTime(time.Second)\n\n\terr = t.bucket.DeleteObject(t.ctx, o.Name)\n\tAssertEq(nil, err)\n\n\t\/\/ Create a mutable object around it.\n\tt.create(o)\n\n\t\/\/ Synchronously-available things should work.\n\tExpectEq(o.Name, t.mo.Name())\n\tExpectEq(o.Generation, t.mo.SourceGeneration())\n\n\tsr, err := t.mo.Stat(true)\n\tAssertEq(nil, err)\n\tExpectEq(o.Size, sr.Size)\n\tExpectThat(sr.Mtime, timeutil.TimeEq(createTime))\n\tExpectTrue(sr.Clobbered)\n\n\t\/\/ Sync doesn't need to do anything.\n\terr = t.mo.Sync()\n\tExpectEq(nil, err)\n\n\t\/\/ Anything that needs to fault in the contents should fail.\n\t_, err = t.mo.ReadAt([]byte{}, 0)\n\tExpectThat(err, Error(HasSubstr(\"not found\")))\n\n\terr = t.mo.Truncate(10)\n\tExpectThat(err, Error(HasSubstr(\"not found\")))\n\n\t_, err = t.mo.WriteAt([]byte{}, 0)\n\tExpectThat(err, Error(HasSubstr(\"not found\")))\n}\n\nfunc (t *IntegrationTest) BackingObjectHasBeenDeleted_AfterReading() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *IntegrationTest) BackingObjectHasBeenOverwritten_BeforeReading() {\n\t\/\/ Create an object, then create the mutable object wrapper around it.\n\tcreateTime := t.clock.Now()\n\to, err := gcsutil.CreateObject(t.ctx, t.bucket, \"foo\", \"taco\")\n\tAssertEq(nil, err)\n\tt.clock.AdvanceTime(time.Second)\n\n\tt.create(o)\n\n\t\/\/ Overwrite the GCS object.\n\t_, err = gcsutil.CreateObject(t.ctx, t.bucket, \"foo\", \"burrito\")\n\tAssertEq(nil, err)\n\n\t\/\/ Synchronously-available things should work.\n\tExpectEq(o.Name, t.mo.Name())\n\tExpectEq(o.Generation, t.mo.SourceGeneration())\n\n\tsr, err := t.mo.Stat(true)\n\tAssertEq(nil, err)\n\tExpectEq(o.Size, sr.Size)\n\tExpectThat(sr.Mtime, timeutil.TimeEq(createTime))\n\tExpectTrue(sr.Clobbered)\n\n\t\/\/ Sync doesn't need to do anything.\n\terr = t.mo.Sync()\n\tExpectEq(nil, err)\n\n\t\/\/ Anything that needs to fault in the contents should fail.\n\t_, err = t.mo.ReadAt([]byte{}, 0)\n\tExpectThat(err, Error(HasSubstr(\"not found\")))\n\n\terr = t.mo.Truncate(10)\n\tExpectThat(err, Error(HasSubstr(\"not found\")))\n\n\t_, err = t.mo.WriteAt([]byte{}, 0)\n\tExpectThat(err, Error(HasSubstr(\"not found\")))\n}\n\nfunc (t *IntegrationTest) BackingObjectHasBeenOverwritten_AfterReading() {\n\tAssertTrue(false, \"TODO\")\n}\n<commit_msg>IntegrationTest.Stat_InitialState<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage gcsproxy_test\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/googlecloudplatform\/gcsfuse\/gcsproxy\"\n\t\"github.com\/googlecloudplatform\/gcsfuse\/lease\"\n\t\"github.com\/googlecloudplatform\/gcsfuse\/timeutil\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\/gcsfake\"\n\t\"github.com\/jacobsa\/gcloud\/gcs\/gcsutil\"\n\t. \"github.com\/jacobsa\/oglematchers\"\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\nfunc TestIntegration(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nconst fileLeaserLimit = 1 << 10\n\ntype IntegrationTest struct {\n\tctx context.Context\n\tbucket gcs.Bucket\n\tleaser lease.FileLeaser\n\tclock timeutil.SimulatedClock\n\n\tmo *checkingMutableObject\n}\n\nvar _ SetUpInterface = &IntegrationTest{}\nvar _ TearDownInterface = &IntegrationTest{}\n\nfunc init() { RegisterTestSuite(&IntegrationTest{}) }\n\nfunc (t *IntegrationTest) SetUp(ti *TestInfo) {\n\tt.ctx = ti.Ctx\n\tt.bucket = gcsfake.NewFakeBucket(&t.clock, \"some_bucket\")\n\tt.leaser = lease.NewFileLeaser(\"\", fileLeaserLimit)\n\n\t\/\/ Set up a fixed, non-zero time.\n\tt.clock.SetTime(time.Date(2012, 8, 15, 22, 56, 0, 0, time.Local))\n}\n\nfunc (t *IntegrationTest) TearDown() {\n\tif t.mo != nil {\n\t\tt.mo.Destroy()\n\t}\n}\n\nfunc (t *IntegrationTest) create(o *gcs.Object) {\n\t\/\/ Ensure invariants are checked.\n\tt.mo = &checkingMutableObject{\n\t\tctx: t.ctx,\n\t\twrapped: gcsproxy.NewMutableObject(\n\t\t\to,\n\t\t\tt.bucket,\n\t\t\tt.leaser,\n\t\t\t&t.clock),\n\t}\n}\n\n\/\/ Return the object generation, or -1 if non-existent. Panic on error.\nfunc (t *IntegrationTest) objectGeneration(name string) (gen int64) {\n\t\/\/ Stat.\n\treq := &gcs.StatObjectRequest{Name: name}\n\to, err := t.bucket.StatObject(t.ctx, req)\n\n\tif _, ok := err.(*gcs.NotFoundError); ok {\n\t\tgen = -1\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t\/\/ Check the result.\n\tif o.Generation > math.MaxInt64 {\n\t\tpanic(fmt.Sprintf(\"Out of range: %v\", o.Generation))\n\t}\n\n\tgen = o.Generation\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Tests\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *IntegrationTest) ReadThenSync() {\n\t\/\/ Create.\n\to, err := gcsutil.CreateObject(t.ctx, t.bucket, \"foo\", \"taco\")\n\tAssertEq(nil, err)\n\n\tt.create(o)\n\n\t\/\/ Read the contents.\n\tbuf := make([]byte, 1024)\n\tn, err := t.mo.ReadAt(buf, 0)\n\n\tAssertThat(err, AnyOf(io.EOF, nil))\n\tExpectEq(len(\"taco\"), n)\n\tExpectEq(\"taco\", string(buf[:n]))\n\n\t\/\/ Sync doesn't need to do anything.\n\terr = t.mo.Sync()\n\tExpectEq(nil, err)\n\n\tExpectEq(o.Generation, t.mo.SourceGeneration())\n\tExpectEq(o.Generation, t.objectGeneration(\"foo\"))\n}\n\nfunc (t *IntegrationTest) WriteThenSync() {\n\t\/\/ Create.\n\to, err := gcsutil.CreateObject(t.ctx, t.bucket, \"foo\", \"taco\")\n\tAssertEq(nil, err)\n\n\tt.create(o)\n\n\t\/\/ Overwrite the first byte.\n\tn, err := t.mo.WriteAt([]byte(\"p\"), 0)\n\n\tAssertEq(nil, err)\n\tExpectEq(1, n)\n\n\t\/\/ Sync should save out the new generation.\n\terr = t.mo.Sync()\n\tExpectEq(nil, err)\n\n\tExpectNe(o.Generation, t.mo.SourceGeneration())\n\tExpectEq(t.objectGeneration(\"foo\"), t.mo.SourceGeneration())\n\n\tcontents, err := gcsutil.ReadObject(t.ctx, t.bucket, \"foo\")\n\tAssertEq(nil, err)\n\tExpectEq(\"paco\", contents)\n}\n\nfunc (t *IntegrationTest) TruncateThenSync() {\n\t\/\/ Create.\n\to, err := gcsutil.CreateObject(t.ctx, t.bucket, \"foo\", \"taco\")\n\tAssertEq(nil, err)\n\n\tt.create(o)\n\n\t\/\/ Truncate.\n\terr = t.mo.Truncate(2)\n\tAssertEq(nil, err)\n\n\t\/\/ Sync should save out the new generation.\n\terr = t.mo.Sync()\n\tExpectEq(nil, err)\n\n\tExpectNe(o.Generation, t.mo.SourceGeneration())\n\tExpectEq(t.objectGeneration(\"foo\"), t.mo.SourceGeneration())\n\n\tcontents, err := gcsutil.ReadObject(t.ctx, t.bucket, \"foo\")\n\tAssertEq(nil, err)\n\tExpectEq(\"ta\", contents)\n}\n\nfunc (t *IntegrationTest) Stat_InitialState() {\n\t\/\/ Create.\n\tcreateTime := t.clock.Now()\n\to, err := gcsutil.CreateObject(t.ctx, t.bucket, \"foo\", \"taco\")\n\tAssertEq(nil, err)\n\tt.clock.AdvanceTime(time.Second)\n\n\tt.create(o)\n\n\t\/\/ Stat.\n\tsr, err := t.mo.Stat(true)\n\tAssertEq(nil, err)\n\tExpectEq(o.Size, sr.Size)\n\tExpectThat(sr.Mtime, timeutil.TimeEq(createTime))\n\tExpectFalse(sr.Clobbered)\n}\n\nfunc (t *IntegrationTest) Stat_Synced() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *IntegrationTest) Stat_Dirty() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *IntegrationTest) SmallerThanLeaserLimit() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *IntegrationTest) LargerThanLeaserLimit() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *IntegrationTest) BackingObjectHasBeenDeleted_BeforeReading() {\n\t\/\/ Create an object to obtain a record, then delete it.\n\tcreateTime := t.clock.Now()\n\to, err := gcsutil.CreateObject(t.ctx, t.bucket, \"foo\", \"taco\")\n\tAssertEq(nil, err)\n\tt.clock.AdvanceTime(time.Second)\n\n\terr = t.bucket.DeleteObject(t.ctx, o.Name)\n\tAssertEq(nil, err)\n\n\t\/\/ Create a mutable object around it.\n\tt.create(o)\n\n\t\/\/ Synchronously-available things should work.\n\tExpectEq(o.Name, t.mo.Name())\n\tExpectEq(o.Generation, t.mo.SourceGeneration())\n\n\tsr, err := t.mo.Stat(true)\n\tAssertEq(nil, err)\n\tExpectEq(o.Size, sr.Size)\n\tExpectThat(sr.Mtime, timeutil.TimeEq(createTime))\n\tExpectTrue(sr.Clobbered)\n\n\t\/\/ Sync doesn't need to do anything.\n\terr = t.mo.Sync()\n\tExpectEq(nil, err)\n\n\t\/\/ Anything that needs to fault in the contents should fail.\n\t_, err = t.mo.ReadAt([]byte{}, 0)\n\tExpectThat(err, Error(HasSubstr(\"not found\")))\n\n\terr = t.mo.Truncate(10)\n\tExpectThat(err, Error(HasSubstr(\"not found\")))\n\n\t_, err = t.mo.WriteAt([]byte{}, 0)\n\tExpectThat(err, Error(HasSubstr(\"not found\")))\n}\n\nfunc (t *IntegrationTest) BackingObjectHasBeenDeleted_AfterReading() {\n\tAssertTrue(false, \"TODO\")\n}\n\nfunc (t *IntegrationTest) BackingObjectHasBeenOverwritten_BeforeReading() {\n\t\/\/ Create an object, then create the mutable object wrapper around it.\n\tcreateTime := t.clock.Now()\n\to, err := gcsutil.CreateObject(t.ctx, t.bucket, \"foo\", \"taco\")\n\tAssertEq(nil, err)\n\tt.clock.AdvanceTime(time.Second)\n\n\tt.create(o)\n\n\t\/\/ Overwrite the GCS object.\n\t_, err = gcsutil.CreateObject(t.ctx, t.bucket, \"foo\", \"burrito\")\n\tAssertEq(nil, err)\n\n\t\/\/ Synchronously-available things should work.\n\tExpectEq(o.Name, t.mo.Name())\n\tExpectEq(o.Generation, t.mo.SourceGeneration())\n\n\tsr, err := t.mo.Stat(true)\n\tAssertEq(nil, err)\n\tExpectEq(o.Size, sr.Size)\n\tExpectThat(sr.Mtime, timeutil.TimeEq(createTime))\n\tExpectTrue(sr.Clobbered)\n\n\t\/\/ Sync doesn't need to do anything.\n\terr = t.mo.Sync()\n\tExpectEq(nil, err)\n\n\t\/\/ Anything that needs to fault in the contents should fail.\n\t_, err = t.mo.ReadAt([]byte{}, 0)\n\tExpectThat(err, Error(HasSubstr(\"not found\")))\n\n\terr = t.mo.Truncate(10)\n\tExpectThat(err, Error(HasSubstr(\"not found\")))\n\n\t_, err = t.mo.WriteAt([]byte{}, 0)\n\tExpectThat(err, Error(HasSubstr(\"not found\")))\n}\n\nfunc (t *IntegrationTest) BackingObjectHasBeenOverwritten_AfterReading() {\n\tAssertTrue(false, \"TODO\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ sed.go\n\/\/ sed\n\/\/\n\/\/ Copyright (c) 2009 Geoffrey Clements\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\/\/\n\npackage sed\n\nimport (\n\t\"flag\";\n\t\"fmt\";\n\t\"io\/ioutil\";\n\t\"os\";\n\t\"strings\";\n\t\"bytes\";\n\t\"container\/vector\";\n)\n\nconst (\n\tversionMajor\t= 0;\n\tversionMinor\t= 1;\n\tversionPoint\t= 0;\n)\n\nvar versionString string\n\nfunc init() {\n\tversionString = fmt.Sprintf(\"%d.%d.%d\", versionMajor, versionMinor, versionPoint)\n}\n\nvar show_version = flag.Bool(\"version\", false, \"Show version information.\")\nvar show_help = flag.Bool(\"h\", false, \"Show help information.\")\nvar quiet = flag.Bool(\"n\", false, \"Don't print the pattern space at the end of each script cycle.\")\nvar script = flag.String(\"e\", \"\", \"The script used to process the input file.\")\nvar script_file = flag.String(\"f\", \"\", \"Specify a file to read as the script. Ignored if -e present\")\nvar edit_inplace = flag.Bool(\"i\", false, \"This option specifies that files are to be edited in-place. Otherwise output is printed to stdout.\")\nvar line_wrap = flag.Uint(\"l\", 70, \"Specify the default line-wrap length for the l command. A length of 0 (zero) means to never wrap long lines. If not specified, it is taken to be 70.\")\nvar unbuffered = flag.Bool(\"u\", false, \"Buffer both input and output as minimally as practical. (ignored)\")\nvar treat_files_as_seperate = flag.Bool(\"s\", false, \"Treat files as searate entites. Line numbers reset to 1 for each file\")\n\nvar usageShown bool = false\n\ntype Sed struct {\n\tinputLines\t\t[][]byte;\n\tcommands\t\t*vector.Vector;\n\toutputFile\t\t*os.File;\n\tpatternSpace, holdSpace\t[]byte;\n\tlineNumber\t\tint;\n}\n\nfunc (s *Sed) Init() {\n\ts.commands = new(vector.Vector);\n\ts.outputFile = os.Stdout;\n\ts.patternSpace = make([]byte, 0);\n\ts.holdSpace = make([]byte, 0);\n}\n\nfunc usage() {\n\t\/\/ only show usage once.\n\tif !usageShown {\n\t\tusageShown = true;\n\t\tfmt.Fprint(os.Stdout, \"sed [options] [script] input_file\\n\\n\");\n\t\tflag.PrintDefaults();\n\t}\n}\n\nvar inputFilename string\n\nfunc (s *Sed) readInputFile() {\n\tb, err := ioutil.ReadFile(inputFilename);\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error reading input file %s\\n\", inputFilename);\n\t\tos.Exit(-1);\n\t}\n\ts.inputLines = bytes.Split(b, []byte{'\\n'}, 0);\n}\n\nfunc (s *Sed) parseScript(scriptBuffer []byte) (err os.Error) {\n\t\/\/ a script may be a single command or it may be several\n\tscriptLines := bytes.Split(scriptBuffer, []byte{'\\n'}, 0);\n\tfor idx, line := range scriptLines {\n\t\tline = bytes.TrimSpace(line);\n\t\tif bytes.HasPrefix(line, []byte{'#'}) || len(line) == 0 {\n\t\t\t\/\/ comment\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ this isn't really right. There may be slashes in the regular expression\n\t\tpieces := bytes.Split(line, []byte{'\/'}, 0);\n\t\tc, err := NewCmd(pieces);\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Script error: %s -> %d: %s\\n\", err.String(), idx+1, line);\n\t\t\tos.Exit(-1);\n\t\t}\n\t\ts.commands.Push(c);\n\t}\n\treturn nil;\n}\n\nfunc (s *Sed) printPatternSpace() {\n\tl := len(s.patternSpace);\n\tif *line_wrap <= 0 || l < int(*line_wrap) {\n\t\tfmt.Fprintf(s.outputFile, \"%s\\n\", s.patternSpace)\n\t} else {\n\t\t\/\/ print the line in segments\n\t\tfor i := 0; i < l; i += int(*line_wrap) {\n\t\t\tendOfLine := i + int(*line_wrap);\n\t\t\tif endOfLine > l {\n\t\t\t\tendOfLine = l\n\t\t\t}\n\t\t\tfmt.Fprintf(s.outputFile, \"%s\\n\", s.patternSpace[i:endOfLine]);\n\t\t}\n\t}\n}\n\nfunc (s *Sed) process() {\n\tif *treat_files_as_seperate || *edit_inplace {\n\t\ts.lineNumber = 0\n\t}\n\tfor _, s.patternSpace = range s.inputLines {\n\t\t\/\/ track line number starting with line 1\n\t\ts.lineNumber++;\n\t\tfor c := range s.commands.Iter() {\n\t\t\t\/\/ println(\"cmd: \", c.(fmt.Stringer).String());\n\t\t\tif s.lineMatchesAddress(c.(Cmd).getAddress()) {\n\t\t\t\tstop, err := c.(Cmd).processLine(s);\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"%v\\n\", err);\n\t\t\t\t\tos.Exit(-1);\n\t\t\t\t}\n\t\t\t\tif stop {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif !*quiet {\n\t\t\ts.printPatternSpace()\n\t\t}\n\t}\n}\n\nfunc Main() {\n\ts := new(Sed);\n\ts.Init();\n\tflag.Parse();\n\tif *show_version {\n\t\tfmt.Fprintf(os.Stdout, \"Version: %s (c)2009 Geoffrey Clements All Rights Reserved\\n\\n\", versionString)\n\t}\n\tif *show_help {\n\t\tusage();\n\t\treturn;\n\t}\n\n\t\/\/ the first parameter may be a script or an input file. This helps us track which\n\tcurrentFileParameter := 0;\n\tvar scriptBuffer []byte = make([]byte, 0);\n\n\t\/\/ we need a script\n\tif len(*script) == 0 {\n\t\t\/\/ no -e so try -f\n\t\tif len(*script_file) > 0 {\n\t\t\tsb, err := ioutil.ReadFile(*script_file);\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Error reading script file %s\\n\", *script_file);\n\t\t\t\tos.Exit(-1);\n\t\t\t}\n\t\t\tscriptBuffer = sb;\n\t\t} else if flag.NArg() > 1 {\n\t\t\tscriptBuffer = strings.Bytes(flag.Arg(0));\n\t\t\t\/\/ first parameter was the script so move to second parameter\n\t\t\tcurrentFileParameter++;\n\t\t}\n\t} else {\n\t\tscriptBuffer = strings.Bytes(*script)\n\t}\n\n\t\/\/ if script still isn't set we are screwed, exit.\n\tif len(scriptBuffer) == 0 {\n\t\tfmt.Fprint(os.Stderr, \"No script found.\\n\\n\");\n\t\tusage();\n\t\tos.Exit(-1);\n\t}\n\n\t\/\/ parse script\n\ts.parseScript(scriptBuffer);\n\n\tif currentFileParameter >= flag.NArg() {\n\t\tfmt.Fprint(os.Stderr, \"No input file specified.\\n\\n\");\n\t\tusage();\n\t\tos.Exit(-1);\n\t}\n\n\tfor ; currentFileParameter < flag.NArg(); currentFileParameter++ {\n\t\tinputFilename = flag.Arg(currentFileParameter);\n\t\t\/\/ actually do the processing\n\t\ts.readInputFile();\n\t\tif *edit_inplace {\n\t\t\tdir, err := os.Stat(inputFilename);\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Error getting information about input file: %s %v\\n\", err);\n\t\t\t\tos.Exit(-1);\n\t\t\t}\n\t\t\tf, err := os.Open(inputFilename, os.O_WRONLY|os.O_TRUNC, int(dir.Mode));\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprint(os.Stderr, \"Error opening input file for inplace editing: %s %v\\n\", err);\n\t\t\t\tos.Exit(-1);\n\t\t\t}\n\t\t\ts.outputFile = f;\n\t\t}\n\t\ts.process();\n\t\tif *edit_inplace {\n\t\t\ts.outputFile.Close()\n\t\t}\n\t}\n}\n<commit_msg>Rather than indexing over range of input lines refactor so we can get lines from anyplace.<commit_after>\/\/\n\/\/ sed.go\n\/\/ sed\n\/\/\n\/\/ Copyright (c) 2009 Geoffrey Clements\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\/\/\n\npackage sed\n\nimport (\n\t\"flag\";\n\t\"fmt\";\n\t\"io\/ioutil\";\n\t\"os\";\n\t\"strings\";\n\t\"bytes\";\n\t\"container\/vector\";\n)\n\nconst (\n\tversionMajor\t= 0;\n\tversionMinor\t= 1;\n\tversionPoint\t= 0;\n)\n\nvar versionString string\n\nfunc init() {\n\tversionString = fmt.Sprintf(\"%d.%d.%d\", versionMajor, versionMinor, versionPoint)\n}\n\nvar show_version = flag.Bool(\"version\", false, \"Show version information.\")\nvar show_help = flag.Bool(\"h\", false, \"Show help information.\")\nvar quiet = flag.Bool(\"n\", false, \"Don't print the pattern space at the end of each script cycle.\")\nvar script = flag.String(\"e\", \"\", \"The script used to process the input file.\")\nvar script_file = flag.String(\"f\", \"\", \"Specify a file to read as the script. Ignored if -e present\")\nvar edit_inplace = flag.Bool(\"i\", false, \"This option specifies that files are to be edited in-place. Otherwise output is printed to stdout.\")\nvar line_wrap = flag.Uint(\"l\", 70, \"Specify the default line-wrap length for the l command. A length of 0 (zero) means to never wrap long lines. If not specified, it is taken to be 70.\")\nvar unbuffered = flag.Bool(\"u\", false, \"Buffer both input and output as minimally as practical. (ignored)\")\nvar treat_files_as_seperate = flag.Bool(\"s\", false, \"Treat files as searate entites. Line numbers reset to 1 for each file\")\n\nvar usageShown bool = false\n\ntype Sed struct {\n\tinputLines\t\t[][]byte;\n\tcommands\t\t*vector.Vector;\n\toutputFile\t\t*os.File;\n\tpatternSpace, holdSpace\t[]byte;\n\tlineNumber\t\tint;\n}\n\nfunc (s *Sed) Init() {\n\ts.commands = new(vector.Vector);\n\ts.outputFile = os.Stdout;\n\ts.patternSpace = make([]byte, 0);\n\ts.holdSpace = make([]byte, 0);\n\ts.lineNumber = 0;\n}\n\nfunc usage() {\n\t\/\/ only show usage once.\n\tif !usageShown {\n\t\tusageShown = true;\n\t\tfmt.Fprint(os.Stdout, \"sed [options] [script] input_file\\n\\n\");\n\t\tflag.PrintDefaults();\n\t}\n}\n\nvar inputFilename string\n\nfunc (s *Sed) readInputFile() {\n\tb, err := ioutil.ReadFile(inputFilename);\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error reading input file %s\\n\", inputFilename);\n\t\tos.Exit(-1);\n\t}\n\ts.inputLines = bytes.Split(b, []byte{'\\n'}, 0);\n}\n\nfunc (s *Sed) parseScript(scriptBuffer []byte) (err os.Error) {\n\t\/\/ a script may be a single command or it may be several\n\tscriptLines := bytes.Split(scriptBuffer, []byte{'\\n'}, 0);\n\tfor idx, line := range scriptLines {\n\t\tline = bytes.TrimSpace(line);\n\t\tif bytes.HasPrefix(line, []byte{'#'}) || len(line) == 0 {\n\t\t\t\/\/ comment\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ this isn't really right. There may be slashes in the regular expression\n\t\tpieces := bytes.Split(line, []byte{'\/'}, 0);\n\t\tc, err := NewCmd(pieces);\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Script error: %s -> %d: %s\\n\", err.String(), idx+1, line);\n\t\t\tos.Exit(-1);\n\t\t}\n\t\ts.commands.Push(c);\n\t}\n\treturn nil;\n}\n\nfunc (s *Sed) printPatternSpace() {\n\tl := len(s.patternSpace);\n\tif *line_wrap <= 0 || l < int(*line_wrap) {\n\t\tfmt.Fprintf(s.outputFile, \"%s\\n\", s.patternSpace)\n\t} else {\n\t\t\/\/ print the line in segments\n\t\tfor i := 0; i < l; i += int(*line_wrap) {\n\t\t\tendOfLine := i + int(*line_wrap);\n\t\t\tif endOfLine > l {\n\t\t\t\tendOfLine = l\n\t\t\t}\n\t\t\tfmt.Fprintf(s.outputFile, \"%s\\n\", s.patternSpace[i:endOfLine]);\n\t\t}\n\t}\n}\n\nfunc (s *Sed)getNextLine()([]byte, os.Error) {\n\tif s.lineNumber < len(s.inputLines) {\n\t\tval := s.inputLines[s.lineNumber];\n\t\ts.lineNumber++;\n\t\treturn val, nil;\n\t}\n\treturn nil, os.EOF;\n}\n\nfunc (s *Sed) process() {\n\tif *treat_files_as_seperate || *edit_inplace {\n\t\ts.lineNumber = 0\n\t}\n\tvar err os.Error;\n\tfor s.patternSpace, err = s.getNextLine(); err == nil; s.patternSpace, err = s.getNextLine() {\n\t\t\/\/ track line number starting with line 1\n\t\tfor c := range s.commands.Iter() {\n\t\t\t\/\/ println(\"cmd: \", c.(fmt.Stringer).String());\n\t\t\tif s.lineMatchesAddress(c.(Cmd).getAddress()) {\n\t\t\t\tstop, err := c.(Cmd).processLine(s);\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Printf(\"%v\\n\", err);\n\t\t\t\t\tos.Exit(-1);\n\t\t\t\t}\n\t\t\t\tif stop {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif !*quiet {\n\t\t\ts.printPatternSpace()\n\t\t}\n\t}\n}\n\nfunc Main() {\n\ts := new(Sed);\n\ts.Init();\n\tflag.Parse();\n\tif *show_version {\n\t\tfmt.Fprintf(os.Stdout, \"Version: %s (c)2009 Geoffrey Clements All Rights Reserved\\n\\n\", versionString)\n\t}\n\tif *show_help {\n\t\tusage();\n\t\treturn;\n\t}\n\n\t\/\/ the first parameter may be a script or an input file. This helps us track which\n\tcurrentFileParameter := 0;\n\tvar scriptBuffer []byte = make([]byte, 0);\n\n\t\/\/ we need a script\n\tif len(*script) == 0 {\n\t\t\/\/ no -e so try -f\n\t\tif len(*script_file) > 0 {\n\t\t\tsb, err := ioutil.ReadFile(*script_file);\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Error reading script file %s\\n\", *script_file);\n\t\t\t\tos.Exit(-1);\n\t\t\t}\n\t\t\tscriptBuffer = sb;\n\t\t} else if flag.NArg() > 1 {\n\t\t\tscriptBuffer = strings.Bytes(flag.Arg(0));\n\t\t\t\/\/ first parameter was the script so move to second parameter\n\t\t\tcurrentFileParameter++;\n\t\t}\n\t} else {\n\t\tscriptBuffer = strings.Bytes(*script)\n\t}\n\n\t\/\/ if script still isn't set we are screwed, exit.\n\tif len(scriptBuffer) == 0 {\n\t\tfmt.Fprint(os.Stderr, \"No script found.\\n\\n\");\n\t\tusage();\n\t\tos.Exit(-1);\n\t}\n\n\t\/\/ parse script\n\ts.parseScript(scriptBuffer);\n\n\tif currentFileParameter >= flag.NArg() {\n\t\tfmt.Fprint(os.Stderr, \"No input file specified.\\n\\n\");\n\t\tusage();\n\t\tos.Exit(-1);\n\t}\n\n\tfor ; currentFileParameter < flag.NArg(); currentFileParameter++ {\n\t\tinputFilename = flag.Arg(currentFileParameter);\n\t\t\/\/ actually do the processing\n\t\ts.readInputFile();\n\t\tif *edit_inplace {\n\t\t\tdir, err := os.Stat(inputFilename);\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Error getting information about input file: %s %v\\n\", err);\n\t\t\t\tos.Exit(-1);\n\t\t\t}\n\t\t\tf, err := os.Open(inputFilename, os.O_WRONLY|os.O_TRUNC, int(dir.Mode));\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprint(os.Stderr, \"Error opening input file for inplace editing: %s %v\\n\", err);\n\t\t\t\tos.Exit(-1);\n\t\t\t}\n\t\t\ts.outputFile = f;\n\t\t}\n\t\ts.process();\n\t\tif *edit_inplace {\n\t\t\ts.outputFile.Close()\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"archive\/tar\"\n\t\"hash\"\n\t\"io\"\n\t\"log\"\n\t\"path\"\n\t\"regexp\"\n\t\"strconv\"\n\n\t\"github.com\/gogo\/protobuf\/proto\"\n\t\"github.com\/gogo\/protobuf\/types\"\n\tgloblib \"github.com\/pachyderm\/ohmyglob\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/auth\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pfs\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/errors\"\n\tpfsserver \"github.com\/pachyderm\/pachyderm\/src\/server\/pfs\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/backoff\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/storage\/fileset\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/storage\/fileset\/index\"\n\ttxnenv \"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/transactionenv\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/uuid\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/work\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\t\/\/ tmpPrefix is for temporary storage paths.\n\ttmpPrefix = \"tmp\"\n\tstorageTaskNamespace = \"storage\"\n)\n\nfunc (d *driver) startCommitNewStorageLayer(txnCtx *txnenv.TransactionContext, id string, parent *pfs.Commit, branch string, provenance []*pfs.CommitProvenance, description string) (*pfs.Commit, error) {\n\treturn d.startCommit(txnCtx, id, parent, branch, provenance, description)\n}\n\nfunc (d *driver) finishCommitNewStorageLayer(txnCtx *txnenv.TransactionContext, commit *pfs.Commit, description string) (retErr error) {\n\tif err := d.checkIsAuthorizedInTransaction(txnCtx, commit.Repo, auth.Scope_WRITER); err != nil {\n\t\treturn err\n\t}\n\tcommitInfo, err := d.resolveCommit(txnCtx.Stm, commit)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif commitInfo.Finished != nil {\n\t\treturn pfsserver.ErrCommitFinished{commit}\n\t}\n\tif description != \"\" {\n\t\tcommitInfo.Description = description\n\t}\n\tcommitPath := path.Join(commit.Repo.Name, commit.ID)\n\t\/\/ Clean up temporary filesets leftover from failed operations.\n\tif err := d.storage.Delete(txnCtx.Client.Ctx(), path.Join(tmpPrefix, commitPath)); err != nil {\n\t\treturn err\n\t}\n\t\/\/ Run compaction task.\n\treturn d.compactionQueue.RunTaskBlock(txnCtx.Client.Ctx(), func(m *work.Master) error {\n\t\tif err := backoff.Retry(func() error {\n\t\t\tif err := d.storage.Delete(context.Background(), path.Join(commitPath, fileset.Diff)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn d.storage.Delete(context.Background(), path.Join(commitPath, fileset.Compacted))\n\t\t}, backoff.NewExponentialBackOff()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ Compact the commit changes into a diff file set.\n\t\tif err := d.compact(m, path.Join(commitPath, fileset.Diff), []string{commitPath}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ Compact the commit changes (diff file set) into the total changes in the commit's ancestry.\n\t\tvar compactSpec *fileset.CompactSpec\n\t\tif commitInfo.ParentCommit == nil {\n\t\t\tcompactSpec, err = d.storage.CompactSpec(m.Ctx(), commitPath)\n\t\t} else {\n\t\t\tparentCommitPath := path.Join(commitInfo.ParentCommit.Repo.Name, commitInfo.ParentCommit.ID)\n\t\t\tcompactSpec, err = d.storage.CompactSpec(m.Ctx(), commitPath, parentCommitPath)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := d.compact(m, compactSpec.Output, compactSpec.Input); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ (bryce) need size.\n\t\tcommitInfo.SizeBytes = uint64(0)\n\t\tcommitInfo.Finished = types.TimestampNow()\n\t\treturn d.writeFinishedCommit(txnCtx.Stm, commit, commitInfo)\n\t})\n}\n\n\/\/ (bryce) holding off on integrating with downstream commit deletion logic until global ids.\n\/\/func (d *driver) deleteCommitNewStorageLayer(txnCtx *txnenv.TransactionContext, commit *pfs.Commit) error {\n\/\/\treturn d.storage.Delete(txnCtx.Client.Ctx(), path.Join(commit.Repo.Name, commit.ID))\n\/\/}\n\n\/\/ (bryce) add commit validation.\n\/\/ (bryce) probably should prevent \/ clean files that end with \"\/\", since that will indicate a directory.\nfunc (d *driver) putFilesNewStorageLayer(ctx context.Context, repo, commit string, r io.Reader) (retErr error) {\n\t\/\/ (bryce) subFileSet will need to be incremented through etcd eventually.\n\td.mu.Lock()\n\tsubFileSetStr := fileset.SubFileSetStr(d.subFileSet)\n\tsubFileSetPath := path.Join(repo, commit, subFileSetStr)\n\tfs := d.storage.New(ctx, path.Join(tmpPrefix, subFileSetPath), subFileSetStr)\n\td.subFileSet++\n\td.mu.Unlock()\n\tdefer func() {\n\t\tif err := d.storage.Delete(ctx, path.Join(tmpPrefix, subFileSetPath)); retErr == nil {\n\t\t\tretErr = err\n\t\t}\n\t}()\n\tif err := fs.Put(r); err != nil {\n\t\treturn err\n\t}\n\tif err := fs.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn d.compactionQueue.RunTaskBlock(ctx, func(m *work.Master) error {\n\t\treturn d.compact(m, subFileSetPath, []string{path.Join(tmpPrefix, subFileSetPath)})\n\t})\n}\n\nfunc (d *driver) getFilesNewStorageLayer(ctx context.Context, repo, commit, glob string, w io.Writer) error {\n\t\/\/ (bryce) glob should be cleaned in option function\n\t\/\/ (bryce) need exact match option for file glob.\n\tcompactedPath := path.Join(repo, commit, fileset.Compacted)\n\tmr, err := d.storage.NewMergeReader(ctx, []string{compactedPath}, index.WithPrefix(glob))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn mr.Get(w)\n}\n\nvar globRegex = regexp.MustCompile(`[*?[\\]{}!()@+^]`)\n\nfunc globLiteralPrefix(glob string) string {\n\tidx := globRegex.FindStringIndex(glob)\n\tif idx == nil {\n\t\treturn glob\n\t}\n\treturn glob[:idx[0]]\n}\n\nfunc (d *driver) getFilesConditional(ctx context.Context, repo, commit, glob string, f func(*FileReader) error) error {\n\tcompactedPaths := []string{path.Join(repo, commit, fileset.Compacted)}\n\tprefix := globLiteralPrefix(glob)\n\tmr, err := d.storage.NewMergeReader(ctx, compactedPaths, index.WithPrefix(prefix))\n\tif err != nil {\n\t\treturn err\n\t}\n\tmf, err := matchFunc(glob)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar fr *FileReader\n\tnextFileReader := func(idx *index.Index) error {\n\t\tfmr, err := mr.Next()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !mf(idx.Path) {\n\t\t\treturn nil\n\t\t}\n\t\tfr = newFileReader(client.NewFile(repo, commit, idx.Path), idx, fmr, mr)\n\t\treturn nil\n\t}\n\tif err := d.storage.ResolveIndexes(ctx, compactedPaths, func(idx *index.Index) error {\n\t\tif fr == nil {\n\t\t\treturn nextFileReader(idx)\n\t\t}\n\t\tdir := path.Dir(idx.Path)\n\t\tif dir == fr.file.Path {\n\t\t\tfr.updateFileInfo(idx)\n\t\t\treturn nil\n\t\t}\n\t\tif err := f(fr); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := fr.drain(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfr = nil\n\t\treturn nextFileReader(idx)\n\n\t}, index.WithPrefix(prefix)); err != nil {\n\t\treturn err\n\t}\n\tif fr != nil {\n\t\treturn f(fr)\n\t}\n\treturn nil\n}\n\nfunc matchFunc(glob string) (func(string) bool, error) {\n\t\/\/ (bryce) this is a little weird, but it prevents the parent directory from being matched (i.e. \/*).\n\tvar parentG *globlib.Glob\n\tparentGlob, baseGlob := path.Split(glob)\n\tif len(baseGlob) > 0 {\n\t\tvar err error\n\t\tparentG, err = globlib.Compile(parentGlob, '\/')\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tg, err := globlib.Compile(glob, '\/')\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn func(s string) bool {\n\t\treturn g.Match(s) && (parentG == nil || !parentG.Match(s))\n\t}, nil\n}\n\n\/\/ FileReader is a PFS wrapper for a fileset.MergeReader.\n\/\/ The primary purpose of this abstraction is to convert from index.Index to\n\/\/ pfs.FileInfoNewStorage and to convert a set of index hashes to a file hash.\ntype FileReader struct {\n\tfile *pfs.File\n\tidx *index.Index\n\tfmr *fileset.FileMergeReader\n\tmr *fileset.MergeReader\n\tfileCount int\n\thash hash.Hash\n}\n\nfunc newFileReader(file *pfs.File, idx *index.Index, fmr *fileset.FileMergeReader, mr *fileset.MergeReader) *FileReader {\n\th := pfs.NewHash()\n\tfor _, dataRef := range idx.DataOp.DataRefs {\n\t\th.Write([]byte(dataRef.Hash))\n\t}\n\treturn &FileReader{\n\t\tfile: file,\n\t\tidx: idx,\n\t\tfmr: fmr,\n\t\tmr: mr,\n\t\thash: h,\n\t}\n}\n\nfunc (fr *FileReader) updateFileInfo(idx *index.Index) {\n\tfr.fileCount++\n\tfor _, dataRef := range idx.DataOp.DataRefs {\n\t\tfr.hash.Write([]byte(dataRef.Hash))\n\t}\n}\n\n\/\/ Info returns the info for the file.\nfunc (fr *FileReader) Info() *pfs.FileInfoNewStorage {\n\treturn &pfs.FileInfoNewStorage{\n\t\tFile: fr.file,\n\t\tHash: pfs.EncodeHash(fr.hash.Sum(nil)),\n\t}\n}\n\n\/\/ Get writes a tar stream that contains the file.\nfunc (fr *FileReader) Get(w io.Writer) error {\n\tif err := fr.fmr.Get(w); err != nil {\n\t\treturn err\n\t}\n\tfor fr.fileCount > 0 {\n\t\tfmr, err := fr.mr.Next()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := fmr.Get(w); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfr.fileCount--\n\t}\n\t\/\/ Close a tar writer to create tar EOF padding.\n\treturn tar.NewWriter(w).Close()\n}\n\nfunc (fr *FileReader) drain() error {\n\tfor fr.fileCount > 0 {\n\t\tif _, err := fr.mr.Next(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfr.fileCount--\n\t}\n\treturn nil\n}\n\nfunc (d *driver) compact(master *work.Master, outputPath string, inputPrefixes []string) (retErr error) {\n\tscratch := path.Join(tmpPrefix, uuid.NewWithoutDashes())\n\tdefer func() {\n\t\tif err := d.storage.Delete(context.Background(), scratch); retErr == nil {\n\t\t\tretErr = err\n\t\t}\n\t}()\n\tcompaction := &pfs.Compaction{InputPrefixes: inputPrefixes}\n\tvar subtasks []*work.Task\n\tif err := d.storage.Shard(master.Ctx(), inputPrefixes, func(pathRange *index.PathRange) error {\n\t\toutputPath := path.Join(scratch, strconv.Itoa(len(subtasks)))\n\t\tshard, err := serializeShard(&pfs.Shard{\n\t\t\tCompaction: compaction,\n\t\t\tRange: &pfs.PathRange{\n\t\t\t\tLower: pathRange.Lower,\n\t\t\t\tUpper: pathRange.Upper,\n\t\t\t},\n\t\t\tOutputPath: outputPath,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsubtasks = append(subtasks, &work.Task{Data: shard})\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\tif err := master.RunSubtasks(subtasks, func(_ context.Context, taskInfo *work.TaskInfo) error {\n\t\tif taskInfo.State == work.State_FAILURE {\n\t\t\treturn errors.Errorf(taskInfo.Reason)\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn d.storage.Compact(master.Ctx(), outputPath, []string{scratch})\n}\n\nfunc serializeShard(shard *pfs.Shard) (*types.Any, error) {\n\tserializedShard, err := proto.Marshal(shard)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &types.Any{\n\t\tTypeUrl: \"\/\" + proto.MessageName(shard),\n\t\tValue: serializedShard,\n\t}, nil\n}\n\nfunc deserializeShard(shardAny *types.Any) (*pfs.Shard, error) {\n\tshard := &pfs.Shard{}\n\tif err := types.UnmarshalAny(shardAny, shard); err != nil {\n\t\treturn nil, err\n\t}\n\treturn shard, nil\n}\n\n\/\/ (bryce) it might potentially make sense to exit if an error occurs in this function\n\/\/ because each pachd instance that errors here will lose its compaction worker without an obvious\n\/\/ notification for the user (outside of the log message).\n\/\/ (bryce) ^ maybe just a retry would be good enough.\nfunc (d *driver) compactionWorker() {\n\tw := work.NewWorker(d.etcdClient, d.prefix, storageTaskNamespace)\n\tif err := w.Run(context.Background(), func(ctx context.Context, subtask *work.Task) error {\n\t\tshard, err := deserializeShard(subtask.Data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpathRange := &index.PathRange{\n\t\t\tLower: shard.Range.Lower,\n\t\t\tUpper: shard.Range.Upper,\n\t\t}\n\t\treturn d.storage.Compact(ctx, shard.OutputPath, shard.Compaction.InputPrefixes, index.WithRange(pathRange))\n\t}); err != nil {\n\t\tlog.Printf(\"error in compaction worker: %v\", err)\n\t}\n}\n<commit_msg>Add retry loop to compactionWorker (#4864)<commit_after>package server\n\nimport (\n\t\"archive\/tar\"\n\t\"hash\"\n\t\"io\"\n\t\"log\"\n\t\"path\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/gogo\/protobuf\/proto\"\n\t\"github.com\/gogo\/protobuf\/types\"\n\tgloblib \"github.com\/pachyderm\/ohmyglob\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/auth\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pfs\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/errors\"\n\tpfsserver \"github.com\/pachyderm\/pachyderm\/src\/server\/pfs\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/backoff\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/storage\/fileset\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/storage\/fileset\/index\"\n\ttxnenv \"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/transactionenv\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/uuid\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/work\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nconst (\n\t\/\/ tmpPrefix is for temporary storage paths.\n\ttmpPrefix = \"tmp\"\n\tstorageTaskNamespace = \"storage\"\n)\n\nfunc (d *driver) startCommitNewStorageLayer(txnCtx *txnenv.TransactionContext, id string, parent *pfs.Commit, branch string, provenance []*pfs.CommitProvenance, description string) (*pfs.Commit, error) {\n\treturn d.startCommit(txnCtx, id, parent, branch, provenance, description)\n}\n\nfunc (d *driver) finishCommitNewStorageLayer(txnCtx *txnenv.TransactionContext, commit *pfs.Commit, description string) (retErr error) {\n\tif err := d.checkIsAuthorizedInTransaction(txnCtx, commit.Repo, auth.Scope_WRITER); err != nil {\n\t\treturn err\n\t}\n\tcommitInfo, err := d.resolveCommit(txnCtx.Stm, commit)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif commitInfo.Finished != nil {\n\t\treturn pfsserver.ErrCommitFinished{commit}\n\t}\n\tif description != \"\" {\n\t\tcommitInfo.Description = description\n\t}\n\tcommitPath := path.Join(commit.Repo.Name, commit.ID)\n\t\/\/ Clean up temporary filesets leftover from failed operations.\n\tif err := d.storage.Delete(txnCtx.Client.Ctx(), path.Join(tmpPrefix, commitPath)); err != nil {\n\t\treturn err\n\t}\n\t\/\/ Run compaction task.\n\treturn d.compactionQueue.RunTaskBlock(txnCtx.Client.Ctx(), func(m *work.Master) error {\n\t\tif err := backoff.Retry(func() error {\n\t\t\tif err := d.storage.Delete(context.Background(), path.Join(commitPath, fileset.Diff)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn d.storage.Delete(context.Background(), path.Join(commitPath, fileset.Compacted))\n\t\t}, backoff.NewExponentialBackOff()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ Compact the commit changes into a diff file set.\n\t\tif err := d.compact(m, path.Join(commitPath, fileset.Diff), []string{commitPath}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ Compact the commit changes (diff file set) into the total changes in the commit's ancestry.\n\t\tvar compactSpec *fileset.CompactSpec\n\t\tif commitInfo.ParentCommit == nil {\n\t\t\tcompactSpec, err = d.storage.CompactSpec(m.Ctx(), commitPath)\n\t\t} else {\n\t\t\tparentCommitPath := path.Join(commitInfo.ParentCommit.Repo.Name, commitInfo.ParentCommit.ID)\n\t\t\tcompactSpec, err = d.storage.CompactSpec(m.Ctx(), commitPath, parentCommitPath)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := d.compact(m, compactSpec.Output, compactSpec.Input); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ (bryce) need size.\n\t\tcommitInfo.SizeBytes = uint64(0)\n\t\tcommitInfo.Finished = types.TimestampNow()\n\t\treturn d.writeFinishedCommit(txnCtx.Stm, commit, commitInfo)\n\t})\n}\n\n\/\/ (bryce) holding off on integrating with downstream commit deletion logic until global ids.\n\/\/func (d *driver) deleteCommitNewStorageLayer(txnCtx *txnenv.TransactionContext, commit *pfs.Commit) error {\n\/\/\treturn d.storage.Delete(txnCtx.Client.Ctx(), path.Join(commit.Repo.Name, commit.ID))\n\/\/}\n\n\/\/ (bryce) add commit validation.\n\/\/ (bryce) probably should prevent \/ clean files that end with \"\/\", since that will indicate a directory.\nfunc (d *driver) putFilesNewStorageLayer(ctx context.Context, repo, commit string, r io.Reader) (retErr error) {\n\t\/\/ (bryce) subFileSet will need to be incremented through etcd eventually.\n\td.mu.Lock()\n\tsubFileSetStr := fileset.SubFileSetStr(d.subFileSet)\n\tsubFileSetPath := path.Join(repo, commit, subFileSetStr)\n\tfs := d.storage.New(ctx, path.Join(tmpPrefix, subFileSetPath), subFileSetStr)\n\td.subFileSet++\n\td.mu.Unlock()\n\tdefer func() {\n\t\tif err := d.storage.Delete(ctx, path.Join(tmpPrefix, subFileSetPath)); retErr == nil {\n\t\t\tretErr = err\n\t\t}\n\t}()\n\tif err := fs.Put(r); err != nil {\n\t\treturn err\n\t}\n\tif err := fs.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn d.compactionQueue.RunTaskBlock(ctx, func(m *work.Master) error {\n\t\treturn d.compact(m, subFileSetPath, []string{path.Join(tmpPrefix, subFileSetPath)})\n\t})\n}\n\nfunc (d *driver) getFilesNewStorageLayer(ctx context.Context, repo, commit, glob string, w io.Writer) error {\n\t\/\/ (bryce) glob should be cleaned in option function\n\t\/\/ (bryce) need exact match option for file glob.\n\tcompactedPath := path.Join(repo, commit, fileset.Compacted)\n\tmr, err := d.storage.NewMergeReader(ctx, []string{compactedPath}, index.WithPrefix(glob))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn mr.Get(w)\n}\n\nvar globRegex = regexp.MustCompile(`[*?[\\]{}!()@+^]`)\n\nfunc globLiteralPrefix(glob string) string {\n\tidx := globRegex.FindStringIndex(glob)\n\tif idx == nil {\n\t\treturn glob\n\t}\n\treturn glob[:idx[0]]\n}\n\nfunc (d *driver) getFilesConditional(ctx context.Context, repo, commit, glob string, f func(*FileReader) error) error {\n\tcompactedPaths := []string{path.Join(repo, commit, fileset.Compacted)}\n\tprefix := globLiteralPrefix(glob)\n\tmr, err := d.storage.NewMergeReader(ctx, compactedPaths, index.WithPrefix(prefix))\n\tif err != nil {\n\t\treturn err\n\t}\n\tmf, err := matchFunc(glob)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar fr *FileReader\n\tnextFileReader := func(idx *index.Index) error {\n\t\tfmr, err := mr.Next()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !mf(idx.Path) {\n\t\t\treturn nil\n\t\t}\n\t\tfr = newFileReader(client.NewFile(repo, commit, idx.Path), idx, fmr, mr)\n\t\treturn nil\n\t}\n\tif err := d.storage.ResolveIndexes(ctx, compactedPaths, func(idx *index.Index) error {\n\t\tif fr == nil {\n\t\t\treturn nextFileReader(idx)\n\t\t}\n\t\tdir := path.Dir(idx.Path)\n\t\tif dir == fr.file.Path {\n\t\t\tfr.updateFileInfo(idx)\n\t\t\treturn nil\n\t\t}\n\t\tif err := f(fr); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := fr.drain(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfr = nil\n\t\treturn nextFileReader(idx)\n\n\t}, index.WithPrefix(prefix)); err != nil {\n\t\treturn err\n\t}\n\tif fr != nil {\n\t\treturn f(fr)\n\t}\n\treturn nil\n}\n\nfunc matchFunc(glob string) (func(string) bool, error) {\n\t\/\/ (bryce) this is a little weird, but it prevents the parent directory from being matched (i.e. \/*).\n\tvar parentG *globlib.Glob\n\tparentGlob, baseGlob := path.Split(glob)\n\tif len(baseGlob) > 0 {\n\t\tvar err error\n\t\tparentG, err = globlib.Compile(parentGlob, '\/')\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tg, err := globlib.Compile(glob, '\/')\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn func(s string) bool {\n\t\treturn g.Match(s) && (parentG == nil || !parentG.Match(s))\n\t}, nil\n}\n\n\/\/ FileReader is a PFS wrapper for a fileset.MergeReader.\n\/\/ The primary purpose of this abstraction is to convert from index.Index to\n\/\/ pfs.FileInfoNewStorage and to convert a set of index hashes to a file hash.\ntype FileReader struct {\n\tfile *pfs.File\n\tidx *index.Index\n\tfmr *fileset.FileMergeReader\n\tmr *fileset.MergeReader\n\tfileCount int\n\thash hash.Hash\n}\n\nfunc newFileReader(file *pfs.File, idx *index.Index, fmr *fileset.FileMergeReader, mr *fileset.MergeReader) *FileReader {\n\th := pfs.NewHash()\n\tfor _, dataRef := range idx.DataOp.DataRefs {\n\t\th.Write([]byte(dataRef.Hash))\n\t}\n\treturn &FileReader{\n\t\tfile: file,\n\t\tidx: idx,\n\t\tfmr: fmr,\n\t\tmr: mr,\n\t\thash: h,\n\t}\n}\n\nfunc (fr *FileReader) updateFileInfo(idx *index.Index) {\n\tfr.fileCount++\n\tfor _, dataRef := range idx.DataOp.DataRefs {\n\t\tfr.hash.Write([]byte(dataRef.Hash))\n\t}\n}\n\n\/\/ Info returns the info for the file.\nfunc (fr *FileReader) Info() *pfs.FileInfoNewStorage {\n\treturn &pfs.FileInfoNewStorage{\n\t\tFile: fr.file,\n\t\tHash: pfs.EncodeHash(fr.hash.Sum(nil)),\n\t}\n}\n\n\/\/ Get writes a tar stream that contains the file.\nfunc (fr *FileReader) Get(w io.Writer) error {\n\tif err := fr.fmr.Get(w); err != nil {\n\t\treturn err\n\t}\n\tfor fr.fileCount > 0 {\n\t\tfmr, err := fr.mr.Next()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := fmr.Get(w); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfr.fileCount--\n\t}\n\t\/\/ Close a tar writer to create tar EOF padding.\n\treturn tar.NewWriter(w).Close()\n}\n\nfunc (fr *FileReader) drain() error {\n\tfor fr.fileCount > 0 {\n\t\tif _, err := fr.mr.Next(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfr.fileCount--\n\t}\n\treturn nil\n}\n\nfunc (d *driver) compact(master *work.Master, outputPath string, inputPrefixes []string) (retErr error) {\n\tscratch := path.Join(tmpPrefix, uuid.NewWithoutDashes())\n\tdefer func() {\n\t\tif err := d.storage.Delete(context.Background(), scratch); retErr == nil {\n\t\t\tretErr = err\n\t\t}\n\t}()\n\tcompaction := &pfs.Compaction{InputPrefixes: inputPrefixes}\n\tvar subtasks []*work.Task\n\tif err := d.storage.Shard(master.Ctx(), inputPrefixes, func(pathRange *index.PathRange) error {\n\t\toutputPath := path.Join(scratch, strconv.Itoa(len(subtasks)))\n\t\tshard, err := serializeShard(&pfs.Shard{\n\t\t\tCompaction: compaction,\n\t\t\tRange: &pfs.PathRange{\n\t\t\t\tLower: pathRange.Lower,\n\t\t\t\tUpper: pathRange.Upper,\n\t\t\t},\n\t\t\tOutputPath: outputPath,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsubtasks = append(subtasks, &work.Task{Data: shard})\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\tif err := master.RunSubtasks(subtasks, func(_ context.Context, taskInfo *work.TaskInfo) error {\n\t\tif taskInfo.State == work.State_FAILURE {\n\t\t\treturn errors.Errorf(taskInfo.Reason)\n\t\t}\n\t\treturn nil\n\t}); err != nil {\n\t\treturn err\n\t}\n\treturn d.storage.Compact(master.Ctx(), outputPath, []string{scratch})\n}\n\nfunc serializeShard(shard *pfs.Shard) (*types.Any, error) {\n\tserializedShard, err := proto.Marshal(shard)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &types.Any{\n\t\tTypeUrl: \"\/\" + proto.MessageName(shard),\n\t\tValue: serializedShard,\n\t}, nil\n}\n\nfunc deserializeShard(shardAny *types.Any) (*pfs.Shard, error) {\n\tshard := &pfs.Shard{}\n\tif err := types.UnmarshalAny(shardAny, shard); err != nil {\n\t\treturn nil, err\n\t}\n\treturn shard, nil\n}\n\nfunc (d *driver) compactionWorker() {\n\tctx := context.Background()\n\tw := work.NewWorker(d.etcdClient, d.prefix, storageTaskNamespace)\n\t\/\/ Configure backoff so we retry indefinitely\n\tbackoffStrat := backoff.NewExponentialBackOff()\n\tbackoffStrat.MaxElapsedTime = 0\n\terr := backoff.RetryNotify(func() error {\n\t\treturn w.Run(ctx, func(ctx context.Context, subtask *work.Task) error {\n\t\t\tshard, err := deserializeShard(subtask.Data)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tpathRange := &index.PathRange{\n\t\t\t\tLower: shard.Range.Lower,\n\t\t\t\tUpper: shard.Range.Upper,\n\t\t\t}\n\t\t\treturn d.storage.Compact(ctx, shard.OutputPath, shard.Compaction.InputPrefixes, index.WithRange(pathRange))\n\t\t})\n\t}, backoffStrat, func(err error, t time.Duration) error {\n\t\tlog.Printf(\"error in compaction worker: %v\", err)\n\t\t\/\/ non-nil shuts down retry loop\n\t\treturn nil\n\t})\n\t\/\/ never ending backoff should prevent us from getting here.\n\tpanic(err)\n}\n<|endoftext|>"} {"text":"<commit_before>package db\r\n\r\nimport (\r\n\t\"fmt\"\r\n\r\n\t\"github.com\/boltdb\/bolt\"\r\n\t\"github.com\/pkg\/errors\"\r\n)\r\n\r\nconst indiceBucket string = \"indices\"\r\n\r\n\/\/ getLeagueIndexBucketRO returns the bucket corresponding\r\n\/\/ to a specific league's index. This will never write\r\n\/\/ and can be used safely with a readonly transaction.\r\n\/\/\r\n\/\/ Will either panic or return a valid bucket.\r\nfunc getLeagueIndexBucket(league LeagueHeapID, tx *bolt.Tx) *bolt.Bucket {\r\n\t\/\/ Grab league bucket\r\n\tleagueBucket := getLeagueBucket(league, tx)\r\n\r\n\t\/\/ This can never fail, its a guarantee that the itemStoreBucket was registered\r\n\t\/\/ and will always appear on a valid leagueBucket\r\n\tindices := leagueBucket.Bucket([]byte(indiceBucket))\r\n\tif indices == nil {\r\n\t\tpanic(fmt.Sprintf(\"%s bucket not found when expected\", itemStoreBucket))\r\n\t}\r\n\r\n\treturn indices\r\n}\r\n\r\n\/\/ getItemModIndexBucket returns a bucket which a given mod can be put\r\n\/\/ when considering the item containing it.\r\n\/\/\r\n\/\/ This WILL write if a bucket is not found. Hence, readonly tx unsafe.\r\nfunc getItemModIndexBucket(rootType, rootFlavor, mod StringHeapID,\r\n\tleague LeagueHeapID, tx *bolt.Tx) (*bolt.Bucket, error) {\r\n\t\/\/ Keys towards the bucket we want to return, they may or may not exist\r\n\tkeys := []StringHeapID{rootType, rootFlavor, mod}\r\n\r\n\t\/\/ Start at the index bucket\r\n\tcurrentBucket := getLeagueIndexBucket(league, tx)\r\n\r\n\t\/\/ Create all of the intervening keys\r\n\tfor _, key := range keys {\r\n\t\tkeyBytes := key.ToBytes()\r\n\t\tprevBucket := currentBucket.Bucket(keyBytes)\r\n\t\tif prevBucket == nil {\r\n\t\t\t\/\/ Create the bucket\r\n\t\t\tvar err error\r\n\t\t\tprevBucket, err = currentBucket.CreateBucket(keyBytes)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn nil,\r\n\t\t\t\t\terrors.Wrapf(err,\r\n\t\t\t\t\t\t\"failed to add index intermediary bucket bucket, bucket=%s, chain=%v\",\r\n\t\t\t\t\t\tkey, keys)\r\n\t\t\t}\r\n\t\t}\r\n\t\tcurrentBucket = prevBucket\r\n\t}\r\n\r\n\t\/\/ If we made it through, our currentBucket should be the one we want\r\n\treturn currentBucket, nil\r\n}\r\n\r\n\/\/ getItemModIndexBucketRO returns a bucket which a given mod can be put\r\n\/\/ when considering the item containing it.\r\n\/\/\r\n\/\/ This WILL NOT write if a bucket is not found. Hence, readonly tx unsafe.\r\nfunc getItemModIndexBucketRO(rootType, rootFlavor, mod StringHeapID,\r\n\tleague LeagueHeapID, tx *bolt.Tx) (*bolt.Bucket, error) {\r\n\t\/\/ Keys towards the bucket we want to return, they may or may not exist\r\n\tkeys := []StringHeapID{rootType, rootFlavor, mod}\r\n\r\n\t\/\/ Start at the index bucket\r\n\tcurrentBucket := getLeagueIndexBucket(league, tx)\r\n\r\n\t\/\/ Traverse all intervening buckets\r\n\tfor _, key := range keys {\r\n\t\tkeyBytes := key.ToBytes()\r\n\t\tprevBucket := currentBucket.Bucket(keyBytes)\r\n\t\tif prevBucket == nil {\r\n\t\t\treturn nil, errors.Errorf(\"invalid bucket, key=%d, chain=%v\", key, keys)\r\n\t\t}\r\n\t\tcurrentBucket = prevBucket\r\n\t}\r\n\r\n\t\/\/ If we made it through, our currentBucket should be the one we want\r\n\treturn currentBucket, nil\r\n}\r\n\r\n\/\/ ModIndexKeySuffixLength allows us to fetch variable numbers\r\n\/\/ of pre-pended values given their length.\r\nconst ModIndexKeySuffixLength = TimestampSize\r\n\r\n\/\/ encodeModIndexKey generates a mod key based off of the provided data\r\n\/\/\r\n\/\/ The mod index key is generated as [mod.Values..., now, updateSequence]\r\nfunc encodeModIndexKey(mod ItemMod, now Timestamp) []byte {\r\n\r\n\t\/\/ Pre-allocate index key so the entire key can be\r\n\t\/\/ encoded with a single allocation.\r\n\tmodsLength := 2\r\n\tindexKey := make([]byte, ModIndexKeySuffixLength+modsLength)\r\n\r\n\t\/\/ Generate the suffix\r\n\tsuffix := (indexKey[modsLength:])[:0] \/\/ Deal with pre-allocated space\r\n\tsuffix = append(suffix, now.TruncateToIndexBucket()[:]...)\r\n\r\n\tif len(suffix) != ModIndexKeySuffixLength {\r\n\t\tpanic(fmt.Sprintf(\"unexpected suffix length, got %d, expected %d\",\r\n\t\t\tlen(suffix), ModIndexKeySuffixLength))\r\n\t}\r\n\r\n\t\/\/ Fill in the index from the front\r\n\t\/\/\r\n\t\/\/ TODO: avoid appends, pre-size the backing slice to accomodate the\r\n\t\/\/ contents including the header\r\n\tindex := indexKey[:0] \/\/ Deal with pre-allocated space\r\n\tindex = append(index, i16tob(mod.Value)...)\r\n\r\n\t\/\/ And return the index with its suffix\r\n\treturn append(index, suffix...)\r\n}\r\n\r\n\/\/ decodeModIndexKey decodes a provided mod index key\r\n\/\/\r\n\/\/ This returns the values encoded in the key.\r\n\/\/\r\n\/\/ This is possible as the suffix is a fixed length and format while\r\n\/\/ the values of the modifer are simple appended\r\nfunc decodeModIndexKey(key []byte) ([]uint16, error) {\r\n\r\n\t\/\/ Basic sanity check\r\n\tif len(key) < ModIndexKeySuffixLength {\r\n\t\treturn nil, errors.New(\"invalid index key passed, less than length of suffix\")\r\n\t}\r\n\r\n\t\/\/ Ensure we are divisible by 2 following the removal of the suffix\r\n\tif (len(key)-ModIndexKeySuffixLength)%2 != 0 {\r\n\t\treturn nil, errors.New(\"invalid index key passed, values malformed\")\r\n\t}\r\n\r\n\tvalueBytes := key[:len(key)-ModIndexKeySuffixLength]\r\n\tvalues := make([]uint16, len(valueBytes)\/2)\r\n\tfor index := 0; index*2 < len(valueBytes); index++ {\r\n\t\tvalues[index] = btoi16(valueBytes[index*2:])\r\n\t}\r\n\r\n\treturn values, nil\r\n\r\n}\r\n\r\n\/\/ IndexItems adds tbe given items to their correct indices\r\n\/\/ for efficient lookup. Returns number of index entries added.\r\n\/\/\r\n\/\/ Provided items CAN differ in their league.\r\nfunc IndexItems(items []Item, tx *bolt.Tx) (int, error) {\r\n\r\n\t\/\/ Sanity check passed in transaction, better to do this than panic.\r\n\tif !tx.Writable() {\r\n\t\treturn 0, errors.New(\"cannot IndexItems on readonly transaction\")\r\n\t}\r\n\r\n\t\/\/ Silently exit when no items present to add\r\n\tif len(items) < 1 {\r\n\t\treturn 0, nil\r\n\t}\r\n\r\n\tvar added int\r\n\r\n\tfor _, item := range items {\r\n\r\n\t\tfor _, mod := range item.Mods {\r\n\t\t\t\/\/ Grab the bucket we can actually insert things into\r\n\r\n\t\t\titemModBucket, err := getItemModIndexBucket(item.RootType, item.RootFlavor,\r\n\t\t\t\tmod.Mod, item.League, tx)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn 0, errors.New(\"failed to get item mod bucket\")\r\n\t\t\t}\r\n\r\n\t\t\tmodKey := encodeModIndexKey(mod, item.When)\r\n\r\n\t\t\t\/\/ Check for pre-existing items in the bucket, if none, we establish\r\n\t\t\t\/\/ the bucket\r\n\t\t\texisting := itemModBucket.Get(modKey)\r\n\t\t\tif existing == nil {\r\n\t\t\t\t\/\/ We need to make a copy of the item ID or bolt\r\n\t\t\t\t\/\/ will get a buffer reused for all items.\r\n\t\t\t\t\/\/\r\n\t\t\t\t\/\/ Without this, all index entries will point to the last\r\n\t\t\t\t\/\/ item added.\r\n\t\t\t\tidCopy := make([]byte, IDSize)\r\n\t\t\t\tcopy(idCopy, item.ID[:])\r\n\r\n\t\t\t\titemModBucket.Put(modKey, idCopy)\r\n\t\t\t} else {\r\n\t\t\t\t\/\/ We assume item not already present in bucket.\r\n\t\t\t\t\/\/ If it is, we end up with a duplicate.\r\n\t\t\t\t\/\/\r\n\t\t\t\t\/\/ Allocate a buffer large enough for an append\r\n\t\t\t\t\/\/ without another allocation.\r\n\t\t\t\t\/\/ Yes, this looks super dirty. TODO: cleanup D:\r\n\t\t\t\tappended := make([]byte, len(existing)+IDSize)[:0]\r\n\t\t\t\tappended = append(appended, existing...)\r\n\t\t\t\tappended = append(appended, item.ID[:]...)\r\n\r\n\t\t\t\titemModBucket.Put(modKey, appended)\r\n\t\t\t}\r\n\t\t\tadded++\r\n\t\t}\r\n\t}\r\n\r\n\treturn added, nil\r\n\r\n}\r\n\r\n\/\/ DeindexItems removes tbe given items from their correct indices\r\n\/\/\r\n\/\/ If an index entry cannot be removed, we return an error. This ensures\r\n\/\/ all existing index entries must be alive\r\nfunc DeindexItems(items []Item, tx *bolt.Tx) error {\r\n\r\n\t\/\/ Sanity check passed in transaction, better to do this than panic.\r\n\tif !tx.Writable() {\r\n\t\treturn errors.New(\"cannot IndexItems on readonly transaction\")\r\n\t}\r\n\r\n\t\/\/ Silently exit when no items present to add\r\n\tif len(items) < 1 {\r\n\t\treturn nil\r\n\t}\r\n\r\n\tfor _, item := range items {\r\n\r\n\t\tfor _, mod := range item.Mods {\r\n\t\t\t\/\/ Grab the bucket we can actually insert things into\r\n\t\t\titemModBucket, err := getItemModIndexBucket(item.RootType, item.RootFlavor,\r\n\t\t\t\tmod.Mod, item.League, tx)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn errors.New(\"failed to get item mod bucket\")\r\n\t\t\t}\r\n\r\n\t\t\tmodKey := encodeModIndexKey(mod, item.When)\r\n\r\n\t\t\t\/\/ We need to make a copy of the item ID or bolt\r\n\t\t\t\/\/ will get a buffer reused for all items.\r\n\t\t\t\/\/\r\n\t\t\t\/\/ Without this, all index entries will point to the last\r\n\t\t\t\/\/ item added.\r\n\t\t\tidCopy := make([]byte, IDSize)\r\n\t\t\tcopy(idCopy, item.ID[:])\r\n\r\n\t\t\titemModBucket.Delete(modKey)\r\n\t\t}\r\n\t}\r\n\r\n\treturn nil\r\n\r\n}\r\n\r\n\/\/ IndexEntryCount returns the number of index entries across all leagues\r\nfunc IndexEntryCount(db *bolt.DB) (int, error) {\r\n\tvar count int\r\n\r\n\tleagueStrings, err := ListLeagues(db)\r\n\tif err != nil {\r\n\t\treturn 0, err\r\n\t}\r\n\tleagueIDs, err := GetLeagues(leagueStrings, db)\r\n\tif err != nil {\r\n\t\treturn 0, err\r\n\t}\r\n\r\n\treturn count, db.View(func(tx *bolt.Tx) error {\r\n\r\n\t\tfor _, id := range leagueIDs {\r\n\t\t\tb := getLeagueIndexBucket(id, tx)\r\n\t\t\tif b == nil {\r\n\t\t\t\treturn errors.Errorf(\"%s bucket not found\", itemStoreBucket)\r\n\t\t\t}\r\n\t\t\tstats := b.Stats()\r\n\t\t\tcount += stats.KeyN\r\n\t\t}\r\n\r\n\t\treturn nil\r\n\t})\r\n}\r\n<commit_msg>db introduce IndexEntry to simplify operations<commit_after>package db\r\n\r\nimport (\r\n\t\"fmt\"\r\n\r\n\t\"github.com\/boltdb\/bolt\"\r\n\t\"github.com\/pkg\/errors\"\r\n)\r\n\r\nconst indiceBucket string = \"indices\"\r\n\r\n\/\/ getLeagueIndexBucketRO returns the bucket corresponding\r\n\/\/ to a specific league's index. This will never write\r\n\/\/ and can be used safely with a readonly transaction.\r\n\/\/\r\n\/\/ Will either panic or return a valid bucket.\r\nfunc getLeagueIndexBucket(league LeagueHeapID, tx *bolt.Tx) *bolt.Bucket {\r\n\t\/\/ Grab league bucket\r\n\tleagueBucket := getLeagueBucket(league, tx)\r\n\r\n\t\/\/ This can never fail, its a guarantee that the itemStoreBucket was registered\r\n\t\/\/ and will always appear on a valid leagueBucket\r\n\tindices := leagueBucket.Bucket([]byte(indiceBucket))\r\n\tif indices == nil {\r\n\t\tpanic(fmt.Sprintf(\"%s bucket not found when expected\", itemStoreBucket))\r\n\t}\r\n\r\n\treturn indices\r\n}\r\n\r\n\/\/ getItemModIndexBucket returns a bucket which a given mod can be put\r\n\/\/ when considering the item containing it.\r\n\/\/\r\n\/\/ This WILL write if a bucket is not found. Hence, readonly tx unsafe.\r\nfunc getItemModIndexBucket(rootType, rootFlavor, mod StringHeapID,\r\n\tleague LeagueHeapID, tx *bolt.Tx) (*bolt.Bucket, error) {\r\n\t\/\/ Keys towards the bucket we want to return, they may or may not exist\r\n\tkeys := []StringHeapID{rootType, rootFlavor, mod}\r\n\r\n\t\/\/ Start at the index bucket\r\n\tcurrentBucket := getLeagueIndexBucket(league, tx)\r\n\r\n\t\/\/ Create all of the intervening keys\r\n\tfor _, key := range keys {\r\n\t\tkeyBytes := key.ToBytes()\r\n\t\tprevBucket := currentBucket.Bucket(keyBytes)\r\n\t\tif prevBucket == nil {\r\n\t\t\t\/\/ Create the bucket\r\n\t\t\tvar err error\r\n\t\t\tprevBucket, err = currentBucket.CreateBucket(keyBytes)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn nil,\r\n\t\t\t\t\terrors.Wrapf(err,\r\n\t\t\t\t\t\t\"failed to add index intermediary bucket bucket, bucket=%s, chain=%v\",\r\n\t\t\t\t\t\tkey, keys)\r\n\t\t\t}\r\n\t\t}\r\n\t\tcurrentBucket = prevBucket\r\n\t}\r\n\r\n\t\/\/ If we made it through, our currentBucket should be the one we want\r\n\treturn currentBucket, nil\r\n}\r\n\r\n\/\/ getItemModIndexBucketRO returns a bucket which a given mod can be put\r\n\/\/ when considering the item containing it.\r\n\/\/\r\n\/\/ This WILL NOT write if a bucket is not found. Hence, readonly tx unsafe.\r\nfunc getItemModIndexBucketRO(rootType, rootFlavor, mod StringHeapID,\r\n\tleague LeagueHeapID, tx *bolt.Tx) (*bolt.Bucket, error) {\r\n\t\/\/ Keys towards the bucket we want to return, they may or may not exist\r\n\tkeys := []StringHeapID{rootType, rootFlavor, mod}\r\n\r\n\t\/\/ Start at the index bucket\r\n\tcurrentBucket := getLeagueIndexBucket(league, tx)\r\n\r\n\t\/\/ Traverse all intervening buckets\r\n\tfor _, key := range keys {\r\n\t\tkeyBytes := key.ToBytes()\r\n\t\tprevBucket := currentBucket.Bucket(keyBytes)\r\n\t\tif prevBucket == nil {\r\n\t\t\treturn nil, errors.Errorf(\"invalid bucket, key=%d, chain=%v\", key, keys)\r\n\t\t}\r\n\t\tcurrentBucket = prevBucket\r\n\t}\r\n\r\n\t\/\/ If we made it through, our currentBucket should be the one we want\r\n\treturn currentBucket, nil\r\n}\r\n\r\n\/\/ ModIndexKeySuffixLength allows us to fetch variable numbers\r\n\/\/ of pre-pended values given their length.\r\nconst ModIndexKeySuffixLength = TimestampSize\r\n\r\n\/\/ encodeModIndexKey generates a mod key based off of the provided data\r\n\/\/\r\n\/\/ The mod index key is generated as [mod.Values..., now, updateSequence]\r\nfunc encodeModIndexKey(mod ItemMod, now Timestamp) []byte {\r\n\r\n\t\/\/ Pre-allocate index key so the entire key can be\r\n\t\/\/ encoded with a single allocation.\r\n\tmodsLength := 2\r\n\tindexKey := make([]byte, ModIndexKeySuffixLength+modsLength)\r\n\r\n\t\/\/ Generate the suffix\r\n\tsuffix := (indexKey[modsLength:])[:0] \/\/ Deal with pre-allocated space\r\n\tsuffix = append(suffix, now.TruncateToIndexBucket()[:]...)\r\n\r\n\tif len(suffix) != ModIndexKeySuffixLength {\r\n\t\tpanic(fmt.Sprintf(\"unexpected suffix length, got %d, expected %d\",\r\n\t\t\tlen(suffix), ModIndexKeySuffixLength))\r\n\t}\r\n\r\n\t\/\/ Fill in the index from the front\r\n\t\/\/\r\n\t\/\/ TODO: avoid appends, pre-size the backing slice to accomodate the\r\n\t\/\/ contents including the header\r\n\tindex := indexKey[:0] \/\/ Deal with pre-allocated space\r\n\tindex = append(index, i16tob(mod.Value)...)\r\n\r\n\t\/\/ And return the index with its suffix\r\n\treturn append(index, suffix...)\r\n}\r\n\r\n\/\/ decodeModIndexKey decodes a provided mod index key\r\n\/\/\r\n\/\/ This returns the values encoded in the key.\r\n\/\/\r\n\/\/ This is possible as the suffix is a fixed length and format while\r\n\/\/ the values of the modifer are simple appended\r\nfunc decodeModIndexKey(key []byte) ([]uint16, error) {\r\n\r\n\t\/\/ Basic sanity check\r\n\tif len(key) < ModIndexKeySuffixLength {\r\n\t\treturn nil, errors.New(\"invalid index key passed, less than length of suffix\")\r\n\t}\r\n\r\n\t\/\/ Ensure we are divisible by 2 following the removal of the suffix\r\n\tif (len(key)-ModIndexKeySuffixLength)%2 != 0 {\r\n\t\treturn nil, errors.New(\"invalid index key passed, values malformed\")\r\n\t}\r\n\r\n\tvalueBytes := key[:len(key)-ModIndexKeySuffixLength]\r\n\tvalues := make([]uint16, len(valueBytes)\/2)\r\n\tfor index := 0; index*2 < len(valueBytes); index++ {\r\n\t\tvalues[index] = btoi16(valueBytes[index*2:])\r\n\t}\r\n\r\n\treturn values, nil\r\n\r\n}\r\n\r\n\/\/ IndexItems adds tbe given items to their correct indices\r\n\/\/ for efficient lookup. Returns number of index entries added.\r\n\/\/\r\n\/\/ Provided items CAN differ in their league.\r\nfunc IndexItems(items []Item, tx *bolt.Tx) (int, error) {\r\n\r\n\t\/\/ Sanity check passed in transaction, better to do this than panic.\r\n\tif !tx.Writable() {\r\n\t\treturn 0, errors.New(\"cannot IndexItems on readonly transaction\")\r\n\t}\r\n\r\n\t\/\/ Silently exit when no items present to add\r\n\tif len(items) < 1 {\r\n\t\treturn 0, nil\r\n\t}\r\n\r\n\tvar added int\r\n\r\n\tfor _, item := range items {\r\n\r\n\t\tfor _, mod := range item.Mods {\r\n\t\t\t\/\/ Grab the bucket we can actually insert things into\r\n\r\n\t\t\titemModBucket, err := getItemModIndexBucket(item.RootType, item.RootFlavor,\r\n\t\t\t\tmod.Mod, item.League, tx)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn 0, errors.New(\"failed to get item mod bucket\")\r\n\t\t\t}\r\n\r\n\t\t\tmodKey := encodeModIndexKey(mod, item.When)\r\n\r\n\t\t\t\/\/ Check for pre-existing items in the bucket, if none, we establish\r\n\t\t\t\/\/ the bucket\r\n\t\t\texisting := itemModBucket.Get(modKey)\r\n\t\t\twrapped := WrapIndexEntryBytes(existing)\r\n\t\t\twrapped.Append(item.ID)\r\n\t\t\titemModBucket.Put(modKey, wrapped.Unwrap())\r\n\t\t\tadded++\r\n\t\t}\r\n\t}\r\n\r\n\treturn added, nil\r\n\r\n}\r\n\r\n\/\/ DeindexItems removes tbe given items from their correct indices\r\n\/\/\r\n\/\/ If an index entry cannot be removed, we return an error. This ensures\r\n\/\/ all existing index entries must be alive\r\nfunc DeindexItems(items []Item, tx *bolt.Tx) error {\r\n\r\n\t\/\/ Sanity check passed in transaction, better to do this than panic.\r\n\tif !tx.Writable() {\r\n\t\treturn errors.New(\"cannot IndexItems on readonly transaction\")\r\n\t}\r\n\r\n\t\/\/ Silently exit when no items present to add\r\n\tif len(items) < 1 {\r\n\t\treturn nil\r\n\t}\r\n\r\n\tfor _, item := range items {\r\n\r\n\t\tfor _, mod := range item.Mods {\r\n\t\t\t\/\/ Grab the bucket we can actually insert things into\r\n\t\t\titemModBucket, err := getItemModIndexBucket(item.RootType, item.RootFlavor,\r\n\t\t\t\tmod.Mod, item.League, tx)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn errors.New(\"failed to get item mod bucket\")\r\n\t\t\t}\r\n\r\n\t\t\tmodKey := encodeModIndexKey(mod, item.When)\r\n\r\n\t\t\t\/\/ We need to make a copy of the item ID or bolt\r\n\t\t\t\/\/ will get a buffer reused for all items.\r\n\t\t\t\/\/\r\n\t\t\t\/\/ Without this, all index entries will point to the last\r\n\t\t\t\/\/ item added.\r\n\t\t\tidCopy := make([]byte, IDSize)\r\n\t\t\tcopy(idCopy, item.ID[:])\r\n\r\n\t\t\titemModBucket.Delete(modKey)\r\n\t\t}\r\n\t}\r\n\r\n\treturn nil\r\n\r\n}\r\n\r\n\/\/ IndexEntry represents bytes interpreted as an entry within the index\r\n\/\/\r\n\/\/ Whenever possible, we avoid allocations.\r\ntype IndexEntry struct {\r\n\tin []byte\r\n}\r\n\r\n\/\/ WrapIndexEntryBytes wraps provided byte slice to allow\r\n\/\/ them to be interpreted as an indexEntry\r\n\/\/\r\n\/\/ in can be nil.\r\nfunc WrapIndexEntryBytes(in []byte) IndexEntry {\r\n\treturn IndexEntry{in}\r\n}\r\n\r\n\/\/ Unwrap returns the backing array behind an indexEntry\r\nfunc (entry *IndexEntry) Unwrap() []byte {\r\n\treturn entry.in\r\n}\r\n\r\n\/\/ Append adds another ID to the entry\r\n\/\/\r\n\/\/ If an id is already present in the id, we end up with a duplicate.\r\n\/\/ Such is life.\r\nfunc (entry *IndexEntry) Append(id ID) {\r\n\tif entry.in == nil {\r\n\t\t\/\/ Copy necessary due to boltdb semantics for passed buffers\r\n\t\tentry.in = make([]byte, len(id))\r\n\t\tcopy(entry.in, id[:])\r\n\t} else {\r\n\t\t\/\/ We assume item not already present in bucket.\r\n\t\t\/\/ If it is, we end up with a duplicate.\r\n\t\t\/\/\r\n\t\t\/\/ Allocate a buffer large enough for an append\r\n\t\t\/\/ without another allocation.\r\n\t\t\/\/ Yes, this looks super dirty. TODO: cleanup D:\r\n\t\tappended := make([]byte, len(entry.in)+IDSize)[:0]\r\n\t\tappended = append(appended, entry.in...)\r\n\t\tappended = append(appended, id[:]...)\r\n\r\n\t\tentry.in = appended\r\n\t}\r\n\r\n}\r\n\r\n\/\/ IndexEntryCount returns the number of index entries across all leagues\r\nfunc IndexEntryCount(db *bolt.DB) (int, error) {\r\n\tvar count int\r\n\r\n\tleagueStrings, err := ListLeagues(db)\r\n\tif err != nil {\r\n\t\treturn 0, err\r\n\t}\r\n\tleagueIDs, err := GetLeagues(leagueStrings, db)\r\n\tif err != nil {\r\n\t\treturn 0, err\r\n\t}\r\n\r\n\treturn count, db.View(func(tx *bolt.Tx) error {\r\n\r\n\t\tfor _, id := range leagueIDs {\r\n\t\t\tb := getLeagueIndexBucket(id, tx)\r\n\t\t\tif b == nil {\r\n\t\t\t\treturn errors.Errorf(\"%s bucket not found\", itemStoreBucket)\r\n\t\t\t}\r\n\t\t\tstats := b.Stats()\r\n\t\t\tcount += stats.KeyN\r\n\t\t}\r\n\r\n\t\treturn nil\r\n\t})\r\n}\r\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage influxdb\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/google\/cadvisor\/info\"\n\t\"github.com\/google\/cadvisor\/storage\"\n\t\"github.com\/google\/cadvisor\/storage\/test\"\n\tinfluxdb \"github.com\/influxdb\/influxdb\/client\"\n)\n\n\/\/ The duration in seconds for which stats will be buffered in the influxdb driver.\nconst kCacheDuration = 1\n\ntype influxDbTestStorageDriver struct {\n\tcount int\n\tbuffer int\n\tbase storage.StorageDriver\n}\n\nfunc (self *influxDbTestStorageDriver) readyToFlush() bool {\n\tif self.count >= self.buffer {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (self *influxDbTestStorageDriver) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error {\n\tself.count++\n\treturn self.base.AddStats(ref, stats)\n}\n\nfunc (self *influxDbTestStorageDriver) RecentStats(containerName string, numStats int) ([]*info.ContainerStats, error) {\n\treturn self.base.RecentStats(containerName, numStats)\n}\n\nfunc (self *influxDbTestStorageDriver) Percentiles(containerName string, cpuUsagePercentiles []int, memUsagePercentiles []int) (*info.ContainerStatsPercentiles, error) {\n\treturn self.base.Percentiles(containerName, cpuUsagePercentiles, memUsagePercentiles)\n}\n\nfunc (self *influxDbTestStorageDriver) Samples(containerName string, numSamples int) ([]*info.ContainerStatsSample, error) {\n\treturn self.base.Samples(containerName, numSamples)\n}\n\nfunc (self *influxDbTestStorageDriver) Close() error {\n\treturn self.base.Close()\n}\n\nfunc (self *influxDbTestStorageDriver) StatsEq(a, b *info.ContainerStats) bool {\n\tif !test.TimeEq(a.Timestamp, b.Timestamp, 10*time.Millisecond) {\n\t\treturn false\n\t}\n\t\/\/ Check only the stats populated in influxdb.\n\tif a.Cpu.Usage.Total != b.Cpu.Usage.Total {\n\t\treturn false\n\t}\n\n\tif a.Memory.Usage != b.Memory.Usage {\n\t\treturn false\n\t}\n\n\tif a.Memory.WorkingSet != b.Memory.WorkingSet {\n\t\treturn false\n\t}\n\n\tif !reflect.DeepEqual(a.Network, b.Network) {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc runStorageTest(f func(test.TestStorageDriver, *testing.T), t *testing.T, bufferCount int) {\n\tmachineName := \"machineA\"\n\ttablename := \"t\"\n\tdatabase := \"cadvisor\"\n\tusername := \"root\"\n\tpassword := \"root\"\n\thostname := \"localhost:8086\"\n\tpercentilesDuration := 10 * time.Minute\n\trootConfig := &influxdb.ClientConfig{\n\t\tHost: hostname,\n\t\tUsername: username,\n\t\tPassword: password,\n\t\tIsSecure: false,\n\t}\n\trootClient, err := influxdb.NewClient(rootConfig)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ create the data base first.\n\trootClient.CreateDatabase(database)\n\tconfig := &influxdb.ClientConfig{\n\t\tHost: hostname,\n\t\tUsername: username,\n\t\tPassword: password,\n\t\tDatabase: database,\n\t\tIsSecure: false,\n\t}\n\tclient, err := influxdb.NewClient(config)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tclient.DisableCompression()\n\tdeleteAll := fmt.Sprintf(\"drop series %v\", tablename)\n\t_, err = client.Query(deleteAll)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ delete all data by the end of the call\n\tdefer client.Query(deleteAll)\n\n\tdriver, err := New(machineName,\n\t\ttablename,\n\t\tdatabase,\n\t\tusername,\n\t\tpassword,\n\t\thostname,\n\t\tfalse,\n\t\ttime.Duration(bufferCount),\n\t\tpercentilesDuration)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttestDriver := &influxDbTestStorageDriver{buffer: bufferCount}\n\tdriver.OverrideReadyToFlush(testDriver.readyToFlush)\n\ttestDriver.base = driver\n\n\t\/\/ generate another container's data on same machine.\n\ttest.StorageDriverFillRandomStatsFunc(\"containerOnSameMachine\", 100, testDriver, t)\n\n\t\/\/ generate another container's data on another machine.\n\tdriverForAnotherMachine, err := New(\"machineB\",\n\t\ttablename,\n\t\tdatabase,\n\t\tusername,\n\t\tpassword,\n\t\thostname,\n\t\tfalse,\n\t\ttime.Duration(bufferCount),\n\t\tpercentilesDuration)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer driverForAnotherMachine.Close()\n\ttestDriverOtherMachine := &influxDbTestStorageDriver{buffer: bufferCount}\n\tdriverForAnotherMachine.OverrideReadyToFlush(testDriverOtherMachine.readyToFlush)\n\ttestDriverOtherMachine.base = driverForAnotherMachine\n\n\ttest.StorageDriverFillRandomStatsFunc(\"containerOnAnotherMachine\", 100, testDriverOtherMachine, t)\n\tf(testDriver, t)\n}\n\nfunc TestSampleCpuUsage(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestSampleCpuUsage, t, kCacheDuration)\n}\n\nfunc TestRetrievePartialRecentStats(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestRetrievePartialRecentStats, t, 20)\n}\n\nfunc TestSamplesWithoutSample(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestSamplesWithoutSample, t, kCacheDuration)\n}\n\nfunc TestRetrieveAllRecentStats(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestRetrieveAllRecentStats, t, 10)\n}\n\nfunc TestNoRecentStats(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestNoRecentStats, t, kCacheDuration)\n}\n\nfunc TestNoSamples(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestNoSamples, t, kCacheDuration)\n}\n\nfunc TestPercentiles(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestPercentiles, t, kCacheDuration)\n}\n\nfunc TestMaxMemoryUsage(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestMaxMemoryUsage, t, kCacheDuration)\n}\n\nfunc TestPercentilesWithoutSample(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestPercentilesWithoutSample, t, kCacheDuration)\n}\n\nfunc TestPercentilesWithoutStats(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestPercentilesWithoutStats, t, kCacheDuration)\n}\n\nfunc TestRetrieveZeroStats(t *testing.T) {\n\tt.SkipNow()\n\trunStorageTest(test.StorageDriverTestRetrieveZeroRecentStats, t, kCacheDuration)\n}\n\nfunc TestRetrieveZeroSamples(t *testing.T) {\n\tt.SkipNow()\n\trunStorageTest(test.StorageDriverTestRetrieveZeroSamples, t, kCacheDuration)\n}\n<commit_msg>Ignore the InfluxDB tests while they are flaky.<commit_after>\/\/+build ignore\n\/\/ Copyright 2014 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage influxdb\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/google\/cadvisor\/info\"\n\t\"github.com\/google\/cadvisor\/storage\"\n\t\"github.com\/google\/cadvisor\/storage\/test\"\n\tinfluxdb \"github.com\/influxdb\/influxdb\/client\"\n)\n\n\/\/ The duration in seconds for which stats will be buffered in the influxdb driver.\nconst kCacheDuration = 1\n\ntype influxDbTestStorageDriver struct {\n\tcount int\n\tbuffer int\n\tbase storage.StorageDriver\n}\n\nfunc (self *influxDbTestStorageDriver) readyToFlush() bool {\n\tif self.count >= self.buffer {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (self *influxDbTestStorageDriver) AddStats(ref info.ContainerReference, stats *info.ContainerStats) error {\n\tself.count++\n\treturn self.base.AddStats(ref, stats)\n}\n\nfunc (self *influxDbTestStorageDriver) RecentStats(containerName string, numStats int) ([]*info.ContainerStats, error) {\n\treturn self.base.RecentStats(containerName, numStats)\n}\n\nfunc (self *influxDbTestStorageDriver) Percentiles(containerName string, cpuUsagePercentiles []int, memUsagePercentiles []int) (*info.ContainerStatsPercentiles, error) {\n\treturn self.base.Percentiles(containerName, cpuUsagePercentiles, memUsagePercentiles)\n}\n\nfunc (self *influxDbTestStorageDriver) Samples(containerName string, numSamples int) ([]*info.ContainerStatsSample, error) {\n\treturn self.base.Samples(containerName, numSamples)\n}\n\nfunc (self *influxDbTestStorageDriver) Close() error {\n\treturn self.base.Close()\n}\n\nfunc (self *influxDbTestStorageDriver) StatsEq(a, b *info.ContainerStats) bool {\n\tif !test.TimeEq(a.Timestamp, b.Timestamp, 10*time.Millisecond) {\n\t\treturn false\n\t}\n\t\/\/ Check only the stats populated in influxdb.\n\tif a.Cpu.Usage.Total != b.Cpu.Usage.Total {\n\t\treturn false\n\t}\n\n\tif a.Memory.Usage != b.Memory.Usage {\n\t\treturn false\n\t}\n\n\tif a.Memory.WorkingSet != b.Memory.WorkingSet {\n\t\treturn false\n\t}\n\n\tif !reflect.DeepEqual(a.Network, b.Network) {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc runStorageTest(f func(test.TestStorageDriver, *testing.T), t *testing.T, bufferCount int) {\n\tmachineName := \"machineA\"\n\ttablename := \"t\"\n\tdatabase := \"cadvisor\"\n\tusername := \"root\"\n\tpassword := \"root\"\n\thostname := \"localhost:8086\"\n\tpercentilesDuration := 10 * time.Minute\n\trootConfig := &influxdb.ClientConfig{\n\t\tHost: hostname,\n\t\tUsername: username,\n\t\tPassword: password,\n\t\tIsSecure: false,\n\t}\n\trootClient, err := influxdb.NewClient(rootConfig)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ create the data base first.\n\trootClient.CreateDatabase(database)\n\tconfig := &influxdb.ClientConfig{\n\t\tHost: hostname,\n\t\tUsername: username,\n\t\tPassword: password,\n\t\tDatabase: database,\n\t\tIsSecure: false,\n\t}\n\tclient, err := influxdb.NewClient(config)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tclient.DisableCompression()\n\tdeleteAll := fmt.Sprintf(\"drop series %v\", tablename)\n\t_, err = client.Query(deleteAll)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ delete all data by the end of the call\n\tdefer client.Query(deleteAll)\n\n\tdriver, err := New(machineName,\n\t\ttablename,\n\t\tdatabase,\n\t\tusername,\n\t\tpassword,\n\t\thostname,\n\t\tfalse,\n\t\ttime.Duration(bufferCount),\n\t\tpercentilesDuration)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttestDriver := &influxDbTestStorageDriver{buffer: bufferCount}\n\tdriver.OverrideReadyToFlush(testDriver.readyToFlush)\n\ttestDriver.base = driver\n\n\t\/\/ generate another container's data on same machine.\n\ttest.StorageDriverFillRandomStatsFunc(\"containerOnSameMachine\", 100, testDriver, t)\n\n\t\/\/ generate another container's data on another machine.\n\tdriverForAnotherMachine, err := New(\"machineB\",\n\t\ttablename,\n\t\tdatabase,\n\t\tusername,\n\t\tpassword,\n\t\thostname,\n\t\tfalse,\n\t\ttime.Duration(bufferCount),\n\t\tpercentilesDuration)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer driverForAnotherMachine.Close()\n\ttestDriverOtherMachine := &influxDbTestStorageDriver{buffer: bufferCount}\n\tdriverForAnotherMachine.OverrideReadyToFlush(testDriverOtherMachine.readyToFlush)\n\ttestDriverOtherMachine.base = driverForAnotherMachine\n\n\ttest.StorageDriverFillRandomStatsFunc(\"containerOnAnotherMachine\", 100, testDriverOtherMachine, t)\n\tf(testDriver, t)\n}\n\nfunc TestSampleCpuUsage(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestSampleCpuUsage, t, kCacheDuration)\n}\n\nfunc TestRetrievePartialRecentStats(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestRetrievePartialRecentStats, t, 20)\n}\n\nfunc TestSamplesWithoutSample(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestSamplesWithoutSample, t, kCacheDuration)\n}\n\nfunc TestRetrieveAllRecentStats(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestRetrieveAllRecentStats, t, 10)\n}\n\nfunc TestNoRecentStats(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestNoRecentStats, t, kCacheDuration)\n}\n\nfunc TestNoSamples(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestNoSamples, t, kCacheDuration)\n}\n\nfunc TestPercentiles(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestPercentiles, t, kCacheDuration)\n}\n\nfunc TestMaxMemoryUsage(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestMaxMemoryUsage, t, kCacheDuration)\n}\n\nfunc TestPercentilesWithoutSample(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestPercentilesWithoutSample, t, kCacheDuration)\n}\n\nfunc TestPercentilesWithoutStats(t *testing.T) {\n\trunStorageTest(test.StorageDriverTestPercentilesWithoutStats, t, kCacheDuration)\n}\n\nfunc TestRetrieveZeroStats(t *testing.T) {\n\tt.SkipNow()\n\trunStorageTest(test.StorageDriverTestRetrieveZeroRecentStats, t, kCacheDuration)\n}\n\nfunc TestRetrieveZeroSamples(t *testing.T) {\n\tt.SkipNow()\n\trunStorageTest(test.StorageDriverTestRetrieveZeroSamples, t, kCacheDuration)\n}\n<|endoftext|>"} {"text":"<commit_before>package env\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/imdario\/mergo\"\n)\n\n\/\/ Env is the structure of a configuration for an environment.\ntype Env struct {\n\tName string `yaml:\"-\" json:\"-\" env:\"-\"`\n\tPassword string `yaml:\"password,omitempty\" json:\"password,omitempty\" env:\"THEMEKIT_PASSWORD\"`\n\tThemeID string `yaml:\"theme_id,omitempty\" json:\"theme_id,omitempty\" env:\"THEMEKIT_THEME_ID\"`\n\tDomain string `yaml:\"store\" json:\"store\" env:\"THEMEKIT_STORE\"`\n\tDirectory string `yaml:\"directory,omitempty\" json:\"directory,omitempty\" env:\"THEMEKIT_DIRECTORY\"`\n\tIgnoredFiles []string `yaml:\"ignore_files,omitempty\" json:\"ignore_files,omitempty\" env:\"THEMEKIT_IGNORE_FILES\" envSeparator:\":\"`\n\tProxy string `yaml:\"proxy,omitempty\" json:\"proxy,omitempty\" env:\"THEMEKIT_PROXY\"`\n\tIgnores []string `yaml:\"ignores,omitempty\" json:\"ignores,omitempty\" env:\"THEMEKIT_IGNORES\" envSeparator:\":\"`\n\tTimeout time.Duration `yaml:\"timeout,omitempty\" json:\"timeout,omitempty\" env:\"THEMEKIT_TIMEOUT\"`\n\tReadOnly bool `yaml:\"readonly,omitempty\" json:\"readonly,omitempty\" env:\"-\"`\n\tNotify string `yaml:\"notify,omitempty\" json:\"notify,omitempty\" env:\"THEMEKIT_NOTIFY\"`\n}\n\n\/\/Default is the default values for a environment\nvar Default = Env{\n\tName: \"development\",\n\tTimeout: 30 * time.Second,\n}\n\nfunc init() {\n\tDefault.Directory, _ = os.Getwd()\n}\n\nfunc newEnv(name string, initial Env, overrides ...Env) (*Env, error) {\n\tnewConfig := &Env{Name: name}\n\tfor _, override := range overrides {\n\t\tmergo.Merge(newConfig, &override)\n\t}\n\tmergo.Merge(newConfig, &initial)\n\tmergo.Merge(newConfig, &Default)\n\treturn newConfig, newConfig.validate()\n}\n\nfunc (env *Env) validate() error {\n\terrors := []string{}\n\n\tenv.ThemeID = strings.ToLower(strings.TrimSpace(env.ThemeID))\n\tif env.ThemeID != \"\" {\n\t\tif env.ThemeID == \"live\" {\n\t\t\tenv.ThemeID = \"\"\n\t\t} else if _, err := strconv.ParseInt(env.ThemeID, 10, 64); err != nil {\n\t\t\terrors = append(errors, \"invalid theme_id\")\n\t\t}\n\t}\n\n\tif len(env.Domain) == 0 {\n\t\terrors = append(errors, \"missing store domain\")\n\t} else if !strings.HasSuffix(env.Domain, \"myshopify.com\") && !strings.HasSuffix(env.Domain, \"myshopify.io\") {\n\t\terrors = append(errors, \"invalid store domain must end in '.myshopify.com'\")\n\t}\n\n\tif len(env.Password) == 0 {\n\t\terrors = append(errors, \"missing password\")\n\t}\n\n\tvar dirErrors []string\n\tenv.Directory, dirErrors = validateDirectory(env.Directory)\n\terrors = append(errors, dirErrors...)\n\n\tif len(errors) > 0 {\n\t\treturn fmt.Errorf(\"invalid environment [%s]: (%v)\", env.Name, strings.Join(errors, \",\"))\n\t}\n\n\treturn nil\n}\n\nfunc validateDirectory(dir string) (finalDir string, errors []string) {\n\tif fi, err := os.Lstat(filepath.Clean(dir)); err != nil {\n\t\terrors = append(errors, fmt.Sprintf(\"invalid project directory %v\", err))\n\t} else if fi.Mode()&os.ModeSymlink != 0 {\n\t\tif symDir, symlinkErr := filepath.EvalSymlinks(filepath.Clean(dir)); symlinkErr != nil {\n\t\t\terrors = append(errors, fmt.Sprintf(\"invalid project symlink: %s\", symlinkErr.Error()))\n\t\t} else {\n\t\t\treturn validateDirectory(symDir)\n\t\t}\n\t} else if !fi.Mode().IsDir() {\n\t\terrors = append(errors, fmt.Sprintf(\"Directory config %v is not a directory: %v\", dir, err))\n\t}\n\n\tif !filepath.IsAbs(dir) {\n\t\tvar err error\n\t\tif dir, err = filepath.Abs(dir); err != nil {\n\t\t\terrors = append(errors, fmt.Sprintf(\"Could not get absolute root bath: %v\", err))\n\t\t}\n\t}\n\n\treturn dir, errors\n}\n<commit_msg>Simple typo fix<commit_after>package env\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/imdario\/mergo\"\n)\n\n\/\/ Env is the structure of a configuration for an environment.\ntype Env struct {\n\tName string `yaml:\"-\" json:\"-\" env:\"-\"`\n\tPassword string `yaml:\"password,omitempty\" json:\"password,omitempty\" env:\"THEMEKIT_PASSWORD\"`\n\tThemeID string `yaml:\"theme_id,omitempty\" json:\"theme_id,omitempty\" env:\"THEMEKIT_THEME_ID\"`\n\tDomain string `yaml:\"store\" json:\"store\" env:\"THEMEKIT_STORE\"`\n\tDirectory string `yaml:\"directory,omitempty\" json:\"directory,omitempty\" env:\"THEMEKIT_DIRECTORY\"`\n\tIgnoredFiles []string `yaml:\"ignore_files,omitempty\" json:\"ignore_files,omitempty\" env:\"THEMEKIT_IGNORE_FILES\" envSeparator:\":\"`\n\tProxy string `yaml:\"proxy,omitempty\" json:\"proxy,omitempty\" env:\"THEMEKIT_PROXY\"`\n\tIgnores []string `yaml:\"ignores,omitempty\" json:\"ignores,omitempty\" env:\"THEMEKIT_IGNORES\" envSeparator:\":\"`\n\tTimeout time.Duration `yaml:\"timeout,omitempty\" json:\"timeout,omitempty\" env:\"THEMEKIT_TIMEOUT\"`\n\tReadOnly bool `yaml:\"readonly,omitempty\" json:\"readonly,omitempty\" env:\"-\"`\n\tNotify string `yaml:\"notify,omitempty\" json:\"notify,omitempty\" env:\"THEMEKIT_NOTIFY\"`\n}\n\n\/\/Default is the default values for a environment\nvar Default = Env{\n\tName: \"development\",\n\tTimeout: 30 * time.Second,\n}\n\nfunc init() {\n\tDefault.Directory, _ = os.Getwd()\n}\n\nfunc newEnv(name string, initial Env, overrides ...Env) (*Env, error) {\n\tnewConfig := &Env{Name: name}\n\tfor _, override := range overrides {\n\t\tmergo.Merge(newConfig, &override)\n\t}\n\tmergo.Merge(newConfig, &initial)\n\tmergo.Merge(newConfig, &Default)\n\treturn newConfig, newConfig.validate()\n}\n\nfunc (env *Env) validate() error {\n\terrors := []string{}\n\n\tenv.ThemeID = strings.ToLower(strings.TrimSpace(env.ThemeID))\n\tif env.ThemeID != \"\" {\n\t\tif env.ThemeID == \"live\" {\n\t\t\tenv.ThemeID = \"\"\n\t\t} else if _, err := strconv.ParseInt(env.ThemeID, 10, 64); err != nil {\n\t\t\terrors = append(errors, \"invalid theme_id\")\n\t\t}\n\t}\n\n\tif len(env.Domain) == 0 {\n\t\terrors = append(errors, \"missing store domain\")\n\t} else if !strings.HasSuffix(env.Domain, \"myshopify.com\") && !strings.HasSuffix(env.Domain, \"myshopify.io\") {\n\t\terrors = append(errors, \"invalid store domain must end in '.myshopify.com'\")\n\t}\n\n\tif len(env.Password) == 0 {\n\t\terrors = append(errors, \"missing password\")\n\t}\n\n\tvar dirErrors []string\n\tenv.Directory, dirErrors = validateDirectory(env.Directory)\n\terrors = append(errors, dirErrors...)\n\n\tif len(errors) > 0 {\n\t\treturn fmt.Errorf(\"invalid environment [%s]: (%v)\", env.Name, strings.Join(errors, \",\"))\n\t}\n\n\treturn nil\n}\n\nfunc validateDirectory(dir string) (finalDir string, errors []string) {\n\tif fi, err := os.Lstat(filepath.Clean(dir)); err != nil {\n\t\terrors = append(errors, fmt.Sprintf(\"invalid project directory %v\", err))\n\t} else if fi.Mode()&os.ModeSymlink != 0 {\n\t\tif symDir, symlinkErr := filepath.EvalSymlinks(filepath.Clean(dir)); symlinkErr != nil {\n\t\t\terrors = append(errors, fmt.Sprintf(\"invalid project symlink: %s\", symlinkErr.Error()))\n\t\t} else {\n\t\t\treturn validateDirectory(symDir)\n\t\t}\n\t} else if !fi.Mode().IsDir() {\n\t\terrors = append(errors, fmt.Sprintf(\"Directory config %v is not a directory: %v\", dir, err))\n\t}\n\n\tif !filepath.IsAbs(dir) {\n\t\tvar err error\n\t\tif dir, err = filepath.Abs(dir); err != nil {\n\t\t\terrors = append(errors, fmt.Sprintf(\"Could not get absolute root path: %v\", err))\n\t\t}\n\t}\n\n\treturn dir, errors\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage image\n\nimport (\n\t\"image\/color\"\n)\n\nvar (\n\t\/\/ Black is an opaque black uniform image.\n\tBlack = NewUniform(color.Black)\n\t\/\/ White is an opaque white uniform image.\n\tWhite = NewUniform(color.White)\n\t\/\/ Transparent is a fully transparent uniform image.\n\tTransparent = NewUniform(color.Transparent)\n\t\/\/ Opaque is a fully opaque uniform image.\n\tOpaque = NewUniform(color.Opaque)\n)\n\n\/\/ Uniform is an infinite-sized Image of uniform color.\n\/\/ It implements the color.Color, color.ColorModel, and Image interfaces.\ntype Uniform struct {\n\tC color.Color\n}\n\nfunc (c *Uniform) RGBA() (r, g, b, a uint32) {\n\treturn c.C.RGBA()\n}\n\nfunc (c *Uniform) ColorModel() color.Model {\n\treturn c\n}\n\nfunc (c *Uniform) Convert(color.Color) color.Color {\n\treturn c.C\n}\n\nfunc (c *Uniform) Bounds() Rectangle { return Rectangle{Point{-1e9, -1e9}, Point{1e9, 1e9}} }\n\nfunc (c *Uniform) At(x, y int) color.Color { return c.C }\n\n\/\/ Opaque scans the entire image and returns whether or not it is fully opaque.\nfunc (c *Uniform) Opaque() bool {\n\t_, _, _, a := c.C.RGBA()\n\treturn a == 0xffff\n}\n\nfunc NewUniform(c color.Color) *Uniform {\n\treturn &Uniform{c}\n}\n<commit_msg>image: fix doc typo Fixes issue 3789.<commit_after>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage image\n\nimport (\n\t\"image\/color\"\n)\n\nvar (\n\t\/\/ Black is an opaque black uniform image.\n\tBlack = NewUniform(color.Black)\n\t\/\/ White is an opaque white uniform image.\n\tWhite = NewUniform(color.White)\n\t\/\/ Transparent is a fully transparent uniform image.\n\tTransparent = NewUniform(color.Transparent)\n\t\/\/ Opaque is a fully opaque uniform image.\n\tOpaque = NewUniform(color.Opaque)\n)\n\n\/\/ Uniform is an infinite-sized Image of uniform color.\n\/\/ It implements the color.Color, color.Model, and Image interfaces.\ntype Uniform struct {\n\tC color.Color\n}\n\nfunc (c *Uniform) RGBA() (r, g, b, a uint32) {\n\treturn c.C.RGBA()\n}\n\nfunc (c *Uniform) ColorModel() color.Model {\n\treturn c\n}\n\nfunc (c *Uniform) Convert(color.Color) color.Color {\n\treturn c.C\n}\n\nfunc (c *Uniform) Bounds() Rectangle { return Rectangle{Point{-1e9, -1e9}, Point{1e9, 1e9}} }\n\nfunc (c *Uniform) At(x, y int) color.Color { return c.C }\n\n\/\/ Opaque scans the entire image and returns whether or not it is fully opaque.\nfunc (c *Uniform) Opaque() bool {\n\t_, _, _, a := c.C.RGBA()\n\treturn a == 0xffff\n}\n\nfunc NewUniform(c color.Color) *Uniform {\n\treturn &Uniform{c}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tmoocfetcher \"github.com\/moocfetcher\/moocfetcher-appliance\/backend\/lib\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar svc = s3.New(session.New(aws.NewConfig().WithRegion(\"us-east-1\")))\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"moocfetcher\"\n\tapp.Usage = \"MOOCFetcher commandline app\"\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"update-sizes\",\n\t\t\tAliases: []string{\"us\"},\n\t\t\tUsage: \"Calculates and updates size (no. of bytes) of launched courses.\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{Name: \"dryrun, r\",\n\t\t\t\t\tUsage: \"Don’t update remote launched.json file\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: updateCourseSizes,\n\t\t},\n\t\t{\n\t\t\tName: \"filter-courses\",\n\t\t\tAliases: []string{\"fc\"},\n\t\t\tUsage: \"Filters the list of launched courses to only include courses that are present on local disk\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{Name: \"dryrun, r\",\n\t\t\t\t\tUsage: \"Don’t create a filtered courses.json file\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{Name: \"courses-dir, d\",\n\t\t\t\t\tUsage: \"Location of courses on filesystem. Locate courses in `DIRECTORY`.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{Name: \"english-only, e\",\n\t\t\t\t\tUsage: \"Filter English language courses only\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: filterCourses,\n\t\t},\n\t}\n\n\tapp.Run(os.Args)\n}\n\nfunc fetchCourses() (*moocfetcher.CourseData, error) {\n\tfmt.Println(\"Retrieving launched courses…\")\n\n\t\/\/ Retrieve list of courses.\n\tresp, err := svc.GetObject(&s3.GetObjectInput{\n\t\tBucket: aws.String(moocfetcher.S3BucketMOOCFetcher),\n\t\tKey: aws.String(moocfetcher.OnDemandLaunchedCoursesKey),\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar courses *moocfetcher.CourseData = &moocfetcher.CourseData{}\n\terr = json.Unmarshal(body, courses)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn courses, nil\n}\n\nfunc updateCourseSizes(c *cli.Context) error {\n\n\tcourses, err := fetchCourses()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar totalSize uint64\n\tfor i, course := range courses.Courses {\n\t\tif course.Size == 0 {\n\t\t\tfmt.Printf(\"Finding size of %s…\", course.Slug)\n\n\t\t\tvar totalCourseSize uint64\n\n\t\t\terr := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{\n\t\t\t\tBucket: aws.String(moocfetcher.S3BucketMOOCFetcherCourseArchive),\n\t\t\t\tPrefix: aws.String(fmt.Sprintf(moocfetcher.S3CourseURLFormatString, course.Slug)),\n\t\t\t}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {\n\t\t\t\tfor _, o := range page.Contents {\n\t\t\t\t\ttotalCourseSize += uint64(*o.Size)\n\t\t\t\t}\n\t\t\t\treturn !lastPage\n\t\t\t})\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfmt.Printf(\"%d MB\\n\", totalCourseSize\/(1<<20))\n\t\t\tcourse.Size = totalCourseSize\n\t\t\tcourses.Courses[i] = course\n\t\t\ttotalSize += totalCourseSize\n\t\t}\n\t}\n\n\tif totalSize == 0 {\n\t\tfmt.Println(\"Nothing to update\")\n\t\treturn nil\n\t}\n\n\tif c.Bool(\"dryrun\") {\n\t\tfmt.Println(\"Dry run…not updating launched courses.\")\n\t\treturn nil\n\t}\n\n\tfmt.Println(\"Updating launched courses\")\n\tb, err := json.Marshal(courses)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = svc.PutObject(&s3.PutObjectInput{\n\t\tBucket: aws.String(moocfetcher.S3BucketMOOCFetcher),\n\t\tKey: aws.String(moocfetcher.OnDemandLaunchedCoursesKey),\n\t\tBody: bytes.NewReader(b),\n\t})\n\n\treturn err\n}\n\nfunc filterCourses(c *cli.Context) error {\n\tcoursesDir := c.String(\"courses-dir\")\n\n\tif coursesDir == \"\" {\n\t\treturn errors.New(\"courses-directory is required\")\n\t}\n\n\tcourses, err := fetchCourses()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar filtered moocfetcher.CourseData\n\n\tfor _, course := range courses.Courses {\n\t\tpath := filepath.Join(coursesDir, course.Slug)\n\t\tfmt.Printf(\"Checking for %s…\", path)\n\n\t\tvar found bool\n\t\tif info, err := os.Stat(path); err == nil {\n\t\t\tif info.Mode().IsDir() {\n\t\t\t\tfound = true\n\t\t\t\tfiltered.Courses = append(filtered.Courses, course)\n\t\t\t}\n\t\t}\n\n\t\tvar status string\n\t\tif found {\n\t\t\tstatus = \"found\"\n\t\t} else {\n\t\t\tstatus = \"NOT FOUND\"\n\t\t}\n\t\tfmt.Printf(\"%s\\n\", status)\n\t}\n\n\tfmt.Printf(\"%d courses found\\n\", len(filtered.Courses))\n\n\t\/\/ Filter only English language courses, if required\n\tif c.Bool(\"english-only\") {\n\t\tvar english moocfetcher.CourseData\n\t\tfor _, course := range filtered.Courses {\n\t\t\tlangs := course.Languages\n\t\t\tvar en bool\n\t\t\tfor _, l := range langs {\n\t\t\t\tif l == \"en\" {\n\t\t\t\t\ten = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif en {\n\t\t\t\tenglish.Courses = append(english.Courses, course)\n\t\t\t}\n\t\t}\n\t\tfmt.Printf(\"%d English courses found\\n\", len(english.Courses))\n\t\tfiltered = english\n\t}\n\n\tif c.Bool(\"dryrun\") {\n\t\tfmt.Println(\"Dry run…not writing to file.\")\n\t\treturn nil\n\t}\n\n\tfmt.Println(\"Writing to courses.json\")\n\n\toutput, err := json.MarshalIndent(&filtered, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(\"courses.json\", output, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>Calculate sizes only for English courses<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tmoocfetcher \"github.com\/moocfetcher\/moocfetcher-appliance\/backend\/lib\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar svc = s3.New(session.New(aws.NewConfig().WithRegion(\"us-east-1\")))\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"moocfetcher\"\n\tapp.Usage = \"MOOCFetcher commandline app\"\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"update-sizes\",\n\t\t\tAliases: []string{\"us\"},\n\t\t\tUsage: \"Calculates and updates size (no. of bytes) of launched courses.\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{Name: \"dryrun, r\",\n\t\t\t\t\tUsage: \"Don’t update remote launched.json file\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: updateCourseSizes,\n\t\t},\n\t\t{\n\t\t\tName: \"filter-courses\",\n\t\t\tAliases: []string{\"fc\"},\n\t\t\tUsage: \"Filters the list of launched courses to only include courses that are present on local disk\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.BoolFlag{Name: \"dryrun, r\",\n\t\t\t\t\tUsage: \"Don’t create a filtered courses.json file\",\n\t\t\t\t},\n\t\t\t\tcli.StringFlag{Name: \"courses-dir, d\",\n\t\t\t\t\tUsage: \"Location of courses on filesystem. Locate courses in `DIRECTORY`.\",\n\t\t\t\t},\n\t\t\t\tcli.BoolFlag{Name: \"english-only, e\",\n\t\t\t\t\tUsage: \"Filter English language courses only\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tAction: filterCourses,\n\t\t},\n\t}\n\n\tapp.Run(os.Args)\n}\n\nfunc fetchCourses() (*moocfetcher.CourseData, error) {\n\tfmt.Println(\"Retrieving launched courses…\")\n\n\t\/\/ Retrieve list of courses.\n\tresp, err := svc.GetObject(&s3.GetObjectInput{\n\t\tBucket: aws.String(moocfetcher.S3BucketMOOCFetcher),\n\t\tKey: aws.String(moocfetcher.OnDemandLaunchedCoursesKey),\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar courses *moocfetcher.CourseData = &moocfetcher.CourseData{}\n\terr = json.Unmarshal(body, courses)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn courses, nil\n}\n\nfunc updateCourseSizes(c *cli.Context) error {\n\n\tcourses, err := fetchCourses()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar totalSize uint64\n\tfor i, course := range courses.Courses {\n\t\t\/\/ Check if course is English\n\t\tlangs := course.Languages\n\t\tvar en bool\n\t\tfor _, l := range langs {\n\t\t\tif l == \"en\" {\n\t\t\t\ten = true\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Don’t do anything if course is not English\n\t\tif !en {\n\t\t\tcontinue\n\t\t}\n\n\t\tif course.Size == 0 {\n\t\t\tfmt.Printf(\"Finding size of %s…\", course.Slug)\n\n\t\t\tvar totalCourseSize uint64\n\n\t\t\terr := svc.ListObjectsV2Pages(&s3.ListObjectsV2Input{\n\t\t\t\tBucket: aws.String(moocfetcher.S3BucketMOOCFetcherCourseArchive),\n\t\t\t\tPrefix: aws.String(fmt.Sprintf(moocfetcher.S3CourseURLFormatString, course.Slug)),\n\t\t\t}, func(page *s3.ListObjectsV2Output, lastPage bool) bool {\n\t\t\t\tfor _, o := range page.Contents {\n\t\t\t\t\ttotalCourseSize += uint64(*o.Size)\n\t\t\t\t}\n\t\t\t\treturn !lastPage\n\t\t\t})\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfmt.Printf(\"%d MB\\n\", totalCourseSize\/(1<<20))\n\t\t\tcourse.Size = totalCourseSize\n\t\t\tcourses.Courses[i] = course\n\t\t\ttotalSize += totalCourseSize\n\t\t}\n\t}\n\n\tif totalSize == 0 {\n\t\tfmt.Println(\"Nothing to update\")\n\t\treturn nil\n\t}\n\n\tif c.Bool(\"dryrun\") {\n\t\tfmt.Println(\"Dry run…not updating launched courses.\")\n\t\treturn nil\n\t}\n\n\tfmt.Println(\"Updating launched courses\")\n\tb, err := json.Marshal(courses)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = svc.PutObject(&s3.PutObjectInput{\n\t\tBucket: aws.String(moocfetcher.S3BucketMOOCFetcher),\n\t\tKey: aws.String(moocfetcher.OnDemandLaunchedCoursesKey),\n\t\tBody: bytes.NewReader(b),\n\t})\n\n\treturn err\n}\n\nfunc filterCourses(c *cli.Context) error {\n\tcoursesDir := c.String(\"courses-dir\")\n\n\tif coursesDir == \"\" {\n\t\treturn errors.New(\"courses-directory is required\")\n\t}\n\n\tcourses, err := fetchCourses()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar filtered moocfetcher.CourseData\n\n\tfor _, course := range courses.Courses {\n\t\tpath := filepath.Join(coursesDir, course.Slug)\n\t\tfmt.Printf(\"Checking for %s…\", path)\n\n\t\tvar found bool\n\t\tif info, err := os.Stat(path); err == nil {\n\t\t\tif info.Mode().IsDir() {\n\t\t\t\tfound = true\n\t\t\t\tfiltered.Courses = append(filtered.Courses, course)\n\t\t\t}\n\t\t}\n\n\t\tvar status string\n\t\tif found {\n\t\t\tstatus = \"found\"\n\t\t} else {\n\t\t\tstatus = \"NOT FOUND\"\n\t\t}\n\t\tfmt.Printf(\"%s\\n\", status)\n\t}\n\n\tfmt.Printf(\"%d courses found\\n\", len(filtered.Courses))\n\n\t\/\/ Filter only English language courses, if required\n\tif c.Bool(\"english-only\") {\n\t\tvar english moocfetcher.CourseData\n\t\tfor _, course := range filtered.Courses {\n\t\t\tlangs := course.Languages\n\t\t\tvar en bool\n\t\t\tfor _, l := range langs {\n\t\t\t\tif l == \"en\" {\n\t\t\t\t\ten = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif en {\n\t\t\t\tenglish.Courses = append(english.Courses, course)\n\t\t\t}\n\t\t}\n\t\tfmt.Printf(\"%d English courses found\\n\", len(english.Courses))\n\t\tfiltered = english\n\t}\n\n\tif c.Bool(\"dryrun\") {\n\t\tfmt.Println(\"Dry run…not writing to file.\")\n\t\treturn nil\n\t}\n\n\tfmt.Println(\"Writing to courses.json\")\n\n\toutput, err := json.MarshalIndent(&filtered, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(\"courses.json\", output, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package anomalydetector implements the Azure ARM Anomalydetector service API version 1.0.\n\/\/\n\/\/ The Anomaly Detector API detects anomalies automatically in time series data. It supports two functionalities, one\n\/\/ is for detecting the whole series with model trained by the timeseries, another is detecting last point with model\n\/\/ trained by points before. By using this service, business customers can discover incidents and establish a logic\n\/\/ flow for root cause analysis.\npackage anomalydetector\n\n\/\/ Copyright (c) Microsoft and contributors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\/\/ Code generated by Microsoft (R) AutoRest Code Generator.\n\/\/ Changes may cause incorrect behavior and will be lost if the code is regenerated.\n\nimport (\n\t\"context\"\n\t\"github.com\/Azure\/go-autorest\/autorest\"\n\t\"github.com\/Azure\/go-autorest\/autorest\/azure\"\n\t\"github.com\/Azure\/go-autorest\/autorest\/validation\"\n\t\"github.com\/Azure\/go-autorest\/tracing\"\n\t\"net\/http\"\n)\n\n\/\/ BaseClient is the base client for Anomalydetector.\ntype BaseClient struct {\n\tautorest.Client\n\tEndpoint string\n}\n\n\/\/ New creates an instance of the BaseClient client.\nfunc New(endpoint string) BaseClient {\n\treturn NewWithoutDefaults(endpoint)\n}\n\n\/\/ NewWithoutDefaults creates an instance of the BaseClient client.\nfunc NewWithoutDefaults(endpoint string) BaseClient {\n\treturn BaseClient{\n\t\tClient: autorest.NewClientWithUserAgent(UserAgent()),\n\t\tEndpoint: endpoint,\n\t}\n}\n\n\/\/ EntireDetect this operation generates a model using an entire series, each point is detected with the same model.\n\/\/ With this method, points before and after a certain point are used to determine whether it is an anomaly. The entire\n\/\/ detection can give user an overall status of the time series.\n\/\/ Parameters:\n\/\/ body - time series points and period if needed. Advanced model parameters can also be set in the request.\nfunc (client BaseClient) EntireDetect(ctx context.Context, body Request) (result EntireDetectResponse, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"\/BaseClient.EntireDetect\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response.Response != nil {\n\t\t\t\tsc = result.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: body,\n\t\t\tConstraints: []validation.Constraint{{Target: \"body.Series\", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewError(\"anomalydetector.BaseClient\", \"EntireDetect\", err.Error())\n\t}\n\n\treq, err := client.EntireDetectPreparer(ctx, body)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"anomalydetector.BaseClient\", \"EntireDetect\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.EntireDetectSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"anomalydetector.BaseClient\", \"EntireDetect\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.EntireDetectResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"anomalydetector.BaseClient\", \"EntireDetect\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}\n\n\/\/ EntireDetectPreparer prepares the EntireDetect request.\nfunc (client BaseClient) EntireDetectPreparer(ctx context.Context, body Request) (*http.Request, error) {\n\turlParameters := map[string]interface{}{\n\t\t\"Endpoint\": client.Endpoint,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsContentType(\"application\/json; charset=utf-8\"),\n\t\tautorest.AsPost(),\n\t\tautorest.WithCustomBaseURL(\"{Endpoint}\/anomalydetector\/v1.0\", urlParameters),\n\t\tautorest.WithPath(\"\/timeseries\/entire\/detect\"),\n\t\tautorest.WithJSON(body))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}\n\n\/\/ EntireDetectSender sends the EntireDetect request. The method will close the\n\/\/ http.Response Body if it receives an error.\nfunc (client BaseClient) EntireDetectSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}\n\n\/\/ EntireDetectResponder handles the response to the EntireDetect request. The method always\n\/\/ closes the http.Response Body.\nfunc (client BaseClient) EntireDetectResponder(resp *http.Response) (result EntireDetectResponse, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}\n\n\/\/ LastDetect this operation generates a model using points before the latest one. With this method, only historical\n\/\/ points are used to determine whether the target point is an anomaly. The latest point detecting operation matches\n\/\/ the scenario of real-time monitoring of business metrics.\n\/\/ Parameters:\n\/\/ body - time series points and period if needed. Advanced model parameters can also be set in the request.\nfunc (client BaseClient) LastDetect(ctx context.Context, body Request) (result LastDetectResponse, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"\/BaseClient.LastDetect\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response.Response != nil {\n\t\t\t\tsc = result.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: body,\n\t\t\tConstraints: []validation.Constraint{{Target: \"body.Series\", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewError(\"anomalydetector.BaseClient\", \"LastDetect\", err.Error())\n\t}\n\n\treq, err := client.LastDetectPreparer(ctx, body)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"anomalydetector.BaseClient\", \"LastDetect\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.LastDetectSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"anomalydetector.BaseClient\", \"LastDetect\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.LastDetectResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"anomalydetector.BaseClient\", \"LastDetect\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}\n\n\/\/ LastDetectPreparer prepares the LastDetect request.\nfunc (client BaseClient) LastDetectPreparer(ctx context.Context, body Request) (*http.Request, error) {\n\turlParameters := map[string]interface{}{\n\t\t\"Endpoint\": client.Endpoint,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsContentType(\"application\/json; charset=utf-8\"),\n\t\tautorest.AsPost(),\n\t\tautorest.WithCustomBaseURL(\"{Endpoint}\/anomalydetector\/v1.0\", urlParameters),\n\t\tautorest.WithPath(\"\/timeseries\/last\/detect\"),\n\t\tautorest.WithJSON(body))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}\n\n\/\/ LastDetectSender sends the LastDetect request. The method will close the\n\/\/ http.Response Body if it receives an error.\nfunc (client BaseClient) LastDetectSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}\n\n\/\/ LastDetectResponder handles the response to the LastDetect request. The method always\n\/\/ closes the http.Response Body.\nfunc (client BaseClient) LastDetectResponder(resp *http.Response) (result LastDetectResponse, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}\n<commit_msg>Generated from ebbbdd1db732261ea8038e49422b55a3b289e9ce (#4567)<commit_after>\/\/ Package anomalydetector implements the Azure ARM Anomalydetector service API version 1.0.\n\/\/\n\/\/ The Anomaly Detector API detects anomalies automatically in time series data. It supports two functionalities, one\n\/\/ is for detecting the whole series with model trained by the timeseries, another is detecting last point with model\n\/\/ trained by points before. By using this service, business customers can discover incidents and establish a logic\n\/\/ flow for root cause analysis.\npackage anomalydetector\n\n\/\/ Copyright (c) Microsoft and contributors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\/\/ Code generated by Microsoft (R) AutoRest Code Generator.\n\/\/ Changes may cause incorrect behavior and will be lost if the code is regenerated.\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"github.com\/Azure\/go-autorest\/autorest\"\n\t\"github.com\/Azure\/go-autorest\/autorest\/azure\"\n\t\"github.com\/Azure\/go-autorest\/autorest\/validation\"\n\t\"github.com\/Azure\/go-autorest\/tracing\"\n\t\"net\/http\"\n)\n\n\/\/ BaseClient is the base client for Anomalydetector.\ntype BaseClient struct {\n\tautorest.Client\n\tEndpoint string\n}\n\n\/\/ New creates an instance of the BaseClient client.\nfunc New(endpoint string) BaseClient {\n\treturn NewWithoutDefaults(endpoint)\n}\n\n\/\/ NewWithoutDefaults creates an instance of the BaseClient client.\nfunc NewWithoutDefaults(endpoint string) BaseClient {\n\treturn BaseClient{\n\t\tClient: autorest.NewClientWithOptions(autorest.ClientOptions{UserAgent: UserAgent(), Renegotiation: tls.RenegotiateFreelyAsClient}),\n\t\tEndpoint: endpoint,\n\t}\n}\n\n\/\/ EntireDetect this operation generates a model using an entire series, each point is detected with the same model.\n\/\/ With this method, points before and after a certain point are used to determine whether it is an anomaly. The entire\n\/\/ detection can give user an overall status of the time series.\n\/\/ Parameters:\n\/\/ body - time series points and period if needed. Advanced model parameters can also be set in the request.\nfunc (client BaseClient) EntireDetect(ctx context.Context, body Request) (result EntireDetectResponse, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"\/BaseClient.EntireDetect\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response.Response != nil {\n\t\t\t\tsc = result.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: body,\n\t\t\tConstraints: []validation.Constraint{{Target: \"body.Series\", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewError(\"anomalydetector.BaseClient\", \"EntireDetect\", err.Error())\n\t}\n\n\treq, err := client.EntireDetectPreparer(ctx, body)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"anomalydetector.BaseClient\", \"EntireDetect\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.EntireDetectSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"anomalydetector.BaseClient\", \"EntireDetect\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.EntireDetectResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"anomalydetector.BaseClient\", \"EntireDetect\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}\n\n\/\/ EntireDetectPreparer prepares the EntireDetect request.\nfunc (client BaseClient) EntireDetectPreparer(ctx context.Context, body Request) (*http.Request, error) {\n\turlParameters := map[string]interface{}{\n\t\t\"Endpoint\": client.Endpoint,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsContentType(\"application\/json; charset=utf-8\"),\n\t\tautorest.AsPost(),\n\t\tautorest.WithCustomBaseURL(\"{Endpoint}\/anomalydetector\/v1.0\", urlParameters),\n\t\tautorest.WithPath(\"\/timeseries\/entire\/detect\"),\n\t\tautorest.WithJSON(body))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}\n\n\/\/ EntireDetectSender sends the EntireDetect request. The method will close the\n\/\/ http.Response Body if it receives an error.\nfunc (client BaseClient) EntireDetectSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}\n\n\/\/ EntireDetectResponder handles the response to the EntireDetect request. The method always\n\/\/ closes the http.Response Body.\nfunc (client BaseClient) EntireDetectResponder(resp *http.Response) (result EntireDetectResponse, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}\n\n\/\/ LastDetect this operation generates a model using points before the latest one. With this method, only historical\n\/\/ points are used to determine whether the target point is an anomaly. The latest point detecting operation matches\n\/\/ the scenario of real-time monitoring of business metrics.\n\/\/ Parameters:\n\/\/ body - time series points and period if needed. Advanced model parameters can also be set in the request.\nfunc (client BaseClient) LastDetect(ctx context.Context, body Request) (result LastDetectResponse, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"\/BaseClient.LastDetect\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response.Response != nil {\n\t\t\t\tsc = result.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tif err := validation.Validate([]validation.Validation{\n\t\t{TargetValue: body,\n\t\t\tConstraints: []validation.Constraint{{Target: \"body.Series\", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {\n\t\treturn result, validation.NewError(\"anomalydetector.BaseClient\", \"LastDetect\", err.Error())\n\t}\n\n\treq, err := client.LastDetectPreparer(ctx, body)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"anomalydetector.BaseClient\", \"LastDetect\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.LastDetectSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"anomalydetector.BaseClient\", \"LastDetect\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult, err = client.LastDetectResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"anomalydetector.BaseClient\", \"LastDetect\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}\n\n\/\/ LastDetectPreparer prepares the LastDetect request.\nfunc (client BaseClient) LastDetectPreparer(ctx context.Context, body Request) (*http.Request, error) {\n\turlParameters := map[string]interface{}{\n\t\t\"Endpoint\": client.Endpoint,\n\t}\n\n\tpreparer := autorest.CreatePreparer(\n\t\tautorest.AsContentType(\"application\/json; charset=utf-8\"),\n\t\tautorest.AsPost(),\n\t\tautorest.WithCustomBaseURL(\"{Endpoint}\/anomalydetector\/v1.0\", urlParameters),\n\t\tautorest.WithPath(\"\/timeseries\/last\/detect\"),\n\t\tautorest.WithJSON(body))\n\treturn preparer.Prepare((&http.Request{}).WithContext(ctx))\n}\n\n\/\/ LastDetectSender sends the LastDetect request. The method will close the\n\/\/ http.Response Body if it receives an error.\nfunc (client BaseClient) LastDetectSender(req *http.Request) (*http.Response, error) {\n\treturn autorest.SendWithSender(client, req,\n\t\tautorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))\n}\n\n\/\/ LastDetectResponder handles the response to the LastDetect request. The method always\n\/\/ closes the http.Response Body.\nfunc (client BaseClient) LastDetectResponder(resp *http.Response) (result LastDetectResponse, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tclient.ByInspecting(),\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package messagebird\n\nimport (\n\t\"errors\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype TypeDetails map[string]interface{}\n\ntype Message struct {\n\tId string\n\tHRef string\n\tDirection string\n\tType string\n\tOriginator string\n\tBody string\n\tReference string\n\tValidity *int\n\tGateway int\n\tTypeDetails TypeDetails\n\tDataCoding string\n\tMClass int\n\tScheduledDatetime *time.Time\n\tCreatedDatetime *time.Time\n\tRecipients Recipients\n\tErrors []Error\n}\n\ntype MessageParams struct {\n\tType string\n\tReference string\n\tValidity int\n\tGateway int\n\tTypeDetails TypeDetails\n\tDataCoding string\n\tScheduledDatetime time.Time\n}\n\ntype MessageQueryParams struct {\n\tOriginator string\n\tDirection string\n\tLimit int\n\tOffset int\n}\n\ntype Messages []Message\n\n\/\/ paramsForMessage converts the specified MessageParams struct to a\n\/\/ url.Values pointer and returns it.\nfunc paramsForMessage(params *MessageParams) (*url.Values, error) {\n\turlParams := &url.Values{}\n\n\tif params == nil {\n\t\treturn urlParams, nil\n\t}\n\n\tif params.Type != \"\" {\n\t\turlParams.Set(\"type\", params.Type)\n\t\tif params.Type == \"flash\" {\n\t\t\turlParams.Set(\"mclass\", \"0\")\n\t\t}\n\t}\n\tif params.Reference != \"\" {\n\t\turlParams.Set(\"reference\", params.Reference)\n\t}\n\tif params.Validity != 0 {\n\t\turlParams.Set(\"validity\", strconv.Itoa(params.Validity))\n\t}\n\tif params.Gateway != 0 {\n\t\turlParams.Set(\"gateway\", strconv.Itoa(params.Gateway))\n\t}\n\n\tfor k, v := range params.TypeDetails {\n\t\tif vs, ok := v.(string); ok {\n\t\t\turlParams.Set(\"typeDetails[\"+k+\"]\", vs)\n\t\t} else if vi, ok := v.(int); ok {\n\t\t\turlParams.Set(\"typeDetails[\"+k+\"]\", strconv.Itoa(vi))\n\t\t} else {\n\t\t\treturn nil, errors.New(\"Unknown type for typeDetails value\")\n\t\t}\n\t}\n\n\tif params.DataCoding != \"\" {\n\t\turlParams.Set(\"datacoding\", params.DataCoding)\n\t}\n\tif params.ScheduledDatetime.Unix() > 0 {\n\t\turlParams.Set(\"scheduledDatetime\", params.ScheduledDatetime.Format(time.RFC3339))\n\t}\n\n\treturn urlParams, nil\n}\n\nfunc paramsForMessageQuery(params *MessageQueryParams) (*url.Values, error) {\n\turlParams := &url.Values{}\n\n\tif params == nil {\n\t\treturn urlParams, nil\n\t}\n\n\tif params.Direction != \"\" {\n\t\turlParams.Set(\"direction\", params.Direction)\n\t}\n\n\tif params.Originator != \"\" {\n\t\turlParams.Set(\"originator\", params.Originator)\n\t}\n\n\tif params.Limit != 0 {\n\t\turlParams.Set(\"limit\", strconv.Itoa(params.Limit))\n\t}\n\n\turlParams.Set(\"offset\", strconv.Itoa(params.Offset))\n\n\treturn urlParams, nil\n}\n<commit_msg>Added wrapper<commit_after>package messagebird\n\nimport (\n\t\"errors\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype TypeDetails map[string]interface{}\n\ntype Message struct {\n\tId string\n\tHRef string\n\tDirection string\n\tType string\n\tOriginator string\n\tBody string\n\tReference string\n\tValidity *int\n\tGateway int\n\tTypeDetails TypeDetails\n\tDataCoding string\n\tMClass int\n\tScheduledDatetime *time.Time\n\tCreatedDatetime *time.Time\n\tRecipients Recipients\n\tErrors []Error\n}\n\ntype MessageParams struct {\n\tType string\n\tReference string\n\tValidity int\n\tGateway int\n\tTypeDetails TypeDetails\n\tDataCoding string\n\tScheduledDatetime time.Time\n}\n\ntype MessageQueryParams struct {\n\tOriginator string\n\tDirection string\n\tLimit int\n\tOffset int\n}\n\ntype Messages struct {\n\tOffset int\n\tLimit int\n\tCount int\n\tTotalCount int\n\tItems []Message\n}\n\n\/\/ paramsForMessage converts the specified MessageParams struct to a\n\/\/ url.Values pointer and returns it.\nfunc paramsForMessage(params *MessageParams) (*url.Values, error) {\n\turlParams := &url.Values{}\n\n\tif params == nil {\n\t\treturn urlParams, nil\n\t}\n\n\tif params.Type != \"\" {\n\t\turlParams.Set(\"type\", params.Type)\n\t\tif params.Type == \"flash\" {\n\t\t\turlParams.Set(\"mclass\", \"0\")\n\t\t}\n\t}\n\tif params.Reference != \"\" {\n\t\turlParams.Set(\"reference\", params.Reference)\n\t}\n\tif params.Validity != 0 {\n\t\turlParams.Set(\"validity\", strconv.Itoa(params.Validity))\n\t}\n\tif params.Gateway != 0 {\n\t\turlParams.Set(\"gateway\", strconv.Itoa(params.Gateway))\n\t}\n\n\tfor k, v := range params.TypeDetails {\n\t\tif vs, ok := v.(string); ok {\n\t\t\turlParams.Set(\"typeDetails[\"+k+\"]\", vs)\n\t\t} else if vi, ok := v.(int); ok {\n\t\t\turlParams.Set(\"typeDetails[\"+k+\"]\", strconv.Itoa(vi))\n\t\t} else {\n\t\t\treturn nil, errors.New(\"Unknown type for typeDetails value\")\n\t\t}\n\t}\n\n\tif params.DataCoding != \"\" {\n\t\turlParams.Set(\"datacoding\", params.DataCoding)\n\t}\n\tif params.ScheduledDatetime.Unix() > 0 {\n\t\turlParams.Set(\"scheduledDatetime\", params.ScheduledDatetime.Format(time.RFC3339))\n\t}\n\n\treturn urlParams, nil\n}\n\nfunc paramsForMessageQuery(params *MessageQueryParams) (*url.Values, error) {\n\turlParams := &url.Values{}\n\n\tif params == nil {\n\t\treturn urlParams, nil\n\t}\n\n\tif params.Direction != \"\" {\n\t\turlParams.Set(\"direction\", params.Direction)\n\t}\n\n\tif params.Originator != \"\" {\n\t\turlParams.Set(\"originator\", params.Originator)\n\t}\n\n\tif params.Limit != 0 {\n\t\turlParams.Set(\"limit\", strconv.Itoa(params.Limit))\n\t}\n\n\turlParams.Set(\"offset\", strconv.Itoa(params.Offset))\n\n\treturn urlParams, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package sarama\n\nimport (\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"github.com\/eapache\/go-xerial-snappy\"\n)\n\n\/\/ CompressionCodec represents the various compression codecs recognized by Kafka in messages.\ntype CompressionCodec int8\n\n\/\/ only the last two bits are really used\nconst compressionCodecMask int8 = 0x03\n\nconst (\n\tCompressionNone CompressionCodec = 0\n\tCompressionGZIP CompressionCodec = 1\n\tCompressionSnappy CompressionCodec = 2\n)\n\ntype Message struct {\n\tCodec CompressionCodec \/\/ codec used to compress the message contents\n\tKey []byte \/\/ the message key, may be nil\n\tValue []byte \/\/ the message contents\n\tSet *MessageSet \/\/ the message set a message might wrap\n\tVersion int8 \/\/ v1 requires Kafka 0.10\n\tTimestamp time.Time \/\/ the timestamp of the message (version 1+ only)\n\n\tcompressedCache []byte\n}\n\nfunc (m *Message) encode(pe packetEncoder) error {\n\tpe.push(&crc32Field{})\n\n\tpe.putInt8(m.Version)\n\n\tattributes := int8(m.Codec) & compressionCodecMask\n\tpe.putInt8(attributes)\n\n\tif m.Version >= 1 {\n\t\tpe.putInt64(m.Timestamp.UnixNano() \/ int64(time.Millisecond))\n\t}\n\n\terr := pe.putBytes(m.Key)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar payload []byte\n\n\tif m.compressedCache != nil {\n\t\tpayload = m.compressedCache\n\t\tm.compressedCache = nil\n\t} else if m.Value != nil {\n\t\tswitch m.Codec {\n\t\tcase CompressionNone:\n\t\t\tpayload = m.Value\n\t\tcase CompressionGZIP:\n\t\t\tvar buf bytes.Buffer\n\t\t\twriter := gzip.NewWriter(&buf)\n\t\t\tif _, err = writer.Write(m.Value); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = writer.Close(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tm.compressedCache = buf.Bytes()\n\t\t\tpayload = m.compressedCache\n\t\tcase CompressionSnappy:\n\t\t\ttmp := snappy.Encode(m.Value)\n\t\t\tm.compressedCache = tmp\n\t\t\tpayload = m.compressedCache\n\t\tdefault:\n\t\t\treturn PacketEncodingError{fmt.Sprintf(\"unsupported compression codec (%d)\", m.Codec)}\n\t\t}\n\t}\n\n\tif err = pe.putBytes(payload); err != nil {\n\t\treturn err\n\t}\n\n\treturn pe.pop()\n}\n\nfunc (m *Message) decode(pd packetDecoder) (err error) {\n\terr = pd.push(&crc32Field{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.Version, err = pd.getInt8()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tattribute, err := pd.getInt8()\n\tif err != nil {\n\t\treturn err\n\t}\n\tm.Codec = CompressionCodec(attribute & compressionCodecMask)\n\n\tif m.Version >= 1 {\n\t\tmillis, err := pd.getInt64()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tm.Timestamp = time.Unix(millis\/1000, (millis%1000)*int64(time.Millisecond))\n\t}\n\n\tm.Key, err = pd.getBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.Value, err = pd.getBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch m.Codec {\n\tcase CompressionNone:\n\t\t\/\/ nothing to do\n\tcase CompressionGZIP:\n\t\tif m.Value == nil {\n\t\t\tbreak\n\t\t}\n\t\treader, err := gzip.NewReader(bytes.NewReader(m.Value))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif m.Value, err = ioutil.ReadAll(reader); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := m.decodeSet(); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase CompressionSnappy:\n\t\tif m.Value == nil {\n\t\t\tbreak\n\t\t}\n\t\tif m.Value, err = snappy.Decode(m.Value); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := m.decodeSet(); err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\treturn PacketDecodingError{fmt.Sprintf(\"invalid compression specified (%d)\", m.Codec)}\n\t}\n\n\treturn pd.pop()\n}\n\n\/\/ decodes a message set from a previousy encoded bulk-message\nfunc (m *Message) decodeSet() (err error) {\n\tpd := realDecoder{raw: m.Value}\n\tm.Set = &MessageSet{}\n\treturn m.Set.decode(&pd)\n}\n<commit_msg>Add lz4 compression<commit_after>package sarama\n\nimport (\n\t\"bytes\"\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"github.com\/eapache\/go-xerial-snappy\"\n\t\"github.com\/pierrec\/lz4\"\n)\n\n\/\/ CompressionCodec represents the various compression codecs recognized by Kafka in messages.\ntype CompressionCodec int8\n\n\/\/ only the last two bits are really used\nconst compressionCodecMask int8 = 0x03\n\nconst (\n\tCompressionNone CompressionCodec = 0\n\tCompressionGZIP CompressionCodec = 1\n\tCompressionSnappy CompressionCodec = 2\n\tCompressionLZ4 CompressionCodec = 3\n)\n\ntype Message struct {\n\tCodec CompressionCodec \/\/ codec used to compress the message contents\n\tKey []byte \/\/ the message key, may be nil\n\tValue []byte \/\/ the message contents\n\tSet *MessageSet \/\/ the message set a message might wrap\n\tVersion int8 \/\/ v1 requires Kafka 0.10\n\tTimestamp time.Time \/\/ the timestamp of the message (version 1+ only)\n\n\tcompressedCache []byte\n}\n\nfunc (m *Message) encode(pe packetEncoder) error {\n\tpe.push(&crc32Field{})\n\n\tpe.putInt8(m.Version)\n\n\tattributes := int8(m.Codec) & compressionCodecMask\n\tpe.putInt8(attributes)\n\n\tif m.Version >= 1 {\n\t\tpe.putInt64(m.Timestamp.UnixNano() \/ int64(time.Millisecond))\n\t}\n\n\terr := pe.putBytes(m.Key)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar payload []byte\n\n\tif m.compressedCache != nil {\n\t\tpayload = m.compressedCache\n\t\tm.compressedCache = nil\n\t} else if m.Value != nil {\n\t\tswitch m.Codec {\n\t\tcase CompressionNone:\n\t\t\tpayload = m.Value\n\t\tcase CompressionGZIP:\n\t\t\tvar buf bytes.Buffer\n\t\t\twriter := gzip.NewWriter(&buf)\n\t\t\tif _, err = writer.Write(m.Value); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = writer.Close(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tm.compressedCache = buf.Bytes()\n\t\t\tpayload = m.compressedCache\n\t\tcase CompressionSnappy:\n\t\t\ttmp := snappy.Encode(m.Value)\n\t\t\tm.compressedCache = tmp\n\t\t\tpayload = m.compressedCache\n\t\tcase CompressionLZ4:\n\t\t\tvar buf bytes.Buffer\n\t\t\twriter := lz4.NewWriter(&buf)\n\t\t\tif _, err = writer.Write(m.Value); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = writer.Close(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tm.compressedCache = buf.Bytes()\n\t\t\tpayload = m.compressedCache\n\n\t\tdefault:\n\t\t\treturn PacketEncodingError{fmt.Sprintf(\"unsupported compression codec (%d)\", m.Codec)}\n\t\t}\n\t}\n\n\tif err = pe.putBytes(payload); err != nil {\n\t\treturn err\n\t}\n\n\treturn pe.pop()\n}\n\nfunc (m *Message) decode(pd packetDecoder) (err error) {\n\terr = pd.push(&crc32Field{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.Version, err = pd.getInt8()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tattribute, err := pd.getInt8()\n\tif err != nil {\n\t\treturn err\n\t}\n\tm.Codec = CompressionCodec(attribute & compressionCodecMask)\n\n\tif m.Version >= 1 {\n\t\tmillis, err := pd.getInt64()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tm.Timestamp = time.Unix(millis\/1000, (millis%1000)*int64(time.Millisecond))\n\t}\n\n\tm.Key, err = pd.getBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.Value, err = pd.getBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch m.Codec {\n\tcase CompressionNone:\n\t\t\/\/ nothing to do\n\tcase CompressionGZIP:\n\t\tif m.Value == nil {\n\t\t\tbreak\n\t\t}\n\t\treader, err := gzip.NewReader(bytes.NewReader(m.Value))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif m.Value, err = ioutil.ReadAll(reader); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := m.decodeSet(); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase CompressionSnappy:\n\t\tif m.Value == nil {\n\t\t\tbreak\n\t\t}\n\t\tif m.Value, err = snappy.Decode(m.Value); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := m.decodeSet(); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase CompressionLZ4:\n\t\tif m.Value == nil {\n\t\t\tbreak\n\t\t}\n\t\treader := lz4.NewReader(bytes.NewReader(m.Value))\n\t\tif m.Value, err = ioutil.ReadAll(reader); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := m.decodeSet(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\tdefault:\n\t\treturn PacketDecodingError{fmt.Sprintf(\"invalid compression specified (%d)\", m.Codec)}\n\t}\n\n\treturn pd.pop()\n}\n\n\/\/ decodes a message set from a previousy encoded bulk-message\nfunc (m *Message) decodeSet() (err error) {\n\tpd := realDecoder{raw: m.Value}\n\tm.Set = &MessageSet{}\n\treturn m.Set.decode(&pd)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\n\/\/ Message represents an individual message for processing by the server or\n\/\/ returning to the client.\n\/\/\n\/\/ See http:\/\/jmap.io\/spec.html#the-structure-of-an-exchange\ntype Message struct {\n\t\/\/ Name specifies the method to be called on the server or the type of\n\t\/\/ response being sent to the client.\n\tName string\n\n\t\/\/ Arguments is an object containing named arguments for the method or\n\t\/\/ response.\n\tArguments interface{}\n\n\t\/\/ ClientID is an arbitary string to be echoed back with the responses\n\t\/\/ emitted bythe method call.\n\tClientID string\n}\n<commit_msg>message: implement json marshaling and unmarshaling<commit_after>package main\n\nimport \"encoding\/json\"\n\n\/\/ Message represents an individual message for processing by the server or\n\/\/ returning to the client.\n\/\/\n\/\/ See http:\/\/jmap.io\/spec.html#the-structure-of-an-exchange\ntype Message struct {\n\t\/\/ Name specifies the method to be called on the server or the type of\n\t\/\/ response being sent to the client.\n\tName string\n\n\t\/\/ Arguments is an object containing named arguments for the method or\n\t\/\/ response.\n\tArguments interface{}\n\n\t\/\/ ClientID is an arbitary string to be echoed back with the responses\n\t\/\/ emitted bythe method call.\n\tClientID string\n}\n\nfunc (m *Message) MarshalJSON() ([]byte, error) {\n\tarr := make([]interface{}, 3)\n\tarr[0] = m.Name\n\tarr[1] = m.Arguments\n\tarr[2] = m.ClientID\n\n\treturn json.Marshal(arr)\n}\n\nfunc (m *Message) UnmarshalJSON(j []byte) error {\n\tarr := []interface{}{}\n\tif err := json.Unmarshal(j, &arr); err != nil {\n\t\treturn err\n\t}\n\n\tm.Name = arr[0].(string)\n\tm.Arguments = arr[1]\n\tm.ClientID = arr[2].(string)\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package smtpd\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"mime\"\n\t\"mime\/multipart\"\n\t\"mime\/quotedprintable\"\n\t\"net\/mail\"\n\t\"net\/textproto\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst idEntropy = 64\nconst letterBytes = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\nconst (\n\tletterIdxBits = 6 \/\/ 6 bits to represent a letter index\n\tletterIdxMask = 1<<letterIdxBits - 1 \/\/ All 1-bits, as many as letterIdxBits\n\tletterIdxMax = 63 \/ letterIdxBits \/\/ # of letter indices fitting in 63 bits\n)\n\n\/\/ Message is a nicely packaged representation of the\n\/\/ recieved message\ntype Message struct {\n\tTo []*mail.Address\n\tFrom *mail.Address\n\tHeader mail.Header\n\tSubject string\n\tRawBody []byte\n\tSource []byte\n\n\tmessageID string\n\tgenMessageID sync.Once\n\trcpt []*mail.Address\n\n\t\/\/ meta info\n\tLogger *log.Logger\n}\n\n\/\/ Part represents a single part of the message\ntype Part struct {\n\tHeader textproto.MIMEHeader\n\tpart *multipart.Part\n\tBody []byte\n\tChildren []*Part\n}\n\n\/\/ ID returns an identifier for this message, or generates one if none available using the masked string\n\/\/ algorithm from https:\/\/stackoverflow.com\/questions\/22892120\/how-to-generate-a-random-string-of-a-fixed-length-in-golang\nfunc (m *Message) ID() string {\n\tm.genMessageID.Do(func() {\n\t\tif m.messageID = m.Header.Get(\"Message-ID\"); m.messageID != \"\" {\n\t\t\treturn\n\t\t}\n\t\tvar src = rand.NewSource(time.Now().UnixNano())\n\n\t\tb := make([]byte, idEntropy)\n\t\t\/\/ A src.Int63() generates 63 random bits, enough for letterIdxMax characters!\n\t\tfor i, cache, remain := idEntropy-1, src.Int63(), letterIdxMax; i >= 0; {\n\t\t\tif remain == 0 {\n\t\t\t\tcache, remain = src.Int63(), letterIdxMax\n\t\t\t}\n\t\t\tif idx := int(cache & letterIdxMask); idx < len(letterBytes) {\n\t\t\t\tb[i] = letterBytes[idx]\n\t\t\t\ti--\n\t\t\t}\n\t\t\tcache >>= letterIdxBits\n\t\t\tremain--\n\t\t}\n\n\t\tm.messageID = string(b)\n\t})\n\treturn m.messageID\n}\n\n\/\/ BCC returns a list of addresses this message should be\nfunc (m *Message) BCC() []*mail.Address {\n\n\tvar inHeaders = make(map[string]struct{})\n\tfor _, to := range m.To {\n\t\tinHeaders[to.Address] = struct{}{}\n\t}\n\n\tvar bcc []*mail.Address\n\tfor _, recipient := range m.rcpt {\n\t\tif _, ok := inHeaders[recipient.Address]; !ok {\n\t\t\tbcc = append(bcc, recipient)\n\t\t}\n\t}\n\n\treturn bcc\n}\n\n\/\/ Plain returns the text\/plain content of the message, if any\nfunc (m *Message) Plain() ([]byte, error) {\n\treturn m.FindBody(\"text\/plain\")\n}\n\n\/\/ HTML returns the text\/html content of the message, if any\nfunc (m *Message) HTML() ([]byte, error) {\n\treturn m.FindBody(\"text\/html\")\n}\n\nfunc findTypeInParts(contentType string, parts []*Part) *Part {\n\tfor _, p := range parts {\n\t\tmediaType, _, err := mime.ParseMediaType(p.Header.Get(\"Content-Type\"))\n\t\tif err == nil && mediaType == contentType {\n\t\t\treturn p\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Attachments returns the list of attachments on this message\n\/\/ XXX: this assumes that the only mimetype supporting attachments is multipart\/mixed\n\/\/ need to review https:\/\/en.wikipedia.org\/wiki\/MIME#Multipart_messages to ensure that is the case\nfunc (m *Message) Attachments() ([]*Part, error) {\n\tmediaType, _, err := mime.ParseMediaType(m.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparts, err := m.Parts()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar attachments []*Part\n\tif mediaType == \"multipart\/mixed\" {\n\t\tfor _, part := range parts {\n\t\t\tmediaType, _, err := mime.ParseMediaType(part.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif strings.HasPrefix(mediaType, \"multipart\/\") {\n\t\t\t\t\/\/ XXX: any cases where this would still be an attachment?\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tattachments = append(attachments, part)\n\t\t}\n\t}\n\treturn attachments, nil\n}\n\n\/\/ FindBody finds the first part of the message with the specified Content-Type\nfunc (m *Message) FindBody(contentType string) ([]byte, error) {\n\n\tmediaType, _, err := mime.ParseMediaType(m.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparts, err := m.Parts()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar alternatives []*Part\n\tswitch mediaType {\n\tcase contentType:\n\t\tif len(parts) > 0 {\n\t\t\treturn parts[0].Body, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"%v found, but no data in body\", contentType)\n\tcase \"multipart\/alternative\":\n\t\talternatives = parts\n\tdefault:\n\t\tif alt := findTypeInParts(\"multipart\/alternative\", parts); alt != nil {\n\t\t\talternatives = alt.Children\n\t\t}\n\t}\n\n\tif len(alternatives) == 0 {\n\t\treturn nil, fmt.Errorf(\"No multipart\/alternative section found, can't find %v\", contentType)\n\t}\n\n\tpart := findTypeInParts(contentType, alternatives)\n\tif part == nil {\n\t\treturn nil, fmt.Errorf(\"No %v content found in multipart\/alternative section\", contentType)\n\t}\n\n\treturn part.Body, nil\n}\n\nfunc readToPart(header textproto.MIMEHeader, content io.Reader) (*Part, error) {\n\tcte := strings.ToLower(header.Get(\"Content-Transfer-Encoding\"))\n\n\tif cte == \"quoted-printable\" {\n\t\tcontent = quotedprintable.NewReader(content)\n\t}\n\n\tslurp, err := ioutil.ReadAll(content)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif cte == \"base64\" {\n\t\tdst := make([]byte, base64.StdEncoding.DecodedLen(len(slurp)))\n\t\tdecodedLen, err := base64.StdEncoding.Decode(dst, slurp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tslurp = dst[:decodedLen]\n\t}\n\treturn &Part{\n\t\tHeader: header,\n\t\tBody: slurp,\n\t}, nil\n}\n\nfunc parseContent(header textproto.MIMEHeader, content io.Reader) ([]*Part, error) {\n\n\tmediaType, params, err := mime.ParseMediaType(header.Get(\"Content-Type\"))\n\tif err != nil && err.Error() == \"mime: no media type\" {\n\t\tmediaType = \"application\/octet-stream\"\n\t} else if err != nil {\n\t\treturn nil, fmt.Errorf(\"Media Type error: %v\", err)\n\t}\n\n\tvar parts []*Part\n\n\tif strings.HasPrefix(mediaType, \"multipart\/\") {\n\n\t\tmr := multipart.NewReader(content, params[\"boundary\"])\n\t\tfor {\n\t\t\tp, err := mr.NextPart()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t} else if err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"MIME error: %v\", err)\n\t\t\t}\n\n\t\t\tpart, err := readToPart(p.Header, p)\n\n\t\t\t\/\/ XXX: maybe want to implement a less strict mode that gets what it can out of the message\n\t\t\t\/\/ instead of erroring out on individual sections?\n\t\t\tpartType, _, err := mime.ParseMediaType(p.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif strings.HasPrefix(partType, \"multipart\/\") {\n\t\t\t\tsubParts, err := parseContent(p.Header, bytes.NewBuffer(part.Body))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tpart.Children = subParts\n\t\t\t}\n\t\t\tparts = append(parts, part)\n\t\t}\n\t} else {\n\t\tpart, err := readToPart(header, content)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparts = append(parts, part)\n\t}\n\n\treturn parts, nil\n}\n\n\/\/ Parts breaks a message body into its mime parts\nfunc (m *Message) Parts() ([]*Part, error) {\n\tparts, err := parseContent(textproto.MIMEHeader(m.Header), bytes.NewBuffer(m.RawBody))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn parts, nil\n}\n\n\/\/ NewMessage creates a Message from a data blob and a recipients list\nfunc NewMessage(data []byte, rcpt []*mail.Address, logger *log.Logger) (*Message, error) {\n\tm, err := mail.ReadMessage(bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ TODO: This isn't accurate, the To field should be all the values from RCPT TO:\n\tto, err := m.Header.AddressList(\"To\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfrom, err := m.Header.AddressList(\"From\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\theader := make(map[string]string)\n\n\tfor k, v := range m.Header {\n\t\tif len(v) == 1 {\n\t\t\theader[k] = v[0]\n\t\t}\n\t}\n\n\traw, err := ioutil.ReadAll(m.Body)\n\tif err != nil && err != io.EOF {\n\t\treturn nil, err\n\t}\n\n\treturn &Message{\n\t\trcpt: rcpt,\n\t\tTo: to,\n\t\tFrom: from[0],\n\t\tHeader: m.Header,\n\t\tSubject: m.Header.Get(\"subject\"),\n\t\tRawBody: raw,\n\t\tSource: data,\n\t\tLogger: logger,\n\t}, nil\n\n}\n<commit_msg>Don't require the To header<commit_after>package smtpd\n\nimport (\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"mime\"\n\t\"mime\/multipart\"\n\t\"mime\/quotedprintable\"\n\t\"net\/mail\"\n\t\"net\/textproto\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst idEntropy = 64\nconst letterBytes = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\nconst (\n\tletterIdxBits = 6 \/\/ 6 bits to represent a letter index\n\tletterIdxMask = 1<<letterIdxBits - 1 \/\/ All 1-bits, as many as letterIdxBits\n\tletterIdxMax = 63 \/ letterIdxBits \/\/ # of letter indices fitting in 63 bits\n)\n\n\/\/ Message is a nicely packaged representation of the\n\/\/ recieved message\ntype Message struct {\n\tTo []*mail.Address\n\tFrom *mail.Address\n\tHeader mail.Header\n\tSubject string\n\tRawBody []byte\n\tSource []byte\n\n\tmessageID string\n\tgenMessageID sync.Once\n\trcpt []*mail.Address\n\n\t\/\/ meta info\n\tLogger *log.Logger\n}\n\n\/\/ Part represents a single part of the message\ntype Part struct {\n\tHeader textproto.MIMEHeader\n\tpart *multipart.Part\n\tBody []byte\n\tChildren []*Part\n}\n\n\/\/ ID returns an identifier for this message, or generates one if none available using the masked string\n\/\/ algorithm from https:\/\/stackoverflow.com\/questions\/22892120\/how-to-generate-a-random-string-of-a-fixed-length-in-golang\nfunc (m *Message) ID() string {\n\tm.genMessageID.Do(func() {\n\t\tif m.messageID = m.Header.Get(\"Message-ID\"); m.messageID != \"\" {\n\t\t\treturn\n\t\t}\n\t\tvar src = rand.NewSource(time.Now().UnixNano())\n\n\t\tb := make([]byte, idEntropy)\n\t\t\/\/ A src.Int63() generates 63 random bits, enough for letterIdxMax characters!\n\t\tfor i, cache, remain := idEntropy-1, src.Int63(), letterIdxMax; i >= 0; {\n\t\t\tif remain == 0 {\n\t\t\t\tcache, remain = src.Int63(), letterIdxMax\n\t\t\t}\n\t\t\tif idx := int(cache & letterIdxMask); idx < len(letterBytes) {\n\t\t\t\tb[i] = letterBytes[idx]\n\t\t\t\ti--\n\t\t\t}\n\t\t\tcache >>= letterIdxBits\n\t\t\tremain--\n\t\t}\n\n\t\tm.messageID = string(b)\n\t})\n\treturn m.messageID\n}\n\n\/\/ BCC returns a list of addresses this message should be\nfunc (m *Message) BCC() []*mail.Address {\n\n\tvar inHeaders = make(map[string]struct{})\n\tfor _, to := range m.To {\n\t\tinHeaders[to.Address] = struct{}{}\n\t}\n\n\tvar bcc []*mail.Address\n\tfor _, recipient := range m.rcpt {\n\t\tif _, ok := inHeaders[recipient.Address]; !ok {\n\t\t\tbcc = append(bcc, recipient)\n\t\t}\n\t}\n\n\treturn bcc\n}\n\n\/\/ Plain returns the text\/plain content of the message, if any\nfunc (m *Message) Plain() ([]byte, error) {\n\treturn m.FindBody(\"text\/plain\")\n}\n\n\/\/ HTML returns the text\/html content of the message, if any\nfunc (m *Message) HTML() ([]byte, error) {\n\treturn m.FindBody(\"text\/html\")\n}\n\nfunc findTypeInParts(contentType string, parts []*Part) *Part {\n\tfor _, p := range parts {\n\t\tmediaType, _, err := mime.ParseMediaType(p.Header.Get(\"Content-Type\"))\n\t\tif err == nil && mediaType == contentType {\n\t\t\treturn p\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Attachments returns the list of attachments on this message\n\/\/ XXX: this assumes that the only mimetype supporting attachments is multipart\/mixed\n\/\/ need to review https:\/\/en.wikipedia.org\/wiki\/MIME#Multipart_messages to ensure that is the case\nfunc (m *Message) Attachments() ([]*Part, error) {\n\tmediaType, _, err := mime.ParseMediaType(m.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparts, err := m.Parts()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar attachments []*Part\n\tif mediaType == \"multipart\/mixed\" {\n\t\tfor _, part := range parts {\n\t\t\tmediaType, _, err := mime.ParseMediaType(part.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif strings.HasPrefix(mediaType, \"multipart\/\") {\n\t\t\t\t\/\/ XXX: any cases where this would still be an attachment?\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tattachments = append(attachments, part)\n\t\t}\n\t}\n\treturn attachments, nil\n}\n\n\/\/ FindBody finds the first part of the message with the specified Content-Type\nfunc (m *Message) FindBody(contentType string) ([]byte, error) {\n\n\tmediaType, _, err := mime.ParseMediaType(m.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparts, err := m.Parts()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar alternatives []*Part\n\tswitch mediaType {\n\tcase contentType:\n\t\tif len(parts) > 0 {\n\t\t\treturn parts[0].Body, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"%v found, but no data in body\", contentType)\n\tcase \"multipart\/alternative\":\n\t\talternatives = parts\n\tdefault:\n\t\tif alt := findTypeInParts(\"multipart\/alternative\", parts); alt != nil {\n\t\t\talternatives = alt.Children\n\t\t}\n\t}\n\n\tif len(alternatives) == 0 {\n\t\treturn nil, fmt.Errorf(\"No multipart\/alternative section found, can't find %v\", contentType)\n\t}\n\n\tpart := findTypeInParts(contentType, alternatives)\n\tif part == nil {\n\t\treturn nil, fmt.Errorf(\"No %v content found in multipart\/alternative section\", contentType)\n\t}\n\n\treturn part.Body, nil\n}\n\nfunc readToPart(header textproto.MIMEHeader, content io.Reader) (*Part, error) {\n\tcte := strings.ToLower(header.Get(\"Content-Transfer-Encoding\"))\n\n\tif cte == \"quoted-printable\" {\n\t\tcontent = quotedprintable.NewReader(content)\n\t}\n\n\tslurp, err := ioutil.ReadAll(content)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif cte == \"base64\" {\n\t\tdst := make([]byte, base64.StdEncoding.DecodedLen(len(slurp)))\n\t\tdecodedLen, err := base64.StdEncoding.Decode(dst, slurp)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tslurp = dst[:decodedLen]\n\t}\n\treturn &Part{\n\t\tHeader: header,\n\t\tBody: slurp,\n\t}, nil\n}\n\nfunc parseContent(header textproto.MIMEHeader, content io.Reader) ([]*Part, error) {\n\n\tmediaType, params, err := mime.ParseMediaType(header.Get(\"Content-Type\"))\n\tif err != nil && err.Error() == \"mime: no media type\" {\n\t\tmediaType = \"application\/octet-stream\"\n\t} else if err != nil {\n\t\treturn nil, fmt.Errorf(\"Media Type error: %v\", err)\n\t}\n\n\tvar parts []*Part\n\n\tif strings.HasPrefix(mediaType, \"multipart\/\") {\n\n\t\tmr := multipart.NewReader(content, params[\"boundary\"])\n\t\tfor {\n\t\t\tp, err := mr.NextPart()\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t} else if err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"MIME error: %v\", err)\n\t\t\t}\n\n\t\t\tpart, err := readToPart(p.Header, p)\n\n\t\t\t\/\/ XXX: maybe want to implement a less strict mode that gets what it can out of the message\n\t\t\t\/\/ instead of erroring out on individual sections?\n\t\t\tpartType, _, err := mime.ParseMediaType(p.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif strings.HasPrefix(partType, \"multipart\/\") {\n\t\t\t\tsubParts, err := parseContent(p.Header, bytes.NewBuffer(part.Body))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tpart.Children = subParts\n\t\t\t}\n\t\t\tparts = append(parts, part)\n\t\t}\n\t} else {\n\t\tpart, err := readToPart(header, content)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tparts = append(parts, part)\n\t}\n\n\treturn parts, nil\n}\n\n\/\/ Parts breaks a message body into its mime parts\nfunc (m *Message) Parts() ([]*Part, error) {\n\tparts, err := parseContent(textproto.MIMEHeader(m.Header), bytes.NewBuffer(m.RawBody))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn parts, nil\n}\n\n\/\/ NewMessage creates a Message from a data blob and a recipients list\nfunc NewMessage(data []byte, rcpt []*mail.Address, logger *log.Logger) (*Message, error) {\n\tm, err := mail.ReadMessage(bytes.NewBuffer(data))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ this is only used to differentiate normal To: recipients from BCC:\n\t\/\/ recipients in tests. The To: header explicitly not required by RFC 2822\n\t\/\/ 3.6.\n\tto, err := m.Header.AddressList(\"To\")\n\tif err != nil && err != mail.ErrHeaderNotPresent {\n\t\treturn nil, err\n\t}\n\n\tfrom, err := m.Header.AddressList(\"From\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\theader := make(map[string]string)\n\n\tfor k, v := range m.Header {\n\t\tif len(v) == 1 {\n\t\t\theader[k] = v[0]\n\t\t}\n\t}\n\n\traw, err := ioutil.ReadAll(m.Body)\n\tif err != nil && err != io.EOF {\n\t\treturn nil, err\n\t}\n\n\treturn &Message{\n\t\trcpt: rcpt,\n\t\tTo: to,\n\t\tFrom: from[0],\n\t\tHeader: m.Header,\n\t\tSubject: m.Header.Get(\"subject\"),\n\t\tRawBody: raw,\n\t\tSource: data,\n\t\tLogger: logger,\n\t}, nil\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Discordgo - Discord bindings for Go\n\/\/ Available at https:\/\/github.com\/bwmarrin\/discordgo\n\n\/\/ Copyright 2015-2016 Bruce Marriner <bruce@sqls.net>. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file contains code related to the Message struct\n\npackage discordgo\n\nimport (\n\t\"io\"\n\t\"regexp\"\n\t\"strings\"\n)\n\n\/\/ MessageType is the type of Message\ntype MessageType int\n\n\/\/ Block contains the valid known MessageType values\nconst (\n\tMessageTypeDefault MessageType = iota\n\tMessageTypeRecipientAdd\n\tMessageTypeRecipientRemove\n\tMessageTypeCall\n\tMessageTypeChannelNameChange\n\tMessageTypeChannelIconChange\n\tMessageTypeChannelPinnedMessage\n\tMessageTypeGuildMemberJoin\n\tMessageTypeUserPremiumGuildSubscription\n\tMessageTypeUserPremiumGuildSubscriptionTierOne\n\tMessageTypeUserPremiumGuildSubscriptionTierTwo\n\tMessageTypeUserPremiumGuildSubscriptionTierThree\n\tMessageTypeChannelFollowAdd\n)\n\n\/\/ A Message stores all data related to a specific Discord message.\ntype Message struct {\n\t\/\/ The ID of the message.\n\tID string `json:\"id\"`\n\n\t\/\/ The ID of the channel in which the message was sent.\n\tChannelID string `json:\"channel_id\"`\n\n\t\/\/ The ID of the guild in which the message was sent.\n\tGuildID string `json:\"guild_id,omitempty\"`\n\n\t\/\/ The content of the message.\n\tContent string `json:\"content\"`\n\n\t\/\/ The time at which the messsage was sent.\n\t\/\/ CAUTION: this field may be removed in a\n\t\/\/ future API version; it is safer to calculate\n\t\/\/ the creation time via the ID.\n\tTimestamp Timestamp `json:\"timestamp\"`\n\n\t\/\/ The time at which the last edit of the message\n\t\/\/ occurred, if it has been edited.\n\tEditedTimestamp Timestamp `json:\"edited_timestamp\"`\n\n\t\/\/ The roles mentioned in the message.\n\tMentionRoles []string `json:\"mention_roles\"`\n\n\t\/\/ Whether the message is text-to-speech.\n\tTts bool `json:\"tts\"`\n\n\t\/\/ Whether the message mentions everyone.\n\tMentionEveryone bool `json:\"mention_everyone\"`\n\n\t\/\/ The author of the message. This is not guaranteed to be a\n\t\/\/ valid user (webhook-sent messages do not possess a full author).\n\tAuthor *User `json:\"author\"`\n\n\t\/\/ A list of attachments present in the message.\n\tAttachments []*MessageAttachment `json:\"attachments\"`\n\n\t\/\/ A list of embeds present in the message. Multiple\n\t\/\/ embeds can currently only be sent by webhooks.\n\tEmbeds []*MessageEmbed `json:\"embeds\"`\n\n\t\/\/ A list of users mentioned in the message.\n\tMentions []*User `json:\"mentions\"`\n\n\t\/\/ A list of reactions to the message.\n\tReactions []*MessageReactions `json:\"reactions\"`\n\n\t\/\/ Whether the message is pinned or not.\n\tPinned bool `json:\"pinned\"`\n\n\t\/\/ The type of the message.\n\tType MessageType `json:\"type\"`\n\n\t\/\/ The webhook ID of the message, if it was generated by a webhook\n\tWebhookID string `json:\"webhook_id\"`\n\n\t\/\/ Member properties for this message's author,\n\t\/\/ contains only partial information\n\tMember *Member `json:\"member\"`\n\n\t\/\/ Channels specifically mentioned in this message\n\t\/\/ Not all channel mentions in a message will appear in mention_channels.\n\t\/\/ Only textual channels that are visible to everyone in a lurkable guild will ever be included.\n\t\/\/ Only crossposted messages (via Channel Following) currently include mention_channels at all.\n\t\/\/ If no mentions in the message meet these requirements, this field will not be sent.\n\tMentionChannels []*Channel `json:\"mention_channels\"`\n\n\t\/\/ Is sent with Rich Presence-related chat embeds\n\tActivity *MessageActivity `json:\"activity\"`\n\n\t\/\/ Is sent with Rich Presence-related chat embeds\n\tApplication *MessageApplication `json:\"application\"`\n\n\t\/\/ MessageReference contains reference data sent with crossposted messages\n\tMessageReference *MessageReference `json:\"message_reference\"`\n\n\t\/\/ The flags of the message, which describe extra features of a message.\n\t\/\/ This is a combination of bit masks; the presence of a certain permission can\n\t\/\/ be checked by performing a bitwise AND between this int and the flag.\n\tFlags int `json:\"flags\"`\n}\n\n\/\/ File stores info about files you e.g. send in messages.\ntype File struct {\n\tName string\n\tContentType string\n\tReader io.Reader\n}\n\n\/\/ MessageSend stores all parameters you can send with ChannelMessageSendComplex.\ntype MessageSend struct {\n\tContent string `json:\"content,omitempty\"`\n\tEmbed *MessageEmbed `json:\"embed,omitempty\"`\n\tTts bool `json:\"tts\"`\n\tFiles []*File `json:\"-\"`\n\n\t\/\/ TODO: Remove this when compatibility is not required.\n\tFile *File `json:\"-\"`\n}\n\n\/\/ MessageEdit is used to chain parameters via ChannelMessageEditComplex, which\n\/\/ is also where you should get the instance from.\ntype MessageEdit struct {\n\tContent *string `json:\"content,omitempty\"`\n\tEmbed *MessageEmbed `json:\"embed,omitempty\"`\n\n\tID string\n\tChannel string\n}\n\n\/\/ NewMessageEdit returns a MessageEdit struct, initialized\n\/\/ with the Channel and ID.\nfunc NewMessageEdit(channelID string, messageID string) *MessageEdit {\n\treturn &MessageEdit{\n\t\tChannel: channelID,\n\t\tID: messageID,\n\t}\n}\n\n\/\/ SetContent is the same as setting the variable Content,\n\/\/ except it doesn't take a pointer.\nfunc (m *MessageEdit) SetContent(str string) *MessageEdit {\n\tm.Content = &str\n\treturn m\n}\n\n\/\/ SetEmbed is a convenience function for setting the embed,\n\/\/ so you can chain commands.\nfunc (m *MessageEdit) SetEmbed(embed *MessageEmbed) *MessageEdit {\n\tm.Embed = embed\n\treturn m\n}\n\n\/\/ A MessageAttachment stores data for message attachments.\ntype MessageAttachment struct {\n\tID string `json:\"id\"`\n\tURL string `json:\"url\"`\n\tProxyURL string `json:\"proxy_url\"`\n\tFilename string `json:\"filename\"`\n\tWidth int `json:\"width\"`\n\tHeight int `json:\"height\"`\n\tSize int `json:\"size\"`\n}\n\n\/\/ MessageEmbedFooter is a part of a MessageEmbed struct.\ntype MessageEmbedFooter struct {\n\tText string `json:\"text,omitempty\"`\n\tIconURL string `json:\"icon_url,omitempty\"`\n\tProxyIconURL string `json:\"proxy_icon_url,omitempty\"`\n}\n\n\/\/ MessageEmbedImage is a part of a MessageEmbed struct.\ntype MessageEmbedImage struct {\n\tURL string `json:\"url,omitempty\"`\n\tProxyURL string `json:\"proxy_url,omitempty\"`\n\tWidth int `json:\"width,omitempty\"`\n\tHeight int `json:\"height,omitempty\"`\n}\n\n\/\/ MessageEmbedThumbnail is a part of a MessageEmbed struct.\ntype MessageEmbedThumbnail struct {\n\tURL string `json:\"url,omitempty\"`\n\tProxyURL string `json:\"proxy_url,omitempty\"`\n\tWidth int `json:\"width,omitempty\"`\n\tHeight int `json:\"height,omitempty\"`\n}\n\n\/\/ MessageEmbedVideo is a part of a MessageEmbed struct.\ntype MessageEmbedVideo struct {\n\tURL string `json:\"url,omitempty\"`\n\tProxyURL string `json:\"proxy_url,omitempty\"`\n\tWidth int `json:\"width,omitempty\"`\n\tHeight int `json:\"height,omitempty\"`\n}\n\n\/\/ MessageEmbedProvider is a part of a MessageEmbed struct.\ntype MessageEmbedProvider struct {\n\tURL string `json:\"url,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n}\n\n\/\/ MessageEmbedAuthor is a part of a MessageEmbed struct.\ntype MessageEmbedAuthor struct {\n\tURL string `json:\"url,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tIconURL string `json:\"icon_url,omitempty\"`\n\tProxyIconURL string `json:\"proxy_icon_url,omitempty\"`\n}\n\n\/\/ MessageEmbedField is a part of a MessageEmbed struct.\ntype MessageEmbedField struct {\n\tName string `json:\"name,omitempty\"`\n\tValue string `json:\"value,omitempty\"`\n\tInline bool `json:\"inline,omitempty\"`\n}\n\n\/\/ An MessageEmbed stores data for message embeds.\ntype MessageEmbed struct {\n\tURL string `json:\"url,omitempty\"`\n\tType string `json:\"type,omitempty\"`\n\tTitle string `json:\"title,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tTimestamp string `json:\"timestamp,omitempty\"`\n\tColor int `json:\"color,omitempty\"`\n\tFooter *MessageEmbedFooter `json:\"footer,omitempty\"`\n\tImage *MessageEmbedImage `json:\"image,omitempty\"`\n\tThumbnail *MessageEmbedThumbnail `json:\"thumbnail,omitempty\"`\n\tVideo *MessageEmbedVideo `json:\"video,omitempty\"`\n\tProvider *MessageEmbedProvider `json:\"provider,omitempty\"`\n\tAuthor *MessageEmbedAuthor `json:\"author,omitempty\"`\n\tFields []*MessageEmbedField `json:\"fields,omitempty\"`\n}\n\n\/\/ MessageReactions holds a reactions object for a message.\ntype MessageReactions struct {\n\tCount int `json:\"count\"`\n\tMe bool `json:\"me\"`\n\tEmoji *Emoji `json:\"emoji\"`\n}\n\n\/\/ MessageActivity is sent with Rich Presence-related chat embeds\ntype MessageActivity struct {\n\tType MessageActivityType `json:\"type\"`\n\tPartyID string `json:\"party_id\"`\n}\n\n\/\/ MessageActivityType is the type of message activity\ntype MessageActivityType int\n\nconst (\n\tMessageActivityTypeJoin = iota + 1\n\tMessageActivityTypeSpectate\n\tMessageActivityTypeListen\n\tMessageActivityTypeJoinRequest\n)\n\n\/\/ MessageFlag describes an extra feature of the message\ntype MessageFlag int\n\n\/\/ Constants for the different bit offsets of Message Flags\nconst (\n\t\/\/ This message has been published to subscribed channels (via Channel Following)\n\tMessageFlagCrossposted = 1 << iota\n\t\/\/ This message originated from a message in another channel (via Channel Following)\n\tMessageFlagIsCrosspost\n\t\/\/ Do not include any embeds when serializing this message\n\tMessageFlagSuppressEmbeds\n)\n\n\/\/ MessageApplication is sent with Rich Presence-related chat embeds\ntype MessageApplication struct {\n\tID string `json:\"id\"`\n\tCoverImage string `json:\"cover_image\"`\n\tDescription string `json:\"description\"`\n\tIcon string `json:\"icon\"`\n\tName string `json:\"name\"`\n}\n\n\/\/ MessageReference contains reference data sent with crossposted messages\ntype MessageReference struct {\n\tMessageID string `json:\"message_id\"`\n\tChannelID string `json:\"channel_id\"`\n\tGuildID string `json:\"guild_id\"`\n}\n\n\/\/ ContentWithMentionsReplaced will replace all @<id> mentions with the\n\/\/ username of the mention.\nfunc (m *Message) ContentWithMentionsReplaced() (content string) {\n\tcontent = m.Content\n\n\tfor _, user := range m.Mentions {\n\t\tcontent = strings.NewReplacer(\n\t\t\t\"<@\"+user.ID+\">\", \"@\"+user.Username,\n\t\t\t\"<@!\"+user.ID+\">\", \"@\"+user.Username,\n\t\t).Replace(content)\n\t}\n\treturn\n}\n\nvar patternChannels = regexp.MustCompile(\"<#[^>]*>\")\n\n\/\/ ContentWithMoreMentionsReplaced will replace all @<id> mentions with the\n\/\/ username of the mention, but also role IDs and more.\nfunc (m *Message) ContentWithMoreMentionsReplaced(s *Session) (content string, err error) {\n\tcontent = m.Content\n\n\tif !s.StateEnabled {\n\t\tcontent = m.ContentWithMentionsReplaced()\n\t\treturn\n\t}\n\n\tchannel, err := s.State.Channel(m.ChannelID)\n\tif err != nil {\n\t\tcontent = m.ContentWithMentionsReplaced()\n\t\treturn\n\t}\n\n\tfor _, user := range m.Mentions {\n\t\tnick := user.Username\n\n\t\tmember, err := s.State.Member(channel.GuildID, user.ID)\n\t\tif err == nil && member.Nick != \"\" {\n\t\t\tnick = member.Nick\n\t\t}\n\n\t\tcontent = strings.NewReplacer(\n\t\t\t\"<@\"+user.ID+\">\", \"@\"+user.Username,\n\t\t\t\"<@!\"+user.ID+\">\", \"@\"+nick,\n\t\t).Replace(content)\n\t}\n\tfor _, roleID := range m.MentionRoles {\n\t\trole, err := s.State.Role(channel.GuildID, roleID)\n\t\tif err != nil || !role.Mentionable {\n\t\t\tcontinue\n\t\t}\n\n\t\tcontent = strings.Replace(content, \"<@&\"+role.ID+\">\", \"@\"+role.Name, -1)\n\t}\n\n\tcontent = patternChannels.ReplaceAllStringFunc(content, func(mention string) string {\n\t\tchannel, err := s.State.Channel(mention[2 : len(mention)-1])\n\t\tif err != nil || channel.Type == ChannelTypeGuildVoice {\n\t\t\treturn mention\n\t\t}\n\n\t\treturn \"#\" + channel.Name\n\t})\n\treturn\n}\n<commit_msg>fixes lint<commit_after>\/\/ Discordgo - Discord bindings for Go\n\/\/ Available at https:\/\/github.com\/bwmarrin\/discordgo\n\n\/\/ Copyright 2015-2016 Bruce Marriner <bruce@sqls.net>. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file contains code related to the Message struct\n\npackage discordgo\n\nimport (\n\t\"io\"\n\t\"regexp\"\n\t\"strings\"\n)\n\n\/\/ MessageType is the type of Message\ntype MessageType int\n\n\/\/ Block contains the valid known MessageType values\nconst (\n\tMessageTypeDefault MessageType = iota\n\tMessageTypeRecipientAdd\n\tMessageTypeRecipientRemove\n\tMessageTypeCall\n\tMessageTypeChannelNameChange\n\tMessageTypeChannelIconChange\n\tMessageTypeChannelPinnedMessage\n\tMessageTypeGuildMemberJoin\n\tMessageTypeUserPremiumGuildSubscription\n\tMessageTypeUserPremiumGuildSubscriptionTierOne\n\tMessageTypeUserPremiumGuildSubscriptionTierTwo\n\tMessageTypeUserPremiumGuildSubscriptionTierThree\n\tMessageTypeChannelFollowAdd\n)\n\n\/\/ A Message stores all data related to a specific Discord message.\ntype Message struct {\n\t\/\/ The ID of the message.\n\tID string `json:\"id\"`\n\n\t\/\/ The ID of the channel in which the message was sent.\n\tChannelID string `json:\"channel_id\"`\n\n\t\/\/ The ID of the guild in which the message was sent.\n\tGuildID string `json:\"guild_id,omitempty\"`\n\n\t\/\/ The content of the message.\n\tContent string `json:\"content\"`\n\n\t\/\/ The time at which the messsage was sent.\n\t\/\/ CAUTION: this field may be removed in a\n\t\/\/ future API version; it is safer to calculate\n\t\/\/ the creation time via the ID.\n\tTimestamp Timestamp `json:\"timestamp\"`\n\n\t\/\/ The time at which the last edit of the message\n\t\/\/ occurred, if it has been edited.\n\tEditedTimestamp Timestamp `json:\"edited_timestamp\"`\n\n\t\/\/ The roles mentioned in the message.\n\tMentionRoles []string `json:\"mention_roles\"`\n\n\t\/\/ Whether the message is text-to-speech.\n\tTts bool `json:\"tts\"`\n\n\t\/\/ Whether the message mentions everyone.\n\tMentionEveryone bool `json:\"mention_everyone\"`\n\n\t\/\/ The author of the message. This is not guaranteed to be a\n\t\/\/ valid user (webhook-sent messages do not possess a full author).\n\tAuthor *User `json:\"author\"`\n\n\t\/\/ A list of attachments present in the message.\n\tAttachments []*MessageAttachment `json:\"attachments\"`\n\n\t\/\/ A list of embeds present in the message. Multiple\n\t\/\/ embeds can currently only be sent by webhooks.\n\tEmbeds []*MessageEmbed `json:\"embeds\"`\n\n\t\/\/ A list of users mentioned in the message.\n\tMentions []*User `json:\"mentions\"`\n\n\t\/\/ A list of reactions to the message.\n\tReactions []*MessageReactions `json:\"reactions\"`\n\n\t\/\/ Whether the message is pinned or not.\n\tPinned bool `json:\"pinned\"`\n\n\t\/\/ The type of the message.\n\tType MessageType `json:\"type\"`\n\n\t\/\/ The webhook ID of the message, if it was generated by a webhook\n\tWebhookID string `json:\"webhook_id\"`\n\n\t\/\/ Member properties for this message's author,\n\t\/\/ contains only partial information\n\tMember *Member `json:\"member\"`\n\n\t\/\/ Channels specifically mentioned in this message\n\t\/\/ Not all channel mentions in a message will appear in mention_channels.\n\t\/\/ Only textual channels that are visible to everyone in a lurkable guild will ever be included.\n\t\/\/ Only crossposted messages (via Channel Following) currently include mention_channels at all.\n\t\/\/ If no mentions in the message meet these requirements, this field will not be sent.\n\tMentionChannels []*Channel `json:\"mention_channels\"`\n\n\t\/\/ Is sent with Rich Presence-related chat embeds\n\tActivity *MessageActivity `json:\"activity\"`\n\n\t\/\/ Is sent with Rich Presence-related chat embeds\n\tApplication *MessageApplication `json:\"application\"`\n\n\t\/\/ MessageReference contains reference data sent with crossposted messages\n\tMessageReference *MessageReference `json:\"message_reference\"`\n\n\t\/\/ The flags of the message, which describe extra features of a message.\n\t\/\/ This is a combination of bit masks; the presence of a certain permission can\n\t\/\/ be checked by performing a bitwise AND between this int and the flag.\n\tFlags int `json:\"flags\"`\n}\n\n\/\/ File stores info about files you e.g. send in messages.\ntype File struct {\n\tName string\n\tContentType string\n\tReader io.Reader\n}\n\n\/\/ MessageSend stores all parameters you can send with ChannelMessageSendComplex.\ntype MessageSend struct {\n\tContent string `json:\"content,omitempty\"`\n\tEmbed *MessageEmbed `json:\"embed,omitempty\"`\n\tTts bool `json:\"tts\"`\n\tFiles []*File `json:\"-\"`\n\n\t\/\/ TODO: Remove this when compatibility is not required.\n\tFile *File `json:\"-\"`\n}\n\n\/\/ MessageEdit is used to chain parameters via ChannelMessageEditComplex, which\n\/\/ is also where you should get the instance from.\ntype MessageEdit struct {\n\tContent *string `json:\"content,omitempty\"`\n\tEmbed *MessageEmbed `json:\"embed,omitempty\"`\n\n\tID string\n\tChannel string\n}\n\n\/\/ NewMessageEdit returns a MessageEdit struct, initialized\n\/\/ with the Channel and ID.\nfunc NewMessageEdit(channelID string, messageID string) *MessageEdit {\n\treturn &MessageEdit{\n\t\tChannel: channelID,\n\t\tID: messageID,\n\t}\n}\n\n\/\/ SetContent is the same as setting the variable Content,\n\/\/ except it doesn't take a pointer.\nfunc (m *MessageEdit) SetContent(str string) *MessageEdit {\n\tm.Content = &str\n\treturn m\n}\n\n\/\/ SetEmbed is a convenience function for setting the embed,\n\/\/ so you can chain commands.\nfunc (m *MessageEdit) SetEmbed(embed *MessageEmbed) *MessageEdit {\n\tm.Embed = embed\n\treturn m\n}\n\n\/\/ A MessageAttachment stores data for message attachments.\ntype MessageAttachment struct {\n\tID string `json:\"id\"`\n\tURL string `json:\"url\"`\n\tProxyURL string `json:\"proxy_url\"`\n\tFilename string `json:\"filename\"`\n\tWidth int `json:\"width\"`\n\tHeight int `json:\"height\"`\n\tSize int `json:\"size\"`\n}\n\n\/\/ MessageEmbedFooter is a part of a MessageEmbed struct.\ntype MessageEmbedFooter struct {\n\tText string `json:\"text,omitempty\"`\n\tIconURL string `json:\"icon_url,omitempty\"`\n\tProxyIconURL string `json:\"proxy_icon_url,omitempty\"`\n}\n\n\/\/ MessageEmbedImage is a part of a MessageEmbed struct.\ntype MessageEmbedImage struct {\n\tURL string `json:\"url,omitempty\"`\n\tProxyURL string `json:\"proxy_url,omitempty\"`\n\tWidth int `json:\"width,omitempty\"`\n\tHeight int `json:\"height,omitempty\"`\n}\n\n\/\/ MessageEmbedThumbnail is a part of a MessageEmbed struct.\ntype MessageEmbedThumbnail struct {\n\tURL string `json:\"url,omitempty\"`\n\tProxyURL string `json:\"proxy_url,omitempty\"`\n\tWidth int `json:\"width,omitempty\"`\n\tHeight int `json:\"height,omitempty\"`\n}\n\n\/\/ MessageEmbedVideo is a part of a MessageEmbed struct.\ntype MessageEmbedVideo struct {\n\tURL string `json:\"url,omitempty\"`\n\tProxyURL string `json:\"proxy_url,omitempty\"`\n\tWidth int `json:\"width,omitempty\"`\n\tHeight int `json:\"height,omitempty\"`\n}\n\n\/\/ MessageEmbedProvider is a part of a MessageEmbed struct.\ntype MessageEmbedProvider struct {\n\tURL string `json:\"url,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n}\n\n\/\/ MessageEmbedAuthor is a part of a MessageEmbed struct.\ntype MessageEmbedAuthor struct {\n\tURL string `json:\"url,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tIconURL string `json:\"icon_url,omitempty\"`\n\tProxyIconURL string `json:\"proxy_icon_url,omitempty\"`\n}\n\n\/\/ MessageEmbedField is a part of a MessageEmbed struct.\ntype MessageEmbedField struct {\n\tName string `json:\"name,omitempty\"`\n\tValue string `json:\"value,omitempty\"`\n\tInline bool `json:\"inline,omitempty\"`\n}\n\n\/\/ An MessageEmbed stores data for message embeds.\ntype MessageEmbed struct {\n\tURL string `json:\"url,omitempty\"`\n\tType string `json:\"type,omitempty\"`\n\tTitle string `json:\"title,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tTimestamp string `json:\"timestamp,omitempty\"`\n\tColor int `json:\"color,omitempty\"`\n\tFooter *MessageEmbedFooter `json:\"footer,omitempty\"`\n\tImage *MessageEmbedImage `json:\"image,omitempty\"`\n\tThumbnail *MessageEmbedThumbnail `json:\"thumbnail,omitempty\"`\n\tVideo *MessageEmbedVideo `json:\"video,omitempty\"`\n\tProvider *MessageEmbedProvider `json:\"provider,omitempty\"`\n\tAuthor *MessageEmbedAuthor `json:\"author,omitempty\"`\n\tFields []*MessageEmbedField `json:\"fields,omitempty\"`\n}\n\n\/\/ MessageReactions holds a reactions object for a message.\ntype MessageReactions struct {\n\tCount int `json:\"count\"`\n\tMe bool `json:\"me\"`\n\tEmoji *Emoji `json:\"emoji\"`\n}\n\n\/\/ MessageActivity is sent with Rich Presence-related chat embeds\ntype MessageActivity struct {\n\tType MessageActivityType `json:\"type\"`\n\tPartyID string `json:\"party_id\"`\n}\n\n\/\/ MessageActivityType is the type of message activity\ntype MessageActivityType int\n\n\/\/ Constants for the different types of Message Activity\nconst (\n\tMessageActivityTypeJoin = iota + 1\n\tMessageActivityTypeSpectate\n\tMessageActivityTypeListen\n\tMessageActivityTypeJoinRequest\n)\n\n\/\/ MessageFlag describes an extra feature of the message\ntype MessageFlag int\n\n\/\/ Constants for the different bit offsets of Message Flags\nconst (\n\t\/\/ This message has been published to subscribed channels (via Channel Following)\n\tMessageFlagCrossposted = 1 << iota\n\t\/\/ This message originated from a message in another channel (via Channel Following)\n\tMessageFlagIsCrosspost\n\t\/\/ Do not include any embeds when serializing this message\n\tMessageFlagSuppressEmbeds\n)\n\n\/\/ MessageApplication is sent with Rich Presence-related chat embeds\ntype MessageApplication struct {\n\tID string `json:\"id\"`\n\tCoverImage string `json:\"cover_image\"`\n\tDescription string `json:\"description\"`\n\tIcon string `json:\"icon\"`\n\tName string `json:\"name\"`\n}\n\n\/\/ MessageReference contains reference data sent with crossposted messages\ntype MessageReference struct {\n\tMessageID string `json:\"message_id\"`\n\tChannelID string `json:\"channel_id\"`\n\tGuildID string `json:\"guild_id\"`\n}\n\n\/\/ ContentWithMentionsReplaced will replace all @<id> mentions with the\n\/\/ username of the mention.\nfunc (m *Message) ContentWithMentionsReplaced() (content string) {\n\tcontent = m.Content\n\n\tfor _, user := range m.Mentions {\n\t\tcontent = strings.NewReplacer(\n\t\t\t\"<@\"+user.ID+\">\", \"@\"+user.Username,\n\t\t\t\"<@!\"+user.ID+\">\", \"@\"+user.Username,\n\t\t).Replace(content)\n\t}\n\treturn\n}\n\nvar patternChannels = regexp.MustCompile(\"<#[^>]*>\")\n\n\/\/ ContentWithMoreMentionsReplaced will replace all @<id> mentions with the\n\/\/ username of the mention, but also role IDs and more.\nfunc (m *Message) ContentWithMoreMentionsReplaced(s *Session) (content string, err error) {\n\tcontent = m.Content\n\n\tif !s.StateEnabled {\n\t\tcontent = m.ContentWithMentionsReplaced()\n\t\treturn\n\t}\n\n\tchannel, err := s.State.Channel(m.ChannelID)\n\tif err != nil {\n\t\tcontent = m.ContentWithMentionsReplaced()\n\t\treturn\n\t}\n\n\tfor _, user := range m.Mentions {\n\t\tnick := user.Username\n\n\t\tmember, err := s.State.Member(channel.GuildID, user.ID)\n\t\tif err == nil && member.Nick != \"\" {\n\t\t\tnick = member.Nick\n\t\t}\n\n\t\tcontent = strings.NewReplacer(\n\t\t\t\"<@\"+user.ID+\">\", \"@\"+user.Username,\n\t\t\t\"<@!\"+user.ID+\">\", \"@\"+nick,\n\t\t).Replace(content)\n\t}\n\tfor _, roleID := range m.MentionRoles {\n\t\trole, err := s.State.Role(channel.GuildID, roleID)\n\t\tif err != nil || !role.Mentionable {\n\t\t\tcontinue\n\t\t}\n\n\t\tcontent = strings.Replace(content, \"<@&\"+role.ID+\">\", \"@\"+role.Name, -1)\n\t}\n\n\tcontent = patternChannels.ReplaceAllStringFunc(content, func(mention string) string {\n\t\tchannel, err := s.State.Channel(mention[2 : len(mention)-1])\n\t\tif err != nil || channel.Type == ChannelTypeGuildVoice {\n\t\t\treturn mention\n\t\t}\n\n\t\treturn \"#\" + channel.Name\n\t})\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package modules\n\nimport (\n\t\"fmt\"\n\n\t\"bitbucket.org\/cihangirsavas\/gene\/generators\/folders\"\n\t\"bitbucket.org\/cihangirsavas\/gene\/generators\/handlers\"\n\t\"bitbucket.org\/cihangirsavas\/gene\/schema\"\n\n\t\"bitbucket.org\/cihangirsavas\/gene\/stringext\"\n)\n\ntype Module struct {\n\tschema *schema.Schema\n}\n\nfunc NewModule(s *schema.Schema) *Module {\n\treturn &Module{schema: s}\n}\n\nfunc (m *Module) Create() error {\n\trootPath := \".\/\"\n\n\t\/\/ first ensure that we have the correct folder structure for our system\n\tif err := folders.EnsureFolders(\n\t\trootPath, \/\/ root folder\n\t\tfolders.FolderStucture,\n\t); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ create the module folder structure\n\tif err := folders.EnsureFolders(\n\t\trootPath, \/\/ root folder\n\t\tcreateModuleStructure(stringext.ToLowerFirst(\n\t\t\tm.schema.Title,\n\t\t)),\n\t); err != nil {\n\t\treturn err\n\t}\n\n\tif err := handlers.Generate(rootPath, m.schema.Title); err != nil {\n\t\treturn err\n\t}\n\n\tif err := m.GenerateMainFile(rootPath); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nvar moduleFolderStucture = []string{\n\t\"gene\/modules\/%[1]s\",\n\t\"gene\/modules\/%[1]s\/api\",\n\t\"gene\/modules\/%[1]s\/%[1]s\",\n\t\"gene\/modules\/%[1]s\/cmd\",\n\t\"gene\/modules\/%[1]s\/cmd\/%[1]s\",\n\t\"gene\/modules\/%[1]s\/tests\",\n\t\"gene\/modules\/%[1]s\/errors\",\n\t\"gene\/modules\/%[1]s\/handlers\",\n}\n\nfunc createModuleStructure(name string) []string {\n\tmodified := make([]string, len(moduleFolderStucture))\n\tfor i, str := range moduleFolderStucture {\n\t\tmodified[i] = fmt.Sprintf(str, name)\n\t}\n\n\treturn modified\n}\n<commit_msg>Module: added model creation into module creator<commit_after>package modules\n\nimport (\n\t\"fmt\"\n\n\t\"bitbucket.org\/cihangirsavas\/gene\/generators\/folders\"\n\t\"bitbucket.org\/cihangirsavas\/gene\/generators\/handlers\"\n\t\"bitbucket.org\/cihangirsavas\/gene\/generators\/models\"\n\t\"bitbucket.org\/cihangirsavas\/gene\/schema\"\n\n\t\"bitbucket.org\/cihangirsavas\/gene\/stringext\"\n)\n\ntype Module struct {\n\tschema *schema.Schema\n}\n\nfunc NewModule(s *schema.Schema) *Module {\n\treturn &Module{schema: s}\n}\n\nfunc (m *Module) Create() error {\n\trootPath := \".\/\"\n\n\t\/\/ first ensure that we have the correct folder structure for our system\n\tif err := folders.EnsureFolders(\n\t\trootPath, \/\/ root folder\n\t\tfolders.FolderStucture,\n\t); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ create the module folder structure\n\tif err := folders.EnsureFolders(\n\t\trootPath, \/\/ root folder\n\t\tcreateModuleStructure(stringext.ToLowerFirst(\n\t\t\tm.schema.Title,\n\t\t)),\n\t); err != nil {\n\t\treturn err\n\t}\n\n\tif err := models.Generate(rootPath, m.schema); err != nil {\n\t\treturn err\n\t}\n\n\tif err := handlers.Generate(rootPath, m.schema.Title); err != nil {\n\t\treturn err\n\t}\n\n\tif err := m.GenerateMainFile(rootPath); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nvar moduleFolderStucture = []string{\n\t\"gene\/modules\/%[1]s\",\n\t\"gene\/modules\/%[1]s\/api\",\n\t\"gene\/modules\/%[1]s\/%[1]s\",\n\t\"gene\/modules\/%[1]s\/cmd\",\n\t\"gene\/modules\/%[1]s\/cmd\/%[1]s\",\n\t\"gene\/modules\/%[1]s\/tests\",\n\t\"gene\/modules\/%[1]s\/errors\",\n\t\"gene\/modules\/%[1]s\/handlers\",\n}\n\nfunc createModuleStructure(name string) []string {\n\tmodified := make([]string, len(moduleFolderStucture))\n\tfor i, str := range moduleFolderStucture {\n\t\tmodified[i] = fmt.Sprintf(str, name)\n\t}\n\n\treturn modified\n}\n<|endoftext|>"} {"text":"<commit_before>package pcp\n\nimport (\n\t\"errors\"\n\t\"hash\/fnv\"\n)\n\n\/\/ MetricType is an enumerated type representing all valid types for a metric\ntype MetricType int32\n\n\/\/ Possible values for a MetricType\nconst (\n\tNoSupportType MetricType = iota\n\tInt32Type MetricType = iota\n\tUint32Type MetricType = iota\n\tInt64Type MetricType = iota\n\tUint64Type MetricType = iota\n\tFloatType MetricType = iota\n\tDoubleType MetricType = iota\n\tStringType MetricType = iota\n\tAggregateType MetricType = iota\n\tAggregateStaticType MetricType = iota\n\tEventType MetricType = iota\n\tHighresEventType MetricType = iota\n\tUnknownType MetricType = iota\n)\n\nfunc (mt MetricType) String() string {\n\tswitch mt {\n\tcase NoSupportType:\n\t\treturn \"Type: No Support\"\n\tcase Int32Type:\n\t\treturn \"Type: Int32\"\n\tcase Int64Type:\n\t\treturn \"Type: Int64\"\n\tcase Uint32Type:\n\t\treturn \"Type: Uint32\"\n\tcase Uint64Type:\n\t\treturn \"Type: Uint64\"\n\tcase FloatType:\n\t\treturn \"Type: Float\"\n\tcase DoubleType:\n\t\treturn \"Type: Double\"\n\tcase StringType:\n\t\treturn \"Type: String\"\n\tcase AggregateType:\n\t\treturn \"Type: Aggregate\"\n\tcase AggregateStaticType:\n\t\treturn \"Type: Aggregate Static\"\n\tcase EventType:\n\t\treturn \"Type: Event\"\n\tcase HighresEventType:\n\t\treturn \"Type: Highres Event\"\n\tcase UnknownType:\n\t\treturn \"Type: Unknown\"\n\tdefault:\n\t\treturn \"Type: Invalid\"\n\t}\n}\n\n\/\/ MetricUnit is an enumerated type representing all possible values for a valid PCP unit\ntype MetricUnit int32\n\n\/\/ SpaceUnit is an enumerated type representing all units for space\ntype SpaceUnit MetricUnit\n\n\/\/ Possible values for SpaceUnit\nconst (\n\tByteUnit SpaceUnit = iota\n\tKilobyteUnit SpaceUnit = iota\n\tMegabyteUnit SpaceUnit = iota\n\tGigabyteUnit SpaceUnit = iota\n\tTerabyteUnit SpaceUnit = iota\n\tPetabyteUnit SpaceUnit = iota\n\tExabyteUnit SpaceUnit = iota\n)\n\nfunc (su SpaceUnit) String() string {\n\tswitch su {\n\tcase ByteUnit:\n\t\treturn \"Unit: Byte\"\n\tcase KilobyteUnit:\n\t\treturn \"Unit: Kilobyte\"\n\tcase MegabyteUnit:\n\t\treturn \"Unit: Megabyte\"\n\tcase GigabyteUnit:\n\t\treturn \"Unit: Gigabyte\"\n\tcase TerabyteUnit:\n\t\treturn \"Unit: Terabyte\"\n\tcase PetabyteUnit:\n\t\treturn \"Unit: Petabyte\"\n\tcase ExabyteUnit:\n\t\treturn \"Unit: Exabyte\"\n\tdefault:\n\t\treturn \"Unit: Invalid SpaceUnit\"\n\t}\n}\n\n\/\/ TimeUnit is an enumerated type representing all possible units for representing time\ntype TimeUnit MetricUnit\n\n\/\/ Possible Values for TimeUnit\nconst (\n\tNanosecondUnit TimeUnit = iota\n\tMicrosecondUnit TimeUnit = iota\n\tMillisecondUnit TimeUnit = iota\n\tSecondUnit TimeUnit = iota\n\tMinuteUnit TimeUnit = iota\n\tHourUnit TimeUnit = iota\n)\n\nfunc (tu TimeUnit) String() string {\n\tswitch tu {\n\tcase NanosecondUnit:\n\t\treturn \"Unit: Nanosecond\"\n\tcase MicrosecondUnit:\n\t\treturn \"Unit: Microsecond\"\n\tcase MillisecondUnit:\n\t\treturn \"Unit: Millisecond\"\n\tcase SecondUnit:\n\t\treturn \"Unit: Second\"\n\tcase MinuteUnit:\n\t\treturn \"Unit: Minute\"\n\tcase HourUnit:\n\t\treturn \"Unit: Hour\"\n\tdefault:\n\t\treturn \"Unit: Invalid TimeUnit\"\n\t}\n}\n\n\/\/ CountUnit is a type representing a counted quantity\ntype CountUnit MetricUnit\n\n\/\/ OneUnit represents the only CountUnit\nconst OneUnit CountUnit = iota\n\nfunc (cu CountUnit) String() string {\n\tswitch cu {\n\tcase OneUnit:\n\t\treturn \"Unit: One\"\n\tdefault:\n\t\treturn \"Unit: Invalid CounterUnit\"\n\t}\n}\n\n\/\/ MetricSemantics represents an enumerated type representing the possible\n\/\/ values for the semantics of a metric\ntype MetricSemantics int32\n\n\/\/ Possible values for MetricSemantics\nconst (\n\tNoSemantics MetricSemantics = iota\n\tCounterSemantics MetricSemantics = iota\n\tInstantSemantics MetricSemantics = iota\n\tDiscreteSemantics MetricSemantics = iota\n)\n\nfunc (ms MetricSemantics) String() string {\n\tswitch ms {\n\tcase NoSemantics:\n\t\treturn \"Semantics: None\"\n\tcase CounterSemantics:\n\t\treturn \"Semantics: Counter\"\n\tcase InstantSemantics:\n\t\treturn \"Semantics: Instant\"\n\tcase DiscreteSemantics:\n\t\treturn \"Semantics: Discrete\"\n\tdefault:\n\t\treturn \"Semantics: Invalid\"\n\t}\n}\n\n\/\/ Metric defines the general interface a type needs to implement to qualify\n\/\/ as a valid PCP metric\ntype Metric interface {\n\tVal() interface{} \/\/ gets the value of the metric\n\tSet(interface{}) error \/\/ sets the value of the metric to a value, optionally returns an error on failure\n\tType() MetricType \/\/ gets the type of a metric\n\tUnit() MetricUnit \/\/ gets the unit of a metric\n\tSemantics() MetricSemantics \/\/ gets the semantics for a metric\n\tDescription() string \/\/ gets the description of a metric\n}\n\n\/\/ generate a unique uint32 hash for a string\n\/\/ NOTE: make sure this is as fast as possible\nfunc getHash(s string) uint32 {\n\th := fnv.New32a()\n\th.Write([]byte(s))\n\treturn h.Sum32()\n}\n\n\/\/ Instance wraps a PCP compatible Instance\ntype Instance struct {\n\tname string\n\tid uint32\n\tindom *InstanceDomain\n}\n\n\/\/ NewInstance generates a new Instance type based on the passed parameters\n\/\/ the id is passed explicitly as it is assumed that this will be constructed\n\/\/ after initializing the InstanceDomain\n\/\/ this is not a part of the public API as this is not supposed to be used directly,\n\/\/ but instead added using the AddInstance method of InstanceDomain\nfunc newInstance(id uint32, name string, indom *InstanceDomain) *Instance {\n\treturn &Instance{\n\t\tname, id, indom,\n\t}\n}\n\n\/\/ InstanceDomain wraps a PCP compatible instance domain\ntype InstanceDomain struct {\n\tid uint32\n\tname string\n\tinstanceCache map[uint32]*Instance \/\/ the instances for this InstanceDomain stored as a map\n\tshortHelpText, longHelpText string\n}\n\n\/\/ NOTE: this declaration alone doesn't make this usable\n\/\/ it needs to be 'made' at the beginning of monitoring\nvar instanceDomainCache map[uint32]*InstanceDomain\n\n\/\/ NOTE: this is different from parfait's idea of generating ids for InstanceDomains\n\/\/ We simply generate a unique 32 bit hash for an instance domain name, and if it has not\n\/\/ already been created, we create it, otherwise we return the already created version\nfunc NewInstanceDomain(name string) *InstanceDomain {\n\th := getHash(name)\n\n\tv, present := instanceDomainCache[h]\n\tif present {\n\t\treturn v\n\t}\n\n\tinstanceDomainCache[h] = &InstanceDomain{\n\t\tid: h,\n\t\tname: name,\n\t}\n\n\treturn instanceDomainCache[h]\n}\n\n\/\/ AddInstance adds a new instance to the current InstanceDomain\nfunc (indom *InstanceDomain) AddInstance(name string) error {\n\th := getHash(name)\n\n\t_, present := indom.instanceCache[h]\n\tif present {\n\t\treturn errors.New(\"Instance with same name already created for the InstanceDomain\")\n\t}\n\n\tins := newInstance(h, name, indom)\n\tindom.instanceCache[h] = ins\n\n\treturn nil\n}\n<commit_msg>InstanceDomain: fix construction<commit_after>package pcp\n\nimport (\n\t\"errors\"\n\t\"hash\/fnv\"\n)\n\n\/\/ MetricType is an enumerated type representing all valid types for a metric\ntype MetricType int32\n\n\/\/ Possible values for a MetricType\nconst (\n\tNoSupportType MetricType = iota\n\tInt32Type MetricType = iota\n\tUint32Type MetricType = iota\n\tInt64Type MetricType = iota\n\tUint64Type MetricType = iota\n\tFloatType MetricType = iota\n\tDoubleType MetricType = iota\n\tStringType MetricType = iota\n\tAggregateType MetricType = iota\n\tAggregateStaticType MetricType = iota\n\tEventType MetricType = iota\n\tHighresEventType MetricType = iota\n\tUnknownType MetricType = iota\n)\n\nfunc (mt MetricType) String() string {\n\tswitch mt {\n\tcase NoSupportType:\n\t\treturn \"Type: No Support\"\n\tcase Int32Type:\n\t\treturn \"Type: Int32\"\n\tcase Int64Type:\n\t\treturn \"Type: Int64\"\n\tcase Uint32Type:\n\t\treturn \"Type: Uint32\"\n\tcase Uint64Type:\n\t\treturn \"Type: Uint64\"\n\tcase FloatType:\n\t\treturn \"Type: Float\"\n\tcase DoubleType:\n\t\treturn \"Type: Double\"\n\tcase StringType:\n\t\treturn \"Type: String\"\n\tcase AggregateType:\n\t\treturn \"Type: Aggregate\"\n\tcase AggregateStaticType:\n\t\treturn \"Type: Aggregate Static\"\n\tcase EventType:\n\t\treturn \"Type: Event\"\n\tcase HighresEventType:\n\t\treturn \"Type: Highres Event\"\n\tcase UnknownType:\n\t\treturn \"Type: Unknown\"\n\tdefault:\n\t\treturn \"Type: Invalid\"\n\t}\n}\n\n\/\/ MetricUnit is an enumerated type representing all possible values for a valid PCP unit\ntype MetricUnit int32\n\n\/\/ SpaceUnit is an enumerated type representing all units for space\ntype SpaceUnit MetricUnit\n\n\/\/ Possible values for SpaceUnit\nconst (\n\tByteUnit SpaceUnit = iota\n\tKilobyteUnit SpaceUnit = iota\n\tMegabyteUnit SpaceUnit = iota\n\tGigabyteUnit SpaceUnit = iota\n\tTerabyteUnit SpaceUnit = iota\n\tPetabyteUnit SpaceUnit = iota\n\tExabyteUnit SpaceUnit = iota\n)\n\nfunc (su SpaceUnit) String() string {\n\tswitch su {\n\tcase ByteUnit:\n\t\treturn \"Unit: Byte\"\n\tcase KilobyteUnit:\n\t\treturn \"Unit: Kilobyte\"\n\tcase MegabyteUnit:\n\t\treturn \"Unit: Megabyte\"\n\tcase GigabyteUnit:\n\t\treturn \"Unit: Gigabyte\"\n\tcase TerabyteUnit:\n\t\treturn \"Unit: Terabyte\"\n\tcase PetabyteUnit:\n\t\treturn \"Unit: Petabyte\"\n\tcase ExabyteUnit:\n\t\treturn \"Unit: Exabyte\"\n\tdefault:\n\t\treturn \"Unit: Invalid SpaceUnit\"\n\t}\n}\n\n\/\/ TimeUnit is an enumerated type representing all possible units for representing time\ntype TimeUnit MetricUnit\n\n\/\/ Possible Values for TimeUnit\nconst (\n\tNanosecondUnit TimeUnit = iota\n\tMicrosecondUnit TimeUnit = iota\n\tMillisecondUnit TimeUnit = iota\n\tSecondUnit TimeUnit = iota\n\tMinuteUnit TimeUnit = iota\n\tHourUnit TimeUnit = iota\n)\n\nfunc (tu TimeUnit) String() string {\n\tswitch tu {\n\tcase NanosecondUnit:\n\t\treturn \"Unit: Nanosecond\"\n\tcase MicrosecondUnit:\n\t\treturn \"Unit: Microsecond\"\n\tcase MillisecondUnit:\n\t\treturn \"Unit: Millisecond\"\n\tcase SecondUnit:\n\t\treturn \"Unit: Second\"\n\tcase MinuteUnit:\n\t\treturn \"Unit: Minute\"\n\tcase HourUnit:\n\t\treturn \"Unit: Hour\"\n\tdefault:\n\t\treturn \"Unit: Invalid TimeUnit\"\n\t}\n}\n\n\/\/ CountUnit is a type representing a counted quantity\ntype CountUnit MetricUnit\n\n\/\/ OneUnit represents the only CountUnit\nconst OneUnit CountUnit = iota\n\nfunc (cu CountUnit) String() string {\n\tswitch cu {\n\tcase OneUnit:\n\t\treturn \"Unit: One\"\n\tdefault:\n\t\treturn \"Unit: Invalid CounterUnit\"\n\t}\n}\n\n\/\/ MetricSemantics represents an enumerated type representing the possible\n\/\/ values for the semantics of a metric\ntype MetricSemantics int32\n\n\/\/ Possible values for MetricSemantics\nconst (\n\tNoSemantics MetricSemantics = iota\n\tCounterSemantics MetricSemantics = iota\n\tInstantSemantics MetricSemantics = iota\n\tDiscreteSemantics MetricSemantics = iota\n)\n\nfunc (ms MetricSemantics) String() string {\n\tswitch ms {\n\tcase NoSemantics:\n\t\treturn \"Semantics: None\"\n\tcase CounterSemantics:\n\t\treturn \"Semantics: Counter\"\n\tcase InstantSemantics:\n\t\treturn \"Semantics: Instant\"\n\tcase DiscreteSemantics:\n\t\treturn \"Semantics: Discrete\"\n\tdefault:\n\t\treturn \"Semantics: Invalid\"\n\t}\n}\n\n\/\/ Metric defines the general interface a type needs to implement to qualify\n\/\/ as a valid PCP metric\ntype Metric interface {\n\tVal() interface{} \/\/ gets the value of the metric\n\tSet(interface{}) error \/\/ sets the value of the metric to a value, optionally returns an error on failure\n\tType() MetricType \/\/ gets the type of a metric\n\tUnit() MetricUnit \/\/ gets the unit of a metric\n\tSemantics() MetricSemantics \/\/ gets the semantics for a metric\n\tDescription() string \/\/ gets the description of a metric\n}\n\n\/\/ generate a unique uint32 hash for a string\n\/\/ NOTE: make sure this is as fast as possible\nfunc getHash(s string) uint32 {\n\th := fnv.New32a()\n\th.Write([]byte(s))\n\treturn h.Sum32()\n}\n\n\/\/ Instance wraps a PCP compatible Instance\ntype Instance struct {\n\tname string\n\tid uint32\n\tindom *InstanceDomain\n}\n\n\/\/ NewInstance generates a new Instance type based on the passed parameters\n\/\/ the id is passed explicitly as it is assumed that this will be constructed\n\/\/ after initializing the InstanceDomain\n\/\/ this is not a part of the public API as this is not supposed to be used directly,\n\/\/ but instead added using the AddInstance method of InstanceDomain\nfunc newInstance(id uint32, name string, indom *InstanceDomain) *Instance {\n\treturn &Instance{\n\t\tname, id, indom,\n\t}\n}\n\n\/\/ InstanceDomain wraps a PCP compatible instance domain\ntype InstanceDomain struct {\n\tid uint32\n\tname string\n\tinstanceCache map[uint32]*Instance \/\/ the instances for this InstanceDomain stored as a map\n\tshortHelpText, longHelpText string\n}\n\n\/\/ NOTE: this declaration alone doesn't make this usable\n\/\/ it needs to be 'made' at the beginning of monitoring\nvar instanceDomainCache map[uint32]*InstanceDomain\n\n\/\/ NewInstanceDomain creates a new instance domain or returns an already created one for the passed name\n\/\/ NOTE: this is different from parfait's idea of generating ids for InstanceDomains\n\/\/ We simply generate a unique 32 bit hash for an instance domain name, and if it has not\n\/\/ already been created, we create it, otherwise we return the already created version\nfunc NewInstanceDomain(name, shortDescription, longDescription string) *InstanceDomain {\n\th := getHash(name)\n\n\tv, present := instanceDomainCache[h]\n\tif present {\n\t\treturn v\n\t}\n\n\tcache := make(map[uint32]*Instance)\n\tinstanceDomainCache[h] = &InstanceDomain{\n\t\tid: h,\n\t\tname: name,\n\t\tinstanceCache: cache,\n\t\tshortHelpText: shortDescription,\n\t\tlongHelpText: longDescription,\n\t}\n\n\treturn instanceDomainCache[h]\n}\n\n\/\/ AddInstance adds a new instance to the current InstanceDomain\nfunc (indom *InstanceDomain) AddInstance(name string) error {\n\th := getHash(name)\n\n\t_, present := indom.instanceCache[h]\n\tif present {\n\t\treturn errors.New(\"Instance with same name already created for the InstanceDomain\")\n\t}\n\n\tins := newInstance(h, name, indom)\n\tindom.instanceCache[h] = ins\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package gitlab provides authentication strategies using Gitlab.\npackage gitlab\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strconv\"\n\n\t\"github.com\/coreos\/dex\/connector\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"golang.org\/x\/oauth2\"\n)\n\nconst (\n\tscopeEmail = \"user:email\"\n\tscopeOrgs = \"read:org\"\n)\n\n\/\/ Config holds configuration options for gilab logins.\ntype Config struct {\n\tBaseURL string `json:\"baseURL\"`\n\tClientID string `json:\"clientID\"`\n\tClientSecret string `json:\"clientSecret\"`\n\tRedirectURI string `json:\"redirectURI\"`\n}\n\ntype gitlabUser struct {\n\tID int\n\tName string\n\tUsername string\n\tState string\n\tEmail string\n\tIsAdmin bool\n}\n\ntype gitlabGroup struct {\n\tID int\n\tName string\n\tPath string\n}\n\n\/\/ Open returns a strategy for logging in through GitLab.\nfunc (c *Config) Open(logger logrus.FieldLogger) (connector.Connector, error) {\n\tif c.BaseURL == \"\" {\n\t\tc.BaseURL = \"https:\/\/www.gitlab.com\"\n\t}\n\treturn &gitlabConnector{\n\t\tbaseURL: c.BaseURL,\n\t\tredirectURI: c.RedirectURI,\n\t\tclientID: c.ClientID,\n\t\tclientSecret: c.ClientSecret,\n\t\tlogger: logger,\n\t}, nil\n}\n\ntype connectorData struct {\n\t\/\/ GitLab's OAuth2 tokens never expire. We don't need a refresh token.\n\tAccessToken string `json:\"accessToken\"`\n}\n\nvar (\n\t_ connector.CallbackConnector = (*gitlabConnector)(nil)\n\t_ connector.RefreshConnector = (*gitlabConnector)(nil)\n)\n\ntype gitlabConnector struct {\n\tbaseURL string\n\tredirectURI string\n\torg string\n\tclientID string\n\tclientSecret string\n\tlogger logrus.FieldLogger\n}\n\nfunc (c *gitlabConnector) oauth2Config(scopes connector.Scopes) *oauth2.Config {\n\tgitlabScopes := []string{\"api\"}\n\tgitlabEndpoint := oauth2.Endpoint{AuthURL: c.baseURL + \"\/oauth\/authorize\", TokenURL: c.baseURL + \"\/oauth\/token\"}\n\treturn &oauth2.Config{\n\t\tClientID: c.clientID,\n\t\tClientSecret: c.clientSecret,\n\t\tEndpoint: gitlabEndpoint,\n\t\tScopes: gitlabScopes,\n\t\tRedirectURL: c.redirectURI,\n\t}\n}\n\nfunc (c *gitlabConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) {\n\tif c.redirectURI != callbackURL {\n\t\treturn \"\", fmt.Errorf(\"expected callback URL %q did not match the URL in the config %q\", c.redirectURI, callbackURL)\n\t}\n\treturn c.oauth2Config(scopes).AuthCodeURL(state), nil\n}\n\ntype oauth2Error struct {\n\terror string\n\terrorDescription string\n}\n\nfunc (e *oauth2Error) Error() string {\n\tif e.errorDescription == \"\" {\n\t\treturn e.error\n\t}\n\treturn e.error + \": \" + e.errorDescription\n}\n\nfunc (c *gitlabConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) {\n\tq := r.URL.Query()\n\tif errType := q.Get(\"error\"); errType != \"\" {\n\t\treturn identity, &oauth2Error{errType, q.Get(\"error_description\")}\n\t}\n\n\toauth2Config := c.oauth2Config(s)\n\tctx := r.Context()\n\n\ttoken, err := oauth2Config.Exchange(ctx, q.Get(\"code\"))\n\tif err != nil {\n\t\treturn identity, fmt.Errorf(\"gitlab: failed to get token: %v\", err)\n\t}\n\n\tclient := oauth2Config.Client(ctx, token)\n\n\tuser, err := c.user(ctx, client)\n\tif err != nil {\n\t\treturn identity, fmt.Errorf(\"gitlab: get user: %v\", err)\n\t}\n\n\tusername := user.Name\n\tif username == \"\" {\n\t\tusername = user.Email\n\t}\n\tidentity = connector.Identity{\n\t\tUserID: strconv.Itoa(user.ID),\n\t\tUsername: username,\n\t\tEmail: user.Email,\n\t\tEmailVerified: true,\n\t}\n\n\tif s.Groups {\n\t\tgroups, err := c.groups(ctx, client)\n\t\tif err != nil {\n\t\t\treturn identity, fmt.Errorf(\"gitlab: get groups: %v\", err)\n\t\t}\n\t\tidentity.Groups = groups\n\t}\n\n\tif s.OfflineAccess {\n\t\tdata := connectorData{AccessToken: token.AccessToken}\n\t\tconnData, err := json.Marshal(data)\n\t\tif err != nil {\n\t\t\treturn identity, fmt.Errorf(\"marshal connector data: %v\", err)\n\t\t}\n\t\tidentity.ConnectorData = connData\n\t}\n\n\treturn identity, nil\n}\n\nfunc (c *gitlabConnector) Refresh(ctx context.Context, s connector.Scopes, ident connector.Identity) (connector.Identity, error) {\n\tif len(ident.ConnectorData) == 0 {\n\t\treturn ident, errors.New(\"no upstream access token found\")\n\t}\n\n\tvar data connectorData\n\tif err := json.Unmarshal(ident.ConnectorData, &data); err != nil {\n\t\treturn ident, fmt.Errorf(\"gitlab: unmarshal access token: %v\", err)\n\t}\n\n\tclient := c.oauth2Config(s).Client(ctx, &oauth2.Token{AccessToken: data.AccessToken})\n\tuser, err := c.user(ctx, client)\n\tif err != nil {\n\t\treturn ident, fmt.Errorf(\"gitlab: get user: %v\", err)\n\t}\n\n\tusername := user.Name\n\tif username == \"\" {\n\t\tusername = user.Email\n\t}\n\tident.Username = username\n\tident.Email = user.Email\n\n\tif s.Groups {\n\t\tgroups, err := c.groups(ctx, client)\n\t\tif err != nil {\n\t\t\treturn ident, fmt.Errorf(\"gitlab: get groups: %v\", err)\n\t\t}\n\t\tident.Groups = groups\n\t}\n\treturn ident, nil\n}\n\n\/\/ user queries the GitLab API for profile information using the provided client. The HTTP\n\/\/ client is expected to be constructed by the golang.org\/x\/oauth2 package, which inserts\n\/\/ a bearer token as part of the request.\nfunc (c *gitlabConnector) user(ctx context.Context, client *http.Client) (gitlabUser, error) {\n\tvar u gitlabUser\n\treq, err := http.NewRequest(\"GET\", c.baseURL+\"\/api\/v3\/user\", nil)\n\tif err != nil {\n\t\treturn u, fmt.Errorf(\"gitlab: new req: %v\", err)\n\t}\n\treq = req.WithContext(ctx)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn u, fmt.Errorf(\"gitlab: get URL %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn u, fmt.Errorf(\"gitlab: read body: %v\", err)\n\t\t}\n\t\treturn u, fmt.Errorf(\"%s: %s\", resp.Status, body)\n\t}\n\n\tif err := json.NewDecoder(resp.Body).Decode(&u); err != nil {\n\t\treturn u, fmt.Errorf(\"failed to decode response: %v\", err)\n\t}\n\treturn u, nil\n}\n\n\/\/ groups queries the GitLab API for group membership.\n\/\/\n\/\/ The HTTP passed client is expected to be constructed by the golang.org\/x\/oauth2 package,\n\/\/ which inserts a bearer token as part of the request.\nfunc (c *gitlabConnector) groups(ctx context.Context, client *http.Client) ([]string, error) {\n\n\tapiURL := c.baseURL + \"\/api\/v3\/groups\"\n\n\treNext := regexp.MustCompile(\"<(.*)>; rel=\\\"next\\\"\")\n\treLast := regexp.MustCompile(\"<(.*)>; rel=\\\"last\\\"\")\n\n\tgroups := []string{}\n\tvar gitlabGroups []gitlabGroup\n\tfor {\n\t\t\/\/ 100 is the maximum number for per_page that allowed by gitlab\n\t\treq, err := http.NewRequest(\"GET\", apiURL, nil)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"gitlab: new req: %v\", err)\n\t\t}\n\t\treq = req.WithContext(ctx)\n\t\tresp, err := client.Do(req)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"gitlab: get groups: %v\", err)\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"gitlab: read body: %v\", err)\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"%s: %s\", resp.Status, body)\n\t\t}\n\n\t\tif err := json.NewDecoder(resp.Body).Decode(&gitlabGroups); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"gitlab: unmarshal groups: %v\", err)\n\t\t}\n\n\t\tfor _, group := range gitlabGroups {\n\t\t\tgroups = append(groups, group.Name)\n\t\t}\n\n\t\tlink := resp.Header.Get(\"Link\")\n\n\t\tif len(reLast.FindStringSubmatch(link)) > 1 {\n\t\t\tlastPageURL := reLast.FindStringSubmatch(link)[1]\n\n\t\t\tif apiURL == lastPageURL {\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\n\t\tif len(reNext.FindStringSubmatch(link)) > 1 {\n\t\t\tapiURL = reNext.FindStringSubmatch(link)[1]\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\n\t}\n\treturn groups, nil\n}\n<commit_msg>connector\/gitlab: correct scope strings, better default<commit_after>\/\/ Package gitlab provides authentication strategies using Gitlab.\npackage gitlab\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strconv\"\n\n\t\"github.com\/coreos\/dex\/connector\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"golang.org\/x\/oauth2\"\n)\n\nconst (\n\t\/\/ https:\/\/docs.gitlab.com\/ee\/integration\/oauth_provider.html#authorized-applications\n\tscopeUser = \"read_user\"\n\tscopeAPI = \"api\"\n)\n\n\/\/ Config holds configuration options for gilab logins.\ntype Config struct {\n\tBaseURL string `json:\"baseURL\"`\n\tClientID string `json:\"clientID\"`\n\tClientSecret string `json:\"clientSecret\"`\n\tRedirectURI string `json:\"redirectURI\"`\n}\n\ntype gitlabUser struct {\n\tID int\n\tName string\n\tUsername string\n\tState string\n\tEmail string\n\tIsAdmin bool\n}\n\ntype gitlabGroup struct {\n\tID int\n\tName string\n\tPath string\n}\n\n\/\/ Open returns a strategy for logging in through GitLab.\nfunc (c *Config) Open(logger logrus.FieldLogger) (connector.Connector, error) {\n\tif c.BaseURL == \"\" {\n\t\tc.BaseURL = \"https:\/\/www.gitlab.com\"\n\t}\n\treturn &gitlabConnector{\n\t\tbaseURL: c.BaseURL,\n\t\tredirectURI: c.RedirectURI,\n\t\tclientID: c.ClientID,\n\t\tclientSecret: c.ClientSecret,\n\t\tlogger: logger,\n\t}, nil\n}\n\ntype connectorData struct {\n\t\/\/ GitLab's OAuth2 tokens never expire. We don't need a refresh token.\n\tAccessToken string `json:\"accessToken\"`\n}\n\nvar (\n\t_ connector.CallbackConnector = (*gitlabConnector)(nil)\n\t_ connector.RefreshConnector = (*gitlabConnector)(nil)\n)\n\ntype gitlabConnector struct {\n\tbaseURL string\n\tredirectURI string\n\torg string\n\tclientID string\n\tclientSecret string\n\tlogger logrus.FieldLogger\n}\n\nfunc (c *gitlabConnector) oauth2Config(scopes connector.Scopes) *oauth2.Config {\n\tgitlabScopes := []string{scopeUser}\n\tif scopes.Groups {\n\t\tgitlabScopes = []string{scopeAPI}\n\t}\n\n\tgitlabEndpoint := oauth2.Endpoint{AuthURL: c.baseURL + \"\/oauth\/authorize\", TokenURL: c.baseURL + \"\/oauth\/token\"}\n\treturn &oauth2.Config{\n\t\tClientID: c.clientID,\n\t\tClientSecret: c.clientSecret,\n\t\tEndpoint: gitlabEndpoint,\n\t\tScopes: gitlabScopes,\n\t\tRedirectURL: c.redirectURI,\n\t}\n}\n\nfunc (c *gitlabConnector) LoginURL(scopes connector.Scopes, callbackURL, state string) (string, error) {\n\tif c.redirectURI != callbackURL {\n\t\treturn \"\", fmt.Errorf(\"expected callback URL %q did not match the URL in the config %q\", c.redirectURI, callbackURL)\n\t}\n\treturn c.oauth2Config(scopes).AuthCodeURL(state), nil\n}\n\ntype oauth2Error struct {\n\terror string\n\terrorDescription string\n}\n\nfunc (e *oauth2Error) Error() string {\n\tif e.errorDescription == \"\" {\n\t\treturn e.error\n\t}\n\treturn e.error + \": \" + e.errorDescription\n}\n\nfunc (c *gitlabConnector) HandleCallback(s connector.Scopes, r *http.Request) (identity connector.Identity, err error) {\n\tq := r.URL.Query()\n\tif errType := q.Get(\"error\"); errType != \"\" {\n\t\treturn identity, &oauth2Error{errType, q.Get(\"error_description\")}\n\t}\n\n\toauth2Config := c.oauth2Config(s)\n\tctx := r.Context()\n\n\ttoken, err := oauth2Config.Exchange(ctx, q.Get(\"code\"))\n\tif err != nil {\n\t\treturn identity, fmt.Errorf(\"gitlab: failed to get token: %v\", err)\n\t}\n\n\tclient := oauth2Config.Client(ctx, token)\n\n\tuser, err := c.user(ctx, client)\n\tif err != nil {\n\t\treturn identity, fmt.Errorf(\"gitlab: get user: %v\", err)\n\t}\n\n\tusername := user.Name\n\tif username == \"\" {\n\t\tusername = user.Email\n\t}\n\tidentity = connector.Identity{\n\t\tUserID: strconv.Itoa(user.ID),\n\t\tUsername: username,\n\t\tEmail: user.Email,\n\t\tEmailVerified: true,\n\t}\n\n\tif s.Groups {\n\t\tgroups, err := c.groups(ctx, client)\n\t\tif err != nil {\n\t\t\treturn identity, fmt.Errorf(\"gitlab: get groups: %v\", err)\n\t\t}\n\t\tidentity.Groups = groups\n\t}\n\n\tif s.OfflineAccess {\n\t\tdata := connectorData{AccessToken: token.AccessToken}\n\t\tconnData, err := json.Marshal(data)\n\t\tif err != nil {\n\t\t\treturn identity, fmt.Errorf(\"marshal connector data: %v\", err)\n\t\t}\n\t\tidentity.ConnectorData = connData\n\t}\n\n\treturn identity, nil\n}\n\nfunc (c *gitlabConnector) Refresh(ctx context.Context, s connector.Scopes, ident connector.Identity) (connector.Identity, error) {\n\tif len(ident.ConnectorData) == 0 {\n\t\treturn ident, errors.New(\"no upstream access token found\")\n\t}\n\n\tvar data connectorData\n\tif err := json.Unmarshal(ident.ConnectorData, &data); err != nil {\n\t\treturn ident, fmt.Errorf(\"gitlab: unmarshal access token: %v\", err)\n\t}\n\n\tclient := c.oauth2Config(s).Client(ctx, &oauth2.Token{AccessToken: data.AccessToken})\n\tuser, err := c.user(ctx, client)\n\tif err != nil {\n\t\treturn ident, fmt.Errorf(\"gitlab: get user: %v\", err)\n\t}\n\n\tusername := user.Name\n\tif username == \"\" {\n\t\tusername = user.Email\n\t}\n\tident.Username = username\n\tident.Email = user.Email\n\n\tif s.Groups {\n\t\tgroups, err := c.groups(ctx, client)\n\t\tif err != nil {\n\t\t\treturn ident, fmt.Errorf(\"gitlab: get groups: %v\", err)\n\t\t}\n\t\tident.Groups = groups\n\t}\n\treturn ident, nil\n}\n\n\/\/ user queries the GitLab API for profile information using the provided client. The HTTP\n\/\/ client is expected to be constructed by the golang.org\/x\/oauth2 package, which inserts\n\/\/ a bearer token as part of the request.\nfunc (c *gitlabConnector) user(ctx context.Context, client *http.Client) (gitlabUser, error) {\n\tvar u gitlabUser\n\treq, err := http.NewRequest(\"GET\", c.baseURL+\"\/api\/v4\/user\", nil)\n\tif err != nil {\n\t\treturn u, fmt.Errorf(\"gitlab: new req: %v\", err)\n\t}\n\treq = req.WithContext(ctx)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn u, fmt.Errorf(\"gitlab: get URL %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn u, fmt.Errorf(\"gitlab: read body: %v\", err)\n\t\t}\n\t\treturn u, fmt.Errorf(\"%s: %s\", resp.Status, body)\n\t}\n\n\tif err := json.NewDecoder(resp.Body).Decode(&u); err != nil {\n\t\treturn u, fmt.Errorf(\"failed to decode response: %v\", err)\n\t}\n\treturn u, nil\n}\n\n\/\/ groups queries the GitLab API for group membership.\n\/\/\n\/\/ The HTTP passed client is expected to be constructed by the golang.org\/x\/oauth2 package,\n\/\/ which inserts a bearer token as part of the request.\nfunc (c *gitlabConnector) groups(ctx context.Context, client *http.Client) ([]string, error) {\n\n\tapiURL := c.baseURL + \"\/api\/v4\/groups\"\n\n\treNext := regexp.MustCompile(\"<(.*)>; rel=\\\"next\\\"\")\n\treLast := regexp.MustCompile(\"<(.*)>; rel=\\\"last\\\"\")\n\n\tgroups := []string{}\n\tvar gitlabGroups []gitlabGroup\n\tfor {\n\t\t\/\/ 100 is the maximum number for per_page that allowed by gitlab\n\t\treq, err := http.NewRequest(\"GET\", apiURL, nil)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"gitlab: new req: %v\", err)\n\t\t}\n\t\treq = req.WithContext(ctx)\n\t\tresp, err := client.Do(req)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"gitlab: get groups: %v\", err)\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"gitlab: read body: %v\", err)\n\t\t\t}\n\t\t\treturn nil, fmt.Errorf(\"%s: %s\", resp.Status, body)\n\t\t}\n\n\t\tif err := json.NewDecoder(resp.Body).Decode(&gitlabGroups); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"gitlab: unmarshal groups: %v\", err)\n\t\t}\n\n\t\tfor _, group := range gitlabGroups {\n\t\t\tgroups = append(groups, group.Name)\n\t\t}\n\n\t\tlink := resp.Header.Get(\"Link\")\n\n\t\tif len(reLast.FindStringSubmatch(link)) > 1 {\n\t\t\tlastPageURL := reLast.FindStringSubmatch(link)[1]\n\n\t\t\tif apiURL == lastPageURL {\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\n\t\tif len(reNext.FindStringSubmatch(link)) > 1 {\n\t\t\tapiURL = reNext.FindStringSubmatch(link)[1]\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\n\t}\n\treturn groups, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package metrics is a go library for sampling, counting and timing go code\n\/\/ to be output in the l2met format.\npackage metrics\n\nimport (\n\t\"log\"\n\t\"os\"\n)\n\n\/\/ Logger is the logger to use when printing metrics. By default, metrics are printed\n\/\/ to Stdout.\nvar Logger = log.New(os.Stdout, \"\", 0)\n\nfunc Count(metric string, v interface{}) {\n\tprintMetric(MetricCount, metric, v, \"\")\n}\n\nfunc Sample(metric string, v interface{}, units string) {\n\tprintMetric(MetricSample, metric, v, units)\n}\n\nfunc Measure(metric string, v interface{}, units string) {\n\tprintMetric(MetricMeasure, metric, v, units)\n}\n\nfunc printMetric(t MetricType, metric string, v interface{}, units string) {\n\tm := &Metric{Name: metric, Type: t, Value: v, Units: units}\n\tm.Print()\n}\n<commit_msg>Namespace.<commit_after>\/\/ Package metrics is a go library for sampling, counting and timing go code\n\/\/ to be output in the l2met format.\npackage metrics\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n)\n\ntype Namespace string\n\n\/\/ Logger is the logger to use when printing metrics. By default, metrics are printed\n\/\/ to Stdout.\nvar Logger = log.New(os.Stdout, \"\", 0)\n\n\/\/ The root namespace.\nvar root Namespace = \"\"\n\nfunc (n Namespace) Count(metric string, v interface{}) {\n\tn.print(MetricCount, metric, v, \"\")\n}\n\nfunc (n Namespace) Sample(metric string, v interface{}, units string) {\n\tn.print(MetricSample, metric, v, units)\n}\n\nfunc (n Namespace) Measure(metric string, v interface{}, units string) {\n\tn.print(MetricMeasure, metric, v, units)\n}\n\nfunc (n Namespace) print(t MetricType, metric string, v interface{}, units string) {\n\tif n != \"\" {\n\t\tmetric = fmt.Sprintf(\"%s.%s\", n, metric)\n\t}\n\tm := &Metric{Name: metric, Type: t, Value: v, Units: units}\n\tm.Print()\n}\n\nfunc Count(metric string, v interface{}) {\n\troot.Count(metric, v)\n}\n\nfunc Sample(metric string, v interface{}, units string) {\n\troot.Sample(metric, v, units)\n}\n\nfunc Measure(metric string, v interface{}, units string) {\n\troot.Measure(metric, v, units)\n}\n<|endoftext|>"} {"text":"<commit_before>package handlers\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/lager\"\n\t\"github.com\/cloudfoundry\/hm9000\/store\"\n\t\"code.cloudfoundry.org\/clock\"\n)\n\ntype bulkHandler struct {\n\tlogger lager.Logger\n\tstore store.Store\n\tclock clock.Clock\n}\n\ntype AppStateRequest struct {\n\tAppGuid string `json:\"droplet\"`\n\tAppVersion string `json:\"version\"`\n}\n\nfunc NewBulkAppStateHandler(logger lager.Logger, store store.Store, clock clock.Clock) http.Handler {\n\treturn &bulkHandler{\n\t\tlogger: logger,\n\t\tstore: store,\n\t\tclock: clock,\n\t}\n}\n\nfunc (handler *bulkHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tstartTime := time.Now()\n\trequests := make([]AppStateRequest, 0)\n\n\tbodyBytes, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\thandler.logger.Error(\"Failed to handle bulk_app_state request\", err)\n\t\tw.Write([]byte(\"{}\"))\n\t}\n\n\terr = json.Unmarshal(bodyBytes, &requests)\n\tif err != nil {\n\t\thandler.logger.Error(\"Failed to handle bulk_app_state request\", err, lager.Data{\n\t\t\t\"payload\": string(bodyBytes),\n\t\t\t\"elapsed time\": fmt.Sprintf(\"%s\", time.Since(startTime)),\n\t\t})\n\t\tw.Write([]byte(\"{}\"))\n\t\treturn\n\t}\n\n\terr = handler.store.VerifyFreshness(handler.clock.Now())\n\tif err != nil {\n\t\thandler.logger.Error(\"Failed to handle bulk_app_state request\", err, lager.Data{\n\t\t\t\"payload\": string(bodyBytes),\n\t\t\t\"elapsed time\": fmt.Sprintf(\"%s\", time.Since(startTime)),\n\t\t})\n\t\tw.Write([]byte(\"{}\"))\n\t\treturn\n\t}\n\n\tvar apps = make(map[string]interface{})\n\tfor _, request := range requests {\n\t\tapp, err := handler.store.GetApp(request.AppGuid, request.AppVersion)\n\t\tif err == nil {\n\t\t\tapps[app.AppGuid] = app\n\t\t}\n\t}\n\n\tappsJson, err := json.Marshal(apps)\n\tif err != nil {\n\t\thandler.logger.Error(\"Failed to handle bulk_app_state request\", err, lager.Data{\n\t\t\t\"payload\": string(bodyBytes),\n\t\t\t\"elapsed time\": fmt.Sprintf(\"%s\", time.Since(startTime)),\n\t\t})\n\t}\n\n\tw.Write([]byte(appsJson))\n}\n<commit_msg>Make error messages for bulk app state handling more meaningful<commit_after>package handlers\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"code.cloudfoundry.org\/lager\"\n\t\"github.com\/cloudfoundry\/hm9000\/store\"\n\t\"code.cloudfoundry.org\/clock\"\n)\n\ntype bulkHandler struct {\n\tlogger lager.Logger\n\tstore store.Store\n\tclock clock.Clock\n}\n\ntype AppStateRequest struct {\n\tAppGuid string `json:\"droplet\"`\n\tAppVersion string `json:\"version\"`\n}\n\nfunc NewBulkAppStateHandler(logger lager.Logger, store store.Store, clock clock.Clock) http.Handler {\n\treturn &bulkHandler{\n\t\tlogger: logger,\n\t\tstore: store,\n\t\tclock: clock,\n\t}\n}\n\nfunc (handler *bulkHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tstartTime := time.Now()\n\trequests := make([]AppStateRequest, 0)\n\n\tbodyBytes, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\thandler.logger.Error(\"Failed to handle bulk_app_state request\", err)\n\t\tw.Write([]byte(\"{}\"))\n\t}\n\n\terr = json.Unmarshal(bodyBytes, &requests)\n\tif err != nil {\n\t\thandler.logger.Error(\"Failed to unmarshal bulk app state request\", err, lager.Data{\n\t\t\t\"payload\": string(bodyBytes),\n\t\t\t\"elapsed time\": fmt.Sprintf(\"%s\", time.Since(startTime)),\n\t\t})\n\t\tw.Write([]byte(\"{}\"))\n\t\treturn\n\t}\n\n\terr = handler.store.VerifyFreshness(handler.clock.Now())\n\tif err != nil {\n\t\thandler.logger.Error(\"Failed to verify freshness while handling bulk app state request\", err, lager.Data{\n\t\t\t\"payload\": string(bodyBytes),\n\t\t\t\"elapsed time\": fmt.Sprintf(\"%s\", time.Since(startTime)),\n\t\t})\n\t\tw.Write([]byte(\"{}\"))\n\t\treturn\n\t}\n\n\tvar apps = make(map[string]interface{})\n\tfor _, request := range requests {\n\t\tapp, err := handler.store.GetApp(request.AppGuid, request.AppVersion)\n\t\tif err == nil {\n\t\t\tapps[app.AppGuid] = app\n\t\t}\n\t}\n\n\tappsJson, err := json.Marshal(apps)\n\tif err != nil {\n\t\thandler.logger.Error(\"Failed to marshal app states while handling bulk app state request\", err, lager.Data{\n\t\t\t\"payload\": string(bodyBytes),\n\t\t\t\"elapsed time\": fmt.Sprintf(\"%s\", time.Since(startTime)),\n\t\t})\n\t}\n\n\tw.Write([]byte(appsJson))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t\"github.com\/karamani\/iostreams\"\n)\n\nvar (\n\tdebugMode bool\n\tqueryArg string\n\texecArg string\n\tconnectionStringArg string\n\tformatArg string\n\tfakeMode bool\n\tisExec bool\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"massquery\"\n\tapp.Usage = \"massquery\"\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"debug\",\n\t\t\tUsage: \"debug mode\",\n\t\t\tEnvVar: \"MASSQUERY_DEBUG\",\n\t\t\tDestination: &debugMode,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"query\",\n\t\t\tUsage: \"sql-query\",\n\t\t\tDestination: &queryArg,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"exec\",\n\t\t\tUsage: \"exec-string (insert, update or delete)\",\n\t\t\tDestination: &execArg,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"cnn\",\n\t\t\tUsage: \"db connection string\",\n\t\t\tEnvVar: \"MASSQUERY_CNN\",\n\t\t\tDestination: &connectionStringArg,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"format\",\n\t\t\tUsage: \"output format\",\n\t\t\tDestination: &formatArg,\n\t\t},\n\t}\n\n\tapp.Action = func(c *cli.Context) {\n\n\t\tif len(connectionStringArg) == 0 {\n\t\t\tlog.Println(\"[ERROR] 'cnn' arg is required\")\n\t\t\treturn\n\t\t}\n\n\t\tif len(queryArg) == 0 && len(execArg) == 0 {\n\t\t\tlog.Println(\"[ERROR] It should be one of the arguments: 'query' or 'exec'\")\n\t\t\treturn\n\t\t}\n\n\t\tquery := queryArg\n\t\tisExec = len(query) == 0\n\t\tif isExec {\n\t\t\tquery = execArg\n\t\t}\n\n\t\tif !iostreams.StdinReady() {\n\t\t\tprocessOneQuery(connectionStringArg, query, isExec, \"\")\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ this func's called for each stdin's row\n\t\tprocess := func(row []byte) error {\n\n\t\t\tdebug(string(row))\n\n\t\t\tparams := strings.Split(string(row), \"\\t\")\n\t\t\trowQuery := parameterizedString(query, \"{%d}\", params)\n\t\t\trowCnn := parameterizedString(connectionStringArg, \"{%d}\", params)\n\n\t\t\tdebug(\"connection:\" + rowCnn)\n\t\t\tdebug(\"query:\" + rowQuery)\n\n\t\t\tprocessOneQuery(rowCnn, rowQuery, isExec, string(row))\n\n\t\t\treturn nil\n\t\t}\n\n\t\tif err := iostreams.ProcessStdin(process); err != nil {\n\t\t\tlog.Panicln(err.Error())\n\t\t}\n\t}\n\n\tapp.Run(os.Args)\n}\n\nfunc processOneQuery(cnn, query string, isExec bool, input string) {\n\tres, err := runQuery(cnn, query, isExec)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\tprintRes(formatRes(formatArg, input, cnn, \"error\", nil))\n\t} else {\n\t\tfor _, resrow := range res {\n\t\t\tprintRes(formatRes(formatArg, input, cnn, \"success\", resrow))\n\t\t}\n\t}\n}\n\nfunc parameterizedString(s, tpl string, params []string) string {\n\tres := s\n\tfor i, param := range params {\n\t\tt := fmt.Sprintf(tpl, i)\n\t\tres = strings.Replace(res, t, param, -1)\n\t}\n\treturn res\n}\n\nfunc formatRes(format, input, cnn, status string, values []string) string {\n\n\tres := strings.Join(values, \"\\t\")\n\n\tif len(format) > 0 {\n\t\ts := format\n\n\t\t\/\/ remove unnecessary quotes from command line\n\t\ts = strings.Replace(s, \"\\\\t\", \"\\t\", -1)\n\t\ts = strings.Replace(s, \"\\\\n\", \"\\n\", -1)\n\n\t\ts = strings.Replace(s, \"{input}\", input, -1)\n\t\ts = strings.Replace(s, \"{res}\", res, -1)\n\t\ts = strings.Replace(s, \"{cnn}\", cnn, -1)\n\t\ts = strings.Replace(s, \"{status}\", status, -1)\n\t\ts = parameterizedString(s, \"{res%d}\", values)\n\n\t\tres = s\n\t}\n\n\treturn res\n}\n\nfunc printRes(s string) {\n\tif len(s) > 0 {\n\t\tfmt.Println(s)\n\t}\n}\n\nfunc runQuery(connectionString, query string, isExec bool) (res [][]string, resErr error) {\n\n\tres, resErr = nil, nil\n\n\tdb, resErr := sql.Open(\"mysql\", connectionString)\n\tif resErr != nil {\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\tresErr = db.Ping()\n\tif resErr != nil {\n\t\treturn\n\t}\n\n\tif isExec {\n\n\t\tresErr = func() error {\n\t\t\texecRes, err := db.Exec(query)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\taffected, err := execRes.RowsAffected()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlastInsertID, err := execRes.LastInsertId()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tres = append(res, []string{\n\t\t\t\tstrconv.FormatInt(affected, 10),\n\t\t\t\tstrconv.FormatInt(lastInsertID, 10),\n\t\t\t})\n\t\t\treturn nil\n\t\t}()\n\n\t} else {\n\n\t\tresErr = func() error {\n\n\t\t\trows, err := db.Query(query)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer rows.Close()\n\n\t\t\tcols, err := rows.Columns()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcontainer := newScanContainer(len(cols))\n\t\t\tfor rows.Next() {\n\t\t\t\tif err := rows.Scan(container.Pointers...); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tres = append(res, container.AsStrings())\n\t\t\t}\n\n\t\t\tif err := rows.Err(); err != nil {\n\t\t\t\tres = nil\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}()\n\t}\n\n\treturn\n}\n\nfunc debug(format string, args ...interface{}) {\n\tif debugMode {\n\t\tlog.Printf(\"[DEBUG] \"+format+\"\\n\", args...)\n\t}\n}\n<commit_msg>Small refactoring in func processOneQuery<commit_after>package main\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t_ \"github.com\/go-sql-driver\/mysql\"\n\t\"github.com\/karamani\/iostreams\"\n)\n\nvar (\n\tdebugMode bool\n\tqueryArg string\n\texecArg string\n\tconnectionStringArg string\n\tformatArg string\n\tfakeMode bool\n\tisExec bool\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"massquery\"\n\tapp.Usage = \"massquery\"\n\tapp.Flags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName: \"debug\",\n\t\t\tUsage: \"debug mode\",\n\t\t\tEnvVar: \"MASSQUERY_DEBUG\",\n\t\t\tDestination: &debugMode,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"query\",\n\t\t\tUsage: \"sql-query\",\n\t\t\tDestination: &queryArg,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"exec\",\n\t\t\tUsage: \"exec-string (insert, update or delete)\",\n\t\t\tDestination: &execArg,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"cnn\",\n\t\t\tUsage: \"db connection string\",\n\t\t\tEnvVar: \"MASSQUERY_CNN\",\n\t\t\tDestination: &connectionStringArg,\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"format\",\n\t\t\tUsage: \"output format\",\n\t\t\tDestination: &formatArg,\n\t\t},\n\t}\n\n\tapp.Action = func(c *cli.Context) {\n\n\t\tif len(connectionStringArg) == 0 {\n\t\t\tlog.Println(\"[ERROR] 'cnn' arg is required\")\n\t\t\treturn\n\t\t}\n\n\t\tif len(queryArg) == 0 && len(execArg) == 0 {\n\t\t\tlog.Println(\"[ERROR] It should be one of the arguments: 'query' or 'exec'\")\n\t\t\treturn\n\t\t}\n\n\t\tquery := queryArg\n\t\tisExec = len(query) == 0\n\t\tif isExec {\n\t\t\tquery = execArg\n\t\t}\n\n\t\tif !iostreams.StdinReady() {\n\t\t\tprocessOneQuery(connectionStringArg, query, isExec, \"\")\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ this func's called for each stdin's row\n\t\tprocess := func(row []byte) error {\n\n\t\t\tdebug(string(row))\n\n\t\t\tparams := strings.Split(string(row), \"\\t\")\n\t\t\trowQuery := parameterizedString(query, \"{%d}\", params)\n\t\t\trowCnn := parameterizedString(connectionStringArg, \"{%d}\", params)\n\n\t\t\tdebug(\"connection:\" + rowCnn)\n\t\t\tdebug(\"query:\" + rowQuery)\n\n\t\t\tprocessOneQuery(rowCnn, rowQuery, isExec, string(row))\n\n\t\t\treturn nil\n\t\t}\n\n\t\tif err := iostreams.ProcessStdin(process); err != nil {\n\t\t\tlog.Panicln(err.Error())\n\t\t}\n\t}\n\n\tapp.Run(os.Args)\n}\n\nfunc processOneQuery(cnn, query string, isExec bool, input string) {\n\tres, err := runQuery(cnn, query, isExec)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\tprintRes(formatRes(formatArg, input, cnn, \"error\", nil))\n\t\treturn\n\t}\n\n\tfor _, resrow := range res {\n\t\tprintRes(formatRes(formatArg, input, cnn, \"success\", resrow))\n\t}\n}\n\nfunc parameterizedString(s, tpl string, params []string) string {\n\tres := s\n\tfor i, param := range params {\n\t\tt := fmt.Sprintf(tpl, i)\n\t\tres = strings.Replace(res, t, param, -1)\n\t}\n\treturn res\n}\n\nfunc formatRes(format, input, cnn, status string, values []string) string {\n\n\tres := strings.Join(values, \"\\t\")\n\n\tif len(format) > 0 {\n\t\ts := format\n\n\t\t\/\/ remove unnecessary quotes from command line\n\t\ts = strings.Replace(s, \"\\\\t\", \"\\t\", -1)\n\t\ts = strings.Replace(s, \"\\\\n\", \"\\n\", -1)\n\n\t\ts = strings.Replace(s, \"{input}\", input, -1)\n\t\ts = strings.Replace(s, \"{res}\", res, -1)\n\t\ts = strings.Replace(s, \"{cnn}\", cnn, -1)\n\t\ts = strings.Replace(s, \"{status}\", status, -1)\n\t\ts = parameterizedString(s, \"{res%d}\", values)\n\n\t\tres = s\n\t}\n\n\treturn res\n}\n\nfunc printRes(s string) {\n\tif len(s) > 0 {\n\t\tfmt.Println(s)\n\t}\n}\n\nfunc runQuery(connectionString, query string, isExec bool) (res [][]string, resErr error) {\n\n\tres, resErr = nil, nil\n\n\tdb, resErr := sql.Open(\"mysql\", connectionString)\n\tif resErr != nil {\n\t\treturn\n\t}\n\tdefer db.Close()\n\n\tresErr = db.Ping()\n\tif resErr != nil {\n\t\treturn\n\t}\n\n\tif isExec {\n\n\t\tresErr = func() error {\n\t\t\texecRes, err := db.Exec(query)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\taffected, err := execRes.RowsAffected()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlastInsertID, err := execRes.LastInsertId()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tres = append(res, []string{\n\t\t\t\tstrconv.FormatInt(affected, 10),\n\t\t\t\tstrconv.FormatInt(lastInsertID, 10),\n\t\t\t})\n\t\t\treturn nil\n\t\t}()\n\n\t} else {\n\n\t\tresErr = func() error {\n\n\t\t\trows, err := db.Query(query)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer rows.Close()\n\n\t\t\tcols, err := rows.Columns()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcontainer := newScanContainer(len(cols))\n\t\t\tfor rows.Next() {\n\t\t\t\tif err := rows.Scan(container.Pointers...); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tres = append(res, container.AsStrings())\n\t\t\t}\n\n\t\t\tif err := rows.Err(); err != nil {\n\t\t\t\tres = nil\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}()\n\t}\n\n\treturn\n}\n\nfunc debug(format string, args ...interface{}) {\n\tif debugMode {\n\t\tlog.Printf(\"[DEBUG] \"+format+\"\\n\", args...)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"code.google.com\/p\/go-tour\/tree\"\n\t\"fmt\"\n\t\"sort\"\n)\n\nfunc Walk(t *tree.Tree, ch chan int) {\n\tch <- t.Value\n\tif t.Left != nil {\n\t\tgo Walk(t.Left, ch)\n\t}\n\tif t.Right != nil {\n\t\tgo Walk(t.Right, ch)\n\t}\n}\n\nfunc Same(t1, t2 *tree.Tree) bool {\n\tch := make(chan int)\n\tt1vals := make([]int, 10)\n\tt2vals := make([]int, 10)\n\n\tgo Walk(t1, ch)\n\tfor i := 0; i < 10; i++ {\n\t\tt1vals[i] = <-ch\n\t}\n\tgo Walk(t2, ch)\n\tfor i := 0; i < 10; i++ {\n\t\tt2vals[i] = <-ch\n\t}\n\tfmt.Println(t1vals)\n\tfmt.Println(t2vals)\n\tsort.Ints(t1vals)\n\tsort.Ints(t2vals)\n\tstrt1 := fmt.Sprintf(\"%v\", t1vals)\n\tstrt2 := fmt.Sprintf(\"%v\", t2vals)\n\tif strt1 == strt2 {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}\n\nfunc main() {\n\tfmt.Println(Same(tree.New(1), tree.New(1)))\n}\n<commit_msg>Update binarytree.go<commit_after>package main\n\nimport (\n\t\"code.google.com\/p\/go-tour\/tree\"\n\t\"fmt\"\n\t\"sort\"\n)\n\nfunc Walk(t *tree.Tree, ch chan int) {\n\tch <- t.Value\n\tif t.Left != nil {\n\t\tgo Walk(t.Left, ch)\n\t}\n\tif t.Right != nil {\n\t\tgo Walk(t.Right, ch)\n\t}\n}\n\nfunc Same(t1, t2 *tree.Tree) bool {\n\tch := make(chan int)\n\tt1vals := make([]int, 10)\n\tt2vals := make([]int, 10)\n\n\tgo Walk(t1, ch)\n\tfor i := 0; i < 10; i++ {\n\t\tt1vals[i] = <-ch\n\t}\n\tgo Walk(t2, ch)\n\tfor i := 0; i < 10; i++ {\n\t\tt2vals[i] = <-ch\n\t}\n\t\/\/ the next few lines should be made a bit cleaner, you can't compare slices so I should use arrays instead\n\tfmt.Println(t1vals)\n\tfmt.Println(t2vals)\n\tsort.Ints(t1vals)\n\tsort.Ints(t2vals)\n\tstrt1 := fmt.Sprintf(\"%v\", t1vals)\n\tstrt2 := fmt.Sprintf(\"%v\", t2vals)\n\tif strt1 == strt2 {\n\t\treturn true\n\t} else {\n\t\treturn false\n\t}\n}\n\nfunc main() {\n\tfmt.Println(Same(tree.New(1), tree.New(1)))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\ttc \"github.com\/ametheus\/go-termcolours\"\n\t\"image\"\n\t\"image\/color\"\n\t_ \"image\/jpeg\"\n\t_ \"image\/png\"\n\t\"log\"\n\t\"os\"\n)\n\nvar (\n\timage_file = flag.String(\"image_file\", \"\", \"The image file to display\")\n)\n\nconst BLOCK = \"\\xe2\\x96\\x80\"\n\nfunc init() {\n\tflag.Parse()\n}\n\nfunc main() {\n\treader, err := os.Open(*image_file)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer reader.Close()\n\n\tm, _, err := image.Decode(reader)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tbounds := m.Bounds()\n\n\tfmt.Printf(\"Image is %s by %s pixels wide\\n\", tc.Green(fmt.Sprintf(\"%d\", bounds.Max.X)), tc.Green(fmt.Sprintf(\"%d\", bounds.Max.Y)))\n\n\tvar c0, c1 color.Color\n\tfor y := bounds.Min.Y; y < bounds.Max.Y; y += 2 {\n\t\tfor x := bounds.Min.X; x < bounds.Max.X; x++ {\n\t\t\tc0 = m.At(x, y)\n\t\t\tc1 = color.Black\n\t\t\tif (y + 1) < bounds.Max.Y {\n\t\t\t\tc1 = m.At(x, y+1)\n\t\t\t}\n\n\t\t\tfmt.Print(tc.Background24(c1, tc.Foreground24(c0, BLOCK)))\n\t\t}\n\t\tfmt.Print(\"\\n\")\n\t}\n}\n<commit_msg>Add 256-colour mode, with simple dithering<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\ttc \"github.com\/ametheus\/go-termcolours\"\n\t\"image\"\n\t\"image\/color\"\n\t_ \"image\/jpeg\"\n\t_ \"image\/png\"\n\t\"log\"\n\t\"os\"\n)\n\nvar (\n\timage_file = flag.String(\"image_file\", \"\", \"The image file to display\")\n\tuse_24bit = flag.Bool(\"use_24bit\", true, \"Use 24-bit colours\")\n)\n\nconst BLOCK = \"\\xe2\\x96\\x80\"\n\nfunc init() {\n\tflag.Parse()\n}\n\nfunc main() {\n\treader, err := os.Open(*image_file)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer reader.Close()\n\n\tm, _, err := image.Decode(reader)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tbounds := m.Bounds()\n\n\tfmt.Printf(\"Image is %s by %s pixels wide\\n\", tc.Green(fmt.Sprintf(\"%d\", bounds.Max.X)), tc.Green(fmt.Sprintf(\"%d\", bounds.Max.Y)))\n\n\tif *use_24bit {\n\t\tWrite24(m, bounds)\n\t} else {\n\t\tWrite8(m, bounds)\n\t}\n}\n\nfunc Write24(i image.Image, bounds image.Rectangle) {\n\tvar c0, c1 color.Color\n\tfor y := bounds.Min.Y; y < bounds.Max.Y; y += 2 {\n\t\tfor x := bounds.Min.X; x < bounds.Max.X; x++ {\n\t\t\tc0 = i.At(x, y)\n\t\t\tc1 = color.Black\n\t\t\tif (y + 1) < bounds.Max.Y {\n\t\t\t\tc1 = i.At(x, y+1)\n\t\t\t}\n\n\t\t\tfmt.Print(tc.Background24(c1, tc.Foreground24(c0, BLOCK)))\n\t\t}\n\t\tfmt.Print(\"\\n\")\n\t}\n}\n\nfunc cdiff(before color.Color, after tc.C256) (r, g, b int32) {\n\tr0, g0, b0, _ := before.RGBA()\n\tr1, g1, b1, _ := after.RGBA()\n\n\tr = int32(r1) - int32(r0)\n\tg = int32(g1) - int32(g0)\n\tb = int32(b1) - int32(b0)\n\treturn\n}\n\nfunc pos(a, b int32) uint32 {\n\ta += b\n\tif a < 0 {\n\t\treturn 0\n\t}\n\treturn uint32(a)\n}\n\nfunc iadd(i image.Image, x, y int, dR, dG, dB int32, multiplier float64) {\n\tcol := i.At(x, y)\n\n\tr, g, b, _ := col.RGBA()\n\tr = pos(int32(r), int32(float64(dR)*multiplier))\n\tg = pos(int32(g), int32(float64(dG)*multiplier))\n\tb = pos(int32(b), int32(float64(dB)*multiplier))\n\n\tcnew := color.RGBA{uint8(r >> 8), uint8(g >> 8), uint8(b >> 8), 0}\n\ti.(*image.RGBA).Set(x, y, cnew)\n}\n\nfunc iget(i image.Image, bounds image.Rectangle, x, y int) tc.C256 {\n\tcol := i.At(x, y)\n\taft := tc.Convert256(col)\n\n\tdr, dg, db := cdiff(col, aft)\n\n\tif (x + 1) < bounds.Max.X {\n\t\tiadd(i, x+1, y, dr, db, dg, 7.0\/16.0)\n\t\tif (y + 1) < bounds.Max.Y {\n\t\t\tiadd(i, x+1, y+1, dr, db, dg, 1.0\/16.0)\n\t\t}\n\t}\n\tif (y + 1) < bounds.Max.Y {\n\t\tiadd(i, x, y+1, dr, db, dg, 5.0\/16.0)\n\t\tif (x - 1) >= bounds.Min.X {\n\t\t\tiadd(i, x-1, y+1, dr, db, dg, 3.0\/16.0)\n\t\t}\n\t}\n\n\treturn aft\n}\n\nfunc Write8(i image.Image, bounds image.Rectangle) {\n\tvar c0, c1 tc.C256\n\tfor y := bounds.Min.Y; y < bounds.Max.Y; y += 2 {\n\t\tfor x := bounds.Min.X; x < bounds.Max.X; x++ {\n\t\t\tc0 = iget(i, bounds, x, y)\n\t\t\tc1 = 232\n\t\t\tif (y + 1) < bounds.Max.Y {\n\t\t\t\tc1 = iget(i, bounds, x, y+1)\n\t\t\t}\n\n\t\t\tfmt.Print(tc.Background8(c1, tc.Foreground24(c0, BLOCK)))\n\t\t}\n\t\tfmt.Print(\"\\n\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package controller\n\nimport (\n\t\"testing\"\n\t\"github.com\/mageddo\/go-logging\"\n\t\"bk-api\/test\"\n\t\"bk-api\/service\"\n\t\"bk-api\/entity\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestGetV1_0ListBookmarksNoOffsetError(t *testing.T){\n\n\ttest.BuildDatabase()\n\n\tnowOffsetMsg := `{\"code\":400,\"message\":\"Please pass a valid offset and quantity\"}\n`\n\tresp, c, err := test.NewReq(\"GET\", BOOKMARK_V1)\n\n\tassert.Nil(t, err)\n\tassert.Equal(t, 400, c)\n\n\tassert.Equal(t, nowOffsetMsg, resp)\n\n}\n\nfunc TestGetV1_0ListBookmarks(t *testing.T){\n\n\tctx := logging.NewContext()\n\ttest.BuildDatabase()\n\n\texpectedBookmarks := \"[{\\\"id\\\":1,\\\"name\\\":\\\"X\\\",\\\"visibility\\\":1,\\\"html\\\":\\\"\\\",\\\"length\\\":3}]\"\n\n\n\tservice.NewBookmarkService(ctx).SaveBookmark(entity.NewBookmarkWithNameAndVisibility(\"X\", entity.PUBLIC))\n\tservice.NewBookmarkService(ctx).SaveBookmark(entity.NewBookmarkWithNameAndVisibility(\"X2\", entity.PUBLIC))\n\tservice.NewBookmarkService(ctx).SaveBookmark(entity.NewBookmarkWithNameAndVisibility(\"X3\", entity.PRIVATE))\n\n\tresp, c, err := test.NewReq(\"GET\", BOOKMARK_V1 + \"?from=0&quantity=1\")\n\n\tassert.Nil(t, err)\n\tassert.Equal(t, 200, c)\n\n\tresp = regex.ReplaceAllString(resp, \"\")\n\n\tassert.Equal(t, len(expectedBookmarks), len(resp))\n\tassert.Equal(t, expectedBookmarks, resp)\n\n}\n\nfunc TestGetV1_0ListBookmarksValidateFromSuccess(t *testing.T){\n\n\tctx := logging.NewContext()\n\ttest.BuildDatabase()\n\n\texpectedBookmarks := \"[{\\\"id\\\":2,\\\"name\\\":\\\"X2\\\",\\\"visibility\\\":1,\\\"html\\\":\\\"\\\",\\\"length\\\":3},{\\\"id\\\":3,\\\"name\\\":\\\"X3\\\",\\\"visibility\\\":0,\\\"html\\\":\\\"\\\",\\\"length\\\":3}]\"\n\n\n\tservice.NewBookmarkService(ctx).SaveBookmark(entity.NewBookmarkWithNameAndVisibility(\"X\", entity.PUBLIC))\n\tservice.NewBookmarkService(ctx).SaveBookmark(entity.NewBookmarkWithNameAndVisibility(\"X2\", entity.PUBLIC))\n\tservice.NewBookmarkService(ctx).SaveBookmark(entity.NewBookmarkWithNameAndVisibility(\"X3\", entity.PRIVATE))\n\n\tresp, c, err := test.NewReq(\"GET\", BOOKMARK_V1 + \"?from=1&quantity=2\")\n\n\tassert.Nil(t, err)\n\tassert.Equal(t, 200, c)\n\n\tresp = regex.ReplaceAllString(resp, \"\")\n\n\tassert.Equal(t, len(expectedBookmarks), len(resp))\n\tassert.Equal(t, expectedBookmarks, resp)\n\n}\n\n\n<commit_msg>MG-402 creating search test<commit_after>package controller\n\nimport (\n\t\"testing\"\n\t\"github.com\/mageddo\/go-logging\"\n\t\"bk-api\/test\"\n\t\"bk-api\/service\"\n\t\"bk-api\/entity\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestGetV1_0ListBookmarksNoOffsetError(t *testing.T){\n\n\ttest.BuildDatabase()\n\n\tnowOffsetMsg := `{\"code\":400,\"message\":\"Please pass a valid offset and quantity\"}\n`\n\tresp, c, err := test.NewReq(\"GET\", BOOKMARK_V1)\n\n\tassert.Nil(t, err)\n\tassert.Equal(t, 400, c)\n\n\tassert.Equal(t, nowOffsetMsg, resp)\n\n}\n\nfunc TestGetV1_0ListBookmarks(t *testing.T){\n\n\tctx := logging.NewContext()\n\ttest.BuildDatabase()\n\n\texpectedBookmarks := \"[{\\\"id\\\":1,\\\"name\\\":\\\"X\\\",\\\"visibility\\\":1,\\\"html\\\":\\\"\\\",\\\"length\\\":3}]\"\n\n\tservice.NewBookmarkService(ctx).SaveBookmark(entity.NewBookmarkWithNameAndVisibility(\"X\", entity.PUBLIC))\n\tservice.NewBookmarkService(ctx).SaveBookmark(entity.NewBookmarkWithNameAndVisibility(\"X2\", entity.PUBLIC))\n\tservice.NewBookmarkService(ctx).SaveBookmark(entity.NewBookmarkWithNameAndVisibility(\"X3\", entity.PRIVATE))\n\n\tresp, c, err := test.NewReq(\"GET\", BOOKMARK_V1 + \"?from=0&quantity=1\")\n\n\tassert.Nil(t, err)\n\tassert.Equal(t, 200, c)\n\n\tresp = regex.ReplaceAllString(resp, \"\")\n\n\tassert.Equal(t, len(expectedBookmarks), len(resp))\n\tassert.Equal(t, expectedBookmarks, resp)\n\n}\n\nfunc TestGetV1_0ListBookmarksValidateFromSuccess(t *testing.T){\n\n\tctx := logging.NewContext()\n\ttest.BuildDatabase()\n\n\texpectedBookmarks := \"[{\\\"id\\\":2,\\\"name\\\":\\\"X2\\\",\\\"visibility\\\":1,\\\"html\\\":\\\"\\\",\\\"length\\\":3},{\\\"id\\\":3,\\\"name\\\":\\\"X3\\\",\\\"visibility\\\":0,\\\"html\\\":\\\"\\\",\\\"length\\\":3}]\"\n\n\tservice.NewBookmarkService(ctx).SaveBookmark(entity.NewBookmarkWithNameAndVisibility(\"X\", entity.PUBLIC))\n\tservice.NewBookmarkService(ctx).SaveBookmark(entity.NewBookmarkWithNameAndVisibility(\"X2\", entity.PUBLIC))\n\tservice.NewBookmarkService(ctx).SaveBookmark(entity.NewBookmarkWithNameAndVisibility(\"X3\", entity.PRIVATE))\n\n\tresp, c, err := test.NewReq(\"GET\", BOOKMARK_V1 + \"?from=1&quantity=2\")\n\n\tassert.Nil(t, err)\n\tassert.Equal(t, 200, c)\n\n\tresp = regex.ReplaceAllString(resp, \"\")\n\n\tassert.Equal(t, len(expectedBookmarks), len(resp))\n\tassert.Equal(t, expectedBookmarks, resp)\n\n}\n\nfunc TestGetV1_0ListBookmarksSearchSuccess(t *testing.T){\n\n\tctx := logging.NewContext()\n\ttest.BuildDatabase()\n\n\texpectedBookmarks := `[{\"id\":2,\"name\":\"Android 7.0 was released\",\"html\":\"\",\"length\":2},{\"id\":3,\"name\":\"Separate your software release by major, minor and patch\",\"html\":\"\",\"length\":2}]`\n\n\tservice.NewBookmarkService(ctx).SaveBookmark(entity.NewBookmarkWithNameAndVisibility(\"Google is the most popular search engine site\", entity.PUBLIC))\n\tservice.NewBookmarkService(ctx).SaveBookmark(entity.NewBookmarkWithNameAndVisibility(\"Android 7.0 was released\", entity.PUBLIC))\n\tservice.NewBookmarkService(ctx).SaveBookmark(entity.NewBookmarkWithNameAndVisibility(\"Separate your software release by major, minor and patch\", entity.PRIVATE))\n\n\tresp, c, err := test.NewReq(\"GET\", BOOKMARK_V1 + \"?from=0&quantity=3&query=release\")\n\n\tassert.Nil(t, err)\n\tassert.Equal(t, 200, c)\n\n\tresp = regex.ReplaceAllString(resp, \"\")\n\n\tassert.Equal(t, len(expectedBookmarks), len(resp))\n\tassert.Equal(t, expectedBookmarks, resp)\n\n}\n\n\n<|endoftext|>"} {"text":"<commit_before>package pipelines_test\n\nimport (\n\t\"github.com\/concourse\/testflight\/gitserver\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n)\n\nvar _ = Describe(\"Configuring a resource in a pipeline config\", func() {\n\tvar gitServer *gitserver.Server\n\n\tBeforeEach(func() {\n\t\tgitServer = gitserver.Start(client)\n\n\t\tconfigurePipeline(\n\t\t\t\"-c\", \"fixtures\/config_params.yml\",\n\t\t\t\"-v\", \"git-server=\"+gitServer.URI(),\n\t\t)\n\n\t\ttaskFileContents := `---\nplatform: linux\nimage_resource:\n type: docker-image\n source: {repository: busybox}\nrun:\n path: sh\n args: [\"-c\", \"printenv SOURCE_PARAM\"]\nparams:\n SOURCE_PARAM: file_source\n`\n\t\tgitServer.WriteFile(\"some-repo\/task.yml\", taskFileContents)\n\t\tgitServer.CommitResourceWithFile(\"task.yml\")\n\t})\n\n\tAfterEach(func() {\n\t\tgitServer.Stop()\n\t})\n\n\tContext(\"when specifying file in task config\", func() {\n\t\tIt(\"executes the file with params specified in file\", func() {\n\t\t\twatch := flyWatch(\"file-test\")\n\t\t\tExpect(watch).To(gbytes.Say(\"file_source\"))\n\t\t})\n\n\t\tIt(\"executes the file with params from config\", func() {\n\t\t\twatch := flyWatch(\"file-config-params-test\")\n\t\t\tExpect(watch).To(gbytes.Say(\"config_params_source\"))\n\t\t})\n\n\t\tIt(\"executes the file with job params\", func() {\n\t\t\twatch := flyWatch(\"file-params-test\")\n\t\t\tExpect(watch).To(gbytes.Say(\"job_params_source\"))\n\t\t})\n\n\t\tIt(\"executes the file with job params, overlaying the config params\", func() {\n\t\t\twatch := flyWatch(\"everything-params-test\")\n\t\t\tExpect(watch).To(gbytes.Say(\"job_params_source\"))\n\t\t})\n\t})\n})\n<commit_msg>attempt some other syntax<commit_after>package pipelines_test\n\nimport (\n\t\"github.com\/concourse\/testflight\/gitserver\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n)\n\nvar _ = Describe(\"Configuring a resource in a pipeline config\", func() {\n\tvar gitServer *gitserver.Server\n\n\tBeforeEach(func() {\n\t\tgitServer = gitserver.Start(client)\n\n\t\tconfigurePipeline(\n\t\t\t\"-c\", \"fixtures\/config_params.yml\",\n\t\t\t\"-v\", \"git-server=\"+gitServer.URI(),\n\t\t)\n\n\t\ttaskFileContents := `---\nplatform: linux\nimage_resource:\n type: docker-image\n source: {repository: busybox}\nrun:\n path: sh\n args: [\"-ec\", \"echo -n 'SOURCE_PARAM is '; printenv SOURCE_PARAM; echo .\"]\nparams:\n SOURCE_PARAM: file_source\n`\n\t\tgitServer.WriteFile(\"some-repo\/task.yml\", taskFileContents)\n\t\tgitServer.CommitResourceWithFile(\"task.yml\")\n\t})\n\n\tAfterEach(func() {\n\t\tgitServer.Stop()\n\t})\n\n\tContext(\"when specifying file in task config\", func() {\n\t\tIt(\"executes the file with params specified in file\", func() {\n\t\t\twatch := flyWatch(\"file-test\")\n\t\t\tExpect(watch).To(gbytes.Say(\"file_source\"))\n\t\t})\n\n\t\tIt(\"executes the file with params from config\", func() {\n\t\t\twatch := flyWatch(\"file-config-params-test\")\n\t\t\tExpect(watch).To(gbytes.Say(\"config_params_source\"))\n\t\t})\n\n\t\tIt(\"executes the file with job params\", func() {\n\t\t\twatch := flyWatch(\"file-params-test\")\n\t\t\tExpect(watch).To(gbytes.Say(\"job_params_source\"))\n\t\t})\n\n\t\tIt(\"executes the file with job params, overlaying the config params\", func() {\n\t\t\twatch := flyWatch(\"everything-params-test\")\n\t\t\tExpect(watch).To(gbytes.Say(\"job_params_source\"))\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/hellofresh\/janus\/pkg\/proxy\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestNewFileSystemRepository(t *testing.T) {\n\twd, err := os.Getwd()\n\tassert.NoError(t, err)\n\tassert.Contains(t, wd, \"github.com\/hellofresh\/janus\")\n\n\t\/\/ ...\/github.com\/hellofresh\/janus\/pkg\/api\/..\/..\/examples\/apis\n\texampleAPIsPath := filepath.Join(wd, \"..\", \"..\", \"examples\", \"apis\")\n\tinfo, err := os.Stat(exampleAPIsPath)\n\tassert.NoError(t, err)\n\tassert.True(t, info.IsDir())\n\n\tfsRepo, err := NewFileSystemRepository(exampleAPIsPath)\n\tassert.NoError(t, err)\n\n\tallDefinitions, err := fsRepo.FindAll()\n\tassert.NoError(t, err)\n\tassert.Equal(t, 2, len(allDefinitions))\n\n\thealthDefinitions, err := fsRepo.FindValidAPIHealthChecks()\n\tassert.NoError(t, err)\n\tassert.Equal(t, 1, len(healthDefinitions))\n\tassert.Equal(t, \"example\", healthDefinitions[0].Name)\n\tassert.Equal(t, \"\/example\/*\", healthDefinitions[0].Proxy.ListenPath)\n\tassert.Equal(t, \"https:\/\/example.com\/status\", healthDefinitions[0].HealthCheck.URL)\n\n\tassertFindByName(t, fsRepo)\n\tassertFindByFindByListenPath(t, fsRepo)\n\tassertExists(t, fsRepo)\n\n\tdefToAdd := &Definition{Name: \"foo-bar\", Proxy: &proxy.Definition{ListenPath: \"\/foo\/bar\/*\"}}\n\terr = fsRepo.Add(defToAdd)\n\tassert.NoError(t, err)\n\n\tdef, err := fsRepo.FindByName(defToAdd.Name)\n\tassert.NoError(t, err)\n\tassert.Equal(t, defToAdd.Name, def.Name)\n\tassert.Equal(t, defToAdd.Proxy.ListenPath, def.Proxy.ListenPath)\n\n\tdef, err = fsRepo.FindByListenPath(defToAdd.Proxy.ListenPath)\n\tassert.NoError(t, err)\n\tassert.Equal(t, defToAdd.Name, def.Name)\n\tassert.Equal(t, defToAdd.Proxy.ListenPath, def.Proxy.ListenPath)\n\n\texists, err := fsRepo.Exists(&Definition{Name: defToAdd.Name})\n\tassert.True(t, exists)\n\tassert.Equal(t, ErrAPINameExists, err)\n\n\texists, err = fsRepo.Exists(&Definition{\n\t\tName: time.Now().Format(time.RFC3339Nano),\n\t\tProxy: &proxy.Definition{ListenPath: defToAdd.Proxy.ListenPath},\n\t})\n\tassert.True(t, exists)\n\tassert.Equal(t, ErrAPIListenPathExists, err)\n\n\terr = fsRepo.Remove(defToAdd.Name)\n\tassert.NoError(t, err)\n\n\terr = fsRepo.Remove(defToAdd.Name)\n\tassert.Equal(t, ErrAPIDefinitionNotFound, err)\n\n\t_, err = fsRepo.FindByName(defToAdd.Name)\n\tassert.Equal(t, ErrAPIDefinitionNotFound, err)\n\n\t_, err = fsRepo.FindByListenPath(defToAdd.Proxy.ListenPath)\n\tassert.Equal(t, ErrAPIDefinitionNotFound, err)\n\n\texists, err = fsRepo.Exists(defToAdd)\n\tassert.False(t, exists)\n\tassert.NoError(t, err)\n}\n\nfunc assertFindByName(t *testing.T, fsRepo *FileSystemRepository) {\n\tdef, err := fsRepo.FindByName(\"example\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"example\", def.Name)\n\tassert.Equal(t, \"\/example\/*\", def.Proxy.ListenPath)\n\n\tdef, err = fsRepo.FindByName(\"posts\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"posts\", def.Name)\n\tassert.Equal(t, \"\/posts\/*\", def.Proxy.ListenPath)\n\n\t_, err = fsRepo.FindByName(\"foo\")\n\tassert.Equal(t, ErrAPIDefinitionNotFound, err)\n}\n\nfunc assertFindByFindByListenPath(t *testing.T, fsRepo *FileSystemRepository) {\n\tdef, err := fsRepo.FindByListenPath(\"\/example\/*\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"example\", def.Name)\n\tassert.Equal(t, \"\/example\/*\", def.Proxy.ListenPath)\n\n\tdef, err = fsRepo.FindByListenPath(\"\/posts\/*\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"posts\", def.Name)\n\tassert.Equal(t, \"\/posts\/*\", def.Proxy.ListenPath)\n\n\t_, err = fsRepo.FindByListenPath(\"\/foo\/*\")\n\tassert.Equal(t, ErrAPIDefinitionNotFound, err)\n}\n\nfunc assertExists(t *testing.T, fsRepo *FileSystemRepository) {\n\texists, err := fsRepo.Exists(&Definition{Name: \"example\"})\n\tassert.True(t, exists)\n\tassert.Equal(t, ErrAPINameExists, err)\n\n\texists, err = fsRepo.Exists(&Definition{Name: \"posts\"})\n\tassert.True(t, exists)\n\tassert.Equal(t, ErrAPINameExists, err)\n\n\texists, err = fsRepo.Exists(&Definition{Name: \"example1\", Proxy: &proxy.Definition{ListenPath: \"\/example\/*\"}})\n\tassert.True(t, exists)\n\tassert.Equal(t, ErrAPIListenPathExists, err)\n\n\texists, err = fsRepo.Exists(&Definition{Name: \"posts1\", Proxy: &proxy.Definition{ListenPath: \"\/posts\/*\"}})\n\tassert.True(t, exists)\n\tassert.Equal(t, ErrAPIListenPathExists, err)\n\n\texists, err = fsRepo.Exists(&Definition{Name: \"example1\", Proxy: &proxy.Definition{ListenPath: \"\/example1\/*\"}})\n\tassert.False(t, exists)\n\tassert.NoError(t, err)\n\n\texists, err = fsRepo.Exists(&Definition{Name: \"posts1\", Proxy: &proxy.Definition{ListenPath: \"\/posts1\/*\"}})\n\tassert.False(t, exists)\n\tassert.NoError(t, err)\n}\n<commit_msg>Added definition validation test<commit_after>package api\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/hellofresh\/janus\/pkg\/proxy\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc newRepo(t *testing.T) *FileSystemRepository {\n\twd, err := os.Getwd()\n\tassert.NoError(t, err)\n\tassert.Contains(t, wd, \"github.com\/hellofresh\/janus\")\n\n\t\/\/ ...\/github.com\/hellofresh\/janus\/pkg\/api\/..\/..\/examples\/apis\n\texampleAPIsPath := filepath.Join(wd, \"..\", \"..\", \"examples\", \"apis\")\n\tinfo, err := os.Stat(exampleAPIsPath)\n\tassert.NoError(t, err)\n\tassert.True(t, info.IsDir())\n\n\tfsRepo, err := NewFileSystemRepository(exampleAPIsPath)\n\tassert.NoError(t, err)\n\n\treturn fsRepo\n}\n\nfunc TestNewFileSystemRepository(t *testing.T) {\n\tfsRepo := newRepo(t)\n\n\tallDefinitions, err := fsRepo.FindAll()\n\tassert.NoError(t, err)\n\tassert.Equal(t, 2, len(allDefinitions))\n\n\thealthDefinitions, err := fsRepo.FindValidAPIHealthChecks()\n\tassert.NoError(t, err)\n\tassert.Equal(t, 1, len(healthDefinitions))\n\tassert.Equal(t, \"example\", healthDefinitions[0].Name)\n\tassert.Equal(t, \"\/example\/*\", healthDefinitions[0].Proxy.ListenPath)\n\tassert.Equal(t, \"https:\/\/example.com\/status\", healthDefinitions[0].HealthCheck.URL)\n\n\tassertFindByName(t, fsRepo)\n\tassertFindByFindByListenPath(t, fsRepo)\n\tassertExists(t, fsRepo)\n\n\tdefToAdd := &Definition{Name: \"foo-bar\", Proxy: &proxy.Definition{ListenPath: \"\/foo\/bar\/*\"}}\n\terr = fsRepo.Add(defToAdd)\n\tassert.NoError(t, err)\n\n\tdef, err := fsRepo.FindByName(defToAdd.Name)\n\tassert.NoError(t, err)\n\tassert.Equal(t, defToAdd.Name, def.Name)\n\tassert.Equal(t, defToAdd.Proxy.ListenPath, def.Proxy.ListenPath)\n\n\tdef, err = fsRepo.FindByListenPath(defToAdd.Proxy.ListenPath)\n\tassert.NoError(t, err)\n\tassert.Equal(t, defToAdd.Name, def.Name)\n\tassert.Equal(t, defToAdd.Proxy.ListenPath, def.Proxy.ListenPath)\n\n\texists, err := fsRepo.Exists(&Definition{Name: defToAdd.Name})\n\tassert.True(t, exists)\n\tassert.Equal(t, ErrAPINameExists, err)\n\n\texists, err = fsRepo.Exists(&Definition{\n\t\tName: time.Now().Format(time.RFC3339Nano),\n\t\tProxy: &proxy.Definition{ListenPath: defToAdd.Proxy.ListenPath},\n\t})\n\tassert.True(t, exists)\n\tassert.Equal(t, ErrAPIListenPathExists, err)\n\n\terr = fsRepo.Remove(defToAdd.Name)\n\tassert.NoError(t, err)\n\n\terr = fsRepo.Remove(defToAdd.Name)\n\tassert.Equal(t, ErrAPIDefinitionNotFound, err)\n\n\t_, err = fsRepo.FindByName(defToAdd.Name)\n\tassert.Equal(t, ErrAPIDefinitionNotFound, err)\n\n\t_, err = fsRepo.FindByListenPath(defToAdd.Proxy.ListenPath)\n\tassert.Equal(t, ErrAPIDefinitionNotFound, err)\n\n\texists, err = fsRepo.Exists(defToAdd)\n\tassert.False(t, exists)\n\tassert.NoError(t, err)\n}\n\nfunc assertFindByName(t *testing.T, fsRepo *FileSystemRepository) {\n\tdef, err := fsRepo.FindByName(\"example\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"example\", def.Name)\n\tassert.Equal(t, \"\/example\/*\", def.Proxy.ListenPath)\n\n\tdef, err = fsRepo.FindByName(\"posts\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"posts\", def.Name)\n\tassert.Equal(t, \"\/posts\/*\", def.Proxy.ListenPath)\n\n\t_, err = fsRepo.FindByName(\"foo\")\n\tassert.Equal(t, ErrAPIDefinitionNotFound, err)\n}\n\nfunc assertFindByFindByListenPath(t *testing.T, fsRepo *FileSystemRepository) {\n\tdef, err := fsRepo.FindByListenPath(\"\/example\/*\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"example\", def.Name)\n\tassert.Equal(t, \"\/example\/*\", def.Proxy.ListenPath)\n\n\tdef, err = fsRepo.FindByListenPath(\"\/posts\/*\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"posts\", def.Name)\n\tassert.Equal(t, \"\/posts\/*\", def.Proxy.ListenPath)\n\n\t_, err = fsRepo.FindByListenPath(\"\/foo\/*\")\n\tassert.Equal(t, ErrAPIDefinitionNotFound, err)\n}\n\nfunc assertExists(t *testing.T, fsRepo *FileSystemRepository) {\n\texists, err := fsRepo.Exists(&Definition{Name: \"example\"})\n\tassert.True(t, exists)\n\tassert.Equal(t, ErrAPINameExists, err)\n\n\texists, err = fsRepo.Exists(&Definition{Name: \"posts\"})\n\tassert.True(t, exists)\n\tassert.Equal(t, ErrAPINameExists, err)\n\n\texists, err = fsRepo.Exists(&Definition{Name: \"example1\", Proxy: &proxy.Definition{ListenPath: \"\/example\/*\"}})\n\tassert.True(t, exists)\n\tassert.Equal(t, ErrAPIListenPathExists, err)\n\n\texists, err = fsRepo.Exists(&Definition{Name: \"posts1\", Proxy: &proxy.Definition{ListenPath: \"\/posts\/*\"}})\n\tassert.True(t, exists)\n\tassert.Equal(t, ErrAPIListenPathExists, err)\n\n\texists, err = fsRepo.Exists(&Definition{Name: \"example1\", Proxy: &proxy.Definition{ListenPath: \"\/example1\/*\"}})\n\tassert.False(t, exists)\n\tassert.NoError(t, err)\n\n\texists, err = fsRepo.Exists(&Definition{Name: \"posts1\", Proxy: &proxy.Definition{ListenPath: \"\/posts1\/*\"}})\n\tassert.False(t, exists)\n\tassert.NoError(t, err)\n}\n\nfunc TestFileSystemRepository_Add(t *testing.T) {\n\tfsRepo := newRepo(t)\n\n\tinvalidName := &Definition{Name: \"\"}\n\terr := fsRepo.Add(invalidName)\n\tassert.Error(t, err)\n}\n<|endoftext|>"} {"text":"<commit_before>package connector\n\nimport (\n\t\"context\"\n\t\"net\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/datawire\/ambassador\/pkg\/metriton\"\n\t\"github.com\/datawire\/dlib\/dgroup\"\n\t\"github.com\/datawire\/dlib\/dlog\"\n\t\"github.com\/datawire\/dlib\/dutil\"\n\t\"github.com\/golang\/protobuf\/ptypes\/empty\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/cobra\"\n\t\"golang.org\/x\/crypto\/ssh\/terminal\"\n\t\"google.golang.org\/grpc\"\n\n\t\"github.com\/datawire\/telepresence2\/pkg\/client\"\n\tmanager \"github.com\/datawire\/telepresence2\/pkg\/rpc\"\n\trpc \"github.com\/datawire\/telepresence2\/pkg\/rpc\/connector\"\n\t\"github.com\/datawire\/telepresence2\/pkg\/rpc\/daemon\"\n\t\"github.com\/datawire\/telepresence2\/pkg\/rpc\/version\"\n)\n\nvar help = `The Telepresence Connect is a background component that manages a connection. It\nrequires that a daemon is already running.\n\nLaunch the Telepresence Connector:\n telepresence connect\n\nThe Connector uses the Daemon's log so its output can be found in\n ` + client.Logfile + `\nto troubleshoot problems.\n`\n\n\/\/ service represents the state of the Telepresence Connector\ntype service struct {\n\trpc.UnimplementedConnectorServer\n\tdaemon daemon.DaemonClient\n\tdaemonLogger daemonLogger\n\tcluster *k8sCluster\n\tbridge *bridge\n\ttrafficMgr *trafficManager\n\tgrpc *grpc.Server\n\tctx context.Context\n\tcancel func()\n}\n\n\/\/ Command returns the CLI sub-command for \"connector-foreground\"\nfunc Command() *cobra.Command {\n\tvar init bool\n\tc := &cobra.Command{\n\t\tUse: \"connector-foreground\",\n\t\tShort: \"Launch Telepresence Connector in the foreground (debug)\",\n\t\tArgs: cobra.ExactArgs(0),\n\t\tHidden: true,\n\t\tLong: help,\n\t\tRunE: func(_ *cobra.Command, args []string) error {\n\t\t\treturn run(init)\n\t\t},\n\t}\n\tflags := c.Flags()\n\tflags.BoolVar(&init, \"init\", false, \"initialize running connector (for debugging)\")\n\treturn c\n}\n\ntype callCtx struct {\n\tcontext.Context\n\tcaller context.Context\n}\n\nfunc (c callCtx) Deadline() (deadline time.Time, ok bool) {\n\tif dl, ok := c.Context.Deadline(); ok {\n\t\treturn dl, true\n\t}\n\treturn c.caller.Deadline()\n}\n\nfunc (c callCtx) Done() <-chan struct{} {\n\tch := make(chan struct{})\n\tgo func() {\n\t\tselect {\n\t\tcase <-c.Context.Done():\n\t\t\tclose(ch)\n\t\tcase <-c.caller.Done():\n\t\t\tclose(ch)\n\t\t}\n\t}()\n\treturn ch\n}\n\nfunc (c callCtx) Err() error {\n\terr := c.Context.Err()\n\tif err == nil {\n\t\terr = c.caller.Err()\n\t}\n\treturn err\n}\n\nfunc (c callCtx) Value(key interface{}) interface{} {\n\treturn c.Context.Value(key)\n}\n\nfunc (s *service) callGroup(c context.Context) *dgroup.Group {\n\treturn dgroup.NewGroup(&callCtx{Context: s.ctx, caller: c}, dgroup.GroupConfig{})\n}\n\nfunc callRecovery(c context.Context, r interface{}, err error) error {\n\tperr := dutil.PanicToError(r)\n\tif perr != nil {\n\t\tif err == nil {\n\t\t\terr = perr\n\t\t} else {\n\t\t\tdlog.Errorf(c, \"%+v\", perr)\n\t\t}\n\t}\n\tif err != nil {\n\t\tdlog.Errorf(c, \"%+v\", err)\n\t}\n\treturn err\n}\n\nfunc (s *service) Version(_ context.Context, _ *empty.Empty) (*version.VersionInfo, error) {\n\treturn &version.VersionInfo{\n\t\tApiVersion: client.APIVersion,\n\t\tVersion: client.Version(),\n\t}, nil\n}\n\nfunc (s *service) Status(c context.Context, _ *empty.Empty) (result *rpc.ConnectorStatus, err error) {\n\tg := s.callGroup(c)\n\tg.Go(\"Status\", func(c context.Context) (err error) {\n\t\tdefer func() { err = callRecovery(c, recover(), err) }()\n\t\tresult = s.status(c)\n\t\treturn\n\t})\n\terr = g.Wait()\n\treturn\n}\n\nfunc (s *service) Connect(c context.Context, cr *rpc.ConnectRequest) (ci *rpc.ConnectInfo, err error) {\n\tg := s.callGroup(c)\n\tg.Go(\"Connect\", func(c context.Context) (err error) {\n\t\tdefer func() { err = callRecovery(c, recover(), err) }()\n\t\tci = s.connect(c, cr)\n\t\treturn\n\t})\n\terr = g.Wait()\n\treturn\n}\n\nfunc (s *service) CreateIntercept(c context.Context, ir *manager.CreateInterceptRequest) (result *rpc.InterceptResult, err error) {\n\tie, is := s.interceptStatus()\n\tif ie != rpc.InterceptError_UNSPECIFIED {\n\t\treturn &rpc.InterceptResult{Error: ie, ErrorText: is}, nil\n\t}\n\tg := s.callGroup(c)\n\tg.Go(\"CreateIntercept\", func(c context.Context) (err error) {\n\t\tdefer func() { err = callRecovery(c, recover(), err) }()\n\t\tresult, err = s.trafficMgr.addIntercept(c, s.ctx, ir)\n\t\treturn\n\t})\n\terr = g.Wait()\n\treturn\n}\n\nfunc (s *service) RemoveIntercept(c context.Context, rr *manager.RemoveInterceptRequest2) (result *rpc.InterceptResult, err error) {\n\tie, is := s.interceptStatus()\n\tif ie != rpc.InterceptError_UNSPECIFIED {\n\t\treturn &rpc.InterceptResult{Error: ie, ErrorText: is}, nil\n\t}\n\tg := s.callGroup(c)\n\tg.Go(\"RemoveIntercept\", func(c context.Context) (err error) {\n\t\tdefer func() { err = callRecovery(c, recover(), err) }()\n\t\terr = s.trafficMgr.removeIntercept(c, rr.Name)\n\t\treturn\n\t})\n\terr = g.Wait()\n\treturn &rpc.InterceptResult{}, err\n}\n\nfunc (s *service) AvailableIntercepts(_ context.Context, _ *empty.Empty) (*manager.AgentInfoSnapshot, error) {\n\tif s.trafficMgr.grpc == nil {\n\t\treturn &manager.AgentInfoSnapshot{}, nil\n\t}\n\treturn s.trafficMgr.agentInfoSnapshot(), nil\n}\n\nfunc (s *service) ListIntercepts(_ context.Context, _ *empty.Empty) (*manager.InterceptInfoSnapshot, error) {\n\tif s.trafficMgr.grpc == nil {\n\t\treturn &manager.InterceptInfoSnapshot{}, nil\n\t}\n\treturn s.trafficMgr.interceptInfoSnapshot(), nil\n}\n\nfunc (s *service) Quit(_ context.Context, _ *empty.Empty) (*empty.Empty, error) {\n\ts.cancel()\n\treturn &empty.Empty{}, nil\n}\n\n\/\/ daemonLogger is an io.Writer implementation that sends data to the daemon logger\ntype daemonLogger struct {\n\tstream daemon.Daemon_LoggerClient\n}\n\nfunc (d *daemonLogger) Write(data []byte) (n int, err error) {\n\terr = d.stream.Send(&daemon.LogMessage{Text: data})\n\treturn len(data), err\n}\n\n\/\/ connect the connector to a cluster\nfunc (s *service) connect(c context.Context, cr *rpc.ConnectRequest) *rpc.ConnectInfo {\n\treporter := &metriton.Reporter{\n\t\tApplication: \"telepresence2\",\n\t\tVersion: client.Version(),\n\t\tGetInstallID: func(_ *metriton.Reporter) (string, error) { return cr.InstallId, nil },\n\t\tBaseMetadata: map[string]interface{}{\"mode\": \"daemon\"},\n\t}\n\n\tif _, err := reporter.Report(c, map[string]interface{}{\"action\": \"connect\"}); err != nil {\n\t\tdlog.Errorf(c, \"report failed: %+v\", err)\n\t}\n\n\t\/\/ Sanity checks\n\tr := &rpc.ConnectInfo{}\n\tif s.cluster != nil {\n\t\tr.ClusterContext = s.cluster.Context\n\t\tr.ClusterServer = s.cluster.server()\n\t\tr.Error = rpc.ConnectInfo_ALREADY_CONNECTED\n\t\treturn r\n\t}\n\tif s.bridge != nil {\n\t\tr.Error = rpc.ConnectInfo_DISCONNECTING\n\t\treturn r\n\t}\n\n\tdlog.Info(c, \"Connecting to traffic manager...\")\n\tcluster, err := trackKCluster(s.ctx, cr.Context, cr.Namespace, cr.Args)\n\tif err != nil {\n\t\tdlog.Errorf(c, \"unable to track k8s cluster: %+v\", err)\n\t\tr.Error = rpc.ConnectInfo_CLUSTER_FAILED\n\t\tr.ErrorText = err.Error()\n\t\ts.cancel()\n\t\treturn r\n\t}\n\ts.cluster = cluster\n\n\t\/*\n\t\tpreviewHost, err := cluster.getClusterPreviewHostname(p)\n\t\tif err != nil {\n\t\t\tp.Logf(\"get preview URL hostname: %+v\", err)\n\t\t\tpreviewHost = \"\"\n\t\t}\n\t*\/\n\n\tdlog.Infof(c, \"Connected to context %s (%s)\", s.cluster.Context, s.cluster.server())\n\n\tr.ClusterContext = s.cluster.Context\n\tr.ClusterServer = s.cluster.server()\n\n\ttmgr, err := newTrafficManager(s.ctx, s.cluster, cr.InstallId, cr.IsCi)\n\tif err != nil {\n\t\tdlog.Errorf(c, \"Unable to connect to TrafficManager: %s\", err)\n\t\tr.Error = rpc.ConnectInfo_TRAFFIC_MANAGER_FAILED\n\t\tr.ErrorText = err.Error()\n\t\tif cr.InterceptEnabled {\n\t\t\t\/\/ No point in continuing without a traffic manager\n\t\t\ts.cancel()\n\t\t}\n\t\treturn r\n\t}\n\n\t\/\/ tmgr.previewHost = previewHost\n\ts.trafficMgr = tmgr\n\tdlog.Infof(c, \"Starting traffic-manager bridge in context %s, namespace %s\", cluster.Context, cluster.Namespace)\n\tbr := newBridge(cluster, s.daemon, tmgr.sshPort)\n\terr = br.start(s.ctx)\n\tif err != nil {\n\t\tdlog.Errorf(c, \"Failed to start traffic-manager bridge: %s\", err.Error())\n\t\tr.Error = rpc.ConnectInfo_BRIDGE_FAILED\n\t\tr.ErrorText = err.Error()\n\t\t\/\/ No point in continuing without a bridge\n\t\ts.cancel()\n\t\treturn r\n\t}\n\ts.bridge = br\n\n\tif !cr.InterceptEnabled {\n\t\treturn r\n\t}\n\n\t\/\/ Wait for traffic manager to connect\n\tdlog.Info(c, \"Waiting for TrafficManager to connect\")\n\tif err := tmgr.waitUntilStarted(); err != nil {\n\t\tdlog.Errorf(c, \"Failed to start traffic-manager: %s\", err.Error())\n\t\tr.Error = rpc.ConnectInfo_TRAFFIC_MANAGER_FAILED\n\t\tr.ErrorText = err.Error()\n\t\t\/\/ No point in continuing without a traffic manager\n\t\ts.cancel()\n\t}\n\treturn r\n}\n\n\/\/ setUpLogging connects to the daemon logger\nfunc (s *service) setUpLogging(c context.Context) (context.Context, error) {\n\tvar err error\n\ts.daemonLogger.stream, err = s.daemon.Logger(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogger := logrus.StandardLogger()\n\tlogger.Out = &s.daemonLogger\n\tloggingToTerminal := terminal.IsTerminal(int(os.Stdout.Fd()))\n\tif loggingToTerminal {\n\t\tlogger.Formatter = client.NewFormatter(\"15:04:05\")\n\t} else {\n\t\tlogger.Formatter = client.NewFormatter(\"2006\/01\/02 15:04:05\")\n\t}\n\tlogger.Level = logrus.DebugLevel\n\treturn dlog.WithLogger(c, dlog.WrapLogrus(logger)), nil\n}\n\n\/\/ run is the main function when executing as the connector\nfunc run(init bool) error {\n\t\/\/ establish a connection to the daemon gRPC service\n\tconn, err := client.DialSocket(client.DaemonSocketName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\ts := &service{daemon: daemon.NewDaemonClient(conn), grpc: grpc.NewServer()}\n\trpc.RegisterConnectorServer(s.grpc, s)\n\n\tc, err := s.setUpLogging(context.Background())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc = dgroup.WithGoroutineName(c, \"connector\")\n\n\tvar cancel context.CancelFunc\n\tc, cancel = context.WithCancel(c)\n\ts.cancel = func() {\n\t\tdlog.Debug(s.ctx, \"cancelling connector context\")\n\t\tcancel()\n\t}\n\n\tg := dgroup.NewGroup(c, dgroup.GroupConfig{\n\t\tSoftShutdownTimeout: 2 * time.Second,\n\t\tEnableSignalHandling: true})\n\n\tdlog.Info(c, \"---\")\n\tdlog.Infof(c, \"Telepresence Connector %s starting...\", client.DisplayVersion())\n\tdlog.Infof(c, \"PID is %d\", os.Getpid())\n\tdlog.Info(c, \"\")\n\n\tsvcStarted := make(chan bool)\n\tif init {\n\t\tg.Go(\"debug-init\", func(c context.Context) error {\n\t\t\t<-svcStarted\n\t\t\t_, _ = s.Connect(c, &rpc.ConnectRequest{InstallId: \"dummy-id\"})\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tg.Go(\"service\", func(c context.Context) (err error) {\n\t\tvar listener net.Listener\n\t\tdefer func() {\n\t\t\tif perr := dutil.PanicToError(recover()); perr != nil {\n\t\t\t\tif err == nil {\n\t\t\t\t\terr = perr\n\t\t\t\t}\n\t\t\t\tif listener != nil {\n\t\t\t\t\t_ = listener.Close()\n\t\t\t\t}\n\t\t\t\t_ = os.Remove(client.ConnectorSocketName)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tdlog.Errorf(c, \"Server ended with: %s\", err.Error())\n\t\t\t} else {\n\t\t\t\tdlog.Debug(c, \"Server ended\")\n\t\t\t}\n\t\t}()\n\n\t\t\/\/ Listen on unix domain socket\n\t\tdlog.Debug(c, \"Server starting\")\n\t\ts.ctx = c\n\t\tlistener, err = net.Listen(\"unix\", client.ConnectorSocketName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tclose(svcStarted)\n\t\treturn s.grpc.Serve(listener)\n\t})\n\n\tg.Go(\"teardown\", s.handleShutdown)\n\n\terr = g.Wait()\n\tif err != nil {\n\t\tdlog.Error(c, err.Error())\n\t}\n\treturn err\n}\n\n\/\/ handleShutdown ensures that the connector quits gracefully when receiving a signal\n\/\/ or when the context is cancelled.\nfunc (s *service) handleShutdown(c context.Context) error {\n\tdefer s.grpc.GracefulStop()\n\n\t<-c.Done()\n\tdlog.Info(c, \"Shutting down\")\n\n\tcluster := s.cluster\n\tif cluster == nil {\n\t\treturn nil\n\t}\n\ts.cluster = nil\n\ttrafficMgr := s.trafficMgr\n\n\ts.trafficMgr = nil\n\tif trafficMgr != nil {\n\t\t_ = trafficMgr.clearIntercepts(context.Background())\n\t\t_ = trafficMgr.Close()\n\t}\n\ts.bridge = nil\n\treturn nil\n}\n<commit_msg>Do not wait for meriton.Reporter to report<commit_after>package connector\n\nimport (\n\t\"context\"\n\t\"net\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/datawire\/ambassador\/pkg\/metriton\"\n\t\"github.com\/datawire\/dlib\/dgroup\"\n\t\"github.com\/datawire\/dlib\/dlog\"\n\t\"github.com\/datawire\/dlib\/dutil\"\n\t\"github.com\/golang\/protobuf\/ptypes\/empty\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/spf13\/cobra\"\n\t\"golang.org\/x\/crypto\/ssh\/terminal\"\n\t\"google.golang.org\/grpc\"\n\n\t\"github.com\/datawire\/telepresence2\/pkg\/client\"\n\tmanager \"github.com\/datawire\/telepresence2\/pkg\/rpc\"\n\trpc \"github.com\/datawire\/telepresence2\/pkg\/rpc\/connector\"\n\t\"github.com\/datawire\/telepresence2\/pkg\/rpc\/daemon\"\n\t\"github.com\/datawire\/telepresence2\/pkg\/rpc\/version\"\n)\n\nvar help = `The Telepresence Connect is a background component that manages a connection. It\nrequires that a daemon is already running.\n\nLaunch the Telepresence Connector:\n telepresence connect\n\nThe Connector uses the Daemon's log so its output can be found in\n ` + client.Logfile + `\nto troubleshoot problems.\n`\n\n\/\/ service represents the state of the Telepresence Connector\ntype service struct {\n\trpc.UnimplementedConnectorServer\n\tdaemon daemon.DaemonClient\n\tdaemonLogger daemonLogger\n\tcluster *k8sCluster\n\tbridge *bridge\n\ttrafficMgr *trafficManager\n\tgrpc *grpc.Server\n\tctx context.Context\n\tcancel func()\n}\n\n\/\/ Command returns the CLI sub-command for \"connector-foreground\"\nfunc Command() *cobra.Command {\n\tvar init bool\n\tc := &cobra.Command{\n\t\tUse: \"connector-foreground\",\n\t\tShort: \"Launch Telepresence Connector in the foreground (debug)\",\n\t\tArgs: cobra.ExactArgs(0),\n\t\tHidden: true,\n\t\tLong: help,\n\t\tRunE: func(_ *cobra.Command, args []string) error {\n\t\t\treturn run(init)\n\t\t},\n\t}\n\tflags := c.Flags()\n\tflags.BoolVar(&init, \"init\", false, \"initialize running connector (for debugging)\")\n\treturn c\n}\n\ntype callCtx struct {\n\tcontext.Context\n\tcaller context.Context\n}\n\nfunc (c callCtx) Deadline() (deadline time.Time, ok bool) {\n\tif dl, ok := c.Context.Deadline(); ok {\n\t\treturn dl, true\n\t}\n\treturn c.caller.Deadline()\n}\n\nfunc (c callCtx) Done() <-chan struct{} {\n\tch := make(chan struct{})\n\tgo func() {\n\t\tselect {\n\t\tcase <-c.Context.Done():\n\t\t\tclose(ch)\n\t\tcase <-c.caller.Done():\n\t\t\tclose(ch)\n\t\t}\n\t}()\n\treturn ch\n}\n\nfunc (c callCtx) Err() error {\n\terr := c.Context.Err()\n\tif err == nil {\n\t\terr = c.caller.Err()\n\t}\n\treturn err\n}\n\nfunc (c callCtx) Value(key interface{}) interface{} {\n\treturn c.Context.Value(key)\n}\n\nfunc (s *service) callGroup(c context.Context) *dgroup.Group {\n\treturn dgroup.NewGroup(&callCtx{Context: s.ctx, caller: c}, dgroup.GroupConfig{})\n}\n\nfunc callRecovery(c context.Context, r interface{}, err error) error {\n\tperr := dutil.PanicToError(r)\n\tif perr != nil {\n\t\tif err == nil {\n\t\t\terr = perr\n\t\t} else {\n\t\t\tdlog.Errorf(c, \"%+v\", perr)\n\t\t}\n\t}\n\tif err != nil {\n\t\tdlog.Errorf(c, \"%+v\", err)\n\t}\n\treturn err\n}\n\nfunc (s *service) Version(_ context.Context, _ *empty.Empty) (*version.VersionInfo, error) {\n\treturn &version.VersionInfo{\n\t\tApiVersion: client.APIVersion,\n\t\tVersion: client.Version(),\n\t}, nil\n}\n\nfunc (s *service) Status(c context.Context, _ *empty.Empty) (result *rpc.ConnectorStatus, err error) {\n\tg := s.callGroup(c)\n\tg.Go(\"Status\", func(c context.Context) (err error) {\n\t\tdefer func() { err = callRecovery(c, recover(), err) }()\n\t\tresult = s.status(c)\n\t\treturn\n\t})\n\terr = g.Wait()\n\treturn\n}\n\nfunc (s *service) Connect(c context.Context, cr *rpc.ConnectRequest) (ci *rpc.ConnectInfo, err error) {\n\tg := s.callGroup(c)\n\tg.Go(\"Connect\", func(c context.Context) (err error) {\n\t\tdefer func() { err = callRecovery(c, recover(), err) }()\n\t\tci = s.connect(c, cr)\n\t\treturn\n\t})\n\terr = g.Wait()\n\treturn\n}\n\nfunc (s *service) CreateIntercept(c context.Context, ir *manager.CreateInterceptRequest) (result *rpc.InterceptResult, err error) {\n\tie, is := s.interceptStatus()\n\tif ie != rpc.InterceptError_UNSPECIFIED {\n\t\treturn &rpc.InterceptResult{Error: ie, ErrorText: is}, nil\n\t}\n\tg := s.callGroup(c)\n\tg.Go(\"CreateIntercept\", func(c context.Context) (err error) {\n\t\tdefer func() { err = callRecovery(c, recover(), err) }()\n\t\tresult, err = s.trafficMgr.addIntercept(c, s.ctx, ir)\n\t\treturn\n\t})\n\terr = g.Wait()\n\treturn\n}\n\nfunc (s *service) RemoveIntercept(c context.Context, rr *manager.RemoveInterceptRequest2) (result *rpc.InterceptResult, err error) {\n\tie, is := s.interceptStatus()\n\tif ie != rpc.InterceptError_UNSPECIFIED {\n\t\treturn &rpc.InterceptResult{Error: ie, ErrorText: is}, nil\n\t}\n\tg := s.callGroup(c)\n\tg.Go(\"RemoveIntercept\", func(c context.Context) (err error) {\n\t\tdefer func() { err = callRecovery(c, recover(), err) }()\n\t\terr = s.trafficMgr.removeIntercept(c, rr.Name)\n\t\treturn\n\t})\n\terr = g.Wait()\n\treturn &rpc.InterceptResult{}, err\n}\n\nfunc (s *service) AvailableIntercepts(_ context.Context, _ *empty.Empty) (*manager.AgentInfoSnapshot, error) {\n\tif s.trafficMgr.grpc == nil {\n\t\treturn &manager.AgentInfoSnapshot{}, nil\n\t}\n\treturn s.trafficMgr.agentInfoSnapshot(), nil\n}\n\nfunc (s *service) ListIntercepts(_ context.Context, _ *empty.Empty) (*manager.InterceptInfoSnapshot, error) {\n\tif s.trafficMgr.grpc == nil {\n\t\treturn &manager.InterceptInfoSnapshot{}, nil\n\t}\n\treturn s.trafficMgr.interceptInfoSnapshot(), nil\n}\n\nfunc (s *service) Quit(_ context.Context, _ *empty.Empty) (*empty.Empty, error) {\n\ts.cancel()\n\treturn &empty.Empty{}, nil\n}\n\n\/\/ daemonLogger is an io.Writer implementation that sends data to the daemon logger\ntype daemonLogger struct {\n\tstream daemon.Daemon_LoggerClient\n}\n\nfunc (d *daemonLogger) Write(data []byte) (n int, err error) {\n\terr = d.stream.Send(&daemon.LogMessage{Text: data})\n\treturn len(data), err\n}\n\n\/\/ connect the connector to a cluster\nfunc (s *service) connect(c context.Context, cr *rpc.ConnectRequest) *rpc.ConnectInfo {\n\tgo func() {\n\t\treporter := &metriton.Reporter{\n\t\t\tApplication: \"telepresence2\",\n\t\t\tVersion: client.Version(),\n\t\t\tGetInstallID: func(_ *metriton.Reporter) (string, error) { return cr.InstallId, nil },\n\t\t\tBaseMetadata: map[string]interface{}{\"mode\": \"daemon\"},\n\t\t}\n\n\t\tif _, err := reporter.Report(c, map[string]interface{}{\"action\": \"connect\"}); err != nil {\n\t\t\tdlog.Errorf(c, \"report failed: %+v\", err)\n\t\t}\n\t}()\n\n\t\/\/ Sanity checks\n\tr := &rpc.ConnectInfo{}\n\tif s.cluster != nil {\n\t\tr.ClusterContext = s.cluster.Context\n\t\tr.ClusterServer = s.cluster.server()\n\t\tr.Error = rpc.ConnectInfo_ALREADY_CONNECTED\n\t\treturn r\n\t}\n\tif s.bridge != nil {\n\t\tr.Error = rpc.ConnectInfo_DISCONNECTING\n\t\treturn r\n\t}\n\n\tdlog.Info(c, \"Connecting to traffic manager...\")\n\tcluster, err := trackKCluster(s.ctx, cr.Context, cr.Namespace, cr.Args)\n\tif err != nil {\n\t\tdlog.Errorf(c, \"unable to track k8s cluster: %+v\", err)\n\t\tr.Error = rpc.ConnectInfo_CLUSTER_FAILED\n\t\tr.ErrorText = err.Error()\n\t\ts.cancel()\n\t\treturn r\n\t}\n\ts.cluster = cluster\n\n\t\/*\n\t\tpreviewHost, err := cluster.getClusterPreviewHostname(p)\n\t\tif err != nil {\n\t\t\tp.Logf(\"get preview URL hostname: %+v\", err)\n\t\t\tpreviewHost = \"\"\n\t\t}\n\t*\/\n\n\tdlog.Infof(c, \"Connected to context %s (%s)\", s.cluster.Context, s.cluster.server())\n\n\tr.ClusterContext = s.cluster.Context\n\tr.ClusterServer = s.cluster.server()\n\n\ttmgr, err := newTrafficManager(s.ctx, s.cluster, cr.InstallId, cr.IsCi)\n\tif err != nil {\n\t\tdlog.Errorf(c, \"Unable to connect to TrafficManager: %s\", err)\n\t\tr.Error = rpc.ConnectInfo_TRAFFIC_MANAGER_FAILED\n\t\tr.ErrorText = err.Error()\n\t\tif cr.InterceptEnabled {\n\t\t\t\/\/ No point in continuing without a traffic manager\n\t\t\ts.cancel()\n\t\t}\n\t\treturn r\n\t}\n\n\t\/\/ tmgr.previewHost = previewHost\n\ts.trafficMgr = tmgr\n\tdlog.Infof(c, \"Starting traffic-manager bridge in context %s, namespace %s\", cluster.Context, cluster.Namespace)\n\tbr := newBridge(cluster, s.daemon, tmgr.sshPort)\n\terr = br.start(s.ctx)\n\tif err != nil {\n\t\tdlog.Errorf(c, \"Failed to start traffic-manager bridge: %s\", err.Error())\n\t\tr.Error = rpc.ConnectInfo_BRIDGE_FAILED\n\t\tr.ErrorText = err.Error()\n\t\t\/\/ No point in continuing without a bridge\n\t\ts.cancel()\n\t\treturn r\n\t}\n\ts.bridge = br\n\n\tif !cr.InterceptEnabled {\n\t\treturn r\n\t}\n\n\t\/\/ Wait for traffic manager to connect\n\tdlog.Info(c, \"Waiting for TrafficManager to connect\")\n\tif err := tmgr.waitUntilStarted(); err != nil {\n\t\tdlog.Errorf(c, \"Failed to start traffic-manager: %s\", err.Error())\n\t\tr.Error = rpc.ConnectInfo_TRAFFIC_MANAGER_FAILED\n\t\tr.ErrorText = err.Error()\n\t\t\/\/ No point in continuing without a traffic manager\n\t\ts.cancel()\n\t}\n\treturn r\n}\n\n\/\/ setUpLogging connects to the daemon logger\nfunc (s *service) setUpLogging(c context.Context) (context.Context, error) {\n\tvar err error\n\ts.daemonLogger.stream, err = s.daemon.Logger(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogger := logrus.StandardLogger()\n\tlogger.Out = &s.daemonLogger\n\tloggingToTerminal := terminal.IsTerminal(int(os.Stdout.Fd()))\n\tif loggingToTerminal {\n\t\tlogger.Formatter = client.NewFormatter(\"15:04:05\")\n\t} else {\n\t\tlogger.Formatter = client.NewFormatter(\"2006\/01\/02 15:04:05\")\n\t}\n\tlogger.Level = logrus.DebugLevel\n\treturn dlog.WithLogger(c, dlog.WrapLogrus(logger)), nil\n}\n\n\/\/ run is the main function when executing as the connector\nfunc run(init bool) error {\n\t\/\/ establish a connection to the daemon gRPC service\n\tconn, err := client.DialSocket(client.DaemonSocketName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\ts := &service{daemon: daemon.NewDaemonClient(conn), grpc: grpc.NewServer()}\n\trpc.RegisterConnectorServer(s.grpc, s)\n\n\tc, err := s.setUpLogging(context.Background())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc = dgroup.WithGoroutineName(c, \"connector\")\n\n\tvar cancel context.CancelFunc\n\tc, cancel = context.WithCancel(c)\n\ts.cancel = func() {\n\t\tdlog.Debug(s.ctx, \"cancelling connector context\")\n\t\tcancel()\n\t}\n\n\tg := dgroup.NewGroup(c, dgroup.GroupConfig{\n\t\tSoftShutdownTimeout: 2 * time.Second,\n\t\tEnableSignalHandling: true})\n\n\tdlog.Info(c, \"---\")\n\tdlog.Infof(c, \"Telepresence Connector %s starting...\", client.DisplayVersion())\n\tdlog.Infof(c, \"PID is %d\", os.Getpid())\n\tdlog.Info(c, \"\")\n\n\tsvcStarted := make(chan bool)\n\tif init {\n\t\tg.Go(\"debug-init\", func(c context.Context) error {\n\t\t\t<-svcStarted\n\t\t\t_, _ = s.Connect(c, &rpc.ConnectRequest{InstallId: \"dummy-id\"})\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tg.Go(\"service\", func(c context.Context) (err error) {\n\t\tvar listener net.Listener\n\t\tdefer func() {\n\t\t\tif perr := dutil.PanicToError(recover()); perr != nil {\n\t\t\t\tif err == nil {\n\t\t\t\t\terr = perr\n\t\t\t\t}\n\t\t\t\tif listener != nil {\n\t\t\t\t\t_ = listener.Close()\n\t\t\t\t}\n\t\t\t\t_ = os.Remove(client.ConnectorSocketName)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tdlog.Errorf(c, \"Server ended with: %s\", err.Error())\n\t\t\t} else {\n\t\t\t\tdlog.Debug(c, \"Server ended\")\n\t\t\t}\n\t\t}()\n\n\t\t\/\/ Listen on unix domain socket\n\t\tdlog.Debug(c, \"Server starting\")\n\t\ts.ctx = c\n\t\tlistener, err = net.Listen(\"unix\", client.ConnectorSocketName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tclose(svcStarted)\n\t\treturn s.grpc.Serve(listener)\n\t})\n\n\tg.Go(\"teardown\", s.handleShutdown)\n\n\terr = g.Wait()\n\tif err != nil {\n\t\tdlog.Error(c, err.Error())\n\t}\n\treturn err\n}\n\n\/\/ handleShutdown ensures that the connector quits gracefully when receiving a signal\n\/\/ or when the context is cancelled.\nfunc (s *service) handleShutdown(c context.Context) error {\n\tdefer s.grpc.GracefulStop()\n\n\t<-c.Done()\n\tdlog.Info(c, \"Shutting down\")\n\n\tcluster := s.cluster\n\tif cluster == nil {\n\t\treturn nil\n\t}\n\ts.cluster = nil\n\ttrafficMgr := s.trafficMgr\n\n\ts.trafficMgr = nil\n\tif trafficMgr != nil {\n\t\t_ = trafficMgr.clearIntercepts(context.Background())\n\t\t_ = trafficMgr.Close()\n\t}\n\ts.bridge = nil\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package demoinfocs_test\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n\n\tdemoinfocs \"github.com\/markus-wa\/demoinfocs-golang\/v2\/pkg\/demoinfocs\"\n\tevents \"github.com\/markus-wa\/demoinfocs-golang\/v2\/pkg\/demoinfocs\/events\"\n)\n\n\/*\nThis will print all kills of a demo in the format '[[killer]] <[[weapon]] [(HS)] [(WB)]> [[victim]]'\n*\/\n\/\/noinspection GoUnhandledErrorResult\nfunc ExampleParser() {\n\tf, err := os.Open(\"..\/..\/test\/cs-demos\/default.demx\")\n\tif err != nil {\n\t\tlog.Panic(\"failed to open demo file: \", err)\n\t}\n\n\tdefer f.Close()\n\n\tp := demoinfocs.NewParser(f)\n\tdefer p.Close()\n\n\t\/\/ Register handler on kill events\n\tp.RegisterEventHandler(func(e events.Kill) {\n\t\tvar hs string\n\t\tif e.IsHeadshot {\n\t\t\ths = \" (HS)\"\n\t\t}\n\n\t\tvar wallBang string\n\t\tif e.PenetratedObjects > 0 {\n\t\t\twallBang = \" (WB)\"\n\t\t}\n\n\t\tfmt.Printf(\"%s <%v%s%s> %s\\n\", e.Killer, e.Weapon, hs, wallBang, e.Victim)\n\t})\n\n\t\/\/ Parse to end\n\terr = p.ParseToEnd()\n\tif err != nil {\n\t\tlog.Panic(\"failed to parse demo: \", err)\n\t}\n}\n\nfunc TestExamplesWithoutOutput(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping long running test\")\n\t}\n\n\tExampleParser()\n}\n<commit_msg>fix error in examples_test.go<commit_after>package demoinfocs_test\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n\n\tdemoinfocs \"github.com\/markus-wa\/demoinfocs-golang\/v2\/pkg\/demoinfocs\"\n\tevents \"github.com\/markus-wa\/demoinfocs-golang\/v2\/pkg\/demoinfocs\/events\"\n)\n\n\/*\nThis will print all kills of a demo in the format '[[killer]] <[[weapon]] [(HS)] [(WB)]> [[victim]]'\n*\/\n\/\/noinspection GoUnhandledErrorResult\nfunc ExampleParser() {\n\tf, err := os.Open(\"..\/..\/test\/cs-demos\/default.dem\")\n\tif err != nil {\n\t\tlog.Panic(\"failed to open demo file: \", err)\n\t}\n\n\tdefer f.Close()\n\n\tp := demoinfocs.NewParser(f)\n\tdefer p.Close()\n\n\t\/\/ Register handler on kill events\n\tp.RegisterEventHandler(func(e events.Kill) {\n\t\tvar hs string\n\t\tif e.IsHeadshot {\n\t\t\ths = \" (HS)\"\n\t\t}\n\n\t\tvar wallBang string\n\t\tif e.PenetratedObjects > 0 {\n\t\t\twallBang = \" (WB)\"\n\t\t}\n\n\t\tfmt.Printf(\"%s <%v%s%s> %s\\n\", e.Killer, e.Weapon, hs, wallBang, e.Victim)\n\t})\n\n\t\/\/ Parse to end\n\terr = p.ParseToEnd()\n\tif err != nil {\n\t\tlog.Panic(\"failed to parse demo: \", err)\n\t}\n}\n\nfunc TestExamplesWithoutOutput(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping long running test\")\n\t}\n\n\tExampleParser()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The OpenSDS Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/*\nThis module implements the entry into operations of storageDock module.\n\n*\/\n\npackage discovery\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/opensds\/opensds\/contrib\/connector\"\n\t\"github.com\/opensds\/opensds\/contrib\/drivers\"\n\tfd \"github.com\/opensds\/opensds\/contrib\/drivers\/filesharedrivers\"\n\t\"github.com\/opensds\/opensds\/contrib\/drivers\/utils\/config\"\n\tc \"github.com\/opensds\/opensds\/pkg\/context\"\n\t\"github.com\/opensds\/opensds\/pkg\/db\"\n\t\"github.com\/opensds\/opensds\/pkg\/model\"\n\t\"github.com\/opensds\/opensds\/pkg\/utils\"\n\t. \"github.com\/opensds\/opensds\/pkg\/utils\/config\"\n\tuuid \"github.com\/satori\/go.uuid\"\n)\n\nconst (\n\tavailableStatus = \"available\"\n\tunavailableStatus = \"unavailable\"\n)\n\ntype Context struct {\n\tStopChan chan bool\n\tErrChan chan error\n\tMetaChan chan string\n}\n\nfunc DiscoveryAndReport(dd DockDiscoverer, ctx *Context) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.StopChan:\n\t\t\treturn\n\t\tdefault:\n\t\t\tif err := dd.Discover(); err != nil {\n\t\t\t\tctx.ErrChan <- err\n\t\t\t}\n\n\t\t\tif err := dd.Report(); err != nil {\n\t\t\t\tctx.ErrChan <- err\n\t\t\t}\n\t\t}\n\n\t\ttime.Sleep(60 * time.Second)\n\t}\n}\n\ntype DockDiscoverer interface {\n\tInit() error\n\n\tDiscover() error\n\n\tReport() error\n}\n\n\/\/ NewDockDiscoverer method creates a new DockDiscoverer.\nfunc NewDockDiscoverer(dockType string) DockDiscoverer {\n\tswitch dockType {\n\tcase model.DockTypeProvioner:\n\t\treturn &provisionDockDiscoverer{\n\t\t\tDockRegister: NewDockRegister(),\n\t\t}\n\tcase model.DockTypeAttacher:\n\t\treturn &attachDockDiscoverer{\n\t\t\tDockRegister: NewDockRegister(),\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ provisionDockDiscoverer is a struct for exposing some operations of provision\n\/\/ dock service discovery.\ntype provisionDockDiscoverer struct {\n\t*DockRegister\n\n\tdcks []*model.DockSpec\n\tpols []*model.StoragePoolSpec\n}\n\nfunc (pdd *provisionDockDiscoverer) Init() error {\n\t\/\/ Load resource from specified file\n\tbm := GetBackendsMap()\n\thost, err := os.Hostname()\n\tif err != nil {\n\t\tlog.Error(\"When get os hostname:\", err)\n\t\treturn err\n\t}\n\n\tfor _, v := range CONF.EnabledBackends {\n\t\tb := bm[v]\n\t\tif b.Name == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tdck := &model.DockSpec{\n\t\t\tBaseModel: &model.BaseModel{\n\t\t\t\tId: uuid.NewV5(uuid.NamespaceOID, host+\":\"+b.DriverName).String(),\n\t\t\t},\n\t\t\tName: b.Name,\n\t\t\tDescription: b.Description,\n\t\t\tDriverName: b.DriverName,\n\t\t\tEndpoint: CONF.OsdsDock.ApiEndpoint,\n\t\t\tNodeId: host,\n\t\t\tType: model.DockTypeProvioner,\n\t\t\tMetadata: map[string]string{\"HostReplicationDriver\": CONF.OsdsDock.HostBasedReplicationDriver},\n\t\t}\n\t\t\/\/ Update the id if the dock is already in etcd\n\t\tname := map[string][]string{\n\t\t\t\"Name\": {dck.Name},\n\t\t}\n\t\tdocks, err := pdd.DockRegister.c.ListDocksWithFilter(c.NewAdminContext(), name)\n\t\tif err == nil && len(docks) != 0 {\n\t\t\tdck.Id = docks[0].Id\n\t\t}\n\t\tpdd.dcks = append(pdd.dcks, dck)\n\t}\n\n\treturn nil\n}\n\nvar filesharedrivers = []string{config.NFSDriverType, config.HuaweiOceanStorFileDriverType, config.ManilaDriverType, config.ChubaofsDriverType}\n\nfunc (pdd *provisionDockDiscoverer) Discover() error {\n\t\/\/ Clear existing pool info\n\tpdd.pols = pdd.pols[:0]\n\tvar pols []*model.StoragePoolSpec\n\tvar err error\n\tvar polsInDb []*model.StoragePoolSpec\n\tctx := c.NewAdminContext()\n\tpolsInDb, err = pdd.c.ListPools(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can not read pools in db\")\n\t}\n\tdbPolsMap := make(map[string]map[string]*model.StoragePoolSpec)\n\tfor _, dck := range pdd.dcks {\n\t\tdbPolsMap[dck.Id] = make(map[string]*model.StoragePoolSpec)\n\t}\n\tfor _, polInDb := range polsInDb {\n\t\tif dbPolsMap[polInDb.DockId] != nil {\n\t\t\tpolInDb.Status = unavailableStatus\n\t\t\tdbPolsMap[polInDb.DockId][polInDb.Id] = polInDb\n\t\t}\n\t}\n\tfor _, dck := range pdd.dcks {\n\t\t\/\/ Call function of StorageDrivers configured by storage drivers.\n\t\tif utils.Contains(filesharedrivers, dck.DriverName) {\n\t\t\td := fd.Init(dck.DriverName)\n\t\t\tdefer fd.Clean(d)\n\t\t\tpols, err = d.ListPools()\n\t\t\tfor _, pol := range pols {\n\t\t\t\tlog.Infof(\"Backend %s discovered pool %s\", dck.DriverName, pol.Name)\n\t\t\t\tdelete(dbPolsMap[dck.Id], pol.Id)\n\t\t\t\tpol.DockId = dck.Id\n\t\t\t\tpol.Status = availableStatus\n\t\t\t}\n\t\t} else {\n\t\t\td := drivers.Init(dck.DriverName)\n\t\t\tdefer drivers.Clean(d)\n\t\t\tpols, err = d.ListPools()\n\n\t\t\treplicationDriverName := dck.Metadata[\"HostReplicationDriver\"]\n\t\t\treplicationType := model.ReplicationTypeHost\n\t\t\tif drivers.IsSupportArrayBasedReplication(dck.DriverName) {\n\t\t\t\treplicationType = model.ReplicationTypeArray\n\t\t\t\treplicationDriverName = dck.DriverName\n\t\t\t}\n\t\t\tfor _, pol := range pols {\n\t\t\t\tlog.Infof(\"Backend %s discovered pool %s\", dck.DriverName, pol.Name)\n\t\t\t\tdelete(dbPolsMap[dck.Id], pol.Id)\n\t\t\t\tpol.DockId = dck.Id\n\t\t\t\tpol.ReplicationType = replicationType\n\t\t\t\tpol.ReplicationDriverName = replicationDriverName\n\t\t\t\tpol.Status = availableStatus\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Error(\"Call driver to list pools failed:\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(pols) == 0 {\n\t\t\tlog.Warningf(\"The pool of dock %s is empty!\\n\", dck.Id)\n\t\t}\n\n\t\tpdd.pols = append(pdd.pols, pols...)\n\t\tfor _, pol := range dbPolsMap[dck.Id] {\n\t\t\tpdd.pols = append(pdd.pols, pol)\n\t\t}\n\n\t}\n\tif len(pdd.pols) == 0 {\n\t\treturn fmt.Errorf(\"there is no pool can be found\")\n\t}\n\n\treturn nil\n}\n\nfunc (pdd *provisionDockDiscoverer) Report() error {\n\tvar err error\n\n\t\/\/ Store dock resources in database.\n\tfor _, dck := range pdd.dcks {\n\t\tif err = pdd.Register(dck); err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ Store pool resources in database.\n\tfor _, pol := range pdd.pols {\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\terr = pdd.Register(pol)\n\t}\n\n\treturn err\n}\n\n\/\/ attachDockDiscoverer is a struct for exposing some operations of attach\n\/\/ dock service discovery.\ntype attachDockDiscoverer struct {\n\t*DockRegister\n\n\tdck *model.DockSpec\n}\n\nfunc (add *attachDockDiscoverer) Init() error { return nil }\n\nfunc (add *attachDockDiscoverer) Discover() error {\n\thost, err := os.Hostname()\n\tif err != nil {\n\t\tlog.Error(\"When get os hostname:\", err)\n\t\treturn err\n\t}\n\n\tlocalIqn, err := connector.NewConnector(connector.IscsiDriver).GetInitiatorInfo()\n\tif err != nil {\n\t\tlog.Warning(\"get initiator failed, \", err)\n\t}\n\n\tbindIp := CONF.BindIp\n\tif bindIp == \"\" {\n\t\tbindIp = connector.GetHostIP()\n\t}\n\n\tfcInitiator, err := connector.NewConnector(connector.FcDriver).GetInitiatorInfo()\n\tif err != nil {\n\t\tlog.Warning(\"get initiator failed, \", err)\n\t}\n\n\tvar wwpns []string\n\tfor _, v := range strings.Split(fcInitiator, \",\") {\n\t\tif strings.Contains(v, \"node_name\") {\n\t\t\twwpns = append(wwpns, strings.Split(v, \":\")[1])\n\t\t}\n\t}\n\n\tsegments := strings.Split(CONF.OsdsDock.ApiEndpoint, \":\")\n\tendpointIp := segments[len(segments)-2]\n\tadd.dck = &model.DockSpec{\n\t\tBaseModel: &model.BaseModel{\n\t\t\tId: uuid.NewV5(uuid.NamespaceOID, host+\":\"+endpointIp).String(),\n\t\t},\n\t\tEndpoint: CONF.OsdsDock.ApiEndpoint,\n\t\tNodeId: host,\n\t\tType: model.DockTypeAttacher,\n\t\tMetadata: map[string]string{\n\t\t\t\"Platform\": runtime.GOARCH,\n\t\t\t\"OsType\": runtime.GOOS,\n\t\t\t\"HostIp\": bindIp,\n\t\t\t\"Initiator\": localIqn,\n\t\t\t\"WWPNS\": strings.Join(wwpns, \",\"),\n\t\t},\n\t}\n\n\treturn nil\n}\n\nfunc (add *attachDockDiscoverer) Report() error {\n\treturn add.Register(add.dck)\n}\n\nfunc NewDockRegister() *DockRegister {\n\treturn &DockRegister{c: db.C}\n}\n\ntype DockRegister struct {\n\tc db.Client\n}\n\nfunc (dr *DockRegister) Register(in interface{}) error {\n\tctx := c.NewAdminContext()\n\n\tswitch in.(type) {\n\tcase *model.DockSpec:\n\t\tdck := in.(*model.DockSpec)\n\t\t\/\/ Call db module to create dock resource with latest info.\n\t\tif _, err := dr.c.CreateDock(ctx, dck); err != nil {\n\t\t\tlog.Errorf(\"When create dock %s in db: %v\\n\", dck.Id, err)\n\t\t\treturn err\n\t\t}\n\n\t\tbreak\n\tcase *model.StoragePoolSpec:\n\t\tpol := in.(*model.StoragePoolSpec)\n\t\t\/\/ Call db module to create pool resource.\n\t\tname := map[string][]string{\n\t\t\t\"Name\": {pol.Name},\n\t\t\t\"dockId\": {pol.DockId},\n\t\t}\n\t\tpools, err := dr.c.ListPoolsWithFilter(ctx, name)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif len(pools) != 0 {\n\t\t\tpol.Id = pools[0].Id\n\t\t}\n\t\tif _, err := dr.c.CreatePool(ctx, pol); err != nil {\n\t\t\tlog.Errorf(\"When create pool %s in db: %v\\n\", pol.Id, err)\n\t\t\treturn err\n\t\t}\n\t\tbreak\n\tdefault:\n\t\treturn fmt.Errorf(\"Resource type is not supported!\")\n\t}\n\n\treturn nil\n}\n\nfunc (dr *DockRegister) Unregister(in interface{}) error {\n\tctx := c.NewAdminContext()\n\n\tswitch in.(type) {\n\tcase *model.DockSpec:\n\t\tdck := in.(*model.DockSpec)\n\t\t\/\/ Call db module to delete dock resource.\n\t\tif err := dr.c.DeleteDock(ctx, dck.Id); err != nil {\n\t\t\tlog.Errorf(\"When delete dock %s in db: %v\\n\", dck.Id, err)\n\t\t\treturn err\n\t\t}\n\t\tbreak\n\tcase *model.StoragePoolSpec:\n\t\tpol := in.(*model.StoragePoolSpec)\n\t\t\/\/ Call db module to delete pool resource.\n\t\tif err := dr.c.DeletePool(ctx, pol.Id); err != nil {\n\t\t\tlog.Errorf(\"When delete pool %s in db: %v\\n\", pol.Id, err)\n\t\t\treturn err\n\t\t}\n\t\tbreak\n\tdefault:\n\t\treturn fmt.Errorf(\"Resource type is not supported!\")\n\t}\n\n\treturn nil\n}\n<commit_msg>Code improvement<commit_after>\/\/ Copyright 2017 The OpenSDS Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/*\nThis module implements the entry into operations of storageDock module.\n\n*\/\n\npackage discovery\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/golang\/glog\"\n\t\"github.com\/opensds\/opensds\/contrib\/connector\"\n\t\"github.com\/opensds\/opensds\/contrib\/drivers\"\n\tfd \"github.com\/opensds\/opensds\/contrib\/drivers\/filesharedrivers\"\n\t\"github.com\/opensds\/opensds\/contrib\/drivers\/utils\/config\"\n\tc \"github.com\/opensds\/opensds\/pkg\/context\"\n\t\"github.com\/opensds\/opensds\/pkg\/db\"\n\t\"github.com\/opensds\/opensds\/pkg\/model\"\n\t\"github.com\/opensds\/opensds\/pkg\/utils\"\n\t. \"github.com\/opensds\/opensds\/pkg\/utils\/config\"\n\tuuid \"github.com\/satori\/go.uuid\"\n)\n\nconst (\n\tavailableStatus = \"available\"\n\tunavailableStatus = \"unavailable\"\n)\n\ntype Context struct {\n\tStopChan chan bool\n\tErrChan chan error\n\tMetaChan chan string\n}\n\nfunc DiscoveryAndReport(dd DockDiscoverer, ctx *Context) {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.StopChan:\n\t\t\treturn\n\t\tdefault:\n\t\t\tif err := dd.Discover(); err != nil {\n\t\t\t\tctx.ErrChan <- err\n\t\t\t}\n\n\t\t\tif err := dd.Report(); err != nil {\n\t\t\t\tctx.ErrChan <- err\n\t\t\t}\n\t\t}\n\n\t\ttime.Sleep(60 * time.Second)\n\t}\n}\n\ntype DockDiscoverer interface {\n\tInit() error\n\n\tDiscover() error\n\n\tReport() error\n}\n\n\/\/ NewDockDiscoverer method creates a new DockDiscoverer.\nfunc NewDockDiscoverer(dockType string) DockDiscoverer {\n\tswitch dockType {\n\tcase model.DockTypeProvioner:\n\t\treturn &provisionDockDiscoverer{\n\t\t\tDockRegister: NewDockRegister(),\n\t\t}\n\tcase model.DockTypeAttacher:\n\t\treturn &attachDockDiscoverer{\n\t\t\tDockRegister: NewDockRegister(),\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ provisionDockDiscoverer is a struct for exposing some operations of provision\n\/\/ dock service discovery.\ntype provisionDockDiscoverer struct {\n\t*DockRegister\n\n\tdcks []*model.DockSpec\n\tpols []*model.StoragePoolSpec\n}\n\nfunc (pdd *provisionDockDiscoverer) Init() error {\n\t\/\/ Load resource from specified file\n\tbm := GetBackendsMap()\n\thost, err := os.Hostname()\n\tif err != nil {\n\t\tlog.Error(\"When get os hostname:\", err)\n\t\treturn err\n\t}\n\n\tfor _, v := range CONF.EnabledBackends {\n\t\tb := bm[v]\n\t\tif b.Name == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tdck := &model.DockSpec{\n\t\t\tBaseModel: &model.BaseModel{\n\t\t\t\tId: uuid.NewV5(uuid.NamespaceOID, host+\":\"+b.DriverName).String(),\n\t\t\t},\n\t\t\tName: b.Name,\n\t\t\tDescription: b.Description,\n\t\t\tDriverName: b.DriverName,\n\t\t\tEndpoint: CONF.OsdsDock.ApiEndpoint,\n\t\t\tNodeId: host,\n\t\t\tType: model.DockTypeProvioner,\n\t\t\tMetadata: map[string]string{\"HostReplicationDriver\": CONF.OsdsDock.HostBasedReplicationDriver},\n\t\t}\n\t\t\/\/ Update the id if the dock is already in etcd\n\t\tname := map[string][]string{\n\t\t\t\"Name\": {dck.Name},\n\t\t}\n\t\tdocks, err := pdd.DockRegister.c.ListDocksWithFilter(c.NewAdminContext(), name)\n\t\tif err == nil && len(docks) != 0 {\n\t\t\tdck.Id = docks[0].Id\n\t\t}\n\t\tpdd.dcks = append(pdd.dcks, dck)\n\t}\n\n\treturn nil\n}\n\nvar filesharedrivers = []string{config.NFSDriverType, config.HuaweiOceanStorFileDriverType, config.ManilaDriverType, config.ChubaofsDriverType}\n\nfunc (pdd *provisionDockDiscoverer) Discover() error {\n\t\/\/ Clear existing pool info\n\tpdd.pols = pdd.pols[:0]\n\tvar pols []*model.StoragePoolSpec\n\tvar err error\n\tvar polsInDb []*model.StoragePoolSpec\n\tctx := c.NewAdminContext()\n\tpolsInDb, err = pdd.c.ListPools(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"can not read pools in db\")\n\t}\n\tdbPolsMap := make(map[string]map[string]*model.StoragePoolSpec)\n\tfor _, dck := range pdd.dcks {\n\t\tdbPolsMap[dck.Id] = make(map[string]*model.StoragePoolSpec)\n\t}\n\tfor _, polInDb := range polsInDb {\n\t\tif dbPolsMap[polInDb.DockId] != nil {\n\t\t\tpolInDb.Status = unavailableStatus\n\t\t\tdbPolsMap[polInDb.DockId][polInDb.Id] = polInDb\n\t\t}\n\t}\n\tfor _, dck := range pdd.dcks {\n\t\t\/\/ Call function of StorageDrivers configured by storage drivers.\n\t\tif utils.Contains(filesharedrivers, dck.DriverName) {\n\t\t\td := fd.Init(dck.DriverName)\n\t\t\tdefer fd.Clean(d)\n\t\t\tpols, err = d.ListPools()\n\t\t\tfor _, pol := range pols {\n\t\t\t\tlog.Infof(\"Backend %s discovered pool %s\", dck.DriverName, pol.Name)\n\t\t\t\tdelete(dbPolsMap[dck.Id], pol.Id)\n\t\t\t\tpol.DockId = dck.Id\n\t\t\t\tpol.Status = availableStatus\n\t\t\t}\n\t\t} else {\n\t\t\td := drivers.Init(dck.DriverName)\n\t\t\tdefer drivers.Clean(d)\n\t\t\tpols, err = d.ListPools()\n\n\t\t\treplicationDriverName := dck.Metadata[\"HostReplicationDriver\"]\n\t\t\treplicationType := model.ReplicationTypeHost\n\t\t\tif drivers.IsSupportArrayBasedReplication(dck.DriverName) {\n\t\t\t\treplicationType = model.ReplicationTypeArray\n\t\t\t\treplicationDriverName = dck.DriverName\n\t\t\t}\n\t\t\tfor _, pol := range pols {\n\t\t\t\tlog.Infof(\"Backend %s discovered pool %s\", dck.DriverName, pol.Name)\n\t\t\t\tname := map[string][]string{\n\t\t\t\t\t\"Name\": {pol.Name},\n\t\t\t\t\t\"dockId\": {pol.DockId},\n\t\t\t\t}\n\t\t\t\tpools, err := pdd.c.ListPoolsWithFilter(ctx, name)\n\t\t\t\tif err == nil && len(pools) != 0 {\n\t\t\t\t\tlog.Errorf(\"name is %s, id is %s, not found\\n\", pol.Name, pol.DockId)\n\t\t\t\t\tpol.Id = pools[0].Id\n\t\t\t\t}\n\n\t\t\t\tdelete(dbPolsMap[dck.Id], pol.Id)\n\t\t\t\tpol.DockId = dck.Id\n\t\t\t\tpol.ReplicationType = replicationType\n\t\t\t\tpol.ReplicationDriverName = replicationDriverName\n\t\t\t\tpol.Status = availableStatus\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Error(\"Call driver to list pools failed:\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(pols) == 0 {\n\t\t\tlog.Warningf(\"The pool of dock %s is empty!\\n\", dck.Id)\n\t\t}\n\n\t\tpdd.pols = append(pdd.pols, pols...)\n\t\tfor _, pol := range dbPolsMap[dck.Id] {\n\t\t\tpdd.pols = append(pdd.pols, pol)\n\t\t}\n\n\t}\n\tif len(pdd.pols) == 0 {\n\t\treturn fmt.Errorf(\"there is no pool can be found\")\n\t}\n\n\treturn nil\n}\n\nfunc (pdd *provisionDockDiscoverer) Report() error {\n\tvar err error\n\n\t\/\/ Store dock resources in database.\n\tfor _, dck := range pdd.dcks {\n\t\tif err = pdd.Register(dck); err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ Store pool resources in database.\n\tfor _, pol := range pdd.pols {\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\terr = pdd.Register(pol)\n\t}\n\n\treturn err\n}\n\n\/\/ attachDockDiscoverer is a struct for exposing some operations of attach\n\/\/ dock service discovery.\ntype attachDockDiscoverer struct {\n\t*DockRegister\n\n\tdck *model.DockSpec\n}\n\nfunc (add *attachDockDiscoverer) Init() error { return nil }\n\nfunc (add *attachDockDiscoverer) Discover() error {\n\thost, err := os.Hostname()\n\tif err != nil {\n\t\tlog.Error(\"When get os hostname:\", err)\n\t\treturn err\n\t}\n\n\tlocalIqn, err := connector.NewConnector(connector.IscsiDriver).GetInitiatorInfo()\n\tif err != nil {\n\t\tlog.Warning(\"get initiator failed, \", err)\n\t}\n\n\tbindIp := CONF.BindIp\n\tif bindIp == \"\" {\n\t\tbindIp = connector.GetHostIP()\n\t}\n\n\tfcInitiator, err := connector.NewConnector(connector.FcDriver).GetInitiatorInfo()\n\tif err != nil {\n\t\tlog.Warning(\"get initiator failed, \", err)\n\t}\n\n\tvar wwpns []string\n\tfor _, v := range strings.Split(fcInitiator, \",\") {\n\t\tif strings.Contains(v, \"node_name\") {\n\t\t\twwpns = append(wwpns, strings.Split(v, \":\")[1])\n\t\t}\n\t}\n\n\tsegments := strings.Split(CONF.OsdsDock.ApiEndpoint, \":\")\n\tendpointIp := segments[len(segments)-2]\n\tadd.dck = &model.DockSpec{\n\t\tBaseModel: &model.BaseModel{\n\t\t\tId: uuid.NewV5(uuid.NamespaceOID, host+\":\"+endpointIp).String(),\n\t\t},\n\t\tEndpoint: CONF.OsdsDock.ApiEndpoint,\n\t\tNodeId: host,\n\t\tType: model.DockTypeAttacher,\n\t\tMetadata: map[string]string{\n\t\t\t\"Platform\": runtime.GOARCH,\n\t\t\t\"OsType\": runtime.GOOS,\n\t\t\t\"HostIp\": bindIp,\n\t\t\t\"Initiator\": localIqn,\n\t\t\t\"WWPNS\": strings.Join(wwpns, \",\"),\n\t\t},\n\t}\n\n\treturn nil\n}\n\nfunc (add *attachDockDiscoverer) Report() error {\n\treturn add.Register(add.dck)\n}\n\nfunc NewDockRegister() *DockRegister {\n\treturn &DockRegister{c: db.C}\n}\n\ntype DockRegister struct {\n\tc db.Client\n}\n\nfunc (dr *DockRegister) Register(in interface{}) error {\n\tctx := c.NewAdminContext()\n\n\tswitch in.(type) {\n\tcase *model.DockSpec:\n\t\tdck := in.(*model.DockSpec)\n\t\t\/\/ Call db module to create dock resource with latest info.\n\t\tif _, err := dr.c.CreateDock(ctx, dck); err != nil {\n\t\t\tlog.Errorf(\"When create dock %s in db: %v\\n\", dck.Id, err)\n\t\t\treturn err\n\t\t}\n\n\t\tbreak\n\tcase *model.StoragePoolSpec:\n\t\tpol := in.(*model.StoragePoolSpec)\n\t\t\/\/ Call db module to create pool resource.\n\t\tif _, err := dr.c.CreatePool(ctx, pol); err != nil {\n\t\t\tlog.Errorf(\"When create pool %s in db: %v\\n\", pol.Id, err)\n\t\t\treturn err\n\t\t}\n\t\tbreak\n\tdefault:\n\t\treturn fmt.Errorf(\"Resource type is not supported!\")\n\t}\n\n\treturn nil\n}\n\nfunc (dr *DockRegister) Unregister(in interface{}) error {\n\tctx := c.NewAdminContext()\n\n\tswitch in.(type) {\n\tcase *model.DockSpec:\n\t\tdck := in.(*model.DockSpec)\n\t\t\/\/ Call db module to delete dock resource.\n\t\tif err := dr.c.DeleteDock(ctx, dck.Id); err != nil {\n\t\t\tlog.Errorf(\"When delete dock %s in db: %v\\n\", dck.Id, err)\n\t\t\treturn err\n\t\t}\n\t\tbreak\n\tcase *model.StoragePoolSpec:\n\t\tpol := in.(*model.StoragePoolSpec)\n\t\t\/\/ Call db module to delete pool resource.\n\t\tif err := dr.c.DeletePool(ctx, pol.Id); err != nil {\n\t\t\tlog.Errorf(\"When delete pool %s in db: %v\\n\", pol.Id, err)\n\t\t\treturn err\n\t\t}\n\t\tbreak\n\tdefault:\n\t\treturn fmt.Errorf(\"Resource type is not supported!\")\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cluster\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/docker\/machine\/drivers\/virtualbox\"\n\t\"github.com\/docker\/machine\/libmachine\"\n\t\"github.com\/docker\/machine\/libmachine\/drivers\"\n\t\"github.com\/docker\/machine\/libmachine\/engine\"\n\t\"github.com\/docker\/machine\/libmachine\/host\"\n\t\"github.com\/docker\/machine\/libmachine\/state\"\n\t\"github.com\/golang\/glog\"\n\tkubeApi \"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/client\/unversioned\"\n\t\"k8s.io\/kubernetes\/pkg\/client\/unversioned\/clientcmd\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/constants\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/sshutil\"\n\t\"k8s.io\/minikube\/pkg\/util\"\n)\n\nvar (\n\tcerts = []string{\"apiserver.crt\", \"apiserver.key\"}\n)\n\n\/\/This init function is used to set the logtostderr variable to false so that INFO level log info does not clutter the CLI\n\/\/INFO lvl logging is displayed due to the kubernetes api calling flag.Set(\"logtostderr\", \"true\") in its init()\n\/\/see: https:\/\/github.com\/kubernetes\/kubernetes\/blob\/master\/pkg\/util\/logs.go#L32-34\nfunc init() {\n\tflag.Set(\"logtostderr\", \"false\")\n}\n\n\/\/ StartHost starts a host VM.\nfunc StartHost(api libmachine.API, config MachineConfig) (*host.Host, error) {\n\tif exists, err := api.Exists(constants.MachineName); err != nil {\n\t\treturn nil, fmt.Errorf(\"Error checking if host exists: %s\", err)\n\t} else if exists {\n\t\tglog.Infoln(\"Machine exists!\")\n\t\th, err := api.Load(constants.MachineName)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error loading existing host: %s\", err)\n\t\t}\n\t\ts, err := h.Driver.GetState()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error getting state for host: %s\", err)\n\t\t}\n\t\tif s != state.Running {\n\t\t\tif err := h.Driver.Start(); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Error starting stopped host: %s\", err)\n\t\t\t}\n\t\t\tif err := api.Save(h); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Error saving started host: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif err := h.ConfigureAuth(); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error configuring auth on host: %s\", err)\n\t\t}\n\t\treturn h, nil\n\t} else {\n\t\treturn createHost(api, config)\n\t}\n}\n\n\/\/ StopHost stops the host VM.\nfunc StopHost(api libmachine.API) error {\n\thost, err := api.Load(constants.MachineName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := host.Stop(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\ntype multiError struct {\n\tErrors []error\n}\n\nfunc (m *multiError) Collect(err error) {\n\tif err != nil {\n\t\tm.Errors = append(m.Errors, err)\n\t}\n}\n\nfunc (m multiError) ToError() error {\n\tif len(m.Errors) == 0 {\n\t\treturn nil\n\t}\n\n\terrStrings := []string{}\n\tfor _, err := range m.Errors {\n\t\terrStrings = append(errStrings, err.Error())\n\t}\n\treturn fmt.Errorf(strings.Join(errStrings, \"\\n\"))\n}\n\n\/\/ DeleteHost deletes the host VM.\nfunc DeleteHost(api libmachine.API) error {\n\thost, err := api.Load(constants.MachineName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm := multiError{}\n\tm.Collect(host.Driver.Remove())\n\tm.Collect(api.Remove(constants.MachineName))\n\treturn m.ToError()\n}\n\n\/\/ GetHostStatus gets the status of the host VM.\nfunc GetHostStatus(api libmachine.API) (string, error) {\n\tdne := \"Does Not Exist\"\n\texists, err := api.Exists(constants.MachineName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif !exists {\n\t\treturn dne, nil\n\t}\n\n\thost, err := api.Load(constants.MachineName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ts, err := host.Driver.GetState()\n\tif s.String() == \"\" {\n\t\treturn dne, err\n\t}\n\treturn s.String(), err\n}\n\ntype sshAble interface {\n\tRunSSHCommand(string) (string, error)\n}\n\n\/\/ MachineConfig contains the parameters used to start a cluster.\ntype MachineConfig struct {\n\tMinikubeISO string\n\tMemory int\n\tCPUs int\n\tVMDriver string\n}\n\n\/\/ StartCluster starts a k8s cluster on the specified Host.\nfunc StartCluster(h sshAble) error {\n\tcommands := []string{stopCommand, GetStartCommand()}\n\n\tfor _, cmd := range commands {\n\t\tglog.Infoln(cmd)\n\t\toutput, err := h.RunSSHCommand(cmd)\n\t\tglog.Infoln(output)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\ntype fileToCopy struct {\n\tAssetName string\n\tTargetDir string\n\tTargetName string\n\tPermissions string\n}\n\nvar assets = []fileToCopy{\n\t{\n\t\tAssetName: \"out\/localkube\",\n\t\tTargetDir: \"\/usr\/local\/bin\",\n\t\tTargetName: \"localkube\",\n\t\tPermissions: \"0777\",\n\t},\n\t{\n\t\tAssetName: \"deploy\/iso\/addon-manager.yaml\",\n\t\tTargetDir: \"\/etc\/kubernetes\/manifests\/\",\n\t\tTargetName: \"addon-manager.yaml\",\n\t\tPermissions: \"0640\",\n\t},\n\t{\n\t\tAssetName: \"deploy\/addons\/dashboard-rc.yaml\",\n\t\tTargetDir: \"\/etc\/kubernetes\/addons\/\",\n\t\tTargetName: \"dashboard-rc.yaml\",\n\t\tPermissions: \"0640\",\n\t},\n\t{\n\t\tAssetName: \"deploy\/addons\/dashboard-svc.yaml\",\n\t\tTargetDir: \"\/etc\/kubernetes\/addons\/\",\n\t\tTargetName: \"dashboard-svc.yaml\",\n\t\tPermissions: \"0640\",\n\t},\n}\n\nfunc UpdateCluster(d drivers.Driver) error {\n\tclient, err := sshutil.NewSSHClient(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, a := range assets {\n\t\tcontents, err := Asset(a.AssetName)\n\t\tif err != nil {\n\t\t\tglog.Infof(\"Error loading asset %s: %s\", a.AssetName, err)\n\t\t\treturn err\n\t\t}\n\n\t\tif err := sshutil.Transfer(contents, a.TargetDir, a.TargetName, a.Permissions, client); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ SetupCerts gets the generated credentials required to talk to the APIServer.\nfunc SetupCerts(d drivers.Driver) error {\n\tlocalPath := constants.Minipath\n\tipStr, err := d.GetIP()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tip := net.ParseIP(ipStr)\n\tpublicPath := filepath.Join(localPath, \"apiserver.crt\")\n\tprivatePath := filepath.Join(localPath, \"apiserver.key\")\n\tif err := GenerateCerts(publicPath, privatePath, ip); err != nil {\n\t\treturn err\n\t}\n\n\tclient, err := sshutil.NewSSHClient(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, cert := range certs {\n\t\tp := filepath.Join(localPath, cert)\n\t\tdata, err := ioutil.ReadFile(p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sshutil.Transfer(data, util.DefaultCertPath, cert, \"0644\", client); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc createHost(api libmachine.API, config MachineConfig) (*host.Host, error) {\n\tvar driver interface{}\n\n\tswitch config.VMDriver {\n\tcase \"virtualbox\":\n\t\td := virtualbox.NewDriver(constants.MachineName, constants.Minipath)\n\t\td.Boot2DockerURL = config.MinikubeISO\n\t\td.Memory = config.Memory\n\t\td.CPU = config.CPUs\n\t\tdriver = d\n\tcase \"vmwarefusion\":\n\t\tdriver = createVMwareFusionHost(config)\n\tcase \"kvm\":\n\t\tdriver = createKVMHost(config)\n\tcase \"xhyve\":\n\t\tdriver = createXhyveHost(config)\n\tdefault:\n\t\tglog.Exitf(\"Unsupported driver: %s\\n\", config.VMDriver)\n\t}\n\n\tdata, err := json.Marshal(driver)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\th, err := api.NewHost(config.VMDriver, data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating new host: %s\", err)\n\t}\n\n\th.HostOptions.AuthOptions.CertDir = constants.Minipath\n\th.HostOptions.AuthOptions.StorePath = constants.Minipath\n\th.HostOptions.EngineOptions = &engine.Options{}\n\n\tif err := api.Create(h); err != nil {\n\t\t\/\/ Wait for all the logs to reach the client\n\t\ttime.Sleep(2 * time.Second)\n\t\treturn nil, fmt.Errorf(\"Error creating. %s\", err)\n\t}\n\n\tif err := api.Save(h); err != nil {\n\t\treturn nil, fmt.Errorf(\"Error attempting to save store: %s\", err)\n\t}\n\treturn h, nil\n}\n\n\/\/ GetHostDockerEnv gets the necessary docker env variables to allow the use of docker through minikube's vm\nfunc GetHostDockerEnv(api libmachine.API) (map[string]string, error) {\n\thost, err := checkIfApiExistsAndLoad(api)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tip, err := host.Driver.GetIP()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttcpPrefix := \"tcp:\/\/\"\n\tportDelimiter := \":\"\n\tport := \"2376\"\n\n\tenvMap := map[string]string{\n\t\t\"DOCKER_TLS_VERIFY\": \"1\",\n\t\t\"DOCKER_HOST\": tcpPrefix + ip + portDelimiter + port,\n\t\t\"DOCKER_CERT_PATH\": constants.MakeMiniPath(\"certs\"),\n\t}\n\treturn envMap, nil\n}\n\n\/\/ GetHostLogs gets the localkube logs of the host VM.\nfunc GetHostLogs(api libmachine.API) (string, error) {\n\thost, err := checkIfApiExistsAndLoad(api)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ts, err := host.RunSSHCommand(logsCommand)\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\treturn s, err\n}\n\nfunc checkIfApiExistsAndLoad(api libmachine.API) (*host.Host, error) {\n\texists, err := api.Exists(constants.MachineName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, fmt.Errorf(\"Machine does not exist for api.Exists(%s)\", constants.MachineName)\n\t}\n\n\thost, err := api.Load(constants.MachineName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn host, nil\n}\n\nfunc CreateSSHShell(api libmachine.API, args []string) error {\n\thost, err := checkIfApiExistsAndLoad(api)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcurrentState, err := host.Driver.GetState()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif currentState != state.Running {\n\t\treturn fmt.Errorf(\"Error: Cannot run ssh command: Host %q is not running\", constants.MachineName)\n\t}\n\n\tclient, err := host.CreateSSHClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn client.Shell(strings.Join(args, \" \"))\n}\n\nfunc GetServiceURL(api libmachine.API, namespace, service string) (string, error) {\n\thost, err := checkIfApiExistsAndLoad(api)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tip, err := host.Driver.GetIP()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tport, err := getServicePort(namespace, service)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"http:\/\/%s:%d\", ip, port), nil\n}\n\ntype serviceGetter interface {\n\tGet(name string) (*kubeApi.Service, error)\n}\n\nfunc getServicePort(namespace, service string) (int, error) {\n\tservices, err := getKubernetesServicesWithNamespace(namespace)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn getServicePortFromServiceGetter(services, service)\n}\n\nfunc getServicePortFromServiceGetter(services serviceGetter, service string) (int, error) {\n\tsvc, err := services.Get(service)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"Error getting %s service: %s\", service, err)\n\t}\n\treturn int(svc.Spec.Ports[0].NodePort), nil\n}\n\nfunc getKubernetesServicesWithNamespace(namespace string) (serviceGetter, error) {\n\tloadingRules := clientcmd.NewDefaultClientConfigLoadingRules()\n\tconfigOverrides := &clientcmd.ConfigOverrides{}\n\tkubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)\n\tconfig, err := kubeConfig.ClientConfig()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating kubeConfig: %s\", err)\n\t}\n\tclient, err := unversioned.New(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tservices := client.Services(namespace)\n\treturn services, nil\n}\n<commit_msg>Add some logging to cert generation.<commit_after>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cluster\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/docker\/machine\/drivers\/virtualbox\"\n\t\"github.com\/docker\/machine\/libmachine\"\n\t\"github.com\/docker\/machine\/libmachine\/drivers\"\n\t\"github.com\/docker\/machine\/libmachine\/engine\"\n\t\"github.com\/docker\/machine\/libmachine\/host\"\n\t\"github.com\/docker\/machine\/libmachine\/state\"\n\t\"github.com\/golang\/glog\"\n\tkubeApi \"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/client\/unversioned\"\n\t\"k8s.io\/kubernetes\/pkg\/client\/unversioned\/clientcmd\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/constants\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/sshutil\"\n\t\"k8s.io\/minikube\/pkg\/util\"\n)\n\nvar (\n\tcerts = []string{\"apiserver.crt\", \"apiserver.key\"}\n)\n\n\/\/This init function is used to set the logtostderr variable to false so that INFO level log info does not clutter the CLI\n\/\/INFO lvl logging is displayed due to the kubernetes api calling flag.Set(\"logtostderr\", \"true\") in its init()\n\/\/see: https:\/\/github.com\/kubernetes\/kubernetes\/blob\/master\/pkg\/util\/logs.go#L32-34\nfunc init() {\n\tflag.Set(\"logtostderr\", \"false\")\n}\n\n\/\/ StartHost starts a host VM.\nfunc StartHost(api libmachine.API, config MachineConfig) (*host.Host, error) {\n\tif exists, err := api.Exists(constants.MachineName); err != nil {\n\t\treturn nil, fmt.Errorf(\"Error checking if host exists: %s\", err)\n\t} else if exists {\n\t\tglog.Infoln(\"Machine exists!\")\n\t\th, err := api.Load(constants.MachineName)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error loading existing host: %s\", err)\n\t\t}\n\t\ts, err := h.Driver.GetState()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error getting state for host: %s\", err)\n\t\t}\n\t\tif s != state.Running {\n\t\t\tif err := h.Driver.Start(); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Error starting stopped host: %s\", err)\n\t\t\t}\n\t\t\tif err := api.Save(h); err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Error saving started host: %s\", err)\n\t\t\t}\n\t\t}\n\t\tif err := h.ConfigureAuth(); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error configuring auth on host: %s\", err)\n\t\t}\n\t\treturn h, nil\n\t} else {\n\t\treturn createHost(api, config)\n\t}\n}\n\n\/\/ StopHost stops the host VM.\nfunc StopHost(api libmachine.API) error {\n\thost, err := api.Load(constants.MachineName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := host.Stop(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\ntype multiError struct {\n\tErrors []error\n}\n\nfunc (m *multiError) Collect(err error) {\n\tif err != nil {\n\t\tm.Errors = append(m.Errors, err)\n\t}\n}\n\nfunc (m multiError) ToError() error {\n\tif len(m.Errors) == 0 {\n\t\treturn nil\n\t}\n\n\terrStrings := []string{}\n\tfor _, err := range m.Errors {\n\t\terrStrings = append(errStrings, err.Error())\n\t}\n\treturn fmt.Errorf(strings.Join(errStrings, \"\\n\"))\n}\n\n\/\/ DeleteHost deletes the host VM.\nfunc DeleteHost(api libmachine.API) error {\n\thost, err := api.Load(constants.MachineName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm := multiError{}\n\tm.Collect(host.Driver.Remove())\n\tm.Collect(api.Remove(constants.MachineName))\n\treturn m.ToError()\n}\n\n\/\/ GetHostStatus gets the status of the host VM.\nfunc GetHostStatus(api libmachine.API) (string, error) {\n\tdne := \"Does Not Exist\"\n\texists, err := api.Exists(constants.MachineName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif !exists {\n\t\treturn dne, nil\n\t}\n\n\thost, err := api.Load(constants.MachineName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ts, err := host.Driver.GetState()\n\tif s.String() == \"\" {\n\t\treturn dne, err\n\t}\n\treturn s.String(), err\n}\n\ntype sshAble interface {\n\tRunSSHCommand(string) (string, error)\n}\n\n\/\/ MachineConfig contains the parameters used to start a cluster.\ntype MachineConfig struct {\n\tMinikubeISO string\n\tMemory int\n\tCPUs int\n\tVMDriver string\n}\n\n\/\/ StartCluster starts a k8s cluster on the specified Host.\nfunc StartCluster(h sshAble) error {\n\tcommands := []string{stopCommand, GetStartCommand()}\n\n\tfor _, cmd := range commands {\n\t\tglog.Infoln(cmd)\n\t\toutput, err := h.RunSSHCommand(cmd)\n\t\tglog.Infoln(output)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\ntype fileToCopy struct {\n\tAssetName string\n\tTargetDir string\n\tTargetName string\n\tPermissions string\n}\n\nvar assets = []fileToCopy{\n\t{\n\t\tAssetName: \"out\/localkube\",\n\t\tTargetDir: \"\/usr\/local\/bin\",\n\t\tTargetName: \"localkube\",\n\t\tPermissions: \"0777\",\n\t},\n\t{\n\t\tAssetName: \"deploy\/iso\/addon-manager.yaml\",\n\t\tTargetDir: \"\/etc\/kubernetes\/manifests\/\",\n\t\tTargetName: \"addon-manager.yaml\",\n\t\tPermissions: \"0640\",\n\t},\n\t{\n\t\tAssetName: \"deploy\/addons\/dashboard-rc.yaml\",\n\t\tTargetDir: \"\/etc\/kubernetes\/addons\/\",\n\t\tTargetName: \"dashboard-rc.yaml\",\n\t\tPermissions: \"0640\",\n\t},\n\t{\n\t\tAssetName: \"deploy\/addons\/dashboard-svc.yaml\",\n\t\tTargetDir: \"\/etc\/kubernetes\/addons\/\",\n\t\tTargetName: \"dashboard-svc.yaml\",\n\t\tPermissions: \"0640\",\n\t},\n}\n\nfunc UpdateCluster(d drivers.Driver) error {\n\tclient, err := sshutil.NewSSHClient(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, a := range assets {\n\t\tcontents, err := Asset(a.AssetName)\n\t\tif err != nil {\n\t\t\tglog.Infof(\"Error loading asset %s: %s\", a.AssetName, err)\n\t\t\treturn err\n\t\t}\n\n\t\tif err := sshutil.Transfer(contents, a.TargetDir, a.TargetName, a.Permissions, client); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ SetupCerts gets the generated credentials required to talk to the APIServer.\nfunc SetupCerts(d drivers.Driver) error {\n\tlocalPath := constants.Minipath\n\tipStr, err := d.GetIP()\n\tif err != nil {\n\t\treturn err\n\t}\n\tglog.Infoln(\"Setting up certificates for IP: %s\", ipStr)\n\n\tip := net.ParseIP(ipStr)\n\tpublicPath := filepath.Join(localPath, \"apiserver.crt\")\n\tprivatePath := filepath.Join(localPath, \"apiserver.key\")\n\tif err := GenerateCerts(publicPath, privatePath, ip); err != nil {\n\t\treturn err\n\t}\n\n\tclient, err := sshutil.NewSSHClient(d)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, cert := range certs {\n\t\tp := filepath.Join(localPath, cert)\n\t\tdata, err := ioutil.ReadFile(p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := sshutil.Transfer(data, util.DefaultCertPath, cert, \"0644\", client); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc createHost(api libmachine.API, config MachineConfig) (*host.Host, error) {\n\tvar driver interface{}\n\n\tswitch config.VMDriver {\n\tcase \"virtualbox\":\n\t\td := virtualbox.NewDriver(constants.MachineName, constants.Minipath)\n\t\td.Boot2DockerURL = config.MinikubeISO\n\t\td.Memory = config.Memory\n\t\td.CPU = config.CPUs\n\t\tdriver = d\n\tcase \"vmwarefusion\":\n\t\tdriver = createVMwareFusionHost(config)\n\tcase \"kvm\":\n\t\tdriver = createKVMHost(config)\n\tcase \"xhyve\":\n\t\tdriver = createXhyveHost(config)\n\tdefault:\n\t\tglog.Exitf(\"Unsupported driver: %s\\n\", config.VMDriver)\n\t}\n\n\tdata, err := json.Marshal(driver)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\th, err := api.NewHost(config.VMDriver, data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating new host: %s\", err)\n\t}\n\n\th.HostOptions.AuthOptions.CertDir = constants.Minipath\n\th.HostOptions.AuthOptions.StorePath = constants.Minipath\n\th.HostOptions.EngineOptions = &engine.Options{}\n\n\tif err := api.Create(h); err != nil {\n\t\t\/\/ Wait for all the logs to reach the client\n\t\ttime.Sleep(2 * time.Second)\n\t\treturn nil, fmt.Errorf(\"Error creating. %s\", err)\n\t}\n\n\tif err := api.Save(h); err != nil {\n\t\treturn nil, fmt.Errorf(\"Error attempting to save store: %s\", err)\n\t}\n\treturn h, nil\n}\n\n\/\/ GetHostDockerEnv gets the necessary docker env variables to allow the use of docker through minikube's vm\nfunc GetHostDockerEnv(api libmachine.API) (map[string]string, error) {\n\thost, err := checkIfApiExistsAndLoad(api)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tip, err := host.Driver.GetIP()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttcpPrefix := \"tcp:\/\/\"\n\tportDelimiter := \":\"\n\tport := \"2376\"\n\n\tenvMap := map[string]string{\n\t\t\"DOCKER_TLS_VERIFY\": \"1\",\n\t\t\"DOCKER_HOST\": tcpPrefix + ip + portDelimiter + port,\n\t\t\"DOCKER_CERT_PATH\": constants.MakeMiniPath(\"certs\"),\n\t}\n\treturn envMap, nil\n}\n\n\/\/ GetHostLogs gets the localkube logs of the host VM.\nfunc GetHostLogs(api libmachine.API) (string, error) {\n\thost, err := checkIfApiExistsAndLoad(api)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ts, err := host.RunSSHCommand(logsCommand)\n\tif err != nil {\n\t\treturn \"\", nil\n\t}\n\treturn s, err\n}\n\nfunc checkIfApiExistsAndLoad(api libmachine.API) (*host.Host, error) {\n\texists, err := api.Exists(constants.MachineName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !exists {\n\t\treturn nil, fmt.Errorf(\"Machine does not exist for api.Exists(%s)\", constants.MachineName)\n\t}\n\n\thost, err := api.Load(constants.MachineName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn host, nil\n}\n\nfunc CreateSSHShell(api libmachine.API, args []string) error {\n\thost, err := checkIfApiExistsAndLoad(api)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcurrentState, err := host.Driver.GetState()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif currentState != state.Running {\n\t\treturn fmt.Errorf(\"Error: Cannot run ssh command: Host %q is not running\", constants.MachineName)\n\t}\n\n\tclient, err := host.CreateSSHClient()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn client.Shell(strings.Join(args, \" \"))\n}\n\nfunc GetServiceURL(api libmachine.API, namespace, service string) (string, error) {\n\thost, err := checkIfApiExistsAndLoad(api)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tip, err := host.Driver.GetIP()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tport, err := getServicePort(namespace, service)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"http:\/\/%s:%d\", ip, port), nil\n}\n\ntype serviceGetter interface {\n\tGet(name string) (*kubeApi.Service, error)\n}\n\nfunc getServicePort(namespace, service string) (int, error) {\n\tservices, err := getKubernetesServicesWithNamespace(namespace)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn getServicePortFromServiceGetter(services, service)\n}\n\nfunc getServicePortFromServiceGetter(services serviceGetter, service string) (int, error) {\n\tsvc, err := services.Get(service)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"Error getting %s service: %s\", service, err)\n\t}\n\treturn int(svc.Spec.Ports[0].NodePort), nil\n}\n\nfunc getKubernetesServicesWithNamespace(namespace string) (serviceGetter, error) {\n\tloadingRules := clientcmd.NewDefaultClientConfigLoadingRules()\n\tconfigOverrides := &clientcmd.ConfigOverrides{}\n\tkubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)\n\tconfig, err := kubeConfig.ClientConfig()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error creating kubeConfig: %s\", err)\n\t}\n\tclient, err := unversioned.New(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tservices := client.Services(namespace)\n\treturn services, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package trie\n\nimport \"errors\"\n\ntype altBranch struct {\n\tletter rune\n\tbranch *altTrie\n}\n\ntype altTrie struct {\n\tvalue interface{}\n\tvalidLeaf bool\n\tchildren []altBranch\n}\n\n\/\/ Alt returns an alternate implementation of a Trie, which is slightly faster for searching. Useful for Tries that are created and then infrequently changed.\nfunc Alt() *altTrie {\n\treturn &altTrie{nil, false, nil}\n}\n\nfunc (t altTrie) getChild(r rune) int {\n\tfor i, child := range t.children {\n\t\tif child.letter == r {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n\n\/\/ Add an element to the Trie, mapped to the given value.\nfunc (t *altTrie) Add(key string, val interface{}) error {\n\trunes := []rune(key)\n\texists := t.add(runes, val)\n\n\tif exists {\n\t\treturn errors.New(\"key already exists\")\n\t}\n\n\treturn nil\n}\n\nfunc (t *altTrie) add(r []rune, val interface{}) bool {\n\tif len(r) == 0 {\n\t\treturn false\n\t}\n\n\tif i := t.getChild(r[0]); i != -1 {\n\t\tif len(r) > 1 {\n\t\t\treturn t.children[i].branch.add(r[1:], val)\n\t\t}\n\t\tif t.children[i].branch.validLeaf {\n\t\t\treturn true\n\t\t}\n\t\tchild := t.children[i].branch\n\t\tchild.validLeaf = true\n\t\tchild.value = val\n\t} else {\n\t\tif len(r) > 1 {\n\t\t\tchild := altBranch{\n\t\t\t\tr[0],\n\t\t\t\t&altTrie{},\n\t\t\t}\n\t\t\tt.children = append(t.children, child)\n\n\t\t\treturn child.branch.add(r[1:], val)\n\t\t}\n\t\tt.children = append(t.children, altBranch{\n\t\t\tr[0],\n\t\t\t&altTrie{val, true, nil},\n\t\t})\n\t}\n\treturn false\n}\n\n\/\/ Get a value from the Trie.\n\/\/ Uses a comma ok format.\nfunc (t *altTrie) Get(key string) (interface{}, bool) {\n\tif len(key) == 0 {\n\t\treturn nil, false\n\t}\n\treturn t.get([]rune(key))\n}\n\nfunc (t *altTrie) get(key []rune) (interface{}, bool) {\n\tif len(key) == 0 {\n\t\treturn t.value, t.validLeaf\n\t}\n\tif i := t.getChild(key[0]); i != -1 {\n\t\treturn t.children[i].branch.get(key[1:])\n\t}\n\treturn nil, false\n}\n\n\/\/ Search the Trie for all keys starting with the key.\n\/\/ A full listing of the Trie is possible using t.Search(\"\")\nfunc (t *altTrie) Search(key string) (r []interface{}) {\n\tr = make([]interface{}, 0, 32)\n\tt.search([]rune(key), &r)\n\treturn\n}\n\nfunc (t *altTrie) search(key []rune, results *[]interface{}) {\niterate:\n\tif len(key) == 0 {\n\t\tfor _, child := range t.children {\n\t\t\tchild.branch.search(key, results)\n\t\t}\n\t\tif t.validLeaf {\n\t\t\tl := len(*results)\n\n\t\t\tif l < cap(*results) {\n\t\t\t\t*results = (*results)[:l+1]\n\t\t\t\t(*results)[l] = t.value\n\t\t\t} else {\n\t\t\t\toldR := *results\n\t\t\t\t*results = make([]interface{}, l+1, l*2)\n\t\t\t\tcopy(*results, oldR)\n\t\t\t\t(*results)[l] = t.value\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif i := t.getChild(key[0]); i != -1 {\n\t\tt = t.children[i].branch\n\t\tkey = key[1:]\n\t\tgoto iterate\n\t}\n\treturn\n}\n\n\/\/ Remove the key from the Trie.\n\/\/ The Trie will compact itself if possible.\nfunc (t *altTrie) Remove(key string) error {\n\trunes := []rune(key)\n\n\tif !t.remove(runes) {\n\t\terrors.New(\"key not in trie\")\n\t}\n\n\treturn nil\n}\n\nfunc (t *altTrie) remove(key []rune) bool {\n\tif len(key) == 1 {\n\t\tif i := t.getChild(key[0]); i != -1 {\n\t\t\tchild := t.children[i].branch\n\t\t\tif len(child.children) == 0 {\n\t\t\t\tt.children = append(t.children[:i], t.children[i+1:]...)\n\t\t\t} else {\n\t\t\t\tchild.validLeaf = false\n\t\t\t\tchild.value = nil\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tif i := t.getChild(key[0]); i != -1 {\n\t\tchild := t.children[i].branch\n\t\tret := child.remove(key[1:])\n\n\t\tif !child.validLeaf && len(child.children) == 0 {\n\t\t\tt.children = append(t.children[:i], t.children[i+1:]...)\n\t\t}\n\t\treturn ret\n\t}\n\treturn false\n}\n<commit_msg>Tuning initial result slice size.<commit_after>package trie\n\nimport \"errors\"\n\ntype altBranch struct {\n\tletter rune\n\tbranch *altTrie\n}\n\ntype altTrie struct {\n\tvalue interface{}\n\tvalidLeaf bool\n\tchildren []altBranch\n}\n\n\/\/ Alt returns an alternate implementation of a Trie, which is slightly faster for searching. Useful for Tries that are created and then infrequently changed.\nfunc Alt() *altTrie {\n\treturn &altTrie{nil, false, nil}\n}\n\nfunc (t altTrie) getChild(r rune) int {\n\tfor i, child := range t.children {\n\t\tif child.letter == r {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n\n\/\/ Add an element to the Trie, mapped to the given value.\nfunc (t *altTrie) Add(key string, val interface{}) error {\n\trunes := []rune(key)\n\texists := t.add(runes, val)\n\n\tif exists {\n\t\treturn errors.New(\"key already exists\")\n\t}\n\n\treturn nil\n}\n\nfunc (t *altTrie) add(r []rune, val interface{}) bool {\n\tif len(r) == 0 {\n\t\treturn false\n\t}\n\n\tif i := t.getChild(r[0]); i != -1 {\n\t\tif len(r) > 1 {\n\t\t\treturn t.children[i].branch.add(r[1:], val)\n\t\t}\n\t\tif t.children[i].branch.validLeaf {\n\t\t\treturn true\n\t\t}\n\t\tchild := t.children[i].branch\n\t\tchild.validLeaf = true\n\t\tchild.value = val\n\t} else {\n\t\tif len(r) > 1 {\n\t\t\tchild := altBranch{\n\t\t\t\tr[0],\n\t\t\t\t&altTrie{},\n\t\t\t}\n\t\t\tt.children = append(t.children, child)\n\n\t\t\treturn child.branch.add(r[1:], val)\n\t\t}\n\t\tt.children = append(t.children, altBranch{\n\t\t\tr[0],\n\t\t\t&altTrie{val, true, nil},\n\t\t})\n\t}\n\treturn false\n}\n\n\/\/ Get a value from the Trie.\n\/\/ Uses a comma ok format.\nfunc (t *altTrie) Get(key string) (interface{}, bool) {\n\tif len(key) == 0 {\n\t\treturn nil, false\n\t}\n\treturn t.get([]rune(key))\n}\n\nfunc (t *altTrie) get(key []rune) (interface{}, bool) {\n\tif len(key) == 0 {\n\t\treturn t.value, t.validLeaf\n\t}\n\tif i := t.getChild(key[0]); i != -1 {\n\t\treturn t.children[i].branch.get(key[1:])\n\t}\n\treturn nil, false\n}\n\n\/\/ Search the Trie for all keys starting with the key.\n\/\/ A full listing of the Trie is possible using t.Search(\"\")\nfunc (t *altTrie) Search(key string) (r []interface{}) {\n\tr = make([]interface{}, 0, 64)\n\tt.search([]rune(key), &r)\n\treturn\n}\n\nfunc (t *altTrie) search(key []rune, results *[]interface{}) {\niterate:\n\tif len(key) == 0 {\n\t\tfor _, child := range t.children {\n\t\t\tchild.branch.search(key, results)\n\t\t}\n\t\tif t.validLeaf {\n\t\t\tl := len(*results)\n\n\t\t\tif l < cap(*results) {\n\t\t\t\t*results = (*results)[:l+1]\n\t\t\t\t(*results)[l] = t.value\n\t\t\t} else {\n\t\t\t\toldR := *results\n\t\t\t\t*results = make([]interface{}, l+1, l*2)\n\t\t\t\tcopy(*results, oldR)\n\t\t\t\t(*results)[l] = t.value\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif i := t.getChild(key[0]); i != -1 {\n\t\tt = t.children[i].branch\n\t\tkey = key[1:]\n\t\tgoto iterate\n\t}\n\treturn\n}\n\n\/\/ Remove the key from the Trie.\n\/\/ The Trie will compact itself if possible.\nfunc (t *altTrie) Remove(key string) error {\n\trunes := []rune(key)\n\n\tif !t.remove(runes) {\n\t\terrors.New(\"key not in trie\")\n\t}\n\n\treturn nil\n}\n\nfunc (t *altTrie) remove(key []rune) bool {\n\tif len(key) == 1 {\n\t\tif i := t.getChild(key[0]); i != -1 {\n\t\t\tchild := t.children[i].branch\n\t\t\tif len(child.children) == 0 {\n\t\t\t\tt.children = append(t.children[:i], t.children[i+1:]...)\n\t\t\t} else {\n\t\t\t\tchild.validLeaf = false\n\t\t\t\tchild.value = nil\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\tif i := t.getChild(key[0]); i != -1 {\n\t\tchild := t.children[i].branch\n\t\tret := child.remove(key[1:])\n\n\t\tif !child.validLeaf && len(child.children) == 0 {\n\t\t\tt.children = append(t.children[:i], t.children[i+1:]...)\n\t\t}\n\t\treturn ret\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/High-level functionality\n\npackage trion\n\nimport (\n\t\"polydawn.net\/dockctrl\/crocker\"\n\t. \"fmt\"\n)\n\nconst ExportPath = \".\/\" \/\/Where to export docker images\n\n\/\/Helps run anyything that requires a docker connection.\n\/\/Handles creation & cleanup in one place.\nfunc WithDocker(fn func(TrionConfig, *crocker.Dock, *Command) error ) error {\n\t\/\/Load configuration, then find or start a docker\n\tconfig := FindConfig(\".\")\n\tdock, dir, ours := crocker.FindDock()\n\tcmd := &Command{dock.Client()}\n\n\t\/\/Announce the docker\n\tif ours {\n\t\tPrintln(\"Started a docker in\", dir)\n\t} else {\n\t\tPrintln(\"Connecting to docker\", dir)\n\t}\n\n\t\/\/Run the closure, kill the docker if needed, and return any errors.\n\terr := fn(config, dock, cmd)\n\tdock.Slay()\n\treturn err\n}\n\n\/\/Launches a docker\nfunc Launch(config TrionConfig, dock *crocker.Dock, cmd *Command) error {\n\t\/\/Start the docker and wait for it to finish\n\tCID := cmd.Run(config)\n\tcmd.Wait(CID)\n\n\t\/\/Remove if desired\n\tif config.Purge {\n\t\tcmd.Purge(CID)\n\t}\n\n\treturn nil\n}\n\n\/\/Builds a docker\nfunc Build(config TrionConfig, dock *crocker.Dock, cmd *Command) error {\n\t\/\/Use the build command and upstream image\n\tbuildConfig := config\n\tbuildConfig.Command = config.Build\n\tbuildConfig.Image = config.Upstream\n\n\t\/\/Run the build\n\tCID := cmd.Run(buildConfig)\n\tcmd.Wait(CID)\n\n\t\/\/Create a tar\n\tcmd.Export(CID, ExportPath)\n\n\t\/\/Import the built docker\n\t\/\/ Todo: add --noImport option to goflags\n\tcmd.Import(config, ExportPath)\n\n\t\/\/Remove if desired\n\tif config.Purge {\n\t\tcmd.Purge(CID)\n\t}\n\n\treturn nil\n}\n<commit_msg>remove unused variables.<commit_after>\/\/High-level functionality\n\npackage trion\n\nimport (\n\t\"polydawn.net\/dockctrl\/crocker\"\n\t. \"fmt\"\n)\n\nconst ExportPath = \".\/\" \/\/Where to export docker images\n\n\/\/Helps run anyything that requires a docker connection.\n\/\/Handles creation & cleanup in one place.\nfunc WithDocker(fn func(TrionConfig, *Command) error ) error {\n\t\/\/Load configuration, then find or start a docker\n\tconfig := FindConfig(\".\")\n\tdock, dir, ours := crocker.FindDock()\n\tcmd := &Command{dock.Client()}\n\n\t\/\/Announce the docker\n\tif ours {\n\t\tPrintln(\"Started a docker in\", dir)\n\t} else {\n\t\tPrintln(\"Connecting to docker\", dir)\n\t}\n\n\t\/\/Run the closure, kill the docker if needed, and return any errors.\n\terr := fn(config, cmd)\n\tdock.Slay()\n\treturn err\n}\n\n\/\/Launches a docker\nfunc Launch(config TrionConfig, cmd *Command) error {\n\t\/\/Start the docker and wait for it to finish\n\tCID := cmd.Run(config)\n\tcmd.Wait(CID)\n\n\t\/\/Remove if desired\n\tif config.Purge {\n\t\tcmd.Purge(CID)\n\t}\n\n\treturn nil\n}\n\n\/\/Builds a docker\nfunc Build(config TrionConfig, cmd *Command) error {\n\t\/\/Use the build command and upstream image\n\tbuildConfig := config\n\tbuildConfig.Command = config.Build\n\tbuildConfig.Image = config.Upstream\n\n\t\/\/Run the build\n\tCID := cmd.Run(buildConfig)\n\tcmd.Wait(CID)\n\n\t\/\/Create a tar\n\tcmd.Export(CID, ExportPath)\n\n\t\/\/Import the built docker\n\t\/\/ Todo: add --noImport option to goflags\n\tcmd.Import(config, ExportPath)\n\n\t\/\/Remove if desired\n\tif config.Purge {\n\t\tcmd.Purge(CID)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 fatedier, fatedier@gmail.com\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage plugin\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"strings\"\n\n\tfrpNet \"github.com\/fatedier\/frp\/pkg\/util\/net\"\n)\n\nconst PluginHTTP2HTTPS = \"http2https\"\n\nfunc init() {\n\tRegister(PluginHTTP2HTTPS, NewHTTP2HTTPSPlugin)\n}\n\ntype HTTP2HTTPSPlugin struct {\n\thostHeaderRewrite string\n\tlocalAddr string\n\theaders map[string]string\n\n\tl *Listener\n\ts *http.Server\n}\n\nfunc NewHTTP2HTTPSPlugin(params map[string]string) (Plugin, error) {\n\tlocalAddr := params[\"plugin_local_addr\"]\n\thostHeaderRewrite := params[\"plugin_host_header_rewrite\"]\n\theaders := make(map[string]string)\n\tfor k, v := range params {\n\t\tif !strings.HasPrefix(k, \"plugin_header_\") {\n\t\t\tcontinue\n\t\t}\n\t\tif k = strings.TrimPrefix(k, \"plugin_header_\"); k != \"\" {\n\t\t\theaders[k] = v\n\t\t}\n\t}\n\n\tif localAddr == \"\" {\n\t\treturn nil, fmt.Errorf(\"plugin_local_addr is required\")\n\t}\n\n\tlistener := NewProxyListener()\n\n\tp := &HTTPS2HTTPPlugin{\n\t\tlocalAddr: localAddr,\n\t\thostHeaderRewrite: hostHeaderRewrite,\n\t\theaders: headers,\n\t\tl: listener,\n\t}\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\n\trp := &httputil.ReverseProxy{\n\t\tDirector: func(req *http.Request) {\n\t\t\treq.URL.Scheme = \"https\"\n\t\t\treq.URL.Host = p.localAddr\n\t\t\tif p.hostHeaderRewrite != \"\" {\n\t\t\t\treq.Host = p.hostHeaderRewrite\n\t\t\t}\n\t\t\tfor k, v := range p.headers {\n\t\t\t\treq.Header.Set(k, v)\n\t\t\t}\n\t\t},\n\t\tTransport: tr,\n\t}\n\n\tp.s = &http.Server{\n\t\tHandler: rp,\n\t}\n\n\tgo p.s.Serve(listener)\n\n\treturn p, nil\n}\n\nfunc (p *HTTP2HTTPSPlugin) Handle(conn io.ReadWriteCloser, realConn net.Conn, extraBufToLocal []byte) {\n\twrapConn := frpNet.WrapReadWriteCloserToConn(conn, realConn)\n\tp.l.PutConn(wrapConn)\n}\n\nfunc (p *HTTP2HTTPSPlugin) Name() string {\n\treturn PluginHTTP2HTTPS\n}\n\nfunc (p *HTTP2HTTPSPlugin) Close() error {\n\tif err := p.s.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<commit_msg>fix: stuct name typo (#2458)<commit_after>\/\/ Copyright 2019 fatedier, fatedier@gmail.com\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage plugin\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"strings\"\n\n\tfrpNet \"github.com\/fatedier\/frp\/pkg\/util\/net\"\n)\n\nconst PluginHTTP2HTTPS = \"http2https\"\n\nfunc init() {\n\tRegister(PluginHTTP2HTTPS, NewHTTP2HTTPSPlugin)\n}\n\ntype HTTP2HTTPSPlugin struct {\n\thostHeaderRewrite string\n\tlocalAddr string\n\theaders map[string]string\n\n\tl *Listener\n\ts *http.Server\n}\n\nfunc NewHTTP2HTTPSPlugin(params map[string]string) (Plugin, error) {\n\tlocalAddr := params[\"plugin_local_addr\"]\n\thostHeaderRewrite := params[\"plugin_host_header_rewrite\"]\n\theaders := make(map[string]string)\n\tfor k, v := range params {\n\t\tif !strings.HasPrefix(k, \"plugin_header_\") {\n\t\t\tcontinue\n\t\t}\n\t\tif k = strings.TrimPrefix(k, \"plugin_header_\"); k != \"\" {\n\t\t\theaders[k] = v\n\t\t}\n\t}\n\n\tif localAddr == \"\" {\n\t\treturn nil, fmt.Errorf(\"plugin_local_addr is required\")\n\t}\n\n\tlistener := NewProxyListener()\n\n\tp := &HTTP2HTTPSPlugin{\n\t\tlocalAddr: localAddr,\n\t\thostHeaderRewrite: hostHeaderRewrite,\n\t\theaders: headers,\n\t\tl: listener,\n\t}\n\n\ttr := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\n\trp := &httputil.ReverseProxy{\n\t\tDirector: func(req *http.Request) {\n\t\t\treq.URL.Scheme = \"https\"\n\t\t\treq.URL.Host = p.localAddr\n\t\t\tif p.hostHeaderRewrite != \"\" {\n\t\t\t\treq.Host = p.hostHeaderRewrite\n\t\t\t}\n\t\t\tfor k, v := range p.headers {\n\t\t\t\treq.Header.Set(k, v)\n\t\t\t}\n\t\t},\n\t\tTransport: tr,\n\t}\n\n\tp.s = &http.Server{\n\t\tHandler: rp,\n\t}\n\n\tgo p.s.Serve(listener)\n\n\treturn p, nil\n}\n\nfunc (p *HTTP2HTTPSPlugin) Handle(conn io.ReadWriteCloser, realConn net.Conn, extraBufToLocal []byte) {\n\twrapConn := frpNet.WrapReadWriteCloserToConn(conn, realConn)\n\tp.l.PutConn(wrapConn)\n}\n\nfunc (p *HTTP2HTTPSPlugin) Name() string {\n\treturn PluginHTTP2HTTPS\n}\n\nfunc (p *HTTP2HTTPSPlugin) Close() error {\n\tif err := p.s.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package plugin\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\n\tutilwait \"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\tutildbus \"k8s.io\/kubernetes\/pkg\/util\/dbus\"\n\tkexec \"k8s.io\/kubernetes\/pkg\/util\/exec\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/iptables\"\n)\n\ntype FirewallRule struct {\n\ttable string\n\tchain string\n\targs []string\n}\n\ntype NodeIPTables struct {\n\tipt iptables.Interface\n\tclusterNetworkCIDR string\n\tsyncPeriod time.Duration\n\n\tmu sync.Mutex \/\/ Protects concurrent access to syncIPTableRules()\n}\n\nconst (\n\tOutputFilteringChain iptables.Chain = \"OPENSHIFT-ADMIN-OUTPUT-RULES\"\n)\n\nfunc newNodeIPTables(clusterNetworkCIDR string, syncPeriod time.Duration) *NodeIPTables {\n\treturn &NodeIPTables{\n\t\tipt: iptables.New(kexec.New(), utildbus.New(), iptables.ProtocolIpv4),\n\t\tclusterNetworkCIDR: clusterNetworkCIDR,\n\t\tsyncPeriod: syncPeriod,\n\t}\n}\n\nfunc (n *NodeIPTables) Setup() error {\n\tif err := n.syncIPTableRules(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ If firewalld is running, reload will call this method\n\tn.ipt.AddReloadFunc(func() {\n\t\tif err := n.syncIPTableRules(); err != nil {\n\t\t\tglog.Errorf(\"Reloading openshift iptables failed: %v\", err)\n\t\t}\n\t})\n\n\tgo utilwait.Forever(n.syncLoop, 0)\n\treturn nil\n}\n\n\/\/ syncLoop periodically calls syncIPTableRules().\n\/\/ This is expected to run as a go routine or as the main loop. It does not return.\nfunc (n *NodeIPTables) syncLoop() {\n\tt := time.NewTicker(n.syncPeriod)\n\tdefer t.Stop()\n\tfor {\n\t\t<-t.C\n\t\tglog.V(6).Infof(\"Periodic openshift iptables sync\")\n\t\terr := n.syncIPTableRules()\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Syncing openshift iptables failed: %v\", err)\n\t\t}\n\t}\n}\n\n\/\/ syncIPTableRules syncs the cluster network cidr iptables rules.\n\/\/ Called from SyncLoop() or firwalld reload()\nfunc (n *NodeIPTables) syncIPTableRules() error {\n\tn.mu.Lock()\n\tdefer n.mu.Unlock()\n\n\tstart := time.Now()\n\tdefer func() {\n\t\tglog.V(4).Infof(\"syncIPTableRules took %v\", time.Since(start))\n\t}()\n\tglog.V(3).Infof(\"Syncing openshift iptables rules\")\n\n\tif _, err := n.ipt.EnsureChain(iptables.TableFilter, OutputFilteringChain); err != nil {\n\t\treturn fmt.Errorf(\"failed to ensure chain %q exists: %v\", OutputFilteringChain, err)\n\t}\n\n\trules := n.getStaticNodeIPTablesRules()\n\tfor _, rule := range rules {\n\t\t_, err := n.ipt.EnsureRule(iptables.Prepend, iptables.Table(rule.table), iptables.Chain(rule.chain), rule.args...)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to ensure rule %v exists: %v\", rule, err)\n\t\t}\n\t}\n\treturn nil\n}\n\nconst VXLAN_PORT = \"4789\"\n\n\/\/ Get openshift iptables rules\nfunc (n *NodeIPTables) getStaticNodeIPTablesRules() []FirewallRule {\n\treturn []FirewallRule{\n\t\t{\"nat\", \"POSTROUTING\", []string{\"-s\", n.clusterNetworkCIDR, \"-m\", \"comment\", \"--comment\", \"masquerade pod-to-service and pod-to-external traffic\", \"-j\", \"MASQUERADE\"}},\n\t\t{\"filter\", \"INPUT\", []string{\"-p\", \"udp\", \"--dport\", VXLAN_PORT, \"-m\", \"comment\", \"--comment\", \"VXLAN incoming\", \"-j\", \"ACCEPT\"}},\n\t\t{\"filter\", \"INPUT\", []string{\"-i\", TUN, \"-m\", \"comment\", \"--comment\", \"from SDN to localhost\", \"-j\", \"ACCEPT\"}},\n\t\t{\"filter\", \"INPUT\", []string{\"-i\", \"docker0\", \"-m\", \"comment\", \"--comment\", \"from docker to localhost\", \"-j\", \"ACCEPT\"}},\n\t\t{\"filter\", \"FORWARD\", []string{\"-s\", n.clusterNetworkCIDR, \"-m\", \"comment\", \"--comment\", \"attempted resend after connection close\", \"-m\", \"conntrack\", \"--ctstate\", \"INVALID\", \"-j\", \"DROP\"}},\n\t\t{\"filter\", \"FORWARD\", []string{\"-d\", n.clusterNetworkCIDR, \"-m\", \"comment\", \"--comment\", \"forward traffic from SDN\", \"-j\", \"ACCEPT\"}},\n\t\t{\"filter\", \"FORWARD\", []string{\"-s\", n.clusterNetworkCIDR, \"-m\", \"comment\", \"--comment\", \"forward traffic to SDN\", \"-j\", \"ACCEPT\"}},\n\t\t{\"filter\", \"FORWARD\", []string{\"-i\", TUN, \"!\", \"-o\", TUN, \"-m\", \"comment\", \"--comment\", \"administrator overrides\", \"-j\", string(OutputFilteringChain)}},\n\t}\n}\n<commit_msg>Segregate OpenShift's iptables rules<commit_after>package plugin\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\n\tutilwait \"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\tutildbus \"k8s.io\/kubernetes\/pkg\/util\/dbus\"\n\tkexec \"k8s.io\/kubernetes\/pkg\/util\/exec\"\n\t\"k8s.io\/kubernetes\/pkg\/util\/iptables\"\n)\n\ntype NodeIPTables struct {\n\tipt iptables.Interface\n\tclusterNetworkCIDR string\n\tsyncPeriod time.Duration\n\n\tmu sync.Mutex \/\/ Protects concurrent access to syncIPTableRules()\n}\n\nfunc newNodeIPTables(clusterNetworkCIDR string, syncPeriod time.Duration) *NodeIPTables {\n\treturn &NodeIPTables{\n\t\tipt: iptables.New(kexec.New(), utildbus.New(), iptables.ProtocolIpv4),\n\t\tclusterNetworkCIDR: clusterNetworkCIDR,\n\t\tsyncPeriod: syncPeriod,\n\t}\n}\n\nfunc (n *NodeIPTables) Setup() error {\n\tif err := n.syncIPTableRules(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ If firewalld is running, reload will call this method\n\tn.ipt.AddReloadFunc(func() {\n\t\tif err := n.syncIPTableRules(); err != nil {\n\t\t\tglog.Errorf(\"Reloading openshift iptables failed: %v\", err)\n\t\t}\n\t})\n\n\tgo utilwait.Forever(n.syncLoop, 0)\n\treturn nil\n}\n\n\/\/ syncLoop periodically calls syncIPTableRules().\n\/\/ This is expected to run as a go routine or as the main loop. It does not return.\nfunc (n *NodeIPTables) syncLoop() {\n\tt := time.NewTicker(n.syncPeriod)\n\tdefer t.Stop()\n\tfor {\n\t\t<-t.C\n\t\tglog.V(6).Infof(\"Periodic openshift iptables sync\")\n\t\terr := n.syncIPTableRules()\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"Syncing openshift iptables failed: %v\", err)\n\t\t}\n\t}\n}\n\ntype Chain struct {\n\ttable string\n\tname string\n\tsrcChain string\n\tsrcRule []string\n\trules [][]string\n}\n\n\/\/ Adds all the rules in chain, returning true if they were all already present\nfunc (n *NodeIPTables) addChainRules(chain Chain) (bool, error) {\n\tallExisted := true\n\tfor _, rule := range chain.rules {\n\t\texisted, err := n.ipt.EnsureRule(iptables.Append, iptables.Table(chain.table), iptables.Chain(chain.name), rule...)\n\t\tif err != nil {\n\t\t\treturn false, fmt.Errorf(\"failed to ensure rule %v exists: %v\", rule, err)\n\t\t}\n\t\tif !existed {\n\t\t\tallExisted = false\n\t\t}\n\t}\n\treturn allExisted, nil\n}\n\n\/\/ syncIPTableRules syncs the cluster network cidr iptables rules.\n\/\/ Called from SyncLoop() or firewalld reload()\nfunc (n *NodeIPTables) syncIPTableRules() error {\n\tn.mu.Lock()\n\tdefer n.mu.Unlock()\n\n\tstart := time.Now()\n\tdefer func() {\n\t\tglog.V(4).Infof(\"syncIPTableRules took %v\", time.Since(start))\n\t}()\n\tglog.V(3).Infof(\"Syncing openshift iptables rules\")\n\n\tfor _, chain := range n.getNodeIPTablesChains() {\n\t\t\/\/ Create chain if it does not already exist\n\t\tchainExisted, err := n.ipt.EnsureChain(iptables.Table(chain.table), iptables.Chain(chain.name))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to ensure chain %s exists: %v\", chain.name, err)\n\t\t}\n\n\t\t\/\/ Create the rule pointing to it from its parent chain. Note that since we\n\t\t\/\/ use iptables.Prepend each time, chains with the same table and srcChain\n\t\t\/\/ (ie, OPENSHIFT-FIREWALL-FORWARD and OPENSHIFT-ADMIN-OUTPUT-RULES) will\n\t\t\/\/ run in *reverse* order of how they are listed in getNodeIPTablesChains().\n\t\t_, err = n.ipt.EnsureRule(iptables.Prepend, iptables.Table(chain.table), iptables.Chain(chain.srcChain), append(chain.srcRule, \"-j\", chain.name)...)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to ensure rule from %s to %s exists: %v\", chain.srcChain, chain.name, err)\n\t\t}\n\n\t\t\/\/ Add\/sync the rules\n\t\trulesExisted, err := n.addChainRules(chain)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif chainExisted && !rulesExisted {\n\t\t\t\/\/ Chain existed but not with the expected rules; this probably means\n\t\t\t\/\/ it contained rules referring to a *different* subnet; flush them\n\t\t\t\/\/ and try again.\n\t\t\tif err = n.ipt.FlushChain(iptables.Table(chain.table), iptables.Chain(chain.name)); err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to flush chain %s: %v\", chain.name, err)\n\t\t\t}\n\t\t\tif _, err = n.addChainRules(chain); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nconst VXLAN_PORT = \"4789\"\n\nfunc (n *NodeIPTables) getNodeIPTablesChains() []Chain {\n\treturn []Chain{\n\t\t{\n\t\t\ttable: \"nat\",\n\t\t\tname: \"OPENSHIFT-MASQUERADE\",\n\t\t\tsrcChain: \"POSTROUTING\",\n\t\t\tsrcRule: []string{\"-m\", \"comment\", \"--comment\", \"rules for masquerading OpenShift traffic\"},\n\t\t\trules: [][]string{\n\t\t\t\t{\"-s\", n.clusterNetworkCIDR, \"-m\", \"comment\", \"--comment\", \"masquerade pod-to-service and pod-to-external traffic\", \"-j\", \"MASQUERADE\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ttable: \"filter\",\n\t\t\tname: \"OPENSHIFT-FIREWALL-ALLOW\",\n\t\t\tsrcChain: \"INPUT\",\n\t\t\tsrcRule: []string{\"-m\", \"comment\", \"--comment\", \"firewall overrides\"},\n\t\t\trules: [][]string{\n\t\t\t\t{\"-p\", \"udp\", \"--dport\", VXLAN_PORT, \"-m\", \"comment\", \"--comment\", \"VXLAN incoming\", \"-j\", \"ACCEPT\"},\n\t\t\t\t{\"-i\", TUN, \"-m\", \"comment\", \"--comment\", \"from SDN to localhost\", \"-j\", \"ACCEPT\"},\n\t\t\t\t{\"-i\", \"docker0\", \"-m\", \"comment\", \"--comment\", \"from docker to localhost\", \"-j\", \"ACCEPT\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ttable: \"filter\",\n\t\t\tname: \"OPENSHIFT-FIREWALL-FORWARD\",\n\t\t\tsrcChain: \"FORWARD\",\n\t\t\tsrcRule: []string{\"-m\", \"comment\", \"--comment\", \"firewall overrides\"},\n\t\t\trules: [][]string{\n\t\t\t\t{\"-s\", n.clusterNetworkCIDR, \"-m\", \"comment\", \"--comment\", \"attempted resend after connection close\", \"-m\", \"conntrack\", \"--ctstate\", \"INVALID\", \"-j\", \"DROP\"},\n\t\t\t\t{\"-d\", n.clusterNetworkCIDR, \"-m\", \"comment\", \"--comment\", \"forward traffic from SDN\", \"-j\", \"ACCEPT\"},\n\t\t\t\t{\"-s\", n.clusterNetworkCIDR, \"-m\", \"comment\", \"--comment\", \"forward traffic to SDN\", \"-j\", \"ACCEPT\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ttable: \"filter\",\n\t\t\tname: \"OPENSHIFT-ADMIN-OUTPUT-RULES\",\n\t\t\tsrcChain: \"FORWARD\",\n\t\t\tsrcRule: []string{\"-i\", TUN, \"!\", \"-o\", TUN, \"-m\", \"comment\", \"--comment\", \"administrator overrides\"},\n\t\t\trules: nil,\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2021 The gVisor Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage control\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"gvisor.dev\/gvisor\/pkg\/abi\/linux\"\n\t\"gvisor.dev\/gvisor\/pkg\/fd\"\n\t\"gvisor.dev\/gvisor\/pkg\/log\"\n\t\"gvisor.dev\/gvisor\/pkg\/sentry\/fs\/user\"\n\t\"gvisor.dev\/gvisor\/pkg\/sentry\/kernel\"\n\t\"gvisor.dev\/gvisor\/pkg\/sentry\/kernel\/auth\"\n\t\"gvisor.dev\/gvisor\/pkg\/sentry\/limits\"\n\t\"gvisor.dev\/gvisor\/pkg\/sentry\/vfs\"\n\t\"gvisor.dev\/gvisor\/pkg\/sync\"\n\t\"gvisor.dev\/gvisor\/pkg\/urpc\"\n)\n\n\/\/ Lifecycle provides functions related to starting and stopping tasks.\ntype Lifecycle struct {\n\t\/\/ Kernel is the kernel where the tasks belong to.\n\tKernel *kernel.Kernel\n\n\t\/\/ StartedCh is the channel used to send a message to the sentry that\n\t\/\/ all the containers in the sandbox have been started.\n\tStartedCh chan struct{}\n\n\t\/\/ mu protects the fields below.\n\tmu sync.RWMutex\n\n\t\/\/ containersStarted is the number of containers started in the sandbox.\n\tcontainersStarted int32\n\n\t\/\/ MountNamespacesMap is a map of container id\/names and the mount\n\t\/\/ namespaces.\n\tMountNamespacesMap map[string]*vfs.MountNamespace\n}\n\n\/\/ StartContainerArgs is the set of arguments to start a container.\ntype StartContainerArgs struct {\n\t\/\/ Filename is the filename to load.\n\t\/\/\n\t\/\/ If this is provided as \"\", then the file will be guessed via Argv[0].\n\tFilename string `json:\"filename\"`\n\n\t\/\/ Argv is a list of arguments.\n\tArgv []string `json:\"argv\"`\n\n\t\/\/ Envv is a list of environment variables.\n\tEnvv []string `json:\"envv\"`\n\n\t\/\/ WorkingDirectory defines the working directory for the new process.\n\tWorkingDirectory string `json:\"wd\"`\n\n\t\/\/ KUID is the UID to run with in the root user namespace. Defaults to\n\t\/\/ root if not set explicitly.\n\tKUID auth.KUID `json:\"KUID\"`\n\n\t\/\/ KGID is the GID to run with in the root user namespace. Defaults to\n\t\/\/ the root group if not set explicitly.\n\tKGID auth.KGID `json:\"KGID\"`\n\n\t\/\/ ExtraKGIDs is the list of additional groups to which the user belongs.\n\tExtraKGIDs []auth.KGID `json:\"extraKGID\"`\n\n\t\/\/ Capabilities is the list of capabilities to give to the process.\n\tCapabilities *auth.TaskCapabilities `json:\"capabilities\"`\n\n\t\/\/ FilePayload determines the files to give to the new process.\n\turpc.FilePayload\n\n\t\/\/ ContainerID is the container for the process being executed.\n\tContainerID string `json:\"containerID\"`\n\n\t\/\/ Limits is the limit set for the process being executed.\n\tLimits *limits.LimitSet `json:\"limits\"`\n}\n\n\/\/ String prints the StartContainerArgs.argv as a string.\nfunc (args StartContainerArgs) String() string {\n\tif len(args.Argv) == 0 {\n\t\treturn args.Filename\n\t}\n\ta := make([]string, len(args.Argv))\n\tcopy(a, args.Argv)\n\tif args.Filename != \"\" {\n\t\ta[0] = args.Filename\n\t}\n\treturn strings.Join(a, \" \")\n}\n\n\/\/ StartContainer will start a new container in the sandbox.\nfunc (l *Lifecycle) StartContainer(args *StartContainerArgs, _ *uint32) error {\n\t\/\/ Import file descriptors.\n\tfdTable := l.Kernel.NewFDTable()\n\n\tcreds := auth.NewUserCredentials(\n\t\targs.KUID,\n\t\targs.KGID,\n\t\targs.ExtraKGIDs,\n\t\targs.Capabilities,\n\t\tl.Kernel.RootUserNamespace())\n\n\tlimitSet := args.Limits\n\tif limitSet == nil {\n\t\tlimitSet = limits.NewLimitSet()\n\t}\n\tinitArgs := kernel.CreateProcessArgs{\n\t\tFilename: args.Filename,\n\t\tArgv: args.Argv,\n\t\tEnvv: args.Envv,\n\t\tWorkingDirectory: args.WorkingDirectory,\n\t\tCredentials: creds,\n\t\tFDTable: fdTable,\n\t\tUmask: 0022,\n\t\tLimits: limitSet,\n\t\tMaxSymlinkTraversals: linux.MaxSymlinkTraversals,\n\t\tUTSNamespace: l.Kernel.RootUTSNamespace(),\n\t\tIPCNamespace: l.Kernel.RootIPCNamespace(),\n\t\tAbstractSocketNamespace: l.Kernel.RootAbstractSocketNamespace(),\n\t\tContainerID: args.ContainerID,\n\t\tPIDNamespace: l.Kernel.RootPIDNamespace(),\n\t}\n\n\tctx := initArgs.NewContext(l.Kernel)\n\tdefer fdTable.DecRef(ctx)\n\n\t\/\/ VFS2 is supported in multi-container mode by default.\n\tl.mu.RLock()\n\tmntns, ok := l.MountNamespacesMap[initArgs.ContainerID]\n\tif !ok {\n\t\tl.mu.RUnlock()\n\t\treturn fmt.Errorf(\"mount namespace is nil for %s\", initArgs.ContainerID)\n\t}\n\tinitArgs.MountNamespaceVFS2 = mntns\n\tl.mu.RUnlock()\n\tinitArgs.MountNamespaceVFS2.IncRef()\n\n\tresolved, err := user.ResolveExecutablePath(ctx, &initArgs)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinitArgs.Filename = resolved\n\n\tfds, err := fd.NewFromFiles(args.Files)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"duplicating payload files: %w\", err)\n\t}\n\tdefer func() {\n\t\tfor _, fd := range fds {\n\t\t\t_ = fd.Close()\n\t\t}\n\t}()\n\n\ttg, _, err := l.Kernel.CreateProcess(initArgs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tl.mu.Lock()\n\tnumContainers := int32(len(l.MountNamespacesMap))\n\n\t\/\/ Start the newly created process.\n\tl.Kernel.StartProcess(tg)\n\tlog.Infof(\"Started the new container %v \", l.containersStarted)\n\tl.containersStarted++\n\tif numContainers == l.containersStarted {\n\t\tl.StartedCh <- struct{}{}\n\t}\n\tl.mu.Unlock()\n\treturn nil\n}\n\n\/\/ Pause pauses all tasks, blocking until they are stopped.\nfunc (l *Lifecycle) Pause(_, _ *struct{}) error {\n\tl.Kernel.Pause()\n\treturn nil\n}\n\n\/\/ Resume resumes all tasks.\nfunc (l *Lifecycle) Resume(_, _ *struct{}) error {\n\tl.Kernel.Unpause()\n\treturn nil\n}\n<commit_msg>Add a new control message for destroying sandbox in multi-container mode.<commit_after>\/\/ Copyright 2021 The gVisor Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage control\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"gvisor.dev\/gvisor\/pkg\/abi\/linux\"\n\t\"gvisor.dev\/gvisor\/pkg\/fd\"\n\t\"gvisor.dev\/gvisor\/pkg\/log\"\n\t\"gvisor.dev\/gvisor\/pkg\/sentry\/fs\/user\"\n\t\"gvisor.dev\/gvisor\/pkg\/sentry\/kernel\"\n\t\"gvisor.dev\/gvisor\/pkg\/sentry\/kernel\/auth\"\n\t\"gvisor.dev\/gvisor\/pkg\/sentry\/limits\"\n\t\"gvisor.dev\/gvisor\/pkg\/sentry\/vfs\"\n\t\"gvisor.dev\/gvisor\/pkg\/sync\"\n\t\"gvisor.dev\/gvisor\/pkg\/urpc\"\n)\n\n\/\/ Lifecycle provides functions related to starting and stopping tasks.\ntype Lifecycle struct {\n\t\/\/ Kernel is the kernel where the tasks belong to.\n\tKernel *kernel.Kernel\n\n\t\/\/ ShutdownCh is the channel used to signal the sentry to shutdown\n\t\/\/ the sentry\/sandbox.\n\tShutdownCh chan struct{}\n\n\t\/\/ mu protects the fields below.\n\tmu sync.RWMutex\n\n\t\/\/ MountNamespacesMap is a map of container id\/names and the mount\n\t\/\/ namespaces.\n\tMountNamespacesMap map[string]*vfs.MountNamespace\n}\n\n\/\/ StartContainerArgs is the set of arguments to start a container.\ntype StartContainerArgs struct {\n\t\/\/ Filename is the filename to load.\n\t\/\/\n\t\/\/ If this is provided as \"\", then the file will be guessed via Argv[0].\n\tFilename string `json:\"filename\"`\n\n\t\/\/ Argv is a list of arguments.\n\tArgv []string `json:\"argv\"`\n\n\t\/\/ Envv is a list of environment variables.\n\tEnvv []string `json:\"envv\"`\n\n\t\/\/ WorkingDirectory defines the working directory for the new process.\n\tWorkingDirectory string `json:\"wd\"`\n\n\t\/\/ KUID is the UID to run with in the root user namespace. Defaults to\n\t\/\/ root if not set explicitly.\n\tKUID auth.KUID `json:\"KUID\"`\n\n\t\/\/ KGID is the GID to run with in the root user namespace. Defaults to\n\t\/\/ the root group if not set explicitly.\n\tKGID auth.KGID `json:\"KGID\"`\n\n\t\/\/ ExtraKGIDs is the list of additional groups to which the user belongs.\n\tExtraKGIDs []auth.KGID `json:\"extraKGID\"`\n\n\t\/\/ Capabilities is the list of capabilities to give to the process.\n\tCapabilities *auth.TaskCapabilities `json:\"capabilities\"`\n\n\t\/\/ FilePayload determines the files to give to the new process.\n\turpc.FilePayload\n\n\t\/\/ ContainerID is the container for the process being executed.\n\tContainerID string `json:\"containerID\"`\n\n\t\/\/ Limits is the limit set for the process being executed.\n\tLimits *limits.LimitSet `json:\"limits\"`\n}\n\n\/\/ String prints the StartContainerArgs.argv as a string.\nfunc (args StartContainerArgs) String() string {\n\tif len(args.Argv) == 0 {\n\t\treturn args.Filename\n\t}\n\ta := make([]string, len(args.Argv))\n\tcopy(a, args.Argv)\n\tif args.Filename != \"\" {\n\t\ta[0] = args.Filename\n\t}\n\treturn strings.Join(a, \" \")\n}\n\n\/\/ StartContainer will start a new container in the sandbox.\nfunc (l *Lifecycle) StartContainer(args *StartContainerArgs, _ *uint32) error {\n\t\/\/ Import file descriptors.\n\tfdTable := l.Kernel.NewFDTable()\n\n\tcreds := auth.NewUserCredentials(\n\t\targs.KUID,\n\t\targs.KGID,\n\t\targs.ExtraKGIDs,\n\t\targs.Capabilities,\n\t\tl.Kernel.RootUserNamespace())\n\n\tlimitSet := args.Limits\n\tif limitSet == nil {\n\t\tlimitSet = limits.NewLimitSet()\n\t}\n\tinitArgs := kernel.CreateProcessArgs{\n\t\tFilename: args.Filename,\n\t\tArgv: args.Argv,\n\t\tEnvv: args.Envv,\n\t\tWorkingDirectory: args.WorkingDirectory,\n\t\tCredentials: creds,\n\t\tFDTable: fdTable,\n\t\tUmask: 0022,\n\t\tLimits: limitSet,\n\t\tMaxSymlinkTraversals: linux.MaxSymlinkTraversals,\n\t\tUTSNamespace: l.Kernel.RootUTSNamespace(),\n\t\tIPCNamespace: l.Kernel.RootIPCNamespace(),\n\t\tAbstractSocketNamespace: l.Kernel.RootAbstractSocketNamespace(),\n\t\tContainerID: args.ContainerID,\n\t\tPIDNamespace: l.Kernel.RootPIDNamespace(),\n\t}\n\n\tctx := initArgs.NewContext(l.Kernel)\n\tdefer fdTable.DecRef(ctx)\n\n\t\/\/ VFS2 is supported in multi-container mode by default.\n\tl.mu.RLock()\n\tmntns, ok := l.MountNamespacesMap[initArgs.ContainerID]\n\tif !ok {\n\t\tl.mu.RUnlock()\n\t\treturn fmt.Errorf(\"mount namespace is nil for %s\", initArgs.ContainerID)\n\t}\n\tinitArgs.MountNamespaceVFS2 = mntns\n\tl.mu.RUnlock()\n\tinitArgs.MountNamespaceVFS2.IncRef()\n\n\tresolved, err := user.ResolveExecutablePath(ctx, &initArgs)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinitArgs.Filename = resolved\n\n\tfds, err := fd.NewFromFiles(args.Files)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"duplicating payload files: %w\", err)\n\t}\n\tdefer func() {\n\t\tfor _, fd := range fds {\n\t\t\t_ = fd.Close()\n\t\t}\n\t}()\n\n\ttg, _, err := l.Kernel.CreateProcess(initArgs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Start the newly created process.\n\tl.Kernel.StartProcess(tg)\n\tlog.Infof(\"Started the new container %v \", initArgs.ContainerID)\n\treturn nil\n}\n\n\/\/ Pause pauses all tasks, blocking until they are stopped.\nfunc (l *Lifecycle) Pause(_, _ *struct{}) error {\n\tl.Kernel.Pause()\n\treturn nil\n}\n\n\/\/ Resume resumes all tasks.\nfunc (l *Lifecycle) Resume(_, _ *struct{}) error {\n\tl.Kernel.Unpause()\n\treturn nil\n}\n\n\/\/ Shutdown sends signal to destroy the sentry\/sandbox.\nfunc (l *Lifecycle) Shutdown(_, _ *struct{}) error {\n\tclose(l.ShutdownCh)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Access pass function propogation\n\/\/\n\/\/ Alias accesses to function parameters passed by pointers, and record\n\/\/ global variable accesses. Propogate accesses upwards through the blocks.\n\/\/\n\/\/ func foo(ptr *DataA, val DataB) {\n\/\/ \/\/ Read ptr\n\/\/ \/\/ Write ptr\n\/\/ \/\/ Read val\n\/\/ \/\/ Write val\n\/\/ }\n\/\/ \n\/\/ func bar(index int, val []DataB) int {\n\/\/ return val[index]\n\/\/ }\n\/\/\n\/\/ func main() {\n\/\/ a := &DataA{}\n\/\/ foo(a, DataB{}) \/\/ bubble up the reads and writes to \"a\" (ptr)\n\/\/ i := 0\n\/\/ aList := []DataA{a}\n\/\/ bar(i, aList) \/\/ bubble up val[index] -> aList[i] access\n\/\/ }\npackage main\n\nimport (\n\t\"fmt\"\n\t\"go\/ast\"\n)\n\ntype AccessPassFuncPropogate struct {\n\tBasePass\n}\n\ntype AccessPassFuncPropogateVisitor struct {\n\tpass Pass\n\tp *Package\n\tcur *BasicBlock\n\tdataBlock *AccessPassData\n\tnode ast.Node\n}\n\nfunc (v AccessPassFuncPropogateVisitor) Done(block *BasicBlock) (modified bool, err error) {\n\tdataBlock := v.dataBlock\n\n\tblock.Print(\"== Defines ==\")\n\tfor ident, expr := range dataBlock.defines {\n\t\tblock.Printf(\"%s = %T %+v\", ident, expr, expr)\n\t}\n\tblock.Print(\"== Accesses ==\")\n\tfor _, access := range dataBlock.accesses {\n\t\tblock.Printf(access.String())\n\t}\n\n\treturn\n}\n\nfunc (v AccessPassFuncPropogateVisitor) Visit(node ast.Node) (w BasicBlockVisitor) {\n\t\/\/ Get the closest enclosing basic block for this node\n\tdataBlock := v.dataBlock\n\tb := v.cur\n\tpass := v.pass\n\tif node == nil {\n\t\t\/\/ post-order actions (all sub-nodes have been visited)\n\t\tnode = v.node\n\t\t\/\/ Locate function calls\n\t\tswitch t := node.(type) {\n\t\tcase *ast.CallExpr:\n\t\t\tswitch f := t.Fun.(type) {\n\t\t\tcase *ast.FuncLit:\n\t\t\t\t\/\/ go down a FuncLit branch\n\t\t\tcase *ast.Ident:\n\t\t\t\tfun := v.p.Lookup(f.Name)\n\t\t\t\tif fun == nil {\n\t\t\t\t\t\/\/ builtin function, or not found\n\t\t\t\t\tb.Print(\"Function not found\", f.Name)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tfuncDecl := fun.Decl.(*ast.FuncDecl)\n\t\t\t\tfuncType := funcDecl.Type\n\t\t\t\tfuncDataBlock := pass.GetCompiler().GetPassResult(BasicBlockPassType, funcDecl).(*BasicBlock).Get(AccessPassType).(*AccessPassData)\n\n\t\t\t\tchild := v.cur\n\t\t\t\tparent := child.parent\n\t\t\t\t\/\/ Now fill in the accesses this call would have made, and propogate it\n\t\t\t\t\/\/ all the way to the top\n\t\t\t\tvar funcAccesses []IdentifierGroup \/\/ only the accesses this function made\n\n\t\t\t\t\/\/ Fill in global accesses\n\t\t\t\tfor _, access := range funcDataBlock.accesses {\n\t\t\t\t\tif _, ok := funcDataBlock.defines[access.group[0].id]; !ok {\n\t\t\t\t\t\t\/\/ if there is an array access that uses an identifier block defined in \n\t\t\t\t\t\t\/\/ this block, change the access from b[idx] to b\n\t\t\t\t\t\tvar ig IdentifierGroup = access\n\t\t\t\t\t\tfor idx, ident := range access.group {\n\t\t\t\t\t\t\tif _, ok := dataBlock.defines[ident.index]; ok && ident.isIndexed {\n\t\t\t\t\t\t\t\tig.group = make([]Identifier, idx+1)\n\t\t\t\t\t\t\t\tcopy(ig.group, access.group)\n\t\t\t\t\t\t\t\tparent.Printf(\"Leaving index scope [%s]\", ig.group[idx].index)\n\t\t\t\t\t\t\t\tig.group[idx].isIndexed = false\n\t\t\t\t\t\t\t\tig.group[idx].index = \"\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tb.Print(\"Global access\", ig.String())\n\t\t\t\t\t\tfuncAccesses = append(funcAccesses, ig)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t\/\/ Fill in aliased arguments\n\t\t\t\tpos := 0 \/\/ argument position\n\t\t\t\tfor _, arg := range funcType.Params.List {\n\t\t\t\t\twriteThrough := false\n\t\t\t\t\t\/\/ is the argument able to be modified?\n\t\t\t\t\t\/\/ builtin types (slice, map, chan), pointers\n\t\t\t\t\tswitch arg.Type.(type) {\n\t\t\t\t\tcase *ast.ArrayType, *ast.MapType, *ast.ChanType:\n\t\t\t\t\t\tb.Printf(\"Pass-by-reference %v %T\", arg.Names, arg.Type)\n\t\t\t\t\t\twriteThrough = true\n\t\t\t\t\tcase *ast.StarExpr:\n\t\t\t\t\t\tb.Printf(\"Pass-by-pointer %v %T\", arg.Names, arg.Type)\n\t\t\t\t\t\twriteThrough = true\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, argName := range arg.Names {\n\t\t\t\t\t\tif writeThrough {\n\t\t\t\t\t\t\tcallArg := t.Args[pos]\n\t\t\t\t\t\t\tcallIdent := &IdentifierGroup{}\n\t\t\t\t\t\t\tAccessIdentBuild(callIdent, callArg, nil)\n\n\t\t\t\t\t\t\t\/\/ Find all accesses to these variables\n\t\t\t\t\t\t\tfor _, access := range funcDataBlock.accesses {\n\t\t\t\t\t\t\t\t\/\/ Replace the function arg name with the callIdent prefix\n\t\t\t\t\t\t\t\tif access.group[0].id == argName.Name {\n\t\t\t\t\t\t\t\t\t\/\/ check if an index variable is also a function argument and\n\t\t\t\t\t\t\t\t\t\/\/ remove it\n\t\t\t\t\t\t\t\t\tnewAccess := make([]Identifier, len(access.group))\n\n\t\t\t\t\t\t\t\t\tcopy(newAccess, access.group) \/\/ full copy\n\t\t\t\t\t\t\t\t\tfor idx, ident := range newAccess {\n\t\t\t\t\t\t\t\t\t\tif _, ok := funcDataBlock.defines[ident.index]; ok && ident.isIndexed {\n\t\t\t\t\t\t\t\t\t\t\tnewAccess = newAccess[0 : idx+1]\n\t\t\t\t\t\t\t\t\t\t\tnewAccess[idx].isIndexed = false\n\t\t\t\t\t\t\t\t\t\t\tb.Printf(\"Stripping array index %s\", ident.index)\n\t\t\t\t\t\t\t\t\t\t\tnewAccess[idx].index = \"\"\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t\/\/ if the callsite is &a and the access is *a, make the access\n\t\t\t\t\t\t\t\t\t\/\/ a for this function\n\t\t\t\t\t\t\t\t\tvar callIdentCopy []Identifier\n\t\t\t\t\t\t\t\t\tif callIdent.group[len(callIdent.group)-1].refType == AddressOf && newAccess[len(newAccess)-1].refType == Dereference {\n\t\t\t\t\t\t\t\t\t\tb.Print(\"Removing pointer alias & -> *\")\n\t\t\t\t\t\t\t\t\t\tnewAccess[len(newAccess)-1].refType = NoReference\n\t\t\t\t\t\t\t\t\t\tcallIdentCopy = make([]Identifier, len(callIdent.group))\n\t\t\t\t\t\t\t\t\t\tcopy(callIdentCopy, callIdent.group)\n\t\t\t\t\t\t\t\t\t\tcallIdentCopy[len(callIdentCopy)-1].refType = NoReference\n\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\tcallIdentCopy = callIdent.group\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\/\/ replace access[0] with callIdent\n\t\t\t\t\t\t\t\t\tvar ig IdentifierGroup\n\t\t\t\t\t\t\t\t\tig.t = access.t\n\t\t\t\t\t\t\t\t\tig.group = append(ig.group, callIdentCopy...)\n\t\t\t\t\t\t\t\t\tig.group = append(ig.group, newAccess[1:]...)\n\t\t\t\t\t\t\t\t\tb.Printf(\"%s -> %s\", access.String(), ig.String())\n\t\t\t\t\t\t\t\t\tfuncAccesses = append(funcAccesses, ig)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tpos++\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t\/\/ Propogate ONLY aliased argument accesses upwards\n\t\t\t\t\/\/ NOTE: doesn't work with recursive functions\n\n\t\t\t\t\/\/ Move upwards, replacing the placeholder access with the group of\n\t\t\t\t\/\/ accesses made by this function. Stop at variable define boundaries\n\t\t\t\tplaceholderIdent := v.pass.GetCompiler().GetPassResult(AccessPassType, t).(*ast.Ident)\n\t\t\t\tb.Printf(\"\\x1b[33m>> %s\\x1b[0m filling in function effects: %+v, %+v\", placeholderIdent.Name, t, funcDecl)\n\n\t\t\t\t\/\/ Walk up the parent blocks\n\t\t\t\tfor ; child != nil; child = child.parent {\n\t\t\t\t\t\/\/ Find the placeholder\n\t\t\t\t\tdataBlock := child.Get(AccessPassType).(*AccessPassData)\n\t\t\t\t\tvar placeholderIdx int\n\t\t\t\t\tvar val IdentifierGroup\n\t\t\t\t\tfor placeholderIdx, val = range dataBlock.accesses {\n\t\t\t\t\t\tif val.group[0].id == placeholderIdent.Name {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tb.Printf(\"Replacing placeholder at %d\", placeholderIdx)\n\n\t\t\t\t\t\/\/ Remove the placeholder\n\t\t\t\t\tdataBlock.accesses = append(dataBlock.accesses[0:placeholderIdx], dataBlock.accesses[placeholderIdx+1:]...)\n\t\t\t\t\t\/\/ Insert the function accesses\n\t\t\t\t\tfuncAccessCopy := make([]IdentifierGroup, len(funcAccesses))\n\t\t\t\t\tcopy(funcAccessCopy, funcAccesses)\n\n\t\t\t\t\tb.Print(\" << Propogating up\")\n\t\t\t\t\tfor _, a := range funcAccessCopy {\n\t\t\t\t\t\tb.Print(a.String())\n\t\t\t\t\t}\n\t\t\t\t\tdataBlock.accesses = append(dataBlock.accesses[0:placeholderIdx], append(funcAccessCopy, dataBlock.accesses[placeholderIdx:]...)...)\n\n\t\t\t\t\t\/\/ Check if the identifier leaves scope\n\t\t\t\t\tfor idx := 0; idx < len(funcAccesses); {\n\t\t\t\t\t\taccess := funcAccesses[idx]\n\t\t\t\t\t\tif _, ok := dataBlock.defines[access.group[0].id]; ok {\n\t\t\t\t\t\t\tb.Print(\"Leaving scope\", access.String())\n\t\t\t\t\t\t\tfuncAccesses = append(funcAccesses[:idx], funcAccesses[idx+1:]...)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tidx++\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Check if an index variable leaves scope\n\t\t\t\t\tfor accIdx, access := range funcAccesses {\n\t\t\t\t\t\t\/\/ check if an index variable is also a function argument and\n\t\t\t\t\t\t\/\/ remove it\n\t\t\t\t\t\tfor idx, ident := range access.group {\n\t\t\t\t\t\t\tif _, ok := dataBlock.defines[ident.index]; ok && ident.isIndexed {\n\t\t\t\t\t\t\t\t\/\/ update the real value in funcAccesses\n\t\t\t\t\t\t\t\taccess.group = access.group[0 : idx+1]\n\t\t\t\t\t\t\t\taccess.group[idx].isIndexed = false\n\t\t\t\t\t\t\t\tb.Printf(\"Stripping array index %s\", ident.index)\n\t\t\t\t\t\t\t\taccess.group[idx].index = \"\"\n\t\t\t\t\t\t\t\tfuncAccesses[accIdx] = access \/\/ write back to the original storage!\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn v\n\t}\n\tv.node = node\n\n\treturn v\n}\n\nfunc NewAccessPassFuncPropogate() *AccessPassFuncPropogate {\n\treturn &AccessPassFuncPropogate{\n\t\tBasePass: NewBasePass(),\n\t}\n}\n\nfunc (pass *AccessPassFuncPropogate) GetPassType() PassType {\n\treturn AccessPassFuncPropogateType\n}\n\nfunc (pass *AccessPassFuncPropogate) GetPassMode() PassMode {\n\treturn ModulePassMode\n}\n\nfunc (pass *AccessPassFuncPropogate) GetDependencies() []PassType {\n\treturn []PassType{CallGraphPassType, AccessPassPropogateType}\n}\n\n\/\/ Declare two Run* functions\n\nfunc (pass *AccessPassFuncPropogate) RunBasicBlockPass(block *BasicBlock, p *Package) BasicBlockVisitor {\n\tdataBlock := block.Get(AccessPassType).(*AccessPassData)\n\treturn AccessPassFuncPropogateVisitor{pass: pass, cur: block, dataBlock: dataBlock, p: p}\n}\n\nfunc (pass *AccessPassFuncPropogate) RunModulePass(file *ast.File, p *Package) (modified bool, err error) {\n\tcallGraph := pass.compiler.GetPassResult(CallGraphPassType, nil).(*CallGraphPassData)\n\trun := make(map[string]bool) \/\/ which functions have been propogated\n\tvar orderGraph func(map[string][]string, string) []string\n\torderGraph = func(graph map[string][]string, f string) (result []string) {\n\t\tfor _, fn := range callGraph.graph[f] {\n\t\t\tresult = append(result, orderGraph(graph, fn)...)\n\t\t}\n\t\tresult = append(result, f)\n\t\treturn\n\t}\n\n\trunOrder := orderGraph(callGraph.graph, \"main\")\n\tfmt.Println(runOrder)\n\tfor _, fnName := range runOrder {\n\t\tfn := p.Lookup(fnName)\n\t\tif fn == nil || run[fnName] {\n\t\t\tcontinue\n\t\t}\n\t\tfnDecl := fn.Decl.(*ast.FuncDecl)\n\t\tblock := pass.compiler.GetPassResult(BasicBlockPassType, fnDecl).(*BasicBlock)\n\n\t\t\/\/ Manually run the basic block pass in inverse call graph order\n\t\tvar mod bool\n\t\tmod, err = RunBasicBlock(pass, block, p)\n\t\tmodified = modified || mod\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\trun[fnName] = true\n\t}\n\treturn\n}\n<commit_msg>fixed another deep copy bug in func prop, third parallel loop<commit_after>\/\/ Access pass function propogation\n\/\/\n\/\/ Alias accesses to function parameters passed by pointers, and record\n\/\/ global variable accesses. Propogate accesses upwards through the blocks.\n\/\/\n\/\/ func foo(ptr *DataA, val DataB) {\n\/\/ \/\/ Read ptr\n\/\/ \/\/ Write ptr\n\/\/ \/\/ Read val\n\/\/ \/\/ Write val\n\/\/ }\n\/\/ \n\/\/ func bar(index int, val []DataB) int {\n\/\/ return val[index]\n\/\/ }\n\/\/\n\/\/ func main() {\n\/\/ a := &DataA{}\n\/\/ foo(a, DataB{}) \/\/ bubble up the reads and writes to \"a\" (ptr)\n\/\/ i := 0\n\/\/ aList := []DataA{a}\n\/\/ bar(i, aList) \/\/ bubble up val[index] -> aList[i] access\n\/\/ }\npackage main\n\nimport (\n\t\"fmt\"\n\t\"go\/ast\"\n)\n\ntype AccessPassFuncPropogate struct {\n\tBasePass\n}\n\ntype AccessPassFuncPropogateVisitor struct {\n\tpass Pass\n\tp *Package\n\tcur *BasicBlock\n\tdataBlock *AccessPassData\n\tnode ast.Node\n}\n\nfunc (v AccessPassFuncPropogateVisitor) Done(block *BasicBlock) (modified bool, err error) {\n\tdataBlock := v.dataBlock\n\n\tblock.Print(\"== Defines ==\")\n\tfor ident, expr := range dataBlock.defines {\n\t\tblock.Printf(\"%s = %T %+v\", ident, expr, expr)\n\t}\n\tblock.Print(\"== Accesses ==\")\n\tfor _, access := range dataBlock.accesses {\n\t\tblock.Printf(access.String())\n\t}\n\n\treturn\n}\n\nfunc (v AccessPassFuncPropogateVisitor) Visit(node ast.Node) (w BasicBlockVisitor) {\n\t\/\/ Get the closest enclosing basic block for this node\n\tdataBlock := v.dataBlock\n\tb := v.cur\n\tpass := v.pass\n\tif node == nil {\n\t\t\/\/ post-order actions (all sub-nodes have been visited)\n\t\tnode = v.node\n\t\t\/\/ Locate function calls\n\t\tswitch t := node.(type) {\n\t\tcase *ast.CallExpr:\n\t\t\tswitch f := t.Fun.(type) {\n\t\t\tcase *ast.FuncLit:\n\t\t\t\t\/\/ go down a FuncLit branch\n\t\t\tcase *ast.Ident:\n\t\t\t\tfun := v.p.Lookup(f.Name)\n\t\t\t\tif fun == nil {\n\t\t\t\t\t\/\/ builtin function, or not found\n\t\t\t\t\tb.Print(\"Function not found\", f.Name)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tfuncDecl := fun.Decl.(*ast.FuncDecl)\n\t\t\t\tfuncType := funcDecl.Type\n\t\t\t\tfuncDataBlock := pass.GetCompiler().GetPassResult(BasicBlockPassType, funcDecl).(*BasicBlock).Get(AccessPassType).(*AccessPassData)\n\n\t\t\t\tchild := v.cur\n\t\t\t\tparent := child.parent\n\t\t\t\t\/\/ Now fill in the accesses this call would have made, and propogate it\n\t\t\t\t\/\/ all the way to the top\n\t\t\t\tvar funcAccesses []IdentifierGroup \/\/ only the accesses this function made\n\n\t\t\t\t\/\/ Fill in global accesses\n\t\t\t\tfor _, access := range funcDataBlock.accesses {\n\t\t\t\t\tif _, ok := funcDataBlock.defines[access.group[0].id]; !ok {\n\t\t\t\t\t\t\/\/ if there is an array access that uses an identifier block defined in \n\t\t\t\t\t\t\/\/ this block, change the access from b[idx] to b\n\t\t\t\t\t\tvar ig IdentifierGroup = access\n\t\t\t\t\t\tfor idx, ident := range access.group {\n\t\t\t\t\t\t\tif _, ok := dataBlock.defines[ident.index]; ok && ident.isIndexed {\n\t\t\t\t\t\t\t\tig.group = make([]Identifier, idx+1)\n\t\t\t\t\t\t\t\tcopy(ig.group, access.group)\n\t\t\t\t\t\t\t\tparent.Printf(\"Leaving index scope [%s]\", ig.group[idx].index)\n\t\t\t\t\t\t\t\tig.group[idx].isIndexed = false\n\t\t\t\t\t\t\t\tig.group[idx].index = \"\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tb.Print(\"Global access\", ig.String())\n\t\t\t\t\t\tfuncAccesses = append(funcAccesses, ig)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t\/\/ Fill in aliased arguments\n\t\t\t\tpos := 0 \/\/ argument position\n\t\t\t\tfor _, arg := range funcType.Params.List {\n\t\t\t\t\twriteThrough := false\n\t\t\t\t\t\/\/ is the argument able to be modified?\n\t\t\t\t\t\/\/ builtin types (slice, map, chan), pointers\n\t\t\t\t\tswitch arg.Type.(type) {\n\t\t\t\t\tcase *ast.ArrayType, *ast.MapType, *ast.ChanType:\n\t\t\t\t\t\tb.Printf(\"Pass-by-reference %v %T\", arg.Names, arg.Type)\n\t\t\t\t\t\twriteThrough = true\n\t\t\t\t\tcase *ast.StarExpr:\n\t\t\t\t\t\tb.Printf(\"Pass-by-pointer %v %T\", arg.Names, arg.Type)\n\t\t\t\t\t\twriteThrough = true\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, argName := range arg.Names {\n\t\t\t\t\t\tif writeThrough {\n\t\t\t\t\t\t\tcallArg := t.Args[pos]\n\t\t\t\t\t\t\tcallIdent := &IdentifierGroup{}\n\t\t\t\t\t\t\tAccessIdentBuild(callIdent, callArg, nil)\n\n\t\t\t\t\t\t\t\/\/ Find all accesses to these variables\n\t\t\t\t\t\t\tfor _, access := range funcDataBlock.accesses {\n\t\t\t\t\t\t\t\t\/\/ Replace the function arg name with the callIdent prefix\n\t\t\t\t\t\t\t\tif access.group[0].id == argName.Name {\n\t\t\t\t\t\t\t\t\t\/\/ check if an index variable is also a function argument and\n\t\t\t\t\t\t\t\t\t\/\/ remove it\n\t\t\t\t\t\t\t\t\tnewAccess := make([]Identifier, len(access.group))\n\n\t\t\t\t\t\t\t\t\tcopy(newAccess, access.group) \/\/ full copy\n\t\t\t\t\t\t\t\t\tfor idx, ident := range newAccess {\n\t\t\t\t\t\t\t\t\t\tif _, ok := funcDataBlock.defines[ident.index]; ok && ident.isIndexed {\n\t\t\t\t\t\t\t\t\t\t\tnewAccess = newAccess[0 : idx+1]\n\t\t\t\t\t\t\t\t\t\t\tnewAccess[idx].isIndexed = false\n\t\t\t\t\t\t\t\t\t\t\tb.Printf(\"Stripping array index %s\", ident.index)\n\t\t\t\t\t\t\t\t\t\t\tnewAccess[idx].index = \"\"\n\t\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t\t\/\/ if the callsite is &a and the access is *a, make the access\n\t\t\t\t\t\t\t\t\t\/\/ a for this function\n\t\t\t\t\t\t\t\t\tvar callIdentCopy []Identifier\n\t\t\t\t\t\t\t\t\tif callIdent.group[len(callIdent.group)-1].refType == AddressOf && newAccess[len(newAccess)-1].refType == Dereference {\n\t\t\t\t\t\t\t\t\t\tb.Print(\"Removing pointer alias & -> *\")\n\t\t\t\t\t\t\t\t\t\tnewAccess[len(newAccess)-1].refType = NoReference\n\t\t\t\t\t\t\t\t\t\tcallIdentCopy = make([]Identifier, len(callIdent.group))\n\t\t\t\t\t\t\t\t\t\tcopy(callIdentCopy, callIdent.group)\n\t\t\t\t\t\t\t\t\t\tcallIdentCopy[len(callIdentCopy)-1].refType = NoReference\n\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\tcallIdentCopy = callIdent.group\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\/\/ replace access[0] with callIdent\n\t\t\t\t\t\t\t\t\tvar ig IdentifierGroup\n\t\t\t\t\t\t\t\t\tig.t = access.t\n\t\t\t\t\t\t\t\t\tig.group = append(ig.group, callIdentCopy...)\n\t\t\t\t\t\t\t\t\tig.group = append(ig.group, newAccess[1:]...)\n\t\t\t\t\t\t\t\t\tb.Printf(\"%s -> %s\", access.String(), ig.String())\n\t\t\t\t\t\t\t\t\tfuncAccesses = append(funcAccesses, ig)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tpos++\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t\/\/ Propogate ONLY aliased argument accesses upwards\n\t\t\t\t\/\/ NOTE: doesn't work with recursive functions??\n\n\t\t\t\t\/\/ Move upwards, replacing the placeholder access with the group of\n\t\t\t\t\/\/ accesses made by this function. Stop at variable define boundaries\n\t\t\t\tplaceholderIdent := v.pass.GetCompiler().GetPassResult(AccessPassType, t).(*ast.Ident)\n\t\t\t\tb.Printf(\"\\x1b[33m>> %s\\x1b[0m filling in function effects: %+v, %+v\", placeholderIdent.Name, t, funcDecl)\n\n\t\t\t\t\/\/ Walk up the parent blocks\n\t\t\t\tfor ; child != nil; child = child.parent {\n\t\t\t\t\t\/\/ Find the placeholder\n\t\t\t\t\tdataBlock := child.Get(AccessPassType).(*AccessPassData)\n\t\t\t\t\tvar placeholderIdx int\n\t\t\t\t\tvar val IdentifierGroup\n\t\t\t\t\tfor placeholderIdx, val = range dataBlock.accesses {\n\t\t\t\t\t\tif val.group[0].id == placeholderIdent.Name {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tb.Printf(\"Replacing placeholder at %d\", placeholderIdx)\n\n\t\t\t\t\t\/\/ Remove the placeholder\n\t\t\t\t\tdataBlock.accesses = append(dataBlock.accesses[0:placeholderIdx], dataBlock.accesses[placeholderIdx+1:]...)\n\t\t\t\t\t\/\/ Insert the function accesses, do a deep copy\n\t\t\t\t\tvar funcAccessCopy []IdentifierGroup\n\t\t\t\t\tfor _, v := range funcAccesses {\n\t\t\t\t\t\t\/\/ deep copy the identifers\n\t\t\t\t\t\tgroupCopy := make([]Identifier, len(v.group))\n\t\t\t\t\t\tcopy(groupCopy, v.group)\n\t\t\t\t\t\tv.group = groupCopy\n\t\t\t\t\t\tfuncAccessCopy = append(funcAccessCopy, v)\n\t\t\t\t\t}\n\n\t\t\t\t\tb.Print(\" << Propogating up\")\n\t\t\t\t\tfor _, a := range funcAccessCopy {\n\t\t\t\t\t\tb.Print(a.String())\n\t\t\t\t\t}\n\t\t\t\t\tdataBlock.accesses = append(dataBlock.accesses[0:placeholderIdx], append(funcAccessCopy, dataBlock.accesses[placeholderIdx:]...)...)\n\n\t\t\t\t\t\/\/ Check if the identifier leaves scope\n\t\t\t\t\tfor idx := 0; idx < len(funcAccesses); {\n\t\t\t\t\t\taccess := funcAccesses[idx]\n\t\t\t\t\t\tif _, ok := dataBlock.defines[access.group[0].id]; ok {\n\t\t\t\t\t\t\tb.Print(\"Leaving scope\", access.String())\n\t\t\t\t\t\t\tfuncAccesses = append(funcAccesses[:idx], funcAccesses[idx+1:]...)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tidx++\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Check if an index variable leaves scope\n\t\t\t\t\tfor accIdx, access := range funcAccesses {\n\t\t\t\t\t\t\/\/ check if an index variable is also a function argument and\n\t\t\t\t\t\t\/\/ remove it\n\t\t\t\t\t\tfor idx, ident := range access.group {\n\t\t\t\t\t\t\tif _, ok := dataBlock.defines[ident.index]; ok && ident.isIndexed {\n\t\t\t\t\t\t\t\t\/\/ update the real value in funcAccesses\n\t\t\t\t\t\t\t\taccess.group = access.group[0 : idx+1]\n\t\t\t\t\t\t\t\taccess.group[idx].isIndexed = false\n\t\t\t\t\t\t\t\tb.Printf(\"Stripping array index %s\", ident.index)\n\t\t\t\t\t\t\t\taccess.group[idx].index = \"\"\n\t\t\t\t\t\t\t\tfuncAccesses[accIdx] = access \/\/ write back to the original storage!\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn v\n\t}\n\tv.node = node\n\n\treturn v\n}\n\nfunc NewAccessPassFuncPropogate() *AccessPassFuncPropogate {\n\treturn &AccessPassFuncPropogate{\n\t\tBasePass: NewBasePass(),\n\t}\n}\n\nfunc (pass *AccessPassFuncPropogate) GetPassType() PassType {\n\treturn AccessPassFuncPropogateType\n}\n\nfunc (pass *AccessPassFuncPropogate) GetPassMode() PassMode {\n\treturn ModulePassMode\n}\n\nfunc (pass *AccessPassFuncPropogate) GetDependencies() []PassType {\n\treturn []PassType{CallGraphPassType, AccessPassPropogateType}\n}\n\n\/\/ Declare two Run* functions\n\nfunc (pass *AccessPassFuncPropogate) RunBasicBlockPass(block *BasicBlock, p *Package) BasicBlockVisitor {\n\tdataBlock := block.Get(AccessPassType).(*AccessPassData)\n\treturn AccessPassFuncPropogateVisitor{pass: pass, cur: block, dataBlock: dataBlock, p: p}\n}\n\nfunc (pass *AccessPassFuncPropogate) RunModulePass(file *ast.File, p *Package) (modified bool, err error) {\n\tcallGraph := pass.compiler.GetPassResult(CallGraphPassType, nil).(*CallGraphPassData)\n\trun := make(map[string]bool) \/\/ which functions have been propogated\n\tvar orderGraph func(map[string][]string, string) []string\n\torderGraph = func(graph map[string][]string, f string) (result []string) {\n\t\tfor _, fn := range callGraph.graph[f] {\n\t\t\tresult = append(result, orderGraph(graph, fn)...)\n\t\t}\n\t\tresult = append(result, f)\n\t\treturn\n\t}\n\n\trunOrder := orderGraph(callGraph.graph, \"main\")\n\tfmt.Println(runOrder)\n\tfor _, fnName := range runOrder {\n\t\tfn := p.Lookup(fnName)\n\t\tif fn == nil || run[fnName] {\n\t\t\tcontinue\n\t\t}\n\t\tfnDecl := fn.Decl.(*ast.FuncDecl)\n\t\tblock := pass.compiler.GetPassResult(BasicBlockPassType, fnDecl).(*BasicBlock)\n\n\t\t\/\/ Manually run the basic block pass in inverse call graph order\n\t\tvar mod bool\n\t\tmod, err = RunBasicBlock(pass, block, p)\n\t\tmodified = modified || mod\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\trun[fnName] = true\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build dragonfly freebsd netbsd openbsd solaris\n\npackage runtime\n\nimport (\n\t\"unsafe\"\n)\n\n\/\/ Don't split the stack as this function may be invoked without a valid G,\n\/\/ which prevents us from allocating more stack.\n\/\/go:nosplit\nfunc sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {\n\tv, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)\n\tif err != 0 {\n\t\treturn nil\n\t}\n\tmSysStatInc(sysStat, n)\n\treturn v\n}\n\nfunc sysUnused(v unsafe.Pointer, n uintptr) {\n\tmadvise(v, n, _MADV_FREE)\n}\n\nfunc sysUsed(v unsafe.Pointer, n uintptr) {\n}\n\nfunc sysHugePage(v unsafe.Pointer, n uintptr) {\n}\n\n\/\/ Don't split the stack as this function may be invoked without a valid G,\n\/\/ which prevents us from allocating more stack.\n\/\/go:nosplit\nfunc sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {\n\tmSysStatDec(sysStat, n)\n\tmunmap(v, n)\n}\n\nfunc sysFault(v unsafe.Pointer, n uintptr) {\n\tmmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)\n}\n\nfunc sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {\n\tp, err := mmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE, -1, 0)\n\tif err != 0 {\n\t\treturn nil\n\t}\n\treturn p\n}\n\nconst _sunosEAGAIN = 11\nconst _ENOMEM = 12\n\nfunc sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {\n\tmSysStatInc(sysStat, n)\n\n\tp, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)\n\tif err == _ENOMEM || ((GOOS == \"solaris\" || GOOS == \"illumos\") && err == _sunosEAGAIN) {\n\t\tthrow(\"runtime: out of memory\")\n\t}\n\tif p != v || err != 0 {\n\t\tthrow(\"runtime: cannot map pages in arena address space\")\n\t}\n}\n<commit_msg>runtime: map reserved memory as NORESERVE on solaris<commit_after>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build dragonfly freebsd netbsd openbsd solaris\n\npackage runtime\n\nimport (\n\t\"unsafe\"\n)\n\n\/\/ Don't split the stack as this function may be invoked without a valid G,\n\/\/ which prevents us from allocating more stack.\n\/\/go:nosplit\nfunc sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {\n\tv, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)\n\tif err != 0 {\n\t\treturn nil\n\t}\n\tmSysStatInc(sysStat, n)\n\treturn v\n}\n\nfunc sysUnused(v unsafe.Pointer, n uintptr) {\n\tmadvise(v, n, _MADV_FREE)\n}\n\nfunc sysUsed(v unsafe.Pointer, n uintptr) {\n}\n\nfunc sysHugePage(v unsafe.Pointer, n uintptr) {\n}\n\n\/\/ Don't split the stack as this function may be invoked without a valid G,\n\/\/ which prevents us from allocating more stack.\n\/\/go:nosplit\nfunc sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64) {\n\tmSysStatDec(sysStat, n)\n\tmunmap(v, n)\n}\n\nfunc sysFault(v unsafe.Pointer, n uintptr) {\n\tmmap(v, n, _PROT_NONE, _MAP_ANON|_MAP_PRIVATE|_MAP_FIXED, -1, 0)\n}\n\n\/\/ Indicates not to reserve swap space for the mapping.\nconst _sunosMAP_NORESERVE = 0x40\n\nfunc sysReserve(v unsafe.Pointer, n uintptr) unsafe.Pointer {\n\tflags := int32(_MAP_ANON | _MAP_PRIVATE)\n\tif GOOS == \"solaris\" || GOOS == \"illumos\" {\n\t\t\/\/ Be explicit that we don't want to reserve swap space\n\t\t\/\/ for PROT_NONE anonymous mappings. This avoids an issue\n\t\t\/\/ wherein large mappings can cause fork to fail.\n\t\tflags |= _sunosMAP_NORESERVE\n\t}\n\tp, err := mmap(v, n, _PROT_NONE, flags, -1, 0)\n\tif err != 0 {\n\t\treturn nil\n\t}\n\treturn p\n}\n\nconst _sunosEAGAIN = 11\nconst _ENOMEM = 12\n\nfunc sysMap(v unsafe.Pointer, n uintptr, sysStat *uint64) {\n\tmSysStatInc(sysStat, n)\n\n\tp, err := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)\n\tif err == _ENOMEM || ((GOOS == \"solaris\" || GOOS == \"illumos\") && err == _sunosEAGAIN) {\n\t\tthrow(\"runtime: out of memory\")\n\t}\n\tif p != v || err != 0 {\n\t\tthrow(\"runtime: cannot map pages in arena address space\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"sync\"\n\t\"io\/ioutil\"\n\t\"yaml\"\n\t\"time\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"github.com\/joonakannisto\/gocrypto\/ssh\"\n)\n\n\ntype Config struct {\n\tHostKey string `yaml:\"IdPHost\"`\n}\ntype sessionInfo struct {\n\tUser string\n\tKeys []ssh.PublicKey\n}\n\ntype Server struct {\n\tsshConfig *ssh.ServerConfig\n\tmu sync.RWMutex\n\tsessionInfo map[string]sessionInfo\n}\n\nfunc (s *Server) PublicKeyCallback(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {\n\ts.mu.Lock()\n\tsi := s.sessionInfo[string(conn.SessionID())]\n\tsi.User = conn.User()\n\tconfigText, err := ioutil.ReadFile(\"config.yml\")\n\tvar C Config\n\tfatalIfErr(yaml.Unmarshal(configText, &C))\n\t\n\tsshConfig := &ssh.ClientConfig{\n\t\tUser: si.User,\n\t\tAuth: []ssh.AuthMethod{ssh.WorkingKeys(key)},\n\t}\n\t_, err := ssh.ShakeThat(\"tcp\", C.IdPHost, sshConfig)\n if err != nil {\n si.Keys = append(si.Keys, key)\n\t}\n\t\n\ts.sessionInfo[string(conn.SessionID())] = si\n\ts.mu.Unlock()\n\n\t\/\/ Never succeed a key, or we might not see the next. See KeyboardInteractiveCallback.\n\treturn nil, errors.New(\"\")\n\n}\n\nfunc (s *Server) KeyboardInteractiveCallback(ssh.ConnMetadata, ssh.KeyboardInteractiveChallenge) (*ssh.Permissions, error) {\n\t\/\/ keyboard-interactive is tried when all public keys failed, and\n\t\/\/ since it's server-driven we can just pass without user\n\t\/\/ interaction to let the user in once we got all the public keys\n\treturn nil, nil\n}\n\ntype logEntry struct {\n\tTimestamp string\n\tUsername string\n\tChannelTypes []string\n\tRequestTypes []string\n\tError string\n\tKeysOffered []string\n\tClientVersion string\n}\n\nfunc (s *Server) Handle(nConn net.Conn) {\n\tle := &logEntry{Timestamp: time.Now().Format(time.RFC3339)}\n\tdefer json.NewEncoder(os.Stdout).Encode(le)\n\n\tconn, chans, reqs, err := ssh.NewServerConn(nConn, s.sshConfig)\n\tif err != nil {\n\t\tle.Error = \"Handshake failed: \" + err.Error()\n\t\treturn\n\t}\n\tdefer func() {\n\t\ts.mu.Lock()\n\t\tdelete(s.sessionInfo, string(conn.SessionID()))\n\t\ts.mu.Unlock()\n\t\tconn.Close()\n\t}()\n\tgo func(in <-chan *ssh.Request) {\n\t\tfor req := range in {\n\t\t\tle.RequestTypes = append(le.RequestTypes, req.Type)\n\t\t\tif req.WantReply {\n\t\t\t\treq.Reply(false, nil)\n\t\t\t}\n\t\t}\n\t}(reqs)\n\n\ts.mu.RLock()\n\tsi := s.sessionInfo[string(conn.SessionID())]\n\ts.mu.RUnlock()\n\n\tle.Username = conn.User()\n\tle.ClientVersion = fmt.Sprintf(\"%x\", conn.ClientVersion())\n\tfor _, key := range si.Keys {\n\t\tle.KeysOffered = append(le.KeysOffered, string(ssh.MarshalAuthorizedKey(key)))\n\t}\n\n\tfor newChannel := range chans {\n\t\tle.ChannelTypes = append(le.ChannelTypes, newChannel.ChannelType())\n\n\t\tif newChannel.ChannelType() != \"session\" {\n\t\t\tnewChannel.Reject(ssh.UnknownChannelType, \"unknown channel type\")\n\t\t\tcontinue\n\t\t}\n\t\tchannel, requests, err := newChannel.Accept()\n\t\tif err != nil {\n\t\t\tle.Error = \"Channel accept failed: \" + err.Error()\n\t\t\tcontinue\n\t\t}\n\n\t\tagentFwd, x11 := false, false\n\t\treqLock := &sync.Mutex{}\n\t\treqLock.Lock()\n\t\ttimeout := time.AfterFunc(30*time.Second, func() { reqLock.Unlock() })\n\n\t\tgo func(in <-chan *ssh.Request) {\n\t\t\tfor req := range in {\n\t\t\t\tle.RequestTypes = append(le.RequestTypes, req.Type)\n\t\t\t\tok := false\n\t\t\t\tswitch req.Type {\n\t\t\t\tcase \"shell\":\n\t\t\t\t\tfallthrough\n\t\t\t\tcase \"pty-req\":\n\t\t\t\t\tok = true\n\n\t\t\t\t\t\/\/ \"auth-agent-req@openssh.com\" and \"x11-req\" always arrive\n\t\t\t\t\t\/\/ before the \"pty-req\", so we can go ahead now\n\t\t\t\t\tif timeout.Stop() {\n\t\t\t\t\t\treqLock.Unlock()\n\t\t\t\t\t}\n\n\t\t\t\tcase \"auth-agent-req@openssh.com\":\n\t\t\t\t\tagentFwd = true\n\t\t\t\tcase \"x11-req\":\n\t\t\t\t\tx11 = true\n\t\t\t\t}\n\n\t\t\t\tif req.WantReply {\n\t\t\t\t\treq.Reply(ok, nil)\n\t\t\t\t}\n\t\t\t}\n\t\t}(requests)\n\n\t\treqLock.Lock()\n\t\t\n\n\t\t\n\t\tif err != nil {\n\t\t\tle.Error = \"findUser failed: \" + err.Error()\n\t\t\tchannel.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\t\n\n\t\t\n\t\tchannel.Close()\n\t}\n}\n\n<commit_msg>pubkey check<commit_after>package main\n\nimport (\n\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"sync\"\n\t\"io\/ioutil\"\n\t\"time\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"github.com\/joonakannisto\/gocrypto\/ssh\"\n)\n\n\ntype Config struct {\n\tHostKey string `yaml:\"IdPHost\"`\n}\ntype sessionInfo struct {\n\tUser string\n\tKeys []ssh.PublicKey\n}\n\ntype Server struct {\n\tsshConfig *ssh.ServerConfig\n\tmu sync.RWMutex\n\tsessionInfo map[string]sessionInfo\n}\n\nfunc (s *Server) PublicKeyCallback(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {\n\ts.mu.Lock()\n\tsi := s.sessionInfo[string(conn.SessionID())]\n\tsi.User = conn.User()\n\tconfigText, err := ioutil.ReadFile(\"config.yml\")\n\tvar C Config\n\tfatalIfErr(yaml.Unmarshal(configText, &C))\n\t\n\tsshConfig := &ssh.ClientConfig{\n\t\tUser: si.User,\n\t\tAuth: []ssh.AuthMethod{ssh.WorkingKeys(key)},\n\t}\n\t_, err := ssh.ShakeThat(\"tcp\", C.IdPHost, sshConfig)\n if err != nil {\n si.Keys = append(si.Keys, key)\n\t}\n\t\n\ts.sessionInfo[string(conn.SessionID())] = si\n\ts.mu.Unlock()\n\n\t\/\/ Never succeed a key, or we might not see the next. See KeyboardInteractiveCallback.\n\treturn nil, errors.New(\"\")\n\n}\n\nfunc (s *Server) KeyboardInteractiveCallback(ssh.ConnMetadata, ssh.KeyboardInteractiveChallenge) (*ssh.Permissions, error) {\n\t\/\/ keyboard-interactive is tried when all public keys failed, and\n\t\/\/ since it's server-driven we can just pass without user\n\t\/\/ interaction to let the user in once we got all the public keys\n\treturn nil, nil\n}\n\ntype logEntry struct {\n\tTimestamp string\n\tUsername string\n\tChannelTypes []string\n\tRequestTypes []string\n\tError string\n\tKeysOffered []string\n\tClientVersion string\n}\n\nfunc (s *Server) Handle(nConn net.Conn) {\n\tle := &logEntry{Timestamp: time.Now().Format(time.RFC3339)}\n\tdefer json.NewEncoder(os.Stdout).Encode(le)\n\n\tconn, chans, reqs, err := ssh.NewServerConn(nConn, s.sshConfig)\n\tif err != nil {\n\t\tle.Error = \"Handshake failed: \" + err.Error()\n\t\treturn\n\t}\n\tdefer func() {\n\t\ts.mu.Lock()\n\t\tdelete(s.sessionInfo, string(conn.SessionID()))\n\t\ts.mu.Unlock()\n\t\tconn.Close()\n\t}()\n\tgo func(in <-chan *ssh.Request) {\n\t\tfor req := range in {\n\t\t\tle.RequestTypes = append(le.RequestTypes, req.Type)\n\t\t\tif req.WantReply {\n\t\t\t\treq.Reply(false, nil)\n\t\t\t}\n\t\t}\n\t}(reqs)\n\n\ts.mu.RLock()\n\tsi := s.sessionInfo[string(conn.SessionID())]\n\ts.mu.RUnlock()\n\n\tle.Username = conn.User()\n\tle.ClientVersion = fmt.Sprintf(\"%x\", conn.ClientVersion())\n\tfor _, key := range si.Keys {\n\t\tle.KeysOffered = append(le.KeysOffered, string(ssh.MarshalAuthorizedKey(key)))\n\t}\n\n\tfor newChannel := range chans {\n\t\tle.ChannelTypes = append(le.ChannelTypes, newChannel.ChannelType())\n\n\t\tif newChannel.ChannelType() != \"session\" {\n\t\t\tnewChannel.Reject(ssh.UnknownChannelType, \"unknown channel type\")\n\t\t\tcontinue\n\t\t}\n\t\tchannel, requests, err := newChannel.Accept()\n\t\tif err != nil {\n\t\t\tle.Error = \"Channel accept failed: \" + err.Error()\n\t\t\tcontinue\n\t\t}\n\n\t\tagentFwd, x11 := false, false\n\t\treqLock := &sync.Mutex{}\n\t\treqLock.Lock()\n\t\ttimeout := time.AfterFunc(30*time.Second, func() { reqLock.Unlock() })\n\n\t\tgo func(in <-chan *ssh.Request) {\n\t\t\tfor req := range in {\n\t\t\t\tle.RequestTypes = append(le.RequestTypes, req.Type)\n\t\t\t\tok := false\n\t\t\t\tswitch req.Type {\n\t\t\t\tcase \"shell\":\n\t\t\t\t\tfallthrough\n\t\t\t\tcase \"pty-req\":\n\t\t\t\t\tok = true\n\n\t\t\t\t\t\/\/ \"auth-agent-req@openssh.com\" and \"x11-req\" always arrive\n\t\t\t\t\t\/\/ before the \"pty-req\", so we can go ahead now\n\t\t\t\t\tif timeout.Stop() {\n\t\t\t\t\t\treqLock.Unlock()\n\t\t\t\t\t}\n\n\t\t\t\tcase \"auth-agent-req@openssh.com\":\n\t\t\t\t\tagentFwd = true\n\t\t\t\tcase \"x11-req\":\n\t\t\t\t\tx11 = true\n\t\t\t\t}\n\n\t\t\t\tif req.WantReply {\n\t\t\t\t\treq.Reply(ok, nil)\n\t\t\t\t}\n\t\t\t}\n\t\t}(requests)\n\n\t\treqLock.Lock()\n\t\t\n\n\t\t\n\t\tif err != nil {\n\t\t\tle.Error = \"findUser failed: \" + err.Error()\n\t\t\tchannel.Close()\n\t\t\tcontinue\n\t\t}\n\n\t\t\n\n\t\t\n\t\tchannel.Close()\n\t}\n}\n\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e_node\n\nimport (\n\t\"time\"\n\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/client\/restclient\"\n\tclient \"k8s.io\/kubernetes\/pkg\/client\/unversioned\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nconst (\n\tretryTimeout = 4 * time.Minute\n\tpollInterval = time.Second * 5\n)\n\nvar _ = Describe(\"Container Conformance Test\", func() {\n\tvar cl *client.Client\n\n\tBeforeEach(func() {\n\t\t\/\/ Setup the apiserver client\n\t\tcl = client.NewOrDie(&restclient.Config{Host: *apiServerAddress})\n\t})\n\n\tDescribe(\"container conformance blackbox test\", func() {\n\t\tContext(\"when testing images that exist\", func() {\n\t\t\tvar conformImages []ConformanceImage\n\t\t\tconformImageTags := []string{\n\t\t\t\t\"gcr.io\/google_containers\/busybox:1.24\",\n\t\t\t\t\"gcr.io\/google_containers\/mounttest:0.2\",\n\t\t\t\t\"gcr.io\/google_containers\/nettest:1.7\",\n\t\t\t\t\"gcr.io\/google_containers\/nginx:1.7.9\",\n\t\t\t}\n\t\t\tIt(\"it should pull successfully [Conformance]\", func() {\n\t\t\t\tfor _, imageTag := range conformImageTags {\n\t\t\t\t\timage, _ := NewConformanceImage(\"docker\", imageTag)\n\t\t\t\t\tconformImages = append(conformImages, image)\n\t\t\t\t}\n\t\t\t\tfor _, image := range conformImages {\n\t\t\t\t\tif err := image.Pull(); err != nil {\n\t\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t\tIt(\"it should list pulled images [Conformance]\", func() {\n\t\t\t\timage, _ := NewConformanceImage(\"docker\", \"\")\n\t\t\t\ttags, _ := image.List()\n\t\t\t\tfor _, tag := range conformImageTags {\n\t\t\t\t\tExpect(tags).To(ContainElement(tag))\n\t\t\t\t}\n\t\t\t})\n\t\t\tIt(\"it should remove successfully [Conformance]\", func() {\n\t\t\t\tfor _, image := range conformImages {\n\t\t\t\t\tif err := image.Remove(); err != nil {\n\t\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t\tContext(\"when testing image that does not exist\", func() {\n\t\t\tvar invalidImage ConformanceImage\n\t\t\tvar invalidImageTag string\n\t\t\tIt(\"it should not pull successfully [Conformance]\", func() {\n\t\t\t\tinvalidImageTag = \"foo.com\/foo\/foo\"\n\t\t\t\tinvalidImage, _ = NewConformanceImage(\"docker\", invalidImageTag)\n\t\t\t\terr := invalidImage.Pull()\n\t\t\t\tExpect(err).To(HaveOccurred())\n\t\t\t})\n\t\t\tIt(\"it should not list pulled images [Conformance]\", func() {\n\t\t\t\timage, _ := NewConformanceImage(\"docker\", \"\")\n\t\t\t\ttags, _ := image.List()\n\t\t\t\tExpect(tags).NotTo(ContainElement(invalidImageTag))\n\t\t\t})\n\t\t\tIt(\"it should not remove successfully [Conformance]\", func() {\n\t\t\t\terr := invalidImage.Remove()\n\t\t\t\tExpect(err).To(HaveOccurred())\n\t\t\t})\n\t\t})\n\t\tContext(\"when running a container that terminates\", func() {\n\t\t\tvar terminateCase ConformanceContainer\n\t\t\tIt(\"it should run successfully to completion [Conformance]\", func() {\n\t\t\t\tterminateCase = ConformanceContainer{\n\t\t\t\t\tContainer: api.Container{\n\t\t\t\t\t\tImage: \"gcr.io\/google_containers\/busybox\",\n\t\t\t\t\t\tName: \"busybox\",\n\t\t\t\t\t\tCommand: []string{\"sh\", \"-c\", \"env\"},\n\t\t\t\t\t\tImagePullPolicy: api.PullIfNotPresent,\n\t\t\t\t\t},\n\t\t\t\t\tClient: cl,\n\t\t\t\t\tPhase: api.PodSucceeded,\n\t\t\t\t\tNodeName: *nodeName,\n\t\t\t\t}\n\t\t\t\terr := terminateCase.Create()\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\t\/\/ TODO: Check that the container enters running state by sleeping in the container #23309\n\t\t\t\tEventually(func() (api.PodPhase, error) {\n\t\t\t\t\tpod, err := terminateCase.Get()\n\t\t\t\t\treturn pod.Phase, err\n\t\t\t\t}, retryTimeout, pollInterval).Should(Equal(terminateCase.Phase))\n\t\t\t})\n\t\t\tIt(\"it should report its phase as 'succeeded' [Conformance]\", func() {\n\t\t\t\tccontainer, err := terminateCase.Get()\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\tExpect(ccontainer).Should(CContainerEqual(terminateCase))\n\t\t\t})\n\t\t\tIt(\"it should be possible to delete [Conformance]\", func() {\n\t\t\t\terr := terminateCase.Delete()\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t})\n\t\t})\n\t\tContext(\"when running a container with invalid image\", func() {\n\t\t\tvar invalidImageCase ConformanceContainer\n\t\t\tIt(\"it should not start successfully [Conformance]\", func() {\n\t\t\t\tinvalidImageCase = ConformanceContainer{\n\t\t\t\t\tContainer: api.Container{\n\t\t\t\t\t\tImage: \"foo.com\/foo\/foo\",\n\t\t\t\t\t\tName: \"foo\",\n\t\t\t\t\t\tCommand: []string{\"foo\", \"'Should not work'\"},\n\t\t\t\t\t\tImagePullPolicy: api.PullIfNotPresent,\n\t\t\t\t\t},\n\t\t\t\t\tClient: cl,\n\t\t\t\t\tPhase: api.PodPending,\n\t\t\t\t\tNodeName: *nodeName,\n\t\t\t\t}\n\t\t\t\terr := invalidImageCase.Create()\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\tEventually(func() (api.PodPhase, error) {\n\t\t\t\t\tpod, err := invalidImageCase.Get()\n\t\t\t\t\treturn pod.Phase, err\n\t\t\t\t}, retryTimeout, pollInterval).Should(Equal(invalidImageCase.Phase))\n\t\t\t})\n\t\t\tIt(\"it should report its phase as 'pending' [Conformance]\", func() {\n\t\t\t\tccontainer, err := invalidImageCase.Get()\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\tExpect(ccontainer).Should(CContainerEqual(invalidImageCase))\n\t\t\t})\n\t\t\tIt(\"it should be possible to delete [Conformance]\", func() {\n\t\t\t\terr := invalidImageCase.Delete()\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t})\n\t\t})\n\t})\n})\n<commit_msg>Retry failed image pulls. Closes #23669.<commit_after>\/*\nCopyright 2016 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage e2e_node\n\nimport (\n\t\"time\"\n\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/client\/restclient\"\n\tclient \"k8s.io\/kubernetes\/pkg\/client\/unversioned\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nconst (\n\tretryTimeout = time.Minute * 4\n\tpollInterval = time.Second * 5\n\timageRetryTimeout = time.Minute * 2\n\timagePullInterval = time.Second * 15\n)\n\nvar _ = Describe(\"Container Conformance Test\", func() {\n\tvar cl *client.Client\n\n\tBeforeEach(func() {\n\t\t\/\/ Setup the apiserver client\n\t\tcl = client.NewOrDie(&restclient.Config{Host: *apiServerAddress})\n\t})\n\n\tDescribe(\"container conformance blackbox test\", func() {\n\t\tContext(\"when testing images that exist\", func() {\n\t\t\tvar conformImages []ConformanceImage\n\t\t\tconformImageTags := []string{\n\t\t\t\t\"gcr.io\/google_containers\/busybox:1.24\",\n\t\t\t\t\"gcr.io\/google_containers\/mounttest:0.2\",\n\t\t\t\t\"gcr.io\/google_containers\/nettest:1.7\",\n\t\t\t\t\"gcr.io\/google_containers\/nginx:1.7.9\",\n\t\t\t}\n\t\t\tIt(\"it should pull successfully [Conformance]\", func() {\n\t\t\t\tfor _, imageTag := range conformImageTags {\n\t\t\t\t\timage, _ := NewConformanceImage(\"docker\", imageTag)\n\t\t\t\t\tconformImages = append(conformImages, image)\n\t\t\t\t}\n\t\t\t\tfor _, image := range conformImages {\n\t\t\t\t\t\/\/ Pulling images from gcr.io is flaky, so retry failures\n\t\t\t\t\tEventually(func() error {\n\t\t\t\t\t\treturn image.Pull()\n\t\t\t\t\t}, imageRetryTimeout, imagePullInterval).ShouldNot(HaveOccurred())\n\t\t\t\t}\n\t\t\t})\n\t\t\tIt(\"it should list pulled images [Conformance]\", func() {\n\t\t\t\timage, _ := NewConformanceImage(\"docker\", \"\")\n\t\t\t\ttags, _ := image.List()\n\t\t\t\tfor _, tag := range conformImageTags {\n\t\t\t\t\tExpect(tags).To(ContainElement(tag))\n\t\t\t\t}\n\t\t\t})\n\t\t\tIt(\"it should remove successfully [Conformance]\", func() {\n\t\t\t\tfor _, image := range conformImages {\n\t\t\t\t\tif err := image.Remove(); err != nil {\n\t\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t\tContext(\"when testing image that does not exist\", func() {\n\t\t\tvar invalidImage ConformanceImage\n\t\t\tvar invalidImageTag string\n\t\t\tIt(\"it should not pull successfully [Conformance]\", func() {\n\t\t\t\tinvalidImageTag = \"foo.com\/foo\/foo\"\n\t\t\t\tinvalidImage, _ = NewConformanceImage(\"docker\", invalidImageTag)\n\t\t\t\terr := invalidImage.Pull()\n\t\t\t\tExpect(err).To(HaveOccurred())\n\t\t\t})\n\t\t\tIt(\"it should not list pulled images [Conformance]\", func() {\n\t\t\t\timage, _ := NewConformanceImage(\"docker\", \"\")\n\t\t\t\ttags, _ := image.List()\n\t\t\t\tExpect(tags).NotTo(ContainElement(invalidImageTag))\n\t\t\t})\n\t\t\tIt(\"it should not remove successfully [Conformance]\", func() {\n\t\t\t\terr := invalidImage.Remove()\n\t\t\t\tExpect(err).To(HaveOccurred())\n\t\t\t})\n\t\t})\n\t\tContext(\"when running a container that terminates\", func() {\n\t\t\tvar terminateCase ConformanceContainer\n\t\t\tIt(\"it should run successfully to completion [Conformance]\", func() {\n\t\t\t\tterminateCase = ConformanceContainer{\n\t\t\t\t\tContainer: api.Container{\n\t\t\t\t\t\tImage: \"gcr.io\/google_containers\/busybox\",\n\t\t\t\t\t\tName: \"busybox\",\n\t\t\t\t\t\tCommand: []string{\"sh\", \"-c\", \"env\"},\n\t\t\t\t\t\tImagePullPolicy: api.PullIfNotPresent,\n\t\t\t\t\t},\n\t\t\t\t\tClient: cl,\n\t\t\t\t\tPhase: api.PodSucceeded,\n\t\t\t\t\tNodeName: *nodeName,\n\t\t\t\t}\n\t\t\t\terr := terminateCase.Create()\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\t\t\/\/ TODO: Check that the container enters running state by sleeping in the container #23309\n\t\t\t\tEventually(func() (api.PodPhase, error) {\n\t\t\t\t\tpod, err := terminateCase.Get()\n\t\t\t\t\treturn pod.Phase, err\n\t\t\t\t}, retryTimeout, pollInterval).Should(Equal(terminateCase.Phase))\n\t\t\t})\n\t\t\tIt(\"it should report its phase as 'succeeded' [Conformance]\", func() {\n\t\t\t\tccontainer, err := terminateCase.Get()\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\tExpect(ccontainer).Should(CContainerEqual(terminateCase))\n\t\t\t})\n\t\t\tIt(\"it should be possible to delete [Conformance]\", func() {\n\t\t\t\terr := terminateCase.Delete()\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t})\n\t\t})\n\t\tContext(\"when running a container with invalid image\", func() {\n\t\t\tvar invalidImageCase ConformanceContainer\n\t\t\tIt(\"it should not start successfully [Conformance]\", func() {\n\t\t\t\tinvalidImageCase = ConformanceContainer{\n\t\t\t\t\tContainer: api.Container{\n\t\t\t\t\t\tImage: \"foo.com\/foo\/foo\",\n\t\t\t\t\t\tName: \"foo\",\n\t\t\t\t\t\tCommand: []string{\"foo\", \"'Should not work'\"},\n\t\t\t\t\t\tImagePullPolicy: api.PullIfNotPresent,\n\t\t\t\t\t},\n\t\t\t\t\tClient: cl,\n\t\t\t\t\tPhase: api.PodPending,\n\t\t\t\t\tNodeName: *nodeName,\n\t\t\t\t}\n\t\t\t\terr := invalidImageCase.Create()\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\tEventually(func() (api.PodPhase, error) {\n\t\t\t\t\tpod, err := invalidImageCase.Get()\n\t\t\t\t\treturn pod.Phase, err\n\t\t\t\t}, retryTimeout, pollInterval).Should(Equal(invalidImageCase.Phase))\n\t\t\t})\n\t\t\tIt(\"it should report its phase as 'pending' [Conformance]\", func() {\n\t\t\t\tccontainer, err := invalidImageCase.Get()\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\tExpect(ccontainer).Should(CContainerEqual(invalidImageCase))\n\t\t\t})\n\t\t\tIt(\"it should be possible to delete [Conformance]\", func() {\n\t\t\t\terr := invalidImageCase.Delete()\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package bootstrap implements the bootstrapping logic: generation of a .go file to\n\/\/ launch the actual generator and launching the generator itself.\n\/\/\n\/\/ The package may be preferred to a command-line utility if generating the serializers\n\/\/ from golang code is required.\npackage bootstrap\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"sort\"\n)\n\nconst genPackage = \"github.com\/mailru\/easyjson\/gen\"\nconst pkgWriter = \"github.com\/mailru\/easyjson\/jwriter\"\nconst pkgLexer = \"github.com\/mailru\/easyjson\/jlexer\"\n\ntype Generator struct {\n\tPkgPath, PkgName string\n\tTypes []string\n\n\tNoStdMarshalers bool\n\tSnakeCase bool\n\tLowerCamelCase bool\n\tOmitEmpty bool\n\tDisallowUnknownFields bool\n\n\tOutName string\n\tBuildTags string\n\n\tStubsOnly bool\n\tLeaveTemps bool\n\tNoFormat bool\n}\n\n\/\/ writeStub outputs an initial stubs for marshalers\/unmarshalers so that the package\n\/\/ using marshalers\/unmarshales compiles correctly for boostrapping code.\nfunc (g *Generator) writeStub() error {\n\tf, err := os.Create(g.OutName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tif g.BuildTags != \"\" {\n\t\tfmt.Fprintln(f, \"\/\/ +build \", g.BuildTags)\n\t\tfmt.Fprintln(f)\n\t}\n\tfmt.Fprintln(f, \"\/\/ TEMPORARY AUTOGENERATED FILE: easyjson stub code to make the package\")\n\tfmt.Fprintln(f, \"\/\/ compilable during generation.\")\n\tfmt.Fprintln(f)\n\tfmt.Fprintln(f, \"package \", g.PkgName)\n\n\tif len(g.Types) > 0 {\n\t\tfmt.Fprintln(f)\n\t\tfmt.Fprintln(f, \"import (\")\n\t\tfmt.Fprintln(f, ` \"`+pkgWriter+`\"`)\n\t\tfmt.Fprintln(f, ` \"`+pkgLexer+`\"`)\n\t\tfmt.Fprintln(f, \")\")\n\t}\n\n\tsort.Strings(g.Types)\n\tfor _, t := range g.Types {\n\t\tfmt.Fprintln(f)\n\t\tif !g.NoStdMarshalers {\n\t\t\tfmt.Fprintln(f, \"func (\", t, \") MarshalJSON() ([]byte, error) { return nil, nil }\")\n\t\t\tfmt.Fprintln(f, \"func (*\", t, \") UnmarshalJSON([]byte) error { return nil }\")\n\t\t}\n\n\t\tfmt.Fprintln(f, \"func (\", t, \") MarshalEasyJSON(w *jwriter.Writer) {}\")\n\t\tfmt.Fprintln(f, \"func (*\", t, \") UnmarshalEasyJSON(l *jlexer.Lexer) {}\")\n\t\tfmt.Fprintln(f)\n\t\tfmt.Fprintln(f, \"type EasyJSON_exporter_\"+t+\" *\"+t)\n\t}\n\treturn nil\n}\n\n\/\/ writeMain creates a .go file that launches the generator if 'go run'.\nfunc (g *Generator) writeMain() (path string, err error) {\n\tf, err := ioutil.TempFile(filepath.Dir(g.OutName), \"easyjson-bootstrap\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfmt.Fprintln(f, \"\/\/ +build ignore\")\n\tfmt.Fprintln(f)\n\tfmt.Fprintln(f, \"\/\/ TEMPORARY AUTOGENERATED FILE: easyjson bootstapping code to launch\")\n\tfmt.Fprintln(f, \"\/\/ the actual generator.\")\n\tfmt.Fprintln(f)\n\tfmt.Fprintln(f, \"package main\")\n\tfmt.Fprintln(f)\n\tfmt.Fprintln(f, \"import (\")\n\tfmt.Fprintln(f, ` \"fmt\"`)\n\tfmt.Fprintln(f, ` \"os\"`)\n\tfmt.Fprintln(f)\n\tfmt.Fprintf(f, \" %q\\n\", genPackage)\n\tif len(g.Types) > 0 {\n\t\tfmt.Fprintln(f)\n\t\tfmt.Fprintf(f, \" pkg %q\\n\", g.PkgPath)\n\t}\n\tfmt.Fprintln(f, \")\")\n\tfmt.Fprintln(f)\n\tfmt.Fprintln(f, \"func main() {\")\n\tfmt.Fprintf(f, \" g := gen.NewGenerator(%q)\\n\", filepath.Base(g.OutName))\n\tfmt.Fprintf(f, \" g.SetPkg(%q, %q)\\n\", g.PkgName, g.PkgPath)\n\tif g.BuildTags != \"\" {\n\t\tfmt.Fprintf(f, \" g.SetBuildTags(%q)\\n\", g.BuildTags)\n\t}\n\tif g.SnakeCase {\n\t\tfmt.Fprintln(f, \" g.UseSnakeCase()\")\n\t}\n\tif g.LowerCamelCase {\n\t\tfmt.Fprintln(f, \" g.UseLowerCamelCase()\")\n\t}\n\tif g.OmitEmpty {\n\t\tfmt.Fprintln(f, \" g.OmitEmpty()\")\n\t}\n\tif g.NoStdMarshalers {\n\t\tfmt.Fprintln(f, \" g.NoStdMarshalers()\")\n\t}\n\tif g.DisallowUnknownFields {\n\t\tfmt.Fprintln(f, \" g.DisallowUnknownFields()\")\n\t}\n\n\tsort.Strings(g.Types)\n\tfor _, v := range g.Types {\n\t\tfmt.Fprintln(f, \" g.Add(pkg.EasyJSON_exporter_\"+v+\"(nil))\")\n\t}\n\n\tfmt.Fprintln(f, \" if err := g.Run(os.Stdout); err != nil {\")\n\tfmt.Fprintln(f, \" fmt.Fprintln(os.Stderr, err)\")\n\tfmt.Fprintln(f, \" os.Exit(1)\")\n\tfmt.Fprintln(f, \" }\")\n\tfmt.Fprintln(f, \"}\")\n\n\tsrc := f.Name()\n\tif err := f.Close(); err != nil {\n\t\treturn src, err\n\t}\n\n\tdest := src + \".go\"\n\treturn dest, os.Rename(src, dest)\n}\n\nfunc (g *Generator) Run() error {\n\tif err := g.writeStub(); err != nil {\n\t\treturn err\n\t}\n\tif g.StubsOnly {\n\t\treturn nil\n\t}\n\n\tpath, err := g.writeMain()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !g.LeaveTemps {\n\t\tdefer os.Remove(path)\n\t}\n\n\tf, err := os.Create(g.OutName + \".tmp\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !g.LeaveTemps {\n\t\tdefer os.Remove(f.Name()) \/\/ will not remove after rename\n\t}\n\n\tcmd := exec.Command(\"go\", \"run\", \"-tags\", g.BuildTags, filepath.Base(path))\n\tcmd.Stdout = f\n\tcmd.Stderr = os.Stderr\n\tcmd.Dir = filepath.Dir(path)\n\tif err = cmd.Run(); err != nil {\n\t\treturn err\n\t}\n\n\tf.Close()\n\n\tif !g.NoFormat {\n\t\tcmd = exec.Command(\"gofmt\", \"-w\", f.Name())\n\t\tcmd.Stderr = os.Stderr\n\t\tcmd.Stdout = os.Stdout\n\n\t\tif err = cmd.Run(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn os.Rename(f.Name(), g.OutName)\n}\n<commit_msg>Fix a little typo<commit_after>\/\/ Package bootstrap implements the bootstrapping logic: generation of a .go file to\n\/\/ launch the actual generator and launching the generator itself.\n\/\/\n\/\/ The package may be preferred to a command-line utility if generating the serializers\n\/\/ from golang code is required.\npackage bootstrap\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"sort\"\n)\n\nconst genPackage = \"github.com\/mailru\/easyjson\/gen\"\nconst pkgWriter = \"github.com\/mailru\/easyjson\/jwriter\"\nconst pkgLexer = \"github.com\/mailru\/easyjson\/jlexer\"\n\ntype Generator struct {\n\tPkgPath, PkgName string\n\tTypes []string\n\n\tNoStdMarshalers bool\n\tSnakeCase bool\n\tLowerCamelCase bool\n\tOmitEmpty bool\n\tDisallowUnknownFields bool\n\n\tOutName string\n\tBuildTags string\n\n\tStubsOnly bool\n\tLeaveTemps bool\n\tNoFormat bool\n}\n\n\/\/ writeStub outputs an initial stub for marshalers\/unmarshalers so that the package\n\/\/ using marshalers\/unmarshales compiles correctly for boostrapping code.\nfunc (g *Generator) writeStub() error {\n\tf, err := os.Create(g.OutName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tif g.BuildTags != \"\" {\n\t\tfmt.Fprintln(f, \"\/\/ +build \", g.BuildTags)\n\t\tfmt.Fprintln(f)\n\t}\n\tfmt.Fprintln(f, \"\/\/ TEMPORARY AUTOGENERATED FILE: easyjson stub code to make the package\")\n\tfmt.Fprintln(f, \"\/\/ compilable during generation.\")\n\tfmt.Fprintln(f)\n\tfmt.Fprintln(f, \"package \", g.PkgName)\n\n\tif len(g.Types) > 0 {\n\t\tfmt.Fprintln(f)\n\t\tfmt.Fprintln(f, \"import (\")\n\t\tfmt.Fprintln(f, ` \"`+pkgWriter+`\"`)\n\t\tfmt.Fprintln(f, ` \"`+pkgLexer+`\"`)\n\t\tfmt.Fprintln(f, \")\")\n\t}\n\n\tsort.Strings(g.Types)\n\tfor _, t := range g.Types {\n\t\tfmt.Fprintln(f)\n\t\tif !g.NoStdMarshalers {\n\t\t\tfmt.Fprintln(f, \"func (\", t, \") MarshalJSON() ([]byte, error) { return nil, nil }\")\n\t\t\tfmt.Fprintln(f, \"func (*\", t, \") UnmarshalJSON([]byte) error { return nil }\")\n\t\t}\n\n\t\tfmt.Fprintln(f, \"func (\", t, \") MarshalEasyJSON(w *jwriter.Writer) {}\")\n\t\tfmt.Fprintln(f, \"func (*\", t, \") UnmarshalEasyJSON(l *jlexer.Lexer) {}\")\n\t\tfmt.Fprintln(f)\n\t\tfmt.Fprintln(f, \"type EasyJSON_exporter_\"+t+\" *\"+t)\n\t}\n\treturn nil\n}\n\n\/\/ writeMain creates a .go file that launches the generator if 'go run'.\nfunc (g *Generator) writeMain() (path string, err error) {\n\tf, err := ioutil.TempFile(filepath.Dir(g.OutName), \"easyjson-bootstrap\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfmt.Fprintln(f, \"\/\/ +build ignore\")\n\tfmt.Fprintln(f)\n\tfmt.Fprintln(f, \"\/\/ TEMPORARY AUTOGENERATED FILE: easyjson bootstapping code to launch\")\n\tfmt.Fprintln(f, \"\/\/ the actual generator.\")\n\tfmt.Fprintln(f)\n\tfmt.Fprintln(f, \"package main\")\n\tfmt.Fprintln(f)\n\tfmt.Fprintln(f, \"import (\")\n\tfmt.Fprintln(f, ` \"fmt\"`)\n\tfmt.Fprintln(f, ` \"os\"`)\n\tfmt.Fprintln(f)\n\tfmt.Fprintf(f, \" %q\\n\", genPackage)\n\tif len(g.Types) > 0 {\n\t\tfmt.Fprintln(f)\n\t\tfmt.Fprintf(f, \" pkg %q\\n\", g.PkgPath)\n\t}\n\tfmt.Fprintln(f, \")\")\n\tfmt.Fprintln(f)\n\tfmt.Fprintln(f, \"func main() {\")\n\tfmt.Fprintf(f, \" g := gen.NewGenerator(%q)\\n\", filepath.Base(g.OutName))\n\tfmt.Fprintf(f, \" g.SetPkg(%q, %q)\\n\", g.PkgName, g.PkgPath)\n\tif g.BuildTags != \"\" {\n\t\tfmt.Fprintf(f, \" g.SetBuildTags(%q)\\n\", g.BuildTags)\n\t}\n\tif g.SnakeCase {\n\t\tfmt.Fprintln(f, \" g.UseSnakeCase()\")\n\t}\n\tif g.LowerCamelCase {\n\t\tfmt.Fprintln(f, \" g.UseLowerCamelCase()\")\n\t}\n\tif g.OmitEmpty {\n\t\tfmt.Fprintln(f, \" g.OmitEmpty()\")\n\t}\n\tif g.NoStdMarshalers {\n\t\tfmt.Fprintln(f, \" g.NoStdMarshalers()\")\n\t}\n\tif g.DisallowUnknownFields {\n\t\tfmt.Fprintln(f, \" g.DisallowUnknownFields()\")\n\t}\n\n\tsort.Strings(g.Types)\n\tfor _, v := range g.Types {\n\t\tfmt.Fprintln(f, \" g.Add(pkg.EasyJSON_exporter_\"+v+\"(nil))\")\n\t}\n\n\tfmt.Fprintln(f, \" if err := g.Run(os.Stdout); err != nil {\")\n\tfmt.Fprintln(f, \" fmt.Fprintln(os.Stderr, err)\")\n\tfmt.Fprintln(f, \" os.Exit(1)\")\n\tfmt.Fprintln(f, \" }\")\n\tfmt.Fprintln(f, \"}\")\n\n\tsrc := f.Name()\n\tif err := f.Close(); err != nil {\n\t\treturn src, err\n\t}\n\n\tdest := src + \".go\"\n\treturn dest, os.Rename(src, dest)\n}\n\nfunc (g *Generator) Run() error {\n\tif err := g.writeStub(); err != nil {\n\t\treturn err\n\t}\n\tif g.StubsOnly {\n\t\treturn nil\n\t}\n\n\tpath, err := g.writeMain()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !g.LeaveTemps {\n\t\tdefer os.Remove(path)\n\t}\n\n\tf, err := os.Create(g.OutName + \".tmp\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !g.LeaveTemps {\n\t\tdefer os.Remove(f.Name()) \/\/ will not remove after rename\n\t}\n\n\tcmd := exec.Command(\"go\", \"run\", \"-tags\", g.BuildTags, filepath.Base(path))\n\tcmd.Stdout = f\n\tcmd.Stderr = os.Stderr\n\tcmd.Dir = filepath.Dir(path)\n\tif err = cmd.Run(); err != nil {\n\t\treturn err\n\t}\n\n\tf.Close()\n\n\tif !g.NoFormat {\n\t\tcmd = exec.Command(\"gofmt\", \"-w\", f.Name())\n\t\tcmd.Stderr = os.Stderr\n\t\tcmd.Stdout = os.Stdout\n\n\t\tif err = cmd.Run(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn os.Rename(f.Name(), g.OutName)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Google Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage version\n\nvar (\n\t\/\/ Major is the current major version of master branch.\n\tMajor = 0\n\t\/\/ Minor is the current minor version of master branch.\n\tMinor = 2\n\t\/\/ Patch is the curernt patched version of the master branch.\n\tPatch = 1\n\t\/\/ Release is the current release level of the master branch. Valid values\n\t\/\/ are dev (developement unreleased), rcX (release candidate with current\n\t\/\/ iteration), stable (indicates a final released version).\n\tRelease = \"stable\"\n)\n<commit_msg>Bumping ups to 0.2.2 dev<commit_after>\/\/ Copyright 2016 Google Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage version\n\nvar (\n\t\/\/ Major is the current major version of master branch.\n\tMajor = 0\n\t\/\/ Minor is the current minor version of master branch.\n\tMinor = 2\n\t\/\/ Patch is the curernt patched version of the master branch.\n\tPatch = 2\n\t\/\/ Release is the current release level of the master branch. Valid values\n\t\/\/ are dev (developement unreleased), rcX (release candidate with current\n\t\/\/ iteration), stable (indicates a final released version).\n\tRelease = \"dev\"\n)\n<|endoftext|>"} {"text":"<commit_before>package operators\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\tg \"github.com\/onsi\/ginkgo\"\n\to \"github.com\/onsi\/gomega\"\n\t\"github.com\/stretchr\/objx\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\t\"k8s.io\/client-go\/dynamic\"\n\te2e \"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\te2eskipper \"k8s.io\/kubernetes\/test\/e2e\/framework\/skipper\"\n\n\texutil \"github.com\/openshift\/origin\/test\/extended\/util\"\n\t\"github.com\/openshift\/origin\/test\/extended\/util\/prometheus\"\n)\n\nvar _ = g.Describe(\"[sig-cluster-lifecycle][Feature:Machines][Early] Managed cluster should\", func() {\n\tdefer g.GinkgoRecover()\n\n\tg.It(\"have same number of Machines and Nodes\", func() {\n\t\tcfg, err := e2e.LoadConfig()\n\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\tc, err := e2e.LoadClientset()\n\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\tdc, err := dynamic.NewForConfig(cfg)\n\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\tg.By(\"getting MachineSet list\")\n\t\tmachineSetClient := dc.Resource(schema.GroupVersionResource{Group: \"machine.openshift.io\", Resource: \"machinesets\", Version: \"v1beta1\"})\n\t\tmsList, err := machineSetClient.List(context.Background(), metav1.ListOptions{})\n\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\tmachineSetList := objx.Map(msList.UnstructuredContent())\n\t\tmachineSetItems := objects(machineSetList.Get(\"items\"))\n\n\t\tif len(machineSetItems) == 0 {\n\t\t\te2eskipper.Skipf(\"cluster does not have machineset resources\")\n\t\t}\n\n\t\tg.By(\"getting Node list\")\n\t\tnodeList, err := c.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})\n\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\tnodeItems := nodeList.Items\n\n\t\tg.By(\"getting Machine list\")\n\t\tmachineClient := dc.Resource(schema.GroupVersionResource{Group: \"machine.openshift.io\", Resource: \"machines\", Version: \"v1beta1\"})\n\t\tobj, err := machineClient.List(context.Background(), metav1.ListOptions{})\n\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\tmachineList := objx.Map(obj.UnstructuredContent())\n\t\tmachineItems := objects(machineList.Get(\"items\"))\n\n\t\tg.By(\"ensure number of Machines and Nodes are equal\")\n\t\to.Expect(len(nodeItems)).To(o.Equal(len(machineItems)))\n\t})\n})\n\nvar _ = g.Describe(\"[sig-node] Managed cluster\", func() {\n\tdefer g.GinkgoRecover()\n\tvar (\n\t\toc = exutil.NewCLIWithoutNamespace(\"managed-cluster-node\")\n\n\t\turl, bearerToken string\n\t)\n\tg.BeforeEach(func() {\n\t\tvar ok bool\n\t\turl, bearerToken, ok = prometheus.LocatePrometheus(oc)\n\t\tif !ok {\n\t\t\te2e.Failf(\"Prometheus could not be located on this cluster, failing prometheus test\")\n\t\t}\n\t})\n\n\tg.It(\"should report ready nodes the entire duration of the test run [Late]\", func() {\n\t\tns := oc.SetupNamespace()\n\t\texecPod := exutil.CreateExecPodOrFail(oc.AdminKubeClient(), ns, \"execpod\")\n\t\tdefer func() {\n\t\t\toc.AdminKubeClient().CoreV1().Pods(ns).Delete(context.Background(), execPod.Name, *metav1.NewDeleteOptions(1))\n\t\t}()\n\n\t\t\/\/ we only consider samples since the beginning of the test\n\t\ttestDuration := exutil.DurationSinceStartInSeconds().String()\n\n\t\ttests := map[string]bool{\n\t\t\t\/\/ all nodes should be reporting ready throughout the entire run, as long as they are older than 6m\n\t\t\tfmt.Sprintf(`(min_over_time((max by (node) (kube_node_status_condition{condition=\"Ready\",status=\"true\"}) and (((max by (node) (kube_node_status_condition))) and (0*max by (node) (kube_node_status_condition offset 6m))))[%s:1s])) < 1`, testDuration): false,\n\t\t}\n\t\terr := prometheus.RunQueries(tests, oc, ns, execPod.Name, url, bearerToken)\n\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t})\n})\n<commit_msg>test: Nodes that are deleted should not fire the unready alert<commit_after>package operators\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\tg \"github.com\/onsi\/ginkgo\"\n\to \"github.com\/onsi\/gomega\"\n\t\"github.com\/stretchr\/objx\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\t\"k8s.io\/client-go\/dynamic\"\n\te2e \"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\te2eskipper \"k8s.io\/kubernetes\/test\/e2e\/framework\/skipper\"\n\n\texutil \"github.com\/openshift\/origin\/test\/extended\/util\"\n\t\"github.com\/openshift\/origin\/test\/extended\/util\/prometheus\"\n)\n\nvar _ = g.Describe(\"[sig-cluster-lifecycle][Feature:Machines][Early] Managed cluster should\", func() {\n\tdefer g.GinkgoRecover()\n\n\tg.It(\"have same number of Machines and Nodes\", func() {\n\t\tcfg, err := e2e.LoadConfig()\n\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\tc, err := e2e.LoadClientset()\n\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\tdc, err := dynamic.NewForConfig(cfg)\n\t\to.Expect(err).NotTo(o.HaveOccurred())\n\n\t\tg.By(\"getting MachineSet list\")\n\t\tmachineSetClient := dc.Resource(schema.GroupVersionResource{Group: \"machine.openshift.io\", Resource: \"machinesets\", Version: \"v1beta1\"})\n\t\tmsList, err := machineSetClient.List(context.Background(), metav1.ListOptions{})\n\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\tmachineSetList := objx.Map(msList.UnstructuredContent())\n\t\tmachineSetItems := objects(machineSetList.Get(\"items\"))\n\n\t\tif len(machineSetItems) == 0 {\n\t\t\te2eskipper.Skipf(\"cluster does not have machineset resources\")\n\t\t}\n\n\t\tg.By(\"getting Node list\")\n\t\tnodeList, err := c.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})\n\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\tnodeItems := nodeList.Items\n\n\t\tg.By(\"getting Machine list\")\n\t\tmachineClient := dc.Resource(schema.GroupVersionResource{Group: \"machine.openshift.io\", Resource: \"machines\", Version: \"v1beta1\"})\n\t\tobj, err := machineClient.List(context.Background(), metav1.ListOptions{})\n\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t\tmachineList := objx.Map(obj.UnstructuredContent())\n\t\tmachineItems := objects(machineList.Get(\"items\"))\n\n\t\tg.By(\"ensure number of Machines and Nodes are equal\")\n\t\to.Expect(len(nodeItems)).To(o.Equal(len(machineItems)))\n\t})\n})\n\nvar _ = g.Describe(\"[sig-node] Managed cluster\", func() {\n\tdefer g.GinkgoRecover()\n\tvar (\n\t\toc = exutil.NewCLIWithoutNamespace(\"managed-cluster-node\")\n\n\t\turl, bearerToken string\n\t)\n\tg.BeforeEach(func() {\n\t\tvar ok bool\n\t\turl, bearerToken, ok = prometheus.LocatePrometheus(oc)\n\t\tif !ok {\n\t\t\te2e.Failf(\"Prometheus could not be located on this cluster, failing prometheus test\")\n\t\t}\n\t})\n\n\tg.It(\"should report ready nodes the entire duration of the test run [Late]\", func() {\n\t\tns := oc.SetupNamespace()\n\t\texecPod := exutil.CreateExecPodOrFail(oc.AdminKubeClient(), ns, \"execpod\")\n\t\tdefer func() {\n\t\t\toc.AdminKubeClient().CoreV1().Pods(ns).Delete(context.Background(), execPod.Name, *metav1.NewDeleteOptions(1))\n\t\t}()\n\n\t\t\/\/ we only consider samples since the beginning of the test\n\t\ttestDuration := exutil.DurationSinceStartInSeconds().String()\n\n\t\ttests := map[string]bool{\n\t\t\t\/\/ all nodes should be reporting ready throughout the entire run, as long as they are older than 6m, and they still\n\t\t\t\/\/ exist in 1m (because prometheus doesn't support negative offsets, we have to shift the entire query left). Since\n\t\t\t\/\/ the late test might not catch a node not ready at the very end of the run anyway, we don't do anything special\n\t\t\t\/\/ to shift the test execution later, we just note that there's a scrape_interval+wait_interval gap here of up to\n\t\t\t\/\/ 1m30s and we can live with ith\n\t\t\tfmt.Sprintf(`(min_over_time((max by (node) (kube_node_status_condition{condition=\"Ready\",status=\"true\"} offset 1m) and (((max by (node) (kube_node_status_condition offset 1m))) and (0*max by (node) (kube_node_status_condition offset 7m)) and (0*max by (node) (kube_node_status_condition))))[%s:1s])) < 1`, testDuration): false,\n\t\t}\n\t\terr := prometheus.RunQueries(tests, oc, ns, execPod.Name, url, bearerToken)\n\t\to.Expect(err).NotTo(o.HaveOccurred())\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package effio\n\n\/\/ encoding\/csv doesn't strip whitespace and does a fair bit of\n\/\/ work to handle strings & quoting which are totally unnecessary\n\/\/ overhead for these files so skip it\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ the input is ints but plotinum uses float64 so might as well\n\/\/ start there and avoid the type conversions later\ntype LatRec struct {\n\ttime float64 \/\/ time offset from beginning of fio run\n\tperf float64 \/\/ latency value\n}\n\ntype LatRecs []LatRec\n\n\/*\ntime, perf, ??, block\n3, 205274611861, 0, 4096\n16, 205274624691, 0, 4096\n*\/\nfunc LoadCSV(filename string) LatRecs {\n\tfd, err := os.Open(filename)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to open file '%s' for read: %s\\n\", filename, err)\n\t\treturn LatRecs{}\n\t}\n\tdefer fd.Close()\n\n\trecords := make(LatRecs, 0)\n\tvar time, perf float64\n\tbfd := bufio.NewReader(fd)\n\tvar lno int = 0\n\tfor {\n\t\tline, _, err := bfd.ReadLine()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Read from file '%s' failed: %s\", filename, err)\n\t\t}\n\t\tlno++\n\n\t\t\/\/ fio always uses \", \" instead of \",\" as far as I can tell\n\t\tr := strings.SplitN(string(line), \", \", 4)\n\t\t\/\/ probably an impartial record at the end of the file\n\t\tif len(r) < 4 || r[0] == \"\" || r[1] == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\ttime, err = strconv.ParseFloat(r[0], 64)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Parsing time integer failed in file '%s' at line %d: %s\", filename, lno, err)\n\t\t}\n\t\tperf, err = strconv.ParseFloat(r[1], 64)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Parsing perf integer failed in file '%s' at line %d: %s\", filename, lno, err)\n\t\t}\n\t\t\/\/ r[2:3] are unused, 2 is reserved, 3 is block size\n\n\t\tlr := LatRec{time, perf}\n\t\trecords = append(records, lr)\n\t}\n\tlog.Printf(\"Done parsing file '%s'.\\n\", filename)\n\n\treturn records\n}\n\n\/\/ implement some plotinum interfaces\nfunc (lrs LatRecs) Len() int {\n\treturn len(lrs)\n}\n\nfunc (lrs LatRecs) XY(i int) (float64, float64) {\n\treturn lrs[i].time, lrs[i].perf\n}\n\nfunc (lrs LatRecs) Value(i int) float64 {\n\treturn lrs[i].perf\n}\n\nfunc (lrs LatRecs) Values(i int) (vals []float64) {\n\tfor _, l := range lrs {\n\t\tvals = append(vals, l.perf)\n\t}\n\treturn\n}\n\n\/\/ reduces the number of data points to sz by taking the mean across buckets\nfunc (lrs LatRecs) Histogram(sz int) (out LatRecs) {\n\tif sz > len(lrs) {\n\t\tlog.Fatalf(\"Error: Histogram(%d) is smaller than the dataset of length %d.\", sz, len(lrs))\n\t}\n\n\tbktsz := len(lrs) \/ sz\n\tlog.Printf(\"Bucket size for %d\/%d is %d\\n\", len(lrs), sz, bktsz)\n\n\tvar total, time float64\n\tvar count int = 0\n\tfor _, v := range lrs {\n\t\tif count == 0 {\n\t\t\ttime = v.time\n\t\t\ttotal = 0.0\n\t\t}\n\n\t\ttotal += v.perf\n\t\tcount++\n\n\t\tif count == bktsz {\n\t\t\tval := total \/ float64(count)\n\t\t\tout = append(out, LatRec{time, val})\n\t\t\tcount = 0\n\t\t\tcontinue\n\t\t}\n\t}\n\treturn\n}\n<commit_msg>add ..... progress to CVS loading (for now)<commit_after>package effio\n\n\/\/ encoding\/csv doesn't strip whitespace and does a fair bit of\n\/\/ work to handle strings & quoting which are totally unnecessary\n\/\/ overhead for these files so skip it\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ the input is ints but plotinum uses float64 so might as well\n\/\/ start there and avoid the type conversions later\ntype LatRec struct {\n\ttime float64 \/\/ time offset from beginning of fio run\n\tperf float64 \/\/ latency value\n}\n\ntype LatRecs []LatRec\n\n\/*\ntime, perf, ??, block\n3, 205274611861, 0, 4096\n16, 205274624691, 0, 4096\n*\/\nfunc LoadCSV(filename string) LatRecs {\n\tfmt.Printf(\"Parsing file: '%s' ... \", filename)\n\n\tfd, err := os.Open(filename)\n\tif err != nil {\n\t\tfmt.Printf(\" Failed.\\nCould not open file '%s' for read: %s\\n\", filename, err)\n\t\treturn LatRecs{}\n\t}\n\tdefer fd.Close()\n\n\trecords := make(LatRecs, 0)\n\tvar time, perf float64\n\tbfd := bufio.NewReader(fd)\n\tvar lno int = 0\n\tfor {\n\t\tline, _, err := bfd.ReadLine()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"\\nRead from file '%s' failed: %s\", filename, err)\n\t\t}\n\t\tlno++\n\n\t\tif lno%10000 == 0 {\n\t\t\tfmt.Printf(\".\")\n\t\t}\n\n\t\t\/\/ fio always uses \", \" instead of \",\" as far as I can tell\n\t\tr := strings.SplitN(string(line), \", \", 4)\n\t\t\/\/ probably an impartial record at the end of the file\n\t\tif len(r) < 4 || r[0] == \"\" || r[1] == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\ttime, err = strconv.ParseFloat(r[0], 64)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"\\nParsing time integer failed in file '%s' at line %d: %s\", filename, lno, err)\n\t\t}\n\t\tperf, err = strconv.ParseFloat(r[1], 64)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"\\nParsing perf integer failed in file '%s' at line %d: %s\", filename, lno, err)\n\t\t}\n\t\t\/\/ r[2:3] are unused, 2 is reserved, 3 is block size\n\n\t\tlr := LatRec{time, perf}\n\t\trecords = append(records, lr)\n\t}\n\tfmt.Println(\" Done.\")\n\n\treturn records\n}\n\n\/\/ implement some plotinum interfaces\nfunc (lrs LatRecs) Len() int {\n\treturn len(lrs)\n}\n\nfunc (lrs LatRecs) XY(i int) (float64, float64) {\n\treturn lrs[i].time, lrs[i].perf\n}\n\nfunc (lrs LatRecs) Value(i int) float64 {\n\treturn lrs[i].perf\n}\n\nfunc (lrs LatRecs) Values(i int) (vals []float64) {\n\tfor _, l := range lrs {\n\t\tvals = append(vals, l.perf)\n\t}\n\treturn\n}\n\n\/\/ reduces the number of data points to sz by taking the mean across buckets\nfunc (lrs LatRecs) Histogram(sz int) (out LatRecs) {\n\tif sz > len(lrs) {\n\t\tlog.Fatalf(\"Error: Histogram(%d) is smaller than the dataset of length %d.\", sz, len(lrs))\n\t}\n\n\tbktsz := len(lrs) \/ sz\n\tlog.Printf(\"Bucket size for %d\/%d is %d\\n\", len(lrs), sz, bktsz)\n\n\tvar total, time float64\n\tvar count int = 0\n\tfor _, v := range lrs {\n\t\tif count == 0 {\n\t\t\ttime = v.time\n\t\t\ttotal = 0.0\n\t\t}\n\n\t\ttotal += v.perf\n\t\tcount++\n\n\t\tif count == bktsz {\n\t\t\tval := total \/ float64(count)\n\t\t\tout = append(out, LatRec{time, val})\n\t\t\tcount = 0\n\t\t\tcontinue\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015-2016, Cyrill @ Schumacher.fm and the CoreStore contributors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package net provides additional network helper functions.\npackage net\n<commit_msg>net: Update doc<commit_after>\/\/ Copyright 2015-2016, Cyrill @ Schumacher.fm and the CoreStore contributors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package net provides additional network helper functions.\n\/\/\n\/\/ context Package: https:\/\/twitter.com\/peterbourgon\/status\/752022730812317696\npackage net\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package vector implements persistent vector.\npackage vector\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n)\n\nconst (\n\tchunkBits = 5\n\tnodeSize = 1 << chunkBits\n\ttailMaxLen = nodeSize\n\tchunkMask = nodeSize - 1\n)\n\n\/\/ Vector is a persistent sequential container for arbitrary values. It supports\n\/\/ O(1) lookup by index, modification by index, and insertion and removal\n\/\/ operations at the end. Being a persistent variant of the data structure, it\n\/\/ is immutable, and provides O(1) operations to create modified versions of the\n\/\/ vector that shares the underlying data structure, making it suitable for\n\/\/ concurrent access. The empty value is a valid empty vector.\ntype Vector interface {\n\tjson.Marshaler\n\t\/\/ Len returns the length of the vector.\n\tLen() int\n\t\/\/ Index returns the i-th element of the vector, if it exists. The second\n\t\/\/ return value indicates whether the element exists.\n\tIndex(i int) (interface{}, bool)\n\t\/\/ Assoc returns an almost identical Vector, with the i-th element\n\t\/\/ replaced. If the index is smaller than 0 or greater than the length of\n\t\/\/ the vector, it returns nil. If the index is equal to the size of the\n\t\/\/ vector, it is equivalent to Cons.\n\tAssoc(i int, val interface{}) Vector\n\t\/\/ Cons returns an almost identical Vector, with an additional element\n\t\/\/ appended to the end.\n\tCons(val interface{}) Vector\n\t\/\/ Pop returns an almost identical Vector, with the last element removed. It\n\t\/\/ returns nil if the vector is already empty.\n\tPop() Vector\n\t\/\/ SubVector returns a subvector containing the elements from i up to but\n\t\/\/ not including j.\n\tSubVector(i, j int) Vector\n\t\/\/ Iterator returns an iterator over the vector.\n\tIterator() Iterator\n}\n\n\/\/ Iterator is an iterator over vector elements. It can be used like this:\n\/\/\n\/\/ for it := v.Iterator(); it.HasElem(); it.Next() {\n\/\/ elem := it.Elem()\n\/\/ \/\/ do something with elem...\n\/\/ }\ntype Iterator interface {\n\t\/\/ Elem returns the element at the current position.\n\tElem() interface{}\n\t\/\/ HasElem returns whether the iterator is pointing to an element.\n\tHasElem() bool\n\t\/\/ Next moves the iterator to the next position.\n\tNext()\n}\n\ntype vector struct {\n\tcount int\n\t\/\/ height of the tree structure, defined to be 0 when root is a leaf.\n\theight uint\n\troot node\n\ttail []interface{}\n}\n\n\/\/ Empty is an empty Vector.\nvar Empty Vector = &vector{}\n\n\/\/ node is a node in the vector tree. It is always of the size nodeSize.\ntype node *[nodeSize]interface{}\n\nfunc newNode() node {\n\treturn node(&[nodeSize]interface{}{})\n}\n\nfunc clone(n node) node {\n\ta := *n\n\treturn node(&a)\n}\n\nfunc nodeFromSlice(s []interface{}) node {\n\tvar n [nodeSize]interface{}\n\tcopy(n[:], s)\n\treturn &n\n}\n\n\/\/ Count returns the number of elements in a Vector.\nfunc (v *vector) Len() int {\n\treturn v.count\n}\n\n\/\/ treeSize returns the number of elements stored in the tree (as opposed to the\n\/\/ tail).\nfunc (v *vector) treeSize() int {\n\tif v.count < tailMaxLen {\n\t\treturn 0\n\t}\n\treturn ((v.count - 1) >> chunkBits) << chunkBits\n}\n\nfunc (v *vector) Index(i int) (interface{}, bool) {\n\tif i < 0 || i >= v.count {\n\t\treturn nil, false\n\t}\n\n\t\/\/ The following is very similar to sliceFor, but is implemented separately\n\t\/\/ to avoid unncessary copying.\n\tif i >= v.treeSize() {\n\t\treturn v.tail[i&chunkMask], true\n\t}\n\tn := v.root\n\tfor shift := v.height * chunkBits; shift > 0; shift -= chunkBits {\n\t\tn = n[(i>>shift)&chunkMask].(node)\n\t}\n\treturn n[i&chunkMask], true\n}\n\n\/\/ sliceFor returns the slice where the i-th element is stored. The index must\n\/\/ be in bound.\nfunc (v *vector) sliceFor(i int) []interface{} {\n\tif i >= v.treeSize() {\n\t\treturn v.tail\n\t}\n\tn := v.root\n\tfor shift := v.height * chunkBits; shift > 0; shift -= chunkBits {\n\t\tn = n[(i>>shift)&chunkMask].(node)\n\t}\n\treturn n[:]\n}\n\nfunc (v *vector) Assoc(i int, val interface{}) Vector {\n\tif i < 0 || i > v.count {\n\t\treturn nil\n\t} else if i == v.count {\n\t\treturn v.Cons(val)\n\t}\n\tif i >= v.treeSize() {\n\t\tnewTail := append([]interface{}(nil), v.tail...)\n\t\tnewTail[i&chunkMask] = val\n\t\treturn &vector{v.count, v.height, v.root, newTail}\n\t}\n\treturn &vector{v.count, v.height, doAssoc(v.height, v.root, i, val), v.tail}\n}\n\n\/\/ doAssoc returns an almost identical tree, with the i-th element replaced by\n\/\/ val.\nfunc doAssoc(height uint, n node, i int, val interface{}) node {\n\tm := clone(n)\n\tif height == 0 {\n\t\tm[i&chunkMask] = val\n\t} else {\n\t\tsub := (i >> (height * chunkBits)) & chunkMask\n\t\tm[sub] = doAssoc(height-1, m[sub].(node), i, val)\n\t}\n\treturn m\n}\n\nfunc (v *vector) Cons(val interface{}) Vector {\n\t\/\/ Room in tail?\n\tif v.count-v.treeSize() < tailMaxLen {\n\t\tnewTail := make([]interface{}, len(v.tail)+1)\n\t\tcopy(newTail, v.tail)\n\t\tnewTail[len(v.tail)] = val\n\t\treturn &vector{v.count + 1, v.height, v.root, newTail}\n\t}\n\t\/\/ Full tail; push into tree.\n\ttailNode := nodeFromSlice(v.tail)\n\tnewHeight := v.height\n\tvar newRoot node\n\t\/\/ Overflow root?\n\tif (v.count >> chunkBits) > (1 << (v.height * chunkBits)) {\n\t\tnewRoot = newNode()\n\t\tnewRoot[0] = v.root\n\t\tnewRoot[1] = newPath(v.height, tailNode)\n\t\tnewHeight++\n\t} else {\n\t\tnewRoot = v.pushTail(v.height, v.root, tailNode)\n\t}\n\treturn &vector{v.count + 1, newHeight, newRoot, []interface{}{val}}\n}\n\n\/\/ pushTail returns a tree with tail appended.\nfunc (v *vector) pushTail(height uint, n node, tail node) node {\n\tif height == 0 {\n\t\treturn tail\n\t}\n\tidx := ((v.count - 1) >> (height * chunkBits)) & chunkMask\n\tm := clone(n)\n\tchild := n[idx]\n\tif child == nil {\n\t\tm[idx] = newPath(height-1, tail)\n\t} else {\n\t\tm[idx] = v.pushTail(height-1, child.(node), tail)\n\t}\n\treturn m\n}\n\n\/\/ newPath creates a left-branching tree of specified height and leaf.\nfunc newPath(height uint, leaf node) node {\n\tif height == 0 {\n\t\treturn leaf\n\t}\n\tret := newNode()\n\tret[0] = newPath(height-1, leaf)\n\treturn ret\n}\n\nfunc (v *vector) Pop() Vector {\n\tswitch v.count {\n\tcase 0:\n\t\treturn nil\n\tcase 1:\n\t\treturn Empty\n\t}\n\tif v.count-v.treeSize() > 1 {\n\t\tnewTail := make([]interface{}, len(v.tail)-1)\n\t\tcopy(newTail, v.tail)\n\t\treturn &vector{v.count - 1, v.height, v.root, newTail}\n\t}\n\tnewTail := v.sliceFor(v.count - 2)\n\tnewRoot := v.popTail(v.height, v.root)\n\tnewHeight := v.height\n\tif v.height > 0 && newRoot[1] == nil {\n\t\tnewRoot = newRoot[0].(node)\n\t\tnewHeight--\n\t}\n\treturn &vector{v.count - 1, newHeight, newRoot, newTail}\n}\n\n\/\/ popTail returns a new tree with the last leaf removed.\nfunc (v *vector) popTail(level uint, n node) node {\n\tidx := ((v.count - 2) >> (level * chunkBits)) & chunkMask\n\tif level > 1 {\n\t\tnewChild := v.popTail(level-1, n[idx].(node))\n\t\tif newChild == nil && idx == 0 {\n\t\t\treturn nil\n\t\t}\n\t\tm := clone(n)\n\t\tif newChild == nil {\n\t\t\t\/\/ This is needed since `m[idx] = newChild` would store an\n\t\t\t\/\/ interface{} with a non-nil type part, which is non-nil\n\t\t\tm[idx] = nil\n\t\t} else {\n\t\t\tm[idx] = newChild\n\t\t}\n\t\treturn m\n\t} else if idx == 0 {\n\t\treturn nil\n\t} else {\n\t\tm := clone(n)\n\t\tm[idx] = nil\n\t\treturn m\n\t}\n}\n\nfunc (v *vector) SubVector(begin, end int) Vector {\n\tif begin < 0 || begin > end || end > v.count {\n\t\treturn nil\n\t}\n\treturn &subVector{v, begin, end}\n}\n\nfunc (v *vector) Iterator() Iterator {\n\treturn newIterator(v)\n}\n\nfunc (v *vector) MarshalJSON() ([]byte, error) {\n\treturn marshalJSON(v.Iterator())\n}\n\ntype subVector struct {\n\tv *vector\n\tbegin int\n\tend int\n}\n\nfunc (s *subVector) Len() int {\n\treturn s.end - s.begin\n}\n\nfunc (s *subVector) Index(i int) (interface{}, bool) {\n\tif i < 0 || s.begin+i >= s.end {\n\t\treturn nil, false\n\t}\n\treturn s.v.Index(s.begin + i)\n}\n\nfunc (s *subVector) Assoc(i int, val interface{}) Vector {\n\tif i < 0 || s.begin+i > s.end {\n\t\treturn nil\n\t} else if s.begin+i == s.end {\n\t\treturn s.Cons(val)\n\t}\n\treturn s.v.Assoc(s.begin+i, val).SubVector(s.begin, s.end)\n}\n\nfunc (s *subVector) Cons(val interface{}) Vector {\n\treturn s.v.Assoc(s.end, val).SubVector(s.begin, s.end+1)\n}\n\nfunc (s *subVector) Pop() Vector {\n\tswitch s.Len() {\n\tcase 0:\n\t\treturn nil\n\tcase 1:\n\t\treturn Empty\n\tdefault:\n\t\treturn s.v.SubVector(s.begin, s.end-1)\n\t}\n}\n\nfunc (s *subVector) SubVector(i, j int) Vector {\n\treturn s.v.SubVector(s.begin+i, s.begin+j)\n}\n\nfunc (s *subVector) Iterator() Iterator {\n\treturn newIteratorWithRange(s.v, s.begin, s.end)\n}\n\nfunc (s *subVector) MarshalJSON() ([]byte, error) {\n\treturn marshalJSON(s.Iterator())\n}\n\ntype iterator struct {\n\tv *vector\n\ttreeSize int\n\tindex int\n\tend int\n\tpath []pathEntry\n}\n\ntype pathEntry struct {\n\tnode node\n\tindex int\n}\n\nfunc (e pathEntry) current() interface{} {\n\treturn e.node[e.index]\n}\n\nfunc newIterator(v *vector) *iterator {\n\treturn newIteratorWithRange(v, 0, v.Len())\n}\n\nfunc newIteratorWithRange(v *vector, begin, end int) *iterator {\n\tit := &iterator{v, v.treeSize(), begin, end, nil}\n\tif it.index >= it.treeSize {\n\t\treturn it\n\t}\n\t\/\/ Find the node for begin, remembering all nodes along the path.\n\tn := v.root\n\tfor shift := v.height * chunkBits; shift > 0; shift -= chunkBits {\n\t\tidx := (begin >> shift) & chunkMask\n\t\tit.path = append(it.path, pathEntry{n, idx})\n\t\tn = n[idx].(node)\n\t}\n\tit.path = append(it.path, pathEntry{n, begin & chunkMask})\n\treturn it\n}\n\nfunc (it *iterator) Elem() interface{} {\n\tif it.index >= it.treeSize {\n\t\treturn it.v.tail[it.index-it.treeSize]\n\t}\n\treturn it.path[len(it.path)-1].current()\n}\n\nfunc (it *iterator) HasElem() bool {\n\treturn it.index < it.end\n}\n\nfunc (it *iterator) Next() {\n\tif it.index+1 >= it.treeSize {\n\t\t\/\/ Next element is in tail. Just increment the index.\n\t\tit.index++\n\t\treturn\n\t}\n\t\/\/ Find the deepest level that can be advanced.\n\tvar i int\n\tfor i = len(it.path) - 1; i >= 0; i-- {\n\t\te := it.path[i]\n\t\tif e.index+1 < len(e.node) {\n\t\t\tbreak\n\t\t}\n\t}\n\tif i == -1 {\n\t\tpanic(\"cannot advance; vector iterator bug\")\n\t}\n\t\/\/ Advance on this node, and re-populate all deeper levels.\n\tit.path[i].index++\n\tfor i++; i < len(it.path); i++ {\n\t\tit.path[i] = pathEntry{it.path[i-1].current().(node), 0}\n\t}\n\tit.index++\n}\n\ntype marshalError struct {\n\tindex int\n\tcause error\n}\n\nfunc (err *marshalError) Error() string {\n\treturn fmt.Sprintf(\"element %d: %s\", err.index, err.cause)\n}\n\nfunc marshalJSON(it Iterator) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tbuf.WriteByte('[')\n\tindex := 0\n\tfor ; it.HasElem(); it.Next() {\n\t\tif index > 0 {\n\t\t\tbuf.WriteByte(',')\n\t\t}\n\t\telemBytes, err := json.Marshal(it.Elem())\n\t\tif err != nil {\n\t\t\treturn nil, &marshalError{index, err}\n\t\t}\n\t\tbuf.Write(elemBytes)\n\t\tindex++\n\t}\n\tbuf.WriteByte(']')\n\treturn buf.Bytes(), nil\n}\n<commit_msg>Add more info in the vector package's godoc.<commit_after>\/\/ Package vector implements persistent vector.\n\/\/\n\/\/ This is a Go clone of Clojure's PersistentVector type\n\/\/ (https:\/\/github.com\/clojure\/clojure\/blob\/master\/src\/jvm\/clojure\/lang\/PersistentVector.java).\n\/\/ For an introduction to the internals, see\n\/\/ https:\/\/hypirion.com\/musings\/understanding-persistent-vector-pt-1.\npackage vector\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n)\n\nconst (\n\tchunkBits = 5\n\tnodeSize = 1 << chunkBits\n\ttailMaxLen = nodeSize\n\tchunkMask = nodeSize - 1\n)\n\n\/\/ Vector is a persistent sequential container for arbitrary values. It supports\n\/\/ O(1) lookup by index, modification by index, and insertion and removal\n\/\/ operations at the end. Being a persistent variant of the data structure, it\n\/\/ is immutable, and provides O(1) operations to create modified versions of the\n\/\/ vector that shares the underlying data structure, making it suitable for\n\/\/ concurrent access. The empty value is a valid empty vector.\ntype Vector interface {\n\tjson.Marshaler\n\t\/\/ Len returns the length of the vector.\n\tLen() int\n\t\/\/ Index returns the i-th element of the vector, if it exists. The second\n\t\/\/ return value indicates whether the element exists.\n\tIndex(i int) (interface{}, bool)\n\t\/\/ Assoc returns an almost identical Vector, with the i-th element\n\t\/\/ replaced. If the index is smaller than 0 or greater than the length of\n\t\/\/ the vector, it returns nil. If the index is equal to the size of the\n\t\/\/ vector, it is equivalent to Cons.\n\tAssoc(i int, val interface{}) Vector\n\t\/\/ Cons returns an almost identical Vector, with an additional element\n\t\/\/ appended to the end.\n\tCons(val interface{}) Vector\n\t\/\/ Pop returns an almost identical Vector, with the last element removed. It\n\t\/\/ returns nil if the vector is already empty.\n\tPop() Vector\n\t\/\/ SubVector returns a subvector containing the elements from i up to but\n\t\/\/ not including j.\n\tSubVector(i, j int) Vector\n\t\/\/ Iterator returns an iterator over the vector.\n\tIterator() Iterator\n}\n\n\/\/ Iterator is an iterator over vector elements. It can be used like this:\n\/\/\n\/\/ for it := v.Iterator(); it.HasElem(); it.Next() {\n\/\/ elem := it.Elem()\n\/\/ \/\/ do something with elem...\n\/\/ }\ntype Iterator interface {\n\t\/\/ Elem returns the element at the current position.\n\tElem() interface{}\n\t\/\/ HasElem returns whether the iterator is pointing to an element.\n\tHasElem() bool\n\t\/\/ Next moves the iterator to the next position.\n\tNext()\n}\n\ntype vector struct {\n\tcount int\n\t\/\/ height of the tree structure, defined to be 0 when root is a leaf.\n\theight uint\n\troot node\n\ttail []interface{}\n}\n\n\/\/ Empty is an empty Vector.\nvar Empty Vector = &vector{}\n\n\/\/ node is a node in the vector tree. It is always of the size nodeSize.\ntype node *[nodeSize]interface{}\n\nfunc newNode() node {\n\treturn node(&[nodeSize]interface{}{})\n}\n\nfunc clone(n node) node {\n\ta := *n\n\treturn node(&a)\n}\n\nfunc nodeFromSlice(s []interface{}) node {\n\tvar n [nodeSize]interface{}\n\tcopy(n[:], s)\n\treturn &n\n}\n\n\/\/ Count returns the number of elements in a Vector.\nfunc (v *vector) Len() int {\n\treturn v.count\n}\n\n\/\/ treeSize returns the number of elements stored in the tree (as opposed to the\n\/\/ tail).\nfunc (v *vector) treeSize() int {\n\tif v.count < tailMaxLen {\n\t\treturn 0\n\t}\n\treturn ((v.count - 1) >> chunkBits) << chunkBits\n}\n\nfunc (v *vector) Index(i int) (interface{}, bool) {\n\tif i < 0 || i >= v.count {\n\t\treturn nil, false\n\t}\n\n\t\/\/ The following is very similar to sliceFor, but is implemented separately\n\t\/\/ to avoid unncessary copying.\n\tif i >= v.treeSize() {\n\t\treturn v.tail[i&chunkMask], true\n\t}\n\tn := v.root\n\tfor shift := v.height * chunkBits; shift > 0; shift -= chunkBits {\n\t\tn = n[(i>>shift)&chunkMask].(node)\n\t}\n\treturn n[i&chunkMask], true\n}\n\n\/\/ sliceFor returns the slice where the i-th element is stored. The index must\n\/\/ be in bound.\nfunc (v *vector) sliceFor(i int) []interface{} {\n\tif i >= v.treeSize() {\n\t\treturn v.tail\n\t}\n\tn := v.root\n\tfor shift := v.height * chunkBits; shift > 0; shift -= chunkBits {\n\t\tn = n[(i>>shift)&chunkMask].(node)\n\t}\n\treturn n[:]\n}\n\nfunc (v *vector) Assoc(i int, val interface{}) Vector {\n\tif i < 0 || i > v.count {\n\t\treturn nil\n\t} else if i == v.count {\n\t\treturn v.Cons(val)\n\t}\n\tif i >= v.treeSize() {\n\t\tnewTail := append([]interface{}(nil), v.tail...)\n\t\tnewTail[i&chunkMask] = val\n\t\treturn &vector{v.count, v.height, v.root, newTail}\n\t}\n\treturn &vector{v.count, v.height, doAssoc(v.height, v.root, i, val), v.tail}\n}\n\n\/\/ doAssoc returns an almost identical tree, with the i-th element replaced by\n\/\/ val.\nfunc doAssoc(height uint, n node, i int, val interface{}) node {\n\tm := clone(n)\n\tif height == 0 {\n\t\tm[i&chunkMask] = val\n\t} else {\n\t\tsub := (i >> (height * chunkBits)) & chunkMask\n\t\tm[sub] = doAssoc(height-1, m[sub].(node), i, val)\n\t}\n\treturn m\n}\n\nfunc (v *vector) Cons(val interface{}) Vector {\n\t\/\/ Room in tail?\n\tif v.count-v.treeSize() < tailMaxLen {\n\t\tnewTail := make([]interface{}, len(v.tail)+1)\n\t\tcopy(newTail, v.tail)\n\t\tnewTail[len(v.tail)] = val\n\t\treturn &vector{v.count + 1, v.height, v.root, newTail}\n\t}\n\t\/\/ Full tail; push into tree.\n\ttailNode := nodeFromSlice(v.tail)\n\tnewHeight := v.height\n\tvar newRoot node\n\t\/\/ Overflow root?\n\tif (v.count >> chunkBits) > (1 << (v.height * chunkBits)) {\n\t\tnewRoot = newNode()\n\t\tnewRoot[0] = v.root\n\t\tnewRoot[1] = newPath(v.height, tailNode)\n\t\tnewHeight++\n\t} else {\n\t\tnewRoot = v.pushTail(v.height, v.root, tailNode)\n\t}\n\treturn &vector{v.count + 1, newHeight, newRoot, []interface{}{val}}\n}\n\n\/\/ pushTail returns a tree with tail appended.\nfunc (v *vector) pushTail(height uint, n node, tail node) node {\n\tif height == 0 {\n\t\treturn tail\n\t}\n\tidx := ((v.count - 1) >> (height * chunkBits)) & chunkMask\n\tm := clone(n)\n\tchild := n[idx]\n\tif child == nil {\n\t\tm[idx] = newPath(height-1, tail)\n\t} else {\n\t\tm[idx] = v.pushTail(height-1, child.(node), tail)\n\t}\n\treturn m\n}\n\n\/\/ newPath creates a left-branching tree of specified height and leaf.\nfunc newPath(height uint, leaf node) node {\n\tif height == 0 {\n\t\treturn leaf\n\t}\n\tret := newNode()\n\tret[0] = newPath(height-1, leaf)\n\treturn ret\n}\n\nfunc (v *vector) Pop() Vector {\n\tswitch v.count {\n\tcase 0:\n\t\treturn nil\n\tcase 1:\n\t\treturn Empty\n\t}\n\tif v.count-v.treeSize() > 1 {\n\t\tnewTail := make([]interface{}, len(v.tail)-1)\n\t\tcopy(newTail, v.tail)\n\t\treturn &vector{v.count - 1, v.height, v.root, newTail}\n\t}\n\tnewTail := v.sliceFor(v.count - 2)\n\tnewRoot := v.popTail(v.height, v.root)\n\tnewHeight := v.height\n\tif v.height > 0 && newRoot[1] == nil {\n\t\tnewRoot = newRoot[0].(node)\n\t\tnewHeight--\n\t}\n\treturn &vector{v.count - 1, newHeight, newRoot, newTail}\n}\n\n\/\/ popTail returns a new tree with the last leaf removed.\nfunc (v *vector) popTail(level uint, n node) node {\n\tidx := ((v.count - 2) >> (level * chunkBits)) & chunkMask\n\tif level > 1 {\n\t\tnewChild := v.popTail(level-1, n[idx].(node))\n\t\tif newChild == nil && idx == 0 {\n\t\t\treturn nil\n\t\t}\n\t\tm := clone(n)\n\t\tif newChild == nil {\n\t\t\t\/\/ This is needed since `m[idx] = newChild` would store an\n\t\t\t\/\/ interface{} with a non-nil type part, which is non-nil\n\t\t\tm[idx] = nil\n\t\t} else {\n\t\t\tm[idx] = newChild\n\t\t}\n\t\treturn m\n\t} else if idx == 0 {\n\t\treturn nil\n\t} else {\n\t\tm := clone(n)\n\t\tm[idx] = nil\n\t\treturn m\n\t}\n}\n\nfunc (v *vector) SubVector(begin, end int) Vector {\n\tif begin < 0 || begin > end || end > v.count {\n\t\treturn nil\n\t}\n\treturn &subVector{v, begin, end}\n}\n\nfunc (v *vector) Iterator() Iterator {\n\treturn newIterator(v)\n}\n\nfunc (v *vector) MarshalJSON() ([]byte, error) {\n\treturn marshalJSON(v.Iterator())\n}\n\ntype subVector struct {\n\tv *vector\n\tbegin int\n\tend int\n}\n\nfunc (s *subVector) Len() int {\n\treturn s.end - s.begin\n}\n\nfunc (s *subVector) Index(i int) (interface{}, bool) {\n\tif i < 0 || s.begin+i >= s.end {\n\t\treturn nil, false\n\t}\n\treturn s.v.Index(s.begin + i)\n}\n\nfunc (s *subVector) Assoc(i int, val interface{}) Vector {\n\tif i < 0 || s.begin+i > s.end {\n\t\treturn nil\n\t} else if s.begin+i == s.end {\n\t\treturn s.Cons(val)\n\t}\n\treturn s.v.Assoc(s.begin+i, val).SubVector(s.begin, s.end)\n}\n\nfunc (s *subVector) Cons(val interface{}) Vector {\n\treturn s.v.Assoc(s.end, val).SubVector(s.begin, s.end+1)\n}\n\nfunc (s *subVector) Pop() Vector {\n\tswitch s.Len() {\n\tcase 0:\n\t\treturn nil\n\tcase 1:\n\t\treturn Empty\n\tdefault:\n\t\treturn s.v.SubVector(s.begin, s.end-1)\n\t}\n}\n\nfunc (s *subVector) SubVector(i, j int) Vector {\n\treturn s.v.SubVector(s.begin+i, s.begin+j)\n}\n\nfunc (s *subVector) Iterator() Iterator {\n\treturn newIteratorWithRange(s.v, s.begin, s.end)\n}\n\nfunc (s *subVector) MarshalJSON() ([]byte, error) {\n\treturn marshalJSON(s.Iterator())\n}\n\ntype iterator struct {\n\tv *vector\n\ttreeSize int\n\tindex int\n\tend int\n\tpath []pathEntry\n}\n\ntype pathEntry struct {\n\tnode node\n\tindex int\n}\n\nfunc (e pathEntry) current() interface{} {\n\treturn e.node[e.index]\n}\n\nfunc newIterator(v *vector) *iterator {\n\treturn newIteratorWithRange(v, 0, v.Len())\n}\n\nfunc newIteratorWithRange(v *vector, begin, end int) *iterator {\n\tit := &iterator{v, v.treeSize(), begin, end, nil}\n\tif it.index >= it.treeSize {\n\t\treturn it\n\t}\n\t\/\/ Find the node for begin, remembering all nodes along the path.\n\tn := v.root\n\tfor shift := v.height * chunkBits; shift > 0; shift -= chunkBits {\n\t\tidx := (begin >> shift) & chunkMask\n\t\tit.path = append(it.path, pathEntry{n, idx})\n\t\tn = n[idx].(node)\n\t}\n\tit.path = append(it.path, pathEntry{n, begin & chunkMask})\n\treturn it\n}\n\nfunc (it *iterator) Elem() interface{} {\n\tif it.index >= it.treeSize {\n\t\treturn it.v.tail[it.index-it.treeSize]\n\t}\n\treturn it.path[len(it.path)-1].current()\n}\n\nfunc (it *iterator) HasElem() bool {\n\treturn it.index < it.end\n}\n\nfunc (it *iterator) Next() {\n\tif it.index+1 >= it.treeSize {\n\t\t\/\/ Next element is in tail. Just increment the index.\n\t\tit.index++\n\t\treturn\n\t}\n\t\/\/ Find the deepest level that can be advanced.\n\tvar i int\n\tfor i = len(it.path) - 1; i >= 0; i-- {\n\t\te := it.path[i]\n\t\tif e.index+1 < len(e.node) {\n\t\t\tbreak\n\t\t}\n\t}\n\tif i == -1 {\n\t\tpanic(\"cannot advance; vector iterator bug\")\n\t}\n\t\/\/ Advance on this node, and re-populate all deeper levels.\n\tit.path[i].index++\n\tfor i++; i < len(it.path); i++ {\n\t\tit.path[i] = pathEntry{it.path[i-1].current().(node), 0}\n\t}\n\tit.index++\n}\n\ntype marshalError struct {\n\tindex int\n\tcause error\n}\n\nfunc (err *marshalError) Error() string {\n\treturn fmt.Sprintf(\"element %d: %s\", err.index, err.cause)\n}\n\nfunc marshalJSON(it Iterator) ([]byte, error) {\n\tvar buf bytes.Buffer\n\tbuf.WriteByte('[')\n\tindex := 0\n\tfor ; it.HasElem(); it.Next() {\n\t\tif index > 0 {\n\t\t\tbuf.WriteByte(',')\n\t\t}\n\t\telemBytes, err := json.Marshal(it.Elem())\n\t\tif err != nil {\n\t\t\treturn nil, &marshalError{index, err}\n\t\t}\n\t\tbuf.Write(elemBytes)\n\t\tindex++\n\t}\n\tbuf.WriteByte(']')\n\treturn buf.Bytes(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nfunc resourceAwsVpcDhcpOptions() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsVpcDhcpOptionsCreate,\n\t\tRead: resourceAwsVpcDhcpOptionsRead,\n\t\tUpdate: resourceAwsVpcDhcpOptionsUpdate,\n\t\tDelete: resourceAwsVpcDhcpOptionsDelete,\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: schema.ImportStatePassthrough,\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"domain_name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"domain_name_servers\": &schema.Schema{\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t},\n\n\t\t\t\"ntp_servers\": &schema.Schema{\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t},\n\n\t\t\t\"netbios_node_type\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"netbios_name_servers\": &schema.Schema{\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t},\n\n\t\t\t\"tags\": &schema.Schema{\n\t\t\t\tType: schema.TypeMap,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceAwsVpcDhcpOptionsCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ec2conn\n\n\tsetDHCPOption := func(key string) *ec2.NewDhcpConfiguration {\n\t\tlog.Printf(\"[DEBUG] Setting DHCP option %s...\", key)\n\t\ttfKey := strings.Replace(key, \"-\", \"_\", -1)\n\n\t\tvalue, ok := d.GetOk(tfKey)\n\t\tif !ok {\n\t\t\treturn nil\n\t\t}\n\n\t\tif v, ok := value.(string); ok {\n\t\t\treturn &ec2.NewDhcpConfiguration{\n\t\t\t\tKey: aws.String(key),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(v),\n\t\t\t\t},\n\t\t\t}\n\t\t}\n\n\t\tif v, ok := value.([]interface{}); ok {\n\t\t\tvar s []*string\n\t\t\tfor _, attr := range v {\n\t\t\t\ts = append(s, aws.String(attr.(string)))\n\t\t\t}\n\n\t\t\treturn &ec2.NewDhcpConfiguration{\n\t\t\t\tKey: aws.String(key),\n\t\t\t\tValues: s,\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tcreateOpts := &ec2.CreateDhcpOptionsInput{\n\t\tDhcpConfigurations: []*ec2.NewDhcpConfiguration{\n\t\t\tsetDHCPOption(\"domain-name\"),\n\t\t\tsetDHCPOption(\"domain-name-servers\"),\n\t\t\tsetDHCPOption(\"ntp-servers\"),\n\t\t\tsetDHCPOption(\"netbios-node-type\"),\n\t\t\tsetDHCPOption(\"netbios-name-servers\"),\n\t\t},\n\t}\n\n\tresp, err := conn.CreateDhcpOptions(createOpts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating DHCP Options Set: %s\", err)\n\t}\n\n\tdos := resp.DhcpOptions\n\td.SetId(*dos.DhcpOptionsId)\n\tlog.Printf(\"[INFO] DHCP Options Set ID: %s\", d.Id())\n\n\t\/\/ Wait for the DHCP Options to become available\n\tlog.Printf(\"[DEBUG] Waiting for DHCP Options (%s) to become available\", d.Id())\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"pending\"},\n\t\tTarget: []string{\"created\"},\n\t\tRefresh: resourceDHCPOptionsStateRefreshFunc(conn, d.Id()),\n\t\tTimeout: 5 * time.Minute,\n\t}\n\tif _, err := stateConf.WaitForState(); err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"Error waiting for DHCP Options (%s) to become available: %s\",\n\t\t\td.Id(), err)\n\t}\n\n\treturn resourceAwsVpcDhcpOptionsUpdate(d, meta)\n}\n\nfunc resourceAwsVpcDhcpOptionsRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ec2conn\n\treq := &ec2.DescribeDhcpOptionsInput{\n\t\tDhcpOptionsIds: []*string{\n\t\t\taws.String(d.Id()),\n\t\t},\n\t}\n\n\tresp, err := conn.DescribeDhcpOptions(req)\n\tif err != nil {\n\t\tec2err, ok := err.(awserr.Error)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Error retrieving DHCP Options: %s\", err.Error())\n\t\t}\n\n\t\tif ec2err.Code() == \"InvalidDhcpOptionID.NotFound\" {\n\t\t\tlog.Printf(\"[WARN] DHCP Options (%s) not found, removing from state\", d.Id())\n\t\t\td.SetId(\"\")\n\t\t\treturn nil\n\t\t}\n\n\t\treturn fmt.Errorf(\"Error retrieving DHCP Options: %s\", err.Error())\n\t}\n\n\tif len(resp.DhcpOptions) == 0 {\n\t\treturn nil\n\t}\n\n\topts := resp.DhcpOptions[0]\n\td.Set(\"tags\", tagsToMap(opts.Tags))\n\n\tfor _, cfg := range opts.DhcpConfigurations {\n\t\ttfKey := strings.Replace(*cfg.Key, \"-\", \"_\", -1)\n\n\t\tif _, ok := d.Get(tfKey).(string); ok {\n\t\t\td.Set(tfKey, cfg.Values[0].Value)\n\t\t} else {\n\t\t\tvalues := make([]string, 0, len(cfg.Values))\n\t\t\tfor _, v := range cfg.Values {\n\t\t\t\tvalues = append(values, *v.Value)\n\t\t\t}\n\n\t\t\td.Set(tfKey, values)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc resourceAwsVpcDhcpOptionsUpdate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ec2conn\n\treturn setTags(conn, d)\n}\n\nfunc resourceAwsVpcDhcpOptionsDelete(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ec2conn\n\n\treturn resource.Retry(3*time.Minute, func() *resource.RetryError {\n\t\tlog.Printf(\"[INFO] Deleting DHCP Options ID %s...\", d.Id())\n\t\t_, err := conn.DeleteDhcpOptions(&ec2.DeleteDhcpOptionsInput{\n\t\t\tDhcpOptionsId: aws.String(d.Id()),\n\t\t})\n\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tlog.Printf(\"[WARN] %s\", err)\n\n\t\tec2err, ok := err.(awserr.Error)\n\t\tif !ok {\n\t\t\treturn resource.RetryableError(err)\n\t\t}\n\n\t\tswitch ec2err.Code() {\n\t\tcase \"InvalidDhcpOptionsID.NotFound\":\n\t\t\treturn nil\n\t\tcase \"DependencyViolation\":\n\t\t\t\/\/ If it is a dependency violation, we want to disassociate\n\t\t\t\/\/ all VPCs using the given DHCP Options ID, and retry deleting.\n\t\t\tvpcs, err2 := findVPCsByDHCPOptionsID(conn, d.Id())\n\t\t\tif err2 != nil {\n\t\t\t\tlog.Printf(\"[ERROR] %s\", err2)\n\t\t\t\treturn resource.RetryableError(err2)\n\t\t\t}\n\n\t\t\tfor _, vpc := range vpcs {\n\t\t\t\tlog.Printf(\"[INFO] Disassociating DHCP Options Set %s from VPC %s...\", d.Id(), *vpc.VpcId)\n\t\t\t\tif _, err := conn.AssociateDhcpOptions(&ec2.AssociateDhcpOptionsInput{\n\t\t\t\t\tDhcpOptionsId: aws.String(\"default\"),\n\t\t\t\t\tVpcId: vpc.VpcId,\n\t\t\t\t}); err != nil {\n\t\t\t\t\treturn resource.RetryableError(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn resource.RetryableError(err)\n\t\tdefault:\n\t\t\treturn resource.NonRetryableError(err)\n\t\t}\n\t})\n}\n\nfunc findVPCsByDHCPOptionsID(conn *ec2.EC2, id string) ([]*ec2.Vpc, error) {\n\treq := &ec2.DescribeVpcsInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t&ec2.Filter{\n\t\t\t\tName: aws.String(\"dhcp-options-id\"),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(id),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tresp, err := conn.DescribeVpcs(req)\n\tif err != nil {\n\t\tif ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == \"InvalidVpcID.NotFound\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\treturn resp.Vpcs, nil\n}\n\nfunc resourceDHCPOptionsStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc {\n\treturn func() (interface{}, string, error) {\n\t\tDescribeDhcpOpts := &ec2.DescribeDhcpOptionsInput{\n\t\t\tDhcpOptionsIds: []*string{\n\t\t\t\taws.String(id),\n\t\t\t},\n\t\t}\n\n\t\tresp, err := conn.DescribeDhcpOptions(DescribeDhcpOpts)\n\t\tif err != nil {\n\t\t\tif ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == \"InvalidDhcpOptionsID.NotFound\" {\n\t\t\t\tresp = nil\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Error on DHCPOptionsStateRefresh: %s\", err)\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t}\n\n\t\tif resp == nil {\n\t\t\t\/\/ Sometimes AWS just has consistency issues and doesn't see\n\t\t\t\/\/ our instance yet. Return an empty state.\n\t\t\treturn nil, \"\", nil\n\t\t}\n\n\t\tdos := resp.DhcpOptions[0]\n\t\treturn dos, \"created\", nil\n\t}\n}\n<commit_msg>Update not found error message for DescribeDhcpOptions.<commit_after>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nfunc resourceAwsVpcDhcpOptions() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsVpcDhcpOptionsCreate,\n\t\tRead: resourceAwsVpcDhcpOptionsRead,\n\t\tUpdate: resourceAwsVpcDhcpOptionsUpdate,\n\t\tDelete: resourceAwsVpcDhcpOptionsDelete,\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: schema.ImportStatePassthrough,\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"domain_name\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"domain_name_servers\": &schema.Schema{\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t},\n\n\t\t\t\"ntp_servers\": &schema.Schema{\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t},\n\n\t\t\t\"netbios_node_type\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"netbios_name_servers\": &schema.Schema{\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t},\n\n\t\t\t\"tags\": &schema.Schema{\n\t\t\t\tType: schema.TypeMap,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceAwsVpcDhcpOptionsCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ec2conn\n\n\tsetDHCPOption := func(key string) *ec2.NewDhcpConfiguration {\n\t\tlog.Printf(\"[DEBUG] Setting DHCP option %s...\", key)\n\t\ttfKey := strings.Replace(key, \"-\", \"_\", -1)\n\n\t\tvalue, ok := d.GetOk(tfKey)\n\t\tif !ok {\n\t\t\treturn nil\n\t\t}\n\n\t\tif v, ok := value.(string); ok {\n\t\t\treturn &ec2.NewDhcpConfiguration{\n\t\t\t\tKey: aws.String(key),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(v),\n\t\t\t\t},\n\t\t\t}\n\t\t}\n\n\t\tif v, ok := value.([]interface{}); ok {\n\t\t\tvar s []*string\n\t\t\tfor _, attr := range v {\n\t\t\t\ts = append(s, aws.String(attr.(string)))\n\t\t\t}\n\n\t\t\treturn &ec2.NewDhcpConfiguration{\n\t\t\t\tKey: aws.String(key),\n\t\t\t\tValues: s,\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tcreateOpts := &ec2.CreateDhcpOptionsInput{\n\t\tDhcpConfigurations: []*ec2.NewDhcpConfiguration{\n\t\t\tsetDHCPOption(\"domain-name\"),\n\t\t\tsetDHCPOption(\"domain-name-servers\"),\n\t\t\tsetDHCPOption(\"ntp-servers\"),\n\t\t\tsetDHCPOption(\"netbios-node-type\"),\n\t\t\tsetDHCPOption(\"netbios-name-servers\"),\n\t\t},\n\t}\n\n\tresp, err := conn.CreateDhcpOptions(createOpts)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error creating DHCP Options Set: %s\", err)\n\t}\n\n\tdos := resp.DhcpOptions\n\td.SetId(*dos.DhcpOptionsId)\n\tlog.Printf(\"[INFO] DHCP Options Set ID: %s\", d.Id())\n\n\t\/\/ Wait for the DHCP Options to become available\n\tlog.Printf(\"[DEBUG] Waiting for DHCP Options (%s) to become available\", d.Id())\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"pending\"},\n\t\tTarget: []string{\"created\"},\n\t\tRefresh: resourceDHCPOptionsStateRefreshFunc(conn, d.Id()),\n\t\tTimeout: 5 * time.Minute,\n\t}\n\tif _, err := stateConf.WaitForState(); err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"Error waiting for DHCP Options (%s) to become available: %s\",\n\t\t\td.Id(), err)\n\t}\n\n\treturn resourceAwsVpcDhcpOptionsUpdate(d, meta)\n}\n\nfunc resourceAwsVpcDhcpOptionsRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ec2conn\n\treq := &ec2.DescribeDhcpOptionsInput{\n\t\tDhcpOptionsIds: []*string{\n\t\t\taws.String(d.Id()),\n\t\t},\n\t}\n\n\tresp, err := conn.DescribeDhcpOptions(req)\n\tif err != nil {\n\t\tec2err, ok := err.(awserr.Error)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Error retrieving DHCP Options: %s\", err.Error())\n\t\t}\n\n\t\tif ec2err.Code() == \"InvalidDhcpOptionID.NotFound\" {\n\t\t\tlog.Printf(\"[WARN] DHCP Options (%s) not found, removing from state\", d.Id())\n\t\t\td.SetId(\"\")\n\t\t\treturn nil\n\t\t}\n\n\t\treturn fmt.Errorf(\"Error retrieving DHCP Options: %s\", err.Error())\n\t}\n\n\tif len(resp.DhcpOptions) == 0 {\n\t\treturn nil\n\t}\n\n\topts := resp.DhcpOptions[0]\n\td.Set(\"tags\", tagsToMap(opts.Tags))\n\n\tfor _, cfg := range opts.DhcpConfigurations {\n\t\ttfKey := strings.Replace(*cfg.Key, \"-\", \"_\", -1)\n\n\t\tif _, ok := d.Get(tfKey).(string); ok {\n\t\t\td.Set(tfKey, cfg.Values[0].Value)\n\t\t} else {\n\t\t\tvalues := make([]string, 0, len(cfg.Values))\n\t\t\tfor _, v := range cfg.Values {\n\t\t\t\tvalues = append(values, *v.Value)\n\t\t\t}\n\n\t\t\td.Set(tfKey, values)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc resourceAwsVpcDhcpOptionsUpdate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ec2conn\n\treturn setTags(conn, d)\n}\n\nfunc resourceAwsVpcDhcpOptionsDelete(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ec2conn\n\n\treturn resource.Retry(3*time.Minute, func() *resource.RetryError {\n\t\tlog.Printf(\"[INFO] Deleting DHCP Options ID %s...\", d.Id())\n\t\t_, err := conn.DeleteDhcpOptions(&ec2.DeleteDhcpOptionsInput{\n\t\t\tDhcpOptionsId: aws.String(d.Id()),\n\t\t})\n\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tlog.Printf(\"[WARN] %s\", err)\n\n\t\tec2err, ok := err.(awserr.Error)\n\t\tif !ok {\n\t\t\treturn resource.RetryableError(err)\n\t\t}\n\n\t\tswitch ec2err.Code() {\n\t\tcase \"InvalidDhcpOptionsID.NotFound\":\n\t\t\treturn nil\n\t\tcase \"DependencyViolation\":\n\t\t\t\/\/ If it is a dependency violation, we want to disassociate\n\t\t\t\/\/ all VPCs using the given DHCP Options ID, and retry deleting.\n\t\t\tvpcs, err2 := findVPCsByDHCPOptionsID(conn, d.Id())\n\t\t\tif err2 != nil {\n\t\t\t\tlog.Printf(\"[ERROR] %s\", err2)\n\t\t\t\treturn resource.RetryableError(err2)\n\t\t\t}\n\n\t\t\tfor _, vpc := range vpcs {\n\t\t\t\tlog.Printf(\"[INFO] Disassociating DHCP Options Set %s from VPC %s...\", d.Id(), *vpc.VpcId)\n\t\t\t\tif _, err := conn.AssociateDhcpOptions(&ec2.AssociateDhcpOptionsInput{\n\t\t\t\t\tDhcpOptionsId: aws.String(\"default\"),\n\t\t\t\t\tVpcId: vpc.VpcId,\n\t\t\t\t}); err != nil {\n\t\t\t\t\treturn resource.RetryableError(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn resource.RetryableError(err)\n\t\tdefault:\n\t\t\treturn resource.NonRetryableError(err)\n\t\t}\n\t})\n}\n\nfunc findVPCsByDHCPOptionsID(conn *ec2.EC2, id string) ([]*ec2.Vpc, error) {\n\treq := &ec2.DescribeVpcsInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t&ec2.Filter{\n\t\t\t\tName: aws.String(\"dhcp-options-id\"),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(id),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tresp, err := conn.DescribeVpcs(req)\n\tif err != nil {\n\t\tif ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == \"InvalidVpcID.NotFound\" {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\treturn resp.Vpcs, nil\n}\n\nfunc resourceDHCPOptionsStateRefreshFunc(conn *ec2.EC2, id string) resource.StateRefreshFunc {\n\treturn func() (interface{}, string, error) {\n\t\tDescribeDhcpOpts := &ec2.DescribeDhcpOptionsInput{\n\t\t\tDhcpOptionsIds: []*string{\n\t\t\t\taws.String(id),\n\t\t\t},\n\t\t}\n\n\t\tresp, err := conn.DescribeDhcpOptions(DescribeDhcpOpts)\n\t\tif err != nil {\n\t\t\tif ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == \"InvalidDhcpOptionID.NotFound\" {\n\t\t\t\tresp = nil\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"Error on DHCPOptionsStateRefresh: %s\", err)\n\t\t\t\treturn nil, \"\", err\n\t\t\t}\n\t\t}\n\n\t\tif resp == nil {\n\t\t\t\/\/ Sometimes AWS just has consistency issues and doesn't see\n\t\t\t\/\/ our instance yet. Return an empty state.\n\t\t\treturn nil, \"\", nil\n\t\t}\n\n\t\tdos := resp.DhcpOptions[0]\n\t\treturn dos, \"created\", nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package manta\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"path\"\n\n\tuuid \"github.com\/hashicorp\/go-uuid\"\n\t\"github.com\/hashicorp\/terraform\/state\"\n\t\"github.com\/hashicorp\/terraform\/state\/remote\"\n\ttritonErrors \"github.com\/joyent\/triton-go\/errors\"\n\t\"github.com\/joyent\/triton-go\/storage\"\n)\n\nconst (\n\tmantaDefaultRootStore = \"\/stor\"\n\tlockFileName = \"tflock\"\n)\n\ntype RemoteClient struct {\n\tstorageClient *storage.StorageClient\n\tdirectoryName string\n\tkeyName string\n\tstatePath string\n}\n\nfunc (c *RemoteClient) Get() (*remote.Payload, error) {\n\toutput, err := c.storageClient.Objects().Get(context.Background(), &storage.GetObjectInput{\n\t\tObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, c.keyName),\n\t})\n\tif err != nil {\n\t\tif tritonErrors.IsResourceNotFound(err) {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\tdefer output.ObjectReader.Close()\n\n\tbuf := bytes.NewBuffer(nil)\n\tif _, err := io.Copy(buf, output.ObjectReader); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to read remote state: %s\", err)\n\t}\n\n\tpayload := &remote.Payload{\n\t\tData: buf.Bytes(),\n\t}\n\n\t\/\/ If there was no data, then return nil\n\tif len(payload.Data) == 0 {\n\t\treturn nil, nil\n\t}\n\n\treturn payload, nil\n\n}\n\nfunc (c *RemoteClient) Put(data []byte) error {\n\tcontentType := \"application\/json\"\n\tcontentLength := int64(len(data))\n\n\tparams := &storage.PutObjectInput{\n\t\tContentType: contentType,\n\t\tContentLength: uint64(contentLength),\n\t\tObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, c.keyName),\n\t\tObjectReader: bytes.NewReader(data),\n\t}\n\n\tlog.Printf(\"[DEBUG] Uploading remote state to Manta: %#v\", params)\n\terr := c.storageClient.Objects().Put(context.Background(), params)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *RemoteClient) Delete() error {\n\terr := c.storageClient.Objects().Delete(context.Background(), &storage.DeleteObjectInput{\n\t\tObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, c.keyName),\n\t})\n\n\treturn err\n}\n\nfunc (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {\n\t\/\/At Joyent, we want to make sure that the State directory exists before we interact with it\n\t\/\/We don't expect users to have to create it in advance\n\t\/\/The order of operations of Backend State as follows:\n\t\/\/ * Get - if this doesn't exist then we continue as though it's new\n\t\/\/ * Lock - we make sure that the state directory exists as it's the entrance to writing to Manta\n\t\/\/ * Put - put the state up there\n\t\/\/ * Unlock - unlock the directory\n\t\/\/We can always guarantee that the user can put their state in the specified location because of this\n\terr := c.storageClient.Dir().Put(context.Background(), &storage.PutDirectoryInput{\n\t\tDirectoryName: path.Join(mantaDefaultRootStore, c.directoryName),\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/firstly we want to check that a lock doesn't already exist\n\tlockErr := &state.LockError{}\n\tlockInfo, err := c.getLockInfo()\n\tif err != nil {\n\t\tif !tritonErrors.IsResourceNotFound(err) {\n\t\t\tlockErr.Err = fmt.Errorf(\"failed to retrieve lock info: %s\", err)\n\t\t\treturn \"\", lockErr\n\t\t}\n\t}\n\n\tif lockInfo != nil {\n\t\tlockErr := &state.LockError{\n\t\t\tErr: fmt.Errorf(\"A lock is already acquired\"),\n\t\t\tInfo: lockInfo,\n\t\t}\n\t\treturn \"\", lockErr\n\t}\n\n\tinfo.Path = path.Join(c.directoryName, lockFileName)\n\n\tif info.ID == \"\" {\n\t\tlockID, err := uuid.GenerateUUID()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tinfo.ID = lockID\n\t}\n\n\tdata := info.Marshal()\n\n\tcontentType := \"application\/json\"\n\tcontentLength := int64(len(data))\n\n\tparams := &storage.PutObjectInput{\n\t\tContentType: contentType,\n\t\tContentLength: uint64(contentLength),\n\t\tObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, lockFileName),\n\t\tObjectReader: bytes.NewReader(data),\n\t}\n\n\tlog.Printf(\"[DEBUG] Creating manta state lock: %#v\", params)\n\terr = c.storageClient.Objects().Put(context.Background(), params)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn info.ID, nil\n}\n\nfunc (c *RemoteClient) Unlock(id string) error {\n\tlockErr := &state.LockError{}\n\n\tlockInfo, err := c.getLockInfo()\n\tif err != nil {\n\t\tlockErr.Err = fmt.Errorf(\"failed to retrieve lock info: %s\", err)\n\t\treturn lockErr\n\t}\n\tlockErr.Info = lockInfo\n\n\tif lockInfo.ID != id {\n\t\tlockErr.Err = fmt.Errorf(\"lock id %q does not match existing lock\", id)\n\t\treturn lockErr\n\t}\n\n\terr = c.storageClient.Objects().Delete(context.Background(), &storage.DeleteObjectInput{\n\t\tObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, lockFileName),\n\t})\n\n\treturn err\n}\n\nfunc (c *RemoteClient) getLockInfo() (*state.LockInfo, error) {\n\toutput, err := c.storageClient.Objects().Get(context.Background(), &storage.GetObjectInput{\n\t\tObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, lockFileName),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer output.ObjectReader.Close()\n\n\tbuf := bytes.NewBuffer(nil)\n\tif _, err := io.Copy(buf, output.ObjectReader); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to read lock info: %s\", err)\n\t}\n\n\tlockInfo := &state.LockInfo{}\n\terr = json.Unmarshal(buf.Bytes(), lockInfo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn lockInfo, nil\n}\n<commit_msg>backend\/manta: Add support for manta backend tiered path structure<commit_after>package manta\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"path\"\n\n\tuuid \"github.com\/hashicorp\/go-uuid\"\n\t\"github.com\/hashicorp\/terraform\/state\"\n\t\"github.com\/hashicorp\/terraform\/state\/remote\"\n\ttritonErrors \"github.com\/joyent\/triton-go\/errors\"\n\t\"github.com\/joyent\/triton-go\/storage\"\n)\n\nconst (\n\tmantaDefaultRootStore = \"\/stor\"\n\tlockFileName = \"tflock\"\n)\n\ntype RemoteClient struct {\n\tstorageClient *storage.StorageClient\n\tdirectoryName string\n\tkeyName string\n\tstatePath string\n}\n\nfunc (c *RemoteClient) Get() (*remote.Payload, error) {\n\toutput, err := c.storageClient.Objects().Get(context.Background(), &storage.GetObjectInput{\n\t\tObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, c.keyName),\n\t})\n\tif err != nil {\n\t\tif tritonErrors.IsResourceNotFound(err) {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\tdefer output.ObjectReader.Close()\n\n\tbuf := bytes.NewBuffer(nil)\n\tif _, err := io.Copy(buf, output.ObjectReader); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to read remote state: %s\", err)\n\t}\n\n\tpayload := &remote.Payload{\n\t\tData: buf.Bytes(),\n\t}\n\n\t\/\/ If there was no data, then return nil\n\tif len(payload.Data) == 0 {\n\t\treturn nil, nil\n\t}\n\n\treturn payload, nil\n\n}\n\nfunc (c *RemoteClient) Put(data []byte) error {\n\tcontentType := \"application\/json\"\n\tcontentLength := int64(len(data))\n\n\tparams := &storage.PutObjectInput{\n\t\tContentType: contentType,\n\t\tContentLength: uint64(contentLength),\n\t\tObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, c.keyName),\n\t\tObjectReader: bytes.NewReader(data),\n\t}\n\n\tlog.Printf(\"[DEBUG] Uploading remote state to Manta: %#v\", params)\n\terr := c.storageClient.Objects().Put(context.Background(), params)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *RemoteClient) Delete() error {\n\terr := c.storageClient.Objects().Delete(context.Background(), &storage.DeleteObjectInput{\n\t\tObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, c.keyName),\n\t})\n\n\treturn err\n}\n\nfunc (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {\n\t\/\/At Joyent, we want to make sure that the State directory exists before we interact with it\n\t\/\/We don't expect users to have to create it in advance\n\t\/\/The order of operations of Backend State as follows:\n\t\/\/ * Get - if this doesn't exist then we continue as though it's new\n\t\/\/ * Lock - we make sure that the state directory exists as it's the entrance to writing to Manta\n\t\/\/ * Put - put the state up there\n\t\/\/ * Unlock - unlock the directory\n\t\/\/We can always guarantee that the user can put their state in the specified location because of this\n\terr := c.storageClient.Dir().Put(context.Background(), &storage.PutDirectoryInput{\n\t\tDirectoryName: path.Join(mantaDefaultRootStore, c.directoryName),\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/firstly we want to check that a lock doesn't already exist\n\tlockErr := &state.LockError{}\n\tlockInfo, err := c.getLockInfo()\n\tif err != nil {\n\t\tif !tritonErrors.IsResourceNotFound(err) {\n\t\t\tlockErr.Err = fmt.Errorf(\"failed to retrieve lock info: %s\", err)\n\t\t\treturn \"\", lockErr\n\t\t}\n\t}\n\n\tif lockInfo != nil {\n\t\tlockErr := &state.LockError{\n\t\t\tErr: fmt.Errorf(\"A lock is already acquired\"),\n\t\t\tInfo: lockInfo,\n\t\t}\n\t\treturn \"\", lockErr\n\t}\n\n\tinfo.Path = path.Join(c.directoryName, lockFileName)\n\n\tif info.ID == \"\" {\n\t\tlockID, err := uuid.GenerateUUID()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tinfo.ID = lockID\n\t}\n\n\tdata := info.Marshal()\n\n\tcontentType := \"application\/json\"\n\tcontentLength := int64(len(data))\n\n\tparams := &storage.PutObjectInput{\n\t\tContentType: contentType,\n\t\tContentLength: uint64(contentLength),\n\t\tObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, lockFileName),\n\t\tObjectReader: bytes.NewReader(data),\n\t\tForceInsert: true,\n\t}\n\n\tlog.Printf(\"[DEBUG] Creating manta state lock: %#v\", params)\n\terr = c.storageClient.Objects().Put(context.Background(), params)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn info.ID, nil\n}\n\nfunc (c *RemoteClient) Unlock(id string) error {\n\tlockErr := &state.LockError{}\n\n\tlockInfo, err := c.getLockInfo()\n\tif err != nil {\n\t\tlockErr.Err = fmt.Errorf(\"failed to retrieve lock info: %s\", err)\n\t\treturn lockErr\n\t}\n\tlockErr.Info = lockInfo\n\n\tif lockInfo.ID != id {\n\t\tlockErr.Err = fmt.Errorf(\"lock id %q does not match existing lock\", id)\n\t\treturn lockErr\n\t}\n\n\terr = c.storageClient.Objects().Delete(context.Background(), &storage.DeleteObjectInput{\n\t\tObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, lockFileName),\n\t})\n\n\treturn err\n}\n\nfunc (c *RemoteClient) getLockInfo() (*state.LockInfo, error) {\n\toutput, err := c.storageClient.Objects().Get(context.Background(), &storage.GetObjectInput{\n\t\tObjectPath: path.Join(mantaDefaultRootStore, c.directoryName, lockFileName),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer output.ObjectReader.Close()\n\n\tbuf := bytes.NewBuffer(nil)\n\tif _, err := io.Copy(buf, output.ObjectReader); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to read lock info: %s\", err)\n\t}\n\n\tlockInfo := &state.LockInfo{}\n\terr = json.Unmarshal(buf.Bytes(), lockInfo)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn lockInfo, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2021 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage managedfields\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\n\t\"sigs.k8s.io\/structured-merge-diff\/v4\/fieldpath\"\n\t\"sigs.k8s.io\/structured-merge-diff\/v4\/typed\"\n\n\t\"k8s.io\/apimachinery\/pkg\/api\/meta\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n)\n\n\/\/ ExtractInto extracts the applied configuration state from object for fieldManager\n\/\/ into applyConfiguration. If no managed fields are found for the given fieldManager,\n\/\/ no error is returned, but applyConfiguration is left unpopulated. It is possible\n\/\/ that no managed fields were found for the fieldManager because other field managers\n\/\/ have taken ownership of all the fields previously owned by the fieldManager. It is\n\/\/ also possible the fieldManager never owned fields.\n\/\/\n\/\/ The provided object MUST bo a root resource object since subresource objects\n\/\/ do not contain their own managed fields. For example, an autoscaling.Scale\n\/\/ object read from a \"scale\" subresource does not have any managed fields and so\n\/\/ cannot be used as the object.\n\/\/\n\/\/ If the fields of a subresource are a subset of the fields of the root object,\n\/\/ and their field paths and types are exactly the same, then ExtractInto can be\n\/\/ called with the root resource as the object and the subresource as the\n\/\/ applyConfiguration. This works for \"status\", obviously, because status is\n\/\/ represented by the exact same object as the root resource. This this does NOT\n\/\/ work, for example, with the \"scale\" subresources of Deployment, ReplicaSet and\n\/\/ StatefulSet. While the spec.replicas, status.replicas fields are in the same\n\/\/ exact field path locations as they are in autoscaling.Scale, the selector\n\/\/ fields are in different locations, and are a different type.\nfunc ExtractInto(object runtime.Object, objectType typed.ParseableType, fieldManager string, applyConfiguration interface{}, subresource string) error {\n\ttypedObj, err := toTyped(object, objectType)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error converting obj to typed: %w\", err)\n\t}\n\n\taccessor, err := meta.Accessor(object)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error accessing metadata: %w\", err)\n\t}\n\tfieldsEntry, ok := findManagedFields(accessor, fieldManager, subresource)\n\tif !ok {\n\t\treturn nil\n\t}\n\tfieldset := &fieldpath.Set{}\n\terr = fieldset.FromJSON(bytes.NewReader(fieldsEntry.FieldsV1.Raw))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error marshalling FieldsV1 to JSON: %w\", err)\n\t}\n\n\tu := typedObj.ExtractItems(fieldset.Leaves()).AsValue().Unstructured()\n\tm, ok := u.(map[string]interface{})\n\tif !ok {\n\t\treturn fmt.Errorf(\"unable to convert managed fields for %s to unstructured, expected map, got %T\", fieldManager, u)\n\t}\n\n\t\/\/ set the type meta manually if it doesn't exist to avoid missing kind errors\n\t\/\/ when decoding from unstructured JSON\n\tif _, ok := m[\"kind\"]; !ok {\n\t\tm[\"kind\"] = object.GetObjectKind().GroupVersionKind().Kind\n\t\tm[\"apiVersion\"] = object.GetObjectKind().GroupVersionKind().GroupVersion().String()\n\t}\n\tif err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, applyConfiguration); err != nil {\n\t\treturn fmt.Errorf(\"error extracting into obj from unstructured: %w\", err)\n\t}\n\treturn nil\n}\n\nfunc findManagedFields(accessor metav1.Object, fieldManager string, subresource string) (metav1.ManagedFieldsEntry, bool) {\n\tobjManagedFields := accessor.GetManagedFields()\n\tfor _, mf := range objManagedFields {\n\t\tif mf.Manager == fieldManager && mf.Operation == metav1.ManagedFieldsOperationApply && mf.Subresource == subresource {\n\t\t\treturn mf, true\n\t\t}\n\t}\n\treturn metav1.ManagedFieldsEntry{}, false\n}\n\nfunc toTyped(obj runtime.Object, objectType typed.ParseableType) (*typed.TypedValue, error) {\n\tswitch o := obj.(type) {\n\tcase *unstructured.Unstructured:\n\t\treturn objectType.FromUnstructured(o.Object)\n\tdefault:\n\t\treturn objectType.FromStructured(o)\n\t}\n}\n<commit_msg>fix extract_test<commit_after>\/*\nCopyright 2021 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage managedfields\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\n\t\"sigs.k8s.io\/structured-merge-diff\/v4\/fieldpath\"\n\t\"sigs.k8s.io\/structured-merge-diff\/v4\/typed\"\n\n\t\"k8s.io\/apimachinery\/pkg\/api\/meta\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\/unstructured\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n)\n\n\/\/ ExtractInto extracts the applied configuration state from object for fieldManager\n\/\/ into applyConfiguration. If no managed fields are found for the given fieldManager,\n\/\/ no error is returned, but applyConfiguration is left unpopulated. It is possible\n\/\/ that no managed fields were found for the fieldManager because other field managers\n\/\/ have taken ownership of all the fields previously owned by the fieldManager. It is\n\/\/ also possible the fieldManager never owned fields.\n\/\/\n\/\/ The provided object MUST bo a root resource object since subresource objects\n\/\/ do not contain their own managed fields. For example, an autoscaling.Scale\n\/\/ object read from a \"scale\" subresource does not have any managed fields and so\n\/\/ cannot be used as the object.\n\/\/\n\/\/ If the fields of a subresource are a subset of the fields of the root object,\n\/\/ and their field paths and types are exactly the same, then ExtractInto can be\n\/\/ called with the root resource as the object and the subresource as the\n\/\/ applyConfiguration. This works for \"status\", obviously, because status is\n\/\/ represented by the exact same object as the root resource. This this does NOT\n\/\/ work, for example, with the \"scale\" subresources of Deployment, ReplicaSet and\n\/\/ StatefulSet. While the spec.replicas, status.replicas fields are in the same\n\/\/ exact field path locations as they are in autoscaling.Scale, the selector\n\/\/ fields are in different locations, and are a different type.\nfunc ExtractInto(object runtime.Object, objectType typed.ParseableType, fieldManager string, applyConfiguration interface{}, subresource string) error {\n\ttypedObj, err := toTyped(object, objectType)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error converting obj to typed: %w\", err)\n\t}\n\n\taccessor, err := meta.Accessor(object)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error accessing metadata: %w\", err)\n\t}\n\tfieldsEntry, ok := findManagedFields(accessor, fieldManager, subresource)\n\tif !ok {\n\t\treturn nil\n\t}\n\tfieldset := &fieldpath.Set{}\n\terr = fieldset.FromJSON(bytes.NewReader(fieldsEntry.FieldsV1.Raw))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error marshalling FieldsV1 to JSON: %w\", err)\n\t}\n\n\tu := typedObj.ExtractItems(fieldset.Leaves()).AsValue().Unstructured()\n\tm, ok := u.(map[string]interface{})\n\tif !ok {\n\t\treturn fmt.Errorf(\"unable to convert managed fields for %s to unstructured, expected map, got %T\", fieldManager, u)\n\t}\n\n\t\/\/ set the type meta manually if it doesn't exist to avoid missing kind errors\n\t\/\/ when decoding from unstructured JSON\n\tif _, ok := m[\"kind\"]; !ok && object.GetObjectKind().GroupVersionKind().Kind != \"\" {\n\t\tm[\"kind\"] = object.GetObjectKind().GroupVersionKind().Kind\n\t\tm[\"apiVersion\"] = object.GetObjectKind().GroupVersionKind().GroupVersion().String()\n\t}\n\tif err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, applyConfiguration); err != nil {\n\t\treturn fmt.Errorf(\"error extracting into obj from unstructured: %w\", err)\n\t}\n\treturn nil\n}\n\nfunc findManagedFields(accessor metav1.Object, fieldManager string, subresource string) (metav1.ManagedFieldsEntry, bool) {\n\tobjManagedFields := accessor.GetManagedFields()\n\tfor _, mf := range objManagedFields {\n\t\tif mf.Manager == fieldManager && mf.Operation == metav1.ManagedFieldsOperationApply && mf.Subresource == subresource {\n\t\t\treturn mf, true\n\t\t}\n\t}\n\treturn metav1.ManagedFieldsEntry{}, false\n}\n\nfunc toTyped(obj runtime.Object, objectType typed.ParseableType) (*typed.TypedValue, error) {\n\tswitch o := obj.(type) {\n\tcase *unstructured.Unstructured:\n\t\treturn objectType.FromUnstructured(o.Object)\n\tdefault:\n\t\treturn objectType.FromStructured(o)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage syscall_test\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"syscall\"\n\t\"testing\"\n)\n\nfunc TestWin32finddata(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"go-build\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create temp directory: %v\", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tpath := filepath.Join(dir, \"long_name.and_extension\")\n\tf, err := os.Create(path)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create %v: %v\", path, err)\n\t}\n\tf.Close()\n\n\ttype X struct {\n\t\tfd syscall.Win32finddata1\n\t\tgot byte\n\t\tpad [10]byte \/\/ to protect ourselves\n\n\t}\n\tvar want byte = 2 \/\/ it is unlikely to have this character in the filename\n\tx := X{got: want}\n\n\th, err := syscall.FindFirstFile1(syscall.StringToUTF16Ptr(path), &(x.fd))\n\tif err != nil {\n\t\tt.Fatalf(\"FindFirstFile failed: %v\", err)\n\t}\n\terr = syscall.FindClose(h)\n\tif err != nil {\n\t\tt.Fatalf(\"FindClose failed: %v\", err)\n\t}\n\n\tif x.got != want {\n\t\tt.Fatalf(\"memory corruption: want=%d got=%d\", want, x.got)\n\t}\n}\n<commit_msg>syscall: fix windows build<commit_after>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage syscall_test\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"syscall\"\n\t\"testing\"\n)\n\nfunc TestWin32finddata(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"go-build\")\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create temp directory: %v\", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\tpath := filepath.Join(dir, \"long_name.and_extension\")\n\tf, err := os.Create(path)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to create %v: %v\", path, err)\n\t}\n\tf.Close()\n\n\ttype X struct {\n\t\tfd syscall.Win32finddata\n\t\tgot byte\n\t\tpad [10]byte \/\/ to protect ourselves\n\n\t}\n\tvar want byte = 2 \/\/ it is unlikely to have this character in the filename\n\tx := X{got: want}\n\n\th, err := syscall.FindFirstFile(syscall.StringToUTF16Ptr(path), &(x.fd))\n\tif err != nil {\n\t\tt.Fatalf(\"FindFirstFile failed: %v\", err)\n\t}\n\terr = syscall.FindClose(h)\n\tif err != nil {\n\t\tt.Fatalf(\"FindClose failed: %v\", err)\n\t}\n\n\tif x.got != want {\n\t\tt.Fatalf(\"memory corruption: want=%d got=%d\", want, x.got)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package ofutils\n\nimport (\n\t\"crypto\/md5\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/astaxie\/beego\/orm\"\n)\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n}\n\nvar letterRunes = []rune(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\")\n\nfunc RandString(n int) string {\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letterRunes[rand.Intn(len(letterRunes))]\n\t}\n\treturn string(b)\n}\n\nfunc MD5(text string) string {\n\thasher := md5.New()\n\thasher.Write([]byte(text))\n\treturn hex.EncodeToString(hasher.Sum(nil))\n}\n\nfunc ToString(str interface{}) string {\n\tif str == nil {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"%v\", str)\n}\n\nfunc ToInt(val interface{}) int {\n\tif val == nil {\n\t\treturn 0\n\t}\n\ts, ok := val.(string)\n\tif ok {\n\t\ti, err := strconv.Atoi(s)\n\t\tif err != nil {\n\t\t\treturn 0\n\t\t}\n\t\treturn i\n\t}\n\ti, ok := val.(int)\n\tif ok {\n\t\treturn i\n\t} else {\n\t\treturn 0\n\t}\n}\n\nfunc GetWeekFirstDay() string {\n\tweek := time.Now().Weekday().String()\n\tvar day time.Duration\n\tswitch week {\n\tcase \"Sunday\":\n\t\tday = 6\n\t\tbreak\n\tcase \"Monday\":\n\t\tday = 0\n\t\tbreak\n\tcase \"Tuesday\":\n\t\tday = 1\n\t\tbreak\n\tcase \"Wednesday\":\n\t\tday = 2\n\t\tbreak\n\tcase \"Thursday\":\n\t\tday = 3\n\t\tbreak\n\tcase \"Friday\":\n\t\tday = 4\n\t\tbreak\n\tcase \"Saturday\":\n\t\tday = 5\n\t\tbreak\n\t}\n\tdate := time.Now().Add(-day * 24 * time.Hour)\n\treturn date.Format(\"2006-01-02\")\n}\n\ntype ByKey struct {\n\tKey string\n\tList []orm.Params\n}\n\nfunc (a ByKey) Len() int {\n\treturn len(a.List)\n}\nfunc (a ByKey) Swap(i, j int) {\n\ta.List[i], a.List[j] = a.List[j], a.List[i]\n}\nfunc (a ByKey) Less(i, j int) bool {\n\treturn ToInt(a.List[i][a.Key]) > ToInt(a.List[j][a.Key])\n}\n\nfunc Sort(list []orm.Params, key string) []orm.Params {\n\tbyKey := ByKey{List: list, Key: key}\n\tsort.Sort(byKey)\n\treturn byKey.List\n}\n\nfunc SubString(str string, begin, length int) (substr string) {\n\t\/\/ 将字符串的转换成[]rune\n\trs := []rune(str)\n\tlth := len(rs)\n\n\t\/\/ 简单的越界判断\n\tif begin < 0 {\n\t\tbegin = 0\n\t}\n\tif begin >= lth {\n\t\tbegin = lth\n\t}\n\tend := begin + length\n\tif end > lth {\n\t\tend = lth\n\t}\n\t\/\/ 返回子串\n\treturn string(rs[begin:end])\n}\n\nfunc ToFloat(str interface{}) float64 {\n\tif str == nil {\n\t\treturn 0\n\t}\n\ttf, err := strconv.ParseFloat(ToString(str), 64)\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn tf\n}\nfunc GetEncryptPhone(phone interface{}) string {\n\ttemp := ToString(phone)\n\tif len(temp) == 11 {\n\t\tstart := SubString(temp, 0, 3)\n\t\tend := SubString(temp, 7, 11)\n\t\ttemp = start + \"****\" + end\n\t}\n\treturn temp\n}\nfunc Exist(filename string) bool {\n\t_, err := os.Stat(filename)\n\treturn err == nil || os.IsExist(err)\n}\n<commit_msg>update<commit_after>package ofutils\n\nimport (\n\t\"crypto\/md5\"\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/astaxie\/beego\/orm\"\n)\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n}\n\nvar letterRunes = []rune(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\")\n\nfunc RandString(n int) string {\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letterRunes[rand.Intn(len(letterRunes))]\n\t}\n\treturn string(b)\n}\n\nfunc MD5(text string) string {\n\thasher := md5.New()\n\thasher.Write([]byte(text))\n\treturn hex.EncodeToString(hasher.Sum(nil))\n}\n\nfunc ToString(str interface{}) string {\n\tif str == nil {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"%v\", str)\n}\n\nfunc ToInt(val interface{}) int {\n\tif val == nil {\n\t\treturn 0\n\t}\n\ts, ok := val.(string)\n\tif ok {\n\t\ti, err := strconv.Atoi(s)\n\t\tif err != nil {\n\t\t\treturn 0\n\t\t}\n\t\treturn i\n\t}\n\ti, ok := val.(int)\n\tif ok {\n\t\treturn i\n\t} else {\n\t\treturn 0\n\t}\n}\n\nfunc GetWeekFirstDay() string {\n\tweek := time.Now().Weekday().String()\n\tvar day time.Duration\n\tswitch week {\n\tcase \"Sunday\":\n\t\tday = 6\n\t\tbreak\n\tcase \"Monday\":\n\t\tday = 0\n\t\tbreak\n\tcase \"Tuesday\":\n\t\tday = 1\n\t\tbreak\n\tcase \"Wednesday\":\n\t\tday = 2\n\t\tbreak\n\tcase \"Thursday\":\n\t\tday = 3\n\t\tbreak\n\tcase \"Friday\":\n\t\tday = 4\n\t\tbreak\n\tcase \"Saturday\":\n\t\tday = 5\n\t\tbreak\n\t}\n\tdate := time.Now().Add(-day * 24 * time.Hour)\n\treturn date.Format(\"2006-01-02\")\n}\n\ntype ByKey struct {\n\tKey string\n\tList []orm.Params\n}\n\nfunc (a ByKey) Len() int {\n\treturn len(a.List)\n}\nfunc (a ByKey) Swap(i, j int) {\n\ta.List[i], a.List[j] = a.List[j], a.List[i]\n}\nfunc (a ByKey) Less(i, j int) bool {\n\treturn ToInt(a.List[i][a.Key]) > ToInt(a.List[j][a.Key])\n}\n\nfunc Sort(list []orm.Params, key string) []orm.Params {\n\tbyKey := ByKey{List: list, Key: key}\n\tsort.Sort(byKey)\n\treturn byKey.List\n}\n\nfunc SubString(str string, begin, length int) (substr string) {\n\t\/\/ 将字符串的转换成[]rune\n\trs := []rune(str)\n\tlth := len(rs)\n\n\t\/\/ 简单的越界判断\n\tif begin < 0 {\n\t\tbegin = 0\n\t}\n\tif begin >= lth {\n\t\tbegin = lth\n\t}\n\tend := begin + length\n\tif end > lth {\n\t\tend = lth\n\t}\n\t\/\/ 返回子串\n\treturn string(rs[begin:end])\n}\n\nfunc ToFloat(str interface{}) float64 {\n\tif str == nil {\n\t\treturn 0\n\t}\n\ttf, err := strconv.ParseFloat(ToString(str), 64)\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn tf\n}\nfunc ToJson(datas interface{}) string {\n\tjsonString, _ := json.Marshal(datas)\n\treturn string(jsonString)\n}\nfunc GetEncryptPhone(phone interface{}) string {\n\ttemp := ToString(phone)\n\tif len(temp) == 11 {\n\t\tstart := SubString(temp, 0, 3)\n\t\tend := SubString(temp, 7, 11)\n\t\ttemp = start + \"****\" + end\n\t}\n\treturn temp\n}\nfunc Exist(filename string) bool {\n\t_, err := os.Stat(filename)\n\treturn err == nil || os.IsExist(err)\n}\n<|endoftext|>"} {"text":"<commit_before>package csrf\n\nimport \"net\/http\"\n\n\/\/ Option describes a functional option for configuring the the CSRF handler.\ntype Option func(*csrf)\n\n\/\/ MaxAge sets the maximum age (in seconds) of a CSRF token's underlying cookie.\n\/\/ Defaults to 12 hours.\nfunc MaxAge(age int) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.MaxAge = age\n\t}\n}\n\n\/\/ Domain sets the cookie domain. Defaults to the current domain of the request\n\/\/ only (recommended).\n\/\/\n\/\/ This should be a hostname and not a URL. If set, the domain is treated as\n\/\/ being prefixed with a '.' - e.g. \"example.com\" becomes \".example.com\" and\n\/\/ matches \"www.example.com\" and \"secure.example.com\".\nfunc Domain(domain string) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.Domain = domain\n\t}\n}\n\n\/\/ Path sets the cookie path. Defaults to the path the cookie was issued from\n\/\/ (recommended).\n\/\/\n\/\/ This instructs clients to only respond with cookie for that path and its\n\/\/ subpaths - i.e. a cookie issued from \"\/register\" would be included in requests\n\/\/ to \"\/register\/step2\" and \"\/register\/submit\".\nfunc Path(p string) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.Path = p\n\t}\n}\n\n\/\/ Secure sets the 'Secure' flag on the cookie. Defaults to true (recommended).\n\/\/ Set this to 'false' in your development environment otherwise the cookie won't\n\/\/ be sent over an insecure channel. Setting this via the presence of a 'DEV'\n\/\/ environmental variable is a good way of making sure this won't make it to a\n\/\/ production environment.\nfunc Secure(s bool) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.Secure = s\n\t}\n}\n\n\/\/ HttpOnly sets the 'HttpOnly' flag on the cookie. Defaults to true (recommended).\nfunc HttpOnly(h bool) Option {\n\treturn func(cs *csrf) {\n\t\t\/\/ Note that the function and field names match the case of the\n\t\t\/\/ related http.Cookie field instead of the \"correct\" HTTPOnly name\n\t\t\/\/ that golint suggests.\n\t\tcs.opts.HttpOnly = h\n\t}\n}\n\n\/\/ ErrorHandler allows you to change the handler called when CSRF request\n\/\/ processing encounters an invalid token or request. A typical use would be to\n\/\/ provide a handler that returns a static HTML file with a HTTP 403 status. By\n\/\/ default a HTTP 404 status and a plain text CSRF failure reason are served.\n\/\/\n\/\/ Note that a custom error handler can also access the csrf.Failure(r)\n\/\/ function to retrieve the CSRF validation reason from the request context.\nfunc ErrorHandler(h http.Handler) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.ErrorHandler = h\n\t}\n}\n\n\/\/ RequestHeader allows you to change the request header the CSRF middleware\n\/\/ inspects. The default is X-CSRF-Token.\nfunc RequestHeader(header string) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.RequestHeader = header\n\t}\n}\n\n\/\/ FieldName allows you to change the name value of the hidden <input> field\n\/\/ generated by csrf.FormField. The default is {{ .csrfToken }}\nfunc FieldName(name string) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.FieldName = name\n\t}\n}\n\n\/\/ CookieName changes the name of the CSRF cookie issued to clients.\n\/\/\n\/\/ Note that cookie names should not contain whitespace, commas, semicolons,\n\/\/ backslashes or control characters as per RFC6265.\nfunc CookieName(name string) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.CookieName = name\n\t}\n}\n\n\/\/ setStore sets the store used by the CSRF middleware.\n\/\/ Note: this is private (for now) to allow for internal API changes.\nfunc setStore(s store) Option {\n\treturn func(cs *csrf) {\n\t\tcs.st = s\n\t}\n}\n\n\/\/ parseOptions parses the supplied options functions and returns a configured\n\/\/ csrf handler.\nfunc parseOptions(h http.Handler, opts ...Option) *csrf {\n\t\/\/ Set the handler to call after processing.\n\tcs := &csrf{\n\t\th: h,\n\t}\n\n\t\/\/ Default to true. See Secure & HttpOnly function comments for rationale.\n\t\/\/ Set here to allow package users to override the default.\n\tcs.opts.Secure = true\n\tcs.opts.HttpOnly = true\n\n\t\/\/ Range over each options function and apply it\n\t\/\/ to our csrf type to configure it. Options functions are\n\t\/\/ applied in order, with any conflicting options overriding\n\t\/\/ earlier calls.\n\tfor _, option := range opts {\n\t\toption(cs)\n\t}\n\n\treturn cs\n}\n<commit_msg>[docs] Corrected status code in docs (404 => 403).<commit_after>package csrf\n\nimport \"net\/http\"\n\n\/\/ Option describes a functional option for configuring the the CSRF handler.\ntype Option func(*csrf)\n\n\/\/ MaxAge sets the maximum age (in seconds) of a CSRF token's underlying cookie.\n\/\/ Defaults to 12 hours.\nfunc MaxAge(age int) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.MaxAge = age\n\t}\n}\n\n\/\/ Domain sets the cookie domain. Defaults to the current domain of the request\n\/\/ only (recommended).\n\/\/\n\/\/ This should be a hostname and not a URL. If set, the domain is treated as\n\/\/ being prefixed with a '.' - e.g. \"example.com\" becomes \".example.com\" and\n\/\/ matches \"www.example.com\" and \"secure.example.com\".\nfunc Domain(domain string) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.Domain = domain\n\t}\n}\n\n\/\/ Path sets the cookie path. Defaults to the path the cookie was issued from\n\/\/ (recommended).\n\/\/\n\/\/ This instructs clients to only respond with cookie for that path and its\n\/\/ subpaths - i.e. a cookie issued from \"\/register\" would be included in requests\n\/\/ to \"\/register\/step2\" and \"\/register\/submit\".\nfunc Path(p string) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.Path = p\n\t}\n}\n\n\/\/ Secure sets the 'Secure' flag on the cookie. Defaults to true (recommended).\n\/\/ Set this to 'false' in your development environment otherwise the cookie won't\n\/\/ be sent over an insecure channel. Setting this via the presence of a 'DEV'\n\/\/ environmental variable is a good way of making sure this won't make it to a\n\/\/ production environment.\nfunc Secure(s bool) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.Secure = s\n\t}\n}\n\n\/\/ HttpOnly sets the 'HttpOnly' flag on the cookie. Defaults to true (recommended).\nfunc HttpOnly(h bool) Option {\n\treturn func(cs *csrf) {\n\t\t\/\/ Note that the function and field names match the case of the\n\t\t\/\/ related http.Cookie field instead of the \"correct\" HTTPOnly name\n\t\t\/\/ that golint suggests.\n\t\tcs.opts.HttpOnly = h\n\t}\n}\n\n\/\/ ErrorHandler allows you to change the handler called when CSRF request\n\/\/ processing encounters an invalid token or request. A typical use would be to\n\/\/ provide a handler that returns a static HTML file with a HTTP 403 status. By\n\/\/ default a HTTP 403 status and a plain text CSRF failure reason are served.\n\/\/\n\/\/ Note that a custom error handler can also access the csrf.Failure(r)\n\/\/ function to retrieve the CSRF validation reason from the request context.\nfunc ErrorHandler(h http.Handler) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.ErrorHandler = h\n\t}\n}\n\n\/\/ RequestHeader allows you to change the request header the CSRF middleware\n\/\/ inspects. The default is X-CSRF-Token.\nfunc RequestHeader(header string) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.RequestHeader = header\n\t}\n}\n\n\/\/ FieldName allows you to change the name value of the hidden <input> field\n\/\/ generated by csrf.FormField. The default is {{ .csrfToken }}\nfunc FieldName(name string) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.FieldName = name\n\t}\n}\n\n\/\/ CookieName changes the name of the CSRF cookie issued to clients.\n\/\/\n\/\/ Note that cookie names should not contain whitespace, commas, semicolons,\n\/\/ backslashes or control characters as per RFC6265.\nfunc CookieName(name string) Option {\n\treturn func(cs *csrf) {\n\t\tcs.opts.CookieName = name\n\t}\n}\n\n\/\/ setStore sets the store used by the CSRF middleware.\n\/\/ Note: this is private (for now) to allow for internal API changes.\nfunc setStore(s store) Option {\n\treturn func(cs *csrf) {\n\t\tcs.st = s\n\t}\n}\n\n\/\/ parseOptions parses the supplied options functions and returns a configured\n\/\/ csrf handler.\nfunc parseOptions(h http.Handler, opts ...Option) *csrf {\n\t\/\/ Set the handler to call after processing.\n\tcs := &csrf{\n\t\th: h,\n\t}\n\n\t\/\/ Default to true. See Secure & HttpOnly function comments for rationale.\n\t\/\/ Set here to allow package users to override the default.\n\tcs.opts.Secure = true\n\tcs.opts.HttpOnly = true\n\n\t\/\/ Range over each options function and apply it\n\t\/\/ to our csrf type to configure it. Options functions are\n\t\/\/ applied in order, with any conflicting options overriding\n\t\/\/ earlier calls.\n\tfor _, option := range opts {\n\t\toption(cs)\n\t}\n\n\treturn cs\n}\n<|endoftext|>"} {"text":"<commit_before>package daemon\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com\/noironetworks\/cilium-net\/common\"\n\t\"github.com\/noironetworks\/cilium-net\/common\/types\"\n\n\tconsulAPI \"github.com\/noironetworks\/cilium-net\/Godeps\/_workspace\/src\/github.com\/hashicorp\/consul\/api\"\n)\n\nfunc (d *Daemon) initializeFreeID() error {\n\tpath := common.LastFreeIDKeyPath\n\tfreeIDByte, err := json.Marshal(common.FirstFreeID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsession, _, err := d.consul.Session().CreateNoChecks(nil, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp := &consulAPI.KVPair{Key: path, Value: freeIDByte}\n\tlockPair := &consulAPI.KVPair{Key: common.GetLockPath(path), Session: session}\n\tlog.Debug(\"Trying to acquire lock for free ID...\")\n\tacq, _, err := d.consul.KV().Acquire(lockPair, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !acq {\n\t\treturn nil\n\t}\n\tdefer d.consul.KV().Release(lockPair, nil)\n\n\tlog.Debug(\"Trying to acquire free ID...\")\n\tk, _, err := d.consul.KV().Get(path, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif k != nil {\n\t\t\/\/ FreeID already set\n\t\treturn nil\n\t}\n\tlog.Info(\"Trying to put free ID...\")\n\t_, err = d.consul.KV().Put(p, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Info(\"Free ID successfully initialized\")\n\n\treturn nil\n}\n\nfunc (d *Daemon) updateIDRef(secCtxLabels *types.SecCtxLabel) error {\n\tvar err error\n\tlblKey := &consulAPI.KVPair{Key: common.IDKeyPath + strconv.Itoa(secCtxLabels.ID)}\n\tlblKey.Value, err = json.Marshal(secCtxLabels)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = d.consul.KV().Put(lblKey, nil)\n\treturn err\n}\n\n\/\/ gasNewID gets and sets a New ID.\nfunc (d *Daemon) gasNewID(labels *types.SecCtxLabel) error {\n\tfreeID, err := d.GetMaxID()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsetID2Label := func(lockPair *consulAPI.KVPair) error {\n\t\tdefer d.consul.KV().Release(lockPair, nil)\n\t\tlabels.ID = freeID\n\t\tif err := d.updateIDRef(labels); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn d.setMaxID(freeID + 1)\n\t}\n\n\tsession, _, err := d.consul.Session().CreateNoChecks(nil, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbeginning := freeID\n\tfor {\n\t\tlog.Debugf(\"Trying to aquire a new free ID %d\", freeID)\n\t\tpath := common.IDKeyPath + strconv.Itoa(freeID)\n\n\t\tlockPair := &consulAPI.KVPair{Key: common.GetLockPath(path), Session: session}\n\t\tacq, _, err := d.consul.KV().Acquire(lockPair, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif acq {\n\t\t\tlblKey, _, err := d.consul.KV().Get(path, nil)\n\t\t\tif err != nil {\n\t\t\t\td.consul.KV().Release(lockPair, nil)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif lblKey == nil {\n\t\t\t\treturn setID2Label(lockPair)\n\t\t\t}\n\t\t\tvar consulLabels types.SecCtxLabel\n\t\t\tif err := json.Unmarshal(lblKey.Value, &consulLabels); err != nil {\n\t\t\t\td.consul.KV().Release(lockPair, nil)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif consulLabels.RefCount == 0 {\n\t\t\t\tlog.Info(\"Recycling ID %d\", freeID)\n\t\t\t\treturn setID2Label(lockPair)\n\t\t\t}\n\t\t\td.consul.KV().Release(lockPair, nil)\n\t\t}\n\t\tfreeID++\n\t\tif freeID > common.MaxSetOfLabels {\n\t\t\tfreeID = common.FirstFreeID\n\t\t}\n\t\tif beginning == freeID {\n\t\t\treturn fmt.Errorf(\"Reached maximum set of labels available.\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (d *Daemon) lockPath(path string) (*consulAPI.Lock, <-chan struct{}, error) {\n\tlog.Debugf(\"Creating lock for %s\", path)\n\topts := &consulAPI.LockOptions{\n\t\tKey: common.GetLockPath(path),\n\t}\n\tlockKey, err := d.consul.LockOpts(opts)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tc, err := lockKey.Lock(nil)\n\tlog.Debugf(\"Locked for %s\", path)\n\treturn lockKey, c, err\n}\n\n\/\/ PutLabels stores to given labels in consul and returns the SecCtxLabels created for\n\/\/ the given labels.\nfunc (d *Daemon) PutLabels(labels types.Labels) (*types.SecCtxLabel, bool, error) {\n\tlog.Debugf(\"Putting labels %+v\", labels)\n\tisNew := false\n\n\t\/\/ Retrieve unique SHA256Sum for labels\n\tsha256Sum, err := labels.SHA256Sum()\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tlblPath := common.LabelsKeyPath + sha256Sum\n\n\t\/\/ Lock that sha256Sum\n\tlockKey, locker, err := d.lockPath(lblPath)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tif locker == nil {\n\t\treturn nil, false, fmt.Errorf(\"locker is nil\\n\")\n\t}\n\tdefer lockKey.Unlock()\n\n\t\/\/ After lock complete, get label's path\n\tpair, _, err := d.consul.KV().Get(lblPath, nil)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tvar secCtxLbls types.SecCtxLabel\n\tif pair == nil {\n\t\tpair = &consulAPI.KVPair{Key: lblPath}\n\t\tsecCtxLbls.Labels = labels\n\t\tsecCtxLbls.RefCount = 1\n\t\tisNew = true\n\t} else {\n\t\tif err := json.Unmarshal(pair.Value, &secCtxLbls); err != nil {\n\t\t\treturn nil, false, err\n\t\t}\n\t\t\/\/ If RefCount is 0 then we have to retrieve a new ID\n\t\tif secCtxLbls.RefCount == 0 {\n\t\t\tisNew = true\n\t\t}\n\t\tsecCtxLbls.RefCount++\n\t}\n\n\tif isNew {\n\t\tif err := d.gasNewID(&secCtxLbls); err != nil {\n\t\t\treturn nil, false, err\n\t\t}\n\t} else if err := d.updateIDRef(&secCtxLbls); err != nil {\n\t\treturn nil, false, err\n\t}\n\tlog.Debugf(\"Incrementing label %d ref-count to %d\\n\", secCtxLbls.ID, secCtxLbls.RefCount)\n\n\tsecCtxLblsByte, err := json.Marshal(secCtxLbls)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tpair.Value = secCtxLblsByte\n\t_, err = d.consul.KV().Put(pair, nil)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\treturn &secCtxLbls, isNew, nil\n}\n\n\/\/ GetLabels returns the SecCtxLabels that belongs to id.\nfunc (d *Daemon) GetLabels(id int) (*types.SecCtxLabel, error) {\n\tstrID := strconv.Itoa(id)\n\tpair, _, err := d.consul.KV().Get(common.IDKeyPath+strID, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif pair == nil {\n\t\treturn nil, nil\n\t}\n\tvar secCtxLabels types.SecCtxLabel\n\tif err := json.Unmarshal(pair.Value, &secCtxLabels); err != nil {\n\t\treturn nil, err\n\t}\n\tif secCtxLabels.RefCount == 0 {\n\t\treturn nil, nil\n\t}\n\treturn &secCtxLabels, nil\n}\n\n\/\/ DeleteLabelsByUUID deletes the SecCtxLabels belonging to id.\nfunc (d *Daemon) DeleteLabelsByUUID(id int) error {\n\tsecCtxLabels, err := d.GetLabels(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif secCtxLabels == nil {\n\t\treturn nil\n\t}\n\tsha256sum, err := secCtxLabels.Labels.SHA256Sum()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn d.DeleteLabelsBySHA256(sha256sum)\n}\n\n\/\/ DeleteLabelsBySHA256 deletes the SecCtxLabels that belong to the labels' sha256Sum.\nfunc (d *Daemon) DeleteLabelsBySHA256(sha256Sum string) error {\n\tif sha256Sum == \"\" {\n\t\treturn nil\n\t}\n\tlblPath := common.LabelsKeyPath + sha256Sum\n\t\/\/ Lock that sha256Sum\n\tlockKey, locker, err := d.lockPath(lblPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif locker == nil {\n\t\treturn fmt.Errorf(\"locker is nil\\n\")\n\t}\n\tdefer lockKey.Unlock()\n\n\t\/\/ After lock complete, get label's path\n\tpair, _, err := d.consul.KV().Get(lblPath, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar dbSecCtxLbls types.SecCtxLabel\n\tif pair == nil {\n\t\treturn nil\n\t}\n\tif err := json.Unmarshal(pair.Value, &dbSecCtxLbls); err != nil {\n\t\treturn err\n\t}\n\tif dbSecCtxLbls.RefCount > 0 {\n\t\tdbSecCtxLbls.RefCount--\n\t}\n\tif err := d.updateIDRef(&dbSecCtxLbls); err != nil {\n\t\treturn err\n\t}\n\tlog.Debugf(\"Decremented label %d ref-count to %d\\n\", dbSecCtxLbls.ID, dbSecCtxLbls.RefCount)\n\n\tsecCtxLblsByte, err := json.Marshal(dbSecCtxLbls)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpair.Value = secCtxLblsByte\n\t_, err = d.consul.KV().Put(pair, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ GetMaxID returns the maximum possible free UUID stored in consul.\nfunc (d *Daemon) GetMaxID() (int, error) {\n\tk, _, err := d.consul.KV().Get(common.LastFreeIDKeyPath, nil)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tif k == nil {\n\t\t\/\/ FreeID is empty? We should set it out!\n\t\tlog.Infof(\"Empty FreeID, setting it up with default value %d\", common.FirstFreeID)\n\t\tif err := d.initializeFreeID(); err != nil {\n\t\t\treturn -1, err\n\t\t}\n\t\tk, _, err = d.consul.KV().Get(common.LastFreeIDKeyPath, nil)\n\t\tif k == nil {\n\t\t\t\/\/ Something is really wrong\n\t\t\terrMsg := \"Unable to retrieve last free ID because the key is always empty\\n\"\n\t\t\tlog.Errorf(errMsg)\n\t\t\treturn -1, fmt.Errorf(errMsg)\n\t\t}\n\t}\n\tvar freeID int\n\tlog.Debugf(\"Retrieving max free ID %v\", k.Value)\n\tif err := json.Unmarshal(k.Value, &freeID); err != nil {\n\t\treturn -1, err\n\t}\n\treturn freeID, nil\n}\n\nfunc (d *Daemon) setMaxID(freeID int) error {\n\tk, _, err := d.consul.KV().Get(common.LastFreeIDKeyPath, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif k == nil {\n\t\t\/\/ FreeIDs is empty? We should set it out!\n\t\tif err := d.initializeFreeID(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tk, _, err = d.consul.KV().Get(common.LastFreeIDKeyPath, nil)\n\t\tif k == nil {\n\t\t\t\/\/ Something is really wrong\n\t\t\terrMsg := \"Unable to setting ID because the key is always empty\\n\"\n\t\t\tlog.Errorf(errMsg)\n\t\t\treturn fmt.Errorf(errMsg)\n\t\t}\n\t}\n\tk.Value, err = json.Marshal(freeID)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = d.consul.KV().Put(k, nil)\n\treturn err\n}\n<commit_msg>Deleting unreachable code in label's daemon<commit_after>package daemon\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com\/noironetworks\/cilium-net\/common\"\n\t\"github.com\/noironetworks\/cilium-net\/common\/types\"\n\n\tconsulAPI \"github.com\/noironetworks\/cilium-net\/Godeps\/_workspace\/src\/github.com\/hashicorp\/consul\/api\"\n)\n\nfunc (d *Daemon) initializeFreeID() error {\n\tpath := common.LastFreeIDKeyPath\n\tfreeIDByte, err := json.Marshal(common.FirstFreeID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsession, _, err := d.consul.Session().CreateNoChecks(nil, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp := &consulAPI.KVPair{Key: path, Value: freeIDByte}\n\tlockPair := &consulAPI.KVPair{Key: common.GetLockPath(path), Session: session}\n\tlog.Debug(\"Trying to acquire lock for free ID...\")\n\tacq, _, err := d.consul.KV().Acquire(lockPair, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !acq {\n\t\treturn nil\n\t}\n\tdefer d.consul.KV().Release(lockPair, nil)\n\n\tlog.Debug(\"Trying to acquire free ID...\")\n\tk, _, err := d.consul.KV().Get(path, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif k != nil {\n\t\t\/\/ FreeID already set\n\t\treturn nil\n\t}\n\tlog.Info(\"Trying to put free ID...\")\n\t_, err = d.consul.KV().Put(p, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Info(\"Free ID successfully initialized\")\n\n\treturn nil\n}\n\nfunc (d *Daemon) updateIDRef(secCtxLabels *types.SecCtxLabel) error {\n\tvar err error\n\tlblKey := &consulAPI.KVPair{Key: common.IDKeyPath + strconv.Itoa(secCtxLabels.ID)}\n\tlblKey.Value, err = json.Marshal(secCtxLabels)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = d.consul.KV().Put(lblKey, nil)\n\treturn err\n}\n\n\/\/ gasNewID gets and sets a New ID.\nfunc (d *Daemon) gasNewID(labels *types.SecCtxLabel) error {\n\tfreeID, err := d.GetMaxID()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsetID2Label := func(lockPair *consulAPI.KVPair) error {\n\t\tdefer d.consul.KV().Release(lockPair, nil)\n\t\tlabels.ID = freeID\n\t\tif err := d.updateIDRef(labels); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn d.setMaxID(freeID + 1)\n\t}\n\n\tsession, _, err := d.consul.Session().CreateNoChecks(nil, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbeginning := freeID\n\tfor {\n\t\tlog.Debugf(\"Trying to aquire a new free ID %d\", freeID)\n\t\tpath := common.IDKeyPath + strconv.Itoa(freeID)\n\n\t\tlockPair := &consulAPI.KVPair{Key: common.GetLockPath(path), Session: session}\n\t\tacq, _, err := d.consul.KV().Acquire(lockPair, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif acq {\n\t\t\tlblKey, _, err := d.consul.KV().Get(path, nil)\n\t\t\tif err != nil {\n\t\t\t\td.consul.KV().Release(lockPair, nil)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif lblKey == nil {\n\t\t\t\treturn setID2Label(lockPair)\n\t\t\t}\n\t\t\tvar consulLabels types.SecCtxLabel\n\t\t\tif err := json.Unmarshal(lblKey.Value, &consulLabels); err != nil {\n\t\t\t\td.consul.KV().Release(lockPair, nil)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif consulLabels.RefCount == 0 {\n\t\t\t\tlog.Info(\"Recycling ID %d\", freeID)\n\t\t\t\treturn setID2Label(lockPair)\n\t\t\t}\n\t\t\td.consul.KV().Release(lockPair, nil)\n\t\t}\n\t\tfreeID++\n\t\tif freeID > common.MaxSetOfLabels {\n\t\t\tfreeID = common.FirstFreeID\n\t\t}\n\t\tif beginning == freeID {\n\t\t\treturn fmt.Errorf(\"Reached maximum set of labels available.\")\n\t\t}\n\t}\n}\n\nfunc (d *Daemon) lockPath(path string) (*consulAPI.Lock, <-chan struct{}, error) {\n\tlog.Debugf(\"Creating lock for %s\", path)\n\topts := &consulAPI.LockOptions{\n\t\tKey: common.GetLockPath(path),\n\t}\n\tlockKey, err := d.consul.LockOpts(opts)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tc, err := lockKey.Lock(nil)\n\tlog.Debugf(\"Locked for %s\", path)\n\treturn lockKey, c, err\n}\n\n\/\/ PutLabels stores to given labels in consul and returns the SecCtxLabels created for\n\/\/ the given labels.\nfunc (d *Daemon) PutLabels(labels types.Labels) (*types.SecCtxLabel, bool, error) {\n\tlog.Debugf(\"Putting labels %+v\", labels)\n\tisNew := false\n\n\t\/\/ Retrieve unique SHA256Sum for labels\n\tsha256Sum, err := labels.SHA256Sum()\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tlblPath := common.LabelsKeyPath + sha256Sum\n\n\t\/\/ Lock that sha256Sum\n\tlockKey, locker, err := d.lockPath(lblPath)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tif locker == nil {\n\t\treturn nil, false, fmt.Errorf(\"locker is nil\\n\")\n\t}\n\tdefer lockKey.Unlock()\n\n\t\/\/ After lock complete, get label's path\n\tpair, _, err := d.consul.KV().Get(lblPath, nil)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tvar secCtxLbls types.SecCtxLabel\n\tif pair == nil {\n\t\tpair = &consulAPI.KVPair{Key: lblPath}\n\t\tsecCtxLbls.Labels = labels\n\t\tsecCtxLbls.RefCount = 1\n\t\tisNew = true\n\t} else {\n\t\tif err := json.Unmarshal(pair.Value, &secCtxLbls); err != nil {\n\t\t\treturn nil, false, err\n\t\t}\n\t\t\/\/ If RefCount is 0 then we have to retrieve a new ID\n\t\tif secCtxLbls.RefCount == 0 {\n\t\t\tisNew = true\n\t\t}\n\t\tsecCtxLbls.RefCount++\n\t}\n\n\tif isNew {\n\t\tif err := d.gasNewID(&secCtxLbls); err != nil {\n\t\t\treturn nil, false, err\n\t\t}\n\t} else if err := d.updateIDRef(&secCtxLbls); err != nil {\n\t\treturn nil, false, err\n\t}\n\tlog.Debugf(\"Incrementing label %d ref-count to %d\\n\", secCtxLbls.ID, secCtxLbls.RefCount)\n\n\tsecCtxLblsByte, err := json.Marshal(secCtxLbls)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\tpair.Value = secCtxLblsByte\n\t_, err = d.consul.KV().Put(pair, nil)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\n\treturn &secCtxLbls, isNew, nil\n}\n\n\/\/ GetLabels returns the SecCtxLabels that belongs to id.\nfunc (d *Daemon) GetLabels(id int) (*types.SecCtxLabel, error) {\n\tstrID := strconv.Itoa(id)\n\tpair, _, err := d.consul.KV().Get(common.IDKeyPath+strID, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif pair == nil {\n\t\treturn nil, nil\n\t}\n\tvar secCtxLabels types.SecCtxLabel\n\tif err := json.Unmarshal(pair.Value, &secCtxLabels); err != nil {\n\t\treturn nil, err\n\t}\n\tif secCtxLabels.RefCount == 0 {\n\t\treturn nil, nil\n\t}\n\treturn &secCtxLabels, nil\n}\n\n\/\/ DeleteLabelsByUUID deletes the SecCtxLabels belonging to id.\nfunc (d *Daemon) DeleteLabelsByUUID(id int) error {\n\tsecCtxLabels, err := d.GetLabels(id)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif secCtxLabels == nil {\n\t\treturn nil\n\t}\n\tsha256sum, err := secCtxLabels.Labels.SHA256Sum()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn d.DeleteLabelsBySHA256(sha256sum)\n}\n\n\/\/ DeleteLabelsBySHA256 deletes the SecCtxLabels that belong to the labels' sha256Sum.\nfunc (d *Daemon) DeleteLabelsBySHA256(sha256Sum string) error {\n\tif sha256Sum == \"\" {\n\t\treturn nil\n\t}\n\tlblPath := common.LabelsKeyPath + sha256Sum\n\t\/\/ Lock that sha256Sum\n\tlockKey, locker, err := d.lockPath(lblPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif locker == nil {\n\t\treturn fmt.Errorf(\"locker is nil\\n\")\n\t}\n\tdefer lockKey.Unlock()\n\n\t\/\/ After lock complete, get label's path\n\tpair, _, err := d.consul.KV().Get(lblPath, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar dbSecCtxLbls types.SecCtxLabel\n\tif pair == nil {\n\t\treturn nil\n\t}\n\tif err := json.Unmarshal(pair.Value, &dbSecCtxLbls); err != nil {\n\t\treturn err\n\t}\n\tif dbSecCtxLbls.RefCount > 0 {\n\t\tdbSecCtxLbls.RefCount--\n\t}\n\tif err := d.updateIDRef(&dbSecCtxLbls); err != nil {\n\t\treturn err\n\t}\n\tlog.Debugf(\"Decremented label %d ref-count to %d\\n\", dbSecCtxLbls.ID, dbSecCtxLbls.RefCount)\n\n\tsecCtxLblsByte, err := json.Marshal(dbSecCtxLbls)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpair.Value = secCtxLblsByte\n\t_, err = d.consul.KV().Put(pair, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ GetMaxID returns the maximum possible free UUID stored in consul.\nfunc (d *Daemon) GetMaxID() (int, error) {\n\tk, _, err := d.consul.KV().Get(common.LastFreeIDKeyPath, nil)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tif k == nil {\n\t\t\/\/ FreeID is empty? We should set it out!\n\t\tlog.Infof(\"Empty FreeID, setting it up with default value %d\", common.FirstFreeID)\n\t\tif err := d.initializeFreeID(); err != nil {\n\t\t\treturn -1, err\n\t\t}\n\t\tk, _, err = d.consul.KV().Get(common.LastFreeIDKeyPath, nil)\n\t\tif k == nil {\n\t\t\t\/\/ Something is really wrong\n\t\t\terrMsg := \"Unable to retrieve last free ID because the key is always empty\\n\"\n\t\t\tlog.Errorf(errMsg)\n\t\t\treturn -1, fmt.Errorf(errMsg)\n\t\t}\n\t}\n\tvar freeID int\n\tlog.Debugf(\"Retrieving max free ID %v\", k.Value)\n\tif err := json.Unmarshal(k.Value, &freeID); err != nil {\n\t\treturn -1, err\n\t}\n\treturn freeID, nil\n}\n\nfunc (d *Daemon) setMaxID(freeID int) error {\n\tk, _, err := d.consul.KV().Get(common.LastFreeIDKeyPath, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif k == nil {\n\t\t\/\/ FreeIDs is empty? We should set it out!\n\t\tif err := d.initializeFreeID(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tk, _, err = d.consul.KV().Get(common.LastFreeIDKeyPath, nil)\n\t\tif k == nil {\n\t\t\t\/\/ Something is really wrong\n\t\t\terrMsg := \"Unable to setting ID because the key is always empty\\n\"\n\t\t\tlog.Errorf(errMsg)\n\t\t\treturn fmt.Errorf(errMsg)\n\t\t}\n\t}\n\tk.Value, err = json.Marshal(freeID)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = d.consul.KV().Put(k, nil)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/mitchellh\/go-homedir\"\n)\n\nvar (\n\tShortGitHubURI = regexp.MustCompile(`^[\\w\\-.]+\/[\\w\\-.]+$`)\n)\n\nfunc ToSourceURI(uri string) (string, error) {\n\tswitch {\n\tcase ShortGitHubURI.MatchString(uri):\n\t\treturn \"https:\/\/github.com\/\" + uri, nil\n\tdefault:\n\t\treturn uri, nil\n\t}\n}\n\nvar (\n\thome, errInit = homedir.Dir()\n\tdotvim = filepath.Join(home, \".vim\")\n)\n\nfunc ToDestinationPath(uri, filetype string) (string, error) {\n\tname := filepath.Base(uri)\n\tif filetype == \"\" {\n\t\treturn filepath.Join(dotvim, \"bundle\", name), nil\n\t}\n\treturn filepath.Join(dotvim, \"ftbundle\", filetype, name), nil\n}\n\ntype Package struct {\n\tsrc string\n\tdst string\n}\n\nfunc NewPackage(uri, filetype string) (*Package, error) {\n\tsrc, err := ToSourceURI(uri)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdst, err := ToDestinationPath(uri, filetype)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Package{\n\t\tsrc: src,\n\t\tdst: dst,\n\t}, nil\n}\n\nfunc (p *Package) toInstallCommand() *exec.Cmd {\n\treturn exec.Command(\"git\", \"clone\", p.src, p.dst)\n}\n\nfunc (p *Package) installed() bool {\n\t_, err := os.Stat(p.dst)\n\treturn err == nil\n}\n\nfunc (p *Package) Install() error {\n\tif p.installed() {\n\t\treturn nil\n\t}\n\tif _, err := exec.LookPath(\"git\"); err != nil {\n\t\treturn err\n\t}\n\n\terrMessage := bytes.NewBuffer(make([]byte, 0))\n\n\tinstallcmd := p.toInstallCommand()\n\tinstallcmd.Stderr = errMessage\n\tif err := installcmd.Run(); err != nil {\n\t\treturn fmt.Errorf(\"%s\", strings.TrimSpace(errMessage.String()))\n\t}\n\treturn nil\n}\n\nfunc (p *Package) Remove() error {\n\tif !p.installed() {\n\t\treturn nil\n\t}\n\treturn os.RemoveAll(p.dst)\n}\n\nfunc (p *Package) Update() error {\n\tif p.installed() {\n\t\treturn p.Remove()\n\t}\n\treturn p.Install()\n}\n<commit_msg>Fix exit without install if installed package at update<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/mitchellh\/go-homedir\"\n)\n\nvar (\n\tShortGitHubURI = regexp.MustCompile(`^[\\w\\-.]+\/[\\w\\-.]+$`)\n)\n\nfunc ToSourceURI(uri string) (string, error) {\n\tswitch {\n\tcase ShortGitHubURI.MatchString(uri):\n\t\treturn \"https:\/\/github.com\/\" + uri, nil\n\tdefault:\n\t\treturn uri, nil\n\t}\n}\n\nvar (\n\thome, errInit = homedir.Dir()\n\tdotvim = filepath.Join(home, \".vim\")\n)\n\nfunc ToDestinationPath(uri, filetype string) (string, error) {\n\tname := filepath.Base(uri)\n\tif filetype == \"\" {\n\t\treturn filepath.Join(dotvim, \"bundle\", name), nil\n\t}\n\treturn filepath.Join(dotvim, \"ftbundle\", filetype, name), nil\n}\n\ntype Package struct {\n\tsrc string\n\tdst string\n}\n\nfunc NewPackage(uri, filetype string) (*Package, error) {\n\tsrc, err := ToSourceURI(uri)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdst, err := ToDestinationPath(uri, filetype)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Package{\n\t\tsrc: src,\n\t\tdst: dst,\n\t}, nil\n}\n\nfunc (p *Package) toInstallCommand() *exec.Cmd {\n\treturn exec.Command(\"git\", \"clone\", p.src, p.dst)\n}\n\nfunc (p *Package) installed() bool {\n\t_, err := os.Stat(p.dst)\n\treturn err == nil\n}\n\nfunc (p *Package) Install() error {\n\tif p.installed() {\n\t\treturn nil\n\t}\n\tif _, err := exec.LookPath(\"git\"); err != nil {\n\t\treturn err\n\t}\n\n\terrMessage := bytes.NewBuffer(make([]byte, 0))\n\n\tinstallcmd := p.toInstallCommand()\n\tinstallcmd.Stderr = errMessage\n\tif err := installcmd.Run(); err != nil {\n\t\treturn fmt.Errorf(\"%s\", strings.TrimSpace(errMessage.String()))\n\t}\n\treturn nil\n}\n\nfunc (p *Package) Remove() error {\n\tif !p.installed() {\n\t\treturn nil\n\t}\n\treturn os.RemoveAll(p.dst)\n}\n\nfunc (p *Package) Update() error {\n\tif p.installed() {\n\t\tp.Remove()\n\t}\n\treturn p.Install()\n}\n<|endoftext|>"} {"text":"<commit_before>package wrap\n\nimport (\n\t\"io\"\n\t\"net\/rpc\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/go-playground\/log\"\n\t\"github.com\/go-playground\/wave\/client\"\n)\n\n\/\/ RetryReconnectEndpoint is interface needed to comply with wrapping\n\/\/ with Rety and Reconnect logic.\ntype RetryReconnectEndpoint interface {\n\tclient.Endpoint\n\tSetClient(*rpc.Client)\n\tNewClient() (*rpc.Client, error)\n}\n\n\/\/ RetryReconnect wraps the given RetryReconnectEndpoint endpoint and automatically\n\/\/ handles logic to reconnect and retry\nfunc RetryReconnect(endpoint RetryReconnectEndpoint, retryDuration time.Duration) (e client.Endpoint, err error) {\n\n\trr := &retryReconnect{\n\t\tRetryReconnectEndpoint: endpoint,\n\t\tclientMutex: new(sync.RWMutex),\n\t\treconnectMutex: new(sync.RWMutex),\n\t\tretryDuration: retryDuration,\n\t\tisDisconnected: true,\n\t}\n\n\te = rr\n\n\t_, err = rr.NewClient()\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\ntype retryReconnect struct {\n\tRetryReconnectEndpoint\n\tclientMutex *sync.RWMutex\n\treconnectMutex *sync.RWMutex\n\tretryDuration time.Duration\n\tisDisconnected bool \/\/ defaults to true\n}\n\nvar _ client.Endpoint = &retryReconnect{}\nvar _ RetryReconnectEndpoint = &retryReconnect{}\n\nfunc (r *retryReconnect) NewClient() (c *rpc.Client, err error) {\n\n\tr.reconnectMutex.Lock()\n\tr.isDisconnected = true\n\tr.reconnectMutex.Unlock()\n\n\t\/\/ a bunch of calls could have gotten here if server was busy, lets double check we're still disconnected\n\t\/\/ right after lock is released\n\tr.clientMutex.Lock()\n\tdefer r.clientMutex.Unlock()\n\n\t\/\/ check if still disconnected\n\tr.reconnectMutex.RLock()\n\tif !r.isDisconnected {\n\t\tr.reconnectMutex.RUnlock()\n\t\treturn nil, err\n\t}\n\tr.reconnectMutex.RUnlock()\n\n\tfor i := 0; i < 3; i++ {\n\t\tc, err = r.RetryReconnectEndpoint.NewClient()\n\t\tif err != nil {\n\t\t\ttime.Sleep(r.retryDuration)\n\t\t\tcontinue\n\t\t}\n\n\t\tr.SetClient(c)\n\n\t\tr.reconnectMutex.Lock()\n\t\tr.isDisconnected = false\n\t\tr.reconnectMutex.Unlock()\n\n\t\treturn\n\t}\n\n\tlog.WithFields(log.F(\"err\", err)).Alert(\"RPC Connection could not be established\/reestablished\")\n\n\tgo func() {\n\n\t\tr.clientMutex.Lock()\n\t\tdefer r.clientMutex.Unlock()\n\n\t\tfor {\n\n\t\t\tvar client *rpc.Client\n\t\t\tvar err2 error\n\n\t\t\ttime.Sleep(r.retryDuration)\n\t\t\tclient, err2 = r.RetryReconnectEndpoint.NewClient()\n\t\t\tif err2 != nil {\n\t\t\t\ttime.Sleep(r.retryDuration)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tr.SetClient(client)\n\n\t\t\tr.reconnectMutex.Lock()\n\t\t\tr.isDisconnected = false\n\t\t\tr.reconnectMutex.Unlock()\n\n\t\t\tbreak\n\t\t}\n\t}()\n\n\terr = rpc.ErrShutdown\n\treturn\n}\n\nfunc (r *retryReconnect) Call(args interface{}, reply interface{}) (err error) {\n\n\t\/\/ check if disconnected\n\tr.reconnectMutex.RLock()\n\tif r.isDisconnected {\n\t\tr.reconnectMutex.RUnlock()\n\t\treturn rpc.ErrShutdown\n\t}\n\tr.reconnectMutex.RUnlock()\n\nRETRY:\n\t\/\/ make rpc call\n\tr.clientMutex.RLock()\n\terr = r.RetryReconnectEndpoint.Call(args, reply)\n\tr.clientMutex.RUnlock()\n\n\t\/\/ if error indicates a disconnect of some sort, try and reconnect\n\tif err != nil && err == rpc.ErrShutdown || err == io.EOF || err == io.ErrUnexpectedEOF {\n\n\t\t_, err = r.NewClient()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tgoto RETRY\n\t}\n\n\treturn\n}\n\nfunc (r *retryReconnect) Go(args interface{}, reply interface{}, done chan *rpc.Call) (c *rpc.Call) {\n\n\tif done == nil {\n\t\tdone = make(chan *rpc.Call, 10)\n\t}\n\n\tc = &rpc.Call{\n\t\tServiceMethod: r.ServiceMethod(),\n\t\tArgs: args,\n\t\tReply: reply,\n\t\tDone: done,\n\t}\n\n\t\/\/ check if disconnected\n\tr.reconnectMutex.RLock()\n\tif r.isDisconnected {\n\t\tr.reconnectMutex.RUnlock()\n\n\t\tc.Error = rpc.ErrShutdown\n\n\t\tgo func() {\n\t\t\tc.Done <- c\n\t\t}()\n\t\treturn\n\t}\n\n\tr.reconnectMutex.RUnlock()\n\n\tdc := make(chan *rpc.Call, cap(done))\n\t\/\/ make rpc call\n\tr.clientMutex.RLock()\n\tc2 := r.RetryReconnectEndpoint.Go(args, reply, dc)\n\tr.clientMutex.RUnlock()\n\n\tgo func() {\n\t\tres := <-c2.Done\n\n\t\tif res.Error != nil && res.Error == rpc.ErrShutdown || res.Error == io.EOF || res.Error == io.ErrUnexpectedEOF {\n\t\t\t_, res.Error = r.NewClient()\n\t\t\tif res.Error != nil {\n\t\t\t\tc.Done <- res\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ RETRY\n\t\t\tr.clientMutex.RLock()\n\t\t\tc2 = r.RetryReconnectEndpoint.Go(args, reply, dc)\n\t\t\tr.clientMutex.RUnlock()\n\n\t\t\tres = <-c2.Done\n\t\t}\n\n\t\tc.Done <- res\n\t}()\n\n\treturn\n}\n<commit_msg>Add some documentation<commit_after>package wrap\n\nimport (\n\t\"io\"\n\t\"net\/rpc\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/go-playground\/log\"\n\t\"github.com\/go-playground\/wave\/client\"\n)\n\n\/\/ RetryReconnectEndpoint is interface needed to comply with wrapping\n\/\/ with Rety and Reconnect logic.\ntype RetryReconnectEndpoint interface {\n\tclient.Endpoint\n\n\t\/\/ SetClient is called after a successful connection is established\/reestablished\n\t\/\/ NOTE: should close the existing client in this method if not nil prior to\n\t\/\/ updating to the new one passed in.\n\tSetClient(*rpc.Client)\n\n\t\/\/ NewClient calls the RetryReconnectEndpoint's NewClient method\n\t\/\/ wrapping it with retry\/reconnect logic\n\tNewClient() (*rpc.Client, error)\n}\n\n\/\/ RetryReconnect wraps the given RetryReconnectEndpoint endpoint and automatically\n\/\/ handles logic to reconnect and retry\nfunc RetryReconnect(endpoint RetryReconnectEndpoint, retryDuration time.Duration) (e client.Endpoint, err error) {\n\n\trr := &retryReconnect{\n\t\tRetryReconnectEndpoint: endpoint,\n\t\tclientMutex: new(sync.RWMutex),\n\t\treconnectMutex: new(sync.RWMutex),\n\t\tretryDuration: retryDuration,\n\t\tisDisconnected: true,\n\t}\n\n\te = rr\n\n\t_, err = rr.NewClient()\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}\n\ntype retryReconnect struct {\n\tRetryReconnectEndpoint\n\tclientMutex *sync.RWMutex\n\treconnectMutex *sync.RWMutex\n\tretryDuration time.Duration\n\tisDisconnected bool \/\/ defaults to true\n}\n\nvar _ client.Endpoint = &retryReconnect{}\nvar _ RetryReconnectEndpoint = &retryReconnect{}\n\n\/\/ NewClient calls the RetryReconnectEndpoint's NewClient method\n\/\/ wrapping it with retry\/reconnect logic\nfunc (r *retryReconnect) NewClient() (c *rpc.Client, err error) {\n\n\tr.reconnectMutex.Lock()\n\tr.isDisconnected = true\n\tr.reconnectMutex.Unlock()\n\n\t\/\/ a bunch of calls could have gotten here if server was busy, lets double check we're still disconnected\n\t\/\/ right after lock is released\n\tr.clientMutex.Lock()\n\tdefer r.clientMutex.Unlock()\n\n\t\/\/ check if still disconnected\n\tr.reconnectMutex.RLock()\n\tif !r.isDisconnected {\n\t\tr.reconnectMutex.RUnlock()\n\t\treturn nil, err\n\t}\n\tr.reconnectMutex.RUnlock()\n\n\tfor i := 0; i < 3; i++ {\n\t\tc, err = r.RetryReconnectEndpoint.NewClient()\n\t\tif err != nil {\n\t\t\ttime.Sleep(r.retryDuration)\n\t\t\tcontinue\n\t\t}\n\n\t\tr.SetClient(c)\n\n\t\tr.reconnectMutex.Lock()\n\t\tr.isDisconnected = false\n\t\tr.reconnectMutex.Unlock()\n\n\t\treturn\n\t}\n\n\tlog.WithFields(log.F(\"err\", err)).Alert(\"RPC Connection could not be established\/reestablished\")\n\n\tgo func() {\n\n\t\tr.clientMutex.Lock()\n\t\tdefer r.clientMutex.Unlock()\n\n\t\tfor {\n\n\t\t\tvar client *rpc.Client\n\t\t\tvar err2 error\n\n\t\t\ttime.Sleep(r.retryDuration)\n\t\t\tclient, err2 = r.RetryReconnectEndpoint.NewClient()\n\t\t\tif err2 != nil {\n\t\t\t\ttime.Sleep(r.retryDuration)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tr.SetClient(client)\n\n\t\t\tr.reconnectMutex.Lock()\n\t\t\tr.isDisconnected = false\n\t\t\tr.reconnectMutex.Unlock()\n\n\t\t\tbreak\n\t\t}\n\t}()\n\n\terr = rpc.ErrShutdown\n\treturn\n}\n\n\/\/ Call calls the RetryReconnectEndpoint's Call method\n\/\/ wrapping it with retry\/reconnect logic\nfunc (r *retryReconnect) Call(args interface{}, reply interface{}) (err error) {\n\n\t\/\/ check if disconnected\n\tr.reconnectMutex.RLock()\n\tif r.isDisconnected {\n\t\tr.reconnectMutex.RUnlock()\n\t\treturn rpc.ErrShutdown\n\t}\n\tr.reconnectMutex.RUnlock()\n\nRETRY:\n\t\/\/ make rpc call\n\tr.clientMutex.RLock()\n\terr = r.RetryReconnectEndpoint.Call(args, reply)\n\tr.clientMutex.RUnlock()\n\n\t\/\/ if error indicates a disconnect of some sort, try and reconnect\n\tif err != nil && err == rpc.ErrShutdown || err == io.EOF || err == io.ErrUnexpectedEOF {\n\n\t\t_, err = r.NewClient()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tgoto RETRY\n\t}\n\n\treturn\n}\n\n\/\/ Go calls the RetryReconnectEndpoint's Call method\n\/\/ wrapping it with retry\/reconnect logic\nfunc (r *retryReconnect) Go(args interface{}, reply interface{}, done chan *rpc.Call) (c *rpc.Call) {\n\n\tif done == nil {\n\t\tdone = make(chan *rpc.Call, 10)\n\t}\n\n\tc = &rpc.Call{\n\t\tServiceMethod: r.ServiceMethod(),\n\t\tArgs: args,\n\t\tReply: reply,\n\t\tDone: done,\n\t}\n\n\t\/\/ check if disconnected\n\tr.reconnectMutex.RLock()\n\tif r.isDisconnected {\n\t\tr.reconnectMutex.RUnlock()\n\n\t\tc.Error = rpc.ErrShutdown\n\n\t\tgo func() {\n\t\t\tc.Done <- c\n\t\t}()\n\t\treturn\n\t}\n\n\tr.reconnectMutex.RUnlock()\n\n\tdc := make(chan *rpc.Call, cap(done))\n\t\/\/ make rpc call\n\tr.clientMutex.RLock()\n\tc2 := r.RetryReconnectEndpoint.Go(args, reply, dc)\n\tr.clientMutex.RUnlock()\n\n\tgo func() {\n\t\tres := <-c2.Done\n\n\t\tif res.Error != nil && res.Error == rpc.ErrShutdown || res.Error == io.EOF || res.Error == io.ErrUnexpectedEOF {\n\t\t\t_, res.Error = r.NewClient()\n\t\t\tif res.Error != nil {\n\t\t\t\tc.Done <- res\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ RETRY\n\t\t\tr.clientMutex.RLock()\n\t\t\tc2 = r.RetryReconnectEndpoint.Go(args, reply, dc)\n\t\t\tr.clientMutex.RUnlock()\n\n\t\t\tres = <-c2.Done\n\t\t}\n\n\t\tc.Done <- res\n\t}()\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"github.com\/spf13\/cobra\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\t\"k8s.io\/client-go\/tools\/clientcmd\"\n\n\t\"github.com\/argoproj\/argo-cd\/common\"\n\t\"github.com\/argoproj\/argo-cd\/errors\"\n\tappclientset \"github.com\/argoproj\/argo-cd\/pkg\/client\/clientset\/versioned\"\n\t\"github.com\/argoproj\/argo-cd\/reposerver\"\n\t\"github.com\/argoproj\/argo-cd\/server\"\n\t\"github.com\/argoproj\/argo-cd\/util\/cache\"\n\t\"github.com\/argoproj\/argo-cd\/util\/cli\"\n\t\"github.com\/argoproj\/argo-cd\/util\/stats\"\n\t\"github.com\/argoproj\/argo-cd\/util\/tls\"\n)\n\n\/\/ NewCommand returns a new instance of an argocd command\nfunc NewCommand() *cobra.Command {\n\tvar (\n\t\tinsecure bool\n\t\tlistenPort int\n\t\tmetricsPort int\n\t\tlogLevel string\n\t\tglogLevel int\n\t\tclientConfig clientcmd.ClientConfig\n\t\tstaticAssetsDir string\n\t\tbaseHRef string\n\t\trepoServerAddress string\n\t\tdexServerAddress string\n\t\tdisableAuth bool\n\t\ttlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)\n\t\tcacheSrc func() (*cache.Cache, error)\n\t)\n\tvar command = &cobra.Command{\n\t\tUse: cliName,\n\t\tShort: \"Run the argocd API server\",\n\t\tLong: \"Run the argocd API server\",\n\t\tRun: func(c *cobra.Command, args []string) {\n\t\t\tcli.SetLogLevel(logLevel)\n\t\t\tcli.SetGLogLevel(glogLevel)\n\n\t\t\tconfig, err := clientConfig.ClientConfig()\n\t\t\terrors.CheckError(err)\n\t\t\tconfig.QPS = common.K8sClientConfigQPS\n\t\t\tconfig.Burst = common.K8sClientConfigBurst\n\n\t\t\tnamespace, _, err := clientConfig.Namespace()\n\t\t\terrors.CheckError(err)\n\n\t\t\ttlsConfigCustomizer, err := tlsConfigCustomizerSrc()\n\t\t\terrors.CheckError(err)\n\t\t\tcache, err := cacheSrc()\n\t\t\terrors.CheckError(err)\n\n\t\t\tkubeclientset := kubernetes.NewForConfigOrDie(config)\n\t\t\tappclientset := appclientset.NewForConfigOrDie(config)\n\t\t\trepoclientset := reposerver.NewRepoServerClientset(repoServerAddress, 0)\n\n\t\t\targoCDOpts := server.ArgoCDServerOpts{\n\t\t\t\tInsecure: insecure,\n\t\t\t\tListenPort: listenPort,\n\t\t\t\tMetricsPort: metricsPort,\n\t\t\t\tNamespace: namespace,\n\t\t\t\tStaticAssetsDir: staticAssetsDir,\n\t\t\t\tBaseHRef: baseHRef,\n\t\t\t\tKubeClientset: kubeclientset,\n\t\t\t\tAppClientset: appclientset,\n\t\t\t\tRepoClientset: repoclientset,\n\t\t\t\tDexServerAddr: dexServerAddress,\n\t\t\t\tDisableAuth: disableAuth,\n\t\t\t\tTLSConfigCustomizer: tlsConfigCustomizer,\n\t\t\t\tCache: cache,\n\t\t\t}\n\n\t\t\tstats.RegisterStackDumper()\n\t\t\tstats.StartStatsTicker(10 * time.Minute)\n\t\t\tstats.RegisterHeapDumper(\"memprofile\")\n\n\t\t\tfor {\n\t\t\t\tctx := context.Background()\n\t\t\t\tctx, cancel := context.WithCancel(ctx)\n\t\t\t\targocd := server.NewServer(ctx, argoCDOpts)\n\t\t\t\targocd.Run(ctx, listenPort, metricsPort)\n\t\t\t\tcancel()\n\t\t\t}\n\t\t},\n\t}\n\n\tclientConfig = cli.AddKubectlFlagsToCmd(command)\n\tcommand.Flags().BoolVar(&insecure, \"insecure\", false, \"Run server without TLS\")\n\tcommand.Flags().StringVar(&staticAssetsDir, \"staticassets\", \"\", \"Static assets directory path\")\n\tcommand.Flags().StringVar(&baseHRef, \"basehref\", \"\/\", \"Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from \/\")\n\tcommand.Flags().StringVar(&logLevel, \"loglevel\", \"info\", \"Set the logging level. One of: debug|info|warn|error\")\n\tcommand.Flags().IntVar(&glogLevel, \"gloglevel\", 0, \"Set the glog logging level\")\n\tcommand.Flags().StringVar(&repoServerAddress, \"repo-server\", common.DefaultRepoServerAddr, \"Repo server address\")\n\tcommand.Flags().StringVar(&dexServerAddress, \"dex-server\", common.DefaultDexServerAddr, \"Dex server address\")\n\tcommand.Flags().BoolVar(&disableAuth, \"disable-auth\", false, \"Disable client authentication\")\n\tcommand.AddCommand(cli.NewVersionCmd(cliName))\n\tcommand.Flags().IntVar(&listenPort, \"port\", common.DefaultPortAPIServer, \"Listen on given port\")\n\tcommand.Flags().IntVar(&metricsPort, \"metrics-port\", common.DefaultPortArgoCDAPIServerMetrics, \"Start metrics on given port\")\n\ttlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(command)\n\tcacheSrc = cache.AddCacheFlagsToCmd(command)\n\treturn command\n}\n<commit_msg>Issue #1820 - Make sure api server to repo server grpc calls have timeout (#1832)<commit_after>package commands\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"github.com\/spf13\/cobra\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\t\"k8s.io\/client-go\/tools\/clientcmd\"\n\n\t\"github.com\/argoproj\/argo-cd\/common\"\n\t\"github.com\/argoproj\/argo-cd\/errors\"\n\tappclientset \"github.com\/argoproj\/argo-cd\/pkg\/client\/clientset\/versioned\"\n\t\"github.com\/argoproj\/argo-cd\/reposerver\"\n\t\"github.com\/argoproj\/argo-cd\/server\"\n\t\"github.com\/argoproj\/argo-cd\/util\/cache\"\n\t\"github.com\/argoproj\/argo-cd\/util\/cli\"\n\t\"github.com\/argoproj\/argo-cd\/util\/stats\"\n\t\"github.com\/argoproj\/argo-cd\/util\/tls\"\n)\n\n\/\/ NewCommand returns a new instance of an argocd command\nfunc NewCommand() *cobra.Command {\n\tvar (\n\t\tinsecure bool\n\t\tlistenPort int\n\t\tmetricsPort int\n\t\tlogLevel string\n\t\tglogLevel int\n\t\tclientConfig clientcmd.ClientConfig\n\t\trepoServerTimeoutSeconds int\n\t\tstaticAssetsDir string\n\t\tbaseHRef string\n\t\trepoServerAddress string\n\t\tdexServerAddress string\n\t\tdisableAuth bool\n\t\ttlsConfigCustomizerSrc func() (tls.ConfigCustomizer, error)\n\t\tcacheSrc func() (*cache.Cache, error)\n\t)\n\tvar command = &cobra.Command{\n\t\tUse: cliName,\n\t\tShort: \"Run the argocd API server\",\n\t\tLong: \"Run the argocd API server\",\n\t\tRun: func(c *cobra.Command, args []string) {\n\t\t\tcli.SetLogLevel(logLevel)\n\t\t\tcli.SetGLogLevel(glogLevel)\n\n\t\t\tconfig, err := clientConfig.ClientConfig()\n\t\t\terrors.CheckError(err)\n\t\t\tconfig.QPS = common.K8sClientConfigQPS\n\t\t\tconfig.Burst = common.K8sClientConfigBurst\n\n\t\t\tnamespace, _, err := clientConfig.Namespace()\n\t\t\terrors.CheckError(err)\n\n\t\t\ttlsConfigCustomizer, err := tlsConfigCustomizerSrc()\n\t\t\terrors.CheckError(err)\n\t\t\tcache, err := cacheSrc()\n\t\t\terrors.CheckError(err)\n\n\t\t\tkubeclientset := kubernetes.NewForConfigOrDie(config)\n\t\t\tappclientset := appclientset.NewForConfigOrDie(config)\n\t\t\trepoclientset := reposerver.NewRepoServerClientset(repoServerAddress, repoServerTimeoutSeconds)\n\n\t\t\targoCDOpts := server.ArgoCDServerOpts{\n\t\t\t\tInsecure: insecure,\n\t\t\t\tListenPort: listenPort,\n\t\t\t\tMetricsPort: metricsPort,\n\t\t\t\tNamespace: namespace,\n\t\t\t\tStaticAssetsDir: staticAssetsDir,\n\t\t\t\tBaseHRef: baseHRef,\n\t\t\t\tKubeClientset: kubeclientset,\n\t\t\t\tAppClientset: appclientset,\n\t\t\t\tRepoClientset: repoclientset,\n\t\t\t\tDexServerAddr: dexServerAddress,\n\t\t\t\tDisableAuth: disableAuth,\n\t\t\t\tTLSConfigCustomizer: tlsConfigCustomizer,\n\t\t\t\tCache: cache,\n\t\t\t}\n\n\t\t\tstats.RegisterStackDumper()\n\t\t\tstats.StartStatsTicker(10 * time.Minute)\n\t\t\tstats.RegisterHeapDumper(\"memprofile\")\n\n\t\t\tfor {\n\t\t\t\tctx := context.Background()\n\t\t\t\tctx, cancel := context.WithCancel(ctx)\n\t\t\t\targocd := server.NewServer(ctx, argoCDOpts)\n\t\t\t\targocd.Run(ctx, listenPort, metricsPort)\n\t\t\t\tcancel()\n\t\t\t}\n\t\t},\n\t}\n\n\tclientConfig = cli.AddKubectlFlagsToCmd(command)\n\tcommand.Flags().BoolVar(&insecure, \"insecure\", false, \"Run server without TLS\")\n\tcommand.Flags().StringVar(&staticAssetsDir, \"staticassets\", \"\", \"Static assets directory path\")\n\tcommand.Flags().StringVar(&baseHRef, \"basehref\", \"\/\", \"Value for base href in index.html. Used if Argo CD is running behind reverse proxy under subpath different from \/\")\n\tcommand.Flags().StringVar(&logLevel, \"loglevel\", \"info\", \"Set the logging level. One of: debug|info|warn|error\")\n\tcommand.Flags().IntVar(&glogLevel, \"gloglevel\", 0, \"Set the glog logging level\")\n\tcommand.Flags().StringVar(&repoServerAddress, \"repo-server\", common.DefaultRepoServerAddr, \"Repo server address\")\n\tcommand.Flags().StringVar(&dexServerAddress, \"dex-server\", common.DefaultDexServerAddr, \"Dex server address\")\n\tcommand.Flags().BoolVar(&disableAuth, \"disable-auth\", false, \"Disable client authentication\")\n\tcommand.AddCommand(cli.NewVersionCmd(cliName))\n\tcommand.Flags().IntVar(&listenPort, \"port\", common.DefaultPortAPIServer, \"Listen on given port\")\n\tcommand.Flags().IntVar(&metricsPort, \"metrics-port\", common.DefaultPortArgoCDAPIServerMetrics, \"Start metrics on given port\")\n\tcommand.Flags().IntVar(&repoServerTimeoutSeconds, \"repo-server-timeout-seconds\", 60, \"Repo server RPC call timeout seconds.\")\n\ttlsConfigCustomizerSrc = tls.AddTLSFlagsToCmd(command)\n\tcacheSrc = cache.AddCacheFlagsToCmd(command)\n\treturn command\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage http\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ This implementation is done according to RFC 6265:\n\/\/\n\/\/ http:\/\/tools.ietf.org\/html\/rfc6265\n\n\/\/ A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an\n\/\/ HTTP response or the Cookie header of an HTTP request.\ntype Cookie struct {\n\tName string\n\tValue string\n\tPath string\n\tDomain string\n\tExpires time.Time\n\tRawExpires string\n\n\t\/\/ MaxAge=0 means no 'Max-Age' attribute specified. \n\t\/\/ MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0'\n\t\/\/ MaxAge>0 means Max-Age attribute present and given in seconds\n\tMaxAge int\n\tSecure bool\n\tHttpOnly bool\n\tRaw string\n\tUnparsed []string \/\/ Raw text of unparsed attribute-value pairs\n}\n\n\/\/ readSetCookies parses all \"Set-Cookie\" values from\n\/\/ the header h and returns the successfully parsed Cookies.\nfunc readSetCookies(h Header) []*Cookie {\n\tcookies := []*Cookie{}\n\tfor _, line := range h[\"Set-Cookie\"] {\n\t\tparts := strings.Split(strings.TrimSpace(line), \";\")\n\t\tif len(parts) == 1 && parts[0] == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tparts[0] = strings.TrimSpace(parts[0])\n\t\tj := strings.Index(parts[0], \"=\")\n\t\tif j < 0 {\n\t\t\tcontinue\n\t\t}\n\t\tname, value := parts[0][:j], parts[0][j+1:]\n\t\tif !isCookieNameValid(name) {\n\t\t\tcontinue\n\t\t}\n\t\tvalue, success := parseCookieValue(value)\n\t\tif !success {\n\t\t\tcontinue\n\t\t}\n\t\tc := &Cookie{\n\t\t\tName: name,\n\t\t\tValue: value,\n\t\t\tRaw: line,\n\t\t}\n\t\tfor i := 1; i < len(parts); i++ {\n\t\t\tparts[i] = strings.TrimSpace(parts[i])\n\t\t\tif len(parts[i]) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tattr, val := parts[i], \"\"\n\t\t\tif j := strings.Index(attr, \"=\"); j >= 0 {\n\t\t\t\tattr, val = attr[:j], attr[j+1:]\n\t\t\t}\n\t\t\tlowerAttr := strings.ToLower(attr)\n\t\t\tparseCookieValueFn := parseCookieValue\n\t\t\tif lowerAttr == \"expires\" {\n\t\t\t\tparseCookieValueFn = parseCookieExpiresValue\n\t\t\t}\n\t\t\tval, success = parseCookieValueFn(val)\n\t\t\tif !success {\n\t\t\t\tc.Unparsed = append(c.Unparsed, parts[i])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch lowerAttr {\n\t\t\tcase \"secure\":\n\t\t\t\tc.Secure = true\n\t\t\t\tcontinue\n\t\t\tcase \"httponly\":\n\t\t\t\tc.HttpOnly = true\n\t\t\t\tcontinue\n\t\t\tcase \"domain\":\n\t\t\t\tc.Domain = val\n\t\t\t\t\/\/ TODO: Add domain parsing\n\t\t\t\tcontinue\n\t\t\tcase \"max-age\":\n\t\t\t\tsecs, err := strconv.Atoi(val)\n\t\t\t\tif err != nil || secs < 0 || secs != 0 && val[0] == '0' {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif secs <= 0 {\n\t\t\t\t\tc.MaxAge = -1\n\t\t\t\t} else {\n\t\t\t\t\tc.MaxAge = secs\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\tcase \"expires\":\n\t\t\t\tc.RawExpires = val\n\t\t\t\texptime, err := time.Parse(time.RFC1123, val)\n\t\t\t\tif err != nil {\n\t\t\t\t\texptime, err = time.Parse(\"Mon, 02-Jan-2006 15:04:05 MST\", val)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tc.Expires = time.Time{}\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tc.Expires = exptime.UTC()\n\t\t\t\tcontinue\n\t\t\tcase \"path\":\n\t\t\t\tc.Path = val\n\t\t\t\t\/\/ TODO: Add path parsing\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.Unparsed = append(c.Unparsed, parts[i])\n\t\t}\n\t\tcookies = append(cookies, c)\n\t}\n\treturn cookies\n}\n\n\/\/ SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers.\nfunc SetCookie(w ResponseWriter, cookie *Cookie) {\n\tw.Header().Add(\"Set-Cookie\", cookie.String())\n}\n\n\/\/ String returns the serialization of the cookie for use in a Cookie\n\/\/ header (if only Name and Value are set) or a Set-Cookie response\n\/\/ header (if other fields are set).\nfunc (c *Cookie) String() string {\n\tvar b bytes.Buffer\n\tfmt.Fprintf(&b, \"%s=%s\", sanitizeName(c.Name), sanitizeValue(c.Value))\n\tif len(c.Path) > 0 {\n\t\tfmt.Fprintf(&b, \"; Path=%s\", sanitizeValue(c.Path))\n\t}\n\tif len(c.Domain) > 0 {\n\t\tfmt.Fprintf(&b, \"; Domain=%s\", sanitizeValue(c.Domain))\n\t}\n\tif c.Expires.Unix() > 0 {\n\t\tfmt.Fprintf(&b, \"; Expires=%s\", c.Expires.UTC().Format(time.RFC1123))\n\t}\n\tif c.MaxAge > 0 {\n\t\tfmt.Fprintf(&b, \"; Max-Age=%d\", c.MaxAge)\n\t} else if c.MaxAge < 0 {\n\t\tfmt.Fprintf(&b, \"; Max-Age=0\")\n\t}\n\tif c.HttpOnly {\n\t\tfmt.Fprintf(&b, \"; HttpOnly\")\n\t}\n\tif c.Secure {\n\t\tfmt.Fprintf(&b, \"; Secure\")\n\t}\n\treturn b.String()\n}\n\n\/\/ readCookies parses all \"Cookie\" values from the header h and\n\/\/ returns the successfully parsed Cookies.\n\/\/\n\/\/ if filter isn't empty, only cookies of that name are returned\nfunc readCookies(h Header, filter string) []*Cookie {\n\tcookies := []*Cookie{}\n\tlines, ok := h[\"Cookie\"]\n\tif !ok {\n\t\treturn cookies\n\t}\n\n\tfor _, line := range lines {\n\t\tparts := strings.Split(strings.TrimSpace(line), \";\")\n\t\tif len(parts) == 1 && parts[0] == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Per-line attributes\n\t\tparsedPairs := 0\n\t\tfor i := 0; i < len(parts); i++ {\n\t\t\tparts[i] = strings.TrimSpace(parts[i])\n\t\t\tif len(parts[i]) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tname, val := parts[i], \"\"\n\t\t\tif j := strings.Index(name, \"=\"); j >= 0 {\n\t\t\t\tname, val = name[:j], name[j+1:]\n\t\t\t}\n\t\t\tif !isCookieNameValid(name) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif filter != \"\" && filter != name {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tval, success := parseCookieValue(val)\n\t\t\tif !success {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcookies = append(cookies, &Cookie{Name: name, Value: val})\n\t\t\tparsedPairs++\n\t\t}\n\t}\n\treturn cookies\n}\n\nvar cookieNameSanitizer = strings.NewReplacer(\"\\n\", \"-\", \"\\r\", \"-\")\n\nfunc sanitizeName(n string) string {\n\treturn cookieNameSanitizer.Replace(n)\n}\n\nvar cookieValueSanitizer = strings.NewReplacer(\"\\n\", \" \", \"\\r\", \" \", \";\", \" \")\n\nfunc sanitizeValue(v string) string {\n\treturn cookieValueSanitizer.Replace(v)\n}\n\nfunc unquoteCookieValue(v string) string {\n\tif len(v) > 1 && v[0] == '\"' && v[len(v)-1] == '\"' {\n\t\treturn v[1 : len(v)-1]\n\t}\n\treturn v\n}\n\nfunc isCookieByte(c byte) bool {\n\tswitch {\n\tcase c == 0x21, 0x23 <= c && c <= 0x2b, 0x2d <= c && c <= 0x3a,\n\t\t0x3c <= c && c <= 0x5b, 0x5d <= c && c <= 0x7e:\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc isCookieExpiresByte(c byte) (ok bool) {\n\treturn isCookieByte(c) || c == ',' || c == ' '\n}\n\nfunc parseCookieValue(raw string) (string, bool) {\n\treturn parseCookieValueUsing(raw, isCookieByte)\n}\n\nfunc parseCookieExpiresValue(raw string) (string, bool) {\n\treturn parseCookieValueUsing(raw, isCookieExpiresByte)\n}\n\nfunc parseCookieValueUsing(raw string, validByte func(byte) bool) (string, bool) {\n\traw = unquoteCookieValue(raw)\n\tfor i := 0; i < len(raw); i++ {\n\t\tif !validByte(raw[i]) {\n\t\t\treturn \"\", false\n\t\t}\n\t}\n\treturn raw, true\n}\n\nfunc isCookieNameValid(raw string) bool {\n\tfor _, c := range raw {\n\t\tif !isToken(byte(c)) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n<commit_msg>http: Allow cookies with negative Max-Age attribute as these are allowed by RFC 6265 sec 5.2.2.<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage http\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ This implementation is done according to RFC 6265:\n\/\/\n\/\/ http:\/\/tools.ietf.org\/html\/rfc6265\n\n\/\/ A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an\n\/\/ HTTP response or the Cookie header of an HTTP request.\ntype Cookie struct {\n\tName string\n\tValue string\n\tPath string\n\tDomain string\n\tExpires time.Time\n\tRawExpires string\n\n\t\/\/ MaxAge=0 means no 'Max-Age' attribute specified. \n\t\/\/ MaxAge<0 means delete cookie now, equivalently 'Max-Age: 0'\n\t\/\/ MaxAge>0 means Max-Age attribute present and given in seconds\n\tMaxAge int\n\tSecure bool\n\tHttpOnly bool\n\tRaw string\n\tUnparsed []string \/\/ Raw text of unparsed attribute-value pairs\n}\n\n\/\/ readSetCookies parses all \"Set-Cookie\" values from\n\/\/ the header h and returns the successfully parsed Cookies.\nfunc readSetCookies(h Header) []*Cookie {\n\tcookies := []*Cookie{}\n\tfor _, line := range h[\"Set-Cookie\"] {\n\t\tparts := strings.Split(strings.TrimSpace(line), \";\")\n\t\tif len(parts) == 1 && parts[0] == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tparts[0] = strings.TrimSpace(parts[0])\n\t\tj := strings.Index(parts[0], \"=\")\n\t\tif j < 0 {\n\t\t\tcontinue\n\t\t}\n\t\tname, value := parts[0][:j], parts[0][j+1:]\n\t\tif !isCookieNameValid(name) {\n\t\t\tcontinue\n\t\t}\n\t\tvalue, success := parseCookieValue(value)\n\t\tif !success {\n\t\t\tcontinue\n\t\t}\n\t\tc := &Cookie{\n\t\t\tName: name,\n\t\t\tValue: value,\n\t\t\tRaw: line,\n\t\t}\n\t\tfor i := 1; i < len(parts); i++ {\n\t\t\tparts[i] = strings.TrimSpace(parts[i])\n\t\t\tif len(parts[i]) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tattr, val := parts[i], \"\"\n\t\t\tif j := strings.Index(attr, \"=\"); j >= 0 {\n\t\t\t\tattr, val = attr[:j], attr[j+1:]\n\t\t\t}\n\t\t\tlowerAttr := strings.ToLower(attr)\n\t\t\tparseCookieValueFn := parseCookieValue\n\t\t\tif lowerAttr == \"expires\" {\n\t\t\t\tparseCookieValueFn = parseCookieExpiresValue\n\t\t\t}\n\t\t\tval, success = parseCookieValueFn(val)\n\t\t\tif !success {\n\t\t\t\tc.Unparsed = append(c.Unparsed, parts[i])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch lowerAttr {\n\t\t\tcase \"secure\":\n\t\t\t\tc.Secure = true\n\t\t\t\tcontinue\n\t\t\tcase \"httponly\":\n\t\t\t\tc.HttpOnly = true\n\t\t\t\tcontinue\n\t\t\tcase \"domain\":\n\t\t\t\tc.Domain = val\n\t\t\t\t\/\/ TODO: Add domain parsing\n\t\t\t\tcontinue\n\t\t\tcase \"max-age\":\n\t\t\t\tsecs, err := strconv.Atoi(val)\n\t\t\t\tif err != nil || secs != 0 && val[0] == '0' {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif secs <= 0 {\n\t\t\t\t\tc.MaxAge = -1\n\t\t\t\t} else {\n\t\t\t\t\tc.MaxAge = secs\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\tcase \"expires\":\n\t\t\t\tc.RawExpires = val\n\t\t\t\texptime, err := time.Parse(time.RFC1123, val)\n\t\t\t\tif err != nil {\n\t\t\t\t\texptime, err = time.Parse(\"Mon, 02-Jan-2006 15:04:05 MST\", val)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tc.Expires = time.Time{}\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tc.Expires = exptime.UTC()\n\t\t\t\tcontinue\n\t\t\tcase \"path\":\n\t\t\t\tc.Path = val\n\t\t\t\t\/\/ TODO: Add path parsing\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.Unparsed = append(c.Unparsed, parts[i])\n\t\t}\n\t\tcookies = append(cookies, c)\n\t}\n\treturn cookies\n}\n\n\/\/ SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers.\nfunc SetCookie(w ResponseWriter, cookie *Cookie) {\n\tw.Header().Add(\"Set-Cookie\", cookie.String())\n}\n\n\/\/ String returns the serialization of the cookie for use in a Cookie\n\/\/ header (if only Name and Value are set) or a Set-Cookie response\n\/\/ header (if other fields are set).\nfunc (c *Cookie) String() string {\n\tvar b bytes.Buffer\n\tfmt.Fprintf(&b, \"%s=%s\", sanitizeName(c.Name), sanitizeValue(c.Value))\n\tif len(c.Path) > 0 {\n\t\tfmt.Fprintf(&b, \"; Path=%s\", sanitizeValue(c.Path))\n\t}\n\tif len(c.Domain) > 0 {\n\t\tfmt.Fprintf(&b, \"; Domain=%s\", sanitizeValue(c.Domain))\n\t}\n\tif c.Expires.Unix() > 0 {\n\t\tfmt.Fprintf(&b, \"; Expires=%s\", c.Expires.UTC().Format(time.RFC1123))\n\t}\n\tif c.MaxAge > 0 {\n\t\tfmt.Fprintf(&b, \"; Max-Age=%d\", c.MaxAge)\n\t} else if c.MaxAge < 0 {\n\t\tfmt.Fprintf(&b, \"; Max-Age=0\")\n\t}\n\tif c.HttpOnly {\n\t\tfmt.Fprintf(&b, \"; HttpOnly\")\n\t}\n\tif c.Secure {\n\t\tfmt.Fprintf(&b, \"; Secure\")\n\t}\n\treturn b.String()\n}\n\n\/\/ readCookies parses all \"Cookie\" values from the header h and\n\/\/ returns the successfully parsed Cookies.\n\/\/\n\/\/ if filter isn't empty, only cookies of that name are returned\nfunc readCookies(h Header, filter string) []*Cookie {\n\tcookies := []*Cookie{}\n\tlines, ok := h[\"Cookie\"]\n\tif !ok {\n\t\treturn cookies\n\t}\n\n\tfor _, line := range lines {\n\t\tparts := strings.Split(strings.TrimSpace(line), \";\")\n\t\tif len(parts) == 1 && parts[0] == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Per-line attributes\n\t\tparsedPairs := 0\n\t\tfor i := 0; i < len(parts); i++ {\n\t\t\tparts[i] = strings.TrimSpace(parts[i])\n\t\t\tif len(parts[i]) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tname, val := parts[i], \"\"\n\t\t\tif j := strings.Index(name, \"=\"); j >= 0 {\n\t\t\t\tname, val = name[:j], name[j+1:]\n\t\t\t}\n\t\t\tif !isCookieNameValid(name) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif filter != \"\" && filter != name {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tval, success := parseCookieValue(val)\n\t\t\tif !success {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcookies = append(cookies, &Cookie{Name: name, Value: val})\n\t\t\tparsedPairs++\n\t\t}\n\t}\n\treturn cookies\n}\n\nvar cookieNameSanitizer = strings.NewReplacer(\"\\n\", \"-\", \"\\r\", \"-\")\n\nfunc sanitizeName(n string) string {\n\treturn cookieNameSanitizer.Replace(n)\n}\n\nvar cookieValueSanitizer = strings.NewReplacer(\"\\n\", \" \", \"\\r\", \" \", \";\", \" \")\n\nfunc sanitizeValue(v string) string {\n\treturn cookieValueSanitizer.Replace(v)\n}\n\nfunc unquoteCookieValue(v string) string {\n\tif len(v) > 1 && v[0] == '\"' && v[len(v)-1] == '\"' {\n\t\treturn v[1 : len(v)-1]\n\t}\n\treturn v\n}\n\nfunc isCookieByte(c byte) bool {\n\tswitch {\n\tcase c == 0x21, 0x23 <= c && c <= 0x2b, 0x2d <= c && c <= 0x3a,\n\t\t0x3c <= c && c <= 0x5b, 0x5d <= c && c <= 0x7e:\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc isCookieExpiresByte(c byte) (ok bool) {\n\treturn isCookieByte(c) || c == ',' || c == ' '\n}\n\nfunc parseCookieValue(raw string) (string, bool) {\n\treturn parseCookieValueUsing(raw, isCookieByte)\n}\n\nfunc parseCookieExpiresValue(raw string) (string, bool) {\n\treturn parseCookieValueUsing(raw, isCookieExpiresByte)\n}\n\nfunc parseCookieValueUsing(raw string, validByte func(byte) bool) (string, bool) {\n\traw = unquoteCookieValue(raw)\n\tfor i := 0; i < len(raw); i++ {\n\t\tif !validByte(raw[i]) {\n\t\t\treturn \"\", false\n\t\t}\n\t}\n\treturn raw, true\n}\n\nfunc isCookieNameValid(raw string) bool {\n\tfor _, c := range raw {\n\t\tif !isToken(byte(c)) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package atomic provides low-level atomic memory primitives\n\/\/ useful for implementing synchronization algorithms.\n\/\/\n\/\/ These functions require great care to be used correctly.\n\/\/ Except for special, low-level applications, synchronization is better\n\/\/ done with channels or the facilities of the sync package.\n\/\/ Share memory by communicating;\n\/\/ don't communicate by sharing memory.\n\/\/\n\/\/ The compare-and-swap operation, implemented by the CompareAndSwapT\n\/\/ functions, is the atomic equivalent of:\n\/\/\n\/\/\tif *val == old {\n\/\/\t\t*val = new\n\/\/\t\treturn true\n\/\/\t}\n\/\/\treturn false\n\/\/\npackage atomic\n\n\/\/ BUG(rsc):\n\/\/ On ARM, the 64-bit functions use instructions unavailable before ARM 11.\n\/\/\n\/\/ On x86-32, the 64-bit functions use instructions unavailable before the Pentium.\n\n\/\/ CompareAndSwapInt32 executes the compare-and-swap operation for an int32 value.\nfunc CompareAndSwapInt32(val *int32, old, new int32) (swapped bool)\n\n\/\/ CompareAndSwapInt64 executes the compare-and-swap operation for an int64 value.\nfunc CompareAndSwapInt64(val *int64, old, new int64) (swapped bool)\n\n\/\/ CompareAndSwapUint32 executes the compare-and-swap operation for a uint32 value.\nfunc CompareAndSwapUint32(val *uint32, old, new uint32) (swapped bool)\n\n\/\/ CompareAndSwapUint64 executes the compare-and-swap operation for a uint64 value.\nfunc CompareAndSwapUint64(val *uint64, old, new uint64) (swapped bool)\n\n\/\/ CompareAndSwapUintptr executes the compare-and-swap operation for a uintptr value.\nfunc CompareAndSwapUintptr(val *uintptr, old, new uintptr) (swapped bool)\n\n\/\/ AddInt32 atomically adds delta to *val and returns the new value.\nfunc AddInt32(val *int32, delta int32) (new int32)\n\n\/\/ AddUint32 atomically adds delta to *val and returns the new value.\nfunc AddUint32(val *uint32, delta uint32) (new uint32)\n\n\/\/ AddInt64 atomically adds delta to *val and returns the new value.\nfunc AddInt64(val *int64, delta int64) (new int64)\n\n\/\/ AddUint64 atomically adds delta to *val and returns the new value.\nfunc AddUint64(val *uint64, delta uint64) (new uint64)\n\n\/\/ AddUintptr atomically adds delta to *val and returns the new value.\nfunc AddUintptr(val *uintptr, delta uintptr) (new uintptr)\n<commit_msg>sync\/atomic: make BUG comment godoc-compatible<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package atomic provides low-level atomic memory primitives\n\/\/ useful for implementing synchronization algorithms.\n\/\/\n\/\/ These functions require great care to be used correctly.\n\/\/ Except for special, low-level applications, synchronization is better\n\/\/ done with channels or the facilities of the sync package.\n\/\/ Share memory by communicating;\n\/\/ don't communicate by sharing memory.\n\/\/\n\/\/ The compare-and-swap operation, implemented by the CompareAndSwapT\n\/\/ functions, is the atomic equivalent of:\n\/\/\n\/\/\tif *val == old {\n\/\/\t\t*val = new\n\/\/\t\treturn true\n\/\/\t}\n\/\/\treturn false\n\/\/\npackage atomic\n\n\/\/ BUG(rsc): On ARM, the 64-bit functions use instructions unavailable before ARM 11.\n\/\/\n\/\/ On x86-32, the 64-bit functions use instructions unavailable before the Pentium.\n\n\/\/ CompareAndSwapInt32 executes the compare-and-swap operation for an int32 value.\nfunc CompareAndSwapInt32(val *int32, old, new int32) (swapped bool)\n\n\/\/ CompareAndSwapInt64 executes the compare-and-swap operation for an int64 value.\nfunc CompareAndSwapInt64(val *int64, old, new int64) (swapped bool)\n\n\/\/ CompareAndSwapUint32 executes the compare-and-swap operation for a uint32 value.\nfunc CompareAndSwapUint32(val *uint32, old, new uint32) (swapped bool)\n\n\/\/ CompareAndSwapUint64 executes the compare-and-swap operation for a uint64 value.\nfunc CompareAndSwapUint64(val *uint64, old, new uint64) (swapped bool)\n\n\/\/ CompareAndSwapUintptr executes the compare-and-swap operation for a uintptr value.\nfunc CompareAndSwapUintptr(val *uintptr, old, new uintptr) (swapped bool)\n\n\/\/ AddInt32 atomically adds delta to *val and returns the new value.\nfunc AddInt32(val *int32, delta int32) (new int32)\n\n\/\/ AddUint32 atomically adds delta to *val and returns the new value.\nfunc AddUint32(val *uint32, delta uint32) (new uint32)\n\n\/\/ AddInt64 atomically adds delta to *val and returns the new value.\nfunc AddInt64(val *int64, delta int64) (new int64)\n\n\/\/ AddUint64 atomically adds delta to *val and returns the new value.\nfunc AddUint64(val *uint64, delta uint64) (new uint64)\n\n\/\/ AddUintptr atomically adds delta to *val and returns the new value.\nfunc AddUintptr(val *uintptr, delta uintptr) (new uintptr)\n<|endoftext|>"} {"text":"<commit_before>package todotxt\n\nimport (\n \"testing\"\n \"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestLoadTaskList (t *testing.T) {\n tasklist := LoadTaskList(\"todo.txt\")\n assert.Equal(t, tasklist.Len(), 8, \"Something went wrong with LoadTaskList\")\n}\n\nfunc TestLoadTaskListNonExistent (t *testing.T) {\n defer func(){\n if r:=recover(); r!=nil {\n \/\/ recovered\n } else {\n t.Errorf(\"Something went seriously wrong\")\n }\n }()\n tasklist := LoadTaskList(\"nonexistent-file.txt\")\n\n t.Errorf(\"Something is still wrong %v\", tasklist)\n}\n\nfunc TestCreateTask (t *testing.T) {\n task := CreateTask(1, \"(A) +funny task with prioity and project\")\n\n assert.Equal(t, task.id, 1, \"id should be 1\")\n assert.Equal(t, rune(task.priority), rune('A'), \"priority should be A\")\n\n projects := make([]string, 1)\n projects[0] = \"+funny\"\n\n assert.Equal(t, task.projects, projects, \"there should be a project for sure\")\n assert.Equal(t, task.todo, \"+funny task with prioity and project\", \"todo should equal\")\n\n}\n<commit_msg>added finished task test<commit_after>package todotxt\n\nimport (\n \"testing\"\n \"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestLoadTaskList (t *testing.T) {\n tasklist := LoadTaskList(\"todo.txt\")\n assert.Equal(t, tasklist.Len(), 8, \"Something went wrong with LoadTaskList\")\n}\n\nfunc TestLoadTaskListNonExistent (t *testing.T) {\n defer func(){\n if r:=recover(); r!=nil {\n \/\/ recovered\n } else {\n t.Errorf(\"Something went seriously wrong\")\n }\n }()\n tasklist := LoadTaskList(\"nonexistent-file.txt\")\n\n t.Errorf(\"Something is still wrong %v\", tasklist)\n}\n\nfunc TestCreateTask (t *testing.T) {\n task := CreateTask(1, \"(A) +funny task with prioity and project\")\n\n assert.Equal(t, task.id, 1, \"id should be 1\")\n assert.Equal(t, rune(task.priority), rune('A'), \"priority should be A\")\n\n projects := make([]string, 1)\n projects[0] = \"+funny\"\n\n assert.Equal(t, task.projects, projects, \"there should be a project for sure\")\n assert.Equal(t, task.todo, \"+funny task with prioity and project\", \"todo should equal\")\n\n\n finished_task := CreateTask(1, \"x This is a finished task\")\n\n assert.Equal(t, finished_task.id, 1)\n assert.Equal(t, finished_task.todo, \"This is a finished task\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"strings\"\n\nfunc initRString() *RObject {\n\tobj := &RObject{}\n\tobj.name = \"RString\"\n\tobj.ivars = make(map[string]Object)\n\tobj.class = nil\n\tobj.methods = make(map[string]*RMethod)\n\n\t\/\/ RString method initialization\n\tobj.methods[\"new\"] = &RMethod{gofunc: RString_new}\n\tobj.methods[\"to_s\"] = &RMethod{gofunc: RString_to_s}\n\tobj.methods[\"inspect\"] = &RMethod{gofunc: RString_inspect}\n\tobj.methods[\"size\"] = &RMethod{gofunc: RString_length}\n\tobj.methods[\"len\"] = &RMethod{gofunc: RString_length}\n\tobj.methods[\"split\"] = &RMethod{gofunc: RString_split}\n\n\treturn obj\n}\n\n\/\/ String.new(str='')\n\/\/ v = [string]\nfunc RString_new(vm *GobiesVM, receiver Object, v []Object) Object {\n\tstr := \"\"\n\tif len(v) == 1 {\n\t\tstr = v[0].(string)\n\t}\n\n\tobj := &RObject{}\n\tobj.class = vm.consts[\"RString\"]\n\tobj.val.str = str\n\n\treturn obj\n}\n\nfunc RString_to_s(vm *GobiesVM, receiver Object, v []Object) Object {\n\tobj := receiver.(*RObject)\n\treturn obj.val.str\n}\n\nfunc RString_inspect(vm *GobiesVM, receiver Object, v []Object) Object {\n\tobj := receiver.(*RObject)\n\tstr := obj.val.str\n\tarray := []string{\"'\", str, \"'\"}\n\treturn strings.Join(array, \"\")\n}\n\nfunc RString_length(vm *GobiesVM, receiver Object, v []Object) Object {\n\tobj := receiver.(*RObject)\n\targ := make([]Object, 1, 1)\n\targ[0] = int64(len(obj.val.str))\n\treturn RFixnum_new(vm, receiver, arg)\n}\n\nfunc RString_split(vm *GobiesVM, receiver Object, v []Object) Object {\n\tobj := receiver.(*RObject)\n\tsep := v[0].(*RObject).val.str\n\n\t\/\/ Manually escape linebreak if any\n\tsep = strings.Replace(sep, \"\\\\n\", \"\\n\", -1)\n\n\tstrList := strings.Split(obj.val.str, sep)\n\targ := make([]Object, len(strList), len(strList))\n\tfor i, v := range strList {\n\t\tdummy_arg := make([]Object, 1, 1)\n\t\tdummy_arg[0] = v\n\t\targ[i] = RString_new(vm, nil, dummy_arg)\n\t}\n\treturn RArray_new(vm, nil, arg)\n}\n<commit_msg>[RString] modify split() behavior to fit MRI stdlib<commit_after>package main\n\nimport \"strings\"\n\nfunc initRString() *RObject {\n\tobj := &RObject{}\n\tobj.name = \"RString\"\n\tobj.ivars = make(map[string]Object)\n\tobj.class = nil\n\tobj.methods = make(map[string]*RMethod)\n\n\t\/\/ RString method initialization\n\tobj.methods[\"new\"] = &RMethod{gofunc: RString_new}\n\tobj.methods[\"to_s\"] = &RMethod{gofunc: RString_to_s}\n\tobj.methods[\"inspect\"] = &RMethod{gofunc: RString_inspect}\n\tobj.methods[\"size\"] = &RMethod{gofunc: RString_length}\n\tobj.methods[\"len\"] = &RMethod{gofunc: RString_length}\n\tobj.methods[\"split\"] = &RMethod{gofunc: RString_split}\n\n\treturn obj\n}\n\n\/\/ String.new(str='')\n\/\/ v = [string]\nfunc RString_new(vm *GobiesVM, receiver Object, v []Object) Object {\n\tstr := \"\"\n\tif len(v) == 1 {\n\t\tstr = v[0].(string)\n\t}\n\n\tobj := &RObject{}\n\tobj.class = vm.consts[\"RString\"]\n\tobj.val.str = str\n\n\treturn obj\n}\n\nfunc RString_to_s(vm *GobiesVM, receiver Object, v []Object) Object {\n\tobj := receiver.(*RObject)\n\treturn obj.val.str\n}\n\nfunc RString_inspect(vm *GobiesVM, receiver Object, v []Object) Object {\n\tobj := receiver.(*RObject)\n\tstr := obj.val.str\n\tarray := []string{\"'\", str, \"'\"}\n\treturn strings.Join(array, \"\")\n}\n\nfunc RString_length(vm *GobiesVM, receiver Object, v []Object) Object {\n\tobj := receiver.(*RObject)\n\targ := make([]Object, 1, 1)\n\targ[0] = int64(len(obj.val.str))\n\treturn RFixnum_new(vm, receiver, arg)\n}\n\nfunc RString_split(vm *GobiesVM, receiver Object, v []Object) Object {\n\tobj := receiver.(*RObject)\n\tsep := v[0].(*RObject).val.str\n\tstr := obj.val.str\n\n\t\/\/ Manually unescape linebreak if any\n\tsep = strings.Replace(sep, \"\\\\n\", \"\\n\", -1)\n\n\tstrList := []string{str}\n\n\t\/\/ Split string individually for each separator\n\tfor _, pass := range sep {\n\t\toldStrList := []string{}\n\n\t\tfor _, item := range strList {\n\t\t\toldStrList = append(oldStrList, item)\n\t\t}\n\t\tstrList = []string{}\n\t\tfor _, item := range oldStrList {\n\t\t\ttempStrList := strings.Split(item, string(pass))\n\t\t\tfor _, splitted := range tempStrList {\n\t\t\t\tif len(splitted) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tstrList = append(strList, splitted)\n\t\t\t}\n\t\t}\n\t}\n\n\targ := make([]Object, len(strList), len(strList))\n\tfor i, v := range strList {\n\t\tdummy_arg := make([]Object, 1, 1)\n\t\tdummy_arg[0] = v\n\t\targ[i] = RString_new(vm, nil, dummy_arg)\n\t}\n\treturn RArray_new(vm, nil, arg)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nfunc initRThread() *RObject {\n\tobj := &RObject{}\n\tobj.name = \"RThread\"\n\tobj.ivars = make(map[string]Object)\n\tobj.class = nil\n\tobj.methods = make(map[string]*RMethod)\n\n\t\/\/ RThread method initialization\n\tobj.methods[\"new\"] = &RMethod{gofunc: RThread_new}\n\n\treturn obj\n}\n\nfunc RThread_new(vm *GobiesVM, env *ThreadEnv, receiver Object, v []Object) Object {\n\tvm.transactionEnd(env)\n\n\twg.Add(1)\n\tgo vm.executeThread(v[0].(*RObject).methods[\"def\"].def)\n\n\tvm.transactionBegin(env, []Instruction{})\n\n\treturn nil\n}\n<commit_msg>Thread.new doesn't need to care about transaction<commit_after>package main\n\nfunc initRThread() *RObject {\n\tobj := &RObject{}\n\tobj.name = \"RThread\"\n\tobj.ivars = make(map[string]Object)\n\tobj.class = nil\n\tobj.methods = make(map[string]*RMethod)\n\n\t\/\/ RThread method initialization\n\tobj.methods[\"new\"] = &RMethod{gofunc: RThread_new}\n\n\treturn obj\n}\n\nfunc RThread_new(vm *GobiesVM, env *ThreadEnv, receiver Object, v []Object) Object {\n\twg.Add(1)\n\tgo vm.executeThread(v[0].(*RObject).methods[\"def\"].def, env)\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The GoGo Authors. All rights reserved.\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\n\/\/\n\/\/ Imports\n\/\/ TODO: Get rid of all non libgogo ones.\n\/\/\nimport \"fmt\"\nimport \".\/libgogo\/_obj\/libgogo\"\n\n\/\/\n\/\/ Set of recognized tokens \n\/\/\nconst TOKEN_IDENTIFIER uint64 = 1; \/\/ Identifier\nconst TOKEN_STRING = 2; \/\/ String using \"...\"\nconst TOKEN_EOS = 3; \/\/ End of Scan\nconst TOKEN_LBRAC = 4; \/\/ Left bracket '('\nconst TOKEN_RBRAC = 5; \/\/ Right bracket ')'\nconst TOKEN_LSBRAC = 6; \/\/ Left square bracket '['\nconst TOKEN_RSBRAC = 7; \/\/ Right square bracket ']'\nconst TOKEN_INTEGER = 8; \/\/ Integer number\nconst TOKEN_LCBRAC = 9; \/\/ Left curly bracket '{'\nconst TOKEN_RCBRAC = 10; \/\/ Right curly bracket '}'\nconst TOKEN_PT = 11; \/\/ Point '.'\nconst TOKEN_NOT = 12; \/\/ Single not '!'\nconst TOKEN_NOTEQUAL = 13; \/\/ Comparison, not equal '!='\nconst TOKEN_SEMICOLON = 14; \/\/ Semi-colon ';'\nconst TOKEN_COLON = 15; \/\/ Colon ','\nconst TOKEN_ASSIGN = 16; \/\/ Assignment '='\nconst TOKEN_EQUALS = 17; \/\/ Equal comparison '=='\nconst TOKEN_CHAR = 18; \/\/ Single Quoted Character\nconst TOKEN_REL_AND = 19; \/\/ AND Relation\nconst TOKEN_REL_OR = 20; \/\/ OR Relation\nconst TOKEN_REL_GTOE = 21; \/\/ Greather-Than or Equal\nconst TOKEN_REL_GT = 22; \/\/ Greather-Than\nconst TOKEN_REL_LTOE = 23; \/\/ Less-Than or Equal\nconst TOKEN_REL_LT = 24; \/\/ Less-Than\nconst TOKEN_ARITH_PLUS = 25; \/\/ Arith. Plus\nconst TOKEN_ARITH_MINUS = 26; \/\/ Arith. Minus\n\n\/\/ Advanced tokens, that are generated in the 2nd step from identifiers\nconst TOKEN_FOR = 27;\nconst TOKEN_IF = 28;\n\n\/\/\n\/\/ Token struct holding the relevant data of a parsed token.\n\/\/\ntype Token struct {\n id uint64; \/\/ The id. Is one of TOKEN_*\n value [255]byte; \/\/ If the id requires a value to be stored, it is found here \n value_len uint64; \/\/ Length of the value stored in `value`\n intValue uint64;\n\n nextChar byte; \/\/ Sometime the next char is already read. It is stored here to be re-assigned in the next GetNextToken() round\n};\n\n\/*\n * Function getting the next raw token. May contain token that must be converted\n * before the parser can work with them.\n *\/\nfunc GetNextTokenRaw(fd uint64, oldToken Token) Token {\n var singleChar byte; \/\/ Byte holding the last read value\n \/* \n * Flag indicating whether we are in a comment.\n * 0 for no comment\n * 1 for a single line comment \n * 2 for a multi line comment\n *\/\n var inComment uint64;\n var done uint64; \/\/ Flag indicating whether a cycle (Token) is finsihed \n var spaceDone uint64; \/\/ Flag indicating whether an abolishment cycle is finished \n var newToken Token; \/\/ The new token that is returned\n\n \/\/ Initialize variables\n done = 0;\n spaceDone = 0;\n newToken.id = 0;\n newToken.value_len = 0;\n newToken.nextChar = 0; \n inComment = 0; \n\n \/\/ If the old Token had to read the next char (and stored it), we can now\n \/\/ get it back\n if oldToken.nextChar == 0 { \n singleChar=libgogo.GetChar(fd)\n } else {\n singleChar = oldToken.nextChar;\n }\n\n \/\/ check if it is a valid read, or an EOF\n if singleChar == 0 {\n newToken.id = TOKEN_EOS;\n done = 1;\n spaceDone = 1;\n }\n\n \/\/\n \/\/ Cleaning Tasks\n \/\/ The next part strips out spaces, newlines, tabs, and comments\n \/\/ Comments can either be single line with double slashes (\/\/) or multiline\n \/\/ using C++ syntax \/* *\/ \n \/\/\n for ; spaceDone != 1; {\n\n \/\/ check whether a comment is starting\n if singleChar == '\/' {\n \/\/ if we are in a comment skip the rest, get the next char otherwise\n if inComment == 0 {\n singleChar = libgogo.GetChar(fd); \n if singleChar == '\/' {\n \/\/ we are in a single line comment (until newline is found)\n inComment = 1;\n } else {\n if singleChar == '*' {\n \/\/ we are in a multiline comment (until ending is found)\n inComment = 2;\n } else {\n tmp_error(\">> Scanner: Unkown character combination for comments. Exiting.\");\n }\n }\n }\n } \n\n \/\/ check whether a multi-line comment is ending\n if singleChar == '*' {\n singleChar = libgogo.GetChar(fd);\n if singleChar == '\/' {\n if inComment == 2 {\n inComment = 0;\n singleChar = libgogo.GetChar(fd);\n }\n }\n }\n\n \/\/ if character is a newline:\n \/\/ *) if in a singleline comment, exit the comment\n \/\/ *) skip otherwise\n if singleChar == 10 {\n if inComment == 1 {\n inComment = 0;\n } \n } \n\n \/\/ handle everything that is not a space,tab,newline\n if singleChar != ' ' && singleChar != 9 && singleChar != 10 {\n \/\/ if not in a comment we have our current valid char\n if inComment == 0 {\n spaceDone = 1;\n } \n\n \/\/ check if GetChar() returned EOF while skipping\n if singleChar == 0 {\n newToken.id = TOKEN_EOS;\n spaceDone = 1;\n done = 1;\n } \n }\n \n \n \/\/ if we are not done until now, get a new character and start another abolishing cycle \n if spaceDone == 0 { \n singleChar=libgogo.GetChar(fd);\n }\n }\n\n \/\/\n \/\/ Actual scanning part starts here\n \/\/\n\n \/\/ Catch identifiers\n \/\/ identifier = letter { letter | digit }.\n if (done != 1) && (singleChar >= 'A' && singleChar <= 'Z') || (singleChar >= 'a' && singleChar <= 'z') || singleChar == '_' { \/\/ check for letter or _\n newToken.id = TOKEN_IDENTIFIER;\n \/\/ preceding characters may be letter,_, or a number\n for ; (singleChar >= 'A' && singleChar <= 'Z') || (singleChar >= 'a' && singleChar <= 'z') || singleChar == '_' || (singleChar >= '0' && singleChar <= '9'); singleChar = libgogo.GetChar(fd) {\n newToken.value[newToken.value_len] = singleChar;\n newToken.value_len = newToken.value_len +1;\n }\n newToken.value[newToken.value_len] = 0;\n \/\/ save the last read character for the next GetNextToken() cycle\n newToken.nextChar = singleChar;\n done = 1;\n }\n\n \/\/ string \"...\"\n if (done != 1) && singleChar == '\"' {\n newToken.id = TOKEN_STRING; \n for singleChar = libgogo.GetChar(fd); singleChar != '\"' &&singleChar > 31 && singleChar < 127;singleChar = libgogo.GetChar(fd) {\n newToken.value[newToken.value_len] = singleChar;\n newToken.value_len = newToken.value_len +1;\n }\n newToken.value[newToken.value_len] = 0;\n if singleChar != '\"' {\n tmp_error(\">> Scanner: String not closing. Exiting.\");\n }\n done = 1;\n }\n\n \/\/ Single Quoted Character\n \/\/ Needs to be converted later on\n if (done != 1) && singleChar == 39 {\n newToken.id = TOKEN_CHAR;\n for singleChar = libgogo.GetChar(fd); singleChar != 39 && singleChar > 31 && singleChar < 127;singleChar = libgogo.GetChar(fd) {\n newToken.value[newToken.value_len] = singleChar;\n newToken.value_len = newToken.value_len +1;\n } \n newToken.value[newToken.value_len] = 0;\n if singleChar != 39 {\n tmp_error(\">> Scanner: Character not closing. Exiting.\");\n }\n done = 1;\n }\n\n \/\/ left brace (\n if (done != 1) && singleChar == '(' {\n newToken.id = TOKEN_LBRAC;\n done = 1;\n }\n\n \/\/ right brace )\n if (done != 1) && singleChar == ')' {\n newToken.id = TOKEN_RBRAC;\n done = 1;\n }\n\n \/\/ left square bracket [\n if (done != 1) && singleChar == '[' {\n newToken.id = TOKEN_LSBRAC;\n done = 1; \n }\n \n \/\/ right square bracket ]\n if (done != 1) && singleChar == ']' {\n newToken.id = TOKEN_RSBRAC;\n done = 1;\n }\n\n \/\/ integer\n if (done != 1) && singleChar > 47 && singleChar < 58 {\n newToken.id = TOKEN_INTEGER;\n for ; singleChar > 47 && singleChar < 58 ; singleChar = libgogo.GetChar(fd) {\n newToken.value[newToken.value_len] = singleChar;\n newToken.value_len = newToken.value_len +1;\n }\n newToken.value[newToken.value_len] = 0\n newToken.nextChar = singleChar; \n done = 1;\n }\n\n \/\/ Left curly bracket '{'\n if (done != 1) && singleChar == '{' {\n newToken.id = TOKEN_LCBRAC;\n done = 1;\n }\n \n \/\/ Right curly bracket '}'\n if (done != 1) && singleChar == '}' {\n newToken.id = TOKEN_RCBRAC;\n done = 1;\n }\n\n \/\/ Point '.'\n if (done != 1) && singleChar == '.' {\n newToken.id = TOKEN_PT;\n done = 1;\n }\n\n \/\/ Not ('!') or Not Equal ('!=')\n if (done != 1) && singleChar == '!' {\n singleChar = libgogo.GetChar(fd);\n if singleChar == '=' {\n newToken.id = TOKEN_NOTEQUAL;\n } else {\n newToken.id = TOKEN_NOT;\n newToken.nextChar = singleChar;\n }\n done = 1;\n }\n\n \/\/ Semicolon ';'\n if (done != 1) && singleChar == ';' {\n newToken.id = TOKEN_SEMICOLON;\n done = 1;\n }\n\n \/\/ Colon ','\n if (done != 1) && singleChar == ',' {\n newToken.id = TOKEN_COLON;\n done = 1;\n }\n\n \/\/ Assignment '=' or Equals comparison '=='\n if (done != 1) && singleChar == '=' {\n singleChar = libgogo.GetChar(fd);\n if singleChar == '=' {\n newToken.id = TOKEN_EQUALS;\n } else {\n newToken.id = TOKEN_ASSIGN;\n newToken.nextChar = singleChar;\n }\n done = 1;\n }\n\n \/\/ AND Relation '&&'\n if (done != 1) && singleChar == '&' {\n singleChar = libgogo.GetChar(fd);\n if singleChar == '&' {\n newToken.id = TOKEN_REL_AND;\n } else {\n tmp_error(\">> Scanner: No address operator supported.\");\n }\n done = 1;\n }\n\n \/\/ OR Relation '||'\n if (done != 1) && singleChar == '|' {\n singleChar = libgogo.GetChar(fd);\n if singleChar == '|' {\n newToken.id = TOKEN_REL_OR;\n } else {\n tmp_error(\">> Scanner: No binary OR (|) supported. Only ||.\");\n }\n done = 1;\n } \n\n \/\/ Greater and Greater-Than relation\n if (done != 1) && singleChar == '>' {\n singleChar = libgogo.GetChar(fd);\n if singleChar == '=' {\n newToken.id = TOKEN_REL_GTOE;\n } else {\n newToken.id = TOKEN_REL_GT;\n newToken.nextChar = singleChar;\n } \n done = 1;\n } \n\n \/\/ Less and Less-Than relation\n if (done != 1) && singleChar == '<' {\n singleChar = libgogo.GetChar(fd);\n if singleChar == '=' {\n newToken.id = TOKEN_REL_LTOE;\n } else {\n newToken.id = TOKEN_REL_LT;\n newToken.nextChar = singleChar;\n } \n done = 1;\n } \n\n if (done != 1) && singleChar == '+' {\n newToken.id = TOKEN_ARITH_PLUS;\n done = 1;\n }\n\n if (done != 1) && singleChar == '-' {\n newToken.id = TOKEN_ARITH_MINUS;\n done = 1;\n }\n\n if done != 1 {\n fmt.Printf(\"'%c'\\n\",singleChar);\n tmp_error(\">> Scanner: Unkown char detected. Exiting\");\n }\n\n return newToken;\n}\n\nfunc GetNextToken(fd uint64, oldToken Token) Token {\n var newToken Token;\n newToken = GetNextTokenRaw(fd,oldToken)\n \n \/\/ Convert integer from byte array to integer value\n if newToken.id == TOKEN_INTEGER {\n newToken.intValue = ByteBufToInt(newToken.value,newToken.value_len);\n }\n\n \/\/ Convert single quoted characters to integer\n if newToken.id == TOKEN_CHAR {\n if newToken.value_len != 1 {\n tmp_error (\">> Scanner: Only single characters are supported!\");\n } else {\n newToken.id = TOKEN_INTEGER;\n newToken.intValue = ToIntFromByte(newToken.value[0]);\n }\n }\n\n \/\/ Convert identifier to keyworded tokens (if possible)\n \/\/ <TODO>\n\n return newToken;\n}\n\n\/\/ Move to libgogo?\nfunc ToIntFromByte(b byte) uint64 {\n return uint64(b);\n}\n\n\/\/ Move to libgogo?\nfunc ByteBufToInt(byteBuf [255]byte, bufLen uint64) uint64 {\n var m1 uint64;\n var i uint64; \n var val uint64;\n \n val = 0;\n\n for i = 0; bufLen > 0 ; bufLen = bufLen -1 {\n m1 = pow(10,bufLen);\n val = val + mul( m1, uint64(byteBuf[i]) - 48 );\n i = i +1;\n }\n\n return val;\n}\n\n\/\/ Move to libgogo?\nfunc mul(m1 uint64, m2 uint64) uint64 {\n var val uint64;\n for val = 0 ; m2 > 0 ; m2 = m2 -1 {\n val = val + m1;\n }\n return val;\n}\n\n\/\/ Move to libgogo?\nfunc pow(base uint64, exponent uint64) uint64 {\n var val uint64;\n for val = 1; exponent > 1 ; exponent = exponent -1 {\n val = mul(val,base);\n }\n return val;\n}\n\n\/\/ Move to libgogo?\nfunc charToIntToken(oldToken Token) Token {\n return oldToken;\n}\n\n\/\/ Move something like this to libgogo?\nfunc tmp_print(tok Token) {\n var i int;\n fmt.Printf(\"Token Id: %d\\n\",tok.id);\n if tok.id == TOKEN_IDENTIFIER || tok.id == TOKEN_STRING {\n fmt.Printf(\"Identifier\/String value: \");\n for i=0;tok.value[i] != 0;i=i+1 {\n fmt.Printf(\"%c\",tok.value[i]);\n }\n fmt.Printf(\"\\n\");\n }\n}\n\n\/\/ Move something like this to libgogo?\nfunc tmp_error ( s string) {\n fmt.Printf(\"%s\\n\",s);\n libgogo.Exit(1);\n}\n\n\/\/ Temporary test function\nfunc scanner_test(fd uint64) { \n var tok Token;\n tok.id = 0;\n tok.nextChar = 0;\n\n for tok = GetNextToken(fd,tok); tok.id != TOKEN_EOS; tok = GetNextToken(fd,tok) {\n tmp_print(tok);\n }\n}\n<commit_msg>scanner.go: Output of integer token values and more efficient conversion of strings to the latter (no more need for additional mul and pow routines)<commit_after>\/\/ Copyright 2010 The GoGo Authors. All rights reserved.\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\n\/\/\n\/\/ Imports\n\/\/ TODO: Get rid of all non libgogo ones.\n\/\/\nimport \"fmt\"\nimport \".\/libgogo\/_obj\/libgogo\"\n\n\/\/\n\/\/ Set of recognized tokens \n\/\/\nconst TOKEN_IDENTIFIER uint64 = 1; \/\/ Identifier\nconst TOKEN_STRING = 2; \/\/ String using \"...\"\nconst TOKEN_EOS = 3; \/\/ End of Scan\nconst TOKEN_LBRAC = 4; \/\/ Left bracket '('\nconst TOKEN_RBRAC = 5; \/\/ Right bracket ')'\nconst TOKEN_LSBRAC = 6; \/\/ Left square bracket '['\nconst TOKEN_RSBRAC = 7; \/\/ Right square bracket ']'\nconst TOKEN_INTEGER = 8; \/\/ Integer number\nconst TOKEN_LCBRAC = 9; \/\/ Left curly bracket '{'\nconst TOKEN_RCBRAC = 10; \/\/ Right curly bracket '}'\nconst TOKEN_PT = 11; \/\/ Point '.'\nconst TOKEN_NOT = 12; \/\/ Single not '!'\nconst TOKEN_NOTEQUAL = 13; \/\/ Comparison, not equal '!='\nconst TOKEN_SEMICOLON = 14; \/\/ Semi-colon ';'\nconst TOKEN_COLON = 15; \/\/ Colon ','\nconst TOKEN_ASSIGN = 16; \/\/ Assignment '='\nconst TOKEN_EQUALS = 17; \/\/ Equal comparison '=='\nconst TOKEN_CHAR = 18; \/\/ Single Quoted Character\nconst TOKEN_REL_AND = 19; \/\/ AND Relation\nconst TOKEN_REL_OR = 20; \/\/ OR Relation\nconst TOKEN_REL_GTOE = 21; \/\/ Greather-Than or Equal\nconst TOKEN_REL_GT = 22; \/\/ Greather-Than\nconst TOKEN_REL_LTOE = 23; \/\/ Less-Than or Equal\nconst TOKEN_REL_LT = 24; \/\/ Less-Than\nconst TOKEN_ARITH_PLUS = 25; \/\/ Arith. Plus\nconst TOKEN_ARITH_MINUS = 26; \/\/ Arith. Minus\n\n\/\/ Advanced tokens, that are generated in the 2nd step from identifiers\nconst TOKEN_FOR = 27;\nconst TOKEN_IF = 28;\n\n\/\/\n\/\/ Token struct holding the relevant data of a parsed token.\n\/\/\ntype Token struct {\n id uint64; \/\/ The id. Is one of TOKEN_*\n value [255]byte; \/\/ If the id requires a value to be stored, it is found here \n value_len uint64; \/\/ Length of the value stored in `value`\n intValue uint64;\n\n nextChar byte; \/\/ Sometime the next char is already read. It is stored here to be re-assigned in the next GetNextToken() round\n};\n\n\/*\n * Function getting the next raw token. May contain token that must be converted\n * before the parser can work with them.\n *\/\nfunc GetNextTokenRaw(fd uint64, oldToken Token) Token {\n var singleChar byte; \/\/ Byte holding the last read value\n \/* \n * Flag indicating whether we are in a comment.\n * 0 for no comment\n * 1 for a single line comment \n * 2 for a multi line comment\n *\/\n var inComment uint64;\n var done uint64; \/\/ Flag indicating whether a cycle (Token) is finsihed \n var spaceDone uint64; \/\/ Flag indicating whether an abolishment cycle is finished \n var newToken Token; \/\/ The new token that is returned\n\n \/\/ Initialize variables\n done = 0;\n spaceDone = 0;\n newToken.id = 0;\n newToken.value_len = 0;\n newToken.nextChar = 0; \n inComment = 0; \n\n \/\/ If the old Token had to read the next char (and stored it), we can now\n \/\/ get it back\n if oldToken.nextChar == 0 { \n singleChar=libgogo.GetChar(fd)\n } else {\n singleChar = oldToken.nextChar;\n }\n\n \/\/ check if it is a valid read, or an EOF\n if singleChar == 0 {\n newToken.id = TOKEN_EOS;\n done = 1;\n spaceDone = 1;\n }\n\n \/\/\n \/\/ Cleaning Tasks\n \/\/ The next part strips out spaces, newlines, tabs, and comments\n \/\/ Comments can either be single line with double slashes (\/\/) or multiline\n \/\/ using C++ syntax \/* *\/ \n \/\/\n for ; spaceDone != 1; {\n\n \/\/ check whether a comment is starting\n if singleChar == '\/' {\n \/\/ if we are in a comment skip the rest, get the next char otherwise\n if inComment == 0 {\n singleChar = libgogo.GetChar(fd); \n if singleChar == '\/' {\n \/\/ we are in a single line comment (until newline is found)\n inComment = 1;\n } else {\n if singleChar == '*' {\n \/\/ we are in a multiline comment (until ending is found)\n inComment = 2;\n } else {\n tmp_error(\">> Scanner: Unkown character combination for comments. Exiting.\");\n }\n }\n }\n } \n\n \/\/ check whether a multi-line comment is ending\n if singleChar == '*' {\n singleChar = libgogo.GetChar(fd);\n if singleChar == '\/' {\n if inComment == 2 {\n inComment = 0;\n singleChar = libgogo.GetChar(fd);\n }\n }\n }\n\n \/\/ if character is a newline:\n \/\/ *) if in a singleline comment, exit the comment\n \/\/ *) skip otherwise\n if singleChar == 10 {\n if inComment == 1 {\n inComment = 0;\n } \n } \n\n \/\/ handle everything that is not a space,tab,newline\n if singleChar != ' ' && singleChar != 9 && singleChar != 10 {\n \/\/ if not in a comment we have our current valid char\n if inComment == 0 {\n spaceDone = 1;\n } \n\n \/\/ check if GetChar() returned EOF while skipping\n if singleChar == 0 {\n newToken.id = TOKEN_EOS;\n spaceDone = 1;\n done = 1;\n } \n }\n \n \n \/\/ if we are not done until now, get a new character and start another abolishing cycle \n if spaceDone == 0 { \n singleChar=libgogo.GetChar(fd);\n }\n }\n\n \/\/\n \/\/ Actual scanning part starts here\n \/\/\n\n \/\/ Catch identifiers\n \/\/ identifier = letter { letter | digit }.\n if (done != 1) && (singleChar >= 'A' && singleChar <= 'Z') || (singleChar >= 'a' && singleChar <= 'z') || singleChar == '_' { \/\/ check for letter or _\n newToken.id = TOKEN_IDENTIFIER;\n \/\/ preceding characters may be letter,_, or a number\n for ; (singleChar >= 'A' && singleChar <= 'Z') || (singleChar >= 'a' && singleChar <= 'z') || singleChar == '_' || (singleChar >= '0' && singleChar <= '9'); singleChar = libgogo.GetChar(fd) {\n newToken.value[newToken.value_len] = singleChar;\n newToken.value_len = newToken.value_len +1;\n }\n newToken.value[newToken.value_len] = 0;\n \/\/ save the last read character for the next GetNextToken() cycle\n newToken.nextChar = singleChar;\n done = 1;\n }\n\n \/\/ string \"...\"\n if (done != 1) && singleChar == '\"' {\n newToken.id = TOKEN_STRING; \n for singleChar = libgogo.GetChar(fd); singleChar != '\"' &&singleChar > 31 && singleChar < 127;singleChar = libgogo.GetChar(fd) {\n newToken.value[newToken.value_len] = singleChar;\n newToken.value_len = newToken.value_len +1;\n }\n newToken.value[newToken.value_len] = 0;\n if singleChar != '\"' {\n tmp_error(\">> Scanner: String not closing. Exiting.\");\n }\n done = 1;\n }\n\n \/\/ Single Quoted Character\n \/\/ Needs to be converted later on\n if (done != 1) && singleChar == 39 {\n newToken.id = TOKEN_CHAR;\n for singleChar = libgogo.GetChar(fd); singleChar != 39 && singleChar > 31 && singleChar < 127;singleChar = libgogo.GetChar(fd) {\n newToken.value[newToken.value_len] = singleChar;\n newToken.value_len = newToken.value_len +1;\n } \n newToken.value[newToken.value_len] = 0;\n if singleChar != 39 {\n tmp_error(\">> Scanner: Character not closing. Exiting.\");\n }\n done = 1;\n }\n\n \/\/ left brace (\n if (done != 1) && singleChar == '(' {\n newToken.id = TOKEN_LBRAC;\n done = 1;\n }\n\n \/\/ right brace )\n if (done != 1) && singleChar == ')' {\n newToken.id = TOKEN_RBRAC;\n done = 1;\n }\n\n \/\/ left square bracket [\n if (done != 1) && singleChar == '[' {\n newToken.id = TOKEN_LSBRAC;\n done = 1; \n }\n \n \/\/ right square bracket ]\n if (done != 1) && singleChar == ']' {\n newToken.id = TOKEN_RSBRAC;\n done = 1;\n }\n\n \/\/ integer\n if (done != 1) && singleChar > 47 && singleChar < 58 {\n newToken.id = TOKEN_INTEGER;\n for ; singleChar > 47 && singleChar < 58 ; singleChar = libgogo.GetChar(fd) {\n newToken.value[newToken.value_len] = singleChar;\n newToken.value_len = newToken.value_len +1;\n }\n newToken.value[newToken.value_len] = 0\n newToken.nextChar = singleChar; \n done = 1;\n }\n\n \/\/ Left curly bracket '{'\n if (done != 1) && singleChar == '{' {\n newToken.id = TOKEN_LCBRAC;\n done = 1;\n }\n \n \/\/ Right curly bracket '}'\n if (done != 1) && singleChar == '}' {\n newToken.id = TOKEN_RCBRAC;\n done = 1;\n }\n\n \/\/ Point '.'\n if (done != 1) && singleChar == '.' {\n newToken.id = TOKEN_PT;\n done = 1;\n }\n\n \/\/ Not ('!') or Not Equal ('!=')\n if (done != 1) && singleChar == '!' {\n singleChar = libgogo.GetChar(fd);\n if singleChar == '=' {\n newToken.id = TOKEN_NOTEQUAL;\n } else {\n newToken.id = TOKEN_NOT;\n newToken.nextChar = singleChar;\n }\n done = 1;\n }\n\n \/\/ Semicolon ';'\n if (done != 1) && singleChar == ';' {\n newToken.id = TOKEN_SEMICOLON;\n done = 1;\n }\n\n \/\/ Colon ','\n if (done != 1) && singleChar == ',' {\n newToken.id = TOKEN_COLON;\n done = 1;\n }\n\n \/\/ Assignment '=' or Equals comparison '=='\n if (done != 1) && singleChar == '=' {\n singleChar = libgogo.GetChar(fd);\n if singleChar == '=' {\n newToken.id = TOKEN_EQUALS;\n } else {\n newToken.id = TOKEN_ASSIGN;\n newToken.nextChar = singleChar;\n }\n done = 1;\n }\n\n \/\/ AND Relation '&&'\n if (done != 1) && singleChar == '&' {\n singleChar = libgogo.GetChar(fd);\n if singleChar == '&' {\n newToken.id = TOKEN_REL_AND;\n } else {\n tmp_error(\">> Scanner: No address operator supported.\");\n }\n done = 1;\n }\n\n \/\/ OR Relation '||'\n if (done != 1) && singleChar == '|' {\n singleChar = libgogo.GetChar(fd);\n if singleChar == '|' {\n newToken.id = TOKEN_REL_OR;\n } else {\n tmp_error(\">> Scanner: No binary OR (|) supported. Only ||.\");\n }\n done = 1;\n } \n\n \/\/ Greater and Greater-Than relation\n if (done != 1) && singleChar == '>' {\n singleChar = libgogo.GetChar(fd);\n if singleChar == '=' {\n newToken.id = TOKEN_REL_GTOE;\n } else {\n newToken.id = TOKEN_REL_GT;\n newToken.nextChar = singleChar;\n } \n done = 1;\n } \n\n \/\/ Less and Less-Than relation\n if (done != 1) && singleChar == '<' {\n singleChar = libgogo.GetChar(fd);\n if singleChar == '=' {\n newToken.id = TOKEN_REL_LTOE;\n } else {\n newToken.id = TOKEN_REL_LT;\n newToken.nextChar = singleChar;\n } \n done = 1;\n } \n\n if (done != 1) && singleChar == '+' {\n newToken.id = TOKEN_ARITH_PLUS;\n done = 1;\n }\n\n if (done != 1) && singleChar == '-' {\n newToken.id = TOKEN_ARITH_MINUS;\n done = 1;\n }\n\n if done != 1 {\n fmt.Printf(\"'%c'\\n\",singleChar);\n tmp_error(\">> Scanner: Unkown char detected. Exiting\");\n }\n\n return newToken;\n}\n\nfunc GetNextToken(fd uint64, oldToken Token) Token {\n var newToken Token;\n newToken = GetNextTokenRaw(fd,oldToken)\n \n \/\/ Convert integer from byte array to integer value\n if newToken.id == TOKEN_INTEGER {\n newToken.intValue = ByteBufToInt(newToken.value,newToken.value_len);\n }\n\n \/\/ Convert single quoted characters to integer\n if newToken.id == TOKEN_CHAR {\n if newToken.value_len != 1 {\n tmp_error (\">> Scanner: Only single characters are supported!\");\n } else {\n newToken.id = TOKEN_INTEGER;\n newToken.intValue = ToIntFromByte(newToken.value[0]);\n }\n }\n\n \/\/ Convert identifier to keyworded tokens (if possible)\n \/\/ <TODO>\n\n return newToken;\n}\n\n\/\/ Move to libgogo?\nfunc ToIntFromByte(b byte) uint64 {\n return uint64(b);\n}\n\n\/\/ Move to libgogo?\nfunc ByteBufToInt(byteBuf [255]byte, bufLen uint64) uint64 {\n var i uint64; \n var val uint64;\n \n val = 0;\n\n for i = 0; i < bufLen ; i = i +1 {\n val = val * 10;\n val = val + uint64(byteBuf[i]) - 48;\n }\n\n return val;\n}\n\n\/\/ Move to libgogo?\nfunc charToIntToken(oldToken Token) Token {\n return oldToken;\n}\n\n\/\/ Move something like this to libgogo?\nfunc tmp_print(tok Token) {\n var i int;\n fmt.Printf(\"Token Id: %d\\n\",tok.id);\n if tok.id == TOKEN_IDENTIFIER || tok.id == TOKEN_STRING {\n fmt.Printf(\"Identifier\/String value: \");\n for i=0;tok.value[i] != 0;i=i+1 {\n fmt.Printf(\"%c\",tok.value[i]);\n }\n fmt.Printf(\"\\n\");\n } else {\n if tok.id == TOKEN_INTEGER {\n fmt.Printf(\"Integer value: \");\n fmt.Printf(\"%d\", tok.intValue);\n fmt.Printf(\"\\n\");\n }\n }\n}\n\n\/\/ Move something like this to libgogo?\nfunc tmp_error ( s string) {\n fmt.Printf(\"%s\\n\",s);\n libgogo.Exit(1);\n}\n\n\/\/ Temporary test function\nfunc scanner_test(fd uint64) { \n var tok Token;\n tok.id = 0;\n tok.nextChar = 0;\n\n for tok = GetNextToken(fd,tok); tok.id != TOKEN_EOS; tok = GetNextToken(fd,tok) {\n tmp_print(tok);\n }\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Matthew Holt and The Caddy Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage templates\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"text\/template\"\n\n\t\"github.com\/Masterminds\/sprig\/v3\"\n\t\"github.com\/alecthomas\/chroma\/formatters\/html\"\n\t\"github.com\/caddyserver\/caddy\/v2\/modules\/caddyhttp\"\n\t\"github.com\/yuin\/goldmark\"\n\thighlighting \"github.com\/yuin\/goldmark-highlighting\"\n\t\"github.com\/yuin\/goldmark\/extension\"\n\t\"github.com\/yuin\/goldmark\/parser\"\n\tgmhtml \"github.com\/yuin\/goldmark\/renderer\/html\"\n)\n\n\/\/ templateContext is the templateContext with which HTTP templates are executed.\ntype templateContext struct {\n\tRoot http.FileSystem\n\tReq *http.Request\n\tArgs []interface{} \/\/ defined by arguments to .Include\n\tRespHeader tplWrappedHeader\n\n\tconfig *Templates\n}\n\n\/\/ OriginalReq returns the original, unmodified, un-rewritten request as\n\/\/ it originally came in over the wire.\nfunc (c templateContext) OriginalReq() http.Request {\n\tor, _ := c.Req.Context().Value(caddyhttp.OriginalRequestCtxKey).(http.Request)\n\treturn or\n}\n\n\/\/ funcInclude returns the contents of filename relative to the site root.\n\/\/ Note that included files are NOT escaped, so you should only include\n\/\/ trusted files. If it is not trusted, be sure to use escaping functions\n\/\/ in your template.\nfunc (c templateContext) funcInclude(filename string, args ...interface{}) (string, error) {\n\tif c.Root == nil {\n\t\treturn \"\", fmt.Errorf(\"root file system not specified\")\n\t}\n\n\tfile, err := c.Root.Open(filename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer file.Close()\n\n\tbodyBuf := bufPool.Get().(*bytes.Buffer)\n\tbodyBuf.Reset()\n\tdefer bufPool.Put(bodyBuf)\n\n\t_, err = io.Copy(bodyBuf, file)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tc.Args = args\n\n\terr = c.executeTemplateInBuffer(filename, bodyBuf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn bodyBuf.String(), nil\n}\n\n\/\/ funcHTTPInclude returns the body of a virtual (lightweight) request\n\/\/ to the given URI on the same server. Note that included bodies\n\/\/ are NOT escaped, so you should only include trusted resources.\n\/\/ If it is not trusted, be sure to use escaping functions yourself.\nfunc (c templateContext) funcHTTPInclude(uri string) (string, error) {\n\t\/\/ prevent virtual request loops by counting how many levels\n\t\/\/ deep we are; and if we get too deep, return an error\n\trecursionCount := 1\n\tif numStr := c.Req.Header.Get(recursionPreventionHeader); numStr != \"\" {\n\t\tnum, err := strconv.Atoi(numStr)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"parsing %s: %v\", recursionPreventionHeader, err)\n\t\t}\n\t\tif num >= 3 {\n\t\t\treturn \"\", fmt.Errorf(\"virtual request cycle\")\n\t\t}\n\t\trecursionCount = num + 1\n\t}\n\n\tbuf := bufPool.Get().(*bytes.Buffer)\n\tbuf.Reset()\n\tdefer bufPool.Put(buf)\n\n\tvirtReq, err := http.NewRequest(\"GET\", uri, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvirtReq.Host = c.Req.Host\n\tvirtReq.Header = c.Req.Header.Clone()\n\tvirtReq.Trailer = c.Req.Trailer.Clone()\n\tvirtReq.Header.Set(recursionPreventionHeader, strconv.Itoa(recursionCount))\n\n\tvrw := &virtualResponseWriter{body: buf, header: make(http.Header)}\n\tserver := c.Req.Context().Value(caddyhttp.ServerCtxKey).(http.Handler)\n\n\tserver.ServeHTTP(vrw, virtReq)\n\tif vrw.status >= 400 {\n\t\treturn \"\", fmt.Errorf(\"http %d\", vrw.status)\n\t}\n\n\terr = c.executeTemplateInBuffer(uri, buf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn buf.String(), nil\n}\n\nfunc (c templateContext) executeTemplateInBuffer(tplName string, buf *bytes.Buffer) error {\n\ttpl := template.New(tplName)\n\tif len(c.config.Delimiters) == 2 {\n\t\ttpl.Delims(c.config.Delimiters[0], c.config.Delimiters[1])\n\t}\n\n\ttpl.Funcs(sprigFuncMap)\n\n\ttpl.Funcs(template.FuncMap{\n\t\t\"include\": c.funcInclude,\n\t\t\"httpInclude\": c.funcHTTPInclude,\n\t\t\"stripHTML\": c.funcStripHTML,\n\t\t\"markdown\": c.funcMarkdown,\n\t\t\"splitFrontMatter\": c.funcSplitFrontMatter,\n\t\t\"listFiles\": c.funcListFiles,\n\t})\n\n\tparsedTpl, err := tpl.Parse(buf.String())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuf.Reset() \/\/ reuse buffer for output\n\n\treturn parsedTpl.Execute(buf, c)\n}\n\n\/\/ Cookie gets the value of a cookie with name name.\nfunc (c templateContext) Cookie(name string) string {\n\tcookies := c.Req.Cookies()\n\tfor _, cookie := range cookies {\n\t\tif cookie.Name == name {\n\t\t\treturn cookie.Value\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ RemoteIP gets the IP address of the client making the request.\nfunc (c templateContext) RemoteIP() string {\n\tip, _, err := net.SplitHostPort(c.Req.RemoteAddr)\n\tif err != nil {\n\t\treturn c.Req.RemoteAddr\n\t}\n\treturn ip\n}\n\n\/\/ Host returns the hostname portion of the Host header\n\/\/ from the HTTP request.\nfunc (c templateContext) Host() (string, error) {\n\thost, _, err := net.SplitHostPort(c.Req.Host)\n\tif err != nil {\n\t\tif !strings.Contains(c.Req.Host, \":\") {\n\t\t\t\/\/ common with sites served on the default port 80\n\t\t\treturn c.Req.Host, nil\n\t\t}\n\t\treturn \"\", err\n\t}\n\treturn host, nil\n}\n\n\/\/ funcStripHTML returns s without HTML tags. It is fairly naive\n\/\/ but works with most valid HTML inputs.\nfunc (c templateContext) funcStripHTML(s string) string {\n\tvar buf bytes.Buffer\n\tvar inTag, inQuotes bool\n\tvar tagStart int\n\tfor i, ch := range s {\n\t\tif inTag {\n\t\t\tif ch == '>' && !inQuotes {\n\t\t\t\tinTag = false\n\t\t\t} else if ch == '<' && !inQuotes {\n\t\t\t\t\/\/ false start\n\t\t\t\tbuf.WriteString(s[tagStart:i])\n\t\t\t\ttagStart = i\n\t\t\t} else if ch == '\"' {\n\t\t\t\tinQuotes = !inQuotes\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif ch == '<' {\n\t\t\tinTag = true\n\t\t\ttagStart = i\n\t\t\tcontinue\n\t\t}\n\t\tbuf.WriteRune(ch)\n\t}\n\tif inTag {\n\t\t\/\/ false start\n\t\tbuf.WriteString(s[tagStart:])\n\t}\n\treturn buf.String()\n}\n\n\/\/ funcMarkdown renders the markdown body as HTML. The resulting\n\/\/ HTML is NOT escaped so that it can be rendered as HTML.\nfunc (c templateContext) funcMarkdown(input interface{}) (string, error) {\n\tinputStr := toString(input)\n\n\tmd := goldmark.New(\n\t\tgoldmark.WithExtensions(\n\t\t\textension.GFM,\n\t\t\textension.Footnote,\n\t\t\thighlighting.NewHighlighting(\n\t\t\t\thighlighting.WithFormatOptions(\n\t\t\t\t\thtml.WithClasses(true),\n\t\t\t\t),\n\t\t\t),\n\t\t),\n\t\tgoldmark.WithParserOptions(\n\t\t\tparser.WithAutoHeadingID(),\n\t\t),\n\t\tgoldmark.WithRendererOptions(\n\t\t\tgmhtml.WithHardWraps(),\n\t\t\tgmhtml.WithUnsafe(), \/\/ TODO: this is not awesome, maybe should be configurable?\n\t\t),\n\t)\n\n\tbuf := bufPool.Get().(*bytes.Buffer)\n\tbuf.Reset()\n\tdefer bufPool.Put(buf)\n\n\tmd.Convert([]byte(inputStr), buf)\n\n\treturn buf.String(), nil\n}\n\n\/\/ splitFrontMatter parses front matter out from the beginning of input,\n\/\/ and returns the separated key-value pairs and the body\/content. input\n\/\/ must be a \"stringy\" value.\nfunc (c templateContext) funcSplitFrontMatter(input interface{}) (parsedMarkdownDoc, error) {\n\tmeta, body, err := extractFrontMatter(toString(input))\n\tif err != nil {\n\t\treturn parsedMarkdownDoc{}, err\n\t}\n\treturn parsedMarkdownDoc{Meta: meta, Body: body}, nil\n}\n\n\/\/ funcListFiles reads and returns a slice of names from the given\n\/\/ directory relative to the root of c.\nfunc (c templateContext) funcListFiles(name string) ([]string, error) {\n\tif c.Root == nil {\n\t\treturn nil, fmt.Errorf(\"root file system not specified\")\n\t}\n\n\tdir, err := c.Root.Open(path.Clean(name))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer dir.Close()\n\n\tstat, err := dir.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !stat.IsDir() {\n\t\treturn nil, fmt.Errorf(\"%v is not a directory\", name)\n\t}\n\n\tdirInfo, err := dir.Readdir(0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnames := make([]string, len(dirInfo))\n\tfor i, fileInfo := range dirInfo {\n\t\tnames[i] = fileInfo.Name()\n\t}\n\n\treturn names, nil\n}\n\n\/\/ tplWrappedHeader wraps niladic functions so that they\n\/\/ can be used in templates. (Template functions must\n\/\/ return a value.)\ntype tplWrappedHeader struct{ http.Header }\n\n\/\/ Add adds a header field value, appending val to\n\/\/ existing values for that field. It returns an\n\/\/ empty string.\nfunc (h tplWrappedHeader) Add(field, val string) string {\n\th.Header.Add(field, val)\n\treturn \"\"\n}\n\n\/\/ Set sets a header field value, overwriting any\n\/\/ other values for that field. It returns an\n\/\/ empty string.\nfunc (h tplWrappedHeader) Set(field, val string) string {\n\th.Header.Set(field, val)\n\treturn \"\"\n}\n\n\/\/ Del deletes a header field. It returns an empty string.\nfunc (h tplWrappedHeader) Del(field string) string {\n\th.Header.Del(field)\n\treturn \"\"\n}\n\nfunc toString(input interface{}) string {\n\tswitch v := input.(type) {\n\tcase string:\n\t\treturn v\n\tcase fmt.Stringer:\n\t\treturn v.String()\n\tcase error:\n\t\treturn v.Error()\n\tdefault:\n\t\treturn fmt.Sprintf(\"%v\", input)\n\t}\n}\n\nvar bufPool = sync.Pool{\n\tNew: func() interface{} {\n\t\treturn new(bytes.Buffer)\n\t},\n}\n\n\/\/ at time of writing, sprig.FuncMap() makes a copy, thus\n\/\/ involves iterating the whole map, so do it just once\nvar sprigFuncMap = sprig.TxtFuncMap()\n\nconst recursionPreventionHeader = \"Caddy-Templates-Include\"\n<commit_msg>templates: Add env function (closes #3237)<commit_after>\/\/ Copyright 2015 Matthew Holt and The Caddy Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage templates\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"text\/template\"\n\n\t\"github.com\/Masterminds\/sprig\/v3\"\n\t\"github.com\/alecthomas\/chroma\/formatters\/html\"\n\t\"github.com\/caddyserver\/caddy\/v2\/modules\/caddyhttp\"\n\t\"github.com\/yuin\/goldmark\"\n\thighlighting \"github.com\/yuin\/goldmark-highlighting\"\n\t\"github.com\/yuin\/goldmark\/extension\"\n\t\"github.com\/yuin\/goldmark\/parser\"\n\tgmhtml \"github.com\/yuin\/goldmark\/renderer\/html\"\n)\n\n\/\/ templateContext is the templateContext with which HTTP templates are executed.\ntype templateContext struct {\n\tRoot http.FileSystem\n\tReq *http.Request\n\tArgs []interface{} \/\/ defined by arguments to .Include\n\tRespHeader tplWrappedHeader\n\n\tconfig *Templates\n}\n\n\/\/ OriginalReq returns the original, unmodified, un-rewritten request as\n\/\/ it originally came in over the wire.\nfunc (c templateContext) OriginalReq() http.Request {\n\tor, _ := c.Req.Context().Value(caddyhttp.OriginalRequestCtxKey).(http.Request)\n\treturn or\n}\n\n\/\/ funcInclude returns the contents of filename relative to the site root.\n\/\/ Note that included files are NOT escaped, so you should only include\n\/\/ trusted files. If it is not trusted, be sure to use escaping functions\n\/\/ in your template.\nfunc (c templateContext) funcInclude(filename string, args ...interface{}) (string, error) {\n\tif c.Root == nil {\n\t\treturn \"\", fmt.Errorf(\"root file system not specified\")\n\t}\n\n\tfile, err := c.Root.Open(filename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer file.Close()\n\n\tbodyBuf := bufPool.Get().(*bytes.Buffer)\n\tbodyBuf.Reset()\n\tdefer bufPool.Put(bodyBuf)\n\n\t_, err = io.Copy(bodyBuf, file)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tc.Args = args\n\n\terr = c.executeTemplateInBuffer(filename, bodyBuf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn bodyBuf.String(), nil\n}\n\n\/\/ funcHTTPInclude returns the body of a virtual (lightweight) request\n\/\/ to the given URI on the same server. Note that included bodies\n\/\/ are NOT escaped, so you should only include trusted resources.\n\/\/ If it is not trusted, be sure to use escaping functions yourself.\nfunc (c templateContext) funcHTTPInclude(uri string) (string, error) {\n\t\/\/ prevent virtual request loops by counting how many levels\n\t\/\/ deep we are; and if we get too deep, return an error\n\trecursionCount := 1\n\tif numStr := c.Req.Header.Get(recursionPreventionHeader); numStr != \"\" {\n\t\tnum, err := strconv.Atoi(numStr)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"parsing %s: %v\", recursionPreventionHeader, err)\n\t\t}\n\t\tif num >= 3 {\n\t\t\treturn \"\", fmt.Errorf(\"virtual request cycle\")\n\t\t}\n\t\trecursionCount = num + 1\n\t}\n\n\tbuf := bufPool.Get().(*bytes.Buffer)\n\tbuf.Reset()\n\tdefer bufPool.Put(buf)\n\n\tvirtReq, err := http.NewRequest(\"GET\", uri, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tvirtReq.Host = c.Req.Host\n\tvirtReq.Header = c.Req.Header.Clone()\n\tvirtReq.Trailer = c.Req.Trailer.Clone()\n\tvirtReq.Header.Set(recursionPreventionHeader, strconv.Itoa(recursionCount))\n\n\tvrw := &virtualResponseWriter{body: buf, header: make(http.Header)}\n\tserver := c.Req.Context().Value(caddyhttp.ServerCtxKey).(http.Handler)\n\n\tserver.ServeHTTP(vrw, virtReq)\n\tif vrw.status >= 400 {\n\t\treturn \"\", fmt.Errorf(\"http %d\", vrw.status)\n\t}\n\n\terr = c.executeTemplateInBuffer(uri, buf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn buf.String(), nil\n}\n\nfunc (c templateContext) executeTemplateInBuffer(tplName string, buf *bytes.Buffer) error {\n\ttpl := template.New(tplName)\n\tif len(c.config.Delimiters) == 2 {\n\t\ttpl.Delims(c.config.Delimiters[0], c.config.Delimiters[1])\n\t}\n\n\ttpl.Funcs(sprigFuncMap)\n\n\ttpl.Funcs(template.FuncMap{\n\t\t\"include\": c.funcInclude,\n\t\t\"httpInclude\": c.funcHTTPInclude,\n\t\t\"stripHTML\": c.funcStripHTML,\n\t\t\"markdown\": c.funcMarkdown,\n\t\t\"splitFrontMatter\": c.funcSplitFrontMatter,\n\t\t\"listFiles\": c.funcListFiles,\n\t\t\"env\": c.funcEnv,\n\t})\n\n\tparsedTpl, err := tpl.Parse(buf.String())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuf.Reset() \/\/ reuse buffer for output\n\n\treturn parsedTpl.Execute(buf, c)\n}\n\nfunc (templateContext) funcEnv(varName string) string {\n\treturn os.Getenv(varName)\n}\n\n\/\/ Cookie gets the value of a cookie with name name.\nfunc (c templateContext) Cookie(name string) string {\n\tcookies := c.Req.Cookies()\n\tfor _, cookie := range cookies {\n\t\tif cookie.Name == name {\n\t\t\treturn cookie.Value\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ RemoteIP gets the IP address of the client making the request.\nfunc (c templateContext) RemoteIP() string {\n\tip, _, err := net.SplitHostPort(c.Req.RemoteAddr)\n\tif err != nil {\n\t\treturn c.Req.RemoteAddr\n\t}\n\treturn ip\n}\n\n\/\/ Host returns the hostname portion of the Host header\n\/\/ from the HTTP request.\nfunc (c templateContext) Host() (string, error) {\n\thost, _, err := net.SplitHostPort(c.Req.Host)\n\tif err != nil {\n\t\tif !strings.Contains(c.Req.Host, \":\") {\n\t\t\t\/\/ common with sites served on the default port 80\n\t\t\treturn c.Req.Host, nil\n\t\t}\n\t\treturn \"\", err\n\t}\n\treturn host, nil\n}\n\n\/\/ funcStripHTML returns s without HTML tags. It is fairly naive\n\/\/ but works with most valid HTML inputs.\nfunc (templateContext) funcStripHTML(s string) string {\n\tvar buf bytes.Buffer\n\tvar inTag, inQuotes bool\n\tvar tagStart int\n\tfor i, ch := range s {\n\t\tif inTag {\n\t\t\tif ch == '>' && !inQuotes {\n\t\t\t\tinTag = false\n\t\t\t} else if ch == '<' && !inQuotes {\n\t\t\t\t\/\/ false start\n\t\t\t\tbuf.WriteString(s[tagStart:i])\n\t\t\t\ttagStart = i\n\t\t\t} else if ch == '\"' {\n\t\t\t\tinQuotes = !inQuotes\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif ch == '<' {\n\t\t\tinTag = true\n\t\t\ttagStart = i\n\t\t\tcontinue\n\t\t}\n\t\tbuf.WriteRune(ch)\n\t}\n\tif inTag {\n\t\t\/\/ false start\n\t\tbuf.WriteString(s[tagStart:])\n\t}\n\treturn buf.String()\n}\n\n\/\/ funcMarkdown renders the markdown body as HTML. The resulting\n\/\/ HTML is NOT escaped so that it can be rendered as HTML.\nfunc (templateContext) funcMarkdown(input interface{}) (string, error) {\n\tinputStr := toString(input)\n\n\tmd := goldmark.New(\n\t\tgoldmark.WithExtensions(\n\t\t\textension.GFM,\n\t\t\textension.Footnote,\n\t\t\thighlighting.NewHighlighting(\n\t\t\t\thighlighting.WithFormatOptions(\n\t\t\t\t\thtml.WithClasses(true),\n\t\t\t\t),\n\t\t\t),\n\t\t),\n\t\tgoldmark.WithParserOptions(\n\t\t\tparser.WithAutoHeadingID(),\n\t\t),\n\t\tgoldmark.WithRendererOptions(\n\t\t\tgmhtml.WithHardWraps(),\n\t\t\tgmhtml.WithUnsafe(), \/\/ TODO: this is not awesome, maybe should be configurable?\n\t\t),\n\t)\n\n\tbuf := bufPool.Get().(*bytes.Buffer)\n\tbuf.Reset()\n\tdefer bufPool.Put(buf)\n\n\tmd.Convert([]byte(inputStr), buf)\n\n\treturn buf.String(), nil\n}\n\n\/\/ splitFrontMatter parses front matter out from the beginning of input,\n\/\/ and returns the separated key-value pairs and the body\/content. input\n\/\/ must be a \"stringy\" value.\nfunc (templateContext) funcSplitFrontMatter(input interface{}) (parsedMarkdownDoc, error) {\n\tmeta, body, err := extractFrontMatter(toString(input))\n\tif err != nil {\n\t\treturn parsedMarkdownDoc{}, err\n\t}\n\treturn parsedMarkdownDoc{Meta: meta, Body: body}, nil\n}\n\n\/\/ funcListFiles reads and returns a slice of names from the given\n\/\/ directory relative to the root of c.\nfunc (c templateContext) funcListFiles(name string) ([]string, error) {\n\tif c.Root == nil {\n\t\treturn nil, fmt.Errorf(\"root file system not specified\")\n\t}\n\n\tdir, err := c.Root.Open(path.Clean(name))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer dir.Close()\n\n\tstat, err := dir.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !stat.IsDir() {\n\t\treturn nil, fmt.Errorf(\"%v is not a directory\", name)\n\t}\n\n\tdirInfo, err := dir.Readdir(0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnames := make([]string, len(dirInfo))\n\tfor i, fileInfo := range dirInfo {\n\t\tnames[i] = fileInfo.Name()\n\t}\n\n\treturn names, nil\n}\n\n\/\/ tplWrappedHeader wraps niladic functions so that they\n\/\/ can be used in templates. (Template functions must\n\/\/ return a value.)\ntype tplWrappedHeader struct{ http.Header }\n\n\/\/ Add adds a header field value, appending val to\n\/\/ existing values for that field. It returns an\n\/\/ empty string.\nfunc (h tplWrappedHeader) Add(field, val string) string {\n\th.Header.Add(field, val)\n\treturn \"\"\n}\n\n\/\/ Set sets a header field value, overwriting any\n\/\/ other values for that field. It returns an\n\/\/ empty string.\nfunc (h tplWrappedHeader) Set(field, val string) string {\n\th.Header.Set(field, val)\n\treturn \"\"\n}\n\n\/\/ Del deletes a header field. It returns an empty string.\nfunc (h tplWrappedHeader) Del(field string) string {\n\th.Header.Del(field)\n\treturn \"\"\n}\n\nfunc toString(input interface{}) string {\n\tswitch v := input.(type) {\n\tcase string:\n\t\treturn v\n\tcase fmt.Stringer:\n\t\treturn v.String()\n\tcase error:\n\t\treturn v.Error()\n\tdefault:\n\t\treturn fmt.Sprintf(\"%v\", input)\n\t}\n}\n\nvar bufPool = sync.Pool{\n\tNew: func() interface{} {\n\t\treturn new(bytes.Buffer)\n\t},\n}\n\n\/\/ at time of writing, sprig.FuncMap() makes a copy, thus\n\/\/ involves iterating the whole map, so do it just once\nvar sprigFuncMap = sprig.TxtFuncMap()\n\nconst recursionPreventionHeader = \"Caddy-Templates-Include\"\n<|endoftext|>"} {"text":"<commit_before><commit_msg>added support for removal of projects<commit_after><|endoftext|>"} {"text":"<commit_before>package cloudup\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"k8s.io\/kops\/upup\/pkg\/api\"\n\t\"math\/big\"\n\t\"net\"\n\t\"sort\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\ntype TemplateFunctions struct {\n\tcluster *api.Cluster\n\ttags map[string]struct{}\n\tregion string\n}\n\nfunc (tf *TemplateFunctions) WellKnownServiceIP(id int) (net.IP, error) {\n\t_, cidr, err := net.ParseCIDR(tf.cluster.Spec.ServiceClusterIPRange)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error parsing ServiceClusterIPRange %q: %v\", tf.cluster.Spec.ServiceClusterIPRange, err)\n\t}\n\n\tip4 := cidr.IP.To4()\n\tif ip4 != nil {\n\t\tn := binary.BigEndian.Uint32(ip4)\n\t\tn += uint32(id)\n\t\tserviceIP := make(net.IP, len(ip4))\n\t\tbinary.BigEndian.PutUint32(serviceIP, n)\n\t\treturn serviceIP, nil\n\t}\n\n\tip6 := cidr.IP.To16()\n\tif ip6 != nil {\n\t\tbaseIPInt := big.NewInt(0)\n\t\tbaseIPInt.SetBytes(ip6)\n\t\tserviceIPInt := big.NewInt(0)\n\t\tserviceIPInt.Add(big.NewInt(int64(id)), baseIPInt)\n\t\tserviceIP := make(net.IP, len(ip6))\n\t\tserviceIPBytes := serviceIPInt.Bytes()\n\t\tfor i := range serviceIPBytes {\n\t\t\tserviceIP[len(serviceIP)-len(serviceIPBytes)+i] = serviceIPBytes[i]\n\t\t}\n\t\treturn serviceIP, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"Unexpected IP address type for ServiceClusterIPRange: %s\", tf.cluster.Spec.ServiceClusterIPRange)\n}\n\nfunc (tf *TemplateFunctions) AddTo(dest template.FuncMap) {\n\tdest[\"EtcdClusterMemberTags\"] = tf.EtcdClusterMemberTags\n\tdest[\"SharedVPC\"] = tf.SharedVPC\n\tdest[\"WellKnownServiceIP\"] = tf.WellKnownServiceIP\n\tdest[\"AdminCIDR\"] = tf.AdminCIDR\n\n\tdest[\"Base64Encode\"] = func(s string) string {\n\t\treturn base64.StdEncoding.EncodeToString([]byte(s))\n\t}\n\tdest[\"replace\"] = func(s, find, replace string) string {\n\t\treturn strings.Replace(s, find, replace, -1)\n\t}\n\tdest[\"join\"] = func(a []string, sep string) string {\n\t\treturn strings.Join(a, sep)\n\t}\n\n\tdest[\"ClusterName\"] = func() string {\n\t\treturn tf.cluster.Name\n\t}\n\n\tdest[\"HasTag\"] = func(tag string) bool {\n\t\t_, found := tf.tags[tag]\n\t\treturn found\n\t}\n\n\tdest[\"IAMPrefix\"] = tf.IAMPrefix\n\tdest[\"IAMServiceEC2\"] = tf.IAMServiceEC2\n\n\tdest[\"AssociatePublicIP\"] = func() string {\n\t\treturn tf.cluster.Spec.AssociatePublicIP\n\t}\n}\n\nfunc (tf *TemplateFunctions) EtcdClusterMemberTags(etcd *api.EtcdClusterSpec, m *api.EtcdMemberSpec) map[string]string {\n\ttags := make(map[string]string)\n\n\tvar allMembers []string\n\n\tfor _, m := range etcd.Members {\n\t\tallMembers = append(allMembers, m.Name)\n\t}\n\n\tsort.Strings(allMembers)\n\n\t\/\/ This is the configuration of the etcd cluster\n\ttags[\"k8s.io\/etcd\/\"+etcd.Name] = m.Name + \"\/\" + strings.Join(allMembers, \",\")\n\n\t\/\/ This says \"only mount on a master\"\n\ttags[\"k8s.io\/role\/master\"] = \"1\"\n\n\treturn tags\n}\n\n\/\/ SharedVPC is a simple helper function which makes the templates for a shared VPC clearer\nfunc (tf *TemplateFunctions) SharedVPC() bool {\n\treturn tf.cluster.Spec.NetworkID != \"\"\n}\n\n\/\/ AdminCIDR returns the single CIDR that is allowed access to the admin ports of the cluster (22, 443 on master)\nfunc (tf *TemplateFunctions) AdminCIDR() (string, error) {\n\tif len(tf.cluster.Spec.AdminAccess) == 0 {\n\t\treturn \"0.0.0.0\/0\", nil\n\t}\n\tif len(tf.cluster.Spec.AdminAccess) == 1 {\n\t\treturn tf.cluster.Spec.AdminAccess[0], nil\n\t}\n\treturn \"\", fmt.Errorf(\"Multiple AdminAccess rules are not (currently) supported\")\n}\n\n\/\/ IAMServiceEC2 returns the name of the IAM service for EC2 in the current region\n\/\/ it is ec2.amazonaws.com everywhere but in cn-north, where it is ec2.amazonaws.com.cn\nfunc (tf *TemplateFunctions) IAMServiceEC2() string {\n\tswitch tf.region {\n\tcase \"cn-north-1\":\n\t\treturn \"ec2.amazonaws.com.cn\"\n\tdefault:\n\t\treturn \"ec2.amazonaws.com\"\n\t}\n}\n\n\/\/ IAMPrefix returns the prefix for AWS ARNs in the current region, for use with IAM\n\/\/ it is arn:aws everywhere but in cn-north, where it is arn:aws-cn\nfunc (tf *TemplateFunctions) IAMPrefix() string {\n\tswitch tf.region {\n\tcase \"cn-north-1\":\n\t\treturn \"arn:aws-cn\"\n\tdefault:\n\t\treturn \"arn:aws\"\n\t}\n}\n<commit_msg>add parameter --disable-associate-public-ip<commit_after>package cloudup\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"k8s.io\/kops\/upup\/pkg\/api\"\n\t\"math\/big\"\n\t\"net\"\n\t\"sort\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\ntype TemplateFunctions struct {\n\tcluster *api.Cluster\n\ttags map[string]struct{}\n\tregion string\n}\n\nfunc (tf *TemplateFunctions) WellKnownServiceIP(id int) (net.IP, error) {\n\t_, cidr, err := net.ParseCIDR(tf.cluster.Spec.ServiceClusterIPRange)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error parsing ServiceClusterIPRange %q: %v\", tf.cluster.Spec.ServiceClusterIPRange, err)\n\t}\n\n\tip4 := cidr.IP.To4()\n\tif ip4 != nil {\n\t\tn := binary.BigEndian.Uint32(ip4)\n\t\tn += uint32(id)\n\t\tserviceIP := make(net.IP, len(ip4))\n\t\tbinary.BigEndian.PutUint32(serviceIP, n)\n\t\treturn serviceIP, nil\n\t}\n\n\tip6 := cidr.IP.To16()\n\tif ip6 != nil {\n\t\tbaseIPInt := big.NewInt(0)\n\t\tbaseIPInt.SetBytes(ip6)\n\t\tserviceIPInt := big.NewInt(0)\n\t\tserviceIPInt.Add(big.NewInt(int64(id)), baseIPInt)\n\t\tserviceIP := make(net.IP, len(ip6))\n\t\tserviceIPBytes := serviceIPInt.Bytes()\n\t\tfor i := range serviceIPBytes {\n\t\t\tserviceIP[len(serviceIP)-len(serviceIPBytes)+i] = serviceIPBytes[i]\n\t\t}\n\t\treturn serviceIP, nil\n\t}\n\n\treturn nil, fmt.Errorf(\"Unexpected IP address type for ServiceClusterIPRange: %s\", tf.cluster.Spec.ServiceClusterIPRange)\n}\n\nfunc (tf *TemplateFunctions) AddTo(dest template.FuncMap) {\n\tdest[\"EtcdClusterMemberTags\"] = tf.EtcdClusterMemberTags\n\tdest[\"SharedVPC\"] = tf.SharedVPC\n\tdest[\"WellKnownServiceIP\"] = tf.WellKnownServiceIP\n\tdest[\"AdminCIDR\"] = tf.AdminCIDR\n\n\tdest[\"Base64Encode\"] = func(s string) string {\n\t\treturn base64.StdEncoding.EncodeToString([]byte(s))\n\t}\n\tdest[\"replace\"] = func(s, find, replace string) string {\n\t\treturn strings.Replace(s, find, replace, -1)\n\t}\n\tdest[\"join\"] = func(a []string, sep string) string {\n\t\treturn strings.Join(a, sep)\n\t}\n\n\tdest[\"ClusterName\"] = func() string {\n\t\treturn tf.cluster.Name\n\t}\n\n\tdest[\"HasTag\"] = func(tag string) bool {\n\t\t_, found := tf.tags[tag]\n\t\treturn found\n\t}\n\n\tdest[\"IAMPrefix\"] = tf.IAMPrefix\n\tdest[\"IAMServiceEC2\"] = tf.IAMServiceEC2\n\n\tdest[\"AssociatePublicIP\"] = func() bool {\n\t\treturn tf.cluster.Spec.AssociatePublicIP\n\t}\n}\n\nfunc (tf *TemplateFunctions) EtcdClusterMemberTags(etcd *api.EtcdClusterSpec, m *api.EtcdMemberSpec) map[string]string {\n\ttags := make(map[string]string)\n\n\tvar allMembers []string\n\n\tfor _, m := range etcd.Members {\n\t\tallMembers = append(allMembers, m.Name)\n\t}\n\n\tsort.Strings(allMembers)\n\n\t\/\/ This is the configuration of the etcd cluster\n\ttags[\"k8s.io\/etcd\/\"+etcd.Name] = m.Name + \"\/\" + strings.Join(allMembers, \",\")\n\n\t\/\/ This says \"only mount on a master\"\n\ttags[\"k8s.io\/role\/master\"] = \"1\"\n\n\treturn tags\n}\n\n\/\/ SharedVPC is a simple helper function which makes the templates for a shared VPC clearer\nfunc (tf *TemplateFunctions) SharedVPC() bool {\n\treturn tf.cluster.Spec.NetworkID != \"\"\n}\n\n\/\/ AdminCIDR returns the single CIDR that is allowed access to the admin ports of the cluster (22, 443 on master)\nfunc (tf *TemplateFunctions) AdminCIDR() (string, error) {\n\tif len(tf.cluster.Spec.AdminAccess) == 0 {\n\t\treturn \"0.0.0.0\/0\", nil\n\t}\n\tif len(tf.cluster.Spec.AdminAccess) == 1 {\n\t\treturn tf.cluster.Spec.AdminAccess[0], nil\n\t}\n\treturn \"\", fmt.Errorf(\"Multiple AdminAccess rules are not (currently) supported\")\n}\n\n\/\/ IAMServiceEC2 returns the name of the IAM service for EC2 in the current region\n\/\/ it is ec2.amazonaws.com everywhere but in cn-north, where it is ec2.amazonaws.com.cn\nfunc (tf *TemplateFunctions) IAMServiceEC2() string {\n\tswitch tf.region {\n\tcase \"cn-north-1\":\n\t\treturn \"ec2.amazonaws.com.cn\"\n\tdefault:\n\t\treturn \"ec2.amazonaws.com\"\n\t}\n}\n\n\/\/ IAMPrefix returns the prefix for AWS ARNs in the current region, for use with IAM\n\/\/ it is arn:aws everywhere but in cn-north, where it is arn:aws-cn\nfunc (tf *TemplateFunctions) IAMPrefix() string {\n\tswitch tf.region {\n\tcase \"cn-north-1\":\n\t\treturn \"arn:aws-cn\"\n\tdefault:\n\t\treturn \"arn:aws\"\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"syscall\"\n\t\"github.com\/docker\/docker\/pkg\/reexec\"\n\t\"path\/filepath\"\n\t\"github.com\/getsentry\/raven-go\"\n\t\"..\/..\/models\"\n\t\"..\/..\/config\"\n\t\"flag\"\n\t\"strings\"\n\t\"bytes\"\n)\n\nfunc init() {\n\treexec.Register(\"justice_init\", justice_init)\n\tif reexec.Init() {\n\t\tos.Exit(0)\n\t}\n}\n\nfunc pivot_root(new_root string) error {\n\tput_old := filepath.Join(new_root, \"\/.pivot_root\")\n\n\t\/\/ bind mount new_root to itself - this is a slight hack needed to satisfy requirement (2)\n\t\/\/\n\t\/\/ The following restrictions apply to new_root and put_old:\n\t\/\/ 1. They must be directories.\n\t\/\/ 2. new_root and put_old must not be on the same filesystem as the current root.\n\t\/\/ 3. put_old must be underneath new_root, that is, adding a nonzero\n\t\/\/ number of \/.. to the string pointed to by put_old must yield the same directory as new_root.\n\t\/\/ 4. No other filesystem may be mounted on put_old.\n\tif err := syscall.Mount(new_root, new_root, \"\", syscall.MS_BIND|syscall.MS_REC, \"\"); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ create put_old directory\n\tif err := os.MkdirAll(put_old, 0700); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ call pivot_root\n\tif err := syscall.PivotRoot(new_root, put_old); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Note that this also applies to the calling process: pivot_root() may\n\t\/\/ or may not affect its current working directory. It is therefore\n\t\/\/ recommended to call chdir(\"\/\") immediately after pivot_root().\n\tif err := os.Chdir(\"\/\"); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ umount put_old, which now lives at \/.pivot_root\n\tput_old = \"\/.pivot_root\"\n\tif err := syscall.Unmount(put_old, syscall.MNT_DETACH); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ remove put_old\n\tif err := os.RemoveAll(put_old); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc mount_proc(new_root string) error {\n\ttarget := filepath.Join(new_root, \"\/proc\")\n\tos.MkdirAll(target, 0755)\n\treturn syscall.Mount(\"proc\", target, \"proc\", uintptr(0), \"\")\n}\n\nfunc justice_init() {\n\tnew_root_path := os.Args[1]\n\tinput := os.Args[2]\n\texpected := os.Args[3]\n\n\traven.SetDSN(config.SENTRY_DSN)\n\n\tif err := mount_proc(new_root_path); err != nil {\n\t\traven.CaptureErrorAndWait(err, map[string]string{\"error\": \"InitContainerFailed\"})\n\t\tresult, _ := json.Marshal(models.GetRuntimeErrorTaskResult())\n\t\tos.Stdout.Write(result)\n\t\tos.Exit(models.CODE_INIT_CONTAINER_FAILED)\n\t}\n\n\tif err := pivot_root(new_root_path); err != nil {\n\t\traven.CaptureErrorAndWait(err, map[string]string{\"error\": \"InitContainerFailed\"})\n\t\tresult, _ := json.Marshal(models.GetRuntimeErrorTaskResult())\n\t\tos.Stdout.Write(result)\n\t\tos.Exit(models.CODE_INIT_CONTAINER_FAILED)\n\t}\n\n\tif err := syscall.Sethostname([]byte(\"justice\")); err != nil {\n\t\traven.CaptureErrorAndWait(err, map[string]string{\"error\": \"InitContainerFailed\"})\n\t\tresult, _ := json.Marshal(models.GetRuntimeErrorTaskResult())\n\t\tos.Stdout.Write(result)\n\t\tos.Exit(models.CODE_INIT_CONTAINER_FAILED)\n\t}\n\n\tjustice_run(input, expected)\n}\n\nfunc justice_run(input, expected string) {\n\traven.SetDSN(config.SENTRY_DSN)\n\n\t\/\/ for c programs, compiled binary with name [Main] will be located in \"\/\"\n\tvar o bytes.Buffer\n\tcmd := exec.Command(\"\/Main\")\n\tcmd.Stdin = strings.NewReader(input)\n\tcmd.Stdout = &o\n\tcmd.Stderr = os.Stderr\n\tcmd.Env = []string{\"PS1=[justice] # \"}\n\n\tif err := cmd.Run(); err != nil {\n\t\traven.CaptureErrorAndWait(err, map[string]string{\"error\": \"ContainerRunTimeError\"})\n\t\tresult, _ := json.Marshal(models.GetRuntimeErrorTaskResult())\n\t\tos.Stdout.Write(result)\n\t\tos.Exit(models.CODE_CONTAINER_RUNTIME_ERROR)\n\t}\n\n\toutput := o.String()\n\tif output == expected {\n\t\tresult, _ := json.Marshal(models.GetAccepptedTaskResult(13,456))\n\t\tos.Stdout.Write(result)\n\t} else {\n\t\tresult, _ := json.Marshal(models.GetWrongAnswerTaskResult(input, output, expected))\n\t\tos.Stdout.Write(result)\n\t}\n\tos.Exit(models.CODE_OK)\n}\n\nfunc main() {\n\tbasedir := flag.String(\"basedir\", \"\/tmp\", \"basedir of tmp C binary\")\n\tinput := flag.String(\"input\", \"\", \"test case input\")\n\texpected := flag.String(\"expected\", \"\", \"test case expected\")\n\tflag.Parse()\n\n\traven.SetDSN(config.SENTRY_DSN)\n\n\tcmd := reexec.Command(\"justice_init\", *basedir, *input, *expected)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tCloneflags: syscall.CLONE_NEWNS |\n\t\t\tsyscall.CLONE_NEWUTS |\n\t\t\tsyscall.CLONE_NEWIPC |\n\t\t\tsyscall.CLONE_NEWPID |\n\t\t\tsyscall.CLONE_NEWNET |\n\t\t\tsyscall.CLONE_NEWUSER,\n\t\tUidMappings: []syscall.SysProcIDMap{\n\t\t\t{\n\t\t\t\tContainerID: 0,\n\t\t\t\tHostID: os.Getuid(),\n\t\t\t\tSize: 1,\n\t\t\t},\n\t\t},\n\t\tGidMappings: []syscall.SysProcIDMap{\n\t\t\t{\n\t\t\t\tContainerID: 0,\n\t\t\t\tHostID: os.Getgid(),\n\t\t\t\tSize: 1,\n\t\t\t},\n\t\t},\n\t}\n\n\tif err := cmd.Start(); err != nil {\n\t\traven.CaptureErrorAndWait(err, map[string]string{\"error\": \"ContainerRunTimeError\"})\n\t\tresult, _ := json.Marshal(models.GetRuntimeErrorTaskResult())\n\t\tos.Stdout.Write(result)\n\t\tos.Exit(models.CODE_CONTAINER_RUNTIME_ERROR)\n\t}\n\n\tif err := cmd.Wait(); err != nil {\n\t\traven.CaptureErrorAndWait(err, map[string]string{\"error\": \"ContainerRunTimeError\"})\n\t\tresult, _ := json.Marshal(models.GetRuntimeErrorTaskResult())\n\t\tos.Stdout.Write(result)\n\t\tos.Exit(models.CODE_CONTAINER_RUNTIME_ERROR)\n\t}\n}\n<commit_msg>add runtime limitation<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"syscall\"\n\t\"github.com\/docker\/docker\/pkg\/reexec\"\n\t\"path\/filepath\"\n\t\"github.com\/getsentry\/raven-go\"\n\t\"..\/..\/models\"\n\t\"..\/..\/config\"\n\t\"flag\"\n\t\"strings\"\n\t\"bytes\"\n\t\"time\"\n\t\"strconv\"\n)\n\nfunc init() {\n\treexec.Register(\"justice_init\", justice_init)\n\tif reexec.Init() {\n\t\tos.Exit(0)\n\t}\n}\n\nfunc pivot_root(new_root string) error {\n\tput_old := filepath.Join(new_root, \"\/.pivot_root\")\n\n\t\/\/ bind mount new_root to itself - this is a slight hack needed to satisfy requirement (2)\n\t\/\/\n\t\/\/ The following restrictions apply to new_root and put_old:\n\t\/\/ 1. They must be directories.\n\t\/\/ 2. new_root and put_old must not be on the same filesystem as the current root.\n\t\/\/ 3. put_old must be underneath new_root, that is, adding a nonzero\n\t\/\/ number of \/.. to the string pointed to by put_old must yield the same directory as new_root.\n\t\/\/ 4. No other filesystem may be mounted on put_old.\n\tif err := syscall.Mount(new_root, new_root, \"\", syscall.MS_BIND|syscall.MS_REC, \"\"); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ create put_old directory\n\tif err := os.MkdirAll(put_old, 0700); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ call pivot_root\n\tif err := syscall.PivotRoot(new_root, put_old); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Note that this also applies to the calling process: pivot_root() may\n\t\/\/ or may not affect its current working directory. It is therefore\n\t\/\/ recommended to call chdir(\"\/\") immediately after pivot_root().\n\tif err := os.Chdir(\"\/\"); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ umount put_old, which now lives at \/.pivot_root\n\tput_old = \"\/.pivot_root\"\n\tif err := syscall.Unmount(put_old, syscall.MNT_DETACH); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ remove put_old\n\tif err := os.RemoveAll(put_old); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc mount_proc(new_root string) error {\n\ttarget := filepath.Join(new_root, \"\/proc\")\n\tos.MkdirAll(target, 0755)\n\treturn syscall.Mount(\"proc\", target, \"proc\", uintptr(0), \"\")\n}\n\nfunc justice_init() {\n\tnew_root_path := os.Args[1]\n\tinput := os.Args[2]\n\texpected := os.Args[3]\n\ttimeout, _ := strconv.ParseInt(os.Args[4],10, 32)\n\n\traven.SetDSN(config.SENTRY_DSN)\n\n\tif err := mount_proc(new_root_path); err != nil {\n\t\traven.CaptureErrorAndWait(err, map[string]string{\"error\": \"InitContainerFailed\"})\n\t\tresult, _ := json.Marshal(models.GetRuntimeErrorTaskResult())\n\t\tos.Stdout.Write(result)\n\t\tos.Exit(models.CODE_INIT_CONTAINER_FAILED)\n\t}\n\n\tif err := pivot_root(new_root_path); err != nil {\n\t\traven.CaptureErrorAndWait(err, map[string]string{\"error\": \"InitContainerFailed\"})\n\t\tresult, _ := json.Marshal(models.GetRuntimeErrorTaskResult())\n\t\tos.Stdout.Write(result)\n\t\tos.Exit(models.CODE_INIT_CONTAINER_FAILED)\n\t}\n\n\tif err := syscall.Sethostname([]byte(\"justice\")); err != nil {\n\t\traven.CaptureErrorAndWait(err, map[string]string{\"error\": \"InitContainerFailed\"})\n\t\tresult, _ := json.Marshal(models.GetRuntimeErrorTaskResult())\n\t\tos.Stdout.Write(result)\n\t\tos.Exit(models.CODE_INIT_CONTAINER_FAILED)\n\t}\n\n\tjustice_run(input, expected, int32(timeout))\n}\n\nfunc justice_run(input, expected string, timeout int32) {\n\traven.SetDSN(config.SENTRY_DSN)\n\n\t\/\/ for c programs, compiled binary with name [Main] will be located in \"\/\"\n\tvar o bytes.Buffer\n\tcmd := exec.Command(\"\/Main\")\n\tcmd.Stdin = strings.NewReader(input)\n\tcmd.Stdout = &o\n\tcmd.Stderr = os.Stderr\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tSetpgid: true,\n\t}\n\tcmd.Env = []string{\"PS1=[justice] # \"}\n\n\ttime.AfterFunc(time.Duration(timeout)*time.Millisecond, func() {\n\t\tsyscall.Kill(-cmd.Process.Pid, syscall.SIGKILL)\n\t})\n\n\tif err := cmd.Run(); err != nil {\n\t\traven.CaptureErrorAndWait(err, map[string]string{\"error\": \"ContainerRunTimeError\"})\n\t\tresult, _ := json.Marshal(models.GetRuntimeErrorTaskResult())\n\t\tos.Stdout.Write(result)\n\t\tos.Exit(models.CODE_CONTAINER_RUNTIME_ERROR)\n\t}\n\n\toutput := o.String()\n\tif output == expected {\n\t\tresult, _ := json.Marshal(models.GetAccepptedTaskResult(13,456))\n\t\tos.Stdout.Write(result)\n\t} else {\n\t\tresult, _ := json.Marshal(models.GetWrongAnswerTaskResult(input, output, expected))\n\t\tos.Stdout.Write(result)\n\t}\n\tos.Exit(models.CODE_OK)\n}\n\nfunc main() {\n\tbasedir := flag.String(\"basedir\", \"\/tmp\", \"basedir of tmp C binary\")\n\tinput := flag.String(\"input\", \"\", \"test case input\")\n\texpected := flag.String(\"expected\", \"\", \"test case expected\")\n\ttimeout := flag.String(\"timeout\", \"10000\", \"timeout in milliseconds\")\n\tflag.Parse()\n\n\traven.SetDSN(config.SENTRY_DSN)\n\n\tcmd := reexec.Command(\"justice_init\", *basedir, *input, *expected, *timeout)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tCloneflags: syscall.CLONE_NEWNS |\n\t\t\tsyscall.CLONE_NEWUTS |\n\t\t\tsyscall.CLONE_NEWIPC |\n\t\t\tsyscall.CLONE_NEWPID |\n\t\t\tsyscall.CLONE_NEWNET |\n\t\t\tsyscall.CLONE_NEWUSER,\n\t\tUidMappings: []syscall.SysProcIDMap{\n\t\t\t{\n\t\t\t\tContainerID: 0,\n\t\t\t\tHostID: os.Getuid(),\n\t\t\t\tSize: 1,\n\t\t\t},\n\t\t},\n\t\tGidMappings: []syscall.SysProcIDMap{\n\t\t\t{\n\t\t\t\tContainerID: 0,\n\t\t\t\tHostID: os.Getgid(),\n\t\t\t\tSize: 1,\n\t\t\t},\n\t\t},\n\t}\n\n\tif err := cmd.Start(); err != nil {\n\t\traven.CaptureErrorAndWait(err, map[string]string{\"error\": \"ContainerRunTimeError\"})\n\t\tresult, _ := json.Marshal(models.GetRuntimeErrorTaskResult())\n\t\tos.Stdout.Write(result)\n\t\tos.Exit(models.CODE_CONTAINER_RUNTIME_ERROR)\n\t}\n\n\tif err := cmd.Wait(); err != nil {\n\t\traven.CaptureErrorAndWait(err, map[string]string{\"error\": \"ContainerRunTimeError\"})\n\t\tresult, _ := json.Marshal(models.GetRuntimeErrorTaskResult())\n\t\tos.Stdout.Write(result)\n\t\tos.Exit(models.CODE_CONTAINER_RUNTIME_ERROR)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package tcp\n\nimport (\n\t\"bufio\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/cihub\/seelog\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/schollz\/croc\/src\/comm\"\n\t\"github.com\/schollz\/croc\/src\/logger\"\n\t\"github.com\/schollz\/croc\/src\/models\"\n)\n\ntype roomInfo struct {\n\treceiver *comm.Comm\n\topened time.Time\n}\n\ntype roomMap struct {\n\trooms map[string]roomInfo\n\tsync.Mutex\n}\n\nvar rooms roomMap\n\n\/\/ Run starts a tcp listener, run async\nfunc Run(debugLevel, port string) {\n\tlogger.SetLogLevel(debugLevel)\n\trooms.Lock()\n\trooms.rooms = make(map[string]roomInfo)\n\trooms.Unlock()\n\terr := run(port)\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\n\t\/\/ TODO:\n\t\/\/ delete old rooms\n}\n\nfunc run(port string) (err error) {\n\tlog.Debugf(\"starting TCP server on \" + port)\n\trAddr, err := net.ResolveTCPAddr(\"tcp\", \"0.0.0.0:\"+port)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tserver, err := net.ListenTCP(\"tcp\", rAddr)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Error listening on :\"+port)\n\t}\n\tdefer server.Close()\n\t\/\/ spawn a new goroutine whenever a client connects\n\tfor {\n\t\tconnection, err := server.Accept()\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"problem accepting connection\")\n\t\t}\n\t\tlog.Debugf(\"client %s connected\", connection.RemoteAddr().String())\n\t\tgo func(port string, connection net.Conn) {\n\t\t\terrCommunication := clientCommuncation(port, comm.New(connection))\n\t\t\tif errCommunication != nil {\n\t\t\t\tlog.Warnf(\"relay-%s: %s\", connection.RemoteAddr().String(), errCommunication.Error())\n\t\t\t}\n\t\t}(port, connection)\n\t}\n}\n\nfunc clientCommuncation(port string, c *comm.Comm) (err error) {\n\t\/\/ send ok to tell client they are connected\n\terr = c.Send(\"ok\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ wait for client to tell me which room they want\n\troom, err := c.Receive()\n\tif err != nil {\n\t\treturn\n\t}\n\n\trooms.Lock()\n\t\/\/ first connection is always the receiver\n\tif _, ok := rooms.rooms[room]; !ok {\n\t\trooms.rooms[room] = roomInfo{\n\t\t\treceiver: c,\n\t\t\topened: time.Now(),\n\t\t}\n\t\trooms.Unlock()\n\t\t\/\/ tell the client that they got the room\n\t\terr = c.Send(\"recipient\")\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn nil\n\t}\n\treceiver := rooms.rooms[room].receiver\n\trooms.Unlock()\n\n\t\/\/ second connection is the sender, time to staple connections\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\n\t\/\/ start piping\n\tgo func(com1, com2 *comm.Comm, wg *sync.WaitGroup) {\n\t\tlog.Debug(\"starting pipes\")\n\t\tpipe(com1.Connection(), com2.Connection())\n\t\twg.Done()\n\t\tlog.Debug(\"done piping\")\n\t}(c, receiver, &wg)\n\n\t\/\/ tell the sender everything is ready\n\terr = c.Send(\"sender\")\n\tif err != nil {\n\t\treturn\n\t}\n\twg.Wait()\n\n\t\/\/ delete room\n\trooms.Lock()\n\tlog.Debugf(\"deleting room: %s\", room)\n\tdelete(rooms.rooms, room)\n\trooms.Unlock()\n\treturn nil\n}\n\n\/\/ chanFromConn creates a channel from a Conn object, and sends everything it\n\/\/ Read()s from the socket to the channel.\nfunc chanFromConn(conn net.Conn) chan []byte {\n\tc := make(chan []byte)\n\treader := bufio.NewReader(conn)\n\n\tgo func() {\n\t\tfor {\n\t\t\tb := make([]byte, models.TCP_BUFFER_SIZE)\n\t\t\tn, err := reader.Read(b)\n\t\t\tif n > 0 {\n\t\t\t\tres := make([]byte, n)\n\t\t\t\t\/\/ Copy the buffer so it doesn't get changed while read by the recipient.\n\t\t\t\tcopy(res, b[:n])\n\t\t\t\tc <- res\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tc <- nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn c\n}\n\n\/\/ pipe creates a full-duplex pipe between the two sockets and\n\/\/ transfers data from one to the other.\nfunc pipe(conn1 net.Conn, conn2 net.Conn) {\n\tchan1 := chanFromConn(conn1)\n\tchan2 := chanFromConn(conn2)\n\twriter1 := bufio.NewWriter(conn1)\n\twriter2 := bufio.NewWriter(conn2)\n\n\tfor {\n\t\tselect {\n\t\tcase b1 := <-chan1:\n\t\t\tif b1 == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\twriter2.Write(b1)\n\n\t\tcase b2 := <-chan2:\n\t\t\tif b2 == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\twriter1.Write(b2)\n\t\t}\n\t}\n}\n<commit_msg>try fix<commit_after>package tcp\n\nimport (\n\t\"bufio\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/cihub\/seelog\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/schollz\/croc\/src\/comm\"\n\t\"github.com\/schollz\/croc\/src\/logger\"\n\t\"github.com\/schollz\/croc\/src\/models\"\n)\n\ntype roomInfo struct {\n\treceiver *comm.Comm\n\topened time.Time\n}\n\ntype roomMap struct {\n\trooms map[string]roomInfo\n\tsync.Mutex\n}\n\nvar rooms roomMap\n\n\/\/ Run starts a tcp listener, run async\nfunc Run(debugLevel, port string) {\n\tlogger.SetLogLevel(debugLevel)\n\trooms.Lock()\n\trooms.rooms = make(map[string]roomInfo)\n\trooms.Unlock()\n\terr := run(port)\n\tif err != nil {\n\t\tlog.Error(err)\n\t}\n\n\t\/\/ TODO:\n\t\/\/ delete old rooms\n}\n\nfunc run(port string) (err error) {\n\tlog.Debugf(\"starting TCP server on \" + port)\n\trAddr, err := net.ResolveTCPAddr(\"tcp\", \"0.0.0.0:\"+port)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tserver, err := net.ListenTCP(\"tcp\", rAddr)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"Error listening on :\"+port)\n\t}\n\tdefer server.Close()\n\t\/\/ spawn a new goroutine whenever a client connects\n\tfor {\n\t\tconnection, err := server.Accept()\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"problem accepting connection\")\n\t\t}\n\t\tlog.Debugf(\"client %s connected\", connection.RemoteAddr().String())\n\t\tgo func(port string, connection net.Conn) {\n\t\t\terrCommunication := clientCommuncation(port, comm.New(connection))\n\t\t\tif errCommunication != nil {\n\t\t\t\tlog.Warnf(\"relay-%s: %s\", connection.RemoteAddr().String(), errCommunication.Error())\n\t\t\t}\n\t\t}(port, connection)\n\t}\n}\n\nfunc clientCommuncation(port string, c *comm.Comm) (err error) {\n\t\/\/ send ok to tell client they are connected\n\terr = c.Send(\"ok\")\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ wait for client to tell me which room they want\n\troom, err := c.Receive()\n\tif err != nil {\n\t\treturn\n\t}\n\n\trooms.Lock()\n\t\/\/ first connection is always the receiver\n\tif _, ok := rooms.rooms[room]; !ok {\n\t\trooms.rooms[room] = roomInfo{\n\t\t\treceiver: c,\n\t\t\topened: time.Now(),\n\t\t}\n\t\trooms.Unlock()\n\t\t\/\/ tell the client that they got the room\n\t\terr = c.Send(\"recipient\")\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\treturn nil\n\t}\n\treceiver := rooms.rooms[room].receiver\n\trooms.Unlock()\n\n\t\/\/ second connection is the sender, time to staple connections\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\n\t\/\/ start piping\n\tgo func(com1, com2 *comm.Comm, wg *sync.WaitGroup) {\n\t\tlog.Debug(\"starting pipes\")\n\t\tpipe(com1.Connection(), com2.Connection())\n\t\twg.Done()\n\t\tlog.Debug(\"done piping\")\n\t}(c, receiver, &wg)\n\n\t\/\/ tell the sender everything is ready\n\terr = c.Send(\"sender\")\n\tif err != nil {\n\t\treturn\n\t}\n\twg.Wait()\n\n\t\/\/ delete room\n\trooms.Lock()\n\tlog.Debugf(\"deleting room: %s\", room)\n\tdelete(rooms.rooms, room)\n\trooms.Unlock()\n\treturn nil\n}\n\n\/\/ chanFromConn creates a channel from a Conn object, and sends everything it\n\/\/ Read()s from the socket to the channel.\nfunc chanFromConn(conn net.Conn) chan []byte {\n\tc := make(chan []byte)\n\treader := bufio.NewReader(conn)\n\n\tgo func() {\n\t\tfor {\n\t\t\tb := make([]byte, models.TCP_BUFFER_SIZE)\n\t\t\tn, err := reader.Read(b)\n\t\t\tif n > 0 {\n\t\t\t\tc <- b\n\t\t\t\t\/\/ res := make([]byte, n)\n\t\t\t\t\/\/ \/\/ Copy the buffer so it doesn't get changed while read by the recipient.\n\t\t\t\t\/\/ copy(res, b[:n])\n\t\t\t\t\/\/ c <- res\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tc <- nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn c\n}\n\n\/\/ pipe creates a full-duplex pipe between the two sockets and\n\/\/ transfers data from one to the other.\nfunc pipe(conn1 net.Conn, conn2 net.Conn) {\n\tchan1 := chanFromConn(conn1)\n\tchan2 := chanFromConn(conn2)\n\twriter1 := bufio.NewWriter(conn1)\n\twriter2 := bufio.NewWriter(conn2)\n\n\tfor {\n\t\tselect {\n\t\tcase b1 := <-chan1:\n\t\t\tif b1 == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\twriter2.Write(b1)\n\n\t\tcase b2 := <-chan2:\n\t\t\tif b2 == nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\twriter1.Write(b2)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Serulian Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage typeconstructor\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/serulian\/compiler\/compilercommon\"\n\t\"github.com\/serulian\/compiler\/compilergraph\"\n\t\"github.com\/serulian\/compiler\/graphs\/typegraph\"\n\t\"github.com\/serulian\/compiler\/webidl\"\n)\n\n\/\/ GLOBAL_CONTEXT_ANNOTATION is the annotation that marks an interface as being the global context\n\/\/ (e.g. Window) in WebIDL.\nconst GLOBAL_CONTEXT_ANNOTATION = \"GlobalContext\"\n\n\/\/ CONSTRUCTOR_ANNOTATION is an annotation that describes support for a constructor on a WebIDL\n\/\/ type. This translates to being able to do \"new Type(...)\" in ECMAScript.\nconst CONSTRUCTOR_ANNOTATION = \"Constructor\"\n\n\/\/ NATIVE_OPERATOR_ANNOTATION is an annotation that marks an declaration as supporting the\n\/\/ specified operator natively (i.e. not a custom defined operator).\nconst NATIVE_OPERATOR_ANNOTATION = \"NativeOperator\"\n\n\/\/ GetConstructor returns a TypeGraph constructor for the given IRG.\nfunc GetConstructor(irg *webidl.WebIRG) *irgTypeConstructor {\n\treturn &irgTypeConstructor{\n\t\tirg: irg,\n\t}\n}\n\n\/\/ irgTypeConstructor defines a type for populating a type graph from the IRG.\ntype irgTypeConstructor struct {\n\tirg *webidl.WebIRG \/\/ The IRG being transformed.\n}\n\nfunc (itc *irgTypeConstructor) DefineModules(builder typegraph.GetModuleBuilder) {\n\tfor _, module := range itc.irg.GetModules() {\n\t\tbuilder().\n\t\t\tName(module.Name()).\n\t\t\tPath(string(module.InputSource())).\n\t\t\tSourceNode(module.Node()).\n\t\t\tDefine()\n\t}\n}\n\nfunc (itc *irgTypeConstructor) DefineTypes(builder typegraph.GetTypeBuilder) {\n\tfor _, module := range itc.irg.GetModules() {\n\t\tfor _, declaration := range module.Declarations() {\n\t\t\tif declaration.HasAnnotation(GLOBAL_CONTEXT_ANNOTATION) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttypeBuilder := builder(module.Node())\n\t\t\ttypeBuilder.Name(declaration.Name()).\n\t\t\t\tSourceNode(declaration.GraphNode).\n\t\t\t\tTypeKind(typegraph.ExternalInternalType).\n\t\t\t\tDefine()\n\t\t}\n\t}\n}\n\nfunc (itc *irgTypeConstructor) DefineDependencies(annotator *typegraph.Annotator, graph *typegraph.TypeGraph) {\n\n}\n\nfunc (itc *irgTypeConstructor) DefineMembers(builder typegraph.GetMemberBuilder, reporter typegraph.IssueReporter, graph *typegraph.TypeGraph) {\n\tfor _, declaration := range itc.irg.Declarations() {\n\t\t\/\/ GlobalContext members get defined under their module, not their declaration.\n\t\tvar parentNode = declaration.GraphNode\n\t\tif declaration.HasAnnotation(GLOBAL_CONTEXT_ANNOTATION) {\n\t\t\tparentNode = declaration.Module().GraphNode\n\t\t}\n\n\t\t\/\/ If the declaration has one (or more) constructors, add then as a \"new\".\n\t\tif declaration.HasAnnotation(CONSTRUCTOR_ANNOTATION) {\n\t\t\t\/\/ For each constructor defined, create the intersection of their parameters.\n\t\t\tvar parameters = make([]typegraph.TypeReference, 0)\n\t\t\tfor constructorIndex, constructor := range declaration.GetAnnotations(CONSTRUCTOR_ANNOTATION) {\n\t\t\t\tfor index, parameter := range constructor.Parameters() {\n\t\t\t\t\tparameterType, err := itc.ResolveType(parameter.DeclaredType(), graph)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treporter.ReportError(parameter.GraphNode, \"%v\", err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tvar resolvedParameterType = parameterType\n\t\t\t\t\tif parameter.IsOptional() {\n\t\t\t\t\t\tresolvedParameterType = resolvedParameterType.AsNullable()\n\t\t\t\t\t}\n\n\t\t\t\t\tif index >= len(parameters) {\n\t\t\t\t\t\t\/\/ If this is not the first constructor, then this parameter is implicitly optional\n\t\t\t\t\t\t\/\/ and therefore nullable.\n\t\t\t\t\t\tif constructorIndex > 0 {\n\t\t\t\t\t\t\tresolvedParameterType = resolvedParameterType.AsNullable()\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tparameters = append(parameters, resolvedParameterType)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tparameters[index] = parameters[index].Intersect(resolvedParameterType)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Define the construction function for the type.\n\t\t\ttypeDecl, _ := graph.GetTypeForSourceNode(declaration.GraphNode)\n\t\t\tvar constructorFunction = graph.FunctionTypeReference(typeDecl.GetTypeReference())\n\t\t\tfor _, parameterType := range parameters {\n\t\t\t\tconstructorFunction = constructorFunction.WithParameter(parameterType)\n\t\t\t}\n\n\t\t\t\/\/ Declare a \"new\" member which returns an instance of this type.\n\t\t\tbuilder(parentNode, false).\n\t\t\t\tName(\"new\").\n\t\t\t\tInitialDefine().\n\t\t\t\tExported(true).\n\t\t\t\tStatic(true).\n\t\t\t\tReadOnly(true).\n\t\t\t\tSynchronous(true).\n\t\t\t\tMemberKind(uint64(webidl.ConstructorMember)).\n\t\t\t\tMemberType(constructorFunction).\n\t\t\t\tDefine()\n\t\t}\n\n\t\t\/\/ Add support for any native operators.\n\t\tif declaration.HasAnnotation(GLOBAL_CONTEXT_ANNOTATION) && declaration.HasAnnotation(NATIVE_OPERATOR_ANNOTATION) {\n\t\t\treporter.ReportError(declaration.GraphNode, \"[NativeOperator] not supported on declarations marked with [GlobalContext]\")\n\t\t\treturn\n\t\t}\n\n\t\tfor _, nativeOp := range declaration.GetAnnotations(NATIVE_OPERATOR_ANNOTATION) {\n\t\t\topName, hasOpName := nativeOp.Value()\n\t\t\tif !hasOpName {\n\t\t\t\treporter.ReportError(nativeOp.GraphNode, \"Missing operator name on [NativeOperator] annotation\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttypeDecl, _ := graph.GetTypeForSourceNode(declaration.GraphNode)\n\n\t\t\tvar operatorType = graph.FunctionTypeReference(typeDecl.GetTypeReference())\n\t\t\toperatorType = operatorType.WithParameter(typeDecl.GetTypeReference())\n\n\t\t\tif strings.Lower(opName) != \"not\" {\n\t\t\t\toperatorType = operatorType.WithParameter(typeDecl.GetTypeReference())\n\t\t\t}\n\n\t\t\tbuilder(parentNode, true).\n\t\t\t\tName(opName).\n\t\t\t\tSourceNode(nativeOp.GraphNode).\n\t\t\t\tInitialDefine().\n\t\t\t\tNative(true).\n\t\t\t\tExported(true).\n\t\t\t\tStatic(true).\n\t\t\t\tReadOnly(true).\n\t\t\t\tMemberType(operatorType).\n\t\t\t\tMemberKind(uint64(webidl.OperatorMember)).\n\t\t\t\tDefine()\n\t\t}\n\n\t\t\/\/ Add the declared members.\n\t\tfor _, member := range declaration.Members() {\n\t\t\tibuilder := builder(parentNode, false).\n\t\t\t\tName(member.Name()).\n\t\t\t\tSourceNode(member.GraphNode).\n\t\t\t\tInitialDefine()\n\n\t\t\tdeclaredType, err := itc.ResolveType(member.DeclaredType(), graph)\n\t\t\tif err != nil {\n\t\t\t\treporter.ReportError(member.GraphNode, \"%v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar memberType = declaredType\n\t\t\tvar isReadonly = member.IsReadonly()\n\n\t\t\tswitch member.Kind() {\n\t\t\tcase webidl.FunctionMember:\n\t\t\t\tisReadonly = true\n\t\t\t\tmemberType = graph.FunctionTypeReference(memberType)\n\n\t\t\t\t\/\/ Add the parameter types.\n\t\t\t\tvar markOptional = false\n\t\t\t\tfor _, parameter := range member.Parameters() {\n\t\t\t\t\tif parameter.IsOptional() {\n\t\t\t\t\t\tmarkOptional = true\n\t\t\t\t\t}\n\n\t\t\t\t\tparameterType, err := itc.ResolveType(parameter.DeclaredType(), graph)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treporter.ReportError(member.GraphNode, \"%v\", err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ All optional parameters get marked as nullable, which means we can skip\n\t\t\t\t\t\/\/ passing them on function calls.\n\t\t\t\t\tif markOptional {\n\t\t\t\t\t\tmemberType = memberType.WithParameter(parameterType.AsNullable())\n\t\t\t\t\t} else {\n\t\t\t\t\t\tmemberType = memberType.WithParameter(parameterType)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase webidl.AttributeMember:\n\t\t\t\tif len(member.Parameters()) > 0 {\n\t\t\t\t\treporter.ReportError(member.GraphNode, \"Attributes cannot have parameters\")\n\t\t\t\t}\n\n\t\t\tdefault:\n\t\t\t\tpanic(\"Unknown WebIDL member kind\")\n\t\t\t}\n\n\t\t\tibuilder.Exported(true).\n\t\t\t\tStatic(member.IsStatic()).\n\t\t\t\tSynchronous(true).\n\t\t\t\tReadOnly(isReadonly).\n\t\t\t\tMemberKind(uint64(member.Kind())).\n\t\t\t\tMemberType(memberType).\n\t\t\t\tDefine()\n\t\t}\n\t}\n}\n\nfunc (itc *irgTypeConstructor) Validate(reporter typegraph.IssueReporter, graph *typegraph.TypeGraph) {\n\tseen := map[string]bool{}\n\n\tfor _, module := range itc.irg.GetModules() {\n\t\tfor _, declaration := range module.Declarations() {\n\t\t\tif _, ok := seen[declaration.Name()]; ok {\n\t\t\t\treporter.ReportError(declaration.GraphNode, \"'%s' is already declared in WebIDL\", declaration.Name())\n\t\t\t}\n\t\t\tseen[declaration.Name()] = true\n\t\t}\n\t}\n}\n\nfunc (itc *irgTypeConstructor) GetLocation(sourceNodeId compilergraph.GraphNodeId) (compilercommon.SourceAndLocation, bool) {\n\tlayerNode, found := itc.irg.TryGetNode(sourceNodeId)\n\tif !found {\n\t\treturn compilercommon.SourceAndLocation{}, false\n\t}\n\n\treturn itc.irg.NodeLocation(layerNode), true\n}\n\n\/\/ ResolveType attempts to resolve the given type string.\nfunc (itc *irgTypeConstructor) ResolveType(typeString string, graph *typegraph.TypeGraph) (typegraph.TypeReference, error) {\n\tif typeString == \"any\" {\n\t\treturn graph.AnyTypeReference(), nil\n\t}\n\n\tif typeString == \"void\" {\n\t\treturn graph.VoidTypeReference(), nil\n\t}\n\n\tdeclaration, hasDeclaration := itc.irg.FindDeclaration(typeString)\n\tif !hasDeclaration {\n\t\treturn graph.AnyTypeReference(), fmt.Errorf(\"Could not find WebIDL type %v\", typeString)\n\t}\n\n\ttypeDecl, hasType := graph.GetTypeForSourceNode(declaration.GraphNode)\n\tif !hasType {\n\t\tpanic(\"Type not found for WebIDL type declaration\")\n\t}\n\n\treturn typeDecl.GetTypeReference(), nil\n}\n<commit_msg>Fix typo<commit_after>\/\/ Copyright 2015 The Serulian Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage typeconstructor\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/serulian\/compiler\/compilercommon\"\n\t\"github.com\/serulian\/compiler\/compilergraph\"\n\t\"github.com\/serulian\/compiler\/graphs\/typegraph\"\n\t\"github.com\/serulian\/compiler\/webidl\"\n)\n\n\/\/ GLOBAL_CONTEXT_ANNOTATION is the annotation that marks an interface as being the global context\n\/\/ (e.g. Window) in WebIDL.\nconst GLOBAL_CONTEXT_ANNOTATION = \"GlobalContext\"\n\n\/\/ CONSTRUCTOR_ANNOTATION is an annotation that describes support for a constructor on a WebIDL\n\/\/ type. This translates to being able to do \"new Type(...)\" in ECMAScript.\nconst CONSTRUCTOR_ANNOTATION = \"Constructor\"\n\n\/\/ NATIVE_OPERATOR_ANNOTATION is an annotation that marks an declaration as supporting the\n\/\/ specified operator natively (i.e. not a custom defined operator).\nconst NATIVE_OPERATOR_ANNOTATION = \"NativeOperator\"\n\n\/\/ GetConstructor returns a TypeGraph constructor for the given IRG.\nfunc GetConstructor(irg *webidl.WebIRG) *irgTypeConstructor {\n\treturn &irgTypeConstructor{\n\t\tirg: irg,\n\t}\n}\n\n\/\/ irgTypeConstructor defines a type for populating a type graph from the IRG.\ntype irgTypeConstructor struct {\n\tirg *webidl.WebIRG \/\/ The IRG being transformed.\n}\n\nfunc (itc *irgTypeConstructor) DefineModules(builder typegraph.GetModuleBuilder) {\n\tfor _, module := range itc.irg.GetModules() {\n\t\tbuilder().\n\t\t\tName(module.Name()).\n\t\t\tPath(string(module.InputSource())).\n\t\t\tSourceNode(module.Node()).\n\t\t\tDefine()\n\t}\n}\n\nfunc (itc *irgTypeConstructor) DefineTypes(builder typegraph.GetTypeBuilder) {\n\tfor _, module := range itc.irg.GetModules() {\n\t\tfor _, declaration := range module.Declarations() {\n\t\t\tif declaration.HasAnnotation(GLOBAL_CONTEXT_ANNOTATION) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttypeBuilder := builder(module.Node())\n\t\t\ttypeBuilder.Name(declaration.Name()).\n\t\t\t\tSourceNode(declaration.GraphNode).\n\t\t\t\tTypeKind(typegraph.ExternalInternalType).\n\t\t\t\tDefine()\n\t\t}\n\t}\n}\n\nfunc (itc *irgTypeConstructor) DefineDependencies(annotator *typegraph.Annotator, graph *typegraph.TypeGraph) {\n\n}\n\nfunc (itc *irgTypeConstructor) DefineMembers(builder typegraph.GetMemberBuilder, reporter typegraph.IssueReporter, graph *typegraph.TypeGraph) {\n\tfor _, declaration := range itc.irg.Declarations() {\n\t\t\/\/ GlobalContext members get defined under their module, not their declaration.\n\t\tvar parentNode = declaration.GraphNode\n\t\tif declaration.HasAnnotation(GLOBAL_CONTEXT_ANNOTATION) {\n\t\t\tparentNode = declaration.Module().GraphNode\n\t\t}\n\n\t\t\/\/ If the declaration has one (or more) constructors, add then as a \"new\".\n\t\tif declaration.HasAnnotation(CONSTRUCTOR_ANNOTATION) {\n\t\t\t\/\/ For each constructor defined, create the intersection of their parameters.\n\t\t\tvar parameters = make([]typegraph.TypeReference, 0)\n\t\t\tfor constructorIndex, constructor := range declaration.GetAnnotations(CONSTRUCTOR_ANNOTATION) {\n\t\t\t\tfor index, parameter := range constructor.Parameters() {\n\t\t\t\t\tparameterType, err := itc.ResolveType(parameter.DeclaredType(), graph)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treporter.ReportError(parameter.GraphNode, \"%v\", err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tvar resolvedParameterType = parameterType\n\t\t\t\t\tif parameter.IsOptional() {\n\t\t\t\t\t\tresolvedParameterType = resolvedParameterType.AsNullable()\n\t\t\t\t\t}\n\n\t\t\t\t\tif index >= len(parameters) {\n\t\t\t\t\t\t\/\/ If this is not the first constructor, then this parameter is implicitly optional\n\t\t\t\t\t\t\/\/ and therefore nullable.\n\t\t\t\t\t\tif constructorIndex > 0 {\n\t\t\t\t\t\t\tresolvedParameterType = resolvedParameterType.AsNullable()\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tparameters = append(parameters, resolvedParameterType)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tparameters[index] = parameters[index].Intersect(resolvedParameterType)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Define the construction function for the type.\n\t\t\ttypeDecl, _ := graph.GetTypeForSourceNode(declaration.GraphNode)\n\t\t\tvar constructorFunction = graph.FunctionTypeReference(typeDecl.GetTypeReference())\n\t\t\tfor _, parameterType := range parameters {\n\t\t\t\tconstructorFunction = constructorFunction.WithParameter(parameterType)\n\t\t\t}\n\n\t\t\t\/\/ Declare a \"new\" member which returns an instance of this type.\n\t\t\tbuilder(parentNode, false).\n\t\t\t\tName(\"new\").\n\t\t\t\tInitialDefine().\n\t\t\t\tExported(true).\n\t\t\t\tStatic(true).\n\t\t\t\tReadOnly(true).\n\t\t\t\tSynchronous(true).\n\t\t\t\tMemberKind(uint64(webidl.ConstructorMember)).\n\t\t\t\tMemberType(constructorFunction).\n\t\t\t\tDefine()\n\t\t}\n\n\t\t\/\/ Add support for any native operators.\n\t\tif declaration.HasAnnotation(GLOBAL_CONTEXT_ANNOTATION) && declaration.HasAnnotation(NATIVE_OPERATOR_ANNOTATION) {\n\t\t\treporter.ReportError(declaration.GraphNode, \"[NativeOperator] not supported on declarations marked with [GlobalContext]\")\n\t\t\treturn\n\t\t}\n\n\t\tfor _, nativeOp := range declaration.GetAnnotations(NATIVE_OPERATOR_ANNOTATION) {\n\t\t\topName, hasOpName := nativeOp.Value()\n\t\t\tif !hasOpName {\n\t\t\t\treporter.ReportError(nativeOp.GraphNode, \"Missing operator name on [NativeOperator] annotation\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ttypeDecl, _ := graph.GetTypeForSourceNode(declaration.GraphNode)\n\n\t\t\tvar operatorType = graph.FunctionTypeReference(typeDecl.GetTypeReference())\n\t\t\toperatorType = operatorType.WithParameter(typeDecl.GetTypeReference())\n\n\t\t\tif strings.ToLower(opName) != \"not\" {\n\t\t\t\toperatorType = operatorType.WithParameter(typeDecl.GetTypeReference())\n\t\t\t}\n\n\t\t\tbuilder(parentNode, true).\n\t\t\t\tName(opName).\n\t\t\t\tSourceNode(nativeOp.GraphNode).\n\t\t\t\tInitialDefine().\n\t\t\t\tNative(true).\n\t\t\t\tExported(true).\n\t\t\t\tStatic(true).\n\t\t\t\tReadOnly(true).\n\t\t\t\tMemberType(operatorType).\n\t\t\t\tMemberKind(uint64(webidl.OperatorMember)).\n\t\t\t\tDefine()\n\t\t}\n\n\t\t\/\/ Add the declared members.\n\t\tfor _, member := range declaration.Members() {\n\t\t\tibuilder := builder(parentNode, false).\n\t\t\t\tName(member.Name()).\n\t\t\t\tSourceNode(member.GraphNode).\n\t\t\t\tInitialDefine()\n\n\t\t\tdeclaredType, err := itc.ResolveType(member.DeclaredType(), graph)\n\t\t\tif err != nil {\n\t\t\t\treporter.ReportError(member.GraphNode, \"%v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar memberType = declaredType\n\t\t\tvar isReadonly = member.IsReadonly()\n\n\t\t\tswitch member.Kind() {\n\t\t\tcase webidl.FunctionMember:\n\t\t\t\tisReadonly = true\n\t\t\t\tmemberType = graph.FunctionTypeReference(memberType)\n\n\t\t\t\t\/\/ Add the parameter types.\n\t\t\t\tvar markOptional = false\n\t\t\t\tfor _, parameter := range member.Parameters() {\n\t\t\t\t\tif parameter.IsOptional() {\n\t\t\t\t\t\tmarkOptional = true\n\t\t\t\t\t}\n\n\t\t\t\t\tparameterType, err := itc.ResolveType(parameter.DeclaredType(), graph)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treporter.ReportError(member.GraphNode, \"%v\", err)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ All optional parameters get marked as nullable, which means we can skip\n\t\t\t\t\t\/\/ passing them on function calls.\n\t\t\t\t\tif markOptional {\n\t\t\t\t\t\tmemberType = memberType.WithParameter(parameterType.AsNullable())\n\t\t\t\t\t} else {\n\t\t\t\t\t\tmemberType = memberType.WithParameter(parameterType)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\tcase webidl.AttributeMember:\n\t\t\t\tif len(member.Parameters()) > 0 {\n\t\t\t\t\treporter.ReportError(member.GraphNode, \"Attributes cannot have parameters\")\n\t\t\t\t}\n\n\t\t\tdefault:\n\t\t\t\tpanic(\"Unknown WebIDL member kind\")\n\t\t\t}\n\n\t\t\tibuilder.Exported(true).\n\t\t\t\tStatic(member.IsStatic()).\n\t\t\t\tSynchronous(true).\n\t\t\t\tReadOnly(isReadonly).\n\t\t\t\tMemberKind(uint64(member.Kind())).\n\t\t\t\tMemberType(memberType).\n\t\t\t\tDefine()\n\t\t}\n\t}\n}\n\nfunc (itc *irgTypeConstructor) Validate(reporter typegraph.IssueReporter, graph *typegraph.TypeGraph) {\n\tseen := map[string]bool{}\n\n\tfor _, module := range itc.irg.GetModules() {\n\t\tfor _, declaration := range module.Declarations() {\n\t\t\tif _, ok := seen[declaration.Name()]; ok {\n\t\t\t\treporter.ReportError(declaration.GraphNode, \"'%s' is already declared in WebIDL\", declaration.Name())\n\t\t\t}\n\t\t\tseen[declaration.Name()] = true\n\t\t}\n\t}\n}\n\nfunc (itc *irgTypeConstructor) GetLocation(sourceNodeId compilergraph.GraphNodeId) (compilercommon.SourceAndLocation, bool) {\n\tlayerNode, found := itc.irg.TryGetNode(sourceNodeId)\n\tif !found {\n\t\treturn compilercommon.SourceAndLocation{}, false\n\t}\n\n\treturn itc.irg.NodeLocation(layerNode), true\n}\n\n\/\/ ResolveType attempts to resolve the given type string.\nfunc (itc *irgTypeConstructor) ResolveType(typeString string, graph *typegraph.TypeGraph) (typegraph.TypeReference, error) {\n\tif typeString == \"any\" {\n\t\treturn graph.AnyTypeReference(), nil\n\t}\n\n\tif typeString == \"void\" {\n\t\treturn graph.VoidTypeReference(), nil\n\t}\n\n\tdeclaration, hasDeclaration := itc.irg.FindDeclaration(typeString)\n\tif !hasDeclaration {\n\t\treturn graph.AnyTypeReference(), fmt.Errorf(\"Could not find WebIDL type %v\", typeString)\n\t}\n\n\ttypeDecl, hasType := graph.GetTypeForSourceNode(declaration.GraphNode)\n\tif !hasType {\n\t\tpanic(\"Type not found for WebIDL type declaration\")\n\t}\n\n\treturn typeDecl.GetTypeReference(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/vmware\/govmomi\/find\"\n\t\"github.com\/vmware\/govmomi\/object\"\n\t\"github.com\/vmware\/govmomi\/property\"\n\t\"github.com\/vmware\/govmomi\/vim25\/mo\"\n\t\"github.com\/vmware\/govmomi\/vim25\/types\"\n\t\"log\"\n\t\"strings\"\n)\n\nfunc resourceVirtualMachine() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceVirtualMachineCreate,\n\t\tRead: resourceVirtualMachineRead,\n\t\tDelete: resourceVirtualMachineDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"image\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"datacenter\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"folder\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"host\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"resource_pool\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"datastore\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"linked_clone\": {\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: false,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"cpus\": {\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"memory\": {\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"domain\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"ip_address\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"subnet_mask\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"gateway\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"configuration_parameters\": {\n\t\t\t\tType: schema.TypeMap,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"power_on\": {\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {\n\tproviderMeta := meta.(providerMeta)\n\tclient := providerMeta.client\n\tctx := providerMeta.context\n\tfinder := find.NewFinder(client, false)\n\n\tdc_name := d.Get(\"datacenter\").(string)\n\tif dc_name == \"\" {\n\t\tdc, err := finder.DefaultDatacenter(ctx)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error reading default datacenter: %s\", err)\n\t\t}\n\t\tvar dc_mo mo.Datacenter\n\t\terr = dc.Properties(ctx, dc.Reference(), []string{\"name\"}, &dc_mo)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error reading datacenter name: %s\", err)\n\t\t}\n\t\tdc_name = dc_mo.Name\n\t\tfinder.SetDatacenter(dc)\n\t\td.Set(\"datacenter\", dc_name)\n\t}\n\n\timage_name := d.Get(\"image\").(string)\n\timage_ref, err := object.NewSearchIndex(client).FindByInventoryPath(ctx, fmt.Sprintf(\"%s\/vm\/%s\", dc_name, image_name))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error reading vm: %s\", err)\n\t}\n\tif image_ref == nil {\n\t\treturn fmt.Errorf(\"Cannot find image %s\", image_name)\n\t}\n\timage := image_ref.(*object.VirtualMachine)\n\n\tvar image_mo mo.VirtualMachine\n\terr = image.Properties(ctx, image.Reference(), []string{\"parent\", \"config.template\", \"resourcePool\", \"snapshot\", \"guest.toolsVersionStatus2\", \"config.guestFullName\"}, &image_mo)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error reading base VM properties: %s\", err)\n\t}\n\n\tvar folder_ref object.Reference\n\tvar folder *object.Folder\n\tif d.Get(\"folder\").(string) != \"\" {\n\t\tfolder_ref, err = object.NewSearchIndex(client).FindByInventoryPath(ctx, fmt.Sprintf(\"%v\/vm\/%v\", dc_name, d.Get(\"folder\").(string)))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error reading folder: %s\", err)\n\t\t}\n\t\tif folder_ref == nil {\n\t\t\treturn fmt.Errorf(\"Cannot find folder %s\", d.Get(\"folder\").(string))\n\t\t}\n\n\t\tfolder = folder_ref.(*object.Folder)\n\t} else {\n\t\tfolder = object.NewFolder(client, *image_mo.Parent)\n\t}\n\n\thost_name := d.Get(\"host\").(string)\n\tif host_name == \"\" {\n\t\tif image_mo.Config.Template == true {\n\t\t\treturn errors.New(\"Image is a template, 'host' is a required\")\n\t\t} else {\n\t\t\tvar pool_mo mo.ResourcePool\n\t\t\terr = property.DefaultCollector(client).RetrieveOne(ctx, *image_mo.ResourcePool, []string{\"owner\"}, &pool_mo)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error reading resource pool of base VM: %s\", err)\n\t\t\t}\n\n\t\t\tif strings.Contains(pool_mo.Owner.Value, \"domain-s\") {\n\t\t\t\tvar host_mo mo.ComputeResource\n\t\t\t\terr = property.DefaultCollector(client).RetrieveOne(ctx, pool_mo.Owner, []string{\"name\"}, &host_mo)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Error reading host of base VM: %s\", err)\n\t\t\t\t}\n\t\t\t\thost_name = host_mo.Name\n\t\t\t} else if strings.Contains(pool_mo.Owner.Value, \"domain-c\") {\n\t\t\t\tvar cluster_mo mo.ClusterComputeResource\n\t\t\t\terr = property.DefaultCollector(client).RetrieveOne(ctx, pool_mo.Owner, []string{\"name\"}, &cluster_mo)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Error reading cluster of base VM: %s\", err)\n\t\t\t\t}\n\t\t\t\thost_name = cluster_mo.Name\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"Unknown compute resource format of base VM: %s\", pool_mo.Owner.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\tpool_name := d.Get(\"resource_pool\").(string)\n\tpool_ref, err := object.NewSearchIndex(client).FindByInventoryPath(ctx, fmt.Sprintf(\"%v\/host\/%v\/Resources\/%v\", dc_name, host_name, pool_name))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error reading resource pool: %s\", err)\n\t}\n\tif pool_ref == nil {\n\t\treturn fmt.Errorf(\"Cannot find resource pool %s\", pool_name)\n\t}\n\n\tvar relocateSpec types.VirtualMachineRelocateSpec\n\tvar pool_mor types.ManagedObjectReference\n\tpool_mor = pool_ref.Reference()\n\trelocateSpec.Pool = &pool_mor\n\n\tdatastore_name := d.Get(\"datastore\").(string)\n\tif datastore_name != \"\" {\n\t\tdatastore_ref, err := finder.Datastore(ctx, fmt.Sprintf(\"\/%v\/datastore\/%v\", dc_name, datastore_name))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Cannot find datastore '%s'\", datastore_name)\n\t\t}\n\t\tdatastore_mor := datastore_ref.Reference()\n\t\trelocateSpec.Datastore = &datastore_mor\n\t}\n\n\tif d.Get(\"linked_clone\").(bool) {\n\t\trelocateSpec.DiskMoveType = \"createNewChildDiskBacking\"\n\t}\n\tvar confSpec types.VirtualMachineConfigSpec\n\tif d.Get(\"cpus\") != nil {\n\t\tconfSpec.NumCPUs = int32(d.Get(\"cpus\").(int))\n\t}\n\tif d.Get(\"memory\") != nil {\n\t\tconfSpec.MemoryMB = int64(d.Get(\"memory\").(int))\n\t}\n\n\tparams := d.Get(\"configuration_parameters\").(map[string]interface{})\n\tvar ov []types.BaseOptionValue\n\tif len(params) > 0 {\n\t\tfor k, v := range params {\n\t\t\to := types.OptionValue{\n\t\t\t\tKey: k,\n\t\t\t\tValue: v,\n\t\t\t}\n\t\t\tov = append(ov, &o)\n\t\t}\n\t\tconfSpec.ExtraConfig = ov\n\t}\n\n\tcloneSpec := types.VirtualMachineCloneSpec{\n\t\tLocation: relocateSpec,\n\t\tConfig: &confSpec,\n\t\tPowerOn: d.Get(\"power_on\").(bool),\n\t}\n\tif d.Get(\"linked_clone\").(bool) {\n\t\tif image_mo.Snapshot == nil {\n\t\t\treturn errors.New(\"`linked_clone=true`, but image VM has no snapshots\")\n\t\t}\n\t\tcloneSpec.Snapshot = image_mo.Snapshot.CurrentSnapshot\n\t}\n\n\tdomain := d.Get(\"domain\").(string)\n\tip_address := d.Get(\"ip_address\").(string)\n\tif domain != \"\" {\n\t\tif image_mo.Guest.ToolsVersionStatus2 == \"guestToolsNotInstalled\" {\n\t\t\treturn errors.New(\"VMware tools are not installed in base VM\")\n\t\t}\n\t\tif !strings.Contains(image_mo.Config.GuestFullName, \"Linux\") && !strings.Contains(image_mo.Config.GuestFullName, \"CentOS\") {\n\t\t\treturn fmt.Errorf(\"Guest customization is supported only for Linux. Base image OS is: %s\", image_mo.Config.GuestFullName)\n\t\t}\n\t\tcustomizationSpec := types.CustomizationSpec{\n\t\t\tGlobalIPSettings: types.CustomizationGlobalIPSettings{},\n\t\t\tIdentity: &types.CustomizationLinuxPrep{\n\t\t\t\tHostName: &types.CustomizationVirtualMachineName{},\n\t\t\t\tDomain: domain,\n\t\t\t},\n\t\t\tNicSettingMap: []types.CustomizationAdapterMapping{\n\t\t\t\t{\n\t\t\t\t\tAdapter: types.CustomizationIPSettings{},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tif ip_address != \"\" {\n\t\t\tmask := d.Get(\"subnet_mask\").(string)\n\t\t\tif mask == \"\" {\n\t\t\t\treturn errors.New(\"'subnet_mask' must be set, if static 'ip_address' is specified\")\n\t\t\t}\n\t\t\tcustomizationSpec.NicSettingMap[0].Adapter.Ip = &types.CustomizationFixedIp{\n\t\t\t\tIpAddress: ip_address,\n\t\t\t}\n\t\t\tcustomizationSpec.NicSettingMap[0].Adapter.SubnetMask = d.Get(\"subnet_mask\").(string)\n\t\t\tgateway := d.Get(\"gateway\").(string)\n\t\t\tif gateway != \"\" {\n\t\t\t\tcustomizationSpec.NicSettingMap[0].Adapter.Gateway = []string{gateway}\n\t\t\t}\n\t\t} else {\n\t\t\tcustomizationSpec.NicSettingMap[0].Adapter.Ip = &types.CustomizationDhcpIpGenerator{}\n\t\t}\n\t\tcloneSpec.Customization = &customizationSpec\n\t} else if ip_address != \"\" {\n\t\treturn errors.New(\"'domain' must be set, if static 'ip_address' is specified\")\n\t}\n\n\ttask, err := image.Clone(ctx, folder, d.Get(\"name\").(string), cloneSpec)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error clonning vm: %s\", err)\n\t}\n\tinfo, err := task.WaitForResult(ctx, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error clonning vm: %s\", err)\n\t}\n\n\tvm_mor := info.Result.(types.ManagedObjectReference)\n\td.SetId(vm_mor.Value)\n\tvm := object.NewVirtualMachine(client, vm_mor)\n\t\/\/ workaround for https:\/\/github.com\/vmware\/govmomi\/issues\/218\n\tif ip_address == \"\" && d.Get(\"power_on\").(bool) {\n\t\tip, err := vm.WaitForIP(ctx)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[ERROR] Cannot read ip address: %s\", err)\n\t\t} else {\n\t\t\td.Set(\"ip_address\", ip)\n\t\t\td.SetConnInfo(map[string]string{\n\t\t\t\t\"type\": \"ssh\",\n\t\t\t\t\"host\": ip,\n\t\t\t})\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc resourceVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {\n\tproviderMeta := meta.(providerMeta)\n\tclient := providerMeta.client\n\tctx := providerMeta.context\n\tvm_mor := types.ManagedObjectReference{Type: \"VirtualMachine\", Value: d.Id()}\n\tvm := object.NewVirtualMachine(client, vm_mor)\n\n\tvar vm_mo mo.VirtualMachine\n\terr := vm.Properties(ctx, vm.Reference(), []string{\"summary\"}, &vm_mo)\n\tif err != nil {\n\t\tlog.Printf(\"[INFO] Cannot read VM properties: %s\", err)\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\td.Set(\"name\", vm_mo.Summary.Config.Name)\n\td.Set(\"cpus\", vm_mo.Summary.Config.NumCpu)\n\td.Set(\"memory\", vm_mo.Summary.Config.MemorySizeMB)\n\n\tif vm_mo.Summary.Runtime.PowerState == \"poweredOn\" {\n\t\td.Set(\"power_on\", true)\n\t} else {\n\t\td.Set(\"power_on\", false)\n\t}\n\n\tif d.Get(\"power_on\").(bool) {\n\t\tip, err := vm.WaitForIP(ctx)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[ERROR] Cannot read ip address: %s\", err)\n\t\t} else {\n\t\t\td.Set(\"ip_address\", ip)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc resourceVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error {\n\tproviderMeta := meta.(providerMeta)\n\tclient := providerMeta.client\n\tctx := providerMeta.context\n\n\tvm_mor := types.ManagedObjectReference{Type: \"VirtualMachine\", Value: d.Id()}\n\tvm := object.NewVirtualMachine(client, vm_mor)\n\n\ttask, err := vm.PowerOff(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error powering vm off: %s\", err)\n\t}\n\terr = task.Wait(ctx)\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"in the current state (Powered off)\") {\n\t\t\treturn fmt.Errorf(\"Error powering vm off: %s\", err)\n\t\t}\n\t}\n\n\ttask, err = vm.Destroy(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error deleting vm: %s\", err)\n\t}\n\terr = task.Wait(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error deleting vm: %s\", err)\n\t}\n\n\treturn nil\n}\n<commit_msg>report all vm IP addresses<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/vmware\/govmomi\/find\"\n\t\"github.com\/vmware\/govmomi\/object\"\n\t\"github.com\/vmware\/govmomi\/property\"\n\t\"github.com\/vmware\/govmomi\/vim25\/mo\"\n\t\"github.com\/vmware\/govmomi\/vim25\/types\"\n\t\"log\"\n\t\"strings\"\n)\n\nfunc resourceVirtualMachine() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceVirtualMachineCreate,\n\t\tRead: resourceVirtualMachineRead,\n\t\tDelete: resourceVirtualMachineDelete,\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"image\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"datacenter\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"folder\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"host\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"resource_pool\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"datastore\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"linked_clone\": {\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: false,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"cpus\": {\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"memory\": {\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"domain\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"ip_address\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t\t\"ip_addresses\": {\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tComputed: true,\n\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"subnet_mask\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"gateway\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"configuration_parameters\": {\n\t\t\t\tType: schema.TypeMap,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"power_on\": {\n\t\t\t\tType: schema.TypeBool,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceVirtualMachineCreate(d *schema.ResourceData, meta interface{}) error {\n\tproviderMeta := meta.(providerMeta)\n\tclient := providerMeta.client\n\tctx := providerMeta.context\n\tfinder := find.NewFinder(client, false)\n\n\tdc_name := d.Get(\"datacenter\").(string)\n\tif dc_name == \"\" {\n\t\tdc, err := finder.DefaultDatacenter(ctx)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error reading default datacenter: %s\", err)\n\t\t}\n\t\tvar dc_mo mo.Datacenter\n\t\terr = dc.Properties(ctx, dc.Reference(), []string{\"name\"}, &dc_mo)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error reading datacenter name: %s\", err)\n\t\t}\n\t\tdc_name = dc_mo.Name\n\t\tfinder.SetDatacenter(dc)\n\t\td.Set(\"datacenter\", dc_name)\n\t}\n\n\timage_name := d.Get(\"image\").(string)\n\timage_ref, err := object.NewSearchIndex(client).FindByInventoryPath(ctx, fmt.Sprintf(\"%s\/vm\/%s\", dc_name, image_name))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error reading vm: %s\", err)\n\t}\n\tif image_ref == nil {\n\t\treturn fmt.Errorf(\"Cannot find image %s\", image_name)\n\t}\n\timage := image_ref.(*object.VirtualMachine)\n\n\tvar image_mo mo.VirtualMachine\n\terr = image.Properties(ctx, image.Reference(), []string{\"parent\", \"config.template\", \"resourcePool\", \"snapshot\", \"guest.toolsVersionStatus2\", \"config.guestFullName\"}, &image_mo)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error reading base VM properties: %s\", err)\n\t}\n\n\tvar folder_ref object.Reference\n\tvar folder *object.Folder\n\tif d.Get(\"folder\").(string) != \"\" {\n\t\tfolder_ref, err = object.NewSearchIndex(client).FindByInventoryPath(ctx, fmt.Sprintf(\"%v\/vm\/%v\", dc_name, d.Get(\"folder\").(string)))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error reading folder: %s\", err)\n\t\t}\n\t\tif folder_ref == nil {\n\t\t\treturn fmt.Errorf(\"Cannot find folder %s\", d.Get(\"folder\").(string))\n\t\t}\n\n\t\tfolder = folder_ref.(*object.Folder)\n\t} else {\n\t\tfolder = object.NewFolder(client, *image_mo.Parent)\n\t}\n\n\thost_name := d.Get(\"host\").(string)\n\tif host_name == \"\" {\n\t\tif image_mo.Config.Template == true {\n\t\t\treturn errors.New(\"Image is a template, 'host' is a required\")\n\t\t} else {\n\t\t\tvar pool_mo mo.ResourcePool\n\t\t\terr = property.DefaultCollector(client).RetrieveOne(ctx, *image_mo.ResourcePool, []string{\"owner\"}, &pool_mo)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error reading resource pool of base VM: %s\", err)\n\t\t\t}\n\n\t\t\tif strings.Contains(pool_mo.Owner.Value, \"domain-s\") {\n\t\t\t\tvar host_mo mo.ComputeResource\n\t\t\t\terr = property.DefaultCollector(client).RetrieveOne(ctx, pool_mo.Owner, []string{\"name\"}, &host_mo)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Error reading host of base VM: %s\", err)\n\t\t\t\t}\n\t\t\t\thost_name = host_mo.Name\n\t\t\t} else if strings.Contains(pool_mo.Owner.Value, \"domain-c\") {\n\t\t\t\tvar cluster_mo mo.ClusterComputeResource\n\t\t\t\terr = property.DefaultCollector(client).RetrieveOne(ctx, pool_mo.Owner, []string{\"name\"}, &cluster_mo)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Error reading cluster of base VM: %s\", err)\n\t\t\t\t}\n\t\t\t\thost_name = cluster_mo.Name\n\t\t\t} else {\n\t\t\t\treturn fmt.Errorf(\"Unknown compute resource format of base VM: %s\", pool_mo.Owner.Value)\n\t\t\t}\n\t\t}\n\t}\n\n\tpool_name := d.Get(\"resource_pool\").(string)\n\tpool_ref, err := object.NewSearchIndex(client).FindByInventoryPath(ctx, fmt.Sprintf(\"%v\/host\/%v\/Resources\/%v\", dc_name, host_name, pool_name))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error reading resource pool: %s\", err)\n\t}\n\tif pool_ref == nil {\n\t\treturn fmt.Errorf(\"Cannot find resource pool %s\", pool_name)\n\t}\n\n\tvar relocateSpec types.VirtualMachineRelocateSpec\n\tvar pool_mor types.ManagedObjectReference\n\tpool_mor = pool_ref.Reference()\n\trelocateSpec.Pool = &pool_mor\n\n\tdatastore_name := d.Get(\"datastore\").(string)\n\tif datastore_name != \"\" {\n\t\tdatastore_ref, err := finder.Datastore(ctx, fmt.Sprintf(\"\/%v\/datastore\/%v\", dc_name, datastore_name))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Cannot find datastore '%s'\", datastore_name)\n\t\t}\n\t\tdatastore_mor := datastore_ref.Reference()\n\t\trelocateSpec.Datastore = &datastore_mor\n\t}\n\n\tif d.Get(\"linked_clone\").(bool) {\n\t\trelocateSpec.DiskMoveType = \"createNewChildDiskBacking\"\n\t}\n\tvar confSpec types.VirtualMachineConfigSpec\n\tif d.Get(\"cpus\") != nil {\n\t\tconfSpec.NumCPUs = int32(d.Get(\"cpus\").(int))\n\t}\n\tif d.Get(\"memory\") != nil {\n\t\tconfSpec.MemoryMB = int64(d.Get(\"memory\").(int))\n\t}\n\n\tparams := d.Get(\"configuration_parameters\").(map[string]interface{})\n\tvar ov []types.BaseOptionValue\n\tif len(params) > 0 {\n\t\tfor k, v := range params {\n\t\t\to := types.OptionValue{\n\t\t\t\tKey: k,\n\t\t\t\tValue: v,\n\t\t\t}\n\t\t\tov = append(ov, &o)\n\t\t}\n\t\tconfSpec.ExtraConfig = ov\n\t}\n\n\tcloneSpec := types.VirtualMachineCloneSpec{\n\t\tLocation: relocateSpec,\n\t\tConfig: &confSpec,\n\t\tPowerOn: d.Get(\"power_on\").(bool),\n\t}\n\tif d.Get(\"linked_clone\").(bool) {\n\t\tif image_mo.Snapshot == nil {\n\t\t\treturn errors.New(\"`linked_clone=true`, but image VM has no snapshots\")\n\t\t}\n\t\tcloneSpec.Snapshot = image_mo.Snapshot.CurrentSnapshot\n\t}\n\n\tdomain := d.Get(\"domain\").(string)\n\tip_address := d.Get(\"ip_address\").(string)\n\tif domain != \"\" {\n\t\tif image_mo.Guest.ToolsVersionStatus2 == \"guestToolsNotInstalled\" {\n\t\t\treturn errors.New(\"VMware tools are not installed in base VM\")\n\t\t}\n\t\tif !strings.Contains(image_mo.Config.GuestFullName, \"Linux\") && !strings.Contains(image_mo.Config.GuestFullName, \"CentOS\") {\n\t\t\treturn fmt.Errorf(\"Guest customization is supported only for Linux. Base image OS is: %s\", image_mo.Config.GuestFullName)\n\t\t}\n\t\tcustomizationSpec := types.CustomizationSpec{\n\t\t\tGlobalIPSettings: types.CustomizationGlobalIPSettings{},\n\t\t\tIdentity: &types.CustomizationLinuxPrep{\n\t\t\t\tHostName: &types.CustomizationVirtualMachineName{},\n\t\t\t\tDomain: domain,\n\t\t\t},\n\t\t\tNicSettingMap: []types.CustomizationAdapterMapping{\n\t\t\t\t{\n\t\t\t\t\tAdapter: types.CustomizationIPSettings{},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tif ip_address != \"\" {\n\t\t\tmask := d.Get(\"subnet_mask\").(string)\n\t\t\tif mask == \"\" {\n\t\t\t\treturn errors.New(\"'subnet_mask' must be set, if static 'ip_address' is specified\")\n\t\t\t}\n\t\t\tcustomizationSpec.NicSettingMap[0].Adapter.Ip = &types.CustomizationFixedIp{\n\t\t\t\tIpAddress: ip_address,\n\t\t\t}\n\t\t\tcustomizationSpec.NicSettingMap[0].Adapter.SubnetMask = d.Get(\"subnet_mask\").(string)\n\t\t\tgateway := d.Get(\"gateway\").(string)\n\t\t\tif gateway != \"\" {\n\t\t\t\tcustomizationSpec.NicSettingMap[0].Adapter.Gateway = []string{gateway}\n\t\t\t}\n\t\t} else {\n\t\t\tcustomizationSpec.NicSettingMap[0].Adapter.Ip = &types.CustomizationDhcpIpGenerator{}\n\t\t}\n\t\tcloneSpec.Customization = &customizationSpec\n\t} else if ip_address != \"\" {\n\t\treturn errors.New(\"'domain' must be set, if static 'ip_address' is specified\")\n\t}\n\n\ttask, err := image.Clone(ctx, folder, d.Get(\"name\").(string), cloneSpec)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error clonning vm: %s\", err)\n\t}\n\tinfo, err := task.WaitForResult(ctx, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error clonning vm: %s\", err)\n\t}\n\n\tvm_mor := info.Result.(types.ManagedObjectReference)\n\td.SetId(vm_mor.Value)\n\tvm := object.NewVirtualMachine(client, vm_mor)\n\t\/\/ workaround for https:\/\/github.com\/vmware\/govmomi\/issues\/218\n\tif d.Get(\"power_on\").(bool) {\n\t\tif ip_address == \"\" {\n\t\t\tip, err := vm.WaitForIP(ctx)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"[ERROR] Cannot read ip address: %s\", err)\n\t\t\t} else {\n\t\t\t\td.Set(\"ip_address\", ip)\n\t\t\t\td.SetConnInfo(map[string]string{\n\t\t\t\t\t\"type\": \"ssh\",\n\t\t\t\t\t\"host\": ip,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\n\t\tupdateIPAddresses(d, vm, ctx)\n\t}\n\n\treturn nil\n}\n\nfunc resourceVirtualMachineRead(d *schema.ResourceData, meta interface{}) error {\n\tproviderMeta := meta.(providerMeta)\n\tclient := providerMeta.client\n\tctx := providerMeta.context\n\tvm_mor := types.ManagedObjectReference{Type: \"VirtualMachine\", Value: d.Id()}\n\tvm := object.NewVirtualMachine(client, vm_mor)\n\n\tvar vm_mo mo.VirtualMachine\n\terr := vm.Properties(ctx, vm.Reference(), []string{\"summary\"}, &vm_mo)\n\tif err != nil {\n\t\tlog.Printf(\"[INFO] Cannot read VM properties: %s\", err)\n\t\td.SetId(\"\")\n\t\treturn nil\n\t}\n\td.Set(\"name\", vm_mo.Summary.Config.Name)\n\td.Set(\"cpus\", vm_mo.Summary.Config.NumCpu)\n\td.Set(\"memory\", vm_mo.Summary.Config.MemorySizeMB)\n\n\tif vm_mo.Summary.Runtime.PowerState == \"poweredOn\" {\n\t\td.Set(\"power_on\", true)\n\t} else {\n\t\td.Set(\"power_on\", false)\n\t}\n\n\tif d.Get(\"power_on\").(bool) {\n\t\tip, err := vm.WaitForIP(ctx)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[ERROR] Cannot read ip address: %s\", err)\n\t\t} else {\n\t\t\td.Set(\"ip_address\", ip)\n\t\t}\n\n\t\tupdateIPAddresses(d, vm, ctx)\n\t}\n\n\treturn nil\n}\n\nfunc resourceVirtualMachineDelete(d *schema.ResourceData, meta interface{}) error {\n\tproviderMeta := meta.(providerMeta)\n\tclient := providerMeta.client\n\tctx := providerMeta.context\n\n\tvm_mor := types.ManagedObjectReference{Type: \"VirtualMachine\", Value: d.Id()}\n\tvm := object.NewVirtualMachine(client, vm_mor)\n\n\ttask, err := vm.PowerOff(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error powering vm off: %s\", err)\n\t}\n\terr = task.Wait(ctx)\n\tif err != nil {\n\t\tif !strings.Contains(err.Error(), \"in the current state (Powered off)\") {\n\t\t\treturn fmt.Errorf(\"Error powering vm off: %s\", err)\n\t\t}\n\t}\n\n\ttask, err = vm.Destroy(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error deleting vm: %s\", err)\n\t}\n\terr = task.Wait(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error deleting vm: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc updateIPAddresses(resourceData *schema.ResourceData, vm *object.VirtualMachine, ctx context.Context) {\n\tif ipMap, err := vm.WaitForNetIP(ctx, false); err != nil {\n\t\tlog.Printf(\"[ERROR] Cannot read ip addresses: %s\", err)\n\t} else {\n\t\tips := make([]string, 0)\n\t\tfor _, nicIps := range ipMap {\n\t\t\tips = append(ips, nicIps...)\n\t\t}\n\t\tresourceData.Set(\"ip_addresses\", ips)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package device\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/ScriptRock\/crypto\/ssh\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype CiscoDevice struct {\n\tUsername string\n\tPassword string\n\tEnable string\n\tname string\n\tHostname string\n\tstdin io.WriteCloser\n\tstdout io.Reader\n\tsession *ssh.Session\n\tEcho bool\n\tEnableLog bool\n\tLogdir string\n\tLog *os.File\n\tPrompt string\n\tReadChan chan *string\n\tStopChan chan struct{}\n\tclient *ssh.Client\n\tTimeout int\n}\n\nfunc (d *CiscoDevice) Connect() error {\n\tconfig := &ssh.ClientConfig{\n\t\tTimeout: time.Second * 5,\n\t\tUser: d.Username,\n\t\tAuth: []ssh.AuthMethod{\n\t\t\tssh.Password(d.Password),\n\t\t},\n\t\tConfig: ssh.Config{\n\t\t\tCiphers: ssh.AllSupportedCiphers(),\n\t\t},\n\t}\n\tclient, err := ssh.Dial(\"tcp\", d.Hostname+\":22\", config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\tclient.Conn.Close()\n\t\treturn err\n\t}\n\tif d.StopChan == nil {\n\t\td.StopChan = make(chan struct{})\n\t}\n\td.client = client\n\td.stdin, _ = session.StdinPipe()\n\td.stdout, _ = session.StdoutPipe()\n\td.Echo = true\n\td.EnableLog = true\n\tif d.Timeout == 0 {\n\t\td.Timeout = 30\n\t}\n\tmodes := ssh.TerminalModes{\n\t\tssh.ECHO: 0, \/\/ disable echoing\n\t\tssh.OCRNL: 0,\n\t\tssh.TTY_OP_ISPEED: 38400, \/\/ input speed = 14.4kbaud\n\t\tssh.TTY_OP_OSPEED: 38400, \/\/ output speed = 14.4kbaud\n\t}\n\tsession.RequestPty(\"vt100\", 0, 2000, modes)\n\tsession.Shell()\n\tif d.Logdir != \"\" {\n\t\tt := time.Now()\n\t\td.Log, err = os.OpenFile(filepath.Join(d.Logdir, t.Format(\"200601021504\")+\"-\"+d.Hostname), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\td.init()\n\td.session = session\n\treturn nil\n}\n\nfunc (d *CiscoDevice) Close() {\n\td.client.Conn.Close()\n\td.session.Close()\n}\n\nfunc (d *CiscoDevice) Cmd(cmd string) (string, error) {\n\tvar result string\n\tbufstdout := bufio.NewReader(d.stdout)\n\tlines := strings.Split(cmd, \"!\")\n\tfor _, line := range lines {\n\t\tio.WriteString(d.stdin, line+\"\\n\")\n\t\ttime.Sleep(time.Millisecond * 100)\n\t}\n\tgo d.readln(bufstdout)\n\tfor {\n\t\tselect {\n\t\tcase output := <-d.ReadChan:\n\t\t\t{\n\t\t\t\tif output == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif d.Echo == false {\n\t\t\t\t\tresult = strings.Replace(*output, lines[0], \"\", 1)\n\t\t\t\t} else {\n\t\t\t\t\tresult = *output\n\t\t\t\t}\n\t\t\t\treturn result, nil\n\t\t\t}\n\t\tcase <-d.StopChan:\n\t\t\t{\n\t\t\t\tif d.session != nil {\n\t\t\t\t\td.session.Close()\n\t\t\t\t}\n\t\t\t\td.client.Conn.Close()\n\t\t\t\td.Close()\n\t\t\t\treturn \"\", fmt.Errorf(\"EOF\")\n\t\t\t}\n\t\tcase <-time.After(time.Second * time.Duration(d.Timeout)):\n\t\t\t{\n\t\t\t\tfmt.Println(\"timeout on\", d.Hostname)\n\t\t\t\tif d.session != nil {\n\t\t\t\t\td.session.Close()\n\t\t\t\t}\n\t\t\t\td.client.Conn.Close()\n\t\t\t\td.Connect()\n\t\t\t\treturn \"\", nil\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (d *CiscoDevice) init() {\n\td.ReadChan = make(chan *string, 20)\n\td.StopChan = make(chan struct{})\n\tbufstdout := bufio.NewReader(d.stdout)\n\tio.WriteString(d.stdin, \"enable\\n\")\n\ttime.Sleep(time.Millisecond * 100)\n\tre := regexp.MustCompile(\"assword:\")\n\tbuf := make([]byte, 1000)\n\tloadStr := \"\"\n\tfor {\n\t\tn, err := bufstdout.Read(buf)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tloadStr += string(buf[:n])\n\t\tif re.MatchString(loadStr) {\n\t\t\tio.WriteString(d.stdin, d.Enable+\"\\n\")\n\t\t\tbreak\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\td.Cmd(\"terminal length 0\")\n\td.Cmd(\"\")\n\tprompt, _ := d.Cmd(\"\")\n\td.Prompt = strings.TrimSpace(prompt)\n\td.Prompt = strings.Replace(d.Prompt, \"#\", \"\", -1)\n\t\/\/ sometimes using conf t makes the (config-xx-something) so long that only 10 chars of\n\t\/\/ original prompt remain\n\tif len(d.Prompt) > 10 {\n\t\td.Prompt = d.Prompt[:10]\n\t}\n}\n\nfunc (d *CiscoDevice) readln(r io.Reader) {\n\t\/\/re := regexp.MustCompile(\".*?#.?$\")\n\tvar re *regexp.Regexp\n\tif d.Prompt == \"\" {\n\t\tre = regexp.MustCompile(\"[[:alnum:]]#.?$\")\n\t} else {\n\t\tre = regexp.MustCompile(d.Prompt + \".*?#.?$\")\n\t}\n\t\/\/fmt.Println(\"using prompt\" + d.Prompt)\n\tbuf := make([]byte, 10000)\n\tloadStr := \"\"\n\tfor {\n\t\tn, err := r.Read(buf)\n\t\tif err != nil {\n\t\t\tif err.Error() != \"EOF\" {\n\t\t\t\tfmt.Println(\"ERROR \", err)\n\t\t\t}\n\t\t\td.StopChan <- struct{}{}\n\t\t}\n\t\tloadStr += string(buf[:n])\n\t\t\/\/ logging to file if necessary\n\t\tif d.Logdir != \"\" {\n\t\t\tif d.EnableLog {\n\t\t\t\tfmt.Fprint(d.Log, string(buf[:n]))\n\t\t\t}\n\t\t}\n\t\tif len(loadStr) >= 50 && re.MatchString(loadStr[len(loadStr)-45:]) {\n\t\t\tbreak\n\t\t}\n\t\tif len(loadStr) < 50 && re.MatchString(loadStr) {\n\t\t\tbreak\n\t\t}\n\t\t\/\/ keepalive\n\t\td.ReadChan <- nil\n\t}\n\tloadStr = strings.Replace(loadStr, \"\\r\", \"\", -1)\n\td.ReadChan <- &loadStr\n}\n<commit_msg>Split correctly multiple lines in multiple commands. Closes #5<commit_after>package device\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/ScriptRock\/crypto\/ssh\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype CiscoDevice struct {\n\tUsername string\n\tPassword string\n\tEnable string\n\tname string\n\tHostname string\n\tstdin io.WriteCloser\n\tstdout io.Reader\n\tsession *ssh.Session\n\tEcho bool\n\tEnableLog bool\n\tLogdir string\n\tLog *os.File\n\tPrompt string\n\tReadChan chan *string\n\tStopChan chan struct{}\n\tclient *ssh.Client\n\tTimeout int\n}\n\nfunc (d *CiscoDevice) Connect() error {\n\tconfig := &ssh.ClientConfig{\n\t\tTimeout: time.Second * 5,\n\t\tUser: d.Username,\n\t\tAuth: []ssh.AuthMethod{\n\t\t\tssh.Password(d.Password),\n\t\t},\n\t\tConfig: ssh.Config{\n\t\t\tCiphers: ssh.AllSupportedCiphers(),\n\t\t},\n\t}\n\tclient, err := ssh.Dial(\"tcp\", d.Hostname+\":22\", config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tsession, err := client.NewSession()\n\tif err != nil {\n\t\tclient.Conn.Close()\n\t\treturn err\n\t}\n\tif d.StopChan == nil {\n\t\td.StopChan = make(chan struct{})\n\t}\n\td.client = client\n\td.stdin, _ = session.StdinPipe()\n\td.stdout, _ = session.StdoutPipe()\n\td.Echo = true\n\td.EnableLog = true\n\tif d.Timeout == 0 {\n\t\td.Timeout = 30\n\t}\n\tmodes := ssh.TerminalModes{\n\t\tssh.ECHO: 0, \/\/ disable echoing\n\t\tssh.OCRNL: 0,\n\t\tssh.TTY_OP_ISPEED: 38400, \/\/ input speed = 14.4kbaud\n\t\tssh.TTY_OP_OSPEED: 38400, \/\/ output speed = 14.4kbaud\n\t}\n\tsession.RequestPty(\"vt100\", 0, 2000, modes)\n\tsession.Shell()\n\tif d.Logdir != \"\" {\n\t\tt := time.Now()\n\t\td.Log, err = os.OpenFile(filepath.Join(d.Logdir, t.Format(\"200601021504\")+\"-\"+d.Hostname), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\td.init()\n\td.session = session\n\treturn nil\n}\n\nfunc (d *CiscoDevice) Close() {\n\td.client.Conn.Close()\n\tif d.session != nil {\n\t\td.session.Close()\n\t}\n}\n\nfunc (d *CiscoDevice) Cmd(cmd string) (string, error) {\n\tresult := \"\"\n\tlines := strings.Split(cmd, \"!\")\n\tfor _, line := range lines {\n\t\tres, err := d.cmd(line)\n\t\tresult += res\n\t\tif err != nil {\n\t\t\treturn result, err\n\t\t}\n\t}\n\treturn result, nil\n}\n\nfunc (d *CiscoDevice) cmd(cmd string) (string, error) {\n\tvar result string\n\tbufstdout := bufio.NewReader(d.stdout)\n\tio.WriteString(d.stdin, cmd+\"\\n\")\n\tgo d.readln(bufstdout)\n\tfor {\n\t\tselect {\n\t\tcase output := <-d.ReadChan:\n\t\t\t{\n\t\t\t\tif output == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif d.Echo == false {\n\t\t\t\t\tresult = strings.Replace(*output, cmd, \"\", 1)\n\t\t\t\t} else {\n\t\t\t\t\tresult = *output\n\t\t\t\t}\n\t\t\t\treturn result, nil\n\t\t\t}\n\t\tcase <-d.StopChan:\n\t\t\t{\n\t\t\t\tif d.session != nil {\n\t\t\t\t\td.session.Close()\n\t\t\t\t}\n\t\t\t\td.client.Conn.Close()\n\t\t\t\td.Close()\n\t\t\t\treturn \"\", fmt.Errorf(\"EOF\")\n\t\t\t}\n\t\tcase <-time.After(time.Second * time.Duration(d.Timeout)):\n\t\t\t{\n\t\t\t\tfmt.Println(\"timeout on\", d.Hostname)\n\t\t\t\tif d.session != nil {\n\t\t\t\t\td.session.Close()\n\t\t\t\t}\n\t\t\t\td.client.Conn.Close()\n\t\t\t\td.Close()\n\t\t\t\treturn \"\", fmt.Errorf(\"timeout\")\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (d *CiscoDevice) init() {\n\td.ReadChan = make(chan *string, 20)\n\td.StopChan = make(chan struct{})\n\tbufstdout := bufio.NewReader(d.stdout)\n\tio.WriteString(d.stdin, \"enable\\n\")\n\ttime.Sleep(time.Millisecond * 100)\n\tre := regexp.MustCompile(\"assword:\")\n\tbuf := make([]byte, 1000)\n\tloadStr := \"\"\n\tfor {\n\t\tn, err := bufstdout.Read(buf)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tloadStr += string(buf[:n])\n\t\tif re.MatchString(loadStr) {\n\t\t\tio.WriteString(d.stdin, d.Enable+\"\\n\")\n\t\t\tbreak\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\td.Cmd(\"terminal length 0\")\n\td.Cmd(\"\")\n\tprompt, _ := d.Cmd(\"\")\n\td.Prompt = strings.TrimSpace(prompt)\n\td.Prompt = strings.Replace(d.Prompt, \"#\", \"\", -1)\n\t\/\/ sometimes using conf t makes the (config-xx-something) so long that only 10 chars of\n\t\/\/ original prompt remain\n\tif len(d.Prompt) > 10 {\n\t\td.Prompt = d.Prompt[:10]\n\t}\n}\n\nfunc (d *CiscoDevice) readln(r io.Reader) {\n\t\/\/re := regexp.MustCompile(\".*?#.?$\")\n\tvar re *regexp.Regexp\n\tif d.Prompt == \"\" {\n\t\tre = regexp.MustCompile(\"[[:alnum:]]#.?$\")\n\t} else {\n\t\tre = regexp.MustCompile(d.Prompt + \".*?#.?$\")\n\t}\n\t\/\/fmt.Println(\"using prompt\" + d.Prompt)\n\tbuf := make([]byte, 10000)\n\tloadStr := \"\"\n\tfor {\n\t\tn, err := r.Read(buf)\n\t\tif err != nil {\n\t\t\tif err.Error() != \"EOF\" {\n\t\t\t\tfmt.Println(\"ERROR \", err)\n\t\t\t}\n\t\t\td.StopChan <- struct{}{}\n\t\t}\n\t\tloadStr += string(buf[:n])\n\t\t\/\/ logging to file if necessary\n\t\tif d.Logdir != \"\" {\n\t\t\tif d.EnableLog {\n\t\t\t\tfmt.Fprint(d.Log, string(buf[:n]))\n\t\t\t}\n\t\t}\n\t\tif len(loadStr) >= 50 && re.MatchString(loadStr[len(loadStr)-45:]) {\n\t\t\tbreak\n\t\t}\n\t\tif len(loadStr) < 50 && re.MatchString(loadStr) {\n\t\t\tbreak\n\t\t}\n\t\t\/\/ keepalive\n\t\td.ReadChan <- nil\n\t}\n\tloadStr = strings.Replace(loadStr, \"\\r\", \"\", -1)\n\td.ReadChan <- &loadStr\n}\n<|endoftext|>"} {"text":"<commit_before>package common\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst taskIdSize = 20\n\nvar (\n\tTracker = &TaskTracker{tasks: map[string]*Task{}}\n\tTaskStatusUnknown = &TaskStatus{Status: StatusUnknown}\n)\n\nfunc MaintenanceChecker(file string, interval time.Duration) {\n\tgo Tracker.MaintenanceChecker(file, interval)\n}\n\nfunc NewTask(name string, executor TaskExecutor) *Task {\n\ttask := &Task{Tracker: Tracker, Executor: executor}\n\ttask.Status = StatusInit\n\ttask.StatusTime = time.Now()\n\ttask.Name = name\n\ttask.Description = executor.Description()\n\ttask.Request = executor.Request()\n\treturn task\n}\n\ntype TaskTracker struct {\n\tsync.RWMutex\n\tResultDuration time.Duration\n\tMaintenance bool\n\ttasks map[string]*Task\n}\n\ntype Task struct {\n\tsync.RWMutex\n\tTaskStatus\n\tErr error\n\tTracker *TaskTracker\n\tId string\n\tExecutor TaskExecutor\n\tRequest interface{}\n\tResult interface{}\n}\n\ntype TaskExecutor interface {\n\tRequest() interface{}\n\tResult() interface{}\n\tDescription() string\n\tExecute(t *Task) error\n\tAuthorize() error\n}\n\ntype TaskMaintenanceExecutor interface {\n\tAllowDuringMaintenance() bool\n}\n\nfunc createTaskId() string {\n\treturn CreateRandomId(taskIdSize)\n}\n\nfunc (t *TaskTracker) SetMaintenance(on bool) {\n\tt.Lock()\n\tt.Maintenance = on\n\tt.Unlock()\n}\n\nfunc (t *TaskTracker) UnderMaintenance() bool {\n\tt.RLock()\n\tmaint := t.Maintenance\n\tt.RUnlock()\n\treturn maint\n}\n\nfunc (t *TaskTracker) MaintenanceChecker(file string, interval time.Duration) {\n\tfor {\n\t\tif _, err := os.Stat(file); err == nil {\n\t\t\t\/\/ maintenance file exists\n\t\t\tif !t.UnderMaintenance() {\n\t\t\t\tlog.Println(\"Begin Maintenance\")\n\t\t\t\tt.SetMaintenance(true)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ maintenance file doesn't exist or there is an error looking for it\n\t\t\tif t.UnderMaintenance() {\n\t\t\t\tlog.Println(\"End Maintenance\")\n\t\t\t\tt.SetMaintenance(false)\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(interval)\n\t}\n}\n\nfunc (t *TaskTracker) Idle(checkTask *Task) bool {\n\tidle := true\n\tt.RLock()\n\tfor _, task := range t.tasks {\n\t\tif task != checkTask && !task.Done {\n\t\t\tidle = false\n\t\t\tbreak\n\t\t}\n\t}\n\tt.RUnlock()\n\treturn idle\n}\n\nfunc (t *TaskTracker) ReserveTaskId(task *Task) string {\n\tt.Lock()\n\trequestId := createTaskId()\n\tfor _, present := t.tasks[requestId]; present; _, present = t.tasks[requestId] {\n\t\trequestId = createTaskId()\n\t}\n\tt.tasks[requestId] = task \/\/ reserve request id\n\tt.Unlock()\n\ttask.Lock()\n\ttask.Id = requestId\n\ttask.Unlock()\n\treturn requestId\n}\n\nfunc (t *TaskTracker) ReleaseTaskId(id string) {\n\tt.Lock()\n\tdelete(t.tasks, id)\n\tt.Unlock()\n}\n\nfunc (t *TaskTracker) Status(id string) (*TaskStatus, error) {\n\tt.RLock()\n\ttask := t.tasks[id]\n\tt.RUnlock()\n\tif task != nil {\n\t\ttask.RLock()\n\t\tstatus := task.CopyTaskStatus()\n\t\terr := task.Err\n\t\ttask.RUnlock()\n\t\treturn status, err\n\t}\n\treturn TaskStatusUnknown, errors.New(\"Unknown Task Status\")\n}\n\nfunc (t *TaskTracker) Result(id string) interface{} {\n\tt.RLock()\n\ttask := t.tasks[id]\n\tt.RUnlock()\n\tif task != nil {\n\t\ttask.RLock()\n\t\tresult := task.Result\n\t\ttask.RUnlock()\n\t\treturn result\n\t}\n\treturn nil\n}\n\nfunc (t *Task) Authorize() error {\n\treturn t.Executor.Authorize()\n}\n\nfunc (t *Task) Run() error {\n\tif t.Tracker.UnderMaintenance() {\n\t\texecutor, ok := t.Executor.(TaskMaintenanceExecutor)\n\t\tif !ok || !executor.AllowDuringMaintenance() {\n\t\t\treturn t.End(errors.New(\"Under Maintenance\"), false)\n\t\t}\n\t}\n\tt.Tracker.ReserveTaskId(t)\n\tt.Log(\"Begin %s\", t.Description)\n\tt.Lock()\n\tt.StartTime = time.Now()\n\tt.Unlock()\n\terr := t.Executor.Authorize()\n\tif err != nil {\n\t\treturn t.End(err, false)\n\t}\n\treturn t.End(t.Executor.Execute(t), false)\n}\n\nfunc (t *Task) RunAsync(r *AsyncReply) error {\n\tif t.Tracker.UnderMaintenance() {\n\t\texecutor, ok := t.Executor.(TaskMaintenanceExecutor)\n\t\tif !ok || !executor.AllowDuringMaintenance() {\n\t\t\treturn t.End(errors.New(\"Under Maintenance\"), false)\n\t\t}\n\t}\n\tt.Tracker.ReserveTaskId(t)\n\tt.RLock()\n\tr.Id = t.Id\n\tt.RUnlock()\n\tgo func() error {\n\t\tt.Log(\"Begin\")\n\t\tt.Lock()\n\t\tt.StartTime = time.Now()\n\t\tt.Unlock()\n\t\terr := t.Executor.Authorize()\n\t\tif err != nil {\n\t\t\treturn t.End(err, true)\n\t\t}\n\t\tt.End(t.Executor.Execute(t), true)\n\t\treturn nil\n\t}()\n\treturn nil\n}\n\nfunc (t *Task) End(err error, async bool) error {\n\tlogString := fmt.Sprintf(\"End %s\", t.Description)\n\tt.Lock()\n\tt.Result = t.Executor.Result()\n\tt.EndTime = time.Now()\n\tt.StatusTime = t.EndTime\n\tif err == nil {\n\t\tt.Status = StatusDone\n\t\tt.Done = true\n\t} else {\n\t\tt.Status = StatusError\n\t\tt.Err = err\n\t\tt.Done = true\n\t\tlogString += fmt.Sprintf(\" - Error: %s\", err.Error())\n\t}\n\tt.Unlock()\n\tt.Log(logString)\n\tif async {\n\t\ttime.AfterFunc(t.Tracker.ResultDuration, func() {\n\t\t\t\/\/ keep result around for 30 min in case someone wants to check on it\n\t\t\tt.Tracker.ReleaseTaskId(t.Id)\n\t\t})\n\t} else {\n\t\tt.Tracker.ReleaseTaskId(t.Id)\n\t}\n\treturn err\n}\n\nfunc (t *Task) Log(format string, args ...interface{}) {\n\tt.RLock()\n\tlog.Printf(\"[RPC][\"+t.Name+\"][\"+t.Id+\"] \"+format, args...)\n\tt.RUnlock()\n}\n\nfunc (t *Task) LogStatus(format string, args ...interface{}) {\n\tt.Log(format, args...)\n\tt.Lock()\n\tt.StatusTime = time.Now()\n\tt.Status = fmt.Sprintf(format, args...)\n\tt.Unlock()\n}\n\ntype TaskStatus struct {\n\tName string\n\tDescription string\n\tStatus string\n\tDone bool\n\tStartTime time.Time\n\tStatusTime time.Time\n\tEndTime time.Time\n}\n\nfunc (t *TaskStatus) Map() map[string]interface{} {\n\treturn map[string]interface{}{\n\t\t\"Name\": t.Name,\n\t\t\"Description\": t.Description,\n\t\t\"Status\": t.Status,\n\t\t\"Done\": t.Done,\n\t\t\"StartTime\": t.StartTime,\n\t\t\"StatusTime\": t.StatusTime,\n\t\t\"EndTime\": t.EndTime,\n\t}\n}\n\nfunc (t *TaskStatus) String() string {\n\treturn fmt.Sprintf(`%s\nDescription : %s\nStatus : %s\nDone : %t\nStartTime : %s\nStatusTime : %s\nEndTime : %s`, t.Name, t.Description, t.Status, t.Done, t.StartTime, t.StatusTime,\n\t\tt.EndTime)\n}\n\nfunc (t *TaskStatus) CopyTaskStatus() *TaskStatus {\n\treturn &TaskStatus{t.Name, t.Description, t.Status, t.Done, t.StartTime, t.StatusTime,\n\t\tt.EndTime}\n}\n<commit_msg>add description to begin async task<commit_after>package common\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst taskIdSize = 20\n\nvar (\n\tTracker = &TaskTracker{tasks: map[string]*Task{}}\n\tTaskStatusUnknown = &TaskStatus{Status: StatusUnknown}\n)\n\nfunc MaintenanceChecker(file string, interval time.Duration) {\n\tgo Tracker.MaintenanceChecker(file, interval)\n}\n\nfunc NewTask(name string, executor TaskExecutor) *Task {\n\ttask := &Task{Tracker: Tracker, Executor: executor}\n\ttask.Status = StatusInit\n\ttask.StatusTime = time.Now()\n\ttask.Name = name\n\ttask.Description = executor.Description()\n\ttask.Request = executor.Request()\n\treturn task\n}\n\ntype TaskTracker struct {\n\tsync.RWMutex\n\tResultDuration time.Duration\n\tMaintenance bool\n\ttasks map[string]*Task\n}\n\ntype Task struct {\n\tsync.RWMutex\n\tTaskStatus\n\tErr error\n\tTracker *TaskTracker\n\tId string\n\tExecutor TaskExecutor\n\tRequest interface{}\n\tResult interface{}\n}\n\ntype TaskExecutor interface {\n\tRequest() interface{}\n\tResult() interface{}\n\tDescription() string\n\tExecute(t *Task) error\n\tAuthorize() error\n}\n\ntype TaskMaintenanceExecutor interface {\n\tAllowDuringMaintenance() bool\n}\n\nfunc createTaskId() string {\n\treturn CreateRandomId(taskIdSize)\n}\n\nfunc (t *TaskTracker) SetMaintenance(on bool) {\n\tt.Lock()\n\tt.Maintenance = on\n\tt.Unlock()\n}\n\nfunc (t *TaskTracker) UnderMaintenance() bool {\n\tt.RLock()\n\tmaint := t.Maintenance\n\tt.RUnlock()\n\treturn maint\n}\n\nfunc (t *TaskTracker) MaintenanceChecker(file string, interval time.Duration) {\n\tfor {\n\t\tif _, err := os.Stat(file); err == nil {\n\t\t\t\/\/ maintenance file exists\n\t\t\tif !t.UnderMaintenance() {\n\t\t\t\tlog.Println(\"Begin Maintenance\")\n\t\t\t\tt.SetMaintenance(true)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ maintenance file doesn't exist or there is an error looking for it\n\t\t\tif t.UnderMaintenance() {\n\t\t\t\tlog.Println(\"End Maintenance\")\n\t\t\t\tt.SetMaintenance(false)\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(interval)\n\t}\n}\n\nfunc (t *TaskTracker) Idle(checkTask *Task) bool {\n\tidle := true\n\tt.RLock()\n\tfor _, task := range t.tasks {\n\t\tif task != checkTask && !task.Done {\n\t\t\tidle = false\n\t\t\tbreak\n\t\t}\n\t}\n\tt.RUnlock()\n\treturn idle\n}\n\nfunc (t *TaskTracker) ReserveTaskId(task *Task) string {\n\tt.Lock()\n\trequestId := createTaskId()\n\tfor _, present := t.tasks[requestId]; present; _, present = t.tasks[requestId] {\n\t\trequestId = createTaskId()\n\t}\n\tt.tasks[requestId] = task \/\/ reserve request id\n\tt.Unlock()\n\ttask.Lock()\n\ttask.Id = requestId\n\ttask.Unlock()\n\treturn requestId\n}\n\nfunc (t *TaskTracker) ReleaseTaskId(id string) {\n\tt.Lock()\n\tdelete(t.tasks, id)\n\tt.Unlock()\n}\n\nfunc (t *TaskTracker) Status(id string) (*TaskStatus, error) {\n\tt.RLock()\n\ttask := t.tasks[id]\n\tt.RUnlock()\n\tif task != nil {\n\t\ttask.RLock()\n\t\tstatus := task.CopyTaskStatus()\n\t\terr := task.Err\n\t\ttask.RUnlock()\n\t\treturn status, err\n\t}\n\treturn TaskStatusUnknown, errors.New(\"Unknown Task Status\")\n}\n\nfunc (t *TaskTracker) Result(id string) interface{} {\n\tt.RLock()\n\ttask := t.tasks[id]\n\tt.RUnlock()\n\tif task != nil {\n\t\ttask.RLock()\n\t\tresult := task.Result\n\t\ttask.RUnlock()\n\t\treturn result\n\t}\n\treturn nil\n}\n\nfunc (t *Task) Authorize() error {\n\treturn t.Executor.Authorize()\n}\n\nfunc (t *Task) Run() error {\n\tif t.Tracker.UnderMaintenance() {\n\t\texecutor, ok := t.Executor.(TaskMaintenanceExecutor)\n\t\tif !ok || !executor.AllowDuringMaintenance() {\n\t\t\treturn t.End(errors.New(\"Under Maintenance\"), false)\n\t\t}\n\t}\n\tt.Tracker.ReserveTaskId(t)\n\tt.Log(\"Begin %s\", t.Description)\n\tt.Lock()\n\tt.StartTime = time.Now()\n\tt.Unlock()\n\terr := t.Executor.Authorize()\n\tif err != nil {\n\t\treturn t.End(err, false)\n\t}\n\treturn t.End(t.Executor.Execute(t), false)\n}\n\nfunc (t *Task) RunAsync(r *AsyncReply) error {\n\tif t.Tracker.UnderMaintenance() {\n\t\texecutor, ok := t.Executor.(TaskMaintenanceExecutor)\n\t\tif !ok || !executor.AllowDuringMaintenance() {\n\t\t\treturn t.End(errors.New(\"Under Maintenance\"), false)\n\t\t}\n\t}\n\tt.Tracker.ReserveTaskId(t)\n\tt.RLock()\n\tr.Id = t.Id\n\tt.RUnlock()\n\tgo func() error {\n\t\tt.Log(\"Begin %s\", t.Description)\n\t\tt.Lock()\n\t\tt.StartTime = time.Now()\n\t\tt.Unlock()\n\t\terr := t.Executor.Authorize()\n\t\tif err != nil {\n\t\t\treturn t.End(err, true)\n\t\t}\n\t\tt.End(t.Executor.Execute(t), true)\n\t\treturn nil\n\t}()\n\treturn nil\n}\n\nfunc (t *Task) End(err error, async bool) error {\n\tlogString := fmt.Sprintf(\"End %s\", t.Description)\n\tt.Lock()\n\tt.Result = t.Executor.Result()\n\tt.EndTime = time.Now()\n\tt.StatusTime = t.EndTime\n\tif err == nil {\n\t\tt.Status = StatusDone\n\t\tt.Done = true\n\t} else {\n\t\tt.Status = StatusError\n\t\tt.Err = err\n\t\tt.Done = true\n\t\tlogString += fmt.Sprintf(\" - Error: %s\", err.Error())\n\t}\n\tt.Unlock()\n\tt.Log(logString)\n\tif async {\n\t\ttime.AfterFunc(t.Tracker.ResultDuration, func() {\n\t\t\t\/\/ keep result around for 30 min in case someone wants to check on it\n\t\t\tt.Tracker.ReleaseTaskId(t.Id)\n\t\t})\n\t} else {\n\t\tt.Tracker.ReleaseTaskId(t.Id)\n\t}\n\treturn err\n}\n\nfunc (t *Task) Log(format string, args ...interface{}) {\n\tt.RLock()\n\tlog.Printf(\"[RPC][\"+t.Name+\"][\"+t.Id+\"] \"+format, args...)\n\tt.RUnlock()\n}\n\nfunc (t *Task) LogStatus(format string, args ...interface{}) {\n\tt.Log(format, args...)\n\tt.Lock()\n\tt.StatusTime = time.Now()\n\tt.Status = fmt.Sprintf(format, args...)\n\tt.Unlock()\n}\n\ntype TaskStatus struct {\n\tName string\n\tDescription string\n\tStatus string\n\tDone bool\n\tStartTime time.Time\n\tStatusTime time.Time\n\tEndTime time.Time\n}\n\nfunc (t *TaskStatus) Map() map[string]interface{} {\n\treturn map[string]interface{}{\n\t\t\"Name\": t.Name,\n\t\t\"Description\": t.Description,\n\t\t\"Status\": t.Status,\n\t\t\"Done\": t.Done,\n\t\t\"StartTime\": t.StartTime,\n\t\t\"StatusTime\": t.StatusTime,\n\t\t\"EndTime\": t.EndTime,\n\t}\n}\n\nfunc (t *TaskStatus) String() string {\n\treturn fmt.Sprintf(`%s\nDescription : %s\nStatus : %s\nDone : %t\nStartTime : %s\nStatusTime : %s\nEndTime : %s`, t.Name, t.Description, t.Status, t.Done, t.StartTime, t.StatusTime,\n\t\tt.EndTime)\n}\n\nfunc (t *TaskStatus) CopyTaskStatus() *TaskStatus {\n\treturn &TaskStatus{t.Name, t.Description, t.Status, t.Done, t.StartTime, t.StatusTime,\n\t\tt.EndTime}\n}\n<|endoftext|>"} {"text":"<commit_before>package tests\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/deis\/deis\/tests\/dockercli\"\n\t\"github.com\/deis\/deis\/tests\/etcdutils\"\n\t\"github.com\/deis\/deis\/tests\/utils\"\n)\n\nfunc TestRouter(t *testing.T) {\n\tvar err error\n\tsetkeys := []string{\n\t\t\"\/deis\/controller\/host\",\n\t\t\"\/deis\/controller\/port\",\n\t\t\"\/deis\/builder\/host\",\n\t\t\"\/deis\/builder\/port\",\n\t\t\"\/deis\/store\/gateway\/host\",\n\t\t\"\/deis\/store\/gateway\/port\",\n\t}\n\tsetdir := []string{\n\t\t\"\/deis\/controller\",\n\t\t\"\/deis\/router\",\n\t\t\"\/deis\/database\",\n\t\t\"\/deis\/services\",\n\t\t\"\/deis\/builder\",\n\t\t\"\/deis\/domains\",\n\t\t\"\/deis\/store\",\n\t}\n\ttag, etcdPort := utils.BuildTag(), utils.RandomPort()\n\tetcdName := \"deis-etcd-\" + tag\n\tcli, stdout, stdoutPipe := dockercli.NewClient()\n\tdockercli.RunTestEtcd(t, etcdName, etcdPort)\n\tdefer cli.CmdRm(\"-f\", etcdName)\n\thandler := etcdutils.InitEtcd(setdir, setkeys, etcdPort)\n\tetcdutils.PublishEtcd(t, handler)\n\thost, port := utils.HostAddress(), utils.RandomPort()\n\tfmt.Printf(\"--- Run deis\/router:%s at %s:%s\\n\", tag, host, port)\n\tname := \"deis-router-\" + tag\n\tgo func() {\n\t\t_ = cli.CmdRm(\"-f\", name)\n\t\terr = dockercli.RunContainer(cli,\n\t\t\t\"--name\", name,\n\t\t\t\"--rm\",\n\t\t\t\"-p\", port+\":80\",\n\t\t\t\"-p\", utils.RandomPort()+\":2222\",\n\t\t\t\"-e\", \"EXTERNAL_PORT=\"+port,\n\t\t\t\"-e\", \"HOST=\"+host,\n\t\t\t\"-e\", \"ETCD_PORT=\"+etcdPort,\n\t\t\t\"deis\/router:\"+tag)\n\t}()\n\tdockercli.PrintToStdout(t, stdout, stdoutPipe, \"deis-router running\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ FIXME: nginx needs a couple seconds to wake up here\n\t\/\/ FIXME: Wait until etcd keys are published\n\ttime.Sleep(5000 * time.Millisecond)\n\tdockercli.DeisServiceTest(t, name, port, \"http\")\n\tetcdutils.VerifyEtcdValue(t, \"\/deis\/router\/gzip\", \"on\", etcdPort)\n\tetcdutils.VerifyEtcdValue(t,\n\t\t\"\/deis\/router\/hosts\/\"+host,\n\t\tfmt.Sprintf(\"%s:%s\", host, port),\n\t\tetcdPort)\n\t_ = cli.CmdRm(\"-f\", name)\n}\n<commit_msg>ref(test): by default run with logs in debug<commit_after>package tests\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/deis\/deis\/tests\/dockercli\"\n\t\"github.com\/deis\/deis\/tests\/etcdutils\"\n\t\"github.com\/deis\/deis\/tests\/utils\"\n)\n\nfunc TestRouter(t *testing.T) {\n\tvar err error\n\tsetkeys := []string{\n\t\t\"\/deis\/controller\/host\",\n\t\t\"\/deis\/controller\/port\",\n\t\t\"\/deis\/builder\/host\",\n\t\t\"\/deis\/builder\/port\",\n\t\t\"\/deis\/store\/gateway\/host\",\n\t\t\"\/deis\/store\/gateway\/port\",\n\t}\n\tsetdir := []string{\n\t\t\"\/deis\/controller\",\n\t\t\"\/deis\/router\",\n\t\t\"\/deis\/database\",\n\t\t\"\/deis\/services\",\n\t\t\"\/deis\/builder\",\n\t\t\"\/deis\/domains\",\n\t\t\"\/deis\/store\",\n\t}\n\ttag, etcdPort := utils.BuildTag(), utils.RandomPort()\n\tetcdName := \"deis-etcd-\" + tag\n\tcli, stdout, stdoutPipe := dockercli.NewClient()\n\tdockercli.RunTestEtcd(t, etcdName, etcdPort)\n\tdefer cli.CmdRm(\"-f\", etcdName)\n\thandler := etcdutils.InitEtcd(setdir, setkeys, etcdPort)\n\tetcdutils.PublishEtcd(t, handler)\n\thost, port := utils.HostAddress(), utils.RandomPort()\n\tfmt.Printf(\"--- Run deis\/router:%s at %s:%s\\n\", tag, host, port)\n\tname := \"deis-router-\" + tag\n\tgo func() {\n\t\t_ = cli.CmdRm(\"-f\", name)\n\t\terr = dockercli.RunContainer(cli,\n\t\t\t\"--name\", name,\n\t\t\t\"--rm\",\n\t\t\t\"-p\", port+\":80\",\n\t\t\t\"-p\", utils.RandomPort()+\":2222\",\n\t\t\t\"-e\", \"EXTERNAL_PORT=\"+port,\n\t\t\t\"-e\", \"HOST=\"+host,\n\t\t\t\"-e\", \"ETCD_PORT=\"+etcdPort,\n\t\t\t\"-e\", \"LOG=debug\",\n\t\t\t\"deis\/router:\"+tag)\n\t}()\n\tdockercli.PrintToStdout(t, stdout, stdoutPipe, \"deis-router running\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ FIXME: nginx needs a couple seconds to wake up here\n\t\/\/ FIXME: Wait until etcd keys are published\n\ttime.Sleep(5000 * time.Millisecond)\n\tdockercli.DeisServiceTest(t, name, port, \"http\")\n\tetcdutils.VerifyEtcdValue(t, \"\/deis\/router\/gzip\", \"on\", etcdPort)\n\tetcdutils.VerifyEtcdValue(t,\n\t\t\"\/deis\/router\/hosts\/\"+host,\n\t\tfmt.Sprintf(\"%s:%s\", host, port),\n\t\tetcdPort)\n\t_ = cli.CmdRm(\"-f\", name)\n}\n<|endoftext|>"} {"text":"<commit_before>package cassandra\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\/\/ DRIVER: cql\n\tcql \"github.com\/MichaelS11\/go-cql-driver\"\n\n\t\"github.com\/gocql\/gocql\"\n\t\"github.com\/xo\/dburl\"\n\n\t\"github.com\/xo\/usql\/drivers\"\n)\n\n\/\/ logger is a null logger that satisfies the gocql.StdLogger and the io.Writer\n\/\/ interfaces in order to capture the last error issued by the cql\/gocql\n\/\/ packages, since the cql package does not (at this time) return any error\n\/\/ other than sql.ErrBadConn.\ntype logger struct {\n\tlast string\n}\n\nfunc (*logger) Print(...interface{}) {}\nfunc (*logger) Printf(string, ...interface{}) {}\nfunc (*logger) Println(...interface{}) {}\nfunc (l *logger) Write(buf []byte) (int, error) {\n\tl.last = string(buf)\n\treturn len(buf), nil\n}\n\nfunc init() {\n\t\/\/ error regexp's\n\tauthReqRE := regexp.MustCompile(`authentication required`)\n\tpasswordErrRE := regexp.MustCompile(`Provided username (.*)and\/or password are incorrect`)\n\n\tvar l *logger\n\tdrivers.Register(\"cql\", drivers.Driver{\n\t\tAllowMultilineComments: true,\n\t\tAllowCComments: true,\n\t\tForceParams: func(u *dburl.URL) {\n\t\t\tif q := u.Query(); q.Get(\"timeout\") == \"\" {\n\t\t\t\tq.Set(\"timeout\", \"300s\")\n\t\t\t\tu.RawQuery = q.Encode()\n\t\t\t}\n\t\t},\n\t\tOpen: func(*dburl.URL) (func(string, string) (*sql.DB, error), error) {\n\t\t\t\/\/ override cql and gocql loggers\n\t\t\tl = new(logger)\n\t\t\tgocql.Logger, cql.CqlDriver.Logger = l, log.New(l, \"\", 0)\n\t\t\treturn sql.Open, nil\n\t\t},\n\t\tVersion: func(db drivers.DB) (string, error) {\n\t\t\tvar ver string\n\t\t\terr := db.QueryRow(`SELECT cql_version FROM system.local`).Scan(&ver)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\treturn \"Cassandra \" + ver, nil\n\t\t},\n\t\tChangePassword: func(db drivers.DB, user, newpw, _ string) error {\n\t\t\t_, err := db.Exec(`ALTER ROLE ` + user + ` WITH PASSWORD '` + newpw + `'`)\n\t\t\treturn err\n\t\t},\n\t\tIsPasswordErr: func(err error) bool {\n\t\t\treturn passwordErrRE.MatchString(l.last)\n\t\t},\n\t\tErr: func(err error) (string, string) {\n\t\t\tif authReqRE.MatchString(l.last) {\n\t\t\t\treturn \"\", \"authentication required\"\n\t\t\t}\n\t\t\tif m := passwordErrRE.FindStringSubmatch(l.last); m != nil {\n\t\t\t\treturn \"\", fmt.Sprintf(\"invalid username %sor password\", m[1])\n\t\t\t}\n\t\t\treturn \"\", strings.TrimPrefix(strings.TrimPrefix(err.Error(), \"driver: \"), \"gocql: \")\n\t\t},\n\t\tRowsAffected: func(sql.Result) (int64, error) {\n\t\t\treturn 0, nil\n\t\t},\n\t})\n}\n<commit_msg>Fixing minor issue with \\password for cassandra<commit_after>package cassandra\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\/\/ DRIVER: cql\n\tcql \"github.com\/MichaelS11\/go-cql-driver\"\n\n\t\"github.com\/gocql\/gocql\"\n\t\"github.com\/xo\/dburl\"\n\n\t\"github.com\/xo\/usql\/drivers\"\n)\n\n\/\/ logger is a null logger that satisfies the gocql.StdLogger and the io.Writer\n\/\/ interfaces in order to capture the last error issued by the cql\/gocql\n\/\/ packages, since the cql package does not (at this time) return any error\n\/\/ other than sql.ErrBadConn.\ntype logger struct {\n\tlast string\n}\n\nfunc (*logger) Print(...interface{}) {}\nfunc (*logger) Printf(string, ...interface{}) {}\nfunc (*logger) Println(...interface{}) {}\nfunc (l *logger) Write(buf []byte) (int, error) {\n\tl.last = string(buf)\n\treturn len(buf), nil\n}\n\nfunc init() {\n\t\/\/ error regexp's\n\tauthReqRE := regexp.MustCompile(`authentication required`)\n\tpasswordErrRE := regexp.MustCompile(`Provided username (.*)and\/or password are incorrect`)\n\n\tvar l *logger\n\tdrivers.Register(\"cql\", drivers.Driver{\n\t\tAllowMultilineComments: true,\n\t\tAllowCComments: true,\n\t\tForceParams: func(u *dburl.URL) {\n\t\t\tif q := u.Query(); q.Get(\"timeout\") == \"\" {\n\t\t\t\tq.Set(\"timeout\", \"300s\")\n\t\t\t\tu.RawQuery = q.Encode()\n\t\t\t}\n\t\t},\n\t\tOpen: func(*dburl.URL) (func(string, string) (*sql.DB, error), error) {\n\t\t\t\/\/ override cql and gocql loggers\n\t\t\tl = new(logger)\n\t\t\tgocql.Logger, cql.CqlDriver.Logger = l, log.New(l, \"\", 0)\n\t\t\treturn sql.Open, nil\n\t\t},\n\t\tVersion: func(db drivers.DB) (string, error) {\n\t\t\tvar ver string\n\t\t\terr := db.QueryRow(`SELECT cql_version FROM system.local`).Scan(&ver)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\treturn \"Cassandra \" + ver, nil\n\t\t},\n\t\tChangePassword: func(db drivers.DB, user, newpw, _ string) error {\n\t\t\t_, err := db.Exec(`ALTER ROLE ` + user + ` WITH PASSWORD = '` + newpw + `'`)\n\t\t\treturn err\n\t\t},\n\t\tIsPasswordErr: func(err error) bool {\n\t\t\treturn passwordErrRE.MatchString(l.last)\n\t\t},\n\t\tErr: func(err error) (string, string) {\n\t\t\tif authReqRE.MatchString(l.last) {\n\t\t\t\treturn \"\", \"authentication required\"\n\t\t\t}\n\t\t\tif m := passwordErrRE.FindStringSubmatch(l.last); m != nil {\n\t\t\t\treturn \"\", fmt.Sprintf(\"invalid username %sor password\", m[1])\n\t\t\t}\n\t\t\treturn \"\", strings.TrimPrefix(strings.TrimPrefix(err.Error(), \"driver: \"), \"gocql: \")\n\t\t},\n\t\tRowsAffected: func(sql.Result) (int64, error) {\n\t\t\treturn 0, nil\n\t\t},\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package mock\n\nimport (\n\t\"time\"\n\n\t\"github.com\/funkygao\/dbus\/engine\"\n\t\"github.com\/funkygao\/dbus\/pkg\/model\"\n\tconf \"github.com\/funkygao\/jsconf\"\n\tlog \"github.com\/funkygao\/log4go\"\n)\n\ntype MockInput struct {\n\tstopChan chan struct{}\n\n\tpayload engine.Payloader\n\tsleep time.Duration\n}\n\nfunc (this *MockInput) Init(config *conf.Conf) {\n\tthis.sleep = config.Duration(\"sleep\", 0)\n\tthis.stopChan = make(chan struct{})\n\tswitch config.String(\"payload\", \"Bytes\") {\n\tcase \"RowsEvent\":\n\t\tthis.payload = &model.RowsEvent{\n\t\t\tLog: \"mysql-bin.0001\",\n\t\t\tPosition: 498876,\n\t\t\tSchema: \"mydabase\",\n\t\t\tTable: \"user_account\",\n\t\t\tAction: \"I\",\n\t\t\tTimestamp: 1486554654,\n\t\t\tRows: [][]interface{}{{\"user\", 15, \"hello world\"}},\n\t\t}\n\n\tdefault:\n\t\tthis.payload = model.Bytes(`{\"log\":\"6633343-bin.006419\",\"pos\":795931083,\"db\":\"owl_t_prod_cd\",\"tbl\":\"owl_mi\",\"dml\":\"U\",\"ts\":1488934500,\"rows\":[[132332,\"expired_keys\",\"过期的key的个数\",\"mock-monitor|member-mock|10.1.1.1|10489|expired_keys\",\"mock-monitor\",244526,\"10489\",null,\"2015-12-25 17:12:00\",null,null,\"28571284\",\"Ok\",\"TypeLong\",\"2017-03-08 08:54:01\",null,null,null,null,null,null],[132332,\"expired_keys\",\"过期的key的个数\",\"mock-monitor|member-mock|10.1.1.6|10489|expired_keys\",\"mock-monitor\",244526,\"10489\",null,\"2015-12-25 17:12:00\",null,null,\"28571320\",\"Ok\",\"TypeLong\",\"2017-03-08 08:55:00\",null,null,null,null,null,null]]}`)\n\t}\n}\n\nfunc (this *MockInput) OnAck(pack *engine.Packet) error {\n\treturn nil\n}\n\nfunc (this *MockInput) Stop(r engine.InputRunner) {\n\tlog.Trace(\"[%s] stopping...\", r.Name())\n\tclose(this.stopChan)\n}\n\nfunc (this *MockInput) CleanupForRestart() bool {\n\treturn true\n}\n\nfunc (this *MockInput) Pause(r engine.InputRunner) error {\n\tlog.Warn(\"[%s] paused\", r.Name())\n\treturn nil\n}\n\nfunc (this *MockInput) Resume(r engine.InputRunner) error {\n\tlog.Info(\"[%s] resumed\", r.Name())\n\treturn nil\n}\n\nfunc (this *MockInput) Run(r engine.InputRunner, h engine.PluginHelper) error {\n\tfor {\n\t\tselect {\n\t\tcase <-this.stopChan:\n\t\t\treturn nil\n\n\t\tcase pack, ok := <-r.InChan():\n\t\t\tif !ok {\n\t\t\t\tlog.Debug(\"yes sir!\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tpack.Payload = this.payload\n\t\t\tr.Inject(pack)\n\n\t\t\tif this.sleep > 0 {\n\t\t\t\ttime.Sleep(this.sleep)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>MockInput can pause\/resume<commit_after>package mock\n\nimport (\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com\/funkygao\/dbus\/engine\"\n\t\"github.com\/funkygao\/dbus\/pkg\/model\"\n\tconf \"github.com\/funkygao\/jsconf\"\n\tlog \"github.com\/funkygao\/log4go\"\n)\n\ntype MockInput struct {\n\tstopChan chan struct{}\n\tinChan <-chan *engine.Packet\n\n\tpayload engine.Payloader\n\tsleep time.Duration\n}\n\nfunc (this *MockInput) Init(config *conf.Conf) {\n\tthis.sleep = config.Duration(\"sleep\", 0)\n\tthis.stopChan = make(chan struct{})\n\tswitch config.String(\"payload\", \"Bytes\") {\n\tcase \"RowsEvent\":\n\t\tthis.payload = &model.RowsEvent{\n\t\t\tLog: \"mysql-bin.0001\",\n\t\t\tPosition: 498876,\n\t\t\tSchema: \"mydabase\",\n\t\t\tTable: \"user_account\",\n\t\t\tAction: \"I\",\n\t\t\tTimestamp: 1486554654,\n\t\t\tRows: [][]interface{}{{\"user\", 15, \"hello world\"}},\n\t\t}\n\n\tdefault:\n\t\tthis.payload = model.Bytes(`{\"log\":\"6633343-bin.006419\",\"pos\":795931083,\"db\":\"owl_t_prod_cd\",\"tbl\":\"owl_mi\",\"dml\":\"U\",\"ts\":1488934500,\"rows\":[[132332,\"expired_keys\",\"过期的key的个数\",\"mock-monitor|member-mock|10.1.1.1|10489|expired_keys\",\"mock-monitor\",244526,\"10489\",null,\"2015-12-25 17:12:00\",null,null,\"28571284\",\"Ok\",\"TypeLong\",\"2017-03-08 08:54:01\",null,null,null,null,null,null],[132332,\"expired_keys\",\"过期的key的个数\",\"mock-monitor|member-mock|10.1.1.6|10489|expired_keys\",\"mock-monitor\",244526,\"10489\",null,\"2015-12-25 17:12:00\",null,null,\"28571320\",\"Ok\",\"TypeLong\",\"2017-03-08 08:55:00\",null,null,null,null,null,null]]}`)\n\t}\n}\n\nfunc (this *MockInput) OnAck(pack *engine.Packet) error {\n\treturn nil\n}\n\nfunc (this *MockInput) Stop(r engine.InputRunner) {\n\tlog.Debug(\"[%s] stopping...\", r.Name())\n\tclose(this.stopChan)\n}\n\nfunc (this *MockInput) CleanupForRestart() bool {\n\treturn true\n}\n\nfunc (this *MockInput) Pause(r engine.InputRunner) error {\n\tlog.Info(\"[%s] paused\", r.Name())\n\tthis.inChan = nil\n\treturn nil\n}\n\nfunc (this *MockInput) Resume(r engine.InputRunner) error {\n\tlog.Info(\"[%s] resumed\", r.Name())\n\tthis.inChan = r.InChan()\n\treturn nil\n}\n\nfunc (this *MockInput) Run(r engine.InputRunner, h engine.PluginHelper) error {\n\tthis.inChan = r.InChan()\n\tfor {\n\t\tselect {\n\t\tcase <-this.stopChan:\n\t\t\treturn nil\n\n\t\tcase pack, ok := <-this.inChan:\n\t\t\tif !ok {\n\t\t\t\tlog.Debug(\"[%s] yes sir!\", r.Name())\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tpack.Payload = this.payload\n\t\t\tr.Inject(pack)\n\n\t\t\tif this.sleep > 0 {\n\t\t\t\ttime.Sleep(this.sleep)\n\t\t\t}\n\n\t\tdefault:\n\t\t\truntime.Gosched()\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux\n\n\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage phases\n\nimport (\n\t\"io\/ioutil\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"k8s.io\/klog\/v2\"\n)\n\n\/\/ unmountKubeletDirectory unmounts all paths that contain KubeletRunDirectory\nfunc unmountKubeletDirectory(absoluteKubeletRunDirectory string) error {\n\traw, err := ioutil.ReadFile(\"\/proc\/mounts\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tmounts := strings.Split(string(raw), \"\\n\")\n\tfor _, mount := range mounts {\n\t\tm := strings.Split(mount, \" \")\n\t\tif len(m) < 2 || !strings.HasPrefix(m[1], absoluteKubeletRunDirectory) {\n\t\t\tcontinue\n\t\t}\n\t\tif err := syscall.Unmount(m[1], 0); err != nil {\n\t\t\tklog.Warningf(\"[reset] Failed to unmount mounted directory in %s: %s\", absoluteKubeletRunDirectory, m[1])\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>kubeadm: reset don't unmount \/var\/lib\/kubelet if it is mounted<commit_after>\/\/ +build linux\n\n\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage phases\n\nimport (\n\t\"io\/ioutil\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"k8s.io\/klog\/v2\"\n)\n\n\/\/ unmountKubeletDirectory unmounts all paths that contain KubeletRunDirectory\nfunc unmountKubeletDirectory(absoluteKubeletRunDirectory string) error {\n\traw, err := ioutil.ReadFile(\"\/proc\/mounts\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !strings.HasSuffix(absoluteKubeletRunDirectory, \"\/\") {\n\t\t\/\/ trailing \"\/\" is needed to ensure that possibly mounted \/var\/lib\/kubelet is skipped\n\t\tabsoluteKubeletRunDirectory += \"\/\"\n\t}\n\n\tmounts := strings.Split(string(raw), \"\\n\")\n\tfor _, mount := range mounts {\n\t\tm := strings.Split(mount, \" \")\n\t\tif len(m) < 2 || !strings.HasPrefix(m[1], absoluteKubeletRunDirectory) {\n\t\t\tcontinue\n\t\t}\n\t\tif err := syscall.Unmount(m[1], 0); err != nil {\n\t\t\tklog.Warningf(\"[reset] Failed to unmount mounted directory in %s: %s\", absoluteKubeletRunDirectory, m[1])\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/* This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/. *\/\n\npackage views\n\nimport (\n\t\"encoding\/json\"\n\t\"strings\"\n\n\t\"github.com\/ernestio\/api-gateway\/models\"\n\t\"log\"\n\n\tgraph \"gopkg.in\/r3labs\/graph.v2\"\n)\n\n\/\/ ServiceRender : Service representation to be rendered on the frontend\ntype ServiceRender struct {\n\tID string `json:\"id\"`\n\tDatacenterID int `json:\"datacenter_id\"`\n\tName string `json:\"name\"`\n\tVersion string `json:\"version\"`\n\tStatus string `json:\"status\"`\n\tUserID int `json:\"user_id\"`\n\tUserName string `json:\"user_name\"`\n\tLastKnownError string `json:\"last_known_error\"`\n\tOptions string `json:\"options\"`\n\tDefinition string `json:\"definition\"`\n\tVpcs []map[string]string `json:\"vpcs\"`\n\tNetworks []map[string]string `json:\"networks\"`\n\tInstances []map[string]string `json:\"instances\"`\n\tNats []map[string]string `json:\"nats\"`\n\tSecurityGroups []map[string]string `json:\"security_groups\"`\n\tElbs []map[string]string `json:\"elbs\"`\n\tRDSClusters []map[string]string `json:\"rds_clusters\"`\n\tRDSInstances []map[string]string `json:\"rds_instances\"`\n\tEBSVolumes []map[string]string `json:\"ebs_volumes\"`\n\tLoadBalancers []map[string]string `json:\"load_balancers\"`\n\tSQLDatabases []map[string]string `json:\"sql_databases\"`\n\tVirtualMachines []map[string]string `json:\"virtual_machines\"`\n}\n\n\/\/ Render : Map a Service to a ServiceRender\nfunc (o *ServiceRender) Render(s models.Service) (err error) {\n\to.ID = s.ID\n\to.DatacenterID = s.DatacenterID\n\to.Name = s.Name\n\to.Version = s.Version.String()\n\to.Status = s.Status\n\to.UserID = s.UserID\n\to.UserName = s.UserName\n\tif def, ok := s.Definition.(string); ok == true {\n\t\to.Definition = def\n\t}\n\n\tg, err := s.Mapping()\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn err\n\t}\n\n\to.Vpcs = RenderVpcs(g)\n\to.Networks = RenderNetworks(g)\n\to.SecurityGroups = RenderSecurityGroups(g)\n\to.Nats = RenderNats(g)\n\to.Instances = RenderInstances(g)\n\to.Elbs = RenderELBs(g)\n\to.RDSClusters = RenderRDSClusters(g)\n\to.RDSInstances = RenderRDSInstances(g)\n\to.EBSVolumes = RenderEBSVolumes(g)\n\to.LoadBalancers = RenderLoadBalancers(g)\n\to.SQLDatabases = RenderSQLDatabases(g)\n\to.VirtualMachines = RenderVirtualMachines(g)\n\n\treturn err\n}\n\n\/\/ RenderVpcs : renders a services vpcs\nfunc RenderVpcs(g *graph.Graph) []map[string]string {\n\tvar vpcs []map[string]string\n\n\tfor _, n := range g.GetComponents().ByType(\"vpc\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tid, _ := (*gc)[\"vpc_aws_id\"].(string)\n\t\tsubnet, _ := (*gc)[\"vpc_subnet\"].(string)\n\t\tvpcs = append(vpcs, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"vpc_id\": id,\n\t\t\t\"vpc_subnet\": subnet,\n\t\t})\n\t}\n\n\treturn vpcs\n}\n\n\/\/ RenderNetworks : renders a services networks\nfunc RenderNetworks(g *graph.Graph) []map[string]string {\n\tvar networks []map[string]string\n\n\tfor _, n := range g.GetComponents().ByType(\"network\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tid, _ := (*gc)[\"network_aws_id\"].(string)\n\t\taz, _ := (*gc)[\"availability_zone\"].(string)\n\t\tnetworks = append(networks, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"network_aws_id\": id,\n\t\t\t\"availability_zone\": az,\n\t\t})\n\t}\n\n\treturn networks\n}\n\n\/\/ RenderSecurityGroups : renders a services security groups\nfunc RenderSecurityGroups(g *graph.Graph) []map[string]string {\n\tvar sgs []map[string]string\n\n\tfor _, n := range g.GetComponents().ByType(\"firewall\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tid, _ := (*gc)[\"security_group_aws_id\"].(string)\n\t\tsgs = append(sgs, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"security_group_aws_id\": id,\n\t\t})\n\t}\n\n\treturn sgs\n}\n\n\/\/ RenderNats : renders a services nat gateways\nfunc RenderNats(g *graph.Graph) []map[string]string {\n\tvar nats []map[string]string\n\n\tfor _, n := range g.GetComponents().ByType(\"nat\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tid, _ := (*gc)[\"nat_gateway_aws_id\"].(string)\n\t\tpubIP, _ := (*gc)[\"nat_gateway_allocation_ip\"].(string)\n\t\tnats = append(nats, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"nat_gateway_aws_id\": id,\n\t\t\t\"public_ip\": pubIP,\n\t\t})\n\t}\n\n\treturn nats\n}\n\n\/\/ RenderELBs : renders a services elbs\nfunc RenderELBs(g *graph.Graph) []map[string]string {\n\tvar elbs []map[string]string\n\n\tfor _, n := range g.GetComponents().ByType(\"elb\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tdns, _ := (*gc)[\"dns_name\"].(string)\n\t\telbs = append(elbs, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"dns_name\": dns,\n\t\t})\n\t}\n\n\treturn elbs\n}\n\n\/\/ RenderInstances : renders a services instances\nfunc RenderInstances(g *graph.Graph) []map[string]string {\n\tvar instances []map[string]string\n\n\tfor _, n := range g.GetComponents().ByType(\"instance\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tid, _ := (*gc)[\"instance_aws_id\"].(string)\n\t\tpip, _ := (*gc)[\"public_ip\"].(string)\n\t\tip, _ := (*gc)[\"ip\"].(string)\n\t\tinstances = append(instances, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"instance_aws_id\": id,\n\t\t\t\"public_ip\": pip,\n\t\t\t\"ip\": ip,\n\t\t})\n\t}\n\n\treturn instances\n}\n\n\/\/ RenderRDSClusters : renders a services rds clusters\nfunc RenderRDSClusters(g *graph.Graph) []map[string]string {\n\tvar rdss []map[string]string\n\n\tfor _, n := range g.GetComponents().ByType(\"rds_cluster\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tendpoint, _ := (*gc)[\"endpoint\"].(string)\n\t\trdss = append(rdss, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"endpoint\": endpoint,\n\t\t})\n\t}\n\n\treturn rdss\n}\n\n\/\/ RenderRDSInstances : renders a services rds instances\nfunc RenderRDSInstances(g *graph.Graph) []map[string]string {\n\tvar rdss []map[string]string\n\n\tfor _, n := range g.GetComponents().ByType(\"rds_instance\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tendpoint, _ := (*gc)[\"endpoint\"].(string)\n\t\trdss = append(rdss, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"endpoint\": endpoint,\n\t\t})\n\t}\n\n\treturn rdss\n}\n\n\/\/ RenderEBSVolumes : renders a services ebs volumes\nfunc RenderEBSVolumes(g *graph.Graph) []map[string]string {\n\tvar rdss []map[string]string\n\n\tfor _, n := range g.GetComponents().ByType(\"ebs_volume\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tid, _ := (*gc)[\"volume_aws_id\"].(string)\n\t\trdss = append(rdss, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"volume_aws_id\": id,\n\t\t})\n\t}\n\n\treturn rdss\n}\n\n\/\/ RenderLoadBalancers : renders load balancers\nfunc RenderLoadBalancers(g *graph.Graph) []map[string]string {\n\treturn renderResources(g, \"lb\", func(gc *graph.GenericComponent) map[string]string {\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tid, _ := (*gc)[\"id\"].(string)\n\t\tconfigs, _ := (*gc)[\"frontend_ip_configurations\"].([]interface{})\n\t\tcfg, _ := configs[0].(map[string]string)\n\n\t\treturn map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"id\": id,\n\t\t\t\"public_ip\": cfg[\"public_ip_address\"],\n\t\t}\n\t})\n}\n\n\/\/ RenderVirtualMachines : renders virtual machines\nfunc RenderVirtualMachines(g *graph.Graph) []map[string]string {\n\tvar resources []map[string]string\n\tmappedIPs := make(map[string]interface{}, 0)\n\texistingIPs := make(map[string]string, 0)\n\n\tfor _, ip := range g.GetComponents().ByType(\"public_ip\") {\n\t\tgc := ip.(*graph.GenericComponent)\n\t\tid, _ := (*gc)[\"id\"].(string)\n\t\tipAddress, _ := (*gc)[\"ip_address\"].(string)\n\t\texistingIPs[id] = ipAddress\n\t}\n\n\tfor _, ni := range g.GetComponents().ByType(\"network_interface\") {\n\t\tvar public []string\n\t\tvar private []string\n\n\t\tgc := ni.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tips := make(map[string][]string)\n\n\t\tconfigs, _ := (*gc)[\"ip_configuration\"].([]interface{})\n\t\tfor _, cfg := range configs {\n\t\t\tc, _ := cfg.(map[string]interface{})\n\t\t\tpubID, _ := c[\"public_ip_address_id\"].(string)\n\t\t\tpri, _ := c[\"private_ip_address\"].(string)\n\t\t\tif pub, ok := existingIPs[pubID]; ok {\n\t\t\t\tpublic = append(public, pub)\n\t\t\t}\n\t\t\tprivate = append(private, pri)\n\t\t}\n\n\t\tips[\"public\"] = public\n\t\tips[\"private\"] = private\n\t\tmappedIPs[name] = make(map[string][]string, 0)\n\t\tmappedIPs[name] = ips\n\t}\n\n\tfor _, n := range g.GetComponents().ByType(\"virtual_machine\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tid, _ := (*gc)[\"id\"].(string)\n\t\tnetworks, _ := (*gc)[\"network_interfaces\"].([]interface{})\n\t\tpublicIPs := make([]string, 0)\n\t\tprivateIPs := make([]string, 0)\n\t\tfor _, ni := range networks {\n\t\t\tnetName := ni.(string)\n\t\t\tif val, ok := mappedIPs[netName]; ok {\n\t\t\t\tips, _ := val.(map[string][]string)\n\t\t\t\tpublicIPs = append(publicIPs, ips[\"public\"]...)\n\t\t\t\tprivateIPs = append(privateIPs, ips[\"private\"]...)\n\t\t\t}\n\t\t}\n\n\t\tresources = append(resources, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"id\": id,\n\t\t\t\"public_ip\": strings.Join(publicIPs, \", \"),\n\t\t\t\"private_ip\": strings.Join(privateIPs, \", \"),\n\t\t})\n\t}\n\n\treturn resources\n}\n\n\/\/ RenderSQLDatabases : renders sql databases\nfunc RenderSQLDatabases(g *graph.Graph) []map[string]string {\n\treturn renderResources(g, \"sql_database\", func(gc *graph.GenericComponent) map[string]string {\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tserver, _ := (*gc)[\"server_name\"].(string)\n\t\tid, _ := (*gc)[\"id\"].(string)\n\n\t\treturn map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"server_name\": server,\n\t\t\t\"id\": id,\n\t\t}\n\t})\n}\n\ntype convert func(*graph.GenericComponent) map[string]string\n\nfunc renderResources(g *graph.Graph, resourceType string, f convert) (resources []map[string]string) {\n\tfor _, n := range g.GetComponents().ByType(resourceType) {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tresources = append(resources, f(gc))\n\t}\n\n\treturn\n}\n\n\/\/ RenderCollection : Maps a collection of Service on a collection of ServiceRender\nfunc (o *ServiceRender) RenderCollection(services []models.Service) (list []ServiceRender, err error) {\n\tfor _, s := range services {\n\t\tvar output ServiceRender\n\t\tif err := output.Render(s); err == nil {\n\t\t\tlist = append(list, output)\n\t\t}\n\t}\n\n\treturn list, nil\n}\n\n\/\/ ToJSON : Converts a ServiceRender to json string\nfunc (o *ServiceRender) ToJSON() ([]byte, error) {\n\treturn json.Marshal(o)\n}\n<commit_msg>Calculate loadbalancer IP<commit_after>\/* This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/. *\/\n\npackage views\n\nimport (\n\t\"encoding\/json\"\n\t\"strings\"\n\n\t\"github.com\/ernestio\/api-gateway\/models\"\n\t\"log\"\n\n\tgraph \"gopkg.in\/r3labs\/graph.v2\"\n)\n\n\/\/ ServiceRender : Service representation to be rendered on the frontend\ntype ServiceRender struct {\n\tID string `json:\"id\"`\n\tDatacenterID int `json:\"datacenter_id\"`\n\tName string `json:\"name\"`\n\tVersion string `json:\"version\"`\n\tStatus string `json:\"status\"`\n\tUserID int `json:\"user_id\"`\n\tUserName string `json:\"user_name\"`\n\tLastKnownError string `json:\"last_known_error\"`\n\tOptions string `json:\"options\"`\n\tDefinition string `json:\"definition\"`\n\tVpcs []map[string]string `json:\"vpcs\"`\n\tNetworks []map[string]string `json:\"networks\"`\n\tInstances []map[string]string `json:\"instances\"`\n\tNats []map[string]string `json:\"nats\"`\n\tSecurityGroups []map[string]string `json:\"security_groups\"`\n\tElbs []map[string]string `json:\"elbs\"`\n\tRDSClusters []map[string]string `json:\"rds_clusters\"`\n\tRDSInstances []map[string]string `json:\"rds_instances\"`\n\tEBSVolumes []map[string]string `json:\"ebs_volumes\"`\n\tLoadBalancers []map[string]string `json:\"load_balancers\"`\n\tSQLDatabases []map[string]string `json:\"sql_databases\"`\n\tVirtualMachines []map[string]string `json:\"virtual_machines\"`\n}\n\n\/\/ Render : Map a Service to a ServiceRender\nfunc (o *ServiceRender) Render(s models.Service) (err error) {\n\to.ID = s.ID\n\to.DatacenterID = s.DatacenterID\n\to.Name = s.Name\n\to.Version = s.Version.String()\n\to.Status = s.Status\n\to.UserID = s.UserID\n\to.UserName = s.UserName\n\tif def, ok := s.Definition.(string); ok == true {\n\t\to.Definition = def\n\t}\n\n\tg, err := s.Mapping()\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn err\n\t}\n\n\to.Vpcs = RenderVpcs(g)\n\to.Networks = RenderNetworks(g)\n\to.SecurityGroups = RenderSecurityGroups(g)\n\to.Nats = RenderNats(g)\n\to.Instances = RenderInstances(g)\n\to.Elbs = RenderELBs(g)\n\to.RDSClusters = RenderRDSClusters(g)\n\to.RDSInstances = RenderRDSInstances(g)\n\to.EBSVolumes = RenderEBSVolumes(g)\n\to.LoadBalancers = RenderLoadBalancers(g)\n\to.SQLDatabases = RenderSQLDatabases(g)\n\to.VirtualMachines = RenderVirtualMachines(g)\n\n\treturn err\n}\n\n\/\/ RenderVpcs : renders a services vpcs\nfunc RenderVpcs(g *graph.Graph) []map[string]string {\n\tvar vpcs []map[string]string\n\n\tfor _, n := range g.GetComponents().ByType(\"vpc\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tid, _ := (*gc)[\"vpc_aws_id\"].(string)\n\t\tsubnet, _ := (*gc)[\"vpc_subnet\"].(string)\n\t\tvpcs = append(vpcs, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"vpc_id\": id,\n\t\t\t\"vpc_subnet\": subnet,\n\t\t})\n\t}\n\n\treturn vpcs\n}\n\n\/\/ RenderNetworks : renders a services networks\nfunc RenderNetworks(g *graph.Graph) []map[string]string {\n\tvar networks []map[string]string\n\n\tfor _, n := range g.GetComponents().ByType(\"network\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tid, _ := (*gc)[\"network_aws_id\"].(string)\n\t\taz, _ := (*gc)[\"availability_zone\"].(string)\n\t\tnetworks = append(networks, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"network_aws_id\": id,\n\t\t\t\"availability_zone\": az,\n\t\t})\n\t}\n\n\treturn networks\n}\n\n\/\/ RenderSecurityGroups : renders a services security groups\nfunc RenderSecurityGroups(g *graph.Graph) []map[string]string {\n\tvar sgs []map[string]string\n\n\tfor _, n := range g.GetComponents().ByType(\"firewall\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tid, _ := (*gc)[\"security_group_aws_id\"].(string)\n\t\tsgs = append(sgs, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"security_group_aws_id\": id,\n\t\t})\n\t}\n\n\treturn sgs\n}\n\n\/\/ RenderNats : renders a services nat gateways\nfunc RenderNats(g *graph.Graph) []map[string]string {\n\tvar nats []map[string]string\n\n\tfor _, n := range g.GetComponents().ByType(\"nat\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tid, _ := (*gc)[\"nat_gateway_aws_id\"].(string)\n\t\tpubIP, _ := (*gc)[\"nat_gateway_allocation_ip\"].(string)\n\t\tnats = append(nats, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"nat_gateway_aws_id\": id,\n\t\t\t\"public_ip\": pubIP,\n\t\t})\n\t}\n\n\treturn nats\n}\n\n\/\/ RenderELBs : renders a services elbs\nfunc RenderELBs(g *graph.Graph) []map[string]string {\n\tvar elbs []map[string]string\n\n\tfor _, n := range g.GetComponents().ByType(\"elb\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tdns, _ := (*gc)[\"dns_name\"].(string)\n\t\telbs = append(elbs, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"dns_name\": dns,\n\t\t})\n\t}\n\n\treturn elbs\n}\n\n\/\/ RenderInstances : renders a services instances\nfunc RenderInstances(g *graph.Graph) []map[string]string {\n\tvar instances []map[string]string\n\n\tfor _, n := range g.GetComponents().ByType(\"instance\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tid, _ := (*gc)[\"instance_aws_id\"].(string)\n\t\tpip, _ := (*gc)[\"public_ip\"].(string)\n\t\tip, _ := (*gc)[\"ip\"].(string)\n\t\tinstances = append(instances, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"instance_aws_id\": id,\n\t\t\t\"public_ip\": pip,\n\t\t\t\"ip\": ip,\n\t\t})\n\t}\n\n\treturn instances\n}\n\n\/\/ RenderRDSClusters : renders a services rds clusters\nfunc RenderRDSClusters(g *graph.Graph) []map[string]string {\n\tvar rdss []map[string]string\n\n\tfor _, n := range g.GetComponents().ByType(\"rds_cluster\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tendpoint, _ := (*gc)[\"endpoint\"].(string)\n\t\trdss = append(rdss, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"endpoint\": endpoint,\n\t\t})\n\t}\n\n\treturn rdss\n}\n\n\/\/ RenderRDSInstances : renders a services rds instances\nfunc RenderRDSInstances(g *graph.Graph) []map[string]string {\n\tvar rdss []map[string]string\n\n\tfor _, n := range g.GetComponents().ByType(\"rds_instance\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tendpoint, _ := (*gc)[\"endpoint\"].(string)\n\t\trdss = append(rdss, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"endpoint\": endpoint,\n\t\t})\n\t}\n\n\treturn rdss\n}\n\n\/\/ RenderEBSVolumes : renders a services ebs volumes\nfunc RenderEBSVolumes(g *graph.Graph) []map[string]string {\n\tvar rdss []map[string]string\n\n\tfor _, n := range g.GetComponents().ByType(\"ebs_volume\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tid, _ := (*gc)[\"volume_aws_id\"].(string)\n\t\trdss = append(rdss, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"volume_aws_id\": id,\n\t\t})\n\t}\n\n\treturn rdss\n}\n\n\/\/ RenderLoadBalancers : renders load balancers\nfunc RenderLoadBalancers(g *graph.Graph) []map[string]string {\n\tvar lbs []map[string]string\n\tips := listIPAddresses(g)\n\n\tfor _, n := range g.GetComponents().ByType(\"lb\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tid, _ := (*gc)[\"id\"].(string)\n\t\tconfigs, _ := (*gc)[\"frontend_ip_configurations\"].([]interface{})\n\t\tcfg, _ := configs[0].(map[string]interface{})\n\t\tipID, _ := cfg[\"public_ip_address_id\"].(string)\n\t\tip, _ := ips[ipID]\n\n\t\tlbs = append(lbs, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"id\": id,\n\t\t\t\"public_ip\": ip,\n\t\t})\n\t}\n\n\treturn lbs\n}\n\nfunc listIPAddresses(g *graph.Graph) map[string]string {\n\texistingIPs := make(map[string]string, 0)\n\n\tfor _, ip := range g.GetComponents().ByType(\"public_ip\") {\n\t\tgc := ip.(*graph.GenericComponent)\n\t\tid, _ := (*gc)[\"id\"].(string)\n\t\tipAddress, _ := (*gc)[\"ip_address\"].(string)\n\t\texistingIPs[id] = ipAddress\n\t}\n\n\treturn existingIPs\n}\n\n\/\/ RenderVirtualMachines : renders virtual machines\nfunc RenderVirtualMachines(g *graph.Graph) []map[string]string {\n\tvar resources []map[string]string\n\tmappedIPs := make(map[string]interface{}, 0)\n\texistingIPs := listIPAddresses(g)\n\n\tfor _, ni := range g.GetComponents().ByType(\"network_interface\") {\n\t\tvar public []string\n\t\tvar private []string\n\n\t\tgc := ni.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tips := make(map[string][]string)\n\n\t\tconfigs, _ := (*gc)[\"ip_configuration\"].([]interface{})\n\t\tfor _, cfg := range configs {\n\t\t\tc, _ := cfg.(map[string]interface{})\n\t\t\tpubID, _ := c[\"public_ip_address_id\"].(string)\n\t\t\tpri, _ := c[\"private_ip_address\"].(string)\n\t\t\tif pub, ok := existingIPs[pubID]; ok {\n\t\t\t\tpublic = append(public, pub)\n\t\t\t}\n\t\t\tprivate = append(private, pri)\n\t\t}\n\n\t\tips[\"public\"] = public\n\t\tips[\"private\"] = private\n\t\tmappedIPs[name] = make(map[string][]string, 0)\n\t\tmappedIPs[name] = ips\n\t}\n\n\tfor _, n := range g.GetComponents().ByType(\"virtual_machine\") {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tid, _ := (*gc)[\"id\"].(string)\n\t\tnetworks, _ := (*gc)[\"network_interfaces\"].([]interface{})\n\t\tpublicIPs := make([]string, 0)\n\t\tprivateIPs := make([]string, 0)\n\t\tfor _, ni := range networks {\n\t\t\tnetName := ni.(string)\n\t\t\tif val, ok := mappedIPs[netName]; ok {\n\t\t\t\tips, _ := val.(map[string][]string)\n\t\t\t\tpublicIPs = append(publicIPs, ips[\"public\"]...)\n\t\t\t\tprivateIPs = append(privateIPs, ips[\"private\"]...)\n\t\t\t}\n\t\t}\n\n\t\tresources = append(resources, map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"id\": id,\n\t\t\t\"public_ip\": strings.Join(publicIPs, \", \"),\n\t\t\t\"private_ip\": strings.Join(privateIPs, \", \"),\n\t\t})\n\t}\n\n\treturn resources\n}\n\n\/\/ RenderSQLDatabases : renders sql databases\nfunc RenderSQLDatabases(g *graph.Graph) []map[string]string {\n\treturn renderResources(g, \"sql_database\", func(gc *graph.GenericComponent) map[string]string {\n\t\tname, _ := (*gc)[\"name\"].(string)\n\t\tserver, _ := (*gc)[\"server_name\"].(string)\n\t\tid, _ := (*gc)[\"id\"].(string)\n\n\t\treturn map[string]string{\n\t\t\t\"name\": name,\n\t\t\t\"server_name\": server,\n\t\t\t\"id\": id,\n\t\t}\n\t})\n}\n\ntype convert func(*graph.GenericComponent) map[string]string\n\nfunc renderResources(g *graph.Graph, resourceType string, f convert) (resources []map[string]string) {\n\tfor _, n := range g.GetComponents().ByType(resourceType) {\n\t\tgc := n.(*graph.GenericComponent)\n\t\tresources = append(resources, f(gc))\n\t}\n\n\treturn\n}\n\n\/\/ RenderCollection : Maps a collection of Service on a collection of ServiceRender\nfunc (o *ServiceRender) RenderCollection(services []models.Service) (list []ServiceRender, err error) {\n\tfor _, s := range services {\n\t\tvar output ServiceRender\n\t\tif err := output.Render(s); err == nil {\n\t\t\tlist = append(list, output)\n\t\t}\n\t}\n\n\treturn list, nil\n}\n\n\/\/ ToJSON : Converts a ServiceRender to json string\nfunc (o *ServiceRender) ToJSON() ([]byte, error) {\n\treturn json.Marshal(o)\n}\n<|endoftext|>"} {"text":"<commit_before>package dht\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/google\/uuid\"\n\t\"github.com\/sirupsen\/logrus\"\n\n\tmessagebus \"github.com\/nimona\/go-nimona-messagebus\"\n\tnet \"github.com\/nimona\/go-nimona-net\"\n)\n\nconst (\n\tprotocolID = \"dht-kad\/v1\"\n)\n\n\/\/ DHTNode is the struct that implements the dht protocol\ntype DHTNode struct {\n\tlocalPeer net.Peer\n\tstore *Store\n\tnet net.Network\n\tmessageBus messagebus.MessageBus\n\tqueries map[string]*query\n\tmt sync.RWMutex\n}\n\nfunc NewDHTNode(bps []net.Peer, lp net.Peer, nn net.Network) (*DHTNode, error) {\n\t\/\/ create new routing table\n\tst, _ := newStore()\n\n\t\/\/ create messagebud\n\tmb, err := messagebus.New(protocolID, nn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Create DHT node\n\tnd := &DHTNode{\n\t\tlocalPeer: lp,\n\t\tstore: st,\n\t\tnet: nn,\n\t\tmessageBus: mb,\n\t\tqueries: map[string]*query{},\n\t}\n\n\t\/\/ Register message bus, message handler\n\tif err := mb.HandleMessage(nd.messageHandler); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Add bootstrap nodes\n\tfor _, peer := range bps {\n\t\tif err := nd.storePeer(peer, true); err != nil {\n\t\t\tlogrus.WithField(\"error\", err).Error(\"new could not store peer\")\n\t\t}\n\t\tif err := nd.putPeer(peer); err != nil {\n\t\t\tlogrus.WithField(\"error\", err).Error(\"new could not put peer\")\n\t\t}\n\t}\n\n\t\/\/ start refresh worker\n\t\/\/ ticker := time.NewTicker(15 * time.Second)\n\t\/\/ quit := make(chan struct{})\n\tgo func() {\n\t\t\/\/ refresh for the first time\n\t\tnd.refresh()\n\t\t\/\/ and then just wait\n\t\t\/\/ for {\n\t\t\/\/ \tselect {\n\t\t\/\/ \tcase <-ticker.C:\n\t\t\/\/ \t\tnd.refresh()\n\t\t\/\/ \tcase <-quit:\n\t\t\/\/ \t\tticker.Stop()\n\t\t\/\/ \t\treturn\n\t\t\/\/ \t}\n\t\t\/\/ }\n\t}()\n\n\treturn nd, nil\n}\n\nfunc (nd *DHTNode) refresh() {\n\tlogrus.Infof(\"Refreshing\")\n\tcps, err := nd.store.FindKeysNearestTo(KeyPrefixPeer, nd.GetLocalPeer().ID, numPeersNear*10)\n\tif err != nil {\n\t\tlogrus.WithError(err).Warnf(\"refresh could not get peers ids\")\n\t\treturn\n\t}\n\tctx := context.Background()\n\tfor _, cp := range cps {\n\t\tres, err := nd.Get(ctx, cp)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).WithField(\"peerID\", cps).Warnf(\"refresh could not get for peer\")\n\t\t\tcontinue\n\t\t}\n\t\tfor range res {\n\t\t\t\/\/ just swallow channel results\n\t\t}\n\t}\n}\n\nfunc (nd *DHTNode) messageHandler(hash []byte, msg messagebus.Message) error {\n\tswitch msg.Payload.Type {\n\tcase MessageTypeGet:\n\t\tgetMsg := &messageGet{}\n\t\tif err := json.Unmarshal(msg.Payload.Data, getMsg); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnd.getHandler(getMsg)\n\tcase MessageTypePut:\n\t\tputMsg := &messagePut{}\n\t\tif err := json.Unmarshal(msg.Payload.Data, putMsg); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnd.putHandler(putMsg)\n\tdefault:\n\t\tlogrus.Info(\"Call type not implemented\")\n\t}\n\treturn nil\n}\n\nfunc (nd *DHTNode) Get(ctx context.Context, key string) (chan string, error) {\n\tlogrus.Infof(\"Searching for key %s\", key)\n\n\t\/\/ create query\n\t\/\/ TODO query needs the context\n\tq := &query{\n\t\tid: uuid.New().String(),\n\t\tdht: nd,\n\t\tkey: key,\n\t\tcontactedPeers: []string{},\n\t\tresults: make(chan string, 100),\n\t\tincomingMessages: make(chan messagePut, 100),\n\t\tlock: &sync.RWMutex{},\n\t}\n\n\t\/\/ and store it\n\tnd.mt.Lock()\n\tnd.queries[q.id] = q\n\tnd.mt.Unlock()\n\n\t\/\/ run query\n\tq.Run(ctx)\n\n\t\/\/ return results channel\n\treturn q.results, nil\n}\n\nfunc (nd *DHTNode) GetPeer(ctx context.Context, id string) (net.Peer, error) {\n\t\/\/ get peer key\n\tres, err := nd.Get(ctx, getPeerKey(id))\n\tif err != nil {\n\t\treturn net.Peer{}, err\n\t}\n\n\t\/\/ hold addresses\n\taddrs := []string{}\n\n\t\/\/ go through results and create addresses array\n\tfor addr := range res {\n\t\taddrs = appendIfMissing(addrs, addr)\n\t}\n\n\t\/\/ check addrs\n\tif len(addrs) == 0 {\n\t\treturn net.Peer{}, ErrPeerNotFound\n\t}\n\n\treturn net.Peer{\n\t\tID: id,\n\t\tAddresses: addrs,\n\t}, nil\n}\n\nfunc (nd *DHTNode) sendMsgPeer(msgType string, msg interface{}, peerID string) error {\n\tif peerID == nd.localPeer.ID {\n\t\treturn nil\n\t}\n\n\tdata, err := json.Marshal(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpl := &messagebus.Payload{\n\t\tCreator: nd.localPeer.ID,\n\t\tCoded: \"json\",\n\t\tType: msgType,\n\t\tData: data,\n\t}\n\n\treturn nd.messageBus.Send(pl, []string{peerID})\n}\n\nfunc (nd *DHTNode) getHandler(msg *messageGet) {\n\t\/\/ origin peer is asking for a peer\n\n\t\/\/ store info on origin peer\n\tnd.storePeer(msg.OriginPeer, false)\n\tnd.putPeer(msg.OriginPeer)\n\n\t\/\/ check if we have the value of the key\n\tks, err := nd.store.Get(msg.Key)\n\tif err != nil {\n\t\tlogrus.WithField(\"msg\", msg).Error(\"Failed to find nodes near\")\n\t\treturn\n\t}\n\n\tlogrus.Infof(\"%+v\", nd.store.pairs)\n\n\t\/\/ send them if we do\n\tif len(ks) > 0 {\n\t\tmsgPut := &messagePut{\n\t\t\tQueryID: msg.QueryID,\n\t\t\tOriginPeer: msg.OriginPeer,\n\t\t\tKey: msg.Key,\n\t\t\tValues: ks,\n\t\t}\n\t\t\/\/ send response\n\t\tif err := nd.sendMsgPeer(MessageTypePut, msgPut, msg.OriginPeer.ID); err != nil {\n\t\t\tlogrus.WithError(err).Warnf(\"getHandler could not send msg\")\n\t\t}\n\t}\n\n\t\/\/ find peers nearest peers that might have it\n\tcps, err := nd.store.FindKeysNearestTo(KeyPrefixPeer, msg.Key, numPeersNear)\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"getHandler could not find nearest peers\")\n\t\treturn\n\t}\n\n\t\/\/ give up if there are no peers\n\tif len(cps) == 0 {\n\t\treturn\n\t}\n\n\t\/\/ send messages with closes peers\n\tfor _, cp := range cps {\n\t\tcpid := trimKey(cp, KeyPrefixPeer)\n\t\t\/\/ skil us and original peer\n\t\tif cpid == msg.OriginPeer.ID || cpid == nd.GetLocalPeer().ID {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ get neighbor addresses\n\t\taddrs, err := nd.store.Get(cp)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Warnf(\"getHandler could not get addrs\")\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ create a response\n\t\tmsgPut := &messagePut{\n\t\t\tQueryID: msg.QueryID,\n\t\t\tOriginPeer: msg.OriginPeer,\n\t\t\tKey: cp,\n\t\t\tValues: addrs,\n\t\t}\n\t\t\/\/ send response\n\t\tif err := nd.sendMsgPeer(MessageTypePut, msgPut, msg.OriginPeer.ID); err != nil {\n\t\t\tlogrus.WithError(err).Warnf(\"getHandler could not send msg\")\n\t\t}\n\t}\n}\n\nfunc (nd *DHTNode) putHandler(msg *messagePut) {\n\t\/\/ A peer we asked is informing us of a peer\n\tlogrus.WithField(\"key\", msg.Key).Infof(\"Got response\")\n\n\t\/\/ check if this still a valid query\n\tif q, ok := nd.queries[msg.QueryID]; ok {\n\t\tq.incomingMessages <- *msg\n\t}\n\n\t\/\/ add values to our store\n\tif checkKey(msg.Key) {\n\t\tfor _, v := range msg.Values {\n\t\t\tnd.store.Put(msg.Key, v, false)\n\t\t}\n\t}\n\n\t\/\/ check if this is a peer\n\tif strings.HasPrefix(msg.Key, KeyPrefixPeer) {\n\t\tpr, err := nd.gatherPeer(strings.Replace(msg.Key, KeyPrefixPeer, \"\", 1))\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Infof(\"putHandler could get pairs for putPeer\")\n\t\t\treturn\n\t\t}\n\t\tif err := nd.putPeer(pr); err != nil {\n\t\t\tlogrus.WithError(err).Infof(\"putHandler could get putPeer\")\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (nd *DHTNode) gatherPeer(peerID string) (net.Peer, error) {\n\taddrs, err := nd.store.Get(peerID)\n\tif err != nil {\n\t\treturn net.Peer{}, err\n\t}\n\tpr := net.Peer{\n\t\tID: peerID,\n\t\tAddresses: addrs,\n\t}\n\treturn pr, nil\n}\n\nfunc (nd *DHTNode) putPeer(peer net.Peer) error {\n\tlogrus.Infof(\"Adding peer to network peer=%v\", peer)\n\t\/\/ add peer to network\n\tif err := nd.net.PutPeer(peer); err != nil {\n\t\tlogrus.WithError(err).Warnf(\"Could not add peer to network\")\n\t\treturn err\n\t}\n\tlogrus.Infof(\"PUT PEER %+v\", peer)\n\treturn nil\n}\n\nfunc (nd *DHTNode) storePeer(peer net.Peer, persistent bool) error {\n\tfor _, addr := range peer.Addresses {\n\t\tlogrus.WithField(\"k\", getPeerKey(peer.ID)).WithField(\"v\", addr).Infof(\"Adding peer addresses to kv\")\n\t\tif err := nd.store.Put(getPeerKey(peer.ID), addr, persistent); err != nil {\n\t\t\tlogrus.WithError(err).WithField(\"peerID\", peer.ID).Warnf(\"storePeer could not put peer\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (nd *DHTNode) GetLocalPeer() net.Peer {\n\treturn nd.localPeer\n}\n<commit_msg>Add refreshing<commit_after>package dht\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/google\/uuid\"\n\t\"github.com\/sirupsen\/logrus\"\n\n\tmessagebus \"github.com\/nimona\/go-nimona-messagebus\"\n\tnet \"github.com\/nimona\/go-nimona-net\"\n)\n\nconst (\n\tprotocolID = \"dht-kad\/v1\"\n)\n\n\/\/ DHTNode is the struct that implements the dht protocol\ntype DHTNode struct {\n\tlocalPeer net.Peer\n\tstore *Store\n\tnet net.Network\n\tmessageBus messagebus.MessageBus\n\tqueries map[string]*query\n\tmt sync.RWMutex\n}\n\nfunc NewDHTNode(bps []net.Peer, lp net.Peer, nn net.Network) (*DHTNode, error) {\n\t\/\/ create new routing table\n\tst, _ := newStore()\n\n\t\/\/ create messagebud\n\tmb, err := messagebus.New(protocolID, nn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Create DHT node\n\tnd := &DHTNode{\n\t\tlocalPeer: lp,\n\t\tstore: st,\n\t\tnet: nn,\n\t\tmessageBus: mb,\n\t\tqueries: map[string]*query{},\n\t}\n\n\t\/\/ Register message bus, message handler\n\tif err := mb.HandleMessage(nd.messageHandler); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Add bootstrap nodes\n\tfor _, peer := range bps {\n\t\tif err := nd.storePeer(peer, true); err != nil {\n\t\t\tlogrus.WithField(\"error\", err).Error(\"new could not store peer\")\n\t\t}\n\t\tif err := nd.putPeer(peer); err != nil {\n\t\t\tlogrus.WithField(\"error\", err).Error(\"new could not put peer\")\n\t\t}\n\t}\n\n\t\/\/ start refresh worker\n\tticker := time.NewTicker(15 * time.Second)\n\tquit := make(chan struct{})\n\tgo func() {\n\t\t\/\/ refresh for the first time\n\t\tnd.refresh()\n\t\t\/\/ and then just wait\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tnd.refresh()\n\t\t\tcase <-quit:\n\t\t\t\tticker.Stop()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn nd, nil\n}\n\nfunc (nd *DHTNode) refresh() {\n\tlogrus.Infof(\"Refreshing\")\n\tcps, err := nd.store.FindKeysNearestTo(KeyPrefixPeer, nd.GetLocalPeer().ID, numPeersNear*10)\n\tif err != nil {\n\t\tlogrus.WithError(err).Warnf(\"refresh could not get peers ids\")\n\t\treturn\n\t}\n\tctx := context.Background()\n\tfor _, cp := range cps {\n\t\tres, err := nd.Get(ctx, cp)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).WithField(\"peerID\", cps).Warnf(\"refresh could not get for peer\")\n\t\t\tcontinue\n\t\t}\n\t\tfor range res {\n\t\t\t\/\/ just swallow channel results\n\t\t}\n\t}\n}\n\nfunc (nd *DHTNode) messageHandler(hash []byte, msg messagebus.Message) error {\n\tswitch msg.Payload.Type {\n\tcase MessageTypeGet:\n\t\tgetMsg := &messageGet{}\n\t\tif err := json.Unmarshal(msg.Payload.Data, getMsg); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnd.getHandler(getMsg)\n\tcase MessageTypePut:\n\t\tputMsg := &messagePut{}\n\t\tif err := json.Unmarshal(msg.Payload.Data, putMsg); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnd.putHandler(putMsg)\n\tdefault:\n\t\tlogrus.Info(\"Call type not implemented\")\n\t}\n\treturn nil\n}\n\nfunc (nd *DHTNode) Get(ctx context.Context, key string) (chan string, error) {\n\tlogrus.Infof(\"Searching for key %s\", key)\n\n\t\/\/ create query\n\t\/\/ TODO query needs the context\n\tq := &query{\n\t\tid: uuid.New().String(),\n\t\tdht: nd,\n\t\tkey: key,\n\t\tcontactedPeers: []string{},\n\t\tresults: make(chan string, 100),\n\t\tincomingMessages: make(chan messagePut, 100),\n\t\tlock: &sync.RWMutex{},\n\t}\n\n\t\/\/ and store it\n\tnd.mt.Lock()\n\tnd.queries[q.id] = q\n\tnd.mt.Unlock()\n\n\t\/\/ run query\n\tq.Run(ctx)\n\n\t\/\/ return results channel\n\treturn q.results, nil\n}\n\nfunc (nd *DHTNode) GetPeer(ctx context.Context, id string) (net.Peer, error) {\n\t\/\/ get peer key\n\tres, err := nd.Get(ctx, getPeerKey(id))\n\tif err != nil {\n\t\treturn net.Peer{}, err\n\t}\n\n\t\/\/ hold addresses\n\taddrs := []string{}\n\n\t\/\/ go through results and create addresses array\n\tfor addr := range res {\n\t\taddrs = appendIfMissing(addrs, addr)\n\t}\n\n\t\/\/ check addrs\n\tif len(addrs) == 0 {\n\t\treturn net.Peer{}, ErrPeerNotFound\n\t}\n\n\treturn net.Peer{\n\t\tID: id,\n\t\tAddresses: addrs,\n\t}, nil\n}\n\nfunc (nd *DHTNode) sendMsgPeer(msgType string, msg interface{}, peerID string) error {\n\tif peerID == nd.localPeer.ID {\n\t\treturn nil\n\t}\n\n\tdata, err := json.Marshal(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpl := &messagebus.Payload{\n\t\tCreator: nd.localPeer.ID,\n\t\tCoded: \"json\",\n\t\tType: msgType,\n\t\tData: data,\n\t}\n\n\treturn nd.messageBus.Send(pl, []string{peerID})\n}\n\nfunc (nd *DHTNode) getHandler(msg *messageGet) {\n\t\/\/ origin peer is asking for a peer\n\n\t\/\/ store info on origin peer\n\tnd.storePeer(msg.OriginPeer, false)\n\tnd.putPeer(msg.OriginPeer)\n\n\t\/\/ check if we have the value of the key\n\tks, err := nd.store.Get(msg.Key)\n\tif err != nil {\n\t\tlogrus.WithField(\"msg\", msg).Error(\"Failed to find nodes near\")\n\t\treturn\n\t}\n\n\tlogrus.Infof(\"%+v\", nd.store.pairs)\n\n\t\/\/ send them if we do\n\tif len(ks) > 0 {\n\t\tmsgPut := &messagePut{\n\t\t\tQueryID: msg.QueryID,\n\t\t\tOriginPeer: msg.OriginPeer,\n\t\t\tKey: msg.Key,\n\t\t\tValues: ks,\n\t\t}\n\t\t\/\/ send response\n\t\tif err := nd.sendMsgPeer(MessageTypePut, msgPut, msg.OriginPeer.ID); err != nil {\n\t\t\tlogrus.WithError(err).Warnf(\"getHandler could not send msg\")\n\t\t}\n\t}\n\n\t\/\/ find peers nearest peers that might have it\n\tcps, err := nd.store.FindKeysNearestTo(KeyPrefixPeer, msg.Key, numPeersNear)\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"getHandler could not find nearest peers\")\n\t\treturn\n\t}\n\n\t\/\/ give up if there are no peers\n\tif len(cps) == 0 {\n\t\treturn\n\t}\n\n\t\/\/ send messages with closes peers\n\tfor _, cp := range cps {\n\t\tcpid := trimKey(cp, KeyPrefixPeer)\n\t\t\/\/ skil us and original peer\n\t\tif cpid == msg.OriginPeer.ID || cpid == nd.GetLocalPeer().ID {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ get neighbor addresses\n\t\taddrs, err := nd.store.Get(cp)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Warnf(\"getHandler could not get addrs\")\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ create a response\n\t\tmsgPut := &messagePut{\n\t\t\tQueryID: msg.QueryID,\n\t\t\tOriginPeer: msg.OriginPeer,\n\t\t\tKey: cp,\n\t\t\tValues: addrs,\n\t\t}\n\t\t\/\/ send response\n\t\tif err := nd.sendMsgPeer(MessageTypePut, msgPut, msg.OriginPeer.ID); err != nil {\n\t\t\tlogrus.WithError(err).Warnf(\"getHandler could not send msg\")\n\t\t}\n\t}\n}\n\nfunc (nd *DHTNode) putHandler(msg *messagePut) {\n\t\/\/ A peer we asked is informing us of a peer\n\tlogrus.WithField(\"key\", msg.Key).Infof(\"Got response\")\n\n\t\/\/ check if this still a valid query\n\tif q, ok := nd.queries[msg.QueryID]; ok {\n\t\tq.incomingMessages <- *msg\n\t}\n\n\t\/\/ add values to our store\n\tif checkKey(msg.Key) {\n\t\tfor _, v := range msg.Values {\n\t\t\tnd.store.Put(msg.Key, v, false)\n\t\t}\n\t}\n\n\t\/\/ check if this is a peer\n\tif strings.HasPrefix(msg.Key, KeyPrefixPeer) {\n\t\tpr, err := nd.gatherPeer(strings.Replace(msg.Key, KeyPrefixPeer, \"\", 1))\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Infof(\"putHandler could get pairs for putPeer\")\n\t\t\treturn\n\t\t}\n\t\tif err := nd.putPeer(pr); err != nil {\n\t\t\tlogrus.WithError(err).Infof(\"putHandler could get putPeer\")\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (nd *DHTNode) gatherPeer(peerID string) (net.Peer, error) {\n\taddrs, err := nd.store.Get(peerID)\n\tif err != nil {\n\t\treturn net.Peer{}, err\n\t}\n\tpr := net.Peer{\n\t\tID: peerID,\n\t\tAddresses: addrs,\n\t}\n\treturn pr, nil\n}\n\nfunc (nd *DHTNode) putPeer(peer net.Peer) error {\n\tlogrus.Infof(\"Adding peer to network peer=%v\", peer)\n\t\/\/ add peer to network\n\tif err := nd.net.PutPeer(peer); err != nil {\n\t\tlogrus.WithError(err).Warnf(\"Could not add peer to network\")\n\t\treturn err\n\t}\n\tlogrus.Infof(\"PUT PEER %+v\", peer)\n\treturn nil\n}\n\nfunc (nd *DHTNode) storePeer(peer net.Peer, persistent bool) error {\n\tfor _, addr := range peer.Addresses {\n\t\tlogrus.WithField(\"k\", getPeerKey(peer.ID)).WithField(\"v\", addr).Infof(\"Adding peer addresses to kv\")\n\t\tif err := nd.store.Put(getPeerKey(peer.ID), addr, persistent); err != nil {\n\t\t\tlogrus.WithError(err).WithField(\"peerID\", peer.ID).Warnf(\"storePeer could not put peer\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (nd *DHTNode) GetLocalPeer() net.Peer {\n\treturn nd.localPeer\n}\n<|endoftext|>"} {"text":"<commit_before>package instances\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t. \"launchpad.net\/gocheck\"\n\t\"launchpad.net\/juju-core\/constraints\"\n\t\"launchpad.net\/juju-core\/testing\"\n\t\"strings\"\n)\n\ntype imageSuite struct {\n\ttesting.LoggingSuite\n}\n\nvar _ = Suite(&imageSuite{})\n\nfunc (s *imageSuite) SetUpSuite(c *C) {\n\ts.LoggingSuite.SetUpSuite(c)\n}\n\nfunc (s *imageSuite) TearDownSuite(c *C) {\n\ts.LoggingSuite.TearDownTest(c)\n}\n\nvar imagesData = imagesFields(\n\t\"instance-store amd64 us-east-1 ami-00000011 paravirtual\",\n\t\"ebs amd64 eu-west-1 ami-00000016 paravirtual\",\n\t\"ebs i386 ap-northeast-1 ami-00000023 paravirtual\",\n\t\"ebs amd64 ap-northeast-1 ami-00000026 paravirtual\",\n\t\"ebs amd64 ap-northeast-1 ami-00000087 hvm\",\n\t\"ebs amd64 test ami-00000033 paravirtual\",\n\t\"ebs i386 test ami-00000034 paravirtual\",\n\t\"ebs amd64 test ami-00000035 hvm\",\n\t\"ebs i386 i386-only ami-00000036 paravirtual\",\n)\n\nfunc imagesFields(srcs ...string) string {\n\tstrs := make([]string, len(srcs))\n\tfor i, src := range srcs {\n\t\tparts := strings.Split(src, \" \")\n\t\tif len(parts) != 5 {\n\t\t\tpanic(\"bad clouddata field input\")\n\t\t}\n\t\targs := make([]interface{}, len(parts))\n\t\tfor i, part := range parts {\n\t\t\targs[i] = part\n\t\t}\n\t\t\/\/ Ignored fields are left empty for clarity's sake, and two additional\n\t\t\/\/ tabs are tacked on to the end to verify extra columns are ignored.\n\t\tstrs[i] = fmt.Sprintf(\"\\t\\t\\t\\t%s\\t%s\\t%s\\t%s\\t\\t\\t%s\\t\\t\\n\", args...)\n\t}\n\treturn strings.Join(strs, \"\")\n}\n\ntype instanceSpecTestParams struct {\n\tdesc string\n\tregion string\n\tarches []string\n\tconstraints string\n\tinstanceTypes []InstanceType\n\tdefaultImageId string\n\tdefaultInstanceType string\n\timageId string\n\tinstanceTypeId string\n\tinstanceTypeName string\n\terr string\n}\n\nfunc (p *instanceSpecTestParams) init() {\n\tif p.arches == nil {\n\t\tp.arches = Both\n\t}\n\tif p.instanceTypes == nil {\n\t\tp.instanceTypes = []InstanceType{{Id: \"1\", Name: \"it-1\", Arches: Both}}\n\t\tp.instanceTypeId = \"1\"\n\t\tp.instanceTypeName = \"it-1\"\n\t}\n}\n\nvar findInstanceSpecTests = []instanceSpecTestParams{\n\t{\n\t\tdesc: \"image exists in metadata\",\n\t\tregion: \"test\",\n\t\tdefaultImageId: \"1234\",\n\t\timageId: \"ami-00000033\",\n\t},\n\t{\n\t\tdesc: \"no image exists in metadata, use supplied default\",\n\t\tregion: \"invalid-region\",\n\t\tdefaultImageId: \"1234\",\n\t\timageId: \"1234\",\n\t},\n\t{\n\t\tdesc: \"no image exists in metadata, no default supplied\",\n\t\tregion: \"invalid-region\",\n\t\timageId: \"1234\",\n\t\terr: `no \"raring\" images in invalid-region with arches \\[amd64 i386\\], and no default specified`,\n\t},\n\t{\n\t\tdesc: \"no valid instance types\",\n\t\tregion: \"test\",\n\t\tinstanceTypes: []InstanceType{},\n\t\terr: `no instance types in test matching constraints \"cpu-power=100\", and no default specified`,\n\t},\n\t{\n\t\tdesc: \"no compatible instance types\",\n\t\tregion: \"i386-only\",\n\t\tinstanceTypes: []InstanceType{{Id: \"1\", Name: \"it-1\", Arches: Amd64, Mem: 2048}},\n\t\terr: `no \"raring\" images in i386-only matching instance types \\[it-1\\]`,\n\t},\n\t{\n\t\tdesc: \"fallback instance type, enough memory for mongodb\",\n\t\tregion: \"test\",\n\t\tconstraints: \"mem=8G\",\n\t\tinstanceTypes: []InstanceType{\n\t\t\t{Id: \"3\", Name: \"it-3\", Arches: Amd64, Mem: 4096},\n\t\t\t{Id: \"2\", Name: \"it-2\", Arches: Amd64, Mem: 2048},\n\t\t\t{Id: \"1\", Name: \"it-1\", Arches: Amd64, Mem: 512},\n\t\t},\n\t\timageId: \"ami-00000033\",\n\t\tinstanceTypeId: \"2\",\n\t\tinstanceTypeName: \"it-2\",\n\t},\n\t{\n\t\tdesc: \"fallback instance type, not enough memory for mongodb\",\n\t\tregion: \"test\",\n\t\tconstraints: \"mem=4G\",\n\t\tinstanceTypes: []InstanceType{\n\t\t\t{Id: \"2\", Name: \"it-2\", Arches: Amd64, Mem: 256},\n\t\t\t{Id: \"1\", Name: \"it-1\", Arches: Amd64, Mem: 512},\n\t\t},\n\t\timageId: \"ami-00000033\",\n\t\tinstanceTypeId: \"1\",\n\t\tinstanceTypeName: \"it-1\",\n\t},\n}\n\nfunc (s *imageSuite) TestFindInstanceSpec(c *C) {\n\tfor _, t := range findInstanceSpecTests {\n\t\tc.Logf(\"test: %v\", t.desc)\n\t\tt.init()\n\t\tr := bufio.NewReader(bytes.NewBufferString(imagesData))\n\t\tspec, err := FindInstanceSpec(r, &InstanceConstraint{\n\t\t\tSeries: \"raring\",\n\t\t\tRegion: t.region,\n\t\t\tArches: t.arches,\n\t\t\tConstraints: constraints.MustParse(t.constraints),\n\t\t\tDefaultImageId: t.defaultImageId,\n\t\t}, t.instanceTypes, nil)\n\t\tif t.err != \"\" {\n\t\t\tc.Check(err, ErrorMatches, t.err)\n\t\t\tcontinue\n\t\t}\n\t\tif !c.Check(err, IsNil) {\n\t\t\tcontinue\n\t\t}\n\t\tc.Check(spec.Image.Id, Equals, t.imageId)\n\t\tc.Check(spec.InstanceTypeId, Equals, t.instanceTypeId)\n\t\tc.Check(spec.InstanceTypeName, Equals, t.instanceTypeName)\n\t}\n}\n\nvar getImagesTests = []struct {\n\tregion string\n\tseries string\n\tarches []string\n\timages []Image\n\terr string\n}{\n\t{\n\t\tregion: \"us-east-1\",\n\t\tseries: \"precise\",\n\t\tarches: Both,\n\t\terr: `no \"precise\" images in us-east-1 with arches \\[amd64 i386\\]`,\n\t}, {\n\t\tregion: \"eu-west-1\",\n\t\tseries: \"precise\",\n\t\tarches: []string{\"i386\"},\n\t\terr: `no \"precise\" images in eu-west-1 with arches \\[i386\\]`,\n\t}, {\n\t\tregion: \"ap-northeast-1\",\n\t\tseries: \"precise\",\n\t\tarches: Both,\n\t\timages: []Image{\n\t\t\t{\"ami-00000026\", \"amd64\", false},\n\t\t\t{\"ami-00000087\", \"amd64\", true},\n\t\t\t{\"ami-00000023\", \"i386\", false},\n\t\t},\n\t}, {\n\t\tregion: \"ap-northeast-1\",\n\t\tseries: \"precise\",\n\t\tarches: []string{\"amd64\"},\n\t\timages: []Image{\n\t\t\t{\"ami-00000026\", \"amd64\", false},\n\t\t\t{\"ami-00000087\", \"amd64\", true},\n\t\t},\n\t}, {\n\t\tregion: \"ap-northeast-1\",\n\t\tseries: \"precise\",\n\t\tarches: []string{\"i386\"},\n\t\timages: []Image{\n\t\t\t{\"ami-00000023\", \"i386\", false},\n\t\t},\n\t},\n}\n\nfunc (s *imageSuite) TestGetImages(c *C) {\n\tvar ebs = \"ebs\"\n\tvar cluster = \"hvm\"\n\tfor i, t := range getImagesTests {\n\t\tc.Logf(\"test %d\", i)\n\t\tr := bufio.NewReader(bytes.NewBufferString(imagesData))\n\t\timages, err := getImages(r, &InstanceConstraint{\n\t\t\tRegion: t.region,\n\t\t\tSeries: t.series,\n\t\t\tArches: t.arches,\n\t\t\tStorage: &ebs,\n\t\t\tCluster: &cluster,\n\t\t})\n\t\tif t.err != \"\" {\n\t\t\tc.Check(err, ErrorMatches, t.err)\n\t\t\tcontinue\n\t\t}\n\t\tif !c.Check(err, IsNil) {\n\t\t\tcontinue\n\t\t}\n\t\tc.Check(images, DeepEquals, t.images)\n\t}\n}\n\nvar imageMatchtests = []struct {\n\timage Image\n\titype InstanceType\n\tmatch bool\n}{\n\t{\n\t\timage: Image{Arch: \"amd64\"},\n\t\titype: InstanceType{Arches: []string{\"amd64\"}},\n\t\tmatch: true,\n\t}, {\n\t\timage: Image{Arch: \"amd64\"},\n\t\titype: InstanceType{Arches: []string{\"i386\", \"amd64\"}},\n\t\tmatch: true,\n\t}, {\n\t\timage: Image{Arch: \"amd64\", Clustered: true},\n\t\titype: InstanceType{Arches: []string{\"amd64\"}, Clustered: true},\n\t\tmatch: true,\n\t}, {\n\t\timage: Image{Arch: \"i386\"},\n\t\titype: InstanceType{Arches: []string{\"amd64\"}},\n\t}, {\n\t\timage: Image{Arch: \"amd64\", Clustered: true},\n\t\titype: InstanceType{Arches: []string{\"amd64\"}},\n\t}, {\n\t\timage: Image{Arch: \"amd64\"},\n\t\titype: InstanceType{Arches: []string{\"amd64\"}, Clustered: true},\n\t},\n}\n\nfunc (s *imageSuite) TestImageMatch(c *C) {\n\tfor i, t := range imageMatchtests {\n\t\tc.Logf(\"test %d\", i)\n\t\tc.Check(t.image.match(t.itype), Equals, t.match)\n\t}\n}\n<commit_msg>Add missing test setup<commit_after>package instances\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t. \"launchpad.net\/gocheck\"\n\t\"launchpad.net\/juju-core\/constraints\"\n\tcoretesting \"launchpad.net\/juju-core\/testing\"\n\t\"strings\"\n\t\"testing\"\n)\n\ntype imageSuite struct {\n\tcoretesting.LoggingSuite\n}\n\nfunc Test(t *testing.T) {\n\tTestingT(t)\n}\n\nvar _ = Suite(&imageSuite{})\n\nfunc (s *imageSuite) SetUpSuite(c *C) {\n\ts.LoggingSuite.SetUpSuite(c)\n}\n\nfunc (s *imageSuite) TearDownSuite(c *C) {\n\ts.LoggingSuite.TearDownTest(c)\n}\n\nvar imagesData = imagesFields(\n\t\"instance-store amd64 us-east-1 ami-00000011 paravirtual\",\n\t\"ebs amd64 eu-west-1 ami-00000016 paravirtual\",\n\t\"ebs i386 ap-northeast-1 ami-00000023 paravirtual\",\n\t\"ebs amd64 ap-northeast-1 ami-00000026 paravirtual\",\n\t\"ebs amd64 ap-northeast-1 ami-00000087 hvm\",\n\t\"ebs amd64 test ami-00000033 paravirtual\",\n\t\"ebs i386 test ami-00000034 paravirtual\",\n\t\"ebs amd64 test ami-00000035 hvm\",\n\t\"ebs i386 i386-only ami-00000036 paravirtual\",\n)\n\nfunc imagesFields(srcs ...string) string {\n\tstrs := make([]string, len(srcs))\n\tfor i, src := range srcs {\n\t\tparts := strings.Split(src, \" \")\n\t\tif len(parts) != 5 {\n\t\t\tpanic(\"bad clouddata field input\")\n\t\t}\n\t\targs := make([]interface{}, len(parts))\n\t\tfor i, part := range parts {\n\t\t\targs[i] = part\n\t\t}\n\t\t\/\/ Ignored fields are left empty for clarity's sake, and two additional\n\t\t\/\/ tabs are tacked on to the end to verify extra columns are ignored.\n\t\tstrs[i] = fmt.Sprintf(\"\\t\\t\\t\\t%s\\t%s\\t%s\\t%s\\t\\t\\t%s\\t\\t\\n\", args...)\n\t}\n\treturn strings.Join(strs, \"\")\n}\n\ntype instanceSpecTestParams struct {\n\tdesc string\n\tregion string\n\tarches []string\n\tconstraints string\n\tinstanceTypes []InstanceType\n\tdefaultImageId string\n\tdefaultInstanceType string\n\timageId string\n\tinstanceTypeId string\n\tinstanceTypeName string\n\terr string\n}\n\nfunc (p *instanceSpecTestParams) init() {\n\tif p.arches == nil {\n\t\tp.arches = Both\n\t}\n\tif p.instanceTypes == nil {\n\t\tp.instanceTypes = []InstanceType{{Id: \"1\", Name: \"it-1\", Arches: Both}}\n\t\tp.instanceTypeId = \"1\"\n\t\tp.instanceTypeName = \"it-1\"\n\t}\n}\n\nvar findInstanceSpecTests = []instanceSpecTestParams{\n\t{\n\t\tdesc: \"image exists in metadata\",\n\t\tregion: \"test\",\n\t\tdefaultImageId: \"1234\",\n\t\timageId: \"ami-00000033\",\n\t},\n\t{\n\t\tdesc: \"no image exists in metadata, use supplied default\",\n\t\tregion: \"invalid-region\",\n\t\tdefaultImageId: \"1234\",\n\t\timageId: \"1234\",\n\t},\n\t{\n\t\tdesc: \"no image exists in metadata, no default supplied\",\n\t\tregion: \"invalid-region\",\n\t\timageId: \"1234\",\n\t\terr: `no \"raring\" images in invalid-region with arches \\[amd64 i386\\], and no default specified`,\n\t},\n\t{\n\t\tdesc: \"no valid instance types\",\n\t\tregion: \"test\",\n\t\tinstanceTypes: []InstanceType{},\n\t\terr: `no instance types in test matching constraints \"cpu-power=100\", and no default specified`,\n\t},\n\t{\n\t\tdesc: \"no compatible instance types\",\n\t\tregion: \"i386-only\",\n\t\tinstanceTypes: []InstanceType{{Id: \"1\", Name: \"it-1\", Arches: Amd64, Mem: 2048}},\n\t\terr: `no \"raring\" images in i386-only matching instance types \\[it-1\\]`,\n\t},\n\t{\n\t\tdesc: \"fallback instance type, enough memory for mongodb\",\n\t\tregion: \"test\",\n\t\tconstraints: \"mem=8G\",\n\t\tinstanceTypes: []InstanceType{\n\t\t\t{Id: \"3\", Name: \"it-3\", Arches: Amd64, Mem: 4096},\n\t\t\t{Id: \"2\", Name: \"it-2\", Arches: Amd64, Mem: 2048},\n\t\t\t{Id: \"1\", Name: \"it-1\", Arches: Amd64, Mem: 512},\n\t\t},\n\t\timageId: \"ami-00000033\",\n\t\tinstanceTypeId: \"2\",\n\t\tinstanceTypeName: \"it-2\",\n\t},\n\t{\n\t\tdesc: \"fallback instance type, not enough memory for mongodb\",\n\t\tregion: \"test\",\n\t\tconstraints: \"mem=4G\",\n\t\tinstanceTypes: []InstanceType{\n\t\t\t{Id: \"2\", Name: \"it-2\", Arches: Amd64, Mem: 256},\n\t\t\t{Id: \"1\", Name: \"it-1\", Arches: Amd64, Mem: 512},\n\t\t},\n\t\timageId: \"ami-00000033\",\n\t\tinstanceTypeId: \"1\",\n\t\tinstanceTypeName: \"it-1\",\n\t},\n}\n\nfunc (s *imageSuite) TestFindInstanceSpec(c *C) {\n\tfor _, t := range findInstanceSpecTests {\n\t\tc.Logf(\"test: %v\", t.desc)\n\t\tt.init()\n\t\tr := bufio.NewReader(bytes.NewBufferString(imagesData))\n\t\tspec, err := FindInstanceSpec(r, &InstanceConstraint{\n\t\t\tSeries: \"raring\",\n\t\t\tRegion: t.region,\n\t\t\tArches: t.arches,\n\t\t\tConstraints: constraints.MustParse(t.constraints),\n\t\t\tDefaultImageId: t.defaultImageId,\n\t\t}, t.instanceTypes, nil)\n\t\tif t.err != \"\" {\n\t\t\tc.Check(err, ErrorMatches, t.err)\n\t\t\tcontinue\n\t\t}\n\t\tif !c.Check(err, IsNil) {\n\t\t\tcontinue\n\t\t}\n\t\tc.Check(spec.Image.Id, Equals, t.imageId)\n\t\tc.Check(spec.InstanceTypeId, Equals, t.instanceTypeId)\n\t\tc.Check(spec.InstanceTypeName, Equals, t.instanceTypeName)\n\t}\n}\n\nvar getImagesTests = []struct {\n\tregion string\n\tseries string\n\tarches []string\n\timages []Image\n\terr string\n}{\n\t{\n\t\tregion: \"us-east-1\",\n\t\tseries: \"precise\",\n\t\tarches: Both,\n\t\terr: `no \"precise\" images in us-east-1 with arches \\[amd64 i386\\]`,\n\t}, {\n\t\tregion: \"eu-west-1\",\n\t\tseries: \"precise\",\n\t\tarches: []string{\"i386\"},\n\t\terr: `no \"precise\" images in eu-west-1 with arches \\[i386\\]`,\n\t}, {\n\t\tregion: \"ap-northeast-1\",\n\t\tseries: \"precise\",\n\t\tarches: Both,\n\t\timages: []Image{\n\t\t\t{\"ami-00000026\", \"amd64\", false},\n\t\t\t{\"ami-00000087\", \"amd64\", true},\n\t\t\t{\"ami-00000023\", \"i386\", false},\n\t\t},\n\t}, {\n\t\tregion: \"ap-northeast-1\",\n\t\tseries: \"precise\",\n\t\tarches: []string{\"amd64\"},\n\t\timages: []Image{\n\t\t\t{\"ami-00000026\", \"amd64\", false},\n\t\t\t{\"ami-00000087\", \"amd64\", true},\n\t\t},\n\t}, {\n\t\tregion: \"ap-northeast-1\",\n\t\tseries: \"precise\",\n\t\tarches: []string{\"i386\"},\n\t\timages: []Image{\n\t\t\t{\"ami-00000023\", \"i386\", false},\n\t\t},\n\t},\n}\n\nfunc (s *imageSuite) TestGetImages(c *C) {\n\tvar ebs = \"ebs\"\n\tvar cluster = \"hvm\"\n\tfor i, t := range getImagesTests {\n\t\tc.Logf(\"test %d\", i)\n\t\tr := bufio.NewReader(bytes.NewBufferString(imagesData))\n\t\timages, err := getImages(r, &InstanceConstraint{\n\t\t\tRegion: t.region,\n\t\t\tSeries: t.series,\n\t\t\tArches: t.arches,\n\t\t\tStorage: &ebs,\n\t\t\tCluster: &cluster,\n\t\t})\n\t\tif t.err != \"\" {\n\t\t\tc.Check(err, ErrorMatches, t.err)\n\t\t\tcontinue\n\t\t}\n\t\tif !c.Check(err, IsNil) {\n\t\t\tcontinue\n\t\t}\n\t\tc.Check(images, DeepEquals, t.images)\n\t}\n}\n\nvar imageMatchtests = []struct {\n\timage Image\n\titype InstanceType\n\tmatch bool\n}{\n\t{\n\t\timage: Image{Arch: \"amd64\"},\n\t\titype: InstanceType{Arches: []string{\"amd64\"}},\n\t\tmatch: true,\n\t}, {\n\t\timage: Image{Arch: \"amd64\"},\n\t\titype: InstanceType{Arches: []string{\"i386\", \"amd64\"}},\n\t\tmatch: true,\n\t}, {\n\t\timage: Image{Arch: \"amd64\", Clustered: true},\n\t\titype: InstanceType{Arches: []string{\"amd64\"}, Clustered: true},\n\t\tmatch: true,\n\t}, {\n\t\timage: Image{Arch: \"i386\"},\n\t\titype: InstanceType{Arches: []string{\"amd64\"}},\n\t}, {\n\t\timage: Image{Arch: \"amd64\", Clustered: true},\n\t\titype: InstanceType{Arches: []string{\"amd64\"}},\n\t}, {\n\t\timage: Image{Arch: \"amd64\"},\n\t\titype: InstanceType{Arches: []string{\"amd64\"}, Clustered: true},\n\t},\n}\n\nfunc (s *imageSuite) TestImageMatch(c *C) {\n\tfor i, t := range imageMatchtests {\n\t\tc.Logf(\"test %d\", i)\n\t\tc.Check(t.image.match(t.itype), Equals, t.match)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"tinygo.org\/x\/bluetooth\"\n)\n\nvar KnownServiceUUIDs = []bluetooth.UUID{\n\tbluetooth.ServiceUUIDCyclingSpeedAndCadence,\n\tbluetooth.ServiceUUIDCyclingPower,\n\tbluetooth.ServiceUUIDHeartRate,\n\n\t\/\/ General controllable device, seems more involved.\n\t\/\/ bluetooth.ServiceUUIDFitnessMachine,\n}\n\nvar KnownServiceCharacteristicUUIDs = map[bluetooth.UUID][]bluetooth.UUID{\n\t\/\/ https:\/\/www.bluetooth.com\/specifications\/specs\/cycling-power-service-1-1\/\n\tbluetooth.ServiceUUIDCyclingPower: {\n\t\tbluetooth.CharacteristicUUIDCyclingPowerMeasurement,\n\t\tbluetooth.CharacteristicUUIDCyclingPowerFeature,\n\t\t\/\/ TODO:\n\t\t\/\/ Not a standardized characteristic, but this is offered by KICKR.\n\t\t\/\/ See GoldenCheetah source for some use examples:\n\t\t\/\/ https:\/\/github.com\/GoldenCheetah\/GoldenCheetah\/blob\/master\/src\/Train\/BT40Device.cpp\n\t\t\/\/\n\t\t\/\/ var WahooKickrControlCharacteristic = bluetooth.ParseUUID(\n\t\t\/\/ \t\"a026e005-0a7d-4ab3-97fa-f1500f9feb8b\"\n\t\t\/\/ )\n\t\t\/\/ TODO: Also, how does this one work?\n\t\t\/\/ bluetooth.CharacteristicUUIDCyclingPowerControlPoint,\n\t},\n\tbluetooth.ServiceUUIDHeartRate: {\n\t\tbluetooth.CharacteristicUUIDHeartRateMeasurement,\n\t},\n}\nvar (\n\tKnownServiceNames = map[bluetooth.UUID]string{\n\t\tbluetooth.ServiceUUIDCyclingSpeedAndCadence: \"Cycling Speed and Cadence\",\n\t\tbluetooth.ServiceUUIDCyclingPower: \"Cycling Power\",\n\t\tbluetooth.ServiceUUIDHeartRate: \"Heart Rate\",\n\t}\n\tKnownCharacteristicNames = map[bluetooth.UUID]string{\n\t\tbluetooth.CharacteristicUUIDCyclingPowerMeasurement: \"Cycling Power Measure\",\n\t\tbluetooth.CharacteristicUUIDCyclingPowerFeature: \"Cycling Power Feature\",\n\t\tbluetooth.CharacteristicUUIDHeartRateMeasurement: \"Heart Rate Measurement\",\n\t}\n)\n\ntype MetricKind int\n\nconst (\n\tMetricHeartRate = iota\n\tMetricCyclingPower\n\tMetricCyclingSpeed\n\tMetricCyclingCadence\n)\n\ntype DeviceMetric struct {\n\tkind MetricKind\n\tvalue int\n}\n\ntype MetricSink struct {\n}\n\ntype MetricSource struct {\n\tsinks []chan DeviceMetric\n\n\tsvc *bluetooth.DeviceService\n\tch *bluetooth.DeviceCharacteristic\n}\n\nfunc NewMetricSource(\n\tsvc *bluetooth.DeviceService,\n\tch *bluetooth.DeviceCharacteristic,\n) MetricSource {\n\treturn MetricSource{\n\t\tsinks: []chan DeviceMetric{},\n\t\tsvc: svc,\n\t\tch: ch,\n\t}\n}\n\nfunc (src *MetricSource) Name() string {\n\tif name, ok := KnownCharacteristicNames[src.ch.UUID()]; ok {\n\t\treturn name\n\t}\n\treturn fmt.Sprintf(\"<unknown: %s>\", src.ch.UUID().String())\n}\n\nfunc (src *MetricSource) AddSink(sink chan DeviceMetric) {\n\tsrc.sinks = append(src.sinks, sink)\n\n\t\/\/ Start listenening first time we add a sink\n\tif len(src.sinks) == 1 {\n\t\thandler := src.notificationHandler()\n\t\tsrc.ch.EnableNotifications(handler)\n\t}\n}\n\nfunc (src *MetricSource) notificationHandler() func([]byte) {\n\tswitch src.ch.UUID() {\n\tcase bluetooth.CharacteristicUUIDCyclingPowerMeasurement:\n\t\treturn src.handleCyclingPowerMeasurement\n\n\t\/\/ TODO\n\tcase bluetooth.CharacteristicUUIDCyclingPowerFeature:\n\tcase bluetooth.CharacteristicUUIDHeartRateMeasurement:\n\t\treturn src.handleHeartRateMeasurement\n\t}\n\n\treturn nil\n}\n\nfunc (src *MetricSource) emit(m DeviceMetric) {\n\tfor _, sink := range src.sinks {\n\t\tsink <- m\n\t}\n}\n\nconst (\n\t\/\/ BPM size, 0 if u8, 1 if u16\n\tHeartRateFlagSize = 1 << 0\n\n\t\/\/ 00 unsupported\n\t\/\/ 01 unsupported\n\t\/\/ 10 supported, not detected\n\t\/\/ 11 supported, detected\n\tHeartRateFlagContactStatus = (1 << 1) | (1 << 2)\n\n\tHeartRateFlagHasEnergyExpended = 1 << 3\n\tHeartRateFlagHasRRInterval = 1 << 4\n\n\t\/\/ bits 5-8 reserved\n)\n\nfunc (src *MetricSource) handleHeartRateMeasurement(buf []byte) {\n\t\/\/ malformed\n\tif len(buf) < 2 {\n\t\treturn\n\t}\n\n\tflag := buf[0]\n\n\tis16Bit := (flag & HeartRateFlagSize) != 0\n\tcontactStatus := (flag & HeartRateFlagContactStatus) >> 1\n\n\tcontactSupported := contactStatus&(0b10) != 0\n\tcontactFound := contactStatus&(0b01) != 0\n\n\t\/\/ No use sending this metric if the sensor isn't reading.\n\tif contactSupported && !contactFound {\n\t\treturn\n\t}\n\n\tvar hr int = int(buf[1])\n\tif is16Bit {\n\t\thr = (hr << 8) | int(buf[2])\n\t}\n\n\tsrc.emit(DeviceMetric{\n\t\tkind: MetricHeartRate,\n\t\tvalue: hr,\n\t})\n}\n\nconst (\n\tCyclingPowerFlagHasPedalPowerBalance = 1 << 0\n\tCyclingPowerFlagPedalPowerBalanceReference = 1 << 1\n\tCyclingPowerFlagHasAccumulatedTorque = 1 << 2\n\tCyclingPowerFlagAccumulatedTorqueSource = 1 << 3\n\tCyclingPowerFlagHasWheelRevolution = 1 << 4\n\tCyclingPowerFlagHasCrankRevolution = 1 << 5\n\tCyclingPowerFlagHasExtremeForceMagnitudes = 1 << 6\n\tCyclingPowerFlagHasExtremeTorqueMagnitudes = 1 << 7\n\tCyclingPowerFlagHasExtremeAngles = 1 << 8\n\tCyclingPowerFlagHasTopDeadSpotAngle = 1 << 9\n\tCyclingPowerFlagHasBottomDeadSpotAngle = 1 << 10\n\tCyclingPowerFlagHasAccumulatedEnergy = 1 << 11\n\tCyclingPowerFlagHasOffsetCompensationIndicator = 1 << 12\n\n\t\/\/ Bits 13-16 reserved\n)\n\n\/\/ Two flag bytes, followed by a 16 bit power reading. All subsequent\n\/\/ fields are optional, based on the flag bits set.\n\/\/\n\/\/ sint16 instantaneous_power watts with resolution 1\n\/\/ uint8 pedal_power_balance percentage with resolution 1\/2\n\/\/ uint16 accumulated_torque newton meters with resolution 1\/32\n\/\/ uint32 wheel_rev_cumulative unitless\n\/\/ uint16 wheel_rev_last_time seconds with resolution 1\/2048\n\/\/ uint16 crank_rev_cumulative unitless\n\/\/ uint16 crank_rev_last_time seconds with resolution 1\/1024\n\/\/ sint16 extreme_force_max_magn newtons with resolution 1\n\/\/ sint16 extreme_force_min_magn newtons with resolution 1\n\/\/ sint16 extreme_torque_max_magn newton meters with resolution 1\/32\n\/\/ sint16 extreme_torque_min_magn newton meters with resolution 1\/32\n\/\/ uint12 extreme_angles_max degrees with resolution 1\n\/\/ uint12 extreme_angles_min degrees with resolution 1\n\/\/ uint16 top_dead_spot_angle degrees with resolution 1\n\/\/ uint16 bottom_dead_spot_angle degrees with resolution 1\n\/\/ uint16 accumulated_energy kilojoules with resolution 1\nfunc (src *MetricSource) handleCyclingPowerMeasurement(buf []byte) {\n\t\/\/ malformed\n\tif len(buf) < 2 {\n\t\treturn\n\t}\n\n\tflags := uint16(buf[0]) | uint16(buf[1]<<8)\n\n\twatts := int16(buf[2]<<8) | int16(buf[3])\n\tfmt.Printf(\"power measure: %d watts, flags=%b\\n\", watts, flags)\n\n\tif flags&CyclingPowerFlagHasAccumulatedEnergy != 0 {\n\t\tfmt.Println(\"also have energy\")\n\t}\n\n}\n\nfunc scanDevices() {\n\tadapter := bluetooth.DefaultAdapter\n\tfmt.Println(\"Starting device scan...\")\n\n\tif err := adapter.Enable(); err != nil {\n\t\tfmt.Println(\"FATAL: Failed to enable BLE\")\n\t\tpanic(err)\n\t}\n\n\t\/\/ Keep track of addresses we've already looked ad\n\taddrsChecked := map[string]bool{}\n\n\tonScanResult := func(bt *bluetooth.Adapter, result bluetooth.ScanResult) {\n\t\tif _, seen := addrsChecked[result.Address.String()]; seen {\n\t\t\treturn\n\t\t}\n\t\taddrsChecked[result.Address.String()] = true\n\n\t\tserviceNames := []string{}\n\t\tfor _, s := range KnownServiceUUIDs {\n\t\t\tif !result.HasServiceUUID(s) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tserviceNames = append(serviceNames, KnownServiceNames[s])\n\t\t}\n\n\t\t\/\/ No matching services, skip this device.\n\t\tif len(serviceNames) == 0 {\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Printf(\"%s %-20s %-20s [RSSI:%d]\\n\",\n\t\t\tresult.Address.String(),\n\t\t\tresult.LocalName(),\n\t\t\tstrings.Join(serviceNames, \",\"),\n\t\t\tresult.RSSI,\n\t\t)\n\t}\n\n\tif err := adapter.Scan(onScanResult); err != nil {\n\t\tfmt.Println(\"FATAL: Failed to scan for devices\")\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(\"Scan complete.\")\n}\n\nvar (\n\tflagScan = flag.Bool(\"scan\", false, \"scan for nearby devices\")\n\tflagHeartRateAddr = flag.String(\"hr\", \"\", \"address for heart rate device\")\n\tflagCyclingPowerAddr = flag.String(\"power\", \"\", \"address for cycling power device\")\n\tflagCyclingSpeedCadenceAddr = flag.String(\"speed\", \"\", \"address for cycling speed\/cadence device\")\n)\n\nfunc init() {\n\tflag.Parse()\n}\n\nfunc main() {\n\tif *flagScan {\n\t\tscanDevices()\n\t\treturn\n\t}\n\n\tadapter := bluetooth.DefaultAdapter\n\tif err := adapter.Enable(); err != nil {\n\t\tfmt.Println(\"FATAL: Failed to enable BLE\")\n\t\tpanic(err)\n\t}\n\n\tdeviceChan := make(chan *bluetooth.Device)\n\n\twg := sync.WaitGroup{}\n\tconnectLock := sync.Mutex{}\n\n\tconnectRetry := func(addr string) {\n\t\tuuid, err := bluetooth.ParseUUID(addr)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"FATAL: bad UUID given: <%s>\\n\", addr)\n\t\t\tpanic(err)\n\t\t}\n\n\t\tparams := bluetooth.ConnectionParams{\n\t\t\tConnectionTimeout: bluetooth.Duration(100),\n\t\t}\n\n\t\t\/\/ TODO: We should add a time bound for this\n\t\tfor {\n\t\t\t\/\/ TODO: tiny-go\/bluetooth's Connect is not\n\t\t\t\/\/ thread-safe. Multiple concurrent calls will race and\n\t\t\t\/\/ return the wrong data to the wrong caller.\n\t\t\tconnectLock.Lock()\n\t\t\tdefer connectLock.Unlock()\n\n\t\t\t\/\/ TODO: bluetooth.Address bit is not cross-platform.\n\t\t\tdevice, err := adapter.Connect(bluetooth.Address{uuid}, params)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdeviceChan <- device\n\t\t\tbreak\n\t\t}\n\n\t\twg.Done()\n\t}\n\n\tif *flagHeartRateAddr != \"\" {\n\t\twg.Add(1)\n\t\tgo connectRetry(*flagHeartRateAddr)\n\t}\n\tif *flagCyclingPowerAddr != \"\" {\n\t\twg.Add(1)\n\t\tgo connectRetry(*flagCyclingPowerAddr)\n\t}\n\tif *flagCyclingSpeedCadenceAddr != \"\" {\n\t\twg.Add(1)\n\t\tgo connectRetry(*flagCyclingSpeedCadenceAddr)\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(deviceChan)\n\t}()\n\n\tmetricsChan := make(chan DeviceMetric)\n\tgo func() {\n\t\tfor m := range metricsChan {\n\t\t\tfmt.Printf(\"Metric: %+v\\n\", m)\n\t\t}\n\t}()\n\n\tfor device := range deviceChan {\n\t\tfmt.Println(\"Initializing device...\")\n\t\tservices, err := device.DiscoverServices(KnownServiceUUIDs)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tfor _, service := range services {\n\t\t\tif name, ok := KnownServiceNames[service.UUID()]; ok {\n\t\t\t\tfmt.Printf(\"\\tservice: %s\\n\", name)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"\\tservice: unknown <%+v>\\n\", service.UUID().String())\n\t\t\t}\n\n\t\t\tknownChars := KnownServiceCharacteristicUUIDs[service.UUID()]\n\t\t\tchars, err := service.DiscoverCharacteristics(knownChars)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tfor _, char := range chars {\n\t\t\t\tname := KnownCharacteristicNames[char.UUID()]\n\t\t\t\tfmt.Printf(\"\\t\\tcharacteristic: %s\\n\", name)\n\n\t\t\t\tsrc := NewMetricSource(&service, &char)\n\t\t\t\tsrc.AddSink(metricsChan)\n\t\t\t}\n\t\t}\n\t}\n\n\tprintln(\"that's all!\")\n\tselect {}\n}\n<commit_msg>Emit power metric<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"tinygo.org\/x\/bluetooth\"\n)\n\nvar KnownServiceUUIDs = []bluetooth.UUID{\n\tbluetooth.ServiceUUIDCyclingSpeedAndCadence,\n\tbluetooth.ServiceUUIDCyclingPower,\n\tbluetooth.ServiceUUIDHeartRate,\n\n\t\/\/ General controllable device, seems more involved.\n\t\/\/ bluetooth.ServiceUUIDFitnessMachine,\n}\n\nvar KnownServiceCharacteristicUUIDs = map[bluetooth.UUID][]bluetooth.UUID{\n\t\/\/ https:\/\/www.bluetooth.com\/specifications\/specs\/cycling-power-service-1-1\/\n\tbluetooth.ServiceUUIDCyclingPower: {\n\t\tbluetooth.CharacteristicUUIDCyclingPowerMeasurement,\n\t\tbluetooth.CharacteristicUUIDCyclingPowerFeature,\n\t\t\/\/ TODO:\n\t\t\/\/ Not a standardized characteristic, but this is offered by KICKR.\n\t\t\/\/ See GoldenCheetah source for some use examples:\n\t\t\/\/ https:\/\/github.com\/GoldenCheetah\/GoldenCheetah\/blob\/master\/src\/Train\/BT40Device.cpp\n\t\t\/\/\n\t\t\/\/ var WahooKickrControlCharacteristic = bluetooth.ParseUUID(\n\t\t\/\/ \t\"a026e005-0a7d-4ab3-97fa-f1500f9feb8b\"\n\t\t\/\/ )\n\t\t\/\/ TODO: Also, how does this one work?\n\t\t\/\/ bluetooth.CharacteristicUUIDCyclingPowerControlPoint,\n\t},\n\tbluetooth.ServiceUUIDHeartRate: {\n\t\tbluetooth.CharacteristicUUIDHeartRateMeasurement,\n\t},\n}\nvar (\n\tKnownServiceNames = map[bluetooth.UUID]string{\n\t\tbluetooth.ServiceUUIDCyclingSpeedAndCadence: \"Cycling Speed and Cadence\",\n\t\tbluetooth.ServiceUUIDCyclingPower: \"Cycling Power\",\n\t\tbluetooth.ServiceUUIDHeartRate: \"Heart Rate\",\n\t}\n\tKnownCharacteristicNames = map[bluetooth.UUID]string{\n\t\tbluetooth.CharacteristicUUIDCyclingPowerMeasurement: \"Cycling Power Measure\",\n\t\tbluetooth.CharacteristicUUIDCyclingPowerFeature: \"Cycling Power Feature\",\n\t\tbluetooth.CharacteristicUUIDHeartRateMeasurement: \"Heart Rate Measurement\",\n\t}\n)\n\ntype MetricKind int\n\nconst (\n\tMetricHeartRate = iota\n\tMetricCyclingPower\n\tMetricCyclingSpeed\n\tMetricCyclingCadence\n)\n\ntype DeviceMetric struct {\n\tkind MetricKind\n\tvalue int\n}\n\ntype MetricSink struct {\n}\n\ntype MetricSource struct {\n\tsinks []chan DeviceMetric\n\n\tsvc *bluetooth.DeviceService\n\tch *bluetooth.DeviceCharacteristic\n}\n\nfunc NewMetricSource(\n\tsvc *bluetooth.DeviceService,\n\tch *bluetooth.DeviceCharacteristic,\n) MetricSource {\n\treturn MetricSource{\n\t\tsinks: []chan DeviceMetric{},\n\t\tsvc: svc,\n\t\tch: ch,\n\t}\n}\n\nfunc (src *MetricSource) Name() string {\n\tif name, ok := KnownCharacteristicNames[src.ch.UUID()]; ok {\n\t\treturn name\n\t}\n\treturn fmt.Sprintf(\"<unknown: %s>\", src.ch.UUID().String())\n}\n\nfunc (src *MetricSource) AddSink(sink chan DeviceMetric) {\n\tsrc.sinks = append(src.sinks, sink)\n\n\t\/\/ Start listenening first time we add a sink\n\tif len(src.sinks) == 1 {\n\t\thandler := src.notificationHandler()\n\t\tsrc.ch.EnableNotifications(handler)\n\t}\n}\n\nfunc (src *MetricSource) notificationHandler() func([]byte) {\n\tswitch src.ch.UUID() {\n\tcase bluetooth.CharacteristicUUIDCyclingPowerMeasurement:\n\t\treturn src.handleCyclingPowerMeasurement\n\n\t\/\/ TODO\n\tcase bluetooth.CharacteristicUUIDCyclingPowerFeature:\n\tcase bluetooth.CharacteristicUUIDHeartRateMeasurement:\n\t\treturn src.handleHeartRateMeasurement\n\t}\n\n\treturn nil\n}\n\nfunc (src *MetricSource) emit(m DeviceMetric) {\n\tfor _, sink := range src.sinks {\n\t\tsink <- m\n\t}\n}\n\nconst (\n\t\/\/ BPM size, 0 if u8, 1 if u16\n\tHeartRateFlagSize = 1 << 0\n\n\t\/\/ 00 unsupported\n\t\/\/ 01 unsupported\n\t\/\/ 10 supported, not detected\n\t\/\/ 11 supported, detected\n\tHeartRateFlagContactStatus = (1 << 1) | (1 << 2)\n\n\tHeartRateFlagHasEnergyExpended = 1 << 3\n\tHeartRateFlagHasRRInterval = 1 << 4\n\n\t\/\/ bits 5-8 reserved\n)\n\nfunc (src *MetricSource) handleHeartRateMeasurement(buf []byte) {\n\t\/\/ malformed\n\tif len(buf) < 2 {\n\t\treturn\n\t}\n\n\tflag := buf[0]\n\n\tis16Bit := (flag & HeartRateFlagSize) != 0\n\tcontactStatus := (flag & HeartRateFlagContactStatus) >> 1\n\n\tcontactSupported := contactStatus&(0b10) != 0\n\tcontactFound := contactStatus&(0b01) != 0\n\n\t\/\/ No use sending this metric if the sensor isn't reading.\n\tif contactSupported && !contactFound {\n\t\treturn\n\t}\n\n\tvar hr int = int(buf[1])\n\tif is16Bit {\n\t\thr = (hr << 8) | int(buf[2])\n\t}\n\n\tsrc.emit(DeviceMetric{\n\t\tkind: MetricHeartRate,\n\t\tvalue: hr,\n\t})\n}\n\nconst (\n\tCyclingPowerFlagHasPedalPowerBalance = 1 << 0\n\tCyclingPowerFlagPedalPowerBalanceReference = 1 << 1\n\tCyclingPowerFlagHasAccumulatedTorque = 1 << 2\n\tCyclingPowerFlagAccumulatedTorqueSource = 1 << 3\n\tCyclingPowerFlagHasWheelRevolution = 1 << 4\n\tCyclingPowerFlagHasCrankRevolution = 1 << 5\n\tCyclingPowerFlagHasExtremeForceMagnitudes = 1 << 6\n\tCyclingPowerFlagHasExtremeTorqueMagnitudes = 1 << 7\n\tCyclingPowerFlagHasExtremeAngles = 1 << 8\n\tCyclingPowerFlagHasTopDeadSpotAngle = 1 << 9\n\tCyclingPowerFlagHasBottomDeadSpotAngle = 1 << 10\n\tCyclingPowerFlagHasAccumulatedEnergy = 1 << 11\n\tCyclingPowerFlagHasOffsetCompensationIndicator = 1 << 12\n\n\t\/\/ Bits 13-16 reserved\n)\n\n\/\/ Two flag bytes, followed by a 16 bit power reading. All subsequent\n\/\/ fields are optional, based on the flag bits set.\n\/\/\n\/\/ sint16 instantaneous_power watts with resolution 1\n\/\/ uint8 pedal_power_balance percentage with resolution 1\/2\n\/\/ uint16 accumulated_torque newton meters with resolution 1\/32\n\/\/ uint32 wheel_rev_cumulative unitless\n\/\/ uint16 wheel_rev_last_time seconds with resolution 1\/2048\n\/\/ uint16 crank_rev_cumulative unitless\n\/\/ uint16 crank_rev_last_time seconds with resolution 1\/1024\n\/\/ sint16 extreme_force_max_magn newtons with resolution 1\n\/\/ sint16 extreme_force_min_magn newtons with resolution 1\n\/\/ sint16 extreme_torque_max_magn newton meters with resolution 1\/32\n\/\/ sint16 extreme_torque_min_magn newton meters with resolution 1\/32\n\/\/ uint12 extreme_angles_max degrees with resolution 1\n\/\/ uint12 extreme_angles_min degrees with resolution 1\n\/\/ uint16 top_dead_spot_angle degrees with resolution 1\n\/\/ uint16 bottom_dead_spot_angle degrees with resolution 1\n\/\/ uint16 accumulated_energy kilojoules with resolution 1\nfunc (src *MetricSource) handleCyclingPowerMeasurement(buf []byte) {\n\t\/\/ malformed\n\tif len(buf) < 2 {\n\t\treturn\n\t}\n\n\tflags := uint16(buf[0]) | uint16(buf[1]<<8)\n\n\tpowerWatts := int16(buf[2]<<8) | int16(buf[3])\n\n\t\/\/ Power meters will send packets even if nothing's happening.\n\tif powerWatts == 0 {\n\t\treturn\n\t}\n\n\tsrc.emit(DeviceMetric{\n\t\tkind: MetricCyclingPower,\n\t\tvalue: int(powerWatts),\n\t})\n\n\tif flags&CyclingPowerFlagHasAccumulatedEnergy != 0 {\n\t\tfmt.Println(\"also have energy\")\n\t}\n\n}\n\nfunc scanDevices() {\n\tadapter := bluetooth.DefaultAdapter\n\tfmt.Println(\"Starting device scan...\")\n\n\tif err := adapter.Enable(); err != nil {\n\t\tfmt.Println(\"FATAL: Failed to enable BLE\")\n\t\tpanic(err)\n\t}\n\n\t\/\/ Keep track of addresses we've already looked ad\n\taddrsChecked := map[string]bool{}\n\n\tonScanResult := func(bt *bluetooth.Adapter, result bluetooth.ScanResult) {\n\t\tif _, seen := addrsChecked[result.Address.String()]; seen {\n\t\t\treturn\n\t\t}\n\t\taddrsChecked[result.Address.String()] = true\n\n\t\tserviceNames := []string{}\n\t\tfor _, s := range KnownServiceUUIDs {\n\t\t\tif !result.HasServiceUUID(s) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tserviceNames = append(serviceNames, KnownServiceNames[s])\n\t\t}\n\n\t\t\/\/ No matching services, skip this device.\n\t\tif len(serviceNames) == 0 {\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Printf(\"%s %-20s %-20s [RSSI:%d]\\n\",\n\t\t\tresult.Address.String(),\n\t\t\tresult.LocalName(),\n\t\t\tstrings.Join(serviceNames, \",\"),\n\t\t\tresult.RSSI,\n\t\t)\n\t}\n\n\tif err := adapter.Scan(onScanResult); err != nil {\n\t\tfmt.Println(\"FATAL: Failed to scan for devices\")\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(\"Scan complete.\")\n}\n\nvar (\n\tflagScan = flag.Bool(\"scan\", false, \"scan for nearby devices\")\n\tflagHeartRateAddr = flag.String(\"hr\", \"\", \"address for heart rate device\")\n\tflagCyclingPowerAddr = flag.String(\"power\", \"\", \"address for cycling power device\")\n\tflagCyclingSpeedCadenceAddr = flag.String(\"speed\", \"\", \"address for cycling speed\/cadence device\")\n)\n\nfunc init() {\n\tflag.Parse()\n}\n\nfunc main() {\n\tif *flagScan {\n\t\tscanDevices()\n\t\treturn\n\t}\n\n\tadapter := bluetooth.DefaultAdapter\n\tif err := adapter.Enable(); err != nil {\n\t\tfmt.Println(\"FATAL: Failed to enable BLE\")\n\t\tpanic(err)\n\t}\n\n\tdeviceChan := make(chan *bluetooth.Device)\n\n\twg := sync.WaitGroup{}\n\tconnectLock := sync.Mutex{}\n\n\tconnectRetry := func(addr string) {\n\t\tuuid, err := bluetooth.ParseUUID(addr)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"FATAL: bad UUID given: <%s>\\n\", addr)\n\t\t\tpanic(err)\n\t\t}\n\n\t\tparams := bluetooth.ConnectionParams{\n\t\t\tConnectionTimeout: bluetooth.Duration(100),\n\t\t}\n\n\t\t\/\/ TODO: We should add a time bound for this\n\t\tfor {\n\t\t\t\/\/ TODO: tiny-go\/bluetooth's Connect is not\n\t\t\t\/\/ thread-safe. Multiple concurrent calls will race and\n\t\t\t\/\/ return the wrong data to the wrong caller.\n\t\t\tconnectLock.Lock()\n\t\t\tdefer connectLock.Unlock()\n\n\t\t\t\/\/ TODO: bluetooth.Address bit is not cross-platform.\n\t\t\tdevice, err := adapter.Connect(bluetooth.Address{uuid}, params)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdeviceChan <- device\n\t\t\tbreak\n\t\t}\n\n\t\twg.Done()\n\t}\n\n\tif *flagHeartRateAddr != \"\" {\n\t\twg.Add(1)\n\t\tgo connectRetry(*flagHeartRateAddr)\n\t}\n\tif *flagCyclingPowerAddr != \"\" {\n\t\twg.Add(1)\n\t\tgo connectRetry(*flagCyclingPowerAddr)\n\t}\n\tif *flagCyclingSpeedCadenceAddr != \"\" {\n\t\twg.Add(1)\n\t\tgo connectRetry(*flagCyclingSpeedCadenceAddr)\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(deviceChan)\n\t}()\n\n\tmetricsChan := make(chan DeviceMetric)\n\tgo func() {\n\t\tfor m := range metricsChan {\n\t\t\tfmt.Printf(\"Metric: %+v\\n\", m)\n\t\t}\n\t}()\n\n\tfor device := range deviceChan {\n\t\tfmt.Println(\"Initializing device...\")\n\t\tservices, err := device.DiscoverServices(KnownServiceUUIDs)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tfor _, service := range services {\n\t\t\tif name, ok := KnownServiceNames[service.UUID()]; ok {\n\t\t\t\tfmt.Printf(\"\\tservice: %s\\n\", name)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"\\tservice: unknown <%+v>\\n\", service.UUID().String())\n\t\t\t}\n\n\t\t\tknownChars := KnownServiceCharacteristicUUIDs[service.UUID()]\n\t\t\tchars, err := service.DiscoverCharacteristics(knownChars)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tfor _, char := range chars {\n\t\t\t\tname := KnownCharacteristicNames[char.UUID()]\n\t\t\t\tfmt.Printf(\"\\t\\tcharacteristic: %s\\n\", name)\n\n\t\t\t\tsrc := NewMetricSource(&service, &char)\n\t\t\t\tsrc.AddSink(metricsChan)\n\t\t\t}\n\t\t}\n\t}\n\n\tprintln(\"that's all!\")\n\tselect {}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Title:顾问通话总览列表\n\/\/\n\/\/ Description:\n\/\/\n\/\/ Author:black\n\/\/\n\/\/ Createtime:2013-09-26 15:50\n\/\/\n\/\/ Version:1.0\n\/\/\n\/\/ 修改历史:版本号 修改日期 修改人 修改说明\n\/\/\n\/\/ 1.0 2013-09-26 15:50 black 创建文档\npackage server\n\nimport (\n\t\"fmt\"\n\t\"github.com\/hjqhezgh\/commonlib\"\n\t\"github.com\/hjqhezgh\/lessgo\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\n\/\/顾问分页数据服务\nfunc ConsultantPhoneListAction(w http.ResponseWriter, r *http.Request) {\n\n\tm := make(map[string]interface{})\n\n\temployee := lessgo.GetCurrentEmployee(r)\n\n\tif employee.UserId == \"\" {\n\t\tlessgo.Log.Warn(\"用户未登陆\")\n\t\tm[\"success\"] = false\n\t\tm[\"code\"] = 100\n\t\tm[\"msg\"] = \"用户未登陆\"\n\t\tcommonlib.OutputJson(w, m, \" \")\n\t\treturn\n\t}\n\n\terr := r.ParseForm()\n\n\tif err != nil {\n\t\tm[\"success\"] = false\n\t\tm[\"code\"] = 100\n\t\tm[\"msg\"] = \"出现错误,请联系IT部门,错误信息:\" + err.Error()\n\t\tcommonlib.OutputJson(w, m, \" \")\n\t\treturn\n\t}\n\n\tpageNoString := r.FormValue(\"page\")\n\tpageNo := 1\n\tif pageNoString != \"\" {\n\t\tpageNo, err = strconv.Atoi(pageNoString)\n\t\tif err != nil {\n\t\t\tpageNo = 1\n\t\t\tlessgo.Log.Warn(\"错误的pageNo:\", pageNo)\n\t\t}\n\t}\n\n\tpageSizeString := r.FormValue(\"rows\")\n\tpageSize := 10\n\tif pageSizeString != \"\" {\n\t\tpageSize, err = strconv.Atoi(pageSizeString)\n\t\tif err != nil {\n\t\t\tlessgo.Log.Warn(\"错误的pageSize:\", pageSize)\n\t\t}\n\t}\n\n\tdataType := \"\"\n\n\troleIds := strings.Split(employee.RoleId, \",\")\n\n\tfor _, roleId := range roleIds {\n\t\tif roleId == \"1\" || roleId == \"3\" {\n\t\t\tdataType = \"all\"\n\t\t\tbreak\n\t\t} else if roleId == \"2\" {\n\t\t\tdataType = \"center\"\n\t\t\tbreak\n\t\t} else{\n\t\t\tdataType = \"self\"\n\t\t}\n\t}\n\n\tcid := r.FormValue(\"cid-eq\")\n\tname := r.FormValue(\"name-like\")\n\tyear := r.FormValue(\"year-eq\")\n\tmonth := r.FormValue(\"month-eq\")\n\tweek := r.FormValue(\"week-eq\")\n\tstartTime := r.FormValue(\"start_time-eq\")\n\n\tst := \"\"\n\tet := \"\"\n\tflag := true\n\n\tif startTime != \"\" {\n\t\tst = startTime + \" 00:00:00\"\n\t\tet = startTime + \" 23:59:59\"\n\t} else {\n\t\tif week != \"\" && month != \"\" && year != \"\" {\n\t\t\tst, et, flag = lessgo.FindRangeTimeDim(\"\", \"\", year+month+week)\n\t\t} else if month != \"\" && year != \"\" {\n\t\t\tst, et, flag = lessgo.FindRangeTimeDim(\"\", year+month, \"\")\n\t\t} else if year != \"\" {\n\t\t\tst, et, flag = lessgo.FindRangeTimeDim(year, \"\", \"\")\n\t\t}\n\t}\n\n\tparams := []interface{}{}\n\n\tsql := \"\"\n\tcountSql := \"\"\n\n\tif dataType == \"all\" {\n\n\t\tsql += \"select c.name,c.cid,e.user_id,e.really_name,phone_count.num a,rank.rowNo b,phone_count.num c,phone_count.num d,phone_count.localphone from (select count(*) num,localphone,cid from audio where remotephone!='' and remotephone is not null \"\n\n\t\tif cid != \"\" {\n\t\t\tsql += \" and cid=? \"\n\t\t\tparams = append(params, cid)\n\t\t}\n\n\t\tif flag {\n\t\t\tif st != \"\" && et != \"\" {\n\t\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\t\tparams = append(params, st)\n\t\t\t\tparams = append(params, et)\n\t\t\t}\n\t\t} else { \/\/找不到相应的时间区间\n\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\tparams = append(params, \"2000-01-01 00:00:00\")\n\t\t\tparams = append(params, \"2000-01-01 00:00:01\")\n\t\t}\n\n\t\tsql += \" group by localphone) phone_count left join center c on c.cid=phone_count.cid left join employee e on e.phone_in_center=phone_count.localphone left join (select a.*,(@rowNum:=@rowNum+1) as rowNo from (select count(*) num,localphone from audio where remotephone!='' and remotephone is not null \"\n\n\t\tif flag {\n\t\t\tif st != \"\" && et != \"\" {\n\t\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\t\tparams = append(params, st)\n\t\t\t\tparams = append(params, et)\n\t\t\t}\n\t\t} else { \/\/找不到相应的时间区间\n\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\tparams = append(params, \"2000-01-01 00:00:00\")\n\t\t\tparams = append(params, \"2000-01-01 00:00:01\")\n\t\t}\n\n\t\tsql += \" group by localphone order by num desc) a,(Select (@rowNum :=0) ) b)rank on rank.localphone=phone_count.localphone \"\n\n\t\tif name != \"\" {\n\t\t\tsql += \" where e.really_name like ? \"\n\t\t\tparams = append(params, \"%\"+name+\"%\")\n\t\t}\n\n\t\tsql += \" order by rank.rowNo \"\n\n\t} else if dataType == \"center\" {\n\n\t\tsql += \"select c.name,c.cid,e.user_id,e.really_name,phone_count.num a,rank.rowNo b,phone_count.num c,phone_count.num d,phone_count.localphone from (select count(*) num,localphone,cid from audio where cid=? and remotephone!='' and remotephone is not null \"\n\n\t\tuserId, _ := strconv.Atoi(employee.UserId)\n\t\t_employee, err := FindEmployeeById(userId)\n\t\tif err != nil {\n\t\t\tm[\"success\"] = false\n\t\t\tm[\"code\"] = 100\n\t\t\tm[\"msg\"] = \"出现错误,请联系IT部门,错误信息:\" + err.Error()\n\t\t\tcommonlib.OutputJson(w, m, \" \")\n\t\t\treturn\n\t\t}\n\t\tparams = append(params, _employee.CenterId)\n\n\t\tif flag {\n\t\t\tif st != \"\" && et != \"\" {\n\t\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\t\tparams = append(params, st)\n\t\t\t\tparams = append(params, et)\n\t\t\t}\n\t\t} else { \/\/找不到相应的时间区间\n\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\tparams = append(params, \"2000-01-01 00:00:00\")\n\t\t\tparams = append(params, \"2000-01-01 00:00:01\")\n\t\t}\n\n\t\tsql += \" group by localphone) phone_count left join center c on c.cid=phone_count.cid left join employee e on e.phone_in_center=phone_count.localphone left join (select a.*,(@rowNum:=@rowNum+1) as rowNo from (select count(*) num,localphone from audio where remotephone!='' and remotephone is not null \"\n\n\t\tif flag {\n\t\t\tif st != \"\" && et != \"\" {\n\t\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\t\tparams = append(params, st)\n\t\t\t\tparams = append(params, et)\n\t\t\t}\n\t\t} else { \/\/找不到相应的时间区间\n\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\tparams = append(params, \"2000-01-01 00:00:00\")\n\t\t\tparams = append(params, \"2000-01-01 00:00:01\")\n\t\t}\n\n\t\tsql += \" group by localphone order by num desc) a,(Select (@rowNum :=0) ) b)rank on rank.localphone=phone_count.localphone \"\n\n\t\tif name != \"\" {\n\t\t\tsql += \" where e.really_name like ? \"\n\t\t\tparams = append(params, \"%\"+name+\"%\")\n\t\t}\n\n\t\tsql += \" order by rank.rowNo \"\n\n\t} else if dataType == \"self\" {\n\n\t\tsql += \"select c.name,c.cid,e.user_id,e.really_name,phone_count.num a,rank.rowNo b,phone_count.num c,phone_count.num d,phone_count.localphone from (select count(*) num,localphone,cid from audio where remotephone!='' and remotephone is not null \"\n\n\t\tif flag {\n\t\t\tif st != \"\" && et != \"\" {\n\t\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\t\tparams = append(params, st)\n\t\t\t\tparams = append(params, et)\n\t\t\t}\n\t\t} else { \/\/找不到相应的时间区间\n\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\tparams = append(params, \"2000-01-01 00:00:00\")\n\t\t\tparams = append(params, \"2000-01-01 00:00:01\")\n\t\t}\n\n\t\tsql += \" group by localphone) phone_count left join center c on c.cid=phone_count.cid left join employee e on e.phone_in_center=phone_count.localphone left join (select a.*,(@rowNum:=@rowNum+1) as rowNo from (select count(*) num,localphone from audio where remotephone!='' and remotephone is not null \"\n\n\t\tif flag {\n\t\t\tif st != \"\" && et != \"\" {\n\t\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\t\tparams = append(params, st)\n\t\t\t\tparams = append(params, et)\n\t\t\t}\n\t\t} else { \/\/找不到相应的时间区间\n\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\tparams = append(params, \"2000-01-01 00:00:00\")\n\t\t\tparams = append(params, \"2000-01-01 00:00:01\")\n\t\t}\n\n\t\tsql += \" group by localphone order by num desc) a,(Select (@rowNum :=0) ) b)rank on rank.localphone=phone_count.localphone \"\n\n\t\tsql += \" where e.user_id=? \"\n\n\t\tparams = append(params, employee.UserId)\n\n\t\tsql += \" order by rank.rowNo \"\n\t}\n\n\tcountSql = \"select count(1) from (\" + sql + \") num\"\n\n\tlessgo.Log.Debug(countSql)\n\n\tdb := lessgo.GetMySQL()\n\tdefer db.Close()\n\n\trows, err := db.Query(countSql, params...)\n\n\tif err != nil {\n\t\tlessgo.Log.Warn(err.Error())\n\t\tm[\"success\"] = false\n\t\tm[\"code\"] = 100\n\t\tm[\"msg\"] = \"系统发生错误,请联系IT部门\"\n\t\tcommonlib.OutputJson(w, m, \" \")\n\t\treturn\n\t}\n\n\ttotalNum := 0\n\n\tif rows.Next() {\n\t\terr := rows.Scan(&totalNum)\n\n\t\tif err != nil {\n\t\t\tlessgo.Log.Warn(err.Error())\n\t\t\tm[\"success\"] = false\n\t\t\tm[\"code\"] = 100\n\t\t\tm[\"msg\"] = \"系统发生错误,请联系IT部门\"\n\t\t\tcommonlib.OutputJson(w, m, \" \")\n\t\t\treturn\n\t\t}\n\t}\n\n\ttotalPage := int(math.Ceil(float64(totalNum) \/ float64(pageSize)))\n\n\tcurrPageNo := pageNo\n\n\tif currPageNo > totalPage {\n\t\tcurrPageNo = totalPage\n\t}\n\n\tlessgo.Log.Debug(sql + \" limit ?,?\")\n\n\tparams = append(params, (currPageNo-1)*pageSize)\n\tparams = append(params, pageSize)\n\n\trows, err = db.Query(sql+\" limit ?,?\", params...)\n\n\tif err != nil {\n\t\tlessgo.Log.Warn(err.Error())\n\t\tm[\"success\"] = false\n\t\tm[\"code\"] = 100\n\t\tm[\"msg\"] = \"系统发生错误,请联系IT部门\"\n\t\tcommonlib.OutputJson(w, m, \" \")\n\t\treturn\n\t}\n\n\tobjects := []interface{}{}\n\n\tfor rows.Next() {\n\n\t\tmodel := new(lessgo.Model)\n\t\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\t\tmodel.Id = r.Intn(1000)\n\t\tmodel.Props = []*lessgo.Prop{}\n\n\t\tfillObjects := []interface{}{}\n\n\t\tfor i := 0; i < 9; i++ {\n\t\t\tprop := new(lessgo.Prop)\n\t\t\tprop.Name = fmt.Sprint(i)\n\t\t\tprop.Value = \"\"\n\t\t\tfillObjects = append(fillObjects, &prop.Value)\n\t\t\tmodel.Props = append(model.Props, prop)\n\t\t}\n\n\t\terr = commonlib.PutRecord(rows, fillObjects...)\n\n\t\tif err != nil {\n\t\t\tlessgo.Log.Warn(err.Error())\n\t\t\tm[\"success\"] = false\n\t\t\tm[\"code\"] = 100\n\t\t\tm[\"msg\"] = \"系统发生错误,请联系IT部门\"\n\t\t\tcommonlib.OutputJson(w, m, \" \")\n\t\t\treturn\n\t\t}\n\n\t\tobjects = append(objects, model)\n\t}\n\n\tpageData := commonlib.BulidTraditionPage(currPageNo, pageSize, totalNum, objects)\n\n\tm[\"PageData\"] = pageData\n\tm[\"DataLength\"] = len(pageData.Datas) - 1\n\tif len(pageData.Datas) > 0 {\n\t\tm[\"FieldLength\"] = len(pageData.Datas[0].(*lessgo.Model).Props) - 1\n\t}\n\n\tcommonlib.RenderTemplate(w, r, \"entity_page.json\", m, template.FuncMap{\"getPropValue\": lessgo.GetPropValue, \"compareInt\": lessgo.CompareInt, \"dealJsonString\": lessgo.DealJsonString}, \"..\/lessgo\/template\/entity_page.json\")\n\n}\n<commit_msg>修改通话模块权限<commit_after>\/\/ Title:顾问通话总览列表\n\/\/\n\/\/ Description:\n\/\/\n\/\/ Author:black\n\/\/\n\/\/ Createtime:2013-09-26 15:50\n\/\/\n\/\/ Version:1.0\n\/\/\n\/\/ 修改历史:版本号 修改日期 修改人 修改说明\n\/\/\n\/\/ 1.0 2013-09-26 15:50 black 创建文档\npackage server\n\nimport (\n\t\"fmt\"\n\t\"github.com\/hjqhezgh\/commonlib\"\n\t\"github.com\/hjqhezgh\/lessgo\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\n\/\/顾问分页数据服务\nfunc ConsultantPhoneListAction(w http.ResponseWriter, r *http.Request) {\n\n\tm := make(map[string]interface{})\n\n\temployee := lessgo.GetCurrentEmployee(r)\n\n\tif employee.UserId == \"\" {\n\t\tlessgo.Log.Warn(\"用户未登陆\")\n\t\tm[\"success\"] = false\n\t\tm[\"code\"] = 100\n\t\tm[\"msg\"] = \"用户未登陆\"\n\t\tcommonlib.OutputJson(w, m, \" \")\n\t\treturn\n\t}\n\n\terr := r.ParseForm()\n\n\tif err != nil {\n\t\tm[\"success\"] = false\n\t\tm[\"code\"] = 100\n\t\tm[\"msg\"] = \"出现错误,请联系IT部门,错误信息:\" + err.Error()\n\t\tcommonlib.OutputJson(w, m, \" \")\n\t\treturn\n\t}\n\n\tpageNoString := r.FormValue(\"page\")\n\tpageNo := 1\n\tif pageNoString != \"\" {\n\t\tpageNo, err = strconv.Atoi(pageNoString)\n\t\tif err != nil {\n\t\t\tpageNo = 1\n\t\t\tlessgo.Log.Warn(\"错误的pageNo:\", pageNo)\n\t\t}\n\t}\n\n\tpageSizeString := r.FormValue(\"rows\")\n\tpageSize := 10\n\tif pageSizeString != \"\" {\n\t\tpageSize, err = strconv.Atoi(pageSizeString)\n\t\tif err != nil {\n\t\t\tlessgo.Log.Warn(\"错误的pageSize:\", pageSize)\n\t\t}\n\t}\n\n\tdataType := \"\"\n\n\troleIds := strings.Split(employee.RoleId, \",\")\n\n\tfor _, roleId := range roleIds {\n\t\tif roleId == \"1\" || roleId == \"3\" || roleId == \"6\" || roleId==\"10\"{\n\t\t\tdataType = \"all\"\n\t\t\tbreak\n\t\t} else if roleId == \"2\" {\n\t\t\tdataType = \"center\"\n\t\t\tbreak\n\t\t} else{\n\t\t\tdataType = \"self\"\n\t\t}\n\t}\n\n\tcid := r.FormValue(\"cid-eq\")\n\tname := r.FormValue(\"name-like\")\n\tyear := r.FormValue(\"year-eq\")\n\tmonth := r.FormValue(\"month-eq\")\n\tweek := r.FormValue(\"week-eq\")\n\tstartTime := r.FormValue(\"start_time-eq\")\n\n\tst := \"\"\n\tet := \"\"\n\tflag := true\n\n\tif startTime != \"\" {\n\t\tst = startTime + \" 00:00:00\"\n\t\tet = startTime + \" 23:59:59\"\n\t} else {\n\t\tif week != \"\" && month != \"\" && year != \"\" {\n\t\t\tst, et, flag = lessgo.FindRangeTimeDim(\"\", \"\", year+month+week)\n\t\t} else if month != \"\" && year != \"\" {\n\t\t\tst, et, flag = lessgo.FindRangeTimeDim(\"\", year+month, \"\")\n\t\t} else if year != \"\" {\n\t\t\tst, et, flag = lessgo.FindRangeTimeDim(year, \"\", \"\")\n\t\t}\n\t}\n\n\tparams := []interface{}{}\n\n\tsql := \"\"\n\tcountSql := \"\"\n\n\tif dataType == \"all\" {\n\n\t\tsql += \"select c.name,c.cid,e.user_id,e.really_name,phone_count.num a,rank.rowNo b,phone_count.num c,phone_count.num d,phone_count.localphone from (select count(*) num,localphone,cid from audio where remotephone!='' and remotephone is not null \"\n\n\t\tif cid != \"\" {\n\t\t\tsql += \" and cid=? \"\n\t\t\tparams = append(params, cid)\n\t\t}\n\n\t\tif flag {\n\t\t\tif st != \"\" && et != \"\" {\n\t\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\t\tparams = append(params, st)\n\t\t\t\tparams = append(params, et)\n\t\t\t}\n\t\t} else { \/\/找不到相应的时间区间\n\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\tparams = append(params, \"2000-01-01 00:00:00\")\n\t\t\tparams = append(params, \"2000-01-01 00:00:01\")\n\t\t}\n\n\t\tsql += \" group by localphone) phone_count left join center c on c.cid=phone_count.cid left join employee e on e.phone_in_center=phone_count.localphone left join (select a.*,(@rowNum:=@rowNum+1) as rowNo from (select count(*) num,localphone from audio where remotephone!='' and remotephone is not null \"\n\n\t\tif flag {\n\t\t\tif st != \"\" && et != \"\" {\n\t\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\t\tparams = append(params, st)\n\t\t\t\tparams = append(params, et)\n\t\t\t}\n\t\t} else { \/\/找不到相应的时间区间\n\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\tparams = append(params, \"2000-01-01 00:00:00\")\n\t\t\tparams = append(params, \"2000-01-01 00:00:01\")\n\t\t}\n\n\t\tsql += \" group by localphone order by num desc) a,(Select (@rowNum :=0) ) b)rank on rank.localphone=phone_count.localphone \"\n\n\t\tif name != \"\" {\n\t\t\tsql += \" where e.really_name like ? \"\n\t\t\tparams = append(params, \"%\"+name+\"%\")\n\t\t}\n\n\t\tsql += \" order by rank.rowNo \"\n\n\t} else if dataType == \"center\" {\n\n\t\tsql += \"select c.name,c.cid,e.user_id,e.really_name,phone_count.num a,rank.rowNo b,phone_count.num c,phone_count.num d,phone_count.localphone from (select count(*) num,localphone,cid from audio where cid=? and remotephone!='' and remotephone is not null \"\n\n\t\tuserId, _ := strconv.Atoi(employee.UserId)\n\t\t_employee, err := FindEmployeeById(userId)\n\t\tif err != nil {\n\t\t\tm[\"success\"] = false\n\t\t\tm[\"code\"] = 100\n\t\t\tm[\"msg\"] = \"出现错误,请联系IT部门,错误信息:\" + err.Error()\n\t\t\tcommonlib.OutputJson(w, m, \" \")\n\t\t\treturn\n\t\t}\n\t\tparams = append(params, _employee.CenterId)\n\n\t\tif flag {\n\t\t\tif st != \"\" && et != \"\" {\n\t\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\t\tparams = append(params, st)\n\t\t\t\tparams = append(params, et)\n\t\t\t}\n\t\t} else { \/\/找不到相应的时间区间\n\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\tparams = append(params, \"2000-01-01 00:00:00\")\n\t\t\tparams = append(params, \"2000-01-01 00:00:01\")\n\t\t}\n\n\t\tsql += \" group by localphone) phone_count left join center c on c.cid=phone_count.cid left join employee e on e.phone_in_center=phone_count.localphone left join (select a.*,(@rowNum:=@rowNum+1) as rowNo from (select count(*) num,localphone from audio where remotephone!='' and remotephone is not null \"\n\n\t\tif flag {\n\t\t\tif st != \"\" && et != \"\" {\n\t\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\t\tparams = append(params, st)\n\t\t\t\tparams = append(params, et)\n\t\t\t}\n\t\t} else { \/\/找不到相应的时间区间\n\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\tparams = append(params, \"2000-01-01 00:00:00\")\n\t\t\tparams = append(params, \"2000-01-01 00:00:01\")\n\t\t}\n\n\t\tsql += \" group by localphone order by num desc) a,(Select (@rowNum :=0) ) b)rank on rank.localphone=phone_count.localphone \"\n\n\t\tif name != \"\" {\n\t\t\tsql += \" where e.really_name like ? \"\n\t\t\tparams = append(params, \"%\"+name+\"%\")\n\t\t}\n\n\t\tsql += \" order by rank.rowNo \"\n\n\t} else if dataType == \"self\" {\n\n\t\tsql += \"select c.name,c.cid,e.user_id,e.really_name,phone_count.num a,rank.rowNo b,phone_count.num c,phone_count.num d,phone_count.localphone from (select count(*) num,localphone,cid from audio where remotephone!='' and remotephone is not null \"\n\n\t\tif flag {\n\t\t\tif st != \"\" && et != \"\" {\n\t\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\t\tparams = append(params, st)\n\t\t\t\tparams = append(params, et)\n\t\t\t}\n\t\t} else { \/\/找不到相应的时间区间\n\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\tparams = append(params, \"2000-01-01 00:00:00\")\n\t\t\tparams = append(params, \"2000-01-01 00:00:01\")\n\t\t}\n\n\t\tsql += \" group by localphone) phone_count left join center c on c.cid=phone_count.cid left join employee e on e.phone_in_center=phone_count.localphone left join (select a.*,(@rowNum:=@rowNum+1) as rowNo from (select count(*) num,localphone from audio where remotephone!='' and remotephone is not null \"\n\n\t\tif flag {\n\t\t\tif st != \"\" && et != \"\" {\n\t\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\t\tparams = append(params, st)\n\t\t\t\tparams = append(params, et)\n\t\t\t}\n\t\t} else { \/\/找不到相应的时间区间\n\t\t\tsql += \" and start_time >= ? and start_time<= ?\"\n\t\t\tparams = append(params, \"2000-01-01 00:00:00\")\n\t\t\tparams = append(params, \"2000-01-01 00:00:01\")\n\t\t}\n\n\t\tsql += \" group by localphone order by num desc) a,(Select (@rowNum :=0) ) b)rank on rank.localphone=phone_count.localphone \"\n\n\t\tsql += \" where e.user_id=? \"\n\n\t\tparams = append(params, employee.UserId)\n\n\t\tsql += \" order by rank.rowNo \"\n\t}\n\n\tcountSql = \"select count(1) from (\" + sql + \") num\"\n\n\tlessgo.Log.Debug(countSql)\n\n\tdb := lessgo.GetMySQL()\n\tdefer db.Close()\n\n\trows, err := db.Query(countSql, params...)\n\n\tif err != nil {\n\t\tlessgo.Log.Warn(err.Error())\n\t\tm[\"success\"] = false\n\t\tm[\"code\"] = 100\n\t\tm[\"msg\"] = \"系统发生错误,请联系IT部门\"\n\t\tcommonlib.OutputJson(w, m, \" \")\n\t\treturn\n\t}\n\n\ttotalNum := 0\n\n\tif rows.Next() {\n\t\terr := rows.Scan(&totalNum)\n\n\t\tif err != nil {\n\t\t\tlessgo.Log.Warn(err.Error())\n\t\t\tm[\"success\"] = false\n\t\t\tm[\"code\"] = 100\n\t\t\tm[\"msg\"] = \"系统发生错误,请联系IT部门\"\n\t\t\tcommonlib.OutputJson(w, m, \" \")\n\t\t\treturn\n\t\t}\n\t}\n\n\ttotalPage := int(math.Ceil(float64(totalNum) \/ float64(pageSize)))\n\n\tcurrPageNo := pageNo\n\n\tif currPageNo > totalPage {\n\t\tcurrPageNo = totalPage\n\t}\n\n\tlessgo.Log.Debug(sql + \" limit ?,?\")\n\n\tparams = append(params, (currPageNo-1)*pageSize)\n\tparams = append(params, pageSize)\n\n\trows, err = db.Query(sql+\" limit ?,?\", params...)\n\n\tif err != nil {\n\t\tlessgo.Log.Warn(err.Error())\n\t\tm[\"success\"] = false\n\t\tm[\"code\"] = 100\n\t\tm[\"msg\"] = \"系统发生错误,请联系IT部门\"\n\t\tcommonlib.OutputJson(w, m, \" \")\n\t\treturn\n\t}\n\n\tobjects := []interface{}{}\n\n\tfor rows.Next() {\n\n\t\tmodel := new(lessgo.Model)\n\t\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\t\tmodel.Id = r.Intn(1000)\n\t\tmodel.Props = []*lessgo.Prop{}\n\n\t\tfillObjects := []interface{}{}\n\n\t\tfor i := 0; i < 9; i++ {\n\t\t\tprop := new(lessgo.Prop)\n\t\t\tprop.Name = fmt.Sprint(i)\n\t\t\tprop.Value = \"\"\n\t\t\tfillObjects = append(fillObjects, &prop.Value)\n\t\t\tmodel.Props = append(model.Props, prop)\n\t\t}\n\n\t\terr = commonlib.PutRecord(rows, fillObjects...)\n\n\t\tif err != nil {\n\t\t\tlessgo.Log.Warn(err.Error())\n\t\t\tm[\"success\"] = false\n\t\t\tm[\"code\"] = 100\n\t\t\tm[\"msg\"] = \"系统发生错误,请联系IT部门\"\n\t\t\tcommonlib.OutputJson(w, m, \" \")\n\t\t\treturn\n\t\t}\n\n\t\tobjects = append(objects, model)\n\t}\n\n\tpageData := commonlib.BulidTraditionPage(currPageNo, pageSize, totalNum, objects)\n\n\tm[\"PageData\"] = pageData\n\tm[\"DataLength\"] = len(pageData.Datas) - 1\n\tif len(pageData.Datas) > 0 {\n\t\tm[\"FieldLength\"] = len(pageData.Datas[0].(*lessgo.Model).Props) - 1\n\t}\n\n\tcommonlib.RenderTemplate(w, r, \"entity_page.json\", m, template.FuncMap{\"getPropValue\": lessgo.GetPropValue, \"compareInt\": lessgo.CompareInt, \"dealJsonString\": lessgo.DealJsonString}, \"..\/lessgo\/template\/entity_page.json\")\n\n}\n<|endoftext|>"} {"text":"<commit_before>package helm\n\nimport (\n\t\"github.com\/keel-hq\/keel\/types\"\n\t\"github.com\/keel-hq\/keel\/util\/image\"\n\t\"github.com\/keel-hq\/keel\/util\/version\"\n\n\thapi_chart \"k8s.io\/helm\/pkg\/proto\/hapi\/chart\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nfunc checkVersionedRelease(newVersion *types.Version, repo *types.Repository, namespace, name string, chart *hapi_chart.Chart, config *hapi_chart.Config) (plan *UpdatePlan, shouldUpdateRelease bool, err error) {\n\tplan = &UpdatePlan{\n\t\tChart: chart,\n\t\tNamespace: namespace,\n\t\tName: name,\n\t\tValues: make(map[string]string),\n\t}\n\n\teventRepoRef, err := image.Parse(repo.Name)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t\t\"repository_name\": repo.Name,\n\t\t}).Error(\"provider.helm: failed to parse event repository name\")\n\t\treturn\n\t}\n\n\t\/\/ getting configuration\n\tvals, err := values(chart, config)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t}).Error(\"provider.helm: failed to get values.yaml for release\")\n\t\treturn\n\t}\n\n\tkeelCfg, err := getKeelConfig(vals)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t}).Error(\"provider.helm: failed to get keel configuration for release\")\n\t\t\/\/ ignoring this release, no keel config found\n\t\treturn plan, false, nil\n\t}\n\t\/\/ checking for impacted images\n\tfor _, imageDetails := range keelCfg.Images {\n\n\t\timageRef, err := parseImage(vals, &imageDetails)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t\t\"repository_name\": imageDetails.RepositoryPath,\n\t\t\t\t\"repository_tag\": imageDetails.TagPath,\n\t\t\t}).Error(\"provider.helm: failed to parse image\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif imageRef.Repository() != eventRepoRef.Repository() {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"parsed_image_name\": imageRef.Remote(),\n\t\t\t\t\"target_image_name\": repo.Name,\n\t\t\t}).Info(\"provider.helm: images do not match, ignoring\")\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ checking policy and whether we should update\n\t\tif keelCfg.Policy == types.PolicyTypeForce || imageRef.Tag() == \"latest\" {\n\t\t\tpath, value := getPlanValues(newVersion, imageRef, &imageDetails)\n\t\t\tplan.Values[path] = value\n\t\t\tplan.NewVersion = newVersion.String()\n\t\t\tplan.CurrentVersion = imageRef.Tag()\n\t\t\tplan.Config = keelCfg\n\t\t\tshouldUpdateRelease = true\n\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"parsed_image\": imageRef.Remote(),\n\t\t\t\t\"target_image\": repo.Name,\n\t\t\t\t\"target_image_tag\": repo.Tag,\n\t\t\t\t\"policy\": keelCfg.Policy,\n\t\t\t}).Info(\"provider.helm: impacted release container found\")\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ checking current\n\t\tcurrentVersion, err := version.GetVersion(imageRef.Tag())\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t\t\"container_image\": imageRef.Repository(),\n\t\t\t\t\"container_image_tag\": imageRef.Tag(),\n\t\t\t\t\"keel_policy\": keelCfg.Policy,\n\t\t\t}).Error(\"provider.helm: failed to get image version, is it tagged as semver?\")\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"name\": name,\n\t\t\t\"namespace\": namespace,\n\t\t\t\"container_image\": imageRef.Repository(),\n\t\t\t\"current_version\": currentVersion.String(),\n\t\t\t\"policy\": keelCfg.Policy,\n\t\t}).Info(\"provider.helm: current image version\")\n\n\t\tshouldUpdate, err := version.ShouldUpdate(currentVersion, newVersion, keelCfg.Policy)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t\t\"new_version\": newVersion.String(),\n\t\t\t\t\"current_version\": currentVersion.String(),\n\t\t\t\t\"keel_policy\": keelCfg.Policy,\n\t\t\t}).Error(\"provider.helm: got error while checking whether deployment should be updated\")\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"name\": name,\n\t\t\t\"namespace\": namespace,\n\t\t\t\"container_image\": imageRef.Repository(),\n\t\t\t\"current_version\": currentVersion.String(),\n\t\t\t\"new_version\": newVersion.String(),\n\t\t\t\"policy\": keelCfg.Policy,\n\t\t\t\"should_update\": shouldUpdate,\n\t\t}).Info(\"provider.helm: checked version, deciding whether to update\")\n\n\t\tif shouldUpdate {\n\t\t\tpath, value := getPlanValues(newVersion, imageRef, &imageDetails)\n\t\t\tplan.Values[path] = value\n\t\t\tplan.NewVersion = newVersion.String()\n\t\t\tplan.CurrentVersion = currentVersion.String()\n\t\t\tplan.Config = keelCfg\n\t\t\tshouldUpdateRelease = true\n\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"container_image\": imageRef.Repository(),\n\t\t\t\t\"target_image\": repo.Name,\n\t\t\t\t\"target_image_tag\": repo.Tag,\n\t\t\t\t\"policy\": keelCfg.Policy,\n\t\t\t}).Info(\"provider.helm: impacted release tags found\")\n\t\t}\n\n\t}\n\treturn plan, shouldUpdateRelease, nil\n}\n<commit_msg>collecting release notes<commit_after>package helm\n\nimport (\n\t\"github.com\/keel-hq\/keel\/types\"\n\t\"github.com\/keel-hq\/keel\/util\/image\"\n\t\"github.com\/keel-hq\/keel\/util\/version\"\n\n\thapi_chart \"k8s.io\/helm\/pkg\/proto\/hapi\/chart\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nfunc checkVersionedRelease(newVersion *types.Version, repo *types.Repository, namespace, name string, chart *hapi_chart.Chart, config *hapi_chart.Config) (plan *UpdatePlan, shouldUpdateRelease bool, err error) {\n\tplan = &UpdatePlan{\n\t\tChart: chart,\n\t\tNamespace: namespace,\n\t\tName: name,\n\t\tValues: make(map[string]string),\n\t}\n\n\teventRepoRef, err := image.Parse(repo.Name)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t\t\"repository_name\": repo.Name,\n\t\t}).Error(\"provider.helm: failed to parse event repository name\")\n\t\treturn\n\t}\n\n\t\/\/ getting configuration\n\tvals, err := values(chart, config)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t}).Error(\"provider.helm: failed to get values.yaml for release\")\n\t\treturn\n\t}\n\n\tkeelCfg, err := getKeelConfig(vals)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"error\": err,\n\t\t}).Error(\"provider.helm: failed to get keel configuration for release\")\n\t\t\/\/ ignoring this release, no keel config found\n\t\treturn plan, false, nil\n\t}\n\t\/\/ checking for impacted images\n\tfor _, imageDetails := range keelCfg.Images {\n\n\t\timageRef, err := parseImage(vals, &imageDetails)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t\t\"repository_name\": imageDetails.RepositoryPath,\n\t\t\t\t\"repository_tag\": imageDetails.TagPath,\n\t\t\t}).Error(\"provider.helm: failed to parse image\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif imageRef.Repository() != eventRepoRef.Repository() {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"parsed_image_name\": imageRef.Remote(),\n\t\t\t\t\"target_image_name\": repo.Name,\n\t\t\t}).Info(\"provider.helm: images do not match, ignoring\")\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ checking policy and whether we should update\n\t\tif keelCfg.Policy == types.PolicyTypeForce || imageRef.Tag() == \"latest\" {\n\t\t\tpath, value := getPlanValues(newVersion, imageRef, &imageDetails)\n\t\t\tplan.Values[path] = value\n\t\t\tplan.NewVersion = newVersion.String()\n\t\t\tplan.CurrentVersion = imageRef.Tag()\n\t\t\tplan.Config = keelCfg\n\t\t\tshouldUpdateRelease = true\n\t\t\tif imageDetails.ReleaseNotes != \"\" {\n\t\t\t\tplan.ReleaseNotes = append(plan.ReleaseNotes, imageDetails.ReleaseNotes)\n\t\t\t}\n\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"parsed_image\": imageRef.Remote(),\n\t\t\t\t\"target_image\": repo.Name,\n\t\t\t\t\"target_image_tag\": repo.Tag,\n\t\t\t\t\"policy\": keelCfg.Policy,\n\t\t\t}).Info(\"provider.helm: impacted release container found\")\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ checking current\n\t\tcurrentVersion, err := version.GetVersion(imageRef.Tag())\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t\t\"container_image\": imageRef.Repository(),\n\t\t\t\t\"container_image_tag\": imageRef.Tag(),\n\t\t\t\t\"keel_policy\": keelCfg.Policy,\n\t\t\t}).Error(\"provider.helm: failed to get image version, is it tagged as semver?\")\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"name\": name,\n\t\t\t\"namespace\": namespace,\n\t\t\t\"container_image\": imageRef.Repository(),\n\t\t\t\"current_version\": currentVersion.String(),\n\t\t\t\"policy\": keelCfg.Policy,\n\t\t}).Info(\"provider.helm: current image version\")\n\n\t\tshouldUpdate, err := version.ShouldUpdate(currentVersion, newVersion, keelCfg.Policy)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"error\": err,\n\t\t\t\t\"new_version\": newVersion.String(),\n\t\t\t\t\"current_version\": currentVersion.String(),\n\t\t\t\t\"keel_policy\": keelCfg.Policy,\n\t\t\t}).Error(\"provider.helm: got error while checking whether deployment should be updated\")\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"name\": name,\n\t\t\t\"namespace\": namespace,\n\t\t\t\"container_image\": imageRef.Repository(),\n\t\t\t\"current_version\": currentVersion.String(),\n\t\t\t\"new_version\": newVersion.String(),\n\t\t\t\"policy\": keelCfg.Policy,\n\t\t\t\"should_update\": shouldUpdate,\n\t\t}).Info(\"provider.helm: checked version, deciding whether to update\")\n\n\t\tif shouldUpdate {\n\t\t\tpath, value := getPlanValues(newVersion, imageRef, &imageDetails)\n\t\t\tplan.Values[path] = value\n\t\t\tplan.NewVersion = newVersion.String()\n\t\t\tplan.CurrentVersion = currentVersion.String()\n\t\t\tplan.Config = keelCfg\n\t\t\tshouldUpdateRelease = true\n\t\t\tif imageDetails.ReleaseNotes != \"\" {\n\t\t\t\tplan.ReleaseNotes = append(plan.ReleaseNotes, imageDetails.ReleaseNotes)\n\t\t\t}\n\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"container_image\": imageRef.Repository(),\n\t\t\t\t\"target_image\": repo.Name,\n\t\t\t\t\"target_image_tag\": repo.Tag,\n\t\t\t\t\"policy\": keelCfg.Policy,\n\t\t\t}).Info(\"provider.helm: impacted release tags found\")\n\t\t}\n\n\t}\n\treturn plan, shouldUpdateRelease, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package sparkpost\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"net\/http\"\n\n\t\"github.com\/jmcvetta\/napping\"\n\tlog \"gopkg.in\/inconshreveable\/log15.v2\"\n\n\t\"github.com\/schmooser\/go-mailer\/message\"\n)\n\n\/\/ Log represents logger.\nvar Log = log.New()\n\n\/\/ SparkPost defines SparkPost transactional mail provider.\ntype SparkPost struct {\n\tkey string\n}\n\nconst apiURL = \"https:\/\/api.sparkpost.com\/api\/v1\/transmissions?num_rcpt_errors=30\"\n\n\/\/ Address is SparkPost' address part.\ntype Address struct {\n\tName string `json:\"name,omitempty\"`\n\tEmail string `json:\"email\"`\n\tHeaderTo string `json:\"header_to,omitempty\"`\n}\n\n\/\/ Recipient defines SparkPost' recipient.\ntype Recipient struct {\n\tAddress Address `json:\"address\"`\n}\n\n\/\/ Content defines SparkPost' content.\ntype Content struct {\n\tFrom Address `json:\"from,omitempty\"`\n\tHeaders map[string]string `json:\"headers,omitempty\"`\n\tSubject string `json:\"subject,omitempty\"`\n\tText string `json:\"text,omitempty\"`\n\tHTML string `json:\"html,omitempty\"`\n}\n\n\/\/ Message defines SparkPost' message.\ntype Message struct {\n\tReturnPath string `json:\"return_path,omitempty\"`\n\tRecipients []Recipient `json:\"recipients,omitempty\"`\n\tContent Content `json:\"content,omitempt\"`\n}\n\n\/\/ New returns new SparkPost instance. API key is validated upon creation,\n\/\/ returning error if key is not valid.\nfunc New(key string) (*SparkPost, error) {\n\treturn &SparkPost{key: key}, nil\n}\n\n\/\/ Send sends provided message in async or sync way.\nfunc (sp *SparkPost) Send(msg *message.Message, async bool) (interface{}, error) {\n\n\tvar recipients []Recipient\n\tfor _, r := range msg.To {\n\t\trecipients = append(recipients, Recipient{\n\t\t\tAddress: Address{\n\t\t\t\tEmail: r.Address,\n\t\t\t\tName: r.Name,\n\t\t\t},\n\t\t})\n\t}\n\tfor _, r := range append(msg.CC, msg.BCC...) {\n\t\trecipients = append(recipients, Recipient{\n\t\t\tAddress: Address{\n\t\t\t\tEmail: r.Address,\n\t\t\t\tHeaderTo: msg.To.String(),\n\t\t\t},\n\t\t})\n\t}\n\n\tm := Message{\n\t\tReturnPath: msg.From.Address,\n\t\tRecipients: recipients,\n\t\tContent: Content{\n\t\t\tFrom: Address{\n\t\t\t\tName: msg.From.Name,\n\t\t\t\tEmail: msg.From.Address,\n\t\t\t},\n\t\t\tSubject: msg.Subject,\n\t\t\tText: msg.Text,\n\t\t\tHTML: msg.HTML,\n\t\t\tHeaders: map[string]string{\"CC\": msg.CC.String()},\n\t\t},\n\t}\n\n\tb, _ := json.Marshal(m)\n\tLog.Debug(\"Message to send\", \"msg\", string(b))\n\n\theaders := make(http.Header)\n\theaders.Add(\"Authorization\", sp.key)\n\ts := napping.Session{\n\t\tHeader: &headers,\n\t}\n\n\tvar result interface{}\n\tresp, err := s.Post(apiURL, &m, &result, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.Status() == 200 {\n\t\treturn result, nil\n\t}\n\n\tLog.Error(\"Response\", \"status\", resp.Status(), \"body\", resp.RawText())\n\treturn nil, errors.New(\"Non-200 status returned\")\n}\n\nfunc init() {\n\tLog.SetHandler(log.DiscardHandler())\n}\n<commit_msg>Fix sparkpost provider - don't generate empty CC header<commit_after>package sparkpost\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"net\/http\"\n\n\t\"github.com\/jmcvetta\/napping\"\n\tlog \"gopkg.in\/inconshreveable\/log15.v2\"\n\n\t\"github.com\/schmooser\/go-mailer\/message\"\n)\n\n\/\/ Log represents logger.\nvar Log = log.New()\n\n\/\/ SparkPost defines SparkPost transactional mail provider.\ntype SparkPost struct {\n\tkey string\n}\n\nconst apiURL = \"https:\/\/api.sparkpost.com\/api\/v1\/transmissions?num_rcpt_errors=30\"\n\n\/\/ Address is SparkPost' address part.\ntype Address struct {\n\tName string `json:\"name,omitempty\"`\n\tEmail string `json:\"email\"`\n\tHeaderTo string `json:\"header_to,omitempty\"`\n}\n\n\/\/ Recipient defines SparkPost' recipient.\ntype Recipient struct {\n\tAddress Address `json:\"address\"`\n}\n\n\/\/ Content defines SparkPost' content.\ntype Content struct {\n\tFrom Address `json:\"from,omitempty\"`\n\tHeaders map[string]string `json:\"headers,omitempty\"`\n\tSubject string `json:\"subject,omitempty\"`\n\tText string `json:\"text,omitempty\"`\n\tHTML string `json:\"html,omitempty\"`\n}\n\n\/\/ Message defines SparkPost' message.\ntype Message struct {\n\tReturnPath string `json:\"return_path,omitempty\"`\n\tRecipients []Recipient `json:\"recipients,omitempty\"`\n\tContent Content `json:\"content,omitempt\"`\n}\n\n\/\/ New returns new SparkPost instance. API key is validated upon creation,\n\/\/ returning error if key is not valid.\nfunc New(key string) (*SparkPost, error) {\n\treturn &SparkPost{key: key}, nil\n}\n\n\/\/ Send sends provided message in async or sync way.\nfunc (sp *SparkPost) Send(msg *message.Message, async bool) (interface{}, error) {\n\n\tvar recipients []Recipient\n\tfor _, r := range msg.To {\n\t\trecipients = append(recipients, Recipient{\n\t\t\tAddress: Address{\n\t\t\t\tEmail: r.Address,\n\t\t\t\tName: r.Name,\n\t\t\t},\n\t\t})\n\t}\n\tfor _, r := range append(msg.CC, msg.BCC...) {\n\t\trecipients = append(recipients, Recipient{\n\t\t\tAddress: Address{\n\t\t\t\tEmail: r.Address,\n\t\t\t\tHeaderTo: msg.To.String(),\n\t\t\t},\n\t\t})\n\t}\n\n\tm := Message{\n\t\tReturnPath: msg.From.Address,\n\t\tRecipients: recipients,\n\t\tContent: Content{\n\t\t\tFrom: Address{\n\t\t\t\tName: msg.From.Name,\n\t\t\t\tEmail: msg.From.Address,\n\t\t\t},\n\t\t\tSubject: msg.Subject,\n\t\t\tText: msg.Text,\n\t\t\tHTML: msg.HTML,\n\t\t\tHeaders: make(map[string]string),\n\t\t},\n\t}\n\n\tif msg.CC.String() != \"\" {\n\t\tm.Content.Headers[\"CC\"] = msg.CC.String()\n\t}\n\n\tb, _ := json.Marshal(m)\n\tLog.Debug(\"Message to send\", \"msg\", string(b))\n\n\theaders := make(http.Header)\n\theaders.Add(\"Authorization\", sp.key)\n\ts := napping.Session{\n\t\tHeader: &headers,\n\t}\n\n\tvar result interface{}\n\tresp, err := s.Post(apiURL, &m, &result, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.Status() == 200 {\n\t\treturn result, nil\n\t}\n\n\tLog.Error(\"Response\", \"status\", resp.Status(), \"body\", resp.RawText())\n\treturn nil, errors.New(\"Non-200 status returned\")\n}\n\nfunc init() {\n\tLog.SetHandler(log.DiscardHandler())\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage interrupts\n\nimport (\n\t\"context\"\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"crypto\/x509\/pkix\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/big\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"sync\"\n\t\"syscall\"\n\t\"testing\"\n\t\"time\"\n)\n\n\/\/ interrupt allows for tests to trigger an interrupt as needed\nvar interrupt = make(chan os.Signal, 1)\n\n\/\/ this init will be executed before that in the code package,\n\/\/ so we can inject our implementation of the interrupt channel\nfunc init() {\n\tsignalsLock.Lock()\n\tgracePeriod = 0 * time.Second\n\tsignals = func() <-chan os.Signal {\n\t\treturn interrupt\n\t}\n\tsignalsLock.Unlock()\n}\n\n\/\/ instead of building a mechanism to reset\/re-initialize the interrupt\n\/\/ manager which would only be used in testing, we write an integration\n\/\/ test that only fires the mock interrupt once\nfunc TestInterrupts(t *testing.T) {\n\t\/\/ we need to lock around values used to test otherwise the test\n\t\/\/ goroutine will race with the workers\n\tlock := sync.Mutex{}\n\n\tctx := Context()\n\tvar ctxDone bool\n\tgo func() {\n\t\t<-ctx.Done()\n\n\t\tlock.Lock()\n\t\tctxDone = true\n\t\tlock.Unlock()\n\t}()\n\n\tvar workDone bool\n\tvar workCancelled bool\n\twork := func(ctx context.Context) {\n\t\tlock.Lock()\n\t\tworkDone = true\n\t\tlock.Unlock()\n\n\t\t<-ctx.Done()\n\n\t\tlock.Lock()\n\t\tworkCancelled = true\n\t\tlock.Unlock()\n\t}\n\tRun(work)\n\n\t\/\/ we cannot use httptest mocks for the tests here as they expect\n\t\/\/ to be started by the httptest package itself, not by a downstream\n\t\/\/ caller like the interrupts library\n\tvar serverCalled bool\n\tvar serverCancelled bool\n\tserver := &http.Server{Addr: \":9999\", Handler: http.HandlerFunc(func(http.ResponseWriter, *http.Request) {\n\t\tlock.Lock()\n\t\tserverCalled = true\n\t\tlock.Unlock()\n\t})}\n\tserver.RegisterOnShutdown(func() {\n\t\tlock.Lock()\n\t\tserverCancelled = true\n\t\tlock.Unlock()\n\t})\n\tListenAndServe(server, 1*time.Microsecond)\n\tif _, err := http.Get(\"http:\/\/127.0.0.1:9999\"); err != nil {\n\t\tt.Errorf(\"could not reach server registered with ListenAndServe(): %v\", err)\n\t}\n\n\tvar tlsServerCalled bool\n\tvar tlsServerCancelled bool\n\ttlsServer := &http.Server{Addr: \"127.0.0.1:9998\", Handler: http.HandlerFunc(func(http.ResponseWriter, *http.Request) {\n\t\tlock.Lock()\n\t\ttlsServerCalled = true\n\t\tlock.Unlock()\n\t})}\n\ttlsServer.RegisterOnShutdown(func() {\n\t\tlock.Lock()\n\t\ttlsServerCancelled = true\n\t\tlock.Unlock()\n\t})\n\tcert, key, err := generateCerts(\"127.0.0.1\")\n\tif err != nil {\n\t\tt.Fatalf(\"could not generate cert and key for TLS server: %v\", err)\n\t}\n\tListenAndServeTLS(tlsServer, cert, key, 1*time.Microsecond)\n\tclient := &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}\n\tif _, err := client.Get(\"https:\/\/127.0.0.1:9998\"); err != nil {\n\t\tt.Errorf(\"could not reach server registered with ListenAndServeTLS(): %v\", err)\n\t}\n\n\tvar intervalCalls int\n\tinterval := func() time.Duration {\n\t\tlock.Lock()\n\t\tintervalCalls++\n\t\tlock.Unlock()\n\t\tif intervalCalls > 2 {\n\t\t\treturn 10 * time.Hour\n\t\t}\n\t\treturn 1 * time.Nanosecond\n\t}\n\tvar tickCalls int\n\ttick := func() {\n\t\tlock.Lock()\n\t\ttickCalls++\n\t\tlock.Unlock()\n\t}\n\tTick(tick, interval)\n\t\/\/ writing a test that functions correctly here without being susceptible\n\t\/\/ to timing flakes is challenging. Using time.Sleep like this does have\n\t\/\/ that downside, but the sleep time is many orders of magnitude higher\n\t\/\/ than the tick intervals and the amount of time taken to execute the\n\t\/\/ test as well, so it is going to be exceedingly rare that scheduling of\n\t\/\/ the test process will cause a flake here from timing. The test cannot\n\t\/\/ use synchronized approaches to waiting here as we do not know how long\n\t\/\/ we must wait. The test must have enough time to ask for the interval\n\t\/\/ as many times as we expect it to, but if we only wait for that we fail\n\t\/\/ to catch the cases where the interval is requested too many times.\n\ttime.Sleep(100 * time.Millisecond)\n\n\tvar onInterruptCalled bool\n\tOnInterrupt(func() {\n\t\tlock.Lock()\n\t\tonInterruptCalled = true\n\t\tlock.Unlock()\n\t})\n\n\tdone := sync.WaitGroup{}\n\tdone.Add(1)\n\tgo func() {\n\t\tWaitForGracefulShutdown()\n\t\tdone.Done()\n\t}()\n\n\tif onInterruptCalled {\n\t\tt.Error(\"work registered with OnInterrupt() was executed before interrupt\")\n\t}\n\n\t\/\/ trigger the interrupt\n\tinterrupt <- syscall.Signal(1)\n\t\/\/ wait for graceful shutdown to occur\n\tdone.Wait()\n\n\tlock.Lock()\n\tif !ctxDone {\n\t\tt.Error(\"context from Context() was not cancelled on interrupt\")\n\t}\n\tif !workDone {\n\t\tt.Error(\"work registered with Run() was not executed\")\n\t}\n\tif !workCancelled {\n\t\tt.Error(\"work registered with Run() was not cancelled on interrupt\")\n\t}\n\tif !serverCalled {\n\t\tt.Error(\"server registered with ListenAndServe() was not serving\")\n\t}\n\tif !serverCancelled {\n\t\tt.Error(\"server registered with ListenAndServe() was not cancelled on interrupt\")\n\t}\n\tif !tlsServerCalled {\n\t\tt.Error(\"server registered with ListenAndServeTLS() was not serving\")\n\t}\n\tif !tlsServerCancelled {\n\t\tt.Error(\"server registered with ListenAndServeTLS() was not cancelled on interrupt\")\n\t}\n\tif tickCalls != 2 {\n\t\tt.Errorf(\"work registered with Tick() was called %d times, not %d; interval was requested %d times\", tickCalls, 2, intervalCalls)\n\t}\n\tif !onInterruptCalled {\n\t\tt.Error(\"work registered with OnInterrupt() was not executed on interrupt\")\n\t}\n\tlock.Unlock()\n}\n\nfunc generateCerts(url string) (string, string, error) {\n\tpriv, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"failed to generate private key: %v\", err)\n\t}\n\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"failed to generate serial number: %s\", err)\n\t}\n\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"Acme Co\"},\n\t\t},\n\t\tNotBefore: time.Now(),\n\t\tNotAfter: time.Now().Add(1 * time.Hour),\n\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\n\t\tIPAddresses: []net.IP{net.ParseIP(url)},\n\t}\n\n\tderBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"failed to create certificate: %s\", err)\n\t}\n\n\tcertOut, err := ioutil.TempFile(\"\", \"cert.pem\")\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"failed to open cert.pem for writing: %s\", err)\n\t}\n\tif err := pem.Encode(certOut, &pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes}); err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"failed to write data to cert.pem: %s\", err)\n\t}\n\tif err := certOut.Close(); err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"error closing cert.pem: %s\", err)\n\t}\n\n\tkeyOut, err := ioutil.TempFile(\"\", \"key.pem\")\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"failed to open key.pem for writing: %v\", err)\n\t}\n\tprivBytes, err := x509.MarshalPKCS8PrivateKey(priv)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"unable to marshal private key: %v\", err)\n\t}\n\tif err := pem.Encode(keyOut, &pem.Block{Type: \"PRIVATE KEY\", Bytes: privBytes}); err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"failed to write data to key.pem: %s\", err)\n\t}\n\tif err := keyOut.Close(); err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"error closing key.pem: %s\", err)\n\t}\n\tif err := os.Chmod(keyOut.Name(), 0600); err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"could not change permissions on key.pem: %v\", err)\n\t}\n\treturn certOut.Name(), keyOut.Name(), nil\n}\n<commit_msg>Update interrupts test servers to grab random ports<commit_after>\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage interrupts\n\nimport (\n\t\"context\"\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"crypto\/x509\/pkix\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/big\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"sync\"\n\t\"syscall\"\n\t\"testing\"\n\t\"time\"\n)\n\n\/\/ interrupt allows for tests to trigger an interrupt as needed\nvar interrupt = make(chan os.Signal, 1)\n\n\/\/ this init will be executed before that in the code package,\n\/\/ so we can inject our implementation of the interrupt channel\nfunc init() {\n\tsignalsLock.Lock()\n\tgracePeriod = 0 * time.Second\n\tsignals = func() <-chan os.Signal {\n\t\treturn interrupt\n\t}\n\tsignalsLock.Unlock()\n}\n\n\/\/ instead of building a mechanism to reset\/re-initialize the interrupt\n\/\/ manager which would only be used in testing, we write an integration\n\/\/ test that only fires the mock interrupt once\nfunc TestInterrupts(t *testing.T) {\n\t\/\/ we need to lock around values used to test otherwise the test\n\t\/\/ goroutine will race with the workers\n\tlock := sync.Mutex{}\n\n\tctx := Context()\n\tvar ctxDone bool\n\tgo func() {\n\t\t<-ctx.Done()\n\n\t\tlock.Lock()\n\t\tctxDone = true\n\t\tlock.Unlock()\n\t}()\n\n\tvar workDone bool\n\tvar workCancelled bool\n\twork := func(ctx context.Context) {\n\t\tlock.Lock()\n\t\tworkDone = true\n\t\tlock.Unlock()\n\n\t\t<-ctx.Done()\n\n\t\tlock.Lock()\n\t\tworkCancelled = true\n\t\tlock.Unlock()\n\t}\n\tRun(work)\n\n\t\/\/ we cannot use httptest mocks for the tests here as they expect\n\t\/\/ to be started by the httptest package itself, not by a downstream\n\t\/\/ caller like the interrupts library\n\tvar serverCalled bool\n\tvar serverCancelled bool\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:\")\n\tif err != nil {\n\t\tt.Fatalf(\"could not listen on random port: %v\", err)\n\t}\n\tif err := listener.Close(); err != nil {\n\t\tt.Fatalf(\"could close listener: %v\", err)\n\t}\n\tserver := &http.Server{Addr: listener.Addr().String(), Handler: http.HandlerFunc(func(http.ResponseWriter, *http.Request) {\n\t\tlock.Lock()\n\t\tserverCalled = true\n\t\tlock.Unlock()\n\t})}\n\tserver.RegisterOnShutdown(func() {\n\t\tlock.Lock()\n\t\tserverCancelled = true\n\t\tlock.Unlock()\n\t})\n\tListenAndServe(server, 1*time.Microsecond)\n\tif _, err := http.Get(\"http:\/\/\" + listener.Addr().String()); err != nil {\n\t\tt.Errorf(\"could not reach server registered with ListenAndServe(): %v\", err)\n\t}\n\n\tvar tlsServerCalled bool\n\tvar tlsServerCancelled bool\n\ttlsListener, err := net.Listen(\"tcp\", \"127.0.0.1:\")\n\tif err != nil {\n\t\tt.Fatalf(\"could not listen on random port: %v\", err)\n\t}\n\tif err := tlsListener.Close(); err != nil {\n\t\tt.Fatalf(\"could close listener: %v\", err)\n\t}\n\ttlsServer := &http.Server{Addr: tlsListener.Addr().String(), Handler: http.HandlerFunc(func(http.ResponseWriter, *http.Request) {\n\t\tlock.Lock()\n\t\ttlsServerCalled = true\n\t\tlock.Unlock()\n\t})}\n\ttlsServer.RegisterOnShutdown(func() {\n\t\tlock.Lock()\n\t\ttlsServerCancelled = true\n\t\tlock.Unlock()\n\t})\n\tcert, key, err := generateCerts(\"127.0.0.1\")\n\tif err != nil {\n\t\tt.Fatalf(\"could not generate cert and key for TLS server: %v\", err)\n\t}\n\tListenAndServeTLS(tlsServer, cert, key, 1*time.Microsecond)\n\tclient := &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}\n\tif _, err := client.Get(\"https:\/\/\" + tlsListener.Addr().String()); err != nil {\n\t\tt.Errorf(\"could not reach server registered with ListenAndServeTLS(): %v\", err)\n\t}\n\n\tvar intervalCalls int\n\tinterval := func() time.Duration {\n\t\tlock.Lock()\n\t\tintervalCalls++\n\t\tlock.Unlock()\n\t\tif intervalCalls > 2 {\n\t\t\treturn 10 * time.Hour\n\t\t}\n\t\treturn 1 * time.Nanosecond\n\t}\n\tvar tickCalls int\n\ttick := func() {\n\t\tlock.Lock()\n\t\ttickCalls++\n\t\tlock.Unlock()\n\t}\n\tTick(tick, interval)\n\t\/\/ writing a test that functions correctly here without being susceptible\n\t\/\/ to timing flakes is challenging. Using time.Sleep like this does have\n\t\/\/ that downside, but the sleep time is many orders of magnitude higher\n\t\/\/ than the tick intervals and the amount of time taken to execute the\n\t\/\/ test as well, so it is going to be exceedingly rare that scheduling of\n\t\/\/ the test process will cause a flake here from timing. The test cannot\n\t\/\/ use synchronized approaches to waiting here as we do not know how long\n\t\/\/ we must wait. The test must have enough time to ask for the interval\n\t\/\/ as many times as we expect it to, but if we only wait for that we fail\n\t\/\/ to catch the cases where the interval is requested too many times.\n\ttime.Sleep(100 * time.Millisecond)\n\n\tvar onInterruptCalled bool\n\tOnInterrupt(func() {\n\t\tlock.Lock()\n\t\tonInterruptCalled = true\n\t\tlock.Unlock()\n\t})\n\n\tdone := sync.WaitGroup{}\n\tdone.Add(1)\n\tgo func() {\n\t\tWaitForGracefulShutdown()\n\t\tdone.Done()\n\t}()\n\n\tif onInterruptCalled {\n\t\tt.Error(\"work registered with OnInterrupt() was executed before interrupt\")\n\t}\n\n\t\/\/ trigger the interrupt\n\tinterrupt <- syscall.Signal(1)\n\t\/\/ wait for graceful shutdown to occur\n\tdone.Wait()\n\n\tlock.Lock()\n\tif !ctxDone {\n\t\tt.Error(\"context from Context() was not cancelled on interrupt\")\n\t}\n\tif !workDone {\n\t\tt.Error(\"work registered with Run() was not executed\")\n\t}\n\tif !workCancelled {\n\t\tt.Error(\"work registered with Run() was not cancelled on interrupt\")\n\t}\n\tif !serverCalled {\n\t\tt.Error(\"server registered with ListenAndServe() was not serving\")\n\t}\n\tif !serverCancelled {\n\t\tt.Error(\"server registered with ListenAndServe() was not cancelled on interrupt\")\n\t}\n\tif !tlsServerCalled {\n\t\tt.Error(\"server registered with ListenAndServeTLS() was not serving\")\n\t}\n\tif !tlsServerCancelled {\n\t\tt.Error(\"server registered with ListenAndServeTLS() was not cancelled on interrupt\")\n\t}\n\tif tickCalls != 2 {\n\t\tt.Errorf(\"work registered with Tick() was called %d times, not %d; interval was requested %d times\", tickCalls, 2, intervalCalls)\n\t}\n\tif !onInterruptCalled {\n\t\tt.Error(\"work registered with OnInterrupt() was not executed on interrupt\")\n\t}\n\tlock.Unlock()\n}\n\nfunc generateCerts(url string) (string, string, error) {\n\tpriv, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"failed to generate private key: %v\", err)\n\t}\n\n\tserialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)\n\tserialNumber, err := rand.Int(rand.Reader, serialNumberLimit)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"failed to generate serial number: %s\", err)\n\t}\n\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: serialNumber,\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"Acme Co\"},\n\t\t},\n\t\tNotBefore: time.Now(),\n\t\tNotAfter: time.Now().Add(1 * time.Hour),\n\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,\n\t\tExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\n\t\tIPAddresses: []net.IP{net.ParseIP(url)},\n\t}\n\n\tderBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"failed to create certificate: %s\", err)\n\t}\n\n\tcertOut, err := ioutil.TempFile(\"\", \"cert.pem\")\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"failed to open cert.pem for writing: %s\", err)\n\t}\n\tif err := pem.Encode(certOut, &pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes}); err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"failed to write data to cert.pem: %s\", err)\n\t}\n\tif err := certOut.Close(); err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"error closing cert.pem: %s\", err)\n\t}\n\n\tkeyOut, err := ioutil.TempFile(\"\", \"key.pem\")\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"failed to open key.pem for writing: %v\", err)\n\t}\n\tprivBytes, err := x509.MarshalPKCS8PrivateKey(priv)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"unable to marshal private key: %v\", err)\n\t}\n\tif err := pem.Encode(keyOut, &pem.Block{Type: \"PRIVATE KEY\", Bytes: privBytes}); err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"failed to write data to key.pem: %s\", err)\n\t}\n\tif err := keyOut.Close(); err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"error closing key.pem: %s\", err)\n\t}\n\tif err := os.Chmod(keyOut.Name(), 0600); err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"could not change permissions on key.pem: %v\", err)\n\t}\n\treturn certOut.Name(), keyOut.Name(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\n\t\"github.com\/Microsoft\/hcsshim\/internal\/oci\"\n\t\"github.com\/Microsoft\/hcsshim\/internal\/uvm\"\n\teventstypes \"github.com\/containerd\/containerd\/api\/events\"\n\t\"github.com\/containerd\/containerd\/errdefs\"\n\t\"github.com\/containerd\/containerd\/runtime\"\n\t\"github.com\/containerd\/containerd\/runtime\/v2\/task\"\n\tspecs \"github.com\/opencontainers\/runtime-spec\/specs-go\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"golang.org\/x\/sync\/errgroup\"\n)\n\n\/\/ shimPod represents the logical grouping of all tasks in a single set of\n\/\/ shared namespaces. The pod sandbox (container) is represented by the task\n\/\/ that matches the `shimPod.ID()`\ntype shimPod interface {\n\t\/\/ ID is the id of the task representing the pause (sandbox) container.\n\tID() string\n\t\/\/ CreateTask creates a workload task within this pod named `tid` with\n\t\/\/ settings `s`.\n\t\/\/\n\t\/\/ If `tid==ID()` or `tid` is the same as any other task in this pod, this\n\t\/\/ pod MUST return `errdefs.ErrAlreadyExists`.\n\tCreateTask(ctx context.Context, req *task.CreateTaskRequest, s *specs.Spec) (shimTask, error)\n\t\/\/ GetTask returns a task in this pod that matches `tid`.\n\t\/\/\n\t\/\/ If `tid` is not found, this pod MUST return `errdefs.ErrNotFound`.\n\tGetTask(tid string) (shimTask, error)\n\t\/\/ KillTask sends `signal` to task that matches `tid`.\n\t\/\/\n\t\/\/ If `tid` is not found, this pod MUST return `errdefs.ErrNotFound`.\n\t\/\/\n\t\/\/ If `tid==ID() && eid == \"\" && all == true` this pod will send `signal` to\n\t\/\/ all tasks in the pod and lastly send `signal` to the sandbox itself.\n\t\/\/\n\t\/\/ If `all == true && eid != \"\"` this pod MUST return\n\t\/\/ `errdefs.ErrFailedPrecondition`.\n\t\/\/\n\t\/\/ A call to `KillTask` is only valid when the exec found by `tid,eid` is in\n\t\/\/ the `shimExecStateRunning` state. If the exec is not in this state this\n\t\/\/ pod MUST return `errdefs.ErrFailedPrecondition`.\n\tKillTask(ctx context.Context, tid, eid string, signal uint32, all bool) error\n}\n\nfunc createPod(ctx context.Context, events publisher, req *task.CreateTaskRequest, s *specs.Spec) (shimPod, error) {\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"tid\": req.ID,\n\t}).Debug(\"createPod\")\n\n\tct, sid, err := oci.GetSandboxTypeAndID(s.Annotations)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ct != oci.KubernetesContainerTypeSandbox {\n\t\treturn nil, errors.Wrapf(\n\t\t\terrdefs.ErrFailedPrecondition,\n\t\t\t\"expected annotation: '%s': '%s' got '%s'\",\n\t\t\toci.KubernetesContainerTypeAnnotation,\n\t\t\toci.KubernetesContainerTypeSandbox,\n\t\t\tct)\n\t}\n\tif sid != req.ID {\n\t\treturn nil, errors.Wrapf(\n\t\t\terrdefs.ErrFailedPrecondition,\n\t\t\t\"expected annotation '%s': '%s' got '%s'\",\n\t\t\toci.KubernetesSandboxIDAnnotation,\n\t\t\treq.ID,\n\t\t\tsid)\n\t}\n\n\towner, err := os.Executable()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar parent *uvm.UtilityVM\n\tif oci.IsIsolated(s) {\n\t\t\/\/ Create the UVM parent\n\t\topts, err := oci.SpecToUVMCreateOpts(s, fmt.Sprintf(\"%s@vm\", req.ID), owner)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tswitch opts.(type) {\n\t\tcase *uvm.OptionsLCOW:\n\t\t\tlopts := (opts).(*uvm.OptionsLCOW)\n\t\t\tparent, err = uvm.CreateLCOW(lopts)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase *uvm.OptionsWCOW:\n\t\t\twopts := (opts).(*uvm.OptionsWCOW)\n\n\t\t\t\/\/ In order for the UVM sandbox.vhdx not to collide with the actual\n\t\t\t\/\/ nested Argon sandbox.vhdx we append the \\vm folder to the last\n\t\t\t\/\/ entry in the list.\n\t\t\tlayersLen := len(s.Windows.LayerFolders)\n\t\t\tlayers := make([]string, layersLen)\n\t\t\tcopy(layers, s.Windows.LayerFolders)\n\n\t\t\tvmPath := filepath.Join(layers[layersLen-1], \"vm\")\n\t\t\terr := os.MkdirAll(vmPath, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tlayers[layersLen-1] = vmPath\n\t\t\twopts.LayerFolders = layers\n\n\t\t\tparent, err = uvm.CreateWCOW(wopts)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\terr = parent.Start()\n\t\tif err != nil {\n\t\t\tparent.Close()\n\t\t}\n\t} else if !oci.IsWCOW(s) {\n\t\treturn nil, errors.Wrap(errdefs.ErrFailedPrecondition, \"oci spec does not contain WCOW or LCOW spec\")\n\t}\n\n\tp := pod{\n\t\tevents: events,\n\t\tid: req.ID,\n\t\thost: parent,\n\t}\n\tif oci.IsWCOW(s) {\n\t\t\/\/ For WCOW we fake out the init task since we dont need it.\n\t\tp.sandboxTask = newWcowPodSandboxTask(ctx, events, req.ID, req.Bundle, parent)\n\t\t\/\/ Publish the created event. We only do this for the fake WCOW task a\n\t\t\/\/ HCS Task will event itself.\n\t\tevents(\n\t\t\truntime.TaskCreateEventTopic,\n\t\t\t&eventstypes.TaskCreate{\n\t\t\t\tContainerID: req.ID,\n\t\t\t\tBundle: req.Bundle,\n\t\t\t\tRootfs: req.Rootfs,\n\t\t\t\tIO: &eventstypes.TaskIO{\n\t\t\t\t\tStdin: req.Stdin,\n\t\t\t\t\tStdout: req.Stdout,\n\t\t\t\t\tStderr: req.Stderr,\n\t\t\t\t\tTerminal: req.Terminal,\n\t\t\t\t},\n\t\t\t\tCheckpoint: \"\",\n\t\t\t\tPid: 0,\n\t\t\t})\n\t} else {\n\t\t\/\/ LCOW requires a real task for the sandbox\n\t\tlt, err := newHcsTask(ctx, events, parent, true, req, s)\n\t\tif err != nil {\n\t\t\tparent.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\tp.sandboxTask = lt\n\t}\n\n\treturn &p, nil\n}\n\nvar _ = (shimPod)(&pod{})\n\ntype pod struct {\n\tevents publisher\n\t\/\/ id is the id of the sandbox task when the pod is created.\n\t\/\/\n\t\/\/ It MUST be treated as read only in the lifetime of the pod.\n\tid string\n\t\/\/ sandboxTask is the task that represents the sandbox.\n\t\/\/\n\t\/\/ Note: The invariant `id==sandboxTask.ID()` MUST be true.\n\t\/\/\n\t\/\/ It MUST be treated as read only in the lifetime of the pod.\n\tsandboxTask shimTask\n\t\/\/ host is the UtilityVM that is hosting `sandboxTask` if the task is\n\t\/\/ hypervisor isolated.\n\t\/\/\n\t\/\/ It MUST be treated as read only in the lifetime of the pod.\n\thost *uvm.UtilityVM\n\n\t\/\/ wcl is the worload create mutex. All calls to CreateTask must hold this\n\t\/\/ lock while the ID reservation takes place. Once the ID is held it is safe\n\t\/\/ to release the lock to allow concurrent creates.\n\twcl sync.Mutex\n\tworkloadTasks sync.Map\n}\n\nfunc (p *pod) ID() string {\n\treturn p.id\n}\n\nfunc (p *pod) CreateTask(ctx context.Context, req *task.CreateTaskRequest, s *specs.Spec) (shimTask, error) {\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"pod-id\": p.id,\n\t\t\"tid\": req.ID,\n\t}).Debug(\"pod::CreateTask\")\n\n\tif req.ID == p.id {\n\t\treturn nil, errors.Wrapf(errdefs.ErrAlreadyExists, \"task with id: '%s' already exists\", req.ID)\n\t}\n\te, _ := p.sandboxTask.GetExec(\"\")\n\tif e.State() != shimExecStateRunning {\n\t\treturn nil, errors.Wrapf(errdefs.ErrFailedPrecondition, \"task with id: '%s' cannot be created in pod: '%s' which is not running\", req.ID, p.id)\n\t}\n\n\tp.wcl.Lock()\n\t_, loaded := p.workloadTasks.LoadOrStore(req.ID, nil)\n\tif loaded {\n\t\treturn nil, errors.Wrapf(errdefs.ErrAlreadyExists, \"task with id: '%s' already exists id pod: '%s'\", req.ID, p.id)\n\t}\n\tp.wcl.Unlock()\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tp.workloadTasks.Delete(req.ID)\n\t\t}\n\t}()\n\n\tct, sid, err := oci.GetSandboxTypeAndID(s.Annotations)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ct != oci.KubernetesContainerTypeContainer {\n\t\treturn nil, errors.Wrapf(\n\t\t\terrdefs.ErrFailedPrecondition,\n\t\t\t\"expected annotation: '%s': '%s' got '%s'\",\n\t\t\toci.KubernetesContainerTypeAnnotation,\n\t\t\toci.KubernetesContainerTypeContainer,\n\t\t\tct)\n\t}\n\tif sid != p.id {\n\t\treturn nil, errors.Wrapf(\n\t\t\terrdefs.ErrFailedPrecondition,\n\t\t\t\"expected annotation '%s': '%s' got '%s'\",\n\t\t\toci.KubernetesSandboxIDAnnotation,\n\t\t\tp.id,\n\t\t\tsid)\n\t}\n\n\tst, err := newHcsTask(ctx, p.events, p.host, false, req, s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.workloadTasks.Store(req.ID, st)\n\treturn st, nil\n}\n\nfunc (p *pod) GetTask(tid string) (shimTask, error) {\n\tif tid == p.id {\n\t\treturn p.sandboxTask, nil\n\t}\n\traw, loaded := p.workloadTasks.Load(tid)\n\tif !loaded {\n\t\treturn nil, errors.Wrapf(errdefs.ErrNotFound, \"task with id: '%s' not found\", tid)\n\t}\n\treturn raw.(shimTask), nil\n}\n\nfunc (p *pod) KillTask(ctx context.Context, tid, eid string, signal uint32, all bool) error {\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"pod-id\": p.id,\n\t\t\"tid\": tid,\n\t\t\"eid\": eid,\n\t\t\"signal\": signal,\n\t\t\"all\": all,\n\t}).Debug(\"pod::KillTask\")\n\n\tt, err := p.GetTask(tid)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif all && eid != \"\" {\n\t\treturn errors.Wrapf(errdefs.ErrFailedPrecondition, \"cannot signal all with non empty ExecID: '%s'\", eid)\n\t}\n\teg := errgroup.Group{}\n\tif all && tid == p.id {\n\t\t\/\/ We are in a kill all on the sandbox task. Signal everything.\n\t\tp.workloadTasks.Range(func(key, value interface{}) bool {\n\t\t\twt := value.(shimTask)\n\t\t\teg.Go(func() error {\n\t\t\t\treturn wt.KillExec(ctx, eid, signal, all)\n\t\t\t})\n\n\t\t\t\/\/ iterate all\n\t\t\treturn false\n\t\t})\n\t}\n\teg.Go(func() error {\n\t\treturn t.KillExec(ctx, eid, signal, all)\n\t})\n\treturn eg.Wait()\n}\n<commit_msg>Pod Kill all should skip exited tasks<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\n\t\"github.com\/Microsoft\/hcsshim\/internal\/oci\"\n\t\"github.com\/Microsoft\/hcsshim\/internal\/uvm\"\n\teventstypes \"github.com\/containerd\/containerd\/api\/events\"\n\t\"github.com\/containerd\/containerd\/errdefs\"\n\t\"github.com\/containerd\/containerd\/runtime\"\n\t\"github.com\/containerd\/containerd\/runtime\/v2\/task\"\n\tspecs \"github.com\/opencontainers\/runtime-spec\/specs-go\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"golang.org\/x\/sync\/errgroup\"\n)\n\n\/\/ shimPod represents the logical grouping of all tasks in a single set of\n\/\/ shared namespaces. The pod sandbox (container) is represented by the task\n\/\/ that matches the `shimPod.ID()`\ntype shimPod interface {\n\t\/\/ ID is the id of the task representing the pause (sandbox) container.\n\tID() string\n\t\/\/ CreateTask creates a workload task within this pod named `tid` with\n\t\/\/ settings `s`.\n\t\/\/\n\t\/\/ If `tid==ID()` or `tid` is the same as any other task in this pod, this\n\t\/\/ pod MUST return `errdefs.ErrAlreadyExists`.\n\tCreateTask(ctx context.Context, req *task.CreateTaskRequest, s *specs.Spec) (shimTask, error)\n\t\/\/ GetTask returns a task in this pod that matches `tid`.\n\t\/\/\n\t\/\/ If `tid` is not found, this pod MUST return `errdefs.ErrNotFound`.\n\tGetTask(tid string) (shimTask, error)\n\t\/\/ KillTask sends `signal` to task that matches `tid`.\n\t\/\/\n\t\/\/ If `tid` is not found, this pod MUST return `errdefs.ErrNotFound`.\n\t\/\/\n\t\/\/ If `tid==ID() && eid == \"\" && all == true` this pod will send `signal` to\n\t\/\/ all tasks in the pod and lastly send `signal` to the sandbox itself.\n\t\/\/\n\t\/\/ If `all == true && eid != \"\"` this pod MUST return\n\t\/\/ `errdefs.ErrFailedPrecondition`.\n\t\/\/\n\t\/\/ A call to `KillTask` is only valid when the exec found by `tid,eid` is in\n\t\/\/ the `shimExecStateRunning` state. If the exec is not in this state this\n\t\/\/ pod MUST return `errdefs.ErrFailedPrecondition`.\n\tKillTask(ctx context.Context, tid, eid string, signal uint32, all bool) error\n}\n\nfunc createPod(ctx context.Context, events publisher, req *task.CreateTaskRequest, s *specs.Spec) (shimPod, error) {\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"tid\": req.ID,\n\t}).Debug(\"createPod\")\n\n\tct, sid, err := oci.GetSandboxTypeAndID(s.Annotations)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ct != oci.KubernetesContainerTypeSandbox {\n\t\treturn nil, errors.Wrapf(\n\t\t\terrdefs.ErrFailedPrecondition,\n\t\t\t\"expected annotation: '%s': '%s' got '%s'\",\n\t\t\toci.KubernetesContainerTypeAnnotation,\n\t\t\toci.KubernetesContainerTypeSandbox,\n\t\t\tct)\n\t}\n\tif sid != req.ID {\n\t\treturn nil, errors.Wrapf(\n\t\t\terrdefs.ErrFailedPrecondition,\n\t\t\t\"expected annotation '%s': '%s' got '%s'\",\n\t\t\toci.KubernetesSandboxIDAnnotation,\n\t\t\treq.ID,\n\t\t\tsid)\n\t}\n\n\towner, err := os.Executable()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar parent *uvm.UtilityVM\n\tif oci.IsIsolated(s) {\n\t\t\/\/ Create the UVM parent\n\t\topts, err := oci.SpecToUVMCreateOpts(s, fmt.Sprintf(\"%s@vm\", req.ID), owner)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tswitch opts.(type) {\n\t\tcase *uvm.OptionsLCOW:\n\t\t\tlopts := (opts).(*uvm.OptionsLCOW)\n\t\t\tparent, err = uvm.CreateLCOW(lopts)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase *uvm.OptionsWCOW:\n\t\t\twopts := (opts).(*uvm.OptionsWCOW)\n\n\t\t\t\/\/ In order for the UVM sandbox.vhdx not to collide with the actual\n\t\t\t\/\/ nested Argon sandbox.vhdx we append the \\vm folder to the last\n\t\t\t\/\/ entry in the list.\n\t\t\tlayersLen := len(s.Windows.LayerFolders)\n\t\t\tlayers := make([]string, layersLen)\n\t\t\tcopy(layers, s.Windows.LayerFolders)\n\n\t\t\tvmPath := filepath.Join(layers[layersLen-1], \"vm\")\n\t\t\terr := os.MkdirAll(vmPath, 0)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tlayers[layersLen-1] = vmPath\n\t\t\twopts.LayerFolders = layers\n\n\t\t\tparent, err = uvm.CreateWCOW(wopts)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\terr = parent.Start()\n\t\tif err != nil {\n\t\t\tparent.Close()\n\t\t}\n\t} else if !oci.IsWCOW(s) {\n\t\treturn nil, errors.Wrap(errdefs.ErrFailedPrecondition, \"oci spec does not contain WCOW or LCOW spec\")\n\t}\n\n\tp := pod{\n\t\tevents: events,\n\t\tid: req.ID,\n\t\thost: parent,\n\t}\n\tif oci.IsWCOW(s) {\n\t\t\/\/ For WCOW we fake out the init task since we dont need it.\n\t\tp.sandboxTask = newWcowPodSandboxTask(ctx, events, req.ID, req.Bundle, parent)\n\t\t\/\/ Publish the created event. We only do this for the fake WCOW task a\n\t\t\/\/ HCS Task will event itself.\n\t\tevents(\n\t\t\truntime.TaskCreateEventTopic,\n\t\t\t&eventstypes.TaskCreate{\n\t\t\t\tContainerID: req.ID,\n\t\t\t\tBundle: req.Bundle,\n\t\t\t\tRootfs: req.Rootfs,\n\t\t\t\tIO: &eventstypes.TaskIO{\n\t\t\t\t\tStdin: req.Stdin,\n\t\t\t\t\tStdout: req.Stdout,\n\t\t\t\t\tStderr: req.Stderr,\n\t\t\t\t\tTerminal: req.Terminal,\n\t\t\t\t},\n\t\t\t\tCheckpoint: \"\",\n\t\t\t\tPid: 0,\n\t\t\t})\n\t} else {\n\t\t\/\/ LCOW requires a real task for the sandbox\n\t\tlt, err := newHcsTask(ctx, events, parent, true, req, s)\n\t\tif err != nil {\n\t\t\tparent.Close()\n\t\t\treturn nil, err\n\t\t}\n\t\tp.sandboxTask = lt\n\t}\n\n\treturn &p, nil\n}\n\nvar _ = (shimPod)(&pod{})\n\ntype pod struct {\n\tevents publisher\n\t\/\/ id is the id of the sandbox task when the pod is created.\n\t\/\/\n\t\/\/ It MUST be treated as read only in the lifetime of the pod.\n\tid string\n\t\/\/ sandboxTask is the task that represents the sandbox.\n\t\/\/\n\t\/\/ Note: The invariant `id==sandboxTask.ID()` MUST be true.\n\t\/\/\n\t\/\/ It MUST be treated as read only in the lifetime of the pod.\n\tsandboxTask shimTask\n\t\/\/ host is the UtilityVM that is hosting `sandboxTask` if the task is\n\t\/\/ hypervisor isolated.\n\t\/\/\n\t\/\/ It MUST be treated as read only in the lifetime of the pod.\n\thost *uvm.UtilityVM\n\n\t\/\/ wcl is the worload create mutex. All calls to CreateTask must hold this\n\t\/\/ lock while the ID reservation takes place. Once the ID is held it is safe\n\t\/\/ to release the lock to allow concurrent creates.\n\twcl sync.Mutex\n\tworkloadTasks sync.Map\n}\n\nfunc (p *pod) ID() string {\n\treturn p.id\n}\n\nfunc (p *pod) CreateTask(ctx context.Context, req *task.CreateTaskRequest, s *specs.Spec) (shimTask, error) {\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"pod-id\": p.id,\n\t\t\"tid\": req.ID,\n\t}).Debug(\"pod::CreateTask\")\n\n\tif req.ID == p.id {\n\t\treturn nil, errors.Wrapf(errdefs.ErrAlreadyExists, \"task with id: '%s' already exists\", req.ID)\n\t}\n\te, _ := p.sandboxTask.GetExec(\"\")\n\tif e.State() != shimExecStateRunning {\n\t\treturn nil, errors.Wrapf(errdefs.ErrFailedPrecondition, \"task with id: '%s' cannot be created in pod: '%s' which is not running\", req.ID, p.id)\n\t}\n\n\tp.wcl.Lock()\n\t_, loaded := p.workloadTasks.LoadOrStore(req.ID, nil)\n\tif loaded {\n\t\treturn nil, errors.Wrapf(errdefs.ErrAlreadyExists, \"task with id: '%s' already exists id pod: '%s'\", req.ID, p.id)\n\t}\n\tp.wcl.Unlock()\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tp.workloadTasks.Delete(req.ID)\n\t\t}\n\t}()\n\n\tct, sid, err := oci.GetSandboxTypeAndID(s.Annotations)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ct != oci.KubernetesContainerTypeContainer {\n\t\treturn nil, errors.Wrapf(\n\t\t\terrdefs.ErrFailedPrecondition,\n\t\t\t\"expected annotation: '%s': '%s' got '%s'\",\n\t\t\toci.KubernetesContainerTypeAnnotation,\n\t\t\toci.KubernetesContainerTypeContainer,\n\t\t\tct)\n\t}\n\tif sid != p.id {\n\t\treturn nil, errors.Wrapf(\n\t\t\terrdefs.ErrFailedPrecondition,\n\t\t\t\"expected annotation '%s': '%s' got '%s'\",\n\t\t\toci.KubernetesSandboxIDAnnotation,\n\t\t\tp.id,\n\t\t\tsid)\n\t}\n\n\tst, err := newHcsTask(ctx, p.events, p.host, false, req, s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tp.workloadTasks.Store(req.ID, st)\n\treturn st, nil\n}\n\nfunc (p *pod) GetTask(tid string) (shimTask, error) {\n\tif tid == p.id {\n\t\treturn p.sandboxTask, nil\n\t}\n\traw, loaded := p.workloadTasks.Load(tid)\n\tif !loaded {\n\t\treturn nil, errors.Wrapf(errdefs.ErrNotFound, \"task with id: '%s' not found\", tid)\n\t}\n\treturn raw.(shimTask), nil\n}\n\nfunc (p *pod) KillTask(ctx context.Context, tid, eid string, signal uint32, all bool) error {\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"pod-id\": p.id,\n\t\t\"tid\": tid,\n\t\t\"eid\": eid,\n\t\t\"signal\": signal,\n\t\t\"all\": all,\n\t}).Debug(\"pod::KillTask\")\n\n\tt, err := p.GetTask(tid)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif all && eid != \"\" {\n\t\treturn errors.Wrapf(errdefs.ErrFailedPrecondition, \"cannot signal all with non empty ExecID: '%s'\", eid)\n\t}\n\teg := errgroup.Group{}\n\tif all && tid == p.id {\n\t\t\/\/ We are in a kill all on the sandbox task. Signal everything.\n\t\tp.workloadTasks.Range(func(key, value interface{}) bool {\n\t\t\twt := value.(shimTask)\n\t\t\tie, _ := wt.GetExec(\"\")\n\t\t\t\/\/ Only send the kill signal to non-exited tasks when in the `all`\n\t\t\t\/\/ case.\n\t\t\tif ie.State() != shimExecStateExited {\n\t\t\t\teg.Go(func() error {\n\t\t\t\t\treturn wt.KillExec(ctx, eid, signal, all)\n\t\t\t\t})\n\t\t\t}\n\n\t\t\t\/\/ iterate all\n\t\t\treturn false\n\t\t})\n\t}\n\teg.Go(func() error {\n\t\treturn t.KillExec(ctx, eid, signal, all)\n\t})\n\treturn eg.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\/\/\"runtime\/pprof\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/livepeer\/go-livepeer\/common\"\n\t\"github.com\/livepeer\/lpms\/ffmpeg\"\n\t\"github.com\/livepeer\/m3u8\"\n\t\"github.com\/olekukonko\/tablewriter\"\n)\n\nfunc main() {\n\t\/\/ Override the default flag set since there are dependencies that\n\t\/\/ incorrectly add their own flags (specifically, due to the 'testing'\n\t\/\/ package being linked)\n\tflag.Set(\"logtostderr\", \"true\")\n\tflag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)\n\n\tin := flag.String(\"in\", \"\", \"Input m3u8 manifest file\")\n\tlive := flag.Bool(\"live\", true, \"Simulate live stream\")\n\tconcurrentSessions := flag.Int(\"concurrentSessions\", 1, \"# of concurrent transcode sessions\")\n\tsegs := flag.Int(\"segs\", 0, \"Maximum # of segments to transcode (default all)\")\n\ttranscodingOptions := flag.String(\"transcodingOptions\", \"P240p30fps16x9,P360p30fps16x9,P720p30fps16x9\", \"Transcoding options for broadcast job, or path to json config\")\n\tnvidia := flag.String(\"nvidia\", \"\", \"Comma-separated list of Nvidia GPU device IDs (or \\\"all\\\" for all available devices)\")\n\toutPrefix := flag.String(\"outPrefix\", \"\", \"Output segments' prefix (no segments are generated by default)\")\n\n\tflag.Parse()\n\n\tif *in == \"\" {\n\t\tglog.Errorf(\"Please provide the input manifest as `%s -in <input.m3u8>`\", os.Args[0])\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tprofiles := parseVideoProfiles(*transcodingOptions)\n\n\tf, err := os.Open(*in)\n\tif err != nil {\n\t\tglog.Fatal(\"Couldn't open input manifest: \", err)\n\t}\n\tp, _, err := m3u8.DecodeFrom(bufio.NewReader(f), true)\n\tif err != nil {\n\t\tglog.Fatal(\"Couldn't decode input manifest: \", err)\n\t}\n\tpl, ok := p.(*m3u8.MediaPlaylist)\n\tif !ok {\n\t\tglog.Fatalf(\"Expecting media playlist in the input %s\", *in)\n\t}\n\n\taccel := ffmpeg.Software\n\tdevices := []string{}\n\tif *nvidia != \"\" {\n\t\tvar err error\n\t\taccel = ffmpeg.Nvidia\n\t\tdevices, err = common.ParseNvidiaDevices(*nvidia)\n\t\tif err != nil {\n\t\t\tglog.Fatalf(\"Error while parsing '-nvidia %v' flag: %v\", *nvidia, err)\n\t\t}\n\t}\n\n\tffmpeg.InitFFmpeg()\n\tvar wg sync.WaitGroup\n\tdir := path.Dir(*in)\n\n\ttable := tablewriter.NewWriter(os.Stderr)\n\tdata := [][]string{\n\t\t{\"Source File\", *in},\n\t\t{\"Transcoding Options\", *transcodingOptions},\n\t\t{\"Concurrent Sessions\", fmt.Sprintf(\"%v\", *concurrentSessions)},\n\t\t{\"Live Mode\", fmt.Sprintf(\"%v\", *live)},\n\t}\n\n\tif accel == ffmpeg.Nvidia && len(devices) > 0 {\n\t\tdata = append(data, []string{\"Nvidia GPU IDs\", fmt.Sprintf(\"%v\", strings.Join(devices, \",\"))})\n\t}\n\n\ttable.SetAlignment(tablewriter.ALIGN_LEFT)\n\ttable.SetCenterSeparator(\"*\")\n\ttable.SetColumnSeparator(\"|\")\n\ttable.AppendBulk(data)\n\ttable.Render()\n\n\tfmt.Println(\"timestamp,session,segment,seg_dur,transcode_time\")\n\tsegCount := 0\n\trealTimeSegCount := 0\n\tsrcDur := 0.0\n\tvar mu sync.Mutex\n\ttranscodeDur := 0.0\n\tfor i := 0; i < *concurrentSessions; i++ {\n\t\twg.Add(1)\n\t\tgo func(k int, wg *sync.WaitGroup) {\n\t\t\ttc := ffmpeg.NewTranscoder()\n\t\t\tfor j, v := range pl.Segments {\n\t\t\t\titerStart := time.Now()\n\t\t\t\tif *segs > 0 && j >= *segs {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif v == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tu := path.Join(dir, v.URI)\n\t\t\t\tin := &ffmpeg.TranscodeOptionsIn{\n\t\t\t\t\tFname: u,\n\t\t\t\t\tAccel: accel,\n\t\t\t\t}\n\t\t\t\tif ffmpeg.Software != accel {\n\t\t\t\t\tin.Device = devices[k%len(devices)]\n\t\t\t\t}\n\t\t\t\tprofs2opts := func(profs []ffmpeg.VideoProfile) []ffmpeg.TranscodeOptions {\n\t\t\t\t\topts := []ffmpeg.TranscodeOptions{}\n\t\t\t\t\tfor n, p := range profs {\n\t\t\t\t\t\toname := \"\"\n\t\t\t\t\t\tmuxer := \"\"\n\t\t\t\t\t\tif *outPrefix != \"\" {\n\t\t\t\t\t\t\toname = fmt.Sprintf(\"%s_%s_%d_%d_%d.ts\", *outPrefix, p.Name, n, k, j)\n\t\t\t\t\t\t\tmuxer = \"mpegts\"\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\toname = \"-\"\n\t\t\t\t\t\t\tmuxer = \"null\"\n\t\t\t\t\t\t}\n\t\t\t\t\t\to := ffmpeg.TranscodeOptions{\n\t\t\t\t\t\t\tOname: oname,\n\t\t\t\t\t\t\tProfile: p,\n\t\t\t\t\t\t\tAccel: accel,\n\t\t\t\t\t\t\tAudioEncoder: ffmpeg.ComponentOptions{Name: \"drop\"},\n\t\t\t\t\t\t\tMuxer: ffmpeg.ComponentOptions{Name: muxer},\n\t\t\t\t\t\t}\n\t\t\t\t\t\topts = append(opts, o)\n\t\t\t\t\t}\n\t\t\t\t\treturn opts\n\t\t\t\t}\n\t\t\t\tout := profs2opts(profiles)\n\t\t\t\tt := time.Now()\n\t\t\t\t_, err := tc.Transcode(in, out)\n\t\t\t\tend := time.Now()\n\t\t\t\tif err != nil {\n\t\t\t\t\tglog.Fatalf(\"Transcoding failed for session %d segment %d: %v\", k, j, err)\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"%s,%d,%d,%0.4v,%0.4v\\n\", end.Format(\"2006-01-02 15:04:05.9999\"), k, j, v.Duration, end.Sub(t).Seconds())\n\t\t\t\tsegTxDur := end.Sub(t).Seconds()\n\t\t\t\tmu.Lock()\n\t\t\t\ttranscodeDur += segTxDur\n\t\t\t\tsrcDur += v.Duration\n\t\t\t\tsegCount++\n\t\t\t\tif segTxDur <= v.Duration {\n\t\t\t\t\trealTimeSegCount += 1\n\t\t\t\t}\n\t\t\t\tmu.Unlock()\n\t\t\t\titerEnd := time.Now()\n\t\t\t\tsegDur := time.Duration(v.Duration * float64(time.Second))\n\t\t\t\tif *live {\n\t\t\t\t\ttime.Sleep(segDur - iterEnd.Sub(iterStart))\n\t\t\t\t}\n\t\t\t}\n\t\t\ttc.StopTranscoder()\n\t\t\twg.Done()\n\t\t}(i, &wg)\n\t\ttime.Sleep(300 * time.Millisecond)\n\t}\n\twg.Wait()\n\tif segCount == 0 || srcDur == 0.0 {\n\t\tglog.Fatal(\"Input manifest has no segments or total duration is 0s\")\n\t}\n\tstatsTable := tablewriter.NewWriter(os.Stderr)\n\tstats := [][]string{\n\t\t{\"Concurrent Sessions\", fmt.Sprintf(\"%v\", *concurrentSessions)},\n\t\t{\"Total Segs Transcoded\", fmt.Sprintf(\"%v\", segCount)},\n\t\t{\"Real-Time Segs Transcoded\", fmt.Sprintf(\"%v\", realTimeSegCount)},\n\t\t{\"* Real-Time Segs Ratio *\", fmt.Sprintf(\"%0.4v\", float64(realTimeSegCount)\/float64(segCount))},\n\t\t{\"Total Source Duration\", fmt.Sprintf(\"%vs\", srcDur)},\n\t\t{\"Total Transcoding Duration\", fmt.Sprintf(\"%vs\", transcodeDur)},\n\t\t{\"* Real-Time Duration Ratio *\", fmt.Sprintf(\"%0.4v\", transcodeDur\/srcDur)},\n\t}\n\n\tstatsTable.SetAlignment(tablewriter.ALIGN_LEFT)\n\tstatsTable.SetCenterSeparator(\"*\")\n\tstatsTable.SetColumnSeparator(\"|\")\n\tstatsTable.AppendBulk(stats)\n\tstatsTable.Render()\n}\n\nfunc parseVideoProfiles(inp string) []ffmpeg.VideoProfile {\n\ttype profilesJson struct {\n\t\tProfiles []struct {\n\t\t\tName string `json:\"name\"`\n\t\t\tWidth int `json:\"width\"`\n\t\t\tHeight int `json:\"height\"`\n\t\t\tBitrate int `json:\"bitrate\"`\n\t\t\tFPS uint `json:\"fps\"`\n\t\t\tFPSDen uint `json:\"fpsDen\"`\n\t\t\tProfile string `json:\"profile\"`\n\t\t\tGOP string `json:\"gop\"`\n\t\t} `json:\"profiles\"`\n\t}\n\tprofs := []ffmpeg.VideoProfile{}\n\tif inp != \"\" {\n\t\t\/\/ try opening up json file with profiles\n\t\tcontent, err := ioutil.ReadFile(inp)\n\t\tif err == nil && len(content) > 0 {\n\t\t\t\/\/ parse json profiles\n\t\t\tresp := &profilesJson{}\n\t\t\terr = json.Unmarshal(content, &resp.Profiles)\n\t\t\tif err != nil {\n\t\t\t\tglog.Fatal(\"Unable to unmarshal the passed transcoding option: \", err)\n\t\t\t}\n\t\t\tfor _, profile := range resp.Profiles {\n\t\t\t\tname := profile.Name\n\t\t\t\tif name == \"\" {\n\t\t\t\t\tname = \"custom_\" + common.DefaultProfileName(\n\t\t\t\t\t\tprofile.Width,\n\t\t\t\t\t\tprofile.Height,\n\t\t\t\t\t\tprofile.Bitrate)\n\t\t\t\t}\n\t\t\t\tvar gop time.Duration\n\t\t\t\tif profile.GOP != \"\" {\n\t\t\t\t\tif profile.GOP == \"intra\" {\n\t\t\t\t\t\tgop = ffmpeg.GOPIntraOnly\n\t\t\t\t\t} else {\n\t\t\t\t\t\tgopFloat, err := strconv.ParseFloat(profile.GOP, 64)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tglog.Fatal(\"Cannot parse the GOP value in the transcoding options: \", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif gopFloat <= 0.0 {\n\t\t\t\t\t\t\tglog.Fatalf(\"Invalid gop value %f. Please set it to a positive value\", gopFloat)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tgop = time.Duration(gopFloat * float64(time.Second))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tencodingProfile, err := common.EncoderProfileNameToValue(profile.Profile)\n\t\t\t\tif err != nil {\n\t\t\t\t\tglog.Fatal(\"Unable to parse the H264 encoder profile: \", err)\n\t\t\t\t}\n\t\t\t\tprof := ffmpeg.VideoProfile{\n\t\t\t\t\tName: name,\n\t\t\t\t\tBitrate: fmt.Sprint(profile.Bitrate),\n\t\t\t\t\tFramerate: profile.FPS,\n\t\t\t\t\tFramerateDen: profile.FPSDen,\n\t\t\t\t\tResolution: fmt.Sprintf(\"%dx%d\", profile.Width, profile.Height),\n\t\t\t\t\tProfile: encodingProfile,\n\t\t\t\t\tGOP: gop,\n\t\t\t\t}\n\t\t\t\tprofs = append(profs, prof)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ check the built-in profiles\n\t\t\tprofs = make([]ffmpeg.VideoProfile, 0)\n\t\t\tpresets := strings.Split(inp, \",\")\n\t\t\tfor _, v := range presets {\n\t\t\t\tif p, ok := ffmpeg.VideoProfileLookup[strings.TrimSpace(v)]; ok {\n\t\t\t\t\tprofs = append(profs, p)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(profs) <= 0 {\n\t\t\tglog.Fatalf(\"No transcoding options provided\")\n\t\t}\n\t}\n\treturn profs\n}\n<commit_msg>cmd\/livepeer_bench: Benchmark content detection<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\/\/\"runtime\/pprof\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/livepeer\/go-livepeer\/common\"\n\t\"github.com\/livepeer\/lpms\/ffmpeg\"\n\t\"github.com\/livepeer\/m3u8\"\n\t\"github.com\/olekukonko\/tablewriter\"\n)\n\nfunc main() {\n\t\/\/ Override the default flag set since there are dependencies that\n\t\/\/ incorrectly add their own flags (specifically, due to the 'testing'\n\t\/\/ package being linked)\n\tflag.Set(\"logtostderr\", \"true\")\n\tflag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError)\n\n\tin := flag.String(\"in\", \"\", \"Input m3u8 manifest file\")\n\tlive := flag.Bool(\"live\", true, \"Simulate live stream\")\n\tconcurrentSessions := flag.Int(\"concurrentSessions\", 1, \"# of concurrent transcode sessions\")\n\tsegs := flag.Int(\"segs\", 0, \"Maximum # of segments to transcode (default all)\")\n\ttranscodingOptions := flag.String(\"transcodingOptions\", \"P240p30fps16x9,P360p30fps16x9,P720p30fps16x9\", \"Transcoding options for broadcast job, or path to json config\")\n\tnvidia := flag.String(\"nvidia\", \"\", \"Comma-separated list of Nvidia GPU device IDs (or \\\"all\\\" for all available devices)\")\n\toutPrefix := flag.String(\"outPrefix\", \"\", \"Output segments' prefix (no segments are generated by default)\")\n\tdetectionFreq := flag.Int(\"detectionFreq\", 0, \"Run content-detection on every nth segment. No detection occurs for default frequency of 0.\")\n\tdetectionSampleRate := flag.Uint(\"detectionSampleRate\", 1, \"Run content-detection on every nth frame of a particular segment, if detectionFreq > 0.\")\n\n\tflag.Parse()\n\n\tif *in == \"\" {\n\t\tglog.Errorf(\"Please provide the input manifest as `%s -in <input.m3u8>`\", os.Args[0])\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tprofiles := parseVideoProfiles(*transcodingOptions)\n\n\tf, err := os.Open(*in)\n\tif err != nil {\n\t\tglog.Fatal(\"Couldn't open input manifest: \", err)\n\t}\n\tp, _, err := m3u8.DecodeFrom(bufio.NewReader(f), true)\n\tif err != nil {\n\t\tglog.Fatal(\"Couldn't decode input manifest: \", err)\n\t}\n\tpl, ok := p.(*m3u8.MediaPlaylist)\n\tif !ok {\n\t\tglog.Fatalf(\"Expecting media playlist in the input %s\", *in)\n\t}\n\n\taccel := ffmpeg.Software\n\tdevices := []string{}\n\tif *nvidia != \"\" {\n\t\tvar err error\n\t\taccel = ffmpeg.Nvidia\n\t\tdevices, err = common.ParseNvidiaDevices(*nvidia)\n\t\tif err != nil {\n\t\t\tglog.Fatalf(\"Error while parsing '-nvidia %v' flag: %v\", *nvidia, err)\n\t\t}\n\t}\n\tvar wg sync.WaitGroup\n\tdir := path.Dir(*in)\n\n\ttable := tablewriter.NewWriter(os.Stderr)\n\tdata := [][]string{\n\t\t{\"Source File\", *in},\n\t\t{\"Transcoding Options\", *transcodingOptions},\n\t\t{\"Concurrent Sessions\", fmt.Sprintf(\"%v\", *concurrentSessions)},\n\t\t{\"Live Mode\", fmt.Sprintf(\"%v\", *live)},\n\t}\n\n\tif accel == ffmpeg.Nvidia && len(devices) > 0 {\n\t\tdata = append(data, []string{\"Nvidia GPU IDs\", fmt.Sprintf(\"%v\", strings.Join(devices, \",\"))})\n\t}\n\tif *detectionFreq > 0 {\n\t\tdata = append(data, []string{\"Content Detection (segment_freq,frame_sample_rate)\", fmt.Sprintf(\"%v,%v\", *detectionFreq, *detectionSampleRate)})\n\t}\n\n\ttable.SetAlignment(tablewriter.ALIGN_LEFT)\n\ttable.SetCenterSeparator(\"*\")\n\ttable.SetColumnSeparator(\"|\")\n\ttable.AppendBulk(data)\n\ttable.Render()\n\n\tdetectionOpts := ffmpeg.TranscodeOptions{\n\t\tAccel: accel,\n\t\tAudioEncoder: ffmpeg.ComponentOptions{Name: \"copy\"},\n\t\tDetector: &ffmpeg.SceneClassificationProfile{\n\t\t\tSampleRate: *detectionSampleRate,\n\t\t\tInput: ffmpeg.DSceneAdultSoccer.Input,\n\t\t\tOutput: ffmpeg.DSceneAdultSoccer.Output,\n\t\t\tClasses: ffmpeg.DSceneAdultSoccer.Classes,\n\t\t},\n\t}\n\tif *detectionFreq > 0 {\n\t\terr = ffmpeg.InitFFmpegWithDetectorProfile(detectionOpts.Detector, *nvidia)\n\t\tif err != nil {\n\t\t\tglog.Fatalf(\"Could not initialize detector profiles\")\n\t\t}\n\t\tdefer ffmpeg.ReleaseFFmpegDetectorProfile()\n\t\tfmt.Println(\"timestamp,session,segment,seg_dur,transcode_time,detect_data\")\n\t} else {\n\t\tffmpeg.InitFFmpeg()\n\t\tfmt.Println(\"timestamp,session,segment,seg_dur,transcode_time\")\n\t}\n\n\tsegCount := 0\n\trealTimeSegCount := 0\n\tsrcDur := 0.0\n\tvar mu sync.Mutex\n\ttranscodeDur := 0.0\n\tfor i := 0; i < *concurrentSessions; i++ {\n\t\twg.Add(1)\n\t\tgo func(k int, wg *sync.WaitGroup) {\n\t\t\ttc := ffmpeg.NewTranscoder()\n\t\t\tfor j, v := range pl.Segments {\n\t\t\t\titerStart := time.Now()\n\t\t\t\tif *segs > 0 && j >= *segs {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif v == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tu := path.Join(dir, v.URI)\n\t\t\t\tin := &ffmpeg.TranscodeOptionsIn{\n\t\t\t\t\tFname: u,\n\t\t\t\t\tAccel: accel,\n\t\t\t\t}\n\t\t\t\tif ffmpeg.Software != accel {\n\t\t\t\t\tin.Device = devices[k%len(devices)]\n\t\t\t\t}\n\t\t\t\tprofs2opts := func(profs []ffmpeg.VideoProfile) []ffmpeg.TranscodeOptions {\n\t\t\t\t\topts := []ffmpeg.TranscodeOptions{}\n\t\t\t\t\tfor n, p := range profs {\n\t\t\t\t\t\toname := \"\"\n\t\t\t\t\t\tmuxer := \"\"\n\t\t\t\t\t\tif *outPrefix != \"\" {\n\t\t\t\t\t\t\toname = fmt.Sprintf(\"%s_%s_%d_%d_%d.ts\", *outPrefix, p.Name, n, k, j)\n\t\t\t\t\t\t\tmuxer = \"mpegts\"\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\toname = \"-\"\n\t\t\t\t\t\t\tmuxer = \"null\"\n\t\t\t\t\t\t}\n\t\t\t\t\t\to := ffmpeg.TranscodeOptions{\n\t\t\t\t\t\t\tOname: oname,\n\t\t\t\t\t\t\tProfile: p,\n\t\t\t\t\t\t\tAccel: accel,\n\t\t\t\t\t\t\tAudioEncoder: ffmpeg.ComponentOptions{Name: \"copy\"},\n\t\t\t\t\t\t\tMuxer: ffmpeg.ComponentOptions{Name: muxer},\n\t\t\t\t\t\t}\n\t\t\t\t\t\topts = append(opts, o)\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ add detector profile if freq > 0\n\t\t\t\t\tif *detectionFreq > 0 && j%*detectionFreq == 0 {\n\t\t\t\t\t\topts = append(opts, detectionOpts)\n\t\t\t\t\t}\n\t\t\t\t\treturn opts\n\t\t\t\t}\n\t\t\t\tout := profs2opts(profiles)\n\t\t\t\tt := time.Now()\n\t\t\t\tres, err := tc.Transcode(in, out)\n\t\t\t\tend := time.Now()\n\t\t\t\tif err != nil {\n\t\t\t\t\tglog.Fatalf(\"Transcoding failed for session %d segment %d: %v\", k, j, err)\n\t\t\t\t}\n\t\t\t\tif *detectionFreq > 0 && j%*detectionFreq == 0 {\n\t\t\t\t\tfmt.Printf(\"%s,%d,%d,%0.4v,%0.4v,%v\\n\", end.Format(\"2006-01-02 15:04:05.9999\"), k, j, v.Duration, end.Sub(t).Seconds(), res.Encoded[len(res.Encoded)-1].DetectData)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Printf(\"%s,%d,%d,%0.4v,%0.4v\\n\", end.Format(\"2006-01-02 15:04:05.9999\"), k, j, v.Duration, end.Sub(t).Seconds())\n\t\t\t\t}\n\t\t\t\tsegTxDur := end.Sub(t).Seconds()\n\t\t\t\tmu.Lock()\n\t\t\t\ttranscodeDur += segTxDur\n\t\t\t\tsrcDur += v.Duration\n\t\t\t\tsegCount++\n\t\t\t\tif segTxDur <= v.Duration {\n\t\t\t\t\trealTimeSegCount += 1\n\t\t\t\t}\n\t\t\t\tmu.Unlock()\n\t\t\t\titerEnd := time.Now()\n\t\t\t\tsegDur := time.Duration(v.Duration * float64(time.Second))\n\t\t\t\tif *live {\n\t\t\t\t\ttime.Sleep(segDur - iterEnd.Sub(iterStart))\n\t\t\t\t}\n\t\t\t}\n\t\t\ttc.StopTranscoder()\n\t\t\twg.Done()\n\t\t}(i, &wg)\n\t\ttime.Sleep(2300 * time.Millisecond) \/\/ wait for at least one segment before moving on to the next session\n\t}\n\twg.Wait()\n\tif segCount == 0 || srcDur == 0.0 {\n\t\tglog.Fatal(\"Input manifest has no segments or total duration is 0s\")\n\t}\n\tstatsTable := tablewriter.NewWriter(os.Stderr)\n\tstats := [][]string{\n\t\t{\"Concurrent Sessions\", fmt.Sprintf(\"%v\", *concurrentSessions)},\n\t\t{\"Total Segs Transcoded\", fmt.Sprintf(\"%v\", segCount)},\n\t\t{\"Real-Time Segs Transcoded\", fmt.Sprintf(\"%v\", realTimeSegCount)},\n\t\t{\"* Real-Time Segs Ratio *\", fmt.Sprintf(\"%0.4v\", float64(realTimeSegCount)\/float64(segCount))},\n\t\t{\"Total Source Duration\", fmt.Sprintf(\"%vs\", srcDur)},\n\t\t{\"Total Transcoding Duration\", fmt.Sprintf(\"%vs\", transcodeDur)},\n\t\t{\"* Real-Time Duration Ratio *\", fmt.Sprintf(\"%0.4v\", transcodeDur\/srcDur)},\n\t}\n\n\tstatsTable.SetAlignment(tablewriter.ALIGN_LEFT)\n\tstatsTable.SetCenterSeparator(\"*\")\n\tstatsTable.SetColumnSeparator(\"|\")\n\tstatsTable.AppendBulk(stats)\n\tstatsTable.Render()\n}\n\nfunc parseVideoProfiles(inp string) []ffmpeg.VideoProfile {\n\ttype profilesJson struct {\n\t\tProfiles []struct {\n\t\t\tName string `json:\"name\"`\n\t\t\tWidth int `json:\"width\"`\n\t\t\tHeight int `json:\"height\"`\n\t\t\tBitrate int `json:\"bitrate\"`\n\t\t\tFPS uint `json:\"fps\"`\n\t\t\tFPSDen uint `json:\"fpsDen\"`\n\t\t\tProfile string `json:\"profile\"`\n\t\t\tGOP string `json:\"gop\"`\n\t\t} `json:\"profiles\"`\n\t}\n\tprofs := []ffmpeg.VideoProfile{}\n\tif inp != \"\" {\n\t\t\/\/ try opening up json file with profiles\n\t\tcontent, err := ioutil.ReadFile(inp)\n\t\tif err == nil && len(content) > 0 {\n\t\t\t\/\/ parse json profiles\n\t\t\tresp := &profilesJson{}\n\t\t\terr = json.Unmarshal(content, &resp.Profiles)\n\t\t\tif err != nil {\n\t\t\t\tglog.Fatal(\"Unable to unmarshal the passed transcoding option: \", err)\n\t\t\t}\n\t\t\tfor _, profile := range resp.Profiles {\n\t\t\t\tname := profile.Name\n\t\t\t\tif name == \"\" {\n\t\t\t\t\tname = \"custom_\" + common.DefaultProfileName(\n\t\t\t\t\t\tprofile.Width,\n\t\t\t\t\t\tprofile.Height,\n\t\t\t\t\t\tprofile.Bitrate)\n\t\t\t\t}\n\t\t\t\tvar gop time.Duration\n\t\t\t\tif profile.GOP != \"\" {\n\t\t\t\t\tif profile.GOP == \"intra\" {\n\t\t\t\t\t\tgop = ffmpeg.GOPIntraOnly\n\t\t\t\t\t} else {\n\t\t\t\t\t\tgopFloat, err := strconv.ParseFloat(profile.GOP, 64)\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tglog.Fatal(\"Cannot parse the GOP value in the transcoding options: \", err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif gopFloat <= 0.0 {\n\t\t\t\t\t\t\tglog.Fatalf(\"Invalid gop value %f. Please set it to a positive value\", gopFloat)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tgop = time.Duration(gopFloat * float64(time.Second))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tencodingProfile, err := common.EncoderProfileNameToValue(profile.Profile)\n\t\t\t\tif err != nil {\n\t\t\t\t\tglog.Fatal(\"Unable to parse the H264 encoder profile: \", err)\n\t\t\t\t}\n\t\t\t\tprof := ffmpeg.VideoProfile{\n\t\t\t\t\tName: name,\n\t\t\t\t\tBitrate: fmt.Sprint(profile.Bitrate),\n\t\t\t\t\tFramerate: profile.FPS,\n\t\t\t\t\tFramerateDen: profile.FPSDen,\n\t\t\t\t\tResolution: fmt.Sprintf(\"%dx%d\", profile.Width, profile.Height),\n\t\t\t\t\tProfile: encodingProfile,\n\t\t\t\t\tGOP: gop,\n\t\t\t\t}\n\t\t\t\tprofs = append(profs, prof)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ check the built-in profiles\n\t\t\tprofs = make([]ffmpeg.VideoProfile, 0)\n\t\t\tpresets := strings.Split(inp, \",\")\n\t\t\tfor _, v := range presets {\n\t\t\t\tif p, ok := ffmpeg.VideoProfileLookup[strings.TrimSpace(v)]; ok {\n\t\t\t\t\tprofs = append(profs, p)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(profs) <= 0 {\n\t\t\tglog.Fatalf(\"No transcoding options provided\")\n\t\t}\n\t}\n\treturn profs\n}\n<|endoftext|>"} {"text":"<commit_before>package extract\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\tscrape \"github.com\/slotix\/dataflowkit\/scrape\"\n\t\"golang.org\/x\/net\/html\"\n)\n\nvar logger *log.Logger\n\nfunc init() {\n\tlogger = log.New(os.Stdout, \"extractor: \", log.Lshortfile)\n}\n\n\/\/ Const is a PieceExtractor that returns a constant value.\ntype Const struct {\n\t\/\/ The value to return when the Extract() function is called.\n\tVal interface{}\n}\n\nfunc (e Const) Extract(sel *goquery.Selection) (interface{}, error) {\n\treturn e.Val, nil\n}\n\nvar _ scrape.PieceExtractor = Const{}\n\n\/\/ Text is a PieceExtractor that returns the combined text contents of\n\/\/ the given selection.\ntype Text struct {\n\t\/\/ If text is empty in the selection, then return the empty string from Extract,\n\t\/\/ instead of 'nil'. This signals that the result of this Piece\n\t\/\/ should be included to the results, as opposed to omiting the\n\t\/\/ empty string.\n\tIncludeIfEmpty bool\n}\n\nfunc (e Text) Extract(sel *goquery.Selection) (interface{}, error) {\n\tif sel.Text() == \"\" && !e.IncludeIfEmpty {\n\t\treturn nil, nil\n\t}\n\treturn sel.Text(), nil\n}\n\nfunc (e *Text) fillParams(m map[string]interface{}) error {\n\terr := FillStruct(m, e)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nvar _ scrape.PieceExtractor = Text{}\n\n\/\/ MultipleText is a PieceExtractor that extracts the text from each element\n\/\/ in the given selection and returns the texts as an array.\ntype MultipleText struct {\n\t\/\/ If there are no items in the selection, then return empty list from Extract,\n\t\/\/ instead of the 'nil'. This signals that the result of this Piece\n\t\/\/ should be included to the results, as opposed to omiting the\n\t\/\/ empty list.\n\tIncludeIfEmpty bool\n}\n\nfunc (e MultipleText) Extract(sel *goquery.Selection) (interface{}, error) {\n\tresults := []string{}\n\n\tsel.Each(func(i int, s *goquery.Selection) {\n\t\tresults = append(results, s.Text())\n\t})\n\n\tif len(results) == 0 && !e.IncludeIfEmpty {\n\t\treturn nil, nil\n\t}\n\n\treturn results, nil\n}\n\n\/\/ Html extracts and returns the HTML from inside each element of the\n\/\/ given selection, as a string.\n\/\/\n\/\/ Note that this results in what is effectively the innerHTML of the element -\n\/\/ i.e. if our selection consists of [\"<p><b>ONE<\/b><\/p>\", \"<p><i>TWO<\/i><\/p>\"]\n\/\/ then the output will be: \"<b>ONE<\/b><i>TWO<\/i>\".\n\/\/\n\/\/ The return type is a string of all the inner HTML joined together.\ntype Html struct{}\n\nfunc (e Html) Extract(sel *goquery.Selection) (interface{}, error) {\n\tvar ret, h string\n\tvar err error\n\n\tsel.EachWithBreak(func(i int, s *goquery.Selection) bool {\n\t\th, err = s.Html()\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\n\t\tret += h\n\t\treturn true\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n}\n\nvar _ scrape.PieceExtractor = Html{}\n\n\/\/ OuterHtml extracts and returns the HTML of each element of the\n\/\/ given selection, as a string.\n\/\/\n\/\/ To illustrate, if our selection consists of\n\/\/ [\"<div><b>ONE<\/b><\/div>\", \"<p><i>TWO<\/i><\/p>\"] then the output will be:\n\/\/ \"<div><b>ONE<\/b><\/div><p><i>TWO<\/i><\/p>\".\n\/\/\n\/\/ The return type is a string of all the outer HTML joined together.\ntype OuterHtml struct{}\n\nfunc (e OuterHtml) Extract(sel *goquery.Selection) (interface{}, error) {\n\toutput := bytes.NewBufferString(\"\")\n\tfor _, node := range sel.Nodes {\n\t\tif err := html.Render(output, node); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn output.String(), nil\n}\n\nvar _ scrape.PieceExtractor = OuterHtml{}\n\n\/\/ Regex runs the given regex over the contents of each element in the\n\/\/ given selection, and, for each match, extracts the given subexpression.\n\/\/ The return type of the extractor is a list of string matches (i.e. []string).\ntype Regex struct {\n\t\/\/ The regular expression to match. This regular expression must define\n\t\/\/ exactly one parenthesized subexpression (sometimes known as a \"capturing\n\t\/\/ group\"), which will be extracted.\n\tRegex *regexp.Regexp\n\t\/\/ The subexpression of the regex to match. If this value is not set, and if\n\t\/\/ the given regex has more than one subexpression, an error will be thrown.\n\tSubexpression int\n\n\t\/\/ When OnlyText is true, only run the given regex over the text contents of\n\t\/\/ each element in the selection, as opposed to the HTML contents.\n\tOnlyText bool\n\n\t\/\/ By default, if there is only a single match, Regex will return\n\t\/\/ the match itself (as opposed to an array containing the single match).\n\t\/\/ Set AlwaysReturnList to true to disable this behaviour, ensuring that the\n\t\/\/ Extract function always returns an array.\n\tAlwaysReturnList bool\n\n\t\/\/ If no matches of the provided regex could be extracted, then return the empty list\n\t\/\/ from Extract, instead of 'nil'. This signals that the result of\n\t\/\/ this Piece should be included to the results, as opposed to\n\t\/\/ omiting the empty list.\n\tIncludeIfEmpty bool\n}\n\nfunc (e Regex) Extract(sel *goquery.Selection) (interface{}, error) {\n\tif e.Regex == nil {\n\t\treturn nil, errors.New(\"no regex given\")\n\t}\n\tif e.Regex.NumSubexp() == 0 {\n\t\treturn nil, errors.New(\"regex has no subexpressions\")\n\t}\n\n\tvar subexp int\n\tif e.Subexpression == 0 {\n\t\tif e.Regex.NumSubexp() != 1 {\n\t\t\te := fmt.Errorf(\n\t\t\t\t\"regex has more than one subexpression (%d), but which to \"+\n\t\t\t\t\t\"extract was not specified\",\n\t\t\t\te.Regex.NumSubexp())\n\t\t\treturn nil, e\n\t\t}\n\n\t\tsubexp = 1\n\t} else {\n\t\tsubexp = e.Subexpression\n\t}\n\n\tresults := []string{}\n\n\t\/\/ For each element in the selector...\n\tvar err error\n\tsel.EachWithBreak(func(i int, s *goquery.Selection) bool {\n\t\tvar contents string\n\t\tif e.OnlyText {\n\t\t\tcontents = s.Text()\n\t\t} else {\n\t\t\tcontents, err = s.Html()\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\tret := e.Regex.FindAllStringSubmatch(contents, -1)\n\n\t\t\/\/ A return value of nil == no match\n\t\tif ret == nil {\n\t\t\treturn true\n\t\t}\n\n\t\t\/\/ For each regex match...\n\t\tfor _, submatches := range ret {\n\t\t\t\/\/ The 0th entry will be the match of the entire string. The 1st\n\t\t\t\/\/ entry will be the first capturing group, which is what we want to\n\t\t\t\/\/ extract.\n\t\t\tif len(submatches) > 1 {\n\t\t\t\tresults = append(results, submatches[subexp])\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(results) == 0 && !e.IncludeIfEmpty {\n\t\treturn nil, nil\n\t}\n\tif len(results) == 1 && !e.AlwaysReturnList {\n\t\treturn results[0], nil\n\t}\n\n\treturn results, nil\n}\n\nfunc (e *Regex) fillParams(m map[string]interface{}) error {\n\terr := FillStruct(m, e)\n\tif err != nil {\n\t\treturn err\n\t}\n\tregExp := m[\"regexp\"]\n\te.Regex = regexp.MustCompile(regExp.(string))\n\treturn nil\n}\n\nvar _ scrape.PieceExtractor = Regex{}\n\n\/\/ Attr extracts the value of a given HTML attribute from each element\n\/\/ in the selection, and returns them as a list.\n\/\/ The return type of the extractor is a list of attribute valueus (i.e. []string).\ntype Attr struct {\n\t\/\/ The HTML attribute to extract from each element.\n\tAttr string\n\n\t\/\/ By default, if there is only a single attribute extracted, AttrExtractor\n\t\/\/ will return the match itself (as opposed to an array containing the single\n\t\/\/ match). Set AlwaysReturnList to true to disable this behaviour, ensuring\n\t\/\/ that the Extract function always returns an array.\n\tAlwaysReturnList bool\n\n\t\/\/ If no elements with this attribute are found, then return the empty list from\n\t\/\/ Extract, instead of 'nil'. This signals that the result of this\n\t\/\/ Piece should be included to the results, as opposed to omiting\n\t\/\/ the empty list.\n\tIncludeIfEmpty bool\n}\n\nfunc (e Attr) Extract(sel *goquery.Selection) (interface{}, error) {\n\tif len(e.Attr) == 0 {\n\t\treturn nil, errors.New(\"no attribute provided\")\n\t}\n\n\tresults := []string{}\n\n\tsel.Each(func(i int, s *goquery.Selection) {\n\t\tif val, found := s.Attr(e.Attr); found {\n\t\t\tresults = append(results, val)\n\t\t}\n\t})\n\n\tif len(results) == 0 && !e.IncludeIfEmpty {\n\t\treturn nil, nil\n\t}\n\tif len(results) == 1 && !e.AlwaysReturnList {\n\t\treturn results[0], nil\n\t}\n\n\treturn results, nil\n}\n\nfunc (e *Attr) fillParams(m map[string]interface{}) error {\n\terr := FillStruct(m, e)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\nvar _ scrape.PieceExtractor = Attr{}\n\n\/\/ Count extracts the count of elements that are matched and returns it.\ntype Count struct {\n\t\/\/ If no elements with this attribute are found, then return a number from\n\t\/\/ Extract, instead of 'nil'. This signals that the result of this\n\t\/\/ Piece should be included to the results, as opposed to omiting\n\t\/\/ the empty list.\n\tIncludeIfEmpty bool\n}\n\nfunc (e Count) Extract(sel *goquery.Selection) (interface{}, error) {\n\tl := sel.Length()\n\tif l == 0 && !e.IncludeIfEmpty {\n\t\treturn nil, nil\n\t}\n\n\treturn l, nil\n}\n\nfunc FillParams(t string, m map[string]interface{}) (scrape.PieceExtractor, error) {\n\tvar err error\n\t\/*\n\t\tvar e scrape.PieceExtractor\n\t\tswitch t {\n\t\tcase \"text\":\n\t\t\te = Text{}\n\t\tcase \"attr\":\n\t\t\te = Attr{}\n\t\tcase \"regex\":\n\t\t\te = Regex{}\n\t\t}\n\t\tif m != nil {\n\t\t\terr := FillStruct(m, &e)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\treturn e, nil\n\t*\/\n\n\tswitch t {\n\tcase \"text\":\n\t\ttxt := Text{}\n\t\tif m != nil {\n\t\t\terr = txt.fillParams(m)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\treturn txt, nil\n\tcase \"attr\":\n\t\ta := Attr{}\n\t\tif m != nil {\n\t\t\terr = a.fillParams(m)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\treturn a, nil\n\tcase \"regex\":\n\t\tr := Regex{}\n\t\tif m != nil {\n\t\t\terr = r.fillParams(m)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\treturn r, nil\n\t}\n\treturn nil, err\n\n}\n\nfunc FillStruct(m map[string]interface{}, s interface{}) error {\n\tfor k, v := range m {\n\t\terr := SetField(s, k, v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc SetField(obj interface{}, name string, value interface{}) error {\n\tstructValue := reflect.ValueOf(obj).Elem()\n\t\/\/structFieldValue := structValue.FieldByName(name)\n\tstructFieldValue := structValue.FieldByName(strings.Title(name))\n\n\tif !structFieldValue.IsValid() {\n\t\t\/\/skip non-existent fields\n\t\treturn nil\n\t\t\/\/return fmt.Errorf(\"No such field: %s in obj\", name)\n\t}\n\n\tif !structFieldValue.CanSet() {\n\t\treturn fmt.Errorf(\"Cannot set %s field value\", name)\n\t}\n\n\tstructFieldType := structFieldValue.Type()\n\tval := reflect.ValueOf(value)\n\tif structFieldType != val.Type() {\n\t\tinvalidTypeError := errors.New(\"Provided value type didn't match obj field type\")\n\t\treturn invalidTypeError\n\t}\n\n\tstructFieldValue.Set(val)\n\treturn nil\n}\n<commit_msg>extractors<commit_after>package extract\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/PuerkitoBio\/goquery\"\n\tscrape \"github.com\/slotix\/dataflowkit\/scrape\"\n\t\"golang.org\/x\/net\/html\"\n)\n\nvar logger *log.Logger\n\nfunc init() {\n\tlogger = log.New(os.Stdout, \"extractor: \", log.Lshortfile)\n}\n\n\/\/ Const is a PieceExtractor that returns a constant value.\ntype Const struct {\n\t\/\/ The value to return when the Extract() function is called.\n\tVal interface{}\n}\n\nfunc (e Const) Extract(sel *goquery.Selection) (interface{}, error) {\n\treturn e.Val, nil\n}\n\nvar _ scrape.PieceExtractor = Const{}\n\n\/\/ Text is a PieceExtractor that returns the combined text contents of\n\/\/ the given selection.\ntype Text struct {\n\t\/\/ If text is empty in the selection, then return the empty string from Extract,\n\t\/\/ instead of 'nil'. This signals that the result of this Piece\n\t\/\/ should be included to the results, as opposed to omiting the\n\t\/\/ empty string.\n\tIncludeIfEmpty bool\n}\n\nfunc (e Text) Extract(sel *goquery.Selection) (interface{}, error) {\n\tif sel.Text() == \"\" && !e.IncludeIfEmpty {\n\t\treturn nil, nil\n\t}\n\treturn sel.Text(), nil\n}\n\nfunc (e *Text) fillParams(m map[string]interface{}) error {\n\terr := FillStruct(m, e)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nvar _ scrape.PieceExtractor = Text{}\n\n\/\/ MultipleText is a PieceExtractor that extracts the text from each element\n\/\/ in the given selection and returns the texts as an array.\ntype MultipleText struct {\n\t\/\/ If there are no items in the selection, then return empty list from Extract,\n\t\/\/ instead of the 'nil'. This signals that the result of this Piece\n\t\/\/ should be included to the results, as opposed to omiting the\n\t\/\/ empty list.\n\tIncludeIfEmpty bool\n}\n\nfunc (e MultipleText) Extract(sel *goquery.Selection) (interface{}, error) {\n\tresults := []string{}\n\n\tsel.Each(func(i int, s *goquery.Selection) {\n\t\tresults = append(results, s.Text())\n\t})\n\n\tif len(results) == 0 && !e.IncludeIfEmpty {\n\t\treturn nil, nil\n\t}\n\n\treturn results, nil\n}\n\n\/\/ Html extracts and returns the HTML from inside each element of the\n\/\/ given selection, as a string.\n\/\/\n\/\/ Note that this results in what is effectively the innerHTML of the element -\n\/\/ i.e. if our selection consists of [\"<p><b>ONE<\/b><\/p>\", \"<p><i>TWO<\/i><\/p>\"]\n\/\/ then the output will be: \"<b>ONE<\/b><i>TWO<\/i>\".\n\/\/\n\/\/ The return type is a string of all the inner HTML joined together.\ntype Html struct{}\n\nfunc (e Html) Extract(sel *goquery.Selection) (interface{}, error) {\n\tvar ret, h string\n\tvar err error\n\n\tsel.EachWithBreak(func(i int, s *goquery.Selection) bool {\n\t\th, err = s.Html()\n\t\tif err != nil {\n\t\t\treturn false\n\t\t}\n\n\t\tret += h\n\t\treturn true\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n}\n\nvar _ scrape.PieceExtractor = Html{}\n\n\/\/ OuterHtml extracts and returns the HTML of each element of the\n\/\/ given selection, as a string.\n\/\/\n\/\/ To illustrate, if our selection consists of\n\/\/ [\"<div><b>ONE<\/b><\/div>\", \"<p><i>TWO<\/i><\/p>\"] then the output will be:\n\/\/ \"<div><b>ONE<\/b><\/div><p><i>TWO<\/i><\/p>\".\n\/\/\n\/\/ The return type is a string of all the outer HTML joined together.\ntype OuterHtml struct{}\n\nfunc (e OuterHtml) Extract(sel *goquery.Selection) (interface{}, error) {\n\toutput := bytes.NewBufferString(\"\")\n\tfor _, node := range sel.Nodes {\n\t\tif err := html.Render(output, node); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn output.String(), nil\n}\n\nvar _ scrape.PieceExtractor = OuterHtml{}\n\n\/\/ Regex runs the given regex over the contents of each element in the\n\/\/ given selection, and, for each match, extracts the given subexpression.\n\/\/ The return type of the extractor is a list of string matches (i.e. []string).\ntype Regex struct {\n\t\/\/ The regular expression to match. This regular expression must define\n\t\/\/ exactly one parenthesized subexpression (sometimes known as a \"capturing\n\t\/\/ group\"), which will be extracted.\n\tRegex *regexp.Regexp\n\t\/\/ The subexpression of the regex to match. If this value is not set, and if\n\t\/\/ the given regex has more than one subexpression, an error will be thrown.\n\tSubexpression int\n\n\t\/\/ When OnlyText is true, only run the given regex over the text contents of\n\t\/\/ each element in the selection, as opposed to the HTML contents.\n\tOnlyText bool\n\n\t\/\/ By default, if there is only a single match, Regex will return\n\t\/\/ the match itself (as opposed to an array containing the single match).\n\t\/\/ Set AlwaysReturnList to true to disable this behaviour, ensuring that the\n\t\/\/ Extract function always returns an array.\n\tAlwaysReturnList bool\n\n\t\/\/ If no matches of the provided regex could be extracted, then return the empty list\n\t\/\/ from Extract, instead of 'nil'. This signals that the result of\n\t\/\/ this Piece should be included to the results, as opposed to\n\t\/\/ omiting the empty list.\n\tIncludeIfEmpty bool\n}\n\nfunc (e Regex) Extract(sel *goquery.Selection) (interface{}, error) {\n\tif e.Regex == nil {\n\t\treturn nil, errors.New(\"no regex given\")\n\t}\n\tif e.Regex.NumSubexp() == 0 {\n\t\treturn nil, errors.New(\"regex has no subexpressions\")\n\t}\n\n\tvar subexp int\n\tif e.Subexpression == 0 {\n\t\tif e.Regex.NumSubexp() != 1 {\n\t\t\te := fmt.Errorf(\n\t\t\t\t\"regex has more than one subexpression (%d), but which to \"+\n\t\t\t\t\t\"extract was not specified\",\n\t\t\t\te.Regex.NumSubexp())\n\t\t\treturn nil, e\n\t\t}\n\n\t\tsubexp = 1\n\t} else {\n\t\tsubexp = e.Subexpression\n\t}\n\n\tresults := []string{}\n\n\t\/\/ For each element in the selector...\n\tvar err error\n\tsel.EachWithBreak(func(i int, s *goquery.Selection) bool {\n\t\tvar contents string\n\t\tif e.OnlyText {\n\t\t\tcontents = s.Text()\n\t\t} else {\n\t\t\tcontents, err = s.Html()\n\t\t\tif err != nil {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\tret := e.Regex.FindAllStringSubmatch(contents, -1)\n\n\t\t\/\/ A return value of nil == no match\n\t\tif ret == nil {\n\t\t\treturn true\n\t\t}\n\n\t\t\/\/ For each regex match...\n\t\tfor _, submatches := range ret {\n\t\t\t\/\/ The 0th entry will be the match of the entire string. The 1st\n\t\t\t\/\/ entry will be the first capturing group, which is what we want to\n\t\t\t\/\/ extract.\n\t\t\tif len(submatches) > 1 {\n\t\t\t\tresults = append(results, submatches[subexp])\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(results) == 0 && !e.IncludeIfEmpty {\n\t\treturn nil, nil\n\t}\n\tif len(results) == 1 && !e.AlwaysReturnList {\n\t\treturn results[0], nil\n\t}\n\n\treturn results, nil\n}\n\nfunc (e *Regex) fillParams(m map[string]interface{}) error {\n\terr := FillStruct(m, e)\n\tif err != nil {\n\t\treturn err\n\t}\n\tregExp := m[\"regexp\"]\n\te.Regex = regexp.MustCompile(regExp.(string))\n\treturn nil\n}\n\nvar _ scrape.PieceExtractor = Regex{}\n\n\/\/ Attr extracts the value of a given HTML attribute from each element\n\/\/ in the selection, and returns them as a list.\n\/\/ The return type of the extractor is a list of attribute valueus (i.e. []string).\ntype Attr struct {\n\t\/\/ The HTML attribute to extract from each element.\n\tAttr string\n\n\t\/\/ By default, if there is only a single attribute extracted, AttrExtractor\n\t\/\/ will return the match itself (as opposed to an array containing the single\n\t\/\/ match). Set AlwaysReturnList to true to disable this behaviour, ensuring\n\t\/\/ that the Extract function always returns an array.\n\tAlwaysReturnList bool\n\n\t\/\/ If no elements with this attribute are found, then return the empty list from\n\t\/\/ Extract, instead of 'nil'. This signals that the result of this\n\t\/\/ Piece should be included to the results, as opposed to omiting\n\t\/\/ the empty list.\n\tIncludeIfEmpty bool\n}\n\nfunc (e Attr) Extract(sel *goquery.Selection) (interface{}, error) {\n\tif len(e.Attr) == 0 {\n\t\treturn nil, errors.New(\"no attribute provided\")\n\t}\n\n\tresults := []string{}\n\n\tsel.Each(func(i int, s *goquery.Selection) {\n\t\tif val, found := s.Attr(e.Attr); found {\n\t\t\tresults = append(results, val)\n\t\t}\n\t})\n\n\tif len(results) == 0 && !e.IncludeIfEmpty {\n\t\treturn nil, nil\n\t}\n\tif len(results) == 1 && !e.AlwaysReturnList {\n\t\treturn results[0], nil\n\t}\n\n\treturn results, nil\n}\n\nfunc (e *Attr) fillParams(m map[string]interface{}) error {\n\terr := FillStruct(m, e)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nvar _ scrape.PieceExtractor = Attr{}\n\n\/\/ Count extracts the count of elements that are matched and returns it.\ntype Count struct {\n\t\/\/ If no elements with this attribute are found, then return a number from\n\t\/\/ Extract, instead of 'nil'. This signals that the result of this\n\t\/\/ Piece should be included to the results, as opposed to omiting\n\t\/\/ the empty list.\n\tIncludeIfEmpty bool\n}\n\nfunc (e Count) Extract(sel *goquery.Selection) (interface{}, error) {\n\tl := sel.Length()\n\tif l == 0 && !e.IncludeIfEmpty {\n\t\treturn nil, nil\n\t}\n\n\treturn l, nil\n}\n\nfunc FillParams(t string, m map[string]interface{}) (scrape.PieceExtractor, error) {\n\t\/\/var err error\n\n\tlogger.Println(t)\n\tvar e scrape.PieceExtractor\n\tswitch t {\n\tcase \"text\":\n\t\te = &Text{}\n\tcase \"attr\":\n\t\te = &Attr{}\n\tcase \"regex\":\n\t\t\/\/e = &Regex{}\n\t\tr := &Regex{}\n\t\tregExp := m[\"regexp\"]\n\t\tr.Regex = regexp.MustCompile(regExp.(string))\n\t\te = r\n\t}\n\tif m != nil {\n\t\terr := FillStruct(m, e)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn e, nil\n\n\t\/*\n\t\tswitch t {\n\t\tcase \"text\":\n\t\t\ttxt := Text{}\n\t\t\tif m != nil {\n\t\t\t\terr = txt.fillParams(m)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn txt, nil\n\t\tcase \"attr\":\n\t\t\ta := Attr{}\n\t\t\tif m != nil {\n\t\t\t\terr = a.fillParams(m)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn a, nil\n\t\tcase \"regex\":\n\t\t\tr := Regex{}\n\t\t\tif m != nil {\n\t\t\t\terr = r.fillParams(m)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn r, nil\n\t\t}\n\n\t\treturn nil, err\n*\/\n}\n\nfunc FillStruct(m map[string]interface{}, s interface{}) error {\n\tfor k, v := range m {\n\t\tlogger.Println(k,v)\n\t\terr := SetField(s, k, v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc SetField(obj interface{}, name string, value interface{}) error {\n\/\/logger.Printf(\"%T, %t\", obj, obj)\n\tstructValue := reflect.ValueOf(obj).Elem()\n\t\/\/structFieldValue := structValue.FieldByName(name)\n\tstructFieldValue := structValue.FieldByName(strings.Title(name))\n\n\tif !structFieldValue.IsValid() {\n\t\t\/\/skip non-existent fields\n\t\treturn nil\n\t\t\/\/return fmt.Errorf(\"No such field: %s in obj\", name)\n\t}\n\n\tif !structFieldValue.CanSet() {\n\t\treturn fmt.Errorf(\"Cannot set %s field value\", name)\n\t}\n\n\tstructFieldType := structFieldValue.Type()\n\tval := reflect.ValueOf(value)\n\tif structFieldType != val.Type() {\n\t\tinvalidTypeError := errors.New(\"Provided value type didn't match obj field type\")\n\t\treturn invalidTypeError\n\t}\n\n\tstructFieldValue.Set(val)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package deletion\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/grafana\/loki\/pkg\/storage\/stores\/indexshipper\/compactor\/deletionmode\"\n\n\t\"github.com\/go-kit\/log\/level\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/common\/model\"\n\n\t\"github.com\/grafana\/loki\/pkg\/storage\/stores\/indexshipper\/compactor\/retention\"\n\tutil_log \"github.com\/grafana\/loki\/pkg\/util\/log\"\n)\n\nconst (\n\tstatusSuccess = \"success\"\n\tstatusFail = \"fail\"\n)\n\ntype userDeleteRequests struct {\n\trequests []DeleteRequest\n\t\/\/ requestsInterval holds the earliest start time and latest end time considering all the delete requests\n\trequestsInterval model.Interval\n}\n\ntype DeleteRequestsManager struct {\n\tdeleteRequestsStore DeleteRequestsStore\n\tdeleteRequestCancelPeriod time.Duration\n\n\tdeleteRequestsToProcess map[string]*userDeleteRequests\n\tchunkIntervalsToRetain []retention.IntervalFilter\n\t\/\/ WARN: If by any chance we change deleteRequestsToProcessMtx to sync.RWMutex to be able to check multiple chunks at a time,\n\t\/\/ please take care of chunkIntervalsToRetain which should be unique per chunk.\n\tdeleteRequestsToProcessMtx sync.Mutex\n\tmetrics *deleteRequestsManagerMetrics\n\twg sync.WaitGroup\n\tdone chan struct{}\n\tbatchSize int\n\tlimits Limits\n}\n\nfunc NewDeleteRequestsManager(store DeleteRequestsStore, deleteRequestCancelPeriod time.Duration, batchSize int, limits Limits, registerer prometheus.Registerer) *DeleteRequestsManager {\n\tdm := &DeleteRequestsManager{\n\t\tdeleteRequestsStore: store,\n\t\tdeleteRequestCancelPeriod: deleteRequestCancelPeriod,\n\t\tdeleteRequestsToProcess: map[string]*userDeleteRequests{},\n\t\tmetrics: newDeleteRequestsManagerMetrics(registerer),\n\t\tdone: make(chan struct{}),\n\t\tbatchSize: batchSize,\n\t\tlimits: limits,\n\t}\n\n\tgo dm.loop()\n\n\treturn dm\n}\n\nfunc (d *DeleteRequestsManager) loop() {\n\tticker := time.NewTicker(5 * time.Minute)\n\tdefer ticker.Stop()\n\n\td.wg.Add(1)\n\tdefer d.wg.Done()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif err := d.updateMetrics(); err != nil {\n\t\t\t\tlevel.Error(util_log.Logger).Log(\"msg\", \"failed to update metrics\", \"err\", err)\n\t\t\t}\n\t\tcase <-d.done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (d *DeleteRequestsManager) Stop() {\n\tclose(d.done)\n\td.wg.Wait()\n}\n\nfunc (d *DeleteRequestsManager) updateMetrics() error {\n\tdeleteRequests, err := d.deleteRequestsStore.GetDeleteRequestsByStatus(context.Background(), StatusReceived)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpendingDeleteRequestsCount := 0\n\toldestPendingRequestCreatedAt := model.Time(0)\n\n\tfor _, deleteRequest := range deleteRequests {\n\t\t\/\/ adding an extra minute here to avoid a race between cancellation of request and picking up the request for processing\n\t\tif deleteRequest.Status != StatusReceived || deleteRequest.CreatedAt.Add(d.deleteRequestCancelPeriod).Add(time.Minute).After(model.Now()) {\n\t\t\tcontinue\n\t\t}\n\n\t\tpendingDeleteRequestsCount++\n\t\tif oldestPendingRequestCreatedAt == 0 || deleteRequest.CreatedAt.Before(oldestPendingRequestCreatedAt) {\n\t\t\toldestPendingRequestCreatedAt = deleteRequest.CreatedAt\n\t\t}\n\t}\n\n\t\/\/ track age of oldest delete request since they became eligible for processing\n\toldestPendingRequestAge := time.Duration(0)\n\tif oldestPendingRequestCreatedAt != 0 {\n\t\toldestPendingRequestAge = model.Now().Sub(oldestPendingRequestCreatedAt.Add(d.deleteRequestCancelPeriod))\n\t}\n\td.metrics.oldestPendingDeleteRequestAgeSeconds.Set(float64(oldestPendingRequestAge \/ time.Second))\n\td.metrics.pendingDeleteRequestsCount.Set(float64(pendingDeleteRequestsCount))\n\n\treturn nil\n}\n\nfunc (d *DeleteRequestsManager) loadDeleteRequestsToProcess() error {\n\td.deleteRequestsToProcessMtx.Lock()\n\tdefer d.deleteRequestsToProcessMtx.Unlock()\n\n\t\/\/ Reset this first so any errors result in a clear map\n\td.deleteRequestsToProcess = map[string]*userDeleteRequests{}\n\n\tdeleteRequests, err := d.filteredSortedDeleteRequests()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i, deleteRequest := range deleteRequests {\n\t\tif i >= d.batchSize {\n\t\t\tlogBatchTruncation(i, len(deleteRequests))\n\t\t\tbreak\n\t\t}\n\n\t\tlevel.Info(util_log.Logger).Log(\n\t\t\t\"msg\", \"Started processing delete request for user\",\n\t\t\t\"delete_request_id\", deleteRequest.RequestID,\n\t\t\t\"user\", deleteRequest.UserID,\n\t\t)\n\n\t\tdeleteRequest.Metrics = d.metrics\n\n\t\tur := d.requestsForUser(deleteRequest)\n\t\tur.requests = append(ur.requests, deleteRequest)\n\t\tif deleteRequest.StartTime < ur.requestsInterval.Start {\n\t\t\tur.requestsInterval.Start = deleteRequest.StartTime\n\t\t}\n\t\tif deleteRequest.EndTime > ur.requestsInterval.End {\n\t\t\tur.requestsInterval.End = deleteRequest.EndTime\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (d *DeleteRequestsManager) filteredSortedDeleteRequests() ([]DeleteRequest, error) {\n\tdeleteRequests, err := d.deleteRequestsStore.GetDeleteRequestsByStatus(context.Background(), StatusReceived)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdeleteRequests, err = d.filteredRequests(deleteRequests)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsort.Slice(deleteRequests, func(i, j int) bool {\n\t\treturn deleteRequests[i].StartTime < deleteRequests[j].StartTime\n\t})\n\n\treturn deleteRequests, nil\n}\n\nfunc (d *DeleteRequestsManager) filteredRequests(reqs []DeleteRequest) ([]DeleteRequest, error) {\n\tfiltered := make([]DeleteRequest, 0, len(reqs))\n\tfor _, deleteRequest := range reqs {\n\t\t\/\/ adding an extra minute here to avoid a race between cancellation of request and picking up the request for processing\n\t\tif deleteRequest.CreatedAt.Add(d.deleteRequestCancelPeriod).Add(time.Minute).After(model.Now()) {\n\t\t\tcontinue\n\t\t}\n\n\t\tprocessRequest, err := d.shouldProcessRequest(deleteRequest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif !processRequest {\n\t\t\tcontinue\n\t\t}\n\n\t\tfiltered = append(filtered, deleteRequest)\n\t}\n\n\treturn filtered, nil\n}\n\nfunc (d *DeleteRequestsManager) requestsForUser(dr DeleteRequest) *userDeleteRequests {\n\tur, ok := d.deleteRequestsToProcess[dr.UserID]\n\tif !ok {\n\t\tur = &userDeleteRequests{\n\t\t\trequestsInterval: model.Interval{\n\t\t\t\tStart: dr.StartTime,\n\t\t\t\tEnd: dr.EndTime,\n\t\t\t},\n\t\t}\n\t\td.deleteRequestsToProcess[dr.UserID] = ur\n\t}\n\treturn ur\n}\n\nfunc logBatchTruncation(size, total int) {\n\tif size < total {\n\t\tlevel.Info(util_log.Logger).Log(\n\t\t\t\"msg\", fmt.Sprintf(\"Processing %d of %d delete requests. More requests will be processed in subsequent compactions\", size, total),\n\t\t)\n\t}\n}\n\nfunc (d *DeleteRequestsManager) shouldProcessRequest(dr DeleteRequest) (bool, error) {\n\tmode, err := deleteModeFromLimits(d.limits, dr.UserID)\n\tif err != nil {\n\t\tlevel.Error(util_log.Logger).Log(\n\t\t\t\"msg\", \"unable to determine deletion mode for user\",\n\t\t\t\"user\", dr.UserID,\n\t\t)\n\t\treturn false, err\n\t}\n\n\treturn mode == deletionmode.FilterAndDelete, nil\n}\n\nfunc (d *DeleteRequestsManager) Expired(ref retention.ChunkEntry, _ model.Time) (bool, []retention.IntervalFilter) {\n\td.deleteRequestsToProcessMtx.Lock()\n\tdefer d.deleteRequestsToProcessMtx.Unlock()\n\n\tuserIDStr := unsafeGetString(ref.UserID)\n\tif d.deleteRequestsToProcess[userIDStr] == nil || !intervalsOverlap(d.deleteRequestsToProcess[userIDStr].requestsInterval, model.Interval{\n\t\tStart: ref.From,\n\t\tEnd: ref.Through,\n\t}) {\n\t\treturn false, nil\n\t}\n\n\td.chunkIntervalsToRetain = d.chunkIntervalsToRetain[:0]\n\td.chunkIntervalsToRetain = append(d.chunkIntervalsToRetain, retention.IntervalFilter{\n\t\tInterval: model.Interval{\n\t\t\tStart: ref.From,\n\t\t\tEnd: ref.Through,\n\t\t},\n\t})\n\n\tfor _, deleteRequest := range d.deleteRequestsToProcess[userIDStr].requests {\n\t\trebuiltIntervals := make([]retention.IntervalFilter, 0, len(d.chunkIntervalsToRetain))\n\t\tfor _, ivf := range d.chunkIntervalsToRetain {\n\t\t\tentry := ref\n\t\t\tentry.From = ivf.Interval.Start\n\t\t\tentry.Through = ivf.Interval.End\n\t\t\tisDeleted, newIntervalsToRetain := deleteRequest.IsDeleted(entry)\n\t\t\tif !isDeleted {\n\t\t\t\trebuiltIntervals = append(rebuiltIntervals, ivf)\n\t\t\t} else {\n\t\t\t\trebuiltIntervals = append(rebuiltIntervals, newIntervalsToRetain...)\n\t\t\t}\n\t\t}\n\n\t\td.chunkIntervalsToRetain = rebuiltIntervals\n\t\tif len(d.chunkIntervalsToRetain) == 0 {\n\t\t\tlevel.Info(util_log.Logger).Log(\n\t\t\t\t\"msg\", \"no chunks to retain: the whole chunk is deleted\",\n\t\t\t\t\"delete_request_id\", deleteRequest.RequestID,\n\t\t\t\t\"user\", deleteRequest.UserID,\n\t\t\t\t\"chunkID\", string(ref.ChunkID),\n\t\t\t)\n\t\t\td.metrics.deleteRequestsChunksSelectedTotal.WithLabelValues(string(ref.UserID)).Inc()\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\tif len(d.chunkIntervalsToRetain) == 1 && d.chunkIntervalsToRetain[0].Interval.Start == ref.From && d.chunkIntervalsToRetain[0].Interval.End == ref.Through {\n\t\treturn false, nil\n\t}\n\n\td.metrics.deleteRequestsChunksSelectedTotal.WithLabelValues(string(ref.UserID)).Inc()\n\treturn true, d.chunkIntervalsToRetain\n}\n\nfunc (d *DeleteRequestsManager) MarkPhaseStarted() {\n\tstatus := statusSuccess\n\tif err := d.loadDeleteRequestsToProcess(); err != nil {\n\t\tstatus = statusFail\n\t\tlevel.Error(util_log.Logger).Log(\"msg\", \"failed to load delete requests to process\", \"err\", err)\n\t}\n\td.metrics.loadPendingRequestsAttemptsTotal.WithLabelValues(status).Inc()\n}\n\nfunc (d *DeleteRequestsManager) MarkPhaseFailed() {\n\td.deleteRequestsToProcessMtx.Lock()\n\tdefer d.deleteRequestsToProcessMtx.Unlock()\n\n\td.metrics.deletionFailures.WithLabelValues(\"error\").Inc()\n\td.deleteRequestsToProcess = map[string]*userDeleteRequests{}\n}\n\nfunc (d *DeleteRequestsManager) MarkPhaseTimedOut() {\n\td.deleteRequestsToProcessMtx.Lock()\n\tdefer d.deleteRequestsToProcessMtx.Unlock()\n\n\td.metrics.deletionFailures.WithLabelValues(\"timeout\").Inc()\n\td.deleteRequestsToProcess = map[string]*userDeleteRequests{}\n}\n\nfunc (d *DeleteRequestsManager) MarkPhaseFinished() {\n\td.deleteRequestsToProcessMtx.Lock()\n\tdefer d.deleteRequestsToProcessMtx.Unlock()\n\n\tfor _, userDeleteRequests := range d.deleteRequestsToProcess {\n\t\tif userDeleteRequests == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, deleteRequest := range userDeleteRequests.requests {\n\t\t\tif err := d.deleteRequestsStore.UpdateStatus(context.Background(), deleteRequest, StatusProcessed); err != nil {\n\t\t\t\tlevel.Error(util_log.Logger).Log(\n\t\t\t\t\t\"msg\", \"failed to mark delete request for user as processed\",\n\t\t\t\t\t\"delete_request_id\", deleteRequest.RequestID,\n\t\t\t\t\t\"user\", deleteRequest.UserID,\n\t\t\t\t\t\"err\", err,\n\t\t\t\t\t\"deleted_lines\", deleteRequest.DeletedLines,\n\t\t\t\t)\n\t\t\t} else {\n\t\t\t\tlevel.Info(util_log.Logger).Log(\n\t\t\t\t\t\"msg\", \"delete request for user marked as processed\",\n\t\t\t\t\t\"delete_request_id\", deleteRequest.RequestID,\n\t\t\t\t\t\"user\", deleteRequest.UserID,\n\t\t\t\t\t\"deleted_lines\", deleteRequest.DeletedLines,\n\t\t\t\t)\n\t\t\t}\n\t\t\td.metrics.deleteRequestsProcessedTotal.WithLabelValues(deleteRequest.UserID).Inc()\n\t\t}\n\t}\n}\n\nfunc (d *DeleteRequestsManager) IntervalMayHaveExpiredChunks(_ model.Interval, userID string) bool {\n\td.deleteRequestsToProcessMtx.Lock()\n\tdefer d.deleteRequestsToProcessMtx.Unlock()\n\n\t\/\/ We can't do the overlap check between the passed interval and delete requests interval from a user because\n\t\/\/ if a request is issued just for today and there are chunks spanning today and yesterday then\n\t\/\/ the overlap check would skip processing yesterday's index which would result in the index pointing to deleted chunks.\n\tif userID != \"\" {\n\t\treturn d.deleteRequestsToProcess[userID] != nil\n\t}\n\n\treturn len(d.deleteRequestsToProcess) != 0\n}\n\nfunc (d *DeleteRequestsManager) DropFromIndex(_ retention.ChunkEntry, _ model.Time, _ model.Time) bool {\n\treturn false\n}\n<commit_msg>Add sequence num to log output for delete requests (#7087)<commit_after>package deletion\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/grafana\/loki\/pkg\/storage\/stores\/indexshipper\/compactor\/deletionmode\"\n\n\t\"github.com\/go-kit\/log\/level\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/common\/model\"\n\n\t\"github.com\/grafana\/loki\/pkg\/storage\/stores\/indexshipper\/compactor\/retention\"\n\tutil_log \"github.com\/grafana\/loki\/pkg\/util\/log\"\n)\n\nconst (\n\tstatusSuccess = \"success\"\n\tstatusFail = \"fail\"\n)\n\ntype userDeleteRequests struct {\n\trequests []DeleteRequest\n\t\/\/ requestsInterval holds the earliest start time and latest end time considering all the delete requests\n\trequestsInterval model.Interval\n}\n\ntype DeleteRequestsManager struct {\n\tdeleteRequestsStore DeleteRequestsStore\n\tdeleteRequestCancelPeriod time.Duration\n\n\tdeleteRequestsToProcess map[string]*userDeleteRequests\n\tchunkIntervalsToRetain []retention.IntervalFilter\n\t\/\/ WARN: If by any chance we change deleteRequestsToProcessMtx to sync.RWMutex to be able to check multiple chunks at a time,\n\t\/\/ please take care of chunkIntervalsToRetain which should be unique per chunk.\n\tdeleteRequestsToProcessMtx sync.Mutex\n\tmetrics *deleteRequestsManagerMetrics\n\twg sync.WaitGroup\n\tdone chan struct{}\n\tbatchSize int\n\tlimits Limits\n}\n\nfunc NewDeleteRequestsManager(store DeleteRequestsStore, deleteRequestCancelPeriod time.Duration, batchSize int, limits Limits, registerer prometheus.Registerer) *DeleteRequestsManager {\n\tdm := &DeleteRequestsManager{\n\t\tdeleteRequestsStore: store,\n\t\tdeleteRequestCancelPeriod: deleteRequestCancelPeriod,\n\t\tdeleteRequestsToProcess: map[string]*userDeleteRequests{},\n\t\tmetrics: newDeleteRequestsManagerMetrics(registerer),\n\t\tdone: make(chan struct{}),\n\t\tbatchSize: batchSize,\n\t\tlimits: limits,\n\t}\n\n\tgo dm.loop()\n\n\treturn dm\n}\n\nfunc (d *DeleteRequestsManager) loop() {\n\tticker := time.NewTicker(5 * time.Minute)\n\tdefer ticker.Stop()\n\n\td.wg.Add(1)\n\tdefer d.wg.Done()\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif err := d.updateMetrics(); err != nil {\n\t\t\t\tlevel.Error(util_log.Logger).Log(\"msg\", \"failed to update metrics\", \"err\", err)\n\t\t\t}\n\t\tcase <-d.done:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (d *DeleteRequestsManager) Stop() {\n\tclose(d.done)\n\td.wg.Wait()\n}\n\nfunc (d *DeleteRequestsManager) updateMetrics() error {\n\tdeleteRequests, err := d.deleteRequestsStore.GetDeleteRequestsByStatus(context.Background(), StatusReceived)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpendingDeleteRequestsCount := 0\n\toldestPendingRequestCreatedAt := model.Time(0)\n\n\tfor _, deleteRequest := range deleteRequests {\n\t\t\/\/ adding an extra minute here to avoid a race between cancellation of request and picking up the request for processing\n\t\tif deleteRequest.Status != StatusReceived || deleteRequest.CreatedAt.Add(d.deleteRequestCancelPeriod).Add(time.Minute).After(model.Now()) {\n\t\t\tcontinue\n\t\t}\n\n\t\tpendingDeleteRequestsCount++\n\t\tif oldestPendingRequestCreatedAt == 0 || deleteRequest.CreatedAt.Before(oldestPendingRequestCreatedAt) {\n\t\t\toldestPendingRequestCreatedAt = deleteRequest.CreatedAt\n\t\t}\n\t}\n\n\t\/\/ track age of oldest delete request since they became eligible for processing\n\toldestPendingRequestAge := time.Duration(0)\n\tif oldestPendingRequestCreatedAt != 0 {\n\t\toldestPendingRequestAge = model.Now().Sub(oldestPendingRequestCreatedAt.Add(d.deleteRequestCancelPeriod))\n\t}\n\td.metrics.oldestPendingDeleteRequestAgeSeconds.Set(float64(oldestPendingRequestAge \/ time.Second))\n\td.metrics.pendingDeleteRequestsCount.Set(float64(pendingDeleteRequestsCount))\n\n\treturn nil\n}\n\nfunc (d *DeleteRequestsManager) loadDeleteRequestsToProcess() error {\n\td.deleteRequestsToProcessMtx.Lock()\n\tdefer d.deleteRequestsToProcessMtx.Unlock()\n\n\t\/\/ Reset this first so any errors result in a clear map\n\td.deleteRequestsToProcess = map[string]*userDeleteRequests{}\n\n\tdeleteRequests, err := d.filteredSortedDeleteRequests()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i, deleteRequest := range deleteRequests {\n\t\tif i >= d.batchSize {\n\t\t\tlogBatchTruncation(i, len(deleteRequests))\n\t\t\tbreak\n\t\t}\n\n\t\tlevel.Info(util_log.Logger).Log(\n\t\t\t\"msg\", \"Started processing delete request for user\",\n\t\t\t\"delete_request_id\", deleteRequest.RequestID,\n\t\t\t\"user\", deleteRequest.UserID,\n\t\t)\n\n\t\tdeleteRequest.Metrics = d.metrics\n\n\t\tur := d.requestsForUser(deleteRequest)\n\t\tur.requests = append(ur.requests, deleteRequest)\n\t\tif deleteRequest.StartTime < ur.requestsInterval.Start {\n\t\t\tur.requestsInterval.Start = deleteRequest.StartTime\n\t\t}\n\t\tif deleteRequest.EndTime > ur.requestsInterval.End {\n\t\t\tur.requestsInterval.End = deleteRequest.EndTime\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (d *DeleteRequestsManager) filteredSortedDeleteRequests() ([]DeleteRequest, error) {\n\tdeleteRequests, err := d.deleteRequestsStore.GetDeleteRequestsByStatus(context.Background(), StatusReceived)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdeleteRequests, err = d.filteredRequests(deleteRequests)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsort.Slice(deleteRequests, func(i, j int) bool {\n\t\treturn deleteRequests[i].StartTime < deleteRequests[j].StartTime\n\t})\n\n\treturn deleteRequests, nil\n}\n\nfunc (d *DeleteRequestsManager) filteredRequests(reqs []DeleteRequest) ([]DeleteRequest, error) {\n\tfiltered := make([]DeleteRequest, 0, len(reqs))\n\tfor _, deleteRequest := range reqs {\n\t\t\/\/ adding an extra minute here to avoid a race between cancellation of request and picking up the request for processing\n\t\tif deleteRequest.CreatedAt.Add(d.deleteRequestCancelPeriod).Add(time.Minute).After(model.Now()) {\n\t\t\tcontinue\n\t\t}\n\n\t\tprocessRequest, err := d.shouldProcessRequest(deleteRequest)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif !processRequest {\n\t\t\tcontinue\n\t\t}\n\n\t\tfiltered = append(filtered, deleteRequest)\n\t}\n\n\treturn filtered, nil\n}\n\nfunc (d *DeleteRequestsManager) requestsForUser(dr DeleteRequest) *userDeleteRequests {\n\tur, ok := d.deleteRequestsToProcess[dr.UserID]\n\tif !ok {\n\t\tur = &userDeleteRequests{\n\t\t\trequestsInterval: model.Interval{\n\t\t\t\tStart: dr.StartTime,\n\t\t\t\tEnd: dr.EndTime,\n\t\t\t},\n\t\t}\n\t\td.deleteRequestsToProcess[dr.UserID] = ur\n\t}\n\treturn ur\n}\n\nfunc logBatchTruncation(size, total int) {\n\tif size < total {\n\t\tlevel.Info(util_log.Logger).Log(\n\t\t\t\"msg\", fmt.Sprintf(\"Processing %d of %d delete requests. More requests will be processed in subsequent compactions\", size, total),\n\t\t)\n\t}\n}\n\nfunc (d *DeleteRequestsManager) shouldProcessRequest(dr DeleteRequest) (bool, error) {\n\tmode, err := deleteModeFromLimits(d.limits, dr.UserID)\n\tif err != nil {\n\t\tlevel.Error(util_log.Logger).Log(\n\t\t\t\"msg\", \"unable to determine deletion mode for user\",\n\t\t\t\"user\", dr.UserID,\n\t\t)\n\t\treturn false, err\n\t}\n\n\treturn mode == deletionmode.FilterAndDelete, nil\n}\n\nfunc (d *DeleteRequestsManager) Expired(ref retention.ChunkEntry, _ model.Time) (bool, []retention.IntervalFilter) {\n\td.deleteRequestsToProcessMtx.Lock()\n\tdefer d.deleteRequestsToProcessMtx.Unlock()\n\n\tuserIDStr := unsafeGetString(ref.UserID)\n\tif d.deleteRequestsToProcess[userIDStr] == nil || !intervalsOverlap(d.deleteRequestsToProcess[userIDStr].requestsInterval, model.Interval{\n\t\tStart: ref.From,\n\t\tEnd: ref.Through,\n\t}) {\n\t\treturn false, nil\n\t}\n\n\td.chunkIntervalsToRetain = d.chunkIntervalsToRetain[:0]\n\td.chunkIntervalsToRetain = append(d.chunkIntervalsToRetain, retention.IntervalFilter{\n\t\tInterval: model.Interval{\n\t\t\tStart: ref.From,\n\t\t\tEnd: ref.Through,\n\t\t},\n\t})\n\n\tfor _, deleteRequest := range d.deleteRequestsToProcess[userIDStr].requests {\n\t\trebuiltIntervals := make([]retention.IntervalFilter, 0, len(d.chunkIntervalsToRetain))\n\t\tfor _, ivf := range d.chunkIntervalsToRetain {\n\t\t\tentry := ref\n\t\t\tentry.From = ivf.Interval.Start\n\t\t\tentry.Through = ivf.Interval.End\n\t\t\tisDeleted, newIntervalsToRetain := deleteRequest.IsDeleted(entry)\n\t\t\tif !isDeleted {\n\t\t\t\trebuiltIntervals = append(rebuiltIntervals, ivf)\n\t\t\t} else {\n\t\t\t\trebuiltIntervals = append(rebuiltIntervals, newIntervalsToRetain...)\n\t\t\t}\n\t\t}\n\n\t\td.chunkIntervalsToRetain = rebuiltIntervals\n\t\tif len(d.chunkIntervalsToRetain) == 0 {\n\t\t\tlevel.Info(util_log.Logger).Log(\n\t\t\t\t\"msg\", \"no chunks to retain: the whole chunk is deleted\",\n\t\t\t\t\"delete_request_id\", deleteRequest.RequestID,\n\t\t\t\t\"sequence_num\", deleteRequest.SequenceNum,\n\t\t\t\t\"user\", deleteRequest.UserID,\n\t\t\t\t\"chunkID\", string(ref.ChunkID),\n\t\t\t)\n\t\t\td.metrics.deleteRequestsChunksSelectedTotal.WithLabelValues(string(ref.UserID)).Inc()\n\t\t\treturn true, nil\n\t\t}\n\t}\n\n\tif len(d.chunkIntervalsToRetain) == 1 && d.chunkIntervalsToRetain[0].Interval.Start == ref.From && d.chunkIntervalsToRetain[0].Interval.End == ref.Through {\n\t\treturn false, nil\n\t}\n\n\td.metrics.deleteRequestsChunksSelectedTotal.WithLabelValues(string(ref.UserID)).Inc()\n\treturn true, d.chunkIntervalsToRetain\n}\n\nfunc (d *DeleteRequestsManager) MarkPhaseStarted() {\n\tstatus := statusSuccess\n\tif err := d.loadDeleteRequestsToProcess(); err != nil {\n\t\tstatus = statusFail\n\t\tlevel.Error(util_log.Logger).Log(\"msg\", \"failed to load delete requests to process\", \"err\", err)\n\t}\n\td.metrics.loadPendingRequestsAttemptsTotal.WithLabelValues(status).Inc()\n}\n\nfunc (d *DeleteRequestsManager) MarkPhaseFailed() {\n\td.deleteRequestsToProcessMtx.Lock()\n\tdefer d.deleteRequestsToProcessMtx.Unlock()\n\n\td.metrics.deletionFailures.WithLabelValues(\"error\").Inc()\n\td.deleteRequestsToProcess = map[string]*userDeleteRequests{}\n}\n\nfunc (d *DeleteRequestsManager) MarkPhaseTimedOut() {\n\td.deleteRequestsToProcessMtx.Lock()\n\tdefer d.deleteRequestsToProcessMtx.Unlock()\n\n\td.metrics.deletionFailures.WithLabelValues(\"timeout\").Inc()\n\td.deleteRequestsToProcess = map[string]*userDeleteRequests{}\n}\n\nfunc (d *DeleteRequestsManager) MarkPhaseFinished() {\n\td.deleteRequestsToProcessMtx.Lock()\n\tdefer d.deleteRequestsToProcessMtx.Unlock()\n\n\tfor _, userDeleteRequests := range d.deleteRequestsToProcess {\n\t\tif userDeleteRequests == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, deleteRequest := range userDeleteRequests.requests {\n\t\t\tif err := d.deleteRequestsStore.UpdateStatus(context.Background(), deleteRequest, StatusProcessed); err != nil {\n\t\t\t\tlevel.Error(util_log.Logger).Log(\n\t\t\t\t\t\"msg\", \"failed to mark delete request for user as processed\",\n\t\t\t\t\t\"delete_request_id\", deleteRequest.RequestID,\n\t\t\t\t\t\"sequence_num\", deleteRequest.SequenceNum,\n\t\t\t\t\t\"user\", deleteRequest.UserID,\n\t\t\t\t\t\"err\", err,\n\t\t\t\t\t\"deleted_lines\", deleteRequest.DeletedLines,\n\t\t\t\t)\n\t\t\t} else {\n\t\t\t\tlevel.Info(util_log.Logger).Log(\n\t\t\t\t\t\"msg\", \"delete request for user marked as processed\",\n\t\t\t\t\t\"delete_request_id\", deleteRequest.RequestID,\n\t\t\t\t\t\"sequence_num\", deleteRequest.SequenceNum,\n\t\t\t\t\t\"user\", deleteRequest.UserID,\n\t\t\t\t\t\"deleted_lines\", deleteRequest.DeletedLines,\n\t\t\t\t)\n\t\t\t}\n\t\t\td.metrics.deleteRequestsProcessedTotal.WithLabelValues(deleteRequest.UserID).Inc()\n\t\t}\n\t}\n}\n\nfunc (d *DeleteRequestsManager) IntervalMayHaveExpiredChunks(_ model.Interval, userID string) bool {\n\td.deleteRequestsToProcessMtx.Lock()\n\tdefer d.deleteRequestsToProcessMtx.Unlock()\n\n\t\/\/ We can't do the overlap check between the passed interval and delete requests interval from a user because\n\t\/\/ if a request is issued just for today and there are chunks spanning today and yesterday then\n\t\/\/ the overlap check would skip processing yesterday's index which would result in the index pointing to deleted chunks.\n\tif userID != \"\" {\n\t\treturn d.deleteRequestsToProcess[userID] != nil\n\t}\n\n\treturn len(d.deleteRequestsToProcess) != 0\n}\n\nfunc (d *DeleteRequestsManager) DropFromIndex(_ retention.ChunkEntry, _ model.Time, _ model.Time) bool {\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package ufop\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tHTML2IMAGE_MAX_PAGE_SIZE = 10 * 1024 * 1024\n)\n\ntype Html2Imager struct {\n\tmaxPageSize int64\n}\n\ntype Html2ImageOptions struct {\n\tCropH int\n\tCropW int\n\tCropX int\n\tCropY int\n\tFormat string\n\tHeight int\n\tWidth int\n\tQuality int\n\tForce bool\n}\n\nfunc (this *Html2Imager) parse(cmd string) (options *Html2ImageOptions, err error) {\n\tpattern := `^html2image(\/croph\/\\d+|\/cropw\/\\d+|\/cropx\/\\d+|\/cropy\/\\d+|\/format\/(png|jpg|jpeg)|\/height\/\\d+|\/quality\/\\d+|\/width\/\\d+|\/force\/[0|1]){0,9}$`\n\tmatched, _ := regexp.Match(pattern, []byte(cmd))\n\tif !matched {\n\t\terr = errors.New(\"invalid html2image command format\")\n\t\treturn\n\t}\n\n\toptions = &Html2ImageOptions{\n\t\tFormat: \"jpg\",\n\t}\n\n\t\/\/croph\n\tcropHStr := getParam(cmd, `croph\/\\d+`, \"croph\")\n\tif cropHStr != \"\" {\n\t\tcropH, _ := strconv.Atoi(cropHStr)\n\t\tif cropH <= 0 {\n\t\t\terr = errors.New(\"invalid html2image parameter 'croph'\")\n\t\t\treturn\n\t\t} else {\n\t\t\toptions.CropH = cropH\n\t\t}\n\t}\n\n\t\/\/cropw\n\tcropWStr := getParam(cmd, `cropw\/\\d+`, \"cropw\")\n\tif cropWStr != \"\" {\n\t\tcropW, _ := strconv.Atoi(cropWStr)\n\t\tif cropW <= 0 {\n\t\t\terr = errors.New(\"invalid html2image parameter 'cropw'\")\n\t\t\treturn\n\t\t} else {\n\t\t\toptions.CropW = cropW\n\t\t}\n\t}\n\n\t\/\/cropx\n\tcropXStr := getParam(cmd, `cropx\/\\d+`, \"cropx\")\n\tfmt.Println(cropXStr)\n\tif cropXStr != \"\" {\n\t\tcropX, _ := strconv.Atoi(cropXStr)\n\t\tif cropX <= 0 {\n\t\t\terr = errors.New(\"invalid html2image parameter 'cropx'\")\n\t\t\treturn\n\t\t} else {\n\t\t\toptions.CropX = cropX\n\t\t}\n\t}\n\n\t\/\/cropy\n\tcropYStr := getParam(cmd, `cropy\/\\d+`, \"cropy\")\n\tif cropYStr != \"\" {\n\t\tcropY, _ := strconv.Atoi(cropYStr)\n\t\tif cropY <= 0 {\n\t\t\terr = errors.New(\"invalid html2image parameter 'cropy'\")\n\t\t\treturn\n\t\t} else {\n\t\t\toptions.CropY = cropY\n\t\t}\n\t}\n\n\t\/\/format\n\tformatStr := getParam(cmd, \"format\/(png|jpg|jpeg)\", \"format\")\n\tif formatStr != \"\" {\n\t\toptions.Format = formatStr\n\t}\n\n\t\/\/height\n\theightStr := getParam(cmd, `height\/\\d+`, \"height\")\n\tif heightStr != \"\" {\n\t\theight, _ := strconv.Atoi(heightStr)\n\t\tif height <= 0 {\n\t\t\terr = errors.New(\"invalid html2image parameter 'height'\")\n\t\t\treturn\n\t\t} else {\n\t\t\toptions.Height = height\n\t\t}\n\t}\n\n\t\/\/width\n\twidthStr := getParam(cmd, `width\/\\d+`, \"width\")\n\tif widthStr != \"\" {\n\t\twidth, _ := strconv.Atoi(widthStr)\n\t\tif width <= 0 {\n\t\t\terr = errors.New(\"invalid html2image parameter 'width'\")\n\t\t\treturn\n\t\t} else {\n\t\t\toptions.Width = width\n\t\t}\n\t}\n\n\t\/\/quality\n\tqualityStr := getParam(cmd, `quality\/\\d+`, \"quality\")\n\tif qualityStr != \"\" {\n\t\tquality, _ := strconv.Atoi(qualityStr)\n\t\tif quality > 100 || quality <= 0 {\n\t\t\terr = errors.New(\"invalid html2image parameter 'quality'\")\n\t\t\treturn\n\t\t} else {\n\t\t\toptions.Quality = quality\n\t\t}\n\t}\n\n\t\/\/force\n\tforceStr := getParam(cmd, \"force\/[0|1]\", \"force\")\n\tif forceStr != \"\" {\n\t\tforce, _ := strconv.Atoi(forceStr)\n\t\tif force == 1 {\n\t\t\toptions.Force = true\n\t\t}\n\t}\n\n\treturn\n\n}\n\nfunc (this *Html2Imager) Do(req UfopRequest) (result interface{}, contentType string, err error) {\n\tif this.maxPageSize <= 0 {\n\t\tthis.maxPageSize = HTML2IMAGE_MAX_PAGE_SIZE\n\t}\n\n\t\/\/if not text format, error it\n\tif !strings.HasPrefix(req.Src.MimeType, \"text\/\") {\n\t\terr = errors.New(\"unsupported file mime type, only text\/* allowed\")\n\t\treturn\n\t}\n\n\t\/\/if file size exceeds, error it\n\tif req.Src.Fsize > this.maxPageSize {\n\t\terr = errors.New(\"page file length exceeds the limit\")\n\t\treturn\n\t}\n\n\toptions, pErr := this.parse(req.Cmd)\n\tif pErr != nil {\n\t\terr = pErr\n\t\treturn\n\t}\n\n\t\/\/get page file content save it into temp dir\n\tresp, respErr := http.Get(req.Src.Url)\n\tif respErr != nil || resp.StatusCode != 200 {\n\t\tif respErr != nil {\n\t\t\terr = errors.New(fmt.Sprintf(\"retrieve page file resource data failed, %s\", respErr.Error()))\n\t\t} else {\n\t\t\terr = errors.New(fmt.Sprintf(\"retrieve page file resource data failed, %s\", resp.Status))\n\t\t\tif resp.Body != nil {\n\t\t\t\tresp.Body.Close()\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\tjobPrefix := md5Hex(req.Src.Url)\n\n\tpageSuffix := \"txt\"\n\tif req.Src.MimeType == \"text\/html\" {\n\t\tpageSuffix = \"html\"\n\t}\n\n\tlocalPageTmpFname := fmt.Sprintf(\"%s%d.page.%s\", jobPrefix, time.Now().UnixNano(), pageSuffix)\n\tlocalPageTmpFpath := filepath.Join(os.TempDir(), localPageTmpFname)\n\tdefer os.Remove(localPageTmpFpath)\n\n\tlocalPageTmpFp, openErr := os.OpenFile(localPageTmpFpath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0655)\n\tif openErr != nil {\n\t\terr = errors.New(fmt.Sprintf(\"open page file temp file failed, %s\", openErr.Error()))\n\t\treturn\n\t}\n\t_, cpErr := io.Copy(localPageTmpFp, resp.Body)\n\tif cpErr != nil {\n\t\terr = errors.New(fmt.Sprintf(\"save page file content to tmp file failed, %s\", cpErr.Error()))\n\t\treturn\n\t}\n\n\tlocalPageTmpFp.Close()\n\tresp.Body.Close()\n\n\t\/\/prepare command\n\tcmdParams := make([]string, 0)\n\tcmdParams = append(cmdParams, \"-q\")\n\n\tif options.CropH > 0 {\n\t\tcmdParams = append(cmdParams, \"--crop-h\", fmt.Sprintf(\"%d\", options.CropH))\n\t}\n\n\tif options.CropW > 0 {\n\t\tcmdParams = append(cmdParams, \"--crop-w\", fmt.Sprintf(\"%s\", options.CropW))\n\t}\n\n\tif options.CropX > 0 {\n\t\tcmdParams = append(cmdParams, \"--crop-x\", fmt.Sprintf(\"%d\", options.CropX))\n\t}\n\n\tif options.CropY > 0 {\n\t\tcmdParams = append(cmdParams, \"--crop-y\", fmt.Sprintf(\"%d\", options.CropY))\n\t}\n\n\tif options.Format != \"\" {\n\t\tcmdParams = append(cmdParams, \"--format\", options.Format)\n\t}\n\n\tif options.Quality > 0 {\n\t\tcmdParams = append(cmdParams, \"--quality\", fmt.Sprintf(\"%d\", options.Quality))\n\t}\n\n\tif options.Height > 0 {\n\t\tcmdParams = append(cmdParams, \"--height\", fmt.Sprintf(\"%d\", options.Height))\n\t}\n\n\tif options.Width > 0 {\n\t\tcmdParams = append(cmdParams, \"--width\", fmt.Sprintf(\"%d\", options.Width))\n\t}\n\n\tif options.Force {\n\t\tcmdParams = append(cmdParams, \"--disable-smart-width\")\n\t}\n\n\t\/\/result tmp file\n\tresultTmpFname := fmt.Sprintf(\"%s%d.result.%s\", jobPrefix, time.Now().UnixNano(), options.Format)\n\tresultTmpFpath := filepath.Join(os.TempDir(), resultTmpFname)\n\tdefer os.Remove(resultTmpFpath)\n\n\tcmdParams = append(cmdParams, localPageTmpFpath, resultTmpFpath)\n\n\t\/\/cmd\n\tconvertCmd := exec.Command(\"wkhtmltoimage\", cmdParams...)\n\tlog.Println(convertCmd.Path, convertCmd.Args)\n\n\tstdErrPipe, pipeErr := convertCmd.StderrPipe()\n\tif pipeErr != nil {\n\t\terr = errors.New(fmt.Sprintf(\"open exec stderr pipe error, %s\", pipeErr.Error()))\n\t\treturn\n\t}\n\n\tif startErr := convertCmd.Start(); startErr != nil {\n\t\terr = errors.New(fmt.Sprintf(\"start html2image command error, %s\", startErr.Error()))\n\t\treturn\n\t}\n\n\tstdErrData, readErr := ioutil.ReadAll(stdErrPipe)\n\tif readErr != nil {\n\t\terr = errors.New(fmt.Sprintf(\"read html2image command stderr error, %s\", readErr.Error()))\n\t\treturn\n\t}\n\n\t\/\/check stderr output & output file\n\tif string(stdErrData) != \"\" {\n\t\tlog.Println(string(stdErrData))\n\t}\n\n\tif waitErr := convertCmd.Wait(); waitErr != nil {\n\t\terr = errors.New(fmt.Sprintf(\"wait html2image to exit error, %s\", waitErr.Error()))\n\t\treturn\n\t}\n\n\tif _, statErr := os.Stat(resultTmpFpath); statErr == nil {\n\t\toTmpFp, openErr := os.Open(resultTmpFpath)\n\t\tif openErr != nil {\n\t\t\terr = errors.New(fmt.Sprintf(\"open html2image output result error, %s\", openErr.Error()))\n\t\t\treturn\n\t\t}\n\t\tdefer oTmpFp.Close()\n\n\t\toutputBytes, readErr := ioutil.ReadAll(oTmpFp)\n\t\tif readErr != nil {\n\t\t\terr = errors.New(fmt.Sprintf(\"read html2image output result error, %s\", readErr.Error()))\n\t\t\treturn\n\t\t}\n\t\tresult = outputBytes\n\t} else {\n\t\terr = errors.New(\"html2image with no valid output result\")\n\t}\n\n\tif options.Format == \"png\" {\n\t\tcontentType = \"image\/png\"\n\t} else {\n\t\tcontentType = \"image\/jpeg\"\n\t}\n\n\treturn\n}\n<commit_msg>Fix bug in html2image.<commit_after>package ufop\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\tHTML2IMAGE_MAX_PAGE_SIZE = 10 * 1024 * 1024\n)\n\ntype Html2Imager struct {\n\tmaxPageSize int64\n}\n\ntype Html2ImageOptions struct {\n\tCropH int\n\tCropW int\n\tCropX int\n\tCropY int\n\tFormat string\n\tHeight int\n\tWidth int\n\tQuality int\n\tForce bool\n}\n\nfunc (this *Html2Imager) parse(cmd string) (options *Html2ImageOptions, err error) {\n\tpattern := `^html2image(\/croph\/\\d+|\/cropw\/\\d+|\/cropx\/\\d+|\/cropy\/\\d+|\/format\/(png|jpg|jpeg)|\/height\/\\d+|\/quality\/\\d+|\/width\/\\d+|\/force\/[0|1]){0,9}$`\n\tmatched, _ := regexp.Match(pattern, []byte(cmd))\n\tif !matched {\n\t\terr = errors.New(\"invalid html2image command format\")\n\t\treturn\n\t}\n\n\toptions = &Html2ImageOptions{\n\t\tFormat: \"jpg\",\n\t}\n\n\t\/\/croph\n\tcropHStr := getParam(cmd, `croph\/\\d+`, \"croph\")\n\tif cropHStr != \"\" {\n\t\tcropH, _ := strconv.Atoi(cropHStr)\n\t\tif cropH <= 0 {\n\t\t\terr = errors.New(\"invalid html2image parameter 'croph'\")\n\t\t\treturn\n\t\t} else {\n\t\t\toptions.CropH = cropH\n\t\t}\n\t}\n\n\t\/\/cropw\n\tcropWStr := getParam(cmd, `cropw\/\\d+`, \"cropw\")\n\tif cropWStr != \"\" {\n\t\tcropW, _ := strconv.Atoi(cropWStr)\n\t\tif cropW <= 0 {\n\t\t\terr = errors.New(\"invalid html2image parameter 'cropw'\")\n\t\t\treturn\n\t\t} else {\n\t\t\toptions.CropW = cropW\n\t\t}\n\t}\n\n\t\/\/cropx\n\tcropXStr := getParam(cmd, `cropx\/\\d+`, \"cropx\")\n\tfmt.Println(cropXStr)\n\tif cropXStr != \"\" {\n\t\tcropX, _ := strconv.Atoi(cropXStr)\n\t\tif cropX <= 0 {\n\t\t\terr = errors.New(\"invalid html2image parameter 'cropx'\")\n\t\t\treturn\n\t\t} else {\n\t\t\toptions.CropX = cropX\n\t\t}\n\t}\n\n\t\/\/cropy\n\tcropYStr := getParam(cmd, `cropy\/\\d+`, \"cropy\")\n\tif cropYStr != \"\" {\n\t\tcropY, _ := strconv.Atoi(cropYStr)\n\t\tif cropY <= 0 {\n\t\t\terr = errors.New(\"invalid html2image parameter 'cropy'\")\n\t\t\treturn\n\t\t} else {\n\t\t\toptions.CropY = cropY\n\t\t}\n\t}\n\n\t\/\/format\n\tformatStr := getParam(cmd, \"format\/(png|jpg|jpeg)\", \"format\")\n\tif formatStr != \"\" {\n\t\toptions.Format = formatStr\n\t}\n\n\t\/\/height\n\theightStr := getParam(cmd, `height\/\\d+`, \"height\")\n\tif heightStr != \"\" {\n\t\theight, _ := strconv.Atoi(heightStr)\n\t\tif height <= 0 {\n\t\t\terr = errors.New(\"invalid html2image parameter 'height'\")\n\t\t\treturn\n\t\t} else {\n\t\t\toptions.Height = height\n\t\t}\n\t}\n\n\t\/\/width\n\twidthStr := getParam(cmd, `width\/\\d+`, \"width\")\n\tif widthStr != \"\" {\n\t\twidth, _ := strconv.Atoi(widthStr)\n\t\tif width <= 0 {\n\t\t\terr = errors.New(\"invalid html2image parameter 'width'\")\n\t\t\treturn\n\t\t} else {\n\t\t\toptions.Width = width\n\t\t}\n\t}\n\n\t\/\/quality\n\tqualityStr := getParam(cmd, `quality\/\\d+`, \"quality\")\n\tif qualityStr != \"\" {\n\t\tquality, _ := strconv.Atoi(qualityStr)\n\t\tif quality > 100 || quality <= 0 {\n\t\t\terr = errors.New(\"invalid html2image parameter 'quality'\")\n\t\t\treturn\n\t\t} else {\n\t\t\toptions.Quality = quality\n\t\t}\n\t}\n\n\t\/\/force\n\tforceStr := getParam(cmd, \"force\/[0|1]\", \"force\")\n\tif forceStr != \"\" {\n\t\tforce, _ := strconv.Atoi(forceStr)\n\t\tif force == 1 {\n\t\t\toptions.Force = true\n\t\t}\n\t}\n\n\treturn\n\n}\n\nfunc (this *Html2Imager) Do(req UfopRequest) (result interface{}, contentType string, err error) {\n\tif this.maxPageSize <= 0 {\n\t\tthis.maxPageSize = HTML2IMAGE_MAX_PAGE_SIZE\n\t}\n\n\t\/\/if not text format, error it\n\tif !strings.HasPrefix(req.Src.MimeType, \"text\/\") {\n\t\terr = errors.New(\"unsupported file mime type, only text\/* allowed\")\n\t\treturn\n\t}\n\n\t\/\/if file size exceeds, error it\n\tif req.Src.Fsize > this.maxPageSize {\n\t\terr = errors.New(\"page file length exceeds the limit\")\n\t\treturn\n\t}\n\n\toptions, pErr := this.parse(req.Cmd)\n\tif pErr != nil {\n\t\terr = pErr\n\t\treturn\n\t}\n\n\t\/\/get page file content save it into temp dir\n\tresp, respErr := http.Get(req.Src.Url)\n\tif respErr != nil || resp.StatusCode != 200 {\n\t\tif respErr != nil {\n\t\t\terr = errors.New(fmt.Sprintf(\"retrieve page file resource data failed, %s\", respErr.Error()))\n\t\t} else {\n\t\t\terr = errors.New(fmt.Sprintf(\"retrieve page file resource data failed, %s\", resp.Status))\n\t\t\tif resp.Body != nil {\n\t\t\t\tresp.Body.Close()\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\tjobPrefix := md5Hex(req.Src.Url)\n\n\tpageSuffix := \"txt\"\n\tif req.Src.MimeType == \"text\/html\" {\n\t\tpageSuffix = \"html\"\n\t}\n\n\tlocalPageTmpFname := fmt.Sprintf(\"%s%d.page.%s\", jobPrefix, time.Now().UnixNano(), pageSuffix)\n\tlocalPageTmpFpath := filepath.Join(os.TempDir(), localPageTmpFname)\n\tdefer os.Remove(localPageTmpFpath)\n\n\tlocalPageTmpFp, openErr := os.OpenFile(localPageTmpFpath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0655)\n\tif openErr != nil {\n\t\terr = errors.New(fmt.Sprintf(\"open page file temp file failed, %s\", openErr.Error()))\n\t\treturn\n\t}\n\t_, cpErr := io.Copy(localPageTmpFp, resp.Body)\n\tif cpErr != nil {\n\t\terr = errors.New(fmt.Sprintf(\"save page file content to tmp file failed, %s\", cpErr.Error()))\n\t\treturn\n\t}\n\n\tlocalPageTmpFp.Close()\n\tresp.Body.Close()\n\n\t\/\/prepare command\n\tcmdParams := make([]string, 0)\n\tcmdParams = append(cmdParams, \"-q\")\n\n\tif options.CropH > 0 {\n\t\tcmdParams = append(cmdParams, \"--crop-h\", fmt.Sprintf(\"%d\", options.CropH))\n\t}\n\n\tif options.CropW > 0 {\n\t\tcmdParams = append(cmdParams, \"--crop-w\", fmt.Sprintf(\"%d\", options.CropW))\n\t}\n\n\tif options.CropX > 0 {\n\t\tcmdParams = append(cmdParams, \"--crop-x\", fmt.Sprintf(\"%d\", options.CropX))\n\t}\n\n\tif options.CropY > 0 {\n\t\tcmdParams = append(cmdParams, \"--crop-y\", fmt.Sprintf(\"%d\", options.CropY))\n\t}\n\n\tif options.Format != \"\" {\n\t\tcmdParams = append(cmdParams, \"--format\", options.Format)\n\t}\n\n\tif options.Quality > 0 {\n\t\tcmdParams = append(cmdParams, \"--quality\", fmt.Sprintf(\"%d\", options.Quality))\n\t}\n\n\tif options.Height > 0 {\n\t\tcmdParams = append(cmdParams, \"--height\", fmt.Sprintf(\"%d\", options.Height))\n\t}\n\n\tif options.Width > 0 {\n\t\tcmdParams = append(cmdParams, \"--width\", fmt.Sprintf(\"%d\", options.Width))\n\t}\n\n\tif options.Force {\n\t\tcmdParams = append(cmdParams, \"--disable-smart-width\")\n\t}\n\n\t\/\/result tmp file\n\tresultTmpFname := fmt.Sprintf(\"%s%d.result.%s\", jobPrefix, time.Now().UnixNano(), options.Format)\n\tresultTmpFpath := filepath.Join(os.TempDir(), resultTmpFname)\n\tdefer os.Remove(resultTmpFpath)\n\n\tcmdParams = append(cmdParams, localPageTmpFpath, resultTmpFpath)\n\n\t\/\/cmd\n\tconvertCmd := exec.Command(\"wkhtmltoimage\", cmdParams...)\n\tlog.Println(convertCmd.Path, convertCmd.Args)\n\n\tstdErrPipe, pipeErr := convertCmd.StderrPipe()\n\tif pipeErr != nil {\n\t\terr = errors.New(fmt.Sprintf(\"open exec stderr pipe error, %s\", pipeErr.Error()))\n\t\treturn\n\t}\n\n\tif startErr := convertCmd.Start(); startErr != nil {\n\t\terr = errors.New(fmt.Sprintf(\"start html2image command error, %s\", startErr.Error()))\n\t\treturn\n\t}\n\n\tstdErrData, readErr := ioutil.ReadAll(stdErrPipe)\n\tif readErr != nil {\n\t\terr = errors.New(fmt.Sprintf(\"read html2image command stderr error, %s\", readErr.Error()))\n\t\treturn\n\t}\n\n\t\/\/check stderr output & output file\n\tif string(stdErrData) != \"\" {\n\t\tlog.Println(string(stdErrData))\n\t}\n\n\tif waitErr := convertCmd.Wait(); waitErr != nil {\n\t\terr = errors.New(fmt.Sprintf(\"wait html2image to exit error, %s\", waitErr.Error()))\n\t\treturn\n\t}\n\n\tif _, statErr := os.Stat(resultTmpFpath); statErr == nil {\n\t\toTmpFp, openErr := os.Open(resultTmpFpath)\n\t\tif openErr != nil {\n\t\t\terr = errors.New(fmt.Sprintf(\"open html2image output result error, %s\", openErr.Error()))\n\t\t\treturn\n\t\t}\n\t\tdefer oTmpFp.Close()\n\n\t\toutputBytes, readErr := ioutil.ReadAll(oTmpFp)\n\t\tif readErr != nil {\n\t\t\terr = errors.New(fmt.Sprintf(\"read html2image output result error, %s\", readErr.Error()))\n\t\t\treturn\n\t\t}\n\t\tresult = outputBytes\n\t} else {\n\t\terr = errors.New(\"html2image with no valid output result\")\n\t}\n\n\tif options.Format == \"png\" {\n\t\tcontentType = \"image\/png\"\n\t} else {\n\t\tcontentType = \"image\/jpeg\"\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport \"fmt\"\n\n\/\/AzureEnvironmentSpecConfig is the overall configuration differences in different cloud environments.\ntype AzureEnvironmentSpecConfig struct {\n\tCloudName string\n\tDockerSpecConfig DockerSpecConfig\n\tKubernetesSpecConfig KubernetesSpecConfig\n\tDCOSSpecConfig DCOSSpecConfig\n\tEndpointConfig AzureEndpointConfig\n\tOSImageConfig map[Distro]AzureOSImageConfig\n}\n\n\/\/DockerSpecConfig is the configurations of docker\ntype DockerSpecConfig struct {\n\tDockerEngineRepo string\n\tDockerComposeDownloadURL string\n}\n\n\/\/DCOSSpecConfig is the configurations of DCOS\ntype DCOSSpecConfig struct {\n\tDCOS188BootstrapDownloadURL string\n\tDCOS190BootstrapDownloadURL string\n\tDCOS198BootstrapDownloadURL string\n\tDCOS110BootstrapDownloadURL string\n\tDCOS111BootstrapDownloadURL string\n\tDCOSWindowsBootstrapDownloadURL string\n\tDcosRepositoryURL string \/\/ For custom install, for example CI, need these three addributes\n\tDcosClusterPackageListID string \/\/ the id of the package list file\n\tDcosProviderPackageID string \/\/ the id of the dcos-provider-xxx package\n}\n\n\/\/KubernetesSpecConfig is the kubernetes container images used.\ntype KubernetesSpecConfig struct {\n\tKubernetesImageBase string\n\tTillerImageBase string\n\tACIConnectorImageBase string\n\tNVIDIAImageBase string\n\tAzureCNIImageBase string\n\tEtcdDownloadURLBase string\n\tKubeBinariesSASURLBase string\n\tWindowsPackageSASURLBase string\n\tWindowsTelemetryGUID string\n\tCNIPluginsDownloadURL string\n\tVnetCNILinuxPluginsDownloadURL string\n\tVnetCNIWindowsPluginsDownloadURL string\n\tContainerdDownloadURLBase string\n}\n\n\/\/AzureEndpointConfig describes an Azure endpoint\ntype AzureEndpointConfig struct {\n\tResourceManagerVMDNSSuffix string\n}\n\n\/\/AzureOSImageConfig describes an Azure OS image\ntype AzureOSImageConfig struct {\n\tImageOffer string\n\tImageSku string\n\tImagePublisher string\n\tImageVersion string\n}\n\nvar (\n\t\/\/DefaultKubernetesSpecConfig is the default Docker image source of Kubernetes\n\tDefaultKubernetesSpecConfig = KubernetesSpecConfig{\n\t\tKubernetesImageBase: \"k8s.gcr.io\/\",\n\t\tTillerImageBase: \"gcr.io\/kubernetes-helm\/\",\n\t\tACIConnectorImageBase: \"microsoft\/\",\n\t\tNVIDIAImageBase: \"nvidia\/\",\n\t\tAzureCNIImageBase: \"containernetworking\/\",\n\t\tEtcdDownloadURLBase: \"https:\/\/acs-mirror.azureedge.net\/github-coreos\",\n\t\tKubeBinariesSASURLBase: \"https:\/\/acs-mirror.azureedge.net\/wink8s\/\",\n\t\tWindowsPackageSASURLBase: \"https:\/\/acs-mirror.azureedge.net\/wink8s\/\",\n\t\tWindowsTelemetryGUID: \"fb801154-36b9-41bc-89c2-f4d4f05472b0\",\n\t\tCNIPluginsDownloadURL: \"https:\/\/acs-mirror.azureedge.net\/cni\/cni-plugins-amd64-\" + CNIPluginVer + \".tgz\",\n\t\tVnetCNILinuxPluginsDownloadURL: \"https:\/\/acs-mirror.azureedge.net\/cni\/azure-vnet-cni-linux-amd64-\" + AzureCniPluginVerLinux + \".tgz\",\n\t\tVnetCNIWindowsPluginsDownloadURL: \"https:\/\/acs-mirror.azureedge.net\/cni\/azure-vnet-cni-windows-amd64-\" + AzureCniPluginVerWindows + \".zip\",\n\t\tContainerdDownloadURLBase: \"https:\/\/storage.googleapis.com\/cri-containerd-release\/\",\n\t}\n\n\t\/\/DefaultDCOSSpecConfig is the default DC\/OS binary download URL.\n\tDefaultDCOSSpecConfig = DCOSSpecConfig{\n\t\tDCOS188BootstrapDownloadURL: fmt.Sprintf(AzureEdgeDCOSBootstrapDownloadURL, \"stable\", \"5df43052907c021eeb5de145419a3da1898c58a5\"),\n\t\tDCOS190BootstrapDownloadURL: fmt.Sprintf(AzureEdgeDCOSBootstrapDownloadURL, \"stable\", \"58fd0833ce81b6244fc73bf65b5deb43217b0bd7\"),\n\t\tDCOS198BootstrapDownloadURL: fmt.Sprintf(AzureEdgeDCOSBootstrapDownloadURL, \"stable\/1.9.8\", \"f4ae0d20665fc68ee25282d6f78681b2773c6e10\"),\n\t\tDCOS110BootstrapDownloadURL: fmt.Sprintf(AzureEdgeDCOSBootstrapDownloadURL, \"stable\/1.10.0\", \"4d92536e7381176206e71ee15b5ffe454439920c\"),\n\t\tDCOS111BootstrapDownloadURL: fmt.Sprintf(AzureEdgeDCOSBootstrapDownloadURL, \"stable\/1.11.0\", \"a0654657903fb68dff60f6e522a7f241c1bfbf0f\"),\n\t\tDCOSWindowsBootstrapDownloadURL: \"http:\/\/dcos-win.westus.cloudapp.azure.com\/dcos-windows\/stable\/\",\n\t\tDcosRepositoryURL: \"https:\/\/dcosio.azureedge.net\/dcos\/stable\/1.11.0\",\n\t\tDcosClusterPackageListID: \"248a66388bba1adbcb14a52fd3b7b424ab06fa76\",\n\t}\n\n\t\/\/DefaultDockerSpecConfig is the default Docker engine repo.\n\tDefaultDockerSpecConfig = DockerSpecConfig{\n\t\tDockerEngineRepo: \"https:\/\/aptdocker.azureedge.net\/repo\",\n\t\tDockerComposeDownloadURL: \"https:\/\/github.com\/docker\/compose\/releases\/download\",\n\t}\n\n\t\/\/DefaultUbuntuImageConfig is the default Linux distribution.\n\tDefaultUbuntuImageConfig = AzureOSImageConfig{\n\t\tImageOffer: \"UbuntuServer\",\n\t\tImageSku: \"16.04-LTS\",\n\t\tImagePublisher: \"Canonical\",\n\t\tImageVersion: \"latest\",\n\t}\n\n\t\/\/SovereignCloudsUbuntuImageConfig is the Linux distribution for Azure Sovereign Clouds.\n\tSovereignCloudsUbuntuImageConfig = AzureOSImageConfig{\n\t\tImageOffer: \"UbuntuServer\",\n\t\tImageSku: \"16.04-LTS\",\n\t\tImagePublisher: \"Canonical\",\n\t\tImageVersion: \"latest\",\n\t}\n\n\t\/\/GermanCloudUbuntuImageConfig is the Linux distribution for Azure Sovereign Clouds.\n\tGermanCloudUbuntuImageConfig = AzureOSImageConfig{\n\t\tImageOffer: \"UbuntuServer\",\n\t\tImageSku: \"16.04-LTS\",\n\t\tImagePublisher: \"Canonical\",\n\t\tImageVersion: \"16.04.201801050\",\n\t}\n\n\t\/\/DefaultRHELOSImageConfig is the RHEL Linux distribution.\n\tDefaultRHELOSImageConfig = AzureOSImageConfig{\n\t\tImageOffer: \"RHEL\",\n\t\tImageSku: \"7.3\",\n\t\tImagePublisher: \"RedHat\",\n\t\tImageVersion: \"latest\",\n\t}\n\n\t\/\/DefaultCoreOSImageConfig is the CoreOS Linux distribution.\n\tDefaultCoreOSImageConfig = AzureOSImageConfig{\n\t\tImageOffer: \"CoreOS\",\n\t\tImageSku: \"Stable\",\n\t\tImagePublisher: \"CoreOS\",\n\t\tImageVersion: \"latest\",\n\t}\n\n\t\/\/ DefaultAKSOSImageConfig is the AKS image based on Ubuntu 16.04.\n\tDefaultAKSOSImageConfig = AzureOSImageConfig{\n\t\tImageOffer: \"aks\",\n\t\tImageSku: \"aks-ubuntu-1604-201810\",\n\t\tImagePublisher: \"microsoft-aks\",\n\t\tImageVersion: \"2018.10.26\",\n\t}\n\n\t\/\/ DefaultAKSDockerEngineOSImageConfig is the AKS image based on Ubuntu 16.04.\n\tDefaultAKSDockerEngineOSImageConfig = AzureOSImageConfig{\n\t\tImageOffer: \"aks\",\n\t\tImageSku: \"aks-ubuntu-1604-docker-engine\",\n\t\tImagePublisher: \"microsoft-aks\",\n\t\tImageVersion: \"2018.10.26\",\n\t}\n\n\t\/\/DefaultOpenShift39RHELImageConfig is the OpenShift on RHEL distribution.\n\tDefaultOpenShift39RHELImageConfig = AzureOSImageConfig{\n\t\tImageOffer: \"acsengine-preview\",\n\t\tImageSku: \"rhel74\",\n\t\tImagePublisher: \"redhat\",\n\t\tImageVersion: \"latest\",\n\t}\n\n\t\/\/DefaultOpenShift39CentOSImageConfig is the OpenShift on CentOS distribution.\n\tDefaultOpenShift39CentOSImageConfig = AzureOSImageConfig{\n\t\tImageOffer: \"origin-acsengine-preview\",\n\t\tImageSku: \"centos7\",\n\t\tImagePublisher: \"redhat\",\n\t\tImageVersion: \"latest\",\n\t}\n\n\t\/\/AzureCloudSpec is the default configurations for global azure.\n\tAzureCloudSpec = AzureEnvironmentSpecConfig{\n\t\tCloudName: azurePublicCloud,\n\t\t\/\/DockerSpecConfig specify the docker engine download repo\n\t\tDockerSpecConfig: DefaultDockerSpecConfig,\n\t\t\/\/KubernetesSpecConfig is the default kubernetes container image url.\n\t\tKubernetesSpecConfig: DefaultKubernetesSpecConfig,\n\t\tDCOSSpecConfig: DefaultDCOSSpecConfig,\n\n\t\tEndpointConfig: AzureEndpointConfig{\n\t\t\tResourceManagerVMDNSSuffix: \"cloudapp.azure.com\",\n\t\t},\n\n\t\tOSImageConfig: map[Distro]AzureOSImageConfig{\n\t\t\tUbuntu: DefaultUbuntuImageConfig,\n\t\t\tRHEL: DefaultRHELOSImageConfig,\n\t\t\tCoreOS: DefaultCoreOSImageConfig,\n\t\t\tAKS: DefaultAKSOSImageConfig,\n\t\t\tAKSDockerEngine: DefaultAKSDockerEngineOSImageConfig,\n\t\t\t\/\/ Image config supported for OpenShift\n\t\t\tOpenShift39RHEL: DefaultOpenShift39RHELImageConfig,\n\t\t\tOpenShiftCentOS: DefaultOpenShift39CentOSImageConfig,\n\t\t},\n\t}\n\n\t\/\/AzureGermanCloudSpec is the German cloud config.\n\tAzureGermanCloudSpec = AzureEnvironmentSpecConfig{\n\t\tCloudName: azureGermanCloud,\n\t\tDockerSpecConfig: DefaultDockerSpecConfig,\n\t\tKubernetesSpecConfig: DefaultKubernetesSpecConfig,\n\t\tDCOSSpecConfig: DefaultDCOSSpecConfig,\n\t\tEndpointConfig: AzureEndpointConfig{\n\t\t\tResourceManagerVMDNSSuffix: \"cloudapp.microsoftazure.de\",\n\t\t},\n\t\tOSImageConfig: map[Distro]AzureOSImageConfig{\n\t\t\tUbuntu: GermanCloudUbuntuImageConfig,\n\t\t\tRHEL: DefaultRHELOSImageConfig,\n\t\t\tCoreOS: DefaultCoreOSImageConfig,\n\t\t\tAKS: GermanCloudUbuntuImageConfig,\n\t\t},\n\t}\n\n\t\/\/AzureUSGovernmentCloud is the US government config.\n\tAzureUSGovernmentCloud = AzureEnvironmentSpecConfig{\n\t\tCloudName: azureUSGovernmentCloud,\n\t\tDockerSpecConfig: DefaultDockerSpecConfig,\n\t\tKubernetesSpecConfig: DefaultKubernetesSpecConfig,\n\t\tDCOSSpecConfig: DefaultDCOSSpecConfig,\n\t\tEndpointConfig: AzureEndpointConfig{\n\t\t\tResourceManagerVMDNSSuffix: \"cloudapp.usgovcloudapi.net\",\n\t\t},\n\t\tOSImageConfig: map[Distro]AzureOSImageConfig{\n\t\t\tUbuntu: SovereignCloudsUbuntuImageConfig,\n\t\t\tRHEL: DefaultRHELOSImageConfig,\n\t\t\tCoreOS: DefaultCoreOSImageConfig,\n\t\t\tAKS: SovereignCloudsUbuntuImageConfig,\n\t\t},\n\t}\n\n\t\/\/AzureChinaCloudSpec is the configurations for Azure China (Mooncake)\n\tAzureChinaCloudSpec = AzureEnvironmentSpecConfig{\n\t\tCloudName: azureChinaCloud,\n\t\t\/\/DockerSpecConfig specify the docker engine download repo\n\t\tDockerSpecConfig: DockerSpecConfig{\n\t\t\tDockerEngineRepo: \"https:\/\/mirror.azure.cn\/docker-engine\/apt\/repo\/\",\n\t\t\tDockerComposeDownloadURL: \"https:\/\/mirror.azure.cn\/docker-toolbox\/linux\/compose\",\n\t\t},\n\t\t\/\/KubernetesSpecConfig - Due to Chinese firewall issue, the default containers from google is blocked, use the Chinese local mirror instead\n\t\tKubernetesSpecConfig: KubernetesSpecConfig{\n\t\t\tKubernetesImageBase: \"gcr.azk8s.cn\/google_containers\/\",\n\t\t\tTillerImageBase: \"gcr.azk8s.cn\/kubernetes-helm\/\",\n\t\t\tACIConnectorImageBase: \"dockerhub.azk8s.cn\/microsoft\/\",\n\t\t\tNVIDIAImageBase: \"dockerhub.azk8s.cn\/nvidia\/\",\n\t\t\tAzureCNIImageBase: \"dockerhub.azk8s.cn\/containernetworking\/\",\n\t\t\tEtcdDownloadURLBase: DefaultKubernetesSpecConfig.EtcdDownloadURLBase,\n\t\t\tKubeBinariesSASURLBase: DefaultKubernetesSpecConfig.KubeBinariesSASURLBase,\n\t\t\tWindowsPackageSASURLBase: DefaultKubernetesSpecConfig.WindowsPackageSASURLBase,\n\t\t\tWindowsTelemetryGUID: DefaultKubernetesSpecConfig.WindowsTelemetryGUID,\n\t\t\tCNIPluginsDownloadURL: DefaultKubernetesSpecConfig.CNIPluginsDownloadURL,\n\t\t\tVnetCNILinuxPluginsDownloadURL: DefaultKubernetesSpecConfig.VnetCNILinuxPluginsDownloadURL,\n\t\t\tVnetCNIWindowsPluginsDownloadURL: DefaultKubernetesSpecConfig.VnetCNIWindowsPluginsDownloadURL,\n\t\t\tContainerdDownloadURLBase: \"https:\/\/mirror.azure.cn\/kubernetes\/containerd\/\",\n\t\t},\n\t\tDCOSSpecConfig: DCOSSpecConfig{\n\t\t\tDCOS188BootstrapDownloadURL: fmt.Sprintf(AzureChinaCloudDCOSBootstrapDownloadURL, \"5df43052907c021eeb5de145419a3da1898c58a5\"),\n\t\t\tDCOSWindowsBootstrapDownloadURL: \"https:\/\/dcosdevstorage.blob.core.windows.net\/dcos-windows\",\n\t\t\tDCOS190BootstrapDownloadURL: fmt.Sprintf(AzureChinaCloudDCOSBootstrapDownloadURL, \"58fd0833ce81b6244fc73bf65b5deb43217b0bd7\"),\n\t\t\tDCOS198BootstrapDownloadURL: fmt.Sprintf(AzureChinaCloudDCOSBootstrapDownloadURL, \"f4ae0d20665fc68ee25282d6f78681b2773c6e10\"),\n\t\t},\n\n\t\tEndpointConfig: AzureEndpointConfig{\n\t\t\tResourceManagerVMDNSSuffix: \"cloudapp.chinacloudapi.cn\",\n\t\t},\n\t\tOSImageConfig: map[Distro]AzureOSImageConfig{\n\t\t\tUbuntu: SovereignCloudsUbuntuImageConfig,\n\t\t\tRHEL: DefaultRHELOSImageConfig,\n\t\t\tCoreOS: DefaultCoreOSImageConfig,\n\t\t\tAKS: SovereignCloudsUbuntuImageConfig,\n\t\t},\n\t}\n\n\t\/\/ AzureCloudSpecEnvMap is the environment configuration map for all the Azure cloid environments.\n\tAzureCloudSpecEnvMap = map[string]AzureEnvironmentSpecConfig{\n\t\tazureChinaCloud: AzureChinaCloudSpec,\n\t\tazureGermanCloud: AzureGermanCloudSpec,\n\t\tazureUSGovernmentCloud: AzureUSGovernmentCloud,\n\t\tazurePublicCloud: AzureCloudSpec,\n\t}\n)\n<commit_msg>use china mirror in binary downloading (#4137)<commit_after>package api\n\nimport \"fmt\"\n\n\/\/AzureEnvironmentSpecConfig is the overall configuration differences in different cloud environments.\ntype AzureEnvironmentSpecConfig struct {\n\tCloudName string\n\tDockerSpecConfig DockerSpecConfig\n\tKubernetesSpecConfig KubernetesSpecConfig\n\tDCOSSpecConfig DCOSSpecConfig\n\tEndpointConfig AzureEndpointConfig\n\tOSImageConfig map[Distro]AzureOSImageConfig\n}\n\n\/\/DockerSpecConfig is the configurations of docker\ntype DockerSpecConfig struct {\n\tDockerEngineRepo string\n\tDockerComposeDownloadURL string\n}\n\n\/\/DCOSSpecConfig is the configurations of DCOS\ntype DCOSSpecConfig struct {\n\tDCOS188BootstrapDownloadURL string\n\tDCOS190BootstrapDownloadURL string\n\tDCOS198BootstrapDownloadURL string\n\tDCOS110BootstrapDownloadURL string\n\tDCOS111BootstrapDownloadURL string\n\tDCOSWindowsBootstrapDownloadURL string\n\tDcosRepositoryURL string \/\/ For custom install, for example CI, need these three addributes\n\tDcosClusterPackageListID string \/\/ the id of the package list file\n\tDcosProviderPackageID string \/\/ the id of the dcos-provider-xxx package\n}\n\n\/\/KubernetesSpecConfig is the kubernetes container images used.\ntype KubernetesSpecConfig struct {\n\tKubernetesImageBase string\n\tTillerImageBase string\n\tACIConnectorImageBase string\n\tNVIDIAImageBase string\n\tAzureCNIImageBase string\n\tEtcdDownloadURLBase string\n\tKubeBinariesSASURLBase string\n\tWindowsPackageSASURLBase string\n\tWindowsTelemetryGUID string\n\tCNIPluginsDownloadURL string\n\tVnetCNILinuxPluginsDownloadURL string\n\tVnetCNIWindowsPluginsDownloadURL string\n\tContainerdDownloadURLBase string\n}\n\n\/\/AzureEndpointConfig describes an Azure endpoint\ntype AzureEndpointConfig struct {\n\tResourceManagerVMDNSSuffix string\n}\n\n\/\/AzureOSImageConfig describes an Azure OS image\ntype AzureOSImageConfig struct {\n\tImageOffer string\n\tImageSku string\n\tImagePublisher string\n\tImageVersion string\n}\n\nvar (\n\t\/\/DefaultKubernetesSpecConfig is the default Docker image source of Kubernetes\n\tDefaultKubernetesSpecConfig = KubernetesSpecConfig{\n\t\tKubernetesImageBase: \"k8s.gcr.io\/\",\n\t\tTillerImageBase: \"gcr.io\/kubernetes-helm\/\",\n\t\tACIConnectorImageBase: \"microsoft\/\",\n\t\tNVIDIAImageBase: \"nvidia\/\",\n\t\tAzureCNIImageBase: \"containernetworking\/\",\n\t\tEtcdDownloadURLBase: \"https:\/\/acs-mirror.azureedge.net\/github-coreos\",\n\t\tKubeBinariesSASURLBase: \"https:\/\/acs-mirror.azureedge.net\/wink8s\/\",\n\t\tWindowsPackageSASURLBase: \"https:\/\/acs-mirror.azureedge.net\/wink8s\/\",\n\t\tWindowsTelemetryGUID: \"fb801154-36b9-41bc-89c2-f4d4f05472b0\",\n\t\tCNIPluginsDownloadURL: \"https:\/\/acs-mirror.azureedge.net\/cni\/cni-plugins-amd64-\" + CNIPluginVer + \".tgz\",\n\t\tVnetCNILinuxPluginsDownloadURL: \"https:\/\/acs-mirror.azureedge.net\/cni\/azure-vnet-cni-linux-amd64-\" + AzureCniPluginVerLinux + \".tgz\",\n\t\tVnetCNIWindowsPluginsDownloadURL: \"https:\/\/acs-mirror.azureedge.net\/cni\/azure-vnet-cni-windows-amd64-\" + AzureCniPluginVerWindows + \".zip\",\n\t\tContainerdDownloadURLBase: \"https:\/\/storage.googleapis.com\/cri-containerd-release\/\",\n\t}\n\n\t\/\/DefaultDCOSSpecConfig is the default DC\/OS binary download URL.\n\tDefaultDCOSSpecConfig = DCOSSpecConfig{\n\t\tDCOS188BootstrapDownloadURL: fmt.Sprintf(AzureEdgeDCOSBootstrapDownloadURL, \"stable\", \"5df43052907c021eeb5de145419a3da1898c58a5\"),\n\t\tDCOS190BootstrapDownloadURL: fmt.Sprintf(AzureEdgeDCOSBootstrapDownloadURL, \"stable\", \"58fd0833ce81b6244fc73bf65b5deb43217b0bd7\"),\n\t\tDCOS198BootstrapDownloadURL: fmt.Sprintf(AzureEdgeDCOSBootstrapDownloadURL, \"stable\/1.9.8\", \"f4ae0d20665fc68ee25282d6f78681b2773c6e10\"),\n\t\tDCOS110BootstrapDownloadURL: fmt.Sprintf(AzureEdgeDCOSBootstrapDownloadURL, \"stable\/1.10.0\", \"4d92536e7381176206e71ee15b5ffe454439920c\"),\n\t\tDCOS111BootstrapDownloadURL: fmt.Sprintf(AzureEdgeDCOSBootstrapDownloadURL, \"stable\/1.11.0\", \"a0654657903fb68dff60f6e522a7f241c1bfbf0f\"),\n\t\tDCOSWindowsBootstrapDownloadURL: \"http:\/\/dcos-win.westus.cloudapp.azure.com\/dcos-windows\/stable\/\",\n\t\tDcosRepositoryURL: \"https:\/\/dcosio.azureedge.net\/dcos\/stable\/1.11.0\",\n\t\tDcosClusterPackageListID: \"248a66388bba1adbcb14a52fd3b7b424ab06fa76\",\n\t}\n\n\t\/\/DefaultDockerSpecConfig is the default Docker engine repo.\n\tDefaultDockerSpecConfig = DockerSpecConfig{\n\t\tDockerEngineRepo: \"https:\/\/aptdocker.azureedge.net\/repo\",\n\t\tDockerComposeDownloadURL: \"https:\/\/github.com\/docker\/compose\/releases\/download\",\n\t}\n\n\t\/\/DefaultUbuntuImageConfig is the default Linux distribution.\n\tDefaultUbuntuImageConfig = AzureOSImageConfig{\n\t\tImageOffer: \"UbuntuServer\",\n\t\tImageSku: \"16.04-LTS\",\n\t\tImagePublisher: \"Canonical\",\n\t\tImageVersion: \"latest\",\n\t}\n\n\t\/\/SovereignCloudsUbuntuImageConfig is the Linux distribution for Azure Sovereign Clouds.\n\tSovereignCloudsUbuntuImageConfig = AzureOSImageConfig{\n\t\tImageOffer: \"UbuntuServer\",\n\t\tImageSku: \"16.04-LTS\",\n\t\tImagePublisher: \"Canonical\",\n\t\tImageVersion: \"latest\",\n\t}\n\n\t\/\/GermanCloudUbuntuImageConfig is the Linux distribution for Azure Sovereign Clouds.\n\tGermanCloudUbuntuImageConfig = AzureOSImageConfig{\n\t\tImageOffer: \"UbuntuServer\",\n\t\tImageSku: \"16.04-LTS\",\n\t\tImagePublisher: \"Canonical\",\n\t\tImageVersion: \"16.04.201801050\",\n\t}\n\n\t\/\/DefaultRHELOSImageConfig is the RHEL Linux distribution.\n\tDefaultRHELOSImageConfig = AzureOSImageConfig{\n\t\tImageOffer: \"RHEL\",\n\t\tImageSku: \"7.3\",\n\t\tImagePublisher: \"RedHat\",\n\t\tImageVersion: \"latest\",\n\t}\n\n\t\/\/DefaultCoreOSImageConfig is the CoreOS Linux distribution.\n\tDefaultCoreOSImageConfig = AzureOSImageConfig{\n\t\tImageOffer: \"CoreOS\",\n\t\tImageSku: \"Stable\",\n\t\tImagePublisher: \"CoreOS\",\n\t\tImageVersion: \"latest\",\n\t}\n\n\t\/\/ DefaultAKSOSImageConfig is the AKS image based on Ubuntu 16.04.\n\tDefaultAKSOSImageConfig = AzureOSImageConfig{\n\t\tImageOffer: \"aks\",\n\t\tImageSku: \"aks-ubuntu-1604-201810\",\n\t\tImagePublisher: \"microsoft-aks\",\n\t\tImageVersion: \"2018.10.26\",\n\t}\n\n\t\/\/ DefaultAKSDockerEngineOSImageConfig is the AKS image based on Ubuntu 16.04.\n\tDefaultAKSDockerEngineOSImageConfig = AzureOSImageConfig{\n\t\tImageOffer: \"aks\",\n\t\tImageSku: \"aks-ubuntu-1604-docker-engine\",\n\t\tImagePublisher: \"microsoft-aks\",\n\t\tImageVersion: \"2018.10.26\",\n\t}\n\n\t\/\/DefaultOpenShift39RHELImageConfig is the OpenShift on RHEL distribution.\n\tDefaultOpenShift39RHELImageConfig = AzureOSImageConfig{\n\t\tImageOffer: \"acsengine-preview\",\n\t\tImageSku: \"rhel74\",\n\t\tImagePublisher: \"redhat\",\n\t\tImageVersion: \"latest\",\n\t}\n\n\t\/\/DefaultOpenShift39CentOSImageConfig is the OpenShift on CentOS distribution.\n\tDefaultOpenShift39CentOSImageConfig = AzureOSImageConfig{\n\t\tImageOffer: \"origin-acsengine-preview\",\n\t\tImageSku: \"centos7\",\n\t\tImagePublisher: \"redhat\",\n\t\tImageVersion: \"latest\",\n\t}\n\n\t\/\/AzureCloudSpec is the default configurations for global azure.\n\tAzureCloudSpec = AzureEnvironmentSpecConfig{\n\t\tCloudName: azurePublicCloud,\n\t\t\/\/DockerSpecConfig specify the docker engine download repo\n\t\tDockerSpecConfig: DefaultDockerSpecConfig,\n\t\t\/\/KubernetesSpecConfig is the default kubernetes container image url.\n\t\tKubernetesSpecConfig: DefaultKubernetesSpecConfig,\n\t\tDCOSSpecConfig: DefaultDCOSSpecConfig,\n\n\t\tEndpointConfig: AzureEndpointConfig{\n\t\t\tResourceManagerVMDNSSuffix: \"cloudapp.azure.com\",\n\t\t},\n\n\t\tOSImageConfig: map[Distro]AzureOSImageConfig{\n\t\t\tUbuntu: DefaultUbuntuImageConfig,\n\t\t\tRHEL: DefaultRHELOSImageConfig,\n\t\t\tCoreOS: DefaultCoreOSImageConfig,\n\t\t\tAKS: DefaultAKSOSImageConfig,\n\t\t\tAKSDockerEngine: DefaultAKSDockerEngineOSImageConfig,\n\t\t\t\/\/ Image config supported for OpenShift\n\t\t\tOpenShift39RHEL: DefaultOpenShift39RHELImageConfig,\n\t\t\tOpenShiftCentOS: DefaultOpenShift39CentOSImageConfig,\n\t\t},\n\t}\n\n\t\/\/AzureGermanCloudSpec is the German cloud config.\n\tAzureGermanCloudSpec = AzureEnvironmentSpecConfig{\n\t\tCloudName: azureGermanCloud,\n\t\tDockerSpecConfig: DefaultDockerSpecConfig,\n\t\tKubernetesSpecConfig: DefaultKubernetesSpecConfig,\n\t\tDCOSSpecConfig: DefaultDCOSSpecConfig,\n\t\tEndpointConfig: AzureEndpointConfig{\n\t\t\tResourceManagerVMDNSSuffix: \"cloudapp.microsoftazure.de\",\n\t\t},\n\t\tOSImageConfig: map[Distro]AzureOSImageConfig{\n\t\t\tUbuntu: GermanCloudUbuntuImageConfig,\n\t\t\tRHEL: DefaultRHELOSImageConfig,\n\t\t\tCoreOS: DefaultCoreOSImageConfig,\n\t\t\tAKS: GermanCloudUbuntuImageConfig,\n\t\t},\n\t}\n\n\t\/\/AzureUSGovernmentCloud is the US government config.\n\tAzureUSGovernmentCloud = AzureEnvironmentSpecConfig{\n\t\tCloudName: azureUSGovernmentCloud,\n\t\tDockerSpecConfig: DefaultDockerSpecConfig,\n\t\tKubernetesSpecConfig: DefaultKubernetesSpecConfig,\n\t\tDCOSSpecConfig: DefaultDCOSSpecConfig,\n\t\tEndpointConfig: AzureEndpointConfig{\n\t\t\tResourceManagerVMDNSSuffix: \"cloudapp.usgovcloudapi.net\",\n\t\t},\n\t\tOSImageConfig: map[Distro]AzureOSImageConfig{\n\t\t\tUbuntu: SovereignCloudsUbuntuImageConfig,\n\t\t\tRHEL: DefaultRHELOSImageConfig,\n\t\t\tCoreOS: DefaultCoreOSImageConfig,\n\t\t\tAKS: SovereignCloudsUbuntuImageConfig,\n\t\t},\n\t}\n\n\t\/\/AzureChinaCloudSpec is the configurations for Azure China (Mooncake)\n\tAzureChinaCloudSpec = AzureEnvironmentSpecConfig{\n\t\tCloudName: azureChinaCloud,\n\t\t\/\/DockerSpecConfig specify the docker engine download repo\n\t\tDockerSpecConfig: DockerSpecConfig{\n\t\t\tDockerEngineRepo: \"https:\/\/mirror.azk8s.cn\/docker-engine\/apt\/repo\/\",\n\t\t\tDockerComposeDownloadURL: \"https:\/\/mirror.azk8s.cn\/docker-toolbox\/linux\/compose\",\n\t\t},\n\t\t\/\/KubernetesSpecConfig - Due to Chinese firewall issue, the default containers from google is blocked, use the Chinese local mirror instead\n\t\tKubernetesSpecConfig: KubernetesSpecConfig{\n\t\t\tKubernetesImageBase: \"gcr.azk8s.cn\/google_containers\/\",\n\t\t\tTillerImageBase: \"gcr.azk8s.cn\/kubernetes-helm\/\",\n\t\t\tACIConnectorImageBase: \"dockerhub.azk8s.cn\/microsoft\/\",\n\t\t\tNVIDIAImageBase: \"dockerhub.azk8s.cn\/nvidia\/\",\n\t\t\tAzureCNIImageBase: \"dockerhub.azk8s.cn\/containernetworking\/\",\n\t\t\tEtcdDownloadURLBase: \"https:\/\/mirror.azk8s.cn\/kubernetes\/etcd\",\n\t\t\tKubeBinariesSASURLBase: DefaultKubernetesSpecConfig.KubeBinariesSASURLBase,\n\t\t\tWindowsPackageSASURLBase: DefaultKubernetesSpecConfig.WindowsPackageSASURLBase,\n\t\t\tWindowsTelemetryGUID: DefaultKubernetesSpecConfig.WindowsTelemetryGUID,\n\t\t\tCNIPluginsDownloadURL: \"https:\/\/mirror.azk8s.cn\/kubernetes\/containernetworking-plugins\/cni-plugins-amd64-\" + CNIPluginVer + \".tgz\",\n\t\t\tVnetCNILinuxPluginsDownloadURL: \"https:\/\/mirror.azk8s.cn\/kubernetes\/azure-container-networking\/azure-vnet-cni-linux-amd64-\" + AzureCniPluginVerLinux + \".tgz\",\n\t\t\tVnetCNIWindowsPluginsDownloadURL: \"https:\/\/mirror.azk8s.cn\/kubernetes\/azure-container-networking\/azure-vnet-cni-windows-amd64-\" + AzureCniPluginVerWindows + \".zip\",\n\t\t\tContainerdDownloadURLBase: \"https:\/\/mirror.azk8s.cn\/kubernetes\/containerd\/\",\n\t\t},\n\t\tDCOSSpecConfig: DCOSSpecConfig{\n\t\t\tDCOS188BootstrapDownloadURL: fmt.Sprintf(AzureChinaCloudDCOSBootstrapDownloadURL, \"5df43052907c021eeb5de145419a3da1898c58a5\"),\n\t\t\tDCOSWindowsBootstrapDownloadURL: \"https:\/\/dcosdevstorage.blob.core.windows.net\/dcos-windows\",\n\t\t\tDCOS190BootstrapDownloadURL: fmt.Sprintf(AzureChinaCloudDCOSBootstrapDownloadURL, \"58fd0833ce81b6244fc73bf65b5deb43217b0bd7\"),\n\t\t\tDCOS198BootstrapDownloadURL: fmt.Sprintf(AzureChinaCloudDCOSBootstrapDownloadURL, \"f4ae0d20665fc68ee25282d6f78681b2773c6e10\"),\n\t\t},\n\n\t\tEndpointConfig: AzureEndpointConfig{\n\t\t\tResourceManagerVMDNSSuffix: \"cloudapp.chinacloudapi.cn\",\n\t\t},\n\t\tOSImageConfig: map[Distro]AzureOSImageConfig{\n\t\t\tUbuntu: SovereignCloudsUbuntuImageConfig,\n\t\t\tRHEL: DefaultRHELOSImageConfig,\n\t\t\tCoreOS: DefaultCoreOSImageConfig,\n\t\t\tAKS: SovereignCloudsUbuntuImageConfig,\n\t\t},\n\t}\n\n\t\/\/ AzureCloudSpecEnvMap is the environment configuration map for all the Azure cloid environments.\n\tAzureCloudSpecEnvMap = map[string]AzureEnvironmentSpecConfig{\n\t\tazureChinaCloud: AzureChinaCloudSpec,\n\t\tazureGermanCloud: AzureGermanCloudSpec,\n\t\tazureUSGovernmentCloud: AzureUSGovernmentCloud,\n\t\tazurePublicCloud: AzureCloudSpec,\n\t}\n)\n<|endoftext|>"} {"text":"<commit_before>package stns\n\nimport (\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\n\t\"github.com\/BurntSushi\/toml\"\n)\n\ntype Config struct {\n\tPort int `toml:\"port\"`\n\tInclude string `toml:\"include\"`\n\tSalt bool `toml:\"salt_enable\"`\n\tStretching int `toml:\"stretching_number\"`\n\tUser string `toml:\"user\"`\n\tPassword string `toml:\"password\"`\n\tHashType string `toml:\"hash_type\" json:\"hash_type\"`\n\tUsers Attributes\n\tGroups Attributes\n\tSudoers Attributes\n}\n\nvar MinUserId, MinGroupId int\n\nfunc LoadConfig(configFile string) (Config, error) {\n\tvar config Config\n\tdefaultConfig(&config)\n\n\t_, err := toml.DecodeFile(configFile, &config)\n\tif err != nil {\n\t\treturn Config{}, err\n\t}\n\n\tif config.Include != \"\" {\n\t\tif err := includeConfigFile(&config, config.Include); err != nil {\n\t\t\treturn Config{}, err\n\t\t}\n\t}\n\tsetMinId(&MinUserId, config.Users)\n\tsetMinId(&MinGroupId, config.Groups)\n\tmergeLinkAttribute(\"user\", config.Users)\n\tmergeLinkAttribute(\"group\", config.Groups)\n\treturn config, nil\n}\n\nfunc defaultConfig(config *Config) {\n\tconfig.Port = 1104\n\tconfig.Salt = false\n\tconfig.Stretching = 0\n\tconfig.HashType = \"sha256\"\n}\n\nfunc includeConfigFile(config *Config, include string) error {\n\tfiles, err := filepath.Glob(include)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, file := range files {\n\t\t_, err := toml.DecodeFile(file, &config)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"while loading included config file %s: %s\", file, err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc setMinId(min *int, attrs Attributes) {\n\tif len(attrs) > 0 {\n\t\tfor _, a := range attrs {\n\t\t\tswitch {\n\t\t\tcase *min == 0:\n\t\t\t\t*min = a.Id\n\t\t\tcase *min > a.Id:\n\t\t\t\t*min = a.Id\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc mergeLinkAttribute(rtype string, attr Attributes) {\n\tfor k, v := range attr {\n\t\tmergeValue := []string{}\n\t\tlinker := getLinker(rtype, v)\n\n\t\tif linker != nil && !reflect.ValueOf(linker).IsNil() &&\n\t\t\tlinker.LinkParams() != nil && !reflect.ValueOf(linker.LinkParams()).IsNil() {\n\t\t\tfor _, linkValue := range linker.LinkParams() {\n\t\t\t\tlinkValues := map[string][]string{k: linker.LinkValue()}\n\n\t\t\t\trecursiveSetLinkValue(attr, rtype, linkValue, linkValues)\n\t\t\t\tfor _, val := range linkValues {\n\t\t\t\t\tmergeValue = append(mergeValue, val...)\n\t\t\t\t}\n\t\t\t\tlinker.SetLinkValue(RemoveDuplicates(mergeValue))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc getLinker(rtype string, attr *Attribute) Linker {\n\tif attr != nil && !reflect.ValueOf(attr).IsNil() {\n\t\tif rtype == \"user\" {\n\t\t\treturn attr.User\n\t\t} else if rtype == \"group\" {\n\t\t\treturn attr.Group\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc recursiveSetLinkValue(attr Attributes, rtype, name string, result map[string][]string) {\n\tif result[name] != nil {\n\t\treturn\n\t}\n\n\tlinker := getLinker(rtype, attr[name])\n\n\tif linker != nil && !reflect.ValueOf(linker).IsNil() && len(linker.LinkValue()) > 0 {\n\t\tresult[name] = linker.LinkValue()\n\t\tif linker.LinkParams() != nil || !reflect.ValueOf(linker.LinkParams()).IsNil() {\n\t\t\tfor _, next_name := range linker.LinkParams() {\n\t\t\t\trecursiveSetLinkValue(attr, rtype, next_name, result)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc member(n string, xs []string) bool {\n\tfor _, x := range xs {\n\t\tif n == x {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc RemoveDuplicates(xs []string) []string {\n\tys := make([]string, 0, len(xs))\n\tfor _, x := range xs {\n\t\tif !member(x, ys) {\n\t\t\tys = append(ys, x)\n\t\t}\n\t}\n\treturn ys\n}\n<commit_msg>reset minid<commit_after>package stns\n\nimport (\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\n\t\"github.com\/BurntSushi\/toml\"\n)\n\ntype Config struct {\n\tPort int `toml:\"port\"`\n\tInclude string `toml:\"include\"`\n\tSalt bool `toml:\"salt_enable\"`\n\tStretching int `toml:\"stretching_number\"`\n\tUser string `toml:\"user\"`\n\tPassword string `toml:\"password\"`\n\tHashType string `toml:\"hash_type\" json:\"hash_type\"`\n\tUsers Attributes\n\tGroups Attributes\n\tSudoers Attributes\n}\n\nvar MinUserId, MinGroupId int\n\nfunc LoadConfig(configFile string) (Config, error) {\n\tvar config Config\n\tdefaultConfig(&config)\n\n\t_, err := toml.DecodeFile(configFile, &config)\n\tif err != nil {\n\t\treturn Config{}, err\n\t}\n\n\tif config.Include != \"\" {\n\t\tif err := includeConfigFile(&config, config.Include); err != nil {\n\t\t\treturn Config{}, err\n\t\t}\n\t}\n\tsetMinId(&MinUserId, config.Users)\n\tsetMinId(&MinGroupId, config.Groups)\n\tmergeLinkAttribute(\"user\", config.Users)\n\tmergeLinkAttribute(\"group\", config.Groups)\n\treturn config, nil\n}\n\nfunc defaultConfig(config *Config) {\n\tconfig.Port = 1104\n\tconfig.Salt = false\n\tconfig.Stretching = 0\n\tconfig.HashType = \"sha256\"\n}\n\nfunc includeConfigFile(config *Config, include string) error {\n\tfiles, err := filepath.Glob(include)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, file := range files {\n\t\t_, err := toml.DecodeFile(file, &config)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"while loading included config file %s: %s\", file, err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc setMinId(min *int, attrs Attributes) {\n\t*min = 0\n\tif len(attrs) > 0 {\n\t\tfor _, a := range attrs {\n\t\t\tswitch {\n\t\t\tcase *min == 0:\n\t\t\t\t*min = a.Id\n\t\t\tcase *min > a.Id:\n\t\t\t\t*min = a.Id\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc mergeLinkAttribute(rtype string, attr Attributes) {\n\tfor k, v := range attr {\n\t\tmergeValue := []string{}\n\t\tlinker := getLinker(rtype, v)\n\n\t\tif linker != nil && !reflect.ValueOf(linker).IsNil() &&\n\t\t\tlinker.LinkParams() != nil && !reflect.ValueOf(linker.LinkParams()).IsNil() {\n\t\t\tfor _, linkValue := range linker.LinkParams() {\n\t\t\t\tlinkValues := map[string][]string{k: linker.LinkValue()}\n\n\t\t\t\trecursiveSetLinkValue(attr, rtype, linkValue, linkValues)\n\t\t\t\tfor _, val := range linkValues {\n\t\t\t\t\tmergeValue = append(mergeValue, val...)\n\t\t\t\t}\n\t\t\t\tlinker.SetLinkValue(RemoveDuplicates(mergeValue))\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc getLinker(rtype string, attr *Attribute) Linker {\n\tif attr != nil && !reflect.ValueOf(attr).IsNil() {\n\t\tif rtype == \"user\" {\n\t\t\treturn attr.User\n\t\t} else if rtype == \"group\" {\n\t\t\treturn attr.Group\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc recursiveSetLinkValue(attr Attributes, rtype, name string, result map[string][]string) {\n\tif result[name] != nil {\n\t\treturn\n\t}\n\n\tlinker := getLinker(rtype, attr[name])\n\n\tif linker != nil && !reflect.ValueOf(linker).IsNil() && len(linker.LinkValue()) > 0 {\n\t\tresult[name] = linker.LinkValue()\n\t\tif linker.LinkParams() != nil || !reflect.ValueOf(linker.LinkParams()).IsNil() {\n\t\t\tfor _, next_name := range linker.LinkParams() {\n\t\t\t\trecursiveSetLinkValue(attr, rtype, next_name, result)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc member(n string, xs []string) bool {\n\tfor _, x := range xs {\n\t\tif n == x {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc RemoveDuplicates(xs []string) []string {\n\tys := make([]string, 0, len(xs))\n\tfor _, x := range xs {\n\t\tif !member(x, ys) {\n\t\t\tys = append(ys, x)\n\t\t}\n\t}\n\treturn ys\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>More ticme in the ticker, the interval was too short for real loads<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 The etcd Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build !cov\n\npackage e2e\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com\/coreos\/etcd\/version\"\n)\n\nfunc TestV3CurlCipherSuitesValid(t *testing.T) { testV3CurlCipherSuites(t, true) }\nfunc TestV3CurlCipherSuitesMismatch(t *testing.T) { testV3CurlCipherSuites(t, false) }\nfunc testV3CurlCipherSuites(t *testing.T, valid bool) {\n\tcc := configClientTLS\n\tcc.clusterSize = 1\n\tcc.cipherSuites = []string{\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t}\n\ttestFunc := cipherSuiteTestValid\n\tif !valid {\n\t\ttestFunc = cipherSuiteTestMismatch\n\t}\n\ttestCtl(t, testFunc, withCfg(cc))\n}\n\nfunc cipherSuiteTestValid(cx ctlCtx) {\n\tif err := cURLGet(cx.epc, cURLReq{\n\t\tendpoint: \"\/metrics\",\n\t\texpected: fmt.Sprintf(`etcd_server_version{server_version=\"%s\"} 1`, version.Version),\n\t\tmetricsURLScheme: cx.cfg.metricsURLScheme,\n\t\tciphers: \"ECDHE-RSA-AES128-GCM-SHA256\", \/\/ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n\t}); err != nil {\n\t\tcx.t.Fatalf(\"failed get with curl (%v)\", err)\n\t}\n}\n\nfunc cipherSuiteTestMismatch(cx ctlCtx) {\n\tif err := cURLGet(cx.epc, cURLReq{\n\t\tendpoint: \"\/metrics\",\n\t\texpected: \"alert handshake failure\",\n\t\tmetricsURLScheme: cx.cfg.metricsURLScheme,\n\t\tciphers: \"ECDHE-RSA-DES-CBC3-SHA\", \/\/ TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n\t}); err != nil {\n\t\tcx.t.Fatalf(\"failed get with curl (%v)\", err)\n\t}\n}\n<commit_msg>tests\/e2e: do not run cipher suite tests for gRPC proxy<commit_after>\/\/ Copyright 2018 The etcd Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build !cov,!cluster_proxy\n\npackage e2e\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com\/coreos\/etcd\/version\"\n)\n\nfunc TestV3CurlCipherSuitesValid(t *testing.T) { testV3CurlCipherSuites(t, true) }\nfunc TestV3CurlCipherSuitesMismatch(t *testing.T) { testV3CurlCipherSuites(t, false) }\nfunc testV3CurlCipherSuites(t *testing.T, valid bool) {\n\tcc := configClientTLS\n\tcc.clusterSize = 1\n\tcc.cipherSuites = []string{\n\t\t\"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384\",\n\t\t\"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305\",\n\t\t\"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305\",\n\t}\n\ttestFunc := cipherSuiteTestValid\n\tif !valid {\n\t\ttestFunc = cipherSuiteTestMismatch\n\t}\n\ttestCtl(t, testFunc, withCfg(cc))\n}\n\nfunc cipherSuiteTestValid(cx ctlCtx) {\n\tif err := cURLGet(cx.epc, cURLReq{\n\t\tendpoint: \"\/metrics\",\n\t\texpected: fmt.Sprintf(`etcd_server_version{server_version=\"%s\"} 1`, version.Version),\n\t\tmetricsURLScheme: cx.cfg.metricsURLScheme,\n\t\tciphers: \"ECDHE-RSA-AES128-GCM-SHA256\", \/\/ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\n\t}); err != nil {\n\t\tcx.t.Fatalf(\"failed get with curl (%v)\", err)\n\t}\n}\n\nfunc cipherSuiteTestMismatch(cx ctlCtx) {\n\tif err := cURLGet(cx.epc, cURLReq{\n\t\tendpoint: \"\/metrics\",\n\t\texpected: \"alert handshake failure\",\n\t\tmetricsURLScheme: cx.cfg.metricsURLScheme,\n\t\tciphers: \"ECDHE-RSA-DES-CBC3-SHA\", \/\/ TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA\n\t}); err != nil {\n\t\tcx.t.Fatalf(\"failed get with curl (%v)\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package values\n\nimport (\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"testing\"\n\t\"time\"\n\t. \"github.com\/zubairhamed\/go-commons\/typeval\"\n)\n\nfunc TestStringValue(t *testing.T) {\n\tval := String(\"this is a string\")\n\tassert.Equal(t, VALUETYPE_STRING, val.GetType())\n\tassert.Equal(t, \"this is a string\", val.GetValue())\n\tassert.Equal(t, \"this is a string\", val.GetStringValue())\n\tassert.Equal(t, 16, len(val.GetBytes()))\n}\n\nfunc TestIntegerValue(t *testing.T) {\n\tval := Integer(42)\n\tassert.Equal(t, VALUETYPE_INTEGER, val.GetType())\n\tassert.Equal(t, 42, val.GetValue())\n\tassert.Equal(t, \"42\", val.GetStringValue())\n\tassert.Equal(t, 2, len(val.GetBytes()))\n}\n\nfunc TestTimeValue(t *testing.T) {\n\ttv := time.Unix(1433767779, 0)\n\tval := Time(tv)\n\tassert.Equal(t, VALUETYPE_TIME, val.GetType())\n\tassert.Equal(t, tv, val.GetValue())\n\tassert.Equal(t, \"1433767779\", val.GetStringValue())\n\tassert.Equal(t, 10, len(val.GetBytes()))\n}\n\nfunc TestFloatValue(t *testing.T) {\n\tval := Float(4.2)\n\tassert.Equal(t, VALUETYPE_FLOAT, val.GetType())\n\tassert.Equal(t, 4.2, val.GetValue())\n\tassert.Equal(t, \"4\", val.GetStringValue())\n\tassert.Equal(t, 8, len(val.GetBytes()))\n}\n\nfunc TestBooleanValue(t *testing.T) {\n\tval := Boolean(true)\n\tassert.Equal(t, VALUETYPE_BOOLEAN, val.GetType())\n\tassert.Equal(t, true, val.GetValue())\n\tassert.Equal(t, \"1\", val.GetStringValue())\n\tassert.Equal(t, 0, len(val.GetBytes()))\n}\n\nfunc TestEmptyValue(t *testing.T) {\n\tval := Empty()\n\tassert.Equal(t, VALUETYPE_EMPTY, val.GetType())\n\tassert.Equal(t, \"\", val.GetValue())\n\tassert.Equal(t, \"\", val.GetStringValue())\n\tassert.Equal(t, 0, len(val.GetBytes()))\n}\n\n\/*\nfunc TestTlvValue(t *testing.T) {\n\tval := Tlv([]byte{0, 1, 2})\n\tassert.Equal(t, VALUETYPE_TLV, val.GetType())\n\tassert.Equal(t, []byte{0, 1, 2}, val.GetValue())\n\tassert.Equal(t, \"\", val.GetStringValue())\n\tassert.Equal(t, 3, len(val.GetBytes()))\n}\n\nfunc TestMultipleResourceInstanceValue(t *testing.T) {\n\n}\n*\/\n<commit_msg>fixing values test<commit_after>package values\n\nimport (\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"testing\"\n\t\"time\"\n\t. \"github.com\/zubairhamed\/go-commons\/typeval\"\n)\n\nfunc TestStringValue(t *testing.T) {\n\tval := String(\"this is a string\")\n\tassert.Equal(t, VALUETYPE_STRING, val.GetType())\n\tassert.Equal(t, \"this is a string\", val.GetValue())\n\tassert.Equal(t, \"this is a string\", val.GetStringValue())\n\tassert.Equal(t, 16, len(val.GetBytes()))\n}\n\nfunc TestIntegerValue(t *testing.T) {\n\tval := Integer(42)\n\tassert.Equal(t, VALUETYPE_INTEGER, val.GetType())\n\tassert.Equal(t, 42, val.GetValue())\n\tassert.Equal(t, \"42\", val.GetStringValue())\n\tassert.Equal(t, 2, len(val.GetBytes()))\n}\n\nfunc TestTimeValue(t *testing.T) {\n\ttv := time.Unix(1433767779, 0)\n\tval := Time(tv)\n\tassert.Equal(t, VALUETYPE_TIME, val.GetType())\n\tassert.Equal(t, tv, val.GetValue())\n\tassert.Equal(t, \"1433767779\", val.GetStringValue())\n\tassert.Equal(t, 10, len(val.GetBytes()))\n}\n\nfunc TestFloatValue(t *testing.T) {\n\tval := Float(float32(4.2))\n\tassert.Equal(t, VALUETYPE_FLOAT, val.GetType())\n\tassert.Equal(t, float32(4.2), val.GetValue())\n\tassert.Equal(t, \"4\", val.GetStringValue())\n\tassert.Equal(t, 4, len(val.GetBytes()))\n}\n\nfunc TestBooleanValue(t *testing.T) {\n\tval := Boolean(true)\n\tassert.Equal(t, VALUETYPE_BOOLEAN, val.GetType())\n\tassert.Equal(t, true, val.GetValue())\n\tassert.Equal(t, \"1\", val.GetStringValue())\n\tassert.Equal(t, 0, len(val.GetBytes()))\n}\n\nfunc TestEmptyValue(t *testing.T) {\n\tval := Empty()\n\tassert.Equal(t, VALUETYPE_EMPTY, val.GetType())\n\tassert.Equal(t, \"\", val.GetValue())\n\tassert.Equal(t, \"\", val.GetStringValue())\n\tassert.Equal(t, 0, len(val.GetBytes()))\n}\n\n\/*\nfunc TestTlvValue(t *testing.T) {\n\tval := Tlv([]byte{0, 1, 2})\n\tassert.Equal(t, VALUETYPE_TLV, val.GetType())\n\tassert.Equal(t, []byte{0, 1, 2}, val.GetValue())\n\tassert.Equal(t, \"\", val.GetStringValue())\n\tassert.Equal(t, 3, len(val.GetBytes()))\n}\n\nfunc TestMultipleResourceInstanceValue(t *testing.T) {\n\n}\n*\/\n<|endoftext|>"} {"text":"<commit_before>package facebook_test\n\nimport (\n\t\"github.com\/Lunchr\/luncher-api\/db\/model\"\n\t\"github.com\/Lunchr\/luncher-api\/facebook\"\n\t\"github.com\/Lunchr\/luncher-api\/facebook\/mocks\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Post\", func() {\n\tvar (\n\t\tfacebookPost facebook.Post\n\n\t\tgroupPosts *mocks.OfferGroupPosts\n\t\toffers *mocks.Offers\n\t\tregions *mocks.Regions\n\t\tfbAuth *mocks.Authenticator\n\n\t\tuser *model.User\n\t\trestaurant *model.Restaurant\n\t)\n\n\tBeforeEach(func() {\n\t\tgroupPosts = new(mocks.OfferGroupPosts)\n\t\toffers = new(mocks.Offers)\n\t\tregions = new(mocks.Regions)\n\t\tfbAuth = new(mocks.Authenticator)\n\n\t\tfacebookPost = facebook.NewPost(groupPosts, offers, regions, fbAuth)\n\t})\n\n\tDescribe(\"Update\", func() {\n\t\tvar date model.DateWithoutTime\n\n\t\tContext(\"for restaurants without an associated FB page\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\trestaurant = &model.Restaurant{\n\t\t\t\t\tFacebookPageID: \"\",\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tIt(\"does nothing\", func() {\n\t\t\t\terr := facebookPost.Update(date, user, restaurant)\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t})\n\t\t})\n\t})\n})\n<commit_msg>Add first full test<commit_after>package facebook_test\n\nimport (\n\t\"time\"\n\n\t\"github.com\/Lunchr\/luncher-api\/db\/model\"\n\t\"github.com\/Lunchr\/luncher-api\/facebook\"\n\t\"github.com\/Lunchr\/luncher-api\/facebook\/mocks\"\n\tfbmodel \"github.com\/deiwin\/facebook\/model\"\n\t\"golang.org\/x\/oauth2\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Post\", func() {\n\tvar (\n\t\tfacebookPost facebook.Post\n\n\t\tgroupPosts *mocks.OfferGroupPosts\n\t\toffersCollection *mocks.Offers\n\t\tregions *mocks.Regions\n\t\tfbAuth *mocks.Authenticator\n\n\t\tuser *model.User\n\t\trestaurant *model.Restaurant\n\t)\n\n\tBeforeEach(func() {\n\t\tgroupPosts = new(mocks.OfferGroupPosts)\n\t\toffersCollection = new(mocks.Offers)\n\t\tregions = new(mocks.Regions)\n\t\tfbAuth = new(mocks.Authenticator)\n\n\t\tfacebookPost = facebook.NewPost(groupPosts, offersCollection, regions, fbAuth)\n\t})\n\n\tJustBeforeEach(func() {\n\t})\n\n\tDescribe(\"Update\", func() {\n\t\tvar date model.DateWithoutTime\n\n\t\tContext(\"for restaurants without an associated FB page\", func() {\n\t\t\tBeforeEach(func() {\n\t\t\t\trestaurant = &model.Restaurant{\n\t\t\t\t\tFacebookPageID: \"\",\n\t\t\t\t}\n\t\t\t\tdate = model.DateWithoutTime(\"2011-04-24\")\n\t\t\t})\n\n\t\t\tIt(\"does nothing\", func() {\n\t\t\t\terr := facebookPost.Update(date, user, restaurant)\n\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t})\n\t\t})\n\n\t\tContext(\"for a restaurant with an associated FB page\", func() {\n\t\t\tvar (\n\t\t\t\trestaurantID bson.ObjectId\n\t\t\t\tfacebookPageID string\n\t\t\t\tfacebookUserToken *oauth2.Token\n\t\t\t\tfacebookPageToken string\n\t\t\t\tfbAPI *mocks.API\n\t\t\t)\n\n\t\t\tBeforeEach(func() {\n\t\t\t\trestaurantID = bson.NewObjectId()\n\t\t\t\tfacebookPageID = \"a page ID\"\n\t\t\t\tfacebookUserToken = &oauth2.Token{\n\t\t\t\t\tAccessToken: \"a user token\",\n\t\t\t\t}\n\t\t\t\tfacebookPageToken = \"a page token\"\n\n\t\t\t\tregionName := \"a region\"\n\t\t\t\tregion := &model.Region{\n\t\t\t\t\tName: regionName,\n\t\t\t\t\tLocation: \"UTC\",\n\t\t\t\t}\n\t\t\t\tregions.On(\"GetName\", regionName).Return(region, nil)\n\n\t\t\t\tstartTime := time.Date(2011, 04, 24, 0, 0, 0, 0, time.UTC)\n\t\t\t\tendTime := time.Date(2011, 04, 25, 0, 0, 0, 0, time.UTC)\n\t\t\t\toffers := []*model.Offer{\n\t\t\t\t\t&model.Offer{\n\t\t\t\t\t\tCommonOfferFields: model.CommonOfferFields{\n\t\t\t\t\t\t\tTitle: \"atitle\",\n\t\t\t\t\t\t\tPrice: 5.670000000000,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t&model.Offer{\n\t\t\t\t\t\tCommonOfferFields: model.CommonOfferFields{\n\t\t\t\t\t\t\tTitle: \"btitle\",\n\t\t\t\t\t\t\tPrice: 4.670000000000,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\toffersCollection.On(\"GetForRestaurantWithinTimeBounds\", restaurantID, startTime, endTime).Return(offers, nil)\n\n\t\t\t\trestaurant = &model.Restaurant{\n\t\t\t\t\tID: restaurantID,\n\t\t\t\t\tFacebookPageID: facebookPageID,\n\t\t\t\t\tRegion: regionName,\n\t\t\t\t}\n\t\t\t\tuser = &model.User{\n\t\t\t\t\tSession: &model.UserSession{\n\t\t\t\t\t\tFacebookUserToken: *facebookUserToken,\n\t\t\t\t\t\tFacebookPageToken: facebookPageToken,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tfbAPI = new(mocks.API)\n\t\t\t\tfbAuth.On(\"APIConnection\", facebookUserToken).Return(fbAPI)\n\t\t\t})\n\n\t\t\tContext(\"with an existing OfferGroupPost\", func() {\n\t\t\t\tvar (\n\t\t\t\t\tmessageTemplate string\n\t\t\t\t\tofferGroupPost *model.OfferGroupPost\n\t\t\t\t\tfacebookPostID string\n\t\t\t\t)\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tid := bson.NewObjectId()\n\t\t\t\t\tmessageTemplate = \"a message template\"\n\t\t\t\t\tofferGroupPost = &model.OfferGroupPost{\n\t\t\t\t\t\tID: id,\n\t\t\t\t\t\tDate: date,\n\t\t\t\t\t\tMessageTemplate: messageTemplate,\n\t\t\t\t\t}\n\t\t\t\t\tgroupPosts.On(\"GetByDate\", date, restaurantID).Return(offerGroupPost, nil)\n\n\t\t\t\t\tfacebookPostID = \"fb post id\"\n\t\t\t\t\tfbAPI.On(\"PagePublish\", facebookPageToken, facebookPageID,\n\t\t\t\t\t\tmessageTemplate+\"\\n\\natitle - 5.67€\\nbtitle - 4.67€\").Return(&fbmodel.Post{\n\t\t\t\t\t\tID: facebookPostID,\n\t\t\t\t\t}, nil)\n\t\t\t\t\tgroupPosts.On(\"UpdateByID\", id, &model.OfferGroupPost{\n\t\t\t\t\t\tID: id,\n\t\t\t\t\t\tDate: date,\n\t\t\t\t\t\tMessageTemplate: messageTemplate,\n\t\t\t\t\t\tFBPostID: facebookPostID,\n\t\t\t\t\t}).Return(nil)\n\t\t\t\t})\n\n\t\t\t\tContext(\"without a previous associated FB post\", func() {\n\t\t\t\t\tAfterEach(func() {\n\t\t\t\t\t\tgroupPosts.AssertExpectations(GinkgoT())\n\t\t\t\t\t\toffersCollection.AssertExpectations(GinkgoT())\n\t\t\t\t\t\tregions.AssertExpectations(GinkgoT())\n\t\t\t\t\t\tfbAuth.AssertExpectations(GinkgoT())\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"should succeed\", func() {\n\t\t\t\t\t\terr := facebookPost.Update(date, user, restaurant)\n\t\t\t\t\t\tExpect(err).To(BeNil())\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package templates\n\nvar LoggedInHome = `\n<!doctype html>\n <html lang=\"en\">\n <head>\n {{template \"header\" . }}\n\n <link rel=\"stylesheet\" href=\"\/a\/p\/p\/{{.Version}}\/kd.css\" \/>\n <link rel=\"stylesheet\" href=\"\/a\/p\/p\/{{.Version}}\/app.css\" \/>\n <\/head>\n\n <body class='logged-in'>\n {{.UnsupportedHTML}}\n\n <script>\n var _globals = {\n config: {{.Runtime}},\n isLoggedInOnLoad: true,\n userId: {{.User.GetWithDefaultStr \"UserId\" }},\n userAccount: {{.User.GetWithDefaultHash \"Account\" }},\n currentGroup: {{.User.GetWithDefaultHash \"Group\" }},\n userEnvironmentData: {{.User.GetWithDefaultHash \"EnvData\" }},\n socialApiData: {{.User.GetWithDefaultHash \"SocialApiData\" }}\n };\n <\/script>\n\n <script src=\"\/a\/p\/p\/{{.Version}}\/thirdparty\/pubnub.min.js\"><\/script>\n <script src=\"\/a\/p\/p\/{{.Version}}\/bundle.js\"><\/script>\n <script>require('app')();<\/script>\n\n <script>\n (function(d) {\n var config = {\n kitId: 'rbd0tum',\n scriptTimeout: 3000\n },\n h=d.documentElement,t=setTimeout(function(){h.className=h.className.replace(\/\\bwf-loading\\b\/g,\"\")+\" wf-inactive\";},config.scriptTimeout),tk=d.createElement(\"script\"),f=false,s=d.getElementsByTagName(\"script\")[0],a;h.className+=\" wf-loading\";tk.src='\/\/use.typekit.net\/'+config.kitId+'.js';tk.async=true;tk.onload=tk.onreadystatechange=function(){a=this.readyState;if(f||a&&a!=\"complete\"&&a!=\"loaded\")return;f=true;clearTimeout(t);try{Typekit.load(config)}catch(e){}};s.parentNode.insertBefore(tk,s)\n })(document);\n <\/script>\n\n {{if not .Impersonating }}\n <script type=\"text\/javascript\">\n (function () {\n var _user_id = '{{.User.GetWithDefaultStr \"Username\" }}'; var _session_id = '{{.User.GetWithDefaultStr \"SessionId\" }}'; var _sift = _sift || []; _sift.push(['_setAccount', 'f270274999']); _sift.push(['_setUserId', _user_id]); _sift.push(['_setSessionId', _session_id]); _sift.push(['_trackPageview']); (function() { function ls() { var e = document.createElement('script'); e.type = 'text\/javascript'; e.async = true; e.src = ('https:' == document.location.protocol ? 'https:\/\/' : 'http:\/\/') + 'cdn.siftscience.com\/s.js'; var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(e, s); } if (window.attachEvent) { window.attachEvent('onload', ls); } else { window.addEventListener('load', ls, false); } })();\n })();\n <\/script>\n {{end}}\n\n <script>\n (function(h,o,t,j,a,r){\n h.hj=h.hj||function(){(h.hj.q=h.hj.q||[]).push(arguments)};\n h._hjSettings={hjid:156048,hjsv:5};\n a=o.getElementsByTagName('head')[0];\n r=o.createElement('script');r.async=1;\n r.src=t+h._hjSettings.hjid+j+h._hjSettings.hjsv;\n a.appendChild(r);\n })(window,document,'\/\/static.hotjar.com\/c\/hotjar-','.js?sv=');\n <\/script>\n<\/body>\n<\/html>\n`\n<commit_msg>gowebserver: added userRoles and userPermissions<commit_after>package templates\n\nvar LoggedInHome = `\n<!doctype html>\n <html lang=\"en\">\n <head>\n {{template \"header\" . }}\n\n <link rel=\"stylesheet\" href=\"\/a\/p\/p\/{{.Version}}\/kd.css\" \/>\n <link rel=\"stylesheet\" href=\"\/a\/p\/p\/{{.Version}}\/app.css\" \/>\n <\/head>\n\n <body class='logged-in'>\n {{.UnsupportedHTML}}\n\n <script>\n var _globals = {\n config: {{.Runtime}},\n isLoggedInOnLoad: true,\n userId: {{.User.GetWithDefaultStr \"UserId\" }},\n userAccount: {{.User.GetWithDefaultHash \"Account\" }},\n currentGroup: {{.User.GetWithDefaultHash \"Group\" }},\n userEnvironmentData: {{.User.GetWithDefaultHash \"EnvData\" }},\n socialApiData: {{.User.GetWithDefaultHash \"SocialApiData\" }},\n userRoles: {{.User.GetWithDefaultHash \"Roles\" }}\n userPermissions: {{.User.GetWithDefaultHash \"Permissions\" }}\n };\n <\/script>\n\n <script src=\"\/a\/p\/p\/{{.Version}}\/thirdparty\/pubnub.min.js\"><\/script>\n <script src=\"\/a\/p\/p\/{{.Version}}\/bundle.js\"><\/script>\n <script>require('app')();<\/script>\n\n <script>\n (function(d) {\n var config = {\n kitId: 'rbd0tum',\n scriptTimeout: 3000\n },\n h=d.documentElement,t=setTimeout(function(){h.className=h.className.replace(\/\\bwf-loading\\b\/g,\"\")+\" wf-inactive\";},config.scriptTimeout),tk=d.createElement(\"script\"),f=false,s=d.getElementsByTagName(\"script\")[0],a;h.className+=\" wf-loading\";tk.src='\/\/use.typekit.net\/'+config.kitId+'.js';tk.async=true;tk.onload=tk.onreadystatechange=function(){a=this.readyState;if(f||a&&a!=\"complete\"&&a!=\"loaded\")return;f=true;clearTimeout(t);try{Typekit.load(config)}catch(e){}};s.parentNode.insertBefore(tk,s)\n })(document);\n <\/script>\n\n {{if not .Impersonating }}\n <script type=\"text\/javascript\">\n (function () {\n var _user_id = '{{.User.GetWithDefaultStr \"Username\" }}'; var _session_id = '{{.User.GetWithDefaultStr \"SessionId\" }}'; var _sift = _sift || []; _sift.push(['_setAccount', 'f270274999']); _sift.push(['_setUserId', _user_id]); _sift.push(['_setSessionId', _session_id]); _sift.push(['_trackPageview']); (function() { function ls() { var e = document.createElement('script'); e.type = 'text\/javascript'; e.async = true; e.src = ('https:' == document.location.protocol ? 'https:\/\/' : 'http:\/\/') + 'cdn.siftscience.com\/s.js'; var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(e, s); } if (window.attachEvent) { window.attachEvent('onload', ls); } else { window.addEventListener('load', ls, false); } })();\n })();\n <\/script>\n {{end}}\n\n <script>\n (function(h,o,t,j,a,r){\n h.hj=h.hj||function(){(h.hj.q=h.hj.q||[]).push(arguments)};\n h._hjSettings={hjid:156048,hjsv:5};\n a=o.getElementsByTagName('head')[0];\n r=o.createElement('script');r.async=1;\n r.src=t+h._hjSettings.hjid+j+h._hjSettings.hjsv;\n a.appendChild(r);\n })(window,document,'\/\/static.hotjar.com\/c\/hotjar-','.js?sv=');\n <\/script>\n<\/body>\n<\/html>\n`\n<|endoftext|>"} {"text":"<commit_before>package functions\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"time\"\n)\n\nfunc defaults(robot Robot) {\n\trobot.HandleFunc(robot.Direct(\"ping\"), func(s State) {\n\t\ts.Chat().Send(s.Message().ChannelID(), \"pong!\")\n\t})\n\n\trobot.HandleFunc(robot.Direct(\"roll(\\\\s(\\\\d+))?\"), func(s State) {\n\t\tdefer recover()\n\n\t\tbound := 100\n\t\tval := s.Params()[1]\n\n\t\tif val != \"\" {\n\t\t\tvar err error\n\t\t\tbound, err = strconv.Atoi(val)\n\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\trand.Seed(time.Now().UTC().UnixNano())\n\t\trandom := rand.Intn(bound)\n\n\t\tmsg := fmt.Sprintf(\"%s rolled a %d of %d\", s.Message().UserName(), random, bound)\n\t\ts.Chat().Send(s.Message().ChannelID(), msg)\n\t})\n}\n<commit_msg>Fixed defaults example<commit_after>package functions\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/brettbuddin\/victor\"\n)\n\nfunc Defaults(robot victor.Robot) {\n\trobot.HandleFunc(robot.Direct(\"ping\"), func(s victor.State) {\n\t\ts.Chat().Send(s.Message().ChannelID(), \"pong!\")\n\t})\n\n\trobot.HandleFunc(robot.Direct(\"roll(\\\\s(\\\\d+))?\"), func(s victor.State) {\n\t\tdefer recover()\n\n\t\tbound := 100\n\t\tval := s.Params()[1]\n\n\t\tif val != \"\" {\n\t\t\tvar err error\n\t\t\tbound, err = strconv.Atoi(val)\n\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\trand.Seed(time.Now().UTC().UnixNano())\n\t\trandom := rand.Intn(bound)\n\n\t\tmsg := fmt.Sprintf(\"%s rolled a %d of %d\", s.Message().UserName(), random, bound)\n\t\ts.Chat().Send(s.Message().ChannelID(), msg)\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright 2016 Padduck, LLC\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n \thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage templates\n\nconst SRCDS = `{\n \"pufferd\": {\n \"type\": \"srcds\",\n \"install\": {\n \"commands\": [\n {\n \"files\": \"https:\/\/steamcdn-a.akamaihd.net\/client\/installer\/steamcmd_linux.tar.gz\",\n \"type\": \"download\"\n },\n {\n \"commands\": [\n \"mkdir steamcmd\",\n \"tar --no-same-owner -xzvf steamcmd_linux.tar.gz -C steamcmd\",\n \"steamcmd\/steamcmd.sh +login anonymous +force_install_dir ${rootdir} +app_update ${appid} +quit\",\n \"mkdir -p .steam\/sdk32\",\n \"cp steamcmd\/linux32\/steamclient.so .steam\/sdk32\/steamclient.so\"\n ],\n \"type\": \"command\"\n }\n ]\n },\n \"run\": {\n \"stop\": \"exit\",\n \"pre\": [],\n \"post\": [],\n \"arguments\": [\n \t\"+ip\",\n \t\"${ip}\",\n \t\"+port\",\n \t\"${port}\",\n \t\"-game ${gametype}\",\n \t\"-console\",\n \"+map ${map}\",\n \t\"-norestart\"\n ],\n \"program\": \".\/srcds_run\"\n },\n \"data\": {\n \"appid\": {\n \"value\": \"232250\",\n \"required\": true,\n \"desc\": \"App ID\",\n \"display\": \"Application ID\",\n \"internal\": false\n },\n \"gametype\": {\n \"value\": \"tf\",\n \"required\": true,\n \"desc\": \"Game Type\",\n \"display\": \"tf, csgo, etc.\",\n \"internal\": false\n },\n \"map\": {\n \t\"value\": \"ctf_2fort\",\n \t\"required\": false,\n \t\"desc\": \"Map\",\n \t\"display\": \"Map to load\",\n \t\"internal\": false\n },\n \"ip\": {\n \"value\": \"0.0.0.0\",\n \"required\": true,\n \"desc\": \"What IP to bind the server to\",\n \"display\": \"IP\",\n \"internal\": false\n },\n \"port\": {\n \"value\": \"27015\",\n \"required\": true,\n \"desc\": \"What port to bind the server to\",\n \"display\": \"Port\",\n \"internal\": false\n }\n }\n }\n}`\n\nconst TF2 = `{\n \"pufferd\": {\n \"type\": \"srcds\",\n \"display\": \"Team Fortress 2\",\n \"install\": {\n \"commands\": [\n {\n \"files\": \"https:\/\/steamcdn-a.akamaihd.net\/client\/installer\/steamcmd_linux.tar.gz\",\n \"type\": \"download\"\n },\n {\n \"commands\": [\n \"mkdir steamcmd\",\n \"tar --no-same-owner -xzvf steamcmd_linux.tar.gz -C steamcmd\",\n \"steamcmd\/steamcmd.sh +login anonymous +force_install_dir ${rootdir} +app_update 232250 +quit\",\n \"mkdir -p .steam\/sdk32\",\n \"cp steamcmd\/linux32\/steamclient.so .steam\/sdk32\/steamclient.so\"\n ],\n \"type\": \"command\"\n }\n ]\n },\n \"run\": {\n \"stop\": \"exit\",\n \"pre\": [],\n \"post\": [],\n \"arguments\": [\n \t\"+ip\",\n \t\"${ip}\",\n \t\"+port\",\n \t\"${port}\",\n \t\"-game tf\",\n \t\"-console\",\n \"+map ${map}\",\n \t\"-norestart\"\n ],\n \"program\": \".\/srcds_run\"\n },\n \"environment\": {\n \"type\": \"tty\"\n },\n \"data\": {\n \"map\": {\n \t\"value\": \"ctf_2fort\",\n \t\"required\": true,\n \t\"desc\": \"TF2 Map\",\n \t\"display\": \"Team Fortess 2 Map to load\",\n \t\"internal\": false\n },\n \"ip\": {\n \"value\": \"0.0.0.0\",\n \"required\": true,\n \"desc\": \"What IP to bind the server to\",\n \"display\": \"IP\",\n \"internal\": false\n },\n \"port\": {\n \"value\": \"27015\",\n \"required\": true,\n \"desc\": \"What port to bind the server to\",\n \"display\": \"Port\",\n \"internal\": false\n }\n }\n }\n}`\n<commit_msg>Update SRCDS template<commit_after>\/*\n Copyright 2016 Padduck, LLC\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n \thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage templates\n\nconst SRCDS = `{\n \"pufferd\": {\n \"type\": \"srcds\",\n \"install\": {\n \"commands\": [\n {\n \"files\": \"https:\/\/steamcdn-a.akamaihd.net\/client\/installer\/steamcmd_linux.tar.gz\",\n \"type\": \"download\"\n },\n {\n \"commands\": [\n \"mkdir steamcmd\",\n \"tar --no-same-owner -xzvf steamcmd_linux.tar.gz -C steamcmd\",\n \"steamcmd\/steamcmd.sh +login anonymous +force_install_dir ${rootdir} +app_update ${appid} +quit\",\n \"mkdir -p .steam\/sdk32\",\n \"cp steamcmd\/linux32\/steamclient.so .steam\/sdk32\/steamclient.so\"\n ],\n \"type\": \"command\"\n }\n ]\n },\n \"run\": {\n \"stop\": \"exit\",\n \"pre\": [],\n \"post\": [],\n \"arguments\": [\n \t\"+ip\",\n \t\"${ip}\",\n \t\"+port\",\n \t\"${port}\",\n \t\"-game ${gametype}\",\n \t\"-console\",\n \"+map ${map}\",\n \t\"-norestart\"\n ],\n \"program\": \".\/srcds_run\"\n },\n \"environment\": {\n \"type\": \"tty\"\n },\n \"data\": {\n \"appid\": {\n \"value\": \"232250\",\n \"required\": true,\n \"desc\": \"App ID\",\n \"display\": \"Application ID\",\n \"internal\": false\n },\n \"gametype\": {\n \"value\": \"tf\",\n \"required\": false,\n \"desc\": \"Game Type\",\n \"display\": \"tf, csgo, etc.\",\n \"internal\": false\n },\n \"map\": {\n \t\"value\": \"ctf_2fort\",\n \t\"required\": false,\n \t\"desc\": \"Map\",\n \t\"display\": \"Map to load\",\n \t\"internal\": false\n },\n \"ip\": {\n \"value\": \"0.0.0.0\",\n \"required\": true,\n \"desc\": \"What IP to bind the server to\",\n \"display\": \"IP\",\n \"internal\": false\n },\n \"port\": {\n \"value\": \"27015\",\n \"required\": true,\n \"desc\": \"What port to bind the server to\",\n \"display\": \"Port\",\n \"internal\": false\n }\n }\n }\n}`\n\nconst TF2 = `{\n \"pufferd\": {\n \"type\": \"srcds\",\n \"display\": \"Team Fortress 2\",\n \"install\": {\n \"commands\": [\n {\n \"files\": \"https:\/\/steamcdn-a.akamaihd.net\/client\/installer\/steamcmd_linux.tar.gz\",\n \"type\": \"download\"\n },\n {\n \"commands\": [\n \"mkdir steamcmd\",\n \"tar --no-same-owner -xzvf steamcmd_linux.tar.gz -C steamcmd\",\n \"steamcmd\/steamcmd.sh +login anonymous +force_install_dir ${rootdir} +app_update 232250 +quit\",\n \"mkdir -p .steam\/sdk32\",\n \"cp steamcmd\/linux32\/steamclient.so .steam\/sdk32\/steamclient.so\"\n ],\n \"type\": \"command\"\n }\n ]\n },\n \"run\": {\n \"stop\": \"exit\",\n \"pre\": [],\n \"post\": [],\n \"arguments\": [\n \t\"+ip\",\n \t\"${ip}\",\n \t\"+port\",\n \t\"${port}\",\n \t\"-game tf\",\n \t\"-console\",\n \"+map ${map}\",\n \t\"-norestart\"\n ],\n \"program\": \".\/srcds_run\"\n },\n \"environment\": {\n \"type\": \"tty\"\n },\n \"data\": {\n \"map\": {\n \t\"value\": \"ctf_2fort\",\n \t\"required\": true,\n \t\"desc\": \"TF2 Map\",\n \t\"display\": \"Team Fortess 2 Map to load\",\n \t\"internal\": false\n },\n \"ip\": {\n \"value\": \"0.0.0.0\",\n \"required\": true,\n \"desc\": \"What IP to bind the server to\",\n \"display\": \"IP\",\n \"internal\": false\n },\n \"port\": {\n \"value\": \"27015\",\n \"required\": true,\n \"desc\": \"What port to bind the server to\",\n \"display\": \"Port\",\n \"internal\": false\n }\n }\n }\n}`\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Parse Plan 9 timezone(2) files.\n\npackage time\n\nimport (\n\t\"errors\"\n\t\"runtime\"\n\t\"syscall\"\n)\n\nfunc isSpace(r rune) bool {\n\treturn r == ' ' || r == '\\t' || r == '\\n'\n}\n\n\/\/ Copied from strings to avoid a dependency.\nfunc fields(s string) []string {\n\t\/\/ First count the fields.\n\tn := 0\n\tinField := false\n\tfor _, rune := range s {\n\t\twasInField := inField\n\t\tinField = !isSpace(rune)\n\t\tif inField && !wasInField {\n\t\t\tn++\n\t\t}\n\t}\n\n\t\/\/ Now create them.\n\ta := make([]string, n)\n\tna := 0\n\tfieldStart := -1 \/\/ Set to -1 when looking for start of field.\n\tfor i, rune := range s {\n\t\tif isSpace(rune) {\n\t\t\tif fieldStart >= 0 {\n\t\t\t\ta[na] = s[fieldStart:i]\n\t\t\t\tna++\n\t\t\t\tfieldStart = -1\n\t\t\t}\n\t\t} else if fieldStart == -1 {\n\t\t\tfieldStart = i\n\t\t}\n\t}\n\tif fieldStart >= 0 { \/\/ Last field might end at EOF.\n\t\ta[na] = s[fieldStart:]\n\t}\n\treturn a\n}\n\nfunc loadZoneDataPlan9(s string) (l *Location, err error) {\n\tf := fields(s)\n\tif len(f) < 4 {\n\t\tif len(f) == 2 && f[0] == \"GMT\" {\n\t\t\treturn UTC, nil\n\t\t}\n\t\treturn nil, badData\n\t}\n\n\tvar zones [2]zone\n\n\t\/\/ standard timezone offset\n\to, err := atoi(f[1])\n\tif err != nil {\n\t\treturn nil, badData\n\t}\n\tzones[0] = zone{name: f[0], offset: o, isDST: false}\n\n\t\/\/ alternate timezone offset\n\to, err = atoi(f[3])\n\tif err != nil {\n\t\treturn nil, badData\n\t}\n\tzones[1] = zone{name: f[2], offset: o, isDST: true}\n\n\t\/\/ transition time pairs\n\tvar tx []zoneTrans\n\tf = f[4:]\n\tfor i := 0; i < len(f); i++ {\n\t\tzi := 0\n\t\tif i%2 == 0 {\n\t\t\tzi = 1\n\t\t}\n\t\tt, err := atoi(f[i])\n\t\tif err != nil {\n\t\t\treturn nil, badData\n\t\t}\n\t\tt -= zones[0].offset\n\t\ttx = append(tx, zoneTrans{when: int64(t), index: uint8(zi)})\n\t}\n\n\t\/\/ Committed to succeed.\n\tl = &Location{zone: zones[:], tx: tx}\n\n\t\/\/ Fill in the cache with information about right now,\n\t\/\/ since that will be the most common lookup.\n\tsec, _ := now()\n\tfor i := range tx {\n\t\tif tx[i].when <= sec && (i+1 == len(tx) || sec < tx[i+1].when) {\n\t\t\tl.cacheStart = tx[i].when\n\t\t\tl.cacheEnd = omega\n\t\t\tif i+1 < len(tx) {\n\t\t\t\tl.cacheEnd = tx[i+1].when\n\t\t\t}\n\t\t\tl.cacheZone = &l.zone[tx[i].index]\n\t\t}\n\t}\n\n\treturn l, nil\n}\n\nfunc loadZoneFilePlan9(name string) (*Location, error) {\n\tb, err := readFile(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn loadZoneDataPlan9(string(b))\n}\n\nfunc initTestingZone() {\n\tz, err := loadLocation(\"America\/Los_Angeles\")\n\tif err != nil {\n\t\tpanic(\"cannot load America\/Los_Angeles for testing: \" + err.Error())\n\t}\n\tz.name = \"Local\"\n\tlocalLoc = *z\n}\n\nfunc initLocal() {\n\tt, ok := syscall.Getenv(\"timezone\")\n\tif ok {\n\t\tif z, err := loadZoneDataPlan9(t); err == nil {\n\t\t\tlocalLoc = *z\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif z, err := loadZoneFilePlan9(\"\/adm\/timezone\/local\"); err == nil {\n\t\t\tlocalLoc = *z\n\t\t\tlocalLoc.name = \"Local\"\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Fall back to UTC.\n\tlocalLoc.name = \"UTC\"\n}\n\nfunc loadLocation(name string) (*Location, error) {\n\tz, err := loadZoneFile(runtime.GOROOT()+\"\/lib\/time\/zoneinfo.zip\", name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tz.name = name\n\treturn z, nil\n}\n\nfunc forceZipFileForTesting(zipOnly bool) {\n\t\/\/ We only use the zip file anyway.\n}\n<commit_msg>Build fix for plan9 after 8bf13838eb21.<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Parse Plan 9 timezone(2) files.\n\npackage time\n\nimport (\n\t\"runtime\"\n\t\"syscall\"\n)\n\nfunc isSpace(r rune) bool {\n\treturn r == ' ' || r == '\\t' || r == '\\n'\n}\n\n\/\/ Copied from strings to avoid a dependency.\nfunc fields(s string) []string {\n\t\/\/ First count the fields.\n\tn := 0\n\tinField := false\n\tfor _, rune := range s {\n\t\twasInField := inField\n\t\tinField = !isSpace(rune)\n\t\tif inField && !wasInField {\n\t\t\tn++\n\t\t}\n\t}\n\n\t\/\/ Now create them.\n\ta := make([]string, n)\n\tna := 0\n\tfieldStart := -1 \/\/ Set to -1 when looking for start of field.\n\tfor i, rune := range s {\n\t\tif isSpace(rune) {\n\t\t\tif fieldStart >= 0 {\n\t\t\t\ta[na] = s[fieldStart:i]\n\t\t\t\tna++\n\t\t\t\tfieldStart = -1\n\t\t\t}\n\t\t} else if fieldStart == -1 {\n\t\t\tfieldStart = i\n\t\t}\n\t}\n\tif fieldStart >= 0 { \/\/ Last field might end at EOF.\n\t\ta[na] = s[fieldStart:]\n\t}\n\treturn a\n}\n\nfunc loadZoneDataPlan9(s string) (l *Location, err error) {\n\tf := fields(s)\n\tif len(f) < 4 {\n\t\tif len(f) == 2 && f[0] == \"GMT\" {\n\t\t\treturn UTC, nil\n\t\t}\n\t\treturn nil, badData\n\t}\n\n\tvar zones [2]zone\n\n\t\/\/ standard timezone offset\n\to, err := atoi(f[1])\n\tif err != nil {\n\t\treturn nil, badData\n\t}\n\tzones[0] = zone{name: f[0], offset: o, isDST: false}\n\n\t\/\/ alternate timezone offset\n\to, err = atoi(f[3])\n\tif err != nil {\n\t\treturn nil, badData\n\t}\n\tzones[1] = zone{name: f[2], offset: o, isDST: true}\n\n\t\/\/ transition time pairs\n\tvar tx []zoneTrans\n\tf = f[4:]\n\tfor i := 0; i < len(f); i++ {\n\t\tzi := 0\n\t\tif i%2 == 0 {\n\t\t\tzi = 1\n\t\t}\n\t\tt, err := atoi(f[i])\n\t\tif err != nil {\n\t\t\treturn nil, badData\n\t\t}\n\t\tt -= zones[0].offset\n\t\ttx = append(tx, zoneTrans{when: int64(t), index: uint8(zi)})\n\t}\n\n\t\/\/ Committed to succeed.\n\tl = &Location{zone: zones[:], tx: tx}\n\n\t\/\/ Fill in the cache with information about right now,\n\t\/\/ since that will be the most common lookup.\n\tsec, _ := now()\n\tfor i := range tx {\n\t\tif tx[i].when <= sec && (i+1 == len(tx) || sec < tx[i+1].when) {\n\t\t\tl.cacheStart = tx[i].when\n\t\t\tl.cacheEnd = omega\n\t\t\tif i+1 < len(tx) {\n\t\t\t\tl.cacheEnd = tx[i+1].when\n\t\t\t}\n\t\t\tl.cacheZone = &l.zone[tx[i].index]\n\t\t}\n\t}\n\n\treturn l, nil\n}\n\nfunc loadZoneFilePlan9(name string) (*Location, error) {\n\tb, err := readFile(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn loadZoneDataPlan9(string(b))\n}\n\nfunc initTestingZone() {\n\tz, err := loadLocation(\"America\/Los_Angeles\")\n\tif err != nil {\n\t\tpanic(\"cannot load America\/Los_Angeles for testing: \" + err.Error())\n\t}\n\tz.name = \"Local\"\n\tlocalLoc = *z\n}\n\nfunc initLocal() {\n\tt, ok := syscall.Getenv(\"timezone\")\n\tif ok {\n\t\tif z, err := loadZoneDataPlan9(t); err == nil {\n\t\t\tlocalLoc = *z\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif z, err := loadZoneFilePlan9(\"\/adm\/timezone\/local\"); err == nil {\n\t\t\tlocalLoc = *z\n\t\t\tlocalLoc.name = \"Local\"\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Fall back to UTC.\n\tlocalLoc.name = \"UTC\"\n}\n\nfunc loadLocation(name string) (*Location, error) {\n\tz, err := loadZoneFile(runtime.GOROOT()+\"\/lib\/time\/zoneinfo.zip\", name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tz.name = name\n\treturn z, nil\n}\n\nfunc forceZipFileForTesting(zipOnly bool) {\n\t\/\/ We only use the zip file anyway.\n}\n<|endoftext|>"} {"text":"<commit_before>package helpers\n\nimport (\n\t\"errors\"\n\t\"github.com\/koding\/logging\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"socialapi\/models\"\n\t\"strconv\"\n\n\t\"github.com\/jinzhu\/gorm\"\n)\n\n\/\/ type ApiResponse struct {\n\/\/ \tStatus int\n\/\/ \tHeader http.Header\n\/\/ \tResponse interface{}\n\/\/ \tError error\n\/\/ }\n\n\/\/ type ApiRequest struct {\n\/\/ \tURL *url.URL\n\/\/ \tHeader http.Header\n\/\/ \tRequest interface{}\n\/\/ }\n\n\/\/ func NewResponse() *ApiResponse {\n\/\/ \treturn &ApiResponse{\n\/\/ \t\tStatus: http.StatusOK,\n\/\/ \t\tHeader: nil,\n\/\/ \t\tResponse: nil,\n\/\/ \t\tError: nil,\n\/\/ \t}\n\/\/ }\n\nvar Log logging.Logger\n\nfunc NewBadRequestResponse(err error) (int, http.Header, interface{}, error) {\n\tif err == nil {\n\t\terr = errors.New(\"Request is not valid\")\n\t}\n\tif Log != nil {\n\t\tLog.Error(\"Bad Request: %s\", err)\n\t}\n\n\treturn http.StatusBadRequest, nil, nil, err\n}\n\nfunc HandleResultAndError(res interface{}, err error) (int, http.Header, interface{}, error) {\n\tif err != nil {\n\t\tif err == gorm.RecordNotFound {\n\t\t\treturn NewNotFoundResponse()\n\t\t}\n\t\treturn NewBadRequestResponse(err)\n\t}\n\treturn NewOKResponse(res)\n}\n\nfunc NewOKResponse(res interface{}) (int, http.Header, interface{}, error) {\n\treturn http.StatusOK, nil, res, nil\n}\n\nfunc NewNotFoundResponse() (int, http.Header, interface{}, error) {\n\treturn http.StatusNotFound, nil, nil, errors.New(\"Data not found\")\n}\n\nfunc NewDeletedResponse() (int, http.Header, interface{}, error) {\n\treturn http.StatusAccepted, nil, nil, nil\n}\n\nfunc NewDefaultOKResponse() (int, http.Header, interface{}, error) {\n\tres := map[string]interface{}{\n\t\t\"status\": true,\n\t}\n\n\treturn http.StatusOK, nil, res, nil\n}\n\nfunc GetId(u *url.URL) (int64, error) {\n\treturn strconv.ParseInt(u.Query().Get(\"id\"), 10, 64)\n}\n\nfunc GetURIInt64(u *url.URL, queryParam string) (int64, error) {\n\treturn strconv.ParseInt(u.Query().Get(queryParam), 10, 64)\n}\n\nfunc GetQuery(u *url.URL) *models.Query {\n\treturn models.NewQuery().MapURL(u).SetDefaults()\n}\n<commit_msg>Social: Responsehelper now uses a[i\/helper logger<commit_after>package helpers\n\nimport (\n\t\"errors\"\n\t\"github.com\/koding\/api\/helpers\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"socialapi\/models\"\n\t\"strconv\"\n\n\t\"github.com\/jinzhu\/gorm\"\n)\n\n\/\/ type ApiResponse struct {\n\/\/ \tStatus int\n\/\/ \tHeader http.Header\n\/\/ \tResponse interface{}\n\/\/ \tError error\n\/\/ }\n\n\/\/ type ApiRequest struct {\n\/\/ \tURL *url.URL\n\/\/ \tHeader http.Header\n\/\/ \tRequest interface{}\n\/\/ }\n\n\/\/ func NewResponse() *ApiResponse {\n\/\/ \treturn &ApiResponse{\n\/\/ \t\tStatus: http.StatusOK,\n\/\/ \t\tHeader: nil,\n\/\/ \t\tResponse: nil,\n\/\/ \t\tError: nil,\n\/\/ \t}\n\/\/ }\nfunc NewBadRequestResponse(err error) (int, http.Header, interface{}, error) {\n\tif err == nil {\n\t\terr = errors.New(\"Request is not valid\")\n\t}\n\n\thelpers.MustGetLogger().Error(\"Bad Request: %s\", err)\n\n\treturn http.StatusBadRequest, nil, nil, err\n}\n\nfunc HandleResultAndError(res interface{}, err error) (int, http.Header, interface{}, error) {\n\tif err != nil {\n\t\tif err == gorm.RecordNotFound {\n\t\t\treturn NewNotFoundResponse()\n\t\t}\n\t\treturn NewBadRequestResponse(err)\n\t}\n\treturn NewOKResponse(res)\n}\n\nfunc NewOKResponse(res interface{}) (int, http.Header, interface{}, error) {\n\treturn http.StatusOK, nil, res, nil\n}\n\nfunc NewNotFoundResponse() (int, http.Header, interface{}, error) {\n\treturn http.StatusNotFound, nil, nil, errors.New(\"Data not found\")\n}\n\nfunc NewDeletedResponse() (int, http.Header, interface{}, error) {\n\treturn http.StatusAccepted, nil, nil, nil\n}\n\nfunc NewDefaultOKResponse() (int, http.Header, interface{}, error) {\n\tres := map[string]interface{}{\n\t\t\"status\": true,\n\t}\n\n\treturn http.StatusOK, nil, res, nil\n}\n\nfunc GetId(u *url.URL) (int64, error) {\n\treturn strconv.ParseInt(u.Query().Get(\"id\"), 10, 64)\n}\n\nfunc GetURIInt64(u *url.URL, queryParam string) (int64, error) {\n\treturn strconv.ParseInt(u.Query().Get(queryParam), 10, 64)\n}\n\nfunc GetQuery(u *url.URL) *models.Query {\n\treturn models.NewQuery().MapURL(u).SetDefaults()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License. See the AUTHORS file\n\/\/ for names of contributors.\n\/\/\n\/\/ Author: Matt Tracy (matt@cockroachlabs.com)\n\npackage storage\n\nimport (\n\t\"encoding\/json\"\n\t\"time\"\n\n\t\"github.com\/cockroachdb\/cockroach\/internal\/client\"\n\t\"github.com\/cockroachdb\/cockroach\/roachpb\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/hlc\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/log\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ TODO(mrtracy): All of this logic should probably be moved into the SQL\n\/\/ package; there are going to be additional event log tables which will not be\n\/\/ strongly associated with a store, and it would be better to keep event log\n\/\/ tables close together in the code.\n\n\/\/ RangeEventLogType describes a specific event type recorded in the range log\n\/\/ table.\ntype RangeEventLogType string\n\nconst (\n\t\/\/ RangeEventLogSplit is the event type recorded when a range splits.\n\tRangeEventLogSplit RangeEventLogType = \"split\"\n\t\/\/ RangeEventLogAdd is the event type recorded when a range adds a\n\t\/\/ new replica.\n\tRangeEventLogAdd RangeEventLogType = \"add\"\n\t\/\/ RangeEventLogRemove is the event type recorded when a range removes a\n\t\/\/ replica.\n\tRangeEventLogRemove RangeEventLogType = \"remove\"\n)\n\n\/\/ RangeEventTableSchema defines the schema of the event log table. It is\n\/\/ currently envisioned as a wide table; many different event types can be\n\/\/ recorded to the table.\nconst RangeEventTableSchema = `\nCREATE TABLE system.rangelog (\n timestamp TIMESTAMP NOT NULL,\n rangeID INT NOT NULL,\n storeID INT NOT NULL,\n eventType STRING NOT NULL,\n otherRangeID INT,\n info STRING,\n uniqueID INT DEFAULT unique_rowid(),\n PRIMARY KEY (timestamp, uniqueID)\n);`\n\ntype rangeLogEvent struct {\n\ttimestamp time.Time\n\trangeID roachpb.RangeID\n\tstoreID roachpb.StoreID\n\teventType RangeEventLogType\n\totherRangeID *roachpb.RangeID\n\tinfo *string\n}\n\nfunc (s *Store) insertRangeLogEvent(txn *client.Txn, event rangeLogEvent) error {\n\t\/\/ Record range log event to console log.\n\tvar info string\n\tif event.info != nil {\n\t\tinfo = *event.info\n\t}\n\tlog.Infof(txn.Context, \"Range Event: %q, range: %d, info: %s\",\n\t\tevent.eventType,\n\t\tevent.rangeID,\n\t\tinfo)\n\n\tconst insertEventTableStmt = `\nINSERT INTO system.rangelog (\n timestamp, rangeID, storeID, eventType, otherRangeID, info\n)\nVALUES(\n $1, $2, $3, $4, $5, $6\n)\n`\n\targs := []interface{}{\n\t\tevent.timestamp,\n\t\tevent.rangeID,\n\t\tevent.storeID,\n\t\tevent.eventType,\n\t\tnil, \/\/ otherRangeID\n\t\tnil, \/\/ info\n\t}\n\tif event.otherRangeID != nil {\n\t\targs[4] = *event.otherRangeID\n\t}\n\tif event.info != nil {\n\t\targs[5] = *event.info\n\t}\n\n\t\/\/ Update range event metrics. We do this close to the insertion of the\n\t\/\/ corresponding range log entry to reduce potential skew between metrics and\n\t\/\/ range log.\n\tswitch event.eventType {\n\tcase RangeEventLogSplit:\n\t\ts.metrics.RangeSplits.Inc(1)\n\tcase RangeEventLogAdd:\n\t\ts.metrics.RangeAdds.Inc(1)\n\tcase RangeEventLogRemove:\n\t\ts.metrics.RangeRemoves.Inc(1)\n\t}\n\n\trows, err := s.ctx.SQLExecutor.ExecuteStatementInTransaction(txn, insertEventTableStmt, args...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif rows != 1 {\n\t\treturn errors.Errorf(\"%d rows affected by log insertion; expected exactly one row affected.\", rows)\n\t}\n\treturn nil\n}\n\n\/\/ logSplit logs a range split event into the event table. The affected range is\n\/\/ the range which previously existed and is being split in half; the \"other\"\n\/\/ range is the new range which is being created.\n\/\/ TODO(mrtracy): There are several different reasons that a replica split\n\/\/ could occur, and that information should be logged.\nfunc (s *Store) logSplit(txn *client.Txn, updatedDesc, newDesc roachpb.RangeDescriptor) error {\n\tif !s.ctx.LogRangeEvents {\n\t\treturn nil\n\t}\n\tinfo := struct {\n\t\tUpdatedDesc roachpb.RangeDescriptor\n\t\tNewDesc roachpb.RangeDescriptor\n\t}{updatedDesc, newDesc}\n\tinfoBytes, err := json.Marshal(info)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfoStr := string(infoBytes)\n\treturn s.insertRangeLogEvent(txn, rangeLogEvent{\n\t\ttimestamp: selectEventTimestamp(s, txn.Proto.Timestamp),\n\t\trangeID: updatedDesc.RangeID,\n\t\teventType: RangeEventLogSplit,\n\t\tstoreID: s.StoreID(),\n\t\totherRangeID: &newDesc.RangeID,\n\t\tinfo: &infoStr,\n\t})\n}\n\n\/\/ logChange logs a replica change event, which represents a replica being added\n\/\/ to or removed from a range.\n\/\/ TODO(mrtracy): There are several different reasons that a replica change\n\/\/ could occur, and that information should be logged.\nfunc (s *Store) logChange(txn *client.Txn, changeType roachpb.ReplicaChangeType, replica roachpb.ReplicaDescriptor,\n\tdesc roachpb.RangeDescriptor) error {\n\tif !s.ctx.LogRangeEvents {\n\t\treturn nil\n\t}\n\n\tvar logType RangeEventLogType\n\tvar infoStruct interface{}\n\tswitch changeType {\n\tcase roachpb.ADD_REPLICA:\n\t\tlogType = RangeEventLogAdd\n\t\tinfoStruct = struct {\n\t\t\tAddReplica roachpb.ReplicaDescriptor\n\t\t\tUpdatedDesc roachpb.RangeDescriptor\n\t\t}{replica, desc}\n\tcase roachpb.REMOVE_REPLICA:\n\t\tlogType = RangeEventLogRemove\n\t\tinfoStruct = struct {\n\t\t\tRemovedReplica roachpb.ReplicaDescriptor\n\t\t\tUpdatedDesc roachpb.RangeDescriptor\n\t\t}{replica, desc}\n\tdefault:\n\t\treturn errors.Errorf(\"unknown replica change type %s\", changeType)\n\t}\n\n\tinfoBytes, err := json.Marshal(infoStruct)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfoStr := string(infoBytes)\n\treturn s.insertRangeLogEvent(txn, rangeLogEvent{\n\t\ttimestamp: selectEventTimestamp(s, txn.Proto.Timestamp),\n\t\trangeID: desc.RangeID,\n\t\teventType: logType,\n\t\tstoreID: s.StoreID(),\n\t\tinfo: &infoStr,\n\t})\n}\n\n\/\/ selectEventTimestamp selects a timestamp for this log message. If the\n\/\/ transaction this event is being written in has a non-zero timestamp, then that\n\/\/ timestamp should be used; otherwise, the store's physical clock is used.\n\/\/ This helps with testing; in normal usage, the logging of an event will never\n\/\/ be the first action in the transaction, and thus the transaction will have an\n\/\/ assigned database timestamp. However, in the case of our tests log events\n\/\/ *are* the first action in a transaction, and we must elect to use the store's\n\/\/ physical time instead.\nfunc selectEventTimestamp(s *Store, input hlc.Timestamp) time.Time {\n\tif input == hlc.ZeroTimestamp {\n\t\treturn s.Clock().PhysicalTime()\n\t}\n\treturn input.GoTime()\n}\n<commit_msg>storage: move range-event logs under V(1)<commit_after>\/\/ Copyright 2015 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License. See the AUTHORS file\n\/\/ for names of contributors.\n\/\/\n\/\/ Author: Matt Tracy (matt@cockroachlabs.com)\n\npackage storage\n\nimport (\n\t\"encoding\/json\"\n\t\"time\"\n\n\t\"github.com\/cockroachdb\/cockroach\/internal\/client\"\n\t\"github.com\/cockroachdb\/cockroach\/roachpb\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/hlc\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/log\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ TODO(mrtracy): All of this logic should probably be moved into the SQL\n\/\/ package; there are going to be additional event log tables which will not be\n\/\/ strongly associated with a store, and it would be better to keep event log\n\/\/ tables close together in the code.\n\n\/\/ RangeEventLogType describes a specific event type recorded in the range log\n\/\/ table.\ntype RangeEventLogType string\n\nconst (\n\t\/\/ RangeEventLogSplit is the event type recorded when a range splits.\n\tRangeEventLogSplit RangeEventLogType = \"split\"\n\t\/\/ RangeEventLogAdd is the event type recorded when a range adds a\n\t\/\/ new replica.\n\tRangeEventLogAdd RangeEventLogType = \"add\"\n\t\/\/ RangeEventLogRemove is the event type recorded when a range removes a\n\t\/\/ replica.\n\tRangeEventLogRemove RangeEventLogType = \"remove\"\n)\n\n\/\/ RangeEventTableSchema defines the schema of the event log table. It is\n\/\/ currently envisioned as a wide table; many different event types can be\n\/\/ recorded to the table.\nconst RangeEventTableSchema = `\nCREATE TABLE system.rangelog (\n timestamp TIMESTAMP NOT NULL,\n rangeID INT NOT NULL,\n storeID INT NOT NULL,\n eventType STRING NOT NULL,\n otherRangeID INT,\n info STRING,\n uniqueID INT DEFAULT unique_rowid(),\n PRIMARY KEY (timestamp, uniqueID)\n);`\n\ntype rangeLogEvent struct {\n\ttimestamp time.Time\n\trangeID roachpb.RangeID\n\tstoreID roachpb.StoreID\n\teventType RangeEventLogType\n\totherRangeID *roachpb.RangeID\n\tinfo *string\n}\n\nfunc (s *Store) insertRangeLogEvent(txn *client.Txn, event rangeLogEvent) error {\n\t\/\/ Record range log event to console log.\n\tvar info string\n\tif event.info != nil {\n\t\tinfo = *event.info\n\t}\n\tif log.V(1) {\n\t\tlog.Infof(txn.Context, \"Range Event: %q, range: %d, info: %s\",\n\t\t\tevent.eventType,\n\t\t\tevent.rangeID,\n\t\t\tinfo)\n\t}\n\n\tconst insertEventTableStmt = `\nINSERT INTO system.rangelog (\n timestamp, rangeID, storeID, eventType, otherRangeID, info\n)\nVALUES(\n $1, $2, $3, $4, $5, $6\n)\n`\n\targs := []interface{}{\n\t\tevent.timestamp,\n\t\tevent.rangeID,\n\t\tevent.storeID,\n\t\tevent.eventType,\n\t\tnil, \/\/ otherRangeID\n\t\tnil, \/\/ info\n\t}\n\tif event.otherRangeID != nil {\n\t\targs[4] = *event.otherRangeID\n\t}\n\tif event.info != nil {\n\t\targs[5] = *event.info\n\t}\n\n\t\/\/ Update range event metrics. We do this close to the insertion of the\n\t\/\/ corresponding range log entry to reduce potential skew between metrics and\n\t\/\/ range log.\n\tswitch event.eventType {\n\tcase RangeEventLogSplit:\n\t\ts.metrics.RangeSplits.Inc(1)\n\tcase RangeEventLogAdd:\n\t\ts.metrics.RangeAdds.Inc(1)\n\tcase RangeEventLogRemove:\n\t\ts.metrics.RangeRemoves.Inc(1)\n\t}\n\n\trows, err := s.ctx.SQLExecutor.ExecuteStatementInTransaction(txn, insertEventTableStmt, args...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif rows != 1 {\n\t\treturn errors.Errorf(\"%d rows affected by log insertion; expected exactly one row affected.\", rows)\n\t}\n\treturn nil\n}\n\n\/\/ logSplit logs a range split event into the event table. The affected range is\n\/\/ the range which previously existed and is being split in half; the \"other\"\n\/\/ range is the new range which is being created.\n\/\/ TODO(mrtracy): There are several different reasons that a replica split\n\/\/ could occur, and that information should be logged.\nfunc (s *Store) logSplit(txn *client.Txn, updatedDesc, newDesc roachpb.RangeDescriptor) error {\n\tif !s.ctx.LogRangeEvents {\n\t\treturn nil\n\t}\n\tinfo := struct {\n\t\tUpdatedDesc roachpb.RangeDescriptor\n\t\tNewDesc roachpb.RangeDescriptor\n\t}{updatedDesc, newDesc}\n\tinfoBytes, err := json.Marshal(info)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfoStr := string(infoBytes)\n\treturn s.insertRangeLogEvent(txn, rangeLogEvent{\n\t\ttimestamp: selectEventTimestamp(s, txn.Proto.Timestamp),\n\t\trangeID: updatedDesc.RangeID,\n\t\teventType: RangeEventLogSplit,\n\t\tstoreID: s.StoreID(),\n\t\totherRangeID: &newDesc.RangeID,\n\t\tinfo: &infoStr,\n\t})\n}\n\n\/\/ logChange logs a replica change event, which represents a replica being added\n\/\/ to or removed from a range.\n\/\/ TODO(mrtracy): There are several different reasons that a replica change\n\/\/ could occur, and that information should be logged.\nfunc (s *Store) logChange(txn *client.Txn, changeType roachpb.ReplicaChangeType, replica roachpb.ReplicaDescriptor,\n\tdesc roachpb.RangeDescriptor) error {\n\tif !s.ctx.LogRangeEvents {\n\t\treturn nil\n\t}\n\n\tvar logType RangeEventLogType\n\tvar infoStruct interface{}\n\tswitch changeType {\n\tcase roachpb.ADD_REPLICA:\n\t\tlogType = RangeEventLogAdd\n\t\tinfoStruct = struct {\n\t\t\tAddReplica roachpb.ReplicaDescriptor\n\t\t\tUpdatedDesc roachpb.RangeDescriptor\n\t\t}{replica, desc}\n\tcase roachpb.REMOVE_REPLICA:\n\t\tlogType = RangeEventLogRemove\n\t\tinfoStruct = struct {\n\t\t\tRemovedReplica roachpb.ReplicaDescriptor\n\t\t\tUpdatedDesc roachpb.RangeDescriptor\n\t\t}{replica, desc}\n\tdefault:\n\t\treturn errors.Errorf(\"unknown replica change type %s\", changeType)\n\t}\n\n\tinfoBytes, err := json.Marshal(infoStruct)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinfoStr := string(infoBytes)\n\treturn s.insertRangeLogEvent(txn, rangeLogEvent{\n\t\ttimestamp: selectEventTimestamp(s, txn.Proto.Timestamp),\n\t\trangeID: desc.RangeID,\n\t\teventType: logType,\n\t\tstoreID: s.StoreID(),\n\t\tinfo: &infoStr,\n\t})\n}\n\n\/\/ selectEventTimestamp selects a timestamp for this log message. If the\n\/\/ transaction this event is being written in has a non-zero timestamp, then that\n\/\/ timestamp should be used; otherwise, the store's physical clock is used.\n\/\/ This helps with testing; in normal usage, the logging of an event will never\n\/\/ be the first action in the transaction, and thus the transaction will have an\n\/\/ assigned database timestamp. However, in the case of our tests log events\n\/\/ *are* the first action in a transaction, and we must elect to use the store's\n\/\/ physical time instead.\nfunc selectEventTimestamp(s *Store, input hlc.Timestamp) time.Time {\n\tif input == hlc.ZeroTimestamp {\n\t\treturn s.Clock().PhysicalTime()\n\t}\n\treturn input.GoTime()\n}\n<|endoftext|>"} {"text":"<commit_before>package repositories\n\nimport (\n\t\"github.com\/gorilla\/securecookie\"\n\t\"github.com\/gorilla\/sessions\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n)\n\nvar sessionStore sessions.Store\n\n\/\/ Initialize the session store with authentication\n\/\/ and encryption keys.\n\/\/\n\/\/ Keys are read from the environment variables\n\/\/ SESSION_AUTH_KEY and SESSION_ENCRYPTION_KEY\n\/\/ If not set, temporary keys will be used.\n\/\/\n\/\/ Using temporary keys will invalidate all sessions\n\/\/ when the server restarts.\nfunc EnableSessions() {\n\tauth_key := []byte(os.Getenv(\"SESSION_AUTH_KEY\"))\n\n\tif len(auth_key) == 0 {\n\t\tlog.Println(\"Using temporary authentication key for session\")\n\t\tauth_key = securecookie.GenerateRandomKey(64)\n\t}\n\n\tencryption_key := []byte(os.Getenv(\"SESSION_ENCRYPTION_KEY\"))\n\n\tif len(encryption_key) == 0 {\n\t\tlog.Println(\"Using temporary encryption key for session\")\n\t\tencryption_key = securecookie.GenerateRandomKey(32)\n\t}\n\n\tsessionStore = sessions.NewCookieStore(auth_key, encryption_key)\n}\n\ntype SessionRepository interface {\n\tValue(key string) interface{}\n\tSetValue(key string, value interface{})\n\tSave() error\n}\n\nfunc Session(res http.ResponseWriter, req *http.Request) SessionRepository {\n\tsession, _ := sessionStore.Get(req, \"bones_session\")\n\treturn &CookieSessionRepository{session, res, req}\n}\n\ntype CookieSessionRepository struct {\n\tsession *sessions.Session\n\tresponseWriter http.ResponseWriter\n\trequest *http.Request\n}\n\nfunc (s *CookieSessionRepository) Value(key string) interface{} {\n\treturn s.session.Values[key]\n}\n\nfunc (s *CookieSessionRepository) SetValue(key string, value interface{}) {\n\ts.session.Values[key] = value\n}\n\nfunc (s *CookieSessionRepository) Save() error {\n\treturn s.session.Save(s.request, s.responseWriter)\n}\n<commit_msg>Add Clear method to SessionRepository<commit_after>package repositories\n\nimport (\n\t\"github.com\/gorilla\/securecookie\"\n\t\"github.com\/gorilla\/sessions\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n)\n\nvar sessionStore sessions.Store\n\n\/\/ Initialize the session store with authentication\n\/\/ and encryption keys.\n\/\/\n\/\/ Keys are read from the environment variables\n\/\/ SESSION_AUTH_KEY and SESSION_ENCRYPTION_KEY\n\/\/ If not set, temporary keys will be used.\n\/\/\n\/\/ Using temporary keys will invalidate all sessions\n\/\/ when the server restarts.\nfunc EnableSessions() {\n\tauth_key := []byte(os.Getenv(\"SESSION_AUTH_KEY\"))\n\n\tif len(auth_key) == 0 {\n\t\tlog.Println(\"Using temporary authentication key for session\")\n\t\tauth_key = securecookie.GenerateRandomKey(64)\n\t}\n\n\tencryption_key := []byte(os.Getenv(\"SESSION_ENCRYPTION_KEY\"))\n\n\tif len(encryption_key) == 0 {\n\t\tlog.Println(\"Using temporary encryption key for session\")\n\t\tencryption_key = securecookie.GenerateRandomKey(32)\n\t}\n\n\tsessionStore = sessions.NewCookieStore(auth_key, encryption_key)\n}\n\ntype SessionRepository interface {\n\tValue(key string) interface{}\n\tSetValue(key string, value interface{})\n\tSave() error\n\tClear()\n}\n\nfunc Session(res http.ResponseWriter, req *http.Request) SessionRepository {\n\tsession, _ := sessionStore.Get(req, \"bones_session\")\n\treturn &CookieSessionRepository{session, res, req}\n}\n\ntype CookieSessionRepository struct {\n\tsession *sessions.Session\n\tresponseWriter http.ResponseWriter\n\trequest *http.Request\n}\n\nfunc (s *CookieSessionRepository) Value(key string) interface{} {\n\treturn s.session.Values[key]\n}\n\nfunc (s *CookieSessionRepository) SetValue(key string, value interface{}) {\n\ts.session.Values[key] = value\n}\n\nfunc (s *CookieSessionRepository) Save() error {\n\treturn s.session.Save(s.request, s.responseWriter)\n}\n\nfunc (s *CookieSessionRepository) Clear() {\n\ts.session.Values = nil\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Remove SetDebug<commit_after><|endoftext|>"} {"text":"<commit_before>package windows_test\n\nimport (\n\t\"archive\/tar\"\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"net\/http\"\n\n\t\"github.com\/cloudfoundry\/bosh-agent\/integration\/windows\/utils\"\n\t\"github.com\/masterzen\/winrm\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"testing\"\n\t\"text\/template\"\n)\n\nvar (\n\tVagrantProvider = os.Getenv(\"VAGRANT_PROVIDER\")\n\tOsVersion = getOsVersion()\n\tAgentPublicIP, NATSPublicIP string\n\tdirname = filepath.Join(\n\t\tos.Getenv(\"GOPATH\"),\n\t\t\"src\/github.com\/cloudfoundry\/bosh-agent\/integration\/windows\/fixtures\",\n\t)\n\tagent *WindowsEnvironment\n)\n\ntype BoshAgentSettings struct {\n\tNatsPrivateIP string\n\tEphemeralDiskConfig string\n}\n\nfunc getOsVersion() string {\n\tosVersion := os.Getenv(\"WINDOWS_OS_VERSION\")\n\tif osVersion == \"\" {\n\t\tosVersion = \"2012R2\"\n\t}\n\n\treturn osVersion\n}\n\nfunc TestWindows(t *testing.T) {\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"Windows Suite\")\n}\n\nfunc tarFixtures(fixturesDir, filename string) error {\n\tfixtures := []string{\n\t\t\"service_wrapper.xml\",\n\t\t\"service_wrapper.exe\",\n\t\t\"job-service-wrapper.exe\",\n\t\t\"bosh-blobstore-dav.exe\",\n\t\t\"bosh-agent.exe\",\n\t\t\"pipe.exe\",\n\t\t\"agent-configuration\/agent.json\",\n\t\t\"agent-configuration\/root-partition-agent.json\",\n\t\t\"agent-configuration\/root-partition-agent-ephemeral-disabled.json\",\n\t\t\"agent-configuration\/root-disk-settings.json\",\n\t\t\"agent-configuration\/second-disk-settings.json\",\n\t\t\"agent-configuration\/second-disk-digit-settings.json\",\n\t\t\"agent-configuration\/third-disk-settings.json\",\n\t\t\"psFixture\/psFixture.psd1\",\n\t\t\"psFixture\/psFixture.psm1\",\n\t}\n\n\tarchive, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer archive.Close()\n\n\tgzipWriter := gzip.NewWriter(archive)\n\ttarWriter := tar.NewWriter(gzipWriter)\n\n\tfor _, name := range fixtures {\n\t\tpath := filepath.Join(fixturesDir, name)\n\t\tfi, err := os.Stat(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\thdr, err := tar.FileInfoHeader(fi, \"\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thdr.Name = name\n\n\t\tif err := tarWriter.WriteHeader(hdr); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\n\t\tif _, err := io.Copy(tarWriter, f); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := tarWriter.Close(); err != nil {\n\t\treturn err\n\t}\n\n\treturn gzipWriter.Close()\n}\n\nvar _ = BeforeSuite(func() {\n\tif os.Getenv(\"GOPATH\") == \"\" {\n\t\tFail(\"Environment variable GOPATH not set\", 1)\n\t}\n\n\tif err := utils.BuildAgent(); err != nil {\n\t\tFail(fmt.Sprintln(\"Could not build the bosh-agent project.\\nError is:\", err))\n\t}\n\n\terr := utils.StartVagrant(\"nats\", VagrantProvider, OsVersion)\n\tnatsPrivateIP, err := utils.RetrievePrivateIP(\"nats\")\n\tExpect(err).NotTo(HaveOccurred())\n\tExpect(natsPrivateIP).NotTo(BeEmpty(), \"Couldn't retrieve NATS private IP\")\n\n\ttemplateSettings(natsPrivateIP, `\"\"`, \"root-disk-settings.json\")\n\ttemplateSettings(natsPrivateIP, `\"\/dev\/sdb\"`, \"second-disk-settings.json\")\n\ttemplateSettings(natsPrivateIP, `\"1\"`, \"second-disk-digit-settings.json\")\n\ttemplateSettings(natsPrivateIP, `\"\/dev\/sdc\"`, \"third-disk-settings.json\")\n\n\tfilename := filepath.Join(dirname, \"fixtures.tgz\")\n\tif err := tarFixtures(dirname, filename); err != nil {\n\t\tFail(fmt.Sprintln(\"Creating fixtures TGZ::\", err))\n\t}\n\n\terr = utils.StartVagrant(\"agent\", VagrantProvider, OsVersion)\n\n\tAgentPublicIP, err = utils.RetrievePublicIP(\"agent\")\n\tExpect(err).NotTo(HaveOccurred())\n\n\tNATSPublicIP, err = utils.RetrievePublicIP(\"nats\")\n\tExpect(err).NotTo(HaveOccurred())\n\n\tif err != nil {\n\t\tFail(fmt.Sprintln(\"Could not setup and run vagrant.\\nError is:\", err))\n\t}\n\n\tendpoint := winrm.NewEndpoint(AgentPublicIP, 5985, false, false, nil, nil, nil, 0)\n\tclient, err := winrm.NewClientWithParameters(\n\t\tendpoint,\n\t\t\"vagrant\",\n\t\t\"Password123!\",\n\t\twinrm.NewParameters(\"PT5M\", \"en-US\", 153600),\n\t)\n\tExpect(err).NotTo(HaveOccurred())\n\n\tagent = &WindowsEnvironment{\n\t\tClient: client,\n\t}\n\n\t\/\/ We do this so that both 2012R2 and 1709 run ephemeral disk tests against raw disks.\n\t\/\/ 2012R2 additional disks start formatted on AWS for some reason.\n\tagent.EnsureDiskCleared(\"1\")\n\tagent.EnsureDiskCleared(\"2\")\n\n\tgoSourcePath := filepath.Join(dirname, \"templates\", \"go\", \"go1.7.1.windows-amd64.zip\")\n\tos.RemoveAll(goSourcePath)\n\tdownloadFile(goSourcePath, \"https:\/\/dl.google.com\/go\/go1.7.1.windows-amd64.zip\")\n})\n\nfunc templateSettings(natsPrivateIP, ephemeralDiskConfig, filename string) {\n\tagentSettings := BoshAgentSettings{\n\t\tNatsPrivateIP: natsPrivateIP,\n\t\tEphemeralDiskConfig: ephemeralDiskConfig,\n\t}\n\tsettingsTmpl, err := template.ParseFiles(\n\t\tfilepath.Join(dirname, \"templates\", \"agent-configuration\", \"settings.json.tmpl\"),\n\t)\n\tExpect(err).NotTo(HaveOccurred())\n\toutputFile, err := os.Create(filepath.Join(dirname, \"agent-configuration\", filename))\n\tdefer outputFile.Close()\n\n\tExpect(err).NotTo(HaveOccurred())\n\terr = settingsTmpl.Execute(outputFile, agentSettings)\n\tExpect(err).NotTo(HaveOccurred())\n}\n\nfunc downloadFile(localPath, sourceUrl string) error {\n\tf, err := os.OpenFile(localPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tres, err := http.Get(sourceUrl)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\tif _, err := io.Copy(f, res.Body); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>golint prefers URL<commit_after>package windows_test\n\nimport (\n\t\"archive\/tar\"\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"net\/http\"\n\n\t\"github.com\/cloudfoundry\/bosh-agent\/integration\/windows\/utils\"\n\t\"github.com\/masterzen\/winrm\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"testing\"\n\t\"text\/template\"\n)\n\nvar (\n\tVagrantProvider = os.Getenv(\"VAGRANT_PROVIDER\")\n\tOsVersion = getOsVersion()\n\tAgentPublicIP, NATSPublicIP string\n\tdirname = filepath.Join(\n\t\tos.Getenv(\"GOPATH\"),\n\t\t\"src\/github.com\/cloudfoundry\/bosh-agent\/integration\/windows\/fixtures\",\n\t)\n\tagent *WindowsEnvironment\n)\n\ntype BoshAgentSettings struct {\n\tNatsPrivateIP string\n\tEphemeralDiskConfig string\n}\n\nfunc getOsVersion() string {\n\tosVersion := os.Getenv(\"WINDOWS_OS_VERSION\")\n\tif osVersion == \"\" {\n\t\tosVersion = \"2012R2\"\n\t}\n\n\treturn osVersion\n}\n\nfunc TestWindows(t *testing.T) {\n\tRegisterFailHandler(Fail)\n\tRunSpecs(t, \"Windows Suite\")\n}\n\nfunc tarFixtures(fixturesDir, filename string) error {\n\tfixtures := []string{\n\t\t\"service_wrapper.xml\",\n\t\t\"service_wrapper.exe\",\n\t\t\"job-service-wrapper.exe\",\n\t\t\"bosh-blobstore-dav.exe\",\n\t\t\"bosh-agent.exe\",\n\t\t\"pipe.exe\",\n\t\t\"agent-configuration\/agent.json\",\n\t\t\"agent-configuration\/root-partition-agent.json\",\n\t\t\"agent-configuration\/root-partition-agent-ephemeral-disabled.json\",\n\t\t\"agent-configuration\/root-disk-settings.json\",\n\t\t\"agent-configuration\/second-disk-settings.json\",\n\t\t\"agent-configuration\/second-disk-digit-settings.json\",\n\t\t\"agent-configuration\/third-disk-settings.json\",\n\t\t\"psFixture\/psFixture.psd1\",\n\t\t\"psFixture\/psFixture.psm1\",\n\t}\n\n\tarchive, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer archive.Close()\n\n\tgzipWriter := gzip.NewWriter(archive)\n\ttarWriter := tar.NewWriter(gzipWriter)\n\n\tfor _, name := range fixtures {\n\t\tpath := filepath.Join(fixturesDir, name)\n\t\tfi, err := os.Stat(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\thdr, err := tar.FileInfoHeader(fi, \"\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thdr.Name = name\n\n\t\tif err := tarWriter.WriteHeader(hdr); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\n\t\tif _, err := io.Copy(tarWriter, f); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif err := tarWriter.Close(); err != nil {\n\t\treturn err\n\t}\n\n\treturn gzipWriter.Close()\n}\n\nvar _ = BeforeSuite(func() {\n\tif os.Getenv(\"GOPATH\") == \"\" {\n\t\tFail(\"Environment variable GOPATH not set\", 1)\n\t}\n\n\tif err := utils.BuildAgent(); err != nil {\n\t\tFail(fmt.Sprintln(\"Could not build the bosh-agent project.\\nError is:\", err))\n\t}\n\n\terr := utils.StartVagrant(\"nats\", VagrantProvider, OsVersion)\n\tnatsPrivateIP, err := utils.RetrievePrivateIP(\"nats\")\n\tExpect(err).NotTo(HaveOccurred())\n\tExpect(natsPrivateIP).NotTo(BeEmpty(), \"Couldn't retrieve NATS private IP\")\n\n\ttemplateSettings(natsPrivateIP, `\"\"`, \"root-disk-settings.json\")\n\ttemplateSettings(natsPrivateIP, `\"\/dev\/sdb\"`, \"second-disk-settings.json\")\n\ttemplateSettings(natsPrivateIP, `\"1\"`, \"second-disk-digit-settings.json\")\n\ttemplateSettings(natsPrivateIP, `\"\/dev\/sdc\"`, \"third-disk-settings.json\")\n\n\tfilename := filepath.Join(dirname, \"fixtures.tgz\")\n\tif err := tarFixtures(dirname, filename); err != nil {\n\t\tFail(fmt.Sprintln(\"Creating fixtures TGZ::\", err))\n\t}\n\n\terr = utils.StartVagrant(\"agent\", VagrantProvider, OsVersion)\n\n\tAgentPublicIP, err = utils.RetrievePublicIP(\"agent\")\n\tExpect(err).NotTo(HaveOccurred())\n\n\tNATSPublicIP, err = utils.RetrievePublicIP(\"nats\")\n\tExpect(err).NotTo(HaveOccurred())\n\n\tif err != nil {\n\t\tFail(fmt.Sprintln(\"Could not setup and run vagrant.\\nError is:\", err))\n\t}\n\n\tendpoint := winrm.NewEndpoint(AgentPublicIP, 5985, false, false, nil, nil, nil, 0)\n\tclient, err := winrm.NewClientWithParameters(\n\t\tendpoint,\n\t\t\"vagrant\",\n\t\t\"Password123!\",\n\t\twinrm.NewParameters(\"PT5M\", \"en-US\", 153600),\n\t)\n\tExpect(err).NotTo(HaveOccurred())\n\n\tagent = &WindowsEnvironment{\n\t\tClient: client,\n\t}\n\n\t\/\/ We do this so that both 2012R2 and 1709 run ephemeral disk tests against raw disks.\n\t\/\/ 2012R2 additional disks start formatted on AWS for some reason.\n\tagent.EnsureDiskCleared(\"1\")\n\tagent.EnsureDiskCleared(\"2\")\n\n\tgoSourcePath := filepath.Join(dirname, \"templates\", \"go\", \"go1.7.1.windows-amd64.zip\")\n\tos.RemoveAll(goSourcePath)\n\tdownloadFile(goSourcePath, \"https:\/\/dl.google.com\/go\/go1.7.1.windows-amd64.zip\")\n})\n\nfunc templateSettings(natsPrivateIP, ephemeralDiskConfig, filename string) {\n\tagentSettings := BoshAgentSettings{\n\t\tNatsPrivateIP: natsPrivateIP,\n\t\tEphemeralDiskConfig: ephemeralDiskConfig,\n\t}\n\tsettingsTmpl, err := template.ParseFiles(\n\t\tfilepath.Join(dirname, \"templates\", \"agent-configuration\", \"settings.json.tmpl\"),\n\t)\n\tExpect(err).NotTo(HaveOccurred())\n\toutputFile, err := os.Create(filepath.Join(dirname, \"agent-configuration\", filename))\n\tdefer outputFile.Close()\n\n\tExpect(err).NotTo(HaveOccurred())\n\terr = settingsTmpl.Execute(outputFile, agentSettings)\n\tExpect(err).NotTo(HaveOccurred())\n}\n\nfunc downloadFile(localPath, sourceURL string) error {\n\tf, err := os.OpenFile(localPath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tres, err := http.Get(sourceURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\tif _, err := io.Copy(f, res.Body); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage migrations\n\nimport (\n\t\"os\"\n\n\t\"code.gitea.io\/gitea\/models\"\n\t\"xorm.io\/builder\"\n\t\"xorm.io\/xorm\"\n)\n\nfunc removeAttachmentMissedRepo(x *xorm.Engine) error {\n\ttype Attachment struct {\n\t\tUUID string `xorm:\"uuid\"`\n\t}\n\tvar start int\n\tattachments := make([]*Attachment, 0, 50)\n\tfor {\n\t\terr := x.Select(\"uuid\").Where(builder.NotIn(\"release_id\", builder.Select(\"id\").From(\"`release`\"))).\n\t\t\tOrderBy(\"id\").Limit(50, start).Find(&attachments)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor i := 0; i < len(attachments); i++ {\n\t\t\tos.RemoveAll(models.AttachmentLocalPath(attachments[i].UUID))\n\t\t}\n\n\t\tif len(attachments) < 50 {\n\t\t\tbreak\n\t\t}\n\t\tstart += 50\n\t\tattachments = attachments[:0]\n\t}\n\n\t_, err := x.Exec(\"DELETE FROM attachment WHERE release_id NOT IN (SELECT id FROM `release`)\")\n\treturn err\n}\n<commit_msg>fix wrong migration (#9381)<commit_after>\/\/ Copyright 2019 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage migrations\n\nimport (\n\t\"os\"\n\n\t\"code.gitea.io\/gitea\/models\"\n\t\"xorm.io\/builder\"\n\t\"xorm.io\/xorm\"\n)\n\nfunc removeAttachmentMissedRepo(x *xorm.Engine) error {\n\ttype Attachment struct {\n\t\tUUID string `xorm:\"uuid\"`\n\t}\n\tvar start int\n\tattachments := make([]*Attachment, 0, 50)\n\tfor {\n\t\terr := x.Select(\"uuid\").Where(builder.NotIn(\"release_id\", builder.Select(\"id\").From(\"`release`\"))).\n\t\t\tAnd(\"release_id > 0\").\n\t\t\tOrderBy(\"id\").Limit(50, start).Find(&attachments)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor i := 0; i < len(attachments); i++ {\n\t\t\tos.RemoveAll(models.AttachmentLocalPath(attachments[i].UUID))\n\t\t}\n\n\t\tif len(attachments) < 50 {\n\t\t\tbreak\n\t\t}\n\t\tstart += 50\n\t\tattachments = attachments[:0]\n\t}\n\n\t_, err := x.Exec(\"DELETE FROM attachment WHERE release_id > 0 AND release_id NOT IN (SELECT id FROM `release`)\")\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package doubleratchet\n\nimport (\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"testing\"\n)\n\nfunc TestDefaultCrypto_GenerateDH(t *testing.T) {\n\t\/\/ Arrange.\n\tc := DefaultCrypto{}\n\n\t\/\/ Act.\n\tpair, err := c.GenerateDH()\n\n\t\/\/ Assert.\n\trequire.Nil(t, err)\n\n\trequire.Equal(t, byte(0), pair.PrivateKey[0]&7)\n\trequire.Equal(t, byte(0), pair.PrivateKey[31]&128)\n\trequire.Equal(t, byte(64), pair.PrivateKey[31]&64)\n\n\trequire.Len(t, pair.PrivateKey, 32)\n\trequire.Len(t, pair.PublicKey, 32)\n}\n<commit_msg>Wrote TestDefaultCrypto_DH<commit_after>package doubleratchet\n\nimport (\n\t\"github.com\/stretchr\/testify\/require\"\n\t\"testing\"\n)\n\nfunc TestDefaultCrypto_GenerateDH(t *testing.T) {\n\t\/\/ Arrange.\n\tc := DefaultCrypto{}\n\n\t\/\/ Act.\n\tpair, err := c.GenerateDH()\n\trequire.Nil(t, err)\n\n\t\/\/ Assert.\n\trequire.EqualValues(t, 0, pair.PrivateKey[0]&7)\n\trequire.EqualValues(t, 0, pair.PrivateKey[31]&128)\n\trequire.EqualValues(t, 64, pair.PrivateKey[31]&64)\n\n\trequire.Len(t, pair.PrivateKey, 32)\n\trequire.Len(t, pair.PublicKey, 32)\n}\n\nfunc TestDefaultCrypto_DH(t *testing.T) {\n\t\/\/ Arrange.\n\tc := DefaultCrypto{}\n\n\t\/\/ Act.\n\tvar (\n\t\talicePair, err1 = c.GenerateDH()\n\t\tbobPair, err2 = c.GenerateDH()\n\t\taliceSK = c.DH(alicePair, bobPair.PublicKey)\n\t\tbobSK = c.DH(bobPair, alicePair.PublicKey)\n\t)\n\n\t\/\/ Assert.\n\trequire.Nil(t, err1)\n\trequire.Nil(t, err2)\n\trequire.NotEqual(t, [32]byte{}, aliceSK)\n\trequire.Equal(t, aliceSK, bobSK)\n}\n<|endoftext|>"} {"text":"<commit_before>package sudoku\n\nimport (\n\t\"testing\"\n)\n\nfunc TestHumanSolveSearcher(t *testing.T) {\n\n\tgrid := NewGrid()\n\tgrid.LoadSDK(TEST_GRID)\n\n\tsearcher := newHumanSolveSearcher(grid, nil, DefaultHumanSolveOptions())\n\n\tif searcher.Len() != 1 {\n\t\tt.Error(\"Expected new frontier to have exactly one item in it, but got\", searcher.Len())\n\t}\n\n\tif searcher.grid == nil {\n\t\tt.Error(\"No grid in frontier\")\n\t}\n\n\tbasePotentialNextStep := searcher.NextPossibleStep()\n\n\tif basePotentialNextStep == nil {\n\t\tt.Error(\"Didn'get base potential next step\")\n\t}\n\n\tbaseGrid := basePotentialNextStep.Grid()\n\n\tif baseGrid.DataString() != grid.DataString() {\n\t\tt.Error(\"the grid in the base item in the frontier was not right. Got\", baseGrid.DataString(), \"wanted\", grid.DataString())\n\t}\n\n\tif searcher.Len() != 0 {\n\t\tt.Error(\"Getting the base potential next step should have emptied it, but len is\", searcher.Len())\n\t}\n\n\tnInRowTechnique := techniquesByName[\"Necessary In Row\"]\n\n\tsimpleFillStep := &SolveStep{\n\t\tTechnique: nInRowTechnique,\n\t\tTargetCells: CellSlice{\n\t\t\tgrid.Cell(0, 0),\n\t\t},\n\t\tTargetNums: IntSlice{1},\n\t}\n\n\tif simpleFillStep.Technique == nil {\n\t\tt.Fatal(\"couldn't find necessary in row technique\")\n\t}\n\n\tsimpleFillStepItem := basePotentialNextStep.AddStep(simpleFillStep)\n\n\tif simpleFillStepItem == nil {\n\t\tt.Fatal(\"Adding fill step didn't return anything\")\n\t}\n\n\tif simpleFillStepItem.heapIndex != -1 {\n\t\tt.Fatal(\"Adding completed item to frontier didn't have -1 index\")\n\t}\n\n\tif len(searcher.completedItems) != 1 {\n\t\tt.Error(\"Expected the completed item to go into CompletedItems,but it's empty\")\n\t}\n\n\tif len(searcher.itemsToExplore) != 0 {\n\t\tt.Error(\"Expected the completed item to go into COmpletedItems, but it apparently went into items.\")\n\t}\n\n\t\/\/TODO: this is a fragile test. Whenever we change twiddlers it will change.\n\t\/\/Curerntly 5 for common numbers, 22.5 for Human Likelihood, and 1 for chained steps.\n\texpectedGoodness := 112.5\n\n\tif simpleFillStepItem.Goodness() != expectedGoodness {\n\t\tt.Error(\"Goodness of simple fill step was wrong. Execpted\", expectedGoodness, \"got\", simpleFillStepItem.Goodness(), simpleFillStepItem.explainGoodness())\n\t}\n\n\tcell := simpleFillStepItem.Grid().Cell(0, 0)\n\n\tif cell.Number() != 1 {\n\t\tt.Error(\"Cell in grid was not set correctly. Got\", cell.Number(), \"wanted 1\")\n\t}\n\n\tnonFillStep := &SolveStep{\n\t\tTechnique: techniquesByName[\"Pointing Pair Row\"],\n\t\tTargetCells: CellSlice{\n\t\t\tgrid.Cell(0, 1),\n\t\t},\n\t\tTargetNums: IntSlice{2},\n\t}\n\n\tif nonFillStep.Technique == nil {\n\t\tt.Fatal(\"Couldn't find pointing pair row techhnique\")\n\t}\n\n\tnonFillStepItem := basePotentialNextStep.AddStep(nonFillStep)\n\n\tif nonFillStepItem == nil {\n\t\tt.Fatal(\"Adding non fill step didn't return a frontier object\")\n\t}\n\n\tif searcher.Len() != 1 {\n\t\tt.Error(\"Frontier had wrong length after adding one complete and one incomplete items. Got\", searcher.Len(), \"expected 1\")\n\t}\n\n\tif searcher.itemsToExplore[0] != nonFillStepItem {\n\t\tt.Error(\"We though that simpleFillStep should be at the end of the queue but it wasn't.\")\n\t}\n\n\texpensiveStep := &SolveStep{\n\t\tTechnique: techniquesByName[\"Hidden Quad Block\"],\n\t\tTargetCells: CellSlice{\n\t\t\tgrid.Cell(0, 2),\n\t\t},\n\t\tTargetNums: IntSlice{3},\n\t}\n\n\texpensiveStepItem := basePotentialNextStep.AddStep(expensiveStep)\n\n\tif searcher.Len() != 2 {\n\t\tt.Error(\"Wrong length after adding two items to frontier. Got\", searcher.Len(), \"expected 2\")\n\t}\n\n\tif searcher.itemsToExplore[1] != nonFillStepItem {\n\t\tt.Error(\"We expected the expensive step to be worse\", searcher.String())\n\t}\n\n\texpensiveStepItem.Twiddle(0.00000000000000001, \"Very small amount to make this #1\")\n\n\tif searcher.itemsToExplore[1] != expensiveStepItem {\n\t\tt.Error(\"Even after twiddling up guess step by a lot it still wasn't in the top position in frontier\", searcher.itemsToExplore[0], searcher.itemsToExplore[1])\n\t}\n\n\tpoppedItem := searcher.NextPossibleStep()\n\n\tif poppedItem != expensiveStepItem {\n\t\tt.Error(\"Expected popped item to be the non-fill step now that its goodness is higher, but got\", poppedItem)\n\t}\n\n\tif searcher.Len() != 1 {\n\t\tt.Error(\"Wrong frontier length after popping item. Got\", searcher.Len(), \"expected 1\")\n\t}\n\n\tpoppedItem = searcher.NextPossibleStep()\n\t\/\/Should be nonFillStepItem\n\n\tcurrentGoodness := nonFillStepItem.Goodness()\n\n\tcompletedNonFillStemItem := nonFillStepItem.AddStep(simpleFillStep)\n\n\tif completedNonFillStemItem.Goodness() == currentGoodness {\n\t\tt.Error(\"Adding a step to end of nonfill step didn't change goodness.\")\n\t}\n\n\tif searcher.Len() != 0 {\n\t\tt.Error(\"Adding an item gave wrong len. Got\", searcher.Len(), \"wanted 0\")\n\t}\n\n\tif len(searcher.completedItems) != 2 {\n\t\tt.Error(\"Got wrong number of completed items. Got\", len(searcher.completedItems), \"expected 2\")\n\t}\n\n\tsteps := completedNonFillStemItem.Steps()\n\n\tif len(steps) != 2 {\n\t\tt.Error(\"Expected two steps back, got\", len(steps))\n\t}\n\n\tif steps[0] != nonFillStepItem.step {\n\t\tt.Error(\"Expected first step to be the step of nonFillStepItem. Got\", steps[0])\n\t}\n\n}\n<commit_msg>Demoted one TODO to just normal comment.<commit_after>package sudoku\n\nimport (\n\t\"testing\"\n)\n\nfunc TestHumanSolveSearcher(t *testing.T) {\n\n\tgrid := NewGrid()\n\tgrid.LoadSDK(TEST_GRID)\n\n\tsearcher := newHumanSolveSearcher(grid, nil, DefaultHumanSolveOptions())\n\n\tif searcher.Len() != 1 {\n\t\tt.Error(\"Expected new frontier to have exactly one item in it, but got\", searcher.Len())\n\t}\n\n\tif searcher.grid == nil {\n\t\tt.Error(\"No grid in frontier\")\n\t}\n\n\tbasePotentialNextStep := searcher.NextPossibleStep()\n\n\tif basePotentialNextStep == nil {\n\t\tt.Error(\"Didn'get base potential next step\")\n\t}\n\n\tbaseGrid := basePotentialNextStep.Grid()\n\n\tif baseGrid.DataString() != grid.DataString() {\n\t\tt.Error(\"the grid in the base item in the frontier was not right. Got\", baseGrid.DataString(), \"wanted\", grid.DataString())\n\t}\n\n\tif searcher.Len() != 0 {\n\t\tt.Error(\"Getting the base potential next step should have emptied it, but len is\", searcher.Len())\n\t}\n\n\tnInRowTechnique := techniquesByName[\"Necessary In Row\"]\n\n\tsimpleFillStep := &SolveStep{\n\t\tTechnique: nInRowTechnique,\n\t\tTargetCells: CellSlice{\n\t\t\tgrid.Cell(0, 0),\n\t\t},\n\t\tTargetNums: IntSlice{1},\n\t}\n\n\tif simpleFillStep.Technique == nil {\n\t\tt.Fatal(\"couldn't find necessary in row technique\")\n\t}\n\n\tsimpleFillStepItem := basePotentialNextStep.AddStep(simpleFillStep)\n\n\tif simpleFillStepItem == nil {\n\t\tt.Fatal(\"Adding fill step didn't return anything\")\n\t}\n\n\tif simpleFillStepItem.heapIndex != -1 {\n\t\tt.Fatal(\"Adding completed item to frontier didn't have -1 index\")\n\t}\n\n\tif len(searcher.completedItems) != 1 {\n\t\tt.Error(\"Expected the completed item to go into CompletedItems,but it's empty\")\n\t}\n\n\tif len(searcher.itemsToExplore) != 0 {\n\t\tt.Error(\"Expected the completed item to go into COmpletedItems, but it apparently went into items.\")\n\t}\n\n\t\/\/This is a fragile way to test this; it will need to be updated every\n\t\/\/time we change the twiddlers. :-( Currently 5 for common numbers, 22.5\n\t\/\/for Human Likelihood, and 1 for chained steps.\n\texpectedGoodness := 112.5\n\n\tif simpleFillStepItem.Goodness() != expectedGoodness {\n\t\tt.Error(\"Goodness of simple fill step was wrong. Execpted\", expectedGoodness, \"got\", simpleFillStepItem.Goodness(), simpleFillStepItem.explainGoodness())\n\t}\n\n\tcell := simpleFillStepItem.Grid().Cell(0, 0)\n\n\tif cell.Number() != 1 {\n\t\tt.Error(\"Cell in grid was not set correctly. Got\", cell.Number(), \"wanted 1\")\n\t}\n\n\tnonFillStep := &SolveStep{\n\t\tTechnique: techniquesByName[\"Pointing Pair Row\"],\n\t\tTargetCells: CellSlice{\n\t\t\tgrid.Cell(0, 1),\n\t\t},\n\t\tTargetNums: IntSlice{2},\n\t}\n\n\tif nonFillStep.Technique == nil {\n\t\tt.Fatal(\"Couldn't find pointing pair row techhnique\")\n\t}\n\n\tnonFillStepItem := basePotentialNextStep.AddStep(nonFillStep)\n\n\tif nonFillStepItem == nil {\n\t\tt.Fatal(\"Adding non fill step didn't return a frontier object\")\n\t}\n\n\tif searcher.Len() != 1 {\n\t\tt.Error(\"Frontier had wrong length after adding one complete and one incomplete items. Got\", searcher.Len(), \"expected 1\")\n\t}\n\n\tif searcher.itemsToExplore[0] != nonFillStepItem {\n\t\tt.Error(\"We though that simpleFillStep should be at the end of the queue but it wasn't.\")\n\t}\n\n\texpensiveStep := &SolveStep{\n\t\tTechnique: techniquesByName[\"Hidden Quad Block\"],\n\t\tTargetCells: CellSlice{\n\t\t\tgrid.Cell(0, 2),\n\t\t},\n\t\tTargetNums: IntSlice{3},\n\t}\n\n\texpensiveStepItem := basePotentialNextStep.AddStep(expensiveStep)\n\n\tif searcher.Len() != 2 {\n\t\tt.Error(\"Wrong length after adding two items to frontier. Got\", searcher.Len(), \"expected 2\")\n\t}\n\n\tif searcher.itemsToExplore[1] != nonFillStepItem {\n\t\tt.Error(\"We expected the expensive step to be worse\", searcher.String())\n\t}\n\n\texpensiveStepItem.Twiddle(0.00000000000000001, \"Very small amount to make this #1\")\n\n\tif searcher.itemsToExplore[1] != expensiveStepItem {\n\t\tt.Error(\"Even after twiddling up guess step by a lot it still wasn't in the top position in frontier\", searcher.itemsToExplore[0], searcher.itemsToExplore[1])\n\t}\n\n\tpoppedItem := searcher.NextPossibleStep()\n\n\tif poppedItem != expensiveStepItem {\n\t\tt.Error(\"Expected popped item to be the non-fill step now that its goodness is higher, but got\", poppedItem)\n\t}\n\n\tif searcher.Len() != 1 {\n\t\tt.Error(\"Wrong frontier length after popping item. Got\", searcher.Len(), \"expected 1\")\n\t}\n\n\tpoppedItem = searcher.NextPossibleStep()\n\t\/\/Should be nonFillStepItem\n\n\tcurrentGoodness := nonFillStepItem.Goodness()\n\n\tcompletedNonFillStemItem := nonFillStepItem.AddStep(simpleFillStep)\n\n\tif completedNonFillStemItem.Goodness() == currentGoodness {\n\t\tt.Error(\"Adding a step to end of nonfill step didn't change goodness.\")\n\t}\n\n\tif searcher.Len() != 0 {\n\t\tt.Error(\"Adding an item gave wrong len. Got\", searcher.Len(), \"wanted 0\")\n\t}\n\n\tif len(searcher.completedItems) != 2 {\n\t\tt.Error(\"Got wrong number of completed items. Got\", len(searcher.completedItems), \"expected 2\")\n\t}\n\n\tsteps := completedNonFillStemItem.Steps()\n\n\tif len(steps) != 2 {\n\t\tt.Error(\"Expected two steps back, got\", len(steps))\n\t}\n\n\tif steps[0] != nonFillStepItem.step {\n\t\tt.Error(\"Expected first step to be the step of nonFillStepItem. Got\", steps[0])\n\t}\n\n}\n\/<|endoftext|>"} {"text":"<commit_before>package store\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\n\t\"golang.org\/x\/oauth2\"\n)\n\n\/\/ Project type\ntype Project struct {\n\tName string\n}\n\n\/\/ User type\ntype User struct {\n\tUsername string\n\tToken oauth2.Token\n\tAvatarURL string\n\tProjects []Project\n}\n\n\/\/ GetUser returns an user info\nfunc GetUser(username string) (user User, err error) {\n\tbuf, err := ioutil.ReadFile(\".\/data\/\" + username + \".json\")\n\terr = json.Unmarshal(buf, &user)\n\tif err != nil {\n\t\treturn user, err\n\t}\n\treturn user, err\n}\n\n\/\/ SaveUser saves user info\nfunc SaveUser(user User) error {\n\tb, err := json.Marshal(user)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(\".\/data\/\"+user.Username+\".json\", b, 0644)\n\treturn err\n}\n<commit_msg>Added Icon to Project<commit_after>package store\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\n\t\"golang.org\/x\/oauth2\"\n)\n\n\/\/ Project type\ntype Project struct {\n\tName string\n\tIcon string\n}\n\n\/\/ User type\ntype User struct {\n\tUsername string\n\tToken oauth2.Token\n\tAvatarURL string\n\tProjects []Project\n}\n\n\/\/ GetUser returns an user info\nfunc GetUser(username string) (user User, err error) {\n\tbuf, err := ioutil.ReadFile(\".\/data\/\" + username + \".json\")\n\terr = json.Unmarshal(buf, &user)\n\tif err != nil {\n\t\treturn user, err\n\t}\n\treturn user, err\n}\n\n\/\/ SaveUser saves user info\nfunc SaveUser(user User) error {\n\tb, err := json.Marshal(user)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(\".\/data\/\"+user.Username+\".json\", b, 0644)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 NetApp, Inc. All Rights Reserved.\n\npackage persistentstore\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/netapp\/trident\/config\"\n\t\"github.com\/netapp\/trident\/storage\"\n)\n\ntype CRDDataMigrator struct {\n\tetcdClient EtcdClient\n\tcrdClient CRDClient\n\tdryRun bool\n\ttransformer *EtcdDataTransformer\n}\n\nfunc NewCRDDataMigrator(etcdClient EtcdClient, crdClient CRDClient, dryRun bool, t *EtcdDataTransformer) *CRDDataMigrator {\n\treturn &CRDDataMigrator{\n\t\tetcdClient: etcdClient,\n\t\tcrdClient: crdClient,\n\t\tdryRun: dryRun,\n\t\ttransformer: t,\n\t}\n}\n\nfunc (m *CRDDataMigrator) RunPrechecks() error {\n\n\t\/\/ Ensure we have valid etcd V3 data present\n\tetcdVersion, err := m.etcdClient.GetVersion()\n\tif err != nil {\n\t\tif MatchKeyNotFoundErr(err) {\n\t\t\treturn fmt.Errorf(\"etcdv3 data not found, install an earlier Trident version in the range \" +\n\t\t\t\t\"[v18.01, v19.01] to automatically upgrade from etcdv2 to etcdv3\")\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"could not check for etcdv3 data; %v\", err)\n\t\t}\n\t} else {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"PersistentStoreVersion\": etcdVersion.PersistentStoreVersion,\n\t\t\t\"OrchestratorAPIVersion\": etcdVersion.OrchestratorAPIVersion,\n\t\t}).Debug(\"Found etcdv3 persistent state version.\")\n\n\t\tif etcdVersion.PersistentStoreVersion != string(EtcdV3Store) {\n\t\t\treturn fmt.Errorf(\"etcd persistent state version is %s, not %s\",\n\t\t\t\tetcdVersion.PersistentStoreVersion, EtcdV3Store)\n\t\t}\n\t}\n\n\t\/\/ Ensure there are no CRD-based backends\n\tif hasBackends, err := m.crdClient.HasBackends(); err != nil {\n\t\treturn fmt.Errorf(\"could not check for CRD-based backends; %v\", err)\n\t} else if hasBackends {\n\t\treturn errors.New(\"CRD-based backends are already present, aborting migration\")\n\t} else {\n\t\tlog.Debug(\"No CRD-based backends found.\")\n\t}\n\n\t\/\/ Ensure there are no CRD-based storage classes\n\tif hasStorageClasses, err := m.crdClient.HasStorageClasses(); err != nil {\n\t\treturn fmt.Errorf(\"could not check for CRD-based storage classes; %v\", err)\n\t} else if hasStorageClasses {\n\t\treturn errors.New(\"CRD-based storage classes are already present, aborting migration\")\n\t} else {\n\t\tlog.Debug(\"No CRD-based storage classes found.\")\n\t}\n\n\t\/\/ Ensure there are no CRD-based volumes\n\tif hasVolumes, err := m.crdClient.HasVolumes(); err != nil {\n\t\treturn fmt.Errorf(\"could not check for CRD-based volumes; %v\", err)\n\t} else if hasVolumes {\n\t\treturn errors.New(\"CRD-based volumes are already present, aborting migration\")\n\t} else {\n\t\tlog.Debug(\"No CRD-based volumes found.\")\n\t}\n\n\t\/\/ Ensure there are no CRD-based volume transactions\n\tif hasVolumeTransactions, err := m.crdClient.HasVolumeTransactions(); err != nil {\n\t\treturn fmt.Errorf(\"could not check for CRD-based volume transactions; %v\", err)\n\t} else if hasVolumeTransactions {\n\t\treturn errors.New(\"CRD-based volume transactions are already present, aborting migration\")\n\t} else {\n\t\tlog.Debug(\"No CRD-based volume transactions found.\")\n\t}\n\n\t\/\/ Ensure there is no CRD-based version\n\tcrdVersion, err := m.crdClient.GetVersion()\n\tif err != nil {\n\t\tif MatchKeyNotFoundErr(err) {\n\t\t\tlog.Debug(\"Trident CRDs not found, migration can proceed.\")\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"could not check for Trident CRDs; %v\", err)\n\t\t}\n\t} else {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"PersistentStoreVersion\": crdVersion.PersistentStoreVersion,\n\t\t\t\"OrchestratorAPIVersion\": crdVersion.OrchestratorAPIVersion,\n\t\t}).Debug(\"Found CRD-based persistent state version.\")\n\t\treturn errors.New(\"Trident CRDs are already present, aborting migration\")\n\t}\n\n\tif err := m.transformer.RunPrechecks(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (m *CRDDataMigrator) Run() error {\n\n\ttransformerResult, err := m.transformer.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := m.migrateBackends(transformerResult.Backends); err != nil {\n\t\treturn err\n\t}\n\n\tif err := m.migrateStorageClasses(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := m.migrateVolumes(transformerResult.Volumes); err != nil {\n\t\treturn err\n\t}\n\n\tif err := m.migrateTransactions(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := m.writeCRDSchemaVersion(); err != nil {\n\t\treturn err\n\t}\n\n\tif m.dryRun {\n\t\tlog.Info(\"Migration dry run completed, no problems found.\")\n\t} else {\n\t\tlog.Info(\"Migration succeeded.\")\n\t}\n\n\treturn nil\n}\n\nfunc (m *CRDDataMigrator) migrateBackends(backends []*storage.BackendPersistent) error {\n\n\tif backends == nil || len(backends) == 0 {\n\t\tif m.dryRun {\n\t\t\tlog.Info(\"Dry run: no backends found.\")\n\t\t} else {\n\t\t\tlog.Info(\"No backends found, none will be migrated.\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tif m.dryRun {\n\t\tlog.WithField(\"count\", len(backends)).Info(\"Dry run: read all backends.\")\n\t\treturn nil\n\t}\n\n\tfor _, backend := range backends {\n\t\tif err := m.crdClient.AddBackendPersistent(backend); err != nil {\n\t\t\treturn fmt.Errorf(\"could not write backend resource; %v\", err)\n\t\t}\n\t\tlog.WithField(\"backend\", backend.Name).Debug(\"Copied backend.\")\n\t}\n\tlog.WithField(\"count\", len(backends)).Info(\"Copied all backends to CRD resources.\")\n\n\treturn nil\n}\n\nfunc (m *CRDDataMigrator) migrateStorageClasses() error {\n\n\tstorageClasses, err := m.etcdClient.GetStorageClasses()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not read storage classes from etcd; %v\", err)\n\t}\n\n\tif len(storageClasses) == 0 {\n\t\tif m.dryRun {\n\t\t\tlog.Info(\"Dry run: no storage classes found.\")\n\t\t} else {\n\t\t\tlog.Info(\"No storage classes found, none will be migrated.\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tif m.dryRun {\n\t\tlog.WithField(\"count\", len(storageClasses)).Info(\"Dry run: read all storage classes.\")\n\t\treturn nil\n\t}\n\n\tfor _, sc := range storageClasses {\n\t\tif err = m.crdClient.AddStorageClassPersistent(sc); err != nil {\n\t\t\treturn fmt.Errorf(\"could not write storage class resource; %v\", err)\n\t\t}\n\t\tlog.WithField(\"sc\", sc.GetName()).Debug(\"Copied storage class.\")\n\t}\n\tlog.WithField(\"count\", len(storageClasses)).Info(\"Copied all storage classes to CRD resources.\")\n\n\treturn nil\n}\n\nfunc (m *CRDDataMigrator) migrateVolumes(volumes []*storage.VolumeExternal) error {\n\n\tif volumes == nil || len(volumes) == 0 {\n\t\tif m.dryRun {\n\t\t\tlog.Info(\"Dry run: no volumes found.\")\n\t\t} else {\n\t\t\tlog.Info(\"No volumes found, none will be migrated.\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tif m.dryRun {\n\t\tlog.WithField(\"count\", len(volumes)).Info(\"Dry run: read all volumes.\")\n\t\treturn nil\n\t}\n\n\tfor _, volume := range volumes {\n\t\tif err := m.crdClient.AddVolumePersistent(volume); err != nil {\n\t\t\treturn fmt.Errorf(\"could not write volume resource; %v\", err)\n\t\t}\n\t\tlog.WithField(\"volume\", volume.Config.Name).Debug(\"Copied volume.\")\n\t}\n\tlog.WithField(\"count\", len(volumes)).Info(\"Copied all volumes to CRD resources.\")\n\n\treturn nil\n}\n\nfunc (m *CRDDataMigrator) migrateTransactions() error {\n\n\ttransactions, err := m.etcdClient.GetVolumeTransactions()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not read transactions from etcd; %v\", err)\n\t}\n\n\tif len(transactions) == 0 {\n\t\tif m.dryRun {\n\t\t\tlog.Info(\"Dry run: no transactions found.\")\n\t\t} else {\n\t\t\tlog.Info(\"No transactions found, none will be migrated.\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tif m.dryRun {\n\t\tlog.WithField(\"count\", len(transactions)).Info(\"Dry run: read all transactions.\")\n\t\treturn nil\n\t}\n\n\tfor _, txn := range transactions {\n\t\tif err = m.crdClient.AddVolumeTransaction(txn); err != nil {\n\t\t\treturn fmt.Errorf(\"could not write transaction resource; %v\", err)\n\t\t}\n\t\tlog.WithField(\"volume\", txn.Config.Name).Debug(\"Copied transaction.\")\n\t}\n\tlog.WithField(\"count\", len(transactions)).Info(\"Copied all transactions to CRD resources.\")\n\n\treturn nil\n}\n\nfunc (m *CRDDataMigrator) writeCRDSchemaVersion() error {\n\n\tif m.dryRun {\n\t\treturn nil\n\t}\n\n\tcrdVersion := &config.PersistentStateVersion{\n\t\tPersistentStoreVersion: string(CRDV1Store),\n\t\tOrchestratorAPIVersion: config.OrchestratorAPIVersion,\n\t}\n\tif err := m.crdClient.SetVersion(crdVersion); err != nil {\n\t\treturn fmt.Errorf(\"failed to set the persistent state version after migration: %v\", err)\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"PersistentStoreVersion\": crdVersion.PersistentStoreVersion,\n\t\t\"OrchestratorAPIVersion\": crdVersion.OrchestratorAPIVersion,\n\t}).Info(\"Wrote CRD-based persistent state version.\")\n\n\treturn nil\n}\n\nfunc (m *CRDDataMigrator) writeEtcdSchemaVersion() error {\n\n\tif m.dryRun {\n\t\treturn nil\n\t}\n\n\tetcdVersion := &config.PersistentStateVersion{\n\t\tPersistentStoreVersion: string(EtcdV3bStore),\n\t\tOrchestratorAPIVersion: config.OrchestratorAPIVersion,\n\t}\n\tif err := m.etcdClient.SetVersion(etcdVersion); err != nil {\n\t\treturn fmt.Errorf(\"failed to set the persistent state version after migration: %v\", err)\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"PersistentStoreVersion\": etcdVersion.PersistentStoreVersion,\n\t\t\"OrchestratorAPIVersion\": etcdVersion.OrchestratorAPIVersion,\n\t}).Info(\"Wrote Etcd-based persistent state version.\")\n\n\treturn nil\n}\n<commit_msg>Added time estimates to etcd-CRD migrator (#73)<commit_after>\/\/ Copyright 2019 NetApp, Inc. All Rights Reserved.\n\npackage persistentstore\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/netapp\/trident\/config\"\n\t\"github.com\/netapp\/trident\/storage\"\n\tstorageclass \"github.com\/netapp\/trident\/storage_class\"\n)\n\ntype CRDDataMigrator struct {\n\tetcdClient EtcdClient\n\tcrdClient CRDClient\n\tdryRun bool\n\ttransformer *EtcdDataTransformer\n\ttotalCount int\n\tmigratedCount int\n\tstartTime time.Time\n}\n\nfunc NewCRDDataMigrator(etcdClient EtcdClient, crdClient CRDClient, dryRun bool, t *EtcdDataTransformer) *CRDDataMigrator {\n\treturn &CRDDataMigrator{\n\t\tetcdClient: etcdClient,\n\t\tcrdClient: crdClient,\n\t\tdryRun: dryRun,\n\t\ttransformer: t,\n\t\ttotalCount: 0,\n\t\tmigratedCount: 0,\n\t}\n}\n\nfunc (m *CRDDataMigrator) RunPrechecks() error {\n\n\t\/\/ Ensure we have valid etcd V3 data present\n\tetcdVersion, err := m.etcdClient.GetVersion()\n\tif err != nil {\n\t\tif MatchKeyNotFoundErr(err) {\n\t\t\treturn fmt.Errorf(\"etcdv3 data not found, install an earlier Trident version in the range \" +\n\t\t\t\t\"[v18.01, v19.01] to automatically upgrade from etcdv2 to etcdv3\")\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"could not check for etcdv3 data; %v\", err)\n\t\t}\n\t} else {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"PersistentStoreVersion\": etcdVersion.PersistentStoreVersion,\n\t\t\t\"OrchestratorAPIVersion\": etcdVersion.OrchestratorAPIVersion,\n\t\t}).Debug(\"Found etcdv3 persistent state version.\")\n\n\t\tif etcdVersion.PersistentStoreVersion != string(EtcdV3Store) {\n\t\t\treturn fmt.Errorf(\"etcd persistent state version is %s, not %s\",\n\t\t\t\tetcdVersion.PersistentStoreVersion, EtcdV3Store)\n\t\t}\n\t}\n\n\t\/\/ Ensure there are no CRD-based backends\n\tif hasBackends, err := m.crdClient.HasBackends(); err != nil {\n\t\treturn fmt.Errorf(\"could not check for CRD-based backends; %v\", err)\n\t} else if hasBackends {\n\t\treturn errors.New(\"CRD-based backends are already present, aborting migration\")\n\t} else {\n\t\tlog.Debug(\"No CRD-based backends found.\")\n\t}\n\n\t\/\/ Ensure there are no CRD-based storage classes\n\tif hasStorageClasses, err := m.crdClient.HasStorageClasses(); err != nil {\n\t\treturn fmt.Errorf(\"could not check for CRD-based storage classes; %v\", err)\n\t} else if hasStorageClasses {\n\t\treturn errors.New(\"CRD-based storage classes are already present, aborting migration\")\n\t} else {\n\t\tlog.Debug(\"No CRD-based storage classes found.\")\n\t}\n\n\t\/\/ Ensure there are no CRD-based volumes\n\tif hasVolumes, err := m.crdClient.HasVolumes(); err != nil {\n\t\treturn fmt.Errorf(\"could not check for CRD-based volumes; %v\", err)\n\t} else if hasVolumes {\n\t\treturn errors.New(\"CRD-based volumes are already present, aborting migration\")\n\t} else {\n\t\tlog.Debug(\"No CRD-based volumes found.\")\n\t}\n\n\t\/\/ Ensure there are no CRD-based volume transactions\n\tif hasVolumeTransactions, err := m.crdClient.HasVolumeTransactions(); err != nil {\n\t\treturn fmt.Errorf(\"could not check for CRD-based volume transactions; %v\", err)\n\t} else if hasVolumeTransactions {\n\t\treturn errors.New(\"CRD-based volume transactions are already present, aborting migration\")\n\t} else {\n\t\tlog.Debug(\"No CRD-based volume transactions found.\")\n\t}\n\n\t\/\/ Ensure there is no CRD-based version\n\tcrdVersion, err := m.crdClient.GetVersion()\n\tif err != nil {\n\t\tif MatchKeyNotFoundErr(err) {\n\t\t\tlog.Debug(\"Trident CRDs not found, migration can proceed.\")\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"could not check for Trident CRDs; %v\", err)\n\t\t}\n\t} else {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"PersistentStoreVersion\": crdVersion.PersistentStoreVersion,\n\t\t\t\"OrchestratorAPIVersion\": crdVersion.OrchestratorAPIVersion,\n\t\t}).Debug(\"Found CRD-based persistent state version.\")\n\t\treturn errors.New(\"Trident CRDs are already present, aborting migration\")\n\t}\n\n\tif err := m.transformer.RunPrechecks(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (m *CRDDataMigrator) Run() error {\n\n\tvar (\n\t\tbackends []*storage.BackendPersistent\n\t\tstorageClasses []*storageclass.Persistent\n\t\tvolumes []*storage.VolumeExternal\n\t\ttransactions []*VolumeTransaction\n\t)\n\n\t\/\/ Transform data into the latest schema\n\ttransformerResult, err := m.transformer.Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Backends and volumes are returned by the schema transformer\n\tbackends = transformerResult.Backends\n\tvolumes = transformerResult.Volumes\n\n\t\/\/ Read storage classes from etcd\n\tstorageClasses, err = m.etcdClient.GetStorageClasses()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not read storage classes from etcd; %v\", err)\n\t}\n\n\t\/\/ Read transactions from etcd\n\ttransactions, err = m.etcdClient.GetVolumeTransactions()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not read transactions from etcd; %v\", err)\n\t}\n\n\t\/\/ Determine number of objects to migrate\n\tm.totalCount = len(backends) + len(storageClasses) + len(volumes) + len(transactions)\n\n\t\/\/ Save start time\n\tm.startTime = time.Now()\n\n\tlog.Infof(\"Migrating %d objects. Please do not interrupt this operation!\", m.totalCount)\n\n\t\/\/ Migrate backends\n\tif err := m.migrateBackends(backends); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Migrate storage classes\n\tif err := m.migrateStorageClasses(storageClasses); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Migrate volumes\n\tif err := m.migrateVolumes(volumes); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Migrate transactions\n\tif err := m.migrateTransactions(transactions); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Write schema version to prevent future migrations\n\tif err := m.writeCRDSchemaVersion(); err != nil {\n\t\treturn err\n\t}\n\n\tif m.dryRun {\n\t\tlog.Info(\"Migration dry run completed, no problems found.\")\n\t} else {\n\t\tlog.WithField(\"count\", m.migratedCount).Info(\"Migration succeeded.\")\n\t}\n\n\treturn nil\n}\n\nfunc (m *CRDDataMigrator) migrateBackends(backends []*storage.BackendPersistent) error {\n\n\tif backends == nil || len(backends) == 0 {\n\t\tif m.dryRun {\n\t\t\tlog.Info(\"Dry run: no backends found.\")\n\t\t} else {\n\t\t\tlog.Info(\"No backends found, none will be migrated.\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tif m.dryRun {\n\t\tlog.WithField(\"count\", len(backends)).Info(\"Dry run: read all backends.\")\n\t\treturn nil\n\t}\n\n\tfor _, backend := range backends {\n\t\tif err := m.crdClient.AddBackendPersistent(backend); err != nil {\n\t\t\treturn fmt.Errorf(\"could not write backend resource; %v\", err)\n\t\t}\n\t\tlog.WithField(\"backend\", backend.Name).Debug(\"Copied backend.\")\n\n\t\tm.migratedCount++\n\t\tm.logTimeRemainingEstimate()\n\t}\n\tlog.WithField(\"count\", len(backends)).Info(\"Copied all backends to CRD resources.\")\n\n\treturn nil\n}\n\nfunc (m *CRDDataMigrator) migrateStorageClasses(storageClasses []*storageclass.Persistent) error {\n\n\tif len(storageClasses) == 0 {\n\t\tif m.dryRun {\n\t\t\tlog.Info(\"Dry run: no storage classes found.\")\n\t\t} else {\n\t\t\tlog.Info(\"No storage classes found, none will be migrated.\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tif m.dryRun {\n\t\tlog.WithField(\"count\", len(storageClasses)).Info(\"Dry run: read all storage classes.\")\n\t\treturn nil\n\t}\n\n\tfor _, sc := range storageClasses {\n\t\tif err := m.crdClient.AddStorageClassPersistent(sc); err != nil {\n\t\t\treturn fmt.Errorf(\"could not write storage class resource; %v\", err)\n\t\t}\n\t\tlog.WithField(\"sc\", sc.GetName()).Debug(\"Copied storage class.\")\n\n\t\tm.migratedCount++\n\t\tm.logTimeRemainingEstimate()\n\t}\n\tlog.WithField(\"count\", len(storageClasses)).Info(\"Copied all storage classes to CRD resources.\")\n\n\treturn nil\n}\n\nfunc (m *CRDDataMigrator) migrateVolumes(volumes []*storage.VolumeExternal) error {\n\n\tif volumes == nil || len(volumes) == 0 {\n\t\tif m.dryRun {\n\t\t\tlog.Info(\"Dry run: no volumes found.\")\n\t\t} else {\n\t\t\tlog.Info(\"No volumes found, none will be migrated.\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tif m.dryRun {\n\t\tlog.WithField(\"count\", len(volumes)).Info(\"Dry run: read all volumes.\")\n\t\treturn nil\n\t}\n\n\tfor _, volume := range volumes {\n\t\tif err := m.crdClient.AddVolumePersistent(volume); err != nil {\n\t\t\treturn fmt.Errorf(\"could not write volume resource; %v\", err)\n\t\t}\n\t\tlog.WithField(\"volume\", volume.Config.Name).Debug(\"Copied volume.\")\n\n\t\tm.migratedCount++\n\t\tm.logTimeRemainingEstimate()\n\t}\n\tlog.WithField(\"count\", len(volumes)).Info(\"Copied all volumes to CRD resources.\")\n\n\treturn nil\n}\n\nfunc (m *CRDDataMigrator) migrateTransactions(transactions []*VolumeTransaction) error {\n\n\tif len(transactions) == 0 {\n\t\tif m.dryRun {\n\t\t\tlog.Info(\"Dry run: no transactions found.\")\n\t\t} else {\n\t\t\tlog.Info(\"No transactions found, none will be migrated.\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tif m.dryRun {\n\t\tlog.WithField(\"count\", len(transactions)).Info(\"Dry run: read all transactions.\")\n\t\treturn nil\n\t}\n\n\tfor _, txn := range transactions {\n\t\tif err := m.crdClient.AddVolumeTransaction(txn); err != nil {\n\t\t\treturn fmt.Errorf(\"could not write transaction resource; %v\", err)\n\t\t}\n\t\tlog.WithField(\"volume\", txn.Config.Name).Debug(\"Copied transaction.\")\n\n\t\tm.migratedCount++\n\t\tm.logTimeRemainingEstimate()\n\t}\n\tlog.WithField(\"count\", len(transactions)).Info(\"Copied all transactions to CRD resources.\")\n\n\treturn nil\n}\n\nfunc (m *CRDDataMigrator) writeCRDSchemaVersion() error {\n\n\tif m.dryRun {\n\t\treturn nil\n\t}\n\n\tcrdVersion := &config.PersistentStateVersion{\n\t\tPersistentStoreVersion: string(CRDV1Store),\n\t\tOrchestratorAPIVersion: config.OrchestratorAPIVersion,\n\t}\n\tif err := m.crdClient.SetVersion(crdVersion); err != nil {\n\t\treturn fmt.Errorf(\"failed to set the persistent state version after migration: %v\", err)\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"PersistentStoreVersion\": crdVersion.PersistentStoreVersion,\n\t\t\"OrchestratorAPIVersion\": crdVersion.OrchestratorAPIVersion,\n\t}).Info(\"Wrote CRD-based persistent state version.\")\n\n\treturn nil\n}\n\nfunc (m *CRDDataMigrator) writeEtcdSchemaVersion() error {\n\n\tif m.dryRun {\n\t\treturn nil\n\t}\n\n\tetcdVersion := &config.PersistentStateVersion{\n\t\tPersistentStoreVersion: string(EtcdV3bStore),\n\t\tOrchestratorAPIVersion: config.OrchestratorAPIVersion,\n\t}\n\tif err := m.etcdClient.SetVersion(etcdVersion); err != nil {\n\t\treturn fmt.Errorf(\"failed to set the persistent state version after migration: %v\", err)\n\t}\n\n\tlog.WithFields(log.Fields{\n\t\t\"PersistentStoreVersion\": etcdVersion.PersistentStoreVersion,\n\t\t\"OrchestratorAPIVersion\": etcdVersion.OrchestratorAPIVersion,\n\t}).Info(\"Wrote Etcd-based persistent state version.\")\n\n\treturn nil\n}\n\nfunc (m *CRDDataMigrator) logTimeRemainingEstimate() {\n\n\t\/\/ Ensure we have migrated something, and log the time remaining every 100 objects\n\tif (m.migratedCount%100) != 0 || m.migratedCount <= 0 {\n\t\treturn\n\t}\n\n\t\/\/ Determine time elapsed and ensure it is positive\n\ttimeElapsed := time.Since(m.startTime)\n\tif timeElapsed <= 0 {\n\t\treturn\n\t}\n\n\t\/\/ Determine average time per object\n\tnanosecondsPerObject := timeElapsed.Nanoseconds() \/ int64(m.migratedCount)\n\n\t\/\/ Determine time remaining\n\tobjectsRemaining := m.totalCount - m.migratedCount\n\ttimeRemaining := time.Duration(nanosecondsPerObject * int64(objectsRemaining)).Truncate(time.Second)\n\n\tlog.WithField(\"estimatedTimeRemaining\", timeRemaining).Infof(\"Migrated %d of %d objects.\",\n\t\tm.migratedCount, m.totalCount)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"compress\/gzip\"\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/andrewstuart\/bible-http-server\/osis\"\n\t_ \"github.com\/lib\/pq\"\n)\n\nconst (\n\tversionInsert = `INSERT INTO version (extid, name) VALUES ($1, $2) RETURNING id`\n)\n\nvar db *sql.DB\n\nconst PassEnvName = \"PGPASSWORD\"\n\nfunc init() {\n\tvar err error\n\n\tpass := strings.TrimSpace(os.Getenv(PassEnvName))\n\tif pass == \"\" {\n\t\tlog.Fatalf(\"Please set %s environment variable with postgres password.\", PassEnvName)\n\t}\n\n\tdbHost := stringDef(getLinkedPort(), \"localhost\")\n\n\tdbConn := fmt.Sprintf(\"postgres:\/\/bible:%s@%s\/bible?sslmode=disable\", pass, dbHost)\n\n\tdb, err = sql.Open(\"postgres\", dbConn)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc store(b *osis.Bible) error {\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar versionId int\n\terr = tx.QueryRow(versionInsert, b.Version.ID, b.Version.Title).Scan(&versionId)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\twg := &sync.WaitGroup{}\n\n\tfor i, bk := range b.Books {\n\t\twg.Add(1)\n\t\tgo func(i int, bk *osis.Book) {\n\t\t\tdefer wg.Done()\n\n\t\t\tfor j, ch := range bk.Chs {\n\t\t\t\tfor k, vs := range ch.Vrs {\n\n\t\t\t\t\t\/\/Handle words (osis uses for greek\/hebrew)\n\t\t\t\t\tif len(vs.Words) != 0 {\n\t\t\t\t\t\ttxt := make([]string, len(vs.Words))\n\t\t\t\t\t\tfor i := range vs.Words {\n\t\t\t\t\t\t\ttxt[i] = vs.Words[i].Text\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tvs.Text = strings.Join(txt, \" \")\n\t\t\t\t\t}\n\n\t\t\t\t\tvar verseId int\n\t\t\t\t\terr = tx.QueryRow(`SELECT id FROM verse where book = $1 and chapter = $2 and verse = $3`, bk.ID, j+1, k+1).Scan(&verseId)\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Printf(\"Could not find %s %d:%d, inserting.\\n\", bk.ID, j+1, k+1)\n\t\t\t\t\t\terr = tx.QueryRow(`INSERT INTO verse (book, chapter, verse) values ($1, $2, $3) RETURNING id`, bk.ID, j+1, k+1).Scan(&verseId)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t_, err = tx.Exec(`INSERT INTO verse_version (versionId, verseId, text) values ($1, $2, $3)`, versionId, verseId, vs.Text)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}(i, bk)\n\t}\n\n\twg.Wait()\n\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc getLinkedPort() string {\n\te := os.Getenv(\"POSTGRES_PORT\")\n\tif e == \"\" {\n\t\treturn \"\"\n\t}\n\n\tvals := strings.Split(e, \":\/\/\")\n\tif len(vals) < 2 {\n\t\treturn \"\"\n\t}\n\n\treturn vals[1]\n}\n\nfunc loadFromGzippedFile(path string) (*osis.Bible, error) {\n\tzipped, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr, err := gzip.NewReader(zipped)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn osis.NewBible(r)\n}\n<commit_msg>Update some variable names<commit_after>package main\n\nimport (\n\t\"compress\/gzip\"\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/andrewstuart\/bible-http-server\/osis\"\n\t_ \"github.com\/lib\/pq\"\n)\n\nconst (\n\tversionInsert = `INSERT INTO version (extid, name) VALUES ($1, $2) RETURNING id`\n)\n\nvar db *sql.DB\n\nconst passEnvName = \"PGPASSWORD\"\n\nfunc init() {\n\tvar err error\n\n\tpass := strings.TrimSpace(os.Getenv(passEnvName))\n\tif pass == \"\" {\n\t\tlog.Fatalf(\"Please set %s environment variable with postgres password.\", passEnvName)\n\t}\n\n\tdbHost := stringDef(getLinkedPort(), \"localhost\")\n\n\tdbConn := fmt.Sprintf(\"postgres:\/\/bible:%s@%s\/bible?sslmode=disable\", pass, dbHost)\n\n\tdb, err = sql.Open(\"postgres\", dbConn)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc store(b *osis.Bible) error {\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar versionID int\n\terr = tx.QueryRow(versionInsert, b.Version.ID, b.Version.Title).Scan(&versionID)\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\twg := &sync.WaitGroup{}\n\n\tfor i, bk := range b.Books {\n\t\twg.Add(1)\n\t\tgo func(i int, bk *osis.Book) {\n\t\t\tdefer wg.Done()\n\n\t\t\tfor j, ch := range bk.Chs {\n\t\t\t\tfor k, vs := range ch.Vrs {\n\n\t\t\t\t\t\/\/Handle words (osis uses for greek\/hebrew)\n\t\t\t\t\tif len(vs.Words) != 0 {\n\t\t\t\t\t\ttxt := make([]string, len(vs.Words))\n\t\t\t\t\t\tfor i := range vs.Words {\n\t\t\t\t\t\t\ttxt[i] = vs.Words[i].Text\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tvs.Text = strings.Join(txt, \" \")\n\t\t\t\t\t}\n\n\t\t\t\t\tvar verseID int\n\t\t\t\t\terr = tx.QueryRow(`SELECT id FROM verse where book = $1 and chapter = $2 and verse = $3`, bk.ID, j+1, k+1).Scan(&verseID)\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Printf(\"Could not find %s %d:%d, inserting.\\n\", bk.ID, j+1, k+1)\n\t\t\t\t\t\terr = tx.QueryRow(`INSERT INTO verse (book, chapter, verse) values ($1, $2, $3) RETURNING id`, bk.ID, j+1, k+1).Scan(&verseID)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\t_, err = tx.Exec(`INSERT INTO verse_version (versionId, verseId, text) values ($1, $2, $3)`, versionID, verseID, vs.Text)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}(i, bk)\n\t}\n\n\twg.Wait()\n\n\tif err != nil {\n\t\ttx.Rollback()\n\t\treturn err\n\t}\n\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc getLinkedPort() string {\n\te := os.Getenv(\"POSTGRES_PORT\")\n\tif e == \"\" {\n\t\treturn \"\"\n\t}\n\n\tvals := strings.Split(e, \":\/\/\")\n\tif len(vals) < 2 {\n\t\treturn \"\"\n\t}\n\n\treturn vals[1]\n}\n\nfunc loadFromGzippedFile(path string) (*osis.Bible, error) {\n\tzipped, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr, err := gzip.NewReader(zipped)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn osis.NewBible(r)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/rpc\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"path\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"syscall\"\n)\n\nfunc createPidFile(filename string) (*os.File, error) {\n\n\tfile, err := os.OpenFile(filename,\n\t\tos.O_WRONLY|os.O_CREATE|os.O_EXCL|os.O_SYNC,\n\t\tsyscall.S_IRUSR|syscall.S_IWUSR)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = syscall.Flock(int(file.Fd()), syscall.F_WRLCK)\n\tif err != nil {\n\t\tfile.Close()\n\t\treturn nil, err\n\t}\n\n\t_, err = file.WriteString(fmt.Sprintf(\"%d\", os.Getpid()))\n\tif err != nil {\n\t\tfile.Close()\n\t\treturn nil, err\n\t}\n\n\treturn file, nil\n}\n\nfunc removePidFile(file *os.File) {\n\n\terr := syscall.Flock(int(file.Fd()), syscall.F_UNLCK)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfile.Close()\n\tos.Remove(file.Name())\n}\n\ntype TranscodeQueue struct {\n\tRequests chan string\n}\n\nfunc NewTranscodeQueue() *TranscodeQueue {\n\treturn &TranscodeQueue{\n\t\tRequests: make(chan string, 100),\n\t}\n}\n\nfunc (this *TranscodeQueue) DoTranscode(file string, reply *bool) error {\n\tlog.Println(\"Asked to transcode:\", file)\n\tthis.Requests <- file\n\t*reply = true\n\treturn nil\n}\n\nfunc performTranscode(infile, outfile string, isDone chan<- error) {\n\n\t\/\/transcodeCommand := exec.Command(\"\/bin\/sleep\", \"10\")\n\ttranscodeCommand := exec.Command(\n\t\t\"\/usr\/local\/bin\/HandBrakeCLI\",\n\t\t\"-O\", \"-I\",\n\t\t\"-f\", \"mp4\",\n\t\t\"--encoder\", \"x264\",\n\t\t\"--x264-preset\", \"faster\",\n\t\t\"--x264-tune\", \"film\",\n\t\t\"--h264-profile\", \"auto\",\n\t\t\"--h264-level\", \"auto\",\n\t\t\"--quality\", \"20\",\n\t\t\"--large-file\",\n\t\t\"--aencoder\", \"ca_aac,copy:ac3\",\n\t\t\"-B\", \"160\",\n\t\t\"--mixdown\", \"dpl2\",\n\t\t\"--aname\", \"English\",\n\t\t\"--loose-anamorphic\",\n\t\t\"--decomb\",\n\t\t\"--modulus\", \"2\",\n\t\t\"-i\", infile,\n\t\t\"-o\", outfile)\n\n\t\/\/transferCommand := exec.Command(\"\/bin\/sleep\", \"5\")\n\ttransferCommand := exec.Command(\n\t\t\"\/usr\/bin\/scp\", \"-B\", \"-C\", \"-q\", outfile,\n\t\t\"guy@mediaserver.local:\/srv\/Media\/Movies\/0\\\\ -\\\\ Inbox\/TV\")\n\n\tlog.Println(\"Performing transcode of\", infile)\n\tlog.Println(\"Writing file\", outfile)\n\n\terr := transcodeCommand.Run()\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\tisDone <- err\n\t\treturn\n\t}\n\n\tlog.Println(\"Done transcoding file\")\n\tlog.Println(\"Transfering file to server\")\n\n\terr = transferCommand.Run()\n\tif err == nil {\n\t\tos.Remove(infile)\n\t\t\/\/os.Remove(outfile)\n\t\t\/\/ leave commented out until im sure i understand the scp problem\n\t} else {\n\t\tlog.Println(err.Error())\n\t\tisDone <- err\n\t\treturn\n\t}\n\n\tlog.Println(\"Done transfering file to server\")\n\tisDone <- nil\n}\n\nfunc requestTranscode(infile, unixSocket string) (reply bool, err error) {\n\n\tclient, err := rpc.Dial(\"unix\", unixSocket)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = client.Call(\"TranscodeQueue.DoTranscode\", infile, &reply)\n\treturn\n}\n\nfunc createTranscodeServer(pidfile *os.File, unixSocket string) error {\n\n\tsigc := make(chan os.Signal, 10)\n\tsignal.Notify(sigc,\n\t\tsyscall.SIGHUP,\n\t\tsyscall.SIGINT,\n\t\tsyscall.SIGTERM,\n\t\tsyscall.SIGQUIT)\n\tdefer signal.Stop(sigc)\n\n\tq := NewTranscodeQueue()\n\tserver := rpc.NewServer()\n\tserver.Register(q)\n\n\tl, err := net.Listen(\"unix\", unixSocket)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer l.Close()\n\n\tgo server.Accept(l)\n\n\tisDone := make(chan error)\n\trxp, err := regexp.Compile(`(.*)(\\.mpg)$`)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tq.Requests <- os.Args[1]\n\n\tfor {\n\t\tselect {\n\t\tcase <-sigc:\n\t\t\treturn nil\n\t\tcase req := <-q.Requests:\n\n\t\t\toutfile := rxp.ReplaceAllString(req, \"$1.m4v\")\n\t\t\tgo performTranscode(req, outfile, isDone)\n\n\t\t\tselect {\n\t\t\tcase <-sigc:\n\t\t\t\treturn nil\n\t\t\tcase e := <-isDone:\n\t\t\t\tif e != nil {\n\t\t\t\t\treturn e\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc main() {\n\n\tif len(os.Args) != 2 {\n\t\treturn\n\t}\n\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tlog.Println(\"Starting...\")\n\n\ttoTranscode := os.Args[1]\n\tpidFileName := path.Join(os.TempDir(), \"transcodequeue.pid\")\n\tunixSocket := path.Join(os.TempDir(), \"transcodequeue.sock\")\n\n\tpidFile, err := createPidFile(pidFileName)\n\n\tif err != nil {\n\t\tlog.Println(\"Sending server request to transcode\", toTranscode)\n\n\t\treply, err := requestTranscode(toTranscode, unixSocket)\n\t\tif err != nil {\n\t\t\tlog.Panicln(err.Error())\n\t\t}\n\n\t\tif reply {\n\t\t\tlog.Println(\"Successfully requested transcode\")\n\t\t}\n\t} else {\n\t\tlog.Println(\"Creating Server\")\n\n\t\tdefer removePidFile(pidFile)\n\n\t\terr = createTranscodeServer(pidFile, unixSocket)\n\t\tif err != nil {\n\t\t\tlog.Panicln(err.Error())\n\t\t}\n\t}\n\n\tlog.Println(\"Finished\")\n}\n<commit_msg>cacnel running backgrund job on ctrlc<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/rpc\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"path\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"syscall\"\n)\n\nfunc createPidFile(filename string) (*os.File, error) {\n\n\tfile, err := os.OpenFile(filename,\n\t\tos.O_WRONLY|os.O_CREATE|os.O_EXCL|os.O_SYNC,\n\t\tsyscall.S_IRUSR|syscall.S_IWUSR)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = syscall.Flock(int(file.Fd()), syscall.F_WRLCK)\n\tif err != nil {\n\t\tfile.Close()\n\t\treturn nil, err\n\t}\n\n\t_, err = file.WriteString(fmt.Sprintf(\"%d\", os.Getpid()))\n\tif err != nil {\n\t\tfile.Close()\n\t\treturn nil, err\n\t}\n\n\treturn file, nil\n}\n\nfunc removePidFile(file *os.File) {\n\n\terr := syscall.Flock(int(file.Fd()), syscall.F_UNLCK)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfile.Close()\n\tos.Remove(file.Name())\n}\n\ntype TranscodeQueue struct {\n\tRequests chan string\n}\n\nfunc NewTranscodeQueue() *TranscodeQueue {\n\treturn &TranscodeQueue{\n\t\tRequests: make(chan string, 100),\n\t}\n}\n\nfunc (this *TranscodeQueue) DoTranscode(file string, reply *bool) error {\n\tlog.Println(\"Asked to transcode:\", file)\n\tthis.Requests <- file\n\t*reply = true\n\treturn nil\n}\n\nfunc runCommand(cmd *exec.Cmd, die <-chan os.Signal) (bool, error) {\n\n\tisDone := make(chan error)\n\tif err := cmd.Start(); err != nil {\n\t\treturn false, err\n\t}\n\n\tgo func() {\n\t\tif err := cmd.Wait(); err != nil {\n\t\t\tisDone <- err\n\t\t}\n\t\tisDone <- nil\n\t}()\n\n\tselect {\n\tcase err := <-isDone:\n\t\treturn true, err\n\tcase <-die:\n\t\tlog.Println(\"Killing process\")\n\t\tif err := cmd.Process.Kill(); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\treturn false, nil\n\t}\n\n}\n\nvar wasCanceled error = errors.New(\"Was Canceled\")\n\nfunc performTranscode(infile, outfile string, isDone chan<- error, sigc <-chan os.Signal) {\n\n\t\/\/transcodeCommand := exec.Command(\"\/bin\/sleep\", \"10\")\n\ttranscodeCommand := exec.Command(\n\t\t\"\/usr\/local\/bin\/HandBrakeCLI\",\n\t\t\"-O\", \"-I\",\n\t\t\"-f\", \"mp4\",\n\t\t\"--encoder\", \"x264\",\n\t\t\"--x264-preset\", \"faster\",\n\t\t\"--x264-tune\", \"film\",\n\t\t\"--h264-profile\", \"auto\",\n\t\t\"--h264-level\", \"auto\",\n\t\t\"--quality\", \"20\",\n\t\t\"--large-file\",\n\t\t\"--aencoder\", \"ca_aac,copy:ac3\",\n\t\t\"-B\", \"160\",\n\t\t\"--mixdown\", \"dpl2\",\n\t\t\"--aname\", \"English\",\n\t\t\"--loose-anamorphic\",\n\t\t\"--decomb\",\n\t\t\"--modulus\", \"2\",\n\t\t\"-i\", infile,\n\t\t\"-o\", outfile)\n\n\t\/\/transferCommand := exec.Command(\"\/bin\/sleep\", \"5\")\n\ttransferCommand := exec.Command(\n\t\t\"\/usr\/bin\/scp\", \"-B\", \"-C\", \"-q\", outfile,\n\t\t\"guy@mediaserver.local:\/srv\/Media\/Movies\/0\\\\ -\\\\ Inbox\/TV\")\n\n\tlog.Println(\"Performing transcode of\", infile)\n\tlog.Println(\"Writing file\", outfile)\n\n\tdone, err := runCommand(transcodeCommand, sigc)\n\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\tisDone <- err\n\t\treturn\n\t}\n\tif !done {\n\t\tisDone <- wasCanceled\n\t\treturn\n\t}\n\n\tlog.Println(\"Done transcoding file\")\n\tlog.Println(\"Transfering file to server\")\n\n\tdone, err = runCommand(transferCommand, sigc)\n\n\tif err == nil && done {\n\t\tos.Remove(infile)\n\t\t\/\/os.Remove(outfile)\n\t\t\/\/ leave commented out until im sure i understand the scp problem\n\t} else {\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t\tisDone <- err\n\n\t\t} else {\n\t\t\tisDone <- wasCanceled\n\t\t}\n\t\treturn\n\t}\n\n\tlog.Println(\"Done transfering file to server\")\n\tisDone <- nil\n}\n\nfunc requestTranscode(infile, unixSocket string) (reply bool, err error) {\n\n\tclient, err := rpc.Dial(\"unix\", unixSocket)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = client.Call(\"TranscodeQueue.DoTranscode\", infile, &reply)\n\treturn\n}\n\nfunc createTranscodeServer(pidfile *os.File, unixSocket string) error {\n\n\tsigc := make(chan os.Signal, 10)\n\tsignal.Notify(sigc,\n\t\tsyscall.SIGHUP,\n\t\tsyscall.SIGINT,\n\t\tsyscall.SIGTERM,\n\t\tsyscall.SIGQUIT)\n\tdefer signal.Stop(sigc)\n\n\tq := NewTranscodeQueue()\n\tserver := rpc.NewServer()\n\tserver.Register(q)\n\n\tl, err := net.Listen(\"unix\", unixSocket)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer l.Close()\n\n\tgo server.Accept(l)\n\n\tisDone := make(chan error)\n\trxp, err := regexp.Compile(`(.*)(\\.mpg)$`)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tq.Requests <- os.Args[1]\n\n\tfor {\n\t\tselect {\n\t\tcase req := <-q.Requests:\n\n\t\t\toutfile := rxp.ReplaceAllString(req, \"$1.m4v\")\n\t\t\tgo performTranscode(req, outfile, isDone, sigc)\n\n\t\t\tselect {\n\t\t\tcase e := <-isDone:\n\t\t\t\tif e != nil {\n\t\t\t\t\treturn e\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc main() {\n\n\tif len(os.Args) != 2 {\n\t\treturn\n\t}\n\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tlog.Println(\"Starting...\")\n\n\ttoTranscode := os.Args[1]\n\tpidFileName := path.Join(os.TempDir(), \"transcodequeue.pid\")\n\tunixSocket := path.Join(os.TempDir(), \"transcodequeue.sock\")\n\n\tpidFile, err := createPidFile(pidFileName)\n\n\tif err != nil {\n\t\tlog.Println(\"Sending server request to transcode\", toTranscode)\n\n\t\treply, err := requestTranscode(toTranscode, unixSocket)\n\t\tif err != nil {\n\t\t\tlog.Panicln(err.Error())\n\t\t}\n\n\t\tif reply {\n\t\t\tlog.Println(\"Successfully requested transcode\")\n\t\t}\n\t} else {\n\t\tlog.Println(\"Creating Server\")\n\n\t\tdefer removePidFile(pidFile)\n\n\t\terr = createTranscodeServer(pidFile, unixSocket)\n\t\tif err != nil {\n\t\t\tlog.Println(err.Error())\n\t\t}\n\t}\n\n\tlog.Println(\"Finished\")\n}\n<|endoftext|>"} {"text":"<commit_before>package datastore\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/suite\"\n)\n\ntype UsersTestSuite struct {\n\tsuite.Suite\n}\n\nfunc (s *UsersTestSuite) SetupTest() {\n\tInit(testDB)\n\tcreateTables()\n\tnewTestUsers()\n}\n\nfunc (s *UsersTestSuite) TearDownTest() {\n\tdropTables()\n\tExit()\n}\n\nfunc (s *UsersTestSuite) TestGetByUsername() {\n\tuser, err := testStore.UserStore.GetByUsername(testSenderUname)\n\ts.Equal(testSender.Username, user.Username)\n\ts.Equal(testSender.Email, user.Email)\n\ts.Equal(testSender.PhoneNumber, user.PhoneNumber)\n\ts.Equal(testSender.Password, user.Password)\n\ts.Nil(err)\n}\n\nfunc TestUsers(t *testing.T) {\n\tsuite.Run(t, new(UsersTestSuite))\n}\n<commit_msg>Finish tests for datastore\/users<commit_after>package datastore\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/suite\"\n\t\"gitlab.com\/wujiang\/asapp\"\n)\n\ntype UsersTestSuite struct {\n\tsuite.Suite\n}\n\nfunc (s *UsersTestSuite) SetupTest() {\n\tInit(testDB)\n\tcreateTables()\n\tnewTestUsers()\n}\n\nfunc (s *UsersTestSuite) TearDownTest() {\n\tdropTables()\n\tExit()\n}\n\nfunc (s *UsersTestSuite) TestGetByUsername() {\n\tuser, err := testStore.UserStore.GetByUsername(testSenderUname)\n\ts.Equal(testSender.Username, user.Username)\n\ts.Equal(testSender.Email, user.Email)\n\ts.Equal(testSender.PhoneNumber, user.PhoneNumber)\n\ts.Equal(testSender.Password, user.Password)\n\ts.Nil(err)\n}\n\nfunc (s *UsersTestSuite) TestCreate() {\n\tuser := asapp.NewUser(\"test\", \"last\", \"username\", \"password123\",\n\t\t\"test@last.com\", \"1357902468\", \"0.0.0.0\")\n\ts.Nil(testStore.UserStore.Create(user))\n\tu, err := testStore.UserStore.GetByUsername(\"username\")\n\ts.Nil(err)\n\ts.Equal(\"test\", u.FirstName)\n\ts.Equal(\"username\", u.Username)\n\n\t\/\/ duplicates\n\ts.NotNil(testStore.UserStore.Create(user))\n\tct, err := testStore.dbh.Delete(user)\n\ts.Equal(int64(1), ct)\n\ts.Nil(err)\n}\n\nfunc (s *UsersTestSuite) TestUpdate() {\n\tu := testSender\n\tu.Email = \"changed@send.com\"\n\tct, err := testStore.UserStore.Update(u)\n\ts.Equal(int64(1), ct)\n\ts.Nil(err)\n\n\tuser, err := testStore.UserStore.GetByUsername(testSenderUname)\n\ts.Equal(testSender.Username, user.Username)\n\ts.Equal(testSender.Email, \"changed@send.com\")\n}\n\nfunc TestUsers(t *testing.T) {\n\tsuite.Run(t, new(UsersTestSuite))\n}\n<|endoftext|>"} {"text":"<commit_before>package kloud\n\nimport (\n\t\"crypto\/sha1\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"koding\/kites\/kloud\/api\/amazon\"\n\t\"koding\/kites\/kloud\/contexthelper\/publickeys\"\n\t\"koding\/kites\/kloud\/contexthelper\/session\"\n\t\"koding\/kites\/kloud\/klient\"\n\t\"koding\/kites\/kloud\/userdata\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/hashicorp\/go-multierror\"\n\t\"github.com\/hashicorp\/hcl\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"github.com\/koding\/kite\/protocol\"\n\t\"github.com\/mitchellh\/goamz\/aws\"\n\t\"github.com\/mitchellh\/goamz\/ec2\"\n\t\"github.com\/nu7hatch\/gouuid\"\n)\n\ntype TerraformMachine struct {\n\tProvider string `json:\"provider\"`\n\tLabel string `json:\"label\"`\n\tRegion string `json:\"region\"`\n\tQueryString string `json:\"queryString,omitempty\"`\n\tAttributes map[string]string `json:\"attributes\"`\n}\n\ntype Machines struct {\n\tMachines []TerraformMachine `json:\"machines\"`\n}\n\ntype buildData struct {\n\tTemplate string\n\tRegion string\n\tKiteIds map[string]string\n}\n\nfunc (m *Machines) AppendRegion(region string) {\n\tfor i, machine := range m.Machines {\n\t\tmachine.Region = region\n\t\tm.Machines[i] = machine\n\t}\n}\n\nfunc (m *Machines) AppendQueryString(queryStrings map[string]string) {\n\tfor i, machine := range m.Machines {\n\t\tqueryString := queryStrings[machine.Label]\n\t\tmachine.QueryString = protocol.Kite{ID: queryString}.String()\n\t\tm.Machines[i] = machine\n\t}\n}\n\n\/\/ WithLabel returns the machine with the associated label\nfunc (m *Machines) WithLabel(label string) (TerraformMachine, error) {\n\tfor _, machine := range m.Machines {\n\t\tif machine.Label == label {\n\t\t\treturn machine, nil\n\t\t}\n\t}\n\n\treturn TerraformMachine{}, fmt.Errorf(\"couldn't find machine with label '%s\", label)\n}\n\nfunc machinesFromState(state *terraform.State) (*Machines, error) {\n\tif state.Modules == nil {\n\t\treturn nil, errors.New(\"state modules is empty\")\n\t}\n\n\tout := &Machines{\n\t\tMachines: make([]TerraformMachine, 0),\n\t}\n\n\tattrs := make(map[string]string, 0)\n\n\tfor _, m := range state.Modules {\n\t\tfor resource, r := range m.Resources {\n\t\t\tif r.Primary == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tprovider, label, err := parseProviderAndLabel(resource)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tfor key, val := range r.Primary.Attributes {\n\t\t\t\tattrs[key] = val\n\t\t\t}\n\n\t\t\tout.Machines = append(out.Machines, TerraformMachine{\n\t\t\t\tProvider: provider,\n\t\t\t\tLabel: label,\n\t\t\t\tAttributes: attrs,\n\t\t\t})\n\t\t}\n\t}\n\n\treturn out, nil\n}\n\nfunc machinesFromPlan(plan *terraform.Plan) (*Machines, error) {\n\tif plan.Diff == nil {\n\t\treturn nil, errors.New(\"plan diff is empty\")\n\t}\n\n\tif plan.Diff.Modules == nil {\n\t\treturn nil, errors.New(\"plan diff module is empty\")\n\t}\n\n\tout := &Machines{\n\t\tMachines: make([]TerraformMachine, 0),\n\t}\n\n\tattrs := make(map[string]string, 0)\n\n\tfor _, d := range plan.Diff.Modules {\n\t\tif d.Resources == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor providerResource, r := range d.Resources {\n\t\t\tif r.Attributes == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor name, a := range r.Attributes {\n\t\t\t\tattrs[name] = a.New\n\t\t\t}\n\n\t\t\tprovider, label, err := parseProviderAndLabel(providerResource)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tout.Machines = append(out.Machines, TerraformMachine{\n\t\t\t\tProvider: provider,\n\t\t\t\tLabel: label,\n\t\t\t\tAttributes: attrs,\n\t\t\t})\n\t\t}\n\t}\n\n\treturn out, nil\n}\n\nfunc parseProviderAndLabel(resource string) (string, string, error) {\n\t\/\/ resource is in the form of \"aws_instance.foo.bar\"\n\tsplitted := strings.Split(resource, \"_\")\n\tif len(splitted) < 2 {\n\t\treturn \"\", \"\", fmt.Errorf(\"provider resource is unknown: %v\", splitted)\n\t}\n\n\t\/\/ splitted[1]: instance.foo.bar\n\tresourceSplitted := strings.SplitN(splitted[1], \".\", 2)\n\n\tprovider := splitted[0] \/\/ aws\n\tlabel := resourceSplitted[1] \/\/ foo.bar\n\n\treturn provider, label, nil\n}\n\nfunc regionFromHCL(hclContent string) (string, error) {\n\tvar data struct {\n\t\tProvider struct {\n\t\t\tAws struct {\n\t\t\t\tRegion string\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := hcl.Decode(&data, hclContent); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif data.Provider.Aws.Region == \"\" {\n\t\treturn \"\", fmt.Errorf(\"HCL content doesn't contain region information: %s\", hclContent)\n\t}\n\n\treturn data.Provider.Aws.Region, nil\n}\n\nfunc injectKodingData(ctx context.Context, hclContent, username string, creds *terraformCredentials) (*buildData, error) {\n\tsess, ok := session.FromContext(ctx)\n\tif !ok {\n\t\treturn nil, errors.New(\"session context is not passed\")\n\t}\n\n\tkeys, ok := publickeys.FromContext(ctx)\n\tif !ok {\n\t\treturn nil, errors.New(\"public keys are not available\")\n\t}\n\n\t\/\/ for now we only support \"aws\", the logic below should be refactored once\n\t\/\/ we support multiple providers\n\tvar accessKey, secretKey string\n\tfor _, c := range creds.Creds {\n\t\tif c.Provider != \"aws\" {\n\t\t\tcontinue\n\t\t}\n\n\t\taccessKey = c.Data[\"access_key\"]\n\t\tsecretKey = c.Data[\"secret_key\"]\n\t}\n\n\tregion, err := regionFromHCL(hclContent)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ inject our own public\/private keys into the machine\n\tamazonClient, err := amazon.New(\n\t\tmap[string]interface{}{\n\t\t\t\"key_pair\": keys.KeyName,\n\t\t\t\"publicKey\": keys.PublicKey,\n\t\t\t\"privateKey\": keys.PrivateKey,\n\t\t},\n\t\tec2.New(\n\t\t\taws.Auth{AccessKey: accessKey, SecretKey: secretKey},\n\t\t\taws.Regions[region],\n\t\t))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"kloud aws client err: %s\", err)\n\t}\n\n\tsubnets, err := amazonClient.ListSubnets()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(subnets.Subnets) == 0 {\n\t\treturn nil, errors.New(\"no subnets are available\")\n\t}\n\n\tvar subnetId string\n\tvar vpcId string\n\tfor _, subnet := range subnets.Subnets {\n\t\tif subnet.AvailableIpAddressCount == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tsubnetId = subnet.SubnetId\n\t\tvpcId = subnet.VpcId\n\t}\n\n\tif subnetId == \"\" {\n\t\treturn nil, errors.New(\"subnetId is empty\")\n\t}\n\n\tvar groupName = \"Koding-Kloud-SG\"\n\tsess.Log.Debug(\"Fetching or creating SG: %s, %s\", groupName, vpcId)\n\tgroup, err := amazonClient.CreateOrGetSecurityGroup(groupName, vpcId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsess.Log.Debug(\"first group = %+v\\n\", group)\n\tsess.Log.Debug(\"vpcId = %+v\\n\", vpcId)\n\tsess.Log.Debug(\"subnetId = %+v\\n\", subnetId)\n\n\t\/\/ this will either create the \"kloud-deployment\" key or it will just\n\t\/\/ return with a nil error (means success)\n\tif _, err = amazonClient.DeployKey(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data struct {\n\t\tResource struct {\n\t\t\tAws_Instance map[string]map[string]interface{} `json:\"aws_instance\"`\n\t\t} `json:\"resource\"`\n\t\tProvider map[string]map[string]interface{} `json:\"provider\"`\n\t\tVariable map[string]map[string]interface{} `json:\"variable\"`\n\t}\n\n\tif err := hcl.Decode(&data, hclContent); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(data.Resource.Aws_Instance) == 0 {\n\t\treturn nil, fmt.Errorf(\"instance is empty: %v\", data.Resource.Aws_Instance)\n\t}\n\n\tkiteIds := make(map[string]string)\n\n\tfor resourceName, instance := range data.Resource.Aws_Instance {\n\t\t\/\/ create a new kite id for every new aws resource\n\t\tkiteUUID, err := uuid.NewV4()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tkiteId := kiteUUID.String()\n\n\t\tuserdata, err := sess.Userdata.Create(&userdata.CloudInitConfig{\n\t\t\tUsername: username,\n\t\t\tGroups: []string{\"sudo\"},\n\t\t\tHostname: username, \/\/ no typo here. hostname = username\n\t\t\tKiteId: kiteId,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tkiteIds[resourceName] = kiteId\n\t\tinstance[\"user_data\"] = string(userdata)\n\t\tinstance[\"key_name\"] = keys.KeyName\n\t\tinstance[\"security_groups\"] = []string{group.Id}\n\n\t\t\/\/ user has provided a custom subnet id, if this is the case, fetch the\n\t\t\/\/ securitygroup from it.\n\t\tif instance[\"subnet_id\"] != \"\" {\n\t\t\tsubnetId := instance[\"subnet_id\"]\n\t\t\tvar subnet ec2.Subnet\n\t\t\tfound := false\n\t\t\tfor _, s := range subnets.Subnets {\n\t\t\t\tif s.SubnetId == subnetId {\n\t\t\t\t\tfound = true\n\t\t\t\t\tsubnet = s\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !found {\n\t\t\t\treturn nil, fmt.Errorf(\"no subnet with id '%s' found\", subnetId)\n\t\t\t}\n\n\t\t\tgroup, err := amazonClient.CreateOrGetSecurityGroup(groupName, subnet.VpcId)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tsess.Log.Debug(\"second group = %+v\\n\", group)\n\t\t\tinstance[\"security_groups\"] = []string{group.Id}\n\t\t} else {\n\t\t\tinstance[\"subnet_id\"] = subnetId\n\t\t}\n\n\t\tdata.Resource.Aws_Instance[resourceName] = instance\n\t}\n\n\tout, err := json.MarshalIndent(data, \"\", \" \")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb := &buildData{\n\t\tTemplate: string(out),\n\t\tKiteIds: kiteIds,\n\t\tRegion: region,\n\t}\n\n\treturn b, nil\n}\n\n\/\/ appendVariables appends the given key\/value credentials to the hclFile (terraform) file\nfunc appendVariables(hclFile string, creds *terraformCredentials) (string, error) {\n\n\tfound := false\n\tfor _, cred := range creds.Creds {\n\t\t\/\/ we only support aws for now\n\t\tif cred.Provider != \"aws\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tfound = true\n\t\tfor k, v := range cred.Data {\n\t\t\thclFile += \"\\n\"\n\t\t\tvarTemplate := `\nvariable \"%s\" {\n\tdefault = \"%s\"\n}`\n\t\t\thclFile += fmt.Sprintf(varTemplate, k, v)\n\t\t}\n\t}\n\n\tif !found {\n\t\treturn \"\", fmt.Errorf(\"no creds found for: %v\", creds)\n\t}\n\n\treturn hclFile, nil\n}\n\nfunc varsFromCredentials(creds *terraformCredentials) map[string]string {\n\tvars := make(map[string]string, 0)\n\tfor _, cred := range creds.Creds {\n\t\tfor k, v := range cred.Data {\n\t\t\tvars[k] = v\n\t\t}\n\t}\n\treturn vars\n}\n\nfunc sha1sum(s string) string {\n\treturn fmt.Sprintf(\"%x\", sha1.Sum([]byte(s)))\n}\n\nfunc checkKlients(ctx context.Context, kiteIds map[string]string) error {\n\tsess, ok := session.FromContext(ctx)\n\tif !ok {\n\t\treturn errors.New(\"session context is not passed\")\n\t}\n\n\tvar wg sync.WaitGroup\n\tvar mu sync.Mutex \/\/ protects multierror\n\tvar multiErrors error\n\n\tfor l, k := range kiteIds {\n\t\twg.Add(1)\n\t\tgo func(label, kiteId string) {\n\t\t\tdefer wg.Done()\n\n\t\t\tqueryString := protocol.Kite{ID: kiteId}.String()\n\t\t\tklientRef, err := klient.NewWithTimeout(sess.Kite, queryString, time.Minute*5)\n\t\t\tif err != nil {\n\t\t\t\tmu.Lock()\n\t\t\t\tmultiErrors = multierror.Append(multiErrors,\n\t\t\t\t\tfmt.Errorf(\"Couldn't connect to '%s:%s'\", label, kiteId))\n\t\t\t\tmu.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer klientRef.Close()\n\n\t\t\tif err := klientRef.Ping(); err != nil {\n\t\t\t\tmu.Lock()\n\t\t\t\tmultiErrors = multierror.Append(multiErrors,\n\t\t\t\t\tfmt.Errorf(\"Couldn't send ping to '%s:%s'\", label, kiteId))\n\t\t\t\tmu.Unlock()\n\t\t\t}\n\t\t}(l, k)\n\t}\n\n\twg.Wait()\n\n\treturn multiErrors\n}\n<commit_msg>kloud\/apply: fix not passing subnet_id<commit_after>package kloud\n\nimport (\n\t\"crypto\/sha1\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"koding\/kites\/kloud\/api\/amazon\"\n\t\"koding\/kites\/kloud\/contexthelper\/publickeys\"\n\t\"koding\/kites\/kloud\/contexthelper\/session\"\n\t\"koding\/kites\/kloud\/klient\"\n\t\"koding\/kites\/kloud\/userdata\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/hashicorp\/go-multierror\"\n\t\"github.com\/hashicorp\/hcl\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"github.com\/koding\/kite\/protocol\"\n\t\"github.com\/mitchellh\/goamz\/aws\"\n\t\"github.com\/mitchellh\/goamz\/ec2\"\n\t\"github.com\/nu7hatch\/gouuid\"\n)\n\ntype TerraformMachine struct {\n\tProvider string `json:\"provider\"`\n\tLabel string `json:\"label\"`\n\tRegion string `json:\"region\"`\n\tQueryString string `json:\"queryString,omitempty\"`\n\tAttributes map[string]string `json:\"attributes\"`\n}\n\ntype Machines struct {\n\tMachines []TerraformMachine `json:\"machines\"`\n}\n\ntype buildData struct {\n\tTemplate string\n\tRegion string\n\tKiteIds map[string]string\n}\n\nfunc (m *Machines) AppendRegion(region string) {\n\tfor i, machine := range m.Machines {\n\t\tmachine.Region = region\n\t\tm.Machines[i] = machine\n\t}\n}\n\nfunc (m *Machines) AppendQueryString(queryStrings map[string]string) {\n\tfor i, machine := range m.Machines {\n\t\tqueryString := queryStrings[machine.Label]\n\t\tmachine.QueryString = protocol.Kite{ID: queryString}.String()\n\t\tm.Machines[i] = machine\n\t}\n}\n\n\/\/ WithLabel returns the machine with the associated label\nfunc (m *Machines) WithLabel(label string) (TerraformMachine, error) {\n\tfor _, machine := range m.Machines {\n\t\tif machine.Label == label {\n\t\t\treturn machine, nil\n\t\t}\n\t}\n\n\treturn TerraformMachine{}, fmt.Errorf(\"couldn't find machine with label '%s\", label)\n}\n\nfunc machinesFromState(state *terraform.State) (*Machines, error) {\n\tif state.Modules == nil {\n\t\treturn nil, errors.New(\"state modules is empty\")\n\t}\n\n\tout := &Machines{\n\t\tMachines: make([]TerraformMachine, 0),\n\t}\n\n\tattrs := make(map[string]string, 0)\n\n\tfor _, m := range state.Modules {\n\t\tfor resource, r := range m.Resources {\n\t\t\tif r.Primary == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tprovider, label, err := parseProviderAndLabel(resource)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tfor key, val := range r.Primary.Attributes {\n\t\t\t\tattrs[key] = val\n\t\t\t}\n\n\t\t\tout.Machines = append(out.Machines, TerraformMachine{\n\t\t\t\tProvider: provider,\n\t\t\t\tLabel: label,\n\t\t\t\tAttributes: attrs,\n\t\t\t})\n\t\t}\n\t}\n\n\treturn out, nil\n}\n\nfunc machinesFromPlan(plan *terraform.Plan) (*Machines, error) {\n\tif plan.Diff == nil {\n\t\treturn nil, errors.New(\"plan diff is empty\")\n\t}\n\n\tif plan.Diff.Modules == nil {\n\t\treturn nil, errors.New(\"plan diff module is empty\")\n\t}\n\n\tout := &Machines{\n\t\tMachines: make([]TerraformMachine, 0),\n\t}\n\n\tattrs := make(map[string]string, 0)\n\n\tfor _, d := range plan.Diff.Modules {\n\t\tif d.Resources == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor providerResource, r := range d.Resources {\n\t\t\tif r.Attributes == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor name, a := range r.Attributes {\n\t\t\t\tattrs[name] = a.New\n\t\t\t}\n\n\t\t\tprovider, label, err := parseProviderAndLabel(providerResource)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tout.Machines = append(out.Machines, TerraformMachine{\n\t\t\t\tProvider: provider,\n\t\t\t\tLabel: label,\n\t\t\t\tAttributes: attrs,\n\t\t\t})\n\t\t}\n\t}\n\n\treturn out, nil\n}\n\nfunc parseProviderAndLabel(resource string) (string, string, error) {\n\t\/\/ resource is in the form of \"aws_instance.foo.bar\"\n\tsplitted := strings.Split(resource, \"_\")\n\tif len(splitted) < 2 {\n\t\treturn \"\", \"\", fmt.Errorf(\"provider resource is unknown: %v\", splitted)\n\t}\n\n\t\/\/ splitted[1]: instance.foo.bar\n\tresourceSplitted := strings.SplitN(splitted[1], \".\", 2)\n\n\tprovider := splitted[0] \/\/ aws\n\tlabel := resourceSplitted[1] \/\/ foo.bar\n\n\treturn provider, label, nil\n}\n\nfunc regionFromHCL(hclContent string) (string, error) {\n\tvar data struct {\n\t\tProvider struct {\n\t\t\tAws struct {\n\t\t\t\tRegion string\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := hcl.Decode(&data, hclContent); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif data.Provider.Aws.Region == \"\" {\n\t\treturn \"\", fmt.Errorf(\"HCL content doesn't contain region information: %s\", hclContent)\n\t}\n\n\treturn data.Provider.Aws.Region, nil\n}\n\nfunc injectKodingData(ctx context.Context, hclContent, username string, creds *terraformCredentials) (*buildData, error) {\n\tsess, ok := session.FromContext(ctx)\n\tif !ok {\n\t\treturn nil, errors.New(\"session context is not passed\")\n\t}\n\n\tkeys, ok := publickeys.FromContext(ctx)\n\tif !ok {\n\t\treturn nil, errors.New(\"public keys are not available\")\n\t}\n\n\t\/\/ for now we only support \"aws\", the logic below should be refactored once\n\t\/\/ we support multiple providers\n\tvar accessKey, secretKey string\n\tfor _, c := range creds.Creds {\n\t\tif c.Provider != \"aws\" {\n\t\t\tcontinue\n\t\t}\n\n\t\taccessKey = c.Data[\"access_key\"]\n\t\tsecretKey = c.Data[\"secret_key\"]\n\t}\n\n\tregion, err := regionFromHCL(hclContent)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ inject our own public\/private keys into the machine\n\tamazonClient, err := amazon.New(\n\t\tmap[string]interface{}{\n\t\t\t\"key_pair\": keys.KeyName,\n\t\t\t\"publicKey\": keys.PublicKey,\n\t\t\t\"privateKey\": keys.PrivateKey,\n\t\t},\n\t\tec2.New(\n\t\t\taws.Auth{AccessKey: accessKey, SecretKey: secretKey},\n\t\t\taws.Regions[region],\n\t\t))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"kloud aws client err: %s\", err)\n\t}\n\n\tsubnets, err := amazonClient.ListSubnets()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(subnets.Subnets) == 0 {\n\t\treturn nil, errors.New(\"no subnets are available\")\n\t}\n\n\tvar subnetId string\n\tvar vpcId string\n\tfor _, subnet := range subnets.Subnets {\n\t\tif subnet.AvailableIpAddressCount == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tsubnetId = subnet.SubnetId\n\t\tvpcId = subnet.VpcId\n\t}\n\n\tif subnetId == \"\" {\n\t\treturn nil, errors.New(\"subnetId is empty\")\n\t}\n\n\tvar groupName = \"Koding-Kloud-SG\"\n\tsess.Log.Debug(\"Fetching or creating SG: %s, %s\", groupName, vpcId)\n\tgroup, err := amazonClient.CreateOrGetSecurityGroup(groupName, vpcId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsess.Log.Debug(\"first group = %+v\\n\", group)\n\tsess.Log.Debug(\"vpcId = %+v\\n\", vpcId)\n\tsess.Log.Debug(\"subnetId = %+v\\n\", subnetId)\n\n\t\/\/ this will either create the \"kloud-deployment\" key or it will just\n\t\/\/ return with a nil error (means success)\n\tif _, err = amazonClient.DeployKey(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data struct {\n\t\tResource struct {\n\t\t\tAws_Instance map[string]map[string]interface{} `json:\"aws_instance\"`\n\t\t} `json:\"resource\"`\n\t\tProvider map[string]map[string]interface{} `json:\"provider\"`\n\t\tVariable map[string]map[string]interface{} `json:\"variable\"`\n\t}\n\n\tif err := hcl.Decode(&data, hclContent); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(data.Resource.Aws_Instance) == 0 {\n\t\treturn nil, fmt.Errorf(\"instance is empty: %v\", data.Resource.Aws_Instance)\n\t}\n\n\tkiteIds := make(map[string]string)\n\n\tfor resourceName, instance := range data.Resource.Aws_Instance {\n\t\t\/\/ create a new kite id for every new aws resource\n\t\tkiteUUID, err := uuid.NewV4()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tkiteId := kiteUUID.String()\n\n\t\tuserdata, err := sess.Userdata.Create(&userdata.CloudInitConfig{\n\t\t\tUsername: username,\n\t\t\tGroups: []string{\"sudo\"},\n\t\t\tHostname: username, \/\/ no typo here. hostname = username\n\t\t\tKiteId: kiteId,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tkiteIds[resourceName] = kiteId\n\t\tinstance[\"user_data\"] = string(userdata)\n\t\tinstance[\"key_name\"] = keys.KeyName\n\t\tinstance[\"security_groups\"] = []string{group.Id}\n\n\t\t\/\/ user has provided a custom subnet id, if this is the case, fetch the\n\t\t\/\/ securitygroup from it.\n\t\tif instance[\"subnet_id\"] != nil {\n\t\t\tsubnetId, ok := instance[\"subnet_id\"].(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"subnet Id should be a string, got: %v\", instance[\"subnet_id\"])\n\t\t\t}\n\n\t\t\tvar subnet ec2.Subnet\n\t\t\tfound := false\n\t\t\tfor _, s := range subnets.Subnets {\n\t\t\t\tif s.SubnetId == subnetId {\n\t\t\t\t\tfound = true\n\t\t\t\t\tsubnet = s\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !found {\n\t\t\t\treturn nil, fmt.Errorf(\"no subnet with id '%s' found\", subnetId)\n\t\t\t}\n\n\t\t\tgroup, err := amazonClient.CreateOrGetSecurityGroup(groupName, subnet.VpcId)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tsess.Log.Debug(\"second group = %+v\\n\", group)\n\t\t\tinstance[\"security_groups\"] = []string{group.Id}\n\t\t} else {\n\t\t\tinstance[\"subnet_id\"] = subnetId\n\t\t}\n\n\t\tdata.Resource.Aws_Instance[resourceName] = instance\n\t}\n\n\tout, err := json.MarshalIndent(data, \"\", \" \")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb := &buildData{\n\t\tTemplate: string(out),\n\t\tKiteIds: kiteIds,\n\t\tRegion: region,\n\t}\n\n\treturn b, nil\n}\n\n\/\/ appendVariables appends the given key\/value credentials to the hclFile (terraform) file\nfunc appendVariables(hclFile string, creds *terraformCredentials) (string, error) {\n\n\tfound := false\n\tfor _, cred := range creds.Creds {\n\t\t\/\/ we only support aws for now\n\t\tif cred.Provider != \"aws\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tfound = true\n\t\tfor k, v := range cred.Data {\n\t\t\thclFile += \"\\n\"\n\t\t\tvarTemplate := `\nvariable \"%s\" {\n\tdefault = \"%s\"\n}`\n\t\t\thclFile += fmt.Sprintf(varTemplate, k, v)\n\t\t}\n\t}\n\n\tif !found {\n\t\treturn \"\", fmt.Errorf(\"no creds found for: %v\", creds)\n\t}\n\n\treturn hclFile, nil\n}\n\nfunc varsFromCredentials(creds *terraformCredentials) map[string]string {\n\tvars := make(map[string]string, 0)\n\tfor _, cred := range creds.Creds {\n\t\tfor k, v := range cred.Data {\n\t\t\tvars[k] = v\n\t\t}\n\t}\n\treturn vars\n}\n\nfunc sha1sum(s string) string {\n\treturn fmt.Sprintf(\"%x\", sha1.Sum([]byte(s)))\n}\n\nfunc checkKlients(ctx context.Context, kiteIds map[string]string) error {\n\tsess, ok := session.FromContext(ctx)\n\tif !ok {\n\t\treturn errors.New(\"session context is not passed\")\n\t}\n\n\tvar wg sync.WaitGroup\n\tvar mu sync.Mutex \/\/ protects multierror\n\tvar multiErrors error\n\n\tfor l, k := range kiteIds {\n\t\twg.Add(1)\n\t\tgo func(label, kiteId string) {\n\t\t\tdefer wg.Done()\n\n\t\t\tqueryString := protocol.Kite{ID: kiteId}.String()\n\t\t\tklientRef, err := klient.NewWithTimeout(sess.Kite, queryString, time.Minute*5)\n\t\t\tif err != nil {\n\t\t\t\tmu.Lock()\n\t\t\t\tmultiErrors = multierror.Append(multiErrors,\n\t\t\t\t\tfmt.Errorf(\"Couldn't connect to '%s:%s'\", label, kiteId))\n\t\t\t\tmu.Unlock()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer klientRef.Close()\n\n\t\t\tif err := klientRef.Ping(); err != nil {\n\t\t\t\tmu.Lock()\n\t\t\t\tmultiErrors = multierror.Append(multiErrors,\n\t\t\t\t\tfmt.Errorf(\"Couldn't send ping to '%s:%s'\", label, kiteId))\n\t\t\t\tmu.Unlock()\n\t\t\t}\n\t\t}(l, k)\n\t}\n\n\twg.Wait()\n\n\treturn multiErrors\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ Copyright (C) 2013 The Docker Cloud authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/proppy\/docker-cloud\/dockercloud\"\n)\n\n\/\/ Try to connect to a tunnel to the docker dameon if it exists.\n\/\/ url is the URL to test.\n\/\/ returns true, if the connection was successful, false otherwise\ntype Tunnel struct {\n\turl.URL\n}\n\nfunc (t Tunnel) isActive() bool {\n\t_, err := http.Get(t.String())\n\treturn err == nil\n}\n\ntype ProxyServer struct {\n\tcloud dockercloud.Cloud\n}\n\nfunc (server ProxyServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\terr := server.doServe(w, r)\n\tif err != nil {\n\t\tfmt.Printf(\"Error: %s\\n\", err)\n\t\tw.WriteHeader(500)\n\t\tfmt.Fprintf(w, \"{'error': '%s'}\", err)\n\t}\n}\n\nfunc (server ProxyServer) doServe(w http.ResponseWriter, r *http.Request) error {\n\tvar err error\n\tvar ip string\n\tpath := r.URL.Path\n\tquery := r.URL.RawQuery\n\thost := fmt.Sprintf(\"localhost:%d\", *localPort)\n\ttargetUrl := fmt.Sprintf(\"http:\/\/%s%s?%s\", host, path, query)\n\n\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\n\t\/\/ Try to find a VM instance.\n\tip, err = server.cloud.GetPublicIPAddress(*instanceName, *zone)\n\tinstanceRunning := len(ip) > 0\n\t\/\/ err is 404 if the instance doesn't exist, so we only error out when\n\t\/\/ instanceRunning is true.\n\tif err != nil && instanceRunning {\n\t\treturn err\n\t}\n\n\t\/\/ If there's no VM instance, and the request is 'ps' just return []\n\tif r.Method == \"GET\" && strings.HasSuffix(path, \"\/containers\/json\") && !instanceRunning {\n\t\tw.WriteHeader(200)\n\t\tfmt.Fprintf(w, \"[]\")\n\t\treturn nil\n\t}\n\n\t\/\/ Otherwise create a new VM.\n\tif !instanceRunning {\n\t\tip, err = server.cloud.CreateInstance(*instanceName, *zone)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Test for the SSH tunnel, create if it doesn't exist.\n\ttunnelUrl, err := url.Parse(\"http:\/\/\" + host + \"\/v1.6\/containers\/json\")\n\tif err != nil {\n\t\treturn err\n\t}\n\ttunnel := Tunnel{*tunnelUrl}\n\n\tif !tunnel.isActive() {\n\t\tfmt.Printf(\"Creating tunnel\")\n\t\t_, err = server.cloud.OpenSecureTunnel(*instanceName, *zone, *tunnelPort, *dockerPort)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = proxyRequest(targetUrl, r, w)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif strings.HasSuffix(path, \"\/stop\") {\n\t\tserver.maybeDelete(host, *instanceName, *zone)\n\t}\n\treturn nil\n}\n\nfunc proxyRequest(url string, r *http.Request, w http.ResponseWriter) error {\n\tvar res *http.Response\n\tvar err error\n\n\t\/\/ Proxy the request.\n\tif r.Method == \"GET\" {\n\t\tres, err = http.Get(url)\n\t}\n\tif r.Method == \"POST\" {\n\t\tres, err = http.Post(url, \"application\/json\", r.Body)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tw.WriteHeader(res.StatusCode)\n\tdefer res.Body.Close()\n\t\/\/ TODO(bburns) : Intercept 'ps' here and substitute in the ip address.\n\t_, err = io.Copy(w, res.Body)\n\treturn err\n}\n\n\/\/ TODO(bburns) : clone this from docker somehow?\ntype ContainerPort struct {\n\tPrivatePort float64\n\tPublicPort float64\n\tType string\n}\n\ntype ContainerStatus struct {\n\tId string\n\tImage string\n\tCommand string\n\tCreated float64\n\tStatus string\n\tPorts []ContainerPort\n\tSizeRW float64\n\tSizeRootFs float64\n}\n\nfunc (server ProxyServer) maybeDelete(host string, instanceName string, zone string) error {\n\tres, err := http.Get(fmt.Sprintf(\"http:\/\/%s\/v1.6\/containers\/json\", host))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(string(body))\n\tvar containers []ContainerStatus\n\terr = json.Unmarshal(body, &containers)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(containers) == 0 {\n\t\terr = server.cloud.DeleteInstance(instanceName, zone)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nvar (\n\tclientId = flag.String(\"id\", \"676599397109-0te3n95co16j9mkinnq6vdhphp4nnd06.apps.googleusercontent.com\", \"Client id\")\n\tclientSecret = flag.String(\"secret\", \"JnMnI5z9iH7YItv_jy_TZ1Hg\", \"Client Secret\")\n\tscope = flag.String(\"scope\", \"https:\/\/www.googleapis.com\/auth\/userinfo.profile https:\/\/www.googleapis.com\/auth\/compute https:\/\/www.googleapis.com\/auth\/devstorage.read_write\", \"OAuth Scope\")\n\tcode = flag.String(\"code\", \"\", \"Authorization code\")\n\tprojectId = flag.String(\"project\", \"\", \"Google Cloud Project Name\")\n\tproxyPort = flag.Int(\"port\", 8080, \"The local port to run on.\")\n\tdockerPort = flag.Int(\"dockerport\", 8000, \"The remote port to run docker on\")\n\ttunnelPort = flag.Int(\"tunnelport\", 8001, \"The local port open the tunnel to docker\")\n\tinstanceName = flag.String(\"instancename\", \"docker-instance\", \"The name of the instance\")\n\tzone = flag.String(\"zone\", \"us-central1-a\", \"The zone to run in\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tserver := ProxyServer{\n\t\tcloud: dockercloud.NewCloudGce(*clientId, *clientSecret, *scope, *code, *projectId),\n\t}\n\thttp.Handle(\"\/\", server)\n\taddr := fmt.Sprintf(\":%d\", *proxyPort)\n\tlog.Print(\"listening on \", addr)\n\tlog.Fatal(http.ListenAndServe(addr, nil))\n}\n<commit_msg>docker-cloud: fix port<commit_after>\/\/\n\/\/ Copyright (C) 2013 The Docker Cloud authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/proppy\/docker-cloud\/dockercloud\"\n)\n\n\/\/ Try to connect to a tunnel to the docker dameon if it exists.\n\/\/ url is the URL to test.\n\/\/ returns true, if the connection was successful, false otherwise\ntype Tunnel struct {\n\turl.URL\n}\n\nfunc (t Tunnel) isActive() bool {\n\t_, err := http.Get(t.String())\n\treturn err == nil\n}\n\ntype ProxyServer struct {\n\tcloud dockercloud.Cloud\n}\n\nfunc (server ProxyServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\terr := server.doServe(w, r)\n\tif err != nil {\n\t\tfmt.Printf(\"Error: %s\\n\", err)\n\t\tw.WriteHeader(500)\n\t\tfmt.Fprintf(w, \"{'error': '%s'}\", err)\n\t}\n}\n\nfunc (server ProxyServer) doServe(w http.ResponseWriter, r *http.Request) error {\n\tvar err error\n\tvar ip string\n\tpath := r.URL.Path\n\tquery := r.URL.RawQuery\n\thost := fmt.Sprintf(\"localhost:%d\", *proxyPort)\n\ttargetUrl := fmt.Sprintf(\"http:\/\/%s%s?%s\", host, path, query)\n\n\tw.Header().Add(\"Content-Type\", \"application\/json\")\n\n\t\/\/ Try to find a VM instance.\n\tip, err = server.cloud.GetPublicIPAddress(*instanceName, *zone)\n\tinstanceRunning := len(ip) > 0\n\t\/\/ err is 404 if the instance doesn't exist, so we only error out when\n\t\/\/ instanceRunning is true.\n\tif err != nil && instanceRunning {\n\t\treturn err\n\t}\n\n\t\/\/ If there's no VM instance, and the request is 'ps' just return []\n\tif r.Method == \"GET\" && strings.HasSuffix(path, \"\/containers\/json\") && !instanceRunning {\n\t\tw.WriteHeader(200)\n\t\tfmt.Fprintf(w, \"[]\")\n\t\treturn nil\n\t}\n\n\t\/\/ Otherwise create a new VM.\n\tif !instanceRunning {\n\t\tip, err = server.cloud.CreateInstance(*instanceName, *zone)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Test for the SSH tunnel, create if it doesn't exist.\n\ttunnelUrl, err := url.Parse(\"http:\/\/\" + host + \"\/v1.6\/containers\/json\")\n\tif err != nil {\n\t\treturn err\n\t}\n\ttunnel := Tunnel{*tunnelUrl}\n\n\tif !tunnel.isActive() {\n\t\tfmt.Printf(\"Creating tunnel\")\n\t\t_, err = server.cloud.OpenSecureTunnel(*instanceName, *zone, *tunnelPort, *dockerPort)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = proxyRequest(targetUrl, r, w)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif strings.HasSuffix(path, \"\/stop\") {\n\t\tserver.maybeDelete(host, *instanceName, *zone)\n\t}\n\treturn nil\n}\n\nfunc proxyRequest(url string, r *http.Request, w http.ResponseWriter) error {\n\tvar res *http.Response\n\tvar err error\n\n\t\/\/ Proxy the request.\n\tif r.Method == \"GET\" {\n\t\tres, err = http.Get(url)\n\t}\n\tif r.Method == \"POST\" {\n\t\tres, err = http.Post(url, \"application\/json\", r.Body)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tw.WriteHeader(res.StatusCode)\n\tdefer res.Body.Close()\n\t\/\/ TODO(bburns) : Intercept 'ps' here and substitute in the ip address.\n\t_, err = io.Copy(w, res.Body)\n\treturn err\n}\n\n\/\/ TODO(bburns) : clone this from docker somehow?\ntype ContainerPort struct {\n\tPrivatePort float64\n\tPublicPort float64\n\tType string\n}\n\ntype ContainerStatus struct {\n\tId string\n\tImage string\n\tCommand string\n\tCreated float64\n\tStatus string\n\tPorts []ContainerPort\n\tSizeRW float64\n\tSizeRootFs float64\n}\n\nfunc (server ProxyServer) maybeDelete(host string, instanceName string, zone string) error {\n\tres, err := http.Get(fmt.Sprintf(\"http:\/\/%s\/v1.6\/containers\/json\", host))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer res.Body.Close()\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(string(body))\n\tvar containers []ContainerStatus\n\terr = json.Unmarshal(body, &containers)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(containers) == 0 {\n\t\terr = server.cloud.DeleteInstance(instanceName, zone)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nvar (\n\tclientId = flag.String(\"id\", \"676599397109-0te3n95co16j9mkinnq6vdhphp4nnd06.apps.googleusercontent.com\", \"Client id\")\n\tclientSecret = flag.String(\"secret\", \"JnMnI5z9iH7YItv_jy_TZ1Hg\", \"Client Secret\")\n\tscope = flag.String(\"scope\", \"https:\/\/www.googleapis.com\/auth\/userinfo.profile https:\/\/www.googleapis.com\/auth\/compute https:\/\/www.googleapis.com\/auth\/devstorage.read_write\", \"OAuth Scope\")\n\tcode = flag.String(\"code\", \"\", \"Authorization code\")\n\tprojectId = flag.String(\"project\", \"\", \"Google Cloud Project Name\")\n\tproxyPort = flag.Int(\"port\", 8080, \"The local port to run on.\")\n\tdockerPort = flag.Int(\"dockerport\", 8000, \"The remote port to run docker on\")\n\ttunnelPort = flag.Int(\"tunnelport\", 8001, \"The local port open the tunnel to docker\")\n\tinstanceName = flag.String(\"instancename\", \"docker-instance\", \"The name of the instance\")\n\tzone = flag.String(\"zone\", \"us-central1-a\", \"The zone to run in\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tserver := ProxyServer{\n\t\tcloud: dockercloud.NewCloudGce(*clientId, *clientSecret, *scope, *code, *projectId),\n\t}\n\thttp.Handle(\"\/\", server)\n\taddr := fmt.Sprintf(\":%d\", *proxyPort)\n\tlog.Print(\"listening on \", addr)\n\tlog.Fatal(http.ListenAndServe(addr, nil))\n}\n<|endoftext|>"} {"text":"<commit_before>package build\n\nimport (\n\t\"os\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestInfo(t *testing.T) {\n\tbuild := &Build{\n\t\tDoc: \"Test documentation\",\n\t\tDefault: []string{\"default\"},\n\t\tRepository: \"repository\",\n\t\tSingleton: \"12345\",\n\t\tExtends: []string{\"foo\", \"bar\"},\n\t\tConfig: []string{\"foo\", \"bar\"},\n\t\t\/\/Scripts: []string{\"foo\", \"bar\"},\n\t\tTargets: map[string]*Target{\n\t\t\t\"test1\": {\n\t\t\t\tDoc: \"Test 1 doc\",\n\t\t\t\tDepends: []string{\"foo\", \"bar\"},\n\t\t\t},\n\t\t\t\"test2\": {\n\t\t\t\tDoc: \"Test 2 doc\",\n\t\t\t},\n\t\t},\n\t\tEnvironment: map[string]string{\n\t\t\t\"FOO\": \"SPAM\",\n\t\t\t\"BAR\": \"EGGS\",\n\t\t},\n\t\tProperties: map[string]interface{}{\n\t\t\t\"foo\": \"spam\",\n\t\t\t\"bar\": \"eggs\",\n\t\t},\n\t}\n\tcontext := NewContext(build)\n\terr := context.Init()\n\tif err != nil {\n\t\tt.Errorf(\"Failure: %v\", err)\n\t}\n\t\/\/build.Properties = build.GetProperties()\n\texpected := `doc: Test documentation\ndefault: [default]\nrepository: repository\nsingleton: 12345\nextends:\n- foo\n- bar\nconfiguration:\n- foo\n- bar\n\nenvironment:\n BAR: \"EGGS\"\n FOO: \"SPAM\"\n\nproperties:\n bar: \"eggs\"\n foo: \"spam\"\n\ntargets:\n test1: Test 1 doc [foo, bar]\n test2: Test 2 doc`\n\tinfo, err := build.Info(context)\n\tif err != nil {\n\t\tt.Errorf(\"Failure: %v\", err)\n\t}\n\tif info != expected {\n\t\tt.Errorf(\"Bad build info: %s\", info)\n\t}\n}\n\nfunc TestInfoDoc(t *testing.T) {\n\tbuild := Build{\n\t\tDoc: \"Test documentation\",\n\t}\n\tif build.infoDoc() != \"doc: Test documentation\\n\" {\n\t\tt.Errorf(\"Bad build doc: %s\", build.infoDoc())\n\t}\n}\n\nfunc TestInfoDefault(t *testing.T) {\n\tbuild := Build{\n\t\tDefault: []string{\"default\"},\n\t}\n\tif build.infoDefault() != \"default: [default]\\n\" {\n\t\tt.Errorf(\"Bad build default: %s\", build.infoDefault())\n\t}\n}\n\nfunc TestInfoRepository(t *testing.T) {\n\tbuild := Build{\n\t\tRepository: \"repository\",\n\t}\n\tif build.infoRepository() != \"repository: repository\\n\" {\n\t\tt.Errorf(\"Bad build repository: %s\", build.infoRepository())\n\t}\n}\n\nfunc TestInfoSingleton(t *testing.T) {\n\tbuild := &Build{\n\t\tSingleton: \"12345\",\n\t}\n\tcontext := NewContext(build)\n\tif build.infoSingleton(context) != \"singleton: 12345\\n\" {\n\t\tt.Errorf(\"Bad build singleton: %s\", build.infoSingleton(context))\n\t}\n}\n\nfunc TestInfoExtends(t *testing.T) {\n\tbuild := &Build{\n\t\tExtends: []string{\"foo\", \"bar\"},\n\t}\n\tif build.infoExtends() != \"extends:\\n- foo\\n- bar\\n\" {\n\t\tt.Errorf(\"Bad build extends: %s\", build.infoExtends())\n\t}\n}\n\nfunc TestInfoConfiguration(t *testing.T) {\n\tbuild := &Build{\n\t\tConfig: []string{\"foo\", \"bar\"},\n\t}\n\tif build.infoConfiguration() != \"configuration:\\n- foo\\n- bar\\n\" {\n\t\tt.Errorf(\"Bad build config: %s\", build.infoConfiguration())\n\t}\n}\n\nfunc TestInfoContext(t *testing.T) {\n\tbuild := &Build{\n\t\tScripts: []string{\"foo\", \"bar\"},\n\t}\n\tif build.infoContext() != \"context:\\n- foo\\n- bar\\n\" {\n\t\tt.Errorf(\"Bad build context: %s\", build.infoContext())\n\t}\n}\n\nfunc TestInfoProperties(t *testing.T) {\n\tbuild := &Build{\n\t\tProperties: map[string]interface{}{\n\t\t\t\"foo\": \"spam\",\n\t\t\t\"bar\": \"eggs\",\n\t\t},\n\t}\n\tcontext := NewContext(build)\n\tcontext.Init()\n\tproperties, err := build.infoProperties(context)\n\tif err != nil {\n\t\tt.Errorf(\"getting properties: %v\", err)\n\t}\n\texpected := `properties:\n bar: \"eggs\"\n foo: \"spam\"\n`\n\tif properties != expected {\n\t\tt.Errorf(\"Bad properties info: %s\", properties)\n\t}\n}\n\nfunc TestInfoEnvironment(t *testing.T) {\n\tbuild := &Build{\n\t\tEnvironment: map[string]string{\n\t\t\t\"FOO\": \"SPAM\",\n\t\t\t\"BAR\": \"EGGS\",\n\t\t},\n\t}\n\tenvironment := build.infoEnvironment()\n\texpected := `environment:\n BAR: \"EGGS\"\n FOO: \"SPAM\"\n`\n\tif environment != expected {\n\t\tt.Errorf(\"Bad properties info: %s\", environment)\n\t}\n}\n\nfunc TestInfoTargets(t *testing.T) {\n\tbuild := &Build{\n\t\tTargets: map[string]*Target{\n\t\t\t\"test1\": {\n\t\t\t\tDoc: \"Test 1 doc\",\n\t\t\t\tDepends: []string{\"foo\", \"bar\"},\n\t\t\t},\n\t\t\t\"test2\": {\n\t\t\t\tDoc: \"Test 2 doc\",\n\t\t\t},\n\t\t},\n\t}\n\texpected := `targets:\n test1: Test 1 doc [foo, bar]\n test2: Test 2 doc\n`\n\tif build.infoTargets() != expected {\n\t\tt.Errorf(\"Bad targets info: '%s'\", build.infoTargets())\n\t}\n}\n\nfunc TestInfoTasks(t *testing.T) {\n\tTaskMap = make(map[string]TaskDesc)\n\ttype testArgs struct {\n\t\tTest string\n\t}\n\tAddTask(TaskDesc{\n\t\tName: \"task\",\n\t\tFunc: testFunc,\n\t\tArgs: reflect.TypeOf(testArgs{}),\n\t\tHelp: `Task documentation.`,\n\t})\n\ttasks := InfoTasks()\n\tif tasks != \"task\" {\n\t\tt.Errorf(\"Bad tasks: %s\", tasks)\n\t}\n}\n\nfunc TestInfoTask(t *testing.T) {\n\tTaskMap = make(map[string]TaskDesc)\n\ttype testArgs struct {\n\t\tTest string\n\t}\n\tAddTask(TaskDesc{\n\t\tName: \"task\",\n\t\tFunc: testFunc,\n\t\tArgs: reflect.TypeOf(testArgs{}),\n\t\tHelp: `Task documentation.`,\n\t})\n\ttask := InfoTask(\"task\")\n\tif task != \"Task documentation.\" {\n\t\tt.Errorf(\"Bad task: %s\", task)\n\t}\n}\n\nfunc TestInfoBuiltins(t *testing.T) {\n\tBuiltinMap = make(map[string]BuiltinDesc)\n\tAddBuiltin(BuiltinDesc{\n\t\tName: \"test\",\n\t\tFunc: TestInfoBuiltins,\n\t\tHelp: `Test documentation.`,\n\t})\n\tbuiltins := InfoBuiltins()\n\tif builtins != \"test\" {\n\t\tt.Errorf(\"Bad builtins: %s\", builtins)\n\t}\n}\n\nfunc TestInfoBuiltin(t *testing.T) {\n\tBuiltinMap = make(map[string]BuiltinDesc)\n\tAddBuiltin(BuiltinDesc{\n\t\tName: \"test\",\n\t\tFunc: TestInfoBuiltins,\n\t\tHelp: `Test documentation.`,\n\t})\n\tinfo := InfoBuiltin(\"test\")\n\tif info != \"Test documentation.\" {\n\t\tt.Errorf(\"Bad builtin info: %s\", info)\n\t}\n}\n\nfunc TestInfoThemes(t *testing.T) {\n\tthemes := InfoThemes()\n\tif themes != \"bee blue bold cyan fire green magenta marine nature red reverse rgb yellow\" {\n\t\tt.Errorf(\"Bad themes\")\n\t}\n}\n\nfunc TestInfoTemplates(t *testing.T) {\n\trepo := \"\/tmp\/neon\"\n\tWriteFile(repo+\"\/foo\/bar\", \"template1.tpl\", \"\")\n\tWriteFile(repo+\"\/foo\/bar\", \"template2.tpl\", \"\")\n\tdefer os.RemoveAll(repo)\n\tparents := InfoTemplates(repo)\n\tif parents != \"foo\/bar\/template1.tpl\\nfoo\/bar\/template2.tpl\" {\n\t\tt.Errorf(\"Bad templates info: %s\", parents)\n\t}\n}\n\nfunc TestInfoParents(t *testing.T) {\n\trepo := \"\/tmp\/neon\"\n\tWriteFile(repo+\"\/foo\/bar\", \"parent1.yml\", \"\")\n\tWriteFile(repo+\"\/foo\/bar\", \"parent2.yml\", \"\")\n\tdefer os.RemoveAll(repo)\n\tparents := InfoParents(repo)\n\tif parents != \"foo\/bar\/parent1.yml\\nfoo\/bar\/parent2.yml\" {\n\t\tt.Errorf(\"Bad parents info: %s\", parents)\n\t}\n}\n\nfunc testFunc(context *Context, args interface{}) error {\n\tcontext.SetProperty(\"test\", \"This is a test\")\n\treturn nil\n}\n\nfunc TestInfoReference(t *testing.T) {\n\tBuiltinMap = make(map[string]BuiltinDesc)\n\tAddBuiltin(BuiltinDesc{\n\t\tName: \"builtin\",\n\t\tFunc: TestInfoReference,\n\t\tHelp: `Builtin documentation.`,\n\t})\n\ttype testArgs struct {\n\t\tTest string\n\t}\n\tTaskMap = make(map[string]TaskDesc)\n\tAddTask(TaskDesc{\n\t\tName: \"task\",\n\t\tFunc: testFunc,\n\t\tArgs: reflect.TypeOf(testArgs{}),\n\t\tHelp: `Task documentation.`,\n\t})\n\tactual := InfoReference()\n\texpected := `# Tasks Reference\n\n## task\n\nTask documentation.\n\n# Builtins Reference\n\n## builtin\n\nBuiltin documentation.`\n\tif actual != expected {\n\t\tt.Errorf(\"Bad reference: %s\", actual)\n\t}\n}\n<commit_msg>Fixed unit test<commit_after>package build\n\nimport (\n\t\"os\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestInfo(t *testing.T) {\n\tbuild := &Build{\n\t\tFile: \"build.yml\",\n\t\tDir: \"\/home\/casa\/doc\/neon\",\n\t\tDoc: \"Test documentation\",\n\t\tDefault: []string{\"default\"},\n\t\tRepository: \"repository\",\n\t\tSingleton: \"12345\",\n\t\tExtends: []string{\"foo\", \"bar\"},\n\t\tConfig: []string{\"foo\", \"bar\"},\n\t\t\/\/Scripts: []string{\"foo\", \"bar\"},\n\t\tTargets: map[string]*Target{\n\t\t\t\"test1\": {\n\t\t\t\tDoc: \"Test 1 doc\",\n\t\t\t\tDepends: []string{\"foo\", \"bar\"},\n\t\t\t},\n\t\t\t\"test2\": {\n\t\t\t\tDoc: \"Test 2 doc\",\n\t\t\t},\n\t\t},\n\t\tEnvironment: map[string]string{\n\t\t\t\"FOO\": \"SPAM\",\n\t\t\t\"BAR\": \"EGGS\",\n\t\t},\n\t\tProperties: map[string]interface{}{\n\t\t\t\"foo\": \"spam\",\n\t\t\t\"bar\": \"eggs\",\n\t\t},\n\t}\n\tcontext := NewContext(build)\n\terr := context.Init()\n\tif err != nil {\n\t\tt.Errorf(\"Failure: %v\", err)\n\t}\n\t\/\/build.Properties = build.GetProperties()\n\texpected := `build: \/home\/casa\/doc\/neon\/build.yml\ndoc: Test documentation\ndefault: [default]\nrepository: repository\nsingleton: 12345\nextends:\n- foo\n- bar\nconfiguration:\n- foo\n- bar\n\nenvironment:\n BAR: \"EGGS\"\n FOO: \"SPAM\"\n\nproperties:\n bar: \"eggs\"\n foo: \"spam\"\n\ntargets:\n test1: Test 1 doc [foo, bar]\n test2: Test 2 doc`\n\tinfo, err := build.Info(context)\n\tif err != nil {\n\t\tt.Errorf(\"Failure: %v\", err)\n\t}\n\tif info != expected {\n\t\tt.Errorf(\"Bad build info: %s\", info)\n\t}\n}\n\nfunc TestInfoDoc(t *testing.T) {\n\tbuild := Build{\n\t\tDoc: \"Test documentation\",\n\t}\n\tif build.infoDoc() != \"doc: Test documentation\\n\" {\n\t\tt.Errorf(\"Bad build doc: %s\", build.infoDoc())\n\t}\n}\n\nfunc TestInfoDefault(t *testing.T) {\n\tbuild := Build{\n\t\tDefault: []string{\"default\"},\n\t}\n\tif build.infoDefault() != \"default: [default]\\n\" {\n\t\tt.Errorf(\"Bad build default: %s\", build.infoDefault())\n\t}\n}\n\nfunc TestInfoRepository(t *testing.T) {\n\tbuild := Build{\n\t\tRepository: \"repository\",\n\t}\n\tif build.infoRepository() != \"repository: repository\\n\" {\n\t\tt.Errorf(\"Bad build repository: %s\", build.infoRepository())\n\t}\n}\n\nfunc TestInfoSingleton(t *testing.T) {\n\tbuild := &Build{\n\t\tSingleton: \"12345\",\n\t}\n\tcontext := NewContext(build)\n\tif build.infoSingleton(context) != \"singleton: 12345\\n\" {\n\t\tt.Errorf(\"Bad build singleton: %s\", build.infoSingleton(context))\n\t}\n}\n\nfunc TestInfoExtends(t *testing.T) {\n\tbuild := &Build{\n\t\tExtends: []string{\"foo\", \"bar\"},\n\t}\n\tif build.infoExtends() != \"extends:\\n- foo\\n- bar\\n\" {\n\t\tt.Errorf(\"Bad build extends: %s\", build.infoExtends())\n\t}\n}\n\nfunc TestInfoConfiguration(t *testing.T) {\n\tbuild := &Build{\n\t\tConfig: []string{\"foo\", \"bar\"},\n\t}\n\tif build.infoConfiguration() != \"configuration:\\n- foo\\n- bar\\n\" {\n\t\tt.Errorf(\"Bad build config: %s\", build.infoConfiguration())\n\t}\n}\n\nfunc TestInfoContext(t *testing.T) {\n\tbuild := &Build{\n\t\tScripts: []string{\"foo\", \"bar\"},\n\t}\n\tif build.infoContext() != \"context:\\n- foo\\n- bar\\n\" {\n\t\tt.Errorf(\"Bad build context: %s\", build.infoContext())\n\t}\n}\n\nfunc TestInfoProperties(t *testing.T) {\n\tbuild := &Build{\n\t\tProperties: map[string]interface{}{\n\t\t\t\"foo\": \"spam\",\n\t\t\t\"bar\": \"eggs\",\n\t\t},\n\t}\n\tcontext := NewContext(build)\n\tcontext.Init()\n\tproperties, err := build.infoProperties(context)\n\tif err != nil {\n\t\tt.Errorf(\"getting properties: %v\", err)\n\t}\n\texpected := `properties:\n bar: \"eggs\"\n foo: \"spam\"\n`\n\tif properties != expected {\n\t\tt.Errorf(\"Bad properties info: %s\", properties)\n\t}\n}\n\nfunc TestInfoEnvironment(t *testing.T) {\n\tbuild := &Build{\n\t\tEnvironment: map[string]string{\n\t\t\t\"FOO\": \"SPAM\",\n\t\t\t\"BAR\": \"EGGS\",\n\t\t},\n\t}\n\tenvironment := build.infoEnvironment()\n\texpected := `environment:\n BAR: \"EGGS\"\n FOO: \"SPAM\"\n`\n\tif environment != expected {\n\t\tt.Errorf(\"Bad properties info: %s\", environment)\n\t}\n}\n\nfunc TestInfoTargets(t *testing.T) {\n\tbuild := &Build{\n\t\tTargets: map[string]*Target{\n\t\t\t\"test1\": {\n\t\t\t\tDoc: \"Test 1 doc\",\n\t\t\t\tDepends: []string{\"foo\", \"bar\"},\n\t\t\t},\n\t\t\t\"test2\": {\n\t\t\t\tDoc: \"Test 2 doc\",\n\t\t\t},\n\t\t},\n\t}\n\texpected := `targets:\n test1: Test 1 doc [foo, bar]\n test2: Test 2 doc\n`\n\tif build.infoTargets() != expected {\n\t\tt.Errorf(\"Bad targets info: '%s'\", build.infoTargets())\n\t}\n}\n\nfunc TestInfoTasks(t *testing.T) {\n\tTaskMap = make(map[string]TaskDesc)\n\ttype testArgs struct {\n\t\tTest string\n\t}\n\tAddTask(TaskDesc{\n\t\tName: \"task\",\n\t\tFunc: testFunc,\n\t\tArgs: reflect.TypeOf(testArgs{}),\n\t\tHelp: `Task documentation.`,\n\t})\n\ttasks := InfoTasks()\n\tif tasks != \"task\" {\n\t\tt.Errorf(\"Bad tasks: %s\", tasks)\n\t}\n}\n\nfunc TestInfoTask(t *testing.T) {\n\tTaskMap = make(map[string]TaskDesc)\n\ttype testArgs struct {\n\t\tTest string\n\t}\n\tAddTask(TaskDesc{\n\t\tName: \"task\",\n\t\tFunc: testFunc,\n\t\tArgs: reflect.TypeOf(testArgs{}),\n\t\tHelp: `Task documentation.`,\n\t})\n\ttask := InfoTask(\"task\")\n\tif task != \"Task documentation.\" {\n\t\tt.Errorf(\"Bad task: %s\", task)\n\t}\n}\n\nfunc TestInfoBuiltins(t *testing.T) {\n\tBuiltinMap = make(map[string]BuiltinDesc)\n\tAddBuiltin(BuiltinDesc{\n\t\tName: \"test\",\n\t\tFunc: TestInfoBuiltins,\n\t\tHelp: `Test documentation.`,\n\t})\n\tbuiltins := InfoBuiltins()\n\tif builtins != \"test\" {\n\t\tt.Errorf(\"Bad builtins: %s\", builtins)\n\t}\n}\n\nfunc TestInfoBuiltin(t *testing.T) {\n\tBuiltinMap = make(map[string]BuiltinDesc)\n\tAddBuiltin(BuiltinDesc{\n\t\tName: \"test\",\n\t\tFunc: TestInfoBuiltins,\n\t\tHelp: `Test documentation.`,\n\t})\n\tinfo := InfoBuiltin(\"test\")\n\tif info != \"Test documentation.\" {\n\t\tt.Errorf(\"Bad builtin info: %s\", info)\n\t}\n}\n\nfunc TestInfoThemes(t *testing.T) {\n\tthemes := InfoThemes()\n\tif themes != \"bee blue bold cyan fire green magenta marine nature red reverse rgb yellow\" {\n\t\tt.Errorf(\"Bad themes\")\n\t}\n}\n\nfunc TestInfoTemplates(t *testing.T) {\n\trepo := \"\/tmp\/neon\"\n\tWriteFile(repo+\"\/foo\/bar\", \"template1.tpl\", \"\")\n\tWriteFile(repo+\"\/foo\/bar\", \"template2.tpl\", \"\")\n\tdefer os.RemoveAll(repo)\n\tparents := InfoTemplates(repo)\n\tif parents != \"foo\/bar\/template1.tpl\\nfoo\/bar\/template2.tpl\" {\n\t\tt.Errorf(\"Bad templates info: %s\", parents)\n\t}\n}\n\nfunc TestInfoParents(t *testing.T) {\n\trepo := \"\/tmp\/neon\"\n\tWriteFile(repo+\"\/foo\/bar\", \"parent1.yml\", \"\")\n\tWriteFile(repo+\"\/foo\/bar\", \"parent2.yml\", \"\")\n\tdefer os.RemoveAll(repo)\n\tparents := InfoParents(repo)\n\tif parents != \"foo\/bar\/parent1.yml\\nfoo\/bar\/parent2.yml\" {\n\t\tt.Errorf(\"Bad parents info: %s\", parents)\n\t}\n}\n\nfunc testFunc(context *Context, args interface{}) error {\n\tcontext.SetProperty(\"test\", \"This is a test\")\n\treturn nil\n}\n\nfunc TestInfoReference(t *testing.T) {\n\tBuiltinMap = make(map[string]BuiltinDesc)\n\tAddBuiltin(BuiltinDesc{\n\t\tName: \"builtin\",\n\t\tFunc: TestInfoReference,\n\t\tHelp: `Builtin documentation.`,\n\t})\n\ttype testArgs struct {\n\t\tTest string\n\t}\n\tTaskMap = make(map[string]TaskDesc)\n\tAddTask(TaskDesc{\n\t\tName: \"task\",\n\t\tFunc: testFunc,\n\t\tArgs: reflect.TypeOf(testArgs{}),\n\t\tHelp: `Task documentation.`,\n\t})\n\tactual := InfoReference()\n\texpected := `# Tasks Reference\n\n## task\n\nTask documentation.\n\n# Builtins Reference\n\n## builtin\n\nBuiltin documentation.`\n\tif actual != expected {\n\t\tt.Errorf(\"Bad reference: %s\", actual)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage opengl\n\n\/\/ Since js.Object (Program) can't be keys of a map, use integers (programID) instead.\n\nvar uniformLocationCache = map[programID]map[string]UniformLocation{}\nvar attribLocationCache = map[programID]map[string]AttribLocation{}\n\ntype UniformLocationGetter interface {\n\tgetUniformLocation(p Program, location string) UniformLocation\n}\n\n\/\/ TODO: Rename these functions not to be confusing\n\nfunc GetUniformLocation(g UniformLocationGetter, p Program, location string) UniformLocation {\n\tid := p.id()\n\tif _, ok := uniformLocationCache[id]; !ok {\n\t\tuniformLocationCache[id] = map[string]UniformLocation{}\n\t}\n\tl, ok := uniformLocationCache[id][location]\n\tif !ok {\n\t\tl = g.getUniformLocation(p, location)\n\t\tuniformLocationCache[id][location] = l\n\t}\n\treturn l\n}\n\ntype AttribLocationGetter interface {\n\tgetAttribLocation(p Program, location string) AttribLocation\n}\n\nfunc GetAttribLocation(g AttribLocationGetter, p Program, location string) AttribLocation {\n\tid := p.id()\n\tif _, ok := attribLocationCache[id]; !ok {\n\t\tattribLocationCache[id] = map[string]AttribLocation{}\n\t}\n\tl, ok := attribLocationCache[id][location]\n\tif !ok {\n\t\tl = g.getAttribLocation(p, location)\n\t\tattribLocationCache[id][location] = l\n\t}\n\treturn l\n}\n<commit_msg>opengl: Unexport some interfaces<commit_after>\/\/ Copyright 2014 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage opengl\n\n\/\/ Since js.Object (Program) can't be keys of a map, use integers (programID) instead.\n\nvar uniformLocationCache = map[programID]map[string]UniformLocation{}\nvar attribLocationCache = map[programID]map[string]AttribLocation{}\n\ntype uniformLocationGetter interface {\n\tgetUniformLocation(p Program, location string) UniformLocation\n}\n\n\/\/ TODO: Rename these functions not to be confusing\n\nfunc GetUniformLocation(g uniformLocationGetter, p Program, location string) UniformLocation {\n\tid := p.id()\n\tif _, ok := uniformLocationCache[id]; !ok {\n\t\tuniformLocationCache[id] = map[string]UniformLocation{}\n\t}\n\tl, ok := uniformLocationCache[id][location]\n\tif !ok {\n\t\tl = g.getUniformLocation(p, location)\n\t\tuniformLocationCache[id][location] = l\n\t}\n\treturn l\n}\n\ntype attribLocationGetter interface {\n\tgetAttribLocation(p Program, location string) AttribLocation\n}\n\nfunc GetAttribLocation(g attribLocationGetter, p Program, location string) AttribLocation {\n\tid := p.id()\n\tif _, ok := attribLocationCache[id]; !ok {\n\t\tattribLocationCache[id] = map[string]AttribLocation{}\n\t}\n\tl, ok := attribLocationCache[id][location]\n\tif !ok {\n\t\tl = g.getAttribLocation(p, location)\n\t\tattribLocationCache[id][location] = l\n\t}\n\treturn l\n}\n<|endoftext|>"} {"text":"<commit_before>package apns\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n)\n\n\/\/ The maximum number of seconds we're willing to wait for a response\n\/\/ from the Apple Push Notification Service.\nconst TimeoutSeconds = 5\n\n\/\/ APNs replies only when there is an error.\n\/\/ Response len is 6 bytes:\n\/\/ byte 0 : Command.\n\/\/ byte 1 : Status. See `ApplePushResponseCodes`\n\/\/ bytes 2-5 : Identifier. The identifier of the push notification that is the cause of the error\n\n\/\/ Response command\n\/\/ Apple response command is `AppleResponseCommand`\ntype PushResponseCommand uint8\n\nconst (\n\tAppleResponseCommand = 8\n\tLocalResponseCommand = 0xDD \/\/ A command used strictly locally by the library\n)\n\n\/\/ ApplePushResponseStatus is the status type (byte 1)\ntype ApplePushResponseStatus uint8\n\n\/\/ Status that APNs can reply (byte 1)\nconst (\n\tNoErrorsStatus = 0\n\tProcessingErrorStatus = 1\n\tMissingDeviceTokenErrorStatus = 2\n\tMissingTopicErrorStatus = 3\n\tMissingPayloadErrorStatus = 4\n\tInvalidTokenSizeErrorStatus = 5\n\tInvalidTopicSizeErrorStatus = 6\n\tInvalidPayloadSizeErrorStatus = 7\n\tInvalidTokenErrorStatus = 8\n\tShutdownErrorStatus = 10\n\tUnknownErrorStatus = 255\n)\n\nconst (\n\tRetryPushNotificationStatus = 1\n\tCanceledPushNotificationStatus = 2\n)\n\n\/\/ This enumerates the response codes that Apple defines\n\/\/ for push notification attempts.\nvar ApplePushResponseDescriptions = map[uint8]string{\n\tNoErrorsStatus: \"NO_ERRORS\",\n\tProcessingErrorStatus: \"PROCESSING_ERROR\",\n\tMissingDeviceTokenErrorStatus: \"MISSING_DEVICE_TOKEN\",\n\tMissingTopicErrorStatus: \"MISSING_TOPIC\",\n\tMissingPayloadErrorStatus: \"MISSING_PAYLOAD\",\n\tInvalidTokenSizeErrorStatus: \"INVALID_TOKEN_SIZE\",\n\tInvalidTopicSizeErrorStatus: \"INVALID_TOPIC_SIZE\",\n\tInvalidPayloadSizeErrorStatus: \"INVALID_PAYLOAD_SIZE\",\n\tInvalidTokenErrorStatus: \"INVALID_TOKEN\",\n\tShutdownErrorStatus: \"SHUTDOWN\",\n\tUnknownErrorStatus: \"UNKNOWN\",\n}\n\n\/\/ PushNotificationResponse details what Apple had to say, if anything.\ntype PushNotificationResponse struct {\n\tIdentifier uint32\n\tSuccess bool\n\tResponseCommand PushResponseCommand\n\tResponseStatus ApplePushResponseStatus\n\tAppleResponse string \/\/ Legacy field\n\tError error \/\/ Legacy field\n}\n\n\/\/ NewPushNotificationResponse creates and returns a new PushNotificationResponse\n\/\/ structure; it defaults to being unsuccessful at first.\nfunc NewPushNotificationResponse(pn *PushNotification) *PushNotificationResponse {\n\treturn &PushNotificationResponse{Identifier: pn.Identifier, Success: false}\n}\n\nfunc (pnr *PushNotificationResponse) FromRawAppleResponse(r []byte) {\n\n\tpnr.AppleResponse = ApplePushResponseDescriptions[r[1]]\n\n\tif r[1] == NoErrorsStatus { \/\/ No error, so timeout\n\t\tpnr.Success = true\n\t\tpnr.Error = nil\n\t} else {\n\t\tpnr.Success = false\n\t\tpnr.Error = errors.New(pnr.AppleResponse)\n\n\t\tpnr.ResponseCommand = PushResponseCommand(r[0])\n\t\tpnr.ResponseStatus = ApplePushResponseStatus(r[1])\n\t\tbinary.Read(bytes.NewBuffer(r[2:]), binary.BigEndian, &(pnr.Identifier))\n\t}\n\n}\n<commit_msg>Fix: wrong push notification response<commit_after>package apns\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"errors\"\n)\n\n\/\/ The maximum number of seconds we're willing to wait for a response\n\/\/ from the Apple Push Notification Service.\nconst TimeoutSeconds = 5\n\n\/\/ APNs replies only when there is an error.\n\/\/ Response len is 6 bytes:\n\/\/ byte 0 : Command.\n\/\/ byte 1 : Status. See `ApplePushResponseCodes`\n\/\/ bytes 2-5 : Identifier. The identifier of the push notification that is the cause of the error\n\n\/\/ Response command\n\/\/ Apple response command is `AppleResponseCommand`\ntype PushResponseCommand uint8\n\nconst (\n\tAppleResponseCommand = 8\n\tLocalResponseCommand = 0xDD \/\/ A command used strictly locally by the library\n)\n\n\/\/ ApplePushResponseStatus is the status type (byte 1)\ntype ApplePushResponseStatus uint8\n\n\/\/ Status that APNs can reply (byte 1)\nconst (\n\tNoErrorsStatus = 0\n\tProcessingErrorStatus = 1\n\tMissingDeviceTokenErrorStatus = 2\n\tMissingTopicErrorStatus = 3\n\tMissingPayloadErrorStatus = 4\n\tInvalidTokenSizeErrorStatus = 5\n\tInvalidTopicSizeErrorStatus = 6\n\tInvalidPayloadSizeErrorStatus = 7\n\tInvalidTokenErrorStatus = 8\n\tShutdownErrorStatus = 10\n\tUnknownErrorStatus = 255\n)\n\nconst (\n\tRetryPushNotificationStatus = 1\n\tCanceledPushNotificationStatus = 2\n)\n\n\/\/ This enumerates the response codes that Apple defines\n\/\/ for push notification attempts.\nvar ApplePushResponseDescriptions = map[uint8]string{\n\tNoErrorsStatus: \"NO_ERRORS\",\n\tProcessingErrorStatus: \"PROCESSING_ERROR\",\n\tMissingDeviceTokenErrorStatus: \"MISSING_DEVICE_TOKEN\",\n\tMissingTopicErrorStatus: \"MISSING_TOPIC\",\n\tMissingPayloadErrorStatus: \"MISSING_PAYLOAD\",\n\tInvalidTokenSizeErrorStatus: \"INVALID_TOKEN_SIZE\",\n\tInvalidTopicSizeErrorStatus: \"INVALID_TOPIC_SIZE\",\n\tInvalidPayloadSizeErrorStatus: \"INVALID_PAYLOAD_SIZE\",\n\tInvalidTokenErrorStatus: \"INVALID_TOKEN\",\n\tShutdownErrorStatus: \"SHUTDOWN\",\n\tUnknownErrorStatus: \"UNKNOWN\",\n}\n\nvar LocalResponseDescriptions = map[uint8]string{\n\tRetryPushNotificationStatus: \"LOCAL_ERROR_RETRY\",\n\tCanceledPushNotificationStatus: \"LOCAL_ERROR_CANCEL\",\n}\n\n\/\/ PushNotificationResponse details what Apple had to say, if anything.\ntype PushNotificationResponse struct {\n\tIdentifier uint32\n\tSuccess bool\n\tResponseCommand PushResponseCommand\n\tResponseStatus ApplePushResponseStatus\n\tAppleResponse string \/\/ Legacy field\n\tError error \/\/ Legacy field\n}\n\n\/\/ NewPushNotificationResponse creates and returns a new PushNotificationResponse\n\/\/ structure; it defaults to being unsuccessful at first.\nfunc NewPushNotificationResponse(pn *PushNotification) *PushNotificationResponse {\n\treturn &PushNotificationResponse{Identifier: pn.Identifier, Success: false}\n}\n\nfunc (pnr *PushNotificationResponse) FromRawAppleResponse(r []byte) {\n\n\tif r[1] == NoErrorsStatus { \/\/ No error, so timeout\n\t\tpnr.AppleResponse = ApplePushResponseDescriptions[NoErrorsStatus]\n\t\tpnr.Success = true\n\t\tpnr.Error = nil\n\t} else {\n\t\tpnr.Success = false\n\t\tpnr.ResponseCommand = PushResponseCommand(r[0])\n\t\tpnr.ResponseStatus = ApplePushResponseStatus(r[1])\n\t\tbinary.Read(bytes.NewBuffer(r[2:]), binary.BigEndian, &(pnr.Identifier))\n\n\t\tif pnr.ResponseCommand == AppleResponseCommand {\n\t\t\tpnr.AppleResponse = ApplePushResponseDescriptions[uint8(pnr.ResponseStatus)]\n\t\t} else {\n\t\t\tpnr.AppleResponse = LocalResponseDescriptions[uint8(pnr.ResponseStatus)]\n\t\t}\n\t\tpnr.Error = errors.New(pnr.AppleResponse)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package frank\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc TestExtract(t *testing.T) {\n\tvar msgs = make(map[string][]string)\n\tmsgs[\"Ich finde http:\/\/github.com\/lol toll, aber http:\/\/heise.de besser\"] = []string{\"http:\/\/github.com\/lol\", \"http:\/\/heise.de\"}\n\tmsgs[\"dort (http:\/\/deinemudda.de) gibts geile pics\"] = []string{\"http:\/\/deinemudda.de\"}\n\tmsgs[\"http:\/\/heise.de, letztens gefunden.\"] = []string{\"http:\/\/heise.de\"}\n\tmsgs[\"http-rfc ist doof\"] = []string{}\n\tmsgs[\"http:\/\/http:\/\/foo.de, letztens gefunden.\"] = []string{\"http:\/\/http:\/\/foo.de\"}\n\tmsgs[\"http:\/\/http:\/\/foo.de letztens gefunden\"] = []string{\"http:\/\/http:\/\/foo.de\"}\n\tmsgs[\"sECuRE: failed Dein Algo nicht auf https:\/\/maps.google.de\/maps?q=Frankfurt+(Oder)&hl=de ?\"] = []string{\"https:\/\/maps.google.de\/maps?q=Frankfurt+(Oder)&hl=de\"}\n\n\tfor from, to := range msgs {\n\t\tx := fmt.Sprintf(\"%v\", extract(from))\n\t\tto := fmt.Sprintf(\"%v\", to)\n\n\t\tif x != to {\n\t\t\tt.Errorf(\"extract(%v)\\n GOT: %v\\nWANT: %v\", from, x, to)\n\t\t}\n\t}\n}\n\nfunc TestTitleGet(t *testing.T) {\n\tvar samples = make(map[string]string)\n\tsamples[\"https:\/\/twitter.com\/dave_tucker\/status\/400269131255390210\"] = \"Dave Tucker (@dave_tucker): This morning the wife asked “Why is your phone issuing you death threats?”. Me: “Oh it’s just my new alarm clock” \/cc @CARROT_app\"\n\tsamples[\"http:\/\/twitter.com\/dave_tucker\/status\/400269131255390210\"] = \"Dave Tucker (@dave_tucker): This morning the wife asked “Why is your phone issuing you death threats?”. Me: “Oh it’s just my new alarm clock” \/cc @CARROT_app\"\n\tsamples[\"https:\/\/twitter.com\/Perspective_pic\/status\/400356645504831489\/photo\/1\"] = \"Perspective Pictures (@Perspective_pic): Sorry but this without a doubt the greatest thing ever seen on an air duct https:\/\/pbs.twimg.com\/media\/BY5aP2RIQAAWPl1.jpg:large\"\n\tsamples[\"https:\/\/twitter.com\/Perspective_pic\/status\/400356645504831489\"] = \"Perspective Pictures (@Perspective_pic): Sorry but this without a doubt the greatest thing ever seen on an air duct https:\/\/pbs.twimg.com\/media\/BY5aP2RIQAAWPl1.jpg:large\"\n\n\tfor url, title := range samples {\n\t\tx, _, _ := TitleGet(url)\n\t\tif x != title {\n\t\t\tt.Errorf(\"TitleGet(%v)\\n GOT: ||%v||\\nWANT: ||%v||\", url, x, title)\n\t\t}\n\t}\n}\n\nfunc TestClean(t *testing.T) {\n\tif x := clean(\"x‏‎​ x‏\"); x != \"x x\" {\n\t\tt.Errorf(\"clean does not remove all whitespace\/non-printable chars (got: %v)\", x)\n\t}\n\n\tif x := clean(\" trim \"); x != \"trim\" {\n\t\tt.Errorf(\"clean does not trim properly (got: %v)\", x)\n\t}\n}\n<commit_msg>test for previous commit<commit_after>package frank\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc TestExtract(t *testing.T) {\n\tvar msgs = make(map[string][]string)\n\tmsgs[\"Ich finde http:\/\/github.com\/lol toll, aber http:\/\/heise.de besser\"] = []string{\"http:\/\/github.com\/lol\", \"http:\/\/heise.de\"}\n\tmsgs[\"dort (http:\/\/deinemudda.de) gibts geile pics\"] = []string{\"http:\/\/deinemudda.de\"}\n\tmsgs[\"http:\/\/heise.de, letztens gefunden.\"] = []string{\"http:\/\/heise.de\"}\n\tmsgs[\"http-rfc ist doof\"] = []string{}\n\tmsgs[\"http:\/\/http:\/\/foo.de, letztens gefunden.\"] = []string{\"http:\/\/http:\/\/foo.de\"}\n\tmsgs[\"http:\/\/http:\/\/foo.de letztens gefunden\"] = []string{\"http:\/\/http:\/\/foo.de\"}\n\tmsgs[\"sECuRE: failed Dein Algo nicht auf https:\/\/maps.google.de\/maps?q=Frankfurt+(Oder)&hl=de ?\"] = []string{\"https:\/\/maps.google.de\/maps?q=Frankfurt+(Oder)&hl=de\"}\n\n\tfor from, to := range msgs {\n\t\tx := fmt.Sprintf(\"%v\", extract(from))\n\t\tto := fmt.Sprintf(\"%v\", to)\n\n\t\tif x != to {\n\t\t\tt.Errorf(\"extract(%v)\\n GOT: %v\\nWANT: %v\", from, x, to)\n\t\t}\n\t}\n}\n\nfunc TestTitleGet(t *testing.T) {\n\tvar samples = make(map[string]string)\n\tsamples[\"https:\/\/twitter.com\/dave_tucker\/status\/400269131255390210\"] = \"Dave Tucker (@dave_tucker): This morning the wife asked “Why is your phone issuing you death threats?”. Me: “Oh it’s just my new alarm clock” \/cc @CARROT_app\"\n\tsamples[\"http:\/\/twitter.com\/dave_tucker\/status\/400269131255390210\"] = \"Dave Tucker (@dave_tucker): This morning the wife asked “Why is your phone issuing you death threats?”. Me: “Oh it’s just my new alarm clock” \/cc @CARROT_app\"\n\tsamples[\"https:\/\/twitter.com\/Perspective_pic\/status\/400356645504831489\/photo\/1\"] = \"Perspective Pictures (@Perspective_pic): Sorry but this without a doubt the greatest thing ever seen on an air duct https:\/\/pbs.twimg.com\/media\/BY5aP2RIQAAWPl1.jpg:large\"\n\tsamples[\"https:\/\/twitter.com\/Perspective_pic\/status\/400356645504831489\"] = \"Perspective Pictures (@Perspective_pic): Sorry but this without a doubt the greatest thing ever seen on an air duct https:\/\/pbs.twimg.com\/media\/BY5aP2RIQAAWPl1.jpg:large\"\n\tsamples[\"https:\/\/twitter.com\/quityourjrob\/status\/405438033853313025\/photo\/1\"] = \"Joanna Robinson (@quityourjrob): How to tell if a toy is for boys or girls. https:\/\/pbs.twimg.com\/media\/BaBnvl5CYAAyYzm.jpg:large\"\n\n\tfor url, title := range samples {\n\t\tx, _, _ := TitleGet(url)\n\t\tif x != title {\n\t\t\tt.Errorf(\"TitleGet(%v)\\n GOT: ||%v||\\nWANT: ||%v||\", url, x, title)\n\t\t}\n\t}\n}\n\nfunc TestClean(t *testing.T) {\n\tif x := clean(\"x‏‎​ x‏\"); x != \"x x\" {\n\t\tt.Errorf(\"clean does not remove all whitespace\/non-printable chars (got: %v)\", x)\n\t}\n\n\tif x := clean(\" trim \"); x != \"trim\" {\n\t\tt.Errorf(\"clean does not trim properly (got: %v)\", x)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package mpawskinesisstreams\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/cloudwatch\"\n\tmp \"github.com\/mackerelio\/go-mackerel-plugin-helper\"\n)\n\nconst (\n\tnamespace = \"AWS\/Kinesis\"\n\tmetricsTypeAverage = \"Average\"\n\tmetricsTypeMaximum = \"Maximum\"\n\tmetricsTypeMinimum = \"Minimum\"\n)\n\ntype metrics struct {\n\tCloudWatchName string\n\tMackerelName string\n\tType string\n}\n\n\/\/ KinesisStreamsPlugin mackerel plugin for aws kinesis\ntype KinesisStreamsPlugin struct {\n\tName string\n\tPrefix string\n\n\tAccessKeyID string\n\tSecretAccessKey string\n\tRegion string\n\tCloudWatch *cloudwatch.CloudWatch\n}\n\n\/\/ MetricKeyPrefix interface for PluginWithPrefix\nfunc (p KinesisStreamsPlugin) MetricKeyPrefix() string {\n\tif p.Prefix == \"\" {\n\t\tp.Prefix = \"kinesis-streams\"\n\t}\n\treturn p.Prefix\n}\n\n\/\/ prepare creates CloudWatch instance\nfunc (p *KinesisStreamsPlugin) prepare() error {\n\tsess, err := session.NewSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconfig := aws.NewConfig()\n\tif p.AccessKeyID != \"\" && p.SecretAccessKey != \"\" {\n\t\tconfig = config.WithCredentials(credentials.NewStaticCredentials(p.AccessKeyID, p.SecretAccessKey, \"\"))\n\t}\n\tif p.Region != \"\" {\n\t\tconfig = config.WithRegion(p.Region)\n\t}\n\n\tp.CloudWatch = cloudwatch.New(sess, config)\n\n\treturn nil\n}\n\n\/\/ getLastPoint fetches a CloudWatch metric and parse\nfunc (p KinesisStreamsPlugin) getLastPoint(metric metrics) (float64, error) {\n\tnow := time.Now()\n\n\tdimensions := []*cloudwatch.Dimension{\n\t\t{\n\t\t\tName: aws.String(\"StreamName\"),\n\t\t\tValue: aws.String(p.Name),\n\t\t},\n\t}\n\n\tresponse, err := p.CloudWatch.GetMetricStatistics(&cloudwatch.GetMetricStatisticsInput{\n\t\tDimensions: dimensions,\n\t\tStartTime: aws.Time(now.Add(time.Duration(180) * time.Second * -1)), \/\/ 3 min\n\t\tEndTime: aws.Time(now),\n\t\tMetricName: aws.String(metric.CloudWatchName),\n\t\tPeriod: aws.Int64(60),\n\t\tStatistics: []*string{aws.String(metric.Type)},\n\t\tNamespace: aws.String(namespace),\n\t})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tdatapoints := response.Datapoints\n\tif len(datapoints) == 0 {\n\t\treturn 0, errors.New(\"fetched no datapoints\")\n\t}\n\n\tlatest := new(time.Time)\n\tvar latestVal float64\n\tfor _, dp := range datapoints {\n\t\tif dp.Timestamp.Before(*latest) {\n\t\t\tcontinue\n\t\t}\n\n\t\tlatest = dp.Timestamp\n\t\tswitch metric.Type {\n\t\tcase metricsTypeAverage:\n\t\t\tlatestVal = *dp.Average\n\t\tcase metricsTypeMaximum:\n\t\t\tlatestVal = *dp.Maximum\n\t\tcase metricsTypeMinimum:\n\t\t\tlatestVal = *dp.Minimum\n\t\t}\n\t}\n\n\treturn latestVal, nil\n}\n\n\/\/ FetchMetrics fetch the metrics\nfunc (p KinesisStreamsPlugin) FetchMetrics() (map[string]interface{}, error) {\n\tstat := make(map[string]interface{})\n\n\tfor _, met := range [...]metrics{\n\t\t{CloudWatchName: \"GetRecords.Bytes\", MackerelName: \"GetRecordsBytes\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"GetRecords.IteratorAgeMilliseconds\", MackerelName: \"GetRecordsDelayMaxMilliseconds\", Type: metricsTypeMaximum},\n\t\t{CloudWatchName: \"GetRecords.IteratorAgeMilliseconds\", MackerelName: \"GetRecordsDelayMinMilliseconds\", Type: metricsTypeMinimum},\n\t\t{CloudWatchName: \"GetRecords.Latency\", MackerelName: \"GetRecordsLatency\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"GetRecords.Records\", MackerelName: \"GetRecordsRecords\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"GetRecords.Success\", MackerelName: \"GetRecordsSuccess\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"IncomingBytes\", MackerelName: \"IncomingBytes\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"IncomingRecords\", MackerelName: \"IncomingRecords\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"PutRecord.Bytes\", MackerelName: \"PutRecordBytes\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"PutRecord.Latency\", MackerelName: \"PutRecordLatency\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"PutRecord.Success\", MackerelName: \"PutRecordSuccess\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"PutRecords.Bytes\", MackerelName: \"PutRecordsBytes\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"PutRecords.Latency\", MackerelName: \"PutRecordsLatency\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"PutRecords.Records\", MackerelName: \"PutRecordsRecords\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"PutRecords.Success\", MackerelName: \"PutRecordsSuccess\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"ReadProvidionedThroughputExceeded\", MackerelName: \"ReadThroughputExceeded\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"WriteProvidionedThroughputExceeded\", MackerelName: \"WriteThroughputExceeded\", Type: metricsTypeAverage},\n\t} {\n\t\tv, err := p.getLastPoint(met)\n\t\tif err == nil {\n\t\t\tstat[met.MackerelName] = v\n\t\t} else {\n\t\t\tlog.Printf(\"%s: %s\", met, err)\n\t\t}\n\t}\n\treturn stat, nil\n}\n\n\/\/ GraphDefinition of KinesisStreamsPlugin\nfunc (p KinesisStreamsPlugin) GraphDefinition() map[string]mp.Graphs {\n\tlabelPrefix := strings.Title(p.Prefix)\n\tlabelPrefix = strings.Replace(labelPrefix, \"-\", \" \", -1)\n\n\tvar graphdef = map[string]mp.Graphs{\n\t\t\"bytes\": {\n\t\t\tLabel: (labelPrefix + \" Bytes\"),\n\t\t\tUnit: \"integer\",\n\t\t\tMetrics: []mp.Metrics{\n\t\t\t\t{Name: \"GetRecordsBytes\", Label: \"GetRecords\"},\n\t\t\t\t{Name: \"IncomingBytes\", Label: \"Total Incoming\"},\n\t\t\t\t{Name: \"PutRecordBytes\", Label: \"PutRecord\"},\n\t\t\t\t{Name: \"PutRecordsBytes\", Label: \"PutRecords\"},\n\t\t\t},\n\t\t},\n\t\t\"iteratorage\": {\n\t\t\tLabel: (labelPrefix + \" Read Delay\"),\n\t\t\tUnit: \"integer\",\n\t\t\tMetrics: []mp.Metrics{\n\t\t\t\t{Name: \"GetRecordsDelayMaxMilliseconds\", Label: \"Max\"},\n\t\t\t\t{Name: \"GetRecordsDelayMinMilliseconds\", Label: \"Min\"},\n\t\t\t},\n\t\t},\n\t\t\"latency\": {\n\t\t\tLabel: (labelPrefix + \" Operation Latency\"),\n\t\t\tUnit: \"integer\",\n\t\t\tMetrics: []mp.Metrics{\n\t\t\t\t{Name: \"GetRecordsLatency\", Label: \"GetRecords\"},\n\t\t\t\t{Name: \"PutRecordLatency\", Label: \"PutRecord\"},\n\t\t\t\t{Name: \"PutRecordsLatency\", Label: \"PutRecords\"},\n\t\t\t},\n\t\t},\n\t\t\"records\": {\n\t\t\tLabel: (labelPrefix + \" Records\"),\n\t\t\tUnit: \"integer\",\n\t\t\tMetrics: []mp.Metrics{\n\t\t\t\t{Name: \"GetRecordsRecords\", Label: \"GetRecords\"},\n\t\t\t\t{Name: \"IncomingRecords\", Label: \"Total Incoming\"},\n\t\t\t\t{Name: \"PutRecordsRecords\", Label: \"PutRecords\"},\n\t\t\t},\n\t\t},\n\t\t\"success\": {\n\t\t\tLabel: (labelPrefix + \" Operation Success\"),\n\t\t\tUnit: \"integer\",\n\t\t\tMetrics: []mp.Metrics{\n\t\t\t\t{Name: \"GetRecordsSuccess\", Label: \"GetRecords\"},\n\t\t\t\t{Name: \"PutRecordSuccess\", Label: \"PutRecord\"},\n\t\t\t\t{Name: \"PutRecordsSuccess\", Label: \"PutRecords\"},\n\t\t\t},\n\t\t},\n\t\t\"pending\": {\n\t\t\tLabel: (labelPrefix + \" Pending Operations\"),\n\t\t\tUnit: \"integer\",\n\t\t\tMetrics: []mp.Metrics{\n\t\t\t\t{Name: \"ReadThroughputExceeded\", Label: \"Read\"},\n\t\t\t\t{Name: \"WriteThroughputExceeded\", Label: \"Write\"},\n\t\t\t},\n\t\t},\n\t}\n\treturn graphdef\n}\n\n\/\/ Do the plugin\nfunc Do() {\n\toptAccessKeyID := flag.String(\"access-key-id\", \"\", \"AWS Access Key ID\")\n\toptSecretAccessKey := flag.String(\"secret-access-key\", \"\", \"AWS Secret Access Key\")\n\toptRegion := flag.String(\"region\", \"\", \"AWS Region\")\n\toptIdentifier := flag.String(\"identifier\", \"\", \"Stream Name\")\n\toptTempfile := flag.String(\"tempfile\", \"\", \"Temp file name\")\n\toptPrefix := flag.String(\"metric-key-prefix\", \"kinesis-streams\", \"Metric key prefix\")\n\tflag.Parse()\n\n\tvar plugin KinesisStreamsPlugin\n\n\tplugin.AccessKeyID = *optAccessKeyID\n\tplugin.SecretAccessKey = *optSecretAccessKey\n\tplugin.Region = *optRegion\n\tplugin.Name = *optIdentifier\n\tplugin.Prefix = *optPrefix\n\n\terr := plugin.prepare()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\thelper := mp.NewMackerelPlugin(plugin)\n\thelper.Tempfile = *optTempfile\n\n\thelper.Run()\n}\n<commit_msg>add Average and description<commit_after>package mpawskinesisstreams\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/cloudwatch\"\n\tmp \"github.com\/mackerelio\/go-mackerel-plugin-helper\"\n)\n\nconst (\n\tnamespace = \"AWS\/Kinesis\"\n\tmetricsTypeAverage = \"Average\"\n\tmetricsTypeMaximum = \"Maximum\"\n\tmetricsTypeMinimum = \"Minimum\"\n)\n\ntype metrics struct {\n\tCloudWatchName string\n\tMackerelName string\n\tType string\n}\n\n\/\/ KinesisStreamsPlugin mackerel plugin for aws kinesis\ntype KinesisStreamsPlugin struct {\n\tName string\n\tPrefix string\n\n\tAccessKeyID string\n\tSecretAccessKey string\n\tRegion string\n\tCloudWatch *cloudwatch.CloudWatch\n}\n\n\/\/ MetricKeyPrefix interface for PluginWithPrefix\nfunc (p KinesisStreamsPlugin) MetricKeyPrefix() string {\n\tif p.Prefix == \"\" {\n\t\tp.Prefix = \"kinesis-streams\"\n\t}\n\treturn p.Prefix\n}\n\n\/\/ prepare creates CloudWatch instance\nfunc (p *KinesisStreamsPlugin) prepare() error {\n\tsess, err := session.NewSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconfig := aws.NewConfig()\n\tif p.AccessKeyID != \"\" && p.SecretAccessKey != \"\" {\n\t\tconfig = config.WithCredentials(credentials.NewStaticCredentials(p.AccessKeyID, p.SecretAccessKey, \"\"))\n\t}\n\tif p.Region != \"\" {\n\t\tconfig = config.WithRegion(p.Region)\n\t}\n\n\tp.CloudWatch = cloudwatch.New(sess, config)\n\n\treturn nil\n}\n\n\/\/ getLastPoint fetches a CloudWatch metric and parse\nfunc (p KinesisStreamsPlugin) getLastPoint(metric metrics) (float64, error) {\n\tnow := time.Now()\n\n\tdimensions := []*cloudwatch.Dimension{\n\t\t{\n\t\t\tName: aws.String(\"StreamName\"),\n\t\t\tValue: aws.String(p.Name),\n\t\t},\n\t}\n\n\tresponse, err := p.CloudWatch.GetMetricStatistics(&cloudwatch.GetMetricStatisticsInput{\n\t\tDimensions: dimensions,\n\t\tStartTime: aws.Time(now.Add(time.Duration(180) * time.Second * -1)), \/\/ 3 min\n\t\tEndTime: aws.Time(now),\n\t\tMetricName: aws.String(metric.CloudWatchName),\n\t\tPeriod: aws.Int64(60),\n\t\tStatistics: []*string{aws.String(metric.Type)},\n\t\tNamespace: aws.String(namespace),\n\t})\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tdatapoints := response.Datapoints\n\tif len(datapoints) == 0 {\n\t\treturn 0, errors.New(\"fetched no datapoints\")\n\t}\n\n\tlatest := new(time.Time)\n\tvar latestVal float64\n\tfor _, dp := range datapoints {\n\t\tif dp.Timestamp.Before(*latest) {\n\t\t\tcontinue\n\t\t}\n\n\t\tlatest = dp.Timestamp\n\t\tswitch metric.Type {\n\t\tcase metricsTypeAverage:\n\t\t\tlatestVal = *dp.Average\n\t\tcase metricsTypeMaximum:\n\t\t\tlatestVal = *dp.Maximum\n\t\tcase metricsTypeMinimum:\n\t\t\tlatestVal = *dp.Minimum\n\t\t}\n\t}\n\n\treturn latestVal, nil\n}\n\n\/\/ FetchMetrics fetch the metrics\nfunc (p KinesisStreamsPlugin) FetchMetrics() (map[string]interface{}, error) {\n\tstat := make(map[string]interface{})\n\n\tfor _, met := range [...]metrics{\n\t\t{CloudWatchName: \"GetRecords.Bytes\", MackerelName: \"GetRecordsBytes\", Type: metricsTypeAverage},\n\t\t\/\/ Max of IteratorAgeMilliseconds is useful especially when few of iterators are in trouble\n\t\t{CloudWatchName: \"GetRecords.IteratorAgeMilliseconds\", MackerelName: \"GetRecordsDelayMaxMilliseconds\", Type: metricsTypeMaximum},\n\t\t{CloudWatchName: \"GetRecords.IteratorAgeMilliseconds\", MackerelName: \"GetRecordsDelayMinMilliseconds\", Type: metricsTypeMinimum},\n\t\t{CloudWatchName: \"GetRecords.IteratorAgeMilliseconds\", MackerelName: \"GetRecordsDelayAverageMilliseconds\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"GetRecords.Latency\", MackerelName: \"GetRecordsLatency\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"GetRecords.Records\", MackerelName: \"GetRecordsRecords\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"GetRecords.Success\", MackerelName: \"GetRecordsSuccess\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"IncomingBytes\", MackerelName: \"IncomingBytes\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"IncomingRecords\", MackerelName: \"IncomingRecords\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"PutRecord.Bytes\", MackerelName: \"PutRecordBytes\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"PutRecord.Latency\", MackerelName: \"PutRecordLatency\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"PutRecord.Success\", MackerelName: \"PutRecordSuccess\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"PutRecords.Bytes\", MackerelName: \"PutRecordsBytes\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"PutRecords.Latency\", MackerelName: \"PutRecordsLatency\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"PutRecords.Records\", MackerelName: \"PutRecordsRecords\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"PutRecords.Success\", MackerelName: \"PutRecordsSuccess\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"ReadProvidionedThroughputExceeded\", MackerelName: \"ReadThroughputExceeded\", Type: metricsTypeAverage},\n\t\t{CloudWatchName: \"WriteProvidionedThroughputExceeded\", MackerelName: \"WriteThroughputExceeded\", Type: metricsTypeAverage},\n\t} {\n\t\tv, err := p.getLastPoint(met)\n\t\tif err == nil {\n\t\t\tstat[met.MackerelName] = v\n\t\t} else {\n\t\t\tlog.Printf(\"%s: %s\", met, err)\n\t\t}\n\t}\n\treturn stat, nil\n}\n\n\/\/ GraphDefinition of KinesisStreamsPlugin\nfunc (p KinesisStreamsPlugin) GraphDefinition() map[string]mp.Graphs {\n\tlabelPrefix := strings.Title(p.Prefix)\n\tlabelPrefix = strings.Replace(labelPrefix, \"-\", \" \", -1)\n\n\tvar graphdef = map[string]mp.Graphs{\n\t\t\"bytes\": {\n\t\t\tLabel: (labelPrefix + \" Bytes\"),\n\t\t\tUnit: \"integer\",\n\t\t\tMetrics: []mp.Metrics{\n\t\t\t\t{Name: \"GetRecordsBytes\", Label: \"GetRecords\"},\n\t\t\t\t{Name: \"IncomingBytes\", Label: \"Total Incoming\"},\n\t\t\t\t{Name: \"PutRecordBytes\", Label: \"PutRecord\"},\n\t\t\t\t{Name: \"PutRecordsBytes\", Label: \"PutRecords\"},\n\t\t\t},\n\t\t},\n\t\t\"iteratorage\": {\n\t\t\tLabel: (labelPrefix + \" Read Delay\"),\n\t\t\tUnit: \"integer\",\n\t\t\tMetrics: []mp.Metrics{\n\t\t\t\t{Name: \"GetRecordsDelayAverageMilliseconds\", Label: \"Average\"},\n\t\t\t\t{Name: \"GetRecordsDelayMaxMilliseconds\", Label: \"Max\"},\n\t\t\t\t{Name: \"GetRecordsDelayMinMilliseconds\", Label: \"Min\"},\n\t\t\t},\n\t\t},\n\t\t\"latency\": {\n\t\t\tLabel: (labelPrefix + \" Operation Latency\"),\n\t\t\tUnit: \"integer\",\n\t\t\tMetrics: []mp.Metrics{\n\t\t\t\t{Name: \"GetRecordsLatency\", Label: \"GetRecords\"},\n\t\t\t\t{Name: \"PutRecordLatency\", Label: \"PutRecord\"},\n\t\t\t\t{Name: \"PutRecordsLatency\", Label: \"PutRecords\"},\n\t\t\t},\n\t\t},\n\t\t\"records\": {\n\t\t\tLabel: (labelPrefix + \" Records\"),\n\t\t\tUnit: \"integer\",\n\t\t\tMetrics: []mp.Metrics{\n\t\t\t\t{Name: \"GetRecordsRecords\", Label: \"GetRecords\"},\n\t\t\t\t{Name: \"IncomingRecords\", Label: \"Total Incoming\"},\n\t\t\t\t{Name: \"PutRecordsRecords\", Label: \"PutRecords\"},\n\t\t\t},\n\t\t},\n\t\t\"success\": {\n\t\t\tLabel: (labelPrefix + \" Operation Success\"),\n\t\t\tUnit: \"integer\",\n\t\t\tMetrics: []mp.Metrics{\n\t\t\t\t{Name: \"GetRecordsSuccess\", Label: \"GetRecords\"},\n\t\t\t\t{Name: \"PutRecordSuccess\", Label: \"PutRecord\"},\n\t\t\t\t{Name: \"PutRecordsSuccess\", Label: \"PutRecords\"},\n\t\t\t},\n\t\t},\n\t\t\"pending\": {\n\t\t\tLabel: (labelPrefix + \" Pending Operations\"),\n\t\t\tUnit: \"integer\",\n\t\t\tMetrics: []mp.Metrics{\n\t\t\t\t{Name: \"ReadThroughputExceeded\", Label: \"Read\"},\n\t\t\t\t{Name: \"WriteThroughputExceeded\", Label: \"Write\"},\n\t\t\t},\n\t\t},\n\t}\n\treturn graphdef\n}\n\n\/\/ Do the plugin\nfunc Do() {\n\toptAccessKeyID := flag.String(\"access-key-id\", \"\", \"AWS Access Key ID\")\n\toptSecretAccessKey := flag.String(\"secret-access-key\", \"\", \"AWS Secret Access Key\")\n\toptRegion := flag.String(\"region\", \"\", \"AWS Region\")\n\toptIdentifier := flag.String(\"identifier\", \"\", \"Stream Name\")\n\toptTempfile := flag.String(\"tempfile\", \"\", \"Temp file name\")\n\toptPrefix := flag.String(\"metric-key-prefix\", \"kinesis-streams\", \"Metric key prefix\")\n\tflag.Parse()\n\n\tvar plugin KinesisStreamsPlugin\n\n\tplugin.AccessKeyID = *optAccessKeyID\n\tplugin.SecretAccessKey = *optSecretAccessKey\n\tplugin.Region = *optRegion\n\tplugin.Name = *optIdentifier\n\tplugin.Prefix = *optPrefix\n\n\terr := plugin.prepare()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\thelper := mp.NewMackerelPlugin(plugin)\n\thelper.Tempfile = *optTempfile\n\n\thelper.Run()\n}\n<|endoftext|>"} {"text":"<commit_before>package capacitor\n\nimport (\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\ntype Interface interface {\n\tSubmit(func())\n\tDischarge()\n\tCancel()\n}\n\nfunc New() Interface {\n\tc := &capacitor{\n\t\tdelay: time.Second,\n\t}\n\n\treturn c\n}\n\ntype delayer struct {\n\tcurrent atomic.Value\n\tterminate chan bool\n}\n\nfunc (d *delayer) cancel(discharge bool) {\n\td.terminate <- discharge\n}\n\nfunc (d *delayer) execute() {\n\tif f, ok := d.current.Load().(func()); f != nil && ok {\n\t\tf()\n\t}\n}\n\nfunc (d *delayer) run(timer <-chan time.Time, stop func() bool) {\n\tdefer stop()\n\tfor {\n\t\tselect {\n\t\tcase <-timer:\n\t\t\tselect {\n\t\t\tcase discharge := <-d.terminate:\n\t\t\t\tif discharge {\n\t\t\t\t\td.execute()\n\t\t\t\t}\n\n\t\t\tdefault:\n\t\t\t\td.execute()\n\t\t\t}\n\n\t\t\treturn\n\n\t\tcase discharge := <-d.terminate:\n\t\t\tif discharge {\n\t\t\t\td.execute()\n\t\t\t}\n\t\t}\n\t}\n}\n\ntype capacitor struct {\n\tlock sync.Mutex\n\tdelay time.Duration\n\td *delayer\n}\n\nfunc (c *capacitor) Submit(v func()) {\n\tc.lock.Lock()\n\tif c.d == nil {\n\t\tc.d = &delayer{\n\t\t\tterminate: make(chan bool, 1),\n\t\t}\n\n\t\ttimer := time.NewTimer(c.delay)\n\t\tgo c.d.run(timer.C, timer.Stop)\n\t}\n\n\tc.d.current.Store(v)\n\tc.lock.Unlock()\n}\n\nfunc (c *capacitor) Discharge() {\n\tc.lock.Lock()\n\tif c.d != nil {\n\t\tc.d.cancel(true)\n\t\tc.d = nil\n\t}\n\n\tc.lock.Unlock()\n}\n\nfunc (c *capacitor) Cancel() {\n\tc.lock.Lock()\n\tif c.d != nil {\n\t\tc.d.cancel(false)\n\t\tc.d = nil\n\t}\n\n\tc.lock.Unlock()\n}\n<commit_msg>Fleshed out the atomic operations<commit_after>package capacitor\n\nimport (\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/Comcast\/webpa-common\/clock\"\n)\n\ntype Interface interface {\n\tSubmit(func())\n\tDischarge()\n\tCancel()\n}\n\nfunc New() Interface {\n\tc := &capacitor{\n\t\tdelay: time.Second,\n\t\tc: clock.System(),\n\t}\n\n\treturn c\n}\n\ntype delayer struct {\n\tcurrent atomic.Value\n\ttimer clock.Timer\n\tterminate chan bool\n\treset func()\n}\n\nfunc (d *delayer) discharge() {\n\td.terminate <- true\n}\n\nfunc (d *delayer) cancel() {\n\td.terminate <- false\n}\n\nfunc (d *delayer) execute() {\n\tif f, ok := d.current.Load().(func()); f != nil && ok {\n\t\tf()\n\t}\n}\n\nfunc (d *delayer) run() {\n\tdefer d.timer.Stop()\n\tdefer d.reset()\n\n\tfor {\n\t\tselect {\n\t\tcase <-d.timer.C():\n\t\t\tselect {\n\t\t\tcase discharge := <-d.terminate:\n\t\t\t\tif discharge {\n\t\t\t\t\td.execute()\n\t\t\t\t}\n\n\t\t\tdefault:\n\t\t\t\td.execute()\n\t\t\t}\n\n\t\t\treturn\n\n\t\tcase discharge := <-d.terminate:\n\t\t\tif discharge {\n\t\t\t\td.execute()\n\t\t\t}\n\t\t}\n\t}\n}\n\ntype capacitor struct {\n\tlock sync.Mutex\n\tdelay time.Duration\n\tc clock.Interface\n\td *delayer\n}\n\n\/\/ reset produces a closure that a given delayer must call to clean up the enclosing capacitor.\n\/\/ The returned closure atomically sets the delayer to nil if and only if it matched the given delayer.\n\/\/ This allows for barging between the public interface methods.\nfunc (c *capacitor) reset(d *delayer) func() {\n\treturn func() {\n\t\tc.lock.Lock()\n\t\tif c.d == d {\n\t\t\tc.d = nil\n\t\t}\n\t\tc.lock.Unlock()\n\t}\n}\n\nfunc (c *capacitor) Submit(v func()) {\n\tc.lock.Lock()\n\tif c.d == nil {\n\t\tc.d = &delayer{\n\t\t\tterminate: make(chan bool, 1),\n\t\t\ttimer: c.c.NewTimer(c.delay),\n\t\t}\n\n\t\tc.d.current.Store(v)\n\t\tc.d.reset = c.reset(c.d)\n\t\tgo c.d.run()\n\t} else {\n\t\tc.d.current.Store(v)\n\t}\n\n\tc.lock.Unlock()\n}\n\nfunc (c *capacitor) Discharge() {\n\tc.lock.Lock()\n\tif c.d != nil {\n\t\tc.d.discharge()\n\t\tc.d = nil\n\t}\n\n\tc.lock.Unlock()\n}\n\nfunc (c *capacitor) Cancel() {\n\tc.lock.Lock()\n\tif c.d != nil {\n\t\tc.d.cancel()\n\t\tc.d = nil\n\t}\n\n\tc.lock.Unlock()\n}\n<|endoftext|>"} {"text":"<commit_before>package runrunc_test\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/cloudfoundry-incubator\/guardian\/rundmc\/runrunc\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/ginkgo\/extensions\/table\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"LookupUser\", func() {\n\tContext(\"when we try to get the Uid and Gid of a username\", func() {\n\t\tvar (\n\t\t\trootFsPath string\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tvar err error\n\t\t\trootFsPath, err = ioutil.TempDir(\"\", \"passwdtestdir\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tExpect(os.MkdirAll(filepath.Join(rootFsPath, \"etc\"), 0777)).To(Succeed())\n\t\t})\n\n\t\tContext(\"when we try to get the Uid and Gid of the empty string\", func() {\n\t\t\tIt(\"returns the default UID and GID\", func() {\n\t\t\t\trootFsPath = \"some path\"\n\t\t\t\tuser, err := runrunc.LookupUser(rootFsPath, \"\")\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\tExpect(user.Uid).To(BeEquivalentTo(runrunc.DefaultUID))\n\t\t\t\tExpect(user.Gid).To(BeEquivalentTo(runrunc.DefaultGID))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when \/etc\/passwd exists with one matching user\", func() {\n\t\t\tconst ()\n\t\t\tBeforeEach(func() {\n\t\t\t\tExpect(ioutil.WriteFile(filepath.Join(rootFsPath, \"etc\", \"passwd\"), []byte(\n\t\t\t\t\t`_lda:*:211:211:Local Delivery Agent:\/var\/empty:\/usr\/bin\/false\n_cvmsroot:*:212:212:CVMS Root:\/var\/empty:\/usr\/bin\/false\n_usbmuxd:*:213:213:iPhone OS Device Helper:\/var\/db\/lockdown:\/usr\/bin\/false\ndevil:*:666:777:Beelzebub:\/home\/fieryunderworld:\/usr\/bin\/false\n_dovecot:*:214:6:Dovecot Administrator:\/var\/empty:\/usr\/bin\/false`,\n\t\t\t\t), 0777)).To(Succeed())\n\t\t\t})\n\n\t\t\tIt(\"gets the user ID from \/etc\/passwd\", func() {\n\t\t\t\tuser, err := runrunc.LookupUser(rootFsPath, \"devil\")\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\tExpect(user.Uid).To(BeEquivalentTo(666)) \/\/ the UID of the beas\n\t\t\t\tExpect(user.Gid).To(BeEquivalentTo(777)) \/\/ the GID of the beas\n\t\t\t\tExpect(user.Home).To(Equal(\"\/home\/fieryunderworld\")) \/\/ the GID of the beast\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when \/etc\/passwd exists with no matching users\", func() {\n\t\t\tIt(\"returns an error\", func() {\n\t\t\t\tExpect(ioutil.WriteFile(filepath.Join(rootFsPath, \"etc\", \"passwd\"), []byte{}, 0777)).To(Succeed())\n\n\t\t\t\t_, err := runrunc.LookupUser(rootFsPath, \"unknownUser\")\n\t\t\t\tExpect(err).To(MatchError(ContainSubstring(\"Unable to find\")))\n\t\t\t})\n\t\t})\n\n\t\tDescribeTable(\"when \/etc\/passwd cannot be parsed\", func(breakEtcPasswd func()) {\n\t\t\tbreakEtcPasswd()\n\t\t\t_, err := runrunc.LookupUser(rootFsPath, \"devil\")\n\t\t\tExpect(err).To(MatchError(ContainSubstring(\"Unable to find\")))\n\t\t},\n\t\t\tEntry(\"because it doesn't exist\", func() {}),\n\t\t\tEntry(\"because the contents makes no sense\", func() {\n\t\t\t\tsenselessContents := []byte(\n\t\t\t\t\t`lorem ipsum dollar sit amet\n\t\t\t\t\tunix at the portal\n\t\t\t\t\tbody type by letroset\n\t\t\t\t\there at the epoch\n\t\t\t\t\tlet us forget...`,\n\t\t\t\t)\n\t\t\t\tpasswdPath := filepath.Join(rootFsPath, \"etc\", \"passwd\")\n\t\t\t\tExpect(ioutil.WriteFile(passwdPath, senselessContents, 0777)).To(Succeed())\n\t\t\t}))\n\t})\n})\n<commit_msg>Fix test that failed due to bumped runc<commit_after>package runrunc_test\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/cloudfoundry-incubator\/guardian\/rundmc\/runrunc\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/ginkgo\/extensions\/table\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"LookupUser\", func() {\n\tContext(\"when we try to get the Uid and Gid of a username\", func() {\n\t\tvar (\n\t\t\trootFsPath string\n\t\t)\n\n\t\tBeforeEach(func() {\n\t\t\tvar err error\n\t\t\trootFsPath, err = ioutil.TempDir(\"\", \"passwdtestdir\")\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tExpect(os.MkdirAll(filepath.Join(rootFsPath, \"etc\"), 0777)).To(Succeed())\n\t\t})\n\n\t\tContext(\"when we try to get the Uid and Gid of the empty string\", func() {\n\t\t\tIt(\"returns the default UID and GID\", func() {\n\t\t\t\trootFsPath = \"some path\"\n\t\t\t\tuser, err := runrunc.LookupUser(rootFsPath, \"\")\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\tExpect(user.Uid).To(BeEquivalentTo(runrunc.DefaultUID))\n\t\t\t\tExpect(user.Gid).To(BeEquivalentTo(runrunc.DefaultGID))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when \/etc\/passwd exists with one matching user\", func() {\n\t\t\tconst ()\n\t\t\tBeforeEach(func() {\n\t\t\t\tExpect(ioutil.WriteFile(filepath.Join(rootFsPath, \"etc\", \"passwd\"), []byte(\n\t\t\t\t\t`_lda:*:211:211:Local Delivery Agent:\/var\/empty:\/usr\/bin\/false\n_cvmsroot:*:212:212:CVMS Root:\/var\/empty:\/usr\/bin\/false\n_usbmuxd:*:213:213:iPhone OS Device Helper:\/var\/db\/lockdown:\/usr\/bin\/false\ndevil:*:666:777:Beelzebub:\/home\/fieryunderworld:\/usr\/bin\/false\n_dovecot:*:214:6:Dovecot Administrator:\/var\/empty:\/usr\/bin\/false`,\n\t\t\t\t), 0777)).To(Succeed())\n\t\t\t})\n\n\t\t\tIt(\"gets the user ID from \/etc\/passwd\", func() {\n\t\t\t\tuser, err := runrunc.LookupUser(rootFsPath, \"devil\")\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\tExpect(user.Uid).To(BeEquivalentTo(666)) \/\/ the UID of the beas\n\t\t\t\tExpect(user.Gid).To(BeEquivalentTo(777)) \/\/ the GID of the beas\n\t\t\t\tExpect(user.Home).To(Equal(\"\/home\/fieryunderworld\")) \/\/ the GID of the beast\n\t\t\t})\n\t\t})\n\n\t\tContext(\"when \/etc\/passwd exists with no matching users\", func() {\n\t\t\tIt(\"returns an error\", func() {\n\t\t\t\tExpect(ioutil.WriteFile(filepath.Join(rootFsPath, \"etc\", \"passwd\"), []byte{}, 0777)).To(Succeed())\n\n\t\t\t\t_, err := runrunc.LookupUser(rootFsPath, \"unknownUser\")\n\t\t\t\tExpect(err).To(MatchError(ContainSubstring(\"unable to find\")))\n\t\t\t})\n\t\t})\n\n\t\tDescribeTable(\"when \/etc\/passwd cannot be parsed\", func(breakEtcPasswd func()) {\n\t\t\tbreakEtcPasswd()\n\t\t\t_, err := runrunc.LookupUser(rootFsPath, \"devil\")\n\t\t\tExpect(err).To(MatchError(ContainSubstring(\"unable to find\")))\n\t\t},\n\t\t\tEntry(\"because it doesn't exist\", func() {}),\n\t\t\tEntry(\"because the contents makes no sense\", func() {\n\t\t\t\tsenselessContents := []byte(\n\t\t\t\t\t`lorem ipsum dollar sit amet\n\t\t\t\t\tunix at the portal\n\t\t\t\t\tbody type by letroset\n\t\t\t\t\there at the epoch\n\t\t\t\t\tlet us forget...`,\n\t\t\t\t)\n\t\t\t\tpasswdPath := filepath.Join(rootFsPath, \"etc\", \"passwd\")\n\t\t\t\tExpect(ioutil.WriteFile(passwdPath, senselessContents, 0777)).To(Succeed())\n\t\t\t}))\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package riak_cs_service\n\nimport (\n\t. \"github.com\/onsi\/ginkgo\"\n\n\t\"fmt\"\n\t\"time\"\n\n\t. \"github.com\/cloudfoundry-incubator\/cf-test-helpers\/cf\"\n\t. \"github.com\/cloudfoundry-incubator\/cf-test-helpers\/generator\"\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/runner\"\n\t. \"github.com\/cloudfoundry-incubator\/cf-test-helpers\/services\/context_setup\"\n)\n\nvar _ = Describe(\"Riak CS Service Lifecycle\", func() {\n\n\tvar (\n\t\tshortTimeout, longTimeout, startTimeout time.Duration\n\t)\n\n\tBeforeEach(func() {\n\t\tshortTimeout = ScaledTimeout(30 * time.Second)\n\t\tlongTimeout = ScaledTimeout(60 * time.Second)\n\t\tstartTimeout = ScaledTimeout(5 * time.Minute)\n\n\t\tAppName = RandomName()\n\n\t\trunner.NewCmdRunner(Cf(\"push\", AppName, \"-m\", \"256M\", \"-p\", sinatraPath, \"-no-start\"), longTimeout).Run()\n\t})\n\n\tAfterEach(func() {\n\t\trunner.NewCmdRunner(Cf(\"delete\", AppName, \"-f\"), longTimeout).Run()\n\t})\n\n\tIt(\"Allows users to create, bind, write to, read from, unbind, and destroy the service instance\", func() {\n\t\tServiceName := ServiceName()\n\t\tPlanName := PlanName()\n\t\tServiceInstanceName := RandomName()\n\n\t\trunner.NewCmdRunner(Cf(\"create-service\", ServiceName, PlanName, ServiceInstanceName), longTimeout).Run()\n\t\trunner.NewCmdRunner(Cf(\"bind-service\", AppName, ServiceInstanceName), longTimeout).Run()\n\t\trunner.NewCmdRunner(Cf(\"start\", AppName), startTimeout).Run()\n\n\t\turi := AppUri(AppName) + \"\/service\/blobstore\/\" + ServiceInstanceName + \"\/mykey\"\n\t\tdelete_uri := AppUri(AppName) + \"\/service\/blobstore\/\" + ServiceInstanceName\n\n\t\tfmt.Println(\"Posting to url: \", uri)\n\t\trunner.NewCmdRunner(runner.Curl(\"-k\", \"-d\", \"myvalue\", uri), shortTimeout).WithOutput(\"myvalue\").Run()\n\t\tfmt.Println(\"\\n\")\n\n\t\tfmt.Println(\"Curling url: \", uri)\n\t\trunner.NewCmdRunner(runner.Curl(\"-k\", uri), shortTimeout).WithOutput(\"myvalue\").Run()\n\t\tfmt.Println(\"\\n\")\n\n\t\tfmt.Println(\"Sending delete to: \", delete_uri)\n\t\trunner.NewCmdRunner(runner.Curl(\"-X\", \"DELETE\", \"-k\", delete_uri), shortTimeout).WithOutput(\"successfully_deleted\").Run()\n\t\tfmt.Println(\"\\n\")\n\n\t\trunner.NewCmdRunner(Cf(\"unbind-service\", AppName, ServiceInstanceName), longTimeout)\n\t\trunner.NewCmdRunner(Cf(\"delete-service\", \"-f\", ServiceInstanceName), longTimeout)\n\t})\n})\n<commit_msg>Bug fix: Wait for all cf command executions to complete<commit_after>package riak_cs_service\n\nimport (\n\t. \"github.com\/onsi\/ginkgo\"\n\n\t\"fmt\"\n\t\"time\"\n\n\t. \"github.com\/cloudfoundry-incubator\/cf-test-helpers\/cf\"\n\t. \"github.com\/cloudfoundry-incubator\/cf-test-helpers\/generator\"\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/runner\"\n\t. \"github.com\/cloudfoundry-incubator\/cf-test-helpers\/services\/context_setup\"\n)\n\nvar _ = Describe(\"Riak CS Service Lifecycle\", func() {\n\n\tvar (\n\t\tshortTimeout, longTimeout, startTimeout time.Duration\n\t)\n\n\tBeforeEach(func() {\n\t\tshortTimeout = ScaledTimeout(30 * time.Second)\n\t\tlongTimeout = ScaledTimeout(60 * time.Second)\n\t\tstartTimeout = ScaledTimeout(5 * time.Minute)\n\n\t\tAppName = RandomName()\n\n\t\trunner.NewCmdRunner(Cf(\"push\", AppName, \"-m\", \"256M\", \"-p\", sinatraPath, \"-no-start\"), longTimeout).Run()\n\t})\n\n\tAfterEach(func() {\n\t\trunner.NewCmdRunner(Cf(\"delete\", AppName, \"-f\"), longTimeout).Run()\n\t})\n\n\tIt(\"Allows users to create, bind, write to, read from, unbind, and destroy the service instance\", func() {\n\t\tServiceName := ServiceName()\n\t\tPlanName := PlanName()\n\t\tServiceInstanceName := RandomName()\n\n\t\trunner.NewCmdRunner(Cf(\"create-service\", ServiceName, PlanName, ServiceInstanceName), longTimeout).Run()\n\t\trunner.NewCmdRunner(Cf(\"bind-service\", AppName, ServiceInstanceName), longTimeout).Run()\n\t\trunner.NewCmdRunner(Cf(\"start\", AppName), startTimeout).Run()\n\n\t\turi := AppUri(AppName) + \"\/service\/blobstore\/\" + ServiceInstanceName + \"\/mykey\"\n\t\tdelete_uri := AppUri(AppName) + \"\/service\/blobstore\/\" + ServiceInstanceName\n\n\t\tfmt.Println(\"Posting to url: \", uri)\n\t\trunner.NewCmdRunner(runner.Curl(\"-k\", \"-d\", \"myvalue\", uri), shortTimeout).WithOutput(\"myvalue\").Run()\n\t\tfmt.Println(\"\\n\")\n\n\t\tfmt.Println(\"Curling url: \", uri)\n\t\trunner.NewCmdRunner(runner.Curl(\"-k\", uri), shortTimeout).WithOutput(\"myvalue\").Run()\n\t\tfmt.Println(\"\\n\")\n\n\t\tfmt.Println(\"Sending delete to: \", delete_uri)\n\t\trunner.NewCmdRunner(runner.Curl(\"-X\", \"DELETE\", \"-k\", delete_uri), shortTimeout).WithOutput(\"successfully_deleted\").Run()\n\t\tfmt.Println(\"\\n\")\n\n\t\trunner.NewCmdRunner(Cf(\"unbind-service\", AppName, ServiceInstanceName), longTimeout).Run()\n\t\trunner.NewCmdRunner(Cf(\"delete-service\", \"-f\", ServiceInstanceName), longTimeout).Run()\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Fixed typo<commit_after><|endoftext|>"} {"text":"<commit_before>package validation\n\nimport (\n\t\"chain\/fedchain\/bc\"\n\t\"chain\/fedchain\/state\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc TestNoUpdateEmptyAD(t *testing.T) {\n\tctx := context.Background()\n\tview := newTestView()\n\ttx := bc.NewTx(bc.TxData{Inputs: []*bc.TxInput{{\n\t\tSignatureScript: []byte(\"foo\"),\n\t\tPrevious: bc.Outpoint{Index: bc.InvalidOutputIndex},\n\t}}})\n\tApplyTx(ctx, view, tx)\n\tif len(view.adps) > 0 {\n\t\t\/\/ If metadata field is empty, no update of ADP takes place.\n\t\t\/\/ See https:\/\/github.com\/chain-engineering\/fedchain\/blob\/master\/documentation\/fedchain-specification.md#extract-asset-definition.\n\t\tt.Fatal(\"apply tx should not save an empty asset definition\")\n\t}\n}\n\nfunc BenchmarkValidateTx(b *testing.B) {\n\tctx := context.Background()\n\tview := newTestView()\n\ttx := txFromHex(\"0000000101341fb89912be0110b527375998810c99ac96a317c63b071ccf33b7514cf0f0a5ffffffff6f00473045022100c561a9b4854742bc36c805513b872b2c0a1a367da24710eadd4f3fbc3b1ab41302207cf9eec4e5db694831fe43cf193f23d869291025ac6062199dd6b8998e93e15825512103623fb1fe38ce7e43cf407ec99b061c6d2da0278e80ce094393875c5b94f1ed9051ae0001df03f294bd08930f542a42b91199a8afe1b45c28eeb058cc5e8c8d600e0dd42f0000000000000001000000000000000000000474782d31\")\n\tts := uint64(time.Now().Unix())\n\tfor i := 0; i < b.N; i++ {\n\t\tValidateTx(ctx, view, tx, ts)\n\t}\n}\n\ntype testView struct {\n\touts map[bc.Outpoint]*state.Output\n\tadps map[bc.AssetID]bc.Hash\n}\n\nfunc newTestView() *testView {\n\treturn &testView{\n\t\touts: make(map[bc.Outpoint]*state.Output),\n\t\tadps: make(map[bc.AssetID]bc.Hash),\n\t}\n}\nfunc (v *testView) Output(context.Context, bc.Outpoint) *state.Output {\n\treturn nil\n}\n\nfunc (v *testView) SaveAssetDefinitionPointer(asset bc.AssetID, hash bc.Hash) {\n\tv.adps[asset] = hash\n}\n\nfunc (v *testView) SaveOutput(*state.Output) {}\n\nfunc txFromHex(s string) *bc.Tx {\n\ttx := new(bc.Tx)\n\terr := tx.UnmarshalText([]byte(s))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn tx\n}\n<commit_msg>fedchain\/validation: remove unnecessary testView<commit_after>package validation\n\nimport (\n\t\"chain\/fedchain\/bc\"\n\t\"chain\/fedchain\/state\"\n\t\"testing\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc TestNoUpdateEmptyAD(t *testing.T) {\n\tctx := context.Background()\n\tview := state.NewMemView()\n\ttx := bc.NewTx(bc.TxData{Inputs: []*bc.TxInput{{\n\t\tSignatureScript: []byte(\"foo\"),\n\t\tPrevious: bc.Outpoint{Index: bc.InvalidOutputIndex},\n\t}}})\n\tApplyTx(ctx, view, tx)\n\tif len(view.ADPs) > 0 {\n\t\t\/\/ If metadata field is empty, no update of ADP takes place.\n\t\t\/\/ See https:\/\/github.com\/chain-engineering\/fedchain\/blob\/master\/documentation\/fedchain-specification.md#extract-asset-definition.\n\t\tt.Fatal(\"apply tx should not save an empty asset definition\")\n\t}\n}\n\nfunc BenchmarkValidateTx(b *testing.B) {\n\tctx := context.Background()\n\tview := state.NewMemView()\n\ttx := txFromHex(\"0000000101341fb89912be0110b527375998810c99ac96a317c63b071ccf33b7514cf0f0a5ffffffff6f00473045022100c561a9b4854742bc36c805513b872b2c0a1a367da24710eadd4f3fbc3b1ab41302207cf9eec4e5db694831fe43cf193f23d869291025ac6062199dd6b8998e93e15825512103623fb1fe38ce7e43cf407ec99b061c6d2da0278e80ce094393875c5b94f1ed9051ae0001df03f294bd08930f542a42b91199a8afe1b45c28eeb058cc5e8c8d600e0dd42f0000000000000001000000000000000000000474782d31\")\n\tts := uint64(time.Now().Unix())\n\tfor i := 0; i < b.N; i++ {\n\t\tValidateTx(ctx, view, tx, ts)\n\t}\n}\n\nfunc txFromHex(s string) *bc.Tx {\n\ttx := new(bc.Tx)\n\terr := tx.UnmarshalText([]byte(s))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn tx\n}\n<|endoftext|>"} {"text":"<commit_before>package pay\n\nimport (\n\t\"github.com\/silenceper\/wechat\/v2\/pay\/config\"\n\t\"github.com\/silenceper\/wechat\/v2\/pay\/notify\"\n\t\"github.com\/silenceper\/wechat\/v2\/pay\/order\"\n\t\"github.com\/silenceper\/wechat\/v2\/pay\/refund\"\n)\n\n\/\/Pay 微信支付相关API\ntype Pay struct {\n\tcfg *config.Config\n}\n\n\/\/NewPay 实例化微信支付相关API\nfunc NewPay(cfg *config.Config) *Pay {\n\treturn &Pay{cfg}\n}\n\n\/\/ GetOrder 下单\nfunc (pay *Pay) GetOrder() *order.Order {\n\treturn order.NewOrder(pay.cfg)\n}\n\n\/\/ GetNotify 通知\nfunc (pay *Pay) GetNotify() *notify.Notify {\n\treturn notify.NewNotify(pay.cfg)\n}\n\n\/\/ GetRefund 退款\nfunc (pay *Pay) GetRefund() *refund.Refund {\n\treturn refund.NewRefund(pay.cfg)\n}\n<commit_msg>[付款] pay.go 增加 GetTransfer 方法 (#400)<commit_after>package pay\n\nimport (\n\t\"github.com\/silenceper\/wechat\/v2\/pay\/config\"\n\t\"github.com\/silenceper\/wechat\/v2\/pay\/notify\"\n\t\"github.com\/silenceper\/wechat\/v2\/pay\/order\"\n\t\"github.com\/silenceper\/wechat\/v2\/pay\/refund\"\n\t\"github.com\/silenceper\/wechat\/v2\/pay\/transfer\"\n)\n\n\/\/Pay 微信支付相关API\ntype Pay struct {\n\tcfg *config.Config\n}\n\n\/\/NewPay 实例化微信支付相关API\nfunc NewPay(cfg *config.Config) *Pay {\n\treturn &Pay{cfg}\n}\n\n\/\/ GetOrder 下单\nfunc (pay *Pay) GetOrder() *order.Order {\n\treturn order.NewOrder(pay.cfg)\n}\n\n\/\/ GetNotify 通知\nfunc (pay *Pay) GetNotify() *notify.Notify {\n\treturn notify.NewNotify(pay.cfg)\n}\n\n\/\/ GetRefund 退款\nfunc (pay *Pay) GetRefund() *refund.Refund {\n\treturn refund.NewRefund(pay.cfg)\n}\n\n\/\/ GetTransfer 付款\nfunc (pay *Pay) GetTransfer() *transfer.Transfer {\n\treturn transfer.NewTransfer(pay.cfg)\n}\n<|endoftext|>"} {"text":"<commit_before>package data_types\n\ntype SoftLayer_Virtual_Guest_Block_Device_Template_GroupInitParameters struct {\n\tParameters SoftLayer_Virtual_Guest_Block_Device_Template_GroupInitParameter `json:\"parameters\"`\n}\n\ntype SoftLayer_Virtual_Guest_Block_Device_Template_GroupInitParameter struct {\n\tAccountId int `json:\"accountId\"`\n}\n<commit_msg>renamed to correct name *_guestblock_* to *_guest_block_*<commit_after><|endoftext|>"} {"text":"<commit_before>package warserver\n\nimport (\n \"github.com\/gorilla\/websocket\"\n \"net\"\n \"net\/http\"\n \"warserver\/logger\"\n)\n\nconst (\n RECV_BUF_LEN = 1024\n SERVER_IP = \"localhost\"\n SERVER_PORT = \"5269\"\n)\n\ntype websocketHandler interface {\n handleWebsocket(message []byte)\n}\n\ntype clientConnection struct {\n ws *websocket.Conn\n currentHandler websocketHandler\n handlers chan websocketHandler\n}\n\ntype pipe struct {\n wsRecv chan []byte\n sockRecv chan []byte\n}\n\ntype proxy struct {\n proxyConn clientConnection\n proxyPipes pipe\n}\n\nfunc (pc *clientConnection) wsReadPump() {\n pc.ws.SetReadLimit(RECV_BUF_LEN)\n for {\n _, msg, err := pc.ws.ReadMessage()\n if err != nil {\n logger.Errorf(\"Error while reading from websocket: %s\", err)\n break\n }\n logger.Debugf(\"Received %s from websocket\", msg)\n select {\n case newHandler := <-pc.handlers:\n pc.currentHandler = newHandler\n default:\n }\n pc.currentHandler.handleWebsocket(msg)\n }\n}\n\nfunc connectToServer() (net.Conn, error) {\n return net.Dial(\"tcp\", SERVER_IP + \":\" + SERVER_PORT)\n}\n\nfunc serveWs(w http.ResponseWriter, r *http.Request) {\n if r.Method != \"GET\" {\n http.Error(w, \"Method not allowed\", 405)\n return\n }\n if r.Header.Get(\"Origin\") != \"http:\/\/\"+r.Host {\n http.Error(w, \"Origin not allowed\", 403)\n return\n }\n ws, err := websocket.Upgrade(w, r, nil, 1024, 1024)\n if _, ok := err.(websocket.HandshakeError); ok {\n http.Error(w, \"Not a websocket handshake\", 400)\n return\n } else if err != nil {\n logger.Errorf(\"Websocket upgrade error: %s\", err)\n return\n }\n gamehub.wsRegister<- ws\n}\n<commit_msg>whitespace<commit_after>package warserver\n\nimport (\n \"github.com\/gorilla\/websocket\"\n \"net\"\n \"net\/http\"\n \"warserver\/logger\"\n)\n\nconst (\n RECV_BUF_LEN = 1024\n SERVER_IP = \"localhost\"\n SERVER_PORT = \"5269\"\n)\n\ntype websocketHandler interface {\n handleWebsocket(message []byte)\n}\n\ntype clientConnection struct {\n ws *websocket.Conn\n currentHandler websocketHandler\n handlers chan websocketHandler\n}\n\ntype pipe struct {\n wsRecv chan []byte\n sockRecv chan []byte\n}\n\ntype proxy struct {\n proxyConn clientConnection\n proxyPipes pipe\n}\n\nfunc (pc *clientConnection) wsReadPump() {\n pc.ws.SetReadLimit(RECV_BUF_LEN)\n for {\n _, msg, err := pc.ws.ReadMessage()\n if err != nil {\n logger.Errorf(\"Error while reading from websocket: %s\", err)\n break\n }\n logger.Debugf(\"Received %s from websocket\", msg)\n select {\n case newHandler := <-pc.handlers:\n pc.currentHandler = newHandler\n default:\n }\n pc.currentHandler.handleWebsocket(msg)\n }\n}\n\nfunc connectToServer() (net.Conn, error) {\n return net.Dial(\"tcp\", SERVER_IP + \":\" + SERVER_PORT)\n}\n\nfunc serveWs(w http.ResponseWriter, r *http.Request) {\n if r.Method != \"GET\" {\n http.Error(w, \"Method not allowed\", 405)\n return\n }\n if r.Header.Get(\"Origin\") != \"http:\/\/\"+r.Host {\n http.Error(w, \"Origin not allowed\", 403)\n return\n }\n ws, err := websocket.Upgrade(w, r, nil, 1024, 1024)\n if _, ok := err.(websocket.HandshakeError); ok {\n http.Error(w, \"Not a websocket handshake\", 400)\n return\n } else if err != nil {\n logger.Errorf(\"Websocket upgrade error: %s\", err)\n return\n }\n gamehub.wsRegister<- ws\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cronjob\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/robfig\/cron\"\n\n\tbatchv1 \"k8s.io\/api\/batch\/v1\"\n\tbatchv1beta1 \"k8s.io\/api\/batch\/v1beta1\"\n\t\"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\tref \"k8s.io\/client-go\/tools\/reference\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/legacyscheme\"\n)\n\n\/\/ Utilities for dealing with Jobs and CronJobs and time.\n\nfunc inActiveList(sj batchv1beta1.CronJob, uid types.UID) bool {\n\tfor _, j := range sj.Status.Active {\n\t\tif j.UID == uid {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc deleteFromActiveList(sj *batchv1beta1.CronJob, uid types.UID) {\n\tif sj == nil {\n\t\treturn\n\t}\n\tnewActive := []v1.ObjectReference{}\n\tfor _, j := range sj.Status.Active {\n\t\tif j.UID != uid {\n\t\t\tnewActive = append(newActive, j)\n\t\t}\n\t}\n\tsj.Status.Active = newActive\n}\n\n\/\/ getParentUIDFromJob extracts UID of job's parent and whether it was found\nfunc getParentUIDFromJob(j batchv1.Job) (types.UID, bool) {\n\tcontrollerRef := metav1.GetControllerOf(&j)\n\n\tif controllerRef == nil {\n\t\treturn types.UID(\"\"), false\n\t}\n\n\tif controllerRef.Kind != \"CronJob\" {\n\t\tglog.V(4).Infof(\"Job with non-CronJob parent, name %s namespace %s\", j.Name, j.Namespace)\n\t\treturn types.UID(\"\"), false\n\t}\n\n\treturn controllerRef.UID, true\n}\n\n\/\/ groupJobsByParent groups jobs into a map keyed by the job parent UID (e.g. scheduledJob).\n\/\/ It has no receiver, to facilitate testing.\nfunc groupJobsByParent(js []batchv1.Job) map[types.UID][]batchv1.Job {\n\tjobsBySj := make(map[types.UID][]batchv1.Job)\n\tfor _, job := range js {\n\t\tparentUID, found := getParentUIDFromJob(job)\n\t\tif !found {\n\t\t\tglog.V(4).Infof(\"Unable to get parent uid from job %s in namespace %s\", job.Name, job.Namespace)\n\t\t\tcontinue\n\t\t}\n\t\tjobsBySj[parentUID] = append(jobsBySj[parentUID], job)\n\t}\n\treturn jobsBySj\n}\n\n\/\/ getNextStartTimeAfter gets the latest scheduled start time that is less than \"now\", or an error.\nfunc getNextStartTimeAfter(schedule string, now time.Time) (time.Time, error) {\n\t\/\/ Using robfig\/cron for cron scheduled parsing and next runtime\n\t\/\/ computation. Not using the entire library because:\n\t\/\/ - I want to detect when we missed a runtime due to being down.\n\t\/\/ - How do I set the time such that I can detect the last known runtime?\n\t\/\/ - I guess the functions could launch a go-routine to start the job and\n\t\/\/ then return.\n\t\/\/ How to handle concurrency control.\n\t\/\/ How to detect changes to schedules or deleted schedules and then\n\t\/\/ update the jobs?\n\tsched, err := cron.Parse(schedule)\n\tif err != nil {\n\t\treturn time.Unix(0, 0), fmt.Errorf(\"Unparseable schedule: %s : %s\", schedule, err)\n\t}\n\treturn sched.Next(now), nil\n}\n\n\/\/ getRecentUnmetScheduleTimes gets a slice of times (from oldest to latest) that have passed when a Job should have started but did not.\n\/\/\n\/\/ If there are too many (>100) unstarted times, just give up and return an empty slice.\n\/\/ If there were missed times prior to the last known start time, then those are not returned.\nfunc getRecentUnmetScheduleTimes(sj batchv1beta1.CronJob, now time.Time) ([]time.Time, error) {\n\tstarts := []time.Time{}\n\tsched, err := cron.ParseStandard(sj.Spec.Schedule)\n\tif err != nil {\n\t\treturn starts, fmt.Errorf(\"Unparseable schedule: %s : %s\", sj.Spec.Schedule, err)\n\t}\n\n\tvar earliestTime time.Time\n\tif sj.Status.LastScheduleTime != nil {\n\t\tearliestTime = sj.Status.LastScheduleTime.Time\n\t} else {\n\t\t\/\/ If none found, then this is either a recently created scheduledJob,\n\t\t\/\/ or the active\/completed info was somehow lost (contract for status\n\t\t\/\/ in kubernetes says it may need to be recreated), or that we have\n\t\t\/\/ started a job, but have not noticed it yet (distributed systems can\n\t\t\/\/ have arbitrary delays). In any case, use the creation time of the\n\t\t\/\/ CronJob as last known start time.\n\t\tearliestTime = sj.ObjectMeta.CreationTimestamp.Time\n\t}\n\tif sj.Spec.StartingDeadlineSeconds != nil {\n\t\t\/\/ Controller is not going to schedule anything below this point\n\t\tschedulingDeadline := now.Add(-time.Second * time.Duration(*sj.Spec.StartingDeadlineSeconds))\n\n\t\tif schedulingDeadline.After(earliestTime) {\n\t\t\tearliestTime = schedulingDeadline\n\t\t}\n\t}\n\tif earliestTime.After(now) {\n\t\treturn []time.Time{}, nil\n\t}\n\n\tfor t := sched.Next(earliestTime); !t.After(now); t = sched.Next(t) {\n\t\tstarts = append(starts, t)\n\t\t\/\/ An object might miss several starts. For example, if\n\t\t\/\/ controller gets wedged on friday at 5:01pm when everyone has\n\t\t\/\/ gone home, and someone comes in on tuesday AM and discovers\n\t\t\/\/ the problem and restarts the controller, then all the hourly\n\t\t\/\/ jobs, more than 80 of them for one hourly scheduledJob, should\n\t\t\/\/ all start running with no further intervention (if the scheduledJob\n\t\t\/\/ allows concurrency and late starts).\n\t\t\/\/\n\t\t\/\/ However, if there is a bug somewhere, or incorrect clock\n\t\t\/\/ on controller's server or apiservers (for setting creationTimestamp)\n\t\t\/\/ then there could be so many missed start times (it could be off\n\t\t\/\/ by decades or more), that it would eat up all the CPU and memory\n\t\t\/\/ of this controller. In that case, we want to not try to list\n\t\t\/\/ all the missed start times.\n\t\t\/\/\n\t\t\/\/ I've somewhat arbitrarily picked 100, as more than 80,\n\t\t\/\/ but less than \"lots\".\n\t\tif len(starts) > 100 {\n\t\t\t\/\/ We can't get the most recent times so just return an empty slice\n\t\t\treturn []time.Time{}, fmt.Errorf(\"Too many missed start time (> 100). Set or decrease .spec.startingDeadlineSeconds or check clock skew.\")\n\t\t}\n\t}\n\treturn starts, nil\n}\n\n\/\/ getJobFromTemplate makes a Job from a CronJob\nfunc getJobFromTemplate(sj *batchv1beta1.CronJob, scheduledTime time.Time) (*batchv1.Job, error) {\n\t\/\/ TODO: consider adding the following labels:\n\t\/\/ nominal-start-time=$RFC_3339_DATE_OF_INTENDED_START -- for user convenience\n\t\/\/ scheduled-job-name=$SJ_NAME -- for user convenience\n\tlabels := copyLabels(&sj.Spec.JobTemplate)\n\tannotations := copyAnnotations(&sj.Spec.JobTemplate)\n\t\/\/ We want job names for a given nominal start time to have a deterministic name to avoid the same job being created twice\n\tname := fmt.Sprintf(\"%s-%d\", sj.Name, getTimeHash(scheduledTime))\n\n\tjob := &batchv1.Job{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tLabels: labels,\n\t\t\tAnnotations: annotations,\n\t\t\tName: name,\n\t\t\tOwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(sj, controllerKind)},\n\t\t},\n\t}\n\tif err := legacyscheme.Scheme.Convert(&sj.Spec.JobTemplate.Spec, &job.Spec, nil); err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to convert job template: %v\", err)\n\t}\n\treturn job, nil\n}\n\n\/\/ getTimeHash returns Unix Epoch Time\nfunc getTimeHash(scheduledTime time.Time) int64 {\n\treturn scheduledTime.Unix()\n}\n\n\/\/ makeCreatedByRefJson makes a json string with an object reference for use in \"created-by\" annotation value\nfunc makeCreatedByRefJson(object runtime.Object) (string, error) {\n\tcreatedByRef, err := ref.GetReference(legacyscheme.Scheme, object)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to get controller reference: %v\", err)\n\t}\n\n\t\/\/ TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients\n\t\/\/ would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment.\n\t\/\/ We need to consistently handle this case of annotation versioning.\n\tcodec := legacyscheme.Codecs.LegacyCodec(schema.GroupVersion{Group: v1.GroupName, Version: \"v1\"})\n\n\tcreatedByRefJson, err := runtime.Encode(codec, &v1.SerializedReference{\n\t\tReference: *createdByRef,\n\t})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to serialize controller reference: %v\", err)\n\t}\n\treturn string(createdByRefJson), nil\n}\n\nfunc getFinishedStatus(j *batchv1.Job) (bool, batchv1.JobConditionType) {\n\tfor _, c := range j.Status.Conditions {\n\t\tif (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == v1.ConditionTrue {\n\t\t\treturn true, c.Type\n\t\t}\n\t}\n\treturn false, \"\"\n}\n\nfunc IsJobFinished(j *batchv1.Job) bool {\n\tisFinished, _ := getFinishedStatus(j)\n\treturn isFinished\n}\n\n\/\/ byJobStartTime sorts a list of jobs by start timestamp, using their names as a tie breaker.\ntype byJobStartTime []batchv1.Job\n\nfunc (o byJobStartTime) Len() int { return len(o) }\nfunc (o byJobStartTime) Swap(i, j int) { o[i], o[j] = o[j], o[i] }\n\nfunc (o byJobStartTime) Less(i, j int) bool {\n\tif o[j].Status.StartTime == nil {\n\t\treturn o[i].Status.StartTime != nil\n\t}\n\n\tif o[i].Status.StartTime.Equal(o[j].Status.StartTime) {\n\t\treturn o[i].Name < o[j].Name\n\t}\n\n\treturn o[i].Status.StartTime.Before(o[j].Status.StartTime)\n}\n<commit_msg>cronjob_remove_getNextStartTimeAfter<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cronjob\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/robfig\/cron\"\n\n\tbatchv1 \"k8s.io\/api\/batch\/v1\"\n\tbatchv1beta1 \"k8s.io\/api\/batch\/v1beta1\"\n\t\"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\t\"k8s.io\/apimachinery\/pkg\/types\"\n\tref \"k8s.io\/client-go\/tools\/reference\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/legacyscheme\"\n)\n\n\/\/ Utilities for dealing with Jobs and CronJobs and time.\n\nfunc inActiveList(sj batchv1beta1.CronJob, uid types.UID) bool {\n\tfor _, j := range sj.Status.Active {\n\t\tif j.UID == uid {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc deleteFromActiveList(sj *batchv1beta1.CronJob, uid types.UID) {\n\tif sj == nil {\n\t\treturn\n\t}\n\tnewActive := []v1.ObjectReference{}\n\tfor _, j := range sj.Status.Active {\n\t\tif j.UID != uid {\n\t\t\tnewActive = append(newActive, j)\n\t\t}\n\t}\n\tsj.Status.Active = newActive\n}\n\n\/\/ getParentUIDFromJob extracts UID of job's parent and whether it was found\nfunc getParentUIDFromJob(j batchv1.Job) (types.UID, bool) {\n\tcontrollerRef := metav1.GetControllerOf(&j)\n\n\tif controllerRef == nil {\n\t\treturn types.UID(\"\"), false\n\t}\n\n\tif controllerRef.Kind != \"CronJob\" {\n\t\tglog.V(4).Infof(\"Job with non-CronJob parent, name %s namespace %s\", j.Name, j.Namespace)\n\t\treturn types.UID(\"\"), false\n\t}\n\n\treturn controllerRef.UID, true\n}\n\n\/\/ groupJobsByParent groups jobs into a map keyed by the job parent UID (e.g. scheduledJob).\n\/\/ It has no receiver, to facilitate testing.\nfunc groupJobsByParent(js []batchv1.Job) map[types.UID][]batchv1.Job {\n\tjobsBySj := make(map[types.UID][]batchv1.Job)\n\tfor _, job := range js {\n\t\tparentUID, found := getParentUIDFromJob(job)\n\t\tif !found {\n\t\t\tglog.V(4).Infof(\"Unable to get parent uid from job %s in namespace %s\", job.Name, job.Namespace)\n\t\t\tcontinue\n\t\t}\n\t\tjobsBySj[parentUID] = append(jobsBySj[parentUID], job)\n\t}\n\treturn jobsBySj\n}\n\n\/\/ getRecentUnmetScheduleTimes gets a slice of times (from oldest to latest) that have passed when a Job should have started but did not.\n\/\/\n\/\/ If there are too many (>100) unstarted times, just give up and return an empty slice.\n\/\/ If there were missed times prior to the last known start time, then those are not returned.\nfunc getRecentUnmetScheduleTimes(sj batchv1beta1.CronJob, now time.Time) ([]time.Time, error) {\n\tstarts := []time.Time{}\n\tsched, err := cron.ParseStandard(sj.Spec.Schedule)\n\tif err != nil {\n\t\treturn starts, fmt.Errorf(\"Unparseable schedule: %s : %s\", sj.Spec.Schedule, err)\n\t}\n\n\tvar earliestTime time.Time\n\tif sj.Status.LastScheduleTime != nil {\n\t\tearliestTime = sj.Status.LastScheduleTime.Time\n\t} else {\n\t\t\/\/ If none found, then this is either a recently created scheduledJob,\n\t\t\/\/ or the active\/completed info was somehow lost (contract for status\n\t\t\/\/ in kubernetes says it may need to be recreated), or that we have\n\t\t\/\/ started a job, but have not noticed it yet (distributed systems can\n\t\t\/\/ have arbitrary delays). In any case, use the creation time of the\n\t\t\/\/ CronJob as last known start time.\n\t\tearliestTime = sj.ObjectMeta.CreationTimestamp.Time\n\t}\n\tif sj.Spec.StartingDeadlineSeconds != nil {\n\t\t\/\/ Controller is not going to schedule anything below this point\n\t\tschedulingDeadline := now.Add(-time.Second * time.Duration(*sj.Spec.StartingDeadlineSeconds))\n\n\t\tif schedulingDeadline.After(earliestTime) {\n\t\t\tearliestTime = schedulingDeadline\n\t\t}\n\t}\n\tif earliestTime.After(now) {\n\t\treturn []time.Time{}, nil\n\t}\n\n\tfor t := sched.Next(earliestTime); !t.After(now); t = sched.Next(t) {\n\t\tstarts = append(starts, t)\n\t\t\/\/ An object might miss several starts. For example, if\n\t\t\/\/ controller gets wedged on friday at 5:01pm when everyone has\n\t\t\/\/ gone home, and someone comes in on tuesday AM and discovers\n\t\t\/\/ the problem and restarts the controller, then all the hourly\n\t\t\/\/ jobs, more than 80 of them for one hourly scheduledJob, should\n\t\t\/\/ all start running with no further intervention (if the scheduledJob\n\t\t\/\/ allows concurrency and late starts).\n\t\t\/\/\n\t\t\/\/ However, if there is a bug somewhere, or incorrect clock\n\t\t\/\/ on controller's server or apiservers (for setting creationTimestamp)\n\t\t\/\/ then there could be so many missed start times (it could be off\n\t\t\/\/ by decades or more), that it would eat up all the CPU and memory\n\t\t\/\/ of this controller. In that case, we want to not try to list\n\t\t\/\/ all the missed start times.\n\t\t\/\/\n\t\t\/\/ I've somewhat arbitrarily picked 100, as more than 80,\n\t\t\/\/ but less than \"lots\".\n\t\tif len(starts) > 100 {\n\t\t\t\/\/ We can't get the most recent times so just return an empty slice\n\t\t\treturn []time.Time{}, fmt.Errorf(\"Too many missed start time (> 100). Set or decrease .spec.startingDeadlineSeconds or check clock skew.\")\n\t\t}\n\t}\n\treturn starts, nil\n}\n\n\/\/ getJobFromTemplate makes a Job from a CronJob\nfunc getJobFromTemplate(sj *batchv1beta1.CronJob, scheduledTime time.Time) (*batchv1.Job, error) {\n\t\/\/ TODO: consider adding the following labels:\n\t\/\/ nominal-start-time=$RFC_3339_DATE_OF_INTENDED_START -- for user convenience\n\t\/\/ scheduled-job-name=$SJ_NAME -- for user convenience\n\tlabels := copyLabels(&sj.Spec.JobTemplate)\n\tannotations := copyAnnotations(&sj.Spec.JobTemplate)\n\t\/\/ We want job names for a given nominal start time to have a deterministic name to avoid the same job being created twice\n\tname := fmt.Sprintf(\"%s-%d\", sj.Name, getTimeHash(scheduledTime))\n\n\tjob := &batchv1.Job{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tLabels: labels,\n\t\t\tAnnotations: annotations,\n\t\t\tName: name,\n\t\t\tOwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(sj, controllerKind)},\n\t\t},\n\t}\n\tif err := legacyscheme.Scheme.Convert(&sj.Spec.JobTemplate.Spec, &job.Spec, nil); err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to convert job template: %v\", err)\n\t}\n\treturn job, nil\n}\n\n\/\/ getTimeHash returns Unix Epoch Time\nfunc getTimeHash(scheduledTime time.Time) int64 {\n\treturn scheduledTime.Unix()\n}\n\n\/\/ makeCreatedByRefJson makes a json string with an object reference for use in \"created-by\" annotation value\nfunc makeCreatedByRefJson(object runtime.Object) (string, error) {\n\tcreatedByRef, err := ref.GetReference(legacyscheme.Scheme, object)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to get controller reference: %v\", err)\n\t}\n\n\t\/\/ TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients\n\t\/\/ would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment.\n\t\/\/ We need to consistently handle this case of annotation versioning.\n\tcodec := legacyscheme.Codecs.LegacyCodec(schema.GroupVersion{Group: v1.GroupName, Version: \"v1\"})\n\n\tcreatedByRefJson, err := runtime.Encode(codec, &v1.SerializedReference{\n\t\tReference: *createdByRef,\n\t})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to serialize controller reference: %v\", err)\n\t}\n\treturn string(createdByRefJson), nil\n}\n\nfunc getFinishedStatus(j *batchv1.Job) (bool, batchv1.JobConditionType) {\n\tfor _, c := range j.Status.Conditions {\n\t\tif (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == v1.ConditionTrue {\n\t\t\treturn true, c.Type\n\t\t}\n\t}\n\treturn false, \"\"\n}\n\nfunc IsJobFinished(j *batchv1.Job) bool {\n\tisFinished, _ := getFinishedStatus(j)\n\treturn isFinished\n}\n\n\/\/ byJobStartTime sorts a list of jobs by start timestamp, using their names as a tie breaker.\ntype byJobStartTime []batchv1.Job\n\nfunc (o byJobStartTime) Len() int { return len(o) }\nfunc (o byJobStartTime) Swap(i, j int) { o[i], o[j] = o[j], o[i] }\n\nfunc (o byJobStartTime) Less(i, j int) bool {\n\tif o[j].Status.StartTime == nil {\n\t\treturn o[i].Status.StartTime != nil\n\t}\n\n\tif o[i].Status.StartTime.Equal(o[j].Status.StartTime) {\n\t\treturn o[i].Name < o[j].Name\n\t}\n\n\treturn o[i].Status.StartTime.Before(o[j].Status.StartTime)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2013 ActiveState Software Inc. All rights reserved.\n\npackage watch\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/ActiveState\/tail\/util\"\n\t\"gopkg.in\/fsnotify.v0\"\n\t\"gopkg.in\/tomb.v1\"\n)\n\n\/\/ InotifyFileWatcher uses inotify to monitor file changes.\ntype InotifyFileWatcher struct {\n\tFilename string\n\tSize int64\n\tw *fsnotify.Watcher\n}\n\nfunc NewInotifyFileWatcher(filename string, w *fsnotify.Watcher) *InotifyFileWatcher {\n\tfw := &InotifyFileWatcher{filename, 0, w}\n\treturn fw\n}\n\nfunc (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error {\n\tdirname := filepath.Dir(fw.Filename)\n\n\t\/\/ Watch for new files to be created in the parent directory.\n\terr := fw.w.WatchFlags(dirname, fsnotify.FSN_CREATE)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer fw.w.RemoveWatch(dirname)\n\n\t\/\/ Do a real check now as the file might have been created before\n\t\/\/ calling `WatchFlags` above.\n\tif _, err = os.Stat(fw.Filename); !os.IsNotExist(err) {\n\t\t\/\/ file exists, or stat returned an error.\n\t\treturn err\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase evt, ok := <-fw.w.Event:\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"inotify watcher has been closed\")\n\t\t\t} else if filepath.Base(evt.Name) == filepath.Base(fw.Filename) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase <-t.Dying():\n\t\t\treturn tomb.ErrDying\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, fi os.FileInfo) *FileChanges {\n\tchanges := NewFileChanges()\n\n\terr := fw.w.Watch(fw.Filename)\n\tif err != nil {\n\t\tutil.Fatal(\"Error watching %v: %v\", fw.Filename, err)\n\t}\n\n\tfw.Size = fi.Size()\n\n\tgo func() {\n\t\tdefer fw.w.RemoveWatch(fw.Filename)\n\t\tdefer changes.Close()\n\n\t\tfor {\n\t\t\tprevSize := fw.Size\n\n\t\t\tvar evt *fsnotify.FileEvent\n\t\t\tvar ok bool\n\n\t\t\tselect {\n\t\t\tcase evt, ok = <-fw.w.Event:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-t.Dying():\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tswitch {\n\t\t\tcase evt.IsDelete():\n\t\t\t\tfallthrough\n\n\t\t\tcase evt.IsRename():\n\t\t\t\tchanges.NotifyDeleted()\n\t\t\t\treturn\n\n\t\t\tcase evt.IsModify():\n\t\t\t\tfi, err := os.Stat(fw.Filename)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif os.IsNotExist(err) {\n\t\t\t\t\t\tchanges.NotifyDeleted()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ XXX: report this error back to the user\n\t\t\t\t\tutil.Fatal(\"Failed to stat file %v: %v\", fw.Filename, err)\n\t\t\t\t}\n\t\t\t\tfw.Size = fi.Size()\n\n\t\t\t\tif prevSize > 0 && prevSize > fw.Size {\n\t\t\t\t\tchanges.NotifyTruncated()\n\t\t\t\t} else {\n\t\t\t\t\tchanges.NotifyModified()\n\t\t\t\t}\n\t\t\t\tprevSize = fw.Size\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn changes\n}\n<commit_msg>Watch the *directory* for file deletions instead of the file itself.<commit_after>\/\/ Copyright (c) 2013 ActiveState Software Inc. All rights reserved.\n\npackage watch\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/ActiveState\/tail\/util\"\n\t\"gopkg.in\/fsnotify.v0\"\n\t\"gopkg.in\/tomb.v1\"\n)\n\n\/\/ InotifyFileWatcher uses inotify to monitor file changes.\ntype InotifyFileWatcher struct {\n\tFilename string\n\tSize int64\n\tw *fsnotify.Watcher\n}\n\nfunc NewInotifyFileWatcher(filename string, w *fsnotify.Watcher) *InotifyFileWatcher {\n\tfw := &InotifyFileWatcher{filename, 0, w}\n\treturn fw\n}\n\nfunc (fw *InotifyFileWatcher) BlockUntilExists(t *tomb.Tomb) error {\n\tdirname := filepath.Dir(fw.Filename)\n\n\t\/\/ Watch for new files to be created in the parent directory.\n\terr := fw.w.WatchFlags(dirname, fsnotify.FSN_CREATE)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer fw.w.RemoveWatch(dirname)\n\n\t\/\/ Do a real check now as the file might have been created before\n\t\/\/ calling `WatchFlags` above.\n\tif _, err = os.Stat(fw.Filename); !os.IsNotExist(err) {\n\t\t\/\/ file exists, or stat returned an error.\n\t\treturn err\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase evt, ok := <-fw.w.Event:\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"inotify watcher has been closed\")\n\t\t\t} else if filepath.Base(evt.Name) == filepath.Base(fw.Filename) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase <-t.Dying():\n\t\t\treturn tomb.ErrDying\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc (fw *InotifyFileWatcher) ChangeEvents(t *tomb.Tomb, fi os.FileInfo) *FileChanges {\n\tchanges := NewFileChanges()\n\n\tif err := fw.w.Watch(fw.Filename); err != nil {\n\t\tutil.Fatal(\"Error watching %v: %v\", fw.Filename, err)\n\t}\n\n\t\/\/ Watch the directory to be notified when the file is deleted since the file\n\t\/\/ watch is on the inode, not the path.\n\tdirname := filepath.Dir(fw.Filename)\n\tif err := fw.w.WatchFlags(dirname, fsnotify.FSN_DELETE); err != nil {\n\t\tutil.Fatal(\"Error watching %v: %v\", dirname, err)\n\t}\n\n\tfw.Size = fi.Size()\n\n\tgo func() {\n\t\tdefer fw.w.RemoveWatch(fw.Filename)\n\t\tdefer fw.w.RemoveWatch(dirname)\n\t\tdefer changes.Close()\n\n\t\tfor {\n\t\t\tprevSize := fw.Size\n\n\t\t\tvar evt *fsnotify.FileEvent\n\t\t\tvar ok bool\n\n\t\t\tselect {\n\t\t\tcase evt, ok = <-fw.w.Event:\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-t.Dying():\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tswitch {\n\t\t\tcase evt.IsDelete():\n\t\t\t\tif filepath.Base(evt.Name) != filepath.Base(fw.Filename) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfallthrough\n\n\t\t\tcase evt.IsRename():\n\t\t\t\tchanges.NotifyDeleted()\n\t\t\t\treturn\n\n\t\t\tcase evt.IsModify():\n\t\t\t\tfi, err := os.Stat(fw.Filename)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif os.IsNotExist(err) {\n\t\t\t\t\t\tchanges.NotifyDeleted()\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ XXX: report this error back to the user\n\t\t\t\t\tutil.Fatal(\"Failed to stat file %v: %v\", fw.Filename, err)\n\t\t\t\t}\n\t\t\t\tfw.Size = fi.Size()\n\n\t\t\t\tif prevSize > 0 && prevSize > fw.Size {\n\t\t\t\t\tchanges.NotifyTruncated()\n\t\t\t\t} else {\n\t\t\t\t\tchanges.NotifyModified()\n\t\t\t\t}\n\t\t\t\tprevSize = fw.Size\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn changes\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage elliptic\n\nimport (\n\t\"crypto\/elliptic\/internal\/fiat\"\n\t\"math\/big\"\n)\n\ntype p521Curve struct {\n\t*CurveParams\n}\n\nvar p521 p521Curve\nvar p521Params *CurveParams\n\nfunc initP521() {\n\t\/\/ See FIPS 186-3, section D.2.5\n\tp521.CurveParams = &CurveParams{Name: \"P-521\"}\n\tp521.P, _ = new(big.Int).SetString(\"6864797660130609714981900799081393217269435300143305409394463459185543183397656052122559640661454554977296311391480858037121987999716643812574028291115057151\", 10)\n\tp521.N, _ = new(big.Int).SetString(\"6864797660130609714981900799081393217269435300143305409394463459185543183397655394245057746333217197532963996371363321113864768612440380340372808892707005449\", 10)\n\tp521.B, _ = new(big.Int).SetString(\"051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00\", 16)\n\tp521.Gx, _ = new(big.Int).SetString(\"c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66\", 16)\n\tp521.Gy, _ = new(big.Int).SetString(\"11839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650\", 16)\n\tp521.BitSize = 521\n}\n\nfunc (curve p521Curve) Params() *CurveParams {\n\treturn curve.CurveParams\n}\n\nfunc (curve p521Curve) IsOnCurve(x, y *big.Int) bool {\n\tx1 := bigIntToFiatP521(x)\n\ty1 := bigIntToFiatP521(y)\n\tb := bigIntToFiatP521(curve.B) \/\/ TODO: precompute this value.\n\n\t\/\/ x³ - 3x + b.\n\tx3 := new(fiat.P521Element).Square(x1)\n\tx3.Mul(x3, x1)\n\n\tthreeX := new(fiat.P521Element).Add(x1, x1)\n\tthreeX.Add(threeX, x1)\n\n\tx3.Sub(x3, threeX)\n\tx3.Add(x3, b)\n\n\t\/\/ y² = x³ - 3x + b\n\ty2 := new(fiat.P521Element).Square(y1)\n\n\treturn x3.Equal(y2) == 1\n}\n\ntype p512Point struct {\n\tx, y, z *fiat.P521Element\n}\n\nfunc fiatP521ToBigInt(x *fiat.P521Element) *big.Int {\n\txBytes := x.Bytes()\n\tfor i := range xBytes[:len(xBytes)\/2] {\n\t\txBytes[i], xBytes[len(xBytes)-i-1] = xBytes[len(xBytes)-i-1], xBytes[i]\n\t}\n\treturn new(big.Int).SetBytes(xBytes)\n}\n\n\/\/ affineFromJacobian brings a point in Jacobian coordinates back to affine\n\/\/ coordinates, with (0, 0) representing infinity by convention. It also goes\n\/\/ back to big.Int values to match the exposed API.\nfunc (curve p521Curve) affineFromJacobian(p *p512Point) (x, y *big.Int) {\n\tif p.z.IsZero() == 1 {\n\t\treturn new(big.Int), new(big.Int)\n\t}\n\n\tzinv := new(fiat.P521Element).Invert(p.z)\n\tzinvsq := new(fiat.P521Element).Mul(zinv, zinv)\n\n\txx := new(fiat.P521Element).Mul(p.x, zinvsq)\n\tzinvsq.Mul(zinvsq, zinv)\n\tyy := new(fiat.P521Element).Mul(p.y, zinvsq)\n\n\treturn fiatP521ToBigInt(xx), fiatP521ToBigInt(yy)\n}\n\nfunc bigIntToFiatP521(x *big.Int) *fiat.P521Element {\n\txBytes := new(big.Int).Mod(x, p521.P).FillBytes(make([]byte, 66))\n\tfor i := range xBytes[:len(xBytes)\/2] {\n\t\txBytes[i], xBytes[len(xBytes)-i-1] = xBytes[len(xBytes)-i-1], xBytes[i]\n\t}\n\tx1, err := new(fiat.P521Element).SetBytes(xBytes)\n\tif err != nil {\n\t\t\/\/ The input is reduced modulo P and encoded in a fixed size bytes\n\t\t\/\/ slice, this should be impossible.\n\t\tpanic(\"internal error: bigIntToFiatP521\")\n\t}\n\treturn x1\n}\n\n\/\/ jacobianFromAffine converts (x, y) affine coordinates into (x, y, z) Jacobian\n\/\/ coordinates. It also converts from big.Int to fiat, which is necessarily a\n\/\/ messy and variable-time operation, which we can't avoid due to the exposed API.\nfunc (curve p521Curve) jacobianFromAffine(x, y *big.Int) *p512Point {\n\t\/\/ (0, 0) is by convention the point at infinity, which can't be represented\n\t\/\/ in affine coordinates, but is (0, 0, 0) in Jacobian.\n\tif x.Sign() == 0 && y.Sign() == 0 {\n\t\treturn &p512Point{\n\t\t\tx: new(fiat.P521Element),\n\t\t\ty: new(fiat.P521Element),\n\t\t\tz: new(fiat.P521Element),\n\t\t}\n\t}\n\treturn &p512Point{\n\t\tx: bigIntToFiatP521(x),\n\t\ty: bigIntToFiatP521(y),\n\t\tz: new(fiat.P521Element).One(),\n\t}\n}\n\nfunc (curve p521Curve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {\n\tp1 := curve.jacobianFromAffine(x1, y1)\n\tp2 := curve.jacobianFromAffine(x2, y2)\n\treturn curve.affineFromJacobian(p1.addJacobian(p1, p2))\n}\n\n\/\/ addJacobian sets q = p1 + p2, and returns q. The points may overlap.\nfunc (q *p512Point) addJacobian(p1, p2 *p512Point) *p512Point {\n\t\/\/ https:\/\/hyperelliptic.org\/EFD\/g1p\/auto-shortw-jacobian-3.html#addition-add-2007-bl\n\tif p1.z.IsZero() == 1 {\n\t\tq.x.Set(p2.x)\n\t\tq.y.Set(p2.y)\n\t\tq.z.Set(p2.z)\n\t\treturn q\n\t}\n\tif p2.z.IsZero() == 1 {\n\t\tq.x.Set(p1.x)\n\t\tq.y.Set(p1.y)\n\t\tq.z.Set(p1.z)\n\t\treturn q\n\t}\n\n\tz1z1 := new(fiat.P521Element).Square(p1.z)\n\tz2z2 := new(fiat.P521Element).Square(p2.z)\n\n\tu1 := new(fiat.P521Element).Mul(p1.x, z2z2)\n\tu2 := new(fiat.P521Element).Mul(p2.x, z1z1)\n\th := new(fiat.P521Element).Sub(u2, u1)\n\txEqual := h.IsZero() == 1\n\ti := new(fiat.P521Element).Add(h, h)\n\ti.Square(i)\n\tj := new(fiat.P521Element).Mul(h, i)\n\n\ts1 := new(fiat.P521Element).Mul(p1.y, p2.z)\n\ts1.Mul(s1, z2z2)\n\ts2 := new(fiat.P521Element).Mul(p2.y, p1.z)\n\ts2.Mul(s2, z1z1)\n\tr := new(fiat.P521Element).Sub(s2, s1)\n\tyEqual := r.IsZero() == 1\n\tif xEqual && yEqual {\n\t\treturn q.doubleJacobian(p1)\n\t}\n\tr.Add(r, r)\n\tv := new(fiat.P521Element).Mul(u1, i)\n\n\tq.x.Set(r)\n\tq.x.Square(q.x)\n\tq.x.Sub(q.x, j)\n\tq.x.Sub(q.x, v)\n\tq.x.Sub(q.x, v)\n\n\tq.y.Set(r)\n\tv.Sub(v, q.x)\n\tq.y.Mul(q.y, v)\n\ts1.Mul(s1, j)\n\ts1.Add(s1, s1)\n\tq.y.Sub(q.y, s1)\n\n\tq.z.Add(p1.z, p2.z)\n\tq.z.Square(q.z)\n\tq.z.Sub(q.z, z1z1)\n\tq.z.Sub(q.z, z2z2)\n\tq.z.Mul(q.z, h)\n\n\treturn q\n}\n\nfunc (curve p521Curve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {\n\tp := curve.jacobianFromAffine(x1, y1)\n\treturn curve.affineFromJacobian(p.doubleJacobian(p))\n}\n\n\/\/ doubleJacobian sets q = p + p, and returns q. The points may overlap.\nfunc (q *p512Point) doubleJacobian(p *p512Point) *p512Point {\n\t\/\/ https:\/\/hyperelliptic.org\/EFD\/g1p\/auto-shortw-jacobian-3.html#doubling-dbl-2001-b\n\tdelta := new(fiat.P521Element).Square(p.z)\n\tgamma := new(fiat.P521Element).Square(p.y)\n\talpha := new(fiat.P521Element).Sub(p.x, delta)\n\talpha2 := new(fiat.P521Element).Add(p.x, delta)\n\talpha.Mul(alpha, alpha2)\n\talpha2.Set(alpha)\n\talpha.Add(alpha, alpha)\n\talpha.Add(alpha, alpha2)\n\n\tbeta := alpha2.Mul(p.x, gamma)\n\n\tq.x.Square(alpha)\n\tbeta8 := new(fiat.P521Element).Add(beta, beta)\n\tbeta8.Add(beta8, beta8)\n\tbeta8.Add(beta8, beta8)\n\tq.x.Sub(q.x, beta8)\n\n\tq.z.Add(p.y, p.z)\n\tq.z.Square(q.z)\n\tq.z.Sub(q.z, gamma)\n\tq.z.Sub(q.z, delta)\n\n\tbeta.Add(beta, beta)\n\tbeta.Add(beta, beta)\n\tbeta.Sub(beta, q.x)\n\tq.y.Mul(alpha, beta)\n\n\tgamma.Square(gamma)\n\tgamma.Add(gamma, gamma)\n\tgamma.Add(gamma, gamma)\n\tgamma.Add(gamma, gamma)\n\n\tq.y.Sub(q.y, gamma)\n\n\treturn q\n}\n\nfunc (curve p521Curve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {\n\tB := curve.jacobianFromAffine(Bx, By)\n\tp := &p512Point{\n\t\tx: new(fiat.P521Element),\n\t\ty: new(fiat.P521Element),\n\t\tz: new(fiat.P521Element),\n\t}\n\n\tfor _, byte := range k {\n\t\tfor bitNum := 0; bitNum < 8; bitNum++ {\n\t\t\tp.doubleJacobian(p)\n\t\t\tif byte&0x80 == 0x80 {\n\t\t\t\tp.addJacobian(B, p)\n\t\t\t}\n\t\t\tbyte <<= 1\n\t\t}\n\t}\n\n\treturn curve.affineFromJacobian(p)\n}\n\nfunc (curve p521Curve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {\n\treturn curve.ScalarMult(curve.Gx, curve.Gy, k)\n}\n<commit_msg>crypto\/elliptic: make P-521 scalar multiplication constant time<commit_after>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage elliptic\n\nimport (\n\t\"crypto\/elliptic\/internal\/fiat\"\n\t\"math\/big\"\n)\n\ntype p521Curve struct {\n\t*CurveParams\n}\n\nvar p521 p521Curve\nvar p521Params *CurveParams\n\nfunc initP521() {\n\t\/\/ See FIPS 186-3, section D.2.5\n\tp521.CurveParams = &CurveParams{Name: \"P-521\"}\n\tp521.P, _ = new(big.Int).SetString(\"6864797660130609714981900799081393217269435300143305409394463459185543183397656052122559640661454554977296311391480858037121987999716643812574028291115057151\", 10)\n\tp521.N, _ = new(big.Int).SetString(\"6864797660130609714981900799081393217269435300143305409394463459185543183397655394245057746333217197532963996371363321113864768612440380340372808892707005449\", 10)\n\tp521.B, _ = new(big.Int).SetString(\"051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00\", 16)\n\tp521.Gx, _ = new(big.Int).SetString(\"c6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66\", 16)\n\tp521.Gy, _ = new(big.Int).SetString(\"11839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650\", 16)\n\tp521.BitSize = 521\n}\n\nfunc (curve p521Curve) Params() *CurveParams {\n\treturn curve.CurveParams\n}\n\nfunc (curve p521Curve) IsOnCurve(x, y *big.Int) bool {\n\tx1 := bigIntToFiatP521(x)\n\ty1 := bigIntToFiatP521(y)\n\tb := bigIntToFiatP521(curve.B) \/\/ TODO: precompute this value.\n\n\t\/\/ x³ - 3x + b.\n\tx3 := new(fiat.P521Element).Square(x1)\n\tx3.Mul(x3, x1)\n\n\tthreeX := new(fiat.P521Element).Add(x1, x1)\n\tthreeX.Add(threeX, x1)\n\n\tx3.Sub(x3, threeX)\n\tx3.Add(x3, b)\n\n\t\/\/ y² = x³ - 3x + b\n\ty2 := new(fiat.P521Element).Square(y1)\n\n\treturn x3.Equal(y2) == 1\n}\n\ntype p512Point struct {\n\tx, y, z *fiat.P521Element\n}\n\nfunc fiatP521ToBigInt(x *fiat.P521Element) *big.Int {\n\txBytes := x.Bytes()\n\tfor i := range xBytes[:len(xBytes)\/2] {\n\t\txBytes[i], xBytes[len(xBytes)-i-1] = xBytes[len(xBytes)-i-1], xBytes[i]\n\t}\n\treturn new(big.Int).SetBytes(xBytes)\n}\n\n\/\/ affineFromJacobian brings a point in Jacobian coordinates back to affine\n\/\/ coordinates, with (0, 0) representing infinity by convention. It also goes\n\/\/ back to big.Int values to match the exposed API.\nfunc (curve p521Curve) affineFromJacobian(p *p512Point) (x, y *big.Int) {\n\tif p.z.IsZero() == 1 {\n\t\treturn new(big.Int), new(big.Int)\n\t}\n\n\tzinv := new(fiat.P521Element).Invert(p.z)\n\tzinvsq := new(fiat.P521Element).Mul(zinv, zinv)\n\n\txx := new(fiat.P521Element).Mul(p.x, zinvsq)\n\tzinvsq.Mul(zinvsq, zinv)\n\tyy := new(fiat.P521Element).Mul(p.y, zinvsq)\n\n\treturn fiatP521ToBigInt(xx), fiatP521ToBigInt(yy)\n}\n\nfunc bigIntToFiatP521(x *big.Int) *fiat.P521Element {\n\txBytes := new(big.Int).Mod(x, p521.P).FillBytes(make([]byte, 66))\n\tfor i := range xBytes[:len(xBytes)\/2] {\n\t\txBytes[i], xBytes[len(xBytes)-i-1] = xBytes[len(xBytes)-i-1], xBytes[i]\n\t}\n\tx1, err := new(fiat.P521Element).SetBytes(xBytes)\n\tif err != nil {\n\t\t\/\/ The input is reduced modulo P and encoded in a fixed size bytes\n\t\t\/\/ slice, this should be impossible.\n\t\tpanic(\"internal error: bigIntToFiatP521\")\n\t}\n\treturn x1\n}\n\n\/\/ jacobianFromAffine converts (x, y) affine coordinates into (x, y, z) Jacobian\n\/\/ coordinates. It also converts from big.Int to fiat, which is necessarily a\n\/\/ messy and variable-time operation, which we can't avoid due to the exposed API.\nfunc (curve p521Curve) jacobianFromAffine(x, y *big.Int) *p512Point {\n\t\/\/ (0, 0) is by convention the point at infinity, which can't be represented\n\t\/\/ in affine coordinates, but is (0, 0, 0) in Jacobian.\n\tif x.Sign() == 0 && y.Sign() == 0 {\n\t\treturn &p512Point{\n\t\t\tx: new(fiat.P521Element),\n\t\t\ty: new(fiat.P521Element),\n\t\t\tz: new(fiat.P521Element),\n\t\t}\n\t}\n\treturn &p512Point{\n\t\tx: bigIntToFiatP521(x),\n\t\ty: bigIntToFiatP521(y),\n\t\tz: new(fiat.P521Element).One(),\n\t}\n}\n\nfunc (curve p521Curve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {\n\tp1 := curve.jacobianFromAffine(x1, y1)\n\tp2 := curve.jacobianFromAffine(x2, y2)\n\treturn curve.affineFromJacobian(p1.addJacobian(p1, p2))\n}\n\n\/\/ addJacobian sets q = p1 + p2, and returns q. The points may overlap.\nfunc (q *p512Point) addJacobian(p1, p2 *p512Point) *p512Point {\n\t\/\/ https:\/\/hyperelliptic.org\/EFD\/g1p\/auto-shortw-jacobian-3.html#addition-add-2007-bl\n\tz1IsZero := p1.z.IsZero()\n\tz2IsZero := p2.z.IsZero()\n\n\tz1z1 := new(fiat.P521Element).Square(p1.z)\n\tz2z2 := new(fiat.P521Element).Square(p2.z)\n\n\tu1 := new(fiat.P521Element).Mul(p1.x, z2z2)\n\tu2 := new(fiat.P521Element).Mul(p2.x, z1z1)\n\th := new(fiat.P521Element).Sub(u2, u1)\n\txEqual := h.IsZero() == 1\n\ti := new(fiat.P521Element).Add(h, h)\n\ti.Square(i)\n\tj := new(fiat.P521Element).Mul(h, i)\n\n\ts1 := new(fiat.P521Element).Mul(p1.y, p2.z)\n\ts1.Mul(s1, z2z2)\n\ts2 := new(fiat.P521Element).Mul(p2.y, p1.z)\n\ts2.Mul(s2, z1z1)\n\tr := new(fiat.P521Element).Sub(s2, s1)\n\tyEqual := r.IsZero() == 1\n\tif xEqual && yEqual && z1IsZero == 0 && z2IsZero == 0 {\n\t\treturn q.doubleJacobian(p1)\n\t}\n\tr.Add(r, r)\n\tv := new(fiat.P521Element).Mul(u1, i)\n\n\tx := new(fiat.P521Element).Set(r)\n\tx.Square(x)\n\tx.Sub(x, j)\n\tx.Sub(x, v)\n\tx.Sub(x, v)\n\n\ty := new(fiat.P521Element).Set(r)\n\tv.Sub(v, x)\n\ty.Mul(y, v)\n\ts1.Mul(s1, j)\n\ts1.Add(s1, s1)\n\ty.Sub(y, s1)\n\n\tz := new(fiat.P521Element).Add(p1.z, p2.z)\n\tz.Square(z)\n\tz.Sub(z, z1z1)\n\tz.Sub(z, z2z2)\n\tz.Mul(z, h)\n\n\tx.Select(p2.x, x, z1IsZero)\n\tx.Select(p1.x, x, z2IsZero)\n\ty.Select(p2.y, y, z1IsZero)\n\ty.Select(p1.y, y, z2IsZero)\n\tz.Select(p2.z, z, z1IsZero)\n\tz.Select(p1.z, z, z2IsZero)\n\n\tq.x.Set(x)\n\tq.y.Set(y)\n\tq.z.Set(z)\n\treturn q\n}\n\nfunc (curve p521Curve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {\n\tp := curve.jacobianFromAffine(x1, y1)\n\treturn curve.affineFromJacobian(p.doubleJacobian(p))\n}\n\n\/\/ doubleJacobian sets q = p + p, and returns q. The points may overlap.\nfunc (q *p512Point) doubleJacobian(p *p512Point) *p512Point {\n\t\/\/ https:\/\/hyperelliptic.org\/EFD\/g1p\/auto-shortw-jacobian-3.html#doubling-dbl-2001-b\n\tdelta := new(fiat.P521Element).Square(p.z)\n\tgamma := new(fiat.P521Element).Square(p.y)\n\talpha := new(fiat.P521Element).Sub(p.x, delta)\n\talpha2 := new(fiat.P521Element).Add(p.x, delta)\n\talpha.Mul(alpha, alpha2)\n\talpha2.Set(alpha)\n\talpha.Add(alpha, alpha)\n\talpha.Add(alpha, alpha2)\n\n\tbeta := alpha2.Mul(p.x, gamma)\n\n\tq.x.Square(alpha)\n\tbeta8 := new(fiat.P521Element).Add(beta, beta)\n\tbeta8.Add(beta8, beta8)\n\tbeta8.Add(beta8, beta8)\n\tq.x.Sub(q.x, beta8)\n\n\tq.z.Add(p.y, p.z)\n\tq.z.Square(q.z)\n\tq.z.Sub(q.z, gamma)\n\tq.z.Sub(q.z, delta)\n\n\tbeta.Add(beta, beta)\n\tbeta.Add(beta, beta)\n\tbeta.Sub(beta, q.x)\n\tq.y.Mul(alpha, beta)\n\n\tgamma.Square(gamma)\n\tgamma.Add(gamma, gamma)\n\tgamma.Add(gamma, gamma)\n\tgamma.Add(gamma, gamma)\n\n\tq.y.Sub(q.y, gamma)\n\n\treturn q\n}\n\nfunc (curve p521Curve) ScalarMult(Bx, By *big.Int, scalar []byte) (*big.Int, *big.Int) {\n\tB := curve.jacobianFromAffine(Bx, By)\n\tp, t := &p512Point{\n\t\tx: new(fiat.P521Element),\n\t\ty: new(fiat.P521Element),\n\t\tz: new(fiat.P521Element),\n\t}, &p512Point{\n\t\tx: new(fiat.P521Element),\n\t\ty: new(fiat.P521Element),\n\t\tz: new(fiat.P521Element),\n\t}\n\n\tfor _, byte := range scalar {\n\t\tfor bitNum := 0; bitNum < 8; bitNum++ {\n\t\t\tp.doubleJacobian(p)\n\t\t\tbit := (byte >> (7 - bitNum)) & 1\n\t\t\tt.addJacobian(p, B)\n\t\t\tp.x.Select(t.x, p.x, int(bit))\n\t\t\tp.y.Select(t.y, p.y, int(bit))\n\t\t\tp.z.Select(t.z, p.z, int(bit))\n\t\t}\n\t}\n\n\treturn curve.affineFromJacobian(p)\n}\n\nfunc (curve p521Curve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {\n\treturn curve.ScalarMult(curve.Gx, curve.Gy, k)\n}\n<|endoftext|>"} {"text":"<commit_before>package skunk\n\nimport (\n\t\"encoding\/json\"\n\t\"math\"\n\t\"strconv\"\n\t\"time\"\n)\n\n\/\/ Body represents the POSTed body of a NewRelic plugin's metrics data.\ntype Body struct {\n\tAgent AgentRep `json:\"agent\"`\n\tComponents []*Component `json:\"components\"`\n}\n\n\/\/ AgentRep describes a NewRelic agent.\ntype AgentRep struct {\n\tHost string `json:\"host\"`\n\t\/\/ PID zero is treated as an erroneous\/nonexistent PID. If you're shoving NewRelic into a Go-based scheduler in\n\t\/\/ the kernel, I guess you could open an issue for this. Otherwise, this seems reasonable to me.\n\tPID int `json:\"pid,omitempty\"`\n\tVersion string `json:\"version\"`\n}\n\n\/\/ Seconds is a convenience wrapper around a time.Duration to allow any duration to be used when describing the duration\n\/\/ of components' metrics.\ntype Seconds struct{ time.Duration }\n\nfunc (s Seconds) MarshalJSON() ([]byte, error) {\n\tvar i int64\n\tif int, frac := math.Modf(s.Seconds()); frac >= 0.5 {\n\t\ti = int64(int) + 1\n\t} else {\n\t\ti = int64(int)\n\t}\n\treturn strconv.AppendInt(nil, i, 10), nil\n}\n\n\/\/ Component describes a component in a NewRelic agent. It must contain a minimum of at least one metric, otherwise the\n\/\/ component is culled from its parent Body before constructing a JSON payload. All fields of the Component are\n\/\/ read-only once initialized.\ntype Component struct {\n\tName string `json:\"name\"`\n\tGUID string `json:\"guid\"`\n\t\/\/ Duration is the time elapsed, in seconds, for this snapshot of the component. The duration is rounded to the\n\t\/\/ nearest second. This is only used when constructing a payload using a copy of a Component.\n\tDuration Seconds `json:\"duration\"`\n\tMetrics map[string]Metric `json:\"metrics\"`\n\n\t\/\/ start is the time that the first metric was recorded. If start.IsZero is true, the time needs to be set to\n\t\/\/ the current time once a metric is added. The start time is cleared upon an agent successfully sending\n\t\/\/ a payload to NewRelic.\n\tstart time.Time\n\n\t\/\/ agent is a pointer to the Agent that owns this component.\n\tagent *Agent\n}\n\n\/\/ AddMetric adds a single metric to the Component. If the metric already exists by name in the Component, the value is\n\/\/ added to the existing metric, otherwise the metric is added as a ScalarMetric.\nfunc (c *Component) AddMetric(name string, value float64) {\n\tc.agent.ops <- func(*Agent) error {\n\t\tif m, ok := c.Metrics[name]; ok {\n\t\t\tc.Metrics[name] = m.Add(value)\n\t\t} else {\n\t\t\tc.Metrics[name] = ScalarMetric(value)\n\t\t}\n\n\t\t\/\/ Set the time over which the metric was gathered.\n\t\tif c.start.IsZero() {\n\t\t\tc.start = time.Now()\n\t\t\tc.Duration = Seconds{0}\n\t\t} else {\n\t\t\tc.Duration.Duration = time.Since(c.start)\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\n\/\/ Metric describes any metric that can have an additional value added to it. All metrics must be marshallable as JSON,\n\/\/ but are not required to implement MarshalJSON (e.g., RangeMetric).\n\/\/\n\/\/ The Add method of a Metric is used to get the result of adding an additional value to a metric. Metrics themselves\n\/\/ should be considered immutable, so the result must be a new Metric.\ntype Metric interface {\n\tAdd(value float64) Metric\n}\n\n\/\/ ScalarMetric is any singular metric that does not cover a range a values. Adding to a ScalarMetric produces\n\/\/ a RangeMetric.\ntype ScalarMetric float64\n\nfunc (s ScalarMetric) Add(value float64) Metric {\n\tf := float64(s)\n\treturn RangeMetric{\n\t\tTotal: f + value,\n\t\tCount: 2,\n\t\tMin: math.Min(value, f),\n\t\tMax: math.Max(value, f),\n\t\tSquare: math.Pow(f, 2) + math.Pow(value, 2),\n\t}\n}\n\nfunc (s ScalarMetric) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(float64(s))\n}\n\n\/\/ RangeMetric is any metric that covers a range of values. Adding to a RangeMetric produces a new RangeMetric.\ntype RangeMetric struct {\n\tTotal float64 `json:\"total\"`\n\tCount int `json:\"count\"`\n\tMin float64 `json:\"min\"`\n\tMax float64 `json:\"max\"`\n\t\/\/ Square is the sum of squares of all values recorded for the metric. This is simply A₁² + A₂² + Aₙ² where A is\n\t\/\/ the set of numbers recorded for this metric.\n\tSquare float64 `json:\"sum_of_squares\"`\n}\n\nfunc (r RangeMetric) Add(value float64) Metric {\n\treturn RangeMetric{\n\t\tTotal: r.Total + value,\n\t\tCount: r.Count + 1,\n\t\tMin: math.Min(value, r.Min),\n\t\tMax: math.Max(value, r.Max),\n\t\tSquare: r.Square + math.Pow(value, 2),\n\t}\n}\n<commit_msg>Add metric merging to components<commit_after>package skunk\n\nimport (\n\t\"encoding\/json\"\n\t\"math\"\n\t\"strconv\"\n\t\"time\"\n)\n\n\/\/ Body represents the POSTed body of a NewRelic plugin's metrics data.\ntype Body struct {\n\tAgent AgentRep `json:\"agent\"`\n\tComponents []*Component `json:\"components\"`\n}\n\n\/\/ AgentRep describes a NewRelic agent.\ntype AgentRep struct {\n\tHost string `json:\"host\"`\n\t\/\/ PID zero is treated as an erroneous\/nonexistent PID. If you're shoving NewRelic into a Go-based scheduler in\n\t\/\/ the kernel, I guess you could open an issue for this. Otherwise, this seems reasonable to me.\n\tPID int `json:\"pid,omitempty\"`\n\tVersion string `json:\"version\"`\n}\n\n\/\/ Seconds is a convenience wrapper around a time.Duration to allow any duration to be used when describing the duration\n\/\/ of components' metrics.\ntype Seconds struct{ time.Duration }\n\nfunc (s Seconds) MarshalJSON() ([]byte, error) {\n\tvar i int64\n\tif int, frac := math.Modf(s.Seconds()); frac >= 0.5 {\n\t\ti = int64(int) + 1\n\t} else {\n\t\ti = int64(int)\n\t}\n\treturn strconv.AppendInt(nil, i, 10), nil\n}\n\n\/\/ Component describes a component in a NewRelic agent. It must contain a minimum of at least one metric, otherwise the\n\/\/ component is culled from its parent Body before constructing a JSON payload. All fields of the Component are\n\/\/ read-only once initialized.\ntype Component struct {\n\tName string `json:\"name\"`\n\tGUID string `json:\"guid\"`\n\t\/\/ Duration is the time elapsed, in seconds, for this snapshot of the component. The duration is rounded to the\n\t\/\/ nearest second. This is only used when constructing a payload using a copy of a Component.\n\tDuration Seconds `json:\"duration\"`\n\tMetrics map[string]Metric `json:\"metrics\"`\n\n\t\/\/ start is the time that the first metric was recorded. If start.IsZero is true, the time needs to be set to\n\t\/\/ the current time once a metric is added. The start time is cleared upon an agent successfully sending\n\t\/\/ a payload to NewRelic.\n\tstart time.Time\n\n\t\/\/ agent is a pointer to the Agent that owns this component.\n\tagent *Agent\n}\n\n\/\/ AddMetric adds a single metric to the Component. If the metric already exists by name in the Component, the value is\n\/\/ added to the existing metric, otherwise the metric is added as a ScalarMetric.\nfunc (c *Component) AddMetric(name string, value float64) {\n\tc.MergeMetric(name, ScalarMetric(value))\n}\n\n\/\/ AddMetric adds a single metric to the Component. If the metric already exists by name in the Component, the value is\n\/\/ added to the existing metric, otherwise the metric is added as a ScalarMetric.\nfunc (c *Component) MergeMetric(name string, value Metric) {\n\tc.agent.ops <- func(*Agent) error {\n\t\tif m, ok := c.Metrics[name]; ok {\n\t\t\tc.Metrics[name] = m.Merge(value)\n\t\t} else {\n\t\t\tc.Metrics[name] = value\n\t\t}\n\n\t\t\/\/ Set the time over which the metric was gathered.\n\t\tif c.start.IsZero() {\n\t\t\tc.start = time.Now()\n\t\t\tc.Duration = Seconds{0}\n\t\t} else {\n\t\t\tc.Duration.Duration = time.Since(c.start)\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\n\/\/ Metric describes any metric that can have an additional value added to it. All metrics must be marshallable as JSON,\n\/\/ but are not required to implement MarshalJSON (e.g., RangeMetric).\n\/\/\n\/\/ The Add method of a Metric is used to get the result of adding an additional value to a metric. Metrics themselves\n\/\/ should be considered immutable, so the result must be a new Metric.\n\/\/\n\/\/ This shouldn't be implemented by other libraries.\ntype Metric interface {\n\tAdd(value float64) Metric\n\tMerge(Metric) Metric\n}\n\n\/\/ ScalarMetric is any singular metric that does not cover a range a values. Adding to a ScalarMetric produces\n\/\/ a RangeMetric.\ntype ScalarMetric float64\n\nfunc (s ScalarMetric) Add(value float64) Metric {\n\tf := float64(s)\n\treturn RangeMetric{\n\t\tTotal: f + value,\n\t\tCount: 2,\n\t\tMin: math.Min(value, f),\n\t\tMax: math.Max(value, f),\n\t\tSquare: math.Pow(f, 2) + math.Pow(value, 2),\n\t}\n}\n\nfunc (s ScalarMetric) Merge(value Metric) Metric {\n\tf := float64(s)\n\treturn value.Merge(RangeMetric{Total: f, Count: 1, Min: f, Max: f, Square: math.Pow(f, 2)})\n}\n\nfunc (s ScalarMetric) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(float64(s))\n}\n\n\/\/ RangeMetric is any metric that covers a range of values. Adding to a RangeMetric produces a new RangeMetric.\ntype RangeMetric struct {\n\tTotal float64 `json:\"total\"`\n\tCount int `json:\"count\"`\n\tMin float64 `json:\"min\"`\n\tMax float64 `json:\"max\"`\n\t\/\/ Square is the sum of squares of all values recorded for the metric. This is simply A₁² + A₂² + Aₙ² where A is\n\t\/\/ the set of numbers recorded for this metric.\n\tSquare float64 `json:\"sum_of_squares\"`\n}\n\nfunc (r RangeMetric) Add(value float64) Metric {\n\treturn RangeMetric{\n\t\tTotal: r.Total + value,\n\t\tCount: r.Count + 1,\n\t\tMin: math.Min(value, r.Min),\n\t\tMax: math.Max(value, r.Max),\n\t\tSquare: r.Square + math.Pow(value, 2),\n\t}\n}\n\nfunc (r RangeMetric) Merge(value Metric) Metric {\n\tswitch o := value.(type) {\n\tcase ScalarMetric:\n\t\tf := float64(o)\n\t\tr.Total += f\n\t\tr.Count++\n\t\tr.Min = math.Min(r.Min, f)\n\t\tr.Max = math.Max(r.Max, f)\n\t\tr.Square += math.Pow(f, 2)\n\tcase RangeMetric:\n\t\tr.Total += o.Total\n\t\tr.Count += o.Count\n\t\tr.Min = math.Min(r.Min, o.Min)\n\t\tr.Max = math.Max(r.Max, o.Max)\n\t\tr.Square += o.Square\n\tdefault:\n\t\t\/\/ Defer to the other metric to attempt the merge.\n\t\treturn value.Merge(r)\n\t}\n\treturn r\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016-2020 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build !privileged_tests\n\npackage labelsfilter\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/checker\"\n\tk8sConst \"github.com\/cilium\/cilium\/pkg\/k8s\/apis\/cilium.io\"\n\t\"github.com\/cilium\/cilium\/pkg\/labels\"\n\n\t. \"gopkg.in\/check.v1\"\n)\n\n\/\/ Hook up gocheck into the \"go test\" runner.\nfunc Test(t *testing.T) {\n\tTestingT(t)\n}\n\ntype LabelsPrefCfgSuite struct{}\n\nvar _ = Suite(&LabelsPrefCfgSuite{})\n\nfunc (s *LabelsPrefCfgSuite) TestFilterLabels(c *C) {\n\twanted := labels.Labels{\n\t\t\"id.lizards\": labels.NewLabel(\"id.lizards\", \"web\", labels.LabelSourceContainer),\n\t\t\"id.lizards.k8s\": labels.NewLabel(\"id.lizards.k8s\", \"web\", labels.LabelSourceK8s),\n\t\t\"io.kubernetes.pod.namespace\": labels.NewLabel(\"io.kubernetes.pod.namespace\", \"default\", labels.LabelSourceContainer),\n\t\t\"app.kubernetes.io\": labels.NewLabel(\"app.kubernetes.io\", \"my-nginx\", labels.LabelSourceContainer),\n\t\t\"foo2.lizards.k8s\": labels.NewLabel(\"foo2.lizards.k8s\", \"web\", labels.LabelSourceK8s),\n\t}\n\n\terr := ParseLabelPrefixCfg([]string{\":!ignor[eE]\", \"id.*\", \"foo\"}, \"\")\n\tc.Assert(err, IsNil)\n\tdlpcfg := validLabelPrefixes\n\tallNormalLabels := map[string]string{\n\t\t\"io.kubernetes.container.hash\": \"cf58006d\",\n\t\t\"io.kubernetes.container.name\": \"POD\",\n\t\t\"io.kubernetes.container.restartCount\": \"0\",\n\t\t\"io.kubernetes.container.terminationMessagePath\": \"\",\n\t\t\"io.kubernetes.pod.name\": \"my-nginx-3800858182-07i3n\",\n\t\t\"io.kubernetes.pod.namespace\": \"default\",\n\t\t\"app.kubernetes.io\": \"my-nginx\",\n\t\t\"kubernetes.io.foo\": \"foo\",\n\t\t\"beta.kubernetes.io.foo\": \"foo\",\n\t\t\"annotation.kubectl.kubernetes.io\": \"foo\",\n\t\t\"annotation.hello\": \"world\",\n\t\t\"annotation.\" + k8sConst.CiliumIdentityAnnotationDeprecated: \"12356\",\n\t\t\"io.kubernetes.pod.terminationGracePeriod\": \"30\",\n\t\t\"io.kubernetes.pod.uid\": \"c2e22414-dfc3-11e5-9792-080027755f5a\",\n\t\t\"ignore\": \"foo\",\n\t\t\"ignorE\": \"foo\",\n\t\t\"annotation.kubernetes.io\/config.seen\": \"2017-05-30T14:22:17.691491034Z\",\n\t\t\"controller-revision-hash\": \"123456\",\n\t}\n\tallLabels := labels.Map2Labels(allNormalLabels, labels.LabelSourceContainer)\n\tfiltered, _ := dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 2)\n\tallLabels[\"id.lizards\"] = labels.NewLabel(\"id.lizards\", \"web\", labels.LabelSourceContainer)\n\tallLabels[\"id.lizards.k8s\"] = labels.NewLabel(\"id.lizards.k8s\", \"web\", labels.LabelSourceK8s)\n\tfiltered, _ = dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 4)\n\t\/\/ Checking that it does not need to an exact match of \"foo\", but \"foo2\" also works since it's not a regex\n\tallLabels[\"foo2.lizards.k8s\"] = labels.NewLabel(\"foo2.lizards.k8s\", \"web\", labels.LabelSourceK8s)\n\tfiltered, _ = dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 5)\n\t\/\/ Checking that \"foo\" only works if it's the prefix of a label\n\tallLabels[\"lizards.foo.lizards.k8s\"] = labels.NewLabel(\"lizards.foo.lizards.k8s\", \"web\", labels.LabelSourceK8s)\n\tfiltered, _ = dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 5)\n\tc.Assert(filtered, checker.DeepEquals, wanted)\n\t\/\/ Making sure we are deep copying the labels\n\tallLabels[\"id.lizards\"] = labels.NewLabel(\"id.lizards\", \"web\", \"I can change this and doesn't affect any one\")\n\tc.Assert(filtered, checker.DeepEquals, wanted)\n}\n\nfunc (s *LabelsPrefCfgSuite) TestDefaultFilterLabels(c *C) {\n\twanted := labels.Labels{\n\t\t\"app.kubernetes.io\": labels.NewLabel(\"app.kubernetes.io\", \"my-nginx\", labels.LabelSourceContainer),\n\t\t\"id.lizards.k8s\": labels.NewLabel(\"id.lizards.k8s\", \"web\", labels.LabelSourceK8s),\n\t\t\"id.lizards\": labels.NewLabel(\"id.lizards\", \"web\", labels.LabelSourceContainer),\n\t\t\"ignorE\": labels.NewLabel(\"ignorE\", \"foo\", labels.LabelSourceContainer),\n\t\t\"ignore\": labels.NewLabel(\"ignore\", \"foo\", labels.LabelSourceContainer),\n\t\t\"reserved:host\": labels.NewLabel(\"reserved:host\", \"\", labels.LabelSourceAny),\n\t\t\"io.kubernetes.pod.namespace\": labels.NewLabel(\"io.kubernetes.pod.namespace\", \"default\", labels.LabelSourceContainer),\n\t}\n\n\terr := ParseLabelPrefixCfg([]string{}, \"\")\n\tc.Assert(err, IsNil)\n\tdlpcfg := validLabelPrefixes\n\tallNormalLabels := map[string]string{\n\t\t\"io.kubernetes.container.hash\": \"cf58006d\",\n\t\t\"io.kubernetes.container.name\": \"POD\",\n\t\t\"io.kubernetes.container.restartCount\": \"0\",\n\t\t\"io.kubernetes.container.terminationMessagePath\": \"\",\n\t\t\"io.kubernetes.pod.name\": \"my-nginx-3800858182-07i3n\",\n\t\t\"io.kubernetes.pod.namespace\": \"default\",\n\t\t\"app.kubernetes.io\": \"my-nginx\",\n\t\t\"kubernetes.io.foo\": \"foo\",\n\t\t\"beta.kubernetes.io.foo\": \"foo\",\n\t\t\"annotation.kubectl.kubernetes.io\": \"foo\",\n\t\t\"annotation.hello\": \"world\",\n\t\t\"annotation.\" + k8sConst.CiliumIdentityAnnotationDeprecated: \"12356\",\n\t\t\"io.kubernetes.pod.terminationGracePeriod\": \"30\",\n\t\t\"io.kubernetes.pod.uid\": \"c2e22414-dfc3-11e5-9792-080027755f5a\",\n\t\t\"ignore\": \"foo\",\n\t\t\"ignorE\": \"foo\",\n\t\t\"annotation.kubernetes.io\/config.seen\": \"2017-05-30T14:22:17.691491034Z\",\n\t\t\"controller-revision-hash\": \"123456\",\n\t}\n\tallLabels := labels.Map2Labels(allNormalLabels, labels.LabelSourceContainer)\n\tallLabels[\"reserved:host\"] = labels.NewLabel(\"reserved:host\", \"\", labels.LabelSourceAny)\n\tfiltered, _ := dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 5)\n\tallLabels[\"id.lizards\"] = labels.NewLabel(\"id.lizards\", \"web\", labels.LabelSourceContainer)\n\tallLabels[\"id.lizards.k8s\"] = labels.NewLabel(\"id.lizards.k8s\", \"web\", labels.LabelSourceK8s)\n\tfiltered, _ = dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 7)\n\tc.Assert(filtered, checker.DeepEquals, wanted)\n}\n\nfunc (s *LabelsPrefCfgSuite) TestFilterLabelsDocExample(c *C) {\n\twanted := labels.Labels{\n\t\t\"io.cilium.k8s.namespace.labels\": labels.NewLabel(\"io.cilium.k8s.namespace.labels\", \"foo\", labels.LabelSourceK8s),\n\t\t\"k8s-app-team\": labels.NewLabel(\"k8s-app-team\", \"foo\", labels.LabelSourceK8s),\n\t\t\"app-production\": labels.NewLabel(\"app-production\", \"foo\", labels.LabelSourceK8s),\n\t\t\"name-defined\": labels.NewLabel(\"name-defined\", \"foo\", labels.LabelSourceK8s),\n\t\t\"host\": labels.NewLabel(\"host\", \"\", labels.LabelSourceReserved),\n\t\t\"io.kubernetes.pod.namespace\": labels.NewLabel(\"io.kubernetes.pod.namespace\", \"docker\", labels.LabelSourceAny),\n\t}\n\n\terr := ParseLabelPrefixCfg([]string{\"k8s:io.kubernetes.pod.namespace\", \"k8s:k8s-app\", \"k8s:app\", \"k8s:name\"}, \"\")\n\tc.Assert(err, IsNil)\n\tdlpcfg := validLabelPrefixes\n\tallNormalLabels := map[string]string{\n\t\t\"io.cilium.k8s.namespace.labels\": \"foo\",\n\t\t\"k8s-app-team\": \"foo\",\n\t\t\"app-production\": \"foo\",\n\t\t\"name-defined\": \"foo\",\n\t}\n\tallLabels := labels.Map2Labels(allNormalLabels, labels.LabelSourceK8s)\n\tfiltered, _ := dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 4)\n\n\t\/\/ Reserved labels are included.\n\tallLabels[\"host\"] = labels.NewLabel(\"host\", \"\", labels.LabelSourceReserved)\n\tfiltered, _ = dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 5)\n\n\t\/\/ io.kubernetes.pod.namespace=docker matches because the default list has any:io.kubernetes.pod.namespace.\n\tallLabels[\"io.kubernetes.pod.namespace\"] = labels.NewLabel(\"io.kubernetes.pod.namespace\", \"docker\", labels.LabelSourceAny)\n\tfiltered, _ = dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 6)\n\n\t\/\/ container:k8s-app-role=foo doesn't match because it doesn't have source k8s.\n\tallLabels[\"k8s-app-role\"] = labels.NewLabel(\"k8s-app-role\", \"foo\", labels.LabelSourceContainer)\n\tfiltered, _ = dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 6)\n\tc.Assert(filtered, checker.DeepEquals, wanted)\n}\n<commit_msg>labelsfilter: Fix test for default filters<commit_after>\/\/ Copyright 2016-2020 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build !privileged_tests\n\npackage labelsfilter\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/cilium\/cilium\/pkg\/checker\"\n\tk8sConst \"github.com\/cilium\/cilium\/pkg\/k8s\/apis\/cilium.io\"\n\t\"github.com\/cilium\/cilium\/pkg\/labels\"\n\n\t. \"gopkg.in\/check.v1\"\n)\n\n\/\/ Hook up gocheck into the \"go test\" runner.\nfunc Test(t *testing.T) {\n\tTestingT(t)\n}\n\ntype LabelsPrefCfgSuite struct{}\n\nvar _ = Suite(&LabelsPrefCfgSuite{})\n\nfunc (s *LabelsPrefCfgSuite) TestFilterLabels(c *C) {\n\twanted := labels.Labels{\n\t\t\"id.lizards\": labels.NewLabel(\"id.lizards\", \"web\", labels.LabelSourceContainer),\n\t\t\"id.lizards.k8s\": labels.NewLabel(\"id.lizards.k8s\", \"web\", labels.LabelSourceK8s),\n\t\t\"io.kubernetes.pod.namespace\": labels.NewLabel(\"io.kubernetes.pod.namespace\", \"default\", labels.LabelSourceContainer),\n\t\t\"app.kubernetes.io\": labels.NewLabel(\"app.kubernetes.io\", \"my-nginx\", labels.LabelSourceContainer),\n\t\t\"foo2.lizards.k8s\": labels.NewLabel(\"foo2.lizards.k8s\", \"web\", labels.LabelSourceK8s),\n\t}\n\n\terr := ParseLabelPrefixCfg([]string{\":!ignor[eE]\", \"id.*\", \"foo\"}, \"\")\n\tc.Assert(err, IsNil)\n\tdlpcfg := validLabelPrefixes\n\tallNormalLabels := map[string]string{\n\t\t\"io.kubernetes.container.hash\": \"cf58006d\",\n\t\t\"io.kubernetes.container.name\": \"POD\",\n\t\t\"io.kubernetes.container.restartCount\": \"0\",\n\t\t\"io.kubernetes.container.terminationMessagePath\": \"\",\n\t\t\"io.kubernetes.pod.name\": \"my-nginx-3800858182-07i3n\",\n\t\t\"io.kubernetes.pod.namespace\": \"default\",\n\t\t\"app.kubernetes.io\": \"my-nginx\",\n\t\t\"kubernetes.io.foo\": \"foo\",\n\t\t\"beta.kubernetes.io.foo\": \"foo\",\n\t\t\"annotation.kubectl.kubernetes.io\": \"foo\",\n\t\t\"annotation.hello\": \"world\",\n\t\t\"annotation.\" + k8sConst.CiliumIdentityAnnotationDeprecated: \"12356\",\n\t\t\"io.kubernetes.pod.terminationGracePeriod\": \"30\",\n\t\t\"io.kubernetes.pod.uid\": \"c2e22414-dfc3-11e5-9792-080027755f5a\",\n\t\t\"ignore\": \"foo\",\n\t\t\"ignorE\": \"foo\",\n\t\t\"annotation.kubernetes.io\/config.seen\": \"2017-05-30T14:22:17.691491034Z\",\n\t\t\"controller-revision-hash\": \"123456\",\n\t}\n\tallLabels := labels.Map2Labels(allNormalLabels, labels.LabelSourceContainer)\n\tfiltered, _ := dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 2)\n\tallLabels[\"id.lizards\"] = labels.NewLabel(\"id.lizards\", \"web\", labels.LabelSourceContainer)\n\tallLabels[\"id.lizards.k8s\"] = labels.NewLabel(\"id.lizards.k8s\", \"web\", labels.LabelSourceK8s)\n\tfiltered, _ = dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 4)\n\t\/\/ Checking that it does not need to an exact match of \"foo\", but \"foo2\" also works since it's not a regex\n\tallLabels[\"foo2.lizards.k8s\"] = labels.NewLabel(\"foo2.lizards.k8s\", \"web\", labels.LabelSourceK8s)\n\tfiltered, _ = dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 5)\n\t\/\/ Checking that \"foo\" only works if it's the prefix of a label\n\tallLabels[\"lizards.foo.lizards.k8s\"] = labels.NewLabel(\"lizards.foo.lizards.k8s\", \"web\", labels.LabelSourceK8s)\n\tfiltered, _ = dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 5)\n\tc.Assert(filtered, checker.DeepEquals, wanted)\n\t\/\/ Making sure we are deep copying the labels\n\tallLabels[\"id.lizards\"] = labels.NewLabel(\"id.lizards\", \"web\", \"I can change this and doesn't affect any one\")\n\tc.Assert(filtered, checker.DeepEquals, wanted)\n}\n\nfunc (s *LabelsPrefCfgSuite) TestDefaultFilterLabels(c *C) {\n\twanted := labels.Labels{\n\t\t\"app.kubernetes.io\": labels.NewLabel(\"app.kubernetes.io\", \"my-nginx\", labels.LabelSourceContainer),\n\t\t\"id.lizards.k8s\": labels.NewLabel(\"id.lizards.k8s\", \"web\", labels.LabelSourceK8s),\n\t\t\"id.lizards\": labels.NewLabel(\"id.lizards\", \"web\", labels.LabelSourceContainer),\n\t\t\"ignorE\": labels.NewLabel(\"ignorE\", \"foo\", labels.LabelSourceContainer),\n\t\t\"ignore\": labels.NewLabel(\"ignore\", \"foo\", labels.LabelSourceContainer),\n\t\t\"host\": labels.NewLabel(\"host\", \"\", labels.LabelSourceReserved),\n\t\t\"io.kubernetes.pod.namespace\": labels.NewLabel(\"io.kubernetes.pod.namespace\", \"default\", labels.LabelSourceContainer),\n\t}\n\n\terr := ParseLabelPrefixCfg([]string{}, \"\")\n\tc.Assert(err, IsNil)\n\tdlpcfg := validLabelPrefixes\n\tallNormalLabels := map[string]string{\n\t\t\"io.kubernetes.container.hash\": \"cf58006d\",\n\t\t\"io.kubernetes.container.name\": \"POD\",\n\t\t\"io.kubernetes.container.restartCount\": \"0\",\n\t\t\"io.kubernetes.container.terminationMessagePath\": \"\",\n\t\t\"io.kubernetes.pod.name\": \"my-nginx-3800858182-07i3n\",\n\t\t\"io.kubernetes.pod.namespace\": \"default\",\n\t\t\"app.kubernetes.io\": \"my-nginx\",\n\t\t\"kubernetes.io.foo\": \"foo\",\n\t\t\"beta.kubernetes.io.foo\": \"foo\",\n\t\t\"annotation.kubectl.kubernetes.io\": \"foo\",\n\t\t\"annotation.hello\": \"world\",\n\t\t\"annotation.\" + k8sConst.CiliumIdentityAnnotationDeprecated: \"12356\",\n\t\t\"io.kubernetes.pod.terminationGracePeriod\": \"30\",\n\t\t\"io.kubernetes.pod.uid\": \"c2e22414-dfc3-11e5-9792-080027755f5a\",\n\t\t\"ignore\": \"foo\",\n\t\t\"ignorE\": \"foo\",\n\t\t\"annotation.kubernetes.io\/config.seen\": \"2017-05-30T14:22:17.691491034Z\",\n\t\t\"controller-revision-hash\": \"123456\",\n\t}\n\tallLabels := labels.Map2Labels(allNormalLabels, labels.LabelSourceContainer)\n\tallLabels[\"host\"] = labels.NewLabel(\"host\", \"\", labels.LabelSourceReserved)\n\tfiltered, _ := dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 5)\n\tallLabels[\"id.lizards\"] = labels.NewLabel(\"id.lizards\", \"web\", labels.LabelSourceContainer)\n\tallLabels[\"id.lizards.k8s\"] = labels.NewLabel(\"id.lizards.k8s\", \"web\", labels.LabelSourceK8s)\n\tfiltered, _ = dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 7)\n\tc.Assert(filtered, checker.DeepEquals, wanted)\n}\n\nfunc (s *LabelsPrefCfgSuite) TestFilterLabelsDocExample(c *C) {\n\twanted := labels.Labels{\n\t\t\"io.cilium.k8s.namespace.labels\": labels.NewLabel(\"io.cilium.k8s.namespace.labels\", \"foo\", labels.LabelSourceK8s),\n\t\t\"k8s-app-team\": labels.NewLabel(\"k8s-app-team\", \"foo\", labels.LabelSourceK8s),\n\t\t\"app-production\": labels.NewLabel(\"app-production\", \"foo\", labels.LabelSourceK8s),\n\t\t\"name-defined\": labels.NewLabel(\"name-defined\", \"foo\", labels.LabelSourceK8s),\n\t\t\"host\": labels.NewLabel(\"host\", \"\", labels.LabelSourceReserved),\n\t\t\"io.kubernetes.pod.namespace\": labels.NewLabel(\"io.kubernetes.pod.namespace\", \"docker\", labels.LabelSourceAny),\n\t}\n\n\terr := ParseLabelPrefixCfg([]string{\"k8s:io.kubernetes.pod.namespace\", \"k8s:k8s-app\", \"k8s:app\", \"k8s:name\"}, \"\")\n\tc.Assert(err, IsNil)\n\tdlpcfg := validLabelPrefixes\n\tallNormalLabels := map[string]string{\n\t\t\"io.cilium.k8s.namespace.labels\": \"foo\",\n\t\t\"k8s-app-team\": \"foo\",\n\t\t\"app-production\": \"foo\",\n\t\t\"name-defined\": \"foo\",\n\t}\n\tallLabels := labels.Map2Labels(allNormalLabels, labels.LabelSourceK8s)\n\tfiltered, _ := dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 4)\n\n\t\/\/ Reserved labels are included.\n\tallLabels[\"host\"] = labels.NewLabel(\"host\", \"\", labels.LabelSourceReserved)\n\tfiltered, _ = dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 5)\n\n\t\/\/ io.kubernetes.pod.namespace=docker matches because the default list has any:io.kubernetes.pod.namespace.\n\tallLabels[\"io.kubernetes.pod.namespace\"] = labels.NewLabel(\"io.kubernetes.pod.namespace\", \"docker\", labels.LabelSourceAny)\n\tfiltered, _ = dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 6)\n\n\t\/\/ container:k8s-app-role=foo doesn't match because it doesn't have source k8s.\n\tallLabels[\"k8s-app-role\"] = labels.NewLabel(\"k8s-app-role\", \"foo\", labels.LabelSourceContainer)\n\tfiltered, _ = dlpcfg.filterLabels(allLabels)\n\tc.Assert(len(filtered), Equals, 6)\n\tc.Assert(filtered, checker.DeepEquals, wanted)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\tprow_config \"k8s.io\/test-infra\/prow\/config\"\n\t\"k8s.io\/test-infra\/testgrid\/config\/yaml2proto\"\n)\n\nfunc main() {\n\targs := os.Args[1:]\n\n\tif len(args) != 3 {\n\t\tfmt.Println(\"Missing args - usage: go run jenkins_validate.go <path\/to\/job_collection> <path\/to\/prow> <path\/to\/testgrid_config>\")\n\t\tos.Exit(1)\n\t}\n\n\tjobPath := args[0]\n\tprowPath := args[1]\n\tconfigPath := args[2]\n\n\tjobs := make(map[string]bool)\n\tfiles, err := filepath.Glob(jobPath + \"\/*\")\n\tif err != nil {\n\t\tfmt.Println(\"Failed to collect outputs.\")\n\t\tos.Exit(1)\n\t}\n\n\tfor _, file := range files {\n\t\tfile = strings.TrimPrefix(file, jobPath+\"\/\")\n\t\tjobs[file] = false\n\t}\n\n\tdata, err := ioutil.ReadFile(configPath)\n\tif err != nil {\n\t\tfmt.Printf(\"Failed reading %v\\n\", configPath)\n\t\tos.Exit(1)\n\t}\n\n\tc := yaml2proto.Config{}\n\tif err := c.Update(data); err != nil {\n\t\tfmt.Printf(\"Failed to convert yaml to protobuf: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tconfig, err := c.Raw()\n\tif err != nil {\n\t\tfmt.Printf(\"Error validating config: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tprowConfig, err := prow_config.Load(prowPath + \"\/config.yaml\")\n\tif err != nil {\n\t\tfmt.Printf(\"Could not load prow configs: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Also check k\/k presubmit, prow postsubmit and periodic jobs\n\tfor _, job := range prowConfig.AllPresubmits([]string{}) {\n\t\tjobs[job.Name] = false\n\t}\n\n\tfor _, job := range prowConfig.AllPostsubmits([]string{}) {\n\t\tif job.Agent != \"jenkins\" {\n\t\t\tjobs[job.Name] = false\n\t\t}\n\t}\n\n\tfor _, job := range prowConfig.AllPeriodics() {\n\t\tif job.Agent != \"jenkins\" {\n\t\t\tjobs[job.Name] = false\n\t\t}\n\t}\n\n\t\/\/ For now anything outsite k8s-jenkins\/(pr-)logs are considered to be fine\n\ttestgroups := make(map[string]bool)\n\tfor _, testgroup := range config.TestGroups {\n\t\tif strings.Contains(testgroup.GcsPrefix, \"kubernetes-jenkins\/logs\/\") {\n\t\t\tjob := strings.TrimPrefix(testgroup.GcsPrefix, \"kubernetes-jenkins\/logs\/\")\n\t\t\ttestgroups[job] = false\n\t\t}\n\n\t\tif strings.Contains(testgroup.GcsPrefix, \"kubernetes-jenkins\/pr-logs\/directory\/\") {\n\t\t\tjob := strings.TrimPrefix(testgroup.GcsPrefix, \"kubernetes-jenkins\/pr-logs\/directory\/\")\n\t\t\ttestgroups[job] = false\n\t\t}\n\t}\n\n\t\/\/ Cross check\n\t\/\/ -- Each job need to have a match testgrid group\n\tfor job := range jobs {\n\t\tif _, ok := testgroups[job]; ok {\n\t\t\ttestgroups[job] = true\n\t\t\tjobs[job] = true\n\t\t}\n\t}\n\n\t\/\/ Conclusion\n\tbadjobs := []string{}\n\tfor job, valid := range jobs {\n\t\tif !valid {\n\t\t\tbadjobs = append(badjobs, job)\n\t\t\tfmt.Printf(\"Job %v does not have a matching testgrid testgroup\\n\", job)\n\t\t}\n\t}\n\n\tbadconfigs := []string{}\n\tfor testgroup, valid := range testgroups {\n\t\tif !valid {\n\t\t\tbadconfigs = append(badconfigs, testgroup)\n\t\t\tfmt.Printf(\"Testgrid group %v does not have a matching jenkins or prow job\\n\", testgroup)\n\t\t}\n\t}\n\n\tif len(badconfigs) > 0 {\n\t\tfmt.Printf(\"Total bad config(s) - %v\\n\", len(badconfigs))\n\t}\n\n\tif len(badjobs) > 0 {\n\t\tfmt.Printf(\"Total bad job(s) - %v\\n\", len(badjobs))\n\t}\n\n\tif len(badconfigs) > 0 || len(badjobs) > 0 {\n\t\tos.Exit(1)\n\t}\n}\n<commit_msg>Revert \"* Try to fix failure\"<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\tprow_config \"k8s.io\/test-infra\/prow\/config\"\n\t\"k8s.io\/test-infra\/testgrid\/config\/yaml2proto\"\n)\n\nfunc main() {\n\targs := os.Args[1:]\n\n\tif len(args) != 3 {\n\t\tfmt.Println(\"Missing args - usage: go run jenkins_validate.go <path\/to\/job_collection> <path\/to\/prow> <path\/to\/testgrid_config>\")\n\t\tos.Exit(1)\n\t}\n\n\tjobPath := args[0]\n\tprowPath := args[1]\n\tconfigPath := args[2]\n\n\tjobs := make(map[string]bool)\n\tfiles, err := filepath.Glob(jobPath + \"\/*\")\n\tif err != nil {\n\t\tfmt.Println(\"Failed to collect outputs.\")\n\t\tos.Exit(1)\n\t}\n\n\tfor _, file := range files {\n\t\tfile = strings.TrimPrefix(file, jobPath+\"\/\")\n\t\tjobs[file] = false\n\t}\n\n\tdata, err := ioutil.ReadFile(configPath)\n\tif err != nil {\n\t\tfmt.Printf(\"Failed reading %v\\n\", configPath)\n\t\tos.Exit(1)\n\t}\n\n\tc := yaml2proto.Config{}\n\tif err := c.Update(data); err != nil {\n\t\tfmt.Printf(\"Failed to convert yaml to protobuf: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tconfig, err := c.Raw()\n\tif err != nil {\n\t\tfmt.Printf(\"Error validating config: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tprowConfig, err := prow_config.Load(prowPath + \"\/config.yaml\")\n\tif err != nil {\n\t\tfmt.Printf(\"Could not load prow configs: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ Also check k\/k presubmit, prow postsubmit and periodic jobs\n\tfor _, job := range prowConfig.AllPresubmits([]string{\"kubernetes\/kubernetes\"}) {\n\t\tjobs[job.Name] = false\n\t}\n\n\tfor _, job := range prowConfig.AllPostsubmits([]string{}) {\n\t\tif job.Agent != \"jenkins\" {\n\t\t\tjobs[job.Name] = false\n\t\t}\n\t}\n\n\tfor _, job := range prowConfig.AllPeriodics() {\n\t\tif job.Agent != \"jenkins\" {\n\t\t\tjobs[job.Name] = false\n\t\t}\n\t}\n\n\t\/\/ For now anything outsite k8s-jenkins\/(pr-)logs are considered to be fine\n\ttestgroups := make(map[string]bool)\n\tfor _, testgroup := range config.TestGroups {\n\t\tif strings.Contains(testgroup.GcsPrefix, \"kubernetes-jenkins\/logs\/\") {\n\t\t\tjob := strings.TrimPrefix(testgroup.GcsPrefix, \"kubernetes-jenkins\/logs\/\")\n\t\t\ttestgroups[job] = false\n\t\t}\n\n\t\tif strings.Contains(testgroup.GcsPrefix, \"kubernetes-jenkins\/pr-logs\/directory\/\") {\n\t\t\tjob := strings.TrimPrefix(testgroup.GcsPrefix, \"kubernetes-jenkins\/pr-logs\/directory\/\")\n\t\t\ttestgroups[job] = false\n\t\t}\n\t}\n\n\t\/\/ Cross check\n\t\/\/ -- Each job need to have a match testgrid group\n\tfor job := range jobs {\n\t\tif _, ok := testgroups[job]; ok {\n\t\t\ttestgroups[job] = true\n\t\t\tjobs[job] = true\n\t\t}\n\t}\n\n\t\/\/ Conclusion\n\tbadjobs := []string{}\n\tfor job, valid := range jobs {\n\t\tif !valid {\n\t\t\tbadjobs = append(badjobs, job)\n\t\t\tfmt.Printf(\"Job %v does not have a matching testgrid testgroup\\n\", job)\n\t\t}\n\t}\n\n\tbadconfigs := []string{}\n\tfor testgroup, valid := range testgroups {\n\t\tif !valid {\n\t\t\tbadconfigs = append(badconfigs, testgroup)\n\t\t\tfmt.Printf(\"Testgrid group %v does not have a matching jenkins or prow job\\n\", testgroup)\n\t\t}\n\t}\n\n\tif len(badconfigs) > 0 {\n\t\tfmt.Printf(\"Total bad config(s) - %v\\n\", len(badconfigs))\n\t}\n\n\tif len(badjobs) > 0 {\n\t\tfmt.Printf(\"Total bad job(s) - %v\\n\", len(badjobs))\n\t}\n\n\tif len(badconfigs) > 0 || len(badjobs) > 0 {\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"go.chromium.org\/luci\/common\/errors\"\n\t\"go.chromium.org\/luci\/common\/flag\/stringsetflag\"\n)\n\ntype app struct {\n\tout io.Writer\n\n\tpackageName string\n\ttypeNames stringsetflag.Flag\n\toutFile string\n\theader string\n}\n\nconst help = `Usage of %s:\n\n%s is a go-generator program that generates PropertyConverter implementations\nfor types produced by protoc. It can be used in a go generation file like:\n\n \/\/go:generate <protoc command>\n \/\/go:generate proto-gae -type MessageType -type OtherMessageType\n\nThis will produce a new file which implements the ToProperty and FromProperty\nmethods for the named types.\n\nOptions:\n`\n\nconst copyright = `\/\/ Copyright 2016 The LUCI Authors. All rights reserved.\n\/\/ Use of this source code is governed under the Apache License, Version 2.0\n\/\/ that can be found in the LICENSE file.\n`\n\nfunc (a *app) parseArgs(fs *flag.FlagSet, args []string) error {\n\tfs.SetOutput(a.out)\n\tfs.Usage = func() {\n\t\tfmt.Fprintf(a.out, help, args[0], args[0])\n\t\tfs.PrintDefaults()\n\t}\n\n\tfs.Var(&a.typeNames, \"type\",\n\t\t\"A generated proto.Message type to generate stubs for (required, repeatable)\")\n\tfs.StringVar(&a.outFile, \"out\", \"proto_gae.gen.go\",\n\t\t\"The name of the output file\")\n\tfs.StringVar(&a.header, \"header\", copyright, \"Header text to put at the top of \"+\n\t\t\"the generated file. Defaults to the LUCI Authors copyright.\")\n\n\tif err := fs.Parse(args[1:]); err != nil {\n\t\treturn err\n\t}\n\tfail := errors.MultiError(nil)\n\tif a.typeNames.Data == nil || a.typeNames.Data.Len() == 0 {\n\t\tfail = append(fail, errors.New(\"must specify one or more -type\"))\n\t}\n\tif !strings.HasSuffix(a.outFile, \".go\") {\n\t\tfail = append(fail, errors.New(\"-output must end with '.go'\"))\n\t}\n\tif len(fail) > 0 {\n\t\tfor _, e := range fail {\n\t\t\tfmt.Fprintln(a.out, \"error:\", e)\n\t\t}\n\t\tfmt.Fprintln(a.out)\n\t\tfs.Usage()\n\t\treturn fail\n\t}\n\treturn nil\n}\n\nvar tmpl = template.Must(\n\ttemplate.New(\"main\").Parse(`{{if index . \"header\"}}{{index . \"header\"}}\n{{end}}\/\/ AUTOGENERATED: Do not edit\n\npackage {{index . \"package\"}}\n\nimport (\n\t\"github.com\/golang\/protobuf\/proto\"\n\n\t\"go.chromium.org\/gae\/service\/datastore\"\n){{range index . \"types\"}}\n\nvar _ datastore.PropertyConverter = (*{{.}})(nil)\n\n\/\/ ToProperty implements datastore.PropertyConverter. It causes an embedded\n\/\/ '{{.}}' to serialize to an unindexed '[]byte' when used with the\n\/\/ \"go.chromium.org\/gae\" library.\nfunc (p *{{.}}) ToProperty() (prop datastore.Property, err error) {\n\tdata, err := proto.Marshal(p)\n\tif err == nil {\n\t\tprop.SetValue(data, datastore.NoIndex)\n\t}\n\treturn\n}\n\n\/\/ FromProperty implements datastore.PropertyConverter. It parses a '[]byte'\n\/\/ into an embedded '{{.}}' when used with the \"go.chromium.org\/gae\" library.\nfunc (p *{{.}}) FromProperty(prop datastore.Property) error {\n\tdata, err := prop.Project(datastore.PTBytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn proto.Unmarshal(data.([]byte), p)\n}{{end}}\n`))\n\nfunc (a *app) writeTo(w io.Writer) error {\n\ttypeNames := a.typeNames.Data.ToSlice()\n\tsort.Strings(typeNames)\n\n\treturn tmpl.Execute(w, map[string]interface{}{\n\t\t\"package\": a.packageName,\n\t\t\"types\": typeNames,\n\t\t\"header\": a.header,\n\t})\n}\n\nfunc (a *app) main() {\n\tif err := a.parseArgs(flag.NewFlagSet(os.Args[0], flag.ContinueOnError), os.Args); err != nil {\n\t\tos.Exit(1)\n\t}\n\tofile, err := os.Create(a.outFile)\n\tif err != nil {\n\t\tfmt.Fprintf(a.out, \"error: %s\", err)\n\t\tos.Exit(2)\n\t}\n\tcloseFn := func(delete bool) {\n\t\tif ofile != nil {\n\t\t\tif err := ofile.Close(); err != nil {\n\t\t\t\tfmt.Fprintf(a.out, \"error while closing file: %s\", err)\n\t\t\t}\n\t\t\tif delete {\n\t\t\t\tif err := os.Remove(a.outFile); err != nil {\n\t\t\t\t\tfmt.Fprintf(a.out, \"failed to remove file!\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tofile = nil\n\t}\n\tdefer closeFn(false)\n\tbuf := bufio.NewWriter(ofile)\n\terr = a.writeTo(buf)\n\tif err != nil {\n\t\tfmt.Fprintf(a.out, \"error while writing: %s\", err)\n\t\tcloseFn(true)\n\t\tos.Exit(3)\n\t}\n\tif err := buf.Flush(); err != nil {\n\t\tfmt.Fprintf(a.out, \"error while writing: %s\", err)\n\t\tcloseFn(true)\n\t\tos.Exit(4)\n\t}\n}\n\nfunc main() {\n\t(&app{out: os.Stderr, packageName: os.Getenv(\"GOPACKAGE\")}).main()\n}\n<commit_msg>[proto-gae] Update default copyright header.<commit_after>\/\/ Copyright 2015 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"go.chromium.org\/luci\/common\/errors\"\n\t\"go.chromium.org\/luci\/common\/flag\/stringsetflag\"\n)\n\ntype app struct {\n\tout io.Writer\n\n\tpackageName string\n\ttypeNames stringsetflag.Flag\n\toutFile string\n\theader string\n}\n\nconst help = `Usage of %s:\n\n%s is a go-generator program that generates PropertyConverter implementations\nfor types produced by protoc. It can be used in a go generation file like:\n\n \/\/go:generate <protoc command>\n \/\/go:generate proto-gae -type MessageType -type OtherMessageType\n\nThis will produce a new file which implements the ToProperty and FromProperty\nmethods for the named types.\n\nOptions:\n`\n\nconst copyright = `\/\/ Copyright 2017 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n`\n\nfunc (a *app) parseArgs(fs *flag.FlagSet, args []string) error {\n\tfs.SetOutput(a.out)\n\tfs.Usage = func() {\n\t\tfmt.Fprintf(a.out, help, args[0], args[0])\n\t\tfs.PrintDefaults()\n\t}\n\n\tfs.Var(&a.typeNames, \"type\",\n\t\t\"A generated proto.Message type to generate stubs for (required, repeatable)\")\n\tfs.StringVar(&a.outFile, \"out\", \"proto_gae.gen.go\",\n\t\t\"The name of the output file\")\n\tfs.StringVar(&a.header, \"header\", copyright, \"Header text to put at the top of \"+\n\t\t\"the generated file. Defaults to the LUCI Authors copyright.\")\n\n\tif err := fs.Parse(args[1:]); err != nil {\n\t\treturn err\n\t}\n\tfail := errors.MultiError(nil)\n\tif a.typeNames.Data == nil || a.typeNames.Data.Len() == 0 {\n\t\tfail = append(fail, errors.New(\"must specify one or more -type\"))\n\t}\n\tif !strings.HasSuffix(a.outFile, \".go\") {\n\t\tfail = append(fail, errors.New(\"-output must end with '.go'\"))\n\t}\n\tif len(fail) > 0 {\n\t\tfor _, e := range fail {\n\t\t\tfmt.Fprintln(a.out, \"error:\", e)\n\t\t}\n\t\tfmt.Fprintln(a.out)\n\t\tfs.Usage()\n\t\treturn fail\n\t}\n\treturn nil\n}\n\nvar tmpl = template.Must(\n\ttemplate.New(\"main\").Parse(`{{if index . \"header\"}}{{index . \"header\"}}\n{{end}}\/\/ AUTOGENERATED: Do not edit\n\npackage {{index . \"package\"}}\n\nimport (\n\t\"github.com\/golang\/protobuf\/proto\"\n\n\t\"go.chromium.org\/gae\/service\/datastore\"\n){{range index . \"types\"}}\n\nvar _ datastore.PropertyConverter = (*{{.}})(nil)\n\n\/\/ ToProperty implements datastore.PropertyConverter. It causes an embedded\n\/\/ '{{.}}' to serialize to an unindexed '[]byte' when used with the\n\/\/ \"go.chromium.org\/gae\" library.\nfunc (p *{{.}}) ToProperty() (prop datastore.Property, err error) {\n\tdata, err := proto.Marshal(p)\n\tif err == nil {\n\t\tprop.SetValue(data, datastore.NoIndex)\n\t}\n\treturn\n}\n\n\/\/ FromProperty implements datastore.PropertyConverter. It parses a '[]byte'\n\/\/ into an embedded '{{.}}' when used with the \"go.chromium.org\/gae\" library.\nfunc (p *{{.}}) FromProperty(prop datastore.Property) error {\n\tdata, err := prop.Project(datastore.PTBytes)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn proto.Unmarshal(data.([]byte), p)\n}{{end}}\n`))\n\nfunc (a *app) writeTo(w io.Writer) error {\n\ttypeNames := a.typeNames.Data.ToSlice()\n\tsort.Strings(typeNames)\n\n\treturn tmpl.Execute(w, map[string]interface{}{\n\t\t\"package\": a.packageName,\n\t\t\"types\": typeNames,\n\t\t\"header\": a.header,\n\t})\n}\n\nfunc (a *app) main() {\n\tif err := a.parseArgs(flag.NewFlagSet(os.Args[0], flag.ContinueOnError), os.Args); err != nil {\n\t\tos.Exit(1)\n\t}\n\tofile, err := os.Create(a.outFile)\n\tif err != nil {\n\t\tfmt.Fprintf(a.out, \"error: %s\", err)\n\t\tos.Exit(2)\n\t}\n\tcloseFn := func(delete bool) {\n\t\tif ofile != nil {\n\t\t\tif err := ofile.Close(); err != nil {\n\t\t\t\tfmt.Fprintf(a.out, \"error while closing file: %s\", err)\n\t\t\t}\n\t\t\tif delete {\n\t\t\t\tif err := os.Remove(a.outFile); err != nil {\n\t\t\t\t\tfmt.Fprintf(a.out, \"failed to remove file!\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tofile = nil\n\t}\n\tdefer closeFn(false)\n\tbuf := bufio.NewWriter(ofile)\n\terr = a.writeTo(buf)\n\tif err != nil {\n\t\tfmt.Fprintf(a.out, \"error while writing: %s\", err)\n\t\tcloseFn(true)\n\t\tos.Exit(3)\n\t}\n\tif err := buf.Flush(); err != nil {\n\t\tfmt.Fprintf(a.out, \"error while writing: %s\", err)\n\t\tcloseFn(true)\n\t\tos.Exit(4)\n\t}\n}\n\nfunc main() {\n\t(&app{out: os.Stderr, packageName: os.Getenv(\"GOPACKAGE\")}).main()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst requestTimeout = time.Second * 5\n\nvar (\n\tedgeHost = flag.String(\"edgeHost\", \"www.gov.uk\", \"Hostname of edge\")\n\toriginPort = flag.Int(\"originPort\", 8080, \"Origin port to listen on for requests\")\n\tinsecureTLS = flag.Bool(\"insecureTLS\", false, \"Whether to check server certificates\")\n\n\tclient *http.Transport\n\toriginServer *CDNServeMux\n)\n\n\/\/ Setup clients and servers.\nfunc init() {\n\n\tflag.Parse()\n\n\ttlsOptions := &tls.Config{}\n\tif *insecureTLS {\n\t\ttlsOptions.InsecureSkipVerify = true\n\t}\n\n\tclient = &http.Transport{\n\t\tResponseHeaderTimeout: requestTimeout,\n\t\tTLSClientConfig: tlsOptions,\n\t}\n\toriginServer = StartServer(*originPort)\n\n\tlog.Println(\"Confirming that CDN has successfully probed Origin\")\n\terr := confirmOriginIsEnabled(originServer, *edgeHost)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc TestHelpers(t *testing.T) {\n\ttestHelpersCDNServeMuxHandlers(t, originServer)\n\ttestHelpersCDNServeMuxProbes(t, originServer)\n}\n\n\/\/ Should redirect from HTTP to HTTPS without hitting origin.\nfunc TestProtocolRedirect(t *testing.T) {\n\toriginServer.SwitchHandler(func(w http.ResponseWriter, r *http.Request) {\n\t\tt.Error(\"Request should not have made it to origin\")\n\t})\n\n\tsourceUrl := fmt.Sprintf(\"http:\/\/%s\/foo\/bar\", *edgeHost)\n\tdestUrl := fmt.Sprintf(\"https:\/\/%s\/foo\/bar\", *edgeHost)\n\n\treq, _ := http.NewRequest(\"GET\", sourceUrl, nil)\n\tresp, err := client.RoundTrip(req)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif resp.StatusCode != 301 {\n\t\tt.Errorf(\"Status code expected 301, got %d\", resp.StatusCode)\n\t}\n\tif d := resp.Header.Get(\"Location\"); d != destUrl {\n\t\tt.Errorf(\"Location header expected %s, got %s\", destUrl, d)\n\t}\n}\n\n\/\/ Should send request to origin by default\nfunc TestRequestsGoToOriginByDefault(t *testing.T) {\n\tuuid := NewUUID()\n\toriginServer.SwitchHandler(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method == \"GET\" && r.URL.Path == fmt.Sprintf(\"\/%s\", uuid) {\n\t\t\tw.Header().Set(\"EnsureOriginServed\", uuid)\n\t\t}\n\t})\n\n\tsourceUrl := fmt.Sprintf(\"https:\/\/%s\/%s\", *edgeHost, uuid)\n\n\treq, _ := http.NewRequest(\"GET\", sourceUrl, nil)\n\tresp, err := client.RoundTrip(req)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif resp.StatusCode != 200 {\n\t\tt.Errorf(\"Status code expected 200, got %d\", resp.StatusCode)\n\t}\n\tif d := resp.Header.Get(\"EnsureOriginServed\"); d != uuid {\n\t\tt.Errorf(\"EnsureOriginServed header has not come from Origin: expected %q, got %q\", uuid, d)\n\t}\n\n}\n\n\/\/ Should cache first response and return it on second request without\n\/\/ hitting origin again.\nfunc TestFirstResponseCached(t *testing.T) {\n\tconst bodyExpected = \"first request\"\n\tconst requestsExpectedCount = 1\n\trequestsReceivedCount := 0\n\n\toriginServer.SwitchHandler(func(w http.ResponseWriter, r *http.Request) {\n\t\tif requestsReceivedCount == 0 {\n\t\t\tw.Write([]byte(bodyExpected))\n\t\t} else {\n\t\t\tw.Write([]byte(\"subsequent request\"))\n\t\t}\n\n\t\trequestsReceivedCount++\n\t})\n\n\turl := fmt.Sprintf(\"https:\/\/%s\/%s\", *edgeHost, NewUUID())\n\treq, _ := http.NewRequest(\"GET\", url, nil)\n\n\tfor i := 0; i < 2; i++ {\n\t\tresp, err := client.RoundTrip(req)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tdefer resp.Body.Close()\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif string(body) != bodyExpected {\n\t\t\tt.Errorf(\"Incorrect response body. Expected %q, got %q\", bodyExpected, body)\n\t\t}\n\t}\n\n\tif requestsReceivedCount > requestsExpectedCount {\n\t\tt.Errorf(\"originServer got too many requests. Expected %d requests, got %d\", requestsExpectedCount, requestsReceivedCount)\n\t}\n}\n\n\/\/ Should return 403 for PURGE requests from IPs not in the whitelist.\nfunc TestRestrictPurgeRequests(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should create an X-Forwarded-For header containing the client's IP.\nfunc TestHeaderCreateXFF(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should append client's IP to existing X-Forwarded-For header.\nfunc TestHeaderAppendXFF(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should create a True-Client-IP header containing the client's IP\n\/\/ address, discarding the value provided in the original request.\nfunc TestHeaderUnspoofableClientIP(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should not modify Host header from original request.\nfunc TestHeaderHostUnmodified(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should set a default TTL if the response doesn't set one.\nfunc TestDefaultTTL(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should serve stale object and not hit mirror(s) if origin is down and\n\/\/ object is beyond TTL but still in cache.\nfunc TestFailoverOriginDownServeStale(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should serve stale object and not hit mirror(s) if origin returns a 5xx\n\/\/ response and object is beyond TTL but still in cache.\nfunc TestFailoverOrigin5xxServeStale(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should fallback to first mirror if origin is down and object is not in\n\/\/ cache (active or stale).\nfunc TestFailoverOriginDownUseFirstMirror(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should fallback to first mirror if origin returns 5xx response and object\n\/\/ is not in cache (active or stale).\nfunc TestFailoverOrigin5xxUseFirstMirror(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should fallback to second mirror if both origin and first mirror are\n\/\/ down.\nfunc TestFailoverOriginDownFirstMirrorDownUseSecondMirror(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should fallback to second mirror if both origin and first mirror return\n\/\/ 5xx responses.\nfunc TestFailoverOrigin5xxFirstMirror5xxUseSecondMirror(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should not fallback to mirror if origin returns a 5xx response with a\n\/\/ No-Fallback header.\nfunc TestFailoverNoFallbackHeader(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should serve a known static error page if cannot serve a page\n\/\/ from origin, stale or any mirror.\n\/\/ NB: ideally this should be a page that we control that has a mechanism\n\/\/ to alert us that it has been served.\nfunc TestErrorPageIsServedWhenNoBackendAvailable(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should not cache a response with a Set-Cookie a header.\nfunc TestNoCacheHeaderSetCookie(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should not cache a response with a Cache-Control: private header.\nfunc TestNoCacheHeaderCacheControlPrivate(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ ---------------------------------------------------------\n\/\/ Test that useful common cache-related parameters are sent to the\n\/\/ client by this CDN provider.\n\n\/\/ Should set an Age header itself rather than passing the Age header from origin.\nfunc TestAgeHeaderIsSetByProviderNotOrigin(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should set an X-Cache header containing HIT\/MISS from 'origin, itself'\nfunc TestXCacheHeaderContainsHitMissFromBothProviderAndOrigin(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should set an X-Served-By header giving information on the node and location served from.\nfunc TestXServedByHeaderContainsANodeIdAndLocation(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should set an X-Cache-Hits header containing hit count for this object,\n\/\/ from the provider not origin\nfunc TestXCacheHitsContainsProviderHitCountForThisObject(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n<commit_msg>Test that for X-Served-By is set by Edge<commit_after>package main\n\nimport (\n\t\"crypto\/tls\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"testing\"\n\t\"time\"\n)\n\nconst requestTimeout = time.Second * 5\n\nvar (\n\tedgeHost = flag.String(\"edgeHost\", \"www.gov.uk\", \"Hostname of edge\")\n\toriginPort = flag.Int(\"originPort\", 8080, \"Origin port to listen on for requests\")\n\tinsecureTLS = flag.Bool(\"insecureTLS\", false, \"Whether to check server certificates\")\n\n\tclient *http.Transport\n\toriginServer *CDNServeMux\n)\n\n\/\/ Setup clients and servers.\nfunc init() {\n\n\tflag.Parse()\n\n\ttlsOptions := &tls.Config{}\n\tif *insecureTLS {\n\t\ttlsOptions.InsecureSkipVerify = true\n\t}\n\n\tclient = &http.Transport{\n\t\tResponseHeaderTimeout: requestTimeout,\n\t\tTLSClientConfig: tlsOptions,\n\t}\n\toriginServer = StartServer(*originPort)\n\n\tlog.Println(\"Confirming that CDN has successfully probed Origin\")\n\terr := confirmOriginIsEnabled(originServer, *edgeHost)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc TestHelpers(t *testing.T) {\n\ttestHelpersCDNServeMuxHandlers(t, originServer)\n\ttestHelpersCDNServeMuxProbes(t, originServer)\n}\n\n\/\/ Should redirect from HTTP to HTTPS without hitting origin.\nfunc TestProtocolRedirect(t *testing.T) {\n\toriginServer.SwitchHandler(func(w http.ResponseWriter, r *http.Request) {\n\t\tt.Error(\"Request should not have made it to origin\")\n\t})\n\n\tsourceUrl := fmt.Sprintf(\"http:\/\/%s\/foo\/bar\", *edgeHost)\n\tdestUrl := fmt.Sprintf(\"https:\/\/%s\/foo\/bar\", *edgeHost)\n\n\treq, _ := http.NewRequest(\"GET\", sourceUrl, nil)\n\tresp, err := client.RoundTrip(req)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif resp.StatusCode != 301 {\n\t\tt.Errorf(\"Status code expected 301, got %d\", resp.StatusCode)\n\t}\n\tif d := resp.Header.Get(\"Location\"); d != destUrl {\n\t\tt.Errorf(\"Location header expected %s, got %s\", destUrl, d)\n\t}\n}\n\n\/\/ Should send request to origin by default\nfunc TestRequestsGoToOriginByDefault(t *testing.T) {\n\tuuid := NewUUID()\n\toriginServer.SwitchHandler(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method == \"GET\" && r.URL.Path == fmt.Sprintf(\"\/%s\", uuid) {\n\t\t\tw.Header().Set(\"EnsureOriginServed\", uuid)\n\t\t}\n\t})\n\n\tsourceUrl := fmt.Sprintf(\"https:\/\/%s\/%s\", *edgeHost, uuid)\n\n\treq, _ := http.NewRequest(\"GET\", sourceUrl, nil)\n\tresp, err := client.RoundTrip(req)\n\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif resp.StatusCode != 200 {\n\t\tt.Errorf(\"Status code expected 200, got %d\", resp.StatusCode)\n\t}\n\tif d := resp.Header.Get(\"EnsureOriginServed\"); d != uuid {\n\t\tt.Errorf(\"EnsureOriginServed header has not come from Origin: expected %q, got %q\", uuid, d)\n\t}\n\n}\n\n\/\/ Should cache first response and return it on second request without\n\/\/ hitting origin again.\nfunc TestFirstResponseCached(t *testing.T) {\n\tconst bodyExpected = \"first request\"\n\tconst requestsExpectedCount = 1\n\trequestsReceivedCount := 0\n\n\toriginServer.SwitchHandler(func(w http.ResponseWriter, r *http.Request) {\n\t\tif requestsReceivedCount == 0 {\n\t\t\tw.Write([]byte(bodyExpected))\n\t\t} else {\n\t\t\tw.Write([]byte(\"subsequent request\"))\n\t\t}\n\n\t\trequestsReceivedCount++\n\t})\n\n\turl := fmt.Sprintf(\"https:\/\/%s\/%s\", *edgeHost, NewUUID())\n\treq, _ := http.NewRequest(\"GET\", url, nil)\n\n\tfor i := 0; i < 2; i++ {\n\t\tresp, err := client.RoundTrip(req)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tdefer resp.Body.Close()\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif string(body) != bodyExpected {\n\t\t\tt.Errorf(\"Incorrect response body. Expected %q, got %q\", bodyExpected, body)\n\t\t}\n\t}\n\n\tif requestsReceivedCount > requestsExpectedCount {\n\t\tt.Errorf(\"originServer got too many requests. Expected %d requests, got %d\", requestsExpectedCount, requestsReceivedCount)\n\t}\n}\n\n\/\/ Should return 403 for PURGE requests from IPs not in the whitelist.\nfunc TestRestrictPurgeRequests(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should create an X-Forwarded-For header containing the client's IP.\nfunc TestHeaderCreateXFF(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should append client's IP to existing X-Forwarded-For header.\nfunc TestHeaderAppendXFF(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should create a True-Client-IP header containing the client's IP\n\/\/ address, discarding the value provided in the original request.\nfunc TestHeaderUnspoofableClientIP(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should not modify Host header from original request.\nfunc TestHeaderHostUnmodified(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should set a default TTL if the response doesn't set one.\nfunc TestDefaultTTL(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should serve stale object and not hit mirror(s) if origin is down and\n\/\/ object is beyond TTL but still in cache.\nfunc TestFailoverOriginDownServeStale(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should serve stale object and not hit mirror(s) if origin returns a 5xx\n\/\/ response and object is beyond TTL but still in cache.\nfunc TestFailoverOrigin5xxServeStale(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should fallback to first mirror if origin is down and object is not in\n\/\/ cache (active or stale).\nfunc TestFailoverOriginDownUseFirstMirror(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should fallback to first mirror if origin returns 5xx response and object\n\/\/ is not in cache (active or stale).\nfunc TestFailoverOrigin5xxUseFirstMirror(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should fallback to second mirror if both origin and first mirror are\n\/\/ down.\nfunc TestFailoverOriginDownFirstMirrorDownUseSecondMirror(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should fallback to second mirror if both origin and first mirror return\n\/\/ 5xx responses.\nfunc TestFailoverOrigin5xxFirstMirror5xxUseSecondMirror(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should not fallback to mirror if origin returns a 5xx response with a\n\/\/ No-Fallback header.\nfunc TestFailoverNoFallbackHeader(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should serve a known static error page if cannot serve a page\n\/\/ from origin, stale or any mirror.\n\/\/ NB: ideally this should be a page that we control that has a mechanism\n\/\/ to alert us that it has been served.\nfunc TestErrorPageIsServedWhenNoBackendAvailable(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should not cache a response with a Set-Cookie a header.\nfunc TestNoCacheHeaderSetCookie(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should not cache a response with a Cache-Control: private header.\nfunc TestNoCacheHeaderCacheControlPrivate(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ ---------------------------------------------------------\n\/\/ Test that useful common cache-related parameters are sent to the\n\/\/ client by this CDN provider.\n\n\/\/ Should set an Age header itself rather than passing the Age header from origin.\nfunc TestAgeHeaderIsSetByProviderNotOrigin(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should set an X-Cache header containing HIT\/MISS from 'origin, itself'\nfunc TestXCacheHeaderContainsHitMissFromBothProviderAndOrigin(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n\n\/\/ Should set an X-Served-By header giving information on the node and location served from.\nfunc TestXServedByHeaderContainsANodeIdAndLocation(t *testing.T) {\n\n\tuuid := NewUUID()\n\toriginServer.SwitchHandler(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method == \"GET\" && r.URL.Path == fmt.Sprintf(\"\/%s\", uuid) {\n\t\t\tw.WriteHeader(200)\n\t\t}\n\t})\n\n\tsourceUrl := fmt.Sprintf(\"https:\/\/%s\/%s\", *edgeHost, uuid)\n\n\t\/\/ Get first request, will come from origin. Edge Hit Count 0\n\treq, _ := http.NewRequest(\"GET\", sourceUrl, nil)\n\tresp, err := client.RoundTrip(req)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tactualHeader := resp.Header.Get(\"X-Served-By\")\n\tif actualHeader == \"\" {\n\t\tt.Error(\"X-Served-By header has not been set by Edge\")\n\t}\n\n\tre := regexp.MustCompile(\"^cache-[a-z0-9]+-[A-Z]{3}$\")\n\tif re.FindString(actualHeader) != actualHeader {\n\t\tt.Errorf(\"X-Served-By is not as expected: got %q\", actualHeader)\n\t}\n\n}\n\n\/\/ Should set an X-Cache-Hits header containing hit count for this object,\n\/\/ from the provider not origin\nfunc TestXCacheHitsContainsProviderHitCountForThisObject(t *testing.T) {\n\tt.Error(\"Not implemented\")\n}\n<|endoftext|>"} {"text":"<commit_before>package dbhandler\n\nimport (\n\t\"fmt\"\n\t\"database\/sql\"\n\t\/\/ the driver is used internally, the underscore makes sure the \"unused\"\n\t\/\/ error is suppressed.\n\t_ \"github.com\/lib\/pq\"\n\t\"log\"\n)\n\n\/\/TODO: Read from a config file or environment\nconst (\n\tDB_USER = \"hspc\"\n\tDB_PASSWORD = \"HSPC-Password\"\n\tDB_NAME = \"postgres\"\n)\n\nfunc checkErr(err error) {\n\tif err != nil {\n\t\t\/\/log.Fatal(err)\n\t\tpanic(err)\n\t}\n}\n\nfunc getDBConn() *sql.DB {\n\tdbinfo := fmt.Sprintf(\"user=%s password=%s dbname=%s sslmode=disable\",\n\t\tDB_USER, DB_PASSWORD, DB_NAME)\n\tdb, err := sql.Open(\"postgres\", dbinfo)\n\tcheckErr(err)\n\treturn db\n}\n\n\/**\nCredential\n **\/\nfunc CredentialCreate(emailaddress string, password_hash string) int64 {\n\tlog.Printf(\"# Creating credential\")\n\n\tdb := getDBConn()\n\n\tstmt, err := db.Prepare(\"INSERT INTO Credential(emailaddress, password_hash) VALUES($1, $2) returning credential_id\")\n\tif (err != nil) {\n\t\tlog.Fatal(\"Error creating prepared statement\")\n\t\tlog.Panic(err)\n\t}\n\n\tvar lastInsertId int64\n\terr = stmt.QueryRow(emailaddress, password_hash).Scan(&lastInsertId)\n\n\tif (err != nil) {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Printf(\"credential_id = %d\", lastInsertId)\n\n\treturn lastInsertId\n}\n\nfunc CredentialRead(emailaddress string) credential_struct {\n\tvar credential = credential_struct{}\n\tdb := getDBConn()\n\n\tlog.Printf(\"# Reading Credential\")\n\tlog.Printf(\"emailaddress = %s\", emailaddress)\n\n\tstmt, err := db.Prepare(\"SELECT credential_id, emailaddress, password_hash FROM Credential WHERE emailaddress = $1\")\n\tdefer stmt.Close()\n\n\terr = stmt.QueryRow(emailaddress).Scan(&credential.credential_id, &credential.emailaddress, &credential.password_hash)\n\n\tif (err == nil) {\n\t\treturn credential\n\t}\n\n\treturn credential\n}\n\nfunc CredentialUpdate(emailaddress string, password string) int64 {\n\tdb := getDBConn()\n\n\tlog.Printf(\"# Updating Credential\")\n\tlog.Printf(\"emailaddress = %s\", emailaddress)\n\n\tstmt, err := db.Prepare(\"UPDATE Credential SET emailaddress = $1, password_hash = $2 WHERE emailaddress = $1\")\n\tdefer stmt.Close()\n\n\tresult, err := stmt.Exec(emailaddress, password)\n\n\tcheckErr(err)\n\n\taffectedCount, err := result.RowsAffected()\n\n\tif ( affectedCount != 1) {\n\t\tlog.Fatalf(\"Unexpected number of updates: $d\", affectedCount)\n\t}\n\n\treturn affectedCount\n}\n\nfunc CredentialDelete(emailaddress string) int64 {\n\tdb := getDBConn()\n\n\tlog.Printf(\"# Deleting Credential\")\n\tlog.Printf(\"emailaddress = %s\", emailaddress)\n\n\tstmt, err := db.Prepare(\"DELETE FROM Credential WHERE emailaddress = $1\")\n\tdefer stmt.Close()\n\n\tresult, err := stmt.Exec(emailaddress)\n\n\tcheckErr(err)\n\n\taffectedCount, err := result.RowsAffected()\n\n\tif ( affectedCount != 1) {\n\t\tlog.Fatalf(\"Unexpected number of updates: $d\", affectedCount)\n\t}\n\n\treturn affectedCount\n}\n\n\/*\nAddress\n*\/\nfunc AddressCreate(address address_struct) int64 {\n\tdb := getDBConn()\n\n\tlog.Printf(\"# Creating Address\")\n\n\tstmt, err := db.Prepare(\"INSERT INTO address(address_country, address_zip, address_state, address_city, address_line1, address_line2) \" +\n\t\t\"VALUES($1, $2, $3, $4, $5, $6) returning address_id\")\n\tdefer stmt.Close()\n\n\tif (err != nil) {\n\t\tlog.Fatal(\"Error creating prepared statement: \")\n\t\tlog.Panic(err)\n\t}\n\n\tvar address_id int64\n\terr = stmt.QueryRow(address.country, address.zipcode, address.state, address.city, address.line1, address.line2).Scan(&address_id)\n\n\tif (err != nil) {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn address_id\n}\n\nfunc AddressRead(address_id int64) address_struct {\n\tlog.Printf(\"# Reading Address\")\n\n\tdb := getDBConn()\n var address = address_struct{}\n\n\tstmt, err := db.Prepare(\"SELECT address_id, address_country, address_zip, address_city, address_line1, address_line2 \" +\n\t\t\"FROM address WHERE address_id = $1\")\n\tdefer stmt.Close()\n\n\tif (err != nil) {\n\t\tlog.Fatal(\"Error creating prepared statement: \")\n\t\tlog.Panic(err)\n\t}\n\n\terr = stmt.QueryRow(address_id).Scan(&address.address_id, &address.country, &address.zipcode, &address.city, &address.line1, &address.line2)\n\n\tif (err == nil) {\n\t\t return address\n\t}\n\n\treturn address\n}\n\nfunc AddressUpdate(address address_struct) int64 {\n\tdb := getDBConn()\n\n\tlog.Printf(\"# Updating Address\")\n\tlog.Printf(\"Address ID = %d\", address.address_id)\n\n\tstmt, err := db.Prepare(\"UPDATE address SET address_country = $1, address_zip = $2 \" +\n\t\t\" ,address_state = $3 ,address_city = $4\" +\n\t\t\" ,address_line1 = $5 ,address_line2 = $6\" +\n\t\t\" WHERE address_id = $7\")\n\tdefer stmt.Close()\n\n\tif (err != nil) {\n\t\tlog.Fatal(\"Error creating prepared statement: \")\n\t\tlog.Panic(err)\n\t}\n\n\tresult, err := stmt.Exec(address.country, address.zipcode, address.state, address.city, address.line1, address.line2, address.address_id)\n\n\tif (err != nil) {\n\t\tlog.Fatal(\"Error updating\")\n\t\tlog.Panic(err)\n\t}\n\n\taffectedCount, err := result.RowsAffected()\n\n\tif ( affectedCount != 1) {\n\t\tlog.Fatalf(\"Unexpected number of updates: $d\", affectedCount)\n\t}\n\n\treturn affectedCount\n}\n\nfunc AddressDelete(address_id int64) int64 {\n\tdb := getDBConn()\n\n\tlog.Printf(\"# Deleting Address\")\n\tlog.Printf(\"Address ID = %d\", address_id)\n\n\tstmt, err := db.Prepare(\"DELETE FROM address WHERE address_id = $1\")\n\tdefer stmt.Close()\n\n\tresult, err := stmt.Exec(address_id)\n\n\tif (err != nil) {\n\t\tlog.Fatal(\"Delete Failed\")\n\t\tlog.Panic(err)\n\t}\n\n\taffectedCount, err := result.RowsAffected()\n\n\tif ( affectedCount != 1) {\n\t\tlog.Fatalf(\"Unexpected number of updates: $d\", affectedCount)\n\t}\n\n\treturn affectedCount\n}\n\n\/*\nSchool\n*\/\nfunc SchoolCreate(school school_struct) int64 {\n \/\/TODO: complete the logic\n\n return 0\n}\n\nfunc SchoolRead(school_id int64) school_struct {\n var school = school_struct{}\n \/\/TODO: complete the logic\n\n return school\n}\n\nfunc SchoolUpdate(school school_struct) school_struct {\n \/\/TODO: complete the logic\n\n return school\n}\n\nfunc SchoolDelete(school_id int64) int64 {\n \/\/TODO: complete the logic\n\n return 0\n}\n\n\/*\nAdvisor\n*\/\nfunc AdvisorCreate(advisor advisor_struct) int64 {\n \/\/TODO: complete the logic\n\n return 0\n}\n\nfunc AdvisorRead(advisor_id int64) advisor_struct {\n var advisor = advisor_struct{}\n\n \/\/TODO: complete the logic\n\n return advisor\n}\n\nfunc AdvisorUpdate(advisor advisor_struct) advisor_struct {\n \/\/TODO: complete the logic\n\n return advisor\n}\n\nfunc AdvisorDelete(advisor_id int64) int64 {\n \/\/TODO: complete the logic\n\n return 0\n}\n\n\/*\nTeam\n*\/\nfunc TeamCreate(team team_struct) int64 {\n \/\/TODO: complete the logic\n\n return 0\n}\n\nfunc TeamRead(team_id int64) team_struct {\n var team = team_struct{}\n\n \/\/TODO: complete the logic\n\n return team\n}\n\nfunc TeamUpdate(team team_struct) team_struct {\n \/\/TODO: complete the logic\n\n return team\n}\n\nfunc TeamDelete(team_id int64) int64 {\n \/\/TODO: complete the logic\n\n return 0\n}\n\n\/*\nStudent\n*\/\nfunc studentCreate(student student_struct) int64 {\n \/\/TODO: complete the logic\n\n return 0\n}\n\nfunc studentRead(student_id int64) student_struct {\n var student = student_struct{}\n\n \/\/TODO: complete the logic\n\n return student\n}\n\nfunc studentUpdate(student student_struct) student_struct {\n \/\/TODO: complete the logic\n\n return student\n}\n\nfunc studentDelete(student_id int64) int64 {\n \/\/TODO: complete the logic\n\n return 0\n}\n\n\/*\nTeam Score\n*\/\nfunc teamscoreCreate(teamscore team_score_struct) int64 {\n \/\/TODO: complete the logic\n\n return 0\n}\n\nfunc teamscoreRead(teamscore_id int64) team_score_struct {\n var teamscore = team_score_struct{}\n\n \/\/TODO: complete the logic\n\n return teamscore\n}\n\nfunc teamscoreUpdate(teamscore team_score_struct) team_score_struct {\n \/\/TODO: complete the logic\n\n return teamscore\n}\n\nfunc teamscoreDelete(teamscore_id int64) int64 {\n \/\/TODO: complete the logic\n\n return 0\n}\n\n\/*\nParking\n*\/\nfunc parkingCreate(parking team_score_struct) int64 {\n \/\/TODO: complete the logic\n\n return 0\n}\n\nfunc parkingRead(parking_id int64) team_score_struct {\n var parking = team_score_struct{}\n\n \/\/TODO: complete the logic\n\n return parking\n}\n\nfunc parkingUpdate(parking team_score_struct) team_score_struct {\n \/\/TODO: complete the logic\n\n return parking\n}\n\nfunc parkingDelete(parking_id int64) int64 {\n \/\/TODO: complete the logic\n\n return 0\n}\n\n\/*\nProblem\n*\/\nfunc problemCreate(problem problem_struct) int64 {\n \/\/TODO: complete the logic\n\n return 0\n}\n\nfunc problemRead(problem_id int64) problem_struct {\n var problem = problem_struct{}\n\n \/\/TODO: complete the logic\n\n return problem\n}\n\nfunc problemUpdate(problem problem_struct) problem_struct {\n \/\/TODO: complete the logic\n\n return problem\n}\n\nfunc problemDelete(problem_id int64) int64 {\n \/\/TODO: complete the logic\n\n return 0\n}\n\n\/*\nSolution\n*\/\nfunc solutionCreate(solution solution_struct) int64 {\n \/\/TODO: complete the logic\n\n return 0\n}\n\nfunc solutionRead(solution_id int64) solution_struct {\n var solution = solution_struct{}\n\n \/\/TODO: complete the logic\n\n return solution\n}\n\nfunc solutionUpdate(solution solution_struct) solution_struct {\n \/\/TODO: complete the logic\n\n return solution\n}\n\nfunc solutionDelete(solution_id int64) int64 {\n \/\/TODO: complete the logic\n\n return 0\n}\n\n\/*\nProblem_Solution\n*\/\nfunc problemsolutionCreate(problemsolution problem_solution_struct) int64 {\n \/\/TODO: complete the logic\n\n return 0\n}\n\nfunc problemsolutionRead(problemsolution_id int64) problem_solution_struct {\n var problemsolution = problem_solution_struct{}\n\n \/\/TODO: complete the logic\n\n return problemsolution\n}\n\nfunc problemsolutionUpdate(problemsolution problem_solution_struct) problem_solution_struct {\n \/\/TODO: complete the logic\n\n return problemsolution\n}\n\nfunc problemsolutionDelete(problemsolution_id int64) int64 {\n \/\/TODO: complete the logic\n\n return 0\n}<commit_msg>filed SQL error handling, handled ErrNoRows instead of panicing, implemented School DB functions<commit_after>package dbhandler\n\nimport (\n\t\"fmt\"\n\t\"database\/sql\"\n\t\/\/ the driver is used internally, the underscore makes sure the \"unused\"\n\t\/\/ error is suppressed.\n\t_ \"github.com\/lib\/pq\"\n\t\"log\"\n)\n\n\/\/TODO: Read from a config file or environment\nconst (\n\tDB_USER = \"hspc\"\n\tDB_PASSWORD = \"HSPC-Password\"\n\tDB_NAME = \"postgres\"\n)\n\nfunc checkErr(err error) {\n\tif err != nil {\n\t\t\/\/log.Fatal(err)\n\t\tpanic(err)\n\t}\n}\n\nfunc getDBConn() *sql.DB {\n\tdbinfo := fmt.Sprintf(\"user=%s password=%s dbname=%s sslmode=disable\",\n\t\tDB_USER, DB_PASSWORD, DB_NAME)\n\tdb, err := sql.Open(\"postgres\", dbinfo)\n\tcheckErr(err)\n\treturn db\n}\n\n\/**\nCredential\n **\/\nfunc CredentialCreate(emailaddress string, password_hash string) int64 {\n\tlog.Printf(\"# Creating credential\")\n\n\tdb := getDBConn()\n\n\tstmt, err := db.Prepare(\"INSERT INTO Credential(emailaddress, password_hash) VALUES($1, $2) returning credential_id\")\n\tif err != nil {\n\t\tlog.Print(\"Error creating prepared statement\")\n\t\tlog.Fatal(err)\n\t}\n\n\tvar lastInsertId int64\n\terr = stmt.QueryRow(emailaddress, password_hash).Scan(&lastInsertId)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Printf(\"credential_id = %d\", lastInsertId)\n\n\treturn lastInsertId\n}\n\nfunc CredentialRead(emailaddress string) credential_struct {\n\tvar credential = credential_struct{}\n\tdb := getDBConn()\n\n\tlog.Printf(\"# Reading Credential\")\n\tlog.Printf(\"emailaddress = %s\", emailaddress)\n\n\tstmt, err := db.Prepare(\"SELECT credential_id, emailaddress, password_hash FROM Credential WHERE emailaddress = $1\")\n\tdefer stmt.Close()\n\n\tif err != nil {\n\t\tlog.Print(\"Error creating prepared statement\")\n\t\tlog.Fatal(err)\n\t}\n\n\terr = stmt.QueryRow(emailaddress).Scan(&credential.credential_id, &credential.emailaddress, &credential.password_hash)\n\n\tif err == sql.ErrNoRows {\n\t\treturn credential_struct{}\n\t}\n\n\tif err != nil {\n\t\tlog.Print(\"Error reading Credential data\")\n\t\tlog.Fatal(err)\n\t}\n\n\treturn credential\n}\n\nfunc CredentialUpdate(emailaddress string, password string) int64 {\n\tdb := getDBConn()\n\n\tlog.Printf(\"# Updating Credential\")\n\tlog.Printf(\"emailaddress = %s\", emailaddress)\n\n\tstmt, err := db.Prepare(\"UPDATE Credential SET emailaddress = $1, password_hash = $2 WHERE emailaddress = $1\")\n\tdefer stmt.Close()\n\n\tif err != nil {\n\t\tlog.Print(\"Error creating prepared statement\")\n\t\tlog.Fatal(err)\n\t}\n\n\tresult, err := stmt.Exec(emailaddress, password)\n\n\tcheckErr(err)\n\n\taffectedCount, err := result.RowsAffected()\n\n\tif affectedCount != 1 {\n\t\tlog.Fatalf(\"Unexpected number of updates: $d\", affectedCount)\n\t}\n\n\treturn affectedCount\n}\n\nfunc CredentialDelete(emailaddress string) int64 {\n\tdb := getDBConn()\n\n\tlog.Printf(\"# Deleting Credential\")\n\tlog.Printf(\"emailaddress = %s\", emailaddress)\n\n\tstmt, err := db.Prepare(\"DELETE FROM Credential WHERE emailaddress = $1\")\n\tdefer stmt.Close()\n\n\tif err != nil {\n\t\tlog.Print(\"Error creating prepared statement\")\n\t\tlog.Fatal(err)\n\t}\n\n\tresult, err := stmt.Exec(emailaddress)\n\n\tcheckErr(err)\n\n\taffectedCount, err := result.RowsAffected()\n\n\tif affectedCount != 1 {\n\t\tlog.Fatalf(\"Unexpected number of updates: $d\", affectedCount)\n\t}\n\n\treturn affectedCount\n}\n\n\/*\nAddress\n*\/\nfunc AddressCreate(address address_struct) int64 {\n\tdb := getDBConn()\n\n\tlog.Printf(\"# Creating Address\")\n\n\tstmt, err := db.Prepare(\"INSERT INTO address(address_country, address_zip, address_state, address_city, address_line1, address_line2) \" +\n\t\t\"VALUES($1, $2, $3, $4, $5, $6) returning address_id\")\n\tdefer stmt.Close()\n\n\tif err != nil {\n\t\tlog.Print(\"Error creating prepared statement\")\n\t\tlog.Fatal(err)\n\t}\n\n\tvar address_id int64\n\terr = stmt.QueryRow(address.country, address.zipcode, address.state, address.city, address.line1, address.line2).Scan(&address_id)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn address_id\n}\n\nfunc AddressRead(address_id int64) address_struct {\n\tlog.Printf(\"# Reading Address\")\n\n\tdb := getDBConn()\n\tvar address = address_struct{}\n\n\tstmt, err := db.Prepare(\"SELECT address_id, address_country, address_zip, address_city, address_line1, address_line2 \" +\n\t\t\"FROM address WHERE address_id = $1\")\n\tdefer stmt.Close()\n\n\tif err != nil {\n\t\tlog.Print(\"Error creating prepared statement\")\n\t\tlog.Fatal(err)\n\t}\n\n\terr = stmt.QueryRow(address_id).Scan(&address.address_id, &address.country, &address.zipcode, &address.city, &address.line1, &address.line2)\n\n\tif err == sql.ErrNoRows {\n\t\treturn address_struct{}\n\t}\n\n\tif err != nil {\n\t\tlog.Fatal(\"Error reading address data\")\n\t\tlog.Panic(err)\n\t}\n\n\treturn address\n}\n\nfunc AddressUpdate(address address_struct) int64 {\n\tdb := getDBConn()\n\n\tlog.Printf(\"# Updating Address\")\n\tlog.Printf(\"Address ID = %d\", address.address_id)\n\n\tstmt, err := db.Prepare(\"UPDATE address SET address_country = $1, address_zip = $2 \" +\n\t\t\" ,address_state = $3 ,address_city = $4\" +\n\t\t\" ,address_line1 = $5 ,address_line2 = $6\" +\n\t\t\" WHERE address_id = $7\")\n\tdefer stmt.Close()\n\n\tif err != nil {\n\t\tlog.Print(\"Error creating prepared statement\")\n\t\tlog.Fatal(err)\n\t}\n\n\tresult, err := stmt.Exec(address.country, address.zipcode, address.state, address.city, address.line1, address.line2, address.address_id)\n\n\tif err != nil {\n\t\tlog.Fatal(\"Error updating\")\n\t\tlog.Panic(err)\n\t}\n\n\taffectedCount, err := result.RowsAffected()\n\n\tif affectedCount != 1 {\n\t\tlog.Fatalf(\"Unexpected number of updates: $d\", affectedCount)\n\t}\n\n\treturn affectedCount\n}\n\nfunc AddressDelete(address_id int64) int64 {\n\tdb := getDBConn()\n\n\tlog.Printf(\"# Deleting Address\")\n\tlog.Printf(\"Address ID = %d\", address_id)\n\n\tstmt, err := db.Prepare(\"DELETE FROM address WHERE address_id = $1\")\n\tdefer stmt.Close()\n\n\tif err != nil {\n\t\tlog.Print(\"Error creating prepared statement\")\n\t\tlog.Fatal(err)\n\t}\n\n\tresult, err := stmt.Exec(address_id)\n\n\tif err != nil {\n\t\tlog.Fatal(\"Delete Failed\")\n\t\tlog.Panic(err)\n\t}\n\n\taffectedCount, err := result.RowsAffected()\n\n\tif affectedCount != 1 {\n\t\tlog.Fatalf(\"Unexpected number of updates: $d\", affectedCount)\n\t}\n\n\treturn affectedCount\n}\n\n\/*\nSchool\n*\/\nfunc SchoolCreate(school school_struct) int64 {\n\tlog.Printf(\"# Creating School\")\n\n\tdb := getDBConn()\n\tstmt, err := db.Prepare(\"INSERT INTO school(school_name, address_id) VALUES($1, $2) returning school_id\")\n\tdefer stmt.Close()\n\n\tif err != nil {\n\t\tlog.Print(\"Error creating prepared statement\")\n\t\tlog.Fatal(err)\n\t}\n\n\tvar school_id int64\n\terr = stmt.QueryRow(school.school_name, school.address_id).Scan(&school_id)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn 0\n\t}\n\n\treturn school_id\n}\n\nfunc SchoolRead(school_id int64) school_struct {\n\tlog.Printf(\"# Reading Address\")\n\n\tdb := getDBConn()\n\tstmt, err := db.Prepare(\"SELECT school_id, school_name, address_id FROM school WHERE school_id = $1\")\n\tdefer stmt.Close()\n\n\tif err != nil {\n\t\tlog.Print(\"Error creating prepared statement\")\n\t\tlog.Fatal(err)\n\t}\n\n\tvar school = school_struct{}\n\terr = stmt.QueryRow(school_id).Scan(&school.school_id, &school.school_name, &school.address_id)\n\n\t\/\/ if no records found, return an empty struct\n\tif err == sql.ErrNoRows {\n\t\treturn school_struct{}\n\t}\n\n\tif err != nil {\n\t\tlog.Print(\"Error getting school data\")\n\t\tlog.Panic(err)\n\t}\n\n\treturn school\n}\n\nfunc SchoolUpdate(school school_struct) int64 {\n\tdb := getDBConn()\n\n\tlog.Printf(\"# Updating School\")\n\tlog.Printf(\"School ID = %d\", school.school_id)\n\n\tstmt, err := db.Prepare(\"UPDATE school SET school_name = $1, address_id = $2 WHERE school_id = $3\")\n\tdefer stmt.Close()\n\n\tif err != nil {\n\t\tlog.Print(\"Error creating prepared statement\")\n\t\tlog.Fatal(err)\n\t}\n\n\tresult, err := stmt.Exec(school.school_name, school.address_id, school.school_id)\n\n\tif err != nil {\n\t\tlog.Fatal(\"Error updating school\")\n\t\tlog.Panic(err)\n\t}\n\n\taffectedCount, err := result.RowsAffected()\n\n\tif affectedCount != 1 {\n\t\tlog.Fatalf(\"Unexpected number of updates: %d\", affectedCount)\n\t}\n\n\treturn affectedCount\n}\n\nfunc SchoolDelete(school_id int64) int64 {\n\tdb := getDBConn()\n\n\tlog.Printf(\"# Deleting School\")\n\tlog.Printf(\"School ID = %d\", school_id)\n\n\tstmt, err := db.Prepare(\"DELETE FROM school WHERE school_id = $1\")\n\tdefer stmt.Close()\n\n\tif err != nil {\n\t\tlog.Print(\"Error creating prepared statement\")\n\t\tlog.Fatal(err)\n\t}\n\n\tresult, err := stmt.Exec(school_id)\n\n\tif err != nil {\n\t\tlog.Fatal(\"Delete Failed\")\n\t\tlog.Panic(err)\n\t}\n\n\taffectedCount, err := result.RowsAffected()\n\n\tif affectedCount != 1 {\n\t\tlog.Fatalf(\"Unexpected number of updates: $d\", affectedCount)\n\t}\n\n\treturn affectedCount\n}\n\n\/*\nAdvisor\n*\/\nfunc AdvisorCreate(advisor advisor_struct) int64 {\n\t\/\/TODO: complete the logic\n\n\treturn 0\n}\n\nfunc AdvisorRead(advisor_id int64) advisor_struct {\n\tvar advisor = advisor_struct{}\n\n\t\/\/TODO: complete the logic\n\n\treturn advisor\n}\n\nfunc AdvisorUpdate(advisor advisor_struct) advisor_struct {\n\t\/\/TODO: complete the logic\n\n\treturn advisor\n}\n\nfunc AdvisorDelete(advisor_id int64) int64 {\n\t\/\/TODO: complete the logic\n\n\treturn 0\n}\n\n\/*\nTeam\n*\/\nfunc TeamCreate(team team_struct) int64 {\n\t\/\/TODO: complete the logic\n\n\treturn 0\n}\n\nfunc TeamRead(team_id int64) team_struct {\n\tvar team = team_struct{}\n\n\t\/\/TODO: complete the logic\n\n\treturn team\n}\n\nfunc TeamUpdate(team team_struct) team_struct {\n\t\/\/TODO: complete the logic\n\n\treturn team\n}\n\nfunc TeamDelete(team_id int64) int64 {\n\t\/\/TODO: complete the logic\n\n\treturn 0\n}\n\n\/*\nStudent\n*\/\nfunc studentCreate(student student_struct) int64 {\n\t\/\/TODO: complete the logic\n\n\treturn 0\n}\n\nfunc studentRead(student_id int64) student_struct {\n\tvar student = student_struct{}\n\n\t\/\/TODO: complete the logic\n\n\treturn student\n}\n\nfunc studentUpdate(student student_struct) student_struct {\n\t\/\/TODO: complete the logic\n\n\treturn student\n}\n\nfunc studentDelete(student_id int64) int64 {\n\t\/\/TODO: complete the logic\n\n\treturn 0\n}\n\n\/*\nTeam Score\n*\/\nfunc teamscoreCreate(teamscore team_score_struct) int64 {\n\t\/\/TODO: complete the logic\n\n\treturn 0\n}\n\nfunc teamscoreRead(teamscore_id int64) team_score_struct {\n\tvar teamscore = team_score_struct{}\n\n\t\/\/TODO: complete the logic\n\n\treturn teamscore\n}\n\nfunc teamscoreUpdate(teamscore team_score_struct) team_score_struct {\n\t\/\/TODO: complete the logic\n\n\treturn teamscore\n}\n\nfunc teamscoreDelete(teamscore_id int64) int64 {\n\t\/\/TODO: complete the logic\n\n\treturn 0\n}\n\n\/*\nParking\n*\/\nfunc parkingCreate(parking team_score_struct) int64 {\n\t\/\/TODO: complete the logic\n\n\treturn 0\n}\n\nfunc parkingRead(parking_id int64) team_score_struct {\n\tvar parking = team_score_struct{}\n\n\t\/\/TODO: complete the logic\n\n\treturn parking\n}\n\nfunc parkingUpdate(parking team_score_struct) team_score_struct {\n\t\/\/TODO: complete the logic\n\n\treturn parking\n}\n\nfunc parkingDelete(parking_id int64) int64 {\n\t\/\/TODO: complete the logic\n\n\treturn 0\n}\n\n\/*\nProblem\n*\/\nfunc problemCreate(problem problem_struct) int64 {\n\t\/\/TODO: complete the logic\n\n\treturn 0\n}\n\nfunc problemRead(problem_id int64) problem_struct {\n\tvar problem = problem_struct{}\n\n\t\/\/TODO: complete the logic\n\n\treturn problem\n}\n\nfunc problemUpdate(problem problem_struct) problem_struct {\n\t\/\/TODO: complete the logic\n\n\treturn problem\n}\n\nfunc problemDelete(problem_id int64) int64 {\n\t\/\/TODO: complete the logic\n\n\treturn 0\n}\n\n\/*\nSolution\n*\/\nfunc solutionCreate(solution solution_struct) int64 {\n\t\/\/TODO: complete the logic\n\n\treturn 0\n}\n\nfunc solutionRead(solution_id int64) solution_struct {\n\tvar solution = solution_struct{}\n\n\t\/\/TODO: complete the logic\n\n\treturn solution\n}\n\nfunc solutionUpdate(solution solution_struct) solution_struct {\n\t\/\/TODO: complete the logic\n\n\treturn solution\n}\n\nfunc solutionDelete(solution_id int64) int64 {\n\t\/\/TODO: complete the logic\n\n\treturn 0\n}\n\n\/*\nProblem_Solution\n*\/\nfunc problemsolutionCreate(problemsolution problem_solution_struct) int64 {\n\t\/\/TODO: complete the logic\n\n\treturn 0\n}\n\nfunc problemsolutionRead(problemsolution_id int64) problem_solution_struct {\n\tvar problemsolution = problem_solution_struct{}\n\n\t\/\/TODO: complete the logic\n\n\treturn problemsolution\n}\n\nfunc problemsolutionUpdate(problemsolution problem_solution_struct) problem_solution_struct {\n\t\/\/TODO: complete the logic\n\n\treturn problemsolution\n}\n\nfunc problemsolutionDelete(problemsolution_id int64) int64 {\n\t\/\/TODO: complete the logic\n\n\treturn 0\n}\n<|endoftext|>"} {"text":"<commit_before>package stdlib\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/PuerkitoBio\/agora\/runtime\"\n)\n\nfunc TestConvNumber(t *testing.T) {\n\tctx := runtime.NewCtx(nil, nil)\n\t\/\/ For case 10 below\n\tob := runtime.NewObject()\n\tob.Set(runtime.String(\"__toFloat\"), runtime.NewNativeFunc(ctx, \"\", func(args ...runtime.Val) runtime.Val {\n\t\treturn runtime.Number(22)\n\t}))\n\n\tcases := []struct {\n\t\tsrc runtime.Val\n\t\texp runtime.Val\n\t\terr bool\n\t}{\n\t\t0: {\n\t\t\tsrc: runtime.Nil,\n\t\t\terr: true,\n\t\t},\n\t\t1: {\n\t\t\tsrc: runtime.Number(1),\n\t\t\texp: runtime.Number(1),\n\t\t},\n\t\t2: {\n\t\t\tsrc: runtime.Bool(true),\n\t\t\texp: runtime.Number(1),\n\t\t},\n\t\t3: {\n\t\t\tsrc: runtime.Bool(false),\n\t\t\texp: runtime.Number(0),\n\t\t},\n\t\t4: {\n\t\t\tsrc: runtime.String(\"\"),\n\t\t\terr: true,\n\t\t},\n\t\t5: {\n\t\t\tsrc: runtime.String(\"not a number\"),\n\t\t\terr: true,\n\t\t},\n\t\t6: {\n\t\t\tsrc: runtime.String(\"17\"),\n\t\t\texp: runtime.Number(17),\n\t\t},\n\t\t7: {\n\t\t\tsrc: runtime.String(\"3.1415\"),\n\t\t\texp: runtime.Number(3.1415),\n\t\t},\n\t\t8: {\n\t\t\tsrc: runtime.NewObject(),\n\t\t\terr: true,\n\t\t},\n\t\t9: {\n\t\t\tsrc: runtime.NewNativeFunc(ctx, \"\", func(args ...runtime.Val) runtime.Val { return runtime.Nil }),\n\t\t\terr: true,\n\t\t},\n\t\t10: {\n\t\t\tsrc: ob,\n\t\t\texp: runtime.Number(22),\n\t\t},\n\t}\n\n\tcm := new(ConvMod)\n\tcm.SetCtx(ctx)\n\tfor i, c := range cases {\n\t\tfunc() {\n\t\t\tdefer func() {\n\t\t\t\tif e := recover(); (e != nil) != c.err {\n\t\t\t\t\tif c.err {\n\t\t\t\t\t\tt.Errorf(\"[%d] - expected a panic, got none\", i)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tt.Errorf(\"[%d] - expected no panic, got %v\", i, e)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t\tret := cm.conv_Number(c.src)\n\t\t\tif ret != c.exp {\n\t\t\t\tt.Errorf(\"[%d] - expected %v, got %v\", i, c.exp, ret)\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc TestConvType(t *testing.T) {\n\tctx := runtime.NewCtx(nil, nil)\n\n\tcases := []struct {\n\t\tsrc runtime.Val\n\t\texp string\n\t}{\n\t\t0: {\n\t\t\tsrc: runtime.Nil,\n\t\t\texp: \"nil\",\n\t\t},\n\t\t1: {\n\t\t\tsrc: runtime.Number(0),\n\t\t\texp: \"number\",\n\t\t},\n\t\t2: {\n\t\t\tsrc: runtime.Bool(false),\n\t\t\texp: \"bool\",\n\t\t},\n\t\t3: {\n\t\t\tsrc: runtime.String(\"\"),\n\t\t\texp: \"string\",\n\t\t},\n\t\t4: {\n\t\t\tsrc: runtime.NewNativeFunc(ctx, \"\", func(args ...runtime.Val) runtime.Val { return runtime.Nil }),\n\t\t\texp: \"func\",\n\t\t},\n\t\t5: {\n\t\t\tsrc: runtime.NewObject(),\n\t\t\texp: \"object\",\n\t\t},\n\t}\n\tcm := new(ConvMod)\n\tcm.SetCtx(ctx)\n\tfor i, c := range cases {\n\t\tret := cm.conv_Type(c.src)\n\t\tif ret.String() != c.exp {\n\t\t\tt.Errorf(\"[%d] - expected %s, got %s\", i, c.exp, ret)\n\t\t}\n\t}\n}\n<commit_msg>conv module fully tested<commit_after>package stdlib\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/PuerkitoBio\/agora\/runtime\"\n)\n\nfunc TestConvBool(t *testing.T) {\n\tctx := runtime.NewCtx(nil, nil)\n\t\/\/ For case 9 below\n\tob := runtime.NewObject()\n\tob.Set(runtime.String(\"__toBool\"), runtime.NewNativeFunc(ctx, \"\", func(args ...runtime.Val) runtime.Val {\n\t\treturn runtime.Bool(false)\n\t}))\n\n\tcases := []struct {\n\t\tsrc runtime.Val\n\t\texp runtime.Val\n\t\terr bool\n\t}{\n\t\t0: {\n\t\t\tsrc: runtime.Nil,\n\t\t\texp: runtime.Bool(false),\n\t\t},\n\t\t1: {\n\t\t\tsrc: runtime.Number(1),\n\t\t\texp: runtime.Bool(true),\n\t\t},\n\t\t2: {\n\t\t\tsrc: runtime.Number(3.1415),\n\t\t\texp: runtime.Bool(true),\n\t\t},\n\t\t3: {\n\t\t\tsrc: runtime.Number(0),\n\t\t\texp: runtime.Bool(false),\n\t\t},\n\t\t4: {\n\t\t\tsrc: runtime.Bool(true),\n\t\t\texp: runtime.Bool(true),\n\t\t},\n\t\t5: {\n\t\t\tsrc: runtime.Bool(false),\n\t\t\texp: runtime.Bool(false),\n\t\t},\n\t\t6: {\n\t\t\tsrc: runtime.String(\"some string\"),\n\t\t\texp: runtime.Bool(true),\n\t\t},\n\t\t7: {\n\t\t\tsrc: runtime.NewObject(),\n\t\t\texp: runtime.Bool(true),\n\t\t},\n\t\t8: {\n\t\t\tsrc: runtime.NewNativeFunc(ctx, \"\", func(args ...runtime.Val) runtime.Val { return runtime.Nil }),\n\t\t\texp: runtime.Bool(true),\n\t\t},\n\t\t9: {\n\t\t\tsrc: ob,\n\t\t\texp: runtime.Bool(false),\n\t\t},\n\t}\n\n\tcm := new(ConvMod)\n\tcm.SetCtx(ctx)\n\tfor i, c := range cases {\n\t\tfunc() {\n\t\t\tdefer func() {\n\t\t\t\tif e := recover(); (e != nil) != c.err {\n\t\t\t\t\tif c.err {\n\t\t\t\t\t\tt.Errorf(\"[%d] - expected a panic, got none\", i)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tt.Errorf(\"[%d] - expected no panic, got %v\", i, e)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t\tret := cm.conv_Bool(c.src)\n\t\t\tif ret != c.exp {\n\t\t\t\tt.Errorf(\"[%d] - expected %v, got %v\", i, c.exp, ret)\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc TestConvString(t *testing.T) {\n\tctx := runtime.NewCtx(nil, nil)\n\t\/\/ For case 8 below\n\tob := runtime.NewObject()\n\tob.Set(runtime.String(\"__toString\"), runtime.NewNativeFunc(ctx, \"\", func(args ...runtime.Val) runtime.Val {\n\t\treturn runtime.String(\"ok\")\n\t}))\n\n\tcases := []struct {\n\t\tsrc runtime.Val\n\t\texp runtime.Val\n\t\terr bool\n\t}{\n\t\t0: {\n\t\t\tsrc: runtime.Nil,\n\t\t\texp: runtime.String(\"nil\"),\n\t\t},\n\t\t1: {\n\t\t\tsrc: runtime.Number(1),\n\t\t\texp: runtime.String(\"1\"),\n\t\t},\n\t\t2: {\n\t\t\tsrc: runtime.Number(3.1415),\n\t\t\texp: runtime.String(\"3.1415\"),\n\t\t},\n\t\t3: {\n\t\t\tsrc: runtime.Bool(true),\n\t\t\texp: runtime.String(\"true\"),\n\t\t},\n\t\t4: {\n\t\t\tsrc: runtime.Bool(false),\n\t\t\texp: runtime.String(\"false\"),\n\t\t},\n\t\t5: {\n\t\t\tsrc: runtime.String(\"some string\"),\n\t\t\texp: runtime.String(\"some string\"),\n\t\t},\n\t\t6: {\n\t\t\tsrc: runtime.NewObject(),\n\t\t\terr: true,\n\t\t},\n\t\t7: {\n\t\t\tsrc: runtime.NewNativeFunc(ctx, \"\", func(args ...runtime.Val) runtime.Val { return runtime.Nil }),\n\t\t\terr: true,\n\t\t},\n\t\t8: {\n\t\t\tsrc: ob,\n\t\t\texp: runtime.String(\"ok\"),\n\t\t},\n\t}\n\n\tcm := new(ConvMod)\n\tcm.SetCtx(ctx)\n\tfor i, c := range cases {\n\t\tfunc() {\n\t\t\tdefer func() {\n\t\t\t\tif e := recover(); (e != nil) != c.err {\n\t\t\t\t\tif c.err {\n\t\t\t\t\t\tt.Errorf(\"[%d] - expected a panic, got none\", i)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tt.Errorf(\"[%d] - expected no panic, got %v\", i, e)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t\tret := cm.conv_String(c.src)\n\t\t\tif ret != c.exp {\n\t\t\t\tt.Errorf(\"[%d] - expected %v, got %v\", i, c.exp, ret)\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc TestConvNumber(t *testing.T) {\n\tctx := runtime.NewCtx(nil, nil)\n\t\/\/ For case 10 below\n\tob := runtime.NewObject()\n\tob.Set(runtime.String(\"__toFloat\"), runtime.NewNativeFunc(ctx, \"\", func(args ...runtime.Val) runtime.Val {\n\t\treturn runtime.Number(22)\n\t}))\n\n\tcases := []struct {\n\t\tsrc runtime.Val\n\t\texp runtime.Val\n\t\terr bool\n\t}{\n\t\t0: {\n\t\t\tsrc: runtime.Nil,\n\t\t\terr: true,\n\t\t},\n\t\t1: {\n\t\t\tsrc: runtime.Number(1),\n\t\t\texp: runtime.Number(1),\n\t\t},\n\t\t2: {\n\t\t\tsrc: runtime.Bool(true),\n\t\t\texp: runtime.Number(1),\n\t\t},\n\t\t3: {\n\t\t\tsrc: runtime.Bool(false),\n\t\t\texp: runtime.Number(0),\n\t\t},\n\t\t4: {\n\t\t\tsrc: runtime.String(\"\"),\n\t\t\terr: true,\n\t\t},\n\t\t5: {\n\t\t\tsrc: runtime.String(\"not a number\"),\n\t\t\terr: true,\n\t\t},\n\t\t6: {\n\t\t\tsrc: runtime.String(\"17\"),\n\t\t\texp: runtime.Number(17),\n\t\t},\n\t\t7: {\n\t\t\tsrc: runtime.String(\"3.1415\"),\n\t\t\texp: runtime.Number(3.1415),\n\t\t},\n\t\t8: {\n\t\t\tsrc: runtime.NewObject(),\n\t\t\terr: true,\n\t\t},\n\t\t9: {\n\t\t\tsrc: runtime.NewNativeFunc(ctx, \"\", func(args ...runtime.Val) runtime.Val { return runtime.Nil }),\n\t\t\terr: true,\n\t\t},\n\t\t10: {\n\t\t\tsrc: ob,\n\t\t\texp: runtime.Number(22),\n\t\t},\n\t}\n\n\tcm := new(ConvMod)\n\tcm.SetCtx(ctx)\n\tfor i, c := range cases {\n\t\tfunc() {\n\t\t\tdefer func() {\n\t\t\t\tif e := recover(); (e != nil) != c.err {\n\t\t\t\t\tif c.err {\n\t\t\t\t\t\tt.Errorf(\"[%d] - expected a panic, got none\", i)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tt.Errorf(\"[%d] - expected no panic, got %v\", i, e)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t\tret := cm.conv_Number(c.src)\n\t\t\tif ret != c.exp {\n\t\t\t\tt.Errorf(\"[%d] - expected %v, got %v\", i, c.exp, ret)\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc TestConvType(t *testing.T) {\n\tctx := runtime.NewCtx(nil, nil)\n\n\tcases := []struct {\n\t\tsrc runtime.Val\n\t\texp string\n\t}{\n\t\t0: {\n\t\t\tsrc: runtime.Nil,\n\t\t\texp: \"nil\",\n\t\t},\n\t\t1: {\n\t\t\tsrc: runtime.Number(0),\n\t\t\texp: \"number\",\n\t\t},\n\t\t2: {\n\t\t\tsrc: runtime.Bool(false),\n\t\t\texp: \"bool\",\n\t\t},\n\t\t3: {\n\t\t\tsrc: runtime.String(\"\"),\n\t\t\texp: \"string\",\n\t\t},\n\t\t4: {\n\t\t\tsrc: runtime.NewNativeFunc(ctx, \"\", func(args ...runtime.Val) runtime.Val { return runtime.Nil }),\n\t\t\texp: \"func\",\n\t\t},\n\t\t5: {\n\t\t\tsrc: runtime.NewObject(),\n\t\t\texp: \"object\",\n\t\t},\n\t}\n\tcm := new(ConvMod)\n\tcm.SetCtx(ctx)\n\tfor i, c := range cases {\n\t\tret := cm.conv_Type(c.src)\n\t\tif ret.String() != c.exp {\n\t\t\tt.Errorf(\"[%d] - expected %s, got %s\", i, c.exp, ret)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n)\n\ntype Config struct {\n\tFactorioDir string `json:\"factorio_dir\"`\n\tFactorioSavesDir string `json:\"saves_dir\"`\n\tFactorioModsDir string `json:\"mods_dir\"`\n\tFactorioConfigFile string `json:\"config_file\"`\n\tFactorioConfigDir string `json:\"config_directory\"`\n\tFactorioLog string `json:\"logfile\"`\n\tFactorioBinary string `json:\"factorio_binary\"`\n\tServerIP string `json:\"server_ip\"`\n\tServerPort string `json:\"server_port\"`\n\tMaxUploadSize int64 `json:\"max_upload_size\"`\n\tUsername string `json:\"username\"`\n\tPassword string `json:\"password\"`\n\tDatabaseFile string `json:\"database_file\"`\n\tCookieEncryptionKey string `json:\"cookie_encryption_key\"`\n\tSettingsFile string `json:\"settings_file\"`\n\tLogFile string `json:\"log_file\"`\n\tConfFile string\n}\n\nvar (\n\tconfig Config\n\tFactorioServ *FactorioServer\n\tAuth *AuthHTTP\n)\n\nfunc failOnError(err error, msg string) {\n\tif err != nil {\n\t\tlog.Printf(\"%s: %s\", msg, err)\n\t\tpanic(fmt.Sprintf(\"%s: %s\", msg, err))\n\t}\n}\n\n\/\/ Loads server configuration files\n\/\/ JSON config file contains default values,\n\/\/ config file will overwrite any provided flags\nfunc loadServerConfig(f string) {\n\tfile, err := os.Open(f)\n\tfailOnError(err, \"Error loading config file.\")\n\n\tdecoder := json.NewDecoder(file)\n\terr = decoder.Decode(&config)\n}\n\nfunc parseFlags() {\n\tconfFile := flag.String(\"conf\", \".\/conf.json\", \"Specify location of Factorio Server Manager config file.\")\n\tfactorioDir := flag.String(\"dir\", \".\/\", \"Specify location of Factorio directory.\")\n\tfactorioIP := flag.String(\"host\", \"0.0.0.0\", \"Specify IP for webserver to listen on.\")\n\tfactorioPort := flag.String(\"port\", \"8080\", \"Specify a port for the server.\")\n\tfactorioConfigFile := flag.String(\"config\", \"config\/config.ini\", \"Specify location of Factorio config.ini file\")\n\tfactorioMaxUpload := flag.Int64(\"max-upload\", 1024*1024*20, \"Maximum filesize for uploaded files (default 20MB).\")\n\tfactorioBinary := flag.String(\"bin\", \"bin\/x64\/factorio\", \"Location of Factorio Server binary file\")\n\n\tflag.Parse()\n\n\tconfig.ConfFile = *confFile\n\tconfig.FactorioDir = *factorioDir\n\tconfig.ServerIP = *factorioIP\n\tconfig.ServerPort = *factorioPort\n\tconfig.FactorioSavesDir = filepath.Join(config.FactorioDir, \"saves\")\n\tconfig.FactorioModsDir = filepath.Join(config.FactorioDir, \"mods\")\n\tconfig.FactorioConfigDir = filepath.Join(config.FactorioDir, \"config\")\n\tconfig.FactorioConfigFile = filepath.Join(config.FactorioDir, *factorioConfigFile)\n\tconfig.FactorioBinary = filepath.Join(config.FactorioDir, *factorioBinary)\n\tconfig.MaxUploadSize = *factorioMaxUpload\n\n\tif runtime.GOOS == \"windows\" {\n\t\tappdata := os.Getenv(\"APPDATA\")\n\t\tconfig.FactorioLog = filepath.Join(appdata, \"Factorio\", \"factorio-current.log\")\n\t} else {\n\t\tconfig.FactorioLog = filepath.Join(config.FactorioDir, \"factorio-current.log\")\n\t}\n}\n\nfunc main() {\n\tvar err error\n\n\t\/\/ Parse configuration flags\n\tparseFlags()\n\t\/\/ Load server config from file\n\tloadServerConfig(config.ConfFile)\n\t\/\/ Create mod pack dir if missing\n\tcreateModPackDir()\n\n\t\/\/ Set logging output to file\n\tlogPath := filepath.Join(config.FactorioDir, config.LogFile)\n\tlogFile, err := os.OpenFile(logPath, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)\n\tif err != nil {\n\t\tlog.Fatalf(\"error opening log file: %v\", err)\n\t}\n\tdefer logFile.Close()\n\tlog.SetOutput(logFile)\n\n\tvar err error\n\n\t\/\/ Initialize Factorio Server struct\n\tFactorioServ, err = initFactorio()\n\tif err != nil {\n\t\tlog.Printf(\"Error occurred during FactorioServer initializaion: %v\\n\", err)\n\t\treturn\n\t}\n\n\t\/\/ Initialize authentication system\n\tAuth = initAuth()\n\tAuth.CreateAuth(config.DatabaseFile, config.CookieEncryptionKey)\n\tAuth.CreateOrUpdateUser(config.Username, config.Password, \"admin\", \"\")\n\n\t\/\/ Initialize HTTP router\n\trouter := NewRouter()\n\n\tfmt.Printf(\"Starting server on: %s:%s\", config.ServerIP, config.ServerPort)\n\tlog.Fatal(http.ListenAndServe(config.ServerIP+\":\"+config.ServerPort, router))\n}\n<commit_msg>remove duplicate err var<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n)\n\ntype Config struct {\n\tFactorioDir string `json:\"factorio_dir\"`\n\tFactorioSavesDir string `json:\"saves_dir\"`\n\tFactorioModsDir string `json:\"mods_dir\"`\n\tFactorioConfigFile string `json:\"config_file\"`\n\tFactorioConfigDir string `json:\"config_directory\"`\n\tFactorioLog string `json:\"logfile\"`\n\tFactorioBinary string `json:\"factorio_binary\"`\n\tServerIP string `json:\"server_ip\"`\n\tServerPort string `json:\"server_port\"`\n\tMaxUploadSize int64 `json:\"max_upload_size\"`\n\tUsername string `json:\"username\"`\n\tPassword string `json:\"password\"`\n\tDatabaseFile string `json:\"database_file\"`\n\tCookieEncryptionKey string `json:\"cookie_encryption_key\"`\n\tSettingsFile string `json:\"settings_file\"`\n\tLogFile string `json:\"log_file\"`\n\tConfFile string\n}\n\nvar (\n\tconfig Config\n\tFactorioServ *FactorioServer\n\tAuth *AuthHTTP\n)\n\nfunc failOnError(err error, msg string) {\n\tif err != nil {\n\t\tlog.Printf(\"%s: %s\", msg, err)\n\t\tpanic(fmt.Sprintf(\"%s: %s\", msg, err))\n\t}\n}\n\n\/\/ Loads server configuration files\n\/\/ JSON config file contains default values,\n\/\/ config file will overwrite any provided flags\nfunc loadServerConfig(f string) {\n\tfile, err := os.Open(f)\n\tfailOnError(err, \"Error loading config file.\")\n\n\tdecoder := json.NewDecoder(file)\n\terr = decoder.Decode(&config)\n}\n\nfunc parseFlags() {\n\tconfFile := flag.String(\"conf\", \".\/conf.json\", \"Specify location of Factorio Server Manager config file.\")\n\tfactorioDir := flag.String(\"dir\", \".\/\", \"Specify location of Factorio directory.\")\n\tfactorioIP := flag.String(\"host\", \"0.0.0.0\", \"Specify IP for webserver to listen on.\")\n\tfactorioPort := flag.String(\"port\", \"8080\", \"Specify a port for the server.\")\n\tfactorioConfigFile := flag.String(\"config\", \"config\/config.ini\", \"Specify location of Factorio config.ini file\")\n\tfactorioMaxUpload := flag.Int64(\"max-upload\", 1024*1024*20, \"Maximum filesize for uploaded files (default 20MB).\")\n\tfactorioBinary := flag.String(\"bin\", \"bin\/x64\/factorio\", \"Location of Factorio Server binary file\")\n\n\tflag.Parse()\n\n\tconfig.ConfFile = *confFile\n\tconfig.FactorioDir = *factorioDir\n\tconfig.ServerIP = *factorioIP\n\tconfig.ServerPort = *factorioPort\n\tconfig.FactorioSavesDir = filepath.Join(config.FactorioDir, \"saves\")\n\tconfig.FactorioModsDir = filepath.Join(config.FactorioDir, \"mods\")\n\tconfig.FactorioConfigDir = filepath.Join(config.FactorioDir, \"config\")\n\tconfig.FactorioConfigFile = filepath.Join(config.FactorioDir, *factorioConfigFile)\n\tconfig.FactorioBinary = filepath.Join(config.FactorioDir, *factorioBinary)\n\tconfig.MaxUploadSize = *factorioMaxUpload\n\n\tif runtime.GOOS == \"windows\" {\n\t\tappdata := os.Getenv(\"APPDATA\")\n\t\tconfig.FactorioLog = filepath.Join(appdata, \"Factorio\", \"factorio-current.log\")\n\t} else {\n\t\tconfig.FactorioLog = filepath.Join(config.FactorioDir, \"factorio-current.log\")\n\t}\n}\n\nfunc main() {\n\tvar err error\n\n\t\/\/ Parse configuration flags\n\tparseFlags()\n\t\/\/ Load server config from file\n\tloadServerConfig(config.ConfFile)\n\t\/\/ Create mod pack dir if missing\n\tcreateModPackDir()\n\n\t\/\/ Set logging output to file\n\tlogPath := filepath.Join(config.FactorioDir, config.LogFile)\n\tlogFile, err := os.OpenFile(logPath, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)\n\tif err != nil {\n\t\tlog.Fatalf(\"error opening log file: %v\", err)\n\t}\n\tdefer logFile.Close()\n\tlog.SetOutput(logFile)\n\n\t\/\/ Initialize Factorio Server struct\n\tFactorioServ, err = initFactorio()\n\tif err != nil {\n\t\tlog.Printf(\"Error occurred during FactorioServer initializaion: %v\\n\", err)\n\t\treturn\n\t}\n\n\t\/\/ Initialize authentication system\n\tAuth = initAuth()\n\tAuth.CreateAuth(config.DatabaseFile, config.CookieEncryptionKey)\n\tAuth.CreateOrUpdateUser(config.Username, config.Password, \"admin\", \"\")\n\n\t\/\/ Initialize HTTP router\n\trouter := NewRouter()\n\n\tfmt.Printf(\"Starting server on: %s:%s\", config.ServerIP, config.ServerPort)\n\tlog.Fatal(http.ListenAndServe(config.ServerIP+\":\"+config.ServerPort, router))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Licensed to Elasticsearch B.V. under one or more agreements.\n\/\/ Elasticsearch B.V. licenses this file to you under the Apache 2.0 License.\n\/\/ See the LICENSE file in the project root for more information.\n\npackage gentests\n\nimport (\n \"fmt\"\n \"strings\"\n\n \"gopkg.in\/yaml.v2\"\n)\n\nvar skipTests map[string][]string\n\nfunc init() {\n err := yaml.NewDecoder(strings.NewReader(skipTestsYAML)).Decode(&skipTests)\n if err != nil {\n panic(fmt.Sprintf(\"ERROR: %v\", err))\n }\n}\n\nvar skipFiles = []string{\n \"update\/85_fields_meta.yml\", \/\/ Uses non-existing API property\n \"update\/86_fields_meta_with_types.yml\", \/\/ --||--\n\n \"ml\/jobs_get_result_buckets.yml\", \/\/ Passes string value to int variable\n \"ml\/jobs_get_result_categories.yml\", \/\/ --||--\n \"ml\/set_upgrade_mode.yml\", \/\/ --||--\n\n \"ml\/evaluate_data_frame.yml\", \/\/ Floats as map keys\n\n \"search\/320_disallow_queries.yml\", \/\/ Tries to match key in an empty map (`transient:{}`)\n\n \"watcher\/stats\/10_basic.yml\", \/\/ Sets \"emit_stacktraces\" as string (\"true\"), not bool\n}\n\n\/\/ TODO: Comments into descriptions for `Skip()`\n\/\/\nvar skipTestsYAML = `\n---\n# Cannot distinguish between missing value for refresh and an empty string\nbulk\/50_refresh.yml:\n - refresh=empty string immediately makes changes are visible in search\nbulk\/51_refresh_with_types.yml:\n - refresh=empty string immediately makes changes are visible in search\ncreate\/60_refresh.yml:\n - When refresh url parameter is an empty string that means \"refresh immediately\"\ncreate\/61_refresh_with_types.yml:\n - When refresh url parameter is an empty string that means \"refresh immediately\"\ndelete\/50_refresh.yml:\n - When refresh url parameter is an empty string that means \"refresh immediately\"\ndelete\/51_refresh_with_types.yml:\n - When refresh url parameter is an empty string that means \"refresh immediately\"\nindex\/60_refresh.yml:\n - When refresh url parameter is an empty string that means \"refresh immediately\"\nindex\/61_refresh_with_types.yml:\n - When refresh url parameter is an empty string that means \"refresh immediately\"\nupdate\/60_refresh.yml:\n - When refresh url parameter is an empty string that means \"refresh immediately\"\nupdate\/61_refresh_with_types.yml:\n - When refresh url parameter is an empty string that means \"refresh immediately\"\n\n# Stash in value\ncluster.reroute\/11_explain.yml:\nnodes.info\/30_settings.yml:\nnodes.stats\/20_response_filtering.yml:\nnodes.stats\/30_discovery.yml:\n - Discovery stats\nnodes.discovery\/30_discovery.yml:\n - Discovery stats\n\n# Arbitrary key\nindices.shrink\/10_basic.yml:\nindices.shrink\/20_source_mapping.yml:\nindices.shrink\/30_copy_settings.yml:\nindices.split\/30_copy_settings.yml:\n\n# Parsed response is YAML: value is map[interface {}]interface {}, not map[string]interface {}\ncat.aliases\/20_headers.yml:\n - Simple alias with yaml body through Accept header\n\n# Incorrect int instead of float in match (aggregations.date_range.buckets.0.from: 1000000); TODO: PR\nsearch.aggregation\/40_range.yml:\n - Date range\n\n# No support for headers per request yet\ntasks.list\/10_basic.yml:\n - tasks_list headers\n\n# Node Selector feature not implemented\ncat.aliases\/10_basic.yml:\n - \"Help (pre 7.4.0)\"\n - \"Simple alias (pre 7.4.0)\"\n - \"Complex alias (pre 7.4.0)\"\n - \"Column headers (pre 7.4.0)\"\n - \"Alias against closed index (pre 7.4.0)\"\n\nindices.put_mapping\/10_basic.yml:\n - \"Put mappings with explicit _doc type bwc\"\n\n# Not relevant\nsearch\/issue4895.yml:\nsearch\/issue9606.yml:\n\n# FIXME\nbulk\/80_cas.yml:\nbulk\/81_cas_with_types.yml:\n\n# ----- X-Pack ----------------------------------------------------------------\n\n# Float \"3.0\" handled as \"3\" in fmt.Sprintf()\nanalytics\/top_metrics.yml:\n\n# Stash in body\napi_key\/10_basic.yml:\n - Test invalidate api key\napi_key\/11_invalidation.yml:\n - Test invalidate api key by username\nrollup\/put_job.yml:\n - Test put job with templates\n\n# Changing password locks out tests\nchange_password\/10_basic.yml:\n - Test user changing their own password\n\n# Missing refreshes in the test\ndata_frame\/transforms_start_stop.yml:\nml\/index_layout.yml:\ntransform\/transforms_start_stop.yml:\n - Verify start transform reuses destination index\ntransform\/transforms_start_stop.yml:\n - Test get multiple transform stats\ntransform\/transforms_stats.yml:\n - Test get multiple transform stats\n - Test get multiple transform stats where one does not have a task\n\n# More QA tests than API tests\ndata_frame\/transforms_stats.yml:\n - Test get multiple transform stats\n - Test get transform stats on missing transform\n - Test get multiple transform stats where one does not have a task\nml\/jobs_crud.yml:\n - Test reopen job resets the finished time\n\n# Invalid license makes subsequent tests fail\nlicense\/20_put_license.yml:\n\n# Test tries to match on map from body, but Go keys are not sorted\nml\/jobs_crud.yml:\n - Test job with rules\n - Test put job with model_memory_limit as number\n - Test put job with model_memory_limit as string and lazy open\n\n# Test gets stuck every time\nml\/jobs_get_stats.yml:\n\n# status_exception, Cannot process data because job [post-data-job] does not have a corresponding autodetect process\n# resource_already_exists_exception, task with id {job-post-data-job} already exist\n# status_exception, Cannot open job [start-stop-datafeed-job-foo-1] because it has already been opened\nml\/post_data.yml:\n - Test flush with skip_time\n - Test POST data job api, flush, close and verify DataCounts doc\n - Test flush and close job WITHOUT sending any data\nml\/start_stop_datafeed.yml:\n - Test stop given expression\ntransform\/transforms_start_stop.yml:\n - Test start transform\n - Verify start transform reuses destination index\n\n# Possible bad test setup, Cannot open job [start-stop-datafeed-job] because it has already been opened\n# resource_already_exists_exception, task with id {job-start-stop-datafeed-job-foo-2} already exist\nml\/start_stop_datafeed.yml:\n - Test start datafeed when persistent task allocation disabled\n\n# Indexing step doesn't appear to work (getting total.hits=0)\nmonitoring\/bulk\/10_basic.yml:\n - Bulk indexing of monitoring data on closed indices should throw an export exception\n# Indexing step doesn't appear to work (getting total.hits=0)\nmonitoring\/bulk\/20_privileges.yml:\n - Monitoring Bulk API\n\n# Test tries to match on whole body, but map keys are unstable in Go\nrollup\/security_tests.yml:\n\n# Test tries to match on map key, but map keys are unstable in Go\nml\/data_frame_analytics_crud.yml:\n - Test put with description\n - Test put valid config with custom outlier detection\n\n# 404s, panics, ... possible bad setup\/teardown\nml\/delete_model_snapshot.yml:\nml\/get_datafeed_stats.yml:\nml\/get_model_snapshots.yml:\n\n# TEMPORARY: Missing 'body: { indices: \"test_index\" }' payload, TODO: PR\nsnapshot\/10_basic.yml:\n - Create a source only snapshot and then restore it\n\n# illegal_argument_exception: Provided password hash uses [NOOP] but the configured hashing algorithm is [BCRYPT]\nusers\/10_basic.yml:\n - Test put user with password hash\n\n# Slash in index name is not escaped (BUG)\nsecurity\/authz\/13_index_datemath.yml:\n - Test indexing documents with datemath, when permitted\n\n# Possibly a cluster health color mismatch...\nsecurity\/authz\/14_cat_indices.yml:\n\n# Test looks for \"testnode.crt\", but \"ca.crt\" is returned first\nssl\/10_basic.yml:\n - Test get SSL certificates\n\n# class org.elasticsearch.xpack.vectors.query.VectorScriptDocValues$DenseVectorScriptDocValues cannot be cast to class org.elasticsearch.xpack.vectors.query.VectorScriptDocValues$SparseVectorScriptDocValues ...\nvectors\/30_sparse_vector_basic.yml:\n - Dot Product\n# java.lang.IllegalArgumentException: No field found for [my_dense_vector] in mapping\nvectors\/40_sparse_vector_special_cases.yml:\n - Vectors of different dimensions and data types\n - Dimensions can be sorted differently\n - Distance functions for documents missing vector field should return 0\n\n# Cannot connect to Docker IP\nwatcher\/execute_watch\/60_http_input.yml:\n\n# Test tries to match on \"tagline\", which requires \"human=false\", which doesn't work in the Go API.\n# Also test does too much within a single test, so has to be disabled as whole, unfortunately.\nxpack\/15_basic.yml:\n\n# Test uses \"y\" as a property name, which is parsed as 'true' in the Go YAML library;\n# see https:\/\/yaml.org\/type\/bool.html\nml\/explain_data_frame_analytics.yml:\n - Test empty data frame given body\n - Test non-empty data frame given body\n`\n<commit_msg>Generator: Tests: Update the list of skipped tests<commit_after>\/\/ Licensed to Elasticsearch B.V. under one or more agreements.\n\/\/ Elasticsearch B.V. licenses this file to you under the Apache 2.0 License.\n\/\/ See the LICENSE file in the project root for more information.\n\npackage gentests\n\nimport (\n \"fmt\"\n \"strings\"\n\n \"gopkg.in\/yaml.v2\"\n)\n\nvar skipTests map[string][]string\n\nfunc init() {\n err := yaml.NewDecoder(strings.NewReader(skipTestsYAML)).Decode(&skipTests)\n if err != nil {\n panic(fmt.Sprintf(\"ERROR: %v\", err))\n }\n}\n\nvar skipFiles = []string{\n \"update\/85_fields_meta.yml\", \/\/ Uses non-existing API property\n \"update\/86_fields_meta_with_types.yml\", \/\/ --||--\n\n \"ml\/jobs_get_result_buckets.yml\", \/\/ Passes string value to int variable\n \"ml\/jobs_get_result_categories.yml\", \/\/ --||--\n \"ml\/set_upgrade_mode.yml\", \/\/ --||--\n\n \"ml\/evaluate_data_frame.yml\", \/\/ Floats as map keys\n\n \"search\/320_disallow_queries.yml\", \/\/ Tries to match key in an empty map (`transient:{}`)\n\n \"watcher\/stats\/10_basic.yml\", \/\/ Sets \"emit_stacktraces\" as string (\"true\"), not bool\n}\n\n\/\/ TODO: Comments into descriptions for `Skip()`\n\/\/\nvar skipTestsYAML = `\n---\n# Cannot distinguish between missing value for refresh and an empty string\nbulk\/50_refresh.yml:\n - refresh=empty string immediately makes changes are visible in search\nbulk\/51_refresh_with_types.yml:\n - refresh=empty string immediately makes changes are visible in search\ncreate\/60_refresh.yml:\n - When refresh url parameter is an empty string that means \"refresh immediately\"\ncreate\/61_refresh_with_types.yml:\n - When refresh url parameter is an empty string that means \"refresh immediately\"\ndelete\/50_refresh.yml:\n - When refresh url parameter is an empty string that means \"refresh immediately\"\ndelete\/51_refresh_with_types.yml:\n - When refresh url parameter is an empty string that means \"refresh immediately\"\nindex\/60_refresh.yml:\n - When refresh url parameter is an empty string that means \"refresh immediately\"\nindex\/61_refresh_with_types.yml:\n - When refresh url parameter is an empty string that means \"refresh immediately\"\nupdate\/60_refresh.yml:\n - When refresh url parameter is an empty string that means \"refresh immediately\"\nupdate\/61_refresh_with_types.yml:\n - When refresh url parameter is an empty string that means \"refresh immediately\"\n\n# Stash in value\ncluster.reroute\/11_explain.yml:\nnodes.info\/30_settings.yml:\nnodes.stats\/20_response_filtering.yml:\nnodes.stats\/30_discovery.yml:\n - Discovery stats\nnodes.discovery\/30_discovery.yml:\n - Discovery stats\n\n# Arbitrary key\nindices.shrink\/10_basic.yml:\nindices.shrink\/20_source_mapping.yml:\nindices.shrink\/30_copy_settings.yml:\nindices.split\/30_copy_settings.yml:\n\n# Parsed response is YAML: value is map[interface {}]interface {}, not map[string]interface {}\ncat.aliases\/20_headers.yml:\n - Simple alias with yaml body through Accept header\n\n# Incorrect int instead of float in match (aggregations.date_range.buckets.0.from: 1000000); TODO: PR\nsearch.aggregation\/40_range.yml:\n - Date range\n\n# No support for headers per request yet\ntasks.list\/10_basic.yml:\n - tasks_list headers\n\n# Node Selector feature not implemented\ncat.aliases\/10_basic.yml:\n - \"Help (pre 7.4.0)\"\n - \"Simple alias (pre 7.4.0)\"\n - \"Complex alias (pre 7.4.0)\"\n - \"Column headers (pre 7.4.0)\"\n - \"Alias against closed index (pre 7.4.0)\"\n\nindices.put_mapping\/10_basic.yml:\n - \"Put mappings with explicit _doc type bwc\"\n\n# Not relevant\nsearch\/issue4895.yml:\nsearch\/issue9606.yml:\n\n# FIXME\nbulk\/80_cas.yml:\nbulk\/81_cas_with_types.yml:\n\n# ----- X-Pack ----------------------------------------------------------------\n\n# Float \"3.0\" handled as \"3\" in fmt.Sprintf()\nanalytics\/top_metrics.yml:\n\n# Stash in body\napi_key\/10_basic.yml:\n - Test invalidate api key\napi_key\/11_invalidation.yml:\n - Test invalidate api key by username\nrollup\/put_job.yml:\n - Test put job with templates\n\n# Changing password locks out tests\nchange_password\/10_basic.yml:\n - Test user changing their own password\n\n# Missing refreshes in the test\ndata_frame\/transforms_start_stop.yml:\nml\/index_layout.yml:\ntransform\/transforms_start_stop.yml:\n - Verify start transform reuses destination index\ntransform\/transforms_start_stop.yml:\n - Test get multiple transform stats\ntransform\/transforms_stats.yml:\n - Test get multiple transform stats\n - Test get multiple transform stats where one does not have a task\n\n# More QA tests than API tests\ndata_frame\/transforms_stats.yml:\n - Test get multiple transform stats\n - Test get transform stats on missing transform\n - Test get multiple transform stats where one does not have a task\nml\/jobs_crud.yml:\n - Test reopen job resets the finished time\n\n# Invalid license makes subsequent tests fail\nlicense\/20_put_license.yml:\n\n# Test tries to match on map from body, but Go keys are not sorted\nml\/jobs_crud.yml:\n - Test job with rules\n - Test put job with model_memory_limit as number\n - Test put job with model_memory_limit as string and lazy open\n\n# Test gets stuck every time\nml\/jobs_get_stats.yml:\n\n# status_exception, Cannot process data because job [post-data-job] does not have a corresponding autodetect process\n# resource_already_exists_exception, task with id {job-post-data-job} already exist\n# status_exception, Cannot open job [start-stop-datafeed-job-foo-1] because it has already been opened\nml\/post_data.yml:\n - Test flush with skip_time\n - Test POST data job api, flush, close and verify DataCounts doc\n - Test flush and close job WITHOUT sending any data\nml\/start_stop_datafeed.yml:\n - Test stop given expression\ntransform\/transforms_start_stop.yml:\n - Test start transform\n - Verify start transform reuses destination index\n\n# Possible bad test setup, Cannot open job [start-stop-datafeed-job] because it has already been opened\n# resource_already_exists_exception, task with id {job-start-stop-datafeed-job-foo-2} already exist\nml\/start_stop_datafeed.yml:\n - Test start datafeed when persistent task allocation disabled\n\n# Indexing step doesn't appear to work (getting total.hits=0)\nmonitoring\/bulk\/10_basic.yml:\n - Bulk indexing of monitoring data on closed indices should throw an export exception\n# Indexing step doesn't appear to work (getting total.hits=0)\nmonitoring\/bulk\/20_privileges.yml:\n - Monitoring Bulk API\n\n# Test tries to match on whole body, but map keys are unstable in Go\nrollup\/security_tests.yml:\n\n# Test tries to match on map key, but map keys are unstable in Go\nml\/data_frame_analytics_crud.yml:\n - Test put with description\n - Test put valid config with custom outlier detection\n\n# Unsupported feature: allowed_warnings\nml\/data_frame_analytics_crud.yml:\n - Test put classification given deprecated maximum_number_trees\n\n# This test suite keeps failing too often: disable it altogether\nml\/data_frame_analytics_crud.yml:\n\n# 404s, panics, ... possible bad setup\/teardown\nml\/delete_model_snapshot.yml:\nml\/get_datafeed_stats.yml:\nml\/get_model_snapshots.yml:\n\n# TEMPORARY: Missing 'body: { indices: \"test_index\" }' payload, TODO: PR\nsnapshot\/10_basic.yml:\n - Create a source only snapshot and then restore it\n\n# illegal_argument_exception: Provided password hash uses [NOOP] but the configured hashing algorithm is [BCRYPT]\nusers\/10_basic.yml:\n - Test put user with password hash\n\n# Slash in index name is not escaped (BUG)\nsecurity\/authz\/13_index_datemath.yml:\n - Test indexing documents with datemath, when permitted\n\n# Possibly a cluster health color mismatch...\nsecurity\/authz\/14_cat_indices.yml:\n\n# Test looks for \"testnode.crt\", but \"ca.crt\" is returned first\nssl\/10_basic.yml:\n - Test get SSL certificates\n\n# class org.elasticsearch.xpack.vectors.query.VectorScriptDocValues$DenseVectorScriptDocValues cannot be cast to class org.elasticsearch.xpack.vectors.query.VectorScriptDocValues$SparseVectorScriptDocValues ...\nvectors\/30_sparse_vector_basic.yml:\n - Dot Product\n# java.lang.IllegalArgumentException: No field found for [my_dense_vector] in mapping\nvectors\/40_sparse_vector_special_cases.yml:\n - Vectors of different dimensions and data types\n - Dimensions can be sorted differently\n - Distance functions for documents missing vector field should return 0\n\n# Cannot connect to Docker IP\nwatcher\/execute_watch\/60_http_input.yml:\n\n# Test tries to match on \"tagline\", which requires \"human=false\", which doesn't work in the Go API.\n# Also test does too much within a single test, so has to be disabled as whole, unfortunately.\nxpack\/15_basic.yml:\n\n# Test uses \"y\" as a property name, which is parsed as 'true' in the Go YAML library;\n# see https:\/\/yaml.org\/type\/bool.html\nml\/explain_data_frame_analytics.yml:\n - Test empty data frame given body\n - Test non-empty data frame given body\n`\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2018 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage zanzibar\n\nimport (\n\t\"github.com\/stretchr\/testify\/assert\"\n\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestNilCallReferenceForLogger(t *testing.T) {\n\n\theaders := map[string]string{\n\t\t\"header-key\": \"header-value\",\n\t}\n\toutboundCall := &tchannelOutboundCall{\n\t\tclient: nil,\n\t\tcall: nil,\n\t\tmethodName: \"GET\",\n\t\tserviceMethod: \"Test\",\n\t\tsuccess: false,\n\t\tstartTime: time.Now(),\n\t\tfinishTime: time.Now(),\n\t\treqHeaders: headers,\n\t\tresHeaders: headers,\n\t\tlogger: nil,\n\t\tmetrics: nil,\n\t}\n\tfields := outboundCall.logFields()\n\n\t\/\/ one field for each of the the:\n\t\/\/ timestamp-started, timestamp-finished, remoteAddr, requestHeader, responseHeader\n\tassert.Len(t, fields, 5)\n\tassert.Equal(t, fields[0].Key, \"remoteAddr\")\n\t\/\/ nil call should cause remoteAddr to be set to unknown\n\tassert.Equal(t, fields[0].String, \"unknown\")\n}\n<commit_msg>Fix code style issues<commit_after>\/\/ Copyright (c) 2018 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage zanzibar\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestNilCallReferenceForLogger(t *testing.T) {\n\theaders := map[string]string{\n\t\t\"header-key\": \"header-value\",\n\t}\n\tstaticTestTime := time.Unix(1500000000, 0)\n\toutboundCall := &tchannelOutboundCall{\n\t\tmethodName: \"Get\",\n\t\tserviceMethod: \"Test\",\n\t\tstartTime: staticTestTime,\n\t\tfinishTime: staticTestTime,\n\t\treqHeaders: headers,\n\t\tresHeaders: headers,\n\t}\n\n\tfields := outboundCall.logFields()\n\n\t\/\/ one field for each of the:\n\t\/\/ timestamp-started, timestamp-finished, remoteAddr, requestHeader, responseHeader\n\tassert.Len(t, fields, 5)\n\tassert.Equal(t, fields[0].Key, \"remoteAddr\")\n\t\/\/ nil call should cause remoteAddr to be set to unknown\n\tassert.Equal(t, fields[0].String, \"unknown\")\n}\n<|endoftext|>"} {"text":"<commit_before>package plugin\r\n\r\nimport (\r\n\t\"zvr\/server\"\r\n\t\"zvr\/utils\"\r\n\t\"fmt\"\r\n)\r\n\r\nconst (\r\n\tSET_SNAT_PATH = \"\/setsnat\"\r\n\tREMOVE_SNAT_PATH = \"\/removesnat\"\r\n\tSYNC_SNAT_PATH = \"\/syncsnat\"\r\n)\r\n\r\ntype snatInfo struct {\r\n\tPublicNicMac string `json:\"publicNicMac\"`\r\n\tPublicIp string `json:\"publicIp\"`\r\n\tPrivateNicMac string `json:\"privateNicMac\"`\r\n\tPrivateNicIp string `json:\"privateNicIp\"`\r\n\tSnatNetmask string `json:\"snatNetmask\"`\r\n}\r\n\r\ntype setSnatCmd struct {\r\n\tSnat snatInfo `json:\"snat\"`\r\n}\r\n\r\ntype removeSnatCmd struct {\r\n\tNatInfo []snatInfo `json:\"natInfo\"`\r\n}\r\n\r\ntype syncSnatCmd struct {\r\n\tSnats []snatInfo `json:\"snats\"`\r\n}\r\n\r\nvar SNAT_RULE_NUMBER = 9999\r\n\r\nfunc setSnatHandler(ctx *server.CommandContext) interface{} {\r\n\tcmd := &setSnatCmd{}\r\n\tctx.GetCommand(cmd)\r\n\r\n\ts := cmd.Snat\r\n\ttree := server.NewParserFromShowConfiguration().Tree\r\n\toutNic, err := utils.GetNicNameByMac(s.PublicNicMac); utils.PanicOnError(err)\r\n\tinNic, err := utils.GetNicNameByMac(s.PublicNicMac); utils.PanicOnError(err)\r\n\tnicNumber, err := utils.GetNicNumber(inNic); utils.PanicOnError(err)\r\n\taddress, err := utils.GetNetworkNumber(s.PrivateNicIp, s.SnatNetmask); utils.PanicOnError(err)\r\n\r\n\tif hasRuleNumberForAddress(tree, address) {\r\n\t\treturn nil\r\n\t}\r\n\r\n\t\/\/ make source nat rule as the latest rule\r\n\t\/\/ in case there are EIP rules\r\n\ttree.SetSnatWithRuleNumber(SNAT_RULE_NUMBER - nicNumber,\r\n\t\tfmt.Sprintf(\"outbound-interface %s\", outNic),\r\n\t\tfmt.Sprintf(\"source address %v\", address),\r\n\t\tfmt.Sprintf(\"translation address %s\", s.PublicIp),\r\n\t)\r\n\r\n\ttree.Apply(false)\r\n\r\n\treturn nil\r\n}\r\n\r\nfunc removeSnatHandler(ctx *server.CommandContext) interface{} {\r\n\tcmd := &removeSnatCmd{}\r\n\tctx.GetCommand(&cmd)\r\n\r\n\ttree := server.NewParserFromShowConfiguration().Tree\r\n\trs := tree.Get(\"nat source rule\")\r\n\tif rs == nil {\r\n\t\treturn nil\r\n\t}\r\n\r\n\tfor _, s := range cmd.NatInfo {\r\n\t\taddress, err := utils.GetNetworkNumber(s.PrivateNicIp, s.SnatNetmask); utils.PanicOnError(err)\r\n\r\n\t\tfor _, r := range rs.Children() {\r\n\t\t\tif addr := r.Get(\"source address\"); addr != nil && addr.Value() == address {\r\n\t\t\t\taddr.Delete()\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\ttree.Apply(false)\r\n\r\n\treturn nil\r\n}\r\n\r\nfunc hasRuleNumberForAddress(tree *server.VyosConfigTree, address string) bool {\r\n\trs := tree.Get(\"nat source rule\")\r\n\tif rs == nil {\r\n\t\treturn false\r\n\t}\r\n\r\n\tfor _, r := range rs.Children() {\r\n\t\tif addr := r.Get(\"source address\"); addr != nil && addr.Value() == address {\r\n\t\t\treturn true\r\n\t\t}\r\n\t}\r\n\r\n\treturn false\r\n}\r\n\r\nfunc syncSnatHandler(ctx *server.CommandContext) interface{} {\r\n\tcmd := &syncSnatCmd{}\r\n\tctx.GetCommand(cmd)\r\n\r\n\ttree := server.NewParserFromShowConfiguration().Tree\r\n\r\n\tfor _, s := range cmd.Snats {\r\n\t\toutNic, err := utils.GetNicNameByMac(s.PublicNicMac); utils.PanicOnError(err)\r\n\t\tinNic, err := utils.GetNicNameByMac(s.PublicNicMac); utils.PanicOnError(err)\r\n\t\tnicNumber, err := utils.GetNicNumber(inNic); utils.PanicOnError(err)\r\n\t\taddress, err := utils.GetNetworkNumber(s.PrivateNicIp, s.SnatNetmask); utils.PanicOnError(err)\r\n\t\tif rs := tree.Getf(\"nat source rule %v\", SNAT_RULE_NUMBER - nicNumber); rs != nil {\r\n\t\t\trs.Delete()\r\n\t\t}\r\n\r\n\t\ttree.SetSnatWithRuleNumber(SNAT_RULE_NUMBER - nicNumber,\r\n\t\t\tfmt.Sprintf(\"outbound-interface %s\", outNic),\r\n\t\t\tfmt.Sprintf(\"source address %s\", address),\r\n\t\t\tfmt.Sprintf(\"translation address %s\", s.PublicIp),\r\n\t\t)\r\n\t}\r\n\r\n\ttree.Apply(false)\r\n\r\n\treturn nil\r\n}\r\n\r\nfunc SnatEntryPoint() {\r\n\tserver.RegisterAsyncCommandHandler(SET_SNAT_PATH, server.VyosLock(setSnatHandler))\r\n\tserver.RegisterAsyncCommandHandler(REMOVE_SNAT_PATH, server.VyosLock(removeSnatHandler))\r\n\tserver.RegisterAsyncCommandHandler(SYNC_SNAT_PATH, server.VyosLock(syncSnatHandler))\r\n}<commit_msg>Fix multiple snat bug<commit_after>package plugin\r\n\r\nimport (\r\n\t\"zvr\/server\"\r\n\t\"zvr\/utils\"\r\n\t\"fmt\"\r\n)\r\n\r\nconst (\r\n\tSET_SNAT_PATH = \"\/setsnat\"\r\n\tREMOVE_SNAT_PATH = \"\/removesnat\"\r\n\tSYNC_SNAT_PATH = \"\/syncsnat\"\r\n)\r\n\r\ntype snatInfo struct {\r\n\tPublicNicMac string `json:\"publicNicMac\"`\r\n\tPublicIp string `json:\"publicIp\"`\r\n\tPrivateNicMac string `json:\"privateNicMac\"`\r\n\tPrivateNicIp string `json:\"privateNicIp\"`\r\n\tSnatNetmask string `json:\"snatNetmask\"`\r\n}\r\n\r\ntype setSnatCmd struct {\r\n\tSnat snatInfo `json:\"snat\"`\r\n}\r\n\r\ntype removeSnatCmd struct {\r\n\tNatInfo []snatInfo `json:\"natInfo\"`\r\n}\r\n\r\ntype syncSnatCmd struct {\r\n\tSnats []snatInfo `json:\"snats\"`\r\n}\r\n\r\nvar SNAT_RULE_NUMBER = 9999\r\n\r\nfunc setSnatHandler(ctx *server.CommandContext) interface{} {\r\n\tcmd := &setSnatCmd{}\r\n\tctx.GetCommand(cmd)\r\n\r\n\ts := cmd.Snat\r\n\ttree := server.NewParserFromShowConfiguration().Tree\r\n\toutNic, err := utils.GetNicNameByMac(s.PublicNicMac); utils.PanicOnError(err)\r\n\tinNic, err := utils.GetNicNameByMac(s.PrivateNicMac); utils.PanicOnError(err)\r\n\tnicNumber, err := utils.GetNicNumber(inNic); utils.PanicOnError(err)\r\n\taddress, err := utils.GetNetworkNumber(s.PrivateNicIp, s.SnatNetmask); utils.PanicOnError(err)\r\n\r\n\tif hasRuleNumberForAddress(tree, address) {\r\n\t\treturn nil\r\n\t}\r\n\r\n\t\/\/ make source nat rule as the latest rule\r\n\t\/\/ in case there are EIP rules\r\n\ttree.SetSnatWithRuleNumber(SNAT_RULE_NUMBER - nicNumber,\r\n\t\tfmt.Sprintf(\"outbound-interface %s\", outNic),\r\n\t\tfmt.Sprintf(\"source address %v\", address),\r\n\t\tfmt.Sprintf(\"translation address %s\", s.PublicIp),\r\n\t)\r\n\r\n\ttree.Apply(false)\r\n\r\n\treturn nil\r\n}\r\n\r\nfunc removeSnatHandler(ctx *server.CommandContext) interface{} {\r\n\tcmd := &removeSnatCmd{}\r\n\tctx.GetCommand(&cmd)\r\n\r\n\ttree := server.NewParserFromShowConfiguration().Tree\r\n\trs := tree.Get(\"nat source rule\")\r\n\tif rs == nil {\r\n\t\treturn nil\r\n\t}\r\n\r\n\tfor _, s := range cmd.NatInfo {\r\n\t\taddress, err := utils.GetNetworkNumber(s.PrivateNicIp, s.SnatNetmask); utils.PanicOnError(err)\r\n\r\n\t\tfor _, r := range rs.Children() {\r\n\t\t\tif addr := r.Get(\"source address\"); addr != nil && addr.Value() == address {\r\n\t\t\t\taddr.Delete()\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\ttree.Apply(false)\r\n\r\n\treturn nil\r\n}\r\n\r\nfunc hasRuleNumberForAddress(tree *server.VyosConfigTree, address string) bool {\r\n\trs := tree.Get(\"nat source rule\")\r\n\tif rs == nil {\r\n\t\treturn false\r\n\t}\r\n\r\n\tfor _, r := range rs.Children() {\r\n\t\tif addr := r.Get(\"source address\"); addr != nil && addr.Value() == address {\r\n\t\t\treturn true\r\n\t\t}\r\n\t}\r\n\r\n\treturn false\r\n}\r\n\r\nfunc syncSnatHandler(ctx *server.CommandContext) interface{} {\r\n\tcmd := &syncSnatCmd{}\r\n\tctx.GetCommand(cmd)\r\n\r\n\ttree := server.NewParserFromShowConfiguration().Tree\r\n\r\n\tfor _, s := range cmd.Snats {\r\n\t\toutNic, err := utils.GetNicNameByMac(s.PublicNicMac); utils.PanicOnError(err)\r\n\t\tinNic, err := utils.GetNicNameByMac(s.PrivateNicMac); utils.PanicOnError(err)\r\n\t\tnicNumber, err := utils.GetNicNumber(inNic); utils.PanicOnError(err)\r\n\t\taddress, err := utils.GetNetworkNumber(s.PrivateNicIp, s.SnatNetmask); utils.PanicOnError(err)\r\n\t\tif rs := tree.Getf(\"nat source rule %v\", SNAT_RULE_NUMBER - nicNumber); rs != nil {\r\n\t\t\trs.Delete()\r\n\t\t}\r\n\r\n\t\ttree.SetSnatWithRuleNumber(SNAT_RULE_NUMBER - nicNumber,\r\n\t\t\tfmt.Sprintf(\"outbound-interface %s\", outNic),\r\n\t\t\tfmt.Sprintf(\"source address %s\", address),\r\n\t\t\tfmt.Sprintf(\"translation address %s\", s.PublicIp),\r\n\t\t)\r\n\t}\r\n\r\n\ttree.Apply(false)\r\n\r\n\treturn nil\r\n}\r\n\r\nfunc SnatEntryPoint() {\r\n\tserver.RegisterAsyncCommandHandler(SET_SNAT_PATH, server.VyosLock(setSnatHandler))\r\n\tserver.RegisterAsyncCommandHandler(REMOVE_SNAT_PATH, server.VyosLock(removeSnatHandler))\r\n\tserver.RegisterAsyncCommandHandler(SYNC_SNAT_PATH, server.VyosLock(syncSnatHandler))\r\n}<|endoftext|>"} {"text":"<commit_before>package dockerclient\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sync\/atomic\"\n)\n\nvar (\n\tErrNotFound = errors.New(\"Not found\")\n)\n\ntype DockerClient struct {\n\tURL *url.URL\n\tHTTPClient *http.Client\n\tmonitorEvents int32\n}\n\ntype Callback func(*Event, ...interface{})\n\ntype Error struct {\n\tStatusCode int\n\tStatus string\n\tmsg string\n}\n\nfunc (e Error) Error() string {\n\treturn fmt.Sprintf(\"%s: %s\", e.Status, e.msg)\n}\n\nfunc NewDockerClient(daemonUrl string, tlsConfig *tls.Config) (*DockerClient, error) {\n\tu, err := url.Parse(daemonUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif u.Scheme == \"tcp\" {\n\t\tu.Scheme = \"http\"\n\t}\n\thttpClient := newHTTPClient(u, tlsConfig)\n\treturn &DockerClient{u, httpClient, 0}, nil\n}\n\nfunc (client *DockerClient) doRequest(method string, path string, body []byte) ([]byte, error) {\n\tb := bytes.NewBuffer(body)\n\treq, err := http.NewRequest(method, client.URL.String()+path, b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\tresp, err := client.HTTPClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode == 404 {\n\t\treturn nil, ErrNotFound\n\t}\n\tif resp.StatusCode >= 400 {\n\t\treturn nil, Error{StatusCode: resp.StatusCode, Status: resp.Status, msg: string(data)}\n\t}\n\treturn data, nil\n}\n\nfunc (client *DockerClient) Info() (*Info, error) {\n\tdata, err := client.doRequest(\"GET\", \"\/v1.10\/info\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tret := &Info{}\n\terr = json.Unmarshal(data, &ret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n}\n\nfunc (client *DockerClient) ListContainers(all bool) ([]Container, error) {\n\targAll := 0\n\tif all == true {\n\t\targAll = 1\n\t}\n\targs := fmt.Sprintf(\"?all=%d\", argAll)\n\tdata, err := client.doRequest(\"GET\", \"\/v1.10\/containers\/json\"+args, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tret := []Container{}\n\terr = json.Unmarshal(data, &ret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n}\n\nfunc (client *DockerClient) InspectContainer(id string) (*ContainerInfo, error) {\n\turi := fmt.Sprintf(\"\/v1.10\/containers\/%s\/json\", id)\n\tdata, err := client.doRequest(\"GET\", uri, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinfo := &ContainerInfo{}\n\terr = json.Unmarshal(data, info)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn info, nil\n}\n\nfunc (client *DockerClient) CreateContainer(config *ContainerConfig, name string) (string, error) {\n\tdata, err := json.Marshal(config)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\turi := \"\/v1.10\/containers\/create\"\n\n\tif name != \"\" {\n\t\tv := url.Values{}\n\t\tv.Set(\"name\", name)\n\t\turi = fmt.Sprintf(\"%s?%s\", uri, v.Encode())\n\t}\n\tdata, err = client.doRequest(\"POST\", uri, data)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tresult := &RespContainersCreate{}\n\terr = json.Unmarshal(data, result)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn result.Id, nil\n}\n\nfunc (client *DockerClient) StartContainer(id string, config *HostConfig) error {\n\tdata, err := json.Marshal(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\turi := fmt.Sprintf(\"\/v1.10\/containers\/%s\/start\", id)\n\t_, err = client.doRequest(\"POST\", uri, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (client *DockerClient) StopContainer(id string, timeout int) error {\n\turi := fmt.Sprintf(\"\/v1.10\/containers\/%s\/stop?t=%d\", id, timeout)\n\t_, err := client.doRequest(\"POST\", uri, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (client *DockerClient) RestartContainer(id string, timeout int) error {\n\turi := fmt.Sprintf(\"\/v1.10\/containers\/%s\/restart?t=%d\", id, timeout)\n\t_, err := client.doRequest(\"POST\", uri, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (client *DockerClient) KillContainer(id string) error {\n\turi := fmt.Sprintf(\"\/v1.10\/containers\/%s\/kill\", id)\n\t_, err := client.doRequest(\"POST\", uri, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (client *DockerClient) StartMonitorEvents(cb Callback, args ...interface{}) {\n\tatomic.StoreInt32(&client.monitorEvents, 1)\n\tgo client.getEvents(cb, args...)\n}\n\nfunc (client *DockerClient) getEvents(cb Callback, args ...interface{}) {\n\turi := client.URL.String() + \"\/v1.10\/events\"\n\tresp, err := client.HTTPClient.Get(uri)\n\tif err != nil {\n\t\tlog.Printf(\"GET %s failed: %v\", uri, err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tdec := json.NewDecoder(resp.Body)\n\tfor atomic.LoadInt32(&client.monitorEvents) > 0 {\n\t\tvar event *Event\n\t\tif err := dec.Decode(&event); err != nil {\n\t\t\tlog.Printf(\"Event decoding failed: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tcb(event, args...)\n\t}\n}\n\nfunc (client *DockerClient) StopAllMonitorEvents() {\n\tatomic.StoreInt32(&client.monitorEvents, 0)\n}\n\nfunc (client *DockerClient) Version() (*Version, error) {\n\tdata, err := client.doRequest(\"GET\", \"\/v1.10\/version\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tversion := &Version{}\n\terr = json.Unmarshal(data, version)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn version, nil\n}\n\nfunc (client *DockerClient) PullImage(name, tag string) error {\n\tv := url.Values{}\n\tv.Set(\"fromImage\", name)\n\tif tag != \"\" {\n\t\tv.Set(\"tag\", tag)\n\t}\n\t_, err := client.doRequest(\"POST\", \"\/v1.10\/images\/create?\"+v.Encode(), nil)\n\treturn err\n}\n\nfunc (client *DockerClient) RemoveContainer(id string, force bool) error {\n\targForce := 0\n\tif force == true {\n\t\targForce = 1\n\t}\n\targs := fmt.Sprintf(\"force=%d\", argForce)\n\n\t_, err := client.doRequest(\"DELETE\", fmt.Sprintf(\"\/v1.10\/containers\/%s?%s\", id, args), nil)\n\treturn err\n}\n\nfunc (client *DockerClient) ListImages() ([]*Image, error) {\n\tdata, err := client.doRequest(\"GET\", \"\/v1.10\/images\/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar images []*Image\n\tif err := json.Unmarshal(data, &images); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn images, nil\n}\n\nfunc (client *DockerClient) RemoveImage(name string) error {\n\t_, err := client.doRequest(\"DELETE\", fmt.Sprintf(\"\/v1.10\/images\/%s\", name), nil)\n\treturn err\n}\n<commit_msg>container logs<commit_after>package dockerclient\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sync\/atomic\"\n)\n\nvar (\n\tErrNotFound = errors.New(\"Not found\")\n)\n\ntype DockerClient struct {\n\tURL *url.URL\n\tHTTPClient *http.Client\n\tmonitorEvents int32\n}\n\ntype Callback func(*Event, ...interface{})\n\ntype Error struct {\n\tStatusCode int\n\tStatus string\n\tmsg string\n}\n\nfunc (e Error) Error() string {\n\treturn fmt.Sprintf(\"%s: %s\", e.Status, e.msg)\n}\n\nfunc NewDockerClient(daemonUrl string, tlsConfig *tls.Config) (*DockerClient, error) {\n\tu, err := url.Parse(daemonUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif u.Scheme == \"tcp\" {\n\t\tu.Scheme = \"http\"\n\t}\n\thttpClient := newHTTPClient(u, tlsConfig)\n\treturn &DockerClient{u, httpClient, 0}, nil\n}\n\nfunc (client *DockerClient) doRequest(method string, path string, body []byte) ([]byte, error) {\n\tb := bytes.NewBuffer(body)\n\treq, err := http.NewRequest(method, client.URL.String()+path, b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\tresp, err := client.HTTPClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode == 404 {\n\t\treturn nil, ErrNotFound\n\t}\n\tif resp.StatusCode >= 400 {\n\t\treturn nil, Error{StatusCode: resp.StatusCode, Status: resp.Status, msg: string(data)}\n\t}\n\treturn data, nil\n}\n\nfunc (client *DockerClient) Info() (*Info, error) {\n\tdata, err := client.doRequest(\"GET\", \"\/v1.10\/info\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tret := &Info{}\n\terr = json.Unmarshal(data, &ret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n}\n\nfunc (client *DockerClient) ListContainers(all bool) ([]Container, error) {\n\targAll := 0\n\tif all == true {\n\t\targAll = 1\n\t}\n\targs := fmt.Sprintf(\"?all=%d\", argAll)\n\tdata, err := client.doRequest(\"GET\", \"\/v1.10\/containers\/json\"+args, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tret := []Container{}\n\terr = json.Unmarshal(data, &ret)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ret, nil\n}\n\nfunc (client *DockerClient) InspectContainer(id string) (*ContainerInfo, error) {\n\turi := fmt.Sprintf(\"\/v1.10\/containers\/%s\/json\", id)\n\tdata, err := client.doRequest(\"GET\", uri, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinfo := &ContainerInfo{}\n\terr = json.Unmarshal(data, info)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn info, nil\n}\n\nfunc (client *DockerClient) CreateContainer(config *ContainerConfig, name string) (string, error) {\n\tdata, err := json.Marshal(config)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\turi := \"\/v1.10\/containers\/create\"\n\n\tif name != \"\" {\n\t\tv := url.Values{}\n\t\tv.Set(\"name\", name)\n\t\turi = fmt.Sprintf(\"%s?%s\", uri, v.Encode())\n\t}\n\tdata, err = client.doRequest(\"POST\", uri, data)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tresult := &RespContainersCreate{}\n\terr = json.Unmarshal(data, result)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn result.Id, nil\n}\n\nfunc (client *DockerClient) ContainerLogs(id string) ([]byte, error) {\n\turi := fmt.Sprintf(\"\/v1.10\/containers\/%s\/logs?stderr=1&stdout=1\", id)\n\tdata, err := client.doRequest(\"GET\", uri, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn data, nil\n}\n\nfunc (client *DockerClient) StartContainer(id string, config *HostConfig) error {\n\tdata, err := json.Marshal(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\turi := fmt.Sprintf(\"\/v1.10\/containers\/%s\/start\", id)\n\t_, err = client.doRequest(\"POST\", uri, data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (client *DockerClient) StopContainer(id string, timeout int) error {\n\turi := fmt.Sprintf(\"\/v1.10\/containers\/%s\/stop?t=%d\", id, timeout)\n\t_, err := client.doRequest(\"POST\", uri, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (client *DockerClient) RestartContainer(id string, timeout int) error {\n\turi := fmt.Sprintf(\"\/v1.10\/containers\/%s\/restart?t=%d\", id, timeout)\n\t_, err := client.doRequest(\"POST\", uri, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (client *DockerClient) KillContainer(id string) error {\n\turi := fmt.Sprintf(\"\/v1.10\/containers\/%s\/kill\", id)\n\t_, err := client.doRequest(\"POST\", uri, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (client *DockerClient) StartMonitorEvents(cb Callback, args ...interface{}) {\n\tatomic.StoreInt32(&client.monitorEvents, 1)\n\tgo client.getEvents(cb, args...)\n}\n\nfunc (client *DockerClient) getEvents(cb Callback, args ...interface{}) {\n\turi := client.URL.String() + \"\/v1.10\/events\"\n\tresp, err := client.HTTPClient.Get(uri)\n\tif err != nil {\n\t\tlog.Printf(\"GET %s failed: %v\", uri, err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\n\tdec := json.NewDecoder(resp.Body)\n\tfor atomic.LoadInt32(&client.monitorEvents) > 0 {\n\t\tvar event *Event\n\t\tif err := dec.Decode(&event); err != nil {\n\t\t\tlog.Printf(\"Event decoding failed: %v\", err)\n\t\t\treturn\n\t\t}\n\t\tcb(event, args...)\n\t}\n}\n\nfunc (client *DockerClient) StopAllMonitorEvents() {\n\tatomic.StoreInt32(&client.monitorEvents, 0)\n}\n\nfunc (client *DockerClient) Version() (*Version, error) {\n\tdata, err := client.doRequest(\"GET\", \"\/v1.10\/version\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tversion := &Version{}\n\terr = json.Unmarshal(data, version)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn version, nil\n}\n\nfunc (client *DockerClient) PullImage(name, tag string) error {\n\tv := url.Values{}\n\tv.Set(\"fromImage\", name)\n\tif tag != \"\" {\n\t\tv.Set(\"tag\", tag)\n\t}\n\t_, err := client.doRequest(\"POST\", \"\/v1.10\/images\/create?\"+v.Encode(), nil)\n\treturn err\n}\n\nfunc (client *DockerClient) RemoveContainer(id string, force bool) error {\n\targForce := 0\n\tif force == true {\n\t\targForce = 1\n\t}\n\targs := fmt.Sprintf(\"force=%d\", argForce)\n\n\t_, err := client.doRequest(\"DELETE\", fmt.Sprintf(\"\/v1.10\/containers\/%s?%s\", id, args), nil)\n\treturn err\n}\n\nfunc (client *DockerClient) ListImages() ([]*Image, error) {\n\tdata, err := client.doRequest(\"GET\", \"\/v1.10\/images\/json\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar images []*Image\n\tif err := json.Unmarshal(data, &images); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn images, nil\n}\n\nfunc (client *DockerClient) RemoveImage(name string) error {\n\t_, err := client.doRequest(\"DELETE\", fmt.Sprintf(\"\/v1.10\/images\/%s\", name), nil)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package df\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc TestAppend(t *testing.T) {\n\tcolname := \"T\"\n\tcol := column{\n\t\tcolName: colname,\n\t}\n\tvar tests = []struct {\n\t\tdata cells\n\t\texpectedLen int\n\t}{\n\t\t{Strings(\"A\", \"B\"), 2},\n\t\t{Strings(\"1\", \"2\"), 2},\n\t\t{Ints(3, 4, nil), 3},\n\t\t{Strings(\"CDE\", \"FGH\"), 2},\n\t\t{nil, 0},\n\t}\n\tfor k, v := range tests {\n\t\tcolb, err := col.append(v.data...)\n\t\tif err != nil {\n\t\t\tt.Error(\"Error on test\", k, \":\", err)\n\t\t}\n\t\texpectedLen := v.expectedLen\n\t\treceivedLen := len(colb.cells)\n\t\tif expectedLen != receivedLen {\n\t\t\tt.Error(\"Error on test\", k, \":\\n\",\n\t\t\t\t\"Expected Len:\", expectedLen,\n\t\t\t\t\"Received Length:\", receivedLen)\n\t\t}\n\t}\n\t_, err := col.append(Int{nil}, String{\"A\"})\n\tif err == nil {\n\t\tt.Error(\"Should throw an error: Conflicting types\")\n\t}\n}\n\nfunc TestNewCol(t *testing.T) {\n\tcol, err := newCol(\"TestCol\", Strings(\"A\", \"B\"))\n\tif err != nil || col == nil {\n\t\tt.Error(\"NewCol has failed unexpectedly:\", err)\n\t}\n\texpected := \"[A B]\"\n\treceived := fmt.Sprint(col.cells)\n\tif expected != received {\n\t\tt.Error(\n\t\t\t\"Single element not being introduced properly\",\n\t\t\t\"Expected:\\n\",\n\t\t\texpected, \"\\n\",\n\t\t\t\"Received:\\n\",\n\t\t\treceived,\n\t\t)\n\t}\n}\n\nfunc TestColumn_parseColumn(t *testing.T) {\n\t\/\/ String to Int\n\t\/\/cola, _ := NewCol(\"TestCol\", Strings(\"1\", \"2\"))\n\t\/\/colb, err := parseColumn(*cola, \"int\")\n\t\/\/if err != nil {\n\t\/\/t.Error(\"Error parsing a df.String column into df.Int:\", err)\n\t\/\/}\n\t\/\/if colb.Len() != cola.Len() ||\n\t\/\/colb.colName != cola.colName ||\n\t\/\/colb.colType != \"df.Int\" ||\n\t\/\/fmt.Sprint(colb.row) != \"[1 2]\" {\n\t\/\/t.Error(\"Error parsing a df.String column into df.Int\",\n\t\/\/\"\\ncola.Len():\", cola.Len(),\n\t\/\/\"\\ncolb.Len():\", colb.Len(),\n\t\/\/\"\\ncola.colName:\", cola.colName,\n\t\/\/\"\\ncolb.colName:\", colb.colName,\n\t\/\/\"\\ncolb.colType:\", colb.colType,\n\t\/\/)\n\t\/\/}\n\n\t\/\/ String to String\n\t\/\/cola, _ = NewCol(\"TestCol\", Strings(\"1\", \"2\"))\n\t\/\/colb, err = parseColumn(*cola, \"string\")\n\t\/\/if err != nil {\n\t\/\/t.Error(\"Error parsing a df.String column into df.String:\", err)\n\t\/\/}\n\t\/\/if colb.Len() != cola.Len() ||\n\t\/\/colb.colName != cola.colName ||\n\t\/\/colb.colType != \"df.String\" ||\n\t\/\/fmt.Sprint(colb.row) != \"[1 2]\" {\n\t\/\/t.Error(\"Error parsing a df.String column into df.Int\",\n\t\/\/\"\\ncola.Len():\", cola.Len(),\n\t\/\/\"\\ncolb.Len():\", colb.Len(),\n\t\/\/\"\\ncola.colName:\", cola.colName,\n\t\/\/\"\\ncolb.colName:\", colb.colName,\n\t\/\/\"\\ncolb.colType:\", colb.colType,\n\t\/\/)\n\t\/\/}\n\n\t\/\/\/\/ Int to String\n\t\/\/cola, _ = NewCol(\"TestCol\", Ints(1, 2))\n\t\/\/colb, err = parseColumn(*cola, \"string\")\n\t\/\/if err != nil {\n\t\/\/t.Error(\"Error parsing a df.Int column into df.String:\", err)\n\t\/\/}\n\t\/\/if colb.Len() != cola.Len() ||\n\t\/\/colb.colName != cola.colName ||\n\t\/\/colb.colType != \"df.String\" ||\n\t\/\/fmt.Sprint(colb.row) != \"[1 2]\" {\n\t\/\/t.Error(\"Error parsing a df.String column into df.Int\",\n\t\/\/\"\\ncola.Len():\", cola.Len(),\n\t\/\/\"\\ncolb.Len():\", colb.Len(),\n\t\/\/\"\\ncola.colName:\", cola.colName,\n\t\/\/\"\\ncolb.colName:\", colb.colName,\n\t\/\/\"\\ncolb.colType:\", colb.colType,\n\t\/\/)\n\t\/\/}\n\n\t\/\/\/\/ Unknown type\n\t\/\/cola, _ = NewCol(\"TestCol\", Ints(1, 2))\n\t\/\/colb, err = parseColumn(*cola, \"asdfg\")\n\t\/\/if err == nil {\n\t\/\/t.Error(\"Error parsing an unknown type, error not thrown.\")\n\t\/\/}\n\n}\n<commit_msg>Restore tests for column parsing<commit_after>package df\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc TestAppend(t *testing.T) {\n\tcolname := \"T\"\n\tcol := column{\n\t\tcolName: colname,\n\t}\n\tvar tests = []struct {\n\t\tdata cells\n\t\texpectedLen int\n\t}{\n\t\t{Strings(\"A\", \"B\"), 2},\n\t\t{Strings(\"1\", \"2\"), 2},\n\t\t{Ints(3, 4, nil), 3},\n\t\t{Strings(\"CDE\", \"FGH\"), 2},\n\t\t{nil, 0},\n\t}\n\tfor k, v := range tests {\n\t\tcolb, err := col.append(v.data...)\n\t\tif err != nil {\n\t\t\tt.Error(\"Error on test\", k, \":\", err)\n\t\t}\n\t\texpectedLen := v.expectedLen\n\t\treceivedLen := len(colb.cells)\n\t\tif expectedLen != receivedLen {\n\t\t\tt.Error(\"Error on test\", k, \":\\n\",\n\t\t\t\t\"Expected Len:\", expectedLen,\n\t\t\t\t\"Received Length:\", receivedLen)\n\t\t}\n\t}\n\t_, err := col.append(Int{nil}, String{\"A\"})\n\tif err == nil {\n\t\tt.Error(\"Should throw an error: Conflicting types\")\n\t}\n}\n\nfunc TestnewCol(t *testing.T) {\n\tcol, err := newCol(\"TestCol\", Strings(\"A\", \"B\"))\n\tif err != nil || col == nil {\n\t\tt.Error(\"newCol has failed unexpectedly:\", err)\n\t}\n\texpected := \"[A B]\"\n\treceived := fmt.Sprint(col.cells)\n\tif expected != received {\n\t\tt.Error(\n\t\t\t\"Single element not being introduced properly\",\n\t\t\t\"Expected:\\n\",\n\t\t\texpected, \"\\n\",\n\t\t\t\"Received:\\n\",\n\t\t\treceived,\n\t\t)\n\t}\n}\n\nfunc TestColumn_parseColumn(t *testing.T) {\n\t\/\/ String to Int\n\tcola, _ := newCol(\"TestCol\", Strings(\"1\", \"2\"))\n\tcolb, err := parseColumn(*cola, \"int\")\n\tif err != nil {\n\t\tt.Error(\"Error parsing a df.String column into df.Int:\", err)\n\t}\n\tif len(colb.cells) != len(cola.cells) ||\n\t\tcolb.colName != cola.colName ||\n\t\tcolb.colType != \"df.Int\" ||\n\t\tfmt.Sprint(colb.cells) != \"[1 2]\" {\n\t\tt.Error(\"Error parsing a df.String column into df.Int\",\n\t\t\t\"\\nlen(cola.cells):\", len(cola.cells),\n\t\t\t\"\\nlen(colb.cells):\", len(colb.cells),\n\t\t\t\"\\ncola.colName:\", cola.colName,\n\t\t\t\"\\ncolb.colName:\", colb.colName,\n\t\t\t\"\\ncolb.colType:\", colb.colType,\n\t\t)\n\t}\n\n\t\/\/ String to String\n\tcola, _ = newCol(\"TestCol\", Strings(\"1\", \"2\"))\n\tcolb, err = parseColumn(*cola, \"string\")\n\tif err != nil {\n\t\tt.Error(\"Error parsing a df.String column into df.String:\", err)\n\t}\n\tif len(colb.cells) != len(cola.cells) ||\n\t\tcolb.colName != cola.colName ||\n\t\tcolb.colType != \"df.String\" ||\n\t\tfmt.Sprint(colb.cells) != \"[1 2]\" {\n\t\tt.Error(\"Error parsing a df.String column into df.Int\",\n\t\t\t\"\\nlen(cola.cells):\", len(cola.cells),\n\t\t\t\"\\nlen(colb.cells):\", len(colb.cells),\n\t\t\t\"\\ncola.colName:\", cola.colName,\n\t\t\t\"\\ncolb.colName:\", colb.colName,\n\t\t\t\"\\ncolb.colType:\", colb.colType,\n\t\t)\n\t}\n\n\t\/\/ Int to String\n\tcola, _ = newCol(\"TestCol\", Ints(1, 2))\n\tcolb, err = parseColumn(*cola, \"string\")\n\tif err != nil {\n\t\tt.Error(\"Error parsing a df.Int column into df.String:\", err)\n\t}\n\tif len(colb.cells) != len(cola.cells) ||\n\t\tcolb.colName != cola.colName ||\n\t\tcolb.colType != \"df.String\" ||\n\t\tfmt.Sprint(colb.cells) != \"[1 2]\" {\n\t\tt.Error(\"Error parsing a df.String column into df.Int\",\n\t\t\t\"\\nlen(cola.cells):\", len(cola.cells),\n\t\t\t\"\\nlen(colb.cells):\", len(colb.cells),\n\t\t\t\"\\ncola.colName:\", cola.colName,\n\t\t\t\"\\ncolb.colName:\", colb.colName,\n\t\t\t\"\\ncolb.colType:\", colb.colType,\n\t\t)\n\t}\n\n\t\/\/ Unknown type\n\tcola, _ = newCol(\"TestCol\", Ints(1, 2))\n\tcolb, err = parseColumn(*cola, \"asdfg\")\n\tif err == nil {\n\t\tt.Error(\"Error parsing an unknown type, error not thrown.\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"fmt\"\n \"encoding\/json\"\n \"net\"\n \"bufio\"\n)\n\ntype PeerInfo struct {\n Peer string `json:\"peer\"`\n Latency int `json:\"latency\"`\n}\n\ntype UserInfo struct {\n Type string `json:\"type\" `\n User string `json:\"user\" `\n Room string `json:\"room\" `\n Host string `json:\"host\" `\n Latency []PeerInfo `json:\"latency\"`\n}\n\ntype Instruction struct {\n Type string `json:\"type\"` \/\/enum: \"newPeerConnection\" \"deletePeerConnection\"\n Parent string `json:\"parent\"`\n Child string `json:\"child\"` \n Host string `json:\"host\"`\n}\n\n\n\nconst (\n CONN_HOST = \"localhost\"\n CONN_PORT = \"8888\"\n CONN_TYPE = \"tcp\"\n)\n\nvar rooms map[string]chan UserInfo\nvar openRoom chan (chan UserInfo)\nvar closeRoom chan (chan UserInfo)\nvar ins chan Instruction\nvar conn net.Conn\n\nfunc main() {\n \/\/ Listen for incoming connections.\n listener, err := net.Listen(CONN_TYPE, CONN_HOST+\":\"+CONN_PORT)\n queue := make(chan UserInfo, 10) \/\/ Buffered channel with capacity of 10\n ins = make(chan Instruction, 10)\n rooms = make(map[string]chan UserInfo, 0)\n \/\/rooms = make(map[string]Room)\n \n if err != nil {\n\tfmt.Println(\"Error listening:\", err.Error())\n }\n \n \/\/ Close the listener when the application closes.\n defer listener.Close()\n \n for {\n\t\/\/ Listen for an incoming connection.\n\tconn, err := listener.Accept()\n\t\n\tif err != nil {\n\t fmt.Println(\"Error accepting: \", err.Error())\n\t continue\n\t}\n\t\n\t\/\/ Setup connections map\n\t\n\t\/\/ Handle connections in a new goroutine.\n\tgo handleRequests(conn, queue)\n\tgo handleTasks(queue) \/\/ Potentially need to increase the number of workers\n\tgo handleInstructions(ins)\n }\n}\n\n\/\/ Handles incoming requests and parse response from json to UserInfo struct\nfunc handleRequests(con net.Conn, queue chan<- UserInfo) {\n fmt.Println(\"handleRequests is working\")\n conn = con\n defer con.Close() \/\/ may cause problem\n \n input := bufio.NewScanner(conn)\n var userInfo UserInfo\n \n for input.Scan() {\n\ttext := input.Text()\n\tbyte_text := []byte(text)\n\terr := json.Unmarshal(byte_text, &userInfo)\n\tif err != nil {\n\t continue\n\t}\n\tqueue <- userInfo \/\/ send userInfo to task queue\n }\n}\n\n\n\nfunc handleTasks(queue <-chan UserInfo) {\n fmt.Println(\"handleTasks is working\")\n\n for {\n\tuserInfo := <- queue\n\t\n\tswitch userInfo.Type {\n\tcase \"newUser\": newUserHandler(userInfo) \n\tcase \"host\": newHostHandler(userInfo)\n\tcase \"disconnectedUser\": disconnectHandler(userInfo)\n\t}\n\tfmt.Printf(\"New task received -> Type: %s User: %s Room: %s\\n\", userInfo.Type, userInfo.User, userInfo.Room)\n }\n}\n\nfunc newUserHandler(userInfo UserInfo) {\n fmt.Println(\"newUserHandlerCalled\")\n roomId := userInfo.Room\n if room, exist := rooms[roomId]; exist {\n\troom <- userInfo\n } else {\n\tfmt.Println(\"ERR: newUserHandler - room doesn't exist\")\n }\n\t\/* Send out instructions *\/\n\t\/* TODO: may need to separate out this part *\/\n\t\n\t\/\/ host := room.getHost()\n \/\/host := userInfo.Host\n\t\/\/if host.Role == \"host\" { \n \/\/ins <- Instruction{Type:\"newPeerConnection\", Parent: host, Child: userInfo.User}\n\t\/\/} else {\n\t \/\/fmt.Println(\"ERR: Host doesn't exist\")\n\t\/\/}\n}\n\nfunc newHostHandler(userInfo UserInfo) {\n fmt.Println(\"newHostHandlerCalled\")\n roomId := userInfo.Room\n if _, exist := rooms[roomId]; !exist {\n\troom := make(chan UserInfo)\n\trooms[roomId] = room\n\tgo manageRoom(room)\n\t\/\/ openRoom <- room\n\troom <- userInfo\n\t\/\/ins <- Instruction{Type:\"host\", Host: userInfo.User}\n } else {\n\tfmt.Println(\"ERR: newHostHandler - room already exists\")\n }\n\t\/*\n\tuser := User{Name: userInfo.User, Role: \"host\"}\n\tusers := make([]User, 0)\n\tusers = append(users, user)\n\troom := Room{ID: roomId, Users: users}\n\trooms[roomId] = room;\n\tfmt.Println(room.getUsers())\n\tins <- Instruction{Type:\"host\", Host: user.Name}\n\t*\/\n}\n\nfunc disconnectHandler(userInfo UserInfo) {\n roomId := userInfo.Room\n if room, exist := rooms[roomId]; exist {\n\troom <- userInfo\n\t\/\/room.removeUser(user)\n\t\n\t\/* Send out instruction *\/\n\t\/\/host := room.getHost()\n\t\/\/host := userInfo.Host;\n\t\n\t\/\/if host.Role == \"host\" {\n\t\/\/ins <- Instruction{Type:\"deletePeerConnection\", Parent: host, Child: userInfo.User}\n\t\/\/} else {\n\t\/\/fmt.Println(\"ERR: Host doesn't exist\")\n\t\/\/}\n\t\n\t\/*\n\tif len(room.getUsers())==0 {\n\t delete(rooms, roomId)\n\t}\n\t*\/\n\t\/\/fmt.Println(room.getUsers())\n } else {\n\tfmt.Println(\"ERR: disconnectHandler - disconnecting from a room non-existing\")\n }\n}\n\nfunc manageRoom(room <-chan UserInfo) {\n \/\/defer close(room)\n \n var graph = NewGraph() \/\/ TODO: implement Graph\n var tree = NewGraph()\n \n for {\n\tuserInfo := <- room\n\t\/\/fmt.Printf(\"[DEBUG] %v\\n\", userInfo.Host)\n\t\n\tswitch userInfo.Type {\n\tcase \"host\": \n\t username := userInfo.User\n\t graph.AddNode(username)\n\t graph.SetHead(username)\n\t ins <- Instruction{Type: \"host\", Host: username} \n\t \n\t if userInfo.Latency != nil { \/\/ may be unnecessary\n\t\tfor _, p := range userInfo.Latency {\n\t\t peername := p.Peer\n\t\t weight := p.Latency\n\t\t graph.AddUniEdge(peername, username, weight)\n\t\t}\n\t }\n\t \n\tcase \"newUser\": \n\t username := userInfo.User\n\t graph.AddNode(username)\n\t for _, p := range userInfo.Latency {\n\t\tpeername := p.Peer\n\t\tweight := p.Latency\n\t\tgraph.AddUniEdge(peername, username, weight)\n\t }\n\t \n\tcase \"disconnectedUser\": \n\t username := userInfo.User\n\t graph.RemoveNode(username)\n\t if graph.GetTotalNodes() == 0 {\n\t\treturn\n\t }\n\t\n\tcase \"closeRoom\":\n\t return\n\t}\n\t\n\t\/\/graph.Print()\n\tif graph.GetTotalNodes() > 1 {\n\t newTree := graph.GetDCMST(2) \/\/ parameter is the constraint. 1 = traveling salesman, 2 means a hamitonian path problem aka maximum spanning binary tree \n\t newTree.Print()\n\t \n\t addedEdges, removedEdges := newTree.Compare(tree) \/\/ addedEdges, removedEdges := graph.Compare(tree, newTree) \n\t \n\t for _, edge := range removedEdges {\n\t\tins <- Instruction{Type:\"deletePeerConnection\", Parent: edge.Parent.Value, Child: edge.Child.Value}\n\t }\n\t \n\t for _, edge := range addedEdges { \/\/ assuming addedEdges are sorted in good orders \n\t\tins <- Instruction{Type:\"newPeerConnection\", Parent: edge.Parent.Value, Child: edge.Child.Value}\n\t }\n\t\n\t tree = newTree\n\t}\n }\n}\n\nfunc handleInstructions(ins <-chan Instruction) {\n fmt.Println(\"handleInstructions is working\")\n for {\n\tinstruction := <- ins\n\tstr, err := json.Marshal(instruction)\n\tif err != nil {\n\t fmt.Println(\"Error listening:\", err.Error())\n\t continue\n\t}\n\tfmt.Fprintf(conn, \"%s\\n\", string(str))\t\/\/ Refering to global variable\n\t\t\t\t\t\t\/\/ assuming one signal server\n\tfmt.Println(\"Instruction Sent\")\n }\n}\n<commit_msg>Add host in go instruction sent<commit_after>package main\n\nimport (\n \"fmt\"\n \"encoding\/json\"\n \"net\"\n \"bufio\"\n)\n\ntype PeerInfo struct {\n Peer string `json:\"peer\"`\n Latency int `json:\"latency\"`\n}\n\ntype UserInfo struct {\n Type string `json:\"type\" `\n User string `json:\"user\" `\n Room string `json:\"room\" `\n Host string `json:\"host\" `\n Latency []PeerInfo `json:\"latency\"`\n}\n\ntype Instruction struct {\n Type string `json:\"type\"` \/\/enum: \"newPeerConnection\" \"deletePeerConnection\"\n Parent string `json:\"parent\"`\n Child string `json:\"child\"` \n Host string `json:\"host\"`\n}\n\n\n\nconst (\n CONN_HOST = \"localhost\"\n CONN_PORT = \"8888\"\n CONN_TYPE = \"tcp\"\n)\n\nvar rooms map[string]chan UserInfo\nvar openRoom chan (chan UserInfo)\nvar closeRoom chan (chan UserInfo)\nvar ins chan Instruction\nvar conn net.Conn\n\nfunc main() {\n \/\/ Listen for incoming connections.\n listener, err := net.Listen(CONN_TYPE, CONN_HOST+\":\"+CONN_PORT)\n queue := make(chan UserInfo, 10) \/\/ Buffered channel with capacity of 10\n ins = make(chan Instruction, 10)\n rooms = make(map[string]chan UserInfo, 0)\n \/\/rooms = make(map[string]Room)\n \n if err != nil {\n\tfmt.Println(\"Error listening:\", err.Error())\n }\n \n \/\/ Close the listener when the application closes.\n defer listener.Close()\n \n for {\n\t\/\/ Listen for an incoming connection.\n\tconn, err := listener.Accept()\n\t\n\tif err != nil {\n\t fmt.Println(\"Error accepting: \", err.Error())\n\t continue\n\t}\n\t\n\t\/\/ Setup connections map\n\t\n\t\/\/ Handle connections in a new goroutine.\n\tgo handleRequests(conn, queue)\n\tgo handleTasks(queue) \/\/ Potentially need to increase the number of workers\n\tgo handleInstructions(ins)\n }\n}\n\n\/\/ Handles incoming requests and parse response from json to UserInfo struct\nfunc handleRequests(con net.Conn, queue chan<- UserInfo) {\n fmt.Println(\"handleRequests is working\")\n conn = con\n defer con.Close() \/\/ may cause problem\n \n input := bufio.NewScanner(conn)\n var userInfo UserInfo\n \n for input.Scan() {\n\ttext := input.Text()\n\tbyte_text := []byte(text)\n\terr := json.Unmarshal(byte_text, &userInfo)\n\tif err != nil {\n\t continue\n\t}\n\tqueue <- userInfo \/\/ send userInfo to task queue\n }\n}\n\n\n\nfunc handleTasks(queue <-chan UserInfo) {\n fmt.Println(\"handleTasks is working\")\n\n for {\n\tuserInfo := <- queue\n\t\n\tswitch userInfo.Type {\n\tcase \"newUser\": newUserHandler(userInfo) \n\tcase \"host\": newHostHandler(userInfo)\n\tcase \"disconnectedUser\": disconnectHandler(userInfo)\n\t}\n\tfmt.Printf(\"New task received -> Type: %s User: %s Room: %s\\n\", userInfo.Type, userInfo.User, userInfo.Room)\n }\n}\n\nfunc newUserHandler(userInfo UserInfo) {\n fmt.Println(\"newUserHandlerCalled\")\n roomId := userInfo.Room\n if room, exist := rooms[roomId]; exist {\n\troom <- userInfo\n } else {\n\tfmt.Println(\"ERR: newUserHandler - room doesn't exist\")\n }\n\t\/* Send out instructions *\/\n\t\/* TODO: may need to separate out this part *\/\n\t\n\t\/\/ host := room.getHost()\n \/\/host := userInfo.Host\n\t\/\/if host.Role == \"host\" { \n \/\/ins <- Instruction{Type:\"newPeerConnection\", Parent: host, Child: userInfo.User}\n\t\/\/} else {\n\t \/\/fmt.Println(\"ERR: Host doesn't exist\")\n\t\/\/}\n}\n\nfunc newHostHandler(userInfo UserInfo) {\n fmt.Println(\"newHostHandlerCalled\")\n roomId := userInfo.Room\n if _, exist := rooms[roomId]; !exist {\n\troom := make(chan UserInfo)\n\trooms[roomId] = room\n\tgo manageRoom(room)\n\t\/\/ openRoom <- room\n\troom <- userInfo\n\t\/\/ins <- Instruction{Type:\"host\", Host: userInfo.User}\n } else {\n\tfmt.Println(\"ERR: newHostHandler - room already exists\")\n }\n\t\/*\n\tuser := User{Name: userInfo.User, Role: \"host\"}\n\tusers := make([]User, 0)\n\tusers = append(users, user)\n\troom := Room{ID: roomId, Users: users}\n\trooms[roomId] = room;\n\tfmt.Println(room.getUsers())\n\tins <- Instruction{Type:\"host\", Host: user.Name}\n\t*\/\n}\n\nfunc disconnectHandler(userInfo UserInfo) {\n roomId := userInfo.Room\n if room, exist := rooms[roomId]; exist {\n\troom <- userInfo\n\t\/\/room.removeUser(user)\n\t\n\t\/* Send out instruction *\/\n\t\/\/host := room.getHost()\n\t\/\/host := userInfo.Host;\n\t\n\t\/\/if host.Role == \"host\" {\n\t\/\/ins <- Instruction{Type:\"deletePeerConnection\", Parent: host, Child: userInfo.User}\n\t\/\/} else {\n\t\/\/fmt.Println(\"ERR: Host doesn't exist\")\n\t\/\/}\n\t\n\t\/*\n\tif len(room.getUsers())==0 {\n\t delete(rooms, roomId)\n\t}\n\t*\/\n\t\/\/fmt.Println(room.getUsers())\n } else {\n\tfmt.Println(\"ERR: disconnectHandler - disconnecting from a room non-existing\")\n }\n}\n\nfunc manageRoom(room <-chan UserInfo) {\n \/\/defer close(room)\n \n var graph = NewGraph() \/\/ TODO: implement Graph\n var tree = NewGraph()\n \n for {\n\tuserInfo := <- room\n\t\/\/fmt.Printf(\"[DEBUG] %v\\n\", userInfo.Host)\n\t\n\tswitch userInfo.Type {\n\tcase \"host\": \n\t username := userInfo.User\n\t graph.AddNode(username)\n\t graph.SetHead(username)\n\t ins <- Instruction{Type: \"host\", Host: username} \n\t \n\t if userInfo.Latency != nil { \/\/ may be unnecessary\n\t\tfor _, p := range userInfo.Latency {\n\t\t peername := p.Peer\n\t\t weight := p.Latency\n\t\t graph.AddUniEdge(peername, username, weight)\n\t\t}\n\t }\n\t \n\tcase \"newUser\": \n\t username := userInfo.User\n\t graph.AddNode(username)\n\t for _, p := range userInfo.Latency {\n\t\tpeername := p.Peer\n\t\tweight := p.Latency\n\t\tgraph.AddUniEdge(peername, username, weight)\n\t }\n\t \n\tcase \"disconnectedUser\": \n\t username := userInfo.User\n\t graph.RemoveNode(username)\n\t if graph.GetTotalNodes() == 0 {\n\t\treturn\n\t }\n\t\n\tcase \"closeRoom\":\n\t return\n\t}\n\t\n\t\/\/graph.Print()\n\tif graph.GetTotalNodes() > 1 {\n\t newTree := graph.GetDCMST(2) \/\/ parameter is the constraint. 1 = traveling salesman, 2 means a hamitonian path problem aka maximum spanning binary tree \n\t newTree.Print()\n\t \n\t addedEdges, removedEdges := newTree.Compare(tree) \/\/ addedEdges, removedEdges := graph.Compare(tree, newTree) \n\t \n\t host := newTree.GetHead().Value\n\t for _, edge := range removedEdges {\n\t\tins <- Instruction{Type:\"deletePeerConnection\", Parent: edge.Parent.Value, Child: edge.Child.Value, Host:host}\n\t }\n\t \n\t for _, edge := range addedEdges { \/\/ assuming addedEdges are sorted in good orders \n\t\tins <- Instruction{Type:\"newPeerConnection\", Parent: edge.Parent.Value, Child: edge.Child.Value, Host:host}\n\t }\n\t\n\t tree = newTree\n\t}\n }\n}\n\nfunc handleInstructions(ins <-chan Instruction) {\n fmt.Println(\"handleInstructions is working\")\n for {\n\tinstruction := <- ins\n\tstr, err := json.Marshal(instruction)\n\tif err != nil {\n\t fmt.Println(\"Error listening:\", err.Error())\n\t continue\n\t}\n\tfmt.Fprintf(conn, \"%s\\n\", string(str))\t\/\/ Refering to global variable\n\t\t\t\t\t\t\/\/ assuming one signal server\n\tfmt.Println(\"Instruction Sent\")\n }\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage proxy\n\nimport (\n\t\"github.com\/pkg\/errors\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\n\t\"github.com\/projectcalico\/felix\/bpf\"\n)\n\n\/\/ StartKubeProxy start a new kube-proxy if there was no error\nfunc StartKubeProxy(k8sClientSet *kubernetes.Clientset, hostname string, frontendMap, backendMap bpf.Map, opts ...Option) error {\n\tsyncer, err := NewSyncer(nil, frontendMap, backendMap)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"new bpf syncer\")\n\t}\n\n\t_, err = New(k8sClientSet, syncer, hostname, opts...)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"new proxy\")\n\t}\n\n\tlog.Infof(\"kube-proxy started, hostname=%q\", hostname)\n\n\treturn nil\n}\n<commit_msg>kube-proxy get the host ip from eth0<commit_after>\/\/ Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage proxy\n\nimport (\n\t\"net\"\n\n\t\"github.com\/pkg\/errors\"\n\tlog \"github.com\/sirupsen\/logrus\"\n\t\"github.com\/vishvananda\/netlink\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\n\t\"github.com\/projectcalico\/felix\/bpf\"\n)\n\n\/\/ StartKubeProxy start a new kube-proxy if there was no error\nfunc StartKubeProxy(k8sClientSet *kubernetes.Clientset, hostname string, frontendMap, backendMap bpf.Map, opts ...Option) error {\n\thostIPs, err := getHostIPs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsyncer, err := NewSyncer(hostIPs, frontendMap, backendMap)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"new bpf syncer\")\n\t}\n\n\t_, err = New(k8sClientSet, syncer, hostname, opts...)\n\tif err != nil {\n\t\treturn errors.WithMessage(err, \"new proxy\")\n\t}\n\n\tlog.Infof(\"kube-proxy started, hostname=%q hostIP=%+v\", hostname, hostIPs)\n\n\treturn nil\n}\n\nfunc getHostIPs() ([]net.IP, error) {\n\tnl, err := netlink.NewHandle()\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"failed to create netlink handle: %s\", err)\n\t}\n\n\teth0, err := nl.LinkByName(\"eth0\")\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"failed to find eth0: %s\", err)\n\t}\n\n\taddrs, err := netlink.AddrList(eth0, 0)\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"failed to list eth0 addrs: %s\", err)\n\t}\n\n\tvar ret []net.IP\n\n\tfor _, a := range addrs {\n\t\tif a.IPNet != nil {\n\t\t\tif ipv4 := a.IP.To4(); ipv4 != nil {\n\t\t\t\tret = append(ret, ipv4)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn ret, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package plugins\n\nimport (\n \"github.com\/bwmarrin\/discordgo\"\n \"strings\"\n \"github.com\/Seklfreak\/Robyul2\/helpers\"\n rethink \"github.com\/gorethink\/gorethink\"\n \"math\"\n \"time\"\n \"math\/rand\"\n \"sync\"\n \"errors\"\n \"github.com\/Seklfreak\/Robyul2\/ratelimits\"\n \"fmt\"\n \"github.com\/bradfitz\/slice\"\n \"strconv\"\n)\n\ntype Levels struct {\n sync.RWMutex\n\n buckets map[string]int8\n}\n\nvar (\n LevelsBucket = &ratelimits.BucketContainer{}\n\n \/\/ How many keys a bucket may contain when created\n BUCKET_INITIAL_FILL int8 = 1\n\n \/\/ The maximum amount of keys a user may possess\n BUCKET_UPPER_BOUND int8 = 1\n\n \/\/ How often new keys drip into the buckets\n DROP_INTERVAL = 60 * time.Second\n\n \/\/ How many keys may drop at a time\n DROP_SIZE int8 = 1\n)\n\nfunc (m *Levels) Commands() []string {\n return []string{\n \"level\",\n }\n}\n\ntype DB_Levels_ServerUser struct {\n ID string `gorethink:\"id,omitempty\"`\n UserID string `gorethink:\"userid\"`\n GuildID string `gorethink:\"guildid\"`\n Exp int64 `gorethink:\"exp\"`\n}\n\nfunc (m *Levels) Init(session *discordgo.Session) {\n m.BucketInit()\n}\n\nfunc (m *Levels) Action(command string, content string, msg *discordgo.Message, session *discordgo.Session) {\n switch command {\n case \"level\": \/\/ [p]level <user> or [p]level top\n session.ChannelTyping(msg.ChannelID)\n targetUser, err := session.User(msg.Author.ID)\n helpers.Relax(err)\n args := strings.Split(content, \" \")\n\n channel, err := session.Channel(msg.ChannelID)\n helpers.Relax(err)\n\n if len(args) >= 1 && args[0] != \"\" {\n switch args[0] {\n case \"leaderboard\", \"top\": \/\/ [p]level top\n var levelsServersUsers []DB_Levels_ServerUser\n listCursor, err := rethink.Table(\"levels_serverusers\").Filter(\n rethink.Row.Field(\"guildid\").Eq(channel.GuildID),\n ).Run(helpers.GetDB())\n helpers.Relax(err)\n defer listCursor.Close()\n err = listCursor.All(&levelsServersUsers)\n\n if err == rethink.ErrEmptyResult || len(levelsServersUsers) <= 0 {\n _, err := session.ChannelMessageSend(msg.ChannelID, helpers.GetText(\"plugins.levels.top-server-no-stats\"))\n helpers.Relax(err)\n return\n } else if err != nil {\n helpers.Relax(err)\n }\n\n slice.Sort(levelsServersUsers, func(i, j int) bool {\n return levelsServersUsers[i].Exp > levelsServersUsers[j].Exp\n })\n\n topLevelEmbed := &discordgo.MessageEmbed{\n Color: 0x0FADED,\n Title: helpers.GetText(\"plugins.levels.top-server-embed-title\"),\n \/\/Description: \"\",\n \/\/Footer: &discordgo.MessageEmbedFooter{Text: helpers.GetText(\"plugins.stats.voicestats-embed-footer\")},\n Fields: []*discordgo.MessageEmbedField{},\n }\n\n i := 0\n for _, levelsServersUser := range levelsServersUsers {\n currentMember, err := session.GuildMember(channel.GuildID, levelsServersUser.UserID)\n fullUsername := currentMember.User.Username\n if currentMember.Nick != \"\" {\n fullUsername += \" ~ \" + currentMember.Nick\n }\n helpers.Relax(err)\n topLevelEmbed.Fields = append(topLevelEmbed.Fields, &discordgo.MessageEmbedField{\n Name: fmt.Sprintf(\"#%d: %s\", i+1, fullUsername),\n Value: fmt.Sprintf(\"Level: %d\", m.getLevelFromExp(levelsServersUser.Exp)),\n Inline: false,\n })\n i++\n if i >= 10 {\n break\n }\n }\n\n _, err = session.ChannelMessageSendEmbed(msg.ChannelID, topLevelEmbed)\n helpers.Relax(err)\n return\n }\n targetUser, err = helpers.GetUserFromMention(args[0])\n if targetUser == nil || targetUser.ID == \"\" {\n _, err := session.ChannelMessageSend(msg.ChannelID, helpers.GetText(\"bot.arguments.invalid\"))\n helpers.Relax(err)\n return\n }\n }\n\n var levelsServersUser []DB_Levels_ServerUser\n listCursor, err := rethink.Table(\"levels_serverusers\").Filter(\n rethink.Row.Field(\"userid\").Eq(targetUser.ID),\n ).Run(helpers.GetDB())\n helpers.Relax(err)\n defer listCursor.Close()\n err = listCursor.All(&levelsServersUser)\n\n if err == rethink.ErrEmptyResult {\n _, err := session.ChannelMessageSend(msg.ChannelID, helpers.GetText(\"plugins.levels.level-no-stats\"))\n helpers.Relax(err)\n return\n } else if err != nil {\n helpers.Relax(err)\n }\n\n var levelThisServerUser DB_Levels_ServerUser\n var totalExp int64\n for _, levelsServerUser := range levelsServersUser {\n if levelsServerUser.GuildID == channel.GuildID {\n levelThisServerUser = levelsServerUser\n }\n totalExp += levelsServerUser.Exp\n }\n\n if totalExp <= 0 {\n _, err := session.ChannelMessageSend(msg.ChannelID, helpers.GetText(\"plugins.levels.level-no-stats\"))\n helpers.Relax(err)\n return\n }\n\n currentMember, err := session.GuildMember(channel.GuildID, levelThisServerUser.UserID)\n fullUsername := currentMember.User.Username\n if currentMember.Nick != \"\" {\n fullUsername += \" ~ \" + currentMember.Nick\n }\n\n userLevelEmbed := &discordgo.MessageEmbed{\n Color: 0x0FADED,\n Title: helpers.GetTextF(\"plugins.levels.user-embed-title\", fullUsername),\n \/\/Description: \"\",\n \/\/Footer: &discordgo.MessageEmbedFooter{Text: helpers.GetText(\"plugins.stats.voicestats-embed-footer\")},\n Fields: []*discordgo.MessageEmbedField{\n &discordgo.MessageEmbedField{\n Name: \"Level\",\n Value: strconv.Itoa(m.getLevelFromExp(levelThisServerUser.Exp)),\n Inline: true,\n },\n &discordgo.MessageEmbedField{\n Name: \"Next Level Progress\",\n Value: strconv.Itoa(m.getProgressToNextLevelFromExp(levelThisServerUser.Exp)) + \" %\",\n Inline: true,\n },\n &discordgo.MessageEmbedField{\n Name: \":white_circle:\",\n Value: \":white_circle:\",\n Inline: true,\n },\n &discordgo.MessageEmbedField{\n Name: \"Global Level\",\n Value: strconv.Itoa(m.getLevelFromExp(totalExp)),\n Inline: true,\n },\n &discordgo.MessageEmbedField{\n Name: \"Next Global Level Progress\",\n Value: strconv.Itoa(m.getProgressToNextLevelFromExp(totalExp)) + \" %\",\n Inline: true,\n },\n &discordgo.MessageEmbedField{\n Name: \":white_circle:\",\n Value: \":white_circle:\",\n Inline: true,\n },\n },\n }\n\n _, err = session.ChannelMessageSendEmbed(msg.ChannelID, userLevelEmbed)\n helpers.Relax(err)\n return\n }\n\n}\n\nfunc (m *Levels) OnMessage(content string, msg *discordgo.Message, session *discordgo.Session) {\n channel, err := session.Channel(msg.ChannelID)\n helpers.Relax(err)\n \/\/ ignore bot messages\n if msg.Author.Bot == true {\n return\n }\n \/\/ ignore commands\n prefix := helpers.GetPrefixForServer(channel.GuildID)\n if prefix != \"\" {\n if strings.HasPrefix(content, prefix) {\n return\n }\n }\n \/\/ check if bucket is empty\n if !m.BucketHasKeys(channel.GuildID + msg.Author.ID) {\n \/\/m.BucketSet(channel.GuildID+msg.Author.ID, -1)\n return\n }\n\n err = m.BucketDrain(1, channel.GuildID+msg.Author.ID)\n helpers.Relax(err)\n\n levelsServerUser := m.getLevelsServerUserOrCreateNew(channel.GuildID, msg.Author.ID)\n levelsServerUser.Exp += m.getRandomExpForMessage()\n m.setLevelsServerUser(levelsServerUser)\n}\n\nfunc (m *Levels) OnGuildMemberAdd(member *discordgo.Member, session *discordgo.Session) {\n\n}\n\nfunc (m *Levels) OnGuildMemberRemove(member *discordgo.Member, session *discordgo.Session) {\n\n}\n\nfunc (m *Levels) getLevelsServerUserOrCreateNew(guildid string, userid string) DB_Levels_ServerUser {\n var levelsServerUser DB_Levels_ServerUser\n listCursor, err := rethink.Table(\"levels_serverusers\").Filter(\n rethink.Row.Field(\"guildid\").Eq(guildid),\n ).Filter(\n rethink.Row.Field(\"userid\").Eq(userid),\n ).Run(helpers.GetDB())\n helpers.Relax(err)\n defer listCursor.Close()\n err = listCursor.One(&levelsServerUser)\n\n if err == rethink.ErrEmptyResult {\n insert := rethink.Table(\"levels_serverusers\").Insert(DB_Levels_ServerUser{GuildID: guildid, UserID: userid})\n _, e := insert.RunWrite(helpers.GetDB())\n if e != nil {\n panic(e)\n } else {\n return m.getLevelsServerUserOrCreateNew(guildid, userid)\n }\n } else if err != nil {\n panic(err)\n }\n\n return levelsServerUser\n}\n\nfunc (m *Levels) setLevelsServerUser(entry DB_Levels_ServerUser) {\n _, err := rethink.Table(\"levels_serverusers\").Update(entry).Run(helpers.GetDB())\n helpers.Relax(err)\n}\n\nfunc (m *Levels) getLevelFromExp(exp int64) int {\n calculatedLevel := 0.1 * math.Sqrt(float64(exp))\n\n return int(math.Floor(calculatedLevel))\n}\n\nfunc (m *Levels) getExpForLevel(level int) int64 {\n if level <= 0 {\n return 0\n }\n\n calculatedExp := math.Pow(float64(level)\/0.1, 2)\n return int64(calculatedExp)\n}\n\nfunc (m *Levels) getProgressToNextLevelFromExp(exp int64) int {\n expLevelCurrently := exp - m.getExpForLevel(m.getLevelFromExp(exp))\n expLevelNext := m.getExpForLevel((m.getLevelFromExp(exp) + 1)) - m.getExpForLevel(m.getLevelFromExp(exp))\n return int(expLevelCurrently \/ (expLevelNext \/ 100))\n}\n\nfunc (m *Levels) getRandomExpForMessage() int64 {\n min := 10\n max := 15\n rand.Seed(time.Now().Unix())\n return int64(rand.Intn(max-min) + min)\n}\n\nfunc (b *Levels) BucketInit() {\n b.Lock()\n b.buckets = make(map[string]int8)\n b.Unlock()\n\n go b.BucketRefiller()\n}\n\n\/\/ Refills user buckets in a set interval\nfunc (b *Levels) BucketRefiller() {\n for {\n b.Lock()\n for user, keys := range b.buckets {\n \/\/ Chill zone\n if keys == -1 {\n b.buckets[user]++\n continue\n }\n\n \/\/ Chill zone exit\n if keys == 0 {\n b.buckets[user] = BUCKET_INITIAL_FILL\n continue\n }\n\n \/\/ More free keys for nice users :3\n if keys < BUCKET_UPPER_BOUND {\n b.buckets[user] += DROP_SIZE\n continue\n }\n }\n b.Unlock()\n\n time.Sleep(DROP_INTERVAL)\n }\n}\n\n\/\/ Check if the user has a bucket. If not create one\nfunc (b *Levels) CreateBucketIfNotExists(user string) {\n b.RLock()\n _, e := b.buckets[user]\n b.RUnlock()\n\n if !e {\n b.Lock()\n b.buckets[user] = BUCKET_INITIAL_FILL\n b.Unlock()\n }\n}\n\n\/\/ Drains $amount from $user if he has enough keys left\nfunc (b *Levels) BucketDrain(amount int8, user string) error {\n b.CreateBucketIfNotExists(user)\n\n \/\/ Check if there are enough keys left\n b.RLock()\n userAmount := b.buckets[user]\n b.RUnlock()\n\n if amount > userAmount {\n return errors.New(\"No keys left\")\n }\n\n \/\/ Remove keys from bucket\n b.Lock()\n b.buckets[user] -= amount\n b.Unlock()\n\n return nil\n}\n\n\/\/ Check if the user still has keys\nfunc (b *Levels) BucketHasKeys(user string) bool {\n b.CreateBucketIfNotExists(user)\n\n b.RLock()\n defer b.RUnlock()\n\n return b.buckets[user] > 0\n}\n\nfunc (b *Levels) BucketGet(user string) int8 {\n b.RLock()\n defer b.RUnlock()\n\n return b.buckets[user]\n}\n\nfunc (b *Levels) BucketSet(user string, value int8) {\n b.Lock()\n b.buckets[user] = value\n b.Unlock()\n}\n<commit_msg>[levels] adds [p]level process-history command<commit_after>package plugins\n\nimport (\n \"github.com\/bwmarrin\/discordgo\"\n \"strings\"\n \"github.com\/Seklfreak\/Robyul2\/helpers\"\n rethink \"github.com\/gorethink\/gorethink\"\n \"math\"\n \"time\"\n \"math\/rand\"\n \"sync\"\n \"errors\"\n \"github.com\/Seklfreak\/Robyul2\/ratelimits\"\n \"fmt\"\n \"github.com\/bradfitz\/slice\"\n \"strconv\"\n \"git.lukas.moe\/sn0w\/Karen\/logger\"\n)\n\ntype Levels struct {\n sync.RWMutex\n\n buckets map[string]int8\n}\n\nvar (\n LevelsBucket = &ratelimits.BucketContainer{}\n\n \/\/ How many keys a bucket may contain when created\n BUCKET_INITIAL_FILL int8 = 1\n\n \/\/ The maximum amount of keys a user may possess\n BUCKET_UPPER_BOUND int8 = 1\n\n \/\/ How often new keys drip into the buckets\n DROP_INTERVAL = 60 * time.Second\n\n \/\/ How many keys may drop at a time\n DROP_SIZE int8 = 1\n\n temporaryIgnoredGuilds []string\n)\n\nfunc (m *Levels) Commands() []string {\n return []string{\n \"level\",\n \"levels\",\n }\n}\n\ntype DB_Levels_ServerUser struct {\n ID string `gorethink:\"id,omitempty\"`\n UserID string `gorethink:\"userid\"`\n GuildID string `gorethink:\"guildid\"`\n Exp int64 `gorethink:\"exp\"`\n}\n\nfunc (m *Levels) Init(session *discordgo.Session) {\n m.BucketInit()\n}\n\nfunc (m *Levels) Action(command string, content string, msg *discordgo.Message, session *discordgo.Session) {\n switch command {\n case \"level\", \"levels\": \/\/ [p]level <user> or [p]level top\n session.ChannelTyping(msg.ChannelID)\n targetUser, err := session.User(msg.Author.ID)\n helpers.Relax(err)\n args := strings.Split(content, \" \")\n\n channel, err := session.Channel(msg.ChannelID)\n helpers.Relax(err)\n\n if len(args) >= 1 && args[0] != \"\" {\n switch args[0] {\n case \"leaderboard\", \"top\": \/\/ [p]level top\n var levelsServersUsers []DB_Levels_ServerUser\n listCursor, err := rethink.Table(\"levels_serverusers\").Filter(\n rethink.Row.Field(\"guildid\").Eq(channel.GuildID),\n ).Run(helpers.GetDB())\n helpers.Relax(err)\n defer listCursor.Close()\n err = listCursor.All(&levelsServersUsers)\n\n if err == rethink.ErrEmptyResult || len(levelsServersUsers) <= 0 {\n _, err := session.ChannelMessageSend(msg.ChannelID, helpers.GetText(\"plugins.levels.top-server-no-stats\"))\n helpers.Relax(err)\n return\n } else if err != nil {\n helpers.Relax(err)\n }\n\n slice.Sort(levelsServersUsers, func(i, j int) bool {\n return levelsServersUsers[i].Exp > levelsServersUsers[j].Exp\n })\n\n topLevelEmbed := &discordgo.MessageEmbed{\n Color: 0x0FADED,\n Title: helpers.GetText(\"plugins.levels.top-server-embed-title\"),\n \/\/Description: \"\",\n \/\/Footer: &discordgo.MessageEmbedFooter{Text: helpers.GetText(\"plugins.stats.voicestats-embed-footer\")},\n Fields: []*discordgo.MessageEmbedField{},\n }\n\n i := 0\n for _, levelsServersUser := range levelsServersUsers {\n currentMember, err := session.GuildMember(channel.GuildID, levelsServersUser.UserID)\n fullUsername := currentMember.User.Username\n if currentMember.Nick != \"\" {\n fullUsername += \" ~ \" + currentMember.Nick\n }\n helpers.Relax(err)\n topLevelEmbed.Fields = append(topLevelEmbed.Fields, &discordgo.MessageEmbedField{\n Name: fmt.Sprintf(\"#%d: %s\", i+1, fullUsername),\n Value: fmt.Sprintf(\"Level: %d\", m.getLevelFromExp(levelsServersUser.Exp)),\n Inline: false,\n })\n i++\n if i >= 10 {\n break\n }\n }\n\n _, err = session.ChannelMessageSendEmbed(msg.ChannelID, topLevelEmbed)\n helpers.Relax(err)\n return\n case \"process-history\": \/\/ [p]level process-history\n helpers.RequireBotAdmin(msg, func() {\n session.ChannelTyping(msg.ChannelID)\n channel, err := session.Channel(msg.ChannelID)\n helpers.Relax(err)\n guild, err := session.Guild(channel.GuildID)\n helpers.Relax(err)\n \/\/ pause new message processing for that guild\n temporaryIgnoredGuilds = append(temporaryIgnoredGuilds, channel.GuildID)\n _, err = session.ChannelMessageSend(msg.ChannelID, \"Temporary disabled EXP Processing for this server while processing the Message History.\")\n helpers.Relax(err)\n \/\/ reset accounts on this server\n var levelsServersUsers []DB_Levels_ServerUser\n listCursor, err := rethink.Table(\"levels_serverusers\").Filter(\n rethink.Row.Field(\"guildid\").Eq(channel.GuildID),\n ).Run(helpers.GetDB())\n helpers.Relax(err)\n defer listCursor.Close()\n err = listCursor.All(&levelsServersUsers)\n for _, levelsServerUser := range levelsServersUsers {\n levelsServerUser.Exp = 0\n m.setLevelsServerUser(levelsServerUser)\n }\n _, err = session.ChannelMessageSend(msg.ChannelID, \"Resetted the EXP for every User on this server.\")\n helpers.Relax(err)\n \/\/ process history\n \/\/var wg sync.WaitGroup\n \/\/wg.Add(len(guild.Channels))\n for _, guildChannel := range guild.Channels {\n guildChannelCurrent := guildChannel\n \/\/go func() {\n prefix := helpers.GetPrefixForServer(guildChannelCurrent.GuildID)\n expForUsers := make(map[string]int64)\n \/\/defer wg.Done()\n if guildChannelCurrent.Type == \"voice\" {\n continue\n }\n\n logger.VERBOSE.L(\"levels\", fmt.Sprintf(\"Started processing of Channel #%s (#%s) on Guild %s (#%s)\",\n guildChannelCurrent.Name, guildChannelCurrent.ID, guild.Name, guild.ID))\n \/\/ (asynchronous)\n _, err = session.ChannelMessageSend(msg.ChannelID, fmt.Sprintf(\"Started processing Messages for Channel <#%s>.\", guildChannelCurrent.ID))\n helpers.Relax(err)\n lastBefore := \"\"\n for {\n messages, err := session.ChannelMessages(guildChannelCurrent.ID, 100, lastBefore, \"\")\n if err != nil {\n logger.ERROR.L(\"levels\", err.Error())\n break\n }\n logger.VERBOSE.L(\"levels\", fmt.Sprintf(\"Processing %d messages for Channel #%s (#%s) from before \\\"%s\\\" on Guild %s (#%s)\",\n len(messages), guildChannelCurrent.Name, guildChannelCurrent.ID, lastBefore, guild.Name, guild.ID))\n if len(messages) <= 0 {\n break\n }\n for _, message := range messages {\n \/\/ ignore bot messages\n if message.Author.Bot == true {\n continue\n }\n \/\/ ignore commands\n if prefix != \"\" {\n if strings.HasPrefix(message.Content, prefix) {\n continue\n }\n }\n if _, ok := expForUsers[message.Author.ID]; ok {\n expForUsers[message.Author.ID] += 5\n } else {\n expForUsers[message.Author.ID] = 5\n }\n\n }\n lastBefore = messages[len(messages)-1].ID\n }\n\n for userId, expForuser := range expForUsers {\n levelsServerUser := m.getLevelsServerUserOrCreateNew(guildChannelCurrent.GuildID, userId)\n levelsServerUser.Exp += expForuser\n m.setLevelsServerUser(levelsServerUser)\n }\n\n logger.VERBOSE.L(\"levels\", fmt.Sprintf(\"Completed processing of Channel #%s (#%s) on Guild %s (#%s)\",\n guildChannelCurrent.Name, guildChannelCurrent.ID, guild.Name, guild.ID))\n _, err = session.ChannelMessageSend(msg.ChannelID, fmt.Sprintf(\"Completed processing Messages for Channel <#%s>.\", guildChannelCurrent.ID))\n helpers.Relax(err)\n \/\/}()\n }\n \/\/fmt.Println(\"Waiting for all channels\")\n \/\/wg.Wait()\n \/\/ enable new message processing again\n var newTemporaryIgnoredGuilds []string\n for _, temporaryIgnoredGuild := range temporaryIgnoredGuilds {\n if temporaryIgnoredGuild != channel.GuildID {\n newTemporaryIgnoredGuilds = append(newTemporaryIgnoredGuilds, temporaryIgnoredGuild)\n }\n }\n temporaryIgnoredGuilds = newTemporaryIgnoredGuilds\n _, err = session.ChannelMessageSend(msg.ChannelID, \"Enabled EXP Processing for this server again.\")\n helpers.Relax(err)\n _, err = session.ChannelMessageSend(msg.ChannelID, fmt.Sprintf(\"<@%s> Done!\", msg.Author.ID))\n helpers.Relax(err)\n return\n })\n return\n }\n targetUser, err = helpers.GetUserFromMention(args[0])\n if targetUser == nil || targetUser.ID == \"\" {\n _, err := session.ChannelMessageSend(msg.ChannelID, helpers.GetText(\"bot.arguments.invalid\"))\n helpers.Relax(err)\n return\n }\n }\n\n var levelsServersUser []DB_Levels_ServerUser\n listCursor, err := rethink.Table(\"levels_serverusers\").Filter(\n rethink.Row.Field(\"userid\").Eq(targetUser.ID),\n ).Run(helpers.GetDB())\n helpers.Relax(err)\n defer listCursor.Close()\n err = listCursor.All(&levelsServersUser)\n\n if err == rethink.ErrEmptyResult {\n _, err := session.ChannelMessageSend(msg.ChannelID, helpers.GetText(\"plugins.levels.level-no-stats\"))\n helpers.Relax(err)\n return\n } else if err != nil {\n helpers.Relax(err)\n }\n\n var levelThisServerUser DB_Levels_ServerUser\n var totalExp int64\n for _, levelsServerUser := range levelsServersUser {\n if levelsServerUser.GuildID == channel.GuildID {\n levelThisServerUser = levelsServerUser\n }\n totalExp += levelsServerUser.Exp\n }\n\n if totalExp <= 0 {\n _, err := session.ChannelMessageSend(msg.ChannelID, helpers.GetText(\"plugins.levels.level-no-stats\"))\n helpers.Relax(err)\n return\n }\n\n currentMember, err := session.GuildMember(channel.GuildID, levelThisServerUser.UserID)\n fullUsername := currentMember.User.Username\n if currentMember.Nick != \"\" {\n fullUsername += \" ~ \" + currentMember.Nick\n }\n\n userLevelEmbed := &discordgo.MessageEmbed{\n Color: 0x0FADED,\n Title: helpers.GetTextF(\"plugins.levels.user-embed-title\", fullUsername),\n \/\/Description: \"\",\n \/\/Footer: &discordgo.MessageEmbedFooter{Text: helpers.GetText(\"plugins.stats.voicestats-embed-footer\")},\n Fields: []*discordgo.MessageEmbedField{\n &discordgo.MessageEmbedField{\n Name: \"Level\",\n Value: strconv.Itoa(m.getLevelFromExp(levelThisServerUser.Exp)),\n Inline: true,\n },\n &discordgo.MessageEmbedField{\n Name: \"Level Progress\",\n Value: strconv.Itoa(m.getProgressToNextLevelFromExp(levelThisServerUser.Exp)) + \" %\",\n Inline: true,\n },\n &discordgo.MessageEmbedField{\n Name: \":white_circle:\",\n Value: \":white_circle:\",\n Inline: true,\n },\n &discordgo.MessageEmbedField{\n Name: \"Global Level\",\n Value: strconv.Itoa(m.getLevelFromExp(totalExp)),\n Inline: true,\n },\n &discordgo.MessageEmbedField{\n Name: \"Global Level Progress\",\n Value: strconv.Itoa(m.getProgressToNextLevelFromExp(totalExp)) + \" %\",\n Inline: true,\n },\n &discordgo.MessageEmbedField{\n Name: \":white_circle:\",\n Value: \":white_circle:\",\n Inline: true,\n },\n },\n }\n\n _, err = session.ChannelMessageSendEmbed(msg.ChannelID, userLevelEmbed)\n helpers.Relax(err)\n return\n }\n\n}\n\nfunc (m *Levels) OnMessage(content string, msg *discordgo.Message, session *discordgo.Session) {\n m.ProcessMessage(msg, session)\n}\n\nfunc (m *Levels) ProcessMessage(msg *discordgo.Message, session *discordgo.Session) {\n channel, err := session.Channel(msg.ChannelID)\n helpers.Relax(err)\n \/\/ ignore temporary ignored guilds\n for _, temporaryIgnoredGuild := range temporaryIgnoredGuilds {\n if temporaryIgnoredGuild == channel.GuildID {\n return\n }\n }\n \/\/ ignore bot messages\n if msg.Author.Bot == true {\n return\n }\n \/\/ ignore commands\n prefix := helpers.GetPrefixForServer(channel.GuildID)\n if prefix != \"\" {\n if strings.HasPrefix(msg.Content, prefix) {\n return\n }\n }\n \/\/ check if bucket is empty\n if !m.BucketHasKeys(channel.GuildID + msg.Author.ID) {\n \/\/m.BucketSet(channel.GuildID+msg.Author.ID, -1)\n return\n }\n\n err = m.BucketDrain(1, channel.GuildID+msg.Author.ID)\n helpers.Relax(err)\n\n levelsServerUser := m.getLevelsServerUserOrCreateNew(channel.GuildID, msg.Author.ID)\n levelsServerUser.Exp += m.getRandomExpForMessage()\n m.setLevelsServerUser(levelsServerUser)\n}\n\nfunc (m *Levels) OnGuildMemberAdd(member *discordgo.Member, session *discordgo.Session) {\n\n}\n\nfunc (m *Levels) OnGuildMemberRemove(member *discordgo.Member, session *discordgo.Session) {\n\n}\n\nfunc (m *Levels) getLevelsServerUserOrCreateNew(guildid string, userid string) DB_Levels_ServerUser {\n var levelsServerUser DB_Levels_ServerUser\n listCursor, err := rethink.Table(\"levels_serverusers\").Filter(\n rethink.Row.Field(\"guildid\").Eq(guildid),\n ).Filter(\n rethink.Row.Field(\"userid\").Eq(userid),\n ).Run(helpers.GetDB())\n helpers.Relax(err)\n defer listCursor.Close()\n err = listCursor.One(&levelsServerUser)\n\n if err == rethink.ErrEmptyResult {\n insert := rethink.Table(\"levels_serverusers\").Insert(DB_Levels_ServerUser{GuildID: guildid, UserID: userid})\n _, e := insert.RunWrite(helpers.GetDB())\n if e != nil {\n panic(e)\n } else {\n return m.getLevelsServerUserOrCreateNew(guildid, userid)\n }\n } else if err != nil {\n panic(err)\n }\n\n return levelsServerUser\n}\n\nfunc (m *Levels) setLevelsServerUser(entry DB_Levels_ServerUser) {\n _, err := rethink.Table(\"levels_serverusers\").Update(entry).Run(helpers.GetDB())\n helpers.Relax(err)\n}\n\nfunc (m *Levels) getLevelFromExp(exp int64) int {\n calculatedLevel := 0.1 * math.Sqrt(float64(exp))\n\n return int(math.Floor(calculatedLevel))\n}\n\nfunc (m *Levels) getExpForLevel(level int) int64 {\n if level <= 0 {\n return 0\n }\n\n calculatedExp := math.Pow(float64(level)\/0.1, 2)\n return int64(calculatedExp)\n}\n\nfunc (m *Levels) getProgressToNextLevelFromExp(exp int64) int {\n expLevelCurrently := exp - m.getExpForLevel(m.getLevelFromExp(exp))\n expLevelNext := m.getExpForLevel((m.getLevelFromExp(exp) + 1)) - m.getExpForLevel(m.getLevelFromExp(exp))\n return int(expLevelCurrently \/ (expLevelNext \/ 100))\n}\n\nfunc (m *Levels) getRandomExpForMessage() int64 {\n min := 10\n max := 15\n rand.Seed(time.Now().Unix())\n return int64(rand.Intn(max-min) + min)\n}\n\nfunc (b *Levels) BucketInit() {\n b.Lock()\n b.buckets = make(map[string]int8)\n b.Unlock()\n\n go b.BucketRefiller()\n}\n\n\/\/ Refills user buckets in a set interval\nfunc (b *Levels) BucketRefiller() {\n for {\n b.Lock()\n for user, keys := range b.buckets {\n \/\/ Chill zone\n if keys == -1 {\n b.buckets[user]++\n continue\n }\n\n \/\/ Chill zone exit\n if keys == 0 {\n b.buckets[user] = BUCKET_INITIAL_FILL\n continue\n }\n\n \/\/ More free keys for nice users :3\n if keys < BUCKET_UPPER_BOUND {\n b.buckets[user] += DROP_SIZE\n continue\n }\n }\n b.Unlock()\n\n time.Sleep(DROP_INTERVAL)\n }\n}\n\n\/\/ Check if the user has a bucket. If not create one\nfunc (b *Levels) CreateBucketIfNotExists(user string) {\n b.RLock()\n _, e := b.buckets[user]\n b.RUnlock()\n\n if !e {\n b.Lock()\n b.buckets[user] = BUCKET_INITIAL_FILL\n b.Unlock()\n }\n}\n\n\/\/ Drains $amount from $user if he has enough keys left\nfunc (b *Levels) BucketDrain(amount int8, user string) error {\n b.CreateBucketIfNotExists(user)\n\n \/\/ Check if there are enough keys left\n b.RLock()\n userAmount := b.buckets[user]\n b.RUnlock()\n\n if amount > userAmount {\n return errors.New(\"No keys left\")\n }\n\n \/\/ Remove keys from bucket\n b.Lock()\n b.buckets[user] -= amount\n b.Unlock()\n\n return nil\n}\n\n\/\/ Check if the user still has keys\nfunc (b *Levels) BucketHasKeys(user string) bool {\n b.CreateBucketIfNotExists(user)\n\n b.RLock()\n defer b.RUnlock()\n\n return b.buckets[user] > 0\n}\n\nfunc (b *Levels) BucketGet(user string) int8 {\n b.RLock()\n defer b.RUnlock()\n\n return b.buckets[user]\n}\n\nfunc (b *Levels) BucketSet(user string, value int8) {\n b.Lock()\n b.buckets[user] = value\n b.Unlock()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2015 Mattermost, Inc. All Rights Reserved.\n\/\/ See License.txt for license information.\n\npackage store\n\nimport (\n\t\"github.com\/mattermost\/platform\/model\"\n)\n\ntype SqlWebhookStore struct {\n\t*SqlStore\n}\n\nfunc NewSqlWebhookStore(sqlStore *SqlStore) WebhookStore {\n\ts := &SqlWebhookStore{sqlStore}\n\n\tfor _, db := range sqlStore.GetAllConns() {\n\t\ttable := db.AddTableWithName(model.IncomingWebhook{}, \"IncomingWebhooks\").SetKeys(false, \"Id\")\n\t\ttable.ColMap(\"Id\").SetMaxSize(26)\n\t\ttable.ColMap(\"UserId\").SetMaxSize(26)\n\t\ttable.ColMap(\"ChannelId\").SetMaxSize(26)\n\t\ttable.ColMap(\"TeamId\").SetMaxSize(26)\n\n\t\ttableo := db.AddTableWithName(model.OutgoingWebhook{}, \"OutgoingWebhooks\").SetKeys(false, \"Id\")\n\t\ttableo.ColMap(\"Id\").SetMaxSize(26)\n\t\ttableo.ColMap(\"Token\").SetMaxSize(26)\n\t\ttableo.ColMap(\"CreatorId\").SetMaxSize(26)\n\t\ttableo.ColMap(\"ChannelId\").SetMaxSize(26)\n\t\ttableo.ColMap(\"TeamId\").SetMaxSize(26)\n\t\ttableo.ColMap(\"TriggerWords\").SetMaxSize(1024)\n\t\ttableo.ColMap(\"CallbackURLs\").SetMaxSize(1024)\n\t}\n\n\treturn s\n}\n\nfunc (s SqlWebhookStore) UpgradeSchemaIfNeeded() {\n}\n\nfunc (s SqlWebhookStore) CreateIndexesIfNotExists() {\n\ts.CreateIndexIfNotExists(\"idx_incoming_webhook_user_id\", \"IncomingWebhooks\", \"UserId\")\n\ts.CreateIndexIfNotExists(\"idx_incoming_webhook_team_id\", \"IncomingWebhooks\", \"TeamId\")\n\ts.CreateIndexIfNotExists(\"idx_outgoing_webhook_channel_id\", \"OutgoingWebhooks\", \"ChannelId\")\n}\n\nfunc (s SqlWebhookStore) SaveIncoming(webhook *model.IncomingWebhook) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\tif len(webhook.Id) > 0 {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.SaveIncoming\",\n\t\t\t\t\"You cannot overwrite an existing IncomingWebhook\", \"id=\"+webhook.Id)\n\t\t\tstoreChannel <- result\n\t\t\tclose(storeChannel)\n\t\t\treturn\n\t\t}\n\n\t\twebhook.PreSave()\n\t\tif result.Err = webhook.IsValid(); result.Err != nil {\n\t\t\tstoreChannel <- result\n\t\t\tclose(storeChannel)\n\t\t\treturn\n\t\t}\n\n\t\tif err := s.GetMaster().Insert(webhook); err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.SaveIncoming\", \"We couldn't save the IncomingWebhook\", \"id=\"+webhook.Id+\", \"+err.Error())\n\t\t} else {\n\t\t\tresult.Data = webhook\n\t\t}\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) GetIncoming(id string) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\tvar webhook model.IncomingWebhook\n\n\t\tif err := s.GetReplica().SelectOne(&webhook, \"SELECT * FROM IncomingWebhooks WHERE Id = :Id AND DeleteAt = 0\", map[string]interface{}{\"Id\": id}); err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.GetIncoming\", \"We couldn't get the webhook\", \"id=\"+id+\", err=\"+err.Error())\n\t\t}\n\n\t\tresult.Data = &webhook\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) DeleteIncoming(webhookId string, time int64) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\t_, err := s.GetMaster().Exec(\"Update IncomingWebhooks SET DeleteAt = :DeleteAt, UpdateAt = :UpdateAt WHERE Id = :Id\", map[string]interface{}{\"DeleteAt\": time, \"UpdateAt\": time, \"Id\": webhookId})\n\t\tif err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.DeleteIncoming\", \"We couldn't delete the webhook\", \"id=\"+webhookId+\", err=\"+err.Error())\n\t\t}\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) GetIncomingByUser(userId string) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\tvar webhooks []*model.IncomingWebhook\n\n\t\tif _, err := s.GetReplica().Select(&webhooks, \"SELECT * FROM IncomingWebhooks WHERE UserId = :UserId AND DeleteAt = 0\", map[string]interface{}{\"UserId\": userId}); err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.GetIncomingByUser\", \"We couldn't get the webhook\", \"userId=\"+userId+\", err=\"+err.Error())\n\t\t}\n\n\t\tresult.Data = webhooks\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) SaveOutgoing(webhook *model.OutgoingWebhook) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\tif len(webhook.Id) > 0 {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.SaveOutgoing\",\n\t\t\t\t\"You cannot overwrite an existing OutgoingWebhook\", \"id=\"+webhook.Id)\n\t\t\tstoreChannel <- result\n\t\t\tclose(storeChannel)\n\t\t\treturn\n\t\t}\n\n\t\twebhook.PreSave()\n\t\tif result.Err = webhook.IsValid(); result.Err != nil {\n\t\t\tstoreChannel <- result\n\t\t\tclose(storeChannel)\n\t\t\treturn\n\t\t}\n\n\t\tif err := s.GetMaster().Insert(webhook); err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.SaveOutgoing\", \"We couldn't save the OutgoingWebhook\", \"id=\"+webhook.Id+\", \"+err.Error())\n\t\t} else {\n\t\t\tresult.Data = webhook\n\t\t}\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) GetOutgoing(id string) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\tvar webhook model.OutgoingWebhook\n\n\t\tif err := s.GetReplica().SelectOne(&webhook, \"SELECT * FROM OutgoingWebhooks WHERE Id = :Id AND DeleteAt = 0\", map[string]interface{}{\"Id\": id}); err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.GetOutgoing\", \"We couldn't get the webhook\", \"id=\"+id+\", err=\"+err.Error())\n\t\t}\n\n\t\tresult.Data = &webhook\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) GetOutgoingByCreator(userId string) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\tvar webhooks []*model.OutgoingWebhook\n\n\t\tif _, err := s.GetReplica().Select(&webhooks, \"SELECT * FROM OutgoingWebhooks WHERE CreatorId = :UserId AND DeleteAt = 0\", map[string]interface{}{\"UserId\": userId}); err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.GetOutgoingByCreator\", \"We couldn't get the webhooks\", \"userId=\"+userId+\", err=\"+err.Error())\n\t\t}\n\n\t\tresult.Data = webhooks\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) GetOutgoingByChannel(channelId string) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\tvar webhooks []*model.OutgoingWebhook\n\n\t\tif _, err := s.GetReplica().Select(&webhooks, \"SELECT * FROM OutgoingWebhooks WHERE ChannelId = :ChannelId AND DeleteAt = 0\", map[string]interface{}{\"ChannelId\": channelId}); err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.GetOutgoingByChannel\", \"We couldn't get the webhooks\", \"channelId=\"+channelId+\", err=\"+err.Error())\n\t\t}\n\n\t\tresult.Data = webhooks\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) GetOutgoingByTeam(teamId string) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\tvar webhooks []*model.OutgoingWebhook\n\n\t\tif _, err := s.GetReplica().Select(&webhooks, \"SELECT * FROM OutgoingWebhooks WHERE TeamId = :TeamId AND DeleteAt = 0\", map[string]interface{}{\"TeamId\": teamId}); err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.GetOutgoingByTeam\", \"We couldn't get the webhooks\", \"teamId=\"+teamId+\", err=\"+err.Error())\n\t\t}\n\n\t\tresult.Data = webhooks\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) DeleteOutgoing(webhookId string, time int64) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\t_, err := s.GetMaster().Exec(\"Update OutgoingWebhooks SET DeleteAt = :DeleteAt, UpdateAt = :UpdateAt WHERE Id = :Id\", map[string]interface{}{\"DeleteAt\": time, \"UpdateAt\": time, \"Id\": webhookId})\n\t\tif err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.DeleteOutgoing\", \"We couldn't delete the webhook\", \"id=\"+webhookId+\", err=\"+err.Error())\n\t\t}\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) UpdateOutgoing(hook *model.OutgoingWebhook) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\thook.UpdateAt = model.GetMillis()\n\n\t\tif _, err := s.GetMaster().Update(hook); err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.UpdateOutgoing\", \"We couldn't update the webhook\", \"id=\"+hook.Id+\", \"+err.Error())\n\t\t} else {\n\t\t\tresult.Data = hook\n\t\t}\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n<commit_msg>Update db index for outgoing webhook<commit_after>\/\/ Copyright (c) 2015 Mattermost, Inc. All Rights Reserved.\n\/\/ See License.txt for license information.\n\npackage store\n\nimport (\n\t\"github.com\/mattermost\/platform\/model\"\n)\n\ntype SqlWebhookStore struct {\n\t*SqlStore\n}\n\nfunc NewSqlWebhookStore(sqlStore *SqlStore) WebhookStore {\n\ts := &SqlWebhookStore{sqlStore}\n\n\tfor _, db := range sqlStore.GetAllConns() {\n\t\ttable := db.AddTableWithName(model.IncomingWebhook{}, \"IncomingWebhooks\").SetKeys(false, \"Id\")\n\t\ttable.ColMap(\"Id\").SetMaxSize(26)\n\t\ttable.ColMap(\"UserId\").SetMaxSize(26)\n\t\ttable.ColMap(\"ChannelId\").SetMaxSize(26)\n\t\ttable.ColMap(\"TeamId\").SetMaxSize(26)\n\n\t\ttableo := db.AddTableWithName(model.OutgoingWebhook{}, \"OutgoingWebhooks\").SetKeys(false, \"Id\")\n\t\ttableo.ColMap(\"Id\").SetMaxSize(26)\n\t\ttableo.ColMap(\"Token\").SetMaxSize(26)\n\t\ttableo.ColMap(\"CreatorId\").SetMaxSize(26)\n\t\ttableo.ColMap(\"ChannelId\").SetMaxSize(26)\n\t\ttableo.ColMap(\"TeamId\").SetMaxSize(26)\n\t\ttableo.ColMap(\"TriggerWords\").SetMaxSize(1024)\n\t\ttableo.ColMap(\"CallbackURLs\").SetMaxSize(1024)\n\t}\n\n\treturn s\n}\n\nfunc (s SqlWebhookStore) UpgradeSchemaIfNeeded() {\n}\n\nfunc (s SqlWebhookStore) CreateIndexesIfNotExists() {\n\ts.CreateIndexIfNotExists(\"idx_incoming_webhook_user_id\", \"IncomingWebhooks\", \"UserId\")\n\ts.CreateIndexIfNotExists(\"idx_incoming_webhook_team_id\", \"IncomingWebhooks\", \"TeamId\")\n\ts.CreateIndexIfNotExists(\"idx_outgoing_webhook_team_id\", \"OutgoingWebhooks\", \"TeamId\")\n}\n\nfunc (s SqlWebhookStore) SaveIncoming(webhook *model.IncomingWebhook) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\tif len(webhook.Id) > 0 {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.SaveIncoming\",\n\t\t\t\t\"You cannot overwrite an existing IncomingWebhook\", \"id=\"+webhook.Id)\n\t\t\tstoreChannel <- result\n\t\t\tclose(storeChannel)\n\t\t\treturn\n\t\t}\n\n\t\twebhook.PreSave()\n\t\tif result.Err = webhook.IsValid(); result.Err != nil {\n\t\t\tstoreChannel <- result\n\t\t\tclose(storeChannel)\n\t\t\treturn\n\t\t}\n\n\t\tif err := s.GetMaster().Insert(webhook); err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.SaveIncoming\", \"We couldn't save the IncomingWebhook\", \"id=\"+webhook.Id+\", \"+err.Error())\n\t\t} else {\n\t\t\tresult.Data = webhook\n\t\t}\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) GetIncoming(id string) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\tvar webhook model.IncomingWebhook\n\n\t\tif err := s.GetReplica().SelectOne(&webhook, \"SELECT * FROM IncomingWebhooks WHERE Id = :Id AND DeleteAt = 0\", map[string]interface{}{\"Id\": id}); err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.GetIncoming\", \"We couldn't get the webhook\", \"id=\"+id+\", err=\"+err.Error())\n\t\t}\n\n\t\tresult.Data = &webhook\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) DeleteIncoming(webhookId string, time int64) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\t_, err := s.GetMaster().Exec(\"Update IncomingWebhooks SET DeleteAt = :DeleteAt, UpdateAt = :UpdateAt WHERE Id = :Id\", map[string]interface{}{\"DeleteAt\": time, \"UpdateAt\": time, \"Id\": webhookId})\n\t\tif err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.DeleteIncoming\", \"We couldn't delete the webhook\", \"id=\"+webhookId+\", err=\"+err.Error())\n\t\t}\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) GetIncomingByUser(userId string) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\tvar webhooks []*model.IncomingWebhook\n\n\t\tif _, err := s.GetReplica().Select(&webhooks, \"SELECT * FROM IncomingWebhooks WHERE UserId = :UserId AND DeleteAt = 0\", map[string]interface{}{\"UserId\": userId}); err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.GetIncomingByUser\", \"We couldn't get the webhook\", \"userId=\"+userId+\", err=\"+err.Error())\n\t\t}\n\n\t\tresult.Data = webhooks\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) SaveOutgoing(webhook *model.OutgoingWebhook) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\tif len(webhook.Id) > 0 {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.SaveOutgoing\",\n\t\t\t\t\"You cannot overwrite an existing OutgoingWebhook\", \"id=\"+webhook.Id)\n\t\t\tstoreChannel <- result\n\t\t\tclose(storeChannel)\n\t\t\treturn\n\t\t}\n\n\t\twebhook.PreSave()\n\t\tif result.Err = webhook.IsValid(); result.Err != nil {\n\t\t\tstoreChannel <- result\n\t\t\tclose(storeChannel)\n\t\t\treturn\n\t\t}\n\n\t\tif err := s.GetMaster().Insert(webhook); err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.SaveOutgoing\", \"We couldn't save the OutgoingWebhook\", \"id=\"+webhook.Id+\", \"+err.Error())\n\t\t} else {\n\t\t\tresult.Data = webhook\n\t\t}\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) GetOutgoing(id string) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\tvar webhook model.OutgoingWebhook\n\n\t\tif err := s.GetReplica().SelectOne(&webhook, \"SELECT * FROM OutgoingWebhooks WHERE Id = :Id AND DeleteAt = 0\", map[string]interface{}{\"Id\": id}); err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.GetOutgoing\", \"We couldn't get the webhook\", \"id=\"+id+\", err=\"+err.Error())\n\t\t}\n\n\t\tresult.Data = &webhook\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) GetOutgoingByCreator(userId string) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\tvar webhooks []*model.OutgoingWebhook\n\n\t\tif _, err := s.GetReplica().Select(&webhooks, \"SELECT * FROM OutgoingWebhooks WHERE CreatorId = :UserId AND DeleteAt = 0\", map[string]interface{}{\"UserId\": userId}); err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.GetOutgoingByCreator\", \"We couldn't get the webhooks\", \"userId=\"+userId+\", err=\"+err.Error())\n\t\t}\n\n\t\tresult.Data = webhooks\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) GetOutgoingByChannel(channelId string) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\tvar webhooks []*model.OutgoingWebhook\n\n\t\tif _, err := s.GetReplica().Select(&webhooks, \"SELECT * FROM OutgoingWebhooks WHERE ChannelId = :ChannelId AND DeleteAt = 0\", map[string]interface{}{\"ChannelId\": channelId}); err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.GetOutgoingByChannel\", \"We couldn't get the webhooks\", \"channelId=\"+channelId+\", err=\"+err.Error())\n\t\t}\n\n\t\tresult.Data = webhooks\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) GetOutgoingByTeam(teamId string) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\tvar webhooks []*model.OutgoingWebhook\n\n\t\tif _, err := s.GetReplica().Select(&webhooks, \"SELECT * FROM OutgoingWebhooks WHERE TeamId = :TeamId AND DeleteAt = 0\", map[string]interface{}{\"TeamId\": teamId}); err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.GetOutgoingByTeam\", \"We couldn't get the webhooks\", \"teamId=\"+teamId+\", err=\"+err.Error())\n\t\t}\n\n\t\tresult.Data = webhooks\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) DeleteOutgoing(webhookId string, time int64) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\t_, err := s.GetMaster().Exec(\"Update OutgoingWebhooks SET DeleteAt = :DeleteAt, UpdateAt = :UpdateAt WHERE Id = :Id\", map[string]interface{}{\"DeleteAt\": time, \"UpdateAt\": time, \"Id\": webhookId})\n\t\tif err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.DeleteOutgoing\", \"We couldn't delete the webhook\", \"id=\"+webhookId+\", err=\"+err.Error())\n\t\t}\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n\nfunc (s SqlWebhookStore) UpdateOutgoing(hook *model.OutgoingWebhook) StoreChannel {\n\tstoreChannel := make(StoreChannel)\n\n\tgo func() {\n\t\tresult := StoreResult{}\n\n\t\thook.UpdateAt = model.GetMillis()\n\n\t\tif _, err := s.GetMaster().Update(hook); err != nil {\n\t\t\tresult.Err = model.NewAppError(\"SqlWebhookStore.UpdateOutgoing\", \"We couldn't update the webhook\", \"id=\"+hook.Id+\", \"+err.Error())\n\t\t} else {\n\t\t\tresult.Data = hook\n\t\t}\n\n\t\tstoreChannel <- result\n\t\tclose(storeChannel)\n\t}()\n\n\treturn storeChannel\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage validation\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"k8s.io\/kops\/pkg\/apis\/kops\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\"\n\t\"k8s.io\/kops\/util\/pkg\/slice\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\/arn\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/validation\/field\"\n)\n\n\/\/ ValidateInstanceGroup is responsible for validating the configuration of a instancegroup\nfunc ValidateInstanceGroup(g *kops.InstanceGroup) error {\n\tif g.ObjectMeta.Name == \"\" {\n\t\treturn field.Required(field.NewPath(\"Name\"), \"\")\n\t}\n\n\tswitch g.Spec.Role {\n\tcase \"\":\n\t\treturn field.Required(field.NewPath(\"Role\"), \"Role must be set\")\n\tcase kops.InstanceGroupRoleMaster:\n\tcase kops.InstanceGroupRoleNode:\n\tcase kops.InstanceGroupRoleBastion:\n\tdefault:\n\t\treturn field.Invalid(field.NewPath(\"Role\"), g.Spec.Role, \"Unknown role\")\n\t}\n\n\tif g.Spec.Tenancy != \"\" {\n\t\tif g.Spec.Tenancy != \"default\" && g.Spec.Tenancy != \"dedicated\" && g.Spec.Tenancy != \"host\" {\n\t\t\treturn field.Invalid(field.NewPath(\"Tenancy\"), g.Spec.Tenancy, \"Unknown tenancy. Must be Default, Dedicated or Host.\")\n\t\t}\n\t}\n\n\tif g.Spec.MaxSize != nil && g.Spec.MinSize != nil {\n\t\tif *g.Spec.MaxSize < *g.Spec.MinSize {\n\t\t\treturn field.Invalid(field.NewPath(\"MaxSize\"), *g.Spec.MaxSize, \"maxSize must be greater than or equal to minSize.\")\n\t\t}\n\t}\n\n\tif fi.Int32Value(g.Spec.RootVolumeIops) < 0 {\n\t\treturn field.Invalid(field.NewPath(\"RootVolumeIops\"), g.Spec.RootVolumeIops, \"RootVolumeIops must be greater than 0\")\n\t}\n\n\t\/\/ @check all the hooks are valid in this instancegroup\n\tfor i := range g.Spec.Hooks {\n\t\tif errs := validateHookSpec(&g.Spec.Hooks[i], field.NewPath(\"hooks\").Index(i)); len(errs) > 0 {\n\t\t\treturn errs.ToAggregate()\n\t\t}\n\t}\n\n\t\/\/ @check the fileAssets for this instancegroup are valid\n\tfor i := range g.Spec.FileAssets {\n\t\tif errs := validateFileAssetSpec(&g.Spec.FileAssets[i], field.NewPath(\"fileAssets\").Index(i)); len(errs) > 0 {\n\t\t\treturn errs.ToAggregate()\n\t\t}\n\t}\n\n\tif g.IsMaster() {\n\t\tif len(g.Spec.Subnets) == 0 {\n\t\t\treturn fmt.Errorf(\"master InstanceGroup %s did not specify any Subnets\", g.ObjectMeta.Name)\n\t\t}\n\t}\n\n\tif g.Spec.MixedInstancesPolicy != nil {\n\t\tif errs := validatedMixedInstancesPolicy(field.NewPath(g.Name), g.Spec.MixedInstancesPolicy, g); len(errs) > 0 {\n\t\t\treturn errs.ToAggregate()\n\t\t}\n\t}\n\n\tif len(g.Spec.AdditionalUserData) > 0 {\n\t\tfor _, UserDataInfo := range g.Spec.AdditionalUserData {\n\t\t\terr := validateExtraUserData(&UserDataInfo)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ @step: iterate and check the volume specs\n\tfor i, x := range g.Spec.Volumes {\n\t\tdevices := make(map[string]bool)\n\t\tpath := field.NewPath(\"volumes\").Index(i)\n\n\t\tif err := validateVolumeSpec(path, x); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ @check the device name has not been used already\n\t\tif _, found := devices[x.Device]; found {\n\t\t\treturn field.Invalid(path.Child(\"device\"), x.Device, \"duplicate device name found in volumes\")\n\t\t}\n\n\t\tdevices[x.Device] = true\n\t}\n\n\t\/\/ @step: iterate and check the volume mount specs\n\tfor i, x := range g.Spec.VolumeMounts {\n\t\tused := make(map[string]bool)\n\t\tpath := field.NewPath(\"volumeMounts\").Index(i)\n\n\t\tif err := validateVolumeMountSpec(path, x); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, found := used[x.Device]; found {\n\t\t\treturn field.Invalid(path.Child(\"device\"), x.Device, \"duplicate device reference\")\n\t\t}\n\t\tif _, found := used[x.Path]; found {\n\t\t\treturn field.Invalid(path.Child(\"path\"), x.Path, \"duplicate mount path specified\")\n\t\t}\n\t}\n\n\tif err := validateInstanceProfile(g.Spec.IAM, field.NewPath(\"iam\")); err != nil {\n\t\treturn err\n\t}\n\n\tif g.Spec.RollingUpdate != nil {\n\t\tif errs := validateRollingUpdate(g.Spec.RollingUpdate, field.NewPath(\"rollingUpdate\")); len(errs) > 0 {\n\t\t\treturn errs.ToAggregate()\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ validatedMixedInstancesPolicy is responsible for validating the user input of a mixed instance policy\nfunc validatedMixedInstancesPolicy(path *field.Path, spec *kops.MixedInstancesPolicySpec, ig *kops.InstanceGroup) field.ErrorList {\n\tvar errs field.ErrorList\n\n\tif len(spec.Instances) < 2 {\n\t\terrs = append(errs, field.Invalid(path.Child(\"instances\"), spec.Instances, \"must be 2 or more instance types\"))\n\t}\n\t\/\/ @step: check the instances are validate\n\tfor i, x := range spec.Instances {\n\t\terrs = append(errs, awsValidateMachineType(path.Child(\"instances\").Index(i).Child(\"instanceType\"), x)...)\n\t}\n\n\tif spec.OnDemandBase != nil {\n\t\tif fi.Int64Value(spec.OnDemandBase) < 0 {\n\t\t\terrs = append(errs, field.Invalid(path.Child(\"onDemandBase\"), spec.OnDemandBase, \"cannot be less than zero\"))\n\t\t}\n\t\tif fi.Int64Value(spec.OnDemandBase) > int64(fi.Int32Value(ig.Spec.MaxSize)) {\n\t\t\terrs = append(errs, field.Invalid(path.Child(\"onDemandBase\"), spec.OnDemandBase, \"cannot be greater than max size\"))\n\t\t}\n\t}\n\n\tif spec.OnDemandAboveBase != nil {\n\t\tif fi.Int64Value(spec.OnDemandAboveBase) < 0 {\n\t\t\terrs = append(errs, field.Invalid(path.Child(\"onDemandAboveBase\"), spec.OnDemandAboveBase, \"cannot be less than 0\"))\n\t\t}\n\t\tif fi.Int64Value(spec.OnDemandAboveBase) > 100 {\n\t\t\terrs = append(errs, field.Invalid(path.Child(\"onDemandAboveBase\"), spec.OnDemandAboveBase, \"cannot be greater than 100\"))\n\t\t}\n\t}\n\n\tif spec.SpotAllocationStrategy != nil && !slice.Contains(kops.SpotAllocationStrategies, fi.StringValue(spec.SpotAllocationStrategy)) {\n\t\terrs = append(errs, field.Invalid(path.Child(\"spotAllocationStrategy\"), spec.SpotAllocationStrategy, \"unsupported spot allocation strategy\"))\n\t}\n\n\treturn errs\n}\n\n\/\/ validateVolumeSpec is responsible for checking a volume spec is ok\nfunc validateVolumeSpec(path *field.Path, v *kops.VolumeSpec) error {\n\tif v.Device == \"\" {\n\t\treturn field.Required(path.Child(\"device\"), \"device name required\")\n\t}\n\tif v.Size <= 0 {\n\t\treturn field.Invalid(path.Child(\"size\"), v.Size, \"must be greater than zero\")\n\t}\n\n\treturn nil\n}\n\n\/\/ validateVolumeMountSpec is responsible for checking the volume mount is ok\nfunc validateVolumeMountSpec(path *field.Path, spec *kops.VolumeMountSpec) error {\n\tif spec.Device == \"\" {\n\t\treturn field.Required(path.Child(\"device\"), \"device name required\")\n\t}\n\tif spec.Filesystem == \"\" {\n\t\treturn field.Required(path.Child(\"filesystem\"), \"filesystem type required\")\n\t}\n\tif spec.Path == \"\" {\n\t\treturn field.Required(path.Child(\"path\"), \"mount path required\")\n\t}\n\tif !slice.Contains(kops.SupportedFilesystems, spec.Filesystem) {\n\t\treturn field.Invalid(path.Child(\"filesystem\"), spec.Filesystem,\n\t\t\tfmt.Sprintf(\"unsupported filesystem, available types: %s\", strings.Join(kops.SupportedFilesystems, \",\")))\n\t}\n\n\treturn nil\n}\n\n\/\/ CrossValidateInstanceGroup performs validation of the instance group, including that it is consistent with the Cluster\n\/\/ It calls ValidateInstanceGroup, so all that validation is included.\nfunc CrossValidateInstanceGroup(g *kops.InstanceGroup, cluster *kops.Cluster, strict bool) error {\n\terr := ValidateInstanceGroup(g)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Check that instance groups are defined in subnets that are defined in the cluster\n\t{\n\t\tclusterSubnets := make(map[string]*kops.ClusterSubnetSpec)\n\t\tfor i := range cluster.Spec.Subnets {\n\t\t\ts := &cluster.Spec.Subnets[i]\n\t\t\tif clusterSubnets[s.Name] != nil {\n\t\t\t\treturn fmt.Errorf(\"subnets contained a duplicate value: %v\", s.Name)\n\t\t\t}\n\t\t\tclusterSubnets[s.Name] = s\n\t\t}\n\n\t\tfor _, z := range g.Spec.Subnets {\n\t\t\tif clusterSubnets[z] == nil {\n\t\t\t\treturn fmt.Errorf(\"InstanceGroup %q is configured in %q, but this is not configured as a Subnet in the cluster\", g.ObjectMeta.Name, z)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc validateExtraUserData(userData *kops.UserData) error {\n\tfieldPath := field.NewPath(\"AdditionalUserData\")\n\n\tif userData.Name == \"\" {\n\t\treturn field.Required(fieldPath.Child(\"Name\"), \"field must be set\")\n\t}\n\n\tif userData.Content == \"\" {\n\t\treturn field.Required(fieldPath.Child(\"Content\"), \"field must be set\")\n\t}\n\n\tswitch userData.Type {\n\tcase \"text\/x-include-once-url\":\n\tcase \"text\/x-include-url\":\n\tcase \"text\/cloud-config-archive\":\n\tcase \"text\/upstart-job\":\n\tcase \"text\/cloud-config\":\n\tcase \"text\/part-handler\":\n\tcase \"text\/x-shellscript\":\n\tcase \"text\/cloud-boothook\":\n\n\tdefault:\n\t\treturn field.Invalid(fieldPath.Child(\"Type\"), userData.Type, \"Invalid user-data content type\")\n\t}\n\n\treturn nil\n}\n\n\/\/ validateInstanceProfile checks the String values for the AuthProfile\nfunc validateInstanceProfile(v *kops.IAMProfileSpec, fldPath *field.Path) *field.Error {\n\tif v != nil && v.Profile != nil {\n\t\tinstanceProfileARN := *v.Profile\n\t\tparsedARN, err := arn.Parse(instanceProfileARN)\n\t\tif err != nil || !strings.HasPrefix(parsedARN.Resource, \"instance-profile\") {\n\t\t\treturn field.Invalid(fldPath.Child(\"Profile\"), instanceProfileARN,\n\t\t\t\t\"Instance Group IAM Instance Profile must be a valid aws arn such as arn:aws:iam::123456789012:instance-profile\/KopsExampleRole\")\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Remove unnecessary validation<commit_after>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage validation\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"k8s.io\/kops\/pkg\/apis\/kops\"\n\t\"k8s.io\/kops\/upup\/pkg\/fi\"\n\t\"k8s.io\/kops\/util\/pkg\/slice\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\/arn\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/validation\/field\"\n)\n\n\/\/ ValidateInstanceGroup is responsible for validating the configuration of a instancegroup\nfunc ValidateInstanceGroup(g *kops.InstanceGroup) error {\n\tif g.ObjectMeta.Name == \"\" {\n\t\treturn field.Required(field.NewPath(\"Name\"), \"\")\n\t}\n\n\tswitch g.Spec.Role {\n\tcase \"\":\n\t\treturn field.Required(field.NewPath(\"Role\"), \"Role must be set\")\n\tcase kops.InstanceGroupRoleMaster:\n\tcase kops.InstanceGroupRoleNode:\n\tcase kops.InstanceGroupRoleBastion:\n\tdefault:\n\t\treturn field.Invalid(field.NewPath(\"Role\"), g.Spec.Role, \"Unknown role\")\n\t}\n\n\tif g.Spec.Tenancy != \"\" {\n\t\tif g.Spec.Tenancy != \"default\" && g.Spec.Tenancy != \"dedicated\" && g.Spec.Tenancy != \"host\" {\n\t\t\treturn field.Invalid(field.NewPath(\"Tenancy\"), g.Spec.Tenancy, \"Unknown tenancy. Must be Default, Dedicated or Host.\")\n\t\t}\n\t}\n\n\tif g.Spec.MaxSize != nil && g.Spec.MinSize != nil {\n\t\tif *g.Spec.MaxSize < *g.Spec.MinSize {\n\t\t\treturn field.Invalid(field.NewPath(\"MaxSize\"), *g.Spec.MaxSize, \"maxSize must be greater than or equal to minSize.\")\n\t\t}\n\t}\n\n\tif fi.Int32Value(g.Spec.RootVolumeIops) < 0 {\n\t\treturn field.Invalid(field.NewPath(\"RootVolumeIops\"), g.Spec.RootVolumeIops, \"RootVolumeIops must be greater than 0\")\n\t}\n\n\t\/\/ @check all the hooks are valid in this instancegroup\n\tfor i := range g.Spec.Hooks {\n\t\tif errs := validateHookSpec(&g.Spec.Hooks[i], field.NewPath(\"hooks\").Index(i)); len(errs) > 0 {\n\t\t\treturn errs.ToAggregate()\n\t\t}\n\t}\n\n\t\/\/ @check the fileAssets for this instancegroup are valid\n\tfor i := range g.Spec.FileAssets {\n\t\tif errs := validateFileAssetSpec(&g.Spec.FileAssets[i], field.NewPath(\"fileAssets\").Index(i)); len(errs) > 0 {\n\t\t\treturn errs.ToAggregate()\n\t\t}\n\t}\n\n\tif g.IsMaster() {\n\t\tif len(g.Spec.Subnets) == 0 {\n\t\t\treturn fmt.Errorf(\"master InstanceGroup %s did not specify any Subnets\", g.ObjectMeta.Name)\n\t\t}\n\t}\n\n\tif g.Spec.MixedInstancesPolicy != nil {\n\t\tif errs := validatedMixedInstancesPolicy(field.NewPath(g.Name), g.Spec.MixedInstancesPolicy, g); len(errs) > 0 {\n\t\t\treturn errs.ToAggregate()\n\t\t}\n\t}\n\n\tif len(g.Spec.AdditionalUserData) > 0 {\n\t\tfor _, UserDataInfo := range g.Spec.AdditionalUserData {\n\t\t\terr := validateExtraUserData(&UserDataInfo)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ @step: iterate and check the volume specs\n\tfor i, x := range g.Spec.Volumes {\n\t\tdevices := make(map[string]bool)\n\t\tpath := field.NewPath(\"volumes\").Index(i)\n\n\t\tif err := validateVolumeSpec(path, x); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ @check the device name has not been used already\n\t\tif _, found := devices[x.Device]; found {\n\t\t\treturn field.Invalid(path.Child(\"device\"), x.Device, \"duplicate device name found in volumes\")\n\t\t}\n\n\t\tdevices[x.Device] = true\n\t}\n\n\t\/\/ @step: iterate and check the volume mount specs\n\tfor i, x := range g.Spec.VolumeMounts {\n\t\tused := make(map[string]bool)\n\t\tpath := field.NewPath(\"volumeMounts\").Index(i)\n\n\t\tif err := validateVolumeMountSpec(path, x); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, found := used[x.Device]; found {\n\t\t\treturn field.Invalid(path.Child(\"device\"), x.Device, \"duplicate device reference\")\n\t\t}\n\t\tif _, found := used[x.Path]; found {\n\t\t\treturn field.Invalid(path.Child(\"path\"), x.Path, \"duplicate mount path specified\")\n\t\t}\n\t}\n\n\tif err := validateInstanceProfile(g.Spec.IAM, field.NewPath(\"iam\")); err != nil {\n\t\treturn err\n\t}\n\n\tif g.Spec.RollingUpdate != nil {\n\t\tif errs := validateRollingUpdate(g.Spec.RollingUpdate, field.NewPath(\"rollingUpdate\")); len(errs) > 0 {\n\t\t\treturn errs.ToAggregate()\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ validatedMixedInstancesPolicy is responsible for validating the user input of a mixed instance policy\nfunc validatedMixedInstancesPolicy(path *field.Path, spec *kops.MixedInstancesPolicySpec, ig *kops.InstanceGroup) field.ErrorList {\n\tvar errs field.ErrorList\n\n\tif len(spec.Instances) < 2 {\n\t\terrs = append(errs, field.Invalid(path.Child(\"instances\"), spec.Instances, \"must be 2 or more instance types\"))\n\t}\n\t\/\/ @step: check the instances are validate\n\tfor i, x := range spec.Instances {\n\t\terrs = append(errs, awsValidateMachineType(path.Child(\"instances\").Index(i).Child(\"instanceType\"), x)...)\n\t}\n\n\tif spec.OnDemandBase != nil {\n\t\tif fi.Int64Value(spec.OnDemandBase) < 0 {\n\t\t\terrs = append(errs, field.Invalid(path.Child(\"onDemandBase\"), spec.OnDemandBase, \"cannot be less than zero\"))\n\t\t}\n\t\tif fi.Int64Value(spec.OnDemandBase) > int64(fi.Int32Value(ig.Spec.MaxSize)) {\n\t\t\terrs = append(errs, field.Invalid(path.Child(\"onDemandBase\"), spec.OnDemandBase, \"cannot be greater than max size\"))\n\t\t}\n\t}\n\n\tif spec.OnDemandAboveBase != nil {\n\t\tif fi.Int64Value(spec.OnDemandAboveBase) < 0 {\n\t\t\terrs = append(errs, field.Invalid(path.Child(\"onDemandAboveBase\"), spec.OnDemandAboveBase, \"cannot be less than 0\"))\n\t\t}\n\t\tif fi.Int64Value(spec.OnDemandAboveBase) > 100 {\n\t\t\terrs = append(errs, field.Invalid(path.Child(\"onDemandAboveBase\"), spec.OnDemandAboveBase, \"cannot be greater than 100\"))\n\t\t}\n\t}\n\n\tif spec.SpotAllocationStrategy != nil && !slice.Contains(kops.SpotAllocationStrategies, fi.StringValue(spec.SpotAllocationStrategy)) {\n\t\terrs = append(errs, field.Invalid(path.Child(\"spotAllocationStrategy\"), spec.SpotAllocationStrategy, \"unsupported spot allocation strategy\"))\n\t}\n\n\treturn errs\n}\n\n\/\/ validateVolumeSpec is responsible for checking a volume spec is ok\nfunc validateVolumeSpec(path *field.Path, v *kops.VolumeSpec) error {\n\tif v.Device == \"\" {\n\t\treturn field.Required(path.Child(\"device\"), \"device name required\")\n\t}\n\tif v.Size <= 0 {\n\t\treturn field.Invalid(path.Child(\"size\"), v.Size, \"must be greater than zero\")\n\t}\n\n\treturn nil\n}\n\n\/\/ validateVolumeMountSpec is responsible for checking the volume mount is ok\nfunc validateVolumeMountSpec(path *field.Path, spec *kops.VolumeMountSpec) error {\n\tif spec.Device == \"\" {\n\t\treturn field.Required(path.Child(\"device\"), \"device name required\")\n\t}\n\tif spec.Filesystem == \"\" {\n\t\treturn field.Required(path.Child(\"filesystem\"), \"filesystem type required\")\n\t}\n\tif spec.Path == \"\" {\n\t\treturn field.Required(path.Child(\"path\"), \"mount path required\")\n\t}\n\tif !slice.Contains(kops.SupportedFilesystems, spec.Filesystem) {\n\t\treturn field.Invalid(path.Child(\"filesystem\"), spec.Filesystem,\n\t\t\tfmt.Sprintf(\"unsupported filesystem, available types: %s\", strings.Join(kops.SupportedFilesystems, \",\")))\n\t}\n\n\treturn nil\n}\n\n\/\/ CrossValidateInstanceGroup performs validation of the instance group, including that it is consistent with the Cluster\n\/\/ It calls ValidateInstanceGroup, so all that validation is included.\nfunc CrossValidateInstanceGroup(g *kops.InstanceGroup, cluster *kops.Cluster, strict bool) error {\n\terr := ValidateInstanceGroup(g)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Check that instance groups are defined in subnets that are defined in the cluster\n\t{\n\t\tclusterSubnets := make(map[string]*kops.ClusterSubnetSpec)\n\t\tfor i := range cluster.Spec.Subnets {\n\t\t\ts := &cluster.Spec.Subnets[i]\n\t\t\tclusterSubnets[s.Name] = s\n\t\t}\n\n\t\tfor _, z := range g.Spec.Subnets {\n\t\t\tif clusterSubnets[z] == nil {\n\t\t\t\treturn fmt.Errorf(\"InstanceGroup %q is configured in %q, but this is not configured as a Subnet in the cluster\", g.ObjectMeta.Name, z)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc validateExtraUserData(userData *kops.UserData) error {\n\tfieldPath := field.NewPath(\"AdditionalUserData\")\n\n\tif userData.Name == \"\" {\n\t\treturn field.Required(fieldPath.Child(\"Name\"), \"field must be set\")\n\t}\n\n\tif userData.Content == \"\" {\n\t\treturn field.Required(fieldPath.Child(\"Content\"), \"field must be set\")\n\t}\n\n\tswitch userData.Type {\n\tcase \"text\/x-include-once-url\":\n\tcase \"text\/x-include-url\":\n\tcase \"text\/cloud-config-archive\":\n\tcase \"text\/upstart-job\":\n\tcase \"text\/cloud-config\":\n\tcase \"text\/part-handler\":\n\tcase \"text\/x-shellscript\":\n\tcase \"text\/cloud-boothook\":\n\n\tdefault:\n\t\treturn field.Invalid(fieldPath.Child(\"Type\"), userData.Type, \"Invalid user-data content type\")\n\t}\n\n\treturn nil\n}\n\n\/\/ validateInstanceProfile checks the String values for the AuthProfile\nfunc validateInstanceProfile(v *kops.IAMProfileSpec, fldPath *field.Path) *field.Error {\n\tif v != nil && v.Profile != nil {\n\t\tinstanceProfileARN := *v.Profile\n\t\tparsedARN, err := arn.Parse(instanceProfileARN)\n\t\tif err != nil || !strings.HasPrefix(parsedARN.Resource, \"instance-profile\") {\n\t\t\treturn field.Invalid(fldPath.Child(\"Profile\"), instanceProfileARN,\n\t\t\t\t\"Instance Group IAM Instance Profile must be a valid aws arn such as arn:aws:iam::123456789012:instance-profile\/KopsExampleRole\")\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"os\"\n\t\"strconv\"\n)\n\nvar ENV = map[string]string{\n\t\"DOCKER_URL\": \"http:\/\/127.0.0.1:4243\",\n\t\"PORT\": \"4244\",\n\t\"REFRESH_TIME\": \"2\",\n\t\"CGROUP_SOURCE\": \"docker\",\n\t\"CGROUP_DIR\": \"\/sys\/fs\/cgroup\",\n\t\"PROC_DIR\": \"\/proc\",\n\t\"RUNNER_DIR\": \"\/usr\/bin\",\n\t\"DEBUG\": \"false\",\n\t\"NET_MONITORING\": \"true\",\n}\n\nvar (\n\tRefreshTime int\n\tDebug bool\n)\n\nfunc init() {\n\tfor k, v := range ENV {\n\t\tif os.Getenv(k) != \"\" {\n\t\t\tENV[k] = os.Getenv(k)\n\t\t} else {\n\t\t\tos.Setenv(k, v)\n\t\t}\n\t}\n\n\tif ENV[\"DEBUG\"] == \"1\" || ENV[\"DEBUG\"] == \"true\" {\n\t\tDebug = true\n\t}\n\n\tvar err error\n\tRefreshTime, err = strconv.Atoi(ENV[\"REFRESH_TIME\"])\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc CgroupPath(cgroup string, id string) string {\n\tif ENV[\"CGROUP_SOURCE\"] == \"docker\" {\n\t\treturn ENV[\"CGROUP_DIR\"] + \"\/\" + cgroup + \"\/docker\/\" + id\n\t} else if ENV[\"CGROUP_SOURCE\"] == \"systemd\" {\n\t\treturn ENV[\"CGROUP_DIR\"] + \"\/\" + cgroup + \"\/system.slice\/docker-\" + id + \".scope\"\n\t} else {\n\t\tpanic(\"unknown cgroup source\" + ENV[\"CGROUP_SOURCE\"])\n\t}\n}\n<commit_msg>default refresh time to 20 secs<commit_after>package config\n\nimport (\n\t\"os\"\n\t\"strconv\"\n)\n\nvar ENV = map[string]string{\n\t\"DOCKER_URL\": \"http:\/\/127.0.0.1:4243\",\n\t\"PORT\": \"4244\",\n\t\"REFRESH_TIME\": \"20\",\n\t\"CGROUP_SOURCE\": \"docker\",\n\t\"CGROUP_DIR\": \"\/sys\/fs\/cgroup\",\n\t\"PROC_DIR\": \"\/proc\",\n\t\"RUNNER_DIR\": \"\/usr\/bin\",\n\t\"DEBUG\": \"false\",\n\t\"NET_MONITORING\": \"true\",\n}\n\nvar (\n\tRefreshTime int\n\tDebug bool\n)\n\nfunc init() {\n\tfor k, v := range ENV {\n\t\tif os.Getenv(k) != \"\" {\n\t\t\tENV[k] = os.Getenv(k)\n\t\t} else {\n\t\t\tos.Setenv(k, v)\n\t\t}\n\t}\n\n\tif ENV[\"DEBUG\"] == \"1\" || ENV[\"DEBUG\"] == \"true\" {\n\t\tDebug = true\n\t}\n\n\tvar err error\n\tRefreshTime, err = strconv.Atoi(ENV[\"REFRESH_TIME\"])\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc CgroupPath(cgroup string, id string) string {\n\tif ENV[\"CGROUP_SOURCE\"] == \"docker\" {\n\t\treturn ENV[\"CGROUP_DIR\"] + \"\/\" + cgroup + \"\/docker\/\" + id\n\t} else if ENV[\"CGROUP_SOURCE\"] == \"systemd\" {\n\t\treturn ENV[\"CGROUP_DIR\"] + \"\/\" + cgroup + \"\/system.slice\/docker-\" + id + \".scope\"\n\t} else {\n\t\tpanic(\"unknown cgroup source\" + ENV[\"CGROUP_SOURCE\"])\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Add missing formatting directives for fmt.Errorf<commit_after><|endoftext|>"} {"text":"<commit_before>package entities\n\nimport (\n\t\"time\"\n)\n\n\/\/ SentTransactionStatus type represents sent transaction status\ntype SentTransactionStatus string\n\nconst (\n\t\/\/ SentTransactionStatusSending is a status indicating that transaction is sending\n\tSentTransactionStatusSending SentTransactionStatus = \"sending\"\n\t\/\/ SentTransactionStatusSuccess is a status indicating that transaction has been successfully sent\n\tSentTransactionStatusSuccess SentTransactionStatus = \"success\"\n\t\/\/ SentTransactionStatusFailure is a status indicating that there has been an error while sending a transaction\n\tSentTransactionStatusFailure SentTransactionStatus = \"failure\"\n)\n\n\/\/ SentTransaction represents transaction sent by the gateway server\ntype SentTransaction struct {\n\texists bool\n\tID *int64 `db:\"id\"`\n\tTransactionID string `db:\"transaction_id\"`\n\tStatus SentTransactionStatus `db:\"status\"` \/\/ sending\/success\/failure\n\tSource string `db:\"source\"`\n\tSubmittedAt time.Time `db:\"submitted_at\"`\n\tSucceededAt *time.Time `db:\"succeeded_at\"`\n\tLedger *uint64 `db:\"ledger\"`\n\tEnvelopeXdr string `db:\"envelope_xdr\"`\n\tResultXdr *string `db:\"result_xdr\"`\n}\n\n\/\/ GetID returns ID of the entity\nfunc (e *SentTransaction) GetID() *int64 {\n\tif e.ID == nil {\n\t\treturn nil\n\t}\n\tnewID := *e.ID\n\treturn &newID\n}\n\n\/\/ SetID sets ID of the entity\nfunc (e *SentTransaction) SetID(id int64) {\n\te.ID = &id\n}\n\n\/\/ IsNew returns true if the entity has not been persisted yet\nfunc (e *SentTransaction) IsNew() bool {\n\treturn !e.exists\n}\n\n\/\/ SetExists sets entity as persisted\nfunc (e *SentTransaction) SetExists() {\n\te.exists = true\n}\n\n\/\/ MarkSucceeded marks transaction as succeeded\nfunc (e *SentTransaction) MarkSucceeded(ledger uint64) {\n\te.Status = SentTransactionStatusSuccess\n\te.Ledger = &ledger\n\tnow := time.Now()\n\te.SucceededAt = &now\n}\n\n\/\/ MarkFailed marks transaction as failed\nfunc (e *SentTransaction) MarkFailed(resultXdr string) {\n\te.Status = SentTransactionStatusFailure\n\te.ResultXdr = &resultXdr\n}\n<commit_msg>Add valuer implementation to SentTransactionStatus<commit_after>package entities\n\nimport (\n\t\"database\/sql\/driver\"\n\t\"time\"\n)\n\n\/\/ SentTransactionStatus type represents sent transaction status\ntype SentTransactionStatus string\n\n\/\/ Value implements driver.Valuer\nfunc (status SentTransactionStatus) Value() (driver.Value, error) {\n\treturn driver.Value(string(status)), nil\n}\n\nvar _ driver.Valuer = SentTransactionStatus(\"\")\n\nconst (\n\t\/\/ SentTransactionStatusSending is a status indicating that transaction is sending\n\tSentTransactionStatusSending SentTransactionStatus = \"sending\"\n\t\/\/ SentTransactionStatusSuccess is a status indicating that transaction has been successfully sent\n\tSentTransactionStatusSuccess SentTransactionStatus = \"success\"\n\t\/\/ SentTransactionStatusFailure is a status indicating that there has been an error while sending a transaction\n\tSentTransactionStatusFailure SentTransactionStatus = \"failure\"\n)\n\n\/\/ SentTransaction represents transaction sent by the gateway server\ntype SentTransaction struct {\n\texists bool\n\tID *int64 `db:\"id\"`\n\tTransactionID string `db:\"transaction_id\"`\n\tStatus SentTransactionStatus `db:\"status\"` \/\/ sending\/success\/failure\n\tSource string `db:\"source\"`\n\tSubmittedAt time.Time `db:\"submitted_at\"`\n\tSucceededAt *time.Time `db:\"succeeded_at\"`\n\tLedger *uint64 `db:\"ledger\"`\n\tEnvelopeXdr string `db:\"envelope_xdr\"`\n\tResultXdr *string `db:\"result_xdr\"`\n}\n\n\/\/ GetID returns ID of the entity\nfunc (e *SentTransaction) GetID() *int64 {\n\tif e.ID == nil {\n\t\treturn nil\n\t}\n\tnewID := *e.ID\n\treturn &newID\n}\n\n\/\/ SetID sets ID of the entity\nfunc (e *SentTransaction) SetID(id int64) {\n\te.ID = &id\n}\n\n\/\/ IsNew returns true if the entity has not been persisted yet\nfunc (e *SentTransaction) IsNew() bool {\n\treturn !e.exists\n}\n\n\/\/ SetExists sets entity as persisted\nfunc (e *SentTransaction) SetExists() {\n\te.exists = true\n}\n\n\/\/ MarkSucceeded marks transaction as succeeded\nfunc (e *SentTransaction) MarkSucceeded(ledger uint64) {\n\te.Status = SentTransactionStatusSuccess\n\te.Ledger = &ledger\n\tnow := time.Now()\n\te.SucceededAt = &now\n}\n\n\/\/ MarkFailed marks transaction as failed\nfunc (e *SentTransaction) MarkFailed(resultXdr string) {\n\te.Status = SentTransactionStatusFailure\n\te.ResultXdr = &resultXdr\n}\n<|endoftext|>"} {"text":"<commit_before>package google\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"testing\"\n\n\t\"code.google.com\/p\/google-api-go-client\/replicapool\/v1beta2\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\nfunc TestAccInstanceGroupManager_basic(t *testing.T) {\n\tvar manager replicapool.InstanceGroupManager\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckInstanceGroupManagerDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccInstanceGroupManager_basic,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckInstanceGroupManagerExists(\n\t\t\t\t\t\t\"google_replicapool_instance_group_manager.foobar\", &manager),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccInstanceGroupManager_update(t *testing.T) {\n\tvar manager replicapool.InstanceGroupManager\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckInstanceGroupManagerDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccInstanceGroupManager_basic,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckInstanceGroupManagerExists(\n\t\t\t\t\t\t\"google_replicapool_instance_group_manager.foobar\", &manager),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccInstanceGroupManager_update,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckInstanceGroupManagerExists(\n\t\t\t\t\t\t\"google_replicapool_instance_group_manager.foobar\", &manager),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccInstanceGroupManager_update2,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckInstanceGroupManagerExists(\n\t\t\t\t\t\t\"google_replicapool_instance_group_manager.foobar\", &manager),\n\t\t\t\t\ttestAccCheckInstanceGroupManagerUpdated(\n\t\t\t\t\t\t\"google_replicapool_instance_group_manager.foobar\", 3,\n\t\t\t\t\t\t\"google_compute_target_pool.foobaz\", \"terraform-test-foobaz\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error {\n\tconfig := testAccProvider.Meta().(*Config)\n\n\tfor _, rs := range s.RootModule().Resources {\n\t\tif rs.Type != \"google_replicapool_instance_group_manager\" {\n\t\t\tcontinue\n\t\t}\n\t\t_, err := config.clientReplicaPool.InstanceGroupManagers.Get(\n\t\t\tconfig.Project, rs.Primary.Attributes[\"zone\"], rs.Primary.ID).Do()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"InstanceGroupManager still exists\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc testAccCheckInstanceGroupManagerExists(n string, manager *replicapool.InstanceGroupManager) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No ID is set\")\n\t\t}\n\n\t\tconfig := testAccProvider.Meta().(*Config)\n\n\t\tfound, err := config.clientReplicaPool.InstanceGroupManagers.Get(\n\t\t\tconfig.Project, rs.Primary.Attributes[\"zone\"], rs.Primary.ID).Do()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif found.Name != rs.Primary.ID {\n\t\t\treturn fmt.Errorf(\"InstanceGroupManager not found\")\n\t\t}\n\n\t\t*manager = *found\n\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool string, template string) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No ID is set\")\n\t\t}\n\t\tlog.Printf(\"[DEBUG] XXXXXXXXXXXXXXXXXXXXXXXX Manager Test: %#v\", rs)\n\n\t\tconfig := testAccProvider.Meta().(*Config)\n\n\t\tmanager, err := config.clientReplicaPool.InstanceGroupManagers.Get(\n\t\t\tconfig.Project, rs.Primary.Attributes[\"zone\"], rs.Primary.ID).Do()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ check that total instance count is \"size\"\n\t\tlog.Printf(\"[DEBUG] XXXXXXXXXXXXXXXXXXXXXXXX Manager Test: %#v\", manager.TargetSize)\n\t\tif manager.CurrentSize != size {\n\t\t\treturn fmt.Errorf(\"instance count incorrect\")\n\t\t}\n\n\t\t\/\/ check that at least one instance exists in \"targetpool\"\n\t\ttp, ok := s.RootModule().Resources[targetPool]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", targetPool)\n\t\t}\n\n\t\tif tp.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No ID is set\")\n\t\t}\n\t\tlog.Printf(\"[DEBUG] XXXXXXXXXXXXXXXXXXXXXXXX Manager Test: %#v\", tp)\n\n\t\ttargetpool, err := config.clientCompute.TargetPools.Get(\n\t\t\tconfig.Project, config.Region, tp.Primary.ID).Do()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ check that total instance count is \"size\"\n\t\tlog.Printf(\"[DEBUG] XXXXXXXXXXXXXXXXXXXXXXXX Manager Test: %#v\", len(targetpool.Instances))\n\t\tif len(targetpool.Instances) == 0 {\n\t\t\treturn fmt.Errorf(\"no instance in new targetpool\")\n\t\t}\n\n\t\t\/\/ check that the instance template updated\n\t\tinstanceTemplate, err := config.clientCompute.InstanceTemplates.Get(\n\t\t\tconfig.Project, template).Do()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error reading instance template: %s\", err)\n\t\t}\n\n\t\tif instanceTemplate.Name != template {\n\t\t\treturn fmt.Errorf(\"instance template not updated\")\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\nconst testAccInstanceGroupManager_basic = `\nresource \"google_compute_instance_template\" \"foobar\" {\n\tname = \"terraform-test-foobar\"\n\tmachine_type = \"n1-standard-1\"\n\tcan_ip_forward = false\n\ttags = [\"foo\", \"bar\"]\n\n\tdisk {\n\t\tsource_image = \"projects\/debian-cloud\/global\/images\/debian-7-wheezy-v20140814\"\n\t\tauto_delete = true\n\t\tboot = true\n\t}\n\n\tnetwork_interface {\n\t\tnetwork = \"default\"\n\t}\n\n\tmetadata {\n\t\tfoo = \"bar\"\n\t}\n\n\tservice_account {\n\t\tscopes = [\"userinfo-email\", \"compute-ro\", \"storage-ro\"]\n\t}\n}\n\nresource \"google_compute_target_pool\" \"foobar\" {\n\tdescription = \"Resource created for Terraform acceptance testing\"\n\tname = \"terraform-test-foobar\"\n\tsession_affinity = \"CLIENT_IP_PROTO\"\n}\n\nresource \"google_replicapool_instance_group_manager\" \"foobar\" {\n\tdescription = \"Terraform test instance group manager\"\n\tname = \"terraform-test\"\n\tinstance_template = \"${google_compute_instance_template.foobar.self_link}\"\n\ttarget_pools = [\"${google_compute_target_pool.foobar.self_link}\"]\n\tbase_instance_name = \"foobar\"\n\tzone = \"us-central1-a\"\n\tsize = 2\n}`\n\nconst testAccInstanceGroupManager_update = `\nresource \"google_compute_instance_template\" \"foobar\" {\n\tname = \"terraform-test-foobar\"\n\tmachine_type = \"n1-standard-1\"\n\tcan_ip_forward = false\n\ttags = [\"foo\", \"bar\"]\n\n\tdisk {\n\t\tsource_image = \"projects\/debian-cloud\/global\/images\/debian-7-wheezy-v20140814\"\n\t\tauto_delete = true\n\t\tboot = true\n\t}\n\n\tnetwork_interface {\n\t\tnetwork = \"default\"\n\t}\n\n\tmetadata {\n\t\tfoo = \"bar\"\n\t}\n\n\tservice_account {\n\t\tscopes = [\"userinfo-email\", \"compute-ro\", \"storage-ro\"]\n\t}\n}\n\nresource \"google_compute_instance_template\" \"foobaz\" {\n\tname = \"terraform-test-foobaz\"\n\tmachine_type = \"n1-standard-1\"\n\tcan_ip_forward = false\n\ttags = [\"foo\", \"bar\"]\n\n\tdisk {\n\t\tsource_image = \"projects\/debian-cloud\/global\/images\/debian-7-wheezy-v20140814\"\n\t\tauto_delete = true\n\t\tboot = true\n\t}\n\n\tnetwork_interface {\n\t\tnetwork = \"default\"\n\t}\n\n\tmetadata {\n\t\tfoo = \"bar\"\n\t}\n\n\tservice_account {\n\t\tscopes = [\"userinfo-email\", \"compute-ro\", \"storage-ro\"]\n\t}\n}\n\nresource \"google_compute_target_pool\" \"foobar\" {\n\tdescription = \"Resource created for Terraform acceptance testing\"\n\tname = \"terraform-test-foobar\"\n\tsession_affinity = \"CLIENT_IP_PROTO\"\n}\n\nresource \"google_compute_target_pool\" \"foobaz\" {\n\tdescription = \"Resource created for Terraform acceptance testing\"\n\tname = \"terraform-test-foobaz\"\n\tsession_affinity = \"CLIENT_IP_PROTO\"\n}\n\nresource \"google_replicapool_instance_group_manager\" \"foobar\" {\n\tdescription = \"Terraform test instance group manager\"\n\tname = \"terraform-test\"\n\tinstance_template = \"${google_compute_instance_template.foobar.self_link}\"\n\ttarget_pools = [\"${google_compute_target_pool.foobaz.self_link}\"]\n\tbase_instance_name = \"foobar\"\n\tzone = \"us-central1-a\"\n\tsize = 2\n}`\n\nconst testAccInstanceGroupManager_update2 = `\nresource \"google_compute_instance_template\" \"foobar\" {\n\tname = \"terraform-test-foobar\"\n\tmachine_type = \"n1-standard-1\"\n\tcan_ip_forward = false\n\ttags = [\"foo\", \"bar\"]\n\n\tdisk {\n\t\tsource_image = \"projects\/debian-cloud\/global\/images\/debian-7-wheezy-v20140814\"\n\t\tauto_delete = true\n\t\tboot = true\n\t}\n\n\tnetwork_interface {\n\t\tnetwork = \"default\"\n\t}\n\n\tmetadata {\n\t\tfoo = \"bar\"\n\t}\n\n\tservice_account {\n\t\tscopes = [\"userinfo-email\", \"compute-ro\", \"storage-ro\"]\n\t}\n}\n\nresource \"google_compute_instance_template\" \"foobaz\" {\n\tname = \"terraform-test-foobaz\"\n\tmachine_type = \"n1-standard-1\"\n\tcan_ip_forward = false\n\ttags = [\"foo\", \"bar\"]\n\n\tdisk {\n\t\tsource_image = \"projects\/debian-cloud\/global\/images\/debian-7-wheezy-v20140814\"\n\t\tauto_delete = true\n\t\tboot = true\n\t}\n\n\tnetwork_interface {\n\t\tnetwork = \"default\"\n\t}\n\n\tmetadata {\n\t\tfoo = \"bar\"\n\t}\n\n\tservice_account {\n\t\tscopes = [\"userinfo-email\", \"compute-ro\", \"storage-ro\"]\n\t}\n}\n\nresource \"google_compute_target_pool\" \"foobar\" {\n\tdescription = \"Resource created for Terraform acceptance testing\"\n\tname = \"terraform-test-foobar\"\n\tsession_affinity = \"CLIENT_IP_PROTO\"\n}\n\nresource \"google_compute_target_pool\" \"foobaz\" {\n\tdescription = \"Resource created for Terraform acceptance testing\"\n\tname = \"terraform-test-foobaz\"\n\tsession_affinity = \"CLIENT_IP_PROTO\"\n}\n\nresource \"google_replicapool_instance_group_manager\" \"foobar\" {\n\tdescription = \"Terraform test instance group manager\"\n\tname = \"terraform-test\"\n\tinstance_template = \"${google_compute_instance_template.foobaz.self_link}\"\n\ttarget_pools = [\"${google_compute_target_pool.foobaz.self_link}\"]\n\tbase_instance_name = \"foobar\"\n\tzone = \"us-central1-a\"\n\tsize = 3\n}`\n<commit_msg>Remove debugging log lines.<commit_after>package google\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"code.google.com\/p\/google-api-go-client\/replicapool\/v1beta2\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\nfunc TestAccInstanceGroupManager_basic(t *testing.T) {\n\tvar manager replicapool.InstanceGroupManager\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckInstanceGroupManagerDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccInstanceGroupManager_basic,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckInstanceGroupManagerExists(\n\t\t\t\t\t\t\"google_replicapool_instance_group_manager.foobar\", &manager),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccInstanceGroupManager_update(t *testing.T) {\n\tvar manager replicapool.InstanceGroupManager\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckInstanceGroupManagerDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccInstanceGroupManager_basic,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckInstanceGroupManagerExists(\n\t\t\t\t\t\t\"google_replicapool_instance_group_manager.foobar\", &manager),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccInstanceGroupManager_update,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckInstanceGroupManagerExists(\n\t\t\t\t\t\t\"google_replicapool_instance_group_manager.foobar\", &manager),\n\t\t\t\t),\n\t\t\t},\n\t\t\tresource.TestStep{\n\t\t\t\tConfig: testAccInstanceGroupManager_update2,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckInstanceGroupManagerExists(\n\t\t\t\t\t\t\"google_replicapool_instance_group_manager.foobar\", &manager),\n\t\t\t\t\ttestAccCheckInstanceGroupManagerUpdated(\n\t\t\t\t\t\t\"google_replicapool_instance_group_manager.foobar\", 3,\n\t\t\t\t\t\t\"google_compute_target_pool.foobaz\", \"terraform-test-foobaz\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc testAccCheckInstanceGroupManagerDestroy(s *terraform.State) error {\n\tconfig := testAccProvider.Meta().(*Config)\n\n\tfor _, rs := range s.RootModule().Resources {\n\t\tif rs.Type != \"google_replicapool_instance_group_manager\" {\n\t\t\tcontinue\n\t\t}\n\t\t_, err := config.clientReplicaPool.InstanceGroupManagers.Get(\n\t\t\tconfig.Project, rs.Primary.Attributes[\"zone\"], rs.Primary.ID).Do()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"InstanceGroupManager still exists\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc testAccCheckInstanceGroupManagerExists(n string, manager *replicapool.InstanceGroupManager) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No ID is set\")\n\t\t}\n\n\t\tconfig := testAccProvider.Meta().(*Config)\n\n\t\tfound, err := config.clientReplicaPool.InstanceGroupManagers.Get(\n\t\t\tconfig.Project, rs.Primary.Attributes[\"zone\"], rs.Primary.ID).Do()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif found.Name != rs.Primary.ID {\n\t\t\treturn fmt.Errorf(\"InstanceGroupManager not found\")\n\t\t}\n\n\t\t*manager = *found\n\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckInstanceGroupManagerUpdated(n string, size int64, targetPool string, template string) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No ID is set\")\n\t\t}\n\n\t\tconfig := testAccProvider.Meta().(*Config)\n\n\t\tmanager, err := config.clientReplicaPool.InstanceGroupManagers.Get(\n\t\t\tconfig.Project, rs.Primary.Attributes[\"zone\"], rs.Primary.ID).Do()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ check that total instance count is \"size\"\n\t\tif manager.CurrentSize != size {\n\t\t\treturn fmt.Errorf(\"instance count incorrect\")\n\t\t}\n\n\t\t\/\/ check that at least one instance exists in \"targetpool\"\n\t\ttp, ok := s.RootModule().Resources[targetPool]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", targetPool)\n\t\t}\n\n\t\tif tp.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No ID is set\")\n\t\t}\n\n\t\ttargetpool, err := config.clientCompute.TargetPools.Get(\n\t\t\tconfig.Project, config.Region, tp.Primary.ID).Do()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ check that total instance count is \"size\"\n\t\tif len(targetpool.Instances) == 0 {\n\t\t\treturn fmt.Errorf(\"no instance in new targetpool\")\n\t\t}\n\n\t\t\/\/ check that the instance template updated\n\t\tinstanceTemplate, err := config.clientCompute.InstanceTemplates.Get(\n\t\t\tconfig.Project, template).Do()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error reading instance template: %s\", err)\n\t\t}\n\n\t\tif instanceTemplate.Name != template {\n\t\t\treturn fmt.Errorf(\"instance template not updated\")\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\nconst testAccInstanceGroupManager_basic = `\nresource \"google_compute_instance_template\" \"foobar\" {\n\tname = \"terraform-test-foobar\"\n\tmachine_type = \"n1-standard-1\"\n\tcan_ip_forward = false\n\ttags = [\"foo\", \"bar\"]\n\n\tdisk {\n\t\tsource_image = \"projects\/debian-cloud\/global\/images\/debian-7-wheezy-v20140814\"\n\t\tauto_delete = true\n\t\tboot = true\n\t}\n\n\tnetwork_interface {\n\t\tnetwork = \"default\"\n\t}\n\n\tmetadata {\n\t\tfoo = \"bar\"\n\t}\n\n\tservice_account {\n\t\tscopes = [\"userinfo-email\", \"compute-ro\", \"storage-ro\"]\n\t}\n}\n\nresource \"google_compute_target_pool\" \"foobar\" {\n\tdescription = \"Resource created for Terraform acceptance testing\"\n\tname = \"terraform-test-foobar\"\n\tsession_affinity = \"CLIENT_IP_PROTO\"\n}\n\nresource \"google_replicapool_instance_group_manager\" \"foobar\" {\n\tdescription = \"Terraform test instance group manager\"\n\tname = \"terraform-test\"\n\tinstance_template = \"${google_compute_instance_template.foobar.self_link}\"\n\ttarget_pools = [\"${google_compute_target_pool.foobar.self_link}\"]\n\tbase_instance_name = \"foobar\"\n\tzone = \"us-central1-a\"\n\tsize = 2\n}`\n\nconst testAccInstanceGroupManager_update = `\nresource \"google_compute_instance_template\" \"foobar\" {\n\tname = \"terraform-test-foobar\"\n\tmachine_type = \"n1-standard-1\"\n\tcan_ip_forward = false\n\ttags = [\"foo\", \"bar\"]\n\n\tdisk {\n\t\tsource_image = \"projects\/debian-cloud\/global\/images\/debian-7-wheezy-v20140814\"\n\t\tauto_delete = true\n\t\tboot = true\n\t}\n\n\tnetwork_interface {\n\t\tnetwork = \"default\"\n\t}\n\n\tmetadata {\n\t\tfoo = \"bar\"\n\t}\n\n\tservice_account {\n\t\tscopes = [\"userinfo-email\", \"compute-ro\", \"storage-ro\"]\n\t}\n}\n\nresource \"google_compute_instance_template\" \"foobaz\" {\n\tname = \"terraform-test-foobaz\"\n\tmachine_type = \"n1-standard-1\"\n\tcan_ip_forward = false\n\ttags = [\"foo\", \"bar\"]\n\n\tdisk {\n\t\tsource_image = \"projects\/debian-cloud\/global\/images\/debian-7-wheezy-v20140814\"\n\t\tauto_delete = true\n\t\tboot = true\n\t}\n\n\tnetwork_interface {\n\t\tnetwork = \"default\"\n\t}\n\n\tmetadata {\n\t\tfoo = \"bar\"\n\t}\n\n\tservice_account {\n\t\tscopes = [\"userinfo-email\", \"compute-ro\", \"storage-ro\"]\n\t}\n}\n\nresource \"google_compute_target_pool\" \"foobar\" {\n\tdescription = \"Resource created for Terraform acceptance testing\"\n\tname = \"terraform-test-foobar\"\n\tsession_affinity = \"CLIENT_IP_PROTO\"\n}\n\nresource \"google_compute_target_pool\" \"foobaz\" {\n\tdescription = \"Resource created for Terraform acceptance testing\"\n\tname = \"terraform-test-foobaz\"\n\tsession_affinity = \"CLIENT_IP_PROTO\"\n}\n\nresource \"google_replicapool_instance_group_manager\" \"foobar\" {\n\tdescription = \"Terraform test instance group manager\"\n\tname = \"terraform-test\"\n\tinstance_template = \"${google_compute_instance_template.foobar.self_link}\"\n\ttarget_pools = [\"${google_compute_target_pool.foobaz.self_link}\"]\n\tbase_instance_name = \"foobar\"\n\tzone = \"us-central1-a\"\n\tsize = 2\n}`\n\nconst testAccInstanceGroupManager_update2 = `\nresource \"google_compute_instance_template\" \"foobar\" {\n\tname = \"terraform-test-foobar\"\n\tmachine_type = \"n1-standard-1\"\n\tcan_ip_forward = false\n\ttags = [\"foo\", \"bar\"]\n\n\tdisk {\n\t\tsource_image = \"projects\/debian-cloud\/global\/images\/debian-7-wheezy-v20140814\"\n\t\tauto_delete = true\n\t\tboot = true\n\t}\n\n\tnetwork_interface {\n\t\tnetwork = \"default\"\n\t}\n\n\tmetadata {\n\t\tfoo = \"bar\"\n\t}\n\n\tservice_account {\n\t\tscopes = [\"userinfo-email\", \"compute-ro\", \"storage-ro\"]\n\t}\n}\n\nresource \"google_compute_instance_template\" \"foobaz\" {\n\tname = \"terraform-test-foobaz\"\n\tmachine_type = \"n1-standard-1\"\n\tcan_ip_forward = false\n\ttags = [\"foo\", \"bar\"]\n\n\tdisk {\n\t\tsource_image = \"projects\/debian-cloud\/global\/images\/debian-7-wheezy-v20140814\"\n\t\tauto_delete = true\n\t\tboot = true\n\t}\n\n\tnetwork_interface {\n\t\tnetwork = \"default\"\n\t}\n\n\tmetadata {\n\t\tfoo = \"bar\"\n\t}\n\n\tservice_account {\n\t\tscopes = [\"userinfo-email\", \"compute-ro\", \"storage-ro\"]\n\t}\n}\n\nresource \"google_compute_target_pool\" \"foobar\" {\n\tdescription = \"Resource created for Terraform acceptance testing\"\n\tname = \"terraform-test-foobar\"\n\tsession_affinity = \"CLIENT_IP_PROTO\"\n}\n\nresource \"google_compute_target_pool\" \"foobaz\" {\n\tdescription = \"Resource created for Terraform acceptance testing\"\n\tname = \"terraform-test-foobaz\"\n\tsession_affinity = \"CLIENT_IP_PROTO\"\n}\n\nresource \"google_replicapool_instance_group_manager\" \"foobar\" {\n\tdescription = \"Terraform test instance group manager\"\n\tname = \"terraform-test\"\n\tinstance_template = \"${google_compute_instance_template.foobaz.self_link}\"\n\ttarget_pools = [\"${google_compute_target_pool.foobaz.self_link}\"]\n\tbase_instance_name = \"foobar\"\n\tzone = \"us-central1-a\"\n\tsize = 3\n}`\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Vincent Landgraf. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage confd\n\nimport (\n\t\"errors\"\n\t\"log\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/\/ BUG(threez) It currently requires to connect directly to the confd database.\n\/\/ This can be done by connecting through an ssh tunnel and forward the port\n\/\/ 4472, e.g.:\n\/\/\n\/\/ ssh -L 4472:127.0.0.1:4472 root@utm\n\n\/\/ Conn is the confd connection object\ntype Conn struct {\n\tURL *url.URL \/\/ URL that the connection connects to\n\tLogger *log.Logger \/\/ Logger if specified, will log confd actions\n\tOptions *Options \/\/ Options represent connection options\n\tid struct {\n\t\tValue uint64 \/\/ json rpc counter\n\t\tsync.Mutex \/\/ prevent multiple write\/read transactions\n\t}\n\tTransport Transport\n\ttxMu sync.Mutex \/\/ prevent multiple write\/read transactions\n\tsessionMu sync.Mutex \/\/ prevent concurrent confd access\n}\n\n\/\/ NewConn creates a new confd connection (is not acually connecting)\nfunc NewConn(URL string) (conn *Conn, err error) {\n\tu, err := url.Parse(URL)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tconn = &Conn{\n\t\tURL: u,\n\t\tLogger: nil,\n\t\tOptions: newOptions(u),\n\t\tTransport: &tcpTransport{Timeout: defaultTimeout},\n\t}\n\treturn\n}\n\n\/\/ NewAnonymousConn creates a new confd connection (is not acually connecting)\n\/\/ to http:\/\/127.0.0.1:4472\/ (Local Connection)\nfunc NewAnonymousConn() (conn *Conn) {\n\t\/\/ error is only for url parsing which can not happen here, therefore ignored\n\tconn, _ = NewConn(anonymousLocalConn)\n\treturn conn\n}\n\n\/\/ NewSystemConn creates a new confd connection (is not acually connecting)\n\/\/ to http:\/\/system@127.0.0.1:4472\/ (Local Connection)\nfunc NewSystemConn() (conn *Conn) {\n\t\/\/ error is only for url parsing which can not happen here, therefore ignored\n\tconn, _ = NewConn(systemLocalConn)\n\treturn conn\n}\n\n\/\/ SimpleRequest sends a simple request (untyped response) to the confd\nfunc (c *Conn) SimpleRequest(method string, params ...interface{}) (interface{}, error) {\n\tresult := new(interface{})\n\terr := c.Request(method, result, params...)\n\treturn result, err\n}\n\n\/\/ Request allows to send request with typed (parsed with json) responses\nfunc (c *Conn) Request(method string, result interface{}, params ...interface{}) (err error) {\n\t\/\/ make sure we have a connection to the server\n\terr = c.connect()\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = c.request(method, result, params...)\n\n\t\/\/ automatic error handling\n\tif err == ErrEmptyResponse || err == ErrReturnCode {\n\t\terrs, err := c.ErrList()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(errs) > 0 {\n\t\t\treturn errors.New(errs[0].Error())\n\t\t}\n\t}\n\n\tif err != nil {\n\t\tc.Logger.Printf(\"Error: %v\", err)\n\t}\n\treturn\n}\n\n\/\/ Connect creates a new confd session by calling new and get_SID confd calls\nfunc (c *Conn) connect() (err error) {\n\tif c.Transport.IsConnected() {\n\t\treturn\n\t}\n\tc.sessionMu.Lock()\n\tdefer c.sessionMu.Unlock()\n\tc.logf(\"Connect to %s\", c.safeURL())\n\terr = c.Transport.Connect(c.URL)\n\tif err != nil {\n\t\tc.logf(\"Unable to connect %s\", err)\n\t\treturn\n\t}\n\terr = c.request(\"new\", nil, c.Options)\n\tif err == nil && c.Options.SID == nil {\n\t\t\/\/ if we got a sid we will use it next time\n\t\terr = c.request(\"get_SID\", &c.Options.SID)\n\t}\n\tif err != nil {\n\t\tc.logf(\"Unable to create session %v\", err)\n\t}\n\treturn\n}\n\nfunc (c *Conn) request(method string, result interface{}, params ...interface{}) error {\n\t\/\/ request\n\tr, err := newRequest(method, params, c.nextID())\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.logf(\"=> %s\", r.String())\n\treq, err := r.HTTP(c.URL.Host)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ send request\n\tresp, err := c.Transport.RoundTrip(req)\n\tif err != nil {\n\t\t\/\/ send receive operation failed, conenction will be closed\n\t\t_ = c.Transport.Close() \/\/ ignore close errors\n\t\treturn err\n\t}\n\n\t\/\/ decode response\n\trespObj, err := newResponse(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = respObj.Decode(result, method != \"get_SID\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.logf(\"<= %v\", respObj)\n\n\treturn nil\n}\n\n\/\/ Close the confd connection\nfunc (c *Conn) Close() (err error) {\n\tif c.Transport.IsConnected() {\n\t\tc.sessionMu.Lock()\n\t\tdefer c.sessionMu.Unlock()\n\t\tc.logf(\"Disconnect from %s\", c.safeURL())\n\t\t_ = c.request(\"detach\", nil) \/\/ ignore if we can't detach\n\t\t_ = c.Transport.Close() \/\/ ignore close errors\n\t}\n\treturn\n}\n\nfunc (c *Conn) logf(format string, args ...interface{}) {\n\tif c.Logger != nil {\n\t\tc.Logger.Printf(format, args...)\n\t}\n}\n\nfunc (c *Conn) safeURL() string {\n\tif c.Options.Password != \"\" {\n\t\treturn strings.Replace(c.URL.String(), c.Options.Password, \"********\", 1)\n\t}\n\treturn c.URL.String()\n}\n\nfunc (c *Conn) nextID() uint64 {\n\tc.id.Lock()\n\tdefer c.id.Unlock()\n\tnext := c.id.Value\n\tc.id.Value++\n\treturn next\n}\n<commit_msg>Lock requests to the confd to prevent conflicts<commit_after>\/\/ Copyright 2016 Vincent Landgraf. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage confd\n\nimport (\n\t\"errors\"\n\t\"log\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/\/ BUG(threez) It currently requires to connect directly to the confd database.\n\/\/ This can be done by connecting through an ssh tunnel and forward the port\n\/\/ 4472, e.g.:\n\/\/\n\/\/ ssh -L 4472:127.0.0.1:4472 root@utm\n\n\/\/ Conn is the confd connection object\ntype Conn struct {\n\tURL *url.URL \/\/ URL that the connection connects to\n\tLogger *log.Logger \/\/ Logger if specified, will log confd actions\n\tOptions *Options \/\/ Options represent connection options\n\tid struct {\n\t\tValue uint64 \/\/ json rpc counter\n\t\tsync.Mutex \/\/ prevent double counting\n\t}\n\tTransport Transport\n\ttxMu sync.Mutex \/\/ prevent multiple write\/read transactions\n\tsessionMu sync.Mutex \/\/ prevent multiple connections\n\trequestMu sync.Mutex \/\/ prevent concurrent confd access\n}\n\n\/\/ NewConn creates a new confd connection (is not acually connecting)\nfunc NewConn(URL string) (conn *Conn, err error) {\n\tu, err := url.Parse(URL)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tconn = &Conn{\n\t\tURL: u,\n\t\tLogger: nil,\n\t\tOptions: newOptions(u),\n\t\tTransport: &tcpTransport{Timeout: defaultTimeout},\n\t}\n\treturn\n}\n\n\/\/ NewAnonymousConn creates a new confd connection (is not acually connecting)\n\/\/ to http:\/\/127.0.0.1:4472\/ (Local Connection)\nfunc NewAnonymousConn() (conn *Conn) {\n\t\/\/ error is only for url parsing which can not happen here, therefore ignored\n\tconn, _ = NewConn(anonymousLocalConn)\n\treturn conn\n}\n\n\/\/ NewSystemConn creates a new confd connection (is not acually connecting)\n\/\/ to http:\/\/system@127.0.0.1:4472\/ (Local Connection)\nfunc NewSystemConn() (conn *Conn) {\n\t\/\/ error is only for url parsing which can not happen here, therefore ignored\n\tconn, _ = NewConn(systemLocalConn)\n\treturn conn\n}\n\n\/\/ SimpleRequest sends a simple request (untyped response) to the confd\nfunc (c *Conn) SimpleRequest(method string, params ...interface{}) (interface{}, error) {\n\tresult := new(interface{})\n\terr := c.Request(method, result, params...)\n\treturn result, err\n}\n\n\/\/ Request allows to send request with typed (parsed with json) responses\nfunc (c *Conn) Request(method string, result interface{}, params ...interface{}) (err error) {\n\t\/\/ make sure we have a connection to the server\n\terr = c.connect()\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = c.request(method, result, params...)\n\n\t\/\/ automatic error handling\n\tif err == ErrEmptyResponse || err == ErrReturnCode {\n\t\terrs, err := c.ErrList()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(errs) > 0 {\n\t\t\treturn errors.New(errs[0].Error())\n\t\t}\n\t}\n\n\tif err != nil {\n\t\tc.logf(\"Error: %v\", err)\n\t}\n\treturn\n}\n\n\/\/ Connect creates a new confd session by calling new and get_SID confd calls\nfunc (c *Conn) connect() (err error) {\n\tif c.Transport.IsConnected() {\n\t\treturn\n\t}\n\tc.sessionMu.Lock()\n\tdefer c.sessionMu.Unlock()\n\tc.logf(\"Connect to %s\", c.safeURL())\n\terr = c.Transport.Connect(c.URL)\n\tif err != nil {\n\t\tc.logf(\"Unable to connect %s\", err)\n\t\treturn\n\t}\n\terr = c.request(\"new\", nil, c.Options)\n\tif err == nil && c.Options.SID == nil {\n\t\t\/\/ if we got a sid we will use it next time\n\t\terr = c.request(\"get_SID\", &c.Options.SID)\n\t}\n\tif err != nil {\n\t\tc.logf(\"Unable to create session %v\", err)\n\t}\n\treturn\n}\n\nfunc (c *Conn) request(method string, result interface{}, params ...interface{}) error {\n\t\/\/ request\n\tr, err := newRequest(method, params, c.nextID())\n\tif err != nil {\n\t\treturn err\n\t}\n\tc.logf(\"=> %s\", r.String())\n\treq, err := r.HTTP(c.URL.Host)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ send request\n\tc.requestMu.Lock()\n\tdefer c.requestMu.Unlock()\n\tresp, err := c.Transport.RoundTrip(req)\n\tif err != nil {\n\t\t\/\/ send receive operation failed, conenction will be closed\n\t\t_ = c.Transport.Close() \/\/ ignore close errors\n\t\treturn err\n\t}\n\n\t\/\/ decode response\n\trespObj, err := newResponse(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = respObj.Decode(result, method != \"get_SID\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.logf(\"<= %v\", respObj)\n\n\treturn nil\n}\n\n\/\/ Close the confd connection\nfunc (c *Conn) Close() (err error) {\n\tif c.Transport.IsConnected() {\n\t\tc.sessionMu.Lock()\n\t\tdefer c.sessionMu.Unlock()\n\t\tc.logf(\"Disconnect from %s\", c.safeURL())\n\t\t_ = c.request(\"detach\", nil) \/\/ ignore if we can't detach\n\t\t_ = c.Transport.Close() \/\/ ignore close errors\n\t}\n\treturn\n}\n\nfunc (c *Conn) logf(format string, args ...interface{}) {\n\tif c.Logger != nil {\n\t\tc.Logger.Printf(format, args...)\n\t}\n}\n\nfunc (c *Conn) safeURL() string {\n\tif c.Options.Password != \"\" {\n\t\treturn strings.Replace(c.URL.String(), c.Options.Password, \"********\", 1)\n\t}\n\treturn c.URL.String()\n}\n\nfunc (c *Conn) nextID() uint64 {\n\tc.id.Lock()\n\tdefer c.id.Unlock()\n\tnext := c.id.Value\n\tc.id.Value++\n\treturn next\n}\n<|endoftext|>"} {"text":"<commit_before>package sync\n\ntype Change struct {\n\tPath string\n\tOld *Entry\n\tNew *Entry\n}\n\nfunc (c Change) String() string {\n\t\/\/ TODO: Classify the change based on Old\/New and provide a more detailed\n\t\/\/ representation.\n\treturn c.Path\n}\n<commit_msg>Removed Change string representation.<commit_after>package sync\n\ntype Change struct {\n\tPath string\n\tOld *Entry\n\tNew *Entry\n}\n<|endoftext|>"} {"text":"<commit_before>package path\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/thatguystone\/cog\/cfs\"\n\t\"github.com\/thatguystone\/cog\/check\"\n)\n\nfunc TestMain(m *testing.M) {\n\tcheck.Main(m)\n}\n\nfunc TestGenerateFromTypesBasic(t *testing.T) {\n\tc := check.New(t)\n\n\tif testing.Short() {\n\t\tc.Skip(\"skipping test in short mode.\")\n\t}\n\n\tsubPath, err := cfs.ImportPath(c.FS.Path(\"subpkg\/subpkg.go\"), false)\n\tc.MustNotError(err)\n\n\totherPath, err := cfs.ImportPath(c.FS.Path(\"subother\/other.go\"), false)\n\tc.MustNotError(err)\n\n\tfile := \"file.go\"\n\tc.FS.SWriteFile(file, fmt.Sprintf(fixtureBasic, subPath))\n\n\tc.FS.SWriteFile(\"subother\/other.go\", fixtureSubOther)\n\tc.FS.SWriteFile(\"subpkg\/subpkg.go\", fmt.Sprintf(fixtureSubpkg, otherPath))\n\n\terr = GenerateFrom(c.FS.Path(file))\n\tc.MustNotError(err)\n\n\ts := c.FS.SReadFile(genFileName(file))\n\n\tc.Contains(s, `append(s.B, \"static\"...)`)\n\tc.Contains(s, \"v.H.Marshal\")\n\tc.Contains(s, \"v.I.BoolInterfaced.Marshal\")\n\tc.Contains(s, \"s.EmitUint32(v.I.O)\")\n\tc.Contains(s, \"func (v *stuff) UnmarshalPath(s path.Decoder) path.Decoder {\")\n\tc.Contains(s, \"v.L.A\")\n\tc.Contains(s, \"v.M.MarshalPath\")\n\tc.Contains(s, \"s = s.ExpectString(&v.SelectorExpr[i])\")\n\n\t\/\/ Exported fields shouldn't be around\n\tc.NotContains(s, \"v.g\")\n}\n\nfunc TestGenerateEndToEnd(t *testing.T) {\n\tc := check.New(t)\n\n\tif testing.Short() {\n\t\tc.Skip(\"skipping test in short mode.\")\n\t}\n\n\tpath, err := cfs.ImportPath(c.FS.Path(\"subpkg\/subpkg.go\"), false)\n\tc.MustNotError(err)\n\n\tc.FS.SWriteFile(\"fixture.go\", fixtureEndToEnd)\n\tc.FS.SWriteFile(\"integrate.go\",\n\t\tfmt.Sprintf(fixtureIntegrate, path))\n\tc.FS.SWriteFile(\"integrate_test.go\", fixtureEndToEndTest)\n\tc.FS.SWriteFile(\"subpkg\/subpkg.go\", fixtureSubpkg)\n\n\terr = GenerateFrom(c.FS.Path(\"integrate.go\"))\n\tc.MustNotError(err)\n\n\twd, err := os.Getwd()\n\tc.MustNotError(err)\n\n\trel, err := filepath.Rel(wd, c.FS.Path(\"\"))\n\tc.MustNotError(err)\n\n\toutput, err := exec.Command(\"go\", \"test\", \".\/\"+rel).CombinedOutput()\n\tc.MustNotError(err, string(output))\n}\n\nfunc TestGenerateFromTypesErrors(t *testing.T) {\n\tc := check.New(t)\n\n\terr := GenerateFrom(\"blah blah blah\")\n\tc.MustError(err)\n}\n<commit_msg>path: Fix end-to-end generate test<commit_after>package path\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/thatguystone\/cog\/cfs\"\n\t\"github.com\/thatguystone\/cog\/check\"\n)\n\nfunc TestMain(m *testing.M) {\n\tcheck.Main(m)\n}\n\nfunc TestGenerateFromTypesBasic(t *testing.T) {\n\tc := check.New(t)\n\n\tif testing.Short() {\n\t\tc.Skip(\"skipping test in short mode.\")\n\t}\n\n\tsubPath, err := cfs.ImportPath(c.FS.Path(\"subpkg\/subpkg.go\"), false)\n\tc.MustNotError(err)\n\n\totherPath, err := cfs.ImportPath(c.FS.Path(\"subother\/other.go\"), false)\n\tc.MustNotError(err)\n\n\tfile := \"file.go\"\n\tc.FS.SWriteFile(file, fmt.Sprintf(fixtureBasic, subPath))\n\n\tc.FS.SWriteFile(\"subother\/other.go\", fixtureSubOther)\n\tc.FS.SWriteFile(\"subpkg\/subpkg.go\", fmt.Sprintf(fixtureSubpkg, otherPath))\n\n\terr = GenerateFrom(c.FS.Path(file))\n\tc.MustNotError(err)\n\n\ts := c.FS.SReadFile(genFileName(file))\n\n\tc.Contains(s, `append(s.B, \"static\"...)`)\n\tc.Contains(s, \"v.H.Marshal\")\n\tc.Contains(s, \"v.I.BoolInterfaced.Marshal\")\n\tc.Contains(s, \"s.EmitUint32(v.I.O)\")\n\tc.Contains(s, \"func (v *stuff) UnmarshalPath(s path.Decoder) path.Decoder {\")\n\tc.Contains(s, \"v.L.A\")\n\tc.Contains(s, \"v.M.MarshalPath\")\n\tc.Contains(s, \"s = s.ExpectString(&v.SelectorExpr[i])\")\n\n\t\/\/ Exported fields shouldn't be around\n\tc.NotContains(s, \"v.g\")\n}\n\nfunc TestGenerateEndToEnd(t *testing.T) {\n\tc := check.New(t)\n\n\tif testing.Short() {\n\t\tc.Skip(\"skipping test in short mode.\")\n\t}\n\n\tsubPath, err := cfs.ImportPath(c.FS.Path(\"subpkg\/subpkg.go\"), false)\n\tc.MustNotError(err)\n\n\totherPath, err := cfs.ImportPath(c.FS.Path(\"subother\/other.go\"), false)\n\tc.MustNotError(err)\n\n\tc.FS.SWriteFile(\"fixture.go\", fixtureEndToEnd)\n\tc.FS.SWriteFile(\"integrate.go\",\n\t\tfmt.Sprintf(fixtureIntegrate, subPath))\n\tc.FS.SWriteFile(\"integrate_test.go\", fixtureEndToEndTest)\n\tc.FS.SWriteFile(\"subother\/other.go\", fixtureSubOther)\n\tc.FS.SWriteFile(\"subpkg\/subpkg.go\", fmt.Sprintf(fixtureSubpkg, otherPath))\n\n\terr = GenerateFrom(c.FS.Path(\"integrate.go\"))\n\tc.MustNotError(err)\n\n\twd, err := os.Getwd()\n\tc.MustNotError(err)\n\n\trel, err := filepath.Rel(wd, c.FS.Path(\"\"))\n\tc.MustNotError(err)\n\n\toutput, err := exec.Command(\"go\", \"test\", \".\/\"+rel).CombinedOutput()\n\tc.MustNotError(err, string(output))\n}\n\nfunc TestGenerateFromTypesErrors(t *testing.T) {\n\tc := check.New(t)\n\n\terr := GenerateFrom(\"blah blah blah\")\n\tc.MustError(err)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage mtls\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com\/ghodss\/yaml\"\n\n\t\"github.com\/golang\/protobuf\/jsonpb\"\n\n\t\"istio.io\/istio\/security\/proto\/authentication\/v1alpha1\"\n)\n\nfunc TestMTLSPolicyChecker_singleResource(t *testing.T) {\n\ttype PolicyResource struct {\n\t\tnamespace string\n\t\tpolicy string\n\t}\n\n\ttests := map[string]struct {\n\t\tmeshPolicy string\n\t\tpolicy PolicyResource\n\t\tservice TargetService\n\t\twant bool\n\t}{\n\t\t\"no policies means no strict mtls\": {\n\t\t\t\/\/ Note no policies specified\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"service specific policy\": {\n\t\t\tpolicy: PolicyResource{\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n- mtls:\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: true,\n\t\t},\n\t\t\"service specific policy uses only the first mtls configuration found\": {\n\t\t\tpolicy: PolicyResource{\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n# Oops, we specified mtls twice!\n- mtls:\n mode: PERMISSIVE\n- mtls:\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"service specific policy using port name\": {\n\t\t\tpolicy: PolicyResource{\n\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - name: https\npeers:\n- mtls:\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortName(\"foobar.my-namespace.svc.cluster.local\", \"https\"),\n\t\t\twant: true,\n\t\t},\n\t\t\"non-matching host service specific policy\": {\n\t\t\tpolicy: PolicyResource{\n\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: baz\n ports:\n - number: 8080\npeers:\n- mtls:\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"non-matching namespace service specific policy\": {\n\t\t\tpolicy: PolicyResource{\n\t\t\t\tnamespace: \"my-other-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n- mtls:\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"policy matches service but is not strict\": {\n\t\t\tpolicy: PolicyResource{\n\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n- mtls:\n mode: PERMISSIVE\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"policy matches service but uses deprecated field\": {\n\t\t\tpolicy: PolicyResource{\n\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n- mtls:\n allowTls: true\n mode: STRICT\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"policy matches service but peer is optional\": {\n\t\t\tpolicy: PolicyResource{\n\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n- mtls:\n mode: STRICT\npeerIsOptional: true\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"policy matches service but does not use mtls\": {\n\t\t\tpolicy: PolicyResource{\n\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n- jwt: # undocumented setting?\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"policy matches every port on service\": {\n\t\t\tpolicy: PolicyResource{\n\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: foobar\npeers:\n- mtls:\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: true,\n\t\t},\n\t\t\"policy matches every service in namespace\": {\n\t\t\tpolicy: PolicyResource{\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\npeers:\n- mtls:\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: true,\n\t\t},\n\t\t\"policy matches entire mesh\": {\n\t\t\tmeshPolicy: `\npeers:\n- mtls:\n`,\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: true,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tpc := NewPolicyChecker()\n\n\t\t\tif tc.meshPolicy != \"\" {\n\t\t\t\tmeshpb, err := yAMLToPolicy(tc.meshPolicy)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"expected: %v, got error when parsing yaml: %v\", tc.want, err)\n\t\t\t\t}\n\t\t\t\tpc.AddMeshPolicy(meshpb)\n\t\t\t}\n\n\t\t\tif tc.policy.policy != \"\" {\n\t\t\t\tpb, err := yAMLToPolicy(tc.policy.policy)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"expected: %v, got error when parsing yaml: %v\", tc.want, err)\n\t\t\t\t}\n\t\t\t\tpc.AddPolicy(tc.policy.namespace, pb)\n\t\t\t}\n\n\t\t\tgot, err := pc.IsServiceMTLSEnforced(tc.service)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"expected: %v, got error: %v\", tc.want, err)\n\t\t\t}\n\t\t\tif got != tc.want {\n\t\t\t\tt.Fatalf(\"expected: %v, got: %v\", tc.want, got)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestMTLSPolicyChecker_multipleResources(t *testing.T) {\n\ttype PolicyResource struct {\n\t\tnamespace string\n\t\tpolicy string\n\t}\n\n\ttests := map[string]struct {\n\t\tmeshPolicy string\n\t\tpolicies []PolicyResource\n\t\tservice TargetService\n\t\twant bool\n\t}{\n\t\t\"namespace policy overrides mesh policy\": {\n\t\t\tmeshPolicy: `\npeers:\n- mtls:\n`,\n\t\t\tpolicies: []PolicyResource{\n\t\t\t\t{\n\t\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n- mtls:\n mode: PERMISSIVE\n`,\n\t\t\t\t},\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"service policy overrides namespace policy\": {\n\t\t\tpolicies: []PolicyResource{\n\t\t\t\t{\n\t\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\t\tpolicy: `\npeers:\n- mtls:\n mode: STRICT\n`,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n- mtls:\n mode: PERMISSIVE\n`,\n\t\t\t\t},\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"port-specific policy overrides service-level policy\": {\n\t\t\tpolicies: []PolicyResource{\n\t\t\t\t{\n\t\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\t\tpolicy: `\ntargets:\n- name: foobar\npeers:\n- mtls:\n mode: STRICT\n`,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n- mtls:\n mode: PERMISSIVE\n`,\n\t\t\t\t},\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tpc := NewPolicyChecker()\n\t\t\t\/\/ Add mesh policy, if it exists.\n\t\t\tmeshpb, err := yAMLToPolicy(tc.meshPolicy)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"expected: %v, got error when parsing yaml: %v\", tc.want, err)\n\t\t\t}\n\t\t\tpc.AddMeshPolicy(meshpb)\n\n\t\t\t\/\/ Add in all other policies\n\t\t\tfor _, p := range tc.policies {\n\t\t\t\tpb, err := yAMLToPolicy(p.policy)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"expected: %v, got error when parsing yaml: %v\", tc.want, err)\n\t\t\t\t}\n\t\t\t\tpc.AddPolicy(p.namespace, pb)\n\t\t\t}\n\n\t\t\tgot, err := pc.IsServiceMTLSEnforced(tc.service)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"expected: %v, got error: %v\", tc.want, err)\n\t\t\t}\n\t\t\tif got != tc.want {\n\t\t\t\tt.Fatalf(\"expected: %v, got: %v\", tc.want, got)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc yAMLToPolicy(yml string) (*v1alpha1.Policy, error) {\n\tjs, err := yaml.YAMLToJSON([]byte(yml))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar pb v1alpha1.Policy\n\terr = jsonpb.Unmarshal(bytes.NewReader(js), &pb)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &pb, nil\n}\n<commit_msg>Check for err when adding policy in tests (#18478)<commit_after>\/\/ Copyright 2019 Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage mtls\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com\/ghodss\/yaml\"\n\n\t\"github.com\/golang\/protobuf\/jsonpb\"\n\n\t\"istio.io\/istio\/security\/proto\/authentication\/v1alpha1\"\n)\n\nfunc TestMTLSPolicyChecker_singleResource(t *testing.T) {\n\ttype PolicyResource struct {\n\t\tnamespace string\n\t\tpolicy string\n\t}\n\n\ttests := map[string]struct {\n\t\tmeshPolicy string\n\t\tpolicy PolicyResource\n\t\tservice TargetService\n\t\twant bool\n\t}{\n\t\t\"no policies means no strict mtls\": {\n\t\t\t\/\/ Note no policies specified\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"service specific policy\": {\n\t\t\tpolicy: PolicyResource{\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n- mtls:\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: true,\n\t\t},\n\t\t\"service specific policy uses only the first mtls configuration found\": {\n\t\t\tpolicy: PolicyResource{\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n# Oops, we specified mtls twice!\n- mtls:\n mode: PERMISSIVE\n- mtls:\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"service specific policy using port name\": {\n\t\t\tpolicy: PolicyResource{\n\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - name: https\npeers:\n- mtls:\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortName(\"foobar.my-namespace.svc.cluster.local\", \"https\"),\n\t\t\twant: true,\n\t\t},\n\t\t\"non-matching host service specific policy\": {\n\t\t\tpolicy: PolicyResource{\n\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: baz\n ports:\n - number: 8080\npeers:\n- mtls:\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"non-matching namespace service specific policy\": {\n\t\t\tpolicy: PolicyResource{\n\t\t\t\tnamespace: \"my-other-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n- mtls:\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"policy matches service but is not strict\": {\n\t\t\tpolicy: PolicyResource{\n\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n- mtls:\n mode: PERMISSIVE\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"policy matches service but uses deprecated field\": {\n\t\t\tpolicy: PolicyResource{\n\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n- mtls:\n allowTls: true\n mode: STRICT\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"policy matches service but peer is optional\": {\n\t\t\tpolicy: PolicyResource{\n\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n- mtls:\n mode: STRICT\npeerIsOptional: true\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"policy matches service but does not use mtls\": {\n\t\t\tpolicy: PolicyResource{\n\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n- jwt: # undocumented setting?\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"policy matches every port on service\": {\n\t\t\tpolicy: PolicyResource{\n\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\ntargets:\n- name: foobar\npeers:\n- mtls:\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: true,\n\t\t},\n\t\t\"policy matches every service in namespace\": {\n\t\t\tpolicy: PolicyResource{\n\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\tpolicy: `\npeers:\n- mtls:\n`,\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: true,\n\t\t},\n\t\t\"policy matches entire mesh\": {\n\t\t\tmeshPolicy: `\npeers:\n- mtls:\n`,\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: true,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tpc := NewPolicyChecker()\n\n\t\t\tif tc.meshPolicy != \"\" {\n\t\t\t\tmeshpb, err := yAMLToPolicy(tc.meshPolicy)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"expected: %v, got error when parsing yaml: %v\", tc.want, err)\n\t\t\t\t}\n\t\t\t\tpc.AddMeshPolicy(meshpb)\n\t\t\t}\n\n\t\t\tif tc.policy.policy != \"\" {\n\t\t\t\tpb, err := yAMLToPolicy(tc.policy.policy)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"expected: %v, got error when parsing yaml: %v\", tc.want, err)\n\t\t\t\t}\n\t\t\t\terr = pc.AddPolicy(tc.policy.namespace, pb)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"expected: %v, got error when adding policy: %v\", tc.want, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tgot, err := pc.IsServiceMTLSEnforced(tc.service)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"expected: %v, got error: %v\", tc.want, err)\n\t\t\t}\n\t\t\tif got != tc.want {\n\t\t\t\tt.Fatalf(\"expected: %v, got: %v\", tc.want, got)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestMTLSPolicyChecker_multipleResources(t *testing.T) {\n\ttype PolicyResource struct {\n\t\tnamespace string\n\t\tpolicy string\n\t}\n\n\ttests := map[string]struct {\n\t\tmeshPolicy string\n\t\tpolicies []PolicyResource\n\t\tservice TargetService\n\t\twant bool\n\t}{\n\t\t\"namespace policy overrides mesh policy\": {\n\t\t\tmeshPolicy: `\npeers:\n- mtls:\n`,\n\t\t\tpolicies: []PolicyResource{\n\t\t\t\t{\n\t\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n- mtls:\n mode: PERMISSIVE\n`,\n\t\t\t\t},\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"service policy overrides namespace policy\": {\n\t\t\tpolicies: []PolicyResource{\n\t\t\t\t{\n\t\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\t\tpolicy: `\npeers:\n- mtls:\n mode: STRICT\n`,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n- mtls:\n mode: PERMISSIVE\n`,\n\t\t\t\t},\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t\t\"port-specific policy overrides service-level policy\": {\n\t\t\tpolicies: []PolicyResource{\n\t\t\t\t{\n\t\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\t\tpolicy: `\ntargets:\n- name: foobar\npeers:\n- mtls:\n mode: STRICT\n`,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tnamespace: \"my-namespace\",\n\t\t\t\t\tpolicy: `\ntargets:\n- name: foobar\n ports:\n - number: 8080\npeers:\n- mtls:\n mode: PERMISSIVE\n`,\n\t\t\t\t},\n\t\t\t},\n\t\t\tservice: NewTargetServiceWithPortNumber(\"foobar.my-namespace.svc.cluster.local\", 8080),\n\t\t\twant: false,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tpc := NewPolicyChecker()\n\t\t\t\/\/ Add mesh policy, if it exists.\n\t\t\tmeshpb, err := yAMLToPolicy(tc.meshPolicy)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"expected: %v, got error when parsing yaml: %v\", tc.want, err)\n\t\t\t}\n\t\t\tpc.AddMeshPolicy(meshpb)\n\n\t\t\t\/\/ Add in all other policies\n\t\t\tfor _, p := range tc.policies {\n\t\t\t\tpb, err := yAMLToPolicy(p.policy)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"expected: %v, got error when parsing yaml: %v\", tc.want, err)\n\t\t\t\t}\n\t\t\t\terr = pc.AddPolicy(p.namespace, pb)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Fatalf(\"expected: %v, got error when adding policy: %v\", tc.want, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tgot, err := pc.IsServiceMTLSEnforced(tc.service)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"expected: %v, got error: %v\", tc.want, err)\n\t\t\t}\n\t\t\tif got != tc.want {\n\t\t\t\tt.Fatalf(\"expected: %v, got: %v\", tc.want, got)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc yAMLToPolicy(yml string) (*v1alpha1.Policy, error) {\n\tjs, err := yaml.YAMLToJSON([]byte(yml))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar pb v1alpha1.Policy\n\terr = jsonpb.Unmarshal(bytes.NewReader(js), &pb)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &pb, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/apigateway\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/schema\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/validation\"\n)\n\nconst defaultAuthorizerTTL = 300\n\nfunc resourceAwsApiGatewayAuthorizer() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsApiGatewayAuthorizerCreate,\n\t\tRead: resourceAwsApiGatewayAuthorizerRead,\n\t\tUpdate: resourceAwsApiGatewayAuthorizerUpdate,\n\t\tDelete: resourceAwsApiGatewayAuthorizerDelete,\n\t\tCustomizeDiff: resourceAwsApiGatewayAuthorizerCustomizeDiff,\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {\n\t\t\t\tidParts := strings.Split(d.Id(), \"\/\")\n\t\t\t\tif len(idParts) != 2 || idParts[0] == \"\" || idParts[1] == \"\" {\n\t\t\t\t\treturn nil, fmt.Errorf(\"Unexpected format of ID (%q), expected REST-API-ID\/AUTHORIZER-ID\", d.Id())\n\t\t\t\t}\n\t\t\t\trestAPIId := idParts[0]\n\t\t\t\tauthorizerId := idParts[1]\n\t\t\t\td.Set(\"rest_api_id\", restAPIId)\n\t\t\t\td.SetId(authorizerId)\n\t\t\t\treturn []*schema.ResourceData{d}, nil\n\t\t\t},\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"authorizer_uri\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true, \/\/ authorizer_uri is required for authorizer TOKEN\/REQUEST\n\t\t\t\tValidateFunc: validateArn,\n\t\t\t},\n\t\t\t\"identity_source\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: \"method.request.header.Authorization\",\n\t\t\t},\n\t\t\t\"name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\t\"rest_api_id\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"type\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: apigateway.AuthorizerTypeToken,\n\t\t\t\tValidateFunc: validation.StringInSlice([]string{\n\t\t\t\t\tapigateway.AuthorizerTypeCognitoUserPools,\n\t\t\t\t\tapigateway.AuthorizerTypeRequest,\n\t\t\t\t\tapigateway.AuthorizerTypeToken,\n\t\t\t\t}, false),\n\t\t\t},\n\t\t\t\"authorizer_credentials\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tValidateFunc: validateArn,\n\t\t\t},\n\t\t\t\"authorizer_result_ttl_in_seconds\": {\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tValidateFunc: validation.IntBetween(0, 3600),\n\t\t\t\tDefault: defaultAuthorizerTTL,\n\t\t\t},\n\t\t\t\"identity_validation_expression\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"provider_arns\": {\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true, \/\/ provider_arns is required for authorizer COGNITO_USER_POOLS.\n\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\tValidateFunc: validateArn,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceAwsApiGatewayAuthorizerCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).apigatewayconn\n\n\tinput := apigateway.CreateAuthorizerInput{\n\t\tIdentitySource: aws.String(d.Get(\"identity_source\").(string)),\n\t\tName: aws.String(d.Get(\"name\").(string)),\n\t\tRestApiId: aws.String(d.Get(\"rest_api_id\").(string)),\n\t\tType: aws.String(d.Get(\"type\").(string)),\n\t\tAuthorizerResultTtlInSeconds: aws.Int64(int64(d.Get(\"authorizer_result_ttl_in_seconds\").(int))),\n\t}\n\n\tif err := validateAuthorizerType(d); err != nil {\n\t\treturn err\n\t}\n\tif v, ok := d.GetOk(\"authorizer_uri\"); ok {\n\t\tinput.AuthorizerUri = aws.String(v.(string))\n\t}\n\tif v, ok := d.GetOk(\"authorizer_credentials\"); ok {\n\t\tinput.AuthorizerCredentials = aws.String(v.(string))\n\t}\n\n\tif v, ok := d.GetOk(\"identity_validation_expression\"); ok {\n\t\tinput.IdentityValidationExpression = aws.String(v.(string))\n\t}\n\tif v, ok := d.GetOk(\"provider_arns\"); ok {\n\t\tinput.ProviderARNs = expandStringSet(v.(*schema.Set))\n\t}\n\n\tlog.Printf(\"[INFO] Creating API Gateway Authorizer: %s\", input)\n\tout, err := conn.CreateAuthorizer(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating API Gateway Authorizer: %w\", err)\n\t}\n\n\td.SetId(aws.StringValue(out.Id))\n\n\treturn resourceAwsApiGatewayAuthorizerRead(d, meta)\n}\n\nfunc resourceAwsApiGatewayAuthorizerRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).apigatewayconn\n\n\tlog.Printf(\"[INFO] Reading API Gateway Authorizer %s\", d.Id())\n\tinput := apigateway.GetAuthorizerInput{\n\t\tAuthorizerId: aws.String(d.Id()),\n\t\tRestApiId: aws.String(d.Get(\"rest_api_id\").(string)),\n\t}\n\n\tauthorizer, err := conn.GetAuthorizer(&input)\n\tif err != nil {\n\t\tif isAWSErr(err, apigateway.ErrCodeNotFoundException, \"\") {\n\t\t\tlog.Printf(\"[WARN] No API Gateway Authorizer found: %s\", input)\n\t\t\td.SetId(\"\")\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tlog.Printf(\"[DEBUG] Received API Gateway Authorizer: %s\", authorizer)\n\n\td.Set(\"authorizer_credentials\", authorizer.AuthorizerCredentials)\n\n\tif authorizer.AuthorizerResultTtlInSeconds != nil {\n\t\td.Set(\"authorizer_result_ttl_in_seconds\", authorizer.AuthorizerResultTtlInSeconds)\n\t} else {\n\t\td.Set(\"authorizer_result_ttl_in_seconds\", defaultAuthorizerTTL)\n\t}\n\n\td.Set(\"authorizer_uri\", authorizer.AuthorizerUri)\n\td.Set(\"identity_source\", authorizer.IdentitySource)\n\td.Set(\"identity_validation_expression\", authorizer.IdentityValidationExpression)\n\td.Set(\"name\", authorizer.Name)\n\td.Set(\"type\", authorizer.Type)\n\td.Set(\"provider_arns\", flattenStringSet(authorizer.ProviderARNs))\n\n\treturn nil\n}\n\nfunc resourceAwsApiGatewayAuthorizerUpdate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).apigatewayconn\n\n\tinput := apigateway.UpdateAuthorizerInput{\n\t\tAuthorizerId: aws.String(d.Id()),\n\t\tRestApiId: aws.String(d.Get(\"rest_api_id\").(string)),\n\t}\n\n\toperations := make([]*apigateway.PatchOperation, 0)\n\n\tif d.HasChange(\"authorizer_uri\") {\n\t\toperations = append(operations, &apigateway.PatchOperation{\n\t\t\tOp: aws.String(apigateway.OpReplace),\n\t\t\tPath: aws.String(\"\/authorizerUri\"),\n\t\t\tValue: aws.String(d.Get(\"authorizer_uri\").(string)),\n\t\t})\n\t}\n\tif d.HasChange(\"identity_source\") {\n\t\toperations = append(operations, &apigateway.PatchOperation{\n\t\t\tOp: aws.String(apigateway.OpReplace),\n\t\t\tPath: aws.String(\"\/identitySource\"),\n\t\t\tValue: aws.String(d.Get(\"identity_source\").(string)),\n\t\t})\n\t}\n\tif d.HasChange(\"name\") {\n\t\toperations = append(operations, &apigateway.PatchOperation{\n\t\t\tOp: aws.String(apigateway.OpReplace),\n\t\t\tPath: aws.String(\"\/name\"),\n\t\t\tValue: aws.String(d.Get(\"name\").(string)),\n\t\t})\n\t}\n\tif d.HasChange(\"type\") {\n\t\toperations = append(operations, &apigateway.PatchOperation{\n\t\t\tOp: aws.String(apigateway.OpReplace),\n\t\t\tPath: aws.String(\"\/type\"),\n\t\t\tValue: aws.String(d.Get(\"type\").(string)),\n\t\t})\n\t}\n\tif d.HasChange(\"authorizer_credentials\") {\n\t\toperations = append(operations, &apigateway.PatchOperation{\n\t\t\tOp: aws.String(apigateway.OpReplace),\n\t\t\tPath: aws.String(\"\/authorizerCredentials\"),\n\t\t\tValue: aws.String(d.Get(\"authorizer_credentials\").(string)),\n\t\t})\n\t}\n\tif d.HasChange(\"authorizer_result_ttl_in_seconds\") {\n\t\toperations = append(operations, &apigateway.PatchOperation{\n\t\t\tOp: aws.String(apigateway.OpReplace),\n\t\t\tPath: aws.String(\"\/authorizerResultTtlInSeconds\"),\n\t\t\tValue: aws.String(fmt.Sprintf(\"%d\", d.Get(\"authorizer_result_ttl_in_seconds\").(int))),\n\t\t})\n\t}\n\tif d.HasChange(\"identity_validation_expression\") {\n\t\toperations = append(operations, &apigateway.PatchOperation{\n\t\t\tOp: aws.String(apigateway.OpReplace),\n\t\t\tPath: aws.String(\"\/identityValidationExpression\"),\n\t\t\tValue: aws.String(d.Get(\"identity_validation_expression\").(string)),\n\t\t})\n\t}\n\tif d.HasChange(\"provider_arns\") {\n\t\told, new := d.GetChange(\"provider_arns\")\n\t\tos := old.(*schema.Set)\n\t\tns := new.(*schema.Set)\n\t\t\/\/ providerARNs can't be empty, so add first and then remove\n\t\tadditionList := ns.Difference(os)\n\t\tfor _, v := range additionList.List() {\n\t\t\toperations = append(operations, &apigateway.PatchOperation{\n\t\t\t\tOp: aws.String(apigateway.OpAdd),\n\t\t\t\tPath: aws.String(\"\/providerARNs\"),\n\t\t\t\tValue: aws.String(v.(string)),\n\t\t\t})\n\t\t}\n\t\tremovalList := os.Difference(ns)\n\t\tfor _, v := range removalList.List() {\n\t\t\toperations = append(operations, &apigateway.PatchOperation{\n\t\t\t\tOp: aws.String(apigateway.OpRemove),\n\t\t\t\tPath: aws.String(\"\/providerARNs\"),\n\t\t\t\tValue: aws.String(v.(string)),\n\t\t\t})\n\t\t}\n\t}\n\n\tinput.PatchOperations = operations\n\n\tlog.Printf(\"[INFO] Updating API Gateway Authorizer: %s\", input)\n\t_, err := conn.UpdateAuthorizer(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"updating API Gateway Authorizer failed: %w\", err)\n\t}\n\n\treturn resourceAwsApiGatewayAuthorizerRead(d, meta)\n}\n\nfunc resourceAwsApiGatewayAuthorizerDelete(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).apigatewayconn\n\tinput := apigateway.DeleteAuthorizerInput{\n\t\tAuthorizerId: aws.String(d.Id()),\n\t\tRestApiId: aws.String(d.Get(\"rest_api_id\").(string)),\n\t}\n\tlog.Printf(\"[INFO] Deleting API Gateway Authorizer: %s\", input)\n\t_, err := conn.DeleteAuthorizer(&input)\n\tif err != nil {\n\t\t\/\/ XXX: Figure out a way to delete the method that depends on the authorizer first\n\t\t\/\/ otherwise the authorizer will be dangling until the API is deleted\n\t\tif !strings.Contains(err.Error(), apigateway.ErrCodeConflictException) {\n\t\t\treturn fmt.Errorf(\"deleting API Gateway Authorizer failed: %w\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc resourceAwsApiGatewayAuthorizerCustomizeDiff(diff *schema.ResourceDiff, v interface{}) error {\n\t\/\/ switch type between COGNITO_USER_POOLS and TOKEN\/REQUEST will create new resource.\n\tif diff.HasChange(\"type\") {\n\t\to, n := diff.GetChange(\"type\")\n\t\tif o.(string) == apigateway.AuthorizerTypeCognitoUserPools || n.(string) == apigateway.AuthorizerTypeCognitoUserPools {\n\t\t\tif err := diff.ForceNew(\"type\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc validateAuthorizerType(d *schema.ResourceData) error {\n\tauthType := d.Get(\"type\").(string)\n\t\/\/ authorizer_uri is required for authorizer TOKEN\/REQUEST\n\tif authType == apigateway.AuthorizerTypeRequest || authType == apigateway.AuthorizerTypeToken {\n\t\tif v, ok := d.GetOk(\"authorizer_uri\"); !ok || v.(string) == \"\" {\n\t\t\treturn fmt.Errorf(\"authorizer_uri must be set non-empty when authorizer type is %s\", authType)\n\t\t}\n\t}\n\t\/\/ provider_arns is required for authorizer COGNITO_USER_POOLS.\n\tif authType == apigateway.AuthorizerTypeCognitoUserPools {\n\t\tif v, ok := d.GetOk(\"provider_arns\"); !ok || len(v.(*schema.Set).List()) == 0 {\n\t\t\treturn fmt.Errorf(\"provider_arns must be set non-empty when authorizer type is %s\", authType)\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>use set len func<commit_after>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/apigateway\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/schema\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/validation\"\n)\n\nconst defaultAuthorizerTTL = 300\n\nfunc resourceAwsApiGatewayAuthorizer() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsApiGatewayAuthorizerCreate,\n\t\tRead: resourceAwsApiGatewayAuthorizerRead,\n\t\tUpdate: resourceAwsApiGatewayAuthorizerUpdate,\n\t\tDelete: resourceAwsApiGatewayAuthorizerDelete,\n\t\tCustomizeDiff: resourceAwsApiGatewayAuthorizerCustomizeDiff,\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {\n\t\t\t\tidParts := strings.Split(d.Id(), \"\/\")\n\t\t\t\tif len(idParts) != 2 || idParts[0] == \"\" || idParts[1] == \"\" {\n\t\t\t\t\treturn nil, fmt.Errorf(\"Unexpected format of ID (%q), expected REST-API-ID\/AUTHORIZER-ID\", d.Id())\n\t\t\t\t}\n\t\t\t\trestAPIId := idParts[0]\n\t\t\t\tauthorizerId := idParts[1]\n\t\t\t\td.Set(\"rest_api_id\", restAPIId)\n\t\t\t\td.SetId(authorizerId)\n\t\t\t\treturn []*schema.ResourceData{d}, nil\n\t\t\t},\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"authorizer_uri\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true, \/\/ authorizer_uri is required for authorizer TOKEN\/REQUEST\n\t\t\t\tValidateFunc: validateArn,\n\t\t\t},\n\t\t\t\"identity_source\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: \"method.request.header.Authorization\",\n\t\t\t},\n\t\t\t\"name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t},\n\t\t\t\"rest_api_id\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"type\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tDefault: apigateway.AuthorizerTypeToken,\n\t\t\t\tValidateFunc: validation.StringInSlice([]string{\n\t\t\t\t\tapigateway.AuthorizerTypeCognitoUserPools,\n\t\t\t\t\tapigateway.AuthorizerTypeRequest,\n\t\t\t\t\tapigateway.AuthorizerTypeToken,\n\t\t\t\t}, false),\n\t\t\t},\n\t\t\t\"authorizer_credentials\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tValidateFunc: validateArn,\n\t\t\t},\n\t\t\t\"authorizer_result_ttl_in_seconds\": {\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tOptional: true,\n\t\t\t\tValidateFunc: validation.IntBetween(0, 3600),\n\t\t\t\tDefault: defaultAuthorizerTTL,\n\t\t\t},\n\t\t\t\"identity_validation_expression\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t},\n\t\t\t\"provider_arns\": {\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true, \/\/ provider_arns is required for authorizer COGNITO_USER_POOLS.\n\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\tValidateFunc: validateArn,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceAwsApiGatewayAuthorizerCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).apigatewayconn\n\n\tinput := apigateway.CreateAuthorizerInput{\n\t\tIdentitySource: aws.String(d.Get(\"identity_source\").(string)),\n\t\tName: aws.String(d.Get(\"name\").(string)),\n\t\tRestApiId: aws.String(d.Get(\"rest_api_id\").(string)),\n\t\tType: aws.String(d.Get(\"type\").(string)),\n\t\tAuthorizerResultTtlInSeconds: aws.Int64(int64(d.Get(\"authorizer_result_ttl_in_seconds\").(int))),\n\t}\n\n\tif err := validateAuthorizerType(d); err != nil {\n\t\treturn err\n\t}\n\tif v, ok := d.GetOk(\"authorizer_uri\"); ok {\n\t\tinput.AuthorizerUri = aws.String(v.(string))\n\t}\n\tif v, ok := d.GetOk(\"authorizer_credentials\"); ok {\n\t\tinput.AuthorizerCredentials = aws.String(v.(string))\n\t}\n\n\tif v, ok := d.GetOk(\"identity_validation_expression\"); ok {\n\t\tinput.IdentityValidationExpression = aws.String(v.(string))\n\t}\n\tif v, ok := d.GetOk(\"provider_arns\"); ok {\n\t\tinput.ProviderARNs = expandStringSet(v.(*schema.Set))\n\t}\n\n\tlog.Printf(\"[INFO] Creating API Gateway Authorizer: %s\", input)\n\tout, err := conn.CreateAuthorizer(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating API Gateway Authorizer: %w\", err)\n\t}\n\n\td.SetId(aws.StringValue(out.Id))\n\n\treturn resourceAwsApiGatewayAuthorizerRead(d, meta)\n}\n\nfunc resourceAwsApiGatewayAuthorizerRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).apigatewayconn\n\n\tlog.Printf(\"[INFO] Reading API Gateway Authorizer %s\", d.Id())\n\tinput := apigateway.GetAuthorizerInput{\n\t\tAuthorizerId: aws.String(d.Id()),\n\t\tRestApiId: aws.String(d.Get(\"rest_api_id\").(string)),\n\t}\n\n\tauthorizer, err := conn.GetAuthorizer(&input)\n\tif err != nil {\n\t\tif isAWSErr(err, apigateway.ErrCodeNotFoundException, \"\") {\n\t\t\tlog.Printf(\"[WARN] No API Gateway Authorizer found: %s\", input)\n\t\t\td.SetId(\"\")\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tlog.Printf(\"[DEBUG] Received API Gateway Authorizer: %s\", authorizer)\n\n\td.Set(\"authorizer_credentials\", authorizer.AuthorizerCredentials)\n\n\tif authorizer.AuthorizerResultTtlInSeconds != nil {\n\t\td.Set(\"authorizer_result_ttl_in_seconds\", authorizer.AuthorizerResultTtlInSeconds)\n\t} else {\n\t\td.Set(\"authorizer_result_ttl_in_seconds\", defaultAuthorizerTTL)\n\t}\n\n\td.Set(\"authorizer_uri\", authorizer.AuthorizerUri)\n\td.Set(\"identity_source\", authorizer.IdentitySource)\n\td.Set(\"identity_validation_expression\", authorizer.IdentityValidationExpression)\n\td.Set(\"name\", authorizer.Name)\n\td.Set(\"type\", authorizer.Type)\n\td.Set(\"provider_arns\", flattenStringSet(authorizer.ProviderARNs))\n\n\treturn nil\n}\n\nfunc resourceAwsApiGatewayAuthorizerUpdate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).apigatewayconn\n\n\tinput := apigateway.UpdateAuthorizerInput{\n\t\tAuthorizerId: aws.String(d.Id()),\n\t\tRestApiId: aws.String(d.Get(\"rest_api_id\").(string)),\n\t}\n\n\toperations := make([]*apigateway.PatchOperation, 0)\n\n\tif d.HasChange(\"authorizer_uri\") {\n\t\toperations = append(operations, &apigateway.PatchOperation{\n\t\t\tOp: aws.String(apigateway.OpReplace),\n\t\t\tPath: aws.String(\"\/authorizerUri\"),\n\t\t\tValue: aws.String(d.Get(\"authorizer_uri\").(string)),\n\t\t})\n\t}\n\tif d.HasChange(\"identity_source\") {\n\t\toperations = append(operations, &apigateway.PatchOperation{\n\t\t\tOp: aws.String(apigateway.OpReplace),\n\t\t\tPath: aws.String(\"\/identitySource\"),\n\t\t\tValue: aws.String(d.Get(\"identity_source\").(string)),\n\t\t})\n\t}\n\tif d.HasChange(\"name\") {\n\t\toperations = append(operations, &apigateway.PatchOperation{\n\t\t\tOp: aws.String(apigateway.OpReplace),\n\t\t\tPath: aws.String(\"\/name\"),\n\t\t\tValue: aws.String(d.Get(\"name\").(string)),\n\t\t})\n\t}\n\tif d.HasChange(\"type\") {\n\t\toperations = append(operations, &apigateway.PatchOperation{\n\t\t\tOp: aws.String(apigateway.OpReplace),\n\t\t\tPath: aws.String(\"\/type\"),\n\t\t\tValue: aws.String(d.Get(\"type\").(string)),\n\t\t})\n\t}\n\tif d.HasChange(\"authorizer_credentials\") {\n\t\toperations = append(operations, &apigateway.PatchOperation{\n\t\t\tOp: aws.String(apigateway.OpReplace),\n\t\t\tPath: aws.String(\"\/authorizerCredentials\"),\n\t\t\tValue: aws.String(d.Get(\"authorizer_credentials\").(string)),\n\t\t})\n\t}\n\tif d.HasChange(\"authorizer_result_ttl_in_seconds\") {\n\t\toperations = append(operations, &apigateway.PatchOperation{\n\t\t\tOp: aws.String(apigateway.OpReplace),\n\t\t\tPath: aws.String(\"\/authorizerResultTtlInSeconds\"),\n\t\t\tValue: aws.String(fmt.Sprintf(\"%d\", d.Get(\"authorizer_result_ttl_in_seconds\").(int))),\n\t\t})\n\t}\n\tif d.HasChange(\"identity_validation_expression\") {\n\t\toperations = append(operations, &apigateway.PatchOperation{\n\t\t\tOp: aws.String(apigateway.OpReplace),\n\t\t\tPath: aws.String(\"\/identityValidationExpression\"),\n\t\t\tValue: aws.String(d.Get(\"identity_validation_expression\").(string)),\n\t\t})\n\t}\n\tif d.HasChange(\"provider_arns\") {\n\t\told, new := d.GetChange(\"provider_arns\")\n\t\tos := old.(*schema.Set)\n\t\tns := new.(*schema.Set)\n\t\t\/\/ providerARNs can't be empty, so add first and then remove\n\t\tadditionList := ns.Difference(os)\n\t\tfor _, v := range additionList.List() {\n\t\t\toperations = append(operations, &apigateway.PatchOperation{\n\t\t\t\tOp: aws.String(apigateway.OpAdd),\n\t\t\t\tPath: aws.String(\"\/providerARNs\"),\n\t\t\t\tValue: aws.String(v.(string)),\n\t\t\t})\n\t\t}\n\t\tremovalList := os.Difference(ns)\n\t\tfor _, v := range removalList.List() {\n\t\t\toperations = append(operations, &apigateway.PatchOperation{\n\t\t\t\tOp: aws.String(apigateway.OpRemove),\n\t\t\t\tPath: aws.String(\"\/providerARNs\"),\n\t\t\t\tValue: aws.String(v.(string)),\n\t\t\t})\n\t\t}\n\t}\n\n\tinput.PatchOperations = operations\n\n\tlog.Printf(\"[INFO] Updating API Gateway Authorizer: %s\", input)\n\t_, err := conn.UpdateAuthorizer(&input)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"updating API Gateway Authorizer failed: %w\", err)\n\t}\n\n\treturn resourceAwsApiGatewayAuthorizerRead(d, meta)\n}\n\nfunc resourceAwsApiGatewayAuthorizerDelete(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).apigatewayconn\n\tinput := apigateway.DeleteAuthorizerInput{\n\t\tAuthorizerId: aws.String(d.Id()),\n\t\tRestApiId: aws.String(d.Get(\"rest_api_id\").(string)),\n\t}\n\tlog.Printf(\"[INFO] Deleting API Gateway Authorizer: %s\", input)\n\t_, err := conn.DeleteAuthorizer(&input)\n\tif err != nil {\n\t\t\/\/ XXX: Figure out a way to delete the method that depends on the authorizer first\n\t\t\/\/ otherwise the authorizer will be dangling until the API is deleted\n\t\tif !strings.Contains(err.Error(), apigateway.ErrCodeConflictException) {\n\t\t\treturn fmt.Errorf(\"deleting API Gateway Authorizer failed: %w\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc resourceAwsApiGatewayAuthorizerCustomizeDiff(diff *schema.ResourceDiff, v interface{}) error {\n\t\/\/ switch type between COGNITO_USER_POOLS and TOKEN\/REQUEST will create new resource.\n\tif diff.HasChange(\"type\") {\n\t\to, n := diff.GetChange(\"type\")\n\t\tif o.(string) == apigateway.AuthorizerTypeCognitoUserPools || n.(string) == apigateway.AuthorizerTypeCognitoUserPools {\n\t\t\tif err := diff.ForceNew(\"type\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc validateAuthorizerType(d *schema.ResourceData) error {\n\tauthType := d.Get(\"type\").(string)\n\t\/\/ authorizer_uri is required for authorizer TOKEN\/REQUEST\n\tif authType == apigateway.AuthorizerTypeRequest || authType == apigateway.AuthorizerTypeToken {\n\t\tif v, ok := d.GetOk(\"authorizer_uri\"); !ok || v.(string) == \"\" {\n\t\t\treturn fmt.Errorf(\"authorizer_uri must be set non-empty when authorizer type is %s\", authType)\n\t\t}\n\t}\n\t\/\/ provider_arns is required for authorizer COGNITO_USER_POOLS.\n\tif authType == apigateway.AuthorizerTypeCognitoUserPools {\n\t\tif v, ok := d.GetOk(\"provider_arns\"); !ok || v.(*schema.Set).Len() == 0 {\n\t\t\treturn fmt.Errorf(\"provider_arns must be set non-empty when authorizer type is %s\", authType)\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2020 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage download\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\n\t\"github.com\/blang\/semver\/v4\"\n\t\"github.com\/pkg\/errors\"\n\t\"k8s.io\/klog\/v2\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/out\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/style\"\n)\n\nfunc driverWithChecksumURL(name string, v semver.Version) string {\n\tbase := fmt.Sprintf(\"https:\/\/github.com\/kubernetes\/minikube\/releases\/download\/v%s\/%s\", v, name)\n\treturn fmt.Sprintf(\"%s?checksum=file:%s.sha256\", base, base)\n}\nfunc driverWithArchAndChecksumURL(name string, v semver.Version) string {\n\tbase := fmt.Sprintf(\"https:\/\/github.com\/kubernetes\/minikube\/releases\/download\/v%s-%s\/%s\", v, runtime.GOARCH, name)\n\treturn fmt.Sprintf(\"%s?checksum=file:%s.sha256\", base, base)\n}\n\n\/\/ Driver downloads an arbitrary driver\nfunc Driver(name string, destination string, v semver.Version) error {\n\tout.Step(style.FileDownload, \"Downloading driver {{.driver}}:\", out.V{\"driver\": name})\n\n\tarchURL := driverWithArchAndChecksumURL(name, v)\n\tif err := download(archURL, destination); err != nil {\n\t\tklog.Infof(\"failed to download arch specific driver: %v. trying to get the common version\", err)\n\t\tif err := download(driverWithChecksumURL(name, v), destination); err != nil {\n\t\t\treturn errors.Wrap(err, \"download\")\n\t\t}\n\t}\n\n\t\/\/ Give downloaded drivers a baseline decent file permission\n\treturn os.Chmod(destination, 0o755)\n}\n<commit_msg>Fix bug in github download URL.<commit_after>\/*\nCopyright 2020 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage download\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\n\t\"github.com\/blang\/semver\/v4\"\n\t\"github.com\/pkg\/errors\"\n\t\"k8s.io\/klog\/v2\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/out\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/style\"\n)\n\nfunc driverWithChecksumURL(name string, v semver.Version) string {\n\tbase := fmt.Sprintf(\"https:\/\/github.com\/kubernetes\/minikube\/releases\/download\/v%s\/%s\", v, name)\n\treturn fmt.Sprintf(\"%s?checksum=file:%s.sha256\", base, base)\n}\nfunc driverWithArchAndChecksumURL(name string, v semver.Version) string {\n\tbase := fmt.Sprintf(\"https:\/\/github.com\/kubernetes\/minikube\/releases\/download\/v%s\/%s-%s\", v, name, runtime.GOARCH)\n\treturn fmt.Sprintf(\"%s?checksum=file:%s.sha256\", base, base)\n}\n\n\/\/ Driver downloads an arbitrary driver\nfunc Driver(name string, destination string, v semver.Version) error {\n\tout.Step(style.FileDownload, \"Downloading driver {{.driver}}:\", out.V{\"driver\": name})\n\n\tarchURL := driverWithArchAndChecksumURL(name, v)\n\tif err := download(archURL, destination); err != nil {\n\t\tklog.Infof(\"failed to download arch specific driver: %v. trying to get the common version\", err)\n\t\tif err := download(driverWithChecksumURL(name, v), destination); err != nil {\n\t\t\treturn errors.Wrap(err, \"download\")\n\t\t}\n\t}\n\n\t\/\/ Give downloaded drivers a baseline decent file permission\n\treturn os.Chmod(destination, 0o755)\n}\n<|endoftext|>"} {"text":"<commit_before>package jobutil\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n)\n\nfunc http_reader(address string) io.ReadCloser {\n\tresp, err := http.Get(address)\n\tCheck(err)\n\tif resp.StatusCode != http.StatusOK {\n\t\tlog.Fatal(\"bad response: \", resp.Status)\n\t}\n\treturn resp.Body\n}\n\nfunc absolute_disco_path(address string, disco_data string) string {\n\treturn path.Join(disco_data, address[len(\"disco:\/\/\"):])\n}\n\nfunc absolute_dir_path(address string, disco_data string) string {\n\treturn path.Join(disco_data, address[len(\"dir:\/\/\"):])\n}\n\ntype DiscoReader struct {\n\tfile *os.File\n}\n\nfunc (dr *DiscoReader) Read(p []byte) (n int, err error) {\n\treturn dr.file.Read(p)\n}\n\nfunc (dr *DiscoReader) Close() error {\n\treturn dr.file.Close()\n}\n\nfunc disco_reader(address string, dataDir string) io.ReadCloser {\n\tdr := new(DiscoReader)\n\tpath := absolute_disco_path(address, dataDir)\n\tfile, err := os.Open(path)\n\tCheck(err)\n\tdr.file = file\n\treturn dr\n}\n\ntype DirReader struct {\n\tdirfile *os.File\n\tscanner *bufio.Scanner\n\tfile *os.File\n\tdisco_data string\n}\n\nfunc (dr *DirReader) read_data(p []byte) (int, error) {\n\tn, err := dr.file.Read(p)\n\tif n == 0 && err == io.EOF {\n\t\terr = dr.file.Close()\n\t\tCheck(err)\n\t\tdr.file = nil\n\t\treturn dr.Read(p)\n\t}\n\treturn n, err\n}\n\nfunc (dr *DirReader) Read(p []byte) (n int, err error) {\n\tif dr.file != nil {\n\t\treturn dr.read_data(p)\n\t}\n\t\/\/ first read\n\tvar line string\n\tif dr.scanner.Scan() {\n\t\tline = dr.scanner.Text()\n\t}\n\tif err := dr.scanner.Err(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar address string\n\tvar label, size int\n\tfmt.Sscanf(line, \"%d %s %d\", &label, &address, &size)\n\tpath := absolute_disco_path(address, dr.disco_data)\n\tdr.file, err = os.Open(path)\n\tCheck(err)\n\treturn dr.read_data(p)\n}\n\nfunc (dr *DirReader) Close() error {\n\tif dr.file != nil {\n\t\tdr.file.Close()\n\t}\n\treturn dr.dirfile.Close()\n}\n\nfunc dir_reader(address string, dataDir string) io.ReadCloser {\n\tdr := new(DirReader)\n\tpath := absolute_dir_path(address, dataDir)\n\tfile, err := os.Open(path)\n\tCheck(err)\n\tdr.dirfile = file\n\tdr.scanner = bufio.NewScanner(dr.dirfile)\n\tdr.file = nil\n\tdr.disco_data = dataDir\n\treturn dr\n}\n\nfunc scheme_split(url string) (scheme, rest string) {\n\tif index := strings.Index(url, \":\/\/\"); index == -1 {\n\t\treturn \"\", url\n\t} else {\n\t\treturn url[:index], url[index+len(\":\/\/\"):]\n\t}\n}\n\nfunc loc_str(url string) (scheme, locstr, path string) {\n\tscheme, rest := scheme_split(url)\n\n\tif index := strings.Index(rest, \"\/\"); index == -1 {\n\t\tlocstr = rest\n\t\tpath = \"\"\n\t} else {\n\t\tlocstr, path = rest[:index], rest[index+len(\"\/\"):]\n\t}\n\treturn scheme, locstr, path\n}\n\nfunc HostAndPort(url string) (host, port string) {\n\t_, locstr, _ := loc_str(url)\n\tif index := strings.Index(locstr, \":\"); index == -1 {\n\t\thost = locstr\n\t\tport = \"\"\n\t} else {\n\t\thost, port = locstr[:index], locstr[index+len(\":\"):]\n\t}\n\treturn\n}\n\nfunc convert_uri(uri string) string {\n\tscheme, locstr, path := loc_str(uri)\n\t\/\/ TODO make the conversion smarter! Do not convert if this is the localhost\n\t\/\/ or the hostname matches our hostname.\n\t\/\/ TODO add the dir scheme\n\tif scheme == \"disco\" {\n\t\treturn \"http:\/\/\" + Setting(\"DISCO_MASTER\") + \":\" + Setting(\"DISCO_PORT\") +\n\t\t\t\"\/disco\/\" + locstr + \"\/\" + path\n\t}\n\treturn uri\n}\n\nfunc AddressReader(address string, dataDir string) io.ReadCloser {\n\taddress = convert_uri(address)\n\tscheme, _ := scheme_split(address)\n\n\tswitch scheme {\n\tcase \"http\":\n\t\tfallthrough\n\tcase \"https\":\n\t\treturn http_reader(address)\n\tcase \"disco\":\n\t\treturn disco_reader(address, dataDir)\n\tcase \"dir\":\n\t\treturn dir_reader(address, dataDir)\n\tdefault:\n\t\tlog.Fatal(\"Cannot read the input: \", scheme, \" : \", address)\n\t}\n\treturn nil\n}\n<commit_msg>Get the results from the replica instead of the master.<commit_after>package jobutil\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n)\n\nfunc http_reader(address string) io.ReadCloser {\n\tresp, err := http.Get(address)\n\tCheck(err)\n\tif resp.StatusCode != http.StatusOK {\n\t\tlog.Fatal(\"bad response: \", resp.Status)\n\t}\n\treturn resp.Body\n}\n\nfunc absolute_disco_path(address string, disco_data string) string {\n\treturn path.Join(disco_data, address[len(\"disco:\/\/\"):])\n}\n\nfunc absolute_dir_path(address string, disco_data string) string {\n\treturn path.Join(disco_data, address[len(\"dir:\/\/\"):])\n}\n\ntype DiscoReader struct {\n\tfile *os.File\n}\n\nfunc (dr *DiscoReader) Read(p []byte) (n int, err error) {\n\treturn dr.file.Read(p)\n}\n\nfunc (dr *DiscoReader) Close() error {\n\treturn dr.file.Close()\n}\n\nfunc disco_reader(address string, dataDir string) io.ReadCloser {\n\tdr := new(DiscoReader)\n\tpath := absolute_disco_path(address, dataDir)\n\tfile, err := os.Open(path)\n\tCheck(err)\n\tdr.file = file\n\treturn dr\n}\n\ntype DirReader struct {\n\tdirfile *os.File\n\tscanner *bufio.Scanner\n\tfile *os.File\n\tdisco_data string\n}\n\nfunc (dr *DirReader) read_data(p []byte) (int, error) {\n\tn, err := dr.file.Read(p)\n\tif n == 0 && err == io.EOF {\n\t\terr = dr.file.Close()\n\t\tCheck(err)\n\t\tdr.file = nil\n\t\treturn dr.Read(p)\n\t}\n\treturn n, err\n}\n\nfunc (dr *DirReader) Read(p []byte) (n int, err error) {\n\tif dr.file != nil {\n\t\treturn dr.read_data(p)\n\t}\n\t\/\/ first read\n\tvar line string\n\tif dr.scanner.Scan() {\n\t\tline = dr.scanner.Text()\n\t}\n\tif err := dr.scanner.Err(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar address string\n\tvar label, size int\n\tfmt.Sscanf(line, \"%d %s %d\", &label, &address, &size)\n\tpath := absolute_disco_path(address, dr.disco_data)\n\tdr.file, err = os.Open(path)\n\tCheck(err)\n\treturn dr.read_data(p)\n}\n\nfunc (dr *DirReader) Close() error {\n\tif dr.file != nil {\n\t\tdr.file.Close()\n\t}\n\treturn dr.dirfile.Close()\n}\n\nfunc dir_reader(address string, dataDir string) io.ReadCloser {\n\tdr := new(DirReader)\n\tpath := absolute_dir_path(address, dataDir)\n\tfile, err := os.Open(path)\n\tCheck(err)\n\tdr.dirfile = file\n\tdr.scanner = bufio.NewScanner(dr.dirfile)\n\tdr.file = nil\n\tdr.disco_data = dataDir\n\treturn dr\n}\n\nfunc scheme_split(url string) (scheme, rest string) {\n\tif index := strings.Index(url, \":\/\/\"); index == -1 {\n\t\treturn \"\", url\n\t} else {\n\t\treturn url[:index], url[index+len(\":\/\/\"):]\n\t}\n}\n\nfunc loc_str(url string) (scheme, locstr, path string) {\n\tscheme, rest := scheme_split(url)\n\n\tif index := strings.Index(rest, \"\/\"); index == -1 {\n\t\tlocstr = rest\n\t\tpath = \"\"\n\t} else {\n\t\tlocstr, path = rest[:index], rest[index+len(\"\/\"):]\n\t}\n\treturn scheme, locstr, path\n}\n\nfunc HostAndPort(url string) (host, port string) {\n\t_, locstr, _ := loc_str(url)\n\tif index := strings.Index(locstr, \":\"); index == -1 {\n\t\thost = locstr\n\t\tport = \"\"\n\t} else {\n\t\thost, port = locstr[:index], locstr[index+len(\":\"):]\n\t}\n\treturn\n}\n\nfunc convert_uri(uri string) string {\n\tscheme, locstr, path := loc_str(uri)\n\t\/\/ TODO make the conversion smarter! Do not convert if this is the localhost\n\t\/\/ or the hostname matches our hostname.\n\t\/\/ TODO add the dir scheme\n\tif scheme == \"disco\" {\n\t\treturn \"http:\/\/\" + locstr + \":\" + Setting(\"DISCO_PORT\") +\n\t\t\t\"\/disco\/\" + locstr + \"\/\" + path\n\t}\n\treturn uri\n}\n\nfunc AddressReader(address string, dataDir string) io.ReadCloser {\n\taddress = convert_uri(address)\n\tscheme, _ := scheme_split(address)\n\n\tswitch scheme {\n\tcase \"http\":\n\t\tfallthrough\n\tcase \"https\":\n\t\treturn http_reader(address)\n\tcase \"disco\":\n\t\treturn disco_reader(address, dataDir)\n\tcase \"dir\":\n\t\treturn dir_reader(address, dataDir)\n\tdefault:\n\t\tlog.Fatal(\"Cannot read the input: \", scheme, \" : \", address)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package bmatrix\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"html\"\n\t\"mime\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/42wim\/matterbridge\/bridge\"\n\t\"github.com\/42wim\/matterbridge\/bridge\/config\"\n\t\"github.com\/42wim\/matterbridge\/bridge\/helper\"\n\tmatrix \"github.com\/matterbridge\/gomatrix\"\n)\n\ntype Bmatrix struct {\n\tmc *matrix.Client\n\tUserID string\n\tRoomMap map[string]string\n\tsync.RWMutex\n\t*bridge.Config\n}\n\nfunc New(cfg *bridge.Config) bridge.Bridger {\n\tb := &Bmatrix{Config: cfg}\n\tb.RoomMap = make(map[string]string)\n\treturn b\n}\n\nfunc (b *Bmatrix) Connect() error {\n\tvar err error\n\tb.Log.Infof(\"Connecting %s\", b.GetString(\"Server\"))\n\tb.mc, err = matrix.NewClient(b.GetString(\"Server\"), \"\", \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := b.mc.Login(&matrix.ReqLogin{\n\t\tType: \"m.login.password\",\n\t\tUser: b.GetString(\"Login\"),\n\t\tPassword: b.GetString(\"Password\"),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tb.mc.SetCredentials(resp.UserID, resp.AccessToken)\n\tb.UserID = resp.UserID\n\tb.Log.Info(\"Connection succeeded\")\n\tgo b.handlematrix()\n\treturn nil\n}\n\nfunc (b *Bmatrix) Disconnect() error {\n\treturn nil\n}\n\nfunc (b *Bmatrix) JoinChannel(channel config.ChannelInfo) error {\n\tresp, err := b.mc.JoinRoom(channel.Name, \"\", nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tb.Lock()\n\tb.RoomMap[resp.RoomID] = channel.Name\n\tb.Unlock()\n\treturn err\n}\n\nfunc (b *Bmatrix) Send(msg config.Message) (string, error) {\n\tb.Log.Debugf(\"=> Receiving %#v\", msg)\n\n\tchannel := b.getRoomID(msg.Channel)\n\tb.Log.Debugf(\"Channel %s maps to channel id %s\", msg.Channel, channel)\n\n\t\/\/ Make a action \/me of the message\n\tif msg.Event == config.EventUserAction {\n\t\tm := matrix.TextMessage{\n\t\t\tMsgType: \"m.emote\",\n\t\t\tBody: msg.Username + msg.Text,\n\t\t}\n\t\tresp, err := b.mc.SendMessageEvent(channel, \"m.room.message\", m)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn resp.EventID, err\n\t}\n\n\t\/\/ Delete message\n\tif msg.Event == config.EventMsgDelete {\n\t\tif msg.ID == \"\" {\n\t\t\treturn \"\", nil\n\t\t}\n\t\tresp, err := b.mc.RedactEvent(channel, msg.ID, &matrix.ReqRedact{})\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn resp.EventID, err\n\t}\n\n\t\/\/ Upload a file if it exists\n\tif msg.Extra != nil {\n\t\tfor _, rmsg := range helper.HandleExtra(&msg, b.General) {\n\t\t\tif _, err := b.mc.SendText(channel, rmsg.Username+rmsg.Text); err != nil {\n\t\t\t\tb.Log.Errorf(\"sendText failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\t\/\/ check if we have files to upload (from slack, telegram or mattermost)\n\t\tif len(msg.Extra[\"file\"]) > 0 {\n\t\t\treturn b.handleUploadFiles(&msg, channel)\n\t\t}\n\t}\n\n\t\/\/ Edit message if we have an ID\n\t\/\/ matrix has no editing support\n\n\t\/\/ Post normal message with HTML support (eg riot.im)\n\tresp, err := b.mc.SendHTML(channel, msg.Text, html.EscapeString(msg.Username)+helper.ParseMarkdown(msg.Text))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn resp.EventID, err\n}\n\nfunc (b *Bmatrix) getRoomID(channel string) string {\n\tb.RLock()\n\tdefer b.RUnlock()\n\tfor ID, name := range b.RoomMap {\n\t\tif name == channel {\n\t\t\treturn ID\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc (b *Bmatrix) handlematrix() {\n\tsyncer := b.mc.Syncer.(*matrix.DefaultSyncer)\n\tsyncer.OnEventType(\"m.room.redaction\", b.handleEvent)\n\tsyncer.OnEventType(\"m.room.message\", b.handleEvent)\n\tgo func() {\n\t\tfor {\n\t\t\tif err := b.mc.Sync(); err != nil {\n\t\t\t\tb.Log.Println(\"Sync() returned \", err)\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (b *Bmatrix) handleEvent(ev *matrix.Event) {\n\tb.Log.Debugf(\"== Receiving event: %#v\", ev)\n\tif ev.Sender != b.UserID {\n\t\tb.RLock()\n\t\tchannel, ok := b.RoomMap[ev.RoomID]\n\t\tb.RUnlock()\n\t\tif !ok {\n\t\t\tb.Log.Debugf(\"Unknown room %s\", ev.RoomID)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ TODO download avatar\n\n\t\t\/\/ Create our message\n\t\trmsg := config.Message{Username: ev.Sender[1:], Channel: channel, Account: b.Account, UserID: ev.Sender, ID: ev.ID}\n\n\t\t\/\/ Text must be a string\n\t\tif rmsg.Text, ok = ev.Content[\"body\"].(string); !ok {\n\t\t\tb.Log.Errorf(\"Content[body] is not a string: %T\\n%#v\",\n\t\t\t\tev.Content[\"body\"], ev.Content)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Remove homeserver suffix if configured\n\t\tif b.GetBool(\"NoHomeServerSuffix\") {\n\t\t\tre := regexp.MustCompile(\"(.*?):.*\")\n\t\t\trmsg.Username = re.ReplaceAllString(rmsg.Username, `$1`)\n\t\t}\n\n\t\t\/\/ Delete event\n\t\tif ev.Type == \"m.room.redaction\" {\n\t\t\trmsg.Event = config.EventMsgDelete\n\t\t\trmsg.ID = ev.Redacts\n\t\t\trmsg.Text = config.EventMsgDelete\n\t\t\tb.Remote <- rmsg\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Do we have a \/me action\n\t\tif ev.Content[\"msgtype\"].(string) == \"m.emote\" {\n\t\t\trmsg.Event = config.EventUserAction\n\t\t}\n\n\t\t\/\/ Do we have attachments\n\t\tif b.containsAttachment(ev.Content) {\n\t\t\terr := b.handleDownloadFile(&rmsg, ev.Content)\n\t\t\tif err != nil {\n\t\t\t\tb.Log.Errorf(\"download failed: %#v\", err)\n\t\t\t}\n\t\t}\n\n\t\tb.Log.Debugf(\"<= Sending message from %s on %s to gateway\", ev.Sender, b.Account)\n\t\tb.Remote <- rmsg\n\t}\n}\n\n\/\/ handleDownloadFile handles file download\nfunc (b *Bmatrix) handleDownloadFile(rmsg *config.Message, content map[string]interface{}) error {\n\tvar (\n\t\tok bool\n\t\turl, name, msgtype, mtype string\n\t\tinfo map[string]interface{}\n\t\tsize float64\n\t)\n\n\trmsg.Extra = make(map[string][]interface{})\n\tif url, ok = content[\"url\"].(string); !ok {\n\t\treturn fmt.Errorf(\"url isn't a %T\", url)\n\t}\n\turl = strings.Replace(url, \"mxc:\/\/\", b.GetString(\"Server\")+\"\/_matrix\/media\/v1\/download\/\", -1)\n\n\tif info, ok = content[\"info\"].(map[string]interface{}); !ok {\n\t\treturn fmt.Errorf(\"info isn't a %T\", info)\n\t}\n\tif size, ok = info[\"size\"].(float64); !ok {\n\t\treturn fmt.Errorf(\"size isn't a %T\", size)\n\t}\n\tif name, ok = content[\"body\"].(string); !ok {\n\t\treturn fmt.Errorf(\"name isn't a %T\", name)\n\t}\n\tif msgtype, ok = content[\"msgtype\"].(string); !ok {\n\t\treturn fmt.Errorf(\"msgtype isn't a %T\", msgtype)\n\t}\n\tif mtype, ok = info[\"mimetype\"].(string); !ok {\n\t\treturn fmt.Errorf(\"mtype isn't a %T\", mtype)\n\t}\n\n\t\/\/ check if we have an image uploaded without extension\n\tif !strings.Contains(name, \".\") {\n\t\tif msgtype == \"m.image\" {\n\t\t\tmext, _ := mime.ExtensionsByType(mtype)\n\t\t\tif len(mext) > 0 {\n\t\t\t\tname += mext[0]\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ just a default .png extension if we don't have mime info\n\t\t\tname += \".png\"\n\t\t}\n\t}\n\n\t\/\/ check if the size is ok\n\terr := helper.HandleDownloadSize(b.Log, rmsg, name, int64(size), b.General)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ actually download the file\n\tdata, err := helper.DownloadFile(url)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"download %s failed %#v\", url, err)\n\t}\n\t\/\/ add the downloaded data to the message\n\thelper.HandleDownloadData(b.Log, rmsg, name, \"\", url, data, b.General)\n\treturn nil\n}\n\n\/\/ handleUploadFiles handles native upload of files.\nfunc (b *Bmatrix) handleUploadFiles(msg *config.Message, channel string) (string, error) {\n\tfor _, f := range msg.Extra[\"file\"] {\n\t\tif fi, ok := f.(config.FileInfo); ok {\n\t\t\tb.handleUploadFile(msg, channel, &fi)\n\t\t}\n\t}\n\treturn \"\", nil\n}\n\n\/\/ handleUploadFile handles native upload of a file.\nfunc (b *Bmatrix) handleUploadFile(msg *config.Message, channel string, fi *config.FileInfo) {\n\tcontent := bytes.NewReader(*fi.Data)\n\tsp := strings.Split(fi.Name, \".\")\n\tmtype := mime.TypeByExtension(\".\" + sp[len(sp)-1])\n\tif !strings.Contains(mtype, \"image\") && !strings.Contains(mtype, \"video\") {\n\t\treturn\n\t}\n\tif fi.Comment != \"\" {\n\t\t_, err := b.mc.SendText(channel, msg.Username+fi.Comment)\n\t\tif err != nil {\n\t\t\tb.Log.Errorf(\"file comment failed: %#v\", err)\n\t\t}\n\t}\n\tb.Log.Debugf(\"uploading file: %s %s\", fi.Name, mtype)\n\tres, err := b.mc.UploadToContentRepo(content, mtype, int64(len(*fi.Data)))\n\tif err != nil {\n\t\tb.Log.Errorf(\"file upload failed: %#v\", err)\n\t\treturn\n\t}\n\n\tswitch {\n\tcase strings.Contains(mtype, \"video\"):\n\t\tb.Log.Debugf(\"sendVideo %s\", res.ContentURI)\n\t\t_, err = b.mc.SendVideo(channel, fi.Name, res.ContentURI)\n\t\tif err != nil {\n\t\t\tb.Log.Errorf(\"sendVideo failed: %#v\", err)\n\t\t}\n\tcase strings.Contains(mtype, \"image\"):\n\t\tb.Log.Debugf(\"sendImage %s\", res.ContentURI)\n\t\t_, err = b.mc.SendImage(channel, fi.Name, res.ContentURI)\n\t\tif err != nil {\n\t\t\tb.Log.Errorf(\"sendImage failed: %#v\", err)\n\t\t}\n\t}\n\tb.Log.Debugf(\"result: %#v\", res)\n}\n\n\/\/ skipMessages returns true if this message should not be handled\nfunc (b *Bmatrix) containsAttachment(content map[string]interface{}) bool {\n\t\/\/ Skip empty messages\n\tif content[\"msgtype\"] == nil {\n\t\treturn false\n\t}\n\n\t\/\/ Only allow image,video or file msgtypes\n\tif !(content[\"msgtype\"].(string) == \"m.image\" ||\n\t\tcontent[\"msgtype\"].(string) == \"m.video\" ||\n\t\tcontent[\"msgtype\"].(string) == \"m.file\") {\n\t\treturn false\n\t}\n\treturn true\n}\n<commit_msg>Fix displaying usernames for plain text clients. (matrix) (#685)<commit_after>package bmatrix\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"html\"\n\t\"mime\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/42wim\/matterbridge\/bridge\"\n\t\"github.com\/42wim\/matterbridge\/bridge\/config\"\n\t\"github.com\/42wim\/matterbridge\/bridge\/helper\"\n\tmatrix \"github.com\/matterbridge\/gomatrix\"\n)\n\ntype Bmatrix struct {\n\tmc *matrix.Client\n\tUserID string\n\tRoomMap map[string]string\n\tsync.RWMutex\n\t*bridge.Config\n}\n\nfunc New(cfg *bridge.Config) bridge.Bridger {\n\tb := &Bmatrix{Config: cfg}\n\tb.RoomMap = make(map[string]string)\n\treturn b\n}\n\nfunc (b *Bmatrix) Connect() error {\n\tvar err error\n\tb.Log.Infof(\"Connecting %s\", b.GetString(\"Server\"))\n\tb.mc, err = matrix.NewClient(b.GetString(\"Server\"), \"\", \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp, err := b.mc.Login(&matrix.ReqLogin{\n\t\tType: \"m.login.password\",\n\t\tUser: b.GetString(\"Login\"),\n\t\tPassword: b.GetString(\"Password\"),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tb.mc.SetCredentials(resp.UserID, resp.AccessToken)\n\tb.UserID = resp.UserID\n\tb.Log.Info(\"Connection succeeded\")\n\tgo b.handlematrix()\n\treturn nil\n}\n\nfunc (b *Bmatrix) Disconnect() error {\n\treturn nil\n}\n\nfunc (b *Bmatrix) JoinChannel(channel config.ChannelInfo) error {\n\tresp, err := b.mc.JoinRoom(channel.Name, \"\", nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tb.Lock()\n\tb.RoomMap[resp.RoomID] = channel.Name\n\tb.Unlock()\n\treturn err\n}\n\nfunc (b *Bmatrix) Send(msg config.Message) (string, error) {\n\tb.Log.Debugf(\"=> Receiving %#v\", msg)\n\n\tchannel := b.getRoomID(msg.Channel)\n\tb.Log.Debugf(\"Channel %s maps to channel id %s\", msg.Channel, channel)\n\n\t\/\/ Make a action \/me of the message\n\tif msg.Event == config.EventUserAction {\n\t\tm := matrix.TextMessage{\n\t\t\tMsgType: \"m.emote\",\n\t\t\tBody: msg.Username + msg.Text,\n\t\t}\n\t\tresp, err := b.mc.SendMessageEvent(channel, \"m.room.message\", m)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn resp.EventID, err\n\t}\n\n\t\/\/ Delete message\n\tif msg.Event == config.EventMsgDelete {\n\t\tif msg.ID == \"\" {\n\t\t\treturn \"\", nil\n\t\t}\n\t\tresp, err := b.mc.RedactEvent(channel, msg.ID, &matrix.ReqRedact{})\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn resp.EventID, err\n\t}\n\n\t\/\/ Upload a file if it exists\n\tif msg.Extra != nil {\n\t\tfor _, rmsg := range helper.HandleExtra(&msg, b.General) {\n\t\t\tif _, err := b.mc.SendText(channel, rmsg.Username+rmsg.Text); err != nil {\n\t\t\t\tb.Log.Errorf(\"sendText failed: %s\", err)\n\t\t\t}\n\t\t}\n\t\t\/\/ check if we have files to upload (from slack, telegram or mattermost)\n\t\tif len(msg.Extra[\"file\"]) > 0 {\n\t\t\treturn b.handleUploadFiles(&msg, channel)\n\t\t}\n\t}\n\n\t\/\/ Edit message if we have an ID\n\t\/\/ matrix has no editing support\n\n\t\/\/ Post normal message with HTML support (eg riot.im)\n\tresp, err := b.mc.SendHTML(channel, msg.Username+msg.Text, html.EscapeString(msg.Username)+helper.ParseMarkdown(msg.Text))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn resp.EventID, err\n}\n\nfunc (b *Bmatrix) getRoomID(channel string) string {\n\tb.RLock()\n\tdefer b.RUnlock()\n\tfor ID, name := range b.RoomMap {\n\t\tif name == channel {\n\t\t\treturn ID\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc (b *Bmatrix) handlematrix() {\n\tsyncer := b.mc.Syncer.(*matrix.DefaultSyncer)\n\tsyncer.OnEventType(\"m.room.redaction\", b.handleEvent)\n\tsyncer.OnEventType(\"m.room.message\", b.handleEvent)\n\tgo func() {\n\t\tfor {\n\t\t\tif err := b.mc.Sync(); err != nil {\n\t\t\t\tb.Log.Println(\"Sync() returned \", err)\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (b *Bmatrix) handleEvent(ev *matrix.Event) {\n\tb.Log.Debugf(\"== Receiving event: %#v\", ev)\n\tif ev.Sender != b.UserID {\n\t\tb.RLock()\n\t\tchannel, ok := b.RoomMap[ev.RoomID]\n\t\tb.RUnlock()\n\t\tif !ok {\n\t\t\tb.Log.Debugf(\"Unknown room %s\", ev.RoomID)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ TODO download avatar\n\n\t\t\/\/ Create our message\n\t\trmsg := config.Message{Username: ev.Sender[1:], Channel: channel, Account: b.Account, UserID: ev.Sender, ID: ev.ID}\n\n\t\t\/\/ Text must be a string\n\t\tif rmsg.Text, ok = ev.Content[\"body\"].(string); !ok {\n\t\t\tb.Log.Errorf(\"Content[body] is not a string: %T\\n%#v\",\n\t\t\t\tev.Content[\"body\"], ev.Content)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Remove homeserver suffix if configured\n\t\tif b.GetBool(\"NoHomeServerSuffix\") {\n\t\t\tre := regexp.MustCompile(\"(.*?):.*\")\n\t\t\trmsg.Username = re.ReplaceAllString(rmsg.Username, `$1`)\n\t\t}\n\n\t\t\/\/ Delete event\n\t\tif ev.Type == \"m.room.redaction\" {\n\t\t\trmsg.Event = config.EventMsgDelete\n\t\t\trmsg.ID = ev.Redacts\n\t\t\trmsg.Text = config.EventMsgDelete\n\t\t\tb.Remote <- rmsg\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Do we have a \/me action\n\t\tif ev.Content[\"msgtype\"].(string) == \"m.emote\" {\n\t\t\trmsg.Event = config.EventUserAction\n\t\t}\n\n\t\t\/\/ Do we have attachments\n\t\tif b.containsAttachment(ev.Content) {\n\t\t\terr := b.handleDownloadFile(&rmsg, ev.Content)\n\t\t\tif err != nil {\n\t\t\t\tb.Log.Errorf(\"download failed: %#v\", err)\n\t\t\t}\n\t\t}\n\n\t\tb.Log.Debugf(\"<= Sending message from %s on %s to gateway\", ev.Sender, b.Account)\n\t\tb.Remote <- rmsg\n\t}\n}\n\n\/\/ handleDownloadFile handles file download\nfunc (b *Bmatrix) handleDownloadFile(rmsg *config.Message, content map[string]interface{}) error {\n\tvar (\n\t\tok bool\n\t\turl, name, msgtype, mtype string\n\t\tinfo map[string]interface{}\n\t\tsize float64\n\t)\n\n\trmsg.Extra = make(map[string][]interface{})\n\tif url, ok = content[\"url\"].(string); !ok {\n\t\treturn fmt.Errorf(\"url isn't a %T\", url)\n\t}\n\turl = strings.Replace(url, \"mxc:\/\/\", b.GetString(\"Server\")+\"\/_matrix\/media\/v1\/download\/\", -1)\n\n\tif info, ok = content[\"info\"].(map[string]interface{}); !ok {\n\t\treturn fmt.Errorf(\"info isn't a %T\", info)\n\t}\n\tif size, ok = info[\"size\"].(float64); !ok {\n\t\treturn fmt.Errorf(\"size isn't a %T\", size)\n\t}\n\tif name, ok = content[\"body\"].(string); !ok {\n\t\treturn fmt.Errorf(\"name isn't a %T\", name)\n\t}\n\tif msgtype, ok = content[\"msgtype\"].(string); !ok {\n\t\treturn fmt.Errorf(\"msgtype isn't a %T\", msgtype)\n\t}\n\tif mtype, ok = info[\"mimetype\"].(string); !ok {\n\t\treturn fmt.Errorf(\"mtype isn't a %T\", mtype)\n\t}\n\n\t\/\/ check if we have an image uploaded without extension\n\tif !strings.Contains(name, \".\") {\n\t\tif msgtype == \"m.image\" {\n\t\t\tmext, _ := mime.ExtensionsByType(mtype)\n\t\t\tif len(mext) > 0 {\n\t\t\t\tname += mext[0]\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ just a default .png extension if we don't have mime info\n\t\t\tname += \".png\"\n\t\t}\n\t}\n\n\t\/\/ check if the size is ok\n\terr := helper.HandleDownloadSize(b.Log, rmsg, name, int64(size), b.General)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ actually download the file\n\tdata, err := helper.DownloadFile(url)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"download %s failed %#v\", url, err)\n\t}\n\t\/\/ add the downloaded data to the message\n\thelper.HandleDownloadData(b.Log, rmsg, name, \"\", url, data, b.General)\n\treturn nil\n}\n\n\/\/ handleUploadFiles handles native upload of files.\nfunc (b *Bmatrix) handleUploadFiles(msg *config.Message, channel string) (string, error) {\n\tfor _, f := range msg.Extra[\"file\"] {\n\t\tif fi, ok := f.(config.FileInfo); ok {\n\t\t\tb.handleUploadFile(msg, channel, &fi)\n\t\t}\n\t}\n\treturn \"\", nil\n}\n\n\/\/ handleUploadFile handles native upload of a file.\nfunc (b *Bmatrix) handleUploadFile(msg *config.Message, channel string, fi *config.FileInfo) {\n\tcontent := bytes.NewReader(*fi.Data)\n\tsp := strings.Split(fi.Name, \".\")\n\tmtype := mime.TypeByExtension(\".\" + sp[len(sp)-1])\n\tif !strings.Contains(mtype, \"image\") && !strings.Contains(mtype, \"video\") {\n\t\treturn\n\t}\n\tif fi.Comment != \"\" {\n\t\t_, err := b.mc.SendText(channel, msg.Username+fi.Comment)\n\t\tif err != nil {\n\t\t\tb.Log.Errorf(\"file comment failed: %#v\", err)\n\t\t}\n\t}\n\tb.Log.Debugf(\"uploading file: %s %s\", fi.Name, mtype)\n\tres, err := b.mc.UploadToContentRepo(content, mtype, int64(len(*fi.Data)))\n\tif err != nil {\n\t\tb.Log.Errorf(\"file upload failed: %#v\", err)\n\t\treturn\n\t}\n\n\tswitch {\n\tcase strings.Contains(mtype, \"video\"):\n\t\tb.Log.Debugf(\"sendVideo %s\", res.ContentURI)\n\t\t_, err = b.mc.SendVideo(channel, fi.Name, res.ContentURI)\n\t\tif err != nil {\n\t\t\tb.Log.Errorf(\"sendVideo failed: %#v\", err)\n\t\t}\n\tcase strings.Contains(mtype, \"image\"):\n\t\tb.Log.Debugf(\"sendImage %s\", res.ContentURI)\n\t\t_, err = b.mc.SendImage(channel, fi.Name, res.ContentURI)\n\t\tif err != nil {\n\t\t\tb.Log.Errorf(\"sendImage failed: %#v\", err)\n\t\t}\n\t}\n\tb.Log.Debugf(\"result: %#v\", res)\n}\n\n\/\/ skipMessages returns true if this message should not be handled\nfunc (b *Bmatrix) containsAttachment(content map[string]interface{}) bool {\n\t\/\/ Skip empty messages\n\tif content[\"msgtype\"] == nil {\n\t\treturn false\n\t}\n\n\t\/\/ Only allow image,video or file msgtypes\n\tif !(content[\"msgtype\"].(string) == \"m.image\" ||\n\t\tcontent[\"msgtype\"].(string) == \"m.video\" ||\n\t\tcontent[\"msgtype\"].(string) == \"m.file\") {\n\t\treturn false\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage install\n\nimport (\n\t\"testing\"\n\n\t\"k8s.io\/apimachinery\/pkg\/api\/apitesting\/roundtrip\"\n\twardlefuzzer \"k8s.io\/sample-apiserver\/pkg\/apis\/wardle\/fuzzer\"\n)\n\nfunc TestRoundTripTypes(t *testing.T) {\n\troundtrip.RoundTripTestForAPIGroup(t, Install, wardlefuzzer.Funcs)\n}\n<commit_msg>Add proto roundtrip tests in roundtrip_test.go files<commit_after>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage install\n\nimport (\n\t\"testing\"\n\n\t\"k8s.io\/apimachinery\/pkg\/api\/apitesting\/roundtrip\"\n\twardlefuzzer \"k8s.io\/sample-apiserver\/pkg\/apis\/wardle\/fuzzer\"\n)\n\nfunc TestRoundTripTypes(t *testing.T) {\n\troundtrip.RoundTripTestForAPIGroup(t, Install, wardlefuzzer.Funcs)\n\t\/\/ TODO: enable protobuf generation for the sample-apiserver\n\t\/\/ roundtrip.RoundTripProtobufTestForAPIGroup(t, Install, wardlefuzzer.Funcs)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage fs\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"github.com\/jacobsa\/fuse\/fuseops\"\n\t\"github.com\/jacobsa\/fuse\/fuseutil\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n)\n\nvar (\n\tcounterFsRequests = prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"gcsfuse_fs_requests\",\n\t\t\tHelp: \"Number of requests per file system API.\",\n\t\t},\n\t\t[]string{ \/\/ labels\n\t\t\t\"method\",\n\t\t},\n\t)\n\tcounterFsErrors = prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"gcsfuse_fs_errors\",\n\t\t\tHelp: \"Number of errors per file system API.\",\n\t\t},\n\t\t[]string{ \/\/ labels\n\t\t\t\"method\",\n\t\t},\n\t)\n\tlatencyOpenFile = prometheus.NewHistogram(\n\t\tprometheus.HistogramOpts{\n\t\t\tName: \"gcsfuse_fs_open_file_latency\",\n\t\t\tHelp: \"The latency of OpenFile file system requests in ms.\",\n\t\t},\n\t)\n\tlatencyReadFile = prometheus.NewHistogram(\n\t\tprometheus.HistogramOpts{\n\t\t\tName: \"gcsfuse_fs_read_file_latency\",\n\t\t\tHelp: \"The latency of ReadFile file system requests in ms.\",\n\t\t},\n\t)\n)\n\n\/\/ Initialize the prometheus metrics.\nfunc init() {\n\tprometheus.MustRegister(counterFsRequests)\n\tprometheus.MustRegister(counterFsErrors)\n\tprometheus.MustRegister(latencyOpenFile)\n\tprometheus.MustRegister(latencyReadFile)\n}\n\nfunc incrementCounterFsRequests(method string) {\n\tcounterFsRequests.With(\n\t\tprometheus.Labels{\n\t\t\t\"method\": method,\n\t\t},\n\t).Inc()\n}\nfunc incrementCounterFsErrors(method string, err error) {\n\tif err != nil {\n\t\tcounterFsErrors.With(\n\t\t\tprometheus.Labels{\n\t\t\t\t\"method\": method,\n\t\t\t},\n\t\t).Inc()\n\t}\n}\n\nfunc recordLatency(metric prometheus.Histogram, start time.Time) {\n\tlatency := float64(time.Since(start).Milliseconds())\n\tmetric.Observe(latency)\n}\n\n\/\/ WithMonitoring takes a FileSystem, returns a FileSystem with monitoring\n\/\/ on the counts of requests per API.\nfunc WithMonitoring(fs fuseutil.FileSystem) fuseutil.FileSystem {\n\treturn &monitoringFileSystem{\n\t\twrapped: fs,\n\t}\n}\n\ntype monitoringFileSystem struct {\n\twrapped fuseutil.FileSystem\n}\n\nfunc (fs *monitoringFileSystem) Destroy() {\n\tincrementCounterFsRequests(\"Destroy\")\n\tfs.wrapped.Destroy()\n}\n\nfunc (fs *monitoringFileSystem) StatFS(\n\tctx context.Context,\n\top *fuseops.StatFSOp) error {\n\tincrementCounterFsRequests(\"StatFS\")\n\terr := fs.wrapped.StatFS(ctx, op)\n\tincrementCounterFsErrors(\"StatFS\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) LookUpInode(\n\tctx context.Context,\n\top *fuseops.LookUpInodeOp) error {\n\tincrementCounterFsRequests(\"LookUpInode\")\n\terr := fs.wrapped.LookUpInode(ctx, op)\n\tincrementCounterFsErrors(\"LookUpInode\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) GetInodeAttributes(\n\tctx context.Context,\n\top *fuseops.GetInodeAttributesOp) error {\n\tincrementCounterFsRequests(\"GetInodeAttributes\")\n\terr := fs.wrapped.GetInodeAttributes(ctx, op)\n\tincrementCounterFsErrors(\"GetInodeAttributes\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) SetInodeAttributes(\n\tctx context.Context,\n\top *fuseops.SetInodeAttributesOp) error {\n\tincrementCounterFsRequests(\"SetInodeAttributes\")\n\terr := fs.wrapped.SetInodeAttributes(ctx, op)\n\tincrementCounterFsErrors(\"SetInodeAttributes\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) ForgetInode(\n\tctx context.Context,\n\top *fuseops.ForgetInodeOp) error {\n\tincrementCounterFsRequests(\"ForgetInode\")\n\terr := fs.wrapped.ForgetInode(ctx, op)\n\tincrementCounterFsErrors(\"ForgetInode\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) MkDir(\n\tctx context.Context,\n\top *fuseops.MkDirOp) error {\n\tincrementCounterFsRequests(\"MkDir\")\n\terr := fs.wrapped.MkDir(ctx, op)\n\tincrementCounterFsErrors(\"MkDir\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) MkNode(\n\tctx context.Context,\n\top *fuseops.MkNodeOp) error {\n\tincrementCounterFsRequests(\"MkNode\")\n\terr := fs.wrapped.MkNode(ctx, op)\n\tincrementCounterFsErrors(\"MkNode\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) CreateFile(\n\tctx context.Context,\n\top *fuseops.CreateFileOp) error {\n\tincrementCounterFsRequests(\"CreateFile\")\n\terr := fs.wrapped.CreateFile(ctx, op)\n\tincrementCounterFsErrors(\"CreateFile\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) CreateSymlink(\n\tctx context.Context,\n\top *fuseops.CreateSymlinkOp) error {\n\tincrementCounterFsRequests(\"CreateSymlink\")\n\terr := fs.wrapped.CreateSymlink(ctx, op)\n\tincrementCounterFsErrors(\"CreateSymlink\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) Rename(\n\tctx context.Context,\n\top *fuseops.RenameOp) error {\n\tincrementCounterFsRequests(\"Rename\")\n\terr := fs.wrapped.Rename(ctx, op)\n\tincrementCounterFsErrors(\"Rename\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) RmDir(\n\tctx context.Context,\n\top *fuseops.RmDirOp) error {\n\tincrementCounterFsRequests(\"RmDir\")\n\terr := fs.wrapped.RmDir(ctx, op)\n\tincrementCounterFsErrors(\"RmDir\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) Unlink(\n\tctx context.Context,\n\top *fuseops.UnlinkOp) error {\n\tincrementCounterFsRequests(\"Unlink\")\n\terr := fs.wrapped.Unlink(ctx, op)\n\tincrementCounterFsErrors(\"Unlink\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) OpenDir(\n\tctx context.Context,\n\top *fuseops.OpenDirOp) error {\n\tincrementCounterFsRequests(\"OpenDir\")\n\terr := fs.wrapped.OpenDir(ctx, op)\n\tincrementCounterFsErrors(\"OpenDir\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) ReadDir(\n\tctx context.Context,\n\top *fuseops.ReadDirOp) error {\n\tincrementCounterFsRequests(\"ReadDir\")\n\terr := fs.wrapped.ReadDir(ctx, op)\n\tincrementCounterFsErrors(\"ReadDir\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) ReleaseDirHandle(\n\tctx context.Context,\n\top *fuseops.ReleaseDirHandleOp) error {\n\tincrementCounterFsRequests(\"ReleaseDirHandle\")\n\terr := fs.wrapped.ReleaseDirHandle(ctx, op)\n\tincrementCounterFsErrors(\"ReleaseDirHandle\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) OpenFile(\n\tctx context.Context,\n\top *fuseops.OpenFileOp) error {\n\tincrementCounterFsRequests(\"OpenFile\")\n\tdefer recordLatency(latencyOpenFile, time.Now())\n\terr := fs.wrapped.OpenFile(ctx, op)\n\tincrementCounterFsErrors(\"OpenFile\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) ReadFile(\n\tctx context.Context,\n\top *fuseops.ReadFileOp) error {\n\tincrementCounterFsRequests(\"ReadFile\")\n\tdefer recordLatency(latencyReadFile, time.Now())\n\terr := fs.wrapped.ReadFile(ctx, op)\n\tincrementCounterFsErrors(\"ReadFile\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) WriteFile(\n\tctx context.Context,\n\top *fuseops.WriteFileOp) error {\n\tincrementCounterFsRequests(\"WriteFile\")\n\terr := fs.wrapped.WriteFile(ctx, op)\n\tincrementCounterFsErrors(\"WriteFile\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) SyncFile(\n\tctx context.Context,\n\top *fuseops.SyncFileOp) error {\n\tincrementCounterFsRequests(\"SyncFile\")\n\terr := fs.wrapped.SyncFile(ctx, op)\n\tincrementCounterFsErrors(\"SyncFile\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) FlushFile(\n\tctx context.Context,\n\top *fuseops.FlushFileOp) error {\n\tincrementCounterFsRequests(\"FlushFile\")\n\terr := fs.wrapped.FlushFile(ctx, op)\n\tincrementCounterFsErrors(\"FlushFile\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) ReleaseFileHandle(\n\tctx context.Context,\n\top *fuseops.ReleaseFileHandleOp) error {\n\tincrementCounterFsRequests(\"ReleaseFileHandle\")\n\terr := fs.wrapped.ReleaseFileHandle(ctx, op)\n\tincrementCounterFsErrors(\"ReleaseFileHandle\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) ReadSymlink(\n\tctx context.Context,\n\top *fuseops.ReadSymlinkOp) error {\n\tincrementCounterFsRequests(\"ReadSymlink\")\n\terr := fs.wrapped.ReadSymlink(ctx, op)\n\tincrementCounterFsErrors(\"ReadSymlink\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) RemoveXattr(\n\tctx context.Context,\n\top *fuseops.RemoveXattrOp) error {\n\tincrementCounterFsRequests(\"RemoveXattr\")\n\terr := fs.wrapped.RemoveXattr(ctx, op)\n\tincrementCounterFsErrors(\"RemoveXattr\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) GetXattr(\n\tctx context.Context,\n\top *fuseops.GetXattrOp) error {\n\tincrementCounterFsRequests(\"GetXattr\")\n\terr := fs.wrapped.GetXattr(ctx, op)\n\tincrementCounterFsErrors(\"GetXattr\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) ListXattr(\n\tctx context.Context,\n\top *fuseops.ListXattrOp) error {\n\tincrementCounterFsRequests(\"ListXattr\")\n\terr := fs.wrapped.ListXattr(ctx, op)\n\tincrementCounterFsErrors(\"ListXattr\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) SetXattr(\n\tctx context.Context,\n\top *fuseops.SetXattrOp) error {\n\tincrementCounterFsRequests(\"SetXattr\")\n\terr := fs.wrapped.SetXattr(ctx, op)\n\tincrementCounterFsErrors(\"SetXattr\", err)\n\treturn err\n}\n<commit_msg>Specify histogram buckets for latency metrics<commit_after>\/\/ Copyright 2020 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage fs\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\t\"github.com\/jacobsa\/fuse\/fuseops\"\n\t\"github.com\/jacobsa\/fuse\/fuseutil\"\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n)\n\nvar (\n\tcounterFsRequests = prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"gcsfuse_fs_requests\",\n\t\t\tHelp: \"Number of requests per file system API.\",\n\t\t},\n\t\t[]string{ \/\/ labels\n\t\t\t\"method\",\n\t\t},\n\t)\n\tcounterFsErrors = prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"gcsfuse_fs_errors\",\n\t\t\tHelp: \"Number of errors per file system API.\",\n\t\t},\n\t\t[]string{ \/\/ labels\n\t\t\t\"method\",\n\t\t},\n\t)\n\tlatencyOpenFile = prometheus.NewHistogram(\n\t\tprometheus.HistogramOpts{\n\t\t\tName: \"gcsfuse_fs_open_file_latency\",\n\t\t\tHelp: \"The latency of executing an OpenFile request in ms.\",\n\t\t\tBuckets: prometheus.ExponentialBuckets(0.01, 10, 8),\n\t\t},\n\t)\n\tlatencyReadFile = prometheus.NewHistogram(\n\t\tprometheus.HistogramOpts{\n\t\t\tName: \"gcsfuse_fs_read_file_latency\",\n\t\t\tHelp: \"The latency of executing a ReadFile request in ms.\",\n\t\t\tBuckets: prometheus.ExponentialBuckets(0.01, 10, 8),\n\t\t},\n\t)\n)\n\n\/\/ Initialize the prometheus metrics.\nfunc init() {\n\tprometheus.MustRegister(counterFsRequests)\n\tprometheus.MustRegister(counterFsErrors)\n\tprometheus.MustRegister(latencyOpenFile)\n\tprometheus.MustRegister(latencyReadFile)\n}\n\nfunc incrementCounterFsRequests(method string) {\n\tcounterFsRequests.With(\n\t\tprometheus.Labels{\n\t\t\t\"method\": method,\n\t\t},\n\t).Inc()\n}\nfunc incrementCounterFsErrors(method string, err error) {\n\tif err != nil {\n\t\tcounterFsErrors.With(\n\t\t\tprometheus.Labels{\n\t\t\t\t\"method\": method,\n\t\t\t},\n\t\t).Inc()\n\t}\n}\n\nfunc recordLatency(metric prometheus.Histogram, start time.Time) {\n\tlatency := float64(time.Since(start).Milliseconds())\n\tmetric.Observe(latency)\n}\n\n\/\/ WithMonitoring takes a FileSystem, returns a FileSystem with monitoring\n\/\/ on the counts of requests per API.\nfunc WithMonitoring(fs fuseutil.FileSystem) fuseutil.FileSystem {\n\treturn &monitoringFileSystem{\n\t\twrapped: fs,\n\t}\n}\n\ntype monitoringFileSystem struct {\n\twrapped fuseutil.FileSystem\n}\n\nfunc (fs *monitoringFileSystem) Destroy() {\n\tincrementCounterFsRequests(\"Destroy\")\n\tfs.wrapped.Destroy()\n}\n\nfunc (fs *monitoringFileSystem) StatFS(\n\tctx context.Context,\n\top *fuseops.StatFSOp) error {\n\tincrementCounterFsRequests(\"StatFS\")\n\terr := fs.wrapped.StatFS(ctx, op)\n\tincrementCounterFsErrors(\"StatFS\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) LookUpInode(\n\tctx context.Context,\n\top *fuseops.LookUpInodeOp) error {\n\tincrementCounterFsRequests(\"LookUpInode\")\n\terr := fs.wrapped.LookUpInode(ctx, op)\n\tincrementCounterFsErrors(\"LookUpInode\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) GetInodeAttributes(\n\tctx context.Context,\n\top *fuseops.GetInodeAttributesOp) error {\n\tincrementCounterFsRequests(\"GetInodeAttributes\")\n\terr := fs.wrapped.GetInodeAttributes(ctx, op)\n\tincrementCounterFsErrors(\"GetInodeAttributes\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) SetInodeAttributes(\n\tctx context.Context,\n\top *fuseops.SetInodeAttributesOp) error {\n\tincrementCounterFsRequests(\"SetInodeAttributes\")\n\terr := fs.wrapped.SetInodeAttributes(ctx, op)\n\tincrementCounterFsErrors(\"SetInodeAttributes\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) ForgetInode(\n\tctx context.Context,\n\top *fuseops.ForgetInodeOp) error {\n\tincrementCounterFsRequests(\"ForgetInode\")\n\terr := fs.wrapped.ForgetInode(ctx, op)\n\tincrementCounterFsErrors(\"ForgetInode\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) MkDir(\n\tctx context.Context,\n\top *fuseops.MkDirOp) error {\n\tincrementCounterFsRequests(\"MkDir\")\n\terr := fs.wrapped.MkDir(ctx, op)\n\tincrementCounterFsErrors(\"MkDir\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) MkNode(\n\tctx context.Context,\n\top *fuseops.MkNodeOp) error {\n\tincrementCounterFsRequests(\"MkNode\")\n\terr := fs.wrapped.MkNode(ctx, op)\n\tincrementCounterFsErrors(\"MkNode\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) CreateFile(\n\tctx context.Context,\n\top *fuseops.CreateFileOp) error {\n\tincrementCounterFsRequests(\"CreateFile\")\n\terr := fs.wrapped.CreateFile(ctx, op)\n\tincrementCounterFsErrors(\"CreateFile\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) CreateSymlink(\n\tctx context.Context,\n\top *fuseops.CreateSymlinkOp) error {\n\tincrementCounterFsRequests(\"CreateSymlink\")\n\terr := fs.wrapped.CreateSymlink(ctx, op)\n\tincrementCounterFsErrors(\"CreateSymlink\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) Rename(\n\tctx context.Context,\n\top *fuseops.RenameOp) error {\n\tincrementCounterFsRequests(\"Rename\")\n\terr := fs.wrapped.Rename(ctx, op)\n\tincrementCounterFsErrors(\"Rename\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) RmDir(\n\tctx context.Context,\n\top *fuseops.RmDirOp) error {\n\tincrementCounterFsRequests(\"RmDir\")\n\terr := fs.wrapped.RmDir(ctx, op)\n\tincrementCounterFsErrors(\"RmDir\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) Unlink(\n\tctx context.Context,\n\top *fuseops.UnlinkOp) error {\n\tincrementCounterFsRequests(\"Unlink\")\n\terr := fs.wrapped.Unlink(ctx, op)\n\tincrementCounterFsErrors(\"Unlink\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) OpenDir(\n\tctx context.Context,\n\top *fuseops.OpenDirOp) error {\n\tincrementCounterFsRequests(\"OpenDir\")\n\terr := fs.wrapped.OpenDir(ctx, op)\n\tincrementCounterFsErrors(\"OpenDir\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) ReadDir(\n\tctx context.Context,\n\top *fuseops.ReadDirOp) error {\n\tincrementCounterFsRequests(\"ReadDir\")\n\terr := fs.wrapped.ReadDir(ctx, op)\n\tincrementCounterFsErrors(\"ReadDir\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) ReleaseDirHandle(\n\tctx context.Context,\n\top *fuseops.ReleaseDirHandleOp) error {\n\tincrementCounterFsRequests(\"ReleaseDirHandle\")\n\terr := fs.wrapped.ReleaseDirHandle(ctx, op)\n\tincrementCounterFsErrors(\"ReleaseDirHandle\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) OpenFile(\n\tctx context.Context,\n\top *fuseops.OpenFileOp) error {\n\tincrementCounterFsRequests(\"OpenFile\")\n\tdefer recordLatency(latencyOpenFile, time.Now())\n\terr := fs.wrapped.OpenFile(ctx, op)\n\tincrementCounterFsErrors(\"OpenFile\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) ReadFile(\n\tctx context.Context,\n\top *fuseops.ReadFileOp) error {\n\tincrementCounterFsRequests(\"ReadFile\")\n\tdefer recordLatency(latencyReadFile, time.Now())\n\terr := fs.wrapped.ReadFile(ctx, op)\n\tincrementCounterFsErrors(\"ReadFile\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) WriteFile(\n\tctx context.Context,\n\top *fuseops.WriteFileOp) error {\n\tincrementCounterFsRequests(\"WriteFile\")\n\terr := fs.wrapped.WriteFile(ctx, op)\n\tincrementCounterFsErrors(\"WriteFile\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) SyncFile(\n\tctx context.Context,\n\top *fuseops.SyncFileOp) error {\n\tincrementCounterFsRequests(\"SyncFile\")\n\terr := fs.wrapped.SyncFile(ctx, op)\n\tincrementCounterFsErrors(\"SyncFile\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) FlushFile(\n\tctx context.Context,\n\top *fuseops.FlushFileOp) error {\n\tincrementCounterFsRequests(\"FlushFile\")\n\terr := fs.wrapped.FlushFile(ctx, op)\n\tincrementCounterFsErrors(\"FlushFile\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) ReleaseFileHandle(\n\tctx context.Context,\n\top *fuseops.ReleaseFileHandleOp) error {\n\tincrementCounterFsRequests(\"ReleaseFileHandle\")\n\terr := fs.wrapped.ReleaseFileHandle(ctx, op)\n\tincrementCounterFsErrors(\"ReleaseFileHandle\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) ReadSymlink(\n\tctx context.Context,\n\top *fuseops.ReadSymlinkOp) error {\n\tincrementCounterFsRequests(\"ReadSymlink\")\n\terr := fs.wrapped.ReadSymlink(ctx, op)\n\tincrementCounterFsErrors(\"ReadSymlink\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) RemoveXattr(\n\tctx context.Context,\n\top *fuseops.RemoveXattrOp) error {\n\tincrementCounterFsRequests(\"RemoveXattr\")\n\terr := fs.wrapped.RemoveXattr(ctx, op)\n\tincrementCounterFsErrors(\"RemoveXattr\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) GetXattr(\n\tctx context.Context,\n\top *fuseops.GetXattrOp) error {\n\tincrementCounterFsRequests(\"GetXattr\")\n\terr := fs.wrapped.GetXattr(ctx, op)\n\tincrementCounterFsErrors(\"GetXattr\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) ListXattr(\n\tctx context.Context,\n\top *fuseops.ListXattrOp) error {\n\tincrementCounterFsRequests(\"ListXattr\")\n\terr := fs.wrapped.ListXattr(ctx, op)\n\tincrementCounterFsErrors(\"ListXattr\", err)\n\treturn err\n}\n\nfunc (fs *monitoringFileSystem) SetXattr(\n\tctx context.Context,\n\top *fuseops.SetXattrOp) error {\n\tincrementCounterFsRequests(\"SetXattr\")\n\terr := fs.wrapped.SetXattr(ctx, op)\n\tincrementCounterFsErrors(\"SetXattr\", err)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package version\n\nvar (\n\t\/\/ The git commit that was compiled. This will be filled in by the compiler.\n\tGitCommit string\n\tGitDescribe string\n\n\t\/\/ Whether cgo is enabled or not; set at build time\n\tCgoEnabled bool\n\n\tVersion = \"1.3.0\"\n\tVersionPrerelease = \"\"\n\tVersionMetadata = \"\"\n)\n<commit_msg>sdk: set version to 1.4.0-beta<commit_after>package version\n\nvar (\n\t\/\/ The git commit that was compiled. This will be filled in by the compiler.\n\tGitCommit string\n\tGitDescribe string\n\n\t\/\/ Whether cgo is enabled or not; set at build time\n\tCgoEnabled bool\n\n\tVersion = \"1.4.0\"\n\tVersionPrerelease = \"beta\"\n\tVersionMetadata = \"\"\n)\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Koichi Shiraishi. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage pathutil\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n)\n\nvar (\n\tvcsDirs = []string{\".git\", \".svn\", \".hg\"}\n\tvcsDirFound bool\n)\n\n\/\/ FindVcsRoot find package root path from arg path\nfunc FindVcsRoot(basedir string) string {\n\tvcsDirFound = false\n\tfilepath.Walk(basedir, findvcsDirWalkFunc)\n\n\tfor {\n\t\tif !vcsDirFound {\n\t\t\tbasedir = filepath.Dir(basedir)\n\t\t\tfilepath.Walk(basedir, findvcsDirWalkFunc)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn basedir\n}\n\nfunc findvcsDirWalkFunc(path string, fileInfo os.FileInfo, err error) error {\n\tif err != nil || fileInfo == nil || fileInfo.IsDir() == false {\n\t\treturn nil\n\t}\n\n\tfor _, d := range vcsDirs {\n\t\t_, err := os.Stat(filepath.Join(path, d))\n\t\tif err == nil {\n\t\t\tvcsDirFound = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>pathutil: filepath.Clean to return directory path<commit_after>\/\/ Copyright 2016 Koichi Shiraishi. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage pathutil\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n)\n\nvar (\n\tvcsDirs = []string{\".git\", \".svn\", \".hg\"}\n\tvcsDirFound bool\n)\n\n\/\/ FindVcsRoot find package root path from arg path\nfunc FindVcsRoot(basedir string) string {\n\tvcsDirFound = false\n\tfilepath.Walk(basedir, findvcsDirWalkFunc)\n\n\tfor {\n\t\tif !vcsDirFound {\n\t\t\tbasedir = filepath.Dir(basedir)\n\t\t\tfilepath.Walk(basedir, findvcsDirWalkFunc)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn filepath.Clean(basedir)\n}\n\nfunc findvcsDirWalkFunc(path string, fileInfo os.FileInfo, err error) error {\n\tif err != nil || fileInfo == nil || fileInfo.IsDir() == false {\n\t\treturn nil\n\t}\n\n\tfor _, d := range vcsDirs {\n\t\t_, err := os.Stat(filepath.Join(path, d))\n\t\tif err == nil {\n\t\t\tvcsDirFound = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package docker\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/spf13\/cobra\"\n\t\"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/templates\"\n\tkcmdutil \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/util\"\n\tdockertools \"k8s.io\/kubernetes\/pkg\/kubelet\/dockershim\/libdocker\"\n\n\t\"github.com\/openshift\/origin\/pkg\/oc\/bootstrap\/docker\/dockerhelper\"\n\t\"github.com\/openshift\/origin\/pkg\/oc\/bootstrap\/docker\/openshift\"\n\tosclientcmd \"github.com\/openshift\/origin\/pkg\/oc\/cli\/util\/clientcmd\"\n)\n\nconst CmdDownRecommendedName = \"down\"\n\nvar (\n\tcmdDownLong = templates.LongDesc(`\n\t\tStops the container running OpenShift on Docker and associated containers.\n\n\t\tIf you started your OpenShift with a specific docker-machine, you need to specify the\n\t\tsame machine using the --docker-machine argument.`)\n\n\tcmdDownExample = templates.Examples(`\n\t # Stop local OpenShift cluster\n\t %[1]s\n\n\t # Stop cluster running on Docker machine 'mymachine'\n\t %[1]s --docker-machine=mymachine`)\n)\n\ntype ClientStopConfig struct {\n\tDockerMachine string\n}\n\n\/\/ NewCmdDown creates a command that stops OpenShift\nfunc NewCmdDown(name, fullName string, f *osclientcmd.Factory, out io.Writer) *cobra.Command {\n\tconfig := &ClientStopConfig{}\n\tcmd := &cobra.Command{\n\t\tUse: name,\n\t\tShort: \"Stop OpenShift on Docker\",\n\t\tLong: cmdDownLong,\n\t\tExample: fmt.Sprintf(cmdDownExample, fullName),\n\t\tRun: func(c *cobra.Command, args []string) {\n\t\t\tkcmdutil.CheckErr(config.Stop(out))\n\t\t},\n\t}\n\tcmd.Flags().StringVar(&config.DockerMachine, \"docker-machine\", \"\", \"Specify the Docker machine to use\")\n\treturn cmd\n}\n\n\/\/ Stop stops the currently running origin container and any\n\/\/ containers started by the node.\nfunc (c *ClientStopConfig) Stop(out io.Writer) error {\n\tclient, err := getDockerClient(out, c.DockerMachine, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\thelper := dockerhelper.NewHelper(client)\n\tglog.V(4).Infof(\"Killing previous socat tunnel\")\n\terr = openshift.KillExistingSocat()\n\tif err != nil {\n\t\tglog.V(2).Infof(\"error: cannot kill socat: %v\", err)\n\t}\n\tglog.V(4).Infof(\"Stopping and removing origin container\")\n\tif err = helper.StopAndRemoveContainer(\"origin\"); err != nil {\n\t\tglog.V(2).Infof(\"Error stopping origin container: %v\", err)\n\t}\n\tnames, err := helper.ListContainerNames()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, name := range names {\n\t\tif _, _, err = dockertools.ParseDockerName(name); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tname = strings.TrimLeft(name, \"\/\")\n\t\tglog.V(4).Infof(\"Stopping container %s\", name)\n\t\tif err = client.ContainerStop(name, 0); err != nil {\n\t\t\tglog.V(2).Infof(\"Error stopping container %s: %v\", name, err)\n\t\t}\n\t\tglog.V(4).Infof(\"Removing container %s\", name)\n\t\tif err = helper.RemoveContainer(name); err != nil {\n\t\t\tglog.V(2).Infof(\"Error removing container %s: %v\", name, err)\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>oc: cluster up dockername parsing<commit_after>package docker\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/spf13\/cobra\"\n\t\"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/templates\"\n\tkcmdutil \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/util\"\n\n\t\"github.com\/openshift\/origin\/pkg\/oc\/bootstrap\/docker\/dockerhelper\"\n\t\"github.com\/openshift\/origin\/pkg\/oc\/bootstrap\/docker\/openshift\"\n\tosclientcmd \"github.com\/openshift\/origin\/pkg\/oc\/cli\/util\/clientcmd\"\n)\n\nconst CmdDownRecommendedName = \"down\"\n\nvar (\n\tcmdDownLong = templates.LongDesc(`\n\t\tStops the container running OpenShift on Docker and associated containers.\n\n\t\tIf you started your OpenShift with a specific docker-machine, you need to specify the\n\t\tsame machine using the --docker-machine argument.`)\n\n\tcmdDownExample = templates.Examples(`\n\t # Stop local OpenShift cluster\n\t %[1]s\n\n\t # Stop cluster running on Docker machine 'mymachine'\n\t %[1]s --docker-machine=mymachine`)\n)\n\ntype ClientStopConfig struct {\n\tDockerMachine string\n}\n\n\/\/ NewCmdDown creates a command that stops OpenShift\nfunc NewCmdDown(name, fullName string, f *osclientcmd.Factory, out io.Writer) *cobra.Command {\n\tconfig := &ClientStopConfig{}\n\tcmd := &cobra.Command{\n\t\tUse: name,\n\t\tShort: \"Stop OpenShift on Docker\",\n\t\tLong: cmdDownLong,\n\t\tExample: fmt.Sprintf(cmdDownExample, fullName),\n\t\tRun: func(c *cobra.Command, args []string) {\n\t\t\tkcmdutil.CheckErr(config.Stop(out))\n\t\t},\n\t}\n\tcmd.Flags().StringVar(&config.DockerMachine, \"docker-machine\", \"\", \"Specify the Docker machine to use\")\n\treturn cmd\n}\n\n\/\/ Stop stops the currently running origin container and any\n\/\/ containers started by the node.\nfunc (c *ClientStopConfig) Stop(out io.Writer) error {\n\tclient, err := getDockerClient(out, c.DockerMachine, false)\n\tif err != nil {\n\t\treturn err\n\t}\n\thelper := dockerhelper.NewHelper(client)\n\tglog.V(4).Infof(\"Killing previous socat tunnel\")\n\terr = openshift.KillExistingSocat()\n\tif err != nil {\n\t\tglog.V(2).Infof(\"error: cannot kill socat: %v\", err)\n\t}\n\tglog.V(4).Infof(\"Stopping and removing origin container\")\n\tif err = helper.StopAndRemoveContainer(\"origin\"); err != nil {\n\t\tglog.V(2).Infof(\"Error stopping origin container: %v\", err)\n\t}\n\tnames, err := helper.ListContainerNames()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, name := range names {\n\t\tif _, err = parseDockerName(name); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tname = strings.TrimLeft(name, \"\/\")\n\t\tglog.V(4).Infof(\"Stopping container %s\", name)\n\t\tif err = client.ContainerStop(name, 0); err != nil {\n\t\t\tglog.V(2).Infof(\"Error stopping container %s: %v\", name, err)\n\t\t}\n\t\tglog.V(4).Infof(\"Removing container %s\", name)\n\t\tif err = helper.RemoveContainer(name); err != nil {\n\t\t\tglog.V(2).Infof(\"Error removing container %s: %v\", name, err)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Unpacks a container name, returning the pod full name and container name we would have used to\n\/\/ construct the docker name. If we are unable to parse the name, an error is returned.\nfunc parseDockerName(name string) (hash uint64, err error) {\n\tconst containerNamePrefix = \"k8s\"\n\t\/\/ For some reason docker appears to be appending '\/' to names.\n\t\/\/ If it's there, strip it.\n\tname = strings.TrimPrefix(name, \"\/\")\n\tparts := strings.Split(name, \"_\")\n\tif len(parts) == 0 || parts[0] != containerNamePrefix {\n\t\terr = fmt.Errorf(\"failed to parse Docker container name %q into parts\", name)\n\t\treturn 0, err\n\t}\n\tif len(parts) < 6 {\n\t\t\/\/ We have at least 5 fields. We may have more in the future.\n\t\t\/\/ Anything with less fields than this is not something we can\n\t\t\/\/ manage.\n\t\tglog.Warningf(\"found a container with the %q prefix, but too few fields (%d): %q\", containerNamePrefix, len(parts), name)\n\t\terr = fmt.Errorf(\"Docker container name %q has less parts than expected %v\", name, parts)\n\t\treturn 0, err\n\t}\n\n\tnameParts := strings.Split(parts[1], \".\")\n\tif len(nameParts) > 1 {\n\t\thash, err = strconv.ParseUint(nameParts[1], 16, 32)\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"invalid container hash %q in container %q\", nameParts[1], name)\n\t\t}\n\t}\n\n\treturn hash, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package privatemessage\n\nimport (\n\t\"errors\"\n\t\"koding\/db\/mongodb\/modelhelper\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"socialapi\/models\"\n\t\"socialapi\/workers\/api\/modules\/helpers\"\n\n\t\"github.com\/koding\/bongo\"\n)\n\nfunc fetchParticipantIds(participantNames []string) ([]int64, error) {\n\tparticipantIds := make([]int64, len(participantNames))\n\tfor i, participantName := range participantNames {\n\t\taccount, err := modelhelper.GetAccount(participantName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ta := models.NewAccount()\n\t\ta.Id = account.SocialApiId\n\t\ta.OldId = account.Id.Hex()\n\t\t\/\/ fetch or create social api id\n\t\tif a.Id == 0 {\n\t\t\tif err := a.FetchOrCreate(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tparticipantIds[i] = a.Id\n\t}\n\n\treturn participantIds, nil\n}\n\nfunc appendCreatorIdIntoParticipantList(participants []int64, authorId int64) []int64 {\n\tfor _, participant := range participants {\n\t\tif participant == authorId {\n\t\t\treturn participants\n\t\t}\n\t}\n\n\treturn append(participants, authorId)\n}\n\nfunc Send(u *url.URL, h http.Header, req *models.PrivateMessageRequest) (int, http.Header, interface{}, error) {\n\tif req.AccountId == 0 {\n\t\treturn helpers.NewBadRequestResponse(errors.New(\"AcccountId is not defined\"))\n\t}\n\n\t\/\/ \/\/ req.Recipients = append(req.Recipients, req.AccountId)\n\tcm := models.NewChannelMessage()\n\tcm.Body = req.Body\n\tparticipantNames := cm.GetMentionedUsernames()\n\tparticipantIds, err := fetchParticipantIds(participantNames)\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\t\/\/ append creator to the recipients\n\tparticipantIds = appendCreatorIdIntoParticipantList(participantIds, req.AccountId)\n\n\t\/\/ author and atleast one recipient should be in the\n\t\/\/ recipient list\n\tif len(participantIds) < 2 {\n\t\treturn helpers.NewBadRequestResponse(errors.New(\"You should define your recipients\"))\n\t}\n\n\tif req.GroupName == \"\" {\n\t\treq.GroupName = models.Channel_KODING_NAME\n\t}\n\n\t\/\/\/\/ first create the channel\n\tc := models.NewPrivateMessageChannel(req.AccountId, req.GroupName)\n\tif err := c.Create(); err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\tcm.TypeConstant = models.ChannelMessage_TYPE_PRIVATE_MESSAGE\n\tcm.AccountId = req.AccountId\n\tcm.InitialChannelId = c.Id\n\tif err := cm.Create(); err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\tmessageContainer, err := cm.BuildEmptyMessageContainer()\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\t_, err = c.AddMessage(cm.Id)\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\tfor _, participantId := range participantIds {\n\t\t_, err := c.AddParticipant(participantId)\n\t\tif err != nil {\n\t\t\treturn helpers.NewBadRequestResponse(err)\n\t\t}\n\t}\n\n\tcmc := models.NewChannelContainer()\n\tcmc.Channel = *c\n\tcmc.IsParticipant = true\n\tcmc.LastMessage = messageContainer\n\tcmc.ParticipantCount = len(participantIds)\n\tparticipantOldIds, err := models.AccountOldsIdByIds(participantIds)\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\tcmc.ParticipantsPreview = participantOldIds\n\n\treturn helpers.NewOKResponse(cmc)\n}\n\nfunc List(u *url.URL, h http.Header, _ interface{}) (int, http.Header, interface{}, error) {\n\tq := helpers.GetQuery(u)\n\n\tchannels, err := getPrivateMessageChannels(q)\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\treturn helpers.HandleResultAndError(\n\t\tmodels.PopulateChannelContainers(channels, q.AccountId),\n\t)\n}\n\nfunc getPrivateMessageChannels(q *models.Query) ([]models.Channel, error) {\n\t\/\/ build query for\n\tc := models.NewChannel()\n\tchannelIds := make([]int64, 0)\n\trows, err := bongo.B.DB.Table(c.TableName()).\n\t\tSelect(\"api.channel_participant.channel_id\").\n\t\tJoins(\"left join api.channel_participant on api.channel_participant.channel_id = api.channel.id\").\n\t\tWhere(\"api.channel_participant.account_id = ? and \"+\n\t\t\"api.channel.group_name = ? and \"+\n\t\t\"api.channel.type_constant = ? and \"+\n\t\t\"api.channel_participant.status_constant = ?\",\n\t\tq.AccountId,\n\t\tq.GroupName,\n\t\tmodels.Channel_TYPE_PRIVATE_MESSAGE,\n\t\tmodels.ChannelParticipant_STATUS_ACTIVE).\n\t\tLimit(q.Limit).\n\t\tOffset(q.Skip).\n\t\tRows()\n\tdefer rows.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar channelId int64\n\tfor rows.Next() {\n\t\trows.Scan(&channelId)\n\t\tchannelIds = append(channelIds, channelId)\n\t}\n\n\tchannels, err := c.FetchByIds(channelIds)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn channels, nil\n}\n<commit_msg>Social: more refactoring for function names<commit_after>package privatemessage\n\nimport (\n\t\"errors\"\n\t\"koding\/db\/mongodb\/modelhelper\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"socialapi\/models\"\n\t\"socialapi\/workers\/api\/modules\/helpers\"\n\n\t\"github.com\/koding\/bongo\"\n)\n\nfunc fetchParticipantIds(participantNames []string) ([]int64, error) {\n\tparticipantIds := make([]int64, len(participantNames))\n\tfor i, participantName := range participantNames {\n\t\taccount, err := modelhelper.GetAccount(participantName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ta := models.NewAccount()\n\t\ta.Id = account.SocialApiId\n\t\ta.OldId = account.Id.Hex()\n\t\t\/\/ fetch or create social api id\n\t\tif a.Id == 0 {\n\t\t\tif err := a.FetchOrCreate(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tparticipantIds[i] = a.Id\n\t}\n\n\treturn participantIds, nil\n}\n\nfunc appendCreatorIdIntoParticipantList(participants []int64, authorId int64) []int64 {\n\tfor _, participant := range participants {\n\t\tif participant == authorId {\n\t\t\treturn participants\n\t\t}\n\t}\n\n\treturn append(participants, authorId)\n}\n\nfunc Send(u *url.URL, h http.Header, req *models.PrivateMessageRequest) (int, http.Header, interface{}, error) {\n\tif req.AccountId == 0 {\n\t\treturn helpers.NewBadRequestResponse(errors.New(\"AcccountId is not defined\"))\n\t}\n\n\t\/\/ \/\/ req.Recipients = append(req.Recipients, req.AccountId)\n\tcm := models.NewChannelMessage()\n\tcm.Body = req.Body\n\tparticipantNames := cm.GetMentionedUsernames()\n\tparticipantIds, err := fetchParticipantIds(participantNames)\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\t\/\/ append creator to the recipients\n\tparticipantIds = appendCreatorIdIntoParticipantList(participantIds, req.AccountId)\n\n\t\/\/ author and atleast one recipient should be in the\n\t\/\/ recipient list\n\tif len(participantIds) < 2 {\n\t\treturn helpers.NewBadRequestResponse(errors.New(\"You should define your recipients\"))\n\t}\n\n\tif req.GroupName == \"\" {\n\t\treq.GroupName = models.Channel_KODING_NAME\n\t}\n\n\t\/\/\/\/ first create the channel\n\tc := models.NewPrivateMessageChannel(req.AccountId, req.GroupName)\n\tif err := c.Create(); err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\tcm.TypeConstant = models.ChannelMessage_TYPE_PRIVATE_MESSAGE\n\tcm.AccountId = req.AccountId\n\tcm.InitialChannelId = c.Id\n\tif err := cm.Create(); err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\tmessageContainer, err := cm.BuildEmptyMessageContainer()\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\t_, err = c.AddMessage(cm.Id)\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\tfor _, participantId := range participantIds {\n\t\t_, err := c.AddParticipant(participantId)\n\t\tif err != nil {\n\t\t\treturn helpers.NewBadRequestResponse(err)\n\t\t}\n\t}\n\n\tcmc := models.NewChannelContainer()\n\tcmc.Channel = *c\n\tcmc.IsParticipant = true\n\tcmc.LastMessage = messageContainer\n\tcmc.ParticipantCount = len(participantIds)\n\tparticipantOldIds, err := models.FetchAccountOldsIdByIdsFromCache(participantIds)\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\tcmc.ParticipantsPreview = participantOldIds\n\n\treturn helpers.NewOKResponse(cmc)\n}\n\nfunc List(u *url.URL, h http.Header, _ interface{}) (int, http.Header, interface{}, error) {\n\tq := helpers.GetQuery(u)\n\n\tchannels, err := getPrivateMessageChannels(q)\n\tif err != nil {\n\t\treturn helpers.NewBadRequestResponse(err)\n\t}\n\n\treturn helpers.HandleResultAndError(\n\t\tmodels.PopulateChannelContainers(channels, q.AccountId),\n\t)\n}\n\nfunc getPrivateMessageChannels(q *models.Query) ([]models.Channel, error) {\n\t\/\/ build query for\n\tc := models.NewChannel()\n\tchannelIds := make([]int64, 0)\n\trows, err := bongo.B.DB.Table(c.TableName()).\n\t\tSelect(\"api.channel_participant.channel_id\").\n\t\tJoins(\"left join api.channel_participant on api.channel_participant.channel_id = api.channel.id\").\n\t\tWhere(\"api.channel_participant.account_id = ? and \"+\n\t\t\"api.channel.group_name = ? and \"+\n\t\t\"api.channel.type_constant = ? and \"+\n\t\t\"api.channel_participant.status_constant = ?\",\n\t\tq.AccountId,\n\t\tq.GroupName,\n\t\tmodels.Channel_TYPE_PRIVATE_MESSAGE,\n\t\tmodels.ChannelParticipant_STATUS_ACTIVE).\n\t\tLimit(q.Limit).\n\t\tOffset(q.Skip).\n\t\tRows()\n\tdefer rows.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar channelId int64\n\tfor rows.Next() {\n\t\trows.Scan(&channelId)\n\t\tchannelIds = append(channelIds, channelId)\n\t}\n\n\tchannels, err := c.FetchByIds(channelIds)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn channels, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package handler\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/dependency\/forgotpwdemail\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/dependency\/userprofile\"\n\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/dependency\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/dependency\/provider\/password\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/audit\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/authinfo\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/authtoken\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/authz\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/authz\/policy\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/db\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/handler\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/inject\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/server\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/server\/skyerr\"\n)\n\n\/\/ AttachForgotPasswordResetHandler attaches ForgotPasswordResetHandler to server\nfunc AttachForgotPasswordResetHandler(\n\tserver *server.Server,\n\tauthDependency auth.DependencyMap,\n) *server.Server {\n\tserver.Handle(\"\/forgot_password\/reset_password\", &ForgotPasswordResetHandlerFactory{\n\t\tauthDependency,\n\t}).Methods(\"OPTIONS\", \"POST\")\n\treturn server\n}\n\n\/\/ ForgotPasswordResetHandlerFactory creates ForgotPasswordResetHandler\ntype ForgotPasswordResetHandlerFactory struct {\n\tDependency auth.DependencyMap\n}\n\n\/\/ NewHandler creates new ForgotPasswordResetHandler\nfunc (f ForgotPasswordResetHandlerFactory) NewHandler(request *http.Request) http.Handler {\n\th := &ForgotPasswordResetHandler{}\n\tinject.DefaultRequestInject(h, f.Dependency, request)\n\treturn handler.APIHandlerToHandler(h, h.TxContext)\n}\n\n\/\/ ProvideAuthzPolicy provides authorization policy of handler\nfunc (f ForgotPasswordResetHandlerFactory) ProvideAuthzPolicy() authz.Policy {\n\treturn authz.PolicyFunc(policy.DenyNoAccessKey)\n}\n\ntype ForgotPasswordResetPayload struct {\n\tUserID string `json:\"user_id\"`\n\tCode string `json:\"code\"`\n\tExpireAt int64 `json:\"expire_at\"`\n\tExpireAtTime time.Time\n\tNewPassword string `json:\"new_password\"`\n}\n\nfunc (payload ForgotPasswordResetPayload) Validate() error {\n\tif payload.UserID == \"\" {\n\t\treturn skyerr.NewInvalidArgument(\"empty user_id\", []string{\"user_id\"})\n\t}\n\n\tif payload.Code == \"\" {\n\t\treturn skyerr.NewInvalidArgument(\"empty code\", []string{\"code\"})\n\t}\n\n\tif payload.ExpireAt == 0 {\n\t\treturn skyerr.NewInvalidArgument(\"empty expire_at\", []string{\"expire_at\"})\n\t}\n\n\tif payload.NewPassword == \"\" {\n\t\treturn skyerr.NewInvalidArgument(\"empty password\", []string{\"new_password\"})\n\t}\n\n\treturn nil\n}\n\n\/\/ ForgotPasswordResetHandler reset user password with given code from email.\n\/\/\n\/\/ curl -X POST -H \"Content-Type: application\/json\" \\\n\/\/ -d @- http:\/\/localhost:3000\/forgot_password\/reset_password <<EOF\n\/\/ {\n\/\/ \"user_id\": \"xxx\",\n\/\/ \"code\": \"xxx\",\n\/\/ \"expire_at\": xxx, (utc timestamp)\n\/\/ \"new_password\": \"xxx\",\n\/\/ }\n\/\/ EOF\ntype ForgotPasswordResetHandler struct {\n\tCodeGenerator *forgotpwdemail.CodeGenerator `dependency:\"ForgotPasswordCodeGenerator\"`\n\tPasswordChecker dependency.PasswordChecker `dependency:\"PasswordChecker\"`\n\tTokenStore authtoken.Store `dependency:\"TokenStore\"`\n\tAuthInfoStore authinfo.Store `dependency:\"AuthInfoStore\"`\n\tPasswordAuthProvider password.Provider `dependency:\"PasswordAuthProvider\"`\n\tUserProfileStore userprofile.Store `dependency:\"UserProfileStore\"`\n\tTxContext db.TxContext `dependency:\"TxContext\"`\n\tLogger *logrus.Entry `dependency:\"HandlerLogger\"`\n}\n\nfunc (h ForgotPasswordResetHandler) WithTx() bool {\n\treturn true\n}\n\n\/\/ DecodeRequest decode request payload\nfunc (h ForgotPasswordResetHandler) DecodeRequest(request *http.Request) (handler.RequestPayload, error) {\n\tpayload := ForgotPasswordResetPayload{}\n\tif err := json.NewDecoder(request.Body).Decode(&payload); err != nil {\n\t\treturn nil, skyerr.NewError(skyerr.BadRequest, \"fails to decode the request payload\")\n\t}\n\n\tpayload.ExpireAtTime = time.Unix(payload.ExpireAt, 0).UTC()\n\n\treturn payload, nil\n}\n\nfunc (h ForgotPasswordResetHandler) Handle(req interface{}) (resp interface{}, err error) {\n\tpayload := req.(ForgotPasswordResetPayload)\n\n\t\/\/ check code expiration\n\tif timeNow().After(payload.ExpireAtTime) {\n\t\th.Logger.Error(\"forgot password code expired\")\n\t\terr = h.genericError()\n\t\treturn\n\t}\n\n\tauthInfo := authinfo.AuthInfo{}\n\tif e := h.AuthInfoStore.GetAuth(payload.UserID, &authInfo); e != nil {\n\t\th.Logger.WithFields(map[string]interface{}{\n\t\t\t\"user_id\": payload.UserID,\n\t\t}).WithError(e).Error(\"user not found\")\n\t\terr = h.genericError()\n\t\treturn\n\t}\n\n\t\/\/ generate access-token\n\ttoken, err := h.TokenStore.NewToken(authInfo.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Get Profile\n\tvar userProfile userprofile.UserProfile\n\tif userProfile, err = h.UserProfileStore.GetUserProfile(authInfo.ID, token.AccessToken); err != nil {\n\t\th.Logger.WithFields(map[string]interface{}{\n\t\t\t\"user_id\": payload.UserID,\n\t\t}).WithError(err).Error(\"unable to get user profile\")\n\t\terr = h.genericError()\n\t\treturn\n\t}\n\n\t\/\/ Get password auth principals\n\tprincipals, err := h.PasswordAuthProvider.GetPrincipalsByUserID(authInfo.ID)\n\tif err != nil {\n\t\th.Logger.WithFields(map[string]interface{}{\n\t\t\t\"user_id\": payload.UserID,\n\t\t}).WithError(err).Error(\"unable to get password auth principals\")\n\t\terr = h.genericError()\n\t\treturn\n\t}\n\n\thashedPassword := principals[0].HashedPassword\n\texpectedCode := h.CodeGenerator.Generate(authInfo, userProfile, hashedPassword, payload.ExpireAtTime)\n\tif payload.Code != expectedCode {\n\t\th.Logger.WithFields(map[string]interface{}{\n\t\t\t\"user_id\": payload.UserID,\n\t\t\t\"code\": payload.Code,\n\t\t\t\"expected_code\": expectedCode,\n\t\t}).Error(\"wrong forgot password reset password code\")\n\t\terr = h.genericError()\n\t\treturn\n\t}\n\n\tif err = h.PasswordChecker.ValidatePassword(audit.ValidatePasswordPayload{\n\t\tPlainPassword: payload.NewPassword,\n\t}); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ reset password\n\tfor _, p := range principals {\n\t\tp.PlainPassword = payload.NewPassword\n\t\terr = h.PasswordAuthProvider.UpdatePrincipal(*p)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif err = h.TokenStore.Put(&token); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ revoke old tokens\n\tnow := timeNow()\n\tauthInfo.TokenValidSince = &now\n\tif err = h.AuthInfoStore.UpdateAuth(&authInfo); err != nil {\n\t\treturn\n\t}\n\n\tresp = \"OK\"\n\treturn\n}\n\nfunc (h ForgotPasswordResetHandler) genericError() error {\n\treturn skyerr.NewError(skyerr.ResourceNotFound, \"user not found or code invalid\")\n}\n<commit_msg>Use auth response in reset forgot password handler<commit_after>package handler\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/dependency\/forgotpwdemail\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/dependency\/userprofile\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/response\"\n\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/dependency\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/auth\/dependency\/provider\/password\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/audit\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/authinfo\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/authtoken\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/authz\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/auth\/authz\/policy\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/db\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/handler\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/inject\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/core\/server\"\n\t\"github.com\/skygeario\/skygear-server\/pkg\/server\/skyerr\"\n)\n\n\/\/ AttachForgotPasswordResetHandler attaches ForgotPasswordResetHandler to server\nfunc AttachForgotPasswordResetHandler(\n\tserver *server.Server,\n\tauthDependency auth.DependencyMap,\n) *server.Server {\n\tserver.Handle(\"\/forgot_password\/reset_password\", &ForgotPasswordResetHandlerFactory{\n\t\tauthDependency,\n\t}).Methods(\"OPTIONS\", \"POST\")\n\treturn server\n}\n\n\/\/ ForgotPasswordResetHandlerFactory creates ForgotPasswordResetHandler\ntype ForgotPasswordResetHandlerFactory struct {\n\tDependency auth.DependencyMap\n}\n\n\/\/ NewHandler creates new ForgotPasswordResetHandler\nfunc (f ForgotPasswordResetHandlerFactory) NewHandler(request *http.Request) http.Handler {\n\th := &ForgotPasswordResetHandler{}\n\tinject.DefaultRequestInject(h, f.Dependency, request)\n\treturn handler.APIHandlerToHandler(h, h.TxContext)\n}\n\n\/\/ ProvideAuthzPolicy provides authorization policy of handler\nfunc (f ForgotPasswordResetHandlerFactory) ProvideAuthzPolicy() authz.Policy {\n\treturn authz.PolicyFunc(policy.DenyNoAccessKey)\n}\n\ntype ForgotPasswordResetPayload struct {\n\tUserID string `json:\"user_id\"`\n\tCode string `json:\"code\"`\n\tExpireAt int64 `json:\"expire_at\"`\n\tExpireAtTime time.Time\n\tNewPassword string `json:\"new_password\"`\n}\n\nfunc (payload ForgotPasswordResetPayload) Validate() error {\n\tif payload.UserID == \"\" {\n\t\treturn skyerr.NewInvalidArgument(\"empty user_id\", []string{\"user_id\"})\n\t}\n\n\tif payload.Code == \"\" {\n\t\treturn skyerr.NewInvalidArgument(\"empty code\", []string{\"code\"})\n\t}\n\n\tif payload.ExpireAt == 0 {\n\t\treturn skyerr.NewInvalidArgument(\"empty expire_at\", []string{\"expire_at\"})\n\t}\n\n\tif payload.NewPassword == \"\" {\n\t\treturn skyerr.NewInvalidArgument(\"empty password\", []string{\"new_password\"})\n\t}\n\n\treturn nil\n}\n\n\/\/ ForgotPasswordResetHandler reset user password with given code from email.\n\/\/\n\/\/ curl -X POST -H \"Content-Type: application\/json\" \\\n\/\/ -d @- http:\/\/localhost:3000\/forgot_password\/reset_password <<EOF\n\/\/ {\n\/\/ \"user_id\": \"xxx\",\n\/\/ \"code\": \"xxx\",\n\/\/ \"expire_at\": xxx, (utc timestamp)\n\/\/ \"new_password\": \"xxx\",\n\/\/ }\n\/\/ EOF\ntype ForgotPasswordResetHandler struct {\n\tCodeGenerator *forgotpwdemail.CodeGenerator `dependency:\"ForgotPasswordCodeGenerator\"`\n\tPasswordChecker dependency.PasswordChecker `dependency:\"PasswordChecker\"`\n\tTokenStore authtoken.Store `dependency:\"TokenStore\"`\n\tAuthInfoStore authinfo.Store `dependency:\"AuthInfoStore\"`\n\tPasswordAuthProvider password.Provider `dependency:\"PasswordAuthProvider\"`\n\tUserProfileStore userprofile.Store `dependency:\"UserProfileStore\"`\n\tTxContext db.TxContext `dependency:\"TxContext\"`\n\tLogger *logrus.Entry `dependency:\"HandlerLogger\"`\n}\n\nfunc (h ForgotPasswordResetHandler) WithTx() bool {\n\treturn true\n}\n\n\/\/ DecodeRequest decode request payload\nfunc (h ForgotPasswordResetHandler) DecodeRequest(request *http.Request) (handler.RequestPayload, error) {\n\tpayload := ForgotPasswordResetPayload{}\n\tif err := json.NewDecoder(request.Body).Decode(&payload); err != nil {\n\t\treturn nil, skyerr.NewError(skyerr.BadRequest, \"fails to decode the request payload\")\n\t}\n\n\tpayload.ExpireAtTime = time.Unix(payload.ExpireAt, 0).UTC()\n\n\treturn payload, nil\n}\n\nfunc (h ForgotPasswordResetHandler) Handle(req interface{}) (resp interface{}, err error) {\n\tpayload := req.(ForgotPasswordResetPayload)\n\n\t\/\/ check code expiration\n\tif timeNow().After(payload.ExpireAtTime) {\n\t\th.Logger.Error(\"forgot password code expired\")\n\t\terr = h.genericError()\n\t\treturn\n\t}\n\n\tauthInfo := authinfo.AuthInfo{}\n\tif e := h.AuthInfoStore.GetAuth(payload.UserID, &authInfo); e != nil {\n\t\th.Logger.WithFields(map[string]interface{}{\n\t\t\t\"user_id\": payload.UserID,\n\t\t}).WithError(e).Error(\"user not found\")\n\t\terr = h.genericError()\n\t\treturn\n\t}\n\n\t\/\/ generate access-token\n\ttoken, err := h.TokenStore.NewToken(authInfo.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Get Profile\n\tvar userProfile userprofile.UserProfile\n\tif userProfile, err = h.UserProfileStore.GetUserProfile(authInfo.ID, token.AccessToken); err != nil {\n\t\th.Logger.WithFields(map[string]interface{}{\n\t\t\t\"user_id\": payload.UserID,\n\t\t}).WithError(err).Error(\"unable to get user profile\")\n\t\terr = h.genericError()\n\t\treturn\n\t}\n\n\t\/\/ Get password auth principals\n\tprincipals, err := h.PasswordAuthProvider.GetPrincipalsByUserID(authInfo.ID)\n\tif err != nil {\n\t\th.Logger.WithFields(map[string]interface{}{\n\t\t\t\"user_id\": payload.UserID,\n\t\t}).WithError(err).Error(\"unable to get password auth principals\")\n\t\terr = h.genericError()\n\t\treturn\n\t}\n\n\thashedPassword := principals[0].HashedPassword\n\texpectedCode := h.CodeGenerator.Generate(authInfo, userProfile, hashedPassword, payload.ExpireAtTime)\n\tif payload.Code != expectedCode {\n\t\th.Logger.WithFields(map[string]interface{}{\n\t\t\t\"user_id\": payload.UserID,\n\t\t\t\"code\": payload.Code,\n\t\t\t\"expected_code\": expectedCode,\n\t\t}).Error(\"wrong forgot password reset password code\")\n\t\terr = h.genericError()\n\t\treturn\n\t}\n\n\tif err = h.PasswordChecker.ValidatePassword(audit.ValidatePasswordPayload{\n\t\tPlainPassword: payload.NewPassword,\n\t}); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ reset password\n\tfor _, p := range principals {\n\t\tp.PlainPassword = payload.NewPassword\n\t\terr = h.PasswordAuthProvider.UpdatePrincipal(*p)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif err = h.TokenStore.Put(&token); err != nil {\n\t\treturn\n\t}\n\n\t\/\/ revoke old tokens\n\tnow := timeNow()\n\tauthInfo.TokenValidSince = &now\n\tif err = h.AuthInfoStore.UpdateAuth(&authInfo); err != nil {\n\t\treturn\n\t}\n\n\tresp = response.NewAuthResponse(authInfo, userProfile, token.AccessToken)\n\treturn\n}\n\nfunc (h ForgotPasswordResetHandler) genericError() error {\n\treturn skyerr.NewError(skyerr.ResourceNotFound, \"user not found or code invalid\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage server\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\n\trestclient \"k8s.io\/client-go\/rest\"\n)\n\n\/\/ LoopbackClientServerNameOverride is passed to the apiserver from the loopback client in order to\n\/\/ select the loopback certificate via SNI if TLS is used.\nconst LoopbackClientServerNameOverride = \"apiserver-loopback-client\"\n\nfunc (s *SecureServingInfo) NewClientConfig(caCert []byte) (*restclient.Config, error) {\n\tif s == nil || (s.Cert == nil && len(s.SNICerts) == 0) {\n\t\treturn nil, nil\n\t}\n\n\thost, port, err := LoopbackHostPort(s.Listener.Addr().String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &restclient.Config{\n\t\t\/\/ Increase QPS limits. The client is currently passed to all admission plugins,\n\t\t\/\/ and those can be throttled in case of higher load on apiserver - see #22340 and #22422\n\t\t\/\/ for more details. Once #22422 is fixed, we may want to remove it.\n\t\tQPS: 50,\n\t\tBurst: 100,\n\t\tHost: \"https:\/\/\" + net.JoinHostPort(host, port),\n\t\t\/\/ override the ServerName to select our loopback certificate via SNI. This name is also\n\t\t\/\/ used by the client to compare the returns server certificate against.\n\t\tTLSClientConfig: restclient.TLSClientConfig{\n\t\t\tCAData: caCert,\n\t\t},\n\t}, nil\n}\n\nfunc (s *SecureServingInfo) NewLoopbackClientConfig(token string, loopbackCert []byte) (*restclient.Config, error) {\n\tc, err := s.NewClientConfig(loopbackCert)\n\tif err != nil || c == nil {\n\t\treturn c, err\n\t}\n\n\tc.BearerToken = token\n\tc.TLSClientConfig.ServerName = LoopbackClientServerNameOverride\n\n\treturn c, nil\n}\n\n\/\/ LoopbackHostPort returns the host and port loopback REST clients should use\n\/\/ to contact the server.\nfunc LoopbackHostPort(bindAddress string) (string, string, error) {\n\thost, port, err := net.SplitHostPort(bindAddress)\n\tif err != nil {\n\t\t\/\/ should never happen\n\t\treturn \"\", \"\", fmt.Errorf(\"invalid server bind address: %q\", bindAddress)\n\t}\n\n\tisIPv6 := net.ParseIP(host).To4() == nil\n\n\t\/\/ Value is expected to be an IP or DNS name, not \"0.0.0.0\".\n\tif host == \"0.0.0.0\" || host == \"::\" {\n\t\thost = \"localhost\"\n\t\t\/\/ Get ip of local interface, but fall back to \"localhost\".\n\t\t\/\/ Note that \"localhost\" is resolved with the external nameserver first with Go's stdlib.\n\t\t\/\/ So if localhost.<yoursearchdomain> resolves, we don't get a 127.0.0.1 as expected.\n\t\taddrs, err := net.InterfaceAddrs()\n\t\tif err == nil {\n\t\t\tfor _, address := range addrs {\n\t\t\t\tif ipnet, ok := address.(*net.IPNet); ok && ipnet.IP.IsLoopback() && isIPv6 == (ipnet.IP.To4() == nil) {\n\t\t\t\t\thost = ipnet.IP.String()\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn host, port, nil\n}\n<commit_msg>remove apiserver loopback client QPS limit<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage server\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\n\trestclient \"k8s.io\/client-go\/rest\"\n)\n\n\/\/ LoopbackClientServerNameOverride is passed to the apiserver from the loopback client in order to\n\/\/ select the loopback certificate via SNI if TLS is used.\nconst LoopbackClientServerNameOverride = \"apiserver-loopback-client\"\n\nfunc (s *SecureServingInfo) NewClientConfig(caCert []byte) (*restclient.Config, error) {\n\tif s == nil || (s.Cert == nil && len(s.SNICerts) == 0) {\n\t\treturn nil, nil\n\t}\n\n\thost, port, err := LoopbackHostPort(s.Listener.Addr().String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &restclient.Config{\n\t\t\/\/ Do not limit loopback client QPS.\n\t\tQPS: -1,\n\t\tHost: \"https:\/\/\" + net.JoinHostPort(host, port),\n\t\t\/\/ override the ServerName to select our loopback certificate via SNI. This name is also\n\t\t\/\/ used by the client to compare the returns server certificate against.\n\t\tTLSClientConfig: restclient.TLSClientConfig{\n\t\t\tCAData: caCert,\n\t\t},\n\t}, nil\n}\n\nfunc (s *SecureServingInfo) NewLoopbackClientConfig(token string, loopbackCert []byte) (*restclient.Config, error) {\n\tc, err := s.NewClientConfig(loopbackCert)\n\tif err != nil || c == nil {\n\t\treturn c, err\n\t}\n\n\tc.BearerToken = token\n\tc.TLSClientConfig.ServerName = LoopbackClientServerNameOverride\n\n\treturn c, nil\n}\n\n\/\/ LoopbackHostPort returns the host and port loopback REST clients should use\n\/\/ to contact the server.\nfunc LoopbackHostPort(bindAddress string) (string, string, error) {\n\thost, port, err := net.SplitHostPort(bindAddress)\n\tif err != nil {\n\t\t\/\/ should never happen\n\t\treturn \"\", \"\", fmt.Errorf(\"invalid server bind address: %q\", bindAddress)\n\t}\n\n\tisIPv6 := net.ParseIP(host).To4() == nil\n\n\t\/\/ Value is expected to be an IP or DNS name, not \"0.0.0.0\".\n\tif host == \"0.0.0.0\" || host == \"::\" {\n\t\thost = \"localhost\"\n\t\t\/\/ Get ip of local interface, but fall back to \"localhost\".\n\t\t\/\/ Note that \"localhost\" is resolved with the external nameserver first with Go's stdlib.\n\t\t\/\/ So if localhost.<yoursearchdomain> resolves, we don't get a 127.0.0.1 as expected.\n\t\taddrs, err := net.InterfaceAddrs()\n\t\tif err == nil {\n\t\t\tfor _, address := range addrs {\n\t\t\t\tif ipnet, ok := address.(*net.IPNet); ok && ipnet.IP.IsLoopback() && isIPv6 == (ipnet.IP.To4() == nil) {\n\t\t\t\t\thost = ipnet.IP.String()\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn host, port, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage genericclioptions\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\tjsonpatch \"github.com\/evanphx\/json-patch\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/pflag\"\n\n\t\"k8s.io\/apimachinery\/pkg\/api\/meta\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/json\"\n)\n\n\/\/ ChangeCauseAnnotation is the annotation indicating a guess at \"why\" something was changed\nconst ChangeCauseAnnotation = \"kubernetes.io\/change-cause\"\n\n\/\/ RecordFlags contains all flags associated with the \"--record\" operation\ntype RecordFlags struct {\n\t\/\/ Record indicates the state of the recording flag. It is a pointer so a caller can opt out or rebind\n\tRecord *bool\n\n\tchangeCause string\n}\n\n\/\/ ToRecorder returns a ChangeCause recorder if --record=false was not\n\/\/ explicitly given by the user\nfunc (f *RecordFlags) ToRecorder() (Recorder, error) {\n\tif f == nil {\n\t\treturn NoopRecorder{}, nil\n\t}\n\n\tshouldRecord := false\n\tif f.Record != nil {\n\t\tshouldRecord = *f.Record\n\t}\n\n\t\/\/ if flag was explicitly set to false by the user,\n\t\/\/ do not record\n\tif !shouldRecord {\n\t\treturn NoopRecorder{}, nil\n\t}\n\n\treturn &ChangeCauseRecorder{\n\t\tchangeCause: f.changeCause,\n\t}, nil\n}\n\n\/\/ Complete is called before the command is run, but after it is invoked to finish the state of the struct before use.\nfunc (f *RecordFlags) Complete(cmd *cobra.Command) error {\n\tif f == nil {\n\t\treturn nil\n\t}\n\n\tf.changeCause = parseCommandArguments(cmd)\n\treturn nil\n}\n\n\/\/ CompleteWithChangeCause alters changeCause value with a new cause\nfunc (f *RecordFlags) CompleteWithChangeCause(cause string) error {\n\tif f == nil {\n\t\treturn nil\n\t}\n\n\tf.changeCause = cause\n\treturn nil\n}\n\n\/\/ AddFlags binds the requested flags to the provided flagset\n\/\/ TODO have this only take a flagset\nfunc (f *RecordFlags) AddFlags(cmd *cobra.Command) {\n\tif f == nil {\n\t\treturn\n\t}\n\n\tif f.Record != nil {\n\t\tcmd.Flags().BoolVar(f.Record, \"record\", *f.Record, \"Record current kubectl command in the resource annotation. If set to false, do not record the command. If set to true, record the command. If not set, default to updating the existing annotation value only if one already exists.\")\n\t}\n}\n\n\/\/ NewRecordFlags provides a RecordFlags with reasonable default values set for use\nfunc NewRecordFlags() *RecordFlags {\n\trecord := false\n\n\treturn &RecordFlags{\n\t\tRecord: &record,\n\t}\n}\n\n\/\/ Recorder is used to record why a runtime.Object was changed in an annotation.\ntype Recorder interface {\n\t\/\/ Record records why a runtime.Object was changed in an annotation.\n\tRecord(runtime.Object) error\n\tMakeRecordMergePatch(runtime.Object) ([]byte, error)\n}\n\n\/\/ NoopRecorder does nothing. It is a \"do nothing\" that can be returned so code doesn't switch on it.\ntype NoopRecorder struct{}\n\n\/\/ Record implements Recorder\nfunc (r NoopRecorder) Record(obj runtime.Object) error {\n\treturn nil\n}\n\n\/\/ MakeRecordMergePatch implements Recorder\nfunc (r NoopRecorder) MakeRecordMergePatch(obj runtime.Object) ([]byte, error) {\n\treturn nil, nil\n}\n\n\/\/ ChangeCauseRecorder annotates a \"change-cause\" to an input runtime object\ntype ChangeCauseRecorder struct {\n\tchangeCause string\n}\n\n\/\/ Record annotates a \"change-cause\" to a given info if either \"shouldRecord\" is true,\n\/\/ or the resource info previously contained a \"change-cause\" annotation.\nfunc (r *ChangeCauseRecorder) Record(obj runtime.Object) error {\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tannotations := accessor.GetAnnotations()\n\tif annotations == nil {\n\t\tannotations = make(map[string]string)\n\t}\n\tannotations[ChangeCauseAnnotation] = r.changeCause\n\taccessor.SetAnnotations(annotations)\n\treturn nil\n}\n\n\/\/ MakeRecordMergePatch produces a merge patch for updating the recording annotation.\nfunc (r *ChangeCauseRecorder) MakeRecordMergePatch(obj runtime.Object) ([]byte, error) {\n\t\/\/ copy so we don't mess with the original\n\tobjCopy := obj.DeepCopyObject()\n\tif err := r.Record(objCopy); err != nil {\n\t\treturn nil, err\n\t}\n\n\toldData, err := json.Marshal(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnewData, err := json.Marshal(objCopy)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn jsonpatch.CreateMergePatch(oldData, newData)\n}\n\n\/\/ parseCommandArguments will stringify and return all environment arguments ie. a command run by a client\n\/\/ using the factory.\n\/\/ Set showSecrets false to filter out stuff like secrets.\nfunc parseCommandArguments(cmd *cobra.Command) string {\n\tif len(os.Args) == 0 {\n\t\treturn \"\"\n\t}\n\n\tflags := \"\"\n\tparseFunc := func(flag *pflag.Flag, value string) error {\n\t\tflags = flags + \" --\" + flag.Name\n\t\tif set, ok := flag.Annotations[\"classified\"]; !ok || len(set) == 0 {\n\t\t\tflags = flags + \"=\" + value\n\t\t} else {\n\t\t\tflags = flags + \"=CLASSIFIED\"\n\t\t}\n\t\treturn nil\n\t}\n\tvar err error\n\terr = cmd.Flags().ParseAll(os.Args[1:], parseFunc)\n\tif err != nil || !cmd.Flags().Parsed() {\n\t\treturn \"\"\n\t}\n\n\targs := \"\"\n\tif arguments := cmd.Flags().Args(); len(arguments) > 0 {\n\t\targs = \" \" + strings.Join(arguments, \" \")\n\t}\n\n\tbase := filepath.Base(os.Args[0])\n\treturn base + args + flags\n}\n<commit_msg>Start deprecation of --record flag<commit_after>\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage genericclioptions\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\tjsonpatch \"github.com\/evanphx\/json-patch\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/pflag\"\n\n\t\"k8s.io\/apimachinery\/pkg\/api\/meta\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/json\"\n)\n\n\/\/ ChangeCauseAnnotation is the annotation indicating a guess at \"why\" something was changed\nconst ChangeCauseAnnotation = \"kubernetes.io\/change-cause\"\n\n\/\/ RecordFlags contains all flags associated with the \"--record\" operation\ntype RecordFlags struct {\n\t\/\/ Record indicates the state of the recording flag. It is a pointer so a caller can opt out or rebind\n\tRecord *bool\n\n\tchangeCause string\n}\n\n\/\/ ToRecorder returns a ChangeCause recorder if --record=false was not\n\/\/ explicitly given by the user\nfunc (f *RecordFlags) ToRecorder() (Recorder, error) {\n\tif f == nil {\n\t\treturn NoopRecorder{}, nil\n\t}\n\n\tshouldRecord := false\n\tif f.Record != nil {\n\t\tshouldRecord = *f.Record\n\t}\n\n\t\/\/ if flag was explicitly set to false by the user,\n\t\/\/ do not record\n\tif !shouldRecord {\n\t\treturn NoopRecorder{}, nil\n\t}\n\n\treturn &ChangeCauseRecorder{\n\t\tchangeCause: f.changeCause,\n\t}, nil\n}\n\n\/\/ Complete is called before the command is run, but after it is invoked to finish the state of the struct before use.\nfunc (f *RecordFlags) Complete(cmd *cobra.Command) error {\n\tif f == nil {\n\t\treturn nil\n\t}\n\n\tf.changeCause = parseCommandArguments(cmd)\n\treturn nil\n}\n\n\/\/ CompleteWithChangeCause alters changeCause value with a new cause\nfunc (f *RecordFlags) CompleteWithChangeCause(cause string) error {\n\tif f == nil {\n\t\treturn nil\n\t}\n\n\tf.changeCause = cause\n\treturn nil\n}\n\n\/\/ AddFlags binds the requested flags to the provided flagset\n\/\/ TODO have this only take a flagset\nfunc (f *RecordFlags) AddFlags(cmd *cobra.Command) {\n\tif f == nil {\n\t\treturn\n\t}\n\n\tif f.Record != nil {\n\t\tcmd.Flags().BoolVar(f.Record, \"record\", *f.Record, \"Record current kubectl command in the resource annotation. If set to false, do not record the command. If set to true, record the command. If not set, default to updating the existing annotation value only if one already exists.\")\n\t\tcmd.Flags().MarkDeprecated(\"record\", \"--record will be removed in the future\")\n\t}\n}\n\n\/\/ NewRecordFlags provides a RecordFlags with reasonable default values set for use\nfunc NewRecordFlags() *RecordFlags {\n\trecord := false\n\n\treturn &RecordFlags{\n\t\tRecord: &record,\n\t}\n}\n\n\/\/ Recorder is used to record why a runtime.Object was changed in an annotation.\ntype Recorder interface {\n\t\/\/ Record records why a runtime.Object was changed in an annotation.\n\tRecord(runtime.Object) error\n\tMakeRecordMergePatch(runtime.Object) ([]byte, error)\n}\n\n\/\/ NoopRecorder does nothing. It is a \"do nothing\" that can be returned so code doesn't switch on it.\ntype NoopRecorder struct{}\n\n\/\/ Record implements Recorder\nfunc (r NoopRecorder) Record(obj runtime.Object) error {\n\treturn nil\n}\n\n\/\/ MakeRecordMergePatch implements Recorder\nfunc (r NoopRecorder) MakeRecordMergePatch(obj runtime.Object) ([]byte, error) {\n\treturn nil, nil\n}\n\n\/\/ ChangeCauseRecorder annotates a \"change-cause\" to an input runtime object\ntype ChangeCauseRecorder struct {\n\tchangeCause string\n}\n\n\/\/ Record annotates a \"change-cause\" to a given info if either \"shouldRecord\" is true,\n\/\/ or the resource info previously contained a \"change-cause\" annotation.\nfunc (r *ChangeCauseRecorder) Record(obj runtime.Object) error {\n\taccessor, err := meta.Accessor(obj)\n\tif err != nil {\n\t\treturn err\n\t}\n\tannotations := accessor.GetAnnotations()\n\tif annotations == nil {\n\t\tannotations = make(map[string]string)\n\t}\n\tannotations[ChangeCauseAnnotation] = r.changeCause\n\taccessor.SetAnnotations(annotations)\n\treturn nil\n}\n\n\/\/ MakeRecordMergePatch produces a merge patch for updating the recording annotation.\nfunc (r *ChangeCauseRecorder) MakeRecordMergePatch(obj runtime.Object) ([]byte, error) {\n\t\/\/ copy so we don't mess with the original\n\tobjCopy := obj.DeepCopyObject()\n\tif err := r.Record(objCopy); err != nil {\n\t\treturn nil, err\n\t}\n\n\toldData, err := json.Marshal(obj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnewData, err := json.Marshal(objCopy)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn jsonpatch.CreateMergePatch(oldData, newData)\n}\n\n\/\/ parseCommandArguments will stringify and return all environment arguments ie. a command run by a client\n\/\/ using the factory.\n\/\/ Set showSecrets false to filter out stuff like secrets.\nfunc parseCommandArguments(cmd *cobra.Command) string {\n\tif len(os.Args) == 0 {\n\t\treturn \"\"\n\t}\n\n\tflags := \"\"\n\tparseFunc := func(flag *pflag.Flag, value string) error {\n\t\tflags = flags + \" --\" + flag.Name\n\t\tif set, ok := flag.Annotations[\"classified\"]; !ok || len(set) == 0 {\n\t\t\tflags = flags + \"=\" + value\n\t\t} else {\n\t\t\tflags = flags + \"=CLASSIFIED\"\n\t\t}\n\t\treturn nil\n\t}\n\tvar err error\n\terr = cmd.Flags().ParseAll(os.Args[1:], parseFunc)\n\tif err != nil || !cmd.Flags().Parsed() {\n\t\treturn \"\"\n\t}\n\n\targs := \"\"\n\tif arguments := cmd.Flags().Args(); len(arguments) > 0 {\n\t\targs = \" \" + strings.Join(arguments, \" \")\n\t}\n\n\tbase := filepath.Base(os.Args[0])\n\treturn base + args + flags\n}\n<|endoftext|>"} {"text":"<commit_before>package cryptoki\n\nimport (\n\t\"crypto\"\n\t\"errors\"\n\t\"fmt\"\n\t\"hash\/crc64\"\n\n\t\"github.com\/miekg\/pkcs11\"\n)\n\n\/\/ A Token represents a cryptographic token that implements PKCS #11.\ntype Token struct {\n\tmodule *pkcs11.Ctx\n\tsession pkcs11.SessionHandle\n}\n\n\/\/ findSlot retrieves ID of the slot with matching token label.\nfunc findSlot(module *pkcs11.Ctx, tokenLabel string) (uint, error) {\n\tvar nilSlot uint\n\n\tslots, err := module.GetSlotList(true)\n\tif err != nil {\n\t\treturn nilSlot, fmt.Errorf(\"failed to get slot list: %s\", err)\n\t}\n\n\tfor _, slot := range slots {\n\t\ttokenInfo, err := module.GetTokenInfo(slot)\n\t\tif err != nil {\n\t\t\treturn nilSlot, fmt.Errorf(\"failed to get token info: %s\", err)\n\t\t}\n\n\t\tif tokenInfo.Label == tokenLabel {\n\t\t\treturn slot, nil\n\t\t}\n\t}\n\n\treturn nilSlot, fmt.Errorf(\"no slot with token label '%q'\", tokenLabel)\n}\n\n\/\/ OpenToken opens a new session with the given cryptographic token.\nfunc OpenToken(modulePath, tokenLabel, pin string, readOnly bool) (*Token, error) {\n\tmodule := pkcs11.New(modulePath)\n\tif module == nil {\n\t\treturn nil, fmt.Errorf(\"failed to load module '%s'\", modulePath)\n\t}\n\n\terr := module.Initialize()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tslotID, err := findSlot(module, tokenLabel)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar flags uint\n\tif readOnly {\n\t\tflags = pkcs11.CKF_SERIAL_SESSION\n\t} else {\n\t\tflags = pkcs11.CKF_SERIAL_SESSION | pkcs11.CKF_RW_SESSION\n\t}\n\tsession, err := module.OpenSession(slotID, flags)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Log in as a normal user with given PIN.\n\t\/\/\n\t\/\/ NOTE: Login status is application-wide, not per session. It is fine\n\t\/\/ if the token complains user already logged in.\n\terr = module.Login(session, pkcs11.CKU_USER, pin)\n\tif err != nil && err != pkcs11.Error(pkcs11.CKR_USER_ALREADY_LOGGED_IN) {\n\t\tmodule.CloseSession(session)\n\t\treturn nil, err\n\t}\n\n\treturn &Token{module, session}, nil\n}\n\n\/\/ Close closes the current session with the token.\n\/\/\n\/\/ NOTE: We do not explicitly log out the session or unload the module\n\/\/ here, as it may cause problem if there are multiple sessions active.\n\/\/ In general, it will log out once the last session is closed and the\n\/\/ module will be unloaded at the end of the process.\nfunc (tk *Token) Close() error {\n\terr := tk.module.CloseSession(tk.session)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to close session: %s\", err)\n\t}\n\n\treturn nil\n}\n\n\/\/ Info obtains information about the token.\nfunc (tk *Token) Info() (pkcs11.TokenInfo, error) {\n\tvar nilTokenInfo pkcs11.TokenInfo\n\n\tsessionInfo, err := tk.module.GetSessionInfo(tk.session)\n\tif err != nil {\n\t\treturn nilTokenInfo, fmt.Errorf(\"failed to get session info: %s\", err)\n\t}\n\n\ttokenInfo, err := tk.module.GetTokenInfo(sessionInfo.SlotID)\n\tif err != nil {\n\t\treturn nilTokenInfo, fmt.Errorf(\"failed to get token info: %s\", err)\n\t}\n\n\treturn tokenInfo, nil\n}\n\n\/\/ GenerateKeyPair generates a key pair inside the token.\nfunc (tk *Token) GenerateKeyPair(label string, kr KeyRequest) (*KeyPair, error) {\n\tkeyID := uint(crc64.Checksum([]byte(label), crc64.MakeTable(crc64.ECMA)))\n\tpublicKeyTemplate := []*pkcs11.Attribute{\n\t\t\/\/ Common storage object attributes (PKCS #11-B 10.4)\n\t\tpkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_PRIVATE, false),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_MODIFIABLE, false),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_LABEL, label),\n\t\t\/\/ Common key attributes (PKCS #11-B 10.7)\n\t\tpkcs11.NewAttribute(pkcs11.CKA_ID, keyID),\n\t\t\/\/ Common public key attributes (PKCS #11-B 10.8)\n\t\tpkcs11.NewAttribute(pkcs11.CKA_ENCRYPT, true),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_VERIFY, true),\n\t}\n\n\tvar req keyRequest\n\tswitch kr.Algo() {\n\tcase \"rsa\":\n\t\treq = NewRSAKeyRequest(kr.Size()).(*rsaKeyRequest)\n\tcase \"ecdsa\":\n\t\treq = NewECKeyRequest(kr.Size()).(*ecdsaKeyRequest)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported algorithm: %s\", kr.Algo())\n\t}\n\n\tattrs, err := req.Attrs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpublicKeyTemplate = append(publicKeyTemplate, attrs...)\n\n\tprivateKeyTemplate := []*pkcs11.Attribute{\n\t\t\/\/ Common storage object attributes (PKCS #11-B 10.4)\n\t\tpkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_PRIVATE, true),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_MODIFIABLE, false),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_LABEL, label),\n\t\t\/\/ Common key attributes (PKCS #11-B 10.7)\n\t\tpkcs11.NewAttribute(pkcs11.CKA_ID, keyID),\n\t\t\/\/ Common private key attributes (PKCS #11-B 10.9)\n\t\tpkcs11.NewAttribute(pkcs11.CKA_SENSITIVE, true),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_DECRYPT, true),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_SIGN, true),\n\t}\n\n\tpubHandle, privHandle, err := tk.module.GenerateKeyPair(tk.session, req.Mechanisms(), publicKeyTemplate, privateKeyTemplate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpub, err := tk.ExportPublicKey(pubHandle)\n\tif err != nil {\n\t\treturn nil, err\n\n\t}\n\n\treturn &KeyPair{tk, pub, privHandle}, nil\n}\n\n\/\/ FindKeyPair looks up a key pair inside the token with the public key.\nfunc (tk *Token) FindKeyPair(pub crypto.PublicKey) (*KeyPair, error) {\n\t\/\/ First, looks up the given public key in the token, and returns get\n\t\/\/ its object handle if found.\n\tkp, err := parseKeyParams(pub)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttemplate, err := kp.Attrs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpubHandle, err := tk.FindObject(template)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Then looks up the private key with matching CKA_ID of the given public key handle.\n\tpublicKeyID, err := tk.GetAttribute(pubHandle, pkcs11.CKA_ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprivHandle, err := tk.FindObject([]*pkcs11.Attribute{\n\t\tpkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PRIVATE_KEY),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_ID, publicKeyID),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &KeyPair{tk, pub, privHandle}, nil\n}\n\n\/\/ ExportPublicKey retrieves the public key with given object handle.\nfunc (tk *Token) ExportPublicKey(handle pkcs11.ObjectHandle) (crypto.PublicKey, error) {\n\tvalue, err := tk.GetAttribute(handle, pkcs11.CKA_KEY_TYPE)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(value) == 0 {\n\t\treturn nil, errors.New(\"invalid public key object\")\n\t}\n\tkeyType := value[0]\n\n\tswitch keyType {\n\tcase pkcs11.CKK_RSA:\n\t\tkp := new(rsaKeyParams)\n\t\tkp.modulus, _ = tk.GetAttribute(handle, pkcs11.CKA_MODULUS)\n\t\tkp.exponent, _ = tk.GetAttribute(handle, pkcs11.CKA_PUBLIC_EXPONENT)\n\t\treturn kp.Key()\n\tcase pkcs11.CKK_EC:\n\t\tkp := new(ecdsaKeyParams)\n\t\tkp.ecParams, _ = tk.GetAttribute(handle, pkcs11.CKA_EC_PARAMS)\n\t\tkp.ecPoint, _ = tk.GetAttribute(handle, pkcs11.CKA_EC_POINT)\n\t\treturn kp.Key()\n\tdefault:\n\t\treturn nil, errors.New(\"unknown key type\")\n\t}\n}\n\n\/\/ Sign signs msg with the private key inside the token. The caller is\n\/\/ responsibile to compute the message digest.\nfunc (tk *Token) Sign(mech uint, msg []byte, key pkcs11.ObjectHandle) ([]byte, error) {\n\tm := []*pkcs11.Mechanism{pkcs11.NewMechanism(mech, nil)}\n\tif err := tk.module.SignInit(tk.session, m, key); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn tk.module.Sign(tk.session, msg)\n}\n\n\/\/ FindObject returns the first object it found that matches the query.\nfunc (tk *Token) FindObject(query []*pkcs11.Attribute) (pkcs11.ObjectHandle, error) {\n\terr := tk.module.FindObjectsInit(tk.session, query)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tresult, _, err := tk.module.FindObjects(tk.session, 1)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\terr = tk.module.FindObjectsFinal(tk.session)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif len(result) == 0 {\n\t\treturn 0, errors.New(\"object not found\")\n\t}\n\n\treturn result[0], nil\n}\n\n\/\/ GetAttribute obtains the value of a single object attribute. If there\n\/\/ are multiple attributes of the same type, it only returns the value\n\/\/ of the first one.\nfunc (tk *Token) GetAttribute(obj pkcs11.ObjectHandle, typ uint) ([]byte, error) {\n\tattr, err := tk.module.GetAttributeValue(tk.session, obj, []*pkcs11.Attribute{\n\t\tpkcs11.NewAttribute(typ, nil),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(attr) == 0 {\n\t\treturn nil, errors.New(\"attribute not found\")\n\t}\n\n\treturn attr[0].Value, nil\n}\n<commit_msg>Keep slotID in Token<commit_after>package cryptoki\n\nimport (\n\t\"crypto\"\n\t\"errors\"\n\t\"fmt\"\n\t\"hash\/crc64\"\n\n\t\"github.com\/miekg\/pkcs11\"\n)\n\n\/\/ A Token represents a cryptographic token that implements PKCS #11.\ntype Token struct {\n\tmodule *pkcs11.Ctx\n\tslotID uint\n\n\tsession pkcs11.SessionHandle\n}\n\n\/\/ findSlot retrieves ID of the slot with matching token label.\nfunc findSlot(module *pkcs11.Ctx, tokenLabel string) (slotID uint, err error) {\n\tslots, err := module.GetSlotList(true)\n\tif err != nil {\n\t\treturn slotID, fmt.Errorf(\"failed to get slot list: %s\", err)\n\t}\n\n\tfor _, id := range slots {\n\t\ttokenInfo, err := module.GetTokenInfo(id)\n\t\tif err != nil {\n\t\t\treturn slotID, fmt.Errorf(\"failed to get token info: %s\", err)\n\t\t}\n\n\t\tif tokenInfo.Label == tokenLabel {\n\t\t\treturn id, nil\n\t\t}\n\t}\n\n\treturn slotID, fmt.Errorf(\"no slot with token label '%q'\", tokenLabel)\n}\n\n\/\/ OpenToken opens a new session with the given cryptographic token.\nfunc OpenToken(modulePath, tokenLabel, pin string, readOnly bool) (*Token, error) {\n\tmodule := pkcs11.New(modulePath)\n\tif module == nil {\n\t\treturn nil, fmt.Errorf(\"failed to load module '%s'\", modulePath)\n\t}\n\n\terr := module.Initialize()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tslotID, err := findSlot(module, tokenLabel)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar flags uint\n\tif readOnly {\n\t\tflags = pkcs11.CKF_SERIAL_SESSION\n\t} else {\n\t\tflags = pkcs11.CKF_SERIAL_SESSION | pkcs11.CKF_RW_SESSION\n\t}\n\tsession, err := module.OpenSession(slotID, flags)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Log in as a normal user with given PIN.\n\t\/\/\n\t\/\/ NOTE: Login status is application-wide, not per session. It is fine\n\t\/\/ if the token complains user already logged in.\n\terr = module.Login(session, pkcs11.CKU_USER, pin)\n\tif err != nil && err != pkcs11.Error(pkcs11.CKR_USER_ALREADY_LOGGED_IN) {\n\t\tmodule.CloseSession(session)\n\t\treturn nil, err\n\t}\n\n\treturn &Token{module, slotID, session}, nil\n}\n\n\/\/ Close closes the current session with the token.\n\/\/\n\/\/ NOTE: We do not explicitly log out the session or unload the module\n\/\/ here, as it may cause problem if there are multiple sessions active.\n\/\/ In general, it will log out once the last session is closed and the\n\/\/ module will be unloaded at the end of the process.\nfunc (tk *Token) Close() error {\n\terr := tk.module.CloseSession(tk.session)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to close session: %s\", err)\n\t}\n\n\treturn nil\n}\n\n\/\/ Info obtains information about the token.\nfunc (tk *Token) Info() (pkcs11.TokenInfo, error) {\n\tvar nilTokenInfo pkcs11.TokenInfo\n\n\tsessionInfo, err := tk.module.GetSessionInfo(tk.session)\n\tif err != nil {\n\t\treturn nilTokenInfo, fmt.Errorf(\"failed to get session info: %s\", err)\n\t}\n\n\ttokenInfo, err := tk.module.GetTokenInfo(sessionInfo.SlotID)\n\tif err != nil {\n\t\treturn nilTokenInfo, fmt.Errorf(\"failed to get token info: %s\", err)\n\t}\n\n\treturn tokenInfo, nil\n}\n\n\/\/ GenerateKeyPair generates a key pair inside the token.\nfunc (tk *Token) GenerateKeyPair(label string, kr KeyRequest) (*KeyPair, error) {\n\tkeyID := uint(crc64.Checksum([]byte(label), crc64.MakeTable(crc64.ECMA)))\n\tpublicKeyTemplate := []*pkcs11.Attribute{\n\t\t\/\/ Common storage object attributes (PKCS #11-B 10.4)\n\t\tpkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_PRIVATE, false),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_MODIFIABLE, false),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_LABEL, label),\n\t\t\/\/ Common key attributes (PKCS #11-B 10.7)\n\t\tpkcs11.NewAttribute(pkcs11.CKA_ID, keyID),\n\t\t\/\/ Common public key attributes (PKCS #11-B 10.8)\n\t\tpkcs11.NewAttribute(pkcs11.CKA_ENCRYPT, true),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_VERIFY, true),\n\t}\n\n\tvar req keyRequest\n\tswitch kr.Algo() {\n\tcase \"rsa\":\n\t\treq = NewRSAKeyRequest(kr.Size()).(*rsaKeyRequest)\n\tcase \"ecdsa\":\n\t\treq = NewECKeyRequest(kr.Size()).(*ecdsaKeyRequest)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported algorithm: %s\", kr.Algo())\n\t}\n\n\tattrs, err := req.Attrs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpublicKeyTemplate = append(publicKeyTemplate, attrs...)\n\n\tprivateKeyTemplate := []*pkcs11.Attribute{\n\t\t\/\/ Common storage object attributes (PKCS #11-B 10.4)\n\t\tpkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_PRIVATE, true),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_MODIFIABLE, false),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_LABEL, label),\n\t\t\/\/ Common key attributes (PKCS #11-B 10.7)\n\t\tpkcs11.NewAttribute(pkcs11.CKA_ID, keyID),\n\t\t\/\/ Common private key attributes (PKCS #11-B 10.9)\n\t\tpkcs11.NewAttribute(pkcs11.CKA_SENSITIVE, true),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_DECRYPT, true),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_SIGN, true),\n\t}\n\n\tpubHandle, privHandle, err := tk.module.GenerateKeyPair(tk.session, req.Mechanisms(), publicKeyTemplate, privateKeyTemplate)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpub, err := tk.ExportPublicKey(pubHandle)\n\tif err != nil {\n\t\treturn nil, err\n\n\t}\n\n\treturn &KeyPair{tk, pub, privHandle}, nil\n}\n\n\/\/ FindKeyPair looks up a key pair inside the token with the public key.\nfunc (tk *Token) FindKeyPair(pub crypto.PublicKey) (*KeyPair, error) {\n\t\/\/ First, looks up the given public key in the token, and returns get\n\t\/\/ its object handle if found.\n\tkp, err := parseKeyParams(pub)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttemplate, err := kp.Attrs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpubHandle, err := tk.FindObject(template)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Then looks up the private key with matching CKA_ID of the given public key handle.\n\tpublicKeyID, err := tk.GetAttribute(pubHandle, pkcs11.CKA_ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tprivHandle, err := tk.FindObject([]*pkcs11.Attribute{\n\t\tpkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_PRIVATE_KEY),\n\t\tpkcs11.NewAttribute(pkcs11.CKA_ID, publicKeyID),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &KeyPair{tk, pub, privHandle}, nil\n}\n\n\/\/ ExportPublicKey retrieves the public key with given object handle.\nfunc (tk *Token) ExportPublicKey(handle pkcs11.ObjectHandle) (crypto.PublicKey, error) {\n\tvalue, err := tk.GetAttribute(handle, pkcs11.CKA_KEY_TYPE)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(value) == 0 {\n\t\treturn nil, errors.New(\"invalid public key object\")\n\t}\n\tkeyType := value[0]\n\n\tswitch keyType {\n\tcase pkcs11.CKK_RSA:\n\t\tkp := new(rsaKeyParams)\n\t\tkp.modulus, _ = tk.GetAttribute(handle, pkcs11.CKA_MODULUS)\n\t\tkp.exponent, _ = tk.GetAttribute(handle, pkcs11.CKA_PUBLIC_EXPONENT)\n\t\treturn kp.Key()\n\tcase pkcs11.CKK_EC:\n\t\tkp := new(ecdsaKeyParams)\n\t\tkp.ecParams, _ = tk.GetAttribute(handle, pkcs11.CKA_EC_PARAMS)\n\t\tkp.ecPoint, _ = tk.GetAttribute(handle, pkcs11.CKA_EC_POINT)\n\t\treturn kp.Key()\n\tdefault:\n\t\treturn nil, errors.New(\"unknown key type\")\n\t}\n}\n\n\/\/ Sign signs msg with the private key inside the token. The caller is\n\/\/ responsibile to compute the message digest.\nfunc (tk *Token) Sign(mech uint, msg []byte, key pkcs11.ObjectHandle) ([]byte, error) {\n\tm := []*pkcs11.Mechanism{pkcs11.NewMechanism(mech, nil)}\n\tif err := tk.module.SignInit(tk.session, m, key); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn tk.module.Sign(tk.session, msg)\n}\n\n\/\/ FindObject returns the first object it found that matches the query.\nfunc (tk *Token) FindObject(query []*pkcs11.Attribute) (pkcs11.ObjectHandle, error) {\n\terr := tk.module.FindObjectsInit(tk.session, query)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tresult, _, err := tk.module.FindObjects(tk.session, 1)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\terr = tk.module.FindObjectsFinal(tk.session)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tif len(result) == 0 {\n\t\treturn 0, errors.New(\"object not found\")\n\t}\n\n\treturn result[0], nil\n}\n\n\/\/ GetAttribute obtains the value of a single object attribute. If there\n\/\/ are multiple attributes of the same type, it only returns the value\n\/\/ of the first one.\nfunc (tk *Token) GetAttribute(obj pkcs11.ObjectHandle, typ uint) ([]byte, error) {\n\tattr, err := tk.module.GetAttributeValue(tk.session, obj, []*pkcs11.Attribute{\n\t\tpkcs11.NewAttribute(typ, nil),\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(attr) == 0 {\n\t\treturn nil, errors.New(\"attribute not found\")\n\t}\n\n\treturn attr[0].Value, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package watcher\n\nimport (\n\t\"context\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/rancher\/norman\/controller\"\n\t\"github.com\/rancher\/rancher\/pkg\/controllers\/user\/alert\/common\"\n\t\"github.com\/rancher\/rancher\/pkg\/controllers\/user\/alert\/manager\"\n\t\"github.com\/rancher\/rancher\/pkg\/controllers\/user\/workload\"\n\t\"github.com\/rancher\/rancher\/pkg\/ticker\"\n\tv1 \"github.com\/rancher\/types\/apis\/core\/v1\"\n\tv3 \"github.com\/rancher\/types\/apis\/management.cattle.io\/v3\"\n\t\"github.com\/rancher\/types\/config\"\n\t\"github.com\/sirupsen\/logrus\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tkerrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n)\n\ntype PodWatcher struct {\n\tpodLister v1.PodLister\n\talertManager *manager.AlertManager\n\tprojectAlertPolicies v3.ProjectAlertRuleInterface\n\tprojectAlertGroupLister v3.ProjectAlertRuleLister\n\tclusterName string\n\tpodRestartTrack sync.Map\n\tclusterLister v3.ClusterLister\n\tprojectLister v3.ProjectLister\n\tworkloadFetcher workloadFetcher\n}\n\ntype restartTrack struct {\n\tCount int32\n\tTime time.Time\n}\n\nfunc StartPodWatcher(ctx context.Context, cluster *config.UserContext, manager *manager.AlertManager) {\n\tprojectAlertPolicies := cluster.Management.Management.ProjectAlertRules(\"\")\n\tworkloadFetcher := workloadFetcher{\n\t\tworkloadController: workload.NewWorkloadController(ctx, cluster.UserOnlyContext(), nil),\n\t}\n\n\tpodWatcher := &PodWatcher{\n\t\tpodLister: cluster.Core.Pods(\"\").Controller().Lister(),\n\t\tprojectAlertPolicies: projectAlertPolicies,\n\t\tprojectAlertGroupLister: projectAlertPolicies.Controller().Lister(),\n\t\talertManager: manager,\n\t\tclusterName: cluster.ClusterName,\n\t\tpodRestartTrack: sync.Map{},\n\t\tclusterLister: cluster.Management.Management.Clusters(\"\").Controller().Lister(),\n\t\tprojectLister: cluster.Management.Management.Projects(cluster.ClusterName).Controller().Lister(),\n\t\tworkloadFetcher: workloadFetcher,\n\t}\n\n\tprojectAlertLifecycle := &ProjectAlertLifecycle{\n\t\tpodWatcher: podWatcher,\n\t}\n\tprojectAlertPolicies.AddClusterScopedLifecycle(ctx, \"pod-target-alert-watcher\", cluster.ClusterName, projectAlertLifecycle)\n\n\tgo podWatcher.watch(ctx, syncInterval)\n}\n\nfunc (w *PodWatcher) watch(ctx context.Context, interval time.Duration) {\n\tfor range ticker.Context(ctx, interval) {\n\t\terr := w.watchRule()\n\t\tif err != nil {\n\t\t\tlogrus.Infof(\"Failed to watch pod, error: %v\", err)\n\t\t}\n\t}\n}\n\ntype ProjectAlertLifecycle struct {\n\tpodWatcher *PodWatcher\n}\n\nfunc (l *ProjectAlertLifecycle) Create(obj *v3.ProjectAlertRule) (runtime.Object, error) {\n\tl.podWatcher.podRestartTrack.Store(obj.Namespace+\":\"+obj.Name, make([]restartTrack, 0))\n\treturn obj, nil\n}\n\nfunc (l *ProjectAlertLifecycle) Updated(obj *v3.ProjectAlertRule) (runtime.Object, error) {\n\treturn obj, nil\n}\n\nfunc (l *ProjectAlertLifecycle) Remove(obj *v3.ProjectAlertRule) (runtime.Object, error) {\n\tl.podWatcher.podRestartTrack.Delete(obj.Namespace + \":\" + obj.Name)\n\treturn obj, nil\n}\n\nfunc (w *PodWatcher) watchRule() error {\n\tif w.alertManager.IsDeploy == false {\n\t\treturn nil\n\t}\n\n\tprojectAlerts, err := w.projectAlertGroupLister.List(\"\", labels.NewSelector())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpAlerts := []*v3.ProjectAlertRule{}\n\tfor _, alert := range projectAlerts {\n\t\tif controller.ObjectInCluster(w.clusterName, alert) {\n\t\t\tpAlerts = append(pAlerts, alert)\n\t\t}\n\t}\n\n\tfor _, alert := range pAlerts {\n\t\tif alert.Status.AlertState == \"inactive\" || alert.Spec.PodRule == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tparts := strings.Split(alert.Spec.PodRule.PodName, \":\")\n\t\tif len(parts) < 2 {\n\t\t\t\/\/TODO: for invalid format pod\n\t\t\tif err = w.projectAlertPolicies.DeleteNamespaced(alert.Namespace, alert.Name, &metav1.DeleteOptions{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tns := parts[0]\n\t\tpodID := parts[1]\n\t\tnewPod, err := w.podLister.Get(ns, podID)\n\t\tif err != nil {\n\t\t\t\/\/TODO: what to do when pod not found\n\t\t\tif kerrors.IsNotFound(err) || newPod == nil {\n\t\t\t\tif err = w.projectAlertPolicies.DeleteNamespaced(alert.Namespace, alert.Name, &metav1.DeleteOptions{}); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tlogrus.Debugf(\"Failed to get pod %s: %v\", podID, err)\n\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch alert.Spec.PodRule.Condition {\n\t\tcase \"notrunning\":\n\t\t\tw.checkPodRunning(newPod, alert)\n\t\tcase \"notscheduled\":\n\t\t\tw.checkPodScheduled(newPod, alert)\n\t\tcase \"restarts\":\n\t\t\tw.checkPodRestarts(newPod, alert)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (w *PodWatcher) checkPodRestarts(pod *corev1.Pod, alert *v3.ProjectAlertRule) {\n\n\tfor _, containerStatus := range pod.Status.ContainerStatuses {\n\t\tif containerStatus.State.Running == nil {\n\t\t\tcurCount := containerStatus.RestartCount\n\t\t\tpreCount := w.getRestartTimeFromTrack(alert, curCount)\n\n\t\t\tif curCount-preCount >= int32(alert.Spec.PodRule.RestartTimes) {\n\t\t\t\truleID := common.GetRuleID(alert.Spec.GroupName, alert.Name)\n\n\t\t\t\tdetails := \"\"\n\t\t\t\tif containerStatus.State.Waiting != nil {\n\t\t\t\t\tdetails = containerStatus.State.Waiting.Message\n\t\t\t\t}\n\n\t\t\t\tclusterDisplayName := common.GetClusterDisplayName(w.clusterName, w.clusterLister)\n\t\t\t\tprojectDisplayName := common.GetProjectDisplayName(alert.Spec.ProjectName, w.projectLister)\n\n\t\t\t\tdata := map[string]string{}\n\t\t\t\tdata[\"rule_id\"] = ruleID\n\t\t\t\tdata[\"group_id\"] = alert.Spec.GroupName\n\t\t\t\tdata[\"alert_name\"] = alert.Spec.DisplayName\n\t\t\t\tdata[\"alert_type\"] = \"podRestarts\"\n\t\t\t\tdata[\"severity\"] = alert.Spec.Severity\n\t\t\t\tdata[\"cluster_name\"] = clusterDisplayName\n\t\t\t\tdata[\"project_name\"] = projectDisplayName\n\t\t\t\tdata[\"namespace\"] = pod.Namespace\n\t\t\t\tdata[\"pod_name\"] = pod.Name\n\t\t\t\tdata[\"container_name\"] = containerStatus.Name\n\t\t\t\tdata[\"restart_times\"] = strconv.Itoa(alert.Spec.PodRule.RestartTimes)\n\t\t\t\tdata[\"restart_interval\"] = strconv.Itoa(alert.Spec.PodRule.RestartIntervalSeconds)\n\n\t\t\t\tif details != \"\" {\n\t\t\t\t\tdata[\"logs\"] = details\n\t\t\t\t}\n\n\t\t\t\tworkloadName, err := w.getWorkloadInfo(pod)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogrus.Warnf(\"Failed to get workload info for %s:%s %v\", pod.Namespace, pod.Name, err)\n\t\t\t\t}\n\t\t\t\tif workloadName != \"\" {\n\t\t\t\t\tdata[\"workload_name\"] = workloadName\n\t\t\t\t}\n\n\t\t\t\tif err := w.alertManager.SendAlert(data); err != nil {\n\t\t\t\t\tlogrus.Debugf(\"Error occurred while getting pod %s: %v\", alert.Spec.PodRule.PodName, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn\n\t\t}\n\t}\n\n}\n\nfunc (w *PodWatcher) getRestartTimeFromTrack(alert *v3.ProjectAlertRule, curCount int32) int32 {\n\tname := alert.Name\n\tnamespace := alert.Namespace\n\n\tobj, ok := w.podRestartTrack.Load(namespace + \":\" + name)\n\tif !ok {\n\t\treturn curCount\n\t}\n\ttracks := obj.([]restartTrack)\n\n\tnow := time.Now()\n\n\tif len(tracks) == 0 {\n\t\ttracks = append(tracks, restartTrack{Count: curCount, Time: now})\n\t\tw.podRestartTrack.Store(namespace+\":\"+name, tracks)\n\t\treturn curCount\n\t}\n\n\tfor i, track := range tracks {\n\t\tif now.Sub(track.Time).Seconds() < float64(alert.Spec.PodRule.RestartIntervalSeconds) {\n\t\t\ttracks = tracks[i:]\n\t\t\ttracks = append(tracks, restartTrack{Count: curCount, Time: now})\n\t\t\tw.podRestartTrack.Store(namespace+\":\"+name, tracks)\n\t\t\treturn track.Count\n\t\t}\n\t}\n\n\tw.podRestartTrack.Store(namespace+\":\"+name, []restartTrack{})\n\treturn curCount\n}\n\nfunc (w *PodWatcher) checkPodRunning(pod *corev1.Pod, alert *v3.ProjectAlertRule) {\n\tif !w.checkPodScheduled(pod, alert) {\n\t\treturn\n\t}\n\n\tfor _, containerStatus := range pod.Status.ContainerStatuses {\n\t\tif containerStatus.State.Running == nil {\n\t\t\truleID := common.GetRuleID(alert.Spec.GroupName, alert.Name)\n\n\t\t\t\/\/TODO: need to consider all the cases\n\t\t\tdetails := \"\"\n\t\t\tif containerStatus.State.Waiting != nil {\n\t\t\t\tdetails = containerStatus.State.Waiting.Message\n\t\t\t}\n\n\t\t\tif containerStatus.State.Terminated != nil {\n\t\t\t\tdetails = containerStatus.State.Terminated.Message\n\t\t\t}\n\n\t\t\tclusterDisplayName := common.GetClusterDisplayName(w.clusterName, w.clusterLister)\n\t\t\tprojectDisplayName := common.GetProjectDisplayName(alert.Spec.ProjectName, w.projectLister)\n\n\t\t\tdata := map[string]string{}\n\t\t\tdata[\"rule_id\"] = ruleID\n\t\t\tdata[\"group_id\"] = alert.Spec.GroupName\n\t\t\tdata[\"alert_name\"] = alert.Spec.DisplayName\n\t\t\tdata[\"alert_type\"] = \"podNotRunning\"\n\t\t\tdata[\"severity\"] = alert.Spec.Severity\n\t\t\tdata[\"cluster_name\"] = clusterDisplayName\n\t\t\tdata[\"namespace\"] = pod.Namespace\n\t\t\tdata[\"project_name\"] = projectDisplayName\n\t\t\tdata[\"pod_name\"] = pod.Name\n\t\t\tdata[\"container_name\"] = containerStatus.Name\n\n\t\t\tif details != \"\" {\n\t\t\t\tdata[\"logs\"] = details\n\t\t\t}\n\n\t\t\tworkloadName, err := w.getWorkloadInfo(pod)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Warnf(\"Failed to get workload info for %s:%s %v\", pod.Namespace, pod.Name, err)\n\t\t\t}\n\t\t\tif workloadName != \"\" {\n\t\t\t\tdata[\"workload_name\"] = workloadName\n\t\t\t}\n\n\t\t\tif err := w.alertManager.SendAlert(data); err != nil {\n\t\t\t\tlogrus.Debugf(\"Error occurred while send alert %s: %v\", alert.Spec.PodRule.PodName, err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (w *PodWatcher) checkPodScheduled(pod *corev1.Pod, alert *v3.ProjectAlertRule) bool {\n\n\tfor _, condition := range pod.Status.Conditions {\n\t\tif condition.Type == corev1.PodScheduled && condition.Status == corev1.ConditionFalse {\n\t\t\truleID := common.GetRuleID(alert.Spec.GroupName, alert.Name)\n\t\t\tdetails := condition.Message\n\n\t\t\tclusterDisplayName := common.GetClusterDisplayName(w.clusterName, w.clusterLister)\n\t\t\tprojectDisplayName := common.GetProjectDisplayName(alert.Spec.ProjectName, w.projectLister)\n\n\t\t\tdata := map[string]string{}\n\t\t\tdata[\"rule_id\"] = ruleID\n\t\t\tdata[\"group_id\"] = alert.Spec.GroupName\n\t\t\tdata[\"alert_type\"] = \"podNotScheduled\"\n\t\t\tdata[\"alert_name\"] = alert.Spec.DisplayName\n\t\t\tdata[\"severity\"] = alert.Spec.Severity\n\t\t\tdata[\"cluster_name\"] = clusterDisplayName\n\t\t\tdata[\"namespace\"] = pod.Namespace\n\t\t\tdata[\"project_name\"] = projectDisplayName\n\t\t\tdata[\"pod_name\"] = pod.Name\n\n\t\t\tif details != \"\" {\n\t\t\t\tdata[\"logs\"] = details\n\t\t\t}\n\n\t\t\tworkloadName, err := w.getWorkloadInfo(pod)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Warnf(\"Failed to get workload info for %s:%s %v\", pod.Namespace, pod.Name, err)\n\t\t\t}\n\t\t\tif workloadName != \"\" {\n\t\t\t\tdata[\"workload_name\"] = workloadName\n\t\t\t}\n\n\t\t\tif err := w.alertManager.SendAlert(data); err != nil {\n\t\t\t\tlogrus.Debugf(\"Error occurred while getting pod %s: %v\", alert.Spec.PodRule.PodName, err)\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n\n}\n\nfunc (w *PodWatcher) getWorkloadInfo(pod *corev1.Pod) (string, error) {\n\tif len(pod.OwnerReferences) == 0 {\n\t\treturn pod.Name, nil\n\t}\n\townerRef := pod.OwnerReferences[0]\n\tworkloadName, err := w.workloadFetcher.getWorkloadName(pod.Namespace, ownerRef.Name, ownerRef.Kind)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Failed to get workload info for alert\")\n\t}\n\treturn workloadName, nil\n}\n<commit_msg>Fix pod restart not fire alerting after server restarted<commit_after>package watcher\n\nimport (\n\t\"context\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/rancher\/norman\/controller\"\n\t\"github.com\/rancher\/rancher\/pkg\/controllers\/user\/alert\/common\"\n\t\"github.com\/rancher\/rancher\/pkg\/controllers\/user\/alert\/manager\"\n\t\"github.com\/rancher\/rancher\/pkg\/controllers\/user\/workload\"\n\t\"github.com\/rancher\/rancher\/pkg\/ticker\"\n\tv1 \"github.com\/rancher\/types\/apis\/core\/v1\"\n\tv3 \"github.com\/rancher\/types\/apis\/management.cattle.io\/v3\"\n\t\"github.com\/rancher\/types\/config\"\n\t\"github.com\/sirupsen\/logrus\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tkerrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/labels\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n)\n\ntype PodWatcher struct {\n\tpodLister v1.PodLister\n\talertManager *manager.AlertManager\n\tprojectAlertPolicies v3.ProjectAlertRuleInterface\n\tprojectAlertGroupLister v3.ProjectAlertRuleLister\n\tclusterName string\n\tpodRestartTrack sync.Map\n\tclusterLister v3.ClusterLister\n\tprojectLister v3.ProjectLister\n\tworkloadFetcher workloadFetcher\n}\n\ntype restartTrack struct {\n\tCount int32\n\tTime time.Time\n}\n\nfunc StartPodWatcher(ctx context.Context, cluster *config.UserContext, manager *manager.AlertManager) {\n\tprojectAlertPolicies := cluster.Management.Management.ProjectAlertRules(\"\")\n\tworkloadFetcher := workloadFetcher{\n\t\tworkloadController: workload.NewWorkloadController(ctx, cluster.UserOnlyContext(), nil),\n\t}\n\n\tpodWatcher := &PodWatcher{\n\t\tpodLister: cluster.Core.Pods(\"\").Controller().Lister(),\n\t\tprojectAlertPolicies: projectAlertPolicies,\n\t\tprojectAlertGroupLister: projectAlertPolicies.Controller().Lister(),\n\t\talertManager: manager,\n\t\tclusterName: cluster.ClusterName,\n\t\tpodRestartTrack: sync.Map{},\n\t\tclusterLister: cluster.Management.Management.Clusters(\"\").Controller().Lister(),\n\t\tprojectLister: cluster.Management.Management.Projects(cluster.ClusterName).Controller().Lister(),\n\t\tworkloadFetcher: workloadFetcher,\n\t}\n\n\tprojectAlertLifecycle := &ProjectAlertLifecycle{\n\t\tpodWatcher: podWatcher,\n\t}\n\tprojectAlertPolicies.AddClusterScopedLifecycle(ctx, \"pod-target-alert-watcher\", cluster.ClusterName, projectAlertLifecycle)\n\n\tgo podWatcher.watch(ctx, syncInterval)\n}\n\nfunc (w *PodWatcher) watch(ctx context.Context, interval time.Duration) {\n\tfor range ticker.Context(ctx, interval) {\n\t\terr := w.watchRule()\n\t\tif err != nil {\n\t\t\tlogrus.Infof(\"Failed to watch pod, error: %v\", err)\n\t\t}\n\t}\n}\n\ntype ProjectAlertLifecycle struct {\n\tpodWatcher *PodWatcher\n}\n\nfunc (l *ProjectAlertLifecycle) Create(obj *v3.ProjectAlertRule) (runtime.Object, error) {\n\treturn obj, nil\n}\n\nfunc (l *ProjectAlertLifecycle) Updated(obj *v3.ProjectAlertRule) (runtime.Object, error) {\n\treturn obj, nil\n}\n\nfunc (l *ProjectAlertLifecycle) Remove(obj *v3.ProjectAlertRule) (runtime.Object, error) {\n\tl.podWatcher.podRestartTrack.Delete(obj.Namespace + \":\" + obj.Name)\n\treturn obj, nil\n}\n\nfunc (w *PodWatcher) watchRule() error {\n\tif w.alertManager.IsDeploy == false {\n\t\treturn nil\n\t}\n\n\tprojectAlerts, err := w.projectAlertGroupLister.List(\"\", labels.NewSelector())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpAlerts := []*v3.ProjectAlertRule{}\n\tfor _, alert := range projectAlerts {\n\t\tif controller.ObjectInCluster(w.clusterName, alert) {\n\t\t\tpAlerts = append(pAlerts, alert)\n\t\t}\n\t}\n\n\tfor _, alert := range pAlerts {\n\t\tif alert.Status.AlertState == \"inactive\" || alert.Spec.PodRule == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tparts := strings.Split(alert.Spec.PodRule.PodName, \":\")\n\t\tif len(parts) < 2 {\n\t\t\t\/\/TODO: for invalid format pod\n\t\t\tif err = w.projectAlertPolicies.DeleteNamespaced(alert.Namespace, alert.Name, &metav1.DeleteOptions{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tns := parts[0]\n\t\tpodID := parts[1]\n\t\tnewPod, err := w.podLister.Get(ns, podID)\n\t\tif err != nil {\n\t\t\t\/\/TODO: what to do when pod not found\n\t\t\tif kerrors.IsNotFound(err) || newPod == nil {\n\t\t\t\tif err = w.projectAlertPolicies.DeleteNamespaced(alert.Namespace, alert.Name, &metav1.DeleteOptions{}); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tlogrus.Debugf(\"Failed to get pod %s: %v\", podID, err)\n\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch alert.Spec.PodRule.Condition {\n\t\tcase \"notrunning\":\n\t\t\tw.checkPodRunning(newPod, alert)\n\t\tcase \"notscheduled\":\n\t\t\tw.checkPodScheduled(newPod, alert)\n\t\tcase \"restarts\":\n\t\t\tw.checkPodRestarts(newPod, alert)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (w *PodWatcher) checkPodRestarts(pod *corev1.Pod, alert *v3.ProjectAlertRule) {\n\n\tfor _, containerStatus := range pod.Status.ContainerStatuses {\n\t\tcurCount := containerStatus.RestartCount\n\t\tpreCount := w.getRestartTimeFromTrack(alert, curCount)\n\n\t\tif curCount-preCount >= int32(alert.Spec.PodRule.RestartTimes) {\n\t\t\truleID := common.GetRuleID(alert.Spec.GroupName, alert.Name)\n\n\t\t\tdetails := \"\"\n\t\t\tif containerStatus.State.Waiting != nil {\n\t\t\t\tdetails = containerStatus.State.Waiting.Message\n\t\t\t}\n\n\t\t\tclusterDisplayName := common.GetClusterDisplayName(w.clusterName, w.clusterLister)\n\t\t\tprojectDisplayName := common.GetProjectDisplayName(alert.Spec.ProjectName, w.projectLister)\n\n\t\t\tdata := map[string]string{}\n\t\t\tdata[\"rule_id\"] = ruleID\n\t\t\tdata[\"group_id\"] = alert.Spec.GroupName\n\t\t\tdata[\"alert_name\"] = alert.Spec.DisplayName\n\t\t\tdata[\"alert_type\"] = \"podRestarts\"\n\t\t\tdata[\"severity\"] = alert.Spec.Severity\n\t\t\tdata[\"cluster_name\"] = clusterDisplayName\n\t\t\tdata[\"project_name\"] = projectDisplayName\n\t\t\tdata[\"namespace\"] = pod.Namespace\n\t\t\tdata[\"pod_name\"] = pod.Name\n\t\t\tdata[\"container_name\"] = containerStatus.Name\n\t\t\tdata[\"restart_times\"] = strconv.Itoa(alert.Spec.PodRule.RestartTimes)\n\t\t\tdata[\"restart_interval\"] = strconv.Itoa(alert.Spec.PodRule.RestartIntervalSeconds)\n\n\t\t\tif details != \"\" {\n\t\t\t\tdata[\"logs\"] = details\n\t\t\t}\n\n\t\t\tworkloadName, err := w.getWorkloadInfo(pod)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Warnf(\"Failed to get workload info for %s:%s %v\", pod.Namespace, pod.Name, err)\n\t\t\t}\n\t\t\tif workloadName != \"\" {\n\t\t\t\tdata[\"workload_name\"] = workloadName\n\t\t\t}\n\n\t\t\tif err := w.alertManager.SendAlert(data); err != nil {\n\t\t\t\tlogrus.Debugf(\"Error occurred while getting pod %s: %v\", alert.Spec.PodRule.PodName, err)\n\t\t\t}\n\t\t}\n\n\t\treturn\n\t}\n\n}\n\nfunc (w *PodWatcher) getRestartTimeFromTrack(alert *v3.ProjectAlertRule, curCount int32) int32 {\n\tname := alert.Name\n\tnamespace := alert.Namespace\n\tnow := time.Now()\n\tcurrentRestartTrack := restartTrack{Count: curCount, Time: now}\n\tcurrentRestartTrackArr := []restartTrack{currentRestartTrack}\n\n\tobj, loaded := w.podRestartTrack.LoadOrStore(namespace+\":\"+name, currentRestartTrackArr)\n\tif loaded {\n\t\ttracks := obj.([]restartTrack)\n\t\tfor i, track := range tracks {\n\t\t\tif now.Sub(track.Time).Seconds() < float64(alert.Spec.PodRule.RestartIntervalSeconds) {\n\t\t\t\ttracks = tracks[i:]\n\t\t\t\ttracks = append(tracks, currentRestartTrack)\n\t\t\t\tw.podRestartTrack.Store(namespace+\":\"+name, tracks)\n\t\t\t\treturn track.Count\n\t\t\t}\n\t\t}\n\t}\n\n\treturn curCount\n}\n\nfunc (w *PodWatcher) checkPodRunning(pod *corev1.Pod, alert *v3.ProjectAlertRule) {\n\tif !w.checkPodScheduled(pod, alert) {\n\t\treturn\n\t}\n\n\tfor _, containerStatus := range pod.Status.ContainerStatuses {\n\t\tif containerStatus.State.Running == nil {\n\t\t\truleID := common.GetRuleID(alert.Spec.GroupName, alert.Name)\n\n\t\t\t\/\/TODO: need to consider all the cases\n\t\t\tdetails := \"\"\n\t\t\tif containerStatus.State.Waiting != nil {\n\t\t\t\tdetails = containerStatus.State.Waiting.Message\n\t\t\t}\n\n\t\t\tif containerStatus.State.Terminated != nil {\n\t\t\t\tdetails = containerStatus.State.Terminated.Message\n\t\t\t}\n\n\t\t\tclusterDisplayName := common.GetClusterDisplayName(w.clusterName, w.clusterLister)\n\t\t\tprojectDisplayName := common.GetProjectDisplayName(alert.Spec.ProjectName, w.projectLister)\n\n\t\t\tdata := map[string]string{}\n\t\t\tdata[\"rule_id\"] = ruleID\n\t\t\tdata[\"group_id\"] = alert.Spec.GroupName\n\t\t\tdata[\"alert_name\"] = alert.Spec.DisplayName\n\t\t\tdata[\"alert_type\"] = \"podNotRunning\"\n\t\t\tdata[\"severity\"] = alert.Spec.Severity\n\t\t\tdata[\"cluster_name\"] = clusterDisplayName\n\t\t\tdata[\"namespace\"] = pod.Namespace\n\t\t\tdata[\"project_name\"] = projectDisplayName\n\t\t\tdata[\"pod_name\"] = pod.Name\n\t\t\tdata[\"container_name\"] = containerStatus.Name\n\n\t\t\tif details != \"\" {\n\t\t\t\tdata[\"logs\"] = details\n\t\t\t}\n\n\t\t\tworkloadName, err := w.getWorkloadInfo(pod)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Warnf(\"Failed to get workload info for %s:%s %v\", pod.Namespace, pod.Name, err)\n\t\t\t}\n\t\t\tif workloadName != \"\" {\n\t\t\t\tdata[\"workload_name\"] = workloadName\n\t\t\t}\n\n\t\t\tif err := w.alertManager.SendAlert(data); err != nil {\n\t\t\t\tlogrus.Debugf(\"Error occurred while send alert %s: %v\", alert.Spec.PodRule.PodName, err)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (w *PodWatcher) checkPodScheduled(pod *corev1.Pod, alert *v3.ProjectAlertRule) bool {\n\n\tfor _, condition := range pod.Status.Conditions {\n\t\tif condition.Type == corev1.PodScheduled && condition.Status == corev1.ConditionFalse {\n\t\t\truleID := common.GetRuleID(alert.Spec.GroupName, alert.Name)\n\t\t\tdetails := condition.Message\n\n\t\t\tclusterDisplayName := common.GetClusterDisplayName(w.clusterName, w.clusterLister)\n\t\t\tprojectDisplayName := common.GetProjectDisplayName(alert.Spec.ProjectName, w.projectLister)\n\n\t\t\tdata := map[string]string{}\n\t\t\tdata[\"rule_id\"] = ruleID\n\t\t\tdata[\"group_id\"] = alert.Spec.GroupName\n\t\t\tdata[\"alert_type\"] = \"podNotScheduled\"\n\t\t\tdata[\"alert_name\"] = alert.Spec.DisplayName\n\t\t\tdata[\"severity\"] = alert.Spec.Severity\n\t\t\tdata[\"cluster_name\"] = clusterDisplayName\n\t\t\tdata[\"namespace\"] = pod.Namespace\n\t\t\tdata[\"project_name\"] = projectDisplayName\n\t\t\tdata[\"pod_name\"] = pod.Name\n\n\t\t\tif details != \"\" {\n\t\t\t\tdata[\"logs\"] = details\n\t\t\t}\n\n\t\t\tworkloadName, err := w.getWorkloadInfo(pod)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Warnf(\"Failed to get workload info for %s:%s %v\", pod.Namespace, pod.Name, err)\n\t\t\t}\n\t\t\tif workloadName != \"\" {\n\t\t\t\tdata[\"workload_name\"] = workloadName\n\t\t\t}\n\n\t\t\tif err := w.alertManager.SendAlert(data); err != nil {\n\t\t\t\tlogrus.Debugf(\"Error occurred while getting pod %s: %v\", alert.Spec.PodRule.PodName, err)\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n\n}\n\nfunc (w *PodWatcher) getWorkloadInfo(pod *corev1.Pod) (string, error) {\n\tif len(pod.OwnerReferences) == 0 {\n\t\treturn pod.Name, nil\n\t}\n\townerRef := pod.OwnerReferences[0]\n\tworkloadName, err := w.workloadFetcher.getWorkloadName(pod.Namespace, ownerRef.Name, ownerRef.Kind)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"Failed to get workload info for alert\")\n\t}\n\treturn workloadName, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage color\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\n\t\"golang.org\/x\/crypto\/ssh\/terminal\"\n)\n\n\/\/ IsTerminal will check if the specified output stream is a terminal. This can be changed\n\/\/ for testing to an arbitrary method.\nvar IsTerminal = isTerminal\n\n\/\/ Color can be used to format text using ANSI escape codes so it can be printed to\n\/\/ the terminal in color.\ntype Color int\n\nvar (\n\t\/\/ LightRed can format text to be displayed to the terminal in light red, using ANSI escape codes.\n\tLightRed = Color(91)\n\t\/\/ LightGreen can format text to be displayed to the terminal in light green, using ANSI escape codes.\n\tLightGreen = Color(92)\n\t\/\/ LightYellow can format text to be displayed to the terminal in light yellow, using ANSI escape codes.\n\tLightYellow = Color(93)\n\t\/\/ LightBlue can format text to be displayed to the terminal in light blue, using ANSI escape codes.\n\tLightBlue = Color(94)\n\t\/\/ LightPurple can format text to be displayed to the terminal in light purple, using ANSI escape codes.\n\tLightPurple = Color(95)\n\t\/\/ Red can format text to be displayed to the terminal in red, using ANSI escape codes.\n\tRed = Color(31)\n\t\/\/ Green can format text to be displayed to the terminal in green, using ANSI escape codes.\n\tGreen = Color(32)\n\t\/\/ Yellow can format text to be displayed to the terminal in yellow, using ANSI escape codes.\n\tYellow = Color(33)\n\t\/\/ Blue can format text to be displayed to the terminal in blue, using ANSI escape codes.\n\tBlue = Color(34)\n\t\/\/ Purple can format text to be displayed to the terminal in purple, using ANSI escape codes.\n\tPurple = Color(35)\n\t\/\/ Cyan can format text to be displayed to the terminal in cyan, using ANSI escape codes.\n\tCyan = Color(36)\n\t\/\/ White can format text to be displayed to the terminal in white, using ANSI escape codes.\n\tWhite = Color(37)\n\t\/\/ None uses ANSI escape codes to reset all formatting.\n\tNone = Color(0)\n\n\t\/\/ Default default output color for output from Skaffold to the user\n\tDefault = Blue\n)\n\n\/\/ Fprint wraps the operands in c's ANSI escape codes, and outputs the result to\n\/\/ out. If out is not a terminal, the escape codes will not be added.\n\/\/ It returns the number of bytes written and any errors encountered.\nfunc (c Color) Fprint(out io.Writer, a ...interface{}) (n int, err error) {\n\tif IsTerminal(out) {\n\t\treturn fmt.Fprintf(out, \"\\033[%dm%s\\033[0m\", c, fmt.Sprint(a...))\n\t}\n\treturn fmt.Fprint(out, a...)\n}\n\n\/\/ Fprintln wraps the operands in c's ANSI escape codes, and outputs the result to\n\/\/ out, followed by a newline. If out is not a terminal, the escape codes will not be added.\n\/\/ It returns the number of bytes written and any errors encountered.\nfunc (c Color) Fprintln(out io.Writer, a ...interface{}) (n int, err error) {\n\tif IsTerminal(out) {\n\t\treturn fmt.Fprintf(out, \"\\033[%dm%s\\033[0m\\n\", c, strings.TrimSuffix(fmt.Sprintln(a...), \"\\n\"))\n\t}\n\treturn fmt.Fprintln(out, a...)\n}\n\n\/\/ Fprintf applies formats according to the format specifier (and the optional interfaces provided),\n\/\/ wraps the result in c's ANSI escape codes, and outputs the result to\n\/\/ out, followed by a newline. If out is not a terminal, the escape codes will not be added.\n\/\/ It returns the number of bytes written and any errors encountered.\nfunc (c Color) Fprintf(out io.Writer, format string, a ...interface{}) (n int, err error) {\n\tif IsTerminal(out) {\n\t\treturn fmt.Fprintf(out, \"\\033[%dm%s\\033[0m\", c, fmt.Sprintf(format, a...))\n\t}\n\treturn fmt.Fprintf(out, format, a...)\n}\n\n\/\/ ColoredWriteCloser forces printing with colors to an io.WriteCloser.\ntype ColoredWriteCloser struct {\n\tio.WriteCloser\n}\n\n\/\/ ColoredWriter forces printing with colors to an io.Writer.\ntype ColoredWriter struct {\n\tio.Writer\n}\n\n\/\/ OverwriteDefault overwrites default color\nfunc OverwriteDefault(color Color) {\n\tDefault = color\n}\n\n\/\/ This implementation comes from logrus (https:\/\/github.com\/sirupsen\/logrus\/blob\/master\/terminal_check_notappengine.go),\n\/\/ unfortunately logrus doesn't expose a public interface we can use to call it.\nfunc isTerminal(w io.Writer) bool {\n\tif _, ok := w.(ColoredWriteCloser); ok {\n\t\treturn true\n\t}\n\tif _, ok := w.(ColoredWriter); ok {\n\t\treturn true\n\t}\n\n\tswitch v := w.(type) {\n\tcase *os.File:\n\t\treturn terminal.IsTerminal(int(v.Fd()))\n\tdefault:\n\t\treturn false\n\t}\n}\n<commit_msg>Remove dead code<commit_after>\/*\nCopyright 2019 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage color\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\n\t\"golang.org\/x\/crypto\/ssh\/terminal\"\n)\n\n\/\/ IsTerminal will check if the specified output stream is a terminal. This can be changed\n\/\/ for testing to an arbitrary method.\nvar IsTerminal = isTerminal\n\n\/\/ Color can be used to format text using ANSI escape codes so it can be printed to\n\/\/ the terminal in color.\ntype Color int\n\nvar (\n\t\/\/ LightRed can format text to be displayed to the terminal in light red, using ANSI escape codes.\n\tLightRed = Color(91)\n\t\/\/ LightGreen can format text to be displayed to the terminal in light green, using ANSI escape codes.\n\tLightGreen = Color(92)\n\t\/\/ LightYellow can format text to be displayed to the terminal in light yellow, using ANSI escape codes.\n\tLightYellow = Color(93)\n\t\/\/ LightBlue can format text to be displayed to the terminal in light blue, using ANSI escape codes.\n\tLightBlue = Color(94)\n\t\/\/ LightPurple can format text to be displayed to the terminal in light purple, using ANSI escape codes.\n\tLightPurple = Color(95)\n\t\/\/ Red can format text to be displayed to the terminal in red, using ANSI escape codes.\n\tRed = Color(31)\n\t\/\/ Green can format text to be displayed to the terminal in green, using ANSI escape codes.\n\tGreen = Color(32)\n\t\/\/ Yellow can format text to be displayed to the terminal in yellow, using ANSI escape codes.\n\tYellow = Color(33)\n\t\/\/ Blue can format text to be displayed to the terminal in blue, using ANSI escape codes.\n\tBlue = Color(34)\n\t\/\/ Purple can format text to be displayed to the terminal in purple, using ANSI escape codes.\n\tPurple = Color(35)\n\t\/\/ Cyan can format text to be displayed to the terminal in cyan, using ANSI escape codes.\n\tCyan = Color(36)\n\t\/\/ White can format text to be displayed to the terminal in white, using ANSI escape codes.\n\tWhite = Color(37)\n\t\/\/ None uses ANSI escape codes to reset all formatting.\n\tNone = Color(0)\n\n\t\/\/ Default default output color for output from Skaffold to the user\n\tDefault = Blue\n)\n\n\/\/ Fprint wraps the operands in c's ANSI escape codes, and outputs the result to\n\/\/ out. If out is not a terminal, the escape codes will not be added.\n\/\/ It returns the number of bytes written and any errors encountered.\nfunc (c Color) Fprint(out io.Writer, a ...interface{}) (n int, err error) {\n\tif IsTerminal(out) {\n\t\treturn fmt.Fprintf(out, \"\\033[%dm%s\\033[0m\", c, fmt.Sprint(a...))\n\t}\n\treturn fmt.Fprint(out, a...)\n}\n\n\/\/ Fprintln wraps the operands in c's ANSI escape codes, and outputs the result to\n\/\/ out, followed by a newline. If out is not a terminal, the escape codes will not be added.\n\/\/ It returns the number of bytes written and any errors encountered.\nfunc (c Color) Fprintln(out io.Writer, a ...interface{}) (n int, err error) {\n\tif IsTerminal(out) {\n\t\treturn fmt.Fprintf(out, \"\\033[%dm%s\\033[0m\\n\", c, strings.TrimSuffix(fmt.Sprintln(a...), \"\\n\"))\n\t}\n\treturn fmt.Fprintln(out, a...)\n}\n\n\/\/ Fprintf applies formats according to the format specifier (and the optional interfaces provided),\n\/\/ wraps the result in c's ANSI escape codes, and outputs the result to\n\/\/ out, followed by a newline. If out is not a terminal, the escape codes will not be added.\n\/\/ It returns the number of bytes written and any errors encountered.\nfunc (c Color) Fprintf(out io.Writer, format string, a ...interface{}) (n int, err error) {\n\tif IsTerminal(out) {\n\t\treturn fmt.Fprintf(out, \"\\033[%dm%s\\033[0m\", c, fmt.Sprintf(format, a...))\n\t}\n\treturn fmt.Fprintf(out, format, a...)\n}\n\n\/\/ ColoredWriteCloser forces printing with colors to an io.WriteCloser.\ntype ColoredWriteCloser struct {\n\tio.WriteCloser\n}\n\n\/\/ OverwriteDefault overwrites default color\nfunc OverwriteDefault(color Color) {\n\tDefault = color\n}\n\n\/\/ This implementation comes from logrus (https:\/\/github.com\/sirupsen\/logrus\/blob\/master\/terminal_check_notappengine.go),\n\/\/ unfortunately logrus doesn't expose a public interface we can use to call it.\nfunc isTerminal(w io.Writer) bool {\n\tif _, ok := w.(ColoredWriteCloser); ok {\n\t\treturn true\n\t}\n\n\tswitch v := w.(type) {\n\tcase *os.File:\n\t\treturn terminal.IsTerminal(int(v.Fd()))\n\tdefault:\n\t\treturn false\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package gsocket\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"sync\"\n)\n\n\/\/ Connection 代表一个连接会话\ntype Connection struct {\n\tconn net.Conn\n\tsendBuffer chan []byte\n\tterminated bool\n}\n\n\/\/ newConnection 生成一个新的Session\nfunc newConnection(conn net.Conn) (c *Connection) {\n\tc = &Connection{\n\t\tconn: conn,\n\t\tsendBuffer: make(chan []byte, 10),\n\t\tterminated: false,\n\t}\n\n\treturn c\n}\n\n\/\/ RemoteAddr 返回客户端的地址和端口\nfunc (c *Connection) RemoteAddr() string {\n\treturn c.conn.RemoteAddr().String()\n}\n\n\/\/ Close 关闭连接\nfunc (c *Connection) Close() {\n\tc.terminated = true\n\tclose(c.sendBuffer)\n\tc.conn.Close()\n}\n\nfunc (c *Connection) recvThread(wg *sync.WaitGroup, handler tcpEventHandler) {\n\tdefer wg.Done()\n\tbuffer := make([]byte, 4096)\n\tfor {\n\t\tn, err := c.conn.Read(buffer)\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tif handler.handlerError != nil {\n\t\t\t\t\thandler.handlerError(c, err)\n\t\t\t\t}\n\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif handler.handlerDisconnect != nil {\n\t\t\t\thandler.handlerDisconnect(c)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/session.RecvedPackets = append(session.RecvedPackets, buffer[:n]...)\n\t\tif handler.handlerRecv != nil {\n\t\t\thandler.handlerRecv(c, buffer[:n])\n\t\t}\n\t}\n\n\tif c.terminated == false {\n\t\tc.Close()\n\t}\n\tlog.Printf(\"session %s recvThread Exit\", c.RemoteAddr())\n}\n\nfunc (c *Connection) sendThread(wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\n\tfor {\n\t\tpacket, ok := <-c.sendBuffer\n\t\tif !ok {\n\t\t\t\/\/ 意味着道通已经空了,并且已被关闭\n\t\t\tbreak\n\t\t}\n\t\t_, err := c.conn.Write(packet)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tlog.Printf(\"session %s sendThread Exit\", c.RemoteAddr())\n}\n\n\/\/ Send 发送数据\nfunc (c *Connection) Send(data []byte) {\n\tc.sendBuffer <- data\n}\n<commit_msg>recvThread加打印消息<commit_after>package gsocket\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"sync\"\n)\n\n\/\/ Connection 代表一个连接会话\ntype Connection struct {\n\tconn net.Conn\n\tsendBuffer chan []byte\n\tterminated bool\n}\n\n\/\/ newConnection 生成一个新的Session\nfunc newConnection(conn net.Conn) (c *Connection) {\n\tc = &Connection{\n\t\tconn: conn,\n\t\tsendBuffer: make(chan []byte, 10),\n\t\tterminated: false,\n\t}\n\n\treturn c\n}\n\n\/\/ RemoteAddr 返回客户端的地址和端口\nfunc (c *Connection) RemoteAddr() string {\n\treturn c.conn.RemoteAddr().String()\n}\n\n\/\/ LocalAddr 返回本机地址和端口\nfunc (c *Connection) LocalAddr() string {\n\treturn c.conn.LocalAddr().String()\n}\n\n\/\/ Close 关闭连接\nfunc (c *Connection) Close() {\n\tc.terminated = true\n\tclose(c.sendBuffer)\n\tc.conn.Close()\n}\n\nfunc (c *Connection) recvThread(wg *sync.WaitGroup, handler tcpEventHandler) {\n\tdefer wg.Done()\n\tbuffer := make([]byte, 4096)\n\tfor {\n\t\tn, err := c.conn.Read(buffer)\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tif handler.handlerError != nil {\n\t\t\t\t\tlog.Printf(\"handler error: %s\\n\", c.LocalAddr())\n\t\t\t\t\thandler.handlerError(c, err)\n\t\t\t\t}\n\t\t\t\tlog.Println(\"handler read break 1\")\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif handler.handlerDisconnect != nil {\n\t\t\t\thandler.handlerDisconnect(c)\n\t\t\t}\n\t\t\tlog.Println(\"handler read break 2\")\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/session.RecvedPackets = append(session.RecvedPackets, buffer[:n]...)\n\t\tif handler.handlerRecv != nil {\n\t\t\thandler.handlerRecv(c, buffer[:n])\n\t\t}\n\t}\n\n\tif c.terminated == false {\n\t\tc.Close()\n\t}\n\tlog.Printf(\"session %s recvThread Exit\", c.RemoteAddr())\n}\n\nfunc (c *Connection) sendThread(wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\n\tfor {\n\t\tpacket, ok := <-c.sendBuffer\n\t\tif !ok {\n\t\t\t\/\/ 意味着道通已经空了,并且已被关闭\n\t\t\tbreak\n\t\t}\n\t\t_, err := c.conn.Write(packet)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tlog.Printf(\"session %s sendThread Exit\", c.RemoteAddr())\n}\n\n\/\/ Send 发送数据\nfunc (c *Connection) Send(data []byte) {\n\tc.sendBuffer <- data\n}\n<|endoftext|>"} {"text":"<commit_before>package plugins\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/go-connections\/sockets\"\n\t\"github.com\/docker\/go-connections\/tlsconfig\"\n)\n\nconst (\n\tversionMimetype = \"application\/vnd.docker.plugins.v1.2+json\"\n\tdefaultTimeOut = 30\n)\n\n\/\/ NewClient creates a new plugin client (http).\nfunc NewClient(addr string, tlsConfig tlsconfig.Options) (*Client, error) {\n\ttr := &http.Transport{}\n\n\tc, err := tlsconfig.Client(tlsConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttr.TLSClientConfig = c\n\n\tprotoAndAddr := strings.Split(addr, \":\/\/\")\n\tif err := sockets.ConfigureTransport(tr, protoAndAddr[0], protoAndAddr[1]); err != nil {\n\t\treturn nil, err\n\t}\n\n\tscheme := protoAndAddr[0]\n\tif scheme != \"https\" {\n\t\tscheme = \"http\"\n\t}\n\treturn &Client{&http.Client{Transport: tr}, scheme, protoAndAddr[1]}, nil\n}\n\n\/\/ Client represents a plugin client.\ntype Client struct {\n\thttp *http.Client \/\/ http client to use\n\tscheme string \/\/ scheme protocol of the plugin\n\taddr string \/\/ http address of the plugin\n}\n\n\/\/ Call calls the specified method with the specified arguments for the plugin.\n\/\/ It will retry for 30 seconds if a failure occurs when calling.\nfunc (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error {\n\tvar buf bytes.Buffer\n\tif args != nil {\n\t\tif err := json.NewEncoder(&buf).Encode(args); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tbody, err := c.callWithRetry(serviceMethod, &buf, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer body.Close()\n\tif ret != nil {\n\t\tif err := json.NewDecoder(body).Decode(&ret); err != nil {\n\t\t\tlogrus.Errorf(\"%s: error reading plugin resp: %v\", serviceMethod, err)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Stream calls the specified method with the specified arguments for the plugin and returns the response body\nfunc (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) {\n\tvar buf bytes.Buffer\n\tif err := json.NewEncoder(&buf).Encode(args); err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.callWithRetry(serviceMethod, &buf, true)\n}\n\n\/\/ SendFile calls the specified method, and passes through the IO stream\nfunc (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error {\n\tbody, err := c.callWithRetry(serviceMethod, data, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := json.NewDecoder(body).Decode(&ret); err != nil {\n\t\tlogrus.Errorf(\"%s: error reading plugin resp: %v\", serviceMethod, err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) {\n\treq, err := http.NewRequest(\"POST\", \"\/\"+serviceMethod, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Accept\", versionMimetype)\n\treq.URL.Scheme = c.scheme\n\treq.URL.Host = c.addr\n\n\tvar retries int\n\tstart := time.Now()\n\n\tfor {\n\t\tresp, err := c.http.Do(req)\n\t\tif err != nil {\n\t\t\tif !retry {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\ttimeOff := backoff(retries)\n\t\t\tif abort(start, timeOff) {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tretries++\n\t\t\tlogrus.Warnf(\"Unable to connect to plugin: %s, retrying in %v\", c.addr, timeOff)\n\t\t\ttime.Sleep(timeOff)\n\t\t\tcontinue\n\t\t}\n\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, &statusError{resp.StatusCode, serviceMethod, err.Error()}\n\t\t\t}\n\n\t\t\t\/\/ Plugins' Response(s) should have an Err field indicating what went\n\t\t\t\/\/ wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just\n\t\t\t\/\/ return the string(body)\n\t\t\ttype responseErr struct {\n\t\t\t\tErr string\n\t\t\t}\n\t\t\tremoteErr := responseErr{}\n\t\t\tif err := json.Unmarshal(b, &remoteErr); err == nil {\n\t\t\t\tif remoteErr.Err != \"\" {\n\t\t\t\t\treturn nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err}\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ old way...\n\t\t\treturn nil, &statusError{resp.StatusCode, serviceMethod, string(b)}\n\t\t}\n\t\treturn resp.Body, nil\n\t}\n}\n\nfunc backoff(retries int) time.Duration {\n\tb, max := 1, defaultTimeOut\n\tfor b < max && retries > 0 {\n\t\tb *= 2\n\t\tretries--\n\t}\n\tif b > max {\n\t\tb = max\n\t}\n\treturn time.Duration(b) * time.Second\n}\n\nfunc abort(start time.Time, timeOff time.Duration) bool {\n\treturn timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second\n}\n<commit_msg>Close resp body on plugin call error<commit_after>package plugins\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/go-connections\/sockets\"\n\t\"github.com\/docker\/go-connections\/tlsconfig\"\n)\n\nconst (\n\tversionMimetype = \"application\/vnd.docker.plugins.v1.2+json\"\n\tdefaultTimeOut = 30\n)\n\n\/\/ NewClient creates a new plugin client (http).\nfunc NewClient(addr string, tlsConfig tlsconfig.Options) (*Client, error) {\n\ttr := &http.Transport{}\n\n\tc, err := tlsconfig.Client(tlsConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttr.TLSClientConfig = c\n\n\tprotoAndAddr := strings.Split(addr, \":\/\/\")\n\tif err := sockets.ConfigureTransport(tr, protoAndAddr[0], protoAndAddr[1]); err != nil {\n\t\treturn nil, err\n\t}\n\n\tscheme := protoAndAddr[0]\n\tif scheme != \"https\" {\n\t\tscheme = \"http\"\n\t}\n\treturn &Client{&http.Client{Transport: tr}, scheme, protoAndAddr[1]}, nil\n}\n\n\/\/ Client represents a plugin client.\ntype Client struct {\n\thttp *http.Client \/\/ http client to use\n\tscheme string \/\/ scheme protocol of the plugin\n\taddr string \/\/ http address of the plugin\n}\n\n\/\/ Call calls the specified method with the specified arguments for the plugin.\n\/\/ It will retry for 30 seconds if a failure occurs when calling.\nfunc (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error {\n\tvar buf bytes.Buffer\n\tif args != nil {\n\t\tif err := json.NewEncoder(&buf).Encode(args); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tbody, err := c.callWithRetry(serviceMethod, &buf, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer body.Close()\n\tif ret != nil {\n\t\tif err := json.NewDecoder(body).Decode(&ret); err != nil {\n\t\t\tlogrus.Errorf(\"%s: error reading plugin resp: %v\", serviceMethod, err)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Stream calls the specified method with the specified arguments for the plugin and returns the response body\nfunc (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) {\n\tvar buf bytes.Buffer\n\tif err := json.NewEncoder(&buf).Encode(args); err != nil {\n\t\treturn nil, err\n\t}\n\treturn c.callWithRetry(serviceMethod, &buf, true)\n}\n\n\/\/ SendFile calls the specified method, and passes through the IO stream\nfunc (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error {\n\tbody, err := c.callWithRetry(serviceMethod, data, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := json.NewDecoder(body).Decode(&ret); err != nil {\n\t\tlogrus.Errorf(\"%s: error reading plugin resp: %v\", serviceMethod, err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) {\n\treq, err := http.NewRequest(\"POST\", \"\/\"+serviceMethod, data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Accept\", versionMimetype)\n\treq.URL.Scheme = c.scheme\n\treq.URL.Host = c.addr\n\n\tvar retries int\n\tstart := time.Now()\n\n\tfor {\n\t\tresp, err := c.http.Do(req)\n\t\tif err != nil {\n\t\t\tif !retry {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\ttimeOff := backoff(retries)\n\t\t\tif abort(start, timeOff) {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tretries++\n\t\t\tlogrus.Warnf(\"Unable to connect to plugin: %s, retrying in %v\", c.addr, timeOff)\n\t\t\ttime.Sleep(timeOff)\n\t\t\tcontinue\n\t\t}\n\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\t\tresp.Body.Close()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, &statusError{resp.StatusCode, serviceMethod, err.Error()}\n\t\t\t}\n\n\t\t\t\/\/ Plugins' Response(s) should have an Err field indicating what went\n\t\t\t\/\/ wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just\n\t\t\t\/\/ return the string(body)\n\t\t\ttype responseErr struct {\n\t\t\t\tErr string\n\t\t\t}\n\t\t\tremoteErr := responseErr{}\n\t\t\tif err := json.Unmarshal(b, &remoteErr); err == nil {\n\t\t\t\tif remoteErr.Err != \"\" {\n\t\t\t\t\treturn nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err}\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ old way...\n\t\t\treturn nil, &statusError{resp.StatusCode, serviceMethod, string(b)}\n\t\t}\n\t\treturn resp.Body, nil\n\t}\n}\n\nfunc backoff(retries int) time.Duration {\n\tb, max := 1, defaultTimeOut\n\tfor b < max && retries > 0 {\n\t\tb *= 2\n\t\tretries--\n\t}\n\tif b > max {\n\t\tb = max\n\t}\n\treturn time.Duration(b) * time.Second\n}\n\nfunc abort(start time.Time, timeOff time.Duration) bool {\n\treturn timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second\n}\n<|endoftext|>"} {"text":"<commit_before>package hostport\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestNew(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\thostPort string\n\t\thost string\n\t\thostFQDN string\n\t\tport int\n\t\terr string\n\t}{\n\t\t{\"127.0.0.1:1234\", \"127.0.0.1\", \"127.0.0.1\", 1234, \"\"},\n\t\t{\"example.net\", \"\", \"\", NoPort, \"missing port in address\"},\n\t\t{\"example.net:1337\", \"example.net\", \"example.net.\", 1337, \"\"},\n\t\t{\"example.net.:1337\", \"example.net.\", \"example.net.\", 1337, \"\"},\n\t\t{\"[example.net.]:1337\", \"example.net.\", \"example.net.\", 1337, \"\"},\n\t\t{\"[127.0.0.1]:1337\", \"127.0.0.1\", \"127.0.0.1\", 1337, \"\"},\n\t\t{\"[example.net]:1337\", \"example.net\", \"example.net.\", 1337, \"\"},\n\t\t{\"[[example.net]]:1337\", \"\", \"\", NoPort, \"missing port in address\"},\n\t\t{\"\", \"\", \"\", NoPort, \"missing port in address\"},\n\t\t{\":\", \"\", \"\", NoPort, \"invalid syntax\"},\n\t\t{\"::\", \"\", \"\", NoPort, \"too many colons in address\"},\n\t\t{\"2001:DB8::1337\", \"\", \"\", NoPort, \"too many colons in address\"},\n\t\t{\"[2001:DB8::1337]:1337\", \"2001:db8::1337\", \"2001:db8::1337\", 1337, \"\"},\n\t\t{\"[2001:DB8::1337]:91337\", \"2001:db8::1337\", \"2001:db8::1337\", NoPort, \"must be between 0 and 65535\"},\n\t\t{\"[2001:DB8::1337]:007\", \"2001:db8::1337\", \"2001:db8::1337\", 7, \"\"},\n\t\t{\"[2001:DB8::1337]:-12\", \"2001:db8::1337\", \"2001:db8::1337\", NoPort, \"must be between 0 and 65535\"},\n\t\t{\"[2001:DB8::1337]:https\", \"2001:db8::1337\", \"2001:db8::1337\", NoPort, \"invalid syntax\"},\n\t\t{\"🔐.example.com:123\", \"xn--jv8h.example.com\", \"xn--jv8h.example.com.\", 123, \"\"},\n\t\t{\"🔐.example.com:007\", \"xn--jv8h.example.com\", \"xn--jv8h.example.com.\", 7, \"\"},\n\t} {\n\t\tt.Run(fmt.Sprintf(\"%d:%s\", i+1, tt.hostPort), func(t *testing.T) {\n\t\t\ta := assert.New(t)\n\t\t\thp, err := New(tt.hostPort, false)\n\t\t\ta.Equal(tt.host, hp.Host)\n\t\t\ta.Equal(tt.port, hp.Port)\n\t\t\tif tt.err != \"\" {\n\t\t\t\ta.ErrorContains(err, tt.err)\n\t\t\t} else {\n\t\t\t\ta.NoError(err)\n\t\t\t}\n\t\t\t\/\/ FQDN tests\n\t\t\thp, err = New(tt.hostPort, true)\n\t\t\ta.Equal(tt.hostFQDN, hp.Host)\n\t\t\ta.Equal(tt.port, hp.Port)\n\t\t\tif tt.err == \"\" {\n\t\t\t\ta.NoError(err)\n\t\t\t} else {\n\t\t\t\ta.ErrorContains(err, tt.err)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestNewWithScheme(t *testing.T) {\n\tvar tests = []struct {\n\t\tscheme string\n\t\thostPort string\n\t\thost string\n\t\tport int\n\t\tforceFQDN bool\n\t\terrorMsg string\n\t}{\n\t\t{\"http\", \"example.com\", \"example.com\", 80, false, \"\"},\n\t\t{\"http\", \"127.0.0.1\", \"127.0.0.1\", 80, false, \"\"},\n\t\t{\"https\", \"127.0.0.1:123\", \"127.0.0.1\", 123, false, \"\"},\n\t\t{\"https\", \"[2001:DB8::1337]\", \"\", 443, false, \"invalid domain '[2001:DB8::1337]': idna: disallowed rune U+005B\"},\n\t\t{\"https\", \"2001:DB8::1337\", \"2001:db8::1337\", 443, false, \"\"},\n\t\t{\"https\", \"[2001:DB8::1337]:443\", \"2001:db8::1337\", 443, false, \"\"},\n\t\t{\"https\", \"[2001:db8::1337]:443\", \"2001:db8::1337\", 443, false, \"\"},\n\t\t{\"https\", \"[2001:DB8::1337]:-1\", \"2001:DB8::1337\", NoPort, false, \"invalid port number -1: must be between 0 and 65535\"},\n\t\t{\"https\", \"[2001:db8::1337]:111111\", \"2001:db8::1337\", NoPort, false, \"invalid port number 111111: must be between 0 and 65535\"},\n\t\t{\"unknown\", \"[[2001:DB8::1337]]\", \"[[2001:DB8::1337]]\", NoPort, false, \"unable to determine port for unknown\"},\n\t\t{\"https\", \"🔐.example.com:123\", \"xn--jv8h.example.com\", 123, false, \"\"},\n\t\t{\"smtp\", \"✉️.example.com.\", \"xn--4bi.example.com.\", 25, false, \"\"},\n\t\t{\"https\", \"🔐.example.com:123\", \"xn--jv8h.example.com.\", 123, true, \"\"},\n\t\t{\"https\", \"🔐.example.com:007\", \"xn--jv8h.example.com.\", 7, true, \"\"},\n\t\t{\"https\", \"FOO-BAR.example.com\", \"foo-bar.example.com.\", 443, true, \"\"},\n\t\t{\"smtp\", \"✉️.example.com\", \"xn--4bi.example.com.\", 25, true, \"\"},\n\t}\n\n\tfor _, tt := range tests {\n\t\ttestname := fmt.Sprintf(\"%v:\/\/%v\", tt.scheme, tt.hostPort)\n\t\tt.Run(testname, func(t *testing.T) {\n\t\t\tr := require.New(t)\n\n\t\t\thp, err := NewWithScheme(tt.hostPort, tt.scheme, tt.forceFQDN)\n\t\t\tr.Equal(tt.host, hp.Host)\n\t\t\tr.Equal(tt.port, hp.Port)\n\n\t\t\tif err != nil {\n\t\t\t\tr.EqualError(err, tt.errorMsg)\n\t\t\t} else {\n\t\t\t\tr.NoError(err)\n\t\t\t}\n\t\t})\n\t}\n}\n<commit_msg>(test) Fix error checks<commit_after>package hostport\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestNew(t *testing.T) {\n\tfor i, tt := range []struct {\n\t\thostPort string\n\t\thost string\n\t\thostFQDN string\n\t\tport int\n\t\terr string\n\t}{\n\t\t{\"127.0.0.1:1234\", \"127.0.0.1\", \"127.0.0.1\", 1234, \"\"},\n\t\t{\"example.net\", \"\", \"\", NoPort, \"missing port in address\"},\n\t\t{\"example.net:1337\", \"example.net\", \"example.net.\", 1337, \"\"},\n\t\t{\"example.net.:1337\", \"example.net.\", \"example.net.\", 1337, \"\"},\n\t\t{\"[example.net.]:1337\", \"example.net.\", \"example.net.\", 1337, \"\"},\n\t\t{\"[127.0.0.1]:1337\", \"127.0.0.1\", \"127.0.0.1\", 1337, \"\"},\n\t\t{\"[example.net]:1337\", \"example.net\", \"example.net.\", 1337, \"\"},\n\t\t{\"[[example.net]]:1337\", \"\", \"\", NoPort, \"missing port in address\"},\n\t\t{\"\", \"\", \"\", NoPort, \"missing port in address\"},\n\t\t{\":\", \"\", \"\", NoPort, \"invalid syntax\"},\n\t\t{\"::\", \"\", \"\", NoPort, \"too many colons in address\"},\n\t\t{\"2001:DB8::1337\", \"\", \"\", NoPort, \"too many colons in address\"},\n\t\t{\"[2001:DB8::1337]:1337\", \"2001:db8::1337\", \"2001:db8::1337\", 1337, \"\"},\n\t\t{\"[2001:DB8::1337]:91337\", \"2001:db8::1337\", \"2001:db8::1337\", NoPort, \"must be between 0 and 65535\"},\n\t\t{\"[2001:DB8::1337]:007\", \"2001:db8::1337\", \"2001:db8::1337\", 7, \"\"},\n\t\t{\"[2001:DB8::1337]:-12\", \"2001:db8::1337\", \"2001:db8::1337\", NoPort, \"must be between 0 and 65535\"},\n\t\t{\"[2001:DB8::1337]:https\", \"2001:db8::1337\", \"2001:db8::1337\", NoPort, \"invalid syntax\"},\n\t\t{\"🔐.example.com:123\", \"xn--jv8h.example.com\", \"xn--jv8h.example.com.\", 123, \"\"},\n\t\t{\"🔐.example.com:007\", \"xn--jv8h.example.com\", \"xn--jv8h.example.com.\", 7, \"\"},\n\t} {\n\t\tt.Run(fmt.Sprintf(\"%d:%s\", i+1, tt.hostPort), func(t *testing.T) {\n\t\t\ta := assert.New(t)\n\t\t\thp, err := New(tt.hostPort, false)\n\t\t\ta.Equal(tt.host, hp.Host)\n\t\t\ta.Equal(tt.port, hp.Port)\n\t\t\tif tt.err != \"\" {\n\t\t\t\ta.ErrorContains(err, tt.err)\n\t\t\t} else {\n\t\t\t\ta.NoError(err)\n\t\t\t}\n\t\t\t\/\/ FQDN tests\n\t\t\thp, err = New(tt.hostPort, true)\n\t\t\ta.Equal(tt.hostFQDN, hp.Host)\n\t\t\ta.Equal(tt.port, hp.Port)\n\t\t\tif tt.err == \"\" {\n\t\t\t\ta.NoError(err)\n\t\t\t} else {\n\t\t\t\ta.ErrorContains(err, tt.err)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestNewWithScheme(t *testing.T) {\n\tvar tests = []struct {\n\t\tscheme string\n\t\thostPort string\n\t\thost string\n\t\tport int\n\t\tforceFQDN bool\n\t\terr string\n\t}{\n\t\t{\"http\", \"example.com\", \"example.com\", 80, false, \"\"},\n\t\t{\"http\", \"127.0.0.1\", \"127.0.0.1\", 80, false, \"\"},\n\t\t{\"https\", \"127.0.0.1:123\", \"127.0.0.1\", 123, false, \"\"},\n\t\t{\"https\", \"[2001:DB8::1337]\", \"\", 443, false, \"invalid domain '[2001:DB8::1337]': idna: disallowed rune U+005B\"},\n\t\t{\"https\", \"2001:DB8::1337\", \"2001:db8::1337\", 443, false, \"\"},\n\t\t{\"https\", \"[2001:DB8::1337]:443\", \"2001:db8::1337\", 443, false, \"\"},\n\t\t{\"https\", \"[2001:db8::1337]:443\", \"2001:db8::1337\", 443, false, \"\"},\n\t\t{\"https\", \"[2001:DB8::1337]:-1\", \"2001:DB8::1337\", NoPort, false, \"invalid port number -1: must be between 0 and 65535\"},\n\t\t{\"https\", \"[2001:db8::1337]:111111\", \"2001:db8::1337\", NoPort, false, \"invalid port number 111111: must be between 0 and 65535\"},\n\t\t{\"unknown\", \"[[2001:DB8::1337]]\", \"[[2001:DB8::1337]]\", NoPort, false, \"unable to determine port for unknown\"},\n\t\t{\"https\", \"🔐.example.com:123\", \"xn--jv8h.example.com\", 123, false, \"\"},\n\t\t{\"smtp\", \"✉️.example.com.\", \"xn--4bi.example.com.\", 25, false, \"\"},\n\t\t{\"https\", \"🔐.example.com:123\", \"xn--jv8h.example.com.\", 123, true, \"\"},\n\t\t{\"https\", \"🔐.example.com:007\", \"xn--jv8h.example.com.\", 7, true, \"\"},\n\t\t{\"https\", \"FOO-BAR.example.com\", \"foo-bar.example.com.\", 443, true, \"\"},\n\t\t{\"smtp\", \"✉️.example.com\", \"xn--4bi.example.com.\", 25, true, \"\"},\n\t}\n\n\tfor _, tt := range tests {\n\t\ttestname := fmt.Sprintf(\"%v:\/\/%v\", tt.scheme, tt.hostPort)\n\t\tt.Run(testname, func(t *testing.T) {\n\t\t\tr := require.New(t)\n\n\t\t\thp, err := NewWithScheme(tt.hostPort, tt.scheme, tt.forceFQDN)\n\t\t\tr.Equal(tt.host, hp.Host)\n\t\t\tr.Equal(tt.port, hp.Port)\n\n\t\t\tif tt.err != \"\" {\n\t\t\t\tr.EqualError(err, tt.err)\n\t\t\t} else {\n\t\t\t\tr.NoError(err)\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package actors\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com\/cloudfoundry\/cli\/cf\/actors\/plan_builder\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/actors\/service_builder\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/api\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/models\"\n)\n\ntype ServicePlanActor interface {\n\tFindServiceAccess(string) (ServiceAccess, error)\n\tUpdateAllPlansForService(string, bool) (bool, error)\n\tUpdateOrgForService(string, string, bool) (bool, error)\n\tUpdateSinglePlanForService(string, string, bool) (PlanAccess, error)\n\tUpdatePlanAndOrgForService(string, string, string, bool) (PlanAccess, error)\n}\n\ntype PlanAccess int\n\nconst (\n\tPlanAccessError PlanAccess = iota\n\tAll\n\tLimited\n\tNone\n)\n\ntype ServiceAccess int\n\nconst (\n\tServiceAccessError ServiceAccess = iota\n\tAllPlansArePublic\n\tAllPlansArePrivate\n\tAllPlansAreLimited\n\tSomePlansArePublicSomeAreLimited\n\tSomePlansArePublicSomeArePrivate\n\tSomePlansAreLimitedSomeArePrivate\n\tSomePlansArePublicSomeAreLimitedSomeArePrivate\n)\n\ntype ServicePlanHandler struct {\n\tservicePlanRepo api.ServicePlanRepository\n\tservicePlanVisibilityRepo api.ServicePlanVisibilityRepository\n\torgRepo api.OrganizationRepository\n\tserviceBuilder service_builder.ServiceBuilder\n\tplanBuilder plan_builder.PlanBuilder\n}\n\nfunc NewServicePlanHandler(plan api.ServicePlanRepository, vis api.ServicePlanVisibilityRepository, org api.OrganizationRepository, planBuilder plan_builder.PlanBuilder, serviceBuilder service_builder.ServiceBuilder) ServicePlanHandler {\n\treturn ServicePlanHandler{\n\t\tservicePlanRepo: plan,\n\t\tservicePlanVisibilityRepo: vis,\n\t\torgRepo: org,\n\t\tserviceBuilder: serviceBuilder,\n\t\tplanBuilder: planBuilder,\n\t}\n}\n\nfunc (actor ServicePlanHandler) UpdateAllPlansForService(serviceName string, setPlanVisibility bool) (bool, error) {\n\tservice, err := actor.serviceBuilder.GetServiceByName(serviceName)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tallPlansWereSet := true\n\tfor _, plan := range service.Plans {\n\t\tplanAccess, err := actor.updateSinglePlan(service, plan.Name, setPlanVisibility)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\t\/\/ If any plan is Limited we know that we have to change the visibility.\n\t\tplanAlreadySet := ((planAccess == All) == setPlanVisibility) && planAccess != Limited\n\t\tallPlansWereSet = allPlansWereSet && planAlreadySet\n\t}\n\treturn allPlansWereSet, nil\n}\n\nfunc (actor ServicePlanHandler) UpdateOrgForService(serviceName string, orgName string, setPlanVisibility bool) (bool, error) {\n\tvar err error\n\tvar service models.ServiceOffering\n\n\tservice, err = actor.serviceBuilder.GetServiceByName(serviceName)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\torg, err := actor.orgRepo.FindByName(orgName)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tallPlansWereSet := true\n\tfor _, plan := range service.Plans {\n\t\tvisibilityExists := plan.OrgHasVisibility(org.Name)\n\t\tif plan.Public || visibilityExists == setPlanVisibility {\n\t\t\tcontinue\n\t\t} else if visibilityExists && !setPlanVisibility {\n\t\t\tactor.deleteServicePlanVisibilities(map[string]string{\"org_guid\": org.Guid, \"service_plan_guid\": plan.Guid})\n\t\t} else if !visibilityExists && setPlanVisibility {\n\t\t\terr = actor.servicePlanVisibilityRepo.Create(plan.Guid, org.Guid)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\t\t\/\/ We only get here once we have already updated a plan.\n\t\tallPlansWereSet = false\n\t}\n\treturn allPlansWereSet, nil\n}\n\nfunc (actor ServicePlanHandler) UpdatePlanAndOrgForService(serviceName, planName, orgName string, setPlanVisibility bool) (PlanAccess, error) {\n\tservice, err := actor.serviceBuilder.GetServiceByName(serviceName)\n\tif err != nil {\n\t\treturn PlanAccessError, err\n\t}\n\n\torg, err := actor.orgRepo.FindByName(orgName)\n\tif err != nil {\n\t\treturn PlanAccessError, err\n\t}\n\n\tfound := false\n\tvar servicePlan models.ServicePlanFields\n\tfor i, val := range service.Plans {\n\t\tif val.Name == planName {\n\t\t\tfound = true\n\t\t\tservicePlan = service.Plans[i]\n\t\t}\n\t}\n\tif !found {\n\t\treturn PlanAccessError, errors.New(fmt.Sprintf(\"Service plan %s not found\", planName))\n\t}\n\n\tif !servicePlan.Public && setPlanVisibility {\n\t\tif servicePlan.OrgHasVisibility(orgName) {\n\t\t\treturn Limited, nil\n\t\t}\n\n\t\t\/\/ Enable service access\n\t\terr = actor.servicePlanVisibilityRepo.Create(servicePlan.Guid, org.Guid)\n\t\tif err != nil {\n\t\t\treturn PlanAccessError, err\n\t\t}\n\t} else if !servicePlan.Public && !setPlanVisibility {\n\t\t\/\/ Disable service access\n\t\tif servicePlan.OrgHasVisibility(org.Name) {\n\t\t\terr = actor.deleteServicePlanVisibilities(map[string]string{\"organization_guid\": org.Guid, \"service_plan_guid\": servicePlan.Guid})\n\t\t\tif err != nil {\n\t\t\t\treturn PlanAccessError, err\n\t\t\t}\n\t\t}\n\t}\n\n\taccess := actor.findPlanAccess(servicePlan)\n\treturn access, nil\n}\n\nfunc (actor ServicePlanHandler) UpdateSinglePlanForService(serviceName string, planName string, setPlanVisibility bool) (PlanAccess, error) {\n\tserviceOffering, err := actor.serviceBuilder.GetServiceByName(serviceName)\n\tif err != nil {\n\t\treturn PlanAccessError, err\n\t}\n\treturn actor.updateSinglePlan(serviceOffering, planName, setPlanVisibility)\n}\n\nfunc (actor ServicePlanHandler) updateSinglePlan(serviceOffering models.ServiceOffering, planName string, setPlanVisibility bool) (PlanAccess, error) {\n\tvar planToUpdate *models.ServicePlanFields\n\n\t\/\/find the service plan and set it as the only service plan for update\n\tfor _, servicePlan := range serviceOffering.Plans {\n\t\tif servicePlan.Name == planName {\n\t\t\tplanToUpdate = &servicePlan \/\/he has the orgs inside him!!!\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif planToUpdate == nil {\n\t\treturn PlanAccessError, errors.New(fmt.Sprintf(\"The plan %s could not be found for service %s\", planName, serviceOffering.Label))\n\t}\n\n\terr := actor.updateServicePlanAvailability(serviceOffering.Guid, *planToUpdate, setPlanVisibility)\n\tif err != nil {\n\t\treturn PlanAccessError, err\n\t}\n\n\taccess := actor.findPlanAccess(*planToUpdate)\n\treturn access, nil\n}\n\nfunc (actor ServicePlanHandler) deleteServicePlanVisibilities(queryParams map[string]string) error {\n\tvisibilities, err := actor.servicePlanVisibilityRepo.Search(queryParams)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, visibility := range visibilities {\n\t\terr = actor.servicePlanVisibilityRepo.Delete(visibility.Guid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (actor ServicePlanHandler) updateServicePlanAvailability(serviceGuid string, servicePlan models.ServicePlanFields, setPlanVisibility bool) error {\n\t\/\/ We delete all service plan visibilities for the given Plan since the attribute public should function as a giant on\/off\n\t\/\/ switch for all orgs. Thus we need to clean up any visibilities laying around so that they don't carry over.\n\terr := actor.deleteServicePlanVisibilities(map[string]string{\"plan_guid\": servicePlan.Guid})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif servicePlan.Public == setPlanVisibility {\n\t\treturn nil\n\t}\n\n\treturn actor.servicePlanRepo.Update(servicePlan, serviceGuid, setPlanVisibility)\n}\n\nfunc (actor ServicePlanHandler) FindServiceAccess(serviceName string) (ServiceAccess, error) {\n\tservice, err := actor.serviceBuilder.GetServiceByName(serviceName)\n\tif err != nil {\n\t\treturn ServiceAccessError, err\n\t}\n\n\tpublicBucket, limitedBucket, privateBucket := 0, 0, 0\n\n\tfor _, plan := range service.Plans {\n\t\tif plan.Public {\n\t\t\tpublicBucket++\n\t\t} else if len(plan.OrgNames) > 0 {\n\t\t\tlimitedBucket++\n\t\t} else {\n\t\t\tprivateBucket++\n\t\t}\n\t}\n\n\tif publicBucket > 0 && limitedBucket == 0 && privateBucket == 0 {\n\t\treturn AllPlansArePublic, nil\n\t}\n\tif publicBucket > 0 && limitedBucket > 0 && privateBucket == 0 {\n\t\treturn SomePlansArePublicSomeAreLimited, nil\n\t}\n\tif publicBucket > 0 && privateBucket > 0 && limitedBucket == 0 {\n\t\treturn SomePlansArePublicSomeArePrivate, nil\n\t}\n\n\tif limitedBucket > 0 && publicBucket == 0 && privateBucket == 0 {\n\t\treturn AllPlansAreLimited, nil\n\t}\n\tif privateBucket > 0 && publicBucket == 0 && privateBucket == 0 {\n\t\treturn AllPlansArePrivate, nil\n\t}\n\tif limitedBucket > 0 && privateBucket > 0 && publicBucket == 0 {\n\t\treturn SomePlansAreLimitedSomeArePrivate, nil\n\t}\n\treturn SomePlansArePublicSomeAreLimitedSomeArePrivate, nil\n}\n\nfunc (actor ServicePlanHandler) findPlanAccess(plan models.ServicePlanFields) PlanAccess {\n\tif plan.Public {\n\t\treturn All\n\t} else if len(plan.OrgNames) > 0 {\n\t\treturn Limited\n\t} else {\n\t\treturn None\n\t}\n}\n<commit_msg>Use correct parameter for querying service plan visibilities.<commit_after>package actors\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com\/cloudfoundry\/cli\/cf\/actors\/plan_builder\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/actors\/service_builder\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/api\"\n\t\"github.com\/cloudfoundry\/cli\/cf\/models\"\n)\n\ntype ServicePlanActor interface {\n\tFindServiceAccess(string) (ServiceAccess, error)\n\tUpdateAllPlansForService(string, bool) (bool, error)\n\tUpdateOrgForService(string, string, bool) (bool, error)\n\tUpdateSinglePlanForService(string, string, bool) (PlanAccess, error)\n\tUpdatePlanAndOrgForService(string, string, string, bool) (PlanAccess, error)\n}\n\ntype PlanAccess int\n\nconst (\n\tPlanAccessError PlanAccess = iota\n\tAll\n\tLimited\n\tNone\n)\n\ntype ServiceAccess int\n\nconst (\n\tServiceAccessError ServiceAccess = iota\n\tAllPlansArePublic\n\tAllPlansArePrivate\n\tAllPlansAreLimited\n\tSomePlansArePublicSomeAreLimited\n\tSomePlansArePublicSomeArePrivate\n\tSomePlansAreLimitedSomeArePrivate\n\tSomePlansArePublicSomeAreLimitedSomeArePrivate\n)\n\ntype ServicePlanHandler struct {\n\tservicePlanRepo api.ServicePlanRepository\n\tservicePlanVisibilityRepo api.ServicePlanVisibilityRepository\n\torgRepo api.OrganizationRepository\n\tserviceBuilder service_builder.ServiceBuilder\n\tplanBuilder plan_builder.PlanBuilder\n}\n\nfunc NewServicePlanHandler(plan api.ServicePlanRepository, vis api.ServicePlanVisibilityRepository, org api.OrganizationRepository, planBuilder plan_builder.PlanBuilder, serviceBuilder service_builder.ServiceBuilder) ServicePlanHandler {\n\treturn ServicePlanHandler{\n\t\tservicePlanRepo: plan,\n\t\tservicePlanVisibilityRepo: vis,\n\t\torgRepo: org,\n\t\tserviceBuilder: serviceBuilder,\n\t\tplanBuilder: planBuilder,\n\t}\n}\n\nfunc (actor ServicePlanHandler) UpdateAllPlansForService(serviceName string, setPlanVisibility bool) (bool, error) {\n\tservice, err := actor.serviceBuilder.GetServiceByName(serviceName)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tallPlansWereSet := true\n\tfor _, plan := range service.Plans {\n\t\tplanAccess, err := actor.updateSinglePlan(service, plan.Name, setPlanVisibility)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\t\/\/ If any plan is Limited we know that we have to change the visibility.\n\t\tplanAlreadySet := ((planAccess == All) == setPlanVisibility) && planAccess != Limited\n\t\tallPlansWereSet = allPlansWereSet && planAlreadySet\n\t}\n\treturn allPlansWereSet, nil\n}\n\nfunc (actor ServicePlanHandler) UpdateOrgForService(serviceName string, orgName string, setPlanVisibility bool) (bool, error) {\n\tvar err error\n\tvar service models.ServiceOffering\n\n\tservice, err = actor.serviceBuilder.GetServiceByName(serviceName)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\torg, err := actor.orgRepo.FindByName(orgName)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tallPlansWereSet := true\n\tfor _, plan := range service.Plans {\n\t\tvisibilityExists := plan.OrgHasVisibility(org.Name)\n\t\tif plan.Public || visibilityExists == setPlanVisibility {\n\t\t\tcontinue\n\t\t} else if visibilityExists && !setPlanVisibility {\n\t\t\tactor.deleteServicePlanVisibilities(map[string]string{\"org_guid\": org.Guid, \"service_plan_guid\": plan.Guid})\n\t\t} else if !visibilityExists && setPlanVisibility {\n\t\t\terr = actor.servicePlanVisibilityRepo.Create(plan.Guid, org.Guid)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\t\t\/\/ We only get here once we have already updated a plan.\n\t\tallPlansWereSet = false\n\t}\n\treturn allPlansWereSet, nil\n}\n\nfunc (actor ServicePlanHandler) UpdatePlanAndOrgForService(serviceName, planName, orgName string, setPlanVisibility bool) (PlanAccess, error) {\n\tservice, err := actor.serviceBuilder.GetServiceByName(serviceName)\n\tif err != nil {\n\t\treturn PlanAccessError, err\n\t}\n\n\torg, err := actor.orgRepo.FindByName(orgName)\n\tif err != nil {\n\t\treturn PlanAccessError, err\n\t}\n\n\tfound := false\n\tvar servicePlan models.ServicePlanFields\n\tfor i, val := range service.Plans {\n\t\tif val.Name == planName {\n\t\t\tfound = true\n\t\t\tservicePlan = service.Plans[i]\n\t\t}\n\t}\n\tif !found {\n\t\treturn PlanAccessError, errors.New(fmt.Sprintf(\"Service plan %s not found\", planName))\n\t}\n\n\tif !servicePlan.Public && setPlanVisibility {\n\t\tif servicePlan.OrgHasVisibility(orgName) {\n\t\t\treturn Limited, nil\n\t\t}\n\n\t\t\/\/ Enable service access\n\t\terr = actor.servicePlanVisibilityRepo.Create(servicePlan.Guid, org.Guid)\n\t\tif err != nil {\n\t\t\treturn PlanAccessError, err\n\t\t}\n\t} else if !servicePlan.Public && !setPlanVisibility {\n\t\t\/\/ Disable service access\n\t\tif servicePlan.OrgHasVisibility(org.Name) {\n\t\t\terr = actor.deleteServicePlanVisibilities(map[string]string{\"organization_guid\": org.Guid, \"service_plan_guid\": servicePlan.Guid})\n\t\t\tif err != nil {\n\t\t\t\treturn PlanAccessError, err\n\t\t\t}\n\t\t}\n\t}\n\n\taccess := actor.findPlanAccess(servicePlan)\n\treturn access, nil\n}\n\nfunc (actor ServicePlanHandler) UpdateSinglePlanForService(serviceName string, planName string, setPlanVisibility bool) (PlanAccess, error) {\n\tserviceOffering, err := actor.serviceBuilder.GetServiceByName(serviceName)\n\tif err != nil {\n\t\treturn PlanAccessError, err\n\t}\n\treturn actor.updateSinglePlan(serviceOffering, planName, setPlanVisibility)\n}\n\nfunc (actor ServicePlanHandler) updateSinglePlan(serviceOffering models.ServiceOffering, planName string, setPlanVisibility bool) (PlanAccess, error) {\n\tvar planToUpdate *models.ServicePlanFields\n\n\t\/\/find the service plan and set it as the only service plan for update\n\tfor _, servicePlan := range serviceOffering.Plans {\n\t\tif servicePlan.Name == planName {\n\t\t\tplanToUpdate = &servicePlan \/\/he has the orgs inside him!!!\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif planToUpdate == nil {\n\t\treturn PlanAccessError, errors.New(fmt.Sprintf(\"The plan %s could not be found for service %s\", planName, serviceOffering.Label))\n\t}\n\n\terr := actor.updateServicePlanAvailability(serviceOffering.Guid, *planToUpdate, setPlanVisibility)\n\tif err != nil {\n\t\treturn PlanAccessError, err\n\t}\n\n\taccess := actor.findPlanAccess(*planToUpdate)\n\treturn access, nil\n}\n\nfunc (actor ServicePlanHandler) deleteServicePlanVisibilities(queryParams map[string]string) error {\n\tvisibilities, err := actor.servicePlanVisibilityRepo.Search(queryParams)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, visibility := range visibilities {\n\t\terr = actor.servicePlanVisibilityRepo.Delete(visibility.Guid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (actor ServicePlanHandler) updateServicePlanAvailability(serviceGuid string, servicePlan models.ServicePlanFields, setPlanVisibility bool) error {\n\t\/\/ We delete all service plan visibilities for the given Plan since the attribute public should function as a giant on\/off\n\t\/\/ switch for all orgs. Thus we need to clean up any visibilities laying around so that they don't carry over.\n\terr := actor.deleteServicePlanVisibilities(map[string]string{\"service_plan_guid\": servicePlan.Guid})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif servicePlan.Public == setPlanVisibility {\n\t\treturn nil\n\t}\n\n\treturn actor.servicePlanRepo.Update(servicePlan, serviceGuid, setPlanVisibility)\n}\n\nfunc (actor ServicePlanHandler) FindServiceAccess(serviceName string) (ServiceAccess, error) {\n\tservice, err := actor.serviceBuilder.GetServiceByName(serviceName)\n\tif err != nil {\n\t\treturn ServiceAccessError, err\n\t}\n\n\tpublicBucket, limitedBucket, privateBucket := 0, 0, 0\n\n\tfor _, plan := range service.Plans {\n\t\tif plan.Public {\n\t\t\tpublicBucket++\n\t\t} else if len(plan.OrgNames) > 0 {\n\t\t\tlimitedBucket++\n\t\t} else {\n\t\t\tprivateBucket++\n\t\t}\n\t}\n\n\tif publicBucket > 0 && limitedBucket == 0 && privateBucket == 0 {\n\t\treturn AllPlansArePublic, nil\n\t}\n\tif publicBucket > 0 && limitedBucket > 0 && privateBucket == 0 {\n\t\treturn SomePlansArePublicSomeAreLimited, nil\n\t}\n\tif publicBucket > 0 && privateBucket > 0 && limitedBucket == 0 {\n\t\treturn SomePlansArePublicSomeArePrivate, nil\n\t}\n\n\tif limitedBucket > 0 && publicBucket == 0 && privateBucket == 0 {\n\t\treturn AllPlansAreLimited, nil\n\t}\n\tif privateBucket > 0 && publicBucket == 0 && privateBucket == 0 {\n\t\treturn AllPlansArePrivate, nil\n\t}\n\tif limitedBucket > 0 && privateBucket > 0 && publicBucket == 0 {\n\t\treturn SomePlansAreLimitedSomeArePrivate, nil\n\t}\n\treturn SomePlansArePublicSomeAreLimitedSomeArePrivate, nil\n}\n\nfunc (actor ServicePlanHandler) findPlanAccess(plan models.ServicePlanFields) PlanAccess {\n\tif plan.Public {\n\t\treturn All\n\t} else if len(plan.OrgNames) > 0 {\n\t\treturn Limited\n\t} else {\n\t\treturn None\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Go MySQL Driver - A MySQL-Driver for Go's database\/sql package\n\/\/\n\/\/ Copyright 2012 Julien Schmidt. All rights reserved.\n\/\/ http:\/\/www.julienschmidt.com\n\/\/\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage mysql\n\nimport (\n\t\"database\/sql\/driver\"\n\t\"errors\"\n\t\"net\"\n\t\"strings\"\n)\n\ntype mysqlConn struct {\n\tcfg *config\n\tflags clientFlag\n\tcharset byte\n\tcipher []byte\n\tnetConn net.Conn\n\tbuf *buffer\n\tprotocol uint8\n\tsequence uint8\n\taffectedRows uint64\n\tinsertId uint64\n\tmaxPacketAllowed int\n\tmaxWriteSize int\n}\n\ntype config struct {\n\tuser string\n\tpasswd string\n\tnet string\n\taddr string\n\tdbname string\n\tparams map[string]string\n}\n\n\/\/ Handles parameters set in DSN\nfunc (mc *mysqlConn) handleParams() (err error) {\n\tfor param, val := range mc.cfg.params {\n\t\tswitch param {\n\t\t\/\/ Charset\n\t\tcase \"charset\":\n\t\t\tcharsets := strings.Split(val, \",\")\n\t\t\tfor i := range charsets {\n\t\t\t\t\/\/ ignore errors here - a charset may not exist\n\t\t\t\terr = mc.exec(\"SET NAMES \" + charsets[i])\n\t\t\t\tif err == nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\/\/ handled elsewhere\n\t\tcase \"timeout\", \"allowAllFiles\":\n\t\t\tcontinue\n\n\t\t\/\/ TLS-Encryption\n\t\tcase \"tls\":\n\t\t\terr = errors.New(\"TLS-Encryption not implemented yet\")\n\t\t\treturn\n\n\t\t\/\/ Compression\n\t\tcase \"compress\":\n\t\t\terr = errors.New(\"Compression not implemented yet\")\n\n\t\t\/\/ System Vars\n\t\tdefault:\n\t\t\terr = mc.exec(\"SET \" + param + \"=\" + val + \"\")\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (mc *mysqlConn) Begin() (driver.Tx, error) {\n\terr := mc.exec(\"START TRANSACTION\")\n\tif err == nil {\n\t\treturn &mysqlTx{mc}, err\n\t}\n\n\treturn nil, err\n}\n\nfunc (mc *mysqlConn) Close() (err error) {\n\tmc.writeCommandPacket(comQuit)\n\tmc.cfg = nil\n\tmc.buf = nil\n\tmc.netConn.Close()\n\tmc.netConn = nil\n\treturn\n}\n\nfunc (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {\n\t\/\/ Send command\n\terr := mc.writeCommandPacketStr(comStmtPrepare, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstmt := &mysqlStmt{\n\t\tmc: mc,\n\t}\n\n\t\/\/ Read Result\n\tcolumnCount, err := stmt.readPrepareResultPacket()\n\tif err == nil {\n\t\tif stmt.paramCount > 0 {\n\t\t\tstmt.params, err = stmt.mc.readColumns(stmt.paramCount)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tif columnCount > 0 {\n\t\t\terr = stmt.mc.readUntilEOF()\n\t\t}\n\t}\n\n\treturn stmt, err\n}\n\nfunc (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {\n\tif len(args) == 0 { \/\/ no args, fastpath\n\t\tmc.affectedRows = 0\n\t\tmc.insertId = 0\n\n\t\terr := mc.exec(query)\n\t\tif err == nil {\n\t\t\treturn &mysqlResult{\n\t\t\t\taffectedRows: int64(mc.affectedRows),\n\t\t\t\tinsertId: int64(mc.insertId),\n\t\t\t}, err\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t\/\/ with args, must use prepared stmt\n\treturn nil, driver.ErrSkip\n\n}\n\n\/\/ Internal function to execute commands\nfunc (mc *mysqlConn) exec(query string) (err error) {\n\t\/\/ Send command\n\terr = mc.writeCommandPacketStr(comQuery, query)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Read Result\n\tvar resLen int\n\tresLen, err = mc.readResultSetHeaderPacket()\n\tif err == nil && resLen > 0 {\n\t\terr = mc.readUntilEOF()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\terr = mc.readUntilEOF()\n\t}\n\n\treturn\n}\n\nfunc (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {\n\tif len(args) == 0 { \/\/ no args, fastpath\n\t\t\/\/ Send command\n\t\terr := mc.writeCommandPacketStr(comQuery, query)\n\t\tif err == nil {\n\t\t\t\/\/ Read Result\n\t\t\tvar resLen int\n\t\t\tresLen, err = mc.readResultSetHeaderPacket()\n\t\t\tif err == nil {\n\t\t\t\trows := &mysqlRows{mc, false, nil, false}\n\n\t\t\t\tif resLen > 0 {\n\t\t\t\t\t\/\/ Columns\n\t\t\t\t\trows.columns, err = mc.readColumns(resLen)\n\t\t\t\t}\n\t\t\t\treturn rows, err\n\t\t\t}\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\t\/\/ with args, must use prepared stmt\n\treturn nil, driver.ErrSkip\n}\n\n\/\/ Gets the value of the given MySQL System Variable\nfunc (mc *mysqlConn) getSystemVar(name string) (val []byte, err error) {\n\t\/\/ Send command\n\terr = mc.writeCommandPacketStr(comQuery, \"SELECT @@\"+name)\n\tif err == nil {\n\t\t\/\/ Read Result\n\t\tvar resLen int\n\t\tresLen, err = mc.readResultSetHeaderPacket()\n\t\tif err == nil {\n\t\t\trows := &mysqlRows{mc, false, nil, false}\n\n\t\t\tif resLen > 0 {\n\t\t\t\t\/\/ Columns\n\t\t\t\trows.columns, err = mc.readColumns(resLen)\n\t\t\t}\n\n\t\t\tdest := make([]driver.Value, resLen)\n\t\t\terr = rows.readRow(dest)\n\t\t\tif err == nil {\n\t\t\t\tval = dest[0].([]byte)\n\t\t\t\terr = mc.readUntilEOF()\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n<commit_msg>getSystemVar result validity doc<commit_after>\/\/ Go MySQL Driver - A MySQL-Driver for Go's database\/sql package\n\/\/\n\/\/ Copyright 2012 Julien Schmidt. All rights reserved.\n\/\/ http:\/\/www.julienschmidt.com\n\/\/\n\/\/ This Source Code Form is subject to the terms of the Mozilla Public\n\/\/ License, v. 2.0. If a copy of the MPL was not distributed with this file,\n\/\/ You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/.\n\npackage mysql\n\nimport (\n\t\"database\/sql\/driver\"\n\t\"errors\"\n\t\"net\"\n\t\"strings\"\n)\n\ntype mysqlConn struct {\n\tcfg *config\n\tflags clientFlag\n\tcharset byte\n\tcipher []byte\n\tnetConn net.Conn\n\tbuf *buffer\n\tprotocol uint8\n\tsequence uint8\n\taffectedRows uint64\n\tinsertId uint64\n\tmaxPacketAllowed int\n\tmaxWriteSize int\n}\n\ntype config struct {\n\tuser string\n\tpasswd string\n\tnet string\n\taddr string\n\tdbname string\n\tparams map[string]string\n}\n\n\/\/ Handles parameters set in DSN\nfunc (mc *mysqlConn) handleParams() (err error) {\n\tfor param, val := range mc.cfg.params {\n\t\tswitch param {\n\t\t\/\/ Charset\n\t\tcase \"charset\":\n\t\t\tcharsets := strings.Split(val, \",\")\n\t\t\tfor i := range charsets {\n\t\t\t\t\/\/ ignore errors here - a charset may not exist\n\t\t\t\terr = mc.exec(\"SET NAMES \" + charsets[i])\n\t\t\t\tif err == nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\/\/ handled elsewhere\n\t\tcase \"timeout\", \"allowAllFiles\":\n\t\t\tcontinue\n\n\t\t\/\/ TLS-Encryption\n\t\tcase \"tls\":\n\t\t\terr = errors.New(\"TLS-Encryption not implemented yet\")\n\t\t\treturn\n\n\t\t\/\/ Compression\n\t\tcase \"compress\":\n\t\t\terr = errors.New(\"Compression not implemented yet\")\n\n\t\t\/\/ System Vars\n\t\tdefault:\n\t\t\terr = mc.exec(\"SET \" + param + \"=\" + val + \"\")\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (mc *mysqlConn) Begin() (driver.Tx, error) {\n\terr := mc.exec(\"START TRANSACTION\")\n\tif err == nil {\n\t\treturn &mysqlTx{mc}, err\n\t}\n\n\treturn nil, err\n}\n\nfunc (mc *mysqlConn) Close() (err error) {\n\tmc.writeCommandPacket(comQuit)\n\tmc.cfg = nil\n\tmc.buf = nil\n\tmc.netConn.Close()\n\tmc.netConn = nil\n\treturn\n}\n\nfunc (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {\n\t\/\/ Send command\n\terr := mc.writeCommandPacketStr(comStmtPrepare, query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstmt := &mysqlStmt{\n\t\tmc: mc,\n\t}\n\n\t\/\/ Read Result\n\tcolumnCount, err := stmt.readPrepareResultPacket()\n\tif err == nil {\n\t\tif stmt.paramCount > 0 {\n\t\t\tstmt.params, err = stmt.mc.readColumns(stmt.paramCount)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tif columnCount > 0 {\n\t\t\terr = stmt.mc.readUntilEOF()\n\t\t}\n\t}\n\n\treturn stmt, err\n}\n\nfunc (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {\n\tif len(args) == 0 { \/\/ no args, fastpath\n\t\tmc.affectedRows = 0\n\t\tmc.insertId = 0\n\n\t\terr := mc.exec(query)\n\t\tif err == nil {\n\t\t\treturn &mysqlResult{\n\t\t\t\taffectedRows: int64(mc.affectedRows),\n\t\t\t\tinsertId: int64(mc.insertId),\n\t\t\t}, err\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t\/\/ with args, must use prepared stmt\n\treturn nil, driver.ErrSkip\n\n}\n\n\/\/ Internal function to execute commands\nfunc (mc *mysqlConn) exec(query string) (err error) {\n\t\/\/ Send command\n\terr = mc.writeCommandPacketStr(comQuery, query)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Read Result\n\tvar resLen int\n\tresLen, err = mc.readResultSetHeaderPacket()\n\tif err == nil && resLen > 0 {\n\t\terr = mc.readUntilEOF()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\terr = mc.readUntilEOF()\n\t}\n\n\treturn\n}\n\nfunc (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {\n\tif len(args) == 0 { \/\/ no args, fastpath\n\t\t\/\/ Send command\n\t\terr := mc.writeCommandPacketStr(comQuery, query)\n\t\tif err == nil {\n\t\t\t\/\/ Read Result\n\t\t\tvar resLen int\n\t\t\tresLen, err = mc.readResultSetHeaderPacket()\n\t\t\tif err == nil {\n\t\t\t\trows := &mysqlRows{mc, false, nil, false}\n\n\t\t\t\tif resLen > 0 {\n\t\t\t\t\t\/\/ Columns\n\t\t\t\t\trows.columns, err = mc.readColumns(resLen)\n\t\t\t\t}\n\t\t\t\treturn rows, err\n\t\t\t}\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\t\/\/ with args, must use prepared stmt\n\treturn nil, driver.ErrSkip\n}\n\n\/\/ Gets the value of the given MySQL System Variable\n\/\/ The returned byte slice is only valid until the next read\nfunc (mc *mysqlConn) getSystemVar(name string) (val []byte, err error) {\n\t\/\/ Send command\n\terr = mc.writeCommandPacketStr(comQuery, \"SELECT @@\"+name)\n\tif err == nil {\n\t\t\/\/ Read Result\n\t\tvar resLen int\n\t\tresLen, err = mc.readResultSetHeaderPacket()\n\t\tif err == nil {\n\t\t\trows := &mysqlRows{mc, false, nil, false}\n\n\t\t\tif resLen > 0 {\n\t\t\t\t\/\/ Columns\n\t\t\t\trows.columns, err = mc.readColumns(resLen)\n\t\t\t}\n\n\t\t\tdest := make([]driver.Value, resLen)\n\t\t\terr = rows.readRow(dest)\n\t\t\tif err == nil {\n\t\t\t\tval = dest[0].([]byte)\n\t\t\t\terr = mc.readUntilEOF()\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage printers\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n)\n\nvar (\n\tinternalObjectPrinterErr = \"a versioned object must be passed to a printer\"\n\n\t\/\/ disallowedPackagePrefixes contains regular expression templates\n\t\/\/ for object package paths that are not allowed by printers.\n\tdisallowedPackagePrefixes = []string{\n\t\t\"k8s.io\/kubernetes\/pkg\/apis\/\",\n\t}\n)\n\nvar internalObjectPreventer = &illegalPackageSourceChecker{disallowedPackagePrefixes}\n\ntype NoCompatiblePrinterError struct {\n\tOutputFormat *string\n\tOptions interface{}\n}\n\nfunc (e NoCompatiblePrinterError) Error() string {\n\toutput := \"\"\n\tif e.OutputFormat != nil {\n\t\toutput = *e.OutputFormat\n\t}\n\n\treturn fmt.Sprintf(\"unable to match a printer suitable for the output format %q and the options specified: %#v\", output, e.Options)\n}\n\nfunc IsNoCompatiblePrinterError(err error) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\n\t_, ok := err.(NoCompatiblePrinterError)\n\treturn ok\n}\n\nfunc IsInternalObjectError(err error) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\n\treturn err.Error() == internalObjectPrinterErr\n}\n\n\/\/ PrintFlags composes common printer flag structs\n\/\/ used across all commands, and provides a method\n\/\/ of retrieving a known printer based on flag values provided.\ntype PrintFlags struct {\n\tJSONYamlPrintFlags *JSONYamlPrintFlags\n\tNamePrintFlags *NamePrintFlags\n\n\tOutputFormat *string\n\n\tScheme runtime.ObjectConvertor\n}\n\nfunc (f *PrintFlags) Complete(successTemplate string) error {\n\treturn f.NamePrintFlags.Complete(successTemplate)\n}\n\nfunc (f *PrintFlags) ToPrinter() (ResourcePrinter, error) {\n\toutputFormat := \"\"\n\tif f.OutputFormat != nil {\n\t\toutputFormat = *f.OutputFormat\n\t}\n\n\tif f.JSONYamlPrintFlags != nil {\n\t\tif p, err := f.JSONYamlPrintFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) {\n\t\t\treturn p, err\n\t\t}\n\t}\n\n\tif f.NamePrintFlags != nil {\n\t\tif p, err := f.NamePrintFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) {\n\t\t\treturn p, err\n\t\t}\n\t}\n\n\treturn nil, NoCompatiblePrinterError{Options: f, OutputFormat: f.OutputFormat}\n}\n\nfunc (f *PrintFlags) AddFlags(cmd *cobra.Command) {\n\tf.JSONYamlPrintFlags.AddFlags(cmd)\n\tf.NamePrintFlags.AddFlags(cmd)\n\n\tif f.OutputFormat != nil {\n\t\tcmd.Flags().StringVarP(f.OutputFormat, \"output\", \"o\", *f.OutputFormat, \"Output format. One of: json|yaml|wide|name|custom-columns=...|custom-columns-file=...|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See custom columns [http:\/\/kubernetes.io\/docs\/user-guide\/kubectl-overview\/#custom-columns], golang template [http:\/\/golang.org\/pkg\/text\/template\/#pkg-overview] and jsonpath template [http:\/\/kubernetes.io\/docs\/user-guide\/jsonpath].\")\n\t}\n}\n\n\/\/ WithDefaultOutput sets a default output format if one is not provided through a flag value\nfunc (f *PrintFlags) WithDefaultOutput(output string) *PrintFlags {\n\tf.OutputFormat = &output\n\treturn f\n}\n\nfunc NewPrintFlags(operation string, scheme runtime.ObjectConvertor) *PrintFlags {\n\toutputFormat := \"\"\n\n\treturn &PrintFlags{\n\t\tOutputFormat: &outputFormat,\n\n\t\tScheme: scheme,\n\n\t\tJSONYamlPrintFlags: NewJSONYamlPrintFlags(scheme),\n\t\tNamePrintFlags: NewNamePrintFlags(operation, scheme),\n\t}\n}\n\n\/\/ illegalPackageSourceChecker compares a given\n\/\/ object's package path, and determines if the\n\/\/ object originates from a disallowed source.\ntype illegalPackageSourceChecker struct {\n\t\/\/ disallowedPrefixes is a slice of disallowed package path\n\t\/\/ prefixes for a given runtime.Object that we are printing.\n\tdisallowedPrefixes []string\n}\n\nfunc (c *illegalPackageSourceChecker) IsForbidden(pkgPath string) bool {\n\tfor _, forbiddenPrefix := range c.disallowedPrefixes {\n\t\tif strings.HasPrefix(pkgPath, forbiddenPrefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n<commit_msg>remove unused PrintFlags.Scheme<commit_after>\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage printers\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n)\n\nvar (\n\tinternalObjectPrinterErr = \"a versioned object must be passed to a printer\"\n\n\t\/\/ disallowedPackagePrefixes contains regular expression templates\n\t\/\/ for object package paths that are not allowed by printers.\n\tdisallowedPackagePrefixes = []string{\n\t\t\"k8s.io\/kubernetes\/pkg\/apis\/\",\n\t}\n)\n\nvar internalObjectPreventer = &illegalPackageSourceChecker{disallowedPackagePrefixes}\n\ntype NoCompatiblePrinterError struct {\n\tOutputFormat *string\n\tOptions interface{}\n}\n\nfunc (e NoCompatiblePrinterError) Error() string {\n\toutput := \"\"\n\tif e.OutputFormat != nil {\n\t\toutput = *e.OutputFormat\n\t}\n\n\treturn fmt.Sprintf(\"unable to match a printer suitable for the output format %q and the options specified: %#v\", output, e.Options)\n}\n\nfunc IsNoCompatiblePrinterError(err error) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\n\t_, ok := err.(NoCompatiblePrinterError)\n\treturn ok\n}\n\nfunc IsInternalObjectError(err error) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\n\treturn err.Error() == internalObjectPrinterErr\n}\n\n\/\/ PrintFlags composes common printer flag structs\n\/\/ used across all commands, and provides a method\n\/\/ of retrieving a known printer based on flag values provided.\ntype PrintFlags struct {\n\tJSONYamlPrintFlags *JSONYamlPrintFlags\n\tNamePrintFlags *NamePrintFlags\n\n\tOutputFormat *string\n}\n\nfunc (f *PrintFlags) Complete(successTemplate string) error {\n\treturn f.NamePrintFlags.Complete(successTemplate)\n}\n\nfunc (f *PrintFlags) ToPrinter() (ResourcePrinter, error) {\n\toutputFormat := \"\"\n\tif f.OutputFormat != nil {\n\t\toutputFormat = *f.OutputFormat\n\t}\n\n\tif f.JSONYamlPrintFlags != nil {\n\t\tif p, err := f.JSONYamlPrintFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) {\n\t\t\treturn p, err\n\t\t}\n\t}\n\n\tif f.NamePrintFlags != nil {\n\t\tif p, err := f.NamePrintFlags.ToPrinter(outputFormat); !IsNoCompatiblePrinterError(err) {\n\t\t\treturn p, err\n\t\t}\n\t}\n\n\treturn nil, NoCompatiblePrinterError{Options: f, OutputFormat: f.OutputFormat}\n}\n\nfunc (f *PrintFlags) AddFlags(cmd *cobra.Command) {\n\tf.JSONYamlPrintFlags.AddFlags(cmd)\n\tf.NamePrintFlags.AddFlags(cmd)\n\n\tif f.OutputFormat != nil {\n\t\tcmd.Flags().StringVarP(f.OutputFormat, \"output\", \"o\", *f.OutputFormat, \"Output format. One of: json|yaml|wide|name|custom-columns=...|custom-columns-file=...|go-template=...|go-template-file=...|jsonpath=...|jsonpath-file=... See custom columns [http:\/\/kubernetes.io\/docs\/user-guide\/kubectl-overview\/#custom-columns], golang template [http:\/\/golang.org\/pkg\/text\/template\/#pkg-overview] and jsonpath template [http:\/\/kubernetes.io\/docs\/user-guide\/jsonpath].\")\n\t}\n}\n\n\/\/ WithDefaultOutput sets a default output format if one is not provided through a flag value\nfunc (f *PrintFlags) WithDefaultOutput(output string) *PrintFlags {\n\tf.OutputFormat = &output\n\treturn f\n}\n\nfunc NewPrintFlags(operation string, scheme runtime.ObjectConvertor) *PrintFlags {\n\toutputFormat := \"\"\n\n\treturn &PrintFlags{\n\t\tOutputFormat: &outputFormat,\n\n\t\tJSONYamlPrintFlags: NewJSONYamlPrintFlags(scheme),\n\t\tNamePrintFlags: NewNamePrintFlags(operation, scheme),\n\t}\n}\n\n\/\/ illegalPackageSourceChecker compares a given\n\/\/ object's package path, and determines if the\n\/\/ object originates from a disallowed source.\ntype illegalPackageSourceChecker struct {\n\t\/\/ disallowedPrefixes is a slice of disallowed package path\n\t\/\/ prefixes for a given runtime.Object that we are printing.\n\tdisallowedPrefixes []string\n}\n\nfunc (c *illegalPackageSourceChecker) IsForbidden(pkgPath string) bool {\n\tfor _, forbiddenPrefix := range c.disallowedPrefixes {\n\t\tif strings.HasPrefix(pkgPath, forbiddenPrefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage kpa\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"go.opencensus.io\/stats\"\n\t\"go.uber.org\/zap\"\n\n\tnv1alpha1 \"knative.dev\/networking\/pkg\/apis\/networking\/v1alpha1\"\n\t\"knative.dev\/pkg\/logging\"\n\tpkgmetrics \"knative.dev\/pkg\/metrics\"\n\t\"knative.dev\/pkg\/ptr\"\n\tpkgreconciler \"knative.dev\/pkg\/reconciler\"\n\tpav1alpha1 \"knative.dev\/serving\/pkg\/apis\/autoscaling\/v1alpha1\"\n\t\"knative.dev\/serving\/pkg\/apis\/serving\"\n\t\"knative.dev\/serving\/pkg\/autoscaler\/scaling\"\n\tpareconciler \"knative.dev\/serving\/pkg\/client\/injection\/reconciler\/autoscaling\/v1alpha1\/podautoscaler\"\n\t\"knative.dev\/serving\/pkg\/metrics\"\n\tareconciler \"knative.dev\/serving\/pkg\/reconciler\/autoscaling\"\n\t\"knative.dev\/serving\/pkg\/reconciler\/autoscaling\/config\"\n\t\"knative.dev\/serving\/pkg\/reconciler\/autoscaling\/kpa\/resources\"\n\tanames \"knative.dev\/serving\/pkg\/reconciler\/autoscaling\/resources\/names\"\n\tresourceutil \"knative.dev\/serving\/pkg\/resources\"\n\n\t\"k8s.io\/apimachinery\/pkg\/api\/equality\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tcorev1listers \"k8s.io\/client-go\/listers\/core\/v1\"\n)\n\nconst (\n\tnoPrivateServiceName = \"No Private Service Name\"\n\tnoTrafficReason = \"NoTraffic\"\n)\n\n\/\/ podCounts keeps record of various numbers of pods\n\/\/ for each revision.\ntype podCounts struct {\n\twant int\n\tready int\n\tnotReady int\n\tpending int\n\tterminating int\n}\n\n\/\/ Reconciler tracks PAs and right sizes the ScaleTargetRef based on the\n\/\/ information from Deciders.\ntype Reconciler struct {\n\t*areconciler.Base\n\n\tpodsLister corev1listers.PodLister\n\tdeciders resources.Deciders\n\tscaler *scaler\n}\n\n\/\/ Check that our Reconciler implements pareconciler.Interface\nvar _ pareconciler.Interface = (*Reconciler)(nil)\n\nfunc (c *Reconciler) ReconcileKind(ctx context.Context, pa *pav1alpha1.PodAutoscaler) pkgreconciler.Event {\n\tlogger := logging.FromContext(ctx)\n\n\t\/\/ We need the SKS object in order to optimize scale to zero\n\t\/\/ performance. It is OK if SKS is nil at this point.\n\tsksName := anames.SKS(pa.Name)\n\tsks, err := c.SKSLister.ServerlessServices(pa.Namespace).Get(sksName)\n\tif err != nil && !errors.IsNotFound(err) {\n\t\tlogger.Warnw(\"Error retrieving SKS for Scaler\", zap.Error(err))\n\t}\n\n\t\/\/ Having an SKS and its PrivateServiceName is a prerequisite for all upcoming steps.\n\tif sks == nil || sks.Status.PrivateServiceName == \"\" {\n\t\t\/\/ Before we can reconcile decider and get real number of activators\n\t\t\/\/ we start with default of 2.\n\t\tif _, err = c.ReconcileSKS(ctx, pa, nv1alpha1.SKSOperationModeServe, 0 \/*numActivators == all*\/); err != nil {\n\t\t\treturn fmt.Errorf(\"error reconciling SKS: %w\", err)\n\t\t}\n\t\tpa.Status.MarkSKSNotReady(noPrivateServiceName) \/\/ In both cases this is true.\n\t\treturn computeStatus(ctx, pa, podCounts{want: scaleUnknown}, logger)\n\t}\n\n\tpa.Status.MetricsServiceName = sks.Status.PrivateServiceName\n\tdecider, err := c.reconcileDecider(ctx, pa)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error reconciling Decider: %w\", err)\n\t}\n\n\tif err := c.ReconcileMetric(ctx, pa, resolveScrapeTarget(ctx, pa)); err != nil {\n\t\treturn fmt.Errorf(\"error reconciling Metric: %w\", err)\n\t}\n\n\t\/\/ Get the appropriate current scale from the metric, and right size\n\t\/\/ the scaleTargetRef based on it.\n\twant, err := c.scaler.scale(ctx, pa, sks, decider.Status.DesiredScale)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error scaling target: %w\", err)\n\t}\n\n\tmode := nv1alpha1.SKSOperationModeServe\n\t\/\/ We put activator in the serving path in the following cases:\n\t\/\/ 1. The revision is scaled to 0:\n\t\/\/ a. want == 0\n\t\/\/ b. want == -1 && PA is inactive (Autoscaler has no previous knowledge of\n\t\/\/\t\t\tthis revision, e.g. after a restart) but PA status is inactive (it was\n\t\/\/\t\t\talready scaled to 0).\n\t\/\/ 2. The excess burst capacity is negative.\n\tif want == 0 || decider.Status.ExcessBurstCapacity < 0 || want == scaleUnknown && pa.Status.IsInactive() {\n\t\tlogger.Infof(\"SKS should be in proxy mode: want = %d, ebc = %d, #act's = %d PA Inactive? = %v\",\n\t\t\twant, decider.Status.ExcessBurstCapacity, decider.Status.NumActivators,\n\t\t\tpa.Status.IsInactive())\n\t\tmode = nv1alpha1.SKSOperationModeProxy\n\t}\n\n\t\/\/ If we have not successfully reconciled Decider yet, NumActivators will be 0 and\n\t\/\/ we'll use all activators to back this revision.\n\tsks, err = c.ReconcileSKS(ctx, pa, mode, decider.Status.NumActivators)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error reconciling SKS: %w\", err)\n\t}\n\t\/\/ Propagate service name.\n\tpa.Status.ServiceName = sks.Status.ServiceName\n\n\t\/\/ Compare the desired and observed resources to determine our situation.\n\tpodCounter := resourceutil.NewPodAccessor(c.podsLister, pa.Namespace, pa.Labels[serving.RevisionLabelKey])\n\tready, notReady, pending, terminating, err := podCounter.PodCountsByState()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting pod counts %s: %w\", sks.Status.PrivateServiceName, err)\n\t}\n\t\/\/ If SKS is not ready — ensure we're not becoming ready.\n\t\/\/ TODO: see if we can perhaps propagate the SKS state to computing active status.\n\tif sks.IsReady() {\n\t\tlogger.Debug(\"SKS is ready, marking SKS status ready\")\n\t\tpa.Status.MarkSKSReady()\n\t} else {\n\t\tlogger.Debug(\"SKS is not ready, marking SKS status not ready\")\n\t\tpa.Status.MarkSKSNotReady(sks.Status.GetCondition(nv1alpha1.ServerlessServiceConditionReady).Message)\n\t}\n\n\tlogger.Infof(\"PA scale got=%d, want=%d, desiredPods=%d ebc=%d\", ready, want,\n\t\tdecider.Status.DesiredScale, decider.Status.ExcessBurstCapacity)\n\n\tpc := podCounts{\n\t\twant: int(want),\n\t\tready: ready,\n\t\tnotReady: notReady,\n\t\tpending: pending,\n\t\tterminating: terminating,\n\t}\n\tlogger.Infof(\"Observed pod counts=%#v\", pc)\n\treturn computeStatus(ctx, pa, pc, logger)\n}\n\nfunc (c *Reconciler) reconcileDecider(ctx context.Context, pa *pav1alpha1.PodAutoscaler) (*scaling.Decider, error) {\n\tdesiredDecider := resources.MakeDecider(ctx, pa, config.FromContext(ctx).Autoscaler)\n\tdecider, err := c.deciders.Get(ctx, desiredDecider.Namespace, desiredDecider.Name)\n\tif errors.IsNotFound(err) {\n\t\tdecider, err = c.deciders.Create(ctx, desiredDecider)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error creating Decider: %w\", err)\n\t\t}\n\t} else if err != nil {\n\t\treturn nil, fmt.Errorf(\"error fetching Decider: %w\", err)\n\t}\n\n\t\/\/ Ignore status when reconciling\n\tdesiredDecider.Status = decider.Status\n\tif !equality.Semantic.DeepEqual(desiredDecider, decider) {\n\t\tdecider, err = c.deciders.Update(ctx, desiredDecider)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error updating decider: %w\", err)\n\t\t}\n\t}\n\treturn decider, nil\n}\n\nfunc computeStatus(ctx context.Context, pa *pav1alpha1.PodAutoscaler, pc podCounts, logger *zap.SugaredLogger) error {\n\tpa.Status.DesiredScale, pa.Status.ActualScale = ptr.Int32(int32(pc.want)), ptr.Int32(int32(pc.ready))\n\n\tif err := reportMetrics(pa, pc); err != nil {\n\t\treturn fmt.Errorf(\"error reporting metrics: %w\", err)\n\t}\n\n\tcomputeActiveCondition(ctx, pa, pc)\n\tlogger.Debugf(\"PA Status after reconcile: %#v\", pa.Status.Status)\n\n\treturn nil\n}\n\nfunc reportMetrics(pa *pav1alpha1.PodAutoscaler, pc podCounts) error {\n\tserviceLabel := pa.Labels[serving.ServiceLabelKey] \/\/ This might be empty.\n\tconfigLabel := pa.Labels[serving.ConfigurationLabelKey]\n\n\tctx, err := metrics.RevisionContext(pa.Namespace, serviceLabel, configLabel, pa.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstats := []stats.Measurement{\n\t\tactualPodCountM.M(int64(pc.ready)), notReadyPodCountM.M(int64(pc.notReady)),\n\t\tpendingPodCountM.M(int64(pc.pending)), terminatingPodCountM.M(int64(pc.terminating)),\n\t}\n\t\/\/ Negative \"want\" values represent an empty metrics pipeline and thus no specific request is being made.\n\tif pc.want >= 0 {\n\t\tstats = append(stats, requestedPodCountM.M(int64(pc.want)))\n\t}\n\tpkgmetrics.RecordBatch(ctx, stats...)\n\treturn nil\n}\n\n\/\/ computeActiveCondition updates the status of a PA given the current scale (got), desired scale (want)\n\/\/ active threshold (min), and the current status, as per the following table:\n\/\/\n\/\/ | Want | Got | min | Status | New status |\n\/\/ | 0 | <any> | <any> | <any> | inactive |\n\/\/ | >0 | < min | <any> | <any> | activating |\n\/\/ | >0 | >= min | <any> | <any> | active |\n\/\/ | -1 | < min | <any> | inactive | inactive |\n\/\/ | -1 | < min | <any> | activating | activating |\n\/\/ | -1 | < min | <any> | active | activating |\n\/\/ | -1 | >= min | <any> | inactive | inactive |\n\/\/ | -1 | >= min | 0 | activating | inactive |\n\/\/ | -1 | >= min | 0 | active | inactive | <-- this case technically is impossible.\n\/\/ | -1 | >= min | >0 | activating | active |\n\/\/ | -1 | >= min | >0 | active | active |\nfunc computeActiveCondition(ctx context.Context, pa *pav1alpha1.PodAutoscaler, pc podCounts) {\n\tminReady := activeThreshold(ctx, pa)\n\t\/\/ In pre-0.17 we could have scaled down normally without ever setting ScaleTargetInitialized.\n\t\/\/ In this case we'll be in the NoTraffic\/inactive state.\n\t\/\/ TODO(taragu): remove after 0.19\n\talreadyScaledDownSuccessfully := minReady > 0 && pa.Status.GetCondition(pav1alpha1.PodAutoscalerConditionActive).Reason == noTrafficReason\n\tif pc.ready >= minReady || alreadyScaledDownSuccessfully {\n\t\tpa.Status.MarkScaleTargetInitialized()\n\t}\n\n\tswitch {\n\t\/\/ Need to check for minReady = 0 because in the initialScale 0 case, pc.want will be -1.\n\tcase pc.want == 0 || minReady == 0:\n\t\tif pa.Status.IsActivating() && minReady > 0 {\n\t\t\t\/\/ We only ever scale to zero while activating if we fail to activate within the progress deadline.\n\t\t\tpa.Status.MarkInactive(\"TimedOut\", \"The target could not be activated.\")\n\t\t} else {\n\t\t\tpa.Status.MarkInactive(noTrafficReason, \"The target is not receiving traffic.\")\n\t\t}\n\n\tcase pc.ready < minReady:\n\t\tif pc.want > 0 || !pa.Status.IsInactive() {\n\t\t\tpa.Status.MarkActivating(\n\t\t\t\t\"Queued\", \"Requests to the target are being buffered as resources are provisioned.\")\n\t\t} else {\n\t\t\t\/\/ This is for the initialScale 0 case. In the first iteration, minReady is 0,\n\t\t\t\/\/ but for the following iterations, minReady is 1. pc.want will continue being\n\t\t\t\/\/ -1 until we start receiving metrics, so we will end up here.\n\t\t\t\/\/ Even though PA is already been marked as inactive in the first iteration, we\n\t\t\t\/\/ still need to set it again. Otherwise reconciliation will fail with NewObservedGenFailure\n\t\t\t\/\/ because we cannot go through one iteration of reconciliation without setting\n\t\t\t\/\/ some status.\n\t\t\tpa.Status.MarkInactive(noTrafficReason, \"The target is not receiving traffic.\")\n\t\t}\n\n\tcase pc.ready >= minReady:\n\t\tif pc.want > 0 || !pa.Status.IsInactive() {\n\t\t\tpa.Status.MarkActive()\n\t\t}\n\t}\n}\n\n\/\/ activeThreshold returns the scale required for the pa to be marked Active\nfunc activeThreshold(ctx context.Context, pa *pav1alpha1.PodAutoscaler) int {\n\tasConfig := config.FromContext(ctx).Autoscaler\n\tmin, _ := pa.ScaleBounds(asConfig)\n\tif !pa.Status.IsScaleTargetInitialized() {\n\t\tinitialScale := resources.GetInitialScale(asConfig, pa)\n\t\treturn int(intMax(min, initialScale))\n\t}\n\treturn int(intMax(min, 1))\n}\n\n\/\/ resolveScrapeTarget returns metric service name to be scraped based on TBC configuration\n\/\/ TBC == -1 => activator in path, don't scrape the service\nfunc resolveScrapeTarget(ctx context.Context, pa *pav1alpha1.PodAutoscaler) string {\n\ttbc := resolveTBC(ctx, pa)\n\tif tbc == -1 {\n\t\treturn \"\"\n\t}\n\n\treturn pa.Status.MetricsServiceName\n}\n\nfunc resolveTBC(ctx context.Context, pa *pav1alpha1.PodAutoscaler) float64 {\n\tif v, ok := pa.TargetBC(); ok {\n\t\treturn v\n\t}\n\n\treturn config.FromContext(ctx).Autoscaler.TargetBurstCapacity\n}\n\nfunc intMax(a, b int32) int32 {\n\tif a < b {\n\t\treturn b\n\t}\n\treturn a\n}\n<commit_msg>Add log message when Serve mode happens (#9159)<commit_after>\/*\nCopyright 2018 The Knative Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage kpa\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"go.opencensus.io\/stats\"\n\t\"go.uber.org\/zap\"\n\n\tnv1alpha1 \"knative.dev\/networking\/pkg\/apis\/networking\/v1alpha1\"\n\t\"knative.dev\/pkg\/logging\"\n\tpkgmetrics \"knative.dev\/pkg\/metrics\"\n\t\"knative.dev\/pkg\/ptr\"\n\tpkgreconciler \"knative.dev\/pkg\/reconciler\"\n\tpav1alpha1 \"knative.dev\/serving\/pkg\/apis\/autoscaling\/v1alpha1\"\n\t\"knative.dev\/serving\/pkg\/apis\/serving\"\n\t\"knative.dev\/serving\/pkg\/autoscaler\/scaling\"\n\tpareconciler \"knative.dev\/serving\/pkg\/client\/injection\/reconciler\/autoscaling\/v1alpha1\/podautoscaler\"\n\t\"knative.dev\/serving\/pkg\/metrics\"\n\tareconciler \"knative.dev\/serving\/pkg\/reconciler\/autoscaling\"\n\t\"knative.dev\/serving\/pkg\/reconciler\/autoscaling\/config\"\n\t\"knative.dev\/serving\/pkg\/reconciler\/autoscaling\/kpa\/resources\"\n\tanames \"knative.dev\/serving\/pkg\/reconciler\/autoscaling\/resources\/names\"\n\tresourceutil \"knative.dev\/serving\/pkg\/resources\"\n\n\t\"k8s.io\/apimachinery\/pkg\/api\/equality\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tcorev1listers \"k8s.io\/client-go\/listers\/core\/v1\"\n)\n\nconst (\n\tnoPrivateServiceName = \"No Private Service Name\"\n\tnoTrafficReason = \"NoTraffic\"\n)\n\n\/\/ podCounts keeps record of various numbers of pods\n\/\/ for each revision.\ntype podCounts struct {\n\twant int\n\tready int\n\tnotReady int\n\tpending int\n\tterminating int\n}\n\n\/\/ Reconciler tracks PAs and right sizes the ScaleTargetRef based on the\n\/\/ information from Deciders.\ntype Reconciler struct {\n\t*areconciler.Base\n\n\tpodsLister corev1listers.PodLister\n\tdeciders resources.Deciders\n\tscaler *scaler\n}\n\n\/\/ Check that our Reconciler implements pareconciler.Interface\nvar _ pareconciler.Interface = (*Reconciler)(nil)\n\nfunc (c *Reconciler) ReconcileKind(ctx context.Context, pa *pav1alpha1.PodAutoscaler) pkgreconciler.Event {\n\tlogger := logging.FromContext(ctx)\n\n\t\/\/ We need the SKS object in order to optimize scale to zero\n\t\/\/ performance. It is OK if SKS is nil at this point.\n\tsksName := anames.SKS(pa.Name)\n\tsks, err := c.SKSLister.ServerlessServices(pa.Namespace).Get(sksName)\n\tif err != nil && !errors.IsNotFound(err) {\n\t\tlogger.Warnw(\"Error retrieving SKS for Scaler\", zap.Error(err))\n\t}\n\n\t\/\/ Having an SKS and its PrivateServiceName is a prerequisite for all upcoming steps.\n\tif sks == nil || sks.Status.PrivateServiceName == \"\" {\n\t\t\/\/ Before we can reconcile decider and get real number of activators\n\t\t\/\/ we start with default of 2.\n\t\tif _, err = c.ReconcileSKS(ctx, pa, nv1alpha1.SKSOperationModeServe, 0 \/*numActivators == all*\/); err != nil {\n\t\t\treturn fmt.Errorf(\"error reconciling SKS: %w\", err)\n\t\t}\n\t\tpa.Status.MarkSKSNotReady(noPrivateServiceName) \/\/ In both cases this is true.\n\t\treturn computeStatus(ctx, pa, podCounts{want: scaleUnknown}, logger)\n\t}\n\n\tpa.Status.MetricsServiceName = sks.Status.PrivateServiceName\n\tdecider, err := c.reconcileDecider(ctx, pa)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error reconciling Decider: %w\", err)\n\t}\n\n\tif err := c.ReconcileMetric(ctx, pa, resolveScrapeTarget(ctx, pa)); err != nil {\n\t\treturn fmt.Errorf(\"error reconciling Metric: %w\", err)\n\t}\n\n\t\/\/ Get the appropriate current scale from the metric, and right size\n\t\/\/ the scaleTargetRef based on it.\n\twant, err := c.scaler.scale(ctx, pa, sks, decider.Status.DesiredScale)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error scaling target: %w\", err)\n\t}\n\n\tmode := nv1alpha1.SKSOperationModeServe\n\t\/\/ We put activator in the serving path in the following cases:\n\t\/\/ 1. The revision is scaled to 0:\n\t\/\/ a. want == 0\n\t\/\/ b. want == -1 && PA is inactive (Autoscaler has no previous knowledge of\n\t\/\/\t\t\tthis revision, e.g. after a restart) but PA status is inactive (it was\n\t\/\/\t\t\talready scaled to 0).\n\t\/\/ 2. The excess burst capacity is negative.\n\tif want == 0 || decider.Status.ExcessBurstCapacity < 0 || want == scaleUnknown && pa.Status.IsInactive() {\n\t\tmode = nv1alpha1.SKSOperationModeProxy\n\t}\n\tlogger.Infof(\"SKS should be in %s mode: want = %d, ebc = %d, #act's = %d PA Inactive? = %v\",\n\t\tmode, want, decider.Status.ExcessBurstCapacity, decider.Status.NumActivators,\n\t\tpa.Status.IsInactive())\n\n\t\/\/ If we have not successfully reconciled Decider yet, NumActivators will be 0 and\n\t\/\/ we'll use all activators to back this revision.\n\tsks, err = c.ReconcileSKS(ctx, pa, mode, decider.Status.NumActivators)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error reconciling SKS: %w\", err)\n\t}\n\t\/\/ Propagate service name.\n\tpa.Status.ServiceName = sks.Status.ServiceName\n\n\t\/\/ Compare the desired and observed resources to determine our situation.\n\tpodCounter := resourceutil.NewPodAccessor(c.podsLister, pa.Namespace, pa.Labels[serving.RevisionLabelKey])\n\tready, notReady, pending, terminating, err := podCounter.PodCountsByState()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting pod counts %s: %w\", sks.Status.PrivateServiceName, err)\n\t}\n\t\/\/ If SKS is not ready — ensure we're not becoming ready.\n\t\/\/ TODO: see if we can perhaps propagate the SKS state to computing active status.\n\tif sks.IsReady() {\n\t\tlogger.Debug(\"SKS is ready, marking SKS status ready\")\n\t\tpa.Status.MarkSKSReady()\n\t} else {\n\t\tlogger.Debug(\"SKS is not ready, marking SKS status not ready\")\n\t\tpa.Status.MarkSKSNotReady(sks.Status.GetCondition(nv1alpha1.ServerlessServiceConditionReady).Message)\n\t}\n\n\tlogger.Infof(\"PA scale got=%d, want=%d, desiredPods=%d ebc=%d\", ready, want,\n\t\tdecider.Status.DesiredScale, decider.Status.ExcessBurstCapacity)\n\n\tpc := podCounts{\n\t\twant: int(want),\n\t\tready: ready,\n\t\tnotReady: notReady,\n\t\tpending: pending,\n\t\tterminating: terminating,\n\t}\n\tlogger.Infof(\"Observed pod counts=%#v\", pc)\n\treturn computeStatus(ctx, pa, pc, logger)\n}\n\nfunc (c *Reconciler) reconcileDecider(ctx context.Context, pa *pav1alpha1.PodAutoscaler) (*scaling.Decider, error) {\n\tdesiredDecider := resources.MakeDecider(ctx, pa, config.FromContext(ctx).Autoscaler)\n\tdecider, err := c.deciders.Get(ctx, desiredDecider.Namespace, desiredDecider.Name)\n\tif errors.IsNotFound(err) {\n\t\tdecider, err = c.deciders.Create(ctx, desiredDecider)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error creating Decider: %w\", err)\n\t\t}\n\t} else if err != nil {\n\t\treturn nil, fmt.Errorf(\"error fetching Decider: %w\", err)\n\t}\n\n\t\/\/ Ignore status when reconciling\n\tdesiredDecider.Status = decider.Status\n\tif !equality.Semantic.DeepEqual(desiredDecider, decider) {\n\t\tdecider, err = c.deciders.Update(ctx, desiredDecider)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error updating decider: %w\", err)\n\t\t}\n\t}\n\treturn decider, nil\n}\n\nfunc computeStatus(ctx context.Context, pa *pav1alpha1.PodAutoscaler, pc podCounts, logger *zap.SugaredLogger) error {\n\tpa.Status.DesiredScale, pa.Status.ActualScale = ptr.Int32(int32(pc.want)), ptr.Int32(int32(pc.ready))\n\n\tif err := reportMetrics(pa, pc); err != nil {\n\t\treturn fmt.Errorf(\"error reporting metrics: %w\", err)\n\t}\n\n\tcomputeActiveCondition(ctx, pa, pc)\n\tlogger.Debugf(\"PA Status after reconcile: %#v\", pa.Status.Status)\n\n\treturn nil\n}\n\nfunc reportMetrics(pa *pav1alpha1.PodAutoscaler, pc podCounts) error {\n\tserviceLabel := pa.Labels[serving.ServiceLabelKey] \/\/ This might be empty.\n\tconfigLabel := pa.Labels[serving.ConfigurationLabelKey]\n\n\tctx, err := metrics.RevisionContext(pa.Namespace, serviceLabel, configLabel, pa.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstats := []stats.Measurement{\n\t\tactualPodCountM.M(int64(pc.ready)), notReadyPodCountM.M(int64(pc.notReady)),\n\t\tpendingPodCountM.M(int64(pc.pending)), terminatingPodCountM.M(int64(pc.terminating)),\n\t}\n\t\/\/ Negative \"want\" values represent an empty metrics pipeline and thus no specific request is being made.\n\tif pc.want >= 0 {\n\t\tstats = append(stats, requestedPodCountM.M(int64(pc.want)))\n\t}\n\tpkgmetrics.RecordBatch(ctx, stats...)\n\treturn nil\n}\n\n\/\/ computeActiveCondition updates the status of a PA given the current scale (got), desired scale (want)\n\/\/ active threshold (min), and the current status, as per the following table:\n\/\/\n\/\/ | Want | Got | min | Status | New status |\n\/\/ | 0 | <any> | <any> | <any> | inactive |\n\/\/ | >0 | < min | <any> | <any> | activating |\n\/\/ | >0 | >= min | <any> | <any> | active |\n\/\/ | -1 | < min | <any> | inactive | inactive |\n\/\/ | -1 | < min | <any> | activating | activating |\n\/\/ | -1 | < min | <any> | active | activating |\n\/\/ | -1 | >= min | <any> | inactive | inactive |\n\/\/ | -1 | >= min | 0 | activating | inactive |\n\/\/ | -1 | >= min | 0 | active | inactive | <-- this case technically is impossible.\n\/\/ | -1 | >= min | >0 | activating | active |\n\/\/ | -1 | >= min | >0 | active | active |\nfunc computeActiveCondition(ctx context.Context, pa *pav1alpha1.PodAutoscaler, pc podCounts) {\n\tminReady := activeThreshold(ctx, pa)\n\t\/\/ In pre-0.17 we could have scaled down normally without ever setting ScaleTargetInitialized.\n\t\/\/ In this case we'll be in the NoTraffic\/inactive state.\n\t\/\/ TODO(taragu): remove after 0.19\n\talreadyScaledDownSuccessfully := minReady > 0 && pa.Status.GetCondition(pav1alpha1.PodAutoscalerConditionActive).Reason == noTrafficReason\n\tif pc.ready >= minReady || alreadyScaledDownSuccessfully {\n\t\tpa.Status.MarkScaleTargetInitialized()\n\t}\n\n\tswitch {\n\t\/\/ Need to check for minReady = 0 because in the initialScale 0 case, pc.want will be -1.\n\tcase pc.want == 0 || minReady == 0:\n\t\tif pa.Status.IsActivating() && minReady > 0 {\n\t\t\t\/\/ We only ever scale to zero while activating if we fail to activate within the progress deadline.\n\t\t\tpa.Status.MarkInactive(\"TimedOut\", \"The target could not be activated.\")\n\t\t} else {\n\t\t\tpa.Status.MarkInactive(noTrafficReason, \"The target is not receiving traffic.\")\n\t\t}\n\n\tcase pc.ready < minReady:\n\t\tif pc.want > 0 || !pa.Status.IsInactive() {\n\t\t\tpa.Status.MarkActivating(\n\t\t\t\t\"Queued\", \"Requests to the target are being buffered as resources are provisioned.\")\n\t\t} else {\n\t\t\t\/\/ This is for the initialScale 0 case. In the first iteration, minReady is 0,\n\t\t\t\/\/ but for the following iterations, minReady is 1. pc.want will continue being\n\t\t\t\/\/ -1 until we start receiving metrics, so we will end up here.\n\t\t\t\/\/ Even though PA is already been marked as inactive in the first iteration, we\n\t\t\t\/\/ still need to set it again. Otherwise reconciliation will fail with NewObservedGenFailure\n\t\t\t\/\/ because we cannot go through one iteration of reconciliation without setting\n\t\t\t\/\/ some status.\n\t\t\tpa.Status.MarkInactive(noTrafficReason, \"The target is not receiving traffic.\")\n\t\t}\n\n\tcase pc.ready >= minReady:\n\t\tif pc.want > 0 || !pa.Status.IsInactive() {\n\t\t\tpa.Status.MarkActive()\n\t\t}\n\t}\n}\n\n\/\/ activeThreshold returns the scale required for the pa to be marked Active\nfunc activeThreshold(ctx context.Context, pa *pav1alpha1.PodAutoscaler) int {\n\tasConfig := config.FromContext(ctx).Autoscaler\n\tmin, _ := pa.ScaleBounds(asConfig)\n\tif !pa.Status.IsScaleTargetInitialized() {\n\t\tinitialScale := resources.GetInitialScale(asConfig, pa)\n\t\treturn int(intMax(min, initialScale))\n\t}\n\treturn int(intMax(min, 1))\n}\n\n\/\/ resolveScrapeTarget returns metric service name to be scraped based on TBC configuration\n\/\/ TBC == -1 => activator in path, don't scrape the service\nfunc resolveScrapeTarget(ctx context.Context, pa *pav1alpha1.PodAutoscaler) string {\n\ttbc := resolveTBC(ctx, pa)\n\tif tbc == -1 {\n\t\treturn \"\"\n\t}\n\n\treturn pa.Status.MetricsServiceName\n}\n\nfunc resolveTBC(ctx context.Context, pa *pav1alpha1.PodAutoscaler) float64 {\n\tif v, ok := pa.TargetBC(); ok {\n\t\treturn v\n\t}\n\n\treturn config.FromContext(ctx).Autoscaler.TargetBurstCapacity\n}\n\nfunc intMax(a, b int32) int32 {\n\tif a < b {\n\t\treturn b\n\t}\n\treturn a\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package ucloud provides ...\npackage ucloud\n\nimport (\n\t\"bytes\"\n\t\"crypto\/sha1\"\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t_ \"reflect\"\n)\n\ntype UcloudApiClient struct {\n\tbaseURL string\n\tpublicKey string\n\tprivateKey string\n\tregionId string\n\tzoneId string\n\tconn *http.Client\n}\n\nfunc NewUcloudApiClient(baseURL, publicKey, privateKey, regionId, zoneId string) *UcloudApiClient {\n\n\tconn := &http.Client{}\n\treturn &UcloudApiClient{baseURL, publicKey, privateKey, regionId, zoneId, conn}\n}\n\nfunc (u *UcloudApiClient) verify_ac(params interface{}) ([]byte, error) {\n\n\tvar buf bytes.Buffer\n\tb, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\tbuf.Write(b)\n\tbuf.WriteString(u.privateKey)\n\n\th := sha1.New()\n\n\treturn h.Sum(buf.Bytes()), nil\n}\n\nfunc (u *UcloudApiClient) MakeReqParams(origin_req []byte) []byte {\n\n\treturn []byte{}\n\n}\n\nfunc (u *UcloudApiClient) Get(url string, params interface{}) {\n\n}\n\nfunc (u *UcloudApiClient) Post(url string, params interface{}) {\n}\n\nfunc (u *UcloudApiClient) Put(url string, params interface{}) {\n\n}\nfunc (u *UcloudApiClient) Delete(url string, params interface{}) {\n\n}\n<commit_msg>API Get complete<commit_after>\/\/ Package ucloud provides ...\npackage ucloud\n\nimport (\n\t\"bytes\"\n\t\"crypto\/sha1\"\n\t_ \"encoding\/json\"\n\t\"net\/http\"\n\tURL \"net\/url\"\n\t\"sort\"\n\n\t\"github.com\/mengzhuo\/gopystr\"\n)\n\ntype UcloudApiClient struct {\n\tbaseURL string\n\tpublicKey string\n\tprivateKey string\n\tregionId string\n\tzoneId string\n\tconn *http.Client\n}\n\nfunc NewUcloudApiClient(baseURL, publicKey, privateKey, regionId, zoneId string) *UcloudApiClient {\n\n\tconn := &http.Client{}\n\treturn &UcloudApiClient{baseURL, publicKey, privateKey, regionId, zoneId, conn}\n}\n\nfunc (u *UcloudApiClient) verify_ac(params map[string]interface{}) []byte {\n\n\tvar buf bytes.Buffer\n\n\tkeys := make([]string, len(params))\n\ti := 0\n\tfor k, _ := range params {\n\t\tkeys[i] = k\n\t}\n\tsort.Strings(keys)\n\n\tfor _, k := range keys {\n\t\tbuf.WriteString(k)\n\t\tbuf.WriteString(gopystr.Str(params[k]))\n\t}\n\tbuf.WriteString(u.privateKey)\n\n\th := sha1.New()\n\n\treturn h.Sum(buf.Bytes())\n}\n\nfunc (u *UcloudApiClient) Get(url string, params map[string]interface{}) (*http.Response, error) {\n\n\tdata := URL.Values{}\n\tfor k, v := range params {\n\t\tdata.Set(k, gopystr.Str(v))\n\t}\n\tdata.Set(\"Signature\", string(u.verify_ac(params)))\n\tr, _ := http.NewRequest(\"GET\", url, bytes.NewBufferString(data.Encode()))\n\treturn u.conn.Do(r)\n}\n<|endoftext|>"} {"text":"<commit_before>package rbac\n\nimport (\n\t\"context\"\n\t\"regexp\"\n\n\tv1 \"k8s.io\/client-go\/kubernetes\/typed\/core\/v1\"\n\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\trbacv1 \"k8s.io\/api\/rbac\/v1\"\n\n\tmf \"github.com\/manifestival\/manifestival\"\n\t\"github.com\/tektoncd\/operator\/pkg\/apis\/operator\/v1alpha1\"\n\tclientset \"github.com\/tektoncd\/operator\/pkg\/client\/clientset\/versioned\"\n\t\"github.com\/tektoncd\/operator\/pkg\/reconciler\/common\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\tnsreconciler \"knative.dev\/pkg\/client\/injection\/kube\/reconciler\/core\/v1\/namespace\"\n\t\"knative.dev\/pkg\/logging\"\n\tpkgreconciler \"knative.dev\/pkg\/reconciler\"\n)\n\n\/\/ Reconciler implements controller.Reconciler for TektonPipeline resources.\ntype Reconciler struct {\n\t\/\/ kubeClientSet allows us to talk to the k8s for core APIs\n\tkubeClientSet kubernetes.Interface\n\t\/\/ operatorClientSet allows us to configure operator objects\n\toperatorClientSet clientset.Interface\n\t\/\/ manifest is empty, but with a valid client and logger. all\n\t\/\/ manifests are immutable, and any created during reconcile are\n\t\/\/ expected to be appended to this one, obviating the passing of\n\t\/\/ client & logger\n\tmanifest mf.Manifest\n\t\/\/ Platform-specific behavior to affect the transform\n\textension common.Extension\n}\n\n\/\/ Check that our Reconciler implements controller.Reconciler\nvar _ nsreconciler.Interface = (*Reconciler)(nil)\n\nconst (\n\tpipelineAnyuid = \"pipeline-anyuid\"\n\tpipelineSA = \"pipeline\"\n\tserviceCABundleCofigMap = \"config-service-cabundle\"\n\ttrustedCABundleConfigMap = \"config-trusted-cabundle\"\n)\n\n\/\/ FinalizeKind removes all resources after deletion of a TektonPipelines.\nfunc (r *Reconciler) FinalizeKind(ctx context.Context, original *v1alpha1.TektonPipeline) pkgreconciler.Event {\n\treturn nil\n}\n\n\/\/ ReconcileKind compares the actual state with the desired, and attempts to\n\/\/ converge the two.\nfunc (r *Reconciler) ReconcileKind(ctx context.Context, ns *corev1.Namespace) pkgreconciler.Event {\n\tlogger := logging.FromContext(ctx)\n\tlogger.Infow(\"Reconciling Namespace: Platform Openshift\", \"status\", ns.GetName())\n\n\tignorePattern := \"^(openshift|kube)-\"\n\tif ignore, _ := regexp.MatchString(ignorePattern, ns.GetName()); ignore {\n\t\tlogger.Infow(\"Reconciling Namespace: IGNORE\", \"status\", ns.GetName())\n\t\treturn nil\n\t}\n\n\tlogger.Infow(\"Reconciling inject CA bundle configmap in \", \"Namespace\", ns.GetName())\n\tif err := r.ensureCABundles(ctx, ns); err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Infow(\"Reconciling Default SA in \", \"Namespace\", ns.GetName())\n\n\tsa, err := r.ensureSA(ctx, ns)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Maintaining a separate cluster role for the scc declaration.\n\t\/\/ to assist us in managing this the scc association in a\n\t\/\/ granular way.\n\tif err := r.ensureSCClusterRole(ctx); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.ensureSCCRoleBinding(ctx, sa); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.ensureRoleBindings(ctx, sa); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (r *Reconciler) ensureCABundles(ctx context.Context, ns *corev1.Namespace) error {\n\tlogger := logging.FromContext(ctx)\n\tcfgInterface := r.kubeClientSet.CoreV1().ConfigMaps(ns.Name)\n\n\t\/\/ Ensure trusted CA bundle\n\tlogger.Info(\"finding configmap: %s\/%s\", ns.Name, trustedCABundleConfigMap)\n\t_, err := cfgInterface.Get(ctx, trustedCABundleConfigMap, metav1.GetOptions{})\n\tif err != nil && !errors.IsNotFound(err) {\n\t\treturn err\n\t}\n\tif err != nil && errors.IsNotFound(err) {\n\t\tlogger.Info(\"creating configmap\", trustedCABundleConfigMap, \"ns\", ns.Name)\n\t\tif err := createTrustedCABundleConfigMap(ctx, cfgInterface, trustedCABundleConfigMap, ns.Name); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Ensure service CA bundle\n\tlogger.Info(\"finding configmap: %s\/%s\", ns.Name, serviceCABundleCofigMap)\n\t_, err = cfgInterface.Get(ctx, serviceCABundleCofigMap, metav1.GetOptions{})\n\tif err != nil && !errors.IsNotFound(err) {\n\t\treturn err\n\t}\n\tif err != nil && errors.IsNotFound(err) {\n\t\tlogger.Info(\"creating configmap\", serviceCABundleCofigMap, \"ns\", ns.Name)\n\t\tif err := createServiceCABundleConfigMap(ctx, cfgInterface, serviceCABundleCofigMap, ns.Name); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (r *Reconciler) ensureSA(ctx context.Context, ns *corev1.Namespace) (*corev1.ServiceAccount, error) {\n\tlogger := logging.FromContext(ctx)\n\tlogger.Info(\"finding sa: %s\/%s\", ns.Name, \"pipeline\")\n\tsaInterface := r.kubeClientSet.CoreV1().ServiceAccounts(ns.Name)\n\tsa, err := saInterface.Get(ctx, pipelineSA, metav1.GetOptions{})\n\tif err != nil && !errors.IsNotFound(err) {\n\t\treturn nil, err\n\t}\n\tif err != nil && errors.IsNotFound(err) {\n\t\tlogger.Info(\"creating sa\", \"sa\", pipelineSA, \"ns\", ns.Name)\n\t\treturn createSA(ctx, saInterface, ns.Name)\n\t}\n\n\treturn sa, nil\n}\n\nfunc (r *Reconciler) ensureRoleBindings(ctx context.Context, sa *corev1.ServiceAccount) error {\n\tlogger := logging.FromContext(ctx)\n\n\tlogger.Info(\"finding role-binding edit\")\n\trbacClient := r.kubeClientSet.RbacV1()\n\n\teditRB, err := rbacClient.RoleBindings(sa.Namespace).Get(ctx, \"edit\", metav1.GetOptions{})\n\n\tif err == nil {\n\t\tlogger.Infof(\"found rolebinding %s\/%s\", editRB.Namespace, editRB.Name)\n\t\treturn r.updateRoleBinding(ctx, editRB, sa)\n\t}\n\n\tif errors.IsNotFound(err) {\n\t\treturn r.createRoleBinding(ctx, sa)\n\t}\n\n\treturn err\n}\n\nfunc (r *Reconciler) createRoleBinding(ctx context.Context, sa *corev1.ServiceAccount) error {\n\tlogger := logging.FromContext(ctx)\n\n\tlogger.Info(\"create new rolebinding edit, in Namespace\", sa.GetNamespace())\n\trbacClient := r.kubeClientSet.RbacV1()\n\n\tlogger.Info(\"finding clusterrole edit\")\n\t_, err := rbacClient.ClusterRoles().Get(ctx, \"edit\", metav1.GetOptions{})\n\tif err != nil {\n\t\tlogger.Error(err, \"getting clusterRole 'edit' failed\")\n\t\treturn err\n\t}\n\n\trb := &rbacv1.RoleBinding{\n\t\tObjectMeta: metav1.ObjectMeta{Name: \"edit\", Namespace: sa.Namespace},\n\t\tRoleRef: rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: \"ClusterRole\", Name: \"edit\"},\n\t\tSubjects: []rbacv1.Subject{{Kind: rbacv1.ServiceAccountKind, Name: sa.Name, Namespace: sa.Namespace}},\n\t}\n\n\t_, err = rbacClient.RoleBindings(sa.Namespace).Create(ctx, rb, metav1.CreateOptions{})\n\tif err != nil {\n\t\tlogger.Error(err, \"creation of 'edit' rolebinding failed, in Namespace\", sa.GetNamespace())\n\t}\n\treturn err\n}\n\nfunc (r *Reconciler) ensureSCClusterRole(ctx context.Context) error {\n\tlogger := logging.FromContext(ctx)\n\n\tlogger.Info(\"finding cluster role pipeline-anyuid\")\n\n\tclusterRole := &rbacv1.ClusterRole{\n\t\tObjectMeta: metav1.ObjectMeta{Name: pipelineAnyuid},\n\t\tRules: []rbacv1.PolicyRule{\n\t\t\t{\n\t\t\t\tAPIGroups: []string{\n\t\t\t\t\t\"security.openshift.io\",\n\t\t\t\t},\n\t\t\t\tResourceNames: []string{\n\t\t\t\t\t\"anyuid\",\n\t\t\t\t},\n\t\t\t\tResources: []string{\n\t\t\t\t\t\"securitycontextconstraints\",\n\t\t\t\t},\n\t\t\t\tVerbs: []string{\n\t\t\t\t\t\"use\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trbacClient := r.kubeClientSet.RbacV1()\n\t_, err := rbacClient.ClusterRoles().Get(ctx, pipelineAnyuid, metav1.GetOptions{})\n\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t_, err = rbacClient.ClusterRoles().Create(ctx, clusterRole, metav1.CreateOptions{})\n\t\t}\n\t\treturn err\n\t}\n\t_, err = rbacClient.ClusterRoles().Update(ctx, clusterRole, metav1.UpdateOptions{})\n\treturn err\n}\n\nfunc (r *Reconciler) ensureSCCRoleBinding(ctx context.Context, sa *corev1.ServiceAccount) error {\n\tlogger := logging.FromContext(ctx)\n\n\tlogger.Info(\"finding role-binding pipeline-anyuid\")\n\trbacClient := r.kubeClientSet.RbacV1()\n\tpipelineRB, rbErr := rbacClient.RoleBindings(sa.Namespace).Get(ctx, pipelineAnyuid, metav1.GetOptions{})\n\tif rbErr != nil && !errors.IsNotFound(rbErr) {\n\t\tlogger.Error(rbErr, \"rbac pipeline-anyuid get error\")\n\t\treturn rbErr\n\t}\n\n\tlogger.Info(\"finding cluster role pipeline-anyuid\")\n\tif _, err := rbacClient.ClusterRoles().Get(ctx, pipelineAnyuid, metav1.GetOptions{}); err != nil {\n\t\tlogger.Error(err, \"finding pipeline-anyuid cluster role failed\")\n\t\treturn err\n\t}\n\n\tif rbErr != nil && errors.IsNotFound(rbErr) {\n\t\treturn r.createSCCRoleBinding(ctx, sa)\n\t}\n\n\tlogger.Info(\"found rbac\", \"subjects\", pipelineRB.Subjects)\n\treturn r.updateRoleBinding(ctx, pipelineRB, sa)\n}\n\nfunc (r *Reconciler) createSCCRoleBinding(ctx context.Context, sa *corev1.ServiceAccount) error {\n\tlogger := logging.FromContext(ctx)\n\n\tlogger.Info(\"create new rolebinding pipeline-anyuid\")\n\trbacClient := r.kubeClientSet.RbacV1()\n\trb := &rbacv1.RoleBinding{\n\t\tObjectMeta: metav1.ObjectMeta{Name: pipelineAnyuid, Namespace: sa.Namespace},\n\t\tRoleRef: rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: \"ClusterRole\", Name: pipelineAnyuid},\n\t\tSubjects: []rbacv1.Subject{{Kind: rbacv1.ServiceAccountKind, Name: sa.Name, Namespace: sa.Namespace}},\n\t}\n\n\t_, err := rbacClient.RoleBindings(sa.Namespace).Create(ctx, rb, metav1.CreateOptions{})\n\tif err != nil {\n\t\tlogger.Error(err, \"creation of pipeline-anyuid rb failed\")\n\t}\n\treturn err\n}\n\nfunc hasSubject(subjects []rbacv1.Subject, x rbacv1.Subject) bool {\n\tfor _, v := range subjects {\n\t\tif v.Name == x.Name && v.Kind == x.Kind && v.Namespace == x.Namespace {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (r *Reconciler) updateRoleBinding(ctx context.Context, rb *rbacv1.RoleBinding, sa *corev1.ServiceAccount) error {\n\tlogger := logging.FromContext(ctx)\n\n\tsubject := rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Name: sa.Name, Namespace: sa.Namespace}\n\n\tif hasSubject(rb.Subjects, subject) {\n\t\tlogger.Info(\"rolebinding is up to date\", \"action\", \"none\")\n\t\treturn nil\n\t}\n\n\tlogger.Info(\"update existing rolebinding edit\")\n\trbacClient := r.kubeClientSet.RbacV1()\n\trb.Subjects = append(rb.Subjects, subject)\n\t_, err := rbacClient.RoleBindings(sa.Namespace).Update(ctx, rb, metav1.UpdateOptions{})\n\tif err != nil {\n\t\tlogger.Error(err, \"updation of edit rb failed\")\n\t\treturn err\n\t}\n\tlogger.Error(err, \"successfully updated edit rb\")\n\treturn nil\n}\n\nfunc createSA(ctx context.Context, saInterface v1.ServiceAccountInterface, ns string) (*corev1.ServiceAccount, error) {\n\tsa := &corev1.ServiceAccount{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: pipelineSA,\n\t\t\tNamespace: ns,\n\t\t},\n\t}\n\n\tsa, err := saInterface.Create(ctx, sa, metav1.CreateOptions{})\n\tif err != nil && !errors.IsAlreadyExists(err) {\n\t\treturn nil, err\n\t}\n\n\treturn sa, nil\n}\n\nfunc createTrustedCABundleConfigMap(ctx context.Context, cfgInterface v1.ConfigMapInterface, name, ns string) error {\n\tc := &corev1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: ns,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"app.kubernetes.io\/part-of\": \"tekton-pipelines\",\n\t\t\t\t\/\/ user-provided and system CA certificates\n\t\t\t\t\"config.openshift.io\/inject-trusted-cabundle\": \"true\",\n\t\t\t},\n\t\t},\n\t}\n\n\t_, err := cfgInterface.Create(ctx, c, metav1.CreateOptions{})\n\tif err != nil && !errors.IsAlreadyExists(err) {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc createServiceCABundleConfigMap(ctx context.Context, cfgInterface v1.ConfigMapInterface, name, ns string) error {\n\tc := &corev1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: ns,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"app.kubernetes.io\/part-of\": \"tekton-pipelines\",\n\t\t\t},\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\/\/ service serving certificates (required to talk to the internal registry)\n\t\t\t\t\"service.beta.openshift.io\/inject-cabundle\": \"true\",\n\t\t\t},\n\t\t},\n\t}\n\n\t_, err := cfgInterface.Create(ctx, c, metav1.CreateOptions{})\n\tif err != nil && !errors.IsAlreadyExists(err) {\n\t\treturn err\n\t}\n\treturn nil\n}\n<commit_msg>openshift: move pipeline sa to restricted instead of anyuid<commit_after>package rbac\n\nimport (\n\t\"context\"\n\t\"regexp\"\n\n\tv1 \"k8s.io\/client-go\/kubernetes\/typed\/core\/v1\"\n\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\trbacv1 \"k8s.io\/api\/rbac\/v1\"\n\n\tmf \"github.com\/manifestival\/manifestival\"\n\t\"github.com\/tektoncd\/operator\/pkg\/apis\/operator\/v1alpha1\"\n\tclientset \"github.com\/tektoncd\/operator\/pkg\/client\/clientset\/versioned\"\n\t\"github.com\/tektoncd\/operator\/pkg\/reconciler\/common\"\n\t\"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\tnsreconciler \"knative.dev\/pkg\/client\/injection\/kube\/reconciler\/core\/v1\/namespace\"\n\t\"knative.dev\/pkg\/logging\"\n\tpkgreconciler \"knative.dev\/pkg\/reconciler\"\n)\n\n\/\/ Reconciler implements controller.Reconciler for TektonPipeline resources.\ntype Reconciler struct {\n\t\/\/ kubeClientSet allows us to talk to the k8s for core APIs\n\tkubeClientSet kubernetes.Interface\n\t\/\/ operatorClientSet allows us to configure operator objects\n\toperatorClientSet clientset.Interface\n\t\/\/ manifest is empty, but with a valid client and logger. all\n\t\/\/ manifests are immutable, and any created during reconcile are\n\t\/\/ expected to be appended to this one, obviating the passing of\n\t\/\/ client & logger\n\tmanifest mf.Manifest\n\t\/\/ Platform-specific behavior to affect the transform\n\textension common.Extension\n}\n\n\/\/ Check that our Reconciler implements controller.Reconciler\nvar _ nsreconciler.Interface = (*Reconciler)(nil)\n\nconst (\n\tpipelineRestricted = \"pipeline-restricted\"\n\tpipelineSA = \"pipeline\"\n\tserviceCABundleCofigMap = \"config-service-cabundle\"\n\ttrustedCABundleConfigMap = \"config-trusted-cabundle\"\n)\n\n\/\/ FinalizeKind removes all resources after deletion of a TektonPipelines.\nfunc (r *Reconciler) FinalizeKind(ctx context.Context, original *v1alpha1.TektonPipeline) pkgreconciler.Event {\n\treturn nil\n}\n\n\/\/ ReconcileKind compares the actual state with the desired, and attempts to\n\/\/ converge the two.\nfunc (r *Reconciler) ReconcileKind(ctx context.Context, ns *corev1.Namespace) pkgreconciler.Event {\n\tlogger := logging.FromContext(ctx)\n\tlogger.Infow(\"Reconciling Namespace: Platform Openshift\", \"status\", ns.GetName())\n\n\tignorePattern := \"^(openshift|kube)-\"\n\tif ignore, _ := regexp.MatchString(ignorePattern, ns.GetName()); ignore {\n\t\tlogger.Infow(\"Reconciling Namespace: IGNORE\", \"status\", ns.GetName())\n\t\treturn nil\n\t}\n\n\tlogger.Infow(\"Reconciling inject CA bundle configmap in \", \"Namespace\", ns.GetName())\n\tif err := r.ensureCABundles(ctx, ns); err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Infow(\"Reconciling Default SA in \", \"Namespace\", ns.GetName())\n\n\tsa, err := r.ensureSA(ctx, ns)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Maintaining a separate cluster role for the scc declaration.\n\t\/\/ to assist us in managing this the scc association in a\n\t\/\/ granular way.\n\tif err := r.ensureSCClusterRole(ctx); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.ensureSCCRoleBinding(ctx, sa); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.ensureRoleBindings(ctx, sa); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (r *Reconciler) ensureCABundles(ctx context.Context, ns *corev1.Namespace) error {\n\tlogger := logging.FromContext(ctx)\n\tcfgInterface := r.kubeClientSet.CoreV1().ConfigMaps(ns.Name)\n\n\t\/\/ Ensure trusted CA bundle\n\tlogger.Info(\"finding configmap: %s\/%s\", ns.Name, trustedCABundleConfigMap)\n\t_, err := cfgInterface.Get(ctx, trustedCABundleConfigMap, metav1.GetOptions{})\n\tif err != nil && !errors.IsNotFound(err) {\n\t\treturn err\n\t}\n\tif err != nil && errors.IsNotFound(err) {\n\t\tlogger.Info(\"creating configmap\", trustedCABundleConfigMap, \"ns\", ns.Name)\n\t\tif err := createTrustedCABundleConfigMap(ctx, cfgInterface, trustedCABundleConfigMap, ns.Name); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Ensure service CA bundle\n\tlogger.Info(\"finding configmap: %s\/%s\", ns.Name, serviceCABundleCofigMap)\n\t_, err = cfgInterface.Get(ctx, serviceCABundleCofigMap, metav1.GetOptions{})\n\tif err != nil && !errors.IsNotFound(err) {\n\t\treturn err\n\t}\n\tif err != nil && errors.IsNotFound(err) {\n\t\tlogger.Info(\"creating configmap\", serviceCABundleCofigMap, \"ns\", ns.Name)\n\t\tif err := createServiceCABundleConfigMap(ctx, cfgInterface, serviceCABundleCofigMap, ns.Name); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (r *Reconciler) ensureSA(ctx context.Context, ns *corev1.Namespace) (*corev1.ServiceAccount, error) {\n\tlogger := logging.FromContext(ctx)\n\tlogger.Info(\"finding sa: %s\/%s\", ns.Name, \"pipeline\")\n\tsaInterface := r.kubeClientSet.CoreV1().ServiceAccounts(ns.Name)\n\tsa, err := saInterface.Get(ctx, pipelineSA, metav1.GetOptions{})\n\tif err != nil && !errors.IsNotFound(err) {\n\t\treturn nil, err\n\t}\n\tif err != nil && errors.IsNotFound(err) {\n\t\tlogger.Info(\"creating sa\", \"sa\", pipelineSA, \"ns\", ns.Name)\n\t\treturn createSA(ctx, saInterface, ns.Name)\n\t}\n\n\treturn sa, nil\n}\n\nfunc (r *Reconciler) ensureRoleBindings(ctx context.Context, sa *corev1.ServiceAccount) error {\n\tlogger := logging.FromContext(ctx)\n\n\tlogger.Info(\"finding role-binding edit\")\n\trbacClient := r.kubeClientSet.RbacV1()\n\n\teditRB, err := rbacClient.RoleBindings(sa.Namespace).Get(ctx, \"edit\", metav1.GetOptions{})\n\n\tif err == nil {\n\t\tlogger.Infof(\"found rolebinding %s\/%s\", editRB.Namespace, editRB.Name)\n\t\treturn r.updateRoleBinding(ctx, editRB, sa)\n\t}\n\n\tif errors.IsNotFound(err) {\n\t\treturn r.createRoleBinding(ctx, sa)\n\t}\n\n\treturn err\n}\n\nfunc (r *Reconciler) createRoleBinding(ctx context.Context, sa *corev1.ServiceAccount) error {\n\tlogger := logging.FromContext(ctx)\n\n\tlogger.Info(\"create new rolebinding edit, in Namespace\", sa.GetNamespace())\n\trbacClient := r.kubeClientSet.RbacV1()\n\n\tlogger.Info(\"finding clusterrole edit\")\n\t_, err := rbacClient.ClusterRoles().Get(ctx, \"edit\", metav1.GetOptions{})\n\tif err != nil {\n\t\tlogger.Error(err, \"getting clusterRole 'edit' failed\")\n\t\treturn err\n\t}\n\n\trb := &rbacv1.RoleBinding{\n\t\tObjectMeta: metav1.ObjectMeta{Name: \"edit\", Namespace: sa.Namespace},\n\t\tRoleRef: rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: \"ClusterRole\", Name: \"edit\"},\n\t\tSubjects: []rbacv1.Subject{{Kind: rbacv1.ServiceAccountKind, Name: sa.Name, Namespace: sa.Namespace}},\n\t}\n\n\t_, err = rbacClient.RoleBindings(sa.Namespace).Create(ctx, rb, metav1.CreateOptions{})\n\tif err != nil {\n\t\tlogger.Error(err, \"creation of 'edit' rolebinding failed, in Namespace\", sa.GetNamespace())\n\t}\n\treturn err\n}\n\nfunc (r *Reconciler) ensureSCClusterRole(ctx context.Context) error {\n\tlogger := logging.FromContext(ctx)\n\n\tlogger.Info(\"finding cluster role pipeline-restricted\")\n\n\tclusterRole := &rbacv1.ClusterRole{\n\t\tObjectMeta: metav1.ObjectMeta{Name: pipelineRestricted},\n\t\tRules: []rbacv1.PolicyRule{\n\t\t\t{\n\t\t\t\tAPIGroups: []string{\n\t\t\t\t\t\"security.openshift.io\",\n\t\t\t\t},\n\t\t\t\tResourceNames: []string{\n\t\t\t\t\t\"restricted\",\n\t\t\t\t},\n\t\t\t\tResources: []string{\n\t\t\t\t\t\"securitycontextconstraints\",\n\t\t\t\t},\n\t\t\t\tVerbs: []string{\n\t\t\t\t\t\"use\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\trbacClient := r.kubeClientSet.RbacV1()\n\t_, err := rbacClient.ClusterRoles().Get(ctx, pipelineRestricted, metav1.GetOptions{})\n\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t_, err = rbacClient.ClusterRoles().Create(ctx, clusterRole, metav1.CreateOptions{})\n\t\t}\n\t\treturn err\n\t}\n\t_, err = rbacClient.ClusterRoles().Update(ctx, clusterRole, metav1.UpdateOptions{})\n\treturn err\n}\n\nfunc (r *Reconciler) ensureSCCRoleBinding(ctx context.Context, sa *corev1.ServiceAccount) error {\n\tlogger := logging.FromContext(ctx)\n\n\tlogger.Info(\"finding role-binding pipeline-restricted\")\n\trbacClient := r.kubeClientSet.RbacV1()\n\tpipelineRB, rbErr := rbacClient.RoleBindings(sa.Namespace).Get(ctx, pipelineRestricted, metav1.GetOptions{})\n\tif rbErr != nil && !errors.IsNotFound(rbErr) {\n\t\tlogger.Error(rbErr, \"rbac pipeline-restricted get error\")\n\t\treturn rbErr\n\t}\n\n\tlogger.Info(\"finding cluster role pipeline-restricted\")\n\tif _, err := rbacClient.ClusterRoles().Get(ctx, pipelineRestricted, metav1.GetOptions{}); err != nil {\n\t\tlogger.Error(err, \"finding pipeline-restricted cluster role failed\")\n\t\treturn err\n\t}\n\n\tif rbErr != nil && errors.IsNotFound(rbErr) {\n\t\treturn r.createSCCRoleBinding(ctx, sa)\n\t}\n\n\tlogger.Info(\"found rbac\", \"subjects\", pipelineRB.Subjects)\n\treturn r.updateRoleBinding(ctx, pipelineRB, sa)\n}\n\nfunc (r *Reconciler) createSCCRoleBinding(ctx context.Context, sa *corev1.ServiceAccount) error {\n\tlogger := logging.FromContext(ctx)\n\n\tlogger.Info(\"create new rolebinding pipeline-restricted\")\n\trbacClient := r.kubeClientSet.RbacV1()\n\trb := &rbacv1.RoleBinding{\n\t\tObjectMeta: metav1.ObjectMeta{Name: pipelineRestricted, Namespace: sa.Namespace},\n\t\tRoleRef: rbacv1.RoleRef{APIGroup: rbacv1.GroupName, Kind: \"ClusterRole\", Name: pipelineRestricted},\n\t\tSubjects: []rbacv1.Subject{{Kind: rbacv1.ServiceAccountKind, Name: sa.Name, Namespace: sa.Namespace}},\n\t}\n\n\t_, err := rbacClient.RoleBindings(sa.Namespace).Create(ctx, rb, metav1.CreateOptions{})\n\tif err != nil {\n\t\tlogger.Error(err, \"creation of pipeline-restricted rb failed\")\n\t}\n\treturn err\n}\n\nfunc hasSubject(subjects []rbacv1.Subject, x rbacv1.Subject) bool {\n\tfor _, v := range subjects {\n\t\tif v.Name == x.Name && v.Kind == x.Kind && v.Namespace == x.Namespace {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (r *Reconciler) updateRoleBinding(ctx context.Context, rb *rbacv1.RoleBinding, sa *corev1.ServiceAccount) error {\n\tlogger := logging.FromContext(ctx)\n\n\tsubject := rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Name: sa.Name, Namespace: sa.Namespace}\n\n\tif hasSubject(rb.Subjects, subject) {\n\t\tlogger.Info(\"rolebinding is up to date\", \"action\", \"none\")\n\t\treturn nil\n\t}\n\n\tlogger.Info(\"update existing rolebinding edit\")\n\trbacClient := r.kubeClientSet.RbacV1()\n\trb.Subjects = append(rb.Subjects, subject)\n\t_, err := rbacClient.RoleBindings(sa.Namespace).Update(ctx, rb, metav1.UpdateOptions{})\n\tif err != nil {\n\t\tlogger.Error(err, \"updation of edit rb failed\")\n\t\treturn err\n\t}\n\tlogger.Error(err, \"successfully updated edit rb\")\n\treturn nil\n}\n\nfunc createSA(ctx context.Context, saInterface v1.ServiceAccountInterface, ns string) (*corev1.ServiceAccount, error) {\n\tsa := &corev1.ServiceAccount{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: pipelineSA,\n\t\t\tNamespace: ns,\n\t\t},\n\t}\n\n\tsa, err := saInterface.Create(ctx, sa, metav1.CreateOptions{})\n\tif err != nil && !errors.IsAlreadyExists(err) {\n\t\treturn nil, err\n\t}\n\n\treturn sa, nil\n}\n\nfunc createTrustedCABundleConfigMap(ctx context.Context, cfgInterface v1.ConfigMapInterface, name, ns string) error {\n\tc := &corev1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: ns,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"app.kubernetes.io\/part-of\": \"tekton-pipelines\",\n\t\t\t\t\/\/ user-provided and system CA certificates\n\t\t\t\t\"config.openshift.io\/inject-trusted-cabundle\": \"true\",\n\t\t\t},\n\t\t},\n\t}\n\n\t_, err := cfgInterface.Create(ctx, c, metav1.CreateOptions{})\n\tif err != nil && !errors.IsAlreadyExists(err) {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc createServiceCABundleConfigMap(ctx context.Context, cfgInterface v1.ConfigMapInterface, name, ns string) error {\n\tc := &corev1.ConfigMap{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: name,\n\t\t\tNamespace: ns,\n\t\t\tLabels: map[string]string{\n\t\t\t\t\"app.kubernetes.io\/part-of\": \"tekton-pipelines\",\n\t\t\t},\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\/\/ service serving certificates (required to talk to the internal registry)\n\t\t\t\t\"service.beta.openshift.io\/inject-cabundle\": \"true\",\n\t\t\t},\n\t\t},\n\t}\n\n\t_, err := cfgInterface.Create(ctx, c, metav1.CreateOptions{})\n\tif err != nil && !errors.IsAlreadyExists(err) {\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package url\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/viant\/toolbox\"\n\t\"github.com\/viant\/toolbox\/storage\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/Resource represents a URL based resource, with enriched meta info\ntype Resource struct {\n\tURL string `description:\"resource URL or relative or absolute path\" required:\"true\"` \/\/URL of resource\n\tCredentials string `description:\"credentials file\"` \/\/name of credential file or credential key depending on implementation\n\tParsedURL *url.URL `json:\"-\"` \/\/parsed URL resource\n\tCache string `description:\"local cache path\"` \/\/Cache path for the resource, if specified resource will be cached in the specified path\n\tCacheExpiryMs int \/\/CacheExpiryMs expiry time in ms\n\tmodificationTag int64\n\tinit bool\n}\n\n\/\/Clone creates a clone of the resource\nfunc (r *Resource) Clone() *Resource {\n\treturn &Resource{\n\t\tinit: r.init,\n\t\tURL: r.URL,\n\t\tCredentials: r.Credentials,\n\t\tParsedURL: r.ParsedURL,\n\t\tCache: r.Cache,\n\t\tCacheExpiryMs: r.CacheExpiryMs,\n\t}\n}\n\nvar defaultSchemePorts = map[string]int{\n\t\"ssh\": 22,\n\t\"scp\": 22,\n\t\"http\": 80,\n\t\"https\": 443,\n}\n\n\/\/Host returns url's host name with user name if user name is part of url\nfunc (r *Resource) Host() string {\n\tresult := r.ParsedURL.Hostname() + \":\" + r.Port()\n\tif r.ParsedURL.User != nil {\n\t\tresult = r.ParsedURL.User.Username() + \"@\" + result\n\t}\n\treturn result\n}\n\n\/\/CredentialURL returns url's with provided credential\nfunc (r *Resource) CredentialURL(username, password string) string {\n\tvar urlCredential = \"\"\n\tif username != \"\" {\n\t\turlCredential = username\n\t\tif password != \"\" {\n\t\t\turlCredential += \":\" + password\n\t\t}\n\t\turlCredential += \"@\"\n\t}\n\tresult := r.ParsedURL.Scheme + \":\/\/\" + urlCredential + r.ParsedURL.Hostname() + \":\" + r.Port() + r.ParsedURL.Path\n\tif r.ParsedURL.RawQuery != \"\" {\n\t\tresult += \"?\" + r.ParsedURL.RawQuery\n\t}\n\n\treturn result\n}\n\n\/\/Path returns url's path directory, assumption is that directory does not have extension, if path ends with '\/' it is being stripped.\nfunc (r *Resource) DirectoryPath() string {\n\tif r.ParsedURL == nil {\n\t\treturn \"\"\n\t}\n\tvar result = r.ParsedURL.Path\n\n\tparent, name := path.Split(result)\n\tif path.Ext(name) != \"\" {\n\t\tresult = parent\n\t}\n\tif strings.HasSuffix(result, \"\/\") {\n\t\tresult = string(result[:len(result)-1])\n\t}\n\treturn result\n}\n\n\/\/Port returns url's port\nfunc (r *Resource) Port() string {\n\tport := r.ParsedURL.Port()\n\tif port == \"\" && r.ParsedURL != nil {\n\t\tif value, ok := defaultSchemePorts[r.ParsedURL.Scheme]; ok {\n\t\t\tport = toolbox.AsString(value)\n\t\t}\n\t}\n\treturn port\n}\n\n\/\/Download downloads data from URL, it returns data as []byte, or error, if resource is cacheable it first look into cache\nfunc (r *Resource) Download() ([]byte, error) {\n\tif r == nil {\n\t\treturn nil, fmt.Errorf(\"Fail to download content on empty resource\")\n\t}\n\tif r.Cachable() {\n\t\tcontent := r.readFromCache()\n\t\tif content != nil {\n\t\t\treturn content, nil\n\t\t}\n\t}\n\tservice, err := storage.NewServiceForURL(r.URL, r.Credentials)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tobject, err := service.StorageObject(r.URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treader, err := service.Download(object)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer reader.Close()\n\tcontent, err := ioutil.ReadAll(reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif r.Cachable() {\n\t\t_ = ioutil.WriteFile(r.Cache, content, 0666)\n\t}\n\treturn content, err\n}\n\n\/\/DownloadText returns a text downloaded from url\nfunc (r *Resource) DownloadText() (string, error) {\n\tvar result, err = r.Download()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(result), err\n}\n\n\/\/Decode decodes url's data into target, it support JSON and YAML exp.\nfunc (r *Resource) Decode(target interface{}) (err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"failed to decode: %v, %v\", r.URL, err)\n\t\t}\n\t}()\n\text := path.Ext(r.ParsedURL.Path)\n\tswitch ext {\n\tcase \".yaml\", \".yml\":\n\t\terr = r.YAMLDecode(target)\n\tdefault:\n\t\terr = r.JSONDecode(target)\n\t}\n\treturn err\n}\n\n\/\/DecoderFactory returns new decoder factory for resource\nfunc (r *Resource) DecoderFactory() toolbox.DecoderFactory {\n\text := path.Ext(r.ParsedURL.Path)\n\tswitch ext {\n\tcase \".yaml\", \".yml\":\n\t\treturn toolbox.NewYamlDecoderFactory()\n\tdefault:\n\t\treturn toolbox.NewJSONDecoderFactory()\n\t}\n}\n\n\/\/Decode decodes url's data into target, it takes decoderFactory which decodes data into target\nfunc (r *Resource) DecodeWith(target interface{}, decoderFactory toolbox.DecoderFactory) error {\n\tif r == nil {\n\t\treturn fmt.Errorf(\"fail to %T decode on empty resource\", decoderFactory)\n\t}\n\tif decoderFactory == nil {\n\t\treturn fmt.Errorf(\"fail to decode %v, decoderFactory was empty\", r.URL)\n\t}\n\tvar content, err = r.Download()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttext := string(content)\n\tif toolbox.IsNewLineDelimitedJSON(text) {\n\t\tif aSlice, err := toolbox.NewLineDelimitedJSON(text); err == nil {\n\t\t\treturn toolbox.DefaultConverter.AssignConverted(target, aSlice)\n\t\t}\n\t}\n\terr = decoderFactory.Create(bytes.NewReader(content)).Decode(target)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to decode: %v, payload: %s\", err, content)\n\t}\n\treturn err\n}\n\n\/\/Rename renames URI name of this resource\nfunc (r *Resource) Rename(name string) (err error) {\n\tvar _, currentName = toolbox.URLSplit(r.URL)\n\tif currentName == \"\" && strings.HasSuffix(r.URL, \"\/\") {\n\t\t_, currentName = toolbox.URLSplit(r.URL[:len(r.URL)-1])\n\t\tcurrentName += \"\/\"\n\t}\n\n\tr.URL = strings.Replace(r.URL, currentName, name, 1)\n\tr.ParsedURL, err = url.Parse(r.URL)\n\treturn err\n}\n\n\/\/JSONDecode decodes json resource into target\nfunc (r *Resource) JSONDecode(target interface{}) error {\n\treturn r.DecodeWith(target, toolbox.NewJSONDecoderFactory())\n}\n\n\/\/JSONDecode decodes yaml resource into target\nfunc (r *Resource) YAMLDecode(target interface{}) error {\n\tif interfacePrt, ok := target.(*interface{}); ok {\n\t\tvar data interface{}\n\t\tif err := r.DecodeWith(&data, toolbox.NewYamlDecoderFactory()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif toolbox.IsSlice(data) {\n\t\t\t*interfacePrt = data\n\t\t\treturn nil\n\t\t}\n\t}\n\tvar mapSlice = yaml.MapSlice{}\n\tif err := r.DecodeWith(&mapSlice, toolbox.NewYamlDecoderFactory()); err != nil {\n\t\treturn err\n\t}\n\tif !toolbox.IsMap(target) {\n\t\treturn toolbox.DefaultConverter.AssignConverted(target, mapSlice)\n\t}\n\tresultMap := toolbox.AsMap(target)\n\tfor _, v := range mapSlice {\n\t\tresultMap[toolbox.AsString(v.Key)] = v.Value\n\t}\n\treturn nil\n}\n\nfunc (r *Resource) readFromCache() []byte {\n\tif toolbox.FileExists(r.Cache) {\n\t\tinfo, err := os.Stat(r.Cache)\n\t\tvar isExpired = false\n\t\tif err == nil && r.CacheExpiryMs > 0 {\n\t\t\telapsed := time.Now().Sub(info.ModTime())\n\t\t\tisExpired = elapsed > time.Second*time.Duration(r.CacheExpiryMs)\n\t\t}\n\t\tcontent, err := ioutil.ReadFile(r.Cache)\n\t\tif err == nil && !isExpired {\n\t\t\treturn content\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/Cachable returns true if resource is cachable\nfunc (r *Resource) Cachable() bool {\n\treturn r.Cache != \"\"\n}\n\nfunc computeResourceModificationTag(resource *Resource) (int64, error) {\n\tservice, err := storage.NewServiceForURL(resource.URL, resource.Credentials)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tobject, err := service.StorageObject(resource.URL)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tvar fileInfo = object.FileInfo()\n\n\tif object.IsContent() {\n\t\treturn fileInfo.Size() + fileInfo.ModTime().UnixNano(), nil\n\t}\n\tvar result int64 = 0\n\tobjects, err := service.List(resource.URL)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tfor _, object := range objects {\n\t\tobjectResource := NewResource(object.URL())\n\t\tif objectResource.ParsedURL.Path == resource.ParsedURL.Path {\n\t\t\tcontinue\n\t\t}\n\t\tmodificationTag, err := computeResourceModificationTag(NewResource(object.URL(), resource.Credentials))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tresult += modificationTag\n\n\t}\n\treturn result, nil\n}\n\nfunc (r *Resource) HasChanged() (changed bool, err error) {\n\tif r.modificationTag == 0 {\n\t\tr.modificationTag, err = computeResourceModificationTag(r)\n\t\treturn false, err\n\t}\n\tvar recentModificationTag int64\n\trecentModificationTag, err = computeResourceModificationTag(r)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif recentModificationTag != r.modificationTag {\n\t\tchanged = true\n\t\tr.modificationTag = recentModificationTag\n\t}\n\treturn changed, err\n}\n\nfunc normalizeURL(URL string) string {\n\tif strings.Contains(URL, \":\/\/\") {\n\t\tvar protoPosition = strings.Index(URL, \":\/\/\")\n\t\tif protoPosition != -1 {\n\t\t\tvar urlSuffix = string(URL[protoPosition+3:])\n\t\t\turlSuffix = strings.Replace(urlSuffix, \"\/\/\", \"\/\", len(urlSuffix))\n\t\t\tURL = string(URL[:protoPosition+3]) + urlSuffix\n\t\t}\n\t\treturn URL\n\t}\n\tif !strings.HasPrefix(URL, \"\/\") {\n\t\tcurrentDirectory, _ := os.Getwd()\n\n\t\tif strings.Contains(URL, \"..\") {\n\t\t\tfragments := strings.Split(URL, \"\/\")\n\t\t\tvar index = 0\n\t\t\tvar offset = 0\n\t\t\tif fragments[0] == \".\" {\n\t\t\t\toffset = 1\n\t\t\t}\n\n\t\t\tfor index = offset; index < len(fragments); index++ {\n\t\t\t\tvar fragment = fragments[index]\n\t\t\t\tif fragment == \"..\" {\n\t\t\t\t\tcurrentDirectory, _ = path.Split(currentDirectory)\n\t\t\t\t\tif strings.HasSuffix(currentDirectory, \"\/\") {\n\t\t\t\t\t\tcurrentDirectory = string(currentDirectory[:len(currentDirectory)-1])\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn toolbox.FileSchema + path.Join(currentDirectory, strings.Join(fragments[index:], \"\/\"))\n\t\t}\n\n\t\tcurrentDirectory, err := os.Getwd()\n\t\tif err == nil {\n\t\t\tcandidate := path.Join(currentDirectory, URL)\n\t\t\tURL = candidate\n\t\t}\n\t}\n\treturn toolbox.FileSchema + URL\n}\n\nfunc (r *Resource) Init() (err error) {\n\tif r.init {\n\t\treturn nil\n\t}\n\tr.init = true\n\tr.URL = normalizeURL(r.URL)\n\tr.ParsedURL, err = url.Parse(r.URL)\n\treturn err\n}\n\n\/\/NewResource returns a new resource for provided URL, followed by optional credential, cache and cache expiryMs.\nfunc NewResource(Params ...interface{}) *Resource {\n\tif len(Params) == 0 {\n\t\treturn nil\n\t}\n\tvar URL = toolbox.AsString(Params[0])\n\tURL = normalizeURL(URL)\n\n\tvar credential string\n\tif len(Params) > 1 {\n\t\tcredential = toolbox.AsString(Params[1])\n\t}\n\tvar cache string\n\tif len(Params) > 2 {\n\t\tcache = toolbox.AsString(Params[2])\n\t}\n\tvar cacheExpiryMs int\n\tif len(Params) > 3 {\n\t\tcacheExpiryMs = toolbox.AsInt(Params[3])\n\t}\n\tparsedURL, _ := url.Parse(URL)\n\treturn &Resource{\n\t\tinit: true,\n\t\tParsedURL: parsedURL,\n\t\tURL: URL,\n\t\tCredentials: credential,\n\t\tCache: cache,\n\t\tCacheExpiryMs: cacheExpiryMs,\n\t}\n}\n<commit_msg>added url check<commit_after>package url\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/viant\/toolbox\"\n\t\"github.com\/viant\/toolbox\/storage\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/Resource represents a URL based resource, with enriched meta info\ntype Resource struct {\n\tURL string `description:\"resource URL or relative or absolute path\" required:\"true\"` \/\/URL of resource\n\tCredentials string `description:\"credentials file\"` \/\/name of credential file or credential key depending on implementation\n\tParsedURL *url.URL `json:\"-\"` \/\/parsed URL resource\n\tCache string `description:\"local cache path\"` \/\/Cache path for the resource, if specified resource will be cached in the specified path\n\tCacheExpiryMs int \/\/CacheExpiryMs expiry time in ms\n\tmodificationTag int64\n\tinit bool\n}\n\n\/\/Clone creates a clone of the resource\nfunc (r *Resource) Clone() *Resource {\n\treturn &Resource{\n\t\tinit: r.init,\n\t\tURL: r.URL,\n\t\tCredentials: r.Credentials,\n\t\tParsedURL: r.ParsedURL,\n\t\tCache: r.Cache,\n\t\tCacheExpiryMs: r.CacheExpiryMs,\n\t}\n}\n\nvar defaultSchemePorts = map[string]int{\n\t\"ssh\": 22,\n\t\"scp\": 22,\n\t\"http\": 80,\n\t\"https\": 443,\n}\n\n\/\/Host returns url's host name with user name if user name is part of url\nfunc (r *Resource) Host() string {\n\tresult := r.ParsedURL.Hostname() + \":\" + r.Port()\n\tif r.ParsedURL.User != nil {\n\t\tresult = r.ParsedURL.User.Username() + \"@\" + result\n\t}\n\treturn result\n}\n\n\/\/CredentialURL returns url's with provided credential\nfunc (r *Resource) CredentialURL(username, password string) string {\n\tvar urlCredential = \"\"\n\tif username != \"\" {\n\t\turlCredential = username\n\t\tif password != \"\" {\n\t\t\turlCredential += \":\" + password\n\t\t}\n\t\turlCredential += \"@\"\n\t}\n\tresult := r.ParsedURL.Scheme + \":\/\/\" + urlCredential + r.ParsedURL.Hostname() + \":\" + r.Port() + r.ParsedURL.Path\n\tif r.ParsedURL.RawQuery != \"\" {\n\t\tresult += \"?\" + r.ParsedURL.RawQuery\n\t}\n\n\treturn result\n}\n\n\/\/Path returns url's path directory, assumption is that directory does not have extension, if path ends with '\/' it is being stripped.\nfunc (r *Resource) DirectoryPath() string {\n\tif r.ParsedURL == nil {\n\t\treturn \"\"\n\t}\n\tvar result = r.ParsedURL.Path\n\n\tparent, name := path.Split(result)\n\tif path.Ext(name) != \"\" {\n\t\tresult = parent\n\t}\n\tif strings.HasSuffix(result, \"\/\") {\n\t\tresult = string(result[:len(result)-1])\n\t}\n\treturn result\n}\n\n\/\/Port returns url's port\nfunc (r *Resource) Port() string {\n\tport := r.ParsedURL.Port()\n\tif port == \"\" && r.ParsedURL != nil {\n\t\tif value, ok := defaultSchemePorts[r.ParsedURL.Scheme]; ok {\n\t\t\tport = toolbox.AsString(value)\n\t\t}\n\t}\n\treturn port\n}\n\n\/\/Download downloads data from URL, it returns data as []byte, or error, if resource is cacheable it first look into cache\nfunc (r *Resource) Download() ([]byte, error) {\n\tif r == nil {\n\t\treturn nil, fmt.Errorf(\"Fail to download content on empty resource\")\n\t}\n\tif r.Cachable() {\n\t\tcontent := r.readFromCache()\n\t\tif content != nil {\n\t\t\treturn content, nil\n\t\t}\n\t}\n\tservice, err := storage.NewServiceForURL(r.URL, r.Credentials)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tobject, err := service.StorageObject(r.URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treader, err := service.Download(object)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer reader.Close()\n\tcontent, err := ioutil.ReadAll(reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif r.Cachable() {\n\t\t_ = ioutil.WriteFile(r.Cache, content, 0666)\n\t}\n\treturn content, err\n}\n\n\/\/DownloadText returns a text downloaded from url\nfunc (r *Resource) DownloadText() (string, error) {\n\tvar result, err = r.Download()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(result), err\n}\n\n\/\/Decode decodes url's data into target, it support JSON and YAML exp.\nfunc (r *Resource) Decode(target interface{}) (err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"failed to decode: %v, %v\", r.URL, err)\n\t\t}\n\t}()\n\tif r.ParsedURL == nil {\n\t\tif r.ParsedURL, err = url.Parse(r.URL); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\text := path.Ext(r.ParsedURL.Path)\n\tswitch ext {\n\tcase \".yaml\", \".yml\":\n\t\terr = r.YAMLDecode(target)\n\tdefault:\n\t\terr = r.JSONDecode(target)\n\t}\n\treturn err\n}\n\n\/\/DecoderFactory returns new decoder factory for resource\nfunc (r *Resource) DecoderFactory() toolbox.DecoderFactory {\n\text := path.Ext(r.ParsedURL.Path)\n\tswitch ext {\n\tcase \".yaml\", \".yml\":\n\t\treturn toolbox.NewYamlDecoderFactory()\n\tdefault:\n\t\treturn toolbox.NewJSONDecoderFactory()\n\t}\n}\n\n\/\/Decode decodes url's data into target, it takes decoderFactory which decodes data into target\nfunc (r *Resource) DecodeWith(target interface{}, decoderFactory toolbox.DecoderFactory) error {\n\tif r == nil {\n\t\treturn fmt.Errorf(\"fail to %T decode on empty resource\", decoderFactory)\n\t}\n\tif decoderFactory == nil {\n\t\treturn fmt.Errorf(\"fail to decode %v, decoderFactory was empty\", r.URL)\n\t}\n\tvar content, err = r.Download()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttext := string(content)\n\tif toolbox.IsNewLineDelimitedJSON(text) {\n\t\tif aSlice, err := toolbox.NewLineDelimitedJSON(text); err == nil {\n\t\t\treturn toolbox.DefaultConverter.AssignConverted(target, aSlice)\n\t\t}\n\t}\n\terr = decoderFactory.Create(bytes.NewReader(content)).Decode(target)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to decode: %v, payload: %s\", err, content)\n\t}\n\treturn err\n}\n\n\/\/Rename renames URI name of this resource\nfunc (r *Resource) Rename(name string) (err error) {\n\tvar _, currentName = toolbox.URLSplit(r.URL)\n\tif currentName == \"\" && strings.HasSuffix(r.URL, \"\/\") {\n\t\t_, currentName = toolbox.URLSplit(r.URL[:len(r.URL)-1])\n\t\tcurrentName += \"\/\"\n\t}\n\n\tr.URL = strings.Replace(r.URL, currentName, name, 1)\n\tr.ParsedURL, err = url.Parse(r.URL)\n\treturn err\n}\n\n\/\/JSONDecode decodes json resource into target\nfunc (r *Resource) JSONDecode(target interface{}) error {\n\treturn r.DecodeWith(target, toolbox.NewJSONDecoderFactory())\n}\n\n\/\/JSONDecode decodes yaml resource into target\nfunc (r *Resource) YAMLDecode(target interface{}) error {\n\tif interfacePrt, ok := target.(*interface{}); ok {\n\t\tvar data interface{}\n\t\tif err := r.DecodeWith(&data, toolbox.NewYamlDecoderFactory()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif toolbox.IsSlice(data) {\n\t\t\t*interfacePrt = data\n\t\t\treturn nil\n\t\t}\n\t}\n\tvar mapSlice = yaml.MapSlice{}\n\tif err := r.DecodeWith(&mapSlice, toolbox.NewYamlDecoderFactory()); err != nil {\n\t\treturn err\n\t}\n\tif !toolbox.IsMap(target) {\n\t\treturn toolbox.DefaultConverter.AssignConverted(target, mapSlice)\n\t}\n\tresultMap := toolbox.AsMap(target)\n\tfor _, v := range mapSlice {\n\t\tresultMap[toolbox.AsString(v.Key)] = v.Value\n\t}\n\treturn nil\n}\n\nfunc (r *Resource) readFromCache() []byte {\n\tif toolbox.FileExists(r.Cache) {\n\t\tinfo, err := os.Stat(r.Cache)\n\t\tvar isExpired = false\n\t\tif err == nil && r.CacheExpiryMs > 0 {\n\t\t\telapsed := time.Now().Sub(info.ModTime())\n\t\t\tisExpired = elapsed > time.Second*time.Duration(r.CacheExpiryMs)\n\t\t}\n\t\tcontent, err := ioutil.ReadFile(r.Cache)\n\t\tif err == nil && !isExpired {\n\t\t\treturn content\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/Cachable returns true if resource is cachable\nfunc (r *Resource) Cachable() bool {\n\treturn r.Cache != \"\"\n}\n\nfunc computeResourceModificationTag(resource *Resource) (int64, error) {\n\tservice, err := storage.NewServiceForURL(resource.URL, resource.Credentials)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tobject, err := service.StorageObject(resource.URL)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tvar fileInfo = object.FileInfo()\n\n\tif object.IsContent() {\n\t\treturn fileInfo.Size() + fileInfo.ModTime().UnixNano(), nil\n\t}\n\tvar result int64 = 0\n\tobjects, err := service.List(resource.URL)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tfor _, object := range objects {\n\t\tobjectResource := NewResource(object.URL())\n\t\tif objectResource.ParsedURL.Path == resource.ParsedURL.Path {\n\t\t\tcontinue\n\t\t}\n\t\tmodificationTag, err := computeResourceModificationTag(NewResource(object.URL(), resource.Credentials))\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tresult += modificationTag\n\n\t}\n\treturn result, nil\n}\n\nfunc (r *Resource) HasChanged() (changed bool, err error) {\n\tif r.modificationTag == 0 {\n\t\tr.modificationTag, err = computeResourceModificationTag(r)\n\t\treturn false, err\n\t}\n\tvar recentModificationTag int64\n\trecentModificationTag, err = computeResourceModificationTag(r)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif recentModificationTag != r.modificationTag {\n\t\tchanged = true\n\t\tr.modificationTag = recentModificationTag\n\t}\n\treturn changed, err\n}\n\nfunc normalizeURL(URL string) string {\n\tif strings.Contains(URL, \":\/\/\") {\n\t\tvar protoPosition = strings.Index(URL, \":\/\/\")\n\t\tif protoPosition != -1 {\n\t\t\tvar urlSuffix = string(URL[protoPosition+3:])\n\t\t\turlSuffix = strings.Replace(urlSuffix, \"\/\/\", \"\/\", len(urlSuffix))\n\t\t\tURL = string(URL[:protoPosition+3]) + urlSuffix\n\t\t}\n\t\treturn URL\n\t}\n\tif !strings.HasPrefix(URL, \"\/\") {\n\t\tcurrentDirectory, _ := os.Getwd()\n\n\t\tif strings.Contains(URL, \"..\") {\n\t\t\tfragments := strings.Split(URL, \"\/\")\n\t\t\tvar index = 0\n\t\t\tvar offset = 0\n\t\t\tif fragments[0] == \".\" {\n\t\t\t\toffset = 1\n\t\t\t}\n\n\t\t\tfor index = offset; index < len(fragments); index++ {\n\t\t\t\tvar fragment = fragments[index]\n\t\t\t\tif fragment == \"..\" {\n\t\t\t\t\tcurrentDirectory, _ = path.Split(currentDirectory)\n\t\t\t\t\tif strings.HasSuffix(currentDirectory, \"\/\") {\n\t\t\t\t\t\tcurrentDirectory = string(currentDirectory[:len(currentDirectory)-1])\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn toolbox.FileSchema + path.Join(currentDirectory, strings.Join(fragments[index:], \"\/\"))\n\t\t}\n\n\t\tcurrentDirectory, err := os.Getwd()\n\t\tif err == nil {\n\t\t\tcandidate := path.Join(currentDirectory, URL)\n\t\t\tURL = candidate\n\t\t}\n\t}\n\treturn toolbox.FileSchema + URL\n}\n\nfunc (r *Resource) Init() (err error) {\n\tif r.init {\n\t\treturn nil\n\t}\n\tr.init = true\n\tr.URL = normalizeURL(r.URL)\n\tr.ParsedURL, err = url.Parse(r.URL)\n\treturn err\n}\n\n\/\/NewResource returns a new resource for provided URL, followed by optional credential, cache and cache expiryMs.\nfunc NewResource(Params ...interface{}) *Resource {\n\tif len(Params) == 0 {\n\t\treturn nil\n\t}\n\tvar URL = toolbox.AsString(Params[0])\n\tURL = normalizeURL(URL)\n\n\tvar credential string\n\tif len(Params) > 1 {\n\t\tcredential = toolbox.AsString(Params[1])\n\t}\n\tvar cache string\n\tif len(Params) > 2 {\n\t\tcache = toolbox.AsString(Params[2])\n\t}\n\tvar cacheExpiryMs int\n\tif len(Params) > 3 {\n\t\tcacheExpiryMs = toolbox.AsInt(Params[3])\n\t}\n\tparsedURL, _ := url.Parse(URL)\n\treturn &Resource{\n\t\tinit: true,\n\t\tParsedURL: parsedURL,\n\t\tURL: URL,\n\t\tCredentials: credential,\n\t\tCache: cache,\n\t\tCacheExpiryMs: cacheExpiryMs,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage sidecar\n\nimport (\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"k8s.io\/dns\/pkg\/dnsmasq\"\n)\n\n\/\/ Server that runs the dnsmasq-metrics daemon.\ntype Server interface {\n\tRun(options *Options)\n}\n\ntype server struct {\n\toptions *Options\n\tmetricsClient dnsmasq.MetricsClient\n\tprobes []*dnsProbe\n}\n\n\/\/ NewServer creates a new server instance\nfunc NewServer() Server {\n\treturn &server{}\n}\n\n\/\/ Run the server (does not return)\nfunc (s *server) Run(options *Options) {\n\ts.options = options\n\tglog.Infof(\"Starting server (options %+v)\", *s.options)\n\n\tfor _, probeOption := range options.Probes {\n\t\tprobe := &dnsProbe{DNSProbeOption: probeOption}\n\t\ts.probes = append(s.probes, probe)\n\t\tprobe.Start(options)\n\t}\n\n\ts.runMetrics(options)\n}\n\nfunc (s *server) runMetrics(options *Options) {\n\tInitializeMetrics(options)\n\n\ts.metricsClient = dnsmasq.NewMetricsClient(options.DnsMasqAddr, options.DnsMasqPort)\n\n\tfor {\n\t\tmetrics, err := s.metricsClient.GetMetrics()\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"Error getting metrics from dnsmasq: %v\", err)\n\t\t\terrorsCounter.Add(1)\n\t\t} else {\n\t\t\tglog.V(3).Infof(\"DnsMasq metrics %+v\", metrics)\n\t\t\texportMetrics(metrics)\n\t\t}\n\n\t\ttime.Sleep(time.Duration(options.DnsMasqPollIntervalMs) * time.Millisecond)\n\t}\n}\n\nfunc exportMetrics(metrics *dnsmasq.Metrics) {\n\tfor key := range *metrics {\n\t\t\/\/ Retrieve the previous value of the metric and get the delta\n\t\t\/\/ between the previous and current values. Add the delta to the\n\t\t\/\/ previous to get the proper value. This is needed because the\n\t\t\/\/ Counter API does not allow us to set the counter to a value.\n\t\tpreviousValue := countersCache[key]\n\t\tdelta := float64((*metrics)[key]) - previousValue\n\t\tnewValue := previousValue + delta\n\t\t\/\/ Update cache to new value.\n\t\tcountersCache[key] = newValue\n\t\tcounters[key].Add(newValue)\n\t}\n}\n<commit_msg>Update server.go<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage sidecar\n\nimport (\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"k8s.io\/dns\/pkg\/dnsmasq\"\n)\n\n\/\/ Server that runs the dnsmasq-metrics daemon.\ntype Server interface {\n\tRun(options *Options)\n}\n\ntype server struct {\n\toptions *Options\n\tmetricsClient dnsmasq.MetricsClient\n\tprobes []*dnsProbe\n}\n\n\/\/ NewServer creates a new server instance\nfunc NewServer() Server {\n\treturn &server{}\n}\n\n\/\/ Run the server (does not return)\nfunc (s *server) Run(options *Options) {\n\ts.options = options\n\tglog.Infof(\"Starting server (options %+v)\", *s.options)\n\n\tfor _, probeOption := range options.Probes {\n\t\tprobe := &dnsProbe{DNSProbeOption: probeOption}\n\t\ts.probes = append(s.probes, probe)\n\t\tprobe.Start(options)\n\t}\n\n\ts.runMetrics(options)\n}\n\nfunc (s *server) runMetrics(options *Options) {\n\tInitializeMetrics(options)\n\n\ts.metricsClient = dnsmasq.NewMetricsClient(options.DnsMasqAddr, options.DnsMasqPort)\n\n\tfor {\n\t\tmetrics, err := s.metricsClient.GetMetrics()\n\t\tif err != nil {\n\t\t\tglog.Warningf(\"Error getting metrics from dnsmasq: %v\", err)\n\t\t\terrorsCounter.Add(1)\n\t\t} else {\n\t\t\tglog.V(3).Infof(\"DnsMasq metrics %+v\", metrics)\n\t\t\texportMetrics(metrics)\n\t\t}\n\n\t\ttime.Sleep(time.Duration(options.DnsMasqPollIntervalMs) * time.Millisecond)\n\t}\n}\n\nfunc exportMetrics(metrics *dnsmasq.Metrics) {\n\tfor key := range *metrics {\n\t\t\/\/ Retrieve the previous value of the metric and get the delta\n\t\t\/\/ between the previous and current values. Add the delta to the\n\t\t\/\/ previous to get the proper value. This is needed because the\n\t\t\/\/ Counter API does not allow us to set the counter to a value.\n\t\tpreviousValue := countersCache[key]\n\t\tdelta := float64((*metrics)[key]) - previousValue\n\t\tnewValue := previousValue + delta\n\t\t\/\/ Update cache to new value.\n\t\tcountersCache[key] = newValue\n\t\tcounters[key].Add(delta)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package stackit\n\nimport (\n\t\"github.com\/aws\/aws-sdk-go\/service\/cloudformation\"\n\t\"time\"\n)\n\ntype TailStackEvent struct {\n\tcloudformation.StackEvent\n\tStackitError error\n}\n\nfunc (s *Stackit) PollStackEvents(token string, callback func(event TailStackEvent)) TailStackEvent {\n\tlastSentEventId := \"\"\n\n\tfor {\n\t\ttime.Sleep(3 * time.Second)\n\n\t\tevents := []*cloudformation.StackEvent{}\n\n\t\ts.api.DescribeStackEventsPages(&cloudformation.DescribeStackEventsInput{\n\t\t\tStackName: &s.stackId,\n\t\t}, func(page *cloudformation.DescribeStackEventsOutput, lastPage bool) bool {\n\t\t\tfor _, event := range page.StackEvents {\n\t\t\t\tcrt := \"nil\"\n\t\t\t\tif event.ClientRequestToken != nil {\n\t\t\t\t\tcrt = *event.ClientRequestToken\n\t\t\t\t}\n\n\t\t\t\tif token == \"\" {\n\t\t\t\t\ttoken = crt\n\t\t\t\t}\n\n\t\t\t\tif *event.EventId == lastSentEventId || crt != token {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\n\t\t\t\tevents = append(events, event)\n\t\t\t}\n\t\t\treturn true\n\t\t})\n\n\t\tif len(events) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tlastSentEventId = *events[0].EventId\n\t\tstack, err := s.Describe()\n\t\tif err != nil {\n\t\t\tcallback(TailStackEvent{cloudformation.StackEvent{}, err})\n\t\t}\n\t\tterminal := IsTerminalStatus(*stack.StackStatus)\n\n\t\tfor ev_i := len(events) - 1; ev_i >= 0; ev_i-- {\n\t\t\tevent := events[ev_i]\n\t\t\ttailEvent := TailStackEvent{*event, nil}\n\n\t\t\tdone := terminal && ev_i == 0\n\t\t\tif done {\n\t\t\t\treturn tailEvent\n\t\t\t}\n\n\t\t\tcallback(tailEvent)\n\t\t}\n\t}\n}\n\nfunc IsTerminalStatus(status string) bool {\n\tswitch status {\n\tcase\n\t\t\"CREATE_COMPLETE\",\n\t\t\"DELETE_COMPLETE\",\n\t\t\"CREATE_FAILED\",\n\t\t\"DELETE_FAILED\",\n\t\t\"ROLLBACK_COMPLETE\",\n\t\t\"ROLLBACK_FAILED\",\n\t\t\"UPDATE_COMPLETE\",\n\t\t\"UPDATE_FAILED\",\n\t\t\"UPDATE_ROLLBACK_COMPLETE\",\n\t\t\"UPDATE_ROLLBACK_FAILED\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n<commit_msg>handle throttling in poller<commit_after>package stackit\n\nimport (\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/cloudformation\"\n\t\"time\"\n)\n\ntype TailStackEvent struct {\n\tcloudformation.StackEvent\n\tStackitError error\n}\n\nfunc (s *Stackit) PollStackEvents(token string, callback func(event TailStackEvent)) TailStackEvent {\n\tlastSentEventId := \"\"\n\n\tfor {\n\t\ttime.Sleep(3 * time.Second)\n\n\t\tevents := []*cloudformation.StackEvent{}\n\n\t\terr := s.api.DescribeStackEventsPages(&cloudformation.DescribeStackEventsInput{\n\t\t\tStackName: &s.stackId,\n\t\t}, func(page *cloudformation.DescribeStackEventsOutput, lastPage bool) bool {\n\t\t\tfor _, event := range page.StackEvents {\n\t\t\t\tcrt := \"nil\"\n\t\t\t\tif event.ClientRequestToken != nil {\n\t\t\t\t\tcrt = *event.ClientRequestToken\n\t\t\t\t}\n\n\t\t\t\tif token == \"\" {\n\t\t\t\t\ttoken = crt\n\t\t\t\t}\n\n\t\t\t\tif *event.EventId == lastSentEventId || crt != token {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\n\t\t\t\tevents = append(events, event)\n\t\t\t}\n\t\t\treturn true\n\t\t})\n\n\t\tif err != nil {\n\t\t\tif awsErr, ok := err.(awserr.Error); ok {\n\t\t\t\tcode := awsErr.Code()\n\t\t\t\tif code == \"ThrottlingException\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\tevent := TailStackEvent{cloudformation.StackEvent{}, err}\n\t\t\tcallback(event)\n\t\t\treturn event\n\t\t}\n\n\t\tif len(events) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tlastSentEventId = *events[0].EventId\n\t\tstack, err := s.Describe()\n\t\tif err != nil {\n\t\t\tevent := TailStackEvent{cloudformation.StackEvent{}, err}\n\t\t\tcallback(event)\n\t\t\treturn event\n\t\t}\n\t\tterminal := IsTerminalStatus(*stack.StackStatus)\n\n\t\tfor ev_i := len(events) - 1; ev_i >= 0; ev_i-- {\n\t\t\tevent := events[ev_i]\n\t\t\ttailEvent := TailStackEvent{*event, nil}\n\n\t\t\tdone := terminal && ev_i == 0\n\t\t\tif done {\n\t\t\t\treturn tailEvent\n\t\t\t}\n\n\t\t\tcallback(tailEvent)\n\t\t}\n\t}\n}\n\nfunc IsTerminalStatus(status string) bool {\n\tswitch status {\n\tcase\n\t\t\"CREATE_COMPLETE\",\n\t\t\"DELETE_COMPLETE\",\n\t\t\"CREATE_FAILED\",\n\t\t\"DELETE_FAILED\",\n\t\t\"ROLLBACK_COMPLETE\",\n\t\t\"ROLLBACK_FAILED\",\n\t\t\"UPDATE_COMPLETE\",\n\t\t\"UPDATE_FAILED\",\n\t\t\"UPDATE_ROLLBACK_COMPLETE\",\n\t\t\"UPDATE_ROLLBACK_FAILED\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cache\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"k8s.io\/kubernetes\/pkg\/util\/clock\"\n\n\t\"github.com\/golang\/groupcache\/lru\"\n)\n\nfunc expectEntry(t *testing.T, c *LRUExpireCache, key lru.Key, value interface{}) {\n\tresult, ok := c.Get(key)\n\tif !ok || result != value {\n\t\tt.Errorf(\"Expected cache[%v]: %v, got %v\", key, value, result)\n\t}\n}\n\nfunc expectNotEntry(t *testing.T, c *LRUExpireCache, key lru.Key) {\n\tif result, ok := c.Get(key); ok {\n\t\tt.Errorf(\"Expected cache[%v] to be empty, got %v\", key, result)\n\t}\n}\n\nfunc TestSimpleGet(t *testing.T) {\n\tc := NewLRUExpireCache(10)\n\tc.Add(\"long-lived\", \"12345\", 10*time.Hour)\n\texpectEntry(t, c, \"long-lived\", \"12345\")\n}\n\nfunc TestExpiredGet(t *testing.T) {\n\tfakeClock := clock.NewFakeClock(time.Now())\n\tc := NewLRUExpireCacheWithClock(10, fakeClock)\n\tc.Add(\"short-lived\", \"12345\", 1*time.Millisecond)\n\t\/\/ ensure the entry expired\n\tfakeClock.Step(2 * time.Millisecond)\n\texpectNotEntry(t, c, \"short-lived\")\n}\n\nfunc TestLRUOverflow(t *testing.T) {\n\tc := NewLRUExpireCache(4)\n\tc.Add(\"elem1\", \"1\", 10*time.Hour)\n\tc.Add(\"elem2\", \"2\", 10*time.Hour)\n\tc.Add(\"elem3\", \"3\", 10*time.Hour)\n\tc.Add(\"elem4\", \"4\", 10*time.Hour)\n\tc.Add(\"elem5\", \"5\", 10*time.Hour)\n\texpectNotEntry(t, c, \"elem1\")\n\texpectEntry(t, c, \"elem2\", \"2\")\n\texpectEntry(t, c, \"elem3\", \"3\")\n\texpectEntry(t, c, \"elem4\", \"4\")\n\texpectEntry(t, c, \"elem5\", \"5\")\n}\n<commit_msg>move utils used in restclient to client-go<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cache\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"k8s.io\/client-go\/pkg\/util\/clock\"\n\n\t\"github.com\/golang\/groupcache\/lru\"\n)\n\nfunc expectEntry(t *testing.T, c *LRUExpireCache, key lru.Key, value interface{}) {\n\tresult, ok := c.Get(key)\n\tif !ok || result != value {\n\t\tt.Errorf(\"Expected cache[%v]: %v, got %v\", key, value, result)\n\t}\n}\n\nfunc expectNotEntry(t *testing.T, c *LRUExpireCache, key lru.Key) {\n\tif result, ok := c.Get(key); ok {\n\t\tt.Errorf(\"Expected cache[%v] to be empty, got %v\", key, result)\n\t}\n}\n\nfunc TestSimpleGet(t *testing.T) {\n\tc := NewLRUExpireCache(10)\n\tc.Add(\"long-lived\", \"12345\", 10*time.Hour)\n\texpectEntry(t, c, \"long-lived\", \"12345\")\n}\n\nfunc TestExpiredGet(t *testing.T) {\n\tfakeClock := clock.NewFakeClock(time.Now())\n\tc := NewLRUExpireCacheWithClock(10, fakeClock)\n\tc.Add(\"short-lived\", \"12345\", 1*time.Millisecond)\n\t\/\/ ensure the entry expired\n\tfakeClock.Step(2 * time.Millisecond)\n\texpectNotEntry(t, c, \"short-lived\")\n}\n\nfunc TestLRUOverflow(t *testing.T) {\n\tc := NewLRUExpireCache(4)\n\tc.Add(\"elem1\", \"1\", 10*time.Hour)\n\tc.Add(\"elem2\", \"2\", 10*time.Hour)\n\tc.Add(\"elem3\", \"3\", 10*time.Hour)\n\tc.Add(\"elem4\", \"4\", 10*time.Hour)\n\tc.Add(\"elem5\", \"5\", 10*time.Hour)\n\texpectNotEntry(t, c, \"elem1\")\n\texpectEntry(t, c, \"elem2\", \"2\")\n\texpectEntry(t, c, \"elem3\", \"3\")\n\texpectEntry(t, c, \"elem4\", \"4\")\n\texpectEntry(t, c, \"elem5\", \"5\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage utils\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"strings\"\n\n\t\"github.com\/pingcap\/errors\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/credentials\"\n)\n\n\/\/ TLS saves some information about tls\ntype TLS struct {\n\tinner *tls.Config\n\tclient *http.Client\n\turl string\n}\n\n\/\/ ToTLSConfig generates tls's config.\nfunc ToTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) {\n\treturn ToTLSConfigWithVerify(caPath, certPath, keyPath, nil)\n}\n\n\/\/ ToTLSConfigWithVerify constructs a `*tls.Config` from the CA, certification and key\n\/\/ paths, and add verify for CN.\n\/\/\n\/\/ If the CA path is empty, returns nil.\nfunc ToTLSConfigWithVerify(caPath, certPath, keyPath string, verifyCN []string) (*tls.Config, error) {\n\tif len(caPath) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Load the client certificates from disk\n\tvar certificates []tls.Certificate\n\tif len(certPath) != 0 && len(keyPath) != 0 {\n\t\tcert, err := tls.LoadX509KeyPair(certPath, keyPath)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Annotate(err, \"could not load client key pair\")\n\t\t}\n\t\tcertificates = []tls.Certificate{cert}\n\t}\n\n\t\/\/ Create a certificate pool from CA\n\tcertPool := x509.NewCertPool()\n\tca, err := ioutil.ReadFile(caPath)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"could not read ca certificate\")\n\t}\n\n\t\/\/ Append the certificates from the CA\n\tif !certPool.AppendCertsFromPEM(ca) {\n\t\treturn nil, errors.New(\"failed to append ca certs\")\n\t}\n\n\ttlsCfg := &tls.Config{\n\t\tCertificates: certificates,\n\t\tRootCAs: certPool,\n\t\tClientCAs: certPool,\n\t\tNextProtos: []string{\"h2\", \"http\/1.1\"}, \/\/ specify `h2` to let Go use HTTP\/2.\n\t}\n\n\tif len(verifyCN) != 0 {\n\t\tcheckCN := make(map[string]struct{})\n\t\tfor _, cn := range verifyCN {\n\t\t\tcn = strings.TrimSpace(cn)\n\t\t\tcheckCN[cn] = struct{}{}\n\t\t}\n\n\t\ttlsCfg.ClientAuth = tls.RequireAndVerifyClientCert\n\n\t\ttlsCfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {\n\t\t\tcns := make([]string, 0, len(verifiedChains))\n\t\t\tfor _, chains := range verifiedChains {\n\t\t\t\tfor _, chain := range chains {\n\t\t\t\t\tcns = append(cns, chain.Subject.CommonName)\n\t\t\t\t\tif _, match := checkCN[chain.Subject.CommonName]; match {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn errors.Errorf(\"client certificate authentication failed. The Common Name from the client certificate %v was not found in the configuration cluster-verify-cn with value: %s\", cns, verifyCN)\n\t\t}\n\t}\n\n\treturn tlsCfg, nil\n}\n\n\/\/ NewTLS constructs a new HTTP client with TLS configured with the CA,\n\/\/ certificate and key paths.\n\/\/\n\/\/ If the CA path is empty, returns an instance where TLS is disabled.\nfunc NewTLS(caPath, certPath, keyPath, host string, verifyCN []string) (*TLS, error) {\n\tif len(caPath) == 0 {\n\t\treturn &TLS{\n\t\t\tinner: nil,\n\t\t\tclient: &http.Client{},\n\t\t\turl: \"http:\/\/\" + host,\n\t\t}, nil\n\t}\n\tinner, err := ToTLSConfigWithVerify(caPath, certPath, keyPath, verifyCN)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient := ClientWithTLS(inner)\n\n\treturn &TLS{\n\t\tinner: inner,\n\t\tclient: client,\n\t\turl: \"https:\/\/\" + host,\n\t}, nil\n}\n\n\/\/ ClientWithTLS creates a http client wit tls\nfunc ClientWithTLS(tlsCfg *tls.Config) *http.Client {\n\ttransport := http.DefaultTransport.(*http.Transport).Clone()\n\ttransport.TLSClientConfig = tlsCfg\n\treturn &http.Client{Transport: transport}\n}\n\n\/\/ NewTLSFromMockServer constructs a new TLS instance from the certificates of\n\/\/ an *httptest.Server.\nfunc NewTLSFromMockServer(server *httptest.Server) *TLS {\n\treturn &TLS{\n\t\tinner: server.TLS,\n\t\tclient: server.Client(),\n\t\turl: server.URL,\n\t}\n}\n\n\/\/ WithHost creates a new TLS instance with the host replaced.\nfunc (tc *TLS) WithHost(host string) *TLS {\n\tvar url string\n\tif tc.inner != nil {\n\t\turl = \"https:\/\/\" + host\n\t} else {\n\t\turl = \"http:\/\/\" + host\n\t}\n\treturn &TLS{\n\t\tinner: tc.inner,\n\t\tclient: tc.client,\n\t\turl: url,\n\t}\n}\n\n\/\/ ToGRPCDialOption constructs a gRPC dial option.\nfunc (tc *TLS) ToGRPCDialOption() grpc.DialOption {\n\tif tc.inner != nil {\n\t\treturn grpc.WithTransportCredentials(credentials.NewTLS(tc.inner))\n\t}\n\treturn grpc.WithInsecure()\n}\n\n\/\/ WrapListener places a TLS layer on top of the existing listener.\nfunc (tc *TLS) WrapListener(l net.Listener) net.Listener {\n\tif tc.inner == nil {\n\t\treturn l\n\t}\n\treturn tls.NewListener(l, tc.inner)\n}\n\n\/\/ GetJSON obtains JSON result with the HTTP GET method.\nfunc (tc *TLS) GetJSON(path string, v interface{}) error {\n\treturn GetJSON(tc.client, tc.url+path, v)\n}\n<commit_msg>pkg: update tls (#329)<commit_after>\/\/ Copyright 2020 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage utils\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"strings\"\n\n\t\"github.com\/pingcap\/errors\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/credentials\"\n)\n\n\/\/ TLS saves some information about tls\ntype TLS struct {\n\tinner *tls.Config\n\tclient *http.Client\n\turl string\n}\n\n\/\/ ToTLSConfig generates tls's config.\nfunc ToTLSConfig(caPath, certPath, keyPath string) (*tls.Config, error) {\n\treturn ToTLSConfigWithVerify(caPath, certPath, keyPath, nil)\n}\n\n\/\/ ToTLSConfigWithVerify constructs a `*tls.Config` from the CA, certification and key\n\/\/ paths, and add verify for CN.\n\/\/\n\/\/ If the CA path is empty, returns nil.\nfunc ToTLSConfigWithVerify(caPath, certPath, keyPath string, verifyCN []string) (*tls.Config, error) {\n\tif len(caPath) == 0 {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Load the client certificates from disk\n\tvar certificates []tls.Certificate\n\tif len(certPath) != 0 && len(keyPath) != 0 {\n\t\tcert, err := tls.LoadX509KeyPair(certPath, keyPath)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Annotate(err, \"could not load client key pair\")\n\t\t}\n\t\tcertificates = []tls.Certificate{cert}\n\t}\n\n\t\/\/ Create a certificate pool from CA\n\tcertPool := x509.NewCertPool()\n\tca, err := ioutil.ReadFile(caPath)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"could not read ca certificate\")\n\t}\n\n\t\/\/ Append the certificates from the CA\n\tif !certPool.AppendCertsFromPEM(ca) {\n\t\treturn nil, errors.New(\"failed to append ca certs\")\n\t}\n\n\ttlsCfg := &tls.Config{\n\t\tCertificates: certificates,\n\t\tRootCAs: certPool,\n\t\tClientCAs: certPool,\n\t\tNextProtos: []string{\"h2\", \"http\/1.1\"}, \/\/ specify `h2` to let Go use HTTP\/2.\n\t}\n\n\tif len(verifyCN) != 0 {\n\t\tcheckCN := make(map[string]struct{})\n\t\tfor _, cn := range verifyCN {\n\t\t\tcn = strings.TrimSpace(cn)\n\t\t\tcheckCN[cn] = struct{}{}\n\t\t}\n\n\t\ttlsCfg.ClientAuth = tls.RequireAndVerifyClientCert\n\n\t\ttlsCfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {\n\t\t\tcns := make([]string, 0, len(verifiedChains))\n\t\t\tfor _, chains := range verifiedChains {\n\t\t\t\tfor _, chain := range chains {\n\t\t\t\t\tcns = append(cns, chain.Subject.CommonName)\n\t\t\t\t\tif _, match := checkCN[chain.Subject.CommonName]; match {\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn errors.Errorf(\"client certificate authentication failed. The Common Name from the client certificate %v was not found in the configuration cluster-verify-cn with value: %s\", cns, verifyCN)\n\t\t}\n\t}\n\n\treturn tlsCfg, nil\n}\n\n\/\/ NewTLS constructs a new HTTP client with TLS configured with the CA,\n\/\/ certificate and key paths.\n\/\/\n\/\/ If the CA path is empty, returns an instance where TLS is disabled.\nfunc NewTLS(caPath, certPath, keyPath, host string, verifyCN []string) (*TLS, error) {\n\tif len(caPath) == 0 {\n\t\treturn &TLS{\n\t\t\tinner: nil,\n\t\t\tclient: &http.Client{},\n\t\t\turl: \"http:\/\/\" + host,\n\t\t}, nil\n\t}\n\tinner, err := ToTLSConfigWithVerify(caPath, certPath, keyPath, verifyCN)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tclient := ClientWithTLS(inner)\n\n\treturn &TLS{\n\t\tinner: inner,\n\t\tclient: client,\n\t\turl: \"https:\/\/\" + host,\n\t}, nil\n}\n\n\/\/ ClientWithTLS creates a http client wit tls\nfunc ClientWithTLS(tlsCfg *tls.Config) *http.Client {\n\ttransport := http.DefaultTransport.(*http.Transport).Clone()\n\ttransport.TLSClientConfig = tlsCfg\n\treturn &http.Client{Transport: transport}\n}\n\n\/\/ NewTLSFromMockServer constructs a new TLS instance from the certificates of\n\/\/ an *httptest.Server.\nfunc NewTLSFromMockServer(server *httptest.Server) *TLS {\n\treturn &TLS{\n\t\tinner: server.TLS,\n\t\tclient: server.Client(),\n\t\turl: server.URL,\n\t}\n}\n\n\/\/ WithHost creates a new TLS instance with the host replaced.\nfunc (tc *TLS) WithHost(host string) *TLS {\n\tvar url string\n\tif tc.inner != nil {\n\t\turl = \"https:\/\/\" + host\n\t} else {\n\t\turl = \"http:\/\/\" + host\n\t}\n\treturn &TLS{\n\t\tinner: tc.inner,\n\t\tclient: tc.client,\n\t\turl: url,\n\t}\n}\n\n\/\/ TLSConfig returns tls config\nfunc (tc *TLS) TLSConfig() *tls.Config {\n\treturn tc.inner\n}\n\n\/\/ ToGRPCDialOption constructs a gRPC dial option.\nfunc (tc *TLS) ToGRPCDialOption() grpc.DialOption {\n\tif tc.inner != nil {\n\t\treturn grpc.WithTransportCredentials(credentials.NewTLS(tc.inner))\n\t}\n\treturn grpc.WithInsecure()\n}\n\n\/\/ ToGRPCServerOption constructs a gRPC server option.\nfunc (tc *TLS) ToGRPCServerOption() grpc.ServerOption {\n\tif tc.inner != nil {\n\t\treturn grpc.Creds(credentials.NewTLS(tc.inner))\n\t}\n\n\treturn grpc.Creds(nil)\n}\n\n\/\/ WrapListener places a TLS layer on top of the existing listener.\nfunc (tc *TLS) WrapListener(l net.Listener) net.Listener {\n\tif tc.inner == nil {\n\t\treturn l\n\t}\n\treturn tls.NewListener(l, tc.inner)\n}\n\n\/\/ GetJSON obtains JSON result with the HTTP GET method.\nfunc (tc *TLS) GetJSON(path string, v interface{}) error {\n\treturn GetJSON(tc.client, tc.url+path, v)\n}\n<|endoftext|>"} {"text":"<commit_before>package plugin\n\nimport (\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestHeader(t *testing.T) {\n\te := echo.New()\n\treq := httptest.NewRequest(echo.GET, \"\/\", nil)\n\trec := httptest.NewRecorder()\n\tc := e.NewContext(req, rec)\n\tok := func(c echo.Context) error {\n\t\treturn c.String(http.StatusOK, \"OK\")\n\t}\n\th := &Header{\n\t\tSet: map[string]string{\"Name\": \"Jon\"},\n\t\tAdd: map[string]string{\"Name\": \"Joe\"},\n\t\tDel: []string{\"Delete\"},\n\t}\n\trec.Header().Set(\"Delete\", \"me\")\n\n\th.Init()\n\th.Process(ok)(c)\n\n\tassert.Equal(t, \"Jon\", rec.Header().Get(\"Name\")) \/\/ Set\n\tassert.EqualValues(t, []string{\"Jon\", \"Joe\"}, rec.Header()[\"Name\"]) \/\/ Add\n\tassert.Equal(t, \"\", rec.Header().Get(\"Delete\")) \/\/ Del\n}\n<commit_msg>Fixed build<commit_after>package plugin\n\nimport (\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"testing\"\n\n\t\"github.com\/labstack\/echo\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestHeader(t *testing.T) {\n\te := echo.New()\n\treq := httptest.NewRequest(echo.GET, \"\/\", nil)\n\trec := httptest.NewRecorder()\n\tc := e.NewContext(req, rec)\n\tok := func(c echo.Context) error {\n\t\treturn c.String(http.StatusOK, \"OK\")\n\t}\n\th := new(Header)\n\th.Set = map[string]string{\"Name\": \"Jon\"}\n\th.Add = map[string]string{\"Name\": \"Joe\"}\n\th.Del = []string{\"Delete\"}\n\trec.Header().Set(\"Delete\", \"me\")\n\n\th.Initialize()\n\th.Process(ok)(c)\n\n\tassert.Equal(t, \"Jon\", rec.Header().Get(\"Name\")) \/\/ Set\n\tassert.EqualValues(t, []string{\"Jon\", \"Joe\"}, rec.Header()[\"Name\"]) \/\/ Add\n\tassert.Equal(t, \"\", rec.Header().Get(\"Delete\")) \/\/ Del\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package ready is used to signal readiness of the CoreDNS process. Once all\n\/\/ plugins have called in the plugin will signal readiness by returning a 200\n\/\/ OK on the HTTP handler (on port 8181). If not ready yet, the handler will\n\/\/ return a 503.\npackage ready\n\nimport (\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"sync\"\n\n\tclog \"github.com\/coredns\/coredns\/plugin\/pkg\/log\"\n\t\"github.com\/coredns\/coredns\/plugin\/pkg\/reuseport\"\n\t\"github.com\/coredns\/coredns\/plugin\/pkg\/uniq\"\n)\n\nvar (\n\tlog = clog.NewWithPlugin(\"ready\")\n\tplugins = &list{}\n\tuniqAddr = uniq.New()\n)\n\ntype ready struct {\n\tAddr string\n\n\tsync.RWMutex\n\tln net.Listener\n\tdone bool\n\tmux *http.ServeMux\n}\n\nfunc (rd *ready) onStartup() error {\n\tln, err := reuseport.Listen(\"tcp\", rd.Addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trd.Lock()\n\trd.ln = ln\n\trd.mux = http.NewServeMux()\n\trd.done = true\n\trd.Unlock()\n\n\trd.mux.HandleFunc(\"\/ready\", func(w http.ResponseWriter, _ *http.Request) {\n\t\tok, todo := plugins.Ready()\n\t\tif ok {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\tio.WriteString(w, http.StatusText(http.StatusOK))\n\t\t\treturn\n\t\t}\n\t\tlog.Infof(\"Still waiting on: %q\", todo)\n\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\t\tio.WriteString(w, todo)\n\t})\n\n\tgo func() { http.Serve(rd.ln, rd.mux) }()\n\n\treturn nil\n}\n\nfunc (rd *ready) onFinalShutdown() error {\n\trd.Lock()\n\tdefer rd.Unlock()\n\tif !rd.done {\n\t\treturn nil\n\t}\n\n\tuniqAddr.Unset(rd.Addr)\n\n\trd.ln.Close()\n\trd.done = false\n\treturn nil\n}\n<commit_msg>dont return 200 during shutdown (#4167)<commit_after>\/\/ Package ready is used to signal readiness of the CoreDNS process. Once all\n\/\/ plugins have called in the plugin will signal readiness by returning a 200\n\/\/ OK on the HTTP handler (on port 8181). If not ready yet, the handler will\n\/\/ return a 503.\npackage ready\n\nimport (\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"sync\"\n\n\tclog \"github.com\/coredns\/coredns\/plugin\/pkg\/log\"\n\t\"github.com\/coredns\/coredns\/plugin\/pkg\/reuseport\"\n\t\"github.com\/coredns\/coredns\/plugin\/pkg\/uniq\"\n)\n\nvar (\n\tlog = clog.NewWithPlugin(\"ready\")\n\tplugins = &list{}\n\tuniqAddr = uniq.New()\n)\n\ntype ready struct {\n\tAddr string\n\n\tsync.RWMutex\n\tln net.Listener\n\tdone bool\n\tmux *http.ServeMux\n}\n\nfunc (rd *ready) onStartup() error {\n\tln, err := reuseport.Listen(\"tcp\", rd.Addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trd.Lock()\n\trd.ln = ln\n\trd.mux = http.NewServeMux()\n\trd.done = true\n\trd.Unlock()\n\n\trd.mux.HandleFunc(\"\/ready\", func(w http.ResponseWriter, _ *http.Request) {\n\t\trd.Lock()\n\t\tdefer rd.Unlock()\n\t\tif !rd.done {\n\t\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\t\t\tio.WriteString(w, \"Shutting down\")\n\t\t\treturn\n\t\t}\n\t\tok, todo := plugins.Ready()\n\t\tif ok {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\tio.WriteString(w, http.StatusText(http.StatusOK))\n\t\t\treturn\n\t\t}\n\t\tlog.Infof(\"Still waiting on: %q\", todo)\n\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\t\tio.WriteString(w, todo)\n\t})\n\n\tgo func() { http.Serve(rd.ln, rd.mux) }()\n\n\treturn nil\n}\n\nfunc (rd *ready) onFinalShutdown() error {\n\trd.Lock()\n\tdefer rd.Unlock()\n\tif !rd.done {\n\t\treturn nil\n\t}\n\n\tuniqAddr.Unset(rd.Addr)\n\n\trd.ln.Close()\n\trd.done = false\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package semaphore\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc testNewCloseableInvalidCount(t *testing.T) {\n\tfor _, c := range []int{0, -1} {\n\t\tt.Run(strconv.Itoa(c), func(t *testing.T) {\n\t\t\tassert.Panics(t, func() {\n\t\t\t\tNewCloseable(c)\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc testNewCloseableValidCount(t *testing.T) {\n\tfor _, c := range []int{1, 2, 5} {\n\t\tt.Run(strconv.Itoa(c), func(t *testing.T) {\n\t\t\ts := NewCloseable(c)\n\t\t\tassert.NotNil(t, s)\n\t\t})\n\t}\n}\n\nfunc TestNewCloseable(t *testing.T) {\n\tt.Run(\"InvalidCount\", testNewCloseableInvalidCount)\n\tt.Run(\"ValidCount\", testNewCloseableValidCount)\n}\n\nfunc testCloseableTryAcquire(t *testing.T, cs Closeable, totalCount int) {\n\tassert := assert.New(t)\n\tfor i := 0; i < totalCount; i++ {\n\t\tassert.True(cs.TryAcquire())\n\t}\n\n\tassert.False(cs.TryAcquire())\n\tassert.NoError(cs.Release())\n\tassert.True(cs.TryAcquire())\n\tassert.False(cs.TryAcquire())\n\n\tassert.NoError(cs.Release())\n\tassert.NoError(cs.Close())\n\tassert.False(cs.TryAcquire())\n\tassert.Equal(ErrClosed, cs.Close())\n\tassert.Equal(ErrClosed, cs.Release())\n}\n\nfunc testCloseableAcquireSuccess(t *testing.T, cs Closeable, totalCount int) {\n\tvar (\n\t\tassert = assert.New(t)\n\t\trequire = require.New(t)\n\t)\n\n\t\/\/ acquire all the things!\n\tfor i := 0; i < totalCount; i++ {\n\t\tdone := make(chan struct{})\n\t\tgo func() {\n\t\t\tdefer close(done)\n\t\t\tcs.Acquire()\n\t\t}()\n\n\t\tselect {\n\t\tcase <-done:\n\t\t\t\/\/ passing\n\t\tcase <-time.After(time.Second):\n\t\t\tassert.FailNow(\"Acquire blocked unexpectedly\")\n\t\t}\n\t}\n\n\t\/\/ post condition: no point continuing if this fails\n\trequire.False(cs.TryAcquire())\n\n\tvar (\n\t\tready = make(chan struct{})\n\t\tacquired = make(chan struct{})\n\t)\n\n\tgo func() {\n\t\tdefer close(acquired)\n\t\tclose(ready)\n\t\tcs.Acquire() \/\/ this should now block\n\t}()\n\n\tselect {\n\tcase <-ready:\n\t\t\/\/ passing\n\t\trequire.False(cs.TryAcquire())\n\t\tcs.Release()\n\tcase <-time.After(time.Second):\n\t\trequire.FailNow(\"Unable to spawn acquire goroutine\")\n\t}\n\n\tselect {\n\tcase <-acquired:\n\t\trequire.False(cs.TryAcquire())\n\tcase <-time.After(time.Second):\n\t\trequire.FailNow(\"Acquire blocked unexpectedly\")\n\t}\n\n\tassert.NoError(cs.Release())\n\tassert.True(cs.TryAcquire())\n\tassert.NoError(cs.Release())\n}\n\nfunc testCloseableAcquireClose(t *testing.T, cs Closeable, totalCount int) {\n\tvar (\n\t\tassert = assert.New(t)\n\t\trequire = require.New(t)\n\n\t\tacquiredAll = make(chan struct{})\n\t\tresults = make(chan error, totalCount)\n\t\tcloseWait = make(chan struct{})\n\t)\n\n\tgo func() {\n\t\tdefer close(acquiredAll)\n\t\tfor i := 0; i < totalCount; i++ {\n\t\t\tassert.NoError(cs.Acquire())\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-acquiredAll:\n\t\t\/\/ passing\n\tcase <-time.After(5 * time.Second):\n\t\tassert.FailNow(\"Unable to acquire all resources\")\n\t}\n\n\t\/\/ block multiple routines waiting to acquire the semaphore\n\tfor i := 0; i < totalCount; i++ {\n\t\tready := make(chan struct{})\n\t\tgo func() {\n\t\t\tclose(ready)\n\t\t\tresults <- cs.Acquire()\n\t\t}()\n\n\t\tselect {\n\t\tcase <-ready:\n\t\t\t\/\/ passing\n\t\tcase <-time.After(time.Second):\n\t\t\tassert.FailNow(\"Failed to spawn Acquire goroutine\")\n\t\t}\n\t}\n\n\tgo func() {\n\t\tdefer close(closeWait)\n\t\t<-cs.Closed()\n\t}()\n\n\t\/\/ post condition: no point continuing if this fails\n\trequire.False(cs.TryAcquire())\n\n\tassert.NoError(cs.Close())\n\tfor i := 0; i < totalCount; i++ {\n\t\tselect {\n\t\tcase err := <-results:\n\t\t\tassert.Equal(ErrClosed, err)\n\t\tcase <-time.After(5 * time.Second):\n\t\t\tassert.FailNow(\"Acquire blocked unexpectedly\")\n\t\t}\n\t}\n\n\tselect {\n\tcase <-closeWait:\n\t\tassert.False(cs.TryAcquire())\n\t\tassert.Equal(ErrClosed, cs.Close())\n\t\tassert.Equal(ErrClosed, cs.Acquire())\n\t\tassert.Equal(ErrClosed, cs.Release())\n\n\tcase <-time.After(5 * time.Second):\n\t\tassert.FailNow(\"Closed channel did not get signaled\")\n\t}\n}\n\nfunc testCloseableAcquireWait(t *testing.T, cs Closeable, totalCount int) {\n\tvar (\n\t\tassert = assert.New(t)\n\t\trequire = require.New(t)\n\t\ttimer = make(chan time.Time)\n\t)\n\n\t\/\/ acquire all the things!\n\tfor i := 0; i < totalCount; i++ {\n\t\tresult := make(chan error)\n\t\tgo func() {\n\t\t\tresult <- cs.AcquireWait(timer)\n\t\t}()\n\n\t\tselect {\n\t\tcase err := <-result:\n\t\t\tassert.NoError(err)\n\t\tcase <-time.After(time.Second):\n\t\t\tassert.FailNow(\"Acquire blocked unexpectedly\")\n\t\t}\n\t}\n\n\t\/\/ post condition: no point continuing if this fails\n\trequire.False(cs.TryAcquire())\n\n\tvar (\n\t\tready = make(chan struct{})\n\t\tresult = make(chan error)\n\t)\n\n\tgo func() {\n\t\tclose(ready)\n\t\tresult <- cs.AcquireWait(timer)\n\t}()\n\n\tselect {\n\tcase <-ready:\n\t\ttimer <- time.Time{}\n\tcase <-time.After(time.Second):\n\t\trequire.FailNow(\"Unable to spawn acquire goroutine\")\n\t}\n\n\tselect {\n\tcase err := <-result:\n\t\tassert.Equal(ErrTimeout, err)\n\tcase <-time.After(time.Second):\n\t\trequire.FailNow(\"AcquireWait blocked unexpectedly\")\n\t}\n}\n\nfunc testCloseableAcquireCtx(t *testing.T, cs Closeable, totalCount int) {\n\tvar (\n\t\tassert = assert.New(t)\n\t\trequire = require.New(t)\n\t\tctx, cancel = context.WithCancel(context.Background())\n\t)\n\n\tdefer cancel()\n\n\t\/\/ acquire all the things!\n\tfor i := 0; i < totalCount; i++ {\n\t\tresult := make(chan error)\n\t\tgo func() {\n\t\t\tresult <- cs.AcquireCtx(ctx)\n\t\t}()\n\n\t\tselect {\n\t\tcase err := <-result:\n\t\t\tassert.NoError(err)\n\t\tcase <-time.After(time.Second):\n\t\t\tassert.FailNow(\"Acquire blocked unexpectedly\")\n\t\t}\n\t}\n\n\t\/\/ post condition: no point continuing if this fails\n\trequire.False(cs.TryAcquire())\n\n\tvar (\n\t\tready = make(chan struct{})\n\t\tresult = make(chan error)\n\t)\n\n\tgo func() {\n\t\tclose(ready)\n\t\tresult <- cs.AcquireCtx(ctx)\n\t}()\n\n\tselect {\n\tcase <-ready:\n\t\tcancel()\n\tcase <-time.After(time.Second):\n\t\trequire.FailNow(\"Unable to spawn acquire goroutine\")\n\t}\n\n\tselect {\n\tcase err := <-result:\n\t\tassert.Equal(ctx.Err(), err)\n\tcase <-time.After(time.Second):\n\t\trequire.FailNow(\"AcquireWait blocked unexpectedly\")\n\t}\n}\n\nfunc TestCloseable(t *testing.T) {\n\tfor _, c := range []int{1, 2, 5} {\n\t\tt.Run(fmt.Sprintf(\"count=%d\", c), func(t *testing.T) {\n\t\t\tt.Run(\"TryAcquire\", func(t *testing.T) {\n\t\t\t\ttestCloseableTryAcquire(t, NewCloseable(c), c)\n\t\t\t})\n\n\t\t\tt.Run(\"Acquire\", func(t *testing.T) {\n\t\t\t\tt.Run(\"Success\", func(t *testing.T) {\n\t\t\t\t\ttestCloseableAcquireSuccess(t, NewCloseable(c), c)\n\t\t\t\t})\n\n\t\t\t\tt.Run(\"Close\", func(t *testing.T) {\n\t\t\t\t\ttestCloseableAcquireClose(t, NewCloseable(c), c)\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tt.Run(\"AcquireWait\", func(t *testing.T) {\n\t\t\t\ttestCloseableAcquireWait(t, NewCloseable(c), c)\n\t\t\t})\n\n\t\t\tt.Run(\"AcquireCtx\", func(t *testing.T) {\n\t\t\t\ttestCloseableAcquireCtx(t, NewCloseable(c), c)\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestCloseableMutex(t *testing.T) {\n\tt.Run(\"TryAcquire\", func(t *testing.T) {\n\t\ttestCloseableTryAcquire(t, CloseableMutex(), 1)\n\t})\n\n\tt.Run(\"Acquire\", func(t *testing.T) {\n\t\tt.Run(\"Success\", func(t *testing.T) {\n\t\t\ttestCloseableAcquireSuccess(t, CloseableMutex(), 1)\n\t\t})\n\n\t\tt.Run(\"Close\", func(t *testing.T) {\n\t\t\ttestCloseableAcquireClose(t, CloseableMutex(), 1)\n\t\t})\n\t})\n\n\tt.Run(\"AcquireWait\", func(t *testing.T) {\n\t\ttestCloseableAcquireWait(t, CloseableMutex(), 1)\n\t})\n\n\tt.Run(\"AcquireCtx\", func(t *testing.T) {\n\t\ttestCloseableAcquireCtx(t, CloseableMutex(), 1)\n\t})\n}\n<commit_msg>Full tests for Closeable<commit_after>package semaphore\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc testNewCloseableInvalidCount(t *testing.T) {\n\tfor _, c := range []int{0, -1} {\n\t\tt.Run(strconv.Itoa(c), func(t *testing.T) {\n\t\t\tassert.Panics(t, func() {\n\t\t\t\tNewCloseable(c)\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc testNewCloseableValidCount(t *testing.T) {\n\tfor _, c := range []int{1, 2, 5} {\n\t\tt.Run(strconv.Itoa(c), func(t *testing.T) {\n\t\t\ts := NewCloseable(c)\n\t\t\tassert.NotNil(t, s)\n\t\t})\n\t}\n}\n\nfunc TestNewCloseable(t *testing.T) {\n\tt.Run(\"InvalidCount\", testNewCloseableInvalidCount)\n\tt.Run(\"ValidCount\", testNewCloseableValidCount)\n}\n\nfunc testCloseableTryAcquire(t *testing.T, cs Closeable, totalCount int) {\n\tassert := assert.New(t)\n\tfor i := 0; i < totalCount; i++ {\n\t\tassert.True(cs.TryAcquire())\n\t}\n\n\tassert.False(cs.TryAcquire())\n\tassert.NoError(cs.Release())\n\tassert.True(cs.TryAcquire())\n\tassert.False(cs.TryAcquire())\n\n\tassert.NoError(cs.Release())\n\tassert.NoError(cs.Close())\n\tassert.False(cs.TryAcquire())\n\tassert.Equal(ErrClosed, cs.Close())\n\tassert.Equal(ErrClosed, cs.Release())\n}\n\nfunc testCloseableAcquireSuccess(t *testing.T, cs Closeable, totalCount int) {\n\tvar (\n\t\tassert = assert.New(t)\n\t\trequire = require.New(t)\n\t)\n\n\t\/\/ acquire all the things!\n\tfor i := 0; i < totalCount; i++ {\n\t\tdone := make(chan struct{})\n\t\tgo func() {\n\t\t\tdefer close(done)\n\t\t\tcs.Acquire()\n\t\t}()\n\n\t\tselect {\n\t\tcase <-done:\n\t\t\t\/\/ passing\n\t\tcase <-time.After(time.Second):\n\t\t\tassert.FailNow(\"Acquire blocked unexpectedly\")\n\t\t}\n\t}\n\n\t\/\/ post condition: no point continuing if this fails\n\trequire.False(cs.TryAcquire())\n\n\tvar (\n\t\tready = make(chan struct{})\n\t\tacquired = make(chan struct{})\n\t)\n\n\tgo func() {\n\t\tdefer close(acquired)\n\t\tclose(ready)\n\t\tcs.Acquire() \/\/ this should now block\n\t}()\n\n\tselect {\n\tcase <-ready:\n\t\t\/\/ passing\n\t\trequire.False(cs.TryAcquire())\n\t\tcs.Release()\n\tcase <-time.After(time.Second):\n\t\trequire.FailNow(\"Unable to spawn acquire goroutine\")\n\t}\n\n\tselect {\n\tcase <-acquired:\n\t\trequire.False(cs.TryAcquire())\n\tcase <-time.After(time.Second):\n\t\trequire.FailNow(\"Acquire blocked unexpectedly\")\n\t}\n\n\tassert.NoError(cs.Release())\n\tassert.True(cs.TryAcquire())\n\tassert.NoError(cs.Release())\n}\n\nfunc testCloseableAcquireClose(t *testing.T, cs Closeable, totalCount int) {\n\tvar (\n\t\tassert = assert.New(t)\n\t\trequire = require.New(t)\n\n\t\tacquiredAll = make(chan struct{})\n\t\tresults = make(chan error, totalCount)\n\t\tcloseWait = make(chan struct{})\n\t)\n\n\tdefer cs.Close()\n\n\tgo func() {\n\t\tdefer close(acquiredAll)\n\t\tfor i := 0; i < totalCount; i++ {\n\t\t\tassert.NoError(cs.Acquire())\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-acquiredAll:\n\t\t\/\/ passing\n\tcase <-time.After(5 * time.Second):\n\t\tassert.FailNow(\"Unable to acquire all resources\")\n\t}\n\n\t\/\/ block multiple routines waiting to acquire the semaphore\n\tfor i := 0; i < totalCount; i++ {\n\t\tready := make(chan struct{})\n\t\tgo func() {\n\t\t\tclose(ready)\n\t\t\tresults <- cs.Acquire()\n\t\t}()\n\n\t\tselect {\n\t\tcase <-ready:\n\t\t\t\/\/ passing\n\t\tcase <-time.After(time.Second):\n\t\t\tassert.FailNow(\"Failed to spawn Acquire goroutine\")\n\t\t}\n\t}\n\n\tgo func() {\n\t\tdefer close(closeWait)\n\t\t<-cs.Closed()\n\t}()\n\n\t\/\/ post condition: no point continuing if this fails\n\trequire.False(cs.TryAcquire())\n\n\tassert.NoError(cs.Close())\n\tfor i := 0; i < totalCount; i++ {\n\t\tselect {\n\t\tcase err := <-results:\n\t\t\tassert.Equal(ErrClosed, err)\n\t\tcase <-time.After(5 * time.Second):\n\t\t\tassert.FailNow(\"Acquire blocked unexpectedly\")\n\t\t}\n\t}\n\n\tselect {\n\tcase <-closeWait:\n\t\tassert.False(cs.TryAcquire())\n\t\tassert.Equal(ErrClosed, cs.Close())\n\t\tassert.Equal(ErrClosed, cs.Acquire())\n\t\tassert.Equal(ErrClosed, cs.Release())\n\n\tcase <-time.After(5 * time.Second):\n\t\tassert.FailNow(\"Closed channel did not get signaled\")\n\t}\n}\n\nfunc testCloseableAcquireWaitSuccess(t *testing.T, cs Closeable, totalCount int) {\n\tvar (\n\t\tassert = assert.New(t)\n\t\trequire = require.New(t)\n\t\ttimer = make(chan time.Time)\n\t)\n\n\t\/\/ acquire all the things!\n\tfor i := 0; i < totalCount; i++ {\n\t\tresult := make(chan error)\n\t\tgo func() {\n\t\t\tresult <- cs.AcquireWait(timer)\n\t\t}()\n\n\t\tselect {\n\t\tcase err := <-result:\n\t\t\tassert.NoError(err)\n\t\tcase <-time.After(time.Second):\n\t\t\tassert.FailNow(\"Acquire blocked unexpectedly\")\n\t\t}\n\t}\n\n\tdefer cs.Close()\n\n\t\/\/ post condition: no point continuing if this fails\n\trequire.False(cs.TryAcquire())\n\n\tvar (\n\t\tready = make(chan struct{})\n\t\tresult = make(chan error)\n\t)\n\n\tgo func() {\n\t\tclose(ready)\n\t\tresult <- cs.AcquireWait(timer)\n\t}()\n\n\tselect {\n\tcase <-ready:\n\t\ttimer <- time.Time{}\n\tcase <-time.After(time.Second):\n\t\trequire.FailNow(\"Unable to spawn acquire goroutine\")\n\t}\n\n\tselect {\n\tcase err := <-result:\n\t\tassert.Equal(ErrTimeout, err)\n\tcase <-time.After(time.Second):\n\t\trequire.FailNow(\"AcquireWait blocked unexpectedly\")\n\t}\n}\n\nfunc testCloseableAcquireWaitClose(t *testing.T, cs Closeable, totalCount int) {\n\tvar (\n\t\tassert = assert.New(t)\n\t\trequire = require.New(t)\n\t\ttimer = make(chan time.Time)\n\n\t\tacquiredAll = make(chan struct{})\n\t\tresults = make(chan error, totalCount)\n\t\tcloseWait = make(chan struct{})\n\t)\n\n\tdefer cs.Close()\n\n\tgo func() {\n\t\tdefer close(acquiredAll)\n\t\tfor i := 0; i < totalCount; i++ {\n\t\t\tassert.NoError(cs.Acquire())\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-acquiredAll:\n\t\t\/\/ passing\n\tcase <-time.After(5 * time.Second):\n\t\tassert.FailNow(\"Unable to acquire all resources\")\n\t}\n\n\t\/\/ acquire all the things!\n\tfor i := 0; i < totalCount; i++ {\n\t\tready := make(chan struct{})\n\t\tgo func() {\n\t\t\tclose(ready)\n\t\t\tresults <- cs.AcquireWait(timer)\n\t\t}()\n\n\t\tselect {\n\t\tcase <-ready:\n\t\t\t\/\/ passing\n\t\tcase <-time.After(5 * time.Second):\n\t\t\tassert.FailNow(\"Failed to spawn AcquireWait goroutine\")\n\t\t}\n\t}\n\n\t\/\/ post condition: no point continuing if this fails\n\trequire.False(cs.TryAcquire())\n\n\tgo func() {\n\t\tdefer close(closeWait)\n\t\t<-cs.Closed()\n\t}()\n\n\tassert.NoError(cs.Close())\n\tfor i := 0; i < totalCount; i++ {\n\t\tselect {\n\t\tcase err := <-results:\n\t\t\tassert.Equal(ErrClosed, err)\n\t\tcase <-time.After(5 * time.Second):\n\t\t\tassert.FailNow(\"AcquireWait blocked unexpectedly\")\n\t\t}\n\t}\n\n\tselect {\n\tcase <-closeWait:\n\t\tassert.False(cs.TryAcquire())\n\t\tassert.Equal(ErrClosed, cs.Close())\n\t\tassert.Equal(ErrClosed, cs.Acquire())\n\t\tassert.Equal(ErrClosed, cs.Release())\n\n\tcase <-time.After(5 * time.Second):\n\t\tassert.FailNow(\"Closed channel did not get signaled\")\n\t}\n}\n\nfunc testCloseableAcquireCtxSuccess(t *testing.T, cs Closeable, totalCount int) {\n\tvar (\n\t\tassert = assert.New(t)\n\t\trequire = require.New(t)\n\t\tctx, cancel = context.WithCancel(context.Background())\n\t)\n\n\tdefer cancel()\n\n\t\/\/ acquire all the things!\n\tfor i := 0; i < totalCount; i++ {\n\t\tresult := make(chan error)\n\t\tgo func() {\n\t\t\tresult <- cs.AcquireCtx(ctx)\n\t\t}()\n\n\t\tselect {\n\t\tcase err := <-result:\n\t\t\tassert.NoError(err)\n\t\tcase <-time.After(time.Second):\n\t\t\tassert.FailNow(\"Acquire blocked unexpectedly\")\n\t\t}\n\t}\n\n\t\/\/ post condition: no point continuing if this fails\n\trequire.False(cs.TryAcquire())\n\n\tvar (\n\t\tready = make(chan struct{})\n\t\tresult = make(chan error)\n\t)\n\n\tgo func() {\n\t\tclose(ready)\n\t\tresult <- cs.AcquireCtx(ctx)\n\t}()\n\n\tselect {\n\tcase <-ready:\n\t\tcancel()\n\tcase <-time.After(time.Second):\n\t\trequire.FailNow(\"Unable to spawn acquire goroutine\")\n\t}\n\n\tselect {\n\tcase err := <-result:\n\t\tassert.Equal(ctx.Err(), err)\n\tcase <-time.After(time.Second):\n\t\trequire.FailNow(\"AcquireWait blocked unexpectedly\")\n\t}\n}\n\nfunc testCloseableAcquireCtxClose(t *testing.T, cs Closeable, totalCount int) {\n\tvar (\n\t\tassert = assert.New(t)\n\t\trequire = require.New(t)\n\t\tctx, cancel = context.WithCancel(context.Background())\n\n\t\tacquiredAll = make(chan struct{})\n\t\tresults = make(chan error, totalCount)\n\t\tcloseWait = make(chan struct{})\n\t)\n\n\tdefer cancel()\n\n\tgo func() {\n\t\tdefer close(acquiredAll)\n\t\tfor i := 0; i < totalCount; i++ {\n\t\t\tassert.NoError(cs.Acquire())\n\t\t}\n\t}()\n\n\tselect {\n\tcase <-acquiredAll:\n\t\t\/\/ passing\n\tcase <-time.After(5 * time.Second):\n\t\tassert.FailNow(\"Unable to acquire all resources\")\n\t}\n\n\t\/\/ acquire all the things!\n\tfor i := 0; i < totalCount; i++ {\n\t\tready := make(chan struct{})\n\t\tgo func() {\n\t\t\tclose(ready)\n\t\t\tresults <- cs.AcquireCtx(ctx)\n\t\t}()\n\n\t\tselect {\n\t\tcase <-ready:\n\t\t\t\/\/ passing\n\t\tcase <-time.After(5 * time.Second):\n\t\t\tassert.FailNow(\"Could not spawn AcquireCtx goroutine\")\n\t\t}\n\t}\n\n\t\/\/ post condition: no point continuing if this fails\n\trequire.False(cs.TryAcquire())\n\n\tgo func() {\n\t\tdefer close(closeWait)\n\t\t<-cs.Closed()\n\t}()\n\n\tassert.NoError(cs.Close())\n\tfor i := 0; i < totalCount; i++ {\n\t\tselect {\n\t\tcase err := <-results:\n\t\t\tassert.Equal(ErrClosed, err)\n\t\tcase <-time.After(5 * time.Second):\n\t\t\tassert.FailNow(\"AcquireCtx blocked unexpectedly\")\n\t\t}\n\t}\n\n\tselect {\n\tcase <-closeWait:\n\t\tassert.False(cs.TryAcquire())\n\t\tassert.Equal(ErrClosed, cs.Close())\n\t\tassert.Equal(ErrClosed, cs.Acquire())\n\t\tassert.Equal(ErrClosed, cs.Release())\n\n\tcase <-time.After(5 * time.Second):\n\t\tassert.FailNow(\"Closed channel did not get signaled\")\n\t}\n}\n\nfunc TestCloseable(t *testing.T) {\n\tfor _, c := range []int{1, 2, 5} {\n\t\tt.Run(fmt.Sprintf(\"count=%d\", c), func(t *testing.T) {\n\t\t\tt.Run(\"TryAcquire\", func(t *testing.T) {\n\t\t\t\ttestCloseableTryAcquire(t, NewCloseable(c), c)\n\t\t\t})\n\n\t\t\tt.Run(\"Acquire\", func(t *testing.T) {\n\t\t\t\tt.Run(\"Success\", func(t *testing.T) {\n\t\t\t\t\ttestCloseableAcquireSuccess(t, NewCloseable(c), c)\n\t\t\t\t})\n\n\t\t\t\tt.Run(\"Close\", func(t *testing.T) {\n\t\t\t\t\ttestCloseableAcquireClose(t, NewCloseable(c), c)\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tt.Run(\"AcquireWait\", func(t *testing.T) {\n\t\t\t\tt.Run(\"Success\", func(t *testing.T) {\n\t\t\t\t\ttestCloseableAcquireWaitSuccess(t, NewCloseable(c), c)\n\t\t\t\t})\n\n\t\t\t\tt.Run(\"Close\", func(t *testing.T) {\n\t\t\t\t\ttestCloseableAcquireWaitClose(t, NewCloseable(c), c)\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tt.Run(\"AcquireCtx\", func(t *testing.T) {\n\t\t\t\tt.Run(\"Success\", func(t *testing.T) {\n\t\t\t\t\ttestCloseableAcquireCtxSuccess(t, NewCloseable(c), c)\n\t\t\t\t})\n\n\t\t\t\tt.Run(\"Close\", func(t *testing.T) {\n\t\t\t\t\ttestCloseableAcquireCtxClose(t, NewCloseable(c), c)\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestCloseableMutex(t *testing.T) {\n\tt.Run(\"TryAcquire\", func(t *testing.T) {\n\t\ttestCloseableTryAcquire(t, CloseableMutex(), 1)\n\t})\n\n\tt.Run(\"Acquire\", func(t *testing.T) {\n\t\tt.Run(\"Success\", func(t *testing.T) {\n\t\t\ttestCloseableAcquireSuccess(t, CloseableMutex(), 1)\n\t\t})\n\n\t\tt.Run(\"Close\", func(t *testing.T) {\n\t\t\ttestCloseableAcquireClose(t, CloseableMutex(), 1)\n\t\t})\n\t})\n\n\tt.Run(\"AcquireWait\", func(t *testing.T) {\n\t\tt.Run(\"Success\", func(t *testing.T) {\n\t\t\ttestCloseableAcquireWaitSuccess(t, CloseableMutex(), 1)\n\t\t})\n\n\t\tt.Run(\"Close\", func(t *testing.T) {\n\t\t\ttestCloseableAcquireWaitClose(t, CloseableMutex(), 1)\n\t\t})\n\t})\n\n\tt.Run(\"AcquireCtx\", func(t *testing.T) {\n\t\tt.Run(\"Success\", func(t *testing.T) {\n\t\t\ttestCloseableAcquireCtxSuccess(t, CloseableMutex(), 1)\n\t\t})\n\n\t\tt.Run(\"Close\", func(t *testing.T) {\n\t\t\ttestCloseableAcquireCtxClose(t, CloseableMutex(), 1)\n\t\t})\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"fmt\"\n\ntype NodeId int\ntype EdgeId int\n\ntype Graph struct {\n\tnodes map[NodeId]*Node\n\tedges map[EdgeId]*Edge\n}\n\nfunc NewGraph() *Graph {\n\treturn &Graph{\n\t\tnodes: make(map[NodeId]*Node),\n\t\tedges: make(map[EdgeId]*Edge),\n\t}\n}\n\nfunc (g *Graph) AddNode(node Node) {\n\tg.nodes[node.Id] = &node\n}\n\nfunc (g *Graph) GetNode(nodeId NodeId) Node {\n\treturn *g.nodes[nodeId]\n}\n\nfunc (g *Graph) AddEdge(edge Edge) {\n\tg.edges[edge.Id] = &edge\n}\n\nfunc (g *Graph) String() string {\n\treturn fmt.Sprintf(\"%+v\\n %+v\", g.nodes, g.edges)\n}\n\ntype Node struct {\n\tId NodeId\n}\n\ntype Edge struct {\n\tId EdgeId\n\tHead NodeId\n\tTail NodeId\n}\n<commit_msg>Removed pointers<commit_after>package main\n\nimport \"fmt\"\n\ntype NodeId int\ntype EdgeId int\n\ntype Graph struct {\n\tnodes map[NodeId]Node\n\tedges map[EdgeId]Edge\n}\n\nfunc NewGraph() *Graph {\n\treturn &Graph{\n\t\tnodes: make(map[NodeId]Node),\n\t\tedges: make(map[EdgeId]Edge),\n\t}\n}\n\nfunc (g *Graph) AddNode(node Node) {\n\tg.nodes[node.Id] = node\n}\n\nfunc (g *Graph) GetNode(nodeId NodeId) Node {\n\treturn g.nodes[nodeId]\n}\n\nfunc (g *Graph) AddEdge(edge Edge) {\n\tg.edges[edge.Id] = edge\n}\n\nfunc (g *Graph) String() string {\n\treturn fmt.Sprintf(\"%+v\\n %+v\", g.nodes, g.edges)\n}\n\ntype Node struct {\n\tId NodeId\n}\n\ntype Edge struct {\n\tId EdgeId\n\tHead NodeId\n\tTail NodeId\n}\n<|endoftext|>"} {"text":"<commit_before>package x86_16\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/lunixbochs\/argjoy\"\n\t\"github.com\/pkg\/errors\"\n\n\tco \"github.com\/lunixbochs\/usercorn\/go\/kernel\/common\"\n\t\"github.com\/lunixbochs\/usercorn\/go\/models\"\n\tuc \"github.com\/unicorn-engine\/unicorn\/bindings\/go\/unicorn\"\n)\n\nconst (\n\tSTACK_BASE = 0x8000\n\tSTACK_SIZE = 0x1000\n\tNUM_FDS = 256\n\n\t\/\/ Registers\n\tAH = uc.X86_REG_AH\n\tAL = uc.X86_REG_AL\n\tAX = uc.X86_REG_AX\n\tBH = uc.X86_REG_BH\n\tBL = uc.X86_REG_BL\n\tBP = uc.X86_REG_BP\n\tBX = uc.X86_REG_BX\n\tCH = uc.X86_REG_CH\n\tCL = uc.X86_REG_CL\n\tCS = uc.X86_REG_CS\n\tCX = uc.X86_REG_CX\n\tDH = uc.X86_REG_DH\n\tDI = uc.X86_REG_DI\n\tDL = uc.X86_REG_DL\n\tDS = uc.X86_REG_DS\n\tDX = uc.X86_REG_DX\n\tES = uc.X86_REG_ES\n\tFS = uc.X86_REG_FS\n\tGS = uc.X86_REG_GS\n\tIP = uc.X86_REG_IP\n\tSI = uc.X86_REG_SI\n\tSP = uc.X86_REG_SP\n\tSS = uc.X86_REG_SS\n\tFLAGS = uc.X86_REG_EFLAGS\n)\n\nfunc (k *DosKernel) reg16(enum int) uint16 {\n\tr, _ := k.U.RegRead(enum)\n\treturn uint16(r)\n}\nfunc (k *DosKernel) reg8(enum int) uint8 {\n\tr, _ := k.U.RegRead(enum)\n\treturn uint8(r)\n}\nfunc (k *DosKernel) wreg16(enum int, val uint16) {\n\tk.U.RegWrite(enum, uint64(val))\n}\nfunc (k *DosKernel) wreg8(enum int, val uint8) {\n\tk.U.RegWrite(enum, uint64(val))\n}\nfunc (k *DosKernel) setFlagC(set bool) {\n\t\/\/ TODO: Write setFlagX with enum for each flag\n\t\/\/ Unicorn doesn't have the non-extended FLAGS register, so we're\n\t\/\/ dealing with 32 bits here\n\tflags, _ := k.U.RegRead(FLAGS)\n\tif set {\n\t\tflags |= 1 \/\/ CF = 1\n\t} else {\n\t\tflags &= 0xfffffffe \/\/ CF = 0\n\t}\n\tk.U.RegWrite(FLAGS, flags)\n}\n\nvar dosSysNum = map[int]string{\n\t0x00: \"terminate\",\n\t0x01: \"char_in\",\n\t0x02: \"char_out\",\n\t0x09: \"display\",\n\t0x30: \"get_dos_version\",\n\t0x3C: \"create_or_truncate\",\n\t0x3D: \"open\",\n\t0x3E: \"close\",\n\t0x3F: \"read\",\n\t0x40: \"write\",\n\t0x41: \"unlink\",\n\t0x4C: \"terminate_with_code\",\n}\n\n\/\/ TODO: Create a reverse map of this for conciseness\nvar abiMap = map[int][]int{\n\t0x00: {},\n\t0x01: {DX},\n\t0x02: {DX},\n\t0x09: {DX},\n\t0x30: {},\n\t0x3C: {DX, CX},\n\t0x3D: {DX, AL},\n\t0x3E: {BX},\n\t0x3F: {BX, DX, CX},\n\t0x40: {BX, DX, CX},\n\t0x41: {DX, CX},\n\t0x4C: {AL},\n}\n\ntype PSP struct {\n\tCPMExit [2]byte\n\tFirstFreeSegment uint16\n\tReserved1 uint8\n\tCPMCall5Compat [5]byte\n\tOldTSRAddress uint32\n\tOldBreakAddress uint32\n\tCriticalErrorHandlerAddress uint32\n\tCallerPSPSegment uint16\n\tJobFileTable [20]byte\n\tEnvironmentSegment uint16\n\tINT21SSSP uint32\n\tJobFileTableSize uint16\n\tJobFileTablePointer uint32\n\tPreviousPSP uint32\n\tReserved2 uint32\n\tDOSVersion uint16\n\tReserved3 [14]byte\n\tDOSFarCall [3]byte\n\tReserved4 uint16\n\tExtendedFCB1 [7]byte\n\tFCB1 [16]byte\n\tFCB2 [20]byte\n\tCommandLineLength uint8\n\tCommandLine [127]byte\n}\n\ntype DosKernel struct {\n\t*co.KernelBase\n\tfds [NUM_FDS]int\n}\n\nfunc initPsp(argc int, argv []string) *PSP {\n\tpsp := &PSP{\n\t\tCPMExit: [2]byte{0xcd, 0x20}, \/\/ int 0x20\n\t\tDOSFarCall: [3]byte{0xcd, 0x21, 0xcd}, \/\/ int 0x21 + retf\n\t}\n\n\tpsp.FCB1[0] = 0x01\n\tpsp.FCB1[1] = 0x20\n\n\t\/\/ Combine all args into one string\n\tcommandline := strings.Join(argv, \" \")\n\tcopy(psp.CommandLine[:126], commandline)\n\tif len(commandline) > 126 {\n\t\tpsp.CommandLineLength = 126\n\t} else {\n\t\tpsp.CommandLineLength = uint8(len(commandline))\n\t}\n\n\treturn psp\n}\n\nfunc (k *DosKernel) readUntilChar(addr uint64, c byte) []byte {\n\tvar mem []byte\n\tvar i uint64\n\tvar char byte = 0\n\n\t\/\/ TODO: Read ahead? This'll be slow\n\tfor i = 1; char != c || i == 1; i++ {\n\t\tmem, _ = k.U.MemRead(addr, i)\n\t\tchar = mem[i-1]\n\t}\n\treturn mem[:i-2]\n}\n\nfunc (k *DosKernel) getFd(fd int) (uint16, error) {\n\tfor i := uint16(0); i < NUM_FDS; i++ {\n\t\tif k.fds[i] == -1 {\n\t\t\tk.fds[i] = fd\n\t\t\treturn i, nil\n\t\t}\n\t}\n\treturn 0xFFFF, errors.New(\"DOS FD table exhausted\")\n}\n\nfunc (k *DosKernel) freeFd(fd int) (int, error) {\n\trealfd := k.fds[fd]\n\tif realfd == -1 {\n\t\treturn 0xFFFF, errors.New(\"FD not found in FD table\")\n\t}\n\tk.fds[fd] = -1\n\treturn realfd, nil\n}\n\nfunc (k *DosKernel) Terminate() {\n\tk.U.Exit(models.ExitStatus(0))\n}\n\nfunc (k *DosKernel) CharIn(buf co.Buf) byte {\n\tvar char byte\n\tfmt.Scanf(\"%c\", &char)\n\tk.U.MemWrite(buf.Addr, []byte{char})\n\treturn char\n}\n\nfunc (k *DosKernel) CharOut(char uint16) byte {\n\tfmt.Printf(\"%c\", byte(char&0xFF))\n\treturn byte(char & 0xFF)\n}\n\nfunc (k *DosKernel) Display(buf co.Buf) int {\n\tmem := k.readUntilChar(buf.Addr, '$')\n\n\tsyscall.Write(1, mem)\n\tk.wreg8(AL, 0x24)\n\treturn 0x24\n}\n\nfunc (k *DosKernel) GetDosVersion() int {\n\tk.wreg16(AX, 0x7)\n\treturn 0x7\n}\n\nfunc (k *DosKernel) openFile(filename string, mode int) uint16 {\n\trealfd, err := syscall.Open(filename, mode, 0)\n\tif err != nil {\n\t\tk.wreg16(AX, 0xFFFF)\n\t\tk.setFlagC(true)\n\t\treturn 0xFFFF\n\t}\n\n\t\/\/ Find an internal fd number\n\tdosfd, err := k.getFd(realfd)\n\tif err != nil {\n\t\tk.wreg16(AX, dosfd)\n\t\tk.setFlagC(true)\n\t\treturn 0xFFFF\n\t}\n\tk.setFlagC(false)\n\tk.wreg16(AX, dosfd)\n\treturn dosfd\n}\n\nfunc (k *DosKernel) CreateOrTruncate(buf co.Buf, attr int) uint16 {\n\tfilename := string(k.readUntilChar(buf.Addr, '$'))\n\treturn k.openFile(filename, syscall.O_CREAT|syscall.O_TRUNC|syscall.O_RDWR)\n}\n\nfunc (k *DosKernel) Open(filename string, mode int) uint16 {\n\treturn k.openFile(filename, mode)\n}\n\nfunc (k *DosKernel) Close(fd int) {\n\t\/\/ Find and free the internal fd\n\trealfd, _ := k.freeFd(fd)\n\terr := syscall.Close(realfd)\n\tif err != nil {\n\t\tk.setFlagC(true)\n\t\t\/\/ TODO: Set AX to error code\n\t\tk.wreg16(AX, 0xFFFF)\n\t}\n\tk.setFlagC(false)\n\tk.wreg16(AX, 0)\n}\n\nfunc (k *DosKernel) Read(fd int, buf co.Obuf, len co.Len) int {\n\tmem := make([]byte, len)\n\tn, err := syscall.Read(fd, mem)\n\tif err != nil {\n\t\tk.setFlagC(true)\n\t\t\/\/ TODO: Set AX to error code\n\t\tk.wreg16(AX, 0xFFFF)\n\t}\n\tk.U.MemWrite(buf.Addr, mem)\n\tk.setFlagC(false)\n\tk.wreg16(AX, uint16(n))\n\treturn n\n}\n\nfunc (k *DosKernel) Write(fd uint, buf co.Buf, n co.Len) int {\n\tmem, _ := k.U.MemRead(buf.Addr, uint64(n))\n\twritten, err := syscall.Write(k.fds[fd], mem)\n\tif err != nil {\n\t\tk.setFlagC(true)\n\t\t\/\/ TODO: Set AX to error code\n\t\tk.wreg16(AX, 0xFFFF)\n\t}\n\tk.setFlagC(false)\n\tk.wreg16(AX, uint16(written))\n\treturn written\n}\n\nfunc (k *DosKernel) Unlink(filename string, attr int) int {\n\terr := syscall.Unlink(filename)\n\tif err != nil {\n\t\tk.setFlagC(true)\n\t\tk.wreg16(AX, 0xFFFF)\n\t\treturn 0xFFFF\n\t}\n\tk.setFlagC(false)\n\tk.wreg16(AX, 0)\n\treturn 0\n}\n\nfunc (k *DosKernel) TerminateWithCode(code int) {\n\tk.U.Exit(models.ExitStatus(code))\n}\n\nfunc NewKernel() *DosKernel {\n\tk := &DosKernel{\n\t\tKernelBase: &co.KernelBase{},\n\t}\n\n\t\/\/ Init FDs\n\tfor i := 0; i < NUM_FDS; i++ {\n\t\tk.fds[i] = -1\n\t}\n\tk.fds[0] = 0\n\tk.fds[1] = 1\n\tk.fds[2] = 2\n\n\tk.Argjoy.Register(k.getDosArgCodec())\n\treturn k\n}\n\nfunc DosInit(u models.Usercorn, args, env []string) error {\n\t\/\/ Setup PSP\n\t\/\/ TODO: Setup args\n\tpsp := initPsp(0, nil)\n\tu.StrucAt(0).Pack(psp)\n\n\t\/\/ Setup stack\n\tu.RegWrite(u.Arch().SP, STACK_BASE+STACK_SIZE)\n\tu.SetStackBase(STACK_BASE)\n\tu.SetStackSize(STACK_SIZE)\n\tu.SetEntry(0x100)\n\treturn nil\n}\n\nfunc DosSyscall(u models.Usercorn) {\n\tnum, _ := u.RegRead(AH)\n\tname, _ := dosSysNum[int(num)]\n\t\/\/ TODO: How are registers numbered from here?\n\tu.Syscall(int(num), name, dosArgs(u, int(num)))\n\t\/\/ TODO: Set error\n}\n\nfunc (k *DosKernel) getDosArgCodec() func(interface{}, []interface{}) error {\n\treturn func(arg interface{}, vals []interface{}) error {\n\t\t\/\/ DOS takes address as DS+DX\n\t\tif reg, ok := vals[0].(uint64); ok && len(vals) > 1 {\n\t\t\tds, _ := k.U.RegRead(DS)\n\t\t\treg += ds\n\t\t\tswitch v := arg.(type) {\n\t\t\tcase *co.Buf:\n\t\t\t\t*v = co.NewBuf(k, reg)\n\t\t\tcase *co.Obuf:\n\t\t\t\t*v = co.Obuf{co.NewBuf(k, reg)}\n\t\t\tcase *co.Ptr:\n\t\t\t\t*v = co.Ptr(reg)\n\t\t\tcase *string:\n\t\t\t\ts, err := k.U.Mem().ReadStrAt(reg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.Wrapf(err, \"ReadStrAt(%#x) failed\", reg)\n\t\t\t\t}\n\t\t\t\t*v = s\n\t\t\tdefault:\n\t\t\t\treturn argjoy.NoMatch\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\treturn argjoy.NoMatch\n\t}\n}\n\nfunc dosArgs(u models.Usercorn, num int) func(int) ([]uint64, error) {\n\treturn co.RegArgs(u, abiMap[num])\n}\n\nfunc DosInterrupt(u models.Usercorn, cause uint32) {\n\tintno := cause & 0xFF\n\tif intno == 0x21 {\n\t\tDosSyscall(u)\n\t} else if intno == 0x20 {\n\t\tu.Syscall(0, \"terminate\", func(int) ([]uint64, error) { return []uint64{}, nil })\n\t} else {\n\t\tpanic(fmt.Sprintf(\"unhandled X86 interrupt %#X\", intno))\n\t}\n}\nfunc DosKernels(u models.Usercorn) []interface{} {\n\treturn []interface{}{NewKernel()}\n}\n\nfunc init() {\n\tArch.RegisterOS(&models.OS{\n\t\tName: \"DOS\",\n\t\tInit: DosInit,\n\t\tInterrupt: DosInterrupt,\n\t\tKernels: DosKernels,\n\t})\n}\n<commit_msg>DOS: Use FD type where appropriate<commit_after>package x86_16\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/lunixbochs\/argjoy\"\n\t\"github.com\/pkg\/errors\"\n\n\tco \"github.com\/lunixbochs\/usercorn\/go\/kernel\/common\"\n\t\"github.com\/lunixbochs\/usercorn\/go\/models\"\n\tuc \"github.com\/unicorn-engine\/unicorn\/bindings\/go\/unicorn\"\n)\n\nconst (\n\tSTACK_BASE = 0x8000\n\tSTACK_SIZE = 0x1000\n\tNUM_FDS = 256\n\n\t\/\/ Registers\n\tAH = uc.X86_REG_AH\n\tAL = uc.X86_REG_AL\n\tAX = uc.X86_REG_AX\n\tBH = uc.X86_REG_BH\n\tBL = uc.X86_REG_BL\n\tBP = uc.X86_REG_BP\n\tBX = uc.X86_REG_BX\n\tCH = uc.X86_REG_CH\n\tCL = uc.X86_REG_CL\n\tCS = uc.X86_REG_CS\n\tCX = uc.X86_REG_CX\n\tDH = uc.X86_REG_DH\n\tDI = uc.X86_REG_DI\n\tDL = uc.X86_REG_DL\n\tDS = uc.X86_REG_DS\n\tDX = uc.X86_REG_DX\n\tES = uc.X86_REG_ES\n\tFS = uc.X86_REG_FS\n\tGS = uc.X86_REG_GS\n\tIP = uc.X86_REG_IP\n\tSI = uc.X86_REG_SI\n\tSP = uc.X86_REG_SP\n\tSS = uc.X86_REG_SS\n\tFLAGS = uc.X86_REG_EFLAGS\n)\n\nfunc (k *DosKernel) reg16(enum int) uint16 {\n\tr, _ := k.U.RegRead(enum)\n\treturn uint16(r)\n}\nfunc (k *DosKernel) reg8(enum int) uint8 {\n\tr, _ := k.U.RegRead(enum)\n\treturn uint8(r)\n}\nfunc (k *DosKernel) wreg16(enum int, val uint16) {\n\tk.U.RegWrite(enum, uint64(val))\n}\nfunc (k *DosKernel) wreg8(enum int, val uint8) {\n\tk.U.RegWrite(enum, uint64(val))\n}\nfunc (k *DosKernel) setFlagC(set bool) {\n\t\/\/ TODO: Write setFlagX with enum for each flag\n\t\/\/ Unicorn doesn't have the non-extended FLAGS register, so we're\n\t\/\/ dealing with 32 bits here\n\tflags, _ := k.U.RegRead(FLAGS)\n\tif set {\n\t\tflags |= 1 \/\/ CF = 1\n\t} else {\n\t\tflags &= 0xfffffffe \/\/ CF = 0\n\t}\n\tk.U.RegWrite(FLAGS, flags)\n}\n\nvar dosSysNum = map[int]string{\n\t0x00: \"terminate\",\n\t0x01: \"char_in\",\n\t0x02: \"char_out\",\n\t0x09: \"display\",\n\t0x30: \"get_dos_version\",\n\t0x3C: \"create_or_truncate\",\n\t0x3D: \"open\",\n\t0x3E: \"close\",\n\t0x3F: \"read\",\n\t0x40: \"write\",\n\t0x41: \"unlink\",\n\t0x4C: \"terminate_with_code\",\n}\n\n\/\/ TODO: Create a reverse map of this for conciseness\nvar abiMap = map[int][]int{\n\t0x00: {},\n\t0x01: {DX},\n\t0x02: {DX},\n\t0x09: {DX},\n\t0x30: {},\n\t0x3C: {DX, CX},\n\t0x3D: {DX, AL},\n\t0x3E: {BX},\n\t0x3F: {BX, DX, CX},\n\t0x40: {BX, DX, CX},\n\t0x41: {DX, CX},\n\t0x4C: {AL},\n}\n\ntype PSP struct {\n\tCPMExit [2]byte\n\tFirstFreeSegment uint16\n\tReserved1 uint8\n\tCPMCall5Compat [5]byte\n\tOldTSRAddress uint32\n\tOldBreakAddress uint32\n\tCriticalErrorHandlerAddress uint32\n\tCallerPSPSegment uint16\n\tJobFileTable [20]byte\n\tEnvironmentSegment uint16\n\tINT21SSSP uint32\n\tJobFileTableSize uint16\n\tJobFileTablePointer uint32\n\tPreviousPSP uint32\n\tReserved2 uint32\n\tDOSVersion uint16\n\tReserved3 [14]byte\n\tDOSFarCall [3]byte\n\tReserved4 uint16\n\tExtendedFCB1 [7]byte\n\tFCB1 [16]byte\n\tFCB2 [20]byte\n\tCommandLineLength uint8\n\tCommandLine [127]byte\n}\n\ntype DosKernel struct {\n\t*co.KernelBase\n\tfds [NUM_FDS]int\n}\n\nfunc initPsp(argc int, argv []string) *PSP {\n\tpsp := &PSP{\n\t\tCPMExit: [2]byte{0xcd, 0x20}, \/\/ int 0x20\n\t\tDOSFarCall: [3]byte{0xcd, 0x21, 0xcd}, \/\/ int 0x21 + retf\n\t}\n\n\tpsp.FCB1[0] = 0x01\n\tpsp.FCB1[1] = 0x20\n\n\t\/\/ Combine all args into one string\n\tcommandline := strings.Join(argv, \" \")\n\tcopy(psp.CommandLine[:126], commandline)\n\tif len(commandline) > 126 {\n\t\tpsp.CommandLineLength = 126\n\t} else {\n\t\tpsp.CommandLineLength = uint8(len(commandline))\n\t}\n\n\treturn psp\n}\n\nfunc (k *DosKernel) readUntilChar(addr uint64, c byte) []byte {\n\tvar mem []byte\n\tvar i uint64\n\tvar char byte = 0\n\n\t\/\/ TODO: Read ahead? This'll be slow\n\tfor i = 1; char != c || i == 1; i++ {\n\t\tmem, _ = k.U.MemRead(addr, i)\n\t\tchar = mem[i-1]\n\t}\n\treturn mem[:i-2]\n}\n\nfunc (k *DosKernel) getFd(fd int) (uint16, error) {\n\tfor i := uint16(0); i < NUM_FDS; i++ {\n\t\tif k.fds[i] == -1 {\n\t\t\tk.fds[i] = fd\n\t\t\treturn i, nil\n\t\t}\n\t}\n\treturn 0xFFFF, errors.New(\"DOS FD table exhausted\")\n}\n\nfunc (k *DosKernel) freeFd(fd int) (int, error) {\n\trealfd := k.fds[fd]\n\tif realfd == -1 {\n\t\treturn 0xFFFF, errors.New(\"FD not found in FD table\")\n\t}\n\tk.fds[fd] = -1\n\treturn realfd, nil\n}\n\nfunc (k *DosKernel) Terminate() {\n\tk.U.Exit(models.ExitStatus(0))\n}\n\nfunc (k *DosKernel) CharIn(buf co.Buf) byte {\n\tvar char byte\n\tfmt.Scanf(\"%c\", &char)\n\tk.U.MemWrite(buf.Addr, []byte{char})\n\treturn char\n}\n\nfunc (k *DosKernel) CharOut(char uint16) byte {\n\tfmt.Printf(\"%c\", byte(char&0xFF))\n\treturn byte(char & 0xFF)\n}\n\nfunc (k *DosKernel) Display(buf co.Buf) int {\n\tmem := k.readUntilChar(buf.Addr, '$')\n\n\tsyscall.Write(1, mem)\n\tk.wreg8(AL, 0x24)\n\treturn 0x24\n}\n\nfunc (k *DosKernel) GetDosVersion() int {\n\tk.wreg16(AX, 0x7)\n\treturn 0x7\n}\n\nfunc (k *DosKernel) openFile(filename string, mode int) co.Fd {\n\trealfd, err := syscall.Open(filename, mode, 0)\n\tif err != nil {\n\t\tk.wreg16(AX, 0xFFFF)\n\t\tk.setFlagC(true)\n\t\treturn 0xFFFF\n\t}\n\n\t\/\/ Find an internal fd number\n\tdosfd, err := k.getFd(realfd)\n\tif err != nil {\n\t\tk.wreg16(AX, dosfd)\n\t\tk.setFlagC(true)\n\t\treturn 0xFFFF\n\t}\n\tk.setFlagC(false)\n\tk.wreg16(AX, dosfd)\n\treturn co.Fd(dosfd)\n}\n\nfunc (k *DosKernel) CreateOrTruncate(buf co.Buf, attr int) co.Fd {\n\tfilename := string(k.readUntilChar(buf.Addr, '$'))\n\treturn k.openFile(filename, syscall.O_CREAT|syscall.O_TRUNC|syscall.O_RDWR)\n}\n\nfunc (k *DosKernel) Open(filename string, mode int) co.Fd {\n\treturn k.openFile(filename, mode)\n}\n\nfunc (k *DosKernel) Close(fd co.Fd) {\n\t\/\/ Find and free the internal fd\n\trealfd, _ := k.freeFd(int(fd))\n\terr := syscall.Close(realfd)\n\tif err != nil {\n\t\tk.setFlagC(true)\n\t\t\/\/ TODO: Set AX to error code\n\t\tk.wreg16(AX, 0xFFFF)\n\t}\n\tk.setFlagC(false)\n\tk.wreg16(AX, 0)\n}\n\nfunc (k *DosKernel) Read(fd co.Fd, buf co.Obuf, len co.Len) int {\n\tmem := make([]byte, len)\n\tn, err := syscall.Read(int(fd), mem)\n\tif err != nil {\n\t\tk.setFlagC(true)\n\t\t\/\/ TODO: Set AX to error code\n\t\tk.wreg16(AX, 0xFFFF)\n\t}\n\tk.U.MemWrite(buf.Addr, mem)\n\tk.setFlagC(false)\n\tk.wreg16(AX, uint16(n))\n\treturn n\n}\n\nfunc (k *DosKernel) Write(fd co.Fd, buf co.Buf, n co.Len) int {\n\tmem, _ := k.U.MemRead(buf.Addr, uint64(n))\n\twritten, err := syscall.Write(k.fds[fd], mem)\n\tif err != nil {\n\t\tk.setFlagC(true)\n\t\t\/\/ TODO: Set AX to error code\n\t\tk.wreg16(AX, 0xFFFF)\n\t}\n\tk.setFlagC(false)\n\tk.wreg16(AX, uint16(written))\n\treturn written\n}\n\nfunc (k *DosKernel) Unlink(filename string, attr int) int {\n\terr := syscall.Unlink(filename)\n\tif err != nil {\n\t\tk.setFlagC(true)\n\t\tk.wreg16(AX, 0xFFFF)\n\t\treturn 0xFFFF\n\t}\n\tk.setFlagC(false)\n\tk.wreg16(AX, 0)\n\treturn 0\n}\n\nfunc (k *DosKernel) TerminateWithCode(code int) {\n\tk.U.Exit(models.ExitStatus(code))\n}\n\nfunc NewKernel() *DosKernel {\n\tk := &DosKernel{\n\t\tKernelBase: &co.KernelBase{},\n\t}\n\n\t\/\/ Init FDs\n\tfor i := 0; i < NUM_FDS; i++ {\n\t\tk.fds[i] = -1\n\t}\n\tk.fds[0] = 0\n\tk.fds[1] = 1\n\tk.fds[2] = 2\n\n\tk.Argjoy.Register(k.getDosArgCodec())\n\treturn k\n}\n\nfunc DosInit(u models.Usercorn, args, env []string) error {\n\t\/\/ Setup PSP\n\t\/\/ TODO: Setup args\n\tpsp := initPsp(0, nil)\n\tu.StrucAt(0).Pack(psp)\n\n\t\/\/ Setup stack\n\tu.RegWrite(u.Arch().SP, STACK_BASE+STACK_SIZE)\n\tu.SetStackBase(STACK_BASE)\n\tu.SetStackSize(STACK_SIZE)\n\tu.SetEntry(0x100)\n\treturn nil\n}\n\nfunc DosSyscall(u models.Usercorn) {\n\tnum, _ := u.RegRead(AH)\n\tname, _ := dosSysNum[int(num)]\n\t\/\/ TODO: How are registers numbered from here?\n\tu.Syscall(int(num), name, dosArgs(u, int(num)))\n\t\/\/ TODO: Set error\n}\n\nfunc (k *DosKernel) getDosArgCodec() func(interface{}, []interface{}) error {\n\treturn func(arg interface{}, vals []interface{}) error {\n\t\t\/\/ DOS takes address as DS+DX\n\t\tif reg, ok := vals[0].(uint64); ok && len(vals) > 1 {\n\t\t\tds, _ := k.U.RegRead(DS)\n\t\t\treg += ds\n\t\t\tswitch v := arg.(type) {\n\t\t\tcase *co.Buf:\n\t\t\t\t*v = co.NewBuf(k, reg)\n\t\t\tcase *co.Obuf:\n\t\t\t\t*v = co.Obuf{co.NewBuf(k, reg)}\n\t\t\tcase *co.Ptr:\n\t\t\t\t*v = co.Ptr(reg)\n\t\t\tcase *string:\n\t\t\t\ts, err := k.U.Mem().ReadStrAt(reg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.Wrapf(err, \"ReadStrAt(%#x) failed\", reg)\n\t\t\t\t}\n\t\t\t\t*v = s\n\t\t\tdefault:\n\t\t\t\treturn argjoy.NoMatch\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\treturn argjoy.NoMatch\n\t}\n}\n\nfunc dosArgs(u models.Usercorn, num int) func(int) ([]uint64, error) {\n\treturn co.RegArgs(u, abiMap[num])\n}\n\nfunc DosInterrupt(u models.Usercorn, cause uint32) {\n\tintno := cause & 0xFF\n\tif intno == 0x21 {\n\t\tDosSyscall(u)\n\t} else if intno == 0x20 {\n\t\tu.Syscall(0, \"terminate\", func(int) ([]uint64, error) { return []uint64{}, nil })\n\t} else {\n\t\tpanic(fmt.Sprintf(\"unhandled X86 interrupt %#X\", intno))\n\t}\n}\nfunc DosKernels(u models.Usercorn) []interface{} {\n\treturn []interface{}{NewKernel()}\n}\n\nfunc init() {\n\tArch.RegisterOS(&models.OS{\n\t\tName: \"DOS\",\n\t\tInit: DosInit,\n\t\tInterrupt: DosInterrupt,\n\t\tKernels: DosKernels,\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\t\"sample\/sampleworld\"\n\n\t\"v.io\/v23\"\n\n\t\"v.io\/x\/ref\/lib\/expect\"\n\t\"v.io\/x\/ref\/lib\/flags\/consts\"\n\t\"v.io\/x\/ref\/lib\/modules\"\n\t\"v.io\/x\/ref\/lib\/modules\/core\"\n\t\"v.io\/x\/ref\/lib\/signals\"\n\t\"v.io\/x\/ref\/profiles\"\n)\n\nconst (\n\tSampleWorldCommand = \"sampleWorld\" \/\/ The modules library command.\n\tstdoutLog = \"tmp\/runner.stdout.log\" \/\/ Used as stdout drain when shutting down.\n\tstderrLog = \"tmp\/runner.stderr.log\" \/\/ Used as stderr drain when shutting down.\n)\n\nvar (\n\t\/\/ Flags used as input to this program.\n\trunSample bool\n\tserveHTTP bool\n\tportHTTP string\n\trootHTTP string\n\trunTests bool\n\trunTestsWatch bool\n)\n\nfunc init() {\n\tmodules.RegisterChild(SampleWorldCommand, \"desc\", sampleWorld)\n\tflag.BoolVar(&runSample, \"runSample\", false, \"if true, runs sample services\")\n\tflag.BoolVar(&serveHTTP, \"serveHTTP\", false, \"if true, serves HTTP\")\n\tflag.StringVar(&portHTTP, \"portHTTP\", \"9001\", \"default 9001, the port to serve HTTP on\")\n\tflag.StringVar(&rootHTTP, \"rootHTTP\", \".\", \"default '.', the root HTTP folder path\")\n\tflag.BoolVar(&runTests, \"runTests\", false, \"if true, runs the namespace browser tests\")\n\tflag.BoolVar(&runTestsWatch, \"runTestsWatch\", false, \"if true && runTests, runs the tests in watch mode\")\n}\n\n\/\/ Helper function to simply panic on error.\nfunc panicOnError(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ updateVars captures the vars from the given Handle's stdout and adds them to\n\/\/ the given vars map, overwriting existing entries.\nfunc updateVars(h modules.Handle, vars map[string]string, varNames ...string) error {\n\tvarsToAdd := map[string]bool{}\n\tfor _, v := range varNames {\n\t\tvarsToAdd[v] = true\n\t}\n\tnumLeft := len(varsToAdd)\n\n\ts := expect.NewSession(nil, h.Stdout(), 30*time.Second)\n\tfor {\n\t\tl := s.ReadLine()\n\t\tif err := s.OriginalError(); err != nil {\n\t\t\treturn err \/\/ EOF or otherwise\n\t\t}\n\t\tparts := strings.Split(l, \"=\")\n\t\tif len(parts) != 2 {\n\t\t\treturn fmt.Errorf(\"Unexpected line: %s\", l)\n\t\t}\n\t\tif _, ok := varsToAdd[parts[0]]; ok {\n\t\t\tnumLeft--\n\t\t\tvars[parts[0]] = parts[1]\n\t\t\tif numLeft == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ The module command for running the sample world.\nfunc sampleWorld(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {\n\tctx, shutdown := v23.Init()\n\tdefer shutdown()\n\n\tsampleworld.RunSampleWorld(ctx)\n\n\tmodules.WaitForEOF(stdin)\n\treturn nil\n}\n\nfunc main() {\n\tif modules.IsModulesProcess() {\n\t\tpanicOnError(modules.Dispatch())\n\t\treturn\n\t}\n\n\t\/\/ Try running the program; on failure, exit with error status code.\n\tif !run() {\n\t\tos.Exit(1)\n\t}\n}\n\n\/\/ Runs the services and cleans up afterwards.\n\/\/ Returns true if the run was successful.\nfunc run() bool {\n\tctx, shutdown := v23.Init()\n\tdefer shutdown()\n\n\t\/\/ In order to prevent conflicts, tests and webapp use different mounttable ports.\n\tport := 5180\n\tcottagePort := 5181\n\thousePort := 5182\n\tif runTests {\n\t\tport = 8884\n\t\tcottagePort = 8885\n\t\thousePort = 8886\n\t}\n\n\t\/\/ Start a new shell module.\n\tvars := map[string]string{}\n\tsh, err := modules.NewShell(ctx, nil)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"modules.NewShell: %s\", err))\n\t}\n\n\t\/\/ Collect the output of this shell on termination.\n\terr = os.MkdirAll(\"tmp\", 0750)\n\tpanicOnError(err)\n\toutFile, err := os.Create(stdoutLog)\n\tpanicOnError(err)\n\tdefer outFile.Close()\n\terrFile, err := os.Create(stderrLog)\n\tpanicOnError(err)\n\tdefer errFile.Close()\n\tdefer sh.Cleanup(outFile, errFile)\n\n\t\/\/ Determine the hostname; this name will be used for mounting.\n\thostName, err := exec.Command(\"hostname\", \"-s\").Output()\n\tpanicOnError(err)\n\n\t\/\/ Run the host mounttable.\n\trootName := fmt.Sprintf(\"%s-home\", strings.TrimSpace(string(hostName))) \/\/ Must trim; hostname has \\n at the end.\n\thRoot, err := sh.Start(core.MTCommand, nil, \"--veyron.tcp.protocol=ws\", fmt.Sprintf(\"--veyron.tcp.address=127.0.0.1:%d\", port), rootName)\n\tpanicOnError(err)\n\tpanicOnError(updateVars(hRoot, vars, \"MT_NAME\"))\n\tdefer hRoot.Shutdown(outFile, errFile)\n\n\t\/\/ Set consts.NamespaceRootPrefix env var, consumed downstream.\n\tsh.SetVar(consts.NamespaceRootPrefix, vars[\"MT_NAME\"])\n\tv23.GetNamespace(ctx).SetRoots(vars[\"MT_NAME\"])\n\n\t\/\/ Run the cottage mounttable at host\/cottage.\n\thCottage, err := sh.Start(core.MTCommand, nil, \"--veyron.tcp.protocol=ws\", fmt.Sprintf(\"--veyron.tcp.address=127.0.0.1:%d\", cottagePort), \"cottage\")\n\tpanicOnError(err)\n\texpect.NewSession(nil, hCottage.Stdout(), 30*time.Second)\n\tdefer hCottage.Shutdown(outFile, errFile)\n\n\t\/\/ run the house mounttable at host\/house.\n\thHouse, err := sh.Start(core.MTCommand, nil, \"--veyron.tcp.protocol=ws\", fmt.Sprintf(\"--veyron.tcp.address=127.0.0.1:%d\", housePort), \"house\")\n\tpanicOnError(err)\n\texpect.NewSession(nil, hHouse.Stdout(), 30*time.Second)\n\tdefer hHouse.Shutdown(outFile, errFile)\n\n\t\/\/ Possibly run the sample world.\n\tif runSample {\n\t\tfmt.Println(\"Running Sample World\")\n\t\thSample, err := sh.Start(SampleWorldCommand, nil, \"--veyron.tcp.protocol=ws\", \"--veyron.tcp.address=127.0.0.1:0\")\n\t\tpanicOnError(err)\n\t\texpect.NewSession(nil, hSample.Stdout(), 30*time.Second)\n\t\tdefer hSample.Shutdown(outFile, errFile)\n\t}\n\n\t\/\/ Possibly serve the public bundle at the portHTTP.\n\tif serveHTTP {\n\t\tfmt.Printf(\"Also serving HTTP at %s for %s\\n\", portHTTP, rootHTTP)\n\t\thttp.ListenAndServe(\":\"+portHTTP, http.FileServer(http.Dir(rootHTTP)))\n\t}\n\n\t\/\/ Just print out the collected variables. This is for debugging purposes.\n\tbytes, err := json.Marshal(vars)\n\tpanicOnError(err)\n\tfmt.Println(string(bytes))\n\n\t\/\/ Possibly run the tests in Prova.\n\tif runTests {\n\t\t\/\/ Also set HOUSE_MOUNTTABLE (used in the tests)\n\t\tos.Setenv(\"HOUSE_MOUNTTABLE\", fmt.Sprintf(\"\/127.0.0.1:%d\", housePort))\n\n\t\tproxyShutdown, proxyEndpoint, err := profiles.NewProxy(ctx, \"ws\", \"127.0.0.1:0\", \"\", \"test\/proxy\")\n\t\tpanicOnError(err)\n\t\tdefer proxyShutdown()\n\t\tvars[\"PROXY_NAME\"] = proxyEndpoint.Name()\n\n\t\thIdentityd, err := sh.Start(core.TestIdentitydCommand, nil, \"--veyron.tcp.protocol=ws\", \"--veyron.tcp.address=127.0.0.1:0\", \"--veyron.proxy=test\/proxy\", \"--host=localhost\", \"--httpaddr=localhost:0\")\n\t\tpanicOnError(err)\n\t\tpanicOnError(updateVars(hIdentityd, vars, \"TEST_IDENTITYD_NAME\", \"TEST_IDENTITYD_HTTP_ADDR\"))\n\t\tdefer hIdentityd.Shutdown(outFile, errFile)\n\n\t\t\/\/ Setup a lot of environment variables; these are used for the tests and building the test extension.\n\t\tos.Setenv(\"NAMESPACE_ROOT\", vars[\"MT_NAME\"])\n\t\tos.Setenv(\"PROXY_ADDR\", vars[\"PROXY_NAME\"])\n\t\tos.Setenv(\"IDENTITYD\", fmt.Sprintf(\"%s\/google\", vars[\"TEST_IDENTITYD_NAME\"]))\n\t\tos.Setenv(\"IDENTITYD_BLESSING_URL\", fmt.Sprintf(\"%s\/blessing-root\", vars[\"TEST_IDENTITYD_HTTP_ADDR\"]))\n\t\tos.Setenv(\"DEBUG\", \"false\")\n\n\t\ttestsOk := runProva()\n\n\t\tfmt.Println(\"Cleaning up launched services...\")\n\t\treturn testsOk\n\t}\n\n\t\/\/ Not in a test, so run until the program is killed.\n\t<-signals.ShutdownOnSignals(ctx)\n\treturn true\n\n}\n\n\/\/ Run the prova tests and convert its tap output to xunit.\nfunc runProva() bool {\n\t\/\/ This is also useful information for routing the test output.\n\tVANADIUM_ROOT := os.Getenv(\"VANADIUM_ROOT\")\n\tVANADIUM_JS := fmt.Sprintf(\"%s\/release\/javascript\/core\", VANADIUM_ROOT)\n\tVANADIUM_BROWSER := fmt.Sprintf(\"%s\/release\/projects\/namespace_browser\", VANADIUM_ROOT)\n\n\tTAP_XUNIT := fmt.Sprintf(\"%s\/node_modules\/.bin\/tap-xunit\", VANADIUM_BROWSER)\n\tXUNIT_OUTPUT_FILE := os.Getenv(\"XUNIT_OUTPUT_FILE\")\n\tif XUNIT_OUTPUT_FILE == \"\" {\n\t\tXUNIT_OUTPUT_FILE = fmt.Sprintf(\"%s\/test_output.xml\", os.Getenv(\"TMPDIR\"))\n\t}\n\tTAP_XUNIT_OPTIONS := \" --package=namespace-browser\"\n\n\t\/\/ Make sure we're in the right folder when we run make test-extension.\n\tvbroot, err := os.Open(VANADIUM_BROWSER)\n\tpanicOnError(err)\n\terr = vbroot.Chdir()\n\tpanicOnError(err)\n\n\t\/\/ Make the test-extension, this should also remove the old one.\n\tfmt.Println(\"Rebuilding test extension...\")\n\tcmdExtensionClean := exec.Command(\"rm\", \"-fr\", fmt.Sprintf(\"%s\/extension\/build-test\", VANADIUM_JS))\n\terr = cmdExtensionClean.Run()\n\tpanicOnError(err)\n\tcmdExtensionBuild := exec.Command(\"make\", \"-C\", fmt.Sprintf(\"%s\/extension\", VANADIUM_JS), \"build-test\")\n\terr = cmdExtensionBuild.Run()\n\tpanicOnError(err)\n\n\t\/\/ These are the basic prova options.\n\toptions := []string{\n\t\t\"test\/**\/*.js\",\n\t\t\"--browser\",\n\t\t\"--includeFilenameAsPackage\",\n\t\t\"--launch\",\n\t\t\"chrome\",\n\t\t\"--plugin\",\n\t\t\"proxyquireify\/plugin\",\n\t\t\"--transform\",\n\t\t\"envify,.\/main-transform\",\n\t\t\"--log\",\n\t\t\"tmp\/chrome.log\",\n\t\tfmt.Sprintf(\"--options=--load-extension=%s\/extension\/build-test\/,--ignore-certificate-errors,--enable-logging=stderr\", VANADIUM_JS),\n\t}\n\n\t\/\/ Normal tests have a few more options and a different port from the watch tests.\n\tvar PROVA_PORT int\n\tif !runTestsWatch {\n\t\tPROVA_PORT = 8893\n\t\toptions = append(options, \"--headless\", \"--quit\", \"--progress\", \"--tap\")\n\t\tfmt.Printf(\"\\033[34m-Executing tests. See %s for test xunit output.\\033[0m\\n\", XUNIT_OUTPUT_FILE)\n\t} else {\n\t\tPROVA_PORT = 8894\n\t\tfmt.Println(\"\\033[34m-Running tests in watch mode.\\033[0m\")\n\t}\n\toptions = append(options, \"--port\", fmt.Sprintf(\"%d\", PROVA_PORT))\n\n\t\/\/ This is the prova command.\n\tcmdProva := exec.Command(\n\t\tfmt.Sprintf(\"%s\/node_modules\/.bin\/prova\", VANADIUM_BROWSER),\n\t\toptions...,\n\t)\n\tfmt.Printf(\"\\033[34m-Go to \\033[32mhttp:\/\/0.0.0.0:%d\\033[34m to see tests running.\\033[0m\\n\", PROVA_PORT)\n\tfmt.Println(cmdProva)\n\n\t\/\/ Collect the prova stdout. This information needs to be sent to xunit.\n\tprovaOut, err := cmdProva.StdoutPipe()\n\tpanicOnError(err)\n\n\t\/\/ Setup the tap to xunit command. It uses Prova's stdout as input.\n\t\/\/ The output will got the xunit output file.\n\tcmdTap := exec.Command(TAP_XUNIT, TAP_XUNIT_OPTIONS)\n\tcmdTap.Stdin = io.TeeReader(provaOut, os.Stdout) \/\/ Tee the prova output to see it on the console too.\n\toutfile, err := os.Create(XUNIT_OUTPUT_FILE)\n\tpanicOnError(err)\n\tdefer outfile.Close()\n\tbufferedWriter := bufio.NewWriter(outfile)\n\tcmdTap.Stdout = bufferedWriter\n\tdefer bufferedWriter.Flush() \/\/ Ensure that the full xunit output is written.\n\n\t\/\/ We start the tap command...\n\terr = cmdTap.Start()\n\tpanicOnError(err)\n\n\t\/\/ Meanwhile, run Prova to completion. If there was an error, print ERROR, otherwise PASS.\n\terr = cmdProva.Run()\n\ttestsOk := true\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tfmt.Println(\"\\033[31m\\033[1mERROR\\033[0m\")\n\t\ttestsOk = false\n\t} else {\n\t\tfmt.Println(\"\\033[32m\\033[1mPASS\\033[0m\")\n\t}\n\n\t\/\/ Wait for tap to xunit to finish itself off. This file will be ready for reading by Jenkins.\n\tfmt.Println(\"Converting Tap output to XUnit\")\n\terr = cmdTap.Wait()\n\tpanicOnError(err)\n\n\treturn testsOk\n}\n<commit_msg>namespace_browser: devtools: Move many libraries from v.io\/x\/ref\/lib to v.io\/x\/ref\/profiles\/internal.<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\t\"sample\/sampleworld\"\n\n\t\"v.io\/v23\"\n\n\t\"v.io\/x\/ref\/lib\/flags\/consts\"\n\t\"v.io\/x\/ref\/lib\/modules\"\n\t\"v.io\/x\/ref\/lib\/modules\/core\"\n\t\"v.io\/x\/ref\/lib\/signals\"\n\t\"v.io\/x\/ref\/lib\/testutil\/expect\"\n\t\"v.io\/x\/ref\/profiles\"\n)\n\nconst (\n\tSampleWorldCommand = \"sampleWorld\" \/\/ The modules library command.\n\tstdoutLog = \"tmp\/runner.stdout.log\" \/\/ Used as stdout drain when shutting down.\n\tstderrLog = \"tmp\/runner.stderr.log\" \/\/ Used as stderr drain when shutting down.\n)\n\nvar (\n\t\/\/ Flags used as input to this program.\n\trunSample bool\n\tserveHTTP bool\n\tportHTTP string\n\trootHTTP string\n\trunTests bool\n\trunTestsWatch bool\n)\n\nfunc init() {\n\tmodules.RegisterChild(SampleWorldCommand, \"desc\", sampleWorld)\n\tflag.BoolVar(&runSample, \"runSample\", false, \"if true, runs sample services\")\n\tflag.BoolVar(&serveHTTP, \"serveHTTP\", false, \"if true, serves HTTP\")\n\tflag.StringVar(&portHTTP, \"portHTTP\", \"9001\", \"default 9001, the port to serve HTTP on\")\n\tflag.StringVar(&rootHTTP, \"rootHTTP\", \".\", \"default '.', the root HTTP folder path\")\n\tflag.BoolVar(&runTests, \"runTests\", false, \"if true, runs the namespace browser tests\")\n\tflag.BoolVar(&runTestsWatch, \"runTestsWatch\", false, \"if true && runTests, runs the tests in watch mode\")\n}\n\n\/\/ Helper function to simply panic on error.\nfunc panicOnError(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ updateVars captures the vars from the given Handle's stdout and adds them to\n\/\/ the given vars map, overwriting existing entries.\nfunc updateVars(h modules.Handle, vars map[string]string, varNames ...string) error {\n\tvarsToAdd := map[string]bool{}\n\tfor _, v := range varNames {\n\t\tvarsToAdd[v] = true\n\t}\n\tnumLeft := len(varsToAdd)\n\n\ts := expect.NewSession(nil, h.Stdout(), 30*time.Second)\n\tfor {\n\t\tl := s.ReadLine()\n\t\tif err := s.OriginalError(); err != nil {\n\t\t\treturn err \/\/ EOF or otherwise\n\t\t}\n\t\tparts := strings.Split(l, \"=\")\n\t\tif len(parts) != 2 {\n\t\t\treturn fmt.Errorf(\"Unexpected line: %s\", l)\n\t\t}\n\t\tif _, ok := varsToAdd[parts[0]]; ok {\n\t\t\tnumLeft--\n\t\t\tvars[parts[0]] = parts[1]\n\t\t\tif numLeft == 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ The module command for running the sample world.\nfunc sampleWorld(stdin io.Reader, stdout, stderr io.Writer, env map[string]string, args ...string) error {\n\tctx, shutdown := v23.Init()\n\tdefer shutdown()\n\n\tsampleworld.RunSampleWorld(ctx)\n\n\tmodules.WaitForEOF(stdin)\n\treturn nil\n}\n\nfunc main() {\n\tif modules.IsModulesProcess() {\n\t\tpanicOnError(modules.Dispatch())\n\t\treturn\n\t}\n\n\t\/\/ Try running the program; on failure, exit with error status code.\n\tif !run() {\n\t\tos.Exit(1)\n\t}\n}\n\n\/\/ Runs the services and cleans up afterwards.\n\/\/ Returns true if the run was successful.\nfunc run() bool {\n\tctx, shutdown := v23.Init()\n\tdefer shutdown()\n\n\t\/\/ In order to prevent conflicts, tests and webapp use different mounttable ports.\n\tport := 5180\n\tcottagePort := 5181\n\thousePort := 5182\n\tif runTests {\n\t\tport = 8884\n\t\tcottagePort = 8885\n\t\thousePort = 8886\n\t}\n\n\t\/\/ Start a new shell module.\n\tvars := map[string]string{}\n\tsh, err := modules.NewShell(ctx, nil)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"modules.NewShell: %s\", err))\n\t}\n\n\t\/\/ Collect the output of this shell on termination.\n\terr = os.MkdirAll(\"tmp\", 0750)\n\tpanicOnError(err)\n\toutFile, err := os.Create(stdoutLog)\n\tpanicOnError(err)\n\tdefer outFile.Close()\n\terrFile, err := os.Create(stderrLog)\n\tpanicOnError(err)\n\tdefer errFile.Close()\n\tdefer sh.Cleanup(outFile, errFile)\n\n\t\/\/ Determine the hostname; this name will be used for mounting.\n\thostName, err := exec.Command(\"hostname\", \"-s\").Output()\n\tpanicOnError(err)\n\n\t\/\/ Run the host mounttable.\n\trootName := fmt.Sprintf(\"%s-home\", strings.TrimSpace(string(hostName))) \/\/ Must trim; hostname has \\n at the end.\n\thRoot, err := sh.Start(core.MTCommand, nil, \"--veyron.tcp.protocol=ws\", fmt.Sprintf(\"--veyron.tcp.address=127.0.0.1:%d\", port), rootName)\n\tpanicOnError(err)\n\tpanicOnError(updateVars(hRoot, vars, \"MT_NAME\"))\n\tdefer hRoot.Shutdown(outFile, errFile)\n\n\t\/\/ Set consts.NamespaceRootPrefix env var, consumed downstream.\n\tsh.SetVar(consts.NamespaceRootPrefix, vars[\"MT_NAME\"])\n\tv23.GetNamespace(ctx).SetRoots(vars[\"MT_NAME\"])\n\n\t\/\/ Run the cottage mounttable at host\/cottage.\n\thCottage, err := sh.Start(core.MTCommand, nil, \"--veyron.tcp.protocol=ws\", fmt.Sprintf(\"--veyron.tcp.address=127.0.0.1:%d\", cottagePort), \"cottage\")\n\tpanicOnError(err)\n\texpect.NewSession(nil, hCottage.Stdout(), 30*time.Second)\n\tdefer hCottage.Shutdown(outFile, errFile)\n\n\t\/\/ run the house mounttable at host\/house.\n\thHouse, err := sh.Start(core.MTCommand, nil, \"--veyron.tcp.protocol=ws\", fmt.Sprintf(\"--veyron.tcp.address=127.0.0.1:%d\", housePort), \"house\")\n\tpanicOnError(err)\n\texpect.NewSession(nil, hHouse.Stdout(), 30*time.Second)\n\tdefer hHouse.Shutdown(outFile, errFile)\n\n\t\/\/ Possibly run the sample world.\n\tif runSample {\n\t\tfmt.Println(\"Running Sample World\")\n\t\thSample, err := sh.Start(SampleWorldCommand, nil, \"--veyron.tcp.protocol=ws\", \"--veyron.tcp.address=127.0.0.1:0\")\n\t\tpanicOnError(err)\n\t\texpect.NewSession(nil, hSample.Stdout(), 30*time.Second)\n\t\tdefer hSample.Shutdown(outFile, errFile)\n\t}\n\n\t\/\/ Possibly serve the public bundle at the portHTTP.\n\tif serveHTTP {\n\t\tfmt.Printf(\"Also serving HTTP at %s for %s\\n\", portHTTP, rootHTTP)\n\t\thttp.ListenAndServe(\":\"+portHTTP, http.FileServer(http.Dir(rootHTTP)))\n\t}\n\n\t\/\/ Just print out the collected variables. This is for debugging purposes.\n\tbytes, err := json.Marshal(vars)\n\tpanicOnError(err)\n\tfmt.Println(string(bytes))\n\n\t\/\/ Possibly run the tests in Prova.\n\tif runTests {\n\t\t\/\/ Also set HOUSE_MOUNTTABLE (used in the tests)\n\t\tos.Setenv(\"HOUSE_MOUNTTABLE\", fmt.Sprintf(\"\/127.0.0.1:%d\", housePort))\n\n\t\tproxyShutdown, proxyEndpoint, err := profiles.NewProxy(ctx, \"ws\", \"127.0.0.1:0\", \"\", \"test\/proxy\")\n\t\tpanicOnError(err)\n\t\tdefer proxyShutdown()\n\t\tvars[\"PROXY_NAME\"] = proxyEndpoint.Name()\n\n\t\thIdentityd, err := sh.Start(core.TestIdentitydCommand, nil, \"--veyron.tcp.protocol=ws\", \"--veyron.tcp.address=127.0.0.1:0\", \"--veyron.proxy=test\/proxy\", \"--host=localhost\", \"--httpaddr=localhost:0\")\n\t\tpanicOnError(err)\n\t\tpanicOnError(updateVars(hIdentityd, vars, \"TEST_IDENTITYD_NAME\", \"TEST_IDENTITYD_HTTP_ADDR\"))\n\t\tdefer hIdentityd.Shutdown(outFile, errFile)\n\n\t\t\/\/ Setup a lot of environment variables; these are used for the tests and building the test extension.\n\t\tos.Setenv(\"NAMESPACE_ROOT\", vars[\"MT_NAME\"])\n\t\tos.Setenv(\"PROXY_ADDR\", vars[\"PROXY_NAME\"])\n\t\tos.Setenv(\"IDENTITYD\", fmt.Sprintf(\"%s\/google\", vars[\"TEST_IDENTITYD_NAME\"]))\n\t\tos.Setenv(\"IDENTITYD_BLESSING_URL\", fmt.Sprintf(\"%s\/blessing-root\", vars[\"TEST_IDENTITYD_HTTP_ADDR\"]))\n\t\tos.Setenv(\"DEBUG\", \"false\")\n\n\t\ttestsOk := runProva()\n\n\t\tfmt.Println(\"Cleaning up launched services...\")\n\t\treturn testsOk\n\t}\n\n\t\/\/ Not in a test, so run until the program is killed.\n\t<-signals.ShutdownOnSignals(ctx)\n\treturn true\n\n}\n\n\/\/ Run the prova tests and convert its tap output to xunit.\nfunc runProva() bool {\n\t\/\/ This is also useful information for routing the test output.\n\tVANADIUM_ROOT := os.Getenv(\"VANADIUM_ROOT\")\n\tVANADIUM_JS := fmt.Sprintf(\"%s\/release\/javascript\/core\", VANADIUM_ROOT)\n\tVANADIUM_BROWSER := fmt.Sprintf(\"%s\/release\/projects\/namespace_browser\", VANADIUM_ROOT)\n\n\tTAP_XUNIT := fmt.Sprintf(\"%s\/node_modules\/.bin\/tap-xunit\", VANADIUM_BROWSER)\n\tXUNIT_OUTPUT_FILE := os.Getenv(\"XUNIT_OUTPUT_FILE\")\n\tif XUNIT_OUTPUT_FILE == \"\" {\n\t\tXUNIT_OUTPUT_FILE = fmt.Sprintf(\"%s\/test_output.xml\", os.Getenv(\"TMPDIR\"))\n\t}\n\tTAP_XUNIT_OPTIONS := \" --package=namespace-browser\"\n\n\t\/\/ Make sure we're in the right folder when we run make test-extension.\n\tvbroot, err := os.Open(VANADIUM_BROWSER)\n\tpanicOnError(err)\n\terr = vbroot.Chdir()\n\tpanicOnError(err)\n\n\t\/\/ Make the test-extension, this should also remove the old one.\n\tfmt.Println(\"Rebuilding test extension...\")\n\tcmdExtensionClean := exec.Command(\"rm\", \"-fr\", fmt.Sprintf(\"%s\/extension\/build-test\", VANADIUM_JS))\n\terr = cmdExtensionClean.Run()\n\tpanicOnError(err)\n\tcmdExtensionBuild := exec.Command(\"make\", \"-C\", fmt.Sprintf(\"%s\/extension\", VANADIUM_JS), \"build-test\")\n\terr = cmdExtensionBuild.Run()\n\tpanicOnError(err)\n\n\t\/\/ These are the basic prova options.\n\toptions := []string{\n\t\t\"test\/**\/*.js\",\n\t\t\"--browser\",\n\t\t\"--includeFilenameAsPackage\",\n\t\t\"--launch\",\n\t\t\"chrome\",\n\t\t\"--plugin\",\n\t\t\"proxyquireify\/plugin\",\n\t\t\"--transform\",\n\t\t\"envify,.\/main-transform\",\n\t\t\"--log\",\n\t\t\"tmp\/chrome.log\",\n\t\tfmt.Sprintf(\"--options=--load-extension=%s\/extension\/build-test\/,--ignore-certificate-errors,--enable-logging=stderr\", VANADIUM_JS),\n\t}\n\n\t\/\/ Normal tests have a few more options and a different port from the watch tests.\n\tvar PROVA_PORT int\n\tif !runTestsWatch {\n\t\tPROVA_PORT = 8893\n\t\toptions = append(options, \"--headless\", \"--quit\", \"--progress\", \"--tap\")\n\t\tfmt.Printf(\"\\033[34m-Executing tests. See %s for test xunit output.\\033[0m\\n\", XUNIT_OUTPUT_FILE)\n\t} else {\n\t\tPROVA_PORT = 8894\n\t\tfmt.Println(\"\\033[34m-Running tests in watch mode.\\033[0m\")\n\t}\n\toptions = append(options, \"--port\", fmt.Sprintf(\"%d\", PROVA_PORT))\n\n\t\/\/ This is the prova command.\n\tcmdProva := exec.Command(\n\t\tfmt.Sprintf(\"%s\/node_modules\/.bin\/prova\", VANADIUM_BROWSER),\n\t\toptions...,\n\t)\n\tfmt.Printf(\"\\033[34m-Go to \\033[32mhttp:\/\/0.0.0.0:%d\\033[34m to see tests running.\\033[0m\\n\", PROVA_PORT)\n\tfmt.Println(cmdProva)\n\n\t\/\/ Collect the prova stdout. This information needs to be sent to xunit.\n\tprovaOut, err := cmdProva.StdoutPipe()\n\tpanicOnError(err)\n\n\t\/\/ Setup the tap to xunit command. It uses Prova's stdout as input.\n\t\/\/ The output will got the xunit output file.\n\tcmdTap := exec.Command(TAP_XUNIT, TAP_XUNIT_OPTIONS)\n\tcmdTap.Stdin = io.TeeReader(provaOut, os.Stdout) \/\/ Tee the prova output to see it on the console too.\n\toutfile, err := os.Create(XUNIT_OUTPUT_FILE)\n\tpanicOnError(err)\n\tdefer outfile.Close()\n\tbufferedWriter := bufio.NewWriter(outfile)\n\tcmdTap.Stdout = bufferedWriter\n\tdefer bufferedWriter.Flush() \/\/ Ensure that the full xunit output is written.\n\n\t\/\/ We start the tap command...\n\terr = cmdTap.Start()\n\tpanicOnError(err)\n\n\t\/\/ Meanwhile, run Prova to completion. If there was an error, print ERROR, otherwise PASS.\n\terr = cmdProva.Run()\n\ttestsOk := true\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tfmt.Println(\"\\033[31m\\033[1mERROR\\033[0m\")\n\t\ttestsOk = false\n\t} else {\n\t\tfmt.Println(\"\\033[32m\\033[1mPASS\\033[0m\")\n\t}\n\n\t\/\/ Wait for tap to xunit to finish itself off. This file will be ready for reading by Jenkins.\n\tfmt.Println(\"Converting Tap output to XUnit\")\n\terr = cmdTap.Wait()\n\tpanicOnError(err)\n\n\treturn testsOk\n}\n<|endoftext|>"} {"text":"<commit_before>package worker\n\nimport (\n\t\"fmt\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/youtube\/vitess\/go\/sqltypes\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/topo\/topoproto\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/wrangler\"\n\n\ttabletmanagerdatapb \"github.com\/youtube\/vitess\/go\/vt\/proto\/tabletmanagerdata\"\n\ttopodatapb \"github.com\/youtube\/vitess\/go\/vt\/proto\/topodata\"\n)\n\nvar (\n\tcompleteChunk = chunk{sqltypes.NULL, sqltypes.NULL, 1, 1}\n\tsingleCompleteChunk = []chunk{completeChunk}\n)\n\n\/\/ chunk holds the information which subset of the table should be worked on.\n\/\/ The subset is the range of rows in the range [start, end) where start and end\n\/\/ both refer to the first column of the primary key.\n\/\/ If the column is not numeric, both start and end will be sqltypes.NULL.\ntype chunk struct {\n\tstart sqltypes.Value\n\tend sqltypes.Value\n\t\/\/ number records the position of this chunk among all \"total\" chunks.\n\t\/\/ The lowest value is 1.\n\tnumber int\n\t\/\/ total is the total number of chunks this chunk belongs to.\n\ttotal int\n}\n\n\/\/ String returns a human-readable presentation of the chunk range.\nfunc (c chunk) String() string {\n\t\/\/ Pad the chunk number such that all log messages align nicely.\n\tdigits := digits(c.total)\n\treturn fmt.Sprintf(\"%*d\/%d\", digits, c.number, c.total)\n}\n\nfunc digits(i int) int {\n\tdigits := 1\n\tfor {\n\t\ti \/= 10\n\t\tif i == 0 {\n\t\t\tbreak\n\t\t}\n\t\tdigits++\n\t}\n\treturn digits\n}\n\n\/\/ generateChunks returns an array of chunks to use for splitting up a table\n\/\/ into multiple data chunks. It only works for tables with a primary key\n\/\/ whose first column is a numeric type.\nfunc generateChunks(ctx context.Context, wr *wrangler.Wrangler, tablet *topodatapb.Tablet, td *tabletmanagerdatapb.TableDefinition, chunkCount, minRowsPerChunk int) ([]chunk, error) {\n\tif len(td.PrimaryKeyColumns) == 0 {\n\t\t\/\/ No explicit primary key. Cannot chunk the rows then.\n\t\twr.Logger().Infof(\"Not splitting table %v into multiple chunks because it has no primary key columns. This will reduce the performance of the clone.\", td.Name)\n\t\treturn singleCompleteChunk, nil\n\t}\n\tif td.RowCount < 2*uint64(minRowsPerChunk) {\n\t\t\/\/ The automatic adjustment of \"chunkCount\" based on \"minRowsPerChunk\"\n\t\t\/\/ below would set \"chunkCount\" to less than 2 i.e. 1 or 0 chunks.\n\t\t\/\/ In practice in this case there should be exactly one chunk.\n\t\t\/\/ Return early in this case and notice the user about this.\n\t\twr.Logger().Infof(\"Not splitting table %v into multiple chunks because it has only %d rows.\", td.Name, td.RowCount)\n\t\treturn singleCompleteChunk, nil\n\t}\n\tif chunkCount == 1 {\n\t\treturn singleCompleteChunk, nil\n\t}\n\n\t\/\/ Get the MIN and MAX of the leading column of the primary key.\n\tquery := fmt.Sprintf(\"SELECT MIN(%v), MAX(%v) FROM %v.%v\", escape(td.PrimaryKeyColumns[0]), escape(td.PrimaryKeyColumns[0]), escape(topoproto.TabletDbName(tablet)), escape(td.Name))\n\tshortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)\n\tqr, err := wr.TabletManagerClient().ExecuteFetchAsApp(shortCtx, tablet, true, []byte(query), 1)\n\tcancel()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Cannot determine MIN and MAX of the first primary key column. ExecuteFetchAsApp: %v\", err)\n\t}\n\tif len(qr.Rows) != 1 {\n\t\treturn nil, fmt.Errorf(\"Cannot determine MIN and MAX of the first primary key column. Zero rows were returned for the following query: %v\", query)\n\t}\n\n\tresult := sqltypes.Proto3ToResult(qr)\n\tmin := result.Rows[0][0].ToNative()\n\tmax := result.Rows[0][1].ToNative()\n\n\tif min == nil || max == nil {\n\t\twr.Logger().Infof(\"Not splitting table %v into multiple chunks, min or max is NULL: %v\", td.Name, qr.Rows[0])\n\t\treturn singleCompleteChunk, nil\n\t}\n\n\t\/\/ Determine the average number of rows per chunk for the given chunkCount.\n\tavgRowsPerChunk := td.RowCount \/ uint64(chunkCount)\n\tif avgRowsPerChunk < uint64(minRowsPerChunk) {\n\t\t\/\/ Reduce the chunkCount to fulfill minRowsPerChunk.\n\t\tnewChunkCount := td.RowCount \/ uint64(minRowsPerChunk)\n\t\twr.Logger().Infof(\"Reducing the number of chunks for table %v from the default %d to %d to make sure that each chunk has at least %d rows.\", td.Name, chunkCount, newChunkCount, minRowsPerChunk)\n\t\tchunkCount = int(newChunkCount)\n\t}\n\n\t\/\/ TODO(mberlin): Write a unit test for this part of the function.\n\tvar interval interface{}\n\tchunks := make([]chunk, chunkCount)\n\tswitch min := min.(type) {\n\tcase int64:\n\t\tmax := max.(int64)\n\t\tinterval = (max - min) \/ int64(chunkCount)\n\t\tif interval == 0 {\n\t\t\twr.Logger().Infof(\"Not splitting table %v into multiple chunks, interval=0: %v to %v\", td.Name, min, max)\n\t\t\treturn singleCompleteChunk, nil\n\t\t}\n\tcase uint64:\n\t\tmax := max.(uint64)\n\t\tinterval = (max - min) \/ uint64(chunkCount)\n\t\tif interval == 0 {\n\t\t\twr.Logger().Infof(\"Not splitting table %v into multiple chunks, interval=0: %v to %v\", td.Name, min, max)\n\t\t\treturn singleCompleteChunk, nil\n\t\t}\n\tcase float64:\n\t\tmax := max.(float64)\n\t\tinterval = (max - min) \/ float64(chunkCount)\n\t\tif interval == 0 {\n\t\t\twr.Logger().Infof(\"Not splitting table %v into multiple chunks, interval=0: %v to %v\", td.Name, min, max)\n\t\t\treturn singleCompleteChunk, nil\n\t\t}\n\tdefault:\n\t\twr.Logger().Infof(\"Not splitting table %v into multiple chunks, primary key not numeric.\", td.Name)\n\t\treturn singleCompleteChunk, nil\n\t}\n\n\t\/\/ Create chunks.\n\tstart := min\n\tfor i := 0; i < chunkCount; i++ {\n\t\tend := add(start, interval)\n\t\tchunk, err := toChunk(start, end, i+1, chunkCount)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tchunks[i] = chunk\n\t\tstart = end\n\t}\n\n\t\/\/ Clear out the MIN and MAX on the first and last chunk respectively\n\t\/\/ because other shards might have smaller or higher values than the one we\n\t\/\/ looked at.\n\tchunks[0].start = sqltypes.NULL\n\tchunks[chunkCount-1].end = sqltypes.NULL\n\treturn chunks, nil\n}\n\nfunc add(start, interval interface{}) interface{} {\n\tswitch start := start.(type) {\n\tcase int64:\n\t\treturn start + interval.(int64)\n\tcase uint64:\n\t\treturn start + interval.(uint64)\n\tcase float64:\n\t\treturn start + interval.(float64)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported type %T for interval start: %v\", start, start))\n\t}\n}\n\nfunc toChunk(start, end interface{}, number, total int) (chunk, error) {\n\tstartValue, err := sqltypes.BuildValue(start)\n\tif err != nil {\n\t\treturn chunk{}, fmt.Errorf(\"Failed to convert calculated start value (%v) into internal sqltypes.Value: %v\", start, err)\n\t}\n\tendValue, err := sqltypes.BuildValue(end)\n\tif err != nil {\n\t\treturn chunk{}, fmt.Errorf(\"Failed to convert calculated end value (%v) into internal sqltypes.Value: %v\", end, err)\n\t}\n\treturn chunk{startValue, endValue, number, total}, nil\n}\n<commit_msg>worker: Change the logging prefix in chunk.go. (#2017)<commit_after>package worker\n\nimport (\n\t\"fmt\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/youtube\/vitess\/go\/sqltypes\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/topo\/topoproto\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/wrangler\"\n\n\ttabletmanagerdatapb \"github.com\/youtube\/vitess\/go\/vt\/proto\/tabletmanagerdata\"\n\ttopodatapb \"github.com\/youtube\/vitess\/go\/vt\/proto\/topodata\"\n)\n\nvar (\n\tcompleteChunk = chunk{sqltypes.NULL, sqltypes.NULL, 1, 1}\n\tsingleCompleteChunk = []chunk{completeChunk}\n)\n\n\/\/ chunk holds the information which subset of the table should be worked on.\n\/\/ The subset is the range of rows in the range [start, end) where start and end\n\/\/ both refer to the first column of the primary key.\n\/\/ If the column is not numeric, both start and end will be sqltypes.NULL.\ntype chunk struct {\n\tstart sqltypes.Value\n\tend sqltypes.Value\n\t\/\/ number records the position of this chunk among all \"total\" chunks.\n\t\/\/ The lowest value is 1.\n\tnumber int\n\t\/\/ total is the total number of chunks this chunk belongs to.\n\ttotal int\n}\n\n\/\/ String returns a human-readable presentation of the chunk range.\nfunc (c chunk) String() string {\n\t\/\/ Pad the chunk number such that all log messages align nicely.\n\tdigits := digits(c.total)\n\treturn fmt.Sprintf(\"%*d\/%d\", digits, c.number, c.total)\n}\n\nfunc digits(i int) int {\n\tdigits := 1\n\tfor {\n\t\ti \/= 10\n\t\tif i == 0 {\n\t\t\tbreak\n\t\t}\n\t\tdigits++\n\t}\n\treturn digits\n}\n\n\/\/ generateChunks returns an array of chunks to use for splitting up a table\n\/\/ into multiple data chunks. It only works for tables with a primary key\n\/\/ whose first column is a numeric type.\nfunc generateChunks(ctx context.Context, wr *wrangler.Wrangler, tablet *topodatapb.Tablet, td *tabletmanagerdatapb.TableDefinition, chunkCount, minRowsPerChunk int) ([]chunk, error) {\n\tif len(td.PrimaryKeyColumns) == 0 {\n\t\t\/\/ No explicit primary key. Cannot chunk the rows then.\n\t\twr.Logger().Infof(\"table=%v: Not splitting the table into multiple chunks because it has no primary key columns. This will reduce the performance of the clone.\", td.Name)\n\t\treturn singleCompleteChunk, nil\n\t}\n\tif td.RowCount < 2*uint64(minRowsPerChunk) {\n\t\t\/\/ The automatic adjustment of \"chunkCount\" based on \"minRowsPerChunk\"\n\t\t\/\/ below would set \"chunkCount\" to less than 2 i.e. 1 or 0 chunks.\n\t\t\/\/ In practice in this case there should be exactly one chunk.\n\t\t\/\/ Return early in this case and notice the user about this.\n\t\twr.Logger().Infof(\"table=%v: Not splitting the table into multiple chunks because it has only %d rows.\", td.Name, td.RowCount)\n\t\treturn singleCompleteChunk, nil\n\t}\n\tif chunkCount == 1 {\n\t\treturn singleCompleteChunk, nil\n\t}\n\n\t\/\/ Get the MIN and MAX of the leading column of the primary key.\n\tquery := fmt.Sprintf(\"SELECT MIN(%v), MAX(%v) FROM %v.%v\", escape(td.PrimaryKeyColumns[0]), escape(td.PrimaryKeyColumns[0]), escape(topoproto.TabletDbName(tablet)), escape(td.Name))\n\tshortCtx, cancel := context.WithTimeout(ctx, *remoteActionsTimeout)\n\tqr, err := wr.TabletManagerClient().ExecuteFetchAsApp(shortCtx, tablet, true, []byte(query), 1)\n\tcancel()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Cannot determine MIN and MAX of the first primary key column. ExecuteFetchAsApp: %v\", err)\n\t}\n\tif len(qr.Rows) != 1 {\n\t\treturn nil, fmt.Errorf(\"Cannot determine MIN and MAX of the first primary key column. Zero rows were returned for the following query: %v\", query)\n\t}\n\n\tresult := sqltypes.Proto3ToResult(qr)\n\tmin := result.Rows[0][0].ToNative()\n\tmax := result.Rows[0][1].ToNative()\n\n\tif min == nil || max == nil {\n\t\twr.Logger().Infof(\"table=%v: Not splitting the table into multiple chunks, min or max is NULL: %v\", td.Name, qr.Rows[0])\n\t\treturn singleCompleteChunk, nil\n\t}\n\n\t\/\/ Determine the average number of rows per chunk for the given chunkCount.\n\tavgRowsPerChunk := td.RowCount \/ uint64(chunkCount)\n\tif avgRowsPerChunk < uint64(minRowsPerChunk) {\n\t\t\/\/ Reduce the chunkCount to fulfill minRowsPerChunk.\n\t\tnewChunkCount := td.RowCount \/ uint64(minRowsPerChunk)\n\t\twr.Logger().Infof(\"table=%v: Reducing the number of chunks from the default %d to %d to make sure that each chunk has at least %d rows.\", td.Name, chunkCount, newChunkCount, minRowsPerChunk)\n\t\tchunkCount = int(newChunkCount)\n\t}\n\n\t\/\/ TODO(mberlin): Write a unit test for this part of the function.\n\tvar interval interface{}\n\tchunks := make([]chunk, chunkCount)\n\tswitch min := min.(type) {\n\tcase int64:\n\t\tmax := max.(int64)\n\t\tinterval = (max - min) \/ int64(chunkCount)\n\t\tif interval == 0 {\n\t\t\twr.Logger().Infof(\"table=%v: Not splitting the table into multiple chunks, interval=0: %v to %v\", td.Name, min, max)\n\t\t\treturn singleCompleteChunk, nil\n\t\t}\n\tcase uint64:\n\t\tmax := max.(uint64)\n\t\tinterval = (max - min) \/ uint64(chunkCount)\n\t\tif interval == 0 {\n\t\t\twr.Logger().Infof(\"table=%v: Not splitting the table into multiple chunks, interval=0: %v to %v\", td.Name, min, max)\n\t\t\treturn singleCompleteChunk, nil\n\t\t}\n\tcase float64:\n\t\tmax := max.(float64)\n\t\tinterval = (max - min) \/ float64(chunkCount)\n\t\tif interval == 0 {\n\t\t\twr.Logger().Infof(\"table=%v: Not splitting the table into multiple chunks, interval=0: %v to %v\", td.Name, min, max)\n\t\t\treturn singleCompleteChunk, nil\n\t\t}\n\tdefault:\n\t\twr.Logger().Infof(\"table=%v: Not splitting the table into multiple chunks, primary key not numeric.\", td.Name)\n\t\treturn singleCompleteChunk, nil\n\t}\n\n\t\/\/ Create chunks.\n\tstart := min\n\tfor i := 0; i < chunkCount; i++ {\n\t\tend := add(start, interval)\n\t\tchunk, err := toChunk(start, end, i+1, chunkCount)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tchunks[i] = chunk\n\t\tstart = end\n\t}\n\n\t\/\/ Clear out the MIN and MAX on the first and last chunk respectively\n\t\/\/ because other shards might have smaller or higher values than the one we\n\t\/\/ looked at.\n\tchunks[0].start = sqltypes.NULL\n\tchunks[chunkCount-1].end = sqltypes.NULL\n\treturn chunks, nil\n}\n\nfunc add(start, interval interface{}) interface{} {\n\tswitch start := start.(type) {\n\tcase int64:\n\t\treturn start + interval.(int64)\n\tcase uint64:\n\t\treturn start + interval.(uint64)\n\tcase float64:\n\t\treturn start + interval.(float64)\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unsupported type %T for interval start: %v\", start, start))\n\t}\n}\n\nfunc toChunk(start, end interface{}, number, total int) (chunk, error) {\n\tstartValue, err := sqltypes.BuildValue(start)\n\tif err != nil {\n\t\treturn chunk{}, fmt.Errorf(\"Failed to convert calculated start value (%v) into internal sqltypes.Value: %v\", start, err)\n\t}\n\tendValue, err := sqltypes.BuildValue(end)\n\tif err != nil {\n\t\treturn chunk{}, fmt.Errorf(\"Failed to convert calculated end value (%v) into internal sqltypes.Value: %v\", end, err)\n\t}\n\treturn chunk{startValue, endValue, number, total}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\/\/\n\/\/ Author: Spencer Kimball (spencer.kimball@gmail.com)\n\npackage gossip\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/cockroachdb\/cockroach\/base\"\n\t\"github.com\/cockroachdb\/cockroach\/gossip\/resolver\"\n\t\"github.com\/cockroachdb\/cockroach\/roachpb\"\n\t\"github.com\/cockroachdb\/cockroach\/rpc\"\n\t\"github.com\/cockroachdb\/cockroach\/util\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/hlc\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/leaktest\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/log\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/stop\"\n)\n\n\/\/ TestGossipInfoStore verifies operation of gossip instance infostore.\nfunc TestGossipInfoStore(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\trpcContext := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), nil)\n\tg := New(rpcContext, TestBootstrap)\n\t\/\/ Have to call g.SetNodeID before call g.AddInfo\n\tg.SetNodeID(roachpb.NodeID(1))\n\tslice := []byte(\"b\")\n\tif err := g.AddInfo(\"s\", slice, time.Hour); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif val, err := g.GetInfo(\"s\"); !bytes.Equal(val, slice) || err != nil {\n\t\tt.Errorf(\"error fetching string: %v\", err)\n\t}\n\tif _, err := g.GetInfo(\"s2\"); err == nil {\n\t\tt.Errorf(\"expected error fetching nonexistent key \\\"s2\\\"\")\n\t}\n}\n\nfunc TestGossipGetNextBootstrapAddress(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tresolverSpecs := []string{\n\t\t\"127.0.0.1:9000\",\n\t\t\"tcp=127.0.0.1:9001\",\n\t\t\"unix=\/tmp\/unix-socket12345\",\n\t\t\"lb=127.0.0.1:9002\",\n\t\t\"foo=127.0.0.1:9003\", \/\/ error should not resolve.\n\t\t\"lb=\", \/\/ error should not resolve.\n\t\t\"localhost:9004\",\n\t\t\"lb=127.0.0.1:9005\",\n\t}\n\n\tresolvers := []resolver.Resolver{}\n\tfor _, rs := range resolverSpecs {\n\t\tresolver, err := resolver.NewResolver(&base.Context{}, rs)\n\t\tif err == nil {\n\t\t\tresolvers = append(resolvers, resolver)\n\t\t}\n\t}\n\tif len(resolvers) != 6 {\n\t\tt.Errorf(\"expected 6 resolvers; got %d\", len(resolvers))\n\t}\n\tg := New(nil, resolvers)\n\n\t\/\/ Using specified resolvers, fetch bootstrap addresses 10 times\n\t\/\/ and verify the results match expected addresses.\n\texpAddresses := []string{\n\t\t\"127.0.0.1:9000\",\n\t\t\"127.0.0.1:9001\",\n\t\t\"\/tmp\/unix-socket12345\",\n\t\t\"127.0.0.1:9002\",\n\t\t\"localhost:9004\",\n\t\t\"127.0.0.1:9005\",\n\t\t\"127.0.0.1:9002\",\n\t\t\"127.0.0.1:9005\",\n\t\t\"127.0.0.1:9002\",\n\t\t\"127.0.0.1:9005\",\n\t}\n\tfor i := 0; i < len(expAddresses); i++ {\n\t\tlog.Infof(\"getting next address\")\n\t\taddr := g.getNextBootstrapAddress()\n\t\tif addr == nil {\n\t\t\tt.Errorf(\"%d: unexpected nil addr when expecting %s\", i, expAddresses[i])\n\t\t} else if addr.String() != expAddresses[i] {\n\t\t\tt.Errorf(\"%d: expected addr %s; got %s\", i, expAddresses[i], addr.String())\n\t\t}\n\t}\n}\n\n\/\/ TestGossipCullNetwork verifies that a client will be culled from\n\/\/ the network periodically (at cullInterval duration intervals).\nfunc TestGossipCullNetwork(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\n\t\/\/ Set the cullInterval to a low value to guarantee it kicks in quickly.\n\torigCullInterval := cullInterval\n\tcullInterval = 5 * time.Millisecond\n\tdefer func() {\n\t\tcullInterval = origCullInterval\n\t}()\n\n\t\/\/ Create the local gossip and minPeers peers.\n\tstopper := stop.NewStopper()\n\tdefer stopper.Stop()\n\tlocal := startGossip(1, stopper, t)\n\tpeers := []*Gossip{}\n\tfor i := 0; i < minPeers; i++ {\n\t\tpeers = append(peers, startGossip(roachpb.NodeID(i+2), stopper, t))\n\t}\n\n\t\/\/ Start clients to all peers and start the local gossip's manage routine.\n\tlocal.mu.Lock()\n\tfor _, p := range peers {\n\t\tpAddr := p.is.NodeAddr\n\t\tlocal.startClient(pAddr, stopper)\n\t}\n\tlocal.mu.Unlock()\n\tlocal.manage(stopper)\n\n\tutil.SucceedsWithin(t, 10*time.Second, func() error {\n\t\t\/\/ Verify that a client is closed within the cull interval.\n\t\tif len(local.Outgoing()) == minPeers-1 {\n\t\t\treturn nil\n\t\t}\n\t\treturn errors.New(\"no network culling occurred\")\n\t})\n}\n<commit_msg>disable flaky TestGossipCullNetwork<commit_after>\/\/ Copyright 2014 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License.\n\/\/\n\/\/ Author: Spencer Kimball (spencer.kimball@gmail.com)\n\npackage gossip\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/cockroachdb\/cockroach\/base\"\n\t\"github.com\/cockroachdb\/cockroach\/gossip\/resolver\"\n\t\"github.com\/cockroachdb\/cockroach\/roachpb\"\n\t\"github.com\/cockroachdb\/cockroach\/rpc\"\n\t\"github.com\/cockroachdb\/cockroach\/util\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/hlc\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/leaktest\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/log\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/stop\"\n)\n\n\/\/ TestGossipInfoStore verifies operation of gossip instance infostore.\nfunc TestGossipInfoStore(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\trpcContext := rpc.NewContext(&base.Context{}, hlc.NewClock(hlc.UnixNano), nil)\n\tg := New(rpcContext, TestBootstrap)\n\t\/\/ Have to call g.SetNodeID before call g.AddInfo\n\tg.SetNodeID(roachpb.NodeID(1))\n\tslice := []byte(\"b\")\n\tif err := g.AddInfo(\"s\", slice, time.Hour); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif val, err := g.GetInfo(\"s\"); !bytes.Equal(val, slice) || err != nil {\n\t\tt.Errorf(\"error fetching string: %v\", err)\n\t}\n\tif _, err := g.GetInfo(\"s2\"); err == nil {\n\t\tt.Errorf(\"expected error fetching nonexistent key \\\"s2\\\"\")\n\t}\n}\n\nfunc TestGossipGetNextBootstrapAddress(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tresolverSpecs := []string{\n\t\t\"127.0.0.1:9000\",\n\t\t\"tcp=127.0.0.1:9001\",\n\t\t\"unix=\/tmp\/unix-socket12345\",\n\t\t\"lb=127.0.0.1:9002\",\n\t\t\"foo=127.0.0.1:9003\", \/\/ error should not resolve.\n\t\t\"lb=\", \/\/ error should not resolve.\n\t\t\"localhost:9004\",\n\t\t\"lb=127.0.0.1:9005\",\n\t}\n\n\tresolvers := []resolver.Resolver{}\n\tfor _, rs := range resolverSpecs {\n\t\tresolver, err := resolver.NewResolver(&base.Context{}, rs)\n\t\tif err == nil {\n\t\t\tresolvers = append(resolvers, resolver)\n\t\t}\n\t}\n\tif len(resolvers) != 6 {\n\t\tt.Errorf(\"expected 6 resolvers; got %d\", len(resolvers))\n\t}\n\tg := New(nil, resolvers)\n\n\t\/\/ Using specified resolvers, fetch bootstrap addresses 10 times\n\t\/\/ and verify the results match expected addresses.\n\texpAddresses := []string{\n\t\t\"127.0.0.1:9000\",\n\t\t\"127.0.0.1:9001\",\n\t\t\"\/tmp\/unix-socket12345\",\n\t\t\"127.0.0.1:9002\",\n\t\t\"localhost:9004\",\n\t\t\"127.0.0.1:9005\",\n\t\t\"127.0.0.1:9002\",\n\t\t\"127.0.0.1:9005\",\n\t\t\"127.0.0.1:9002\",\n\t\t\"127.0.0.1:9005\",\n\t}\n\tfor i := 0; i < len(expAddresses); i++ {\n\t\tlog.Infof(\"getting next address\")\n\t\taddr := g.getNextBootstrapAddress()\n\t\tif addr == nil {\n\t\t\tt.Errorf(\"%d: unexpected nil addr when expecting %s\", i, expAddresses[i])\n\t\t} else if addr.String() != expAddresses[i] {\n\t\t\tt.Errorf(\"%d: expected addr %s; got %s\", i, expAddresses[i], addr.String())\n\t\t}\n\t}\n}\n\n\/\/ TestGossipCullNetwork verifies that a client will be culled from\n\/\/ the network periodically (at cullInterval duration intervals).\nfunc TestGossipCullNetwork(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tt.Skip(\"#3620\")\n\n\t\/\/ Set the cullInterval to a low value to guarantee it kicks in quickly.\n\torigCullInterval := cullInterval\n\tcullInterval = 5 * time.Millisecond\n\tdefer func() {\n\t\tcullInterval = origCullInterval\n\t}()\n\n\t\/\/ Create the local gossip and minPeers peers.\n\tstopper := stop.NewStopper()\n\tdefer stopper.Stop()\n\tlocal := startGossip(1, stopper, t)\n\tpeers := []*Gossip{}\n\tfor i := 0; i < minPeers; i++ {\n\t\tpeers = append(peers, startGossip(roachpb.NodeID(i+2), stopper, t))\n\t}\n\n\t\/\/ Start clients to all peers and start the local gossip's manage routine.\n\tlocal.mu.Lock()\n\tfor _, p := range peers {\n\t\tpAddr := p.is.NodeAddr\n\t\tlocal.startClient(pAddr, stopper)\n\t}\n\tlocal.mu.Unlock()\n\tlocal.manage(stopper)\n\n\tutil.SucceedsWithin(t, 10*time.Second, func() error {\n\t\t\/\/ Verify that a client is closed within the cull interval.\n\t\tif len(local.Outgoing()) == minPeers-1 {\n\t\t\treturn nil\n\t\t}\n\t\treturn errors.New(\"no network culling occurred\")\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Primitives are basic shapes which can be directly drawn to screen.\n *\n *\/\n\npackage graphics\n\nimport \"math\"\nimport \"image\/color\"\nimport \"github.com\/banthar\/Go-SDL\/sdl\"\n\ntype Primitive interface {\n\tdraw(s *sdl.Surface)\n}\n\ntype Point struct {\n\tx, y int\n\tc color.Color\n}\n\nfunc (p Point) draw(s *sdl.Surface) {\n\ts.Set(p.x, p.y, p.c)\n}\n\ntype Rectangle struct {\n\tx, y int16\n\tw, h uint16\n\tc color.Color\n}\n\nfunc (r Rectangle) draw(s *sdl.Surface) {\n\tformat := sdl.GetVideoInfo().Vfmt\n\tcolor := sdl.ColorFromGoColor(r.c)\n\tcolorVal := sdl.MapRGB(format, color.R, color.G, color.B)\n\ts.FillRect(&sdl.Rect{r.x, r.y, r.w, r.h}, colorVal)\n}\n\ntype Circle struct {\n\tx, y int16 \/\/ Location on screen\n\tr uint16 \/\/ Radius\n\tb int \/\/ Border thickness\n\tc color.Color \/\/ Color\n}\n\n\/\/ Ewwwwww\nfunc (c Circle) draw(s *sdl.Surface) {\n\tformat := sdl.GetVideoInfo().Vfmt\n\tcolor := sdl.ColorFromGoColor(c.c)\n\tcolorVal := sdl.MapRGB(format, color.R, color.G, color.B)\n\n\tprevL := int16(c.r)\n\tfor h := 0; h <= int(c.r); h++ {\n\t\tl := int16(math.Sqrt(float64(int(c.r*c.r) - (h * h))))\n\n\t\tx := int16(c.x) - l\n\n\t\tif c.b == 0 {\n\t\t\ts.FillRect(&sdl.Rect{x, c.y + int16(h), uint16(2 * l), 1}, colorVal)\n\t\t\ts.FillRect(&sdl.Rect{x, c.y - int16(h), uint16(2 * l), 1}, colorVal)\n\t\t} else {\n\t\t\ts.FillRect(&sdl.Rect{x, c.y + int16(h), uint16(math.Max(1, math.Abs(float64(l-prevL)))), 1}, colorVal)\n\t\t\ts.FillRect(&sdl.Rect{x, c.y - int16(h), uint16(math.Max(1, math.Abs(float64(l-prevL)))), 1}, colorVal)\n\t\t\ts.FillRect(&sdl.Rect{x + l + prevL, c.y + int16(h), uint16(math.Max(1, math.Abs(float64(l-prevL)))), 1}, colorVal)\n\t\t\ts.FillRect(&sdl.Rect{x + l + prevL, c.y - int16(h), uint16(math.Max(1, math.Abs(float64(l-prevL)))), 1}, colorVal)\n\t\t}\n\n\t\tprevL = l\n\t}\n}\n<commit_msg>Implemented the integer midpoint circle algorithm to make circle drawing better & faster.<commit_after>\/*\n * Primitives are basic shapes which can be directly drawn to screen.\n *\n *\/\n\npackage graphics\n\nimport \"image\/color\"\nimport \"github.com\/banthar\/Go-SDL\/sdl\"\n\n\/\/ A Primitive is a basic shape which can be drawn directly by the artist.\ntype Primitive interface {\n\tdraw(s *sdl.Surface)\n}\n\n\/\/ A Point is as it sounds, a single point in space.\ntype Point struct {\n\tx, y int\n\tc color.Color\n}\n\n\/\/ Points are drawn by setting a single corresponding pixel.\nfunc (p Point) draw(s *sdl.Surface) {\n\tsafeSet(s, p.x, p.y, p.c)\n}\n\n\/\/ A Rectangle is... a rectangle.\ntype Rectangle struct {\n\tx, y int16\n\tw, h uint16\n\tc color.Color\n}\n\n\/\/ Rectangles are drawn by directly calling FillRect on the surface.\nfunc (r Rectangle) draw(s *sdl.Surface) {\n\tformat := sdl.GetVideoInfo().Vfmt\n\tcolor := sdl.ColorFromGoColor(r.c)\n\tcolorVal := sdl.MapRGB(format, color.R, color.G, color.B)\n\ts.FillRect(&sdl.Rect{r.x, r.y, r.w, r.h}, colorVal)\n}\n\n\/\/ Circles are, you guessed it. Circles.\ntype Circle struct {\n\tx, y int16 \/\/ Location on screen\n\tr uint16 \/\/ Radius\n\tb int \/\/ Border thickness. For now only controls if there IS a border or not, not actually it's thickness.\n\tc color.Color \/\/ Color\n}\n\n\/\/ Circles may be filled or not.\nfunc (c Circle) draw(s *sdl.Surface) {\n\tif c.b == 0 {\n\t\tdrawFilledCircle(c.x, c.y, c.r, c.c, s)\n\t} else {\n\t\tdrawOutlineCircle(c.x, c.y, c.r, c.c, s)\n\t}\n}\n\n\/\/ drawFilledCircle uses the integer midpoint circle algorithm to draw a filled\n\/\/ circle to the given surface.\nfunc drawFilledCircle(x0, y0 int16, r uint16, c color.Color, s *sdl.Surface) {\n\tformat := sdl.GetVideoInfo().Vfmt\n\tcolor := sdl.ColorFromGoColor(c)\n\tcolorVal := sdl.MapRGB(format, color.R, color.G, color.B)\n\n\tx := int16(r)\n\ty := int16(0)\n\te := 1 - x\n\n\tfor x >= y {\n\t\ts.FillRect(&sdl.Rect{-x + x0, y + y0, uint16(2 * x), 1}, colorVal)\n\t\ts.FillRect(&sdl.Rect{-x + x0, -y + y0, uint16(2 * x), 1}, colorVal)\n\t\ts.FillRect(&sdl.Rect{-y + x0, x + y0, uint16(2 * y), 1}, colorVal)\n\t\ts.FillRect(&sdl.Rect{-y + x0, -x + y0, uint16(2 * y), 1}, colorVal)\n\n\t\ty++\n\n\t\tif e < 0 {\n\t\t\te += 2*y + 1\n\t\t} else {\n\t\t\tx--\n\t\t\te += 2 * (y - x + 1)\n\t\t}\n\t}\n}\n\n\/\/ drawOutlineCircle uses the integer midpoint circle algorithm to draw the outline\n\/\/ of a circle (1 px thick) to the given surface.\nfunc drawOutlineCircle(x0, y0 int16, r uint16, c color.Color, s *sdl.Surface) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tcolor := sdl.ColorFromGoColor(c)\n\n\tx := int16(r)\n\ty := int16(0)\n\te := 1 - x\n\n\tfor x >= y {\n\t\tsafeSet(s, int(x+x0), int(y+y0), color)\n\t\tsafeSet(s, int(x+x0), int(-y+y0), color)\n\t\tsafeSet(s, int(-x+x0), int(y+y0), color)\n\t\tsafeSet(s, int(-x+x0), int(-y+y0), color)\n\t\tsafeSet(s, int(y+x0), int(x+y0), color)\n\t\tsafeSet(s, int(y+x0), int(-x+y0), color)\n\t\tsafeSet(s, int(-y+x0), int(x+y0), color)\n\t\tsafeSet(s, int(-y+x0), int(-x+y0), color)\n\n\t\ty++\n\n\t\tif e < 0 {\n\t\t\te += 2*y + 1\n\t\t} else {\n\t\t\tx--\n\t\t\te += 2 * (y - x + 1)\n\t\t}\n\t}\n}\n\nfunc safeSet(s *sdl.Surface, x, y int, c sdl.Color) {\n\tif x >= 0 && y >= 0 && x < int(s.W) && y < int(s.H) {\n\t\ts.Set(x, y, c)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012, Google Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage topotools\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com\/youtube\/vitess\/go\/trace\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/concurrency\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/logutil\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/topo\"\n\t\"golang.org\/x\/net\/context\"\n\n\ttopodatapb \"github.com\/youtube\/vitess\/go\/vt\/proto\/topodata\"\n)\n\n\/\/ RebuildShard updates the SrvShard objects and underlying serving graph.\n\/\/\n\/\/ Re-read from TopologyServer to make sure we are using the side\n\/\/ effects of all actions.\n\/\/\n\/\/ This function will start each cell over from the beginning on ErrBadVersion,\n\/\/ so it doesn't need a lock on the shard.\nfunc RebuildShard(ctx context.Context, log logutil.Logger, ts topo.Server, keyspace, shard string, cells []string) (*topo.ShardInfo, error) {\n\tlog.Infof(\"RebuildShard %v\/%v\", keyspace, shard)\n\n\tspan := trace.NewSpanFromContext(ctx)\n\tspan.StartLocal(\"topotools.RebuildShard\")\n\tdefer span.Finish()\n\tctx = trace.NewContext(ctx, span)\n\n\t\/\/ read the existing shard info. It has to exist.\n\tshardInfo, err := ts.GetShard(ctx, keyspace, shard)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ rebuild all cells in parallel\n\twg := sync.WaitGroup{}\n\trec := concurrency.AllErrorRecorder{}\n\tfor _, cell := range shardInfo.Cells {\n\t\t\/\/ skip this cell if we shouldn't rebuild it\n\t\tif !topo.InCellList(cell, cells) {\n\t\t\tcontinue\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo func(cell string) {\n\t\t\tdefer wg.Done()\n\t\t\trec.RecordError(rebuildCellSrvShard(ctx, log, ts, shardInfo, cell))\n\t\t}(cell)\n\t}\n\twg.Wait()\n\n\treturn shardInfo, rec.Error()\n}\n\n\/\/ rebuildCellSrvShard computes and writes the serving graph data to a\n\/\/ single cell\nfunc rebuildCellSrvShard(ctx context.Context, log logutil.Logger, ts topo.Server, si *topo.ShardInfo, cell string) (err error) {\n\tlog.Infof(\"rebuildCellSrvShard %v\/%v in cell %v\", si.Keyspace(), si.ShardName(), cell)\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t}\n\n\t\t\/\/ Read existing EndPoints node versions, so we know if any\n\t\t\/\/ changes sneak in after we read the tablets.\n\t\tversions, err := getEndPointsVersions(ctx, ts, cell, si.Keyspace(), si.ShardName())\n\n\t\t\/\/ Get all tablets in this cell\/shard.\n\t\ttablets, err := ts.GetTabletMapForShardByCell(ctx, si.Keyspace(), si.ShardName(), []string{cell})\n\t\tif err != nil {\n\t\t\tif err != topo.ErrPartialResult {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlog.Warningf(\"Got ErrPartialResult from topo.GetTabletMapForShardByCell(%v), some tablets may not be added properly to serving graph\", cell)\n\t\t}\n\n\t\t\/\/ Build up the serving graph from scratch.\n\t\tserving := make(map[topodatapb.TabletType]*topodatapb.EndPoints)\n\t\tfor _, tablet := range tablets {\n\t\t\t\/\/ Only add serving types.\n\t\t\tif !tablet.IsInServingGraph() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Check the Keyspace and Shard for the tablet are right.\n\t\t\tif tablet.Keyspace != si.Keyspace() || tablet.Shard != si.ShardName() {\n\t\t\t\treturn fmt.Errorf(\"CRITICAL: tablet %v is in replication graph for shard %v\/%v but belongs to shard %v:%v\", tablet.Alias, si.Keyspace(), si.ShardName(), tablet.Keyspace, tablet.Shard)\n\t\t\t}\n\n\t\t\t\/\/ Add the tablet to the list.\n\t\t\tendpoints, ok := serving[tablet.Type]\n\t\t\tif !ok {\n\t\t\t\tendpoints = topo.NewEndPoints()\n\t\t\t\tserving[tablet.Type] = endpoints\n\t\t\t}\n\t\t\tentry, err := topo.TabletEndPoint(tablet.Tablet)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warningf(\"EndPointForTablet failed for tablet %v: %v\", tablet.Alias, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tendpoints.Entries = append(endpoints.Entries, entry)\n\t\t}\n\n\t\twg := sync.WaitGroup{}\n\t\tfatalErrs := concurrency.AllErrorRecorder{}\n\t\tretryErrs := concurrency.AllErrorRecorder{}\n\n\t\t\/\/ Write nodes that should exist.\n\t\tfor tabletType, endpoints := range serving {\n\t\t\twg.Add(1)\n\t\t\tgo func(tabletType topodatapb.TabletType, endpoints *topodatapb.EndPoints) {\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\tlog.Infof(\"saving serving graph for cell %v shard %v\/%v tabletType %v\", cell, si.Keyspace(), si.ShardName(), tabletType)\n\n\t\t\t\tversion, ok := versions[tabletType]\n\t\t\t\tif !ok {\n\t\t\t\t\t\/\/ This type didn't exist when we first checked.\n\t\t\t\t\t\/\/ Try to create, but only if it still doesn't exist.\n\t\t\t\t\tif err := ts.CreateEndPoints(ctx, cell, si.Keyspace(), si.ShardName(), tabletType, endpoints); err != nil {\n\t\t\t\t\t\tlog.Warningf(\"CreateEndPoints(%v, %v, %v) failed during rebuild: %v\", cell, si, tabletType, err)\n\t\t\t\t\t\tswitch err {\n\t\t\t\t\t\tcase topo.ErrNodeExists:\n\t\t\t\t\t\t\tretryErrs.RecordError(err)\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tfatalErrs.RecordError(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ Update only if the version matches.\n\t\t\t\tif err := ts.UpdateEndPoints(ctx, cell, si.Keyspace(), si.ShardName(), tabletType, endpoints, version); err != nil {\n\t\t\t\t\tlog.Warningf(\"UpdateEndPoints(%v, %v, %v) failed during rebuild: %v\", cell, si, tabletType, err)\n\t\t\t\t\tswitch err {\n\t\t\t\t\tcase topo.ErrBadVersion, topo.ErrNoNode:\n\t\t\t\t\t\tretryErrs.RecordError(err)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tfatalErrs.RecordError(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}(tabletType, endpoints)\n\t\t}\n\n\t\t\/\/ Delete nodes that shouldn't exist.\n\t\tfor tabletType, version := range versions {\n\t\t\tif _, ok := serving[tabletType]; !ok {\n\t\t\t\twg.Add(1)\n\t\t\t\tgo func(tabletType topodatapb.TabletType, version int64) {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\tlog.Infof(\"removing stale db type from serving graph: %v\", tabletType)\n\t\t\t\t\tif err := ts.DeleteEndPoints(ctx, cell, si.Keyspace(), si.ShardName(), tabletType, version); err != nil && err != topo.ErrNoNode {\n\t\t\t\t\t\tlog.Warningf(\"DeleteEndPoints(%v, %v, %v) failed during rebuild: %v\", cell, si, tabletType, err)\n\t\t\t\t\t\tswitch err {\n\t\t\t\t\t\tcase topo.ErrNoNode:\n\t\t\t\t\t\t\t\/\/ Someone else deleted it, which is fine.\n\t\t\t\t\t\tcase topo.ErrBadVersion:\n\t\t\t\t\t\t\tretryErrs.RecordError(err)\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tfatalErrs.RecordError(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}(tabletType, version)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Update srvShard object\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tlog.Infof(\"updating shard serving graph in cell %v for %v\/%v\", cell, si.Keyspace(), si.ShardName())\n\t\t\tif err := UpdateSrvShard(ctx, ts, cell, si); err != nil {\n\t\t\t\tfatalErrs.RecordError(err)\n\t\t\t\tlog.Warningf(\"writing serving data in cell %v for %v\/%v failed: %v\", cell, si.Keyspace(), si.ShardName(), err)\n\t\t\t}\n\t\t}()\n\n\t\twg.Wait()\n\n\t\t\/\/ If there are any fatal errors, give up.\n\t\tif fatalErrs.HasErrors() {\n\t\t\treturn fatalErrs.Error()\n\t\t}\n\t\t\/\/ If there are any retry errors, try again.\n\t\tif retryErrs.HasErrors() {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Otherwise, success!\n\t\treturn nil\n\t}\n}\n\nfunc getEndPointsVersions(ctx context.Context, ts topo.Server, cell, keyspace, shard string) (map[topodatapb.TabletType]int64, error) {\n\t\/\/ Get all existing tablet types.\n\ttabletTypes, err := ts.GetSrvTabletTypesPerShard(ctx, cell, keyspace, shard)\n\tif err != nil {\n\t\tif err == topo.ErrNoNode {\n\t\t\t\/\/ This just means there aren't any EndPoints lists yet.\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t\/\/ Get node versions.\n\twg := sync.WaitGroup{}\n\terrs := concurrency.AllErrorRecorder{}\n\tversions := make(map[topodatapb.TabletType]int64)\n\tmu := sync.Mutex{}\n\n\tfor _, tabletType := range tabletTypes {\n\t\twg.Add(1)\n\t\tgo func(tabletType topodatapb.TabletType) {\n\t\t\tdefer wg.Done()\n\n\t\t\t_, version, err := ts.GetEndPoints(ctx, cell, keyspace, shard, tabletType)\n\t\t\tif err != nil && err != topo.ErrNoNode {\n\t\t\t\terrs.RecordError(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmu.Lock()\n\t\t\tversions[tabletType] = version\n\t\t\tmu.Unlock()\n\t\t}(tabletType)\n\t}\n\n\twg.Wait()\n\treturn versions, errs.Error()\n}\n\nfunc updateEndpoint(ctx context.Context, ts topo.Server, cell, keyspace, shard string, tabletType topodatapb.TabletType, endpoint *topodatapb.EndPoint) error {\n\treturn retryUpdateEndpoints(ctx, ts, cell, keyspace, shard, tabletType, true, \/* create *\/\n\t\tfunc(endpoints *topodatapb.EndPoints) bool {\n\t\t\t\/\/ Look for an existing entry to update.\n\t\t\tfor i := range endpoints.Entries {\n\t\t\t\tif endpoints.Entries[i].Uid == endpoint.Uid {\n\t\t\t\t\tif topo.EndPointEquality(endpoints.Entries[i], endpoint) {\n\t\t\t\t\t\t\/\/ The entry already exists and is the same.\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ Update an existing entry.\n\t\t\t\t\tendpoints.Entries[i] = endpoint\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ The entry doesn't exist, so add it.\n\t\t\tendpoints.Entries = append(endpoints.Entries, endpoint)\n\t\t\treturn true\n\t\t})\n}\n\nfunc removeEndpoint(ctx context.Context, ts topo.Server, cell, keyspace, shard string, tabletType topodatapb.TabletType, tabletUID uint32) error {\n\terr := retryUpdateEndpoints(ctx, ts, cell, keyspace, shard, tabletType, false, \/* create *\/\n\t\tfunc(endpoints *topodatapb.EndPoints) bool {\n\t\t\t\/\/ Make a new list, excluding the given UID.\n\t\t\tentries := make([]*topodatapb.EndPoint, 0, len(endpoints.Entries))\n\t\t\tfor _, ep := range endpoints.Entries {\n\t\t\t\tif ep.Uid != tabletUID {\n\t\t\t\t\tentries = append(entries, ep)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(entries) == len(endpoints.Entries) {\n\t\t\t\t\/\/ Nothing was removed. Don't bother updating.\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t\/\/ Do the update.\n\t\t\tendpoints.Entries = entries\n\t\t\treturn true\n\t\t})\n\n\tif err == topo.ErrNoNode {\n\t\t\/\/ Our goal is to remove one endpoint. If the list is empty, we're fine.\n\t\terr = nil\n\t}\n\treturn err\n}\n\nfunc retryUpdateEndpoints(ctx context.Context, ts topo.Server, cell, keyspace, shard string, tabletType topodatapb.TabletType, create bool, updateFunc func(*topodatapb.EndPoints) bool) error {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t}\n\n\t\t\/\/ Get or create EndPoints list.\n\t\tendpoints, version, err := ts.GetEndPoints(ctx, cell, keyspace, shard, tabletType)\n\t\tif err == topo.ErrNoNode && create {\n\t\t\t\/\/ Create instead of updating.\n\t\t\tendpoints = &topodatapb.EndPoints{}\n\t\t\tif !updateFunc(endpoints) {\n\t\t\t\t\/\/ Nothing changed.\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\terr = ts.CreateEndPoints(ctx, cell, keyspace, shard, tabletType, endpoints)\n\t\t\tif err == topo.ErrNodeExists {\n\t\t\t\t\/\/ Someone else beat us to it. Try again.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ We got an existing EndPoints list. Try to update.\n\t\tif !updateFunc(endpoints) {\n\t\t\t\/\/ Nothing changed.\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ If there's nothing left, we should delete the list entirely.\n\t\tif len(endpoints.Entries) == 0 {\n\t\t\terr = ts.DeleteEndPoints(ctx, cell, keyspace, shard, tabletType, version)\n\t\t\tswitch err {\n\t\t\tcase topo.ErrNoNode:\n\t\t\t\t\/\/ Someone beat us to it, which is fine.\n\t\t\t\treturn nil\n\t\t\tcase topo.ErrBadVersion:\n\t\t\t\t\/\/ Someone else updated the list. Try again.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\terr = ts.UpdateEndPoints(ctx, cell, keyspace, shard, tabletType, endpoints, version)\n\t\tif err == topo.ErrBadVersion || (err == topo.ErrNoNode && create) {\n\t\t\t\/\/ Someone else updated or deleted the list in the meantime. Try again.\n\t\t\tcontinue\n\t\t}\n\t\treturn err\n\t}\n}\n\n\/\/ UpdateTabletEndpoints fixes up any entries in the serving graph that relate\n\/\/ to a given tablet.\nfunc UpdateTabletEndpoints(ctx context.Context, ts topo.Server, tablet *topodatapb.Tablet) (err error) {\n\tsrvTypes, err := ts.GetSrvTabletTypesPerShard(ctx, tablet.Alias.Cell, tablet.Keyspace, tablet.Shard)\n\tif err != nil {\n\t\tif err != topo.ErrNoNode {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ It's fine if there are no existing types.\n\t\tsrvTypes = nil\n\t}\n\n\twg := sync.WaitGroup{}\n\terrs := concurrency.AllErrorRecorder{}\n\n\t\/\/ Update the list that the tablet is supposed to be in (if any).\n\tif topo.IsInServingGraph(tablet.Type) {\n\t\tendpoint, err := topo.TabletEndPoint(tablet)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terrs.RecordError(\n\t\t\t\tupdateEndpoint(ctx, ts, tablet.Alias.Cell, tablet.Keyspace, tablet.Shard,\n\t\t\t\t\ttablet.Type, endpoint))\n\t\t}()\n\t}\n\n\t\/\/ Remove it from any other lists it isn't supposed to be in.\n\tfor _, srvType := range srvTypes {\n\t\tif srvType != tablet.Type {\n\t\t\twg.Add(1)\n\t\t\tgo func(tabletType topodatapb.TabletType) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\terrs.RecordError(\n\t\t\t\t\tremoveEndpoint(ctx, ts, tablet.Alias.Cell, tablet.Keyspace, tablet.Shard,\n\t\t\t\t\t\ttabletType, tablet.Alias.Uid))\n\t\t\t}(srvType)\n\t\t}\n\t}\n\n\twg.Wait()\n\treturn errs.Error()\n}\n\n\/\/ UpdateSrvShard creates the SrvShard object based on the global ShardInfo,\n\/\/ and writes it to the given cell.\nfunc UpdateSrvShard(ctx context.Context, ts topo.Server, cell string, si *topo.ShardInfo) error {\n\tsrvShard := &topodatapb.SrvShard{\n\t\tName: si.ShardName(),\n\t\tKeyRange: si.KeyRange,\n\t}\n\tif si.MasterAlias != nil {\n\t\tsrvShard.MasterCell = si.MasterAlias.Cell\n\t}\n\treturn ts.UpdateSrvShard(ctx, cell, si.Keyspace(), si.ShardName(), srvShard)\n}\n\n\/\/ UpdateAllSrvShards calls UpdateSrvShard for all cells concurrently.\nfunc UpdateAllSrvShards(ctx context.Context, ts topo.Server, si *topo.ShardInfo) error {\n\twg := sync.WaitGroup{}\n\terrs := concurrency.AllErrorRecorder{}\n\n\tfor _, cell := range si.Cells {\n\t\twg.Add(1)\n\t\tgo func(cell string) {\n\t\t\terrs.RecordError(UpdateSrvShard(ctx, ts, cell, si))\n\t\t\twg.Done()\n\t\t}(cell)\n\t}\n\twg.Wait()\n\treturn errs.Error()\n}\n<commit_msg>Remove confusing log messages in serving graph rebuild.<commit_after>\/\/ Copyright 2012, Google Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage topotools\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com\/youtube\/vitess\/go\/trace\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/concurrency\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/logutil\"\n\t\"github.com\/youtube\/vitess\/go\/vt\/topo\"\n\t\"golang.org\/x\/net\/context\"\n\n\ttopodatapb \"github.com\/youtube\/vitess\/go\/vt\/proto\/topodata\"\n)\n\n\/\/ RebuildShard updates the SrvShard objects and underlying serving graph.\n\/\/\n\/\/ Re-read from TopologyServer to make sure we are using the side\n\/\/ effects of all actions.\n\/\/\n\/\/ This function will start each cell over from the beginning on ErrBadVersion,\n\/\/ so it doesn't need a lock on the shard.\nfunc RebuildShard(ctx context.Context, log logutil.Logger, ts topo.Server, keyspace, shard string, cells []string) (*topo.ShardInfo, error) {\n\tlog.Infof(\"RebuildShard %v\/%v\", keyspace, shard)\n\n\tspan := trace.NewSpanFromContext(ctx)\n\tspan.StartLocal(\"topotools.RebuildShard\")\n\tdefer span.Finish()\n\tctx = trace.NewContext(ctx, span)\n\n\t\/\/ read the existing shard info. It has to exist.\n\tshardInfo, err := ts.GetShard(ctx, keyspace, shard)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ rebuild all cells in parallel\n\twg := sync.WaitGroup{}\n\trec := concurrency.AllErrorRecorder{}\n\tfor _, cell := range shardInfo.Cells {\n\t\t\/\/ skip this cell if we shouldn't rebuild it\n\t\tif !topo.InCellList(cell, cells) {\n\t\t\tcontinue\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo func(cell string) {\n\t\t\tdefer wg.Done()\n\t\t\trec.RecordError(rebuildCellSrvShard(ctx, log, ts, shardInfo, cell))\n\t\t}(cell)\n\t}\n\twg.Wait()\n\n\treturn shardInfo, rec.Error()\n}\n\n\/\/ rebuildCellSrvShard computes and writes the serving graph data to a\n\/\/ single cell\nfunc rebuildCellSrvShard(ctx context.Context, log logutil.Logger, ts topo.Server, si *topo.ShardInfo, cell string) (err error) {\n\tlog.Infof(\"rebuildCellSrvShard %v\/%v in cell %v\", si.Keyspace(), si.ShardName(), cell)\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t}\n\n\t\t\/\/ Read existing EndPoints node versions, so we know if any\n\t\t\/\/ changes sneak in after we read the tablets.\n\t\tversions, err := getEndPointsVersions(ctx, ts, cell, si.Keyspace(), si.ShardName())\n\n\t\t\/\/ Get all tablets in this cell\/shard.\n\t\ttablets, err := ts.GetTabletMapForShardByCell(ctx, si.Keyspace(), si.ShardName(), []string{cell})\n\t\tif err != nil {\n\t\t\tif err != topo.ErrPartialResult {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlog.Warningf(\"Got ErrPartialResult from topo.GetTabletMapForShardByCell(%v), some tablets may not be added properly to serving graph\", cell)\n\t\t}\n\n\t\t\/\/ Build up the serving graph from scratch.\n\t\tserving := make(map[topodatapb.TabletType]*topodatapb.EndPoints)\n\t\tfor _, tablet := range tablets {\n\t\t\t\/\/ Only add serving types.\n\t\t\tif !tablet.IsInServingGraph() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Check the Keyspace and Shard for the tablet are right.\n\t\t\tif tablet.Keyspace != si.Keyspace() || tablet.Shard != si.ShardName() {\n\t\t\t\treturn fmt.Errorf(\"CRITICAL: tablet %v is in replication graph for shard %v\/%v but belongs to shard %v:%v\", tablet.Alias, si.Keyspace(), si.ShardName(), tablet.Keyspace, tablet.Shard)\n\t\t\t}\n\n\t\t\t\/\/ Add the tablet to the list.\n\t\t\tendpoints, ok := serving[tablet.Type]\n\t\t\tif !ok {\n\t\t\t\tendpoints = topo.NewEndPoints()\n\t\t\t\tserving[tablet.Type] = endpoints\n\t\t\t}\n\t\t\tentry, err := topo.TabletEndPoint(tablet.Tablet)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warningf(\"TabletEndPoint failed for tablet %v: %v\", tablet.Alias, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tendpoints.Entries = append(endpoints.Entries, entry)\n\t\t}\n\n\t\twg := sync.WaitGroup{}\n\t\tfatalErrs := concurrency.AllErrorRecorder{}\n\t\tretryErrs := concurrency.AllErrorRecorder{}\n\n\t\t\/\/ Write nodes that should exist.\n\t\tfor tabletType, endpoints := range serving {\n\t\t\twg.Add(1)\n\t\t\tgo func(tabletType topodatapb.TabletType, endpoints *topodatapb.EndPoints) {\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\tlog.Infof(\"saving serving graph for cell %v shard %v\/%v tabletType %v\", cell, si.Keyspace(), si.ShardName(), tabletType)\n\n\t\t\t\tversion, ok := versions[tabletType]\n\t\t\t\tif !ok {\n\t\t\t\t\t\/\/ This type didn't exist when we first checked.\n\t\t\t\t\t\/\/ Try to create, but only if it still doesn't exist.\n\t\t\t\t\tif err := ts.CreateEndPoints(ctx, cell, si.Keyspace(), si.ShardName(), tabletType, endpoints); err != nil {\n\t\t\t\t\t\tswitch err {\n\t\t\t\t\t\tcase topo.ErrNodeExists:\n\t\t\t\t\t\t\tretryErrs.RecordError(err)\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tfatalErrs.RecordError(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t\/\/ Update only if the version matches.\n\t\t\t\tif err := ts.UpdateEndPoints(ctx, cell, si.Keyspace(), si.ShardName(), tabletType, endpoints, version); err != nil {\n\t\t\t\t\tswitch err {\n\t\t\t\t\tcase topo.ErrBadVersion, topo.ErrNoNode:\n\t\t\t\t\t\tretryErrs.RecordError(err)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tfatalErrs.RecordError(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}(tabletType, endpoints)\n\t\t}\n\n\t\t\/\/ Delete nodes that shouldn't exist.\n\t\tfor tabletType, version := range versions {\n\t\t\tif _, ok := serving[tabletType]; !ok {\n\t\t\t\twg.Add(1)\n\t\t\t\tgo func(tabletType topodatapb.TabletType, version int64) {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\tif err := ts.DeleteEndPoints(ctx, cell, si.Keyspace(), si.ShardName(), tabletType, version); err != nil && err != topo.ErrNoNode {\n\t\t\t\t\t\tswitch err {\n\t\t\t\t\t\tcase topo.ErrNoNode:\n\t\t\t\t\t\t\t\/\/ Someone else deleted it, which is fine.\n\t\t\t\t\t\tcase topo.ErrBadVersion:\n\t\t\t\t\t\t\tretryErrs.RecordError(err)\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tfatalErrs.RecordError(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}(tabletType, version)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Update srvShard object\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tlog.Infof(\"updating shard serving graph in cell %v for %v\/%v\", cell, si.Keyspace(), si.ShardName())\n\t\t\tif err := UpdateSrvShard(ctx, ts, cell, si); err != nil {\n\t\t\t\tfatalErrs.RecordError(err)\n\t\t\t\tlog.Warningf(\"writing serving data in cell %v for %v\/%v failed: %v\", cell, si.Keyspace(), si.ShardName(), err)\n\t\t\t}\n\t\t}()\n\n\t\twg.Wait()\n\n\t\t\/\/ If there are any fatal errors, give up.\n\t\tif fatalErrs.HasErrors() {\n\t\t\treturn fatalErrs.Error()\n\t\t}\n\t\t\/\/ If there are any retry errors, try again.\n\t\tif retryErrs.HasErrors() {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Otherwise, success!\n\t\treturn nil\n\t}\n}\n\nfunc getEndPointsVersions(ctx context.Context, ts topo.Server, cell, keyspace, shard string) (map[topodatapb.TabletType]int64, error) {\n\t\/\/ Get all existing tablet types.\n\ttabletTypes, err := ts.GetSrvTabletTypesPerShard(ctx, cell, keyspace, shard)\n\tif err != nil {\n\t\tif err == topo.ErrNoNode {\n\t\t\t\/\/ This just means there aren't any EndPoints lists yet.\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t\/\/ Get node versions.\n\twg := sync.WaitGroup{}\n\terrs := concurrency.AllErrorRecorder{}\n\tversions := make(map[topodatapb.TabletType]int64)\n\tmu := sync.Mutex{}\n\n\tfor _, tabletType := range tabletTypes {\n\t\twg.Add(1)\n\t\tgo func(tabletType topodatapb.TabletType) {\n\t\t\tdefer wg.Done()\n\n\t\t\t_, version, err := ts.GetEndPoints(ctx, cell, keyspace, shard, tabletType)\n\t\t\tif err != nil && err != topo.ErrNoNode {\n\t\t\t\terrs.RecordError(err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tmu.Lock()\n\t\t\tversions[tabletType] = version\n\t\t\tmu.Unlock()\n\t\t}(tabletType)\n\t}\n\n\twg.Wait()\n\treturn versions, errs.Error()\n}\n\nfunc updateEndpoint(ctx context.Context, ts topo.Server, cell, keyspace, shard string, tabletType topodatapb.TabletType, endpoint *topodatapb.EndPoint) error {\n\treturn retryUpdateEndpoints(ctx, ts, cell, keyspace, shard, tabletType, true, \/* create *\/\n\t\tfunc(endpoints *topodatapb.EndPoints) bool {\n\t\t\t\/\/ Look for an existing entry to update.\n\t\t\tfor i := range endpoints.Entries {\n\t\t\t\tif endpoints.Entries[i].Uid == endpoint.Uid {\n\t\t\t\t\tif topo.EndPointEquality(endpoints.Entries[i], endpoint) {\n\t\t\t\t\t\t\/\/ The entry already exists and is the same.\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ Update an existing entry.\n\t\t\t\t\tendpoints.Entries[i] = endpoint\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t\t\/\/ The entry doesn't exist, so add it.\n\t\t\tendpoints.Entries = append(endpoints.Entries, endpoint)\n\t\t\treturn true\n\t\t})\n}\n\nfunc removeEndpoint(ctx context.Context, ts topo.Server, cell, keyspace, shard string, tabletType topodatapb.TabletType, tabletUID uint32) error {\n\terr := retryUpdateEndpoints(ctx, ts, cell, keyspace, shard, tabletType, false, \/* create *\/\n\t\tfunc(endpoints *topodatapb.EndPoints) bool {\n\t\t\t\/\/ Make a new list, excluding the given UID.\n\t\t\tentries := make([]*topodatapb.EndPoint, 0, len(endpoints.Entries))\n\t\t\tfor _, ep := range endpoints.Entries {\n\t\t\t\tif ep.Uid != tabletUID {\n\t\t\t\t\tentries = append(entries, ep)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(entries) == len(endpoints.Entries) {\n\t\t\t\t\/\/ Nothing was removed. Don't bother updating.\n\t\t\t\treturn false\n\t\t\t}\n\t\t\t\/\/ Do the update.\n\t\t\tendpoints.Entries = entries\n\t\t\treturn true\n\t\t})\n\n\tif err == topo.ErrNoNode {\n\t\t\/\/ Our goal is to remove one endpoint. If the list is empty, we're fine.\n\t\terr = nil\n\t}\n\treturn err\n}\n\nfunc retryUpdateEndpoints(ctx context.Context, ts topo.Server, cell, keyspace, shard string, tabletType topodatapb.TabletType, create bool, updateFunc func(*topodatapb.EndPoints) bool) error {\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\tdefault:\n\t\t}\n\n\t\t\/\/ Get or create EndPoints list.\n\t\tendpoints, version, err := ts.GetEndPoints(ctx, cell, keyspace, shard, tabletType)\n\t\tif err == topo.ErrNoNode && create {\n\t\t\t\/\/ Create instead of updating.\n\t\t\tendpoints = &topodatapb.EndPoints{}\n\t\t\tif !updateFunc(endpoints) {\n\t\t\t\t\/\/ Nothing changed.\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\terr = ts.CreateEndPoints(ctx, cell, keyspace, shard, tabletType, endpoints)\n\t\t\tif err == topo.ErrNodeExists {\n\t\t\t\t\/\/ Someone else beat us to it. Try again.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ We got an existing EndPoints list. Try to update.\n\t\tif !updateFunc(endpoints) {\n\t\t\t\/\/ Nothing changed.\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ If there's nothing left, we should delete the list entirely.\n\t\tif len(endpoints.Entries) == 0 {\n\t\t\terr = ts.DeleteEndPoints(ctx, cell, keyspace, shard, tabletType, version)\n\t\t\tswitch err {\n\t\t\tcase topo.ErrNoNode:\n\t\t\t\t\/\/ Someone beat us to it, which is fine.\n\t\t\t\treturn nil\n\t\t\tcase topo.ErrBadVersion:\n\t\t\t\t\/\/ Someone else updated the list. Try again.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\terr = ts.UpdateEndPoints(ctx, cell, keyspace, shard, tabletType, endpoints, version)\n\t\tif err == topo.ErrBadVersion || (err == topo.ErrNoNode && create) {\n\t\t\t\/\/ Someone else updated or deleted the list in the meantime. Try again.\n\t\t\tcontinue\n\t\t}\n\t\treturn err\n\t}\n}\n\n\/\/ UpdateTabletEndpoints fixes up any entries in the serving graph that relate\n\/\/ to a given tablet.\nfunc UpdateTabletEndpoints(ctx context.Context, ts topo.Server, tablet *topodatapb.Tablet) (err error) {\n\tsrvTypes, err := ts.GetSrvTabletTypesPerShard(ctx, tablet.Alias.Cell, tablet.Keyspace, tablet.Shard)\n\tif err != nil {\n\t\tif err != topo.ErrNoNode {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ It's fine if there are no existing types.\n\t\tsrvTypes = nil\n\t}\n\n\twg := sync.WaitGroup{}\n\terrs := concurrency.AllErrorRecorder{}\n\n\t\/\/ Update the list that the tablet is supposed to be in (if any).\n\tif topo.IsInServingGraph(tablet.Type) {\n\t\tendpoint, err := topo.TabletEndPoint(tablet)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\terrs.RecordError(\n\t\t\t\tupdateEndpoint(ctx, ts, tablet.Alias.Cell, tablet.Keyspace, tablet.Shard,\n\t\t\t\t\ttablet.Type, endpoint))\n\t\t}()\n\t}\n\n\t\/\/ Remove it from any other lists it isn't supposed to be in.\n\tfor _, srvType := range srvTypes {\n\t\tif srvType != tablet.Type {\n\t\t\twg.Add(1)\n\t\t\tgo func(tabletType topodatapb.TabletType) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\terrs.RecordError(\n\t\t\t\t\tremoveEndpoint(ctx, ts, tablet.Alias.Cell, tablet.Keyspace, tablet.Shard,\n\t\t\t\t\t\ttabletType, tablet.Alias.Uid))\n\t\t\t}(srvType)\n\t\t}\n\t}\n\n\twg.Wait()\n\treturn errs.Error()\n}\n\n\/\/ UpdateSrvShard creates the SrvShard object based on the global ShardInfo,\n\/\/ and writes it to the given cell.\nfunc UpdateSrvShard(ctx context.Context, ts topo.Server, cell string, si *topo.ShardInfo) error {\n\tsrvShard := &topodatapb.SrvShard{\n\t\tName: si.ShardName(),\n\t\tKeyRange: si.KeyRange,\n\t}\n\tif si.MasterAlias != nil {\n\t\tsrvShard.MasterCell = si.MasterAlias.Cell\n\t}\n\treturn ts.UpdateSrvShard(ctx, cell, si.Keyspace(), si.ShardName(), srvShard)\n}\n\n\/\/ UpdateAllSrvShards calls UpdateSrvShard for all cells concurrently.\nfunc UpdateAllSrvShards(ctx context.Context, ts topo.Server, si *topo.ShardInfo) error {\n\twg := sync.WaitGroup{}\n\terrs := concurrency.AllErrorRecorder{}\n\n\tfor _, cell := range si.Cells {\n\t\twg.Add(1)\n\t\tgo func(cell string) {\n\t\t\terrs.RecordError(UpdateSrvShard(ctx, ts, cell, si))\n\t\t\twg.Done()\n\t\t}(cell)\n\t}\n\twg.Wait()\n\treturn errs.Error()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Tests for the behavior of os.File objects on plain old posix file systems,\n\/\/ for use in verifying the intended behavior of memfs.\n\npackage memfs_test\n\nimport (\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\nfunc TestPosix(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc getFileOffset(f *os.File) (offset int64, err error) {\n\tconst relativeToCurrent = 1\n\toffset, err = f.Seek(0, relativeToCurrent)\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype PosixTest struct {\n\t\/\/ A temporary directory.\n\tdir string\n\n\t\/\/ Files to close when tearing down. Nil entries are skipped.\n\ttoClose []io.Closer\n}\n\nvar _ SetUpInterface = &PosixTest{}\nvar _ TearDownInterface = &PosixTest{}\n\nfunc init() { RegisterTestSuite(&PosixTest{}) }\n\nfunc (t *PosixTest) SetUp(ti *TestInfo) {\n\tvar err error\n\n\t\/\/ Create a temporary directory.\n\tt.dir, err = ioutil.TempDir(\"\", \"posix_test\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc (t *PosixTest) TearDown() {\n\t\/\/ Close any files we opened.\n\tfor _, c := range t.toClose {\n\t\tif c == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\terr := c.Close()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\t\/\/ Remove the temporary directory.\n\terr := os.RemoveAll(t.dir)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Test functions\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *PosixTest) WriteOverlapsEndOfFile() {\n\tvar err error\n\tvar n int\n\n\t\/\/ Create a file.\n\tf, err := os.Create(path.Join(t.dir, \"foo\"))\n\tt.toClose = append(t.toClose, f)\n\tAssertEq(nil, err)\n\n\t\/\/ Make it 4 bytes long.\n\terr = f.Truncate(4)\n\tAssertEq(nil, err)\n\n\t\/\/ Write the range [2, 6).\n\tn, err = f.WriteAt([]byte(\"taco\"), 2)\n\tAssertEq(nil, err)\n\tAssertEq(4, n)\n\n\t\/\/ Read the full contents of the file.\n\tcontents, err := ioutil.ReadAll(f)\n\tAssertEq(nil, err)\n\tExpectEq(\"\\x00\\x00taco\", string(contents))\n}\n\nfunc (t *PosixTest) WriteStartsAtEndOfFile() {\n\tvar err error\n\tvar n int\n\n\t\/\/ Create a file.\n\tf, err := os.Create(path.Join(t.dir, \"foo\"))\n\tt.toClose = append(t.toClose, f)\n\tAssertEq(nil, err)\n\n\t\/\/ Make it 2 bytes long.\n\terr = f.Truncate(2)\n\tAssertEq(nil, err)\n\n\t\/\/ Write the range [2, 6).\n\tn, err = f.WriteAt([]byte(\"taco\"), 2)\n\tAssertEq(nil, err)\n\tAssertEq(4, n)\n\n\t\/\/ Read the full contents of the file.\n\tcontents, err := ioutil.ReadAll(f)\n\tAssertEq(nil, err)\n\tExpectEq(\"\\x00\\x00taco\", string(contents))\n}\n\nfunc (t *PosixTest) WriteStartsPastEndOfFile() {\n\tvar err error\n\tvar n int\n\n\t\/\/ Create a file.\n\tf, err := os.Create(path.Join(t.dir, \"foo\"))\n\tt.toClose = append(t.toClose, f)\n\tAssertEq(nil, err)\n\n\t\/\/ Write the range [2, 6).\n\tn, err = f.WriteAt([]byte(\"taco\"), 2)\n\tAssertEq(nil, err)\n\tAssertEq(4, n)\n\n\t\/\/ Read the full contents of the file.\n\tcontents, err := ioutil.ReadAll(f)\n\tAssertEq(nil, err)\n\tExpectEq(\"\\x00\\x00taco\", string(contents))\n}\n\nfunc (t *PosixTest) WriteAtDoesntChangeOffset_NotAppendMode() {\n\tvar err error\n\tvar n int\n\n\t\/\/ Create a file.\n\tf, err := os.Create(path.Join(t.dir, \"foo\"))\n\tt.toClose = append(t.toClose, f)\n\tAssertEq(nil, err)\n\n\t\/\/ Make it 16 bytes long.\n\terr = f.Truncate(16)\n\tAssertEq(nil, err)\n\n\t\/\/ Seek to offset 4.\n\t_, err = f.Seek(4, 0)\n\tAssertEq(nil, err)\n\n\t\/\/ Write the range [10, 14).\n\tn, err = f.WriteAt([]byte(\"taco\"), 2)\n\tAssertEq(nil, err)\n\tAssertEq(4, n)\n\n\t\/\/ We should still be at offset 4.\n\toffset, err := getFileOffset(f)\n\tAssertEq(nil, err)\n\tExpectEq(4, offset)\n}\n\nfunc (t *PosixTest) WriteAtDoesntChangeOffset_AppendMode() {\n\tvar err error\n\tvar n int\n\n\t\/\/ Create a file in append mode.\n\tf, err := os.OpenFile(\n\t\tpath.Join(t.dir, \"foo\"),\n\t\tos.O_RDWR|os.O_APPEND|os.O_CREATE,\n\t\t0600)\n\n\tt.toClose = append(t.toClose, f)\n\tAssertEq(nil, err)\n\n\t\/\/ Make it 16 bytes long.\n\terr = f.Truncate(16)\n\tAssertEq(nil, err)\n\n\t\/\/ Seek to offset 4.\n\t_, err = f.Seek(4, 0)\n\tAssertEq(nil, err)\n\n\t\/\/ Write the range [10, 14).\n\tn, err = f.WriteAt([]byte(\"taco\"), 2)\n\tAssertEq(nil, err)\n\tAssertEq(4, n)\n\n\t\/\/ We should still be at offset 4.\n\toffset, err := getFileOffset(f)\n\tAssertEq(nil, err)\n\tExpectEq(4, offset)\n}\n\nfunc (t *PosixTest) ReadsPastEndOfFile() {\n\tvar err error\n\tvar n int\n\tbuf := make([]byte, 1024)\n\n\t\/\/ Create a file.\n\tf, err := os.Create(path.Join(t.dir, \"foo\"))\n\tt.toClose = append(t.toClose, f)\n\tAssertEq(nil, err)\n\n\t\/\/ Give it some contents.\n\tn, err = f.Write([]byte(\"taco\"))\n\tAssertEq(nil, err)\n\tAssertEq(4, n)\n\n\t\/\/ Read a range overlapping EOF.\n\tn, err = f.ReadAt(buf[:4], 2)\n\tAssertEq(io.EOF, err)\n\tExpectEq(2, n)\n\tExpectEq(\"co\", string(buf[:n]))\n\n\t\/\/ Read a range starting at EOF.\n\tn, err = f.ReadAt(buf[:4], 4)\n\tAssertEq(io.EOF, err)\n\tExpectEq(0, n)\n\tExpectEq(\"\", string(buf[:n]))\n\n\t\/\/ Read a range starting past EOF.\n\tn, err = f.ReadAt(buf[:4], 100)\n\tAssertEq(io.EOF, err)\n\tExpectEq(0, n)\n\tExpectEq(\"\", string(buf[:n]))\n}\n<commit_msg>PosixTest.AppendMode<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Tests for the behavior of os.File objects on plain old posix file systems,\n\/\/ for use in verifying the intended behavior of memfs.\n\npackage memfs_test\n\nimport (\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n\n\t. \"github.com\/jacobsa\/ogletest\"\n)\n\nfunc TestPosix(t *testing.T) { RunTests(t) }\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Helpers\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc getFileOffset(f *os.File) (offset int64, err error) {\n\tconst relativeToCurrent = 1\n\toffset, err = f.Seek(0, relativeToCurrent)\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Boilerplate\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype PosixTest struct {\n\t\/\/ A temporary directory.\n\tdir string\n\n\t\/\/ Files to close when tearing down. Nil entries are skipped.\n\ttoClose []io.Closer\n}\n\nvar _ SetUpInterface = &PosixTest{}\nvar _ TearDownInterface = &PosixTest{}\n\nfunc init() { RegisterTestSuite(&PosixTest{}) }\n\nfunc (t *PosixTest) SetUp(ti *TestInfo) {\n\tvar err error\n\n\t\/\/ Create a temporary directory.\n\tt.dir, err = ioutil.TempDir(\"\", \"posix_test\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc (t *PosixTest) TearDown() {\n\t\/\/ Close any files we opened.\n\tfor _, c := range t.toClose {\n\t\tif c == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\terr := c.Close()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\t\/\/ Remove the temporary directory.\n\terr := os.RemoveAll(t.dir)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Test functions\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (t *PosixTest) WriteOverlapsEndOfFile() {\n\tvar err error\n\tvar n int\n\n\t\/\/ Create a file.\n\tf, err := os.Create(path.Join(t.dir, \"foo\"))\n\tt.toClose = append(t.toClose, f)\n\tAssertEq(nil, err)\n\n\t\/\/ Make it 4 bytes long.\n\terr = f.Truncate(4)\n\tAssertEq(nil, err)\n\n\t\/\/ Write the range [2, 6).\n\tn, err = f.WriteAt([]byte(\"taco\"), 2)\n\tAssertEq(nil, err)\n\tAssertEq(4, n)\n\n\t\/\/ Read the full contents of the file.\n\tcontents, err := ioutil.ReadAll(f)\n\tAssertEq(nil, err)\n\tExpectEq(\"\\x00\\x00taco\", string(contents))\n}\n\nfunc (t *PosixTest) WriteStartsAtEndOfFile() {\n\tvar err error\n\tvar n int\n\n\t\/\/ Create a file.\n\tf, err := os.Create(path.Join(t.dir, \"foo\"))\n\tt.toClose = append(t.toClose, f)\n\tAssertEq(nil, err)\n\n\t\/\/ Make it 2 bytes long.\n\terr = f.Truncate(2)\n\tAssertEq(nil, err)\n\n\t\/\/ Write the range [2, 6).\n\tn, err = f.WriteAt([]byte(\"taco\"), 2)\n\tAssertEq(nil, err)\n\tAssertEq(4, n)\n\n\t\/\/ Read the full contents of the file.\n\tcontents, err := ioutil.ReadAll(f)\n\tAssertEq(nil, err)\n\tExpectEq(\"\\x00\\x00taco\", string(contents))\n}\n\nfunc (t *PosixTest) WriteStartsPastEndOfFile() {\n\tvar err error\n\tvar n int\n\n\t\/\/ Create a file.\n\tf, err := os.Create(path.Join(t.dir, \"foo\"))\n\tt.toClose = append(t.toClose, f)\n\tAssertEq(nil, err)\n\n\t\/\/ Write the range [2, 6).\n\tn, err = f.WriteAt([]byte(\"taco\"), 2)\n\tAssertEq(nil, err)\n\tAssertEq(4, n)\n\n\t\/\/ Read the full contents of the file.\n\tcontents, err := ioutil.ReadAll(f)\n\tAssertEq(nil, err)\n\tExpectEq(\"\\x00\\x00taco\", string(contents))\n}\n\nfunc (t *PosixTest) WriteAtDoesntChangeOffset_NotAppendMode() {\n\tvar err error\n\tvar n int\n\n\t\/\/ Create a file.\n\tf, err := os.Create(path.Join(t.dir, \"foo\"))\n\tt.toClose = append(t.toClose, f)\n\tAssertEq(nil, err)\n\n\t\/\/ Make it 16 bytes long.\n\terr = f.Truncate(16)\n\tAssertEq(nil, err)\n\n\t\/\/ Seek to offset 4.\n\t_, err = f.Seek(4, 0)\n\tAssertEq(nil, err)\n\n\t\/\/ Write the range [10, 14).\n\tn, err = f.WriteAt([]byte(\"taco\"), 2)\n\tAssertEq(nil, err)\n\tAssertEq(4, n)\n\n\t\/\/ We should still be at offset 4.\n\toffset, err := getFileOffset(f)\n\tAssertEq(nil, err)\n\tExpectEq(4, offset)\n}\n\nfunc (t *PosixTest) WriteAtDoesntChangeOffset_AppendMode() {\n\tvar err error\n\tvar n int\n\n\t\/\/ Create a file in append mode.\n\tf, err := os.OpenFile(\n\t\tpath.Join(t.dir, \"foo\"),\n\t\tos.O_RDWR|os.O_APPEND|os.O_CREATE,\n\t\t0600)\n\n\tt.toClose = append(t.toClose, f)\n\tAssertEq(nil, err)\n\n\t\/\/ Make it 16 bytes long.\n\terr = f.Truncate(16)\n\tAssertEq(nil, err)\n\n\t\/\/ Seek to offset 4.\n\t_, err = f.Seek(4, 0)\n\tAssertEq(nil, err)\n\n\t\/\/ Write the range [10, 14).\n\tn, err = f.WriteAt([]byte(\"taco\"), 2)\n\tAssertEq(nil, err)\n\tAssertEq(4, n)\n\n\t\/\/ We should still be at offset 4.\n\toffset, err := getFileOffset(f)\n\tAssertEq(nil, err)\n\tExpectEq(4, offset)\n}\n\nfunc (t *PosixTest) AppendMode() {\n\tvar err error\n\tvar n int\n\tvar off int64\n\tbuf := make([]byte, 1024)\n\n\t\/\/ Create a file with some contents.\n\tfileName := path.Join(t.dir, \"foo\")\n\terr = ioutil.WriteFile(fileName, []byte(\"Jello, \"), 0600)\n\tAssertEq(nil, err)\n\n\t\/\/ Open the file in append mode.\n\tf, err := os.OpenFile(fileName, os.O_RDWR|os.O_APPEND, 0600)\n\tt.toClose = append(t.toClose, f)\n\tAssertEq(nil, err)\n\n\t\/\/ Seek to somewhere silly and then write.\n\toff, err = f.Seek(2, 0)\n\tAssertEq(nil, err)\n\tAssertEq(2, off)\n\n\tn, err = f.Write([]byte(\"world!\"))\n\tAssertEq(nil, err)\n\tAssertEq(6, n)\n\n\t\/\/ The offset should have been updated to point at the end of the file.\n\toff, err = getFileOffset(f)\n\tAssertEq(nil, err)\n\tExpectEq(13, off)\n\n\t\/\/ A random write should still work, without updating the offset.\n\tn, err = f.WriteAt([]byte(\"H\"), 0)\n\tAssertEq(nil, err)\n\tAssertEq(1, n)\n\n\toff, err = getFileOffset(f)\n\tAssertEq(nil, err)\n\tExpectEq(13, off)\n\n\t\/\/ Read back the contents of the file, which should be correct even though we\n\t\/\/ seeked to a silly place before writing the world part.\n\tn, err = f.ReadAt(buf, 0)\n\tAssertEq(io.EOF, err)\n\tExpectEq(\"Hello, world!\", string(buf[:n]))\n}\n\nfunc (t *PosixTest) ReadsPastEndOfFile() {\n\tvar err error\n\tvar n int\n\tbuf := make([]byte, 1024)\n\n\t\/\/ Create a file.\n\tf, err := os.Create(path.Join(t.dir, \"foo\"))\n\tt.toClose = append(t.toClose, f)\n\tAssertEq(nil, err)\n\n\t\/\/ Give it some contents.\n\tn, err = f.Write([]byte(\"taco\"))\n\tAssertEq(nil, err)\n\tAssertEq(4, n)\n\n\t\/\/ Read a range overlapping EOF.\n\tn, err = f.ReadAt(buf[:4], 2)\n\tAssertEq(io.EOF, err)\n\tExpectEq(2, n)\n\tExpectEq(\"co\", string(buf[:n]))\n\n\t\/\/ Read a range starting at EOF.\n\tn, err = f.ReadAt(buf[:4], 4)\n\tAssertEq(io.EOF, err)\n\tExpectEq(0, n)\n\tExpectEq(\"\", string(buf[:n]))\n\n\t\/\/ Read a range starting past EOF.\n\tn, err = f.ReadAt(buf[:4], 100)\n\tAssertEq(io.EOF, err)\n\tExpectEq(0, n)\n\tExpectEq(\"\", string(buf[:n]))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage schedule\n\nimport (\n\t\"fmt\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/pingcap\/pd\/server\/core\"\n)\n\n\/\/ MaxOperatorWaitTime is the duration that if an operator lives longer that it,\n\/\/ the operator is considered timeout.\nconst MaxOperatorWaitTime = 5 * time.Minute\n\n\/\/ OperatorStep describes the basic scheduling steps that can not be subdivided.\ntype OperatorStep interface {\n\tfmt.Stringer\n\tIsFinish(region *core.RegionInfo) bool\n\tInfluence(opInfluence OpInfluence, region *core.RegionInfo)\n}\n\n\/\/ TransferLeader is an OperatorStep that transfers a region's leader.\ntype TransferLeader struct {\n\tFromStore, ToStore uint64\n}\n\nfunc (tl TransferLeader) String() string {\n\treturn fmt.Sprintf(\"transfer leader from store %v to store %v\", tl.FromStore, tl.ToStore)\n}\n\n\/\/ IsFinish checks if current step is finished.\nfunc (tl TransferLeader) IsFinish(region *core.RegionInfo) bool {\n\treturn region.Leader.GetStoreId() == tl.ToStore\n}\n\n\/\/ Influence calculates the store difference that current step make\nfunc (tl TransferLeader) Influence(opInfluence OpInfluence, region *core.RegionInfo) {\n\tfrom := opInfluence.GetStoreInfluence(tl.FromStore)\n\tto := opInfluence.GetStoreInfluence(tl.ToStore)\n\n\tfrom.LeaderSize -= int(region.ApproximateSize)\n\tfrom.LeaderCount--\n\tto.LeaderSize += int(region.ApproximateSize)\n\tto.LeaderCount++\n}\n\n\/\/ AddPeer is an OperatorStep that adds a region peer.\ntype AddPeer struct {\n\tToStore, PeerID uint64\n}\n\nfunc (ap AddPeer) String() string {\n\treturn fmt.Sprintf(\"add peer %v on store %v\", ap.PeerID, ap.ToStore)\n}\n\n\/\/ IsFinish checks if current step is finished.\nfunc (ap AddPeer) IsFinish(region *core.RegionInfo) bool {\n\tif p := region.GetStorePeer(ap.ToStore); p != nil {\n\t\treturn region.GetPendingPeer(p.GetId()) == nil\n\t}\n\treturn false\n}\n\n\/\/ Influence calculates the store difference that current step make\nfunc (ap AddPeer) Influence(opInfluence OpInfluence, region *core.RegionInfo) {\n\tto := opInfluence.GetStoreInfluence(ap.ToStore)\n\n\tto.RegionSize += int(region.ApproximateSize)\n\tto.RegionCount++\n}\n\n\/\/ RemovePeer is an OperatorStep that removes a region peer.\ntype RemovePeer struct {\n\tFromStore uint64\n}\n\nfunc (rp RemovePeer) String() string {\n\treturn fmt.Sprintf(\"remove peer on store %v\", rp.FromStore)\n}\n\n\/\/ IsFinish checks if current step is finished.\nfunc (rp RemovePeer) IsFinish(region *core.RegionInfo) bool {\n\treturn region.GetStorePeer(rp.FromStore) == nil\n}\n\n\/\/ Influence calculates the store difference that current step make\nfunc (rp RemovePeer) Influence(opInfluence OpInfluence, region *core.RegionInfo) {\n\tfrom := opInfluence.GetStoreInfluence(rp.FromStore)\n\n\tfrom.RegionSize -= int(region.ApproximateSize)\n\tfrom.RegionCount--\n}\n\n\/\/ Operator contains execution steps generated by scheduler.\ntype Operator struct {\n\tdesc string\n\tregionID uint64\n\tkind OperatorKind\n\tsteps []OperatorStep\n\tcurrentStep int32\n\tcreateTime time.Time\n\tlevel core.PriorityLevel\n}\n\n\/\/ NewOperator creates a new operator.\nfunc NewOperator(desc string, regionID uint64, kind OperatorKind, steps ...OperatorStep) *Operator {\n\treturn &Operator{\n\t\tdesc: desc,\n\t\tregionID: regionID,\n\t\tkind: kind,\n\t\tsteps: steps,\n\t\tcreateTime: time.Now(),\n\t\tlevel: core.NormalPriority,\n\t}\n}\n\nfunc (o *Operator) String() string {\n\ts := fmt.Sprintf(\"%s (kind:%s, region:%v, createAt:%s, currentStep:%v, steps:%+v) \", o.desc, o.kind, o.regionID, o.createTime, atomic.LoadInt32(&o.currentStep), o.steps)\n\tif o.IsTimeout() {\n\t\ts = s + \"timeout\"\n\t}\n\tif o.IsFinish() {\n\t\ts = s + \"finished\"\n\t}\n\treturn s\n}\n\n\/\/ MarshalJSON serialize custom types to JSON\nfunc (o *Operator) MarshalJSON() ([]byte, error) {\n\treturn []byte(`\"` + o.String() + `\"`), nil\n}\n\n\/\/ Desc returns the operator's short description.\nfunc (o *Operator) Desc() string {\n\treturn o.desc\n}\n\n\/\/ RegionID returns the region that operator is targeted.\nfunc (o *Operator) RegionID() uint64 {\n\treturn o.regionID\n}\n\n\/\/ Kind returns operator's kind.\nfunc (o *Operator) Kind() OperatorKind {\n\treturn o.kind\n}\n\n\/\/ ElapsedTime returns duration since it was created.\nfunc (o *Operator) ElapsedTime() time.Duration {\n\treturn time.Since(o.createTime)\n}\n\n\/\/ Len returns the operator's steps count.\nfunc (o *Operator) Len() int {\n\treturn len(o.steps)\n}\n\n\/\/ Step returns the i-th step.\nfunc (o *Operator) Step(i int) OperatorStep {\n\tif i >= 0 && i < len(o.steps) {\n\t\treturn o.steps[i]\n\t}\n\treturn nil\n}\n\n\/\/ Check checks if current step is finished, returns next step to take action.\n\/\/ It's safe to be called by multiple goroutine concurrently.\nfunc (o *Operator) Check(region *core.RegionInfo) OperatorStep {\n\tfor step := atomic.LoadInt32(&o.currentStep); int(step) < len(o.steps); step++ {\n\t\tif o.steps[int(step)].IsFinish(region) {\n\t\t\tatomic.StoreInt32(&o.currentStep, step+1)\n\t\t} else {\n\t\t\treturn o.steps[int(step)]\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ SetPriorityLevel set the priority level for operator\nfunc (o *Operator) SetPriorityLevel(level core.PriorityLevel) {\n\to.level = level\n}\n\n\/\/ GetPriorityLevel get the priority level\nfunc (o *Operator) GetPriorityLevel() core.PriorityLevel {\n\treturn o.level\n}\n\n\/\/ IsFinish checks if all steps are finished.\nfunc (o *Operator) IsFinish() bool {\n\treturn atomic.LoadInt32(&o.currentStep) >= int32(len(o.steps))\n}\n\n\/\/ IsTimeout checks the operator's create time and determines if it is timeout.\nfunc (o *Operator) IsTimeout() bool {\n\tif o.IsFinish() {\n\t\treturn false\n\t}\n\treturn time.Since(o.createTime) > MaxOperatorWaitTime\n}\n\n\/\/ Influence calculates the store difference which unfinished operator steps make\nfunc (o *Operator) Influence(opInfluence OpInfluence, region *core.RegionInfo) {\n\tfor step := atomic.LoadInt32(&o.currentStep); int(step) < len(o.steps); step++ {\n\t\tif !o.steps[int(step)].IsFinish(region) {\n\t\t\to.steps[int(step)].Influence(opInfluence, region)\n\t\t}\n\t}\n}\n\n\/\/ OperatorHistory is used to log and visualize completed operators.\ntype OperatorHistory struct {\n\tFinishTime time.Time\n\tFrom, To uint64\n\tKind core.ResourceKind\n}\n\n\/\/ History transfers the operator's steps to operator histories.\nfunc (o *Operator) History() []OperatorHistory {\n\tnow := time.Now()\n\tvar histories []OperatorHistory\n\tvar addPeerStores, removePeerStores []uint64\n\tfor _, step := range o.steps {\n\t\tswitch s := step.(type) {\n\t\tcase TransferLeader:\n\t\t\thistories = append(histories, OperatorHistory{\n\t\t\t\tFinishTime: now,\n\t\t\t\tFrom: s.FromStore,\n\t\t\t\tTo: s.ToStore,\n\t\t\t\tKind: core.LeaderKind,\n\t\t\t})\n\t\tcase AddPeer:\n\t\t\taddPeerStores = append(addPeerStores, s.ToStore)\n\t\tcase RemovePeer:\n\t\t\tremovePeerStores = append(removePeerStores, s.FromStore)\n\t\t}\n\t}\n\tfor i := range addPeerStores {\n\t\tif i < len(removePeerStores) {\n\t\t\thistories = append(histories, OperatorHistory{\n\t\t\t\tFinishTime: now,\n\t\t\t\tFrom: removePeerStores[i],\n\t\t\t\tTo: addPeerStores[i],\n\t\t\t\tKind: core.RegionKind,\n\t\t\t})\n\t\t}\n\t}\n\treturn histories\n}\n\n\/\/ CreateRemovePeerOperator creates an Operator that removes a peer from region.\nfunc CreateRemovePeerOperator(desc string, cluster Cluster, kind OperatorKind, region *core.RegionInfo, storeID uint64) *Operator {\n\tremoveKind, steps := removePeerSteps(cluster, region, storeID)\n\treturn NewOperator(desc, region.GetId(), removeKind|kind, steps...)\n}\n\n\/\/ CreateMovePeerOperator creates an Operator that replaces an old peer with a\n\/\/ new peer\nfunc CreateMovePeerOperator(desc string, cluster Cluster, region *core.RegionInfo, kind OperatorKind, oldStore, newStore uint64, peerID uint64) *Operator {\n\tremoveKind, steps := removePeerSteps(cluster, region, oldStore)\n\tsteps = append([]OperatorStep{AddPeer{ToStore: newStore, PeerID: peerID}}, steps...)\n\treturn NewOperator(desc, region.GetId(), removeKind|kind|OpRegion, steps...)\n}\n\n\/\/ removePeerSteps returns the steps to safely remove a peer. It prevents removing leader by transfer its leadership first.\nfunc removePeerSteps(cluster Cluster, region *core.RegionInfo, storeID uint64) (kind OperatorKind, steps []OperatorStep) {\n\tif region.Leader != nil && region.Leader.GetStoreId() == storeID {\n\t\tfor id := range region.GetFollowers() {\n\t\t\tfollower := cluster.GetStore(id)\n\t\t\tif follower != nil && !cluster.CheckLabelProperty(RejectLeader, follower.Labels) {\n\t\t\t\tsteps = append(steps, TransferLeader{FromStore: storeID, ToStore: id})\n\t\t\t\tkind = OpLeader\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tsteps = append(steps, RemovePeer{FromStore: storeID})\n\tkind |= OpRegion\n\treturn\n}\n<commit_msg>schedule: increase operator timeout. (#981)<commit_after>\/\/ Copyright 2016 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage schedule\n\nimport (\n\t\"fmt\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/pingcap\/pd\/server\/core\"\n)\n\n\/\/ MaxOperatorWaitTime is the duration that if an operator lives longer that it,\n\/\/ the operator is considered timeout.\nconst MaxOperatorWaitTime = 10 * time.Minute\n\n\/\/ OperatorStep describes the basic scheduling steps that can not be subdivided.\ntype OperatorStep interface {\n\tfmt.Stringer\n\tIsFinish(region *core.RegionInfo) bool\n\tInfluence(opInfluence OpInfluence, region *core.RegionInfo)\n}\n\n\/\/ TransferLeader is an OperatorStep that transfers a region's leader.\ntype TransferLeader struct {\n\tFromStore, ToStore uint64\n}\n\nfunc (tl TransferLeader) String() string {\n\treturn fmt.Sprintf(\"transfer leader from store %v to store %v\", tl.FromStore, tl.ToStore)\n}\n\n\/\/ IsFinish checks if current step is finished.\nfunc (tl TransferLeader) IsFinish(region *core.RegionInfo) bool {\n\treturn region.Leader.GetStoreId() == tl.ToStore\n}\n\n\/\/ Influence calculates the store difference that current step make\nfunc (tl TransferLeader) Influence(opInfluence OpInfluence, region *core.RegionInfo) {\n\tfrom := opInfluence.GetStoreInfluence(tl.FromStore)\n\tto := opInfluence.GetStoreInfluence(tl.ToStore)\n\n\tfrom.LeaderSize -= int(region.ApproximateSize)\n\tfrom.LeaderCount--\n\tto.LeaderSize += int(region.ApproximateSize)\n\tto.LeaderCount++\n}\n\n\/\/ AddPeer is an OperatorStep that adds a region peer.\ntype AddPeer struct {\n\tToStore, PeerID uint64\n}\n\nfunc (ap AddPeer) String() string {\n\treturn fmt.Sprintf(\"add peer %v on store %v\", ap.PeerID, ap.ToStore)\n}\n\n\/\/ IsFinish checks if current step is finished.\nfunc (ap AddPeer) IsFinish(region *core.RegionInfo) bool {\n\tif p := region.GetStorePeer(ap.ToStore); p != nil {\n\t\treturn region.GetPendingPeer(p.GetId()) == nil\n\t}\n\treturn false\n}\n\n\/\/ Influence calculates the store difference that current step make\nfunc (ap AddPeer) Influence(opInfluence OpInfluence, region *core.RegionInfo) {\n\tto := opInfluence.GetStoreInfluence(ap.ToStore)\n\n\tto.RegionSize += int(region.ApproximateSize)\n\tto.RegionCount++\n}\n\n\/\/ RemovePeer is an OperatorStep that removes a region peer.\ntype RemovePeer struct {\n\tFromStore uint64\n}\n\nfunc (rp RemovePeer) String() string {\n\treturn fmt.Sprintf(\"remove peer on store %v\", rp.FromStore)\n}\n\n\/\/ IsFinish checks if current step is finished.\nfunc (rp RemovePeer) IsFinish(region *core.RegionInfo) bool {\n\treturn region.GetStorePeer(rp.FromStore) == nil\n}\n\n\/\/ Influence calculates the store difference that current step make\nfunc (rp RemovePeer) Influence(opInfluence OpInfluence, region *core.RegionInfo) {\n\tfrom := opInfluence.GetStoreInfluence(rp.FromStore)\n\n\tfrom.RegionSize -= int(region.ApproximateSize)\n\tfrom.RegionCount--\n}\n\n\/\/ Operator contains execution steps generated by scheduler.\ntype Operator struct {\n\tdesc string\n\tregionID uint64\n\tkind OperatorKind\n\tsteps []OperatorStep\n\tcurrentStep int32\n\tcreateTime time.Time\n\tlevel core.PriorityLevel\n}\n\n\/\/ NewOperator creates a new operator.\nfunc NewOperator(desc string, regionID uint64, kind OperatorKind, steps ...OperatorStep) *Operator {\n\treturn &Operator{\n\t\tdesc: desc,\n\t\tregionID: regionID,\n\t\tkind: kind,\n\t\tsteps: steps,\n\t\tcreateTime: time.Now(),\n\t\tlevel: core.NormalPriority,\n\t}\n}\n\nfunc (o *Operator) String() string {\n\ts := fmt.Sprintf(\"%s (kind:%s, region:%v, createAt:%s, currentStep:%v, steps:%+v) \", o.desc, o.kind, o.regionID, o.createTime, atomic.LoadInt32(&o.currentStep), o.steps)\n\tif o.IsTimeout() {\n\t\ts = s + \"timeout\"\n\t}\n\tif o.IsFinish() {\n\t\ts = s + \"finished\"\n\t}\n\treturn s\n}\n\n\/\/ MarshalJSON serialize custom types to JSON\nfunc (o *Operator) MarshalJSON() ([]byte, error) {\n\treturn []byte(`\"` + o.String() + `\"`), nil\n}\n\n\/\/ Desc returns the operator's short description.\nfunc (o *Operator) Desc() string {\n\treturn o.desc\n}\n\n\/\/ RegionID returns the region that operator is targeted.\nfunc (o *Operator) RegionID() uint64 {\n\treturn o.regionID\n}\n\n\/\/ Kind returns operator's kind.\nfunc (o *Operator) Kind() OperatorKind {\n\treturn o.kind\n}\n\n\/\/ ElapsedTime returns duration since it was created.\nfunc (o *Operator) ElapsedTime() time.Duration {\n\treturn time.Since(o.createTime)\n}\n\n\/\/ Len returns the operator's steps count.\nfunc (o *Operator) Len() int {\n\treturn len(o.steps)\n}\n\n\/\/ Step returns the i-th step.\nfunc (o *Operator) Step(i int) OperatorStep {\n\tif i >= 0 && i < len(o.steps) {\n\t\treturn o.steps[i]\n\t}\n\treturn nil\n}\n\n\/\/ Check checks if current step is finished, returns next step to take action.\n\/\/ It's safe to be called by multiple goroutine concurrently.\nfunc (o *Operator) Check(region *core.RegionInfo) OperatorStep {\n\tfor step := atomic.LoadInt32(&o.currentStep); int(step) < len(o.steps); step++ {\n\t\tif o.steps[int(step)].IsFinish(region) {\n\t\t\tatomic.StoreInt32(&o.currentStep, step+1)\n\t\t} else {\n\t\t\treturn o.steps[int(step)]\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ SetPriorityLevel set the priority level for operator\nfunc (o *Operator) SetPriorityLevel(level core.PriorityLevel) {\n\to.level = level\n}\n\n\/\/ GetPriorityLevel get the priority level\nfunc (o *Operator) GetPriorityLevel() core.PriorityLevel {\n\treturn o.level\n}\n\n\/\/ IsFinish checks if all steps are finished.\nfunc (o *Operator) IsFinish() bool {\n\treturn atomic.LoadInt32(&o.currentStep) >= int32(len(o.steps))\n}\n\n\/\/ IsTimeout checks the operator's create time and determines if it is timeout.\nfunc (o *Operator) IsTimeout() bool {\n\tif o.IsFinish() {\n\t\treturn false\n\t}\n\treturn time.Since(o.createTime) > MaxOperatorWaitTime\n}\n\n\/\/ Influence calculates the store difference which unfinished operator steps make\nfunc (o *Operator) Influence(opInfluence OpInfluence, region *core.RegionInfo) {\n\tfor step := atomic.LoadInt32(&o.currentStep); int(step) < len(o.steps); step++ {\n\t\tif !o.steps[int(step)].IsFinish(region) {\n\t\t\to.steps[int(step)].Influence(opInfluence, region)\n\t\t}\n\t}\n}\n\n\/\/ OperatorHistory is used to log and visualize completed operators.\ntype OperatorHistory struct {\n\tFinishTime time.Time\n\tFrom, To uint64\n\tKind core.ResourceKind\n}\n\n\/\/ History transfers the operator's steps to operator histories.\nfunc (o *Operator) History() []OperatorHistory {\n\tnow := time.Now()\n\tvar histories []OperatorHistory\n\tvar addPeerStores, removePeerStores []uint64\n\tfor _, step := range o.steps {\n\t\tswitch s := step.(type) {\n\t\tcase TransferLeader:\n\t\t\thistories = append(histories, OperatorHistory{\n\t\t\t\tFinishTime: now,\n\t\t\t\tFrom: s.FromStore,\n\t\t\t\tTo: s.ToStore,\n\t\t\t\tKind: core.LeaderKind,\n\t\t\t})\n\t\tcase AddPeer:\n\t\t\taddPeerStores = append(addPeerStores, s.ToStore)\n\t\tcase RemovePeer:\n\t\t\tremovePeerStores = append(removePeerStores, s.FromStore)\n\t\t}\n\t}\n\tfor i := range addPeerStores {\n\t\tif i < len(removePeerStores) {\n\t\t\thistories = append(histories, OperatorHistory{\n\t\t\t\tFinishTime: now,\n\t\t\t\tFrom: removePeerStores[i],\n\t\t\t\tTo: addPeerStores[i],\n\t\t\t\tKind: core.RegionKind,\n\t\t\t})\n\t\t}\n\t}\n\treturn histories\n}\n\n\/\/ CreateRemovePeerOperator creates an Operator that removes a peer from region.\nfunc CreateRemovePeerOperator(desc string, cluster Cluster, kind OperatorKind, region *core.RegionInfo, storeID uint64) *Operator {\n\tremoveKind, steps := removePeerSteps(cluster, region, storeID)\n\treturn NewOperator(desc, region.GetId(), removeKind|kind, steps...)\n}\n\n\/\/ CreateMovePeerOperator creates an Operator that replaces an old peer with a\n\/\/ new peer\nfunc CreateMovePeerOperator(desc string, cluster Cluster, region *core.RegionInfo, kind OperatorKind, oldStore, newStore uint64, peerID uint64) *Operator {\n\tremoveKind, steps := removePeerSteps(cluster, region, oldStore)\n\tsteps = append([]OperatorStep{AddPeer{ToStore: newStore, PeerID: peerID}}, steps...)\n\treturn NewOperator(desc, region.GetId(), removeKind|kind|OpRegion, steps...)\n}\n\n\/\/ removePeerSteps returns the steps to safely remove a peer. It prevents removing leader by transfer its leadership first.\nfunc removePeerSteps(cluster Cluster, region *core.RegionInfo, storeID uint64) (kind OperatorKind, steps []OperatorStep) {\n\tif region.Leader != nil && region.Leader.GetStoreId() == storeID {\n\t\tfor id := range region.GetFollowers() {\n\t\t\tfollower := cluster.GetStore(id)\n\t\t\tif follower != nil && !cluster.CheckLabelProperty(RejectLeader, follower.Labels) {\n\t\t\t\tsteps = append(steps, TransferLeader{FromStore: storeID, ToStore: id})\n\t\t\t\tkind = OpLeader\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tsteps = append(steps, RemovePeer{FromStore: storeID})\n\tkind |= OpRegion\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Licensed to the Apache Software Foundation (ASF) under one or more\n * contributor license agreements. See the NOTICE file distributed with\n * this work for additional information regarding copyright ownership.\n * The ASF licenses this file to You under the Apache License, Version 2.0\n * (the \"License\"); you may not use this file except in compliance with\n * the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage rbac\n\nimport (\n\t\"context\"\n\t\"crypto\/rsa\"\n\t\"errors\"\n\t\"github.com\/apache\/servicecomb-service-center\/pkg\/log\"\n\t\"github.com\/apache\/servicecomb-service-center\/pkg\/rbacframe\"\n\t\"github.com\/apache\/servicecomb-service-center\/server\/service\"\n\t\"github.com\/apache\/servicecomb-service-center\/server\/service\/cipher\"\n\t\"github.com\/apache\/servicecomb-service-center\/server\/service\/rbac\/dao\"\n\t\"github.com\/astaxie\/beego\"\n\t\"github.com\/go-chassis\/go-archaius\"\n\t\"github.com\/go-chassis\/go-chassis\/security\/authr\"\n\t\"github.com\/go-chassis\/go-chassis\/security\/secret\"\n\t\"io\/ioutil\"\n)\n\nconst (\n\tRootName = \"root\"\n\tInitPassword = \"SC_INIT_ROOT_PASSWORD\"\n\tPubFilePath = \"rbac_rsa_public_key_file\"\n)\n\nvar (\n\tErrEmptyCurrentPassword = errors.New(\"current password should not be empty\")\n\tErrNoPermChangeAccount = errors.New(\"can not change other account password\")\n\tErrWrongPassword = errors.New(\"current pwd is wrong\")\n\tErrSamePassword = errors.New(\"the password can not be same as old one\")\n)\n\n\/\/Init decide whether enable rbac function and save root account to db\n\/\/ if db has root account, abort creating.\nfunc Init() {\n\tif !Enabled() {\n\t\tlog.Info(\"rbac is disabled\")\n\t\treturn\n\t}\n\terr := authr.Init()\n\tif err != nil {\n\t\tlog.Fatal(\"can not enable auth module\", err)\n\t}\n\taccountExist, err := dao.AccountExist(context.Background(), RootName)\n\tif err != nil {\n\t\tlog.Fatal(\"can not enable auth module\", err)\n\t}\n\tif !accountExist {\n\t\tinitFirstTime(RootName)\n\t}\n\treadPrivateKey()\n\treadPublicKey()\n\trbacframe.Add2WhiteAPIList(\"\/health\", \"\/version\", \"\/v4\/token\")\n\tlog.Info(\"rbac is enabled\")\n}\n\n\/\/readPublicKey read key to memory\nfunc readPrivateKey() {\n\tpf := beego.AppConfig.String(\"rbac_rsa_private_key_file\")\n\t\/\/ 打开文件\n\tdata, err := ioutil.ReadFile(pf)\n\tif err != nil {\n\t\tlog.Fatal(\"can not read private key\", err)\n\t\treturn\n\t}\n\terr = archaius.Set(\"rbac_private_key\", string(data))\n\tif err != nil {\n\t\tlog.Fatal(\"can not init rbac\", err)\n\t}\n\tlog.Info(\"read private key success\")\n}\n\n\/\/readPublicKey read key to memory\nfunc readPublicKey() {\n\tpf := beego.AppConfig.String(PubFilePath)\n\t\/\/ 打开文件\n\tcontent, err := ioutil.ReadFile(pf)\n\tif err != nil {\n\t\tlog.Fatal(\"can not find public key\", err)\n\t\treturn\n\t}\n\terr = archaius.Set(\"rbac_public_key\", string(content))\n\tif err != nil {\n\t\tlog.Fatal(\"\", err)\n\t}\n\tlog.Info(\"read public key success\")\n}\nfunc initFirstTime(admin string) {\n\t\/\/handle root account\n\tpwd := archaius.GetString(InitPassword, \"\")\n\tif pwd == \"\" {\n\t\tlog.Fatal(\"can not enable rbac, password is empty\", nil)\n\t}\n\ta := &rbacframe.Account{\n\t\tName: admin,\n\t\tPassword: pwd,\n\t\tRole: rbacframe.RoleAdmin,\n\t}\n\terr := service.ValidateCreateAccount(a)\n\tif err != nil {\n\t\tlog.Fatal(\"invalid pwd\", err)\n\t\treturn\n\t}\n\tif err := dao.CreateAccount(context.Background(), a); err != nil {\n\t\tif err == dao.ErrDuplicated {\n\t\t\tlog.Info(\"rbac is enabled\")\n\t\t\treturn\n\t\t}\n\t\tlog.Fatal(\"can not enable rbac, init root account failed\", err)\n\t}\n\tlog.Info(\"root account init success\")\n}\n\nfunc Enabled() bool {\n\treturn beego.AppConfig.DefaultBool(\"rbac_enabled\", false)\n}\n\n\/\/PublicKey get public key to verify a token\nfunc PublicKey() string {\n\treturn archaius.GetString(\"rbac_public_key\", \"\")\n}\n\n\/\/privateKey get decrypted private key to verify a token\nfunc privateKey() (string, error) {\n\tep := archaius.GetString(\"rbac_private_key\", \"\")\n\tp, err := cipher.Decrypt(ep)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn p, nil\n}\n\n\/\/GetPrivateKey return rsa key instance\nfunc GetPrivateKey() (*rsa.PrivateKey, error) {\n\tsk, err := privateKey()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tp, err := secret.ParseRSAPrivateKey(sk)\n\tif err != nil {\n\t\tlog.Error(\"can not get key:\", err)\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}\n<commit_msg>use plain content when private key decrypted failed (#663)<commit_after>\/*\n * Licensed to the Apache Software Foundation (ASF) under one or more\n * contributor license agreements. See the NOTICE file distributed with\n * this work for additional information regarding copyright ownership.\n * The ASF licenses this file to You under the Apache License, Version 2.0\n * (the \"License\"); you may not use this file except in compliance with\n * the License. You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage rbac\n\nimport (\n\t\"context\"\n\t\"crypto\/rsa\"\n\t\"errors\"\n\t\"github.com\/apache\/servicecomb-service-center\/pkg\/log\"\n\t\"github.com\/apache\/servicecomb-service-center\/pkg\/rbacframe\"\n\t\"github.com\/apache\/servicecomb-service-center\/server\/service\"\n\t\"github.com\/apache\/servicecomb-service-center\/server\/service\/cipher\"\n\t\"github.com\/apache\/servicecomb-service-center\/server\/service\/rbac\/dao\"\n\t\"github.com\/astaxie\/beego\"\n\t\"github.com\/go-chassis\/go-archaius\"\n\t\"github.com\/go-chassis\/go-chassis\/security\/authr\"\n\t\"github.com\/go-chassis\/go-chassis\/security\/secret\"\n\t\"io\/ioutil\"\n)\n\nconst (\n\tRootName = \"root\"\n\tInitPassword = \"SC_INIT_ROOT_PASSWORD\"\n\tPubFilePath = \"rbac_rsa_public_key_file\"\n)\n\nvar (\n\tErrEmptyCurrentPassword = errors.New(\"current password should not be empty\")\n\tErrNoPermChangeAccount = errors.New(\"can not change other account password\")\n\tErrWrongPassword = errors.New(\"current pwd is wrong\")\n\tErrSamePassword = errors.New(\"the password can not be same as old one\")\n)\n\n\/\/Init decide whether enable rbac function and save root account to db\n\/\/ if db has root account, abort creating.\nfunc Init() {\n\tif !Enabled() {\n\t\tlog.Info(\"rbac is disabled\")\n\t\treturn\n\t}\n\terr := authr.Init()\n\tif err != nil {\n\t\tlog.Fatal(\"can not enable auth module\", err)\n\t}\n\taccountExist, err := dao.AccountExist(context.Background(), RootName)\n\tif err != nil {\n\t\tlog.Fatal(\"can not enable auth module\", err)\n\t}\n\tif !accountExist {\n\t\tinitFirstTime(RootName)\n\t}\n\treadPrivateKey()\n\treadPublicKey()\n\trbacframe.Add2WhiteAPIList(\"\/health\", \"\/version\", \"\/v4\/token\")\n\tlog.Info(\"rbac is enabled\")\n}\n\n\/\/readPublicKey read key to memory\nfunc readPrivateKey() {\n\tpf := beego.AppConfig.String(\"rbac_rsa_private_key_file\")\n\t\/\/ 打开文件\n\tdata, err := ioutil.ReadFile(pf)\n\tif err != nil {\n\t\tlog.Fatal(\"can not read private key\", err)\n\t\treturn\n\t}\n\terr = archaius.Set(\"rbac_private_key\", string(data))\n\tif err != nil {\n\t\tlog.Fatal(\"can not init rbac\", err)\n\t}\n\tlog.Info(\"read private key success\")\n}\n\n\/\/readPublicKey read key to memory\nfunc readPublicKey() {\n\tpf := beego.AppConfig.String(PubFilePath)\n\t\/\/ 打开文件\n\tcontent, err := ioutil.ReadFile(pf)\n\tif err != nil {\n\t\tlog.Fatal(\"can not find public key\", err)\n\t\treturn\n\t}\n\terr = archaius.Set(\"rbac_public_key\", string(content))\n\tif err != nil {\n\t\tlog.Fatal(\"\", err)\n\t}\n\tlog.Info(\"read public key success\")\n}\nfunc initFirstTime(admin string) {\n\t\/\/handle root account\n\tpwd := archaius.GetString(InitPassword, \"\")\n\tif pwd == \"\" {\n\t\tlog.Fatal(\"can not enable rbac, password is empty\", nil)\n\t}\n\ta := &rbacframe.Account{\n\t\tName: admin,\n\t\tPassword: pwd,\n\t\tRole: rbacframe.RoleAdmin,\n\t}\n\terr := service.ValidateCreateAccount(a)\n\tif err != nil {\n\t\tlog.Fatal(\"invalid pwd\", err)\n\t\treturn\n\t}\n\tif err := dao.CreateAccount(context.Background(), a); err != nil {\n\t\tif err == dao.ErrDuplicated {\n\t\t\tlog.Info(\"rbac is enabled\")\n\t\t\treturn\n\t\t}\n\t\tlog.Fatal(\"can not enable rbac, init root account failed\", err)\n\t}\n\tlog.Info(\"root account init success\")\n}\n\nfunc Enabled() bool {\n\treturn beego.AppConfig.DefaultBool(\"rbac_enabled\", false)\n}\n\n\/\/PublicKey get public key to verify a token\nfunc PublicKey() string {\n\treturn archaius.GetString(\"rbac_public_key\", \"\")\n}\n\n\/\/privateKey get decrypted private key to verify a token\nfunc privateKey() string {\n\tep := archaius.GetString(\"rbac_private_key\", \"\")\n\tp, err := cipher.Decrypt(ep)\n\tif err != nil {\n\t\treturn ep\n\t}\n\treturn p\n}\n\n\/\/GetPrivateKey return rsa key instance\nfunc GetPrivateKey() (*rsa.PrivateKey, error) {\n\tsk := privateKey()\n\tp, err := secret.ParseRSAPrivateKey(sk)\n\tif err != nil {\n\t\tlog.Error(\"can not get key:\", err)\n\t\treturn nil, err\n\t}\n\treturn p, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package util\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"time\"\n)\n\n\/\/ GetISO8601TimeStamp gets timestamp string in ISO8601 format\nfunc GetISO8601TimeStamp(ts time.Time) string {\n\tt := ts.UTC()\n\treturn fmt.Sprintf(\"%04d-%02d-%02dT%02d:%02d:%02dZ\", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second())\n}\n\nconst FormatISO8601 = \"2006-01-02T15:04:05Z\"\nconst jsonFormatISO8601 = `\"` + FormatISO8601 + `\"`\nconst formatISO8601withoutSeconds = \"2006-01-02T15:04Z\"\nconst jsonFormatISO8601withoutSeconds = `\"` + formatISO8601withoutSeconds + `\"`\n\n\/\/ A ISO6801Time represents a time in ISO8601 format\ntype ISO6801Time time.Time\n\n\/\/ New constructs a new iso8601.Time instance from an existing\n\/\/ time.Time instance. This causes the nanosecond field to be set to\n\/\/ 0, and its time zone set to a fixed zone with no offset from UTC\n\/\/ (but it is *not* UTC itself).\nfunc NewISO6801Time(t time.Time) ISO6801Time {\n\treturn ISO6801Time(time.Date(\n\t\tt.Year(),\n\t\tt.Month(),\n\t\tt.Day(),\n\t\tt.Hour(),\n\t\tt.Minute(),\n\t\tt.Second(),\n\t\t0,\n\t\ttime.UTC,\n\t))\n}\n\n\/\/ IsDefault checks if the time is default\nfunc (it *ISO6801Time) IsDefault() bool {\n\treturn *it == ISO6801Time{}\n}\n\n\/\/ MarshalJSON serializes the ISO6801Time into JSON string\nfunc (it ISO6801Time) MarshalJSON() ([]byte, error) {\n\treturn []byte(time.Time(it).Format(jsonFormatISO8601)), nil\n}\n\n\/\/ UnmarshalJSON deserializes the ISO6801Time from JSON string\nfunc (it *ISO6801Time) UnmarshalJSON(data []byte) error {\n\tstr := string(data)\n\n\tif str == \"\\\"\\\"\" || len(data) == 0 {\n\t\treturn nil\n\t}\n\tvar t time.Time\n\tvar err error\n\tif str[0] == '\"' {\n\t\tt, err = time.ParseInLocation(jsonFormatISO8601, str, time.UTC)\n\t\tif err != nil {\n\t\t\tt, err = time.ParseInLocation(jsonFormatISO8601withoutSeconds, str, time.UTC)\n\t\t}\n\t} else {\n\t\tvar i int64\n\t\ti, err = strconv.ParseInt(str, 10, 64)\n\t\tif err == nil {\n\t\t\tt = time.Unix(i \/ 1000, i % 1000)\n\t\t}\n\t}\n\tif err == nil {\n\t\t*it = ISO6801Time(t)\n\t}\n\treturn err\n}\n\/\/ String returns the time in ISO6801Time format\nfunc (it ISO6801Time) String() string {\n\treturn time.Time(it).Format(formatISO8601)\n}<commit_msg>FormatISO8601<commit_after>package util\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"time\"\n)\n\n\/\/ GetISO8601TimeStamp gets timestamp string in ISO8601 format\nfunc GetISO8601TimeStamp(ts time.Time) string {\n\tt := ts.UTC()\n\treturn fmt.Sprintf(\"%04d-%02d-%02dT%02d:%02d:%02dZ\", t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second())\n}\n\nconst FormatISO8601 = \"2006-01-02T15:04:05Z\"\nconst jsonFormatISO8601 = `\"` + FormatISO8601 + `\"`\nconst formatISO8601withoutSeconds = \"2006-01-02T15:04Z\"\nconst jsonFormatISO8601withoutSeconds = `\"` + formatISO8601withoutSeconds + `\"`\n\n\/\/ A ISO6801Time represents a time in ISO8601 format\ntype ISO6801Time time.Time\n\n\/\/ New constructs a new iso8601.Time instance from an existing\n\/\/ time.Time instance. This causes the nanosecond field to be set to\n\/\/ 0, and its time zone set to a fixed zone with no offset from UTC\n\/\/ (but it is *not* UTC itself).\nfunc NewISO6801Time(t time.Time) ISO6801Time {\n\treturn ISO6801Time(time.Date(\n\t\tt.Year(),\n\t\tt.Month(),\n\t\tt.Day(),\n\t\tt.Hour(),\n\t\tt.Minute(),\n\t\tt.Second(),\n\t\t0,\n\t\ttime.UTC,\n\t))\n}\n\n\/\/ IsDefault checks if the time is default\nfunc (it *ISO6801Time) IsDefault() bool {\n\treturn *it == ISO6801Time{}\n}\n\n\/\/ MarshalJSON serializes the ISO6801Time into JSON string\nfunc (it ISO6801Time) MarshalJSON() ([]byte, error) {\n\treturn []byte(time.Time(it).Format(jsonFormatISO8601)), nil\n}\n\n\/\/ UnmarshalJSON deserializes the ISO6801Time from JSON string\nfunc (it *ISO6801Time) UnmarshalJSON(data []byte) error {\n\tstr := string(data)\n\n\tif str == \"\\\"\\\"\" || len(data) == 0 {\n\t\treturn nil\n\t}\n\tvar t time.Time\n\tvar err error\n\tif str[0] == '\"' {\n\t\tt, err = time.ParseInLocation(jsonFormatISO8601, str, time.UTC)\n\t\tif err != nil {\n\t\t\tt, err = time.ParseInLocation(jsonFormatISO8601withoutSeconds, str, time.UTC)\n\t\t}\n\t} else {\n\t\tvar i int64\n\t\ti, err = strconv.ParseInt(str, 10, 64)\n\t\tif err == nil {\n\t\t\tt = time.Unix(i \/ 1000, i % 1000)\n\t\t}\n\t}\n\tif err == nil {\n\t\t*it = ISO6801Time(t)\n\t}\n\treturn err\n}\n\/\/ String returns the time in ISO6801Time format\nfunc (it ISO6801Time) String() string {\n\treturn time.Time(it).Format(FormatISO8601)\n}<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/otoolep\/rqlite\/server\"\n)\n\nvar spinUpDelay = time.Duration(2 * time.Second)\n\ntype testServer struct {\n\thost string\n\tport int\n\tserver *server.Server\n}\n\nfunc (t *testServer) URL() string {\n\treturn fmt.Sprintf(\"http:\/\/%s:%d\/db\", t.host, t.port)\n}\n\nfunc doPost(t *testing.T, url, body string) {\n\tresp, err := http.Post(url, \"text\/plain\", bytes.NewReader([]byte(body)))\n\tif err != nil {\n\t\tt.Fatalf(\"HTTP request failed: %s\", err.Error())\n\t}\n\tif resp.StatusCode != 200 {\n\t\tt.Fatalf(\"bad status code returned: %d\", resp.StatusCode)\n\t}\n}\n\nfunc doGet(t *testing.T, URL, query string) string {\n\tv, _ := url.Parse(URL)\n\tv.RawQuery = url.Values{\"q\": []string{query}}.Encode()\n\n\tresp, err := http.Get(v.String())\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to execute query '%s': %s\", query, err.Error())\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't read body of response: %s\", err.Error())\n\t}\n\treturn string(body)\n}\n\n\/\/ createCluster creates a cluster, numNodes in size, using path for storage\n\/\/ for all nodes. The first node in the returned slice of nodes will be the\n\/\/ cluster leader.\nfunc createCluster(t *testing.T, numNodes int, host string, basePort int, path string) []*testServer {\n\tvar nodes []*testServer\n\n\t\/\/ Create first differently, since it is the leader.\n\tnodePath := filepath.Join(path, \"0\")\n\tmustMkDirAll(nodePath)\n\ts := server.NewServer(nodePath, \"db.sqlite\", 100000, host, basePort)\n\tgo func() {\n\t\tt.Fatal(s.ListenAndServe(\"\"))\n\t}()\n\tnodes = append(nodes, &testServer{host: host, port: basePort, server: s})\n\ttime.Sleep(spinUpDelay)\n\n\t\/\/ Create remaining nodes in cluster.\n\tfor i := 1; i < numNodes; i++ {\n\t\tport := basePort + i\n\t\tnodePath := filepath.Join(path, strconv.Itoa(i))\n\t\tmustMkDirAll(nodePath)\n\n\t\ts := server.NewServer(nodePath, \"db.sqlite\", 100000, host, port)\n\t\tgo func() {\n\t\t\tt.Fatal(s.ListenAndServe(host + \":\" + strconv.Itoa(basePort)))\n\t\t}()\n\t\tnodes = append(nodes, &testServer{host: host, port: port, server: s})\n\t\ttime.Sleep(spinUpDelay)\n\t}\n\n\treturn nodes\n}\n\n\/\/ writeCluster POSTs the body against every node in the given cluster. Testing fails\n\/\/ if the POST does not succeed (HTTP 200)\nfunc writeCluster(t *testing.T, nodes []*testServer, body string) {\n\tfor _, n := range nodes {\n\t\tdoPost(t, n.URL(), body)\n\t}\n}\n\n\/\/ queryCluster performs the given query against each node in the given cluster.\n\/\/ Testing fails if the response returned from any node does not match the expected\n\/\/ string.\nfunc queryCluster(t *testing.T, nodes []*testServer, query, expected string) {\n\n}\n\nfunc runTests(t *testing.T, nodes []*testServer) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping for short testing\")\n\t}\n\ttests := []struct {\n\t\tname string \/\/ Name of test, printed during testing.\n\t\twrite string \/\/ The query to POST.\n\t\tquery string \/\/ The query to GET. Ignored if post is set.\n\t\texpected string \/\/ Expected response, as a string. Ignored if not set.\n\t}{\n\t\t{\n\t\t\tname: \"create table\",\n\t\t\twrite: \"CREATE TABLE foo (id integer not null primary key, name text)\",\n\t\t},\n\t\t{\n\t\t\tname: \"select from empty table\",\n\t\t\tquery: \"SELECT * FROM foo\",\n\t\t},\n\t\t{\n\t\t\tname: \"insert one record\",\n\t\t\tquery: `INSERT INTO foo(name) VALUES(\"fiona\")`,\n\t\t},\n\t\t{\n\t\t\tname: \"select after 1 record inserted\",\n\t\t\tquery: \"SELECT * FROM foo\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tif tt.write != \"\" {\n\t\t\tt.Logf(\"Executing '%s'\", tt.write)\n\t\t\twriteCluster(t, nodes[:1], tt.write)\n\t\t} else {\n\t\t\tt.Logf(\"Executing '%s'\", tt.query)\n\t\t\tqueryCluster(t, nodes, tt.query, tt.expected)\n\t\t}\n\t}\n}\n\nfunc TestOneNode_Test(t *testing.T) {\n\tpath := tempfile()\n\tdefer os.RemoveAll(path)\n\n\tnodes := createCluster(t, 1, \"localhost\", 8000, path)\n\tt.Logf(\"1 node cluster created in %s\", path)\n\n\trunTests(t, nodes)\n}\n\nfunc TestThreeNode_Test(t *testing.T) {\n\tt.Skip()\n\tpath := tempfile()\n\tdefer os.RemoveAll(path)\n\n\tnodes := createCluster(t, 3, \"localhost\", 8100, path)\n\tt.Logf(\"3 node cluster created in %s\", path)\n\n\trunTests(t, nodes)\n}\n\n\/\/ tempfile returns a temporary path.\nfunc tempfile() string {\n\tf, _ := ioutil.TempFile(\"\", \"rqlite_\")\n\tpath := f.Name()\n\tf.Close()\n\tos.Remove(path)\n\treturn path\n}\n\n\/\/ mustMkDirAll makes the requested directory, including parents, or panics.\nfunc mustMkDirAll(path string) {\n\tif err := os.MkdirAll(path, 0755); err != nil {\n\t\tpanic(fmt.Sprintf(\"unable to create directory at %s\", path))\n\t}\n}\n<commit_msg>Actually check query response<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/otoolep\/rqlite\/server\"\n)\n\nvar spinUpDelay = time.Duration(2 * time.Second)\n\ntype testServer struct {\n\thost string\n\tport int\n\tserver *server.Server\n}\n\nfunc (t *testServer) URL() string {\n\treturn fmt.Sprintf(\"http:\/\/%s:%d\/db\", t.host, t.port)\n}\n\nfunc doPost(t *testing.T, url, body string) {\n\tresp, err := http.Post(url, \"text\/plain\", bytes.NewReader([]byte(body)))\n\tif err != nil {\n\t\tt.Fatalf(\"HTTP request failed: %s\", err.Error())\n\t}\n\tif resp.StatusCode != 200 {\n\t\tt.Fatalf(\"bad status code returned: %d\", resp.StatusCode)\n\t}\n}\n\nfunc doGet(t *testing.T, URL, query string) string {\n\tv, _ := url.Parse(URL)\n\tv.RawQuery = url.Values{\"q\": []string{query}}.Encode()\n\n\tresp, err := http.Get(v.String())\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to execute query '%s': %s\", query, err.Error())\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Fatalf(\"Couldn't read body of response: %s\", err.Error())\n\t}\n\treturn string(body)\n}\n\n\/\/ createCluster creates a cluster, numNodes in size, using path for storage\n\/\/ for all nodes. The first node in the returned slice of nodes will be the\n\/\/ cluster leader.\nfunc createCluster(t *testing.T, numNodes int, host string, basePort int, path string) []*testServer {\n\tvar nodes []*testServer\n\n\t\/\/ Create first differently, since it is the leader.\n\tnodePath := filepath.Join(path, \"0\")\n\tmustMkDirAll(nodePath)\n\ts := server.NewServer(nodePath, \"db.sqlite\", 100000, host, basePort)\n\tgo func() {\n\t\tt.Fatal(s.ListenAndServe(\"\"))\n\t}()\n\tnodes = append(nodes, &testServer{host: host, port: basePort, server: s})\n\ttime.Sleep(spinUpDelay)\n\n\t\/\/ Create remaining nodes in cluster.\n\tfor i := 1; i < numNodes; i++ {\n\t\tport := basePort + i\n\t\tnodePath := filepath.Join(path, strconv.Itoa(i))\n\t\tmustMkDirAll(nodePath)\n\n\t\ts := server.NewServer(nodePath, \"db.sqlite\", 100000, host, port)\n\t\tgo func() {\n\t\t\tt.Fatal(s.ListenAndServe(host + \":\" + strconv.Itoa(basePort)))\n\t\t}()\n\t\tnodes = append(nodes, &testServer{host: host, port: port, server: s})\n\t\ttime.Sleep(spinUpDelay)\n\t}\n\n\treturn nodes\n}\n\n\/\/ writeCluster POSTs the body against every node in the given cluster. Testing fails\n\/\/ if the POST does not succeed (HTTP 200)\nfunc writeCluster(t *testing.T, nodes []*testServer, body string) {\n\tfor _, n := range nodes {\n\t\tdoPost(t, n.URL(), body)\n\t}\n}\n\n\/\/ queryCluster performs the given query against each node in the given cluster.\n\/\/ Testing fails if the response returned from any node does not match the expected\n\/\/ string after 10 attempts, waiting 200 milliseconds between each fetch. These\n\/\/ delays ensure consensus is reached.\nfunc queryCluster(t *testing.T, nodes []*testServer, query, expected string) {\n\tfor _, n := range nodes {\n\t\tnumAttempts := 0\n\n\t\tfor {\n\t\t\tnumAttempts++\n\t\t\te := doGet(t, n.URL(), query)\n\t\t\tif expected != \"\" && e != expected {\n\t\t\t\tif numAttempts > 10 {\n\t\t\t\t\tt.Errorf(\"'%s' failed.\\ngot: %s\\nexp:%s\\n\", query, e, expected)\n\t\t\t\t} else {\n\t\t\t\t\ttime.Sleep(200 * time.Millisecond)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc runTests(t *testing.T, nodes []*testServer) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping for short testing\")\n\t}\n\ttests := []struct {\n\t\tname string \/\/ Name of test, printed during testing.\n\t\twrite string \/\/ The query to POST.\n\t\tquery string \/\/ The query to GET. Ignored if post is set.\n\t\texpected string \/\/ Expected response, as a string. Ignored if not set.\n\t}{\n\t\t{\n\t\t\tname: \"create table\",\n\t\t\twrite: \"CREATE TABLE foo (id integer not null primary key, name text)\",\n\t\t},\n\t\t{\n\t\t\tname: \"select from empty table\",\n\t\t\tquery: \"SELECT * FROM foo\",\n\t\t\texpected: `{\"failures\":[],\"rows\":[]}`,\n\t\t},\n\t\t{\n\t\t\tname: \"insert one record\",\n\t\t\tquery: `INSERT INTO foo(name) VALUES(\"fiona\")`,\n\t\t},\n\t\t{\n\t\t\tname: \"select after 1 record inserted\",\n\t\t\tquery: \"SELECT * FROM foo\",\n\t\t\texpected: `{\"failures\":[],\"rows\":[{\"id\":\"1\",\"name\":\"fiona\"}]}`,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tif tt.write != \"\" {\n\t\t\tt.Logf(\"Executing '%s'\", tt.write)\n\t\t\twriteCluster(t, nodes[:1], tt.write)\n\t\t} else {\n\t\t\tt.Logf(\"Executing '%s'\", tt.query)\n\t\t\tqueryCluster(t, nodes, tt.query, tt.expected)\n\t\t}\n\t}\n}\n\nfunc TestOneNode_Test(t *testing.T) {\n\tpath := tempfile()\n\tdefer os.RemoveAll(path)\n\n\tnodes := createCluster(t, 1, \"localhost\", 8000, path)\n\tt.Logf(\"1 node cluster created in %s\", path)\n\n\trunTests(t, nodes)\n}\n\nfunc TestThreeNode_Test(t *testing.T) {\n\tt.Skip()\n\tpath := tempfile()\n\tdefer os.RemoveAll(path)\n\n\tnodes := createCluster(t, 3, \"localhost\", 8100, path)\n\tt.Logf(\"3 node cluster created in %s\", path)\n\n\trunTests(t, nodes)\n}\n\n\/\/ tempfile returns a temporary path.\nfunc tempfile() string {\n\tf, _ := ioutil.TempFile(\"\", \"rqlite_\")\n\tpath := f.Name()\n\tf.Close()\n\tos.Remove(path)\n\treturn path\n}\n\n\/\/ mustMkDirAll makes the requested directory, including parents, or panics.\nfunc mustMkDirAll(path string) {\n\tif err := os.MkdirAll(path, 0755); err != nil {\n\t\tpanic(fmt.Sprintf(\"unable to create directory at %s\", path))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>fscrypt: Use constant protector name<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Package table provides the main interfaces used to manipulate tabular data.\n\/\/ To understand why we need interfaces to process tabular data, it is useful to introduce\n\/\/ the concepts of the physical and the logical representation of data.\n\/\/\n\/\/ The physical representation of data refers to the representation of data as text on disk,\n\/\/ for example, in a CSV, JSON or XML file. This representation may have some type information (JSON,\n\/\/ where the primitive types that JSON supports can be used) or not (CSV, where all data is\n\/\/ represented in string form). In this project, those are going to be presented as packages that\n\/\/ provide structs which implement those interfaces. For instance, csv.NewTable creates a Table\n\/\/ which is backed up by a CSV.\n\/\/\n\/\/ The logical representation of data refers to the \"ideal\" representation of the data in terms of\n\/\/ primitive types, data structures, and relations, all as defined by the specification. We could say\n\/\/ that the specification is about the logical representation of data. That said, functions\n\/\/ exported for data processing should deal with logic representations. That functionality\n\/\/ is represented by interfaces in this package.\npackage table\n\n\/\/ Table provides functionality to iterate and write tabular data. This is the logical\n\/\/ representation and is meant to be encoding\/format agnostic.\ntype Table interface {\n\t\/\/ Headers returns the headers of the tabular data.\n\tHeaders() []string\n\n\t\/\/ Iter provides a convenient way to iterate over table's data.\n\t\/\/ The iteration process always start at the beginning of the table and\n\t\/\/ is backed by a new reading.\n\tIter() (Iterator, error)\n\n\t\/\/ ReadAll reads all rows from the table and return it as strings.\n\tReadAll() ([][]string, error)\n}\n\n\/\/ FromSlices creates a new SliceTable using passed-in arguments.\nfunc FromSlices(headers []string, content [][]string) *SliceTable {\n\treturn &SliceTable{headers, content}\n}\n\n\/\/ SliceTable offers a simple table implementation backed by slices.\ntype SliceTable struct {\n\theaders []string\n\tcontent [][]string\n}\n\n\/\/ Headers returns the headers of the tabular data.\nfunc (t *SliceTable) Headers() []string {\n\treturn t.headers\n}\n\n\/\/ Iter provides a convenient way to iterate over table's data.\n\/\/ The iteration process always start at the beginning of the table and\n\/\/ is backed by a new reading process.\nfunc (t *SliceTable) Iter() (Iterator, error) {\n\treturn &sliceIterator{content: t.content}, nil\n}\n\ntype sliceIterator struct {\n\tcontent [][]string\n\tpos int\n}\n\nfunc (i *sliceIterator) Next() bool {\n\ti.pos++\n\treturn i.pos <= len(i.content)\n}\nfunc (i *sliceIterator) Row() []string { return i.content[i.pos-1] }\nfunc (i *sliceIterator) Err() error { return nil }\nfunc (i *sliceIterator) Close() error { return nil }\n\n\/\/ Iterator is an interface which provides method to interating over tabular\n\/\/ data. It is heavly inspired by bufio.Scanner.\n\/\/ Iterating stops unrecoverably at EOF, the first I\/O error, or a token too large to fit in the buffer.\ntype Iterator interface {\n\t\/\/ Next advances the table interator to the next row, which will be available through the Cast or Row methods.\n\t\/\/ It returns false when the iterator stops, either by reaching the end of the table or an error.\n\t\/\/ After Next returns false, the Err method will return any error that ocurred during the iteration, except if it was io.EOF, Err\n\t\/\/ will return nil.\n\t\/\/ Next could automatically buffer some data, improving reading performance. It could also block, if necessary.\n\tNext() bool\n\n\t\/\/ Row returns the most recent row fetched by a call to Next as a newly allocated string slice\n\t\/\/ holding its fields.\n\tRow() []string\n\n\t\/\/ Err returns nil if no errors happened during iteration, or the actual error\n\t\/\/ otherwise.\n\tErr() error\n\n\t\/\/ Close frees up any resources used during the iteration process.\n\tClose() error\n}\n<commit_msg>Adding ReadAll to SliceTable<commit_after>\/\/ Package table provides the main interfaces used to manipulate tabular data.\n\/\/ To understand why we need interfaces to process tabular data, it is useful to introduce\n\/\/ the concepts of the physical and the logical representation of data.\n\/\/\n\/\/ The physical representation of data refers to the representation of data as text on disk,\n\/\/ for example, in a CSV, JSON or XML file. This representation may have some type information (JSON,\n\/\/ where the primitive types that JSON supports can be used) or not (CSV, where all data is\n\/\/ represented in string form). In this project, those are going to be presented as packages that\n\/\/ provide structs which implement those interfaces. For instance, csv.NewTable creates a Table\n\/\/ which is backed up by a CSV.\n\/\/\n\/\/ The logical representation of data refers to the \"ideal\" representation of the data in terms of\n\/\/ primitive types, data structures, and relations, all as defined by the specification. We could say\n\/\/ that the specification is about the logical representation of data. That said, functions\n\/\/ exported for data processing should deal with logic representations. That functionality\n\/\/ is represented by interfaces in this package.\npackage table\n\n\/\/ Table provides functionality to iterate and write tabular data. This is the logical\n\/\/ representation and is meant to be encoding\/format agnostic.\ntype Table interface {\n\t\/\/ Headers returns the headers of the tabular data.\n\tHeaders() []string\n\n\t\/\/ Iter provides a convenient way to iterate over table's data.\n\t\/\/ The iteration process always start at the beginning of the table and\n\t\/\/ is backed by a new reading.\n\tIter() (Iterator, error)\n\n\t\/\/ ReadAll reads all rows from the table and return it as strings.\n\tReadAll() ([][]string, error)\n}\n\n\/\/ FromSlices creates a new SliceTable using passed-in arguments.\nfunc FromSlices(headers []string, content [][]string) *SliceTable {\n\treturn &SliceTable{headers, content}\n}\n\n\/\/ SliceTable offers a simple table implementation backed by slices.\ntype SliceTable struct {\n\theaders []string\n\tcontent [][]string\n}\n\n\/\/ Headers returns the headers of the tabular data.\nfunc (t *SliceTable) Headers() []string {\n\treturn t.headers\n}\n\n\/\/ ReadAll reads all rows from the table and return it as strings.\nfunc (t *SliceTable) ReadAll() ([][]string, error) {\n\treturn t.content, nil\n}\n\n\/\/ Iter provides a convenient way to iterate over table's data.\n\/\/ The iteration process always start at the beginning of the table and\n\/\/ is backed by a new reading process.\nfunc (t *SliceTable) Iter() (Iterator, error) {\n\treturn &sliceIterator{content: t.content}, nil\n}\n\ntype sliceIterator struct {\n\tcontent [][]string\n\tpos int\n}\n\nfunc (i *sliceIterator) Next() bool {\n\ti.pos++\n\treturn i.pos <= len(i.content)\n}\nfunc (i *sliceIterator) Row() []string { return i.content[i.pos-1] }\nfunc (i *sliceIterator) Err() error { return nil }\nfunc (i *sliceIterator) Close() error { return nil }\n\n\/\/ Iterator is an interface which provides method to interating over tabular\n\/\/ data. It is heavly inspired by bufio.Scanner.\n\/\/ Iterating stops unrecoverably at EOF, the first I\/O error, or a token too large to fit in the buffer.\ntype Iterator interface {\n\t\/\/ Next advances the table interator to the next row, which will be available through the Cast or Row methods.\n\t\/\/ It returns false when the iterator stops, either by reaching the end of the table or an error.\n\t\/\/ After Next returns false, the Err method will return any error that ocurred during the iteration, except if it was io.EOF, Err\n\t\/\/ will return nil.\n\t\/\/ Next could automatically buffer some data, improving reading performance. It could also block, if necessary.\n\tNext() bool\n\n\t\/\/ Row returns the most recent row fetched by a call to Next as a newly allocated string slice\n\t\/\/ holding its fields.\n\tRow() []string\n\n\t\/\/ Err returns nil if no errors happened during iteration, or the actual error\n\t\/\/ otherwise.\n\tErr() error\n\n\t\/\/ Close frees up any resources used during the iteration process.\n\tClose() error\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/hyperledger\/fabric\/core\/chaincode\/shim\"\n\tpb \"github.com\/hyperledger\/fabric\/protos\/peer\"\n)\n\ntype CarChaincode struct {\n}\n\n\/\/ uuid for test mocks\nconst uuid string = \"1\"\n\n\/\/ indexes\nconst carIndexStr string = \"_cars\"\nconst userIndexStr string = \"_users\"\nconst insurerIndexStr string = \"_insurers\"\nconst registrationProposalIndexStr string = \"_registrationProposals\"\nconst revocationProposalIndexStr string = \"_revocationProposals\"\n\nfunc (t *CarChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response {\n\tfmt.Println(\"Car demo Init\")\n\n\tvar aval int\n\tvar err error\n\n\t_, args := stub.GetFunctionAndParameters()\n\tif len(args) != 1 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 1 integer to test chain.\")\n\t}\n\n\t\/\/ initialize the chaincode\n\taval, err = strconv.Atoi(args[0])\n\tif err != nil {\n\t\treturn shim.Error(\"Expecting integer value for asset holding\")\n\t}\n\n\t\/\/ write the state to the ledger\n\t\/\/ make a test var \"abc\" in order to able to query it and see if it worked\n\terr = stub.PutState(\"abc\", []byte(strconv.Itoa(aval)))\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\t\/\/ clear the car index\n\terr = clearStringIndex(carIndexStr, stub)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\t\/\/ clear the user index\n\terr = clearUserIndex(userIndexStr, stub)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\t\/\/ clear the revocation proposal index\n\terr = clearStringIndex(revocationProposalIndexStr, stub)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\t\/\/ clear the insurer index\n\terr = clearInsurerIndex(insurerIndexStr, stub)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\t\/\/ clear the registration proposal index\n\terr = clearRegistrationProposalIndex(registrationProposalIndexStr, stub)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tfmt.Println(\"Init terminated\")\n\treturn shim.Success(nil)\n}\n\n\/*\n * Invokes an action on the ledger.\n *\n * Expects 'username' and 'role' as first two parameters.\n * Unrestricted queries can only be done from test files.\n *\/\nfunc (t *CarChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response {\n\tfunction, args := stub.GetFunctionAndParameters()\n\n\tif len(args) < 2 {\n\t\treturn shim.Error(\"Invoke expects 'username' and 'role' as first two args.\")\n\t}\n\n\tusername := args[0]\n\trole := args[1]\n\targs = args[2:]\n\n\tfmt.Printf(\"Invoke is running as user '%s' with role '%s'\\n\", username, role)\n\tfmt.Printf(\"Invoke is running function '%s' with args: %s\\n\", function, strings.Join(args, \", \"))\n\n\tswitch function {\n\n\tcase \"create\":\n\t\tif role != \"garage\" {\n\t\t\treturn shim.Error(\"'create' expects you to be a garage user\")\n\t\t}\n\t\treturn t.create(stub, username, args)\n\n\tcase \"read\":\n\t\tif len(args) != 1 {\n\t\t\treturn shim.Error(\"'read' expects a key to do the look up\")\n\t\t} else if reflect.TypeOf(stub).String() != \"*shim.MockStub\" {\n\t\t\t\/\/ only allow unrestricted queries from the test files\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to do unrestricted queries on the ledger.\", role))\n\t\t} else {\n\t\t\treturn t.read(stub, args[0])\n\t\t}\n\n\tcase \"readCar\":\n\t\tif len(args) != 1 {\n\t\t\treturn shim.Error(\"'readCar' expects a car vin to do the look up\")\n\t\t} else {\n\t\t\treturn t.readCar(stub, username, args[0])\n\t\t}\n\n\tcase \"readRegistrationProposals\":\n\t\tif role != \"dot\" {\n\t\t\t\/\/ only the DOT is allowed to read registration proposals\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to read reigistration proposals.\", role))\n\t\t} else {\n\t\t\treturn t.readRegistrationProposals(stub)\n\t\t}\n\n\tcase \"register\":\n\t\tif len(args) != 1 {\n\t\t\treturn shim.Error(\"'register' expects a car vin to register\")\n\t\t} else if role != \"dot\" {\n\t\t\t\/\/ only the DOT is allowed to register new cars\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to register cars.\", role))\n\t\t} else {\n\t\t\treturn t.register(stub, username, args[0])\n\t\t}\n\n\tcase \"confirm\":\n\t\tif len(args) != 2 {\n\t\t\treturn shim.Error(fmt.Sprintf(\"'confirm' expects a car vin and numberplate to confirm a car.\\n You can choose your numberplate yourself.\"))\n\t\t} else if role != \"dot\" {\n\t\t\t\/\/ only the DOT is allowed to confirm cars\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to confirm cars.\", role))\n\t\t} else {\n\t\t\treturn t.confirm(stub, username, args)\n\t\t}\n\n\tcase \"transfer\":\n\t\tif len(args) != 2 {\n\t\t\treturn shim.Error(\"'transfer' expects a car vin and name of the new owner to transfer a car\")\n\t\t} else if role == \"user\" || role == \"garage\" {\n\t\t\t\/\/ only allow users and garage users to transer cars\n\t\t\treturn t.transfer(stub, username, args)\n\t\t} else {\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to transfer cars.\", role))\n\t\t}\n\n\tcase \"sell\":\n\t\tif len(args) != 3 {\n\t\t\treturn shim.Error(\"'sell' expects a price, car vin and buyer name to transfer a car\")\n\t\t} else if role == \"user\" || role == \"garage\" {\n\t\t\t\/\/ only allow users and garage users to transer cars\n\t\t\treturn t.sell(stub, username, args)\n\t\t} else {\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to sell cars.\", role))\n\t\t}\n\n\tcase \"getRevocationProposals\":\n\t\tif role != \"dot\" {\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to query revocation proposals.\", role))\n\t\t} else {\n\t\t\treturn t.getRevocationProposals(stub)\n\t\t}\n\n\tcase \"revocationProposal\":\n\t\tif len(args) != 1 {\n\t\t\treturn shim.Error(\"'revocationProposal' expects a car vin to revoke a car\")\n\t\t} else if role != \"user\" {\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to create a revocation proposal.\", role))\n\t\t} else {\n\t\t\treturn t.revocationProposal(stub, username, args[0])\n\t\t}\n\n\tcase \"revoke\":\n\t\tif len(args) != 1 {\n\t\t\treturn shim.Error(\"'revoke' expects a car vin to revoke a car\")\n\t\t} else if role != \"dot\" {\n\t\t\t\/\/ only the DOT is allowed to revoke cars\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to revoke cars.\", role))\n\t\t} else {\n\t\t\treturn t.revoke(stub, username, args[0])\n\t\t}\n\n\tcase \"delete\":\n\t\tif len(args) != 1 {\n\t\t\treturn shim.Error(\"'delete' expects a car vin to delete a car\")\n\t\t} else if role != \"dot\" {\n\t\t\t\/\/ only the DOT is allowed to delete cars\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to delete cars.\", role))\n\t\t} else {\n\t\t\treturn t.delete(stub, args[0])\n\t\t}\n\n\tcase \"insureProposal\":\n\t\tif len(args) != 2 {\n\t\t\treturn shim.Error(\"'insureProposal' expects a car vin and an insurance company\")\n\t\t} else if role != \"user\" {\n\t\t\t\/\/ only normal users are allowed to do insurance proposals\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to create an insurance proposal.\", role))\n\t\t} else {\n\t\t\treturn t.insureProposal(stub, username, args[0], args[1])\n\t\t}\n\n\tcase \"insuranceAccept\":\n\t\tif len(args) != 2 {\n\t\t\treturn shim.Error(\"'insuranceAccept' expects a car vin and an insurance company\")\n\t\t} else if role != \"insurer\" {\n\t\t\t\/\/ only insurers are allowed to create insurance contracts\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to create an insurance proposal.\", role))\n\t\t} else {\n\t\t\treturn t.insuranceAccept(stub, username, args[0], args[1])\n\t\t}\n\n\tcase \"getInsurer\":\n\t\tif len(args) != 1 {\n\t\t\treturn shim.Error(\"'getInsurer' expects an insurance company name\")\n\t\t} else if role != \"insurer\" {\n\t\t\t\/\/ only insurers are allowed to read their insurance proposals\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to create an insurance proposal.\", role))\n\t\t} else {\n\t\t\treturn t.getInsurer(stub, args[0])\n\t\t}\n\tcase \"somethingelse\":\n\n\tdefault:\n\t\treturn shim.Error(\"Invoke did not find function: \" + function)\n\t}\n\n\t\/*\tif function == \"create\" {\n\t\t\tif role != \"garage\" {\n\t\t\t\treturn shim.Error(\"'create' expects you to be a garage user\")\n\t\t\t}\n\t\t\treturn t.create(stub, username, args)\n\t\t} else if function == \"read\" {\n\t\t\tif len(args) != 1 {\n\t\t\t\treturn shim.Error(\"'read' expects a key to do the look up\")\n\t\t\t} else if reflect.TypeOf(stub).String() != \"*shim.MockStub\" {\n\t\t\t\t\/\/ only allow unrestricted queries from the test files\n\t\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to do unrestricted queries on the ledger.\", role))\n\t\t\t} else {\n\t\t\t\treturn t.read(stub, args[0])\n\t\t\t}\n\t\t} else if function == \"readCar\" {\n\t\t\tif len(args) != 1 {\n\t\t\t\treturn shim.Error(\"'readCar' expects a car vin to do the look up\")\n\t\t\t} else {\n\t\t\t\treturn t.readCar(stub, username, args[0])\n\t\t\t}\n\t\t} else if function == \"readRegistrationProposals\" {\n\t\t\tif role != \"dot\" {\n\t\t\t\t\/\/ only the DOT is allowed to read registration proposals\n\t\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to read reigistration proposals.\", role))\n\t\t\t} else {\n\t\t\t\treturn t.readRegistrationProposals(stub)\n\t\t\t}\n\t\t} else if function == \"register\" {\n\t\t\tif len(args) != 1 {\n\t\t\t\treturn shim.Error(\"'register' expects a car vin to register\")\n\t\t\t} else if role != \"dot\" {\n\t\t\t\t\/\/ only the DOT is allowed to register new cars\n\t\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to register cars.\", role))\n\t\t\t} else {\n\t\t\t\treturn t.register(stub, username, args[0])\n\t\t\t}\n\t\t} else if function == \"confirm\" {\n\t\t\tif len(args) != 2 {\n\t\t\t\treturn shim.Error(fmt.Sprintf(\"'confirm' expects a car vin and numberplate to confirm a car.\\n You can choose your numberplate yourself.\"))\n\t\t\t} else if role != \"dot\" {\n\t\t\t\t\/\/ only the DOT is allowed to confirm cars\n\t\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to confirm cars.\", role))\n\t\t\t} else {\n\t\t\t\treturn t.confirm(stub, username, args)\n\t\t\t}\n\t\t} else if function == \"transfer\" {\n\t\t\tif len(args) != 2 {\n\t\t\t\treturn shim.Error(\"'transfer' expects a car vin and name of the new owner to transfer a car\")\n\t\t\t} else if role == \"user\" || role == \"garage\" {\n\t\t\t\t\/\/ only allow users and garage users to transer cars\n\t\t\t\treturn t.transfer(stub, username, args)\n\t\t\t} else {\n\t\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to transfer cars.\", role))\n\t\t\t}\n\t\t} else if function == \"sell\" {\n\t\t\tif len(args) != 3 {\n\t\t\t\treturn shim.Error(\"'sell' expects a price, car vin and buyer name to transfer a car\")\n\t\t\t} else if role == \"user\" || role == \"garage\" {\n\t\t\t\t\/\/ only allow users and garage users to transer cars\n\t\t\t\treturn t.sell(stub, username, args)\n\t\t\t} else {\n\t\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to sell cars.\", role))\n\t\t\t}\n\t\t} else if function == \"getRevocationProposals\" {\n\t\t\tif role != \"dot\" {\n\t\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to query revocation proposals.\", role))\n\t\t\t} else {\n\t\t\t\treturn t.getRevocationProposals(stub)\n\t\t\t}\n\t\t} else if function == \"revocationProposal\" {\n\t\t\tif len(args) != 1 {\n\t\t\t\treturn shim.Error(\"'revocationProposal' expects a car vin to revoke a car\")\n\t\t\t} else if role != \"user\" {\n\t\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to create a revocation proposal.\", role))\n\t\t\t} else {\n\t\t\t\treturn t.revocationProposal(stub, username, args[0])\n\t\t\t}\n\t\t} else if function == \"revoke\" {\n\t\t\tif len(args) != 1 {\n\t\t\t\treturn shim.Error(\"'revoke' expects a car vin to revoke a car\")\n\t\t\t} else if role != \"dot\" {\n\t\t\t\t\/\/ only the DOT is allowed to revoke cars\n\t\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to revoke cars.\", role))\n\t\t\t} else {\n\t\t\t\treturn t.revoke(stub, username, args[0])\n\t\t\t}\n\t\t} else if function == \"delete\" {\n\t\t\tif len(args) != 1 {\n\t\t\t\treturn shim.Error(\"'delete' expects a car vin to delete a car\")\n\t\t\t} else if role != \"dot\" {\n\t\t\t\t\/\/ only the DOT is allowed to delete cars\n\t\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to delete cars.\", role))\n\t\t\t} else {\n\t\t\t\treturn t.delete(stub, args[0])\n\t\t\t}\n\t\t} else if function == \"insureProposal\" {\n\t\t\tif len(args) != 2 {\n\t\t\t\treturn shim.Error(\"'insureProposal' expects a car vin and an insurance company\")\n\t\t\t} else if role != \"user\" {\n\t\t\t\t\/\/ only normal users are allowed to do insurance proposals\n\t\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to create an insurance proposal.\", role))\n\t\t\t} else {\n\t\t\t\treturn t.insureProposal(stub, username, args[0], args[1])\n\t\t\t}\n\t\t} else if function == \"insuranceAccept\" {\n\t\t\tif len(args) != 2 {\n\t\t\t\treturn shim.Error(\"'insuranceAccept' expects a car vin and an insurance company\")\n\t\t\t} else if role != \"insurer\" {\n\t\t\t\t\/\/ only insurers are allowed to create insurance contracts\n\t\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to create an insurance proposal.\", role))\n\t\t\t} else {\n\t\t\t\treturn t.insuranceAccept(stub, username, args[0], args[1])\n\t\t\t}\n\t\t} else if function == \"getInsurer\" {\n\t\t\tif len(args) != 1 {\n\t\t\t\treturn shim.Error(\"'getInsurer' expects an insurance company name\")\n\t\t\t} else if role != \"insurer\" {\n\t\t\t\t\/\/ only insurers are allowed to read their insurance proposals\n\t\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to create an insurance proposal.\", role))\n\t\t\t} else {\n\t\t\t\treturn t.getInsurer(stub, args[0])\n\t\t\t}\n\t\t}\n\n\t*\/\n\n\treturn shim.Error(\"Invoke did not find function: \" + function)\n}\n\n\/*\n * Reads ledger state from position 'key'.\n *\n * Can be any of:\n * - Car (expects car timestamp as key)\n * - User (expects user name as key)\n * - or an index like '_cars'\n *\n * On success,\n * returns ledger state in bytes at position 'key'.\n *\/\nfunc (t *CarChaincode) read(stub shim.ChaincodeStubInterface, key string) pb.Response {\n\tif key == \"\" {\n\t\treturn shim.Error(\"'read' expects a non-empty key to do the look up\")\n\t}\n\n\tvalAsBytes, err := stub.GetState(key)\n\tif err != nil {\n\t\treturn shim.Error(\"Failed to fetch value at key '\" + key + \"' from ledger\")\n\t}\n\n\treturn shim.Success(valAsBytes)\n}\n\nfunc main() {\n\terr := shim.Start(new(CarChaincode))\n\tif err != nil {\n\t\tfmt.Printf(\"Error starting Simple chaincode: %s\", err)\n\t}\n}\n<commit_msg>code cleanup<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/hyperledger\/fabric\/core\/chaincode\/shim\"\n\tpb \"github.com\/hyperledger\/fabric\/protos\/peer\"\n)\n\ntype CarChaincode struct {\n}\n\n\/\/ uuid for test mocks\nconst uuid string = \"1\"\n\n\/\/ indexes\nconst carIndexStr string = \"_cars\"\nconst userIndexStr string = \"_users\"\nconst insurerIndexStr string = \"_insurers\"\nconst registrationProposalIndexStr string = \"_registrationProposals\"\nconst revocationProposalIndexStr string = \"_revocationProposals\"\n\nfunc (t *CarChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response {\n\tfmt.Println(\"Car demo Init\")\n\n\tvar aval int\n\tvar err error\n\n\t_, args := stub.GetFunctionAndParameters()\n\tif len(args) != 1 {\n\t\treturn shim.Error(\"Incorrect number of arguments. Expecting 1 integer to test chain.\")\n\t}\n\n\t\/\/ initialize the chaincode\n\taval, err = strconv.Atoi(args[0])\n\tif err != nil {\n\t\treturn shim.Error(\"Expecting integer value for asset holding\")\n\t}\n\n\t\/\/ write the state to the ledger\n\t\/\/ make a test var \"abc\" in order to able to query it and see if it worked\n\terr = stub.PutState(\"abc\", []byte(strconv.Itoa(aval)))\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\t\/\/ clear the car index\n\terr = clearStringIndex(carIndexStr, stub)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\t\/\/ clear the user index\n\terr = clearUserIndex(userIndexStr, stub)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\t\/\/ clear the revocation proposal index\n\terr = clearStringIndex(revocationProposalIndexStr, stub)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\t\/\/ clear the insurer index\n\terr = clearInsurerIndex(insurerIndexStr, stub)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\t\/\/ clear the registration proposal index\n\terr = clearRegistrationProposalIndex(registrationProposalIndexStr, stub)\n\tif err != nil {\n\t\treturn shim.Error(err.Error())\n\t}\n\n\tfmt.Println(\"Init terminated\")\n\treturn shim.Success(nil)\n}\n\n\/*\n * Invokes an action on the ledger.\n *\n * Expects 'username' and 'role' as first two parameters.\n * Unrestricted queries can only be done from test files.\n *\/\nfunc (t *CarChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response {\n\tfunction, args := stub.GetFunctionAndParameters()\n\n\tif len(args) < 2 {\n\t\treturn shim.Error(\"Invoke expects 'username' and 'role' as first two args.\")\n\t}\n\n\tusername := args[0]\n\trole := args[1]\n\targs = args[2:]\n\n\tfmt.Printf(\"Invoke is running as user '%s' with role '%s'\\n\", username, role)\n\tfmt.Printf(\"Invoke is running function '%s' with args: %s\\n\", function, strings.Join(args, \", \"))\n\n\tswitch function {\n\n\t\/\/ GENERAL FUNCTIONS\n\tcase \"read\":\n\t\tif len(args) != 1 {\n\t\t\treturn shim.Error(\"'read' expects a key to do the look up\")\n\t\t} else if reflect.TypeOf(stub).String() != \"*shim.MockStub\" {\n\t\t\t\/\/ only allow unrestricted queries from the test files\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to do unrestricted queries on the ledger.\", role))\n\t\t} else {\n\t\t\treturn t.read(stub, args[0])\n\t\t}\n\n\tcase \"readCar\":\n\t\tif len(args) != 1 {\n\t\t\treturn shim.Error(\"'readCar' expects a car vin to do the look up\")\n\t\t} else {\n\t\t\treturn t.readCar(stub, username, args[0])\n\t\t}\n\n\t\/\/ USER FUNCTIONS\n\tcase \"transfer\":\n\t\tif len(args) != 2 {\n\t\t\treturn shim.Error(\"'transfer' expects a car vin and name of the new owner to transfer a car\")\n\t\t} else if role == \"user\" || role == \"garage\" {\n\t\t\t\/\/ only allow users and garage users to transer cars\n\t\t\treturn t.transfer(stub, username, args)\n\t\t} else {\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to transfer cars.\", role))\n\t\t}\n\n\tcase \"revocationProposal\":\n\t\tif len(args) != 1 {\n\t\t\treturn shim.Error(\"'revocationProposal' expects a car vin to revoke a car\")\n\t\t} else if role != \"user\" {\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to create a revocation proposal.\", role))\n\t\t} else {\n\t\t\treturn t.revocationProposal(stub, username, args[0])\n\t\t}\n\n\tcase \"insureProposal\":\n\t\tif len(args) != 2 {\n\t\t\treturn shim.Error(\"'insureProposal' expects a car vin and an insurance company\")\n\t\t} else if role != \"user\" {\n\t\t\t\/\/ only normal users are allowed to do insurance proposals\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to create an insurance proposal.\", role))\n\t\t} else {\n\t\t\treturn t.insureProposal(stub, username, args[0], args[1])\n\t\t}\n\n\tcase \"sell\":\n\t\tif len(args) != 3 {\n\t\t\treturn shim.Error(\"'sell' expects a price, car vin and buyer name to transfer a car\")\n\t\t} else if role == \"user\" || role == \"garage\" {\n\t\t\t\/\/ only allow users and garage users to transer cars\n\t\t\treturn t.sell(stub, username, args)\n\t\t} else {\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to sell cars.\", role))\n\t\t}\n\n\tcase \"updateBalance\":\n\t\tif len(args) != 1 {\n\t\t\treturn shim.Error(\"'updateBalance' expects only one argument\")\n\t\t} else if role != \"user\" {\n\t\t\t\/\/ only a user is allowed to update balance\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to update the balance of a user.\", role))\n\t\t} else {\n\t\t\tnewBalance64, err := strconv.ParseInt(args[0], 10, 64)\n\t\t\tvar newBalance int\n\t\t\tnewBalance = int(newBalance64)\n\t\t\tif err != nil {\n\t\t\t\treturn shim.Error(\"Error converting string to int.\")\n\t\t\t}\n\t\t\t\/\/ Todo:\n\t\t\t\/\/ return t.updateBalance(shim, username, newBalance)\n\t\t}\n\n\t\/\/ GARAGE FUNCTIONS\n\tcase \"create\":\n\t\tif role != \"garage\" {\n\t\t\treturn shim.Error(\"'create' expects you to be a garage user\")\n\t\t}\n\t\treturn t.create(stub, username, args)\n\n\t\/\/ DOT FUNCTIONS\n\tcase \"revoke\":\n\t\tif len(args) != 1 {\n\t\t\treturn shim.Error(\"'revoke' expects a car vin to revoke a car\")\n\t\t} else if role != \"dot\" {\n\t\t\t\/\/ only the DOT is allowed to revoke cars\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to revoke cars.\", role))\n\t\t} else {\n\t\t\treturn t.revoke(stub, username, args[0])\n\t\t}\n\n\tcase \"delete\":\n\t\tif len(args) != 1 {\n\t\t\treturn shim.Error(\"'delete' expects a car vin to delete a car\")\n\t\t} else if role != \"dot\" {\n\t\t\t\/\/ only the DOT is allowed to delete cars\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to delete cars.\", role))\n\t\t} else {\n\t\t\treturn t.delete(stub, args[0])\n\t\t}\n\n\tcase \"readRegistrationProposals\":\n\t\tif role != \"dot\" {\n\t\t\t\/\/ only the DOT is allowed to read registration proposals\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to read reigistration proposals.\", role))\n\t\t} else {\n\t\t\treturn t.readRegistrationProposals(stub)\n\t\t}\n\n\tcase \"register\":\n\t\tif len(args) != 1 {\n\t\t\treturn shim.Error(\"'register' expects a car vin to register\")\n\t\t} else if role != \"dot\" {\n\t\t\t\/\/ only the DOT is allowed to register new cars\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to register cars.\", role))\n\t\t} else {\n\t\t\treturn t.register(stub, username, args[0])\n\t\t}\n\n\tcase \"confirm\":\n\t\tif len(args) != 2 {\n\t\t\treturn shim.Error(fmt.Sprintf(\"'confirm' expects a car vin and numberplate to confirm a car.\\n You can choose your numberplate yourself.\"))\n\t\t} else if role != \"dot\" {\n\t\t\t\/\/ only the DOT is allowed to confirm cars\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to confirm cars.\", role))\n\t\t} else {\n\t\t\treturn t.confirm(stub, username, args)\n\t\t}\n\n\tcase \"getRevocationProposals\":\n\t\tif role != \"dot\" {\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to query revocation proposals.\", role))\n\t\t} else {\n\t\t\treturn t.getRevocationProposals(stub)\n\t\t}\n\n\t\/\/ INSURANCE FUNCTIONS\n\tcase \"insuranceAccept\":\n\t\tif len(args) != 2 {\n\t\t\treturn shim.Error(\"'insuranceAccept' expects a car vin and an insurance company\")\n\t\t} else if role != \"insurer\" {\n\t\t\t\/\/ only insurers are allowed to create insurance contracts\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to create an insurance proposal.\", role))\n\t\t} else {\n\t\t\treturn t.insuranceAccept(stub, username, args[0], args[1])\n\t\t}\n\n\tcase \"getInsurer\":\n\t\tif len(args) != 1 {\n\t\t\treturn shim.Error(\"'getInsurer' expects an insurance company name\")\n\t\t} else if role != \"insurer\" {\n\t\t\t\/\/ only insurers are allowed to read their insurance proposals\n\t\t\treturn shim.Error(fmt.Sprintf(\"Sorry, role '%s' is not allowed to create an insurance proposal.\", role))\n\t\t} else {\n\t\t\treturn t.getInsurer(stub, args[0])\n\t\t}\n\n\tdefault:\n\t\treturn shim.Error(\"Invoke did not find function: \" + function)\n\t}\n\n\treturn shim.Error(\"Invoke did not find function: \" + function)\n}\n\n\/*\n * Reads ledger state from position 'key'.\n *\n * Can be any of:\n * - Car (expects car timestamp as key)\n * - User (expects user name as key)\n * - or an index like '_cars'\n *\n * On success,\n * returns ledger state in bytes at position 'key'.\n *\/\nfunc (t *CarChaincode) read(stub shim.ChaincodeStubInterface, key string) pb.Response {\n\tif key == \"\" {\n\t\treturn shim.Error(\"'read' expects a non-empty key to do the look up\")\n\t}\n\n\tvalAsBytes, err := stub.GetState(key)\n\tif err != nil {\n\t\treturn shim.Error(\"Failed to fetch value at key '\" + key + \"' from ledger\")\n\t}\n\n\treturn shim.Success(valAsBytes)\n}\n\nfunc main() {\n\terr := shim.Start(new(CarChaincode))\n\tif err != nil {\n\t\tfmt.Printf(\"Error starting Simple chaincode: %s\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package checksum\n\nimport (\n\t\"crypto\/md5\"\n\t\"crypto\/sha1\"\n\t\"crypto\/sha256\"\n\t\"crypto\/sha512\"\n\t\"fmt\"\n\t\"hash\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/mitchellh\/packer\/common\"\n\t\"github.com\/mitchellh\/packer\/helper\/config\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"github.com\/mitchellh\/packer\/template\/interpolate\"\n)\n\ntype Config struct {\n\tcommon.PackerConfig `mapstructure:\",squash\"`\n\n\tKeep bool `mapstructure:\"keep_input_artifact\"`\n\tChecksumTypes []string `mapstructure:\"checksum_types\"`\n\tOutputPath string `mapstructure:\"output\"`\n\tctx interpolate.Context\n}\n\ntype PostProcessor struct {\n\tconfig Config\n}\n\nfunc (p *PostProcessor) Configure(raws ...interface{}) error {\n\terr := config.Decode(&p.config, &config.DecodeOpts{\n\t\tInterpolate: true,\n\t\tInterpolateFilter: &interpolate.RenderFilter{\n\t\t\tExclude: []string{},\n\t\t},\n\t}, raws...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif p.config.ChecksumTypes == nil {\n\t\tp.config.ChecksumTypes = []string{\"md5\"}\n\t}\n\n\tif p.config.OutputPath == \"\" {\n\t\tp.config.OutputPath = \"packer_{{.BuildName}}_{{.BuilderType}}\" + \".checksum\"\n\t}\n\n\terrs := new(packer.MultiError)\n\n\tif err = interpolate.Validate(p.config.OutputPath, &p.config.ctx); err != nil {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, fmt.Errorf(\"Error parsing target template: %s\", err))\n\t}\n\n\tif len(errs.Errors) > 0 {\n\t\treturn errs\n\t}\n\n\treturn nil\n}\n\nfunc getHash(t string) hash.Hash {\n\tvar h hash.Hash\n\tswitch t {\n\tcase \"md5\":\n\t\th = md5.New()\n\tcase \"sha1\":\n\t\th = sha1.New()\n\tcase \"sha224\":\n\t\th = sha256.New224()\n\tcase \"sha256\":\n\t\th = sha256.New()\n\tcase \"sha384\":\n\t\th = sha512.New384()\n\tcase \"sha512\":\n\t\th = sha512.New()\n\t}\n\treturn h\n}\n\nfunc (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {\n\tfiles := artifact.Files()\n\tvar h hash.Hash\n\tvar checksumFile string\n\n\tnewartifact := NewArtifact(artifact.Files())\n\n\tfor _, ct := range p.config.ChecksumTypes {\n\t\th = getHash(ct)\n\n\t\tfor _, art := range files {\n\t\t\tif len(artifact.Files()) > 1 {\n\t\t\t\tchecksumFile = filepath.Join(filepath.Dir(art), ct+\"sums\")\n\t\t\t} else if p.config.OutputPath != \"\" {\n\t\t\t\tchecksumFile = p.config.OutputPath\n\t\t\t} else {\n\t\t\t\tchecksumFile = fmt.Sprintf(\"%s.%s\", art, ct+\"sum\")\n\t\t\t}\n\t\t\tif _, err := os.Stat(checksumFile); err != nil {\n\t\t\t\tnewartifact.files = append(newartifact.files, checksumFile)\n\t\t\t}\n\n\t\t\tfw, err := os.OpenFile(checksumFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.FileMode(0644))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, false, fmt.Errorf(\"unable to create file %s: %s\", checksumFile, err.Error())\n\t\t\t}\n\t\t\tfr, err := os.Open(art)\n\t\t\tif err != nil {\n\t\t\t\tfw.Close()\n\t\t\t\treturn nil, false, fmt.Errorf(\"unable to open file %s: %s\", art, err.Error())\n\t\t\t}\n\n\t\t\tif _, err = io.Copy(h, fr); err != nil {\n\t\t\t\tfr.Close()\n\t\t\t\tfw.Close()\n\t\t\t\treturn nil, false, fmt.Errorf(\"unable to compute %s hash for %s\", ct, art)\n\t\t\t}\n\t\t\tfr.Close()\n\t\t\tfw.WriteString(fmt.Sprintf(\"%x\\t%s\\n\", h.Sum(nil), filepath.Base(art)))\n\t\t\tfw.Close()\n\t\t}\n\t}\n\n\treturn newartifact, true, nil\n}\n<commit_msg>post-processor\/checksum: create dir for output file<commit_after>package checksum\n\nimport (\n\t\"crypto\/md5\"\n\t\"crypto\/sha1\"\n\t\"crypto\/sha256\"\n\t\"crypto\/sha512\"\n\t\"fmt\"\n\t\"hash\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/mitchellh\/packer\/common\"\n\t\"github.com\/mitchellh\/packer\/helper\/config\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"github.com\/mitchellh\/packer\/template\/interpolate\"\n)\n\ntype Config struct {\n\tcommon.PackerConfig `mapstructure:\",squash\"`\n\n\tKeep bool `mapstructure:\"keep_input_artifact\"`\n\tChecksumTypes []string `mapstructure:\"checksum_types\"`\n\tOutputPath string `mapstructure:\"output\"`\n\tctx interpolate.Context\n}\n\ntype PostProcessor struct {\n\tconfig Config\n}\n\nfunc (p *PostProcessor) Configure(raws ...interface{}) error {\n\terr := config.Decode(&p.config, &config.DecodeOpts{\n\t\tInterpolate: true,\n\t\tInterpolateFilter: &interpolate.RenderFilter{\n\t\t\tExclude: []string{},\n\t\t},\n\t}, raws...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif p.config.ChecksumTypes == nil {\n\t\tp.config.ChecksumTypes = []string{\"md5\"}\n\t}\n\n\tif p.config.OutputPath == \"\" {\n\t\tp.config.OutputPath = \"packer_{{.BuildName}}_{{.BuilderType}}\" + \".checksum\"\n\t}\n\n\terrs := new(packer.MultiError)\n\n\tif err = interpolate.Validate(p.config.OutputPath, &p.config.ctx); err != nil {\n\t\terrs = packer.MultiErrorAppend(\n\t\t\terrs, fmt.Errorf(\"Error parsing target template: %s\", err))\n\t}\n\n\tif len(errs.Errors) > 0 {\n\t\treturn errs\n\t}\n\n\treturn nil\n}\n\nfunc getHash(t string) hash.Hash {\n\tvar h hash.Hash\n\tswitch t {\n\tcase \"md5\":\n\t\th = md5.New()\n\tcase \"sha1\":\n\t\th = sha1.New()\n\tcase \"sha224\":\n\t\th = sha256.New224()\n\tcase \"sha256\":\n\t\th = sha256.New()\n\tcase \"sha384\":\n\t\th = sha512.New384()\n\tcase \"sha512\":\n\t\th = sha512.New()\n\t}\n\treturn h\n}\n\nfunc (p *PostProcessor) PostProcess(ui packer.Ui, artifact packer.Artifact) (packer.Artifact, bool, error) {\n\tfiles := artifact.Files()\n\tvar h hash.Hash\n\tvar checksumFile string\n\n\tnewartifact := NewArtifact(artifact.Files())\n\n\tfor _, ct := range p.config.ChecksumTypes {\n\t\th = getHash(ct)\n\n\t\tfor _, art := range files {\n\t\t\tif len(artifact.Files()) > 1 {\n\t\t\t\tchecksumFile = filepath.Join(filepath.Dir(art), ct+\"sums\")\n\t\t\t} else if p.config.OutputPath != \"\" {\n\t\t\t\tchecksumFile = p.config.OutputPath\n\t\t\t} else {\n\t\t\t\tchecksumFile = fmt.Sprintf(\"%s.%s\", art, ct+\"sum\")\n\t\t\t}\n\t\t\tif _, err := os.Stat(checksumFile); err != nil {\n\t\t\t\tnewartifact.files = append(newartifact.files, checksumFile)\n\t\t\t}\n\t\t\tif err := os.MkdirAll(filepath.Dir(checksumFile), os.FileMode(0755)); err != nil {\n\t\t\t\treturn nil, false, fmt.Errorf(\"unable to create dir: %s\", err.Error())\n\t\t\t}\n\t\t\tfw, err := os.OpenFile(checksumFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.FileMode(0644))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, false, fmt.Errorf(\"unable to create file %s: %s\", checksumFile, err.Error())\n\t\t\t}\n\t\t\tfr, err := os.Open(art)\n\t\t\tif err != nil {\n\t\t\t\tfw.Close()\n\t\t\t\treturn nil, false, fmt.Errorf(\"unable to open file %s: %s\", art, err.Error())\n\t\t\t}\n\n\t\t\tif _, err = io.Copy(h, fr); err != nil {\n\t\t\t\tfr.Close()\n\t\t\t\tfw.Close()\n\t\t\t\treturn nil, false, fmt.Errorf(\"unable to compute %s hash for %s\", ct, art)\n\t\t\t}\n\t\t\tfr.Close()\n\t\t\tfw.WriteString(fmt.Sprintf(\"%x\\t%s\\n\", h.Sum(nil), filepath.Base(art)))\n\t\t\tfw.Close()\n\t\t}\n\t}\n\n\treturn newartifact, true, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cloudup\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\n\t\"github.com\/blang\/semver\"\n\t\"github.com\/golang\/glog\"\n\tapi \"k8s.io\/kops\/pkg\/apis\/kops\"\n\t\"k8s.io\/kops\/pkg\/apis\/kops\/util\"\n\t\"k8s.io\/kops\/pkg\/assets\"\n)\n\nfunc usesCNI(c *api.Cluster) bool {\n\tnetworkConfig := c.Spec.Networking\n\tif networkConfig == nil || networkConfig.Classic != nil {\n\t\t\/\/ classic\n\t\treturn false\n\t}\n\n\tif networkConfig.Kubenet != nil {\n\t\t\/\/ kubenet\n\t\treturn true\n\t}\n\n\tif networkConfig.External != nil {\n\t\t\/\/ external: assume uses CNI\n\t\treturn true\n\t}\n\n\tif networkConfig.Kopeio != nil {\n\t\t\/\/ Kopeio uses kubenet (and thus CNI)\n\t\treturn true\n\t}\n\n\tif networkConfig.Weave != nil {\n\t\t\/\/ Weave uses CNI\n\t\treturn true\n\t}\n\n\tif networkConfig.Flannel != nil {\n\t\t\/\/ Flannel uses CNI\n\t\treturn true\n\t}\n\n\tif networkConfig.Calico != nil {\n\t\t\/\/ Calico uses CNI\n\t\treturn true\n\t}\n\n\tif networkConfig.Canal != nil {\n\t\t\/\/ Canal uses CNI\n\t\treturn true\n\t}\n\n\tif networkConfig.Kuberouter != nil {\n\t\t\/\/ Kuberouter uses CNI\n\t\treturn true\n\t}\n\n\tif networkConfig.Romana != nil {\n\t\t\/\/ Romana uses CNI\n\t\treturn true\n\t}\n\n\tif networkConfig.AmazonVPC != nil {\n\t\t\/\/ AmazonVPC uses CNI\n\t\treturn true\n\t}\n\n\tif networkConfig.CNI != nil {\n\t\t\/\/ CNI definitely uses CNI!\n\t\treturn true\n\t}\n\n\t\/\/ Assume other modes also use CNI\n\tglog.Warningf(\"Unknown networking mode configured\")\n\treturn true\n}\n\n\/\/ TODO: we really need to sort this out:\n\/\/ https:\/\/github.com\/kubernetes\/kops\/issues\/724\n\/\/ https:\/\/github.com\/kubernetes\/kops\/issues\/626\n\/\/ https:\/\/github.com\/kubernetes\/kubernetes\/issues\/30338\n\nconst (\n\t\/\/ 1.5.x k8s uses release 07a8a28637e97b22eb8dfe710eeae1344f69d16e\n\tdefaultCNIAssetK8s1_5 = \"https:\/\/storage.googleapis.com\/kubernetes-release\/network-plugins\/cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz\"\n\tdefaultCNIAssetHashStringK8s1_5 = \"19d49f7b2b99cd2493d5ae0ace896c64e289ccbb\"\n\n\t\/\/ 1.6.x k8s uses release 0799f5732f2a11b329d9e3d51b9c8f2e3759f2ff\n\tdefaultCNIAssetK8s1_6 = \"https:\/\/storage.googleapis.com\/kubernetes-release\/network-plugins\/cni-0799f5732f2a11b329d9e3d51b9c8f2e3759f2ff.tar.gz\"\n\tdefaultCNIAssetHashStringK8s1_6 = \"1d9788b0f5420e1a219aad2cb8681823fc515e7c\"\n\n\t\/\/ defaultCNIAssetK8s1_9 is the CNI tarball for for 1.9.x k8s.\n\tdefaultCNIAssetK8s1_9 = \"https:\/\/storage.googleapis.com\/kubernetes-release\/network-plugins\/cni-plugins-amd64-v0.6.0.tgz\"\n\tdefaultCNIAssetHashStringK8s1_9 = \"d595d3ded6499a64e8dac02466e2f5f2ce257c9f\"\n\n\t\/\/ Environment variable for overriding CNI url\n\tENV_VAR_CNI_VERSION_URL = \"CNI_VERSION_URL\"\n)\n\nfunc findCNIAssets(c *api.Cluster, assetBuilder *assets.AssetBuilder) (*url.URL, string, error) {\n\n\tif cniVersionURL := os.Getenv(ENV_VAR_CNI_VERSION_URL); cniVersionURL != \"\" {\n\t\tu, err := url.Parse(cniVersionURL)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", fmt.Errorf(\"unable to parse %q as a URL: %v\", cniVersionURL, err)\n\t\t}\n\t\tglog.Infof(\"Using CNI asset version %q, as set in %s\", cniVersionURL, ENV_VAR_CNI_VERSION_URL)\n\t\treturn u, \"\", nil\n\t}\n\n\tsv, err := util.ParseKubernetesVersion(c.Spec.KubernetesVersion)\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"failed to lookup kubernetes version: %v\", err)\n\t}\n\n\tsv.Pre = nil\n\tsv.Build = nil\n\n\tvar cniAsset, cniAssetHash string\n\tif sv.GTE(semver.Version{Major: 1, Minor: 9, Patch: 0, Pre: nil, Build: nil}) {\n\t\tcniAsset = defaultCNIAssetK8s1_9\n\t\tcniAssetHash = defaultCNIAssetHashStringK8s1_9\n\t\tglog.V(2).Infof(\"Adding default CNI asset for k8s 1.9.x and higher: %s\", defaultCNIAssetK8s1_9)\n\t} else if sv.GTE(semver.Version{Major: 1, Minor: 6, Patch: 0, Pre: nil, Build: nil}) {\n\t\tcniAsset = defaultCNIAssetK8s1_6\n\t\tcniAssetHash = defaultCNIAssetHashStringK8s1_6\n\t\tglog.V(2).Infof(\"Adding default CNI asset for k8s 1.6.x and higher: %s\", defaultCNIAssetK8s1_6)\n\t} else {\n\t\tcniAsset = defaultCNIAssetK8s1_5\n\t\tcniAssetHash = defaultCNIAssetHashStringK8s1_5\n\t\tglog.V(2).Infof(\"Adding default CNI asset for k8s 1.5: %s\", defaultCNIAssetK8s1_5)\n\t}\n\n\tu, err := url.Parse(cniAsset)\n\tif err != nil {\n\t\treturn nil, \"\", nil\n\t}\n\n\tu, err = assetBuilder.RemapFileAndSHAValue(u, cniAssetHash)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\treturn u, cniAssetHash, nil\n}\n<commit_msg>User can specify CNI_ASSET_HASH_STRING<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cloudup\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\n\t\"github.com\/blang\/semver\"\n\t\"github.com\/golang\/glog\"\n\tapi \"k8s.io\/kops\/pkg\/apis\/kops\"\n\t\"k8s.io\/kops\/pkg\/apis\/kops\/util\"\n\t\"k8s.io\/kops\/pkg\/assets\"\n)\n\nfunc usesCNI(c *api.Cluster) bool {\n\tnetworkConfig := c.Spec.Networking\n\tif networkConfig == nil || networkConfig.Classic != nil {\n\t\t\/\/ classic\n\t\treturn false\n\t}\n\n\tif networkConfig.Kubenet != nil {\n\t\t\/\/ kubenet\n\t\treturn true\n\t}\n\n\tif networkConfig.External != nil {\n\t\t\/\/ external: assume uses CNI\n\t\treturn true\n\t}\n\n\tif networkConfig.Kopeio != nil {\n\t\t\/\/ Kopeio uses kubenet (and thus CNI)\n\t\treturn true\n\t}\n\n\tif networkConfig.Weave != nil {\n\t\t\/\/ Weave uses CNI\n\t\treturn true\n\t}\n\n\tif networkConfig.Flannel != nil {\n\t\t\/\/ Flannel uses CNI\n\t\treturn true\n\t}\n\n\tif networkConfig.Calico != nil {\n\t\t\/\/ Calico uses CNI\n\t\treturn true\n\t}\n\n\tif networkConfig.Canal != nil {\n\t\t\/\/ Canal uses CNI\n\t\treturn true\n\t}\n\n\tif networkConfig.Kuberouter != nil {\n\t\t\/\/ Kuberouter uses CNI\n\t\treturn true\n\t}\n\n\tif networkConfig.Romana != nil {\n\t\t\/\/ Romana uses CNI\n\t\treturn true\n\t}\n\n\tif networkConfig.AmazonVPC != nil {\n\t\t\/\/ AmazonVPC uses CNI\n\t\treturn true\n\t}\n\n\tif networkConfig.CNI != nil {\n\t\t\/\/ CNI definitely uses CNI!\n\t\treturn true\n\t}\n\n\t\/\/ Assume other modes also use CNI\n\tglog.Warningf(\"Unknown networking mode configured\")\n\treturn true\n}\n\n\/\/ TODO: we really need to sort this out:\n\/\/ https:\/\/github.com\/kubernetes\/kops\/issues\/724\n\/\/ https:\/\/github.com\/kubernetes\/kops\/issues\/626\n\/\/ https:\/\/github.com\/kubernetes\/kubernetes\/issues\/30338\n\nconst (\n\t\/\/ 1.5.x k8s uses release 07a8a28637e97b22eb8dfe710eeae1344f69d16e\n\tdefaultCNIAssetK8s1_5 = \"https:\/\/storage.googleapis.com\/kubernetes-release\/network-plugins\/cni-07a8a28637e97b22eb8dfe710eeae1344f69d16e.tar.gz\"\n\tdefaultCNIAssetHashStringK8s1_5 = \"19d49f7b2b99cd2493d5ae0ace896c64e289ccbb\"\n\n\t\/\/ 1.6.x k8s uses release 0799f5732f2a11b329d9e3d51b9c8f2e3759f2ff\n\tdefaultCNIAssetK8s1_6 = \"https:\/\/storage.googleapis.com\/kubernetes-release\/network-plugins\/cni-0799f5732f2a11b329d9e3d51b9c8f2e3759f2ff.tar.gz\"\n\tdefaultCNIAssetHashStringK8s1_6 = \"1d9788b0f5420e1a219aad2cb8681823fc515e7c\"\n\n\t\/\/ defaultCNIAssetK8s1_9 is the CNI tarball for for 1.9.x k8s.\n\tdefaultCNIAssetK8s1_9 = \"https:\/\/storage.googleapis.com\/kubernetes-release\/network-plugins\/cni-plugins-amd64-v0.6.0.tgz\"\n\tdefaultCNIAssetHashStringK8s1_9 = \"d595d3ded6499a64e8dac02466e2f5f2ce257c9f\"\n\n\t\/\/ Environment variable for overriding CNI url\n\tENV_VAR_CNI_VERSION_URL = \"CNI_VERSION_URL\"\n\tENV_VAR_CNI_ASSET_HASH_STRING = \"CNI_ASSET_HASH_STRING\"\n)\n\nfunc findCNIAssets(c *api.Cluster, assetBuilder *assets.AssetBuilder) (*url.URL, string, error) {\n\n\tif cniVersionURL := os.Getenv(ENV_VAR_CNI_VERSION_URL); cniVersionURL != \"\" {\n\t\tu, err := url.Parse(cniVersionURL)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", fmt.Errorf(\"unable to parse %q as a URL: %v\", cniVersionURL, err)\n\t\t}\n\n\t\tglog.Infof(\"Using CNI asset version %q, as set in %s\", cniVersionURL, ENV_VAR_CNI_VERSION_URL)\n\n\t\tif cniAssetHashString := os.Getenv(ENV_VAR_CNI_ASSET_HASH_STRING); cniAssetHashString != \"\" {\n\n\t\t\tglog.Infof(\"Using CNI asset hash %q, as set in %s\", cniAssetHashString, ENV_VAR_CNI_ASSET_HASH_STRING)\n\n\t\t\treturn u, cniAssetHashString, nil\n\t\t} else {\n\t\t\treturn u, \"\", nil\n\t\t}\n\t}\n\n\tsv, err := util.ParseKubernetesVersion(c.Spec.KubernetesVersion)\n\tif err != nil {\n\t\treturn nil, \"\", fmt.Errorf(\"failed to lookup kubernetes version: %v\", err)\n\t}\n\n\tsv.Pre = nil\n\tsv.Build = nil\n\n\tvar cniAsset, cniAssetHash string\n\tif sv.GTE(semver.Version{Major: 1, Minor: 9, Patch: 0, Pre: nil, Build: nil}) {\n\t\tcniAsset = defaultCNIAssetK8s1_9\n\t\tcniAssetHash = defaultCNIAssetHashStringK8s1_9\n\t\tglog.V(2).Infof(\"Adding default CNI asset for k8s 1.9.x and higher: %s\", defaultCNIAssetK8s1_9)\n\t} else if sv.GTE(semver.Version{Major: 1, Minor: 6, Patch: 0, Pre: nil, Build: nil}) {\n\t\tcniAsset = defaultCNIAssetK8s1_6\n\t\tcniAssetHash = defaultCNIAssetHashStringK8s1_6\n\t\tglog.V(2).Infof(\"Adding default CNI asset for k8s 1.6.x and higher: %s\", defaultCNIAssetK8s1_6)\n\t} else {\n\t\tcniAsset = defaultCNIAssetK8s1_5\n\t\tcniAssetHash = defaultCNIAssetHashStringK8s1_5\n\t\tglog.V(2).Infof(\"Adding default CNI asset for k8s 1.5: %s\", defaultCNIAssetK8s1_5)\n\t}\n\n\tu, err := url.Parse(cniAsset)\n\tif err != nil {\n\t\treturn nil, \"\", nil\n\t}\n\n\tu, err = assetBuilder.RemapFileAndSHAValue(u, cniAssetHash)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\treturn u, cniAssetHash, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage net\n\nimport (\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc testTimeout(t *testing.T, network, addr string, readFrom bool) {\n\tfd, err := Dial(network, \"\", addr)\n\tdefer fd.Close()\n\tif err != nil {\n\t\tt.Errorf(\"dial %s %s failed: %v\", network, addr, err)\n\t}\n\tt0 := time.Nanoseconds()\n\tfd.SetReadTimeout(1e8) \/\/ 100ms\n\tvar b [100]byte\n\tvar n int\n\tvar err1 os.Error\n\tif readFrom {\n\t\tn, _, err1 = fd.(PacketConn).ReadFrom(&b)\n\t} else {\n\t\tn, err1 = fd.Read(&b)\n\t}\n\tt1 := time.Nanoseconds()\n\twhat := \"Read\"\n\tif readFrom {\n\t\twhat = \"ReadFrom\"\n\t}\n\tif n != 0 || !isEAGAIN(err1) {\n\t\tt.Errorf(\"fd.%s on %s %s did not return 0, EAGAIN: %v, %v\", what, network, addr, n, err1)\n\t}\n\tif t1-t0 < 0.5e8 || t1-t0 > 1.5e8 {\n\t\tt.Errorf(\"fd.%s on %s %s took %f seconds, expected 0.1\", what, network, addr, float64(t1-t0)\/1e9)\n\t}\n}\n\nfunc TestTimeoutUDP(t *testing.T) {\n\ttestTimeout(t, \"udp\", \"127.0.0.1:53\", false)\n\ttestTimeout(t, \"udp\", \"127.0.0.1:53\", true)\n}\n\nfunc TestTimeoutTCP(t *testing.T) {\n\t\/\/ 74.125.19.99 is www.google.com.\n\t\/\/ could use dns, but dns depends on\n\t\/\/ timeouts and this is the timeout test.\n\ttestTimeout(t, \"tcp\", \"74.125.19.99:80\", false)\n}\n<commit_msg>net: fix nil deref in testTimeout when Dial fails Pointed out by Scott Schwartz.<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage net\n\nimport (\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc testTimeout(t *testing.T, network, addr string, readFrom bool) {\n\tfd, err := Dial(network, \"\", addr)\n\tif err != nil {\n\t\tt.Errorf(\"dial %s %s failed: %v\", network, addr, err)\n\t\treturn\n\t}\n\tdefer fd.Close()\n\tt0 := time.Nanoseconds()\n\tfd.SetReadTimeout(1e8) \/\/ 100ms\n\tvar b [100]byte\n\tvar n int\n\tvar err1 os.Error\n\tif readFrom {\n\t\tn, _, err1 = fd.(PacketConn).ReadFrom(&b)\n\t} else {\n\t\tn, err1 = fd.Read(&b)\n\t}\n\tt1 := time.Nanoseconds()\n\twhat := \"Read\"\n\tif readFrom {\n\t\twhat = \"ReadFrom\"\n\t}\n\tif n != 0 || !isEAGAIN(err1) {\n\t\tt.Errorf(\"fd.%s on %s %s did not return 0, EAGAIN: %v, %v\", what, network, addr, n, err1)\n\t}\n\tif t1-t0 < 0.5e8 || t1-t0 > 1.5e8 {\n\t\tt.Errorf(\"fd.%s on %s %s took %f seconds, expected 0.1\", what, network, addr, float64(t1-t0)\/1e9)\n\t}\n}\n\nfunc TestTimeoutUDP(t *testing.T) {\n\ttestTimeout(t, \"udp\", \"127.0.0.1:53\", false)\n\ttestTimeout(t, \"udp\", \"127.0.0.1:53\", true)\n}\n\nfunc TestTimeoutTCP(t *testing.T) {\n\t\/\/ 74.125.19.99 is www.google.com.\n\t\/\/ could use dns, but dns depends on\n\t\/\/ timeouts and this is the timeout test.\n\ttestTimeout(t, \"tcp\", \"74.125.19.99:80\", false)\n}\n<|endoftext|>"} {"text":"<commit_before>\/* Copyright (c) 2016 Thomas Findelkind\n#\n# This program is free software: you can redistribute it and\/or modify it under\n# the terms of the GNU General Public License as published by the Free Software\n# Foundation, either version 3 of the License, or (at your option) any later\n# version.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n#\n# MORE ABOUT THIS SCRIPT AVAILABLE IN THE README AND AT:\n#\n# http:\/\/tfindelkind.com\n#\n# ---------------------------------------------------------------------------- \n*\/\n\npackage main\n\nimport (\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/Tfindelkind\/ntnx-golang-client-sdk\"\n\n\t\"fmt\"\n\t\"os\"\n\t\/\/\"time\"\n\t\"flag\"\n)\n\nconst AppVersion = \"0.9 beta\"\n\nvar (\n\thost *string\n\tusername *string\n\tpassword *string\n\tvm_name *string\n\timage_name *string\n\tseed_name *string\n\timage_file *string\n\tseed_file *string\n\tvlan *string\n\tcontainer *string\n\tdebug *bool\n\thelp *bool\n\tversion *bool\n)\n\nfunc init() {\n\thost = flag.String(\"host\", \"192.168.178.130\", \"a string\")\n\tusername = flag.String(\"username\", \"admin\", \"a string\")\n\tpassword = flag.String(\"password\", \"nutanix\/4u\", \"a string\")\n\tvm_name = flag.String(\"vm-name\", \"NTNX-AVM\", \"a string\")\n\timage_name = flag.String(\"image-name\", \"Centos7-1606\", \"a string\")\n\tseed_name = flag.String(\"seed-name\", \"CloudInitSeed\", \"a string\")\n\timage_file = flag.String(\"image-file\", \"CentOS-7-x86_64-GenericCloud-1606.qcow2\", \"a string\")\n\tseed_file = flag.String(\"seed-file\", \"seed.iso\", \"a string\")\n\tvlan = flag.String(\"vlan\", \"VLAN0\", \"a string\")\n\tcontainer = flag.String(\"container\", \"ISO\", \"a string\")\t\n\tdebug = flag.Bool(\"debug\", false, \"a bool\")\n\thelp = flag.Bool(\"help\", false, \"a bool\")\n\tversion = flag.Bool(\"version\", false, \"a bool\")\n}\n\nfunc printHelp() {\n\n\tfmt.Println(\"Usage: deploy_cloud_vm [OPTIONS]\")\n\tfmt.Println(\"deploy_cloud_vm [ --help | --version ]\")\n\tfmt.Println(\"\")\n\tfmt.Println(\"Upload and deploy a cloud image with a CD seed\")\n\tfmt.Println(\"Example seed.iso at https:\/\/github.com\/Tfindelkind\/DCI\")\n\tfmt.Println(\"\")\n\tfmt.Println(\"Options:\")\n\tfmt.Println(\"\")\n\tfmt.Println(\"--host Specify CVM host or Cluster IP\")\n\tfmt.Println(\"--username Specify username for connect to host\")\n\tfmt.Println(\"--password Specify password for user\")\n\tfmt.Println(\"--vm-name Specify Virtual Machine name which will be created\")\n\tfmt.Println(\"--image-name Specify the name of the Cloud-Image in Image Service\")\n\tfmt.Println(\"--image-file Speficy the file name of the Cloud-Image\")\n\tfmt.Println(\"--seed-name Specify the name of the Seed.ISO in Image Service\")\n\tfmt.Println(\"--seed-file Speficy the file name of the Seed.ISO\")\n\tfmt.Println(\"--vlan Specify the VLAN to which the VM will be connected\")\n\tfmt.Println(\"--container Specify the container where images\/vm will be stored\")\n\tfmt.Println(\"--debug Enables debug mode\")\n\tfmt.Println(\"--help List this help\")\n\tfmt.Println(\"--version Show the deploy_cloud_vm version\")\n\tfmt.Println(\"\")\n\tfmt.Println(\"Example:\")\n\tfmt.Println(\"\")\n\tfmt.Println(\"deploy_cloud_vm --host=NTNX-CVM --username=admin --password=nutanix\/4u --vm-name=NTNX-AVM --image-name=Centos7-1606 --image-container=ISO --vm-container=prod vlan=VLAN0\")\n\tfmt.Println(\"\")\n}\n\nfunc main() {\n\n\tflag.Usage = printHelp\n\tflag.Parse()\n\t\t\n\n\tif *help {\n\t\tprintHelp()\n\t\tos.Exit(0)\n\t}\n\n\tif *version {\n\t\tfmt.Println(\"Version: \" + AppVersion)\n\t\tos.Exit(0)\n\t}\n\n\tif *debug {\n\t\tlog.SetLevel(log.DebugLevel)\n\t} else {\n\t\tlog.SetLevel(log.InfoLevel)\n\t}\n\tcustomFormatter := new(log.TextFormatter)\n\tcustomFormatter.TimestampFormat = \"2006-01-02 15:04:05\"\n\tlog.SetFormatter(customFormatter)\n\tcustomFormatter.FullTimestamp = true\n\n\tvar n ntnxAPI.NTNXConnection\n\tvar v ntnxAPI.VM_json_AHV\n\tvar nic1 ntnxAPI.Network_REST\n\tvar im ntnxAPI.Image_json_AHV\n\tvar seed ntnxAPI.Image_json_AHV\n\tvar taskUUID ntnxAPI.TaskUUID\n\n\tn.NutanixHost = *host\n\tn.Username = *username\n\tn.Password = *password\n\tim.Name = *image_name\n\tim.Annotation = \"deployed with deploy_cloud_vm\"\n\tim.ImageType = \"DISK_IMAGE\"\n\tseed.Name = *seed_name\n\tseed.Annotation = \"deployed with deploy_cloud_vm\"\n\tseed.ImageType = \"ISO_IMAGE\"\n\tv.Config.Name = *vm_name\n\tv.Config.Description = \"deployed with deploy_cloud_vm\"\n\tv.Config.MemoryMb = 4096\n\tv.Config.NumVcpus = 1\n\tv.Config.NumCoresPerVcpu = 1\n\n\tntnxAPI.EncodeCredentials(&n)\n\tntnxAPI.CreateHttpClient(&n)\n\n\t\/*\n\t Short description what will be done\n\n\t 1. Upload Image when file is specified and wait\n\t 2. Upload Cloud seed.iso to image\n\t 2. Create VM and wait\n\t 3. Clone Image to Disk and wait\n\t 4. Attach seed.iso\n\t 5. Add network\n\t 6. start VM\n\t*\/\n\n\t\/*To-DO:\n\t\n\t 1. Inplement progress bar while uploading- (concurreny and get progress from task)\n\n\n\t*\/\n\n\t\/\/ upload cloud image to image service\n\tif ntnxAPI.ImageExistbyName(&n, &im) {\n\t\tlog.Warn(\"Image \" + im.Name + \" already exists\")\n\t\t\/\/ get existing image ID\t\t\n\t} else {\n\t\ttaskUUID, _ = ntnxAPI.CreateImageObject(&n, &im)\n\n\t\ttask, err := ntnxAPI.WaitUntilTaskFinished(&n, taskUUID.TaskUUID)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Task does not exist\")\n\t\t}\n\n\t\tim.UUID = ntnxAPI.GetImageUUIDbyTask(&n, &task)\n\n\t\t_, statusCode := ntnxAPI.PutFileToImage(&n, ntnxAPI.NutanixAHVurl(&n), \"images\/\"+im.UUID+\"\/upload\", *image_file, *container)\n\n\t\tif statusCode != 200 {\n\t\t\tlog.Error(\"Image upload failed\")\n\t\t\tos.Exit(1)\n\t\t}\t\t\t\t\t\n\t}\n\n\t\/\/ upload seed.iso to image service\n\tif ntnxAPI.ImageExistbyName(&n, &seed) {\n\t\tlog.Warn(\"Image \" + seed.Name + \" already exists\")\n\t} else {\n\t\ttaskUUID, _ = ntnxAPI.CreateImageObject(&n, &seed)\n\n\t\ttask, err := ntnxAPI.WaitUntilTaskFinished(&n, taskUUID.TaskUUID)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Task does not exist\")\n\t\t}\n\n\t\tseed.UUID = ntnxAPI.GetImageUUIDbyTask(&n, &task)\n\n\t\t_, statusCode := ntnxAPI.PutFileToImage(&n, ntnxAPI.NutanixAHVurl(&n), \"images\/\"+seed.UUID+\"\/upload\", *seed_file, *container)\n\n\t\tif statusCode != 200 {\n\t\t\tlog.Error(\"Image upload failed\")\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\t\n\t\/\/ make sure cloud image is active and get all infos when active\n\tlog.Info(\"Wait that the cloud image is activated...\")\n\tImageActive, _ := ntnxAPI.WaitUntilImageIsActive(&n,&im)\n\tif ( !ImageActive ) {\n\t\tlog.Fatal(\"Cloud Image is not active\")\n\t\tos.Exit(1)\n\t}\n\tim , _ = ntnxAPI.GetImagebyName(&n, im.Name)\n\t\n\t\/\/ make sure seed image is active and get all infos when active\t\n\tlog.Info(\"Wait that the seed image is activated...\")\n\tImageActive, _ = ntnxAPI.WaitUntilImageIsActive(&n,&seed)\n\t\n\tif ( !ImageActive ) {\n\t\tlog.Fatal(\"Seed Image is not active\")\n\t\tos.Exit(1)\n\t}\n\tseed, _ = ntnxAPI.GetImagebyName(&n, seed.Name)\n\n\t\/\/check if VM exists\n\texist, _ := ntnxAPI.VMExist(&n, v.Config.Name)\n\n\tif exist {\n\t\tlog.Warn(\"VM \" + v.Config.Name + \" already exists\")\n\t} else {\n\n\t\t\/\/ Create VM\n\t\ttaskUUID, _ = ntnxAPI.CreateVM_AHV(&n, &v)\n\n\t\ttask, err := ntnxAPI.WaitUntilTaskFinished(&n, taskUUID.TaskUUID)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Task does not exist\")\n\t\t} else {\n\t\t\tlog.Info(\"VM \" + v.Config.Name + \" created\")\n\t\t}\n\n\t\t\/\/ Clone Cloud-Image disk\n\t\tv.UUID = ntnxAPI.GetVMIDbyTask(&n, &task)\n\n\t\ttaskUUID, _ = ntnxAPI.CloneDiskforVM(&n, &v, &im)\n\n\t\ttask, err = ntnxAPI.WaitUntilTaskFinished(&n, taskUUID.TaskUUID)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Task does not exist\")\n\t\t} else {\n\t\t\tlog.Info(\"Disk ID\" + v.UUID + \" cloned\")\n\t\t}\n\n\t\t\/\/ Clone Seed.iso to CDROM\n\t\ttaskUUID, _ = ntnxAPI.CloneCDforVM(&n, &v, &seed)\n\n\t\ttask, err = ntnxAPI.WaitUntilTaskFinished(&n, taskUUID.TaskUUID)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Task does not exist\")\n\t\t} else {\n\t\t\tlog.Info(\"CD ISO ID\" + v.UUID + \" cloned\")\n\t\t}\n\n\t\t\/\/\tCreate Nic1\n\t\tnic1.UUID = ntnxAPI.GetNetworkIDbyName(&n, \"VLAN0\")\n\n\t\ttaskUUID, _ = ntnxAPI.CreateVNicforVM(&n, &v, &nic1)\n\n\t\ttask, err = ntnxAPI.WaitUntilTaskFinished(&n, taskUUID.TaskUUID)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Task does not exist\")\n\t\t} else {\n\t\t\tlog.Info(\"Nic1 created\")\n\t\t}\n\n\t\t\/\/\tStart Cloud-VM\n\n\t\ttaskUUID, _ = ntnxAPI.StartVM(&n, &v)\n\n\t\ttask, err = ntnxAPI.WaitUntilTaskFinished(&n, taskUUID.TaskUUID)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Task does not exist\")\n\t\t} else {\n\t\t\tlog.Info(\"VM started\")\n\t\t}\n\n\t}\n}\n<commit_msg>moved files<commit_after><|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"math\/rand\"\n\t\"os\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/infrakit\/pkg\/util\/exec\"\n)\n\nfunc (p *plugin) terraformApply() error {\n\tif p.pretend {\n\t\treturn nil\n\t}\n\n\tp.applyLock.Lock()\n\tdefer p.applyLock.Unlock()\n\n\tif p.applying {\n\t\treturn nil\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tif err := p.lock.TryLock(); err == nil {\n\t\t\t\tdefer p.lock.Unlock()\n\t\t\t\tdoTerraformApply(p.Dir)\n\t\t\t}\n\t\t\tlog.Debugln(\"Can't acquire lock, waiting\")\n\t\t\ttime.Sleep(time.Duration(int64(rand.NormFloat64())%1000) * time.Millisecond)\n\t\t}\n\t}()\n\tp.applying = true\n\treturn nil\n}\n\nfunc doTerraformApply(dir string) error {\n\tlog.Infoln(time.Now().Format(time.RFC850) + \" Applying plan\")\n\tcommand := exec.Command(`terraform apply`).InheritEnvs(true).WithDir(dir)\n\terr := command.WithStdout(os.Stdout).WithStderr(os.Stdout).Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn command.Wait()\n}\n<commit_msg>Fix debug message when unable to get the terraform lock (#554)<commit_after>package main\n\nimport (\n\t\"math\/rand\"\n\t\"os\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/docker\/infrakit\/pkg\/util\/exec\"\n)\n\nfunc (p *plugin) terraformApply() error {\n\tif p.pretend {\n\t\treturn nil\n\t}\n\n\tp.applyLock.Lock()\n\tdefer p.applyLock.Unlock()\n\n\tif p.applying {\n\t\treturn nil\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tif err := p.lock.TryLock(); err == nil {\n\t\t\t\tdefer p.lock.Unlock()\n\t\t\t\tdoTerraformApply(p.Dir)\n\t\t\t} else {\n\t\t\t\tlog.Debugln(\"Can't acquire lock, waiting\")\n\t\t\t}\n\t\t\ttime.Sleep(time.Duration(int64(rand.NormFloat64())%1000) * time.Millisecond)\n\t\t}\n\t}()\n\tp.applying = true\n\treturn nil\n}\n\nfunc doTerraformApply(dir string) error {\n\tlog.Infoln(time.Now().Format(time.RFC850) + \" Applying plan\")\n\tcommand := exec.Command(`terraform apply`).InheritEnvs(true).WithDir(dir)\n\terr := command.WithStdout(os.Stdout).WithStderr(os.Stdout).Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn command.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before>package observers\n\nimport (\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/ivan1993spb\/snake-server\/objects\/watermelon\"\n\t\"github.com\/ivan1993spb\/snake-server\/world\"\n)\n\nconst chanWatermelonObserverEventsBuffer = 32\n\nconst addWatermelonDelay = time.Minute\n\nconst oneWatermelonArea = 200\n\ntype WatermelonObserver struct{}\n\nfunc (WatermelonObserver) Observe(stop <-chan struct{}, w *world.World, logger logrus.FieldLogger) {\n\tvar size = int32(w.Size())\n\tvar maxWatermelonCount = size \/ oneWatermelonArea\n\n\tlogger.WithFields(logrus.Fields{\n\t\t\"map_size\": size,\n\t\t\"watermelon_count\": maxWatermelonCount,\n\t}).Debug(\"watermelon observer\")\n\n\tif maxWatermelonCount == 0 {\n\t\treturn\n\t}\n\n\tvar watermelonCount int32 = 0\n\n\tgo func() {\n\t\tfor event := range w.Events(stop, chanWatermelonObserverEventsBuffer) {\n\t\t\tif event.Type == world.EventTypeObjectDelete {\n\t\t\t\tif _, ok := event.Payload.(*watermelon.Watermelon); ok {\n\t\t\t\t\tatomic.AddInt32(&watermelonCount, -1)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tticker := time.NewTicker(addWatermelonDelay)\n\t\tdefer ticker.Stop()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tfor atomic.LoadInt32(&watermelonCount) < maxWatermelonCount {\n\t\t\t\t\tif _, err := watermelon.NewWatermelon(w); err != nil {\n\t\t\t\t\t\tlogger.WithError(err).Error(\"cannot create watermelon\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tatomic.AddInt32(&watermelonCount, 1)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase <-stop:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n<commit_msg>Increase chan buffer size in waterlemon observer<commit_after>package observers\n\nimport (\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/ivan1993spb\/snake-server\/objects\/watermelon\"\n\t\"github.com\/ivan1993spb\/snake-server\/world\"\n)\n\nconst chanWatermelonObserverEventsBuffer = 64\n\nconst addWatermelonDelay = time.Minute\n\nconst oneWatermelonArea = 200\n\ntype WatermelonObserver struct{}\n\nfunc (WatermelonObserver) Observe(stop <-chan struct{}, w *world.World, logger logrus.FieldLogger) {\n\tvar size = int32(w.Size())\n\tvar maxWatermelonCount = size \/ oneWatermelonArea\n\n\tlogger.WithFields(logrus.Fields{\n\t\t\"map_size\": size,\n\t\t\"watermelon_count\": maxWatermelonCount,\n\t}).Debug(\"watermelon observer\")\n\n\tif maxWatermelonCount == 0 {\n\t\treturn\n\t}\n\n\tvar watermelonCount int32 = 0\n\n\tgo func() {\n\t\tfor event := range w.Events(stop, chanWatermelonObserverEventsBuffer) {\n\t\t\tif event.Type == world.EventTypeObjectDelete {\n\t\t\t\tif _, ok := event.Payload.(*watermelon.Watermelon); ok {\n\t\t\t\t\tatomic.AddInt32(&watermelonCount, -1)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tticker := time.NewTicker(addWatermelonDelay)\n\t\tdefer ticker.Stop()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\t\tfor atomic.LoadInt32(&watermelonCount) < maxWatermelonCount {\n\t\t\t\t\tif _, err := watermelon.NewWatermelon(w); err != nil {\n\t\t\t\t\t\tlogger.WithError(err).Error(\"cannot create watermelon\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tatomic.AddInt32(&watermelonCount, 1)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase <-stop:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>bump preload<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>Fix TestGatherNotes<commit_after><|endoftext|>"} {"text":"<commit_before>package mario\n\nimport (\n\t\"fmt\"\n\tneural \"github.com\/poseidon4o\/go-neural\/src\/neural\"\n\tutil \"github.com\/poseidon4o\/go-neural\/src\/util\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"sort\"\n)\n\ntype NeuronName int\n\nconst (\n\tposX NeuronName = iota\n\tposY NeuronName = iota\n\tvelY NeuronName = iota\n\tvelX NeuronName = iota\n\tH1 NeuronName = iota\n\tH2 NeuronName = iota\n\tH3 NeuronName = iota\n\tH4 NeuronName = iota\n\tH5 NeuronName = iota\n\tH6 NeuronName = iota\n\tH7 NeuronName = iota\n\tH8 NeuronName = iota\n\tR1 NeuronName = iota\n\tR2 NeuronName = iota\n\tR3 NeuronName = iota\n\tR4 NeuronName = iota\n\tR5 NeuronName = iota\n\tR6 NeuronName = iota\n\tR7 NeuronName = iota\n\tR8 NeuronName = iota\n\tjump NeuronName = iota\n\txMove NeuronName = iota\n\tNRN_COUNT int = iota\n)\n\nfunc nrn(name NeuronName) int {\n\treturn int(name)\n}\n\ntype MarioNode struct {\n\tfig *Figure\n\tbrain *neural.Net\n\tbestX float64\n\tidleX float64\n\tdead bool\n\tidleFrames uint32\n}\n\ntype MarioCol []MarioNode\n\nfunc (figs MarioCol) Len() int {\n\treturn len(figs)\n}\n\nfunc (figs MarioCol) Less(c, r int) bool {\n\treturn figs[c].bestX > figs[r].bestX\n}\n\nfunc (figs MarioCol) Swap(c, r int) {\n\tfigs[c], figs[r] = figs[r], figs[c]\n}\n\ntype Mario struct {\n\tfigures MarioCol\n\tlvl Level\n\tdrawCb func(pos, size *util.Vector, color uint32)\n\tdrawSize int\n}\n\nfunc (m *Mario) Complete() float64 {\n\treturn m.figures[0].bestX \/ m.lvl.size.X\n}\n\nfunc (m *Mario) Done() bool {\n\treturn false\n}\n\nfunc (m *Mario) SetDrawRectCb(cb func(pos, size *util.Vector, color uint32)) {\n\tm.drawCb = cb\n}\n\nfunc (m *Mario) LogicTick(dt float64) {\n\tm.lvl.Step(dt)\n\tsort.Sort(m.figures)\n\n\twg := make(chan struct{}, len(m.figures))\n\n\tstepC := func(r int) {\n\t\tm.checkStep(r)\n\t\tm.mutateStep(r)\n\t\tif len(m.figures) > 1 {\n\t\t\tm.thnikStep(r)\n\t\t}\n\t\twg <- struct{}{}\n\t}\n\n\tfor c := range m.figures {\n\t\tgo stepC(c)\n\t}\n\n\tfor c := 0; c < len(m.figures); c++ {\n\t\t<-wg\n\t}\n}\n\nfunc (m *Mario) Jump() {\n\tm.figures[0].fig.Jump()\n}\n\nfunc (m *Mario) Move(dir int) {\n\tm.figures[0].fig.Move(dir)\n}\n\nfunc (m *Mario) Figs() MarioCol {\n\treturn m.figures\n}\n\nfunc NewMario(figCount int, size *util.Vector) *Mario {\n\tfmt.Println(\"\")\n\tlevel := NewLevel(int(size.X), int(size.Y))\n\tlevel.AddFigures(figCount)\n\n\tnets := make([]*neural.Net, figCount, figCount)\n\tfor c := range nets {\n\t\tnets[c] = neural.NewNet(NRN_COUNT)\n\n\t\tfor r := 0; r < (nrn(H8) - nrn(H1)); r++ {\n\t\t\t\/\/ input to H\n\t\t\t*nets[c].Synapse(nrn(posX), r+nrn(H1)) = 0.0\n\t\t\t*nets[c].Synapse(nrn(posY), r+nrn(H1)) = 0.0\n\t\t\t*nets[c].Synapse(nrn(velX), r+nrn(H1)) = 0.0\n\t\t\t*nets[c].Synapse(nrn(velY), r+nrn(H1)) = 0.0\n\n\t\t\t\/\/ R to output\n\t\t\t*nets[c].Synapse(r+nrn(R1), nrn(jump)) = 0.0\n\t\t\t*nets[c].Synapse(r+nrn(R1), nrn(xMove)) = 0.0\n\t\t}\n\n\t\tfor r := 0; r < (nrn(H8) - nrn(H1)); r++ {\n\t\t\tfor q := 0; q < (nrn(H8) - nrn(H1)); q++ {\n\t\t\t\t*nets[c].Synapse(r+nrn(H1), q+nrn(R1)) = 0.0\n\t\t\t}\n\t\t}\n\n\t\tnets[c].Randomize()\n\t}\n\n\tfigs := make(MarioCol, figCount, figCount)\n\tfor c := range figs {\n\t\tfigs[c].brain = nets[c]\n\t\tfigs[c].dead = false\n\t\tfigs[c].bestX = 0\n\t\tfigs[c].fig = level.figures[c]\n\t}\n\n\treturn &Mario{\n\t\tfigures: figs,\n\t\tlvl: *level,\n\t\tdrawCb: func(pos, size *util.Vector, color uint32) {},\n\t\tdrawSize: 5,\n\t}\n}\n\nfunc (m *Mario) DrawTick() {\n\tvar (\n\t\tred = uint32(0xffff0000)\n\t\tgreen = uint32(0xff00ff00)\n\t\tblue = uint32(0xff0000ff)\n\t)\n\n\tblSize := util.NewVector(float64(BLOCK_SIZE), float64(BLOCK_SIZE))\n\tblSizeSmall := blSize.Scale(0.5)\n\n\ttranslate := util.NewVector(6, 6)\n\n\tsize := util.NewVector(float64(m.drawSize), float64(m.drawSize))\n\n\tfor c := range m.lvl.blocks {\n\t\tfor r := range m.lvl.blocks[c] {\n\t\t\tif m.lvl.blocks[c][r] != nil {\n\t\t\t\tm.drawCb(m.lvl.blocks[c][r], blSize, red)\n\t\t\t\tm.drawCb(m.lvl.blocks[c][r].Add(translate), blSizeSmall, green)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor c := range m.figures {\n\t\tm.drawCb(m.figures[c].fig.pos.Add(size.Scale(0.5).Neg()), size, blue)\n\t}\n}\n\nfunc (m *Mario) checkStep(c int) {\n\tfig := m.figures[c].fig\n\n\tif fig.nextPos.Y > m.lvl.size.Y || fig.nextPos.Y < 0 {\n\t\tm.figures[c].dead = true\n\t\treturn\n\t}\n\n\tif fig.nextPos.X < 0 {\n\t\tfig.nextPos.X = 0\n\t} else if fig.nextPos.X > m.lvl.size.X {\n\t\tfig.nextPos.X = m.lvl.size.X\n\t}\n\n\tblock := m.lvl.FloorAt(&fig.pos)\n\n\tif block == nil || fig.nextPos.Y < block.Y {\n\t\tfig.pos.Y = fig.nextPos.Y\n\t} else {\n\t\t\/\/ m.drawCb(block, util.NewVector(float64(BLOCK_SIZE), float64(BLOCK_SIZE)), 0xff00ffff)\n\t\t\/\/ land on block\n\t\tfig.vel.Y = 0\n\t\tfig.pos.Y = block.Y - 0.1\n\t\tfig.Land()\n\t}\n\n\tif fig.pos.X != fig.nextPos.X {\n\t\tfig.nextPos.Y = fig.pos.Y\n\t\tcolide := m.lvl.CubeAt(&fig.nextPos)\n\t\tif colide != nil {\n\t\t\t\/\/ m.drawCb(colide, util.NewVector(float64(BLOCK_SIZE), float64(BLOCK_SIZE)), 0xff00ffff)\n\t\t\tif fig.pos.X < fig.nextPos.X {\n\t\t\t\t\/\/ collide right\n\t\t\t\tfig.pos.X = colide.X - 0.1\n\t\t\t} else {\n\t\t\t\t\/\/ colide left\n\t\t\t\tfig.pos.X = colide.X + float64(BLOCK_SIZE) + 0.1\n\t\t\t}\n\t\t} else {\n\t\t\tfig.pos.X = fig.nextPos.X\n\t\t}\n\t}\n\n}\n\nfunc (m *Mario) thnikStep(c int) {\n\tdiscreteX := float64(int(m.figures[c].fig.pos.X \/ float64(OBSTACLE_SPACING*BLOCK_SIZE)))\n\tm.figures[c].brain.Stimulate(nrn(posX), discreteX)\n\n\tm.figures[c].brain.Stimulate(nrn(posY), m.figures[c].fig.pos.Y)\n\tm.figures[c].brain.Stimulate(nrn(velX), m.figures[c].fig.vel.X)\n\tm.figures[c].brain.Stimulate(nrn(velY), m.figures[c].fig.vel.Y)\n\n\tm.figures[c].brain.Step()\n\n\tif m.figures[c].brain.ValueOf(nrn(jump)) > 0.9 {\n\t\tm.figures[c].fig.Jump()\n\t}\n\n\txMoveValue := m.figures[c].brain.ValueOf(nrn(xMove))\n\tif math.Abs(xMoveValue) > 0.9 {\n\t\tm.figures[c].fig.Move(int(xMoveValue * 10))\n\t}\n\n\tm.figures[c].brain.Clear()\n}\n\nconst idleThreshold uint32 = 2000\n\nfunc (m *Mario) randNet() *neural.Net {\n\tcutOff := 10.0\n\tidx := 0\n\tfor {\n\t\tr := rand.ExpFloat64()\n\t\tif r <= cutOff {\n\t\t\tidx = int((r * float64(len(m.figures))) \/ cutOff)\n\t\t\tbreak\n\t\t}\n\t}\n\treturn m.figures[idx].brain\n}\n\nfunc (m *Mario) mutateStep(c int) {\n\n\tif m.figures[c].dead {\n\t\tm.figures[c].dead = false\n\t\tm.figures[c].fig.pos = *m.lvl.NewFigurePos()\n\t\tm.figures[c].fig.vel = *util.NewVector(0, 0)\n\n\t\tif m.figures[c].idleFrames >= idleThreshold {\n\t\t\tm.figures[c].brain.Mutate(0.75)\n\t\t\tm.figures[c].bestX *= 0.25\n\t\t} else {\n\t\t\tswapChance := (float64(c) \/ float64(len(m.figures))) * 2.0\n\t\t\tif neural.Chance(swapChance) {\n\t\t\t\t*m.figures[c].brain = *neural.Cross2(m.randNet(), m.randNet())\n\t\t\t}\n\t\t\tm.figures[c].brain.MutateWithMagnitude(0.01, 0.1)\n\t\t\tm.figures[c].bestX *= 0.975\n\t\t}\n\n\t\tm.figures[c].idleFrames = 0\n\t\tm.figures[c].idleX = 0\n\t} else {\n\t\tif m.figures[c].fig.pos.X > m.figures[c].bestX {\n\t\t\tm.figures[c].bestX = m.figures[c].fig.pos.X\n\t\t}\n\n\t\tif m.figures[c].fig.pos.X > m.figures[c].idleX {\n\t\t\tm.figures[c].idleX = m.figures[c].fig.pos.X\n\t\t} else {\n\t\t\tm.figures[c].idleFrames++\n\t\t\tif m.figures[c].idleFrames >= idleThreshold {\n\t\t\t\tm.figures[c].dead = true\n\t\t\t\t\/\/ c--\n\t\t\t}\n\t\t}\n\n\t}\n}\n<commit_msg>Remove ugly dev hack<commit_after>package mario\n\nimport (\n\t\"fmt\"\n\tneural \"github.com\/poseidon4o\/go-neural\/src\/neural\"\n\tutil \"github.com\/poseidon4o\/go-neural\/src\/util\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"sort\"\n)\n\ntype NeuronName int\n\nconst (\n\tposX NeuronName = iota\n\tposY NeuronName = iota\n\tvelY NeuronName = iota\n\tvelX NeuronName = iota\n\tH1 NeuronName = iota\n\tH2 NeuronName = iota\n\tH3 NeuronName = iota\n\tH4 NeuronName = iota\n\tH5 NeuronName = iota\n\tH6 NeuronName = iota\n\tH7 NeuronName = iota\n\tH8 NeuronName = iota\n\tR1 NeuronName = iota\n\tR2 NeuronName = iota\n\tR3 NeuronName = iota\n\tR4 NeuronName = iota\n\tR5 NeuronName = iota\n\tR6 NeuronName = iota\n\tR7 NeuronName = iota\n\tR8 NeuronName = iota\n\tjump NeuronName = iota\n\txMove NeuronName = iota\n\tNRN_COUNT int = iota\n)\n\nfunc nrn(name NeuronName) int {\n\treturn int(name)\n}\n\ntype MarioNode struct {\n\tfig *Figure\n\tbrain *neural.Net\n\tbestX float64\n\tidleX float64\n\tdead bool\n\tidleFrames uint32\n}\n\ntype MarioCol []MarioNode\n\nfunc (figs MarioCol) Len() int {\n\treturn len(figs)\n}\n\nfunc (figs MarioCol) Less(c, r int) bool {\n\treturn figs[c].bestX > figs[r].bestX\n}\n\nfunc (figs MarioCol) Swap(c, r int) {\n\tfigs[c], figs[r] = figs[r], figs[c]\n}\n\ntype Mario struct {\n\tfigures MarioCol\n\tlvl Level\n\tdrawCb func(pos, size *util.Vector, color uint32)\n\tdrawSize int\n}\n\nfunc (m *Mario) Complete() float64 {\n\treturn m.figures[0].bestX \/ m.lvl.size.X\n}\n\nfunc (m *Mario) Done() bool {\n\treturn false\n}\n\nfunc (m *Mario) SetDrawRectCb(cb func(pos, size *util.Vector, color uint32)) {\n\tm.drawCb = cb\n}\n\nfunc (m *Mario) LogicTick(dt float64) {\n\tm.lvl.Step(dt)\n\tsort.Sort(m.figures)\n\n\twg := make(chan struct{}, len(m.figures))\n\n\tstepC := func(r int) {\n\t\tm.checkStep(r)\n\t\tm.mutateStep(r)\n\t\tm.thnikStep(r)\n\t\twg <- struct{}{}\n\t}\n\n\tfor c := range m.figures {\n\t\tgo stepC(c)\n\t}\n\n\tfor c := 0; c < len(m.figures); c++ {\n\t\t<-wg\n\t}\n}\n\nfunc (m *Mario) Jump() {\n\tm.figures[0].fig.Jump()\n}\n\nfunc (m *Mario) Move(dir int) {\n\tm.figures[0].fig.Move(dir)\n}\n\nfunc (m *Mario) Figs() MarioCol {\n\treturn m.figures\n}\n\nfunc NewMario(figCount int, size *util.Vector) *Mario {\n\tfmt.Println(\"\")\n\tlevel := NewLevel(int(size.X), int(size.Y))\n\tlevel.AddFigures(figCount)\n\n\tnets := make([]*neural.Net, figCount, figCount)\n\tfor c := range nets {\n\t\tnets[c] = neural.NewNet(NRN_COUNT)\n\n\t\tfor r := 0; r < (nrn(H8) - nrn(H1)); r++ {\n\t\t\t\/\/ input to H\n\t\t\t*nets[c].Synapse(nrn(posX), r+nrn(H1)) = 0.0\n\t\t\t*nets[c].Synapse(nrn(posY), r+nrn(H1)) = 0.0\n\t\t\t*nets[c].Synapse(nrn(velX), r+nrn(H1)) = 0.0\n\t\t\t*nets[c].Synapse(nrn(velY), r+nrn(H1)) = 0.0\n\n\t\t\t\/\/ R to output\n\t\t\t*nets[c].Synapse(r+nrn(R1), nrn(jump)) = 0.0\n\t\t\t*nets[c].Synapse(r+nrn(R1), nrn(xMove)) = 0.0\n\t\t}\n\n\t\tfor r := 0; r < (nrn(H8) - nrn(H1)); r++ {\n\t\t\tfor q := 0; q < (nrn(H8) - nrn(H1)); q++ {\n\t\t\t\t*nets[c].Synapse(r+nrn(H1), q+nrn(R1)) = 0.0\n\t\t\t}\n\t\t}\n\n\t\tnets[c].Randomize()\n\t}\n\n\tfigs := make(MarioCol, figCount, figCount)\n\tfor c := range figs {\n\t\tfigs[c].brain = nets[c]\n\t\tfigs[c].dead = false\n\t\tfigs[c].bestX = 0\n\t\tfigs[c].fig = level.figures[c]\n\t}\n\n\treturn &Mario{\n\t\tfigures: figs,\n\t\tlvl: *level,\n\t\tdrawCb: func(pos, size *util.Vector, color uint32) {},\n\t\tdrawSize: 5,\n\t}\n}\n\nfunc (m *Mario) DrawTick() {\n\tvar (\n\t\tred = uint32(0xffff0000)\n\t\tgreen = uint32(0xff00ff00)\n\t\tblue = uint32(0xff0000ff)\n\t)\n\n\tblSize := util.NewVector(float64(BLOCK_SIZE), float64(BLOCK_SIZE))\n\tblSizeSmall := blSize.Scale(0.5)\n\n\ttranslate := util.NewVector(6, 6)\n\n\tsize := util.NewVector(float64(m.drawSize), float64(m.drawSize))\n\n\tfor c := range m.lvl.blocks {\n\t\tfor r := range m.lvl.blocks[c] {\n\t\t\tif m.lvl.blocks[c][r] != nil {\n\t\t\t\tm.drawCb(m.lvl.blocks[c][r], blSize, red)\n\t\t\t\tm.drawCb(m.lvl.blocks[c][r].Add(translate), blSizeSmall, green)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor c := range m.figures {\n\t\tm.drawCb(m.figures[c].fig.pos.Add(size.Scale(0.5).Neg()), size, blue)\n\t}\n}\n\nfunc (m *Mario) checkStep(c int) {\n\tfig := m.figures[c].fig\n\n\tif fig.nextPos.Y > m.lvl.size.Y || fig.nextPos.Y < 0 {\n\t\tm.figures[c].dead = true\n\t\treturn\n\t}\n\n\tif fig.nextPos.X < 0 {\n\t\tfig.nextPos.X = 0\n\t} else if fig.nextPos.X > m.lvl.size.X {\n\t\tfig.nextPos.X = m.lvl.size.X\n\t}\n\n\tblock := m.lvl.FloorAt(&fig.pos)\n\n\tif block == nil || fig.nextPos.Y < block.Y {\n\t\tfig.pos.Y = fig.nextPos.Y\n\t} else {\n\t\t\/\/ m.drawCb(block, util.NewVector(float64(BLOCK_SIZE), float64(BLOCK_SIZE)), 0xff00ffff)\n\t\t\/\/ land on block\n\t\tfig.vel.Y = 0\n\t\tfig.pos.Y = block.Y - 0.1\n\t\tfig.Land()\n\t}\n\n\tif fig.pos.X != fig.nextPos.X {\n\t\tfig.nextPos.Y = fig.pos.Y\n\t\tcolide := m.lvl.CubeAt(&fig.nextPos)\n\t\tif colide != nil {\n\t\t\t\/\/ m.drawCb(colide, util.NewVector(float64(BLOCK_SIZE), float64(BLOCK_SIZE)), 0xff00ffff)\n\t\t\tif fig.pos.X < fig.nextPos.X {\n\t\t\t\t\/\/ collide right\n\t\t\t\tfig.pos.X = colide.X - 0.1\n\t\t\t} else {\n\t\t\t\t\/\/ colide left\n\t\t\t\tfig.pos.X = colide.X + float64(BLOCK_SIZE) + 0.1\n\t\t\t}\n\t\t} else {\n\t\t\tfig.pos.X = fig.nextPos.X\n\t\t}\n\t}\n\n}\n\nfunc (m *Mario) thnikStep(c int) {\n\tdiscreteX := float64(int(m.figures[c].fig.pos.X \/ float64(OBSTACLE_SPACING*BLOCK_SIZE)))\n\tm.figures[c].brain.Stimulate(nrn(posX), discreteX)\n\n\tm.figures[c].brain.Stimulate(nrn(posY), m.figures[c].fig.pos.Y)\n\tm.figures[c].brain.Stimulate(nrn(velX), m.figures[c].fig.vel.X)\n\tm.figures[c].brain.Stimulate(nrn(velY), m.figures[c].fig.vel.Y)\n\n\tm.figures[c].brain.Step()\n\n\tif m.figures[c].brain.ValueOf(nrn(jump)) > 0.9 {\n\t\tm.figures[c].fig.Jump()\n\t}\n\n\txMoveValue := m.figures[c].brain.ValueOf(nrn(xMove))\n\tif math.Abs(xMoveValue) > 0.9 {\n\t\tm.figures[c].fig.Move(int(xMoveValue * 10))\n\t}\n\n\tm.figures[c].brain.Clear()\n}\n\nconst idleThreshold uint32 = 2000\n\nfunc (m *Mario) randNet() *neural.Net {\n\tcutOff := 10.0\n\tidx := 0\n\tfor {\n\t\tr := rand.ExpFloat64()\n\t\tif r <= cutOff {\n\t\t\tidx = int((r * float64(len(m.figures))) \/ cutOff)\n\t\t\tbreak\n\t\t}\n\t}\n\treturn m.figures[idx].brain\n}\n\nfunc (m *Mario) mutateStep(c int) {\n\n\tif m.figures[c].dead {\n\t\tm.figures[c].dead = false\n\t\tm.figures[c].fig.pos = *m.lvl.NewFigurePos()\n\t\tm.figures[c].fig.vel = *util.NewVector(0, 0)\n\n\t\tif m.figures[c].idleFrames >= idleThreshold {\n\t\t\tm.figures[c].brain.Mutate(0.75)\n\t\t\tm.figures[c].bestX *= 0.25\n\t\t} else {\n\t\t\tswapChance := (float64(c) \/ float64(len(m.figures))) * 2.0\n\t\t\tif neural.Chance(swapChance) {\n\t\t\t\t*m.figures[c].brain = *neural.Cross2(m.randNet(), m.randNet())\n\t\t\t}\n\t\t\tm.figures[c].brain.MutateWithMagnitude(0.01, 0.1)\n\t\t\tm.figures[c].bestX *= 0.975\n\t\t}\n\n\t\tm.figures[c].idleFrames = 0\n\t\tm.figures[c].idleX = 0\n\t} else {\n\t\tif m.figures[c].fig.pos.X > m.figures[c].bestX {\n\t\t\tm.figures[c].bestX = m.figures[c].fig.pos.X\n\t\t}\n\n\t\tif m.figures[c].fig.pos.X > m.figures[c].idleX {\n\t\t\tm.figures[c].idleX = m.figures[c].fig.pos.X\n\t\t} else {\n\t\t\tm.figures[c].idleFrames++\n\t\t\tif m.figures[c].idleFrames >= idleThreshold {\n\t\t\t\tm.figures[c].dead = true\n\t\t\t\t\/\/ c--\n\t\t\t}\n\t\t}\n\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package schedule\n\nimport (\n\t\"time\"\n)\n\ntype ScheduleEntries int\n\ntype CommitSchedule [][]ScheduleEntries\n\nconst (\n\tNOT_A_FIELD ScheduleEntries = -1\n\tEMPTY ScheduleEntries = 0\n\tONE ScheduleEntries = 1\n\tTWO ScheduleEntries = 2\n\tTHREE ScheduleEntries = 3\n\tFOUR ScheduleEntries = 4\n\n\tNUM_WEEK_DAYS = 7\n)\n\n\/\/ BuildCommitSchedule returns an empty CommitSchedule, where all fiels are\n\/\/ initialized with EMPTY except those which are not in the range of days.\n\/\/ The CommitSchedule is a table of ints.\nfunc BuildCommitSchedule(days []time.Time) CommitSchedule {\n\t\/\/ get weeks, which determine width and height is seven\n\t\/\/ fill entries with EMPTY or NOT_A_FIELD\n\tschedule := make(CommitSchedule, 0) \/\/ TODO figure out num weeks\n\t\/\/ firstWeek := buildFirstWeek(days[0].Weekday())\n\t\/\/ lastWeek := buildLastWeek(days[len(days)-1].Weekday())\n\t\/\/ TODO get days inbetween first and last week and join them\n\treturn schedule\n}\n\n\/\/ buildFirstWeek creates NUM_WEEK_DAYS schedule entries, where the entries\n\/\/ before the given week day are NOT_A_FIELD and EMPTY afterwards (including given day)\nfunc buildFirstWeek(day time.Weekday) []ScheduleEntries {\n\tvar firstWeek []ScheduleEntries\n\tfor i := 0; i < NUM_WEEK_DAYS; i++ {\n\t\tif i < int(day) {\n\t\t\tfirstWeek = append(firstWeek, NOT_A_FIELD)\n\t\t} else {\n\t\t\tfirstWeek = append(firstWeek, EMPTY)\n\t\t}\n\t}\n\treturn firstWeek\n}\n\n\/\/ buildLastWeek creates NUM_WEEK_DAYS schedule entries, where the entries\n\/\/ after the given week day are NOT_A_FIELD and EMPTY before (including given day)\nfunc buildLastWeek(day time.Weekday) []ScheduleEntries {\n\tvar lastWeek []ScheduleEntries\n\tfor i := 0; i < NUM_WEEK_DAYS; i++ {\n\t\tif i > int(day) {\n\t\t\tlastWeek = append(lastWeek, NOT_A_FIELD)\n\t\t} else {\n\t\t\tlastWeek = append(lastWeek, EMPTY)\n\t\t}\n\t}\n\treturn lastWeek\n}\n<commit_msg>Add function header and adjust test.<commit_after>package schedule\n\nimport (\n\t\"time\"\n)\n\ntype ScheduleEntries int\n\ntype CommitSchedule [][]ScheduleEntries\n\nconst (\n\tNOT_A_FIELD ScheduleEntries = -1\n\tEMPTY ScheduleEntries = 0\n\tONE ScheduleEntries = 1\n\tTWO ScheduleEntries = 2\n\tTHREE ScheduleEntries = 3\n\tFOUR ScheduleEntries = 4\n\n\tNUM_WEEK_DAYS = 7\n)\n\n\/\/ BuildCommitSchedule returns an empty CommitSchedule, where all fiels are\n\/\/ initialized with EMPTY except those which are not in the range of days.\n\/\/ The CommitSchedule is a table of ints.\nfunc BuildCommitSchedule(days []time.Time) CommitSchedule {\n\t\/\/ get weeks, which determine width and height is seven\n\t\/\/ fill entries with EMPTY or NOT_A_FIELD\n\tschedule := make(CommitSchedule, 0) \/\/ TODO figure out num weeks\n\t\/\/ firstWeek := buildFirstWeek(days[0].Weekday())\n\t\/\/ lastWeek := buildLastWeek(days[len(days)-1].Weekday())\n\t\/\/ TODO get days inbetween first and last week and join them\n\treturn schedule\n}\n\n\/\/ buildFirstWeek creates NUM_WEEK_DAYS schedule entries, where the entries\n\/\/ before the given week day are NOT_A_FIELD and EMPTY afterwards (including given day)\nfunc buildFirstWeek(day time.Weekday) []ScheduleEntries {\n\tvar firstWeek []ScheduleEntries\n\tfor i := 0; i < NUM_WEEK_DAYS; i++ {\n\t\tif i < int(day) {\n\t\t\tfirstWeek = append(firstWeek, NOT_A_FIELD)\n\t\t} else {\n\t\t\tfirstWeek = append(firstWeek, EMPTY)\n\t\t}\n\t}\n\treturn firstWeek\n}\n\n\/\/ buildLastWeek creates NUM_WEEK_DAYS schedule entries, where the entries\n\/\/ after the given week day are NOT_A_FIELD and EMPTY before (including given day)\nfunc buildLastWeek(day time.Weekday) []ScheduleEntries {\n\tvar lastWeek []ScheduleEntries\n\tfor i := 0; i < NUM_WEEK_DAYS; i++ {\n\t\tif i > int(day) {\n\t\t\tlastWeek = append(lastWeek, NOT_A_FIELD)\n\t\t} else {\n\t\t\tlastWeek = append(lastWeek, EMPTY)\n\t\t}\n\t}\n\treturn lastWeek\n}\n\nfunc connectWeeksToSchedule(firstWeek, lastWeek []ScheduleEntries) [][]ScheduleEntries {\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ The MIT License (MIT)\n\/\/\n\/\/ Copyright (c) 2014 AT&T\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage impl\n\nimport (\n\t\"strconv\"\n\t\"math\"\n\n\t\"github.com\/golang\/glog\"\n\n\t\"github.com\/att-innovate\/charmander-scheduler\/scheduler\"\n\t\"github.com\/att-innovate\/charmander-scheduler\/mesosproto\"\n\n\tmanagerInterface \"github.com\/att-innovate\/charmander-scheduler\/manager\"\n)\n\n\/\/ my scheduler\nvar Scheduler = &scheduler.Scheduler{}\n\nconst (\n\tOneMB = 1000000\n\tTwoMB = 2000000\n)\n\nfunc init() {\n\tglog.Infoln(\"Initializing scheduler ...\")\n\n\tScheduler.Registered = func(manager managerInterface.Manager, frameworkId string) {\n\t\tglog.Infoln(\"Registered FrameworkId \", frameworkId)\n\t}\n\n\tScheduler.OverwriteTaskAttributes = func(manager managerInterface.Manager, taskRequest *managerInterface.Task) {\n\t\tmemObservedRaw := manager.GetTaskIntelligence(taskRequest.ID, \"mem\")\n\t\tif len(memObservedRaw) > 0 {\n\t\t\tmemObserved, _ := strconv.Atoi(memObservedRaw)\n\t\t\tif memObserved < OneMB {\n\t\t\t\ttaskRequest.Mem = uint64(TwoMB)\n\t\t\t} else {\n\t\t\t\ttaskRequest.Mem = uint64((math.Ceil(float64(memObserved \/ OneMB) * 1.1))) \/\/ add a 10% safety net\n\t\t\t}\n\t\t}\n\t}\n\n\tScheduler.ResourceOffers = func(manager managerInterface.Manager, offers []*mesosproto.Offer) {\n\t\tvar taskRequests []*managerInterface.Task\n\t\ttaskRequests = manager.GetOpenTaskRequests()\n\n\t\tglog.Infoln(\"Got \", len(offers), \"offer(s) from master.\")\n\n\t\tfor _, offer := range offers {\n\t\t\tmatchFound := false\n\n\t\t\tfor _, taskRequest := range taskRequests {\n\t\t\t\tif matchFound { break }\n\t\t\t\tif taskRequest.RequestSent { continue }\n\t\t\t\tif !manager.ResourceRequirementsWouldMatch(offer, taskRequest) { continue }\n\n\t\t\t\tmanager.AcceptOffer(offer.GetId(), offer.SlaveId, taskRequest)\n\t\t\t\tmatchFound = true\n\t\t\t}\n\n\t\t\tif !matchFound {\n\t\t\t\tmanager.DeclineOffer(offer.GetId())\n\t\t\t}\n\t\t}\n\t}\n\n}\n<commit_msg>fix - TwoMB from 2000000 to 2<commit_after>\/\/ The MIT License (MIT)\n\/\/\n\/\/ Copyright (c) 2014 AT&T\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\npackage impl\n\nimport (\n\t\"strconv\"\n\t\"math\"\n\n\t\"github.com\/golang\/glog\"\n\n\t\"github.com\/att-innovate\/charmander-scheduler\/scheduler\"\n\t\"github.com\/att-innovate\/charmander-scheduler\/mesosproto\"\n\n\tmanagerInterface \"github.com\/att-innovate\/charmander-scheduler\/manager\"\n)\n\n\/\/ my scheduler\nvar Scheduler = &scheduler.Scheduler{}\n\nconst (\n\tMBDivisor = 1000000\n\tTwoMB = 2\n)\n\nfunc init() {\n\tglog.Infoln(\"Initializing scheduler ...\")\n\n\tScheduler.Registered = func(manager managerInterface.Manager, frameworkId string) {\n\t\tglog.Infoln(\"Registered FrameworkId \", frameworkId)\n\t}\n\n\tScheduler.OverwriteTaskAttributes = func(manager managerInterface.Manager, taskRequest *managerInterface.Task) {\n\t\tmemObservedRaw := manager.GetTaskIntelligence(taskRequest.ID, \"mem\")\n\t\tif len(memObservedRaw) > 0 {\n\t\t\tmemObserved, _ := strconv.Atoi(memObservedRaw)\n\t\t\tif memObserved < MBDivisor {\n\t\t\t\ttaskRequest.Mem = uint64(TwoMB)\n\t\t\t} else {\n\t\t\t\ttaskRequest.Mem = uint64((math.Ceil(float64(memObserved \/ MBDivisor) * 1.1))) \/\/ add a 10% safety net\n\t\t\t}\n\t\t}\n\t}\n\n\tScheduler.ResourceOffers = func(manager managerInterface.Manager, offers []*mesosproto.Offer) {\n\t\tvar taskRequests []*managerInterface.Task\n\t\ttaskRequests = manager.GetOpenTaskRequests()\n\n\t\tglog.Infoln(\"Got \", len(offers), \"offer(s) from master.\")\n\n\t\tfor _, offer := range offers {\n\t\t\tmatchFound := false\n\n\t\t\tfor _, taskRequest := range taskRequests {\n\t\t\t\tif matchFound { break }\n\t\t\t\tif taskRequest.RequestSent { continue }\n\t\t\t\tif !manager.ResourceRequirementsWouldMatch(offer, taskRequest) { continue }\n\n\t\t\t\tmanager.AcceptOffer(offer.GetId(), offer.SlaveId, taskRequest)\n\t\t\t\tmatchFound = true\n\t\t\t}\n\n\t\t\tif !matchFound {\n\t\t\t\tmanager.DeclineOffer(offer.GetId())\n\t\t\t}\n\t\t}\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"os\"\n)\n\nconst (\n\n\t\/\/ AppName -\n\tAppName = \"diary-pwa\"\n)\n\nvar (\n\n\t\/\/ Server -\n\tServer = map[string]string{\n\t\t\"Port\": os.Getenv(\"PORT\"),\n\t\t\"Host\": os.Getenv(\"HOST\"),\n\t}\n\n\t\/\/ UseHTTPS -\n\tUseHTTPS = true\n)\n\nfunc init() {\n\n\tif Server[\"Port\"] == \"\" {\n\t\tServer[\"Port\"] = \"8080\"\n\t}\n\n\tif Server[\"Host\"] == \"\" {\n\t\tServer[\"Host\"] = \"\"\n\t}\n}\n<commit_msg>config change<commit_after>package config\n\nimport (\n\t\"os\"\n)\n\nconst (\n\n\t\/\/ AppName -\n\tAppName = \"diary-pwa\"\n)\n\nvar (\n\n\t\/\/ Server -\n\tServer = map[string]string{\n\t\t\"Port\": os.Getenv(\"PORT\"),\n\t\t\"Host\": os.Getenv(\"HOST\"),\n\t}\n)\n\nfunc init() {\n\n\tif Server[\"Port\"] == \"\" {\n\t\tServer[\"Port\"] = \"8080\"\n\t}\n\n\tif Server[\"Host\"] == \"\" {\n\t\tServer[\"Host\"] = \"\"\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package targz provides handy functions to create and extract tar.gz\n\/\/ files.\npackage targz\n\n\/*\nThe tar.gz functionality has been inspired the following resources:\n\nhttp:\/\/stackoverflow.com\/a\/40003617\nhttp:\/\/blog.ralch.com\/tutorial\/golang-working-with-tar-and-gzip\/\nhttps:\/\/medium.com\/@skdomino\/taring-untaring-files-in-go-6b07cf56bc07\n\nAll credit for these functions goes to the authors of the above posts.\n*\/\n\nimport (\n\t\"archive\/tar\"\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\n\/\/ TarGz takes a source and variable writers and walks src writing each file\n\/\/ found to the tar writer; the purpose for accepting multiple writers is to allow\n\/\/ for multiple outputs (for example a file, or md5 hash), etc.\nfunc TarGz(src string, writers ...io.Writer) error {\n\t\/\/ ensure the src actually exists before trying to tar it\n\tif _, err := os.Stat(src); err != nil {\n\t\treturn fmt.Errorf(\"Unable to tar files - %v\", err.Error())\n\t}\n\n\tmw := io.MultiWriter(writers...)\n\n\tgzw := gzip.NewWriter(mw)\n\tdefer gzw.Close()\n\n\ttw := tar.NewWriter(gzw)\n\tdefer tw.Close()\n\n\tinfo, err := os.Stat(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar baseDir string\n\tif info.IsDir() {\n\t\tbaseDir = filepath.Base(src)\n\t}\n\n\t\/\/ walk path\n\treturn filepath.Walk(src, func(file string, fi os.FileInfo,\n\t\terr error) error {\n\n\t\t\/\/ return on any error\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tfmt.Println(\"\\nYou seem to have encountered a file \" +\n\t\t\t\t\"system error.\\nPlease file a bug report to\" +\n\t\t\t\t\" help me investigate it.\\n\")\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ We include symlinks as is and won't dereference them\n\t\tvar link string\n\t\tif string(fi.Mode().String()[0]) == \"L\" {\n\t\t\tif link, err = os.Readlink(src); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ create a new dir\/file header\n\t\theader, err := tar.FileInfoHeader(fi, link)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ If there is no basedir, leave the filename as it is, else\n\t\t\/\/ update the name to reflect the desired destination.\n\t\tif baseDir != \"\" {\n\t\t\theader.Name = filepath.Join(baseDir,\n\t\t\t\tstrings.TrimPrefix(file, src))\n\t\t}\n\n\t\t\/\/ write the header\n\t\tif err := tw.WriteHeader(header); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ return on directories and symlinks since there will be no\n\t\t\/\/ content to tar\n\t\tif !fi.Mode().IsRegular() {\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ open files for taring\n\t\tf, err := os.Open(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer func() {\n\t\t\tif err := f.Close(); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}()\n\n\t\t\/\/ copy file data into tar writer\n\t\tif _, err := io.Copy(tw, f); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\n\/\/ UntarGz takes a destination path and a reader; a tar reader loops over the\n\/\/ tarfile creating the file structure at dst along the way, and writes any\n\/\/ files to dst.\nfunc UntarGz(dst string, r io.Reader) error {\n\n\tgzr, err := gzip.NewReader(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer gzr.Close()\n\n\ttr := tar.NewReader(gzr)\n\n\tfor {\n\t\theader, err := tr.Next()\n\n\t\tswitch {\n\t\t\/\/ if no more files are found return\n\t\tcase err == io.EOF:\n\t\t\treturn nil\n\t\t\/\/ return any other error\n\t\tcase err != nil:\n\t\t\treturn err\n\t\t\/\/ if the header is nil, just skip it (not sure how this happens)\n\t\tcase header == nil:\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ the target location where the dir\/file should be created\n\t\ttarget := filepath.Join(dst, header.Name)\n\n\t\t\/\/ check the file type\n\t\tswitch header.Typeflag {\n\n\t\t\/\/ if its a dir and it doesn't exist create it\n\t\tcase tar.TypeDir:\n\t\t\tif _, err := os.Stat(target); err != nil {\n\t\t\t\tif err := os.MkdirAll(target, 0755); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\/\/ os.Stat() won't work on symlinks, so we try to create it anyway.\n\t\tcase tar.TypeSymlink:\n\t\t\terr := os.Symlink(header.Linkname, target)\n\t\t\tif os.IsExist(err) {\n\t\t\t\tfmt.Println(\"Symlink \" + target +\n\t\t\t\t\t\" already exists. Skipping.\")\n\t\t\t} else if err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\/\/ if it's a file create it\n\t\tcase tar.TypeReg:\n\t\t\tf, err := os.OpenFile(target,\n\t\t\t\tos.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ Defer closure in case anything fails\n\t\t\tdefer f.Close()\n\n\t\t\t\/\/ copy over contents\n\t\t\tif _, err := io.Copy(f, tr); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ Close the file explicitly.\n\t\t\t\/\/ Deferred calls execute too late and for large\n\t\t\t\/\/ numbers of files we'd get a \"too many files open\"\n\t\t\t\/\/ panic before the function can return and close\n\t\t\t\/\/ the files.\n\t\t\tf.Close()\n\t\t}\n\t}\n}\n<commit_msg>Proper handling of symbolic links<commit_after>\/\/ Package targz provides handy functions to create and extract tar.gz\n\/\/ files.\npackage targz\n\n\/*\nThe tar.gz functionality has been inspired the following resources:\n\nhttp:\/\/stackoverflow.com\/a\/40003617\nhttp:\/\/blog.ralch.com\/tutorial\/golang-working-with-tar-and-gzip\/\nhttps:\/\/medium.com\/@skdomino\/taring-untaring-files-in-go-6b07cf56bc07\n\nAll credit for these functions goes to the authors of the above posts.\n*\/\n\nimport (\n\t\"archive\/tar\"\n\t\"compress\/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\n\/\/ TarGz takes a source and variable writers and walks src writing each file\n\/\/ found to the tar writer; the purpose for accepting multiple writers is to allow\n\/\/ for multiple outputs (for example a file, or md5 hash), etc.\nfunc TarGz(src string, writers ...io.Writer) error {\n\t\/\/ ensure the src actually exists before trying to tar it\n\tif _, err := os.Stat(src); err != nil {\n\t\treturn fmt.Errorf(\"Unable to tar files - %v\", err.Error())\n\t}\n\n\tmw := io.MultiWriter(writers...)\n\n\tgzw := gzip.NewWriter(mw)\n\tdefer gzw.Close()\n\n\ttw := tar.NewWriter(gzw)\n\tdefer tw.Close()\n\n\tinfo, err := os.Stat(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar baseDir string\n\tif info.IsDir() {\n\t\tbaseDir = filepath.Base(src)\n\t}\n\n\t\/\/ walk path\n\treturn filepath.Walk(src, func(file string, fi os.FileInfo,\n\t\terr error) error {\n\n\t\t\/\/ return on any error\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tfmt.Println(\"\\nYou seem to have encountered a file \" +\n\t\t\t\t\"system error.\\nPlease file a bug report to\" +\n\t\t\t\t\" help me investigate it.\\n\")\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ We include symlinks as is and won't dereference them\n\t\tvar link string\n\t\tif string(fi.Mode().String()[0]) == \"L\" {\n\t\t\tif link, err = os.Readlink(file); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ create a new dir\/file header\n\t\theader, err := tar.FileInfoHeader(fi, link)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ If there is no basedir, leave the filename as it is, else\n\t\t\/\/ update the name to reflect the desired destination.\n\t\tif baseDir != \"\" {\n\t\t\theader.Name = filepath.Join(baseDir,\n\t\t\t\tstrings.TrimPrefix(file, src))\n\t\t}\n\n\t\t\/\/ write the header\n\t\tif err := tw.WriteHeader(header); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ return on directories and symlinks since there will be no\n\t\t\/\/ content to tar\n\t\tif !fi.Mode().IsRegular() {\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ open files for taring\n\t\tf, err := os.Open(file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer func() {\n\t\t\tif err := f.Close(); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}()\n\n\t\t\/\/ copy file data into tar writer\n\t\tif _, err := io.Copy(tw, f); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t})\n}\n\n\/\/ UntarGz takes a destination path and a reader; a tar reader loops over the\n\/\/ tarfile creating the file structure at dst along the way, and writes any\n\/\/ files to dst.\nfunc UntarGz(dst string, r io.Reader) error {\n\n\tgzr, err := gzip.NewReader(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer gzr.Close()\n\n\ttr := tar.NewReader(gzr)\n\n\tfor {\n\t\theader, err := tr.Next()\n\n\t\tswitch {\n\t\t\/\/ if no more files are found return\n\t\tcase err == io.EOF:\n\t\t\treturn nil\n\t\t\/\/ return any other error\n\t\tcase err != nil:\n\t\t\treturn err\n\t\t\/\/ if the header is nil, just skip it (not sure how this happens)\n\t\tcase header == nil:\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ the target location where the dir\/file should be created\n\t\ttarget := filepath.Join(dst, header.Name)\n\n\t\t\/\/ check the file type\n\t\tswitch header.Typeflag {\n\n\t\t\/\/ if its a dir and it doesn't exist create it\n\t\tcase tar.TypeDir:\n\t\t\tif _, err := os.Stat(target); err != nil {\n\t\t\t\tif err := os.MkdirAll(target, 0755); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\/\/ os.Stat() won't work on symlinks, so we try to create it anyway.\n\t\tcase tar.TypeSymlink:\n\t\t\terr := os.Symlink(header.Linkname, target)\n\t\t\tif os.IsExist(err) {\n\t\t\t\tfmt.Println(\"Symlink \" + target +\n\t\t\t\t\t\" already exists. Skipping.\")\n\t\t\t} else if err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\/\/ if it's a file create it\n\t\tcase tar.TypeReg:\n\t\t\tf, err := os.OpenFile(target,\n\t\t\t\tos.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ Defer closure in case anything fails\n\t\t\tdefer f.Close()\n\n\t\t\t\/\/ copy over contents\n\t\t\tif _, err := io.Copy(f, tr); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ Close the file explicitly.\n\t\t\t\/\/ Deferred calls execute too late and for large\n\t\t\t\/\/ numbers of files we'd get a \"too many files open\"\n\t\t\t\/\/ panic before the function can return and close\n\t\t\t\/\/ the files.\n\t\t\tf.Close()\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ ************\n\/\/ Inspired by : https:\/\/semaphoreci.com\/community\/tutorials\/building-and-testing-a-rest-api-in-go-with-gorilla-mux-and-postgresql\n\/\/ ************\n\npackage main_test\n\nimport (\n\t\".\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"github.com\/spf13\/viper\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar app main.App\n\nconst tableCreationQuery = `CREATE TABLE IF NOT EXISTS orders\n(\nid SERIAL,\nNAME TEXT NOT NULL,\nprice NUMERIC (10, 2) NOT NULL DEFAULT 0.00,\nCONSTRAINT orders_pkey PRIMARY KEY (id)\n)`\n\nfunc TestMain(m *testing.M) {\n\n\tviper.SetConfigName(\"config\")\n\tviper.AddConfigPath(\".\")\n\tviper.ReadInConfig()\n\n\tapp = main.App{}\n\tapp.Initialize(\n\t\tviper.GetString(\"testing.dbUser\"),\n\t\tviper.GetString(\"testing.dbPass\"),\n\t\tviper.GetString(\"testing.db\"))\n\n\tensureTableExists()\n\tcode := m.Run()\n\t\/\/ clearTable()\n\n\tos.Exit(code)\n}\n\nfunc ensureTableExists() {\n\tif _, err := app.DB.Exec(tableCreationQuery); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc clearTable() {\n\tapp.DB.Exec(\"DELETE FROM orders\")\n\tapp.DB.Exec(\"ALTER SEQUENCE orders_id_seq RESTART WITH 1\")\n}\n\nfunc executeRequest(req *http.Request) *httptest.ResponseRecorder {\n\trr := httptest.NewRecorder()\n\tapp.Router.ServeHTTP(rr, req)\n\n\treturn rr\n}\n\nfunc checkResponseCode(t *testing.T, expected, actual int) {\n\tif expected != actual {\n\t\tt.Errorf(\"Expected response code %d. Got %d\\n\", expected, actual)\n\t}\n}\n\n\/\/ Model: TODOs\nfunc TestGetTodos(t *testing.T) {\n\treq, _ := http.NewRequest(\"GET\", \"\/todos\", nil)\n\tresponse := executeRequest(req)\n\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n}\n\n\/\/ Model: Order\nfunc TestEmptyTable(t *testing.T) {\n\tclearTable()\n\n\treq, _ := http.NewRequest(\"GET\", \"\/orders\", nil)\n\tresponse := executeRequest(req)\n\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n\n\tif body := response.Body.String(); strings.TrimSpace(body) != \"[]\" {\n\t\tt.Errorf(\"Expected an empty array. Got %s\", body)\n\t}\n}\n\nfunc TestGetNonExistentProduct(t *testing.T) {\n\tclearTable()\n\n\treq, _ := http.NewRequest(\"GET\", \"\/order\/999\", nil)\n\tresponse := executeRequest(req)\n\n\tcheckResponseCode(t, http.StatusNotFound, response.Code)\n\n\tvar m map[string]interface{}\n\tjson.Unmarshal(response.Body.Bytes(), &m)\n\n\tif m[\"text\"] != \"order not found\" {\n\t\tt.Errorf(\"Expected the 'text' key of the response to be set to 'order not found'. Got '%s'\", m[\"text\"])\n\t}\n}\n\nfunc TestGetOrders(t *testing.T) {\n\treq, _ := http.NewRequest(\"GET\", \"\/orders\", nil)\n\tresponse := executeRequest(req)\n\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n}\n\nfunc TestGetOrder(t *testing.T) {\n\tclearTable()\n\taddProducts(1)\n\n\treq, _ := http.NewRequest(\"GET\", \"\/order\/1\", nil)\n\tresponse := executeRequest(req)\n\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n}\n\nfunc addProducts(count int) {\n\tif count < 1 {\n\t\tcount = 1\n\t}\n\n\tfor i := 0; i < count; i++ {\n\t\tapp.DB.Exec(\"INSERT INTO orders(name, price) VALUES($1, $2)\", \"Order \"+strconv.Itoa(i), (i+1.0)*10)\n\t}\n}\n\nfunc TestCreateorder(t *testing.T) {\n\tclearTable()\n\tpayload := []byte(`{\"name\": \"test order\", \"price\": 11.22 }`)\n\n\treq, _ := http.NewRequest(\"POST\", \"\/order\", bytes.NewBuffer(payload))\n\tresponse := executeRequest(req)\n\tcheckResponseCode(t, http.StatusCreated, response.Code)\n\n\tvar m map[string]interface{}\n\tjson.Unmarshal(response.Body.Bytes(), &m)\n\n\tif m[\"name\"] != \"test order\" {\n\t\tt.Errorf(\"expected name to be 'test order'. Got %v\", m[\"name\"])\n\t}\n\n\tif m[\"price\"] != 11.22 {\n\t\tt.Errorf(\"expected price to be '11.22'. Got %v\", m[\"price\"])\n\t}\n\n\t\/\/ m[string]interface{} converts int to float\n\tif m[\"id\"] != 1.0 {\n\t\tt.Errorf(\"expected id to be '1.0'. Got %v\", m[\"id\"])\n\t}\n\n}\n\nfunc TestUpdateOrder(t *testing.T) {\n\tclearTable()\n\taddProducts(1)\n\n\treq, _ := http.NewRequest(\"GET\", \"\/order\/1\", nil)\n\tresponse := executeRequest(req)\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n\n\tvar originalOrder map[string]interface{}\n\tjson.Unmarshal(response.Body.Bytes(), &originalOrder)\n\n\tpayload := []byte(`{\"name\": \"updated order\", \"price\": 11.22 }`)\n\n\treq, _ = http.NewRequest(\"PUT\", \"\/order\/1\", bytes.NewBuffer(payload))\n\tresponse = executeRequest(req)\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n\n\tvar m map[string]interface{}\n\tjson.Unmarshal(response.Body.Bytes(), &m)\n\n\tif m[\"id\"] != originalOrder[\"id\"] {\n\t\tt.Errorf(\"Expected the id to remain the same (%v). Got %v\", originalOrder[\"id\"], m[\"id\"])\n\t}\n\n\tif m[\"name\"] == originalOrder[\"name\"] {\n\t\tt.Errorf(\"Expected the name to change from '%v' to '%v'. Got '%v'\", originalOrder[\"name\"], m[\"name\"], m[\"name\"])\n\t}\n\n\tif m[\"price\"] == originalOrder[\"price\"] {\n\t\tt.Errorf(\"Expected the price to change from '%v' to '%v'. Got '%v'\", originalOrder[\"price\"], m[\"price\"], m[\"price\"])\n\t}\n}\n\nfunc TestDeleteOrder(t *testing.T) {\n\tclearTable()\n\taddProducts(1)\n\n\treq, _ := http.NewRequest(\"GET\", \"\/order\/1\", nil)\n\tresponse := executeRequest(req)\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n\n\treq, _ = http.NewRequest(\"DELETE\", \"\/order\/1\", nil)\n\tresponse = executeRequest(req)\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n\n\treq, _ = http.NewRequest(\"GET\", \"\/order\/1\", nil)\n\tresponse = executeRequest(req)\n\tcheckResponseCode(t, http.StatusNotFound, response.Code)\n\n}\n<commit_msg>v2: More test cases<commit_after>\/\/ ************\n\/\/ Inspired by : https:\/\/semaphoreci.com\/community\/tutorials\/building-and-testing-a-rest-api-in-go-with-gorilla-mux-and-postgresql\n\/\/ ************\n\npackage main_test\n\nimport (\n\t\".\"\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"github.com\/spf13\/viper\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar app main.App\n\nconst tableCreationQuery = `CREATE TABLE IF NOT EXISTS orders\n(\nid SERIAL,\nNAME TEXT NOT NULL,\nprice NUMERIC (10, 2) NOT NULL DEFAULT 0.00,\nCONSTRAINT orders_pkey PRIMARY KEY (id)\n)`\n\nfunc TestMain(m *testing.M) {\n\n\tviper.SetConfigName(\"config\")\n\tviper.AddConfigPath(\".\")\n\tviper.ReadInConfig()\n\n\tapp = main.App{}\n\tapp.Initialize(\n\t\tviper.GetString(\"testing.dbUser\"),\n\t\tviper.GetString(\"testing.dbPass\"),\n\t\tviper.GetString(\"testing.db\"))\n\n\tensureTableExists()\n\tcode := m.Run()\n\tclearTable()\n\n\tos.Exit(code)\n}\n\nfunc ensureTableExists() {\n\tif _, err := app.DB.Exec(tableCreationQuery); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc clearTable() {\n\tapp.DB.Exec(\"DELETE FROM orders\")\n\tapp.DB.Exec(\"ALTER SEQUENCE orders_id_seq RESTART WITH 1\")\n}\n\nfunc executeRequest(req *http.Request) *httptest.ResponseRecorder {\n\trr := httptest.NewRecorder()\n\tapp.Router.ServeHTTP(rr, req)\n\n\treturn rr\n}\n\nfunc checkResponseCode(t *testing.T, expected, actual int) {\n\tif expected != actual {\n\t\tt.Errorf(\"Expected response code %d. Got %d\\n\", expected, actual)\n\t}\n}\n\n\/\/ Model: TODOs\nfunc TestGetTodos(t *testing.T) {\n\treq, _ := http.NewRequest(\"GET\", \"\/todos\", nil)\n\tresponse := executeRequest(req)\n\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n}\n\n\/\/ Model: Order\nfunc TestEmptyTable(t *testing.T) {\n\tclearTable()\n\n\treq, _ := http.NewRequest(\"GET\", \"\/orders\", nil)\n\tresponse := executeRequest(req)\n\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n\n\tif body := response.Body.String(); strings.TrimSpace(body) != \"[]\" {\n\t\tt.Errorf(\"Expected an empty array. Got %s\", body)\n\t}\n}\n\nfunc TestGetNonExistentProduct(t *testing.T) {\n\tclearTable()\n\n\treq, _ := http.NewRequest(\"GET\", \"\/order\/999\", nil)\n\tresponse := executeRequest(req)\n\n\tcheckResponseCode(t, http.StatusNotFound, response.Code)\n\n\tvar m map[string]interface{}\n\tjson.Unmarshal(response.Body.Bytes(), &m)\n\n\tif m[\"text\"] != \"order not found\" {\n\t\tt.Errorf(\"Expected the 'text' key of the response to be set to 'order not found'. Got '%s'\", m[\"text\"])\n\t}\n}\n\nfunc TestGetOrders(t *testing.T) {\n\treq, _ := http.NewRequest(\"GET\", \"\/orders\", nil)\n\tresponse := executeRequest(req)\n\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n}\n\nfunc TestGetOrder(t *testing.T) {\n\tclearTable()\n\taddProducts(1)\n\n\treq, _ := http.NewRequest(\"GET\", \"\/order\/1\", nil)\n\tresponse := executeRequest(req)\n\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n}\n\nfunc addProducts(count int) {\n\tif count < 1 {\n\t\tcount = 1\n\t}\n\n\tfor i := 0; i < count; i++ {\n\t\tapp.DB.Exec(\"INSERT INTO orders(name, price) VALUES($1, $2)\", \"Order \"+strconv.Itoa(i), (i+1.0)*10)\n\t}\n}\n\nfunc TestCreateorder(t *testing.T) {\n\tclearTable()\n\tpayload := []byte(`{\"name\": \"test order\", \"price\": 11.22 }`)\n\n\treq, _ := http.NewRequest(\"POST\", \"\/order\", bytes.NewBuffer(payload))\n\tresponse := executeRequest(req)\n\tcheckResponseCode(t, http.StatusCreated, response.Code)\n\n\tvar m map[string]interface{}\n\tjson.Unmarshal(response.Body.Bytes(), &m)\n\n\tif m[\"name\"] != \"test order\" {\n\t\tt.Errorf(\"expected name to be 'test order'. Got %v\", m[\"name\"])\n\t}\n\n\tif m[\"price\"] != 11.22 {\n\t\tt.Errorf(\"expected price to be '11.22'. Got %v\", m[\"price\"])\n\t}\n\n\t\/\/ m[string]interface{} converts int to float\n\tif m[\"id\"] != 1.0 {\n\t\tt.Errorf(\"expected id to be '1.0'. Got %v\", m[\"id\"])\n\t}\n\n}\n\nfunc TestUpdateOrder(t *testing.T) {\n\tclearTable()\n\taddProducts(1)\n\n\treq, _ := http.NewRequest(\"GET\", \"\/order\/1\", nil)\n\tresponse := executeRequest(req)\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n\n\tvar originalOrder map[string]interface{}\n\tjson.Unmarshal(response.Body.Bytes(), &originalOrder)\n\n\tpayload := []byte(`{\"name\": \"updated order\", \"price\": 11.22 }`)\n\n\treq, _ = http.NewRequest(\"PUT\", \"\/order\/1\", bytes.NewBuffer(payload))\n\tresponse = executeRequest(req)\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n\n\tvar m map[string]interface{}\n\tjson.Unmarshal(response.Body.Bytes(), &m)\n\n\tif m[\"id\"] != originalOrder[\"id\"] {\n\t\tt.Errorf(\"Expected the id to remain the same (%v). Got %v\", originalOrder[\"id\"], m[\"id\"])\n\t}\n\n\tif m[\"name\"] == originalOrder[\"name\"] {\n\t\tt.Errorf(\"Expected the name to change from '%v' to '%v'. Got '%v'\", originalOrder[\"name\"], m[\"name\"], m[\"name\"])\n\t}\n\n\tif m[\"price\"] == originalOrder[\"price\"] {\n\t\tt.Errorf(\"Expected the price to change from '%v' to '%v'. Got '%v'\", originalOrder[\"price\"], m[\"price\"], m[\"price\"])\n\t}\n}\n\nfunc TestDeleteOrder(t *testing.T) {\n\tclearTable()\n\taddProducts(1)\n\n\treq, _ := http.NewRequest(\"GET\", \"\/order\/1\", nil)\n\tresponse := executeRequest(req)\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n\n\treq, _ = http.NewRequest(\"DELETE\", \"\/order\/1\", nil)\n\tresponse = executeRequest(req)\n\tcheckResponseCode(t, http.StatusOK, response.Code)\n\n\treq, _ = http.NewRequest(\"GET\", \"\/order\/1\", nil)\n\tresponse = executeRequest(req)\n\tcheckResponseCode(t, http.StatusNotFound, response.Code)\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2020 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage common\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n\n\tprowv1 \"k8s.io\/test-infra\/prow\/apis\/prowjobs\/v1\"\n\t\"k8s.io\/test-infra\/prow\/config\"\n\t\"k8s.io\/test-infra\/prow\/io\/providers\"\n\t\"k8s.io\/test-infra\/prow\/spyglass\/api\"\n)\n\nvar lensTemplate = template.Must(template.New(\"sg\").Parse(string(MustAsset(\"static\/spyglass-lens.html\"))))\nvar buildLogRegex = regexp.MustCompile(`^(?:[^\/]*-)?build-log\\.txt$`)\n\ntype LensWithConfiguration struct {\n\tConfig LensOpt\n\tLens api.Lens\n}\n\nfunc NewLensServer(\n\tlistenAddress string,\n\tpjFetcher ProwJobFetcher,\n\tstorageArtifactFetcher ArtifactFetcher,\n\tpodLogArtifactFetcher ArtifactFetcher,\n\tcfg config.Getter,\n\tlenses []LensWithConfiguration,\n) (*http.Server, error) {\n\n\tmux := http.NewServeMux()\n\n\tseenLens := sets.String{}\n\tfor _, lens := range lenses {\n\t\tif seenLens.Has(lens.Config.LensName) {\n\t\t\treturn nil, fmt.Errorf(\"duplicate lens named %q\", lens.Config.LensName)\n\t\t}\n\t\tseenLens.Insert(lens.Config.LensName)\n\n\t\tlogrus.WithField(\"Lens\", lens.Config.LensName).Info(\"Adding handler for lens\")\n\t\topt := lensHandlerOpts{\n\t\t\tPJFetcher: pjFetcher,\n\t\t\tStorageArtifactFetcher: storageArtifactFetcher,\n\t\t\tPodLogArtifactFetcher: podLogArtifactFetcher,\n\t\t\tConfigGetter: cfg,\n\t\t\tLensOpt: lens.Config,\n\t\t}\n\t\tmux.Handle(DyanmicPathForLens(lens.Config.LensName), newLensHandler(lens.Lens, opt))\n\t}\n\tmux.Handle(\"\/\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlogrus.WithField(\"path\", r.URL.Path).Error(\"LensServer got request on unhandled path\")\n\t\thttp.NotFound(w, r)\n\t}))\n\n\treturn &http.Server{Addr: listenAddress, Handler: mux}, nil\n}\n\ntype LensOpt struct {\n\tLensResourcesDir string\n\tLensName string\n\tLensTitle string\n}\n\ntype lensHandlerOpts struct {\n\tPJFetcher ProwJobFetcher\n\tStorageArtifactFetcher ArtifactFetcher\n\tPodLogArtifactFetcher ArtifactFetcher\n\tConfigGetter config.Getter\n\tLensOpt\n}\n\nfunc newLensHandler(lens api.Lens, opts lensHandlerOpts) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\twriteHTTPError(w, fmt.Errorf(\"failed to read request body: %w\", err), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\trequest := &api.LensRequest{}\n\t\tif err := json.Unmarshal(body, request); err != nil {\n\t\t\twriteHTTPError(w, fmt.Errorf(\"failed to unmarshal request: %w\", err), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tartifacts, err := FetchArtifacts(r.Context(), opts.PJFetcher, opts.ConfigGetter, opts.StorageArtifactFetcher, opts.PodLogArtifactFetcher, request.ArtifactSource, \"\", opts.ConfigGetter().Deck.Spyglass.SizeLimit, request.Artifacts)\n\t\tif err != nil || len(artifacts) == 0 {\n\t\t\tstatusCode := http.StatusInternalServerError\n\t\t\tif len(artifacts) == 0 {\n\t\t\t\tstatusCode = http.StatusNotFound\n\t\t\t\terr = errors.New(\"no artifacts found\")\n\t\t\t}\n\n\t\t\twriteHTTPError(w, fmt.Errorf(\"failed to retrieve expected artifacts: %w\", err), statusCode)\n\t\t\treturn\n\t\t}\n\n\t\tswitch request.Action {\n\t\tcase api.RequestActionInitial:\n\t\t\tw.Header().Set(\"Content-Type\", \"text\/html; encoding=utf-8\")\n\t\t\tlensTemplate.Execute(w, struct {\n\t\t\t\tTitle string\n\t\t\t\tBaseURL string\n\t\t\t\tHead template.HTML\n\t\t\t\tBody template.HTML\n\t\t\t}{\n\t\t\t\topts.LensTitle,\n\t\t\t\trequest.ResourceRoot,\n\t\t\t\ttemplate.HTML(lens.Header(artifacts, opts.LensResourcesDir, opts.ConfigGetter().Deck.Spyglass.Lenses[request.LensIndex].Lens.Config)),\n\t\t\t\ttemplate.HTML(lens.Body(artifacts, opts.LensResourcesDir, \"\", opts.ConfigGetter().Deck.Spyglass.Lenses[request.LensIndex].Lens.Config)),\n\t\t\t})\n\n\t\tcase api.RequestActionRerender:\n\t\t\tw.Header().Set(\"Content-Type\", \"text\/html; encoding=utf-8\")\n\t\t\tw.Write([]byte(lens.Body(artifacts, opts.LensResourcesDir, request.Data, opts.ConfigGetter().Deck.Spyglass.Lenses[request.LensIndex].Lens.Config)))\n\n\t\tcase api.RequestActionCallBack:\n\t\t\tw.Write([]byte(lens.Callback(artifacts, opts.LensResourcesDir, request.Data, opts.ConfigGetter().Deck.Spyglass.Lenses[request.LensIndex].Lens.Config)))\n\n\t\tdefault:\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\t\/\/ This is a bit weird as we proxy this and the request we are complaining about was issued by Deck, not by the original client that sees this error\n\t\t\tw.Write([]byte(fmt.Sprintf(\"Invalid action %q\", request.Action)))\n\t\t}\n\t}\n}\n\nfunc writeHTTPError(w http.ResponseWriter, err error, statusCode int) {\n\tif statusCode == 0 {\n\t\tstatusCode = http.StatusInternalServerError\n\t}\n\tlogrus.WithError(err).WithField(\"statusCode\", statusCode).Debug(\"Failed to process request\")\n\tw.WriteHeader(statusCode)\n\tif _, err := w.Write([]byte(err.Error())); err != nil {\n\t\tlogrus.WithError(err).Error(\"Failed to write response\")\n\t}\n}\n\n\/\/ ArtifactFetcher knows how to fetch artifacts\ntype ArtifactFetcher interface {\n\tArtifact(ctx context.Context, key string, artifactName string, sizeLimit int64) (api.Artifact, error)\n}\n\n\/\/ FetchArtifacts fetches artifacts.\n\/\/ TODO: Unexport once we only have remote lenses\nfunc FetchArtifacts(\n\tctx context.Context,\n\tpjFetcher ProwJobFetcher,\n\tcfg config.Getter,\n\tstorageArtifactFetcher ArtifactFetcher,\n\tpodLogArtifactFetcher ArtifactFetcher,\n\tsrc string,\n\tpodName string,\n\tsizeLimit int64,\n\tartifactNames []string,\n) ([]api.Artifact, error) {\n\tartStart := time.Now()\n\tarts := []api.Artifact{}\n\tkeyType, key, err := splitSrc(src)\n\tif err != nil {\n\t\treturn arts, fmt.Errorf(\"error parsing src: %v\", err)\n\t}\n\tgcsKey := \"\"\n\tswitch keyType {\n\tcase api.ProwKeyType:\n\t\tstorageProvider, key, err := ProwToGCS(pjFetcher, cfg, key)\n\t\tif err != nil {\n\t\t\tlogrus.Warningln(err)\n\t\t}\n\t\tgcsKey = fmt.Sprintf(\"%s:\/\/%s\", storageProvider, strings.TrimSuffix(key, \"\/\"))\n\tdefault:\n\t\tif keyType == api.GCSKeyType {\n\t\t\tkeyType = providers.GS\n\t\t}\n\t\tgcsKey = fmt.Sprintf(\"%s:\/\/%s\", keyType, strings.TrimSuffix(key, \"\/\"))\n\t}\n\n\tlogsNeeded := []string{}\n\n\tfor _, name := range artifactNames {\n\t\tart, err := storageArtifactFetcher.Artifact(ctx, gcsKey, name, sizeLimit)\n\t\tif err == nil {\n\t\t\t\/\/ Actually try making a request, because calling StorageArtifactFetcher.artifact does no I\/O.\n\t\t\t\/\/ (these files are being explicitly requested and so will presumably soon be accessed, so\n\t\t\t\/\/ the extra network I\/O should not be too problematic).\n\t\t\t_, err = art.Size()\n\t\t}\n\t\tif err != nil {\n\t\t\tif buildLogRegex.MatchString(name) {\n\t\t\t\tlogsNeeded = append(logsNeeded, name)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tarts = append(arts, art)\n\t}\n\n\tfor _, logName := range logsNeeded {\n\t\tart, err := podLogArtifactFetcher.Artifact(ctx, src, logName, sizeLimit)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"Failed to fetch pod log: %v\", err)\n\t\t} else {\n\t\t\tarts = append(arts, art)\n\t\t}\n\t}\n\n\tlogrus.WithField(\"duration\", time.Since(artStart).String()).Infof(\"Retrieved artifacts for %v\", src)\n\treturn arts, nil\n}\n\n\/\/ ProwJobFetcher knows how to get a ProwJob\ntype ProwJobFetcher interface {\n\tGetProwJob(job string, id string) (prowv1.ProwJob, error)\n}\n\n\/\/ prowToGCS returns the GCS key corresponding to the given prow key\n\/\/ TODO: Unexport once we only have remote lenses\nfunc ProwToGCS(fetcher ProwJobFetcher, config config.Getter, prowKey string) (string, string, error) {\n\tjobName, buildID, err := KeyToJob(prowKey)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"could not get GCS src: %v\", err)\n\t}\n\n\tjob, err := fetcher.GetProwJob(jobName, buildID)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"failed to get prow job from src %q: %v\", prowKey, err)\n\t}\n\n\turl := job.Status.URL\n\tprefix := config().Plank.GetJobURLPrefix(job.Spec.Refs)\n\tif !strings.HasPrefix(url, prefix) {\n\t\treturn \"\", \"\", fmt.Errorf(\"unexpected job URL %q when finding GCS path: expected something starting with %q\", url, prefix)\n\t}\n\n\t\/\/ example:\n\t\/\/ * url: https:\/\/prow.k8s.io\/view\/gs\/kubernetes-jenkins\/logs\/ci-benchmark-microbenchmarks\/1258197944759226371\n\t\/\/ * prefix: https:\/\/prow.k8s.io\/view\/\n\t\/\/ * storagePath: gs\/kubernetes-jenkins\/logs\/ci-benchmark-microbenchmarks\/1258197944759226371\n\tstoragePath := strings.TrimPrefix(url, prefix)\n\tif strings.HasPrefix(storagePath, api.GCSKeyType) {\n\t\tstoragePath = strings.Replace(storagePath, api.GCSKeyType, providers.GS, 1)\n\t}\n\tstoragePathWithoutProvider := storagePath\n\tstoragePathSegments := strings.SplitN(storagePath, \"\/\", 2)\n\tif providers.HasStorageProviderPrefix(storagePath) {\n\t\tstoragePathWithoutProvider = storagePathSegments[1]\n\t}\n\n\t\/\/ try to parse storageProvider from DecorationConfig.GCSConfiguration.Bucket\n\t\/\/ if it doesn't work fallback to URL parsing\n\tif job.Spec.DecorationConfig != nil && job.Spec.DecorationConfig.GCSConfiguration != nil {\n\t\tprowPath, err := prowv1.ParsePath(job.Spec.DecorationConfig.GCSConfiguration.Bucket)\n\t\tif err == nil {\n\t\t\treturn prowPath.StorageProvider(), storagePathWithoutProvider, nil\n\t\t}\n\t\tlogrus.Warnf(\"Could not parse storageProvider from DecorationConfig.GCSConfiguration.Bucket = %s: %v\", job.Spec.DecorationConfig.GCSConfiguration.Bucket, err)\n\t}\n\n\treturn storagePathSegments[0], storagePathWithoutProvider, nil\n}\n\nfunc splitSrc(src string) (keyType, key string, err error) {\n\tsplit := strings.SplitN(src, \"\/\", 2)\n\tif len(split) < 2 {\n\t\terr = fmt.Errorf(\"invalid src %s: expected <key-type>\/<key>\", src)\n\t\treturn\n\t}\n\tkeyType = split[0]\n\tkey = split[1]\n\treturn\n}\n\n\/\/ keyToJob takes a spyglass URL and returns the jobName and buildID.\nfunc KeyToJob(src string) (jobName string, buildID string, err error) {\n\tsrc = strings.Trim(src, \"\/\")\n\tparsed := strings.Split(src, \"\/\")\n\tif len(parsed) < 2 {\n\t\treturn \"\", \"\", fmt.Errorf(\"expected at least two path components in %q\", src)\n\t}\n\tjobName = parsed[len(parsed)-2]\n\tbuildID = parsed[len(parsed)-1]\n\treturn jobName, buildID, nil\n}\n\nconst prefixSpyglassDynamicHandlers = \"dynamic\"\n\nfunc DyanmicPathForLens(lensName string) string {\n\treturn fmt.Sprintf(\"\/%s\/%s\", prefixSpyglassDynamicHandlers, lensName)\n}\n<commit_msg>Spyglass: Log an error when artifact fetching fails<commit_after>\/*\nCopyright 2020 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage common\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/sets\"\n\n\tprowv1 \"k8s.io\/test-infra\/prow\/apis\/prowjobs\/v1\"\n\t\"k8s.io\/test-infra\/prow\/config\"\n\t\"k8s.io\/test-infra\/prow\/io\/providers\"\n\t\"k8s.io\/test-infra\/prow\/spyglass\/api\"\n)\n\nvar lensTemplate = template.Must(template.New(\"sg\").Parse(string(MustAsset(\"static\/spyglass-lens.html\"))))\nvar buildLogRegex = regexp.MustCompile(`^(?:[^\/]*-)?build-log\\.txt$`)\n\ntype LensWithConfiguration struct {\n\tConfig LensOpt\n\tLens api.Lens\n}\n\nfunc NewLensServer(\n\tlistenAddress string,\n\tpjFetcher ProwJobFetcher,\n\tstorageArtifactFetcher ArtifactFetcher,\n\tpodLogArtifactFetcher ArtifactFetcher,\n\tcfg config.Getter,\n\tlenses []LensWithConfiguration,\n) (*http.Server, error) {\n\n\tmux := http.NewServeMux()\n\n\tseenLens := sets.String{}\n\tfor _, lens := range lenses {\n\t\tif seenLens.Has(lens.Config.LensName) {\n\t\t\treturn nil, fmt.Errorf(\"duplicate lens named %q\", lens.Config.LensName)\n\t\t}\n\t\tseenLens.Insert(lens.Config.LensName)\n\n\t\tlogrus.WithField(\"Lens\", lens.Config.LensName).Info(\"Adding handler for lens\")\n\t\topt := lensHandlerOpts{\n\t\t\tPJFetcher: pjFetcher,\n\t\t\tStorageArtifactFetcher: storageArtifactFetcher,\n\t\t\tPodLogArtifactFetcher: podLogArtifactFetcher,\n\t\t\tConfigGetter: cfg,\n\t\t\tLensOpt: lens.Config,\n\t\t}\n\t\tmux.Handle(DyanmicPathForLens(lens.Config.LensName), newLensHandler(lens.Lens, opt))\n\t}\n\tmux.Handle(\"\/\", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlogrus.WithField(\"path\", r.URL.Path).Error(\"LensServer got request on unhandled path\")\n\t\thttp.NotFound(w, r)\n\t}))\n\n\treturn &http.Server{Addr: listenAddress, Handler: mux}, nil\n}\n\ntype LensOpt struct {\n\tLensResourcesDir string\n\tLensName string\n\tLensTitle string\n}\n\ntype lensHandlerOpts struct {\n\tPJFetcher ProwJobFetcher\n\tStorageArtifactFetcher ArtifactFetcher\n\tPodLogArtifactFetcher ArtifactFetcher\n\tConfigGetter config.Getter\n\tLensOpt\n}\n\nfunc newLensHandler(lens api.Lens, opts lensHandlerOpts) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\twriteHTTPError(w, fmt.Errorf(\"failed to read request body: %w\", err), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\trequest := &api.LensRequest{}\n\t\tif err := json.Unmarshal(body, request); err != nil {\n\t\t\twriteHTTPError(w, fmt.Errorf(\"failed to unmarshal request: %w\", err), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tartifacts, err := FetchArtifacts(r.Context(), opts.PJFetcher, opts.ConfigGetter, opts.StorageArtifactFetcher, opts.PodLogArtifactFetcher, request.ArtifactSource, \"\", opts.ConfigGetter().Deck.Spyglass.SizeLimit, request.Artifacts)\n\t\tif err != nil || len(artifacts) == 0 {\n\t\t\tstatusCode := http.StatusInternalServerError\n\t\t\tif len(artifacts) == 0 {\n\t\t\t\tstatusCode = http.StatusNotFound\n\t\t\t\terr = errors.New(\"no artifacts found\")\n\t\t\t}\n\n\t\t\twriteHTTPError(w, fmt.Errorf(\"failed to retrieve expected artifacts: %w\", err), statusCode)\n\t\t\treturn\n\t\t}\n\n\t\tswitch request.Action {\n\t\tcase api.RequestActionInitial:\n\t\t\tw.Header().Set(\"Content-Type\", \"text\/html; encoding=utf-8\")\n\t\t\tlensTemplate.Execute(w, struct {\n\t\t\t\tTitle string\n\t\t\t\tBaseURL string\n\t\t\t\tHead template.HTML\n\t\t\t\tBody template.HTML\n\t\t\t}{\n\t\t\t\topts.LensTitle,\n\t\t\t\trequest.ResourceRoot,\n\t\t\t\ttemplate.HTML(lens.Header(artifacts, opts.LensResourcesDir, opts.ConfigGetter().Deck.Spyglass.Lenses[request.LensIndex].Lens.Config)),\n\t\t\t\ttemplate.HTML(lens.Body(artifacts, opts.LensResourcesDir, \"\", opts.ConfigGetter().Deck.Spyglass.Lenses[request.LensIndex].Lens.Config)),\n\t\t\t})\n\n\t\tcase api.RequestActionRerender:\n\t\t\tw.Header().Set(\"Content-Type\", \"text\/html; encoding=utf-8\")\n\t\t\tw.Write([]byte(lens.Body(artifacts, opts.LensResourcesDir, request.Data, opts.ConfigGetter().Deck.Spyglass.Lenses[request.LensIndex].Lens.Config)))\n\n\t\tcase api.RequestActionCallBack:\n\t\t\tw.Write([]byte(lens.Callback(artifacts, opts.LensResourcesDir, request.Data, opts.ConfigGetter().Deck.Spyglass.Lenses[request.LensIndex].Lens.Config)))\n\n\t\tdefault:\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\t\/\/ This is a bit weird as we proxy this and the request we are complaining about was issued by Deck, not by the original client that sees this error\n\t\t\tw.Write([]byte(fmt.Sprintf(\"Invalid action %q\", request.Action)))\n\t\t}\n\t}\n}\n\nfunc writeHTTPError(w http.ResponseWriter, err error, statusCode int) {\n\tif statusCode == 0 {\n\t\tstatusCode = http.StatusInternalServerError\n\t}\n\tlogrus.WithError(err).WithField(\"statusCode\", statusCode).Debug(\"Failed to process request\")\n\tw.WriteHeader(statusCode)\n\tif _, err := w.Write([]byte(err.Error())); err != nil {\n\t\tlogrus.WithError(err).Error(\"Failed to write response\")\n\t}\n}\n\n\/\/ ArtifactFetcher knows how to fetch artifacts\ntype ArtifactFetcher interface {\n\tArtifact(ctx context.Context, key string, artifactName string, sizeLimit int64) (api.Artifact, error)\n}\n\n\/\/ FetchArtifacts fetches artifacts.\n\/\/ TODO: Unexport once we only have remote lenses\nfunc FetchArtifacts(\n\tctx context.Context,\n\tpjFetcher ProwJobFetcher,\n\tcfg config.Getter,\n\tstorageArtifactFetcher ArtifactFetcher,\n\tpodLogArtifactFetcher ArtifactFetcher,\n\tsrc string,\n\tpodName string,\n\tsizeLimit int64,\n\tartifactNames []string,\n) ([]api.Artifact, error) {\n\tartStart := time.Now()\n\tarts := []api.Artifact{}\n\tkeyType, key, err := splitSrc(src)\n\tif err != nil {\n\t\treturn arts, fmt.Errorf(\"error parsing src: %v\", err)\n\t}\n\tgcsKey := \"\"\n\tswitch keyType {\n\tcase api.ProwKeyType:\n\t\tstorageProvider, key, err := ProwToGCS(pjFetcher, cfg, key)\n\t\tif err != nil {\n\t\t\tlogrus.Warningln(err)\n\t\t}\n\t\tgcsKey = fmt.Sprintf(\"%s:\/\/%s\", storageProvider, strings.TrimSuffix(key, \"\/\"))\n\tdefault:\n\t\tif keyType == api.GCSKeyType {\n\t\t\tkeyType = providers.GS\n\t\t}\n\t\tgcsKey = fmt.Sprintf(\"%s:\/\/%s\", keyType, strings.TrimSuffix(key, \"\/\"))\n\t}\n\n\tlogsNeeded := []string{}\n\n\tfor _, name := range artifactNames {\n\t\tart, err := storageArtifactFetcher.Artifact(ctx, gcsKey, name, sizeLimit)\n\t\tif err == nil {\n\t\t\t\/\/ Actually try making a request, because calling StorageArtifactFetcher.artifact does no I\/O.\n\t\t\t\/\/ (these files are being explicitly requested and so will presumably soon be accessed, so\n\t\t\t\/\/ the extra network I\/O should not be too problematic).\n\t\t\t_, err = art.Size()\n\t\t}\n\t\tif err != nil {\n\t\t\tif buildLogRegex.MatchString(name) {\n\t\t\t\tlogsNeeded = append(logsNeeded, name)\n\t\t\t}\n\t\t\tlogrus.WithError(err).WithField(\"artifact\", name).Error(\"Failed to fetch artifact\")\n\t\t\tcontinue\n\t\t}\n\t\tarts = append(arts, art)\n\t}\n\n\tfor _, logName := range logsNeeded {\n\t\tart, err := podLogArtifactFetcher.Artifact(ctx, src, logName, sizeLimit)\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"Failed to fetch pod log: %v\", err)\n\t\t} else {\n\t\t\tarts = append(arts, art)\n\t\t}\n\t}\n\n\tlogrus.WithField(\"duration\", time.Since(artStart).String()).Infof(\"Retrieved artifacts for %v\", src)\n\treturn arts, nil\n}\n\n\/\/ ProwJobFetcher knows how to get a ProwJob\ntype ProwJobFetcher interface {\n\tGetProwJob(job string, id string) (prowv1.ProwJob, error)\n}\n\n\/\/ prowToGCS returns the GCS key corresponding to the given prow key\n\/\/ TODO: Unexport once we only have remote lenses\nfunc ProwToGCS(fetcher ProwJobFetcher, config config.Getter, prowKey string) (string, string, error) {\n\tjobName, buildID, err := KeyToJob(prowKey)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"could not get GCS src: %v\", err)\n\t}\n\n\tjob, err := fetcher.GetProwJob(jobName, buildID)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"failed to get prow job from src %q: %v\", prowKey, err)\n\t}\n\n\turl := job.Status.URL\n\tprefix := config().Plank.GetJobURLPrefix(job.Spec.Refs)\n\tif !strings.HasPrefix(url, prefix) {\n\t\treturn \"\", \"\", fmt.Errorf(\"unexpected job URL %q when finding GCS path: expected something starting with %q\", url, prefix)\n\t}\n\n\t\/\/ example:\n\t\/\/ * url: https:\/\/prow.k8s.io\/view\/gs\/kubernetes-jenkins\/logs\/ci-benchmark-microbenchmarks\/1258197944759226371\n\t\/\/ * prefix: https:\/\/prow.k8s.io\/view\/\n\t\/\/ * storagePath: gs\/kubernetes-jenkins\/logs\/ci-benchmark-microbenchmarks\/1258197944759226371\n\tstoragePath := strings.TrimPrefix(url, prefix)\n\tif strings.HasPrefix(storagePath, api.GCSKeyType) {\n\t\tstoragePath = strings.Replace(storagePath, api.GCSKeyType, providers.GS, 1)\n\t}\n\tstoragePathWithoutProvider := storagePath\n\tstoragePathSegments := strings.SplitN(storagePath, \"\/\", 2)\n\tif providers.HasStorageProviderPrefix(storagePath) {\n\t\tstoragePathWithoutProvider = storagePathSegments[1]\n\t}\n\n\t\/\/ try to parse storageProvider from DecorationConfig.GCSConfiguration.Bucket\n\t\/\/ if it doesn't work fallback to URL parsing\n\tif job.Spec.DecorationConfig != nil && job.Spec.DecorationConfig.GCSConfiguration != nil {\n\t\tprowPath, err := prowv1.ParsePath(job.Spec.DecorationConfig.GCSConfiguration.Bucket)\n\t\tif err == nil {\n\t\t\treturn prowPath.StorageProvider(), storagePathWithoutProvider, nil\n\t\t}\n\t\tlogrus.Warnf(\"Could not parse storageProvider from DecorationConfig.GCSConfiguration.Bucket = %s: %v\", job.Spec.DecorationConfig.GCSConfiguration.Bucket, err)\n\t}\n\n\treturn storagePathSegments[0], storagePathWithoutProvider, nil\n}\n\nfunc splitSrc(src string) (keyType, key string, err error) {\n\tsplit := strings.SplitN(src, \"\/\", 2)\n\tif len(split) < 2 {\n\t\terr = fmt.Errorf(\"invalid src %s: expected <key-type>\/<key>\", src)\n\t\treturn\n\t}\n\tkeyType = split[0]\n\tkey = split[1]\n\treturn\n}\n\n\/\/ keyToJob takes a spyglass URL and returns the jobName and buildID.\nfunc KeyToJob(src string) (jobName string, buildID string, err error) {\n\tsrc = strings.Trim(src, \"\/\")\n\tparsed := strings.Split(src, \"\/\")\n\tif len(parsed) < 2 {\n\t\treturn \"\", \"\", fmt.Errorf(\"expected at least two path components in %q\", src)\n\t}\n\tjobName = parsed[len(parsed)-2]\n\tbuildID = parsed[len(parsed)-1]\n\treturn jobName, buildID, nil\n}\n\nconst prefixSpyglassDynamicHandlers = \"dynamic\"\n\nfunc DyanmicPathForLens(lensName string) string {\n\treturn fmt.Sprintf(\"\/%s\/%s\", prefixSpyglassDynamicHandlers, lensName)\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Removed old hot fix on eof<commit_after><|endoftext|>"} {"text":"<commit_before>package client\n\nimport \"github.com\/MustWin\/baremetal-sdk-go\"\n\ntype BareMetalClient interface {\n\tCreateUser(name, description string, options ...baremetal.Options) (*baremetal.IdentityResource, error)\n\tGetUser(userID string) (*baremetal.IdentityResource, error)\n\tUpdateUser(userID, userDescription string, opts ...baremetal.Options) (*baremetal.IdentityResource, error)\n\tDeleteUser(userID string, opts ...baremetal.Options) error\n\n\tCreateGroup(name, description string, options ...baremetal.Options) (*baremetal.IdentityResource, error)\n\tGetGroup(userID string) (*baremetal.IdentityResource, error)\n\tUpdateGroup(userID, userDescription string, opts ...baremetal.Options) (*baremetal.IdentityResource, error)\n\tDeleteGroup(userID string, opts ...baremetal.Options) error\n\n\tCreatePolicy(name, description string, statements []string, opts ...baremetal.Options) (*baremetal.Policy, error)\n\tGetPolicy(id string) (*baremetal.Policy, error)\n\tUpdatePolicy(id, description string, statements []string, opts ...baremetal.Options) (*baremetal.Policy, error)\n\tDeletePolicy(id string, opts ...baremetal.Options) error\n\n\tCreateCompartment(name, description string, options ...baremetal.Options) (*baremetal.IdentityResource, error)\n\tGetCompartment(userID string) (*baremetal.IdentityResource, error)\n\tUpdateCompartment(userID, userDescription string, opts ...baremetal.Options) (*baremetal.IdentityResource, error)\n\n\tListShapes(compartmentID string, opt ...baremetal.Options) (*baremetal.ListShapes, error)\n\tListVnicAttachments(compartmentID string, opt ...baremetal.Options) (*baremetal.ListVnicAttachments, error)\n\n\tCreateCpe(compartmentID, displayName, IPAddress string, opts ...baremetal.Options) (cpe *baremetal.Cpe, e error)\n\tGetCpe(id string, opts ...baremetal.Options) (cpe *baremetal.Cpe, e error)\n\tDeleteCpe(id string, opts ...baremetal.Options) (e error)\n\tListCpes(compartmentID string, opts ...baremetal.Options) (cpes *baremetal.ListCpes, e error)\n\n\tCreateVolume(availabiltyDomain, compartmentID string, opts ...baremetal.Options) (vol *baremetal.Volume, e error)\n\tGetVolume(id string, opts ...baremetal.Options) (vol *baremetal.Volume, e error)\n\tUpdateVolume(id string, opts ...baremetal.Options) (vol *baremetal.Volume, e error)\n\tDeleteVolume(id string, opts ...baremetal.Options) (e error)\n\tListVolumes(compartmentID string, opts ...baremetal.Options) (vols *baremetal.ListVolumes, e error)\n\n\tLaunchInstance(availabilityDomain, compartmentID, image, shape, subnetID string, metadata map[string]string, opts ...baremetal.Options) (inst *baremetal.Instance, e error)\n\tGetInstance(instanceID string) (inst *baremetal.Instance, e error)\n\tUpdateInstance(instanceID string, opts ...baremetal.Options) (inst *baremetal.Instance, e error)\n\tTerminateInstance(instanceID string, opts ...baremetal.Options) (e error)\n\tListInstances(compartmentID string, opts ...baremetal.Options) (list *baremetal.ListInstances, e error)\n\n\tAttachVolume(compartmentID, instanceID, attachmentType, volumeID string, opts ...baremetal.Options) (vol *baremetal.VolumeAttachment, e error)\n\tGetVolumeAttachment(id string, opts ...baremetal.Options) (vol *baremetal.VolumeAttachment, e error)\n\tDetachVolume(id string, opts ...baremetal.Options) (e error)\n\tListVolumeAttachments(compartmentID string, opts ...baremetal.Options) (res *baremetal.ListVolumeAttachments, e error)\n\n\tCreateSubnet(availabilityDomain, cidrBlock, compartmentID, routeTableID, vcnID string, securityListIDs []string, opts ...baremetal.Options) (*baremetal.Subnet, error)\n\tGetSubnet(subnetID string) (sn *baremetal.Subnet, e error)\n\tListSubnets(compartmentID, vcnID string, opts ...baremetal.Options) (*baremetal.ListSubnets, error)\n\tDeleteSubnet(subnetID string, opts ...baremetal.Options) error\n\n\tCreateVirtualNetwork(cidrBlock, compartmentID string, opts ...baremetal.Options) (*baremetal.VirtualNetwork, error)\n\tGetVirtualNetwork(id string, opts ...baremetal.Options) (vcn *baremetal.VirtualNetwork, e error)\n\tDeleteVirtualNetwork(id string, opts ...baremetal.Options) error\n\tListVirtualNetworks(compartmentID string, opts ...baremetal.Options) (*baremetal.ListVirtualNetworks, error)\n\n\tCreateIPSecConnection(compartmentID, cpeID, drgID string, staticRoutes []string, opts ...baremetal.Options) (conn *baremetal.IPSecConnection, e error)\n\tListIPSecConnections(compartmentID string, opts ...baremetal.Options) (conns *baremetal.ListIPSecConnections, e error)\n\tGetIPSecConnection(id string) (conn *baremetal.IPSecConnection, e error)\n\tDeleteIPSecConnection(id string, opts ...baremetal.Options) (e error)\n\tGetIPSecConnectionDeviceStatus(id string) (status *baremetal.IPSecConnectionDeviceStatus, e error)\n\tGetIPSecConnectionDeviceConfig(id string) (status *baremetal.IPSecConnectionDeviceConfig, e error)\n\n\tCreateDrg(compartmentID string, opts ...baremetal.Options) (*baremetal.Drg, error)\n\tGetDrg(id string, opts ...baremetal.Options) (*baremetal.Drg, error)\n\tDeleteDrg(id string, opts ...baremetal.Options) error\n\tListDrgs(compartmentID string, opts ...baremetal.Options) (*baremetal.ListDrgs, error)\n\n\tCreateDrgAttachment(compartmentID, drgID, vcnID string, opts ...baremetal.Options) (vol *baremetal.DrgAttachment, e error)\n\tGetDrgAttachment(id string, opts ...baremetal.Options) (vol *baremetal.DrgAttachment, e error)\n\tDeleteDrgAttachment(id string, opts ...baremetal.Options) (e error)\n\tListDrgAttachments(compartmentID string, opts ...baremetal.Options) (res *baremetal.ListDrgAttachments, e error)\n\n\tCreateInternetGateway(compartmentID, vcnID string, isEnabled bool, opts ...baremetal.Options) (gw *baremetal.InternetGateway, e error)\n\tGetInternetGateway(id string) (gw *baremetal.InternetGateway, e error)\n\tUpdateInternetGateway(id string, isEnabled bool, opts ...baremetal.Options) (gw *baremetal.InternetGateway, e error)\n\tDeleteInternetGateway(id string, opts ...baremetal.Options) (e error)\n\tListInternetGateways(compartmentID, vcnID string, opts ...baremetal.Options) (l *baremetal.ListInternetGateways, e error)\n\n\tCreateRouteTable(compartmentID, vcnID string, routeRules []baremetal.RouteRule, opts ...baremetal.Options) (res *baremetal.RouteTable, e error)\n\tGetRouteTable(id string, opts ...baremetal.Options) (vol *baremetal.RouteTable, e error)\n\tUpdateRouteTable(id string, routeRules []baremetal.RouteRule, opts ...baremetal.Options) (res *baremetal.RouteTable, e error)\n\tDeleteRouteTable(id string, opts ...baremetal.Options) (e error)\n\tListRouteTables(compartmentID, vcnID string, opts ...baremetal.Options) (vols *baremetal.ListRouteTables, e error)\n\n\tCaptureConsoleHistory(instanceID string, opts ...baremetal.Options) (icHistory *baremetal.ConsoleHistoryMetadata, e error)\n\tGetConsoleHistory(id string, opts ...baremetal.Options) (icHistory *baremetal.ConsoleHistoryMetadata, e error)\n\tDeleteConsoleHistory(id string) (e error)\n\n\tGetVnic(vnicID string) (vnic *baremetal.Vnic, e error)\n\n\tCreateOrResetUIPassword(userID string, opts ...baremetal.Options) (resource *baremetal.UIPassword, e error)\n\n\tCreateVolumeBackup(volumeID string, opts ...baremetal.Options) (vol *baremetal.VolumeBackup, e error)\n\tGetVolumeBackup(id string, opts ...baremetal.Options) (vol *baremetal.VolumeBackup, e error)\n\tUpdateVolumeBackup(id string, opts ...baremetal.Options) (vol *baremetal.VolumeBackup, e error)\n\tDeleteVolumeBackup(id string, opts ...baremetal.Options) (e error)\n}\n<commit_msg>Added VNIC datasource<commit_after>package client\n\nimport \"github.com\/MustWin\/baremetal-sdk-go\"\n\ntype BareMetalClient interface {\n\tCreateUser(name, description string, options ...baremetal.Options) (*baremetal.IdentityResource, error)\n\tGetUser(userID string) (*baremetal.IdentityResource, error)\n\tUpdateUser(userID, userDescription string, opts ...baremetal.Options) (*baremetal.IdentityResource, error)\n\tDeleteUser(userID string, opts ...baremetal.Options) error\n\n\tCreateGroup(name, description string, options ...baremetal.Options) (*baremetal.IdentityResource, error)\n\tGetGroup(userID string) (*baremetal.IdentityResource, error)\n\tUpdateGroup(userID, userDescription string, opts ...baremetal.Options) (*baremetal.IdentityResource, error)\n\tDeleteGroup(userID string, opts ...baremetal.Options) error\n\n\tCreatePolicy(name, description string, statements []string, opts ...baremetal.Options) (*baremetal.Policy, error)\n\tGetPolicy(id string) (*baremetal.Policy, error)\n\tUpdatePolicy(id, description string, statements []string, opts ...baremetal.Options) (*baremetal.Policy, error)\n\tDeletePolicy(id string, opts ...baremetal.Options) error\n\n\tCreateCompartment(name, description string, options ...baremetal.Options) (*baremetal.IdentityResource, error)\n\tGetCompartment(userID string) (*baremetal.IdentityResource, error)\n\tUpdateCompartment(userID, userDescription string, opts ...baremetal.Options) (*baremetal.IdentityResource, error)\n\n\tListShapes(compartmentID string, opt ...baremetal.Options) (*baremetal.ListShapes, error)\n\tListVnicAttachments(compartmentID string, opt ...baremetal.Options) (*baremetal.ListVnicAttachments, error)\n\n\tCreateCpe(compartmentID, displayName, IPAddress string, opts ...baremetal.Options) (cpe *baremetal.Cpe, e error)\n\tGetCpe(id string, opts ...baremetal.Options) (cpe *baremetal.Cpe, e error)\n\tDeleteCpe(id string, opts ...baremetal.Options) (e error)\n\tListCpes(compartmentID string, opts ...baremetal.Options) (cpes *baremetal.ListCpes, e error)\n\n\tCreateVolume(availabiltyDomain, compartmentID string, opts ...baremetal.Options) (vol *baremetal.Volume, e error)\n\tGetVolume(id string, opts ...baremetal.Options) (vol *baremetal.Volume, e error)\n\tUpdateVolume(id string, opts ...baremetal.Options) (vol *baremetal.Volume, e error)\n\tDeleteVolume(id string, opts ...baremetal.Options) (e error)\n\tListVolumes(compartmentID string, opts ...baremetal.Options) (vols *baremetal.ListVolumes, e error)\n\n\tLaunchInstance(availabilityDomain, compartmentID, image, shape, subnetID string, metadata map[string]string, opts ...baremetal.Options) (inst *baremetal.Instance, e error)\n\tGetInstance(instanceID string) (inst *baremetal.Instance, e error)\n\tUpdateInstance(instanceID string, opts ...baremetal.Options) (inst *baremetal.Instance, e error)\n\tTerminateInstance(instanceID string, opts ...baremetal.Options) (e error)\n\tListInstances(compartmentID string, opts ...baremetal.Options) (list *baremetal.ListInstances, e error)\n\n\tAttachVolume(compartmentID, instanceID, attachmentType, volumeID string, opts ...baremetal.Options) (vol *baremetal.VolumeAttachment, e error)\n\tGetVolumeAttachment(id string, opts ...baremetal.Options) (vol *baremetal.VolumeAttachment, e error)\n\tDetachVolume(id string, opts ...baremetal.Options) (e error)\n\tListVolumeAttachments(compartmentID string, opts ...baremetal.Options) (res *baremetal.ListVolumeAttachments, e error)\n\n\tCreateSubnet(availabilityDomain, cidrBlock, compartmentID, routeTableID, vcnID string, securityListIDs []string, opts ...baremetal.Options) (*baremetal.Subnet, error)\n\tGetSubnet(subnetID string) (sn *baremetal.Subnet, e error)\n\tListSubnets(compartmentID, vcnID string, opts ...baremetal.Options) (*baremetal.ListSubnets, error)\n\tDeleteSubnet(subnetID string, opts ...baremetal.Options) error\n\n\tCreateVirtualNetwork(cidrBlock, compartmentID string, opts ...baremetal.Options) (*baremetal.VirtualNetwork, error)\n\tGetVirtualNetwork(id string, opts ...baremetal.Options) (vcn *baremetal.VirtualNetwork, e error)\n\tDeleteVirtualNetwork(id string, opts ...baremetal.Options) error\n\tListVirtualNetworks(compartmentID string, opts ...baremetal.Options) (*baremetal.ListVirtualNetworks, error)\n\n\tCreateIPSecConnection(compartmentID, cpeID, drgID string, staticRoutes []string, opts ...baremetal.Options) (conn *baremetal.IPSecConnection, e error)\n\tListIPSecConnections(compartmentID string, opts ...baremetal.Options) (conns *baremetal.ListIPSecConnections, e error)\n\tGetIPSecConnection(id string) (conn *baremetal.IPSecConnection, e error)\n\tDeleteIPSecConnection(id string, opts ...baremetal.Options) (e error)\n\tGetIPSecConnectionDeviceStatus(id string) (status *baremetal.IPSecConnectionDeviceStatus, e error)\n\tGetIPSecConnectionDeviceConfig(id string) (status *baremetal.IPSecConnectionDeviceConfig, e error)\n\n\tCreateDrg(compartmentID string, opts ...baremetal.Options) (*baremetal.Drg, error)\n\tGetDrg(id string, opts ...baremetal.Options) (*baremetal.Drg, error)\n\tDeleteDrg(id string, opts ...baremetal.Options) error\n\tListDrgs(compartmentID string, opts ...baremetal.Options) (*baremetal.ListDrgs, error)\n\n\tCreateDrgAttachment(compartmentID, drgID, vcnID string, opts ...baremetal.Options) (vol *baremetal.DrgAttachment, e error)\n\tGetDrgAttachment(id string, opts ...baremetal.Options) (vol *baremetal.DrgAttachment, e error)\n\tDeleteDrgAttachment(id string, opts ...baremetal.Options) (e error)\n\tListDrgAttachments(compartmentID string, opts ...baremetal.Options) (res *baremetal.ListDrgAttachments, e error)\n\n\tCreateInternetGateway(compartmentID, vcnID string, isEnabled bool, opts ...baremetal.Options) (gw *baremetal.InternetGateway, e error)\n\tGetInternetGateway(id string) (gw *baremetal.InternetGateway, e error)\n\tUpdateInternetGateway(id string, isEnabled bool, opts ...baremetal.Options) (gw *baremetal.InternetGateway, e error)\n\tDeleteInternetGateway(id string, opts ...baremetal.Options) (e error)\n\tListInternetGateways(compartmentID, vcnID string, opts ...baremetal.Options) (l *baremetal.ListInternetGateways, e error)\n\n\tCreateRouteTable(compartmentID, vcnID string, routeRules []baremetal.RouteRule, opts ...baremetal.Options) (res *baremetal.RouteTable, e error)\n\tGetRouteTable(id string, opts ...baremetal.Options) (vol *baremetal.RouteTable, e error)\n\tUpdateRouteTable(id string, routeRules []baremetal.RouteRule, opts ...baremetal.Options) (res *baremetal.RouteTable, e error)\n\tDeleteRouteTable(id string, opts ...baremetal.Options) (e error)\n\tListRouteTables(compartmentID, vcnID string, opts ...baremetal.Options) (vols *baremetal.ListRouteTables, e error)\n\n\tCaptureConsoleHistory(instanceID string, opts ...baremetal.Options) (icHistory *baremetal.ConsoleHistoryMetadata, e error)\n\tGetConsoleHistory(id string, opts ...baremetal.Options) (icHistory *baremetal.ConsoleHistoryMetadata, e error)\n\tDeleteConsoleHistory(id string) (e error)\n\n\tCreateOrResetUIPassword(userID string, opts ...baremetal.Options) (resource *baremetal.UIPassword, e error)\n\n\tCreateVolumeBackup(volumeID string, opts ...baremetal.Options) (vol *baremetal.VolumeBackup, e error)\n\tGetVolumeBackup(id string, opts ...baremetal.Options) (vol *baremetal.VolumeBackup, e error)\n\tUpdateVolumeBackup(id string, opts ...baremetal.Options) (vol *baremetal.VolumeBackup, e error)\n\tDeleteVolumeBackup(id string, opts ...baremetal.Options) (e error)\n\n\tGetVnic(vnicID string) (vnic *baremetal.Vnic, e error)\n}\n<|endoftext|>"} {"text":"<commit_before>package conn\n\nimport (\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"log\"\n)\n\ntype Memo struct {\n\tTime string `redis:\"time\"`\n\tContent string `redis:\"content\"`\n}\n\ntype Task struct {\n\tId int\n\tChatId int `redis:\"chatId\"`\n\tOwner string `redis:\"owner\"`\n\tDesc string `redis:\"content\"`\n\tWhen string `redis:\"time\"`\n}\n\n\/\/All redis actions\n\nfunc SetMasterId(id int) {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tc.Do(\"SET\", \"evolsnowChatId\", id)\n}\n\nfunc GetMasterId() int {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tid, _ := redis.Int(c.Do(\"GET\", \"evolsnowChatId\"))\n\treturn id\n}\n\nfunc SetUserChatId(user string, id int) {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tkey := user + \"ChatId\"\n\tc.Do(\"SET\", key, id)\n}\n\nfunc GetUserChatId(user string) int {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tkey := user + \"ChatId\"\n\tid, _ := redis.Int(c.Do(\"GET\", key))\n\treturn id\n}\n\nfunc HSetMemo(user, time, memo string) {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tvar setMemoLua = `\n\tlocal id = redis.call(\"INCR\", \"memoIncrId\")\n\tredis.call(\"RPUSH\", KEYS[1]..\":memos\", id)\n\tredis.call(\"HMSET\", \"memo:\"..id, \"time\", KEYS[2], \"content\", KEYS[3])\n\t`\n\tscript := redis.NewScript(3, setMemoLua)\n\tscript.Do(c, user, time, memo)\n}\n\nfunc GetTaskId() int {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tid, _ := redis.Int(c.Do(\"INCR\", \"taskIncrId\"))\n\treturn id\n}\n\nfunc HSetTask(ts Task) {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tvar setTaskLua = `\n\tredis.call(\"RPUSH\", KEYS[1]..\":tasks\", id)\n\tredis.call(\"HMSET\", \"task:\"..id, \"owner\", KEYS[1], \"time\", KEYS[2], \"content\", KEYS[3], \"chatID\", KEYS[4])\n\t`\n\tscript := redis.NewScript(4, setTaskLua)\n\tscript.Do(c, ts.Owner, ts.When, ts.Desc, ts.ChatId)\n}\n\nfunc RemoveTask(ts Task) {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tlog.Println(\"remove\", ts.Id)\n\tvar removeTaskLua = `\n\tredis.call(\"LREM\", KEYS[1]..\":tasks\", 1, KEYS[2])\n\tredis.call(\"DEL\", \"task:\"..KEYS[2])\n\t`\n\tscript := redis.NewScript(2, removeTaskLua)\n\tscript.Do(c, ts.Owner, ts.Id)\n}\n\nfunc HGetUserTasks(user string) []Task {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tvar multiGetTaskLua = `\n\tlocal data = redis.call(\"LRANGE\", KEYS[1]..\":tasks\", \"0\", \"-1\")\n\tlocal ret = {}\n \tfor idx=1, #data do\n \t\tret[idx] = redis.call(\"HGETALL\", \"task:\"..data[idx])\n \t\tprint(ret[idx])\n \tend\n \treturn ret\n `\n\tvar tasks []Task\n\tscript := redis.NewScript(1, multiGetTaskLua)\n\tvalues, err := redis.Values(script.Do(c, user))\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tfor i := range values {\n\t\tt := new(Task)\n\t\tredis.ScanStruct(values[i].([]interface{}), t)\n\t\tlog.Println(\"task:\", *t)\n\t\ttasks = append(tasks, *t)\n\t}\n\treturn tasks\n}\n\nfunc HGetAllMemos(user string) []Memo {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tvar multiGetMemoLua = `\n\tlocal data = redis.call(\"LRANGE\", KEYS[1]..\":memos\", \"0\", \"-1\")\n\tlocal ret = {}\n \tfor idx=1, #data do\n \t\tret[idx] = redis.call(\"HGETALL\", \"memo:\"..data[idx])\n \tend\n \treturn ret\n `\n\tvar memos []Memo\n\tscript := redis.NewScript(1, multiGetMemoLua)\n\tvalues, err := redis.Values(script.Do(c, user))\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tfor i := range values {\n\t\tm := new(Memo)\n\t\tredis.ScanStruct(values[i].([]interface{}), m)\n\t\tmemos = append(memos, *m)\n\t}\n\treturn memos\n}\n\n\/\/\n\/\/var multiGetScript = redis.NewScript(0, multiGetMemoLua)\n<commit_msg>get tasks<commit_after>package conn\n\nimport (\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"log\"\n)\n\ntype Memo struct {\n\tTime string `redis:\"time\"`\n\tContent string `redis:\"content\"`\n}\n\ntype Task struct {\n\tId int\n\tChatId int `redis:\"chatId\"`\n\tOwner string `redis:\"owner\"`\n\tDesc string `redis:\"content\"`\n\tWhen string `redis:\"time\"`\n}\n\n\/\/All redis actions\n\nfunc SetMasterId(id int) {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tc.Do(\"SET\", \"evolsnowChatId\", id)\n}\n\nfunc GetMasterId() int {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tid, _ := redis.Int(c.Do(\"GET\", \"evolsnowChatId\"))\n\treturn id\n}\n\nfunc SetUserChatId(user string, id int) {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tkey := user + \"ChatId\"\n\tc.Do(\"SET\", key, id)\n}\n\nfunc GetUserChatId(user string) int {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tkey := user + \"ChatId\"\n\tid, _ := redis.Int(c.Do(\"GET\", key))\n\treturn id\n}\n\nfunc HSetMemo(user, time, memo string) {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tvar setMemoLua = `\n\tlocal id = redis.call(\"INCR\", \"memoIncrId\")\n\tredis.call(\"RPUSH\", KEYS[1]..\":memos\", id)\n\tredis.call(\"HMSET\", \"memo:\"..id, \"time\", KEYS[2], \"content\", KEYS[3])\n\t`\n\tscript := redis.NewScript(3, setMemoLua)\n\tscript.Do(c, user, time, memo)\n}\n\nfunc GetTaskId() int {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tid, _ := redis.Int(c.Do(\"INCR\", \"taskIncrId\"))\n\treturn id\n}\n\nfunc HSetTask(ts Task) {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tvar setTaskLua = `\n\tredis.call(\"RPUSH\", KEYS[2]..\":tasks\", id)\n\tredis.call(\"HMSET\", \"task:\"..KEYS[1], \"owner\", KEYS[2], \"time\", KEYS[3], \"content\", KEYS[4], \"chatID\", KEYS[5])\n\t`\n\tscript := redis.NewScript(5, setTaskLua)\n\tscript.Do(c, ts.Id, ts.Owner, ts.When, ts.Desc, ts.ChatId)\n}\n\nfunc RemoveTask(ts Task) {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tlog.Println(\"remove\", ts.Id)\n\tvar removeTaskLua = `\n\tredis.call(\"LREM\", KEYS[1]..\":tasks\", 1, KEYS[2])\n\tredis.call(\"DEL\", \"task:\"..KEYS[2])\n\t`\n\tscript := redis.NewScript(2, removeTaskLua)\n\tscript.Do(c, ts.Owner, ts.Id)\n}\n\nfunc HGetUserTasks(user string) []Task {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tvar multiGetTaskLua = `\n\tlocal data = redis.call(\"LRANGE\", KEYS[1]..\":tasks\", \"0\", \"-1\")\n\tlocal ret = {}\n \tfor idx=1, #data do\n \t\tret[idx] = redis.call(\"HGETALL\", \"task:\"..data[idx])\n \t\tprint(ret[idx])\n \tend\n \treturn ret\n `\n\tvar tasks []Task\n\tscript := redis.NewScript(1, multiGetTaskLua)\n\tvalues, err := redis.Values(script.Do(c, user))\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tfor i := range values {\n\t\tt := new(Task)\n\t\tredis.ScanStruct(values[i].([]interface{}), t)\n\t\tlog.Println(\"task:\", *t)\n\t\ttasks = append(tasks, *t)\n\t}\n\treturn tasks\n}\n\nfunc HGetAllMemos(user string) []Memo {\n\tc := Pool.Get()\n\tdefer c.Close()\n\tvar multiGetMemoLua = `\n\tlocal data = redis.call(\"LRANGE\", KEYS[1]..\":memos\", \"0\", \"-1\")\n\tlocal ret = {}\n \tfor idx=1, #data do\n \t\tret[idx] = redis.call(\"HGETALL\", \"memo:\"..data[idx])\n \tend\n \treturn ret\n `\n\tvar memos []Memo\n\tscript := redis.NewScript(1, multiGetMemoLua)\n\tvalues, err := redis.Values(script.Do(c, user))\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\tfor i := range values {\n\t\tm := new(Memo)\n\t\tredis.ScanStruct(values[i].([]interface{}), m)\n\t\tmemos = append(memos, *m)\n\t}\n\treturn memos\n}\n\n\/\/\n\/\/var multiGetScript = redis.NewScript(0, multiGetMemoLua)\n<|endoftext|>"} {"text":"<commit_before>\/\/ licence goes here\n\npackage serial\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"runtime\"\n)\n\n\/\/ const\n\n\/\/ Baud is the unit for the symbol rate. It describes the number of symbols transmitted per second.\ntype Baud uint32\n\nconst (\n\t\/\/ Baud4800 defines a transmission rate of 4800 symbols per second.\n\tBaud4800 = 4800\n\t\/\/ Baud9600 defines a transmission rate of 9600 symbols per second.\n\tBaud9600 = 9600\n\t\/\/ Baud19200 defines a transmission rate of 19200 symbols per second.\n\tBaud19200 = 19200\n\t\/\/ Baud38400 defines a transmission rate of 38400 symbols per second.\n\tBaud38400 = 38400\n\t\/\/ Baud57600 defines a transmission rate of 57600 symbols per second.\n\tBaud57600 = 57600\n\t\/\/ Baud115200 defines a transmission rate of 115200 symbols per second.\n\tBaud115200 = 115200\n)\n\n\/\/ DataBit is the number of bits representing a character.\ntype DataBit byte\n\nconst (\n\t\/\/ DataBit5 stands for a character length of five bits.\n\tDataBit5 = DataBit(iota + 5)\n\t\/\/ DataBit6 stands for a character length of six bits.\n\tDataBit6\n\t\/\/ DataBit7 stands for a character length of seven bits.\n\tDataBit7\n\t\/\/ DataBit8 stands for a character length of eight bits.\n\tDataBit8\n)\n\n\/\/ StopBit is the number of bits being send at the end of every character.\ntype StopBit byte\n\nconst (\n\t\/\/ StopBit1 represents a single bit being send as stopbit.\n\tStopBit1 = StopBit(iota + 1)\n\t\/\/ StopBit2 represents two bits being send as stopbit.\n\tStopBit2\n)\n\n\/\/ Parity is the method for detecting transmission errors.\ntype Parity byte\n\nconst (\n\t\/\/ ParityNone indicates that no error detection is being used.\n\tParityNone = Parity(iota)\n\t\/\/ ParityEven indicates that a bit is added to even out the bit count.\n\tParityEven\n\t\/\/ ParityOdd indicates that a bit is added to provide an odd bit count.\n\tParityOdd\n)\n\n\/\/ TODO flow control\n\n\/\/ var\nvar (\n\terrPort = errors.New(\"serial configuration error: invalid port\")\n\terrBaud = errors.New(\"serial configuration error: invalid baud rate (4800, 9600, 19200, 38400, 57600, 115200)\")\n\terrDataBit = errors.New(\"serial configuration error: invalid number of data bits (5, 6, 7, 8, 9)\")\n\terrStopBit = errors.New(\"serial configuration error: invalid number of stop bits (1, 2)\")\n\terrParity = errors.New(\"serial configuration error: invalid parity (0 - None, 1 - Even, 2 - Odd)\")\n)\n\n\/\/ structs and its functions\n\n\/\/ Connection represents a serial connection with all parameters.\ntype Connection struct {\n\tPort string\n\tBaud Baud\n\tDataBit DataBit\n\tStopBit StopBit\n\tParity Parity\n\tf *os.File\n\topen bool\n}\n\nfunc (connection *Connection) check() error {\n\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\t\/\/TODO Port should look like this: COM3, USB0\n\tcase \"freebsd\", \"linux\":\n\t\t\/\/TODO Port should look like this: \/dev\/ttyUSB0\n\t}\n\n\tswitch connection.Baud {\n\tcase Baud115200, Baud57600, Baud38400, Baud19200, Baud9600, Baud4800:\n\tdefault:\n\t\treturn errBaud\n\t}\n\n\tswitch connection.DataBit {\n\tcase DataBit5, DataBit6, DataBit7, DataBit8:\n\tdefault:\n\t\treturn errDataBit\n\t}\n\n\tswitch connection.StopBit {\n\tcase StopBit1, StopBit2:\n\tdefault:\n\t\treturn errStopBit\n\t}\n\n\tswitch connection.Parity {\n\tcase ParityNone, ParityEven, ParityOdd:\n\tdefault:\n\t\treturn errParity\n\t}\n\n\treturn nil\n}\n\nfunc (connection *Connection) String() string {\n\n\tvar parity string\n\tswitch connection.Parity {\n\tcase ParityNone:\n\t\tparity = \"N\"\n\tcase ParityEven:\n\t\tparity = \"E\"\n\tcase ParityOdd:\n\t\tparity = \"O\"\n\t}\n\n\treturn fmt.Sprintf(\"port: %s, baud rate:%d, parameters: %d%s%d\",\n\t\tconnection.Port, connection.Baud, connection.DataBit, parity, connection.StopBit)\n}\n\n\/\/ functions\n\n\/\/ Init provides a connection with the given parameters.\nfunc Init(port string, baudrate Baud, databit DataBit, stopbit StopBit, parity Parity) (*Connection, error) {\n\tconnection := &Connection{Port: port, Baud: baudrate, DataBit: databit, StopBit: stopbit, Parity: parity}\n\treturn connection, connection.check()\n}\n\n\/\/ Load provides a connection with the parameters being loaded from a json file.\nfunc Load(path string) (*Connection, error) {\n\tvar connection *Connection\n\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn connection, err\n\t}\n\n\tdec := json.NewDecoder(file)\n\tfor {\n\n\t\tif err := dec.Decode(&connection); err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn connection, err\n\t\t}\n\t}\n\n\treturn connection, connection.check()\n}\n<commit_msg>updated comments<commit_after>\/\/ licence goes here\n\npackage serial\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\n\/\/ const\n\n\/\/ Baud is the unit for the symbol rate. It describes the number of symbols transmitted per second.\ntype Baud uint32\n\nconst (\n\t\/\/ Baud4800 defines a transmission rate of 4800 symbols per second.\n\tBaud4800 = 4800\n\t\/\/ Baud9600 defines a transmission rate of 9600 symbols per second.\n\tBaud9600 = 9600\n\t\/\/ Baud19200 defines a transmission rate of 19200 symbols per second.\n\tBaud19200 = 19200\n\t\/\/ Baud38400 defines a transmission rate of 38400 symbols per second.\n\tBaud38400 = 38400\n\t\/\/ Baud57600 defines a transmission rate of 57600 symbols per second.\n\tBaud57600 = 57600\n\t\/\/ Baud115200 defines a transmission rate of 115200 symbols per second.\n\tBaud115200 = 115200\n)\n\n\/\/ DataBit is the number of bits representing a character.\ntype DataBit byte\n\nconst (\n\t\/\/ DataBit5 stands for a character length of five bits.\n\tDataBit5 = DataBit(iota + 5)\n\t\/\/ DataBit6 stands for a character length of six bits.\n\tDataBit6\n\t\/\/ DataBit7 stands for a character length of seven bits.\n\tDataBit7\n\t\/\/ DataBit8 stands for a character length of eight bits.\n\tDataBit8\n)\n\n\/\/ StopBit is the number of bits being send at the end of every character.\ntype StopBit byte\n\nconst (\n\t\/\/ StopBit1 represents a single bit being send as stopbit.\n\tStopBit1 = StopBit(iota + 1)\n\t\/\/ StopBit2 represents two bits being send as stopbit.\n\tStopBit2\n)\n\n\/\/ Parity is the method for detecting transmission errors.\ntype Parity byte\n\nconst (\n\t\/\/ ParityNone indicates that no error detection is being used.\n\tParityNone = Parity(iota)\n\t\/\/ ParityEven indicates that a bit is added to even out the bit count.\n\tParityEven\n\t\/\/ ParityOdd indicates that a bit is added to provide an odd bit count.\n\tParityOdd\n)\n\n\/\/ TODO flow control\n\n\/\/ var\nvar (\n\terrPort = errors.New(\"serial configuration error: invalid port\")\n\terrBaud = errors.New(\"serial configuration error: invalid baud rate (4800, 9600, 19200, 38400, 57600, 115200)\")\n\terrDataBit = errors.New(\"serial configuration error: invalid number of data bits (5, 6, 7, 8, 9)\")\n\terrStopBit = errors.New(\"serial configuration error: invalid number of stop bits (1, 2)\")\n\terrParity = errors.New(\"serial configuration error: invalid parity (0 - None, 1 - Even, 2 - Odd)\")\n)\n\n\/\/ structs and its functions\n\n\/\/ Connection represents a serial connection with all parameters.\ntype Connection struct {\n\tPort string\n\tBaud Baud\n\tDataBit DataBit\n\tStopBit StopBit\n\tParity Parity\n\tf *os.File\n\topen bool\n}\n\nfunc (connection *Connection) check() error {\n\n\tswitch connection.Baud {\n\tcase Baud115200, Baud57600, Baud38400, Baud19200, Baud9600, Baud4800:\n\tdefault:\n\t\treturn errBaud\n\t}\n\n\tswitch connection.DataBit {\n\tcase DataBit5, DataBit6, DataBit7, DataBit8:\n\tdefault:\n\t\treturn errDataBit\n\t}\n\n\tswitch connection.StopBit {\n\tcase StopBit1, StopBit2:\n\tdefault:\n\t\treturn errStopBit\n\t}\n\n\tswitch connection.Parity {\n\tcase ParityNone, ParityEven, ParityOdd:\n\tdefault:\n\t\treturn errParity\n\t}\n\n\treturn nil\n}\n\nfunc (connection *Connection) String() string {\n\n\tvar parity string\n\tswitch connection.Parity {\n\tcase ParityNone:\n\t\tparity = \"N\"\n\tcase ParityEven:\n\t\tparity = \"E\"\n\tcase ParityOdd:\n\t\tparity = \"O\"\n\t}\n\n\treturn fmt.Sprintf(\"port: %s, baud rate:%d, parameters: %d%s%d\",\n\t\tconnection.Port, connection.Baud, connection.DataBit, parity, connection.StopBit)\n}\n\n\/\/ functions\n\n\/\/ Init provides a connection with the given parameters.\nfunc Init(port string, baudrate Baud, databit DataBit, stopbit StopBit, parity Parity) (*Connection, error) {\n\tconnection := &Connection{Port: port, Baud: baudrate, DataBit: databit, StopBit: stopbit, Parity: parity}\n\treturn connection, connection.check()\n}\n\n\/\/ Load provides a connection with the parameters being loaded from a json file.\nfunc Load(path string) (*Connection, error) {\n\tvar connection *Connection\n\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn connection, err\n\t}\n\n\tdec := json.NewDecoder(file)\n\tfor {\n\n\t\tif err := dec.Decode(&connection); err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn connection, err\n\t\t}\n\t}\n\n\treturn connection, connection.check()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage fuse\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"path\"\n\t\"runtime\"\n\t\"sync\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/jacobsa\/bazilfuse\"\n\t\"github.com\/jacobsa\/fuse\/fuseops\"\n)\n\n\/\/ A connection to the fuse kernel process.\ntype Connection struct {\n\tlogger *log.Logger\n\twrapped *bazilfuse.Conn\n\topsInFlight sync.WaitGroup\n\n\t\/\/ The context from which all op contexts inherit.\n\tparentCtx context.Context\n\n\t\/\/ For logging purposes only.\n\tnextOpID uint32\n\n\tmu sync.Mutex\n\n\t\/\/ A map from bazilfuse request ID (*not* the op ID for logging used above)\n\t\/\/ to a function that cancel's its associated context.\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tcancelFuncs map[bazilfuse.RequestID]func()\n}\n\n\/\/ Responsibility for closing the wrapped connection is transferred to the\n\/\/ result. You must call c.close() eventually.\nfunc newConnection(\n\tparentCtx context.Context,\n\tlogger *log.Logger,\n\twrapped *bazilfuse.Conn) (c *Connection, err error) {\n\tc = &Connection{\n\t\tlogger: logger,\n\t\twrapped: wrapped,\n\t\tparentCtx: parentCtx,\n\t}\n\n\treturn\n}\n\n\/\/ Log information for an operation with the given ID. calldepth is the depth\n\/\/ to use when recovering file:line information with runtime.Caller.\nfunc (c *Connection) log(\n\topID uint32,\n\tcalldepth int,\n\tformat string,\n\tv ...interface{}) {\n\t\/\/ Get file:line info.\n\tvar file string\n\tvar line int\n\tvar ok bool\n\n\t_, file, line, ok = runtime.Caller(calldepth)\n\tif !ok {\n\t\tfile = \"???\"\n\t}\n\n\tfileLine := fmt.Sprintf(\"%v:%v\", path.Base(file), line)\n\n\t\/\/ Format the actual message to be printed.\n\tmsg := fmt.Sprintf(\n\t\t\"Op 0x%08x %24s] %v\",\n\t\topID,\n\t\tfileLine,\n\t\tfmt.Sprintf(format, v...))\n\n\t\/\/ Print it.\n\tc.logger.Println(msg)\n}\n\n\/\/ LOCKS_EXCLUDED(c.mu)\nfunc (c *Connection) recordCancelFunc(\n\treqID bazilfuse.RequestID,\n\tf func()) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif _, ok := c.cancelFuncs[reqID]; ok {\n\t\tpanic(fmt.Sprintf(\"Already have cancel func for request %v\", reqID))\n\t}\n\n\tc.cancelFuncs[reqID] = f\n}\n\n\/\/ Set up state for an op that is about to be returned to the user, given its\n\/\/ bazilfuse request ID.\n\/\/\n\/\/ Return a context that should be used for the op.\n\/\/\n\/\/ LOCKS_EXCLUDED(c.mu)\nfunc (c *Connection) beginOp(reqID bazilfuse.RequestID) (ctx context.Context) {\n\t\/\/ Note that the op is in flight.\n\tc.opsInFlight.Add(1)\n\n\t\/\/ Set up a cancellation function.\n\tctx, cancel := context.WithCancel(c.parentCtx)\n\tc.recordCancelFunc(reqID, cancel)\n\n\treturn\n}\n\n\/\/ Clean up all state associated with an op to which the user has responded.\n\/\/\n\/\/ LOCKS_EXCLUDED(c.mu)\nfunc (c *Connection) finishOp(reqID bazilfuse.RequestID) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\t\/\/ Even though the op is finished, context.WithCancel requires us to arrange\n\t\/\/ for the cancellation function to be invoked. We also must remove it from\n\t\/\/ our map.\n\tcancel, ok := c.cancelFuncs[reqID]\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"Unknown request ID in finishOp: %v\", reqID))\n\t}\n\n\tcancel()\n\tdelete(c.cancelFuncs, reqID)\n\n\t\/\/ Decrement the in-flight counter.\n\tc.opsInFlight.Done()\n}\n\n\/\/ Read the next op from the kernel process. Return io.EOF if the kernel has\n\/\/ closed the connection.\n\/\/\n\/\/ This function delivers ops in exactly the order they are received from\n\/\/ \/dev\/fuse. It must not be called multiple times concurrently.\n\/\/\n\/\/ LOCKS_EXCLUDED(c.mu)\nfunc (c *Connection) ReadOp() (op fuseops.Op, err error) {\n\t\/\/ Keep going until we find a request we know how to convert.\n\tfor {\n\t\t\/\/ Read a bazilfuse request.\n\t\tvar bfReq bazilfuse.Request\n\t\tbfReq, err = c.wrapped.ReadRequest()\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Choose an ID for this operation.\n\t\topID := c.nextOpID\n\t\tc.nextOpID++\n\n\t\t\/\/ Log the receipt of the operation.\n\t\tc.log(opID, 1, \"<- %v\", bfReq)\n\n\t\t\/\/ Special case: responding to this is required to make mounting work on OS\n\t\t\/\/ X. We don't currently expose the capability for the file system to\n\t\t\/\/ intercept this.\n\t\tif statfsReq, ok := bfReq.(*bazilfuse.StatfsRequest); ok {\n\t\t\tc.log(opID, 1, \"-> (Statfs) OK\")\n\t\t\tstatfsReq.Respond(&bazilfuse.StatfsResponse{})\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Set up op dependencies.\n\t\tvar reqID bazilfuse.RequestID = bfReq.Hdr().ID\n\t\topCtx := c.beginOp(reqID)\n\n\t\tlogForOp := func(calldepth int, format string, v ...interface{}) {\n\t\t\tc.log(opID, calldepth+1, format, v...)\n\t\t}\n\n\t\tfinished := func(err error) { c.finishOp(reqID) }\n\n\t\top = fuseops.Convert(opCtx, bfReq, logForOp, finished)\n\t\treturn\n\t}\n}\n\nfunc (c *Connection) waitForReady() (err error) {\n\t<-c.wrapped.Ready\n\terr = c.wrapped.MountError\n\treturn\n}\n\n\/\/ Close the connection and wait for in-flight ops.\nfunc (c *Connection) close() (err error) {\n\terr = c.wrapped.Close()\n\tc.opsInFlight.Wait()\n\treturn\n}\n<commit_msg>Fixed a panic.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage fuse\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"path\"\n\t\"runtime\"\n\t\"sync\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/jacobsa\/bazilfuse\"\n\t\"github.com\/jacobsa\/fuse\/fuseops\"\n)\n\n\/\/ A connection to the fuse kernel process.\ntype Connection struct {\n\tlogger *log.Logger\n\twrapped *bazilfuse.Conn\n\topsInFlight sync.WaitGroup\n\n\t\/\/ The context from which all op contexts inherit.\n\tparentCtx context.Context\n\n\t\/\/ For logging purposes only.\n\tnextOpID uint32\n\n\tmu sync.Mutex\n\n\t\/\/ A map from bazilfuse request ID (*not* the op ID for logging used above)\n\t\/\/ to a function that cancel's its associated context.\n\t\/\/\n\t\/\/ GUARDED_BY(mu)\n\tcancelFuncs map[bazilfuse.RequestID]func()\n}\n\n\/\/ Responsibility for closing the wrapped connection is transferred to the\n\/\/ result. You must call c.close() eventually.\nfunc newConnection(\n\tparentCtx context.Context,\n\tlogger *log.Logger,\n\twrapped *bazilfuse.Conn) (c *Connection, err error) {\n\tc = &Connection{\n\t\tlogger: logger,\n\t\twrapped: wrapped,\n\t\tparentCtx: parentCtx,\n\t\tcancelFuncs: make(map[bazilfuse.RequestID]func()),\n\t}\n\n\treturn\n}\n\n\/\/ Log information for an operation with the given ID. calldepth is the depth\n\/\/ to use when recovering file:line information with runtime.Caller.\nfunc (c *Connection) log(\n\topID uint32,\n\tcalldepth int,\n\tformat string,\n\tv ...interface{}) {\n\t\/\/ Get file:line info.\n\tvar file string\n\tvar line int\n\tvar ok bool\n\n\t_, file, line, ok = runtime.Caller(calldepth)\n\tif !ok {\n\t\tfile = \"???\"\n\t}\n\n\tfileLine := fmt.Sprintf(\"%v:%v\", path.Base(file), line)\n\n\t\/\/ Format the actual message to be printed.\n\tmsg := fmt.Sprintf(\n\t\t\"Op 0x%08x %24s] %v\",\n\t\topID,\n\t\tfileLine,\n\t\tfmt.Sprintf(format, v...))\n\n\t\/\/ Print it.\n\tc.logger.Println(msg)\n}\n\n\/\/ LOCKS_EXCLUDED(c.mu)\nfunc (c *Connection) recordCancelFunc(\n\treqID bazilfuse.RequestID,\n\tf func()) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tif _, ok := c.cancelFuncs[reqID]; ok {\n\t\tpanic(fmt.Sprintf(\"Already have cancel func for request %v\", reqID))\n\t}\n\n\tc.cancelFuncs[reqID] = f\n}\n\n\/\/ Set up state for an op that is about to be returned to the user, given its\n\/\/ bazilfuse request ID.\n\/\/\n\/\/ Return a context that should be used for the op.\n\/\/\n\/\/ LOCKS_EXCLUDED(c.mu)\nfunc (c *Connection) beginOp(reqID bazilfuse.RequestID) (ctx context.Context) {\n\t\/\/ Note that the op is in flight.\n\tc.opsInFlight.Add(1)\n\n\t\/\/ Set up a cancellation function.\n\tctx, cancel := context.WithCancel(c.parentCtx)\n\tc.recordCancelFunc(reqID, cancel)\n\n\treturn\n}\n\n\/\/ Clean up all state associated with an op to which the user has responded.\n\/\/\n\/\/ LOCKS_EXCLUDED(c.mu)\nfunc (c *Connection) finishOp(reqID bazilfuse.RequestID) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\t\/\/ Even though the op is finished, context.WithCancel requires us to arrange\n\t\/\/ for the cancellation function to be invoked. We also must remove it from\n\t\/\/ our map.\n\tcancel, ok := c.cancelFuncs[reqID]\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"Unknown request ID in finishOp: %v\", reqID))\n\t}\n\n\tcancel()\n\tdelete(c.cancelFuncs, reqID)\n\n\t\/\/ Decrement the in-flight counter.\n\tc.opsInFlight.Done()\n}\n\n\/\/ Read the next op from the kernel process. Return io.EOF if the kernel has\n\/\/ closed the connection.\n\/\/\n\/\/ This function delivers ops in exactly the order they are received from\n\/\/ \/dev\/fuse. It must not be called multiple times concurrently.\n\/\/\n\/\/ LOCKS_EXCLUDED(c.mu)\nfunc (c *Connection) ReadOp() (op fuseops.Op, err error) {\n\t\/\/ Keep going until we find a request we know how to convert.\n\tfor {\n\t\t\/\/ Read a bazilfuse request.\n\t\tvar bfReq bazilfuse.Request\n\t\tbfReq, err = c.wrapped.ReadRequest()\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ Choose an ID for this operation.\n\t\topID := c.nextOpID\n\t\tc.nextOpID++\n\n\t\t\/\/ Log the receipt of the operation.\n\t\tc.log(opID, 1, \"<- %v\", bfReq)\n\n\t\t\/\/ Special case: responding to this is required to make mounting work on OS\n\t\t\/\/ X. We don't currently expose the capability for the file system to\n\t\t\/\/ intercept this.\n\t\tif statfsReq, ok := bfReq.(*bazilfuse.StatfsRequest); ok {\n\t\t\tc.log(opID, 1, \"-> (Statfs) OK\")\n\t\t\tstatfsReq.Respond(&bazilfuse.StatfsResponse{})\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Set up op dependencies.\n\t\tvar reqID bazilfuse.RequestID = bfReq.Hdr().ID\n\t\topCtx := c.beginOp(reqID)\n\n\t\tlogForOp := func(calldepth int, format string, v ...interface{}) {\n\t\t\tc.log(opID, calldepth+1, format, v...)\n\t\t}\n\n\t\tfinished := func(err error) { c.finishOp(reqID) }\n\n\t\top = fuseops.Convert(opCtx, bfReq, logForOp, finished)\n\t\treturn\n\t}\n}\n\nfunc (c *Connection) waitForReady() (err error) {\n\t<-c.wrapped.Ready\n\terr = c.wrapped.MountError\n\treturn\n}\n\n\/\/ Close the connection and wait for in-flight ops.\nfunc (c *Connection) close() (err error) {\n\terr = c.wrapped.Close()\n\tc.opsInFlight.Wait()\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package sphere\n\nimport (\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n)\n\n\/\/ NewConnection returns a new ws connection instance\nfunc NewConnection(w http.ResponseWriter, r *http.Request) (*Connection, error) {\n\tconn, err := upgrader.Upgrade(w, r, nil)\n\tif err == nil {\n\t\treturn &Connection{guid.String(), []*Channel{}, make(chan []byte), r, conn}, nil\n\t}\n\treturn nil, err\n}\n\n\/\/ Connection allows you to interact with backend and other client sockets in realtime\ntype Connection struct {\n\t\/\/ the id of the connection\n\tid string\n\t\/\/ list of channels that this connection has been subscribed\n\tchannels []*Channel\n\t\/\/ buffered channel of outbound messages\n\tsend chan []byte\n\t\/\/ http request\n\trequest *http.Request\n\t\/\/ websocket connection\n\t*websocket.Conn\n}\n\n\/\/ write writes a message with the given message type and payload.\nfunc (conn *Connection) emit(mt int, payload []byte) error {\n\tconn.SetWriteDeadline(time.Now().Add(writeWait))\n\treturn conn.WriteMessage(mt, payload)\n}\n\n\/\/ writePump pumps messages from the sphere to the websocket connection.\nfunc (conn *Connection) writePump() error {\n\tticker := time.NewTicker(pingPeriod)\n\tdefer func() {\n\t\tticker.Stop()\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase msg, ok := <-conn.send:\n\t\t\tif !ok {\n\t\t\t\treturn conn.emit(websocket.CloseMessage, []byte{})\n\t\t\t}\n\t\t\tif err := conn.emit(websocket.TextMessage, msg); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\tif err := conn.emit(websocket.PingMessage, []byte{}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (conn *Connection) readPump() {\n\tconn.SetReadLimit(maxMessageSize)\n\tconn.SetReadDeadline(time.Now().Add(pongWait))\n\tconn.SetPingHandler(func(string) error { conn.SetReadDeadline(time.Now().Add(pongWait)); return nil })\n\tfor {\n\t\t_, msg, err := conn.ReadMessage()\n\t\tif err != nil {\n\t\t}\n\t\tif msg != nil {\n\t\t}\n\t}\n}\n<commit_msg>connection improvments<commit_after>package sphere\n\nimport (\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n)\n\n\/\/ NewConnection returns a new ws connection instance\nfunc NewConnection(w http.ResponseWriter, r *http.Request) (*Connection, error) {\n\tws, err := upgrader.Upgrade(w, r, nil)\n\tif err == nil {\n\t\tconn := &Connection{guid.String(), []*Channel{}, make(chan []byte), make(chan *Packet), r, ws}\n\t\tgo conn.queue()\n\t\treturn conn, nil\n\t}\n\treturn nil, err\n}\n\n\/\/ Connection allows you to interact with backend and other client sockets in realtime\ntype Connection struct {\n\t\/\/ the id of the connection\n\tid string\n\t\/\/ list of channels that this connection has been subscribed\n\tchannels []*Channel\n\t\/\/ buffered channel of outbound messages\n\tsend chan []byte\n\t\/\/ buffered channel of inbound messages\n\treceive chan *Packet\n\t\/\/ http request\n\trequest *http.Request\n\t\/\/ websocket connection\n\t*websocket.Conn\n}\n\nfunc (conn *Connection) queue() {\n\tfor {\n\t\tselect {\n\t\tcase <-conn.receive:\n\t\t}\n\t}\n}\n\n\/\/ write writes a message with the given message type and payload.\nfunc (conn *Connection) emit(mt int, payload []byte) error {\n\tconn.SetWriteDeadline(time.Now().Add(writeWait))\n\treturn conn.WriteMessage(mt, payload)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/dickeyxxx\/golock\"\n\t\"github.com\/heroku\/heroku-cli\/gode\"\n)\n\n\/\/ Plugin represents a javascript plugin\ntype Plugin struct {\n\tName string `json:\"name\"`\n\tVersion string `json:\"version\"`\n\tTopics TopicSet `json:\"topics\"`\n\tTopic *Topic `json:\"topic\"`\n\tCommands CommandSet `json:\"commands\"`\n}\n\n\/\/ SetupNode sets up node and npm in ~\/.heroku\nfunc SetupNode() {\n\tgode.SetRootPath(AppDir())\n\tsetup, err := gode.IsSetup()\n\tPrintError(err, false)\n\tif !setup {\n\t\tPrintError(gode.Setup(), true)\n\t}\n}\n\n\/\/ LoadPlugins loads the topics and commands from the JavaScript plugins into the CLI\nfunc (cli *Cli) LoadPlugins(plugins map[string]*Plugin) {\n\tfor _, plugin := range plugins {\n\t\tfor _, topic := range plugin.Topics {\n\t\t\tcli.AddTopic(topic)\n\t\t}\n\t\tif plugin.Topic != nil {\n\t\t\tcli.AddTopic(plugin.Topic)\n\t\t}\n\t\tfor _, command := range plugin.Commands {\n\t\t\tif !cli.AddCommand(command) {\n\t\t\t\tErrf(\"WARNING: command %s has already been defined\\n\", command)\n\t\t\t}\n\t\t}\n\t}\n\tsort.Sort(cli.Topics)\n\tsort.Sort(cli.Commands)\n}\n\nvar pluginsTopic = &Topic{\n\tName: \"plugins\",\n\tDescription: \"manage plugins\",\n}\n\nvar pluginsInstallCmd = &Command{\n\tTopic: \"plugins\",\n\tCommand: \"install\",\n\tHidden: true,\n\tArgs: []Arg{{Name: \"name\"}},\n\tDescription: \"Installs a plugin into the CLI\",\n\tHelp: `Install a Heroku plugin\n\n Example:\n $ heroku plugins:install dickeyxxx\/heroku-production-status`,\n\n\tRun: func(ctx *Context) {\n\t\tname := ctx.Args.(map[string]string)[\"name\"]\n\t\tif len(name) == 0 {\n\t\t\tErrln(\"Must specify a plugin name\")\n\t\t\treturn\n\t\t}\n\t\tErrf(\"Installing plugin %s...\", name)\n\t\tExitIfError(installPlugins(name), true)\n\t\tErrln(\" done\")\n\t},\n}\n\nvar pluginsLinkCmd = &Command{\n\tTopic: \"plugins\",\n\tCommand: \"link\",\n\tDescription: \"Links a local plugin into CLI\",\n\tArgs: []Arg{{Name: \"path\", Optional: true}},\n\tHelp: `Links a local plugin into CLI.\n\tThis is useful when developing plugins locally.\n\tIt simply symlinks the specified path into ~\/.heroku\/node_modules\n\n Example:\n\t$ heroku plugins:link .`,\n\n\tRun: func(ctx *Context) {\n\t\tpath := ctx.Args.(map[string]string)[\"path\"]\n\t\tif path == \"\" {\n\t\t\tpath = \".\"\n\t\t}\n\t\tpath, err := filepath.Abs(path)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif _, err := os.Stat(path); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tname := filepath.Base(path)\n\t\tnewPath := pluginPath(name)\n\t\tos.Remove(newPath)\n\t\tos.RemoveAll(newPath)\n\t\terr = os.Symlink(path, newPath)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tplugin, err := ParsePlugin(name)\n\t\tExitIfError(err, false)\n\t\tif name != plugin.Name {\n\t\t\tpath = newPath\n\t\t\tnewPath = pluginPath(plugin.Name)\n\t\t\tos.Remove(newPath)\n\t\t\tos.RemoveAll(newPath)\n\t\t\tos.Rename(path, newPath)\n\t\t}\n\t\tPrintln(\"Symlinked\", plugin.Name)\n\t\tAddPluginsToCache(plugin)\n\t},\n}\n\nvar pluginsUninstallCmd = &Command{\n\tTopic: \"plugins\",\n\tCommand: \"uninstall\",\n\tHidden: true,\n\tArgs: []Arg{{Name: \"name\"}},\n\tDescription: \"Uninstalls a plugin from the CLI\",\n\tHelp: `Uninstalls a Heroku plugin\n\n Example:\n $ heroku plugins:uninstall heroku-production-status`,\n\n\tRun: func(ctx *Context) {\n\t\tname := ctx.Args.(map[string]string)[\"name\"]\n\t\tif !contains(PluginNames(), name) {\n\t\t\tExitIfError(errors.New(name+\" is not installed\"), false)\n\t\t}\n\t\tErrf(\"Uninstalling plugin %s...\", name)\n\t\tExitIfError(gode.RemovePackages(name), true)\n\t\tRemovePluginFromCache(name)\n\t\tErrln(\" done\")\n\t},\n}\n\nvar pluginsListCmd = &Command{\n\tTopic: \"plugins\",\n\tHidden: true,\n\tDescription: \"Lists installed plugins\",\n\tHelp: `\nExample:\n $ heroku plugins`,\n\n\tRun: func(ctx *Context) {\n\t\tSetupBuiltinPlugins()\n\t\tvar plugins []string\n\t\tfor _, plugin := range GetPlugins() {\n\t\t\tif plugin != nil && len(plugin.Commands) > 0 {\n\t\t\t\tsymlinked := \"\"\n\t\t\t\tif isPluginSymlinked(plugin.Name) {\n\t\t\t\t\tsymlinked = \" (symlinked)\"\n\t\t\t\t}\n\t\t\t\tplugins = append(plugins, fmt.Sprintf(\"%s %s %s\", plugin.Name, plugin.Version, symlinked))\n\t\t\t}\n\t\t}\n\t\tsort.Strings(plugins)\n\t\tfor _, plugin := range plugins {\n\t\t\tPrintln(plugin)\n\t\t}\n\t},\n}\n\nfunc runFn(plugin *Plugin, topic, command string) func(ctx *Context) {\n\treturn func(ctx *Context) {\n\t\treadLockPlugin(plugin.Name)\n\t\tctx.Dev = isPluginSymlinked(plugin.Name)\n\t\tctxJSON, err := json.Marshal(ctx)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\ttitle, _ := json.Marshal(processTitle(ctx))\n\t\tscript := fmt.Sprintf(`\n\t\t'use strict';\n\t\tvar moduleName = '%s';\n\t\tvar moduleVersion = '%s';\n\t\tvar topic = '%s';\n\t\tvar command = '%s';\n\t\tprocess.title = %s;\n\t\tvar ctx = %s;\n\t\tctx.version = ctx.version + ' ' + moduleName + '\/' + moduleVersion + ' node-' + process.version;\n\t\tvar logPath = %s;\n\t\tprocess.chdir(ctx.cwd);\n\t\tif (!ctx.dev) {\n\t\t\tprocess.on('uncaughtException', function (err) {\n\t\t\t\t\/\/ ignore EPIPE errors (usually from piping to head)\n\t\t\t\tif (err.code === \"EPIPE\") return;\n\t\t\t\tconsole.error(' ! Error in ' + moduleName + ':')\n\t\t\t\tconsole.error(' ! ' + err.message || err);\n\t\t\t\tif (err.stack) {\n\t\t\t\t\tvar fs = require('fs');\n\t\t\t\t\tvar log = function (line) {\n\t\t\t\t\t\tvar d = new Date().toISOString()\n\t\t\t\t\t\t.replace(\/T\/, ' ')\n\t\t\t\t\t\t.replace(\/-\/g, '\/')\n\t\t\t\t\t\t.replace(\/\\..+\/, '');\n\t\t\t\t\t\tfs.appendFileSync(logPath, d + ' ' + line + '\\n');\n\t\t\t\t\t}\n\t\t\t\t\tlog('Error during ' + topic + ':' + command);\n\t\t\t\t\tlog(err.stack);\n\t\t\t\t\tconsole.error(' ! See ' + logPath + ' for more info.');\n\t\t\t\t}\n\t\t\t\tprocess.exit(1);\n\t\t\t});\n\t\t}\n\t\tif (command === '') { command = null }\n\t\tvar module = require(moduleName);\n\t\tvar cmd = module.commands.filter(function (c) {\n\t\t\treturn c.topic === topic && c.command == command;\n\t\t})[0];\n\t\tcmd.run(ctx);`, plugin.Name, plugin.Version, topic, command, string(title), ctxJSON, strconv.Quote(ErrLogPath))\n\n\t\t\/\/ swallow sigint since the plugin will handle it\n\t\tswallowSignal(os.Interrupt)\n\n\t\tcmd := gode.RunScript(script)\n\t\tcmd.Stdin = os.Stdin\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\t\tif ctx.Flags[\"debugger\"] == true {\n\t\t\tcmd = gode.DebugScript(script)\n\t\t}\n\t\tif err := cmd.Run(); err != nil {\n\t\t\tos.Exit(getExitCode(err))\n\t\t}\n\t}\n}\n\nfunc swallowSignal(s os.Signal) {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, s)\n\tgo func() {\n\t\t<-c\n\t}()\n}\n\nfunc getExitCode(err error) int {\n\tswitch e := err.(type) {\n\tcase nil:\n\t\treturn 0\n\tcase *exec.ExitError:\n\t\tstatus, ok := e.Sys().(syscall.WaitStatus)\n\t\tif !ok {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn status.ExitStatus()\n\tdefault:\n\t\tpanic(err)\n\t}\n}\n\n\/\/ ParsePlugin requires the plugin's node module\n\/\/ to get the commands and metadata\nfunc ParsePlugin(name string) (*Plugin, error) {\n\tscript := `\n\tvar plugin = require('` + name + `');\n\tif (!plugin.commands) throw new Error('Contains no commands. Is this a real plugin?');\n\tvar pjson = require('` + name + `\/package.json');\n\n\tplugin.name = pjson.name;\n\tplugin.version = pjson.version;\n\n\tconsole.log(JSON.stringify(plugin))`\n\tcmd := gode.RunScript(script)\n\tcmd.Stderr = Stderr\n\toutput, err := cmd.Output()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error reading plugin: %s\\n%s\", name, err)\n\t}\n\tvar plugin Plugin\n\terr = json.Unmarshal([]byte(output), &plugin)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error parsing plugin: %s\\n%s\\n%s\", name, err, string(output))\n\t}\n\tfor _, command := range plugin.Commands {\n\t\tif command == nil {\n\t\t\tcontinue\n\t\t}\n\t\tcommand.Plugin = plugin.Name\n\t\tcommand.Help = strings.TrimSpace(command.Help)\n\t}\n\treturn &plugin, nil\n}\n\n\/\/ GetPlugins goes through all the node plugins and returns them in Go stucts\nfunc GetPlugins() map[string]*Plugin {\n\tplugins := FetchPluginCache()\n\tfor name, plugin := range plugins {\n\t\tif plugin == nil || !pluginExists(name) {\n\t\t\tdelete(plugins, name)\n\t\t} else {\n\t\t\tfor _, command := range plugin.Commands {\n\t\t\t\tcommand.Run = runFn(plugin, command.Topic, command.Command)\n\t\t\t}\n\t\t}\n\t}\n\treturn plugins\n}\n\n\/\/ PluginNames lists all the plugin names\nfunc PluginNames() []string {\n\tplugins := FetchPluginCache()\n\tnames := make([]string, 0, len(plugins))\n\tfor _, plugin := range plugins {\n\t\tif plugin != nil && pluginExists(plugin.Name) && len(plugin.Commands) > 0 {\n\t\t\tnames = append(names, plugin.Name)\n\t\t}\n\t}\n\treturn names\n}\n\n\/\/ PluginNamesNotSymlinked returns all the plugins that are not symlinked\nfunc PluginNamesNotSymlinked() []string {\n\ta := PluginNames()\n\tb := make([]string, 0, len(a))\n\tfor _, plugin := range a {\n\t\tif !isPluginSymlinked(plugin) {\n\t\t\tb = append(b, plugin)\n\t\t}\n\t}\n\treturn b\n}\n\nfunc isPluginSymlinked(plugin string) bool {\n\tpath := filepath.Join(AppDir(), \"node_modules\", plugin)\n\tfi, err := os.Lstat(path)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn fi.Mode()&os.ModeSymlink != 0\n}\n\n\/\/ SetupBuiltinPlugins ensures all the builtinPlugins are installed\nfunc SetupBuiltinPlugins() {\n\tpluginNames := difference(BuiltinPlugins, PluginNames())\n\tif len(pluginNames) == 0 {\n\t\treturn\n\t}\n\tErr(\"heroku-cli: Installing core plugins...\")\n\tif err := installPlugins(pluginNames...); err != nil {\n\t\t\/\/ retry once\n\t\tPrintError(gode.RemovePackages(pluginNames...), true)\n\t\tPrintError(gode.ClearCache(), true)\n\t\tErr(\"\\rheroku-cli: Installing core plugins (retrying)...\")\n\t\tExitIfError(installPlugins(pluginNames...), true)\n\t}\n\tErrln(\" done\")\n}\n\nfunc difference(a, b []string) []string {\n\tres := make([]string, 0, len(a))\n\tfor _, aa := range a {\n\t\tif !contains(b, aa) {\n\t\t\tres = append(res, aa)\n\t\t}\n\t}\n\treturn res\n}\n\nfunc contains(arr []string, s string) bool {\n\tfor _, a := range arr {\n\t\tif a == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc installPlugins(names ...string) error {\n\tfor _, name := range names {\n\t\tlockPlugin(name)\n\t}\n\tdefer func() {\n\t\tfor _, name := range names {\n\t\t\tunlockPlugin(name)\n\t\t}\n\t}()\n\terr := gode.InstallPackages(names...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tplugins := make([]*Plugin, 0, len(names))\n\tfor _, name := range names {\n\t\tplugin, err := ParsePlugin(name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tplugins = append(plugins, plugin)\n\t}\n\tAddPluginsToCache(plugins...)\n\treturn nil\n}\n\nfunc pluginExists(plugin string) bool {\n\texists, _ := fileExists(pluginPath(plugin))\n\treturn exists\n}\n\n\/\/ directory location of plugin\nfunc pluginPath(plugin string) string {\n\treturn filepath.Join(AppDir(), \"node_modules\", plugin)\n}\n\n\/\/ lock a plugin for reading\nfunc readLockPlugin(name string) {\n\tlockfile := updateLockPath + \".\" + name\n\tlocked, err := golock.IsLocked(lockfile)\n\tLogIfError(err)\n\tif locked {\n\t\tlockPlugin(name)\n\t\tunlockPlugin(name)\n\t}\n}\n\n\/\/ lock a plugin for writing\nfunc lockPlugin(name string) {\n\tLogIfError(golock.Lock(updateLockPath + \".\" + name))\n}\n\n\/\/ unlock a plugin\nfunc unlockPlugin(name string) {\n\tLogIfError(golock.Unlock(updateLockPath + \".\" + name))\n}\n<commit_msg>panic if gode fails to setup<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/dickeyxxx\/golock\"\n\t\"github.com\/heroku\/heroku-cli\/gode\"\n)\n\n\/\/ Plugin represents a javascript plugin\ntype Plugin struct {\n\tName string `json:\"name\"`\n\tVersion string `json:\"version\"`\n\tTopics TopicSet `json:\"topics\"`\n\tTopic *Topic `json:\"topic\"`\n\tCommands CommandSet `json:\"commands\"`\n}\n\n\/\/ SetupNode sets up node and npm in ~\/.heroku\nfunc SetupNode() {\n\tgode.SetRootPath(AppDir())\n\tsetup, err := gode.IsSetup()\n\tPrintError(err, false)\n\tif !setup {\n\t\tpanic(gode.Setup())\n\t}\n}\n\n\/\/ LoadPlugins loads the topics and commands from the JavaScript plugins into the CLI\nfunc (cli *Cli) LoadPlugins(plugins map[string]*Plugin) {\n\tfor _, plugin := range plugins {\n\t\tfor _, topic := range plugin.Topics {\n\t\t\tcli.AddTopic(topic)\n\t\t}\n\t\tif plugin.Topic != nil {\n\t\t\tcli.AddTopic(plugin.Topic)\n\t\t}\n\t\tfor _, command := range plugin.Commands {\n\t\t\tif !cli.AddCommand(command) {\n\t\t\t\tErrf(\"WARNING: command %s has already been defined\\n\", command)\n\t\t\t}\n\t\t}\n\t}\n\tsort.Sort(cli.Topics)\n\tsort.Sort(cli.Commands)\n}\n\nvar pluginsTopic = &Topic{\n\tName: \"plugins\",\n\tDescription: \"manage plugins\",\n}\n\nvar pluginsInstallCmd = &Command{\n\tTopic: \"plugins\",\n\tCommand: \"install\",\n\tHidden: true,\n\tArgs: []Arg{{Name: \"name\"}},\n\tDescription: \"Installs a plugin into the CLI\",\n\tHelp: `Install a Heroku plugin\n\n Example:\n $ heroku plugins:install dickeyxxx\/heroku-production-status`,\n\n\tRun: func(ctx *Context) {\n\t\tname := ctx.Args.(map[string]string)[\"name\"]\n\t\tif len(name) == 0 {\n\t\t\tErrln(\"Must specify a plugin name\")\n\t\t\treturn\n\t\t}\n\t\tErrf(\"Installing plugin %s...\", name)\n\t\tExitIfError(installPlugins(name), true)\n\t\tErrln(\" done\")\n\t},\n}\n\nvar pluginsLinkCmd = &Command{\n\tTopic: \"plugins\",\n\tCommand: \"link\",\n\tDescription: \"Links a local plugin into CLI\",\n\tArgs: []Arg{{Name: \"path\", Optional: true}},\n\tHelp: `Links a local plugin into CLI.\n\tThis is useful when developing plugins locally.\n\tIt simply symlinks the specified path into ~\/.heroku\/node_modules\n\n Example:\n\t$ heroku plugins:link .`,\n\n\tRun: func(ctx *Context) {\n\t\tpath := ctx.Args.(map[string]string)[\"path\"]\n\t\tif path == \"\" {\n\t\t\tpath = \".\"\n\t\t}\n\t\tpath, err := filepath.Abs(path)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif _, err := os.Stat(path); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tname := filepath.Base(path)\n\t\tnewPath := pluginPath(name)\n\t\tos.Remove(newPath)\n\t\tos.RemoveAll(newPath)\n\t\terr = os.Symlink(path, newPath)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tplugin, err := ParsePlugin(name)\n\t\tExitIfError(err, false)\n\t\tif name != plugin.Name {\n\t\t\tpath = newPath\n\t\t\tnewPath = pluginPath(plugin.Name)\n\t\t\tos.Remove(newPath)\n\t\t\tos.RemoveAll(newPath)\n\t\t\tos.Rename(path, newPath)\n\t\t}\n\t\tPrintln(\"Symlinked\", plugin.Name)\n\t\tAddPluginsToCache(plugin)\n\t},\n}\n\nvar pluginsUninstallCmd = &Command{\n\tTopic: \"plugins\",\n\tCommand: \"uninstall\",\n\tHidden: true,\n\tArgs: []Arg{{Name: \"name\"}},\n\tDescription: \"Uninstalls a plugin from the CLI\",\n\tHelp: `Uninstalls a Heroku plugin\n\n Example:\n $ heroku plugins:uninstall heroku-production-status`,\n\n\tRun: func(ctx *Context) {\n\t\tname := ctx.Args.(map[string]string)[\"name\"]\n\t\tif !contains(PluginNames(), name) {\n\t\t\tExitIfError(errors.New(name+\" is not installed\"), false)\n\t\t}\n\t\tErrf(\"Uninstalling plugin %s...\", name)\n\t\tExitIfError(gode.RemovePackages(name), true)\n\t\tRemovePluginFromCache(name)\n\t\tErrln(\" done\")\n\t},\n}\n\nvar pluginsListCmd = &Command{\n\tTopic: \"plugins\",\n\tHidden: true,\n\tDescription: \"Lists installed plugins\",\n\tHelp: `\nExample:\n $ heroku plugins`,\n\n\tRun: func(ctx *Context) {\n\t\tSetupBuiltinPlugins()\n\t\tvar plugins []string\n\t\tfor _, plugin := range GetPlugins() {\n\t\t\tif plugin != nil && len(plugin.Commands) > 0 {\n\t\t\t\tsymlinked := \"\"\n\t\t\t\tif isPluginSymlinked(plugin.Name) {\n\t\t\t\t\tsymlinked = \" (symlinked)\"\n\t\t\t\t}\n\t\t\t\tplugins = append(plugins, fmt.Sprintf(\"%s %s %s\", plugin.Name, plugin.Version, symlinked))\n\t\t\t}\n\t\t}\n\t\tsort.Strings(plugins)\n\t\tfor _, plugin := range plugins {\n\t\t\tPrintln(plugin)\n\t\t}\n\t},\n}\n\nfunc runFn(plugin *Plugin, topic, command string) func(ctx *Context) {\n\treturn func(ctx *Context) {\n\t\treadLockPlugin(plugin.Name)\n\t\tctx.Dev = isPluginSymlinked(plugin.Name)\n\t\tctxJSON, err := json.Marshal(ctx)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\ttitle, _ := json.Marshal(processTitle(ctx))\n\t\tscript := fmt.Sprintf(`\n\t\t'use strict';\n\t\tvar moduleName = '%s';\n\t\tvar moduleVersion = '%s';\n\t\tvar topic = '%s';\n\t\tvar command = '%s';\n\t\tprocess.title = %s;\n\t\tvar ctx = %s;\n\t\tctx.version = ctx.version + ' ' + moduleName + '\/' + moduleVersion + ' node-' + process.version;\n\t\tvar logPath = %s;\n\t\tprocess.chdir(ctx.cwd);\n\t\tif (!ctx.dev) {\n\t\t\tprocess.on('uncaughtException', function (err) {\n\t\t\t\t\/\/ ignore EPIPE errors (usually from piping to head)\n\t\t\t\tif (err.code === \"EPIPE\") return;\n\t\t\t\tconsole.error(' ! Error in ' + moduleName + ':')\n\t\t\t\tconsole.error(' ! ' + err.message || err);\n\t\t\t\tif (err.stack) {\n\t\t\t\t\tvar fs = require('fs');\n\t\t\t\t\tvar log = function (line) {\n\t\t\t\t\t\tvar d = new Date().toISOString()\n\t\t\t\t\t\t.replace(\/T\/, ' ')\n\t\t\t\t\t\t.replace(\/-\/g, '\/')\n\t\t\t\t\t\t.replace(\/\\..+\/, '');\n\t\t\t\t\t\tfs.appendFileSync(logPath, d + ' ' + line + '\\n');\n\t\t\t\t\t}\n\t\t\t\t\tlog('Error during ' + topic + ':' + command);\n\t\t\t\t\tlog(err.stack);\n\t\t\t\t\tconsole.error(' ! See ' + logPath + ' for more info.');\n\t\t\t\t}\n\t\t\t\tprocess.exit(1);\n\t\t\t});\n\t\t}\n\t\tif (command === '') { command = null }\n\t\tvar module = require(moduleName);\n\t\tvar cmd = module.commands.filter(function (c) {\n\t\t\treturn c.topic === topic && c.command == command;\n\t\t})[0];\n\t\tcmd.run(ctx);`, plugin.Name, plugin.Version, topic, command, string(title), ctxJSON, strconv.Quote(ErrLogPath))\n\n\t\t\/\/ swallow sigint since the plugin will handle it\n\t\tswallowSignal(os.Interrupt)\n\n\t\tcmd := gode.RunScript(script)\n\t\tcmd.Stdin = os.Stdin\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\t\tif ctx.Flags[\"debugger\"] == true {\n\t\t\tcmd = gode.DebugScript(script)\n\t\t}\n\t\tif err := cmd.Run(); err != nil {\n\t\t\tos.Exit(getExitCode(err))\n\t\t}\n\t}\n}\n\nfunc swallowSignal(s os.Signal) {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, s)\n\tgo func() {\n\t\t<-c\n\t}()\n}\n\nfunc getExitCode(err error) int {\n\tswitch e := err.(type) {\n\tcase nil:\n\t\treturn 0\n\tcase *exec.ExitError:\n\t\tstatus, ok := e.Sys().(syscall.WaitStatus)\n\t\tif !ok {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn status.ExitStatus()\n\tdefault:\n\t\tpanic(err)\n\t}\n}\n\n\/\/ ParsePlugin requires the plugin's node module\n\/\/ to get the commands and metadata\nfunc ParsePlugin(name string) (*Plugin, error) {\n\tscript := `\n\tvar plugin = require('` + name + `');\n\tif (!plugin.commands) throw new Error('Contains no commands. Is this a real plugin?');\n\tvar pjson = require('` + name + `\/package.json');\n\n\tplugin.name = pjson.name;\n\tplugin.version = pjson.version;\n\n\tconsole.log(JSON.stringify(plugin))`\n\tcmd := gode.RunScript(script)\n\tcmd.Stderr = Stderr\n\toutput, err := cmd.Output()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error reading plugin: %s\\n%s\", name, err)\n\t}\n\tvar plugin Plugin\n\terr = json.Unmarshal([]byte(output), &plugin)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Error parsing plugin: %s\\n%s\\n%s\", name, err, string(output))\n\t}\n\tfor _, command := range plugin.Commands {\n\t\tif command == nil {\n\t\t\tcontinue\n\t\t}\n\t\tcommand.Plugin = plugin.Name\n\t\tcommand.Help = strings.TrimSpace(command.Help)\n\t}\n\treturn &plugin, nil\n}\n\n\/\/ GetPlugins goes through all the node plugins and returns them in Go stucts\nfunc GetPlugins() map[string]*Plugin {\n\tplugins := FetchPluginCache()\n\tfor name, plugin := range plugins {\n\t\tif plugin == nil || !pluginExists(name) {\n\t\t\tdelete(plugins, name)\n\t\t} else {\n\t\t\tfor _, command := range plugin.Commands {\n\t\t\t\tcommand.Run = runFn(plugin, command.Topic, command.Command)\n\t\t\t}\n\t\t}\n\t}\n\treturn plugins\n}\n\n\/\/ PluginNames lists all the plugin names\nfunc PluginNames() []string {\n\tplugins := FetchPluginCache()\n\tnames := make([]string, 0, len(plugins))\n\tfor _, plugin := range plugins {\n\t\tif plugin != nil && pluginExists(plugin.Name) && len(plugin.Commands) > 0 {\n\t\t\tnames = append(names, plugin.Name)\n\t\t}\n\t}\n\treturn names\n}\n\n\/\/ PluginNamesNotSymlinked returns all the plugins that are not symlinked\nfunc PluginNamesNotSymlinked() []string {\n\ta := PluginNames()\n\tb := make([]string, 0, len(a))\n\tfor _, plugin := range a {\n\t\tif !isPluginSymlinked(plugin) {\n\t\t\tb = append(b, plugin)\n\t\t}\n\t}\n\treturn b\n}\n\nfunc isPluginSymlinked(plugin string) bool {\n\tpath := filepath.Join(AppDir(), \"node_modules\", plugin)\n\tfi, err := os.Lstat(path)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn fi.Mode()&os.ModeSymlink != 0\n}\n\n\/\/ SetupBuiltinPlugins ensures all the builtinPlugins are installed\nfunc SetupBuiltinPlugins() {\n\tpluginNames := difference(BuiltinPlugins, PluginNames())\n\tif len(pluginNames) == 0 {\n\t\treturn\n\t}\n\tErr(\"heroku-cli: Installing core plugins...\")\n\tif err := installPlugins(pluginNames...); err != nil {\n\t\t\/\/ retry once\n\t\tPrintError(gode.RemovePackages(pluginNames...), true)\n\t\tPrintError(gode.ClearCache(), true)\n\t\tErr(\"\\rheroku-cli: Installing core plugins (retrying)...\")\n\t\tExitIfError(installPlugins(pluginNames...), true)\n\t}\n\tErrln(\" done\")\n}\n\nfunc difference(a, b []string) []string {\n\tres := make([]string, 0, len(a))\n\tfor _, aa := range a {\n\t\tif !contains(b, aa) {\n\t\t\tres = append(res, aa)\n\t\t}\n\t}\n\treturn res\n}\n\nfunc contains(arr []string, s string) bool {\n\tfor _, a := range arr {\n\t\tif a == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc installPlugins(names ...string) error {\n\tfor _, name := range names {\n\t\tlockPlugin(name)\n\t}\n\tdefer func() {\n\t\tfor _, name := range names {\n\t\t\tunlockPlugin(name)\n\t\t}\n\t}()\n\terr := gode.InstallPackages(names...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tplugins := make([]*Plugin, 0, len(names))\n\tfor _, name := range names {\n\t\tplugin, err := ParsePlugin(name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tplugins = append(plugins, plugin)\n\t}\n\tAddPluginsToCache(plugins...)\n\treturn nil\n}\n\nfunc pluginExists(plugin string) bool {\n\texists, _ := fileExists(pluginPath(plugin))\n\treturn exists\n}\n\n\/\/ directory location of plugin\nfunc pluginPath(plugin string) string {\n\treturn filepath.Join(AppDir(), \"node_modules\", plugin)\n}\n\n\/\/ lock a plugin for reading\nfunc readLockPlugin(name string) {\n\tlockfile := updateLockPath + \".\" + name\n\tlocked, err := golock.IsLocked(lockfile)\n\tLogIfError(err)\n\tif locked {\n\t\tlockPlugin(name)\n\t\tunlockPlugin(name)\n\t}\n}\n\n\/\/ lock a plugin for writing\nfunc lockPlugin(name string) {\n\tLogIfError(golock.Lock(updateLockPath + \".\" + name))\n}\n\n\/\/ unlock a plugin\nfunc unlockPlugin(name string) {\n\tLogIfError(golock.Unlock(updateLockPath + \".\" + name))\n}\n<|endoftext|>"} {"text":"<commit_before>package gautomator\n\nimport (\n\t\"fmt\"\n\t\"github.com\/gonum\/matrix\/mat64\" \/\/ Matrix\n\t\"io\"\n\t\"time\"\n)\n\n\/\/ A task is an action executed by a module\ntype Task struct {\n\tId int `json:\"id\"`\n\tOrigin string `json:\"origin\"`\n\tName string `json:\"name\"` \/\/the task name\n\tNode string `json:\"node\"` \/\/ The node name\n\tModule string `json:\"module\"`\n\tArgs []string `json:\"args\"`\n\tStatus int `json:\"status\"` \/\/-2: queued\n\t\/\/ -1: running\n\t\/\/ >=0 : return code\n\tStartTime time.Time `json:\"startTime\"`\n\tEndTime time.Time `json:\"endTime\"`\n\tTaskCanRunChan chan bool \/\/ true: run, false: wait\n}\n\ntype jsonStructure struct {\n\tNodes []*Task `json:\"task\"`\n}\n\n\/\/ This is the structure corresponding to the \"dot-graph\" of a task list\n\/\/ We store the nodes in a map\n\/\/ The index is the source node\ntype TaskGraphStructure struct {\n\tTasks map[int]*Task\n\tDegreeMatrix *mat64.Dense\n\tAdjacencyMatrix *mat64.Dense \/\/ Row id is the map id of the source task\n\t\/\/ Col id is the map id of the destination task\n}\n\nfunc (this *TaskGraphStructure) PrintAdjacencyMatrix() {\n\trowSize, colSize := this.AdjacencyMatrix.Dims()\n\tfmt.Printf(\" \")\n\tfor c := 0; c < colSize; c++ {\n\t\tfmt.Printf(\"%v \", this.Tasks[c].Name)\n\t}\n\tfmt.Printf(\"\\n\")\n\tfor r := 0; r < rowSize; r++ {\n\t\tfmt.Printf(\"%v \", this.Tasks[r].Name)\n\t\tfor c := 0; c < colSize; c++ {\n\t\t\tfmt.Printf(\"%v \", this.AdjacencyMatrix.At(r, c))\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t}\n}\n\nfunc (this *TaskGraphStructure) PrintDegreeMatrix() {\n\trowSize, colSize := this.DegreeMatrix.Dims()\n\tfor r := 0; r < rowSize; r++ {\n\t\tfor c := 0; c < colSize; c++ {\n\t\t\tfmt.Printf(\"%v \", this.DegreeMatrix.At(r, c))\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t}\n}\n\nfunc NewTask() *Task {\n\treturn &Task{\n\t\t-1,\n\t\t\"null\",\n\t\t\"null\",\n\t\t\"null\",\n\t\t\"dummy\",\n\t\tmake([]string, 1),\n\t\t-2,\n\t\ttime.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),\n\t\ttime.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),\n\t\tmake(chan bool),\n\t}\n\n}\nfunc NewTaskGraphStructure() *TaskGraphStructure {\n\treturn &TaskGraphStructure{\n\t\tmake(map[int]*Task, 0),\n\t\tmat64.NewDense(0, 0, nil),\n\t\tmat64.NewDense(0, 0, nil),\n\t}\n}\n\n\/\/ Returns a combination of the current structure\n\/\/ and the one passed as argument\nfunc (this *TaskGraphStructure) AugmentTaskStructure(taskStructure *TaskGraphStructure) *TaskGraphStructure {\n\t\/\/ merging adjacency matrix\n\tinitialRowLen, initialColLen := this.AdjacencyMatrix.Dims()\n\taddedRowLen, addedColLen := taskStructure.AdjacencyMatrix.Dims()\n\tthis.AdjacencyMatrix = mat64.DenseCopyOf(this.AdjacencyMatrix.Grow(addedRowLen, addedColLen))\n\t\/\/a, b := this.AdjacencyMatrix.Dims()\n\tfor r := 0; r < initialRowLen+addedRowLen; r++ {\n\t\tfor c := 0; c < initialColLen+addedColLen; c++ {\n\t\t\tswitch {\n\t\t\tcase r < initialRowLen && c < initialColLen:\n\t\t\t\t\/\/ If we are in the original matrix: do nothing\n\t\t\tcase r < initialRowLen && c > initialColLen:\n\t\t\t\t\/\/ If outside, put some zero\n\t\t\t\tthis.AdjacencyMatrix.Set(r, c, float64(0))\n\t\t\tcase r > initialRowLen && c < initialColLen:\n\t\t\t\t\/\/ If outside, put some zero\n\t\t\t\tthis.AdjacencyMatrix.Set(r, c, float64(0))\n\t\t\tcase r >= initialRowLen && c >= initialColLen:\n\t\t\t\t\/\/ Add the new matrix\n\t\t\t\tthis.AdjacencyMatrix.Set(r, c, taskStructure.AdjacencyMatrix.At(r-initialRowLen, c-initialColLen))\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ merging degree matrix\n\tinitialRowLen, initialColLen = this.DegreeMatrix.Dims()\n\taddedRowLen, addedColLen = taskStructure.DegreeMatrix.Dims()\n\tthis.DegreeMatrix = mat64.DenseCopyOf(this.DegreeMatrix.Grow(addedRowLen, addedColLen))\n\tfor r := 0; r < initialRowLen+addedRowLen; r++ {\n\t\tfor c := 0; c < initialColLen+addedColLen; c++ {\n\t\t\tswitch {\n\t\t\tcase r < initialRowLen && c < initialColLen:\n\t\t\t\t\/\/ If we are in the original matrix: do nothing\n\t\t\tcase r < initialRowLen && c > initialColLen:\n\t\t\t\t\/\/ If outside, set zero\n\t\t\t\tthis.DegreeMatrix.Set(r, c, float64(0))\n\t\t\tcase r > initialRowLen && c < initialColLen:\n\t\t\t\t\/\/ If outside, set zero\n\t\t\t\tthis.DegreeMatrix.Set(r, c, float64(0))\n\t\t\tcase r >= initialRowLen && c >= initialColLen:\n\t\t\t\t\/\/ Add the new matrix\n\t\t\t\tthis.DegreeMatrix.Set(r, c, taskStructure.DegreeMatrix.At(r-initialRowLen, c-initialColLen))\n\t\t\t}\n\t\t}\n\t}\n\tactualSize := len(this.Tasks)\n\tfor i, task := range taskStructure.Tasks {\n\t\ttask.Id = actualSize + i\n\t\tthis.Tasks[actualSize+i] = task\n\t}\n\treturn this\n}\n\nfunc (this *TaskGraphStructure) getTaskFromName(name string) (int, *Task) {\n\tfor index, task := range this.Tasks {\n\t\tif task.Name == name {\n\t\t\treturn index, task\n\t\t}\n\t}\n\treturn -1, nil\n}\n\nfunc colSum(matrix *mat64.Dense, colId int) float64 {\n\trow, _ := matrix.Dims()\n\tsum := float64(0)\n\tfor r := 0; r < row; r++ {\n\t\tsum += matrix.At(r, colId)\n\t}\n\treturn sum\n}\n\nfunc rowSum(matrix *mat64.Dense, rowId int) float64 {\n\t_, col := matrix.Dims()\n\tsum := float64(0)\n\tfor c := 0; c < col; c++ {\n\t\tsum += matrix.At(rowId, c)\n\t}\n\treturn sum\n}\n\n\/\/ the aim of this function is to find if a task has a subdefinition (aka an origin) and change it\n\/\/ Example:\n\/\/ imagine the graphs\n\/\/ digraph bla {\n\/\/ a -> b;\n\/\/ b -> c;\n\/\/ }\n\/\/ digraph b {\n\/\/ alpha -> gamma;\n\/\/ }\n\/\/ then alpha and beta will have \"b\" as Origin.\n\/\/ therefore we should add a link in the AdjacencyMatix and in the DegreeMatrix\nfunc (this *TaskGraphStructure) Relink() *TaskGraphStructure {\n\t\/\/ IN this array we store the row,col on which we set 1\n\tbackup := make([]int, 0)\n\t_, col := this.AdjacencyMatrix.Dims()\n\tfor _, task := range this.Tasks {\n\t\tif colSum(this.AdjacencyMatrix, task.Id) == 0 {\n\t\t\tid, _ := this.getTaskFromName(task.Origin)\n\t\t\tif id != -1 {\n\t\t\t\t\/\/ Task is a meta task\n\t\t\t\tthis.Tasks[id].Module = \"meta\"\n\t\t\t\tthis.AdjacencyMatrix.Set(id, task.Id, float64(1))\n\t\t\t\tbackup = append(backup, id, task.Id)\n\t\t\t}\n\t\t}\n\t\tif rowSum(this.AdjacencyMatrix, task.Id) == 0 {\n\t\t\tid, _ := this.getTaskFromName(task.Origin)\n\t\t\tif id != -1 {\n\t\t\t\tfor c := 0; c < col; c++ {\n\t\t\t\t\tadd := true\n\t\t\t\t\tfor counter := 0; counter < len(backup)-1; counter += 2 {\n\t\t\t\t\t\tif backup[counter] == id && backup[counter+1] == c {\n\t\t\t\t\t\t\tadd = false\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif add == true {\n\t\t\t\t\t\tthis.AdjacencyMatrix.Set(task.Id, c, this.AdjacencyMatrix.At(task.Id, c)+this.AdjacencyMatrix.At(id, c))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\/\/TODO: complete the degreematrix\n\treturn this\n}\n\n\/\/ Duplicate the task \"id\"\n\/\/ Returns the id of the new task and the whole structure\nfunc (this *TaskGraphStructure) DuplicateTask(id int) (int, *TaskGraphStructure) {\n\trow, _ := this.AdjacencyMatrix.Dims()\n\t\/\/ Add the task to the list\n\torigin := this.Tasks[id]\n\tnewId := row + 1\n\tnewTask := origin\n\tnewTask.Id = newId\n\tthis.Tasks[newId] = newTask\n\t\/\/ Adjust the AdjacencyMatrix\n\tthis.AdjacencyMatrix = mat64.DenseCopyOf(this.AdjacencyMatrix.Grow(1, 1))\n\t\/\/ Copy the row 'id' to row 'newId'\n\tfor r := 0; r < newId; r++ {\n\t\tthis.AdjacencyMatrix.Set(r, newId, this.AdjacencyMatrix.At(r, id))\n\t}\n\t\/\/ Copy the col 'id' to col 'newId'\n\tfor c := 0; c < newId; c++ {\n\t\tthis.AdjacencyMatrix.Set(newId, c, this.AdjacencyMatrix.At(id, c))\n\t}\n\treturn newId, this\n}\n\n\/\/ This function print the dot file associated with the graph\nfunc (this *TaskGraphStructure) PrintDot(w io.Writer) {\n\tfmt.Fprintln(w, \"digraph G {\")\n\t\/\/ Writing node definition\n\tfor _, task := range this.Tasks {\n\t\tfmt.Fprintf(w, \"\\t\\\"%v\\\" [\\n\", task.Id)\n\t\tfmt.Fprintf(w, \"\\t\\tid = \\\"%v\\\"\\n\", task.Id)\n\t\tif task.Module == \"meta\" {\n\t\t\tfmt.Fprintln(w, \"\\t\\tshape=diamond\")\n\t\t\tfmt.Fprintf(w, \"\\t\\tlabel=\\\"%v\\\"\", task.Name)\n\t\t} else {\n\t\t\tfmt.Fprintf(w, \"\\t\\tlabel = \\\"<name>%v|<node>%v|<module>%v\\\"\\n\", task.Name, task.Node, task.Module)\n\t\t\tfmt.Fprintf(w, \"\\t\\tshape = \\\"record\\\"\\n\")\n\t\t}\n\t\tfmt.Fprintf(w, \"\\t];\\n\")\n\t}\n\trow, col := this.AdjacencyMatrix.Dims()\n\tfor r := 0; r < row; r++ {\n\t\tfor c := 0; c < col; c++ {\n\t\t\tif this.AdjacencyMatrix.At(r, c) == 1 {\n\t\t\t\tfmt.Fprintf(w, \"\\t%v -> %v\\n\", this.Tasks[r].Id, this.Tasks[c].Id)\n\t\t\t}\n\t\t}\n\t}\n\tfmt.Fprintln(w, \"}\")\n}\n\n\/\/ Return a structure of all the task with the given origin\nfunc (this *TaskGraphStructure) GetSubstructure(origin string) *TaskGraphStructure {\n\tsubTaskStructure := NewTaskGraphStructure()\n\tindex := 0\n\ttasksToExtract := make(map[int]*Task, 0)\n\tfor _, task := range this.Tasks {\n\t\tif task.Origin == origin {\n\t\t\t\/\/fmt.Printf(\"Adding %v(%v) at index:%v\\n\", task.Name, task.Id, index)\n\t\t\ttasksToExtract[index] = task\n\t\t\tindex += 1\n\t\t}\n\t}\n\t\/\/ Create the matrix of the correct size\n\tsize := len(tasksToExtract)\n\tif size > 0 {\n\t\tsubTaskStructure.AdjacencyMatrix = mat64.NewDense(size, size, nil)\n\t\tsubTaskStructure.DegreeMatrix = mat64.NewDense(size, size, nil)\n\t\tfor i := 0; i < size; i++ {\n\t\t\ttask := tasksToExtract[i]\n\t\t\t\/\/fmt.Printf(\"Task with ID:%v and name:%v will have id:%v\\n\", task.Id, task.Name, i)\n\t\t\t\/\/ BUG here probably\n\t\t\t\/\/ Construct the AdjacencyMatrix line by line\n\t\t\tfor col := 0; col < size; col++ {\n\t\t\t\ttask2 := tasksToExtract[col]\n\t\t\t\t\/\/fmt.Printf(\"Setting %v,%v with value from %v,%v\\n\", i, col, task.Id, task2.Id)\n\t\t\t\tsubTaskStructure.AdjacencyMatrix.Set(i, col, this.AdjacencyMatrix.At(task.Id, task2.Id))\n\t\t\t}\n\t\t\tsubTaskStructure.DegreeMatrix.Set(i, i, this.DegreeMatrix.At(task.Id, task.Id))\n\t\t\tsubTaskStructure.Tasks[i] = NewTask()\n\t\t\tsubTaskStructure.Tasks[i].Name = task.Name\n\t\t\tsubTaskStructure.Tasks[i].Module = task.Module\n\t\t\tsubTaskStructure.Tasks[i].Args = task.Args\n\t\t\tsubTaskStructure.Tasks[i].Origin = task.Origin\n\t\t\tsubTaskStructure.Tasks[i].Id = i\n\t\t}\n\t\t\/\/subTaskStructure.PrintAdjacencyMatrix()\n\t\treturn subTaskStructure\n\t} else {\n\t\treturn nil\n\t}\n}\n<commit_msg>Cleanup<commit_after>package gautomator\n\nimport (\n\t\"fmt\"\n\t\"github.com\/gonum\/matrix\/mat64\" \/\/ Matrix\n\t\"io\"\n\t\"time\"\n)\n\n\/\/ A task is an action executed by a module\ntype Task struct {\n\tId int `json:\"id\"`\n\tOrigin string `json:\"origin\"`\n\tName string `json:\"name\"` \/\/the task name\n\tNode string `json:\"node\"` \/\/ The node name\n\tModule string `json:\"module\"`\n\tArgs []string `json:\"args\"`\n\tStatus int `json:\"status\"` \/\/-2: queued\n\t\/\/ -1: running\n\t\/\/ >=0 : return code\n\tStartTime time.Time `json:\"startTime\"`\n\tEndTime time.Time `json:\"endTime\"`\n\tTaskCanRunChan chan bool \/\/ true: run, false: wait\n}\n\n\/\/ This is the structure corresponding to the \"dot-graph\" of a task list\n\/\/ We store the nodes in a map\n\/\/ The index is the source node\ntype TaskGraphStructure struct {\n\tTasks map[int]*Task\n\tDegreeMatrix *mat64.Dense\n\tAdjacencyMatrix *mat64.Dense \/\/ Row id is the map id of the source task\n\t\/\/ Col id is the map id of the destination task\n}\n\nfunc (this *TaskGraphStructure) PrintAdjacencyMatrix() {\n\trowSize, colSize := this.AdjacencyMatrix.Dims()\n\tfmt.Printf(\" \")\n\tfor c := 0; c < colSize; c++ {\n\t\tfmt.Printf(\"%v \", this.Tasks[c].Name)\n\t}\n\tfmt.Printf(\"\\n\")\n\tfor r := 0; r < rowSize; r++ {\n\t\tfmt.Printf(\"%v \", this.Tasks[r].Name)\n\t\tfor c := 0; c < colSize; c++ {\n\t\t\tfmt.Printf(\"%v \", this.AdjacencyMatrix.At(r, c))\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t}\n}\n\nfunc (this *TaskGraphStructure) PrintDegreeMatrix() {\n\trowSize, colSize := this.DegreeMatrix.Dims()\n\tfor r := 0; r < rowSize; r++ {\n\t\tfor c := 0; c < colSize; c++ {\n\t\t\tfmt.Printf(\"%v \", this.DegreeMatrix.At(r, c))\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t}\n}\n\nfunc NewTask() *Task {\n\treturn &Task{\n\t\t-1,\n\t\t\"null\",\n\t\t\"null\",\n\t\t\"null\",\n\t\t\"dummy\",\n\t\tmake([]string, 1),\n\t\t-2,\n\t\ttime.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),\n\t\ttime.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),\n\t\tmake(chan bool),\n\t}\n\n}\nfunc NewTaskGraphStructure() *TaskGraphStructure {\n\treturn &TaskGraphStructure{\n\t\tmake(map[int]*Task, 0),\n\t\tmat64.NewDense(0, 0, nil),\n\t\tmat64.NewDense(0, 0, nil),\n\t}\n}\n\n\/\/ Returns a combination of the current structure\n\/\/ and the one passed as argument\nfunc (this *TaskGraphStructure) AugmentTaskStructure(taskStructure *TaskGraphStructure) *TaskGraphStructure {\n\t\/\/ merging adjacency matrix\n\tinitialRowLen, initialColLen := this.AdjacencyMatrix.Dims()\n\taddedRowLen, addedColLen := taskStructure.AdjacencyMatrix.Dims()\n\tthis.AdjacencyMatrix = mat64.DenseCopyOf(this.AdjacencyMatrix.Grow(addedRowLen, addedColLen))\n\t\/\/a, b := this.AdjacencyMatrix.Dims()\n\tfor r := 0; r < initialRowLen+addedRowLen; r++ {\n\t\tfor c := 0; c < initialColLen+addedColLen; c++ {\n\t\t\tswitch {\n\t\t\tcase r < initialRowLen && c < initialColLen:\n\t\t\t\t\/\/ If we are in the original matrix: do nothing\n\t\t\tcase r < initialRowLen && c > initialColLen:\n\t\t\t\t\/\/ If outside, put some zero\n\t\t\t\tthis.AdjacencyMatrix.Set(r, c, float64(0))\n\t\t\tcase r > initialRowLen && c < initialColLen:\n\t\t\t\t\/\/ If outside, put some zero\n\t\t\t\tthis.AdjacencyMatrix.Set(r, c, float64(0))\n\t\t\tcase r >= initialRowLen && c >= initialColLen:\n\t\t\t\t\/\/ Add the new matrix\n\t\t\t\tthis.AdjacencyMatrix.Set(r, c, taskStructure.AdjacencyMatrix.At(r-initialRowLen, c-initialColLen))\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ merging degree matrix\n\tinitialRowLen, initialColLen = this.DegreeMatrix.Dims()\n\taddedRowLen, addedColLen = taskStructure.DegreeMatrix.Dims()\n\tthis.DegreeMatrix = mat64.DenseCopyOf(this.DegreeMatrix.Grow(addedRowLen, addedColLen))\n\tfor r := 0; r < initialRowLen+addedRowLen; r++ {\n\t\tfor c := 0; c < initialColLen+addedColLen; c++ {\n\t\t\tswitch {\n\t\t\tcase r < initialRowLen && c < initialColLen:\n\t\t\t\t\/\/ If we are in the original matrix: do nothing\n\t\t\tcase r < initialRowLen && c > initialColLen:\n\t\t\t\t\/\/ If outside, set zero\n\t\t\t\tthis.DegreeMatrix.Set(r, c, float64(0))\n\t\t\tcase r > initialRowLen && c < initialColLen:\n\t\t\t\t\/\/ If outside, set zero\n\t\t\t\tthis.DegreeMatrix.Set(r, c, float64(0))\n\t\t\tcase r >= initialRowLen && c >= initialColLen:\n\t\t\t\t\/\/ Add the new matrix\n\t\t\t\tthis.DegreeMatrix.Set(r, c, taskStructure.DegreeMatrix.At(r-initialRowLen, c-initialColLen))\n\t\t\t}\n\t\t}\n\t}\n\tactualSize := len(this.Tasks)\n\tfor i, task := range taskStructure.Tasks {\n\t\ttask.Id = actualSize + i\n\t\tthis.Tasks[actualSize+i] = task\n\t}\n\treturn this\n}\n\nfunc (this *TaskGraphStructure) getTaskFromName(name string) (int, *Task) {\n\tfor index, task := range this.Tasks {\n\t\tif task.Name == name {\n\t\t\treturn index, task\n\t\t}\n\t}\n\treturn -1, nil\n}\n\nfunc colSum(matrix *mat64.Dense, colId int) float64 {\n\trow, _ := matrix.Dims()\n\tsum := float64(0)\n\tfor r := 0; r < row; r++ {\n\t\tsum += matrix.At(r, colId)\n\t}\n\treturn sum\n}\n\nfunc rowSum(matrix *mat64.Dense, rowId int) float64 {\n\t_, col := matrix.Dims()\n\tsum := float64(0)\n\tfor c := 0; c < col; c++ {\n\t\tsum += matrix.At(rowId, c)\n\t}\n\treturn sum\n}\n\n\/\/ the aim of this function is to find if a task has a subdefinition (aka an origin) and change it\n\/\/ Example:\n\/\/ imagine the graphs\n\/\/ digraph bla {\n\/\/ a -> b;\n\/\/ b -> c;\n\/\/ }\n\/\/ digraph b {\n\/\/ alpha -> gamma;\n\/\/ }\n\/\/ then alpha and beta will have \"b\" as Origin.\n\/\/ therefore we should add a link in the AdjacencyMatix and in the DegreeMatrix\nfunc (this *TaskGraphStructure) Relink() *TaskGraphStructure {\n\t\/\/ IN this array we store the row,col on which we set 1\n\tbackup := make([]int, 0)\n\t_, col := this.AdjacencyMatrix.Dims()\n\tfor _, task := range this.Tasks {\n\t\tif colSum(this.AdjacencyMatrix, task.Id) == 0 {\n\t\t\tid, _ := this.getTaskFromName(task.Origin)\n\t\t\tif id != -1 {\n\t\t\t\t\/\/ Task is a meta task\n\t\t\t\tthis.Tasks[id].Module = \"meta\"\n\t\t\t\tthis.AdjacencyMatrix.Set(id, task.Id, float64(1))\n\t\t\t\tbackup = append(backup, id, task.Id)\n\t\t\t}\n\t\t}\n\t\tif rowSum(this.AdjacencyMatrix, task.Id) == 0 {\n\t\t\tid, _ := this.getTaskFromName(task.Origin)\n\t\t\tif id != -1 {\n\t\t\t\tfor c := 0; c < col; c++ {\n\t\t\t\t\tadd := true\n\t\t\t\t\tfor counter := 0; counter < len(backup)-1; counter += 2 {\n\t\t\t\t\t\tif backup[counter] == id && backup[counter+1] == c {\n\t\t\t\t\t\t\tadd = false\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif add == true {\n\t\t\t\t\t\tthis.AdjacencyMatrix.Set(task.Id, c, this.AdjacencyMatrix.At(task.Id, c)+this.AdjacencyMatrix.At(id, c))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\/\/TODO: complete the degreematrix\n\treturn this\n}\n\n\/\/ Duplicate the task \"id\"\n\/\/ Returns the id of the new task and the whole structure\nfunc (this *TaskGraphStructure) DuplicateTask(id int) (int, *TaskGraphStructure) {\n\trow, _ := this.AdjacencyMatrix.Dims()\n\t\/\/ Add the task to the list\n\torigin := this.Tasks[id]\n\tnewId := row + 1\n\tnewTask := origin\n\tnewTask.Id = newId\n\tthis.Tasks[newId] = newTask\n\t\/\/ Adjust the AdjacencyMatrix\n\tthis.AdjacencyMatrix = mat64.DenseCopyOf(this.AdjacencyMatrix.Grow(1, 1))\n\t\/\/ Copy the row 'id' to row 'newId'\n\tfor r := 0; r < newId; r++ {\n\t\tthis.AdjacencyMatrix.Set(r, newId, this.AdjacencyMatrix.At(r, id))\n\t}\n\t\/\/ Copy the col 'id' to col 'newId'\n\tfor c := 0; c < newId; c++ {\n\t\tthis.AdjacencyMatrix.Set(newId, c, this.AdjacencyMatrix.At(id, c))\n\t}\n\treturn newId, this\n}\n\n\/\/ This function print the dot file associated with the graph\nfunc (this *TaskGraphStructure) PrintDot(w io.Writer) {\n\tfmt.Fprintln(w, \"digraph G {\")\n\t\/\/ Writing node definition\n\tfor _, task := range this.Tasks {\n\t\tfmt.Fprintf(w, \"\\t\\\"%v\\\" [\\n\", task.Id)\n\t\tfmt.Fprintf(w, \"\\t\\tid = \\\"%v\\\"\\n\", task.Id)\n\t\tif task.Module == \"meta\" {\n\t\t\tfmt.Fprintln(w, \"\\t\\tshape=diamond\")\n\t\t\tfmt.Fprintf(w, \"\\t\\tlabel=\\\"%v\\\"\", task.Name)\n\t\t} else {\n\t\t\tfmt.Fprintf(w, \"\\t\\tlabel = \\\"<name>%v|<node>%v|<module>%v\\\"\\n\", task.Name, task.Node, task.Module)\n\t\t\tfmt.Fprintf(w, \"\\t\\tshape = \\\"record\\\"\\n\")\n\t\t}\n\t\tfmt.Fprintf(w, \"\\t];\\n\")\n\t}\n\trow, col := this.AdjacencyMatrix.Dims()\n\tfor r := 0; r < row; r++ {\n\t\tfor c := 0; c < col; c++ {\n\t\t\tif this.AdjacencyMatrix.At(r, c) == 1 {\n\t\t\t\tfmt.Fprintf(w, \"\\t%v -> %v\\n\", this.Tasks[r].Id, this.Tasks[c].Id)\n\t\t\t}\n\t\t}\n\t}\n\tfmt.Fprintln(w, \"}\")\n}\n\n\/\/ Return a structure of all the task with the given origin\nfunc (this *TaskGraphStructure) GetSubstructure(origin string) *TaskGraphStructure {\n\tsubTaskStructure := NewTaskGraphStructure()\n\tindex := 0\n\ttasksToExtract := make(map[int]*Task, 0)\n\tfor _, task := range this.Tasks {\n\t\tif task.Origin == origin {\n\t\t\t\/\/fmt.Printf(\"Adding %v(%v) at index:%v\\n\", task.Name, task.Id, index)\n\t\t\ttasksToExtract[index] = task\n\t\t\tindex += 1\n\t\t}\n\t}\n\t\/\/ Create the matrix of the correct size\n\tsize := len(tasksToExtract)\n\tif size > 0 {\n\t\tsubTaskStructure.AdjacencyMatrix = mat64.NewDense(size, size, nil)\n\t\tsubTaskStructure.DegreeMatrix = mat64.NewDense(size, size, nil)\n\t\tfor i := 0; i < size; i++ {\n\t\t\ttask := tasksToExtract[i]\n\t\t\t\/\/fmt.Printf(\"Task with ID:%v and name:%v will have id:%v\\n\", task.Id, task.Name, i)\n\t\t\t\/\/ BUG here probably\n\t\t\t\/\/ Construct the AdjacencyMatrix line by line\n\t\t\tfor col := 0; col < size; col++ {\n\t\t\t\ttask2 := tasksToExtract[col]\n\t\t\t\t\/\/fmt.Printf(\"Setting %v,%v with value from %v,%v\\n\", i, col, task.Id, task2.Id)\n\t\t\t\tsubTaskStructure.AdjacencyMatrix.Set(i, col, this.AdjacencyMatrix.At(task.Id, task2.Id))\n\t\t\t}\n\t\t\tsubTaskStructure.DegreeMatrix.Set(i, i, this.DegreeMatrix.At(task.Id, task.Id))\n\t\t\tsubTaskStructure.Tasks[i] = NewTask()\n\t\t\tsubTaskStructure.Tasks[i].Name = task.Name\n\t\t\tsubTaskStructure.Tasks[i].Module = task.Module\n\t\t\tsubTaskStructure.Tasks[i].Args = task.Args\n\t\t\tsubTaskStructure.Tasks[i].Origin = task.Origin\n\t\t\tsubTaskStructure.Tasks[i].Id = i\n\t\t}\n\t\t\/\/subTaskStructure.PrintAdjacencyMatrix()\n\t\treturn subTaskStructure\n\t} else {\n\t\treturn nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package flue\n\nimport (\n\t\"fmt\"\n\t\"github.com\/gonum\/matrix\/mat64\" \/\/ Matrix\n\t\"time\"\n)\n\n\/\/ A task is an action executed by a module\ntype Task struct {\n\tId int\n\tName string \/\/the task name\n\tNode string \/\/ The node name\n\tModule string\n\tArgs []string\n\tStatus int \/\/ -2: queued\n\t\/\/ -1: running\n\t\/\/ >=0 : return code\n\tStartTime time.Time\n\tEndTime time.Time\n\tTaskCanRunChan chan bool \/\/ true: run, false: wait\n}\n\n\/\/ This is the structure corresponding to the \"dot-graph\" of a task list\n\/\/ We store the nodes in a map\n\/\/ The index is the source node\n\/\/ The value is an array of strings containing the destination\ntype TaskGraphStructure struct {\n\tTasks map[int]*Task\n\tDegreeMatrix *mat64.Dense\n\tAdjacencyMatrix *mat64.Dense \/\/ Row id is the map id of the source task\n\t\/\/ Col id is the map id of the destination task\n}\n\nfunc PrintAdjacencyMatrix(taskStructure *TaskGraphStructure) {\n\trowSize, colSize := taskStructure.AdjacencyMatrix.Dims()\n\tfmt.Printf(\" \")\n\tfor c := 0; c < colSize; c++ {\n\t\tfmt.Printf(\"%v \", taskStructure.Tasks[c].Name)\n\t}\n\tfmt.Printf(\"\\n\")\n\tfor r := 0; r < rowSize; r++ {\n\t\tfmt.Printf(\"%v \", taskStructure.Tasks[r].Name)\n\t\tfor c := 0; c < colSize; c++ {\n\t\t\tfmt.Printf(\"%v \", taskStructure.AdjacencyMatrix.At(r, c))\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t}\n}\n\nfunc PrintDegreeMatrix(taskStructure *TaskGraphStructure) {\n\trowSize, colSize := taskStructure.DegreeMatrix.Dims()\n\tfor r := 0; r < rowSize; r++ {\n\t\tfor c := 0; c < colSize; c++ {\n\t\t\tfmt.Printf(\"%v \", taskStructure.DegreeMatrix.At(r, c))\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t}\n}\n\nfunc NewTask() *Task {\n\treturn &Task{\n\t\t-1,\n\t\t\"null\",\n\t\t\"localhost\",\n\t\t\"dummy\",\n\t\tmake([]string, 1),\n\t\t-2,\n\t\ttime.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),\n\t\ttime.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),\n\t\tmake(chan bool),\n\t}\n\n}\nfunc NewTaskGraphStructure() *TaskGraphStructure {\n\treturn &TaskGraphStructure{\n\t\tmake(map[int]*Task, 0),\n\t\tmat64.NewDense(0, 0, nil),\n\t\tmat64.NewDense(0, 0, nil),\n\t}\n}\n<commit_msg>Comments<commit_after>package flue\n\nimport (\n\t\"fmt\"\n\t\"github.com\/gonum\/matrix\/mat64\" \/\/ Matrix\n\t\"time\"\n)\n\n\/\/ A task is an action executed by a module\ntype Task struct {\n\tId int\n\tName string \/\/the task name\n\tNode string \/\/ The node name\n\tModule string\n\tArgs []string\n\tStatus int \/\/ -2: queued\n\t\/\/ -1: running\n\t\/\/ >=0 : return code\n\tStartTime time.Time\n\tEndTime time.Time\n\tTaskCanRunChan chan bool \/\/ true: run, false: wait\n}\n\n\/\/ This is the structure corresponding to the \"dot-graph\" of a task list\n\/\/ We store the nodes in a map\n\/\/ The index is the source node\ntype TaskGraphStructure struct {\n\tTasks map[int]*Task\n\tDegreeMatrix *mat64.Dense\n\tAdjacencyMatrix *mat64.Dense \/\/ Row id is the map id of the source task\n\t\/\/ Col id is the map id of the destination task\n}\n\nfunc PrintAdjacencyMatrix(taskStructure *TaskGraphStructure) {\n\trowSize, colSize := taskStructure.AdjacencyMatrix.Dims()\n\tfmt.Printf(\" \")\n\tfor c := 0; c < colSize; c++ {\n\t\tfmt.Printf(\"%v \", taskStructure.Tasks[c].Name)\n\t}\n\tfmt.Printf(\"\\n\")\n\tfor r := 0; r < rowSize; r++ {\n\t\tfmt.Printf(\"%v \", taskStructure.Tasks[r].Name)\n\t\tfor c := 0; c < colSize; c++ {\n\t\t\tfmt.Printf(\"%v \", taskStructure.AdjacencyMatrix.At(r, c))\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t}\n}\n\nfunc PrintDegreeMatrix(taskStructure *TaskGraphStructure) {\n\trowSize, colSize := taskStructure.DegreeMatrix.Dims()\n\tfor r := 0; r < rowSize; r++ {\n\t\tfor c := 0; c < colSize; c++ {\n\t\t\tfmt.Printf(\"%v \", taskStructure.DegreeMatrix.At(r, c))\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t}\n}\n\nfunc NewTask() *Task {\n\treturn &Task{\n\t\t-1,\n\t\t\"null\",\n\t\t\"localhost\",\n\t\t\"dummy\",\n\t\tmake([]string, 1),\n\t\t-2,\n\t\ttime.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),\n\t\ttime.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC),\n\t\tmake(chan bool),\n\t}\n\n}\nfunc NewTaskGraphStructure() *TaskGraphStructure {\n\treturn &TaskGraphStructure{\n\t\tmake(map[int]*Task, 0),\n\t\tmat64.NewDense(0, 0, nil),\n\t\tmat64.NewDense(0, 0, nil),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2018 The Decred developers\n\/\/ Use of this source code is governed by an ISC\n\/\/ license that can be found in the LICENSE file.\n\npackage deployments\n\nimport (\n\t\"github.com\/decred\/dcrd\/chaincfg\"\n\t\"github.com\/decred\/dcrd\/wire\"\n)\n\n\/\/ HardcodedDeployment specifies hardcoded block heights that a deployment\n\/\/ activates at. If the value is negative, the deployment is either inactive or\n\/\/ can't be determined due to the uniqueness properties of the network.\n\/\/\n\/\/ Since these are hardcoded deployments, and cannot support every possible\n\/\/ network, conditional logic should only be applied when a deployment is\n\/\/ active, not when it is inactive.\ntype HardcodedDeployment struct {\n\tMainNetActivationHeight int32\n\tTestNet2ActivationHeight int32\n\tTestNet3ActivationHeight int32\n\tSimNetActivationHeight int32\n}\n\n\/\/ DCP0001 specifies hard forking changes to the stake difficulty algorithm as\n\/\/ defined by https:\/\/github.com\/decred\/dcps\/blob\/master\/dcp-0001\/dcp-0001.mediawiki.\nvar DCP0001 = HardcodedDeployment{\n\tMainNetActivationHeight: 149248,\n\tTestNet2ActivationHeight: 46128,\n\tTestNet3ActivationHeight: 0,\n\tSimNetActivationHeight: -1,\n}\n\n\/\/ DCP0002 specifies the activation of the OP_SHA256 hard fork as defined by\n\/\/ https:\/\/github.com\/decred\/dcps\/blob\/master\/dcp-0002\/dcp-0002.mediawiki.\nvar DCP0002 = HardcodedDeployment{\n\tMainNetActivationHeight: 189568,\n\tTestNet2ActivationHeight: 151968,\n\tTestNet3ActivationHeight: 0,\n\tSimNetActivationHeight: -1,\n}\n\n\/\/ DCP0003 specifies the activation of a CSV soft fork as defined by\n\/\/ https:\/\/github.com\/decred\/dcps\/blob\/master\/dcp-0003\/dcp-0003.mediawiki.\nvar DCP0003 = HardcodedDeployment{\n\tMainNetActivationHeight: 189568,\n\tTestNet2ActivationHeight: 151968,\n\tTestNet3ActivationHeight: 0,\n\tSimNetActivationHeight: -1,\n}\n\n\/\/ Active returns whether the hardcoded deployment is active at height on the\n\/\/ network specified by params. Active always returns false for unrecognized\n\/\/ networks.\nfunc (d *HardcodedDeployment) Active(height int32, params *chaincfg.Params) bool {\n\tvar activationHeight int32 = -1\n\tswitch params.Net {\n\tcase wire.MainNet:\n\t\tactivationHeight = d.MainNetActivationHeight\n\tcase 0x48e7a065: \/\/ testnet2\n\t\tactivationHeight = d.TestNet2ActivationHeight\n\tcase wire.TestNet3:\n\t\tactivationHeight = d.TestNet3ActivationHeight\n\tcase wire.SimNet:\n\t\tactivationHeight = d.SimNetActivationHeight\n\t}\n\treturn activationHeight >= 0 && height >= activationHeight\n}\n<commit_msg>Activate simnet deployments from genesis block.<commit_after>\/\/ Copyright (c) 2018 The Decred developers\n\/\/ Use of this source code is governed by an ISC\n\/\/ license that can be found in the LICENSE file.\n\npackage deployments\n\nimport (\n\t\"github.com\/decred\/dcrd\/chaincfg\"\n\t\"github.com\/decred\/dcrd\/wire\"\n)\n\n\/\/ HardcodedDeployment specifies hardcoded block heights that a deployment\n\/\/ activates at. If the value is negative, the deployment is either inactive or\n\/\/ can't be determined due to the uniqueness properties of the network.\n\/\/\n\/\/ Since these are hardcoded deployments, and cannot support every possible\n\/\/ network, conditional logic should only be applied when a deployment is\n\/\/ active, not when it is inactive.\ntype HardcodedDeployment struct {\n\tMainNetActivationHeight int32\n\tTestNet2ActivationHeight int32\n\tTestNet3ActivationHeight int32\n\tSimNetActivationHeight int32\n}\n\n\/\/ DCP0001 specifies hard forking changes to the stake difficulty algorithm as\n\/\/ defined by https:\/\/github.com\/decred\/dcps\/blob\/master\/dcp-0001\/dcp-0001.mediawiki.\nvar DCP0001 = HardcodedDeployment{\n\tMainNetActivationHeight: 149248,\n\tTestNet2ActivationHeight: 46128,\n\tTestNet3ActivationHeight: 0,\n\tSimNetActivationHeight: 0,\n}\n\n\/\/ DCP0002 specifies the activation of the OP_SHA256 hard fork as defined by\n\/\/ https:\/\/github.com\/decred\/dcps\/blob\/master\/dcp-0002\/dcp-0002.mediawiki.\nvar DCP0002 = HardcodedDeployment{\n\tMainNetActivationHeight: 189568,\n\tTestNet2ActivationHeight: 151968,\n\tTestNet3ActivationHeight: 0,\n\tSimNetActivationHeight: 0,\n}\n\n\/\/ DCP0003 specifies the activation of a CSV soft fork as defined by\n\/\/ https:\/\/github.com\/decred\/dcps\/blob\/master\/dcp-0003\/dcp-0003.mediawiki.\nvar DCP0003 = HardcodedDeployment{\n\tMainNetActivationHeight: 189568,\n\tTestNet2ActivationHeight: 151968,\n\tTestNet3ActivationHeight: 0,\n\tSimNetActivationHeight: 0,\n}\n\n\/\/ Active returns whether the hardcoded deployment is active at height on the\n\/\/ network specified by params. Active always returns false for unrecognized\n\/\/ networks.\nfunc (d *HardcodedDeployment) Active(height int32, params *chaincfg.Params) bool {\n\tvar activationHeight int32 = -1\n\tswitch params.Net {\n\tcase wire.MainNet:\n\t\tactivationHeight = d.MainNetActivationHeight\n\tcase 0x48e7a065: \/\/ testnet2\n\t\tactivationHeight = d.TestNet2ActivationHeight\n\tcase wire.TestNet3:\n\t\tactivationHeight = d.TestNet3ActivationHeight\n\tcase wire.SimNet:\n\t\tactivationHeight = d.SimNetActivationHeight\n\t}\n\treturn activationHeight >= 0 && height >= activationHeight\n}\n<|endoftext|>"} {"text":"<commit_before>package storage\n\nimport (\n\t\"testing\"\n)\n\nvar (\n\ttestProjectName = \"andygrunwald\/TrendingGithub\"\n)\n\nfunc TestMarkRepositoryAsTweeted(t *testing.T) {\n\tstorage := MemoryStorage{}\n\tpool := storage.NewPool(\"\", \"\")\n\tconn := pool.Get()\n\n\tres, err := conn.MarkRepositoryAsTweeted(testProjectName, \"1440946305\")\n\tif err != nil {\n\t\tt.Fatalf(\"Error of marking repository: \\\"%s\\\"\", err)\n\t}\n\n\tif res == false {\n\t\tt.Fatal(\"Marking repositoriy failed, got false, expected true\")\n\t}\n}\n\nfunc TestIsRepositoryAlreadyTweeted(t *testing.T) {\n\tstorage := MemoryStorage{}\n\tpool := storage.NewPool(\"\", \"\")\n\tconn := pool.Get()\n\n\tres, err := conn.IsRepositoryAlreadyTweeted(testProjectName)\n\tif err != nil {\n\t\tt.Fatalf(\"First already tweeted check throws an error: \\\"%s\\\"\", err)\n\t}\n\tif res == true {\n\t\tt.Fatal(\"Repository was already tweeted, got true, expected false\")\n\t}\n\n\tres, err = conn.MarkRepositoryAsTweeted(testProjectName, \"1440946884\")\n\tif err != nil {\n\t\tt.Fatalf(\"Error of marking repository: \\\"%s\\\"\", err)\n\t}\n\n\tif res == false {\n\t\tt.Fatal(\"Marking repositoriy failed, got false, expected true\")\n\t}\n\n\tres, err = conn.IsRepositoryAlreadyTweeted(testProjectName)\n\tif err != nil {\n\t\tt.Fatalf(\"Second already tweeted check throws an error: \\\"%s\\\"\", err)\n\t}\n\tif res == false {\n\t\tt.Fatal(\"Repository was not already tweeted, got false, expected true\")\n\t}\n}\n<commit_msg>Renamed memory storage tests<commit_after>package storage\n\nimport (\n\t\"testing\"\n)\n\nvar (\n\ttestProjectName = \"andygrunwald\/TrendingGithub\"\n)\n\nfunc TestMemory_MarkRepositoryAsTweeted(t *testing.T) {\n\tstorage := MemoryStorage{}\n\tpool := storage.NewPool(\"\", \"\")\n\tconn := pool.Get()\n\n\tres, err := conn.MarkRepositoryAsTweeted(testProjectName, \"1440946305\")\n\tif err != nil {\n\t\tt.Fatalf(\"Error of marking repository: \\\"%s\\\"\", err)\n\t}\n\n\tif res == false {\n\t\tt.Fatal(\"Marking repositoriy failed, got false, expected true\")\n\t}\n}\n\nfunc TestMemory_IsRepositoryAlreadyTweeted(t *testing.T) {\n\tstorage := MemoryStorage{}\n\tpool := storage.NewPool(\"\", \"\")\n\tconn := pool.Get()\n\n\tres, err := conn.IsRepositoryAlreadyTweeted(testProjectName)\n\tif err != nil {\n\t\tt.Fatalf(\"First already tweeted check throws an error: \\\"%s\\\"\", err)\n\t}\n\tif res == true {\n\t\tt.Fatal(\"Repository was already tweeted, got true, expected false\")\n\t}\n\n\tres, err = conn.MarkRepositoryAsTweeted(testProjectName, \"1440946884\")\n\tif err != nil {\n\t\tt.Fatalf(\"Error of marking repository: \\\"%s\\\"\", err)\n\t}\n\n\tif res == false {\n\t\tt.Fatal(\"Marking repositoriy failed, got false, expected true\")\n\t}\n\n\tres, err = conn.IsRepositoryAlreadyTweeted(testProjectName)\n\tif err != nil {\n\t\tt.Fatalf(\"Second already tweeted check throws an error: \\\"%s\\\"\", err)\n\t}\n\tif res == false {\n\t\tt.Fatal(\"Repository was not already tweeted, got false, expected true\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package redis\n\nimport (\n \"sync\"\n \"time\"\n \"strconv\"\n \"github.com\/BluePecker\/JwtAuth\/storage\"\n \"github.com\/go-redis\/redis\"\n \"fmt\"\n)\n\ntype Redis struct {\n create time.Time\n mu sync.RWMutex\n mem storage.MemStore\n client *redis.Client\n}\n\nfunc (er *Redis) Initializer(opt storage.Options) error {\n er.client = redis.NewClient(&redis.Options{\n Network: \"tcp\",\n Addr: opt.Host + \":\" + strconv.Itoa(opt.Port),\n PoolSize: opt.PoolSize,\n })\n err := er.client.Ping().Err()\n if err != nil {\n defer er.client.Close()\n }\n return err\n}\n\nfunc (er *Redis) TTL(key string) int {\n er.mu.RLock()\n defer er.mu.RUnlock()\n if !er.mem.Exist(key) {\n return int(er.client.TTL(key).Val().Seconds())\n }\n return er.mem.TTL(key)\n}\n\nfunc (er *Redis) Read(key string) (interface{}, error) {\n er.mu.RLock()\n defer er.mu.RUnlock()\n if v, err := er.mem.Get(key); err != nil {\n status := er.client.Get(key)\n return status.Val(), status.Err()\n } else {\n return v, nil\n }\n}\n\nfunc (er *Redis) ReadInt(key string) (int, error) {\n er.mu.RLock()\n defer er.mu.RUnlock()\n v, err := er.mem.GetInt(key)\n if err != nil {\n status := er.client.Get(key)\n if status.Err() != nil {\n return 0, status.Err()\n }\n return strconv.Atoi(status.Val())\n }\n return v, nil\n}\n\nfunc (er *Redis) ReadString(key string) (string, error) {\n er.mu.RLock()\n defer er.mu.RUnlock()\n v, err := er.mem.GetString(key)\n if err != nil {\n status := er.client.Get(key)\n if status.Err() != nil {\n return \"\", status.Err()\n }\n return status.Val(), nil\n }\n return v, nil\n}\n\nfunc (er *Redis) Upgrade(key string, expire int) {\n er.mu.Lock()\n defer er.mu.Unlock()\n if v, err := er.Read(key); err != nil {\n er.Write(key, v, expire)\n }\n \n}\n\nfunc (er *Redis) Write(key string, value interface{}, expire int) {\n er.mu.Lock()\n defer er.mu.Unlock()\n if er.mem.Set(key, value, expire) == nil {\n fmt.Printf(key)\n er.flushToDB(key, value, expire)\n }\n}\n\nfunc (er *Redis) WriteImmutable(key string, value interface{}, expire int) {\n er.mu.Lock()\n defer er.mu.Unlock()\n if er.mem.SetImmutable(key, value, expire) == nil {\n er.flushToDB(key, value, expire)\n }\n}\n\nfunc (er *Redis) flushToDB(key string, value interface{}, expire int) error {\n cmdStatus := er.client.Set(key, value, time.Duration(expire) * time.Second)\n return cmdStatus.Err()\n}\n\nfunc init() {\n storage.Register(\"redis\", &Redis{})\n}<commit_msg>redis driver<commit_after>package redis\n\nimport (\n \"sync\"\n \"time\"\n \"strconv\"\n \"github.com\/BluePecker\/JwtAuth\/storage\"\n \"github.com\/go-redis\/redis\"\n \"fmt\"\n)\n\ntype Redis struct {\n create time.Time\n mu sync.RWMutex\n mem storage.MemStore\n client *redis.Client\n}\n\nfunc (er *Redis) Initializer(opt storage.Options) error {\n er.client = redis.NewClient(&redis.Options{\n Network: \"tcp\",\n Addr: opt.Host + \":\" + strconv.Itoa(opt.Port),\n PoolSize: opt.PoolSize,\n })\n err := er.client.Ping().Err()\n if err != nil {\n defer er.client.Close()\n }\n return err\n}\n\nfunc (er *Redis) TTL(key string) int {\n er.mu.RLock()\n defer er.mu.RUnlock()\n if !er.mem.Exist(key) {\n return int(er.client.TTL(key).Val().Seconds())\n }\n return er.mem.TTL(key)\n}\n\nfunc (er *Redis) Read(key string) (interface{}, error) {\n er.mu.RLock()\n defer er.mu.RUnlock()\n if v, err := er.mem.Get(key); err != nil {\n status := er.client.Get(key)\n return status.Val(), status.Err()\n } else {\n return v, nil\n }\n}\n\nfunc (er *Redis) ReadInt(key string) (int, error) {\n er.mu.RLock()\n defer er.mu.RUnlock()\n v, err := er.mem.GetInt(key)\n if err != nil {\n status := er.client.Get(key)\n if status.Err() != nil {\n return 0, status.Err()\n }\n return strconv.Atoi(status.Val())\n }\n return v, nil\n}\n\nfunc (er *Redis) ReadString(key string) (string, error) {\n er.mu.RLock()\n defer er.mu.RUnlock()\n v, err := er.mem.GetString(key)\n if err != nil {\n status := er.client.Get(key)\n if status.Err() != nil {\n return \"\", status.Err()\n }\n return status.Val(), nil\n }\n return v, nil\n}\n\nfunc (er *Redis) Upgrade(key string, expire int) {\n er.mu.Lock()\n defer er.mu.Unlock()\n if v, err := er.Read(key); err != nil {\n er.Write(key, v, expire)\n }\n \n}\n\nfunc (er *Redis) Write(key string, value interface{}, expire int) {\n er.mu.Lock()\n defer er.mu.Unlock()\n if er.mem.Set(key, value, expire) == nil {\n err := er.flushToDB(key, value, expire)\n fmt.Println(key, err)\n }\n}\n\nfunc (er *Redis) WriteImmutable(key string, value interface{}, expire int) {\n er.mu.Lock()\n defer er.mu.Unlock()\n if er.mem.SetImmutable(key, value, expire) == nil {\n er.flushToDB(key, value, expire)\n }\n}\n\nfunc (er *Redis) flushToDB(key string, value interface{}, expire int) error {\n cmdStatus := er.client.Set(key, value, time.Duration(expire) * time.Second)\n return cmdStatus.Err()\n}\n\nfunc init() {\n storage.Register(\"redis\", &Redis{})\n}<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.\n\/\/ A HTTP wrapper which handles some errors and provides a way to replace the HTTP client by a mock.\n\/\/ Author: bratseth\n\npackage utils\n\nimport (\n \"net\/http\"\n \"net\/url\"\n \"strings\"\n \"time\"\n)\n\n\/\/ Set this to a mock HttpClient instead to unit test HTTP requests\nvar ActiveHttpClient = CreateClient(time.Second * 10)\n\ntype HttpClient interface {\n Do(request *http.Request, timeout time.Duration) (response *http.Response, error error)\n}\n\ntype defaultHttpClient struct {\n client http.Client\n}\n\nfunc (c defaultHttpClient) Do(request *http.Request, timeout time.Duration) (response *http.Response, error error) {\n if c.client.Timeout != timeout { \/\/ Create a new client with the right timeout\n c.client = http.Client{Timeout: timeout,}\n }\n return c.client.Do(request)\n}\n\nfunc CreateClient(timeout time.Duration) HttpClient {\n return &defaultHttpClient{\n client: http.Client{Timeout: timeout,},\n }\n}\n\n\/\/ Convenience function for doing a HTTP GET\nfunc HttpGet(host string, path string, description string) *http.Response {\n url, urlError := url.Parse(host + path)\n if urlError != nil {\n Error(\"Invalid target url '\" + host + path + \"'\")\n return nil\n }\n return HttpDo(&http.Request{URL: url,}, time.Second * 10, description)\n}\n\nfunc HttpDo(request *http.Request, timeout time.Duration, description string) *http.Response {\n response, error := ActiveHttpClient.Do(request, timeout)\n if error != nil {\n Error(\"Could not connect to\", strings.ToLower(description), \"at\", request.URL.Host)\n Detail(error.Error())\n }\n return response\n}\n\n<commit_msg>Store reference<commit_after>\/\/ Copyright Verizon Media. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.\n\/\/ A HTTP wrapper which handles some errors and provides a way to replace the HTTP client by a mock.\n\/\/ Author: bratseth\n\npackage utils\n\nimport (\n \"net\/http\"\n \"net\/url\"\n \"strings\"\n \"time\"\n)\n\n\/\/ Set this to a mock HttpClient instead to unit test HTTP requests\nvar ActiveHttpClient = CreateClient(time.Second * 10)\n\ntype HttpClient interface {\n Do(request *http.Request, timeout time.Duration) (response *http.Response, error error)\n}\n\ntype defaultHttpClient struct {\n client *http.Client\n}\n\nfunc (c defaultHttpClient) Do(request *http.Request, timeout time.Duration) (response *http.Response, error error) {\n if c.client.Timeout != timeout { \/\/ Create a new client with the right timeout\n c.client = &http.Client{Timeout: timeout,}\n }\n return c.client.Do(request)\n}\n\nfunc CreateClient(timeout time.Duration) HttpClient {\n return &defaultHttpClient{\n client: &http.Client{Timeout: timeout,},\n }\n}\n\n\/\/ Convenience function for doing a HTTP GET\nfunc HttpGet(host string, path string, description string) *http.Response {\n url, urlError := url.Parse(host + path)\n if urlError != nil {\n Error(\"Invalid target url '\" + host + path + \"'\")\n return nil\n }\n return HttpDo(&http.Request{URL: url,}, time.Second * 10, description)\n}\n\nfunc HttpDo(request *http.Request, timeout time.Duration, description string) *http.Response {\n response, error := ActiveHttpClient.Do(request, timeout)\n if error != nil {\n Error(\"Could not connect to\", strings.ToLower(description), \"at\", request.URL.Host)\n Detail(error.Error())\n }\n return response\n}\n\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.\n\/\/ vespa document API client\n\/\/ Author: bratseth\n\npackage vespa\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/vespa-engine\/vespa\/client\/go\/curl\"\n\t\"github.com\/vespa-engine\/vespa\/client\/go\/util\"\n)\n\n\/\/ Sends the operation given in the file\nfunc Send(jsonFile string, service *Service, options OperationOptions) util.OperationResult {\n\treturn sendOperation(\"\", jsonFile, service, anyOperation, options)\n}\n\nfunc Put(documentId string, jsonFile string, service *Service, options OperationOptions) util.OperationResult {\n\treturn sendOperation(documentId, jsonFile, service, putOperation, options)\n}\n\nfunc Update(documentId string, jsonFile string, service *Service, options OperationOptions) util.OperationResult {\n\treturn sendOperation(documentId, jsonFile, service, updateOperation, options)\n}\n\nfunc RemoveId(documentId string, service *Service, options OperationOptions) util.OperationResult {\n\treturn sendOperation(documentId, \"\", service, removeOperation, options)\n}\n\nfunc RemoveOperation(jsonFile string, service *Service, options OperationOptions) util.OperationResult {\n\treturn sendOperation(\"\", jsonFile, service, removeOperation, options)\n}\n\nconst (\n\tanyOperation string = \"any\"\n\tputOperation string = \"put\"\n\tupdateOperation string = \"update\"\n\tremoveOperation string = \"remove\"\n)\n\ntype OperationOptions struct {\n\tCurlOutput io.Writer\n\tTimeout time.Duration\n}\n\nfunc sendOperation(documentId string, jsonFile string, service *Service, operation string, options OperationOptions) util.OperationResult {\n\theader := http.Header{}\n\theader.Add(\"Content-Type\", \"application\/json\")\n\n\tvar documentData []byte\n\tif operation == \"remove\" && jsonFile == \"\" {\n\t\tdocumentData = []byte(\"{\\n \\\"remove\\\": \\\"\" + documentId + \"\\\"\\n}\\n\")\n\t} else {\n\t\tfileReader, err := os.Open(jsonFile)\n\t\tif err != nil {\n\t\t\treturn util.FailureWithDetail(\"Could not open file '\"+jsonFile+\"'\", err.Error())\n\t\t}\n\t\tdefer fileReader.Close()\n\t\tdocumentData, err = ioutil.ReadAll(fileReader)\n\t\tif err != nil {\n\t\t\treturn util.FailureWithDetail(\"Failed to read '\"+jsonFile+\"'\", err.Error())\n\t\t}\n\t}\n\n\tvar doc map[string]interface{}\n\tjson.Unmarshal(documentData, &doc)\n\n\toperationInFile := operationIn(doc)\n\tif operation == anyOperation { \/\/ Operation is decided by file content\n\t\toperation = operationInFile\n\t} else if operationInFile != \"\" && operationInFile != operation { \/\/ Otherwise operation must match\n\t\treturn util.Failure(\"Wanted document operation is \" + operation + \" but the JSON file specifies \" + operationInFile)\n\t}\n\n\tif documentId == \"\" { \/\/ Document id is decided by file content\n\t\tif doc[operation] == nil {\n\t\t\treturn util.Failure(\"No document id given neither as argument or as a '\" + operation + \"' key in the json file\")\n\t\t}\n\t\tdocumentId = doc[operation].(string) \/\/ document feeder format\n\t}\n\n\tdocumentPath, documentPathError := IdToURLPath(documentId)\n\tif documentPathError != nil {\n\t\treturn util.Failure(\"Invalid document id '\" + documentId + \"': \" + documentPathError.Error())\n\t}\n\n\turl, urlParseError := url.Parse(service.BaseURL + \"\/document\/v1\/\" + documentPath)\n\tif urlParseError != nil {\n\t\treturn util.Failure(\"Invalid request path: '\" + service.BaseURL + \"\/document\/v1\/\" + documentPath + \"': \" + urlParseError.Error())\n\t}\n\n\trequest := &http.Request{\n\t\tURL: url,\n\t\tMethod: operationToHTTPMethod(operation),\n\t\tHeader: header,\n\t\tBody: ioutil.NopCloser(bytes.NewReader(documentData)),\n\t}\n\tresponse, err := serviceDo(service, request, jsonFile, options)\n\tif response == nil {\n\t\treturn util.Failure(\"Request failed: \" + err.Error())\n\t}\n\n\tdefer response.Body.Close()\n\tif response.StatusCode == 200 {\n\t\treturn util.Success(operation + \" \" + documentId)\n\t} else if response.StatusCode\/100 == 4 {\n\t\treturn util.FailureWithPayload(\"Invalid document operation: \"+response.Status, util.ReaderToJSON(response.Body))\n\t} else {\n\t\treturn util.FailureWithPayload(service.Description()+\" at \"+request.URL.Host+\": \"+response.Status, util.ReaderToJSON(response.Body))\n\t}\n}\n\nfunc operationIn(doc map[string]interface{}) string {\n\tif doc[\"put\"] != nil {\n\t\treturn \"put\"\n\t} else if doc[\"update\"] != nil {\n\t\treturn \"update\"\n\t} else if doc[\"remove\"] != nil {\n\t\treturn \"remove\"\n\t} else {\n\t\treturn \"\"\n\t}\n}\n\nfunc operationToHTTPMethod(operation string) string {\n\tswitch operation {\n\tcase \"put\":\n\t\treturn \"POST\"\n\tcase \"update\":\n\t\treturn \"PUT\"\n\tcase \"remove\":\n\t\treturn \"DELETE\"\n\t}\n\tpanic(\"Unexpected document operation ''\" + operation + \"'\")\n}\n\nfunc serviceDo(service *Service, request *http.Request, filename string, options OperationOptions) (*http.Response, error) {\n\tcmd, err := curl.RawArgs(request.URL.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcmd.Method = request.Method\n\tfor k, vs := range request.Header {\n\t\tfor _, v := range vs {\n\t\t\tcmd.Header(k, v)\n\t\t}\n\t}\n\tcmd.BodyFile = filename\n\tcmd.Certificate = service.TLSOptions.CertificateFile\n\tcmd.PrivateKey = service.TLSOptions.PrivateKeyFile\n\tout := cmd.String() + \"\\n\"\n\tif _, err := io.WriteString(options.CurlOutput, out); err != nil {\n\t\treturn nil, err\n\t}\n\treturn service.Do(request, options.Timeout)\n}\n\nfunc Get(documentId string, service *Service, options OperationOptions) util.OperationResult {\n\tdocumentPath, documentPathError := IdToURLPath(documentId)\n\tif documentPathError != nil {\n\t\treturn util.Failure(\"Invalid document id '\" + documentId + \"': \" + documentPathError.Error())\n\t}\n\n\turl, urlParseError := url.Parse(service.BaseURL + \"\/document\/v1\/\" + documentPath)\n\tif urlParseError != nil {\n\t\treturn util.Failure(\"Invalid request path: '\" + service.BaseURL + \"\/document\/v1\/\" + documentPath + \"': \" + urlParseError.Error())\n\t}\n\n\trequest := &http.Request{\n\t\tURL: url,\n\t\tMethod: \"GET\",\n\t}\n\tresponse, err := serviceDo(service, request, \"\", options)\n\tif response == nil {\n\t\treturn util.Failure(\"Request failed: \" + err.Error())\n\t}\n\n\tdefer response.Body.Close()\n\tif response.StatusCode == 200 {\n\t\treturn util.SuccessWithPayload(\"Read \"+documentId, util.ReaderToJSON(response.Body))\n\t} else if response.StatusCode\/100 == 4 {\n\t\treturn util.FailureWithPayload(\"Invalid document operation: \"+response.Status, util.ReaderToJSON(response.Body))\n\t} else {\n\t\treturn util.FailureWithPayload(service.Description()+\" at \"+request.URL.Host+\": \"+response.Status, util.ReaderToJSON(response.Body))\n\t}\n}\n<commit_msg>Check error for not nil, not response for nil<commit_after>\/\/ Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.\n\/\/ vespa document API client\n\/\/ Author: bratseth\n\npackage vespa\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/vespa-engine\/vespa\/client\/go\/curl\"\n\t\"github.com\/vespa-engine\/vespa\/client\/go\/util\"\n)\n\n\/\/ Sends the operation given in the file\nfunc Send(jsonFile string, service *Service, options OperationOptions) util.OperationResult {\n\treturn sendOperation(\"\", jsonFile, service, anyOperation, options)\n}\n\nfunc Put(documentId string, jsonFile string, service *Service, options OperationOptions) util.OperationResult {\n\treturn sendOperation(documentId, jsonFile, service, putOperation, options)\n}\n\nfunc Update(documentId string, jsonFile string, service *Service, options OperationOptions) util.OperationResult {\n\treturn sendOperation(documentId, jsonFile, service, updateOperation, options)\n}\n\nfunc RemoveId(documentId string, service *Service, options OperationOptions) util.OperationResult {\n\treturn sendOperation(documentId, \"\", service, removeOperation, options)\n}\n\nfunc RemoveOperation(jsonFile string, service *Service, options OperationOptions) util.OperationResult {\n\treturn sendOperation(\"\", jsonFile, service, removeOperation, options)\n}\n\nconst (\n\tanyOperation string = \"any\"\n\tputOperation string = \"put\"\n\tupdateOperation string = \"update\"\n\tremoveOperation string = \"remove\"\n)\n\ntype OperationOptions struct {\n\tCurlOutput io.Writer\n\tTimeout time.Duration\n}\n\nfunc sendOperation(documentId string, jsonFile string, service *Service, operation string, options OperationOptions) util.OperationResult {\n\theader := http.Header{}\n\theader.Add(\"Content-Type\", \"application\/json\")\n\n\tvar documentData []byte\n\tif operation == \"remove\" && jsonFile == \"\" {\n\t\tdocumentData = []byte(\"{\\n \\\"remove\\\": \\\"\" + documentId + \"\\\"\\n}\\n\")\n\t} else {\n\t\tfileReader, err := os.Open(jsonFile)\n\t\tif err != nil {\n\t\t\treturn util.FailureWithDetail(\"Could not open file '\"+jsonFile+\"'\", err.Error())\n\t\t}\n\t\tdefer fileReader.Close()\n\t\tdocumentData, err = ioutil.ReadAll(fileReader)\n\t\tif err != nil {\n\t\t\treturn util.FailureWithDetail(\"Failed to read '\"+jsonFile+\"'\", err.Error())\n\t\t}\n\t}\n\n\tvar doc map[string]interface{}\n\tjson.Unmarshal(documentData, &doc)\n\n\toperationInFile := operationIn(doc)\n\tif operation == anyOperation { \/\/ Operation is decided by file content\n\t\toperation = operationInFile\n\t} else if operationInFile != \"\" && operationInFile != operation { \/\/ Otherwise operation must match\n\t\treturn util.Failure(\"Wanted document operation is \" + operation + \" but the JSON file specifies \" + operationInFile)\n\t}\n\n\tif documentId == \"\" { \/\/ Document id is decided by file content\n\t\tif doc[operation] == nil {\n\t\t\treturn util.Failure(\"No document id given neither as argument or as a '\" + operation + \"' key in the json file\")\n\t\t}\n\t\tdocumentId = doc[operation].(string) \/\/ document feeder format\n\t}\n\n\tdocumentPath, documentPathError := IdToURLPath(documentId)\n\tif documentPathError != nil {\n\t\treturn util.Failure(\"Invalid document id '\" + documentId + \"': \" + documentPathError.Error())\n\t}\n\n\turl, urlParseError := url.Parse(service.BaseURL + \"\/document\/v1\/\" + documentPath)\n\tif urlParseError != nil {\n\t\treturn util.Failure(\"Invalid request path: '\" + service.BaseURL + \"\/document\/v1\/\" + documentPath + \"': \" + urlParseError.Error())\n\t}\n\n\trequest := &http.Request{\n\t\tURL: url,\n\t\tMethod: operationToHTTPMethod(operation),\n\t\tHeader: header,\n\t\tBody: ioutil.NopCloser(bytes.NewReader(documentData)),\n\t}\n\tresponse, err := serviceDo(service, request, jsonFile, options)\n\tif err != nil {\n\t\treturn util.Failure(\"Request failed: \" + err.Error())\n\t}\n\n\tdefer response.Body.Close()\n\tif response.StatusCode == 200 {\n\t\treturn util.Success(operation + \" \" + documentId)\n\t} else if response.StatusCode\/100 == 4 {\n\t\treturn util.FailureWithPayload(\"Invalid document operation: \"+response.Status, util.ReaderToJSON(response.Body))\n\t} else {\n\t\treturn util.FailureWithPayload(service.Description()+\" at \"+request.URL.Host+\": \"+response.Status, util.ReaderToJSON(response.Body))\n\t}\n}\n\nfunc operationIn(doc map[string]interface{}) string {\n\tif doc[\"put\"] != nil {\n\t\treturn \"put\"\n\t} else if doc[\"update\"] != nil {\n\t\treturn \"update\"\n\t} else if doc[\"remove\"] != nil {\n\t\treturn \"remove\"\n\t} else {\n\t\treturn \"\"\n\t}\n}\n\nfunc operationToHTTPMethod(operation string) string {\n\tswitch operation {\n\tcase \"put\":\n\t\treturn \"POST\"\n\tcase \"update\":\n\t\treturn \"PUT\"\n\tcase \"remove\":\n\t\treturn \"DELETE\"\n\t}\n\tpanic(\"Unexpected document operation ''\" + operation + \"'\")\n}\n\nfunc serviceDo(service *Service, request *http.Request, filename string, options OperationOptions) (*http.Response, error) {\n\tcmd, err := curl.RawArgs(request.URL.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcmd.Method = request.Method\n\tfor k, vs := range request.Header {\n\t\tfor _, v := range vs {\n\t\t\tcmd.Header(k, v)\n\t\t}\n\t}\n\tcmd.BodyFile = filename\n\tcmd.Certificate = service.TLSOptions.CertificateFile\n\tcmd.PrivateKey = service.TLSOptions.PrivateKeyFile\n\tout := cmd.String() + \"\\n\"\n\tif _, err := io.WriteString(options.CurlOutput, out); err != nil {\n\t\treturn nil, err\n\t}\n\treturn service.Do(request, options.Timeout)\n}\n\nfunc Get(documentId string, service *Service, options OperationOptions) util.OperationResult {\n\tdocumentPath, documentPathError := IdToURLPath(documentId)\n\tif documentPathError != nil {\n\t\treturn util.Failure(\"Invalid document id '\" + documentId + \"': \" + documentPathError.Error())\n\t}\n\n\turl, urlParseError := url.Parse(service.BaseURL + \"\/document\/v1\/\" + documentPath)\n\tif urlParseError != nil {\n\t\treturn util.Failure(\"Invalid request path: '\" + service.BaseURL + \"\/document\/v1\/\" + documentPath + \"': \" + urlParseError.Error())\n\t}\n\n\trequest := &http.Request{\n\t\tURL: url,\n\t\tMethod: \"GET\",\n\t}\n\tresponse, err := serviceDo(service, request, \"\", options)\n\tif err != nil {\n\t\treturn util.Failure(\"Request failed: \" + err.Error())\n\t}\n\n\tdefer response.Body.Close()\n\tif response.StatusCode == 200 {\n\t\treturn util.SuccessWithPayload(\"Read \"+documentId, util.ReaderToJSON(response.Body))\n\t} else if response.StatusCode\/100 == 4 {\n\t\treturn util.FailureWithPayload(\"Invalid document operation: \"+response.Status, util.ReaderToJSON(response.Body))\n\t} else {\n\t\treturn util.FailureWithPayload(service.Description()+\" at \"+request.URL.Host+\": \"+response.Status, util.ReaderToJSON(response.Body))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 SteelSeries ApS. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This package implements a basic LISP interpretor for embedding in a go program for scripting.\n\/\/ This file contains the built-in primitive functions.\n\npackage golisp\n\nimport (\n\t\"fmt\"\n)\n\nfunc RegisterSpecialFormPrimitives() {\n\tMakePrimitiveFunction(\"cond\", -1, CondImpl)\n\tMakePrimitiveFunction(\"case\", -1, CaseImpl)\n\tMakePrimitiveFunction(\"if\", -1, IfImpl)\n\tMakePrimitiveFunction(\"lambda\", -1, LambdaImpl)\n\tMakePrimitiveFunction(\"define\", -1, DefineImpl)\n\tMakePrimitiveFunction(\"defmacro\", -1, DefmacroImpl)\n\tMakePrimitiveFunction(\"let\", -1, LetImpl)\n\tMakePrimitiveFunction(\"begin\", -1, BeginImpl)\n\tMakePrimitiveFunction(\"do\", -1, DoImpl)\n\tMakePrimitiveFunction(\"apply\", -1, ApplyImpl)\n\tMakePrimitiveFunction(\"eval\", 1, EvalImpl)\n\tMakePrimitiveFunction(\"->\", -1, ChainImpl)\n\tMakePrimitiveFunction(\"=>\", -1, TapImpl)\n\tMakePrimitiveFunction(\"definition-of\", 1, DefinitionOfImpl)\n}\n\nfunc CondImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tvar condition *Data\n\tfor c := args; NotNilP(c); c = Cdr(c) {\n\t\tclause := Car(c)\n\t\tif !PairP(clause) {\n\t\t\terr = ProcessError(\"Cond expect a sequence of clauses that are lists\", env)\n\t\t\treturn\n\t\t}\n\t\tcondition, err = Eval(Car(clause), env)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif BooleanValue(condition) || StringValue(Car(clause)) == \"else\" {\n\t\t\tfor e := Cdr(clause); NotNilP(e); e = Cdr(e) {\n\t\t\t\tresult, err = Eval(Car(e), env)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc evalList(l *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tfor sexpr := l; NotNilP(sexpr); sexpr = Cdr(sexpr) {\n\t\tresult, err = Eval(Car(sexpr), env)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc CaseImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tvar keyValue *Data\n\n\tkeyValue, err = Eval(Car(args), env)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor clauseCell := Cdr(args); NotNilP(clauseCell); clauseCell = Cdr(clauseCell) {\n\t\tclause := Car(clauseCell)\n\t\tif !PairP(clause) {\n\t\t\terr = ProcessError(\"Case requires non-atomic clauses\", env)\n\t\t\treturn\n\t\t}\n\t\tif ListP(Car(clause)) {\n\t\t\tfor v := Car(clause); NotNilP(v); v = Cdr(v) {\n\t\t\t\tif IsEqual(Car(v), keyValue) {\n\t\t\t\t\treturn evalList(Cdr(clause), env)\n\t\t\t\t}\n\t\t\t}\n\t\t} else if IsEqual(Car(clause), SymbolWithName(\"else\")) {\n\t\t\treturn evalList(Cdr(clause), env)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc IfImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tif Length(args) < 2 || Length(args) > 3 {\n\t\terr = ProcessError(fmt.Sprintf(\"IF requires 2 or 3 arguments. Received %d.\", Length(args)), env)\n\t\treturn\n\t}\n\n\tc, err := Eval(Car(args), env)\n\tif err != nil {\n\t\treturn\n\t}\n\tcondition := BooleanValue(c)\n\tthenClause := Second(args)\n\telseClause := Third(args)\n\n\tif condition {\n\t\treturn Eval(thenClause, env)\n\t} else {\n\t\treturn Eval(elseClause, env)\n\t}\n}\n\nfunc LambdaImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tparams := Car(args)\n\tbody := Cdr(args)\n\treturn FunctionWithNameParamsBodyAndParent(\"anonymous\", params, body, env), nil\n}\n\nfunc DefineImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tvar value *Data\n\tthing := Car(args)\n\tif SymbolP(thing) {\n\t\tvalue, err = Eval(Cadr(args), env)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t} else if PairP(thing) {\n\t\tname := Car(thing)\n\t\tparams := Cdr(thing)\n\t\tthing = name\n\t\tif !SymbolP(name) {\n\t\t\terr = ProcessError(\"Function name has to be a symbol\", env)\n\t\t\treturn\n\t\t}\n\t\tbody := Cdr(args)\n\t\tvalue = FunctionWithNameParamsBodyAndParent(StringValue(name), params, body, env)\n\t} else {\n\t\terr = ProcessError(\"Invalid definition\", env)\n\t\treturn\n\t}\n\tenv.BindLocallyTo(thing, value)\n\treturn value, nil\n}\n\nfunc DefmacroImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tvar value *Data\n\tthing := Car(args)\n\tif PairP(thing) {\n\t\tname := Car(thing)\n\t\tparams := Cdr(thing)\n\t\tthing = name\n\t\tif !SymbolP(name) {\n\t\t\terr = ProcessError(\"Macro name has to be a symbol\", env)\n\t\t\treturn\n\t\t}\n\t\tbody := Cadr(args)\n\t\tvalue = MacroWithNameParamsBodyAndParent(StringValue(name), params, body, env)\n\t} else {\n\t\terr = ProcessError(\"Invalid macro definition\", env)\n\t\treturn\n\t}\n\tenv.BindLocallyTo(thing, value)\n\treturn value, nil\n}\n\nfunc bindLetLocals(bindingForms *Data, env *SymbolTableFrame) (err error) {\n\tvar name *Data\n\tvar value *Data\n\n\tfor cell := bindingForms; NotNilP(cell); cell = Cdr(cell) {\n\t\tbindingPair := Car(cell)\n\t\tif !PairP(bindingPair) {\n\t\t\terr = ProcessError(\"Let requires a list of bindings (with are pairs) as it's first argument\", env)\n\t\t\treturn\n\t\t}\n\t\tname = Car(bindingPair)\n\t\tif !SymbolP(name) {\n\t\t\terr = ProcessError(\"First part of a let binding pair must be a symbol\", env)\n\t\t}\n\t\tvalue, err = Eval(Cadr(bindingPair), env)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tenv.BindLocallyTo(name, value)\n\t}\n\treturn\n}\n\nfunc LetImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tif Length(args) < 1 {\n\t\terr = ProcessError(\"Let requires at least a list of bindings\", env)\n\t\treturn\n\t}\n\n\tif !PairP(Car(args)) {\n\t\terr = ProcessError(\"Let requires a list of bindings as it's first argument\", env)\n\t\treturn\n\t}\n\n\tlocalEnv := NewSymbolTableFrameBelow(env)\n\tlocalEnv.Previous = env\n\tbindLetLocals(Car(args), localEnv)\n\n\tfor cell := Cdr(args); NotNilP(cell); cell = Cdr(cell) {\n\t\tsexpr := Car(cell)\n\t\tresult, err = Eval(sexpr, localEnv)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc BeginImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tfor cell := args; NotNilP(cell); cell = Cdr(cell) {\n\t\tsexpr := Car(cell)\n\t\tresult, err = Eval(sexpr, env)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc rebindDoLocals(bindingForms *Data, env *SymbolTableFrame) (err error) {\n\tvar name *Data\n\tvar value *Data\n\n\tfor cell := bindingForms; NotNilP(cell); cell = Cdr(cell) {\n\t\tbindingTuple := Car(cell)\n\t\tname = First(bindingTuple)\n\t\tif NotNilP(Third(bindingTuple)) {\n\t\t\tvalue, err = Eval(Third(bindingTuple), env)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tenv.BindLocallyTo(name, value)\n\t\t}\n\t}\n\treturn\n}\n\nfunc DoImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tif Length(args) < 2 {\n\t\terr = ProcessError(\"Do requires at least a list of bindings and a test clause\", env)\n\t\treturn\n\t}\n\n\tbindings := Car(args)\n\tif !PairP(bindings) {\n\t\terr = ProcessError(\"Do requires a list of bindings as it's first argument\", env)\n\t\treturn\n\t}\n\n\ttestClause := Cadr(args)\n\tif !PairP(testClause) {\n\t\terr = ProcessError(\"Do requires a list as it's second argument\", env)\n\t\treturn\n\t}\n\n\tlocalEnv := NewSymbolTableFrameBelow(env)\n\tlocalEnv.Previous = env\n\tbindLetLocals(bindings, localEnv)\n\n\tbody := Cddr(args)\n\n\tvar shouldExit *Data\n\n\tfor true {\n\t\tshouldExit, err = Eval(Car(testClause), localEnv)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif BooleanValue(shouldExit) {\n\t\t\tfor cell := Cdr(testClause); NotNilP(cell); cell = Cdr(cell) {\n\t\t\t\tsexpr := Car(cell)\n\t\t\t\tresult, err = Eval(sexpr, localEnv)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tfor cell := body; NotNilP(cell); cell = Cdr(cell) {\n\t\t\tsexpr := Car(cell)\n\t\t\tresult, err = Eval(sexpr, localEnv)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\trebindDoLocals(bindings, localEnv)\n\t}\n\treturn\n}\n\nfunc ApplyImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tif Length(args) < 1 {\n\t\terr = ProcessError(\"apply requires at least one argument\", env)\n\t\treturn\n\t}\n\n\tf, err := Eval(Car(args), env)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif !FunctionP(f) {\n\t\terr = ProcessError(fmt.Sprintf(\"apply requires a function as it's first argument, but got %s.\", String(f)), env)\n\t}\n\n\tary := make([]*Data, 0, Length(args)-1)\n\n\tvar v *Data\n\tfor c := Cdr(args); NotNilP(c); c = Cdr(c) {\n\t\tv, err = Eval(Car(c), env)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tary = append(ary, v)\n\t}\n\n\tvar argList *Data\n\tif ListP(ary[len(ary)-1]) {\n\t\tif len(ary) > 1 {\n\t\t\targList = ArrayToListWithTail(ary[0:len(ary)-1], ary[len(ary)-1])\n\t\t} else {\n\t\t\targList = ary[0]\n\t\t}\n\t} else {\n\t\terr = ProcessError(\"The last argument to apply must be a list\", env)\n\t\treturn\n\t}\n\n\treturn Apply(f, argList, env)\n}\n\nfunc EvalImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tsexpr, err := Eval(Car(args), env)\n\tif err != nil {\n\t\treturn\n\t}\n\tif !ListP(sexpr) {\n\t\terr = ProcessError(fmt.Sprintf(\"eval expect a list argument, received a %s.\", TypeName(TypeOf(sexpr))), env)\n\t}\n\treturn Eval(sexpr, env)\n}\n\nfunc ChainImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tif Length(args) == 0 {\n\t\terr = ProcessError(\"-> requires at least an initial value.\", env)\n\t\treturn\n\t}\n\n\tvar value *Data\n\n\tvalue, err = Eval(Car(args), env)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor cell := Cdr(args); NotNilP(cell); cell = Cdr(cell) {\n\t\tsexpr := Car(cell)\n\t\tvar newExpr *Data\n\t\tif ListP(sexpr) {\n\t\t\tnewExpr = Cons(Car(sexpr), Cons(value, Cdr(sexpr)))\n\t\t} else {\n\t\t\tnewExpr = Cons(sexpr, Cons(value, nil))\n\t\t}\n\t\tvalue, err = Eval(newExpr, env)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tresult = value\n\treturn\n}\n\nfunc TapImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tif Length(args) == 0 {\n\t\terr = ProcessError(\"tap requires at least an initial value.\", env)\n\t\treturn\n\t}\n\n\tvar value *Data\n\n\tvalue, err = Eval(Car(args), env)\n\tif err != nil {\n\t\treturn\n\t}\n\tresult = value\n\n\tfor cell := Cdr(args); NotNilP(cell); cell = Cdr(cell) {\n\t\tsexpr := Car(cell)\n\t\tvar newExpr *Data\n\t\tif ListP(sexpr) {\n\t\t\tnewExpr = Cons(Car(sexpr), Cons(value, Cdr(sexpr)))\n\t\t} else {\n\t\t\tnewExpr = Cons(sexpr, Cons(value, nil))\n\t\t}\n\t\t_, err = Eval(newExpr, env)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc DefinitionOfImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tvar name *Data = nil\n\tif SymbolP(Car(args)) {\n\t\tname = Car(args)\n\t} else {\n\t\tname = SymbolWithName(\"anonymous\")\n\t}\n\n\tf, err := Eval(Car(args), env)\n\tif err != nil {\n\t\treturn\n\t}\n\tif !FunctionP(f) {\n\t\terr = ProcessError(fmt.Sprintf(\"code requires a function argument, but received a %s.\", TypeName(TypeOf(f))), env)\n\t\treturn\n\t}\n\n\tfunction := FunctionValue(f)\n\tif function.Name == \"anonymous\" {\n\t\treturn Cons(SymbolWithName(\"define\"), Cons(name, Cons(Cons(SymbolWithName(\"lambda\"), Cons(function.Params, function.Body)), nil))), nil\n\t} else {\n\t\treturn Cons(SymbolWithName(\"define\"), Cons(Cons(SymbolWithName(function.Name), function.Params), function.Body)), nil\n\t}\n}\n<commit_msg>Added a version of eval that runs in the global environment.<commit_after>\/\/ Copyright 2014 SteelSeries ApS. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This package implements a basic LISP interpretor for embedding in a go program for scripting.\n\/\/ This file contains the built-in primitive functions.\n\npackage golisp\n\nimport (\n\t\"fmt\"\n)\n\nfunc RegisterSpecialFormPrimitives() {\n\tMakePrimitiveFunction(\"cond\", -1, CondImpl)\n\tMakePrimitiveFunction(\"case\", -1, CaseImpl)\n\tMakePrimitiveFunction(\"if\", -1, IfImpl)\n\tMakePrimitiveFunction(\"lambda\", -1, LambdaImpl)\n\tMakePrimitiveFunction(\"define\", -1, DefineImpl)\n\tMakePrimitiveFunction(\"defmacro\", -1, DefmacroImpl)\n\tMakePrimitiveFunction(\"let\", -1, LetImpl)\n\tMakePrimitiveFunction(\"begin\", -1, BeginImpl)\n\tMakePrimitiveFunction(\"do\", -1, DoImpl)\n\tMakePrimitiveFunction(\"apply\", -1, ApplyImpl)\n\tMakePrimitiveFunction(\"eval\", 1, EvalImpl)\n\tMakePrimitiveFunction(\"global-eval\", 1, GlobalEvalImpl)\n\tMakePrimitiveFunction(\"->\", -1, ChainImpl)\n\tMakePrimitiveFunction(\"=>\", -1, TapImpl)\n\tMakePrimitiveFunction(\"definition-of\", 1, DefinitionOfImpl)\n}\n\nfunc CondImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tvar condition *Data\n\tfor c := args; NotNilP(c); c = Cdr(c) {\n\t\tclause := Car(c)\n\t\tif !PairP(clause) {\n\t\t\terr = ProcessError(\"Cond expect a sequence of clauses that are lists\", env)\n\t\t\treturn\n\t\t}\n\t\tcondition, err = Eval(Car(clause), env)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tif BooleanValue(condition) || StringValue(Car(clause)) == \"else\" {\n\t\t\tfor e := Cdr(clause); NotNilP(e); e = Cdr(e) {\n\t\t\t\tresult, err = Eval(Car(e), env)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc evalList(l *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tfor sexpr := l; NotNilP(sexpr); sexpr = Cdr(sexpr) {\n\t\tresult, err = Eval(Car(sexpr), env)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc CaseImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tvar keyValue *Data\n\n\tkeyValue, err = Eval(Car(args), env)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor clauseCell := Cdr(args); NotNilP(clauseCell); clauseCell = Cdr(clauseCell) {\n\t\tclause := Car(clauseCell)\n\t\tif !PairP(clause) {\n\t\t\terr = ProcessError(\"Case requires non-atomic clauses\", env)\n\t\t\treturn\n\t\t}\n\t\tif ListP(Car(clause)) {\n\t\t\tfor v := Car(clause); NotNilP(v); v = Cdr(v) {\n\t\t\t\tif IsEqual(Car(v), keyValue) {\n\t\t\t\t\treturn evalList(Cdr(clause), env)\n\t\t\t\t}\n\t\t\t}\n\t\t} else if IsEqual(Car(clause), SymbolWithName(\"else\")) {\n\t\t\treturn evalList(Cdr(clause), env)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc IfImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tif Length(args) < 2 || Length(args) > 3 {\n\t\terr = ProcessError(fmt.Sprintf(\"IF requires 2 or 3 arguments. Received %d.\", Length(args)), env)\n\t\treturn\n\t}\n\n\tc, err := Eval(Car(args), env)\n\tif err != nil {\n\t\treturn\n\t}\n\tcondition := BooleanValue(c)\n\tthenClause := Second(args)\n\telseClause := Third(args)\n\n\tif condition {\n\t\treturn Eval(thenClause, env)\n\t} else {\n\t\treturn Eval(elseClause, env)\n\t}\n}\n\nfunc LambdaImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tparams := Car(args)\n\tbody := Cdr(args)\n\treturn FunctionWithNameParamsBodyAndParent(\"anonymous\", params, body, env), nil\n}\n\nfunc DefineImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tvar value *Data\n\tthing := Car(args)\n\tif SymbolP(thing) {\n\t\tvalue, err = Eval(Cadr(args), env)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t} else if PairP(thing) {\n\t\tname := Car(thing)\n\t\tparams := Cdr(thing)\n\t\tthing = name\n\t\tif !SymbolP(name) {\n\t\t\terr = ProcessError(\"Function name has to be a symbol\", env)\n\t\t\treturn\n\t\t}\n\t\tbody := Cdr(args)\n\t\tvalue = FunctionWithNameParamsBodyAndParent(StringValue(name), params, body, env)\n\t} else {\n\t\terr = ProcessError(\"Invalid definition\", env)\n\t\treturn\n\t}\n\tenv.BindLocallyTo(thing, value)\n\treturn value, nil\n}\n\nfunc DefmacroImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tvar value *Data\n\tthing := Car(args)\n\tif PairP(thing) {\n\t\tname := Car(thing)\n\t\tparams := Cdr(thing)\n\t\tthing = name\n\t\tif !SymbolP(name) {\n\t\t\terr = ProcessError(\"Macro name has to be a symbol\", env)\n\t\t\treturn\n\t\t}\n\t\tbody := Cadr(args)\n\t\tvalue = MacroWithNameParamsBodyAndParent(StringValue(name), params, body, env)\n\t} else {\n\t\terr = ProcessError(\"Invalid macro definition\", env)\n\t\treturn\n\t}\n\tenv.BindLocallyTo(thing, value)\n\treturn value, nil\n}\n\nfunc bindLetLocals(bindingForms *Data, env *SymbolTableFrame) (err error) {\n\tvar name *Data\n\tvar value *Data\n\n\tfor cell := bindingForms; NotNilP(cell); cell = Cdr(cell) {\n\t\tbindingPair := Car(cell)\n\t\tif !PairP(bindingPair) {\n\t\t\terr = ProcessError(\"Let requires a list of bindings (with are pairs) as it's first argument\", env)\n\t\t\treturn\n\t\t}\n\t\tname = Car(bindingPair)\n\t\tif !SymbolP(name) {\n\t\t\terr = ProcessError(\"First part of a let binding pair must be a symbol\", env)\n\t\t}\n\t\tvalue, err = Eval(Cadr(bindingPair), env)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tenv.BindLocallyTo(name, value)\n\t}\n\treturn\n}\n\nfunc LetImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tif Length(args) < 1 {\n\t\terr = ProcessError(\"Let requires at least a list of bindings\", env)\n\t\treturn\n\t}\n\n\tif !PairP(Car(args)) {\n\t\terr = ProcessError(\"Let requires a list of bindings as it's first argument\", env)\n\t\treturn\n\t}\n\n\tlocalEnv := NewSymbolTableFrameBelow(env)\n\tlocalEnv.Previous = env\n\tbindLetLocals(Car(args), localEnv)\n\n\tfor cell := Cdr(args); NotNilP(cell); cell = Cdr(cell) {\n\t\tsexpr := Car(cell)\n\t\tresult, err = Eval(sexpr, localEnv)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc BeginImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tfor cell := args; NotNilP(cell); cell = Cdr(cell) {\n\t\tsexpr := Car(cell)\n\t\tresult, err = Eval(sexpr, env)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc rebindDoLocals(bindingForms *Data, env *SymbolTableFrame) (err error) {\n\tvar name *Data\n\tvar value *Data\n\n\tfor cell := bindingForms; NotNilP(cell); cell = Cdr(cell) {\n\t\tbindingTuple := Car(cell)\n\t\tname = First(bindingTuple)\n\t\tif NotNilP(Third(bindingTuple)) {\n\t\t\tvalue, err = Eval(Third(bindingTuple), env)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tenv.BindLocallyTo(name, value)\n\t\t}\n\t}\n\treturn\n}\n\nfunc DoImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tif Length(args) < 2 {\n\t\terr = ProcessError(\"Do requires at least a list of bindings and a test clause\", env)\n\t\treturn\n\t}\n\n\tbindings := Car(args)\n\tif !PairP(bindings) {\n\t\terr = ProcessError(\"Do requires a list of bindings as it's first argument\", env)\n\t\treturn\n\t}\n\n\ttestClause := Cadr(args)\n\tif !PairP(testClause) {\n\t\terr = ProcessError(\"Do requires a list as it's second argument\", env)\n\t\treturn\n\t}\n\n\tlocalEnv := NewSymbolTableFrameBelow(env)\n\tlocalEnv.Previous = env\n\tbindLetLocals(bindings, localEnv)\n\n\tbody := Cddr(args)\n\n\tvar shouldExit *Data\n\n\tfor true {\n\t\tshouldExit, err = Eval(Car(testClause), localEnv)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif BooleanValue(shouldExit) {\n\t\t\tfor cell := Cdr(testClause); NotNilP(cell); cell = Cdr(cell) {\n\t\t\t\tsexpr := Car(cell)\n\t\t\t\tresult, err = Eval(sexpr, localEnv)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tfor cell := body; NotNilP(cell); cell = Cdr(cell) {\n\t\t\tsexpr := Car(cell)\n\t\t\tresult, err = Eval(sexpr, localEnv)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\trebindDoLocals(bindings, localEnv)\n\t}\n\treturn\n}\n\nfunc ApplyImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tif Length(args) < 1 {\n\t\terr = ProcessError(\"apply requires at least one argument\", env)\n\t\treturn\n\t}\n\n\tf, err := Eval(Car(args), env)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif !FunctionP(f) {\n\t\terr = ProcessError(fmt.Sprintf(\"apply requires a function as it's first argument, but got %s.\", String(f)), env)\n\t}\n\n\tary := make([]*Data, 0, Length(args)-1)\n\n\tvar v *Data\n\tfor c := Cdr(args); NotNilP(c); c = Cdr(c) {\n\t\tv, err = Eval(Car(c), env)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tary = append(ary, v)\n\t}\n\n\tvar argList *Data\n\tif ListP(ary[len(ary)-1]) {\n\t\tif len(ary) > 1 {\n\t\t\targList = ArrayToListWithTail(ary[0:len(ary)-1], ary[len(ary)-1])\n\t\t} else {\n\t\t\targList = ary[0]\n\t\t}\n\t} else {\n\t\terr = ProcessError(\"The last argument to apply must be a list\", env)\n\t\treturn\n\t}\n\n\treturn Apply(f, argList, env)\n}\n\nfunc EvalImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tsexpr, err := Eval(Car(args), env)\n\tif err != nil {\n\t\treturn\n\t}\n\tif !ListP(sexpr) {\n\t\terr = ProcessError(fmt.Sprintf(\"eval expect a list argument, received a %s.\", TypeName(TypeOf(sexpr))), env)\n\t}\n\treturn Eval(sexpr, env)\n}\n\nfunc GlobalEvalImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tsexpr, err := Eval(Car(args), env)\n\tif err != nil {\n\t\treturn\n\t}\n\tif !ListP(sexpr) {\n\t\terr = ProcessError(fmt.Sprintf(\"eval expect a list argument, received a %s.\", TypeName(TypeOf(sexpr))), env)\n\t}\n\treturn Eval(sexpr, Global)\n}\n\nfunc ChainImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tif Length(args) == 0 {\n\t\terr = ProcessError(\"-> requires at least an initial value.\", env)\n\t\treturn\n\t}\n\n\tvar value *Data\n\n\tvalue, err = Eval(Car(args), env)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor cell := Cdr(args); NotNilP(cell); cell = Cdr(cell) {\n\t\tsexpr := Car(cell)\n\t\tvar newExpr *Data\n\t\tif ListP(sexpr) {\n\t\t\tnewExpr = Cons(Car(sexpr), Cons(value, Cdr(sexpr)))\n\t\t} else {\n\t\t\tnewExpr = Cons(sexpr, Cons(value, nil))\n\t\t}\n\t\tvalue, err = Eval(newExpr, env)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\tresult = value\n\treturn\n}\n\nfunc TapImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tif Length(args) == 0 {\n\t\terr = ProcessError(\"tap requires at least an initial value.\", env)\n\t\treturn\n\t}\n\n\tvar value *Data\n\n\tvalue, err = Eval(Car(args), env)\n\tif err != nil {\n\t\treturn\n\t}\n\tresult = value\n\n\tfor cell := Cdr(args); NotNilP(cell); cell = Cdr(cell) {\n\t\tsexpr := Car(cell)\n\t\tvar newExpr *Data\n\t\tif ListP(sexpr) {\n\t\t\tnewExpr = Cons(Car(sexpr), Cons(value, Cdr(sexpr)))\n\t\t} else {\n\t\t\tnewExpr = Cons(sexpr, Cons(value, nil))\n\t\t}\n\t\t_, err = Eval(newExpr, env)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc DefinitionOfImpl(args *Data, env *SymbolTableFrame) (result *Data, err error) {\n\tvar name *Data = nil\n\tif SymbolP(Car(args)) {\n\t\tname = Car(args)\n\t} else {\n\t\tname = SymbolWithName(\"anonymous\")\n\t}\n\n\tf, err := Eval(Car(args), env)\n\tif err != nil {\n\t\treturn\n\t}\n\tif !FunctionP(f) {\n\t\terr = ProcessError(fmt.Sprintf(\"code requires a function argument, but received a %s.\", TypeName(TypeOf(f))), env)\n\t\treturn\n\t}\n\n\tfunction := FunctionValue(f)\n\tif function.Name == \"anonymous\" {\n\t\treturn Cons(SymbolWithName(\"define\"), Cons(name, Cons(Cons(SymbolWithName(\"lambda\"), Cons(function.Params, function.Body)), nil))), nil\n\t} else {\n\t\treturn Cons(SymbolWithName(\"define\"), Cons(Cons(SymbolWithName(function.Name), function.Params), function.Body)), nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017, Joseph deBlaquiere <jadeblaquiere@yahoo.com>\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions are met:\n\/\/\n\/\/ * Redistributions of source code must retain the above copyright notice, this\n\/\/ list of conditions and the following disclaimer.\n\/\/\n\/\/ * Redistributions in binary form must reproduce the above copyright notice,\n\/\/ this list of conditions and the following disclaimer in the documentation\n\/\/ and\/or other materials provided with the distribution.\n\/\/\n\/\/ * Neither the name of ciphrtxt nor the names of its\n\/\/ contributors may be used to endorse or promote products derived from\n\/\/ this software without specific prior written permission.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n\/\/ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n\/\/ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n\/\/ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n\/\/ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n\/\/ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n\/\/ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n\/\/ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n\/\/ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\/\/ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage ciphrtxt\n\nimport (\n\t\/\/ \"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\tcwebsocket \"github.com\/jadeblaquiere\/websocket-client\"\n)\n\nconst (\n\tDefaultWatchdogTimeout = 150 * time.Second\n\tDefaultTimeTickle = 30 * time.Second\n\tDefaultStatusTickle = 300 * time.Second\n\tDefaultPeersTickle = 300 * time.Second\n)\n\ntype WSDisconnectFunc func()\n\ntype WSProtocolHandler interface {\n\tTxHeader(rmh MessageHeader)\n\tOnDisconnect(f WSDisconnectFunc)\n\tDisconnect()\n\tStatus() *StatusResponse\n}\n\nfunc NewWSProtocolHandler(con cwebsocket.ClientConnection, local *LocalHeaderCache, remote *HeaderCache) WSProtocolHandler {\n\twsh := wsHandler{\n\t\tcon: con,\n\t\tlocal: local,\n\t\tremote: remote,\n\t}\n\tif remote == nil {\n\t\twsh.inbound = true\n\t}\n\twsh.setup()\n\twsHandlerListMutex.Lock()\n\tdefer wsHandlerListMutex.Unlock()\n\twsHandlerList = append(wsHandlerList, &wsh)\n\treturn &wsh\n}\n\ntype wsHandler struct {\n\tcon cwebsocket.ClientConnection\n\tlocal *LocalHeaderCache\n\tremote *HeaderCache\n\ttmpStatus StatusResponse\n\tdisconnect WSDisconnectFunc\n\twatchdog *time.Timer\n\ttimeTickle *time.Timer\n\tstatusTickle *time.Timer\n\tpeersTickle *time.Timer\n\tabort chan bool\n\tinbound bool\n}\n\nvar wsHandlerList []*wsHandler\nvar wsHandlerListMutex sync.Mutex\n\nfunc (wsh *wsHandler) resetTimeTickle() {\n\tif !wsh.timeTickle.Stop() {\n\t\t<-wsh.timeTickle.C\n\t}\n\twsh.timeTickle.Reset(DefaultTimeTickle)\n\twsh.resetWatchdog()\n}\n\nfunc (wsh *wsHandler) resetStatusTickle() {\n\tif !wsh.statusTickle.Stop() {\n\t\t<-wsh.statusTickle.C\n\t}\n\twsh.statusTickle.Reset(DefaultStatusTickle)\n\twsh.resetWatchdog()\n}\n\nfunc (wsh *wsHandler) resetWatchdog() {\n\tif !wsh.watchdog.Stop() {\n\t\t<-wsh.watchdog.C\n\t}\n\twsh.watchdog.Reset(DefaultWatchdogTimeout)\n}\n\nfunc (wsh *wsHandler) txTime(t int) {\n\twsh.resetTimeTickle()\n\twsh.log(\"tx->TIME to\")\n\t\/\/ if wsh.remote != nil {\n\t\/\/ fmt.Printf(\"tx->TIME to %s:%d\\n\", wsh.remote.host, wsh.remote.port)\n\t\/\/ } else {\n\t\/\/ fmt.Printf(\"tx->TIME to Pending Peer\\n\")\n\t\/\/ }\n\twsh.con.Emit(\"response-time\", int(time.Now().Unix()))\n}\n\nfunc (wsh *wsHandler) rxTime(t int) {\n\twsh.resetWatchdog()\n\twsh.log(\"rx<-TIME from\")\n\tif wsh.remote != nil {\n\t\t\/\/ fmt.Printf(\"rx<-TIME from %s:%d\\n\", wsh.remote.host, wsh.remote.port)\n\t\twsh.remote.serverTime = uint32(t)\n\t}\n}\n\nfunc (wsh *wsHandler) txStatus(t int) {\n\twsh.resetWatchdog()\n\tj, err := json.Marshal(wsh.local.Status())\n\tif err == nil {\n\t\twsh.log(\"tx->STATUS to\")\n\t\t\/\/ if wsh.remote != nil {\n\t\t\/\/ fmt.Printf(\"tx->STATUS to %s:%d\\n\", wsh.remote.host, wsh.remote.port)\n\t\t\/\/ } else {\n\t\t\/\/ fmt.Printf(\"tx->STATUS to Pending Peer\\n\")\n\t\t\/\/ }\n\t\twsh.con.Emit(\"response-status\", j)\n\t}\n}\n\nfunc (wsh *wsHandler) rxStatus(m []byte) {\n\tvar status StatusResponse\n\terr := json.Unmarshal(m, &status)\n\tif err == nil {\n\t\twsh.resetStatusTickle()\n\t\twsh.log(\"rx<-STATUS from\")\n\t\tif wsh.remote != nil {\n\t\t\t\/\/ fmt.Printf(\"rx<-STATUS from %s:%d\\n\", wsh.remote.host, wsh.remote.port)\n\t\t\twsh.remote.status = status\n\t\t} else {\n\t\t\t\/\/ fmt.Printf(\"rx<-STATUS from Pending Peer %s:%d\\n\", status.Network.Host, status.Network.MSGPort)\n\t\t\twsh.tmpStatus = status\n\t\t}\n\t}\n}\n\nfunc (wsh *wsHandler) txPeers(t int) {\n\twsh.resetWatchdog()\n\tpeers := wsh.local.ListPeers()\n\tfor _, peer := range peers {\n\t\tj, err := json.Marshal(peer)\n\t\tif err == nil {\n\t\t\twsh.log(fmt.Sprintf(\"tx->PEER (%s:%d) to \", peer.Host, peer.Port))\n\t\t\t\/\/ if wsh.remote != nil {\n\t\t\t\/\/ fmt.Printf(\"tx->PEER %s:%d to %s:%d\\n\", peer.Host, peer.Port, wsh.remote.host, wsh.remote.port)\n\t\t\t\/\/ } else {\n\t\t\t\/\/ fmt.Printf(\"tx->PEER %s:%d to Pending Peer\\n\", peer.Host, peer.Port)\n\t\t\t\/\/ }\n\t\t\twsh.con.Emit(\"response-peer\", j)\n\t\t}\n\t}\n}\n\nfunc (wsh *wsHandler) rxPeer(m []byte) {\n\twsh.resetWatchdog()\n\tvar peer PeerItemResponse\n\terr := json.Unmarshal(m, &peer)\n\tif err == nil {\n\t\twsh.log(fmt.Sprintf(\"rx<-PEER (%s:%d) from\", peer.Host, peer.Port))\n\t\t\/\/ if wsh.remote != nil {\n\t\t\/\/ fmt.Printf(\"rx<-PEER %s:%d from %s:%d\\n\", peer.Host, peer.Port, wsh.remote.host, wsh.remote.port)\n\t\t\/\/ } else {\n\t\t\/\/ fmt.Printf(\"rx<-PEER %s:%d from Pending Peer\\n\", peer.Host, peer.Port)\n\t\t\/\/}\n\t\twsh.local.AddPeer(peer.Host, peer.Port)\n\t}\n}\n\nfunc (wsh *wsHandler) TxHeader(rmh MessageHeader) {\n\t\/\/fmt.Printf(\"tx->HEADER to %s:%d\\n\", wsh.remote.host, wsh.remote.port)\n\twsh.log(\"tx->HEADER to\")\n\twsh.con.Emit(\"response-header\", rmh.Serialize())\n}\n\nfunc (wsh *wsHandler) rxHeader(s string) {\n\trmh := &RawMessageHeader{}\n\terr := rmh.Deserialize(s)\n\tif err == nil {\n\t\twsh.resetWatchdog()\n\t\twsh.log(\"rx<-HEADER from\")\n\t\tif wsh.remote != nil {\n\t\t\t\/\/ fmt.Printf(\"rx<-HEADER from %s:%d\\n\", wsh.remote.host, wsh.remote.port)\n\t\t\tinsert, err := wsh.remote.Insert(rmh)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif insert {\n\t\t\t\t_, _ = wsh.local.Insert(rmh)\n\t\t\t}\n\t\t\t\/\/ } else {\n\t\t\t\/\/ fmt.Printf(\"rx<-HEADER from Pending Peer\\n\")\n\t\t}\n\t} else {\n\t\tfmt.Printf(\"rx<-HEADER, error deserializing %s (len %d)\\n\", s, len(s))\n\t}\n}\n\nfunc (wsh *wsHandler) log(logmsg string) {\n\tif wsh.remote != nil {\n\t\tfmt.Printf(\"%s %s:%d\\n\", logmsg, wsh.remote.host, wsh.remote.port)\n\t} else {\n\t\tfmt.Printf(\"%s Pending (%s:%d)\\n\", logmsg, wsh.tmpStatus.Network.Host, wsh.tmpStatus.Network.MSGPort)\n\t}\n}\n\nfunc (wsh *wsHandler) OnDisconnect(f WSDisconnectFunc) {\n\twsh.disconnect = f\n}\n\nfunc (wsh *wsHandler) Status() *StatusResponse {\n\tif wsh.remote != nil {\n\t\treturn &wsh.remote.status\n\t} else {\n\t\treturn &wsh.tmpStatus\n\t}\n}\n\nfunc (wsh *wsHandler) setup() {\n\twsh.watchdog = time.NewTimer(DefaultWatchdogTimeout)\n\twsh.timeTickle = time.NewTimer(DefaultTimeTickle)\n\twsh.statusTickle = time.NewTimer(DefaultStatusTickle)\n\twsh.peersTickle = time.NewTimer(DefaultPeersTickle)\n\twsh.abort = make(chan bool)\n\twsh.con.On(\"request-time\", wsh.txTime)\n\twsh.con.On(\"response-time\", wsh.rxTime)\n\twsh.con.On(\"request-status\", wsh.txStatus)\n\twsh.con.On(\"response-status\", wsh.rxStatus)\n\twsh.con.On(\"response-header\", wsh.rxHeader)\n\twsh.con.On(\"request-peers\", wsh.txPeers)\n\twsh.con.On(\"response-peer\", wsh.rxPeer)\n\twsh.con.OnDisconnect(func() {\n\t\twsh.Disconnect()\n\t})\n\n\tgo wsh.eventLoop()\n\tgo wsh.txPeers(0)\n}\n\nfunc (wsh *wsHandler) Disconnect() {\n\tif wsh.disconnect != nil {\n\t\twsh.disconnect()\n\n\t}\n\tif !wsh.timeTickle.Stop() {\n\t\t<-wsh.timeTickle.C\n\t}\n\tif !wsh.statusTickle.Stop() {\n\t\t<-wsh.statusTickle.C\n\t}\n\tif !wsh.watchdog.Stop() {\n\t\t<-wsh.watchdog.C\n\t}\n\twsh.abort <- true\n\twsh.con.Disconnect()\n\twsHandlerListMutex.Lock()\n\tdefer wsHandlerListMutex.Unlock()\n\tfor i, w := range wsHandlerList {\n\t\tif w == wsh {\n\t\t\twsHandlerList[i] = wsHandlerList[len(wsHandlerList)-1]\n\t\t\twsHandlerList[len(wsHandlerList)-1] = nil\n\t\t\twsHandlerList = wsHandlerList[:len(wsHandlerList)-1]\n\t\t\treturn\n\t\t}\n\t}\n\tpanic(\"wsHandler.Disconnect: trying to remove element not in list\")\n}\n\nfunc (wsh *wsHandler) eventLoop() {\n\tfor {\n\t\tselect {\n\t\tcase <-wsh.watchdog.C:\n\t\t\tfmt.Println(\"Watchdog expired, closing connection\")\n\t\t\twsh.Disconnect()\n\t\t\treturn\n\t\tcase <-wsh.timeTickle.C:\n\t\t\tif wsh.remote != nil {\n\t\t\t\tfmt.Printf(\"tx->TIME REQUEST to %s:%d\\n\", wsh.remote.host, wsh.remote.port)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"tx->TIME REQUEST to Pending Peer\\n\")\n\t\t\t}\n\t\t\twsh.con.Emit(\"request-time\", int(0))\n\t\t\twsh.timeTickle.Reset(DefaultTimeTickle)\n\t\t\tcontinue\n\t\tcase <-wsh.statusTickle.C:\n\t\t\tif wsh.remote != nil {\n\t\t\t\tfmt.Printf(\"tx->STATUS REQUEST to %s:%d\\n\", wsh.remote.host, wsh.remote.port)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"tx->STATUS REQUEST to Pending Peer\\n\")\n\t\t\t}\n\t\t\twsh.con.Emit(\"request-status\", int(0))\n\t\t\twsh.statusTickle.Reset(DefaultStatusTickle)\n\t\t\tcontinue\n\t\tcase <-wsh.peersTickle.C:\n\t\t\tif wsh.remote != nil {\n\t\t\t\tfmt.Printf(\"tx->PEERS REQUEST to %s:%d\\n\", wsh.remote.host, wsh.remote.port)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"tx->PEERS REQUEST to Pending Peer\\n\")\n\t\t\t}\n\t\t\twsh.con.Emit(\"request-peers\", int(0))\n\t\t\twsh.statusTickle.Reset(DefaultStatusTickle)\n\t\t\tcontinue\n\t\tcase done := <-wsh.abort:\n\t\t\tif done {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>detect no-connect state<commit_after>\/\/ Copyright (c) 2017, Joseph deBlaquiere <jadeblaquiere@yahoo.com>\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions are met:\n\/\/\n\/\/ * Redistributions of source code must retain the above copyright notice, this\n\/\/ list of conditions and the following disclaimer.\n\/\/\n\/\/ * Redistributions in binary form must reproduce the above copyright notice,\n\/\/ this list of conditions and the following disclaimer in the documentation\n\/\/ and\/or other materials provided with the distribution.\n\/\/\n\/\/ * Neither the name of ciphrtxt nor the names of its\n\/\/ contributors may be used to endorse or promote products derived from\n\/\/ this software without specific prior written permission.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n\/\/ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n\/\/ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n\/\/ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n\/\/ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n\/\/ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n\/\/ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n\/\/ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n\/\/ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\/\/ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage ciphrtxt\n\nimport (\n\t\/\/ \"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\tcwebsocket \"github.com\/jadeblaquiere\/websocket-client\"\n)\n\nconst (\n\tDefaultWatchdogTimeout = 150 * time.Second\n\tDefaultTimeTickle = 30 * time.Second\n\tDefaultStatusTickle = 300 * time.Second\n\tDefaultPeersTickle = 300 * time.Second\n)\n\ntype WSDisconnectFunc func()\n\ntype WSProtocolHandler interface {\n\tTxHeader(rmh MessageHeader)\n\tOnDisconnect(f WSDisconnectFunc)\n\tDisconnect()\n\tStatus() *StatusResponse\n}\n\nfunc NewWSProtocolHandler(con cwebsocket.ClientConnection, local *LocalHeaderCache, remote *HeaderCache) WSProtocolHandler {\n\twsh := wsHandler{\n\t\tcon: con,\n\t\tlocal: local,\n\t\tremote: remote,\n\t}\n\tif remote == nil {\n\t\twsh.inbound = true\n\t}\n\twsh.setup()\n\twsHandlerListMutex.Lock()\n\tdefer wsHandlerListMutex.Unlock()\n\twsHandlerList = append(wsHandlerList, &wsh)\n\treturn &wsh\n}\n\ntype wsHandler struct {\n\tcon cwebsocket.ClientConnection\n\tlocal *LocalHeaderCache\n\tremote *HeaderCache\n\ttmpStatus *StatusResponse\n\tdisconnect WSDisconnectFunc\n\twatchdog *time.Timer\n\ttimeTickle *time.Timer\n\tstatusTickle *time.Timer\n\tpeersTickle *time.Timer\n\tabort chan bool\n\tinbound bool\n}\n\nvar wsHandlerList []*wsHandler\nvar wsHandlerListMutex sync.Mutex\n\nfunc (wsh *wsHandler) resetTimeTickle() {\n\tif !wsh.timeTickle.Stop() {\n\t\t<-wsh.timeTickle.C\n\t}\n\twsh.timeTickle.Reset(DefaultTimeTickle)\n\twsh.resetWatchdog()\n}\n\nfunc (wsh *wsHandler) resetStatusTickle() {\n\tif !wsh.statusTickle.Stop() {\n\t\t<-wsh.statusTickle.C\n\t}\n\twsh.statusTickle.Reset(DefaultStatusTickle)\n\twsh.resetWatchdog()\n}\n\nfunc (wsh *wsHandler) resetWatchdog() {\n\tif !wsh.watchdog.Stop() {\n\t\t<-wsh.watchdog.C\n\t}\n\twsh.watchdog.Reset(DefaultWatchdogTimeout)\n}\n\nfunc (wsh *wsHandler) txTime(t int) {\n\twsh.resetTimeTickle()\n\twsh.log(\"tx->TIME to\")\n\t\/\/ if wsh.remote != nil {\n\t\/\/ fmt.Printf(\"tx->TIME to %s:%d\\n\", wsh.remote.host, wsh.remote.port)\n\t\/\/ } else {\n\t\/\/ fmt.Printf(\"tx->TIME to Pending Peer\\n\")\n\t\/\/ }\n\twsh.con.Emit(\"response-time\", int(time.Now().Unix()))\n}\n\nfunc (wsh *wsHandler) rxTime(t int) {\n\twsh.resetWatchdog()\n\twsh.log(\"rx<-TIME from\")\n\tif wsh.remote != nil {\n\t\t\/\/ fmt.Printf(\"rx<-TIME from %s:%d\\n\", wsh.remote.host, wsh.remote.port)\n\t\twsh.remote.serverTime = uint32(t)\n\t}\n}\n\nfunc (wsh *wsHandler) txStatus(t int) {\n\twsh.resetWatchdog()\n\tj, err := json.Marshal(wsh.local.Status())\n\tif err == nil {\n\t\twsh.log(\"tx->STATUS to\")\n\t\t\/\/ if wsh.remote != nil {\n\t\t\/\/ fmt.Printf(\"tx->STATUS to %s:%d\\n\", wsh.remote.host, wsh.remote.port)\n\t\t\/\/ } else {\n\t\t\/\/ fmt.Printf(\"tx->STATUS to Pending Peer\\n\")\n\t\t\/\/ }\n\t\twsh.con.Emit(\"response-status\", j)\n\t} else {\n\t\tfmt.Printf(\"CLIENT: failed to marshal status response\")\n\t}\n}\n\nfunc (wsh *wsHandler) rxStatus(m []byte) {\n\tvar status StatusResponse\n\terr := json.Unmarshal(m, &status)\n\tif err == nil {\n\t\twsh.resetStatusTickle()\n\t\twsh.log(\"rx<-STATUS from\")\n\t\tif wsh.remote != nil {\n\t\t\t\/\/ fmt.Printf(\"rx<-STATUS from %s:%d\\n\", wsh.remote.host, wsh.remote.port)\n\t\t\twsh.remote.status = status\n\t\t} else {\n\t\t\t\/\/ fmt.Printf(\"rx<-STATUS from Pending Peer %s:%d\\n\", status.Network.Host, status.Network.MSGPort)\n\t\t\twsh.tmpStatus = &status\n\t\t}\n\t}\n}\n\nfunc (wsh *wsHandler) txPeers(t int) {\n\twsh.resetWatchdog()\n\tpeers := wsh.local.ListPeers()\n\tfor _, peer := range peers {\n\t\tj, err := json.Marshal(peer)\n\t\tif err == nil {\n\t\t\twsh.log(fmt.Sprintf(\"tx->PEER (%s:%d) to\", peer.Host, peer.Port))\n\t\t\t\/\/ if wsh.remote != nil {\n\t\t\t\/\/ fmt.Printf(\"tx->PEER %s:%d to %s:%d\\n\", peer.Host, peer.Port, wsh.remote.host, wsh.remote.port)\n\t\t\t\/\/ } else {\n\t\t\t\/\/ fmt.Printf(\"tx->PEER %s:%d to Pending Peer\\n\", peer.Host, peer.Port)\n\t\t\t\/\/ }\n\t\t\twsh.con.Emit(\"response-peer\", j)\n\t\t}\n\t}\n}\n\nfunc (wsh *wsHandler) rxPeer(m []byte) {\n\twsh.resetWatchdog()\n\tvar peer PeerItemResponse\n\terr := json.Unmarshal(m, &peer)\n\tif err == nil {\n\t\twsh.log(fmt.Sprintf(\"rx<-PEER (%s:%d) from\", peer.Host, peer.Port))\n\t\t\/\/ if wsh.remote != nil {\n\t\t\/\/ fmt.Printf(\"rx<-PEER %s:%d from %s:%d\\n\", peer.Host, peer.Port, wsh.remote.host, wsh.remote.port)\n\t\t\/\/ } else {\n\t\t\/\/ fmt.Printf(\"rx<-PEER %s:%d from Pending Peer\\n\", peer.Host, peer.Port)\n\t\t\/\/}\n\t\twsh.local.AddPeer(peer.Host, peer.Port)\n\t}\n}\n\nfunc (wsh *wsHandler) TxHeader(rmh MessageHeader) {\n\t\/\/fmt.Printf(\"tx->HEADER to %s:%d\\n\", wsh.remote.host, wsh.remote.port)\n\twsh.log(\"tx->HEADER to\")\n\twsh.con.Emit(\"response-header\", rmh.Serialize())\n}\n\nfunc (wsh *wsHandler) rxHeader(s string) {\n\trmh := &RawMessageHeader{}\n\terr := rmh.Deserialize(s)\n\tif err == nil {\n\t\twsh.resetWatchdog()\n\t\twsh.log(\"rx<-HEADER from\")\n\t\tif wsh.remote != nil {\n\t\t\t\/\/ fmt.Printf(\"rx<-HEADER from %s:%d\\n\", wsh.remote.host, wsh.remote.port)\n\t\t\tinsert, err := wsh.remote.Insert(rmh)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif insert {\n\t\t\t\t_, _ = wsh.local.Insert(rmh)\n\t\t\t}\n\t\t\t\/\/ } else {\n\t\t\t\/\/ fmt.Printf(\"rx<-HEADER from Pending Peer\\n\")\n\t\t}\n\t} else {\n\t\tfmt.Printf(\"rx<-HEADER, error deserializing %s (len %d)\\n\", s, len(s))\n\t}\n}\n\nfunc (wsh *wsHandler) log(logmsg string) {\n\tif wsh.remote != nil {\n\t\tfmt.Printf(\"%s %s:%d\\n\", logmsg, wsh.remote.host, wsh.remote.port)\n\t} else {\n\t\tif wsh.tmpStatus != nil {\n\t\t\tfmt.Printf(\"%s Pending (%s:%d)\\n\", logmsg, wsh.tmpStatus.Network.Host, wsh.tmpStatus.Network.MSGPort)\n\t\t} else {\n\t\t\tfmt.Printf(\"%s Pending (unknown)\\n\", logmsg)\n\t\t}\n\t}\n}\n\nfunc (wsh *wsHandler) OnDisconnect(f WSDisconnectFunc) {\n\twsh.disconnect = f\n}\n\nfunc (wsh *wsHandler) Status() *StatusResponse {\n\tif wsh.remote != nil {\n\t\treturn &wsh.remote.status\n\t} else {\n\t\treturn wsh.tmpStatus\n\t}\n}\n\nfunc (wsh *wsHandler) setup() {\n\twsh.watchdog = time.NewTimer(DefaultWatchdogTimeout)\n\twsh.timeTickle = time.NewTimer(DefaultTimeTickle)\n\twsh.statusTickle = time.NewTimer(DefaultStatusTickle)\n\twsh.peersTickle = time.NewTimer(DefaultPeersTickle)\n\twsh.abort = make(chan bool)\n\twsh.con.On(\"request-time\", wsh.txTime)\n\twsh.con.On(\"response-time\", wsh.rxTime)\n\twsh.con.On(\"request-status\", wsh.txStatus)\n\twsh.con.On(\"response-status\", wsh.rxStatus)\n\twsh.con.On(\"response-header\", wsh.rxHeader)\n\twsh.con.On(\"request-peers\", wsh.txPeers)\n\twsh.con.On(\"response-peer\", wsh.rxPeer)\n\twsh.con.OnDisconnect(func() {\n\t\twsh.Disconnect()\n\t})\n\n\tgo wsh.eventLoop()\n\tgo wsh.txPeers(0)\n}\n\nfunc (wsh *wsHandler) Disconnect() {\n\tif wsh.disconnect != nil {\n\t\twsh.disconnect()\n\n\t}\n\tif !wsh.timeTickle.Stop() {\n\t\t<-wsh.timeTickle.C\n\t}\n\tif !wsh.statusTickle.Stop() {\n\t\t<-wsh.statusTickle.C\n\t}\n\tif !wsh.watchdog.Stop() {\n\t\t<-wsh.watchdog.C\n\t}\n\twsh.abort <- true\n\t\/\/wsh.con.Disconnect()\n\twsHandlerListMutex.Lock()\n\tdefer wsHandlerListMutex.Unlock()\n\tfor i, w := range wsHandlerList {\n\t\tif w == wsh {\n\t\t\twsHandlerList[i] = wsHandlerList[len(wsHandlerList)-1]\n\t\t\twsHandlerList[len(wsHandlerList)-1] = nil\n\t\t\twsHandlerList = wsHandlerList[:len(wsHandlerList)-1]\n\t\t\treturn\n\t\t}\n\t}\n\tpanic(\"wsHandler.Disconnect: trying to remove element not in list\")\n}\n\nfunc (wsh *wsHandler) eventLoop() {\n\tfor {\n\t\tselect {\n\t\tcase <-wsh.watchdog.C:\n\t\t\tfmt.Println(\"Watchdog expired, closing connection\")\n\t\t\twsh.Disconnect()\n\t\t\treturn\n\t\tcase <-wsh.timeTickle.C:\n\t\t\twsh.log(\"tx->TIME REQUEST to\")\n\t\t\t\/\/ if wsh.remote != nil {\n\t\t\t\/\/ fmt.Printf(\"tx->TIME REQUEST to %s:%d\\n\", wsh.remote.host, wsh.remote.port)\n\t\t\t\/\/ } else {\n\t\t\t\/\/ fmt.Printf(\"tx->TIME REQUEST to Pending Peer\\n\")\n\t\t\t\/\/ }\n\t\t\twsh.con.Emit(\"request-time\", int(0))\n\t\t\twsh.timeTickle.Reset(DefaultTimeTickle)\n\t\t\tcontinue\n\t\tcase <-wsh.statusTickle.C:\n\t\t\twsh.log(\"tx->STATUS REQUEST to\")\n\t\t\t\/\/ if wsh.remote != nil {\n\t\t\t\/\/ fmt.Printf(\"tx->STATUS REQUEST to %s:%d\\n\", wsh.remote.host, wsh.remote.port)\n\t\t\t\/\/ } else {\n\t\t\t\/\/ fmt.Printf(\"tx->STATUS REQUEST to Pending Peer\\n\")\n\t\t\t\/\/ }\n\t\t\twsh.con.Emit(\"request-status\", int(0))\n\t\t\twsh.statusTickle.Reset(DefaultStatusTickle)\n\t\t\tcontinue\n\t\tcase <-wsh.peersTickle.C:\n\t\t\twsh.log(\"tx->PEERS REQUEST to\")\n\t\t\t\/\/ if wsh.remote != nil {\n\t\t\t\/\/ \tfmt.Printf(\"tx->PEERS REQUEST to %s:%d\\n\", wsh.remote.host, wsh.remote.port)\n\t\t\t\/\/ } else {\n\t\t\t\/\/ \tfmt.Printf(\"tx->PEERS REQUEST to Pending Peer\\n\")\n\t\t\t\/\/ }\n\t\t\twsh.con.Emit(\"request-peers\", int(0))\n\t\t\twsh.statusTickle.Reset(DefaultStatusTickle)\n\t\t\tcontinue\n\t\tcase done := <-wsh.abort:\n\t\t\tif done {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux\n\/\/ +build cgo\n\npackage idmap\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"unsafe\"\n\n\t\"golang.org\/x\/sys\/unix\"\n\n\t\/\/ Used by cgo\n\t_ \"github.com\/lxc\/lxd\/lxd\/include\"\n\n\t\"github.com\/lxc\/lxd\/shared\"\n)\n\n\/\/ #cgo LDFLAGS: -lacl\n\/*\n#ifndef _GNU_SOURCE\n#define _GNU_SOURCE 1\n#endif\n#include <byteswap.h>\n#include <errno.h>\n#include <fcntl.h>\n#include <limits.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys\/capability.h>\n#include <unistd.h>\n#include <sys\/stat.h>\n#include <sys\/types.h>\n#include <sys\/acl.h>\n\n\/\/ Needs to be included at the end\n#include <sys\/xattr.h>\n\n#include \"..\/..\/lxd\/include\/memory_utils.h\"\n\n#ifndef VFS_CAP_REVISION_1\n#define VFS_CAP_REVISION_1 0x01000000\n#endif\n\n#ifndef VFS_CAP_REVISION_2\n#define VFS_CAP_REVISION_2 0x02000000\n#endif\n\n#ifndef VFS_CAP_REVISION_3\n#define VFS_CAP_REVISION_3 0x03000000\nstruct vfs_ns_cap_data {\n\t__le32 magic_etc;\n\tstruct {\n\t\t__le32 permitted;\n\t\t__le32 inheritable;\n\t} data[VFS_CAP_U32];\n\t__le32 rootid;\n};\n#endif\n\n#if __BYTE_ORDER == __BIG_ENDIAN\n#define BE32_TO_LE32(x) bswap_32(x)\n#else\n#define BE32_TO_LE32(x) (x)\n#endif\n\nint set_vfs_ns_caps(char *path, char *caps, ssize_t len, uint32_t uid)\n{\n\t\/\/ Works because vfs_ns_cap_data is a superset of vfs_cap_data (rootid\n\t\/\/ field added to the end)\n\tstruct vfs_ns_cap_data ns_xattr;\n\n\tmemset(&ns_xattr, 0, sizeof(ns_xattr));\n\tmemcpy(&ns_xattr, caps, len);\n\tns_xattr.magic_etc &= ~(VFS_CAP_REVISION_1 | VFS_CAP_REVISION_2);\n\tns_xattr.magic_etc |= VFS_CAP_REVISION_3;\n\tns_xattr.rootid = BE32_TO_LE32(uid);\n\n\treturn setxattr(path, \"security.capability\", &ns_xattr, sizeof(ns_xattr), 0);\n}\n\nint set_dummy_fs_ns_caps(const char *path)\n{\n\t#define __raise_cap_permitted(x, ns_cap_data) ns_cap_data.data[(x)>>5].permitted |= (1<<((x)&31))\n\n\tstruct vfs_ns_cap_data ns_xattr;\n\n\tmemset(&ns_xattr, 0, sizeof(ns_xattr));\n __raise_cap_permitted(CAP_NET_RAW, ns_xattr);\n\tns_xattr.magic_etc |= VFS_CAP_REVISION_3 | VFS_CAP_FLAGS_EFFECTIVE;\n\tns_xattr.rootid = BE32_TO_LE32(1000000);\n\n\treturn setxattr(path, \"security.capability\", &ns_xattr, sizeof(ns_xattr), 0);\n}\n\nint shiftowner(char *basepath, char *path, int uid, int gid)\n{\n\t__do_close int fd = -EBADF;\n\tint ret;\n\tchar fdpath[PATH_MAX], realpath[PATH_MAX];\n\tstruct stat sb;\n\n\tfd = open(path, O_PATH | O_NOFOLLOW);\n\tif (fd < 0) {\n\t\tperror(\"Failed open\");\n\t\treturn 1;\n\t}\n\n\tret = sprintf(fdpath, \"\/proc\/self\/fd\/%d\", fd);\n\tif (ret < 0) {\n\t\tperror(\"Failed sprintf\");\n\t\treturn 1;\n\t}\n\n\tret = readlink(fdpath, realpath, PATH_MAX);\n\tif (ret < 0) {\n\t\tperror(\"Failed readlink\");\n\t\treturn 1;\n\t}\n\n\tif (strlen(realpath) < strlen(basepath)) {\n\t\tprintf(\"Invalid path, source (%s) is outside of basepath (%s)\\n\", realpath, basepath);\n\t\treturn 1;\n\t}\n\n\tif (strncmp(realpath, basepath, strlen(basepath))) {\n\t\tprintf(\"Invalid path, source (%s) is outside of basepath \" \"(%s).\\n\", realpath, basepath);\n\t\treturn 1;\n\t}\n\n\tret = fstat(fd, &sb);\n\tif (ret < 0) {\n\t\tperror(\"Failed fstat\");\n\t\treturn 1;\n\t}\n\n\tret = fchownat(fd, \"\", uid, gid, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW);\n\tif (ret < 0) {\n\t\tperror(\"Failed chown\");\n\t\treturn 1;\n\t}\n\n\tif (!S_ISLNK(sb.st_mode)) {\n\t\tret = chmod(fdpath, sb.st_mode);\n\t\tif (ret < 0) {\n\t\t\tperror(\"Failed chmod\");\n\t\t\treturn 1;\n\t\t}\n\t}\n\n\treturn 0;\n}\n*\/\nimport \"C\"\n\n\/\/ ShiftOwner updates uid and gid for a file when entering\/exiting a namespace\nfunc ShiftOwner(basepath string, path string, uid int, gid int) error {\n\tcbasepath := C.CString(basepath)\n\tdefer C.free(unsafe.Pointer(cbasepath))\n\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\n\tr := C.shiftowner(cbasepath, cpath, C.int(uid), C.int(gid))\n\tif r != 0 {\n\t\treturn fmt.Errorf(\"Failed to change ownership of: %s\", path)\n\t}\n\n\treturn nil\n}\n\n\/\/ GetCaps extracts the list of capabilities effective on the file\nfunc GetCaps(path string) ([]byte, error) {\n\txattrs, err := shared.GetAllXattr(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalueStr, ok := xattrs[\"security.capability\"]\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\n\treturn []byte(valueStr), nil\n}\n\n\/\/ SetCaps applies the caps for a particular root uid\nfunc SetCaps(path string, caps []byte, uid int64) error {\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\n\tccaps := C.CString(string(caps))\n\tdefer C.free(unsafe.Pointer(ccaps))\n\n\tr := C.set_vfs_ns_caps(cpath, ccaps, C.ssize_t(len(caps)), C.uint32_t(uid))\n\tif r != 0 {\n\t\treturn fmt.Errorf(\"Failed to apply capabilities to: %s\", path)\n\t}\n\n\treturn nil\n}\n\n\/\/ ShiftACL updates uid and gid for file ACLs when entering\/exiting a namespace\nfunc ShiftACL(path string, shiftIds func(uid int64, gid int64) (int64, int64)) error {\n\terr := shiftAclType(path, C.ACL_TYPE_ACCESS, shiftIds)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = shiftAclType(path, C.ACL_TYPE_DEFAULT, shiftIds)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc shiftAclType(path string, aclType int, shiftIds func(uid int64, gid int64) (int64, int64)) error {\n\t\/\/ Convert the path to something usable with cgo\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\n\t\/\/ Read the current ACL set for the requested type\n\tacl := C.acl_get_file(cpath, C.uint(aclType))\n\tif acl == nil {\n\t\treturn nil\n\t}\n\tdefer C.acl_free(unsafe.Pointer(acl))\n\n\t\/\/ Iterate through all ACL entries\n\tupdate := false\n\tfor entryId := C.ACL_FIRST_ENTRY; ; entryId = C.ACL_NEXT_ENTRY {\n\t\tvar ent C.acl_entry_t\n\t\tvar tag C.acl_tag_t\n\n\t\t\/\/ Get the ACL entry\n\t\tret := C.acl_get_entry(acl, C.int(entryId), &ent)\n\t\tif ret == 0 {\n\t\t\tbreak\n\t\t} else if ret < 0 {\n\t\t\treturn fmt.Errorf(\"Failed to get the ACL entry for %s\", path)\n\t\t}\n\n\t\t\/\/ Get the ACL type\n\t\tret = C.acl_get_tag_type(ent, &tag)\n\t\tif ret == -1 {\n\t\t\treturn fmt.Errorf(\"Failed to get the ACL type for %s\", path)\n\t\t}\n\n\t\t\/\/ We only care about user and group ACLs, copy anything else\n\t\tif tag != C.ACL_USER && tag != C.ACL_GROUP {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Get the value\n\t\tidp := (*C.id_t)(C.acl_get_qualifier(ent))\n\t\tif idp == nil {\n\t\t\treturn fmt.Errorf(\"Failed to get current ACL value for %s\", path)\n\t\t}\n\n\t\t\/\/ Shift the value\n\t\tnewId, _ := shiftIds((int64)(*idp), -1)\n\n\t\t\/\/ Update the new entry with the shifted value\n\t\tret = C.acl_set_qualifier(ent, unsafe.Pointer(&newId))\n\t\tif ret == -1 {\n\t\t\treturn fmt.Errorf(\"Failed to set ACL qualifier on %s\", path)\n\t\t}\n\n\t\tupdate = true\n\t}\n\n\t\/\/ Update the on-disk ACLs to match\n\tif update {\n\t\tret := C.acl_set_file(cpath, C.uint(aclType), acl)\n\t\tif ret == -1 {\n\t\t\treturn fmt.Errorf(\"Failed to change ACLs on %s\", path)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc SupportsVFS3Fscaps(prefix string) bool {\n\ttmpfile, err := ioutil.TempFile(prefix, \".lxd_fcaps_v3_\")\n\tif err != nil {\n\t\treturn false\n\t}\n\ttmpfile.Close()\n\tdefer os.Remove(tmpfile.Name())\n\n\terr = os.Chmod(tmpfile.Name(), 0001)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tcpath := C.CString(tmpfile.Name())\n\tdefer C.free(unsafe.Pointer(cpath))\n\n\tr := C.set_dummy_fs_ns_caps(cpath)\n\tif r != 0 {\n\t\treturn false\n\t}\n\n\tcmd := exec.Command(tmpfile.Name())\n\terr = cmd.Run()\n\tif err != nil {\n\t\terrno, isErrno := shared.GetErrno(err)\n\t\tif isErrno && (errno == unix.ERANGE || errno == unix.EOVERFLOW) {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}\n\n\treturn true\n}\n<commit_msg>shift_linux: tweak ACL handling<commit_after>\/\/ +build linux\n\/\/ +build cgo\n\npackage idmap\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"unsafe\"\n\n\t\"golang.org\/x\/sys\/unix\"\n\n\t\/\/ Used by cgo\n\t_ \"github.com\/lxc\/lxd\/lxd\/include\"\n\n\t\"github.com\/lxc\/lxd\/shared\"\n)\n\n\/\/ #cgo LDFLAGS: -lacl\n\/*\n#ifndef _GNU_SOURCE\n#define _GNU_SOURCE 1\n#endif\n#include <byteswap.h>\n#include <errno.h>\n#include <fcntl.h>\n#include <limits.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <sys\/capability.h>\n#include <unistd.h>\n#include <sys\/stat.h>\n#include <sys\/types.h>\n#include <sys\/acl.h>\n\n\/\/ Needs to be included at the end\n#include <sys\/xattr.h>\n\n#include \"..\/..\/lxd\/include\/memory_utils.h\"\n\n#ifndef VFS_CAP_REVISION_1\n#define VFS_CAP_REVISION_1 0x01000000\n#endif\n\n#ifndef VFS_CAP_REVISION_2\n#define VFS_CAP_REVISION_2 0x02000000\n#endif\n\n#ifndef VFS_CAP_REVISION_3\n#define VFS_CAP_REVISION_3 0x03000000\nstruct vfs_ns_cap_data {\n\t__le32 magic_etc;\n\tstruct {\n\t\t__le32 permitted;\n\t\t__le32 inheritable;\n\t} data[VFS_CAP_U32];\n\t__le32 rootid;\n};\n#endif\n\n#if __BYTE_ORDER == __BIG_ENDIAN\n#define BE32_TO_LE32(x) bswap_32(x)\n#else\n#define BE32_TO_LE32(x) (x)\n#endif\n\nint set_vfs_ns_caps(char *path, char *caps, ssize_t len, uint32_t uid)\n{\n\t\/\/ Works because vfs_ns_cap_data is a superset of vfs_cap_data (rootid\n\t\/\/ field added to the end)\n\tstruct vfs_ns_cap_data ns_xattr;\n\n\tmemset(&ns_xattr, 0, sizeof(ns_xattr));\n\tmemcpy(&ns_xattr, caps, len);\n\tns_xattr.magic_etc &= ~(VFS_CAP_REVISION_1 | VFS_CAP_REVISION_2);\n\tns_xattr.magic_etc |= VFS_CAP_REVISION_3;\n\tns_xattr.rootid = BE32_TO_LE32(uid);\n\n\treturn setxattr(path, \"security.capability\", &ns_xattr, sizeof(ns_xattr), 0);\n}\n\nint set_dummy_fs_ns_caps(const char *path)\n{\n\t#define __raise_cap_permitted(x, ns_cap_data) ns_cap_data.data[(x)>>5].permitted |= (1<<((x)&31))\n\n\tstruct vfs_ns_cap_data ns_xattr;\n\n\tmemset(&ns_xattr, 0, sizeof(ns_xattr));\n __raise_cap_permitted(CAP_NET_RAW, ns_xattr);\n\tns_xattr.magic_etc |= VFS_CAP_REVISION_3 | VFS_CAP_FLAGS_EFFECTIVE;\n\tns_xattr.rootid = BE32_TO_LE32(1000000);\n\n\treturn setxattr(path, \"security.capability\", &ns_xattr, sizeof(ns_xattr), 0);\n}\n\nint shiftowner(char *basepath, char *path, int uid, int gid)\n{\n\t__do_close int fd = -EBADF;\n\tint ret;\n\tchar fdpath[PATH_MAX], realpath[PATH_MAX];\n\tstruct stat sb;\n\n\tfd = open(path, O_PATH | O_NOFOLLOW);\n\tif (fd < 0) {\n\t\tperror(\"Failed open\");\n\t\treturn 1;\n\t}\n\n\tret = sprintf(fdpath, \"\/proc\/self\/fd\/%d\", fd);\n\tif (ret < 0) {\n\t\tperror(\"Failed sprintf\");\n\t\treturn 1;\n\t}\n\n\tret = readlink(fdpath, realpath, PATH_MAX);\n\tif (ret < 0) {\n\t\tperror(\"Failed readlink\");\n\t\treturn 1;\n\t}\n\n\tif (strlen(realpath) < strlen(basepath)) {\n\t\tprintf(\"Invalid path, source (%s) is outside of basepath (%s)\\n\", realpath, basepath);\n\t\treturn 1;\n\t}\n\n\tif (strncmp(realpath, basepath, strlen(basepath))) {\n\t\tprintf(\"Invalid path, source (%s) is outside of basepath \" \"(%s).\\n\", realpath, basepath);\n\t\treturn 1;\n\t}\n\n\tret = fstat(fd, &sb);\n\tif (ret < 0) {\n\t\tperror(\"Failed fstat\");\n\t\treturn 1;\n\t}\n\n\tret = fchownat(fd, \"\", uid, gid, AT_EMPTY_PATH | AT_SYMLINK_NOFOLLOW);\n\tif (ret < 0) {\n\t\tperror(\"Failed chown\");\n\t\treturn 1;\n\t}\n\n\tif (!S_ISLNK(sb.st_mode)) {\n\t\tret = chmod(fdpath, sb.st_mode);\n\t\tif (ret < 0) {\n\t\t\tperror(\"Failed chmod\");\n\t\t\treturn 1;\n\t\t}\n\t}\n\n\treturn 0;\n}\n*\/\nimport \"C\"\n\n\/\/ ShiftOwner updates uid and gid for a file when entering\/exiting a namespace\nfunc ShiftOwner(basepath string, path string, uid int, gid int) error {\n\tcbasepath := C.CString(basepath)\n\tdefer C.free(unsafe.Pointer(cbasepath))\n\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\n\tr := C.shiftowner(cbasepath, cpath, C.int(uid), C.int(gid))\n\tif r != 0 {\n\t\treturn fmt.Errorf(\"Failed to change ownership of: %s\", path)\n\t}\n\n\treturn nil\n}\n\n\/\/ GetCaps extracts the list of capabilities effective on the file\nfunc GetCaps(path string) ([]byte, error) {\n\txattrs, err := shared.GetAllXattr(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvalueStr, ok := xattrs[\"security.capability\"]\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\n\treturn []byte(valueStr), nil\n}\n\n\/\/ SetCaps applies the caps for a particular root uid\nfunc SetCaps(path string, caps []byte, uid int64) error {\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\n\tccaps := C.CString(string(caps))\n\tdefer C.free(unsafe.Pointer(ccaps))\n\n\tr := C.set_vfs_ns_caps(cpath, ccaps, C.ssize_t(len(caps)), C.uint32_t(uid))\n\tif r != 0 {\n\t\treturn fmt.Errorf(\"Failed to apply capabilities to: %s\", path)\n\t}\n\n\treturn nil\n}\n\n\/\/ ShiftACL updates uid and gid for file ACLs when entering\/exiting a namespace\nfunc ShiftACL(path string, shiftIds func(uid int64, gid int64) (int64, int64)) error {\n\terr := shiftAclType(path, C.ACL_TYPE_ACCESS, shiftIds)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = shiftAclType(path, C.ACL_TYPE_DEFAULT, shiftIds)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc shiftAclType(path string, aclType int, shiftIds func(uid int64, gid int64) (int64, int64)) error {\n\t\/\/ Convert the path to something usable with cgo\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\n\t\/\/ Read the current ACL set for the requested type\n\tacl := C.acl_get_file(cpath, C.uint(aclType))\n\tif acl == nil {\n\t\treturn nil\n\t}\n\tdefer C.acl_free(unsafe.Pointer(acl))\n\n\t\/\/ Iterate through all ACL entries\n\tupdate := false\n\tfor entryId := C.ACL_FIRST_ENTRY; ; entryId = C.ACL_NEXT_ENTRY {\n\t\tvar ent C.acl_entry_t\n\t\tvar tag C.acl_tag_t\n\n\t\t\/\/ Get the ACL entry\n\t\tret := C.acl_get_entry(acl, C.int(entryId), &ent)\n\t\tif ret == 0 {\n\t\t\tbreak\n\t\t} else if ret < 0 {\n\t\t\treturn fmt.Errorf(\"Failed to get the ACL entry for %s\", path)\n\t\t}\n\n\t\t\/\/ Get the ACL type\n\t\tret = C.acl_get_tag_type(ent, &tag)\n\t\tif ret == -1 {\n\t\t\treturn fmt.Errorf(\"Failed to get the ACL type for %s\", path)\n\t\t}\n\n\t\t\/\/ We only care about user and group ACLs, copy anything else\n\t\tif tag != C.ACL_USER && tag != C.ACL_GROUP {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Get the value\n\t\tidp := (*C.id_t)(C.acl_get_qualifier(ent))\n\t\tif idp == nil {\n\t\t\treturn fmt.Errorf(\"Failed to get current ACL value for %s\", path)\n\t\t}\n\n\t\t\/\/ Shift the value\n\t\tnewId := int64(-1)\n\t\tif tag == C.ACL_USER {\n\t\t\tnewId, _ = shiftIds((int64)(*idp), -1)\n\t\t} else {\n\t\t\t_, newId = shiftIds(-1, (int64)(*idp))\n\t\t}\n\n\t\t\/\/ Update the new entry with the shifted value\n\t\tret = C.acl_set_qualifier(ent, unsafe.Pointer(&newId))\n\t\tif ret == -1 {\n\t\t\treturn fmt.Errorf(\"Failed to set ACL qualifier on %s\", path)\n\t\t}\n\n\t\tupdate = true\n\t}\n\n\t\/\/ Update the on-disk ACLs to match\n\tif update {\n\t\tret, err := C.acl_set_file(cpath, C.uint(aclType), acl)\n\t\tif ret < 0 {\n\t\t\treturn fmt.Errorf(\"%s - Failed to change ACLs on %s\", err, path)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc SupportsVFS3Fscaps(prefix string) bool {\n\ttmpfile, err := ioutil.TempFile(prefix, \".lxd_fcaps_v3_\")\n\tif err != nil {\n\t\treturn false\n\t}\n\ttmpfile.Close()\n\tdefer os.Remove(tmpfile.Name())\n\n\terr = os.Chmod(tmpfile.Name(), 0001)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tcpath := C.CString(tmpfile.Name())\n\tdefer C.free(unsafe.Pointer(cpath))\n\n\tr := C.set_dummy_fs_ns_caps(cpath)\n\tif r != 0 {\n\t\treturn false\n\t}\n\n\tcmd := exec.Command(tmpfile.Name())\n\terr = cmd.Run()\n\tif err != nil {\n\t\terrno, isErrno := shared.GetErrno(err)\n\t\tif isErrno && (errno == unix.ERANGE || errno == unix.EOVERFLOW) {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\t}\n\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 sigu-399 ( https:\/\/github.com\/sigu-399 )\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ author \t\t\tsigu-399\n\/\/ author-github \thttps:\/\/github.com\/sigu-399\n\/\/ author-mail\t\tsigu.399@gmail.com\n\/\/\n\/\/ repository-name\tgojsonpointer\n\/\/ repository-desc\tAn implementation of JSON Pointer - Go language\n\/\/\n\/\/ description\t\tMain and unique file.\n\/\/\n\/\/ created \t25-02-2013\n\npackage gojsonpointer\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\tconst_empty_pointer = ``\n\tconst_pointer_separator = `\/`\n\n\tconst_invalid_start = `JSON pointer must be empty or start with a \"` + const_pointer_separator\n)\n\ntype implStruct struct {\n\tmode string \/\/ \"SET\" or \"GET\"\n\n\tinDocument interface{}\n\n\tsetInValue interface{}\n\n\tgetOutNode interface{}\n\tgetOutKind reflect.Kind\n\toutError error\n}\n\nfunc NewJsonPointer(jsonPointerString string) (JsonPointer, error) {\n\n\tvar p JsonPointer\n\terr := p.parse(jsonPointerString)\n\treturn p, err\n\n}\n\ntype JsonPointer struct {\n\treferenceTokens []string\n}\n\n\/\/ \"Constructor\", parses the given string JSON pointer\nfunc (p *JsonPointer) parse(jsonPointerString string) error {\n\n\tvar err error\n\n\tif jsonPointerString != const_empty_pointer {\n\t\tif !strings.HasPrefix(jsonPointerString, const_pointer_separator) {\n\t\t\terr = errors.New(const_invalid_start)\n\t\t} else {\n\t\t\treferenceTokens := strings.Split(jsonPointerString, const_pointer_separator)\n\t\t\tfor _, referenceToken := range referenceTokens[1:] {\n\t\t\t\tp.referenceTokens = append(p.referenceTokens, referenceToken)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n}\n\n\/\/ Uses the pointer to retrieve a value from a JSON document\nfunc (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) {\n\n\tis := &implStruct{mode: \"GET\", inDocument: document}\n\tp.implementation(is)\n\treturn is.getOutNode, is.getOutKind, is.outError\n\n}\n\n\/\/ Uses the pointer to update a value from a JSON document\nfunc (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) {\n\n\tis := &implStruct{mode: \"SET\", inDocument: document, setInValue: value}\n\tp.implementation(is)\n\treturn document, is.outError\n\n}\n\n\/\/ Both Get and Set functions use the same implementation to avoid code duplication\nfunc (p *JsonPointer) implementation(i *implStruct) {\n\n\tkind := reflect.Invalid\n\n\t\/\/ Full document when empty\n\tif len(p.referenceTokens) == 0 {\n\t\ti.getOutNode = i.inDocument\n\t\ti.outError = nil\n\t\ti.getOutKind = kind\n\t\ti.outError = nil\n\t\treturn\n\t}\n\n\tnode := i.inDocument\n\n\tfor ti, token := range p.referenceTokens {\n\n\t\tdecodedToken := decodeReferenceToken(token)\n\t\tisLastToken := ti == len(p.referenceTokens)-1\n\n\t\trValue := reflect.ValueOf(node)\n\t\tkind = rValue.Kind()\n\n\t\tswitch kind {\n\n\t\tcase reflect.Map:\n\t\t\tm := node.(map[string]interface{})\n\t\t\tif _, ok := m[decodedToken]; ok {\n\t\t\t\tnode = m[decodedToken]\n\t\t\t\tif isLastToken && i.mode == \"SET\" {\n\t\t\t\t\tm[decodedToken] = i.setInValue\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\ti.outError = errors.New(fmt.Sprintf(\"Object has no key '%s'\", token))\n\t\t\t\ti.getOutKind = kind\n\t\t\t\ti.getOutNode = nil\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase reflect.Slice:\n\t\t\ts := node.([]interface{})\n\t\t\ttokenIndex, err := strconv.Atoi(token)\n\t\t\tif err != nil {\n\t\t\t\ti.outError = errors.New(fmt.Sprintf(\"Invalid array index '%s'\", token))\n\t\t\t\ti.getOutKind = kind\n\t\t\t\ti.getOutNode = nil\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsLength := len(s)\n\t\t\tif tokenIndex < 0 || tokenIndex >= sLength {\n\t\t\t\ti.outError = errors.New(fmt.Sprintf(\"Out of bound array[0,%d] index '%d'\", tokenIndex, sLength))\n\t\t\t\ti.getOutKind = kind\n\t\t\t\ti.getOutNode = nil\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnode = s[tokenIndex]\n\t\t\tif isLastToken && i.mode == \"SET\" {\n\t\t\t\ts[tokenIndex] = i.setInValue\n\t\t\t}\n\n\t\tdefault:\n\t\t\ti.outError = errors.New(fmt.Sprintf(\"Invalid token reference '%s'\", token))\n\t\t\ti.getOutKind = kind\n\t\t\ti.getOutNode = nil\n\t\t\treturn\n\t\t}\n\n\t}\n\n\trValue := reflect.ValueOf(node)\n\tkind = rValue.Kind()\n\n\ti.getOutNode = node\n\ti.getOutKind = kind\n\ti.outError = nil\n}\n\n\/\/ Pointer to string representation function\nfunc (p *JsonPointer) String() string {\n\n\tif len(p.referenceTokens) == 0 {\n\t\treturn const_empty_pointer\n\t}\n\n\tpointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator)\n\n\treturn pointerString\n}\n\n\/\/ Specific JSON pointer encoding here\n\/\/ ~0 => ~\n\/\/ ~1 => \/\n\/\/ ... and vice versa\n\nconst (\n\tconst_encoded_reference_token_0 = `~0`\n\tconst_encoded_reference_token_1 = `~1`\n\tconst_decoded_reference_token_0 = `~`\n\tconst_decoded_reference_token_1 = `\/`\n)\n\nfunc decodeReferenceToken(token string) string {\n\tstep1 := strings.Replace(token, const_encoded_reference_token_1, const_decoded_reference_token_1, -1)\n\tstep2 := strings.Replace(step1, const_encoded_reference_token_0, const_decoded_reference_token_0, -1)\n\treturn step2\n}\n\nfunc encodeReferenceToken(token string) string {\n\tstep1 := strings.Replace(token, const_decoded_reference_token_1, const_encoded_reference_token_1, -1)\n\tstep2 := strings.Replace(step1, const_decoded_reference_token_0, const_encoded_reference_token_0, -1)\n\treturn step2\n}\n<commit_msg>fixed array out of bound error message<commit_after>\/\/ Copyright 2013 sigu-399 ( https:\/\/github.com\/sigu-399 )\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ author \t\t\tsigu-399\n\/\/ author-github \thttps:\/\/github.com\/sigu-399\n\/\/ author-mail\t\tsigu.399@gmail.com\n\/\/\n\/\/ repository-name\tgojsonpointer\n\/\/ repository-desc\tAn implementation of JSON Pointer - Go language\n\/\/\n\/\/ description\t\tMain and unique file.\n\/\/\n\/\/ created \t25-02-2013\n\npackage gojsonpointer\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nconst (\n\tconst_empty_pointer = ``\n\tconst_pointer_separator = `\/`\n\n\tconst_invalid_start = `JSON pointer must be empty or start with a \"` + const_pointer_separator\n)\n\ntype implStruct struct {\n\tmode string \/\/ \"SET\" or \"GET\"\n\n\tinDocument interface{}\n\n\tsetInValue interface{}\n\n\tgetOutNode interface{}\n\tgetOutKind reflect.Kind\n\toutError error\n}\n\nfunc NewJsonPointer(jsonPointerString string) (JsonPointer, error) {\n\n\tvar p JsonPointer\n\terr := p.parse(jsonPointerString)\n\treturn p, err\n\n}\n\ntype JsonPointer struct {\n\treferenceTokens []string\n}\n\n\/\/ \"Constructor\", parses the given string JSON pointer\nfunc (p *JsonPointer) parse(jsonPointerString string) error {\n\n\tvar err error\n\n\tif jsonPointerString != const_empty_pointer {\n\t\tif !strings.HasPrefix(jsonPointerString, const_pointer_separator) {\n\t\t\terr = errors.New(const_invalid_start)\n\t\t} else {\n\t\t\treferenceTokens := strings.Split(jsonPointerString, const_pointer_separator)\n\t\t\tfor _, referenceToken := range referenceTokens[1:] {\n\t\t\t\tp.referenceTokens = append(p.referenceTokens, referenceToken)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n}\n\n\/\/ Uses the pointer to retrieve a value from a JSON document\nfunc (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) {\n\n\tis := &implStruct{mode: \"GET\", inDocument: document}\n\tp.implementation(is)\n\treturn is.getOutNode, is.getOutKind, is.outError\n\n}\n\n\/\/ Uses the pointer to update a value from a JSON document\nfunc (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) {\n\n\tis := &implStruct{mode: \"SET\", inDocument: document, setInValue: value}\n\tp.implementation(is)\n\treturn document, is.outError\n\n}\n\n\/\/ Both Get and Set functions use the same implementation to avoid code duplication\nfunc (p *JsonPointer) implementation(i *implStruct) {\n\n\tkind := reflect.Invalid\n\n\t\/\/ Full document when empty\n\tif len(p.referenceTokens) == 0 {\n\t\ti.getOutNode = i.inDocument\n\t\ti.outError = nil\n\t\ti.getOutKind = kind\n\t\ti.outError = nil\n\t\treturn\n\t}\n\n\tnode := i.inDocument\n\n\tfor ti, token := range p.referenceTokens {\n\n\t\tdecodedToken := decodeReferenceToken(token)\n\t\tisLastToken := ti == len(p.referenceTokens)-1\n\n\t\trValue := reflect.ValueOf(node)\n\t\tkind = rValue.Kind()\n\n\t\tswitch kind {\n\n\t\tcase reflect.Map:\n\t\t\tm := node.(map[string]interface{})\n\t\t\tif _, ok := m[decodedToken]; ok {\n\t\t\t\tnode = m[decodedToken]\n\t\t\t\tif isLastToken && i.mode == \"SET\" {\n\t\t\t\t\tm[decodedToken] = i.setInValue\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\ti.outError = errors.New(fmt.Sprintf(\"Object has no key '%s'\", token))\n\t\t\t\ti.getOutKind = kind\n\t\t\t\ti.getOutNode = nil\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase reflect.Slice:\n\t\t\ts := node.([]interface{})\n\t\t\ttokenIndex, err := strconv.Atoi(token)\n\t\t\tif err != nil {\n\t\t\t\ti.outError = errors.New(fmt.Sprintf(\"Invalid array index '%s'\", token))\n\t\t\t\ti.getOutKind = kind\n\t\t\t\ti.getOutNode = nil\n\t\t\t\treturn\n\t\t\t}\n\t\t\tsLength := len(s)\n\t\t\tif tokenIndex < 0 || tokenIndex >= sLength {\n\t\t\t\ti.outError = errors.New(fmt.Sprintf(\"Out of bound array[0,%d] index '%d'\", sLength, tokenIndex))\n\t\t\t\ti.getOutKind = kind\n\t\t\t\ti.getOutNode = nil\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnode = s[tokenIndex]\n\t\t\tif isLastToken && i.mode == \"SET\" {\n\t\t\t\ts[tokenIndex] = i.setInValue\n\t\t\t}\n\n\t\tdefault:\n\t\t\ti.outError = errors.New(fmt.Sprintf(\"Invalid token reference '%s'\", token))\n\t\t\ti.getOutKind = kind\n\t\t\ti.getOutNode = nil\n\t\t\treturn\n\t\t}\n\n\t}\n\n\trValue := reflect.ValueOf(node)\n\tkind = rValue.Kind()\n\n\ti.getOutNode = node\n\ti.getOutKind = kind\n\ti.outError = nil\n}\n\n\/\/ Pointer to string representation function\nfunc (p *JsonPointer) String() string {\n\n\tif len(p.referenceTokens) == 0 {\n\t\treturn const_empty_pointer\n\t}\n\n\tpointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator)\n\n\treturn pointerString\n}\n\n\/\/ Specific JSON pointer encoding here\n\/\/ ~0 => ~\n\/\/ ~1 => \/\n\/\/ ... and vice versa\n\nconst (\n\tconst_encoded_reference_token_0 = `~0`\n\tconst_encoded_reference_token_1 = `~1`\n\tconst_decoded_reference_token_0 = `~`\n\tconst_decoded_reference_token_1 = `\/`\n)\n\nfunc decodeReferenceToken(token string) string {\n\tstep1 := strings.Replace(token, const_encoded_reference_token_1, const_decoded_reference_token_1, -1)\n\tstep2 := strings.Replace(step1, const_encoded_reference_token_0, const_decoded_reference_token_0, -1)\n\treturn step2\n}\n\nfunc encodeReferenceToken(token string) string {\n\tstep1 := strings.Replace(token, const_decoded_reference_token_1, const_encoded_reference_token_1, -1)\n\tstep2 := strings.Replace(step1, const_decoded_reference_token_0, const_encoded_reference_token_0, -1)\n\treturn step2\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Adding doc.go to util package.<commit_after><|endoftext|>"} {"text":"<commit_before>package node\n\nimport (\n\t\"regexp\"\n\t\"time\"\n\n\t\"github.com\/MG-RAST\/Shock\/shock-server\/cache\"\n\t\"github.com\/MG-RAST\/Shock\/shock-server\/conf\"\n\t\"github.com\/MG-RAST\/Shock\/shock-server\/logger\"\n\t\"github.com\/MG-RAST\/Shock\/shock-server\/node\/locker\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\nvar (\n\tTtl *NodeReaper\n\tExpireRegex = regexp.MustCompile(`^(\\d+)(M|H|D)$`)\n)\n\nfunc InitReaper() {\n\tTtl = NewNodeReaper()\n}\n\ntype NodeReaper struct{}\n\nfunc NewNodeReaper() *NodeReaper {\n\treturn &NodeReaper{}\n}\n\nfunc (nr *NodeReaper) Handle() {\n\twaitDuration := time.Duration(conf.EXPIRE_WAIT) * time.Minute\n\tfor {\n\n\t\t\/\/ sleep\n\t\ttime.Sleep(waitDuration)\n\t\t\/\/ query to get expired nodes\n\t\tnodes := Nodes{}\n\t\tquery := nr.getQuery()\n\t\tnodes.GetAll(query)\n\t\t\/\/ delete expired nodes\n\t\tfor _, n := range nodes {\n\t\t\tlogger.Infof(\"Deleting expired node: %s\", n.Id)\n\t\t\tif err := n.Delete(); err != nil {\n\t\t\t\terr_msg := \"err:@node_delete: \" + err.Error()\n\t\t\t\tlogger.Error(err_msg)\n\t\t\t}\n\t\t}\n\t\t\/\/ garbage collection: remove old nodes from Lockers, value is hours old\n\t\tlocker.NodeLockMgr.RemoveOld(1)\n\t\tlocker.FileLockMgr.RemoveOld(6)\n\t\tlocker.IndexLockMgr.RemoveOld(6)\n\n\t\t\/\/ we do not start deletings files if we are not in cache mode\n\t\tif conf.PATH_CACHE == \"\" {\n\t\t\tcontinue\n\t\t}\n\tLoop2:\n\t\t\/\/ start a FILE REAPER that loops thru CacheMap[*]\n\t\tfor ID := range cache.CacheMap {\n\n\t\t\t\/\/fmt.Printf(\"(Reaper-->FileReaper) checking %s in cache\\n\", ID)\n\n\t\t\tnow := time.Now()\n\t\t\tlru := cache.CacheMap[ID].Access\n\n\t\t\tdiff := now.Sub(lru)\n\n\t\t\t\/\/ we use a very simple scheme for caching initially (file not used for 1 day)\n\t\t\tif diff.Hours() < float64(conf.CACHE_TTL) {\n\t\t\t\t\/\/\tfmt.Printf(\"(Reaper-->FileReaper) not deleting %s from cache it was last accessed %s hours ago\\n\", ID, diff.Hours())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ START HERE ON MONDAY\n\t\t\t\/\/ ideally we would only have things in the cache that have a remote location\n\t\t\t\/\/ maybe init cache from Mongo instead of local filesystem? it'd be faster...\n\n\t\t\t\/\/fmt.Printf(\"(Reaper-->FileReaper) trying to delete %s from cache\\n \", ID)\n\n\t\t\tn, _ := Load(ID)\n\t\t\tfor _, loc := range n.Locations {\n\t\t\t\t\/\/ delete only if other locations exist\n\t\t\t\tlocObj, ok := conf.LocationsMap[loc]\n\t\t\t\tif !ok {\n\t\t\t\t\tlogger.Infof(\"(Reaper-->FileReaper) location %s is not OK \\n \", loc)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t\/\/fmt.Printf(\"(Reaper-->FileReaper) locObj.Persistent = %b \\n \", locObj.Persistent)\n\n\t\t\t\tif locObj.Persistent == true {\n\t\t\t\t\tlogger.Infof(\"(Reaper-->FileReaper) has remote Location (%s) removing from Cache: %s\", loc, ID)\n\n\t\t\t\t\tcache.Remove(ID)\n\t\t\t\t\tbreak Loop2 \/\/ the innermost loop\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tlogger.Infof(\"(Reaper-->FileReaper) cannot delete %s from cache\", ID)\n\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (nr *NodeReaper) getQuery() (query bson.M) {\n\thasExpire := bson.M{\"expiration\": bson.M{\"$exists\": true}} \/\/ has the field\n\ttoExpire := bson.M{\"expiration\": bson.M{\"$ne\": time.Time{}}} \/\/ value has been set, not default\n\tisExpired := bson.M{\"expiration\": bson.M{\"$lt\": time.Now()}} \/\/ value is too old\n\tquery = bson.M{\"$and\": []bson.M{hasExpire, toExpire, isExpired}}\n\treturn\n}\n<commit_msg>fixed main loop in expire to handle more than one cache item throw error when encountering location not configured for server<commit_after>package node\n\nimport (\n\t\"regexp\"\n\t\"time\"\n\n\t\"github.com\/MG-RAST\/Shock\/shock-server\/cache\"\n\t\"github.com\/MG-RAST\/Shock\/shock-server\/conf\"\n\t\"github.com\/MG-RAST\/Shock\/shock-server\/logger\"\n\t\"github.com\/MG-RAST\/Shock\/shock-server\/node\/locker\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\nvar (\n\tTtl *NodeReaper\n\tExpireRegex = regexp.MustCompile(`^(\\d+)(M|H|D)$`)\n)\n\nfunc InitReaper() {\n\tTtl = NewNodeReaper()\n}\n\ntype NodeReaper struct{}\n\nfunc NewNodeReaper() *NodeReaper {\n\treturn &NodeReaper{}\n}\n\nfunc (nr *NodeReaper) Handle() {\n\twaitDuration := time.Duration(conf.EXPIRE_WAIT) * time.Minute\n\tfor {\n\n\t\t\/\/ sleep\n\t\ttime.Sleep(waitDuration)\n\t\t\/\/ query to get expired nodes\n\t\tnodes := Nodes{}\n\t\tquery := nr.getQuery()\n\t\tnodes.GetAll(query)\n\t\t\/\/ delete expired nodes\n\t\tfor _, n := range nodes {\n\t\t\tlogger.Infof(\"Deleting expired node: %s\", n.Id)\n\t\t\tif err := n.Delete(); err != nil {\n\t\t\t\terr_msg := \"err:@node_delete: \" + err.Error()\n\t\t\t\tlogger.Error(err_msg)\n\t\t\t}\n\t\t}\n\t\t\/\/ garbage collection: remove old nodes from Lockers, value is hours old\n\t\tlocker.NodeLockMgr.RemoveOld(1)\n\t\tlocker.FileLockMgr.RemoveOld(6)\n\t\tlocker.IndexLockMgr.RemoveOld(6)\n\n\t\t\/\/ we do not start deletings files if we are not in cache mode\n\t\tif conf.PATH_CACHE == \"\" {\n\t\t\tcontinue\n\t\t}\n\tLoop2:\n\t\t\/\/ start a FILE REAPER that loops thru CacheMap[*]\n\t\tfor ID := range cache.CacheMap {\n\n\t\t\t\/\/fmt.Printf(\"(Reaper-->FileReaper) checking %s in cache\\n\", ID)\n\n\t\t\tnow := time.Now()\n\t\t\tlru := cache.CacheMap[ID].Access\n\t\t\tdiff := now.Sub(lru)\n\n\t\t\t\/\/ we use a very simple scheme for caching initially (file not used for 1 day)\n\t\t\tif diff.Hours() < float64(conf.CACHE_TTL) {\n\t\t\t\t\/\/\tfmt.Printf(\"(Reaper-->FileReaper) not deleting %s from cache it was last accessed %s hours ago\\n\", ID, diff.Hours())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tn, err := Load(ID)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Infof(\"(Reaper-->FileReaper) Cannot access CacheMapItem[%s] (%s)\", ID, err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, loc := range n.Locations {\n\t\t\t\t\/\/ delete only if other locations exist\n\t\t\t\tlocObj, ok := conf.LocationsMap[loc]\n\t\t\t\tif !ok {\n\t\t\t\t\tlogger.Errorf(\"(Reaper-->FileReaper) location %s is not defined in this server instance \\n \", loc)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t\/\/fmt.Printf(\"(Reaper-->FileReaper) locObj.Persistent = %b \\n \", locObj.Persistent)\n\t\t\t\tif locObj.Persistent == true {\n\t\t\t\t\tlogger.Infof(\"(Reaper-->FileReaper) has remote Location (%s) removing from Cache: %s\", loc, ID)\n\n\t\t\t\t\tcache.Remove(ID)\n\t\t\t\t\tcontinue Loop2 \/\/ the innermost loop\n\t\t\t\t}\n\t\t\t}\n\t\t\tlogger.Errorf(\"(Reaper-->FileReaper) cannot delete %s from cache [This should not happen!!]\", ID)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (nr *NodeReaper) getQuery() (query bson.M) {\n\thasExpire := bson.M{\"expiration\": bson.M{\"$exists\": true}} \/\/ has the field\n\ttoExpire := bson.M{\"expiration\": bson.M{\"$ne\": time.Time{}}} \/\/ value has been set, not default\n\tisExpired := bson.M{\"expiration\": bson.M{\"$lt\": time.Now()}} \/\/ value is too old\n\tquery = bson.M{\"$and\": []bson.M{hasExpire, toExpire, isExpired}}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package pgtype\n\nimport (\n\t\"database\/sql\/driver\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"math\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/jackc\/pgio\"\n\terrors \"golang.org\/x\/xerrors\"\n)\n\ntype Polygon struct {\n\tP []Vec2\n\tStatus Status\n}\n\nfunc (dst *Polygon) Set(src interface{}) error {\n\tif src == nil {\n\t\tdst.Status = Null\n\t\treturn nil\n\t}\n\terr := errors.Errorf(\"cannot convert %v to Polygon\", src)\n\tvar p *Polygon\n\tswitch value := src.(type) {\n\tcase string:\n\t\tp, err = parseString(value)\n\tcase []Vec2:\n\t\tp = &Polygon{Status: Present, P: value}\n\t\terr = nil\n\tcase []float64:\n\t\tp, err = parseFloat64(value)\n\tdefault:\n\t\treturn err\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\t*dst = *p\n\treturn nil\n}\n\nfunc parseString(src string) (*Polygon, error) {\n\tp := &Polygon{}\n\terr := p.DecodeText(nil, []byte(src))\n\treturn p, err\n}\n\nfunc parseFloat64(src []float64) (*Polygon, error) {\n\tp := &Polygon{Status: Null}\n\tif len(src) == 0 {\n\t\treturn p, nil\n\t}\n\tif len(src)%2 != 0 {\n\t\treturn p, errors.Errorf(\"invalid length for polygon: %v\", len(src))\n\t}\n\tp.Status = Present\n\tp.P = make([]Vec2, 0)\n\tfor i := 0; i < len(src); i += 2 {\n\t\tp.P = append(p.P, Vec2{X: src[i], Y: src[i+1]})\n\t}\n\treturn p, nil\n}\n\nfunc (dst Polygon) Get() interface{} {\n\tswitch dst.Status {\n\tcase Present:\n\t\treturn dst\n\tcase Null:\n\t\treturn nil\n\tdefault:\n\t\treturn dst.Status\n\t}\n}\n\nfunc (src *Polygon) AssignTo(dst interface{}) error {\n\treturn errors.Errorf(\"cannot assign %v to %T\", src, dst)\n}\n\nfunc (dst *Polygon) DecodeText(ci *ConnInfo, src []byte) error {\n\tif src == nil {\n\t\t*dst = Polygon{Status: Null}\n\t\treturn nil\n\t}\n\n\tif len(src) < 7 {\n\t\treturn errors.Errorf(\"invalid length for Polygon: %v\", len(src))\n\t}\n\n\tpoints := make([]Vec2, 0)\n\n\tstr := string(src[2:])\n\n\tfor {\n\t\tend := strings.IndexByte(str, ',')\n\t\tx, err := strconv.ParseFloat(str[:end], 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tstr = str[end+1:]\n\t\tend = strings.IndexByte(str, ')')\n\n\t\ty, err := strconv.ParseFloat(str[:end], 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpoints = append(points, Vec2{x, y})\n\n\t\tif end+3 < len(str) {\n\t\t\tstr = str[end+3:]\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t*dst = Polygon{P: points, Status: Present}\n\treturn nil\n}\n\nfunc (dst *Polygon) DecodeBinary(ci *ConnInfo, src []byte) error {\n\tif src == nil {\n\t\t*dst = Polygon{Status: Null}\n\t\treturn nil\n\t}\n\n\tif len(src) < 5 {\n\t\treturn errors.Errorf(\"invalid length for Polygon: %v\", len(src))\n\t}\n\n\tpointCount := int(binary.BigEndian.Uint32(src))\n\trp := 4\n\n\tif 4+pointCount*16 != len(src) {\n\t\treturn errors.Errorf(\"invalid length for Polygon with %d points: %v\", pointCount, len(src))\n\t}\n\n\tpoints := make([]Vec2, pointCount)\n\tfor i := 0; i < len(points); i++ {\n\t\tx := binary.BigEndian.Uint64(src[rp:])\n\t\trp += 8\n\t\ty := binary.BigEndian.Uint64(src[rp:])\n\t\trp += 8\n\t\tpoints[i] = Vec2{math.Float64frombits(x), math.Float64frombits(y)}\n\t}\n\n\t*dst = Polygon{\n\t\tP: points,\n\t\tStatus: Present,\n\t}\n\treturn nil\n}\n\nfunc (src Polygon) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {\n\tswitch src.Status {\n\tcase Null:\n\t\treturn nil, nil\n\tcase Undefined:\n\t\treturn nil, errUndefined\n\t}\n\n\tbuf = append(buf, '(')\n\n\tfor i, p := range src.P {\n\t\tif i > 0 {\n\t\t\tbuf = append(buf, ',')\n\t\t}\n\t\tbuf = append(buf, fmt.Sprintf(`(%s,%s)`,\n\t\t\tstrconv.FormatFloat(p.X, 'f', -1, 64),\n\t\t\tstrconv.FormatFloat(p.Y, 'f', -1, 64),\n\t\t)...)\n\t}\n\n\treturn append(buf, ')'), nil\n}\n\nfunc (src Polygon) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {\n\tswitch src.Status {\n\tcase Null:\n\t\treturn nil, nil\n\tcase Undefined:\n\t\treturn nil, errUndefined\n\t}\n\n\tbuf = pgio.AppendInt32(buf, int32(len(src.P)))\n\n\tfor _, p := range src.P {\n\t\tbuf = pgio.AppendUint64(buf, math.Float64bits(p.X))\n\t\tbuf = pgio.AppendUint64(buf, math.Float64bits(p.Y))\n\t}\n\n\treturn buf, nil\n}\n\n\/\/ Scan implements the database\/sql Scanner interface.\nfunc (dst *Polygon) Scan(src interface{}) error {\n\tif src == nil {\n\t\t*dst = Polygon{Status: Null}\n\t\treturn nil\n\t}\n\n\tswitch src := src.(type) {\n\tcase string:\n\t\treturn dst.DecodeText(nil, []byte(src))\n\tcase []byte:\n\t\tsrcCopy := make([]byte, len(src))\n\t\tcopy(srcCopy, src)\n\t\treturn dst.DecodeText(nil, srcCopy)\n\t}\n\n\treturn errors.Errorf(\"cannot scan %T\", src)\n}\n\n\/\/ Value implements the database\/sql\/driver Valuer interface.\nfunc (src Polygon) Value() (driver.Value, error) {\n\treturn EncodeValueText(src)\n}\n<commit_msg>Add Undefined status to invalid Polygon<commit_after>package pgtype\n\nimport (\n\t\"database\/sql\/driver\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"math\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/jackc\/pgio\"\n\terrors \"golang.org\/x\/xerrors\"\n)\n\ntype Polygon struct {\n\tP []Vec2\n\tStatus Status\n}\n\nfunc (dst *Polygon) Set(src interface{}) error {\n\tif src == nil {\n\t\tdst.Status = Null\n\t\treturn nil\n\t}\n\terr := errors.Errorf(\"cannot convert %v to Polygon\", src)\n\tvar p *Polygon\n\tswitch value := src.(type) {\n\tcase string:\n\t\tp, err = parseString(value)\n\tcase []Vec2:\n\t\tp = &Polygon{Status: Present, P: value}\n\t\terr = nil\n\tcase []float64:\n\t\tp, err = parseFloat64(value)\n\tdefault:\n\t\treturn err\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\t*dst = *p\n\treturn nil\n}\n\nfunc parseString(src string) (*Polygon, error) {\n\tp := &Polygon{}\n\terr := p.DecodeText(nil, []byte(src))\n\treturn p, err\n}\n\nfunc parseFloat64(src []float64) (*Polygon, error) {\n\tp := &Polygon{Status: Null}\n\tif len(src) == 0 {\n\t\treturn p, nil\n\t}\n\tif len(src)%2 != 0 {\n\t\tp.Status = Undefined\n\t\treturn p, errors.Errorf(\"invalid length for polygon: %v\", len(src))\n\t}\n\tp.Status = Present\n\tp.P = make([]Vec2, 0)\n\tfor i := 0; i < len(src); i += 2 {\n\t\tp.P = append(p.P, Vec2{X: src[i], Y: src[i+1]})\n\t}\n\treturn p, nil\n}\n\nfunc (dst Polygon) Get() interface{} {\n\tswitch dst.Status {\n\tcase Present:\n\t\treturn dst\n\tcase Null:\n\t\treturn nil\n\tdefault:\n\t\treturn dst.Status\n\t}\n}\n\nfunc (src *Polygon) AssignTo(dst interface{}) error {\n\treturn errors.Errorf(\"cannot assign %v to %T\", src, dst)\n}\n\nfunc (dst *Polygon) DecodeText(ci *ConnInfo, src []byte) error {\n\tif src == nil {\n\t\t*dst = Polygon{Status: Null}\n\t\treturn nil\n\t}\n\n\tif len(src) < 7 {\n\t\treturn errors.Errorf(\"invalid length for Polygon: %v\", len(src))\n\t}\n\n\tpoints := make([]Vec2, 0)\n\n\tstr := string(src[2:])\n\n\tfor {\n\t\tend := strings.IndexByte(str, ',')\n\t\tx, err := strconv.ParseFloat(str[:end], 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tstr = str[end+1:]\n\t\tend = strings.IndexByte(str, ')')\n\n\t\ty, err := strconv.ParseFloat(str[:end], 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpoints = append(points, Vec2{x, y})\n\n\t\tif end+3 < len(str) {\n\t\t\tstr = str[end+3:]\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t*dst = Polygon{P: points, Status: Present}\n\treturn nil\n}\n\nfunc (dst *Polygon) DecodeBinary(ci *ConnInfo, src []byte) error {\n\tif src == nil {\n\t\t*dst = Polygon{Status: Null}\n\t\treturn nil\n\t}\n\n\tif len(src) < 5 {\n\t\treturn errors.Errorf(\"invalid length for Polygon: %v\", len(src))\n\t}\n\n\tpointCount := int(binary.BigEndian.Uint32(src))\n\trp := 4\n\n\tif 4+pointCount*16 != len(src) {\n\t\treturn errors.Errorf(\"invalid length for Polygon with %d points: %v\", pointCount, len(src))\n\t}\n\n\tpoints := make([]Vec2, pointCount)\n\tfor i := 0; i < len(points); i++ {\n\t\tx := binary.BigEndian.Uint64(src[rp:])\n\t\trp += 8\n\t\ty := binary.BigEndian.Uint64(src[rp:])\n\t\trp += 8\n\t\tpoints[i] = Vec2{math.Float64frombits(x), math.Float64frombits(y)}\n\t}\n\n\t*dst = Polygon{\n\t\tP: points,\n\t\tStatus: Present,\n\t}\n\treturn nil\n}\n\nfunc (src Polygon) EncodeText(ci *ConnInfo, buf []byte) ([]byte, error) {\n\tswitch src.Status {\n\tcase Null:\n\t\treturn nil, nil\n\tcase Undefined:\n\t\treturn nil, errUndefined\n\t}\n\n\tbuf = append(buf, '(')\n\n\tfor i, p := range src.P {\n\t\tif i > 0 {\n\t\t\tbuf = append(buf, ',')\n\t\t}\n\t\tbuf = append(buf, fmt.Sprintf(`(%s,%s)`,\n\t\t\tstrconv.FormatFloat(p.X, 'f', -1, 64),\n\t\t\tstrconv.FormatFloat(p.Y, 'f', -1, 64),\n\t\t)...)\n\t}\n\n\treturn append(buf, ')'), nil\n}\n\nfunc (src Polygon) EncodeBinary(ci *ConnInfo, buf []byte) ([]byte, error) {\n\tswitch src.Status {\n\tcase Null:\n\t\treturn nil, nil\n\tcase Undefined:\n\t\treturn nil, errUndefined\n\t}\n\n\tbuf = pgio.AppendInt32(buf, int32(len(src.P)))\n\n\tfor _, p := range src.P {\n\t\tbuf = pgio.AppendUint64(buf, math.Float64bits(p.X))\n\t\tbuf = pgio.AppendUint64(buf, math.Float64bits(p.Y))\n\t}\n\n\treturn buf, nil\n}\n\n\/\/ Scan implements the database\/sql Scanner interface.\nfunc (dst *Polygon) Scan(src interface{}) error {\n\tif src == nil {\n\t\t*dst = Polygon{Status: Null}\n\t\treturn nil\n\t}\n\n\tswitch src := src.(type) {\n\tcase string:\n\t\treturn dst.DecodeText(nil, []byte(src))\n\tcase []byte:\n\t\tsrcCopy := make([]byte, len(src))\n\t\tcopy(srcCopy, src)\n\t\treturn dst.DecodeText(nil, srcCopy)\n\t}\n\n\treturn errors.Errorf(\"cannot scan %T\", src)\n}\n\n\/\/ Value implements the database\/sql\/driver Valuer interface.\nfunc (src Polygon) Value() (driver.Value, error) {\n\treturn EncodeValueText(src)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto\/ecdsa\"\n\t\"crypto\/sha256\"\n\t\"crypto\/x509\"\n\t\"encoding\/base64\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"go.mozilla.org\/autograph\/signer\/contentsignature\"\n)\n\n\/\/ Certificates no longer in use but not yet removed from the autograph config.\n\/\/ we don't want to alert on those.\n\/\/ https:\/\/bugzilla.mozilla.org\/show_bug.cgi?id=1466523\nvar ignoredCerts = map[string]bool{\n\t\"fingerprinting-defenses.content-signature.mozilla.org\": true,\n\t\"fennec-dlc.content-signature.mozilla.org\": true,\n\t\"focus-experiments.content-signature.mozilla.org\": true,\n}\n\n\/\/ validate the signature and certificate chain of a content signature response\n\/\/\n\/\/ If an X5U value was provided, use the public key from the end entity certificate\n\/\/ to verify the sig. Otherwise, use the PublicKey contained in the response.\n\/\/\n\/\/ If the signature passes, verify the chain of trust maps.\nfunc verifyContentSignature(response signatureresponse) error {\n\tvar (\n\t\tkey *ecdsa.PublicKey\n\t\terr error\n\t\tcerts []*x509.Certificate\n\t)\n\tsig, err := contentsignature.Unmarshal(response.Signature)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tsig.X5U = response.X5U\n\tif sig.X5U != \"\" {\n\t\tcerts, err = getX5U(sig.X5U)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(certs) < 2 {\n\t\t\treturn fmt.Errorf(\"Found %d certs in X5U, expected at least 2\", len(certs))\n\t\t}\n\t\t\/\/ certs[0] is the end entity\n\t\tkey = certs[0].PublicKey.(*ecdsa.PublicKey)\n\t} else {\n\t\tkey, err = parsePublicKeyFromB64(response.PublicKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif !sig.VerifyData([]byte(inputdata), key) {\n\t\treturn fmt.Errorf(\"Signature verification failed\")\n\t}\n\tif certs != nil {\n\t\terr = verifyCertChain(certs)\n\t\tif err != nil {\n\t\t\t\/\/ check if we should ignore this cert\n\t\t\tif _, ok := ignoredCerts[certs[0].Subject.CommonName]; ok {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getX5U(x5u string) (certs []*x509.Certificate, err error) {\n\tlog.Printf(\"Retrieving X5U %q\", x5u)\n\tresp, err := http.Get(x5u)\n\tif err != nil {\n\t\treturn certs, fmt.Errorf(\"Failed to retrieve X5U %s: %v\", x5u, err)\n\t}\n\tdefer resp.Body.Close()\n\tscanner := bufio.NewScanner(resp.Body)\n\t\/\/ the first row must contain BEGIN CERT for the end entity\n\tscanner.Scan()\n\tif scanner.Text() != \"-----BEGIN CERTIFICATE-----\" {\n\t\treturn certs, fmt.Errorf(\"Invalid X5U format for %s: first row isn't BEGIN CERTIFICATE\", x5u)\n\t}\n\tvar certPEM []byte\n\tcertPEM = append(certPEM, scanner.Bytes()...)\n\tcertPEM = append(certPEM, byte('\\n'))\n\tfor scanner.Scan() {\n\t\tcertPEM = append(certPEM, scanner.Bytes()...)\n\t\tcertPEM = append(certPEM, byte('\\n'))\n\t\tif scanner.Text() == \"-----END CERTIFICATE-----\" {\n\t\t\t\/\/ end of the current cert. Parse it, store it\n\t\t\t\/\/ and move on to next cert\n\t\t\tblock, _ := pem.Decode(certPEM)\n\t\t\tif block == nil {\n\t\t\t\treturn certs, fmt.Errorf(\"Failed to parse certificate PEM\")\n\t\t\t}\n\t\t\tcertX509, err := x509.ParseCertificate(block.Bytes)\n\t\t\tif err != nil {\n\t\t\t\treturn certs, fmt.Errorf(\"Could not parse X.509 certificate: %v\", err)\n\t\t\t}\n\t\t\tlog.Printf(\"Retrieved certificate CN=%q\", certX509.Subject.CommonName)\n\t\t\tcerts = append(certs, certX509)\n\t\t\tcertPEM = nil\n\t\t}\n\t}\n\treturn certs, nil\n}\n\nfunc parsePublicKeyFromB64(b64PubKey string) (pubkey *ecdsa.PublicKey, err error) {\n\tkeyBytes, err := base64.StdEncoding.DecodeString(b64PubKey)\n\tif err != nil {\n\t\treturn pubkey, fmt.Errorf(\"Failed to parse public key base64: %v\", err)\n\t}\n\tkeyInterface, err := x509.ParsePKIXPublicKey(keyBytes)\n\tif err != nil {\n\t\treturn pubkey, fmt.Errorf(\"Failed to parse public key DER: %v\", err)\n\t}\n\tpubkey = keyInterface.(*ecdsa.PublicKey)\n\treturn pubkey, nil\n}\n\nfunc verifyCertChain(certs []*x509.Certificate) error {\n\tfor i, cert := range certs {\n\t\tif (i + 1) == len(certs) {\n\t\t\terr := verifyRoot(cert)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Certificate %d %q is root but fails validation: %v\",\n\t\t\t\t\ti, cert.Subject.CommonName, err)\n\t\t\t}\n\t\t\tlog.Printf(\"Certificate %d %q is a valid root\", i, cert.Subject.CommonName)\n\t\t} else {\n\t\t\t\/\/ check that cert is signed by parent\n\t\t\terr := cert.CheckSignatureFrom(certs[i+1])\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Certificate %d %q is not signed by parent certificate %d %q: %v\",\n\t\t\t\t\ti, cert.Subject.CommonName, i+1, certs[i+1].Subject.CommonName, err)\n\t\t\t}\n\t\t\tlog.Printf(\"Certificate %d %q has a valid signature from parent certificate %d %q\",\n\t\t\t\ti, cert.Subject.CommonName, i+1, certs[i+1].Subject.CommonName)\n\t\t}\n\t\tif time.Now().Add(15 * 24 * time.Hour).After(cert.NotAfter) {\n\t\t\treturn fmt.Errorf(\"Certificate %d %q expires in less than 15 days: notAfter=%s\",\n\t\t\t\ti, cert.Subject.CommonName, cert.NotAfter)\n\t\t}\n\t\tif time.Now().Add(30 * 24 * time.Hour).After(cert.NotAfter) {\n\t\t\t\/\/ cert expires in less than 30 days, this is a soft error. send an email.\n\t\t\terr := sendSoftNotification(\n\t\t\t\tfmt.Sprintf(\"%x\", sha256.Sum256(cert.Raw)),\n\t\t\t\t\"Certificate %d %q expires in less than 30 days: notAfter=%s\",\n\t\t\t\ti, cert.Subject.CommonName, cert.NotAfter)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"failed to send soft notification: %v\", err)\n\t\t\t}\n\t\t}\n\t\tif time.Now().Before(cert.NotBefore) {\n\t\t\treturn fmt.Errorf(\"Certificate %d %q is not yet valid: notBefore=%s\",\n\t\t\t\ti, cert.Subject.CommonName, cert.NotBefore)\n\t\t}\n\t\tlog.Printf(\"Certificate %d %q is valid from %s to %s\",\n\t\t\ti, cert.Subject.CommonName, cert.NotBefore, cert.NotAfter)\n\t}\n\treturn nil\n}\n\nfunc verifyRoot(cert *x509.Certificate) error {\n\t\/\/ this is the last cert, it should be self signed\n\tif !bytes.Equal(cert.RawSubject, cert.RawIssuer) {\n\t\treturn fmt.Errorf(\"subject does not match issuer, should be equal\")\n\t}\n\tif !cert.IsCA {\n\t\treturn fmt.Errorf(\"missing IS CA extension\")\n\t}\n\tif conf.RootHash != \"\" {\n\t\trhash := strings.Replace(conf.RootHash, \":\", \"\", -1)\n\t\t\/\/ We're configure to check the root hash matches expected value\n\t\th := sha256.Sum256(cert.Raw)\n\t\tchash := fmt.Sprintf(\"%X\", h[:])\n\t\tif rhash != chash {\n\t\t\treturn fmt.Errorf(\"hash does not match expected root: expected=%s; got=%s\", rhash, chash)\n\t\t}\n\t}\n\thasCodeSigningExtension := false\n\tfor _, ext := range cert.ExtKeyUsage {\n\t\tif ext == x509.ExtKeyUsageCodeSigning {\n\t\t\thasCodeSigningExtension = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !hasCodeSigningExtension {\n\t\treturn fmt.Errorf(\"missing codeSigning key usage extension\")\n\t}\n\treturn nil\n}\n<commit_msg>autograph-monitor: send soft notification before hard one<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto\/ecdsa\"\n\t\"crypto\/sha256\"\n\t\"crypto\/x509\"\n\t\"encoding\/base64\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"go.mozilla.org\/autograph\/signer\/contentsignature\"\n)\n\n\/\/ Certificates no longer in use but not yet removed from the autograph config.\n\/\/ we don't want to alert on those.\n\/\/ https:\/\/bugzilla.mozilla.org\/show_bug.cgi?id=1466523\nvar ignoredCerts = map[string]bool{\n\t\"fingerprinting-defenses.content-signature.mozilla.org\": true,\n\t\"fennec-dlc.content-signature.mozilla.org\": true,\n\t\"focus-experiments.content-signature.mozilla.org\": true,\n}\n\n\/\/ validate the signature and certificate chain of a content signature response\n\/\/\n\/\/ If an X5U value was provided, use the public key from the end entity certificate\n\/\/ to verify the sig. Otherwise, use the PublicKey contained in the response.\n\/\/\n\/\/ If the signature passes, verify the chain of trust maps.\nfunc verifyContentSignature(response signatureresponse) error {\n\tvar (\n\t\tkey *ecdsa.PublicKey\n\t\terr error\n\t\tcerts []*x509.Certificate\n\t)\n\tsig, err := contentsignature.Unmarshal(response.Signature)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tsig.X5U = response.X5U\n\tif sig.X5U != \"\" {\n\t\tcerts, err = getX5U(sig.X5U)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(certs) < 2 {\n\t\t\treturn fmt.Errorf(\"Found %d certs in X5U, expected at least 2\", len(certs))\n\t\t}\n\t\t\/\/ certs[0] is the end entity\n\t\tkey = certs[0].PublicKey.(*ecdsa.PublicKey)\n\t} else {\n\t\tkey, err = parsePublicKeyFromB64(response.PublicKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif !sig.VerifyData([]byte(inputdata), key) {\n\t\treturn fmt.Errorf(\"Signature verification failed\")\n\t}\n\tif certs != nil {\n\t\terr = verifyCertChain(certs)\n\t\tif err != nil {\n\t\t\t\/\/ check if we should ignore this cert\n\t\t\tif _, ok := ignoredCerts[certs[0].Subject.CommonName]; ok {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc getX5U(x5u string) (certs []*x509.Certificate, err error) {\n\tlog.Printf(\"Retrieving X5U %q\", x5u)\n\tresp, err := http.Get(x5u)\n\tif err != nil {\n\t\treturn certs, fmt.Errorf(\"Failed to retrieve X5U %s: %v\", x5u, err)\n\t}\n\tdefer resp.Body.Close()\n\tscanner := bufio.NewScanner(resp.Body)\n\t\/\/ the first row must contain BEGIN CERT for the end entity\n\tscanner.Scan()\n\tif scanner.Text() != \"-----BEGIN CERTIFICATE-----\" {\n\t\treturn certs, fmt.Errorf(\"Invalid X5U format for %s: first row isn't BEGIN CERTIFICATE\", x5u)\n\t}\n\tvar certPEM []byte\n\tcertPEM = append(certPEM, scanner.Bytes()...)\n\tcertPEM = append(certPEM, byte('\\n'))\n\tfor scanner.Scan() {\n\t\tcertPEM = append(certPEM, scanner.Bytes()...)\n\t\tcertPEM = append(certPEM, byte('\\n'))\n\t\tif scanner.Text() == \"-----END CERTIFICATE-----\" {\n\t\t\t\/\/ end of the current cert. Parse it, store it\n\t\t\t\/\/ and move on to next cert\n\t\t\tblock, _ := pem.Decode(certPEM)\n\t\t\tif block == nil {\n\t\t\t\treturn certs, fmt.Errorf(\"Failed to parse certificate PEM\")\n\t\t\t}\n\t\t\tcertX509, err := x509.ParseCertificate(block.Bytes)\n\t\t\tif err != nil {\n\t\t\t\treturn certs, fmt.Errorf(\"Could not parse X.509 certificate: %v\", err)\n\t\t\t}\n\t\t\tlog.Printf(\"Retrieved certificate CN=%q\", certX509.Subject.CommonName)\n\t\t\tcerts = append(certs, certX509)\n\t\t\tcertPEM = nil\n\t\t}\n\t}\n\treturn certs, nil\n}\n\nfunc parsePublicKeyFromB64(b64PubKey string) (pubkey *ecdsa.PublicKey, err error) {\n\tkeyBytes, err := base64.StdEncoding.DecodeString(b64PubKey)\n\tif err != nil {\n\t\treturn pubkey, fmt.Errorf(\"Failed to parse public key base64: %v\", err)\n\t}\n\tkeyInterface, err := x509.ParsePKIXPublicKey(keyBytes)\n\tif err != nil {\n\t\treturn pubkey, fmt.Errorf(\"Failed to parse public key DER: %v\", err)\n\t}\n\tpubkey = keyInterface.(*ecdsa.PublicKey)\n\treturn pubkey, nil\n}\n\nfunc verifyCertChain(certs []*x509.Certificate) error {\n\tfor i, cert := range certs {\n\t\tif (i + 1) == len(certs) {\n\t\t\terr := verifyRoot(cert)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Certificate %d %q is root but fails validation: %v\",\n\t\t\t\t\ti, cert.Subject.CommonName, err)\n\t\t\t}\n\t\t\tlog.Printf(\"Certificate %d %q is a valid root\", i, cert.Subject.CommonName)\n\t\t} else {\n\t\t\t\/\/ check that cert is signed by parent\n\t\t\terr := cert.CheckSignatureFrom(certs[i+1])\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Certificate %d %q is not signed by parent certificate %d %q: %v\",\n\t\t\t\t\ti, cert.Subject.CommonName, i+1, certs[i+1].Subject.CommonName, err)\n\t\t\t}\n\t\t\tlog.Printf(\"Certificate %d %q has a valid signature from parent certificate %d %q\",\n\t\t\t\ti, cert.Subject.CommonName, i+1, certs[i+1].Subject.CommonName)\n\t\t}\n\t\tif time.Now().Add(30 * 24 * time.Hour).After(cert.NotAfter) {\n\t\t\t\/\/ cert expires in less than 30 days, this is a soft error. send an email.\n\t\t\terr := sendSoftNotification(\n\t\t\t\tfmt.Sprintf(\"%x\", sha256.Sum256(cert.Raw)),\n\t\t\t\t\"Certificate %d %q expires in less than 30 days: notAfter=%s\",\n\t\t\t\ti, cert.Subject.CommonName, cert.NotAfter)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"failed to send soft notification: %v\", err)\n\t\t\t}\n\t\t}\n\t\tif time.Now().Add(15 * 24 * time.Hour).After(cert.NotAfter) {\n\t\t\treturn fmt.Errorf(\"Certificate %d %q expires in less than 15 days: notAfter=%s\",\n\t\t\t\ti, cert.Subject.CommonName, cert.NotAfter)\n\t\t}\n\t\tif time.Now().Before(cert.NotBefore) {\n\t\t\treturn fmt.Errorf(\"Certificate %d %q is not yet valid: notBefore=%s\",\n\t\t\t\ti, cert.Subject.CommonName, cert.NotBefore)\n\t\t}\n\t\tlog.Printf(\"Certificate %d %q is valid from %s to %s\",\n\t\t\ti, cert.Subject.CommonName, cert.NotBefore, cert.NotAfter)\n\t}\n\treturn nil\n}\n\nfunc verifyRoot(cert *x509.Certificate) error {\n\t\/\/ this is the last cert, it should be self signed\n\tif !bytes.Equal(cert.RawSubject, cert.RawIssuer) {\n\t\treturn fmt.Errorf(\"subject does not match issuer, should be equal\")\n\t}\n\tif !cert.IsCA {\n\t\treturn fmt.Errorf(\"missing IS CA extension\")\n\t}\n\tif conf.RootHash != \"\" {\n\t\trhash := strings.Replace(conf.RootHash, \":\", \"\", -1)\n\t\t\/\/ We're configure to check the root hash matches expected value\n\t\th := sha256.Sum256(cert.Raw)\n\t\tchash := fmt.Sprintf(\"%X\", h[:])\n\t\tif rhash != chash {\n\t\t\treturn fmt.Errorf(\"hash does not match expected root: expected=%s; got=%s\", rhash, chash)\n\t\t}\n\t}\n\thasCodeSigningExtension := false\n\tfor _, ext := range cert.ExtKeyUsage {\n\t\tif ext == x509.ExtKeyUsageCodeSigning {\n\t\t\thasCodeSigningExtension = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !hasCodeSigningExtension {\n\t\treturn fmt.Errorf(\"missing codeSigning key usage extension\")\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package ui\n\nimport (\n\t\"context\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"hash\/fnv\"\n\t\"net\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/temoto\/alive\"\n\t\"github.com\/temoto\/vender\/currency\"\n\t\"github.com\/temoto\/vender\/engine\"\n\t\"github.com\/temoto\/vender\/engine\/inventory\"\n\t\"github.com\/temoto\/vender\/hardware\/input\"\n\t\"github.com\/temoto\/vender\/head\/money\"\n\ttele_api \"github.com\/temoto\/vender\/head\/tele\/api\"\n\t\"github.com\/temoto\/vender\/helpers\"\n\t\"github.com\/temoto\/vender\/state\"\n)\n\nconst (\n\tserviceMenuInventory = \"inventory\"\n\tserviceMenuTest = \"test\"\n\tserviceMenuReboot = \"reboot\"\n\tserviceMenuNetwork = \"network\"\n\tserviceMenuReport = \"report\"\n)\n\nvar \/*const*\/ serviceMenu = []string{\n\tserviceMenuInventory,\n\tserviceMenuTest,\n\tserviceMenuReboot,\n\tserviceMenuNetwork,\n\tserviceMenuReport,\n}\nvar \/*const*\/ serviceMenuMax = uint8(len(serviceMenu) - 1)\n\ntype uiService struct {\n\t\/\/ config\n\tresetTimeout time.Duration\n\tSecretSalt []byte\n\n\t\/\/ state\n\taskReport bool\n\tmenuIdx uint8\n\tinvIdx uint8\n\tinvList []*inventory.Stock\n\ttestIdx uint8\n\ttestList []engine.Doer\n}\n\nfunc (self *uiService) Init(ctx context.Context) {\n\tg := state.GetGlobal(ctx)\n\tconfig := g.Config.UI.Service\n\tself.SecretSalt = []byte{0} \/\/ FIXME read from config\n\tself.resetTimeout = helpers.IntSecondDefault(config.ResetTimeoutSec, 3*time.Second)\n\terrs := make([]error, 0, len(config.Tests))\n\tfor _, t := range config.Tests {\n\t\tif d, err := g.Engine.ParseText(t.Name, t.Scenario); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t} else {\n\t\t\tself.testList = append(self.testList, d)\n\t\t}\n\t}\n\tif err := helpers.FoldErrors(errs); err != nil {\n\t\tg.Log.Fatal(err)\n\t}\n}\n\nfunc (self *UI) onServiceBegin(ctx context.Context) State {\n\tself.inputBuf = self.inputBuf[:0]\n\tself.lastActivity = time.Now()\n\tself.Service.askReport = false\n\tself.Service.menuIdx = 0\n\tself.Service.invIdx = 0\n\tself.Service.invList = make([]*inventory.Stock, 0, 16)\n\tself.Service.testIdx = 0\n\tself.g.Inventory.Iter(func(s *inventory.Stock) {\n\t\tself.g.Log.Debugf(\"ui service inventory: - %s\", s.String())\n\t\tself.Service.invList = append(self.Service.invList, s)\n\t})\n\tsort.Slice(self.Service.invList, func(a, b int) bool {\n\t\txa := self.Service.invList[a]\n\t\txb := self.Service.invList[b]\n\t\tif xa.Code != xb.Code {\n\t\t\treturn xa.Code < xb.Code\n\t\t}\n\t\treturn xa.Name < xb.Name\n\t})\n\t\/\/ self.g.Log.Debugf(\"invlist=%v, invidx=%d\", self.Service.invList, self.Service.invIdx)\n\n\terr := self.g.Engine.ExecList(ctx, \"on_service_begin\", self.g.Config.Engine.OnServiceBegin)\n\tif err != nil {\n\t\tself.g.Error(err)\n\t\treturn StateBroken\n\t}\n\n\tself.g.Log.Debugf(\"ui service begin\")\n\tself.g.Tele.State(tele_api.State_Service)\n\treturn StateServiceAuth\n}\n\nfunc (self *UI) onServiceAuth() State {\n\tserviceConfig := &self.g.Config.UI.Service\n\tif !serviceConfig.Auth.Enable {\n\t\treturn StateServiceMenu\n\t}\n\n\tpassVisualHash := VisualHash(self.inputBuf, self.Service.SecretSalt)\n\tself.display.SetLines(\n\t\tserviceConfig.MsgAuth,\n\t\tfmt.Sprintf(msgServiceInputAuth, passVisualHash),\n\t)\n\n\tnext, e := self.serviceWaitInput()\n\tif next != StateDefault {\n\t\treturn next\n\t}\n\n\tswitch {\n\tcase e.IsDigit():\n\t\tself.inputBuf = append(self.inputBuf, byte(e.Key))\n\t\tif len(self.inputBuf) > 16 {\n\t\t\tself.display.SetLines(MsgError, \"len\") \/\/ FIXME extract message string\n\t\t\tself.serviceWaitInput()\n\t\t\treturn StateServiceEnd\n\t\t}\n\t\treturn self.State()\n\n\tcase e.IsZero() || input.IsReject(&e):\n\t\treturn StateServiceEnd\n\n\tcase input.IsAccept(&e):\n\t\tif len(self.inputBuf) == 0 {\n\t\t\tself.display.SetLines(MsgError, \"empty\") \/\/ FIXME extract message string\n\t\t\tself.serviceWaitInput()\n\t\t\treturn StateServiceEnd\n\t\t}\n\n\t\t\/\/ FIXME fnv->secure hash for actual password comparison\n\t\tinputHash := VisualHash(self.inputBuf, self.Service.SecretSalt)\n\t\tfor i, p := range self.g.Config.UI.Service.Auth.Passwords {\n\t\t\tif inputHash == p {\n\t\t\t\tself.g.Log.Infof(\"service auth ok i=%d hash=%s\", i, inputHash)\n\t\t\t\treturn StateServiceMenu\n\t\t\t}\n\t\t}\n\n\t\tself.display.SetLines(MsgError, \"sorry\") \/\/ FIXME extract message string\n\t\tself.serviceWaitInput()\n\t\treturn StateServiceEnd\n\t}\n\tself.g.Log.Errorf(\"ui onServiceAuth unhandled branch\")\n\tself.display.SetLines(MsgError, \"code error\") \/\/ FIXME extract message string\n\tself.serviceWaitInput()\n\treturn StateServiceEnd\n}\n\nfunc (self *UI) onServiceMenu() State {\n\tmenuName := serviceMenu[self.Service.menuIdx]\n\tself.display.SetLines(\n\t\tmsgServiceMenu,\n\t\tfmt.Sprintf(\"%d %s\", self.Service.menuIdx+1, menuName),\n\t)\n\n\tnext, e := self.serviceWaitInput()\n\tif next != StateDefault {\n\t\treturn next\n\t}\n\n\tswitch {\n\tcase e.Key == input.EvendKeyCreamLess:\n\t\tself.Service.menuIdx = (self.Service.menuIdx + serviceMenuMax - 1) % (serviceMenuMax + 1)\n\tcase e.Key == input.EvendKeyCreamMore:\n\t\tself.Service.menuIdx = (self.Service.menuIdx + 1) % (serviceMenuMax + 1)\n\n\tcase input.IsAccept(&e):\n\t\tif int(self.Service.menuIdx) >= len(serviceMenu) {\n\t\t\tpanic(\"code error service menuIdx out of range\")\n\t\t}\n\t\tswitch serviceMenu[self.Service.menuIdx] {\n\t\tcase serviceMenuInventory:\n\t\t\treturn StateServiceInventory\n\t\tcase serviceMenuTest:\n\t\t\treturn StateServiceTest\n\t\tcase serviceMenuReboot:\n\t\t\treturn StateServiceReboot\n\t\tcase serviceMenuNetwork:\n\t\t\treturn StateServiceNetwork\n\t\tdefault:\n\t\t\tpanic(\"code error\")\n\t\t}\n\n\tcase input.IsReject(&e):\n\t\treturn StateServiceEnd\n\n\tcase e.IsDigit():\n\t\tx := byte(e.Key) - byte('0')\n\t\tif x > 0 && x <= serviceMenuMax {\n\t\t\tself.Service.menuIdx = x - 1\n\t\t}\n\t}\n\treturn StateServiceMenu\n}\n\nfunc (self *UI) onServiceInventory() State {\n\tif len(self.Service.invList) == 0 {\n\t\tself.display.SetLines(MsgError, \"inv empty\") \/\/ FIXME extract message string\n\t\tself.serviceWaitInput()\n\t\treturn StateServiceMenu\n\t}\n\tinvCurrent := self.Service.invList[self.Service.invIdx]\n\tself.display.SetLines(\n\t\tfmt.Sprintf(\"I%d %s\", invCurrent.Code, invCurrent.Name),\n\t\tfmt.Sprintf(\"%.1f %s\\x00\", invCurrent.Value(), string(self.inputBuf)), \/\/ TODO configurable decimal point\n\t)\n\n\tnext, e := self.serviceWaitInput()\n\tif next != StateDefault {\n\t\treturn next\n\t}\n\n\tinvIdxMax := uint8(len(self.Service.invList))\n\tswitch {\n\tcase e.Key == input.EvendKeyCreamLess:\n\t\tself.Service.invIdx = (self.Service.invIdx + invIdxMax - 1) % invIdxMax\n\t\tself.inputBuf = self.inputBuf[:0]\n\tcase e.Key == input.EvendKeyCreamMore:\n\t\tself.Service.invIdx = (self.Service.invIdx + 1) % invIdxMax\n\t\tself.inputBuf = self.inputBuf[:0]\n\n\tcase e.Key == input.EvendKeyDot || e.IsDigit():\n\t\tself.inputBuf = append(self.inputBuf, byte(e.Key))\n\n\tcase input.IsAccept(&e):\n\t\tif len(self.inputBuf) == 0 {\n\t\t\tself.g.Log.Errorf(\"ui onServiceInventory input=accept inputBuf=empty\")\n\t\t\tself.display.SetLines(MsgError, \"empty\") \/\/ FIXME extract message string\n\t\t\tself.serviceWaitInput()\n\t\t\treturn StateServiceInventory\n\t\t}\n\n\t\tx, err := strconv.ParseFloat(string(self.inputBuf), 32)\n\t\tself.inputBuf = self.inputBuf[:0]\n\t\tif err != nil {\n\t\t\tself.g.Log.Errorf(\"ui onServiceInventory input=accept inputBuf='%s'\", string(self.inputBuf))\n\t\t\tself.display.SetLines(MsgError, \"number-invalid\") \/\/ FIXME extract message string\n\t\t\tself.serviceWaitInput()\n\t\t\treturn StateServiceInventory\n\t\t}\n\n\t\tinvCurrent := self.Service.invList[self.Service.invIdx]\n\t\tinvCurrent.Set(float32(x))\n\t\tself.Service.askReport = true\n\n\tcase input.IsReject(&e):\n\t\t\/\/ backspace semantic\n\t\tif len(self.inputBuf) > 0 {\n\t\t\tself.inputBuf = self.inputBuf[:len(self.inputBuf)-1]\n\t\t\treturn StateServiceInventory\n\t\t}\n\t\treturn StateServiceMenu\n\t}\n\treturn StateServiceInventory\n}\n\nfunc (self *UI) onServiceTest(ctx context.Context) State {\n\tself.inputBuf = self.inputBuf[:0]\n\tif len(self.Service.testList) == 0 {\n\t\tself.display.SetLines(MsgError, \"no tests\") \/\/ FIXME extract message string\n\t\tself.serviceWaitInput()\n\t\treturn StateServiceMenu\n\t}\n\ttestCurrent := self.Service.testList[self.Service.testIdx]\n\tline1 := fmt.Sprintf(\"T%d %s\", self.Service.testIdx+1, testCurrent.String())\n\tself.display.SetLines(line1, \"\")\n\nwait:\n\tnext, e := self.serviceWaitInput()\n\tif next != StateDefault {\n\t\treturn next\n\t}\n\n\ttestIdxMax := uint8(len(self.Service.testList))\n\tswitch {\n\tcase e.Key == input.EvendKeyCreamLess:\n\t\tself.Service.testIdx = (self.Service.testIdx + testIdxMax - 1) % testIdxMax\n\tcase e.Key == input.EvendKeyCreamMore:\n\t\tself.Service.testIdx = (self.Service.testIdx + 1) % testIdxMax\n\n\tcase input.IsAccept(&e):\n\t\tself.Service.askReport = true\n\t\tself.display.SetLines(line1, \"in progress\")\n\t\terr := testCurrent.Do(ctx)\n\t\tif err == nil {\n\t\t\tself.display.SetLines(line1, \"OK\")\n\t\t} else {\n\t\t\tself.g.Error(err)\n\t\t\tself.display.SetLines(line1, \"error\")\n\t\t}\n\t\tgoto wait\n\n\tcase input.IsReject(&e):\n\t\treturn StateServiceMenu\n\t}\n\treturn StateServiceTest\n}\n\nfunc (self *UI) onServiceReboot() State {\n\tself.display.SetLines(\"for reboot\", \"press 1\") \/\/ FIXME extract message string\n\n\tnext, e := self.serviceWaitInput()\n\tif next != StateDefault {\n\t\treturn next\n\t}\n\n\tswitch {\n\tcase e.Key == '1':\n\t\tself.display.SetLines(\"reboot\", \"in progress\") \/\/ FIXME extract message string\n\t\t\/\/ os.Exit(0)\n\t\tself.g.Alive.Stop()\n\t\treturn StateServiceEnd\n\t}\n\treturn StateServiceMenu\n}\n\nfunc (self *UI) onServiceNetwork() State {\n\taddrs, _ := net.InterfaceAddrs()\n\t\/\/ TODO filter\n\tlistString := fmt.Sprintf(\"%v\", addrs)\n\tself.display.SetLines(\"network\", listString)\n\n\tfor {\n\t\tnext, e := self.serviceWaitInput()\n\t\tif next != StateDefault {\n\t\t\treturn next\n\t\t}\n\t\tif input.IsReject(&e) {\n\t\t\treturn StateServiceMenu\n\t\t}\n\t}\n}\n\nfunc (self *UI) onServiceReport(ctx context.Context) State {\n\t_ = self.g.Tele.Report(ctx, true)\n\tif err := self.g.Engine.ExecList(ctx, \"service-report\", []string{\"money.cashbox_zero\"}); err != nil {\n\t\tself.g.Error(err)\n\t}\n\treturn StateDefault\n}\n\nfunc (self *UI) onServiceEnd(ctx context.Context) State {\n\t_ = self.g.Inventory.Persist.Store()\n\tself.inputBuf = self.inputBuf[:0]\n\n\tif self.Service.askReport {\n\t\tself.display.SetLines(\"for tele report\", \"press 1\") \/\/ FIXME extract message string\n\t\tif e := self.wait(self.Service.resetTimeout); e.Kind == EventInput && e.Input.Key == '1' {\n\t\t\tself.Service.askReport = false\n\t\t\tself.onServiceReport(ctx)\n\t\t}\n\t}\n\n\terr := self.g.Engine.ExecList(ctx, \"on_service_end\", self.g.Config.Engine.OnServiceEnd)\n\tif err != nil {\n\t\tself.g.Error(err)\n\t\treturn StateBroken\n\t}\n\treturn StateDefault\n}\n\nfunc (self *UI) serviceWaitInput() (State, input.Event) {\n\te := self.wait(self.Service.resetTimeout)\n\tswitch e.Kind {\n\tcase EventInput:\n\t\treturn StateDefault, e.Input\n\n\tcase EventMoney:\n\t\tself.g.Log.Debugf(\"serviceWaitInput money event=%v\", e.Money)\n\t\treturn StateDefault, input.Event{}\n\n\tcase EventTime:\n\t\t\/\/ self.g.Log.Infof(\"inactive=%v\", inactive)\n\t\tself.g.Log.Debugf(\"serviceWaitInput resetTimeout\")\n\t\treturn StateServiceEnd, input.Event{}\n\n\tcase EventLock:\n\t\treturn StateLocked, input.Event{}\n\n\tcase EventStop:\n\t\tself.g.Log.Debugf(\"serviceWaitInput global stop\")\n\t\treturn StateServiceEnd, input.Event{}\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"code error serviceWaitInput unhandled event=%#v\", e))\n\t}\n}\n\nfunc VisualHash(input, salt []byte) string {\n\th := fnv.New32()\n\t_, _ = h.Write(salt)\n\t_, _ = h.Write(input)\n\t_, _ = h.Write(salt)\n\tvar buf [4]byte\n\tbinary := h.Sum(buf[:0])\n\tb64 := base64.RawStdEncoding.EncodeToString(binary)\n\treturn strings.ToLower(b64)\n}\n<commit_msg>service: (network) hide loopback addresses<commit_after>package ui\n\nimport (\n\t\"context\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"hash\/fnv\"\n\t\"net\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/temoto\/alive\"\n\t\"github.com\/temoto\/vender\/currency\"\n\t\"github.com\/temoto\/vender\/engine\"\n\t\"github.com\/temoto\/vender\/engine\/inventory\"\n\t\"github.com\/temoto\/vender\/hardware\/input\"\n\t\"github.com\/temoto\/vender\/head\/money\"\n\ttele_api \"github.com\/temoto\/vender\/head\/tele\/api\"\n\t\"github.com\/temoto\/vender\/helpers\"\n\t\"github.com\/temoto\/vender\/state\"\n)\n\nconst (\n\tserviceMenuInventory = \"inventory\"\n\tserviceMenuTest = \"test\"\n\tserviceMenuReboot = \"reboot\"\n\tserviceMenuNetwork = \"network\"\n\tserviceMenuReport = \"report\"\n)\n\nvar \/*const*\/ serviceMenu = []string{\n\tserviceMenuInventory,\n\tserviceMenuTest,\n\tserviceMenuReboot,\n\tserviceMenuNetwork,\n\tserviceMenuReport,\n}\nvar \/*const*\/ serviceMenuMax = uint8(len(serviceMenu) - 1)\n\ntype uiService struct {\n\t\/\/ config\n\tresetTimeout time.Duration\n\tSecretSalt []byte\n\n\t\/\/ state\n\taskReport bool\n\tmenuIdx uint8\n\tinvIdx uint8\n\tinvList []*inventory.Stock\n\ttestIdx uint8\n\ttestList []engine.Doer\n}\n\nfunc (self *uiService) Init(ctx context.Context) {\n\tg := state.GetGlobal(ctx)\n\tconfig := g.Config.UI.Service\n\tself.SecretSalt = []byte{0} \/\/ FIXME read from config\n\tself.resetTimeout = helpers.IntSecondDefault(config.ResetTimeoutSec, 3*time.Second)\n\terrs := make([]error, 0, len(config.Tests))\n\tfor _, t := range config.Tests {\n\t\tif d, err := g.Engine.ParseText(t.Name, t.Scenario); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t} else {\n\t\t\tself.testList = append(self.testList, d)\n\t\t}\n\t}\n\tif err := helpers.FoldErrors(errs); err != nil {\n\t\tg.Log.Fatal(err)\n\t}\n}\n\nfunc (self *UI) onServiceBegin(ctx context.Context) State {\n\tself.inputBuf = self.inputBuf[:0]\n\tself.lastActivity = time.Now()\n\tself.Service.askReport = false\n\tself.Service.menuIdx = 0\n\tself.Service.invIdx = 0\n\tself.Service.invList = make([]*inventory.Stock, 0, 16)\n\tself.Service.testIdx = 0\n\tself.g.Inventory.Iter(func(s *inventory.Stock) {\n\t\tself.g.Log.Debugf(\"ui service inventory: - %s\", s.String())\n\t\tself.Service.invList = append(self.Service.invList, s)\n\t})\n\tsort.Slice(self.Service.invList, func(a, b int) bool {\n\t\txa := self.Service.invList[a]\n\t\txb := self.Service.invList[b]\n\t\tif xa.Code != xb.Code {\n\t\t\treturn xa.Code < xb.Code\n\t\t}\n\t\treturn xa.Name < xb.Name\n\t})\n\t\/\/ self.g.Log.Debugf(\"invlist=%v, invidx=%d\", self.Service.invList, self.Service.invIdx)\n\n\terr := self.g.Engine.ExecList(ctx, \"on_service_begin\", self.g.Config.Engine.OnServiceBegin)\n\tif err != nil {\n\t\tself.g.Error(err)\n\t\treturn StateBroken\n\t}\n\n\tself.g.Log.Debugf(\"ui service begin\")\n\tself.g.Tele.State(tele_api.State_Service)\n\treturn StateServiceAuth\n}\n\nfunc (self *UI) onServiceAuth() State {\n\tserviceConfig := &self.g.Config.UI.Service\n\tif !serviceConfig.Auth.Enable {\n\t\treturn StateServiceMenu\n\t}\n\n\tpassVisualHash := VisualHash(self.inputBuf, self.Service.SecretSalt)\n\tself.display.SetLines(\n\t\tserviceConfig.MsgAuth,\n\t\tfmt.Sprintf(msgServiceInputAuth, passVisualHash),\n\t)\n\n\tnext, e := self.serviceWaitInput()\n\tif next != StateDefault {\n\t\treturn next\n\t}\n\n\tswitch {\n\tcase e.IsDigit():\n\t\tself.inputBuf = append(self.inputBuf, byte(e.Key))\n\t\tif len(self.inputBuf) > 16 {\n\t\t\tself.display.SetLines(MsgError, \"len\") \/\/ FIXME extract message string\n\t\t\tself.serviceWaitInput()\n\t\t\treturn StateServiceEnd\n\t\t}\n\t\treturn self.State()\n\n\tcase e.IsZero() || input.IsReject(&e):\n\t\treturn StateServiceEnd\n\n\tcase input.IsAccept(&e):\n\t\tif len(self.inputBuf) == 0 {\n\t\t\tself.display.SetLines(MsgError, \"empty\") \/\/ FIXME extract message string\n\t\t\tself.serviceWaitInput()\n\t\t\treturn StateServiceEnd\n\t\t}\n\n\t\t\/\/ FIXME fnv->secure hash for actual password comparison\n\t\tinputHash := VisualHash(self.inputBuf, self.Service.SecretSalt)\n\t\tfor i, p := range self.g.Config.UI.Service.Auth.Passwords {\n\t\t\tif inputHash == p {\n\t\t\t\tself.g.Log.Infof(\"service auth ok i=%d hash=%s\", i, inputHash)\n\t\t\t\treturn StateServiceMenu\n\t\t\t}\n\t\t}\n\n\t\tself.display.SetLines(MsgError, \"sorry\") \/\/ FIXME extract message string\n\t\tself.serviceWaitInput()\n\t\treturn StateServiceEnd\n\t}\n\tself.g.Log.Errorf(\"ui onServiceAuth unhandled branch\")\n\tself.display.SetLines(MsgError, \"code error\") \/\/ FIXME extract message string\n\tself.serviceWaitInput()\n\treturn StateServiceEnd\n}\n\nfunc (self *UI) onServiceMenu() State {\n\tmenuName := serviceMenu[self.Service.menuIdx]\n\tself.display.SetLines(\n\t\tmsgServiceMenu,\n\t\tfmt.Sprintf(\"%d %s\", self.Service.menuIdx+1, menuName),\n\t)\n\n\tnext, e := self.serviceWaitInput()\n\tif next != StateDefault {\n\t\treturn next\n\t}\n\n\tswitch {\n\tcase e.Key == input.EvendKeyCreamLess:\n\t\tself.Service.menuIdx = (self.Service.menuIdx + serviceMenuMax - 1) % (serviceMenuMax + 1)\n\tcase e.Key == input.EvendKeyCreamMore:\n\t\tself.Service.menuIdx = (self.Service.menuIdx + 1) % (serviceMenuMax + 1)\n\n\tcase input.IsAccept(&e):\n\t\tif int(self.Service.menuIdx) >= len(serviceMenu) {\n\t\t\tpanic(\"code error service menuIdx out of range\")\n\t\t}\n\t\tswitch serviceMenu[self.Service.menuIdx] {\n\t\tcase serviceMenuInventory:\n\t\t\treturn StateServiceInventory\n\t\tcase serviceMenuTest:\n\t\t\treturn StateServiceTest\n\t\tcase serviceMenuReboot:\n\t\t\treturn StateServiceReboot\n\t\tcase serviceMenuNetwork:\n\t\t\treturn StateServiceNetwork\n\t\tdefault:\n\t\t\tpanic(\"code error\")\n\t\t}\n\n\tcase input.IsReject(&e):\n\t\treturn StateServiceEnd\n\n\tcase e.IsDigit():\n\t\tx := byte(e.Key) - byte('0')\n\t\tif x > 0 && x <= serviceMenuMax {\n\t\t\tself.Service.menuIdx = x - 1\n\t\t}\n\t}\n\treturn StateServiceMenu\n}\n\nfunc (self *UI) onServiceInventory() State {\n\tif len(self.Service.invList) == 0 {\n\t\tself.display.SetLines(MsgError, \"inv empty\") \/\/ FIXME extract message string\n\t\tself.serviceWaitInput()\n\t\treturn StateServiceMenu\n\t}\n\tinvCurrent := self.Service.invList[self.Service.invIdx]\n\tself.display.SetLines(\n\t\tfmt.Sprintf(\"I%d %s\", invCurrent.Code, invCurrent.Name),\n\t\tfmt.Sprintf(\"%.1f %s\\x00\", invCurrent.Value(), string(self.inputBuf)), \/\/ TODO configurable decimal point\n\t)\n\n\tnext, e := self.serviceWaitInput()\n\tif next != StateDefault {\n\t\treturn next\n\t}\n\n\tinvIdxMax := uint8(len(self.Service.invList))\n\tswitch {\n\tcase e.Key == input.EvendKeyCreamLess:\n\t\tself.Service.invIdx = (self.Service.invIdx + invIdxMax - 1) % invIdxMax\n\t\tself.inputBuf = self.inputBuf[:0]\n\tcase e.Key == input.EvendKeyCreamMore:\n\t\tself.Service.invIdx = (self.Service.invIdx + 1) % invIdxMax\n\t\tself.inputBuf = self.inputBuf[:0]\n\n\tcase e.Key == input.EvendKeyDot || e.IsDigit():\n\t\tself.inputBuf = append(self.inputBuf, byte(e.Key))\n\n\tcase input.IsAccept(&e):\n\t\tif len(self.inputBuf) == 0 {\n\t\t\tself.g.Log.Errorf(\"ui onServiceInventory input=accept inputBuf=empty\")\n\t\t\tself.display.SetLines(MsgError, \"empty\") \/\/ FIXME extract message string\n\t\t\tself.serviceWaitInput()\n\t\t\treturn StateServiceInventory\n\t\t}\n\n\t\tx, err := strconv.ParseFloat(string(self.inputBuf), 32)\n\t\tself.inputBuf = self.inputBuf[:0]\n\t\tif err != nil {\n\t\t\tself.g.Log.Errorf(\"ui onServiceInventory input=accept inputBuf='%s'\", string(self.inputBuf))\n\t\t\tself.display.SetLines(MsgError, \"number-invalid\") \/\/ FIXME extract message string\n\t\t\tself.serviceWaitInput()\n\t\t\treturn StateServiceInventory\n\t\t}\n\n\t\tinvCurrent := self.Service.invList[self.Service.invIdx]\n\t\tinvCurrent.Set(float32(x))\n\t\tself.Service.askReport = true\n\n\tcase input.IsReject(&e):\n\t\t\/\/ backspace semantic\n\t\tif len(self.inputBuf) > 0 {\n\t\t\tself.inputBuf = self.inputBuf[:len(self.inputBuf)-1]\n\t\t\treturn StateServiceInventory\n\t\t}\n\t\treturn StateServiceMenu\n\t}\n\treturn StateServiceInventory\n}\n\nfunc (self *UI) onServiceTest(ctx context.Context) State {\n\tself.inputBuf = self.inputBuf[:0]\n\tif len(self.Service.testList) == 0 {\n\t\tself.display.SetLines(MsgError, \"no tests\") \/\/ FIXME extract message string\n\t\tself.serviceWaitInput()\n\t\treturn StateServiceMenu\n\t}\n\ttestCurrent := self.Service.testList[self.Service.testIdx]\n\tline1 := fmt.Sprintf(\"T%d %s\", self.Service.testIdx+1, testCurrent.String())\n\tself.display.SetLines(line1, \"\")\n\nwait:\n\tnext, e := self.serviceWaitInput()\n\tif next != StateDefault {\n\t\treturn next\n\t}\n\n\ttestIdxMax := uint8(len(self.Service.testList))\n\tswitch {\n\tcase e.Key == input.EvendKeyCreamLess:\n\t\tself.Service.testIdx = (self.Service.testIdx + testIdxMax - 1) % testIdxMax\n\tcase e.Key == input.EvendKeyCreamMore:\n\t\tself.Service.testIdx = (self.Service.testIdx + 1) % testIdxMax\n\n\tcase input.IsAccept(&e):\n\t\tself.Service.askReport = true\n\t\tself.display.SetLines(line1, \"in progress\")\n\t\terr := testCurrent.Do(ctx)\n\t\tif err == nil {\n\t\t\tself.display.SetLines(line1, \"OK\")\n\t\t} else {\n\t\t\tself.g.Error(err)\n\t\t\tself.display.SetLines(line1, \"error\")\n\t\t}\n\t\tgoto wait\n\n\tcase input.IsReject(&e):\n\t\treturn StateServiceMenu\n\t}\n\treturn StateServiceTest\n}\n\nfunc (self *UI) onServiceReboot() State {\n\tself.display.SetLines(\"for reboot\", \"press 1\") \/\/ FIXME extract message string\n\n\tnext, e := self.serviceWaitInput()\n\tif next != StateDefault {\n\t\treturn next\n\t}\n\n\tswitch {\n\tcase e.Key == '1':\n\t\tself.display.SetLines(\"reboot\", \"in progress\") \/\/ FIXME extract message string\n\t\t\/\/ os.Exit(0)\n\t\tself.g.Alive.Stop()\n\t\treturn StateServiceEnd\n\t}\n\treturn StateServiceMenu\n}\n\nfunc (self *UI) onServiceNetwork() State {\n\tallAddrs, _ := net.InterfaceAddrs()\n\taddrs := make([]string, 0, len(allAddrs))\n\t\/\/ TODO parse ignored networks from config\naddrLoop:\n\tfor _, addr := range allAddrs {\n\t\tip, _, err := net.ParseCIDR(addr.String())\n\t\tif err != nil {\n\t\t\tself.g.Log.Errorf(\"invalid local addr=%v\", addr)\n\t\t\tcontinue addrLoop\n\t\t}\n\t\tif ip.IsLoopback() {\n\t\t\tcontinue addrLoop\n\t\t}\n\t\taddrs = append(addrs, ip.String())\n\t}\n\tlistString := strings.Join(addrs, \" \")\n\tself.display.SetLines(\"network\", listString)\n\n\tfor {\n\t\tnext, e := self.serviceWaitInput()\n\t\tif next != StateDefault {\n\t\t\treturn next\n\t\t}\n\t\tif input.IsReject(&e) {\n\t\t\treturn StateServiceMenu\n\t\t}\n\t}\n}\n\nfunc (self *UI) onServiceReport(ctx context.Context) State {\n\t_ = self.g.Tele.Report(ctx, true)\n\tif err := self.g.Engine.ExecList(ctx, \"service-report\", []string{\"money.cashbox_zero\"}); err != nil {\n\t\tself.g.Error(err)\n\t}\n\treturn StateDefault\n}\n\nfunc (self *UI) onServiceEnd(ctx context.Context) State {\n\t_ = self.g.Inventory.Persist.Store()\n\tself.inputBuf = self.inputBuf[:0]\n\n\tif self.Service.askReport {\n\t\tself.display.SetLines(\"for tele report\", \"press 1\") \/\/ FIXME extract message string\n\t\tif e := self.wait(self.Service.resetTimeout); e.Kind == EventInput && e.Input.Key == '1' {\n\t\t\tself.Service.askReport = false\n\t\t\tself.onServiceReport(ctx)\n\t\t}\n\t}\n\n\terr := self.g.Engine.ExecList(ctx, \"on_service_end\", self.g.Config.Engine.OnServiceEnd)\n\tif err != nil {\n\t\tself.g.Error(err)\n\t\treturn StateBroken\n\t}\n\treturn StateDefault\n}\n\nfunc (self *UI) serviceWaitInput() (State, input.Event) {\n\te := self.wait(self.Service.resetTimeout)\n\tswitch e.Kind {\n\tcase EventInput:\n\t\treturn StateDefault, e.Input\n\n\tcase EventMoney:\n\t\tself.g.Log.Debugf(\"serviceWaitInput money event=%v\", e.Money)\n\t\treturn StateDefault, input.Event{}\n\n\tcase EventTime:\n\t\t\/\/ self.g.Log.Infof(\"inactive=%v\", inactive)\n\t\tself.g.Log.Debugf(\"serviceWaitInput resetTimeout\")\n\t\treturn StateServiceEnd, input.Event{}\n\n\tcase EventLock:\n\t\treturn StateLocked, input.Event{}\n\n\tcase EventStop:\n\t\tself.g.Log.Debugf(\"serviceWaitInput global stop\")\n\t\treturn StateServiceEnd, input.Event{}\n\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"code error serviceWaitInput unhandled event=%#v\", e))\n\t}\n}\n\nfunc VisualHash(input, salt []byte) string {\n\th := fnv.New32()\n\t_, _ = h.Write(salt)\n\t_, _ = h.Write(input)\n\t_, _ = h.Write(salt)\n\tvar buf [4]byte\n\tbinary := h.Sum(buf[:0])\n\tb64 := base64.RawStdEncoding.EncodeToString(binary)\n\treturn strings.ToLower(b64)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package tasks provides functions for scheduling\n\/\/ periodic tasks (e.g. background jobs).\npackage tasks\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"gnd.la\/app\"\n\t\"gnd.la\/log\"\n\t\"gnd.la\/signal\"\n\t\"gnd.la\/util\/internal\/runtimeutil\"\n\t\"reflect\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar running struct {\n\tsync.Mutex\n\ttasks map[*Task]int\n}\n\nvar registered struct {\n\tsync.RWMutex\n\ttasks map[string]*Task\n}\n\n\/\/ Task represent a scheduled task.\ntype Task struct {\n\tApp *app.App\n\tHandler app.Handler\n\tInterval time.Duration\n\tOptions *Options\n\tticker *time.Ticker\n}\n\n\/\/ Stop de-schedules the task. After stopping the task, it\n\/\/ won't be started again but if it's currently running, it will\n\/\/ be completed.\nfunc (t *Task) Stop() {\n\tif t.ticker != nil {\n\t\tt.ticker.Stop()\n\t\tt.ticker = nil\n\t}\n}\n\nfunc (t *Task) Resume(now bool) {\n\tt.Stop()\n\tt.ticker = time.NewTicker(t.Interval)\n\tgo t.execute(now)\n}\n\n\/\/ Name returns the task name.\nfunc (t *Task) Name() string {\n\tif t.Options != nil && t.Options.Name != \"\" {\n\t\treturn t.Options.Name\n\t}\n\treturn runtimeutil.FuncName(t.Handler)\n}\n\n\/\/ Delete stops the task by calling t.Stop() and then removes\n\/\/ it from the internal task register.\nfunc (t *Task) Delete() {\n\tregistered.Lock()\n\tdefer registered.Unlock()\n\tt.deleteLocked()\n}\n\nfunc (t *Task) deleteLocked() {\n\tt.Stop()\n\tdelete(registered.tasks, t.Name())\n}\n\nfunc (t *Task) execute(now bool) {\n\tif now {\n\t\texecuteTask(t)\n\t}\n\tfor _ = range t.ticker.C {\n\t\texecuteTask(t)\n\t}\n}\n\n\/\/ Options are used to specify task options when registering them.\ntype Options struct {\n\t\/\/ Name indicates the task name, used for checking the number\n\t\/\/ of instances running. If the task name is not provided, it's\n\t\/\/ derived from the function. Two tasks with the same name are\n\t\/\/ considered as equal, even if their functions are different.\n\tName string\n\t\/\/ MaxInstances indicates the maximum number of instances of\n\t\/\/ this function that can be simultaneously running. If zero,\n\t\/\/ there is no limit.\n\tMaxInstances int\n}\n\nfunc afterTask(task *Task, started time.Time, terr *error) {\n\tname := task.Name()\n\tif err := recover(); err != nil {\n\t\tskip, stackSkip, _, _ := runtimeutil.GetPanic()\n\t\tvar buf bytes.Buffer\n\t\tbuf.WriteString(fmt.Sprintf(\"Panic executing task %s: \", name))\n\t\tbuf.WriteString(fmt.Sprintf(\"%v\", err))\n\t\tbuf.WriteByte('\\n')\n\t\tstack := runtimeutil.FormatStack(stackSkip)\n\t\tlocation, code := runtimeutil.FormatCaller(skip, 5, true, true)\n\t\tif location != \"\" {\n\t\t\tbuf.WriteString(\"\\n At \")\n\t\t\tbuf.WriteString(location)\n\t\t\tif code != \"\" {\n\t\t\t\tbuf.WriteByte('\\n')\n\t\t\t\tbuf.WriteString(code)\n\t\t\t\tbuf.WriteByte('\\n')\n\t\t\t}\n\t\t}\n\t\tif stack != \"\" {\n\t\t\tbuf.WriteString(\"\\nStack:\\n\")\n\t\t\tbuf.WriteString(stack)\n\t\t}\n\t\t*terr = errors.New(buf.String())\n\t}\n\tend := time.Now()\n\tlog.Debugf(\"Finished task %s at %v (took %v)\", name, end, end.Sub(started))\n\trunning.Lock()\n\tdefer running.Unlock()\n\tc := running.tasks[task]\n\tif c > 1 {\n\t\trunning.tasks[task] = c - 1\n\t} else {\n\t\tdelete(running.tasks, task)\n\t}\n}\n\nfunc canRunTask(task *Task) error {\n\trunning.Lock()\n\tdefer running.Unlock()\n\tc := running.tasks[task]\n\tif task.Options != nil && task.Options.MaxInstances > 0 {\n\t\tif c >= task.Options.MaxInstances {\n\t\t\treturn fmt.Errorf(\"not starting task %s because it's already running %d instances\", task.Name(), c)\n\t\t}\n\t}\n\tif running.tasks == nil {\n\t\trunning.tasks = make(map[*Task]int)\n\t}\n\trunning.tasks[task] = c + 1\n\treturn nil\n}\n\nfunc executeTask(task *Task) (bool, error) {\n\tif err := canRunTask(task); err != nil {\n\t\treturn false, err\n\t}\n\tctx := task.App.NewContext(contextProvider(0))\n\tdefer task.App.CloseContext(ctx)\n\tstarted := time.Now()\n\tlog.Debugf(\"Starting task %s at %v\", task.Name(), started)\n\tvar err error\n\tdefer afterTask(task, started, &err)\n\ttask.Handler(ctx)\n\treturn true, err\n}\n\n\/\/ Register registers a new task that might be run with Run, but\n\/\/ without scheduling it. If there was previously another task\n\/\/ registered with the same name, it will be deleted.\nfunc Register(m *app.App, task app.Handler, opts *Options) *Task {\n\tt := &Task{App: m, Handler: task, Options: opts}\n\tregistered.Lock()\n\tdefer registered.Unlock()\n\tif registered.tasks == nil {\n\t\tregistered.tasks = make(map[string]*Task)\n\t}\n\tname := t.Name()\n\tif prev := registered.tasks[name]; prev != nil {\n\t\tlog.Debugf(\"There's already a task registered as %s, deleting it\", name)\n\t\tprev.deleteLocked()\n\t}\n\tregistered.tasks[name] = t\n\treturn t\n}\n\n\/\/ Schedule registers and schedules a task to be run at the given\n\/\/ interval. If interval is 0, the task is only registered, but not\n\/\/ scheduled. The now argument indicates if the task should also run\n\/\/ right now (in a goroutine) rather than waiting until interval for\n\/\/ the first run. Schedule returns a Task instance, which might be\n\/\/ used to stop, resume or delete a it.\nfunc Schedule(m *app.App, task app.Handler, opts *Options, interval time.Duration, now bool) *Task {\n\tt := Register(m, task, opts)\n\tt.Interval = interval\n\tsignal.Emit(signal.TASKS_WILL_SCHEDULE_TASK, t)\n\tgo t.Resume(now)\n\treturn t\n}\n\n\/\/ Run starts the given task identifier by it's name, unless\n\/\/ it has been previously registered with Options which\n\/\/ prevent from running it right now (e.g. it was registered\n\/\/ with MaxInstances = 2 and there are already 2 instances running).\n\/\/ The first return argument indicates if the task was executed, while\n\/\/ the second includes any errors which happened while running the task.\nfunc Run(name string) (bool, error) {\n\tregistered.RLock()\n\ttask := registered.tasks[name]\n\tregistered.RUnlock()\n\tif task == nil {\n\t\treturn false, fmt.Errorf(\"there's no task registered with the name %q\", name)\n\t}\n\treturn executeTask(task)\n}\n\n\/\/ RunHandler starts the given task identifier by it's handler. The same\n\/\/ restrictions in Run() apply to this function.\n\/\/ Return values are the same as Run().\nfunc RunHandler(handler app.Handler) (bool, error) {\n\tvar task *Task\n\tp := reflect.ValueOf(handler).Pointer()\n\tregistered.RLock()\n\tfor _, v := range registered.tasks {\n\t\tif reflect.ValueOf(v.Handler).Pointer() == p {\n\t\t\ttask = v\n\t\t\tbreak\n\t\t}\n\t}\n\tregistered.RUnlock()\n\tif task == nil {\n\t\treturn false, fmt.Errorf(\"there's no task registered with the handler %s\", runtimeutil.FuncName(handler))\n\t}\n\treturn executeTask(task)\n}\n\n\/\/ Execute runs the given handler in a task context. If the handler fails\n\/\/ with a panic, it will be returned in the error return value.\nfunc Execute(a *app.App, handler app.Handler) error {\n\tt := &Task{App: a, Handler: handler}\n\t_, err := executeTask(t)\n\treturn err\n}\n<commit_msg>Log errors from tasks<commit_after>\/\/ Package tasks provides functions for scheduling\n\/\/ periodic tasks (e.g. background jobs).\npackage tasks\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"gnd.la\/app\"\n\t\"gnd.la\/log\"\n\t\"gnd.la\/signal\"\n\t\"gnd.la\/util\/internal\/runtimeutil\"\n\t\"reflect\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar running struct {\n\tsync.Mutex\n\ttasks map[*Task]int\n}\n\nvar registered struct {\n\tsync.RWMutex\n\ttasks map[string]*Task\n}\n\n\/\/ Task represent a scheduled task.\ntype Task struct {\n\tApp *app.App\n\tHandler app.Handler\n\tInterval time.Duration\n\tOptions *Options\n\tticker *time.Ticker\n}\n\n\/\/ Stop de-schedules the task. After stopping the task, it\n\/\/ won't be started again but if it's currently running, it will\n\/\/ be completed.\nfunc (t *Task) Stop() {\n\tif t.ticker != nil {\n\t\tt.ticker.Stop()\n\t\tt.ticker = nil\n\t}\n}\n\nfunc (t *Task) Resume(now bool) {\n\tt.Stop()\n\tt.ticker = time.NewTicker(t.Interval)\n\tgo t.execute(now)\n}\n\n\/\/ Name returns the task name.\nfunc (t *Task) Name() string {\n\tif t.Options != nil && t.Options.Name != \"\" {\n\t\treturn t.Options.Name\n\t}\n\treturn runtimeutil.FuncName(t.Handler)\n}\n\n\/\/ Delete stops the task by calling t.Stop() and then removes\n\/\/ it from the internal task register.\nfunc (t *Task) Delete() {\n\tregistered.Lock()\n\tdefer registered.Unlock()\n\tt.deleteLocked()\n}\n\nfunc (t *Task) deleteLocked() {\n\tt.Stop()\n\tdelete(registered.tasks, t.Name())\n}\n\nfunc (t *Task) execute(now bool) {\n\tif now {\n\t\tt.executeTask()\n\t}\n\tfor _ = range t.ticker.C {\n\t\tt.executeTask()\n\t}\n}\n\nfunc (t *Task) executeTask() {\n\tif _, err := executeTask(t); err != nil {\n\t\tlog.Error(err)\n\t}\n}\n\n\/\/ Options are used to specify task options when registering them.\ntype Options struct {\n\t\/\/ Name indicates the task name, used for checking the number\n\t\/\/ of instances running. If the task name is not provided, it's\n\t\/\/ derived from the function. Two tasks with the same name are\n\t\/\/ considered as equal, even if their functions are different.\n\tName string\n\t\/\/ MaxInstances indicates the maximum number of instances of\n\t\/\/ this function that can be simultaneously running. If zero,\n\t\/\/ there is no limit.\n\tMaxInstances int\n}\n\nfunc afterTask(task *Task, started time.Time, terr *error) {\n\tname := task.Name()\n\tif err := recover(); err != nil {\n\t\tskip, stackSkip, _, _ := runtimeutil.GetPanic()\n\t\tvar buf bytes.Buffer\n\t\tfmt.Fprintf(&buf, \"Panic executing task %s: %v\\n\", name, err)\n\t\tstack := runtimeutil.FormatStack(stackSkip)\n\t\tlocation, code := runtimeutil.FormatCaller(skip, 5, true, true)\n\t\tif location != \"\" {\n\t\t\tbuf.WriteString(\"\\n At \")\n\t\t\tbuf.WriteString(location)\n\t\t\tif code != \"\" {\n\t\t\t\tbuf.WriteByte('\\n')\n\t\t\t\tbuf.WriteString(code)\n\t\t\t\tbuf.WriteByte('\\n')\n\t\t\t}\n\t\t}\n\t\tif stack != \"\" {\n\t\t\tbuf.WriteString(\"\\nStack:\\n\")\n\t\t\tbuf.WriteString(stack)\n\t\t}\n\t\t*terr = errors.New(buf.String())\n\t}\n\tend := time.Now()\n\tlog.Debugf(\"Finished task %s at %v (took %v)\", name, end, end.Sub(started))\n\trunning.Lock()\n\tdefer running.Unlock()\n\tc := running.tasks[task]\n\tif c > 1 {\n\t\trunning.tasks[task] = c - 1\n\t} else {\n\t\tdelete(running.tasks, task)\n\t}\n}\n\nfunc canRunTask(task *Task) error {\n\trunning.Lock()\n\tdefer running.Unlock()\n\tc := running.tasks[task]\n\tif task.Options != nil && task.Options.MaxInstances > 0 {\n\t\tif c >= task.Options.MaxInstances {\n\t\t\treturn fmt.Errorf(\"not starting task %s because it's already running %d instances\", task.Name(), c)\n\t\t}\n\t}\n\tif running.tasks == nil {\n\t\trunning.tasks = make(map[*Task]int)\n\t}\n\trunning.tasks[task] = c + 1\n\treturn nil\n}\n\nfunc executeTask(task *Task) (ran bool, err error) {\n\tif err = canRunTask(task); err != nil {\n\t\treturn\n\t}\n\tctx := task.App.NewContext(contextProvider(0))\n\tdefer task.App.CloseContext(ctx)\n\tstarted := time.Now()\n\tlog.Debugf(\"Starting task %s at %v\", task.Name(), started)\n\tran = true\n\tdefer afterTask(task, started, &err)\n\ttask.Handler(ctx)\n\treturn\n}\n\n\/\/ Register registers a new task that might be run with Run, but\n\/\/ without scheduling it. If there was previously another task\n\/\/ registered with the same name, it will be deleted.\nfunc Register(m *app.App, task app.Handler, opts *Options) *Task {\n\tt := &Task{App: m, Handler: task, Options: opts}\n\tregistered.Lock()\n\tdefer registered.Unlock()\n\tif registered.tasks == nil {\n\t\tregistered.tasks = make(map[string]*Task)\n\t}\n\tname := t.Name()\n\tif prev := registered.tasks[name]; prev != nil {\n\t\tlog.Debugf(\"There's already a task registered as %s, deleting it\", name)\n\t\tprev.deleteLocked()\n\t}\n\tregistered.tasks[name] = t\n\treturn t\n}\n\n\/\/ Schedule registers and schedules a task to be run at the given\n\/\/ interval. If interval is 0, the task is only registered, but not\n\/\/ scheduled. The now argument indicates if the task should also run\n\/\/ right now (in a goroutine) rather than waiting until interval for\n\/\/ the first run. Schedule returns a Task instance, which might be\n\/\/ used to stop, resume or delete a it.\nfunc Schedule(m *app.App, task app.Handler, opts *Options, interval time.Duration, now bool) *Task {\n\tt := Register(m, task, opts)\n\tt.Interval = interval\n\tsignal.Emit(signal.TASKS_WILL_SCHEDULE_TASK, t)\n\tgo t.Resume(now)\n\treturn t\n}\n\n\/\/ Run starts the given task identifier by it's name, unless\n\/\/ it has been previously registered with Options which\n\/\/ prevent from running it right now (e.g. it was registered\n\/\/ with MaxInstances = 2 and there are already 2 instances running).\n\/\/ The first return argument indicates if the task was executed, while\n\/\/ the second includes any errors which happened while running the task.\nfunc Run(name string) (bool, error) {\n\tregistered.RLock()\n\ttask := registered.tasks[name]\n\tregistered.RUnlock()\n\tif task == nil {\n\t\treturn false, fmt.Errorf(\"there's no task registered with the name %q\", name)\n\t}\n\treturn executeTask(task)\n}\n\n\/\/ RunHandler starts the given task identifier by it's handler. The same\n\/\/ restrictions in Run() apply to this function.\n\/\/ Return values are the same as Run().\nfunc RunHandler(handler app.Handler) (bool, error) {\n\tvar task *Task\n\tp := reflect.ValueOf(handler).Pointer()\n\tregistered.RLock()\n\tfor _, v := range registered.tasks {\n\t\tif reflect.ValueOf(v.Handler).Pointer() == p {\n\t\t\ttask = v\n\t\t\tbreak\n\t\t}\n\t}\n\tregistered.RUnlock()\n\tif task == nil {\n\t\treturn false, fmt.Errorf(\"there's no task registered with the handler %s\", runtimeutil.FuncName(handler))\n\t}\n\treturn executeTask(task)\n}\n\n\/\/ Execute runs the given handler in a task context. If the handler fails\n\/\/ with a panic, it will be returned in the error return value.\nfunc Execute(a *app.App, handler app.Handler) error {\n\tt := &Task{App: a, Handler: handler}\n\t_, err := executeTask(t)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build plan9\n\npackage runtime\n\nimport \"runtime\/internal\/atomic\"\n\nvar netpollInited uint32\nvar netpollWaiters uint32\n\nvar netpollStubLock mutex\nvar netpollNote note\n\n\/\/ netpollBroken, protected by netpollBrokenLock, avoids a double notewakeup.\nvar netpollBrokenLock mutex\nvar netpollBroken bool\n\nfunc netpollGenericInit() {\n\tatomic.Store(&netpollInited, 1)\n}\n\nfunc netpollBreak() {\n\tlock(&netpollBrokenLock)\n\tbroken := netpollBroken\n\tnetpollBroken = true\n\tif !broken {\n\t\tnotewakeup(&netpollNote)\n\t}\n\tunlock(&netpollBrokenLock)\n}\n\n\/\/ Polls for ready network connections.\n\/\/ Returns list of goroutines that become runnable.\nfunc netpoll(delay int64) gList {\n\t\/\/ Implementation for platforms that do not support\n\t\/\/ integrated network poller.\n\tif delay != 0 {\n\t\t\/\/ This lock ensures that only one goroutine tries to use\n\t\t\/\/ the note. It should normally be completely uncontended.\n\t\tlock(&netpollStubLock)\n\n\t\tlock(&netpollBrokenLock)\n\t\tnoteclear(&netpollNote)\n\t\tnetpollBroken = false\n\t\tunlock(&netpollBrokenLock)\n\n\t\tnotetsleep(&netpollNote, delay)\n\t\tunlock(&netpollStubLock)\n\t}\n\treturn gList{}\n}\n\nfunc netpollinited() bool {\n\treturn atomic.Load(&netpollInited) != 0\n}\n<commit_msg>runtime: avoid lock starvation in TestNetpollBreak on Plan 9<commit_after>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build plan9\n\npackage runtime\n\nimport \"runtime\/internal\/atomic\"\n\nvar netpollInited uint32\nvar netpollWaiters uint32\n\nvar netpollStubLock mutex\nvar netpollNote note\n\n\/\/ netpollBroken, protected by netpollBrokenLock, avoids a double notewakeup.\nvar netpollBrokenLock mutex\nvar netpollBroken bool\n\nfunc netpollGenericInit() {\n\tatomic.Store(&netpollInited, 1)\n}\n\nfunc netpollBreak() {\n\tlock(&netpollBrokenLock)\n\tbroken := netpollBroken\n\tnetpollBroken = true\n\tif !broken {\n\t\tnotewakeup(&netpollNote)\n\t}\n\tunlock(&netpollBrokenLock)\n}\n\n\/\/ Polls for ready network connections.\n\/\/ Returns list of goroutines that become runnable.\nfunc netpoll(delay int64) gList {\n\t\/\/ Implementation for platforms that do not support\n\t\/\/ integrated network poller.\n\tif delay != 0 {\n\t\t\/\/ This lock ensures that only one goroutine tries to use\n\t\t\/\/ the note. It should normally be completely uncontended.\n\t\tlock(&netpollStubLock)\n\n\t\tlock(&netpollBrokenLock)\n\t\tnoteclear(&netpollNote)\n\t\tnetpollBroken = false\n\t\tunlock(&netpollBrokenLock)\n\n\t\tnotetsleep(&netpollNote, delay)\n\t\tunlock(&netpollStubLock)\n\t\t\/\/ Guard against starvation in case the lock is contended\n\t\t\/\/ (eg when running TestNetpollBreak).\n\t\tosyield()\n\t}\n\treturn gList{}\n}\n\nfunc netpollinited() bool {\n\treturn atomic.Load(&netpollInited) != 0\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage runtime_test\n\nimport (\n\t\"bytes\"\n\t\"internal\/testenv\"\n\t\"io\"\n\t\"os\/exec\"\n\t. \"runtime\"\n\t\"runtime\/debug\"\n\t\"strings\"\n\t\"testing\"\n\t\"unsafe\"\n)\n\nfunc init() {\n\t\/\/ We're testing the runtime, so make tracebacks show things\n\t\/\/ in the runtime. This only raises the level, so it won't\n\t\/\/ override GOTRACEBACK=crash from the user.\n\tSetTracebackEnv(\"system\")\n}\n\nvar errf error\n\nfunc errfn() error {\n\treturn errf\n}\n\nfunc errfn1() error {\n\treturn io.EOF\n}\n\nfunc BenchmarkIfaceCmp100(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tfor j := 0; j < 100; j++ {\n\t\t\tif errfn() == io.EOF {\n\t\t\t\tb.Fatal(\"bad comparison\")\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc BenchmarkIfaceCmpNil100(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tfor j := 0; j < 100; j++ {\n\t\t\tif errfn1() == nil {\n\t\t\t\tb.Fatal(\"bad comparison\")\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar efaceCmp1 interface{}\nvar efaceCmp2 interface{}\n\nfunc BenchmarkEfaceCmpDiff(b *testing.B) {\n\tx := 5\n\tefaceCmp1 = &x\n\ty := 6\n\tefaceCmp2 = &y\n\tfor i := 0; i < b.N; i++ {\n\t\tfor j := 0; j < 100; j++ {\n\t\t\tif efaceCmp1 == efaceCmp2 {\n\t\t\t\tb.Fatal(\"bad comparison\")\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc BenchmarkDefer(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tdefer1()\n\t}\n}\n\nfunc defer1() {\n\tdefer func(x, y, z int) {\n\t\tif recover() != nil || x != 1 || y != 2 || z != 3 {\n\t\t\tpanic(\"bad recover\")\n\t\t}\n\t}(1, 2, 3)\n}\n\nfunc BenchmarkDefer10(b *testing.B) {\n\tfor i := 0; i < b.N\/10; i++ {\n\t\tdefer2()\n\t}\n}\n\nfunc defer2() {\n\tfor i := 0; i < 10; i++ {\n\t\tdefer func(x, y, z int) {\n\t\t\tif recover() != nil || x != 1 || y != 2 || z != 3 {\n\t\t\t\tpanic(\"bad recover\")\n\t\t\t}\n\t\t}(1, 2, 3)\n\t}\n}\n\nfunc BenchmarkDeferMany(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tdefer func(x, y, z int) {\n\t\t\tif recover() != nil || x != 1 || y != 2 || z != 3 {\n\t\t\t\tpanic(\"bad recover\")\n\t\t\t}\n\t\t}(1, 2, 3)\n\t}\n}\n\n\/\/ golang.org\/issue\/7063\nfunc TestStopCPUProfilingWithProfilerOff(t *testing.T) {\n\tSetCPUProfileRate(0)\n}\n\n\/\/ Addresses to test for faulting behavior.\n\/\/ This is less a test of SetPanicOnFault and more a check that\n\/\/ the operating system and the runtime can process these faults\n\/\/ correctly. That is, we're indirectly testing that without SetPanicOnFault\n\/\/ these would manage to turn into ordinary crashes.\n\/\/ Note that these are truncated on 32-bit systems, so the bottom 32 bits\n\/\/ of the larger addresses must themselves be invalid addresses.\n\/\/ We might get unlucky and the OS might have mapped one of these\n\/\/ addresses, but probably not: they're all in the first page, very high\n\/\/ addresses that normally an OS would reserve for itself, or malformed\n\/\/ addresses. Even so, we might have to remove one or two on different\n\/\/ systems. We will see.\n\nvar faultAddrs = []uint64{\n\t\/\/ low addresses\n\t0,\n\t1,\n\t0xfff,\n\t\/\/ high (kernel) addresses\n\t\/\/ or else malformed.\n\t0xffffffffffffffff,\n\t0xfffffffffffff001,\n\t0xffffffffffff0001,\n\t0xfffffffffff00001,\n\t0xffffffffff000001,\n\t0xfffffffff0000001,\n\t0xffffffff00000001,\n\t0xfffffff000000001,\n\t0xffffff0000000001,\n\t0xfffff00000000001,\n\t0xffff000000000001,\n\t0xfff0000000000001,\n\t0xff00000000000001,\n\t0xf000000000000001,\n\t0x8000000000000001,\n}\n\nfunc TestSetPanicOnFault(t *testing.T) {\n\told := debug.SetPanicOnFault(true)\n\tdefer debug.SetPanicOnFault(old)\n\n\tnfault := 0\n\tfor _, addr := range faultAddrs {\n\t\ttestSetPanicOnFault(t, uintptr(addr), &nfault)\n\t}\n\tif nfault == 0 {\n\t\tt.Fatalf(\"none of the addresses faulted\")\n\t}\n}\n\nfunc testSetPanicOnFault(t *testing.T, addr uintptr, nfault *int) {\n\tif GOOS == \"nacl\" {\n\t\tt.Skip(\"nacl doesn't seem to fault on high addresses\")\n\t}\n\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\t*nfault++\n\t\t}\n\t}()\n\n\t\/\/ The read should fault, except that sometimes we hit\n\t\/\/ addresses that have had C or kernel pages mapped there\n\t\/\/ readable by user code. So just log the content.\n\t\/\/ If no addresses fault, we'll fail the test.\n\tv := *(*byte)(unsafe.Pointer(addr))\n\tt.Logf(\"addr %#x: %#x\\n\", addr, v)\n}\n\nfunc eqstring_generic(s1, s2 string) bool {\n\tif len(s1) != len(s2) {\n\t\treturn false\n\t}\n\t\/\/ optimization in assembly versions:\n\t\/\/ if s1.str == s2.str { return true }\n\tfor i := 0; i < len(s1); i++ {\n\t\tif s1[i] != s2[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc TestEqString(t *testing.T) {\n\t\/\/ This isn't really an exhaustive test of == on strings, it's\n\t\/\/ just a convenient way of documenting (via eqstring_generic)\n\t\/\/ what == does.\n\ts := []string{\n\t\t\"\",\n\t\t\"a\",\n\t\t\"c\",\n\t\t\"aaa\",\n\t\t\"ccc\",\n\t\t\"cccc\"[:3], \/\/ same contents, different string\n\t\t\"1234567890\",\n\t}\n\tfor _, s1 := range s {\n\t\tfor _, s2 := range s {\n\t\t\tx := s1 == s2\n\t\t\ty := eqstring_generic(s1, s2)\n\t\t\tif x != y {\n\t\t\t\tt.Errorf(`(\"%s\" == \"%s\") = %t, want %t`, s1, s2, x, y)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestTrailingZero(t *testing.T) {\n\t\/\/ make sure we add padding for structs with trailing zero-sized fields\n\ttype T1 struct {\n\t\tn int32\n\t\tz [0]byte\n\t}\n\tif unsafe.Sizeof(T1{}) != 8 {\n\t\tt.Errorf(\"sizeof(%#v)==%d, want 8\", T1{}, unsafe.Sizeof(T1{}))\n\t}\n\ttype T2 struct {\n\t\tn int64\n\t\tz struct{}\n\t}\n\tif unsafe.Sizeof(T2{}) != 8+unsafe.Sizeof(Uintreg(0)) {\n\t\tt.Errorf(\"sizeof(%#v)==%d, want %d\", T2{}, unsafe.Sizeof(T2{}), 8+unsafe.Sizeof(Uintreg(0)))\n\t}\n\ttype T3 struct {\n\t\tn byte\n\t\tz [4]struct{}\n\t}\n\tif unsafe.Sizeof(T3{}) != 2 {\n\t\tt.Errorf(\"sizeof(%#v)==%d, want 2\", T3{}, unsafe.Sizeof(T3{}))\n\t}\n\t\/\/ make sure padding can double for both zerosize and alignment\n\ttype T4 struct {\n\t\ta int32\n\t\tb int16\n\t\tc int8\n\t\tz struct{}\n\t}\n\tif unsafe.Sizeof(T4{}) != 8 {\n\t\tt.Errorf(\"sizeof(%#v)==%d, want 8\", T4{}, unsafe.Sizeof(T4{}))\n\t}\n\t\/\/ make sure we don't pad a zero-sized thing\n\ttype T5 struct {\n\t}\n\tif unsafe.Sizeof(T5{}) != 0 {\n\t\tt.Errorf(\"sizeof(%#v)==%d, want 0\", T5{}, unsafe.Sizeof(T5{}))\n\t}\n}\n\nfunc TestBadOpen(t *testing.T) {\n\tif GOOS == \"windows\" || GOOS == \"nacl\" {\n\t\tt.Skip(\"skipping OS that doesn't have open\/read\/write\/close\")\n\t}\n\t\/\/ make sure we get the correct error code if open fails. Same for\n\t\/\/ read\/write\/close on the resulting -1 fd. See issue 10052.\n\tnonfile := []byte(\"\/notreallyafile\")\n\tfd := Open(&nonfile[0], 0, 0)\n\tif fd != -1 {\n\t\tt.Errorf(\"open(\\\"%s\\\")=%d, want -1\", string(nonfile), fd)\n\t}\n\tvar buf [32]byte\n\tr := Read(-1, unsafe.Pointer(&buf[0]), int32(len(buf)))\n\tif r != -1 {\n\t\tt.Errorf(\"read()=%d, want -1\", r)\n\t}\n\tw := Write(^uintptr(0), unsafe.Pointer(&buf[0]), int32(len(buf)))\n\tif w != -1 {\n\t\tt.Errorf(\"write()=%d, want -1\", w)\n\t}\n\tc := Close(-1)\n\tif c != -1 {\n\t\tt.Errorf(\"close()=%d, want -1\", c)\n\t}\n}\n\nfunc TestAppendGrowth(t *testing.T) {\n\tvar x []int64\n\tcheck := func(want int) {\n\t\tif cap(x) != want {\n\t\t\tt.Errorf(\"len=%d, cap=%d, want cap=%d\", len(x), cap(x), want)\n\t\t}\n\t}\n\n\tcheck(0)\n\twant := 1\n\tfor i := 1; i <= 100; i++ {\n\t\tx = append(x, 1)\n\t\tcheck(want)\n\t\tif i&(i-1) == 0 {\n\t\t\twant = 2 * i\n\t\t}\n\t}\n}\n\nvar One = []int64{1}\n\nfunc TestAppendSliceGrowth(t *testing.T) {\n\tvar x []int64\n\tcheck := func(want int) {\n\t\tif cap(x) != want {\n\t\t\tt.Errorf(\"len=%d, cap=%d, want cap=%d\", len(x), cap(x), want)\n\t\t}\n\t}\n\n\tcheck(0)\n\twant := 1\n\tfor i := 1; i <= 100; i++ {\n\t\tx = append(x, One...)\n\t\tcheck(want)\n\t\tif i&(i-1) == 0 {\n\t\t\twant = 2 * i\n\t\t}\n\t}\n}\n\nfunc TestGoroutineProfileTrivial(t *testing.T) {\n\t\/\/ Calling GoroutineProfile twice in a row should find the same number of goroutines,\n\t\/\/ but it's possible there are goroutines just about to exit, so we might end up\n\t\/\/ with fewer in the second call. Try a few times; it should converge once those\n\t\/\/ zombies are gone.\n\tfor i := 0; ; i++ {\n\t\tn1, ok := GoroutineProfile(nil) \/\/ should fail, there's at least 1 goroutine\n\t\tif n1 < 1 || ok {\n\t\t\tt.Fatalf(\"GoroutineProfile(nil) = %d, %v, want >0, false\", n1, ok)\n\t\t}\n\t\tn2, ok := GoroutineProfile(make([]StackRecord, n1))\n\t\tif n2 == n1 && ok {\n\t\t\tbreak\n\t\t}\n\t\tt.Logf(\"GoroutineProfile(%d) = %d, %v, want %d, true\", n1, n2, ok, n1)\n\t\tif i >= 10 {\n\t\t\tt.Fatalf(\"GoroutineProfile not converging\")\n\t\t}\n\t}\n}\n\nfunc TestVersion(t *testing.T) {\n\t\/\/ Test that version does not contain \\r or \\n.\n\tvers := Version()\n\tif strings.Contains(vers, \"\\r\") || strings.Contains(vers, \"\\n\") {\n\t\tt.Fatalf(\"cr\/nl in version: %q\", vers)\n\t}\n}\n\n\/\/ TestIntendedInlining tests that specific runtime functions are inlined.\n\/\/ This allows refactoring for code clarity and re-use without fear that\n\/\/ changes to the compiler will cause silent performance regressions.\nfunc TestIntendedInlining(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping in short mode\")\n\t}\n\ttestenv.MustHaveGoRun(t)\n\tt.Parallel()\n\n\t\/\/ want is the list of function names that should be inlined.\n\twant := []string{\"tophash\", \"add\", \"(*bmap).keys\"}\n\n\tm := make(map[string]bool, len(want))\n\tfor _, s := range want {\n\t\tm[s] = true\n\t}\n\n\tcmd := testEnv(exec.Command(testenv.GoToolPath(t), \"build\", \"-gcflags=-m\", \"runtime\"))\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Logf(\"%s\", out)\n\t\tt.Fatal(err)\n\t}\n\tlines := bytes.Split(out, []byte{'\\n'})\n\tfor _, x := range lines {\n\t\tf := bytes.Split(x, []byte(\": can inline \"))\n\t\tif len(f) < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tfn := bytes.TrimSpace(f[1])\n\t\tdelete(m, string(fn))\n\t}\n\n\tfor s := range m {\n\t\tt.Errorf(\"function %s not inlined\", s)\n\t}\n}\n<commit_msg>runtime: always rebuild in TestIntendedInlining<commit_after>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage runtime_test\n\nimport (\n\t\"bytes\"\n\t\"internal\/testenv\"\n\t\"io\"\n\t\"os\/exec\"\n\t. \"runtime\"\n\t\"runtime\/debug\"\n\t\"strings\"\n\t\"testing\"\n\t\"unsafe\"\n)\n\nfunc init() {\n\t\/\/ We're testing the runtime, so make tracebacks show things\n\t\/\/ in the runtime. This only raises the level, so it won't\n\t\/\/ override GOTRACEBACK=crash from the user.\n\tSetTracebackEnv(\"system\")\n}\n\nvar errf error\n\nfunc errfn() error {\n\treturn errf\n}\n\nfunc errfn1() error {\n\treturn io.EOF\n}\n\nfunc BenchmarkIfaceCmp100(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tfor j := 0; j < 100; j++ {\n\t\t\tif errfn() == io.EOF {\n\t\t\t\tb.Fatal(\"bad comparison\")\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc BenchmarkIfaceCmpNil100(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tfor j := 0; j < 100; j++ {\n\t\t\tif errfn1() == nil {\n\t\t\t\tb.Fatal(\"bad comparison\")\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar efaceCmp1 interface{}\nvar efaceCmp2 interface{}\n\nfunc BenchmarkEfaceCmpDiff(b *testing.B) {\n\tx := 5\n\tefaceCmp1 = &x\n\ty := 6\n\tefaceCmp2 = &y\n\tfor i := 0; i < b.N; i++ {\n\t\tfor j := 0; j < 100; j++ {\n\t\t\tif efaceCmp1 == efaceCmp2 {\n\t\t\t\tb.Fatal(\"bad comparison\")\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc BenchmarkDefer(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tdefer1()\n\t}\n}\n\nfunc defer1() {\n\tdefer func(x, y, z int) {\n\t\tif recover() != nil || x != 1 || y != 2 || z != 3 {\n\t\t\tpanic(\"bad recover\")\n\t\t}\n\t}(1, 2, 3)\n}\n\nfunc BenchmarkDefer10(b *testing.B) {\n\tfor i := 0; i < b.N\/10; i++ {\n\t\tdefer2()\n\t}\n}\n\nfunc defer2() {\n\tfor i := 0; i < 10; i++ {\n\t\tdefer func(x, y, z int) {\n\t\t\tif recover() != nil || x != 1 || y != 2 || z != 3 {\n\t\t\t\tpanic(\"bad recover\")\n\t\t\t}\n\t\t}(1, 2, 3)\n\t}\n}\n\nfunc BenchmarkDeferMany(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tdefer func(x, y, z int) {\n\t\t\tif recover() != nil || x != 1 || y != 2 || z != 3 {\n\t\t\t\tpanic(\"bad recover\")\n\t\t\t}\n\t\t}(1, 2, 3)\n\t}\n}\n\n\/\/ golang.org\/issue\/7063\nfunc TestStopCPUProfilingWithProfilerOff(t *testing.T) {\n\tSetCPUProfileRate(0)\n}\n\n\/\/ Addresses to test for faulting behavior.\n\/\/ This is less a test of SetPanicOnFault and more a check that\n\/\/ the operating system and the runtime can process these faults\n\/\/ correctly. That is, we're indirectly testing that without SetPanicOnFault\n\/\/ these would manage to turn into ordinary crashes.\n\/\/ Note that these are truncated on 32-bit systems, so the bottom 32 bits\n\/\/ of the larger addresses must themselves be invalid addresses.\n\/\/ We might get unlucky and the OS might have mapped one of these\n\/\/ addresses, but probably not: they're all in the first page, very high\n\/\/ addresses that normally an OS would reserve for itself, or malformed\n\/\/ addresses. Even so, we might have to remove one or two on different\n\/\/ systems. We will see.\n\nvar faultAddrs = []uint64{\n\t\/\/ low addresses\n\t0,\n\t1,\n\t0xfff,\n\t\/\/ high (kernel) addresses\n\t\/\/ or else malformed.\n\t0xffffffffffffffff,\n\t0xfffffffffffff001,\n\t0xffffffffffff0001,\n\t0xfffffffffff00001,\n\t0xffffffffff000001,\n\t0xfffffffff0000001,\n\t0xffffffff00000001,\n\t0xfffffff000000001,\n\t0xffffff0000000001,\n\t0xfffff00000000001,\n\t0xffff000000000001,\n\t0xfff0000000000001,\n\t0xff00000000000001,\n\t0xf000000000000001,\n\t0x8000000000000001,\n}\n\nfunc TestSetPanicOnFault(t *testing.T) {\n\told := debug.SetPanicOnFault(true)\n\tdefer debug.SetPanicOnFault(old)\n\n\tnfault := 0\n\tfor _, addr := range faultAddrs {\n\t\ttestSetPanicOnFault(t, uintptr(addr), &nfault)\n\t}\n\tif nfault == 0 {\n\t\tt.Fatalf(\"none of the addresses faulted\")\n\t}\n}\n\nfunc testSetPanicOnFault(t *testing.T, addr uintptr, nfault *int) {\n\tif GOOS == \"nacl\" {\n\t\tt.Skip(\"nacl doesn't seem to fault on high addresses\")\n\t}\n\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\t*nfault++\n\t\t}\n\t}()\n\n\t\/\/ The read should fault, except that sometimes we hit\n\t\/\/ addresses that have had C or kernel pages mapped there\n\t\/\/ readable by user code. So just log the content.\n\t\/\/ If no addresses fault, we'll fail the test.\n\tv := *(*byte)(unsafe.Pointer(addr))\n\tt.Logf(\"addr %#x: %#x\\n\", addr, v)\n}\n\nfunc eqstring_generic(s1, s2 string) bool {\n\tif len(s1) != len(s2) {\n\t\treturn false\n\t}\n\t\/\/ optimization in assembly versions:\n\t\/\/ if s1.str == s2.str { return true }\n\tfor i := 0; i < len(s1); i++ {\n\t\tif s1[i] != s2[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc TestEqString(t *testing.T) {\n\t\/\/ This isn't really an exhaustive test of == on strings, it's\n\t\/\/ just a convenient way of documenting (via eqstring_generic)\n\t\/\/ what == does.\n\ts := []string{\n\t\t\"\",\n\t\t\"a\",\n\t\t\"c\",\n\t\t\"aaa\",\n\t\t\"ccc\",\n\t\t\"cccc\"[:3], \/\/ same contents, different string\n\t\t\"1234567890\",\n\t}\n\tfor _, s1 := range s {\n\t\tfor _, s2 := range s {\n\t\t\tx := s1 == s2\n\t\t\ty := eqstring_generic(s1, s2)\n\t\t\tif x != y {\n\t\t\t\tt.Errorf(`(\"%s\" == \"%s\") = %t, want %t`, s1, s2, x, y)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestTrailingZero(t *testing.T) {\n\t\/\/ make sure we add padding for structs with trailing zero-sized fields\n\ttype T1 struct {\n\t\tn int32\n\t\tz [0]byte\n\t}\n\tif unsafe.Sizeof(T1{}) != 8 {\n\t\tt.Errorf(\"sizeof(%#v)==%d, want 8\", T1{}, unsafe.Sizeof(T1{}))\n\t}\n\ttype T2 struct {\n\t\tn int64\n\t\tz struct{}\n\t}\n\tif unsafe.Sizeof(T2{}) != 8+unsafe.Sizeof(Uintreg(0)) {\n\t\tt.Errorf(\"sizeof(%#v)==%d, want %d\", T2{}, unsafe.Sizeof(T2{}), 8+unsafe.Sizeof(Uintreg(0)))\n\t}\n\ttype T3 struct {\n\t\tn byte\n\t\tz [4]struct{}\n\t}\n\tif unsafe.Sizeof(T3{}) != 2 {\n\t\tt.Errorf(\"sizeof(%#v)==%d, want 2\", T3{}, unsafe.Sizeof(T3{}))\n\t}\n\t\/\/ make sure padding can double for both zerosize and alignment\n\ttype T4 struct {\n\t\ta int32\n\t\tb int16\n\t\tc int8\n\t\tz struct{}\n\t}\n\tif unsafe.Sizeof(T4{}) != 8 {\n\t\tt.Errorf(\"sizeof(%#v)==%d, want 8\", T4{}, unsafe.Sizeof(T4{}))\n\t}\n\t\/\/ make sure we don't pad a zero-sized thing\n\ttype T5 struct {\n\t}\n\tif unsafe.Sizeof(T5{}) != 0 {\n\t\tt.Errorf(\"sizeof(%#v)==%d, want 0\", T5{}, unsafe.Sizeof(T5{}))\n\t}\n}\n\nfunc TestBadOpen(t *testing.T) {\n\tif GOOS == \"windows\" || GOOS == \"nacl\" {\n\t\tt.Skip(\"skipping OS that doesn't have open\/read\/write\/close\")\n\t}\n\t\/\/ make sure we get the correct error code if open fails. Same for\n\t\/\/ read\/write\/close on the resulting -1 fd. See issue 10052.\n\tnonfile := []byte(\"\/notreallyafile\")\n\tfd := Open(&nonfile[0], 0, 0)\n\tif fd != -1 {\n\t\tt.Errorf(\"open(\\\"%s\\\")=%d, want -1\", string(nonfile), fd)\n\t}\n\tvar buf [32]byte\n\tr := Read(-1, unsafe.Pointer(&buf[0]), int32(len(buf)))\n\tif r != -1 {\n\t\tt.Errorf(\"read()=%d, want -1\", r)\n\t}\n\tw := Write(^uintptr(0), unsafe.Pointer(&buf[0]), int32(len(buf)))\n\tif w != -1 {\n\t\tt.Errorf(\"write()=%d, want -1\", w)\n\t}\n\tc := Close(-1)\n\tif c != -1 {\n\t\tt.Errorf(\"close()=%d, want -1\", c)\n\t}\n}\n\nfunc TestAppendGrowth(t *testing.T) {\n\tvar x []int64\n\tcheck := func(want int) {\n\t\tif cap(x) != want {\n\t\t\tt.Errorf(\"len=%d, cap=%d, want cap=%d\", len(x), cap(x), want)\n\t\t}\n\t}\n\n\tcheck(0)\n\twant := 1\n\tfor i := 1; i <= 100; i++ {\n\t\tx = append(x, 1)\n\t\tcheck(want)\n\t\tif i&(i-1) == 0 {\n\t\t\twant = 2 * i\n\t\t}\n\t}\n}\n\nvar One = []int64{1}\n\nfunc TestAppendSliceGrowth(t *testing.T) {\n\tvar x []int64\n\tcheck := func(want int) {\n\t\tif cap(x) != want {\n\t\t\tt.Errorf(\"len=%d, cap=%d, want cap=%d\", len(x), cap(x), want)\n\t\t}\n\t}\n\n\tcheck(0)\n\twant := 1\n\tfor i := 1; i <= 100; i++ {\n\t\tx = append(x, One...)\n\t\tcheck(want)\n\t\tif i&(i-1) == 0 {\n\t\t\twant = 2 * i\n\t\t}\n\t}\n}\n\nfunc TestGoroutineProfileTrivial(t *testing.T) {\n\t\/\/ Calling GoroutineProfile twice in a row should find the same number of goroutines,\n\t\/\/ but it's possible there are goroutines just about to exit, so we might end up\n\t\/\/ with fewer in the second call. Try a few times; it should converge once those\n\t\/\/ zombies are gone.\n\tfor i := 0; ; i++ {\n\t\tn1, ok := GoroutineProfile(nil) \/\/ should fail, there's at least 1 goroutine\n\t\tif n1 < 1 || ok {\n\t\t\tt.Fatalf(\"GoroutineProfile(nil) = %d, %v, want >0, false\", n1, ok)\n\t\t}\n\t\tn2, ok := GoroutineProfile(make([]StackRecord, n1))\n\t\tif n2 == n1 && ok {\n\t\t\tbreak\n\t\t}\n\t\tt.Logf(\"GoroutineProfile(%d) = %d, %v, want %d, true\", n1, n2, ok, n1)\n\t\tif i >= 10 {\n\t\t\tt.Fatalf(\"GoroutineProfile not converging\")\n\t\t}\n\t}\n}\n\nfunc TestVersion(t *testing.T) {\n\t\/\/ Test that version does not contain \\r or \\n.\n\tvers := Version()\n\tif strings.Contains(vers, \"\\r\") || strings.Contains(vers, \"\\n\") {\n\t\tt.Fatalf(\"cr\/nl in version: %q\", vers)\n\t}\n}\n\n\/\/ TestIntendedInlining tests that specific runtime functions are inlined.\n\/\/ This allows refactoring for code clarity and re-use without fear that\n\/\/ changes to the compiler will cause silent performance regressions.\nfunc TestIntendedInlining(t *testing.T) {\n\tif testing.Short() && testenv.Builder() == \"\" {\n\t\tt.Skip(\"skipping in short mode\")\n\t}\n\ttestenv.MustHaveGoRun(t)\n\tt.Parallel()\n\n\t\/\/ want is the list of function names that should be inlined.\n\twant := []string{\"tophash\", \"add\", \"(*bmap).keys\"}\n\n\tm := make(map[string]bool, len(want))\n\tfor _, s := range want {\n\t\tm[s] = true\n\t}\n\n\tcmd := testEnv(exec.Command(testenv.GoToolPath(t), \"build\", \"-a\", \"-gcflags=-m\", \"runtime\"))\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Logf(\"%s\", out)\n\t\tt.Fatal(err)\n\t}\n\tlines := bytes.Split(out, []byte{'\\n'})\n\tfor _, x := range lines {\n\t\tf := bytes.Split(x, []byte(\": can inline \"))\n\t\tif len(f) < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tfn := bytes.TrimSpace(f[1])\n\t\tdelete(m, string(fn))\n\t}\n\n\tfor s := range m {\n\t\tt.Errorf(\"function %s not inlined\", s)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package storage\n\nimport (\n\t\"errors\"\n\t\"gopkg.in\/mgo.v2\"\n)\n\nfunc init() {\n\tdbI, err := GetDB()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdb, ok := dbI.(*MongoDatabase)\n\tif !ok {\n\t\tpanic(errors.New(\"could not de-interface Database object\"))\n\t}\n\t\/*\n\t With DropDups set to true, documents with the\n\t same key as a previously indexed one will be dropped rather than an\n\t error returned.\n\n\t If Background is true, other connections will be allowed to proceed\n\t using the collection without the index while it's being built. Note that\n\t the session executing EnsureIndex will be blocked for as long as it\n\t takes for the index to be built.\n\n\t If Sparse is true, only documents containing the provided Key fields\n\t will be included in the index. When using a sparse index for sorting,\n\t only indexed documents will be returned.\n\t*\/\n\tdb.C(C_user).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"email\"},\n\t\tUnique: true,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\n\tdb.C(C_group).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"ownerEmail\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: true,\n\t})\n\tdb.C(C_group).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"readAccessEmails\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: true,\n\t})\n\tdb.C(C_eventMeta).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"ownerEmail\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tdb.C(C_eventMeta).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"groupId\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tdb.C(C_eventMeta).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"creationTime\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tdb.C(C_attending).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"eventId\", \"email\"},\n\t\tUnique: true,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tdb.C(C_attending).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"eventId\", \"attending\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tdb.C(C_attending).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"eventId\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\t\/*db.C(C_eventMetaChangeLog).EnsureIndex(mgo.Index{\n\t Key: []string{\"time\"},\n\t Unique: false,\n\t DropDups: false,\n\t Background: false,\n\t Sparse: false,\n\t})*\/\n\t\/*db.C(C_eventMetaChangeLog).EnsureIndex(mgo.Index{\n\t Key: []string{\"email\"},\n\t Unique: false,\n\t DropDups: false,\n\t Background: false,\n\t Sparse: false,\n\t})*\/\n\t\/*db.C(C_eventMetaChangeLog).EnsureIndex(mgo.Index{\n\t Key: []string{\"eventId\"},\n\t Unique: false,\n\t DropDups: false,\n\t Background: false,\n\t Sparse: false,\n\t})*\/\n\t\/*db.C(C_eventMetaChangeLog).EnsureIndex(mgo.Index{\n\t Key: []string{\"eventType\"},\n\t Unique: false,\n\t DropDups: false,\n\t Background: false,\n\t Sparse: false,\n\t})*\/\n\t\/*db.C(C_eventMetaChangeLog).EnsureIndex(mgo.Index{\n\t Key: []string{\"ownerEmail\"},\n\t Unique: false,\n\t DropDups: false,\n\t Background: false,\n\t Sparse: false,\n\t})*\/\n\t\/*db.C(C_eventMetaChangeLog).EnsureIndex(mgo.Index{\n\t Key: []string{\"groupId\"},\n\t Unique: false,\n\t DropDups: false,\n\t Background: false,\n\t Sparse: false,\n\t})*\/\n\t\/*db.C(C_eventMetaChangeLog).EnsureIndex(mgo.Index{\n\t Key: []string{\"accessEmails\"},\n\t Unique: false,\n\t DropDups: false,\n\t Background: false,\n\t Sparse: false,\n\t})*\/\n\tdb.C(C_revision).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"sha1\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tdb.C(C_revision).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"eventId\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tdb.C(C_revision).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"time\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\n\tdb.C(C_eventData).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"sha1\"},\n\t\tUnique: true,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\t\/*for _, colName := range []string{\n\t \"events_allDayTask\",\n\t \"events_custom\",\n\t \"events_dailyNote\",\n\t \"events_largeScale\",\n\t \"events_lifeTime\",\n\t \"events_monthly\",\n\t \"events_task\",\n\t \"events_universityClass\",\n\t \"events_universityExam\",\n\t \"events_weekly\",\n\t \"events_yearly\",\n\t } {\n\t db.C(colName).EnsureIndex(mgo.Index{\n\t Key: []string{\"sha1\"},\n\t Unique: true,\n\t DropDups: false,\n\t Background: false,\n\t Sparse: false,\n\t })\n\t }*\/\n\n}\n<commit_msg>storage\/indexes.go: panic if error happens, and fix space-indents in comments<commit_after>package storage\n\nimport (\n\t\"errors\"\n\t\"gopkg.in\/mgo.v2\"\n)\n\nfunc init() {\n\tdbI, err := GetDB()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdb, ok := dbI.(*MongoDatabase)\n\tif !ok {\n\t\tpanic(errors.New(\"could not de-interface Database object\"))\n\t}\n\t\/*\n\t\tWith DropDups set to true, documents with the\n\t\tsame key as a previously indexed one will be dropped rather than an\n\t\terror returned.\n\n\t\tIf Background is true, other connections will be allowed to proceed\n\t\tusing the collection without the index while it's being built. Note that\n\t\tthe session executing EnsureIndex will be blocked for as long as it\n\t\ttakes for the index to be built.\n\n\t\tIf Sparse is true, only documents containing the provided Key fields\n\t\twill be included in the index. When using a sparse index for sorting,\n\t\tonly indexed documents will be returned.\n\t*\/\n\terr = db.C(C_user).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"email\"},\n\t\tUnique: true,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\terr = db.C(C_group).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"ownerEmail\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: true,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = db.C(C_group).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"readAccessEmails\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: true,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = db.C(C_eventMeta).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"ownerEmail\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = db.C(C_eventMeta).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"groupId\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = db.C(C_eventMeta).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"creationTime\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = db.C(C_attending).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"eventId\", \"email\"},\n\t\tUnique: true,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = db.C(C_attending).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"eventId\", \"attending\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = db.C(C_attending).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"eventId\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t\/*db.C(C_eventMetaChangeLog).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"time\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}*\/\n\t\/*db.C(C_eventMetaChangeLog).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"email\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}*\/\n\t\/*db.C(C_eventMetaChangeLog).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"eventId\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}*\/\n\t\/*db.C(C_eventMetaChangeLog).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"eventType\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}*\/\n\t\/*db.C(C_eventMetaChangeLog).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"ownerEmail\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}*\/\n\t\/*db.C(C_eventMetaChangeLog).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"groupId\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}*\/\n\t\/*db.C(C_eventMetaChangeLog).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"accessEmails\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}*\/\n\terr = db.C(C_revision).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"sha1\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = db.C(C_revision).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"eventId\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\terr = db.C(C_revision).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"time\"},\n\t\tUnique: false,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\terr = db.C(C_eventData).EnsureIndex(mgo.Index{\n\t\tKey: []string{\"sha1\"},\n\t\tUnique: true,\n\t\tDropDups: false,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t\/*\n\t\tfor _, colName := range []string{\n\t\t\t\"events_allDayTask\",\n\t\t\t\"events_custom\",\n\t\t\t\"events_dailyNote\",\n\t\t\t\"events_largeScale\",\n\t\t\t\"events_lifeTime\",\n\t\t\t\"events_monthly\",\n\t\t\t\"events_task\",\n\t\t\t\"events_universityClass\",\n\t\t\t\"events_universityExam\",\n\t\t\t\"events_weekly\",\n\t\t\t\"events_yearly\",\n\t\t} {\n\t\t\terr = db.C(colName).EnsureIndex(mgo.Index{\n\t\t\t\tKey: []string{\"sha1\"},\n\t\t\t\tUnique: true,\n\t\t\t\tDropDups: false,\n\t\t\t\tBackground: false,\n\t\t\t\tSparse: false,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}\n\t*\/\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"strconv\"\n\n\t\"github.com\/shirou\/gopsutil\/process\"\n)\n\n\/\/ procinfo represent the processes informations\ntype procinfo struct {\n\ttotal string\n\trunning string\n\t\/\/TODO add zombies threads ?\n}\n\n\/\/ Get informations about the processes\nfunc getProcinfo() procinfo {\n\tret := procinfo{}\n\n\tpids, err := processPids() \/\/TODO replace by something like psutil.process_iter() if available in gopsutils\n\tif err == nil {\n\t\trun := 0\n\t\tfor i := range pids {\n\t\t\tproc, err := processNewProcess(pids[i])\n\t\t\tif err == nil { \/\/TODO rename `err` variables names to avoid confusion ?\n\t\t\t\tstatus, err := proc.Status()\n\t\t\t\tif err == nil {\n\t\t\t\t\tif status == \"R\" { \/\/ \"R\" for running process\n\t\t\t\t\t\trun++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tret.total = strconv.Itoa(len(pids))\n\t\tret.running = strconv.Itoa(run)\n\t}\n\treturn ret\n}\n\n\/\/ wrap `process.Pids()` in an unexported variable for testability\nvar processPids = func() ([]int32, error) {\n\treturn process.Pids()\n}\n\n\/\/ wrap `process.NewProcess()` in an unexported variable for testability\nvar processNewProcess = func(pid int32) (*process.Process, error) {\n\treturn process.NewProcess(pid)\n}\n<commit_msg>Code refactoring<commit_after>package main\n\nimport (\n\t\"strconv\"\n\n\t\"github.com\/shirou\/gopsutil\/process\"\n)\n\n\/\/ procinfo represent the processes informations\ntype procinfo struct {\n\ttotal string\n\trunning string\n\t\/\/TODO add zombies threads ?\n}\n\n\/\/ Get informations about the processes\nfunc getProcinfo() procinfo {\n\tret := procinfo{}\n\n\tpids, err := processPids() \/\/TODO replace by something like psutil.process_iter() if available in gopsutils\n\tif err == nil {\n\t\trun := 0\n\t\tfor i := range pids {\n\t\t\tproc, err := process.NewProcess(pids[i])\n\t\t\tif err == nil { \/\/TODO rename `err` variables names to avoid confusion ?\n\t\t\t\tstatus, err := procStatus(proc)\n\t\t\t\tif err == nil {\n\t\t\t\t\tif status == \"R\" { \/\/ \"R\" for running process\n\t\t\t\t\t\trun++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tret.total = strconv.Itoa(len(pids))\n\t\tret.running = strconv.Itoa(run)\n\t}\n\treturn ret\n}\n\n\/\/ wrap `process.Pids()` in an unexported variable for testability\nvar processPids = func() ([]int32, error) {\n\treturn process.Pids()\n}\n\n\/\/ wrap `process.Status()` in an unexported variable for testability\nvar procStatus = func(proc *process.Process) (string, error) {\n\treturn proc.Status()\n}\n<|endoftext|>"} {"text":"<commit_before>package procspy\n\nimport (\n\t\"bytes\"\n\t\"net\"\n)\n\ntype ProcNet struct {\n\tb []byte\n\tc Connection\n\twantedState uint\n\tbytesLocal, bytesRemote [16]byte\n}\n\nfunc NewProcNet(b []byte, wantedState uint) *ProcNet {\n\t\/\/ Skip header\n\tb = nextLine(b)\n\n\treturn &ProcNet{\n\t\tb: b,\n\t\tc: Connection{},\n\t\twantedState: wantedState,\n\t}\n}\n\nfunc (p *ProcNet) Next() *Connection {\nAGAIN:\n\tif len(p.b) == 0 {\n\t\treturn nil\n\t}\n\n\tvar (\n\t\tlocal, remote, state, inode []byte\n\t)\n\t_, p.b = nextField(p.b) \/\/ 'sl' column\n\tlocal, p.b = nextField(p.b)\n\tremote, p.b = nextField(p.b)\n\tstate, p.b = nextField(p.b)\n\tif parseHex(state) != p.wantedState {\n\t\tp.b = nextLine(p.b)\n\t\tgoto AGAIN\n\t}\n\t_, p.b = nextField(p.b) \/\/ 'tx_queue' column\n\t_, p.b = nextField(p.b) \/\/ 'rx_queue' column\n\t_, p.b = nextField(p.b) \/\/ 'tr' column\n\t_, p.b = nextField(p.b) \/\/ 'uid' column\n\t_, p.b = nextField(p.b) \/\/ 'timeout' column\n\tinode, p.b = nextField(p.b)\n\n\tp.c.LocalAddress, p.c.LocalPort = scanAddressNA(local, &p.bytesLocal)\n\tp.c.RemoteAddress, p.c.RemotePort = scanAddressNA(remote, &p.bytesRemote)\n\tp.c.inode = parseDec(inode)\n\tp.b = nextLine(p.b)\n\treturn &p.c\n}\n\n\/\/ scanAddress parses 'A12CF62E:00AA' to the address\/port. Handles IPv4 and\n\/\/ IPv6 addresses. The address is a big endian 32 bit ints, hex encoded. We\n\/\/ just decode the hex and flip the bytes in every group of 4.\nfunc scanAddressNA(in []byte, buf *[16]byte) (net.IP, uint16) {\n\tcol := bytes.IndexByte(in, ':')\n\tif col == -1 {\n\t\treturn nil, 0\n\t}\n\n\t\/\/ Network address is big endian. Can be either ipv4 or ipv6.\n\taddress := hexDecode32bigNA(in[:col], buf)\n\treturn net.IP(address), uint16(parseHex(in[col+1:]))\n}\n\n\/\/ hexDecode32big decodes sequences of 32bit big endian bytes.\nfunc hexDecode32bigNA(src []byte, buf *[16]byte) []byte {\n\tblocks := len(src) \/ 8\n\tfor block := 0; block < blocks; block++ {\n\t\tfor i := 0; i < 4; i++ {\n\t\t\ta := fromHexChar(src[block*8+i*2])\n\t\t\tb := fromHexChar(src[block*8+i*2+1])\n\t\t\tbuf[block*4+3-i] = (a << 4) | b\n\t\t}\n\t}\n\treturn buf[:blocks*4]\n}\n<commit_msg>Speedup by using a tmp variable.<commit_after>package procspy\n\nimport (\n\t\"bytes\"\n\t\"net\"\n)\n\ntype ProcNet struct {\n\tb []byte\n\tc Connection\n\twantedState uint\n\tbytesLocal, bytesRemote [16]byte\n}\n\nfunc NewProcNet(b []byte, wantedState uint) *ProcNet {\n\t\/\/ Skip header\n\tb = nextLine(b)\n\n\treturn &ProcNet{\n\t\tb: b,\n\t\tc: Connection{},\n\t\twantedState: wantedState,\n\t}\n}\n\nfunc (p *ProcNet) Next() *Connection {\nAGAIN:\n\tif len(p.b) == 0 {\n\t\treturn nil\n\t}\n\n\tvar (\n\t\tlocal, remote, state, inode []byte\n\t)\n\tb := p.b\n\t_, b = nextField(b) \/\/ 'sl' column\n\tlocal, b = nextField(b)\n\tremote, b = nextField(b)\n\tstate, b = nextField(b)\n\tif parseHex(state) != p.wantedState {\n\t\tp.b = nextLine(b)\n\t\tgoto AGAIN\n\t}\n\t_, b = nextField(b) \/\/ 'tx_queue' column\n\t_, b = nextField(b) \/\/ 'rx_queue' column\n\t_, b = nextField(b) \/\/ 'tr' column\n\t_, b = nextField(b) \/\/ 'uid' column\n\t_, b = nextField(b) \/\/ 'timeout' column\n\tinode, b = nextField(b)\n\n\tp.c.LocalAddress, p.c.LocalPort = scanAddressNA(local, &p.bytesLocal)\n\tp.c.RemoteAddress, p.c.RemotePort = scanAddressNA(remote, &p.bytesRemote)\n\tp.c.inode = parseDec(inode)\n\tp.b = nextLine(b)\n\treturn &p.c\n}\n\n\/\/ scanAddress parses 'A12CF62E:00AA' to the address\/port. Handles IPv4 and\n\/\/ IPv6 addresses. The address is a big endian 32 bit ints, hex encoded. We\n\/\/ just decode the hex and flip the bytes in every group of 4.\nfunc scanAddressNA(in []byte, buf *[16]byte) (net.IP, uint16) {\n\tcol := bytes.IndexByte(in, ':')\n\tif col == -1 {\n\t\treturn nil, 0\n\t}\n\n\t\/\/ Network address is big endian. Can be either ipv4 or ipv6.\n\taddress := hexDecode32bigNA(in[:col], buf)\n\treturn net.IP(address), uint16(parseHex(in[col+1:]))\n}\n\n\/\/ hexDecode32big decodes sequences of 32bit big endian bytes.\nfunc hexDecode32bigNA(src []byte, buf *[16]byte) []byte {\n\tblocks := len(src) \/ 8\n\tfor block := 0; block < blocks; block++ {\n\t\tfor i := 0; i < 4; i++ {\n\t\t\ta := fromHexChar(src[block*8+i*2])\n\t\t\tb := fromHexChar(src[block*8+i*2+1])\n\t\t\tbuf[block*4+3-i] = (a << 4) | b\n\t\t}\n\t}\n\treturn buf[:blocks*4]\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package profile provides a simple way to manage runtime\/pprof\n\/\/ profiling of your Go application.\npackage profile\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"sync\/atomic\"\n)\n\n\/\/ started counts the number of times Start has been called\nvar started uint32\n\nconst (\n\tcpuMode = iota\n\tmemMode\n\tblockMode\n)\n\ntype profile struct {\n\t\/\/ quiet suppresses informational messages during profiling.\n\tquiet bool\n\n\t\/\/ noShutdownHook controls whether the profiling package should\n\t\/\/ hook SIGINT to write profiles cleanly.\n\tnoShutdownHook bool\n\n\t\/\/ mode holds the type of profiling that will be made\n\tmode int\n\n\t\/\/ path holds the base path where various profiling files are written.\n\t\/\/ If blank, the base path will be generated by ioutil.TempDir.\n\tpath string\n\n\t\/\/ memProfileRate holds the rate for the memory profile.\n\tmemProfileRate int\n\n\t\/\/ closers holds the cleanup functions that run after each profile\n\tclosers []func()\n\n\t\/\/ stopped records if a call to profile.Stop has been made\n\tstopped uint32\n}\n\n\/\/ NoShutdownHook controls whether the profiling package should\n\/\/ hook SIGINT to write profiles cleanly.\n\/\/ Programs with more sophisticated signal handling should set\n\/\/ this to true and ensure the Stop() function returned from Start()\n\/\/ is called during shutdown.\nfunc NoShutdownHook(p *profile) { p.noShutdownHook = true }\n\n\/\/ Quiet suppresses informational messages during profiling.\nfunc Quiet(p *profile) { p.quiet = true }\n\n\/\/ CPUProfile controls if cpu profiling will be enabled. It disables any previous profiling settings.\nfunc CPUProfile(p *profile) { p.mode = cpuMode }\n\n\/\/ DefaultMemProfileRate is the default memory profiling rate.\n\/\/ See also http:\/\/golang.org\/pkg\/runtime\/#pkg-variables\nconst DefaultMemProfileRate = 4096\n\n\/\/ MemProfile controls if memory profiling will be enabled. It disables any previous profiling settings.\nfunc MemProfile(p *profile) {\n\tp.memProfileRate = DefaultMemProfileRate\n\tp.mode = memMode\n}\n\n\/\/ MemProfileRate controls if memory profiling will be enabled. Additionally, it takes a parameter which\n\/\/ allows the setting of the memory profile rate.\nfunc MemProfileRate(rate int) func(*profile) {\n\treturn func(p *profile) {\n\t\tp.memProfileRate = rate\n\t\tp.mode = memMode\n\t}\n}\n\n\/\/ BlockProfile controls if block (contention) profiling will be enabled. It disables any previous profiling settings.\nfunc BlockProfile(p *profile) { p.mode = blockMode }\n\n\/\/ ProfilePath controls the base path where various profiling\n\/\/ files are written. If blank, the base path will be generated\n\/\/ by ioutil.TempDir.\nfunc ProfilePath(path string) func(*profile) {\n\treturn func(p *profile) {\n\t\tp.path = path\n\t}\n}\n\n\/\/ Stop stops the profile and flushes any unwritten data.\nfunc (p *profile) Stop() {\n\tif !atomic.CompareAndSwapUint32(&p.stopped, 0, 1) {\n\t\t\/\/ someone has already called close\n\t\treturn\n\t}\n\tfor _, c := range p.closers {\n\t\tc()\n\t}\n}\n\n\/\/ Start starts a new profiling session.\n\/\/ The caller should call the Stop method on the value returned\n\/\/ to cleanly stop profiling.\nfunc Start(options ...func(*profile)) interface {\n\tStop()\n} {\n\tif !atomic.CompareAndSwapUint32(&started, 0, 1) {\n\t\tlog.Fatal(\"profile: Start() already called\")\n\t}\n\n\tvar prof profile\n\tfor _, option := range options {\n\t\toption(&prof)\n\t}\n\n\tpath, err := func() (string, error) {\n\t\tif p := prof.path; p != \"\" {\n\t\t\treturn p, os.MkdirAll(p, 0777)\n\t\t}\n\t\treturn ioutil.TempDir(\"\", \"profile\")\n\t}()\n\n\tif err != nil {\n\t\tlog.Fatalf(\"profile: could not create initial output directory: %v\", err)\n\t}\n\n\tswitch prof.mode {\n\tcase cpuMode:\n\t\tfn := filepath.Join(path, \"cpu.pprof\")\n\t\tf, err := os.Create(fn)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"profile: could not create cpu profile %q: %v\", fn, err)\n\t\t}\n\t\tif !prof.quiet {\n\t\t\tlog.Printf(\"profile: cpu profiling enabled, %s\", fn)\n\t\t}\n\t\tpprof.StartCPUProfile(f)\n\t\tprof.closers = append(prof.closers, func() {\n\t\t\tpprof.StopCPUProfile()\n\t\t\tf.Close()\n\t\t})\n\n\tcase memMode:\n\t\tfn := filepath.Join(path, \"mem.pprof\")\n\t\tf, err := os.Create(fn)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"profile: could not create memory profile %q: %v\", fn, err)\n\t\t}\n\t\told := runtime.MemProfileRate\n\t\truntime.MemProfileRate = prof.memProfileRate\n\t\tif !prof.quiet {\n\t\t\tlog.Printf(\"profile: memory profiling enabled (rate %d), %s\", runtime.MemProfileRate, fn)\n\t\t}\n\t\tprof.closers = append(prof.closers, func() {\n\t\t\tpprof.Lookup(\"heap\").WriteTo(f, 0)\n\t\t\tf.Close()\n\t\t\truntime.MemProfileRate = old\n\t\t})\n\n\tcase blockMode:\n\t\tfn := filepath.Join(path, \"block.pprof\")\n\t\tf, err := os.Create(fn)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"profile: could not create block profile %q: %v\", fn, err)\n\t\t}\n\t\truntime.SetBlockProfileRate(1)\n\t\tif !prof.quiet {\n\t\t\tlog.Printf(\"profile: block profiling enabled, %s\", fn)\n\t\t}\n\t\tprof.closers = append(prof.closers, func() {\n\t\t\tpprof.Lookup(\"block\").WriteTo(f, 0)\n\t\t\tf.Close()\n\t\t\truntime.SetBlockProfileRate(0)\n\t\t})\n\t}\n\n\tif !prof.noShutdownHook {\n\t\tgo func() {\n\t\t\tc := make(chan os.Signal, 1)\n\t\t\tsignal.Notify(c, os.Interrupt)\n\t\t\t<-c\n\n\t\t\tlog.Println(\"profile: caught interrupt, stopping profiles\")\n\t\t\tprof.Stop()\n\n\t\t\tos.Exit(0)\n\t\t}()\n\t}\n\n\tprof.closers = append(prof.closers, func() {\n\t\tatomic.SwapUint32(&started, 0)\n\t})\n\n\treturn &prof\n}\n<commit_msg>profile: remove support for starting a profile twice<commit_after>\/\/ Package profile provides a simple way to manage runtime\/pprof\n\/\/ profiling of your Go application.\npackage profile\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"sync\/atomic\"\n)\n\n\/\/ started counts the number of times Start has been called\nvar started uint32\n\nconst (\n\tcpuMode = iota\n\tmemMode\n\tblockMode\n)\n\ntype profile struct {\n\t\/\/ quiet suppresses informational messages during profiling.\n\tquiet bool\n\n\t\/\/ noShutdownHook controls whether the profiling package should\n\t\/\/ hook SIGINT to write profiles cleanly.\n\tnoShutdownHook bool\n\n\t\/\/ mode holds the type of profiling that will be made\n\tmode int\n\n\t\/\/ path holds the base path where various profiling files are written.\n\t\/\/ If blank, the base path will be generated by ioutil.TempDir.\n\tpath string\n\n\t\/\/ memProfileRate holds the rate for the memory profile.\n\tmemProfileRate int\n\n\t\/\/ closers holds the cleanup functions that run after each profile\n\tclosers []func()\n\n\t\/\/ stopped records if a call to profile.Stop has been made\n\tstopped uint32\n}\n\n\/\/ NoShutdownHook controls whether the profiling package should\n\/\/ hook SIGINT to write profiles cleanly.\n\/\/ Programs with more sophisticated signal handling should set\n\/\/ this to true and ensure the Stop() function returned from Start()\n\/\/ is called during shutdown.\nfunc NoShutdownHook(p *profile) { p.noShutdownHook = true }\n\n\/\/ Quiet suppresses informational messages during profiling.\nfunc Quiet(p *profile) { p.quiet = true }\n\n\/\/ CPUProfile controls if cpu profiling will be enabled. It disables any previous profiling settings.\nfunc CPUProfile(p *profile) { p.mode = cpuMode }\n\n\/\/ DefaultMemProfileRate is the default memory profiling rate.\n\/\/ See also http:\/\/golang.org\/pkg\/runtime\/#pkg-variables\nconst DefaultMemProfileRate = 4096\n\n\/\/ MemProfile controls if memory profiling will be enabled. It disables any previous profiling settings.\nfunc MemProfile(p *profile) {\n\tp.memProfileRate = DefaultMemProfileRate\n\tp.mode = memMode\n}\n\n\/\/ MemProfileRate controls if memory profiling will be enabled. Additionally, it takes a parameter which\n\/\/ allows the setting of the memory profile rate.\nfunc MemProfileRate(rate int) func(*profile) {\n\treturn func(p *profile) {\n\t\tp.memProfileRate = rate\n\t\tp.mode = memMode\n\t}\n}\n\n\/\/ BlockProfile controls if block (contention) profiling will be enabled. It disables any previous profiling settings.\nfunc BlockProfile(p *profile) { p.mode = blockMode }\n\n\/\/ ProfilePath controls the base path where various profiling\n\/\/ files are written. If blank, the base path will be generated\n\/\/ by ioutil.TempDir.\nfunc ProfilePath(path string) func(*profile) {\n\treturn func(p *profile) {\n\t\tp.path = path\n\t}\n}\n\n\/\/ Stop stops the profile and flushes any unwritten data.\nfunc (p *profile) Stop() {\n\tif !atomic.CompareAndSwapUint32(&p.stopped, 0, 1) {\n\t\t\/\/ someone has already called close\n\t\treturn\n\t}\n\tfor _, c := range p.closers {\n\t\tc()\n\t}\n}\n\n\/\/ Start starts a new profiling session.\n\/\/ The caller should call the Stop method on the value returned\n\/\/ to cleanly stop profiling. Start can only be called once\n\/\/ per program execution.\nfunc Start(options ...func(*profile)) interface {\n\tStop()\n} {\n\tif !atomic.CompareAndSwapUint32(&started, 0, 1) {\n\t\tlog.Fatal(\"profile: Start() already called\")\n\t}\n\n\tvar prof profile\n\tfor _, option := range options {\n\t\toption(&prof)\n\t}\n\n\tpath, err := func() (string, error) {\n\t\tif p := prof.path; p != \"\" {\n\t\t\treturn p, os.MkdirAll(p, 0777)\n\t\t}\n\t\treturn ioutil.TempDir(\"\", \"profile\")\n\t}()\n\n\tif err != nil {\n\t\tlog.Fatalf(\"profile: could not create initial output directory: %v\", err)\n\t}\n\n\tswitch prof.mode {\n\tcase cpuMode:\n\t\tfn := filepath.Join(path, \"cpu.pprof\")\n\t\tf, err := os.Create(fn)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"profile: could not create cpu profile %q: %v\", fn, err)\n\t\t}\n\t\tif !prof.quiet {\n\t\t\tlog.Printf(\"profile: cpu profiling enabled, %s\", fn)\n\t\t}\n\t\tpprof.StartCPUProfile(f)\n\t\tprof.closers = append(prof.closers, func() {\n\t\t\tpprof.StopCPUProfile()\n\t\t\tf.Close()\n\t\t})\n\n\tcase memMode:\n\t\tfn := filepath.Join(path, \"mem.pprof\")\n\t\tf, err := os.Create(fn)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"profile: could not create memory profile %q: %v\", fn, err)\n\t\t}\n\t\told := runtime.MemProfileRate\n\t\truntime.MemProfileRate = prof.memProfileRate\n\t\tif !prof.quiet {\n\t\t\tlog.Printf(\"profile: memory profiling enabled (rate %d), %s\", runtime.MemProfileRate, fn)\n\t\t}\n\t\tprof.closers = append(prof.closers, func() {\n\t\t\tpprof.Lookup(\"heap\").WriteTo(f, 0)\n\t\t\tf.Close()\n\t\t\truntime.MemProfileRate = old\n\t\t})\n\n\tcase blockMode:\n\t\tfn := filepath.Join(path, \"block.pprof\")\n\t\tf, err := os.Create(fn)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"profile: could not create block profile %q: %v\", fn, err)\n\t\t}\n\t\truntime.SetBlockProfileRate(1)\n\t\tif !prof.quiet {\n\t\t\tlog.Printf(\"profile: block profiling enabled, %s\", fn)\n\t\t}\n\t\tprof.closers = append(prof.closers, func() {\n\t\t\tpprof.Lookup(\"block\").WriteTo(f, 0)\n\t\t\tf.Close()\n\t\t\truntime.SetBlockProfileRate(0)\n\t\t})\n\t}\n\n\tif !prof.noShutdownHook {\n\t\tgo func() {\n\t\t\tc := make(chan os.Signal, 1)\n\t\t\tsignal.Notify(c, os.Interrupt)\n\t\t\t<-c\n\n\t\t\tlog.Println(\"profile: caught interrupt, stopping profiles\")\n\t\t\tprof.Stop()\n\n\t\t\tos.Exit(0)\n\t\t}()\n\t}\n\n\treturn &prof\n}\n<|endoftext|>"} {"text":"<commit_before>package client_test\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/mail\"\n\n\t\"github.com\/emersion\/go-imap\"\n\t\"github.com\/emersion\/go-imap\/client\"\n)\n\nfunc ExampleClient() {\n\tlog.Println(\"Connecting to server...\")\n\n\t\/\/ Connect to server\n\tc, err := client.DialTLS(\"mail.example.org:993\", nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Println(\"Connected\")\n\n\t\/\/ Don't forget to logout\n\tdefer c.Logout()\n\n\t\/\/ Login\n\tif err := c.Login(\"username\", \"password\"); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Println(\"Logged in\")\n\n\t\/\/ List mailboxes\n\tmailboxes := make(chan *imap.MailboxInfo, 10)\n\tgo func() {\n\t\t\/\/ c.List will send mailboxes to the channel and close it when done\n\t\tif err := c.List(\"\", \"*\", mailboxes); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}()\n\n\tlog.Println(\"Mailboxes:\")\n\tfor m := range mailboxes {\n\t\tlog.Println(\"* \" + m.Name)\n\t}\n\n\t\/\/ Select INBOX\n\tmbox, err := c.Select(\"INBOX\", false)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Println(\"Flags for INBOX:\", mbox.Flags)\n\n\t\/\/ Get the last 4 messages\n\tfrom := uint32(1)\n\tto := mbox.Messages\n\tif mbox.Messages > 3 {\n\t\t\/\/ We're using unsigned integers here, only substract if the result is > 0\n\t\tfrom = mbox.Messages - 3\n\t}\n\tseqset := new(imap.SeqSet)\n\tseqset.AddRange(from, to)\n\n\tmessages := make(chan *imap.Message, 10)\n\tgo func() {\n\t\tif err := c.Fetch(seqset, []string{imap.EnvelopeMsgAttr}, messages); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}()\n\n\tlog.Println(\"Last 4 messages:\")\n\tfor msg := range messages {\n\t\tlog.Println(\"* \" + msg.Envelope.Subject)\n\t}\n\n\tlog.Println(\"Done!\")\n}\n\nfunc ExampleClient_Fetch() {\n\t\/\/ Let's assume c is a client\n\tvar c *client.Client\n\n\t\/\/ Select INBOX\n\tmbox, err := c.Select(\"INBOX\", false)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Get the last message\n\tif mbox.Messages == 0 {\n\t\tlog.Fatal(\"No message in mailbox\")\n\t}\n\tseqset := new(imap.SeqSet)\n\tseqset.AddRange(mbox.Messages, mbox.Messages)\n\n\t\/\/ Get the whole message body\n\tattrs := []string{\"BODY[]\"}\n\n\tmessages := make(chan *imap.Message, 1)\n\tgo func() {\n\t\tif err := c.Fetch(seqset, attrs, messages); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}()\n\n\tlog.Println(\"Last message:\")\n\tmsg := <-messages\n\tr := msg.GetBody(\"BODY[]\")\n\tif r == nil {\n\t\tlog.Fatal(\"Server didn't returned message body\")\n\t}\n\n\tm, err := mail.ReadMessage(r)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\theader := m.Header\n\tlog.Println(\"Date:\", header.Get(\"Date\"))\n\tlog.Println(\"From:\", header.Get(\"From\"))\n\tlog.Println(\"To:\", header.Get(\"To\"))\n\tlog.Println(\"Subject:\", header.Get(\"Subject\"))\n\n\tbody, err := ioutil.ReadAll(m.Body)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Println(body)\n}\n\nfunc ExampleClient_DeleteMessages() {\n\t\/\/ Let's assume c is a client\n\tvar c *client.Client\n\n\t\/\/ Select INBOX\n\tmbox, err := c.Select(\"INBOX\", false)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ We will delete the last message\n\tif mbox.Messages == 0 {\n\t\tlog.Fatal(\"No message in mailbox\")\n\t}\n\tseqset := new(imap.SeqSet)\n\tseqset.AddRange(mbox.Messages, mbox.Messages)\n\n\t\/\/ First mark the message as deleted\n\toperation := \"+FLAGS.SILENT\"\n\tflags := []string{imap.DeletedFlag}\n\tif err := c.Store(seqset, operation, flags, nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Then delete it\n\tif err := c.Expunge(nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Println(\"Last message has been deleted\")\n}\n<commit_msg>client: renames Client.Expunge example<commit_after>package client_test\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/mail\"\n\n\t\"github.com\/emersion\/go-imap\"\n\t\"github.com\/emersion\/go-imap\/client\"\n)\n\nfunc ExampleClient() {\n\tlog.Println(\"Connecting to server...\")\n\n\t\/\/ Connect to server\n\tc, err := client.DialTLS(\"mail.example.org:993\", nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Println(\"Connected\")\n\n\t\/\/ Don't forget to logout\n\tdefer c.Logout()\n\n\t\/\/ Login\n\tif err := c.Login(\"username\", \"password\"); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Println(\"Logged in\")\n\n\t\/\/ List mailboxes\n\tmailboxes := make(chan *imap.MailboxInfo, 10)\n\tgo func() {\n\t\t\/\/ c.List will send mailboxes to the channel and close it when done\n\t\tif err := c.List(\"\", \"*\", mailboxes); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}()\n\n\tlog.Println(\"Mailboxes:\")\n\tfor m := range mailboxes {\n\t\tlog.Println(\"* \" + m.Name)\n\t}\n\n\t\/\/ Select INBOX\n\tmbox, err := c.Select(\"INBOX\", false)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Println(\"Flags for INBOX:\", mbox.Flags)\n\n\t\/\/ Get the last 4 messages\n\tfrom := uint32(1)\n\tto := mbox.Messages\n\tif mbox.Messages > 3 {\n\t\t\/\/ We're using unsigned integers here, only substract if the result is > 0\n\t\tfrom = mbox.Messages - 3\n\t}\n\tseqset := new(imap.SeqSet)\n\tseqset.AddRange(from, to)\n\n\tmessages := make(chan *imap.Message, 10)\n\tgo func() {\n\t\tif err := c.Fetch(seqset, []string{imap.EnvelopeMsgAttr}, messages); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}()\n\n\tlog.Println(\"Last 4 messages:\")\n\tfor msg := range messages {\n\t\tlog.Println(\"* \" + msg.Envelope.Subject)\n\t}\n\n\tlog.Println(\"Done!\")\n}\n\nfunc ExampleClient_Fetch() {\n\t\/\/ Let's assume c is a client\n\tvar c *client.Client\n\n\t\/\/ Select INBOX\n\tmbox, err := c.Select(\"INBOX\", false)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Get the last message\n\tif mbox.Messages == 0 {\n\t\tlog.Fatal(\"No message in mailbox\")\n\t}\n\tseqset := new(imap.SeqSet)\n\tseqset.AddRange(mbox.Messages, mbox.Messages)\n\n\t\/\/ Get the whole message body\n\tattrs := []string{\"BODY[]\"}\n\n\tmessages := make(chan *imap.Message, 1)\n\tgo func() {\n\t\tif err := c.Fetch(seqset, attrs, messages); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}()\n\n\tlog.Println(\"Last message:\")\n\tmsg := <-messages\n\tr := msg.GetBody(\"BODY[]\")\n\tif r == nil {\n\t\tlog.Fatal(\"Server didn't returned message body\")\n\t}\n\n\tm, err := mail.ReadMessage(r)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\theader := m.Header\n\tlog.Println(\"Date:\", header.Get(\"Date\"))\n\tlog.Println(\"From:\", header.Get(\"From\"))\n\tlog.Println(\"To:\", header.Get(\"To\"))\n\tlog.Println(\"Subject:\", header.Get(\"Subject\"))\n\n\tbody, err := ioutil.ReadAll(m.Body)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Println(body)\n}\n\nfunc ExampleClient_Expunge() {\n\t\/\/ Let's assume c is a client\n\tvar c *client.Client\n\n\t\/\/ Select INBOX\n\tmbox, err := c.Select(\"INBOX\", false)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ We will delete the last message\n\tif mbox.Messages == 0 {\n\t\tlog.Fatal(\"No message in mailbox\")\n\t}\n\tseqset := new(imap.SeqSet)\n\tseqset.AddRange(mbox.Messages, mbox.Messages)\n\n\t\/\/ First mark the message as deleted\n\toperation := \"+FLAGS.SILENT\"\n\tflags := []string{imap.DeletedFlag}\n\tif err := c.Store(seqset, operation, flags, nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Then delete it\n\tif err := c.Expunge(nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tlog.Println(\"Last message has been deleted\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/\n\/\/ Copyright (c) 2017\n\/\/ Mainflux\n\/\/ Cavium\n\/\/\n\/\/ SPDX-License-Identifier: Apache-2.0\n\/\/\n\npackage client\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/drasko\/edgex-export\"\n\t\"github.com\/drasko\/edgex-export\/mongo\"\n\t\"github.com\/go-zoo\/bone\"\n\t\"go.uber.org\/zap\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\nconst (\n\t\/\/ TODO this consts need to be configurable somehow\n\tdistroHost = \"127.0.0.1\"\n\tdistroPort int = 48070\n)\n\nfunc getRegByID(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\n\tid := bone.GetValue(r, \"id\")\n\n\ts := repo.Session.Copy()\n\tdefer s.Close()\n\tc := s.DB(mongo.DBName).C(mongo.CollectionName)\n\n\treg := export.Registration{}\n\tif err := c.Find(bson.M{\"id\": id}).One(®); err != nil {\n\t\tlogger.Error(\"Failed to query by id\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tres, err := json.Marshal(reg)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to query by id\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tio.WriteString(w, string(res))\n}\n\nfunc getRegList(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\n\tt := bone.GetValue(r, \"type\")\n\n\tvar l string\n\n\tswitch t {\n\tcase \"algorithms\":\n\t\tl = `[\"None\",\"Aes\"]`\n\tcase \"compressions\":\n\t\tl = `[\"None\",\"Gzip\",\"Zip\"]`\n\tcase \"formats\":\n\t\tl = `[\"JSON\",\"XML\",\"Serialized\",\"IotCoreJSON\",\"AzureJSON\",\"CSV\"]`\n\tcase \"destinations\":\n\t\tl = `[\"DestMQTT\", \"TeDestZMQller\", \"DestIotCoreMQTT,\n\t\t\t\"DestAzureMQTT\", \"DestRest\"]`\n\tdefault:\n\t\tlogger.Error(\"Unknown type: \" + t)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tio.WriteString(w, \"Unknown type: \"+t)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tio.WriteString(w, l)\n}\n\nfunc getAllReg(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\n\ts := repo.Session.Copy()\n\tdefer s.Close()\n\tc := s.DB(mongo.DBName).C(mongo.CollectionName)\n\n\treg := []export.Registration{}\n\tif err := c.Find(nil).All(®); err != nil {\n\t\tlogger.Error(\"Failed to query all registrations\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tres, err := json.Marshal(reg)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to query all registrations\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tio.WriteString(w, string(res))\n}\n\nfunc getRegByName(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\n\tname := bone.GetValue(r, \"name\")\n\n\ts := repo.Session.Copy()\n\tdefer s.Close()\n\tc := s.DB(mongo.DBName).C(mongo.CollectionName)\n\n\treg := export.Registration{}\n\tif err := c.Find(bson.M{\"name\": name}).One(®); err != nil {\n\t\tlogger.Error(\"Failed to query by name\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tres, err := json.Marshal(reg)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to query by name\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tio.WriteString(w, string(res))\n}\n\nfunc addReg(w http.ResponseWriter, r *http.Request) {\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to query add registration\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\treg := export.Registration{}\n\tif err := json.Unmarshal(data, ®); err != nil {\n\t\tlogger.Error(\"Failed to query add registration\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\ts := repo.Session.Copy()\n\tdefer s.Close()\n\tc := s.DB(mongo.DBName).C(mongo.CollectionName)\n\n\tcount, err := c.Find(bson.M{\"name\": reg.Name}).Count()\n\tif err != nil {\n\t\tlogger.Error(\"Failed to query add registration\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\tif count != 0 {\n\t\tlogger.Error(\"Username already taken: \" + reg.Name)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := c.Insert(reg); err != nil {\n\t\tlogger.Error(\"Failed to query add registration\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n\tnotifyUpdatedRegistrations(export.NotifyUpdate{reg.Name, \"add\"})\n}\n\nfunc updateReg(w http.ResponseWriter, r *http.Request) {\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to query update registration\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tvar body map[string]interface{}\n\tif err := json.Unmarshal(data, &body); err != nil {\n\t\tlogger.Error(\"Failed to query update registration\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t}\n\n\ts := repo.Session.Copy()\n\tdefer s.Close()\n\tc := s.DB(mongo.DBName).C(mongo.CollectionName)\n\n\tname := body[\"name\"]\n\tquery := bson.M{\"name\": name}\n\tupdate := bson.M{\"$set\": body}\n\n\tif err := c.Update(query, update); err != nil {\n\t\tlogger.Error(\"Failed to query update registration\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tnotifyUpdatedRegistrations(export.NotifyUpdate{name.(string), \"update\"})\n}\n\nfunc delRegByID(w http.ResponseWriter, r *http.Request) {\n\tid := bone.GetValue(r, \"id\")\n\n\ts := repo.Session.Copy()\n\tdefer s.Close()\n\tc := s.DB(mongo.DBName).C(mongo.CollectionName)\n\n\tif err := c.Remove(bson.M{\"id\": id}); err != nil {\n\t\tlogger.Error(\"Failed to query by id\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tnotifyUpdatedRegistrations(export.NotifyUpdate{\"TODO\", \"delete\"})\n}\n\nfunc delRegByName(w http.ResponseWriter, r *http.Request) {\n\tname := bone.GetValue(r, \"name\")\n\n\ts := repo.Session.Copy()\n\tdefer s.Close()\n\tc := s.DB(mongo.DBName).C(mongo.CollectionName)\n\n\tif err := c.Remove(bson.M{\"name\": name}); err != nil {\n\t\tlogger.Error(\"Failed to query by name\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tnotifyUpdatedRegistrations(export.NotifyUpdate{name, \"delete\"})\n}\n\nfunc notifyUpdatedRegistrations(update export.NotifyUpdate) {\n\tgo func() {\n\t\t\/\/ TODO make configurable distro host\/port\n\t\tclient := &http.Client{}\n\t\turl := \"http:\/\/\" + distroHost + \":\" + strconv.Itoa(distroPort) +\n\t\t\t\"\/api\/v1\/notify\/registrations\"\n\n\t\tdata, err := json.Marshal(update)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"Error generating update json\", zap.Error(err))\n\t\t\treturn\n\t\t}\n\n\t\treq, err := http.NewRequest(http.MethodPut, url, bytes.NewBuffer([]byte(data)))\n\t\tif err != nil {\n\t\t\tlogger.Error(\"Error creating http request\")\n\t\t\treturn\n\t\t}\n\t\t_, err = client.Do(req)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"Error notifying updated registrations to distro\", zap.String(\"url\", url))\n\t\t}\n\t}()\n}\n<commit_msg>Read the registration from mongo before deleting to get the name<commit_after>\/\/\n\/\/ Copyright (c) 2017\n\/\/ Mainflux\n\/\/ Cavium\n\/\/\n\/\/ SPDX-License-Identifier: Apache-2.0\n\/\/\n\npackage client\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/drasko\/edgex-export\"\n\t\"github.com\/drasko\/edgex-export\/mongo\"\n\t\"github.com\/go-zoo\/bone\"\n\t\"go.uber.org\/zap\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\nconst (\n\t\/\/ TODO this consts need to be configurable somehow\n\tdistroHost = \"127.0.0.1\"\n\tdistroPort int = 48070\n)\n\nfunc getRegByID(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\n\tid := bone.GetValue(r, \"id\")\n\n\ts := repo.Session.Copy()\n\tdefer s.Close()\n\tc := s.DB(mongo.DBName).C(mongo.CollectionName)\n\n\treg := export.Registration{}\n\tif err := c.Find(bson.M{\"id\": id}).One(®); err != nil {\n\t\tlogger.Error(\"Failed to query by id\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tres, err := json.Marshal(reg)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to query by id\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tio.WriteString(w, string(res))\n}\n\nfunc getRegList(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\n\tt := bone.GetValue(r, \"type\")\n\n\tvar l string\n\n\tswitch t {\n\tcase \"algorithms\":\n\t\tl = `[\"None\",\"Aes\"]`\n\tcase \"compressions\":\n\t\tl = `[\"None\",\"Gzip\",\"Zip\"]`\n\tcase \"formats\":\n\t\tl = `[\"JSON\",\"XML\",\"Serialized\",\"IotCoreJSON\",\"AzureJSON\",\"CSV\"]`\n\tcase \"destinations\":\n\t\tl = `[\"DestMQTT\", \"TeDestZMQller\", \"DestIotCoreMQTT,\n\t\t\t\"DestAzureMQTT\", \"DestRest\"]`\n\tdefault:\n\t\tlogger.Error(\"Unknown type: \" + t)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tio.WriteString(w, \"Unknown type: \"+t)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tio.WriteString(w, l)\n}\n\nfunc getAllReg(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\n\ts := repo.Session.Copy()\n\tdefer s.Close()\n\tc := s.DB(mongo.DBName).C(mongo.CollectionName)\n\n\treg := []export.Registration{}\n\tif err := c.Find(nil).All(®); err != nil {\n\t\tlogger.Error(\"Failed to query all registrations\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tres, err := json.Marshal(reg)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to query all registrations\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tio.WriteString(w, string(res))\n}\n\nfunc getRegByName(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\n\tname := bone.GetValue(r, \"name\")\n\n\ts := repo.Session.Copy()\n\tdefer s.Close()\n\tc := s.DB(mongo.DBName).C(mongo.CollectionName)\n\n\treg := export.Registration{}\n\tif err := c.Find(bson.M{\"name\": name}).One(®); err != nil {\n\t\tlogger.Error(\"Failed to query by name\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tres, err := json.Marshal(reg)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to query by name\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tio.WriteString(w, string(res))\n}\n\nfunc addReg(w http.ResponseWriter, r *http.Request) {\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to query add registration\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\treg := export.Registration{}\n\tif err := json.Unmarshal(data, ®); err != nil {\n\t\tlogger.Error(\"Failed to query add registration\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\ts := repo.Session.Copy()\n\tdefer s.Close()\n\tc := s.DB(mongo.DBName).C(mongo.CollectionName)\n\n\tcount, err := c.Find(bson.M{\"name\": reg.Name}).Count()\n\tif err != nil {\n\t\tlogger.Error(\"Failed to query add registration\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\tif count != 0 {\n\t\tlogger.Error(\"Username already taken: \" + reg.Name)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif err := c.Insert(reg); err != nil {\n\t\tlogger.Error(\"Failed to query add registration\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusCreated)\n\tnotifyUpdatedRegistrations(export.NotifyUpdate{reg.Name, \"add\"})\n}\n\nfunc updateReg(w http.ResponseWriter, r *http.Request) {\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlogger.Error(\"Failed to query update registration\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tvar body map[string]interface{}\n\tif err := json.Unmarshal(data, &body); err != nil {\n\t\tlogger.Error(\"Failed to query update registration\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t}\n\n\ts := repo.Session.Copy()\n\tdefer s.Close()\n\tc := s.DB(mongo.DBName).C(mongo.CollectionName)\n\n\tname := body[\"name\"]\n\tquery := bson.M{\"name\": name}\n\tupdate := bson.M{\"$set\": body}\n\n\tif err := c.Update(query, update); err != nil {\n\t\tlogger.Error(\"Failed to query update registration\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tnotifyUpdatedRegistrations(export.NotifyUpdate{name.(string), \"update\"})\n}\n\nfunc delRegByID(w http.ResponseWriter, r *http.Request) {\n\tid := bone.GetValue(r, \"id\")\n\n\ts := repo.Session.Copy()\n\tdefer s.Close()\n\tc := s.DB(mongo.DBName).C(mongo.CollectionName)\n\n\t\/\/ Read the registration from mongo, the registration name is needed to\n\t\/\/ notify distro of the deletion\n\treg := export.Registration{}\n\tif err := c.Find(bson.M{\"id\": id}).One(®); err != nil {\n\t\tlogger.Error(\"Failed to query by id\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tif err := c.Remove(bson.M{\"id\": id}); err != nil {\n\t\tlogger.Error(\"Failed to query by id\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tnotifyUpdatedRegistrations(export.NotifyUpdate{reg.Name, \"delete\"})\n}\n\nfunc delRegByName(w http.ResponseWriter, r *http.Request) {\n\tname := bone.GetValue(r, \"name\")\n\n\ts := repo.Session.Copy()\n\tdefer s.Close()\n\tc := s.DB(mongo.DBName).C(mongo.CollectionName)\n\n\tif err := c.Remove(bson.M{\"name\": name}); err != nil {\n\t\tlogger.Error(\"Failed to query by name\", zap.Error(err))\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tio.WriteString(w, err.Error())\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tnotifyUpdatedRegistrations(export.NotifyUpdate{name, \"delete\"})\n}\n\nfunc notifyUpdatedRegistrations(update export.NotifyUpdate) {\n\tgo func() {\n\t\t\/\/ TODO make configurable distro host\/port\n\t\tclient := &http.Client{}\n\t\turl := \"http:\/\/\" + distroHost + \":\" + strconv.Itoa(distroPort) +\n\t\t\t\"\/api\/v1\/notify\/registrations\"\n\n\t\tdata, err := json.Marshal(update)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"Error generating update json\", zap.Error(err))\n\t\t\treturn\n\t\t}\n\n\t\treq, err := http.NewRequest(http.MethodPut, url, bytes.NewBuffer([]byte(data)))\n\t\tif err != nil {\n\t\t\tlogger.Error(\"Error creating http request\")\n\t\t\treturn\n\t\t}\n\t\t_, err = client.Do(req)\n\t\tif err != nil {\n\t\t\tlogger.Error(\"Error notifying updated registrations to distro\", zap.String(\"url\", url))\n\t\t}\n\t}()\n}\n<|endoftext|>"} {"text":"<commit_before>package dashboard\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n)\n\nvar (\n\tdefaultProjectMap map[string]*Project\n\tdefaultProjects = []*Project{\n\t\tmakeProject(\"jekyll\", \"jekyll\/jekyll\", \"master\", \"jekyll\"),\n\t\tmakeProject(\"jemoji\", \"jekyll\/jemoji\", \"master\", \"jemoji\"),\n\t\tmakeProject(\"mercenary\", \"jekyll\/mercenary\", \"master\", \"mercenary\"),\n\t\tmakeProject(\"jekyll-import\", \"jekyll\/jekyll-import\", \"master\", \"jekyll-import\"),\n\t\tmakeProject(\"jekyll-feed\", \"jekyll\/jekyll-feed\", \"master\", \"jekyll-feed\"),\n\t\tmakeProject(\"jekyll-sitemap\", \"jekyll\/jekyll-sitemap\", \"master\", \"jekyll-sitemap\"),\n\t\tmakeProject(\"jekyll-mentions\", \"jekyll\/jekyll-mentions\", \"master\", \"jekyll-mentions\"),\n\t\tmakeProject(\"jekyll-watch\", \"jekyll\/jekyll-watch\", \"master\", \"jekyll-watch\"),\n\t\tmakeProject(\"jekyll-compose\", \"jekyll\/jekyll-compose\", \"master\", \"jekyll-compose\"),\n\t\tmakeProject(\"jekyll-paginate\", \"jekyll\/jekyll-paginate\", \"master\", \"jekyll-paginate\"),\n\t\tmakeProject(\"jekyll-gist\", \"jekyll\/jekyll-gist\", \"master\", \"jekyll-gist\"),\n\t\tmakeProject(\"jekyll-coffeescript\", \"jekyll\/jekyll-coffeescript\", \"master\", \"jekyll-coffeescript\"),\n\t\tmakeProject(\"jekyll-opal\", \"jekyll\/jekyll-opal\", \"master\", \"jekyll-opal\"),\n\t\tmakeProject(\"classifier-reborn\", \"jekyll\/classifier-reborn\", \"master\", \"classifier-reborn\"),\n\t\tmakeProject(\"jekyll-sass-converter\", \"jekyll\/jekyll-sass-converter\", \"master\", \"jekyll-sass-converter\"),\n\t\tmakeProject(\"jekyll-textile-converter\", \"jekyll\/jekyll-textile-converter\", \"master\", \"jekyll-textile-converter\"),\n\t\tmakeProject(\"jekyll-redirect-from\", \"jekyll\/jekyll-redirect-from\", \"master\", \"jekyll-redirect-from\"),\n\t\tmakeProject(\"github-metadata\", \"jekyll\/github-metadata\", \"master\", \"jekyll-github-metadata\"),\n\t\tmakeProject(\"plugins.jekyllrb\", \"jekyll\/plugins\", \"gh-pages\", \"\"),\n\t\tmakeProject(\"jekyll docker\", \"jekyll\/docker\", \"\", \"\"),\n\t}\n)\n\ntype Project struct {\n\tName string `json:\"name\"`\n\tNwo string `json:\"nwo\"`\n\tBranch string `json:\"branch\"`\n\tGemName string `json:\"gem_name\"`\n\n\tGem *RubyGem `json:\"gem\"`\n\tTravis *TravisReport `json:\"travis\"`\n\tGitHub *GitHub `json:\"github\"`\n\tfetched bool\n}\n\nfunc (p *Project) fetch() {\n\tif !p.fetched {\n\t\trubyGemChan := rubygem(p.GemName)\n\t\ttravisChan := travis(p.Nwo, p.Branch)\n\t\tgithubChan := github(p.Nwo)\n\t\tp.Gem = <-rubyGemChan\n\t\tp.Travis = <-travisChan\n\t\tp.GitHub = <-githubChan\n\t\tp.fetched = true\n\t}\n}\n\nfunc buildProjectMap() {\n\tdefaultProjectMap = map[string]*Project{}\n\tfor _, p := range defaultProjects {\n\t\tdefaultProjectMap[p.Name] = p\n\t}\n}\n\nfunc makeProject(name, nwo, branch, rubygem string) *Project {\n\treturn &Project{\n\t\tName: name,\n\t\tNwo: nwo,\n\t\tBranch: branch,\n\t\tGemName: rubygem,\n\t}\n}\n\nfunc getProject(name string) Project {\n\tif defaultProjectMap == nil {\n\t\tbuildProjectMap()\n\t}\n\n\tif p, ok := defaultProjectMap[name]; ok {\n\t\tif !p.fetched {\n\t\t\tp.fetch()\n\t\t}\n\t\treturn *p\n\t}\n\tpanic(fmt.Sprintf(\"no project named '%s'\", name))\n}\n\nfunc getAllProjects() []*Project {\n\tvar wg sync.WaitGroup\n\tfor _, p := range defaultProjects {\n\t\twg.Add(1)\n\t\tgo func(project *Project) {\n\t\t\tproject.fetch()\n\t\t\twg.Done()\n\t\t}(p)\n\t}\n\twg.Wait()\n\treturn defaultProjects\n}\n<commit_msg>reset cache every half hour<commit_after>package dashboard\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar (\n\tdefaultProjectMap map[string]*Project\n\tdefaultProjects = []*Project{\n\t\tmakeProject(\"jekyll\", \"jekyll\/jekyll\", \"master\", \"jekyll\"),\n\t\tmakeProject(\"jemoji\", \"jekyll\/jemoji\", \"master\", \"jemoji\"),\n\t\tmakeProject(\"mercenary\", \"jekyll\/mercenary\", \"master\", \"mercenary\"),\n\t\tmakeProject(\"jekyll-import\", \"jekyll\/jekyll-import\", \"master\", \"jekyll-import\"),\n\t\tmakeProject(\"jekyll-feed\", \"jekyll\/jekyll-feed\", \"master\", \"jekyll-feed\"),\n\t\tmakeProject(\"jekyll-sitemap\", \"jekyll\/jekyll-sitemap\", \"master\", \"jekyll-sitemap\"),\n\t\tmakeProject(\"jekyll-mentions\", \"jekyll\/jekyll-mentions\", \"master\", \"jekyll-mentions\"),\n\t\tmakeProject(\"jekyll-watch\", \"jekyll\/jekyll-watch\", \"master\", \"jekyll-watch\"),\n\t\tmakeProject(\"jekyll-compose\", \"jekyll\/jekyll-compose\", \"master\", \"jekyll-compose\"),\n\t\tmakeProject(\"jekyll-paginate\", \"jekyll\/jekyll-paginate\", \"master\", \"jekyll-paginate\"),\n\t\tmakeProject(\"jekyll-gist\", \"jekyll\/jekyll-gist\", \"master\", \"jekyll-gist\"),\n\t\tmakeProject(\"jekyll-coffeescript\", \"jekyll\/jekyll-coffeescript\", \"master\", \"jekyll-coffeescript\"),\n\t\tmakeProject(\"jekyll-opal\", \"jekyll\/jekyll-opal\", \"master\", \"jekyll-opal\"),\n\t\tmakeProject(\"classifier-reborn\", \"jekyll\/classifier-reborn\", \"master\", \"classifier-reborn\"),\n\t\tmakeProject(\"jekyll-sass-converter\", \"jekyll\/jekyll-sass-converter\", \"master\", \"jekyll-sass-converter\"),\n\t\tmakeProject(\"jekyll-textile-converter\", \"jekyll\/jekyll-textile-converter\", \"master\", \"jekyll-textile-converter\"),\n\t\tmakeProject(\"jekyll-redirect-from\", \"jekyll\/jekyll-redirect-from\", \"master\", \"jekyll-redirect-from\"),\n\t\tmakeProject(\"github-metadata\", \"jekyll\/github-metadata\", \"master\", \"jekyll-github-metadata\"),\n\t\tmakeProject(\"plugins.jekyllrb\", \"jekyll\/plugins\", \"gh-pages\", \"\"),\n\t\tmakeProject(\"jekyll docker\", \"jekyll\/docker\", \"\", \"\"),\n\t}\n)\n\nfunc init() {\n\tgo resetProjectsPeriodically()\n}\n\nfunc resetProjectsPeriodically() {\n\tfor range time.Tick(time.Hour \/ 2) {\n\t\tlog.Println(\"resetting projects' cache\")\n\t\tfor _, p := range defaultProjects {\n\t\t\tp.reset()\n\t\t}\n\t}\n}\n\ntype Project struct {\n\tName string `json:\"name\"`\n\tNwo string `json:\"nwo\"`\n\tBranch string `json:\"branch\"`\n\tGemName string `json:\"gem_name\"`\n\n\tGem *RubyGem `json:\"gem\"`\n\tTravis *TravisReport `json:\"travis\"`\n\tGitHub *GitHub `json:\"github\"`\n\tfetched bool\n}\n\nfunc (p *Project) fetch() {\n\tif !p.fetched {\n\t\trubyGemChan := rubygem(p.GemName)\n\t\ttravisChan := travis(p.Nwo, p.Branch)\n\t\tgithubChan := github(p.Nwo)\n\t\tp.Gem = <-rubyGemChan\n\t\tp.Travis = <-travisChan\n\t\tp.GitHub = <-githubChan\n\t\tp.fetched = true\n\t}\n}\n\nfunc (p *Project) reset() {\n\tp.fetched = false\n\tp.Gem = nil\n\tp.Travis = nil\n\tp.GitHub = nil\n}\n\nfunc buildProjectMap() {\n\tdefaultProjectMap = map[string]*Project{}\n\tfor _, p := range defaultProjects {\n\t\tdefaultProjectMap[p.Name] = p\n\t}\n}\n\nfunc makeProject(name, nwo, branch, rubygem string) *Project {\n\treturn &Project{\n\t\tName: name,\n\t\tNwo: nwo,\n\t\tBranch: branch,\n\t\tGemName: rubygem,\n\t}\n}\n\nfunc getProject(name string) Project {\n\tif defaultProjectMap == nil {\n\t\tbuildProjectMap()\n\t}\n\n\tif p, ok := defaultProjectMap[name]; ok {\n\t\tif !p.fetched {\n\t\t\tp.fetch()\n\t\t}\n\t\treturn *p\n\t}\n\tpanic(fmt.Sprintf(\"no project named '%s'\", name))\n}\n\nfunc getAllProjects() []*Project {\n\tvar wg sync.WaitGroup\n\tfor _, p := range defaultProjects {\n\t\twg.Add(1)\n\t\tgo func(project *Project) {\n\t\t\tproject.fetch()\n\t\t\twg.Done()\n\t\t}(p)\n\t}\n\twg.Wait()\n\treturn defaultProjects\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ estcequecestbientot HTTP server\n\/\/ usage:\n\/\/ PORT=8080 HOST=localhost .\/estcequecestbientot\npackage main\n\nimport (\n\t\"github.com\/arnaudcordier\/estcequecestbientot-go\/estcequecest\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n)\n\nvar app *estcequecest.App\n\nfunc main() {\n\tapp = estcequecest.NewApp(\"messages\", \"messages_\")\n\n\thttp.HandleFunc(\"\/\", index)\n\thttp.HandleFunc(\"\/load\/\", load)\n\thttp.HandleFunc(\"\/unload\/\", unload)\n\thttp.HandleFunc(\"\/list\", list)\n\n\tport := os.Getenv(\"PORT\")\n\tif len(port) == 0 {\n\t\tport = \"8080\"\n\t}\n\thost := os.Getenv(\"HOST\")\n\tfmt.Println(\"Est-ce-que c'est bientôt? running on \" + host + \":\" + port)\n\n\thttp.ListenAndServe(host+\":\"+port, nil)\n}\n\nfunc index(w http.ResponseWriter, r *http.Request) {\n\tmessages := app.GetMessages()\n\tt, _ := template.ParseFiles(\"templates\/estcequecestbientot.html\")\n\tt.Execute(w, &messages)\n}\n\nfunc load(w http.ResponseWriter, r *http.Request) {\n\tname := strings.Replace(r.URL.Path, \"\/load\/\", \"\", 1)\n\tif len(name) > 0 {\n\t\tapp.Load(name)\n\t}\n\thttp.Redirect(w, r, \"\/\", 303)\n}\n\nfunc unload(w http.ResponseWriter, r *http.Request) {\n\tname := strings.Replace(r.URL.Path, \"\/unload\/\", \"\", 1)\n\tif len(name) > 0 {\n\t\tapp.Unload(name)\n\t}\n\thttp.Redirect(w, r, \"\/\", 303)\n}\n\nfunc list(w http.ResponseWriter, r *http.Request) {\n\tnotloaded, loaded := app.List()\n\tt, _ := template.ParseFiles(\"templates\/list.html\")\n\tt.Execute(w, map[string]*[]string{\"notloaded\": ¬loaded, \"loaded\": &loaded})\n}\n<commit_msg>gofmt<commit_after>\/\/ estcequecestbientot HTTP server\n\/\/ usage:\n\/\/ PORT=8080 HOST=localhost .\/estcequecestbientot\npackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/arnaudcordier\/estcequecestbientot-go\/estcequecest\"\n\t\"html\/template\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n)\n\nvar app *estcequecest.App\n\nfunc main() {\n\tapp = estcequecest.NewApp(\"messages\", \"messages_\")\n\n\thttp.HandleFunc(\"\/\", index)\n\thttp.HandleFunc(\"\/load\/\", load)\n\thttp.HandleFunc(\"\/unload\/\", unload)\n\thttp.HandleFunc(\"\/list\", list)\n\n\tport := os.Getenv(\"PORT\")\n\tif len(port) == 0 {\n\t\tport = \"8080\"\n\t}\n\thost := os.Getenv(\"HOST\")\n\tfmt.Println(\"Est-ce-que c'est bientôt? running on \" + host + \":\" + port)\n\n\thttp.ListenAndServe(host+\":\"+port, nil)\n}\n\nfunc index(w http.ResponseWriter, r *http.Request) {\n\tmessages := app.GetMessages()\n\tt, _ := template.ParseFiles(\"templates\/estcequecestbientot.html\")\n\tt.Execute(w, &messages)\n}\n\nfunc load(w http.ResponseWriter, r *http.Request) {\n\tname := strings.Replace(r.URL.Path, \"\/load\/\", \"\", 1)\n\tif len(name) > 0 {\n\t\tapp.Load(name)\n\t}\n\thttp.Redirect(w, r, \"\/\", 303)\n}\n\nfunc unload(w http.ResponseWriter, r *http.Request) {\n\tname := strings.Replace(r.URL.Path, \"\/unload\/\", \"\", 1)\n\tif len(name) > 0 {\n\t\tapp.Unload(name)\n\t}\n\thttp.Redirect(w, r, \"\/\", 303)\n}\n\nfunc list(w http.ResponseWriter, r *http.Request) {\n\tnotloaded, loaded := app.List()\n\tt, _ := template.ParseFiles(\"templates\/list.html\")\n\tt.Execute(w, map[string]*[]string{\"notloaded\": ¬loaded, \"loaded\": &loaded})\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"encoding\/json\"\n \"fmt\"\n \"github.com\/mattbaird\/elastigo\/api\"\n \"github.com\/mattbaird\/elastigo\/core\"\n \"labix.org\/v2\/mgo\/bson\"\n \"os\"\n \"strconv\"\n \"strings\"\n \"time\"\n)\n\ntype PublisherType struct {\n name string\n disabled bool\n\n RefreshTopologyTimer <-chan time.Time\n TopologyMap map[string]string\n}\n\nvar Publisher PublisherType\n\n\/\/ Config\ntype tomlAgent struct {\n Name string\n Refresh_topology_freq int\n Ignore_outgoing bool\n}\ntype tomlMothership struct {\n Host string\n Port int\n Protocol string\n Username string\n Password string\n}\n\ntype Event struct {\n Timestamp time.Time `json:\"@timestamp\"`\n Type string `json:\"type\"`\n Agent string `json:\"agent\"`\n Src_ip string `json:\"src_ip\"`\n Src_port uint16 `json:\"src_port\"`\n Src_proc string `json:\"src_proc\"`\n Src_country string `json:\"src_country\"`\n Src_server string `json:\"src_server\"`\n Dst_ip string `json:\"dst_ip\"`\n Dst_port uint16 `json:\"dst_port\"`\n Dst_proc string `json:\"dst_proc\"`\n Dst_server string `json:\"dst_server\"`\n ResponseTime int32 `json:\"responsetime\"`\n Status string `json:\"status\"`\n RequestRaw string `json:\"request_raw\"`\n ResponseRaw string `json:\"response_raw\"`\n\n Mysql bson.M `json:\"mysql\"`\n Http bson.M `json:\"http\"`\n Redis bson.M `json:\"redis\"`\n Pgsql bson.M `json:\"pgsql\"`\n}\n\ntype Topology struct {\n Name string `json:\"name\"`\n Ip string `json:\"ip\"`\n}\n\nfunc PrintPublishEvent(event *Event) {\n json, err := json.MarshalIndent(event, \"\", \" \")\n if err != nil {\n ERR(\"json.Marshal: %s\", err)\n } else {\n DEBUG(\"publish\", \"Publish: %s\", string(json))\n }\n}\n\nfunc (publisher *PublisherType) GetServerName(ip string) string {\n \/\/ in case the IP is localhost, return current agent name\n islocal, err := IsLoopback(ip)\n if err != nil {\n ERR(\"Parsing IP %s fails with: %s\", ip, err)\n return \"\"\n } else {\n if islocal {\n return publisher.name\n }\n }\n \/\/ find the agent with the desired IP\n name, exists := publisher.TopologyMap[ip]\n if !exists {\n return \"\"\n }\n return name\n}\n\nfunc (publisher *PublisherType) PublishHttpTransaction(t *HttpTransaction) error {\n\n event := Event{}\n\n event.Type = \"http\"\n code := t.Http[\"code\"].(int)\n if code < 400 {\n event.Status = \"OK\"\n } else {\n event.Status = \"Error\"\n }\n event.ResponseTime = t.ResponseTime\n event.RequestRaw = t.Request_raw\n event.ResponseRaw = t.Response_raw\n event.Http = t.Http\n\n return publisher.PublishEvent(t.ts, &t.Src, &t.Dst, &event)\n\n}\n\nfunc (publisher *PublisherType) PublishMysqlTransaction(t *MysqlTransaction) error {\n\n event := Event{}\n event.Type = \"mysql\"\n\n if t.Mysql[\"iserror\"].(bool) {\n event.Status = \"Error\"\n } else {\n event.Status = \"OK\"\n }\n\n event.ResponseTime = t.ResponseTime\n event.RequestRaw = t.Request_raw\n event.ResponseRaw = t.Response_raw\n event.Mysql = t.Mysql\n\n return publisher.PublishEvent(t.ts, &t.Src, &t.Dst, &event)\n}\n\nfunc (publisher *PublisherType) PublishRedisTransaction(t *RedisTransaction) error {\n\n event := Event{}\n event.Type = \"redis\"\n event.Status = \"OK\"\n event.ResponseTime = t.ResponseTime\n event.RequestRaw = t.Request_raw\n event.ResponseRaw = t.Response_raw\n event.Redis = t.Redis\n\n return publisher.PublishEvent(t.ts, &t.Src, &t.Dst, &event)\n}\n\nfunc (publisher *PublisherType) PublishEvent(ts time.Time, src *Endpoint, dst *Endpoint, event *Event) error {\n index := fmt.Sprintf(\"packetbeat-%d.%02d.%02d\", ts.Year(), ts.Month(), ts.Day())\n\n event.Src_server = publisher.GetServerName(src.Ip)\n event.Dst_server = publisher.GetServerName(dst.Ip)\n\n if _Config.Agent.Ignore_outgoing && event.Dst_server != \"\" &&\n event.Dst_server != publisher.name {\n \/\/ duplicated transaction -> ignore it\n DEBUG(\"publish\", \"Ignore duplicated REDIS transaction on %s: %s -> %s\", publisher.name, event.Src_server, event.Dst_server)\n return nil\n }\n\n event.Timestamp = ts\n event.Agent = publisher.name\n event.Src_ip = src.Ip\n event.Src_port = src.Port\n event.Src_proc = src.Proc\n event.Dst_ip = dst.Ip\n event.Dst_port = dst.Port\n event.Dst_proc = dst.Proc\n\n \/\/ set src_country if no src_server is set\n event.Src_country = \"\"\n if _GeoLite != nil {\n if len(event.Src_server) == 0 { \/\/ only for external IP addresses\n loc := _GeoLite.GetLocationByIP(src.Ip)\n if loc != nil {\n event.Src_country = loc.CountryCode\n }\n }\n }\n\n if IS_DEBUG(\"publish\") {\n PrintPublishEvent(event)\n }\n\n \/\/ add Redis transaction\n var err error\n if !publisher.disabled {\n _, err = core.Index(index, event.Type, \"\", nil, event)\n }\n\n return err\n}\nfunc (publisher *PublisherType) PublishPgsqlTransaction(t *PgsqlTransaction) error {\n\n event := Event{}\n\n event.Type = \"pgsql\"\n if t.Pgsql[\"iserror\"].(bool) {\n event.Status = \"Error\"\n } else {\n event.Status = \"OK\"\n }\n event.ResponseTime = t.ResponseTime\n event.RequestRaw = t.Request_raw\n event.ResponseRaw = t.Response_raw\n event.Pgsql = t.Pgsql\n\n return publisher.PublishEvent(t.ts, &t.Src, &t.Dst, &event)\n}\n\nfunc (publisher *PublisherType) UpdateTopologyPeriodically() {\n for _ = range publisher.RefreshTopologyTimer {\n publisher.UpdateTopology()\n }\n}\n\nfunc (publisher *PublisherType) UpdateTopology() {\n\n DEBUG(\"publish\", \"Updating Topology\")\n\n \/\/ get all agents IPs from Elasticsearch\n TopologyMapTmp := make(map[string]string)\n res, err := core.SearchUri(\"packetbeat-topology\", \"server-ip\", nil)\n if err == nil {\n for _, server := range res.Hits.Hits {\n var top Topology\n err = json.Unmarshal([]byte(*server.Source), &top)\n if err != nil {\n ERR(\"json.Unmarshal fails with: %s\", err)\n }\n \/\/ add mapping\n TopologyMapTmp[top.Ip] = top.Name\n }\n } else {\n ERR(\"core.SearchRequest fails with: %s\", err)\n }\n\n \/\/ update topology map\n publisher.TopologyMap = TopologyMapTmp\n\n DEBUG(\"publish\", \"[%s] Map: %s\", publisher.name, publisher.TopologyMap)\n}\n\nfunc (publisher *PublisherType) PublishTopology(params ...string) error {\n\n var localAddrs []string = params\n\n if len(params) == 0 {\n addrs, err := LocalIpAddrsAsStrings(false)\n if err != nil {\n ERR(\"Getting local IP addresses fails with: %s\", err)\n return err\n }\n localAddrs = addrs\n }\n\n DEBUG(\"publish\", \"Local IP addresses (without loopbacks): %s\", localAddrs)\n\n \/\/ delete old IP addresses\n searchJson := fmt.Sprintf(\"{query: {term: {name: %s}}}\", strconv.Quote(publisher.name))\n res, err := core.SearchRequest(\"packetbeat-topology\", \"server-ip\", nil, searchJson)\n if err == nil {\n for _, server := range res.Hits.Hits {\n\n var top Topology\n err = json.Unmarshal([]byte(*server.Source), &top)\n if err != nil {\n ERR(\"Failed to unmarshal json data: %s\", err)\n }\n if !stringInSlice(top.Ip, localAddrs) {\n res, err := core.Delete(\"packetbeat-topology\", \"server-ip\" \/*id*\/, top.Ip, nil)\n if err != nil {\n ERR(\"Failed to delete the old IP address from packetbeat-topology\")\n }\n if !res.Ok {\n ERR(\"Fail to delete old topology entry\")\n }\n }\n\n }\n }\n\n \/\/ add new IP addresses\n for _, addr := range localAddrs {\n\n \/\/ check if the IP is already in the elasticsearch, before adding it\n found, err := core.Exists(\"packetbeat-topology\", \"server-ip\" \/*id*\/, addr, nil)\n if err != nil {\n ERR(\"core.Exists fails with: %s\", err)\n } else {\n\n if !found {\n res, err := core.Index(\"packetbeat-topology\", \"server-ip\" \/*id*\/, addr, nil,\n Topology{publisher.name, addr})\n if err != nil {\n return err\n }\n if !res.Ok {\n ERR(\"Fail to add new topology entry\")\n }\n }\n }\n }\n\n DEBUG(\"publish\", \"Topology: name=%s, ips=%s\", publisher.name, strings.Join(localAddrs, \" \"))\n\n \/\/ initialize local topology map\n publisher.TopologyMap = make(map[string]string)\n\n return nil\n}\n\nfunc (publisher *PublisherType) Init(publishDisabled bool) error {\n var err error\n\n \/\/ Set the Elasticsearch Host to Connect to\n api.Domain = _Config.Elasticsearch.Host\n api.Port = fmt.Sprintf(\"%d\", _Config.Elasticsearch.Port)\n api.Username = _Config.Elasticsearch.Username\n api.Password = _Config.Elasticsearch.Password\n\n if _Config.Elasticsearch.Protocol != \"\" {\n api.Protocol = _Config.Elasticsearch.Protocol\n }\n\n INFO(\"Use %s:\/\/%s:%s as publisher\", api.Protocol, api.Domain, api.Port)\n\n publisher.name = _Config.Agent.Name\n if len(publisher.name) == 0 {\n \/\/ use the hostname\n publisher.name, err = os.Hostname()\n if err != nil {\n return err\n }\n\n INFO(\"No agent name configured, using hostname '%s'\", publisher.name)\n }\n\n publisher.disabled = publishDisabled\n if publisher.disabled {\n INFO(\"Dry run mode. Elasticsearch won't be updated or queried.\")\n }\n\n RefreshTopologyFreq := 10 * time.Second\n if _Config.Agent.Refresh_topology_freq != 0 {\n RefreshTopologyFreq = time.Duration(_Config.Agent.Refresh_topology_freq) * time.Second\n }\n publisher.RefreshTopologyTimer = time.Tick(RefreshTopologyFreq)\n\n if !publisher.disabled {\n \/\/ register agent and its public IP addresses\n err = publisher.PublishTopology()\n if err != nil {\n ERR(\"Failed to publish topology: %s\", err)\n return err\n }\n\n \/\/ update topology periodically\n go publisher.UpdateTopologyPeriodically()\n }\n\n return nil\n}\n<commit_msg>Use predefined states for status instead of string<commit_after>package main\n\nimport (\n \"encoding\/json\"\n \"fmt\"\n \"github.com\/mattbaird\/elastigo\/api\"\n \"github.com\/mattbaird\/elastigo\/core\"\n \"labix.org\/v2\/mgo\/bson\"\n \"os\"\n \"strconv\"\n \"strings\"\n \"time\"\n)\n\ntype PublisherType struct {\n name string\n disabled bool\n\n RefreshTopologyTimer <-chan time.Time\n TopologyMap map[string]string\n}\n\nvar Publisher PublisherType\n\n\/\/ Config\ntype tomlAgent struct {\n Name string\n Refresh_topology_freq int\n Ignore_outgoing bool\n}\ntype tomlMothership struct {\n Host string\n Port int\n Protocol string\n Username string\n Password string\n}\n\ntype Event struct {\n Timestamp time.Time `json:\"@timestamp\"`\n Type string `json:\"type\"`\n Agent string `json:\"agent\"`\n Src_ip string `json:\"src_ip\"`\n Src_port uint16 `json:\"src_port\"`\n Src_proc string `json:\"src_proc\"`\n Src_country string `json:\"src_country\"`\n Src_server string `json:\"src_server\"`\n Dst_ip string `json:\"dst_ip\"`\n Dst_port uint16 `json:\"dst_port\"`\n Dst_proc string `json:\"dst_proc\"`\n Dst_server string `json:\"dst_server\"`\n ResponseTime int32 `json:\"responsetime\"`\n Status string `json:\"status\"`\n RequestRaw string `json:\"request_raw\"`\n ResponseRaw string `json:\"response_raw\"`\n\n Mysql bson.M `json:\"mysql\"`\n Http bson.M `json:\"http\"`\n Redis bson.M `json:\"redis\"`\n Pgsql bson.M `json:\"pgsql\"`\n}\n\ntype Topology struct {\n Name string `json:\"name\"`\n Ip string `json:\"ip\"`\n}\n\nfunc PrintPublishEvent(event *Event) {\n json, err := json.MarshalIndent(event, \"\", \" \")\n if err != nil {\n ERR(\"json.Marshal: %s\", err)\n } else {\n DEBUG(\"publish\", \"Publish: %s\", string(json))\n }\n}\n\nconst (\n OK_STATUS = \"OK\"\n ERROR_STATUS = \"Error\"\n)\n\nfunc (publisher *PublisherType) GetServerName(ip string) string {\n \/\/ in case the IP is localhost, return current agent name\n islocal, err := IsLoopback(ip)\n if err != nil {\n ERR(\"Parsing IP %s fails with: %s\", ip, err)\n return \"\"\n } else {\n if islocal {\n return publisher.name\n }\n }\n \/\/ find the agent with the desired IP\n name, exists := publisher.TopologyMap[ip]\n if !exists {\n return \"\"\n }\n return name\n}\n\nfunc (publisher *PublisherType) PublishHttpTransaction(t *HttpTransaction) error {\n\n event := Event{}\n\n event.Type = \"http\"\n code := t.Http[\"code\"].(int)\n if code < 400 {\n event.Status = OK_STATUS\n } else {\n event.Status = ERROR_STATUS\n }\n event.ResponseTime = t.ResponseTime\n event.RequestRaw = t.Request_raw\n event.ResponseRaw = t.Response_raw\n event.Http = t.Http\n\n return publisher.PublishEvent(t.ts, &t.Src, &t.Dst, &event)\n\n}\n\nfunc (publisher *PublisherType) PublishMysqlTransaction(t *MysqlTransaction) error {\n\n event := Event{}\n event.Type = \"mysql\"\n\n if t.Mysql[\"iserror\"].(bool) {\n event.Status = ERROR_STATUS\n } else {\n event.Status = OK_STATUS\n }\n\n event.ResponseTime = t.ResponseTime\n event.RequestRaw = t.Request_raw\n event.ResponseRaw = t.Response_raw\n event.Mysql = t.Mysql\n\n return publisher.PublishEvent(t.ts, &t.Src, &t.Dst, &event)\n}\n\nfunc (publisher *PublisherType) PublishRedisTransaction(t *RedisTransaction) error {\n\n event := Event{}\n event.Type = \"redis\"\n event.Status = OK_STATUS\n event.ResponseTime = t.ResponseTime\n event.RequestRaw = t.Request_raw\n event.ResponseRaw = t.Response_raw\n event.Redis = t.Redis\n\n return publisher.PublishEvent(t.ts, &t.Src, &t.Dst, &event)\n}\n\nfunc (publisher *PublisherType) PublishEvent(ts time.Time, src *Endpoint, dst *Endpoint, event *Event) error {\n index := fmt.Sprintf(\"packetbeat-%d.%02d.%02d\", ts.Year(), ts.Month(), ts.Day())\n\n event.Src_server = publisher.GetServerName(src.Ip)\n event.Dst_server = publisher.GetServerName(dst.Ip)\n\n if _Config.Agent.Ignore_outgoing && event.Dst_server != \"\" &&\n event.Dst_server != publisher.name {\n \/\/ duplicated transaction -> ignore it\n DEBUG(\"publish\", \"Ignore duplicated REDIS transaction on %s: %s -> %s\", publisher.name, event.Src_server, event.Dst_server)\n return nil\n }\n\n event.Timestamp = ts\n event.Agent = publisher.name\n event.Src_ip = src.Ip\n event.Src_port = src.Port\n event.Src_proc = src.Proc\n event.Dst_ip = dst.Ip\n event.Dst_port = dst.Port\n event.Dst_proc = dst.Proc\n\n \/\/ set src_country if no src_server is set\n event.Src_country = \"\"\n if _GeoLite != nil {\n if len(event.Src_server) == 0 { \/\/ only for external IP addresses\n loc := _GeoLite.GetLocationByIP(src.Ip)\n if loc != nil {\n event.Src_country = loc.CountryCode\n }\n }\n }\n\n if IS_DEBUG(\"publish\") {\n PrintPublishEvent(event)\n }\n\n \/\/ add Redis transaction\n var err error\n if !publisher.disabled {\n _, err = core.Index(index, event.Type, \"\", nil, event)\n }\n\n return err\n}\nfunc (publisher *PublisherType) PublishPgsqlTransaction(t *PgsqlTransaction) error {\n\n event := Event{}\n\n event.Type = \"pgsql\"\n if t.Pgsql[\"iserror\"].(bool) {\n event.Status = ERROR_STATUS\n } else {\n event.Status = OK_STATUS\n }\n event.ResponseTime = t.ResponseTime\n event.RequestRaw = t.Request_raw\n event.ResponseRaw = t.Response_raw\n event.Pgsql = t.Pgsql\n\n return publisher.PublishEvent(t.ts, &t.Src, &t.Dst, &event)\n}\n\nfunc (publisher *PublisherType) UpdateTopologyPeriodically() {\n for _ = range publisher.RefreshTopologyTimer {\n publisher.UpdateTopology()\n }\n}\n\nfunc (publisher *PublisherType) UpdateTopology() {\n\n DEBUG(\"publish\", \"Updating Topology\")\n\n \/\/ get all agents IPs from Elasticsearch\n TopologyMapTmp := make(map[string]string)\n res, err := core.SearchUri(\"packetbeat-topology\", \"server-ip\", nil)\n if err == nil {\n for _, server := range res.Hits.Hits {\n var top Topology\n err = json.Unmarshal([]byte(*server.Source), &top)\n if err != nil {\n ERR(\"json.Unmarshal fails with: %s\", err)\n }\n \/\/ add mapping\n TopologyMapTmp[top.Ip] = top.Name\n }\n } else {\n ERR(\"core.SearchRequest fails with: %s\", err)\n }\n\n \/\/ update topology map\n publisher.TopologyMap = TopologyMapTmp\n\n DEBUG(\"publish\", \"[%s] Map: %s\", publisher.name, publisher.TopologyMap)\n}\n\nfunc (publisher *PublisherType) PublishTopology(params ...string) error {\n\n var localAddrs []string = params\n\n if len(params) == 0 {\n addrs, err := LocalIpAddrsAsStrings(false)\n if err != nil {\n ERR(\"Getting local IP addresses fails with: %s\", err)\n return err\n }\n localAddrs = addrs\n }\n\n DEBUG(\"publish\", \"Local IP addresses (without loopbacks): %s\", localAddrs)\n\n \/\/ delete old IP addresses\n searchJson := fmt.Sprintf(\"{query: {term: {name: %s}}}\", strconv.Quote(publisher.name))\n res, err := core.SearchRequest(\"packetbeat-topology\", \"server-ip\", nil, searchJson)\n if err == nil {\n for _, server := range res.Hits.Hits {\n\n var top Topology\n err = json.Unmarshal([]byte(*server.Source), &top)\n if err != nil {\n ERR(\"Failed to unmarshal json data: %s\", err)\n }\n if !stringInSlice(top.Ip, localAddrs) {\n res, err := core.Delete(\"packetbeat-topology\", \"server-ip\" \/*id*\/, top.Ip, nil)\n if err != nil {\n ERR(\"Failed to delete the old IP address from packetbeat-topology\")\n }\n if !res.Ok {\n ERR(\"Fail to delete old topology entry\")\n }\n }\n\n }\n }\n\n \/\/ add new IP addresses\n for _, addr := range localAddrs {\n\n \/\/ check if the IP is already in the elasticsearch, before adding it\n found, err := core.Exists(\"packetbeat-topology\", \"server-ip\" \/*id*\/, addr, nil)\n if err != nil {\n ERR(\"core.Exists fails with: %s\", err)\n } else {\n\n if !found {\n res, err := core.Index(\"packetbeat-topology\", \"server-ip\" \/*id*\/, addr, nil,\n Topology{publisher.name, addr})\n if err != nil {\n return err\n }\n if !res.Ok {\n ERR(\"Fail to add new topology entry\")\n }\n }\n }\n }\n\n DEBUG(\"publish\", \"Topology: name=%s, ips=%s\", publisher.name, strings.Join(localAddrs, \" \"))\n\n \/\/ initialize local topology map\n publisher.TopologyMap = make(map[string]string)\n\n return nil\n}\n\nfunc (publisher *PublisherType) Init(publishDisabled bool) error {\n var err error\n\n \/\/ Set the Elasticsearch Host to Connect to\n api.Domain = _Config.Elasticsearch.Host\n api.Port = fmt.Sprintf(\"%d\", _Config.Elasticsearch.Port)\n api.Username = _Config.Elasticsearch.Username\n api.Password = _Config.Elasticsearch.Password\n\n if _Config.Elasticsearch.Protocol != \"\" {\n api.Protocol = _Config.Elasticsearch.Protocol\n }\n\n INFO(\"Use %s:\/\/%s:%s as publisher\", api.Protocol, api.Domain, api.Port)\n\n publisher.name = _Config.Agent.Name\n if len(publisher.name) == 0 {\n \/\/ use the hostname\n publisher.name, err = os.Hostname()\n if err != nil {\n return err\n }\n\n INFO(\"No agent name configured, using hostname '%s'\", publisher.name)\n }\n\n publisher.disabled = publishDisabled\n if publisher.disabled {\n INFO(\"Dry run mode. Elasticsearch won't be updated or queried.\")\n }\n\n RefreshTopologyFreq := 10 * time.Second\n if _Config.Agent.Refresh_topology_freq != 0 {\n RefreshTopologyFreq = time.Duration(_Config.Agent.Refresh_topology_freq) * time.Second\n }\n publisher.RefreshTopologyTimer = time.Tick(RefreshTopologyFreq)\n\n if !publisher.disabled {\n \/\/ register agent and its public IP addresses\n err = publisher.PublishTopology()\n if err != nil {\n ERR(\"Failed to publish topology: %s\", err)\n return err\n }\n\n \/\/ update topology periodically\n go publisher.UpdateTopologyPeriodically()\n }\n\n return nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build android\n\npackage nk\n\nimport (\n\t\"sync\"\n\t\"time\"\n\t\"unsafe\"\n\n\t\"github.com\/xlab\/android-go\/android\"\n\t\"github.com\/xlab\/android-go\/egl\"\n)\n\nfunc NkPlatformShutdown() {\n\tNkFontAtlasClear(state.atlas)\n\tNkFree(state.ctx)\n\tdeviceDestroy()\n\tstate = nil\n}\n\nfunc NkFontStashBegin(atlas **FontAtlas) {\n\tstate.atlas = NewFontAtlas()\n\tNkFontAtlasInitDefault(state.atlas)\n\tNkFontAtlasBegin(state.atlas)\n\t*atlas = state.atlas\n}\n\nfunc NkFontStashEnd() {\n\tdev := state.ogl\n\tvar width, height int32\n\timage := NkFontAtlasBake(state.atlas, &width, &height, FontAtlasRgba32)\n\tdeviceUploadAtlas(image, width, height)\n\tNkFontAtlasEnd(state.atlas, NkHandleId(int32(state.ogl.font_tex[0])), &dev.null)\n\tif font := state.atlas.DefaultFont(); font != nil {\n\t\tNkStyleSetFont(state.ctx, font.Handle())\n\t}\n}\n\ntype PlatformKeyEvent struct {\n}\n\ntype PlatformTouchEvent struct {\n\tAction int32\n\tX, Y int32\n}\n\nfunc NkPlatformInput(touch *PlatformTouchEvent, key *PlatformKeyEvent) {\n\tif touch != nil && state != nil {\n\t\tstate.touch.Add(*touch)\n\t}\n\tif key != nil {\n\t\t\/\/ TODO\n\t}\n}\n\nfunc NkPlatformNewFrame() {\n\tdisplay := state.display\n\tctx := state.ctx\n\n\t\/\/ for Android scale ratio can be tricky stuff\n\tstate.width, state.height = display.Width, display.Height\n\tstate.display_width, state.display_height = display.Width, display.Height\n\tstate.fbScaleX = float32(state.display_width) \/ float32(state.width)\n\tstate.fbScaleY = float32(state.display_height) \/ float32(state.height)\n\n\tNkInputBegin(ctx)\n\tfor _, r := range state.text {\n\t\tNkInputUnicode(ctx, Rune(r))\n\t}\n\tif state.touch.CurrentAction() == android.MotionEventActionUp {\n\t\tctx.Input().Mouse().SetPos(0, 0)\n\t}\n\tstate.touch.Observe(func(action, x, y int32) {\n\t\tswitch action {\n\t\tcase android.MotionEventActionDown:\n\t\t\tctx.Input().Mouse().SetPos(x, y)\n\t\t\tNkInputButton(ctx, ButtonLeft, x, y, 1)\n\t\tcase android.MotionEventActionMove:\n\t\t\tNkInputMotion(ctx, x, y)\n\t\tcase android.MotionEventActionUp:\n\t\t\tctx.Input().Mouse().SetPos(x, y)\n\t\t\tNkInputButton(ctx, ButtonLeft, x, y, 0)\n\t\t}\n\t})\n\tif m := ctx.Input().Mouse(); m.Grabbed() {\n\t\tprevX, prevY := m.Prev()\n\t\tm.SetPos(prevX, prevY)\n\t}\n\tNkInputEnd(ctx)\n}\n\nvar (\n\tsizeofDrawIndex = unsafe.Sizeof(DrawIndex(0))\n\temptyVertex = platformVertex{}\n)\n\ntype platformVertex struct {\n\tposition [2]float32\n\tuv [2]float32\n\tcol [4]Byte\n}\n\nconst (\n\tplatformVertexSize = unsafe.Sizeof(platformVertex{})\n\tplatformVertexAlign = unsafe.Alignof(platformVertex{})\n)\n\ntype platformState struct {\n\tdisplay *egl.DisplayHandle\n\ttouch *touchHandler\n\n\twidth int\n\theight int\n\tdisplay_width int\n\tdisplay_height int\n\n\togl *platformDevice\n\tctx *Context\n\tatlas *FontAtlas\n\n\tfbScaleX float32\n\tfbScaleY float32\n\n\ttext string\n\tscroll float32\n}\n\nfunc newPlatformState() *platformState {\n\treturn &platformState{\n\t\togl: &platformDevice{},\n\t\ttouch: newTouchHandler(),\n\t}\n}\n\nfunc NkPlatformDisplayHandle() *egl.DisplayHandle {\n\tif state != nil {\n\t\treturn state.display\n\t}\n\treturn nil\n}\n\nvar state *platformState\n\ntype platformDevice struct {\n\tcmds *Buffer\n\tnull DrawNullTexture\n\n\tvbo, vao, ebo []uint32\n\tprog uint32\n\tvert_shdr uint32\n\tfrag_shdr uint32\n\n\tattrib_pos uint32\n\tattrib_uv uint32\n\tattrib_col uint32\n\tuniform_tex int32\n\tuniform_proj int32\n\n\tfont_tex []uint32\n}\n\nconst touchDecayTime = 500 * time.Millisecond\n\ntype touchHandler struct {\n\tcurrent *PlatformTouchEvent\n\tqueue []PlatformTouchEvent\n\tmux *sync.RWMutex\n\tdecay *time.Timer\n}\n\nfunc newTouchHandler() *touchHandler {\n\th := &touchHandler{\n\t\tqueue: make([]PlatformTouchEvent, 0, 1024),\n\t\tmux: new(sync.RWMutex),\n\t}\n\th.decay = time.NewTimer(time.Minute)\n\th.decay.Stop()\n\tgo func() {\n\t\tfor range h.decay.C {\n\t\t\th.decay.Reset(touchDecayTime)\n\t\t}\n\t}()\n\treturn h\n}\n\nfunc (t *touchHandler) Add(ev PlatformTouchEvent) {\n\tt.mux.Lock()\n\tt.queue = append(t.queue, ev)\n\tt.current = &ev\n\tt.decay.Reset(touchDecayTime)\n\tt.mux.Unlock()\n}\n\nfunc (t *touchHandler) Reset() {\n\tt.mux.Lock()\n\tif ql := len(t.queue); ql > 0 {\n\t\tt.queue = t.queue[:0]\n\t}\n\tt.mux.Unlock()\n}\n\nfunc (t *touchHandler) Observe(fn func(action, x, y int32)) {\n\tt.mux.Lock()\n\tif len(t.queue) > 0 {\n\t\tfor i := range t.queue {\n\t\t\tfn(t.queue[i].Action, t.queue[i].X, t.queue[i].Y)\n\t\t}\n\t\tt.queue = t.queue[:0]\n\t}\n\tt.mux.Unlock()\n}\n\nfunc (t *touchHandler) CurrentPos() (x int32, y int32) {\n\tt.mux.RLock()\n\tif t.current != nil {\n\t\tx, y = t.current.X, t.current.Y\n\t}\n\tt.mux.RUnlock()\n\treturn x, y\n}\n\nfunc (t *touchHandler) CurrentAction() (a int32) {\n\tt.mux.RLock()\n\tif t.current != nil {\n\t\ta = t.current.Action\n\t} else {\n\t\ta = -1\n\t}\n\tt.mux.RUnlock()\n\treturn a\n}\n<commit_msg>Fix touch drag.<commit_after>\/\/ +build android\n\npackage nk\n\nimport (\n\t\"sync\"\n\t\"time\"\n\t\"unsafe\"\n\n\t\"github.com\/xlab\/android-go\/android\"\n\t\"github.com\/xlab\/android-go\/egl\"\n)\n\nfunc NkPlatformShutdown() {\n\tNkFontAtlasClear(state.atlas)\n\tNkFree(state.ctx)\n\tdeviceDestroy()\n\tstate = nil\n}\n\nfunc NkFontStashBegin(atlas **FontAtlas) {\n\tstate.atlas = NewFontAtlas()\n\tNkFontAtlasInitDefault(state.atlas)\n\tNkFontAtlasBegin(state.atlas)\n\t*atlas = state.atlas\n}\n\nfunc NkFontStashEnd() {\n\tdev := state.ogl\n\tvar width, height int32\n\timage := NkFontAtlasBake(state.atlas, &width, &height, FontAtlasRgba32)\n\tdeviceUploadAtlas(image, width, height)\n\tNkFontAtlasEnd(state.atlas, NkHandleId(int32(state.ogl.font_tex[0])), &dev.null)\n\tif font := state.atlas.DefaultFont(); font != nil {\n\t\tNkStyleSetFont(state.ctx, font.Handle())\n\t}\n}\n\ntype PlatformKeyEvent struct {\n}\n\ntype PlatformTouchEvent struct {\n\tAction int32\n\tX, Y int32\n}\n\nfunc NkPlatformInput(touch *PlatformTouchEvent, key *PlatformKeyEvent) {\n\tif touch != nil && state != nil {\n\t\tstate.touch.Add(*touch)\n\t}\n\tif key != nil {\n\t\t\/\/ TODO\n\t}\n}\n\nfunc NkPlatformNewFrame() {\n\tdisplay := state.display\n\tctx := state.ctx\n\n\t\/\/ for Android scale ratio can be tricky stuff\n\tstate.width, state.height = display.Width, display.Height\n\tstate.display_width, state.display_height = display.Width, display.Height\n\tstate.fbScaleX = float32(state.display_width) \/ float32(state.width)\n\tstate.fbScaleY = float32(state.display_height) \/ float32(state.height)\n\n\tNkInputBegin(ctx)\n\tfor _, r := range state.text {\n\t\tNkInputUnicode(ctx, Rune(r))\n\t}\n\tif state.touch.CurrentAction() == android.MotionEventActionUp {\n\t\tctx.Input().Mouse().SetPos(0, 0)\n\t}\n\tstate.touch.Observe(func(action, x, y int32) {\n\t\tswitch action {\n\t\tcase android.MotionEventActionDown:\n\t\t\tctx.Input().Mouse().SetPos(x, y)\n\t\t\tNkInputButton(ctx, ButtonLeft, x, y, 1)\n\t\tcase android.MotionEventActionMove:\n\t\t\tNkInputMotion(ctx, x, y)\n\t\tcase android.MotionEventActionUp:\n\t\t\tctx.Input().Mouse().SetPos(x, y)\n\t\t\tNkInputButton(ctx, ButtonLeft, x, y, 0)\n\t\t}\n\t})\n\tNkInputEnd(ctx)\n}\n\nvar (\n\tsizeofDrawIndex = unsafe.Sizeof(DrawIndex(0))\n\temptyVertex = platformVertex{}\n)\n\ntype platformVertex struct {\n\tposition [2]float32\n\tuv [2]float32\n\tcol [4]Byte\n}\n\nconst (\n\tplatformVertexSize = unsafe.Sizeof(platformVertex{})\n\tplatformVertexAlign = unsafe.Alignof(platformVertex{})\n)\n\ntype platformState struct {\n\tdisplay *egl.DisplayHandle\n\ttouch *touchHandler\n\n\twidth int\n\theight int\n\tdisplay_width int\n\tdisplay_height int\n\n\togl *platformDevice\n\tctx *Context\n\tatlas *FontAtlas\n\n\tfbScaleX float32\n\tfbScaleY float32\n\n\ttext string\n\tscroll float32\n}\n\nfunc newPlatformState() *platformState {\n\treturn &platformState{\n\t\togl: &platformDevice{},\n\t\ttouch: newTouchHandler(),\n\t}\n}\n\nfunc NkPlatformDisplayHandle() *egl.DisplayHandle {\n\tif state != nil {\n\t\treturn state.display\n\t}\n\treturn nil\n}\n\nvar state *platformState\n\ntype platformDevice struct {\n\tcmds *Buffer\n\tnull DrawNullTexture\n\n\tvbo, vao, ebo []uint32\n\tprog uint32\n\tvert_shdr uint32\n\tfrag_shdr uint32\n\n\tattrib_pos uint32\n\tattrib_uv uint32\n\tattrib_col uint32\n\tuniform_tex int32\n\tuniform_proj int32\n\n\tfont_tex []uint32\n}\n\nconst touchDecayTime = 500 * time.Millisecond\n\ntype touchHandler struct {\n\tcurrent *PlatformTouchEvent\n\tqueue []PlatformTouchEvent\n\tmux *sync.RWMutex\n\tdecay *time.Timer\n}\n\nfunc newTouchHandler() *touchHandler {\n\th := &touchHandler{\n\t\tqueue: make([]PlatformTouchEvent, 0, 1024),\n\t\tmux: new(sync.RWMutex),\n\t}\n\th.decay = time.NewTimer(time.Minute)\n\th.decay.Stop()\n\tgo func() {\n\t\tfor range h.decay.C {\n\t\t\th.decay.Reset(touchDecayTime)\n\t\t}\n\t}()\n\treturn h\n}\n\nfunc (t *touchHandler) Add(ev PlatformTouchEvent) {\n\tt.mux.Lock()\n\tt.queue = append(t.queue, ev)\n\tt.current = &ev\n\tt.decay.Reset(touchDecayTime)\n\tt.mux.Unlock()\n}\n\nfunc (t *touchHandler) Reset() {\n\tt.mux.Lock()\n\tif ql := len(t.queue); ql > 0 {\n\t\tt.queue = t.queue[:0]\n\t}\n\tt.mux.Unlock()\n}\n\nfunc (t *touchHandler) Observe(fn func(action, x, y int32)) {\n\tt.mux.Lock()\n\tif len(t.queue) > 0 {\n\t\tfor i := range t.queue {\n\t\t\tfn(t.queue[i].Action, t.queue[i].X, t.queue[i].Y)\n\t\t}\n\t\tt.queue = t.queue[:0]\n\t}\n\tt.mux.Unlock()\n}\n\nfunc (t *touchHandler) CurrentPos() (x int32, y int32) {\n\tt.mux.RLock()\n\tif t.current != nil {\n\t\tx, y = t.current.X, t.current.Y\n\t}\n\tt.mux.RUnlock()\n\treturn x, y\n}\n\nfunc (t *touchHandler) CurrentAction() (a int32) {\n\tt.mux.RLock()\n\tif t.current != nil {\n\t\ta = t.current.Action\n\t} else {\n\t\ta = -1\n\t}\n\tt.mux.RUnlock()\n\treturn a\n}\n<|endoftext|>"} {"text":"<commit_before>package cienv\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"os\"\n)\n\n\/\/ https:\/\/help.github.com\/en\/articles\/virtual-environments-for-github-actions#default-environment-variables\ntype GitHubEvent struct {\n\tPullRequest GitHubPullRequest `json:\"pull_request\"`\n\tRepository struct {\n\t\tOwner struct {\n\t\t\tLogin string `json:\"login\"`\n\t\t} `json:\"owner\"`\n\t\tName string `json:\"name\"`\n\t} `json:\"repository\"`\n\tCheckSuite struct {\n\t\tAfter string `json:\"after\"`\n\t\tPullRequests []GitHubPullRequest `json:\"pull_requests\"`\n\t} `json:\"check_suite\"`\n}\n\ntype GitHubPullRequest struct {\n\tNumber int `json:\"number\"`\n\tHead struct {\n\t\tSha string `json:\"sha\"`\n\t\tRef string `json:\"ref\"`\n\t} `json:\"head\"`\n}\n\nfunc getBuildInfoFromGitHubAction() (*BuildInfo, bool, error) {\n\teventPath := os.Getenv(\"GITHUB_EVENT_PATH\")\n\tif eventPath == \"\" {\n\t\treturn nil, false, errors.New(\"GITHUB_EVENT_PATH not found\")\n\t}\n\treturn getBuildInfoFromGitHubActionEventPath(eventPath)\n}\nfunc getBuildInfoFromGitHubActionEventPath(eventPath string) (*BuildInfo, bool, error) {\n\tf, err := os.Open(eventPath)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tdefer f.Close()\n\tvar event GitHubEvent\n\tif err := json.NewDecoder(f).Decode(&event); err != nil {\n\t\treturn nil, false, err\n\t}\n\tinfo := &BuildInfo{\n\t\tOwner: event.Repository.Owner.Login,\n\t\tRepo: event.Repository.Name,\n\t\tPullRequest: event.PullRequest.Number,\n\t\tBranch: event.PullRequest.Head.Ref,\n\t\tSHA: event.PullRequest.Head.Sha,\n\t}\n\t\/\/ For re-run check_suite event.\n\tif info.PullRequest == 0 && len(event.CheckSuite.PullRequests) > 0 {\n\t\tpr := event.CheckSuite.PullRequests[0]\n\t\tinfo.PullRequest = pr.Number\n\t\tinfo.Branch = pr.Head.Ref\n\t\tinfo.SHA = pr.Head.Sha\n\t}\n\treturn info, info.PullRequest != 0, nil\n}\n\nfunc IsInGitHubAction() bool {\n\t\/\/ https:\/\/help.github.com\/en\/articles\/virtual-environments-for-github-actions#default-environment-variables\n\treturn os.Getenv(\"GITHUB_ACTION\") != \"\"\n}\n<commit_msg>add comment to IsInGitHubAction<commit_after>package cienv\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"os\"\n)\n\n\/\/ https:\/\/help.github.com\/en\/articles\/virtual-environments-for-github-actions#default-environment-variables\ntype GitHubEvent struct {\n\tPullRequest GitHubPullRequest `json:\"pull_request\"`\n\tRepository struct {\n\t\tOwner struct {\n\t\t\tLogin string `json:\"login\"`\n\t\t} `json:\"owner\"`\n\t\tName string `json:\"name\"`\n\t} `json:\"repository\"`\n\tCheckSuite struct {\n\t\tAfter string `json:\"after\"`\n\t\tPullRequests []GitHubPullRequest `json:\"pull_requests\"`\n\t} `json:\"check_suite\"`\n}\n\ntype GitHubPullRequest struct {\n\tNumber int `json:\"number\"`\n\tHead struct {\n\t\tSha string `json:\"sha\"`\n\t\tRef string `json:\"ref\"`\n\t} `json:\"head\"`\n}\n\nfunc getBuildInfoFromGitHubAction() (*BuildInfo, bool, error) {\n\teventPath := os.Getenv(\"GITHUB_EVENT_PATH\")\n\tif eventPath == \"\" {\n\t\treturn nil, false, errors.New(\"GITHUB_EVENT_PATH not found\")\n\t}\n\treturn getBuildInfoFromGitHubActionEventPath(eventPath)\n}\nfunc getBuildInfoFromGitHubActionEventPath(eventPath string) (*BuildInfo, bool, error) {\n\tf, err := os.Open(eventPath)\n\tif err != nil {\n\t\treturn nil, false, err\n\t}\n\tdefer f.Close()\n\tvar event GitHubEvent\n\tif err := json.NewDecoder(f).Decode(&event); err != nil {\n\t\treturn nil, false, err\n\t}\n\tinfo := &BuildInfo{\n\t\tOwner: event.Repository.Owner.Login,\n\t\tRepo: event.Repository.Name,\n\t\tPullRequest: event.PullRequest.Number,\n\t\tBranch: event.PullRequest.Head.Ref,\n\t\tSHA: event.PullRequest.Head.Sha,\n\t}\n\t\/\/ For re-run check_suite event.\n\tif info.PullRequest == 0 && len(event.CheckSuite.PullRequests) > 0 {\n\t\tpr := event.CheckSuite.PullRequests[0]\n\t\tinfo.PullRequest = pr.Number\n\t\tinfo.Branch = pr.Head.Ref\n\t\tinfo.SHA = pr.Head.Sha\n\t}\n\treturn info, info.PullRequest != 0, nil\n}\n\n\/\/ IsInGitHubAction returns true if reviewdog is running in GitHub Actions.\nfunc IsInGitHubAction() bool {\n\t\/\/ https:\/\/help.github.com\/en\/articles\/virtual-environments-for-github-actions#default-environment-variables\n\treturn os.Getenv(\"GITHUB_ACTION\") != \"\"\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018-2019 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/cilium\/cilium\/api\/v1\/models\"\n\tpkg \"github.com\/cilium\/cilium\/pkg\/client\"\n\t\"github.com\/cilium\/cilium\/pkg\/command\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar nodeListCmd = &cobra.Command{\n\tUse: \"list\",\n\tAliases: []string{\"ls\"},\n\tShort: \"List nodes\",\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tresp, err := client.Daemon.GetClusterNodes(nil)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", pkg.Hint(err))\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tcluster := resp.Payload.NodesAdded\n\t\tif cluster == nil {\n\t\t\treturn\n\t\t}\n\n\t\tif command.OutputJSON() {\n\t\t\tif err := command.PrintOutput(cluster); err != nil {\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t} else {\n\t\t\tw := tabwriter.NewWriter(os.Stdout, 2, 0, 3, ' ', 0)\n\t\t\tformatStatusResponse(w, cluster)\n\t\t\tw.Flush()\n\t\t}\n\t},\n}\n\nfunc init() {\n\tnodeCmd.AddCommand(nodeListCmd)\n\tcommand.AddJSONOutput(nodeListCmd)\n}\n\nfunc formatStatusResponse(w io.Writer, nodes []*models.NodeElement) {\n\tnodesOutput := []string{\"Name\\tIPv4 Address\\tEndpoint CIDR\\tIPv6 Address\\tEndpoint CIDR\\n\"}\n\n\tfor _, node := range nodes {\n\t\tipv4, ipv4Range, ipv6, ipv6Range := \"\", \"\", \"\", \"\"\n\t\tif node.PrimaryAddress != nil {\n\t\t\tif node.PrimaryAddress.IPV4 != nil {\n\t\t\t\tipv4 = node.PrimaryAddress.IPV4.IP\n\t\t\t\tipv4Range = node.PrimaryAddress.IPV4.AllocRange\n\t\t\t}\n\t\t\tif node.PrimaryAddress.IPV6 != nil {\n\t\t\t\tipv6 = node.PrimaryAddress.IPV6.IP\n\t\t\t\tipv6Range = node.PrimaryAddress.IPV6.AllocRange\n\t\t\t}\n\t\t}\n\n\t\tnodesOutput = append(nodesOutput, fmt.Sprintf(\"%s\\t%s\\t%s\\t%s\\t%s\\n\",\n\t\t\tnode.Name, ipv4, ipv4Range, ipv6, ipv6Range))\n\t}\n\n\tif len(nodesOutput) > 1 {\n\t\ttab := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0)\n\t\tsort.Strings(nodesOutput)\n\t\tfor _, s := range nodesOutput {\n\t\t\tfmt.Fprint(tab, s)\n\t\t}\n\t\ttab.Flush()\n\t}\n}\n<commit_msg>Print header before the list of nodes<commit_after>\/\/ Copyright 2018-2019 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/cilium\/cilium\/api\/v1\/models\"\n\tpkg \"github.com\/cilium\/cilium\/pkg\/client\"\n\t\"github.com\/cilium\/cilium\/pkg\/command\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar nodeListCmd = &cobra.Command{\n\tUse: \"list\",\n\tAliases: []string{\"ls\"},\n\tShort: \"List nodes\",\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tresp, err := client.Daemon.GetClusterNodes(nil)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", pkg.Hint(err))\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tcluster := resp.Payload.NodesAdded\n\t\tif cluster == nil {\n\t\t\treturn\n\t\t}\n\n\t\tif command.OutputJSON() {\n\t\t\tif err := command.PrintOutput(cluster); err != nil {\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t} else {\n\t\t\tw := tabwriter.NewWriter(os.Stdout, 2, 0, 3, ' ', 0)\n\t\t\tformatStatusResponse(w, cluster)\n\t\t\tw.Flush()\n\t\t}\n\t},\n}\n\nfunc init() {\n\tnodeCmd.AddCommand(nodeListCmd)\n\tcommand.AddJSONOutput(nodeListCmd)\n}\n\nfunc formatStatusResponse(w io.Writer, nodes []*models.NodeElement) {\n\tnodesOutputHeader := \"Name\\tIPv4 Address\\tEndpoint CIDR\\tIPv6 Address\\tEndpoint CIDR\\n\"\n\tnodesOutput := make([]string, len(nodes))\n\n\tfor _, node := range nodes {\n\t\tipv4, ipv4Range, ipv6, ipv6Range := \"\", \"\", \"\", \"\"\n\t\tif node.PrimaryAddress != nil {\n\t\t\tif node.PrimaryAddress.IPV4 != nil {\n\t\t\t\tipv4 = node.PrimaryAddress.IPV4.IP\n\t\t\t\tipv4Range = node.PrimaryAddress.IPV4.AllocRange\n\t\t\t}\n\t\t\tif node.PrimaryAddress.IPV6 != nil {\n\t\t\t\tipv6 = node.PrimaryAddress.IPV6.IP\n\t\t\t\tipv6Range = node.PrimaryAddress.IPV6.AllocRange\n\t\t\t}\n\t\t}\n\n\t\tnodesOutput = append(nodesOutput, fmt.Sprintf(\"%s\\t%s\\t%s\\t%s\\t%s\\n\",\n\t\t\tnode.Name, ipv4, ipv4Range, ipv6, ipv6Range))\n\t}\n\n\tif len(nodesOutput) > 1 {\n\t\ttab := tabwriter.NewWriter(w, 0, 0, 3, ' ', 0)\n\t\tfmt.Fprintf(tab, nodesOutputHeader)\n\t\tsort.Strings(nodesOutput)\n\t\tfor _, s := range nodesOutput {\n\t\t\tfmt.Fprint(tab, s)\n\t\t}\n\t\ttab.Flush()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package app\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/sclevine\/agouti\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/viper\"\n\t\"golang.org\/x\/sync\/errgroup\"\n)\n\ntype FileInfo struct {\n\tFilename string\n\tLine uint64\n\tStart time.Time\n\tDone *sync.WaitGroup \/\/ just include parse, exclude write file\n}\n\ntype URLRes struct {\n\tURL string\n\tRes map[string]interface{}\n}\n\ntype URLInfo struct {\n\tURL string\n\tJsFuncs []string\n\tDumpHTML bool\n\tResChan chan URLRes\n\tFInfo *FileInfo\n}\n\nvar (\n\tfParallel int\n\tfDataDir string\n\tfScriptDir string\n\tfOutputDir string\n\tfSplitCount int\n)\n\nvar CrawlCmd = &cobra.Command{\n\tUse: \"hamal\",\n\tShort: \"Hamal parse webpage based on scripts.\",\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\tif _, err := os.Stat(fDataDir); os.IsNotExist(err) {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"dataDir\": fDataDir,\n\t\t\t}).Fatal(\"No script dir\")\n\t\t\treturn err\n\t\t}\n\t\tif _, err := os.Stat(fScriptDir); os.IsNotExist(err) {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"scriptDir\": fScriptDir,\n\t\t\t}).Fatal(\"No script dir\")\n\t\t\treturn err\n\t\t}\n\t\tif _, err := os.Stat(fOutputDir); os.IsNotExist(err) {\n\t\t\tos.Mkdir(fOutputDir, os.ModePerm)\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"outputDir\": fOutputDir,\n\t\t\t}).Debug(\"Create output dir\")\n\t\t}\n\n\t\treturn mainFunc()\n\t},\n}\n\nfunc init() {\n\tflags := CrawlCmd.Flags()\n\tflags.IntVarP(&fParallel, \"parallel\", \"p\", 10, \"max number of parallel exector\")\n\tflags.StringVar(&fDataDir, \"dataDir\", \".\/data\", \"dir for storage url files\")\n\tflags.StringVar(&fScriptDir, \"scriptDir\", \".\/script\", \"dir for storage scripts\")\n\tflags.StringVar(&fOutputDir, \"outputDir\", \".\/output\", \"dir for storage parse result files\")\n\tflags.IntVarP(&fSplitCount, \"splitCount\", \"c\", 10000, \"max line count for one output file\")\n\tviper.BindPFlag(\"parallel\", flags.Lookup(\"parallel\"))\n\tviper.BindPFlag(\"dataDir\", flags.Lookup(\"dataDir\"))\n\tviper.BindPFlag(\"scriptDir\", flags.Lookup(\"scriptDir\"))\n\tviper.BindPFlag(\"outputDir\", flags.Lookup(\"outputDir\"))\n\tviper.BindPFlag(\"splitCount\", flags.Lookup(\"splitCount\"))\n}\n\nfunc mainFunc() error {\n\tvar infoeg errgroup.Group\n\tvar retryeg errgroup.Group\n\n\tinfoChan := make(chan URLInfo, fParallel)\n\tretryNum := fParallel\/2 + 1\n\tretryChan := make(chan URLInfo, retryNum)\n\tfor i := 0; i < fParallel; i++ {\n\t\tindex := i\n\t\tinfoeg.Go(func() error {\n\t\tRESTART:\n\t\t\tdriver := agouti.PhantomJS()\n\t\t\tif err := driver.Start(); err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"index\": index,\n\t\t\t\t\t\"err\": err,\n\t\t\t\t}).Fatalf(\"Failed to start driver:%v\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlog.WithField(\"index\", index).Debug(\"Success to start worker\")\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase info, ok := <-infoChan:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tlog.WithField(\"index\", index).Debug(\"Worker exit\")\n\t\t\t\t\t\tdriver.Stop()\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\n\t\t\t\t\terr := parseURL(index, info, driver)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\/\/ sometimes phantomjs crashed or just navigate timeout\n\t\t\t\t\t\t\/\/ we can't differentiate cause of errors\n\t\t\t\t\t\t\/\/ so we just restart the worker and push the *info* to retry queue\n\t\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\t\"index\": index,\n\t\t\t\t\t\t\t\"info\": info,\n\t\t\t\t\t\t}).Warn(\"Failed to parse, will retry later\")\n\t\t\t\t\t\tgo func(info URLInfo) {\n\t\t\t\t\t\t\ttime.Sleep(10 * time.Second)\n\t\t\t\t\t\t\tretryChan <- info\n\t\t\t\t\t\t}(info)\n\n\t\t\t\t\t\tdriver.Stop()\n\t\t\t\t\t\tgoto RESTART\n\t\t\t\t\t}\n\n\t\t\t\t\tlog.WithField(\"url\", info.URL).Info(\"Success to parse\")\n\t\t\t\t\tinfo.FInfo.Done.Done()\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n\tfor i := 0; i < retryNum; i++ {\n\t\tindex := i\n\t\tretryeg.Go(func() error {\n\t\tRESTART:\n\t\t\tdriver := agouti.PhantomJS()\n\t\t\tif err := driver.Start(); err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"index\": index,\n\t\t\t\t\t\"err\": err,\n\t\t\t\t}).Fatalf(\"Failed to start driver:%v\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlog.WithField(\"index\", index).Debug(\"Success to start retry worker\")\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase info, ok := <-retryChan:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tlog.WithField(\"index\", index).Debug(\"Retry worker exit\")\n\t\t\t\t\t\tdriver.Stop()\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\terr := parseURL(index, info, driver)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\/\/ failed to retry, no more retry for this url, just mark completed\n\t\t\t\t\t\tlog.WithField(\"url\", info.URL).Info(\"Failed to retry\")\n\t\t\t\t\t\tinfo.FInfo.Done.Done()\n\n\t\t\t\t\t\t\/\/ we need restart retry worker\n\t\t\t\t\t\tdriver.Stop()\n\t\t\t\t\t\tgoto RESTART\n\t\t\t\t\t}\n\t\t\t\t\tlog.WithField(\"url\", info.URL).Info(\"Success to parse\")\n\t\t\t\t\tinfo.FInfo.Done.Done()\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n\n\tlog.WithField(\"dataDir\", fDataDir).Debug(\"Start to traversal data files\")\n\terr := filepath.Walk(fDataDir, walkFile(infoChan))\n\tclose(infoChan)\n\tinfoeg.Wait()\n\tclose(retryChan)\n\tretryeg.Wait()\n\tlog.Debug(\"Finish all tasks\")\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc parseURL(index int, info URLInfo, driver *agouti.WebDriver) error {\n\tpage, err := driver.NewPage(agouti.Browser(\"phantomjs\"))\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"index\": index,\n\t\t\t\"err\": err,\n\t\t}).Warn(\"Failed to create session\")\n\t\treturn err\n\t}\n\tdefer page.Destroy()\n\n\tpage.Session().SetPageLoad(300000)\n\tpage.Session().SetScriptTimeout(30000)\n\tpage.Session().SetImplicitWait(0)\n\n\turl := info.URL\n\tlog.WithFields(log.Fields{\n\t\t\"index\": index,\n\t\t\"url\": url,\n\t}).Debug(\"Get target url\")\n\n\t\/\/ this step may be blocked until 'page load' timeout\n\tif err := page.Navigate(url); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"index\": index,\n\t\t\t\"url\": url,\n\t\t\t\"err\": err,\n\t\t}).Warn(\"Failed to navigate to target url\")\n\t\treturn err\n\t}\n\tlog.WithFields(log.Fields{\n\t\t\"index\": index,\n\t\t\"url\": url,\n\t}).Debug(\"Success to open url\")\n\n\tres := make(map[string]interface{})\n\tfor i, jsFunc := range info.JsFuncs {\n\t\terr := page.RunScript(jsFunc, res, &res)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"index\": index,\n\t\t\t\t\"url\": url,\n\t\t\t\t\"scriptIndex\": i,\n\t\t\t\t\"err\": err,\n\t\t\t}).Warn(\"Failed to run script\")\n\t\t\treturn err\n\t\t}\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"index\": index,\n\t\t\t\"url\": url,\n\t\t\t\"scriptIndex\": i,\n\t\t\t\"res\": res,\n\t\t}).Debug(\"Get parse result\")\n\n\t\tif val, ok := res[\"stop\"]; ok && val.(bool) {\n\t\t\treturn nil\n\t\t}\n\n\t\tif val, ok := res[\"waitTime\"]; ok {\n\t\t\tdelete(res, \"stop\")\n\t\t\tdelete(res, \"waitTime\")\n\t\t\td := time.Duration(val.(float64)) * time.Millisecond\n\t\t\ttime.Sleep(d)\n\t\t}\n\t}\n\tlog.WithFields(log.Fields{\n\t\t\"index\": index,\n\t\t\"url\": url,\n\t}).Debug(\"Parse finished\")\n\n\tif info.DumpHTML {\n\t\tres[\"html\"], err = page.HTML()\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"index\": index,\n\t\t\t\t\"url\": url,\n\t\t\t}).Warn(\"Failed to get html\")\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) == 0 {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"index\": index,\n\t\t\t\"url\": url,\n\t\t}).Debug(\"Get empty response\")\n\t\treturn nil\n\t}\n\n\tinfo.ResChan <- URLRes{URL: url, Res: res}\n\n\treturn nil\n}\n\nfunc walkFile(infoChan chan<- URLInfo) func(path string, f os.FileInfo, err error) error {\n\tinfo := new(URLInfo)\n\treturn func(path string, f os.FileInfo, err error) error {\n\t\tif f.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"path\": path,\n\t\t\t\"fileName\": f.Name(),\n\t\t}).Debug(\"Get crawler data file\")\n\n\t\t\/\/ find script\n\t\tfilename := f.Name()\n\t\tif !viper.IsSet(filename) {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"filename\": filename,\n\t\t\t}).Warn(\"No conf item for data file\")\n\t\t\treturn nil\n\t\t}\n\t\tconf := viper.GetStringMap(filename)\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"filename\": filename,\n\t\t\t\"conf\": conf,\n\t\t}).Debug(\"Read conf for data file\")\n\t\tif val, ok := conf[\"ignore\"]; ok && val.(bool) {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"conf\": conf,\n\t\t\t}).Warn(\"Data file is ignored by conf\")\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ get 'dump' setting\n\t\tif val, ok := conf[\"dump_html\"]; ok && val.(bool) {\n\t\t\tinfo.DumpHTML = val.(bool)\n\t\t\tlog.WithField(\"dumpHTML\", info.DumpHTML).Debug(\"Read dump_html conf\")\n\t\t}\n\n\t\t\/\/ we can have multi scripts for one page\n\t\tif _, ok := conf[\"script_name\"]; !ok {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"conf\": conf,\n\t\t\t}).Warn(\"Data file's conf has no 'script_name' item\")\n\t\t\treturn nil\n\t\t}\n\t\tvar scriptPath []string\n\t\tswitch scripts := conf[\"script_name\"].(type) {\n\t\tcase string:\n\t\t\tscriptPath = append(scriptPath, filepath.Join(fScriptDir, scripts))\n\t\tcase []interface{}:\n\t\t\tfor _, v := range scripts {\n\t\t\t\tscriptPath = append(scriptPath, filepath.Join(fScriptDir, v.(string)))\n\t\t\t}\n\t\tdefault:\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"script_name\": scripts,\n\t\t\t}).Warn(\"Conf[script_name] is not string or array of string\")\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ load scripts\n\t\tfor _, v := range scriptPath {\n\t\t\tdata, err := ioutil.ReadFile(v)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"script_path\": v,\n\t\t\t\t\t\"err\": err,\n\t\t\t\t}).Warn(\"Failed to read script\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tinfo.JsFuncs = append(info.JsFuncs, string(data))\n\t\t}\n\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\t\/\/ write output file routine\n\t\tresChan := make(chan URLRes, fParallel+fParallel\/2+1)\n\t\t\/\/ TODO: make sure finish to write output file before exit\n\t\tgo func() {\n\t\t\t\/\/ create output file\n\t\t\tvar resFilename string\n\t\t\tif val, ok := conf[\"output_file\"]; ok {\n\t\t\t\tresFilename = val.(string)\n\t\t\t} else {\n\t\t\t\tresFilename = filename\n\t\t\t}\n\t\t\tnoSuffix := strings.TrimSuffix(resFilename, filepath.Ext(resFilename))\n\t\t\tresPath := filepath.Join(fOutputDir, noSuffix+\".txt\")\n\t\t\tresFile, err := os.Create(resPath)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"res_path\": resPath,\n\t\t\t\t\t\"err\": err,\n\t\t\t\t}).Warn(\"Failed to create output file\")\n\t\t\t\tcancel()\n\t\t\t}\n\n\t\t\tline := 0\n\t\t\tindex := 0\n\t\t\tfor res := range resChan {\n\t\t\t\tdata, _ := json.Marshal(res.Res)\n\t\t\t\tresFile.WriteString(fmt.Sprintf(\"%s\\t%s\\n\", res.URL, string(data)))\n\t\t\t\tline++\n\t\t\t\tif line >= fSplitCount {\n\t\t\t\t\tresFile.Close()\n\t\t\t\t\tline = 0\n\t\t\t\t\tindex++\n\t\t\t\t\tresPath = filepath.Join(fOutputDir, noSuffix+\"_\"+strconv.Itoa(index)+\".txt\")\n\t\t\t\t\tresFile, err = os.Create(resPath)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\t\"res_path\": resPath,\n\t\t\t\t\t\t\t\"err\": err,\n\t\t\t\t\t\t}).Warn(\"Failed to create output file\")\n\t\t\t\t\t\tcancel()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tresFile.Close()\n\t\t}()\n\n\t\t\/\/ read url from data file\n\t\tinFile, err := os.Open(path)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"path\": path,\n\t\t\t\t\"err\": err,\n\t\t\t}).Fatal(\"Failed to open data file\")\n\t\t\treturn nil\n\t\t}\n\t\tdefer inFile.Close()\n\n\t\tfi := FileInfo{\n\t\t\tFilename: filename,\n\t\t\tStart: time.Now(),\n\t\t\tDone: new(sync.WaitGroup),\n\t\t}\n\t\tinfo.FInfo = &fi\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"filename\": filename,\n\t\t\t\"start\": info.FInfo.Start,\n\t\t}).Info(\"Start to crawler urls in one file\")\n\n\t\tsc := bufio.NewScanner(inFile)\n\t\tfor sc.Scan() {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"filename\": filename,\n\t\t\t\t\t\"line\": atomic.LoadUint64(&info.FInfo.Line),\n\t\t\t\t\t\"elapsed\": time.Since(info.FInfo.Start),\n\t\t\t\t}).Info(\"Partial finished to crawler urls in one file\")\n\n\t\t\t\treturn ctx.Err()\n\t\t\tdefault:\n\t\t\t\tinfo.FInfo.Done.Add(1)\n\t\t\t\tinfo.URL = sc.Text()\n\t\t\t\tinfo.ResChan = resChan\n\t\t\t\tatomic.AddUint64(&info.FInfo.Line, 1)\n\t\t\t\t\/\/ log.WithField(\"info\", info).Debug(\"Create one info\")\n\t\t\t\tinfoChan <- *info\n\t\t\t}\n\t\t}\n\n\t\tgo func() {\n\t\t\tinfo.FInfo.Done.Wait()\n\t\t\tclose(resChan)\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"filename\": filename,\n\t\t\t\t\"line\": atomic.LoadUint64(&info.FInfo.Line),\n\t\t\t\t\"elapsed\": time.Since(info.FInfo.Start),\n\t\t\t}).Info(\"Finished to crawler urls in one file\")\n\t\t}()\n\n\t\treturn nil\n\t}\n}\n<commit_msg>fix crawl subcommand<commit_after>package app\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/sclevine\/agouti\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/viper\"\n\t\"golang.org\/x\/sync\/errgroup\"\n)\n\ntype FileInfo struct {\n\tFilename string\n\tLine uint64\n\tStart time.Time\n\tDone *sync.WaitGroup \/\/ just include parse, exclude write file\n}\n\ntype URLRes struct {\n\tURL string\n\tRes map[string]interface{}\n}\n\ntype URLInfo struct {\n\tURL string\n\tJsFuncs []string\n\tDumpHTML bool\n\tResChan chan URLRes\n\tFInfo *FileInfo\n}\n\nvar (\n\tfParallel int\n\tfDataDir string\n\tfScriptDir string\n\tfOutputDir string\n\tfSplitCount int\n)\n\nvar CrawlCmd = &cobra.Command{\n\tUse: \"crawl\",\n\tShort: \"Crawl parse webpage based on javascripts, then save parse results.\",\n\tPreRunE: func(cmd *cobra.Command, args []string) error {\n\t\tif _, err := os.Stat(fDataDir); os.IsNotExist(err) {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"dataDir\": fDataDir,\n\t\t\t}).Fatal(\"No data dir\")\n\t\t\treturn err\n\t\t}\n\t\tif _, err := os.Stat(fScriptDir); os.IsNotExist(err) {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"scriptDir\": fScriptDir,\n\t\t\t}).Fatal(\"No script dir\")\n\t\t\treturn err\n\t\t}\n\t\tif _, err := os.Stat(fOutputDir); os.IsNotExist(err) {\n\t\t\tos.Mkdir(fOutputDir, os.ModePerm)\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"outputDir\": fOutputDir,\n\t\t\t}).Debug(\"Create output dir\")\n\t\t}\n\n\t\treturn mainFunc()\n\t},\n}\n\nfunc init() {\n\tflags := CrawlCmd.Flags()\n\tflags.IntVarP(&fParallel, \"parallel\", \"p\", 10, \"max number of parallel exector\")\n\tflags.StringVar(&fDataDir, \"dataDir\", \".\/data\", \"dir for storage url files\")\n\tflags.StringVar(&fScriptDir, \"scriptDir\", \".\/script\", \"dir for storage scripts\")\n\tflags.StringVar(&fOutputDir, \"outputDir\", \".\/output\", \"dir for storage parse result files\")\n\tflags.IntVarP(&fSplitCount, \"splitCount\", \"c\", 10000, \"max line count for one output file\")\n\tviper.BindPFlag(\"parallel\", flags.Lookup(\"parallel\"))\n\tviper.BindPFlag(\"dataDir\", flags.Lookup(\"dataDir\"))\n\tviper.BindPFlag(\"scriptDir\", flags.Lookup(\"scriptDir\"))\n\tviper.BindPFlag(\"outputDir\", flags.Lookup(\"outputDir\"))\n\tviper.BindPFlag(\"splitCount\", flags.Lookup(\"splitCount\"))\n}\n\nfunc mainFunc() error {\n\tvar infoeg errgroup.Group\n\tvar retryeg errgroup.Group\n\n\tinfoChan := make(chan URLInfo, fParallel)\n\tretryNum := fParallel\/2 + 1\n\tretryChan := make(chan URLInfo, retryNum)\n\tfor i := 0; i < fParallel; i++ {\n\t\tindex := i\n\t\tinfoeg.Go(func() error {\n\t\tRESTART:\n\t\t\tdriver := agouti.PhantomJS()\n\t\t\tif err := driver.Start(); err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"index\": index,\n\t\t\t\t\t\"err\": err,\n\t\t\t\t}).Fatalf(\"Failed to start driver:%v\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlog.WithField(\"index\", index).Debug(\"Success to start worker\")\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase info, ok := <-infoChan:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tlog.WithField(\"index\", index).Debug(\"Worker exit\")\n\t\t\t\t\t\tdriver.Stop()\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\n\t\t\t\t\terr := parseURL(index, info, driver)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\/\/ sometimes phantomjs crashed or just navigate timeout\n\t\t\t\t\t\t\/\/ we can't differentiate cause of errors\n\t\t\t\t\t\t\/\/ so we just restart the worker and push the *info* to retry queue\n\t\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\t\"index\": index,\n\t\t\t\t\t\t\t\"info\": info,\n\t\t\t\t\t\t}).Warn(\"Failed to parse, will retry later\")\n\t\t\t\t\t\tgo func(info URLInfo) {\n\t\t\t\t\t\t\ttime.Sleep(10 * time.Second)\n\t\t\t\t\t\t\tretryChan <- info\n\t\t\t\t\t\t}(info)\n\n\t\t\t\t\t\tdriver.Stop()\n\t\t\t\t\t\tgoto RESTART\n\t\t\t\t\t}\n\n\t\t\t\t\tlog.WithField(\"url\", info.URL).Info(\"Success to parse\")\n\t\t\t\t\tinfo.FInfo.Done.Done()\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n\tfor i := 0; i < retryNum; i++ {\n\t\tindex := i\n\t\tretryeg.Go(func() error {\n\t\tRESTART:\n\t\t\tdriver := agouti.PhantomJS()\n\t\t\tif err := driver.Start(); err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"index\": index,\n\t\t\t\t\t\"err\": err,\n\t\t\t\t}).Fatalf(\"Failed to start driver:%v\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tlog.WithField(\"index\", index).Debug(\"Success to start retry worker\")\n\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase info, ok := <-retryChan:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tlog.WithField(\"index\", index).Debug(\"Retry worker exit\")\n\t\t\t\t\t\tdriver.Stop()\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t\terr := parseURL(index, info, driver)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\/\/ failed to retry, no more retry for this url, just mark completed\n\t\t\t\t\t\tlog.WithField(\"url\", info.URL).Info(\"Failed to retry\")\n\t\t\t\t\t\tinfo.FInfo.Done.Done()\n\n\t\t\t\t\t\t\/\/ we need restart retry worker\n\t\t\t\t\t\tdriver.Stop()\n\t\t\t\t\t\tgoto RESTART\n\t\t\t\t\t}\n\t\t\t\t\tlog.WithField(\"url\", info.URL).Info(\"Success to parse\")\n\t\t\t\t\tinfo.FInfo.Done.Done()\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n\n\tlog.WithField(\"dataDir\", fDataDir).Debug(\"Start to traversal data files\")\n\terr := filepath.Walk(fDataDir, walkFile(infoChan))\n\tclose(infoChan)\n\tinfoeg.Wait()\n\tclose(retryChan)\n\tretryeg.Wait()\n\tlog.Debug(\"Finish all tasks\")\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc parseURL(index int, info URLInfo, driver *agouti.WebDriver) error {\n\tpage, err := driver.NewPage(agouti.Browser(\"phantomjs\"))\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"index\": index,\n\t\t\t\"err\": err,\n\t\t}).Warn(\"Failed to create session\")\n\t\treturn err\n\t}\n\tdefer page.Destroy()\n\n\tpage.Session().SetPageLoad(300000)\n\tpage.Session().SetScriptTimeout(30000)\n\tpage.Session().SetImplicitWait(0)\n\n\turl := info.URL\n\tlog.WithFields(log.Fields{\n\t\t\"index\": index,\n\t\t\"url\": url,\n\t}).Debug(\"Get target url\")\n\n\t\/\/ this step may be blocked until 'page load' timeout\n\tif err := page.Navigate(url); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"index\": index,\n\t\t\t\"url\": url,\n\t\t\t\"err\": err,\n\t\t}).Warn(\"Failed to navigate to target url\")\n\t\treturn err\n\t}\n\tlog.WithFields(log.Fields{\n\t\t\"index\": index,\n\t\t\"url\": url,\n\t}).Debug(\"Success to open url\")\n\n\tres := make(map[string]interface{})\n\tfor i, jsFunc := range info.JsFuncs {\n\t\terr := page.RunScript(jsFunc, res, &res)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"index\": index,\n\t\t\t\t\"url\": url,\n\t\t\t\t\"scriptIndex\": i,\n\t\t\t\t\"err\": err,\n\t\t\t}).Warn(\"Failed to run script\")\n\t\t\treturn err\n\t\t}\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"index\": index,\n\t\t\t\"url\": url,\n\t\t\t\"scriptIndex\": i,\n\t\t\t\"res\": res,\n\t\t}).Debug(\"Get parse result\")\n\n\t\tif val, ok := res[\"stop\"]; ok && val.(bool) {\n\t\t\treturn nil\n\t\t}\n\n\t\tif val, ok := res[\"waitTime\"]; ok {\n\t\t\tdelete(res, \"stop\")\n\t\t\tdelete(res, \"waitTime\")\n\t\t\td := time.Duration(val.(float64)) * time.Millisecond\n\t\t\ttime.Sleep(d)\n\t\t}\n\t}\n\tlog.WithFields(log.Fields{\n\t\t\"index\": index,\n\t\t\"url\": url,\n\t}).Debug(\"Parse finished\")\n\n\tif info.DumpHTML {\n\t\tres[\"html\"], err = page.HTML()\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"index\": index,\n\t\t\t\t\"url\": url,\n\t\t\t}).Warn(\"Failed to get html\")\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) == 0 {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"index\": index,\n\t\t\t\"url\": url,\n\t\t}).Debug(\"Get empty response\")\n\t\treturn nil\n\t}\n\n\tinfo.ResChan <- URLRes{URL: url, Res: res}\n\n\treturn nil\n}\n\nfunc walkFile(infoChan chan<- URLInfo) func(path string, f os.FileInfo, err error) error {\n\tinfo := new(URLInfo)\n\treturn func(path string, f os.FileInfo, err error) error {\n\t\tif f.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"path\": path,\n\t\t\t\"fileName\": f.Name(),\n\t\t}).Debug(\"Get crawler data file\")\n\n\t\t\/\/ find script\n\t\tfilename := f.Name()\n\t\tif !viper.IsSet(filename) {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"filename\": filename,\n\t\t\t}).Warn(\"No conf item for data file\")\n\t\t\treturn nil\n\t\t}\n\t\tconf := viper.GetStringMap(filename)\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"filename\": filename,\n\t\t\t\"conf\": conf,\n\t\t}).Debug(\"Read conf for data file\")\n\t\tif val, ok := conf[\"ignore\"]; ok && val.(bool) {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"conf\": conf,\n\t\t\t}).Warn(\"Data file is ignored by conf\")\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ get 'dump' setting\n\t\tif val, ok := conf[\"dump_html\"]; ok && val.(bool) {\n\t\t\tinfo.DumpHTML = val.(bool)\n\t\t\tlog.WithField(\"dumpHTML\", info.DumpHTML).Debug(\"Read dump_html conf\")\n\t\t}\n\n\t\t\/\/ we can have multi scripts for one page\n\t\tif _, ok := conf[\"script_name\"]; !ok {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"conf\": conf,\n\t\t\t}).Warn(\"Data file's conf has no 'script_name' item\")\n\t\t\treturn nil\n\t\t}\n\t\tvar scriptPath []string\n\t\tswitch scripts := conf[\"script_name\"].(type) {\n\t\tcase string:\n\t\t\tscriptPath = append(scriptPath, filepath.Join(fScriptDir, scripts))\n\t\tcase []interface{}:\n\t\t\tfor _, v := range scripts {\n\t\t\t\tscriptPath = append(scriptPath, filepath.Join(fScriptDir, v.(string)))\n\t\t\t}\n\t\tdefault:\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"script_name\": scripts,\n\t\t\t}).Warn(\"Conf[script_name] is not string or array of string\")\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ load scripts\n\t\tfor _, v := range scriptPath {\n\t\t\tdata, err := ioutil.ReadFile(v)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"script_path\": v,\n\t\t\t\t\t\"err\": err,\n\t\t\t\t}).Warn(\"Failed to read script\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tinfo.JsFuncs = append(info.JsFuncs, string(data))\n\t\t}\n\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\t\/\/ write output file routine\n\t\tresChan := make(chan URLRes, fParallel+fParallel\/2+1)\n\t\t\/\/ TODO: make sure finish to write output file before exit\n\t\tgo func() {\n\t\t\t\/\/ create output file\n\t\t\tvar resFilename string\n\t\t\tif val, ok := conf[\"output_file\"]; ok {\n\t\t\t\tresFilename = val.(string)\n\t\t\t} else {\n\t\t\t\tresFilename = filename\n\t\t\t}\n\t\t\tnoSuffix := strings.TrimSuffix(resFilename, filepath.Ext(resFilename))\n\t\t\tresPath := filepath.Join(fOutputDir, noSuffix+\".txt\")\n\t\t\tresFile, err := os.Create(resPath)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"res_path\": resPath,\n\t\t\t\t\t\"err\": err,\n\t\t\t\t}).Warn(\"Failed to create output file\")\n\t\t\t\tcancel()\n\t\t\t}\n\n\t\t\tline := 0\n\t\t\tindex := 0\n\t\t\tfor res := range resChan {\n\t\t\t\tdata, _ := json.Marshal(res.Res)\n\t\t\t\tresFile.WriteString(fmt.Sprintf(\"%s\\t%s\\n\", res.URL, string(data)))\n\t\t\t\tline++\n\t\t\t\tif line >= fSplitCount {\n\t\t\t\t\tresFile.Close()\n\t\t\t\t\tline = 0\n\t\t\t\t\tindex++\n\t\t\t\t\tresPath = filepath.Join(fOutputDir, noSuffix+\"_\"+strconv.Itoa(index)+\".txt\")\n\t\t\t\t\tresFile, err = os.Create(resPath)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\t\t\"res_path\": resPath,\n\t\t\t\t\t\t\t\"err\": err,\n\t\t\t\t\t\t}).Warn(\"Failed to create output file\")\n\t\t\t\t\t\tcancel()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tresFile.Close()\n\t\t}()\n\n\t\t\/\/ read url from data file\n\t\tinFile, err := os.Open(path)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"path\": path,\n\t\t\t\t\"err\": err,\n\t\t\t}).Fatal(\"Failed to open data file\")\n\t\t\treturn nil\n\t\t}\n\t\tdefer inFile.Close()\n\n\t\tfi := FileInfo{\n\t\t\tFilename: filename,\n\t\t\tStart: time.Now(),\n\t\t\tDone: new(sync.WaitGroup),\n\t\t}\n\t\tinfo.FInfo = &fi\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"filename\": filename,\n\t\t\t\"start\": info.FInfo.Start,\n\t\t}).Info(\"Start to crawler urls in one file\")\n\n\t\tsc := bufio.NewScanner(inFile)\n\t\tfor sc.Scan() {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\t\"filename\": filename,\n\t\t\t\t\t\"line\": atomic.LoadUint64(&info.FInfo.Line),\n\t\t\t\t\t\"elapsed\": time.Since(info.FInfo.Start),\n\t\t\t\t}).Info(\"Partial finished to crawler urls in one file\")\n\n\t\t\t\treturn ctx.Err()\n\t\t\tdefault:\n\t\t\t\tinfo.FInfo.Done.Add(1)\n\t\t\t\tinfo.URL = sc.Text()\n\t\t\t\tinfo.ResChan = resChan\n\t\t\t\tatomic.AddUint64(&info.FInfo.Line, 1)\n\t\t\t\t\/\/ log.WithField(\"info\", info).Debug(\"Create one info\")\n\t\t\t\tinfoChan <- *info\n\t\t\t}\n\t\t}\n\n\t\tgo func() {\n\t\t\tinfo.FInfo.Done.Wait()\n\t\t\tclose(resChan)\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"filename\": filename,\n\t\t\t\t\"line\": atomic.LoadUint64(&info.FInfo.Line),\n\t\t\t\t\"elapsed\": time.Since(info.FInfo.Start),\n\t\t\t}).Info(\"Finished to crawler urls in one file\")\n\t\t}()\n\n\t\treturn nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage md\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/html\"\n\t\"golang.org\/x\/net\/html\/atom\"\n)\n\nvar (\n\t\/\/ headerLevel maps HTML tags to their level in parser.HeaderNode.\n\t\/\/ we -1 as H2 is a new step\n\theaderLevel = map[atom.Atom]int{\n\t\tatom.H3: 2,\n\t\tatom.H4: 3,\n\t\tatom.H5: 4,\n\t\tatom.H6: 5,\n\t}\n)\n\n\/\/ isHeader returns true if hn is one of secondary headers.\n\/\/ Step header is not one of them.\nfunc isHeader(hn *html.Node) bool {\n\t_, ok := headerLevel[hn.DataAtom]\n\treturn ok\n}\n\nfunc isMeta(hn *html.Node) bool {\n\telem := strings.ToLower(hn.Data)\n\treturn strings.HasPrefix(elem, metaDuration+metaSep) || strings.HasPrefix(elem, metaEnvironment+metaSep)\n}\n\nfunc isBold(hn *html.Node) bool {\n\tif hn.Type == html.TextNode {\n\t\thn = hn.Parent\n\t} else if hn.DataAtom == atom.Code {\n\t\t\/\/ Look up as many as 2 levels, to handle the case of e.g. <bold><em><code>\n\t\tfor i:= 0; i < 2; i++ {\n\t\t\thn = hn.Parent\n\t\t\tif hn.DataAtom == atom.Strong || hn.DataAtom == atom.B {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\treturn hn.DataAtom == atom.Strong ||\n\t\thn.DataAtom == atom.B\n}\n\nfunc isItalic(hn *html.Node) bool {\n\tif hn.Type == html.TextNode {\n\t\thn = hn.Parent\n\t} else if hn.DataAtom == atom.Code {\n\t\t\/\/ Look up as many as 2 levels, to handle the case of e.g. <em><bold><code>\n\t\tfor i:= 0; i < 2; i++ {\n\t\t\thn = hn.Parent\n\t\t\tif hn.DataAtom == atom.Em || hn.DataAtom == atom.I {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\treturn hn.DataAtom == atom.Em ||\n\t\thn.DataAtom == atom.I\n}\n\n\/\/ This is different to calling isBold and isItalic seperately as we must look\n\/\/ up an extra level in the tree\nfunc isBoldAndItalic(hn *html.Node) bool {\n\tif hn.Parent == nil || hn.Parent.Parent == nil {\n\t\treturn false\n\t}\n\tif hn.Type == html.TextNode {\n\t\thn = hn.Parent\n\t}\n\treturn (isItalic(hn) && isBold(hn.Parent)) || (isItalic(hn.Parent) && isBold(hn))\n\n}\n\nfunc isConsole(hn *html.Node) bool {\n if hn.Type == html.TextNode {\n hn = hn.Parent\n }\n if (hn.DataAtom == atom.Code) {\n for _, a := range hn.Attr {\n if (a.Key == \"class\" && a.Val == \"language-console\") {\n return true;\n }\n }\n }\n return false;\n}\n\nfunc isCode(hn *html.Node) bool {\n\tif hn.Type == html.TextNode {\n\t\thn = hn.Parent\n\t}\n\treturn hn.DataAtom == atom.Code && !isConsole(hn)\n}\n\nfunc isButton(hn *html.Node) bool {\n\treturn hn.DataAtom == atom.Button\n}\n\nfunc isAside(hn *html.Node) bool {\n\treturn hn.DataAtom == atom.Aside\n}\n\nfunc isNewAside(hn *html.Node) bool {\n\tif hn.FirstChild == nil ||\n\t hn.FirstChild.NextSibling == nil ||\n\t hn.FirstChild.NextSibling.FirstChild == nil {\n\t\treturn false\n\t}\n\n\tbq := hn.DataAtom == atom.Blockquote\n\tapn := strings.HasPrefix(strings.ToLower(hn.FirstChild.NextSibling.FirstChild.Data), \"aside positive\") ||\n\t strings.HasPrefix(strings.ToLower(hn.FirstChild.NextSibling.FirstChild.Data), \"aside negative\")\n\treturn bq && apn\n}\n\nfunc isInfobox(hn *html.Node) bool {\n\tif hn.DataAtom != atom.Dt {\n\t\treturn false\n\t}\n\treturn strings.ToLower(hn.FirstChild.Data) == \"positive\" || isInfoboxNegative(hn)\n}\n\nfunc isInfoboxNegative(hn *html.Node) bool {\n\tif hn.DataAtom != atom.Dt {\n\t\treturn false\n\t}\n\treturn strings.ToLower(hn.FirstChild.Data) == \"negative\"\n}\n\nfunc isSurvey(hn *html.Node) bool {\n\tif hn.DataAtom != atom.Form {\n\t\treturn false\n\t}\n\tif findAtom(hn, atom.Name) == nil {\n\t\treturn false\n\t}\n\tif len(findChildAtoms(hn, atom.Input)) == 0 {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc isTable(hn *html.Node) bool {\n\tif hn.DataAtom != atom.Table {\n\t\treturn false\n\t}\n\treturn countTwo(hn, atom.Tr) >= 1 || countTwo(hn, atom.Td) >= 1\n}\n\nfunc isList(hn *html.Node) bool {\n\treturn hn.DataAtom == atom.Ul || hn.DataAtom == atom.Ol\n}\n\nfunc isYoutube(hn *html.Node) bool {\n\treturn hn.DataAtom == atom.Video\n}\n\nfunc isFragmentImport(hn *html.Node) bool {\n\treturn hn.DataAtom == 0 && strings.HasPrefix(hn.Data, convertedImportsDataPrefix)\n}\n\n\/\/ countTwo starts counting the number of a Atom children in hn.\n\/\/ It returns as soon as the count exceeds 1, so the returned value is inexact.\n\/\/\n\/\/ The callers can test for > 1 to verify whether a node contains two\n\/\/ or more children of the Atom a.\nfunc countTwo(hn *html.Node, a atom.Atom) int {\n\tvar count int\n\tfor c := hn.FirstChild; c != nil; c = c.NextSibling {\n\t\tif c.DataAtom == a {\n\t\t\tcount++\n\t\t} else {\n\t\t\tcount += countTwo(c, a)\n\t\t}\n\t\tif count > 1 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn count\n}\n\n\/\/ countDirect returns the number of immediate children of hn.\nfunc countDirect(hn *html.Node) int {\n\tvar count int\n\tfor c := hn.FirstChild; c != nil; c = c.NextSibling {\n\t\tcount++\n\t}\n\treturn count\n}\n\n\/\/ findAtom returns first child of root which matches a, nil otherwise.\n\/\/ It returns root if it is the same Atom as a.\nfunc findAtom(root *html.Node, a atom.Atom) *html.Node {\n\tif root.DataAtom == a {\n\t\treturn root\n\t}\n\tfor c := root.FirstChild; c != nil; c = c.NextSibling {\n\t\tif v := findAtom(c, a); v != nil {\n\t\t\treturn v\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc findChildAtoms(root *html.Node, a atom.Atom) []*html.Node {\n\tvar nodes []*html.Node\n\tfor hn := root.FirstChild; hn != nil; hn = hn.NextSibling {\n\t\tif hn.DataAtom == a {\n\t\t\tnodes = append(nodes, hn)\n\t\t}\n\t\tnodes = append(nodes, findChildAtoms(hn, a)...)\n\t}\n\treturn nodes\n}\n\n\/\/ findParent is like findAtom but search is in the opposite direction.\n\/\/ It is faster to look for parent than child lookup in findAtom.\nfunc findParent(root *html.Node, a atom.Atom) *html.Node {\n\tif root.DataAtom == a {\n\t\treturn root\n\t}\n\tfor c := root.Parent; c != nil; c = c.Parent {\n\t\tif c.DataAtom == a {\n\t\t\treturn c\n\t\t}\n\t}\n\treturn nil\n}\n\nvar blockParents = map[atom.Atom]struct{}{\n\tatom.H1: {},\n\tatom.H2: {},\n\tatom.H3: {},\n\tatom.H4: {},\n\tatom.H5: {},\n\tatom.H6: {},\n\tatom.Li: {},\n\tatom.P: {},\n\tatom.Div: {},\n}\n\n\/\/ findBlockParent looks up nearest block parent node of hn.\n\/\/ For instance, block parent of \"text\" in <ul><li>text<\/li><\/ul> is <li>,\n\/\/ while block parent of \"text\" in <p><span>text<\/span><\/p> is <p>.\nfunc findBlockParent(hn *html.Node) *html.Node {\n\tfor p := hn.Parent; p != nil; p = p.Parent {\n\t\tif _, ok := blockParents[p.DataAtom]; ok {\n\t\t\treturn p\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ nodeAttr returns node attribute value of the key name.\n\/\/ Attribute keys are case insensitive.\nfunc nodeAttr(n *html.Node, name string) string {\n\tname = strings.ToLower(name)\n\tfor _, a := range n.Attr {\n\t\tif strings.ToLower(a.Key) == name {\n\t\t\treturn a.Val\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ stringifyNode extracts and concatenates all text nodes starting with root.\n\/\/ Line breaks are inserted at <br> and any non-<span> elements.\nfunc stringifyNode(root *html.Node, trim bool) string {\n\tif root.Type == html.TextNode {\n\t\ts := textCleaner.Replace(root.Data)\n\t\ts = strings.Replace(s, \"\\n\", \" \", -1)\n\t\tif !trim {\n\t\t\treturn s\n\t\t}\n\t\treturn strings.TrimSpace(s)\n\t}\n\tif root.DataAtom == atom.Br && !trim {\n\t\treturn \"\\n\"\n\t}\n\tvar buf bytes.Buffer\n\tfor c := root.FirstChild; c != nil; c = c.NextSibling {\n\t\tif c.DataAtom == atom.Br {\n\t\t\tbuf.WriteRune('\\n')\n\t\t\tcontinue\n\t\t}\n\t\tif c.Type == html.TextNode {\n\t\t\tbuf.WriteString(c.Data)\n\t\t\tcontinue\n\t\t}\n\t\tif c.DataAtom != atom.Span && c.DataAtom != atom.A {\n\t\t\tbuf.WriteRune('\\n')\n\t\t}\n\t\tbuf.WriteString(stringifyNode(c, false))\n\t}\n\ts := textCleaner.Replace(buf.String())\n\tif !trim {\n\t\treturn s\n\t}\n\treturn strings.TrimSpace(s)\n}\n<commit_msg>Fix typo<commit_after>\/\/ Copyright 2016 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage md\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/html\"\n\t\"golang.org\/x\/net\/html\/atom\"\n)\n\nvar (\n\t\/\/ headerLevel maps HTML tags to their level in parser.HeaderNode.\n\t\/\/ we -1 as H2 is a new step\n\theaderLevel = map[atom.Atom]int{\n\t\tatom.H3: 2,\n\t\tatom.H4: 3,\n\t\tatom.H5: 4,\n\t\tatom.H6: 5,\n\t}\n)\n\n\/\/ isHeader returns true if hn is one of secondary headers.\n\/\/ Step header is not one of them.\nfunc isHeader(hn *html.Node) bool {\n\t_, ok := headerLevel[hn.DataAtom]\n\treturn ok\n}\n\nfunc isMeta(hn *html.Node) bool {\n\telem := strings.ToLower(hn.Data)\n\treturn strings.HasPrefix(elem, metaDuration+metaSep) || strings.HasPrefix(elem, metaEnvironment+metaSep)\n}\n\nfunc isBold(hn *html.Node) bool {\n\tif hn.Type == html.TextNode {\n\t\thn = hn.Parent\n\t} else if hn.DataAtom == atom.Code {\n\t\t\/\/ Look up as many as 2 levels, to handle the case of e.g. <bold><em><code>\n\t\tfor i := 0; i < 2; i++ {\n\t\t\thn = hn.Parent\n\t\t\tif hn.DataAtom == atom.Strong || hn.DataAtom == atom.B {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\treturn hn.DataAtom == atom.Strong ||\n\t\thn.DataAtom == atom.B\n}\n\nfunc isItalic(hn *html.Node) bool {\n\tif hn.Type == html.TextNode {\n\t\thn = hn.Parent\n\t} else if hn.DataAtom == atom.Code {\n\t\t\/\/ Look up as many as 2 levels, to handle the case of e.g. <em><bold><code>\n\t\tfor i := 0; i < 2; i++ {\n\t\t\thn = hn.Parent\n\t\t\tif hn.DataAtom == atom.Em || hn.DataAtom == atom.I {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\treturn hn.DataAtom == atom.Em ||\n\t\thn.DataAtom == atom.I\n}\n\n\/\/ This is different to calling isBold and isItalic separately as we must look\n\/\/ up an extra level in the tree\nfunc isBoldAndItalic(hn *html.Node) bool {\n\tif hn.Parent == nil || hn.Parent.Parent == nil {\n\t\treturn false\n\t}\n\tif hn.Type == html.TextNode {\n\t\thn = hn.Parent\n\t}\n\treturn (isItalic(hn) && isBold(hn.Parent)) || (isItalic(hn.Parent) && isBold(hn))\n\n}\n\nfunc isConsole(hn *html.Node) bool {\n\tif hn.Type == html.TextNode {\n\t\thn = hn.Parent\n\t}\n\tif hn.DataAtom == atom.Code {\n\t\tfor _, a := range hn.Attr {\n\t\t\tif a.Key == \"class\" && a.Val == \"language-console\" {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\nfunc isCode(hn *html.Node) bool {\n\tif hn.Type == html.TextNode {\n\t\thn = hn.Parent\n\t}\n\treturn hn.DataAtom == atom.Code && !isConsole(hn)\n}\n\nfunc isButton(hn *html.Node) bool {\n\treturn hn.DataAtom == atom.Button\n}\n\nfunc isAside(hn *html.Node) bool {\n\treturn hn.DataAtom == atom.Aside\n}\n\nfunc isNewAside(hn *html.Node) bool {\n\tif hn.FirstChild == nil ||\n\t\thn.FirstChild.NextSibling == nil ||\n\t\thn.FirstChild.NextSibling.FirstChild == nil {\n\t\treturn false\n\t}\n\n\tbq := hn.DataAtom == atom.Blockquote\n\tapn := strings.HasPrefix(strings.ToLower(hn.FirstChild.NextSibling.FirstChild.Data), \"aside positive\") ||\n\t\tstrings.HasPrefix(strings.ToLower(hn.FirstChild.NextSibling.FirstChild.Data), \"aside negative\")\n\treturn bq && apn\n}\n\nfunc isInfobox(hn *html.Node) bool {\n\tif hn.DataAtom != atom.Dt {\n\t\treturn false\n\t}\n\treturn strings.ToLower(hn.FirstChild.Data) == \"positive\" || isInfoboxNegative(hn)\n}\n\nfunc isInfoboxNegative(hn *html.Node) bool {\n\tif hn.DataAtom != atom.Dt {\n\t\treturn false\n\t}\n\treturn strings.ToLower(hn.FirstChild.Data) == \"negative\"\n}\n\nfunc isSurvey(hn *html.Node) bool {\n\tif hn.DataAtom != atom.Form {\n\t\treturn false\n\t}\n\tif findAtom(hn, atom.Name) == nil {\n\t\treturn false\n\t}\n\tif len(findChildAtoms(hn, atom.Input)) == 0 {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc isTable(hn *html.Node) bool {\n\tif hn.DataAtom != atom.Table {\n\t\treturn false\n\t}\n\treturn countTwo(hn, atom.Tr) >= 1 || countTwo(hn, atom.Td) >= 1\n}\n\nfunc isList(hn *html.Node) bool {\n\treturn hn.DataAtom == atom.Ul || hn.DataAtom == atom.Ol\n}\n\nfunc isYoutube(hn *html.Node) bool {\n\treturn hn.DataAtom == atom.Video\n}\n\nfunc isFragmentImport(hn *html.Node) bool {\n\treturn hn.DataAtom == 0 && strings.HasPrefix(hn.Data, convertedImportsDataPrefix)\n}\n\n\/\/ countTwo starts counting the number of a Atom children in hn.\n\/\/ It returns as soon as the count exceeds 1, so the returned value is inexact.\n\/\/\n\/\/ The callers can test for > 1 to verify whether a node contains two\n\/\/ or more children of the Atom a.\nfunc countTwo(hn *html.Node, a atom.Atom) int {\n\tvar count int\n\tfor c := hn.FirstChild; c != nil; c = c.NextSibling {\n\t\tif c.DataAtom == a {\n\t\t\tcount++\n\t\t} else {\n\t\t\tcount += countTwo(c, a)\n\t\t}\n\t\tif count > 1 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn count\n}\n\n\/\/ countDirect returns the number of immediate children of hn.\nfunc countDirect(hn *html.Node) int {\n\tvar count int\n\tfor c := hn.FirstChild; c != nil; c = c.NextSibling {\n\t\tcount++\n\t}\n\treturn count\n}\n\n\/\/ findAtom returns first child of root which matches a, nil otherwise.\n\/\/ It returns root if it is the same Atom as a.\nfunc findAtom(root *html.Node, a atom.Atom) *html.Node {\n\tif root.DataAtom == a {\n\t\treturn root\n\t}\n\tfor c := root.FirstChild; c != nil; c = c.NextSibling {\n\t\tif v := findAtom(c, a); v != nil {\n\t\t\treturn v\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc findChildAtoms(root *html.Node, a atom.Atom) []*html.Node {\n\tvar nodes []*html.Node\n\tfor hn := root.FirstChild; hn != nil; hn = hn.NextSibling {\n\t\tif hn.DataAtom == a {\n\t\t\tnodes = append(nodes, hn)\n\t\t}\n\t\tnodes = append(nodes, findChildAtoms(hn, a)...)\n\t}\n\treturn nodes\n}\n\n\/\/ findParent is like findAtom but search is in the opposite direction.\n\/\/ It is faster to look for parent than child lookup in findAtom.\nfunc findParent(root *html.Node, a atom.Atom) *html.Node {\n\tif root.DataAtom == a {\n\t\treturn root\n\t}\n\tfor c := root.Parent; c != nil; c = c.Parent {\n\t\tif c.DataAtom == a {\n\t\t\treturn c\n\t\t}\n\t}\n\treturn nil\n}\n\nvar blockParents = map[atom.Atom]struct{}{\n\tatom.H1: {},\n\tatom.H2: {},\n\tatom.H3: {},\n\tatom.H4: {},\n\tatom.H5: {},\n\tatom.H6: {},\n\tatom.Li: {},\n\tatom.P: {},\n\tatom.Div: {},\n}\n\n\/\/ findBlockParent looks up nearest block parent node of hn.\n\/\/ For instance, block parent of \"text\" in <ul><li>text<\/li><\/ul> is <li>,\n\/\/ while block parent of \"text\" in <p><span>text<\/span><\/p> is <p>.\nfunc findBlockParent(hn *html.Node) *html.Node {\n\tfor p := hn.Parent; p != nil; p = p.Parent {\n\t\tif _, ok := blockParents[p.DataAtom]; ok {\n\t\t\treturn p\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ nodeAttr returns node attribute value of the key name.\n\/\/ Attribute keys are case insensitive.\nfunc nodeAttr(n *html.Node, name string) string {\n\tname = strings.ToLower(name)\n\tfor _, a := range n.Attr {\n\t\tif strings.ToLower(a.Key) == name {\n\t\t\treturn a.Val\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ stringifyNode extracts and concatenates all text nodes starting with root.\n\/\/ Line breaks are inserted at <br> and any non-<span> elements.\nfunc stringifyNode(root *html.Node, trim bool) string {\n\tif root.Type == html.TextNode {\n\t\ts := textCleaner.Replace(root.Data)\n\t\ts = strings.Replace(s, \"\\n\", \" \", -1)\n\t\tif !trim {\n\t\t\treturn s\n\t\t}\n\t\treturn strings.TrimSpace(s)\n\t}\n\tif root.DataAtom == atom.Br && !trim {\n\t\treturn \"\\n\"\n\t}\n\tvar buf bytes.Buffer\n\tfor c := root.FirstChild; c != nil; c = c.NextSibling {\n\t\tif c.DataAtom == atom.Br {\n\t\t\tbuf.WriteRune('\\n')\n\t\t\tcontinue\n\t\t}\n\t\tif c.Type == html.TextNode {\n\t\t\tbuf.WriteString(c.Data)\n\t\t\tcontinue\n\t\t}\n\t\tif c.DataAtom != atom.Span && c.DataAtom != atom.A {\n\t\t\tbuf.WriteRune('\\n')\n\t\t}\n\t\tbuf.WriteString(stringifyNode(c, false))\n\t}\n\ts := textCleaner.Replace(buf.String())\n\tif !trim {\n\t\treturn s\n\t}\n\treturn strings.TrimSpace(s)\n}\n<|endoftext|>"} {"text":"<commit_before>\/* Copyright (c) 2015, Daniel Martí <mvdan@mvdan.cc> *\/\n\/* See LICENSE for licensing information *\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/mvdan\/fdroidcl\"\n\t\"github.com\/mvdan\/fdroidcl\/adb\"\n)\n\nvar cmdSearch = &Command{\n\tUsageLine: \"search <regexp...>\",\n\tShort: \"Search available apps\",\n}\n\nvar (\n\tquiet = cmdSearch.Flag.Bool(\"q\", false, \"Print package names only\")\n\tinstalled = cmdSearch.Flag.Bool(\"i\", false, \"Filter installed packages\")\n)\n\nfunc init() {\n\tcmdSearch.Run = runSearch\n}\n\nfunc runSearch(args []string) {\n\tindex := mustLoadIndex()\n\tapps := filterAppsSearch(index.Apps, args)\n\tif *installed {\n\t\tdevice := oneDevice()\n\t\tinstalled := mustInstalled(device)\n\t\tapps = filterAppsInstalled(apps, installed)\n\t}\n\tif *quiet {\n\t\tfor _, app := range apps {\n\t\t\tfmt.Println(app.ID)\n\t\t}\n\t} else {\n\t\tprintApps(apps)\n\t}\n}\n\nfunc filterAppsSearch(apps []fdroidcl.App, terms []string) []fdroidcl.App {\n\tregexes := make([]*regexp.Regexp, len(terms))\n\tfor i, term := range terms {\n\t\tregexes[i] = regexp.MustCompile(term)\n\t}\n\tvar result []fdroidcl.App\n\tfor _, app := range apps {\n\t\tfields := []string{\n\t\t\tstrings.ToLower(app.ID),\n\t\t\tstrings.ToLower(app.Name),\n\t\t\tstrings.ToLower(app.Summary),\n\t\t\tstrings.ToLower(app.Desc),\n\t\t}\n\t\tif !appMatches(fields, regexes) {\n\t\t\tcontinue\n\t\t}\n\t\tresult = append(result, app)\n\t}\n\treturn result\n}\n\nfunc appMatches(fields []string, regexes []*regexp.Regexp) bool {\nfieldLoop:\n\tfor _, field := range fields {\n\t\tfor _, regex := range regexes {\n\t\t\tif !regex.MatchString(field) {\n\t\t\t\tcontinue fieldLoop\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc printApps(apps []fdroidcl.App) {\n\tmaxIDLen := 0\n\tfor _, app := range apps {\n\t\tif len(app.ID) > maxIDLen {\n\t\t\tmaxIDLen = len(app.ID)\n\t\t}\n\t}\n\tfor _, app := range apps {\n\t\tprintApp(app, maxIDLen)\n\t}\n}\n\nfunc printApp(app fdroidcl.App, IDLen int) {\n\tfmt.Printf(\"%s%s %s %s\\n\", app.ID, strings.Repeat(\" \", IDLen-len(app.ID)),\n\t\tapp.Name, app.CurApk.VName)\n\tfmt.Printf(\" %s\\n\", app.Summary)\n}\n\nfunc mustInstalled(device adb.Device) []adb.Package {\n\tinstalled, err := device.Installed()\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not get installed packages: %v\", err)\n\t}\n\treturn installed\n}\n\nfunc filterAppsInstalled(apps []fdroidcl.App, installed []adb.Package) []fdroidcl.App {\n\tinstMap := make(map[string]struct{}, len(installed))\n\tfor _, p := range installed {\n\t\tinstMap[p.ID] = struct{}{}\n\t}\n\tvar result []fdroidcl.App\n\tfor _, app := range apps {\n\t\tif _, e := instMap[app.ID]; !e {\n\t\t\tcontinue\n\t\t}\n\t\tresult = append(result, app)\n\t}\n\treturn result\n}\n<commit_msg>Add -u flag to search to filter updates<commit_after>\/* Copyright (c) 2015, Daniel Martí <mvdan@mvdan.cc> *\/\n\/* See LICENSE for licensing information *\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/mvdan\/fdroidcl\"\n\t\"github.com\/mvdan\/fdroidcl\/adb\"\n)\n\nvar cmdSearch = &Command{\n\tUsageLine: \"search <regexp...>\",\n\tShort: \"Search available apps\",\n}\n\nvar (\n\tquiet = cmdSearch.Flag.Bool(\"q\", false, \"Print package names only\")\n\tinstalled = cmdSearch.Flag.Bool(\"i\", false, \"Filter installed apps\")\n\tupdates = cmdSearch.Flag.Bool(\"u\", false, \"Filter apps with updates\")\n)\n\nfunc init() {\n\tcmdSearch.Run = runSearch\n}\n\nfunc runSearch(args []string) {\n\tindex := mustLoadIndex()\n\tapps := filterAppsSearch(index.Apps, args)\n\tif *installed && *updates {\n\t\tfmt.Println(\"-i and -u at the same time don't make sense\")\n\t\tcmdSearch.Flag.Usage()\n\t}\n\tif *installed {\n\t\tdevice := oneDevice()\n\t\tinstalled := mustInstalled(device)\n\t\tapps = filterAppsInstalled(apps, installed)\n\t}\n\tif *updates {\n\t\tdevice := oneDevice()\n\t\tinstalled := mustInstalled(device)\n\t\tapps = filterAppsUpdates(apps, installed)\n\t}\n\tif *quiet {\n\t\tfor _, app := range apps {\n\t\t\tfmt.Println(app.ID)\n\t\t}\n\t} else {\n\t\tprintApps(apps)\n\t}\n}\n\nfunc filterAppsSearch(apps []fdroidcl.App, terms []string) []fdroidcl.App {\n\tregexes := make([]*regexp.Regexp, len(terms))\n\tfor i, term := range terms {\n\t\tregexes[i] = regexp.MustCompile(term)\n\t}\n\tvar result []fdroidcl.App\n\tfor _, app := range apps {\n\t\tfields := []string{\n\t\t\tstrings.ToLower(app.ID),\n\t\t\tstrings.ToLower(app.Name),\n\t\t\tstrings.ToLower(app.Summary),\n\t\t\tstrings.ToLower(app.Desc),\n\t\t}\n\t\tif !appMatches(fields, regexes) {\n\t\t\tcontinue\n\t\t}\n\t\tresult = append(result, app)\n\t}\n\treturn result\n}\n\nfunc appMatches(fields []string, regexes []*regexp.Regexp) bool {\nfieldLoop:\n\tfor _, field := range fields {\n\t\tfor _, regex := range regexes {\n\t\t\tif !regex.MatchString(field) {\n\t\t\t\tcontinue fieldLoop\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc printApps(apps []fdroidcl.App) {\n\tmaxIDLen := 0\n\tfor _, app := range apps {\n\t\tif len(app.ID) > maxIDLen {\n\t\t\tmaxIDLen = len(app.ID)\n\t\t}\n\t}\n\tfor _, app := range apps {\n\t\tprintApp(app, maxIDLen)\n\t}\n}\n\nfunc printApp(app fdroidcl.App, IDLen int) {\n\tfmt.Printf(\"%s%s %s %s\\n\", app.ID, strings.Repeat(\" \", IDLen-len(app.ID)),\n\t\tapp.Name, app.CurApk.VName)\n\tfmt.Printf(\" %s\\n\", app.Summary)\n}\n\nfunc mustInstalled(device adb.Device) []adb.Package {\n\tinstalled, err := device.Installed()\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not get installed packages: %v\", err)\n\t}\n\treturn installed\n}\n\nfunc filterAppsInstalled(apps []fdroidcl.App, installed []adb.Package) []fdroidcl.App {\n\tinstMap := make(map[string]struct{}, len(installed))\n\tfor _, p := range installed {\n\t\tinstMap[p.ID] = struct{}{}\n\t}\n\tvar result []fdroidcl.App\n\tfor _, app := range apps {\n\t\tif _, e := instMap[app.ID]; !e {\n\t\t\tcontinue\n\t\t}\n\t\tresult = append(result, app)\n\t}\n\treturn result\n}\n\nfunc filterAppsUpdates(apps []fdroidcl.App, installed []adb.Package) []fdroidcl.App {\n\tinstMap := make(map[string]*adb.Package, len(installed))\n\tfor i := range installed {\n\t\tp := &installed[i]\n\t\tinstMap[p.ID] = p\n\t}\n\tvar result []fdroidcl.App\n\tfor _, app := range apps {\n\t\tp, e := instMap[app.ID]\n\t\tif !e {\n\t\t\tcontinue\n\t\t}\n\t\tif p.VCode >= app.CurApk.VCode {\n\t\t\tcontinue\n\t\t}\n\t\tresult = append(result, app)\n\t}\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"expvar\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\/atomic\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/cloud\/storage\"\n\n\t\"github.com\/thecodearchive\/gitarchive\/git\"\n\t\"github.com\/thecodearchive\/gitarchive\/index\"\n\t\"github.com\/thecodearchive\/gitarchive\/queue\"\n\t\"github.com\/thecodearchive\/gitarchive\/weekmap\"\n)\n\ntype Fetcher struct {\n\tq *queue.Queue\n\ti *index.Index\n\tbucket *storage.BucketHandle\n\tschedule *weekmap.WeekMap\n\n\texp *expvar.Map\n\n\tclosing uint32\n}\n\nfunc (f *Fetcher) Run() error {\n\tf.exp.Set(\"fetchbytes\", &expvar.Int{})\n\tfor atomic.LoadUint32(&f.closing) == 0 {\n\t\tif !f.schedule.Get(time.Now()) {\n\t\t\tf.exp.Add(\"sleep\", 1)\n\t\t\tinterruptableSleep(5 * time.Minute)\n\t\t\tcontinue\n\t\t}\n\n\t\tname, parent, err := f.q.Pop()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif name == \"\" {\n\t\t\tf.exp.Add(\"emptyqueue\", 1)\n\t\t\tinterruptableSleep(30 * time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := f.Fetch(name, parent); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (f *Fetcher) Fetch(name, parent string) error {\n\tf.exp.Add(\"fetches\", 1)\n\n\tname = \"github.com\/\" + name\n\thaves, deps, err := f.i.GetHaves(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif haves == nil {\n\t\tf.exp.Add(\"new\", 1)\n\t}\n\tif parent != \"\" {\n\t\tf.exp.Add(\"forks\", 1)\n\t}\n\n\tlogVerb, logFork := \"Cloning\", \"\"\n\tif haves != nil {\n\t\tlogVerb = \"Fetching\"\n\t}\n\tif parent != \"\" {\n\t\tlogFork = fmt.Sprintf(\" (fork of %s)\", parent)\n\t}\n\tlog.Printf(\"[+] %s %s%s...\", logVerb, name, logFork)\n\n\tstart := time.Now()\n\tbw := f.exp.Get(\"fetchbytes\").(*expvar.Int)\n\trefs, r, err := git.Fetch(\"git:\/\/\"+name+\".git\", haves, os.Stderr, bw)\n\tif err == git.RepoNotFoundError {\n\t\tlog.Println(\"[-] Repository vanished :(\")\n\t\tf.exp.Add(\"vanished\", 1)\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpackRefName := fmt.Sprintf(\"%s\/%d\", name, time.Now().UnixNano())\n\tif r != nil {\n\t\tw := f.bucket.Object(packRefName).NewWriter(context.Background())\n\n\t\tbytesFetched, err := io.Copy(w, r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tw.Close()\n\t\tr.Close()\n\t\tf.exp.Add(\"fetchtime\", int64(time.Since(start)))\n\t\tlog.Printf(\"[+] Got %d refs, %d bytes in %s.\", len(refs), bytesFetched, time.Since(start))\n\t} else {\n\t\t\/\/ Empty packfile.\n\t\tpackRefName = \"EMPTY|\" + packRefName\n\t\tf.exp.Add(\"emptypack\", 1)\n\t\tlog.Printf(\"[+] Got %d refs, and a empty packfile.\", len(refs))\n\t}\n\n\tif parent != \"\" {\n\t\tparent = \"github.com\/\" + parent\n\t}\n\n\treturn f.i.AddFetch(name, parent, time.Now(), refs, packRefName, deps)\n}\n\nfunc (f *Fetcher) Stop() {\n\tatomic.StoreUint32(&f.closing, 1)\n}\n\nfunc interruptableSleep(d time.Duration) bool {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt, syscall.SIGTERM)\n\tdefer signal.Stop(c)\n\tt := time.NewTimer(d)\n\tselect {\n\tcase <-c:\n\t\treturn false\n\tcase <-t.C:\n\t\treturn true\n\t}\n}\n<commit_msg>fetcher: bail on large repos<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"expvar\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"sync\/atomic\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/cloud\/storage\"\n\n\t\"github.com\/thecodearchive\/gitarchive\/git\"\n\t\"github.com\/thecodearchive\/gitarchive\/index\"\n\t\"github.com\/thecodearchive\/gitarchive\/queue\"\n\t\"github.com\/thecodearchive\/gitarchive\/weekmap\"\n)\n\nconst maxSize = 100 << 20\n\ntype Fetcher struct {\n\tq *queue.Queue\n\ti *index.Index\n\tbucket *storage.BucketHandle\n\tschedule *weekmap.WeekMap\n\n\texp *expvar.Map\n\n\tclosing uint32\n}\n\nfunc (f *Fetcher) Run() error {\n\tf.exp.Set(\"fetchbytes\", &expvar.Int{})\n\tfor atomic.LoadUint32(&f.closing) == 0 {\n\t\tif !f.schedule.Get(time.Now()) {\n\t\t\tf.exp.Add(\"sleep\", 1)\n\t\t\tinterruptableSleep(5 * time.Minute)\n\t\t\tcontinue\n\t\t}\n\n\t\tname, parent, err := f.q.Pop()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif name == \"\" {\n\t\t\tf.exp.Add(\"emptyqueue\", 1)\n\t\t\tinterruptableSleep(30 * time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := f.Fetch(name, parent); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (f *Fetcher) Fetch(name, parent string) error {\n\tf.exp.Add(\"fetches\", 1)\n\n\tname = \"github.com\/\" + name\n\thaves, deps, err := f.i.GetHaves(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif haves == nil {\n\t\tf.exp.Add(\"new\", 1)\n\t}\n\tif parent != \"\" {\n\t\tf.exp.Add(\"forks\", 1)\n\t}\n\n\tlogVerb, logFork := \"Cloning\", \"\"\n\tif haves != nil {\n\t\tlogVerb = \"Fetching\"\n\t}\n\tif parent != \"\" {\n\t\tlogFork = fmt.Sprintf(\" (fork of %s)\", parent)\n\t}\n\tlog.Printf(\"[+] %s %s%s...\", logVerb, name, logFork)\n\n\tstart := time.Now()\n\tbw := f.exp.Get(\"fetchbytes\").(*expvar.Int)\n\trefs, r, err := git.Fetch(\"git:\/\/\"+name+\".git\", haves, os.Stderr, bw)\n\tif err == git.RepoNotFoundError {\n\t\tlog.Println(\"[-] Repository vanished :(\")\n\t\tf.exp.Add(\"vanished\", 1)\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpackRefName := fmt.Sprintf(\"%s\/%d\", name, time.Now().UnixNano())\n\tif r != nil {\n\t\tw := f.bucket.Object(packRefName).NewWriter(context.Background())\n\n\t\tlr := &io.LimitedReader{R: r, N: maxSize}\n\t\tbytesFetched, err := io.Copy(w, lr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tr.Close()\n\t\tif lr.N <= 0 {\n\t\t\tw.CloseWithError(errors.New(\"too big\"))\n\t\t\tlog.Printf(\"[-] Repository too big :(\")\n\t\t\tf.exp.Add(\"toobig\", 1)\n\t\t\treturn nil\n\t\t}\n\t\tw.Close()\n\t\tf.exp.Add(\"fetchtime\", int64(time.Since(start)))\n\t\tlog.Printf(\"[+] Got %d refs, %d bytes in %s.\", len(refs), bytesFetched, time.Since(start))\n\t} else {\n\t\t\/\/ Empty packfile.\n\t\tpackRefName = \"EMPTY|\" + packRefName\n\t\tf.exp.Add(\"emptypack\", 1)\n\t\tlog.Printf(\"[+] Got %d refs, and a empty packfile.\", len(refs))\n\t}\n\n\tif parent != \"\" {\n\t\tparent = \"github.com\/\" + parent\n\t}\n\n\treturn f.i.AddFetch(name, parent, time.Now(), refs, packRefName, deps)\n}\n\nfunc (f *Fetcher) Stop() {\n\tatomic.StoreUint32(&f.closing, 1)\n}\n\nfunc interruptableSleep(d time.Duration) bool {\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt, syscall.SIGTERM)\n\tdefer signal.Stop(c)\n\tt := time.NewTimer(d)\n\tselect {\n\tcase <-c:\n\t\treturn false\n\tcase <-t.C:\n\t\treturn true\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright The Helm Authors.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"helm.sh\/helm\/v3\/cmd\/helm\/require\"\n)\n\nconst completionDesc = `\nGenerate autocompletion scripts for Helm for the specified shell.\n`\nconst bashCompDesc = `\nGenerate the autocompletion script for Helm for the bash shell.\n\nTo load completions in your current shell session:\n\n source <(helm completion bash)\n\nTo load completions for every new session, execute once:\n- Linux:\n\n helm completion bash > \/etc\/bash_completion.d\/helm\n\n- MacOS:\n\n helm completion bash > \/usr\/local\/etc\/bash_completion.d\/helm\n`\n\nconst zshCompDesc = `\nGenerate the autocompletion script for Helm for the zsh shell.\n\nTo load completions in your current shell session:\n\n source <(helm completion zsh)\n\nTo load completions for every new session, execute once:\n\n helm completion zsh > \"${fpath[1]}\/_helm\"\n`\n\nconst fishCompDesc = `\nGenerate the autocompletion script for Helm for the fish shell.\n\nTo load completions in your current shell session:\n\n helm completion fish | source\n\nTo load completions for every new session, execute once:\n\n helm completion fish > ~\/.config\/fish\/completions\/helm.fish\n\nYou will need to start a new shell for this setup to take effect.\n`\n\nconst (\n\tnoDescFlagName = \"no-descriptions\"\n\tnoDescFlagText = \"disable completion descriptions\"\n)\n\nvar disableCompDescriptions bool\n\nfunc newCompletionCmd(out io.Writer) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"completion\",\n\t\tShort: \"generate autocompletion scripts for the specified shell\",\n\t\tLong: completionDesc,\n\t\tArgs: require.NoArgs,\n\t}\n\n\tbash := &cobra.Command{\n\t\tUse: \"bash\",\n\t\tShort: \"generate autocompletion script for bash\",\n\t\tLong: bashCompDesc,\n\t\tArgs: require.NoArgs,\n\t\tDisableFlagsInUseLine: true,\n\t\tValidArgsFunction: noCompletions,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn runCompletionBash(out, cmd)\n\t\t},\n\t}\n\n\tzsh := &cobra.Command{\n\t\tUse: \"zsh\",\n\t\tShort: \"generate autocompletion script for zsh\",\n\t\tLong: zshCompDesc,\n\t\tArgs: require.NoArgs,\n\t\tValidArgsFunction: noCompletions,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn runCompletionZsh(out, cmd)\n\t\t},\n\t}\n\tzsh.Flags().BoolVar(&disableCompDescriptions, noDescFlagName, false, noDescFlagText)\n\n\tfish := &cobra.Command{\n\t\tUse: \"fish\",\n\t\tShort: \"generate autocompletion script for fish\",\n\t\tLong: fishCompDesc,\n\t\tArgs: require.NoArgs,\n\t\tValidArgsFunction: noCompletions,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn runCompletionFish(out, cmd)\n\t\t},\n\t}\n\tfish.Flags().BoolVar(&disableCompDescriptions, noDescFlagName, false, noDescFlagText)\n\n\tcmd.AddCommand(bash, zsh, fish)\n\n\treturn cmd\n}\n\nfunc runCompletionBash(out io.Writer, cmd *cobra.Command) error {\n\terr := cmd.Root().GenBashCompletion(out)\n\n\t\/\/ In case the user renamed the helm binary (e.g., to be able to run\n\t\/\/ both helm2 and helm3), we hook the new binary name to the completion function\n\tif binary := filepath.Base(os.Args[0]); binary != \"helm\" {\n\t\trenamedBinaryHook := `\n# Hook the command used to generate the completion script\n# to the helm completion function to handle the case where\n# the user renamed the helm binary\nif [[ $(type -t compopt) = \"builtin\" ]]; then\n complete -o default -F __start_helm %[1]s\nelse\n complete -o default -o nospace -F __start_helm %[1]s\nfi\n`\n\t\tfmt.Fprintf(out, renamedBinaryHook, binary)\n\t}\n\n\treturn err\n}\n\nfunc runCompletionZsh(out io.Writer, cmd *cobra.Command) error {\n\tvar err error\n\tif disableCompDescriptions {\n\t\terr = cmd.Root().GenZshCompletionNoDesc(out)\n\t} else {\n\t\terr = cmd.Root().GenZshCompletion(out)\n\t}\n\n\t\/\/ In case the user renamed the helm binary (e.g., to be able to run\n\t\/\/ both helm2 and helm3), we hook the new binary name to the completion function\n\tif binary := filepath.Base(os.Args[0]); binary != \"helm\" {\n\t\trenamedBinaryHook := `\n# Hook the command used to generate the completion script\n# to the helm completion function to handle the case where\n# the user renamed the helm binary\ncompdef _helm %[1]s\n`\n\t\tfmt.Fprintf(out, renamedBinaryHook, binary)\n\t}\n\n\t\/\/ Cobra doesn't source zsh completion file, explicitly doing it here\n\tfmt.Fprintf(out, \"compdef _helm helm\")\n\n\treturn err\n}\n\nfunc runCompletionFish(out io.Writer, cmd *cobra.Command) error {\n\treturn cmd.Root().GenFishCompletion(out, !disableCompDescriptions)\n}\n\n\/\/ Function to disable file completion\nfunc noCompletions(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {\n\treturn nil, cobra.ShellCompDirectiveNoFileComp\n}\n<commit_msg>feat(comp): Add support for powershell completion<commit_after>\/*\nCopyright The Helm Authors.\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\t\"helm.sh\/helm\/v3\/cmd\/helm\/require\"\n)\n\nconst completionDesc = `\nGenerate autocompletion scripts for Helm for the specified shell.\n`\nconst bashCompDesc = `\nGenerate the autocompletion script for Helm for the bash shell.\n\nTo load completions in your current shell session:\n\n source <(helm completion bash)\n\nTo load completions for every new session, execute once:\n- Linux:\n\n helm completion bash > \/etc\/bash_completion.d\/helm\n\n- MacOS:\n\n helm completion bash > \/usr\/local\/etc\/bash_completion.d\/helm\n`\n\nconst zshCompDesc = `\nGenerate the autocompletion script for Helm for the zsh shell.\n\nTo load completions in your current shell session:\n\n source <(helm completion zsh)\n\nTo load completions for every new session, execute once:\n\n helm completion zsh > \"${fpath[1]}\/_helm\"\n`\n\nconst fishCompDesc = `\nGenerate the autocompletion script for Helm for the fish shell.\n\nTo load completions in your current shell session:\n\n helm completion fish | source\n\nTo load completions for every new session, execute once:\n\n helm completion fish > ~\/.config\/fish\/completions\/helm.fish\n\nYou will need to start a new shell for this setup to take effect.\n`\n\nconst powershellCompDesc = `\nGenerate the autocompletion script for powershell.\n\nTo load completions in your current shell session:\nPS C:\\> helm completion powershell | Out-String | Invoke-Expression\n\nTo load completions for every new session, add the output of the above command\nto your powershell profile.\n`\n\nconst (\n\tnoDescFlagName = \"no-descriptions\"\n\tnoDescFlagText = \"disable completion descriptions\"\n)\n\nvar disableCompDescriptions bool\n\nfunc newCompletionCmd(out io.Writer) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"completion\",\n\t\tShort: \"generate autocompletion scripts for the specified shell\",\n\t\tLong: completionDesc,\n\t\tArgs: require.NoArgs,\n\t}\n\n\tbash := &cobra.Command{\n\t\tUse: \"bash\",\n\t\tShort: \"generate autocompletion script for bash\",\n\t\tLong: bashCompDesc,\n\t\tArgs: require.NoArgs,\n\t\tDisableFlagsInUseLine: true,\n\t\tValidArgsFunction: noCompletions,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn runCompletionBash(out, cmd)\n\t\t},\n\t}\n\n\tzsh := &cobra.Command{\n\t\tUse: \"zsh\",\n\t\tShort: \"generate autocompletion script for zsh\",\n\t\tLong: zshCompDesc,\n\t\tArgs: require.NoArgs,\n\t\tValidArgsFunction: noCompletions,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn runCompletionZsh(out, cmd)\n\t\t},\n\t}\n\tzsh.Flags().BoolVar(&disableCompDescriptions, noDescFlagName, false, noDescFlagText)\n\n\tfish := &cobra.Command{\n\t\tUse: \"fish\",\n\t\tShort: \"generate autocompletion script for fish\",\n\t\tLong: fishCompDesc,\n\t\tArgs: require.NoArgs,\n\t\tValidArgsFunction: noCompletions,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn runCompletionFish(out, cmd)\n\t\t},\n\t}\n\tfish.Flags().BoolVar(&disableCompDescriptions, noDescFlagName, false, noDescFlagText)\n\n\tpowershell := &cobra.Command{\n\t\tUse: \"powershell\",\n\t\tShort: \"generate autocompletion script for powershell\",\n\t\tLong: powershellCompDesc,\n\t\tArgs: require.NoArgs,\n\t\tValidArgsFunction: noCompletions,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn runCompletionPowershell(out, cmd)\n\t\t},\n\t}\n\tpowershell.Flags().BoolVar(&disableCompDescriptions, noDescFlagName, false, noDescFlagText)\n\n\tcmd.AddCommand(bash, zsh, fish, powershell)\n\n\treturn cmd\n}\n\nfunc runCompletionBash(out io.Writer, cmd *cobra.Command) error {\n\terr := cmd.Root().GenBashCompletion(out)\n\n\t\/\/ In case the user renamed the helm binary (e.g., to be able to run\n\t\/\/ both helm2 and helm3), we hook the new binary name to the completion function\n\tif binary := filepath.Base(os.Args[0]); binary != \"helm\" {\n\t\trenamedBinaryHook := `\n# Hook the command used to generate the completion script\n# to the helm completion function to handle the case where\n# the user renamed the helm binary\nif [[ $(type -t compopt) = \"builtin\" ]]; then\n complete -o default -F __start_helm %[1]s\nelse\n complete -o default -o nospace -F __start_helm %[1]s\nfi\n`\n\t\tfmt.Fprintf(out, renamedBinaryHook, binary)\n\t}\n\n\treturn err\n}\n\nfunc runCompletionZsh(out io.Writer, cmd *cobra.Command) error {\n\tvar err error\n\tif disableCompDescriptions {\n\t\terr = cmd.Root().GenZshCompletionNoDesc(out)\n\t} else {\n\t\terr = cmd.Root().GenZshCompletion(out)\n\t}\n\n\t\/\/ In case the user renamed the helm binary (e.g., to be able to run\n\t\/\/ both helm2 and helm3), we hook the new binary name to the completion function\n\tif binary := filepath.Base(os.Args[0]); binary != \"helm\" {\n\t\trenamedBinaryHook := `\n# Hook the command used to generate the completion script\n# to the helm completion function to handle the case where\n# the user renamed the helm binary\ncompdef _helm %[1]s\n`\n\t\tfmt.Fprintf(out, renamedBinaryHook, binary)\n\t}\n\n\t\/\/ Cobra doesn't source zsh completion file, explicitly doing it here\n\tfmt.Fprintf(out, \"compdef _helm helm\")\n\n\treturn err\n}\n\nfunc runCompletionFish(out io.Writer, cmd *cobra.Command) error {\n\treturn cmd.Root().GenFishCompletion(out, !disableCompDescriptions)\n}\n\nfunc runCompletionPowershell(out io.Writer, cmd *cobra.Command) error {\n\tif disableCompDescriptions {\n\t\treturn cmd.Root().GenPowerShellCompletion(out)\n\t}\n\treturn cmd.Root().GenPowerShellCompletionWithDesc(out)\n}\n\n\/\/ Function to disable file completion\nfunc noCompletions(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {\n\treturn nil, cobra.ShellCompDirectiveNoFileComp\n}\n<|endoftext|>"} {"text":"<commit_before>package internal\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/ready-steady\/numeric\/basis\/linhat\"\n\t\"github.com\/ready-steady\/numeric\/grid\/newcot\"\n\t\"github.com\/ready-steady\/numeric\/interpolation\/adhier\"\n)\n\ntype Solver struct {\n\tadhier.Interpolator\n}\n\ntype Solution struct {\n\tadhier.Surrogate\n\tExpectation []float64\n}\n\nfunc NewSolver(problem *Problem, target Target) (*Solver, error) {\n\tni, _ := target.Dimensions()\n\n\tvar grid adhier.Grid\n\tvar basis adhier.Basis\n\n\tswitch strings.ToLower(problem.Config.Interpolation.Rule) {\n\tcase \"open\":\n\t\tgrid, basis = newcot.NewOpen(ni), linhat.NewOpen(ni)\n\tcase \"closed\":\n\t\tgrid, basis = newcot.NewClosed(ni), linhat.NewClosed(ni)\n\tdefault:\n\t\treturn nil, errors.New(\"the interpolation rule is unknown\")\n\t}\n\n\tinterpolator := adhier.New(grid, basis,\n\t\t(*adhier.Config)(&problem.Config.Interpolation.Config))\n\n\treturn &Solver{*interpolator}, nil\n}\n\nfunc (s *Solver) Compute(target Target) *Solution {\n\tsurrogate := s.Interpolator.Compute(target)\n\ttarget.Monitor(surrogate.Level, 0, surrogate.Nodes)\n\treturn &Solution{\n\t\tSurrogate: *surrogate,\n\t\tExpectation: s.Interpolator.Integrate(surrogate),\n\t}\n}\n\nfunc (s *Solver) Evaluate(solution *Solution, nodes []float64) []float64 {\n\treturn s.Interpolator.Evaluate(&solution.Surrogate, nodes)\n}\n\nfunc (s *Solution) String() string {\n\treturn fmt.Sprintf(\"Solution{inputs: %d, outputs: %d, level: %d, nodes: %d}\",\n\t\ts.Inputs, s.Outputs, s.Level, s.Nodes)\n}\n<commit_msg>Fix a typo in solver<commit_after>package internal\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/ready-steady\/numeric\/basis\/linhat\"\n\t\"github.com\/ready-steady\/numeric\/grid\/newcot\"\n\t\"github.com\/ready-steady\/numeric\/interpolation\/adhier\"\n)\n\ntype Solver struct {\n\tadhier.Interpolator\n}\n\ntype Solution struct {\n\tadhier.Surrogate\n\tExpectation []float64\n}\n\nfunc NewSolver(problem *Problem, target Target) (*Solver, error) {\n\tni, _ := target.Dimensions()\n\n\tvar grid adhier.Grid\n\tvar basis adhier.Basis\n\n\tswitch strings.ToLower(problem.Config.Interpolation.Rule) {\n\tcase \"open\":\n\t\tgrid, basis = newcot.NewOpen(ni), linhat.NewOpen(ni)\n\tcase \"closed\":\n\t\tgrid, basis = newcot.NewClosed(ni), linhat.NewClosed(ni)\n\tdefault:\n\t\treturn nil, errors.New(\"the interpolation rule is unknown\")\n\t}\n\n\tinterpolator := adhier.New(grid, basis,\n\t\t(*adhier.Config)(&problem.Config.Interpolation.Config))\n\n\treturn &Solver{*interpolator}, nil\n}\n\nfunc (s *Solver) Compute(target Target) *Solution {\n\tsurrogate := s.Interpolator.Compute(target)\n\ttarget.Monitor(surrogate.Level, surrogate.Nodes, 0)\n\treturn &Solution{\n\t\tSurrogate: *surrogate,\n\t\tExpectation: s.Interpolator.Integrate(surrogate),\n\t}\n}\n\nfunc (s *Solver) Evaluate(solution *Solution, nodes []float64) []float64 {\n\treturn s.Interpolator.Evaluate(&solution.Surrogate, nodes)\n}\n\nfunc (s *Solution) String() string {\n\treturn fmt.Sprintf(\"Solution{inputs: %d, outputs: %d, level: %d, nodes: %d}\",\n\t\ts.Inputs, s.Outputs, s.Level, s.Nodes)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\n\t\"istio.io\/manager\/cmd\"\n\t\"istio.io\/manager\/cmd\/version\"\n\t\"istio.io\/manager\/platform\/kube\/inject\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\thub string\n\ttag string\n\tmanagerAddr string\n\tmixerAddr string\n\tsidecarProxyUID int64\n\tsidecarProxyPort int\n\truntimeVerbosity int\n\tversionStr string \/\/ override build version\n\n\tinFilename string\n\toutFilename string\n)\n\nvar (\n\tinjectCmd = &cobra.Command{\n\t\tUse: \"kube-inject\",\n\t\tShort: \"Inject istio runtime into kubernete resources\",\n\t\tRunE: func(_ *cobra.Command, _ []string) (err error) {\n\t\t\tif inFilename == \"\" {\n\t\t\t\treturn errors.New(\"filename not specified (see --filename or -f)\")\n\t\t\t}\n\t\t\tvar reader io.Reader\n\t\t\tif inFilename == \"-\" {\n\t\t\t\treader = os.Stdin\n\t\t\t} else {\n\t\t\t\treader, err = os.Open(inFilename)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar writer io.Writer\n\t\t\tif outFilename == \"\" {\n\t\t\t\twriter = os.Stdout\n\t\t\t} else {\n\t\t\t\tfile, err := os.Create(outFilename)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\twriter = file\n\t\t\t\tdefer func() { err = file.Close() }()\n\t\t\t}\n\n\t\t\tif versionStr == \"\" {\n\t\t\t\tversionStr = fmt.Sprintf(\"%v@%v-%v-%v\",\n\t\t\t\t\tversion.Info.User,\n\t\t\t\t\tversion.Info.Host,\n\t\t\t\t\tversion.Info.Version,\n\t\t\t\t\tversion.Info.GitRevision)\n\t\t\t}\n\t\t\tparams := &inject.Params{\n\t\t\t\tInitImage: inject.InitImageName(hub, tag),\n\t\t\t\tRuntimeImage: inject.RuntimeImageName(hub, tag),\n\t\t\t\tRuntimeVerbosity: runtimeVerbosity,\n\t\t\t\tManagerAddr: managerAddr,\n\t\t\t\tMixerAddr: mixerAddr,\n\t\t\t\tSidecarProxyUID: sidecarProxyUID,\n\t\t\t\tSidecarProxyPort: sidecarProxyPort,\n\t\t\t\tVersion: versionStr,\n\t\t\t}\n\t\t\treturn inject.IntoResourceFile(params, reader, writer)\n\t\t},\n\t}\n)\n\nfunc init() {\n\tinjectCmd.PersistentFlags().StringVar(&hub, \"hub\",\n\t\tinject.DefaultHub, \"Docker hub\")\n\tinjectCmd.PersistentFlags().StringVar(&tag, \"tag\",\n\t\tinject.DefaultTag, \"Docker tag\")\n\tinjectCmd.PersistentFlags().StringVarP(&inFilename, \"filename\", \"f\",\n\t\t\"\", \"Input kubernetes resource filename\")\n\tinjectCmd.PersistentFlags().StringVarP(&outFilename, \"output\", \"o\",\n\t\t\"\", \"Modified output kubernetes resource filename\")\n\tinjectCmd.PersistentFlags().StringVar(&managerAddr, \"managerAddr\",\n\t\tinject.DefaultManagerAddr, \"Manager service DNS address\")\n\tinjectCmd.PersistentFlags().StringVar(&mixerAddr, \"mixerAddr\",\n\t\tinject.DefaultMixerAddr, \"Mixer DNS address\")\n\tinjectCmd.PersistentFlags().IntVar(&runtimeVerbosity, \"verbosity\",\n\t\tinject.DefaultRuntimeVerbosity, \"Runtime verbosity\")\n\tinjectCmd.PersistentFlags().Int64Var(&sidecarProxyUID, \"sidecarProxyUID\",\n\t\tinject.DefaultSidecarProxyUID, \"Sidecar proxy UID\")\n\tinjectCmd.PersistentFlags().IntVar(&sidecarProxyPort, \"sidecarProxyPort\",\n\t\tinject.DefaultSidecarProxyPort, \"Sidecar proxy Port\")\n\tinjectCmd.PersistentFlags().StringVar(&versionStr, \"setVersionString\",\n\t\t\"\", \"Override version info injected into resource\")\n\tcmd.RootCmd.AddCommand(injectCmd)\n}\n<commit_msg>add long help text and example usage to kube-inject command (#393)<commit_after>\/\/ Copyright 2017 Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\n\t\"istio.io\/manager\/cmd\"\n\t\"istio.io\/manager\/cmd\/version\"\n\t\"istio.io\/manager\/platform\/kube\/inject\"\n\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar (\n\thub string\n\ttag string\n\tmanagerAddr string\n\tmixerAddr string\n\tsidecarProxyUID int64\n\tsidecarProxyPort int\n\truntimeVerbosity int\n\tversionStr string \/\/ override build version\n\n\tinFilename string\n\toutFilename string\n)\n\nvar (\n\tinjectCmd = &cobra.Command{\n\t\tUse: \"kube-inject\",\n\t\tShort: \"Inject istio sidecar proxy into kubernetes resources\",\n\t\tLong: `\nUse kube-inject to manually inject istio sidecar proxy into kubernetes\nresource files. Unsupported resources are left unmodified so it is\nsafe to run kube-inject over a single file that contains multiple\nService, ConfigMap, Deployment, etc. definitions for a complex\napplication. Its best to do this when the resource is initially\ncreated.\n\nExample usage:\n\n\tkubectl apply -f <(istioctl kube-inject -f <resource.yaml>)\n`,\n\t\tRunE: func(_ *cobra.Command, _ []string) (err error) {\n\t\t\tif inFilename == \"\" {\n\t\t\t\treturn errors.New(\"filename not specified (see --filename or -f)\")\n\t\t\t}\n\t\t\tvar reader io.Reader\n\t\t\tif inFilename == \"-\" {\n\t\t\t\treader = os.Stdin\n\t\t\t} else {\n\t\t\t\treader, err = os.Open(inFilename)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar writer io.Writer\n\t\t\tif outFilename == \"\" {\n\t\t\t\twriter = os.Stdout\n\t\t\t} else {\n\t\t\t\tfile, err := os.Create(outFilename)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\twriter = file\n\t\t\t\tdefer func() { err = file.Close() }()\n\t\t\t}\n\n\t\t\tif versionStr == \"\" {\n\t\t\t\tversionStr = fmt.Sprintf(\"%v@%v-%v-%v\",\n\t\t\t\t\tversion.Info.User,\n\t\t\t\t\tversion.Info.Host,\n\t\t\t\t\tversion.Info.Version,\n\t\t\t\t\tversion.Info.GitRevision)\n\t\t\t}\n\t\t\tparams := &inject.Params{\n\t\t\t\tInitImage: inject.InitImageName(hub, tag),\n\t\t\t\tRuntimeImage: inject.RuntimeImageName(hub, tag),\n\t\t\t\tRuntimeVerbosity: runtimeVerbosity,\n\t\t\t\tManagerAddr: managerAddr,\n\t\t\t\tMixerAddr: mixerAddr,\n\t\t\t\tSidecarProxyUID: sidecarProxyUID,\n\t\t\t\tSidecarProxyPort: sidecarProxyPort,\n\t\t\t\tVersion: versionStr,\n\t\t\t}\n\t\t\treturn inject.IntoResourceFile(params, reader, writer)\n\t\t},\n\t}\n)\n\nfunc init() {\n\tinjectCmd.PersistentFlags().StringVar(&hub, \"hub\",\n\t\tinject.DefaultHub, \"Docker hub\")\n\tinjectCmd.PersistentFlags().StringVar(&tag, \"tag\",\n\t\tinject.DefaultTag, \"Docker tag\")\n\tinjectCmd.PersistentFlags().StringVarP(&inFilename, \"filename\", \"f\",\n\t\t\"\", \"Input kubernetes resource filename\")\n\tinjectCmd.PersistentFlags().StringVarP(&outFilename, \"output\", \"o\",\n\t\t\"\", \"Modified output kubernetes resource filename\")\n\tinjectCmd.PersistentFlags().StringVar(&managerAddr, \"managerAddr\",\n\t\tinject.DefaultManagerAddr, \"Manager service DNS address\")\n\tinjectCmd.PersistentFlags().StringVar(&mixerAddr, \"mixerAddr\",\n\t\tinject.DefaultMixerAddr, \"Mixer DNS address\")\n\tinjectCmd.PersistentFlags().IntVar(&runtimeVerbosity, \"verbosity\",\n\t\tinject.DefaultRuntimeVerbosity, \"Runtime verbosity\")\n\tinjectCmd.PersistentFlags().Int64Var(&sidecarProxyUID, \"sidecarProxyUID\",\n\t\tinject.DefaultSidecarProxyUID, \"Sidecar proxy UID\")\n\tinjectCmd.PersistentFlags().IntVar(&sidecarProxyPort, \"sidecarProxyPort\",\n\t\tinject.DefaultSidecarProxyPort, \"Sidecar proxy Port\")\n\tinjectCmd.PersistentFlags().StringVar(&versionStr, \"setVersionString\",\n\t\t\"\", \"Override version info injected into resource\")\n\tcmd.RootCmd.AddCommand(injectCmd)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\nvar rc *rateCalculator\n\nfunc statusService(addr string) {\n\trc = newRateCalculator(360, 10*time.Second, &bytesProxied)\n\n\thttp.HandleFunc(\"\/status\", getStatus)\n\tif err := http.ListenAndServe(addr, nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc getStatus(w http.ResponseWriter, r *http.Request) {\n\tstatus := make(map[string]interface{})\n\n\tsessionMut.Lock()\n\t\/\/ This can potentially be double the number of pending sessions, as each session has two keys, one for each side.\n\tstatus[\"startTime\"] = rc.startTime\n\tstatus[\"uptime\"] = time.Since(rc.startTime)\n\tstatus[\"numPendingSessionKeys\"] = len(pendingSessions)\n\tstatus[\"numActiveSessions\"] = len(activeSessions)\n\tsessionMut.Unlock()\n\tstatus[\"numConnections\"] = atomic.LoadInt64(&numConnections)\n\tstatus[\"numProxies\"] = atomic.LoadInt64(&numProxies)\n\tstatus[\"bytesProxied\"] = atomic.LoadInt64(&bytesProxied)\n\tstatus[\"goVersion\"] = runtime.Version()\n\tstatus[\"goOS\"] = runtime.GOOS\n\tstatus[\"goAarch\"] = runtime.GOARCH\n\tstatus[\"goMaxProcs\"] = runtime.GOMAXPROCS(-1)\n\tstatus[\"kbps10s1m5m15m30m60m\"] = []int64{\n\t\trc.rate(10\/10) * 8 \/ 1000,\n\t\trc.rate(60\/10) * 8 \/ 1000,\n\t\trc.rate(5*60\/10) * 8 \/ 1000,\n\t\trc.rate(15*60\/10) * 8 \/ 1000,\n\t\trc.rate(30*60\/10) * 8 \/ 1000,\n\t\trc.rate(60*60\/10) * 8 \/ 1000,\n\t}\n\tstatus[\"options\"] = map[string]interface{}{\n\t\t\"network-timeout\": networkTimeout,\n\t\t\"ping-interval\": pingInterval,\n\t\t\"message-timeout\": messageTimeout,\n\t\t\"per-session-rate\": sessionLimitBps,\n\t\t\"global-rate\": globalLimitBps,\n\t\t\"pools\": defaultPoolAddrs,\n\t}\n\n\tbs, err := json.MarshalIndent(status, \"\", \" \")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(bs)\n}\n\ntype rateCalculator struct {\n\trates []int64\n\tprev int64\n\tcounter *int64\n\tstartTime time.Time\n}\n\nfunc newRateCalculator(keepIntervals int, interval time.Duration, counter *int64) *rateCalculator {\n\tr := &rateCalculator{\n\t\trates: make([]int64, keepIntervals),\n\t\tcounter: counter,\n\t\tstartTime: time.Now(),\n\t}\n\n\tgo r.updateRates(interval)\n\n\treturn r\n}\n\nfunc (r *rateCalculator) updateRates(interval time.Duration) {\n\tfor {\n\t\tnow := time.Now()\n\t\tnext := now.Truncate(interval).Add(interval)\n\t\ttime.Sleep(next.Sub(now))\n\n\t\tcur := atomic.LoadInt64(r.counter)\n\t\trate := int64(float64(cur-r.prev) \/ interval.Seconds())\n\t\tcopy(r.rates[1:], r.rates)\n\t\tr.rates[0] = rate\n\t\tr.prev = cur\n\t}\n}\n\nfunc (r *rateCalculator) rate(periods int) int64 {\n\tvar tot int64\n\tfor i := 0; i < periods; i++ {\n\t\ttot += r.rates[i]\n\t}\n\treturn tot \/ int64(periods)\n}\n<commit_msg>Expose provided by in status endpoint<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\t\"net\/http\"\n\t\"runtime\"\n\t\"sync\/atomic\"\n\t\"time\"\n)\n\nvar rc *rateCalculator\n\nfunc statusService(addr string) {\n\trc = newRateCalculator(360, 10*time.Second, &bytesProxied)\n\n\thttp.HandleFunc(\"\/status\", getStatus)\n\tif err := http.ListenAndServe(addr, nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc getStatus(w http.ResponseWriter, r *http.Request) {\n\tstatus := make(map[string]interface{})\n\n\tsessionMut.Lock()\n\t\/\/ This can potentially be double the number of pending sessions, as each session has two keys, one for each side.\n\tstatus[\"startTime\"] = rc.startTime\n\tstatus[\"uptime\"] = time.Since(rc.startTime)\n\tstatus[\"numPendingSessionKeys\"] = len(pendingSessions)\n\tstatus[\"numActiveSessions\"] = len(activeSessions)\n\tsessionMut.Unlock()\n\tstatus[\"numConnections\"] = atomic.LoadInt64(&numConnections)\n\tstatus[\"numProxies\"] = atomic.LoadInt64(&numProxies)\n\tstatus[\"bytesProxied\"] = atomic.LoadInt64(&bytesProxied)\n\tstatus[\"goVersion\"] = runtime.Version()\n\tstatus[\"goOS\"] = runtime.GOOS\n\tstatus[\"goAarch\"] = runtime.GOARCH\n\tstatus[\"goMaxProcs\"] = runtime.GOMAXPROCS(-1)\n\tstatus[\"kbps10s1m5m15m30m60m\"] = []int64{\n\t\trc.rate(10\/10) * 8 \/ 1000,\n\t\trc.rate(60\/10) * 8 \/ 1000,\n\t\trc.rate(5*60\/10) * 8 \/ 1000,\n\t\trc.rate(15*60\/10) * 8 \/ 1000,\n\t\trc.rate(30*60\/10) * 8 \/ 1000,\n\t\trc.rate(60*60\/10) * 8 \/ 1000,\n\t}\n\tstatus[\"options\"] = map[string]interface{}{\n\t\t\"network-timeout\": networkTimeout,\n\t\t\"ping-interval\": pingInterval,\n\t\t\"message-timeout\": messageTimeout,\n\t\t\"per-session-rate\": sessionLimitBps,\n\t\t\"global-rate\": globalLimitBps,\n\t\t\"pools\": defaultPoolAddrs,\n\t\t\"provided-by\": providedBy,\n\t}\n\n\tbs, err := json.MarshalIndent(status, \"\", \" \")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(bs)\n}\n\ntype rateCalculator struct {\n\trates []int64\n\tprev int64\n\tcounter *int64\n\tstartTime time.Time\n}\n\nfunc newRateCalculator(keepIntervals int, interval time.Duration, counter *int64) *rateCalculator {\n\tr := &rateCalculator{\n\t\trates: make([]int64, keepIntervals),\n\t\tcounter: counter,\n\t\tstartTime: time.Now(),\n\t}\n\n\tgo r.updateRates(interval)\n\n\treturn r\n}\n\nfunc (r *rateCalculator) updateRates(interval time.Duration) {\n\tfor {\n\t\tnow := time.Now()\n\t\tnext := now.Truncate(interval).Add(interval)\n\t\ttime.Sleep(next.Sub(now))\n\n\t\tcur := atomic.LoadInt64(r.counter)\n\t\trate := int64(float64(cur-r.prev) \/ interval.Seconds())\n\t\tcopy(r.rates[1:], r.rates)\n\t\tr.rates[0] = rate\n\t\tr.prev = cur\n\t}\n}\n\nfunc (r *rateCalculator) rate(periods int) int64 {\n\tvar tot int64\n\tfor i := 0; i < periods; i++ {\n\t\ttot += r.rates[i]\n\t}\n\treturn tot \/ int64(periods)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2015-2022 MinIO, Inc.\n\/\/\n\/\/ This file is part of MinIO Object Storage stack\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Affero General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Affero General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU Affero General Public License\n\/\/ along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/charmbracelet\/bubbles\/spinner\"\n\ttea \"github.com\/charmbracelet\/bubbletea\"\n\t\"github.com\/charmbracelet\/lipgloss\"\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/minio\/madmin-go\"\n\t\"github.com\/olekukonko\/tablewriter\"\n)\n\n\/\/ TODO: Add ART (Average Response Time) latency\ntype topAPIStats struct {\n\tTotalCalls uint64\n\tTotalBytesRX uint64\n\tTotalBytesTX uint64\n}\n\nfunc (s *topAPIStats) addAPICall(n int) {\n\tatomic.AddUint64(&s.TotalCalls, uint64(n))\n}\n\nfunc (s *topAPIStats) addAPIBytesRX(n int) {\n\tatomic.AddUint64(&s.TotalBytesRX, uint64(n))\n}\n\nfunc (s *topAPIStats) addAPIBytesTX(n int) {\n\tatomic.AddUint64(&s.TotalBytesTX, uint64(n))\n}\n\nfunc (s *topAPIStats) loadAPICall() uint64 {\n\treturn atomic.LoadUint64(&s.TotalCalls)\n}\n\nfunc (s *topAPIStats) loadAPIBytesRX() uint64 {\n\treturn atomic.LoadUint64(&s.TotalBytesRX)\n}\n\nfunc (s *topAPIStats) loadAPIBytesTX() uint64 {\n\treturn atomic.LoadUint64(&s.TotalBytesTX)\n}\n\ntype traceUI struct {\n\tspinner spinner.Model\n\tquitting bool\n\tstartTime time.Time\n\tresult topAPIResult\n\tlastResult topAPIResult\n\tapiStatsMap map[string]*topAPIStats\n}\n\ntype topAPIResult struct {\n\tfinal bool\n\tapiCallInfo madmin.ServiceTraceInfo\n}\n\nfunc initTraceUI() *traceUI {\n\ts := spinner.New()\n\ts.Spinner = spinner.Points\n\ts.Style = lipgloss.NewStyle().Foreground(lipgloss.Color(\"205\"))\n\treturn &traceUI{\n\t\tspinner: s,\n\t\tapiStatsMap: make(map[string]*topAPIStats),\n\t}\n}\n\nfunc (m *traceUI) Init() tea.Cmd {\n\treturn m.spinner.Tick\n}\n\nfunc (m *traceUI) Update(msg tea.Msg) (tea.Model, tea.Cmd) {\n\tswitch msg := msg.(type) {\n\tcase tea.KeyMsg:\n\t\tswitch msg.String() {\n\t\tcase \"ctrl+c\":\n\t\t\tm.quitting = true\n\t\t\treturn m, tea.Quit\n\t\tdefault:\n\t\t\treturn m, nil\n\t\t}\n\tcase topAPIResult:\n\t\tm.result = msg\n\t\tif m.result.apiCallInfo.Trace.FuncName != \"\" {\n\t\t\tm.lastResult = m.result\n\t\t}\n\t\tif msg.final {\n\t\t\tm.quitting = true\n\t\t\treturn m, tea.Quit\n\t\t}\n\t\treturn m, nil\n\n\tcase spinner.TickMsg:\n\t\tvar cmd tea.Cmd\n\t\tm.spinner, cmd = m.spinner.Update(msg)\n\t\treturn m, cmd\n\tdefault:\n\t\treturn m, nil\n\t}\n}\n\nfunc (m *traceUI) View() string {\n\tvar s strings.Builder\n\ts.WriteString(\"\\n\")\n\n\t\/\/ Set table header\n\ttable := tablewriter.NewWriter(&s)\n\ttable.SetAutoWrapText(false)\n\ttable.SetAutoFormatHeaders(true)\n\ttable.SetHeaderAlignment(tablewriter.ALIGN_LEFT)\n\ttable.SetAlignment(tablewriter.ALIGN_LEFT)\n\ttable.SetCenterSeparator(\"\")\n\ttable.SetColumnSeparator(\"\")\n\ttable.SetRowSeparator(\"\")\n\ttable.SetHeaderLine(false)\n\ttable.SetBorder(false)\n\ttable.SetTablePadding(\"\\t\") \/\/ pad with tabs\n\ttable.SetNoWhiteSpace(true)\n\n\tres := m.result.apiCallInfo\n\tif m.startTime.IsZero() && !res.Trace.Time.IsZero() {\n\t\tm.startTime = res.Trace.Time\n\t}\n\tif res.Trace.FuncName != \"\" && res.Trace.FuncName != \"errorResponseHandler\" {\n\t\ttraceSt, ok := m.apiStatsMap[res.Trace.FuncName]\n\t\tif !ok {\n\t\t\ttraceSt = &topAPIStats{}\n\t\t}\n\t\ttraceSt.addAPICall(1)\n\t\tif res.Trace.HTTP != nil {\n\t\t\ttraceSt.addAPIBytesRX(res.Trace.HTTP.CallStats.InputBytes)\n\t\t\ttraceSt.addAPIBytesTX(res.Trace.HTTP.CallStats.OutputBytes)\n\t\t}\n\t\tm.apiStatsMap[res.Trace.FuncName] = traceSt\n\t}\n\n\ttable.SetHeader([]string{\"API\", \"CALLS\", \"RX\", \"TX\"})\n\tdata := make([][]string, 0, len(m.apiStatsMap))\n\n\tfor k, stats := range m.apiStatsMap {\n\t\tdata = append(data, []string{\n\t\t\tk,\n\t\t\twhiteStyle.Render(fmt.Sprintf(\"%d\", stats.loadAPICall())),\n\t\t\twhiteStyle.Render(humanize.IBytes(stats.loadAPIBytesRX())),\n\t\t\twhiteStyle.Render(humanize.IBytes(stats.loadAPIBytesTX())),\n\t\t})\n\t}\n\tsort.Slice(data, func(i, j int) bool {\n\t\treturn data[i][0] < data[j][0]\n\t})\n\n\ttable.AppendBulk(data)\n\ttable.Render()\n\n\tif !m.quitting {\n\t\ts.WriteString(fmt.Sprintf(\"\\nTopAPI: %s\", m.spinner.View()))\n\t} else {\n\t\tvar totalTX, totalRX, totalCalls uint64\n\t\tlastReqTime := m.lastResult.apiCallInfo.Trace.Time\n\t\tif m.lastResult.apiCallInfo.Trace.Time.IsZero() {\n\t\t\tlastReqTime = time.Now()\n\t\t}\n\t\tfor _, stats := range m.apiStatsMap {\n\t\t\ttotalRX += stats.loadAPIBytesRX()\n\t\t\ttotalTX += stats.loadAPIBytesTX()\n\t\t\ttotalCalls += stats.loadAPICall()\n\t\t}\n\t\tmsg := fmt.Sprintf(\"\\n========\\nTotal: %d CALLS, %s RX, %s TX - in %.02fs\",\n\t\t\ttotalCalls,\n\t\t\thumanize.IBytes(totalRX),\n\t\t\thumanize.IBytes(totalTX),\n\t\t\tlastReqTime.Sub(m.startTime).Seconds(),\n\t\t)\n\t\ts.WriteString(msg)\n\t\ts.WriteString(\"\\n\")\n\t}\n\treturn s.String()\n}\n<commit_msg>add errors to admin top api command (#4176)<commit_after>\/\/ Copyright (c) 2015-2022 MinIO, Inc.\n\/\/\n\/\/ This file is part of MinIO Object Storage stack\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU Affero General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU Affero General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU Affero General Public License\n\/\/ along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/charmbracelet\/bubbles\/spinner\"\n\ttea \"github.com\/charmbracelet\/bubbletea\"\n\t\"github.com\/charmbracelet\/lipgloss\"\n\t\"github.com\/dustin\/go-humanize\"\n\t\"github.com\/minio\/madmin-go\"\n\t\"github.com\/olekukonko\/tablewriter\"\n)\n\n\/\/ TODO: Add ART (Average Response Time) latency\ntype topAPIStats struct {\n\tTotalCalls uint64\n\tTotalBytesRX uint64\n\tTotalBytesTX uint64\n\tTotalErrors uint64\n}\n\nfunc (s *topAPIStats) addAPICall(n int) {\n\tatomic.AddUint64(&s.TotalCalls, uint64(n))\n}\n\nfunc (s *topAPIStats) addAPIBytesRX(n int) {\n\tatomic.AddUint64(&s.TotalBytesRX, uint64(n))\n}\n\nfunc (s *topAPIStats) addAPIBytesTX(n int) {\n\tatomic.AddUint64(&s.TotalBytesTX, uint64(n))\n}\n\nfunc (s *topAPIStats) addAPIErrors(n int) {\n\tatomic.AddUint64(&s.TotalErrors, uint64(n))\n}\n\nfunc (s *topAPIStats) loadAPICall() uint64 {\n\treturn atomic.LoadUint64(&s.TotalCalls)\n}\n\nfunc (s *topAPIStats) loadAPIBytesRX() uint64 {\n\treturn atomic.LoadUint64(&s.TotalBytesRX)\n}\n\nfunc (s *topAPIStats) loadAPIBytesTX() uint64 {\n\treturn atomic.LoadUint64(&s.TotalBytesTX)\n}\n\nfunc (s *topAPIStats) loadAPIErrors() uint64 {\n\treturn atomic.LoadUint64(&s.TotalErrors)\n}\n\ntype traceUI struct {\n\tspinner spinner.Model\n\tquitting bool\n\tstartTime time.Time\n\tresult topAPIResult\n\tlastResult topAPIResult\n\tapiStatsMap map[string]*topAPIStats\n}\n\ntype topAPIResult struct {\n\tfinal bool\n\tapiCallInfo madmin.ServiceTraceInfo\n}\n\nfunc initTraceUI() *traceUI {\n\ts := spinner.New()\n\ts.Spinner = spinner.Points\n\ts.Style = lipgloss.NewStyle().Foreground(lipgloss.Color(\"205\"))\n\treturn &traceUI{\n\t\tspinner: s,\n\t\tapiStatsMap: make(map[string]*topAPIStats),\n\t}\n}\n\nfunc (m *traceUI) Init() tea.Cmd {\n\treturn m.spinner.Tick\n}\n\nfunc (m *traceUI) Update(msg tea.Msg) (tea.Model, tea.Cmd) {\n\tswitch msg := msg.(type) {\n\tcase tea.KeyMsg:\n\t\tswitch msg.String() {\n\t\tcase \"ctrl+c\":\n\t\t\tm.quitting = true\n\t\t\treturn m, tea.Quit\n\t\tdefault:\n\t\t\treturn m, nil\n\t\t}\n\tcase topAPIResult:\n\t\tm.result = msg\n\t\tif m.result.apiCallInfo.Trace.FuncName != \"\" {\n\t\t\tm.lastResult = m.result\n\t\t}\n\t\tif msg.final {\n\t\t\tm.quitting = true\n\t\t\treturn m, tea.Quit\n\t\t}\n\t\treturn m, nil\n\n\tcase spinner.TickMsg:\n\t\tvar cmd tea.Cmd\n\t\tm.spinner, cmd = m.spinner.Update(msg)\n\t\treturn m, cmd\n\tdefault:\n\t\treturn m, nil\n\t}\n}\n\nfunc (m *traceUI) View() string {\n\tvar s strings.Builder\n\ts.WriteString(\"\\n\")\n\n\t\/\/ Set table header\n\ttable := tablewriter.NewWriter(&s)\n\ttable.SetAutoWrapText(false)\n\ttable.SetAutoFormatHeaders(true)\n\ttable.SetHeaderAlignment(tablewriter.ALIGN_LEFT)\n\ttable.SetAlignment(tablewriter.ALIGN_LEFT)\n\ttable.SetCenterSeparator(\"\")\n\ttable.SetColumnSeparator(\"\")\n\ttable.SetRowSeparator(\"\")\n\ttable.SetHeaderLine(false)\n\ttable.SetBorder(false)\n\ttable.SetTablePadding(\"\\t\") \/\/ pad with tabs\n\ttable.SetNoWhiteSpace(true)\n\n\tres := m.result.apiCallInfo\n\tif m.startTime.IsZero() && !res.Trace.Time.IsZero() {\n\t\tm.startTime = res.Trace.Time\n\t}\n\tif res.Trace.FuncName != \"\" && res.Trace.FuncName != \"errorResponseHandler\" {\n\t\ttraceSt, ok := m.apiStatsMap[res.Trace.FuncName]\n\t\tif !ok {\n\t\t\ttraceSt = &topAPIStats{}\n\t\t}\n\t\ttraceSt.addAPICall(1)\n\t\tif res.Trace.HTTP != nil {\n\t\t\ttraceSt.addAPIBytesRX(res.Trace.HTTP.CallStats.InputBytes)\n\t\t\ttraceSt.addAPIBytesTX(res.Trace.HTTP.CallStats.OutputBytes)\n\t\t}\n\t\tif res.Trace.HTTP.RespInfo.StatusCode >= 499 {\n\t\t\ttraceSt.addAPIErrors(1)\n\t\t}\n\t\tm.apiStatsMap[res.Trace.FuncName] = traceSt\n\t}\n\n\ttable.SetHeader([]string{\"API\", \"RX\", \"TX\", \"CALLS\", \"ERRORS\"})\n\tdata := make([][]string, 0, len(m.apiStatsMap))\n\n\tfor k, stats := range m.apiStatsMap {\n\t\tdata = append(data, []string{\n\t\t\tk,\n\t\t\twhiteStyle.Render(humanize.IBytes(stats.loadAPIBytesRX())),\n\t\t\twhiteStyle.Render(humanize.IBytes(stats.loadAPIBytesTX())),\n\t\t\twhiteStyle.Render(fmt.Sprintf(\"%d\", stats.loadAPICall())),\n\t\t\twhiteStyle.Render(fmt.Sprintf(\"%d\", stats.loadAPIErrors())),\n\t\t})\n\t}\n\tsort.Slice(data, func(i, j int) bool {\n\t\treturn data[i][0] < data[j][0]\n\t})\n\n\ttable.AppendBulk(data)\n\ttable.Render()\n\n\tif !m.quitting {\n\t\ts.WriteString(fmt.Sprintf(\"\\nTopAPI: %s\", m.spinner.View()))\n\t} else {\n\t\tvar totalTX, totalRX, totalCalls uint64\n\t\tlastReqTime := m.lastResult.apiCallInfo.Trace.Time\n\t\tif m.lastResult.apiCallInfo.Trace.Time.IsZero() {\n\t\t\tlastReqTime = time.Now()\n\t\t}\n\t\tfor _, stats := range m.apiStatsMap {\n\t\t\ttotalRX += stats.loadAPIBytesRX()\n\t\t\ttotalTX += stats.loadAPIBytesTX()\n\t\t\ttotalCalls += stats.loadAPICall()\n\t\t}\n\t\tmsg := fmt.Sprintf(\"\\n========\\nTotal: %d CALLS, %s RX, %s TX - in %.02fs\",\n\t\t\ttotalCalls,\n\t\t\thumanize.IBytes(totalRX),\n\t\t\thumanize.IBytes(totalTX),\n\t\t\tlastReqTime.Sub(m.startTime).Seconds(),\n\t\t)\n\t\ts.WriteString(msg)\n\t\ts.WriteString(\"\\n\")\n\t}\n\treturn s.String()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/andrewcopp\/coup\"\n)\n\nfunc init() {\n\n}\n\nfunc main() {\n\n\twins := 0\n\tlosses := 0\n\tfor i := 0; i < 1; i++ {\n\t\tvar chooser coup.Chooser\n\t\tchooser = coup.NewAgent(i, 0.8)\n\t\tone := coup.NewPlayer(\"Player One\", chooser, false)\n\t\tfor j := 0; j < 2000; j++ {\n\t\t\tchooser = coup.NewRandom()\n\t\t\ttwo := coup.NewPlayer(\"Player Two\", chooser, false)\n\t\t\tthree := coup.NewPlayer(\"Player Three\", chooser, true)\n\t\t\tfour := coup.NewPlayer(\"Player Four\", chooser, true)\n\t\t\tfive := coup.NewPlayer(\"Player Five\", chooser, true)\n\n\t\t\tgame := coup.NewGame([]*coup.Player{one, two, three, four, five})\n\n\t\t\tgame.Setup()\n\t\t\twinner := game.Play()\n\t\t\tif winner == one {\n\t\t\t\twins++\n\t\t\t} else {\n\t\t\t\tlosses++\n\t\t\t}\n\t\t}\n\t}\n\n\tfmt.Println()\n\tfmt.Println(float64(wins)\/float64((wins+losses))*100.0, \"%\")\n\tfmt.Println()\n\n}\n<commit_msg>Properly Training (Minus TensorFlow)<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"time\"\n\n\t\"github.com\/andrewcopp\/coup\"\n)\n\nfunc init() {\n\n}\n\nfunc main() {\n\n\tfor i := 0; i < 5; i++ {\n\t\twins := 0\n\t\tlosses := 0\n\t\tvar chooser coup.Chooser\n\t\tchooser = coup.NewAgent(i, 0.8)\n\t\tone := coup.NewPlayer(\"Player One\", chooser, false)\n\t\tfor j := 0; j < 1000; j++ {\n\t\t\trand.Seed(time.Now().UnixNano())\n\t\t\tif i > 0 {\n\t\t\t\tchooser = coup.NewAgent(rand.Intn(i), 0.0)\n\t\t\t} else {\n\t\t\t\tchooser = coup.NewRandom()\n\t\t\t}\n\t\t\ttwo := coup.NewPlayer(\"Player Two\", chooser, false)\n\t\t\tchooser = coup.NewRandom()\n\t\t\tthree := coup.NewPlayer(\"Player Three\", chooser, true)\n\t\t\tfour := coup.NewPlayer(\"Player Four\", chooser, true)\n\t\t\tfive := coup.NewPlayer(\"Player Five\", chooser, true)\n\n\t\t\tgame := coup.NewGame([]*coup.Player{one, two, three, four, five})\n\n\t\t\tgame.Setup()\n\t\t\twinner := game.Play()\n\t\t\tif winner == one {\n\t\t\t\twins++\n\t\t\t} else {\n\t\t\t\tlosses++\n\t\t\t}\n\t\t}\n\t\tfmt.Println()\n\t\tfmt.Println(float64(wins)\/float64((wins+losses))*100.0, \"%\")\n\t\tfmt.Println()\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/datawire\/ambassador\/pkg\/consulwatch\"\n\t\"github.com\/datawire\/ambassador\/pkg\/limiter\"\n\t\"github.com\/datawire\/ambassador\/pkg\/watt\"\n\n\t\"github.com\/datawire\/ambassador\/pkg\/k8s\"\n\t\"github.com\/datawire\/ambassador\/pkg\/supervisor\"\n)\n\ntype WatchHook func(p *supervisor.Process, snapshot string) WatchSet\n\ntype aggregator struct {\n\t\/\/ Input channel used to tell us about kubernetes state.\n\tKubernetesEvents chan k8sEvent\n\t\/\/ Input channel used to tell us about consul endpoints.\n\tConsulEvents chan consulEvent\n\t\/\/ Output channel used to communicate with the k8s watch manager.\n\tk8sWatches chan<- []KubernetesWatchSpec\n\t\/\/ Output channel used to communicate with the consul watch manager.\n\tconsulWatches chan<- []ConsulWatchSpec\n\t\/\/ Output channel used to communicate with the invoker.\n\tsnapshots chan<- string\n\t\/\/ We won't consider ourselves \"bootstrapped\" until we hear\n\t\/\/ about all these kinds.\n\trequiredKinds []string\n\twatchHook WatchHook\n\tlimiter limiter.Limiter\n\tids map[string]bool\n\tkubernetesResources map[string]map[string][]k8s.Resource\n\tconsulEndpoints map[string]consulwatch.Endpoints\n\tbootstrapped bool\n\tnotifyMux sync.Mutex\n\terrors map[string][]watt.Error\n}\n\nfunc NewAggregator(snapshots chan<- string, k8sWatches chan<- []KubernetesWatchSpec, consulWatches chan<- []ConsulWatchSpec,\n\trequiredKinds []string, watchHook WatchHook, limiter limiter.Limiter) *aggregator {\n\treturn &aggregator{\n\t\tKubernetesEvents: make(chan k8sEvent),\n\t\tConsulEvents: make(chan consulEvent),\n\t\tk8sWatches: k8sWatches,\n\t\tconsulWatches: consulWatches,\n\t\tsnapshots: snapshots,\n\t\trequiredKinds: requiredKinds,\n\t\twatchHook: watchHook,\n\t\tlimiter: limiter,\n\t\tids: make(map[string]bool),\n\t\tkubernetesResources: make(map[string]map[string][]k8s.Resource),\n\t\tconsulEndpoints: make(map[string]consulwatch.Endpoints),\n\t\terrors: make(map[string][]watt.Error),\n\t}\n}\n\nfunc (a *aggregator) Work(p *supervisor.Process) error {\n\t\/\/ In order to invoke `maybeNotify`, which is a very time consuming\n\t\/\/ operation, we coalesce events:\n\t\/\/\n\t\/\/ 1. Be continuously reading all available events from\n\t\/\/ a.KubernetesEvents and a.ConsulEvents and store k8sEvents\n\t\/\/ in the potentialKubernetesEventSignal variable. This means\n\t\/\/ at any given point (modulo caveats below), the\n\t\/\/ potentialKubernetesEventSignal variable will have the\n\t\/\/ latest Kubernetes event available.\n\t\/\/\n\t\/\/ 2. At the same time, whenever there is capacity to write\n\t\/\/ down the kubernetesEventProcessor channel, we send\n\t\/\/ potentialKubernetesEventSignal to be processed.\n\t\/\/\n\t\/\/ The anonymous goroutine below will be constantly reading\n\t\/\/ from the kubernetesEventProcessor channel and performing\n\t\/\/ a blocking a.maybeNotify(). This means that we can only\n\t\/\/ *write* to the kubernetesEventProcessor channel when we are\n\t\/\/ not currently processing an event, but when that happens, we\n\t\/\/ will still read from a.KubernetesEvents and a.ConsulEvents\n\t\/\/ and update potentialKubernetesEventSignal.\n\t\/\/\n\t\/\/ There are three caveats to the above:\n\t\/\/\n\t\/\/ 1. At startup, we don't yet have a event to write, but\n\t\/\/ we're not processing anything, so we will try to write\n\t\/\/ something down the kubernetesEventProcessor channel.\n\t\/\/ To cope with this, the invoking goroutine will ignore events\n\t\/\/ signals that have a event.skip flag.\n\t\/\/\n\t\/\/ 2. If we process an event quickly, or if there aren't new\n\t\/\/ events available, then we end up busy looping and\n\t\/\/ sending the same potentialKubernetesEventSignal value down\n\t\/\/ the kubernetesEventProcessor channel multiple times. To cope\n\t\/\/ with this, whenever we have successfully written to the\n\t\/\/ kubernetesEventProcessor channel, we do a *blocking* read of\n\t\/\/ the next event from a.KubernetesEvents and a.ConsulEvents.\n\t\/\/\n\t\/\/ 3. Always be calling a.setKubernetesResources as soon as we\n\t\/\/ receive an event. This is a fast non-blocking call that\n\t\/\/ update watches, we can't coalesce this call.\n\n\tp.Ready()\n\n\ttype eventSignal struct {\n\t\tkubernetesEvent k8sEvent\n\t\tskip bool\n\t}\n\n\tkubernetesEventProcessor := make(chan eventSignal)\n\tgo func() {\n\t\tfor event := range kubernetesEventProcessor {\n\t\t\tif event.skip {\n\t\t\t\t\/\/ ignore the initial eventSignal to deal with the\n\t\t\t\t\/\/ corner case where we haven't yet received an event yet.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ta.maybeNotify(p)\n\t\t}\n\t}()\n\n\tpotentialKubernetesEventSignal := eventSignal{kubernetesEvent: k8sEvent{}, skip: true}\n\tfor {\n\t\tselect {\n\t\tcase potentialKubernetesEvent := <-a.KubernetesEvents:\n\t\t\t\/\/ if a new KubernetesEvents is available to be read,\n\t\t\t\/\/ and we can't write to the kubernetesEventProcessor channel,\n\t\t\t\/\/ then we will overwrite potentialKubernetesEvent\n\t\t\t\/\/ with a newer event while still processing a.setKubernetesResources\n\t\t\ta.setKubernetesResources(potentialKubernetesEvent)\n\t\t\tpotentialKubernetesEventSignal = eventSignal{kubernetesEvent: potentialKubernetesEvent, skip: false}\n\t\tcase kubernetesEventProcessor <- potentialKubernetesEventSignal:\n\t\t\t\/\/ if we aren't currently blocked in\n\t\t\t\/\/ a.maybeNotify() then the above goroutine will be\n\t\t\t\/\/ reading from the kubernetesEventProcessor channel and we\n\t\t\t\/\/ will send the current potentialKubernetesEventSignal\n\t\t\t\/\/ value over the kubernetesEventProcessor channel to be\n\t\t\t\/\/ processed\n\t\t\tselect {\n\t\t\tcase potentialKubernetesEvent := <-a.KubernetesEvents:\n\t\t\t\ta.setKubernetesResources(potentialKubernetesEvent)\n\t\t\t\tpotentialKubernetesEventSignal = eventSignal{kubernetesEvent: potentialKubernetesEvent, skip: false}\n\t\t\tcase event := <-a.ConsulEvents:\n\t\t\t\ta.updateConsulResources(event)\n\t\t\t\ta.maybeNotify(p)\n\t\t\tcase <-p.Shutdown():\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase event := <-a.ConsulEvents:\n\t\t\t\/\/ we are always reading and processing ConsulEvents directly,\n\t\t\t\/\/ not coalescing them.\n\t\t\ta.updateConsulResources(event)\n\t\t\ta.maybeNotify(p)\n\t\tcase <-p.Shutdown():\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (a *aggregator) updateConsulResources(event consulEvent) {\n\ta.ids[event.WatchId] = true\n\ta.consulEndpoints[event.Endpoints.Service] = event.Endpoints\n}\n\nfunc (a *aggregator) setKubernetesResources(event k8sEvent) {\n\tif len(event.errors) > 0 {\n\t\tfor _, kError := range event.errors {\n\t\t\ta.errors[kError.Source] = append(a.errors[kError.Source], kError)\n\t\t}\n\t\treturn\n\t}\n\ta.ids[event.watchId] = true\n\tsubmap, ok := a.kubernetesResources[event.watchId]\n\tif !ok {\n\t\tsubmap = make(map[string][]k8s.Resource)\n\t\ta.kubernetesResources[event.watchId] = submap\n\t}\n\tsubmap[event.kind] = event.resources\n}\n\nfunc (a *aggregator) generateSnapshot() (string, error) {\n\tk8sResources := make(map[string][]k8s.Resource)\n\tfor _, submap := range a.kubernetesResources {\n\t\tfor k, v := range submap {\n\t\t\tk8sResources[k] = append(k8sResources[k], v...)\n\t\t}\n\t}\n\ts := watt.Snapshot{\n\t\tConsul: watt.ConsulSnapshot{Endpoints: a.consulEndpoints},\n\t\tKubernetes: k8sResources,\n\t\tErrors: a.errors,\n\t}\n\n\tjsonBytes, err := json.MarshalIndent(s, \"\", \" \")\n\tif err != nil {\n\t\treturn \"{}\", err\n\t}\n\n\treturn string(jsonBytes), nil\n}\n\nfunc (a *aggregator) isKubernetesBootstrapped(p *supervisor.Process) bool {\n\tsubmap, sok := a.kubernetesResources[\"\"]\n\tif !sok {\n\t\treturn false\n\t}\n\tfor _, k := range a.requiredKinds {\n\t\t_, ok := submap[k]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Returns true if the current state of the world is complete. The\n\/\/ kubernetes state of the world is always complete by definition\n\/\/ because the kubernetes client provides that guarantee. The\n\/\/ aggregate state of the world is complete when any consul services\n\/\/ referenced by kubernetes have populated endpoint information (even\n\/\/ if the value of the populated info is an empty set of endpoints).\nfunc (a *aggregator) isComplete(p *supervisor.Process, watchset WatchSet) bool {\n\tcomplete := true\n\n\tfor _, w := range watchset.KubernetesWatches {\n\t\tif _, ok := a.ids[w.WatchId()]; ok {\n\t\t\tp.Logf(\"initialized k8s watch: %s\", w.WatchId())\n\t\t} else {\n\t\t\tcomplete = false\n\t\t\tp.Logf(\"waiting for k8s watch: %s\", w.WatchId())\n\t\t}\n\t}\n\n\tfor _, w := range watchset.ConsulWatches {\n\t\tif _, ok := a.ids[w.WatchId()]; ok {\n\t\t\tp.Logf(\"initialized consul watch: %s\", w.WatchId())\n\t\t} else {\n\t\t\tcomplete = false\n\t\t\tp.Logf(\"waiting for consul watch: %s\", w.WatchId())\n\t\t}\n\t}\n\n\treturn complete\n}\n\nfunc (a *aggregator) maybeNotify(p *supervisor.Process) {\n\tnow := time.Now()\n\tdelay := a.limiter.Limit(now)\n\tif delay == 0 {\n\t\ta.notify(p)\n\t} else if delay > 0 {\n\t\ttime.AfterFunc(delay, func() {\n\t\t\ta.notify(p)\n\t\t})\n\t}\n}\n\nfunc (a *aggregator) notify(p *supervisor.Process) {\n\ta.notifyMux.Lock()\n\tdefer a.notifyMux.Unlock()\n\n\tif !a.isKubernetesBootstrapped(p) {\n\t\treturn\n\t}\n\n\twatchset := a.getWatches(p)\n\n\tp.Logf(\"found %d kubernetes watches\", len(watchset.KubernetesWatches))\n\tp.Logf(\"found %d consul watches\", len(watchset.ConsulWatches))\n\ta.k8sWatches <- watchset.KubernetesWatches\n\ta.consulWatches <- watchset.ConsulWatches\n\n\tif !a.bootstrapped && a.isComplete(p, watchset) {\n\t\tp.Logf(\"bootstrapped!\")\n\t\ta.bootstrapped = true\n\t}\n\n\tif a.bootstrapped {\n\t\tsnapshot, err := a.generateSnapshot()\n\t\tif err != nil {\n\t\t\tp.Logf(\"generate snapshot failed %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\ta.snapshots <- snapshot\n\t}\n}\n\nfunc (a *aggregator) getWatches(p *supervisor.Process) WatchSet {\n\tsnapshot, err := a.generateSnapshot()\n\tif err != nil {\n\t\tp.Logf(\"generate snapshot failed %v\", err)\n\t\treturn WatchSet{}\n\t}\n\tresult := a.watchHook(p, snapshot)\n\treturn result.interpolate()\n}\n\nfunc ExecWatchHook(watchHooks []string) WatchHook {\n\treturn func(p *supervisor.Process, snapshot string) WatchSet {\n\t\tresult := WatchSet{}\n\n\t\tfor _, hook := range watchHooks {\n\t\t\tws := invokeHook(p, hook, snapshot)\n\t\t\tresult.KubernetesWatches = append(result.KubernetesWatches, ws.KubernetesWatches...)\n\t\t\tresult.ConsulWatches = append(result.ConsulWatches, ws.ConsulWatches...)\n\t\t}\n\n\t\treturn result\n\t}\n}\n\nfunc lines(st string) []string {\n\treturn strings.Split(st, \"\\n\")\n}\n\nfunc invokeHook(p *supervisor.Process, hook, snapshot string) WatchSet {\n\tcmd := exec.Command(\"sh\", \"-c\", hook)\n\tcmd.Stdin = strings.NewReader(snapshot)\n\tvar watches, errors strings.Builder\n\tcmd.Stdout = &watches\n\tcmd.Stderr = &errors\n\terr := cmd.Run()\n\tstderr := errors.String()\n\tif stderr != \"\" {\n\t\tfor _, line := range lines(stderr) {\n\t\t\tp.Logf(\"watch hook stderr: %s\", line)\n\t\t}\n\t}\n\tif err != nil {\n\t\tp.Logf(\"watch hook failed: %v\", err)\n\t\treturn WatchSet{}\n\t}\n\n\tencoded := watches.String()\n\n\tdecoder := json.NewDecoder(strings.NewReader(encoded))\n\tdecoder.DisallowUnknownFields()\n\tresult := WatchSet{}\n\terr = decoder.Decode(&result)\n\tif err != nil {\n\t\tfor _, line := range lines(encoded) {\n\t\t\tp.Logf(\"watch hook: %s\", line)\n\t\t}\n\t\tp.Logf(\"watchset decode failed: %v\", err)\n\t\treturn WatchSet{}\n\t}\n\n\treturn result\n}\n<commit_msg>Added additional comment about blocking read<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/datawire\/ambassador\/pkg\/consulwatch\"\n\t\"github.com\/datawire\/ambassador\/pkg\/limiter\"\n\t\"github.com\/datawire\/ambassador\/pkg\/watt\"\n\n\t\"github.com\/datawire\/ambassador\/pkg\/k8s\"\n\t\"github.com\/datawire\/ambassador\/pkg\/supervisor\"\n)\n\ntype WatchHook func(p *supervisor.Process, snapshot string) WatchSet\n\ntype aggregator struct {\n\t\/\/ Input channel used to tell us about kubernetes state.\n\tKubernetesEvents chan k8sEvent\n\t\/\/ Input channel used to tell us about consul endpoints.\n\tConsulEvents chan consulEvent\n\t\/\/ Output channel used to communicate with the k8s watch manager.\n\tk8sWatches chan<- []KubernetesWatchSpec\n\t\/\/ Output channel used to communicate with the consul watch manager.\n\tconsulWatches chan<- []ConsulWatchSpec\n\t\/\/ Output channel used to communicate with the invoker.\n\tsnapshots chan<- string\n\t\/\/ We won't consider ourselves \"bootstrapped\" until we hear\n\t\/\/ about all these kinds.\n\trequiredKinds []string\n\twatchHook WatchHook\n\tlimiter limiter.Limiter\n\tids map[string]bool\n\tkubernetesResources map[string]map[string][]k8s.Resource\n\tconsulEndpoints map[string]consulwatch.Endpoints\n\tbootstrapped bool\n\tnotifyMux sync.Mutex\n\terrors map[string][]watt.Error\n}\n\nfunc NewAggregator(snapshots chan<- string, k8sWatches chan<- []KubernetesWatchSpec, consulWatches chan<- []ConsulWatchSpec,\n\trequiredKinds []string, watchHook WatchHook, limiter limiter.Limiter) *aggregator {\n\treturn &aggregator{\n\t\tKubernetesEvents: make(chan k8sEvent),\n\t\tConsulEvents: make(chan consulEvent),\n\t\tk8sWatches: k8sWatches,\n\t\tconsulWatches: consulWatches,\n\t\tsnapshots: snapshots,\n\t\trequiredKinds: requiredKinds,\n\t\twatchHook: watchHook,\n\t\tlimiter: limiter,\n\t\tids: make(map[string]bool),\n\t\tkubernetesResources: make(map[string]map[string][]k8s.Resource),\n\t\tconsulEndpoints: make(map[string]consulwatch.Endpoints),\n\t\terrors: make(map[string][]watt.Error),\n\t}\n}\n\nfunc (a *aggregator) Work(p *supervisor.Process) error {\n\t\/\/ In order to invoke `maybeNotify`, which is a very time consuming\n\t\/\/ operation, we coalesce events:\n\t\/\/\n\t\/\/ 1. Be continuously reading all available events from\n\t\/\/ a.KubernetesEvents and a.ConsulEvents and store k8sEvents\n\t\/\/ in the potentialKubernetesEventSignal variable. This means\n\t\/\/ at any given point (modulo caveats below), the\n\t\/\/ potentialKubernetesEventSignal variable will have the\n\t\/\/ latest Kubernetes event available.\n\t\/\/\n\t\/\/ 2. At the same time, whenever there is capacity to write\n\t\/\/ down the kubernetesEventProcessor channel, we send\n\t\/\/ potentialKubernetesEventSignal to be processed.\n\t\/\/\n\t\/\/ The anonymous goroutine below will be constantly reading\n\t\/\/ from the kubernetesEventProcessor channel and performing\n\t\/\/ a blocking a.maybeNotify(). This means that we can only\n\t\/\/ *write* to the kubernetesEventProcessor channel when we are\n\t\/\/ not currently processing an event, but when that happens, we\n\t\/\/ will still read from a.KubernetesEvents and a.ConsulEvents\n\t\/\/ and update potentialKubernetesEventSignal.\n\t\/\/\n\t\/\/ There are three caveats to the above:\n\t\/\/\n\t\/\/ 1. At startup, we don't yet have a event to write, but\n\t\/\/ we're not processing anything, so we will try to write\n\t\/\/ something down the kubernetesEventProcessor channel.\n\t\/\/ To cope with this, the invoking goroutine will ignore events\n\t\/\/ signals that have a event.skip flag.\n\t\/\/\n\t\/\/ 2. If we process an event quickly, or if there aren't new\n\t\/\/ events available, then we end up busy looping and\n\t\/\/ sending the same potentialKubernetesEventSignal value down\n\t\/\/ the kubernetesEventProcessor channel multiple times. To cope\n\t\/\/ with this, whenever we have successfully written to the\n\t\/\/ kubernetesEventProcessor channel, we do a *blocking* read of\n\t\/\/ the next event from a.KubernetesEvents and a.ConsulEvents.\n\t\/\/\n\t\/\/ 3. Always be calling a.setKubernetesResources as soon as we\n\t\/\/ receive an event. This is a fast non-blocking call that\n\t\/\/ update watches, we can't coalesce this call.\n\n\tp.Ready()\n\n\ttype eventSignal struct {\n\t\tkubernetesEvent k8sEvent\n\t\tskip bool\n\t}\n\n\tkubernetesEventProcessor := make(chan eventSignal)\n\tgo func() {\n\t\tfor event := range kubernetesEventProcessor {\n\t\t\tif event.skip {\n\t\t\t\t\/\/ ignore the initial eventSignal to deal with the\n\t\t\t\t\/\/ corner case where we haven't yet received an event yet.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ta.maybeNotify(p)\n\t\t}\n\t}()\n\n\tpotentialKubernetesEventSignal := eventSignal{kubernetesEvent: k8sEvent{}, skip: true}\n\tfor {\n\t\tselect {\n\t\tcase potentialKubernetesEvent := <-a.KubernetesEvents:\n\t\t\t\/\/ if a new KubernetesEvents is available to be read,\n\t\t\t\/\/ and we can't write to the kubernetesEventProcessor channel,\n\t\t\t\/\/ then we will overwrite potentialKubernetesEvent\n\t\t\t\/\/ with a newer event while still processing a.setKubernetesResources\n\t\t\ta.setKubernetesResources(potentialKubernetesEvent)\n\t\t\tpotentialKubernetesEventSignal = eventSignal{kubernetesEvent: potentialKubernetesEvent, skip: false}\n\t\tcase kubernetesEventProcessor <- potentialKubernetesEventSignal:\n\t\t\t\/\/ if we aren't currently blocked in\n\t\t\t\/\/ a.maybeNotify() then the above goroutine will be\n\t\t\t\/\/ reading from the kubernetesEventProcessor channel and we\n\t\t\t\/\/ will send the current potentialKubernetesEventSignal\n\t\t\t\/\/ value over the kubernetesEventProcessor channel to be\n\t\t\t\/\/ processed\n\t\t\tselect {\n\t\t\tcase potentialKubernetesEvent := <-a.KubernetesEvents:\n\t\t\t\t\/\/ here we do blocking read of the next event for caveat #2.\n\t\t\t\ta.setKubernetesResources(potentialKubernetesEvent)\n\t\t\t\tpotentialKubernetesEventSignal = eventSignal{kubernetesEvent: potentialKubernetesEvent, skip: false}\n\t\t\tcase event := <-a.ConsulEvents:\n\t\t\t\ta.updateConsulResources(event)\n\t\t\t\ta.maybeNotify(p)\n\t\t\tcase <-p.Shutdown():\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase event := <-a.ConsulEvents:\n\t\t\t\/\/ we are always reading and processing ConsulEvents directly,\n\t\t\t\/\/ not coalescing them.\n\t\t\ta.updateConsulResources(event)\n\t\t\ta.maybeNotify(p)\n\t\tcase <-p.Shutdown():\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (a *aggregator) updateConsulResources(event consulEvent) {\n\ta.ids[event.WatchId] = true\n\ta.consulEndpoints[event.Endpoints.Service] = event.Endpoints\n}\n\nfunc (a *aggregator) setKubernetesResources(event k8sEvent) {\n\tif len(event.errors) > 0 {\n\t\tfor _, kError := range event.errors {\n\t\t\ta.errors[kError.Source] = append(a.errors[kError.Source], kError)\n\t\t}\n\t\treturn\n\t}\n\ta.ids[event.watchId] = true\n\tsubmap, ok := a.kubernetesResources[event.watchId]\n\tif !ok {\n\t\tsubmap = make(map[string][]k8s.Resource)\n\t\ta.kubernetesResources[event.watchId] = submap\n\t}\n\tsubmap[event.kind] = event.resources\n}\n\nfunc (a *aggregator) generateSnapshot() (string, error) {\n\tk8sResources := make(map[string][]k8s.Resource)\n\tfor _, submap := range a.kubernetesResources {\n\t\tfor k, v := range submap {\n\t\t\tk8sResources[k] = append(k8sResources[k], v...)\n\t\t}\n\t}\n\ts := watt.Snapshot{\n\t\tConsul: watt.ConsulSnapshot{Endpoints: a.consulEndpoints},\n\t\tKubernetes: k8sResources,\n\t\tErrors: a.errors,\n\t}\n\n\tjsonBytes, err := json.MarshalIndent(s, \"\", \" \")\n\tif err != nil {\n\t\treturn \"{}\", err\n\t}\n\n\treturn string(jsonBytes), nil\n}\n\nfunc (a *aggregator) isKubernetesBootstrapped(p *supervisor.Process) bool {\n\tsubmap, sok := a.kubernetesResources[\"\"]\n\tif !sok {\n\t\treturn false\n\t}\n\tfor _, k := range a.requiredKinds {\n\t\t_, ok := submap[k]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Returns true if the current state of the world is complete. The\n\/\/ kubernetes state of the world is always complete by definition\n\/\/ because the kubernetes client provides that guarantee. The\n\/\/ aggregate state of the world is complete when any consul services\n\/\/ referenced by kubernetes have populated endpoint information (even\n\/\/ if the value of the populated info is an empty set of endpoints).\nfunc (a *aggregator) isComplete(p *supervisor.Process, watchset WatchSet) bool {\n\tcomplete := true\n\n\tfor _, w := range watchset.KubernetesWatches {\n\t\tif _, ok := a.ids[w.WatchId()]; ok {\n\t\t\tp.Logf(\"initialized k8s watch: %s\", w.WatchId())\n\t\t} else {\n\t\t\tcomplete = false\n\t\t\tp.Logf(\"waiting for k8s watch: %s\", w.WatchId())\n\t\t}\n\t}\n\n\tfor _, w := range watchset.ConsulWatches {\n\t\tif _, ok := a.ids[w.WatchId()]; ok {\n\t\t\tp.Logf(\"initialized consul watch: %s\", w.WatchId())\n\t\t} else {\n\t\t\tcomplete = false\n\t\t\tp.Logf(\"waiting for consul watch: %s\", w.WatchId())\n\t\t}\n\t}\n\n\treturn complete\n}\n\nfunc (a *aggregator) maybeNotify(p *supervisor.Process) {\n\tnow := time.Now()\n\tdelay := a.limiter.Limit(now)\n\tif delay == 0 {\n\t\ta.notify(p)\n\t} else if delay > 0 {\n\t\ttime.AfterFunc(delay, func() {\n\t\t\ta.notify(p)\n\t\t})\n\t}\n}\n\nfunc (a *aggregator) notify(p *supervisor.Process) {\n\ta.notifyMux.Lock()\n\tdefer a.notifyMux.Unlock()\n\n\tif !a.isKubernetesBootstrapped(p) {\n\t\treturn\n\t}\n\n\twatchset := a.getWatches(p)\n\n\tp.Logf(\"found %d kubernetes watches\", len(watchset.KubernetesWatches))\n\tp.Logf(\"found %d consul watches\", len(watchset.ConsulWatches))\n\ta.k8sWatches <- watchset.KubernetesWatches\n\ta.consulWatches <- watchset.ConsulWatches\n\n\tif !a.bootstrapped && a.isComplete(p, watchset) {\n\t\tp.Logf(\"bootstrapped!\")\n\t\ta.bootstrapped = true\n\t}\n\n\tif a.bootstrapped {\n\t\tsnapshot, err := a.generateSnapshot()\n\t\tif err != nil {\n\t\t\tp.Logf(\"generate snapshot failed %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\ta.snapshots <- snapshot\n\t}\n}\n\nfunc (a *aggregator) getWatches(p *supervisor.Process) WatchSet {\n\tsnapshot, err := a.generateSnapshot()\n\tif err != nil {\n\t\tp.Logf(\"generate snapshot failed %v\", err)\n\t\treturn WatchSet{}\n\t}\n\tresult := a.watchHook(p, snapshot)\n\treturn result.interpolate()\n}\n\nfunc ExecWatchHook(watchHooks []string) WatchHook {\n\treturn func(p *supervisor.Process, snapshot string) WatchSet {\n\t\tresult := WatchSet{}\n\n\t\tfor _, hook := range watchHooks {\n\t\t\tws := invokeHook(p, hook, snapshot)\n\t\t\tresult.KubernetesWatches = append(result.KubernetesWatches, ws.KubernetesWatches...)\n\t\t\tresult.ConsulWatches = append(result.ConsulWatches, ws.ConsulWatches...)\n\t\t}\n\n\t\treturn result\n\t}\n}\n\nfunc lines(st string) []string {\n\treturn strings.Split(st, \"\\n\")\n}\n\nfunc invokeHook(p *supervisor.Process, hook, snapshot string) WatchSet {\n\tcmd := exec.Command(\"sh\", \"-c\", hook)\n\tcmd.Stdin = strings.NewReader(snapshot)\n\tvar watches, errors strings.Builder\n\tcmd.Stdout = &watches\n\tcmd.Stderr = &errors\n\terr := cmd.Run()\n\tstderr := errors.String()\n\tif stderr != \"\" {\n\t\tfor _, line := range lines(stderr) {\n\t\t\tp.Logf(\"watch hook stderr: %s\", line)\n\t\t}\n\t}\n\tif err != nil {\n\t\tp.Logf(\"watch hook failed: %v\", err)\n\t\treturn WatchSet{}\n\t}\n\n\tencoded := watches.String()\n\n\tdecoder := json.NewDecoder(strings.NewReader(encoded))\n\tdecoder.DisallowUnknownFields()\n\tresult := WatchSet{}\n\terr = decoder.Decode(&result)\n\tif err != nil {\n\t\tfor _, line := range lines(encoded) {\n\t\t\tp.Logf(\"watch hook: %s\", line)\n\t\t}\n\t\tp.Logf(\"watchset decode failed: %v\", err)\n\t\treturn WatchSet{}\n\t}\n\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>package ifviva\n\nimport (\n\t\"encoding\/json\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"path\"\n)\n\nconst (\n\tcontentType = \"Content-Type\"\n\tcontentText = \"text\/plain\"\n\tcontentJSON = \"application\/json\"\n\tcontentHTML = \"text\/html\"\n\tdefaultCharset = \"UTF-8\"\n)\n\nvar (\n\tcacheTemplate *template.Template\n)\n\ntype Controller struct {\n\tReq *http.Request\n\tRes http.ResponseWriter\n\tParams map[string]string\n\tstatusCode int\n\tErr error\n\tCharset string\n}\n\nfunc SetViewPath(dir string) {\n\tviewPaths := []string{}\n\tscanDir(dir, func(viewPath string) {\n\t\tviewPaths = append(viewPaths, viewPath)\n\t})\n\tvar err error\n\tcacheTemplate, err = template.ParseFiles(viewPaths...)\n\tif err != nil {\n\t\tlog.Println(\"[ifviva]Set view path error: \", err)\n\t}\n}\n\nfunc scanDir(dir string, fn func(string)) {\n\tfileInfos, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\tlog.Println(\"[ifviva]Set view path error: \", err)\n\t\treturn\n\t}\n\tfor _, fileInfo := range fileInfos {\n\t\tif fileInfo.IsDir() {\n\t\t\tscanDir(path.Join(dir, fileInfo.Name()), fn)\n\t\t} else {\n\t\t\tfn(path.Join(dir, fileInfo.Name()))\n\t\t}\n\t}\n}\n\nfunc (ctrl *Controller) Init(ctx Context) {\n\tctrl.Req = ctx.Req\n\tctrl.Res = ctx.Res\n\tctrl.Params = ctx.Params\n\tctrl.statusCode = 200\n\tctrl.Charset = defaultCharset\n}\n\nfunc (ctrl *Controller) Status(status int) {\n\tctrl.statusCode = status\n}\n\nfunc (ctrl *Controller) Text(text string) {\n\tctrl.Res.Header().Set(contentType, appendCharset(contentText, ctrl.Charset))\n\tctrl.Res.WriteHeader(ctrl.statusCode)\n\tctrl.Res.Write([]byte(text))\n}\n\nfunc (ctrl *Controller) Json(v interface{}) {\n\tresult, err := json.MarshalIndent(v, \"\", \" \")\n\tif err != nil {\n\t\tctrl.InternalError(err)\n\t\treturn\n\t}\n\tctrl.Res.Header().Set(contentType, appendCharset(contentJSON, ctrl.Charset))\n\tctrl.Res.WriteHeader(ctrl.statusCode)\n\tctrl.Res.Write(result)\n}\n\nfunc (ctrl *Controller) View(name string, data interface{}) {\n\tctrl.Res.Header().Set(contentType, appendCharset(contentHTML, ctrl.Charset))\n\tctrl.Res.WriteHeader(ctrl.statusCode)\n\terr := cacheTemplate.ExecuteTemplate(ctrl.Res, name, data)\n\tif err != nil {\n\t\tctrl.InternalError(err)\n\t\treturn\n\t}\n}\n\nfunc (ctrl *Controller) InternalError(err error) {\n\tctrl.Err = err\n\tctrl.Res.WriteHeader(500)\n\tctrl.Res.Write([]byte(\"Internal Server Error\"))\n}\n\nfunc appendCharset(content string, charset string) string {\n\treturn content + \"; charset=\" + charset\n}\n<commit_msg>添加开启和关闭视图缓存的功能<commit_after>package ifviva\n\nimport (\n\t\"encoding\/json\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"path\"\n)\n\nconst (\n\tcontentType = \"Content-Type\"\n\tcontentText = \"text\/plain\"\n\tcontentJSON = \"application\/json\"\n\tcontentHTML = \"text\/html\"\n\tdefaultCharset = \"UTF-8\"\n)\n\nvar (\n\tisCache bool = true\n\tcacheTemplate *template.Template\n\tviewDir string\n)\n\ntype Controller struct {\n\tReq *http.Request\n\tRes http.ResponseWriter\n\tParams map[string]string\n\tstatusCode int\n\tErr error\n\tCharset string\n}\n\nfunc SetViewPath(dir string) {\n\tviewDir = dir\n\tsetCacheTemplate()\n}\n\nfunc OpenViewCache() {\n\tisCache = true\n}\n\nfunc CloseViewCache() {\n\tisCache = false\n}\n\nfunc setCacheTemplate() {\n\tviewPaths := []string{}\n\tscanDir(viewDir, func(viewPath string) {\n\t\tviewPaths = append(viewPaths, viewPath)\n\t})\n\tvar err error\n\tcacheTemplate, err = template.ParseFiles(viewPaths...)\n\tif err != nil {\n\t\tlog.Println(\"[ifviva]Set view path error: \", err)\n\t}\n}\n\nfunc scanDir(dir string, fn func(string)) {\n\tfileInfos, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\tlog.Println(\"[ifviva]Set view path error: \", err)\n\t\treturn\n\t}\n\tfor _, fileInfo := range fileInfos {\n\t\tif fileInfo.IsDir() {\n\t\t\tscanDir(path.Join(dir, fileInfo.Name()), fn)\n\t\t} else {\n\t\t\tfn(path.Join(dir, fileInfo.Name()))\n\t\t}\n\t}\n}\n\nfunc (ctrl *Controller) Init(ctx Context) {\n\tctrl.Req = ctx.Req\n\tctrl.Res = ctx.Res\n\tctrl.Params = ctx.Params\n\tctrl.statusCode = 200\n\tctrl.Charset = defaultCharset\n}\n\nfunc (ctrl *Controller) Status(status int) {\n\tctrl.statusCode = status\n}\n\nfunc (ctrl *Controller) Text(text string) {\n\tctrl.Res.Header().Set(contentType, appendCharset(contentText, ctrl.Charset))\n\tctrl.Res.WriteHeader(ctrl.statusCode)\n\tctrl.Res.Write([]byte(text))\n}\n\nfunc (ctrl *Controller) Json(v interface{}) {\n\tresult, err := json.MarshalIndent(v, \"\", \" \")\n\tif err != nil {\n\t\tctrl.InternalError(err)\n\t\treturn\n\t}\n\tctrl.Res.Header().Set(contentType, appendCharset(contentJSON, ctrl.Charset))\n\tctrl.Res.WriteHeader(ctrl.statusCode)\n\tctrl.Res.Write(result)\n}\n\nfunc (ctrl *Controller) View(name string, data interface{}) {\n\tif isCache != true {\n\t\tsetCacheTemplate()\n\t}\n\n\tctrl.Res.Header().Set(contentType, appendCharset(contentHTML, ctrl.Charset))\n\tctrl.Res.WriteHeader(ctrl.statusCode)\n\terr := cacheTemplate.ExecuteTemplate(ctrl.Res, name, data)\n\tif err != nil {\n\t\tctrl.InternalError(err)\n\t\treturn\n\t}\n}\n\nfunc (ctrl *Controller) InternalError(err error) {\n\tctrl.Err = err\n\tctrl.Res.WriteHeader(500)\n\tctrl.Res.Write([]byte(\"Internal Server Error\"))\n}\n\nfunc appendCharset(content string, charset string) string {\n\treturn content + \"; charset=\" + charset\n}\n<|endoftext|>"} {"text":"<commit_before>package dogo\n\nimport (\n\t\"net\/http\"\n)\n\ntype dogoController interface {\n\thandler(http.ResponseWriter, *http.Request)\n}\n\ntype BaseController struct {\n\tGet map[string][]string\n\tPost map[string][]string\n\t\/\/ Get and Post merge\n\tRequest map[string][]string\n}\n\nfunc (c *BaseController) handler(response http.ResponseWriter, request *http.Request) {\n\n\tresponse.Write([]byte(\"wuciyou hello word\"))\n}\n<commit_msg>删除控制器文件<commit_after><|endoftext|>"} {"text":"<commit_before>package terraform\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/hashicorp\/terraform\/config\"\n)\n\n\/\/ NodeApplyableResource represents a resource that is \"applyable\":\n\/\/ it is ready to be applied and is represented by a diff.\ntype NodeApplyableResource struct {\n\tAddr *ResourceAddress \/\/ Addr is the address for this resource\n\tConfig *config.Resource \/\/ Config is the resource in the config\n\tResourceState *ResourceState \/\/ ResourceState is the ResourceState for this\n}\n\nfunc (n *NodeApplyableResource) Name() string {\n\treturn n.Addr.String()\n}\n\n\/\/ GraphNodeSubPath\nfunc (n *NodeApplyableResource) Path() []string {\n\treturn n.Addr.Path\n}\n\n\/\/ GraphNodeProviderConsumer\nfunc (n *NodeApplyableResource) ProvidedBy() []string {\n\t\/\/ If we have a config we prefer that above all else\n\tif n.Config != nil {\n\t\treturn []string{resourceProvider(n.Config.Type, n.Config.Provider)}\n\t}\n\n\t\/\/ If we have state, then we will use the provider from there\n\tif n.ResourceState != nil {\n\t\treturn []string{n.ResourceState.Provider}\n\t}\n\n\t\/\/ Use our type\n\treturn []string{resourceProvider(n.Addr.Type, \"\")}\n}\n\n\/\/ GraphNodeProvisionerConsumer\nfunc (n *NodeApplyableResource) ProvisionedBy() []string {\n\t\/\/ If we have no configuration, then we have no provisioners\n\tif n.Config == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Build the list of provisioners we need based on the configuration.\n\t\/\/ It is okay to have duplicates here.\n\tresult := make([]string, len(n.Config.Provisioners))\n\tfor i, p := range n.Config.Provisioners {\n\t\tresult[i] = p.Type\n\t}\n\n\treturn result\n}\n\n\/\/ GraphNodeEvalable\nfunc (n *NodeApplyableResource) EvalTree() EvalNode {\n\t\/\/ stateId is the ID to put into the state\n\tstateId := n.Addr.stateId()\n\tif n.Addr.Index > -1 {\n\t\tstateId = fmt.Sprintf(\"%s.%d\", stateId, n.Addr.Index)\n\t}\n\n\t\/\/ Build the instance info. More of this will be populated during eval\n\tinfo := &InstanceInfo{\n\t\tId: stateId,\n\t\tType: n.Addr.Type,\n\t}\n\n\t\/\/ Build the resource for eval\n\tresource := &Resource{\n\t\tName: n.Addr.Name,\n\t\tType: n.Addr.Type,\n\t\tCountIndex: n.Addr.Index,\n\t}\n\tif resource.CountIndex < 0 {\n\t\tresource.CountIndex = 0\n\t}\n\n\t\/\/ Determine the dependencies for the state. We use some older\n\t\/\/ code for this that we've used for a long time.\n\tvar stateDeps []string\n\t{\n\t\toldN := &graphNodeExpandedResource{Resource: n.Config}\n\t\tstateDeps = oldN.StateDependencies()\n\t}\n\n\t\/\/ Declare a bunch of variables that are used for state during\n\t\/\/ evaluation. Most of this are written to by-address below.\n\tvar provider ResourceProvider\n\tvar diff *InstanceDiff\n\tvar state *InstanceState\n\tvar resourceConfig *ResourceConfig\n\tvar err error\n\tvar createNew bool\n\tvar createBeforeDestroyEnabled bool\n\n\treturn &EvalSequence{\n\t\tNodes: []EvalNode{\n\t\t\t\/\/ Build the instance info\n\t\t\t&EvalInstanceInfo{\n\t\t\t\tInfo: info,\n\t\t\t},\n\n\t\t\t\/\/ Get the saved diff for apply\n\t\t\t&EvalReadDiff{\n\t\t\t\tName: stateId,\n\t\t\t\tDiff: &diff,\n\t\t\t},\n\n\t\t\t\/\/ We don't want to do any destroys\n\t\t\t&EvalIf{\n\t\t\t\tIf: func(ctx EvalContext) (bool, error) {\n\t\t\t\t\tif diff == nil {\n\t\t\t\t\t\treturn true, EvalEarlyExitError{}\n\t\t\t\t\t}\n\n\t\t\t\t\tif diff.GetDestroy() && diff.GetAttributesLen() == 0 {\n\t\t\t\t\t\treturn true, EvalEarlyExitError{}\n\t\t\t\t\t}\n\n\t\t\t\t\tdiff.SetDestroy(false)\n\t\t\t\t\treturn true, nil\n\t\t\t\t},\n\t\t\t\tThen: EvalNoop{},\n\t\t\t},\n\n\t\t\t&EvalIf{\n\t\t\t\tIf: func(ctx EvalContext) (bool, error) {\n\t\t\t\t\tdestroy := false\n\t\t\t\t\tif diff != nil {\n\t\t\t\t\t\tdestroy = diff.GetDestroy() || diff.RequiresNew()\n\t\t\t\t\t}\n\n\t\t\t\t\tcreateBeforeDestroyEnabled =\n\t\t\t\t\t\tn.Config.Lifecycle.CreateBeforeDestroy &&\n\t\t\t\t\t\t\tdestroy\n\n\t\t\t\t\treturn createBeforeDestroyEnabled, nil\n\t\t\t\t},\n\t\t\t\tThen: &EvalDeposeState{\n\t\t\t\t\tName: stateId,\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t&EvalInterpolate{\n\t\t\t\tConfig: n.Config.RawConfig.Copy(),\n\t\t\t\tResource: resource,\n\t\t\t\tOutput: &resourceConfig,\n\t\t\t},\n\t\t\t&EvalGetProvider{\n\t\t\t\tName: n.ProvidedBy()[0],\n\t\t\t\tOutput: &provider,\n\t\t\t},\n\t\t\t&EvalReadState{\n\t\t\t\tName: stateId,\n\t\t\t\tOutput: &state,\n\t\t\t},\n\t\t\t\/\/ Re-run validation to catch any errors we missed, e.g. type\n\t\t\t\/\/ mismatches on computed values.\n\t\t\t&EvalValidateResource{\n\t\t\t\tProvider: &provider,\n\t\t\t\tConfig: &resourceConfig,\n\t\t\t\tResourceName: n.Config.Name,\n\t\t\t\tResourceType: n.Config.Type,\n\t\t\t\tResourceMode: n.Config.Mode,\n\t\t\t\tIgnoreWarnings: true,\n\t\t\t},\n\t\t\t&EvalDiff{\n\t\t\t\tInfo: info,\n\t\t\t\tConfig: &resourceConfig,\n\t\t\t\tResource: n.Config,\n\t\t\t\tProvider: &provider,\n\t\t\t\tDiff: &diff,\n\t\t\t\tState: &state,\n\t\t\t\tOutputDiff: &diff,\n\t\t\t},\n\n\t\t\t\/\/ Get the saved diff\n\t\t\t&EvalReadDiff{\n\t\t\t\tName: stateId,\n\t\t\t\tDiff: &diff,\n\t\t\t},\n\n\t\t\t\/\/ Compare the diffs\n\t\t\t&EvalCompareDiff{\n\t\t\t\tInfo: info,\n\t\t\t\tOne: &diff,\n\t\t\t\tTwo: &diff,\n\t\t\t},\n\n\t\t\t&EvalGetProvider{\n\t\t\t\tName: n.ProvidedBy()[0],\n\t\t\t\tOutput: &provider,\n\t\t\t},\n\t\t\t&EvalReadState{\n\t\t\t\tName: stateId,\n\t\t\t\tOutput: &state,\n\t\t\t},\n\t\t\t&EvalApply{\n\t\t\t\tInfo: info,\n\t\t\t\tState: &state,\n\t\t\t\tDiff: &diff,\n\t\t\t\tProvider: &provider,\n\t\t\t\tOutput: &state,\n\t\t\t\tError: &err,\n\t\t\t\tCreateNew: &createNew,\n\t\t\t},\n\t\t\t&EvalWriteState{\n\t\t\t\tName: stateId,\n\t\t\t\tResourceType: n.Config.Type,\n\t\t\t\tProvider: n.Config.Provider,\n\t\t\t\tDependencies: stateDeps,\n\t\t\t\tState: &state,\n\t\t\t},\n\t\t\t\/*\n\t\t\t\tTODO: this has to work\n\t\t\t\t&EvalApplyProvisioners{\n\t\t\t\t\tInfo: info,\n\t\t\t\t\tState: &state,\n\t\t\t\t\tResource: n.Config,\n\t\t\t\t\tInterpResource: resource,\n\t\t\t\t\tCreateNew: &createNew,\n\t\t\t\t\tError: &err,\n\t\t\t\t},\n\t\t\t*\/\n\t\t\t&EvalIf{\n\t\t\t\tIf: func(ctx EvalContext) (bool, error) {\n\t\t\t\t\treturn createBeforeDestroyEnabled && err != nil, nil\n\t\t\t\t},\n\t\t\t\tThen: &EvalUndeposeState{\n\t\t\t\t\tName: stateId,\n\t\t\t\t\tState: &state,\n\t\t\t\t},\n\t\t\t\tElse: &EvalWriteState{\n\t\t\t\t\tName: stateId,\n\t\t\t\t\tResourceType: n.Config.Type,\n\t\t\t\t\tProvider: n.Config.Provider,\n\t\t\t\t\tDependencies: stateDeps,\n\t\t\t\t\tState: &state,\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\/\/ We clear the diff out here so that future nodes\n\t\t\t\/\/ don't see a diff that is already complete. There\n\t\t\t\/\/ is no longer a diff!\n\t\t\t&EvalWriteDiff{\n\t\t\t\tName: stateId,\n\t\t\t\tDiff: nil,\n\t\t\t},\n\n\t\t\t&EvalApplyPost{\n\t\t\t\tInfo: info,\n\t\t\t\tState: &state,\n\t\t\t\tError: &err,\n\t\t\t},\n\t\t\t&EvalUpdateStateHook{},\n\t\t},\n\t}\n}\n<commit_msg>terraform: enable provisioners to execute<commit_after>package terraform\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/hashicorp\/terraform\/config\"\n)\n\n\/\/ NodeApplyableResource represents a resource that is \"applyable\":\n\/\/ it is ready to be applied and is represented by a diff.\ntype NodeApplyableResource struct {\n\tAddr *ResourceAddress \/\/ Addr is the address for this resource\n\tConfig *config.Resource \/\/ Config is the resource in the config\n\tResourceState *ResourceState \/\/ ResourceState is the ResourceState for this\n}\n\nfunc (n *NodeApplyableResource) Name() string {\n\treturn n.Addr.String()\n}\n\n\/\/ GraphNodeSubPath\nfunc (n *NodeApplyableResource) Path() []string {\n\treturn n.Addr.Path\n}\n\n\/\/ GraphNodeProviderConsumer\nfunc (n *NodeApplyableResource) ProvidedBy() []string {\n\t\/\/ If we have a config we prefer that above all else\n\tif n.Config != nil {\n\t\treturn []string{resourceProvider(n.Config.Type, n.Config.Provider)}\n\t}\n\n\t\/\/ If we have state, then we will use the provider from there\n\tif n.ResourceState != nil {\n\t\treturn []string{n.ResourceState.Provider}\n\t}\n\n\t\/\/ Use our type\n\treturn []string{resourceProvider(n.Addr.Type, \"\")}\n}\n\n\/\/ GraphNodeProvisionerConsumer\nfunc (n *NodeApplyableResource) ProvisionedBy() []string {\n\t\/\/ If we have no configuration, then we have no provisioners\n\tif n.Config == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Build the list of provisioners we need based on the configuration.\n\t\/\/ It is okay to have duplicates here.\n\tresult := make([]string, len(n.Config.Provisioners))\n\tfor i, p := range n.Config.Provisioners {\n\t\tresult[i] = p.Type\n\t}\n\n\treturn result\n}\n\n\/\/ GraphNodeEvalable\nfunc (n *NodeApplyableResource) EvalTree() EvalNode {\n\t\/\/ stateId is the ID to put into the state\n\tstateId := n.Addr.stateId()\n\tif n.Addr.Index > -1 {\n\t\tstateId = fmt.Sprintf(\"%s.%d\", stateId, n.Addr.Index)\n\t}\n\n\t\/\/ Build the instance info. More of this will be populated during eval\n\tinfo := &InstanceInfo{\n\t\tId: stateId,\n\t\tType: n.Addr.Type,\n\t}\n\n\t\/\/ Build the resource for eval\n\tresource := &Resource{\n\t\tName: n.Addr.Name,\n\t\tType: n.Addr.Type,\n\t\tCountIndex: n.Addr.Index,\n\t}\n\tif resource.CountIndex < 0 {\n\t\tresource.CountIndex = 0\n\t}\n\n\t\/\/ Determine the dependencies for the state. We use some older\n\t\/\/ code for this that we've used for a long time.\n\tvar stateDeps []string\n\t{\n\t\toldN := &graphNodeExpandedResource{Resource: n.Config}\n\t\tstateDeps = oldN.StateDependencies()\n\t}\n\n\t\/\/ Declare a bunch of variables that are used for state during\n\t\/\/ evaluation. Most of this are written to by-address below.\n\tvar provider ResourceProvider\n\tvar diff *InstanceDiff\n\tvar state *InstanceState\n\tvar resourceConfig *ResourceConfig\n\tvar err error\n\tvar createNew bool\n\tvar createBeforeDestroyEnabled bool\n\n\treturn &EvalSequence{\n\t\tNodes: []EvalNode{\n\t\t\t\/\/ Build the instance info\n\t\t\t&EvalInstanceInfo{\n\t\t\t\tInfo: info,\n\t\t\t},\n\n\t\t\t\/\/ Get the saved diff for apply\n\t\t\t&EvalReadDiff{\n\t\t\t\tName: stateId,\n\t\t\t\tDiff: &diff,\n\t\t\t},\n\n\t\t\t\/\/ We don't want to do any destroys\n\t\t\t&EvalIf{\n\t\t\t\tIf: func(ctx EvalContext) (bool, error) {\n\t\t\t\t\tif diff == nil {\n\t\t\t\t\t\treturn true, EvalEarlyExitError{}\n\t\t\t\t\t}\n\n\t\t\t\t\tif diff.GetDestroy() && diff.GetAttributesLen() == 0 {\n\t\t\t\t\t\treturn true, EvalEarlyExitError{}\n\t\t\t\t\t}\n\n\t\t\t\t\tdiff.SetDestroy(false)\n\t\t\t\t\treturn true, nil\n\t\t\t\t},\n\t\t\t\tThen: EvalNoop{},\n\t\t\t},\n\n\t\t\t&EvalIf{\n\t\t\t\tIf: func(ctx EvalContext) (bool, error) {\n\t\t\t\t\tdestroy := false\n\t\t\t\t\tif diff != nil {\n\t\t\t\t\t\tdestroy = diff.GetDestroy() || diff.RequiresNew()\n\t\t\t\t\t}\n\n\t\t\t\t\tcreateBeforeDestroyEnabled =\n\t\t\t\t\t\tn.Config.Lifecycle.CreateBeforeDestroy &&\n\t\t\t\t\t\t\tdestroy\n\n\t\t\t\t\treturn createBeforeDestroyEnabled, nil\n\t\t\t\t},\n\t\t\t\tThen: &EvalDeposeState{\n\t\t\t\t\tName: stateId,\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t&EvalInterpolate{\n\t\t\t\tConfig: n.Config.RawConfig.Copy(),\n\t\t\t\tResource: resource,\n\t\t\t\tOutput: &resourceConfig,\n\t\t\t},\n\t\t\t&EvalGetProvider{\n\t\t\t\tName: n.ProvidedBy()[0],\n\t\t\t\tOutput: &provider,\n\t\t\t},\n\t\t\t&EvalReadState{\n\t\t\t\tName: stateId,\n\t\t\t\tOutput: &state,\n\t\t\t},\n\t\t\t\/\/ Re-run validation to catch any errors we missed, e.g. type\n\t\t\t\/\/ mismatches on computed values.\n\t\t\t&EvalValidateResource{\n\t\t\t\tProvider: &provider,\n\t\t\t\tConfig: &resourceConfig,\n\t\t\t\tResourceName: n.Config.Name,\n\t\t\t\tResourceType: n.Config.Type,\n\t\t\t\tResourceMode: n.Config.Mode,\n\t\t\t\tIgnoreWarnings: true,\n\t\t\t},\n\t\t\t&EvalDiff{\n\t\t\t\tInfo: info,\n\t\t\t\tConfig: &resourceConfig,\n\t\t\t\tResource: n.Config,\n\t\t\t\tProvider: &provider,\n\t\t\t\tDiff: &diff,\n\t\t\t\tState: &state,\n\t\t\t\tOutputDiff: &diff,\n\t\t\t},\n\n\t\t\t\/\/ Get the saved diff\n\t\t\t&EvalReadDiff{\n\t\t\t\tName: stateId,\n\t\t\t\tDiff: &diff,\n\t\t\t},\n\n\t\t\t\/\/ Compare the diffs\n\t\t\t&EvalCompareDiff{\n\t\t\t\tInfo: info,\n\t\t\t\tOne: &diff,\n\t\t\t\tTwo: &diff,\n\t\t\t},\n\n\t\t\t&EvalGetProvider{\n\t\t\t\tName: n.ProvidedBy()[0],\n\t\t\t\tOutput: &provider,\n\t\t\t},\n\t\t\t&EvalReadState{\n\t\t\t\tName: stateId,\n\t\t\t\tOutput: &state,\n\t\t\t},\n\t\t\t&EvalApply{\n\t\t\t\tInfo: info,\n\t\t\t\tState: &state,\n\t\t\t\tDiff: &diff,\n\t\t\t\tProvider: &provider,\n\t\t\t\tOutput: &state,\n\t\t\t\tError: &err,\n\t\t\t\tCreateNew: &createNew,\n\t\t\t},\n\t\t\t&EvalWriteState{\n\t\t\t\tName: stateId,\n\t\t\t\tResourceType: n.Config.Type,\n\t\t\t\tProvider: n.Config.Provider,\n\t\t\t\tDependencies: stateDeps,\n\t\t\t\tState: &state,\n\t\t\t},\n\t\t\t&EvalApplyProvisioners{\n\t\t\t\tInfo: info,\n\t\t\t\tState: &state,\n\t\t\t\tResource: n.Config,\n\t\t\t\tInterpResource: resource,\n\t\t\t\tCreateNew: &createNew,\n\t\t\t\tError: &err,\n\t\t\t},\n\t\t\t&EvalIf{\n\t\t\t\tIf: func(ctx EvalContext) (bool, error) {\n\t\t\t\t\treturn createBeforeDestroyEnabled && err != nil, nil\n\t\t\t\t},\n\t\t\t\tThen: &EvalUndeposeState{\n\t\t\t\t\tName: stateId,\n\t\t\t\t\tState: &state,\n\t\t\t\t},\n\t\t\t\tElse: &EvalWriteState{\n\t\t\t\t\tName: stateId,\n\t\t\t\t\tResourceType: n.Config.Type,\n\t\t\t\t\tProvider: n.Config.Provider,\n\t\t\t\t\tDependencies: stateDeps,\n\t\t\t\t\tState: &state,\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\/\/ We clear the diff out here so that future nodes\n\t\t\t\/\/ don't see a diff that is already complete. There\n\t\t\t\/\/ is no longer a diff!\n\t\t\t&EvalWriteDiff{\n\t\t\t\tName: stateId,\n\t\t\t\tDiff: nil,\n\t\t\t},\n\n\t\t\t&EvalApplyPost{\n\t\t\t\tInfo: info,\n\t\t\t\tState: &state,\n\t\t\t\tError: &err,\n\t\t\t},\n\t\t\t&EvalUpdateStateHook{},\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 - The Event Horizon authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage scheduler\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\teh \"github.com\/looplab\/eventhorizon\"\n\t\"github.com\/looplab\/eventhorizon\/uuid\"\n)\n\n\/\/ ErrCanceled is when a scheduled command has been canceled.\nvar ErrCanceled = errors.New(\"canceled\")\n\n\/\/ Command is a scheduled command with an execution time.\ntype Command interface {\n\teh.Command\n\n\t\/\/ ExecuteAt returns the time when the command will execute.\n\tExecuteAt() time.Time\n}\n\n\/\/ CommandWithExecuteTime returns a wrapped command with a execution time set.\nfunc CommandWithExecuteTime(cmd eh.Command, t time.Time) Command {\n\treturn &command{Command: cmd, t: t}\n}\n\n\/\/ private implementation to wrap ordinary commands and add a execution time.\ntype command struct {\n\teh.Command\n\tt time.Time\n}\n\n\/\/ ExecuteAt implements the ExecuteAt method of the Command interface.\nfunc (c *command) ExecuteAt() time.Time {\n\treturn c.t\n}\n\n\/\/ NewMiddleware returns a new command handler middleware and a scheduler helper.\nfunc NewMiddleware(repo eh.ReadWriteRepo, codec eh.CommandCodec) (eh.CommandHandlerMiddleware, *Scheduler) {\n\ts := &Scheduler{\n\t\trepo: repo,\n\t\tcmdCh: make(chan *scheduledCommand, 100),\n\t\tcancelScheduling: map[uuid.UUID]chan struct{}{},\n\t\terrCh: make(chan error, 100),\n\t\tcodec: codec,\n\t}\n\n\treturn eh.CommandHandlerMiddleware(func(h eh.CommandHandler) eh.CommandHandler {\n\t\ts.setHandler(h)\n\n\t\treturn eh.CommandHandlerFunc(func(ctx context.Context, cmd eh.Command) error {\n\t\t\t\/\/ Delayed command execution if there is time set.\n\t\t\tif c, ok := cmd.(Command); ok && !c.ExecuteAt().IsZero() {\n\t\t\t\t\/\/ Use the wrapped command when created by the helper func.\n\t\t\t\tinnerCmd, ok := c.(*command)\n\t\t\t\tif ok {\n\t\t\t\t\tcmd = innerCmd.Command\n\t\t\t\t}\n\n\t\t\t\t\/\/ Ignore the persisted command ID in this case.\n\t\t\t\t_, err := s.ScheduleCommand(ctx, cmd, c.ExecuteAt())\n\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ Immediate command execution.\n\t\t\treturn h.HandleCommand(ctx, cmd)\n\t\t})\n\t}), s\n}\n\n\/\/ PersistedCommand is a persisted command.\ntype PersistedCommand struct {\n\tID uuid.UUID `json:\"_\" bson:\"_id\"`\n\tIDStr string `json:\"id\" bson:\"_\"`\n\tRawCommand []byte `json:\"command\" bson:\"command\"`\n\tExecuteAt time.Time `json:\"timestamp\" bson:\"timestamp\"`\n\tCommand eh.Command `json:\"-\" bson:\"-\"`\n\tContext context.Context `json:\"-\" bson:\"-\"`\n}\n\n\/\/ EntityID implements the EntityID method of the eventhorizon.Entity interface.\nfunc (c *PersistedCommand) EntityID() uuid.UUID {\n\treturn c.ID\n}\n\n\/\/ Scheduler is a scheduled of commands.\ntype Scheduler struct {\n\th eh.CommandHandler\n\thMu sync.Mutex\n\trepo eh.ReadWriteRepo\n\tcmdCh chan *scheduledCommand\n\tcancelScheduling map[uuid.UUID]chan struct{}\n\tcancelSchedulingMu sync.Mutex\n\terrCh chan error\n\tcctx context.Context\n\tcancel context.CancelFunc\n\tdone chan struct{}\n\tcodec eh.CommandCodec\n}\n\nfunc (s *Scheduler) setHandler(h eh.CommandHandler) {\n\ts.hMu.Lock()\n\tdefer s.hMu.Unlock()\n\n\tif s.h != nil {\n\t\tpanic(\"eventhorizon: handler already set for outbox\")\n\t}\n\n\ts.h = h\n}\n\n\/\/ Start starts the scheduler by first loading all persisted commands.\nfunc (s *Scheduler) Start() error {\n\tif s.h == nil {\n\t\treturn fmt.Errorf(\"command handler not set\")\n\t}\n\n\tif err := s.loadCommands(); err != nil {\n\t\treturn fmt.Errorf(\"could not load commands: %w\", err)\n\t}\n\n\ts.cctx, s.cancel = context.WithCancel(context.Background())\n\ts.done = make(chan struct{})\n\n\tgo s.run()\n\n\treturn nil\n}\n\n\/\/ Stop stops all scheduled commands.\nfunc (s *Scheduler) Stop() error {\n\ts.cancel()\n\n\t<-s.done\n\n\treturn nil\n}\n\n\/\/ Errors returns an error channel that will receive errors from handling of\n\/\/ scheduled commands.\nfunc (s *Scheduler) Errors() <-chan error {\n\treturn s.errCh\n}\n\ntype scheduledCommand struct {\n\tid uuid.UUID\n\tctx context.Context\n\tcmd eh.Command\n\texecuteAt time.Time\n}\n\n\/\/ ScheduleCommand schedules a command to be executed at `executeAt`. It is persisted\n\/\/ to the repo.\nfunc (s *Scheduler) ScheduleCommand(ctx context.Context, cmd eh.Command, executeAt time.Time) (uuid.UUID, error) {\n\tb, err := s.codec.MarshalCommand(ctx, cmd)\n\tif err != nil {\n\t\treturn uuid.Nil, &Error{\n\t\t\tErr: fmt.Errorf(\"could not marshal command: %w\", err),\n\t\t\tCtx: ctx,\n\t\t\tCommand: cmd,\n\t\t}\n\t}\n\n\t\/\/ Use the command ID as persisted ID if available.\n\tvar id uuid.UUID\n\tif cmd, ok := cmd.(eh.CommandIDer); ok {\n\t\tid = cmd.CommandID()\n\t} else {\n\t\tid = uuid.New()\n\t}\n\n\tpc := &PersistedCommand{\n\t\tID: id,\n\t\tIDStr: id.String(),\n\t\tRawCommand: b,\n\t\tExecuteAt: executeAt,\n\t}\n\n\tif err := s.repo.Save(context.Background(), pc); err != nil {\n\t\treturn uuid.Nil, &Error{\n\t\t\tErr: fmt.Errorf(\"could not persist command: %w\", err),\n\t\t\tCtx: ctx,\n\t\t\tCommand: cmd,\n\t\t}\n\t}\n\n\tselect {\n\tcase s.cmdCh <- &scheduledCommand{id, ctx, cmd, executeAt}:\n\tdefault:\n\t\treturn uuid.Nil, &Error{\n\t\t\tErr: fmt.Errorf(\"command queue full\"),\n\t\t\tCtx: ctx,\n\t\t\tCommand: cmd,\n\t\t}\n\t}\n\n\treturn pc.ID, nil\n}\n\n\/\/ Commands returns all scheduled commands.\nfunc (s *Scheduler) Commands(ctx context.Context) ([]*PersistedCommand, error) {\n\tentities, err := s.repo.FindAll(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not load scheduled commands: %w\", err)\n\t}\n\n\tcommands := make([]*PersistedCommand, len(entities))\n\n\tfor i, entity := range entities {\n\t\tc, ok := entity.(*PersistedCommand)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"command is not schedulable: %T\", entity)\n\t\t}\n\n\t\tif c.Command, c.Context, err = s.codec.UnmarshalCommand(ctx, c.RawCommand); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not unmarshal command: %w\", err)\n\t\t}\n\n\t\tc.RawCommand = nil\n\n\t\tif c.IDStr != \"\" {\n\t\t\tid, err := uuid.Parse(c.IDStr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"could not parse command ID: %w\", err)\n\t\t\t}\n\n\t\t\tc.ID = id\n\t\t}\n\n\t\tcommands[i] = c\n\t}\n\n\treturn commands, nil\n}\n\n\/\/ CancelCommand cancels a scheduled command.\nfunc (s *Scheduler) CancelCommand(ctx context.Context, id uuid.UUID) error {\n\ts.cancelSchedulingMu.Lock()\n\tdefer s.cancelSchedulingMu.Unlock()\n\n\tcancel, ok := s.cancelScheduling[id]\n\tif !ok {\n\t\treturn fmt.Errorf(\"command %s not scheduled\", id)\n\t}\n\n\tclose(cancel)\n\n\treturn nil\n}\n\nfunc (s *Scheduler) loadCommands() error {\n\tcommands, err := s.Commands(context.Background())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not load scheduled commands: %w\", err)\n\t}\n\n\tfor _, pc := range commands {\n\t\tsc := &scheduledCommand{\n\t\t\tid: pc.ID,\n\t\t\tctx: pc.Context,\n\t\t\tcmd: pc.Command,\n\t\t\texecuteAt: pc.ExecuteAt,\n\t\t}\n\n\t\tselect {\n\t\tcase s.cmdCh <- sc:\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"could not schedule command: %w\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (s *Scheduler) run() {\n\tvar wg sync.WaitGroup\n\nloop:\n\tfor {\n\t\tselect {\n\t\tcase <-s.cctx.Done():\n\t\t\tbreak loop\n\t\tcase sc := <-s.cmdCh:\n\t\t\twg.Add(1)\n\n\t\t\ts.cancelSchedulingMu.Lock()\n\t\t\tcancel := make(chan struct{})\n\t\t\ts.cancelScheduling[sc.id] = cancel\n\t\t\ts.cancelSchedulingMu.Unlock()\n\n\t\t\tgo func(cancel chan struct{}) {\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\tt := time.NewTimer(time.Until(sc.executeAt))\n\t\t\t\tdefer t.Stop()\n\n\t\t\t\tselect {\n\t\t\t\tcase <-s.cctx.Done():\n\t\t\t\t\t\/\/ Stop without removing persisted cmd.\n\t\t\t\tcase <-t.C:\n\t\t\t\t\tif err := s.h.HandleCommand(sc.ctx, sc.cmd); err != nil {\n\t\t\t\t\t\t\/\/ Always try to deliver errors.\n\t\t\t\t\t\ts.errCh <- &Error{\n\t\t\t\t\t\t\tErr: err,\n\t\t\t\t\t\t\tCtx: sc.ctx,\n\t\t\t\t\t\t\tCommand: sc.cmd,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif err := s.repo.Remove(context.Background(), sc.id); err != nil {\n\t\t\t\t\t\ts.errCh <- &Error{\n\t\t\t\t\t\t\tErr: fmt.Errorf(\"could not remove persisted command: %w\", err),\n\t\t\t\t\t\t\tCtx: sc.ctx,\n\t\t\t\t\t\t\tCommand: sc.cmd,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase <-cancel:\n\t\t\t\t\tif err := s.repo.Remove(context.Background(), sc.id); err != nil {\n\t\t\t\t\t\ts.errCh <- &Error{\n\t\t\t\t\t\t\tErr: fmt.Errorf(\"could not remove persisted command: %w\", err),\n\t\t\t\t\t\t\tCtx: sc.ctx,\n\t\t\t\t\t\t\tCommand: sc.cmd,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\ts.errCh <- &Error{\n\t\t\t\t\t\tErr: ErrCanceled,\n\t\t\t\t\t\tCtx: sc.ctx,\n\t\t\t\t\t\tCommand: sc.cmd,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}(cancel)\n\t\t}\n\t}\n\n\twg.Wait()\n\n\tclose(s.done)\n}\n\n\/\/ Error is an async error containing the error and the command.\ntype Error struct {\n\t\/\/ Err is the error that happened when handling the command.\n\tErr error\n\t\/\/ Ctx is the context used when the error happened.\n\tCtx context.Context\n\t\/\/ Command is the command handeled when the error happened.\n\tCommand eh.Command\n}\n\n\/\/ Error implements the Error method of the error interface.\nfunc (e *Error) Error() string {\n\treturn fmt.Sprintf(\"%s (%s): %s\", e.Command.CommandType(), e.Command.AggregateID(), e.Err.Error())\n}\n\n\/\/ Unwrap implements the errors.Unwrap method.\nfunc (e *Error) Unwrap() error {\n\treturn e.Err\n}\n\n\/\/ Cause implements the github.com\/pkg\/errors Unwrap method.\nfunc (e *Error) Cause() error {\n\treturn e.Unwrap()\n}\n<commit_msg>fix: use correct context when removing scheduled items<commit_after>\/\/ Copyright (c) 2017 - The Event Horizon authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage scheduler\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\teh \"github.com\/looplab\/eventhorizon\"\n\t\"github.com\/looplab\/eventhorizon\/uuid\"\n)\n\n\/\/ ErrCanceled is when a scheduled command has been canceled.\nvar ErrCanceled = errors.New(\"canceled\")\n\n\/\/ Command is a scheduled command with an execution time.\ntype Command interface {\n\teh.Command\n\n\t\/\/ ExecuteAt returns the time when the command will execute.\n\tExecuteAt() time.Time\n}\n\n\/\/ CommandWithExecuteTime returns a wrapped command with a execution time set.\nfunc CommandWithExecuteTime(cmd eh.Command, t time.Time) Command {\n\treturn &command{Command: cmd, t: t}\n}\n\n\/\/ private implementation to wrap ordinary commands and add a execution time.\ntype command struct {\n\teh.Command\n\tt time.Time\n}\n\n\/\/ ExecuteAt implements the ExecuteAt method of the Command interface.\nfunc (c *command) ExecuteAt() time.Time {\n\treturn c.t\n}\n\n\/\/ NewMiddleware returns a new command handler middleware and a scheduler helper.\nfunc NewMiddleware(repo eh.ReadWriteRepo, codec eh.CommandCodec) (eh.CommandHandlerMiddleware, *Scheduler) {\n\ts := &Scheduler{\n\t\trepo: repo,\n\t\tcmdCh: make(chan *scheduledCommand, 100),\n\t\tcancelScheduling: map[uuid.UUID]chan struct{}{},\n\t\terrCh: make(chan error, 100),\n\t\tcodec: codec,\n\t}\n\n\treturn eh.CommandHandlerMiddleware(func(h eh.CommandHandler) eh.CommandHandler {\n\t\ts.setHandler(h)\n\n\t\treturn eh.CommandHandlerFunc(func(ctx context.Context, cmd eh.Command) error {\n\t\t\t\/\/ Delayed command execution if there is time set.\n\t\t\tif c, ok := cmd.(Command); ok && !c.ExecuteAt().IsZero() {\n\t\t\t\t\/\/ Use the wrapped command when created by the helper func.\n\t\t\t\tinnerCmd, ok := c.(*command)\n\t\t\t\tif ok {\n\t\t\t\t\tcmd = innerCmd.Command\n\t\t\t\t}\n\n\t\t\t\t\/\/ Ignore the persisted command ID in this case.\n\t\t\t\t_, err := s.ScheduleCommand(ctx, cmd, c.ExecuteAt())\n\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t\/\/ Immediate command execution.\n\t\t\treturn h.HandleCommand(ctx, cmd)\n\t\t})\n\t}), s\n}\n\n\/\/ PersistedCommand is a persisted command.\ntype PersistedCommand struct {\n\tID uuid.UUID `json:\"_\" bson:\"_id\"`\n\tIDStr string `json:\"id\" bson:\"_\"`\n\tRawCommand []byte `json:\"command\" bson:\"command\"`\n\tExecuteAt time.Time `json:\"timestamp\" bson:\"timestamp\"`\n\tCommand eh.Command `json:\"-\" bson:\"-\"`\n\tContext context.Context `json:\"-\" bson:\"-\"`\n}\n\n\/\/ EntityID implements the EntityID method of the eventhorizon.Entity interface.\nfunc (c *PersistedCommand) EntityID() uuid.UUID {\n\treturn c.ID\n}\n\n\/\/ Scheduler is a scheduled of commands.\ntype Scheduler struct {\n\th eh.CommandHandler\n\thMu sync.Mutex\n\trepo eh.ReadWriteRepo\n\tcmdCh chan *scheduledCommand\n\tcancelScheduling map[uuid.UUID]chan struct{}\n\tcancelSchedulingMu sync.Mutex\n\terrCh chan error\n\tcctx context.Context\n\tcancel context.CancelFunc\n\tdone chan struct{}\n\tcodec eh.CommandCodec\n}\n\nfunc (s *Scheduler) setHandler(h eh.CommandHandler) {\n\ts.hMu.Lock()\n\tdefer s.hMu.Unlock()\n\n\tif s.h != nil {\n\t\tpanic(\"eventhorizon: handler already set for outbox\")\n\t}\n\n\ts.h = h\n}\n\n\/\/ Start starts the scheduler by first loading all persisted commands.\nfunc (s *Scheduler) Start() error {\n\tif s.h == nil {\n\t\treturn fmt.Errorf(\"command handler not set\")\n\t}\n\n\tif err := s.loadCommands(); err != nil {\n\t\treturn fmt.Errorf(\"could not load commands: %w\", err)\n\t}\n\n\ts.cctx, s.cancel = context.WithCancel(context.Background())\n\ts.done = make(chan struct{})\n\n\tgo s.run()\n\n\treturn nil\n}\n\n\/\/ Stop stops all scheduled commands.\nfunc (s *Scheduler) Stop() error {\n\ts.cancel()\n\n\t<-s.done\n\n\treturn nil\n}\n\n\/\/ Errors returns an error channel that will receive errors from handling of\n\/\/ scheduled commands.\nfunc (s *Scheduler) Errors() <-chan error {\n\treturn s.errCh\n}\n\ntype scheduledCommand struct {\n\tid uuid.UUID\n\tctx context.Context\n\tcmd eh.Command\n\texecuteAt time.Time\n}\n\n\/\/ ScheduleCommand schedules a command to be executed at `executeAt`. It is persisted\n\/\/ to the repo.\nfunc (s *Scheduler) ScheduleCommand(ctx context.Context, cmd eh.Command, executeAt time.Time) (uuid.UUID, error) {\n\tb, err := s.codec.MarshalCommand(ctx, cmd)\n\tif err != nil {\n\t\treturn uuid.Nil, &Error{\n\t\t\tErr: fmt.Errorf(\"could not marshal command: %w\", err),\n\t\t\tCtx: ctx,\n\t\t\tCommand: cmd,\n\t\t}\n\t}\n\n\t\/\/ Use the command ID as persisted ID if available.\n\tvar id uuid.UUID\n\tif cmd, ok := cmd.(eh.CommandIDer); ok {\n\t\tid = cmd.CommandID()\n\t} else {\n\t\tid = uuid.New()\n\t}\n\n\tpc := &PersistedCommand{\n\t\tID: id,\n\t\tIDStr: id.String(),\n\t\tRawCommand: b,\n\t\tExecuteAt: executeAt,\n\t}\n\n\tif err := s.repo.Save(ctx, pc); err != nil {\n\t\treturn uuid.Nil, &Error{\n\t\t\tErr: fmt.Errorf(\"could not persist command: %w\", err),\n\t\t\tCtx: ctx,\n\t\t\tCommand: cmd,\n\t\t}\n\t}\n\n\tselect {\n\tcase s.cmdCh <- &scheduledCommand{id, ctx, cmd, executeAt}:\n\tdefault:\n\t\treturn uuid.Nil, &Error{\n\t\t\tErr: fmt.Errorf(\"command queue full\"),\n\t\t\tCtx: ctx,\n\t\t\tCommand: cmd,\n\t\t}\n\t}\n\n\treturn pc.ID, nil\n}\n\n\/\/ Commands returns all scheduled commands.\nfunc (s *Scheduler) Commands(ctx context.Context) ([]*PersistedCommand, error) {\n\tentities, err := s.repo.FindAll(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not load scheduled commands: %w\", err)\n\t}\n\n\tcommands := make([]*PersistedCommand, len(entities))\n\n\tfor i, entity := range entities {\n\t\tc, ok := entity.(*PersistedCommand)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"command is not schedulable: %T\", entity)\n\t\t}\n\n\t\tif c.Command, c.Context, err = s.codec.UnmarshalCommand(ctx, c.RawCommand); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not unmarshal command: %w\", err)\n\t\t}\n\n\t\tc.RawCommand = nil\n\n\t\tif c.IDStr != \"\" {\n\t\t\tid, err := uuid.Parse(c.IDStr)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"could not parse command ID: %w\", err)\n\t\t\t}\n\n\t\t\tc.ID = id\n\t\t}\n\n\t\tcommands[i] = c\n\t}\n\n\treturn commands, nil\n}\n\n\/\/ CancelCommand cancels a scheduled command.\nfunc (s *Scheduler) CancelCommand(ctx context.Context, id uuid.UUID) error {\n\ts.cancelSchedulingMu.Lock()\n\tdefer s.cancelSchedulingMu.Unlock()\n\n\tcancel, ok := s.cancelScheduling[id]\n\tif !ok {\n\t\treturn fmt.Errorf(\"command %s not scheduled\", id)\n\t}\n\n\tclose(cancel)\n\n\treturn nil\n}\n\nfunc (s *Scheduler) loadCommands() error {\n\tcommands, err := s.Commands(context.Background())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not load scheduled commands: %w\", err)\n\t}\n\n\tfor _, pc := range commands {\n\t\tsc := &scheduledCommand{\n\t\t\tid: pc.ID,\n\t\t\tctx: pc.Context,\n\t\t\tcmd: pc.Command,\n\t\t\texecuteAt: pc.ExecuteAt,\n\t\t}\n\n\t\tselect {\n\t\tcase s.cmdCh <- sc:\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"could not schedule command: %w\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (s *Scheduler) run() {\n\tvar wg sync.WaitGroup\n\nloop:\n\tfor {\n\t\tselect {\n\t\tcase <-s.cctx.Done():\n\t\t\tbreak loop\n\t\tcase sc := <-s.cmdCh:\n\t\t\twg.Add(1)\n\n\t\t\ts.cancelSchedulingMu.Lock()\n\t\t\tcancel := make(chan struct{})\n\t\t\ts.cancelScheduling[sc.id] = cancel\n\t\t\ts.cancelSchedulingMu.Unlock()\n\n\t\t\tgo func(cancel chan struct{}) {\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\tt := time.NewTimer(time.Until(sc.executeAt))\n\t\t\t\tdefer t.Stop()\n\n\t\t\t\tselect {\n\t\t\t\tcase <-s.cctx.Done():\n\t\t\t\t\t\/\/ Stop without removing persisted cmd.\n\t\t\t\tcase <-t.C:\n\t\t\t\t\tif err := s.h.HandleCommand(sc.ctx, sc.cmd); err != nil {\n\t\t\t\t\t\t\/\/ Always try to deliver errors.\n\t\t\t\t\t\ts.errCh <- &Error{\n\t\t\t\t\t\t\tErr: err,\n\t\t\t\t\t\t\tCtx: sc.ctx,\n\t\t\t\t\t\t\tCommand: sc.cmd,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif err := s.repo.Remove(sc.ctx, sc.id); err != nil {\n\t\t\t\t\t\ts.errCh <- &Error{\n\t\t\t\t\t\t\tErr: fmt.Errorf(\"could not remove persisted command: %w\", err),\n\t\t\t\t\t\t\tCtx: sc.ctx,\n\t\t\t\t\t\t\tCommand: sc.cmd,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\tcase <-cancel:\n\t\t\t\t\tif err := s.repo.Remove(sc.ctx, sc.id); err != nil {\n\t\t\t\t\t\ts.errCh <- &Error{\n\t\t\t\t\t\t\tErr: fmt.Errorf(\"could not remove persisted command: %w\", err),\n\t\t\t\t\t\t\tCtx: sc.ctx,\n\t\t\t\t\t\t\tCommand: sc.cmd,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\ts.errCh <- &Error{\n\t\t\t\t\t\tErr: ErrCanceled,\n\t\t\t\t\t\tCtx: sc.ctx,\n\t\t\t\t\t\tCommand: sc.cmd,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}(cancel)\n\t\t}\n\t}\n\n\twg.Wait()\n\n\tclose(s.done)\n}\n\n\/\/ Error is an async error containing the error and the command.\ntype Error struct {\n\t\/\/ Err is the error that happened when handling the command.\n\tErr error\n\t\/\/ Ctx is the context used when the error happened.\n\tCtx context.Context\n\t\/\/ Command is the command handeled when the error happened.\n\tCommand eh.Command\n}\n\n\/\/ Error implements the Error method of the error interface.\nfunc (e *Error) Error() string {\n\treturn fmt.Sprintf(\"%s (%s): %s\", e.Command.CommandType(), e.Command.AggregateID(), e.Err.Error())\n}\n\n\/\/ Unwrap implements the errors.Unwrap method.\nfunc (e *Error) Unwrap() error {\n\treturn e.Err\n}\n\n\/\/ Cause implements the github.com\/pkg\/errors Unwrap method.\nfunc (e *Error) Cause() error {\n\treturn e.Unwrap()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Http provider\npackage http\n\nimport (\n\t\"fmt\"\n\t\"github.com\/pierrre\/imageserver\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n)\n\nvar contentTypeRegexp, _ = regexp.Compile(\"^image\/(.+)$\")\n\n\/\/ Returns image from an http source\n\/\/\n\/\/ If the source is not an url, the string representation of the source will be used to create one.\n\/\/\n\/\/ Returns an error if the http status code is not 200 (OK).\n\/\/\n\/\/ The image type is determined by the \"Content-Type\" header.\ntype HttpProvider struct {\n}\n\nfunc (provider *HttpProvider) Get(source interface{}, parameters imageserver.Parameters) (*imageserver.Image, error) {\n\tsourceUrl, err := provider.getSourceUrl(source)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresponse, err := provider.request(sourceUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer response.Body.Close()\n\tif err = provider.checkResponse(response); err != nil {\n\t\treturn nil, err\n\t}\n\timage, err := provider.createImage(response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn image, nil\n}\n\nfunc (provider *HttpProvider) getSourceUrl(source interface{}) (*url.URL, error) {\n\tsourceUrl, ok := source.(*url.URL)\n\tif !ok {\n\t\tvar err error\n\t\tsourceUrl, err = url.ParseRequestURI(fmt.Sprint(source))\n\t\tif err != nil {\n\t\t\treturn nil, imageserver.NewError(\"Invalid source url\")\n\t\t}\n\t}\n\tif sourceUrl.Scheme != \"http\" && sourceUrl.Scheme != \"https\" {\n\t\treturn nil, imageserver.NewError(\"Invalid source scheme\")\n\t}\n\treturn sourceUrl, nil\n}\n\nfunc (provider *HttpProvider) request(sourceUrl *url.URL) (*http.Response, error) {\n\t\/\/TODO optional http client\n\treturn http.Get(sourceUrl.String())\n}\n\nfunc (provider *HttpProvider) checkResponse(response *http.Response) error {\n\tif response.StatusCode != http.StatusOK {\n\t\treturn imageserver.NewError(fmt.Sprintf(\"Error %d while downloading source\", response.StatusCode))\n\t}\n\treturn nil\n}\n\nfunc (provider *HttpProvider) createImage(response *http.Response) (*imageserver.Image, error) {\n\timage := &imageserver.Image{}\n\tprovider.parseType(response, image)\n\tif err := provider.parseData(response, image); err != nil {\n\t\treturn nil, err\n\t}\n\treturn image, nil\n}\n\nfunc (provider *HttpProvider) parseType(response *http.Response, image *imageserver.Image) {\n\tcontentType := response.Header.Get(\"Content-Type\")\n\tif len(contentType) > 0 {\n\t\tmatches := contentTypeRegexp.FindStringSubmatch(contentType)\n\t\tif matches != nil && len(matches) == 2 {\n\t\t\timage.Type = matches[1]\n\t\t}\n\t}\n}\n\nfunc (provider *HttpProvider) parseData(response *http.Response, image *imageserver.Image) error {\n\tdata, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\timage.Data = data\n\treturn nil\n}\n<commit_msg>Refactor header parsing in http provider<commit_after>\/\/ Http provider\npackage http\n\nimport (\n\t\"fmt\"\n\t\"github.com\/pierrre\/imageserver\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n)\n\nvar contentTypeRegexp, _ = regexp.Compile(\"^image\/(.+)$\")\n\n\/\/ Returns image from an http source\n\/\/\n\/\/ If the source is not an url, the string representation of the source will be used to create one.\n\/\/\n\/\/ Returns an error if the http status code is not 200 (OK).\n\/\/\n\/\/ The image type is determined by the \"Content-Type\" header.\ntype HttpProvider struct {\n}\n\nfunc (provider *HttpProvider) Get(source interface{}, parameters imageserver.Parameters) (*imageserver.Image, error) {\n\tsourceUrl, err := provider.getSourceUrl(source)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresponse, err := provider.request(sourceUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer response.Body.Close()\n\tif err = provider.checkResponse(response); err != nil {\n\t\treturn nil, err\n\t}\n\timage, err := provider.createImage(response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn image, nil\n}\n\nfunc (provider *HttpProvider) getSourceUrl(source interface{}) (*url.URL, error) {\n\tsourceUrl, ok := source.(*url.URL)\n\tif !ok {\n\t\tvar err error\n\t\tsourceUrl, err = url.ParseRequestURI(fmt.Sprint(source))\n\t\tif err != nil {\n\t\t\treturn nil, imageserver.NewError(\"Invalid source url\")\n\t\t}\n\t}\n\tif sourceUrl.Scheme != \"http\" && sourceUrl.Scheme != \"https\" {\n\t\treturn nil, imageserver.NewError(\"Invalid source scheme\")\n\t}\n\treturn sourceUrl, nil\n}\n\nfunc (provider *HttpProvider) request(sourceUrl *url.URL) (*http.Response, error) {\n\t\/\/TODO optional http client\n\treturn http.Get(sourceUrl.String())\n}\n\nfunc (provider *HttpProvider) checkResponse(response *http.Response) error {\n\tif response.StatusCode != http.StatusOK {\n\t\treturn imageserver.NewError(fmt.Sprintf(\"Error %d while downloading source\", response.StatusCode))\n\t}\n\treturn nil\n}\n\nfunc (provider *HttpProvider) createImage(response *http.Response) (*imageserver.Image, error) {\n\timage := &imageserver.Image{}\n\tprovider.parseType(response, image)\n\tif err := provider.parseData(response, image); err != nil {\n\t\treturn nil, err\n\t}\n\treturn image, nil\n}\n\nfunc (provider *HttpProvider) parseType(response *http.Response, image *imageserver.Image) {\n\tcontentType := response.Header.Get(\"Content-Type\")\n\tif len(contentType) == 0 {\n\t\treturn\n\t}\n\tmatches := contentTypeRegexp.FindStringSubmatch(contentType)\n\tif matches == nil || len(matches) != 2 {\n\t\treturn\n\t}\n\timage.Type = matches[1]\n}\n\nfunc (provider *HttpProvider) parseData(response *http.Response, image *imageserver.Image) error {\n\tdata, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\timage.Data = data\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package report contains helpers for writing comments and updating\n\/\/ statuses in Github.\npackage report\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"k8s.io\/test-infra\/prow\/apis\/prowjobs\/v1\"\n\t\"k8s.io\/test-infra\/prow\/github\"\n\t\"k8s.io\/test-infra\/prow\/plugins\"\n)\n\nconst (\n\tcommentTag = \"<!-- test report -->\"\n)\n\n\/\/ GithubClient provides a client interface to report job status updates\n\/\/ through Github comments.\ntype GithubClient interface {\n\tBotName() (string, error)\n\tCreateStatus(org, repo, ref string, s github.Status) error\n\tListIssueComments(org, repo string, number int) ([]github.IssueComment, error)\n\tCreateComment(org, repo string, number int, comment string) error\n\tDeleteComment(org, repo string, ID int) error\n\tEditComment(org, repo string, ID int, comment string) error\n}\n\n\/\/ prowjobStateToGithubStatus maps prowjob status to github states.\n\/\/ Github states can be one of error, failure, pending, or success.\n\/\/ https:\/\/developer.github.com\/v3\/repos\/statuses\/#create-a-status\nfunc prowjobStateToGithubStatus(pjState v1.ProwJobState) (string, error) {\n\tswitch pjState {\n\tcase v1.TriggeredState:\n\t\treturn github.StatusPending, nil\n\tcase v1.PendingState:\n\t\treturn github.StatusPending, nil\n\tcase v1.SuccessState:\n\t\treturn github.StatusSuccess, nil\n\tcase v1.ErrorState:\n\t\treturn github.StatusError, nil\n\tcase v1.FailureState:\n\t\treturn github.StatusFailure, nil\n\tcase v1.AbortedState:\n\t\treturn github.StatusFailure, nil\n\t}\n\treturn \"\", fmt.Errorf(\"Unknown prowjob state: %v\", pjState)\n}\n\nconst (\n\tmaxLen = 140 \/\/ https:\/\/developer.github.com\/v3\/repos\/deployments\/#parameters-2\n\telide = \" ... \"\n)\n\n\/\/ truncate converts \"really long messages\" into \"really ... messages\".\nfunc truncate(in string) string {\n\tconst (\n\t\thalf = (maxLen - len(elide)) \/ 2\n\t)\n\tif len(in) <= maxLen {\n\t\treturn in\n\t}\n\treturn in[:half] + elide + in[len(in)-half:]\n}\n\n\/\/ reportStatus should be called on status different from Success.\n\/\/ Once a parent ProwJob is pending, all children should be marked as Pending\n\/\/ Same goes for failed status.\nfunc reportStatus(ghc GithubClient, pj v1.ProwJob, childDescription string) error {\n\trefs := pj.Spec.Refs\n\tif pj.Spec.Report {\n\t\tcontextState, err := prowjobStateToGithubStatus(pj.Status.State)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := ghc.CreateStatus(refs.Org, refs.Repo, refs.Pulls[0].SHA, github.Status{\n\t\t\tState: contextState,\n\t\t\tDescription: truncate(pj.Status.Description),\n\t\t\tContext: pj.Spec.Context, \/\/ consider truncating this too\n\t\t\tTargetURL: pj.Status.URL,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Report is creating\/updating\/removing reports in Github based on the state of\n\/\/ the provided ProwJob.\nfunc Report(ghc GithubClient, reportTemplate *template.Template, pj v1.ProwJob) error {\n\tif ghc == nil {\n\t\treturn fmt.Errorf(\"trying to report pj %s, but found empty github client\", pj.ObjectMeta.Name)\n\t}\n\tif !pj.Spec.Report {\n\t\treturn nil\n\t}\n\trefs := pj.Spec.Refs\n\tif len(refs.Pulls) != 1 {\n\t\treturn fmt.Errorf(\"prowjob %s has %d pulls, not 1\", pj.ObjectMeta.Name, len(refs.Pulls))\n\t}\n\tchildDescription := fmt.Sprintf(\"Waiting on: %s\", pj.Spec.Context)\n\tif err := reportStatus(ghc, pj, childDescription); err != nil {\n\t\treturn fmt.Errorf(\"error setting status: %v\", err)\n\t}\n\t\/\/ Report manually aborted Jenkins jobs and jobs with invalid pod specs alongside\n\t\/\/ test successes\/failures.\n\tif !pj.Complete() {\n\t\treturn nil\n\t}\n\tics, err := ghc.ListIssueComments(refs.Org, refs.Repo, refs.Pulls[0].Number)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error listing comments: %v\", err)\n\t}\n\tbotName, err := ghc.BotName()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting bot name: %v\", err)\n\t}\n\tdeletes, entries, updateID := parseIssueComments(pj, botName, ics)\n\tfor _, delete := range deletes {\n\t\tif err := ghc.DeleteComment(refs.Org, refs.Repo, delete); err != nil {\n\t\t\treturn fmt.Errorf(\"error deleting comment: %v\", err)\n\t\t}\n\t}\n\tif len(entries) > 0 {\n\t\tcomment, err := createComment(reportTemplate, pj, entries)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"generating comment: %v\", err)\n\t\t}\n\t\tif updateID == 0 {\n\t\t\tif err := ghc.CreateComment(refs.Org, refs.Repo, refs.Pulls[0].Number, comment); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error creating comment: %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tif err := ghc.EditComment(refs.Org, refs.Repo, updateID, comment); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error updating comment: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ parseIssueComments returns a list of comments to delete, a list of table\n\/\/ entries, and the ID of the comment to update. If there are no table entries\n\/\/ then don't make a new comment. Otherwise, if the comment to update is 0,\n\/\/ create a new comment.\nfunc parseIssueComments(pj v1.ProwJob, botName string, ics []github.IssueComment) ([]int, []string, int) {\n\tvar delete []int\n\tvar previousComments []int\n\tvar latestComment int\n\tvar entries []string\n\t\/\/ First accumulate result entries and comment IDs\n\tfor _, ic := range ics {\n\t\tif ic.User.Login != botName {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Old report comments started with the context. Delete them.\n\t\t\/\/ TODO(spxtr): Delete this check a few weeks after this merges.\n\t\tif strings.HasPrefix(ic.Body, pj.Spec.Context) {\n\t\t\tdelete = append(delete, ic.ID)\n\t\t}\n\t\tif !strings.Contains(ic.Body, commentTag) {\n\t\t\tcontinue\n\t\t}\n\t\tif latestComment != 0 {\n\t\t\tpreviousComments = append(previousComments, latestComment)\n\t\t}\n\t\tlatestComment = ic.ID\n\t\tvar tracking bool\n\t\tfor _, line := range strings.Split(ic.Body, \"\\n\") {\n\t\t\tline = strings.TrimSpace(line)\n\t\t\tif strings.HasPrefix(line, \"---\") {\n\t\t\t\ttracking = true\n\t\t\t} else if len(line) == 0 {\n\t\t\t\ttracking = false\n\t\t\t} else if tracking {\n\t\t\t\tentries = append(entries, line)\n\t\t\t}\n\t\t}\n\t}\n\tvar newEntries []string\n\t\/\/ Next decide which entries to keep.\n\tfor i := range entries {\n\t\tkeep := true\n\t\tf1 := strings.Split(entries[i], \" | \")\n\t\tfor j := range entries {\n\t\t\tif i == j {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tf2 := strings.Split(entries[j], \" | \")\n\t\t\t\/\/ Use the newer results if there are multiple.\n\t\t\tif j > i && f2[0] == f1[0] {\n\t\t\t\tkeep = false\n\t\t\t}\n\t\t}\n\t\t\/\/ Use the current result if there is an old one.\n\t\tif pj.Spec.Context == f1[0] {\n\t\t\tkeep = false\n\t\t}\n\t\tif keep {\n\t\t\tnewEntries = append(newEntries, entries[i])\n\t\t}\n\t}\n\tvar createNewComment bool\n\tif string(pj.Status.State) == github.StatusFailure {\n\t\tnewEntries = append(newEntries, createEntry(pj))\n\t\tcreateNewComment = true\n\t}\n\tdelete = append(delete, previousComments...)\n\tif (createNewComment || len(newEntries) == 0) && latestComment != 0 {\n\t\tdelete = append(delete, latestComment)\n\t\tlatestComment = 0\n\t}\n\treturn delete, newEntries, latestComment\n}\n\nfunc createEntry(pj v1.ProwJob) string {\n\treturn strings.Join([]string{\n\t\tpj.Spec.Context,\n\t\tpj.Spec.Refs.Pulls[0].SHA,\n\t\tfmt.Sprintf(\"[link](%s)\", pj.Status.URL),\n\t\tfmt.Sprintf(\"`%s`\", pj.Spec.RerunCommand),\n\t}, \" | \")\n}\n\n\/\/ createComment take a ProwJob and a list of entries generated with\n\/\/ createEntry and returns a nicely formatted comment. It may fail if template\n\/\/ execution fails.\nfunc createComment(reportTemplate *template.Template, pj v1.ProwJob, entries []string) (string, error) {\n\tplural := \"\"\n\tif len(entries) > 1 {\n\t\tplural = \"s\"\n\t}\n\tvar b bytes.Buffer\n\tif reportTemplate != nil {\n\t\tif err := reportTemplate.Execute(&b, &pj); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\tlines := []string{\n\t\tfmt.Sprintf(\"@%s: The following test%s **failed**, say `\/retest` to rerun them all:\", pj.Spec.Refs.Pulls[0].Author, plural),\n\t\t\"\",\n\t\t\"Test name | Commit | Details | Rerun command\",\n\t\t\"--- | --- | --- | ---\",\n\t}\n\tlines = append(lines, entries...)\n\tif reportTemplate != nil {\n\t\tlines = append(lines, \"\", b.String())\n\t}\n\tlines = append(lines, []string{\n\t\t\"\",\n\t\t\"<details>\",\n\t\t\"\",\n\t\tplugins.AboutThisBot,\n\t\t\"<\/details>\",\n\t\tcommentTag,\n\t}...)\n\treturn strings.Join(lines, \"\\n\"), nil\n}\n<commit_msg>Use the prowapi import alias in the report lib<commit_after>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package report contains helpers for writing comments and updating\n\/\/ statuses in Github.\npackage report\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strings\"\n\t\"text\/template\"\n\n\tprowapi \"k8s.io\/test-infra\/prow\/apis\/prowjobs\/v1\"\n\t\"k8s.io\/test-infra\/prow\/github\"\n\t\"k8s.io\/test-infra\/prow\/plugins\"\n)\n\nconst (\n\tcommentTag = \"<!-- test report -->\"\n)\n\n\/\/ GithubClient provides a client interface to report job status updates\n\/\/ through Github comments.\ntype GithubClient interface {\n\tBotName() (string, error)\n\tCreateStatus(org, repo, ref string, s github.Status) error\n\tListIssueComments(org, repo string, number int) ([]github.IssueComment, error)\n\tCreateComment(org, repo string, number int, comment string) error\n\tDeleteComment(org, repo string, ID int) error\n\tEditComment(org, repo string, ID int, comment string) error\n}\n\n\/\/ prowjobStateToGithubStatus maps prowjob status to github states.\n\/\/ Github states can be one of error, failure, pending, or success.\n\/\/ https:\/\/developer.github.com\/v3\/repos\/statuses\/#create-a-status\nfunc prowjobStateToGithubStatus(pjState prowapi.ProwJobState) (string, error) {\n\tswitch pjState {\n\tcase prowapi.TriggeredState:\n\t\treturn github.StatusPending, nil\n\tcase prowapi.PendingState:\n\t\treturn github.StatusPending, nil\n\tcase prowapi.SuccessState:\n\t\treturn github.StatusSuccess, nil\n\tcase prowapi.ErrorState:\n\t\treturn github.StatusError, nil\n\tcase prowapi.FailureState:\n\t\treturn github.StatusFailure, nil\n\tcase prowapi.AbortedState:\n\t\treturn github.StatusFailure, nil\n\t}\n\treturn \"\", fmt.Errorf(\"Unknown prowjob state: %v\", pjState)\n}\n\nconst (\n\tmaxLen = 140 \/\/ https:\/\/developer.github.com\/v3\/repos\/deployments\/#parameters-2\n\telide = \" ... \"\n)\n\n\/\/ truncate converts \"really long messages\" into \"really ... messages\".\nfunc truncate(in string) string {\n\tconst (\n\t\thalf = (maxLen - len(elide)) \/ 2\n\t)\n\tif len(in) <= maxLen {\n\t\treturn in\n\t}\n\treturn in[:half] + elide + in[len(in)-half:]\n}\n\n\/\/ reportStatus should be called on status different from Success.\n\/\/ Once a parent ProwJob is pending, all children should be marked as Pending\n\/\/ Same goes for failed status.\nfunc reportStatus(ghc GithubClient, pj prowapi.ProwJob, childDescription string) error {\n\trefs := pj.Spec.Refs\n\tif pj.Spec.Report {\n\t\tcontextState, err := prowjobStateToGithubStatus(pj.Status.State)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := ghc.CreateStatus(refs.Org, refs.Repo, refs.Pulls[0].SHA, github.Status{\n\t\t\tState: contextState,\n\t\t\tDescription: truncate(pj.Status.Description),\n\t\t\tContext: pj.Spec.Context, \/\/ consider truncating this too\n\t\t\tTargetURL: pj.Status.URL,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Report is creating\/updating\/removing reports in Github based on the state of\n\/\/ the provided ProwJob.\nfunc Report(ghc GithubClient, reportTemplate *template.Template, pj prowapi.ProwJob) error {\n\tif ghc == nil {\n\t\treturn fmt.Errorf(\"trying to report pj %s, but found empty github client\", pj.ObjectMeta.Name)\n\t}\n\tif !pj.Spec.Report {\n\t\treturn nil\n\t}\n\trefs := pj.Spec.Refs\n\tif len(refs.Pulls) != 1 {\n\t\treturn fmt.Errorf(\"prowjob %s has %d pulls, not 1\", pj.ObjectMeta.Name, len(refs.Pulls))\n\t}\n\tchildDescription := fmt.Sprintf(\"Waiting on: %s\", pj.Spec.Context)\n\tif err := reportStatus(ghc, pj, childDescription); err != nil {\n\t\treturn fmt.Errorf(\"error setting status: %v\", err)\n\t}\n\t\/\/ Report manually aborted Jenkins jobs and jobs with invalid pod specs alongside\n\t\/\/ test successes\/failures.\n\tif !pj.Complete() {\n\t\treturn nil\n\t}\n\tics, err := ghc.ListIssueComments(refs.Org, refs.Repo, refs.Pulls[0].Number)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error listing comments: %v\", err)\n\t}\n\tbotName, err := ghc.BotName()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error getting bot name: %v\", err)\n\t}\n\tdeletes, entries, updateID := parseIssueComments(pj, botName, ics)\n\tfor _, delete := range deletes {\n\t\tif err := ghc.DeleteComment(refs.Org, refs.Repo, delete); err != nil {\n\t\t\treturn fmt.Errorf(\"error deleting comment: %v\", err)\n\t\t}\n\t}\n\tif len(entries) > 0 {\n\t\tcomment, err := createComment(reportTemplate, pj, entries)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"generating comment: %v\", err)\n\t\t}\n\t\tif updateID == 0 {\n\t\t\tif err := ghc.CreateComment(refs.Org, refs.Repo, refs.Pulls[0].Number, comment); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error creating comment: %v\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tif err := ghc.EditComment(refs.Org, refs.Repo, updateID, comment); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error updating comment: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ parseIssueComments returns a list of comments to delete, a list of table\n\/\/ entries, and the ID of the comment to update. If there are no table entries\n\/\/ then don't make a new comment. Otherwise, if the comment to update is 0,\n\/\/ create a new comment.\nfunc parseIssueComments(pj prowapi.ProwJob, botName string, ics []github.IssueComment) ([]int, []string, int) {\n\tvar delete []int\n\tvar previousComments []int\n\tvar latestComment int\n\tvar entries []string\n\t\/\/ First accumulate result entries and comment IDs\n\tfor _, ic := range ics {\n\t\tif ic.User.Login != botName {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ Old report comments started with the context. Delete them.\n\t\t\/\/ TODO(spxtr): Delete this check a few weeks after this merges.\n\t\tif strings.HasPrefix(ic.Body, pj.Spec.Context) {\n\t\t\tdelete = append(delete, ic.ID)\n\t\t}\n\t\tif !strings.Contains(ic.Body, commentTag) {\n\t\t\tcontinue\n\t\t}\n\t\tif latestComment != 0 {\n\t\t\tpreviousComments = append(previousComments, latestComment)\n\t\t}\n\t\tlatestComment = ic.ID\n\t\tvar tracking bool\n\t\tfor _, line := range strings.Split(ic.Body, \"\\n\") {\n\t\t\tline = strings.TrimSpace(line)\n\t\t\tif strings.HasPrefix(line, \"---\") {\n\t\t\t\ttracking = true\n\t\t\t} else if len(line) == 0 {\n\t\t\t\ttracking = false\n\t\t\t} else if tracking {\n\t\t\t\tentries = append(entries, line)\n\t\t\t}\n\t\t}\n\t}\n\tvar newEntries []string\n\t\/\/ Next decide which entries to keep.\n\tfor i := range entries {\n\t\tkeep := true\n\t\tf1 := strings.Split(entries[i], \" | \")\n\t\tfor j := range entries {\n\t\t\tif i == j {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tf2 := strings.Split(entries[j], \" | \")\n\t\t\t\/\/ Use the newer results if there are multiple.\n\t\t\tif j > i && f2[0] == f1[0] {\n\t\t\t\tkeep = false\n\t\t\t}\n\t\t}\n\t\t\/\/ Use the current result if there is an old one.\n\t\tif pj.Spec.Context == f1[0] {\n\t\t\tkeep = false\n\t\t}\n\t\tif keep {\n\t\t\tnewEntries = append(newEntries, entries[i])\n\t\t}\n\t}\n\tvar createNewComment bool\n\tif string(pj.Status.State) == github.StatusFailure {\n\t\tnewEntries = append(newEntries, createEntry(pj))\n\t\tcreateNewComment = true\n\t}\n\tdelete = append(delete, previousComments...)\n\tif (createNewComment || len(newEntries) == 0) && latestComment != 0 {\n\t\tdelete = append(delete, latestComment)\n\t\tlatestComment = 0\n\t}\n\treturn delete, newEntries, latestComment\n}\n\nfunc createEntry(pj prowapi.ProwJob) string {\n\treturn strings.Join([]string{\n\t\tpj.Spec.Context,\n\t\tpj.Spec.Refs.Pulls[0].SHA,\n\t\tfmt.Sprintf(\"[link](%s)\", pj.Status.URL),\n\t\tfmt.Sprintf(\"`%s`\", pj.Spec.RerunCommand),\n\t}, \" | \")\n}\n\n\/\/ createComment take a ProwJob and a list of entries generated with\n\/\/ createEntry and returns a nicely formatted comment. It may fail if template\n\/\/ execution fails.\nfunc createComment(reportTemplate *template.Template, pj prowapi.ProwJob, entries []string) (string, error) {\n\tplural := \"\"\n\tif len(entries) > 1 {\n\t\tplural = \"s\"\n\t}\n\tvar b bytes.Buffer\n\tif reportTemplate != nil {\n\t\tif err := reportTemplate.Execute(&b, &pj); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\tlines := []string{\n\t\tfmt.Sprintf(\"@%s: The following test%s **failed**, say `\/retest` to rerun them all:\", pj.Spec.Refs.Pulls[0].Author, plural),\n\t\t\"\",\n\t\t\"Test name | Commit | Details | Rerun command\",\n\t\t\"--- | --- | --- | ---\",\n\t}\n\tlines = append(lines, entries...)\n\tif reportTemplate != nil {\n\t\tlines = append(lines, \"\", b.String())\n\t}\n\tlines = append(lines, []string{\n\t\t\"\",\n\t\t\"<details>\",\n\t\t\"\",\n\t\tplugins.AboutThisBot,\n\t\t\"<\/details>\",\n\t\tcommentTag,\n\t}...)\n\treturn strings.Join(lines, \"\\n\"), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage control\n\nimport (\n\t\"errors\"\n\n\tclientv2 \"github.com\/coreos\/etcd\/client\"\n\t\"github.com\/coreos\/etcd\/clientv3\"\n\tconsulapi \"github.com\/hashicorp\/consul\/api\"\n\t\"github.com\/samuel\/go-zookeeper\/zk\"\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype ReqHandler func(ctx context.Context, req *request) error\n\nfunc newPutEtcd2(conn clientv2.KeysAPI) ReqHandler {\n\treturn func(ctx context.Context, req *request) error {\n\t\top := req.etcdv2Op\n\t\t_, err := conn.Set(context.Background(), op.key, op.value, nil)\n\t\treturn err\n\t}\n}\n\nfunc newPutEtcd3(conn clientv3.KV) ReqHandler {\n\treturn func(ctx context.Context, req *request) error {\n\t\t_, err := conn.Do(ctx, req.etcdv3Op)\n\t\treturn err\n\t}\n}\n\nfunc newPutOverwriteZK(conn *zk.Conn) ReqHandler {\n\treturn func(ctx context.Context, req *request) error {\n\t\top := req.zkOp\n\t\t_, err := conn.Set(op.key, op.value, int32(-1))\n\t\treturn err\n\t}\n}\n\nfunc newPutCreateZK(conn *zk.Conn) ReqHandler {\n\t\/\/ samekey\n\treturn func(ctx context.Context, req *request) error {\n\t\top := req.zkOp\n\t\t_, err := conn.Create(op.key, op.value, zkCreateFlags, zkCreateAcl)\n\t\treturn err\n\t}\n}\n\nfunc newPutConsul(conn *consulapi.KV) ReqHandler {\n\treturn func(ctx context.Context, req *request) error {\n\t\top := req.consulOp\n\t\t_, err := conn.Put(&consulapi.KVPair{Key: op.key, Value: op.value}, nil)\n\t\treturn err\n\t}\n}\n\nfunc newGetEtcd2(conn clientv2.KeysAPI) ReqHandler {\n\treturn func(ctx context.Context, req *request) error {\n\t\t_, err := conn.Get(ctx, req.etcdv2Op.key, nil)\n\t\treturn err\n\t}\n}\n\nfunc newGetEtcd3(conn clientv3.KV) ReqHandler {\n\treturn func(ctx context.Context, req *request) error {\n\t\t_, err := conn.Do(ctx, req.etcdv3Op)\n\t\treturn err\n\t}\n}\n\nfunc newGetZK(conn *zk.Conn) ReqHandler {\n\treturn func(ctx context.Context, req *request) error {\n\t\terrt := \"\"\n\t\tif !req.zkOp.staleRead {\n\t\t\t_, err := conn.Sync(req.zkOp.key)\n\t\t\tif err != nil {\n\t\t\t\terrt += err.Error()\n\t\t\t}\n\t\t}\n\t\t_, _, err := conn.Get(req.zkOp.key)\n\t\tif err != nil {\n\t\t\terrt += \";\" + err.Error()\n\t\t}\n\t\tif errt != \"\" {\n\t\t\treturn errors.New(errt)\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc newGetConsul(conn *consulapi.KV) ReqHandler {\n\treturn func(ctx context.Context, req *request) error {\n\t\t_, _, err := conn.Get(req.consulOp.key, &consulapi.QueryOptions{AllowStale: req.consulOp.staleRead})\n\t\treturn err\n\t}\n}\n<commit_msg>control: add more logging, Zk get with prefix \"\/\"<commit_after>\/\/ Copyright 2016 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage control\n\nimport (\n\t\"errors\"\n\n\t\"fmt\"\n\n\tclientv2 \"github.com\/coreos\/etcd\/client\"\n\t\"github.com\/coreos\/etcd\/clientv3\"\n\tconsulapi \"github.com\/hashicorp\/consul\/api\"\n\t\"github.com\/samuel\/go-zookeeper\/zk\"\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype ReqHandler func(ctx context.Context, req *request) error\n\nfunc newPutEtcd2(conn clientv2.KeysAPI) ReqHandler {\n\treturn func(ctx context.Context, req *request) error {\n\t\top := req.etcdv2Op\n\t\t_, err := conn.Set(context.Background(), op.key, op.value, nil)\n\t\treturn err\n\t}\n}\n\nfunc newPutEtcd3(conn clientv3.KV) ReqHandler {\n\treturn func(ctx context.Context, req *request) error {\n\t\t_, err := conn.Do(ctx, req.etcdv3Op)\n\t\treturn err\n\t}\n}\n\nfunc newPutOverwriteZK(conn *zk.Conn) ReqHandler {\n\treturn func(ctx context.Context, req *request) error {\n\t\top := req.zkOp\n\t\t_, err := conn.Set(op.key, op.value, int32(-1))\n\t\treturn err\n\t}\n}\n\nfunc newPutCreateZK(conn *zk.Conn) ReqHandler {\n\t\/\/ samekey\n\treturn func(ctx context.Context, req *request) error {\n\t\top := req.zkOp\n\t\t_, err := conn.Create(op.key, op.value, zkCreateFlags, zkCreateAcl)\n\t\treturn err\n\t}\n}\n\nfunc newPutConsul(conn *consulapi.KV) ReqHandler {\n\treturn func(ctx context.Context, req *request) error {\n\t\top := req.consulOp\n\t\t_, err := conn.Put(&consulapi.KVPair{Key: op.key, Value: op.value}, nil)\n\t\treturn err\n\t}\n}\n\nfunc newGetEtcd2(conn clientv2.KeysAPI) ReqHandler {\n\treturn func(ctx context.Context, req *request) error {\n\t\t_, err := conn.Get(ctx, req.etcdv2Op.key, nil)\n\t\treturn err\n\t}\n}\n\nfunc newGetEtcd3(conn clientv3.KV) ReqHandler {\n\treturn func(ctx context.Context, req *request) error {\n\t\t_, err := conn.Do(ctx, req.etcdv3Op)\n\t\treturn err\n\t}\n}\n\nfunc newGetZK(conn *zk.Conn) ReqHandler {\n\treturn func(ctx context.Context, req *request) error {\n\t\terrt := \"\"\n\t\tif !req.zkOp.staleRead {\n\t\t\t_, err := conn.Sync(\"\/\" + req.zkOp.key)\n\t\t\tif err != nil {\n\t\t\t\terrt += err.Error()\n\t\t\t}\n\t\t}\n\t\t_, _, err := conn.Get(\"\/\" + req.zkOp.key)\n\t\tif err != nil {\n\t\t\tif errt != \"\" {\n\t\t\t\terrt += \"; \"\n\t\t\t}\n\t\t\terrt += fmt.Sprintf(\"%q while getting %q\", err.Error(), \"\/\"+req.zkOp.key)\n\t\t}\n\t\tif errt != \"\" {\n\t\t\treturn errors.New(errt)\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc newGetConsul(conn *consulapi.KV) ReqHandler {\n\treturn func(ctx context.Context, req *request) error {\n\t\t_, _, err := conn.Get(req.consulOp.key, &consulapi.QueryOptions{AllowStale: req.consulOp.staleRead})\n\t\treturn err\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package apptail\n\nimport (\n\t\"fmt\"\n\t\"github.com\/ActiveState\/log\"\n\t\"github.com\/ActiveState\/tail\"\n\t\"github.com\/ActiveState\/zmqpubsub\"\n\t\"logyard\"\n\t\"logyard\/clients\/messagecommon\"\n\t\"os\"\n\t\"time\"\n)\n\n\/\/ Instance is the NATS message sent by dea_ng to notify of new instances.\ntype Instance struct {\n\tAppGUID string\n\tAppName string\n\tAppSpace string `json:\"space\"`\n\tType string\n\tIndex int\n\tDockerId string `json:\"docker_id\"`\n\tLogFiles map[string]string\n}\n\nfunc (instance *Instance) Identifier() string {\n\treturn fmt.Sprintf(\"%v[%v:%v]\", instance.AppName, instance.Index, instance.DockerId[:ID_LENGTH])\n}\n\n\/\/ Tail begins tailing the files for this instance.\nfunc (instance *Instance) Tail() {\n\tlog.Infof(\"Tailing %v logs for %v -- %+v\",\n\t\tinstance.Type, instance.Identifier(), instance)\n\n\tstopCh := make(chan bool)\n\n\tfor name, filename := range instance.LogFiles {\n\t\tgo instance.tailFile(name, filename, stopCh)\n\t}\n\n\tgo func() {\n\t\tDockerListener.WaitForContainer(instance.DockerId)\n\t\tlog.Infof(\"Container for %v exited\", instance.Identifier())\n\t\tclose(stopCh)\n\t}()\n}\n\nfunc (instance *Instance) tailFile(name, filename string, stopCh chan bool) {\n\tvar err error\n\n\tpub := logyard.Broker.NewPublisherMust()\n\tdefer pub.Stop()\n\n\tlimit, err := instance.getReadLimit(pub, name, filename)\n\tif err != nil {\n\t\tlog.Warn(err)\n\t\treturn\n\t}\n\n\ttail, err := tail.TailFile(filename, tail.Config{\n\t\tMaxLineSize: GetConfig().MaxRecordSize,\n\t\tMustExist: true,\n\t\tFollow: true,\n\t\tLocation: &tail.SeekInfo{-limit, os.SEEK_END},\n\t\tReOpen: false,\n\t\tPoll: false,\n\t\tLimitRate: GetConfig().RateLimit})\n\tif err != nil {\n\t\tlog.Warnf(\"Cannot tail file (%s); %s\", filename, err)\n\t\treturn\n\t}\n\nFORLOOP:\n\tfor {\n\t\tselect {\n\t\tcase line, ok := <-tail.Lines:\n\t\t\tif !ok {\n\t\t\t\terr = tail.Wait()\n\t\t\t\tbreak FORLOOP\n\t\t\t}\n\t\t\tinstance.publishLine(pub, name, line)\n\t\tcase <-stopCh:\n\t\t\terr = tail.Stop()\n\t\t\tbreak FORLOOP\n\t\t}\n\t}\n\n\tif err != nil {\n\t\tlog.Warn(err)\n\t}\n\n\tlog.Infof(\"Completed tailing %v log for %v\", name, instance.Identifier())\n}\n\nfunc (instance *Instance) getReadLimit(\n\tpub *zmqpubsub.Publisher,\n\tlogname string,\n\tfilename string) (int64, error) {\n\t\/\/ convert MB to limit in bytes.\n\tfilesizeLimit := GetConfig().FileSizeLimit * 1024 * 1024\n\tif !(filesizeLimit > 0) {\n\t\tpanic(\"invalid value for `read_limit' in apptail config\")\n\t}\n\n\tfi, err := os.Stat(filename)\n\tif err != nil {\n\t\treturn -1, fmt.Errorf(\"Cannot stat file (%s); %s\", filename, err)\n\t}\n\tsize := fi.Size()\n\tlimit := filesizeLimit\n\tif size > filesizeLimit {\n\t\terr := fmt.Errorf(\"Skipping much of a large log file (%s); size (%v bytes) > read_limit (%v bytes)\",\n\t\t\tlogname, size, filesizeLimit)\n\t\t\/\/ Publish special error message.\n\t\tinstance.publishLine(pub, logname, &tail.Line{\n\t\t\tText: err.Error(),\n\t\t\tTime: time.Now(),\n\t\t\tErr: err})\n\t} else {\n\t\tlimit = size\n\t}\n\treturn limit, nil\n}\n\n\/\/ publishLine zmq-publishes a log line corresponding to this instance\nfunc (instance *Instance) publishLine(\n\tpub *zmqpubsub.Publisher,\n\tlogname string,\n\tline *tail.Line) {\n\n\tif line == nil {\n\t\tpanic(\"line is nil\")\n\t}\n\n\tmsg := &Message{\n\t\tLogFilename: logname,\n\t\tSource: instance.Type,\n\t\tInstanceIndex: instance.Index,\n\t\tAppGUID: instance.AppGUID,\n\t\tAppName: instance.AppName,\n\t\tAppSpace: instance.AppSpace,\n\t\tMessageCommon: messagecommon.New(line.Text, line.Time, LocalNodeId()),\n\t}\n\n\tif line.Err != nil {\n\t\t\/\/ Mark this as a special error record, as it is\n\t\t\/\/ coming from tail, not the app.\n\t\tmsg.Source = \"stackato.apptail\"\n\t\tmsg.LogFilename = \"\"\n\t\tlog.Warnf(\"[%s] %s\", instance.AppName, line.Text)\n\t}\n\n\terr := msg.Publish(pub, false)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<commit_msg>expecting AppSpace (case insensitive) as the field in nats message<commit_after>package apptail\n\nimport (\n\t\"fmt\"\n\t\"github.com\/ActiveState\/log\"\n\t\"github.com\/ActiveState\/tail\"\n\t\"github.com\/ActiveState\/zmqpubsub\"\n\t\"logyard\"\n\t\"logyard\/clients\/messagecommon\"\n\t\"os\"\n\t\"time\"\n)\n\n\/\/ Instance is the NATS message sent by dea_ng to notify of new instances.\ntype Instance struct {\n\tAppGUID string\n\tAppName string\n\tAppSpace string\n\tType string\n\tIndex int\n\tDockerId string `json:\"docker_id\"`\n\tLogFiles map[string]string\n}\n\nfunc (instance *Instance) Identifier() string {\n\treturn fmt.Sprintf(\"%v[%v:%v]\", instance.AppName, instance.Index, instance.DockerId[:ID_LENGTH])\n}\n\n\/\/ Tail begins tailing the files for this instance.\nfunc (instance *Instance) Tail() {\n\tlog.Infof(\"Tailing %v logs for %v -- %+v\",\n\t\tinstance.Type, instance.Identifier(), instance)\n\n\tstopCh := make(chan bool)\n\n\tfor name, filename := range instance.LogFiles {\n\t\tgo instance.tailFile(name, filename, stopCh)\n\t}\n\n\tgo func() {\n\t\tDockerListener.WaitForContainer(instance.DockerId)\n\t\tlog.Infof(\"Container for %v exited\", instance.Identifier())\n\t\tclose(stopCh)\n\t}()\n}\n\nfunc (instance *Instance) tailFile(name, filename string, stopCh chan bool) {\n\tvar err error\n\n\tpub := logyard.Broker.NewPublisherMust()\n\tdefer pub.Stop()\n\n\tlimit, err := instance.getReadLimit(pub, name, filename)\n\tif err != nil {\n\t\tlog.Warn(err)\n\t\treturn\n\t}\n\n\ttail, err := tail.TailFile(filename, tail.Config{\n\t\tMaxLineSize: GetConfig().MaxRecordSize,\n\t\tMustExist: true,\n\t\tFollow: true,\n\t\tLocation: &tail.SeekInfo{-limit, os.SEEK_END},\n\t\tReOpen: false,\n\t\tPoll: false,\n\t\tLimitRate: GetConfig().RateLimit})\n\tif err != nil {\n\t\tlog.Warnf(\"Cannot tail file (%s); %s\", filename, err)\n\t\treturn\n\t}\n\nFORLOOP:\n\tfor {\n\t\tselect {\n\t\tcase line, ok := <-tail.Lines:\n\t\t\tif !ok {\n\t\t\t\terr = tail.Wait()\n\t\t\t\tbreak FORLOOP\n\t\t\t}\n\t\t\tinstance.publishLine(pub, name, line)\n\t\tcase <-stopCh:\n\t\t\terr = tail.Stop()\n\t\t\tbreak FORLOOP\n\t\t}\n\t}\n\n\tif err != nil {\n\t\tlog.Warn(err)\n\t}\n\n\tlog.Infof(\"Completed tailing %v log for %v\", name, instance.Identifier())\n}\n\nfunc (instance *Instance) getReadLimit(\n\tpub *zmqpubsub.Publisher,\n\tlogname string,\n\tfilename string) (int64, error) {\n\t\/\/ convert MB to limit in bytes.\n\tfilesizeLimit := GetConfig().FileSizeLimit * 1024 * 1024\n\tif !(filesizeLimit > 0) {\n\t\tpanic(\"invalid value for `read_limit' in apptail config\")\n\t}\n\n\tfi, err := os.Stat(filename)\n\tif err != nil {\n\t\treturn -1, fmt.Errorf(\"Cannot stat file (%s); %s\", filename, err)\n\t}\n\tsize := fi.Size()\n\tlimit := filesizeLimit\n\tif size > filesizeLimit {\n\t\terr := fmt.Errorf(\"Skipping much of a large log file (%s); size (%v bytes) > read_limit (%v bytes)\",\n\t\t\tlogname, size, filesizeLimit)\n\t\t\/\/ Publish special error message.\n\t\tinstance.publishLine(pub, logname, &tail.Line{\n\t\t\tText: err.Error(),\n\t\t\tTime: time.Now(),\n\t\t\tErr: err})\n\t} else {\n\t\tlimit = size\n\t}\n\treturn limit, nil\n}\n\n\/\/ publishLine zmq-publishes a log line corresponding to this instance\nfunc (instance *Instance) publishLine(\n\tpub *zmqpubsub.Publisher,\n\tlogname string,\n\tline *tail.Line) {\n\n\tif line == nil {\n\t\tpanic(\"line is nil\")\n\t}\n\n\tmsg := &Message{\n\t\tLogFilename: logname,\n\t\tSource: instance.Type,\n\t\tInstanceIndex: instance.Index,\n\t\tAppGUID: instance.AppGUID,\n\t\tAppName: instance.AppName,\n\t\tAppSpace: instance.AppSpace,\n\t\tMessageCommon: messagecommon.New(line.Text, line.Time, LocalNodeId()),\n\t}\n\n\tif line.Err != nil {\n\t\t\/\/ Mark this as a special error record, as it is\n\t\t\/\/ coming from tail, not the app.\n\t\tmsg.Source = \"stackato.apptail\"\n\t\tmsg.LogFilename = \"\"\n\t\tlog.Warnf(\"[%s] %s\", instance.AppName, line.Text)\n\t}\n\n\terr := msg.Publish(pub, false)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package widgets\n\nimport (\n\t\"code.google.com\/p\/draw2d\/draw2d\"\n\t\"github.com\/skelterjohn\/geom\"\n\t\"github.com\/skelterjohn\/go.uik\"\n\t\"image\"\n\t\"image\/color\"\n\t\"time\"\n)\n\ntype Entry struct {\n\tuik.Block\n\ttextBuffer *image.RGBA\n\ttext []rune\n\truneOffsets []float64\n\tcursor int\n\tselectCursor int\n\tselecting bool\n\tselected bool\n\ttextOffset float64\n\tfd draw2d.FontData\n\tfontSize float64\n}\n\nfunc NewEntry(size geom.Coord) (e *Entry) {\n\te = new(Entry)\n\te.Size = size\n\te.Initialize()\n\n\te.text = []rune(\"hello world\")\n\te.cursor = len(e.text)\n\n\te.render()\n\n\tgo e.handleEvents()\n\n\te.SetSizeHint(uik.SizeHint{\n\t\tMinSize: e.Size,\n\t\tPreferredSize: e.Size,\n\t\tMaxSize: e.Size,\n\t})\n\n\te.Paint = func(gc draw2d.GraphicContext) {\n\t\te.draw(gc)\n\t}\n\n\treturn\n}\n\nfunc (e *Entry) Initialize() {\n\te.Block.Initialize()\n\n\te.fd = uik.DefaultFontData\n\te.fontSize = 12\n}\n\nfunc (e *Entry) render() {\n\tconst stretchFactor = 1.2\n\n\ttext := string(e.text)\n\n\theight := uik.GetFontHeight(e.fd, e.fontSize) * stretchFactor\n\twidthMax := float64(len(text)) * e.fontSize\n\n\tbuf := image.NewRGBA(image.Rectangle{\n\t\tMin: image.Point{0, 0},\n\t\tMax: image.Point{int(widthMax + 1), int(height + 1)},\n\t})\n\n\tgc := draw2d.NewGraphicContext(buf)\n\tgc.Translate(0, height\/stretchFactor)\n\tgc.SetFontData(e.fd)\n\tgc.SetFontSize(e.fontSize)\n\tgc.SetStrokeColor(color.Black)\n\n\tvar left float64\n\te.runeOffsets = []float64{0}\n\tfor _, r := range e.text {\n\t\trt := string(r)\n\t\twidth := gc.FillString(rt)\n\t\tgc.Translate(width, 0)\n\t\tleft += width\n\t\te.runeOffsets = append(e.runeOffsets, left)\n\t}\n\n\te.textBuffer = buf.SubImage(image.Rectangle{\n\t\tMin: image.Point{0, 0},\n\t\tMax: image.Point{int(left + 1), int(height + 1)},\n\t}).(*image.RGBA)\n}\n\nfunc (e *Entry) GrabFocus() {\n\tif e.HasKeyFocus {\n\t\treturn\n\t}\n\te.Parent.UserEventsIn <- uik.KeyFocusRequest{\n\t\tBlock: &e.Block,\n\t}\n}\n\nfunc (e *Entry) draw(gc draw2d.GraphicContext) {\n\tif e.textOffset+e.runeOffsets[e.cursor] < 5 {\n\t\te.textOffset = 5 - e.runeOffsets[e.cursor]\n\t}\n\tif e.textOffset+e.runeOffsets[e.cursor] > e.Size.X-5 {\n\t\te.textOffset = e.Size.X - 5 - e.runeOffsets[e.cursor]\n\t}\n\n\tgc.Clear()\n\tif e.HasKeyFocus {\n\t\tgc.SetFillColor(color.RGBA{150, 150, 150, 255})\n\t\tsafeRect(gc, geom.Coord{0, 0}, e.Size)\n\t\tgc.Fill()\n\t}\n\tth := float64(e.textBuffer.Bounds().Max.Y - e.textBuffer.Bounds().Min.Y)\n\tgc.Save()\n\tgc.Translate(e.textOffset, 0)\n\n\tif e.selecting {\n\t\tstart := e.runeOffsets[e.cursor]\n\t\tend := e.runeOffsets[e.selectCursor]\n\t\tif start > end {\n\t\t\tstart, end = end, start\n\t\t}\n\t\tgc.SetFillColor(color.RGBA{200, 200, 200, 255})\n\t\tsafeRect(gc, geom.Coord{start, 0}, geom.Coord{end, e.Size.Y})\n\t\tgc.Fill()\n\t}\n\n\tgc.Translate(0, (e.Size.Y-th)\/2)\n\n\tgc.DrawImage(e.textBuffer)\n\tgc.Restore()\n\tif e.HasKeyFocus {\n\t\tun := time.Duration(time.Now().UnixNano())\n\t\tms := un \/ time.Millisecond\n\t\tms = 10000 - (ms % 1000)\n\t\tintensity := uint8((ms * 255) \/ 1000)\n\t\tgc.SetStrokeColor(color.RGBA{A: intensity})\n\t\tgc.MoveTo(e.runeOffsets[e.cursor]+e.textOffset, 0)\n\t\tgc.LineTo(e.runeOffsets[e.cursor]+e.textOffset, e.Size.Y)\n\t\tgc.Stroke()\n\t\te.Invalidate()\n\t}\n}\n\nfunc (e *Entry) cursorForCoord(p geom.Coord) (cursor int) {\n\ttextX := p.X - e.textOffset\n\tfor i, co := range e.runeOffsets {\n\t\t\/\/ uik.Report(i, co)\n\t\tif textX > co {\n\t\t\tcursor = i\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}\n\nfunc (e *Entry) handleEvents() {\n\tfor {\n\t\tselect {\n\t\tcase ev := <-e.UserEvents:\n\t\t\tswitch ev := ev.(type) {\n\t\t\tcase uik.MouseDownEvent:\n\t\t\t\te.GrabFocus()\n\t\t\t\tnewcursor := e.cursorForCoord(ev.Loc)\n\t\t\t\tif e.cursor != newcursor {\n\t\t\t\t\te.cursor = newcursor\n\t\t\t\t\te.Invalidate()\n\t\t\t\t}\n\t\t\t\te.selecting = true\n\t\t\t\te.selectCursor = e.cursor\n\t\t\tcase uik.MouseUpEvent:\n\t\t\t\tif e.selecting {\n\t\t\t\t\te.Invalidate()\n\t\t\t\t}\n\t\t\tcase uik.MouseDraggedEvent:\n\t\t\t\tif !e.selecting {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tnewSelectCursor := e.cursorForCoord(ev.Loc)\n\t\t\t\tif e.selectCursor != newSelectCursor {\n\t\t\t\t\te.selectCursor = newSelectCursor\n\t\t\t\t\te.Invalidate()\n\t\t\t\t}\n\t\t\tcase uik.KeyTypedEvent:\n\n\t\t\t\t\/\/ uik.Report(\"key\", ev.Code, ev.Letter)\n\t\t\t\tif uik.IsGlyph(ev.Code) {\n\t\t\t\t\tstart, end := e.cursor, e.cursor\n\t\t\t\t\tif e.selecting {\n\t\t\t\t\t\tend = e.selectCursor\n\t\t\t\t\t\tif end < start {\n\t\t\t\t\t\t\tstart, end = end, start\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\thead := e.text[:start]\n\t\t\t\t\ttail := e.text[end:]\n\t\t\t\t\te.text = make([]rune, len(head)+len(tail)+1)[:0]\n\t\t\t\t\te.text = append(e.text, head...)\n\t\t\t\t\te.text = append(e.text, []rune(ev.Letter)[0])\n\t\t\t\t\te.text = append(e.text, tail...)\n\t\t\t\t\te.cursor = start + 1\n\t\t\t\t} else {\n\t\t\t\t\tswitch ev.Code {\n\t\t\t\t\tcase uik.KeyBackspace:\n\t\t\t\t\t\tif len(e.text) == 0 {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif !e.selecting && e.cursor == 0 {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tstart, end := e.cursor-1, e.cursor\n\t\t\t\t\t\tif e.selecting {\n\t\t\t\t\t\t\tstart = e.cursor\n\t\t\t\t\t\t\tend = e.selectCursor\n\t\t\t\t\t\t\tif end < start {\n\t\t\t\t\t\t\t\tstart, end = end, start\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcopy(e.text[start:], e.text[end:])\n\t\t\t\t\t\te.text = e.text[:len(e.text)-(end-start)]\n\t\t\t\t\t\te.cursor = start\n\t\t\t\t\tcase uik.KeyDelete:\n\t\t\t\t\t\tif len(e.text) == 0 {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif !e.selecting && e.cursor == len(e.text) {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tstart, end := e.cursor, e.cursor+1\n\t\t\t\t\t\tif e.selecting {\n\t\t\t\t\t\t\tstart = e.cursor\n\t\t\t\t\t\t\tend = e.selectCursor\n\t\t\t\t\t\t\tif end < start {\n\t\t\t\t\t\t\t\tstart, end = end, start\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcopy(e.text[start:], e.text[end:])\n\t\t\t\t\t\te.text = e.text[:len(e.text)-(end-start)]\n\t\t\t\t\t\te.cursor = start\n\t\t\t\t\tcase uik.KeyArrowLeft:\n\t\t\t\t\t\tif e.cursor > 0 {\n\t\t\t\t\t\t\te.cursor--\n\t\t\t\t\t\t}\n\t\t\t\t\tcase uik.KeyArrowRight:\n\t\t\t\t\t\tif e.cursor < len(e.text) {\n\t\t\t\t\t\t\te.cursor++\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\te.selecting = false\n\t\t\t\te.render()\n\t\t\t\te.Invalidate()\n\t\t\tcase uik.KeyFocusEvent:\n\t\t\t\te.HandleEvent(ev)\n\t\t\t\te.Invalidate()\n\t\t\tdefault:\n\t\t\t\te.HandleEvent(ev)\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>better blinking, selecting<commit_after>package widgets\n\nimport (\n\t\"code.google.com\/p\/draw2d\/draw2d\"\n\t\"github.com\/skelterjohn\/geom\"\n\t\"github.com\/skelterjohn\/go.uik\"\n\t\"image\"\n\t\"image\/color\"\n\t\"time\"\n)\n\ntype Entry struct {\n\tuik.Block\n\ttextBuffer *image.RGBA\n\ttext []rune\n\truneOffsets []float64\n\tcursor int\n\tselectCursor int\n\tselecting bool\n\tselected bool\n\ttextOffset float64\n\tfd draw2d.FontData\n\tfontSize float64\n}\n\nfunc NewEntry(size geom.Coord) (e *Entry) {\n\te = new(Entry)\n\te.Size = size\n\te.Initialize()\n\n\te.text = []rune(\"hello world\")\n\te.cursor = len(e.text)\n\n\te.render()\n\n\tgo e.handleEvents()\n\n\te.SetSizeHint(uik.SizeHint{\n\t\tMinSize: e.Size,\n\t\tPreferredSize: e.Size,\n\t\tMaxSize: e.Size,\n\t})\n\n\te.Paint = func(gc draw2d.GraphicContext) {\n\t\te.draw(gc)\n\t}\n\n\treturn\n}\n\nfunc (e *Entry) Initialize() {\n\te.Block.Initialize()\n\n\te.fd = uik.DefaultFontData\n\te.fontSize = 12\n}\n\nfunc (e *Entry) render() {\n\tconst stretchFactor = 1.2\n\n\ttext := string(e.text)\n\n\theight := uik.GetFontHeight(e.fd, e.fontSize) * stretchFactor\n\twidthMax := float64(len(text)) * e.fontSize\n\n\tbuf := image.NewRGBA(image.Rectangle{\n\t\tMin: image.Point{0, 0},\n\t\tMax: image.Point{int(widthMax + 1), int(height + 1)},\n\t})\n\n\tgc := draw2d.NewGraphicContext(buf)\n\tgc.Translate(0, height\/stretchFactor)\n\tgc.SetFontData(e.fd)\n\tgc.SetFontSize(e.fontSize)\n\tgc.SetStrokeColor(color.Black)\n\n\tvar left float64\n\te.runeOffsets = []float64{0}\n\tfor _, r := range e.text {\n\t\trt := string(r)\n\t\twidth := gc.FillString(rt)\n\t\tgc.Translate(width, 0)\n\t\tleft += width\n\t\te.runeOffsets = append(e.runeOffsets, left)\n\t}\n\n\te.textBuffer = buf.SubImage(image.Rectangle{\n\t\tMin: image.Point{0, 0},\n\t\tMax: image.Point{int(left + 1), int(height + 1)},\n\t}).(*image.RGBA)\n}\n\nfunc (e *Entry) GrabFocus() {\n\tif e.HasKeyFocus {\n\t\treturn\n\t}\n\te.Parent.UserEventsIn <- uik.KeyFocusRequest{\n\t\tBlock: &e.Block,\n\t}\n}\n\nfunc (e *Entry) draw(gc draw2d.GraphicContext) {\n\tif e.textOffset+e.runeOffsets[e.cursor] < 5 {\n\t\te.textOffset = 5 - e.runeOffsets[e.cursor]\n\t}\n\tif e.textOffset+e.runeOffsets[e.cursor] > e.Size.X-5 {\n\t\te.textOffset = e.Size.X - 5 - e.runeOffsets[e.cursor]\n\t}\n\n\tgc.Clear()\n\tif e.HasKeyFocus {\n\t\tgc.SetFillColor(color.RGBA{150, 150, 150, 255})\n\t\tsafeRect(gc, geom.Coord{0, 0}, e.Size)\n\t\tgc.Fill()\n\t}\n\tth := float64(e.textBuffer.Bounds().Max.Y - e.textBuffer.Bounds().Min.Y)\n\tgc.Save()\n\tgc.Translate(e.textOffset, 0)\n\n\tif e.selecting {\n\t\tstart := e.runeOffsets[e.cursor]\n\t\tend := e.runeOffsets[e.selectCursor]\n\t\tif start > end {\n\t\t\tstart, end = end, start\n\t\t}\n\t\tgc.SetFillColor(color.RGBA{200, 200, 200, 255})\n\t\tsafeRect(gc, geom.Coord{start, 0}, geom.Coord{end, e.Size.Y})\n\t\tgc.Fill()\n\t}\n\n\tgc.Translate(0, (e.Size.Y-th)\/2)\n\n\tgc.DrawImage(e.textBuffer)\n\tgc.Restore()\n\tif e.HasKeyFocus {\n\t\tun := time.Duration(time.Now().UnixNano())\n\t\tms := un \/ time.Millisecond\n\t\tms = ms % 750\n\t\tvar intensity uint8 = 255\n\t\tif ms > 550 {\n\t\t\tdiff := 650 - ms\n\t\t\tif diff < 0 {\n\t\t\t\tdiff *= -1\n\t\t\t}\n\t\t\tintensity = uint8((diff * 255) \/ 200)\n\t\t}\n\t\toffset := float64(int(e.runeOffsets[e.cursor] + e.textOffset))\n\t\tgc.SetStrokeColor(color.RGBA{A: intensity})\n\t\tgc.MoveTo(offset, 0)\n\t\tgc.LineTo(offset, e.Size.Y)\n\t\tgc.Stroke()\n\t\te.Invalidate()\n\t}\n}\n\nfunc (e *Entry) cursorForCoord(p geom.Coord) (cursor int) {\n\ttextX := p.X - e.textOffset\n\tfor i, co := range e.runeOffsets {\n\t\t\/\/ uik.Report(i, co)\n\t\tif textX > co {\n\t\t\tcursor = i\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}\n\nfunc (e *Entry) handleEvents() {\n\tfor {\n\t\tselect {\n\t\tcase ev := <-e.UserEvents:\n\t\t\tswitch ev := ev.(type) {\n\t\t\tcase uik.MouseDownEvent:\n\t\t\t\te.GrabFocus()\n\t\t\t\tnewcursor := e.cursorForCoord(ev.Loc)\n\t\t\t\tif e.cursor != newcursor {\n\t\t\t\t\te.cursor = newcursor\n\t\t\t\t\te.Invalidate()\n\t\t\t\t}\n\t\t\t\te.selecting = true\n\t\t\t\te.selectCursor = e.cursor\n\t\t\tcase uik.MouseUpEvent:\n\t\t\t\tif e.selecting {\n\t\t\t\t\te.Invalidate()\n\t\t\t\t}\n\t\t\tcase uik.MouseDraggedEvent:\n\t\t\t\tif !e.selecting {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tnewSelectCursor := e.cursorForCoord(ev.Loc)\n\t\t\t\tif e.cursor != newSelectCursor {\n\t\t\t\t\te.cursor = newSelectCursor\n\t\t\t\t\te.Invalidate()\n\t\t\t\t}\n\t\t\tcase uik.KeyTypedEvent:\n\n\t\t\t\t\/\/ uik.Report(\"key\", ev.Code, ev.Letter)\n\t\t\t\tif uik.IsGlyph(ev.Code) {\n\t\t\t\t\tstart, end := e.cursor, e.cursor\n\t\t\t\t\tif e.selecting {\n\t\t\t\t\t\tend = e.selectCursor\n\t\t\t\t\t\tif end < start {\n\t\t\t\t\t\t\tstart, end = end, start\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\thead := e.text[:start]\n\t\t\t\t\ttail := e.text[end:]\n\t\t\t\t\te.text = make([]rune, len(head)+len(tail)+1)[:0]\n\t\t\t\t\te.text = append(e.text, head...)\n\t\t\t\t\te.text = append(e.text, []rune(ev.Letter)[0])\n\t\t\t\t\te.text = append(e.text, tail...)\n\t\t\t\t\te.cursor = start + 1\n\t\t\t\t} else {\n\t\t\t\t\tswitch ev.Code {\n\t\t\t\t\tcase uik.KeyBackspace:\n\t\t\t\t\t\tif len(e.text) == 0 {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif !e.selecting && e.cursor == 0 {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tstart, end := e.cursor-1, e.cursor\n\t\t\t\t\t\tif e.selecting {\n\t\t\t\t\t\t\tstart = e.cursor\n\t\t\t\t\t\t\tend = e.selectCursor\n\t\t\t\t\t\t\tif end < start {\n\t\t\t\t\t\t\t\tstart, end = end, start\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcopy(e.text[start:], e.text[end:])\n\t\t\t\t\t\te.text = e.text[:len(e.text)-(end-start)]\n\t\t\t\t\t\te.cursor = start\n\t\t\t\t\tcase uik.KeyDelete:\n\t\t\t\t\t\tif len(e.text) == 0 {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif !e.selecting && e.cursor == len(e.text) {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tstart, end := e.cursor, e.cursor+1\n\t\t\t\t\t\tif e.selecting {\n\t\t\t\t\t\t\tstart = e.cursor\n\t\t\t\t\t\t\tend = e.selectCursor\n\t\t\t\t\t\t\tif end < start {\n\t\t\t\t\t\t\t\tstart, end = end, start\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tcopy(e.text[start:], e.text[end:])\n\t\t\t\t\t\te.text = e.text[:len(e.text)-(end-start)]\n\t\t\t\t\t\te.cursor = start\n\t\t\t\t\tcase uik.KeyArrowLeft:\n\t\t\t\t\t\tif e.cursor > 0 {\n\t\t\t\t\t\t\te.cursor--\n\t\t\t\t\t\t}\n\t\t\t\t\tcase uik.KeyArrowRight:\n\t\t\t\t\t\tif e.cursor < len(e.text) {\n\t\t\t\t\t\t\te.cursor++\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\te.selecting = false\n\t\t\t\te.render()\n\t\t\t\te.Invalidate()\n\t\t\tcase uik.KeyFocusEvent:\n\t\t\t\te.HandleEvent(ev)\n\t\t\t\te.Invalidate()\n\t\t\tdefault:\n\t\t\t\te.HandleEvent(ev)\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/globocom\/tsuru\/app\"\n\t\"github.com\/globocom\/tsuru\/db\"\n\t\"github.com\/globocom\/tsuru\/log\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t\"launchpad.net\/goyaml\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Service struct {\n\tUnits map[string]app.Unit\n}\n\ntype output struct {\n\tServices map[string]Service\n\tMachines map[int]interface{}\n}\n\nfunc execWithTimeout(timeout time.Duration, cmd string, args ...string) (output []byte, err error) {\n\tch := make(chan []byte, 1)\n\terrCh := make(chan error, 1)\n\tcommand := exec.Command(cmd, args...)\n\tgo func() {\n\t\tif out, err := command.Output(); err == nil {\n\t\t\tch <- out\n\t\t} else {\n\t\t\terrCh <- err\n\t\t}\n\t}()\n\tselect {\n\tcase output = <-ch:\n\tcase err = <-errCh:\n\tcase <-time.After(timeout):\n\t\targsStr := strings.Join(args, \" \")\n\t\terr = fmt.Errorf(\"%q ran for more than %s.\", cmd+\" \"+argsStr, timeout)\n\t\tif command.Process != nil {\n\t\t\tcommand.Process.Kill()\n\t\t}\n\t}\n\treturn output, err\n}\n\nfunc collect() ([]byte, error) {\n\tlog.Print(\"collecting status from juju\")\n\treturn execWithTimeout(30e9, \"juju\", \"status\")\n}\n\nfunc parse(data []byte) *output {\n\tlog.Print(\"parsing juju yaml\")\n\traw := new(output)\n\t_ = goyaml.Unmarshal(data, raw)\n\treturn raw\n}\n\nfunc update(out *output) {\n\tlog.Print(\"updating status from juju\")\n\tfor serviceName, service := range out.Services {\n\t\tfor _, yUnit := range service.Units {\n\t\t\tu := app.Unit{}\n\t\t\ta := app.App{Name: serviceName}\n\t\t\ta.Get()\n\t\t\tuMachine := out.Machines[yUnit.Machine].(map[interface{}]interface{})\n\t\t\tif uMachine[\"instance-id\"] != nil {\n\t\t\t\tu.InstanceId = uMachine[\"instance-id\"].(string)\n\t\t\t}\n\t\t\tif uMachine[\"dns-name\"] != nil {\n\t\t\t\tu.Ip = uMachine[\"dns-name\"].(string)\n\t\t\t}\n\t\t\tu.Machine = yUnit.Machine\n\t\t\tif uMachine[\"instance-state\"] != nil {\n\t\t\t\tu.InstanceState = uMachine[\"instance-state\"].(string)\n\t\t\t}\n\t\t\tif uMachine[\"agent-state\"] != nil {\n\t\t\t\tu.MachineAgentState = uMachine[\"agent-state\"].(string)\n\t\t\t}\n\t\t\tu.AgentState = yUnit.AgentState\n\t\t\ta.State = u.State()\n\t\t\ta.AddUnit(&u)\n\t\t\tdb.Session.Apps().Update(bson.M{\"name\": a.Name}, a)\n\t\t}\n\t}\n}\n<commit_msg>collector: fix execWithTimeout function<commit_after>\/\/ Copyright 2012 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/globocom\/tsuru\/app\"\n\t\"github.com\/globocom\/tsuru\/db\"\n\t\"github.com\/globocom\/tsuru\/log\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t\"launchpad.net\/goyaml\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype Service struct {\n\tUnits map[string]app.Unit\n}\n\ntype output struct {\n\tServices map[string]Service\n\tMachines map[int]interface{}\n}\n\nfunc execWithTimeout(timeout time.Duration, cmd string, args ...string) (output []byte, err error) {\n\tvar buf bytes.Buffer\n\tch := make(chan []byte, 1)\n\terrCh := make(chan error, 1)\n\tcommand := exec.Command(cmd, args...)\n\tcommand.Stdout = &buf\n\tif err = command.Start(); err != nil {\n\t\treturn nil, err\n\t}\n\tgo func() {\n\t\tif err := command.Wait(); err == nil {\n\t\t\tch <- buf.Bytes()\n\t\t} else {\n\t\t\terrCh <- err\n\t\t}\n\t}()\n\tselect {\n\tcase output = <-ch:\n\tcase err = <-errCh:\n\tcase <-time.After(timeout):\n\t\targsStr := strings.Join(args, \" \")\n\t\terr = fmt.Errorf(\"%q ran for more than %s.\", cmd+\" \"+argsStr, timeout)\n\t\tcommand.Process.Kill()\n\t}\n\treturn output, err\n}\n\nfunc collect() ([]byte, error) {\n\tlog.Print(\"collecting status from juju\")\n\treturn execWithTimeout(30e9, \"juju\", \"status\")\n}\n\nfunc parse(data []byte) *output {\n\tlog.Print(\"parsing juju yaml\")\n\traw := new(output)\n\t_ = goyaml.Unmarshal(data, raw)\n\treturn raw\n}\n\nfunc update(out *output) {\n\tlog.Print(\"updating status from juju\")\n\tfor serviceName, service := range out.Services {\n\t\tfor _, yUnit := range service.Units {\n\t\t\tu := app.Unit{}\n\t\t\ta := app.App{Name: serviceName}\n\t\t\ta.Get()\n\t\t\tuMachine := out.Machines[yUnit.Machine].(map[interface{}]interface{})\n\t\t\tif uMachine[\"instance-id\"] != nil {\n\t\t\t\tu.InstanceId = uMachine[\"instance-id\"].(string)\n\t\t\t}\n\t\t\tif uMachine[\"dns-name\"] != nil {\n\t\t\t\tu.Ip = uMachine[\"dns-name\"].(string)\n\t\t\t}\n\t\t\tu.Machine = yUnit.Machine\n\t\t\tif uMachine[\"instance-state\"] != nil {\n\t\t\t\tu.InstanceState = uMachine[\"instance-state\"].(string)\n\t\t\t}\n\t\t\tif uMachine[\"agent-state\"] != nil {\n\t\t\t\tu.MachineAgentState = uMachine[\"agent-state\"].(string)\n\t\t\t}\n\t\t\tu.AgentState = yUnit.AgentState\n\t\t\ta.State = u.State()\n\t\t\ta.AddUnit(&u)\n\t\t\tdb.Session.Apps().Update(bson.M{\"name\": a.Name}, a)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"github.com\/globocom\/tsuru\/app\"\n\t\"github.com\/globocom\/tsuru\/db\"\n\t\"github.com\/globocom\/tsuru\/log\"\n\t\"github.com\/globocom\/tsuru\/provision\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t\"sort\"\n)\n\n\/\/ AppList is a list of apps. It's not thread safe.\ntype AppList []*app.App\n\nfunc (l AppList) Search(name string) (*app.App, int) {\n\tindex := sort.Search(len(l), func(i int) bool {\n\t\treturn l[i].Name >= name\n\t})\n\tif index < len(l) && l[index].Name == name {\n\t\treturn l[index], -1\n\t} else if index < len(l) {\n\t\treturn &app.App{Name: name}, index\n\t}\n\treturn &app.App{Name: name}, len(l)\n}\n\nfunc (l *AppList) Add(a *app.App, index int) {\n\tlength := len(*l)\n\t*l = append(*l, a)\n\tif index < length {\n\t\tfor i := length; i > index; i-- {\n\t\t\t(*l)[i] = (*l)[i-1]\n\t\t}\n\t\t(*l)[index] = a\n\t}\n}\n\nfunc update(units []provision.Unit) {\n\tlog.Print(\"updating status from provisioner\")\n\tvar l AppList\n\tfor _, unit := range units {\n\t\ta, index := l.Search(unit.AppName)\n\t\tif index > -1 {\n\t\t\terr := a.Get()\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"collector: app %q not found. Skipping.\\n\", unit.AppName)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tu := app.Unit{}\n\t\tu.Name = unit.Name\n\t\tu.Type = unit.Type\n\t\tu.Machine = unit.Machine\n\t\tu.InstanceId = unit.InstanceId\n\t\tu.Ip = unit.Ip\n\t\tu.State = string(unit.Status)\n\t\ta.AddUnit(&u)\n\t\tif index > -1 {\n\t\t\tl.Add(a, index)\n\t\t}\n\t}\n\tconn, err := db.Conn()\n\tif err != nil {\n\t\tlog.Printf(\"collector failed to connect to the database: %s\", err)\n\t\treturn\n\t}\n\tfor _, a := range l {\n\t\ta.Ip, _ = app.Provisioner.Addr(a)\n\t\tconn.Apps().Update(bson.M{\"name\": a.Name}, a)\n\t}\n}\n<commit_msg>collector: call Close after using database connection<commit_after>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"github.com\/globocom\/tsuru\/app\"\n\t\"github.com\/globocom\/tsuru\/db\"\n\t\"github.com\/globocom\/tsuru\/log\"\n\t\"github.com\/globocom\/tsuru\/provision\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t\"sort\"\n)\n\n\/\/ AppList is a list of apps. It's not thread safe.\ntype AppList []*app.App\n\nfunc (l AppList) Search(name string) (*app.App, int) {\n\tindex := sort.Search(len(l), func(i int) bool {\n\t\treturn l[i].Name >= name\n\t})\n\tif index < len(l) && l[index].Name == name {\n\t\treturn l[index], -1\n\t} else if index < len(l) {\n\t\treturn &app.App{Name: name}, index\n\t}\n\treturn &app.App{Name: name}, len(l)\n}\n\nfunc (l *AppList) Add(a *app.App, index int) {\n\tlength := len(*l)\n\t*l = append(*l, a)\n\tif index < length {\n\t\tfor i := length; i > index; i-- {\n\t\t\t(*l)[i] = (*l)[i-1]\n\t\t}\n\t\t(*l)[index] = a\n\t}\n}\n\nfunc update(units []provision.Unit) {\n\tlog.Print(\"updating status from provisioner\")\n\tvar l AppList\n\tfor _, unit := range units {\n\t\ta, index := l.Search(unit.AppName)\n\t\tif index > -1 {\n\t\t\terr := a.Get()\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"collector: app %q not found. Skipping.\\n\", unit.AppName)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tu := app.Unit{}\n\t\tu.Name = unit.Name\n\t\tu.Type = unit.Type\n\t\tu.Machine = unit.Machine\n\t\tu.InstanceId = unit.InstanceId\n\t\tu.Ip = unit.Ip\n\t\tu.State = string(unit.Status)\n\t\ta.AddUnit(&u)\n\t\tif index > -1 {\n\t\t\tl.Add(a, index)\n\t\t}\n\t}\n\tconn, err := db.Conn()\n\tif err != nil {\n\t\tlog.Printf(\"collector failed to connect to the database: %s\", err)\n\t\treturn\n\t}\n\tdefer conn.Close()\n\tfor _, a := range l {\n\t\ta.Ip, _ = app.Provisioner.Addr(a)\n\t\tconn.Apps().Update(bson.M{\"name\": a.Name}, a)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package cassandra\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com\/gocql\/gocql\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/filer\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/filer_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/util\"\n)\n\nfunc init() {\n\tfiler.Stores = append(filer.Stores, &CassandraStore{})\n}\n\ntype CassandraStore struct {\n\tcluster *gocql.ClusterConfig\n\tsession *gocql.Session\n\tsuperLargeDirectoryHash map[string]string\n}\n\nfunc (store *CassandraStore) GetName() string {\n\treturn \"cassandra\"\n}\n\nfunc (store *CassandraStore) Initialize(configuration util.Configuration, prefix string) (err error) {\n\treturn store.initialize(\n\t\tconfiguration.GetString(prefix+\"keyspace\"),\n\t\tconfiguration.GetStringSlice(prefix+\"hosts\"),\n\t\tconfiguration.GetString(prefix+\"username\"),\n\t\tconfiguration.GetString(prefix+\"password\"),\n\t\tconfiguration.GetStringSlice(prefix+\"superLargeDirectories\"),\n\t\tconfiguration.GetString(prefix+\"localDC\"),\n\t)\n}\n\nfunc (store *CassandraStore) isSuperLargeDirectory(dir string) (dirHash string, isSuperLargeDirectory bool) {\n\tdirHash, isSuperLargeDirectory = store.superLargeDirectoryHash[dir]\n\treturn\n}\n\nfunc (store *CassandraStore) initialize(keyspace string, hosts []string, username string, password string, superLargeDirectories []string, localDC string) (err error) {\n\tstore.cluster = gocql.NewCluster(hosts...)\n\tif username != \"\" && password != \"\" {\n\t\tstore.cluster.Authenticator = gocql.PasswordAuthenticator{Username: username, Password: password}\n\t}\n\tstore.cluster.Keyspace = keyspace\n\tfallback := gocql.RoundRobinHostPolicy()\n\tif localDC != \"\" {\n\t\tfallback = gocql.DCAwareRoundRobinPolicy(localDC)\n\t}\n\tstore.cluster.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(fallback)\n\tstore.cluster.Consistency = gocql.LocalQuorum\n\n\tstore.session, err = store.cluster.CreateSession()\n\tif err != nil {\n\t\tglog.V(0).Infof(\"Failed to open cassandra store, hosts %v, keyspace %s\", hosts, keyspace)\n\t}\n\n\t\/\/ set directory hash\n\tstore.superLargeDirectoryHash = make(map[string]string)\n\texistingHash := make(map[string]string)\n\tfor _, dir := range superLargeDirectories {\n\t\t\/\/ adding dir hash to avoid duplicated names\n\t\tdirHash := util.Md5String([]byte(dir))[:4]\n\t\tstore.superLargeDirectoryHash[dir] = dirHash\n\t\tif existingDir, found := existingHash[dirHash]; found {\n\t\t\tglog.Fatalf(\"directory %s has the same hash as %s\", dir, existingDir)\n\t\t}\n\t\texistingHash[dirHash] = dir\n\t}\n\treturn\n}\n\nfunc (store *CassandraStore) BeginTransaction(ctx context.Context) (context.Context, error) {\n\treturn ctx, nil\n}\nfunc (store *CassandraStore) CommitTransaction(ctx context.Context) error {\n\treturn nil\n}\nfunc (store *CassandraStore) RollbackTransaction(ctx context.Context) error {\n\treturn nil\n}\n\nfunc (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {\n\n\tdir, name := entry.FullPath.DirAndName()\n\tif dirHash, ok := store.isSuperLargeDirectory(dir); ok {\n\t\tdir, name = dirHash+name, \"\"\n\t}\n\n\tmeta, err := entry.EncodeAttributesAndChunks()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"encode %s: %s\", entry.FullPath, err)\n\t}\n\n\tif len(entry.Chunks) > 50 {\n\t\tmeta = util.MaybeGzipData(meta)\n\t}\n\n\tif err := store.session.Query(\n\t\t\"INSERT INTO filemeta (directory,name,meta) VALUES(?,?,?) USING TTL ? \",\n\t\tdir, name, meta, entry.TtlSec).Exec(); err != nil {\n\t\treturn fmt.Errorf(\"insert %s: %s\", entry.FullPath, err)\n\t}\n\n\treturn nil\n}\n\nfunc (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {\n\n\treturn store.InsertEntry(ctx, entry)\n}\n\nfunc (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {\n\n\tdir, name := fullpath.DirAndName()\n\tif dirHash, ok := store.isSuperLargeDirectory(dir); ok {\n\t\tdir, name = dirHash+name, \"\"\n\t}\n\n\tvar data []byte\n\tif err := store.session.Query(\n\t\t\"SELECT meta FROM filemeta WHERE directory=? AND name=?\",\n\t\tdir, name).Consistency(gocql.One).Scan(&data); err != nil {\n\t\tif err != gocql.ErrNotFound {\n\t\t\treturn nil, filer_pb.ErrNotFound\n\t\t}\n\t}\n\n\tif len(data) == 0 {\n\t\treturn nil, filer_pb.ErrNotFound\n\t}\n\n\tentry = &filer.Entry{\n\t\tFullPath: fullpath,\n\t}\n\terr = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data))\n\tif err != nil {\n\t\treturn entry, fmt.Errorf(\"decode %s : %v\", entry.FullPath, err)\n\t}\n\n\treturn entry, nil\n}\n\nfunc (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {\n\n\tdir, name := fullpath.DirAndName()\n\tif dirHash, ok := store.isSuperLargeDirectory(dir); ok {\n\t\tdir, name = dirHash+name, \"\"\n\t}\n\n\tif err := store.session.Query(\n\t\t\"DELETE FROM filemeta WHERE directory=? AND name=?\",\n\t\tdir, name).Exec(); err != nil {\n\t\treturn fmt.Errorf(\"delete %s : %v\", fullpath, err)\n\t}\n\n\treturn nil\n}\n\nfunc (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath, limit int64) error {\n\tif _, ok := store.isSuperLargeDirectory(string(fullpath)); ok {\n\t\treturn nil \/\/ filer.ErrUnsupportedSuperLargeDirectoryListing\n\t}\n\n\tif err := store.session.Query(\n\t\t\"DELETE FROM filemeta WHERE directory=?\",\n\t\tfullpath).Exec(); err != nil {\n\t\treturn fmt.Errorf(\"delete %s : %v\", fullpath, err)\n\t}\n\n\treturn nil\n}\n\nfunc (store *CassandraStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {\n\treturn lastFileName, filer.ErrUnsupportedListDirectoryPrefixed\n}\n\nfunc (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {\n\n\tif _, ok := store.isSuperLargeDirectory(string(dirPath)); ok {\n\t\treturn \/\/ nil, filer.ErrUnsupportedSuperLargeDirectoryListing\n\t}\n\n\tcqlStr := \"SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?\"\n\tif includeStartFile {\n\t\tcqlStr = \"SELECT NAME, meta FROM filemeta WHERE directory=? AND name>=? ORDER BY NAME ASC LIMIT ?\"\n\t}\n\n\tvar data []byte\n\tvar name string\n\titer := store.session.Query(cqlStr, string(dirPath), startFileName, limit+1).Iter()\n\tfor iter.Scan(&name, &data) {\n\t\tentry := &filer.Entry{\n\t\t\tFullPath: util.NewFullPath(string(dirPath), name),\n\t\t}\n\t\tlastFileName = name\n\t\tif decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {\n\t\t\terr = decodeErr\n\t\t\tglog.V(0).Infof(\"list %s : %v\", entry.FullPath, err)\n\t\t\tbreak\n\t\t}\n\t\tif !eachEntryFunc(entry) {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err := iter.Close(); err != nil {\n\t\tglog.V(0).Infof(\"list iterator close: %v\", err)\n\t}\n\n\treturn lastFileName, err\n}\n\nfunc (store *CassandraStore) Shutdown() {\n\tstore.session.Close()\n}\n<commit_msg>cassandra: Use LocalOne instead of One consistency<commit_after>package cassandra\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com\/gocql\/gocql\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/filer\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/pb\/filer_pb\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/util\"\n)\n\nfunc init() {\n\tfiler.Stores = append(filer.Stores, &CassandraStore{})\n}\n\ntype CassandraStore struct {\n\tcluster *gocql.ClusterConfig\n\tsession *gocql.Session\n\tsuperLargeDirectoryHash map[string]string\n}\n\nfunc (store *CassandraStore) GetName() string {\n\treturn \"cassandra\"\n}\n\nfunc (store *CassandraStore) Initialize(configuration util.Configuration, prefix string) (err error) {\n\treturn store.initialize(\n\t\tconfiguration.GetString(prefix+\"keyspace\"),\n\t\tconfiguration.GetStringSlice(prefix+\"hosts\"),\n\t\tconfiguration.GetString(prefix+\"username\"),\n\t\tconfiguration.GetString(prefix+\"password\"),\n\t\tconfiguration.GetStringSlice(prefix+\"superLargeDirectories\"),\n\t\tconfiguration.GetString(prefix+\"localDC\"),\n\t)\n}\n\nfunc (store *CassandraStore) isSuperLargeDirectory(dir string) (dirHash string, isSuperLargeDirectory bool) {\n\tdirHash, isSuperLargeDirectory = store.superLargeDirectoryHash[dir]\n\treturn\n}\n\nfunc (store *CassandraStore) initialize(keyspace string, hosts []string, username string, password string, superLargeDirectories []string, localDC string) (err error) {\n\tstore.cluster = gocql.NewCluster(hosts...)\n\tif username != \"\" && password != \"\" {\n\t\tstore.cluster.Authenticator = gocql.PasswordAuthenticator{Username: username, Password: password}\n\t}\n\tstore.cluster.Keyspace = keyspace\n\tfallback := gocql.RoundRobinHostPolicy()\n\tif localDC != \"\" {\n\t\tfallback = gocql.DCAwareRoundRobinPolicy(localDC)\n\t}\n\tstore.cluster.PoolConfig.HostSelectionPolicy = gocql.TokenAwareHostPolicy(fallback)\n\tstore.cluster.Consistency = gocql.LocalQuorum\n\n\tstore.session, err = store.cluster.CreateSession()\n\tif err != nil {\n\t\tglog.V(0).Infof(\"Failed to open cassandra store, hosts %v, keyspace %s\", hosts, keyspace)\n\t}\n\n\t\/\/ set directory hash\n\tstore.superLargeDirectoryHash = make(map[string]string)\n\texistingHash := make(map[string]string)\n\tfor _, dir := range superLargeDirectories {\n\t\t\/\/ adding dir hash to avoid duplicated names\n\t\tdirHash := util.Md5String([]byte(dir))[:4]\n\t\tstore.superLargeDirectoryHash[dir] = dirHash\n\t\tif existingDir, found := existingHash[dirHash]; found {\n\t\t\tglog.Fatalf(\"directory %s has the same hash as %s\", dir, existingDir)\n\t\t}\n\t\texistingHash[dirHash] = dir\n\t}\n\treturn\n}\n\nfunc (store *CassandraStore) BeginTransaction(ctx context.Context) (context.Context, error) {\n\treturn ctx, nil\n}\nfunc (store *CassandraStore) CommitTransaction(ctx context.Context) error {\n\treturn nil\n}\nfunc (store *CassandraStore) RollbackTransaction(ctx context.Context) error {\n\treturn nil\n}\n\nfunc (store *CassandraStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {\n\n\tdir, name := entry.FullPath.DirAndName()\n\tif dirHash, ok := store.isSuperLargeDirectory(dir); ok {\n\t\tdir, name = dirHash+name, \"\"\n\t}\n\n\tmeta, err := entry.EncodeAttributesAndChunks()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"encode %s: %s\", entry.FullPath, err)\n\t}\n\n\tif len(entry.Chunks) > 50 {\n\t\tmeta = util.MaybeGzipData(meta)\n\t}\n\n\tif err := store.session.Query(\n\t\t\"INSERT INTO filemeta (directory,name,meta) VALUES(?,?,?) USING TTL ? \",\n\t\tdir, name, meta, entry.TtlSec).Exec(); err != nil {\n\t\treturn fmt.Errorf(\"insert %s: %s\", entry.FullPath, err)\n\t}\n\n\treturn nil\n}\n\nfunc (store *CassandraStore) UpdateEntry(ctx context.Context, entry *filer.Entry) (err error) {\n\n\treturn store.InsertEntry(ctx, entry)\n}\n\nfunc (store *CassandraStore) FindEntry(ctx context.Context, fullpath util.FullPath) (entry *filer.Entry, err error) {\n\n\tdir, name := fullpath.DirAndName()\n\tif dirHash, ok := store.isSuperLargeDirectory(dir); ok {\n\t\tdir, name = dirHash+name, \"\"\n\t}\n\n\tvar data []byte\n\tif err := store.session.Query(\n\t\t\"SELECT meta FROM filemeta WHERE directory=? AND name=?\",\n\t\tdir, name).Consistency(gocql.LocalOne).Scan(&data); err != nil {\n\t\tif err != gocql.ErrNotFound {\n\t\t\treturn nil, filer_pb.ErrNotFound\n\t\t}\n\t}\n\n\tif len(data) == 0 {\n\t\treturn nil, filer_pb.ErrNotFound\n\t}\n\n\tentry = &filer.Entry{\n\t\tFullPath: fullpath,\n\t}\n\terr = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data))\n\tif err != nil {\n\t\treturn entry, fmt.Errorf(\"decode %s : %v\", entry.FullPath, err)\n\t}\n\n\treturn entry, nil\n}\n\nfunc (store *CassandraStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) error {\n\n\tdir, name := fullpath.DirAndName()\n\tif dirHash, ok := store.isSuperLargeDirectory(dir); ok {\n\t\tdir, name = dirHash+name, \"\"\n\t}\n\n\tif err := store.session.Query(\n\t\t\"DELETE FROM filemeta WHERE directory=? AND name=?\",\n\t\tdir, name).Exec(); err != nil {\n\t\treturn fmt.Errorf(\"delete %s : %v\", fullpath, err)\n\t}\n\n\treturn nil\n}\n\nfunc (store *CassandraStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath, limit int64) error {\n\tif _, ok := store.isSuperLargeDirectory(string(fullpath)); ok {\n\t\treturn nil \/\/ filer.ErrUnsupportedSuperLargeDirectoryListing\n\t}\n\n\tif err := store.session.Query(\n\t\t\"DELETE FROM filemeta WHERE directory=?\",\n\t\tfullpath).Exec(); err != nil {\n\t\treturn fmt.Errorf(\"delete %s : %v\", fullpath, err)\n\t}\n\n\treturn nil\n}\n\nfunc (store *CassandraStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {\n\treturn lastFileName, filer.ErrUnsupportedListDirectoryPrefixed\n}\n\nfunc (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {\n\n\tif _, ok := store.isSuperLargeDirectory(string(dirPath)); ok {\n\t\treturn \/\/ nil, filer.ErrUnsupportedSuperLargeDirectoryListing\n\t}\n\n\tcqlStr := \"SELECT NAME, meta FROM filemeta WHERE directory=? AND name>? ORDER BY NAME ASC LIMIT ?\"\n\tif includeStartFile {\n\t\tcqlStr = \"SELECT NAME, meta FROM filemeta WHERE directory=? AND name>=? ORDER BY NAME ASC LIMIT ?\"\n\t}\n\n\tvar data []byte\n\tvar name string\n\titer := store.session.Query(cqlStr, string(dirPath), startFileName, limit+1).Iter()\n\tfor iter.Scan(&name, &data) {\n\t\tentry := &filer.Entry{\n\t\t\tFullPath: util.NewFullPath(string(dirPath), name),\n\t\t}\n\t\tlastFileName = name\n\t\tif decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {\n\t\t\terr = decodeErr\n\t\t\tglog.V(0).Infof(\"list %s : %v\", entry.FullPath, err)\n\t\t\tbreak\n\t\t}\n\t\tif !eachEntryFunc(entry) {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err := iter.Close(); err != nil {\n\t\tglog.V(0).Infof(\"list iterator close: %v\", err)\n\t}\n\n\treturn lastFileName, err\n}\n\nfunc (store *CassandraStore) Shutdown() {\n\tstore.session.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build windows\n\n\/\/ Package winlog provides convenience functions for using the Windows Event Log API.\npackage winlog\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\t\"golang.org\/x\/sys\/windows\"\n\t\"github.com\/google\/winops\/winlog\/wevtapi\"\n)\n\n\/\/ Windows function parameters.\nconst (\n\tlocalMachine = 0 \/\/ Identifies the local machine for Windows API functions.\n\tmustBeZero = 0 \/\/ For reserved Windows API function parameters.\n)\n\n\/\/ SubscribeConfig describes parameters for initializing a Windows Event Log subscription.\ntype SubscribeConfig struct {\n\tSession windows.Handle\n\tSignalEvent windows.Handle\n\tChannelPath *uint16\n\tQuery *uint16\n\tBookmark windows.Handle\n\tContext uintptr\n\tCallback uintptr\n\tFlags uint32\n}\n\n\/\/ Close closes a Windows event log handle.\nfunc Close(h windows.Handle) error {\n\treturn wevtapi.EvtClose(h)\n}\n\n\/\/ DefaultSubscribeConfig creates a default subscriber configuration to be used\n\/\/ to initialize a pull subscription for the classic Windows Event Log channels.\nfunc DefaultSubscribeConfig() (*SubscribeConfig, error) {\n\tvar config SubscribeConfig\n\tvar err error\n\n\t\/\/ Create a subscription signaler.\n\tconfig.SignalEvent, err = windows.CreateEvent(\n\t\tnil, \/\/ Default security descriptor.\n\t\t1, \/\/ Manual reset.\n\t\t1, \/\/ Initial state is signaled.\n\t\tnil) \/\/ Optional name.\n\tif err != nil {\n\t\treturn &config, fmt.Errorf(\"windows.CreateEvent failed: %v\", err)\n\t}\n\n\t\/\/ Build a structured XML query retrieving all the events from the classic\n\t\/\/ Windows Event Log channels and start the subscription from the oldest record.\n\tconfig.Flags = wevtapi.EvtSubscribeStartAtOldestRecord\n\txpaths := map[string]string{\"Application\": \"*\", \"Security\": \"*\", \"System\": \"*\"}\n\txmlQuery, err := BuildStructuredXMLQuery(xpaths)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"BuildStructuredXMLQuery failed: %v\", err)\n\t}\n\tconfig.Query, err = syscall.UTF16PtrFromString(string(xmlQuery))\n\tif err != nil {\n\t\treturn &config, fmt.Errorf(\"syscall.UTF16PtrFromString failed: %v\", err)\n\t}\n\n\treturn &config, nil\n}\n\n\/\/ GetRenderedEvents iterates over a subscription or query result set up to a configurable\n\/\/ maximum and returns the rendered events as a slice of UTF8 formatted XML strings.\n\/\/ publisherCache is a cache of Handles for publisher metadata to avoid\n\/\/ expensive Windows API calls. Pass in an empty map on the first call. Once\n\/\/ you've finished using GetRenderedEvents, pass all the contained values to Close.\nfunc GetRenderedEvents(config *SubscribeConfig, publisherCache map[string]windows.Handle, resultSet windows.Handle, maxEvents int, locale uint32) ([]string, error) {\n\tvar events = make([]windows.Handle, maxEvents)\n\tvar returned uint32\n\n\t\/\/ Get handles to events from the result set.\n\terr := wevtapi.EvtNext(\n\t\tresultSet, \/\/ Handle to query or subscription result set.\n\t\tuint32(len(events)), \/\/ The number of events to attempt to retrieve.\n\t\t&events[0], \/\/ Pointer to the array of event handles.\n\t\t2000, \/\/ Timeout in milliseconds to wait.\n\t\t0, \/\/ Reserved. Must be zero.\n\t\t&returned) \/\/ The number of handles in the array that are set by the API.\n\tif err == windows.ERROR_NO_MORE_ITEMS {\n\t\treturn nil, err\n\t} else if err != nil {\n\t\treturn nil, fmt.Errorf(\"wevtapi.EvtNext failed: %v\", err)\n\t}\n\n\t\/\/ Event handles must be closed after they are returned by EvtNext whether or not we use them.\n\tdefer func() {\n\t\tfor _, event := range events[:returned] {\n\t\t\tClose(event)\n\t\t}\n\t}()\n\n\t\/\/ Render events.\n\tvar renderedEvents []string\n\tfor _, event := range events[:returned] {\n\t\t\/\/ Render the basic XML representation of the event.\n\t\trenderedEvent, err := RenderFragment(event, wevtapi.EvtRenderEventXml)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"RenderEventXML failed: %v\", err)\n\t\t}\n\n\t\t\/\/ Attempt to render the full event using the basic event.\n\t\trenderedEvent, err = RenderFormattedMessageXML(event, renderedEvent, locale, publisherCache)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"RenderFormattedMessageXML failed: %v\", err)\n\t\t}\n\t\trenderedEvents = append(renderedEvents, renderedEvent)\n\t}\n\n\t\/\/ If a bookmark is used in the configuration, update it.\n\tif config.Bookmark != 0 {\n\t\terr = wevtapi.EvtUpdateBookmark(config.Bookmark, events[returned-1])\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"wevtapi.EvtUpdateBookmark failed: %v\", err)\n\t\t}\n\t}\n\n\treturn renderedEvents, err\n}\n\n\/\/ RenderFragment renders a Windows Event Log fragment according to the specified flag.\n\/\/ Supports rendering events and bookmarks as UTF8 formatted XML strings.\nfunc RenderFragment(fragment windows.Handle, flag uint32) (string, error) {\n\tvar bufferUsed uint32\n\tvar propertyCount uint32\n\n\t\/\/ Call EvtRender with a null buffer to get the required buffer size.\n\terr := wevtapi.EvtRender(\n\t\t0,\n\t\tfragment,\n\t\tflag,\n\t\t0,\n\t\tnil,\n\t\t&bufferUsed,\n\t\t&propertyCount)\n\tif err != syscall.ERROR_INSUFFICIENT_BUFFER {\n\t\treturn \"\", fmt.Errorf(\"wevtapi.EvtRender failed: %v\", err)\n\t}\n\n\t\/\/ Create a buffer based on the buffer size required.\n\tbuf := make([]uint16, bufferUsed\/2)\n\n\t\/\/ Render the fragment according to the flag.\n\terr = wevtapi.EvtRender(\n\t\t0,\n\t\tfragment,\n\t\tflag,\n\t\tbufferUsed,\n\t\tunsafe.Pointer(&buf[0]),\n\t\t&bufferUsed,\n\t\t&propertyCount)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"wevtapi.EvtRender failed: %v\", err)\n\t}\n\n\treturn syscall.UTF16ToString(buf), nil\n}\n\n\/\/ RenderFormattedMessageXML renders a Windows Event Log event as a UTF8 formatted XML string.\n\/\/ This includes the RenderingInfo node parsed by leveraging the event publisher and desired\n\/\/ locale (LCID). Returns the original raw XML if a publisher for the event is unavailable.\nfunc RenderFormattedMessageXML(event windows.Handle, renderedEvent string, locale uint32, cache map[string]windows.Handle) (string, error) {\n\t\/\/ Find the event publisher using the raw event XML.\n\tre := regexp.MustCompile(`Provider Name='(.*?)\\'`)\n\tpublisherMatch := re.FindStringSubmatch(renderedEvent)\n\tif len(publisherMatch) < 2 {\n\t\treturn \"\", fmt.Errorf(\"RenderFormattedMessageXML: no publisher name found in event\")\n\t}\n\tpublisherName := publisherMatch[1]\n\n\t\/\/ Lookup publisher metadata.\n\tvar pubHandle windows.Handle\n\tif val, ok := cache[publisherName]; ok {\n\t\tpubHandle = val\n\t} else {\n\t\tvar err error\n\t\tpubHandle, err = OpenPublisherMetadata(localMachine, publisherName, locale)\n\t\t\/\/ If there is no publisher metadata available return the original event.\n\t\tif err == syscall.ERROR_FILE_NOT_FOUND {\n\t\t\treturn renderedEvent, nil\n\t\t} else if err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"OpenPublisherMetadata failed: %v\", err)\n\t\t}\n\t\tcache[publisherName] = pubHandle\n\t}\n\n\t\/\/ Call EvtFormatMessage with a null buffer to get the required buffer size.\n\tvar bufferUsed uint32\n\terr := wevtapi.EvtFormatMessage(\n\t\tpubHandle, \/\/ Handle to provider metadata.\n\t\tevent, \/\/ Handle to an event.\n\t\t0, \/\/ Resource identifier of the message string. Null if flag isn't EvtFormatMessageId.\n\t\t0, \/\/ Number of values in the values parameter.\n\t\t0, \/\/ An array of insertion values to be used when formatting the event string. Typically set to null.\n\t\twevtapi.EvtFormatMessageXml, \/\/ Format message as an XML string.\n\t\t0, \/\/ Size of buffer.\n\t\tnil, \/\/ Null buffer.\n\t\t&bufferUsed) \/\/ Get the required buffer size.\n\tif err != syscall.ERROR_INSUFFICIENT_BUFFER {\n\t\treturn \"\", fmt.Errorf(\"wevtapi.EvtFormatMessage failed to get buffer size: %v\", err)\n\t}\n\n\tbuf := make([]uint16, bufferUsed\/2)\n\n\t\/\/ Render the event as a formatted XML string with RenderingInfo node.\n\terr = wevtapi.EvtFormatMessage(pubHandle, event, 0, 0, 0, wevtapi.EvtFormatMessageXml,\n\t\tbufferUsed, (*byte)(unsafe.Pointer(&buf[0])), &bufferUsed)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"wevtapi.EvtFormatMessage failed to render events as formatted XML: %v\", err)\n\t}\n\n\treturn syscall.UTF16ToString(buf), nil\n}\n\n\/\/ Subscribe initializes a subscription and returns a handle to the subscription.\n\/\/ Close must be called on the returned handle when finished.\nfunc Subscribe(config *SubscribeConfig) (windows.Handle, error) {\n\t\/\/ Initialize the subscription.\n\tsubscription, err := wevtapi.EvtSubscribe(\n\t\tconfig.Session,\n\t\tconfig.SignalEvent,\n\t\tconfig.ChannelPath,\n\t\tconfig.Query,\n\t\tconfig.Bookmark,\n\t\tconfig.Context,\n\t\tconfig.Callback,\n\t\tconfig.Flags)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"wevtapi.EvtSubscribe failed: %v\", err)\n\t}\n\n\treturn subscription, nil\n}\n<commit_msg>Currently we detect the specific case that an event has no associated publisher metadata, and in that case return the event un-augmented. However in other cases such as OpenPublisherMetadata we error out of the whole routine.<commit_after>\/\/ +build windows\n\n\/\/ Package winlog provides convenience functions for using the Windows Event Log API.\npackage winlog\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\tlog \"github.com\/golang\/glog\"\n\t\"golang.org\/x\/sys\/windows\"\n\t\"github.com\/google\/winops\/winlog\/wevtapi\"\n)\n\n\/\/ Windows function parameters.\nconst (\n\tlocalMachine = 0 \/\/ Identifies the local machine for Windows API functions.\n\tmustBeZero = 0 \/\/ For reserved Windows API function parameters.\n)\n\n\/\/ SubscribeConfig describes parameters for initializing a Windows Event Log subscription.\ntype SubscribeConfig struct {\n\tSession windows.Handle\n\tSignalEvent windows.Handle\n\tChannelPath *uint16\n\tQuery *uint16\n\tBookmark windows.Handle\n\tContext uintptr\n\tCallback uintptr\n\tFlags uint32\n}\n\n\/\/ Close closes a Windows event log handle.\nfunc Close(h windows.Handle) error {\n\treturn wevtapi.EvtClose(h)\n}\n\n\/\/ DefaultSubscribeConfig creates a default subscriber configuration to be used\n\/\/ to initialize a pull subscription for the classic Windows Event Log channels.\nfunc DefaultSubscribeConfig() (*SubscribeConfig, error) {\n\tvar config SubscribeConfig\n\tvar err error\n\n\t\/\/ Create a subscription signaler.\n\tconfig.SignalEvent, err = windows.CreateEvent(\n\t\tnil, \/\/ Default security descriptor.\n\t\t1, \/\/ Manual reset.\n\t\t1, \/\/ Initial state is signaled.\n\t\tnil) \/\/ Optional name.\n\tif err != nil {\n\t\treturn &config, fmt.Errorf(\"windows.CreateEvent failed: %v\", err)\n\t}\n\n\t\/\/ Build a structured XML query retrieving all the events from the classic\n\t\/\/ Windows Event Log channels and start the subscription from the oldest record.\n\tconfig.Flags = wevtapi.EvtSubscribeStartAtOldestRecord\n\txpaths := map[string]string{\"Application\": \"*\", \"Security\": \"*\", \"System\": \"*\"}\n\txmlQuery, err := BuildStructuredXMLQuery(xpaths)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"BuildStructuredXMLQuery failed: %v\", err)\n\t}\n\tconfig.Query, err = syscall.UTF16PtrFromString(string(xmlQuery))\n\tif err != nil {\n\t\treturn &config, fmt.Errorf(\"syscall.UTF16PtrFromString failed: %v\", err)\n\t}\n\n\treturn &config, nil\n}\n\n\/\/ GetRenderedEvents iterates over a subscription or query result set up to a configurable\n\/\/ maximum and returns the rendered events as a slice of UTF8 formatted XML strings.\n\/\/ publisherCache is a cache of Handles for publisher metadata to avoid\n\/\/ expensive Windows API calls. Pass in an empty map on the first call. Once\n\/\/ you've finished using GetRenderedEvents, pass all the contained values to Close.\nfunc GetRenderedEvents(config *SubscribeConfig, publisherCache map[string]windows.Handle, resultSet windows.Handle, maxEvents int, locale uint32) ([]string, error) {\n\tvar events = make([]windows.Handle, maxEvents)\n\tvar returned uint32\n\n\t\/\/ Get handles to events from the result set.\n\terr := wevtapi.EvtNext(\n\t\tresultSet, \/\/ Handle to query or subscription result set.\n\t\tuint32(len(events)), \/\/ The number of events to attempt to retrieve.\n\t\t&events[0], \/\/ Pointer to the array of event handles.\n\t\t2000, \/\/ Timeout in milliseconds to wait.\n\t\t0, \/\/ Reserved. Must be zero.\n\t\t&returned) \/\/ The number of handles in the array that are set by the API.\n\tif err == windows.ERROR_NO_MORE_ITEMS {\n\t\treturn nil, err\n\t} else if err != nil {\n\t\treturn nil, fmt.Errorf(\"wevtapi.EvtNext failed: %v\", err)\n\t}\n\n\t\/\/ Event handles must be closed after they are returned by EvtNext whether or not we use them.\n\tdefer func() {\n\t\tfor _, event := range events[:returned] {\n\t\t\tClose(event)\n\t\t}\n\t}()\n\n\t\/\/ Render events.\n\tvar renderedEvents []string\n\tfor _, event := range events[:returned] {\n\t\t\/\/ Render the basic XML representation of the event.\n\t\tfragment, err := RenderFragment(event, wevtapi.EvtRenderEventXml)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"RenderEventXML failed: %v\", err)\n\t\t}\n\n\t\t\/\/ Attempt to render the full event using the basic event.\n\t\trenderedEvent, err := RenderFormattedMessageXML(event, fragment, locale, publisherCache)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Failed to fully render event, returning fragment: %v\\n%v\", err, fragment)\n\t\t\trenderedEvent = fragment\n\t\t}\n\t\trenderedEvents = append(renderedEvents, renderedEvent)\n\t}\n\n\t\/\/ If a bookmark is used in the configuration, update it.\n\tif config.Bookmark != 0 {\n\t\terr = wevtapi.EvtUpdateBookmark(config.Bookmark, events[returned-1])\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"wevtapi.EvtUpdateBookmark failed: %v\", err)\n\t\t}\n\t}\n\n\treturn renderedEvents, err\n}\n\n\/\/ RenderFragment renders a Windows Event Log fragment according to the specified flag.\n\/\/ Supports rendering events and bookmarks as UTF8 formatted XML strings.\nfunc RenderFragment(fragment windows.Handle, flag uint32) (string, error) {\n\tvar bufferUsed uint32\n\tvar propertyCount uint32\n\n\t\/\/ Call EvtRender with a null buffer to get the required buffer size.\n\terr := wevtapi.EvtRender(\n\t\t0,\n\t\tfragment,\n\t\tflag,\n\t\t0,\n\t\tnil,\n\t\t&bufferUsed,\n\t\t&propertyCount)\n\tif err != syscall.ERROR_INSUFFICIENT_BUFFER {\n\t\treturn \"\", fmt.Errorf(\"wevtapi.EvtRender failed: %v\", err)\n\t}\n\n\t\/\/ Create a buffer based on the buffer size required.\n\tbuf := make([]uint16, bufferUsed\/2)\n\n\t\/\/ Render the fragment according to the flag.\n\terr = wevtapi.EvtRender(\n\t\t0,\n\t\tfragment,\n\t\tflag,\n\t\tbufferUsed,\n\t\tunsafe.Pointer(&buf[0]),\n\t\t&bufferUsed,\n\t\t&propertyCount)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"wevtapi.EvtRender failed: %v\", err)\n\t}\n\n\treturn syscall.UTF16ToString(buf), nil\n}\n\n\/\/ RenderFormattedMessageXML renders a Windows Event Log event as a UTF8 formatted XML string.\n\/\/ This includes the RenderingInfo node parsed by leveraging the event publisher and desired\n\/\/ locale (LCID). Returns the original raw XML if a publisher for the event is unavailable.\nfunc RenderFormattedMessageXML(event windows.Handle, renderedEvent string, locale uint32, cache map[string]windows.Handle) (string, error) {\n\t\/\/ Find the event publisher using the raw event XML.\n\tre := regexp.MustCompile(`Provider Name='(.*?)\\'`)\n\tpublisherMatch := re.FindStringSubmatch(renderedEvent)\n\tif len(publisherMatch) < 2 {\n\t\treturn \"\", fmt.Errorf(\"RenderFormattedMessageXML: no publisher name found in event\")\n\t}\n\tpublisherName := publisherMatch[1]\n\n\t\/\/ Lookup publisher metadata.\n\tvar pubHandle windows.Handle\n\tif val, ok := cache[publisherName]; ok {\n\t\tpubHandle = val\n\t} else {\n\t\tvar err error\n\t\tpubHandle, err = OpenPublisherMetadata(localMachine, publisherName, locale)\n\t\t\/\/ If there is no publisher metadata available return the original event.\n\t\tif err == syscall.ERROR_FILE_NOT_FOUND {\n\t\t\treturn renderedEvent, nil\n\t\t} else if err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"OpenPublisherMetadata failed: %v\", err)\n\t\t}\n\t\tcache[publisherName] = pubHandle\n\t}\n\n\t\/\/ Call EvtFormatMessage with a null buffer to get the required buffer size.\n\tvar bufferUsed uint32\n\terr := wevtapi.EvtFormatMessage(\n\t\tpubHandle, \/\/ Handle to provider metadata.\n\t\tevent, \/\/ Handle to an event.\n\t\t0, \/\/ Resource identifier of the message string. Null if flag isn't EvtFormatMessageId.\n\t\t0, \/\/ Number of values in the values parameter.\n\t\t0, \/\/ An array of insertion values to be used when formatting the event string. Typically set to null.\n\t\twevtapi.EvtFormatMessageXml, \/\/ Format message as an XML string.\n\t\t0, \/\/ Size of buffer.\n\t\tnil, \/\/ Null buffer.\n\t\t&bufferUsed) \/\/ Get the required buffer size.\n\tif err != syscall.ERROR_INSUFFICIENT_BUFFER {\n\t\treturn \"\", fmt.Errorf(\"wevtapi.EvtFormatMessage failed to get buffer size: %v\", err)\n\t}\n\n\tbuf := make([]uint16, bufferUsed\/2)\n\n\t\/\/ Render the event as a formatted XML string with RenderingInfo node.\n\terr = wevtapi.EvtFormatMessage(pubHandle, event, 0, 0, 0, wevtapi.EvtFormatMessageXml,\n\t\tbufferUsed, (*byte)(unsafe.Pointer(&buf[0])), &bufferUsed)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"wevtapi.EvtFormatMessage failed to render events as formatted XML: %v\", err)\n\t}\n\n\treturn syscall.UTF16ToString(buf), nil\n}\n\n\/\/ Subscribe initializes a subscription and returns a handle to the subscription.\n\/\/ Close must be called on the returned handle when finished.\nfunc Subscribe(config *SubscribeConfig) (windows.Handle, error) {\n\t\/\/ Initialize the subscription.\n\tsubscription, err := wevtapi.EvtSubscribe(\n\t\tconfig.Session,\n\t\tconfig.SignalEvent,\n\t\tconfig.ChannelPath,\n\t\tconfig.Query,\n\t\tconfig.Bookmark,\n\t\tconfig.Context,\n\t\tconfig.Callback,\n\t\tconfig.Flags)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"wevtapi.EvtSubscribe failed: %v\", err)\n\t}\n\n\treturn subscription, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package httpevents\n\nimport (\n\t\"bufio\"\n\t\"net\"\n\t\"net\/http\"\n\n\t\"github.com\/segmentio\/events\"\n)\n\n\/\/ NewHandler wraps the HTTP handler and returns a new handler which logs all\n\/\/ requests to logger.\n\/\/\n\/\/ Panics from handler are intercepted and trigger a 500 response if no response\n\/\/ header was sent yet. The panic is not slienced tho and is propagated to the\n\/\/ parent handler.\nfunc NewHandler(logger *events.Logger, handler http.Handler) http.Handler {\n\tif logger == nil {\n\t\tlogger = events.DefaultLogger\n\t}\n\treturn http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {\n\t\tvar laddr string\n\n\t\tif value, ok := req.Context().Value(http.LocalAddrContextKey).(net.Addr); ok {\n\t\t\tladdr = value.String()\n\t\t}\n\n\t\tw := &responseWriter{\n\t\t\tResponseWriter: res,\n\t\t\t\/\/ We capture all the values we need from req in case the object\n\t\t\t\/\/ gets modified by the handler.\n\t\t\tlogger: logger,\n\t\t\trequest: makeRequest(req, laddr),\n\t\t}\n\n\t\t\/\/ If the handler panics we want to make sure we report the issue in the\n\t\t\/\/ access log, while also ensuring that a response is going to be sent\n\t\t\/\/ down to the client.\n\t\t\/\/ We don't silence the panic here tho and instead we forward it back to\n\t\t\/\/ the parent handler which may need to be aware that a panic occurred.\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}()\n\n\t\t\/\/ The request is forwarded to the handler, if it never calls the\n\t\t\/\/ writer's WriteHeader method we force the call with \"200 OK\" status\n\t\t\/\/ to match the default behavior of the net\/http package (and also make\n\t\t\/\/ sure an access log will be written).\n\t\thandler.ServeHTTP(w, req)\n\t\tw.WriteHeader(http.StatusOK)\n\t})\n}\n\ntype responseWriter struct {\n\thttp.ResponseWriter\n\tlogger *events.Logger\n\trequest\n\twroteHeader bool\n}\n\nfunc (w *responseWriter) WriteHeader(status int) {\n\tif logger := w.logger; logger != nil {\n\t\tw.logger = nil\n\t\tw.status = status\n\t\tw.log(logger, 1)\n\t}\n\tif !w.wroteHeader {\n\t\tw.wroteHeader = true\n\t\tw.ResponseWriter.WriteHeader(status)\n\t}\n}\n\nfunc (w *responseWriter) Hijack() (conn net.Conn, rw *bufio.ReadWriter, err error) {\n\tif conn, rw, err = w.ResponseWriter.(http.Hijacker).Hijack(); err == nil {\n\t\tw.logger = nil\n\t\tw.wroteHeader = true\n\t}\n\treturn\n}\n<commit_msg>cleanup<commit_after>package httpevents\n\nimport (\n\t\"bufio\"\n\t\"net\"\n\t\"net\/http\"\n\n\t\"github.com\/segmentio\/events\"\n)\n\n\/\/ NewHandler wraps the HTTP handler and returns a new handler which logs all\n\/\/ requests to logger.\n\/\/\n\/\/ Panics from handler are intercepted and trigger a 500 response if no response\n\/\/ header was sent yet. The panic is not slienced tho and is propagated to the\n\/\/ parent handler.\nfunc NewHandler(logger *events.Logger, handler http.Handler) http.Handler {\n\tif logger == nil {\n\t\tlogger = events.DefaultLogger\n\t}\n\treturn http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) {\n\t\tvar laddr string\n\n\t\tif value, ok := req.Context().Value(http.LocalAddrContextKey).(net.Addr); ok {\n\t\t\tladdr = value.String()\n\t\t}\n\n\t\tw := &responseWriter{\n\t\t\tResponseWriter: res,\n\t\t\t\/\/ We capture all the values we need from req in case the object\n\t\t\t\/\/ gets modified by the handler.\n\t\t\tlogger: logger,\n\t\t\trequest: makeRequest(req, laddr),\n\t\t}\n\n\t\t\/\/ If the handler panics we want to make sure we report the issue in the\n\t\t\/\/ access log, while also ensuring that a response is going to be sent\n\t\t\/\/ down to the client.\n\t\t\/\/ We don't silence the panic here tho and instead we forward it back to\n\t\t\/\/ the parent handler which may need to be aware that a panic occurred.\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}()\n\n\t\t\/\/ The request is forwarded to the handler, if it never calls the\n\t\t\/\/ writer's WriteHeader method we force the call with \"200 OK\" status\n\t\t\/\/ to match the default behavior of the net\/http package (and also make\n\t\t\/\/ sure an access log will be written).\n\t\thandler.ServeHTTP(w, req)\n\t\tw.WriteHeader(http.StatusOK)\n\t})\n}\n\ntype responseWriter struct {\n\thttp.ResponseWriter\n\tlogger *events.Logger\n\trequest\n\twroteHeader bool\n}\n\nfunc (w *responseWriter) WriteHeader(status int) {\n\tw.log(1, status)\n\n\tif !w.wroteHeader {\n\t\tw.wroteHeader = true\n\t\tw.ResponseWriter.WriteHeader(status)\n\t}\n}\n\nfunc (w *responseWriter) Hijack() (conn net.Conn, rw *bufio.ReadWriter, err error) {\n\tif conn, rw, err = w.ResponseWriter.(http.Hijacker).Hijack(); err == nil {\n\t\tw.log(1, http.StatusSwitchingProtocols)\n\t}\n\treturn\n}\n\nfunc (w *responseWriter) log(depth int, status int) {\n\tif logger := w.logger; logger != nil {\n\t\tw.logger = nil\n\t\tw.request.status = status\n\t\tw.request.log(logger, depth+1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2013-2015 Conformal Systems LLC.\n\/\/ Use of this source code is governed by an ISC\n\/\/ license that can be found in the LICENSE file.\n\npackage wire\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/FactomProject\/FactomCode\/util\"\n\t\"github.com\/davecgh\/go-spew\/spew\"\n)\n\n\/\/ defaultTransactionAlloc is the default size used for the backing array\n\/\/ for transactions. The transaction array will dynamically grow as needed, but\n\/\/ this figure is intended to provide enough space for the number of\n\/\/ transactions in the vast majority of blocks without needing to grow the\n\/\/ backing array multiple times.\nconst defaultTransactionAlloc = 2048\n\n\/\/ MaxBlocksPerMsg is the maximum number of blocks allowed per message.\nconst MaxBlocksPerMsg = 500\n\n\/\/ MaxBlockPayload is the maximum bytes a block message can be in bytes.\nconst MaxBlockPayload = 1000000 \/\/ Not actually 1MB which would be 1024 * 1024\n\n\/\/ maxTxPerBlock is the maximum number of transactions that could\n\/\/ possibly fit into a block.\nconst maxTxPerBlock = (MaxBlockPayload \/ minTxPayload) + 1\n\n\/\/ TxLoc holds locator data for the offset and length of where a transaction is\n\/\/ located within a MsgBlock data buffer.\ntype TxLoc struct {\n\tTxStart int\n\tTxLen int\n}\n\n\/\/ MsgBlock implements the Message interface and represents a bitcoin\n\/\/ block message. It is used to deliver block and transaction information in\n\/\/ response to a getdata message (MsgGetData) for a given block hash.\ntype MsgBlock struct {\n\tHeader BlockHeader\n\tTransactions []*MsgTx\n}\n\n\/\/ AddTransaction adds a transaction to the message.\nfunc (msg *MsgBlock) AddTransaction(tx *MsgTx) error {\n\tmsg.Transactions = append(msg.Transactions, tx)\n\treturn nil\n\n}\n\n\/\/ ClearTransactions removes all transactions from the message.\nfunc (msg *MsgBlock) ClearTransactions() {\n\tmsg.Transactions = make([]*MsgTx, 0, defaultTransactionAlloc)\n}\n\n\/\/ BtcDecode decodes r using the bitcoin protocol encoding into the receiver.\n\/\/ This is part of the Message interface implementation.\n\/\/ See Deserialize for decoding blocks stored to disk, such as in a database, as\n\/\/ opposed to decoding blocks from the wire.\nfunc (msg *MsgBlock) BtcDecode(r io.Reader, pver uint32) error {\n\terr := readBlockHeader(r, pver, &msg.Header)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttxCount, err := readVarInt(r, pver)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Prevent more transactions than could possibly fit into a block.\n\t\/\/ It would be possible to cause memory exhaustion and panics without\n\t\/\/ a sane upper bound on this count.\n\tif txCount > maxTxPerBlock {\n\t\tstr := fmt.Sprintf(\"too many transactions to fit into a block \"+\n\t\t\t\"[count %d, max %d]\", txCount, maxTxPerBlock)\n\t\treturn messageError(\"MsgBlock.BtcDecode\", str)\n\t}\n\n\tmsg.Transactions = make([]*MsgTx, 0, txCount)\n\tfor i := uint64(0); i < txCount; i++ {\n\t\ttx := MsgTx{}\n\t\terr := tx.BtcDecode(r, pver)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmsg.Transactions = append(msg.Transactions, &tx)\n\t}\n\n\treturn nil\n}\n\n\/\/ Deserialize decodes a block from r into the receiver using a format that is\n\/\/ suitable for long-term storage such as a database while respecting the\n\/\/ Version field in the block. This function differs from BtcDecode in that\n\/\/ BtcDecode decodes from the bitcoin wire protocol as it was sent across the\n\/\/ network. The wire encoding can technically differ depending on the protocol\n\/\/ version and doesn't even really need to match the format of a stored block at\n\/\/ all. As of the time this comment was written, the encoded block is the same\n\/\/ in both instances, but there is a distinct difference and separating the two\n\/\/ allows the API to be flexible enough to deal with changes.\nfunc (msg *MsgBlock) Deserialize(r io.Reader) error {\n\t\/\/ At the current time, there is no difference between the wire encoding\n\t\/\/ at protocol version 0 and the stable long-term storage format. As\n\t\/\/ a result, make use of BtcDecode.\n\treturn msg.BtcDecode(r, 0)\n}\n\n\/\/ DeserializeTxLoc decodes r in the same manner Deserialize does, but it takes\n\/\/ a byte buffer instead of a generic reader and returns a slice containing the start and length of\n\/\/ each transaction within the raw data that is being deserialized.\nfunc (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) {\n\tutil.Trace()\n\tfullLen := r.Len()\n\n\tfmt.Println(\"fullLen=\", fullLen, spew.Sdump(r.Bytes()))\n\n\t\/\/ At the current time, there is no difference between the wire encoding\n\t\/\/ at protocol version 0 and the stable long-term storage format. As\n\t\/\/ a result, make use of existing wire protocol functions.\n\terr := readBlockHeader(r, 0, &msg.Header)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tutil.Trace()\n\n\ttxCount, err := readVarInt(r, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tutil.Trace(fmt.Sprintf(\"txCount= %d\", txCount))\n\n\t\/\/ Prevent more transactions than could possibly fit into a block.\n\t\/\/ It would be possible to cause memory exhaustion and panics without\n\t\/\/ a sane upper bound on this count.\n\tif txCount > maxTxPerBlock {\n\t\tstr := fmt.Sprintf(\"too many transactions to fit into a block \"+\n\t\t\t\"[count %d, max %d]\", txCount, maxTxPerBlock)\n\t\treturn nil, messageError(\"MsgBlock.DeserializeTxLoc\", str)\n\t}\n\tutil.Trace()\n\n\t\/\/ Deserialize each transaction while keeping track of its location\n\t\/\/ within the byte stream.\n\tmsg.Transactions = make([]*MsgTx, 0, txCount)\n\ttxLocs := make([]TxLoc, txCount)\n\tfor i := uint64(0); i < txCount; i++ {\n\t\ttxLocs[i].TxStart = fullLen - r.Len()\n\t\ttx := MsgTx{}\n\t\terr := tx.Deserialize(r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tutil.Trace(\"tx deserialized= \" + spew.Sdump(tx))\n\n\t\tmsg.Transactions = append(msg.Transactions, &tx)\n\t\ttxLocs[i].TxLen = (fullLen - r.Len()) - txLocs[i].TxStart\n\t}\n\n\treturn txLocs, nil\n}\n\n\/\/ BtcEncode encodes the receiver to w using the bitcoin protocol encoding.\n\/\/ This is part of the Message interface implementation.\n\/\/ See Serialize for encoding blocks to be stored to disk, such as in a\n\/\/ database, as opposed to encoding blocks for the wire.\nfunc (msg *MsgBlock) BtcEncode(w io.Writer, pver uint32) error {\n\terr := writeBlockHeader(w, pver, &msg.Header)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = writeVarInt(w, pver, uint64(len(msg.Transactions)))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, tx := range msg.Transactions {\n\t\terr = tx.BtcEncode(w, pver)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Serialize encodes the block to w using a format that suitable for long-term\n\/\/ storage such as a database while respecting the Version field in the block.\n\/\/ This function differs from BtcEncode in that BtcEncode encodes the block to\n\/\/ the bitcoin wire protocol in order to be sent across the network. The wire\n\/\/ encoding can technically differ depending on the protocol version and doesn't\n\/\/ even really need to match the format of a stored block at all. As of the\n\/\/ time this comment was written, the encoded block is the same in both\n\/\/ instances, but there is a distinct difference and separating the two allows\n\/\/ the API to be flexible enough to deal with changes.\nfunc (msg *MsgBlock) Serialize(w io.Writer) error {\n\t\/\/ At the current time, there is no difference between the wire encoding\n\t\/\/ at protocol version 0 and the stable long-term storage format. As\n\t\/\/ a result, make use of BtcEncode.\n\treturn msg.BtcEncode(w, 0)\n}\n\n\/\/ SerializeSize returns the number of bytes it would take to serialize the\n\/\/ the block.\nfunc (msg *MsgBlock) SerializeSize() int {\n\t\/\/ Block header bytes + Serialized varint size for the number of\n\t\/\/ transactions.\n\tn := blockHeaderLen + VarIntSerializeSize(uint64(len(msg.Transactions)))\n\n\tfor _, tx := range msg.Transactions {\n\t\tn += tx.SerializeSize()\n\t}\n\n\treturn n\n}\n\n\/\/ Command returns the protocol command string for the message. This is part\n\/\/ of the Message interface implementation.\nfunc (msg *MsgBlock) Command() string {\n\treturn CmdBlock\n}\n\n\/\/ MaxPayloadLength returns the maximum length the payload can be for the\n\/\/ receiver. This is part of the Message interface implementation.\nfunc (msg *MsgBlock) MaxPayloadLength(pver uint32) uint32 {\n\t\/\/ Block header at 80 bytes + transaction count + max transactions\n\t\/\/ which can vary up to the MaxBlockPayload (including the block header\n\t\/\/ and transaction count).\n\treturn MaxBlockPayload\n}\n\n\/\/ BlockSha computes the block identifier hash for this block.\nfunc (msg *MsgBlock) BlockSha() (ShaHash, error) {\n\treturn msg.Header.BlockSha()\n}\n\n\/\/ TxShas returns a slice of hashes of all of transactions in this block.\nfunc (msg *MsgBlock) TxShas() ([]ShaHash, error) {\n\tshaList := make([]ShaHash, 0, len(msg.Transactions))\n\tfor _, tx := range msg.Transactions {\n\t\t\/\/ Ignore error here since TxSha can't fail in the current\n\t\t\/\/ implementation except due to run-time panics.\n\t\tsha, _ := tx.TxSha()\n\t\tshaList = append(shaList, sha)\n\t}\n\treturn shaList, nil\n}\n\n\/\/ NewMsgBlock returns a new bitcoin block message that conforms to the\n\/\/ Message interface. See MsgBlock for details.\nfunc NewMsgBlock(blockHeader *BlockHeader) *MsgBlock {\n\treturn &MsgBlock{\n\t\tHeader: *blockHeader,\n\t\tTransactions: make([]*MsgTx, 0, defaultTransactionAlloc),\n\t}\n}\n<commit_msg>Changed the batch size to 10 for debugging<commit_after>\/\/ Copyright (c) 2013-2015 Conformal Systems LLC.\n\/\/ Use of this source code is governed by an ISC\n\/\/ license that can be found in the LICENSE file.\n\npackage wire\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/FactomProject\/FactomCode\/util\"\n\t\"github.com\/davecgh\/go-spew\/spew\"\n)\n\n\/\/ defaultTransactionAlloc is the default size used for the backing array\n\/\/ for transactions. The transaction array will dynamically grow as needed, but\n\/\/ this figure is intended to provide enough space for the number of\n\/\/ transactions in the vast majority of blocks without needing to grow the\n\/\/ backing array multiple times.\nconst defaultTransactionAlloc = 2048\n\n\/\/ MaxBlocksPerMsg is the maximum number of blocks allowed per message.\nconst MaxBlocksPerMsg = 10\n\n\/\/ MaxBlockPayload is the maximum bytes a block message can be in bytes.\nconst MaxBlockPayload = 1000000 \/\/ Not actually 1MB which would be 1024 * 1024\n\n\/\/ maxTxPerBlock is the maximum number of transactions that could\n\/\/ possibly fit into a block.\nconst maxTxPerBlock = (MaxBlockPayload \/ minTxPayload) + 1\n\n\/\/ TxLoc holds locator data for the offset and length of where a transaction is\n\/\/ located within a MsgBlock data buffer.\ntype TxLoc struct {\n\tTxStart int\n\tTxLen int\n}\n\n\/\/ MsgBlock implements the Message interface and represents a bitcoin\n\/\/ block message. It is used to deliver block and transaction information in\n\/\/ response to a getdata message (MsgGetData) for a given block hash.\ntype MsgBlock struct {\n\tHeader BlockHeader\n\tTransactions []*MsgTx\n}\n\n\/\/ AddTransaction adds a transaction to the message.\nfunc (msg *MsgBlock) AddTransaction(tx *MsgTx) error {\n\tmsg.Transactions = append(msg.Transactions, tx)\n\treturn nil\n\n}\n\n\/\/ ClearTransactions removes all transactions from the message.\nfunc (msg *MsgBlock) ClearTransactions() {\n\tmsg.Transactions = make([]*MsgTx, 0, defaultTransactionAlloc)\n}\n\n\/\/ BtcDecode decodes r using the bitcoin protocol encoding into the receiver.\n\/\/ This is part of the Message interface implementation.\n\/\/ See Deserialize for decoding blocks stored to disk, such as in a database, as\n\/\/ opposed to decoding blocks from the wire.\nfunc (msg *MsgBlock) BtcDecode(r io.Reader, pver uint32) error {\n\terr := readBlockHeader(r, pver, &msg.Header)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttxCount, err := readVarInt(r, pver)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Prevent more transactions than could possibly fit into a block.\n\t\/\/ It would be possible to cause memory exhaustion and panics without\n\t\/\/ a sane upper bound on this count.\n\tif txCount > maxTxPerBlock {\n\t\tstr := fmt.Sprintf(\"too many transactions to fit into a block \"+\n\t\t\t\"[count %d, max %d]\", txCount, maxTxPerBlock)\n\t\treturn messageError(\"MsgBlock.BtcDecode\", str)\n\t}\n\n\tmsg.Transactions = make([]*MsgTx, 0, txCount)\n\tfor i := uint64(0); i < txCount; i++ {\n\t\ttx := MsgTx{}\n\t\terr := tx.BtcDecode(r, pver)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmsg.Transactions = append(msg.Transactions, &tx)\n\t}\n\n\treturn nil\n}\n\n\/\/ Deserialize decodes a block from r into the receiver using a format that is\n\/\/ suitable for long-term storage such as a database while respecting the\n\/\/ Version field in the block. This function differs from BtcDecode in that\n\/\/ BtcDecode decodes from the bitcoin wire protocol as it was sent across the\n\/\/ network. The wire encoding can technically differ depending on the protocol\n\/\/ version and doesn't even really need to match the format of a stored block at\n\/\/ all. As of the time this comment was written, the encoded block is the same\n\/\/ in both instances, but there is a distinct difference and separating the two\n\/\/ allows the API to be flexible enough to deal with changes.\nfunc (msg *MsgBlock) Deserialize(r io.Reader) error {\n\t\/\/ At the current time, there is no difference between the wire encoding\n\t\/\/ at protocol version 0 and the stable long-term storage format. As\n\t\/\/ a result, make use of BtcDecode.\n\treturn msg.BtcDecode(r, 0)\n}\n\n\/\/ DeserializeTxLoc decodes r in the same manner Deserialize does, but it takes\n\/\/ a byte buffer instead of a generic reader and returns a slice containing the start and length of\n\/\/ each transaction within the raw data that is being deserialized.\nfunc (msg *MsgBlock) DeserializeTxLoc(r *bytes.Buffer) ([]TxLoc, error) {\n\tutil.Trace()\n\tfullLen := r.Len()\n\n\tfmt.Println(\"fullLen=\", fullLen, spew.Sdump(r.Bytes()))\n\n\t\/\/ At the current time, there is no difference between the wire encoding\n\t\/\/ at protocol version 0 and the stable long-term storage format. As\n\t\/\/ a result, make use of existing wire protocol functions.\n\terr := readBlockHeader(r, 0, &msg.Header)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tutil.Trace()\n\n\ttxCount, err := readVarInt(r, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tutil.Trace(fmt.Sprintf(\"txCount= %d\", txCount))\n\n\t\/\/ Prevent more transactions than could possibly fit into a block.\n\t\/\/ It would be possible to cause memory exhaustion and panics without\n\t\/\/ a sane upper bound on this count.\n\tif txCount > maxTxPerBlock {\n\t\tstr := fmt.Sprintf(\"too many transactions to fit into a block \"+\n\t\t\t\"[count %d, max %d]\", txCount, maxTxPerBlock)\n\t\treturn nil, messageError(\"MsgBlock.DeserializeTxLoc\", str)\n\t}\n\tutil.Trace()\n\n\t\/\/ Deserialize each transaction while keeping track of its location\n\t\/\/ within the byte stream.\n\tmsg.Transactions = make([]*MsgTx, 0, txCount)\n\ttxLocs := make([]TxLoc, txCount)\n\tfor i := uint64(0); i < txCount; i++ {\n\t\ttxLocs[i].TxStart = fullLen - r.Len()\n\t\ttx := MsgTx{}\n\t\terr := tx.Deserialize(r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tutil.Trace(\"tx deserialized= \" + spew.Sdump(tx))\n\n\t\tmsg.Transactions = append(msg.Transactions, &tx)\n\t\ttxLocs[i].TxLen = (fullLen - r.Len()) - txLocs[i].TxStart\n\t}\n\n\treturn txLocs, nil\n}\n\n\/\/ BtcEncode encodes the receiver to w using the bitcoin protocol encoding.\n\/\/ This is part of the Message interface implementation.\n\/\/ See Serialize for encoding blocks to be stored to disk, such as in a\n\/\/ database, as opposed to encoding blocks for the wire.\nfunc (msg *MsgBlock) BtcEncode(w io.Writer, pver uint32) error {\n\terr := writeBlockHeader(w, pver, &msg.Header)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = writeVarInt(w, pver, uint64(len(msg.Transactions)))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, tx := range msg.Transactions {\n\t\terr = tx.BtcEncode(w, pver)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ Serialize encodes the block to w using a format that suitable for long-term\n\/\/ storage such as a database while respecting the Version field in the block.\n\/\/ This function differs from BtcEncode in that BtcEncode encodes the block to\n\/\/ the bitcoin wire protocol in order to be sent across the network. The wire\n\/\/ encoding can technically differ depending on the protocol version and doesn't\n\/\/ even really need to match the format of a stored block at all. As of the\n\/\/ time this comment was written, the encoded block is the same in both\n\/\/ instances, but there is a distinct difference and separating the two allows\n\/\/ the API to be flexible enough to deal with changes.\nfunc (msg *MsgBlock) Serialize(w io.Writer) error {\n\t\/\/ At the current time, there is no difference between the wire encoding\n\t\/\/ at protocol version 0 and the stable long-term storage format. As\n\t\/\/ a result, make use of BtcEncode.\n\treturn msg.BtcEncode(w, 0)\n}\n\n\/\/ SerializeSize returns the number of bytes it would take to serialize the\n\/\/ the block.\nfunc (msg *MsgBlock) SerializeSize() int {\n\t\/\/ Block header bytes + Serialized varint size for the number of\n\t\/\/ transactions.\n\tn := blockHeaderLen + VarIntSerializeSize(uint64(len(msg.Transactions)))\n\n\tfor _, tx := range msg.Transactions {\n\t\tn += tx.SerializeSize()\n\t}\n\n\treturn n\n}\n\n\/\/ Command returns the protocol command string for the message. This is part\n\/\/ of the Message interface implementation.\nfunc (msg *MsgBlock) Command() string {\n\treturn CmdBlock\n}\n\n\/\/ MaxPayloadLength returns the maximum length the payload can be for the\n\/\/ receiver. This is part of the Message interface implementation.\nfunc (msg *MsgBlock) MaxPayloadLength(pver uint32) uint32 {\n\t\/\/ Block header at 80 bytes + transaction count + max transactions\n\t\/\/ which can vary up to the MaxBlockPayload (including the block header\n\t\/\/ and transaction count).\n\treturn MaxBlockPayload\n}\n\n\/\/ BlockSha computes the block identifier hash for this block.\nfunc (msg *MsgBlock) BlockSha() (ShaHash, error) {\n\treturn msg.Header.BlockSha()\n}\n\n\/\/ TxShas returns a slice of hashes of all of transactions in this block.\nfunc (msg *MsgBlock) TxShas() ([]ShaHash, error) {\n\tshaList := make([]ShaHash, 0, len(msg.Transactions))\n\tfor _, tx := range msg.Transactions {\n\t\t\/\/ Ignore error here since TxSha can't fail in the current\n\t\t\/\/ implementation except due to run-time panics.\n\t\tsha, _ := tx.TxSha()\n\t\tshaList = append(shaList, sha)\n\t}\n\treturn shaList, nil\n}\n\n\/\/ NewMsgBlock returns a new bitcoin block message that conforms to the\n\/\/ Message interface. See MsgBlock for details.\nfunc NewMsgBlock(blockHeader *BlockHeader) *MsgBlock {\n\treturn &MsgBlock{\n\t\tHeader: *blockHeader,\n\t\tTransactions: make([]*MsgTx, 0, defaultTransactionAlloc),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package tarfile_test\n\nimport (\n\t\"archive\/tar\"\n\t\/\/\t\"fmt\"\n\t\"github.com\/APTrust\/exchange\/tarfile\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestNewWriter(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"tarwriter_test\")\n\tif err != nil {\n\t\tassert.FailNow(t, \"Cannot create temp dir\", err.Error())\n\t}\n\ttempFilePath := filepath.Join(dir, \"test_file.tar\")\n\tdefer os.RemoveAll(dir)\n\tw := tarfile.NewWriter(tempFilePath)\n\tassert.NotNil(t, w)\n\tassert.Equal(t, tempFilePath, w.PathToTarFile)\n}\n\nfunc TestAndCloseOpen(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"tarwriter_test\")\n\tif err != nil {\n\t\tassert.FailNow(t, \"Cannot create temp dir\", err.Error())\n\t}\n\ttempFilePath := filepath.Join(dir, \"test_file.tar\")\n\tdefer os.RemoveAll(dir)\n\tw := tarfile.NewWriter(tempFilePath)\n\tdefer w.Close()\n\terr = w.Open()\n\tassert.Nil(t, err)\n\tif _, err := os.Stat(w.PathToTarFile); os.IsNotExist(err) {\n\t\tassert.Fail(t, \"Tar file does not exist at %s\", w.PathToTarFile)\n\t}\n\terr = w.Close()\n\tassert.Nil(t, err)\n}\n\nfunc TestAddToArchive(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"tarwriter_test\")\n\tif err != nil {\n\t\tassert.FailNow(t, \"Cannot create temp dir\", err.Error())\n\t}\n\ttempFilePath := filepath.Join(dir, \"test_file.tar\")\n\tdefer os.RemoveAll(dir)\n\tw := tarfile.NewWriter(tempFilePath)\n\tdefer w.Close()\n\terr = w.Open()\n\tassert.Nil(t, err)\n\tif _, err := os.Stat(w.PathToTarFile); os.IsNotExist(err) {\n\t\tassert.Fail(t, \"Tar file does not exist at %s\", w.PathToTarFile)\n\t}\n\terr = w.AddToArchive(pathToTestFile(\"cleanup_result.json\"), \"file1.json\")\n\tassert.Nil(t, err)\n\terr = w.AddToArchive(pathToTestFile(\"ingest_result.json\"), \"data\/subdir\/file2.json\")\n\tassert.Nil(t, err)\n\tw.Close()\n\n\tfile, err := os.Open(w.PathToTarFile)\n\tif file != nil {\n\t\tdefer file.Close()\n\t}\n\tif err != nil {\n\t\tassert.FailNow(t, \"Could not open tar file\", err.Error())\n\t}\n\tfilesInArchive := make([]string, 0)\n\treader := tar.NewReader(file)\n\tfor {\n\t\theader, err := reader.Next()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tfilesInArchive = append(filesInArchive, header.Name)\n\t}\n\tassert.Equal(t, \"file1.json\", filesInArchive[0])\n\tassert.Equal(t, \"data\/subdir\/file2.json\", filesInArchive[1])\n}\n\nfunc TestAddToArchiveWithClosedWriter(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"tarwriter_test\")\n\tif err != nil {\n\t\tassert.FailNow(t, \"Cannot create temp dir\", err.Error())\n\t}\n\ttempFilePath := filepath.Join(dir, \"test_file.tar\")\n\tdefer os.RemoveAll(dir)\n\tw := tarfile.NewWriter(tempFilePath)\n\n\t\/\/ Note that we have not opened the writer\n\terr = w.AddToArchive(pathToTestFile(\"cleanup_result.json\"), \"file1.json\")\n\tif err == nil {\n\t\tassert.FailNow(t, \"Should have gotten a tar write error\")\n\t}\n\tassert.True(t, strings.HasPrefix(err.Error(), \"Underlying TarWriter is nil\"))\n\n\t\/\/ Open and close the writer, so the file exists.\n\tw.Open()\n\tw.Close()\n\tif _, err := os.Stat(w.PathToTarFile); os.IsNotExist(err) {\n\t\tassert.Fail(t, \"Tar file does not exist at %s\", w.PathToTarFile)\n\t}\n\terr = w.AddToArchive(pathToTestFile(\"cleanup_result.json\"), \"file1.json\")\n\tif err == nil {\n\t\tassert.FailNow(t, \"Should have gotten a tar write error\")\n\t}\n\tassert.True(t, strings.HasPrefix(err.Error(), \"archive\/tar: write after close\"))\n\n}\n\nfunc TestAddToArchiveWithBadFilePath(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"tarwriter_test\")\n\tif err != nil {\n\t\tassert.FailNow(t, \"Cannot create temp dir\", err.Error())\n\t}\n\ttempFilePath := filepath.Join(dir, \"test_file.tar\")\n\tdefer os.RemoveAll(dir)\n\tw := tarfile.NewWriter(tempFilePath)\n\tdefer w.Close()\n\terr = w.Open()\n\tassert.Nil(t, err)\n\tif _, err := os.Stat(w.PathToTarFile); os.IsNotExist(err) {\n\t\tassert.Fail(t, \"Tar file does not exist at %s\", w.PathToTarFile)\n\t}\n\n\t\/\/ This file doesn't exist. Make sure we get the right error.\n\terr = w.AddToArchive(pathToTestFile(\"this_file_does_not_exist\"), \"file1.json\")\n\tif err == nil {\n\t\tassert.FailNow(t, \"Should have gotten a tar write error\")\n\t}\n\tassert.True(t, strings.Contains(err.Error(), \"no such file or directory\"))\n}\n\nfunc pathToTestFile(name string) string {\n\t_, filename, _, _ := runtime.Caller(0)\n\ttestDataPath, _ := filepath.Abs(path.Join(filepath.Dir(filename), \"..\", \"testdata\", \"json_objects\"))\n\treturn path.Join(testDataPath, name)\n}\n<commit_msg>Log specific test failure message for latest version of Go<commit_after>package tarfile_test\n\nimport (\n\t\"archive\/tar\"\n\t\/\/\t\"fmt\"\n\t\"github.com\/APTrust\/exchange\/tarfile\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestNewWriter(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"tarwriter_test\")\n\tif err != nil {\n\t\tassert.FailNow(t, \"Cannot create temp dir\", err.Error())\n\t}\n\ttempFilePath := filepath.Join(dir, \"test_file.tar\")\n\tdefer os.RemoveAll(dir)\n\tw := tarfile.NewWriter(tempFilePath)\n\tassert.NotNil(t, w)\n\tassert.Equal(t, tempFilePath, w.PathToTarFile)\n}\n\nfunc TestAndCloseOpen(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"tarwriter_test\")\n\tif err != nil {\n\t\tassert.FailNow(t, \"Cannot create temp dir\", err.Error())\n\t}\n\ttempFilePath := filepath.Join(dir, \"test_file.tar\")\n\tdefer os.RemoveAll(dir)\n\tw := tarfile.NewWriter(tempFilePath)\n\tdefer w.Close()\n\terr = w.Open()\n\tassert.Nil(t, err)\n\tif _, err := os.Stat(w.PathToTarFile); os.IsNotExist(err) {\n\t\tassert.Fail(t, \"Tar file does not exist at %s\", w.PathToTarFile)\n\t}\n\terr = w.Close()\n\tassert.Nil(t, err)\n}\n\nfunc TestAddToArchive(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"tarwriter_test\")\n\tif err != nil {\n\t\tassert.FailNow(t, \"Cannot create temp dir\", err.Error())\n\t}\n\ttempFilePath := filepath.Join(dir, \"test_file.tar\")\n\tdefer os.RemoveAll(dir)\n\tw := tarfile.NewWriter(tempFilePath)\n\tdefer w.Close()\n\terr = w.Open()\n\tassert.Nil(t, err)\n\tif _, err := os.Stat(w.PathToTarFile); os.IsNotExist(err) {\n\t\tassert.Fail(t, \"Tar file does not exist at %s\", w.PathToTarFile)\n\t}\n\terr = w.AddToArchive(pathToTestFile(\"cleanup_result.json\"), \"file1.json\")\n\tassert.Nil(t, err)\n\terr = w.AddToArchive(pathToTestFile(\"ingest_result.json\"), \"data\/subdir\/file2.json\")\n\tassert.Nil(t, err)\n\tw.Close()\n\n\tfile, err := os.Open(w.PathToTarFile)\n\tif file != nil {\n\t\tdefer file.Close()\n\t}\n\tif err != nil {\n\t\tassert.FailNow(t, \"Could not open tar file\", err.Error())\n\t}\n\tfilesInArchive := make([]string, 0)\n\treader := tar.NewReader(file)\n\tfor {\n\t\theader, err := reader.Next()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tfilesInArchive = append(filesInArchive, header.Name)\n\t}\n\tassert.Equal(t, \"file1.json\", filesInArchive[0])\n\tassert.Equal(t, \"data\/subdir\/file2.json\", filesInArchive[1])\n}\n\nfunc TestAddToArchiveWithClosedWriter(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"tarwriter_test\")\n\tif err != nil {\n\t\tassert.FailNow(t, \"Cannot create temp dir\", err.Error())\n\t}\n\ttempFilePath := filepath.Join(dir, \"test_file.tar\")\n\tdefer os.RemoveAll(dir)\n\tw := tarfile.NewWriter(tempFilePath)\n\n\t\/\/ Note that we have not opened the writer\n\terr = w.AddToArchive(pathToTestFile(\"cleanup_result.json\"), \"file1.json\")\n\tif err == nil {\n\t\tassert.FailNow(t, \"Should have gotten a tar write error\")\n\t}\n\tassert.True(t, strings.HasPrefix(err.Error(), \"Underlying TarWriter is nil\"))\n\n\t\/\/ Open and close the writer, so the file exists.\n\tw.Open()\n\tw.Close()\n\tif _, err := os.Stat(w.PathToTarFile); os.IsNotExist(err) {\n\t\tassert.Fail(t, \"Tar file does not exist at %s\", w.PathToTarFile)\n\t}\n\terr = w.AddToArchive(pathToTestFile(\"cleanup_result.json\"), \"file1.json\")\n\tif err == nil {\n\t\tassert.FailNow(t, \"Should have gotten a tar write error\")\n\t}\n\tassert.True(t, strings.HasPrefix(err.Error(), \"archive\/tar: write after close\"), err.Error())\n\n}\n\nfunc TestAddToArchiveWithBadFilePath(t *testing.T) {\n\tdir, err := ioutil.TempDir(\"\", \"tarwriter_test\")\n\tif err != nil {\n\t\tassert.FailNow(t, \"Cannot create temp dir\", err.Error())\n\t}\n\ttempFilePath := filepath.Join(dir, \"test_file.tar\")\n\tdefer os.RemoveAll(dir)\n\tw := tarfile.NewWriter(tempFilePath)\n\tdefer w.Close()\n\terr = w.Open()\n\tassert.Nil(t, err)\n\tif _, err := os.Stat(w.PathToTarFile); os.IsNotExist(err) {\n\t\tassert.Fail(t, \"Tar file does not exist at %s\", w.PathToTarFile)\n\t}\n\n\t\/\/ This file doesn't exist. Make sure we get the right error.\n\terr = w.AddToArchive(pathToTestFile(\"this_file_does_not_exist\"), \"file1.json\")\n\tif err == nil {\n\t\tassert.FailNow(t, \"Should have gotten a tar write error\")\n\t}\n\tassert.True(t, strings.Contains(err.Error(), \"no such file or directory\"))\n}\n\nfunc pathToTestFile(name string) string {\n\t_, filename, _, _ := runtime.Caller(0)\n\ttestDataPath, _ := filepath.Abs(path.Join(filepath.Dir(filename), \"..\", \"testdata\", \"json_objects\"))\n\treturn path.Join(testDataPath, name)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Licensed to the Apache Software Foundation (ASF) under one or more\n\/\/ contributor license agreements. See the NOTICE file distributed with\n\/\/ this work for additional information regarding copyright ownership.\n\/\/ The ASF licenses this file to You under the Apache License, Version 2.0\n\/\/ (the \"License\"); you may not use this file except in compliance with\n\/\/ the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ boot is the boot code for the Java SDK harness container. It is responsible\n\/\/ for retrieving staged files and invoking the JVM correctly.\npackage main\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/apache\/beam\/sdks\/go\/pkg\/beam\/artifact\"\n\tpb \"github.com\/apache\/beam\/sdks\/go\/pkg\/beam\/model\/fnexecution_v1\"\n\t\"github.com\/apache\/beam\/sdks\/go\/pkg\/beam\/provision\"\n\t\"github.com\/apache\/beam\/sdks\/go\/pkg\/beam\/util\/execx\"\n\t\"github.com\/apache\/beam\/sdks\/go\/pkg\/beam\/util\/grpcx\"\n\t\"github.com\/apache\/beam\/sdks\/go\/pkg\/beam\/util\/syscallx\"\n)\n\nvar (\n\t\/\/ Contract: https:\/\/s.apache.org\/beam-fn-api-container-contract.\n\n\tid = flag.String(\"id\", \"\", \"Local identifier (required).\")\n\tloggingEndpoint = flag.String(\"logging_endpoint\", \"\", \"Logging endpoint (required).\")\n\tartifactEndpoint = flag.String(\"artifact_endpoint\", \"\", \"Artifact endpoint (required).\")\n\tprovisionEndpoint = flag.String(\"provision_endpoint\", \"\", \"Provision endpoint (required).\")\n\tcontrolEndpoint = flag.String(\"control_endpoint\", \"\", \"Control endpoint (required).\")\n\tsemiPersistDir = flag.String(\"semi_persist_dir\", \"\/tmp\", \"Local semi-persistent directory (optional).\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tif *id == \"\" {\n\t\tlog.Fatal(\"No id provided.\")\n\t}\n\tif *loggingEndpoint == \"\" {\n\t\tlog.Fatal(\"No logging endpoint provided.\")\n\t}\n\tif *artifactEndpoint == \"\" {\n\t\tlog.Fatal(\"No artifact endpoint provided.\")\n\t}\n\tif *provisionEndpoint == \"\" {\n\t\tlog.Fatal(\"No provision endpoint provided.\")\n\t}\n\tif *controlEndpoint == \"\" {\n\t\tlog.Fatal(\"No control endpoint provided.\")\n\t}\n\n\tlog.Printf(\"Initializing java harness: %v\", strings.Join(os.Args, \" \"))\n\n\tctx := grpcx.WriteWorkerID(context.Background(), *id)\n\n\t\/\/ (1) Obtain the pipeline options\n\n\tinfo, err := provision.Info(ctx, *provisionEndpoint)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to obtain provisioning information: %v\", err)\n\t}\n\toptions, err := provision.ProtoToJSON(info.GetPipelineOptions())\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to convert pipeline options: %v\", err)\n\t}\n\n\t\/\/ (2) Retrieve the staged user jars. We ignore any disk limit,\n\t\/\/ because the staged jars are mandatory.\n\n\tdir := filepath.Join(*semiPersistDir, \"staged\")\n\n\tartifacts, err := artifact.Materialize(ctx, *artifactEndpoint, dir)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to retrieve staged files: %v\", err)\n\t}\n\n\t\/\/ (3) Invoke the Java harness, preserving artifact ordering in classpath.\n\n\tos.Setenv(\"PIPELINE_OPTIONS\", options)\n\tos.Setenv(\"LOGGING_API_SERVICE_DESCRIPTOR\", fmt.Sprintf(\"url: \\\"%v\\\"\\n\", *loggingEndpoint))\n\tos.Setenv(\"CONTROL_API_SERVICE_DESCRIPTOR\", fmt.Sprintf(\"url: \\\"%v\\\"\\n\", *controlEndpoint))\n\n\tconst jarsDir = \"\/opt\/apache\/beam\/jars\"\n\tcp := []string{\n\t\tfilepath.Join(jarsDir, \"slf4j-api.jar\"),\n\t\tfilepath.Join(jarsDir, \"slf4j-jdk14.jar\"),\n\t\tfilepath.Join(jarsDir, \"beam-sdks-java-harness.jar\"),\n\t}\n\tfor _, md := range artifacts {\n\t\tcp = append(cp, filepath.Join(dir, filepath.FromSlash(md.Name)))\n\t}\n\n\targs := []string{\n\t\t\"-Xmx\" + strconv.FormatUint(heapSizeLimit(info), 10),\n\t\t\"-cp\", strings.Join(cp, \":\"),\n\t\t\"org.apache.beam.fn.harness.FnHarness\",\n\t}\n\n\tlog.Printf(\"Executing: java %v\", strings.Join(args, \" \"))\n\n\tlog.Fatalf(\"Java exited: %v\", execx.Execute(\"java\", args...))\n}\n\n\/\/ heapSizeLimit returns 80% of the runner limit, if provided. If not provided,\n\/\/ it returns 70% of the physical memory on the machine. If it cannot determine\n\/\/ that value, it returns 1GB. This is an imperfect heuristic. It aims to\n\/\/ ensure there is memory for non-heap use and other overhead, while also not\n\/\/ underutilizing the machine.\nfunc heapSizeLimit(info *pb.ProvisionInfo) uint64 {\n\tif provided := info.GetResourceLimits().GetMemory().GetSize(); provided > 0 {\n\t\treturn (provided * 80) \/ 100\n\t}\n\tif size, err := syscallx.PhysicalMemorySize(); err == nil {\n\t\treturn (size * 70) \/ 100\n\t}\n\treturn 1 << 30\n}\n<commit_msg>[BEAM-3113] Disable stack trace optimization in java container<commit_after>\/\/ Licensed to the Apache Software Foundation (ASF) under one or more\n\/\/ contributor license agreements. See the NOTICE file distributed with\n\/\/ this work for additional information regarding copyright ownership.\n\/\/ The ASF licenses this file to You under the Apache License, Version 2.0\n\/\/ (the \"License\"); you may not use this file except in compliance with\n\/\/ the License. You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ boot is the boot code for the Java SDK harness container. It is responsible\n\/\/ for retrieving staged files and invoking the JVM correctly.\npackage main\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/apache\/beam\/sdks\/go\/pkg\/beam\/artifact\"\n\tpb \"github.com\/apache\/beam\/sdks\/go\/pkg\/beam\/model\/fnexecution_v1\"\n\t\"github.com\/apache\/beam\/sdks\/go\/pkg\/beam\/provision\"\n\t\"github.com\/apache\/beam\/sdks\/go\/pkg\/beam\/util\/execx\"\n\t\"github.com\/apache\/beam\/sdks\/go\/pkg\/beam\/util\/grpcx\"\n\t\"github.com\/apache\/beam\/sdks\/go\/pkg\/beam\/util\/syscallx\"\n)\n\nvar (\n\t\/\/ Contract: https:\/\/s.apache.org\/beam-fn-api-container-contract.\n\n\tid = flag.String(\"id\", \"\", \"Local identifier (required).\")\n\tloggingEndpoint = flag.String(\"logging_endpoint\", \"\", \"Logging endpoint (required).\")\n\tartifactEndpoint = flag.String(\"artifact_endpoint\", \"\", \"Artifact endpoint (required).\")\n\tprovisionEndpoint = flag.String(\"provision_endpoint\", \"\", \"Provision endpoint (required).\")\n\tcontrolEndpoint = flag.String(\"control_endpoint\", \"\", \"Control endpoint (required).\")\n\tsemiPersistDir = flag.String(\"semi_persist_dir\", \"\/tmp\", \"Local semi-persistent directory (optional).\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tif *id == \"\" {\n\t\tlog.Fatal(\"No id provided.\")\n\t}\n\tif *loggingEndpoint == \"\" {\n\t\tlog.Fatal(\"No logging endpoint provided.\")\n\t}\n\tif *artifactEndpoint == \"\" {\n\t\tlog.Fatal(\"No artifact endpoint provided.\")\n\t}\n\tif *provisionEndpoint == \"\" {\n\t\tlog.Fatal(\"No provision endpoint provided.\")\n\t}\n\tif *controlEndpoint == \"\" {\n\t\tlog.Fatal(\"No control endpoint provided.\")\n\t}\n\n\tlog.Printf(\"Initializing java harness: %v\", strings.Join(os.Args, \" \"))\n\n\tctx := grpcx.WriteWorkerID(context.Background(), *id)\n\n\t\/\/ (1) Obtain the pipeline options\n\n\tinfo, err := provision.Info(ctx, *provisionEndpoint)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to obtain provisioning information: %v\", err)\n\t}\n\toptions, err := provision.ProtoToJSON(info.GetPipelineOptions())\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to convert pipeline options: %v\", err)\n\t}\n\n\t\/\/ (2) Retrieve the staged user jars. We ignore any disk limit,\n\t\/\/ because the staged jars are mandatory.\n\n\tdir := filepath.Join(*semiPersistDir, \"staged\")\n\n\tartifacts, err := artifact.Materialize(ctx, *artifactEndpoint, dir)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to retrieve staged files: %v\", err)\n\t}\n\n\t\/\/ (3) Invoke the Java harness, preserving artifact ordering in classpath.\n\n\tos.Setenv(\"PIPELINE_OPTIONS\", options)\n\tos.Setenv(\"LOGGING_API_SERVICE_DESCRIPTOR\", fmt.Sprintf(\"url: \\\"%v\\\"\\n\", *loggingEndpoint))\n\tos.Setenv(\"CONTROL_API_SERVICE_DESCRIPTOR\", fmt.Sprintf(\"url: \\\"%v\\\"\\n\", *controlEndpoint))\n\n\tconst jarsDir = \"\/opt\/apache\/beam\/jars\"\n\tcp := []string{\n\t\tfilepath.Join(jarsDir, \"slf4j-api.jar\"),\n\t\tfilepath.Join(jarsDir, \"slf4j-jdk14.jar\"),\n\t\tfilepath.Join(jarsDir, \"beam-sdks-java-harness.jar\"),\n\t}\n\tfor _, md := range artifacts {\n\t\tcp = append(cp, filepath.Join(dir, filepath.FromSlash(md.Name)))\n\t}\n\n\targs := []string{\n\t\t\"-Xmx\" + strconv.FormatUint(heapSizeLimit(info), 10),\n\t\t\"-XX:-OmitStackTraceInFastThrow\",\n\t\t\"-cp\", strings.Join(cp, \":\"),\n\t\t\"org.apache.beam.fn.harness.FnHarness\",\n\t}\n\n\tlog.Printf(\"Executing: java %v\", strings.Join(args, \" \"))\n\n\tlog.Fatalf(\"Java exited: %v\", execx.Execute(\"java\", args...))\n}\n\n\/\/ heapSizeLimit returns 80% of the runner limit, if provided. If not provided,\n\/\/ it returns 70% of the physical memory on the machine. If it cannot determine\n\/\/ that value, it returns 1GB. This is an imperfect heuristic. It aims to\n\/\/ ensure there is memory for non-heap use and other overhead, while also not\n\/\/ underutilizing the machine.\nfunc heapSizeLimit(info *pb.ProvisionInfo) uint64 {\n\tif provided := info.GetResourceLimits().GetMemory().GetSize(); provided > 0 {\n\t\treturn (provided * 80) \/ 100\n\t}\n\tif size, err := syscallx.PhysicalMemorySize(); err == nil {\n\t\treturn (size * 70) \/ 100\n\t}\n\treturn 1 << 30\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage instancepoller\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\tgc \"launchpad.net\/gocheck\"\n\n\t\"launchpad.net\/juju-core\/environs\"\n\t\"launchpad.net\/juju-core\/errors\"\n\t\"launchpad.net\/juju-core\/instance\"\n\t\"launchpad.net\/juju-core\/testing\/testbase\"\n)\n\ntype aggregateSuite struct {\n\ttestbase.LoggingSuite\n}\n\nvar _ = gc.Suite(&aggregateSuite{})\n\ntype testInstance struct {\n\tinstance.Instance\n\taddresses []instance.Address\n\tstatus string\n\terr error\n}\n\nvar _ instance.Instance = (*testInstance)(nil)\n\nfunc (t *testInstance) Addresses() ([]instance.Address, error) {\n\tif t.err != nil {\n\t\treturn nil, t.err\n\t}\n\treturn t.addresses, nil\n}\n\nfunc (t *testInstance) Status() string {\n\treturn t.status\n}\n\ntype testInstanceGetter struct {\n\tids []instance.Id\n\tresults []*testInstance\n\terr error\n}\n\nfunc (i *testInstanceGetter) Instances(ids []instance.Id) (result []instance.Instance, err error) {\n\ti.ids = ids\n\terr = i.err\n\tfor _, inst := range i.results {\n\t\tif inst == nil {\n\t\t\tresult = append(result, nil)\n\t\t} else {\n\t\t\tresult = append(result, inst)\n\t\t}\n\t}\n\treturn\n}\n\nfunc newTestInstance(status string, addresses []string) *testInstance {\n\tthisInstance := testInstance{status: status}\n\tfor _, address := range addresses {\n\t\tthisInstance.addresses = append(thisInstance.addresses, instance.NewAddress(address))\n\t}\n\treturn &thisInstance\n}\n\nfunc (s *aggregateSuite) TestSingleRequest(c *gc.C) {\n\ttestGetter := new(testInstanceGetter)\n\tinstance1 := newTestInstance(\"foobar\", []string{\"127.0.0.1\", \"192.168.1.1\"})\n\ttestGetter.results = []*testInstance{instance1}\n\taggregator := newAggregator(testGetter)\n\n\tinfo, err := aggregator.instanceInfo(\"foo\")\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(info, gc.DeepEquals, instanceInfo{\n\t\tstatus: \"foobar\",\n\t\taddresses: instance1.addresses,\n\t})\n\tc.Assert(testGetter.ids, gc.DeepEquals, []instance.Id{\"foo\"})\n}\n\nfunc (s *aggregateSuite) TestRequestBatching(c *gc.C) {\n\ts.PatchValue(&gatherTime, 30*time.Millisecond)\n\ttestGetter := new(testInstanceGetter)\n\n\tinstance1 := newTestInstance(\"foobar\", []string{\"127.0.0.1\", \"192.168.1.1\"})\n\ttestGetter.results = []*testInstance{instance1}\n\taggregator := newAggregator(testGetter)\n\n\treplyChan := make(chan instanceInfoReply)\n\treq := instanceInfoReq{\n\t\treply: replyChan,\n\t\tinstId: instance.Id(\"foo\"),\n\t}\n\taggregator.reqc <- req\n\treply := <-replyChan\n\tc.Assert(reply.err, gc.IsNil)\n\n\tinstance2 := newTestInstance(\"not foobar\", []string{\"192.168.1.2\"})\n\tinstance3 := newTestInstance(\"ok-ish\", []string{\"192.168.1.3\"})\n\n\treplyChan2 := make(chan instanceInfoReply)\n\treplyChan3 := make(chan instanceInfoReply)\n\n\taggregator.reqc <- instanceInfoReq{reply: replyChan2, instId: instance.Id(\"foo2\")}\n\taggregator.reqc <- instanceInfoReq{reply: replyChan3, instId: instance.Id(\"foo3\")}\n\n\ttestGetter.results = []*testInstance{instance2, instance3}\n\treply2 := <-replyChan2\n\treply3 := <-replyChan3\n\tc.Assert(reply2.err, gc.IsNil)\n\tc.Assert(reply3.err, gc.IsNil)\n\tc.Assert(reply2.info.status, gc.Equals, \"not foobar\")\n\tc.Assert(reply3.info.status, gc.Equals, \"ok-ish\")\n\n\tc.Assert(testGetter.ids, gc.DeepEquals, []instance.Id{instance.Id(\"foo2\"), instance.Id(\"foo3\")})\n}\n\nfunc (s *aggregateSuite) TestError(c *gc.C) {\n\ttestGetter := new(testInstanceGetter)\n\tourError := fmt.Errorf(\"Some error\")\n\ttestGetter.err = ourError\n\n\taggregator := newAggregator(testGetter)\n\n\t_, err := aggregator.instanceInfo(\"foo\")\n\tc.Assert(err, gc.Equals, ourError)\n}\n\nfunc (s *aggregateSuite) TestPartialErrResponse(c *gc.C) {\n\ttestGetter := new(testInstanceGetter)\n\ttestGetter.err = environs.ErrPartialInstances\n\ttestGetter.results = []*testInstance{nil}\n\n\taggregator := newAggregator(testGetter)\n\t_, err := aggregator.instanceInfo(\"foo\")\n\n\tc.Assert(err, gc.DeepEquals, errors.NotFoundf(\"instance foo\"))\n}\n\nfunc (s *aggregateSuite) TestAddressesError(c *gc.C) {\n\ttestGetter := new(testInstanceGetter)\n\tinstance1 := newTestInstance(\"foobar\", []string{\"127.0.0.1\", \"192.168.1.1\"})\n\tourError := fmt.Errorf(\"gotcha\")\n\tinstance1.err = ourError\n\ttestGetter.results = []*testInstance{instance1}\n\n\taggregator := newAggregator(testGetter)\n\t_, err := aggregator.instanceInfo(\"foo\")\n\tc.Assert(err, gc.Equals, ourError)\n}\n\nfunc (s *aggregateSuite) TestKillAndWait(c *gc.C) {\n\ttestGetter := new(testInstanceGetter)\n\taggregator := newAggregator(testGetter)\n\taggregator.Kill()\n\terr := aggregator.Wait()\n\tc.Assert(err, gc.IsNil)\n}\n<commit_msg>Don't use channel directly in batch test<commit_after>\/\/ Copyright 2014 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage instancepoller\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\tgc \"launchpad.net\/gocheck\"\n\n\t\"launchpad.net\/juju-core\/environs\"\n\t\"launchpad.net\/juju-core\/errors\"\n\t\"launchpad.net\/juju-core\/instance\"\n\t\"launchpad.net\/juju-core\/testing\/testbase\"\n)\n\ntype aggregateSuite struct {\n\ttestbase.LoggingSuite\n}\n\nvar _ = gc.Suite(&aggregateSuite{})\n\ntype testInstance struct {\n\tinstance.Instance\n\taddresses []instance.Address\n\tstatus string\n\terr error\n}\n\nvar _ instance.Instance = (*testInstance)(nil)\n\nfunc (t *testInstance) Addresses() ([]instance.Address, error) {\n\tif t.err != nil {\n\t\treturn nil, t.err\n\t}\n\treturn t.addresses, nil\n}\n\nfunc (t *testInstance) Status() string {\n\treturn t.status\n}\n\ntype testInstanceGetter struct {\n\tids []instance.Id\n\tresults []*testInstance\n\terr error\n}\n\nfunc (i *testInstanceGetter) Instances(ids []instance.Id) (result []instance.Instance, err error) {\n\ti.ids = ids\n\terr = i.err\n\tfor _, inst := range i.results {\n\t\tif inst == nil {\n\t\t\tresult = append(result, nil)\n\t\t} else {\n\t\t\tresult = append(result, inst)\n\t\t}\n\t}\n\treturn\n}\n\nfunc newTestInstance(status string, addresses []string) *testInstance {\n\tthisInstance := testInstance{status: status}\n\tfor _, address := range addresses {\n\t\tthisInstance.addresses = append(thisInstance.addresses, instance.NewAddress(address))\n\t}\n\treturn &thisInstance\n}\n\nfunc (s *aggregateSuite) TestSingleRequest(c *gc.C) {\n\ttestGetter := new(testInstanceGetter)\n\tinstance1 := newTestInstance(\"foobar\", []string{\"127.0.0.1\", \"192.168.1.1\"})\n\ttestGetter.results = []*testInstance{instance1}\n\taggregator := newAggregator(testGetter)\n\n\tinfo, err := aggregator.instanceInfo(\"foo\")\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(info, gc.DeepEquals, instanceInfo{\n\t\tstatus: \"foobar\",\n\t\taddresses: instance1.addresses,\n\t})\n\tc.Assert(testGetter.ids, gc.DeepEquals, []instance.Id{\"foo\"})\n}\n\nfunc (s *aggregateSuite) TestRequestBatching(c *gc.C) {\n\ts.PatchValue(&gatherTime, 30*time.Millisecond)\n\ttestGetter := new(testInstanceGetter)\n\n\tinstance1 := newTestInstance(\"foobar\", []string{\"127.0.0.1\", \"192.168.1.1\"})\n\ttestGetter.results = []*testInstance{instance1}\n\taggregator := newAggregator(testGetter)\n\n\treplyChan := make(chan instanceInfoReply)\n\treq := instanceInfoReq{\n\t\treply: replyChan,\n\t\tinstId: instance.Id(\"foo\"),\n\t}\n\taggregator.reqc <- req\n\treply := <-replyChan\n\tc.Assert(reply.err, gc.IsNil)\n\n\tinstance2 := newTestInstance(\"not foobar\", []string{\"192.168.1.2\"})\n\tinstance3 := newTestInstance(\"ok-ish\", []string{\"192.168.1.3\"})\n\ttestGetter.results = []*testInstance{instance2, instance3}\n\n\tvar wg sync.WaitGroup\n\tcheckInfo := func(id instance.Id, expectStatus string) {\n\t\tinfo, err := aggregator.instanceInfo(id)\n\t\tc.Check(err, gc.IsNil)\n\t\tc.Check(info.status, gc.Equals, expectStatus)\n\t\twg.Done()\n\t}\n\n\twg.Add(2)\n\tgo checkInfo(\"foo2\", \"not foobar\")\n\tgo checkInfo(\"foo3\", \"ok-ish\")\n\twg.Wait()\n\n\tc.Assert(len(testGetter.ids), gc.DeepEquals, 2)\n}\n\nfunc (s *aggregateSuite) TestError(c *gc.C) {\n\ttestGetter := new(testInstanceGetter)\n\tourError := fmt.Errorf(\"Some error\")\n\ttestGetter.err = ourError\n\n\taggregator := newAggregator(testGetter)\n\n\t_, err := aggregator.instanceInfo(\"foo\")\n\tc.Assert(err, gc.Equals, ourError)\n}\n\nfunc (s *aggregateSuite) TestPartialErrResponse(c *gc.C) {\n\ttestGetter := new(testInstanceGetter)\n\ttestGetter.err = environs.ErrPartialInstances\n\ttestGetter.results = []*testInstance{nil}\n\n\taggregator := newAggregator(testGetter)\n\t_, err := aggregator.instanceInfo(\"foo\")\n\n\tc.Assert(err, gc.DeepEquals, errors.NotFoundf(\"instance foo\"))\n}\n\nfunc (s *aggregateSuite) TestAddressesError(c *gc.C) {\n\ttestGetter := new(testInstanceGetter)\n\tinstance1 := newTestInstance(\"foobar\", []string{\"127.0.0.1\", \"192.168.1.1\"})\n\tourError := fmt.Errorf(\"gotcha\")\n\tinstance1.err = ourError\n\ttestGetter.results = []*testInstance{instance1}\n\n\taggregator := newAggregator(testGetter)\n\t_, err := aggregator.instanceInfo(\"foo\")\n\tc.Assert(err, gc.Equals, ourError)\n}\n\nfunc (s *aggregateSuite) TestKillAndWait(c *gc.C) {\n\ttestGetter := new(testInstanceGetter)\n\taggregator := newAggregator(testGetter)\n\taggregator.Kill()\n\terr := aggregator.Wait()\n\tc.Assert(err, gc.IsNil)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package reference is a fork of the upstream docker\/docker\/reference package.\n\/\/ The package is forked because we need consistency especially when storing and\n\/\/ checking signatures (RH patches break this consistency because they modify\n\/\/ docker\/docker\/reference as part of a patch carried in projectatomic\/docker).\n\/\/ The version of this package is v1.12.1 from upstream, update as necessary.\npackage reference\n<commit_msg>Finally, get rid of the last remains of c\/i\/docker\/reference.<commit_after><|endoftext|>"} {"text":"<commit_before>package driver\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/nomad\/client\/allocdir\"\n\t\"github.com\/hashicorp\/nomad\/client\/config\"\n\t\"github.com\/hashicorp\/nomad\/client\/driver\/env\"\n\t\"github.com\/hashicorp\/nomad\/client\/fingerprint\"\n\t\"github.com\/hashicorp\/nomad\/nomad\/structs\"\n\n\tdstructs \"github.com\/hashicorp\/nomad\/client\/driver\/structs\"\n\tcstructs \"github.com\/hashicorp\/nomad\/client\/structs\"\n)\n\nvar (\n\t\/\/ BuiltinDrivers contains the built in registered drivers\n\t\/\/ which are available for allocation handling\n\tBuiltinDrivers = map[string]Factory{\n\t\t\"docker\": NewDockerDriver,\n\t\t\"exec\": NewExecDriver,\n\t\t\"raw_exec\": NewRawExecDriver,\n\t\t\"java\": NewJavaDriver,\n\t\t\"qemu\": NewQemuDriver,\n\t\t\"rkt\": NewRktDriver,\n\t}\n\n\t\/\/ DriverStatsNotImplemented is the error to be returned if a driver doesn't\n\t\/\/ implement stats.\n\tDriverStatsNotImplemented = errors.New(\"stats not implemented for driver\")\n)\n\n\/\/ NewDriver is used to instantiate and return a new driver\n\/\/ given the name and a logger\nfunc NewDriver(name string, ctx *DriverContext) (Driver, error) {\n\t\/\/ Lookup the factory function\n\tfactory, ok := BuiltinDrivers[name]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unknown driver '%s'\", name)\n\t}\n\n\t\/\/ Instantiate the driver\n\tf := factory(ctx)\n\treturn f, nil\n}\n\n\/\/ Factory is used to instantiate a new Driver\ntype Factory func(*DriverContext) Driver\n\n\/\/ CreatedResources is a map of resources (eg downloaded images) created by a driver\n\/\/ that must be cleaned up.\ntype CreatedResources struct {\n\tResources map[string][]string\n}\n\nfunc NewCreatedResources() *CreatedResources {\n\treturn &CreatedResources{Resources: make(map[string][]string)}\n}\n\n\/\/ Add a new resource if it doesn't already exist.\nfunc (r *CreatedResources) Add(k, v string) {\n\tif r.Resources == nil {\n\t\tr.Resources = map[string][]string{k: []string{v}}\n\t\treturn\n\t}\n\texisting, ok := r.Resources[k]\n\tif !ok {\n\t\t\/\/ Key doesn't exist, create it\n\t\tr.Resources[k] = []string{v}\n\t\treturn\n\t}\n\tfor _, item := range existing {\n\t\tif item == v {\n\t\t\t\/\/ resource exists, return\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Resource type exists but value did not, append it\n\tr.Resources[k] = append(existing, v)\n\treturn\n}\n\n\/\/ Remove a resource. Return true if removed, otherwise false.\n\/\/\n\/\/ Removes the entire key if the needle is the last value in the list.\nfunc (r *CreatedResources) Remove(k, needle string) bool {\n\thaystack := r.Resources[k]\n\tfor i, item := range haystack {\n\t\tif item == needle {\n\t\t\tr.Resources[k] = append(haystack[:i], haystack[i+1:]...)\n\t\t\tif len(r.Resources[k]) == 0 {\n\t\t\t\tdelete(r.Resources, k)\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Copy returns a new deep copy of CreatedResrouces.\nfunc (r *CreatedResources) Copy() *CreatedResources {\n\tnewr := CreatedResources{\n\t\tResources: make(map[string][]string, len(r.Resources)),\n\t}\n\tfor k, v := range r.Resources {\n\t\tnewv := make([]string, len(v))\n\t\tcopy(newv, v)\n\t\tnewr.Resources[k] = newv\n\t}\n\treturn &newr\n}\n\n\/\/ Merge another CreatedResources into this one. If the other CreatedResources\n\/\/ is nil this method is a noop.\nfunc (r *CreatedResources) Merge(o *CreatedResources) {\n\tif o == nil {\n\t\treturn\n\t}\n\n\tfor k, v := range o.Resources {\n\t\t\/\/ New key\n\t\tif len(r.Resources[k]) == 0 {\n\t\t\tr.Resources[k] = v\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Existing key\n\tOUTER:\n\t\tfor _, item := range v {\n\t\t\tfor _, existing := range r.Resources[k] {\n\t\t\t\tif item == existing {\n\t\t\t\t\t\/\/ Found it, move on\n\t\t\t\t\tcontinue OUTER\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ New item, append it\n\t\t\tr.Resources[k] = append(r.Resources[k], item)\n\t\t}\n\t}\n}\n\n\/\/ Driver is used for execution of tasks. This allows Nomad\n\/\/ to support many pluggable implementations of task drivers.\n\/\/ Examples could include LXC, Docker, Qemu, etc.\ntype Driver interface {\n\t\/\/ Drivers must support the fingerprint interface for detection\n\tfingerprint.Fingerprint\n\n\t\/\/ Prestart prepares the task environment and performs expensive\n\t\/\/ intialization steps like downloading images.\n\t\/\/\n\t\/\/ CreatedResources may be non-nil even when an error occurs.\n\tPrestart(*ExecContext, *structs.Task) (*CreatedResources, error)\n\n\t\/\/ Start is used to being task execution\n\tStart(ctx *ExecContext, task *structs.Task) (DriverHandle, error)\n\n\t\/\/ Open is used to re-open a handle to a task\n\tOpen(ctx *ExecContext, handleID string) (DriverHandle, error)\n\n\t\/\/ Cleanup is called to remove resources which were created for a task\n\t\/\/ and no longer needed.\n\t\/\/\n\t\/\/ If Cleanup returns a recoverable error it may be retried. On retry\n\t\/\/ it will be passed the same CreatedResources, so all successfully\n\t\/\/ cleaned up resources should be removed.\n\tCleanup(*ExecContext, *CreatedResources) error\n\n\t\/\/ Drivers must validate their configuration\n\tValidate(map[string]interface{}) error\n\n\t\/\/ Abilities returns the abilities of the driver\n\tAbilities() DriverAbilities\n\n\t\/\/ FSIsolation returns the method of filesystem isolation used\n\tFSIsolation() cstructs.FSIsolation\n}\n\n\/\/ DriverAbilities marks the abilities the driver has.\ntype DriverAbilities struct {\n\t\/\/ SendSignals marks the driver as being able to send signals\n\tSendSignals bool\n}\n\n\/\/ LogEventFn is a callback which allows Drivers to emit task events.\ntype LogEventFn func(message string, args ...interface{})\n\n\/\/ DriverContext is a means to inject dependencies such as loggers, configs, and\n\/\/ node attributes into a Driver without having to change the Driver interface\n\/\/ each time we do it. Used in conjection with Factory, above.\ntype DriverContext struct {\n\ttaskName string\n\tconfig *config.Config\n\tlogger *log.Logger\n\tnode *structs.Node\n\ttaskEnv *env.TaskEnvironment\n\n\temitEvent LogEventFn\n}\n\n\/\/ NewEmptyDriverContext returns a DriverContext with all fields set to their\n\/\/ zero value.\nfunc NewEmptyDriverContext() *DriverContext {\n\treturn &DriverContext{}\n}\n\n\/\/ NewDriverContext initializes a new DriverContext with the specified fields.\n\/\/ This enables other packages to create DriverContexts but keeps the fields\n\/\/ private to the driver. If we want to change this later we can gorename all of\n\/\/ the fields in DriverContext.\nfunc NewDriverContext(taskName string, config *config.Config, node *structs.Node,\n\tlogger *log.Logger, taskEnv *env.TaskEnvironment, eventEmitter LogEventFn) *DriverContext {\n\treturn &DriverContext{\n\t\ttaskName: taskName,\n\t\tconfig: config,\n\t\tnode: node,\n\t\tlogger: logger,\n\t\ttaskEnv: taskEnv,\n\t\temitEvent: eventEmitter,\n\t}\n}\n\n\/\/ DriverHandle is an opaque handle into a driver used for task\n\/\/ manipulation\ntype DriverHandle interface {\n\t\/\/ Returns an opaque handle that can be used to re-open the handle\n\tID() string\n\n\t\/\/ WaitCh is used to return a channel used wait for task completion\n\tWaitCh() chan *dstructs.WaitResult\n\n\t\/\/ Update is used to update the task if possible and update task related\n\t\/\/ configurations.\n\tUpdate(task *structs.Task) error\n\n\t\/\/ Kill is used to stop the task\n\tKill() error\n\n\t\/\/ Stats returns aggregated stats of the driver\n\tStats() (*cstructs.TaskResourceUsage, error)\n\n\t\/\/ Signal is used to send a signal to the task\n\tSignal(s os.Signal) error\n}\n\n\/\/ ExecContext is a task's execution context\ntype ExecContext struct {\n\t\/\/ TaskDir contains information about the task directory structure.\n\tTaskDir *allocdir.TaskDir\n\n\t\/\/ Alloc ID\n\tAllocID string\n}\n\n\/\/ NewExecContext is used to create a new execution context\nfunc NewExecContext(td *allocdir.TaskDir, allocID string) *ExecContext {\n\treturn &ExecContext{\n\t\tTaskDir: td,\n\t\tAllocID: allocID,\n\t}\n}\n\n\/\/ GetTaskEnv converts the alloc dir, the node, task and alloc into a\n\/\/ TaskEnvironment.\nfunc GetTaskEnv(taskDir *allocdir.TaskDir, node *structs.Node,\n\ttask *structs.Task, alloc *structs.Allocation, conf *config.Config,\n\tvaultToken string) (*env.TaskEnvironment, error) {\n\n\tenv := env.NewTaskEnvironment(node).\n\t\tSetTaskMeta(alloc.Job.CombinedTaskMeta(alloc.TaskGroup, task.Name)).\n\t\tSetJobName(alloc.Job.Name).\n\t\tSetEnvvars(task.Env).\n\t\tSetTaskName(task.Name)\n\n\t\/\/ Vary paths by filesystem isolation used\n\tdrv, err := NewDriver(task.Driver, NewEmptyDriverContext())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tswitch drv.FSIsolation() {\n\tcase cstructs.FSIsolationNone:\n\t\t\/\/ Use host paths\n\t\tenv.SetAllocDir(taskDir.SharedAllocDir)\n\t\tenv.SetTaskLocalDir(taskDir.LocalDir)\n\t\tenv.SetSecretsDir(taskDir.SecretsDir)\n\tdefault:\n\t\t\/\/ filesystem isolation; use container paths\n\t\tenv.SetAllocDir(allocdir.SharedAllocContainerPath)\n\t\tenv.SetTaskLocalDir(allocdir.TaskLocalContainerPath)\n\t\tenv.SetSecretsDir(allocdir.TaskSecretsContainerPath)\n\t}\n\n\tif task.Resources != nil {\n\t\tenv.SetMemLimit(task.Resources.MemoryMB).\n\t\t\tSetCpuLimit(task.Resources.CPU).\n\t\t\tSetNetworks(task.Resources.Networks)\n\t}\n\n\tif alloc != nil {\n\t\tenv.SetAlloc(alloc)\n\t}\n\n\tif task.Vault != nil {\n\t\tenv.SetVaultToken(vaultToken, task.Vault.Env)\n\t}\n\n\t\/\/ Set the host environment variables for non-image based drivers\n\tif drv.FSIsolation() != cstructs.FSIsolationImage {\n\t\tfilter := strings.Split(conf.ReadDefault(\"env.blacklist\", config.DefaultEnvBlacklist), \",\")\n\t\tenv.AppendHostEnvvars(filter)\n\t}\n\n\treturn env.Build(), nil\n}\n\nfunc mapMergeStrInt(maps ...map[string]int) map[string]int {\n\tout := map[string]int{}\n\tfor _, in := range maps {\n\t\tfor key, val := range in {\n\t\t\tout[key] = val\n\t\t}\n\t}\n\treturn out\n}\n\nfunc mapMergeStrStr(maps ...map[string]string) map[string]string {\n\tout := map[string]string{}\n\tfor _, in := range maps {\n\t\tfor key, val := range in {\n\t\t\tout[key] = val\n\t\t}\n\t}\n\treturn out\n}\n<commit_msg>Add nil guard<commit_after>package driver\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/nomad\/client\/allocdir\"\n\t\"github.com\/hashicorp\/nomad\/client\/config\"\n\t\"github.com\/hashicorp\/nomad\/client\/driver\/env\"\n\t\"github.com\/hashicorp\/nomad\/client\/fingerprint\"\n\t\"github.com\/hashicorp\/nomad\/nomad\/structs\"\n\n\tdstructs \"github.com\/hashicorp\/nomad\/client\/driver\/structs\"\n\tcstructs \"github.com\/hashicorp\/nomad\/client\/structs\"\n)\n\nvar (\n\t\/\/ BuiltinDrivers contains the built in registered drivers\n\t\/\/ which are available for allocation handling\n\tBuiltinDrivers = map[string]Factory{\n\t\t\"docker\": NewDockerDriver,\n\t\t\"exec\": NewExecDriver,\n\t\t\"raw_exec\": NewRawExecDriver,\n\t\t\"java\": NewJavaDriver,\n\t\t\"qemu\": NewQemuDriver,\n\t\t\"rkt\": NewRktDriver,\n\t}\n\n\t\/\/ DriverStatsNotImplemented is the error to be returned if a driver doesn't\n\t\/\/ implement stats.\n\tDriverStatsNotImplemented = errors.New(\"stats not implemented for driver\")\n)\n\n\/\/ NewDriver is used to instantiate and return a new driver\n\/\/ given the name and a logger\nfunc NewDriver(name string, ctx *DriverContext) (Driver, error) {\n\t\/\/ Lookup the factory function\n\tfactory, ok := BuiltinDrivers[name]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unknown driver '%s'\", name)\n\t}\n\n\t\/\/ Instantiate the driver\n\tf := factory(ctx)\n\treturn f, nil\n}\n\n\/\/ Factory is used to instantiate a new Driver\ntype Factory func(*DriverContext) Driver\n\n\/\/ CreatedResources is a map of resources (eg downloaded images) created by a driver\n\/\/ that must be cleaned up.\ntype CreatedResources struct {\n\tResources map[string][]string\n}\n\nfunc NewCreatedResources() *CreatedResources {\n\treturn &CreatedResources{Resources: make(map[string][]string)}\n}\n\n\/\/ Add a new resource if it doesn't already exist.\nfunc (r *CreatedResources) Add(k, v string) {\n\tif r.Resources == nil {\n\t\tr.Resources = map[string][]string{k: []string{v}}\n\t\treturn\n\t}\n\texisting, ok := r.Resources[k]\n\tif !ok {\n\t\t\/\/ Key doesn't exist, create it\n\t\tr.Resources[k] = []string{v}\n\t\treturn\n\t}\n\tfor _, item := range existing {\n\t\tif item == v {\n\t\t\t\/\/ resource exists, return\n\t\t\treturn\n\t\t}\n\t}\n\n\t\/\/ Resource type exists but value did not, append it\n\tr.Resources[k] = append(existing, v)\n\treturn\n}\n\n\/\/ Remove a resource. Return true if removed, otherwise false.\n\/\/\n\/\/ Removes the entire key if the needle is the last value in the list.\nfunc (r *CreatedResources) Remove(k, needle string) bool {\n\thaystack := r.Resources[k]\n\tfor i, item := range haystack {\n\t\tif item == needle {\n\t\t\tr.Resources[k] = append(haystack[:i], haystack[i+1:]...)\n\t\t\tif len(r.Resources[k]) == 0 {\n\t\t\t\tdelete(r.Resources, k)\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Copy returns a new deep copy of CreatedResrouces.\nfunc (r *CreatedResources) Copy() *CreatedResources {\n\tif r == nil {\n\t\treturn nil\n\t}\n\n\tnewr := CreatedResources{\n\t\tResources: make(map[string][]string, len(r.Resources)),\n\t}\n\tfor k, v := range r.Resources {\n\t\tnewv := make([]string, len(v))\n\t\tcopy(newv, v)\n\t\tnewr.Resources[k] = newv\n\t}\n\treturn &newr\n}\n\n\/\/ Merge another CreatedResources into this one. If the other CreatedResources\n\/\/ is nil this method is a noop.\nfunc (r *CreatedResources) Merge(o *CreatedResources) {\n\tif o == nil {\n\t\treturn\n\t}\n\n\tfor k, v := range o.Resources {\n\t\t\/\/ New key\n\t\tif len(r.Resources[k]) == 0 {\n\t\t\tr.Resources[k] = v\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Existing key\n\tOUTER:\n\t\tfor _, item := range v {\n\t\t\tfor _, existing := range r.Resources[k] {\n\t\t\t\tif item == existing {\n\t\t\t\t\t\/\/ Found it, move on\n\t\t\t\t\tcontinue OUTER\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ New item, append it\n\t\t\tr.Resources[k] = append(r.Resources[k], item)\n\t\t}\n\t}\n}\n\n\/\/ Driver is used for execution of tasks. This allows Nomad\n\/\/ to support many pluggable implementations of task drivers.\n\/\/ Examples could include LXC, Docker, Qemu, etc.\ntype Driver interface {\n\t\/\/ Drivers must support the fingerprint interface for detection\n\tfingerprint.Fingerprint\n\n\t\/\/ Prestart prepares the task environment and performs expensive\n\t\/\/ intialization steps like downloading images.\n\t\/\/\n\t\/\/ CreatedResources may be non-nil even when an error occurs.\n\tPrestart(*ExecContext, *structs.Task) (*CreatedResources, error)\n\n\t\/\/ Start is used to being task execution\n\tStart(ctx *ExecContext, task *structs.Task) (DriverHandle, error)\n\n\t\/\/ Open is used to re-open a handle to a task\n\tOpen(ctx *ExecContext, handleID string) (DriverHandle, error)\n\n\t\/\/ Cleanup is called to remove resources which were created for a task\n\t\/\/ and no longer needed.\n\t\/\/\n\t\/\/ If Cleanup returns a recoverable error it may be retried. On retry\n\t\/\/ it will be passed the same CreatedResources, so all successfully\n\t\/\/ cleaned up resources should be removed.\n\tCleanup(*ExecContext, *CreatedResources) error\n\n\t\/\/ Drivers must validate their configuration\n\tValidate(map[string]interface{}) error\n\n\t\/\/ Abilities returns the abilities of the driver\n\tAbilities() DriverAbilities\n\n\t\/\/ FSIsolation returns the method of filesystem isolation used\n\tFSIsolation() cstructs.FSIsolation\n}\n\n\/\/ DriverAbilities marks the abilities the driver has.\ntype DriverAbilities struct {\n\t\/\/ SendSignals marks the driver as being able to send signals\n\tSendSignals bool\n}\n\n\/\/ LogEventFn is a callback which allows Drivers to emit task events.\ntype LogEventFn func(message string, args ...interface{})\n\n\/\/ DriverContext is a means to inject dependencies such as loggers, configs, and\n\/\/ node attributes into a Driver without having to change the Driver interface\n\/\/ each time we do it. Used in conjection with Factory, above.\ntype DriverContext struct {\n\ttaskName string\n\tconfig *config.Config\n\tlogger *log.Logger\n\tnode *structs.Node\n\ttaskEnv *env.TaskEnvironment\n\n\temitEvent LogEventFn\n}\n\n\/\/ NewEmptyDriverContext returns a DriverContext with all fields set to their\n\/\/ zero value.\nfunc NewEmptyDriverContext() *DriverContext {\n\treturn &DriverContext{}\n}\n\n\/\/ NewDriverContext initializes a new DriverContext with the specified fields.\n\/\/ This enables other packages to create DriverContexts but keeps the fields\n\/\/ private to the driver. If we want to change this later we can gorename all of\n\/\/ the fields in DriverContext.\nfunc NewDriverContext(taskName string, config *config.Config, node *structs.Node,\n\tlogger *log.Logger, taskEnv *env.TaskEnvironment, eventEmitter LogEventFn) *DriverContext {\n\treturn &DriverContext{\n\t\ttaskName: taskName,\n\t\tconfig: config,\n\t\tnode: node,\n\t\tlogger: logger,\n\t\ttaskEnv: taskEnv,\n\t\temitEvent: eventEmitter,\n\t}\n}\n\n\/\/ DriverHandle is an opaque handle into a driver used for task\n\/\/ manipulation\ntype DriverHandle interface {\n\t\/\/ Returns an opaque handle that can be used to re-open the handle\n\tID() string\n\n\t\/\/ WaitCh is used to return a channel used wait for task completion\n\tWaitCh() chan *dstructs.WaitResult\n\n\t\/\/ Update is used to update the task if possible and update task related\n\t\/\/ configurations.\n\tUpdate(task *structs.Task) error\n\n\t\/\/ Kill is used to stop the task\n\tKill() error\n\n\t\/\/ Stats returns aggregated stats of the driver\n\tStats() (*cstructs.TaskResourceUsage, error)\n\n\t\/\/ Signal is used to send a signal to the task\n\tSignal(s os.Signal) error\n}\n\n\/\/ ExecContext is a task's execution context\ntype ExecContext struct {\n\t\/\/ TaskDir contains information about the task directory structure.\n\tTaskDir *allocdir.TaskDir\n\n\t\/\/ Alloc ID\n\tAllocID string\n}\n\n\/\/ NewExecContext is used to create a new execution context\nfunc NewExecContext(td *allocdir.TaskDir, allocID string) *ExecContext {\n\treturn &ExecContext{\n\t\tTaskDir: td,\n\t\tAllocID: allocID,\n\t}\n}\n\n\/\/ GetTaskEnv converts the alloc dir, the node, task and alloc into a\n\/\/ TaskEnvironment.\nfunc GetTaskEnv(taskDir *allocdir.TaskDir, node *structs.Node,\n\ttask *structs.Task, alloc *structs.Allocation, conf *config.Config,\n\tvaultToken string) (*env.TaskEnvironment, error) {\n\n\tenv := env.NewTaskEnvironment(node).\n\t\tSetTaskMeta(alloc.Job.CombinedTaskMeta(alloc.TaskGroup, task.Name)).\n\t\tSetJobName(alloc.Job.Name).\n\t\tSetEnvvars(task.Env).\n\t\tSetTaskName(task.Name)\n\n\t\/\/ Vary paths by filesystem isolation used\n\tdrv, err := NewDriver(task.Driver, NewEmptyDriverContext())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tswitch drv.FSIsolation() {\n\tcase cstructs.FSIsolationNone:\n\t\t\/\/ Use host paths\n\t\tenv.SetAllocDir(taskDir.SharedAllocDir)\n\t\tenv.SetTaskLocalDir(taskDir.LocalDir)\n\t\tenv.SetSecretsDir(taskDir.SecretsDir)\n\tdefault:\n\t\t\/\/ filesystem isolation; use container paths\n\t\tenv.SetAllocDir(allocdir.SharedAllocContainerPath)\n\t\tenv.SetTaskLocalDir(allocdir.TaskLocalContainerPath)\n\t\tenv.SetSecretsDir(allocdir.TaskSecretsContainerPath)\n\t}\n\n\tif task.Resources != nil {\n\t\tenv.SetMemLimit(task.Resources.MemoryMB).\n\t\t\tSetCpuLimit(task.Resources.CPU).\n\t\t\tSetNetworks(task.Resources.Networks)\n\t}\n\n\tif alloc != nil {\n\t\tenv.SetAlloc(alloc)\n\t}\n\n\tif task.Vault != nil {\n\t\tenv.SetVaultToken(vaultToken, task.Vault.Env)\n\t}\n\n\t\/\/ Set the host environment variables for non-image based drivers\n\tif drv.FSIsolation() != cstructs.FSIsolationImage {\n\t\tfilter := strings.Split(conf.ReadDefault(\"env.blacklist\", config.DefaultEnvBlacklist), \",\")\n\t\tenv.AppendHostEnvvars(filter)\n\t}\n\n\treturn env.Build(), nil\n}\n\nfunc mapMergeStrInt(maps ...map[string]int) map[string]int {\n\tout := map[string]int{}\n\tfor _, in := range maps {\n\t\tfor key, val := range in {\n\t\t\tout[key] = val\n\t\t}\n\t}\n\treturn out\n}\n\nfunc mapMergeStrStr(maps ...map[string]string) map[string]string {\n\tout := map[string]string{}\n\tfor _, in := range maps {\n\t\tfor key, val := range in {\n\t\t\tout[key] = val\n\t\t}\n\t}\n\treturn out\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright 2014 GoPivotal (UK) Limited.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage rootfs_test\n\nimport (\n\t\"code.google.com\/p\/gomock\/gomock\"\n\t\"fmt\"\n\t\"github.com\/cf-guardian\/guardian\/gerror\"\n\t\"github.com\/cf-guardian\/guardian\/kernel\/fileutils\"\n\t\"github.com\/cf-guardian\/guardian\/kernel\/fileutils\/mock_fileutils\"\n\t\"github.com\/cf-guardian\/guardian\/kernel\/rootfs\"\n\t\"github.com\/cf-guardian\/guardian\/kernel\/syscall\/mock_syscall\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestNonExistentReadWriteBaseDir(t *testing.T) {\n\tmockCtrl, mockFileUtils, mockSyscallFS := setupMocks(t)\n\tdefer mockCtrl.Finish()\n\n\tmockFileUtils.EXPECT().Filemode(\"\/nosuch\").Return(os.FileMode(0), gerror.New(fileutils.ErrFileNotFound, \"test error\"))\n\n\trfs, gerr := rootfs.NewRootFS(mockSyscallFS, mockFileUtils, \"\/nosuch\")\n\tif rfs != nil || !gerr.EqualTag(rootfs.ErrRwBaseDirMissing) {\n\t\tt.Errorf(\"Incorrect return values (%s, %s)\", rfs, gerr)\n\t\treturn\n\t}\n}\n\nfunc TestNonDirReadWriteBaseDir(t *testing.T) {\n\tmockCtrl, mockFileUtils, mockSyscallFS := setupMocks(t)\n\tdefer mockCtrl.Finish()\n\n\ttempDir := createTempDir()\n\tfilePath := createFile(tempDir, \"testFile\")\n\tmockFileUtils.EXPECT().Filemode(filePath).Return(os.FileMode(0700), nil)\n\n\trfs, gerr := rootfs.NewRootFS(mockSyscallFS, mockFileUtils, filePath)\n\tif rfs != nil || !gerr.EqualTag(rootfs.ErrRwBaseDirIsFile) {\n\t\tt.Errorf(\"Incorrect return values (%s, %s)\", rfs, gerr)\n\t\treturn\n\t}\n}\n\nfunc TestReadOnlyReadWriteBaseDir(t *testing.T) {\n\tmockCtrl, mockFileUtils, mockSyscallFS := setupMocks(t)\n\tdefer mockCtrl.Finish()\n\n\ttempDir := createTempDir()\n\tdirPath := createDirWithMode(tempDir, \"test-rootfs\", os.FileMode(0400))\n\tmockFileUtils.EXPECT().Filemode(dirPath).Return(os.ModeDir|os.FileMode(0100), nil)\n\n\trfs, gerr := rootfs.NewRootFS(mockSyscallFS, mockFileUtils, dirPath)\n\tif rfs != nil || !gerr.EqualTag(rootfs.ErrRwBaseDirNotRw) {\n\t\tt.Errorf(\"Incorrect return values (%s, %s)\", rfs, gerr)\n\t\treturn\n\t}\n}\n\nfunc TestGenerate(t *testing.T) {\n\tmockCtrl, mockFileUtils, mockSyscallFS := setupMocks(t)\n\tdefer mockCtrl.Finish()\n\n\ttempDir := createTempDir()\n\tmockFileUtils.EXPECT().Filemode(tempDir).Return(os.ModeDir|os.FileMode(0700), nil)\n\trfs, gerr := rootfs.NewRootFS(mockSyscallFS, mockFileUtils, tempDir)\n\tif gerr != nil {\n\t\tt.Errorf(\"%s\", gerr)\n\t\treturn\n\t}\n\tprototypeDir := filepath.Join(tempDir, \"test-prototype\")\n\n\tmockSyscallFS.EXPECT().BindMountReadOnly(prototypeDir, &stringPrefixMatcher{filepath.Join(tempDir, \"mnt\")})\n\n\tdirs := []string{`proc`, `dev`, `etc`, `home`, `sbin`, `var`, `tmp`}\n\tfor _, dir := range dirs {\n\t\tmockSyscallFS.EXPECT().BindMountReadWrite(&stringRegexMatcher{filepath.Join(tempDir, \"tmp-rootfs-.*\", dir)}, &stringRegexMatcher{filepath.Join(tempDir, \"mnt-.*\", dir)})\n\t}\n\n\troot, gerr := rfs.Generate(prototypeDir)\n\tif gerr != nil {\n\t\tt.Errorf(\"%s\", gerr)\n\t\treturn\n\t}\n\n\t_ = root\n}\n\ntype stringPrefixMatcher struct {\n\tprefix string\n}\n\nfunc (m *stringPrefixMatcher) Matches(x interface{}) bool {\n\tif x, ok := x.(string); ok {\n\t\treturn strings.HasPrefix(x, m.prefix)\n\t} else {\n\t\treturn false\n\t}\n}\n\nfunc (m *stringPrefixMatcher) String() string {\n\treturn fmt.Sprintf(\"is a string with prefix %s\", m.prefix)\n}\n\ntype stringRegexMatcher struct {\n\tregex string\n}\n\nfunc (m *stringRegexMatcher) Matches(x interface{}) bool {\n\tif x, ok := x.(string); ok {\n\t\tif matched, err := regexp.MatchString(m.regex, x); err == nil {\n\t\t\treturn matched\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\treturn false\n\t}\n}\n\nfunc (m *stringRegexMatcher) String() string {\n\treturn fmt.Sprintf(\"is a string which matches regular expression %s\", m.regex)\n}\n\nfunc createTempDir() string {\n\ttempDir, err := ioutil.TempDir(\"\/tmp\", \"guardian-test-\")\n\tcheck(err)\n\treturn tempDir\n}\n\nfunc setupMocks(t *testing.T) (*gomock.Controller, *mock_fileutils.MockFileutils, *mock_syscall.MockSyscall_FS) {\n\tmockCtrl := gomock.NewController(t)\n\tmockFileUtils := mock_fileutils.NewMockFileutils(mockCtrl)\n\tmockSyscallFS := mock_syscall.NewMockSyscall_FS(mockCtrl)\n\treturn mockCtrl, mockFileUtils, mockSyscallFS\n}\n\n\/\/ TODO: Remove duplication with fileutils_test.\nfunc createFile(td string, fileName string) string {\n\treturn createFileWithMode(td, fileName, os.FileMode(0666))\n}\n\nfunc createFileWithMode(td string, fileName string, mode os.FileMode) string {\n\tfp := filepath.Join(td, fileName)\n\tf, err := os.OpenFile(fp, os.O_CREATE|os.O_EXCL|os.O_WRONLY, mode)\n\tcheck(err)\n\t_, err = f.WriteString(\"test contents\")\n\tcheck(err)\n\tcheck(f.Close())\n\treturn fp\n}\n\nfunc createDir(td string, dirName string) string {\n\treturn createDirWithMode(td, dirName, os.FileMode(0777))\n}\n\nfunc createDirWithMode(td string, dirName string, mode os.FileMode) string {\n\tfp := filepath.Join(td, dirName)\n\terr := os.Mkdir(fp, mode)\n\tcheck(err)\n\treturn fp\n}\n\nfunc check(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n<commit_msg>Check return value of Generate<commit_after>\/*\n Copyright 2014 GoPivotal (UK) Limited.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage rootfs_test\n\nimport (\n\t\"code.google.com\/p\/gomock\/gomock\"\n\t\"fmt\"\n\t\"github.com\/cf-guardian\/guardian\/gerror\"\n\t\"github.com\/cf-guardian\/guardian\/kernel\/fileutils\"\n\t\"github.com\/cf-guardian\/guardian\/kernel\/fileutils\/mock_fileutils\"\n\t\"github.com\/cf-guardian\/guardian\/kernel\/rootfs\"\n\t\"github.com\/cf-guardian\/guardian\/kernel\/syscall\/mock_syscall\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestNonExistentReadWriteBaseDir(t *testing.T) {\n\tmockCtrl, mockFileUtils, mockSyscallFS := setupMocks(t)\n\tdefer mockCtrl.Finish()\n\n\tmockFileUtils.EXPECT().Filemode(\"\/nosuch\").Return(os.FileMode(0), gerror.New(fileutils.ErrFileNotFound, \"test error\"))\n\n\trfs, gerr := rootfs.NewRootFS(mockSyscallFS, mockFileUtils, \"\/nosuch\")\n\tif rfs != nil || !gerr.EqualTag(rootfs.ErrRwBaseDirMissing) {\n\t\tt.Errorf(\"Incorrect return values (%s, %s)\", rfs, gerr)\n\t\treturn\n\t}\n}\n\nfunc TestNonDirReadWriteBaseDir(t *testing.T) {\n\tmockCtrl, mockFileUtils, mockSyscallFS := setupMocks(t)\n\tdefer mockCtrl.Finish()\n\n\ttempDir := createTempDir()\n\tfilePath := createFile(tempDir, \"testFile\")\n\tmockFileUtils.EXPECT().Filemode(filePath).Return(os.FileMode(0700), nil)\n\n\trfs, gerr := rootfs.NewRootFS(mockSyscallFS, mockFileUtils, filePath)\n\tif rfs != nil || !gerr.EqualTag(rootfs.ErrRwBaseDirIsFile) {\n\t\tt.Errorf(\"Incorrect return values (%s, %s)\", rfs, gerr)\n\t\treturn\n\t}\n}\n\nfunc TestReadOnlyReadWriteBaseDir(t *testing.T) {\n\tmockCtrl, mockFileUtils, mockSyscallFS := setupMocks(t)\n\tdefer mockCtrl.Finish()\n\n\ttempDir := createTempDir()\n\tdirPath := createDirWithMode(tempDir, \"test-rootfs\", os.FileMode(0400))\n\tmockFileUtils.EXPECT().Filemode(dirPath).Return(os.ModeDir|os.FileMode(0100), nil)\n\n\trfs, gerr := rootfs.NewRootFS(mockSyscallFS, mockFileUtils, dirPath)\n\tif rfs != nil || !gerr.EqualTag(rootfs.ErrRwBaseDirNotRw) {\n\t\tt.Errorf(\"Incorrect return values (%s, %s)\", rfs, gerr)\n\t\treturn\n\t}\n}\n\nfunc TestGenerate(t *testing.T) {\n\tmockCtrl, mockFileUtils, mockSyscallFS := setupMocks(t)\n\tdefer mockCtrl.Finish()\n\n\ttempDir := createTempDir()\n\tmockFileUtils.EXPECT().Filemode(tempDir).Return(os.ModeDir|os.FileMode(0700), nil)\n\trfs, gerr := rootfs.NewRootFS(mockSyscallFS, mockFileUtils, tempDir)\n\tif gerr != nil {\n\t\tt.Errorf(\"%s\", gerr)\n\t\treturn\n\t}\n\tprototypeDir := filepath.Join(tempDir, \"test-prototype\")\n\n\tmockSyscallFS.EXPECT().BindMountReadOnly(prototypeDir, &stringPrefixMatcher{filepath.Join(tempDir, \"mnt\")})\n\n\tdirs := []string{`proc`, `dev`, `etc`, `home`, `sbin`, `var`, `tmp`}\n\tfor _, dir := range dirs {\n\t\tmockSyscallFS.EXPECT().BindMountReadWrite(&stringRegexMatcher{filepath.Join(tempDir, \"tmp-rootfs-.*\", dir)}, &stringRegexMatcher{filepath.Join(tempDir, \"mnt-.*\", dir)})\n\t}\n\n\troot, gerr := rfs.Generate(prototypeDir)\n\tif gerr != nil {\n\t\tt.Errorf(\"%s\", gerr)\n\t\treturn\n\t}\n\n\trootPrefix := filepath.Join(tempDir, \"mnt-\")\n\tif !strings.HasPrefix(root, rootPrefix) {\n\t\tt.Errorf(\"root was %s, but expected it to have prefix %s\", root, rootPrefix)\n\t\treturn\n\t}\n}\n\ntype stringPrefixMatcher struct {\n\tprefix string\n}\n\nfunc (m *stringPrefixMatcher) Matches(x interface{}) bool {\n\tif x, ok := x.(string); ok {\n\t\treturn strings.HasPrefix(x, m.prefix)\n\t} else {\n\t\treturn false\n\t}\n}\n\nfunc (m *stringPrefixMatcher) String() string {\n\treturn fmt.Sprintf(\"is a string with prefix %s\", m.prefix)\n}\n\ntype stringRegexMatcher struct {\n\tregex string\n}\n\nfunc (m *stringRegexMatcher) Matches(x interface{}) bool {\n\tif x, ok := x.(string); ok {\n\t\tif matched, err := regexp.MatchString(m.regex, x); err == nil {\n\t\t\treturn matched\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t} else {\n\t\treturn false\n\t}\n}\n\nfunc (m *stringRegexMatcher) String() string {\n\treturn fmt.Sprintf(\"is a string which matches regular expression %s\", m.regex)\n}\n\nfunc createTempDir() string {\n\ttempDir, err := ioutil.TempDir(\"\/tmp\", \"guardian-test-\")\n\tcheck(err)\n\treturn tempDir\n}\n\nfunc setupMocks(t *testing.T) (*gomock.Controller, *mock_fileutils.MockFileutils, *mock_syscall.MockSyscall_FS) {\n\tmockCtrl := gomock.NewController(t)\n\tmockFileUtils := mock_fileutils.NewMockFileutils(mockCtrl)\n\tmockSyscallFS := mock_syscall.NewMockSyscall_FS(mockCtrl)\n\treturn mockCtrl, mockFileUtils, mockSyscallFS\n}\n\n\/\/ TODO: Remove duplication with fileutils_test.\nfunc createFile(td string, fileName string) string {\n\treturn createFileWithMode(td, fileName, os.FileMode(0666))\n}\n\nfunc createFileWithMode(td string, fileName string, mode os.FileMode) string {\n\tfp := filepath.Join(td, fileName)\n\tf, err := os.OpenFile(fp, os.O_CREATE|os.O_EXCL|os.O_WRONLY, mode)\n\tcheck(err)\n\t_, err = f.WriteString(\"test contents\")\n\tcheck(err)\n\tcheck(f.Close())\n\treturn fp\n}\n\nfunc createDir(td string, dirName string) string {\n\treturn createDirWithMode(td, dirName, os.FileMode(0777))\n}\n\nfunc createDirWithMode(td string, dirName string, mode os.FileMode) string {\n\tfp := filepath.Join(td, dirName)\n\terr := os.Mkdir(fp, mode)\n\tcheck(err)\n\treturn fp\n}\n\nfunc check(err error) {\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package fix\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"os\"\n\t\"strings\"\n)\n\ntype Command byte\n\nfunc (Command) Help() string {\n\treturn strings.TrimSpace(helpString)\n}\n\nfunc (c Command) Run(env packer.Environment, args []string) int {\n\tcmdFlags := flag.NewFlagSet(\"fix\", flag.ContinueOnError)\n\tcmdFlags.Usage = func() { env.Ui().Say(c.Help()) }\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\targs = cmdFlags.Args()\n\tif len(args) != 1 {\n\t\tcmdFlags.Usage()\n\t\treturn 1\n\t}\n\n\t\/\/ Read the file for decoding\n\ttplF, err := os.Open(args[0])\n\tif err != nil {\n\t\tenv.Ui().Error(fmt.Sprintf(\"Error opening template: %s\", err))\n\t\treturn 1\n\t}\n\tdefer tplF.Close()\n\n\t\/\/ Decode the JSON into a generic map structure\n\tvar templateData map[string]interface{}\n\tdecoder := json.NewDecoder(tplF)\n\tif err := decoder.Decode(&templateData); err != nil {\n\t\tenv.Ui().Error(fmt.Sprintf(\"Error parsing template: %s\", err))\n\t\treturn 1\n\t}\n\n\t\/\/ Close the file since we're done with that\n\ttplF.Close()\n\n\t\/\/ Run the template through the various fixers\n\tfixers := []Fixer{Fixers[\"iso-md5\"]}\n\tinput := templateData\n\tfor _, fixer := range fixers {\n\t\tvar err error\n\t\tinput, err = fixer.Fix(input)\n\t\tif err != nil {\n\t\t\tenv.Ui().Error(fmt.Sprintf(\"Error fixing: %s\", err))\n\t\t\treturn 1\n\t\t}\n\t}\n\n\tvar output bytes.Buffer\n\tencoder := json.NewEncoder(&output)\n\tif err := encoder.Encode(input); err != nil {\n\t\tenv.Ui().Error(fmt.Sprintf(\"Error encoding: %s\", err))\n\t\treturn 1\n\t}\n\n\tvar indented bytes.Buffer\n\tif err := json.Indent(&indented, output.Bytes(), \"\", \" \"); err != nil {\n\t\tenv.Ui().Error(fmt.Sprintf(\"Error encoding: %s\", err))\n\t\treturn 1\n\t}\n\n\tresult := indented.String()\n\tresult = strings.Replace(result, `\\u003c`, \"<\", -1)\n\tresult = strings.Replace(result, `\\u003e`, \">\", -1)\n\tenv.Ui().Say(result)\n\treturn 0\n}\n\nfunc (c Command) Synopsis() string {\n\treturn \"fixes templates from old versions of packer\"\n}\n<commit_msg>command\/fix: use strings instead of Fixer for list<commit_after>package fix\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"os\"\n\t\"strings\"\n)\n\ntype Command byte\n\nfunc (Command) Help() string {\n\treturn strings.TrimSpace(helpString)\n}\n\nfunc (c Command) Run(env packer.Environment, args []string) int {\n\tcmdFlags := flag.NewFlagSet(\"fix\", flag.ContinueOnError)\n\tcmdFlags.Usage = func() { env.Ui().Say(c.Help()) }\n\tif err := cmdFlags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\targs = cmdFlags.Args()\n\tif len(args) != 1 {\n\t\tcmdFlags.Usage()\n\t\treturn 1\n\t}\n\n\t\/\/ Read the file for decoding\n\ttplF, err := os.Open(args[0])\n\tif err != nil {\n\t\tenv.Ui().Error(fmt.Sprintf(\"Error opening template: %s\", err))\n\t\treturn 1\n\t}\n\tdefer tplF.Close()\n\n\t\/\/ Decode the JSON into a generic map structure\n\tvar templateData map[string]interface{}\n\tdecoder := json.NewDecoder(tplF)\n\tif err := decoder.Decode(&templateData); err != nil {\n\t\tenv.Ui().Error(fmt.Sprintf(\"Error parsing template: %s\", err))\n\t\treturn 1\n\t}\n\n\t\/\/ Close the file since we're done with that\n\ttplF.Close()\n\n\t\/\/ Run the template through the various fixers\n\tfixers := []string{\n\t\t\"iso-md5\",\n\t}\n\n\tinput := templateData\n\tfor _, name := range fixers {\n\t\tvar err error\n\t\tfixer, ok := Fixers[name]\n\t\tif !ok {\n\t\t\tpanic(\"fixer not found: \" + name)\n\t\t}\n\n\t\tinput, err = fixer.Fix(input)\n\t\tif err != nil {\n\t\t\tenv.Ui().Error(fmt.Sprintf(\"Error fixing: %s\", err))\n\t\t\treturn 1\n\t\t}\n\t}\n\n\tvar output bytes.Buffer\n\tencoder := json.NewEncoder(&output)\n\tif err := encoder.Encode(input); err != nil {\n\t\tenv.Ui().Error(fmt.Sprintf(\"Error encoding: %s\", err))\n\t\treturn 1\n\t}\n\n\tvar indented bytes.Buffer\n\tif err := json.Indent(&indented, output.Bytes(), \"\", \" \"); err != nil {\n\t\tenv.Ui().Error(fmt.Sprintf(\"Error encoding: %s\", err))\n\t\treturn 1\n\t}\n\n\tresult := indented.String()\n\tresult = strings.Replace(result, `\\u003c`, \"<\", -1)\n\tresult = strings.Replace(result, `\\u003e`, \">\", -1)\n\tenv.Ui().Say(result)\n\treturn 0\n}\n\nfunc (c Command) Synopsis() string {\n\treturn \"fixes templates from old versions of packer\"\n}\n<|endoftext|>"} {"text":"<commit_before>package worker\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/discoproject\/goworker\/jobutil\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n)\n\nconst (\n\tDEBUG = true\n)\n\nfunc Check(err error) {\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc debug(prefix string, msg interface{}) {\n\tif DEBUG {\n\t\tfile, err := os.OpenFile(\"\/tmp\/debug\", os.O_RDWR|os.O_APPEND|os.O_CREATE, 0644)\n\t\tCheck(err)\n\t\tdefer file.Close()\n\t\tfmt.Fprintf(file, \"%s: %v\\n\", prefix, msg)\n\t}\n}\n\nfunc send(key string, payload interface{}) {\n\tenc, err := json.Marshal(payload)\n\tif err != nil {\n\t\tpanic(\"could not encode\")\n\t}\n\tstr := fmt.Sprintf(\"%s %d %s\\n\", key, len(enc), enc)\n\tfmt.Printf(str)\n\tdebug(\"send\", str)\n}\n\nfunc recv() (string, int, []byte) {\n\tvar size int\n\tvar status string\n\tfmt.Scanf(\"%s %d\", &status, &size)\n\treader := bufio.NewReader(os.Stdin)\n\tinput := make([]byte, size)\n\treader.Read(input)\n\tdebug(\"recv\", fmt.Sprintf(\"%d \", size)+string(input))\n\treturn status, size, input\n}\n\nfunc send_worker() {\n\ttype WorkerMsg struct {\n\t\tPid int `json:\"pid\"`\n\t\tVersion string `json:\"version\"`\n\t}\n\twm := WorkerMsg{os.Getpid(), \"1.1\"}\n\tsend(\"WORKER\", wm)\n\n\t_, _, response := recv()\n\tif string(response) != \"\\\"ok\\\"\" {\n\t\tpanic(response)\n\t}\n}\n\nfunc request_task() *Task {\n\ttask := new(Task)\n\tsend(\"TASK\", \"\")\n\t_, _, line := recv()\n\tjson.Unmarshal(line, &task)\n\tdebug(\"info\", task)\n\treturn task\n}\n\nfunc request_input() *Input {\n\tsend(\"INPUT\", \"\")\n\t_, _, line := recv()\n\tvar mj []interface{}\n\tjson.Unmarshal(line, &mj)\n\n\tflag := mj[0].(string)\n\tif flag != \"done\" {\n\t\tpanic(flag)\n\t}\n\t_inputs := mj[1].([]interface{})\n\tinputs := _inputs[0].([]interface{})\n\n\tid := inputs[0].(float64)\n\tstatus := inputs[1].(string)\n\n\tlabel := -1\n\tswitch t := inputs[2].(type) {\n\tcase string:\n\t\tlabel = -1\n\tcase float64:\n\t\tlabel = int(t)\n\t}\n\t_replicas := inputs[3].([]interface{})\n\n\treplicas := _replicas[0].([]interface{})\n\n\t\/\/FIXME avoid conversion to float when reading the item\n\treplica_id := replicas[0].(float64)\n\treplica_location := replicas[1].(string)\n\n\tdebug(\"info\", fmt.Sprintln(id, status, label, replica_id, replica_location))\n\n\tinput := new(Input)\n\tinput.id = int(id)\n\tinput.status = status\n\tinput.label = label\n\tinput.replica_id = int(replica_id)\n\tinput.replica_location = replica_location\n\treturn input\n}\n\nfunc send_output(output *Output) {\n\tv := make([]interface{}, 3)\n\tv[0] = output.label\n\tv[1] = output.output_location \/\/\"http:\/\/example.com\"\n\tv[2] = output.output_size\n\n\tsend(\"OUTPUT\", v)\n\t_, _, line := recv()\n\tdebug(\"info\", string(line))\n}\n\nfunc request_done() {\n\tsend(\"DONE\", \"\")\n\t_, _, line := recv()\n\tdebug(\"info\", string(line))\n}\n\ntype Task struct {\n\tHost string\n\tMaster string\n\tJobname string\n\tTaskid int\n\tStage string\n\tGrouping string\n\tGroup string\n\tDisco_port int\n\tPut_port int\n\tDisco_data string\n\tDdfs_data string\n\tJobfile string\n}\n\ntype Input struct {\n\tid int\n\tstatus string\n\tlabel int\n\treplica_id int\n\treplica_location string\n}\n\ntype Output struct {\n\tlabel int\n\toutput_location string\n\toutput_size int64\n}\n\ntype Worker struct {\n\ttask *Task\n\tinput *Input\n\toutput *Output\n}\n\ntype Process func(io.Reader, io.Writer)\n\nfunc (w *Worker) runStage(output_name string, process Process) {\n\toutput, err := os.Create(output_name)\n\tCheck(err)\n\treadCloser := jobutil.AddressReader(w.input.replica_location,\n\t\tjobutil.Setting(\"DISCO_DATA\"))\n\tprocess(readCloser, output)\n\treadCloser.Close()\n\toutput.Close()\n\tw.output.output_location =\n\t\t\"disco:\/\/\" + jobutil.Setting(\"HOST\") + \"\/disco\/\" + output_name[len(w.task.Disco_data)+1:]\n\toutput, err = os.Open(output_name)\n\tCheck(err)\n\tfileinfo, err := output.Stat()\n\tCheck(err)\n\tw.output.output_size = fileinfo.Size()\n}\n\nfunc Run(Map Process, Reduce Process) {\n\tvar w Worker\n\tsend_worker()\n\tw.task = request_task()\n\n\tjobutil.SetKeyValue(\"HOST\", w.task.Host)\n\tmaster, port := jobutil.HostAndPort(w.task.Master)\n\tjobutil.SetKeyValue(\"DISCO_MASTER\", master)\n\tif port != fmt.Sprintf(\"%d\", w.task.Disco_port) {\n\t\tpanic(\"port mismatch: \" + port)\n\t}\n\tjobutil.SetKeyValue(\"DISCO_PORT\", port)\n\tjobutil.SetKeyValue(\"PUT_PORT\", string(w.task.Put_port))\n\tjobutil.SetKeyValue(\"DISCO_DATA\", w.task.Disco_data)\n\tjobutil.SetKeyValue(\"DDFS_DATA\", w.task.Ddfs_data)\n\n\tw.input = request_input()\n\n\tpwd, err := os.Getwd()\n\tCheck(err)\n\n\tw.output = new(Output)\n\tif w.task.Stage == \"map\" {\n\t\tw.runStage(pwd+\"\/map_out\", Map)\n\t} else if w.task.Stage == \"map_shuffle\" {\n\t\tw.output.output_location = w.input.replica_location\n\t} else {\n\t\tw.runStage(pwd+\"\/reduce_out\", Reduce)\n\t}\n\n\tsend_output(w.output)\n\trequest_done()\n}\n<commit_msg>worker: use random filenames for outputs of the stages.<commit_after>package worker\n\nimport (\n\t\"bufio\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/discoproject\/goworker\/jobutil\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n)\n\nconst (\n\tDEBUG = true\n)\n\nfunc Check(err error) {\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc debug(prefix string, msg interface{}) {\n\tif DEBUG {\n\t\tfile, err := os.OpenFile(\"\/tmp\/debug\", os.O_RDWR|os.O_APPEND|os.O_CREATE, 0644)\n\t\tCheck(err)\n\t\tdefer file.Close()\n\t\tfmt.Fprintf(file, \"%s: %v\\n\", prefix, msg)\n\t}\n}\n\nfunc send(key string, payload interface{}) {\n\tenc, err := json.Marshal(payload)\n\tif err != nil {\n\t\tpanic(\"could not encode\")\n\t}\n\tstr := fmt.Sprintf(\"%s %d %s\\n\", key, len(enc), enc)\n\tfmt.Printf(str)\n\tdebug(\"send\", str)\n}\n\nfunc recv() (string, int, []byte) {\n\tvar size int\n\tvar status string\n\tfmt.Scanf(\"%s %d\", &status, &size)\n\treader := bufio.NewReader(os.Stdin)\n\tinput := make([]byte, size)\n\treader.Read(input)\n\tdebug(\"recv\", fmt.Sprintf(\"%d \", size)+string(input))\n\treturn status, size, input\n}\n\nfunc send_worker() {\n\ttype WorkerMsg struct {\n\t\tPid int `json:\"pid\"`\n\t\tVersion string `json:\"version\"`\n\t}\n\twm := WorkerMsg{os.Getpid(), \"1.1\"}\n\tsend(\"WORKER\", wm)\n\n\t_, _, response := recv()\n\tif string(response) != \"\\\"ok\\\"\" {\n\t\tpanic(response)\n\t}\n}\n\nfunc request_task() *Task {\n\ttask := new(Task)\n\tsend(\"TASK\", \"\")\n\t_, _, line := recv()\n\tjson.Unmarshal(line, &task)\n\tdebug(\"info\", task)\n\treturn task\n}\n\nfunc request_input() *Input {\n\tsend(\"INPUT\", \"\")\n\t_, _, line := recv()\n\tvar mj []interface{}\n\tjson.Unmarshal(line, &mj)\n\n\tflag := mj[0].(string)\n\tif flag != \"done\" {\n\t\tpanic(flag)\n\t}\n\t_inputs := mj[1].([]interface{})\n\tinputs := _inputs[0].([]interface{})\n\n\tid := inputs[0].(float64)\n\tstatus := inputs[1].(string)\n\n\tlabel := -1\n\tswitch t := inputs[2].(type) {\n\tcase string:\n\t\tlabel = -1\n\tcase float64:\n\t\tlabel = int(t)\n\t}\n\t_replicas := inputs[3].([]interface{})\n\n\treplicas := _replicas[0].([]interface{})\n\n\t\/\/FIXME avoid conversion to float when reading the item\n\treplica_id := replicas[0].(float64)\n\treplica_location := replicas[1].(string)\n\n\tdebug(\"info\", fmt.Sprintln(id, status, label, replica_id, replica_location))\n\n\tinput := new(Input)\n\tinput.id = int(id)\n\tinput.status = status\n\tinput.label = label\n\tinput.replica_id = int(replica_id)\n\tinput.replica_location = replica_location\n\treturn input\n}\n\nfunc send_output(output *Output) {\n\tv := make([]interface{}, 3)\n\tv[0] = output.label\n\tv[1] = output.output_location \/\/\"http:\/\/example.com\"\n\tv[2] = output.output_size\n\n\tsend(\"OUTPUT\", v)\n\t_, _, line := recv()\n\tdebug(\"info\", string(line))\n}\n\nfunc request_done() {\n\tsend(\"DONE\", \"\")\n\t_, _, line := recv()\n\tdebug(\"info\", string(line))\n}\n\ntype Task struct {\n\tHost string\n\tMaster string\n\tJobname string\n\tTaskid int\n\tStage string\n\tGrouping string\n\tGroup string\n\tDisco_port int\n\tPut_port int\n\tDisco_data string\n\tDdfs_data string\n\tJobfile string\n}\n\ntype Input struct {\n\tid int\n\tstatus string\n\tlabel int\n\treplica_id int\n\treplica_location string\n}\n\ntype Output struct {\n\tlabel int\n\toutput_location string\n\toutput_size int64\n}\n\ntype Worker struct {\n\ttask *Task\n\tinput *Input\n\toutput *Output\n}\n\ntype Process func(io.Reader, io.Writer)\n\nfunc (w *Worker) runStage(pwd string, prefix string, process Process) {\n\toutput, err := ioutil.TempFile(pwd, prefix)\n\toutput_name := output.Name()\n\tCheck(err)\n\treadCloser := jobutil.AddressReader(w.input.replica_location,\n\t\tjobutil.Setting(\"DISCO_DATA\"))\n\tprocess(readCloser, output)\n\treadCloser.Close()\n\toutput.Close()\n\tw.output.output_location =\n\t\t\"disco:\/\/\" + jobutil.Setting(\"HOST\") + \"\/disco\/\" + output_name[len(w.task.Disco_data)+1:]\n\toutput, err = os.Open(output_name)\n\tCheck(err)\n\tfileinfo, err := output.Stat()\n\tCheck(err)\n\tw.output.output_size = fileinfo.Size()\n}\n\nfunc Run(Map Process, Reduce Process) {\n\tvar w Worker\n\tsend_worker()\n\tw.task = request_task()\n\n\tjobutil.SetKeyValue(\"HOST\", w.task.Host)\n\tmaster, port := jobutil.HostAndPort(w.task.Master)\n\tjobutil.SetKeyValue(\"DISCO_MASTER\", master)\n\tif port != fmt.Sprintf(\"%d\", w.task.Disco_port) {\n\t\tpanic(\"port mismatch: \" + port)\n\t}\n\tjobutil.SetKeyValue(\"DISCO_PORT\", port)\n\tjobutil.SetKeyValue(\"PUT_PORT\", string(w.task.Put_port))\n\tjobutil.SetKeyValue(\"DISCO_DATA\", w.task.Disco_data)\n\tjobutil.SetKeyValue(\"DDFS_DATA\", w.task.Ddfs_data)\n\n\tw.input = request_input()\n\n\tpwd, err := os.Getwd()\n\tCheck(err)\n\n\tw.output = new(Output)\n\tif w.task.Stage == \"map\" {\n\t\tw.runStage(pwd, \"map_out_\", Map)\n\t} else if w.task.Stage == \"map_shuffle\" {\n\t\tw.output.output_location = w.input.replica_location\n\t} else {\n\t\tw.runStage(pwd, \"reduce_out_\", Reduce)\n\t}\n\n\tsend_output(w.output)\n\trequest_done()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 clair authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package worker implements the logic to extract useful informations from a\n\/\/ container layer and store it in the database.\npackage worker\n\nimport (\n\t\"github.com\/coreos\/pkg\/capnslog\"\n\n\t\"github.com\/coreos\/clair\/database\"\n\t\"github.com\/coreos\/clair\/utils\"\n\tcerrors \"github.com\/coreos\/clair\/utils\/errors\"\n\t\"github.com\/coreos\/clair\/worker\/detectors\"\n)\n\nconst (\n\t\/\/ Version (integer) represents the worker version.\n\t\/\/ Increased each time the engine changes.\n\tVersion = 2\n\n\t\/\/ maxFileSize is the maximum size of a single file we should extract.\n\tmaxFileSize = 200 * 1024 * 1024 \/\/ 200 MiB\n)\n\nvar (\n\tlog = capnslog.NewPackageLogger(\"github.com\/coreos\/clair\", \"worker\")\n\n\t\/\/ ErrUnsupported is the error that should be raised when an OS or package\n\t\/\/ manager is not supported.\n\tErrUnsupported = cerrors.NewBadRequestError(\"worker: OS and\/or package manager are not supported\")\n\n\t\/\/ ErrParentUnknown is the error that should be raised when a parent layer\n\t\/\/ has yet to be processed for the current layer.\n\tErrParentUnknown = cerrors.NewBadRequestError(\"worker: parent layer is unknown, it must be processed first\")\n)\n\n\/\/ Process detects the Namespace of a layer, the features it adds\/removes, and\n\/\/ then stores everything in the database.\n\/\/ TODO(Quentin-M): We could have a goroutine that looks for layers that have been analyzed with an\n\/\/ older engine version and that processes them.\nfunc Process(datastore database.Datastore, imageFormat, name, parentName, path string, headers map[string]string) error {\n\t\/\/ Verify parameters.\n\tif name == \"\" {\n\t\treturn cerrors.NewBadRequestError(\"could not process a layer which does not have a name\")\n\t}\n\n\tif path == \"\" {\n\t\treturn cerrors.NewBadRequestError(\"could not process a layer which does not have a path\")\n\t}\n\n\tif imageFormat == \"\" {\n\t\treturn cerrors.NewBadRequestError(\"could not process a layer which does not have a format\")\n\t}\n\n\tlog.Debugf(\"layer %s: processing (Location: %s, Engine version: %d, Parent: %s, Format: %s)\",\n\t\tname, utils.CleanURL(path), Version, parentName, imageFormat)\n\n\t\/\/ Check to see if the layer is already in the database.\n\tlayer, err := datastore.FindLayer(name, false, false)\n\tif err != nil && err != cerrors.ErrNotFound {\n\t\treturn err\n\t}\n\n\tif err == cerrors.ErrNotFound {\n\t\t\/\/ New layer case.\n\t\tlayer = database.Layer{Name: name, EngineVersion: Version}\n\n\t\t\/\/ Retrieve the parent if it has one.\n\t\t\/\/ We need to get it with its Features in order to diff them.\n\t\tif parentName != \"\" {\n\t\t\tparent, err := datastore.FindLayer(parentName, true, false)\n\t\t\tif err != nil && err != cerrors.ErrNotFound {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err == cerrors.ErrNotFound {\n\t\t\t\tlog.Warningf(\"layer %s: the parent layer (%s) is unknown. it must be processed first\", name,\n\t\t\t\t\tparentName)\n\t\t\t\treturn ErrParentUnknown\n\t\t\t}\n\t\t\tlayer.Parent = &parent\n\t\t}\n\t} else {\n\t\t\/\/ The layer is already in the database, check if we need to update it.\n\t\tif layer.EngineVersion >= Version {\n\t\t\tlog.Debugf(`layer %s: layer content has already been processed in the past with engine %d.\n Current engine is %d. skipping analysis`, name, layer.EngineVersion, Version)\n\t\t\treturn nil\n\t\t}\n\n\t\tlog.Debugf(`layer %s: layer content has been analyzed in the past with engine %d. Current\n engine is %d. analyzing again`, name, layer.EngineVersion, Version)\n\t}\n\n\t\/\/ Analyze the content.\n\tlayer.Namespace, layer.Features, err = detectContent(imageFormat, name, path, headers, layer.Parent)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn datastore.InsertLayer(layer)\n}\n\n\/\/ detectContent downloads a layer's archive and extracts its Namespace and Features.\nfunc detectContent(imageFormat, name, path string, headers map[string]string, parent *database.Layer) (namespace *database.Namespace, featureVersions []database.FeatureVersion, err error) {\n\tdata, err := detectors.DetectData(imageFormat, path, headers, append(detectors.GetRequiredFilesFeatures(), detectors.GetRequiredFilesNamespace()...), maxFileSize)\n\tif err != nil {\n\t\tlog.Errorf(\"layer %s: failed to extract data from %s: %s\", name, utils.CleanURL(path), err)\n\t\treturn\n\t}\n\n\t\/\/ Detect namespace.\n\tnamespace = detectNamespace(name, data, parent)\n\n\t\/\/ Detect features.\n\tfeatureVersions, err = detectFeatureVersions(name, data, namespace, parent)\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(featureVersions) > 0 {\n\t\tlog.Debugf(\"layer %s: detected %d features\", name, len(featureVersions))\n\t}\n\n\treturn\n}\n\nfunc detectNamespace(name string, data map[string][]byte, parent *database.Layer) (namespace *database.Namespace) {\n\t\/\/ Use registered detectors to get the Namespace.\n\tnamespace = detectors.DetectNamespace(data)\n\tif namespace != nil {\n\t\tlog.Debugf(\"layer %s: detected namespace %q\", name, namespace.Name)\n\t\treturn\n\t}\n\n\t\/\/ Use the parent's Namespace.\n\tif parent != nil {\n\t\tnamespace = parent.Namespace\n\t\tif namespace != nil {\n\t\t\tlog.Debugf(\"layer %s: detected namespace %q (from parent)\", name, namespace.Name)\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc detectFeatureVersions(name string, data map[string][]byte, namespace *database.Namespace, parent *database.Layer) (features []database.FeatureVersion, err error) {\n\t\/\/ TODO(Quentin-M): We need to pass the parent image to DetectFeatures because it's possible that\n\t\/\/ some detectors would need it in order to produce the entire feature list (if they can only\n\t\/\/ detect a diff). Also, we should probably pass the detected namespace so detectors could\n\t\/\/ make their own decision.\n\tfeatures, err = detectors.DetectFeatures(data)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ If there are no FeatureVersions, use parent's FeatureVersions if possible.\n\t\/\/ TODO(Quentin-M): We eventually want to give the choice to each detectors to use none\/some of\n\t\/\/ their parent's FeatureVersions. It would be useful for detectors that can't find their entire\n\t\/\/ result using one Layer.\n\tif len(features) == 0 && parent != nil {\n\t\tfeatures = parent.Features\n\t\treturn\n\t}\n\n\t\/\/ Build a map of the namespaces for each FeatureVersion in our parent layer.\n\tparentFeatureNamespaces := make(map[string]database.Namespace)\n\tif parent != nil {\n\t\tfor _, parentFeature := range parent.Features {\n\t\t\tparentFeatureNamespaces[parentFeature.Feature.Name+\":\"+parentFeature.Version.String()] = parentFeature.Feature.Namespace\n\t\t}\n\t}\n\n\t\/\/ Ensure that each FeatureVersion has an associated Namespace.\n\tfor i, feature := range features {\n\t\tif feature.Feature.Namespace.Name != \"\" {\n\t\t\t\/\/ There is a Namespace associated.\n\t\t\tcontinue\n\t\t}\n\n\t\tif parentFeatureNamespace, ok := parentFeatureNamespaces[feature.Feature.Name+\":\"+feature.Version.String()]; ok {\n\t\t\t\/\/ The FeatureVersion is present in the parent layer; associate with their Namespace.\n\t\t\tfeatures[i].Feature.Namespace = parentFeatureNamespace\n\t\t\tcontinue\n\t\t}\n\n\t\tif namespace != nil {\n\t\t\t\/\/ The Namespace has been detected in this layer; associate it.\n\t\t\tfeatures[i].Feature.Namespace = *namespace\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Warningf(\"layer %s: Layer's namespace is unknown but non-namespaced features have been detected\", name)\n\t\terr = ErrUnsupported\n\t\treturn\n\t}\n\n\treturn\n}\n<commit_msg>worker: bump engine version<commit_after>\/\/ Copyright 2015 clair authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package worker implements the logic to extract useful informations from a\n\/\/ container layer and store it in the database.\npackage worker\n\nimport (\n\t\"github.com\/coreos\/pkg\/capnslog\"\n\n\t\"github.com\/coreos\/clair\/database\"\n\t\"github.com\/coreos\/clair\/utils\"\n\tcerrors \"github.com\/coreos\/clair\/utils\/errors\"\n\t\"github.com\/coreos\/clair\/worker\/detectors\"\n)\n\nconst (\n\t\/\/ Version (integer) represents the worker version.\n\t\/\/ Increased each time the engine changes.\n\tVersion = 3\n\n\t\/\/ maxFileSize is the maximum size of a single file we should extract.\n\tmaxFileSize = 200 * 1024 * 1024 \/\/ 200 MiB\n)\n\nvar (\n\tlog = capnslog.NewPackageLogger(\"github.com\/coreos\/clair\", \"worker\")\n\n\t\/\/ ErrUnsupported is the error that should be raised when an OS or package\n\t\/\/ manager is not supported.\n\tErrUnsupported = cerrors.NewBadRequestError(\"worker: OS and\/or package manager are not supported\")\n\n\t\/\/ ErrParentUnknown is the error that should be raised when a parent layer\n\t\/\/ has yet to be processed for the current layer.\n\tErrParentUnknown = cerrors.NewBadRequestError(\"worker: parent layer is unknown, it must be processed first\")\n)\n\n\/\/ Process detects the Namespace of a layer, the features it adds\/removes, and\n\/\/ then stores everything in the database.\n\/\/ TODO(Quentin-M): We could have a goroutine that looks for layers that have been analyzed with an\n\/\/ older engine version and that processes them.\nfunc Process(datastore database.Datastore, imageFormat, name, parentName, path string, headers map[string]string) error {\n\t\/\/ Verify parameters.\n\tif name == \"\" {\n\t\treturn cerrors.NewBadRequestError(\"could not process a layer which does not have a name\")\n\t}\n\n\tif path == \"\" {\n\t\treturn cerrors.NewBadRequestError(\"could not process a layer which does not have a path\")\n\t}\n\n\tif imageFormat == \"\" {\n\t\treturn cerrors.NewBadRequestError(\"could not process a layer which does not have a format\")\n\t}\n\n\tlog.Debugf(\"layer %s: processing (Location: %s, Engine version: %d, Parent: %s, Format: %s)\",\n\t\tname, utils.CleanURL(path), Version, parentName, imageFormat)\n\n\t\/\/ Check to see if the layer is already in the database.\n\tlayer, err := datastore.FindLayer(name, false, false)\n\tif err != nil && err != cerrors.ErrNotFound {\n\t\treturn err\n\t}\n\n\tif err == cerrors.ErrNotFound {\n\t\t\/\/ New layer case.\n\t\tlayer = database.Layer{Name: name, EngineVersion: Version}\n\n\t\t\/\/ Retrieve the parent if it has one.\n\t\t\/\/ We need to get it with its Features in order to diff them.\n\t\tif parentName != \"\" {\n\t\t\tparent, err := datastore.FindLayer(parentName, true, false)\n\t\t\tif err != nil && err != cerrors.ErrNotFound {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err == cerrors.ErrNotFound {\n\t\t\t\tlog.Warningf(\"layer %s: the parent layer (%s) is unknown. it must be processed first\", name,\n\t\t\t\t\tparentName)\n\t\t\t\treturn ErrParentUnknown\n\t\t\t}\n\t\t\tlayer.Parent = &parent\n\t\t}\n\t} else {\n\t\t\/\/ The layer is already in the database, check if we need to update it.\n\t\tif layer.EngineVersion >= Version {\n\t\t\tlog.Debugf(`layer %s: layer content has already been processed in the past with engine %d.\n Current engine is %d. skipping analysis`, name, layer.EngineVersion, Version)\n\t\t\treturn nil\n\t\t}\n\n\t\tlog.Debugf(`layer %s: layer content has been analyzed in the past with engine %d. Current\n engine is %d. analyzing again`, name, layer.EngineVersion, Version)\n\t}\n\n\t\/\/ Analyze the content.\n\tlayer.Namespace, layer.Features, err = detectContent(imageFormat, name, path, headers, layer.Parent)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn datastore.InsertLayer(layer)\n}\n\n\/\/ detectContent downloads a layer's archive and extracts its Namespace and Features.\nfunc detectContent(imageFormat, name, path string, headers map[string]string, parent *database.Layer) (namespace *database.Namespace, featureVersions []database.FeatureVersion, err error) {\n\tdata, err := detectors.DetectData(imageFormat, path, headers, append(detectors.GetRequiredFilesFeatures(), detectors.GetRequiredFilesNamespace()...), maxFileSize)\n\tif err != nil {\n\t\tlog.Errorf(\"layer %s: failed to extract data from %s: %s\", name, utils.CleanURL(path), err)\n\t\treturn\n\t}\n\n\t\/\/ Detect namespace.\n\tnamespace = detectNamespace(name, data, parent)\n\n\t\/\/ Detect features.\n\tfeatureVersions, err = detectFeatureVersions(name, data, namespace, parent)\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(featureVersions) > 0 {\n\t\tlog.Debugf(\"layer %s: detected %d features\", name, len(featureVersions))\n\t}\n\n\treturn\n}\n\nfunc detectNamespace(name string, data map[string][]byte, parent *database.Layer) (namespace *database.Namespace) {\n\t\/\/ Use registered detectors to get the Namespace.\n\tnamespace = detectors.DetectNamespace(data)\n\tif namespace != nil {\n\t\tlog.Debugf(\"layer %s: detected namespace %q\", name, namespace.Name)\n\t\treturn\n\t}\n\n\t\/\/ Use the parent's Namespace.\n\tif parent != nil {\n\t\tnamespace = parent.Namespace\n\t\tif namespace != nil {\n\t\t\tlog.Debugf(\"layer %s: detected namespace %q (from parent)\", name, namespace.Name)\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc detectFeatureVersions(name string, data map[string][]byte, namespace *database.Namespace, parent *database.Layer) (features []database.FeatureVersion, err error) {\n\t\/\/ TODO(Quentin-M): We need to pass the parent image to DetectFeatures because it's possible that\n\t\/\/ some detectors would need it in order to produce the entire feature list (if they can only\n\t\/\/ detect a diff). Also, we should probably pass the detected namespace so detectors could\n\t\/\/ make their own decision.\n\tfeatures, err = detectors.DetectFeatures(data)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ If there are no FeatureVersions, use parent's FeatureVersions if possible.\n\t\/\/ TODO(Quentin-M): We eventually want to give the choice to each detectors to use none\/some of\n\t\/\/ their parent's FeatureVersions. It would be useful for detectors that can't find their entire\n\t\/\/ result using one Layer.\n\tif len(features) == 0 && parent != nil {\n\t\tfeatures = parent.Features\n\t\treturn\n\t}\n\n\t\/\/ Build a map of the namespaces for each FeatureVersion in our parent layer.\n\tparentFeatureNamespaces := make(map[string]database.Namespace)\n\tif parent != nil {\n\t\tfor _, parentFeature := range parent.Features {\n\t\t\tparentFeatureNamespaces[parentFeature.Feature.Name+\":\"+parentFeature.Version.String()] = parentFeature.Feature.Namespace\n\t\t}\n\t}\n\n\t\/\/ Ensure that each FeatureVersion has an associated Namespace.\n\tfor i, feature := range features {\n\t\tif feature.Feature.Namespace.Name != \"\" {\n\t\t\t\/\/ There is a Namespace associated.\n\t\t\tcontinue\n\t\t}\n\n\t\tif parentFeatureNamespace, ok := parentFeatureNamespaces[feature.Feature.Name+\":\"+feature.Version.String()]; ok {\n\t\t\t\/\/ The FeatureVersion is present in the parent layer; associate with their Namespace.\n\t\t\tfeatures[i].Feature.Namespace = parentFeatureNamespace\n\t\t\tcontinue\n\t\t}\n\n\t\tif namespace != nil {\n\t\t\t\/\/ The Namespace has been detected in this layer; associate it.\n\t\t\tfeatures[i].Feature.Namespace = *namespace\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Warningf(\"layer %s: Layer's namespace is unknown but non-namespaced features have been detected\", name)\n\t\terr = ErrUnsupported\n\t\treturn\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"fmt\"\n\t\"github.com\/centurylinkcloud\/clc-go-cli\/base\"\n\t\"github.com\/centurylinkcloud\/clc-go-cli\/models\"\n\t\"github.com\/centurylinkcloud\/clc-go-cli\/models\/datacenter\"\n\t\"github.com\/centurylinkcloud\/clc-go-cli\/models\/group\"\n)\n\nconst (\n\tBaseURL = \"https:\/\/api.ctl.io\"\n)\n\ntype GroupList struct {\n\tCommandBase\n}\n\nfunc NewGroupList(info CommandExcInfo) *GroupList {\n\tg := GroupList{}\n\tg.ExcInfo = info\n\treturn &g\n}\n\nfunc (g *GroupList) Execute(cn base.Connection) error {\n\tvar err error\n\n\tg.Output, err = GetGroups(cn)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc GetGroups(cn base.Connection) ([]group.Entity, error) {\n\tvar err error\n\tvar groups []group.Entity\n\n\tdatacenters := []datacenter.GetRes{}\n\tdcURL := fmt.Sprintf(\"%s\/v2\/datacenters\/{accountAlias}\", BaseURL)\n\terr = cn.ExecuteRequest(\"GET\", dcURL, nil, &datacenters)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, ref := range datacenters {\n\t\t\/\/ Get detailed DC info.\n\t\td := datacenter.GetRes{}\n\t\tdcURL = fmt.Sprintf(\"%s\/%s?groupLinks=true\", BaseURL, GetLink(ref.Links, \"self\"))\n\t\terr = cn.ExecuteRequest(\"GET\", dcURL, nil, &d)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t\/\/ Get the root group of the given DC.\n\t\tg := group.Entity{}\n\t\tgURL := fmt.Sprintf(\"%s\/%s\", BaseURL, GetLink(d.Links, \"group\"))\n\t\terr = cn.ExecuteRequest(\"GET\", gURL, nil, &g)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tgroups = append(groups, g)\n\t}\n\treturn groups, nil\n}\n\nfunc GetLink(links []models.LinkEntity, resource string) string {\n\tfor _, link := range links {\n\t\tif link.Rel == resource {\n\t\t\treturn link.Href\n\t\t}\n\t}\n\tpanic(fmt.Sprintf(\"No %s link found\", resource))\n}\n<commit_msg>Fix the URL joints.<commit_after>package commands\n\nimport (\n\t\"fmt\"\n\t\"github.com\/centurylinkcloud\/clc-go-cli\/base\"\n\t\"github.com\/centurylinkcloud\/clc-go-cli\/models\"\n\t\"github.com\/centurylinkcloud\/clc-go-cli\/models\/datacenter\"\n\t\"github.com\/centurylinkcloud\/clc-go-cli\/models\/group\"\n)\n\nconst (\n\tBaseURL = \"https:\/\/api.ctl.io\"\n)\n\ntype GroupList struct {\n\tCommandBase\n}\n\nfunc NewGroupList(info CommandExcInfo) *GroupList {\n\tg := GroupList{}\n\tg.ExcInfo = info\n\treturn &g\n}\n\nfunc (g *GroupList) Execute(cn base.Connection) error {\n\tvar err error\n\n\tg.Output, err = GetGroups(cn)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc GetGroups(cn base.Connection) ([]group.Entity, error) {\n\tvar err error\n\tvar groups []group.Entity\n\n\tdatacenters := []datacenter.GetRes{}\n\tdcURL := fmt.Sprintf(\"%s\/v2\/datacenters\/{accountAlias}\", BaseURL)\n\terr = cn.ExecuteRequest(\"GET\", dcURL, nil, &datacenters)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, ref := range datacenters {\n\t\t\/\/ Get detailed DC info.\n\t\td := datacenter.GetRes{}\n\t\tdcURL = fmt.Sprintf(\"%s%s?groupLinks=true\", BaseURL, GetLink(ref.Links, \"self\"))\n\t\terr = cn.ExecuteRequest(\"GET\", dcURL, nil, &d)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t\/\/ Get the root group of the given DC.\n\t\tg := group.Entity{}\n\t\tgURL := fmt.Sprintf(\"%s%s\", BaseURL, GetLink(d.Links, \"group\"))\n\t\terr = cn.ExecuteRequest(\"GET\", gURL, nil, &g)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tgroups = append(groups, g)\n\t}\n\treturn groups, nil\n}\n\nfunc GetLink(links []models.LinkEntity, resource string) string {\n\tfor _, link := range links {\n\t\tif link.Rel == resource {\n\t\t\treturn link.Href\n\t\t}\n\t}\n\tpanic(fmt.Sprintf(\"No %s link found\", resource))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t_ \"github.com\/bmizerany\/pq\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n)\n\nconst OPEN311_API_URI = \"http:\/\/311api.cityofchicago.org\/open311\/v2\/requests.json?extensions=true&page_size=500\"\n\ntype Open311Request struct {\n\tLat, Long float64\n\tWard, Police_district int\n\tService_request_id, Status, Service_name, Service_code, Agency_responsible, Address, Channel, Media_url string\n\tRequested_datetime, Updated_datetime string \/\/ FIXME: should these be proper time objects?\n\tExtended_attributes map[string]interface{}\n}\n\nfunc main() {\n\t\/\/ open database\n\tdb, err := sql.Open(\"postgres\", \"dbname=cwfy sslmode=disable\")\n\tif err != nil {\n\t\tlog.Fatal(\"Cannot open database connection\", err)\n\t}\n\tdefer db.Close()\n\n\t\/\/ load requests from open311\n\trequests := fetchRequests()\n\n\tfor _, request := range requests {\n\t\t\/\/ for each request, either create or update the\n\t\t\/\/ corresponding record in the database.\n\n\t\tif request.Service_request_id == \"\" {\n\t\t\tlog.Printf(\"Ignoring a request type %s because there is no SR number assigned\", request.Service_name)\n\t\t\tcontinue\n\t\t}\n\n\t\tinsert_stmt, err := db.Prepare(\"INSERT INTO service_requests(service_request_id,\" +\n\t\t\t\"status, service_name, service_code, agency_responsible, \" +\n\t\t\t\"address, requested_datetime, updated_datetime, lat, long,\" +\n\t\t\t\"ward, police_district, media_url, channel, duplicate, parent_service_request_id) \" +\n\t\t\t\"SELECT $1::varchar, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16 \" +\n\t\t\t\"WHERE NOT EXISTS (SELECT 1 FROM service_requests WHERE service_request_id = $1);\")\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"error preparing database insert statement\", err)\n\t\t}\n\n\t\tupdate_stmt, err := db.Prepare(\"UPDATE service_requests SET \" +\n\t\t\t\"status = $2, service_name = $3, service_code = $4, agency_responsible = $5, \" +\n\t\t\t\"address = $6, requested_datetime = $7, updated_datetime = $8, lat = $9, long = $10,\" +\n\t\t\t\"ward = $11, police_district = $12, media_url = $13, channel = $14, duplicate = $15, \" +\n\t\t\t\"parent_service_request_id = $16, updated_at = NOW() WHERE service_request_id = $1;\")\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"error preparing database update statement\", err)\n\t\t}\n\n\t\ttx, err := db.Begin()\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"error beginning transaction\", err)\n\t\t}\n\n\t\t_, err = tx.Stmt(update_stmt).Exec(request.Service_request_id,\n\t\t\trequest.Status,\n\t\t\trequest.Service_name,\n\t\t\trequest.Service_code,\n\t\t\trequest.Agency_responsible,\n\t\t\trequest.Address,\n\t\t\trequest.Requested_datetime,\n\t\t\trequest.Updated_datetime,\n\t\t\trequest.Lat,\n\t\t\trequest.Long,\n\t\t\trequest.Extended_attributes[\"ward\"],\n\t\t\trequest.Extended_attributes[\"police_district\"],\n\t\t\trequest.Media_url,\n\t\t\trequest.Extended_attributes[\"channel\"],\n\t\t\trequest.Extended_attributes[\"duplicate\"],\n\t\t\trequest.Extended_attributes[\"parent_service_request_id\"])\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"could not update %s because %s\", request.Service_request_id, err)\n\t\t}\n\n\t\t_, err = tx.Stmt(insert_stmt).Exec(request.Service_request_id,\n\t\t\trequest.Status,\n\t\t\trequest.Service_name,\n\t\t\trequest.Service_code,\n\t\t\trequest.Agency_responsible,\n\t\t\trequest.Address,\n\t\t\trequest.Requested_datetime,\n\t\t\trequest.Updated_datetime,\n\t\t\trequest.Lat,\n\t\t\trequest.Long,\n\t\t\trequest.Extended_attributes[\"ward\"],\n\t\t\trequest.Extended_attributes[\"police_district\"],\n\t\t\trequest.Media_url,\n\t\t\trequest.Extended_attributes[\"channel\"],\n\t\t\trequest.Extended_attributes[\"duplicate\"],\n\t\t\trequest.Extended_attributes[\"parent_service_request_id\"])\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"could not save %s because %s\", request.Service_request_id, err)\n\t\t} else {\n\t\t\tlog.Printf(\"saved SR %s\", request)\n\t\t}\n\n\t\terr = tx.Commit()\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"error closing transaction\", err)\n\t\t}\n\t}\n}\n\nfunc (req Open311Request) String() string {\n\t\/\/ pretty print SR information\n\treturn fmt.Sprintf(\"%s: %s at %s %f,%f, last update %s\", req.Service_request_id, req.Service_name, req.Address, req.Lat, req.Long, req.Updated_datetime)\n}\n\nfunc fetchRequests() (requests []Open311Request) {\n\tdb, err := sql.Open(\"postgres\", \"dbname=cwfy sslmode=disable\")\n\tif err != nil {\n\t\tlog.Fatal(\"Cannot open database connection\", err)\n\t}\n\tdefer db.Close()\n\n\t\/\/ find the most recent SR that we know about in the database\n\trows, err := db.Query(\"SELECT MAX(updated_datetime) FROM service_requests;\")\n\tif err != nil {\n\t\tlog.Fatal(\"error finding most recent service request\", err)\n\t}\n\n\tlast_updated_at := time.Now()\n\tfor rows.Next() {\n\t\tif err := rows.Scan(&last_updated_at); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tlog.Printf(\"most recent SR timestamp %s\", last_updated_at)\n\t}\n\n\t\/\/ janky hack to transform the last updated timestamp into\n\t\/\/ a format that plays nicely with the Open311 API\n\t\/\/ FIXME: there HAS to be a better way to handle this.\n\tformatted_date_string := last_updated_at.Format(time.RFC3339)\n\tformatted_date_string_with_tz := formatted_date_string[0:len(formatted_date_string)-1] + \"-0500\" \/\/ trunc the trailing 'Z' and tack on timezone\n\n\t\/\/ construct the request URI using base params and the proper time\n\topen311_api_endpoint := OPEN311_API_URI + \"&updated_after=\" + formatted_date_string_with_tz\n\n\tlog.Printf(\"fetching from %s\", open311_api_endpoint)\n\tresp, err := http.Get(open311_api_endpoint)\n\tdefer resp.Body.Close()\n\n\tif err != nil {\n\t\tlog.Fatalln(\"error fetching from Open311 endpoint\", err)\n\t}\n\n\t\/\/ load response body\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Fatal(\"error loading response body\", err)\n\t}\n\n\t\/\/ parse JSON and load into an array of Open311Request objects\n\terr = json.Unmarshal(body, &requests)\n\tif err != nil {\n\t\tlog.Fatal(\"error parsing JSON:\", err)\n\t}\n\n\tlog.Printf(\"received %d requests from Open311\", len(requests))\n\t\n\treturn requests\n}\n<commit_msg>poll on a 30 second interval<commit_after>package main\n\nimport (\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t_ \"github.com\/bmizerany\/pq\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"time\"\n)\n\nconst OPEN311_API_URI = \"http:\/\/311api.cityofchicago.org\/open311\/v2\/requests.json?extensions=true&page_size=500\"\n\ntype Open311Request struct {\n\tLat, Long float64\n\tWard, Police_district int\n\tService_request_id, Status, Service_name, Service_code, Agency_responsible, Address, Channel, Media_url string\n\tRequested_datetime, Updated_datetime string \/\/ FIXME: should these be proper time objects?\n\tExtended_attributes map[string]interface{}\n}\n\nfunc main() {\n\t\/\/ open database\n\tdb, err := sql.Open(\"postgres\", \"dbname=cwfy sslmode=disable\")\n\tif err != nil {\n\t\tlog.Fatal(\"Cannot open database connection\", err)\n\t}\n\tdefer db.Close()\n\n\tlast_run_at := time.Now()\n\n\tfor {\n\t\tswitch {\n\t\tcase time.Since(last_run_at) > (30 * time.Second):\n\t\t\tpoll_open311(db)\n\t\t\tlast_run_at = time.Now()\n\t\tdefault:\n\t\t\tlog.Print(\"sleeping for 10 seconds\")\n\t\t\ttime.Sleep(10 * time.Second)\n\t\t}\n\t}\n}\n\nfunc (req Open311Request) String() string {\n\t\/\/ pretty print SR information\n\treturn fmt.Sprintf(\"%s: %s at %s %f,%f, last update %s\", req.Service_request_id, req.Service_name, req.Address, req.Lat, req.Long, req.Updated_datetime)\n}\n\nfunc fetchRequests() (requests []Open311Request) {\n\tdb, err := sql.Open(\"postgres\", \"dbname=cwfy sslmode=disable\")\n\tif err != nil {\n\t\tlog.Fatal(\"Cannot open database connection\", err)\n\t}\n\tdefer db.Close()\n\n\t\/\/ find the most recent SR that we know about in the database\n\trows, err := db.Query(\"SELECT MAX(updated_datetime) FROM service_requests;\")\n\tif err != nil {\n\t\tlog.Fatal(\"error finding most recent service request\", err)\n\t}\n\n\tlast_updated_at := time.Now()\n\tfor rows.Next() {\n\t\tif err := rows.Scan(&last_updated_at); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tlog.Printf(\"most recent SR timestamp %s\", last_updated_at)\n\t}\n\n\t\/\/ janky hack to transform the last updated timestamp into\n\t\/\/ a format that plays nicely with the Open311 API\n\t\/\/ FIXME: there HAS to be a better way to handle this.\n\tformatted_date_string := last_updated_at.Format(time.RFC3339)\n\tformatted_date_string_with_tz := formatted_date_string[0:len(formatted_date_string)-1] + \"-0500\" \/\/ trunc the trailing 'Z' and tack on timezone\n\n\t\/\/ construct the request URI using base params and the proper time\n\topen311_api_endpoint := OPEN311_API_URI + \"&updated_after=\" + formatted_date_string_with_tz\n\n\tlog.Printf(\"fetching from %s\", open311_api_endpoint)\n\tresp, err := http.Get(open311_api_endpoint)\n\tdefer resp.Body.Close()\n\n\tif err != nil {\n\t\tlog.Fatalln(\"error fetching from Open311 endpoint\", err)\n\t}\n\n\t\/\/ load response body\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Fatal(\"error loading response body\", err)\n\t}\n\n\t\/\/ parse JSON and load into an array of Open311Request objects\n\terr = json.Unmarshal(body, &requests)\n\tif err != nil {\n\t\tlog.Fatal(\"error parsing JSON:\", err)\n\t}\n\n\tlog.Printf(\"received %d requests from Open311\", len(requests))\n\n\treturn requests\n}\n\nfunc poll_open311(db *sql.DB) {\n\t\/\/ load requests from open311\n\trequests := fetchRequests()\n\n\tfor _, request := range requests {\n\t\t\/\/ for each request, either create or update the\n\t\t\/\/ corresponding record in the database.\n\n\t\tif request.Service_request_id == \"\" {\n\t\t\tlog.Printf(\"Ignoring a request type %s because there is no SR number assigned\", request.Service_name)\n\t\t\tcontinue\n\t\t}\n\n\t\tinsert_stmt, err := db.Prepare(\"INSERT INTO service_requests(service_request_id,\" +\n\t\t\t\"status, service_name, service_code, agency_responsible, \" +\n\t\t\t\"address, requested_datetime, updated_datetime, lat, long,\" +\n\t\t\t\"ward, police_district, media_url, channel, duplicate, parent_service_request_id) \" +\n\t\t\t\"SELECT $1::varchar, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16 \" +\n\t\t\t\"WHERE NOT EXISTS (SELECT 1 FROM service_requests WHERE service_request_id = $1);\")\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"error preparing database insert statement\", err)\n\t\t}\n\n\t\tupdate_stmt, err := db.Prepare(\"UPDATE service_requests SET \" +\n\t\t\t\"status = $2, service_name = $3, service_code = $4, agency_responsible = $5, \" +\n\t\t\t\"address = $6, requested_datetime = $7, updated_datetime = $8, lat = $9, long = $10,\" +\n\t\t\t\"ward = $11, police_district = $12, media_url = $13, channel = $14, duplicate = $15, \" +\n\t\t\t\"parent_service_request_id = $16, updated_at = NOW() WHERE service_request_id = $1;\")\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"error preparing database update statement\", err)\n\t\t}\n\n\t\ttx, err := db.Begin()\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"error beginning transaction\", err)\n\t\t}\n\n\t\t_, err = tx.Stmt(update_stmt).Exec(request.Service_request_id,\n\t\t\trequest.Status,\n\t\t\trequest.Service_name,\n\t\t\trequest.Service_code,\n\t\t\trequest.Agency_responsible,\n\t\t\trequest.Address,\n\t\t\trequest.Requested_datetime,\n\t\t\trequest.Updated_datetime,\n\t\t\trequest.Lat,\n\t\t\trequest.Long,\n\t\t\trequest.Extended_attributes[\"ward\"],\n\t\t\trequest.Extended_attributes[\"police_district\"],\n\t\t\trequest.Media_url,\n\t\t\trequest.Extended_attributes[\"channel\"],\n\t\t\trequest.Extended_attributes[\"duplicate\"],\n\t\t\trequest.Extended_attributes[\"parent_service_request_id\"])\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"could not update %s because %s\", request.Service_request_id, err)\n\t\t}\n\n\t\t_, err = tx.Stmt(insert_stmt).Exec(request.Service_request_id,\n\t\t\trequest.Status,\n\t\t\trequest.Service_name,\n\t\t\trequest.Service_code,\n\t\t\trequest.Agency_responsible,\n\t\t\trequest.Address,\n\t\t\trequest.Requested_datetime,\n\t\t\trequest.Updated_datetime,\n\t\t\trequest.Lat,\n\t\t\trequest.Long,\n\t\t\trequest.Extended_attributes[\"ward\"],\n\t\t\trequest.Extended_attributes[\"police_district\"],\n\t\t\trequest.Media_url,\n\t\t\trequest.Extended_attributes[\"channel\"],\n\t\t\trequest.Extended_attributes[\"duplicate\"],\n\t\t\trequest.Extended_attributes[\"parent_service_request_id\"])\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"could not save %s because %s\", request.Service_request_id, err)\n\t\t} else {\n\t\t\tlog.Printf(\"saved SR %s\", request)\n\t\t}\n\n\t\terr = tx.Commit()\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"error closing transaction\", err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package sparta\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strings\"\n\t\"text\/template\"\n\n\tgocf \"github.com\/mweagle\/go-cloudformation\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ Utility function to marshal an interface\nfunc marshalInterface(item interface{}) interface{} {\n\tif item != nil {\n\t\treturn item\n\t}\n\treturn item\n}\n\n\/\/ Utility function to marshal an int\nfunc marshalInt(intVal int64) *gocf.IntegerExpr {\n\tif intVal != 0 {\n\t\treturn gocf.Integer(intVal)\n\t}\n\treturn nil\n}\n\n\/\/ Utility function to marshal a string\nfunc marshalString(stringVal string) *gocf.StringExpr {\n\tif stringVal != \"\" {\n\t\treturn gocf.String(stringVal)\n\t}\n\treturn nil\n}\n\nfunc marshalStringExpr(stringExpr gocf.Stringable) *gocf.StringExpr {\n\tif stringExpr != nil {\n\t\treturn stringExpr.String()\n\t}\n\treturn nil\n}\n\n\/\/ Utility function to marshal a string lsit\nfunc marshalStringList(stringVals []string) *gocf.StringListExpr {\n\tif len(stringVals) != 0 {\n\t\tstringableList := make([]gocf.Stringable, len(stringVals))\n\t\tfor eachIndex, eachStringVal := range stringVals {\n\t\t\tstringableList[eachIndex] = gocf.String(eachStringVal)\n\t\t}\n\t\treturn gocf.StringList(stringableList...)\n\t}\n\treturn nil\n}\n\n\/\/ Utility function to marshal a boolean\nfunc marshalBool(boolValue bool) *gocf.BoolExpr {\n\tif !boolValue {\n\t\treturn gocf.Bool(boolValue)\n\t}\n\treturn nil\n}\n\n\/\/ resourceOutputs is responsible for returning the conditional\n\/\/ set of CloudFormation outputs for a given resource type.\nfunc resourceOutputs(resourceName string,\n\tresource gocf.ResourceProperties,\n\tlogger *logrus.Logger) ([]string, error) {\n\n\toutputProps := []string{}\n\tswitch typedResource := resource.(type) {\n\tcase gocf.IAMRole:\n\t\t\/\/ NOP\n\tcase *gocf.DynamoDBTable:\n\t\tif typedResource.StreamSpecification != nil {\n\t\t\toutputProps = append(outputProps, \"StreamArn\")\n\t\t}\n\tcase gocf.DynamoDBTable:\n\t\tif typedResource.StreamSpecification != nil {\n\t\t\toutputProps = append(outputProps, \"StreamArn\")\n\t\t}\n\tcase gocf.KinesisStream,\n\t\t*gocf.KinesisStream:\n\t\toutputProps = append(outputProps, \"Arn\")\n\tcase gocf.Route53RecordSet,\n\t\t*gocf.Route53RecordSet:\n\t\t\/\/ NOP\n\tcase gocf.S3Bucket,\n\t\t*gocf.S3Bucket:\n\t\toutputProps = append(outputProps, \"DomainName\", \"WebsiteURL\")\n\tcase gocf.SNSTopic,\n\t\t*gocf.SNSTopic:\n\t\toutputProps = append(outputProps, \"TopicName\")\n\tcase gocf.SQSQueue,\n\t\t*gocf.SQSQueue:\n\t\toutputProps = append(outputProps, \"Arn\", \"QueueName\")\n\tdefault:\n\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\"ResourceType\": fmt.Sprintf(\"%T\", typedResource),\n\t\t}).Warn(\"Discovery information for dependency not yet implemented\")\n\t}\n\treturn outputProps, nil\n}\n\nfunc newCloudFormationResource(resourceType string, logger *logrus.Logger) (gocf.ResourceProperties, error) {\n\tresProps := gocf.NewResourceByType(resourceType)\n\tif nil == resProps {\n\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\"Type\": resourceType,\n\t\t}).Fatal(\"Failed to create CloudFormation CustomResource!\")\n\t\treturn nil, fmt.Errorf(\"unsupported CustomResourceType: %s\", resourceType)\n\t}\n\treturn resProps, nil\n}\n\ntype discoveryDataTemplate struct {\n\tResourceID string\n\tResourceType string\n\tResourceProperties string\n}\n\nvar discoveryDataForResourceDependency = `\n\t{\n\t\t\"ResourceID\" : \"<< .ResourceID >>\",\n\t\t\"ResourceRef\" : \"{\"Ref\":\"<< .ResourceID >>\"}\",\n\t\t\"ResourceType\" : \"<< .ResourceType >>\",\n\t\t\"Properties\" : {\n\t\t\t<< .ResourceProperties >>\n\t\t}\n\t}\n`\n\nfunc discoveryResourceInfoForDependency(cfTemplate *gocf.Template,\n\tlogicalResourceName string,\n\tlogger *logrus.Logger) ([]byte, error) {\n\n\titem, ok := cfTemplate.Resources[logicalResourceName]\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\tresourceOutputs, resourceOutputsErr := resourceOutputs(logicalResourceName,\n\t\titem.Properties,\n\t\tlogger)\n\tif resourceOutputsErr != nil {\n\t\treturn nil, resourceOutputsErr\n\t}\n\t\/\/ Template data\n\ttemplateData := &discoveryDataTemplate{\n\t\tResourceID: logicalResourceName,\n\t\tResourceType: item.Properties.CfnResourceType(),\n\t}\n\tquotedAttrs := make([]string, len(resourceOutputs))\n\tfor eachIndex, eachOutput := range resourceOutputs {\n\t\tquotedAttrs[eachIndex] = fmt.Sprintf(`\"%s\" :\"{ \"Fn::GetAtt\" : [ \"%s\", \"%s\" ] }\"`,\n\t\t\teachOutput,\n\t\t\tlogicalResourceName,\n\t\t\teachOutput)\n\t}\n\ttemplateData.ResourceProperties = strings.Join(quotedAttrs, \",\")\n\n\t\/\/ Create the data that can be stuffed into Environment\n\tdiscoveryTemplate, discoveryTemplateErr := template.New(\"discoveryResourceData\").\n\t\tDelims(\"<<\", \">>\").\n\t\tParse(discoveryDataForResourceDependency)\n\tif nil != discoveryTemplateErr {\n\t\treturn nil, discoveryTemplateErr\n\t}\n\n\tvar templateResults bytes.Buffer\n\tevalResultErr := discoveryTemplate.Execute(&templateResults, templateData)\n\treturn templateResults.Bytes(), evalResultErr\n}\nfunc safeAppendDependency(resource *gocf.Resource, dependencyName string) {\n\tif nil == resource.DependsOn {\n\t\tresource.DependsOn = []string{}\n\t}\n\tresource.DependsOn = append(resource.DependsOn, dependencyName)\n}\nfunc safeMetadataInsert(resource *gocf.Resource, key string, value interface{}) {\n\tif nil == resource.Metadata {\n\t\tresource.Metadata = make(map[string]interface{})\n\t}\n\tresource.Metadata[key] = value\n}\n<commit_msg>Be a bit more explicit<commit_after>package sparta\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strings\"\n\t\"text\/template\"\n\n\tgocf \"github.com\/mweagle\/go-cloudformation\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ Utility function to marshal an interface\nfunc marshalInterface(item interface{}) interface{} {\n\tif item != nil {\n\t\treturn item\n\t}\n\treturn nil\n}\n\n\/\/ Utility function to marshal an int\nfunc marshalInt(intVal int64) *gocf.IntegerExpr {\n\tif intVal != 0 {\n\t\treturn gocf.Integer(intVal)\n\t}\n\treturn nil\n}\n\n\/\/ Utility function to marshal a string\nfunc marshalString(stringVal string) *gocf.StringExpr {\n\tif stringVal != \"\" {\n\t\treturn gocf.String(stringVal)\n\t}\n\treturn nil\n}\n\nfunc marshalStringExpr(stringExpr gocf.Stringable) *gocf.StringExpr {\n\tif stringExpr != nil {\n\t\treturn stringExpr.String()\n\t}\n\treturn nil\n}\n\n\/\/ Utility function to marshal a string lsit\nfunc marshalStringList(stringVals []string) *gocf.StringListExpr {\n\tif len(stringVals) != 0 {\n\t\tstringableList := make([]gocf.Stringable, len(stringVals))\n\t\tfor eachIndex, eachStringVal := range stringVals {\n\t\t\tstringableList[eachIndex] = gocf.String(eachStringVal)\n\t\t}\n\t\treturn gocf.StringList(stringableList...)\n\t}\n\treturn nil\n}\n\n\/\/ Utility function to marshal a boolean\nfunc marshalBool(boolValue bool) *gocf.BoolExpr {\n\tif !boolValue {\n\t\treturn gocf.Bool(boolValue)\n\t}\n\treturn nil\n}\n\n\/\/ resourceOutputs is responsible for returning the conditional\n\/\/ set of CloudFormation outputs for a given resource type.\nfunc resourceOutputs(resourceName string,\n\tresource gocf.ResourceProperties,\n\tlogger *logrus.Logger) ([]string, error) {\n\n\toutputProps := []string{}\n\tswitch typedResource := resource.(type) {\n\tcase gocf.IAMRole:\n\t\t\/\/ NOP\n\tcase *gocf.DynamoDBTable:\n\t\tif typedResource.StreamSpecification != nil {\n\t\t\toutputProps = append(outputProps, \"StreamArn\")\n\t\t}\n\tcase gocf.DynamoDBTable:\n\t\tif typedResource.StreamSpecification != nil {\n\t\t\toutputProps = append(outputProps, \"StreamArn\")\n\t\t}\n\tcase gocf.KinesisStream,\n\t\t*gocf.KinesisStream:\n\t\toutputProps = append(outputProps, \"Arn\")\n\tcase gocf.Route53RecordSet,\n\t\t*gocf.Route53RecordSet:\n\t\t\/\/ NOP\n\tcase gocf.S3Bucket,\n\t\t*gocf.S3Bucket:\n\t\toutputProps = append(outputProps, \"DomainName\", \"WebsiteURL\")\n\tcase gocf.SNSTopic,\n\t\t*gocf.SNSTopic:\n\t\toutputProps = append(outputProps, \"TopicName\")\n\tcase gocf.SQSQueue,\n\t\t*gocf.SQSQueue:\n\t\toutputProps = append(outputProps, \"Arn\", \"QueueName\")\n\tdefault:\n\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\"ResourceType\": fmt.Sprintf(\"%T\", typedResource),\n\t\t}).Warn(\"Discovery information for dependency not yet implemented\")\n\t}\n\treturn outputProps, nil\n}\n\nfunc newCloudFormationResource(resourceType string, logger *logrus.Logger) (gocf.ResourceProperties, error) {\n\tresProps := gocf.NewResourceByType(resourceType)\n\tif nil == resProps {\n\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\"Type\": resourceType,\n\t\t}).Fatal(\"Failed to create CloudFormation CustomResource!\")\n\t\treturn nil, fmt.Errorf(\"unsupported CustomResourceType: %s\", resourceType)\n\t}\n\treturn resProps, nil\n}\n\ntype discoveryDataTemplate struct {\n\tResourceID string\n\tResourceType string\n\tResourceProperties string\n}\n\nvar discoveryDataForResourceDependency = `\n\t{\n\t\t\"ResourceID\" : \"<< .ResourceID >>\",\n\t\t\"ResourceRef\" : \"{\"Ref\":\"<< .ResourceID >>\"}\",\n\t\t\"ResourceType\" : \"<< .ResourceType >>\",\n\t\t\"Properties\" : {\n\t\t\t<< .ResourceProperties >>\n\t\t}\n\t}\n`\n\nfunc discoveryResourceInfoForDependency(cfTemplate *gocf.Template,\n\tlogicalResourceName string,\n\tlogger *logrus.Logger) ([]byte, error) {\n\n\titem, ok := cfTemplate.Resources[logicalResourceName]\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\tresourceOutputs, resourceOutputsErr := resourceOutputs(logicalResourceName,\n\t\titem.Properties,\n\t\tlogger)\n\tif resourceOutputsErr != nil {\n\t\treturn nil, resourceOutputsErr\n\t}\n\t\/\/ Template data\n\ttemplateData := &discoveryDataTemplate{\n\t\tResourceID: logicalResourceName,\n\t\tResourceType: item.Properties.CfnResourceType(),\n\t}\n\tquotedAttrs := make([]string, len(resourceOutputs))\n\tfor eachIndex, eachOutput := range resourceOutputs {\n\t\tquotedAttrs[eachIndex] = fmt.Sprintf(`\"%s\" :\"{ \"Fn::GetAtt\" : [ \"%s\", \"%s\" ] }\"`,\n\t\t\teachOutput,\n\t\t\tlogicalResourceName,\n\t\t\teachOutput)\n\t}\n\ttemplateData.ResourceProperties = strings.Join(quotedAttrs, \",\")\n\n\t\/\/ Create the data that can be stuffed into Environment\n\tdiscoveryTemplate, discoveryTemplateErr := template.New(\"discoveryResourceData\").\n\t\tDelims(\"<<\", \">>\").\n\t\tParse(discoveryDataForResourceDependency)\n\tif nil != discoveryTemplateErr {\n\t\treturn nil, discoveryTemplateErr\n\t}\n\n\tvar templateResults bytes.Buffer\n\tevalResultErr := discoveryTemplate.Execute(&templateResults, templateData)\n\treturn templateResults.Bytes(), evalResultErr\n}\nfunc safeAppendDependency(resource *gocf.Resource, dependencyName string) {\n\tif nil == resource.DependsOn {\n\t\tresource.DependsOn = []string{}\n\t}\n\tresource.DependsOn = append(resource.DependsOn, dependencyName)\n}\nfunc safeMetadataInsert(resource *gocf.Resource, key string, value interface{}) {\n\tif nil == resource.Metadata {\n\t\tresource.Metadata = make(map[string]interface{})\n\t}\n\tresource.Metadata[key] = value\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage workload\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n\n\t\"github.com\/juju\/errors\"\n\t\"gopkg.in\/juju\/charm.v5\"\n)\n\n\/\/ Payload holds information about a charm payload.\ntype Payload struct {\n\tcharm.PayloadClass\n\n\t\/\/ ID is a unique string identifying the payload to\n\t\/\/ the underlying technology.\n\tID string\n\n\t\/\/ Status is the Juju-level status of the workload.\n\tStatus string\n\n\t\/\/ Tags are tags associated with the payload.\n\tTags []string\n\n\t\/\/ Unit identifies the Juju unit associated with the payload.\n\tUnit string\n\n\t\/\/ Machine identifies the Juju machine associated with the payload.\n\tMachine string\n}\n\n\/\/ Info holds information about a workload that Juju needs. Iff the\n\/\/ workload has not been registered with Juju then the Status and\n\/\/ Details fields will be zero values.\n\/\/\n\/\/ A registered workload is one which has been defined in Juju (e.g. in\n\/\/ charm metadata) and subsequently was launched by Juju (e.g. in a\n\/\/ unit hook context).\ntype Info struct {\n\tcharm.Workload\n\n\t\/\/ Status is the Juju-level status of the workload.\n\tStatus Status\n\n\t\/\/ Tags is the set of tags associated with the workload.\n\tTags []string\n\n\t\/\/ Details is the information about the workload which the plugin provided.\n\tDetails Details\n}\n\n\/\/ ID returns a uniqueID for a workload (relative to the unit\/charm).\nfunc (info Info) ID() string {\n\treturn BuildID(info.Workload.Name, info.Details.ID)\n}\n\n\/\/ BuildID composes an ID from a class and id\nfunc BuildID(class, id string) string {\n\tif id == \"\" {\n\t\t\/\/ TODO(natefinch) remove this special case when we can be sure the ID\n\t\t\/\/ is never empty (and fix the tests).\n\t\treturn class\n\t}\n\treturn class + \"\/\" + id\n}\n\n\/\/ ParseID extracts the workload name and details ID from the provided string.\n\/\/ The format is expected to be name\/pluginID. If no separator is found, the\n\/\/ whole string is assumed to be the name.\nfunc ParseID(id string) (name, pluginID string) {\n\tparts := strings.SplitN(id, \"\/\", 2)\n\tif len(parts) == 2 {\n\t\treturn parts[0], parts[1]\n\t}\n\treturn id, \"\"\n}\n\n\/\/ Validate checks the workload info to ensure it is correct.\nfunc (info Info) Validate() error {\n\tif err := info.Workload.Validate(); err != nil {\n\t\treturn errors.NewNotValid(err, \"\")\n\t}\n\n\tif err := info.Status.Validate(); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tif err := info.Details.Validate(); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\treturn nil\n}\n\n\/\/ IsTracked indicates whether the represented workload has\n\/\/ is already being tracked by Juju.\nfunc (info Info) IsTracked() bool {\n\t\/\/ An untracked workload will not have the Status and Details\n\t\/\/ fields set (they will be zero values). Thus a trackeded\n\t\/\/ workload can be identified by non-zero values in those fields.\n\t\/\/ We use that fact here.\n\treturn !reflect.DeepEqual(info, Info{Workload: info.Workload})\n}\n\n\/\/ AsPayload converts the Info into a Payload.\nfunc (info Info) AsPayload() Payload {\n\ttags := make([]string, len(info.Tags))\n\tcopy(tags, info.Tags)\n\treturn Payload{\n\t\tPayloadClass: charm.PayloadClass{\n\t\t\tName: info.Name,\n\t\t\tType: info.Type,\n\t\t},\n\t\tID: info.Details.ID,\n\t\tStatus: info.Status.State,\n\t\tTags: tags,\n\t}\n}\n<commit_msg>Add Payload filtering.<commit_after>\/\/ Copyright 2015 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage workload\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n\n\t\"github.com\/juju\/errors\"\n\t\"gopkg.in\/juju\/charm.v5\"\n)\n\n\/\/ Payload holds information about a charm payload.\ntype Payload struct {\n\tcharm.PayloadClass\n\n\t\/\/ ID is a unique string identifying the payload to\n\t\/\/ the underlying technology.\n\tID string\n\n\t\/\/ Status is the Juju-level status of the workload.\n\tStatus string\n\n\t\/\/ Tags are tags associated with the payload.\n\tTags []string\n\n\t\/\/ Unit identifies the Juju unit associated with the payload.\n\tUnit string\n\n\t\/\/ Machine identifies the Juju machine associated with the payload.\n\tMachine string\n}\n\n\/\/ Info holds information about a workload that Juju needs. Iff the\n\/\/ workload has not been registered with Juju then the Status and\n\/\/ Details fields will be zero values.\n\/\/\n\/\/ A registered workload is one which has been defined in Juju (e.g. in\n\/\/ charm metadata) and subsequently was launched by Juju (e.g. in a\n\/\/ unit hook context).\ntype Info struct {\n\tcharm.Workload\n\n\t\/\/ Status is the Juju-level status of the workload.\n\tStatus Status\n\n\t\/\/ Tags is the set of tags associated with the workload.\n\tTags []string\n\n\t\/\/ Details is the information about the workload which the plugin provided.\n\tDetails Details\n}\n\n\/\/ ID returns a uniqueID for a workload (relative to the unit\/charm).\nfunc (info Info) ID() string {\n\treturn BuildID(info.Workload.Name, info.Details.ID)\n}\n\n\/\/ BuildID composes an ID from a class and id\nfunc BuildID(class, id string) string {\n\tif id == \"\" {\n\t\t\/\/ TODO(natefinch) remove this special case when we can be sure the ID\n\t\t\/\/ is never empty (and fix the tests).\n\t\treturn class\n\t}\n\treturn class + \"\/\" + id\n}\n\n\/\/ ParseID extracts the workload name and details ID from the provided string.\n\/\/ The format is expected to be name\/pluginID. If no separator is found, the\n\/\/ whole string is assumed to be the name.\nfunc ParseID(id string) (name, pluginID string) {\n\tparts := strings.SplitN(id, \"\/\", 2)\n\tif len(parts) == 2 {\n\t\treturn parts[0], parts[1]\n\t}\n\treturn id, \"\"\n}\n\n\/\/ Validate checks the workload info to ensure it is correct.\nfunc (info Info) Validate() error {\n\tif err := info.Workload.Validate(); err != nil {\n\t\treturn errors.NewNotValid(err, \"\")\n\t}\n\n\tif err := info.Status.Validate(); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tif err := info.Details.Validate(); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\treturn nil\n}\n\n\/\/ IsTracked indicates whether the represented workload has\n\/\/ is already being tracked by Juju.\nfunc (info Info) IsTracked() bool {\n\t\/\/ An untracked workload will not have the Status and Details\n\t\/\/ fields set (they will be zero values). Thus a trackeded\n\t\/\/ workload can be identified by non-zero values in those fields.\n\t\/\/ We use that fact here.\n\treturn !reflect.DeepEqual(info, Info{Workload: info.Workload})\n}\n\n\/\/ AsPayload converts the Info into a Payload.\nfunc (info Info) AsPayload() Payload {\n\ttags := make([]string, len(info.Tags))\n\tcopy(tags, info.Tags)\n\treturn Payload{\n\t\tPayloadClass: charm.PayloadClass{\n\t\t\tName: info.Name,\n\t\t\tType: info.Type,\n\t\t},\n\t\tID: info.Details.ID,\n\t\tStatus: info.Status.State,\n\t\tTags: tags,\n\t}\n}\n\n\/\/ Filter applies the provided predicates to the payloads and returns\n\/\/ only those that matched.\nfunc Filter(payloads []Payload, predicates ...func(Payload) bool) []Payload {\n\tvar results []Payload\n\tfor _, payload := range payloads {\n\t\tif matched := filterOne(payload, predicates); matched {\n\t\t\tresults = append(results, payload)\n\t\t}\n\t}\n\treturn results\n}\n\nfunc filterOne(payload Payload, predicates []func(Payload) bool) bool {\n\tif len(predicates) == 0 {\n\t\treturn true\n\t}\n\n\tfor _, pred := range predicates {\n\t\tif matched := pred(payload); matched {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ TODO(ericsnow) ParseEntityFilters is mostly something that can be generalized...\n\n\/\/ BuildPredicatesFor converts the provided patterns into predicates\n\/\/ that may be passed to Filter.\nfunc BuildPredicatesFor(patterns []string) ([]func(Payload) bool, error) {\n\tvar predicates []func(Payload) bool\n\tfor _, pattern := range patterns {\n\t\tpredicates = append(predicates, func(payload Payload) bool {\n\t\t\tswitch {\n\t\t\tcase strings.ToLower(payload.Name) == strings.ToLower(pattern):\n\t\t\t\treturn true\n\t\t\tcase strings.ToLower(payload.Type) == strings.ToLower(pattern):\n\t\t\t\treturn true\n\t\t\tcase strings.ToLower(payload.ID) == strings.ToLower(pattern):\n\t\t\t\treturn true\n\t\t\tcase strings.ToLower(payload.Status) == strings.ToLower(pattern):\n\t\t\t\treturn true\n\t\t\tcase strings.ToLower(payload.Unit) == strings.ToLower(pattern):\n\t\t\t\treturn true\n\t\t\tcase strings.ToLower(payload.Machine) == strings.ToLower(pattern):\n\t\t\t\treturn true\n\t\t\tdefault:\n\t\t\t\tfor _, tag := range payload.Tags {\n\t\t\t\t\tif strings.ToLower(tag) == strings.ToLower(pattern) {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn false\n\t\t})\n\t}\n\treturn predicates, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The Ebiten Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build example\n\npackage main\n\nimport (\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/hajimehoshi\/ebiten\"\n\t\"github.com\/hajimehoshi\/ebiten\/ebitenutil\"\n)\n\nvar (\n\ttext = \"Type on the keyboard:\\n\"\n\tcounter = 0\n)\n\nfunc update(screen *ebiten.Image) error {\n\ttext += string(ebiten.InputChars())\n\tif ebiten.IsKeyPressed(ebiten.KeyEnter) && !strings.HasSuffix(text, \"\\n\") {\n\t\ttext += \"\\n\"\n\t}\n\tcounter++\n\n\tif ebiten.IsRunningSlowly() {\n\t\treturn nil\n\t}\n\n\tt := text\n\tif counter%60 < 30 {\n\t\tt += \"_\"\n\t}\n\tebitenutil.DebugPrint(screen, t)\n\treturn nil\n}\n\nfunc main() {\n\tif err := ebiten.Run(update, 320, 240, 2.0, \"Runes (Ebiten Demo)\"); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<commit_msg>examples\/runes: Trim the text if it has too many lines<commit_after>\/\/ Copyright 2017 The Ebiten Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build example\n\npackage main\n\nimport (\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com\/hajimehoshi\/ebiten\"\n\t\"github.com\/hajimehoshi\/ebiten\/ebitenutil\"\n)\n\nvar (\n\ttext = \"Type on the keyboard:\\n\"\n\tcounter = 0\n)\n\nfunc update(screen *ebiten.Image) error {\n\ttext += string(ebiten.InputChars())\n\tss := strings.Split(text, \"\\n\")\n\tif len(ss) > 10 {\n\t\ttext = strings.Join(ss[len(ss)-10:], \"\\n\")\n\t}\n\tif ebiten.IsKeyPressed(ebiten.KeyEnter) && !strings.HasSuffix(text, \"\\n\") {\n\t\ttext += \"\\n\"\n\t}\n\n\tcounter++\n\n\tif ebiten.IsRunningSlowly() {\n\t\treturn nil\n\t}\n\n\tt := text\n\tif counter%60 < 30 {\n\t\tt += \"_\"\n\t}\n\tebitenutil.DebugPrint(screen, t)\n\treturn nil\n}\n\nfunc main() {\n\tif err := ebiten.Run(update, 320, 240, 2.0, \"Runes (Ebiten Demo)\"); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package core\n\nimport (\n\t\"errors\"\n\t\"log\"\n\n\t\"github.com\/nikhan\/go-fetch\"\n)\n\n\/\/ NewBlock creates a new block from a spec\nfunc NewBlock(s Spec) *Block {\n\tvar in []Route\n\tvar out []Output\n\n\tfor _, v := range s.Inputs {\n\n\t\tq, _ := fetch.Parse(\".\")\n\t\tin = append(in, Route{\n\t\t\tName: v.Name,\n\t\t\tValue: q,\n\t\t\tC: make(chan Message),\n\t\t})\n\t}\n\n\tfor _, v := range s.Outputs {\n\t\tout = append(out, Output{\n\t\t\tName: v.Name,\n\t\t\tConnections: make(map[Connection]struct{}),\n\t\t})\n\t}\n\n\treturn &Block{\n\t\tstate: BlockState{\n\t\t\tmake(MessageMap),\n\t\t\tmake(MessageMap),\n\t\t\tmake(MessageMap),\n\t\t\tmake(Manifest),\n\t\t\tfalse,\n\t\t},\n\t\trouting: BlockRouting{\n\t\t\tInputs: in,\n\t\t\tOutputs: out,\n\t\t\tInterruptChan: make(chan Interrupt),\n\t\t\tShared: SharedStore{\n\t\t\t\tType: s.Shared,\n\t\t\t},\n\t\t},\n\t\tkernel: s.Kernel,\n\t}\n}\n\n\/\/ suture: the main routine the block runs\nfunc (b *Block) Serve() {\n\tfor {\n\t\tvar interrupt Interrupt\n\n\t\tb.routing.RLock()\n\t\tfor {\n\t\t\tinterrupt = b.receive()\n\t\t\tif interrupt != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tinterrupt = b.process()\n\t\t\tif interrupt != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tinterrupt = b.broadcast()\n\t\t\tif interrupt != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tb.crank()\n\t\t}\n\t\tb.routing.RUnlock()\n\t\tb.routing.Lock()\n\t\tif ok := interrupt(); !ok {\n\t\t\treturn\n\t\t}\n\t\tb.routing.Unlock()\n\t}\n}\n\nfunc (b *Block) exportRoute(id RouteID) (*Route, error) {\n\tif int(id) >= len(b.routing.Inputs) || int(id) < 0 {\n\t\treturn nil, errors.New(\"index out of range\")\n\t}\n\n\t\/\/ copy Value on route export\n\tvar v interface{}\n\tvar q *fetch.Query\n\tvar ok bool\n\n\tq, ok = b.routing.Inputs[id].Value.(*fetch.Query)\n\tif !ok {\n\t\tv = Copy(b.routing.Inputs[id].Value)\n\t} else {\n\t\tt := *q\n\t\tv = &t\n\t}\n\n\treturn &Route{\n\t\tValue: v,\n\t\tC: b.routing.Inputs[id].C,\n\t\tName: b.routing.Inputs[id].Name,\n\t}, nil\n}\n\n\/\/ Input returns the specfied Route\nfunc (b *Block) GetRoute(id RouteID) (*Route, error) {\n\tb.routing.RLock()\n\tr, err := b.exportRoute(id)\n\tb.routing.RUnlock()\n\treturn r, err\n}\n\nfunc (b *Block) GetRoutes() []Route {\n\tb.routing.RLock()\n\tre := make([]Route, len(b.routing.Inputs), len(b.routing.Inputs))\n\tfor i, _ := range b.routing.Inputs {\n\t\tr, _ := b.exportRoute(RouteID(i))\n\t\tre[i] = *r\n\t}\n\tb.routing.RUnlock()\n\treturn re\n}\n\n\/\/ Outputs return a list of manifest pairs for the block\nfunc (b *Block) GetOutputs() []Output {\n\tb.routing.RLock()\n\tm := make([]Output, len(b.routing.Outputs), len(b.routing.Outputs))\n\tfor id, out := range b.routing.Outputs {\n\t\tm[id] = out\n\t}\n\tb.routing.RUnlock()\n\treturn m\n}\n\nfunc (b *Block) GetStore() Store {\n\tb.routing.RLock()\n\tv := b.routing.Shared.Store\n\tb.routing.RUnlock()\n\treturn v\n}\n\n\/\/ sets a store for the block. can be set to nil\nfunc (b *Block) SetStore(s Store) {\n\tb.routing.InterruptChan <- func() bool {\n\t\tb.routing.Shared.Store = s\n\t\treturn true\n\t}\n}\n\n\/\/ RouteValue sets the route to always be the specified value\nfunc (b *Block) SetRoute(id RouteID, v interface{}) error {\n\treturnVal := make(chan error, 1)\n\tb.routing.InterruptChan <- func() bool {\n\t\tif int(id) < 0 || int(id) >= len(b.routing.Inputs) {\n\t\t\treturnVal <- errors.New(\"input out of range\")\n\t\t\treturn true\n\t\t}\n\n\t\tb.routing.Inputs[id].Value = v\n\t\treturnVal <- nil\n\t\treturn true\n\t}\n\treturn <-returnVal\n}\n\n\/\/ Connect connects a Route, specified by ID, to a connection\nfunc (b *Block) Connect(id RouteID, c Connection) error {\n\treturnVal := make(chan error, 1)\n\tb.routing.InterruptChan <- func() bool {\n\t\tif int(id) < 0 || int(id) >= len(b.routing.Outputs) {\n\t\t\treturnVal <- errors.New(\"output out of range\")\n\t\t\treturn true\n\t\t}\n\n\t\tif _, ok := b.routing.Outputs[id].Connections[c]; ok {\n\t\t\treturnVal <- errors.New(\"this connection already exists on this output\")\n\t\t\treturn true\n\t\t}\n\n\t\tb.routing.Outputs[id].Connections[c] = struct{}{}\n\t\treturnVal <- nil\n\t\treturn true\n\t}\n\treturn <-returnVal\n}\n\n\/\/ Disconnect removes a connection from a Route\nfunc (b *Block) Disconnect(id RouteID, c Connection) error {\n\treturnVal := make(chan error, 1)\n\tb.routing.InterruptChan <- func() bool {\n\t\tif int(id) < 0 || int(id) >= len(b.routing.Outputs) {\n\t\t\treturnVal <- errors.New(\"output out of range\")\n\t\t\treturn true\n\t\t}\n\n\t\tif _, ok := b.routing.Outputs[id].Connections[c]; !ok {\n\t\t\treturnVal <- errors.New(\"connection does not exist\")\n\t\t\treturn true\n\t\t}\n\n\t\tdelete(b.routing.Outputs[id].Connections, c)\n\t\treturnVal <- nil\n\t\treturn true\n\t}\n\treturn <-returnVal\n}\n\n\/\/ suture: stop the block\nfunc (b *Block) Stop() {\n\tb.routing.InterruptChan <- func() bool {\n\t\treturn false\n\t}\n}\n\n\/\/ wait and listen for all kernel inputs to be filled.\nfunc (b *Block) receive() Interrupt {\n\tvar err error\n\tfor id, input := range b.routing.Inputs {\n\t\t\/\/if we have already received a value on this input, skip.\n\t\tif _, ok := b.state.inputValues[RouteID(id)]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ if there is a value set for this input, place value on\n\t\t\/\/ buffer and set it in map.\n\t\tquery, ok := input.Value.(*fetch.Query)\n\t\tif !ok {\n\t\t\tb.state.inputValues[RouteID(id)] = Copy(input.Value)\n\t\t\tcontinue\n\t\t}\n\n\t\tselect {\n\t\tcase m := <-input.C:\n\t\t\tb.state.inputValues[RouteID(id)], err = fetch.Run(query, m)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\tcase f := <-b.routing.InterruptChan:\n\t\t\treturn f\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ run kernel on inputs, produce outputs\nfunc (b *Block) process() Interrupt {\n\tif b.state.Processed == true {\n\t\treturn nil\n\t}\n\n\t\/\/ if this kernel relies on an external shared state then we need to\n\t\/\/ block until an interrupt connects us to a shared external state.\n\tif b.routing.Shared.Type != NONE && b.routing.Shared.Store == nil {\n\t\tselect {\n\t\tcase f := <-b.routing.InterruptChan:\n\t\t\treturn f\n\t\t}\n\t}\n\n\t\/\/ we should only be able to get here if\n\t\/\/ - we don't need an shared state\n\t\/\/ - we have an external shared state and it has been attached\n\tif b.routing.Shared.Type != NONE {\n\t\tb.routing.Shared.Store.Lock()\n\t}\n\n\t\/\/ run the kernel\n\tinterrupt := b.kernel(b.state.inputValues,\n\t\tb.state.outputValues,\n\t\tb.state.internalValues,\n\t\tb.routing.Shared.Store,\n\t\tb.routing.InterruptChan)\n\n\tif interrupt != nil {\n\t\tif b.routing.Shared.Type != NONE {\n\t\t\tb.routing.Shared.Store.Unlock()\n\t\t}\n\t\treturn interrupt\n\t}\n\n\tif b.routing.Shared.Type != NONE {\n\t\tb.routing.Shared.Store.Unlock()\n\t}\n\n\tb.state.Processed = true\n\n\treturn nil\n}\n\n\/\/ broadcast the kernel output to all connections on all outputs.\nfunc (b *Block) broadcast() Interrupt {\n\tfor id, out := range b.routing.Outputs {\n\t\t\/\/ if there no connection for this output then wait until there\n\t\t\/\/ is one. that means we have to wait for an interrupt.\n\t\tif len(out.Connections) == 0 {\n\t\t\tselect {\n\t\t\tcase f := <-b.routing.InterruptChan:\n\t\t\t\treturn f\n\t\t\t}\n\t\t}\n\t\tfor c, _ := range out.Connections {\n\t\t\t\/\/ check to see if we have delivered a message to this\n\t\t\t\/\/ connection for this block crank. if we have, then\n\t\t\t\/\/ skip this delivery.\n\t\t\tm := ManifestPair{id, c}\n\t\t\tif _, ok := b.state.manifest[m]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase c <- b.state.outputValues[RouteID(id)]:\n\t\t\t\t\/\/ set that we have delivered the message.\n\t\t\t\tb.state.manifest[m] = struct{}{}\n\t\t\tcase f := <-b.routing.InterruptChan:\n\t\t\t\treturn f\n\t\t\t}\n\t\t}\n\n\t}\n\treturn nil\n}\n\n\/\/ cleanup all block state for this crank of the block\nfunc (b *Block) crank() {\n\tfor k, _ := range b.state.inputValues {\n\t\tdelete(b.state.inputValues, k)\n\t}\n\tfor k, _ := range b.state.outputValues {\n\t\tdelete(b.state.outputValues, k)\n\t}\n\tfor k, _ := range b.state.manifest {\n\t\tdelete(b.state.manifest, k)\n\t}\n\tb.state.Processed = false\n}\n<commit_msg>fixing value copy<commit_after>package core\n\nimport (\n\t\"errors\"\n\t\"log\"\n\n\t\"github.com\/nikhan\/go-fetch\"\n)\n\n\/\/ NewBlock creates a new block from a spec\nfunc NewBlock(s Spec) *Block {\n\tvar in []Route\n\tvar out []Output\n\n\tfor _, v := range s.Inputs {\n\n\t\tq, _ := fetch.Parse(\".\")\n\t\tin = append(in, Route{\n\t\t\tName: v.Name,\n\t\t\tValue: q,\n\t\t\tC: make(chan Message),\n\t\t})\n\t}\n\n\tfor _, v := range s.Outputs {\n\t\tout = append(out, Output{\n\t\t\tName: v.Name,\n\t\t\tConnections: make(map[Connection]struct{}),\n\t\t})\n\t}\n\n\treturn &Block{\n\t\tstate: BlockState{\n\t\t\tmake(MessageMap),\n\t\t\tmake(MessageMap),\n\t\t\tmake(MessageMap),\n\t\t\tmake(Manifest),\n\t\t\tfalse,\n\t\t},\n\t\trouting: BlockRouting{\n\t\t\tInputs: in,\n\t\t\tOutputs: out,\n\t\t\tInterruptChan: make(chan Interrupt),\n\t\t\tShared: SharedStore{\n\t\t\t\tType: s.Shared,\n\t\t\t},\n\t\t},\n\t\tkernel: s.Kernel,\n\t}\n}\n\n\/\/ suture: the main routine the block runs\nfunc (b *Block) Serve() {\n\tfor {\n\t\tvar interrupt Interrupt\n\n\t\tb.routing.RLock()\n\t\tfor {\n\t\t\tinterrupt = b.receive()\n\t\t\tif interrupt != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tinterrupt = b.process()\n\t\t\tif interrupt != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tinterrupt = b.broadcast()\n\t\t\tif interrupt != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tb.crank()\n\t\t}\n\t\tb.routing.RUnlock()\n\t\tb.routing.Lock()\n\t\tif ok := interrupt(); !ok {\n\t\t\treturn\n\t\t}\n\t\tb.routing.Unlock()\n\t}\n}\n\nfunc (b *Block) exportRoute(id RouteID) (*Route, error) {\n\tif int(id) >= len(b.routing.Inputs) || int(id) < 0 {\n\t\treturn nil, errors.New(\"index out of range\")\n\t}\n\n\tvar v interface{}\n\tswitch n := b.routing.Inputs[id].Value.(type) {\n\tcase *fetch.Query:\n\t\t\/\/ yuck copy\n\t\tv, _ = fetch.Parse(n.String())\n\tdefault:\n\t\tv = Copy(n)\n\t}\n\n\treturn &Route{\n\t\tValue: v,\n\t\tC: b.routing.Inputs[id].C,\n\t\tName: b.routing.Inputs[id].Name,\n\t}, nil\n}\n\n\/\/ Input returns the specfied Route\nfunc (b *Block) GetRoute(id RouteID) (*Route, error) {\n\tb.routing.RLock()\n\tr, err := b.exportRoute(id)\n\tb.routing.RUnlock()\n\treturn r, err\n}\n\nfunc (b *Block) GetRoutes() []Route {\n\tb.routing.RLock()\n\tre := make([]Route, len(b.routing.Inputs), len(b.routing.Inputs))\n\tfor i, _ := range b.routing.Inputs {\n\t\tr, _ := b.exportRoute(RouteID(i))\n\t\tre[i] = *r\n\t}\n\tb.routing.RUnlock()\n\treturn re\n}\n\n\/\/ Outputs return a list of manifest pairs for the block\nfunc (b *Block) GetOutputs() []Output {\n\tb.routing.RLock()\n\tm := make([]Output, len(b.routing.Outputs), len(b.routing.Outputs))\n\tfor id, out := range b.routing.Outputs {\n\t\tm[id] = out\n\t}\n\tb.routing.RUnlock()\n\treturn m\n}\n\nfunc (b *Block) GetStore() Store {\n\tb.routing.RLock()\n\tv := b.routing.Shared.Store\n\tb.routing.RUnlock()\n\treturn v\n}\n\n\/\/ sets a store for the block. can be set to nil\nfunc (b *Block) SetStore(s Store) {\n\tb.routing.InterruptChan <- func() bool {\n\t\tb.routing.Shared.Store = s\n\t\treturn true\n\t}\n}\n\n\/\/ RouteValue sets the route to always be the specified value\nfunc (b *Block) SetRoute(id RouteID, v interface{}) error {\n\treturnVal := make(chan error, 1)\n\tb.routing.InterruptChan <- func() bool {\n\t\tif int(id) < 0 || int(id) >= len(b.routing.Inputs) {\n\t\t\treturnVal <- errors.New(\"input out of range\")\n\t\t\treturn true\n\t\t}\n\n\t\tb.routing.Inputs[id].Value = v\n\t\treturnVal <- nil\n\t\treturn true\n\t}\n\treturn <-returnVal\n}\n\n\/\/ Connect connects a Route, specified by ID, to a connection\nfunc (b *Block) Connect(id RouteID, c Connection) error {\n\treturnVal := make(chan error, 1)\n\tb.routing.InterruptChan <- func() bool {\n\t\tif int(id) < 0 || int(id) >= len(b.routing.Outputs) {\n\t\t\treturnVal <- errors.New(\"output out of range\")\n\t\t\treturn true\n\t\t}\n\n\t\tif _, ok := b.routing.Outputs[id].Connections[c]; ok {\n\t\t\treturnVal <- errors.New(\"this connection already exists on this output\")\n\t\t\treturn true\n\t\t}\n\n\t\tb.routing.Outputs[id].Connections[c] = struct{}{}\n\t\treturnVal <- nil\n\t\treturn true\n\t}\n\treturn <-returnVal\n}\n\n\/\/ Disconnect removes a connection from a Route\nfunc (b *Block) Disconnect(id RouteID, c Connection) error {\n\treturnVal := make(chan error, 1)\n\tb.routing.InterruptChan <- func() bool {\n\t\tif int(id) < 0 || int(id) >= len(b.routing.Outputs) {\n\t\t\treturnVal <- errors.New(\"output out of range\")\n\t\t\treturn true\n\t\t}\n\n\t\tif _, ok := b.routing.Outputs[id].Connections[c]; !ok {\n\t\t\treturnVal <- errors.New(\"connection does not exist\")\n\t\t\treturn true\n\t\t}\n\n\t\tdelete(b.routing.Outputs[id].Connections, c)\n\t\treturnVal <- nil\n\t\treturn true\n\t}\n\treturn <-returnVal\n}\n\n\/\/ suture: stop the block\nfunc (b *Block) Stop() {\n\tb.routing.InterruptChan <- func() bool {\n\t\treturn false\n\t}\n}\n\n\/\/ wait and listen for all kernel inputs to be filled.\nfunc (b *Block) receive() Interrupt {\n\tvar err error\n\tfor id, input := range b.routing.Inputs {\n\t\t\/\/if we have already received a value on this input, skip.\n\t\tif _, ok := b.state.inputValues[RouteID(id)]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ if there is a value set for this input, place value on\n\t\t\/\/ buffer and set it in map.\n\t\tquery, ok := input.Value.(*fetch.Query)\n\t\tif !ok {\n\t\t\tb.state.inputValues[RouteID(id)] = Copy(input.Value)\n\t\t\tcontinue\n\t\t}\n\n\t\tselect {\n\t\tcase m := <-input.C:\n\t\t\tb.state.inputValues[RouteID(id)], err = fetch.Run(query, m)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\tcase f := <-b.routing.InterruptChan:\n\t\t\treturn f\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ run kernel on inputs, produce outputs\nfunc (b *Block) process() Interrupt {\n\tif b.state.Processed == true {\n\t\treturn nil\n\t}\n\n\t\/\/ if this kernel relies on an external shared state then we need to\n\t\/\/ block until an interrupt connects us to a shared external state.\n\tif b.routing.Shared.Type != NONE && b.routing.Shared.Store == nil {\n\t\tselect {\n\t\tcase f := <-b.routing.InterruptChan:\n\t\t\treturn f\n\t\t}\n\t}\n\n\t\/\/ we should only be able to get here if\n\t\/\/ - we don't need an shared state\n\t\/\/ - we have an external shared state and it has been attached\n\tif b.routing.Shared.Type != NONE {\n\t\tb.routing.Shared.Store.Lock()\n\t}\n\n\t\/\/ run the kernel\n\tinterrupt := b.kernel(b.state.inputValues,\n\t\tb.state.outputValues,\n\t\tb.state.internalValues,\n\t\tb.routing.Shared.Store,\n\t\tb.routing.InterruptChan)\n\n\tif interrupt != nil {\n\t\tif b.routing.Shared.Type != NONE {\n\t\t\tb.routing.Shared.Store.Unlock()\n\t\t}\n\t\treturn interrupt\n\t}\n\n\tif b.routing.Shared.Type != NONE {\n\t\tb.routing.Shared.Store.Unlock()\n\t}\n\n\tb.state.Processed = true\n\n\treturn nil\n}\n\n\/\/ broadcast the kernel output to all connections on all outputs.\nfunc (b *Block) broadcast() Interrupt {\n\tfor id, out := range b.routing.Outputs {\n\t\t\/\/ if there no connection for this output then wait until there\n\t\t\/\/ is one. that means we have to wait for an interrupt.\n\t\tif len(out.Connections) == 0 {\n\t\t\tselect {\n\t\t\tcase f := <-b.routing.InterruptChan:\n\t\t\t\treturn f\n\t\t\t}\n\t\t}\n\t\tfor c, _ := range out.Connections {\n\t\t\t\/\/ check to see if we have delivered a message to this\n\t\t\t\/\/ connection for this block crank. if we have, then\n\t\t\t\/\/ skip this delivery.\n\t\t\tm := ManifestPair{id, c}\n\t\t\tif _, ok := b.state.manifest[m]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase c <- b.state.outputValues[RouteID(id)]:\n\t\t\t\t\/\/ set that we have delivered the message.\n\t\t\t\tb.state.manifest[m] = struct{}{}\n\t\t\tcase f := <-b.routing.InterruptChan:\n\t\t\t\treturn f\n\t\t\t}\n\t\t}\n\n\t}\n\treturn nil\n}\n\n\/\/ cleanup all block state for this crank of the block\nfunc (b *Block) crank() {\n\tfor k, _ := range b.state.inputValues {\n\t\tdelete(b.state.inputValues, k)\n\t}\n\tfor k, _ := range b.state.outputValues {\n\t\tdelete(b.state.outputValues, k)\n\t}\n\tfor k, _ := range b.state.manifest {\n\t\tdelete(b.state.manifest, k)\n\t}\n\tb.state.Processed = false\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Debug functions.\npackage core\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/DanielRenne\/GoCore\/core\/extensions\"\n\t\"github.com\/DanielRenne\/GoCore\/core\/serverSettings\"\n\t\"github.com\/davidrenne\/reflections\"\n\t\"github.com\/go-errors\/errors\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"runtime\/debug\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype core_debug struct{}\n\nvar core_logger = log.New(os.Stdout, \"\", 0)\n\nvar TransactionLog string\nvar Debug = core_debug{}\nvar Logger = core_logger\nvar TransactionLogMutex *sync.RWMutex\n\nfunc init() {\n\tTransactionLogMutex = &sync.RWMutex{}\n}\n\n\/\/ Nop is a dummy function that can be called in source files where\n\/\/ other debug functions are constantly added and removed.\n\/\/ That way import \"github.com\/ungerik\/go-start\/debug\" won't cause an error when\n\/\/ no other debug function is currently used.\n\/\/ Arbitrary objects can be passed as arguments to avoid \"declared and not used\"\n\/\/ error messages when commenting code out and in.\n\/\/ The result is a nil interface{} dummy value.\nfunc (self *core_debug) Nop(dummiesIn ...interface{}) (dummyOut interface{}) {\n\treturn nil\n}\n\nfunc (self *core_debug) CallStackInfo(skip int) (info string) {\n\tpc, file, line, ok := runtime.Caller(skip)\n\tif ok {\n\t\tfuncName := runtime.FuncForPC(pc).Name()\n\t\tinfo += fmt.Sprintf(\"In function %s()\", funcName)\n\t}\n\tfor i := 0; ok; i++ {\n\t\tinfo += fmt.Sprintf(\"\\n%s:%d\", file, line)\n\t\t_, file, line, ok = runtime.Caller(skip + i)\n\t}\n\treturn info\n}\n\nfunc (self *core_debug) PrintCallStack() {\n\tdebug.PrintStack()\n}\n\nfunc (self *core_debug) LogCallStack() {\n\tlog.Print(self.Stack())\n}\n\nfunc (self *core_debug) Stack() string {\n\treturn string(debug.Stack())\n}\n\nfunc (self *core_debug) formatValue(value interface{}) string {\n\treturn fmt.Sprintf(\"\\n Type: %T\\n Value: %v\\nGo Syntax: %#v\", value, value, value)\n}\n\nfunc (self *core_debug) formatCallstack(skip int) string {\n\treturn fmt.Sprintf(\"\\nCallstack: %s\", self.CallStackInfo(skip+1))\n}\n\nfunc (self *core_debug) FormatSkip(skip int, value interface{}) string {\n\treturn self.formatValue(value) + self.formatCallstack(skip+1)\n}\n\nfunc (self *core_debug) Format(value interface{}) string {\n\treturn self.FormatSkip(2, value)\n}\n\nfunc (self *core_debug) DumpQuiet(values ...interface{}) {\n\t\/\/ uncomment below to find your callers to quiet\n\tself.Print(\"Silently not dumping \" + extensions.IntToString(len(values)) + \" values\")\n\t\/\/Logger.Println(\"DumpQuiet has \" + extensions.IntToString(len(values)) + \" parameters called\")\n\t\/\/Logger.Println(\"\")\n\t\/\/self.ThrowAndPrintError()\n}\n\nfunc IsZeroOfUnderlyingType(x interface{}) bool {\n\treturn reflect.DeepEqual(x, reflect.Zero(reflect.TypeOf(x)).Interface())\n}\n\nfunc IsZeroOfUnderlyingType2(x interface{}) bool {\n\treturn x == reflect.Zero(reflect.TypeOf(x)).Interface()\n}\n\nfunc (self *core_debug) HandleError(err error) (s string) {\n\tif err != nil {\n\t\t\/\/ notice that we're using 1, so it will actually log the where\n\t\t\/\/ the error happened, 0 = this function, we don't want that.\n\t\t_, fn, line, _ := runtime.Caller(1)\n\t\tfileNameParts := strings.Split(fn, \"\/\")\n\t\treturn fmt.Sprintf(\" Error Info: %s Line %d. ErrorType: %v\", fileNameParts[len(fileNameParts)-1], line, err)\n\t}\n\treturn \"\"\n}\n\nfunc (self *core_debug) Dump(valuesOriginal ...interface{}) {\n\tt := time.Now()\n\tl := \"!!!!!!!!!!!!! DEBUG \" + t.String() + \"!!!!!!!!!!!!!\\n\\n\"\n\tLogger.Println(l)\n\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\tfor _, value := range valuesOriginal {\n\t\tl := self.DumpBase(value)\n\t\tLogger.Print(l)\n\t\tserverSettings.WebConfigMutex.RLock()\n\t\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\t\tTransactionLogMutex.Lock()\n\t\t\tTransactionLog += l\n\t\t\tTransactionLogMutex.Unlock()\n\t\t}\n\t\tserverSettings.WebConfigMutex.RUnlock()\n\t}\n\tl = self.ThrowAndPrintError()\n\tLogger.Print(l)\n\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\tl = \"!!!!!!!!!!!!! ENDDEBUG \" + t.String() + \"!!!!!!!!!!!!!\"\n\tLogger.Println(l)\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n}\n\nfunc (self *core_debug) GetDump(valuesOriginal ...interface{}) (output string) {\n\tfor _, value := range valuesOriginal {\n\t\toutput += self.DumpBase(value)\n\t}\n\t\/\/output += self.ThrowAndPrintError()\n\treturn output\n}\n\nfunc (self *core_debug) GetDumpWithInfo(valuesOriginal ...interface{}) (output string) {\n\n\tt := time.Now()\n\tl := \"\\n!!!!!!!!!!!!! DEBUG \" + t.String() + \"!!!!!!!!!!!!!\\n\\n\"\n\toutput += l\n\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\n\tfor _, value := range valuesOriginal {\n\t\toutput += self.DumpBase(value)\n\t}\n\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += output\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\n\tl = self.ThrowAndPrintError()\n\toutput += l\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\tl = \"!!!!!!!!!!!!! ENDDEBUG \" + t.String() + \"!!!!!!!!!!!!!\\n\"\n\toutput += l\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\treturn output\n}\n\nfunc (self *core_debug) DumpBase(values ...interface{}) (output string) {\n\t\/\/golog \"github.com\/DanielRenne\/GoCore\/core\/log\"\n\t\/\/defer golog.TimeTrack(time.Now(), \"Dump\")\n\tvar jsonString string\n\tvar err error\n\tvar structKeys []string\n\tif Logger != nil {\n\t\tfor _, value := range values {\n\t\t\tisAllJSON := true\n\t\t\tvar kind string\n\t\t\tkind = strings.TrimSpace(fmt.Sprintf(\"%T\", value))\n\t\t\tvar pieces = strings.Split(kind, \" \")\n\t\t\tif pieces[0] == \"struct\" || strings.Index(pieces[0], \"model.\") != -1 || strings.Index(pieces[0], \"viewModel.\") != -1 {\n\t\t\t\tkind = reflections.ReflectKind(value)\n\t\t\t\tstructKeys, err = reflections.FieldsDeep(value)\n\t\t\t\tif err == nil {\n\t\t\t\t\tfor _, field := range structKeys {\n\t\t\t\t\t\tjsonString, err = reflections.GetFieldTag(value, field, \"json\")\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tisAllJSON = false\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif jsonString == \"\" {\n\t\t\t\t\t\t\tisAllJSON = false\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tisAllJSON = false\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tisAllJSON = false\n\t\t\t}\n\t\t\tif isAllJSON || kind == \"map\" || kind == \"bson.M\" || kind == \"slice\" {\n\t\t\t\tvar rawBytes []byte\n\t\t\t\trawBytes, err = json.MarshalIndent(value, \"\", \"\\t\")\n\t\t\t\tif err == nil {\n\t\t\t\t\toutput += fmt.Sprintf(\"#### %-39s ####\\n%+v\", kind, string(rawBytes[:]))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif strings.TrimSpace(kind) == \"string\" {\n\t\t\t\t\tvar stringVal = value.(string)\n\t\t\t\t\tposition := strings.Index(stringVal, \"Desc->\")\n\t\t\t\t\tif position == -1 {\n\t\t\t\t\t\toutput += \"#### + \" + kind + \" \" + stringVal + \"[len:\" + extensions.IntToString(len(stringVal)) + \"]####\" + \"\\n\"\n\t\t\t\t\t\t\/\/ for _, tmp := range strings.Split(stringVal, \"\\\\n\") {\n\t\t\t\t\t\t\/\/ \toutput += \"\\n\" + tmp\n\t\t\t\t\t\t\/\/ }\n\t\t\t\t\t} else {\n\t\t\t\t\t\toutput += stringVal[6:] + \" --> \"\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\toutput += fmt.Sprintf(\"#### %-39s ####\\n%+v\", kind, value)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn output\n}\n\nfunc (self *core_debug) ThrowAndPrintError() (output string) {\n\n\tserverSettings.WebConfigMutex.RLock()\n\tok := serverSettings.WebConfig.Application.CoreDebugStackTrace\n\tserverSettings.WebConfigMutex.RUnlock()\n\tif ok {\n\t\toutput += \"\\n\"\n\t\terrorInfo := self.ThrowError()\n\t\tstack := strings.Split(errorInfo.ErrorStack(), \"\\n\")\n\t\tfilePathSplit := strings.Split(stack[7], \".go:\")\n\t\tfilePaths := strings.Split(filePathSplit[0], \"\/\")\n\t\tfileName := filePaths[len(filePaths)-1] + \".go\"\n\t\tlineParts := strings.Split(filePathSplit[1], \"(\")\n\t\tlineNumber := strings.TrimSpace(lineParts[0])\n\n\t\tfinalLineOfCode := strings.TrimSpace(stack[8])\n\n\t\tif strings.Index(finalLineOfCode, \"Desc->Caller for Query\") == -1 {\n\t\t\toutput += \"\\nDump Caller (\" + fileName + \":\" + lineNumber + \"):\"\n\t\t\toutput += \"\\n---------------\"\n\t\t\t\/\/output += strings.Join(stack, \",\")\n\t\t\toutput += \"\\n golines ==> \" + strings.TrimSpace(stack[6])\n\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[7])\n\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[8])\n\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[9])\n\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[10])\n\t\t\toutput += \"\\n---------------\"\n\t\t\toutput += \"\\n\"\n\t\t\toutput += \"\\n\"\n\t\t}\n\t}\n\treturn output\n}\n\nfunc (self *core_debug) ThrowError() *errors.Error {\n\treturn errors.Errorf(\"Debug Dump\")\n}\n\nfunc (self *core_debug) Print(values ...interface{}) {\n\tif Logger != nil {\n\t\tLogger.Print(values...)\n\t}\n}\n\nfunc (self *core_debug) Printf(format string, values ...interface{}) {\n\tif Logger != nil {\n\t\tLogger.Printf(format, values...)\n\t}\n}\n<commit_msg>line endings<commit_after>\/\/ Debug functions.\npackage core\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/DanielRenne\/GoCore\/core\/extensions\"\n\t\"github.com\/DanielRenne\/GoCore\/core\/serverSettings\"\n\t\"github.com\/davidrenne\/reflections\"\n\t\"github.com\/go-errors\/errors\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"runtime\/debug\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype core_debug struct{}\n\nvar core_logger = log.New(os.Stdout, \"\", 0)\n\nvar TransactionLog string\nvar Debug = core_debug{}\nvar Logger = core_logger\nvar TransactionLogMutex *sync.RWMutex\n\nfunc init() {\n\tTransactionLogMutex = &sync.RWMutex{}\n}\n\n\/\/ Nop is a dummy function that can be called in source files where\n\/\/ other debug functions are constantly added and removed.\n\/\/ That way import \"github.com\/ungerik\/go-start\/debug\" won't cause an error when\n\/\/ no other debug function is currently used.\n\/\/ Arbitrary objects can be passed as arguments to avoid \"declared and not used\"\n\/\/ error messages when commenting code out and in.\n\/\/ The result is a nil interface{} dummy value.\nfunc (self *core_debug) Nop(dummiesIn ...interface{}) (dummyOut interface{}) {\n\treturn nil\n}\n\nfunc (self *core_debug) CallStackInfo(skip int) (info string) {\n\tpc, file, line, ok := runtime.Caller(skip)\n\tif ok {\n\t\tfuncName := runtime.FuncForPC(pc).Name()\n\t\tinfo += fmt.Sprintf(\"In function %s()\", funcName)\n\t}\n\tfor i := 0; ok; i++ {\n\t\tinfo += fmt.Sprintf(\"\\n%s:%d\", file, line)\n\t\t_, file, line, ok = runtime.Caller(skip + i)\n\t}\n\treturn info\n}\n\nfunc (self *core_debug) PrintCallStack() {\n\tdebug.PrintStack()\n}\n\nfunc (self *core_debug) LogCallStack() {\n\tlog.Print(self.Stack())\n}\n\nfunc (self *core_debug) Stack() string {\n\treturn string(debug.Stack())\n}\n\nfunc (self *core_debug) formatValue(value interface{}) string {\n\treturn fmt.Sprintf(\"\\n Type: %T\\n Value: %v\\nGo Syntax: %#v\", value, value, value)\n}\n\nfunc (self *core_debug) formatCallstack(skip int) string {\n\treturn fmt.Sprintf(\"\\nCallstack: %s\", self.CallStackInfo(skip+1))\n}\n\nfunc (self *core_debug) FormatSkip(skip int, value interface{}) string {\n\treturn self.formatValue(value) + self.formatCallstack(skip+1)\n}\n\nfunc (self *core_debug) Format(value interface{}) string {\n\treturn self.FormatSkip(2, value)\n}\n\nfunc (self *core_debug) DumpQuiet(values ...interface{}) {\n\t\/\/ uncomment below to find your callers to quiet\n\tself.Print(\"Silently not dumping \" + extensions.IntToString(len(values)) + \" values\")\n\t\/\/Logger.Println(\"DumpQuiet has \" + extensions.IntToString(len(values)) + \" parameters called\")\n\t\/\/Logger.Println(\"\")\n\t\/\/self.ThrowAndPrintError()\n}\n\nfunc IsZeroOfUnderlyingType(x interface{}) bool {\n\treturn reflect.DeepEqual(x, reflect.Zero(reflect.TypeOf(x)).Interface())\n}\n\nfunc IsZeroOfUnderlyingType2(x interface{}) bool {\n\treturn x == reflect.Zero(reflect.TypeOf(x)).Interface()\n}\n\nfunc (self *core_debug) HandleError(err error) (s string) {\n\tif err != nil {\n\t\t\/\/ notice that we're using 1, so it will actually log the where\n\t\t\/\/ the error happened, 0 = this function, we don't want that.\n\t\t_, fn, line, _ := runtime.Caller(1)\n\t\tfileNameParts := strings.Split(fn, \"\/\")\n\t\treturn fmt.Sprintf(\" Error Info: %s Line %d. ErrorType: %v\", fileNameParts[len(fileNameParts)-1], line, err)\n\t}\n\treturn \"\"\n}\n\nfunc (self *core_debug) Dump(valuesOriginal ...interface{}) {\n\tt := time.Now()\n\tl := \"!!!!!!!!!!!!! DEBUG \" + t.String() + \"!!!!!!!!!!!!!\\n\\n\"\n\tLogger.Println(l)\n\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\tfor _, value := range valuesOriginal {\n\t\tl := self.DumpBase(value)\n\t\tLogger.Print(l)\n\t\tserverSettings.WebConfigMutex.RLock()\n\t\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\t\tTransactionLogMutex.Lock()\n\t\t\tTransactionLog += l\n\t\t\tTransactionLogMutex.Unlock()\n\t\t}\n\t\tserverSettings.WebConfigMutex.RUnlock()\n\t}\n\tl = self.ThrowAndPrintError()\n\tLogger.Print(l)\n\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\tl = \"!!!!!!!!!!!!! ENDDEBUG \" + t.String() + \"!!!!!!!!!!!!!\"\n\tLogger.Println(l)\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n}\n\nfunc (self *core_debug) GetDump(valuesOriginal ...interface{}) (output string) {\n\tfor _, value := range valuesOriginal {\n\t\toutput += self.DumpBase(value)\n\t}\n\t\/\/output += self.ThrowAndPrintError()\n\treturn output\n}\n\nfunc (self *core_debug) GetDumpWithInfo(valuesOriginal ...interface{}) (output string) {\n\n\tt := time.Now()\n\tl := \"\\n!!!!!!!!!!!!! DEBUG \" + t.String() + \"!!!!!!!!!!!!!\\n\\n\"\n\toutput += l\n\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\n\tfor _, value := range valuesOriginal {\n\t\toutput += self.DumpBase(value) + \"\\n\"\n\t}\n\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += output\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\n\tl = self.ThrowAndPrintError()\n\toutput += l\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\tl = \"!!!!!!!!!!!!! ENDDEBUG \" + t.String() + \"!!!!!!!!!!!!!\\n\"\n\toutput += l\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\treturn output\n}\n\nfunc (self *core_debug) DumpBase(values ...interface{}) (output string) {\n\t\/\/golog \"github.com\/DanielRenne\/GoCore\/core\/log\"\n\t\/\/defer golog.TimeTrack(time.Now(), \"Dump\")\n\tvar jsonString string\n\tvar err error\n\tvar structKeys []string\n\tif Logger != nil {\n\t\tfor _, value := range values {\n\t\t\tisAllJSON := true\n\t\t\tvar kind string\n\t\t\tkind = strings.TrimSpace(fmt.Sprintf(\"%T\", value))\n\t\t\tvar pieces = strings.Split(kind, \" \")\n\t\t\tif pieces[0] == \"struct\" || strings.Index(pieces[0], \"model.\") != -1 || strings.Index(pieces[0], \"viewModel.\") != -1 {\n\t\t\t\tkind = reflections.ReflectKind(value)\n\t\t\t\tstructKeys, err = reflections.FieldsDeep(value)\n\t\t\t\tif err == nil {\n\t\t\t\t\tfor _, field := range structKeys {\n\t\t\t\t\t\tjsonString, err = reflections.GetFieldTag(value, field, \"json\")\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tisAllJSON = false\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif jsonString == \"\" {\n\t\t\t\t\t\t\tisAllJSON = false\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tisAllJSON = false\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tisAllJSON = false\n\t\t\t}\n\t\t\tif isAllJSON || kind == \"map\" || kind == \"bson.M\" || kind == \"slice\" {\n\t\t\t\tvar rawBytes []byte\n\t\t\t\trawBytes, err = json.MarshalIndent(value, \"\", \"\\t\")\n\t\t\t\tif err == nil {\n\t\t\t\t\toutput += fmt.Sprintf(\"#### %-39s ####\\n%+v\", kind, string(rawBytes[:]))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif strings.TrimSpace(kind) == \"string\" {\n\t\t\t\t\tvar stringVal = value.(string)\n\t\t\t\t\tposition := strings.Index(stringVal, \"Desc->\")\n\t\t\t\t\tif position == -1 {\n\t\t\t\t\t\toutput += \"#### + \" + kind + \" \" + stringVal + \"[len:\" + extensions.IntToString(len(stringVal)) + \"]####\" + \"\\n\"\n\t\t\t\t\t\t\/\/ for _, tmp := range strings.Split(stringVal, \"\\\\n\") {\n\t\t\t\t\t\t\/\/ \toutput += \"\\n\" + tmp\n\t\t\t\t\t\t\/\/ }\n\t\t\t\t\t} else {\n\t\t\t\t\t\toutput += stringVal[6:] + \" --> \"\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\toutput += fmt.Sprintf(\"#### %-39s ####\\n%+v\", kind, value)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn output\n}\n\nfunc (self *core_debug) ThrowAndPrintError() (output string) {\n\n\tserverSettings.WebConfigMutex.RLock()\n\tok := serverSettings.WebConfig.Application.CoreDebugStackTrace\n\tserverSettings.WebConfigMutex.RUnlock()\n\tif ok {\n\t\toutput += \"\\n\"\n\t\terrorInfo := self.ThrowError()\n\t\tstack := strings.Split(errorInfo.ErrorStack(), \"\\n\")\n\t\tfilePathSplit := strings.Split(stack[7], \".go:\")\n\t\tfilePaths := strings.Split(filePathSplit[0], \"\/\")\n\t\tfileName := filePaths[len(filePaths)-1] + \".go\"\n\t\tlineParts := strings.Split(filePathSplit[1], \"(\")\n\t\tlineNumber := strings.TrimSpace(lineParts[0])\n\n\t\tfinalLineOfCode := strings.TrimSpace(stack[8])\n\n\t\tif strings.Index(finalLineOfCode, \"Desc->Caller for Query\") == -1 {\n\t\t\toutput += \"\\nDump Caller (\" + fileName + \":\" + lineNumber + \"):\"\n\t\t\toutput += \"\\n---------------\"\n\t\t\t\/\/output += strings.Join(stack, \",\")\n\t\t\toutput += \"\\n golines ==> \" + strings.TrimSpace(stack[6])\n\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[7])\n\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[8])\n\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[9])\n\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[10])\n\t\t\toutput += \"\\n---------------\"\n\t\t\toutput += \"\\n\"\n\t\t\toutput += \"\\n\"\n\t\t}\n\t}\n\treturn output\n}\n\nfunc (self *core_debug) ThrowError() *errors.Error {\n\treturn errors.Errorf(\"Debug Dump\")\n}\n\nfunc (self *core_debug) Print(values ...interface{}) {\n\tif Logger != nil {\n\t\tLogger.Print(values...)\n\t}\n}\n\nfunc (self *core_debug) Printf(format string, values ...interface{}) {\n\tif Logger != nil {\n\t\tLogger.Printf(format, values...)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package core\n\nimport context \"golang.org\/x\/net\/context\"\n\ntype ServerCodec interface {\n\tReadRequestHeader(context.Context, *Request) error\n\tReadRequestBody(context.Context, interface{}) error\n\t\/\/ WriteResponse must be safe for concurrent use by multiple goroutines.\n\tWriteResponse(context.Context, *Response, interface{}) error\n\n\tClose() error\n}\n<commit_msg>重写原生rpc框架中的clientCodec接口<commit_after>package core\n\nimport context \"golang.org\/x\/net\/context\"\n\n\ntype ServerCodec interface {\n\tReadRequestHeader(context.Context, *Request) error\n\tReadRequestBody(context.Context, interface{}) error\n\t\/\/ WriteResponse must be safe for concurrent use by multiple goroutines.\n\tWriteResponse(context.Context, *Response, interface{}) error\n\n\tClose() error\n}\n\n\ntype ClientCodec interface {\n\t\/\/ WriteRequest must be safe for concurrent use by multiple goroutines.\n\tWriteRequest(context.Context, *Request, interface{}) error\n\tReadResponseHeader(context.Context, *Response) error\n\tReadResponseBody(context.Context, interface{}) error\n\n\tClose() error\n}<|endoftext|>"} {"text":"<commit_before>\/\/ Debug functions.\npackage core\n\nimport (\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"runtime\/debug\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/DanielRenne\/GoCore\/core\/extensions\"\n\t\"github.com\/DanielRenne\/GoCore\/core\/serverSettings\"\n\t\"github.com\/davidrenne\/reflections\"\n\t\"github.com\/go-errors\/errors\"\n)\n\ntype core_debug struct{}\n\nvar core_logger = log.New(os.Stdout, \"\", 0)\n\nvar TransactionLog string\nvar Debug = core_debug{}\nvar Logger = core_logger\nvar TransactionLogMutex *sync.RWMutex\n\nfunc init() {\n\tTransactionLogMutex = &sync.RWMutex{}\n}\n\n\/\/ Nop is a dummy function that can be called in source files where\n\/\/ other debug functions are constantly added and removed.\n\/\/ That way import \"github.com\/ungerik\/go-start\/debug\" won't cause an error when\n\/\/ no other debug function is currently used.\n\/\/ Arbitrary objects can be passed as arguments to avoid \"declared and not used\"\n\/\/ error messages when commenting code out and in.\n\/\/ The result is a nil interface{} dummy value.\nfunc (self *core_debug) Nop(dummiesIn ...interface{}) (dummyOut interface{}) {\n\treturn nil\n}\n\nfunc (self *core_debug) CallStackInfo(skip int) (info string) {\n\tpc, file, line, ok := runtime.Caller(skip)\n\tif ok {\n\t\tfuncName := runtime.FuncForPC(pc).Name()\n\t\tinfo += fmt.Sprintf(\"In function %s()\", funcName)\n\t}\n\tfor i := 0; ok; i++ {\n\t\tinfo += fmt.Sprintf(\"\\n%s:%d\", file, line)\n\t\t_, file, line, ok = runtime.Caller(skip + i)\n\t}\n\treturn info\n}\n\nfunc (self *core_debug) PrintCallStack() {\n\tdebug.PrintStack()\n}\n\nfunc (self *core_debug) LogCallStack() {\n\tlog.Print(self.Stack())\n}\n\nfunc (self *core_debug) Stack() string {\n\treturn string(debug.Stack())\n}\n\nfunc (self *core_debug) formatValue(value interface{}) string {\n\treturn fmt.Sprintf(\"\\n Type: %T\\n Value: %v\\nGo Syntax: %#v\", value, value, value)\n}\n\nfunc (self *core_debug) formatCallstack(skip int) string {\n\treturn fmt.Sprintf(\"\\nCallstack: %s\", self.CallStackInfo(skip+1))\n}\n\nfunc (self *core_debug) FormatSkip(skip int, value interface{}) string {\n\treturn self.formatValue(value) + self.formatCallstack(skip+1)\n}\n\nfunc (self *core_debug) Format(value interface{}) string {\n\treturn self.FormatSkip(2, value)\n}\n\nfunc (self *core_debug) DumpQuiet(values ...interface{}) {\n\t\/\/ uncomment below to find your callers to quiet\n\tself.Print(\"Silently not dumping \" + extensions.IntToString(len(values)) + \" values\")\n\t\/\/Logger.Println(\"DumpQuiet has \" + extensions.IntToString(len(values)) + \" parameters called\")\n\t\/\/Logger.Println(\"\")\n\t\/\/self.ThrowAndPrintError()\n}\n\nfunc IsZeroOfUnderlyingType(x interface{}) bool {\n\treturn reflect.DeepEqual(x, reflect.Zero(reflect.TypeOf(x)).Interface())\n}\n\nfunc IsZeroOfUnderlyingType2(x interface{}) bool {\n\treturn x == reflect.Zero(reflect.TypeOf(x)).Interface()\n}\n\nfunc (self *core_debug) HandleError(err error) (s string) {\n\tif err != nil {\n\t\t\/\/ notice that we're using 1, so it will actually log the where\n\t\t\/\/ the error happened, 0 = this function, we don't want that.\n\t\t_, fn, line, _ := runtime.Caller(1)\n\t\tfileNameParts := strings.Split(fn, \"\/\")\n\t\treturn fmt.Sprintf(\" Error Info: %s Line %d. ErrorType: %v\", fileNameParts[len(fileNameParts)-1], line, err)\n\t}\n\treturn \"\"\n}\n\nfunc (self *core_debug) ErrLineAndFile(err error) (s string) {\n\tif err != nil {\n\t\t\/\/ notice that we're using 1, so it will actually log the where\n\t\t\/\/ the error happened, 0 = this function, we don't want that.\n\t\t_, fn, line, _ := runtime.Caller(1)\n\t\tfileNameParts := strings.Split(fn, \"\/\")\n\t\treturn fmt.Sprintf(\"%s Line %d\", fileNameParts[len(fileNameParts)-1], line)\n\t}\n\treturn \"\"\n}\n\nfunc (self *core_debug) Dump(valuesOriginal ...interface{}) {\n\tt := time.Now()\n\tl := \"!!!!!!!!!!!!! DEBUG \" + t.Format(\"2006-01-02 15:04:05.000000\") + \"!!!!!!!!!!!!!\\n\\n\"\n\tLogger.Println(l)\n\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\tfor _, value := range valuesOriginal {\n\t\tl := self.DumpBase(value)\n\t\tLogger.Print(l)\n\t\tserverSettings.WebConfigMutex.RLock()\n\t\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\t\tTransactionLogMutex.Lock()\n\t\t\tTransactionLog += l\n\t\t\tTransactionLogMutex.Unlock()\n\t\t}\n\t\tserverSettings.WebConfigMutex.RUnlock()\n\t}\n\tl = self.ThrowAndPrintError()\n\tLogger.Print(l)\n\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\tl = \"!!!!!!!!!!!!! ENDDEBUG \" + t.Format(\"2006-01-02 15:04:05.000000\") + \"!!!!!!!!!!!!!\"\n\tLogger.Println(l)\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n}\n\nfunc (self *core_debug) GetDump(valuesOriginal ...interface{}) (output string) {\n\tfor _, value := range valuesOriginal {\n\t\toutput += self.DumpBase(value)\n\t}\n\t\/\/output += self.ThrowAndPrintError()\n\treturn output\n}\n\nfunc (self *core_debug) GetDumpWithInfo(valuesOriginal ...interface{}) (output string) {\n\tt := time.Now()\n\treturn self.GetDumpWithInfoAndTimeString(t.String(), valuesOriginal...)\n}\n\nfunc (self *core_debug) GetDumpWithInfoAndTimeString(timeStr string, valuesOriginal ...interface{}) (output string) {\n\tl := \"\\n!!!!!!!!!!!!! DEBUG \" + timeStr + \"!!!!!!!!!!!!!\\n\\n\"\n\toutput += l\n\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\n\tfor _, value := range valuesOriginal {\n\t\toutput += self.DumpBase(value) + \"\\n\"\n\t}\n\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += output\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\n\tl = self.ThrowAndPrintError()\n\toutput += l\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\tl = \"!!!!!!!!!!!!! ENDDEBUG \" + timeStr + \"!!!!!!!!!!!!!\\n\"\n\toutput += l\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\treturn output\n}\n\nfunc (self *core_debug) DumpBase(values ...interface{}) (output string) {\n\t\/\/golog \"github.com\/DanielRenne\/GoCore\/core\/log\"\n\t\/\/defer golog.TimeTrack(time.Now(), \"Dump\")\n\tvar jsonString string\n\tvar err error\n\tvar structKeys []string\n\tif Logger != nil {\n\t\tfor _, value := range values {\n\t\t\tisAllJSON := true\n\t\t\tvar kind string\n\t\t\tkind = strings.TrimSpace(fmt.Sprintf(\"%T\", value))\n\t\t\tvar pieces = strings.Split(kind, \" \")\n\t\t\tif pieces[0] == \"struct\" || strings.Index(pieces[0], \"model.\") != -1 || strings.Index(pieces[0], \"viewModel.\") != -1 {\n\t\t\t\tkind = reflections.ReflectKind(value)\n\t\t\t\tstructKeys, err = reflections.FieldsDeep(value)\n\t\t\t\tif err == nil {\n\t\t\t\t\tfor _, field := range structKeys {\n\t\t\t\t\t\tjsonString, err = reflections.GetFieldTag(value, field, \"json\")\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tisAllJSON = false\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif jsonString == \"\" {\n\t\t\t\t\t\t\tisAllJSON = false\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tisAllJSON = false\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tisAllJSON = false\n\t\t\t}\n\t\t\tif isAllJSON || kind == \"map\" || kind == \"bson.M\" || kind == \"slice\" {\n\t\t\t\tvar rawBytes []byte\n\t\t\t\trawBytes, err = json.MarshalIndent(value, \"\", \"\\t\")\n\t\t\t\tif err == nil {\n\t\t\t\t\tif kind == \"slice\" || strings.Index(kind, \"[]\") != -1 {\n\t\t\t\t\t\tvalReflected := reflect.ValueOf(value)\n\t\t\t\t\t\toutput += fmt.Sprintf(\"#### %-39s [len:%s]####\\n%+v\", kind, extensions.IntToString(valReflected.Len()), string(rawBytes[:]))\n\t\t\t\t\t} else {\n\t\t\t\t\t\toutput += fmt.Sprintf(\"#### %-39s ####\\n%+v\", kind, string(rawBytes[:]))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif strings.TrimSpace(kind) == \"string\" {\n\t\t\t\t\tvar stringVal = value.(string)\n\t\t\t\t\tposition := strings.Index(stringVal, \"Desc->\")\n\t\t\t\t\tif position == -1 {\n\t\t\t\t\t\tif !extensions.IsPrintable(stringVal) {\n\t\t\t\t\t\t\tstringVal = hex.Dump([]byte(stringVal))\n\t\t\t\t\t\t}\n\t\t\t\t\t\tvalReflected := reflect.ValueOf(value)\n\t\t\t\t\t\toutput += fmt.Sprintf(\"#### %-39s [len:%s]####\\n%s\", kind, extensions.IntToString(valReflected.Len()), stringVal)\n\t\t\t\t\t} else {\n\t\t\t\t\t\toutput += stringVal[6:] + \" --> \"\n\t\t\t\t\t}\n\t\t\t\t} else if strings.Index(kind, \"[]\") != -1 || strings.TrimSpace(kind) == \"array\" {\n\t\t\t\t\tvalReflected := reflect.ValueOf(value)\n\t\t\t\t\toutput += fmt.Sprintf(\"#### %-39s [len:%s]####\\n%+v\", kind, extensions.IntToString(valReflected.Len()), value)\n\t\t\t\t} else {\n\t\t\t\t\toutput += fmt.Sprintf(\"#### %-39s ####\\n%+v\", kind, value)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn output\n}\n\nfunc (self *core_debug) ThrowAndPrintError() (output string) {\n\n\tserverSettings.WebConfigMutex.RLock()\n\tok := serverSettings.WebConfig.Application.CoreDebugStackTrace\n\tserverSettings.WebConfigMutex.RUnlock()\n\tif ok {\n\t\toutput += \"\\n\"\n\t\terrorInfo := self.ThrowError()\n\t\tstack := strings.Split(errorInfo.ErrorStack(), \"\\n\")\n\t\tif len(stack) >= 8 {\n\t\t\toutput += \"\\nDump Caller:\"\n\t\t\toutput += \"\\n---------------\"\n\t\t\t\/\/output += strings.Join(stack, \",\")\n\t\t\toutput += \"\\n golines ==> \" + strings.TrimSpace(stack[6])\n\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[7])\n\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[8])\n\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[9])\n\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[10])\n\t\t\tif len(stack) >= 12 {\n\t\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[11])\n\t\t\t}\n\t\t\tif len(stack) >= 13 {\n\t\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[12])\n\t\t\t}\n\t\t\tif len(stack) >= 14 {\n\t\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[13])\n\t\t\t}\n\t\t\tif len(stack) >= 15 {\n\t\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[14])\n\t\t\t}\n\t\t\tif len(stack) >= 16 {\n\t\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[15])\n\t\t\t}\n\t\t\toutput += \"\\n---------------\"\n\t\t\toutput += \"\\n\"\n\t\t\toutput += \"\\n\"\n\t\t}\n\t}\n\treturn output\n}\n\nfunc (self *core_debug) ThrowError() *errors.Error {\n\treturn errors.Errorf(\"Debug Dump\")\n}\n\nfunc (self *core_debug) Print(values ...interface{}) {\n\tif Logger != nil {\n\t\tLogger.Print(values...)\n\t}\n}\n\nfunc (self *core_debug) Printf(format string, values ...interface{}) {\n\tif Logger != nil {\n\t\tLogger.Printf(format, values...)\n\t}\n}\n<commit_msg>Only call Len() if interfcace{} is a slice in first two bytes<commit_after>\/\/ Debug functions.\npackage core\n\nimport (\n\t\"encoding\/hex\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"runtime\/debug\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/DanielRenne\/GoCore\/core\/extensions\"\n\t\"github.com\/DanielRenne\/GoCore\/core\/serverSettings\"\n\t\"github.com\/davidrenne\/reflections\"\n\t\"github.com\/go-errors\/errors\"\n)\n\ntype core_debug struct{}\n\nvar core_logger = log.New(os.Stdout, \"\", 0)\n\nvar TransactionLog string\nvar Debug = core_debug{}\nvar Logger = core_logger\nvar TransactionLogMutex *sync.RWMutex\n\nfunc init() {\n\tTransactionLogMutex = &sync.RWMutex{}\n}\n\n\/\/ Nop is a dummy function that can be called in source files where\n\/\/ other debug functions are constantly added and removed.\n\/\/ That way import \"github.com\/ungerik\/go-start\/debug\" won't cause an error when\n\/\/ no other debug function is currently used.\n\/\/ Arbitrary objects can be passed as arguments to avoid \"declared and not used\"\n\/\/ error messages when commenting code out and in.\n\/\/ The result is a nil interface{} dummy value.\nfunc (self *core_debug) Nop(dummiesIn ...interface{}) (dummyOut interface{}) {\n\treturn nil\n}\n\nfunc (self *core_debug) CallStackInfo(skip int) (info string) {\n\tpc, file, line, ok := runtime.Caller(skip)\n\tif ok {\n\t\tfuncName := runtime.FuncForPC(pc).Name()\n\t\tinfo += fmt.Sprintf(\"In function %s()\", funcName)\n\t}\n\tfor i := 0; ok; i++ {\n\t\tinfo += fmt.Sprintf(\"\\n%s:%d\", file, line)\n\t\t_, file, line, ok = runtime.Caller(skip + i)\n\t}\n\treturn info\n}\n\nfunc (self *core_debug) PrintCallStack() {\n\tdebug.PrintStack()\n}\n\nfunc (self *core_debug) LogCallStack() {\n\tlog.Print(self.Stack())\n}\n\nfunc (self *core_debug) Stack() string {\n\treturn string(debug.Stack())\n}\n\nfunc (self *core_debug) formatValue(value interface{}) string {\n\treturn fmt.Sprintf(\"\\n Type: %T\\n Value: %v\\nGo Syntax: %#v\", value, value, value)\n}\n\nfunc (self *core_debug) formatCallstack(skip int) string {\n\treturn fmt.Sprintf(\"\\nCallstack: %s\", self.CallStackInfo(skip+1))\n}\n\nfunc (self *core_debug) FormatSkip(skip int, value interface{}) string {\n\treturn self.formatValue(value) + self.formatCallstack(skip+1)\n}\n\nfunc (self *core_debug) Format(value interface{}) string {\n\treturn self.FormatSkip(2, value)\n}\n\nfunc (self *core_debug) DumpQuiet(values ...interface{}) {\n\t\/\/ uncomment below to find your callers to quiet\n\tself.Print(\"Silently not dumping \" + extensions.IntToString(len(values)) + \" values\")\n\t\/\/Logger.Println(\"DumpQuiet has \" + extensions.IntToString(len(values)) + \" parameters called\")\n\t\/\/Logger.Println(\"\")\n\t\/\/self.ThrowAndPrintError()\n}\n\nfunc IsZeroOfUnderlyingType(x interface{}) bool {\n\treturn reflect.DeepEqual(x, reflect.Zero(reflect.TypeOf(x)).Interface())\n}\n\nfunc IsZeroOfUnderlyingType2(x interface{}) bool {\n\treturn x == reflect.Zero(reflect.TypeOf(x)).Interface()\n}\n\nfunc (self *core_debug) HandleError(err error) (s string) {\n\tif err != nil {\n\t\t\/\/ notice that we're using 1, so it will actually log the where\n\t\t\/\/ the error happened, 0 = this function, we don't want that.\n\t\t_, fn, line, _ := runtime.Caller(1)\n\t\tfileNameParts := strings.Split(fn, \"\/\")\n\t\treturn fmt.Sprintf(\" Error Info: %s Line %d. ErrorType: %v\", fileNameParts[len(fileNameParts)-1], line, err)\n\t}\n\treturn \"\"\n}\n\nfunc (self *core_debug) ErrLineAndFile(err error) (s string) {\n\tif err != nil {\n\t\t\/\/ notice that we're using 1, so it will actually log the where\n\t\t\/\/ the error happened, 0 = this function, we don't want that.\n\t\t_, fn, line, _ := runtime.Caller(1)\n\t\tfileNameParts := strings.Split(fn, \"\/\")\n\t\treturn fmt.Sprintf(\"%s Line %d\", fileNameParts[len(fileNameParts)-1], line)\n\t}\n\treturn \"\"\n}\n\nfunc (self *core_debug) Dump(valuesOriginal ...interface{}) {\n\tt := time.Now()\n\tl := \"!!!!!!!!!!!!! DEBUG \" + t.Format(\"2006-01-02 15:04:05.000000\") + \"!!!!!!!!!!!!!\\n\\n\"\n\tLogger.Println(l)\n\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\tfor _, value := range valuesOriginal {\n\t\tl := self.DumpBase(value)\n\t\tLogger.Print(l)\n\t\tserverSettings.WebConfigMutex.RLock()\n\t\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\t\tTransactionLogMutex.Lock()\n\t\t\tTransactionLog += l\n\t\t\tTransactionLogMutex.Unlock()\n\t\t}\n\t\tserverSettings.WebConfigMutex.RUnlock()\n\t}\n\tl = self.ThrowAndPrintError()\n\tLogger.Print(l)\n\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\tl = \"!!!!!!!!!!!!! ENDDEBUG \" + t.Format(\"2006-01-02 15:04:05.000000\") + \"!!!!!!!!!!!!!\"\n\tLogger.Println(l)\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n}\n\nfunc (self *core_debug) GetDump(valuesOriginal ...interface{}) (output string) {\n\tfor _, value := range valuesOriginal {\n\t\toutput += self.DumpBase(value)\n\t}\n\t\/\/output += self.ThrowAndPrintError()\n\treturn output\n}\n\nfunc (self *core_debug) GetDumpWithInfo(valuesOriginal ...interface{}) (output string) {\n\tt := time.Now()\n\treturn self.GetDumpWithInfoAndTimeString(t.String(), valuesOriginal...)\n}\n\nfunc (self *core_debug) GetDumpWithInfoAndTimeString(timeStr string, valuesOriginal ...interface{}) (output string) {\n\tl := \"\\n!!!!!!!!!!!!! DEBUG \" + timeStr + \"!!!!!!!!!!!!!\\n\\n\"\n\toutput += l\n\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\n\tfor _, value := range valuesOriginal {\n\t\toutput += self.DumpBase(value) + \"\\n\"\n\t}\n\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += output\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\n\tl = self.ThrowAndPrintError()\n\toutput += l\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\tl = \"!!!!!!!!!!!!! ENDDEBUG \" + timeStr + \"!!!!!!!!!!!!!\\n\"\n\toutput += l\n\tserverSettings.WebConfigMutex.RLock()\n\tif serverSettings.WebConfig.Application.ReleaseMode == \"development\" {\n\t\tTransactionLogMutex.Lock()\n\t\tTransactionLog += l\n\t\tTransactionLogMutex.Unlock()\n\t}\n\tserverSettings.WebConfigMutex.RUnlock()\n\treturn output\n}\n\nfunc (self *core_debug) DumpBase(values ...interface{}) (output string) {\n\t\/\/golog \"github.com\/DanielRenne\/GoCore\/core\/log\"\n\t\/\/defer golog.TimeTrack(time.Now(), \"Dump\")\n\tvar jsonString string\n\tvar err error\n\tvar structKeys []string\n\tif Logger != nil {\n\t\tfor _, value := range values {\n\t\t\tisAllJSON := true\n\t\t\tvar kind string\n\t\t\tkind = strings.TrimSpace(fmt.Sprintf(\"%T\", value))\n\t\t\tvar pieces = strings.Split(kind, \" \")\n\t\t\tif pieces[0] == \"struct\" || strings.Index(pieces[0], \"model.\") != -1 || strings.Index(pieces[0], \"viewModel.\") != -1 {\n\t\t\t\tkind = reflections.ReflectKind(value)\n\t\t\t\tstructKeys, err = reflections.FieldsDeep(value)\n\t\t\t\tif err == nil {\n\t\t\t\t\tfor _, field := range structKeys {\n\t\t\t\t\t\tjsonString, err = reflections.GetFieldTag(value, field, \"json\")\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tisAllJSON = false\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif jsonString == \"\" {\n\t\t\t\t\t\t\tisAllJSON = false\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tisAllJSON = false\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tisAllJSON = false\n\t\t\t}\n\t\t\tif isAllJSON || kind == \"map\" || kind == \"bson.M\" || kind == \"slice\" {\n\t\t\t\tvar rawBytes []byte\n\t\t\t\trawBytes, err = json.MarshalIndent(value, \"\", \"\\t\")\n\t\t\t\tif err == nil {\n\t\t\t\t\tif kind == \"slice\" || kind[:2] == \"[]\" {\n\t\t\t\t\t\tvalReflected := reflect.ValueOf(value)\n\t\t\t\t\t\toutput += fmt.Sprintf(\"#### %-39s [len:%s]####\\n%+v\", kind, extensions.IntToString(valReflected.Len()), string(rawBytes[:]))\n\t\t\t\t\t} else {\n\t\t\t\t\t\toutput += fmt.Sprintf(\"#### %-39s ####\\n%+v\", kind, string(rawBytes[:]))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif strings.TrimSpace(kind) == \"string\" {\n\t\t\t\t\tvar stringVal = value.(string)\n\t\t\t\t\tposition := strings.Index(stringVal, \"Desc->\")\n\t\t\t\t\tif position == -1 {\n\t\t\t\t\t\tif !extensions.IsPrintable(stringVal) {\n\t\t\t\t\t\t\tstringVal = hex.Dump([]byte(stringVal))\n\t\t\t\t\t\t}\n\t\t\t\t\t\tvalReflected := reflect.ValueOf(value)\n\t\t\t\t\t\toutput += fmt.Sprintf(\"#### %-39s [len:%s]####\\n%s\", kind, extensions.IntToString(valReflected.Len()), stringVal)\n\t\t\t\t\t} else {\n\t\t\t\t\t\toutput += stringVal[6:] + \" --> \"\n\t\t\t\t\t}\n\t\t\t\t} else if kind[:2] == \"[]\" || strings.TrimSpace(kind) == \"array\" {\n\t\t\t\t\tvalReflected := reflect.ValueOf(value)\n\t\t\t\t\toutput += fmt.Sprintf(\"#### %-39s [len:%s]####\\n%+v\", kind, extensions.IntToString(valReflected.Len()), value)\n\t\t\t\t} else {\n\t\t\t\t\toutput += fmt.Sprintf(\"#### %-39s ####\\n%+v\", kind, value)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn output\n}\n\nfunc (self *core_debug) ThrowAndPrintError() (output string) {\n\n\tserverSettings.WebConfigMutex.RLock()\n\tok := serverSettings.WebConfig.Application.CoreDebugStackTrace\n\tserverSettings.WebConfigMutex.RUnlock()\n\tif ok {\n\t\toutput += \"\\n\"\n\t\terrorInfo := self.ThrowError()\n\t\tstack := strings.Split(errorInfo.ErrorStack(), \"\\n\")\n\t\tif len(stack) >= 8 {\n\t\t\toutput += \"\\nDump Caller:\"\n\t\t\toutput += \"\\n---------------\"\n\t\t\t\/\/output += strings.Join(stack, \",\")\n\t\t\toutput += \"\\n golines ==> \" + strings.TrimSpace(stack[6])\n\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[7])\n\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[8])\n\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[9])\n\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[10])\n\t\t\tif len(stack) >= 12 {\n\t\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[11])\n\t\t\t}\n\t\t\tif len(stack) >= 13 {\n\t\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[12])\n\t\t\t}\n\t\t\tif len(stack) >= 14 {\n\t\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[13])\n\t\t\t}\n\t\t\tif len(stack) >= 15 {\n\t\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[14])\n\t\t\t}\n\t\t\tif len(stack) >= 16 {\n\t\t\t\toutput += \"\\n ==> \" + strings.TrimSpace(stack[15])\n\t\t\t}\n\t\t\toutput += \"\\n---------------\"\n\t\t\toutput += \"\\n\"\n\t\t\toutput += \"\\n\"\n\t\t}\n\t}\n\treturn output\n}\n\nfunc (self *core_debug) ThrowError() *errors.Error {\n\treturn errors.Errorf(\"Debug Dump\")\n}\n\nfunc (self *core_debug) Print(values ...interface{}) {\n\tif Logger != nil {\n\t\tLogger.Print(values...)\n\t}\n}\n\nfunc (self *core_debug) Printf(format string, values ...interface{}) {\n\tif Logger != nil {\n\t\tLogger.Printf(format, values...)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016 Blue Medora, Inc. All rights reserved.\n\/\/ This file is subject to the terms and conditions defined in the included file 'LICENSE.txt'.\n\npackage nozzleconfiguration\n\nimport (\n \"testing\"\n \"os\"\n \"fmt\"\n \"encoding\/json\"\n \"io\/ioutil\"\n \"strings\"\n \n \"github.com\/BlueMedora\/bluemedora-firehose-nozzle\/logger\"\n) \n\nconst (\n defaultLogDirectory = \"..\/logs\"\n\tnozzleLogFile = \"bm_nozzle.log\"\n\tnozzleLogName = \"bm_firehose_nozzle\"\n \n configFile = \"..\/config\/bluemedora-firehose-nozzle.json\"\n tempConfigFile = \"..\/config\/bluemedora-firehose-nozzle.json.real\"\n \n testUAAURL = \"UAAURL\"\n testUsername = \"username\"\n testPassword = \"password\"\n testTrafficControllerURL = \"traffic_url\"\n testDisableAccessControl = false\n testInsecureSSLSkipVerify = false\n testIdleTimeout = uint32(60)\n testMetricCacheDuration = uint32(60)\n testWebServerPort = uint32(8081)\n)\n\nfunc TestConfigParsing(t *testing.T) {\n \/\/Setup Environment\n err := setupGoodEnvironment(t)\n if err != nil {\n tearDownEnvironment(t)\n t.Fatalf(\"Setup failed due to: %s\", err.Error())\n }\n \n t.Log(\"Creating configuration...\")\n logger := logger.New(defaultLogDirectory, nozzleLogFile, nozzleLogName)\n \n \/\/Create new configuration\n var config *NozzleConfiguration\n config, err = New(configFile, logger)\n \n if err != nil {\n tearDownEnvironment(t)\n t.Fatalf(\"Error occrued while creating configuration %s\", err)\n }\n \n \/\/Test values\n t.Log(fmt.Sprintf(\"Checking UAA URL... (expected value: %s)\", testUAAURL))\n if config.UAAURL != testUAAURL {\n t.Errorf(\"Expected UAA URL of %s, but received %s\", testUAAURL, config.UAAURL)\n }\n \n t.Log(fmt.Sprintf(\"Checking UAA Username... (expected value: %s)\", testUsername))\n if config.UAAUsername != testUsername {\n t.Errorf(\"Expected UAA Username of %s, but received %s\", testUsername, config.UAAUsername)\n }\n\n t.Log(fmt.Sprintf(\"Checking UAA Password... (expected value: %s)\", testPassword))\n if config.UAAPassword != testPassword {\n t.Errorf(\"Expected UAA Password of %s, but received %s\", testPassword, config.UAAPassword)\n }\n \n t.Log(fmt.Sprintf(\"Checking Traffic Controller URL... (expected value: %s)\", testTrafficControllerURL))\n if config.TrafficControllerURL != testTrafficControllerURL {\n t.Errorf(\"Expected Traffic Controller URL of %s, but received %s\", testTrafficControllerURL, config.TrafficControllerURL)\n }\n \n t.Log(fmt.Sprintf(\"Checking Disable Access Control... (expected value: %v)\", testDisableAccessControl))\n if config.DisableAccessControl != testDisableAccessControl {\n t.Errorf(\"Expected Disable Access Control of %v, but received %v\", testDisableAccessControl, config.DisableAccessControl)\n }\n\n t.Log(fmt.Sprintf(\"Checking Insecure SSL Skip Verify... (expected value: %v)\", testInsecureSSLSkipVerify))\n if config.InsecureSSLSkipVerify != testInsecureSSLSkipVerify {\n t.Errorf(\"Expected Insecure SSL Skip Verify of %v, but received %v\", testInsecureSSLSkipVerify, config.InsecureSSLSkipVerify)\n }\n \n t.Log(fmt.Sprintf(\"Checking Idle Timeout... (expected value: %v)\", testIdleTimeout))\n if config.IdleTimeoutSeconds != testIdleTimeout {\n t.Errorf(\"Expected Idle Timeout of %v, but received %v\", testIdleTimeout, config.IdleTimeoutSeconds)\n }\n \n t.Log(fmt.Sprintf(\"Checking Metric Cache Duration... (expected value: %v)\", testMetricCacheDuration))\n if config.MetricCacheDurationSeconds != testMetricCacheDuration {\n t.Errorf(\"Expected Metric Cache Duration of %v, but received %v\", testMetricCacheDuration, config.MetricCacheDurationSeconds)\n }\n \n t.Log(fmt.Sprintf(\"Checking Web Server Port... (expected value: %v)\", testWebServerPort))\n if config.WebServerPort != testWebServerPort {\n t.Errorf(\"Expected Web Server Port of %v, but received %v\", testWebServerPort, config.WebServerPort)\n }\n \n err = tearDownEnvironment(t)\n if err != nil {\n t.Fatalf(\"Tear down failed due to: %s\", err.Error())\n }\n}\n\nfunc TestBadConfigFile(t *testing.T) {\n err := setupBadEnvironment(t)\n if err != nil {\n tearDownEnvironment(t)\n t.Fatalf(\"Setup failed due to: %s\", err.Error())\n }\n \n logger := logger.New(defaultLogDirectory, nozzleLogFile, nozzleLogName)\n \n \/\/Create new configuration\n t.Log(\"Checking loading of bad config file... (expecting error)\")\n _, err = New(configFile, logger)\n \n if err != nil {\n if !strings.Contains(err.Error(), \"Error parsing config file bluemedora-firehose-nozzle.json:\") {\n t.Errorf(\"Expected error containing %s, but received %s\", \"Error parsing config file bluemedora-firehose-nozzle.json:\", err.Error())\n }\n } else {\n t.Errorf(\"Expected error from loading a bad config file, but loaded correctly\")\n }\n \n err = tearDownEnvironment(t)\n if err != nil {\n t.Fatalf(\"Tear down failed due to: %s\", err.Error())\n }\n}\n\nfunc TestNoConfigFile(t *testing.T) {\n t.Log(\"Creating configuration...\")\n logger := logger.New(defaultLogDirectory, nozzleLogFile, nozzleLogName)\n \n \/\/Create new configuration\n t.Log(\"Checking loading of non-existent file... (expecting error)\")\n _, err := New(\"fake_file.json\", logger)\n \n if err != nil {\n if !strings.Contains(err.Error(), \"Unable to load config file bluemedora-firehose-nozzle.json:\") {\n t.Errorf(\"Expected error containing %s, but received %s\", \"Unable to load config file bluemedora-firehose-nozzle.json:\", err.Error())\n }\n } else {\n t.Errorf(\"Expected error from loading non-existsent file, but loaded correctly\")\n }\n}\n\nfunc setupGoodEnvironment(t *testing.T) error {\n t.Log(\"Setting up good environment...\")\n \n err := renameConfigFile(t)\n if err != nil {\n return err\n }\n\n err = createGoodConfigFile(t)\n if err != nil {\n return err\n }\n \n t.Log(\"Setup good test environment\")\n return nil\n}\n\nfunc setupBadEnvironment(t *testing.T) error {\n t.Log(\"Setting up bad envrionment...\")\n \n err := renameConfigFile(t)\n if err != nil {\n return err\n }\n \n err = createBadConfigFile(t)\n if err != nil {\n return err\n }\n \n \n t.Log(\"Setup bad test envrionment\")\n return nil\n}\n\nfunc renameConfigFile(t *testing.T) error {\n t.Log(\"Renaming real config file...\")\n \n err := os.Rename(configFile, tempConfigFile)\n if err != nil {\n return fmt.Errorf(\"Error renaming config file. Ensure bluemedora-firehose-nozzle.json exists in config directory: %s\", err)\n }\n \n t.Log(\"Renamed real config file\")\n return nil\n}\n\nfunc createGoodConfigFile(t *testing.T) error {\n t.Log(\"Creating good config file...\")\n \n message := NozzleConfiguration{\n testUAAURL, testUsername, \n testPassword, testTrafficControllerURL, \n testDisableAccessControl, testInsecureSSLSkipVerify, \n testIdleTimeout, testMetricCacheDuration,\n testWebServerPort}\n \n messageBytes, _ := json.Marshal(message)\n \n err := ioutil.WriteFile(configFile, messageBytes, os.ModePerm)\n if err != nil {\n return fmt.Errorf(\"Error creating good config file: %s\", err)\n }\n \n t.Log(\"Created good config file\")\n return nil\n}\n\nfunc createBadConfigFile(t *testing.T) error {\n t.Log(\"Creating bad config file...\")\n \n _, err := os.Create(configFile)\n if err != nil {\n return fmt.Errorf(\"Error creating bad config file: %s\", err)\n }\n \n t.Log(\"Created bad config file\")\n return nil\n}\n\nfunc tearDownEnvironment(t *testing.T) error {\n t.Log(\"Tearing down test environment...\")\n if _, err := os.Stat(tempConfigFile); os.IsNotExist(err) {\n t.Log(\"bluemedora-firehose-nozzle.json.real not found no clean up needed\")\n return nil\n }\n \n if _, err := os.Stat(configFile); err == nil {\n err = os.Remove(configFile)\n if err != nil {\n return fmt.Errorf(\"Error removing test config file: %s\", err)\n }\n }\n \n err := os.Rename(tempConfigFile, configFile)\n if err != nil {\n return fmt.Errorf(\"Error renaming config file. Ensure bluemedora-firehose-nozzle.json exists in config directory: %s\", err)\n }\n \n t.Log(\"Tore down test environment\")\n return nil\n}<commit_msg>Fixed nozzleconfiguration test depedency on log test running first<commit_after>\/\/ Copyright (c) 2016 Blue Medora, Inc. All rights reserved.\n\/\/ This file is subject to the terms and conditions defined in the included file 'LICENSE.txt'.\n\npackage nozzleconfiguration\n\nimport (\n \"testing\"\n \"os\"\n \"fmt\"\n \"encoding\/json\"\n \"io\/ioutil\"\n \"strings\"\n \n \"github.com\/BlueMedora\/bluemedora-firehose-nozzle\/logger\"\n) \n\nconst (\n defaultLogDirectory = \"..\/logs\"\n\tnozzleLogFile = \"bm_nozzle.log\"\n\tnozzleLogName = \"bm_firehose_nozzle\"\n \n configFile = \"..\/config\/bluemedora-firehose-nozzle.json\"\n tempConfigFile = \"..\/config\/bluemedora-firehose-nozzle.json.real\"\n \n testUAAURL = \"UAAURL\"\n testUsername = \"username\"\n testPassword = \"password\"\n testTrafficControllerURL = \"traffic_url\"\n testDisableAccessControl = false\n testInsecureSSLSkipVerify = false\n testIdleTimeout = uint32(60)\n testMetricCacheDuration = uint32(60)\n testWebServerPort = uint32(8081)\n)\n\nfunc TestConfigParsing(t *testing.T) {\n \/\/Setup Environment\n err := setupGoodEnvironment(t)\n if err != nil {\n tearDownEnvironment(t)\n t.Fatalf(\"Setup failed due to: %s\", err.Error())\n }\n \n t.Log(\"Creating configuration...\")\n logger.CreateLogDirectory(defaultLogDirectory)\n logger := logger.New(defaultLogDirectory, nozzleLogFile, nozzleLogName)\n \n \/\/Create new configuration\n var config *NozzleConfiguration\n config, err = New(configFile, logger)\n \n if err != nil {\n tearDownEnvironment(t)\n t.Fatalf(\"Error occrued while creating configuration %s\", err)\n }\n \n \/\/Test values\n t.Log(fmt.Sprintf(\"Checking UAA URL... (expected value: %s)\", testUAAURL))\n if config.UAAURL != testUAAURL {\n t.Errorf(\"Expected UAA URL of %s, but received %s\", testUAAURL, config.UAAURL)\n }\n \n t.Log(fmt.Sprintf(\"Checking UAA Username... (expected value: %s)\", testUsername))\n if config.UAAUsername != testUsername {\n t.Errorf(\"Expected UAA Username of %s, but received %s\", testUsername, config.UAAUsername)\n }\n\n t.Log(fmt.Sprintf(\"Checking UAA Password... (expected value: %s)\", testPassword))\n if config.UAAPassword != testPassword {\n t.Errorf(\"Expected UAA Password of %s, but received %s\", testPassword, config.UAAPassword)\n }\n \n t.Log(fmt.Sprintf(\"Checking Traffic Controller URL... (expected value: %s)\", testTrafficControllerURL))\n if config.TrafficControllerURL != testTrafficControllerURL {\n t.Errorf(\"Expected Traffic Controller URL of %s, but received %s\", testTrafficControllerURL, config.TrafficControllerURL)\n }\n \n t.Log(fmt.Sprintf(\"Checking Disable Access Control... (expected value: %v)\", testDisableAccessControl))\n if config.DisableAccessControl != testDisableAccessControl {\n t.Errorf(\"Expected Disable Access Control of %v, but received %v\", testDisableAccessControl, config.DisableAccessControl)\n }\n\n t.Log(fmt.Sprintf(\"Checking Insecure SSL Skip Verify... (expected value: %v)\", testInsecureSSLSkipVerify))\n if config.InsecureSSLSkipVerify != testInsecureSSLSkipVerify {\n t.Errorf(\"Expected Insecure SSL Skip Verify of %v, but received %v\", testInsecureSSLSkipVerify, config.InsecureSSLSkipVerify)\n }\n \n t.Log(fmt.Sprintf(\"Checking Idle Timeout... (expected value: %v)\", testIdleTimeout))\n if config.IdleTimeoutSeconds != testIdleTimeout {\n t.Errorf(\"Expected Idle Timeout of %v, but received %v\", testIdleTimeout, config.IdleTimeoutSeconds)\n }\n \n t.Log(fmt.Sprintf(\"Checking Metric Cache Duration... (expected value: %v)\", testMetricCacheDuration))\n if config.MetricCacheDurationSeconds != testMetricCacheDuration {\n t.Errorf(\"Expected Metric Cache Duration of %v, but received %v\", testMetricCacheDuration, config.MetricCacheDurationSeconds)\n }\n \n t.Log(fmt.Sprintf(\"Checking Web Server Port... (expected value: %v)\", testWebServerPort))\n if config.WebServerPort != testWebServerPort {\n t.Errorf(\"Expected Web Server Port of %v, but received %v\", testWebServerPort, config.WebServerPort)\n }\n \n err = tearDownEnvironment(t)\n if err != nil {\n t.Fatalf(\"Tear down failed due to: %s\", err.Error())\n }\n}\n\nfunc TestBadConfigFile(t *testing.T) {\n err := setupBadEnvironment(t)\n if err != nil {\n tearDownEnvironment(t)\n t.Fatalf(\"Setup failed due to: %s\", err.Error())\n }\n \n logger.CreateLogDirectory(defaultLogDirectory)\n logger := logger.New(defaultLogDirectory, nozzleLogFile, nozzleLogName)\n \n \/\/Create new configuration\n t.Log(\"Checking loading of bad config file... (expecting error)\")\n _, err = New(configFile, logger)\n \n if err != nil {\n if !strings.Contains(err.Error(), \"Error parsing config file bluemedora-firehose-nozzle.json:\") {\n t.Errorf(\"Expected error containing %s, but received %s\", \"Error parsing config file bluemedora-firehose-nozzle.json:\", err.Error())\n }\n } else {\n t.Errorf(\"Expected error from loading a bad config file, but loaded correctly\")\n }\n \n err = tearDownEnvironment(t)\n if err != nil {\n t.Fatalf(\"Tear down failed due to: %s\", err.Error())\n }\n}\n\nfunc TestNoConfigFile(t *testing.T) {\n t.Log(\"Creating configuration...\")\n logger.CreateLogDirectory(defaultLogDirectory)\n logger := logger.New(defaultLogDirectory, nozzleLogFile, nozzleLogName)\n \n \/\/Create new configuration\n t.Log(\"Checking loading of non-existent file... (expecting error)\")\n _, err := New(\"fake_file.json\", logger)\n \n if err != nil {\n if !strings.Contains(err.Error(), \"Unable to load config file bluemedora-firehose-nozzle.json:\") {\n t.Errorf(\"Expected error containing %s, but received %s\", \"Unable to load config file bluemedora-firehose-nozzle.json:\", err.Error())\n }\n } else {\n t.Errorf(\"Expected error from loading non-existsent file, but loaded correctly\")\n }\n}\n\nfunc setupGoodEnvironment(t *testing.T) error {\n t.Log(\"Setting up good environment...\")\n \n err := renameConfigFile(t)\n if err != nil {\n return err\n }\n\n err = createGoodConfigFile(t)\n if err != nil {\n return err\n }\n \n t.Log(\"Setup good test environment\")\n return nil\n}\n\nfunc setupBadEnvironment(t *testing.T) error {\n t.Log(\"Setting up bad envrionment...\")\n \n err := renameConfigFile(t)\n if err != nil {\n return err\n }\n \n err = createBadConfigFile(t)\n if err != nil {\n return err\n }\n \n \n t.Log(\"Setup bad test envrionment\")\n return nil\n}\n\nfunc renameConfigFile(t *testing.T) error {\n t.Log(\"Renaming real config file...\")\n \n err := os.Rename(configFile, tempConfigFile)\n if err != nil {\n return fmt.Errorf(\"Error renaming config file. Ensure bluemedora-firehose-nozzle.json exists in config directory: %s\", err)\n }\n \n t.Log(\"Renamed real config file\")\n return nil\n}\n\nfunc createGoodConfigFile(t *testing.T) error {\n t.Log(\"Creating good config file...\")\n \n message := NozzleConfiguration{\n testUAAURL, testUsername, \n testPassword, testTrafficControllerURL, \n testDisableAccessControl, testInsecureSSLSkipVerify, \n testIdleTimeout, testMetricCacheDuration,\n testWebServerPort}\n \n messageBytes, _ := json.Marshal(message)\n \n err := ioutil.WriteFile(configFile, messageBytes, os.ModePerm)\n if err != nil {\n return fmt.Errorf(\"Error creating good config file: %s\", err)\n }\n \n t.Log(\"Created good config file\")\n return nil\n}\n\nfunc createBadConfigFile(t *testing.T) error {\n t.Log(\"Creating bad config file...\")\n \n _, err := os.Create(configFile)\n if err != nil {\n return fmt.Errorf(\"Error creating bad config file: %s\", err)\n }\n \n t.Log(\"Created bad config file\")\n return nil\n}\n\nfunc tearDownEnvironment(t *testing.T) error {\n t.Log(\"Tearing down test environment...\")\n if _, err := os.Stat(tempConfigFile); os.IsNotExist(err) {\n t.Log(\"bluemedora-firehose-nozzle.json.real not found no clean up needed\")\n return nil\n }\n \n if _, err := os.Stat(configFile); err == nil {\n err = os.Remove(configFile)\n if err != nil {\n return fmt.Errorf(\"Error removing test config file: %s\", err)\n }\n }\n \n err := os.Rename(tempConfigFile, configFile)\n if err != nil {\n return fmt.Errorf(\"Error renaming config file. Ensure bluemedora-firehose-nozzle.json exists in config directory: %s\", err)\n }\n \n t.Log(\"Tore down test environment\")\n return nil\n}<|endoftext|>"} {"text":"<commit_before>package violetear\n\nimport \"net\/http\"\n\ntype violetearError interface {\n\terror\n\tStatus() int\n}\n\n\/\/ Error represents an error with an associated HTTP status code.\ntype Error struct {\n\tCode int\n\tErr error\n}\n\n\/\/ Error return error message\nfunc (e Error) Error() string {\n\treturn e.Err.Error()\n}\n\n\/\/ Status return HTTP status code.\nfunc (e Error) Status() int {\n\treturn e.Code\n}\n\n\/\/ ErrorHandler struct that returns error\ntype ErrorHandler func(w http.ResponseWriter, r *http.Request) error\n\n\/\/ ServeHTTP allows ErrorHandler type to satisfy http.Handler.\nfunc (h ErrorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tif err := h(w, r); err != nil {\n\t\tswitch e := err.(type) {\n\t\tcase violetearError:\n\t\t\thttp.Error(w, e.Error(), e.Status())\n\t\tdefault:\n\t\t\thttp.Error(\n\t\t\t\tw,\n\t\t\t\thttp.StatusText(http.StatusInternalServerError),\n\t\t\t\thttp.StatusInternalServerError,\n\t\t\t)\n\t\t}\n\t}\n}\n<commit_msg>Created other branch for testing the centralized errorHandler<commit_after><|endoftext|>"} {"text":"<commit_before>package worker\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/funkygao\/fae\/servant\/mysql\"\n\t\"github.com\/funkygao\/gafka\/cmd\/kateway\/job\"\n\tjm \"github.com\/funkygao\/gafka\/cmd\/kateway\/job\/mysql\"\n\t\"github.com\/funkygao\/gafka\/cmd\/kateway\/store\"\n\tlog \"github.com\/funkygao\/log4go\"\n)\n\n\/\/ Worker polls a single JobQueue and handle each Job.\ntype Worker struct {\n\tparentId string \/\/ controller short id\n\tcluster, topic string\n\tmc *mysql.MysqlCluster\n\tstopper <-chan struct{}\n\tdueJobs chan job.JobItem\n\tauditor log.Logger\n\n\t\/\/ cached values\n\tappid string\n\taid int\n\ttable string\n\tident string\n}\n\nfunc New(parentId, cluster, topic string, mc *mysql.MysqlCluster,\n\tstopper <-chan struct{}, auditor log.Logger) *Worker {\n\tthis := &Worker{\n\t\tparentId: parentId,\n\t\tcluster: cluster,\n\t\ttopic: topic,\n\t\tmc: mc,\n\t\tstopper: stopper,\n\t\tdueJobs: make(chan job.JobItem, 200),\n\t\tauditor: auditor,\n\t}\n\n\tthis.appid = topic[:strings.IndexByte(topic, '.')]\n\tthis.aid = jm.App_id(this.appid)\n\tthis.table = jm.JobTable(topic)\n\tthis.ident = fmt.Sprintf(\"worker{cluster:%s app:%s aid:%d topic:%s table:%s}\",\n\t\tthis.cluster, this.appid, this.aid, this.topic, this.table)\n\n\treturn this\n}\n\n\/\/ poll mysql for due jobs and send to kafka.\nfunc (this *Worker) Run() {\n\tlog.Trace(\"starting %s\", this.Ident())\n\n\tvar (\n\t\twg sync.WaitGroup\n\t\titem job.JobItem\n\t\ttick = time.NewTicker(time.Second)\n\t\tsql = fmt.Sprintf(\"SELECT job_id,app_id,payload,due_time FROM %s WHERE due_time<=?\", this.table)\n\t)\n\n\t\/\/ handler pool, currently to guarantee the order, we use pool=1\n\tfor i := 0; i < 1; i++ {\n\t\twg.Add(1)\n\t\tgo this.handleDueJobs(&wg)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-this.stopper:\n\t\t\tlog.Debug(\"%s stopping\", this.ident)\n\t\t\twg.Wait()\n\t\t\treturn\n\n\t\tcase now := <-tick.C:\n\t\t\trows, err := this.mc.Query(jm.AppPool, this.topic, this.aid, sql, now.Unix())\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"%s: %v\", this.ident, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor rows.Next() {\n\t\t\t\t\/\/ FIXME ctime not handled\n\t\t\t\terr = rows.Scan(&item.JobId, &item.AppId, &item.Payload, &item.DueTime)\n\t\t\t\tif err == nil {\n\t\t\t\t\tthis.dueJobs <- item\n\t\t\t\t\tlog.Debug(\"%s due %s\", this.ident, item)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Error(\"%s: %s\", this.ident, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif err = rows.Err(); err != nil {\n\t\t\t\tlog.Error(\"%s: %s\", this.ident, err)\n\t\t\t}\n\n\t\t\trows.Close()\n\t\t}\n\t}\n\n}\n\n\/\/ TODO batch DELETE\/INSERT for better performance.\nfunc (this *Worker) handleDueJobs(wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\n\tvar (\n\t\tsqlDeleteJob = fmt.Sprintf(\"DELETE FROM %s WHERE job_id=?\", this.table)\n\t\tsqlInsertArchive = fmt.Sprintf(\"INSERT INTO %s(app_id,job_id,payload,ctime,due_time,invoke_time,actor_id) VALUES(?,?,?,?,?,?,?)\",\n\t\t\tjm.HistoryTable(this.topic))\n\t)\n\tfor {\n\t\tselect {\n\t\tcase <-this.stopper:\n\t\t\treturn\n\n\t\tcase item := <-this.dueJobs:\n\t\t\tlog.Debug(\"%s handling %s\", this.ident, item)\n\t\t\taffectedRows, _, err := this.mc.Exec(jm.AppPool, this.table, this.aid, sqlDeleteJob, item.JobId)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"%s: %s\", this.ident, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif affectedRows == 0 {\n\t\t\t\t\/\/ race fails, client Delete wins\n\t\t\t\tlog.Warn(\"%s: %s race fails\", this.ident, item)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlog.Debug(\"%s deleted from real time table: %s\", this.ident, item)\n\t\t\t_, _, err = store.DefaultPubStore.SyncPub(this.cluster, this.topic, nil, item.Payload)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"%s: %s\", this.ident, err)\n\t\t\t\t\/\/ TODO insert back to job table\n\t\t\t} else {\n\t\t\t\tthis.auditor.Trace(item.String())\n\n\t\t\t\t\/\/ mv job to archive table\n\t\t\t\t_, _, err = this.mc.Exec(jm.AppPool, this.table, this.aid, sqlInsertArchive,\n\t\t\t\t\titem.AppId, item.JobId, item.Payload, item.Ctime, item.DueTime, time.Now().UnixNano(), this.parentId)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"%s: %s\", this.ident, err)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Debug(\"%s archived %s\", this.ident, item)\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}\n}\n\nfunc (this *Worker) Ident() string {\n\treturn this.ident\n}\n<commit_msg>reuse func<commit_after>package worker\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/funkygao\/fae\/servant\/mysql\"\n\t\"github.com\/funkygao\/gafka\/cmd\/kateway\/job\"\n\tjm \"github.com\/funkygao\/gafka\/cmd\/kateway\/job\/mysql\"\n\t\"github.com\/funkygao\/gafka\/cmd\/kateway\/manager\"\n\t\"github.com\/funkygao\/gafka\/cmd\/kateway\/manager\/dummy\"\n\t\"github.com\/funkygao\/gafka\/cmd\/kateway\/store\"\n\tlog \"github.com\/funkygao\/log4go\"\n)\n\n\/\/ Worker polls a single JobQueue and handle each Job.\ntype Worker struct {\n\tparentId string \/\/ controller short id\n\tcluster, topic string\n\tmc *mysql.MysqlCluster\n\tstopper <-chan struct{}\n\tdueJobs chan job.JobItem\n\tauditor log.Logger\n\n\t\/\/ cached values\n\tappid string\n\taid int\n\ttable string\n\tident string\n}\n\nfunc New(parentId, cluster, topic string, mc *mysql.MysqlCluster,\n\tstopper <-chan struct{}, auditor log.Logger) *Worker {\n\tthis := &Worker{\n\t\tparentId: parentId,\n\t\tcluster: cluster,\n\t\ttopic: topic,\n\t\tmc: mc,\n\t\tstopper: stopper,\n\t\tdueJobs: make(chan job.JobItem, 200),\n\t\tauditor: auditor,\n\t}\n\n\treturn this\n}\n\n\/\/ poll mysql for due jobs and send to kafka.\nfunc (this *Worker) Run() {\n\tlog.Trace(\"starting %s\", this.Ident())\n\n\tmanager.Default = dummy.New(\"\")\n\tthis.appid = manager.Default.TopicAppid(this.topic)\n\tif this.appid == \"\" {\n\t\tlog.Warn(\"invalid topic: %s\", this.topic)\n\t\treturn\n\t}\n\tthis.aid = jm.App_id(this.appid)\n\tthis.table = jm.JobTable(this.topic)\n\tthis.ident = fmt.Sprintf(\"worker{cluster:%s app:%s aid:%d topic:%s table:%s}\",\n\t\tthis.cluster, this.appid, this.aid, this.topic, this.table)\n\n\tvar (\n\t\twg sync.WaitGroup\n\t\titem job.JobItem\n\t\ttick = time.NewTicker(time.Second)\n\t\tsql = fmt.Sprintf(\"SELECT job_id,app_id,payload,due_time FROM %s WHERE due_time<=?\", this.table)\n\t)\n\n\t\/\/ handler pool, currently to guarantee the order, we use pool=1\n\tfor i := 0; i < 1; i++ {\n\t\twg.Add(1)\n\t\tgo this.handleDueJobs(&wg)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-this.stopper:\n\t\t\tlog.Debug(\"%s stopping\", this.ident)\n\t\t\twg.Wait()\n\t\t\treturn\n\n\t\tcase now := <-tick.C:\n\t\t\trows, err := this.mc.Query(jm.AppPool, this.topic, this.aid, sql, now.Unix())\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"%s: %v\", this.ident, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor rows.Next() {\n\t\t\t\t\/\/ FIXME ctime not handled\n\t\t\t\terr = rows.Scan(&item.JobId, &item.AppId, &item.Payload, &item.DueTime)\n\t\t\t\tif err == nil {\n\t\t\t\t\tthis.dueJobs <- item\n\t\t\t\t\tlog.Debug(\"%s due %s\", this.ident, item)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Error(\"%s: %s\", this.ident, err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif err = rows.Err(); err != nil {\n\t\t\t\tlog.Error(\"%s: %s\", this.ident, err)\n\t\t\t}\n\n\t\t\trows.Close()\n\t\t}\n\t}\n\n}\n\n\/\/ TODO batch DELETE\/INSERT for better performance.\nfunc (this *Worker) handleDueJobs(wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\n\tvar (\n\t\tsqlDeleteJob = fmt.Sprintf(\"DELETE FROM %s WHERE job_id=?\", this.table)\n\t\tsqlInsertArchive = fmt.Sprintf(\"INSERT INTO %s(app_id,job_id,payload,ctime,due_time,invoke_time,actor_id) VALUES(?,?,?,?,?,?,?)\",\n\t\t\tjm.HistoryTable(this.topic))\n\t)\n\tfor {\n\t\tselect {\n\t\tcase <-this.stopper:\n\t\t\treturn\n\n\t\tcase item := <-this.dueJobs:\n\t\t\tlog.Debug(\"%s handling %s\", this.ident, item)\n\t\t\taffectedRows, _, err := this.mc.Exec(jm.AppPool, this.table, this.aid, sqlDeleteJob, item.JobId)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"%s: %s\", this.ident, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif affectedRows == 0 {\n\t\t\t\t\/\/ race fails, client Delete wins\n\t\t\t\tlog.Warn(\"%s: %s race fails\", this.ident, item)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlog.Debug(\"%s deleted from real time table: %s\", this.ident, item)\n\t\t\t_, _, err = store.DefaultPubStore.SyncPub(this.cluster, this.topic, nil, item.Payload)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"%s: %s\", this.ident, err)\n\t\t\t\t\/\/ TODO insert back to job table\n\t\t\t} else {\n\t\t\t\tthis.auditor.Trace(item.String())\n\n\t\t\t\t\/\/ mv job to archive table\n\t\t\t\t_, _, err = this.mc.Exec(jm.AppPool, this.table, this.aid, sqlInsertArchive,\n\t\t\t\t\titem.AppId, item.JobId, item.Payload, item.Ctime, item.DueTime, time.Now().UnixNano(), this.parentId)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(\"%s: %s\", this.ident, err)\n\t\t\t\t} else {\n\t\t\t\t\tlog.Debug(\"%s archived %s\", this.ident, item)\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}\n}\n\nfunc (this *Worker) Ident() string {\n\treturn this.ident\n}\n<|endoftext|>"} {"text":"<commit_before>package store\n\nimport (\n\t\"errors\"\n)\n\nvar (\n\tErrShuttingDown = errors.New(\"server shutting down\")\n\tErrBusy = errors.New(\"server too busy\")\n\tErrTooManyConsumers = errors.New(\"consumers larger than available partitions\")\n\tErrRebalancing = errors.New(\"rebalancing, please retry after a while\")\n\tErrInvalidCluster = errors.New(\"invalid cluster\")\n\tErrEmptyBrokers = errors.New(\"empty broker list\")\n)\n<commit_msg>more meaningful err msg<commit_after>package store\n\nimport (\n\t\"errors\"\n)\n\nvar (\n\tErrShuttingDown = errors.New(\"server shutting down\")\n\tErrBusy = errors.New(\"server too busy\")\n\tErrTooManyConsumers = errors.New(\"consumers more than available partitions\")\n\tErrRebalancing = errors.New(\"rebalancing, please retry after a while\")\n\tErrInvalidCluster = errors.New(\"invalid cluster\")\n\tErrEmptyBrokers = errors.New(\"empty broker list\")\n)\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc Test_loadMetadata(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tyml string\n\t\twant metadata\n\t\twantErr string\n\t}{\n\t\t{\n\t\t\tname: \"all options\",\n\t\t\tyml: \"testdata\/all_options.yaml\",\n\t\t\twant: metadata{\n\t\t\t\tName: \"metricreceiver\",\n\t\t\t\tAttributes: map[attributeName]attribute{\n\t\t\t\t\t\"enumAttribute\": {\n\t\t\t\t\t\tDescription: \"Attribute with a known set of values.\",\n\t\t\t\t\t\tValue: \"\",\n\t\t\t\t\t\tEnum: []string{\"red\", \"green\", \"blue\"}},\n\t\t\t\t\t\"freeFormAttribute\": {\n\t\t\t\t\t\tDescription: \"Attribute that can take on any value.\",\n\t\t\t\t\t\tValue: \"\"},\n\t\t\t\t\t\"freeFormAttributeWithValue\": {\n\t\t\t\t\t\tDescription: \"Attribute that has alternate value set.\",\n\t\t\t\t\t\tValue: \"state\"}},\n\t\t\t\tMetrics: map[metricName]metric{\n\t\t\t\t\t\"system.cpu.time\": {\n\t\t\t\t\t\tDescription: \"Total CPU seconds broken down by different states.\",\n\t\t\t\t\t\tExtendedDocumentation: \"Additional information on CPU Time can be found [here](https:\/\/en.wikipedia.org\/wiki\/CPU_time).\",\n\t\t\t\t\t\tUnit: \"s\",\n\t\t\t\t\t\tSum: &sum{\n\t\t\t\t\t\t\tAggregated: Aggregated{Aggregation: \"cumulative\"},\n\t\t\t\t\t\t\tMono: Mono{Monotonic: true},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\/\/ YmlData: nil,\n\t\t\t\t\t\tAttributes: []attributeName{\"freeFormAttribute\", \"freeFormAttributeWithValue\",\n\t\t\t\t\t\t\t\"enumAttribute\"}}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"unknown metric attribute\",\n\t\t\tyml: \"testdata\/unknown_metric_attribute.yaml\",\n\t\t\twant: metadata{},\n\t\t\twantErr: \"error validating struct:\\n\\tmetadata.Metrics[system.cpu.time].\" +\n\t\t\t\t\"Attributes[missing]: unknown attribute value\\n\",\n\t\t},\n\t\t{\n\t\t\tname: \"no metric type\",\n\t\t\tyml: \"testdata\/no_metric_type.yaml\",\n\t\t\twant: metadata{},\n\t\t\twantErr: \"metric system.cpu.time doesn't have a metric type key, \" +\n\t\t\t\t\"one of the following has to be specified: sum, gauge, histogram\",\n\t\t},\n\t\t{\n\t\t\tname: \"two metric types\",\n\t\t\tyml: \"testdata\/two_metric_types.yaml\",\n\t\t\twant: metadata{},\n\t\t\twantErr: \"metric system.cpu.time has more than one metric type keys, \" +\n\t\t\t\t\"only one of the following has to be specified: sum, gauge, histogram\",\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tymlData, err := os.ReadFile(tt.yml)\n\t\t\trequire.NoError(t, err)\n\t\t\tgot, err := loadMetadata(ymlData)\n\t\t\tif tt.wantErr != \"\" {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\trequire.EqualError(t, err, tt.wantErr)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.Equal(t, tt.want, got)\n\t\t\t}\n\t\t})\n\t}\n}\n<commit_msg>[mdatagen] Use path.Join instead of hardcoding testdata paths (#6472)<commit_after>\/\/ Copyright The OpenTelemetry Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"os\"\n\t\"path\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc Test_loadMetadata(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tyml string\n\t\twant metadata\n\t\twantErr string\n\t}{\n\t\t{\n\t\t\tname: \"all options\",\n\t\t\tyml: \"all_options.yaml\",\n\t\t\twant: metadata{\n\t\t\t\tName: \"metricreceiver\",\n\t\t\t\tAttributes: map[attributeName]attribute{\n\t\t\t\t\t\"enumAttribute\": {\n\t\t\t\t\t\tDescription: \"Attribute with a known set of values.\",\n\t\t\t\t\t\tValue: \"\",\n\t\t\t\t\t\tEnum: []string{\"red\", \"green\", \"blue\"}},\n\t\t\t\t\t\"freeFormAttribute\": {\n\t\t\t\t\t\tDescription: \"Attribute that can take on any value.\",\n\t\t\t\t\t\tValue: \"\"},\n\t\t\t\t\t\"freeFormAttributeWithValue\": {\n\t\t\t\t\t\tDescription: \"Attribute that has alternate value set.\",\n\t\t\t\t\t\tValue: \"state\"}},\n\t\t\t\tMetrics: map[metricName]metric{\n\t\t\t\t\t\"system.cpu.time\": {\n\t\t\t\t\t\tDescription: \"Total CPU seconds broken down by different states.\",\n\t\t\t\t\t\tExtendedDocumentation: \"Additional information on CPU Time can be found [here](https:\/\/en.wikipedia.org\/wiki\/CPU_time).\",\n\t\t\t\t\t\tUnit: \"s\",\n\t\t\t\t\t\tSum: &sum{\n\t\t\t\t\t\t\tAggregated: Aggregated{Aggregation: \"cumulative\"},\n\t\t\t\t\t\t\tMono: Mono{Monotonic: true},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\/\/ YmlData: nil,\n\t\t\t\t\t\tAttributes: []attributeName{\"freeFormAttribute\", \"freeFormAttributeWithValue\",\n\t\t\t\t\t\t\t\"enumAttribute\"}}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"unknown metric attribute\",\n\t\t\tyml: \"unknown_metric_attribute.yaml\",\n\t\t\twant: metadata{},\n\t\t\twantErr: \"error validating struct:\\n\\tmetadata.Metrics[system.cpu.time].\" +\n\t\t\t\t\"Attributes[missing]: unknown attribute value\\n\",\n\t\t},\n\t\t{\n\t\t\tname: \"no metric type\",\n\t\t\tyml: \"no_metric_type.yaml\",\n\t\t\twant: metadata{},\n\t\t\twantErr: \"metric system.cpu.time doesn't have a metric type key, \" +\n\t\t\t\t\"one of the following has to be specified: sum, gauge, histogram\",\n\t\t},\n\t\t{\n\t\t\tname: \"two metric types\",\n\t\t\tyml: \"two_metric_types.yaml\",\n\t\t\twant: metadata{},\n\t\t\twantErr: \"metric system.cpu.time has more than one metric type keys, \" +\n\t\t\t\t\"only one of the following has to be specified: sum, gauge, histogram\",\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tymlData, err := os.ReadFile(path.Join(\"testdata\", tt.yml))\n\t\t\trequire.NoError(t, err)\n\t\t\tgot, err := loadMetadata(ymlData)\n\t\t\tif tt.wantErr != \"\" {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\trequire.EqualError(t, err, tt.wantErr)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.Equal(t, tt.want, got)\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"log\"\n\t\"net\"\n\n\t\"github.com\/konjoot\/grpc\/services\/sessions\"\n\t\"google.golang.org\/grpc\"\n\n\tpb \"github.com\/konjoot\/grpc\/proto\/sessions\"\n)\n\nconst (\n\tport = \":50051\"\n)\n\nfunc main() {\n\tlis, err := net.Listen(\"tcp\", port)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\ts := grpc.NewServer()\n\tpb.RegisterSessionServer(s, sessions.New())\n\ts.Serve(lis)\n}\n<commit_msg>enable tracing<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\n\t\"github.com\/konjoot\/grpc\/services\/sessions\"\n\t\"google.golang.org\/grpc\"\n\n\tpb \"github.com\/konjoot\/grpc\/proto\/sessions\"\n)\n\nconst (\n\tport = \":50051\"\n)\n\nvar trace = flag.Bool(\"trace\", false, \"Whether tracing is on\")\n\nfunc main() {\n\tflag.Parse()\n\tgrpc.EnableTracing = *trace\n\n\tlis, err := net.Listen(\"tcp\", port)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\n\tif grpc.EnableTracing {\n\t\tgo http.ListenAndServe(\"localhost:34567\", nil)\n\t}\n\n\ts := grpc.NewServer()\n\tpb.RegisterSessionServer(s, sessions.New())\n\ts.Serve(lis)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\n\tconfig \"github.com\/johnweldon\/mqd\/config\"\n\t\"github.com\/johnweldon\/mqd\/dispatcher\"\n\t\"github.com\/johnweldon\/mqd\/mailer\"\n)\n\nvar (\n\tsettingsfile = \".smtp-dispatcher.settings\"\n)\n\nfunc init() {\n\tflag.StringVar(&settingsfile, \"settingsfile\", settingsfile, \"json encoded settings file\")\n\tflag.StringVar(&settingsfile, \"sf\", settingsfile, \"json encoded settings file (short version)\")\n}\n\nfunc printHelp() {\n\tfmt.Fprintf(os.Stdout, \"Usage: %s [ run | generate ]\\n\\n run: run the mailqueue dispatcher\\n generate: generate settings file\\n\\n\", os.Args[0])\n\tflag.PrintDefaults()\n\tos.Exit(1)\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif len(flag.Args()) != 1 {\n\t\tprintHelp()\n\t}\n\n\tswitch strings.ToLower(flag.Arg(0)) {\n\tcase \"run\":\n\t\trunLoop()\n\tcase \"generate\":\n\t\tgenerate()\n\tdefault:\n\t\tprintHelp()\n\t}\n}\n\nfunc runLoop() {\n\tsettings, err := config.ReadSettings(settingsfile)\n\tif err != nil {\n\t\tglog.Fatalf(\"couldn't read settings %q\\n\", err)\n\t\tos.Exit(2)\n\t}\n\tq := dispatcher.NewPickupFolderQueue(settings.MailQueue, settings.BadMail)\n\tm := mailer.NewMailer(settings)\n\n\tloop(settings.Interval, q, m)\n}\n\nfunc loop(interval int, q dispatcher.MailQueueDispatcher, m mailer.Mailer) {\n\tq.Process(m.ConvertAndSend)\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(time.Duration(interval) * time.Second):\n\t\t\tq.Process(m.ConvertAndSend)\n\t\t}\n\t}\n}\n\nfunc generate() {\n\tsettings := config.NewSettings(\"mailqueue_folder_path\", \"badmail_folder_path\")\n\tsettings.Connections[\"foo@bar.com\"] = config.ConnectionDetails{\n\t\tAuthType: config.PlainAuth,\n\t\tSender: \"foo@bar.com\",\n\t\tServer: \"smtp.example.com:587\",\n\t\tHost: \"smtp.example.com\",\n\t\tUsername: \"username\",\n\t\tPassword: \"P455w0rd\",\n\t}\n\n\tif err := config.WriteSettings(settingsfile, settings); err != nil {\n\t\tglog.Fatalf(\"couldn't write settings %q\\n\", err)\n\t\tos.Exit(3)\n\t}\n\tos.Exit(0)\n}\n<commit_msg>add tomb, signal handling<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"time\"\n\n\t\"gopkg.in\/tomb.v2\"\n\n\t\"github.com\/golang\/glog\"\n\n\tconfig \"github.com\/johnweldon\/mqd\/config\"\n\t\"github.com\/johnweldon\/mqd\/dispatcher\"\n\t\"github.com\/johnweldon\/mqd\/mailer\"\n)\n\nvar (\n\tsettingsfile = \".smtp-dispatcher.settings\"\n\tgenerateSettings = false\n\tsettings config.Settings\n)\n\nfunc init() {\n\tflag.StringVar(&settingsfile, \"settingsfile\", settingsfile, \"json encoded settings file\")\n\tflag.StringVar(&settingsfile, \"sf\", settingsfile, \"json encoded settings file (short version)\")\n\n\tflag.BoolVar(&generateSettings, \"generate\", generateSettings, \"generate settings file\")\n\tflag.BoolVar(&generateSettings, \"g\", generateSettings, \"generate settings file (short version)\")\n\n\tflag.Set(\"log_dir\", \".\")\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif generateSettings {\n\t\tgenerate()\n\t\treturn\n\t}\n\n\ts, err := config.ReadSettings(settingsfile)\n\tif err != nil {\n\t\tglog.Fatalf(\"couldn't read settings %q\\n\", err)\n\t\tos.Exit(2)\n\t}\n\n\tsettings = s\n\trunLoop()\n\tos.Exit(0)\n}\n\nfunc runLoop() {\n\tq := dispatcher.NewPickupFolderQueue(settings.MailQueue, settings.BadMail)\n\tm := mailer.NewMailer(settings)\n\n\tc := make(chan os.Signal)\n\tsignal.Notify(c, os.Interrupt, os.Kill)\n\n\tvar t tomb.Tomb\n\tt.Go(func() error {\n\t\tq.Process(m.ConvertAndSend)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase s := <-c:\n\t\t\t\tglog.Infof(\"got signal %v\", s)\n\t\t\t\tglog.Flush()\n\t\t\t\treturn nil\n\t\t\tcase <-t.Dying():\n\t\t\t\treturn nil\n\t\t\tcase <-time.After(time.Duration(settings.Interval) * time.Second):\n\t\t\t\tq.Process(m.ConvertAndSend)\n\t\t\t\tglog.Flush()\n\t\t\t}\n\t\t}\n\t})\n\tt.Wait()\n}\n\nfunc generate() {\n\tsettings := config.NewSettings(\"mailqueue_folder_path\", \"badmail_folder_path\")\n\tsettings.Connections[\"foo@bar.com\"] = config.ConnectionDetails{\n\t\tAuthType: config.PlainAuth,\n\t\tSender: \"foo@bar.com\",\n\t\tServer: \"smtp.example.com:587\",\n\t\tHost: \"smtp.example.com\",\n\t\tUsername: \"username\",\n\t\tPassword: \"P455w0rd\",\n\t}\n\n\tif err := config.WriteSettings(settingsfile, settings); err != nil {\n\t\tglog.Fatalf(\"couldn't write settings %q\\n\", err)\n\t\tos.Exit(3)\n\t}\n\tos.Exit(0)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package filterflags implements command line flags to set up a filter\npackage filterflags\n\nimport (\n\t\"github.com\/ncw\/rclone\/fs\/config\/flags\"\n\t\"github.com\/ncw\/rclone\/fs\/filter\"\n\t\"github.com\/spf13\/pflag\"\n)\n\n\/\/ Options set by command line flags\nvar (\n\tOpt = filter.DefaultOpt\n)\n\n\/\/ AddFlags adds the non filing system specific flags to the command\nfunc AddFlags(flagSet *pflag.FlagSet) {\n\tflags.BoolVarP(flagSet, &Opt.DeleteExcluded, \"delete-excluded\", \"\", false, \"Delete files on dest excluded from sync\")\n\tflags.StringArrayVarP(flagSet, &Opt.FilterRule, \"filter\", \"f\", nil, \"Add a file-filtering rule\")\n\tflags.StringArrayVarP(flagSet, &Opt.FilterFrom, \"filter-from\", \"\", nil, \"Read filtering patterns from a file\")\n\tflags.StringArrayVarP(flagSet, &Opt.ExcludeRule, \"exclude\", \"\", nil, \"Exclude files matching pattern\")\n\tflags.StringArrayVarP(flagSet, &Opt.ExcludeFrom, \"exclude-from\", \"\", nil, \"Read exclude patterns from file\")\n\tflags.StringVarP(flagSet, &Opt.ExcludeFile, \"exclude-if-present\", \"\", \"\", \"Exclude directories if filename is present\")\n\tflags.StringArrayVarP(flagSet, &Opt.IncludeRule, \"include\", \"\", nil, \"Include files matching pattern\")\n\tflags.StringArrayVarP(flagSet, &Opt.IncludeFrom, \"include-from\", \"\", nil, \"Read include patterns from file\")\n\tflags.StringArrayVarP(flagSet, &Opt.FilesFrom, \"files-from\", \"\", nil, \"Read list of source-file names from file\")\n\tflags.FVarP(flagSet, &Opt.MinAge, \"min-age\", \"\", \"Don't transfer any file younger than this in s or suffix ms|s|m|h|d|w|M|y\")\n\tflags.FVarP(flagSet, &Opt.MaxAge, \"max-age\", \"\", \"Don't transfer any file older than this in s or suffix ms|s|m|h|d|w|M|y\")\n\tflags.FVarP(flagSet, &Opt.MinSize, \"min-size\", \"\", \"Don't transfer any file smaller than this in k or suffix b|k|M|G\")\n\tflags.FVarP(flagSet, &Opt.MaxSize, \"max-size\", \"\", \"Don't transfer any file larger than this in k or suffix b|k|M|G\")\n\t\/\/cvsExclude = BoolP(\"cvs-exclude\", \"C\", false, \"Exclude files in the same way CVS does\")\n}\n<commit_msg>filter: take double negatives out of filter flag help<commit_after>\/\/ Package filterflags implements command line flags to set up a filter\npackage filterflags\n\nimport (\n\t\"github.com\/ncw\/rclone\/fs\/config\/flags\"\n\t\"github.com\/ncw\/rclone\/fs\/filter\"\n\t\"github.com\/spf13\/pflag\"\n)\n\n\/\/ Options set by command line flags\nvar (\n\tOpt = filter.DefaultOpt\n)\n\n\/\/ AddFlags adds the non filing system specific flags to the command\nfunc AddFlags(flagSet *pflag.FlagSet) {\n\tflags.BoolVarP(flagSet, &Opt.DeleteExcluded, \"delete-excluded\", \"\", false, \"Delete files on dest excluded from sync\")\n\tflags.StringArrayVarP(flagSet, &Opt.FilterRule, \"filter\", \"f\", nil, \"Add a file-filtering rule\")\n\tflags.StringArrayVarP(flagSet, &Opt.FilterFrom, \"filter-from\", \"\", nil, \"Read filtering patterns from a file\")\n\tflags.StringArrayVarP(flagSet, &Opt.ExcludeRule, \"exclude\", \"\", nil, \"Exclude files matching pattern\")\n\tflags.StringArrayVarP(flagSet, &Opt.ExcludeFrom, \"exclude-from\", \"\", nil, \"Read exclude patterns from file\")\n\tflags.StringVarP(flagSet, &Opt.ExcludeFile, \"exclude-if-present\", \"\", \"\", \"Exclude directories if filename is present\")\n\tflags.StringArrayVarP(flagSet, &Opt.IncludeRule, \"include\", \"\", nil, \"Include files matching pattern\")\n\tflags.StringArrayVarP(flagSet, &Opt.IncludeFrom, \"include-from\", \"\", nil, \"Read include patterns from file\")\n\tflags.StringArrayVarP(flagSet, &Opt.FilesFrom, \"files-from\", \"\", nil, \"Read list of source-file names from file\")\n\tflags.FVarP(flagSet, &Opt.MinAge, \"min-age\", \"\", \"Only transfer files older than this in s or suffix ms|s|m|h|d|w|M|y\")\n\tflags.FVarP(flagSet, &Opt.MaxAge, \"max-age\", \"\", \"Only transfer files younger than this in s or suffix ms|s|m|h|d|w|M|y\")\n\tflags.FVarP(flagSet, &Opt.MinSize, \"min-size\", \"\", \"Only transfer files bigger than this in k or suffix b|k|M|G\")\n\tflags.FVarP(flagSet, &Opt.MaxSize, \"max-size\", \"\", \"Only transfer files smaller than this in k or suffix b|k|M|G\")\n\t\/\/cvsExclude = BoolP(\"cvs-exclude\", \"C\", false, \"Exclude files in the same way CVS does\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage envoye2e\n\nimport (\n\tenv \"istio.io\/proxy\/test\/envoye2e\/env\"\n)\n\nvar ProxyE2ETests *env.TestInventory\n\nfunc init() {\n\t\/\/ TODO(bianpengyuan): automatically generate this.\n\tProxyE2ETests = &env.TestInventory{\n\t\tTests: []string{\n\t\t\t\"TestBasicFlow\",\n\t\t\t\"TestBasicHTTP\",\n\t\t\t\"TestBasicHTTPwithTLS\",\n\t\t\t\"TestBasicHTTPGateway\",\n\t\t\t\"TestBasicTCPFlow\",\n\t\t\t\"TestHTTPExchange\",\n\t\t\t\"TestStackdriverAccessLog\/StackdriverAndAccessLogPlugin\",\n\t\t\t\"TestStackdriverAccessLog\/RequestGetsLoggedAgain\",\n\t\t\t\"TestStackdriverAccessLog\/AllErrorRequestsGetsLogged\",\n\t\t\t\"TestStackdriverPayload\",\n\t\t\t\"TestStackdriverPayloadGateway\",\n\t\t\t\"TestStackdriverPayloadWithTLS\",\n\t\t\t\"TestStackdriverReload\",\n\t\t\t\"TestStackdriverVMReload\",\n\t\t\t\"TestStackdriverParallel\",\n\t\t\t\"TestStatsPayload\/Default\/envoy.wasm.runtime.null\",\n\t\t\t\"TestStatsPayload\/Customized\/envoy.wasm.runtime.null\",\n\t\t\t\"TestStatsPayload\/UseHostHeader\/envoy.wasm.runtime.null\",\n\t\t\t\"TestStatsPayload\/DisableHostHeader\/envoy.wasm.runtime.null\",\n\t\t\t\"TestStatsPayload\/Default\/envoy.wasm.runtime.v8\",\n\t\t\t\"TestStatsPayload\/Customized\/envoy.wasm.runtime.v8\",\n\t\t\t\"TestStatsPayload\/UseHostHeader\/envoy.wasm.runtime.v8\",\n\t\t\t\"TestStatsPayload\/DisableHostHeader\/envoy.wasm.runtime.v8\",\n\t\t\t\"TestStatsParallel\",\n\t\t\t\"TestStatsGrpc\",\n\t\t\t\"TestTCPMetadataExchange\",\n\t\t\t\"TestTCPMetadataExchangeNoAlpn\",\n\t\t},\n\t}\n}\n<commit_msg>add two missing stackdriver integration test to test inventory (#2906)<commit_after>\/\/ Copyright 2020 Istio Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage envoye2e\n\nimport (\n\tenv \"istio.io\/proxy\/test\/envoye2e\/env\"\n)\n\nvar ProxyE2ETests *env.TestInventory\n\nfunc init() {\n\t\/\/ TODO(bianpengyuan): automatically generate this.\n\tProxyE2ETests = &env.TestInventory{\n\t\tTests: []string{\n\t\t\t\"TestBasicFlow\",\n\t\t\t\"TestBasicHTTP\",\n\t\t\t\"TestBasicHTTPwithTLS\",\n\t\t\t\"TestBasicHTTPGateway\",\n\t\t\t\"TestBasicTCPFlow\",\n\t\t\t\"TestHTTPExchange\",\n\t\t\t\"TestStackdriverAccessLog\/StackdriverAndAccessLogPlugin\",\n\t\t\t\"TestStackdriverAccessLog\/RequestGetsLoggedAgain\",\n\t\t\t\"TestStackdriverAccessLog\/AllErrorRequestsGetsLogged\",\n\t\t\t\"TestStackdriverPayload\",\n\t\t\t\"TestStackdriverPayloadGateway\",\n\t\t\t\"TestStackdriverPayloadWithTLS\",\n\t\t\t\"TestStackdriverReload\",\n\t\t\t\"TestStackdriverVMReload\",\n\t\t\t\"TestStackdriverParallel\",\n\t\t\t\"TestStackdriverGCEInstances\",\n\t\t\t\"TestStackdriverTCPMetadataExchange\",\n\t\t\t\"TestStatsPayload\/Default\/envoy.wasm.runtime.null\",\n\t\t\t\"TestStatsPayload\/Customized\/envoy.wasm.runtime.null\",\n\t\t\t\"TestStatsPayload\/UseHostHeader\/envoy.wasm.runtime.null\",\n\t\t\t\"TestStatsPayload\/DisableHostHeader\/envoy.wasm.runtime.null\",\n\t\t\t\"TestStatsPayload\/Default\/envoy.wasm.runtime.v8\",\n\t\t\t\"TestStatsPayload\/Customized\/envoy.wasm.runtime.v8\",\n\t\t\t\"TestStatsPayload\/UseHostHeader\/envoy.wasm.runtime.v8\",\n\t\t\t\"TestStatsPayload\/DisableHostHeader\/envoy.wasm.runtime.v8\",\n\t\t\t\"TestStatsParallel\",\n\t\t\t\"TestStatsGrpc\",\n\t\t\t\"TestTCPMetadataExchange\",\n\t\t\t\"TestTCPMetadataExchangeNoAlpn\",\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The etcd Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage naming\n\nimport (\n\t\"encoding\/json\"\n\n\tetcd \"github.com\/coreos\/etcd\/clientv3\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"google.golang.org\/grpc\/naming\"\n)\n\n\/\/ GRPCResolver creates a grpc.Watcher for a target to track its resolution changes.\ntype GRPCResolver struct {\n\t\/\/ Client is an initialized etcd client.\n\tClient *etcd.Client\n}\n\nfunc (gr *GRPCResolver) Update(ctx context.Context, target string, nm naming.Update, opts ...etcd.OpOption) (err error) {\n\tswitch nm.Op {\n\tcase naming.Add:\n\t\tvar v []byte\n\t\tif v, err = json.Marshal(nm); err != nil {\n\t\t\treturn grpc.Errorf(codes.InvalidArgument, err.Error())\n\t\t}\n\t\t_, err = gr.Client.KV.Put(ctx, target+\"\/\"+nm.Addr, string(v), opts...)\n\tcase naming.Delete:\n\t\t_, err = gr.Client.Delete(ctx, target+\"\/\"+nm.Addr, opts...)\n\tdefault:\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"naming: bad naming op\")\n\t}\n\treturn err\n}\n\nfunc (gr *GRPCResolver) Resolve(target string) (naming.Watcher, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tw := &gRPCWatcher{c: gr.Client, target: target + \"\/\", ctx: ctx, cancel: cancel}\n\treturn w, nil\n}\n\ntype gRPCWatcher struct {\n\tc *etcd.Client\n\ttarget string\n\tctx context.Context\n\tcancel context.CancelFunc\n\twch etcd.WatchChan\n\terr error\n}\n\n\/\/ Next gets the next set of updates from the etcd resolver.\n\/\/ Calls to Next should be serialized; concurrent calls are not safe since\n\/\/ there is no way to reconcile the update ordering.\nfunc (gw *gRPCWatcher) Next() ([]*naming.Update, error) {\n\tif gw.wch == nil {\n\t\t\/\/ first Next() returns all addresses\n\t\treturn gw.firstNext()\n\t}\n\tif gw.err != nil {\n\t\treturn nil, gw.err\n\t}\n\n\t\/\/ process new events on target\/*\n\twr, ok := <-gw.wch\n\tif !ok {\n\t\tgw.err = grpc.Errorf(codes.Unavailable, \"naming: watch closed\")\n\t\treturn nil, gw.err\n\t}\n\tif gw.err = wr.Err(); gw.err != nil {\n\t\treturn nil, gw.err\n\t}\n\n\tupdates := make([]*naming.Update, 0, len(wr.Events))\n\tfor _, e := range wr.Events {\n\t\tvar jupdate naming.Update\n\t\tvar err error\n\t\tswitch e.Type {\n\t\tcase etcd.EventTypePut:\n\t\t\terr = json.Unmarshal(e.Kv.Value, &jupdate)\n\t\t\tjupdate.Op = naming.Add\n\t\tcase etcd.EventTypeDelete:\n\t\t\terr = json.Unmarshal(e.PrevKv.Value, &jupdate)\n\t\t\tjupdate.Op = naming.Delete\n\t\t}\n\t\tif err == nil {\n\t\t\tupdates = append(updates, &jupdate)\n\t\t}\n\t}\n\treturn updates, nil\n}\n\nfunc (gw *gRPCWatcher) firstNext() ([]*naming.Update, error) {\n\t\/\/ Use serialized request so resolution still works if the target etcd\n\t\/\/ server is partitioned away from the quorum.\n\tresp, err := gw.c.Get(gw.ctx, gw.target, etcd.WithPrefix(), etcd.WithSerializable())\n\tif gw.err = err; err != nil {\n\t\treturn nil, err\n\t}\n\n\tupdates := make([]*naming.Update, 0, len(resp.Kvs))\n\tfor _, kv := range resp.Kvs {\n\t\tvar jupdate naming.Update\n\t\tif err := json.Unmarshal(kv.Value, &jupdate); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tupdates = append(updates, &jupdate)\n\t}\n\n\topts := []etcd.OpOption{etcd.WithRev(resp.Header.Revision + 1), etcd.WithPrefix(), etcd.WithPrevKV()}\n\tgw.wch = gw.c.Watch(gw.ctx, gw.target, opts...)\n\treturn updates, nil\n}\n\nfunc (gw *gRPCWatcher) Close() { gw.cancel() }\n<commit_msg>clientv3: define error type for closed watcher<commit_after>\/\/ Copyright 2016 The etcd Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage naming\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\tetcd \"github.com\/coreos\/etcd\/clientv3\"\n\t\"golang.org\/x\/net\/context\"\n\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"google.golang.org\/grpc\/naming\"\n)\n\nvar ErrWatcherClosed = fmt.Errorf(\"naming: watch closed\")\n\n\/\/ GRPCResolver creates a grpc.Watcher for a target to track its resolution changes.\ntype GRPCResolver struct {\n\t\/\/ Client is an initialized etcd client.\n\tClient *etcd.Client\n}\n\nfunc (gr *GRPCResolver) Update(ctx context.Context, target string, nm naming.Update, opts ...etcd.OpOption) (err error) {\n\tswitch nm.Op {\n\tcase naming.Add:\n\t\tvar v []byte\n\t\tif v, err = json.Marshal(nm); err != nil {\n\t\t\treturn grpc.Errorf(codes.InvalidArgument, err.Error())\n\t\t}\n\t\t_, err = gr.Client.KV.Put(ctx, target+\"\/\"+nm.Addr, string(v), opts...)\n\tcase naming.Delete:\n\t\t_, err = gr.Client.Delete(ctx, target+\"\/\"+nm.Addr, opts...)\n\tdefault:\n\t\treturn grpc.Errorf(codes.InvalidArgument, \"naming: bad naming op\")\n\t}\n\treturn err\n}\n\nfunc (gr *GRPCResolver) Resolve(target string) (naming.Watcher, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tw := &gRPCWatcher{c: gr.Client, target: target + \"\/\", ctx: ctx, cancel: cancel}\n\treturn w, nil\n}\n\ntype gRPCWatcher struct {\n\tc *etcd.Client\n\ttarget string\n\tctx context.Context\n\tcancel context.CancelFunc\n\twch etcd.WatchChan\n\terr error\n}\n\n\/\/ Next gets the next set of updates from the etcd resolver.\n\/\/ Calls to Next should be serialized; concurrent calls are not safe since\n\/\/ there is no way to reconcile the update ordering.\nfunc (gw *gRPCWatcher) Next() ([]*naming.Update, error) {\n\tif gw.wch == nil {\n\t\t\/\/ first Next() returns all addresses\n\t\treturn gw.firstNext()\n\t}\n\tif gw.err != nil {\n\t\treturn nil, gw.err\n\t}\n\n\t\/\/ process new events on target\/*\n\twr, ok := <-gw.wch\n\tif !ok {\n\t\tgw.err = grpc.Errorf(codes.Unavailable, \"%s\", ErrWatcherClosed)\n\t\treturn nil, gw.err\n\t}\n\tif gw.err = wr.Err(); gw.err != nil {\n\t\treturn nil, gw.err\n\t}\n\n\tupdates := make([]*naming.Update, 0, len(wr.Events))\n\tfor _, e := range wr.Events {\n\t\tvar jupdate naming.Update\n\t\tvar err error\n\t\tswitch e.Type {\n\t\tcase etcd.EventTypePut:\n\t\t\terr = json.Unmarshal(e.Kv.Value, &jupdate)\n\t\t\tjupdate.Op = naming.Add\n\t\tcase etcd.EventTypeDelete:\n\t\t\terr = json.Unmarshal(e.PrevKv.Value, &jupdate)\n\t\t\tjupdate.Op = naming.Delete\n\t\t}\n\t\tif err == nil {\n\t\t\tupdates = append(updates, &jupdate)\n\t\t}\n\t}\n\treturn updates, nil\n}\n\nfunc (gw *gRPCWatcher) firstNext() ([]*naming.Update, error) {\n\t\/\/ Use serialized request so resolution still works if the target etcd\n\t\/\/ server is partitioned away from the quorum.\n\tresp, err := gw.c.Get(gw.ctx, gw.target, etcd.WithPrefix(), etcd.WithSerializable())\n\tif gw.err = err; err != nil {\n\t\treturn nil, err\n\t}\n\n\tupdates := make([]*naming.Update, 0, len(resp.Kvs))\n\tfor _, kv := range resp.Kvs {\n\t\tvar jupdate naming.Update\n\t\tif err := json.Unmarshal(kv.Value, &jupdate); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tupdates = append(updates, &jupdate)\n\t}\n\n\topts := []etcd.OpOption{etcd.WithRev(resp.Header.Revision + 1), etcd.WithPrefix(), etcd.WithPrevKV()}\n\tgw.wch = gw.c.Watch(gw.ctx, gw.target, opts...)\n\treturn updates, nil\n}\n\nfunc (gw *gRPCWatcher) Close() { gw.cancel() }\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The etcd Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ordering\n\nimport (\n\t\"context\"\n\t\"sync\"\n\n\t\"github.com\/coreos\/etcd\/clientv3\"\n)\n\n\/\/ kvOrdering ensures that serialized requests do not return\n\/\/ get with revisions less than the previous\n\/\/ returned revision.\ntype kvOrdering struct {\n\tclientv3.KV\n\torderViolationFunc OrderViolationFunc\n\tprevRev int64\n\trevMu sync.RWMutex\n}\n\nfunc NewKV(kv clientv3.KV, orderViolationFunc OrderViolationFunc) *kvOrdering {\n\treturn &kvOrdering{kv, orderViolationFunc, 0, sync.RWMutex{}}\n}\n\nfunc (kv *kvOrdering) getPrevRev() int64 {\n\tkv.revMu.RLock()\n\tdefer kv.revMu.RUnlock()\n\treturn kv.prevRev\n}\n\nfunc (kv *kvOrdering) setPrevRev(currRev int64) {\n\tprevRev := kv.getPrevRev()\n\tkv.revMu.Lock()\n\tdefer kv.revMu.Unlock()\n\tif currRev > prevRev {\n\t\tkv.prevRev = currRev\n\t}\n}\n\nfunc (kv *kvOrdering) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {\n\t\/\/ prevRev is stored in a local variable in order to record the prevRev\n\t\/\/ at the beginning of the Get operation, because concurrent\n\t\/\/ access to kvOrdering could change the prevRev field in the\n\t\/\/ middle of the Get operation.\n\tprevRev := kv.getPrevRev()\n\top := clientv3.OpGet(key, opts...)\n\tfor {\n\t\tr, err := kv.KV.Do(ctx, op)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresp := r.Get()\n\t\tif resp.Header.Revision >= prevRev {\n\t\t\tkv.setPrevRev(resp.Header.Revision)\n\t\t\treturn resp, nil\n\t\t}\n\t\terr = kv.orderViolationFunc(op, r, prevRev)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n}\n\nfunc (kv *kvOrdering) Txn(ctx context.Context) clientv3.Txn {\n\treturn &txnOrdering{\n\t\tkv.KV.Txn(ctx),\n\t\tkv,\n\t\tctx,\n\t\tsync.Mutex{},\n\t\t[]clientv3.Cmp{},\n\t\t[]clientv3.Op{},\n\t\t[]clientv3.Op{},\n\t}\n}\n\n\/\/ txnOrdering ensures that serialized requests do not return\n\/\/ txn responses with revisions less than the previous\n\/\/ returned revision.\ntype txnOrdering struct {\n\tclientv3.Txn\n\t*kvOrdering\n\tctx context.Context\n\tmu sync.Mutex\n\tcmps []clientv3.Cmp\n\tthenOps []clientv3.Op\n\telseOps []clientv3.Op\n}\n\nfunc (txn *txnOrdering) If(cs ...clientv3.Cmp) clientv3.Txn {\n\ttxn.mu.Lock()\n\tdefer txn.mu.Unlock()\n\ttxn.cmps = cs\n\ttxn.Txn.If(cs...)\n\treturn txn\n}\n\nfunc (txn *txnOrdering) Then(ops ...clientv3.Op) clientv3.Txn {\n\ttxn.mu.Lock()\n\tdefer txn.mu.Unlock()\n\ttxn.thenOps = ops\n\ttxn.Txn.Then(ops...)\n\treturn txn\n}\n\nfunc (txn *txnOrdering) Else(ops ...clientv3.Op) clientv3.Txn {\n\ttxn.mu.Lock()\n\tdefer txn.mu.Unlock()\n\ttxn.elseOps = ops\n\ttxn.Txn.Else(ops...)\n\treturn txn\n}\n\nfunc (txn *txnOrdering) Commit() (*clientv3.TxnResponse, error) {\n\t\/\/ prevRev is stored in a local variable in order to record the prevRev\n\t\/\/ at the beginning of the Commit operation, because concurrent\n\t\/\/ access to txnOrdering could change the prevRev field in the\n\t\/\/ middle of the Commit operation.\n\tprevRev := txn.getPrevRev()\n\topTxn := clientv3.OpTxn(txn.cmps, txn.thenOps, txn.elseOps)\n\tfor {\n\t\topResp, err := txn.KV.Do(txn.ctx, opTxn)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttxnResp := opResp.Txn()\n\t\tif txnResp.Header.Revision >= prevRev {\n\t\t\ttxn.setPrevRev(txnResp.Header.Revision)\n\t\t\treturn txnResp, nil\n\t\t}\n\t\terr = txn.orderViolationFunc(opTxn, opResp, prevRev)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n}\n<commit_msg>clientv3\/ordering: acquire setPrevRev mutex only when needed<commit_after>\/\/ Copyright 2017 The etcd Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage ordering\n\nimport (\n\t\"context\"\n\t\"sync\"\n\n\t\"github.com\/coreos\/etcd\/clientv3\"\n)\n\n\/\/ kvOrdering ensures that serialized requests do not return\n\/\/ get with revisions less than the previous\n\/\/ returned revision.\ntype kvOrdering struct {\n\tclientv3.KV\n\torderViolationFunc OrderViolationFunc\n\tprevRev int64\n\trevMu sync.RWMutex\n}\n\nfunc NewKV(kv clientv3.KV, orderViolationFunc OrderViolationFunc) *kvOrdering {\n\treturn &kvOrdering{kv, orderViolationFunc, 0, sync.RWMutex{}}\n}\n\nfunc (kv *kvOrdering) getPrevRev() int64 {\n\tkv.revMu.RLock()\n\tdefer kv.revMu.RUnlock()\n\treturn kv.prevRev\n}\n\nfunc (kv *kvOrdering) setPrevRev(currRev int64) {\n\tprevRev := kv.getPrevRev()\n\tif currRev > prevRev {\n\t\tkv.revMu.Lock()\n\t\tkv.prevRev = currRev\n\t\tkv.revMu.Unlock()\n\t}\n}\n\nfunc (kv *kvOrdering) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (*clientv3.GetResponse, error) {\n\t\/\/ prevRev is stored in a local variable in order to record the prevRev\n\t\/\/ at the beginning of the Get operation, because concurrent\n\t\/\/ access to kvOrdering could change the prevRev field in the\n\t\/\/ middle of the Get operation.\n\tprevRev := kv.getPrevRev()\n\top := clientv3.OpGet(key, opts...)\n\tfor {\n\t\tr, err := kv.KV.Do(ctx, op)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresp := r.Get()\n\t\tif resp.Header.Revision >= prevRev {\n\t\t\tkv.setPrevRev(resp.Header.Revision)\n\t\t\treturn resp, nil\n\t\t}\n\t\terr = kv.orderViolationFunc(op, r, prevRev)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n}\n\nfunc (kv *kvOrdering) Txn(ctx context.Context) clientv3.Txn {\n\treturn &txnOrdering{\n\t\tkv.KV.Txn(ctx),\n\t\tkv,\n\t\tctx,\n\t\tsync.Mutex{},\n\t\t[]clientv3.Cmp{},\n\t\t[]clientv3.Op{},\n\t\t[]clientv3.Op{},\n\t}\n}\n\n\/\/ txnOrdering ensures that serialized requests do not return\n\/\/ txn responses with revisions less than the previous\n\/\/ returned revision.\ntype txnOrdering struct {\n\tclientv3.Txn\n\t*kvOrdering\n\tctx context.Context\n\tmu sync.Mutex\n\tcmps []clientv3.Cmp\n\tthenOps []clientv3.Op\n\telseOps []clientv3.Op\n}\n\nfunc (txn *txnOrdering) If(cs ...clientv3.Cmp) clientv3.Txn {\n\ttxn.mu.Lock()\n\tdefer txn.mu.Unlock()\n\ttxn.cmps = cs\n\ttxn.Txn.If(cs...)\n\treturn txn\n}\n\nfunc (txn *txnOrdering) Then(ops ...clientv3.Op) clientv3.Txn {\n\ttxn.mu.Lock()\n\tdefer txn.mu.Unlock()\n\ttxn.thenOps = ops\n\ttxn.Txn.Then(ops...)\n\treturn txn\n}\n\nfunc (txn *txnOrdering) Else(ops ...clientv3.Op) clientv3.Txn {\n\ttxn.mu.Lock()\n\tdefer txn.mu.Unlock()\n\ttxn.elseOps = ops\n\ttxn.Txn.Else(ops...)\n\treturn txn\n}\n\nfunc (txn *txnOrdering) Commit() (*clientv3.TxnResponse, error) {\n\t\/\/ prevRev is stored in a local variable in order to record the prevRev\n\t\/\/ at the beginning of the Commit operation, because concurrent\n\t\/\/ access to txnOrdering could change the prevRev field in the\n\t\/\/ middle of the Commit operation.\n\tprevRev := txn.getPrevRev()\n\topTxn := clientv3.OpTxn(txn.cmps, txn.thenOps, txn.elseOps)\n\tfor {\n\t\topResp, err := txn.KV.Do(txn.ctx, opTxn)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttxnResp := opResp.Txn()\n\t\tif txnResp.Header.Revision >= prevRev {\n\t\t\ttxn.setPrevRev(txnResp.Header.Revision)\n\t\t\treturn txnResp, nil\n\t\t}\n\t\terr = txn.orderViolationFunc(opTxn, opResp, prevRev)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/urfave\/cli\"\n\t\"golang.org\/x\/net\/context\"\n\tpb \"k8s.io\/kubernetes\/pkg\/kubelet\/api\/v1alpha1\/runtime\"\n)\n\nvar containerCommand = cli.Command{\n\tName: \"container\",\n\tAliases: []string{\"ctr\"},\n\tSubcommands: []cli.Command{\n\t\tcreateContainerCommand,\n\t\tstartContainerCommand,\n\t\tstopContainerCommand,\n\t\tremoveContainerCommand,\n\t\tcontainerStatusCommand,\n\t},\n}\n\nvar createContainerCommand = cli.Command{\n\tName: \"create\",\n\tUsage: \"create a container\",\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"pod\",\n\t\t\tUsage: \"the id of the pod sandbox to which the container belongs\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"config\",\n\t\t\tValue: \"config.json\",\n\t\t\tUsage: \"the path of a container config file\",\n\t\t},\n\t},\n\tAction: func(context *cli.Context) error {\n\t\t\/\/ Set up a connection to the server.\n\t\tconn, err := getClientConnection(context)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to connect: %v\", err)\n\t\t}\n\t\tdefer conn.Close()\n\t\tclient := pb.NewRuntimeServiceClient(conn)\n\n\t\tif !context.IsSet(\"pod\") {\n\t\t\treturn fmt.Errorf(\"Please specify the id of the pod sandbox to which the container belongs via the --pod option\")\n\t\t}\n\t\t\/\/ Test RuntimeServiceClient.CreateContainer\n\t\terr = CreateContainer(client, context.String(\"pod\"), context.String(\"config\"))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Creating container failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t},\n}\n\nvar startContainerCommand = cli.Command{\n\tName: \"start\",\n\tUsage: \"start a container\",\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"id\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"id of the container\",\n\t\t},\n\t},\n\tAction: func(context *cli.Context) error {\n\t\t\/\/ Set up a connection to the server.\n\t\tconn, err := getClientConnection(context)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to connect: %v\", err)\n\t\t}\n\t\tdefer conn.Close()\n\t\tclient := pb.NewRuntimeServiceClient(conn)\n\n\t\terr = StartContainer(client, context.String(\"id\"))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Starting the container failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t},\n}\n\nvar stopContainerCommand = cli.Command{\n\tName: \"stop\",\n\tUsage: \"stop a container\",\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"id\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"id of the container\",\n\t\t},\n\t},\n\tAction: func(context *cli.Context) error {\n\t\t\/\/ Set up a connection to the server.\n\t\tconn, err := getClientConnection(context)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to connect: %v\", err)\n\t\t}\n\t\tdefer conn.Close()\n\t\tclient := pb.NewRuntimeServiceClient(conn)\n\n\t\terr = StopContainer(client, context.String(\"id\"))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Stopping the container failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t},\n}\n\nvar removeContainerCommand = cli.Command{\n\tName: \"remove\",\n\tUsage: \"remove a container\",\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"id\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"id of the container\",\n\t\t},\n\t},\n\tAction: func(context *cli.Context) error {\n\t\t\/\/ Set up a connection to the server.\n\t\tconn, err := getClientConnection(context)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to connect: %v\", err)\n\t\t}\n\t\tdefer conn.Close()\n\t\tclient := pb.NewRuntimeServiceClient(conn)\n\n\t\terr = RemoveContainer(client, context.String(\"id\"))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Removing the container failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t},\n}\n\nvar containerStatusCommand = cli.Command{\n\tName: \"status\",\n\tUsage: \"get the status of a container\",\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"id\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"id of the container\",\n\t\t},\n\t},\n\tAction: func(context *cli.Context) error {\n\t\t\/\/ Set up a connection to the server.\n\t\tconn, err := getClientConnection(context)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to connect: %v\", err)\n\t\t}\n\t\tdefer conn.Close()\n\t\tclient := pb.NewRuntimeServiceClient(conn)\n\n\t\terr = ContainerStatus(client, context.String(\"id\"))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Getting the status of the container failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t},\n}\n\n\/\/ CreateContainer sends a CreateContainerRequest to the server, and parses\n\/\/ the returned CreateContainerResponse.\nfunc CreateContainer(client pb.RuntimeServiceClient, sandbox string, path string) error {\n\tconfig, err := loadContainerConfig(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr, err := client.CreateContainer(context.Background(), &pb.CreateContainerRequest{\n\t\tPodSandboxId: &sandbox,\n\t\tConfig: config,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(*r.ContainerId)\n\treturn nil\n}\n\n\/\/ StartContainer sends a StartContainerRequest to the server, and parses\n\/\/ the returned StartContainerResponse.\nfunc StartContainer(client pb.RuntimeServiceClient, ID string) error {\n\tif ID == \"\" {\n\t\treturn fmt.Errorf(\"ID cannot be empty\")\n\t}\n\t_, err := client.StartContainer(context.Background(), &pb.StartContainerRequest{\n\t\tContainerId: &ID,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(ID)\n\treturn nil\n}\n\n\/\/ StopContainer sends a StopContainerRequest to the server, and parses\n\/\/ the returned StopContainerResponse.\nfunc StopContainer(client pb.RuntimeServiceClient, ID string) error {\n\tif ID == \"\" {\n\t\treturn fmt.Errorf(\"ID cannot be empty\")\n\t}\n\t_, err := client.StopContainer(context.Background(), &pb.StopContainerRequest{\n\t\tContainerId: &ID,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(ID)\n\treturn nil\n}\n\n\/\/ RemoveContainer sends a RemoveContainerRequest to the server, and parses\n\/\/ the returned RemoveContainerResponse.\nfunc RemoveContainer(client pb.RuntimeServiceClient, ID string) error {\n\tif ID == \"\" {\n\t\treturn fmt.Errorf(\"ID cannot be empty\")\n\t}\n\t_, err := client.RemoveContainer(context.Background(), &pb.RemoveContainerRequest{\n\t\tContainerId: &ID,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(ID)\n\treturn nil\n}\n\n\/\/ ContainerStatus sends a ContainerStatusRequest to the server, and parses\n\/\/ the returned ContainerStatusResponse.\nfunc ContainerStatus(client pb.RuntimeServiceClient, ID string) error {\n\tif ID == \"\" {\n\t\treturn fmt.Errorf(\"ID cannot be empty\")\n\t}\n\tr, err := client.ContainerStatus(context.Background(), &pb.ContainerStatusRequest{\n\t\tContainerId: &ID})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(\"ID: %s\\n\", *r.Status.Id)\n\tif r.Status.State != nil {\n\t\tfmt.Printf(\"Status: %s\\n\", r.Status.State)\n\t}\n\tif r.Status.CreatedAt != nil {\n\t\tctm := time.Unix(*r.Status.CreatedAt, 0)\n\t\tfmt.Printf(\"Created: %v\\n\", ctm)\n\t}\n\tif r.Status.StartedAt != nil {\n\t\tstm := time.Unix(*r.Status.StartedAt, 0)\n\t\tfmt.Printf(\"Started: %v\\n\", stm)\n\t}\n\tif r.Status.FinishedAt != nil {\n\t\tftm := time.Unix(*r.Status.FinishedAt, 0)\n\t\tfmt.Printf(\"Finished: %v\\n\", ftm)\n\t}\n\tif r.Status.ExitCode != nil {\n\t\tfmt.Printf(\"Exit Code: %v\\n\", *r.Status.ExitCode)\n\t}\n\n\treturn nil\n}\n<commit_msg>Add client impl for listing containers<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/urfave\/cli\"\n\t\"golang.org\/x\/net\/context\"\n\tpb \"k8s.io\/kubernetes\/pkg\/kubelet\/api\/v1alpha1\/runtime\"\n)\n\nvar containerCommand = cli.Command{\n\tName: \"container\",\n\tAliases: []string{\"ctr\"},\n\tSubcommands: []cli.Command{\n\t\tcreateContainerCommand,\n\t\tstartContainerCommand,\n\t\tstopContainerCommand,\n\t\tremoveContainerCommand,\n\t\tcontainerStatusCommand,\n\t\tlistContainersCommand,\n\t},\n}\n\nvar createContainerCommand = cli.Command{\n\tName: \"create\",\n\tUsage: \"create a container\",\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"pod\",\n\t\t\tUsage: \"the id of the pod sandbox to which the container belongs\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"config\",\n\t\t\tValue: \"config.json\",\n\t\t\tUsage: \"the path of a container config file\",\n\t\t},\n\t},\n\tAction: func(context *cli.Context) error {\n\t\t\/\/ Set up a connection to the server.\n\t\tconn, err := getClientConnection(context)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to connect: %v\", err)\n\t\t}\n\t\tdefer conn.Close()\n\t\tclient := pb.NewRuntimeServiceClient(conn)\n\n\t\tif !context.IsSet(\"pod\") {\n\t\t\treturn fmt.Errorf(\"Please specify the id of the pod sandbox to which the container belongs via the --pod option\")\n\t\t}\n\t\t\/\/ Test RuntimeServiceClient.CreateContainer\n\t\terr = CreateContainer(client, context.String(\"pod\"), context.String(\"config\"))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Creating container failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t},\n}\n\nvar startContainerCommand = cli.Command{\n\tName: \"start\",\n\tUsage: \"start a container\",\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"id\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"id of the container\",\n\t\t},\n\t},\n\tAction: func(context *cli.Context) error {\n\t\t\/\/ Set up a connection to the server.\n\t\tconn, err := getClientConnection(context)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to connect: %v\", err)\n\t\t}\n\t\tdefer conn.Close()\n\t\tclient := pb.NewRuntimeServiceClient(conn)\n\n\t\terr = StartContainer(client, context.String(\"id\"))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Starting the container failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t},\n}\n\nvar stopContainerCommand = cli.Command{\n\tName: \"stop\",\n\tUsage: \"stop a container\",\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"id\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"id of the container\",\n\t\t},\n\t},\n\tAction: func(context *cli.Context) error {\n\t\t\/\/ Set up a connection to the server.\n\t\tconn, err := getClientConnection(context)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to connect: %v\", err)\n\t\t}\n\t\tdefer conn.Close()\n\t\tclient := pb.NewRuntimeServiceClient(conn)\n\n\t\terr = StopContainer(client, context.String(\"id\"))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Stopping the container failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t},\n}\n\nvar removeContainerCommand = cli.Command{\n\tName: \"remove\",\n\tUsage: \"remove a container\",\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"id\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"id of the container\",\n\t\t},\n\t},\n\tAction: func(context *cli.Context) error {\n\t\t\/\/ Set up a connection to the server.\n\t\tconn, err := getClientConnection(context)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to connect: %v\", err)\n\t\t}\n\t\tdefer conn.Close()\n\t\tclient := pb.NewRuntimeServiceClient(conn)\n\n\t\terr = RemoveContainer(client, context.String(\"id\"))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Removing the container failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t},\n}\n\nvar containerStatusCommand = cli.Command{\n\tName: \"status\",\n\tUsage: \"get the status of a container\",\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"id\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"id of the container\",\n\t\t},\n\t},\n\tAction: func(context *cli.Context) error {\n\t\t\/\/ Set up a connection to the server.\n\t\tconn, err := getClientConnection(context)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to connect: %v\", err)\n\t\t}\n\t\tdefer conn.Close()\n\t\tclient := pb.NewRuntimeServiceClient(conn)\n\n\t\terr = ContainerStatus(client, context.String(\"id\"))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Getting the status of the container failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t},\n}\n\nvar listContainersCommand = cli.Command{\n\tName: \"list\",\n\tUsage: \"list containers\",\n\tAction: func(context *cli.Context) error {\n\t\t\/\/ Set up a connection to the server.\n\t\tconn, err := getClientConnection(context)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to connect: %v\", err)\n\t\t}\n\t\tdefer conn.Close()\n\t\tclient := pb.NewRuntimeServiceClient(conn)\n\n\t\terr = ListContainers(client)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"listing containers failed: %v\", err)\n\t\t}\n\t\treturn nil\n\t},\n}\n\n\/\/ CreateContainer sends a CreateContainerRequest to the server, and parses\n\/\/ the returned CreateContainerResponse.\nfunc CreateContainer(client pb.RuntimeServiceClient, sandbox string, path string) error {\n\tconfig, err := loadContainerConfig(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr, err := client.CreateContainer(context.Background(), &pb.CreateContainerRequest{\n\t\tPodSandboxId: &sandbox,\n\t\tConfig: config,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(*r.ContainerId)\n\treturn nil\n}\n\n\/\/ StartContainer sends a StartContainerRequest to the server, and parses\n\/\/ the returned StartContainerResponse.\nfunc StartContainer(client pb.RuntimeServiceClient, ID string) error {\n\tif ID == \"\" {\n\t\treturn fmt.Errorf(\"ID cannot be empty\")\n\t}\n\t_, err := client.StartContainer(context.Background(), &pb.StartContainerRequest{\n\t\tContainerId: &ID,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(ID)\n\treturn nil\n}\n\n\/\/ StopContainer sends a StopContainerRequest to the server, and parses\n\/\/ the returned StopContainerResponse.\nfunc StopContainer(client pb.RuntimeServiceClient, ID string) error {\n\tif ID == \"\" {\n\t\treturn fmt.Errorf(\"ID cannot be empty\")\n\t}\n\t_, err := client.StopContainer(context.Background(), &pb.StopContainerRequest{\n\t\tContainerId: &ID,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(ID)\n\treturn nil\n}\n\n\/\/ RemoveContainer sends a RemoveContainerRequest to the server, and parses\n\/\/ the returned RemoveContainerResponse.\nfunc RemoveContainer(client pb.RuntimeServiceClient, ID string) error {\n\tif ID == \"\" {\n\t\treturn fmt.Errorf(\"ID cannot be empty\")\n\t}\n\t_, err := client.RemoveContainer(context.Background(), &pb.RemoveContainerRequest{\n\t\tContainerId: &ID,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(ID)\n\treturn nil\n}\n\n\/\/ ContainerStatus sends a ContainerStatusRequest to the server, and parses\n\/\/ the returned ContainerStatusResponse.\nfunc ContainerStatus(client pb.RuntimeServiceClient, ID string) error {\n\tif ID == \"\" {\n\t\treturn fmt.Errorf(\"ID cannot be empty\")\n\t}\n\tr, err := client.ContainerStatus(context.Background(), &pb.ContainerStatusRequest{\n\t\tContainerId: &ID})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(\"ID: %s\\n\", *r.Status.Id)\n\tif r.Status.State != nil {\n\t\tfmt.Printf(\"Status: %s\\n\", r.Status.State)\n\t}\n\tif r.Status.CreatedAt != nil {\n\t\tctm := time.Unix(*r.Status.CreatedAt, 0)\n\t\tfmt.Printf(\"Created: %v\\n\", ctm)\n\t}\n\tif r.Status.StartedAt != nil {\n\t\tstm := time.Unix(*r.Status.StartedAt, 0)\n\t\tfmt.Printf(\"Started: %v\\n\", stm)\n\t}\n\tif r.Status.FinishedAt != nil {\n\t\tftm := time.Unix(*r.Status.FinishedAt, 0)\n\t\tfmt.Printf(\"Finished: %v\\n\", ftm)\n\t}\n\tif r.Status.ExitCode != nil {\n\t\tfmt.Printf(\"Exit Code: %v\\n\", *r.Status.ExitCode)\n\t}\n\n\treturn nil\n}\n\n\/\/ ListContainers sends a ListContainerRequest to the server, and parses\n\/\/ the returned ListContainerResponse.\nfunc ListContainers(client pb.RuntimeServiceClient) error {\n\tr, err := client.ListContainers(context.Background(), &pb.ListContainersRequest{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, c := range r.GetContainers() {\n\t\tfmt.Printf(\"ID: %s\\n\", *c.Id)\n\t\tfmt.Printf(\"Pod: %s\\n\", *c.PodSandboxId)\n\t\tif c.State != nil {\n\t\t\tfmt.Printf(\"Status: %s\\n\", *c.State)\n\t\t}\n\t\tif c.CreatedAt != nil {\n\t\t\tctm := time.Unix(*c.CreatedAt, 0)\n\t\t\tfmt.Printf(\"Created: %v\\n\", ctm)\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"golang.org\/x\/crypto\/ssh\"\n\n\t\"code.cloudfoundry.org\/lager\"\n\t\"github.com\/concourse\/atc\"\n\t\"github.com\/concourse\/baggageclaim\/baggageclaimcmd\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/grouper\"\n\t\"github.com\/tedsuo\/ifrit\/restart\"\n\t\"github.com\/tedsuo\/ifrit\/sigmon\"\n)\n\ntype WorkerCommand struct {\n\tName string `long:\"name\" description:\"The name to set for the worker during registration. If not specified, the hostname will be used.\"`\n\tTags []string `long:\"tag\" description:\"A tag to set during registration. Can be specified multiple times.\"`\n\n\tTeamName string `long:\"team\" description:\"The name of the team that this worker will be assigned to.\"`\n\n\tHTTPProxy URLFlag `long:\"http-proxy\" env:\"http_proxy\" description:\"HTTP proxy endpoint to use for containers.\"`\n\tHTTPSProxy URLFlag `long:\"https-proxy\" env:\"https_proxy\" description:\"HTTPS proxy endpoint to use for containers.\"`\n\tNoProxy []string `long:\"no-proxy\" env:\"no_proxy\" env-delim:\",\" description:\"Blacklist of addresses to skip the proxy when reaching.\"`\n\n\tWorkDir string `long:\"work-dir\" required:\"true\" description:\"Directory in which to place container data.\"`\n\n\tBindIP IPFlag `long:\"bind-ip\" default:\"0.0.0.0\" description:\"IP address on which to listen for the Garden server.\"`\n\tBindPort uint16 `long:\"bind-port\" default:\"7777\" description:\"Port on which to listen for the Garden server.\"`\n\n\tPeerIP IPFlag `long:\"peer-ip\" description:\"IP used to reach this worker from the ATC nodes. If omitted, the worker will be forwarded through the SSH connection to the TSA.\"`\n\n\tGarden GardenBackend `group:\"Garden Configuration\" namespace:\"garden\"`\n\n\tBaggageclaim baggageclaimcmd.BaggageclaimCommand `group:\"Baggageclaim Configuration\" namespace:\"baggageclaim\"`\n\n\tTSA BeaconConfig `group:\"TSA Configuration\" namespace:\"tsa\"`\n\n\tMetrics struct {\n\t\tYellerAPIKey string `long:\"yeller-api-key\" description:\"Yeller API key. If specified, all errors logged will be emitted.\"`\n\t\tYellerEnvironment string `long:\"yeller-environment\" description:\"Environment to tag on all Yeller events emitted.\"`\n\t} `group:\"Metrics & Diagnostics\"`\n}\n\nfunc (cmd *WorkerCommand) Execute(args []string) error {\n\tlogger := lager.NewLogger(\"worker\")\n\tlogger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.INFO))\n\n\tworker, gardenRunner, err := cmd.gardenRunner(logger.Session(\"garden\"), args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbaggageclaimRunner, err := cmd.baggageclaimRunner(logger.Session(\"baggageclaim\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmembers := grouper.Members{\n\t\t{\n\t\t\tName: \"garden\",\n\t\t\tRunner: gardenRunner,\n\t\t},\n\t\t{\n\t\t\tName: \"baggageclaim\",\n\t\t\tRunner: baggageclaimRunner,\n\t\t},\n\t}\n\n\tif cmd.TSA.WorkerPrivateKey != \"\" {\n\t\tmembers = append(members, grouper.Member{\n\t\t\tName: \"beacon\",\n\t\t\tRunner: cmd.beaconRunner(logger.Session(\"beacon\"), worker),\n\t\t})\n\t}\n\n\trunner := sigmon.New(grouper.NewParallel(os.Interrupt, members))\n\n\treturn <-ifrit.Invoke(runner).Wait()\n}\n\nfunc (cmd *WorkerCommand) workerName() (string, error) {\n\tif cmd.Name != \"\" {\n\t\treturn cmd.Name, nil\n\t}\n\n\treturn os.Hostname()\n}\n\nfunc (cmd *WorkerCommand) beaconRunner(logger lager.Logger, worker atc.Worker) ifrit.Runner {\n\tbeacon := Beacon{\n\t\tLogger: logger,\n\t\tConfig: cmd.TSA,\n\t}\n\n\tvar beaconRunner ifrit.RunFunc\n\tif cmd.PeerIP != nil {\n\t\tworker.GardenAddr = fmt.Sprintf(\"%s:%d\", cmd.PeerIP.IP(), cmd.BindPort)\n\t\tworker.BaggageclaimURL = fmt.Sprintf(\"http:\/\/%s:%d\", cmd.PeerIP.IP(), cmd.Baggageclaim.BindPort)\n\t\tbeaconRunner = beacon.Register\n\t} else {\n\t\tworker.GardenAddr = fmt.Sprintf(\"%s:%d\", cmd.BindIP.IP(), cmd.BindPort)\n\t\tworker.BaggageclaimURL = fmt.Sprintf(\"http:\/\/%s:%d\", cmd.Baggageclaim.BindIP.IP(), cmd.Baggageclaim.BindPort)\n\t\tbeaconRunner = beacon.Forward\n\t}\n\n\tbeacon.Worker = worker\n\n\treturn restart.Restarter{\n\t\tRunner: beaconRunner,\n\t\tLoad: func(prevRunner ifrit.Runner, prevErr error) ifrit.Runner {\n\t\t\tif _, ok := prevErr.(*ssh.ExitError); !ok {\n\t\t\t\tlogger.Error(\"restarting\", prevErr)\n\t\t\t\ttime.Sleep(5 * time.Second)\n\t\t\t\treturn beaconRunner\n\t\t\t}\n\n\t\t\treturn nil\n\t\t},\n\t}\n}\n<commit_msg>default worker bind IP to 127.0.0.1, not 0.0.0.0<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"golang.org\/x\/crypto\/ssh\"\n\n\t\"code.cloudfoundry.org\/lager\"\n\t\"github.com\/concourse\/atc\"\n\t\"github.com\/concourse\/baggageclaim\/baggageclaimcmd\"\n\t\"github.com\/tedsuo\/ifrit\"\n\t\"github.com\/tedsuo\/ifrit\/grouper\"\n\t\"github.com\/tedsuo\/ifrit\/restart\"\n\t\"github.com\/tedsuo\/ifrit\/sigmon\"\n)\n\ntype WorkerCommand struct {\n\tName string `long:\"name\" description:\"The name to set for the worker during registration. If not specified, the hostname will be used.\"`\n\tTags []string `long:\"tag\" description:\"A tag to set during registration. Can be specified multiple times.\"`\n\n\tTeamName string `long:\"team\" description:\"The name of the team that this worker will be assigned to.\"`\n\n\tHTTPProxy URLFlag `long:\"http-proxy\" env:\"http_proxy\" description:\"HTTP proxy endpoint to use for containers.\"`\n\tHTTPSProxy URLFlag `long:\"https-proxy\" env:\"https_proxy\" description:\"HTTPS proxy endpoint to use for containers.\"`\n\tNoProxy []string `long:\"no-proxy\" env:\"no_proxy\" env-delim:\",\" description:\"Blacklist of addresses to skip the proxy when reaching.\"`\n\n\tWorkDir string `long:\"work-dir\" required:\"true\" description:\"Directory in which to place container data.\"`\n\n\tBindIP IPFlag `long:\"bind-ip\" default:\"127.0.0.1\" description:\"IP address on which to listen for the Garden server.\"`\n\tBindPort uint16 `long:\"bind-port\" default:\"7777\" description:\"Port on which to listen for the Garden server.\"`\n\n\tPeerIP IPFlag `long:\"peer-ip\" description:\"IP used to reach this worker from the ATC nodes. If omitted, the worker will be forwarded through the SSH connection to the TSA.\"`\n\n\tGarden GardenBackend `group:\"Garden Configuration\" namespace:\"garden\"`\n\n\tBaggageclaim baggageclaimcmd.BaggageclaimCommand `group:\"Baggageclaim Configuration\" namespace:\"baggageclaim\"`\n\n\tTSA BeaconConfig `group:\"TSA Configuration\" namespace:\"tsa\"`\n\n\tMetrics struct {\n\t\tYellerAPIKey string `long:\"yeller-api-key\" description:\"Yeller API key. If specified, all errors logged will be emitted.\"`\n\t\tYellerEnvironment string `long:\"yeller-environment\" description:\"Environment to tag on all Yeller events emitted.\"`\n\t} `group:\"Metrics & Diagnostics\"`\n}\n\nfunc (cmd *WorkerCommand) Execute(args []string) error {\n\tlogger := lager.NewLogger(\"worker\")\n\tlogger.RegisterSink(lager.NewWriterSink(os.Stdout, lager.INFO))\n\n\tworker, gardenRunner, err := cmd.gardenRunner(logger.Session(\"garden\"), args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbaggageclaimRunner, err := cmd.baggageclaimRunner(logger.Session(\"baggageclaim\"))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmembers := grouper.Members{\n\t\t{\n\t\t\tName: \"garden\",\n\t\t\tRunner: gardenRunner,\n\t\t},\n\t\t{\n\t\t\tName: \"baggageclaim\",\n\t\t\tRunner: baggageclaimRunner,\n\t\t},\n\t}\n\n\tif cmd.TSA.WorkerPrivateKey != \"\" {\n\t\tmembers = append(members, grouper.Member{\n\t\t\tName: \"beacon\",\n\t\t\tRunner: cmd.beaconRunner(logger.Session(\"beacon\"), worker),\n\t\t})\n\t}\n\n\trunner := sigmon.New(grouper.NewParallel(os.Interrupt, members))\n\n\treturn <-ifrit.Invoke(runner).Wait()\n}\n\nfunc (cmd *WorkerCommand) workerName() (string, error) {\n\tif cmd.Name != \"\" {\n\t\treturn cmd.Name, nil\n\t}\n\n\treturn os.Hostname()\n}\n\nfunc (cmd *WorkerCommand) beaconRunner(logger lager.Logger, worker atc.Worker) ifrit.Runner {\n\tbeacon := Beacon{\n\t\tLogger: logger,\n\t\tConfig: cmd.TSA,\n\t}\n\n\tvar beaconRunner ifrit.RunFunc\n\tif cmd.PeerIP != nil {\n\t\tworker.GardenAddr = fmt.Sprintf(\"%s:%d\", cmd.PeerIP.IP(), cmd.BindPort)\n\t\tworker.BaggageclaimURL = fmt.Sprintf(\"http:\/\/%s:%d\", cmd.PeerIP.IP(), cmd.Baggageclaim.BindPort)\n\t\tbeaconRunner = beacon.Register\n\t} else {\n\t\tworker.GardenAddr = fmt.Sprintf(\"%s:%d\", cmd.BindIP.IP(), cmd.BindPort)\n\t\tworker.BaggageclaimURL = fmt.Sprintf(\"http:\/\/%s:%d\", cmd.Baggageclaim.BindIP.IP(), cmd.Baggageclaim.BindPort)\n\t\tbeaconRunner = beacon.Forward\n\t}\n\n\tbeacon.Worker = worker\n\n\treturn restart.Restarter{\n\t\tRunner: beaconRunner,\n\t\tLoad: func(prevRunner ifrit.Runner, prevErr error) ifrit.Runner {\n\t\t\tif _, ok := prevErr.(*ssh.ExitError); !ok {\n\t\t\t\tlogger.Error(\"restarting\", prevErr)\n\t\t\t\ttime.Sleep(5 * time.Second)\n\t\t\t\treturn beaconRunner\n\t\t\t}\n\n\t\t\treturn nil\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package notmain\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/letsencrypt\/boulder\/cmd\"\n\t\"github.com\/letsencrypt\/boulder\/crl\/crl_x509\"\n\t\"github.com\/letsencrypt\/boulder\/issuance\"\n\t\"github.com\/letsencrypt\/boulder\/linter\"\n\tcrlint \"github.com\/letsencrypt\/boulder\/linter\/lints\/crl\"\n)\n\nfunc downloadShard(url string) (*crl_x509.RevocationList, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"downloading crl: %w\", err)\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"downloading crl: http status %d\", resp.StatusCode)\n\t}\n\n\tcrlBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reading CRL bytes: %w\", err)\n\t}\n\n\tcrl, err := crl_x509.ParseRevocationList(crlBytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing CRL: %w\", err)\n\t}\n\n\treturn crl, nil\n}\n\nfunc validateShard(crl *crl_x509.RevocationList, issuer *issuance.Certificate, ageLimit time.Duration) error {\n\terr := linter.ProcessResultSet(crlint.LintCRL(crl))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"linting CRL: %w\", err)\n\t}\n\n\terr = crl.CheckSignatureFrom(issuer.Certificate)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"checking CRL signature: %w\", err)\n\t}\n\n\tif time.Since(crl.ThisUpdate) >= ageLimit {\n\t\treturn fmt.Errorf(\"thisUpdate more than %s in the past: %v\", ageLimit, crl.ThisUpdate)\n\t}\n\n\treturn nil\n}\n\nfunc main() {\n\turlFile := flag.String(\"crls\", \"\", \"path to a file containing a JSON Array of CRL URLs\")\n\tissuerFile := flag.String(\"issuer\", \"\", \"path to an issuer certificate on disk\")\n\tageLimitStr := flag.String(\"ageLimit\", \"168h\", \"maximum allowable age of a CRL shard\")\n\temitRevoked := flag.Bool(\"emitRevoked\", false, \"emit revoked serial numbers on stdout, one per line, hex-encoded\")\n\tflag.Parse()\n\n\tlogger := cmd.NewLogger(cmd.SyslogConfig{StdoutLevel: 6, SyslogLevel: -1})\n\n\turlFileContents, err := os.ReadFile(*urlFile)\n\tcmd.FailOnError(err, \"Reading CRL URLs file\")\n\n\tvar urls []string\n\terr = json.Unmarshal(urlFileContents, &urls)\n\tcmd.FailOnError(err, \"Parsing JSON Array of CRL URLs\")\n\n\tissuer, err := issuance.LoadCertificate(*issuerFile)\n\tcmd.FailOnError(err, \"Loading issuer certificate\")\n\n\tageLimit, err := time.ParseDuration(*ageLimitStr)\n\tcmd.FailOnError(err, \"Parsing age limit\")\n\n\terrCount := 0\n\tfor _, url := range urls {\n\t\tcrl, err := downloadShard(url)\n\t\tif err != nil {\n\t\t\terrCount += 1\n\t\t\tlogger.Errf(\"fetching CRL %q failed: %s\", url, err)\n\t\t\tcontinue\n\t\t}\n\n\t\terr = validateShard(crl, issuer, ageLimit)\n\t\tif err != nil {\n\t\t\terrCount += 1\n\t\t\tlogger.Errf(\"checking CRL %q failed: %s\", url, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif *emitRevoked {\n\t\t\tfor _, c := range crl.RevokedCertificates {\n\t\t\t\tfmt.Printf(\"%x\\n\", c.SerialNumber)\n\t\t\t}\n\t\t}\n\t}\n\n\tif errCount != 0 {\n\t\tcmd.Fail(fmt.Sprintf(\"Encountered %d errors\", errCount))\n\t}\n\tlogger.AuditInfo(\"All CRLs validated\")\n}\n\nfunc init() {\n\tcmd.RegisterCommand(\"crl-checker\", main)\n}\n<commit_msg>crl-checker: check for duplicate serials (#6418)<commit_after>package notmain\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/letsencrypt\/boulder\/cmd\"\n\t\"github.com\/letsencrypt\/boulder\/core\"\n\t\"github.com\/letsencrypt\/boulder\/crl\/crl_x509\"\n\t\"github.com\/letsencrypt\/boulder\/issuance\"\n\t\"github.com\/letsencrypt\/boulder\/linter\"\n\tcrlint \"github.com\/letsencrypt\/boulder\/linter\/lints\/crl\"\n)\n\nfunc downloadShard(url string) (*crl_x509.RevocationList, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"downloading crl: %w\", err)\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"downloading crl: http status %d\", resp.StatusCode)\n\t}\n\n\tcrlBytes, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reading CRL bytes: %w\", err)\n\t}\n\n\tcrl, err := crl_x509.ParseRevocationList(crlBytes)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing CRL: %w\", err)\n\t}\n\n\treturn crl, nil\n}\n\nfunc validateShard(crl *crl_x509.RevocationList, issuer *issuance.Certificate, ageLimit time.Duration) error {\n\terr := linter.ProcessResultSet(crlint.LintCRL(crl))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"linting CRL: %w\", err)\n\t}\n\n\terr = crl.CheckSignatureFrom(issuer.Certificate)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"checking CRL signature: %w\", err)\n\t}\n\n\tif time.Since(crl.ThisUpdate) >= ageLimit {\n\t\treturn fmt.Errorf(\"thisUpdate more than %s in the past: %v\", ageLimit, crl.ThisUpdate)\n\t}\n\n\treturn nil\n}\n\nfunc main() {\n\turlFile := flag.String(\"crls\", \"\", \"path to a file containing a JSON Array of CRL URLs\")\n\tissuerFile := flag.String(\"issuer\", \"\", \"path to an issuer certificate on disk\")\n\tageLimitStr := flag.String(\"ageLimit\", \"168h\", \"maximum allowable age of a CRL shard\")\n\temitRevoked := flag.Bool(\"emitRevoked\", false, \"emit revoked serial numbers on stdout, one per line, hex-encoded\")\n\tflag.Parse()\n\n\tlogger := cmd.NewLogger(cmd.SyslogConfig{StdoutLevel: 6, SyslogLevel: -1})\n\n\turlFileContents, err := os.ReadFile(*urlFile)\n\tcmd.FailOnError(err, \"Reading CRL URLs file\")\n\n\tvar urls []string\n\terr = json.Unmarshal(urlFileContents, &urls)\n\tcmd.FailOnError(err, \"Parsing JSON Array of CRL URLs\")\n\n\tissuer, err := issuance.LoadCertificate(*issuerFile)\n\tcmd.FailOnError(err, \"Loading issuer certificate\")\n\n\tageLimit, err := time.ParseDuration(*ageLimitStr)\n\tcmd.FailOnError(err, \"Parsing age limit\")\n\n\terrCount := 0\n\tseenSerials := make(map[string]struct{})\n\tfor _, url := range urls {\n\t\tcrl, err := downloadShard(url)\n\t\tif err != nil {\n\t\t\terrCount += 1\n\t\t\tlogger.Errf(\"fetching CRL %q failed: %s\", url, err)\n\t\t\tcontinue\n\t\t}\n\n\t\terr = validateShard(crl, issuer, ageLimit)\n\t\tif err != nil {\n\t\t\terrCount += 1\n\t\t\tlogger.Errf(\"checking CRL %q failed: %s\", url, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, c := range crl.RevokedCertificates {\n\t\t\tserial := core.SerialToString(c.SerialNumber)\n\t\t\tif _, seen := seenSerials[serial]; seen {\n\t\t\t\terrCount += 1\n\t\t\t\tlogger.Errf(\"serial seen in multiple shards: %s\", serial)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tseenSerials[serial] = struct{}{}\n\t\t}\n\t}\n\n\tif *emitRevoked {\n\t\tfor serial := range seenSerials {\n\t\t\tfmt.Println(serial)\n\t\t}\n\t}\n\n\tif errCount != 0 {\n\t\tcmd.Fail(fmt.Sprintf(\"Encountered %d errors\", errCount))\n\t}\n\tlogger.AuditInfo(\"All CRLs validated\")\n}\n\nfunc init() {\n\tcmd.RegisterCommand(\"crl-checker\", main)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\t\"unsafe\"\n\n\t\"code.cloudfoundry.org\/garden\"\n\t\"code.cloudfoundry.org\/guardian\/rundmc\/execrunner\/dadoo\"\n\n\t\"github.com\/eapache\/go-resiliency\/retrier\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/system\"\n\n\tcmsg \"github.com\/opencontainers\/runc\/libcontainer\/utils\"\n)\n\nconst (\n\tMaxSocketDirPathLength = 80\n\tRuncExecTimeout = time.Second * 5\n)\n\nfunc main() {\n\tos.Exit(run())\n}\n\nfunc run() int {\n\ttty := flag.Bool(\"tty\", false, \"tty requested\")\n\tsocketDirPath := flag.String(\"socket-dir-path\", \"\", \"path to a dir in which to store console sockets\")\n\tflag.Parse()\n\n\trunMode := flag.Args()[0] \/\/ exec or run\n\truntime := flag.Args()[1] \/\/ e.g. runc\n\tprocessStateDir := flag.Args()[2]\n\tcontainerId := flag.Args()[3]\n\n\tsignals := make(chan os.Signal, 100)\n\tsignal.Notify(signals, syscall.SIGCHLD)\n\n\truncExitCodePipe := os.NewFile(3, \"\/proc\/self\/fd\/3\")\n\tlogFile := fmt.Sprintf(\"\/proc\/%d\/fd\/4\", os.Getpid())\n\tlogFD := os.NewFile(4, \"\/proc\/self\/fd\/4\")\n\tsyncPipe := os.NewFile(5, \"\/proc\/self\/fd\/5\")\n\tpidFilePath := filepath.Join(processStateDir, \"pidfile\")\n\n\tstdinR, stdoutW, stderrW, err := openStdioAndExitFifos(processStateDir)\n\tdefer closeFile(stdinR, stdoutW, stderrW)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn 2\n\t}\n\n\tsyncPipe.Write([]byte{0})\n\n\tstdoutR, stderrR, err := openStdioKeepAlivePipes(processStateDir)\n\tdefer closeFile(stdoutR, stderrR)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn 2\n\t}\n\n\tioWg := &sync.WaitGroup{}\n\tvar runcExecCmd *exec.Cmd\n\tif *tty {\n\t\twinsz, err := openFile(filepath.Join(processStateDir, \"winsz\"), os.O_RDWR)\n\t\tdefer closeFile(winsz)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn 2\n\t\t}\n\n\t\tif len(*socketDirPath) > MaxSocketDirPathLength {\n\t\t\treturn logAndExit(fmt.Sprintf(\"value for --socket-dir-path cannot exceed %d characters in length\", MaxSocketDirPathLength))\n\t\t}\n\t\tttySocketPath := setupTTYSocket(stdinR, stdoutW, winsz, pidFilePath, *socketDirPath, ioWg)\n\t\truncExecCmd = dadoo.BuildRuncCommand(runtime, runMode, processStateDir, containerId, ttySocketPath, logFile)\n\t} else {\n\t\truncExecCmd = dadoo.BuildRuncCommand(runtime, runMode, processStateDir, containerId, \"\", logFile)\n\t\truncExecCmd.Stdin = stdinR\n\t\truncExecCmd.Stdout = stdoutW\n\t\truncExecCmd.Stderr = stderrW\n\t}\n\n\t\/\/ we need to be the subreaper so we can wait on the detached container process\n\tsystem.SetSubreaper(os.Getpid())\n\n\tif err := runcExecCmd.Start(); err != nil {\n\t\truncExitCodePipe.Write([]byte{2})\n\t\treturn 2\n\t}\n\n\truncExitStatus := awaitRuncExit(runcExecCmd.Process)\n\tlogFD.Close() \/\/ No more logs from runc so close fd\n\n\t\/\/ also check that masterFD is received and streaming or whatevs\n\truncExitCodePipe.Write([]byte{byte(runcExitStatus)})\n\tif runcExitStatus != 0 {\n\t\treturn 3 \/\/ nothing to wait for, container didn't launch\n\t}\n\n\tcontainerPid, err := parsePid(pidFilePath)\n\tcheck(err)\n\n\treturn waitForContainerToExit(processStateDir, containerPid, signals, ioWg)\n}\n\nfunc awaitRuncExit(runcProc *os.Process) int {\n\truncExitStatusCh := make(chan int)\n\tgo func() {\n\t\tvar status syscall.WaitStatus\n\t\tvar rusage syscall.Rusage\n\t\t_, err := syscall.Wait4(runcProc.Pid, &status, 0, &rusage)\n\t\tcheck(err) \/\/ Start succeeded but Wait4 failed, this can only be a programmer error\n\t\truncExitStatusCh <- status.ExitStatus()\n\t}()\n\n\t\/\/ Dadoo is waiting for `runc {exec|run} -d` to exit. This runc process has a\n\t\/\/ child, runc[2:INIT], that will exec and become the user process. Just\n\t\/\/ before execing, it reads the process metadata from a fifo. Once it's done\n\t\/\/ this, its parent (runc -d) unblocks from pipe-opening and can exit.\n\t\/\/\n\t\/\/ There is a race between this run operation and concurrent deletion of the\n\t\/\/ same garden container. When deleting the container, runc SIGKILLs the\n\t\/\/ container init process. Since this init process is PID1 in a pidns, the\n\t\/\/ kernel first SIGKILLs all other members of this pidns. This includes\n\t\/\/ runc[2:INIT], but not runc exec -d. Runc exec -d is not waiting on\n\t\/\/ runc[2:INIT], so it becomes a zombie. Runc exec -d will never unblock,\n\t\/\/ because no process will come along to open the other end of the fifo.\n\t\/\/\n\t\/\/ Runc delete times out after 10 seconds, if the container init is still\n\t\/\/ alive. It will still be alive, because the kernel won't kill it until all\n\t\/\/ the zombies in the same pidns are reaped. We resolve this race by killing\n\t\/\/ runc exec -d after 5 seconds.\n\t\/\/\n\t\/\/ https:\/\/www.pivotaltracker.com\/story\/show\/154242239\n\tselect {\n\tcase runcExitStatus := <-runcExitStatusCh:\n\t\treturn runcExitStatus\n\tcase <-time.After(RuncExecTimeout):\n\t\tcheck(runcProc.Kill())\n\t\treturn <-runcExitStatusCh\n\t}\n}\n\n\/\/ If gdn server process dies, we need dadoo to keep stdout\/err reader\n\/\/ FDs so that Linux does not SIGPIPE the user process if it tries to use its end of\n\/\/ these pipes.\nfunc openStdioKeepAlivePipes(processStateDir string) (io.ReadCloser, io.ReadCloser, error) {\n\tkeepStdoutAlive, err := openFile(filepath.Join(processStateDir, \"stdout\"), os.O_RDONLY)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tkeepStderrAlive, err := openFile(filepath.Join(processStateDir, \"stderr\"), os.O_RDONLY)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn keepStdoutAlive, keepStderrAlive, nil\n}\n\nfunc waitForContainerToExit(processStateDir string, containerPid int, signals chan os.Signal, ioWg *sync.WaitGroup) (exitCode int) {\n\tfor range signals {\n\t\tfor {\n\t\t\tvar status syscall.WaitStatus\n\t\t\tvar rusage syscall.Rusage\n\t\t\twpid, err := syscall.Wait4(-1, &status, syscall.WNOHANG, &rusage)\n\t\t\tif err != nil || wpid <= 0 {\n\t\t\t\tbreak \/\/ wait for next SIGCHLD\n\t\t\t}\n\n\t\t\tif wpid == containerPid {\n\t\t\t\texitCode = status.ExitStatus()\n\t\t\t\tif status.Signaled() {\n\t\t\t\t\texitCode = 128 + int(status.Signal())\n\t\t\t\t}\n\n\t\t\t\tioWg.Wait() \/\/ wait for full output to be collected\n\n\t\t\t\tcheck(ioutil.WriteFile(filepath.Join(processStateDir, \"exitcode\"), []byte(strconv.Itoa(exitCode)), 0600))\n\t\t\t\treturn exitCode\n\t\t\t}\n\t\t}\n\t}\n\n\treturn logAndExit(\"ran out of signals\") \/\/ cant happen\n}\n\nfunc openStdioAndExitFifos(processStateDir string) (io.ReadCloser, io.WriteCloser, io.WriteCloser, error) {\n\tstdin, err := openFile(filepath.Join(processStateDir, \"stdin\"), os.O_RDONLY)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tstdout, err := openFile(filepath.Join(processStateDir, \"stdout\"), os.O_WRONLY)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tstderr, err := openFile(filepath.Join(processStateDir, \"stderr\"), os.O_WRONLY)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\t\/\/ open just so guardian can detect it being closed when we exit\n\tif _, err := openFile(filepath.Join(processStateDir, \"exit\"), os.O_RDWR); err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\treturn stdin, stdout, stderr, nil\n}\n\nfunc openFile(path string, flags int) (*os.File, error) {\n\treturn os.OpenFile(path, flags, 0600)\n}\n\nfunc setupTTYSocket(stdin io.Reader, stdout io.Writer, winszFifo io.Reader, pidFilePath, sockDirBase string, ioWg *sync.WaitGroup) string {\n\tsockDir, err := ioutil.TempDir(sockDirBase, \"\")\n\tcheck(err)\n\n\tttySockPath := filepath.Join(sockDir, \"tty.sock\")\n\tl, err := net.Listen(\"unix\", ttySockPath)\n\tcheck(err)\n\n\t\/\/go to the background and set master\n\tgo func(ln net.Listener) (err error) {\n\t\t\/\/ if any of the following errors, it means runc has connected to the\n\t\t\/\/ socket, so it must've started, thus we might need to kill the process\n\t\tdefer func() {\n\t\t\tif err != nil {\n\t\t\t\tkillProcess(pidFilePath)\n\t\t\t\tcheck(err)\n\t\t\t}\n\t\t}()\n\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer conn.Close()\n\n\t\t\/\/ Close ln, to allow for other instances to take over.\n\t\tln.Close()\n\n\t\t\/\/ Get the fd of the connection.\n\t\tunixconn, ok := conn.(*net.UnixConn)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\tsocket, err := unixconn.File()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer socket.Close()\n\n\t\t\/\/ Get the master file descriptor from runC.\n\t\tmaster, err := cmsg.RecvFd(socket)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif err = os.RemoveAll(sockDir); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif err = setOnlcr(master); err != nil {\n\t\t\treturn\n\t\t}\n\t\tstreamProcess(master, stdin, stdout, winszFifo, ioWg)\n\n\t\treturn\n\t}(l)\n\n\treturn ttySockPath\n}\n\nfunc streamProcess(m *os.File, stdin io.Reader, stdout io.Writer, winszFifo io.Reader, ioWg *sync.WaitGroup) {\n\tioWg.Add(1)\n\tgo func() {\n\t\tdefer ioWg.Done()\n\t\tio.Copy(stdout, m)\n\t}()\n\n\tgo io.Copy(m, stdin)\n\n\tgo func() {\n\t\tfor {\n\t\t\tvar winSize garden.WindowSize\n\t\t\tif err := json.NewDecoder(winszFifo).Decode(&winSize); err != nil {\n\t\t\t\tfmt.Printf(\"invalid winsz event: %s\\n\", err)\n\t\t\t\tcontinue \/\/ not much we can do here..\n\t\t\t}\n\t\t\tdadoo.SetWinSize(m, winSize)\n\t\t}\n\t}()\n}\n\nfunc killProcess(pidFilePath string) {\n\tpid, err := readPid(pidFilePath)\n\tif err == nil {\n\t\tsyscall.Kill(pid, syscall.SIGKILL)\n\t}\n}\n\nfunc readPid(pidFilePath string) (int, error) {\n\tretrier := retrier.New(retrier.ConstantBackoff(20, 500*time.Millisecond), nil)\n\tvar (\n\t\tpid = -1\n\t\terr error\n\t)\n\tretrier.Run(func() error {\n\t\tpid, err = parsePid(pidFilePath)\n\t\treturn err\n\t})\n\n\treturn pid, err\n}\n\nfunc parsePid(pidFile string) (int, error) {\n\tb, err := ioutil.ReadFile(pidFile)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tvar pid int\n\tif _, err := fmt.Sscanf(string(b), \"%d\", &pid); err != nil {\n\t\treturn -1, err\n\t}\n\n\treturn pid, nil\n}\n\nfunc logAndExit(msg string) int {\n\tfmt.Println(msg)\n\treturn 2\n}\n\nfunc check(err error) {\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(2)\n\t}\n}\n\nfunc closeFile(closers ...io.Closer) {\n\tfor _, closer := range closers {\n\t\tcloser.Close()\n\t}\n}\n\n\/\/ setOnlcr copied from runc\n\/\/ https:\/\/github.com\/cloudfoundry-incubator\/runc\/blob\/02ec89829b24dfce45bb207d2344e0e6d078a93c\/libcontainer\/console_linux.go#L144-L160\nfunc setOnlcr(terminal *os.File) error {\n\tvar termios syscall.Termios\n\n\tif err := ioctl(terminal.Fd(), syscall.TCGETS, uintptr(unsafe.Pointer(&termios))); err != nil {\n\t\treturn fmt.Errorf(\"ioctl(tty, tcgets): %s\", err.Error())\n\t}\n\n\ttermios.Oflag |= syscall.ONLCR\n\n\tif err := ioctl(terminal.Fd(), syscall.TCSETS, uintptr(unsafe.Pointer(&termios))); err != nil {\n\t\treturn fmt.Errorf(\"ioctl(tty, tcsets): %s\", err.Error())\n\t}\n\n\treturn nil\n}\n\nfunc ioctl(fd uintptr, flag, data uintptr) error {\n\tif _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, flag, data); err != 0 {\n\t\treturn err\n\t}\n\treturn nil\n}\n<commit_msg>Add logging to dadoo on runc timeout<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\t\"unsafe\"\n\n\t\"code.cloudfoundry.org\/garden\"\n\t\"code.cloudfoundry.org\/guardian\/rundmc\/execrunner\/dadoo\"\n\n\t\"github.com\/eapache\/go-resiliency\/retrier\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/system\"\n\n\tcmsg \"github.com\/opencontainers\/runc\/libcontainer\/utils\"\n)\n\nconst (\n\tMaxSocketDirPathLength = 80\n\tRuncExecTimeout = time.Second * 5\n)\n\nfunc main() {\n\tos.Exit(run())\n}\n\nfunc run() int {\n\ttty := flag.Bool(\"tty\", false, \"tty requested\")\n\tsocketDirPath := flag.String(\"socket-dir-path\", \"\", \"path to a dir in which to store console sockets\")\n\tflag.Parse()\n\n\trunMode := flag.Args()[0] \/\/ exec or run\n\truntime := flag.Args()[1] \/\/ e.g. runc\n\tprocessStateDir := flag.Args()[2]\n\tcontainerId := flag.Args()[3]\n\n\tsignals := make(chan os.Signal, 100)\n\tsignal.Notify(signals, syscall.SIGCHLD)\n\n\truncExitCodePipe := os.NewFile(3, \"\/proc\/self\/fd\/3\")\n\tlogFile := fmt.Sprintf(\"\/proc\/%d\/fd\/4\", os.Getpid())\n\tlogFD := os.NewFile(4, \"\/proc\/self\/fd\/4\")\n\tsyncPipe := os.NewFile(5, \"\/proc\/self\/fd\/5\")\n\tpidFilePath := filepath.Join(processStateDir, \"pidfile\")\n\n\tstdinR, stdoutW, stderrW, err := openStdioAndExitFifos(processStateDir)\n\tdefer closeFile(stdinR, stdoutW, stderrW)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn 2\n\t}\n\n\tsyncPipe.Write([]byte{0})\n\n\tstdoutR, stderrR, err := openStdioKeepAlivePipes(processStateDir)\n\tdefer closeFile(stdoutR, stderrR)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn 2\n\t}\n\n\tioWg := &sync.WaitGroup{}\n\tvar runcExecCmd *exec.Cmd\n\tif *tty {\n\t\twinsz, err := openFile(filepath.Join(processStateDir, \"winsz\"), os.O_RDWR)\n\t\tdefer closeFile(winsz)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn 2\n\t\t}\n\n\t\tif len(*socketDirPath) > MaxSocketDirPathLength {\n\t\t\treturn logAndExit(fmt.Sprintf(\"value for --socket-dir-path cannot exceed %d characters in length\", MaxSocketDirPathLength))\n\t\t}\n\t\tttySocketPath := setupTTYSocket(stdinR, stdoutW, winsz, pidFilePath, *socketDirPath, ioWg)\n\t\truncExecCmd = dadoo.BuildRuncCommand(runtime, runMode, processStateDir, containerId, ttySocketPath, logFile)\n\t} else {\n\t\truncExecCmd = dadoo.BuildRuncCommand(runtime, runMode, processStateDir, containerId, \"\", logFile)\n\t\truncExecCmd.Stdin = stdinR\n\t\truncExecCmd.Stdout = stdoutW\n\t\truncExecCmd.Stderr = stderrW\n\t}\n\n\t\/\/ we need to be the subreaper so we can wait on the detached container process\n\tsystem.SetSubreaper(os.Getpid())\n\n\tif err := runcExecCmd.Start(); err != nil {\n\t\truncExitCodePipe.Write([]byte{2})\n\t\treturn 2\n\t}\n\n\truncExitStatus := awaitRuncExit(runcExecCmd.Process)\n\tlogFD.Close() \/\/ No more logs from runc so close fd\n\n\t\/\/ also check that masterFD is received and streaming or whatevs\n\truncExitCodePipe.Write([]byte{byte(runcExitStatus)})\n\tif runcExitStatus != 0 {\n\t\treturn 3 \/\/ nothing to wait for, container didn't launch\n\t}\n\n\tcontainerPid, err := parsePid(pidFilePath)\n\tcheck(err)\n\n\treturn waitForContainerToExit(processStateDir, containerPid, signals, ioWg)\n}\n\nfunc awaitRuncExit(runcProc *os.Process) int {\n\truncExitStatusCh := make(chan int)\n\tgo func() {\n\t\tvar status syscall.WaitStatus\n\t\tvar rusage syscall.Rusage\n\t\t_, err := syscall.Wait4(runcProc.Pid, &status, 0, &rusage)\n\t\tcheck(err) \/\/ Start succeeded but Wait4 failed, this can only be a programmer error\n\t\truncExitStatusCh <- status.ExitStatus()\n\t}()\n\n\t\/\/ Dadoo is waiting for `runc {exec|run} -d` to exit. This runc process has a\n\t\/\/ child, runc[2:INIT], that will exec and become the user process. Just\n\t\/\/ before execing, it reads the process metadata from a fifo. Once it's done\n\t\/\/ this, its parent (runc -d) unblocks from pipe-opening and can exit.\n\t\/\/\n\t\/\/ There is a race between this run operation and concurrent deletion of the\n\t\/\/ same garden container. When deleting the container, runc SIGKILLs the\n\t\/\/ container init process. Since this init process is PID1 in a pidns, the\n\t\/\/ kernel first SIGKILLs all other members of this pidns. This includes\n\t\/\/ runc[2:INIT], but not runc exec -d. Runc exec -d is not waiting on\n\t\/\/ runc[2:INIT], so it becomes a zombie. Runc exec -d will never unblock,\n\t\/\/ because no process will come along to open the other end of the fifo.\n\t\/\/\n\t\/\/ Runc delete times out after 10 seconds, if the container init is still\n\t\/\/ alive. It will still be alive, because the kernel won't kill it until all\n\t\/\/ the zombies in the same pidns are reaped. We resolve this race by killing\n\t\/\/ runc exec -d after 5 seconds.\n\t\/\/\n\t\/\/ https:\/\/www.pivotaltracker.com\/story\/show\/154242239\n\tselect {\n\tcase runcExitStatus := <-runcExitStatusCh:\n\t\treturn runcExitStatus\n\tcase <-time.After(RuncExecTimeout):\n\t\tfmt.Printf(\"runc process with PID %d timed out after %d\\n\", runcProc.Pid, RuncExecTimeout)\n\t\tcheck(runcProc.Kill())\n\t\tfmt.Println(\"killed runc\")\n\t\treturn <-runcExitStatusCh\n\t}\n}\n\n\/\/ If gdn server process dies, we need dadoo to keep stdout\/err reader\n\/\/ FDs so that Linux does not SIGPIPE the user process if it tries to use its end of\n\/\/ these pipes.\nfunc openStdioKeepAlivePipes(processStateDir string) (io.ReadCloser, io.ReadCloser, error) {\n\tkeepStdoutAlive, err := openFile(filepath.Join(processStateDir, \"stdout\"), os.O_RDONLY)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tkeepStderrAlive, err := openFile(filepath.Join(processStateDir, \"stderr\"), os.O_RDONLY)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn keepStdoutAlive, keepStderrAlive, nil\n}\n\nfunc waitForContainerToExit(processStateDir string, containerPid int, signals chan os.Signal, ioWg *sync.WaitGroup) (exitCode int) {\n\tfor range signals {\n\t\tfor {\n\t\t\tvar status syscall.WaitStatus\n\t\t\tvar rusage syscall.Rusage\n\t\t\twpid, err := syscall.Wait4(-1, &status, syscall.WNOHANG, &rusage)\n\t\t\tif err != nil || wpid <= 0 {\n\t\t\t\tbreak \/\/ wait for next SIGCHLD\n\t\t\t}\n\n\t\t\tif wpid == containerPid {\n\t\t\t\texitCode = status.ExitStatus()\n\t\t\t\tif status.Signaled() {\n\t\t\t\t\texitCode = 128 + int(status.Signal())\n\t\t\t\t}\n\n\t\t\t\tioWg.Wait() \/\/ wait for full output to be collected\n\n\t\t\t\tcheck(ioutil.WriteFile(filepath.Join(processStateDir, \"exitcode\"), []byte(strconv.Itoa(exitCode)), 0600))\n\t\t\t\treturn exitCode\n\t\t\t}\n\t\t}\n\t}\n\n\treturn logAndExit(\"ran out of signals\") \/\/ cant happen\n}\n\nfunc openStdioAndExitFifos(processStateDir string) (io.ReadCloser, io.WriteCloser, io.WriteCloser, error) {\n\tstdin, err := openFile(filepath.Join(processStateDir, \"stdin\"), os.O_RDONLY)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tstdout, err := openFile(filepath.Join(processStateDir, \"stdout\"), os.O_WRONLY)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\tstderr, err := openFile(filepath.Join(processStateDir, \"stderr\"), os.O_WRONLY)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\t\/\/ open just so guardian can detect it being closed when we exit\n\tif _, err := openFile(filepath.Join(processStateDir, \"exit\"), os.O_RDWR); err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\treturn stdin, stdout, stderr, nil\n}\n\nfunc openFile(path string, flags int) (*os.File, error) {\n\treturn os.OpenFile(path, flags, 0600)\n}\n\nfunc setupTTYSocket(stdin io.Reader, stdout io.Writer, winszFifo io.Reader, pidFilePath, sockDirBase string, ioWg *sync.WaitGroup) string {\n\tsockDir, err := ioutil.TempDir(sockDirBase, \"\")\n\tcheck(err)\n\n\tttySockPath := filepath.Join(sockDir, \"tty.sock\")\n\tl, err := net.Listen(\"unix\", ttySockPath)\n\tcheck(err)\n\n\t\/\/go to the background and set master\n\tgo func(ln net.Listener) (err error) {\n\t\t\/\/ if any of the following errors, it means runc has connected to the\n\t\t\/\/ socket, so it must've started, thus we might need to kill the process\n\t\tdefer func() {\n\t\t\tif err != nil {\n\t\t\t\tkillProcess(pidFilePath)\n\t\t\t\tcheck(err)\n\t\t\t}\n\t\t}()\n\n\t\tconn, err := ln.Accept()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer conn.Close()\n\n\t\t\/\/ Close ln, to allow for other instances to take over.\n\t\tln.Close()\n\n\t\t\/\/ Get the fd of the connection.\n\t\tunixconn, ok := conn.(*net.UnixConn)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\n\t\tsocket, err := unixconn.File()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer socket.Close()\n\n\t\t\/\/ Get the master file descriptor from runC.\n\t\tmaster, err := cmsg.RecvFd(socket)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif err = os.RemoveAll(sockDir); err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif err = setOnlcr(master); err != nil {\n\t\t\treturn\n\t\t}\n\t\tstreamProcess(master, stdin, stdout, winszFifo, ioWg)\n\n\t\treturn\n\t}(l)\n\n\treturn ttySockPath\n}\n\nfunc streamProcess(m *os.File, stdin io.Reader, stdout io.Writer, winszFifo io.Reader, ioWg *sync.WaitGroup) {\n\tioWg.Add(1)\n\tgo func() {\n\t\tdefer ioWg.Done()\n\t\tio.Copy(stdout, m)\n\t}()\n\n\tgo io.Copy(m, stdin)\n\n\tgo func() {\n\t\tfor {\n\t\t\tvar winSize garden.WindowSize\n\t\t\tif err := json.NewDecoder(winszFifo).Decode(&winSize); err != nil {\n\t\t\t\tfmt.Printf(\"invalid winsz event: %s\\n\", err)\n\t\t\t\tcontinue \/\/ not much we can do here..\n\t\t\t}\n\t\t\tdadoo.SetWinSize(m, winSize)\n\t\t}\n\t}()\n}\n\nfunc killProcess(pidFilePath string) {\n\tpid, err := readPid(pidFilePath)\n\tif err == nil {\n\t\tsyscall.Kill(pid, syscall.SIGKILL)\n\t}\n}\n\nfunc readPid(pidFilePath string) (int, error) {\n\tretrier := retrier.New(retrier.ConstantBackoff(20, 500*time.Millisecond), nil)\n\tvar (\n\t\tpid = -1\n\t\terr error\n\t)\n\tretrier.Run(func() error {\n\t\tpid, err = parsePid(pidFilePath)\n\t\treturn err\n\t})\n\n\treturn pid, err\n}\n\nfunc parsePid(pidFile string) (int, error) {\n\tb, err := ioutil.ReadFile(pidFile)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tvar pid int\n\tif _, err := fmt.Sscanf(string(b), \"%d\", &pid); err != nil {\n\t\treturn -1, err\n\t}\n\n\treturn pid, nil\n}\n\nfunc logAndExit(msg string) int {\n\tfmt.Println(msg)\n\treturn 2\n}\n\nfunc check(err error) {\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(2)\n\t}\n}\n\nfunc closeFile(closers ...io.Closer) {\n\tfor _, closer := range closers {\n\t\tcloser.Close()\n\t}\n}\n\n\/\/ setOnlcr copied from runc\n\/\/ https:\/\/github.com\/cloudfoundry-incubator\/runc\/blob\/02ec89829b24dfce45bb207d2344e0e6d078a93c\/libcontainer\/console_linux.go#L144-L160\nfunc setOnlcr(terminal *os.File) error {\n\tvar termios syscall.Termios\n\n\tif err := ioctl(terminal.Fd(), syscall.TCGETS, uintptr(unsafe.Pointer(&termios))); err != nil {\n\t\treturn fmt.Errorf(\"ioctl(tty, tcgets): %s\", err.Error())\n\t}\n\n\ttermios.Oflag |= syscall.ONLCR\n\n\tif err := ioctl(terminal.Fd(), syscall.TCSETS, uintptr(unsafe.Pointer(&termios))); err != nil {\n\t\treturn fmt.Errorf(\"ioctl(tty, tcsets): %s\", err.Error())\n\t}\n\n\treturn nil\n}\n\nfunc ioctl(fd uintptr, flag, data uintptr) error {\n\tif _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, flag, data); err != 0 {\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"strings\"\n\n\t\"go\/format\"\n\t\"go\/token\"\n\n\t\"text\/template\"\n)\n\ntype globalData struct {\n\tInvocation string\n\tAdditionalImports string\n\n\tPackage string\n\tMapName string\n\tLockName string\n\tKeyType string\n\tFnType string\n\n\tArgs string\n\tReturnParams string\n\tErrorReturnVals string\n\tCallArgs string\n}\n\nfunc (g *Generator) generateGlobal(key string, fn string) {\n\targs := []string{}\n\tcallargs := []string{}\n\n\tfor _, field := range g.Params.List {\n\n\t\tbuf := bytes.NewBuffer([]byte(\"\"))\n\t\tformat.Node(buf, token.NewFileSet(), field.Type)\n\n\t\targs = append(args, field.Names[0].Name+\" \"+string(buf.Bytes()))\n\t\tcallargs = append(callargs, field.Names[0].Name)\n\t}\n\n\targStr := \"\"\n\tif len(args) > 0 {\n\t\targStr = strings.Join(args, \", \")\n\t}\n\n\tcallArgStr := \"\"\n\tif len(callargs) > 0 {\n\t\tcallArgStr = strings.Join(callargs, \", \")\n\t}\n\n\tretargs := []string{}\n\terrRetVals := []string{}\n\n\tfor _, field := range g.Results.List {\n\t\tbuf := bytes.NewBuffer([]byte(\"\"))\n\t\tformat.Node(buf, token.NewFileSet(), field.Type)\n\n\t\tt := string(buf.Bytes())\n\t\tswitch t {\n\t\tcase \"error\":\n\t\t\terrRetVals = append(errRetVals, \"errors.New(\\\"Can't find route\\\")\")\n\t\tcase \"int\", \"uint\", \"int32\", \"uint32\", \"uint16\", \"int16\", \"int8\", \"uint8\", \"byte\", \"char\", \"uint64\", \"int64\", \"float64\", \"float32\", \"float\":\n\t\t\terrRetVals = append(errRetVals, \"0\")\n\t\tcase \"string\":\n\t\t\terrRetVals = append(errRetVals, \"\\\"\\\"\")\n\t\tcase \"context.Context\":\n\t\t\terrRetVals = append(errRetVals, \"ctx\")\n\t\tdefault:\n\t\t\terrRetVals = append(errRetVals, \"nil\")\n\t\t}\n\n\t\tretargs = append(retargs, t)\n\t}\n\n\tretArgStr := \"(\" + strings.Join(retargs, \", \") + \")\"\n\terrRetStr := strings.Join(errRetVals, \", \")\n\n\tdata := globalData{\n\t\tInvocation: strings.Join(os.Args[1:], \" \"),\n\t\tPackage: g.pkg.name,\n\t\tMapName: strings.ToLower(fn) + \"s\",\n\t\tLockName: strings.ToLower(fn) + \"s\" + \"Lock\",\n\t\tKeyType: key,\n\t\tFnType: fn,\n\n\t\tArgs: argStr,\n\t\tReturnParams: retArgStr,\n\t\tErrorReturnVals: errRetStr,\n\t\tCallArgs: callArgStr,\n\t}\n\n\tt := template.Must(template.New(\"global\").Parse(globalTemplate))\n\n\terr := t.Execute(&g.buf, data)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n<commit_msg>genrouter - add ctx as an import for global, if needed<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"strings\"\n\n\t\"go\/format\"\n\t\"go\/token\"\n\n\t\"text\/template\"\n)\n\ntype globalData struct {\n\tInvocation string\n\tAdditionalImports string\n\n\tPackage string\n\tMapName string\n\tLockName string\n\tKeyType string\n\tFnType string\n\n\tArgs string\n\tReturnParams string\n\tErrorReturnVals string\n\tCallArgs string\n}\n\nfunc (g *Generator) generateGlobal(key string, fn string) {\n\targs := []string{}\n\tcallargs := []string{}\n\n\tfor _, field := range g.Params.List {\n\n\t\tbuf := bytes.NewBuffer([]byte(\"\"))\n\t\tformat.Node(buf, token.NewFileSet(), field.Type)\n\n\t\targs = append(args, field.Names[0].Name+\" \"+string(buf.Bytes()))\n\t\tcallargs = append(callargs, field.Names[0].Name)\n\t}\n\n\targStr := \"\"\n\tif len(args) > 0 {\n\t\targStr = strings.Join(args, \", \")\n\t}\n\n\tcallArgStr := \"\"\n\tif len(callargs) > 0 {\n\t\tcallArgStr = strings.Join(callargs, \", \")\n\t}\n\n\tretargs := []string{}\n\terrRetVals := []string{}\n\n\timports := \"\"\n\n\tfor _, field := range g.Results.List {\n\t\tbuf := bytes.NewBuffer([]byte(\"\"))\n\t\tformat.Node(buf, token.NewFileSet(), field.Type)\n\n\t\tt := string(buf.Bytes())\n\t\tswitch t {\n\t\tcase \"error\":\n\t\t\terrRetVals = append(errRetVals, \"errors.New(\\\"Can't find route\\\")\")\n\t\tcase \"int\", \"uint\", \"int32\", \"uint32\", \"uint16\", \"int16\", \"int8\", \"uint8\", \"byte\", \"char\", \"uint64\", \"int64\", \"float64\", \"float32\", \"float\":\n\t\t\terrRetVals = append(errRetVals, \"0\")\n\t\tcase \"string\":\n\t\t\terrRetVals = append(errRetVals, \"\\\"\\\"\")\n\t\tcase \"context.Context\":\n\t\t\timports = imports + \" \\\"golang.org\/x\/net\/context\\\" \"\n\t\t\terrRetVals = append(errRetVals, \"ctx\")\n\t\tdefault:\n\t\t\terrRetVals = append(errRetVals, \"nil\")\n\t\t}\n\n\t\tretargs = append(retargs, t)\n\t}\n\n\tretArgStr := \"(\" + strings.Join(retargs, \", \") + \")\"\n\terrRetStr := strings.Join(errRetVals, \", \")\n\n\tdata := globalData{\n\t\tInvocation: strings.Join(os.Args[1:], \" \"),\n\t\tPackage: g.pkg.name,\n\t\tMapName: strings.ToLower(fn) + \"s\",\n\t\tLockName: strings.ToLower(fn) + \"s\" + \"Lock\",\n\t\tKeyType: key,\n\t\tFnType: fn,\n\n\t\tAdditionalImports: imports,\n\n\t\tArgs: argStr,\n\t\tReturnParams: retArgStr,\n\t\tErrorReturnVals: errRetStr,\n\t\tCallArgs: callArgStr,\n\t}\n\n\tt := template.Must(template.New(\"global\").Parse(globalTemplate))\n\n\terr := t.Execute(&g.buf, data)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright The Helm Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/Masterminds\/semver\/v3\"\n\t\"github.com\/gosuri\/uitable\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"helm.sh\/helm\/v3\/cmd\/helm\/search\"\n\t\"helm.sh\/helm\/v3\/pkg\/cli\/output\"\n\t\"helm.sh\/helm\/v3\/pkg\/helmpath\"\n\t\"helm.sh\/helm\/v3\/pkg\/repo\"\n)\n\nconst searchRepoDesc = `\nSearch reads through all of the repositories configured on the system, and\nlooks for matches. Search of these repositories uses the metadata stored on\nthe system.\n\nIt will display the latest stable versions of the charts found. If you\nspecify the --devel flag, the output will include pre-release versions.\nIf you want to search using a version constraint, use --version.\n\nExamples:\n\n # Search for stable release versions matching the keyword \"nginx\"\n $ helm search repo nginx\n\n # Search for release versions matching the keyword \"nginx\", including pre-release versions\n $ helm search repo nginx --devel\n\n # Search for the latest patch release for nginx-ingress 1.x\n $ helm search repo nginx-ingress --version ^1.0.0\n\nRepositories are managed with 'helm repo' commands.\n`\n\n\/\/ searchMaxScore suggests that any score higher than this is not considered a match.\nconst searchMaxScore = 25\n\ntype searchRepoOptions struct {\n\tversions bool\n\tregexp bool\n\tdevel bool\n\tversion string\n\tmaxColWidth uint\n\trepoFile string\n\trepoCacheDir string\n\toutputFormat output.Format\n}\n\nfunc newSearchRepoCmd(out io.Writer) *cobra.Command {\n\to := &searchRepoOptions{}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"repo [keyword]\",\n\t\tShort: \"search repositories for a keyword in charts\",\n\t\tLong: searchRepoDesc,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\to.repoFile = settings.RepositoryConfig\n\t\t\to.repoCacheDir = settings.RepositoryCache\n\t\t\treturn o.run(out, args)\n\t\t},\n\t}\n\n\tf := cmd.Flags()\n\tf.BoolVarP(&o.regexp, \"regexp\", \"r\", false, \"use regular expressions for searching repositories you have added\")\n\tf.BoolVarP(&o.versions, \"versions\", \"l\", false, \"show the long listing, with each version of each chart on its own line, for repositories you have added\")\n\tf.BoolVar(&o.devel, \"devel\", false, \"use development versions (alpha, beta, and release candidate releases), too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored\")\n\tf.StringVar(&o.version, \"version\", \"\", \"search using semantic versioning constraints on repositories you have added\")\n\tf.UintVar(&o.maxColWidth, \"max-col-width\", 50, \"maximum column width for output table\")\n\tbindOutputFlag(cmd, &o.outputFormat)\n\n\treturn cmd\n}\n\nfunc (o *searchRepoOptions) run(out io.Writer, args []string) error {\n\to.setupSearchedVersion()\n\n\tindex, err := o.buildIndex(out)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar res []*search.Result\n\tif len(args) == 0 {\n\t\tres = index.All()\n\t} else {\n\t\tq := strings.Join(args, \" \")\n\t\tres, err = index.Search(q, searchMaxScore, o.regexp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tsearch.SortScore(res)\n\tdata, err := o.applyConstraint(res)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn o.outputFormat.Write(out, &repoSearchWriter{data, o.maxColWidth})\n}\n\nfunc (o *searchRepoOptions) setupSearchedVersion() {\n\tdebug(\"Original chart version: %q\", o.version)\n\n\tif o.version != \"\" {\n\t\treturn\n\t}\n\n\tif o.devel { \/\/ search for releases and prereleases (alpha, beta, and release candidate releases).\n\t\tdebug(\"setting version to >0.0.0-0\")\n\t\to.version = \">0.0.0-0\"\n\t} else { \/\/ search only for stable releases, prerelease versions will be skip\n\t\tdebug(\"setting version to >0.0.0\")\n\t\to.version = \">0.0.0\"\n\t}\n}\n\nfunc (o *searchRepoOptions) applyConstraint(res []*search.Result) ([]*search.Result, error) {\n\tif len(o.version) == 0 {\n\t\treturn res, nil\n\t}\n\n\tconstraint, err := semver.NewConstraint(o.version)\n\tif err != nil {\n\t\treturn res, errors.Wrap(err, \"an invalid version\/constraint format\")\n\t}\n\n\tdata := res[:0]\n\tfoundNames := map[string]bool{}\n\tfor _, r := range res {\n\t\tif _, found := foundNames[r.Name]; found {\n\t\t\tcontinue\n\t\t}\n\t\tv, err := semver.NewVersion(r.Chart.Version)\n\t\tif err != nil || constraint.Check(v) {\n\t\t\tdata = append(data, r)\n\t\t\tif !o.versions {\n\t\t\t\tfoundNames[r.Name] = true \/\/ If user hasn't requested all versions, only show the latest that matches\n\t\t\t}\n\t\t}\n\t}\n\n\treturn data, nil\n}\n\nfunc (o *searchRepoOptions) buildIndex(out io.Writer) (*search.Index, error) {\n\t\/\/ Load the repositories.yaml\n\trf, err := repo.LoadFile(o.repoFile)\n\tif isNotExist(err) || len(rf.Repositories) == 0 {\n\t\treturn nil, errors.New(\"no repositories configured\")\n\t}\n\n\ti := search.NewIndex()\n\tfor _, re := range rf.Repositories {\n\t\tn := re.Name\n\t\tf := filepath.Join(o.repoCacheDir, helmpath.CacheIndexFile(n))\n\t\tind, err := repo.LoadIndexFile(f)\n\t\tif err != nil {\n\t\t\t\/\/ TODO should print to stderr\n\t\t\tfmt.Fprintf(out, \"WARNING: Repo %q is corrupt or missing. Try 'helm repo update'.\", n)\n\t\t\tcontinue\n\t\t}\n\n\t\ti.AddRepo(n, ind, o.versions || len(o.version) > 0)\n\t}\n\treturn i, nil\n}\n\ntype repoChartElement struct {\n\tName string\n\tVersion string\n\tAppVersion string\n\tDescription string\n}\n\ntype repoSearchWriter struct {\n\tresults []*search.Result\n\tcolumnWidth uint\n}\n\nfunc (r *repoSearchWriter) WriteTable(out io.Writer) error {\n\tif len(r.results) == 0 {\n\t\t_, err := out.Write([]byte(\"No results found\\n\"))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to write results: %s\", err)\n\t\t}\n\t\treturn nil\n\t}\n\ttable := uitable.New()\n\ttable.MaxColWidth = r.columnWidth\n\ttable.AddRow(\"NAME\", \"CHART VERSION\", \"APP VERSION\", \"DESCRIPTION\")\n\tfor _, r := range r.results {\n\t\ttable.AddRow(r.Name, r.Chart.Version, r.Chart.AppVersion, r.Chart.Description)\n\t}\n\treturn output.EncodeTable(out, table)\n}\n\nfunc (r *repoSearchWriter) WriteJSON(out io.Writer) error {\n\treturn r.encodeByFormat(out, output.JSON)\n}\n\nfunc (r *repoSearchWriter) WriteYAML(out io.Writer) error {\n\treturn r.encodeByFormat(out, output.YAML)\n}\n\nfunc (r *repoSearchWriter) encodeByFormat(out io.Writer, format output.Format) error {\n\t\/\/ Initialize the array so no results returns an empty array instead of null\n\tchartList := make([]repoChartElement, 0, len(r.results))\n\n\tfor _, r := range r.results {\n\t\tchartList = append(chartList, repoChartElement{r.Name, r.Chart.Version, r.Chart.AppVersion, r.Chart.Description})\n\t}\n\n\tswitch format {\n\tcase output.JSON:\n\t\treturn output.EncodeJSON(out, chartList)\n\tcase output.YAML:\n\t\treturn output.EncodeYAML(out, chartList)\n\t}\n\n\t\/\/ Because this is a non-exported function and only called internally by\n\t\/\/ WriteJSON and WriteYAML, we shouldn't get invalid types\n\treturn nil\n}\n<commit_msg>Updating the usage language for search repo<commit_after>\/*\nCopyright The Helm Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/Masterminds\/semver\/v3\"\n\t\"github.com\/gosuri\/uitable\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"helm.sh\/helm\/v3\/cmd\/helm\/search\"\n\t\"helm.sh\/helm\/v3\/pkg\/cli\/output\"\n\t\"helm.sh\/helm\/v3\/pkg\/helmpath\"\n\t\"helm.sh\/helm\/v3\/pkg\/repo\"\n)\n\nconst searchRepoDesc = `\nSearch reads through all of the repositories configured on the system, and\nlooks for matches. Search of these repositories uses the metadata stored on\nthe system.\n\nIt will display the latest stable versions of the charts found. If you\nspecify the --devel flag, the output will include pre-release versions.\nIf you want to search using a version constraint, use --version.\n\nExamples:\n\n # Search for stable release versions matching the keyword \"nginx\"\n $ helm search repo nginx\n\n # Search for release versions matching the keyword \"nginx\", including pre-release versions\n $ helm search repo nginx --devel\n\n # Search for the latest stable release for nginx-ingress with a major version of 1\n $ helm search repo nginx-ingress --version ^1.0.0\n\nRepositories are managed with 'helm repo' commands.\n`\n\n\/\/ searchMaxScore suggests that any score higher than this is not considered a match.\nconst searchMaxScore = 25\n\ntype searchRepoOptions struct {\n\tversions bool\n\tregexp bool\n\tdevel bool\n\tversion string\n\tmaxColWidth uint\n\trepoFile string\n\trepoCacheDir string\n\toutputFormat output.Format\n}\n\nfunc newSearchRepoCmd(out io.Writer) *cobra.Command {\n\to := &searchRepoOptions{}\n\n\tcmd := &cobra.Command{\n\t\tUse: \"repo [keyword]\",\n\t\tShort: \"search repositories for a keyword in charts\",\n\t\tLong: searchRepoDesc,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\to.repoFile = settings.RepositoryConfig\n\t\t\to.repoCacheDir = settings.RepositoryCache\n\t\t\treturn o.run(out, args)\n\t\t},\n\t}\n\n\tf := cmd.Flags()\n\tf.BoolVarP(&o.regexp, \"regexp\", \"r\", false, \"use regular expressions for searching repositories you have added\")\n\tf.BoolVarP(&o.versions, \"versions\", \"l\", false, \"show the long listing, with each version of each chart on its own line, for repositories you have added\")\n\tf.BoolVar(&o.devel, \"devel\", false, \"use development versions (alpha, beta, and release candidate releases), too. Equivalent to version '>0.0.0-0'. If --version is set, this is ignored\")\n\tf.StringVar(&o.version, \"version\", \"\", \"search using semantic versioning constraints on repositories you have added\")\n\tf.UintVar(&o.maxColWidth, \"max-col-width\", 50, \"maximum column width for output table\")\n\tbindOutputFlag(cmd, &o.outputFormat)\n\n\treturn cmd\n}\n\nfunc (o *searchRepoOptions) run(out io.Writer, args []string) error {\n\to.setupSearchedVersion()\n\n\tindex, err := o.buildIndex(out)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar res []*search.Result\n\tif len(args) == 0 {\n\t\tres = index.All()\n\t} else {\n\t\tq := strings.Join(args, \" \")\n\t\tres, err = index.Search(q, searchMaxScore, o.regexp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tsearch.SortScore(res)\n\tdata, err := o.applyConstraint(res)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn o.outputFormat.Write(out, &repoSearchWriter{data, o.maxColWidth})\n}\n\nfunc (o *searchRepoOptions) setupSearchedVersion() {\n\tdebug(\"Original chart version: %q\", o.version)\n\n\tif o.version != \"\" {\n\t\treturn\n\t}\n\n\tif o.devel { \/\/ search for releases and prereleases (alpha, beta, and release candidate releases).\n\t\tdebug(\"setting version to >0.0.0-0\")\n\t\to.version = \">0.0.0-0\"\n\t} else { \/\/ search only for stable releases, prerelease versions will be skip\n\t\tdebug(\"setting version to >0.0.0\")\n\t\to.version = \">0.0.0\"\n\t}\n}\n\nfunc (o *searchRepoOptions) applyConstraint(res []*search.Result) ([]*search.Result, error) {\n\tif len(o.version) == 0 {\n\t\treturn res, nil\n\t}\n\n\tconstraint, err := semver.NewConstraint(o.version)\n\tif err != nil {\n\t\treturn res, errors.Wrap(err, \"an invalid version\/constraint format\")\n\t}\n\n\tdata := res[:0]\n\tfoundNames := map[string]bool{}\n\tfor _, r := range res {\n\t\tif _, found := foundNames[r.Name]; found {\n\t\t\tcontinue\n\t\t}\n\t\tv, err := semver.NewVersion(r.Chart.Version)\n\t\tif err != nil || constraint.Check(v) {\n\t\t\tdata = append(data, r)\n\t\t\tif !o.versions {\n\t\t\t\tfoundNames[r.Name] = true \/\/ If user hasn't requested all versions, only show the latest that matches\n\t\t\t}\n\t\t}\n\t}\n\n\treturn data, nil\n}\n\nfunc (o *searchRepoOptions) buildIndex(out io.Writer) (*search.Index, error) {\n\t\/\/ Load the repositories.yaml\n\trf, err := repo.LoadFile(o.repoFile)\n\tif isNotExist(err) || len(rf.Repositories) == 0 {\n\t\treturn nil, errors.New(\"no repositories configured\")\n\t}\n\n\ti := search.NewIndex()\n\tfor _, re := range rf.Repositories {\n\t\tn := re.Name\n\t\tf := filepath.Join(o.repoCacheDir, helmpath.CacheIndexFile(n))\n\t\tind, err := repo.LoadIndexFile(f)\n\t\tif err != nil {\n\t\t\t\/\/ TODO should print to stderr\n\t\t\tfmt.Fprintf(out, \"WARNING: Repo %q is corrupt or missing. Try 'helm repo update'.\", n)\n\t\t\tcontinue\n\t\t}\n\n\t\ti.AddRepo(n, ind, o.versions || len(o.version) > 0)\n\t}\n\treturn i, nil\n}\n\ntype repoChartElement struct {\n\tName string\n\tVersion string\n\tAppVersion string\n\tDescription string\n}\n\ntype repoSearchWriter struct {\n\tresults []*search.Result\n\tcolumnWidth uint\n}\n\nfunc (r *repoSearchWriter) WriteTable(out io.Writer) error {\n\tif len(r.results) == 0 {\n\t\t_, err := out.Write([]byte(\"No results found\\n\"))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to write results: %s\", err)\n\t\t}\n\t\treturn nil\n\t}\n\ttable := uitable.New()\n\ttable.MaxColWidth = r.columnWidth\n\ttable.AddRow(\"NAME\", \"CHART VERSION\", \"APP VERSION\", \"DESCRIPTION\")\n\tfor _, r := range r.results {\n\t\ttable.AddRow(r.Name, r.Chart.Version, r.Chart.AppVersion, r.Chart.Description)\n\t}\n\treturn output.EncodeTable(out, table)\n}\n\nfunc (r *repoSearchWriter) WriteJSON(out io.Writer) error {\n\treturn r.encodeByFormat(out, output.JSON)\n}\n\nfunc (r *repoSearchWriter) WriteYAML(out io.Writer) error {\n\treturn r.encodeByFormat(out, output.YAML)\n}\n\nfunc (r *repoSearchWriter) encodeByFormat(out io.Writer, format output.Format) error {\n\t\/\/ Initialize the array so no results returns an empty array instead of null\n\tchartList := make([]repoChartElement, 0, len(r.results))\n\n\tfor _, r := range r.results {\n\t\tchartList = append(chartList, repoChartElement{r.Name, r.Chart.Version, r.Chart.AppVersion, r.Chart.Description})\n\t}\n\n\tswitch format {\n\tcase output.JSON:\n\t\treturn output.EncodeJSON(out, chartList)\n\tcase output.YAML:\n\t\treturn output.EncodeYAML(out, chartList)\n\t}\n\n\t\/\/ Because this is a non-exported function and only called internally by\n\t\/\/ WriteJSON and WriteYAML, we shouldn't get invalid types\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/golang\/protobuf\/ptypes\/timestamp\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"k8s.io\/helm\/pkg\/helm\"\n\t\"k8s.io\/helm\/pkg\/proto\/hapi\/release\"\n\t\"k8s.io\/helm\/pkg\/timeconv\"\n)\n\nvar (\n\tdate = timestamp.Timestamp{Seconds: 242085845, Nanos: 0}\n\tdateString = timeconv.String(&date)\n)\n\n\/\/ statusCase describes a test case dealing with the status of a release\ntype statusCase struct {\n\tname string\n\targs []string\n\tflags []string\n\texpected string\n\terr bool\n\trel *release.Release\n}\n\nfunc TestStatusCmd(t *testing.T) {\n\ttests := []statusCase{\n\t\t{\n\t\t\tname: \"get status of a deployed release\",\n\t\t\targs: []string{\"flummoxed-chickadee\"},\n\t\t\texpected: outputWithStatus(\"DEPLOYED\\n\\n\"),\n\t\t\trel: releaseMockWithStatus(&release.Status{\n\t\t\t\tCode: release.Status_DEPLOYED,\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tname: \"get status of a deployed release with notes\",\n\t\t\targs: []string{\"flummoxed-chickadee\"},\n\t\t\texpected: outputWithStatus(\"DEPLOYED\\n\\nNOTES:\\nrelease notes\\n\"),\n\t\t\trel: releaseMockWithStatus(&release.Status{\n\t\t\t\tCode: release.Status_DEPLOYED,\n\t\t\t\tNotes: \"release notes\",\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tname: \"get status of a deployed release with notes in json\",\n\t\t\targs: []string{\"flummoxed-chickadee\"},\n\t\t\tflags: []string{\"-o\", \"json\"},\n\t\t\texpected: `{\"name\":\"flummoxed-chickadee\",\"info\":{\"status\":{\"code\":1,\"notes\":\"release notes\"},\"first_deployed\":{\"seconds\":242085845},\"last_deployed\":{\"seconds\":242085845}}}`,\n\t\t\trel: releaseMockWithStatus(&release.Status{\n\t\t\t\tCode: release.Status_DEPLOYED,\n\t\t\t\tNotes: \"release notes\",\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tname: \"get status of a deployed release with resources\",\n\t\t\targs: []string{\"flummoxed-chickadee\"},\n\t\t\texpected: outputWithStatus(\"DEPLOYED\\n\\nRESOURCES:\\nresource A\\nresource B\\n\\n\"),\n\t\t\trel: releaseMockWithStatus(&release.Status{\n\t\t\t\tCode: release.Status_DEPLOYED,\n\t\t\t\tResources: \"resource A\\nresource B\\n\",\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tname: \"get status of a deployed release with resources in YAML\",\n\t\t\targs: []string{\"flummoxed-chickadee\"},\n\t\t\tflags: []string{\"-o\", \"yaml\"},\n\t\t\texpected: \"info:\\nfirst_deployed:\\nseconds:242085845\\nlast_deployed:\\nseconds:242085845\\nstatus:\\ncode:1\\nresources:|\\nresourceA\\nresourceB\\nname:flummoxed-chickadee\\n\",\n\t\t\trel: releaseMockWithStatus(&release.Status{\n\t\t\t\tCode: release.Status_DEPLOYED,\n\t\t\t\tResources: \"resource A\\nresource B\\n\",\n\t\t\t}),\n\t\t},\n\t\t{\n\t\t\tname: \"get status of a deployed release with test suite\",\n\t\t\targs: []string{\"flummoxed-chickadee\"},\n\t\t\texpected: outputWithStatus(\n\t\t\t\tfmt.Sprintf(\"DEPLOYED\\n\\nTEST SUITE:\\nLast Started: %s\\nLast Completed: %s\\n\\n\", dateString, dateString) +\n\t\t\t\t\t\"TEST \\tSTATUS \\tINFO \\tSTARTED \\tCOMPLETED \\n\" +\n\t\t\t\t\tfmt.Sprintf(\"test run 1\\tSUCCESS \\textra info\\t%s\\t%s\\n\", dateString, dateString) +\n\t\t\t\t\tfmt.Sprintf(\"test run 2\\tFAILURE \\t \\t%s\\t%s\\n\", dateString, dateString)),\n\t\t\trel: releaseMockWithStatus(&release.Status{\n\t\t\t\tCode: release.Status_DEPLOYED,\n\t\t\t\tLastTestSuiteRun: &release.TestSuite{\n\t\t\t\t\tStartedAt: &date,\n\t\t\t\t\tCompletedAt: &date,\n\t\t\t\t\tResults: []*release.TestRun{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"test run 1\",\n\t\t\t\t\t\t\tStatus: release.TestRun_SUCCESS,\n\t\t\t\t\t\t\tInfo: \"extra info\",\n\t\t\t\t\t\t\tStartedAt: &date,\n\t\t\t\t\t\t\tCompletedAt: &date,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"test run 2\",\n\t\t\t\t\t\t\tStatus: release.TestRun_FAILURE,\n\t\t\t\t\t\t\tStartedAt: &date,\n\t\t\t\t\t\t\tCompletedAt: &date,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}),\n\t\t},\n\t}\n\n\tscmd := func(c *helm.FakeClient, out io.Writer) *cobra.Command {\n\t\treturn newStatusCmd(c, out)\n\t}\n\n\tvar buf bytes.Buffer\n\tfor _, tt := range tests {\n\t\tc := &helm.FakeClient{\n\t\t\tRels: []*release.Release{tt.rel},\n\t\t}\n\t\tcmd := scmd(c, &buf)\n\t\tcmd.ParseFlags(tt.flags)\n\t\terr := cmd.RunE(cmd, tt.args)\n\t\tif (err != nil) != tt.err {\n\t\t\tt.Errorf(\"%q. expected error, got '%v'\", tt.name, err)\n\t\t}\n\n\t\texpected := strings.Replace(tt.expected, \" \", \"\", -1)\n\t\tgot := strings.Replace(buf.String(), \" \", \"\", -1)\n\t\tif expected != got {\n\t\t\tt.Errorf(\"%q. expected\\n%q\\ngot\\n%q\", tt.name, expected, got)\n\t\t}\n\t\tbuf.Reset()\n\t}\n}\n\nfunc outputWithStatus(status string) string {\n\treturn fmt.Sprintf(\"LAST DEPLOYED: %s\\nNAMESPACE: \\nSTATUS: %s\",\n\t\tdateString,\n\t\tstatus)\n}\n\nfunc releaseMockWithStatus(status *release.Status) *release.Release {\n\treturn &release.Release{\n\t\tName: \"flummoxed-chickadee\",\n\t\tInfo: &release.Info{\n\t\t\tFirstDeployed: &date,\n\t\t\tLastDeployed: &date,\n\t\t\tStatus: status,\n\t\t},\n\t}\n}\n<commit_msg>fix(helm) refactor helm status command tests to use releaseCase struct and the corresponding function runReleaseCases. Fixes #3659<commit_after>\/*\nCopyright 2017 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"testing\"\n\n\t\"github.com\/golang\/protobuf\/ptypes\/timestamp\"\n\t\"github.com\/spf13\/cobra\"\n\n\t\"k8s.io\/helm\/pkg\/helm\"\n\t\"k8s.io\/helm\/pkg\/proto\/hapi\/release\"\n\t\"k8s.io\/helm\/pkg\/timeconv\"\n)\n\nvar (\n\tdate = timestamp.Timestamp{Seconds: 242085845, Nanos: 0}\n\tdateString = timeconv.String(&date)\n)\n\nfunc TestStatusCmd(t *testing.T) {\n\ttests := []releaseCase{\n\t\t{\n\t\t\tname: \"get status of a deployed release\",\n\t\t\targs: []string{\"flummoxed-chickadee\"},\n\t\t\texpected: outputWithStatus(\"DEPLOYED\\n\\n\"),\n\t\t\trels: []*release.Release{\n\t\t\t\treleaseMockWithStatus(&release.Status{\n\t\t\t\t\tCode: release.Status_DEPLOYED,\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"get status of a deployed release with notes\",\n\t\t\targs: []string{\"flummoxed-chickadee\"},\n\t\t\texpected: outputWithStatus(\"DEPLOYED\\n\\nNOTES:\\nrelease notes\\n\"),\n\t\t\trels: []*release.Release{\n\t\t\t\treleaseMockWithStatus(&release.Status{\n\t\t\t\t\tCode: release.Status_DEPLOYED,\n\t\t\t\t\tNotes: \"release notes\",\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"get status of a deployed release with notes in json\",\n\t\t\targs: []string{\"flummoxed-chickadee\"},\n\t\t\tflags: []string{\"-o\", \"json\"},\n\t\t\texpected: `{\"name\":\"flummoxed-chickadee\",\"info\":{\"status\":{\"code\":1,\"notes\":\"release notes\"},\"first_deployed\":{\"seconds\":242085845},\"last_deployed\":{\"seconds\":242085845}}}`,\n\t\t\trels: []*release.Release{\n\t\t\t\treleaseMockWithStatus(&release.Status{\n\t\t\t\t\tCode: release.Status_DEPLOYED,\n\t\t\t\t\tNotes: \"release notes\",\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"get status of a deployed release with resources\",\n\t\t\targs: []string{\"flummoxed-chickadee\"},\n\t\t\texpected: outputWithStatus(\"DEPLOYED\\n\\nRESOURCES:\\nresource A\\nresource B\\n\\n\"),\n\t\t\trels: []*release.Release{\n\t\t\t\treleaseMockWithStatus(&release.Status{\n\t\t\t\t\tCode: release.Status_DEPLOYED,\n\t\t\t\t\tResources: \"resource A\\nresource B\\n\",\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"get status of a deployed release with resources in YAML\",\n\t\t\targs: []string{\"flummoxed-chickadee\"},\n\t\t\tflags: []string{\"-o\", \"yaml\"},\n\t\t\texpected: \"info:\\n (.*)first_deployed:\\n (.*)seconds: 242085845\\n (.*)last_deployed:\\n (.*)seconds: 242085845\\n (.*)status:\\n code: 1\\n (.*)resources: |\\n (.*)resource A\\n (.*)resource B\\nname: flummoxed-chickadee\\n\",\n\t\t\trels: []*release.Release{\n\t\t\t\treleaseMockWithStatus(&release.Status{\n\t\t\t\t\tCode: release.Status_DEPLOYED,\n\t\t\t\t\tResources: \"resource A\\nresource B\\n\",\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"get status of a deployed release with test suite\",\n\t\t\targs: []string{\"flummoxed-chickadee\"},\n\t\t\texpected: outputWithStatus(\n\t\t\t\tfmt.Sprintf(\"DEPLOYED\\n\\nTEST SUITE:\\nLast Started: %s\\nLast Completed: %s\\n\\n\", dateString, dateString) +\n\t\t\t\t\t\"TEST \\tSTATUS (.*)\\tINFO (.*)\\tSTARTED (.*)\\tCOMPLETED (.*)\\n\" +\n\t\t\t\t\tfmt.Sprintf(\"test run 1\\tSUCCESS (.*)\\textra info\\t%s\\t%s\\n\", dateString, dateString) +\n\t\t\t\t\tfmt.Sprintf(\"test run 2\\tFAILURE (.*)\\t (.*)\\t%s\\t%s\\n\", dateString, dateString)),\n\t\t\trels: []*release.Release{\n\t\t\t\treleaseMockWithStatus(&release.Status{\n\t\t\t\t\tCode: release.Status_DEPLOYED,\n\t\t\t\t\tLastTestSuiteRun: &release.TestSuite{\n\t\t\t\t\t\tStartedAt: &date,\n\t\t\t\t\t\tCompletedAt: &date,\n\t\t\t\t\t\tResults: []*release.TestRun{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"test run 1\",\n\t\t\t\t\t\t\t\tStatus: release.TestRun_SUCCESS,\n\t\t\t\t\t\t\t\tInfo: \"extra info\",\n\t\t\t\t\t\t\t\tStartedAt: &date,\n\t\t\t\t\t\t\t\tCompletedAt: &date,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName: \"test run 2\",\n\t\t\t\t\t\t\t\tStatus: release.TestRun_FAILURE,\n\t\t\t\t\t\t\t\tStartedAt: &date,\n\t\t\t\t\t\t\t\tCompletedAt: &date,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t}\n\n\trunReleaseCases(t, tests, func(c *helm.FakeClient, out io.Writer) *cobra.Command {\n\t\treturn newStatusCmd(c, out)\n\t})\n\n}\n\nfunc outputWithStatus(status string) string {\n\treturn fmt.Sprintf(\"LAST DEPLOYED: %s\\nNAMESPACE: \\nSTATUS: %s\",\n\t\tdateString,\n\t\tstatus)\n}\n\nfunc releaseMockWithStatus(status *release.Status) *release.Release {\n\treturn &release.Release{\n\t\tName: \"flummoxed-chickadee\",\n\t\tInfo: &release.Info{\n\t\t\tFirstDeployed: &date,\n\t\t\tLastDeployed: &date,\n\t\t\tStatus: status,\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"launchpad.net\/gnuflag\"\n\n\t\"launchpad.net\/juju-core\/cmd\"\n\t\"launchpad.net\/juju-core\/constraints\"\n\t\"launchpad.net\/juju-core\/juju\"\n\t\"launchpad.net\/juju-core\/names\"\n\t\"launchpad.net\/juju-core\/state\/api\/params\"\n\t\"launchpad.net\/juju-core\/state\/statecmd\"\n)\n\n\/\/ GetConstraintsCommand shows the constraints for a service or environment.\ntype GetConstraintsCommand struct {\n\tcmd.EnvCommandBase\n\tServiceName string\n\tout cmd.Output\n}\n\nfunc (c *GetConstraintsCommand) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"get-constraints\",\n\t\tArgs: \"[<service>]\",\n\t\tPurpose: \"view constraints\",\n\t}\n}\n\nfunc formatConstraints(value interface{}) ([]byte, error) {\n\treturn []byte(value.(constraints.Value).String()), nil\n}\n\nfunc (c *GetConstraintsCommand) SetFlags(f *gnuflag.FlagSet) {\n\tc.EnvCommandBase.SetFlags(f)\n\tc.out.AddFlags(f, \"constraints\", map[string]cmd.Formatter{\n\t\t\"constraints\": formatConstraints,\n\t\t\/\/ TODO(nate): CONSTRAINTS_YAML: re-add yaml as a format when \n\t\t\/\/ we can properly handle yaml serialization\n\t\t\/\/ see constraints\/constrains.go\n\t\t\/\/ \"yaml\": cmd.FormatYaml,\n\t\t\"json\": cmd.FormatJson,\n\t})\n}\n\nfunc (c *GetConstraintsCommand) Init(args []string) error {\n\tif len(args) > 0 {\n\t\tif !names.IsService(args[0]) {\n\t\t\treturn fmt.Errorf(\"invalid service name %q\", args[0])\n\t\t}\n\t\tc.ServiceName, args = args[0], args[1:]\n\t}\n\treturn cmd.CheckEmpty(args)\n}\n\nfunc (c *GetConstraintsCommand) Run(ctx *cmd.Context) error {\n\tconn, err := juju.NewConnFromName(c.EnvName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\tvar cons constraints.Value\n\tif c.ServiceName != \"\" {\n\t\targs := params.GetServiceConstraints{\n\t\t\tServiceName: c.ServiceName,\n\t\t}\n\t\tvar results params.GetServiceConstraintsResults\n\t\tresults, err = statecmd.GetServiceConstraints(conn.State, args)\n\t\tcons = results.Constraints\n\t} else {\n\t\tcons, err = conn.State.EnvironConstraints()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.out.Write(ctx, cons)\n}\n\n\/\/ SetConstraintsCommand shows the constraints for a service or environment.\ntype SetConstraintsCommand struct {\n\tcmd.EnvCommandBase\n\tServiceName string\n\tConstraints constraints.Value\n}\n\nfunc (c *SetConstraintsCommand) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"set-constraints\",\n\t\tArgs: \"[key=[value] ...]\",\n\t\tPurpose: \"replace constraints\",\n\t}\n}\n\nfunc (c *SetConstraintsCommand) SetFlags(f *gnuflag.FlagSet) {\n\tc.EnvCommandBase.SetFlags(f)\n\tf.StringVar(&c.ServiceName, \"s\", \"\", \"set service constraints\")\n\tf.StringVar(&c.ServiceName, \"service\", \"\", \"\")\n}\n\nfunc (c *SetConstraintsCommand) Init(args []string) (err error) {\n\tif c.ServiceName != \"\" && !names.IsService(c.ServiceName) {\n\t\treturn fmt.Errorf(\"invalid service name %q\", c.ServiceName)\n\t}\n\tc.Constraints, err = constraints.Parse(args...)\n\treturn err\n}\n\nfunc (c *SetConstraintsCommand) Run(_ *cmd.Context) (err error) {\n\tconn, err := juju.NewConnFromName(c.EnvName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\tif c.ServiceName == \"\" {\n\t\treturn conn.State.SetEnvironConstraints(c.Constraints)\n\t}\n\tparams := params.SetServiceConstraints{\n\t\tServiceName: c.ServiceName,\n\t\tConstraints: c.Constraints,\n\t}\n\treturn statecmd.SetServiceConstraints(conn.State, params)\n}\n<commit_msg>gofmt was sad<commit_after>\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"launchpad.net\/gnuflag\"\n\n\t\"launchpad.net\/juju-core\/cmd\"\n\t\"launchpad.net\/juju-core\/constraints\"\n\t\"launchpad.net\/juju-core\/juju\"\n\t\"launchpad.net\/juju-core\/names\"\n\t\"launchpad.net\/juju-core\/state\/api\/params\"\n\t\"launchpad.net\/juju-core\/state\/statecmd\"\n)\n\n\/\/ GetConstraintsCommand shows the constraints for a service or environment.\ntype GetConstraintsCommand struct {\n\tcmd.EnvCommandBase\n\tServiceName string\n\tout cmd.Output\n}\n\nfunc (c *GetConstraintsCommand) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"get-constraints\",\n\t\tArgs: \"[<service>]\",\n\t\tPurpose: \"view constraints\",\n\t}\n}\n\nfunc formatConstraints(value interface{}) ([]byte, error) {\n\treturn []byte(value.(constraints.Value).String()), nil\n}\n\nfunc (c *GetConstraintsCommand) SetFlags(f *gnuflag.FlagSet) {\n\tc.EnvCommandBase.SetFlags(f)\n\tc.out.AddFlags(f, \"constraints\", map[string]cmd.Formatter{\n\t\t\"constraints\": formatConstraints,\n\t\t\/\/ TODO(nate): CONSTRAINTS_YAML: re-add yaml as a format when\n\t\t\/\/ we can properly handle yaml serialization\n\t\t\/\/ see constraints\/constrains.go\n\t\t\/\/ \"yaml\": cmd.FormatYaml,\n\t\t\"json\": cmd.FormatJson,\n\t})\n}\n\nfunc (c *GetConstraintsCommand) Init(args []string) error {\n\tif len(args) > 0 {\n\t\tif !names.IsService(args[0]) {\n\t\t\treturn fmt.Errorf(\"invalid service name %q\", args[0])\n\t\t}\n\t\tc.ServiceName, args = args[0], args[1:]\n\t}\n\treturn cmd.CheckEmpty(args)\n}\n\nfunc (c *GetConstraintsCommand) Run(ctx *cmd.Context) error {\n\tconn, err := juju.NewConnFromName(c.EnvName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\tvar cons constraints.Value\n\tif c.ServiceName != \"\" {\n\t\targs := params.GetServiceConstraints{\n\t\t\tServiceName: c.ServiceName,\n\t\t}\n\t\tvar results params.GetServiceConstraintsResults\n\t\tresults, err = statecmd.GetServiceConstraints(conn.State, args)\n\t\tcons = results.Constraints\n\t} else {\n\t\tcons, err = conn.State.EnvironConstraints()\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.out.Write(ctx, cons)\n}\n\n\/\/ SetConstraintsCommand shows the constraints for a service or environment.\ntype SetConstraintsCommand struct {\n\tcmd.EnvCommandBase\n\tServiceName string\n\tConstraints constraints.Value\n}\n\nfunc (c *SetConstraintsCommand) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"set-constraints\",\n\t\tArgs: \"[key=[value] ...]\",\n\t\tPurpose: \"replace constraints\",\n\t}\n}\n\nfunc (c *SetConstraintsCommand) SetFlags(f *gnuflag.FlagSet) {\n\tc.EnvCommandBase.SetFlags(f)\n\tf.StringVar(&c.ServiceName, \"s\", \"\", \"set service constraints\")\n\tf.StringVar(&c.ServiceName, \"service\", \"\", \"\")\n}\n\nfunc (c *SetConstraintsCommand) Init(args []string) (err error) {\n\tif c.ServiceName != \"\" && !names.IsService(c.ServiceName) {\n\t\treturn fmt.Errorf(\"invalid service name %q\", c.ServiceName)\n\t}\n\tc.Constraints, err = constraints.Parse(args...)\n\treturn err\n}\n\nfunc (c *SetConstraintsCommand) Run(_ *cmd.Context) (err error) {\n\tconn, err := juju.NewConnFromName(c.EnvName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\tif c.ServiceName == \"\" {\n\t\treturn conn.State.SetEnvironConstraints(c.Constraints)\n\t}\n\tparams := params.SetServiceConstraints{\n\t\tServiceName: c.ServiceName,\n\t\tConstraints: c.Constraints,\n\t}\n\treturn statecmd.SetServiceConstraints(conn.State, params)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage main\n\nconst helpBasics = `\nJuju -- devops distilled\nhttps:\/\/juju.ubuntu.com\/\n\nJuju provides easy, intelligent service orchestration on top of environments\nsuch as OpenStack, Amazon AWS, or bare metal.\n\nBasic commands:\n juju init generate boilerplate configuration for juju environments\n juju bootstrap start up an environment from scratch\n\n juju deploy deploy a new service\n juju add-relation add a relation between two services\n juju expose expose a service\n\n juju help bootstrap more help on e.g. bootstrap command\n juju help commands list all commands\n juju help glossary glossary of terms\n juju help topics list all help topics\n\nProvider information:\n juju help local use on this computer\n juju help aws use on AWS\n juju help openstack use on OpenStack\n juju help hpcloud use on HP Cloud\n`\n\nconst helpLocalProvider = `\nFirst install Juju and some dependencies it needs. Keep in mind that LXC and\nmongodb are needed for the local provider to work.\n\n sudo add-apt-repository ppa:juju\/stable\n sudo apt-get update\n sudo apt-get install juju-core lxc mongodb-server\n\nAfter that you might get error for SSH authorized\/public key not found. ERROR\nSSH authorized\/public key not found.\n\n ssh-keygen -t rsa\n\nFirst configure your environment local environment, if you've not set up Juju\nbefore do a:\n\n juju init -w\n\nThis will write out an example config file that will work. Then you need to\ntell Juju to use the local provider and then bootstrap:\n\n juju switch local\n sudo juju bootstrap\n\nThe first time this runs it might take a bit, as it's doing a netinstall for\nthe container, it's around a 300 megabyte download. Subsequent bootstraps\nshould be much quicker. 'sudo' is needed because only root can create LXC\ncontainers. After the initial bootstrap, you do not need 'sudo' anymore,\nexcept to 'sudo juju destroy-environment' when you want to tear everything\ndown.\n\nYou deploy charms from the charm store using the following commands:\n\n juju deploy mysql\n juju deploy wordpress\n juju add-relation wordpress mysql\n\nReferences:\n\n - Source: Question on Ask Ubuntu [1]\n - [Documentation][2]\n\n [1]: http:\/\/askubuntu.com\/questions\/65359\/how-do-i-configure-juju-for-local-usage\n [2]: https:\/\/juju.ubuntu.com\/docs\/getting-started.html\n`\n\nconst helpOpenstackProvider = `\n\nFirst off you need juju and charm-tools, ensure you have the latest stable\njuju:\n\n sudo add-apt-repository ppa:juju\/stable\n sudo apt-get update && sudo apt-get install juju-core charm-tools\n\nDo a 'juju generate-config -w' to generate a config for OpenStack that you can\ncustomize for your needs.\n\nHere's an example OpenStack configuration for '~\/.juju\/environments.yaml',\nincluding the commented out sections:\n\n openstack:\n type: openstack\n # Specifies whether the use of a floating IP address is required to\n # give the nodes a public IP address. Some installations assign public\n # IP addresses by default without requiring a floating IP address.\n # use-floating-ip: false\n admin-secret: 13850d1b9786065cadd0f477e8c97cd3\n # Globally unique swift bucket name\n control-bucket: juju-fd6ab8d02393af742bfbe8b9629707ee\n # Usually set via the env variable OS_AUTH_URL, but can be specified here\n # auth-url: https:\/\/yourkeystoneurl:443\/v2.0\/\n # override if your workstation is running a different series to which\n # you are deploying\n # default-series: precise\n # The following are used for userpass authentication (the default)\n auth-mode: userpass\n # Usually set via the env variable OS_USERNAME, but can be specified here\n # username: <your username>\n # Usually set via the env variable OS_PASSWORD, but can be specified here\n # password: <secret>\n # Usually set via the env variable OS_TENANT_NAME, but can be specified here\n # tenant-name: <your tenant name>\n # Usually set via the env variable OS_REGION_NAME, but can be specified here\n # region: <your region>\n\nReferences:\n\n - Source: Question on Ask Ubuntu [1]\n - Official Docs [2]\n\n [1]: http:\/\/askubuntu.com\/questions\/132411\/how-can-i-configure-juju-for-deployment-on-openstack\n [2]: http:\/\/juju.ubuntu.com\/docs\/provider-configuration-openstack.html\n\nOther OpenStack Based Clouds:\n\nThis answer is for generic upstream OpenStack support, if you're using an\nOpenStack-based provider check these questions out for provider-specific\ninformation:\n\n - http:\/\/askubuntu.com\/questions\/116174\/how-can-i-configure-juju-for-deployment-to-the-hp-cloud\n - http:\/\/askubuntu.com\/questions\/166102\/how-do-i-configure-juju-for-deployment-on-rackspace-cloud\n\n`\n\nconst helpEC2Provider = `\nFirst install Juju:\n\n sudo add-apt-repository ppa:juju\/stable\n sudo apt-get update && sudo apt-get -y install juju-core\n\nDo a 'juju generate-config -w' to generate a config for AWS that you can\ncustomize for your needs. This will create the file\n'~\/.juju\/environments.yaml'.\n\nWhich is a sample environment configured to run with EC2 machines and S3\npermanent storage.\n\nTo make this environment actually useful, you will need to tell juju about an\nAWS access key and secret key. To do this, you can either set the\n'AWS_ACCESS_KEY_ID' and 'AWS_SECRET_ACCESS_KEY' [environment variables][1] (as\nusual for other EC2 tools) or you can add access-key and secret-key options to\nyour environments.yaml. These are already in place in the generated config,\nyou just need to uncomment them out. For example:\n\n default: sample\n environments:\n sample:\n type: ec2\n access-key: YOUR-ACCESS-KEY-GOES-HERE\n secret-key: YOUR-SECRET-KEY-GOES-HERE\n control-bucket: juju-faefb490d69a41f0a3616a4808e0766b\n admin-secret: 81a1e7429e6847c4941fda7591246594\n default-series: precise\n ssl-hostname-verification: true\n\nSee the [EC2 provider documentation][2] for more options. The S3 bucket does\nnot need to exist already.\n\nNote If you already have an AWS account, you can determine your access key by\nvisiting [your account page][3], clicking \"Security Credentials\" and then\nclicking \"Access Credentials\". You'll be taken to a table that lists your\naccess keys and has a \"show\" link for each access key that will reveal the\nassociated secret key.\n\nAnd that's it, you're ready to go!\n\n - https:\/\/juju.ubuntu.com\/docs\/getting-started.html\n - https:\/\/juju.ubuntu.com\/docs\/provider-configuration-ec2.html\n\nReferences:\n\n - Source: Question on Ask Ubuntu [4]\n\n [1]: http:\/\/askubuntu.com\/questions\/730\/how-do-i-set-environment-variables\n [2]: https:\/\/juju.ubuntu.com\/docs\/provider-configuration-ec2.html\n [3]: http:\/\/aws.amazon.com\/account\n [4]: http:\/\/askubuntu.com\/questions\/225513\/how-do-i-configure-juju-to-use-amazon-web-services-aws\n`\n\nconst helpHPCloud = `\n\nYou should start by generating a generic configuration file for Juju, using\nthe command:\n\n 'juju generate-config -w'\n\nThis will generate a file, 'environments.yaml', which will live in your\n'~\/.juju\/' directory (and will create the directory if it doesn't already\nexist).\n\nThe essential configuration sections for HP Cloud look like this:\n\n hpcloud:\n type: openstack\n admin-secret: 6638bebf0c54ffff1007e0247d4dae98\n control-bucket: juju-bc66a4a4adbee50b2ceeee70436528e5\n tenant-name: \"juju-project1\"\n auth-url: https:\/\/region-a.geo-1.identity.hpcloudsvc.com:35357\/v2.0\n auth-mode: userpass\n username: \"xxxyour-hpcloud-usernamexxx\"\n password: \"xxxpasswordxxx\"\n region: az-1.region-a.geo-1\n public-bucket-url: https:\/\/region-a.geo-1.objects.hpcloudsvc.com\/v1\/60502529753910\n\nPlease refer to the question on Ask Ubuntu [1] for details on how to get\nthe relevant information to finish configuring your hpcloud environment.\n\nOfficial docs:\n\n - [Documentation][2]\n - General OpenStack configuration: [3]\n\nReferences:\n\n - Source: Question on Ask Ubuntu [1]\n\n [1]: http:\/\/askubuntu.com\/questions\/116174\/how-can-i-configure-juju-for-deployment-on-hp-cloud\n [2]: https:\/\/juju.ubuntu.com\/docs\/provider-configuration-openstack.html#openstack-configuration\n [3]: http:\/\/askubuntu.com\/questions\/132411\/how-can-i-configure-juju-for-deployment-on-openstack\n`\n\nconst helpGlossary = `\nBootstrap\n To boostrap an environment means initializing it so that Services may be\n deployed on it.\n\nCharm\n A Charm provides the definition of the service, including its metadata,\n dependencies to other services, packages necessary, as well as the logic\n for management of the application. It is the layer that integrates an\n external application component like Postgres or WordPress into juju. A juju\n Service may generally be seen as the composition of its juju Charm and the\n upstream application (traditionally made available through its package).\n\nCharm URL\n A Charm URL is a resource locator for a charm, with the following format\n and restrictions:\n\n <schema>:[~<user>\/]<collection>\/<name>[-<revision>]\n\n schema must be either \"cs\", for a charm from the Juju charm store, or\n \"local\", for a charm from a local repository.\n\n user is only valid in charm store URLs, and allows you to source charms\n from individual users (rather than from the main charm store); it must be a\n valid Launchpad user name.\n\n collection denotes a charm's purpose and status, and is derived from the\n Ubuntu series targeted by its contained charms: examples include \"precise\",\n \"quantal\", \"oneiric-universe\".\n\n name is just the name of the charm; it must start and end with lowercase\n (ascii) letters, and can otherwise contain any combination of lowercase\n letters, digits, and \"-\"s.\n\n revision, if specified, points to a specific revision of the charm pointed\n to by the rest of the URL. It must be a non-negative integer.\n\nEndpoint\n The combination of a service name and a relation name.\n\nEnvironment\n An Environment is a configured location where Services can be deployed\n onto. An Environment typically has a name, which can usually be omitted\n when there's a single Environment configured, or when a default is\n explicitly defined. Depending on the type of Environment, it may have to be\n bootstrapped before interactions with it may take place (e.g. EC2). The\n local environment configuration is defined in the ~\/.juju\/environments.yaml\n file.\n\nMachine Agent\n Software which runs inside each machine that is part of an Environment, and\n is able to handle the needs of deploying and managing Service Units in this\n machine.\n\nProvisioning Agent\n Software responsible for automatically allocating and terminating machines\n in an Environment, as necessary for the requested configuration.\n\nRelation\n Relations are the way in which juju enables Services to communicate to each\n other, and the way in which the topology of Services is assembled. The\n Charm defines which Relations a given Service may establish, and what kind\n of interface these Relations require.\n\n In many cases, the establishment of a Relation will result into an actual\n TCP connection being created between the Service Units, but that's not\n necessarily the case. Relations may also be established to inform Services\n of configuration parameters, to request monitoring information, or any\n other details which the Charm author has chosen to make available.\n\nRepository\n A location where multiple charms are stored. Repositories may be as simple\n as a directory structure on a local disk, or as complex as a rich smart\n server supporting remote searching and so on.\n\nService\n juju operates in terms of services. A service is any application (or set of\n applications) that is integrated into the framework as an individual\n component which should generally be joined with other components to perform\n a more complex goal.\n\n As an example, WordPress could be deployed as a service and, to perform its\n tasks properly, might communicate with a database service and a load\n balancer service.\n\nService Configuration\n There are many different settings in a juju deployment, but the term\n Service Configuration refers to the settings which a user can define to\n customize the behavior of a Service.\n\n The behavior of a Service when its Service Configuration changes is\n entirely defined by its Charm.\n\nService Unit\n A running instance of a given juju Service. Simple Services may be deployed\n with a single Service Unit, but it is possible for an individual Service to\n have multiple Service Units running in independent machines. All Service\n Units for a given Service will share the same Charm, the same relations,\n and the same user-provided configuration.\n\n For instance, one may deploy a single MongoDB Service, and specify that it\n should run 3 Units, so that the replica set is resilient to\n failures. Internally, even though the replica set shares the same\n user-provided configuration, each Unit may be performing different roles\n within the replica set, as defined by the Charm.\n\nService Unit Agent\n Software which manages all the lifecycle of a single Service Unit.\n\n`\n<commit_msg>Update the header block.<commit_after>\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage main\n\nconst helpBasics = `\nJuju -- devops distilled\nhttps:\/\/juju.ubuntu.com\/\n\nJuju provides easy, intelligent service orchestration on top of environments\nsuch as OpenStack, Amazon AWS, bare metal, or your own local machine.\n\nBasic commands:\n juju init generate boilerplate configuration for juju environments\n juju bootstrap start up an environment from scratch\n\n juju deploy deploy a new service\n juju add-relation add a relation between two services\n juju expose expose a service\n\n juju help bootstrap more help on e.g. bootstrap command\n juju help commands list all commands\n juju help glossary glossary of terms\n juju help topics list all help topics\n\nProvider information:\n juju help local use on this computer\n juju help aws use on AWS\n juju help openstack use on OpenStack\n juju help hpcloud use on HP Cloud\n`\n\nconst helpLocalProvider = `\nFirst install Juju and some dependencies it needs. Keep in mind that LXC and\nmongodb are needed for the local provider to work.\n\n sudo add-apt-repository ppa:juju\/stable\n sudo apt-get update\n sudo apt-get install juju-core lxc mongodb-server\n\nAfter that you might get error for SSH authorized\/public key not found. ERROR\nSSH authorized\/public key not found.\n\n ssh-keygen -t rsa\n\nFirst configure your environment local environment, if you've not set up Juju\nbefore do a:\n\n juju init -w\n\nThis will write out an example config file that will work. Then you need to\ntell Juju to use the local provider and then bootstrap:\n\n juju switch local\n sudo juju bootstrap\n\nThe first time this runs it might take a bit, as it's doing a netinstall for\nthe container, it's around a 300 megabyte download. Subsequent bootstraps\nshould be much quicker. 'sudo' is needed because only root can create LXC\ncontainers. After the initial bootstrap, you do not need 'sudo' anymore,\nexcept to 'sudo juju destroy-environment' when you want to tear everything\ndown.\n\nYou deploy charms from the charm store using the following commands:\n\n juju deploy mysql\n juju deploy wordpress\n juju add-relation wordpress mysql\n\nReferences:\n\n - Source: Question on Ask Ubuntu [1]\n - [Documentation][2]\n\n [1]: http:\/\/askubuntu.com\/questions\/65359\/how-do-i-configure-juju-for-local-usage\n [2]: https:\/\/juju.ubuntu.com\/docs\/getting-started.html\n`\n\nconst helpOpenstackProvider = `\n\nFirst off you need juju and charm-tools, ensure you have the latest stable\njuju:\n\n sudo add-apt-repository ppa:juju\/stable\n sudo apt-get update && sudo apt-get install juju-core charm-tools\n\nDo a 'juju generate-config -w' to generate a config for OpenStack that you can\ncustomize for your needs.\n\nHere's an example OpenStack configuration for '~\/.juju\/environments.yaml',\nincluding the commented out sections:\n\n openstack:\n type: openstack\n # Specifies whether the use of a floating IP address is required to\n # give the nodes a public IP address. Some installations assign public\n # IP addresses by default without requiring a floating IP address.\n # use-floating-ip: false\n admin-secret: 13850d1b9786065cadd0f477e8c97cd3\n # Globally unique swift bucket name\n control-bucket: juju-fd6ab8d02393af742bfbe8b9629707ee\n # Usually set via the env variable OS_AUTH_URL, but can be specified here\n # auth-url: https:\/\/yourkeystoneurl:443\/v2.0\/\n # override if your workstation is running a different series to which\n # you are deploying\n # default-series: precise\n # The following are used for userpass authentication (the default)\n auth-mode: userpass\n # Usually set via the env variable OS_USERNAME, but can be specified here\n # username: <your username>\n # Usually set via the env variable OS_PASSWORD, but can be specified here\n # password: <secret>\n # Usually set via the env variable OS_TENANT_NAME, but can be specified here\n # tenant-name: <your tenant name>\n # Usually set via the env variable OS_REGION_NAME, but can be specified here\n # region: <your region>\n\nReferences:\n\n - Source: Question on Ask Ubuntu [1]\n - Official Docs [2]\n\n [1]: http:\/\/askubuntu.com\/questions\/132411\/how-can-i-configure-juju-for-deployment-on-openstack\n [2]: http:\/\/juju.ubuntu.com\/docs\/provider-configuration-openstack.html\n\nOther OpenStack Based Clouds:\n\nThis answer is for generic upstream OpenStack support, if you're using an\nOpenStack-based provider check these questions out for provider-specific\ninformation:\n\n - http:\/\/askubuntu.com\/questions\/116174\/how-can-i-configure-juju-for-deployment-to-the-hp-cloud\n - http:\/\/askubuntu.com\/questions\/166102\/how-do-i-configure-juju-for-deployment-on-rackspace-cloud\n\n`\n\nconst helpEC2Provider = `\nFirst install Juju:\n\n sudo add-apt-repository ppa:juju\/stable\n sudo apt-get update && sudo apt-get -y install juju-core\n\nDo a 'juju generate-config -w' to generate a config for AWS that you can\ncustomize for your needs. This will create the file\n'~\/.juju\/environments.yaml'.\n\nWhich is a sample environment configured to run with EC2 machines and S3\npermanent storage.\n\nTo make this environment actually useful, you will need to tell juju about an\nAWS access key and secret key. To do this, you can either set the\n'AWS_ACCESS_KEY_ID' and 'AWS_SECRET_ACCESS_KEY' [environment variables][1] (as\nusual for other EC2 tools) or you can add access-key and secret-key options to\nyour environments.yaml. These are already in place in the generated config,\nyou just need to uncomment them out. For example:\n\n default: sample\n environments:\n sample:\n type: ec2\n access-key: YOUR-ACCESS-KEY-GOES-HERE\n secret-key: YOUR-SECRET-KEY-GOES-HERE\n control-bucket: juju-faefb490d69a41f0a3616a4808e0766b\n admin-secret: 81a1e7429e6847c4941fda7591246594\n default-series: precise\n ssl-hostname-verification: true\n\nSee the [EC2 provider documentation][2] for more options. The S3 bucket does\nnot need to exist already.\n\nNote If you already have an AWS account, you can determine your access key by\nvisiting [your account page][3], clicking \"Security Credentials\" and then\nclicking \"Access Credentials\". You'll be taken to a table that lists your\naccess keys and has a \"show\" link for each access key that will reveal the\nassociated secret key.\n\nAnd that's it, you're ready to go!\n\n - https:\/\/juju.ubuntu.com\/docs\/getting-started.html\n - https:\/\/juju.ubuntu.com\/docs\/provider-configuration-ec2.html\n\nReferences:\n\n - Source: Question on Ask Ubuntu [4]\n\n [1]: http:\/\/askubuntu.com\/questions\/730\/how-do-i-set-environment-variables\n [2]: https:\/\/juju.ubuntu.com\/docs\/provider-configuration-ec2.html\n [3]: http:\/\/aws.amazon.com\/account\n [4]: http:\/\/askubuntu.com\/questions\/225513\/how-do-i-configure-juju-to-use-amazon-web-services-aws\n`\n\nconst helpHPCloud = `\n\nYou should start by generating a generic configuration file for Juju, using\nthe command:\n\n 'juju generate-config -w'\n\nThis will generate a file, 'environments.yaml', which will live in your\n'~\/.juju\/' directory (and will create the directory if it doesn't already\nexist).\n\nThe essential configuration sections for HP Cloud look like this:\n\n hpcloud:\n type: openstack\n admin-secret: 6638bebf0c54ffff1007e0247d4dae98\n control-bucket: juju-bc66a4a4adbee50b2ceeee70436528e5\n tenant-name: \"juju-project1\"\n auth-url: https:\/\/region-a.geo-1.identity.hpcloudsvc.com:35357\/v2.0\n auth-mode: userpass\n username: \"xxxyour-hpcloud-usernamexxx\"\n password: \"xxxpasswordxxx\"\n region: az-1.region-a.geo-1\n public-bucket-url: https:\/\/region-a.geo-1.objects.hpcloudsvc.com\/v1\/60502529753910\n\nPlease refer to the question on Ask Ubuntu [1] for details on how to get\nthe relevant information to finish configuring your hpcloud environment.\n\nOfficial docs:\n\n - [Documentation][2]\n - General OpenStack configuration: [3]\n\nReferences:\n\n - Source: Question on Ask Ubuntu [1]\n\n [1]: http:\/\/askubuntu.com\/questions\/116174\/how-can-i-configure-juju-for-deployment-on-hp-cloud\n [2]: https:\/\/juju.ubuntu.com\/docs\/provider-configuration-openstack.html#openstack-configuration\n [3]: http:\/\/askubuntu.com\/questions\/132411\/how-can-i-configure-juju-for-deployment-on-openstack\n`\n\nconst helpGlossary = `\nBootstrap\n To boostrap an environment means initializing it so that Services may be\n deployed on it.\n\nCharm\n A Charm provides the definition of the service, including its metadata,\n dependencies to other services, packages necessary, as well as the logic\n for management of the application. It is the layer that integrates an\n external application component like Postgres or WordPress into juju. A juju\n Service may generally be seen as the composition of its juju Charm and the\n upstream application (traditionally made available through its package).\n\nCharm URL\n A Charm URL is a resource locator for a charm, with the following format\n and restrictions:\n\n <schema>:[~<user>\/]<collection>\/<name>[-<revision>]\n\n schema must be either \"cs\", for a charm from the Juju charm store, or\n \"local\", for a charm from a local repository.\n\n user is only valid in charm store URLs, and allows you to source charms\n from individual users (rather than from the main charm store); it must be a\n valid Launchpad user name.\n\n collection denotes a charm's purpose and status, and is derived from the\n Ubuntu series targeted by its contained charms: examples include \"precise\",\n \"quantal\", \"oneiric-universe\".\n\n name is just the name of the charm; it must start and end with lowercase\n (ascii) letters, and can otherwise contain any combination of lowercase\n letters, digits, and \"-\"s.\n\n revision, if specified, points to a specific revision of the charm pointed\n to by the rest of the URL. It must be a non-negative integer.\n\nEndpoint\n The combination of a service name and a relation name.\n\nEnvironment\n An Environment is a configured location where Services can be deployed\n onto. An Environment typically has a name, which can usually be omitted\n when there's a single Environment configured, or when a default is\n explicitly defined. Depending on the type of Environment, it may have to be\n bootstrapped before interactions with it may take place (e.g. EC2). The\n local environment configuration is defined in the ~\/.juju\/environments.yaml\n file.\n\nMachine Agent\n Software which runs inside each machine that is part of an Environment, and\n is able to handle the needs of deploying and managing Service Units in this\n machine.\n\nProvisioning Agent\n Software responsible for automatically allocating and terminating machines\n in an Environment, as necessary for the requested configuration.\n\nRelation\n Relations are the way in which juju enables Services to communicate to each\n other, and the way in which the topology of Services is assembled. The\n Charm defines which Relations a given Service may establish, and what kind\n of interface these Relations require.\n\n In many cases, the establishment of a Relation will result into an actual\n TCP connection being created between the Service Units, but that's not\n necessarily the case. Relations may also be established to inform Services\n of configuration parameters, to request monitoring information, or any\n other details which the Charm author has chosen to make available.\n\nRepository\n A location where multiple charms are stored. Repositories may be as simple\n as a directory structure on a local disk, or as complex as a rich smart\n server supporting remote searching and so on.\n\nService\n juju operates in terms of services. A service is any application (or set of\n applications) that is integrated into the framework as an individual\n component which should generally be joined with other components to perform\n a more complex goal.\n\n As an example, WordPress could be deployed as a service and, to perform its\n tasks properly, might communicate with a database service and a load\n balancer service.\n\nService Configuration\n There are many different settings in a juju deployment, but the term\n Service Configuration refers to the settings which a user can define to\n customize the behavior of a Service.\n\n The behavior of a Service when its Service Configuration changes is\n entirely defined by its Charm.\n\nService Unit\n A running instance of a given juju Service. Simple Services may be deployed\n with a single Service Unit, but it is possible for an individual Service to\n have multiple Service Units running in independent machines. All Service\n Units for a given Service will share the same Charm, the same relations,\n and the same user-provided configuration.\n\n For instance, one may deploy a single MongoDB Service, and specify that it\n should run 3 Units, so that the replica set is resilient to\n failures. Internally, even though the replica set shares the same\n user-provided configuration, each Unit may be performing different roles\n within the replica set, as defined by the Charm.\n\nService Unit Agent\n Software which manages all the lifecycle of a single Service Unit.\n\n`\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Minio Cloud Storage, (C) 2019 Minio, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage cmd\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"encoding\/gob\"\n\t\"io\"\n\t\"net\/url\"\n\n\t\"github.com\/minio\/minio\/cmd\/http\"\n\t\"github.com\/minio\/minio\/cmd\/logger\"\n\t\"github.com\/minio\/minio\/cmd\/rest\"\n\t\"github.com\/minio\/minio\/pkg\/event\"\n\txnet \"github.com\/minio\/minio\/pkg\/net\"\n\t\"github.com\/minio\/minio\/pkg\/policy\"\n)\n\n\/\/ client to talk to peer Nodes.\ntype peerRESTClient struct {\n\thost *xnet.Host\n\trestClient *rest.Client\n\tconnected bool\n}\n\n\/\/ Reconnect to a peer rest server.\nfunc (client *peerRESTClient) reConnect() error {\n\t\/\/ correct (intelligent) retry logic will be\n\t\/\/ implemented in subsequent PRs.\n\tclient.connected = true\n\treturn nil\n}\n\n\/\/ Wrapper to restClient.Call to handle network errors, in case of network error the connection is marked disconnected\n\/\/ permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()\n\/\/ after verifying format.json\nfunc (client *peerRESTClient) call(method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {\n\tif !client.connected {\n\t\terr := client.reConnect()\n\t\tlogger.LogIf(context.Background(), err)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif values == nil {\n\t\tvalues = make(url.Values)\n\t}\n\n\trespBody, err = client.restClient.Call(method, values, body, length)\n\tif err == nil {\n\t\treturn respBody, nil\n\t}\n\n\tif isNetworkError(err) {\n\t\tclient.connected = false\n\t}\n\n\treturn nil, err\n}\n\n\/\/ Stringer provides a canonicalized representation of node.\nfunc (client *peerRESTClient) String() string {\n\treturn client.host.String()\n}\n\n\/\/ IsOnline - returns whether RPC client failed to connect or not.\nfunc (client *peerRESTClient) IsOnline() bool {\n\treturn client.connected\n}\n\n\/\/ Close - marks the client as closed.\nfunc (client *peerRESTClient) Close() error {\n\tclient.connected = false\n\tclient.restClient.Close()\n\treturn nil\n}\n\n\/\/ GetLocksResp stores various info from the client for each lock that is requested.\ntype GetLocksResp map[string][]lockRequesterInfo\n\n\/\/ GetLocks - fetch older locks for a remote node.\nfunc (client *peerRESTClient) GetLocks() (locks GetLocksResp, err error) {\n\trespBody, err := client.call(peerRESTMethodGetLocks, nil, nil, -1)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer http.DrainBody(respBody)\n\terr = gob.NewDecoder(respBody).Decode(&locks)\n\treturn locks, err\n}\n\n\/\/ ServerInfo - fetch server information for a remote node.\nfunc (client *peerRESTClient) ServerInfo() (info ServerInfoData, err error) {\n\trespBody, err := client.call(peerRESTMethodServerInfo, nil, nil, -1)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer http.DrainBody(respBody)\n\terr = gob.NewDecoder(respBody).Decode(&info)\n\treturn info, err\n}\n\n\/\/ CPULoadInfo - fetch CPU information for a remote node.\nfunc (client *peerRESTClient) CPULoadInfo() (info ServerCPULoadInfo, err error) {\n\trespBody, err := client.call(peerRESTMethodCPULoadInfo, nil, nil, -1)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer http.DrainBody(respBody)\n\terr = gob.NewDecoder(respBody).Decode(&info)\n\treturn info, err\n}\n\n\/\/ DrivePerfInfo - fetch Drive performance information for a remote node.\nfunc (client *peerRESTClient) DrivePerfInfo() (info ServerDrivesPerfInfo, err error) {\n\trespBody, err := client.call(peerRESTMethodDrivePerfInfo, nil, nil, -1)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer http.DrainBody(respBody)\n\terr = gob.NewDecoder(respBody).Decode(&info)\n\treturn info, err\n}\n\n\/\/ MemUsageInfo - fetch memory usage information for a remote node.\nfunc (client *peerRESTClient) MemUsageInfo() (info ServerMemUsageInfo, err error) {\n\trespBody, err := client.call(peerRESTMethodMemUsageInfo, nil, nil, -1)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer http.DrainBody(respBody)\n\terr = gob.NewDecoder(respBody).Decode(&info)\n\treturn info, err\n}\n\n\/\/ StartProfiling - Issues profiling command on the peer node.\nfunc (client *peerRESTClient) StartProfiling(profiler string) error {\n\tvalues := make(url.Values)\n\tvalues.Set(peerRESTProfiler, profiler)\n\trespBody, err := client.call(peerRESTMethodStartProfiling, values, nil, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\n\/\/ DownloadProfileData - download profiled data from a remote node.\nfunc (client *peerRESTClient) DownloadProfileData() (data []byte, err error) {\n\trespBody, err := client.call(peerRESTMethodDownloadProfilingData, nil, nil, -1)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer http.DrainBody(respBody)\n\terr = gob.NewDecoder(respBody).Decode(&data)\n\treturn data, err\n}\n\n\/\/ DeleteBucket - Delete notification and policies related to the bucket.\nfunc (client *peerRESTClient) DeleteBucket(bucket string) error {\n\tvalues := make(url.Values)\n\tvalues.Set(peerRESTBucket, bucket)\n\trespBody, err := client.call(peerRESTMethodDeleteBucket, values, nil, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\n\/\/ ReloadFormat - reload format on the peer node.\nfunc (client *peerRESTClient) ReloadFormat(dryRun bool) error {\n\tvalues := make(url.Values)\n\tif dryRun {\n\t\tvalues.Set(peerRESTDryRun, \"true\")\n\t} else {\n\t\tvalues.Set(peerRESTDryRun, \"false\")\n\t}\n\n\trespBody, err := client.call(peerRESTMethodReloadFormat, values, nil, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\n\/\/ ListenBucketNotification - send listent bucket notification to peer nodes.\nfunc (client *peerRESTClient) ListenBucketNotification(bucket string, eventNames []event.Name,\n\tpattern string, targetID event.TargetID, addr xnet.Host) error {\n\targs := listenBucketNotificationReq{\n\t\tEventNames: eventNames,\n\t\tPattern: pattern,\n\t\tTargetID: targetID,\n\t\tAddr: addr,\n\t}\n\n\tvalues := make(url.Values)\n\tvalues.Set(peerRESTBucket, bucket)\n\n\tvar reader bytes.Buffer\n\terr := gob.NewEncoder(&reader).Encode(args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trespBody, err := client.call(peerRESTMethodBucketNotificationListen, values, &reader, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\n\/\/ SendEvent - calls send event RPC.\nfunc (client *peerRESTClient) SendEvent(bucket string, targetID, remoteTargetID event.TargetID, eventData event.Event) error {\n\targs := sendEventRequest{\n\t\tTargetID: remoteTargetID,\n\t\tEvent: eventData,\n\t}\n\n\tvalues := make(url.Values)\n\tvalues.Set(peerRESTBucket, bucket)\n\n\tvar reader bytes.Buffer\n\terr := gob.NewEncoder(&reader).Encode(args)\n\tif err != nil {\n\t\treturn err\n\t}\n\trespBody, err := client.call(peerRESTMethodSendEvent, values, &reader, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar eventResp sendEventResp\n\tdefer http.DrainBody(respBody)\n\terr = gob.NewDecoder(respBody).Decode(&eventResp)\n\n\tif err != nil || !eventResp.Success {\n\t\treqInfo := &logger.ReqInfo{BucketName: bucket}\n\t\treqInfo.AppendTags(\"targetID\", targetID.Name)\n\t\treqInfo.AppendTags(\"event\", eventData.EventName.String())\n\t\tctx := logger.SetReqInfo(context.Background(), reqInfo)\n\t\tlogger.LogIf(ctx, err)\n\t\tglobalNotificationSys.RemoveRemoteTarget(bucket, targetID)\n\t}\n\n\treturn err\n}\n\n\/\/ RemoteTargetExist - calls remote target ID exist REST API.\nfunc (client *peerRESTClient) RemoteTargetExist(bucket string, targetID event.TargetID) (bool, error) {\n\tvalues := make(url.Values)\n\tvalues.Set(peerRESTBucket, bucket)\n\n\tvar reader bytes.Buffer\n\terr := gob.NewEncoder(&reader).Encode(targetID)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\trespBody, err := client.call(peerRESTMethodTargetExists, values, &reader, -1)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer http.DrainBody(respBody)\n\tvar targetExists remoteTargetExistsResp\n\terr = gob.NewDecoder(respBody).Decode(&targetExists)\n\treturn targetExists.Exists, err\n}\n\n\/\/ RemoveBucketPolicy - Remove bucket policy on the peer node.\nfunc (client *peerRESTClient) RemoveBucketPolicy(bucket string) error {\n\tvalues := make(url.Values)\n\tvalues.Set(peerRESTBucket, bucket)\n\trespBody, err := client.call(peerRESTMethodBucketPolicyRemove, values, nil, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\n\/\/ SetBucketPolicy - Set bucket policy on the peer node.\nfunc (client *peerRESTClient) SetBucketPolicy(bucket string, bucketPolicy *policy.Policy) error {\n\tvalues := make(url.Values)\n\tvalues.Set(peerRESTBucket, bucket)\n\n\tvar reader bytes.Buffer\n\terr := gob.NewEncoder(&reader).Encode(bucketPolicy)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trespBody, err := client.call(peerRESTMethodBucketPolicySet, values, &reader, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\n\/\/ PutBucketNotification - Put bucket notification on the peer node.\nfunc (client *peerRESTClient) PutBucketNotification(bucket string, rulesMap event.RulesMap) error {\n\tvalues := make(url.Values)\n\tvalues.Set(peerRESTBucket, bucket)\n\n\tvar reader bytes.Buffer\n\terr := gob.NewEncoder(&reader).Encode(&rulesMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trespBody, err := client.call(peerRESTMethodBucketNotificationPut, values, &reader, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\n\/\/ LoadUsers - send load users command to peer nodes.\nfunc (client *peerRESTClient) LoadUsers() (err error) {\n\trespBody, err := client.call(peerRESTMethodLoadUsers, nil, nil, -1)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\n\/\/ LoadCredentials - send load credentials command to peer nodes.\nfunc (client *peerRESTClient) LoadCredentials() (err error) {\n\trespBody, err := client.call(peerRESTMethodLoadCredentials, nil, nil, -1)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\n\/\/ SignalService - sends signal to peer nodes.\nfunc (client *peerRESTClient) SignalService(sig serviceSignal) error {\n\tvalues := make(url.Values)\n\tvalues.Set(peerRESTSignal, string(sig))\n\trespBody, err := client.call(peerRESTMethodSignalService, values, nil, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\nfunc getRemoteHosts(endpoints EndpointList) []*xnet.Host {\n\tvar remoteHosts []*xnet.Host\n\tfor _, hostStr := range GetRemotePeers(endpoints) {\n\t\thost, err := xnet.ParseHost(hostStr)\n\t\tlogger.FatalIf(err, \"Unable to parse peer Host\")\n\t\tremoteHosts = append(remoteHosts, host)\n\t}\n\n\treturn remoteHosts\n}\n\nfunc getRestClients(peerHosts []*xnet.Host) ([]*peerRESTClient, error) {\n\trestClients := make([]*peerRESTClient, len(peerHosts))\n\tfor i, host := range peerHosts {\n\t\tclient, err := newPeerRESTClient(host)\n\t\tif err != nil {\n\t\t\tlogger.LogIf(context.Background(), err)\n\t\t}\n\t\trestClients[i] = client\n\t}\n\n\treturn restClients, nil\n}\n\n\/\/ Returns a peer rest client.\nfunc newPeerRESTClient(peer *xnet.Host) (*peerRESTClient, error) {\n\n\tscheme := \"http\"\n\tif globalIsSSL {\n\t\tscheme = \"https\"\n\t}\n\n\tserverURL := &url.URL{\n\t\tScheme: scheme,\n\t\tHost: peer.String(),\n\t\tPath: peerRESTPath,\n\t}\n\n\tvar tlsConfig *tls.Config\n\tif globalIsSSL {\n\t\ttlsConfig = &tls.Config{\n\t\t\tServerName: peer.String(),\n\t\t\tRootCAs: globalRootCAs,\n\t\t}\n\t}\n\n\trestClient, err := rest.NewClient(serverURL, tlsConfig, rest.DefaultRESTTimeout, newAuthToken)\n\n\tif err != nil {\n\t\treturn &peerRESTClient{host: peer, restClient: restClient, connected: false}, err\n\t}\n\n\treturn &peerRESTClient{host: peer, restClient: restClient, connected: true}, nil\n}\n<commit_msg>Fix regression in peer clients in TLS setups (#7391)<commit_after>\/*\n * Minio Cloud Storage, (C) 2019 Minio, Inc.\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage cmd\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"encoding\/gob\"\n\t\"io\"\n\t\"net\/url\"\n\n\t\"github.com\/minio\/minio\/cmd\/http\"\n\t\"github.com\/minio\/minio\/cmd\/logger\"\n\t\"github.com\/minio\/minio\/cmd\/rest\"\n\t\"github.com\/minio\/minio\/pkg\/event\"\n\txnet \"github.com\/minio\/minio\/pkg\/net\"\n\t\"github.com\/minio\/minio\/pkg\/policy\"\n)\n\n\/\/ client to talk to peer Nodes.\ntype peerRESTClient struct {\n\thost *xnet.Host\n\trestClient *rest.Client\n\tconnected bool\n}\n\n\/\/ Reconnect to a peer rest server.\nfunc (client *peerRESTClient) reConnect() error {\n\t\/\/ correct (intelligent) retry logic will be\n\t\/\/ implemented in subsequent PRs.\n\tclient.connected = true\n\treturn nil\n}\n\n\/\/ Wrapper to restClient.Call to handle network errors, in case of network error the connection is marked disconnected\n\/\/ permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()\n\/\/ after verifying format.json\nfunc (client *peerRESTClient) call(method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {\n\tif !client.connected {\n\t\terr := client.reConnect()\n\t\tlogger.LogIf(context.Background(), err)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif values == nil {\n\t\tvalues = make(url.Values)\n\t}\n\n\trespBody, err = client.restClient.Call(method, values, body, length)\n\tif err == nil {\n\t\treturn respBody, nil\n\t}\n\n\tif isNetworkError(err) {\n\t\tclient.connected = false\n\t}\n\n\treturn nil, err\n}\n\n\/\/ Stringer provides a canonicalized representation of node.\nfunc (client *peerRESTClient) String() string {\n\treturn client.host.String()\n}\n\n\/\/ IsOnline - returns whether RPC client failed to connect or not.\nfunc (client *peerRESTClient) IsOnline() bool {\n\treturn client.connected\n}\n\n\/\/ Close - marks the client as closed.\nfunc (client *peerRESTClient) Close() error {\n\tclient.connected = false\n\tclient.restClient.Close()\n\treturn nil\n}\n\n\/\/ GetLocksResp stores various info from the client for each lock that is requested.\ntype GetLocksResp map[string][]lockRequesterInfo\n\n\/\/ GetLocks - fetch older locks for a remote node.\nfunc (client *peerRESTClient) GetLocks() (locks GetLocksResp, err error) {\n\trespBody, err := client.call(peerRESTMethodGetLocks, nil, nil, -1)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer http.DrainBody(respBody)\n\terr = gob.NewDecoder(respBody).Decode(&locks)\n\treturn locks, err\n}\n\n\/\/ ServerInfo - fetch server information for a remote node.\nfunc (client *peerRESTClient) ServerInfo() (info ServerInfoData, err error) {\n\trespBody, err := client.call(peerRESTMethodServerInfo, nil, nil, -1)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer http.DrainBody(respBody)\n\terr = gob.NewDecoder(respBody).Decode(&info)\n\treturn info, err\n}\n\n\/\/ CPULoadInfo - fetch CPU information for a remote node.\nfunc (client *peerRESTClient) CPULoadInfo() (info ServerCPULoadInfo, err error) {\n\trespBody, err := client.call(peerRESTMethodCPULoadInfo, nil, nil, -1)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer http.DrainBody(respBody)\n\terr = gob.NewDecoder(respBody).Decode(&info)\n\treturn info, err\n}\n\n\/\/ DrivePerfInfo - fetch Drive performance information for a remote node.\nfunc (client *peerRESTClient) DrivePerfInfo() (info ServerDrivesPerfInfo, err error) {\n\trespBody, err := client.call(peerRESTMethodDrivePerfInfo, nil, nil, -1)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer http.DrainBody(respBody)\n\terr = gob.NewDecoder(respBody).Decode(&info)\n\treturn info, err\n}\n\n\/\/ MemUsageInfo - fetch memory usage information for a remote node.\nfunc (client *peerRESTClient) MemUsageInfo() (info ServerMemUsageInfo, err error) {\n\trespBody, err := client.call(peerRESTMethodMemUsageInfo, nil, nil, -1)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer http.DrainBody(respBody)\n\terr = gob.NewDecoder(respBody).Decode(&info)\n\treturn info, err\n}\n\n\/\/ StartProfiling - Issues profiling command on the peer node.\nfunc (client *peerRESTClient) StartProfiling(profiler string) error {\n\tvalues := make(url.Values)\n\tvalues.Set(peerRESTProfiler, profiler)\n\trespBody, err := client.call(peerRESTMethodStartProfiling, values, nil, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\n\/\/ DownloadProfileData - download profiled data from a remote node.\nfunc (client *peerRESTClient) DownloadProfileData() (data []byte, err error) {\n\trespBody, err := client.call(peerRESTMethodDownloadProfilingData, nil, nil, -1)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer http.DrainBody(respBody)\n\terr = gob.NewDecoder(respBody).Decode(&data)\n\treturn data, err\n}\n\n\/\/ DeleteBucket - Delete notification and policies related to the bucket.\nfunc (client *peerRESTClient) DeleteBucket(bucket string) error {\n\tvalues := make(url.Values)\n\tvalues.Set(peerRESTBucket, bucket)\n\trespBody, err := client.call(peerRESTMethodDeleteBucket, values, nil, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\n\/\/ ReloadFormat - reload format on the peer node.\nfunc (client *peerRESTClient) ReloadFormat(dryRun bool) error {\n\tvalues := make(url.Values)\n\tif dryRun {\n\t\tvalues.Set(peerRESTDryRun, \"true\")\n\t} else {\n\t\tvalues.Set(peerRESTDryRun, \"false\")\n\t}\n\n\trespBody, err := client.call(peerRESTMethodReloadFormat, values, nil, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\n\/\/ ListenBucketNotification - send listent bucket notification to peer nodes.\nfunc (client *peerRESTClient) ListenBucketNotification(bucket string, eventNames []event.Name,\n\tpattern string, targetID event.TargetID, addr xnet.Host) error {\n\targs := listenBucketNotificationReq{\n\t\tEventNames: eventNames,\n\t\tPattern: pattern,\n\t\tTargetID: targetID,\n\t\tAddr: addr,\n\t}\n\n\tvalues := make(url.Values)\n\tvalues.Set(peerRESTBucket, bucket)\n\n\tvar reader bytes.Buffer\n\terr := gob.NewEncoder(&reader).Encode(args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trespBody, err := client.call(peerRESTMethodBucketNotificationListen, values, &reader, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\n\/\/ SendEvent - calls send event RPC.\nfunc (client *peerRESTClient) SendEvent(bucket string, targetID, remoteTargetID event.TargetID, eventData event.Event) error {\n\targs := sendEventRequest{\n\t\tTargetID: remoteTargetID,\n\t\tEvent: eventData,\n\t}\n\n\tvalues := make(url.Values)\n\tvalues.Set(peerRESTBucket, bucket)\n\n\tvar reader bytes.Buffer\n\terr := gob.NewEncoder(&reader).Encode(args)\n\tif err != nil {\n\t\treturn err\n\t}\n\trespBody, err := client.call(peerRESTMethodSendEvent, values, &reader, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar eventResp sendEventResp\n\tdefer http.DrainBody(respBody)\n\terr = gob.NewDecoder(respBody).Decode(&eventResp)\n\n\tif err != nil || !eventResp.Success {\n\t\treqInfo := &logger.ReqInfo{BucketName: bucket}\n\t\treqInfo.AppendTags(\"targetID\", targetID.Name)\n\t\treqInfo.AppendTags(\"event\", eventData.EventName.String())\n\t\tctx := logger.SetReqInfo(context.Background(), reqInfo)\n\t\tlogger.LogIf(ctx, err)\n\t\tglobalNotificationSys.RemoveRemoteTarget(bucket, targetID)\n\t}\n\n\treturn err\n}\n\n\/\/ RemoteTargetExist - calls remote target ID exist REST API.\nfunc (client *peerRESTClient) RemoteTargetExist(bucket string, targetID event.TargetID) (bool, error) {\n\tvalues := make(url.Values)\n\tvalues.Set(peerRESTBucket, bucket)\n\n\tvar reader bytes.Buffer\n\terr := gob.NewEncoder(&reader).Encode(targetID)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\trespBody, err := client.call(peerRESTMethodTargetExists, values, &reader, -1)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer http.DrainBody(respBody)\n\tvar targetExists remoteTargetExistsResp\n\terr = gob.NewDecoder(respBody).Decode(&targetExists)\n\treturn targetExists.Exists, err\n}\n\n\/\/ RemoveBucketPolicy - Remove bucket policy on the peer node.\nfunc (client *peerRESTClient) RemoveBucketPolicy(bucket string) error {\n\tvalues := make(url.Values)\n\tvalues.Set(peerRESTBucket, bucket)\n\trespBody, err := client.call(peerRESTMethodBucketPolicyRemove, values, nil, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\n\/\/ SetBucketPolicy - Set bucket policy on the peer node.\nfunc (client *peerRESTClient) SetBucketPolicy(bucket string, bucketPolicy *policy.Policy) error {\n\tvalues := make(url.Values)\n\tvalues.Set(peerRESTBucket, bucket)\n\n\tvar reader bytes.Buffer\n\terr := gob.NewEncoder(&reader).Encode(bucketPolicy)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trespBody, err := client.call(peerRESTMethodBucketPolicySet, values, &reader, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\n\/\/ PutBucketNotification - Put bucket notification on the peer node.\nfunc (client *peerRESTClient) PutBucketNotification(bucket string, rulesMap event.RulesMap) error {\n\tvalues := make(url.Values)\n\tvalues.Set(peerRESTBucket, bucket)\n\n\tvar reader bytes.Buffer\n\terr := gob.NewEncoder(&reader).Encode(&rulesMap)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trespBody, err := client.call(peerRESTMethodBucketNotificationPut, values, &reader, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\n\/\/ LoadUsers - send load users command to peer nodes.\nfunc (client *peerRESTClient) LoadUsers() (err error) {\n\trespBody, err := client.call(peerRESTMethodLoadUsers, nil, nil, -1)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\n\/\/ LoadCredentials - send load credentials command to peer nodes.\nfunc (client *peerRESTClient) LoadCredentials() (err error) {\n\trespBody, err := client.call(peerRESTMethodLoadCredentials, nil, nil, -1)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\n\/\/ SignalService - sends signal to peer nodes.\nfunc (client *peerRESTClient) SignalService(sig serviceSignal) error {\n\tvalues := make(url.Values)\n\tvalues.Set(peerRESTSignal, string(sig))\n\trespBody, err := client.call(peerRESTMethodSignalService, values, nil, -1)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer http.DrainBody(respBody)\n\treturn nil\n}\n\nfunc getRemoteHosts(endpoints EndpointList) []*xnet.Host {\n\tvar remoteHosts []*xnet.Host\n\tfor _, hostStr := range GetRemotePeers(endpoints) {\n\t\thost, err := xnet.ParseHost(hostStr)\n\t\tlogger.FatalIf(err, \"Unable to parse peer Host\")\n\t\tremoteHosts = append(remoteHosts, host)\n\t}\n\n\treturn remoteHosts\n}\n\nfunc getRestClients(peerHosts []*xnet.Host) ([]*peerRESTClient, error) {\n\trestClients := make([]*peerRESTClient, len(peerHosts))\n\tfor i, host := range peerHosts {\n\t\tclient, err := newPeerRESTClient(host)\n\t\tif err != nil {\n\t\t\tlogger.LogIf(context.Background(), err)\n\t\t}\n\t\trestClients[i] = client\n\t}\n\n\treturn restClients, nil\n}\n\n\/\/ Returns a peer rest client.\nfunc newPeerRESTClient(peer *xnet.Host) (*peerRESTClient, error) {\n\n\tscheme := \"http\"\n\tif globalIsSSL {\n\t\tscheme = \"https\"\n\t}\n\n\tserverURL := &url.URL{\n\t\tScheme: scheme,\n\t\tHost: peer.String(),\n\t\tPath: peerRESTPath,\n\t}\n\n\tvar tlsConfig *tls.Config\n\tif globalIsSSL {\n\t\ttlsConfig = &tls.Config{\n\t\t\tServerName: peer.Name,\n\t\t\tRootCAs: globalRootCAs,\n\t\t}\n\t}\n\n\trestClient, err := rest.NewClient(serverURL, tlsConfig, rest.DefaultRESTTimeout, newAuthToken)\n\n\tif err != nil {\n\t\treturn &peerRESTClient{host: peer, restClient: restClient, connected: false}, err\n\t}\n\n\treturn &peerRESTClient{host: peer, restClient: restClient, connected: true}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/akhenakh\/regionagogo\"\n\t\"github.com\/akhenakh\/regionagogo\/db\/boltdb\"\n\tpb \"github.com\/akhenakh\/regionagogo\/regionagogosvc\"\n\t\"google.golang.org\/grpc\"\n)\n\ntype server struct {\n\tregionagogo.GeoFenceDB\n}\n\nfunc (s *server) GetRegion(ctx context.Context, p *pb.Point) (*pb.RegionResponse, error) {\n\tregion, err := s.StubbingQuery(float64(p.Latitude), float64(p.Longitude))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif region == nil || len(region) == 0 {\n\t\treturn &pb.RegionResponse{Code: \"unknown\"}, nil\n\t}\n\n\t\/\/ default is to lookup for \"iso\"\n\tiso, ok := region[0].Data[\"iso\"]\n\tif !ok {\n\t\treturn &pb.RegionResponse{Code: \"unknown\"}, nil\n\t}\n\n\trs := pb.RegionResponse{Code: iso}\n\treturn &rs, nil\n}\n\n\/\/ queryHandler takes a lat & lng query params and return a JSON\n\/\/ with the country of the coordinate\nfunc (s *server) queryHandler(w http.ResponseWriter, r *http.Request) {\n\tquery := r.URL.Query()\n\tslat := query.Get(\"lat\")\n\tlat, err := strconv.ParseFloat(slat, 64)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 400)\n\t\treturn\n\t}\n\tslng := query.Get(\"lng\")\n\tlng, err := strconv.ParseFloat(slng, 64)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 400)\n\t\treturn\n\t}\n\n\tfences, err := s.StubbingQuery(lat, lng)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\n\tif len(fences) < 1 {\n\t\tjs, _ := json.Marshal(map[string]string{\"name\": \"unknown\"})\n\t\tw.Write(js)\n\t\treturn\n\t}\n\n\tjs, _ := json.Marshal(fences[0].Data)\n\tw.Write(js)\n}\n\nfunc main() {\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n\n\tdbpath := flag.String(\"dbpath\", \"\", \"Database path\")\n\tdebug := flag.Bool(\"debug\", false, \"Enable debug\")\n\tcachedEntries := flag.Uint(\"cachedEntries\", 0, \"Region Cache size, 0 for disabled\")\n\n\tflag.Parse()\n\topts := []boltdb.GeoFenceBoltDBOption{\n\t\tboltdb.WithCachedEntries(*cachedEntries),\n\t\tboltdb.WithDebug(*debug),\n\t}\n\tgs, err := boltdb.NewGeoFenceBoltDB(*dbpath, opts...)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ts := &server{GeoFenceDB: gs}\n\thttp.HandleFunc(\"\/query\", s.queryHandler)\n\tgo func() {\n\t\tlog.Println(http.ListenAndServe(\":8082\", nil))\n\t}()\n\n\tlis, err := net.Listen(\"tcp\", \":8083\")\n\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\n\tgrpcServer := grpc.NewServer()\n\tpb.RegisterRegionAGogoServer(grpcServer, s)\n\tgrpcServer.Serve(lis)\n}\n<commit_msg>optionnal ports<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/akhenakh\/regionagogo\"\n\t\"github.com\/akhenakh\/regionagogo\/db\/boltdb\"\n\tpb \"github.com\/akhenakh\/regionagogo\/regionagogosvc\"\n\t\"google.golang.org\/grpc\"\n)\n\ntype server struct {\n\tregionagogo.GeoFenceDB\n}\n\nfunc (s *server) GetRegion(ctx context.Context, p *pb.Point) (*pb.RegionResponse, error) {\n\tregion, err := s.StubbingQuery(float64(p.Latitude), float64(p.Longitude))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif region == nil || len(region) == 0 {\n\t\treturn &pb.RegionResponse{Code: \"unknown\"}, nil\n\t}\n\n\t\/\/ default is to lookup for \"iso\"\n\tiso, ok := region[0].Data[\"iso\"]\n\tif !ok {\n\t\treturn &pb.RegionResponse{Code: \"unknown\"}, nil\n\t}\n\n\trs := pb.RegionResponse{Code: iso}\n\treturn &rs, nil\n}\n\n\/\/ queryHandler takes a lat & lng query params and return a JSON\n\/\/ with the country of the coordinate\nfunc (s *server) queryHandler(w http.ResponseWriter, r *http.Request) {\n\tquery := r.URL.Query()\n\tslat := query.Get(\"lat\")\n\tlat, err := strconv.ParseFloat(slat, 64)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 400)\n\t\treturn\n\t}\n\tslng := query.Get(\"lng\")\n\tlng, err := strconv.ParseFloat(slng, 64)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 400)\n\t\treturn\n\t}\n\n\tfences, err := s.StubbingQuery(lat, lng)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\n\tif len(fences) < 1 {\n\t\tjs, _ := json.Marshal(map[string]string{\"name\": \"unknown\"})\n\t\tw.Write(js)\n\t\treturn\n\t}\n\n\tjs, _ := json.Marshal(fences[0].Data)\n\tw.Write(js)\n}\n\nfunc main() {\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n\n\tdbpath := flag.String(\"dbpath\", \"\", \"Database path\")\n\tdebug := flag.Bool(\"debug\", false, \"Enable debug\")\n\thttpPort := flag.Int(\"httpPort\", 8082, \"http debug port to listen on\")\n\tgrpcPort := flag.Int(\"grpcPort\", 8083, \"grpc port to listen on\")\n\tcachedEntries := flag.Uint(\"cachedEntries\", 0, \"Region Cache size, 0 for disabled\")\n\n\tflag.Parse()\n\topts := []boltdb.GeoFenceBoltDBOption{\n\t\tboltdb.WithCachedEntries(*cachedEntries),\n\t\tboltdb.WithDebug(*debug),\n\t}\n\tgs, err := boltdb.NewGeoFenceBoltDB(*dbpath, opts...)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ts := &server{GeoFenceDB: gs}\n\thttp.HandleFunc(\"\/query\", s.queryHandler)\n\tgo func() {\n\t\tlog.Println(http.ListenAndServe(fmt.Sprintf(\":%d\", *httpPort), nil))\n\t}()\n\n\tlis, err := net.Listen(\"tcp\", fmt.Sprintf(\":%d\", *grpcPort))\n\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to listen: %v\", err)\n\t}\n\n\tgrpcServer := grpc.NewServer()\n\tpb.RegisterRegionAGogoServer(grpcServer, s)\n\tgrpcServer.Serve(lis)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\n\t\"github.com\/restic\/restic\/internal\/backend\"\n\t\"github.com\/restic\/restic\/internal\/restic\"\n\t\"github.com\/restic\/restic\/internal\/walker\"\n\n\t\"github.com\/minio\/sha256-simd\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar cmdStats = &cobra.Command{\n\tUse: \"stats [flags] [snapshot ID] [...]\",\n\tShort: \"Scan the repository and show basic statistics\",\n\tLong: `\nThe \"stats\" command walks one or multiple snapshots in a repository\nand accumulates statistics about the data stored therein. It reports \non the number of unique files and their sizes, according to one of\nthe counting modes as given by the --mode flag.\n\nIt operates on all snapshots matching the selection criteria or all\nsnapshots if nothing is specified. The special snapshot ID \"latest\"\nis also supported. Some modes make more sense over \njust a single snapshot, while others are useful across all snapshots,\ndepending on what you are trying to calculate.\n\nThe modes are:\n\n* restore-size: (default) Counts the size of the restored files.\n* files-by-contents: Counts total size of files, where a file is\n considered unique if it has unique contents.\n* raw-data: Counts the size of blobs in the repository, regardless of\n how many files reference them.\n* blobs-per-file: A combination of files-by-contents and raw-data.\n\nRefer to the online manual for more details about each mode.\n\nEXIT STATUS\n===========\n\nExit status is 0 if the command was successful, and non-zero if there was any error.\n`,\n\tDisableAutoGenTag: true,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\treturn runStats(globalOptions, args)\n\t},\n}\n\n\/\/ StatsOptions collects all options for the stats command.\ntype StatsOptions struct {\n\t\/\/ the mode of counting to perform (see consts for available modes)\n\tcountMode string\n\n\t\/\/ filter snapshots by, if given by user\n\tHosts []string\n\tTags restic.TagLists\n\tPaths []string\n}\n\nvar statsOptions StatsOptions\n\nfunc init() {\n\tcmdRoot.AddCommand(cmdStats)\n\tf := cmdStats.Flags()\n\tf.StringVar(&statsOptions.countMode, \"mode\", countModeRestoreSize, \"counting mode: restore-size (default), files-by-contents, blobs-per-file or raw-data\")\n\tf.StringArrayVarP(&statsOptions.Hosts, \"host\", \"H\", nil, \"only consider snapshots with the given `host` (can be specified multiple times)\")\n\tf.Var(&statsOptions.Tags, \"tag\", \"only consider snapshots which include this `taglist` in the format `tag[,tag,...]` (can be specified multiple times)\")\n\tf.StringArrayVar(&statsOptions.Paths, \"path\", nil, \"only consider snapshots which include this (absolute) `path` (can be specified multiple times)\")\n}\n\nfunc runStats(gopts GlobalOptions, args []string) error {\n\terr := verifyStatsInput(gopts, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx, cancel := context.WithCancel(gopts.ctx)\n\tdefer cancel()\n\n\trepo, err := OpenRepository(gopts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !gopts.NoLock {\n\t\tlock, err := lockRepo(ctx, repo)\n\t\tdefer unlockRepo(lock)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tsnapshotLister, err := backend.MemorizeList(gopts.ctx, repo.Backend(), restic.SnapshotFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = repo.LoadIndex(ctx); err != nil {\n\t\treturn err\n\t}\n\n\tif !gopts.JSON {\n\t\tPrintf(\"scanning...\\n\")\n\t}\n\n\t\/\/ create a container for the stats (and other needed state)\n\tstats := &statsContainer{\n\t\tuniqueFiles: make(map[fileID]struct{}),\n\t\tfileBlobs: make(map[string]restic.IDSet),\n\t\tblobs: restic.NewBlobSet(),\n\t\tSnapshotsCount: 0,\n\t}\n\n\tfor sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, statsOptions.Hosts, statsOptions.Tags, statsOptions.Paths, args) {\n\t\terr = statsWalkSnapshot(ctx, sn, repo, stats)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error walking snapshot: %v\", err)\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif statsOptions.countMode == countModeRawData {\n\t\t\/\/ the blob handles have been collected, but not yet counted\n\t\tfor blobHandle := range stats.blobs {\n\t\t\tpbs := repo.Index().Lookup(blobHandle)\n\t\t\tif len(pbs) == 0 {\n\t\t\t\treturn fmt.Errorf(\"blob %v not found\", blobHandle)\n\t\t\t}\n\t\t\tstats.TotalSize += uint64(pbs[0].Length)\n\t\t\tstats.TotalBlobCount++\n\t\t}\n\t}\n\n\tif gopts.JSON {\n\t\terr = json.NewEncoder(globalOptions.stdout).Encode(stats)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"encoding output: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\n\tPrintf(\"Stats in %s mode:\\n\", statsOptions.countMode)\n\tPrintf(\"Snapshots processed: %d\\n\", stats.SnapshotsCount)\n\n\tif stats.TotalBlobCount > 0 {\n\t\tPrintf(\" Total Blob Count: %d\\n\", stats.TotalBlobCount)\n\t}\n\tif stats.TotalFileCount > 0 {\n\t\tPrintf(\" Total File Count: %d\\n\", stats.TotalFileCount)\n\t}\n\tPrintf(\" Total Size: %-5s\\n\", formatBytes(stats.TotalSize))\n\n\treturn nil\n}\n\nfunc statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo restic.Repository, stats *statsContainer) error {\n\tif snapshot.Tree == nil {\n\t\treturn fmt.Errorf(\"snapshot %s has nil tree\", snapshot.ID().Str())\n\t}\n\n\tstats.SnapshotsCount++\n\n\tif statsOptions.countMode == countModeRawData {\n\t\t\/\/ count just the sizes of unique blobs; we don't need to walk the tree\n\t\t\/\/ ourselves in this case, since a nifty function does it for us\n\t\treturn restic.FindUsedBlobs(ctx, repo, restic.IDs{*snapshot.Tree}, stats.blobs, nil)\n\t}\n\n\tuniqueInodes := make(map[uint64]struct{})\n\terr := walker.Walk(ctx, repo, *snapshot.Tree, restic.NewIDSet(), statsWalkTree(repo, stats, uniqueInodes))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"walking tree %s: %v\", *snapshot.Tree, err)\n\t}\n\n\treturn nil\n}\n\nfunc statsWalkTree(repo restic.Repository, stats *statsContainer, uniqueInodes map[uint64]struct{}) walker.WalkFunc {\n\treturn func(parentTreeID restic.ID, npath string, node *restic.Node, nodeErr error) (bool, error) {\n\t\tif nodeErr != nil {\n\t\t\treturn true, nodeErr\n\t\t}\n\t\tif node == nil {\n\t\t\treturn true, nil\n\t\t}\n\n\t\tif statsOptions.countMode == countModeUniqueFilesByContents || statsOptions.countMode == countModeBlobsPerFile {\n\t\t\t\/\/ only count this file if we haven't visited it before\n\t\t\tfid := makeFileIDByContents(node)\n\t\t\tif _, ok := stats.uniqueFiles[fid]; !ok {\n\t\t\t\t\/\/ mark the file as visited\n\t\t\t\tstats.uniqueFiles[fid] = struct{}{}\n\n\t\t\t\tif statsOptions.countMode == countModeUniqueFilesByContents {\n\t\t\t\t\t\/\/ simply count the size of each unique file (unique by contents only)\n\t\t\t\t\tstats.TotalSize += node.Size\n\t\t\t\t\tstats.TotalFileCount++\n\t\t\t\t}\n\t\t\t\tif statsOptions.countMode == countModeBlobsPerFile {\n\t\t\t\t\t\/\/ count the size of each unique blob reference, which is\n\t\t\t\t\t\/\/ by unique file (unique by contents and file path)\n\t\t\t\t\tfor _, blobID := range node.Content {\n\t\t\t\t\t\t\/\/ ensure we have this file (by path) in our map; in this\n\t\t\t\t\t\t\/\/ mode, a file is unique by both contents and path\n\t\t\t\t\t\tnodePath := filepath.Join(npath, node.Name)\n\t\t\t\t\t\tif _, ok := stats.fileBlobs[nodePath]; !ok {\n\t\t\t\t\t\t\tstats.fileBlobs[nodePath] = restic.NewIDSet()\n\t\t\t\t\t\t\tstats.TotalFileCount++\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif _, ok := stats.fileBlobs[nodePath][blobID]; !ok {\n\t\t\t\t\t\t\t\/\/ is always a data blob since we're accessing it via a file's Content array\n\t\t\t\t\t\t\tblobSize, found := repo.LookupBlobSize(blobID, restic.DataBlob)\n\t\t\t\t\t\t\tif !found {\n\t\t\t\t\t\t\t\treturn true, fmt.Errorf(\"blob %s not found for tree %s\", blobID, parentTreeID)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\/\/ count the blob's size, then add this blob by this\n\t\t\t\t\t\t\t\/\/ file (path) so we don't double-count it\n\t\t\t\t\t\t\tstats.TotalSize += uint64(blobSize)\n\t\t\t\t\t\t\tstats.fileBlobs[nodePath].Insert(blobID)\n\t\t\t\t\t\t\t\/\/ this mode also counts total unique blob _references_ per file\n\t\t\t\t\t\t\tstats.TotalBlobCount++\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif statsOptions.countMode == countModeRestoreSize {\n\t\t\t\/\/ as this is a file in the snapshot, we can simply count its\n\t\t\t\/\/ size without worrying about uniqueness, since duplicate files\n\t\t\t\/\/ will still be restored\n\t\t\tstats.TotalFileCount++\n\n\t\t\t\/\/ if inodes are present, only count each inode once\n\t\t\t\/\/ (hard links do not increase restore size)\n\t\t\tif _, ok := uniqueInodes[node.Inode]; !ok || node.Inode == 0 {\n\t\t\t\tuniqueInodes[node.Inode] = struct{}{}\n\t\t\t\tstats.TotalSize += node.Size\n\t\t\t}\n\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn true, nil\n\t}\n}\n\n\/\/ makeFileIDByContents returns a hash of the blob IDs of the\n\/\/ node's Content in sequence.\nfunc makeFileIDByContents(node *restic.Node) fileID {\n\tvar bb []byte\n\tfor _, c := range node.Content {\n\t\tbb = append(bb, []byte(c[:])...)\n\t}\n\treturn sha256.Sum256(bb)\n}\n\nfunc verifyStatsInput(gopts GlobalOptions, args []string) error {\n\t\/\/ require a recognized counting mode\n\tswitch statsOptions.countMode {\n\tcase countModeRestoreSize:\n\tcase countModeUniqueFilesByContents:\n\tcase countModeBlobsPerFile:\n\tcase countModeRawData:\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown counting mode: %s (use the -h flag to get a list of supported modes)\", statsOptions.countMode)\n\t}\n\n\treturn nil\n}\n\n\/\/ statsContainer holds information during a walk of a repository\n\/\/ to collect information about it, as well as state needed\n\/\/ for a successful and efficient walk.\ntype statsContainer struct {\n\tTotalSize uint64 `json:\"total_size\"`\n\tTotalFileCount uint64 `json:\"total_file_count\"`\n\tTotalBlobCount uint64 `json:\"total_blob_count,omitempty\"`\n\t\/\/ holds count of all considered snapshots\n\tSnapshotsCount int `json:\"snapshots_count\"`\n\n\t\/\/ uniqueFiles marks visited files according to their\n\t\/\/ contents (hashed sequence of content blob IDs)\n\tuniqueFiles map[fileID]struct{}\n\n\t\/\/ fileBlobs maps a file name (path) to the set of\n\t\/\/ blobs that have been seen as a part of the file\n\tfileBlobs map[string]restic.IDSet\n\n\t\/\/ blobs is used to count individual unique blobs,\n\t\/\/ independent of references to files\n\tblobs restic.BlobSet\n}\n\n\/\/ fileID is a 256-bit hash that distinguishes unique files.\ntype fileID [32]byte\n\nconst (\n\tcountModeRestoreSize = \"restore-size\"\n\tcountModeUniqueFilesByContents = \"files-by-contents\"\n\tcountModeBlobsPerFile = \"blobs-per-file\"\n\tcountModeRawData = \"raw-data\"\n)\n<commit_msg>restic stats: print uncompressed size in mode raw-data<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\n\t\"github.com\/restic\/restic\/internal\/backend\"\n\t\"github.com\/restic\/restic\/internal\/restic\"\n\t\"github.com\/restic\/restic\/internal\/walker\"\n\n\t\"github.com\/minio\/sha256-simd\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar cmdStats = &cobra.Command{\n\tUse: \"stats [flags] [snapshot ID] [...]\",\n\tShort: \"Scan the repository and show basic statistics\",\n\tLong: `\nThe \"stats\" command walks one or multiple snapshots in a repository\nand accumulates statistics about the data stored therein. It reports \non the number of unique files and their sizes, according to one of\nthe counting modes as given by the --mode flag.\n\nIt operates on all snapshots matching the selection criteria or all\nsnapshots if nothing is specified. The special snapshot ID \"latest\"\nis also supported. Some modes make more sense over \njust a single snapshot, while others are useful across all snapshots,\ndepending on what you are trying to calculate.\n\nThe modes are:\n\n* restore-size: (default) Counts the size of the restored files.\n* files-by-contents: Counts total size of files, where a file is\n considered unique if it has unique contents.\n* raw-data: Counts the size of blobs in the repository, regardless of\n how many files reference them.\n* blobs-per-file: A combination of files-by-contents and raw-data.\n\nRefer to the online manual for more details about each mode.\n\nEXIT STATUS\n===========\n\nExit status is 0 if the command was successful, and non-zero if there was any error.\n`,\n\tDisableAutoGenTag: true,\n\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\treturn runStats(globalOptions, args)\n\t},\n}\n\n\/\/ StatsOptions collects all options for the stats command.\ntype StatsOptions struct {\n\t\/\/ the mode of counting to perform (see consts for available modes)\n\tcountMode string\n\n\t\/\/ filter snapshots by, if given by user\n\tHosts []string\n\tTags restic.TagLists\n\tPaths []string\n}\n\nvar statsOptions StatsOptions\n\nfunc init() {\n\tcmdRoot.AddCommand(cmdStats)\n\tf := cmdStats.Flags()\n\tf.StringVar(&statsOptions.countMode, \"mode\", countModeRestoreSize, \"counting mode: restore-size (default), files-by-contents, blobs-per-file or raw-data\")\n\tf.StringArrayVarP(&statsOptions.Hosts, \"host\", \"H\", nil, \"only consider snapshots with the given `host` (can be specified multiple times)\")\n\tf.Var(&statsOptions.Tags, \"tag\", \"only consider snapshots which include this `taglist` in the format `tag[,tag,...]` (can be specified multiple times)\")\n\tf.StringArrayVar(&statsOptions.Paths, \"path\", nil, \"only consider snapshots which include this (absolute) `path` (can be specified multiple times)\")\n}\n\nfunc runStats(gopts GlobalOptions, args []string) error {\n\terr := verifyStatsInput(gopts, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx, cancel := context.WithCancel(gopts.ctx)\n\tdefer cancel()\n\n\trepo, err := OpenRepository(gopts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !gopts.NoLock {\n\t\tlock, err := lockRepo(ctx, repo)\n\t\tdefer unlockRepo(lock)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tsnapshotLister, err := backend.MemorizeList(gopts.ctx, repo.Backend(), restic.SnapshotFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = repo.LoadIndex(ctx); err != nil {\n\t\treturn err\n\t}\n\n\tif !gopts.JSON {\n\t\tPrintf(\"scanning...\\n\")\n\t}\n\n\t\/\/ create a container for the stats (and other needed state)\n\tstats := &statsContainer{\n\t\tuniqueFiles: make(map[fileID]struct{}),\n\t\tfileBlobs: make(map[string]restic.IDSet),\n\t\tblobs: restic.NewBlobSet(),\n\t\tSnapshotsCount: 0,\n\t}\n\n\tfor sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, statsOptions.Hosts, statsOptions.Tags, statsOptions.Paths, args) {\n\t\terr = statsWalkSnapshot(ctx, sn, repo, stats)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error walking snapshot: %v\", err)\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif statsOptions.countMode == countModeRawData {\n\t\t\/\/ the blob handles have been collected, but not yet counted\n\t\tfor blobHandle := range stats.blobs {\n\t\t\tpbs := repo.Index().Lookup(blobHandle)\n\t\t\tif len(pbs) == 0 {\n\t\t\t\treturn fmt.Errorf(\"blob %v not found\", blobHandle)\n\t\t\t}\n\t\t\tstats.TotalSize += uint64(pbs[0].Length)\n\t\t\tif repo.Config().Version >= 2 {\n\t\t\t\tif pbs[0].IsCompressed() {\n\t\t\t\t\tstats.TotalUncompressedSize += uint64(pbs[0].UncompressedLength)\n\t\t\t\t} else {\n\t\t\t\t\tstats.TotalUncompressedSize += uint64(pbs[0].Length)\n\t\t\t\t}\n\t\t\t}\n\t\t\tstats.TotalBlobCount++\n\t\t}\n\t}\n\n\tif gopts.JSON {\n\t\terr = json.NewEncoder(globalOptions.stdout).Encode(stats)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"encoding output: %v\", err)\n\t\t}\n\t\treturn nil\n\t}\n\n\tPrintf(\"Stats in %s mode:\\n\", statsOptions.countMode)\n\tPrintf(\" Snapshots processed: %d\\n\", stats.SnapshotsCount)\n\n\tif stats.TotalBlobCount > 0 {\n\t\tPrintf(\" Total Blob Count: %d\\n\", stats.TotalBlobCount)\n\t}\n\tif stats.TotalFileCount > 0 {\n\t\tPrintf(\" Total File Count: %d\\n\", stats.TotalFileCount)\n\t}\n\tif stats.TotalUncompressedSize > 0 {\n\t\tPrintf(\"Total Uncompressed Size: %-5s\\n\", formatBytes(stats.TotalUncompressedSize))\n\t}\n\tPrintf(\" Total Size: %-5s\\n\", formatBytes(stats.TotalSize))\n\n\treturn nil\n}\n\nfunc statsWalkSnapshot(ctx context.Context, snapshot *restic.Snapshot, repo restic.Repository, stats *statsContainer) error {\n\tif snapshot.Tree == nil {\n\t\treturn fmt.Errorf(\"snapshot %s has nil tree\", snapshot.ID().Str())\n\t}\n\n\tstats.SnapshotsCount++\n\n\tif statsOptions.countMode == countModeRawData {\n\t\t\/\/ count just the sizes of unique blobs; we don't need to walk the tree\n\t\t\/\/ ourselves in this case, since a nifty function does it for us\n\t\treturn restic.FindUsedBlobs(ctx, repo, restic.IDs{*snapshot.Tree}, stats.blobs, nil)\n\t}\n\n\tuniqueInodes := make(map[uint64]struct{})\n\terr := walker.Walk(ctx, repo, *snapshot.Tree, restic.NewIDSet(), statsWalkTree(repo, stats, uniqueInodes))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"walking tree %s: %v\", *snapshot.Tree, err)\n\t}\n\n\treturn nil\n}\n\nfunc statsWalkTree(repo restic.Repository, stats *statsContainer, uniqueInodes map[uint64]struct{}) walker.WalkFunc {\n\treturn func(parentTreeID restic.ID, npath string, node *restic.Node, nodeErr error) (bool, error) {\n\t\tif nodeErr != nil {\n\t\t\treturn true, nodeErr\n\t\t}\n\t\tif node == nil {\n\t\t\treturn true, nil\n\t\t}\n\n\t\tif statsOptions.countMode == countModeUniqueFilesByContents || statsOptions.countMode == countModeBlobsPerFile {\n\t\t\t\/\/ only count this file if we haven't visited it before\n\t\t\tfid := makeFileIDByContents(node)\n\t\t\tif _, ok := stats.uniqueFiles[fid]; !ok {\n\t\t\t\t\/\/ mark the file as visited\n\t\t\t\tstats.uniqueFiles[fid] = struct{}{}\n\n\t\t\t\tif statsOptions.countMode == countModeUniqueFilesByContents {\n\t\t\t\t\t\/\/ simply count the size of each unique file (unique by contents only)\n\t\t\t\t\tstats.TotalSize += node.Size\n\t\t\t\t\tstats.TotalFileCount++\n\t\t\t\t}\n\t\t\t\tif statsOptions.countMode == countModeBlobsPerFile {\n\t\t\t\t\t\/\/ count the size of each unique blob reference, which is\n\t\t\t\t\t\/\/ by unique file (unique by contents and file path)\n\t\t\t\t\tfor _, blobID := range node.Content {\n\t\t\t\t\t\t\/\/ ensure we have this file (by path) in our map; in this\n\t\t\t\t\t\t\/\/ mode, a file is unique by both contents and path\n\t\t\t\t\t\tnodePath := filepath.Join(npath, node.Name)\n\t\t\t\t\t\tif _, ok := stats.fileBlobs[nodePath]; !ok {\n\t\t\t\t\t\t\tstats.fileBlobs[nodePath] = restic.NewIDSet()\n\t\t\t\t\t\t\tstats.TotalFileCount++\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif _, ok := stats.fileBlobs[nodePath][blobID]; !ok {\n\t\t\t\t\t\t\t\/\/ is always a data blob since we're accessing it via a file's Content array\n\t\t\t\t\t\t\tblobSize, found := repo.LookupBlobSize(blobID, restic.DataBlob)\n\t\t\t\t\t\t\tif !found {\n\t\t\t\t\t\t\t\treturn true, fmt.Errorf(\"blob %s not found for tree %s\", blobID, parentTreeID)\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\/\/ count the blob's size, then add this blob by this\n\t\t\t\t\t\t\t\/\/ file (path) so we don't double-count it\n\t\t\t\t\t\t\tstats.TotalSize += uint64(blobSize)\n\t\t\t\t\t\t\tstats.fileBlobs[nodePath].Insert(blobID)\n\t\t\t\t\t\t\t\/\/ this mode also counts total unique blob _references_ per file\n\t\t\t\t\t\t\tstats.TotalBlobCount++\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif statsOptions.countMode == countModeRestoreSize {\n\t\t\t\/\/ as this is a file in the snapshot, we can simply count its\n\t\t\t\/\/ size without worrying about uniqueness, since duplicate files\n\t\t\t\/\/ will still be restored\n\t\t\tstats.TotalFileCount++\n\n\t\t\t\/\/ if inodes are present, only count each inode once\n\t\t\t\/\/ (hard links do not increase restore size)\n\t\t\tif _, ok := uniqueInodes[node.Inode]; !ok || node.Inode == 0 {\n\t\t\t\tuniqueInodes[node.Inode] = struct{}{}\n\t\t\t\tstats.TotalSize += node.Size\n\t\t\t}\n\n\t\t\treturn false, nil\n\t\t}\n\n\t\treturn true, nil\n\t}\n}\n\n\/\/ makeFileIDByContents returns a hash of the blob IDs of the\n\/\/ node's Content in sequence.\nfunc makeFileIDByContents(node *restic.Node) fileID {\n\tvar bb []byte\n\tfor _, c := range node.Content {\n\t\tbb = append(bb, []byte(c[:])...)\n\t}\n\treturn sha256.Sum256(bb)\n}\n\nfunc verifyStatsInput(gopts GlobalOptions, args []string) error {\n\t\/\/ require a recognized counting mode\n\tswitch statsOptions.countMode {\n\tcase countModeRestoreSize:\n\tcase countModeUniqueFilesByContents:\n\tcase countModeBlobsPerFile:\n\tcase countModeRawData:\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown counting mode: %s (use the -h flag to get a list of supported modes)\", statsOptions.countMode)\n\t}\n\n\treturn nil\n}\n\n\/\/ statsContainer holds information during a walk of a repository\n\/\/ to collect information about it, as well as state needed\n\/\/ for a successful and efficient walk.\ntype statsContainer struct {\n\tTotalSize uint64 `json:\"total_size\"`\n\tTotalUncompressedSize uint64 `json:\"total_uncompressed_size\"`\n\tTotalFileCount uint64 `json:\"total_file_count\"`\n\tTotalBlobCount uint64 `json:\"total_blob_count,omitempty\"`\n\t\/\/ holds count of all considered snapshots\n\tSnapshotsCount int `json:\"snapshots_count\"`\n\n\t\/\/ uniqueFiles marks visited files according to their\n\t\/\/ contents (hashed sequence of content blob IDs)\n\tuniqueFiles map[fileID]struct{}\n\n\t\/\/ fileBlobs maps a file name (path) to the set of\n\t\/\/ blobs that have been seen as a part of the file\n\tfileBlobs map[string]restic.IDSet\n\n\t\/\/ blobs is used to count individual unique blobs,\n\t\/\/ independent of references to files\n\tblobs restic.BlobSet\n}\n\n\/\/ fileID is a 256-bit hash that distinguishes unique files.\ntype fileID [32]byte\n\nconst (\n\tcountModeRestoreSize = \"restore-size\"\n\tcountModeUniqueFilesByContents = \"files-by-contents\"\n\tcountModeBlobsPerFile = \"blobs-per-file\"\n\tcountModeRawData = \"raw-data\"\n)\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package rebecca is lightweight convenience library for work with database\n\/\/\n\/\/ See github README for instructions: https:\/\/github.com\/waterlink\/rebecca#rebecca\n\/\/\n\/\/ See examples: https:\/\/godoc.org\/github.com\/waterlink\/rebecca#pkg-examples\n\/\/\n\/\/ Simple example:\n\/\/\n\/\/ type Person struct {\n\/\/ rebecca.ModelMetadata `tablename:\"people\"`\n\/\/\n\/\/ ID int `rebecca:\"id\" rebecca_primary:\"true\"`\n\/\/ Name string `rebecca:\"name\"`\n\/\/ Age int `rebecca:\"age\"`\n\/\/ }\n\/\/\n\/\/ \/\/ Create new record\n\/\/ p := &Person{Name: \"John\", Age: 34}\n\/\/ if err := rebecca.Save(p); err != nil {\n\/\/ \/\/ handle error here\n\/\/ }\n\/\/ fmt.Print(p)\n\/\/\n\/\/ \/\/ Update existing record\n\/\/ p.Name = \"John Smith\"\n\/\/ if err := rebecca.Save(p); err != nil {\n\/\/ \/\/ handle error here\n\/\/ }\n\/\/ fmt.Print(p)\n\/\/\n\/\/ \/\/ Get record by its primary key\n\/\/ p = &Person{}\n\/\/ if err := rebecca.Get(25, p); err != nil {\n\/\/ \/\/ handle error here\n\/\/ }\n\/\/ fmt.Print(p)\npackage rebecca\n\n\/\/ This file contains thin exported functions only.\n\/\/\n\/\/ For unexported functions see: helpers.go\n\/\/\n\/\/ For Context see: context.go\n\n\/\/ Get is for fetching one record\nfunc Get(ID interface{}, record interface{}) error {\n\treturn get(nil, ID, record)\n}\n\n\/\/ All is for fetching all records\nfunc All(records interface{}) error {\n\tctx := &Context{}\n\treturn ctx.All(records)\n}\n\n\/\/ Where is for fetching specific records\nfunc Where(records interface{}, where string, args ...interface{}) error {\n\tctx := &Context{}\n\treturn ctx.Where(records, where, args...)\n}\n\n\/\/ First is for fetching only one specific record\nfunc First(record interface{}, where string, args ...interface{}) error {\n\tctx := &Context{}\n\treturn ctx.First(record, where, args...)\n}\n\n\/\/ Save is for saving one record (either creating or updating)\nfunc Save(record interface{}) error {\n\treturn save(nil, record)\n}\n\n\/\/ Remove is for removing the record\nfunc Remove(record interface{}) error {\n\treturn remove(nil, record)\n}\n<commit_msg>Add convenience rebecca.SetupDriver to not expose details of rebecca\/driver package<commit_after>\/\/ Package rebecca is lightweight convenience library for work with database\n\/\/\n\/\/ See github README for instructions: https:\/\/github.com\/waterlink\/rebecca#rebecca\n\/\/\n\/\/ See examples: https:\/\/godoc.org\/github.com\/waterlink\/rebecca#pkg-examples\n\/\/\n\/\/ Simple example:\n\/\/\n\/\/ type Person struct {\n\/\/ rebecca.ModelMetadata `tablename:\"people\"`\n\/\/\n\/\/ ID int `rebecca:\"id\" rebecca_primary:\"true\"`\n\/\/ Name string `rebecca:\"name\"`\n\/\/ Age int `rebecca:\"age\"`\n\/\/ }\n\/\/\n\/\/ \/\/ Create new record\n\/\/ p := &Person{Name: \"John\", Age: 34}\n\/\/ if err := rebecca.Save(p); err != nil {\n\/\/ \/\/ handle error here\n\/\/ }\n\/\/ fmt.Print(p)\n\/\/\n\/\/ \/\/ Update existing record\n\/\/ p.Name = \"John Smith\"\n\/\/ if err := rebecca.Save(p); err != nil {\n\/\/ \/\/ handle error here\n\/\/ }\n\/\/ fmt.Print(p)\n\/\/\n\/\/ \/\/ Get record by its primary key\n\/\/ p = &Person{}\n\/\/ if err := rebecca.Get(25, p); err != nil {\n\/\/ \/\/ handle error here\n\/\/ }\n\/\/ fmt.Print(p)\npackage rebecca\n\nimport \"github.com\/waterlink\/rebecca\/driver\"\n\n\/\/ This file contains thin exported functions only.\n\/\/\n\/\/ For unexported functions see: helpers.go\n\/\/\n\/\/ For Context see: context.go\n\n\/\/ SetupDriver is for configuring database driver\nfunc SetupDriver(d driver.Driver) {\n\tdriver.SetupDriver(d)\n}\n\n\/\/ Get is for fetching one record\nfunc Get(ID interface{}, record interface{}) error {\n\treturn get(nil, ID, record)\n}\n\n\/\/ All is for fetching all records\nfunc All(records interface{}) error {\n\tctx := &Context{}\n\treturn ctx.All(records)\n}\n\n\/\/ Where is for fetching specific records\nfunc Where(records interface{}, where string, args ...interface{}) error {\n\tctx := &Context{}\n\treturn ctx.Where(records, where, args...)\n}\n\n\/\/ First is for fetching only one specific record\nfunc First(record interface{}, where string, args ...interface{}) error {\n\tctx := &Context{}\n\treturn ctx.First(record, where, args...)\n}\n\n\/\/ Save is for saving one record (either creating or updating)\nfunc Save(record interface{}) error {\n\treturn save(nil, record)\n}\n\n\/\/ Remove is for removing the record\nfunc Remove(record interface{}) error {\n\treturn remove(nil, record)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"golang.org\/x\/net\/http2\"\n\t\"golang.org\/x\/net\/http2\/hpack\"\n\n\t\"github.com\/lucas-clemente\/quic-go\"\n\t\"github.com\/lucas-clemente\/quic-go\/protocol\"\n)\n\nvar supportedVersions = map[protocol.VersionNumber]bool{\n\t30: true,\n\t32: true,\n}\n\nfunc main() {\n\tpath := os.Getenv(\"GOPATH\") + \"\/src\/github.com\/lucas-clemente\/quic-go\/example\/\"\n\n\tserver, err := quic.NewServer(path+\"cert.der\", path+\"key.der\", handleStream)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Write([]byte(\"Hello world!\"))\n\t})\n\n\terr = server.ListenAndServe(\"localhost:6121\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\ntype responseWriter struct {\n\tsession *quic.Session\n\tdataStreamID protocol.StreamID\n\theaderStream *quic.Stream\n\tdataStream *quic.Stream\n\n\theader http.Header\n\theaderWritten bool\n}\n\nfunc (w *responseWriter) Header() http.Header {\n\treturn w.header\n}\n\nfunc (w *responseWriter) WriteHeader(status int) {\n\tw.headerWritten = true\n\n\tvar headers bytes.Buffer\n\tenc := hpack.NewEncoder(&headers)\n\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: strconv.Itoa(status)})\n\t\/\/ enc.WriteField(hpack.HeaderField{Name: \"content-length\", Value: strconv.Itoa(len(p))})\n\t\/\/ enc.WriteField(hpack.HeaderField{Name: \"content-type\", Value: http.DetectContentType(p)})\n\n\tfor k, v := range w.header {\n\t\tenc.WriteField(hpack.HeaderField{Name: k, Value: v[0]})\n\t}\n\n\tfmt.Printf(\"responding with %d %#v\\n\", status, w.header)\n\th2framer := http2.NewFramer(w.headerStream, nil)\n\th2framer.WriteHeaders(http2.HeadersFrameParam{\n\t\tStreamID: uint32(w.dataStreamID),\n\t\tEndHeaders: true,\n\t\tBlockFragment: headers.Bytes(),\n\t})\n}\n\nfunc (w *responseWriter) Write(p []byte) (int, error) {\n\tif !w.headerWritten {\n\t\tw.WriteHeader(200)\n\t}\n\n\tif len(p) != 0 {\n\t\tdataStream, err := w.session.NewStream(w.dataStreamID)\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"error creating data stream: %s\\n\", err.Error())\n\t\t}\n\t\tdefer dataStream.Close()\n\t\treturn dataStream.Write(p)\n\t}\n\n\treturn 0, nil\n}\n\nfunc handleStream(session *quic.Session, headerStream *quic.Stream) {\n\thpackDecoder := hpack.NewDecoder(4096, nil)\n\th2framer := http2.NewFramer(nil, headerStream)\n\n\tgo func() {\n\t\tfor {\n\t\t\th2frame, err := h2framer.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"invalid http2 frame: %s\\n\", err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\th2headersFrame := h2frame.(*http2.HeadersFrame)\n\t\t\tif !h2headersFrame.HeadersEnded() {\n\t\t\t\tfmt.Printf(\"http2 header continuation not implemented\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\theaders, err := hpackDecoder.DecodeFull(h2headersFrame.HeaderBlockFragment())\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"invalid http2 headers encoding: %s\\n\", err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\theadersMap := map[string]string{}\n\t\t\tfor _, h := range headers {\n\t\t\t\theadersMap[h.Name] = h.Value\n\t\t\t}\n\n\t\t\tfmt.Printf(\"Request: %s %s:\/\/%s%s on stream %d\\n\", headersMap[\":method\"], headersMap[\":scheme\"], headersMap[\":authority\"], headersMap[\":path\"], h2headersFrame.StreamID)\n\n\t\t\treq, err := http.NewRequest(headersMap[\":method\"], headersMap[\":path\"], nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"invalid http2 frame: %s\\n\", err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tresponseWriter := &responseWriter{\n\t\t\t\theader: http.Header{},\n\t\t\t\theaderStream: headerStream,\n\t\t\t\tdataStreamID: protocol.StreamID(h2headersFrame.StreamID),\n\t\t\t\tsession: session,\n\t\t\t}\n\n\t\t\tgo http.DefaultServeMux.ServeHTTP(responseWriter, req)\n\t\t}\n\t}()\n}\n<commit_msg>fix content-length handling and multiple writes in http2 responsewriter<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"golang.org\/x\/net\/http2\"\n\t\"golang.org\/x\/net\/http2\/hpack\"\n\n\t\"github.com\/lucas-clemente\/quic-go\"\n\t\"github.com\/lucas-clemente\/quic-go\/protocol\"\n)\n\nvar supportedVersions = map[protocol.VersionNumber]bool{\n\t30: true,\n\t32: true,\n}\n\nfunc main() {\n\tpath := os.Getenv(\"GOPATH\") + \"\/src\/github.com\/lucas-clemente\/quic-go\/example\/\"\n\n\tserver, err := quic.NewServer(path+\"cert.der\", path+\"key.der\", handleStream)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Write([]byte(\"Hello world!\"))\n\t})\n\n\terr = server.ListenAndServe(\"localhost:6121\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\ntype responseWriter struct {\n\tsession *quic.Session\n\tdataStreamID protocol.StreamID\n\theaderStream *quic.Stream\n\tdataStream *quic.Stream\n\n\theader http.Header\n\theaderWritten bool\n\n\tbytesWritten int\n\tcontentLength int\n}\n\nfunc (w *responseWriter) Header() http.Header {\n\treturn w.header\n}\n\nfunc (w *responseWriter) WriteHeader(status int) {\n\tw.headerWritten = true\n\n\tvar headers bytes.Buffer\n\tenc := hpack.NewEncoder(&headers)\n\tenc.WriteField(hpack.HeaderField{Name: \":status\", Value: strconv.Itoa(status)})\n\t\/\/ enc.WriteField(hpack.HeaderField{Name: \"content-length\", Value: strconv.Itoa(len(p))})\n\t\/\/ enc.WriteField(hpack.HeaderField{Name: \"content-type\", Value: http.DetectContentType(p)})\n\n\tfor k, v := range w.header {\n\t\tenc.WriteField(hpack.HeaderField{Name: k, Value: v[0]})\n\t}\n\n\tfmt.Printf(\"Responding with %d %#v\\n\", status, w.header)\n\th2framer := http2.NewFramer(w.headerStream, nil)\n\th2framer.WriteHeaders(http2.HeadersFrameParam{\n\t\tStreamID: uint32(w.dataStreamID),\n\t\tEndHeaders: true,\n\t\tBlockFragment: headers.Bytes(),\n\t})\n\n\tw.contentLength, _ = strconv.Atoi(w.header.Get(\"content-length\"))\n}\n\nfunc (w *responseWriter) Write(p []byte) (int, error) {\n\tif !w.headerWritten {\n\t\tw.WriteHeader(200)\n\t}\n\n\tif len(p) != 0 {\n\t\tif w.dataStream == nil {\n\t\t\tvar err error\n\t\t\tw.dataStream, err = w.session.NewStream(w.dataStreamID)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, fmt.Errorf(\"error creating data stream: %s\\n\", err.Error())\n\t\t\t}\n\t\t}\n\n\t\tn, err := w.dataStream.Write(p)\n\t\tw.bytesWritten += n\n\n\t\tif w.bytesWritten >= w.contentLength {\n\t\t\tdefer w.dataStream.Close()\n\t\t}\n\n\t\treturn n, err\n\t}\n\n\treturn 0, nil\n}\n\nfunc handleStream(session *quic.Session, headerStream *quic.Stream) {\n\thpackDecoder := hpack.NewDecoder(4096, nil)\n\th2framer := http2.NewFramer(nil, headerStream)\n\n\tgo func() {\n\t\tfor {\n\t\t\th2frame, err := h2framer.ReadFrame()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"invalid http2 frame: %s\\n\", err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\th2headersFrame := h2frame.(*http2.HeadersFrame)\n\t\t\tif !h2headersFrame.HeadersEnded() {\n\t\t\t\tfmt.Printf(\"http2 header continuation not implemented\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\theaders, err := hpackDecoder.DecodeFull(h2headersFrame.HeaderBlockFragment())\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"invalid http2 headers encoding: %s\\n\", err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\theadersMap := map[string]string{}\n\t\t\tfor _, h := range headers {\n\t\t\t\theadersMap[h.Name] = h.Value\n\t\t\t}\n\n\t\t\tfmt.Printf(\"Request: %s %s:\/\/%s%s on stream %d\\n\", headersMap[\":method\"], headersMap[\":scheme\"], headersMap[\":authority\"], headersMap[\":path\"], h2headersFrame.StreamID)\n\n\t\t\treq, err := http.NewRequest(headersMap[\":method\"], headersMap[\":path\"], nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"invalid http2 frame: %s\\n\", err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tresponseWriter := &responseWriter{\n\t\t\t\theader: http.Header{},\n\t\t\t\theaderStream: headerStream,\n\t\t\t\tdataStreamID: protocol.StreamID(h2headersFrame.StreamID),\n\t\t\t\tsession: session,\n\t\t\t}\n\n\t\t\tgo http.DefaultServeMux.ServeHTTP(responseWriter, req)\n\t\t}\n\t}()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"archive\/zip\"\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\/sha1\"\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\/s3manager\"\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/sync\/errgroup\"\n)\n\nvar NetlifyHookURL = os.Getenv(\"NETLIFY_DEPLOY_HOOK\")\n\nfunc create_comparison(r *http.Request) (interface{}, error) {\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not read request body\")\n\t}\n\n\tparams := &struct {\n\t\tTitle string\n\t\tImages [][]*image\n\t}{}\n\terr = json.Unmarshal(body, ¶ms)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not decode request body\")\n\t}\n\n\t\/\/ Step 1: Decode & sanitize the request\n\timages := map[string][]*image{}\n\tpaths := []interface{}{}\n\tfor _, row := range params.Images {\n\t\tnames := map[string]struct{}{}\n\t\tfor _, img := range row {\n\t\t\tif _, ok := names[img.Name]; ok {\n\t\t\t\treturn nil, fmt.Errorf(\"Duplicate Name\")\n\t\t\t}\n\t\t\tif !hashRe.MatchString(img.Sha) {\n\t\t\t\treturn nil, fmt.Errorf(\"Invalid hash: %v\", img.Sha)\n\t\t\t}\n\n\t\t\timg.Path = fmt.Sprintf(\"%s\/%s\/%s\", img.Sha[0:2], img.Sha[2:4], img.Sha[4:])\n\t\t\tnames[img.Name] = struct{}{}\n\t\t\timages[img.Path] = append(images[img.Path], img)\n\t\t\tpaths = append(paths, img.Path)\n\t\t}\n\t}\n\tif len(paths) == 0 {\n\t\treturn nil, fmt.Errorf(\"No images to compare\")\n\t}\n\n\t\/\/ Step 2: Look up all the images we're comparing\n\trows, err := db.Query(fmt.Sprintf(\"SELECT `id`, `path`, `thumb`, `format` FROM `images` WHERE `path` IN (?%s)\", strings.Repeat(\",?\", len(paths)-1)), paths...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Invalid SQL: %v\", err)\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar (\n\t\t\tid string\n\t\t\tpath string\n\t\t\tthumb string\n\t\t\tformat string\n\t\t)\n\t\terr = rows.Scan(&id, &path, &thumb, &format)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Invalid SQL: %v\", err)\n\t\t}\n\t\tfor _, img := range images[path] {\n\t\t\timg.ID = id\n\t\t\timg.Thumb = thumb\n\t\t\timg.Format = format\n\t\t}\n\t}\n\terr = rows.Err()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Invalid SQL: %v\", err)\n\t}\n\n\t\/\/ Step 3: Wait for images to finish processing if they're not done yet (they should be)\n\ttime.Sleep(10 * time.Second) \/\/ TODO: use webhooks for this detection, should speed things up by 10 seconds and make it more reliable\n\n\t\/\/ Step 4: Download all the images (to make the ZIP file)\n\teg, ctx := errgroup.WithContext(context.Background())\n\tfor _, row := range params.Images {\n\t\tfor _, img := range row {\n\t\t\timg := img\n\t\t\teg.Go(func() error {\n\t\t\t\tbuf := aws.NewWriteAtBuffer([]byte{})\n\t\t\t\t_, err := downloader.DownloadWithContext(ctx, buf, &s3.GetObjectInput{\n\t\t\t\t\tBucket: aws.String(\"diff.pics\"),\n\t\t\t\t\tKey: aws.String(\"images\/\" + img.Path),\n\t\t\t\t})\n\t\t\t\timg.Data = buf.Bytes()\n\t\t\t\treturn errors.Wrap(err, img.Path)\n\t\t\t})\n\t\t}\n\t}\n\tif err := eg.Wait(); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to download images: %v\", err)\n\t}\n\n\t\/\/ Step 5: Make the zip file\n\tbuf := &bytes.Buffer{}\n\tzw := zip.NewWriter(buf)\n\tfor i, row := range params.Images {\n\t\tfor _, img := range row {\n\t\t\tf, err := zw.Create(fmt.Sprintf(\"%d\/%s.%s\", i, img.Name, img.Format))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Failed to create zip file: %v\", err)\n\t\t\t}\n\t\t\t_, err = f.Write(img.Data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Failed to create zip file: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\terr = zw.Close()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to create zip file: %v\", err)\n\t}\n\n\t\/\/ Step 6: Upload the zip file\n\tzipName := fmt.Sprintf(\"%X\", sha1.Sum(buf.Bytes()))\n\tzipName = fmt.Sprintf(\"%s\/%s\/%s\", zipName[0:2], zipName[2:4], zipName[4:])\n\t_, err = uploader.Upload(&s3manager.UploadInput{\n\t\tBucket: aws.String(\"diff.pics\"),\n\t\tKey: aws.String(\"zips\/\" + zipName),\n\t\tBody: buf,\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to upload zip file: %v\", err)\n\t}\n\n\t\/\/ Step 7: Find a key for the comparison\n\tvar key string\n\tfor keyLen := 12; keyLen <= 51; keyLen++ {\n\t\tkey = randString(keyLen)\n\n\t\tvar _ignore string\n\t\terr = db.QueryRow(\"SELECT `id` FROM `comparisons` WHERE `key` = ?\", key).Scan(&_ignore)\n\t\tif err == sql.ErrNoRows {\n\t\t\tbreak\n\t\t}\n\t}\n\tif len(key) > 50 {\n\t\treturn nil, fmt.Errorf(\"Couldn't generate key\")\n\t}\n\n\t\/\/ Step 8: Add comparison to the database\n\tresult, err := db.Exec(\"INSERT INTO `comparisons`(`key`, `title`, `zip`) VALUES(?,?,?)\", key, params.Title, zipName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to insert comparison: %v\", err)\n\t}\n\tcomparisonID, err := result.LastInsertId()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to fetch comparison id: %v\", err)\n\t}\n\n\t\/\/ Step 9: Add images to the database\n\tnumImages := 0\n\targs := []interface{}{}\n\tfor r, row := range params.Images {\n\t\tfor c, img := range row {\n\t\t\tnumImages++\n\t\t\targs = append(args, comparisonID, r, c, img.ID, img.Name)\n\t\t}\n\t}\n\trowTemplate := \"(?,?,?,?,?)\"\n\t_, err = db.Exec(fmt.Sprintf(\"INSERT INTO `comparison_images`(`comparison_id`, `row`, `column`, `image_id`, `name`) VALUES %s%s\", rowTemplate, strings.Repeat(\",\"+rowTemplate, numImages-1)), args...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to insert comparison images: %v\", err)\n\t}\n\n\t\/\/ Step 10: Run a full build\n\tfor {\n\t\tif _, err := http.Post(NetlifyHookURL, \"\", nil); err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tvar deployID string\n\tfor {\n\t\ttime.Sleep(2 * time.Second)\n\t\tresp, err := http.Get(\"https:\/\/api.netlify.com\/api\/v1\/sites\/diff.pics\/deploys?access_token=\" + AccessToken)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tbody, err = ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdeployResp := []struct {\n\t\t\tDeployID string `json:\"id\"`\n\t\t}{}\n\t\terr = json.Unmarshal(body, &deployResp)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tdeployID = deployResp[0].DeployID\n\t\tbreak\n\t}\n\tfor {\n\t\tif err = fullBuild(); err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tdigest := &struct {\n\t\tState string `json:\"state\"`\n\t\tErrorMessage string `json:\"error_message\"`\n\t}{}\n\tfor digest.State != \"prepared\" && digest.State != \"ready\" {\n\t\t\/\/ Check for errors in preproccessing\n\t\tif digest.State == \"error\" {\n\t\t\treturn nil, fmt.Errorf(\"Deploy failed: %s -- Your comparison will eventually be at https:\/\/diff.pics\/%s\/1\", digest.ErrorMessage, key)\n\t\t}\n\t\ttime.Sleep(2 * time.Second)\n\t\tresp, err := http.Get(\"https:\/\/api.netlify.com\/api\/v1\/deploys\/\" + deployID + \"?access_token=\" + AccessToken)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tbody, err = ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\terr = json.Unmarshal(body, &digest)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t\/\/ Step 11: return the url to go to\n\treturn map[string]string{\"URL\": fmt.Sprintf(\"\/%s\/1\", key)}, nil\n}\n\ntype image struct {\n\tName string\n\tSha string\n\n\tID string\n\tPath string\n\tThumb string\n\tFormat string\n\n\tData []byte\n}\n\nconst randStringChars = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-\"\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n}\nfunc randString(length int) string {\n\tb := make([]byte, length)\n\n\tnumBytes := int(1 + math.Log2(float64(len(randStringChars)-1)))\n\tmask := int64(1<<uint(numBytes) - 1)\n\tcharsPerRand := 63 \/ numBytes\n\n\tfor i := 0; i < length; i += charsPerRand {\n\t\tfor j, r := 0, rand.Int63(); j < charsPerRand && i+j < length; j++ {\n\t\t\tidx := int((r >> uint(j*numBytes)) & mask)\n\t\t\tb[i+j] = randStringChars[idx]\n\t\t}\n\t}\n\n\treturn string(b)\n}\n<commit_msg>Fixes<commit_after>package main\n\nimport (\n\t\"archive\/zip\"\n\t\"bytes\"\n\t\"context\"\n\t\"crypto\/sha1\"\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/s3\/s3manager\"\n\t\"github.com\/pkg\/errors\"\n\t\"golang.org\/x\/sync\/errgroup\"\n)\n\nvar NetlifyHookURL = os.Getenv(\"NETLIFY_DEPLOY_HOOK\")\n\nfunc create_comparison(r *http.Request) (interface{}, error) {\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not read request body\")\n\t}\n\n\tparams := &struct {\n\t\tTitle string\n\t\tImages [][]*image\n\t}{}\n\terr = json.Unmarshal(body, ¶ms)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not decode request body\")\n\t}\n\n\t\/\/ Step 1: Decode & sanitize the request\n\timages := map[string][]*image{}\n\tpaths := []interface{}{}\n\tfor _, row := range params.Images {\n\t\tnames := map[string]struct{}{}\n\t\tfor _, img := range row {\n\t\t\tif _, ok := names[img.Name]; ok {\n\t\t\t\treturn nil, fmt.Errorf(\"Duplicate Name\")\n\t\t\t}\n\t\t\tif !hashRe.MatchString(img.Sha) {\n\t\t\t\treturn nil, fmt.Errorf(\"Invalid hash: %v\", img.Sha)\n\t\t\t}\n\n\t\t\timg.Path = fmt.Sprintf(\"%s\/%s\/%s\", img.Sha[0:2], img.Sha[2:4], img.Sha[4:])\n\t\t\tnames[img.Name] = struct{}{}\n\t\t\timages[img.Path] = append(images[img.Path], img)\n\t\t\tpaths = append(paths, img.Path)\n\t\t}\n\t}\n\tif len(paths) == 0 {\n\t\treturn nil, fmt.Errorf(\"No images to compare\")\n\t}\n\n\t\/\/ Step 3: Wait for images to finish processing if they're not done yet (they should be)\n\ttime.Sleep(10 * time.Second) \/\/ TODO: use webhooks for this detection, should speed things up by 10 seconds and make it more reliable\n\n\t\/\/ Step 2: Look up all the images we're comparing\n\trows, err := db.Query(fmt.Sprintf(\"SELECT `id`, `path`, `thumb`, `format` FROM `images` WHERE `path` IN (?%s)\", strings.Repeat(\",?\", len(paths)-1)), paths...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Invalid SQL: %v\", err)\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\tvar (\n\t\t\tid string\n\t\t\tpath string\n\t\t\tthumb string\n\t\t\tformat string\n\t\t)\n\t\terr = rows.Scan(&id, &path, &thumb, &format)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Invalid SQL: %v\", err)\n\t\t}\n\t\tfor _, img := range images[path] {\n\t\t\timg.ID = id\n\t\t\timg.Thumb = thumb\n\t\t\timg.Format = format\n\t\t}\n\t}\n\terr = rows.Err()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Invalid SQL: %v\", err)\n\t}\n\n\t\/\/ Step 4: Download all the images (to make the ZIP file)\n\teg, ctx := errgroup.WithContext(context.Background())\n\tfor _, row := range params.Images {\n\t\tfor _, img := range row {\n\t\t\timg := img\n\t\t\tif img.ID == \"\" {\n\t\t\t\treturn nil, fmt.Errorf(\"Couldn't lookup image %q\", img.Sha)\n\t\t\t}\n\t\t\teg.Go(func() error {\n\t\t\t\tbuf := aws.NewWriteAtBuffer([]byte{})\n\t\t\t\t_, err := downloader.DownloadWithContext(ctx, buf, &s3.GetObjectInput{\n\t\t\t\t\tBucket: aws.String(\"diff.pics\"),\n\t\t\t\t\tKey: aws.String(\"images\/\" + img.Path),\n\t\t\t\t})\n\t\t\t\timg.Data = buf.Bytes()\n\t\t\t\treturn errors.Wrap(err, img.Path)\n\t\t\t})\n\t\t}\n\t}\n\tif err := eg.Wait(); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to download images: %v\", err)\n\t}\n\n\t\/\/ Step 5: Make the zip file\n\tbuf := &bytes.Buffer{}\n\tzw := zip.NewWriter(buf)\n\tfor i, row := range params.Images {\n\t\tfor _, img := range row {\n\t\t\tf, err := zw.Create(fmt.Sprintf(\"%d\/%s.%s\", i, img.Name, img.Format))\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Failed to create zip file: %v\", err)\n\t\t\t}\n\t\t\t_, err = f.Write(img.Data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Failed to create zip file: %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\terr = zw.Close()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to create zip file: %v\", err)\n\t}\n\n\t\/\/ Step 6: Upload the zip file\n\tzipName := fmt.Sprintf(\"%X\", sha1.Sum(buf.Bytes()))\n\tzipName = fmt.Sprintf(\"%s\/%s\/%s\", zipName[0:2], zipName[2:4], zipName[4:])\n\t_, err = uploader.Upload(&s3manager.UploadInput{\n\t\tBucket: aws.String(\"diff.pics\"),\n\t\tKey: aws.String(\"zips\/\" + zipName),\n\t\tBody: buf,\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to upload zip file: %v\", err)\n\t}\n\n\t\/\/ Step 7: Find a key for the comparison\n\tvar key string\n\tfor keyLen := 12; keyLen <= 51; keyLen++ {\n\t\tkey = randString(keyLen)\n\n\t\tvar _ignore string\n\t\terr = db.QueryRow(\"SELECT `id` FROM `comparisons` WHERE `key` = ?\", key).Scan(&_ignore)\n\t\tif err == sql.ErrNoRows {\n\t\t\tbreak\n\t\t}\n\t}\n\tif len(key) > 50 {\n\t\treturn nil, fmt.Errorf(\"Couldn't generate key\")\n\t}\n\n\t\/\/ Step 8: Add comparison to the database\n\tresult, err := db.Exec(\"INSERT INTO `comparisons`(`key`, `title`, `zip`) VALUES(?,?,?)\", key, params.Title, zipName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to insert comparison: %v\", err)\n\t}\n\tcomparisonID, err := result.LastInsertId()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to fetch comparison id: %v\", err)\n\t}\n\n\t\/\/ Step 9: Add images to the database\n\tnumImages := 0\n\targs := []interface{}{}\n\tfor r, row := range params.Images {\n\t\tfor c, img := range row {\n\t\t\tnumImages++\n\t\t\targs = append(args, comparisonID, r, c, img.ID, img.Name)\n\t\t}\n\t}\n\trowTemplate := \"(?,?,?,?,?)\"\n\t_, err = db.Exec(fmt.Sprintf(\"INSERT INTO `comparison_images`(`comparison_id`, `row`, `column`, `image_id`, `name`) VALUES %s%s\", rowTemplate, strings.Repeat(\",\"+rowTemplate, numImages-1)), args...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to insert comparison images: %v\", err)\n\t}\n\n\t\/\/ Step 10: Run a full build\n\tfor {\n\t\tif _, err := http.Post(NetlifyHookURL, \"\", nil); err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tvar deployID string\n\tfor {\n\t\ttime.Sleep(2 * time.Second)\n\t\tresp, err := http.Get(\"https:\/\/api.netlify.com\/api\/v1\/sites\/diff.pics\/deploys?access_token=\" + AccessToken)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tbody, err = ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdeployResp := []struct {\n\t\t\tDeployID string `json:\"id\"`\n\t\t}{}\n\t\terr = json.Unmarshal(body, &deployResp)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tdeployID = deployResp[0].DeployID\n\t\tbreak\n\t}\n\tfor {\n\t\tif err = fullBuild(); err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tdigest := &struct {\n\t\tState string `json:\"state\"`\n\t\tErrorMessage string `json:\"error_message\"`\n\t}{}\n\tfor digest.State != \"prepared\" && digest.State != \"ready\" {\n\t\t\/\/ Check for errors in preproccessing\n\t\tif digest.State == \"error\" {\n\t\t\treturn nil, fmt.Errorf(\"Deploy failed: %s -- Your comparison will eventually be at https:\/\/diff.pics\/%s\/1\", digest.ErrorMessage, key)\n\t\t}\n\t\ttime.Sleep(2 * time.Second)\n\t\tresp, err := http.Get(\"https:\/\/api.netlify.com\/api\/v1\/deploys\/\" + deployID + \"?access_token=\" + AccessToken)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tbody, err = ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\terr = json.Unmarshal(body, &digest)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t\/\/ Step 11: return the url to go to\n\treturn map[string]string{\"URL\": fmt.Sprintf(\"\/%s\/1\", key)}, nil\n}\n\ntype image struct {\n\tName string\n\tSha string\n\n\tID string\n\tPath string\n\tThumb string\n\tFormat string\n\n\tData []byte\n}\n\nconst randStringChars = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-\"\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n}\nfunc randString(length int) string {\n\tb := make([]byte, length)\n\n\tnumBytes := int(1 + math.Log2(float64(len(randStringChars)-1)))\n\tmask := int64(1<<uint(numBytes) - 1)\n\tcharsPerRand := 63 \/ numBytes\n\n\tfor i := 0; i < length; i += charsPerRand {\n\t\tfor j, r := 0, rand.Int63(); j < charsPerRand && i+j < length; j++ {\n\t\t\tidx := int((r >> uint(j*numBytes)) & mask)\n\t\t\tb[i+j] = randStringChars[idx]\n\t\t}\n\t}\n\n\treturn string(b)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n \"..\/..\/cmd\"\n\n \"fmt\"\n )\n\nfunc main() {\n commander := &cmd.Cmd{}\n commander.Init()\n\n commander.Commands[\"ls\"] = func(line string) (stop bool) {\n fmt.Println(\"listing stuff\")\n return\n }\n\n commander.Commands[\"exit\"] = func(line string) (stop bool) {\n fmt.Println(\"goodbye!\")\n return true\n }\n\n commander.CmdLoop()\n}\n<commit_msg>Use the correct import path<commit_after>package main\n\nimport (\n \"github.com\/gobs\/cmd\"\n\n \"fmt\"\n )\n\nfunc main() {\n commander := &cmd.Cmd{}\n commander.Init()\n\n commander.Commands[\"ls\"] = func(line string) (stop bool) {\n fmt.Println(\"listing stuff\")\n return\n }\n\n commander.Commands[\"exit\"] = func(line string) (stop bool) {\n fmt.Println(\"goodbye!\")\n return true\n }\n\n commander.CmdLoop()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\n\t\"github.com\/RobotsAndPencils\/buford\"\n)\n\nfunc main() {\n\tvar deviceToken, filename, password string\n\n\tflag.StringVar(&deviceToken, \"d\", \"\", \"Device token\")\n\tflag.StringVar(&filename, \"c\", \"\", \"Path to p12 certificate file\")\n\tflag.StringVar(&password, \"p\", \"\", \"Password for p12 file.\")\n\tflag.Parse()\n\n\tcert, err := buford.LoadCert(filename, password)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tservice := buford.Service{\n\t\tClient: buford.NewClient(cert),\n\t\tHost: buford.Sandbox,\n\t}\n\n\terr = service.Push(deviceToken, buford.Headers{}, []byte(`{ \"aps\" : { \"alert\" : \"Hello HTTP\/2\" } }`))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<commit_msg>example: flag to use live environment<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\n\t\"github.com\/RobotsAndPencils\/buford\"\n)\n\nfunc main() {\n\tvar deviceToken, filename, password, environment string\n\n\tflag.StringVar(&deviceToken, \"d\", \"\", \"Device token\")\n\tflag.StringVar(&filename, \"c\", \"\", \"Path to p12 certificate file\")\n\tflag.StringVar(&password, \"p\", \"\", \"Password for p12 file.\")\n\tflag.StringVar(&environment, \"e\", \"development\", \"Environment\")\n\tflag.Parse()\n\n\tcert, err := buford.LoadCert(filename, password)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tservice := buford.Service{\n\t\tClient: buford.NewClient(cert),\n\t\tHost: buford.Sandbox,\n\t}\n\tif environment == \"production\" {\n\t\tservice.Host = buford.Live\n\t}\n\n\terr = service.Push(deviceToken, buford.Headers{}, []byte(`{ \"aps\" : { \"alert\" : \"Hello HTTP\/2\" } }`))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package notifier\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/oinume\/lekcije\/server\/config\"\n\t\"github.com\/oinume\/lekcije\/server\/errors\"\n\t\"github.com\/oinume\/lekcije\/server\/fetcher\"\n\t\"github.com\/oinume\/lekcije\/server\/logger\"\n\t\"github.com\/oinume\/lekcije\/server\/model\"\n\t\"github.com\/sendgrid\/sendgrid-go\"\n\t\"github.com\/sendgrid\/sendgrid-go\/helpers\/mail\"\n\t\"github.com\/uber-go\/zap\"\n)\n\nvar lessonFetcher *fetcher.TeacherLessonFetcher\n\nfunc init() {\n\tlessonFetcher = fetcher.NewTeacherLessonFetcher(nil, logger.AppLogger)\n}\n\ntype Notifier struct {\n\tdb *gorm.DB\n\tdryRun bool\n\tlessonService *model.LessonService\n\tteachers map[uint32]*model.Teacher\n}\n\nfunc NewNotifier(db *gorm.DB, dryRun bool) *Notifier {\n\treturn &Notifier{\n\t\tdb: db,\n\t\tdryRun: dryRun,\n\t\tteachers: make(map[uint32]*model.Teacher, 1000),\n\t}\n}\n\nfunc (n *Notifier) SendNotification(user *model.User) error {\n\tfollowingTeacherService := model.NewFollowingTeacherService(n.db)\n\tn.lessonService = model.NewLessonService(n.db)\n\n\tteacherIDs, err := followingTeacherService.FindTeacherIDsByUserID(user.ID)\n\tif err != nil {\n\t\treturn errors.Wrapperf(err, \"Failed to FindTeacherIDsByUserID(): userID=%v\", user.ID)\n\t}\n\n\tavailableLessonsPerTeacher := make(map[uint32][]*model.Lesson, 1000)\n\tallFetchedLessons := make([]*model.Lesson, 0, 5000)\n\tfor _, teacherID := range teacherIDs {\n\t\tteacher, fetchedLessons, newAvailableLessons, err := n.fetchAndExtractNewAvailableLessons(teacherID)\n\t\tif err != nil {\n\t\t\tswitch err.(type) {\n\t\t\tcase *errors.NotFound:\n\t\t\t\t\/\/ TODO: update teacher table flag\n\t\t\t\t\/\/ TODO: Not need to log\n\t\t\t\tlogger.AppLogger.Warn(\"Cannot fetch teacher\", zap.Uint(\"teacherID\", uint(teacherID)))\n\t\t\t\tcontinue\n\t\t\tdefault:\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tallFetchedLessons = append(allFetchedLessons, fetchedLessons...)\n\t\tn.teachers[teacherID] = teacher\n\t\tif len(newAvailableLessons) > 0 {\n\t\t\tavailableLessonsPerTeacher[teacherID] = newAvailableLessons\n\t\t}\n\t}\n\n\tif err := n.sendNotificationToUser(user, availableLessonsPerTeacher); err != nil {\n\t\treturn err\n\t}\n\n\tif !n.dryRun {\n\t\tn.lessonService.UpdateLessons(allFetchedLessons)\n\t}\n\n\ttime.Sleep(500 * time.Millisecond)\n\n\treturn nil\n}\n\n\/\/ Returns teacher, fetchedLessons, newAvailableLessons, error\nfunc (n *Notifier) fetchAndExtractNewAvailableLessons(teacherID uint32) (\n\t*model.Teacher, []*model.Lesson, []*model.Lesson, error,\n) {\n\tteacher, fetchedLessons, err := lessonFetcher.Fetch(teacherID)\n\tif err != nil {\n\t\tlogger.AppLogger.Error(\n\t\t\t\"TeacherLessonFetcher.Fetch\",\n\t\t\tzap.Uint(\"teacherID\", uint(teacherID)), zap.Error(err),\n\t\t)\n\t\treturn nil, nil, nil, err\n\t}\n\tlogger.AppLogger.Info(\n\t\t\"TeacherLessonFetcher.Fetch\",\n\t\tzap.Uint(\"teacherID\", uint(teacher.ID)),\n\t\tzap.String(\"teacherName\", teacher.Name),\n\t\tzap.Int(\"fetchedLessons\", len(fetchedLessons)),\n\t)\n\n\t\/\/fmt.Printf(\"fetchedLessons ---\\n\")\n\t\/\/for _, l := range fetchedLessons {\n\t\/\/\tfmt.Printf(\"teacherID=%v, datetime=%v, status=%v\\n\", l.TeacherId, l.Datetime, l.Status)\n\t\/\/}\n\n\tnow := time.Now()\n\tfromDate := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, config.LocalTimezone())\n\ttoDate := fromDate.Add(24 * 6 * time.Hour)\n\tlastFetchedLessons, err := n.lessonService.FindLessons(teacher.ID, fromDate, toDate)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\t\/\/fmt.Printf(\"lastFetchedLessons ---\\n\")\n\t\/\/for _, l := range lastFetchedLessons {\n\t\/\/\tfmt.Printf(\"teacherID=%v, datetime=%v, status=%v\\n\", l.TeacherId, l.Datetime, l.Status)\n\t\/\/}\n\n\tnewAvailableLessons := n.lessonService.GetNewAvailableLessons(lastFetchedLessons, fetchedLessons)\n\t\/\/fmt.Printf(\"newAvailableLessons ---\\n\")\n\t\/\/for _, l := range newAvailableLessons {\n\t\/\/\tfmt.Printf(\"teacherID=%v, datetime=%v, status=%v\\n\", l.TeacherId, l.Datetime, l.Status)\n\t\/\/}\n\treturn teacher, fetchedLessons, newAvailableLessons, nil\n}\n\nfunc (n *Notifier) sendNotificationToUser(\n\tuser *model.User,\n\tlessonsPerTeacher map[uint32][]*model.Lesson,\n) error {\n\tlessonsCount := 0\n\tvar teacherIDs []int\n\tfor teacherID, lessons := range lessonsPerTeacher {\n\t\tteacherIDs = append(teacherIDs, int(teacherID))\n\t\tlessonsCount += len(lessons)\n\t}\n\tif lessonsCount == 0 {\n\t\t\/\/ Don't send notification\n\t\treturn nil\n\t}\n\n\tsort.Ints(teacherIDs)\n\tvar teacherIDs2 []uint32\n\tvar teacherNames []string\n\tfor _, id := range teacherIDs {\n\t\tteacherIDs2 = append(teacherIDs2, uint32(id))\n\t\tteacherNames = append(teacherNames, n.teachers[uint32(id)].Name)\n\t}\n\n\tt := template.New(\"email\")\n\tt = template.Must(t.Parse(getEmailTemplateJP()))\n\ttype TemplateData struct {\n\t\tTeacherIDs []uint32\n\t\tTeachers map[uint32]*model.Teacher\n\t\tLessonsPerTeacher map[uint32][]*model.Lesson\n\t\tWebURL string\n\t}\n\tdata := &TemplateData{\n\t\tTeacherIDs: teacherIDs2,\n\t\tTeachers: n.teachers,\n\t\tLessonsPerTeacher: lessonsPerTeacher,\n\t\tWebURL: config.WebURL(),\n\t}\n\n\tvar body bytes.Buffer\n\tif err := t.Execute(&body, data); err != nil {\n\t\treturn errors.InternalWrapf(err, \"Failed to execute template.\")\n\t}\n\t\/\/fmt.Printf(\"--- mail ---\\n%s\", body.String())\n\n\t\/\/subject := \"Schedule of teacher \" + strings.Join(teacherNames, \", \")\n\tsubject := strings.Join(teacherNames, \", \") + \"の空きレッスンがあります\"\n\tsender := &EmailNotificationSender{}\n\treturn sender.Send(user, subject, body.String())\n}\n\nfunc getEmailTemplateJP() string {\n\treturn strings.TrimSpace(`\n{{- range $teacherID := .TeacherIDs }}\n{{- $teacher := index $.Teachers $teacherID -}}\n--- {{ $teacher.Name }} ---\n {{- $lessons := index $.LessonsPerTeacher $teacherID }}\n {{- range $lesson := $lessons }}\n{{ $lesson.Datetime.Format \"2006-01-02 15:04\" }}\n {{- end }}\n\nレッスンの予約はこちらから:\n<a href=\"http:\/\/eikaiwa.dmm.com\/teacher\/index\/{{ $teacherID }}\/\">PC<\/a>\n<a href=\"http:\/\/eikaiwa.dmm.com\/teacher\/schedule\/{{ $teacherID }}\/\">Mobile<\/a>\n\n{{ end }}\n空きレッスンの通知の解除は<a href=\"{{ .WebURL }}\/\">こちら<\/a>\n\t`)\n}\n\nfunc getEmailTemplateEN() string {\n\treturn strings.TrimSpace(`\n{{- range $teacherID := .TeacherIDs }}\n{{- $teacher := index $.Teachers $teacherID -}}\n--- {{ $teacher.Name }} ---\n {{- $lessons := index $.LessonsPerTeacher $teacherID }}\n {{- range $lesson := $lessons }}\n{{ $lesson.Datetime.Format \"2006-01-02 15:04\" }}\n {{- end }}\n\nReserve here:\n<a href=\"http:\/\/eikaiwa.dmm.com\/teacher\/index\/{{ $teacherID }}\/\">PC<\/a>\n<a href=\"http:\/\/eikaiwa.dmm.com\/teacher\/schedule\/{{ $teacherID }}\/\">Mobile<\/a>\n{{ end }}\nClick <a href=\"{{ .WebURL }}\/\">here<\/a> if you want to stop notification of the teacher.\n\t`)\n}\n\ntype NotificationSender interface {\n\tSend(user *model.User, subject, body string) error\n}\n\ntype EmailNotificationSender struct{}\n\nfunc (s *EmailNotificationSender) Send(user *model.User, subject, body string) error {\n\tfrom := mail.NewEmail(\"lekcije\", \"lekcije@lekcije.com\")\n\tto := mail.NewEmail(user.Name, user.Email.Raw())\n\tcontent := mail.NewContent(\"text\/html\", strings.Replace(body, \"\\n\", \"<br>\", -1))\n\tm := mail.NewV3MailInit(from, subject, to, content)\n\n\treq := sendgrid.GetRequest(\n\t\tos.Getenv(\"SENDGRID_API_KEY\"),\n\t\t\"\/v3\/mail\/send\",\n\t\t\"https:\/\/api.sendgrid.com\",\n\t)\n\treq.Method = \"POST\"\n\treq.Body = mail.GetRequestBody(m)\n\tresp, err := sendgrid.API(req)\n\tif err != nil {\n\t\treturn errors.InternalWrapf(err, \"Failed to send email by sendgrid\")\n\t}\n\tif resp.StatusCode >= 300 {\n\t\tmessage := fmt.Sprintf(\n\t\t\t\"Failed to send email by sendgrid: statusCode=%v, body=%v\",\n\t\t\tresp.StatusCode, strings.Replace(resp.Body, \"\\n\", \"\\\\n\", -1),\n\t\t)\n\t\tlogger.AppLogger.Error(message)\n\t\treturn errors.InternalWrapf(\n\t\t\terr,\n\t\t\t\"Failed to send email by sendgrid: statusCode=%v\",\n\t\t\tresp.StatusCode,\n\t\t)\n\t}\n\n\treturn nil\n}\n<commit_msg>Change URL: \/ -> \/me<commit_after>package notifier\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/oinume\/lekcije\/server\/config\"\n\t\"github.com\/oinume\/lekcije\/server\/errors\"\n\t\"github.com\/oinume\/lekcije\/server\/fetcher\"\n\t\"github.com\/oinume\/lekcije\/server\/logger\"\n\t\"github.com\/oinume\/lekcije\/server\/model\"\n\t\"github.com\/sendgrid\/sendgrid-go\"\n\t\"github.com\/sendgrid\/sendgrid-go\/helpers\/mail\"\n\t\"github.com\/uber-go\/zap\"\n)\n\nvar lessonFetcher *fetcher.TeacherLessonFetcher\n\nfunc init() {\n\tlessonFetcher = fetcher.NewTeacherLessonFetcher(nil, logger.AppLogger)\n}\n\ntype Notifier struct {\n\tdb *gorm.DB\n\tdryRun bool\n\tlessonService *model.LessonService\n\tteachers map[uint32]*model.Teacher\n}\n\nfunc NewNotifier(db *gorm.DB, dryRun bool) *Notifier {\n\treturn &Notifier{\n\t\tdb: db,\n\t\tdryRun: dryRun,\n\t\tteachers: make(map[uint32]*model.Teacher, 1000),\n\t}\n}\n\nfunc (n *Notifier) SendNotification(user *model.User) error {\n\tfollowingTeacherService := model.NewFollowingTeacherService(n.db)\n\tn.lessonService = model.NewLessonService(n.db)\n\n\tteacherIDs, err := followingTeacherService.FindTeacherIDsByUserID(user.ID)\n\tif err != nil {\n\t\treturn errors.Wrapperf(err, \"Failed to FindTeacherIDsByUserID(): userID=%v\", user.ID)\n\t}\n\n\tavailableLessonsPerTeacher := make(map[uint32][]*model.Lesson, 1000)\n\tallFetchedLessons := make([]*model.Lesson, 0, 5000)\n\tfor _, teacherID := range teacherIDs {\n\t\tteacher, fetchedLessons, newAvailableLessons, err := n.fetchAndExtractNewAvailableLessons(teacherID)\n\t\tif err != nil {\n\t\t\tswitch err.(type) {\n\t\t\tcase *errors.NotFound:\n\t\t\t\t\/\/ TODO: update teacher table flag\n\t\t\t\t\/\/ TODO: Not need to log\n\t\t\t\tlogger.AppLogger.Warn(\"Cannot fetch teacher\", zap.Uint(\"teacherID\", uint(teacherID)))\n\t\t\t\tcontinue\n\t\t\tdefault:\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tallFetchedLessons = append(allFetchedLessons, fetchedLessons...)\n\t\tn.teachers[teacherID] = teacher\n\t\tif len(newAvailableLessons) > 0 {\n\t\t\tavailableLessonsPerTeacher[teacherID] = newAvailableLessons\n\t\t}\n\t}\n\n\tif err := n.sendNotificationToUser(user, availableLessonsPerTeacher); err != nil {\n\t\treturn err\n\t}\n\n\tif !n.dryRun {\n\t\tn.lessonService.UpdateLessons(allFetchedLessons)\n\t}\n\n\ttime.Sleep(500 * time.Millisecond)\n\n\treturn nil\n}\n\n\/\/ Returns teacher, fetchedLessons, newAvailableLessons, error\nfunc (n *Notifier) fetchAndExtractNewAvailableLessons(teacherID uint32) (\n\t*model.Teacher, []*model.Lesson, []*model.Lesson, error,\n) {\n\tteacher, fetchedLessons, err := lessonFetcher.Fetch(teacherID)\n\tif err != nil {\n\t\tlogger.AppLogger.Error(\n\t\t\t\"TeacherLessonFetcher.Fetch\",\n\t\t\tzap.Uint(\"teacherID\", uint(teacherID)), zap.Error(err),\n\t\t)\n\t\treturn nil, nil, nil, err\n\t}\n\tlogger.AppLogger.Info(\n\t\t\"TeacherLessonFetcher.Fetch\",\n\t\tzap.Uint(\"teacherID\", uint(teacher.ID)),\n\t\tzap.String(\"teacherName\", teacher.Name),\n\t\tzap.Int(\"fetchedLessons\", len(fetchedLessons)),\n\t)\n\n\t\/\/fmt.Printf(\"fetchedLessons ---\\n\")\n\t\/\/for _, l := range fetchedLessons {\n\t\/\/\tfmt.Printf(\"teacherID=%v, datetime=%v, status=%v\\n\", l.TeacherId, l.Datetime, l.Status)\n\t\/\/}\n\n\tnow := time.Now()\n\tfromDate := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, config.LocalTimezone())\n\ttoDate := fromDate.Add(24 * 6 * time.Hour)\n\tlastFetchedLessons, err := n.lessonService.FindLessons(teacher.ID, fromDate, toDate)\n\tif err != nil {\n\t\treturn nil, nil, nil, err\n\t}\n\t\/\/fmt.Printf(\"lastFetchedLessons ---\\n\")\n\t\/\/for _, l := range lastFetchedLessons {\n\t\/\/\tfmt.Printf(\"teacherID=%v, datetime=%v, status=%v\\n\", l.TeacherId, l.Datetime, l.Status)\n\t\/\/}\n\n\tnewAvailableLessons := n.lessonService.GetNewAvailableLessons(lastFetchedLessons, fetchedLessons)\n\t\/\/fmt.Printf(\"newAvailableLessons ---\\n\")\n\t\/\/for _, l := range newAvailableLessons {\n\t\/\/\tfmt.Printf(\"teacherID=%v, datetime=%v, status=%v\\n\", l.TeacherId, l.Datetime, l.Status)\n\t\/\/}\n\treturn teacher, fetchedLessons, newAvailableLessons, nil\n}\n\nfunc (n *Notifier) sendNotificationToUser(\n\tuser *model.User,\n\tlessonsPerTeacher map[uint32][]*model.Lesson,\n) error {\n\tlessonsCount := 0\n\tvar teacherIDs []int\n\tfor teacherID, lessons := range lessonsPerTeacher {\n\t\tteacherIDs = append(teacherIDs, int(teacherID))\n\t\tlessonsCount += len(lessons)\n\t}\n\tif lessonsCount == 0 {\n\t\t\/\/ Don't send notification\n\t\treturn nil\n\t}\n\n\tsort.Ints(teacherIDs)\n\tvar teacherIDs2 []uint32\n\tvar teacherNames []string\n\tfor _, id := range teacherIDs {\n\t\tteacherIDs2 = append(teacherIDs2, uint32(id))\n\t\tteacherNames = append(teacherNames, n.teachers[uint32(id)].Name)\n\t}\n\n\tt := template.New(\"email\")\n\tt = template.Must(t.Parse(getEmailTemplateJP()))\n\ttype TemplateData struct {\n\t\tTeacherIDs []uint32\n\t\tTeachers map[uint32]*model.Teacher\n\t\tLessonsPerTeacher map[uint32][]*model.Lesson\n\t\tWebURL string\n\t}\n\tdata := &TemplateData{\n\t\tTeacherIDs: teacherIDs2,\n\t\tTeachers: n.teachers,\n\t\tLessonsPerTeacher: lessonsPerTeacher,\n\t\tWebURL: config.WebURL(),\n\t}\n\n\tvar body bytes.Buffer\n\tif err := t.Execute(&body, data); err != nil {\n\t\treturn errors.InternalWrapf(err, \"Failed to execute template.\")\n\t}\n\t\/\/fmt.Printf(\"--- mail ---\\n%s\", body.String())\n\n\t\/\/subject := \"Schedule of teacher \" + strings.Join(teacherNames, \", \")\n\tsubject := strings.Join(teacherNames, \", \") + \"の空きレッスンがあります\"\n\tsender := &EmailNotificationSender{}\n\treturn sender.Send(user, subject, body.String())\n}\n\nfunc getEmailTemplateJP() string {\n\treturn strings.TrimSpace(`\n{{- range $teacherID := .TeacherIDs }}\n{{- $teacher := index $.Teachers $teacherID -}}\n--- {{ $teacher.Name }} ---\n {{- $lessons := index $.LessonsPerTeacher $teacherID }}\n {{- range $lesson := $lessons }}\n{{ $lesson.Datetime.Format \"2006-01-02 15:04\" }}\n {{- end }}\n\nレッスンの予約はこちらから:\n<a href=\"http:\/\/eikaiwa.dmm.com\/teacher\/index\/{{ $teacherID }}\/\">PC<\/a>\n<a href=\"http:\/\/eikaiwa.dmm.com\/teacher\/schedule\/{{ $teacherID }}\/\">Mobile<\/a>\n\n{{ end }}\n空きレッスンの通知の解除は<a href=\"{{ .WebURL }}\/me\">こちら<\/a>\n\t`)\n}\n\nfunc getEmailTemplateEN() string {\n\treturn strings.TrimSpace(`\n{{- range $teacherID := .TeacherIDs }}\n{{- $teacher := index $.Teachers $teacherID -}}\n--- {{ $teacher.Name }} ---\n {{- $lessons := index $.LessonsPerTeacher $teacherID }}\n {{- range $lesson := $lessons }}\n{{ $lesson.Datetime.Format \"2006-01-02 15:04\" }}\n {{- end }}\n\nReserve here:\n<a href=\"http:\/\/eikaiwa.dmm.com\/teacher\/index\/{{ $teacherID }}\/\">PC<\/a>\n<a href=\"http:\/\/eikaiwa.dmm.com\/teacher\/schedule\/{{ $teacherID }}\/\">Mobile<\/a>\n{{ end }}\nClick <a href=\"{{ .WebURL }}\/me\">here<\/a> if you want to stop notification of the teacher.\n\t`)\n}\n\ntype NotificationSender interface {\n\tSend(user *model.User, subject, body string) error\n}\n\ntype EmailNotificationSender struct{}\n\nfunc (s *EmailNotificationSender) Send(user *model.User, subject, body string) error {\n\tfrom := mail.NewEmail(\"lekcije\", \"lekcije@lekcije.com\")\n\tto := mail.NewEmail(user.Name, user.Email.Raw())\n\tcontent := mail.NewContent(\"text\/html\", strings.Replace(body, \"\\n\", \"<br>\", -1))\n\tm := mail.NewV3MailInit(from, subject, to, content)\n\n\treq := sendgrid.GetRequest(\n\t\tos.Getenv(\"SENDGRID_API_KEY\"),\n\t\t\"\/v3\/mail\/send\",\n\t\t\"https:\/\/api.sendgrid.com\",\n\t)\n\treq.Method = \"POST\"\n\treq.Body = mail.GetRequestBody(m)\n\tresp, err := sendgrid.API(req)\n\tif err != nil {\n\t\treturn errors.InternalWrapf(err, \"Failed to send email by sendgrid\")\n\t}\n\tif resp.StatusCode >= 300 {\n\t\tmessage := fmt.Sprintf(\n\t\t\t\"Failed to send email by sendgrid: statusCode=%v, body=%v\",\n\t\t\tresp.StatusCode, strings.Replace(resp.Body, \"\\n\", \"\\\\n\", -1),\n\t\t)\n\t\tlogger.AppLogger.Error(message)\n\t\treturn errors.InternalWrapf(\n\t\t\terr,\n\t\t\t\"Failed to send email by sendgrid: statusCode=%v\",\n\t\t\tresp.StatusCode,\n\t\t)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package throttler\n\nimport \"fmt\"\n\ntype httpPkg struct{}\n\nfunc (httpPkg) Get(url string) {}\n\nvar http httpPkg\n\n\/\/ This example fetches several URLs concurrently,\n\/\/ using a Throttler to block until all the fetches are complete.\n\/\/ Compare to http:\/\/golang.org\/pkg\/sync\/#example_WaitGroup\nfunc ExampleThrottler() {\n\tvar urls = []string{\n\t\t\"http:\/\/www.golang.org\/\",\n\t\t\"http:\/\/www.google.com\/\",\n\t\t\"http:\/\/www.somestupidname.com\/\",\n\t}\n\t\/\/ Create a new Throttler that will get 2 urls at a time\n\tt := throttler.New(2, len(urls))\n\tfor _, url := range urls {\n\t\t\/\/ Launch a goroutine to fetch the URL.\n\t\tgo func(url string) {\n\t\t\t\/\/ Fetch the URL.\n\t\t\terr := http.Get(url)\n\t\t\t\/\/ Let Throttler know when the goroutine completes\n\t\t\t\/\/ so it can dispatch another worker\n\t\t\tt.Done(err)\n\t\t}(url)\n\t\t\/\/ Pauses until a worker is available or all jobs have been completed\n\t\t\/\/ Returning the total number of goroutines that have errored\n\t\t\/\/ lets you choose to break out of the loop without starting any more\n\t\terrorCount := t.Throttle()\n\t}\n}\n\n\/\/ This example fetches several URLs concurrently,\n\/\/ using a Throttler to block until all the fetches are complete\n\/\/ and checks the errors returned.\n\/\/ Compare to http:\/\/golang.org\/pkg\/sync\/#example_WaitGroup\nfunc ExampleThrottler_errors() error {\n\tvar urls = []string{\n\t\t\"http:\/\/www.golang.org\/\",\n\t\t\"http:\/\/www.google.com\/\",\n\t\t\"http:\/\/www.somestupidname.com\/\",\n\t}\n\t\/\/ Create a new Throttler that will get 2 urls at a time\n\tt := New(2, len(urls))\n\tfor _, url := range urls {\n\t\t\/\/ Launch a goroutine to fetch the URL.\n\t\tgo func(url string) {\n\t\t\t\/\/ Let Throttler know when the goroutine completes\n\t\t\t\/\/ so it can dispatch another worker\n\t\t\tdefer t.Done(nil)\n\t\t\t\/\/ Fetch the URL.\n\t\t\thttp.Get(url)\n\t\t}(url)\n\t\t\/\/ Pauses until a worker is available or all jobs have been completed\n\t\tt.Throttle()\n\t}\n\n\tif t.Err() != nil {\n\t\t\/\/ Loop through the errors to see the details\n\t\tfor i, err := range t.Errs() {\n\t\t\tfmt.Printf(\"error #%d: %s\", i, err)\n\t\t}\n\t\treturn t.Err()\n\t}\n\n\treturn nil\n}\n<commit_msg>Fixed example_test.go<commit_after>package throttler\n\nimport \"fmt\"\n\ntype httpPkg struct{}\n\nfunc (httpPkg) Get(url string) error { return nil }\n\nvar http httpPkg\n\n\/\/ This example fetches several URLs concurrently,\n\/\/ using a Throttler to block until all the fetches are complete.\n\/\/ Compare to http:\/\/golang.org\/pkg\/sync\/#example_WaitGroup\nfunc ExampleThrottler() {\n\tvar urls = []string{\n\t\t\"http:\/\/www.golang.org\/\",\n\t\t\"http:\/\/www.google.com\/\",\n\t\t\"http:\/\/www.somestupidname.com\/\",\n\t}\n\t\/\/ Create a new Throttler that will get 2 urls at a time\n\tt := New(2, len(urls))\n\tfor _, url := range urls {\n\t\t\/\/ Launch a goroutine to fetch the URL.\n\t\tgo func(url string) {\n\t\t\t\/\/ Fetch the URL.\n\t\t\terr := http.Get(url)\n\t\t\t\/\/ Let Throttler know when the goroutine completes\n\t\t\t\/\/ so it can dispatch another worker\n\t\t\tt.Done(err)\n\t\t}(url)\n\t\t\/\/ Pauses until a worker is available or all jobs have been completed\n\t\t\/\/ Returning the total number of goroutines that have errored\n\t\t\/\/ lets you choose to break out of the loop without starting any more\n\t\terrorCount := t.Throttle()\n\t\tif errorCount > 0 {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\n\/\/ This example fetches several URLs concurrently,\n\/\/ using a Throttler to block until all the fetches are complete\n\/\/ and checks the errors returned.\n\/\/ Compare to http:\/\/golang.org\/pkg\/sync\/#example_WaitGroup\nfunc ExampleThrottler_errors() error {\n\tvar urls = []string{\n\t\t\"http:\/\/www.golang.org\/\",\n\t\t\"http:\/\/www.google.com\/\",\n\t\t\"http:\/\/www.somestupidname.com\/\",\n\t}\n\t\/\/ Create a new Throttler that will get 2 urls at a time\n\tt := New(2, len(urls))\n\tfor _, url := range urls {\n\t\t\/\/ Launch a goroutine to fetch the URL.\n\t\tgo func(url string) {\n\t\t\t\/\/ Let Throttler know when the goroutine completes\n\t\t\t\/\/ so it can dispatch another worker\n\t\t\tdefer t.Done(nil)\n\t\t\t\/\/ Fetch the URL.\n\t\t\thttp.Get(url)\n\t\t}(url)\n\t\t\/\/ Pauses until a worker is available or all jobs have been completed\n\t\tt.Throttle()\n\t}\n\n\tif t.Err() != nil {\n\t\t\/\/ Loop through the errors to see the details\n\t\tfor i, err := range t.Errs() {\n\t\t\tfmt.Printf(\"error #%d: %s\", i, err)\n\t\t}\n\t\treturn t.Err()\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright...\n\n\/\/ This example demonstrates opening a Connection and doing some basic operations.\npackage swift_test\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/ncw\/swift\"\n)\n\nfunc ExampleConnection() {\n\t\/\/ Create a v1 auth connection\n\tc := swift.Connection{\n\t\t\/\/ This should be your username\n\t\tUserName: \"user\",\n\t\t\/\/ This should be your api key\n\t\tApiKey: \"key\",\n\t\t\/\/ This should be a v1 auth url, eg\n\t\t\/\/ Rackspace US https:\/\/auth.api.rackspacecloud.com\/v1.0\n\t\t\/\/ Rackspace UK https:\/\/lon.auth.api.rackspacecloud.com\/v1.0\n\t\t\/\/ Memset Memstore UK https:\/\/auth.storage.memset.com\/v1.0\n\t\tAuthUrl: \"auth_url\",\n\t}\n\n\t\/\/ Authenticate\n\terr := c.Authenticate()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t\/\/ List all the containers\n\tcontainers, err := c.ContainerNames(nil)\n\tfmt.Println(containers)\n\t\/\/ etc...\n\n\t\/\/ ------ or alternatively create a v2 connection ------\n\n\t\/\/ Create a v2 auth connection\n\tc = swift.Connection{\n\t\t\/\/ This is the sub user for the storage - eg \"admin\"\n\t\tUserName: \"user\",\n\t\t\/\/ This should be your api key\n\t\tApiKey: \"key\",\n\t\t\/\/ This should be a version2 auth url, eg\n\t\t\/\/ Rackspace v2 https:\/\/identity.api.rackspacecloud.com\/v2.0\n\t\t\/\/ Memset Memstore v2 https:\/\/auth.storage.memset.com\/v2.0\n\t\tAuthUrl: \"v2_auth_url\",\n\t\t\/\/ Region to use - default is use first region if unset\n\t\tRegion: \"LON\",\n\t\t\/\/ Name of the tenant - this is likely your username\n\t\tTenant: \"jim\",\n\t}\n\n\t\/\/ as above...\n}\n\nvar container string\n\nfunc ExampleConnection_ObjectsWalk() {\n\tobjects := make([]string, 0)\n\terr := c.ObjectsWalk(container, nil, func(opts *swift.ObjectsOpts) (interface{}, error) {\n\t\tnewObjects, err := c.ObjectNames(container, opts)\n\t\tif err == nil {\n\t\t\tobjects = append(objects, newObjects...)\n\t\t}\n\t\treturn newObjects, err\n\t})\n\tfmt.Println(\"Found all the objects\", objects, err)\n}\n\nfunc ExampleConnection_VersionContainerCreate() {\n\t\/\/ Use the helper method to create the current and versions container.\n\tif err := c.VersionContainerCreate(\"cds\", \"cd-versions\"); err != nil {\n\t\tfmt.Print(err.Error())\n\t}\n}\n\nfunc ExampleConnection_VersionEnable() {\n\t\/\/ Build the containers manually and enable them.\n\tif err := c.ContainerCreate(\"movie-versions\", nil); err != nil {\n\t\tfmt.Print(err.Error())\n\t}\n\tif err := c.ContainerCreate(\"movies\", nil); err != nil {\n\t\tfmt.Print(err.Error())\n\t}\n\tif err := c.VersionEnable(\"movies\", \"movie-versions\"); err != nil {\n\t\tfmt.Print(err.Error())\n\t}\n\n\t\/\/ Access the primary container as usual with ObjectCreate(), ObjectPut(), etc.\n\t\/\/ etc...\n}\n\nfunc ExampleConnection_VersionDisable() {\n\t\/\/ Disable versioning on a container. Note that this does not delete the versioning container.\n\tc.VersionDisable(\"movies\")\n}\n<commit_msg>Fix go vet errors<commit_after>\/\/ Copyright...\n\n\/\/ This example demonstrates opening a Connection and doing some basic operations.\npackage swift_test\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/ncw\/swift\"\n)\n\nfunc ExampleConnection() {\n\t\/\/ Create a v1 auth connection\n\tc := &swift.Connection{\n\t\t\/\/ This should be your username\n\t\tUserName: \"user\",\n\t\t\/\/ This should be your api key\n\t\tApiKey: \"key\",\n\t\t\/\/ This should be a v1 auth url, eg\n\t\t\/\/ Rackspace US https:\/\/auth.api.rackspacecloud.com\/v1.0\n\t\t\/\/ Rackspace UK https:\/\/lon.auth.api.rackspacecloud.com\/v1.0\n\t\t\/\/ Memset Memstore UK https:\/\/auth.storage.memset.com\/v1.0\n\t\tAuthUrl: \"auth_url\",\n\t}\n\n\t\/\/ Authenticate\n\terr := c.Authenticate()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t\/\/ List all the containers\n\tcontainers, err := c.ContainerNames(nil)\n\tfmt.Println(containers)\n\t\/\/ etc...\n\n\t\/\/ ------ or alternatively create a v2 connection ------\n\n\t\/\/ Create a v2 auth connection\n\tc = &swift.Connection{\n\t\t\/\/ This is the sub user for the storage - eg \"admin\"\n\t\tUserName: \"user\",\n\t\t\/\/ This should be your api key\n\t\tApiKey: \"key\",\n\t\t\/\/ This should be a version2 auth url, eg\n\t\t\/\/ Rackspace v2 https:\/\/identity.api.rackspacecloud.com\/v2.0\n\t\t\/\/ Memset Memstore v2 https:\/\/auth.storage.memset.com\/v2.0\n\t\tAuthUrl: \"v2_auth_url\",\n\t\t\/\/ Region to use - default is use first region if unset\n\t\tRegion: \"LON\",\n\t\t\/\/ Name of the tenant - this is likely your username\n\t\tTenant: \"jim\",\n\t}\n\n\t\/\/ as above...\n}\n\nvar container string\n\nfunc ExampleConnection_ObjectsWalk() {\n\tobjects := make([]string, 0)\n\terr := c.ObjectsWalk(container, nil, func(opts *swift.ObjectsOpts) (interface{}, error) {\n\t\tnewObjects, err := c.ObjectNames(container, opts)\n\t\tif err == nil {\n\t\t\tobjects = append(objects, newObjects...)\n\t\t}\n\t\treturn newObjects, err\n\t})\n\tfmt.Println(\"Found all the objects\", objects, err)\n}\n\nfunc ExampleConnection_VersionContainerCreate() {\n\t\/\/ Use the helper method to create the current and versions container.\n\tif err := c.VersionContainerCreate(\"cds\", \"cd-versions\"); err != nil {\n\t\tfmt.Print(err.Error())\n\t}\n}\n\nfunc ExampleConnection_VersionEnable() {\n\t\/\/ Build the containers manually and enable them.\n\tif err := c.ContainerCreate(\"movie-versions\", nil); err != nil {\n\t\tfmt.Print(err.Error())\n\t}\n\tif err := c.ContainerCreate(\"movies\", nil); err != nil {\n\t\tfmt.Print(err.Error())\n\t}\n\tif err := c.VersionEnable(\"movies\", \"movie-versions\"); err != nil {\n\t\tfmt.Print(err.Error())\n\t}\n\n\t\/\/ Access the primary container as usual with ObjectCreate(), ObjectPut(), etc.\n\t\/\/ etc...\n}\n\nfunc ExampleConnection_VersionDisable() {\n\t\/\/ Disable versioning on a container. Note that this does not delete the versioning container.\n\tc.VersionDisable(\"movies\")\n}\n<|endoftext|>"} {"text":"<commit_before>package graph_test\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\n\t\"github.com\/urandom\/graph\"\n\t\"github.com\/urandom\/graph\/base\"\n)\n\ntype Processor interface {\n\tProcess(wd graph.WalkData, output chan<- int)\n\tResult() int\n}\n\ntype RandomNumberNode struct {\n\tgraph.Node\n\tresult int\n}\n\ntype MultiplyNode struct {\n\tgraph.Node\n\tresult int\n}\n\ntype SummingNode struct {\n\tgraph.Node\n\tresult int\n}\n\nfunc (n *RandomNumberNode) Process(wd graph.WalkData, output chan<- int) {\n\tn.result = rand.Intn(50-10) + 10\n\n\twd.Close()\n}\n\nfunc (n RandomNumberNode) Result() int {\n\treturn n.result\n}\n\nfunc (n *MultiplyNode) Process(wd graph.WalkData, output chan<- int) {\n\tparent := wd.Parents[0]\n\n\tif p, ok := parent.Node.(Processor); ok {\n\t\tn.result = p.Result()*rand.Intn(10-1) + 1\n\t}\n\n\twd.Close()\n}\n\nfunc (n MultiplyNode) Result() int {\n\treturn n.result\n}\n\nfunc (n *SummingNode) Process(wd graph.WalkData, output chan<- int) {\n\tfor _, parent := range wd.Parents {\n\t\tif p, ok := parent.Node.(Processor); ok {\n\t\t\tn.result += p.Result()\n\t\t}\n\t}\n\n\twd.Close()\n\toutput <- n.result\n}\n\nfunc (n SummingNode) Result() int {\n\treturn n.result\n}\n\nfunc Example() {\n\troot := CreateGraph()\n\n\twalker := graph.NewWalker(root)\n\tdata := walker.Walk()\n\n\toutput := make(chan int)\n\n\tfor wd := range data {\n\t\tif p, ok := wd.Node.(Processor); ok {\n\t\t\tgo p.Process(wd, output)\n\t\t}\n\t}\n\n\tselect {\n\tcase r := <-output:\n\t\tfmt.Println(r)\n\t}\n}\n\nfunc CreateGraph() graph.Linker {\n\tlinkers := make([]graph.Linker, 4)\n\n\tfor i := range linkers {\n\t\tl := base.NewLinker()\n\n\t\tswitch i {\n\t\tcase 0:\n\t\t\tl.Data = &RandomNumberNode{Node: l.Data}\n\t\tcase 1:\n\t\t\tl.Data = &RandomNumberNode{Node: l.Data}\n\t\tcase 2:\n\t\t\tl.Data = &MultiplyNode{Node: l.Data}\n\t\t\tl.Connect(linkers[0], l.Connector(graph.InputName), linkers[0].Connector(graph.OutputName, graph.OutputType))\n\t\tcase 3:\n\t\t\tc := base.NewInputConnector(\"aux\")\n\t\t\tl.InputConnectors[c.Name()] = c\n\t\t\tl.Data = &SummingNode{Node: l.Data}\n\n\t\t\tl.Connect(linkers[1], l.Connector(graph.InputName), linkers[1].Connector(graph.OutputName, graph.OutputType))\n\t\t\tl.Connect(linkers[2], l.Connector(c.Name()), linkers[2].Connector(graph.OutputName, graph.OutputType))\n\t\t}\n\n\t\tlinkers[i] = l\n\t}\n\n\treturn linkers[0]\n}\n<commit_msg>always close the walk data<commit_after>package graph_test\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\n\t\"github.com\/urandom\/graph\"\n\t\"github.com\/urandom\/graph\/base\"\n)\n\ntype Processor interface {\n\tProcess(wd graph.WalkData, output chan<- int)\n\tResult() int\n}\n\ntype RandomNumberNode struct {\n\tgraph.Node\n\tresult int\n}\n\ntype MultiplyNode struct {\n\tgraph.Node\n\tresult int\n}\n\ntype SummingNode struct {\n\tgraph.Node\n\tresult int\n}\n\nfunc (n *RandomNumberNode) Process(wd graph.WalkData, output chan<- int) {\n\tn.result = rand.Intn(50-10) + 10\n\n\twd.Close()\n}\n\nfunc (n RandomNumberNode) Result() int {\n\treturn n.result\n}\n\nfunc (n *MultiplyNode) Process(wd graph.WalkData, output chan<- int) {\n\tparent := wd.Parents[0]\n\n\tif p, ok := parent.Node.(Processor); ok {\n\t\tn.result = p.Result()*rand.Intn(10-1) + 1\n\t}\n\n\twd.Close()\n}\n\nfunc (n MultiplyNode) Result() int {\n\treturn n.result\n}\n\nfunc (n *SummingNode) Process(wd graph.WalkData, output chan<- int) {\n\tfor _, parent := range wd.Parents {\n\t\tif p, ok := parent.Node.(Processor); ok {\n\t\t\tn.result += p.Result()\n\t\t}\n\t}\n\n\twd.Close()\n\toutput <- n.result\n}\n\nfunc (n SummingNode) Result() int {\n\treturn n.result\n}\n\nfunc Example() {\n\troot := CreateGraph()\n\n\twalker := graph.NewWalker(root)\n\tdata := walker.Walk()\n\n\toutput := make(chan int)\n\n\tfor wd := range data {\n\t\tif p, ok := wd.Node.(Processor); ok {\n\t\t\tgo p.Process(wd, output)\n\t\t} else {\n\t\t\twd.Close()\n\t\t}\n\t}\n\n\tselect {\n\tcase r := <-output:\n\t\tfmt.Println(r)\n\t}\n}\n\nfunc CreateGraph() graph.Linker {\n\tlinkers := make([]graph.Linker, 4)\n\n\tfor i := range linkers {\n\t\tl := base.NewLinker()\n\n\t\tswitch i {\n\t\tcase 0:\n\t\t\tl.Data = &RandomNumberNode{Node: l.Data}\n\t\tcase 1:\n\t\t\tl.Data = &RandomNumberNode{Node: l.Data}\n\t\tcase 2:\n\t\t\tl.Data = &MultiplyNode{Node: l.Data}\n\t\t\tl.Connect(linkers[0], l.Connector(graph.InputName), linkers[0].Connector(graph.OutputName, graph.OutputType))\n\t\tcase 3:\n\t\t\tc := base.NewInputConnector(\"aux\")\n\t\t\tl.InputConnectors[c.Name()] = c\n\t\t\tl.Data = &SummingNode{Node: l.Data}\n\n\t\t\tl.Connect(linkers[1], l.Connector(graph.InputName), linkers[1].Connector(graph.OutputName, graph.OutputType))\n\t\t\tl.Connect(linkers[2], l.Connector(c.Name()), linkers[2].Connector(graph.OutputName, graph.OutputType))\n\t\t}\n\n\t\tlinkers[i] = l\n\t}\n\n\treturn linkers[0]\n}\n<|endoftext|>"} {"text":"<commit_before>package durafmt\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"time\"\n)\n\nfunc ExampleParseString() {\n\tduration, err := ParseString(\"354h22m3.24s\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tfmt.Println(duration) \/\/ 2 weeks 18 hours 22 minutes 3 seconds\n\t\/\/ duration.String() \/\/ String representation. \"2 weeks 18 hours 22 minutes 3 seconds\"\n}\n\nfunc ExampleParseString_sequence() {\n\tfor hours := 1.0; hours < 12.0; hours++ {\n\t\thour := fmt.Sprintf(\"%fh\", math.Pow(2, hours))\n\t\tduration, err := ParseString(hour)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\tfmt.Println(duration) \/\/ 2 hours, 4 hours, ...\n\t}\n}\n\n\/\/ Version of durafmt.ParseString() that only returns the first part of the duration string.\nfunc ExampleParseStringShort() {\n\tduration, err := ParseStringShort(\"354h22m3.24s\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tfmt.Println(duration) \/\/ 2 weeks 18 hours 22 minutes 3 seconds\n\t\/\/ duration.String() \/\/ String representation. \"2 weeks 18 hours 22 minutes 3 seconds\"\n}\n\nfunc ExampleParse() {\n\ttimeduration := (354 * time.Hour) + (22 * time.Minute) + (3 * time.Second)\n\tduration := Parse(timeduration).String()\n\tfmt.Println(duration) \/\/ 2 weeks 18 hours 22 minutes 3 seconds\n}\n\n\/\/ Version of durafmt.Parse() that only returns the first part of the duration string.\nfunc ExampleParseShort() {\n\ttimeduration := (354 * time.Hour) + (22 * time.Minute) + (3 * time.Second)\n\tduration := ParseShort(timeduration).String()\n\tfmt.Println(duration) \/\/ 2 weeks\n}<commit_msg>fmt examples (again)<commit_after>package durafmt\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"time\"\n)\n\nfunc ExampleParseString() {\n\tduration, err := ParseString(\"354h22m3.24s\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tfmt.Println(duration) \/\/ 2 weeks 18 hours 22 minutes 3 seconds\n\t\/\/ duration.String() \/\/ String representation. \"2 weeks 18 hours 22 minutes 3 seconds\"\n}\n\nfunc ExampleParseString_sequence() {\n\tfor hours := 1.0; hours < 12.0; hours++ {\n\t\thour := fmt.Sprintf(\"%fh\", math.Pow(2, hours))\n\t\tduration, err := ParseString(hour)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t}\n\t\tfmt.Println(duration) \/\/ 2 hours, 4 hours, ...\n\t}\n}\n\n\/\/ Version of durafmt.ParseString() that only returns the first part of the duration string.\nfunc ExampleParseStringShort() {\n\tduration, err := ParseStringShort(\"354h22m3.24s\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tfmt.Println(duration) \/\/ 2 weeks 18 hours 22 minutes 3 seconds\n\t\/\/ duration.String() \/\/ String representation. \"2 weeks 18 hours 22 minutes 3 seconds\"\n}\n\nfunc ExampleParse() {\n\ttimeduration := (354 * time.Hour) + (22 * time.Minute) + (3 * time.Second)\n\tduration := Parse(timeduration).String()\n\tfmt.Println(duration) \/\/ 2 weeks 18 hours 22 minutes 3 seconds\n}\n\n\/\/ Version of durafmt.Parse() that only returns the first part of the duration string.\nfunc ExampleParseShort() {\n\ttimeduration := (354 * time.Hour) + (22 * time.Minute) + (3 * time.Second)\n\tduration := ParseShort(timeduration).String()\n\tfmt.Println(duration) \/\/ 2 weeks\n}\n<|endoftext|>"} {"text":"<commit_before>package socks5_test\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com\/txthinking\/socks5\"\n)\n\nfunc ExampleSocks5Server() {\n\ttimeout := 60 \/\/ 60s\n\tsocks5.Debug = true \/\/ enable socks5 debug log\n\n\tl, err := net.Listen(\"tcp\", \":1980\")\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tdefer l.Close()\n\n\tfor {\n\t\tc, err := l.Accept()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tgo func(c net.Conn) {\n\t\t\tdefer c.Close()\n\t\t\tif err := c.SetReadDeadline(time.Now().Add(time.Duration(timeout) * time.Second)); err != nil {\n\t\t\t\tlog.Println(\"set local timeout:\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ts5s := &socks5.Server{\n\t\t\t\tC: c,\n\t\t\t\tSelectMethod: func(methods []byte) (method byte, got bool) {\n\t\t\t\t\tfor _, m := range methods {\n\t\t\t\t\t\tif m == socks5.MethodNone {\n\t\t\t\t\t\t\tmethod = socks5.MethodNone\n\t\t\t\t\t\t\tgot = true\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t},\n\t\t\t\tSupportedCommands: []byte{socks5.CmdConnect},\n\t\t\t}\n\t\t\tif err := s5s.Negotiate(); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tr, err := s5s.GetRequest()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\trc, err := r.Connect(c)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer rc.Close()\n\t\t\tif err := rc.SetReadDeadline(time.Now().Add(time.Duration(timeout) * time.Second)); err != nil {\n\t\t\t\tlog.Println(\"set remote timeout:\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgo func() {\n\t\t\t\t_, _ = io.Copy(c, rc)\n\t\t\t}()\n\t\t\t_, _ = io.Copy(rc, c)\n\n\t\t}(c)\n\n\t}\n}\n<commit_msg>fixed misspelling<commit_after>package socks5_test\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com\/txthinking\/socks5\"\n)\n\nfunc ExampleServer() {\n\ttimeout := 60 \/\/ 60s\n\tsocks5.Debug = true \/\/ enable socks5 debug log\n\n\tl, err := net.Listen(\"tcp\", \":1980\")\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tdefer l.Close()\n\n\tfor {\n\t\tc, err := l.Accept()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tgo func(c net.Conn) {\n\t\t\tdefer c.Close()\n\t\t\tif err := c.SetReadDeadline(time.Now().Add(time.Duration(timeout) * time.Second)); err != nil {\n\t\t\t\tlog.Println(\"set local timeout:\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ts5s := &socks5.Server{\n\t\t\t\tC: c,\n\t\t\t\tSelectMethod: func(methods []byte) (method byte, got bool) {\n\t\t\t\t\tfor _, m := range methods {\n\t\t\t\t\t\tif m == socks5.MethodNone {\n\t\t\t\t\t\t\tmethod = socks5.MethodNone\n\t\t\t\t\t\t\tgot = true\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t},\n\t\t\t\tSupportedCommands: []byte{socks5.CmdConnect},\n\t\t\t}\n\t\t\tif err := s5s.Negotiate(); err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tr, err := s5s.GetRequest()\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\trc, err := r.Connect(c)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer rc.Close()\n\t\t\tif err := rc.SetReadDeadline(time.Now().Add(time.Duration(timeout) * time.Second)); err != nil {\n\t\t\t\tlog.Println(\"set remote timeout:\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgo func() {\n\t\t\t\t_, _ = io.Copy(c, rc)\n\t\t\t}()\n\t\t\t_, _ = io.Copy(rc, c)\n\n\t\t}(c)\n\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package komorebi\n\nimport (\n\t\"strings\"\n)\n\ntype User struct {\n\tDbModel\n\tImagePath string `json:\"image_path\"`\n}\n\ntype Users []User\n\nfunc NewUser(name string, image_path string) User {\n\treturn User{\n\t\tImagePath: image_path,\n\t\tDbModel: DbModel{\n\t\t\tName: name,\n\t\t},\n\t}\n}\n\nfunc (u User) Save() bool {\n\treturn dbMapper.Save(&u)\n}\n\nfunc (u User) TableName() string {\n\treturn \"users\"\n}\n\nfunc (u Users) TableName() string {\n\treturn \"users\"\n}\n\nfunc (u User) Destroy() bool {\n\tif u.Id == 0 {\n\t\treturn true\n\t}\n\n\tif _, errDelete := dbMapper.Connection.Delete(&u); errDelete != nil {\n\t\tLogger.Printf(\"delete of user failed.\", errDelete)\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (u User) Validate() (bool, map[string][]string) {\n\tsuccess := true\n\terrors := map[string][]string{}\n\n\tif len(u.Name) <= 0 {\n\t\tLogger.Printf(\"User validation failed. Name not present\")\n\t\tsuccess = false\n\t\terrors[\"name\"] = append(errors[\"name\"], \"Name not present.\")\n\t}\n\n\tvar otherUser User\n\tGetByName(&otherUser, u.Name)\n\tif otherUser.Id != 0 && otherUser.Id != u.Id {\n\t\tLogger.Printf(\"User validation failed. Name not uniq\")\n\t\tsuccess = false\n\t\terrors[\"name\"] = append(errors[\"name\"], \"Name not uniq.\")\n\t}\n\n\treturn success, errors\n}\n\nfunc GetUsersByBoardId(board_id int) Users {\n\tvar users Users\n\tvar ids []string\n\n\t_, err := dbMapper.Connection.Select(&ids,\n\t\t\"select UserId from board_users where BoardId=?\", board_id)\n\tif err != nil {\n\t\tLogger.Printf(\"Could not get user_ids by board id\", board_id)\n\t}\n\tuser_ids := strings.Join(ids, \", \")\n\n\t_, err = dbMapper.Connection.Select(&users,\n\t\t\"select * from users where Id IN (\"+user_ids+\")\")\n\tif err != nil {\n\t\tLogger.Printf(\"Could not get users by board id\", board_id)\n\t}\n\n\treturn users\n}\n\nfunc GetUsersByTaskId(task_id int) Users {\n\tvar users Users\n\tvar ids []string\n\n\t_, err := dbMapper.Connection.Select(&ids,\n\t\t\"select UserId from task_users where TaskId=?\", task_id)\n\tif err != nil {\n\t\tLogger.Println(\"Could not get user_ids by task id\", task_id)\n\t}\n\tuser_ids := strings.Join(ids, \", \")\n\n\t_, err = dbMapper.Connection.Select(&users,\n\t\t\"select * from users where Id IN (\"+user_ids+\")\")\n\tif err != nil {\n\t\tLogger.Println(\"Could not get users by task id\", task_id)\n\t}\n\n\treturn users\n}\n<commit_msg>optimize sql query<commit_after>package komorebi\n\nimport (\n\t\"strings\"\n)\n\ntype User struct {\n\tDbModel\n\tImagePath string `json:\"image_path\"`\n}\n\ntype Users []User\n\nfunc NewUser(name string, image_path string) User {\n\treturn User{\n\t\tImagePath: image_path,\n\t\tDbModel: DbModel{\n\t\t\tName: name,\n\t\t},\n\t}\n}\n\nfunc (u User) Save() bool {\n\treturn dbMapper.Save(&u)\n}\n\nfunc (u User) TableName() string {\n\treturn \"users\"\n}\n\nfunc (u Users) TableName() string {\n\treturn \"users\"\n}\n\nfunc (u User) Destroy() bool {\n\tif u.Id == 0 {\n\t\treturn true\n\t}\n\n\tif _, errDelete := dbMapper.Connection.Delete(&u); errDelete != nil {\n\t\tLogger.Printf(\"delete of user failed.\", errDelete)\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (u User) Validate() (bool, map[string][]string) {\n\tsuccess := true\n\terrors := map[string][]string{}\n\n\tif len(u.Name) <= 0 {\n\t\tLogger.Printf(\"User validation failed. Name not present\")\n\t\tsuccess = false\n\t\terrors[\"name\"] = append(errors[\"name\"], \"Name not present.\")\n\t}\n\n\tvar otherUser User\n\tGetByName(&otherUser, u.Name)\n\tif otherUser.Id != 0 && otherUser.Id != u.Id {\n\t\tLogger.Printf(\"User validation failed. Name not uniq\")\n\t\tsuccess = false\n\t\terrors[\"name\"] = append(errors[\"name\"], \"Name not uniq.\")\n\t}\n\n\treturn success, errors\n}\n\nfunc GetUsersByBoardId(board_id int) Users {\n\tusers := make([]User, 0)\n\tvar ids []string\n\n\t_, err := dbMapper.Connection.Select(&ids,\n\t\t\"select UserId from board_users where BoardId=?\", board_id)\n\tif err != nil {\n\t\tLogger.Printf(\"Could not get user_ids by board id\", board_id)\n\t}\n\tuser_ids := strings.Join(ids, \", \")\n\n\tif len(user_ids) <= 0 {\n\t\treturn users\n\t}\n\t_, err = dbMapper.Connection.Select(&users,\n\t\t\"select * from users where Id IN (\"+user_ids+\")\")\n\tif err != nil {\n\t\tLogger.Printf(\"Could not get users by board id\", board_id)\n\t}\n\n\treturn users\n}\n\nfunc GetUsersByTaskId(task_id int) Users {\n\tusers := make([]User, 0)\n\tvar ids []string\n\n\t_, err := dbMapper.Connection.Select(&ids,\n\t\t\"select UserId from task_users where TaskId=?\", task_id)\n\tif err != nil {\n\t\tLogger.Println(\"Could not get user_ids by task id\", task_id)\n\t}\n\tuser_ids := strings.Join(ids, \", \")\n\n\tif len(user_ids) <= 0 {\n\t\treturn users\n\t}\n\n\t_, err = dbMapper.Connection.Select(&users,\n\t\t\"select * from users where Id IN (\"+user_ids+\")\")\n\tif err != nil {\n\t\tLogger.Println(\"Could not get users by task id\", task_id)\n\t}\n\n\treturn users\n}\n<|endoftext|>"} {"text":"<commit_before>package complete_test\n\nimport \"github.com\/posener\/complete\"\n\nfunc main() {\n\n\t\/\/ create a Command object, that represents the command we want\n\t\/\/ to complete.\n\trun := complete.Command{\n\n\t\t\/\/ Name must be exactly as the binary that we want to complete\n\t\tName: \"run\",\n\n\t\t\/\/ Sub defines a list of sub commands of the program,\n\t\t\/\/ this is recursive, since every command is of type command also.\n\t\tSub: complete.Commands{\n\n\t\t\t\/\/ add a build sub command\n\t\t\t\"build\": complete.Command{\n\n\t\t\t\t\/\/ define flags of the build sub command\n\t\t\t\tFlags: complete.Flags{\n\t\t\t\t\t\/\/ build sub command has a flag '-fast', which\n\t\t\t\t\t\/\/ does not expects anything after it.\n\t\t\t\t\t\"-fast\": complete.PredictNothing,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t\/\/ define flags of the 'run' main command\n\t\tFlags: complete.Flags{\n\n\t\t\t\/\/ a flag '-h' which does not expects anything after it\n\t\t\t\"-h\": complete.PredictNothing,\n\n\t\t\t\/\/ a flag -o, which expects a file ending with .out after\n\t\t\t\/\/ it, the tab completion will auto complete for files matching\n\t\t\t\/\/ the given pattern.\n\t\t\t\"-o\": complete.PredictFiles(\"*.out\"),\n\t\t},\n\t}\n\n\t\/\/ run the command completion, as part of the main() function.\n\t\/\/ this triggers the autocompletion when needed.\n\tcomplete.Run(run)\n}\n<commit_msg>Remove example test<commit_after><|endoftext|>"} {"text":"<commit_before>package catlistnodes\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/consul\/api\"\n\t\"github.com\/hashicorp\/consul\/command\/flags\"\n\t\"github.com\/mitchellh\/cli\"\n\t\"github.com\/ryanuber\/columnize\"\n)\n\nfunc New(ui cli.Ui) *cmd {\n\tc := &cmd{UI: ui}\n\tc.init()\n\treturn c\n}\n\ntype cmd struct {\n\tUI cli.Ui\n\tflags *flag.FlagSet\n\thttp *flags.HTTPFlags\n\thelpStr string\n\n\t\/\/ flags\n\tdetailed bool\n\tnear string\n\tnodeMeta map[string]string\n\tservice string\n}\n\nconst helpPrefix = `Usage: consul catalog nodes [options]\n\n Retrieves the list nodes registered in a given datacenter. By default, the\n datacenter of the local agent is queried.\n\n To retrieve the list of nodes:\n\n $ consul catalog nodes\n\n To print detailed information including full node IDs, tagged addresses, and\n metadata information:\n\n $ consul catalog nodes -detailed\n\n To list nodes which are running a particular service:\n\n $ consul catalog nodes -service=web\n\n To filter by node metadata:\n\n $ consul catalog nodes -node-meta=\"foo=bar\"\n\n To sort nodes by estimated round-trip time from node-web:\n\n $ consul catalog nodes -near=node-web\n\n For a full list of options and examples, please see the Consul documentation.`\n\n\/\/ init sets up command flags and help text\nfunc (c *cmd) init() {\n\tc.flags = flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tc.flags.BoolVar(&c.detailed, \"detailed\", false, \"Output detailed information about \"+\n\t\t\"the nodes including their addresses and metadata.\")\n\tc.flags.StringVar(&c.near, \"near\", \"\", \"Node name to sort the node list in ascending \"+\n\t\t\"order based on estimated round-trip time from that node. \"+\n\t\t\"Passing \\\"_agent\\\" will use this agent's node for sorting.\")\n\tc.flags.Var((*flags.FlagMapValue)(&c.nodeMeta), \"node-meta\", \"Metadata to \"+\n\t\t\"filter nodes with the given `key=value` pairs. This flag may be \"+\n\t\t\"specified multiple times to filter on multiple sources of metadata.\")\n\tc.flags.StringVar(&c.service, \"service\", \"\", \"Service `id or name` to filter nodes. \"+\n\t\t\"Only nodes which are providing the given service will be returned.\")\n\n\tc.http = &flags.HTTPFlags{}\n\tflags.Merge(c.flags, c.http.ClientFlags())\n\tflags.Merge(c.flags, c.http.ServerFlags())\n\n\tc.helpStr = flags.Usage(helpPrefix, c.flags, c.http.ClientFlags(), c.http.ServerFlags())\n}\n\nfunc (c *cmd) Run(args []string) int {\n\tif err := c.flags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\tif l := len(c.flags.Args()); l > 0 {\n\t\tc.UI.Error(fmt.Sprintf(\"Too many arguments (expected 0, got %d)\", l))\n\t\treturn 1\n\t}\n\n\t\/\/ Create and test the HTTP client\n\tclient, err := c.http.APIClient()\n\tif err != nil {\n\t\tc.UI.Error(fmt.Sprintf(\"Error connecting to Consul agent: %s\", err))\n\t\treturn 1\n\t}\n\n\tvar nodes []*api.Node\n\tif c.service != \"\" {\n\t\tservices, _, err := client.Catalog().Service(c.service, \"\", &api.QueryOptions{\n\t\t\tNear: c.near,\n\t\t\tNodeMeta: c.nodeMeta,\n\t\t})\n\t\tif err != nil {\n\t\t\tc.UI.Error(fmt.Sprintf(\"Error listing nodes for service: %s\", err))\n\t\t\treturn 1\n\t\t}\n\n\t\tnodes = make([]*api.Node, len(services))\n\t\tfor i, s := range services {\n\t\t\tnodes[i] = &api.Node{\n\t\t\t\tID: s.ID,\n\t\t\t\tNode: s.Node,\n\t\t\t\tAddress: s.Address,\n\t\t\t\tDatacenter: s.Datacenter,\n\t\t\t\tTaggedAddresses: s.TaggedAddresses,\n\t\t\t\tMeta: s.NodeMeta,\n\t\t\t\tCreateIndex: s.CreateIndex,\n\t\t\t\tModifyIndex: s.ModifyIndex,\n\t\t\t}\n\t\t}\n\t} else {\n\t\tnodes, _, err = client.Catalog().Nodes(&api.QueryOptions{\n\t\t\tNear: c.near,\n\t\t\tNodeMeta: c.nodeMeta,\n\t\t})\n\t\tif err != nil {\n\t\t\tc.UI.Error(fmt.Sprintf(\"Error listing nodes: %s\", err))\n\t\t\treturn 1\n\t\t}\n\t}\n\n\t\/\/ Handle the edge case where there are no nodes that match the query.\n\tif len(nodes) == 0 {\n\t\tc.UI.Error(\"No nodes match the given query - try expanding your search.\")\n\t\treturn 0\n\t}\n\n\toutput, err := printNodes(nodes, c.detailed)\n\tif err != nil {\n\t\tc.UI.Error(fmt.Sprintf(\"Error printing nodes: %s\", err))\n\t\treturn 1\n\t}\n\n\tc.UI.Info(output)\n\n\treturn 0\n}\n\nfunc (c *cmd) Synopsis() string {\n\treturn \"Lists all nodes in the given datacenter\"\n}\n\nfunc (c *cmd) Help() string {\n\treturn c.helpStr\n}\n\n\/\/ printNodes accepts a list of nodes and prints information in a tabular\n\/\/ format about the nodes.\nfunc printNodes(nodes []*api.Node, detailed bool) (string, error) {\n\tvar result []string\n\tif detailed {\n\t\tresult = detailedNodes(nodes)\n\t} else {\n\t\tresult = simpleNodes(nodes)\n\t}\n\n\treturn columnize.SimpleFormat(result), nil\n}\n\nfunc detailedNodes(nodes []*api.Node) []string {\n\tresult := make([]string, 0, len(nodes)+1)\n\theader := \"Node|ID|Address|DC|TaggedAddresses|Meta\"\n\tresult = append(result, header)\n\n\tfor _, node := range nodes {\n\t\tresult = append(result, fmt.Sprintf(\"%s|%s|%s|%s|%s|%s\",\n\t\t\tnode.Node, node.ID, node.Address, node.Datacenter,\n\t\t\tmapToKV(node.TaggedAddresses, \", \"), mapToKV(node.Meta, \", \")))\n\t}\n\n\treturn result\n}\n\nfunc simpleNodes(nodes []*api.Node) []string {\n\tresult := make([]string, 0, len(nodes)+1)\n\theader := \"Node|ID|Address|DC\"\n\tresult = append(result, header)\n\n\tfor _, node := range nodes {\n\t\t\/\/ Shorten the ID in non-detailed mode to just the first octet.\n\t\tid := node.ID\n\t\tidx := strings.Index(id, \"-\")\n\t\tif idx > 0 {\n\t\t\tid = id[0:idx]\n\t\t}\n\t\tresult = append(result, fmt.Sprintf(\"%s|%s|%s|%s\",\n\t\t\tnode.Node, id, node.Address, node.Datacenter))\n\t}\n\n\treturn result\n}\n\n\/\/ mapToKV converts a map[string]string into a human-friendly key=value list,\n\/\/ sorted by name.\nfunc mapToKV(m map[string]string, joiner string) string {\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\tr := make([]string, len(keys))\n\tfor i, k := range keys {\n\t\tr[i] = fmt.Sprintf(\"%s=%s\", k, m[k])\n\t}\n\treturn strings.Join(r, joiner)\n}\n<commit_msg>Better name for usage string and moving constant definition down<commit_after>package catlistnodes\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/consul\/api\"\n\t\"github.com\/hashicorp\/consul\/command\/flags\"\n\t\"github.com\/mitchellh\/cli\"\n\t\"github.com\/ryanuber\/columnize\"\n)\n\nfunc New(ui cli.Ui) *cmd {\n\tc := &cmd{UI: ui}\n\tc.init()\n\treturn c\n}\n\ntype cmd struct {\n\tUI cli.Ui\n\tflags *flag.FlagSet\n\thttp *flags.HTTPFlags\n\tusage string\n\n\t\/\/ flags\n\tdetailed bool\n\tnear string\n\tnodeMeta map[string]string\n\tservice string\n}\n\n\/\/ init sets up command flags and help text\nfunc (c *cmd) init() {\n\tc.flags = flag.NewFlagSet(\"\", flag.ContinueOnError)\n\tc.flags.BoolVar(&c.detailed, \"detailed\", false, \"Output detailed information about \"+\n\t\t\"the nodes including their addresses and metadata.\")\n\tc.flags.StringVar(&c.near, \"near\", \"\", \"Node name to sort the node list in ascending \"+\n\t\t\"order based on estimated round-trip time from that node. \"+\n\t\t\"Passing \\\"_agent\\\" will use this agent's node for sorting.\")\n\tc.flags.Var((*flags.FlagMapValue)(&c.nodeMeta), \"node-meta\", \"Metadata to \"+\n\t\t\"filter nodes with the given `key=value` pairs. This flag may be \"+\n\t\t\"specified multiple times to filter on multiple sources of metadata.\")\n\tc.flags.StringVar(&c.service, \"service\", \"\", \"Service `id or name` to filter nodes. \"+\n\t\t\"Only nodes which are providing the given service will be returned.\")\n\n\tc.http = &flags.HTTPFlags{}\n\tflags.Merge(c.flags, c.http.ClientFlags())\n\tflags.Merge(c.flags, c.http.ServerFlags())\n\n\tc.usage = flags.Usage(usage, c.flags, c.http.ClientFlags(), c.http.ServerFlags())\n}\n\nfunc (c *cmd) Run(args []string) int {\n\tif err := c.flags.Parse(args); err != nil {\n\t\treturn 1\n\t}\n\n\tif l := len(c.flags.Args()); l > 0 {\n\t\tc.UI.Error(fmt.Sprintf(\"Too many arguments (expected 0, got %d)\", l))\n\t\treturn 1\n\t}\n\n\t\/\/ Create and test the HTTP client\n\tclient, err := c.http.APIClient()\n\tif err != nil {\n\t\tc.UI.Error(fmt.Sprintf(\"Error connecting to Consul agent: %s\", err))\n\t\treturn 1\n\t}\n\n\tvar nodes []*api.Node\n\tif c.service != \"\" {\n\t\tservices, _, err := client.Catalog().Service(c.service, \"\", &api.QueryOptions{\n\t\t\tNear: c.near,\n\t\t\tNodeMeta: c.nodeMeta,\n\t\t})\n\t\tif err != nil {\n\t\t\tc.UI.Error(fmt.Sprintf(\"Error listing nodes for service: %s\", err))\n\t\t\treturn 1\n\t\t}\n\n\t\tnodes = make([]*api.Node, len(services))\n\t\tfor i, s := range services {\n\t\t\tnodes[i] = &api.Node{\n\t\t\t\tID: s.ID,\n\t\t\t\tNode: s.Node,\n\t\t\t\tAddress: s.Address,\n\t\t\t\tDatacenter: s.Datacenter,\n\t\t\t\tTaggedAddresses: s.TaggedAddresses,\n\t\t\t\tMeta: s.NodeMeta,\n\t\t\t\tCreateIndex: s.CreateIndex,\n\t\t\t\tModifyIndex: s.ModifyIndex,\n\t\t\t}\n\t\t}\n\t} else {\n\t\tnodes, _, err = client.Catalog().Nodes(&api.QueryOptions{\n\t\t\tNear: c.near,\n\t\t\tNodeMeta: c.nodeMeta,\n\t\t})\n\t\tif err != nil {\n\t\t\tc.UI.Error(fmt.Sprintf(\"Error listing nodes: %s\", err))\n\t\t\treturn 1\n\t\t}\n\t}\n\n\t\/\/ Handle the edge case where there are no nodes that match the query.\n\tif len(nodes) == 0 {\n\t\tc.UI.Error(\"No nodes match the given query - try expanding your search.\")\n\t\treturn 0\n\t}\n\n\toutput, err := printNodes(nodes, c.detailed)\n\tif err != nil {\n\t\tc.UI.Error(fmt.Sprintf(\"Error printing nodes: %s\", err))\n\t\treturn 1\n\t}\n\n\tc.UI.Info(output)\n\n\treturn 0\n}\n\nfunc (c *cmd) Synopsis() string {\n\treturn \"Lists all nodes in the given datacenter\"\n}\n\nfunc (c *cmd) Help() string {\n\treturn c.usage\n}\n\n\/\/ printNodes accepts a list of nodes and prints information in a tabular\n\/\/ format about the nodes.\nfunc printNodes(nodes []*api.Node, detailed bool) (string, error) {\n\tvar result []string\n\tif detailed {\n\t\tresult = detailedNodes(nodes)\n\t} else {\n\t\tresult = simpleNodes(nodes)\n\t}\n\n\treturn columnize.SimpleFormat(result), nil\n}\n\nfunc detailedNodes(nodes []*api.Node) []string {\n\tresult := make([]string, 0, len(nodes)+1)\n\theader := \"Node|ID|Address|DC|TaggedAddresses|Meta\"\n\tresult = append(result, header)\n\n\tfor _, node := range nodes {\n\t\tresult = append(result, fmt.Sprintf(\"%s|%s|%s|%s|%s|%s\",\n\t\t\tnode.Node, node.ID, node.Address, node.Datacenter,\n\t\t\tmapToKV(node.TaggedAddresses, \", \"), mapToKV(node.Meta, \", \")))\n\t}\n\n\treturn result\n}\n\nfunc simpleNodes(nodes []*api.Node) []string {\n\tresult := make([]string, 0, len(nodes)+1)\n\theader := \"Node|ID|Address|DC\"\n\tresult = append(result, header)\n\n\tfor _, node := range nodes {\n\t\t\/\/ Shorten the ID in non-detailed mode to just the first octet.\n\t\tid := node.ID\n\t\tidx := strings.Index(id, \"-\")\n\t\tif idx > 0 {\n\t\t\tid = id[0:idx]\n\t\t}\n\t\tresult = append(result, fmt.Sprintf(\"%s|%s|%s|%s\",\n\t\t\tnode.Node, id, node.Address, node.Datacenter))\n\t}\n\n\treturn result\n}\n\n\/\/ mapToKV converts a map[string]string into a human-friendly key=value list,\n\/\/ sorted by name.\nfunc mapToKV(m map[string]string, joiner string) string {\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\tr := make([]string, len(keys))\n\tfor i, k := range keys {\n\t\tr[i] = fmt.Sprintf(\"%s=%s\", k, m[k])\n\t}\n\treturn strings.Join(r, joiner)\n}\n\nconst usage = `Usage: consul catalog nodes [options]\n\n Retrieves the list nodes registered in a given datacenter. By default, the\n datacenter of the local agent is queried.\n\n To retrieve the list of nodes:\n\n $ consul catalog nodes\n\n To print detailed information including full node IDs, tagged addresses, and\n metadata information:\n\n $ consul catalog nodes -detailed\n\n To list nodes which are running a particular service:\n\n $ consul catalog nodes -service=web\n\n To filter by node metadata:\n\n $ consul catalog nodes -node-meta=\"foo=bar\"\n\n To sort nodes by estimated round-trip time from node-web:\n\n $ consul catalog nodes -near=node-web\n\n For a full list of options and examples, please see the Consul documentation.`\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage release\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"code.gitea.io\/gitea\/models\"\n\t\"code.gitea.io\/gitea\/modules\/git\"\n\t\"code.gitea.io\/gitea\/modules\/log\"\n\t\"code.gitea.io\/gitea\/modules\/notification\"\n\t\"code.gitea.io\/gitea\/modules\/repository\"\n\t\"code.gitea.io\/gitea\/modules\/timeutil\"\n)\n\nfunc createTag(gitRepo *git.Repository, rel *models.Release) error {\n\t\/\/ Only actual create when publish.\n\tif !rel.IsDraft {\n\t\tif !gitRepo.IsTagExist(rel.TagName) {\n\t\t\tcommit, err := gitRepo.GetCommit(rel.Target)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"GetCommit: %v\", err)\n\t\t\t}\n\n\t\t\t\/\/ Trim '--' prefix to prevent command line argument vulnerability.\n\t\t\trel.TagName = strings.TrimPrefix(rel.TagName, \"--\")\n\t\t\tif err = gitRepo.CreateTag(rel.TagName, commit.ID.String()); err != nil {\n\t\t\t\tif strings.Contains(err.Error(), \"is not a valid tag name\") {\n\t\t\t\t\treturn models.ErrInvalidTagName{\n\t\t\t\t\t\tTagName: rel.TagName,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t\trel.LowerTagName = strings.ToLower(rel.TagName)\n\t\t\t\/\/ Prepare Notify\n\t\t\tif err := rel.LoadAttributes(); err != nil {\n\t\t\t\tlog.Error(\"LoadAttributes: %v\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tnotification.NotifyPushCommits(\n\t\t\t\trel.Publisher, rel.Repo, git.TagPrefix+rel.TagName,\n\t\t\t\tgit.EmptySHA, commit.ID.String(), repository.NewPushCommits())\n\t\t\tnotification.NotifyCreateRef(rel.Publisher, rel.Repo, \"tag\", git.TagPrefix+rel.TagName)\n\t\t}\n\t\tcommit, err := gitRepo.GetTagCommit(rel.TagName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"GetTagCommit: %v\", err)\n\t\t}\n\n\t\trel.Sha1 = commit.ID.String()\n\t\trel.CreatedUnix = timeutil.TimeStamp(commit.Author.When.Unix())\n\t\trel.NumCommits, err = commit.CommitsCount()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"CommitsCount: %v\", err)\n\t\t}\n\t} else {\n\t\trel.CreatedUnix = timeutil.TimeStampNow()\n\t}\n\treturn nil\n}\n\n\/\/ CreateRelease creates a new release of repository.\nfunc CreateRelease(gitRepo *git.Repository, rel *models.Release, attachmentUUIDs []string) error {\n\tisExist, err := models.IsReleaseExist(rel.RepoID, rel.TagName)\n\tif err != nil {\n\t\treturn err\n\t} else if isExist {\n\t\treturn models.ErrReleaseAlreadyExist{\n\t\t\tTagName: rel.TagName,\n\t\t}\n\t}\n\n\tif err = createTag(gitRepo, rel); err != nil {\n\t\treturn err\n\t}\n\n\trel.LowerTagName = strings.ToLower(rel.TagName)\n\tif err = models.InsertRelease(rel); err != nil {\n\t\treturn err\n\t}\n\n\tif err = models.AddReleaseAttachments(rel.ID, attachmentUUIDs); err != nil {\n\t\treturn err\n\t}\n\n\tif !rel.IsDraft {\n\t\tnotification.NotifyNewRelease(rel)\n\t}\n\n\treturn nil\n}\n\n\/\/ UpdateRelease updates information of a release.\nfunc UpdateRelease(doer *models.User, gitRepo *git.Repository, rel *models.Release, attachmentUUIDs []string) (err error) {\n\tif err = createTag(gitRepo, rel); err != nil {\n\t\treturn err\n\t}\n\trel.LowerTagName = strings.ToLower(rel.TagName)\n\n\tif err = models.UpdateRelease(models.DefaultDBContext(), rel); err != nil {\n\t\treturn err\n\t}\n\n\tif err = models.AddReleaseAttachments(rel.ID, attachmentUUIDs); err != nil {\n\t\tlog.Error(\"AddReleaseAttachments: %v\", err)\n\t}\n\n\tnotification.NotifyUpdateRelease(doer, rel)\n\n\treturn err\n}\n\n\/\/ DeleteReleaseByID deletes a release and corresponding Git tag by given ID.\nfunc DeleteReleaseByID(id int64, doer *models.User, delTag bool) error {\n\trel, err := models.GetReleaseByID(id)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"GetReleaseByID: %v\", err)\n\t}\n\n\trepo, err := models.GetRepositoryByID(rel.RepoID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"GetRepositoryByID: %v\", err)\n\t}\n\n\tif delTag {\n\t\tif stdout, err := git.NewCommand(\"tag\", \"-d\", rel.TagName).\n\t\t\tSetDescription(fmt.Sprintf(\"DeleteReleaseByID (git tag -d): %d\", rel.ID)).\n\t\t\tRunInDir(repo.RepoPath()); err != nil && !strings.Contains(err.Error(), \"not found\") {\n\t\t\tlog.Error(\"DeleteReleaseByID (git tag -d): %d in %v Failed:\\nStdout: %s\\nError: %v\", rel.ID, repo, stdout, err)\n\t\t\treturn fmt.Errorf(\"git tag -d: %v\", err)\n\t\t}\n\n\t\tif err := models.DeleteReleaseByID(id); err != nil {\n\t\t\treturn fmt.Errorf(\"DeleteReleaseByID: %v\", err)\n\t\t}\n\t} else {\n\t\trel.IsTag = true\n\t\trel.IsDraft = false\n\t\trel.IsPrerelease = false\n\t\trel.Title = \"\"\n\t\trel.Note = \"\"\n\n\t\tif err = models.UpdateRelease(models.DefaultDBContext(), rel); err != nil {\n\t\t\treturn fmt.Errorf(\"Update: %v\", err)\n\t\t}\n\t}\n\n\trel.Repo = repo\n\tif err = rel.LoadAttributes(); err != nil {\n\t\treturn fmt.Errorf(\"LoadAttributes: %v\", err)\n\t}\n\n\tif err := models.DeleteAttachmentsByRelease(rel.ID); err != nil {\n\t\treturn fmt.Errorf(\"DeleteAttachments: %v\", err)\n\t}\n\n\tfor i := range rel.Attachments {\n\t\tattachment := rel.Attachments[i]\n\t\tif err := os.RemoveAll(attachment.LocalPath()); err != nil {\n\t\t\tlog.Error(\"Delete attachment %s of release %s failed: %v\", attachment.UUID, rel.ID, err)\n\t\t}\n\t}\n\n\tnotification.NotifyDeleteRelease(doer, rel)\n\n\treturn nil\n}\n<commit_msg>When using API CreateRelease set created_unix to the tag commit time (#11218)<commit_after>\/\/ Copyright 2019 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage release\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"code.gitea.io\/gitea\/models\"\n\t\"code.gitea.io\/gitea\/modules\/git\"\n\t\"code.gitea.io\/gitea\/modules\/log\"\n\t\"code.gitea.io\/gitea\/modules\/notification\"\n\t\"code.gitea.io\/gitea\/modules\/repository\"\n\t\"code.gitea.io\/gitea\/modules\/timeutil\"\n)\n\nfunc createTag(gitRepo *git.Repository, rel *models.Release) error {\n\t\/\/ Only actual create when publish.\n\tif !rel.IsDraft {\n\t\tif !gitRepo.IsTagExist(rel.TagName) {\n\t\t\tcommit, err := gitRepo.GetCommit(rel.Target)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"GetCommit: %v\", err)\n\t\t\t}\n\n\t\t\t\/\/ Trim '--' prefix to prevent command line argument vulnerability.\n\t\t\trel.TagName = strings.TrimPrefix(rel.TagName, \"--\")\n\t\t\tif err = gitRepo.CreateTag(rel.TagName, commit.ID.String()); err != nil {\n\t\t\t\tif strings.Contains(err.Error(), \"is not a valid tag name\") {\n\t\t\t\t\treturn models.ErrInvalidTagName{\n\t\t\t\t\t\tTagName: rel.TagName,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t\trel.LowerTagName = strings.ToLower(rel.TagName)\n\t\t\t\/\/ Prepare Notify\n\t\t\tif err := rel.LoadAttributes(); err != nil {\n\t\t\t\tlog.Error(\"LoadAttributes: %v\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tnotification.NotifyPushCommits(\n\t\t\t\trel.Publisher, rel.Repo, git.TagPrefix+rel.TagName,\n\t\t\t\tgit.EmptySHA, commit.ID.String(), repository.NewPushCommits())\n\t\t\tnotification.NotifyCreateRef(rel.Publisher, rel.Repo, \"tag\", git.TagPrefix+rel.TagName)\n\t\t}\n\t\tcommit, err := gitRepo.GetTagCommit(rel.TagName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"GetTagCommit: %v\", err)\n\t\t}\n\n\t\trel.Sha1 = commit.ID.String()\n\t\trel.CreatedUnix = timeutil.TimeStampNow()\n\t\trel.NumCommits, err = commit.CommitsCount()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"CommitsCount: %v\", err)\n\t\t}\n\t} else {\n\t\trel.CreatedUnix = timeutil.TimeStampNow()\n\t}\n\treturn nil\n}\n\n\/\/ CreateRelease creates a new release of repository.\nfunc CreateRelease(gitRepo *git.Repository, rel *models.Release, attachmentUUIDs []string) error {\n\tisExist, err := models.IsReleaseExist(rel.RepoID, rel.TagName)\n\tif err != nil {\n\t\treturn err\n\t} else if isExist {\n\t\treturn models.ErrReleaseAlreadyExist{\n\t\t\tTagName: rel.TagName,\n\t\t}\n\t}\n\n\tif err = createTag(gitRepo, rel); err != nil {\n\t\treturn err\n\t}\n\n\trel.LowerTagName = strings.ToLower(rel.TagName)\n\tif err = models.InsertRelease(rel); err != nil {\n\t\treturn err\n\t}\n\n\tif err = models.AddReleaseAttachments(rel.ID, attachmentUUIDs); err != nil {\n\t\treturn err\n\t}\n\n\tif !rel.IsDraft {\n\t\tnotification.NotifyNewRelease(rel)\n\t}\n\n\treturn nil\n}\n\n\/\/ UpdateRelease updates information of a release.\nfunc UpdateRelease(doer *models.User, gitRepo *git.Repository, rel *models.Release, attachmentUUIDs []string) (err error) {\n\tif err = createTag(gitRepo, rel); err != nil {\n\t\treturn err\n\t}\n\trel.LowerTagName = strings.ToLower(rel.TagName)\n\n\tif err = models.UpdateRelease(models.DefaultDBContext(), rel); err != nil {\n\t\treturn err\n\t}\n\n\tif err = models.AddReleaseAttachments(rel.ID, attachmentUUIDs); err != nil {\n\t\tlog.Error(\"AddReleaseAttachments: %v\", err)\n\t}\n\n\tnotification.NotifyUpdateRelease(doer, rel)\n\n\treturn err\n}\n\n\/\/ DeleteReleaseByID deletes a release and corresponding Git tag by given ID.\nfunc DeleteReleaseByID(id int64, doer *models.User, delTag bool) error {\n\trel, err := models.GetReleaseByID(id)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"GetReleaseByID: %v\", err)\n\t}\n\n\trepo, err := models.GetRepositoryByID(rel.RepoID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"GetRepositoryByID: %v\", err)\n\t}\n\n\tif delTag {\n\t\tif stdout, err := git.NewCommand(\"tag\", \"-d\", rel.TagName).\n\t\t\tSetDescription(fmt.Sprintf(\"DeleteReleaseByID (git tag -d): %d\", rel.ID)).\n\t\t\tRunInDir(repo.RepoPath()); err != nil && !strings.Contains(err.Error(), \"not found\") {\n\t\t\tlog.Error(\"DeleteReleaseByID (git tag -d): %d in %v Failed:\\nStdout: %s\\nError: %v\", rel.ID, repo, stdout, err)\n\t\t\treturn fmt.Errorf(\"git tag -d: %v\", err)\n\t\t}\n\n\t\tif err := models.DeleteReleaseByID(id); err != nil {\n\t\t\treturn fmt.Errorf(\"DeleteReleaseByID: %v\", err)\n\t\t}\n\t} else {\n\t\trel.IsTag = true\n\t\trel.IsDraft = false\n\t\trel.IsPrerelease = false\n\t\trel.Title = \"\"\n\t\trel.Note = \"\"\n\n\t\tif err = models.UpdateRelease(models.DefaultDBContext(), rel); err != nil {\n\t\t\treturn fmt.Errorf(\"Update: %v\", err)\n\t\t}\n\t}\n\n\trel.Repo = repo\n\tif err = rel.LoadAttributes(); err != nil {\n\t\treturn fmt.Errorf(\"LoadAttributes: %v\", err)\n\t}\n\n\tif err := models.DeleteAttachmentsByRelease(rel.ID); err != nil {\n\t\treturn fmt.Errorf(\"DeleteAttachments: %v\", err)\n\t}\n\n\tfor i := range rel.Attachments {\n\t\tattachment := rel.Attachments[i]\n\t\tif err := os.RemoveAll(attachment.LocalPath()); err != nil {\n\t\t\tlog.Error(\"Delete attachment %s of release %s failed: %v\", attachment.UUID, rel.ID, err)\n\t\t}\n\t}\n\n\tnotification.NotifyDeleteRelease(doer, rel)\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package redis\n\nimport (\n\t\"errors\"\n\t\"github.com\/blacklightops\/libbeat\/common\"\n\t\"github.com\/blacklightops\/libbeat\/logp\"\n\t\"github.com\/blacklightops\/turnbeat\/inputs\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"net\"\n\t\"time\"\n)\n\ntype RedisInput struct {\n\tConfig inputs.MothershipConfig\n\tHost\tstring\t\/* the host to connect to *\/\n\tPort\tint\t\t\/* the port to connect to *\/\n\tDB\t\tint\t\t\/* the database to read from *\/\n\tKey\t\tstring\t\/* the key to POP from *\/\n\tType\tstring\t\/* the type to add to events *\/\n}\n\nfunc (l *RedisInput) InputType() string {\n\treturn \"RedisInput\"\n}\n\nfunc (l *RedisInput) InputVersion() string {\n\treturn \"0.0.1\"\n}\n\nfunc (l *RedisInput) Init(config inputs.MothershipConfig) error {\n\n\tl.Config = config\n\n\tif config.Host == \"\" {\n\t\treturn errors.New(\"No Input Host specified\")\n\t}\n\tl.Host = config.Host\n\n\tif config.Port == 0 {\n\t\treturn errors.New(\"No Input Port specified\")\n\t}\n\tl.Port = config.Port\n\n\tl.DB = config.DB\n\t\n\tif config.Key == \"\" {\n\t\treturn errors.New(\"No Input Key specified\")\n\t}\n\tl.Key = config.Key\n\n\tif config.Type == \"\" {\n\t\treturn errors.New(\"No Event Type specified\")\n\t}\n\tl.Type = config.Type\n\n\tlogp.Debug(\"redisinput\", \"Using Host %s\", l.Host)\n\tlogp.Debug(\"redisinput\", \"Using Port %d\", l.Port)\n\tlogp.Debug(\"redisinput\", \"Using Database %d\", l.DB)\n\tlogp.Debug(\"redisinput\", \"Using Key %s\", l.Key)\n\tlogp.Debug(\"redisinput\", \"Adding Event Type %s\", l.Type)\n\n\treturn nil\n}\n\nfunc (l *RedisInput) GetConfig() inputs.MothershipConfig {\n\treturn l.Config\n}\n\nfunc (l *RedisInput) Run(output chan common.MapStr) error {\n\tlogp.Info(\"[RedisInput] Running Redis Input\")\n\tredisHostname := fmt.Sprintf(\"%s:%d\", l.Host, l.Port)\n\tserver, err := redis.Dial(\"tcp\", redisHostname)\n\tif err != nil {\n\t\tlogp.Err(\"couldn't start listening: \" + err.Error())\n\t\treturn nil\n\t}\n\tlogp.Info(\"[RedisInput] Connected to Redis Server\")\n\n\t\/\/ dispatch the master listen thread\n\tgo func(server redis.Conn) {\n\t\tvar args []interface{}\n\t\tfor {\n\t\t\texists, err := redis.Bool(server.Do(\"EXISTS\", append(args, l.Key)))\n\t\t\tif err != nil {\n\t\t\t\tlogp.Err(\"An error occured while executing EXISTS command\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif exists != true {\n\t\t\t\tlogp.Err(\"Key %s does not exist!\", l.Key)\n\t\t\t\treturn nil;\n\t\t\t}\n\t\t\thandleConn(server, output)\n\t\t}\n\t}(server)\n\treturn nil\n}\n\nfunc (l *RedisInput) handleConn(client net.Conn, output chan common.MapStr) {\n\tvar offset int64 = 0\n\tvar line uint64 = 0\n\n\tlogp.Debug(\"redisinput\", \"Reading events from %s\", l.Key)\n\n\tnow := func() time.Time {\n\t\tt := time.Now()\n\t\treturn t\n\t}\n\n\tfor {\n\t\targs = []interface{}\n\t\treply, err := server.Do(\"LPOP\", appends(args, l.Key))\n\t\ttext, err := redis.String(reply, err)\n\t\tbytesread += len(text)\n\n\t\tif err != nil {\n\t\t\tlogp.Info(\"Unexpected state reading from %s; error: %s\\n\", l.Key, err)\n\t\t\treturn\n\t\t}\n\n\t\tlogp.Debug(\"redisinputlines\", \"New Line: %s\", &text)\n\n\t\tline++\n\n\t\tevent := common.MapStr{}\n\t\tevent[\"source\"] = l.Key\n\t\tevent[\"offset\"] = offset\n\t\tevent[\"line\"] = line\n\t\tevent[\"message\"] = text\n\t\tevent[\"type\"] = l.Type\n\n\t\tevent.EnsureTimestampField(now)\n\t\tevent.EnsureCountField()\n\n\t\toffset += int64(bytesread)\n\n\t\tlogp.Debug(\"redisinput\", \"InputEvent: %v\", event)\n\t\toutput <- event \/\/ ship the new event downstream\n\t\tclient.Write([]byte(\"OK\"))\n\t}\n\tlogp.Debug(\"redisinput\", \"Finished reading from %s\", l.Key)\n}\n<commit_msg>Fixing errors<commit_after>package redis\n\nimport (\n\t\"errors\"\n\t\"github.com\/blacklightops\/libbeat\/common\"\n\t\"github.com\/blacklightops\/libbeat\/logp\"\n\t\"github.com\/blacklightops\/turnbeat\/inputs\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"net\"\n\t\"fmt\"\n\t\"time\"\n)\n\ntype RedisInput struct {\n\tConfig inputs.MothershipConfig\n\tHost\tstring\t\/* the host to connect to *\/\n\tPort\tint\t\t\/* the port to connect to *\/\n\tDB\t\tint\t\t\/* the database to read from *\/\n\tKey\t\tstring\t\/* the key to POP from *\/\n\tType\tstring\t\/* the type to add to events *\/\n}\n\nfunc (l *RedisInput) InputType() string {\n\treturn \"RedisInput\"\n}\n\nfunc (l *RedisInput) InputVersion() string {\n\treturn \"0.0.1\"\n}\n\nfunc (l *RedisInput) Init(config inputs.MothershipConfig) error {\n\n\tl.Config = config\n\n\tif config.Host == \"\" {\n\t\treturn errors.New(\"No Input Host specified\")\n\t}\n\tl.Host = config.Host\n\n\tif config.Port == 0 {\n\t\treturn errors.New(\"No Input Port specified\")\n\t}\n\tl.Port = config.Port\n\n\tl.DB = config.DB\n\t\n\tif config.Key == \"\" {\n\t\treturn errors.New(\"No Input Key specified\")\n\t}\n\tl.Key = config.Key\n\n\tif config.Type == \"\" {\n\t\treturn errors.New(\"No Event Type specified\")\n\t}\n\tl.Type = config.Type\n\n\tlogp.Debug(\"redisinput\", \"Using Host %s\", l.Host)\n\tlogp.Debug(\"redisinput\", \"Using Port %d\", l.Port)\n\tlogp.Debug(\"redisinput\", \"Using Database %d\", l.DB)\n\tlogp.Debug(\"redisinput\", \"Using Key %s\", l.Key)\n\tlogp.Debug(\"redisinput\", \"Adding Event Type %s\", l.Type)\n\n\treturn nil\n}\n\nfunc (l *RedisInput) GetConfig() inputs.MothershipConfig {\n\treturn l.Config\n}\n\nfunc (l *RedisInput) Run(output chan common.MapStr) error {\n\tlogp.Info(\"[RedisInput] Running Redis Input\")\n\tredisHostname := fmt.Sprintf(\"%s:%d\", l.Host, l.Port)\n\tserver, err := redis.Dial(\"tcp\", redisHostname)\n\tif err != nil {\n\t\tlogp.Err(\"couldn't start listening: \" + err.Error())\n\t\treturn nil\n\t}\n\tlogp.Info(\"[RedisInput] Connected to Redis Server\")\n\n\t\/\/ dispatch the master listen thread\n\tgo func(server redis.Conn) {\n\t\tvar args []interface{}\n\t\tfor {\n\t\t\texists, err := redis.Bool(server.Do(\"EXISTS\", append(args, l.Key)))\n\t\t\tif err != nil {\n\t\t\t\tlogp.Err(\"An error occured while executing EXISTS command\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif exists != true {\n\t\t\t\tlogp.Err(\"Key %s does not exist!\", l.Key)\n\t\t\t\treturn\n\t\t\t}\n\t\t\thandleConn(server, output)\n\t\t}\n\t}(server)\n\treturn nil\n}\n\nfunc (l *RedisInput) handleConn(server redis.Conn, output chan common.MapStr) {\n\tvar offset int64 = 0\n\tvar line uint64 = 0\n\tvar bytesread uint65 = 0\n\tvar args = []interface{}\n\n\tlogp.Debug(\"redisinput\", \"Reading events from %s\", l.Key)\n\n\tnow := func() time.Time {\n\t\tt := time.Now()\n\t\treturn t\n\t}\n\n\tfor {\n\t\targs = []interface{}\n\t\treply, err := server.Do(\"LPOP\", append(args, l.Key))\n\t\ttext, err := redis.String(reply, err)\n\t\tbytesread += len(text)\n\n\t\tif err != nil {\n\t\t\tlogp.Info(\"Unexpected state reading from %s; error: %s\\n\", l.Key, err)\n\t\t\treturn\n\t\t}\n\n\t\tlogp.Debug(\"redisinputlines\", \"New Line: %s\", &text)\n\n\t\tline++\n\n\t\tevent := common.MapStr{}\n\t\tevent[\"source\"] = l.Key\n\t\tevent[\"offset\"] = offset\n\t\tevent[\"line\"] = line\n\t\tevent[\"message\"] = text\n\t\tevent[\"type\"] = l.Type\n\n\t\tevent.EnsureTimestampField(now)\n\t\tevent.EnsureCountField()\n\n\t\toffset += int64(bytesread)\n\n\t\tlogp.Debug(\"redisinput\", \"InputEvent: %v\", event)\n\t\toutput <- event \/\/ ship the new event downstream\n\t\tclient.Write([]byte(\"OK\"))\n\t}\n\tlogp.Debug(\"redisinput\", \"Finished reading from %s\", l.Key)\n}\n<|endoftext|>"} {"text":"<commit_before>package challenge52\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc TestCheapestHashEver(t *testing.T) {\n\tinput := []byte(\"happiness is ever present in the sandwich\")\n\thash := CheapestHashEver(input, []byte(\"hi\"))\n\n\tif !bytes.Equal(hash, []byte{203, 212}) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestCheapHash(t *testing.T) {\n\tinput := []byte(\"happiness is ever present in the sandwich\")\n\thash := CheapHash(input, []byte(\"hi\"))\n\n\tif !bytes.Equal(hash, []byte{88, 39, 89}) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestBeefyHash(t *testing.T) {\n\tinput := []byte(\"happiness is ever present in the sandwich\")\n\thash := BeefyHash(input, []byte(\"hi\"))\n\n\tif !bytes.Equal(hash, []byte{203, 212, 88, 39, 89}) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestCheapestCollisionMachine(t *testing.T) {\n\tinitialState := []byte(\"hi\")\n\tcollision := CheapestCollisionMachine(initialState)\n\tif !bytes.Equal(CheapestHashEver(collision.a, initialState), CheapestHashEver(collision.b, initialState)) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestFindCheapestCollisions(t *testing.T) {\n\thashMap := make(map[string][]byte)\n\tcollisions := FindCheapestCollisions(3)\n\tfor _, c := range collisions {\n\t\ths := fmt.Sprintf(\"%0x\", c)\n\t\tif _, found := hashMap[hs]; found {\n\t\t\t\/\/ already in map -- duplicate collision, doesn't count\n\t\t\tt.Fail()\n\t\t} else {\n\t\t\thashMap[hs] = c\n\t\t}\n\t}\n\tif len(collisions) != 8 || len(hashMap) != 8 {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestFindMultiCollision(t *testing.T) {\n\tinitialState := []byte(\"hi\")\n\tcollision := FindMultiCollision()\n\tcheapestHa := CheapestHashEver(collision.a, initialState)\n\tcheapestHb := CheapestHashEver(collision.b, initialState)\n\tcheapHa := CheapHash(collision.a, initialState)\n\tcheapHb := CheapHash(collision.b, initialState)\n\tif bytes.Equal(collision.a, collision.b) || !bytes.Equal(cheapHa, cheapHb) || !bytes.Equal(cheapestHa, cheapestHb) {\n\t\tt.Fail()\n\t}\n\n\t\/\/ and of course the whole point of this:\n\tif !bytes.Equal(BeefyHash(collision.a, initialState), BeefyHash(collision.b, initialState)) {\n\t\tt.Fail()\n\t}\n}\n<commit_msg>Add benchmark<commit_after>package challenge52\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc TestCheapestHashEver(t *testing.T) {\n\tinput := []byte(\"happiness is ever present in the sandwich\")\n\thash := CheapestHashEver(input, []byte(\"hi\"))\n\n\tif !bytes.Equal(hash, []byte{203, 212}) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestCheapHash(t *testing.T) {\n\tinput := []byte(\"happiness is ever present in the sandwich\")\n\thash := CheapHash(input, []byte(\"hi\"))\n\n\tif !bytes.Equal(hash, []byte{88, 39, 89}) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestBeefyHash(t *testing.T) {\n\tinput := []byte(\"happiness is ever present in the sandwich\")\n\thash := BeefyHash(input, []byte(\"hi\"))\n\n\tif !bytes.Equal(hash, []byte{203, 212, 88, 39, 89}) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestCheapestCollisionMachine(t *testing.T) {\n\tinitialState := []byte(\"hi\")\n\tcollision := CheapestCollisionMachine(initialState)\n\tif !bytes.Equal(CheapestHashEver(collision.a, initialState), CheapestHashEver(collision.b, initialState)) {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestFindCheapestCollisions(t *testing.T) {\n\thashMap := make(map[string][]byte)\n\tcollisions := FindCheapestCollisions(3)\n\tfor _, c := range collisions {\n\t\ths := fmt.Sprintf(\"%0x\", c)\n\t\tif _, found := hashMap[hs]; found {\n\t\t\t\/\/ already in map -- duplicate collision, doesn't count\n\t\t\tt.Fail()\n\t\t} else {\n\t\t\thashMap[hs] = c\n\t\t}\n\t}\n\tif len(collisions) != 8 || len(hashMap) != 8 {\n\t\tt.Fail()\n\t}\n}\n\nfunc TestFindMultiCollision(t *testing.T) {\n\tinitialState := []byte(\"hi\")\n\tcollision := FindMultiCollision()\n\tcheapestHa := CheapestHashEver(collision.a, initialState)\n\tcheapestHb := CheapestHashEver(collision.b, initialState)\n\tcheapHa := CheapHash(collision.a, initialState)\n\tcheapHb := CheapHash(collision.b, initialState)\n\tif bytes.Equal(collision.a, collision.b) || !bytes.Equal(cheapHa, cheapHb) || !bytes.Equal(cheapestHa, cheapestHb) {\n\t\tt.Fail()\n\t}\n\n\t\/\/ and of course the whole point of this:\n\tif !bytes.Equal(BeefyHash(collision.a, initialState), BeefyHash(collision.b, initialState)) {\n\t\tt.Fail()\n\t}\n}\n\nvar benchmarkResult *Collision\n\nfunc BenchmarkFindMultiCollision(b *testing.B) {\n\tvar collision *Collision\n\tfor i := 0; i < b.N; i++ {\n\t\tcollision = FindMultiCollision()\n\t}\n\n\tbenchmarkResult = collision\n}\n<|endoftext|>"} {"text":"<commit_before>package bytecode\n\nimport (\n\t\"fmt\"\n\t\"github.com\/goby-lang\/goby\/compiler\/ast\"\n)\n\nfunc (g *Generator) compileExpression(is *InstructionSet, exp ast.Expression, scope *scope, table *localTable) {\n\t\/\/ See fsm initialization's comment\n\tif g.fsm.Is(keepExp) {\n\t\tswitch exp := exp.(type) {\n\t\tcase *ast.Constant:\n\t\t\tis.define(GetConstant, exp.Value, fmt.Sprint(exp.IsNamespace))\n\t\tcase *ast.InstanceVariable:\n\t\t\tis.define(GetInstanceVariable, exp.Value)\n\t\tcase *ast.IntegerLiteral:\n\t\t\tis.define(PutObject, fmt.Sprint(exp.Value))\n\t\tcase *ast.StringLiteral:\n\t\t\tis.define(PutString, fmt.Sprintf(\"\\\"%s\\\"\", exp.Value))\n\t\tcase *ast.BooleanExpression:\n\t\t\tis.define(PutObject, fmt.Sprint(exp.Value))\n\t\tcase *ast.NilExpression:\n\t\t\tis.define(PutNull)\n\t\tcase *ast.RangeExpression:\n\t\t\tg.compileExpression(is, exp.Start, scope, table)\n\t\t\tg.compileExpression(is, exp.End, scope, table)\n\t\t\tis.define(NewRange, 0)\n\t\tcase *ast.ArrayExpression:\n\t\t\tfor _, elem := range exp.Elements {\n\t\t\t\tg.compileExpression(is, elem, scope, table)\n\t\t\t}\n\t\t\tis.define(NewArray, len(exp.Elements))\n\t\tcase *ast.HashExpression:\n\t\t\tfor key, value := range exp.Data {\n\t\t\t\tis.define(PutString, fmt.Sprintf(\"\\\"%s\\\"\", key))\n\t\t\t\tg.compileExpression(is, value, scope, table)\n\t\t\t}\n\t\t\tis.define(NewHash, len(exp.Data)*2)\n\t\tcase *ast.SelfExpression:\n\t\t\tis.define(PutSelf)\n\t\tcase *ast.PrefixExpression:\n\t\t\tg.compilePrefixExpression(is, exp, scope, table)\n\t\tcase *ast.InfixExpression:\n\t\t\tg.compileInfixExpression(is, exp, scope, table)\n\t\t}\n\t}\n\n\tswitch exp := exp.(type) {\n\tcase *ast.Identifier:\n\t\tg.compileIdentifier(is, exp, scope, table)\n\tcase *ast.AssignExpression:\n\t\tg.compileAssignExpression(is, exp, scope, table)\n\tcase *ast.IfExpression:\n\t\tg.compileIfExpression(is, exp, scope, table)\n\tcase *ast.YieldExpression:\n\t\tg.compileYieldExpression(is, exp, scope, table)\n\tcase *ast.CallExpression:\n\t\tg.compileCallExpression(is, exp, scope, table)\n\t}\n}\n\nfunc (g *Generator) compileIdentifier(is *InstructionSet, exp *ast.Identifier, scope *scope, table *localTable) {\n\tindex, depth, ok := table.getLCL(exp.Value, table.depth)\n\n\t\/\/ This means it's a local variable.\n\t\/\/ But we only define the instruction when we'll need it.\n\tif ok && g.fsm.Is(keepExp) {\n\t\tis.define(GetLocal, depth, index)\n\t\treturn\n\t}\n\n\t\/\/ otherwise it's a method call\n\tis.define(PutSelf)\n\tis.define(Send, exp.Value, 0)\n}\n\nfunc (g *Generator) compileYieldExpression(is *InstructionSet, exp *ast.YieldExpression, scope *scope, table *localTable) {\n\toldState := g.fsm.Current()\n\tg.fsm.Event(keepExp)\n\n\tis.define(PutSelf)\n\n\tfor _, arg := range exp.Arguments {\n\t\tg.compileExpression(is, arg, scope, table)\n\t}\n\n\tis.define(InvokeBlock, len(exp.Arguments))\n\tg.fsm.Event(oldState)\n}\n\nfunc (g *Generator) compileCallExpression(is *InstructionSet, exp *ast.CallExpression, scope *scope, table *localTable) {\n\toldState := g.fsm.Current()\n\n\t\/\/ We need the receiver expression and argument expressions\n\tg.fsm.Event(keepExp)\n\tg.compileExpression(is, exp.Receiver, scope, table)\n\n\tfor _, arg := range exp.Arguments {\n\t\tg.compileExpression(is, arg, scope, table)\n\t}\n\n\tif exp.Block != nil {\n\t\t\/\/ Inside block should be one level deeper than outside\n\t\tnewTable := newLocalTable(table.depth + 1)\n\t\tnewTable.upper = table\n\t\tblockIndex := g.blockCounter\n\t\tg.blockCounter++\n\t\tg.compileBlockArgExpression(blockIndex, exp, scope, newTable)\n\t\tis.define(Send, exp.Method, len(exp.Arguments), fmt.Sprintf(\"block:%d\", blockIndex))\n\t\treturn\n\t}\n\tis.define(Send, exp.Method, len(exp.Arguments))\n\n\tif exp.Method == \"++\" || exp.Method == \"--\" {\n\t\t\/\/ ++ and -- are methods with side effect and shouldn't return anything\n\t\tis.define(Pop)\n\t}\n\n\tg.fsm.Event(oldState)\n}\n\nfunc (g *Generator) compileAssignExpression(is *InstructionSet, exp *ast.AssignExpression, scope *scope, table *localTable) {\n\toldState := g.fsm.Current()\n\tg.fsm.Event(keepExp)\n\tg.compileExpression(is, exp.Value, scope, table)\n\tg.fsm.Event(oldState)\n\n\tif len(exp.Variables) > 1 {\n\t\tis.define(ExpandArray, len(exp.Variables))\n\t}\n\n\tfor _, v := range exp.Variables {\n\t\tswitch name := v.(type) {\n\t\tcase *ast.Identifier:\n\t\t\tindex, depth := table.setLCL(name.Value, table.depth)\n\n\t\t\tif exp.Optioned != 0 {\n\t\t\t\tis.define(SetLocal, depth, index, exp.Optioned)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tis.define(SetLocal, depth, index)\n\t\tcase *ast.InstanceVariable:\n\t\t\tis.define(SetInstanceVariable, name.Value)\n\t\tcase *ast.Constant:\n\t\t\tis.define(SetConstant, name.Value)\n\t\t}\n\n\t\tif exp.IsStmt {\n\t\t\tis.define(Pop)\n\t\t}\n\t}\n}\n\nfunc (g *Generator) compileBlockArgExpression(index int, exp *ast.CallExpression, scope *scope, table *localTable) {\n\toldState := g.fsm.Current()\n\t\/\/ We don't need any unused expression inside block\n\tg.fsm.Event(removeExp)\n\n\tis := &InstructionSet{}\n\tis.name = fmt.Sprint(index)\n\tis.isType = Block\n\n\tfor i := 0; i < len(exp.BlockArguments); i++ {\n\t\ttable.set(exp.BlockArguments[i].Value)\n\t}\n\n\tg.compileCodeBlock(is, exp.Block, scope, table)\n\tg.endInstructions(is)\n\tg.instructionSets = append(g.instructionSets, is)\n\n\tg.fsm.Event(oldState)\n}\n\nfunc (g *Generator) compileIfExpression(is *InstructionSet, exp *ast.IfExpression, scope *scope, table *localTable) {\n\toldState := g.fsm.Current()\n\n\t\/\/ Compiles condition so we need every expression\n\tg.fsm.Event(keepExp)\n\tg.compileExpression(is, exp.Condition, scope, table)\n\n\tanchor1 := &anchor{}\n\tanchor2 := &anchor{}\n\n\tis.define(BranchUnless, anchor1)\n\n\t\/\/ We don't need unused expression in consequence block\n\tg.fsm.Event(removeExp)\n\tg.compileCodeBlock(is, exp.Consequence, scope, table)\n\tg.fsm.Event(oldState)\n\n\tanchor1.line = is.count\n\n\t\/\/ This and the PutNull bellow is needed when we need the returned result\n\tif g.fsm.Is(keepExp) {\n\t\t\/\/ BranchIf needs move one more line because the we'll add jump into instructions\n\t\tanchor1.line++\n\t\tis.define(Jump, anchor2)\n\t}\n\n\tif exp.Alternative == nil {\n\t\tif g.fsm.Is(keepExp) {\n\t\t\t\/\/ jump over the `putnil` in false case\n\t\t\tanchor2.line = anchor1.line + 1\n\t\t\tis.define(PutNull)\n\t\t}\n\n\t\treturn\n\t}\n\n\t\/\/ We don't need unused expression in alternative block either\n\tg.fsm.Event(removeExp)\n\tg.compileCodeBlock(is, exp.Alternative, scope, table)\n\tg.fsm.Event(oldState)\n\n\tanchor2.line = is.count\n}\n\nfunc (g *Generator) compilePrefixExpression(is *InstructionSet, exp *ast.PrefixExpression, scope *scope, table *localTable) {\n\tswitch exp.Operator {\n\tcase \"!\":\n\t\tg.compileExpression(is, exp.Right, scope, table)\n\t\tis.define(Send, exp.Operator, 0)\n\tcase \"-\":\n\t\tis.define(PutObject, 0)\n\t\tg.compileExpression(is, exp.Right, scope, table)\n\t\tis.define(Send, exp.Operator, 1)\n\t}\n}\n\nfunc (g *Generator) compileInfixExpression(is *InstructionSet, node *ast.InfixExpression, scope *scope, table *localTable) {\n\tg.compileExpression(is, node.Left, scope, table)\n\tg.compileExpression(is, node.Right, scope, table)\n\n\tif node.Operator != \"::\" {\n\t\tis.define(Send, node.Operator, \"1\")\n\t}\n}\n<commit_msg>Returns assignment value in REPL.<commit_after>package bytecode\n\nimport (\n\t\"fmt\"\n\t\"github.com\/goby-lang\/goby\/compiler\/ast\"\n)\n\nfunc (g *Generator) compileExpression(is *InstructionSet, exp ast.Expression, scope *scope, table *localTable) {\n\t\/\/ See fsm initialization's comment\n\tif g.fsm.Is(keepExp) {\n\t\tswitch exp := exp.(type) {\n\t\tcase *ast.Constant:\n\t\t\tis.define(GetConstant, exp.Value, fmt.Sprint(exp.IsNamespace))\n\t\tcase *ast.InstanceVariable:\n\t\t\tis.define(GetInstanceVariable, exp.Value)\n\t\tcase *ast.IntegerLiteral:\n\t\t\tis.define(PutObject, fmt.Sprint(exp.Value))\n\t\tcase *ast.StringLiteral:\n\t\t\tis.define(PutString, fmt.Sprintf(\"\\\"%s\\\"\", exp.Value))\n\t\tcase *ast.BooleanExpression:\n\t\t\tis.define(PutObject, fmt.Sprint(exp.Value))\n\t\tcase *ast.NilExpression:\n\t\t\tis.define(PutNull)\n\t\tcase *ast.RangeExpression:\n\t\t\tg.compileExpression(is, exp.Start, scope, table)\n\t\t\tg.compileExpression(is, exp.End, scope, table)\n\t\t\tis.define(NewRange, 0)\n\t\tcase *ast.ArrayExpression:\n\t\t\tfor _, elem := range exp.Elements {\n\t\t\t\tg.compileExpression(is, elem, scope, table)\n\t\t\t}\n\t\t\tis.define(NewArray, len(exp.Elements))\n\t\tcase *ast.HashExpression:\n\t\t\tfor key, value := range exp.Data {\n\t\t\t\tis.define(PutString, fmt.Sprintf(\"\\\"%s\\\"\", key))\n\t\t\t\tg.compileExpression(is, value, scope, table)\n\t\t\t}\n\t\t\tis.define(NewHash, len(exp.Data)*2)\n\t\tcase *ast.SelfExpression:\n\t\t\tis.define(PutSelf)\n\t\tcase *ast.PrefixExpression:\n\t\t\tg.compilePrefixExpression(is, exp, scope, table)\n\t\tcase *ast.InfixExpression:\n\t\t\tg.compileInfixExpression(is, exp, scope, table)\n\t\t}\n\t}\n\n\tswitch exp := exp.(type) {\n\tcase *ast.Identifier:\n\t\tg.compileIdentifier(is, exp, scope, table)\n\tcase *ast.AssignExpression:\n\t\tg.compileAssignExpression(is, exp, scope, table)\n\tcase *ast.IfExpression:\n\t\tg.compileIfExpression(is, exp, scope, table)\n\tcase *ast.YieldExpression:\n\t\tg.compileYieldExpression(is, exp, scope, table)\n\tcase *ast.CallExpression:\n\t\tg.compileCallExpression(is, exp, scope, table)\n\t}\n}\n\nfunc (g *Generator) compileIdentifier(is *InstructionSet, exp *ast.Identifier, scope *scope, table *localTable) {\n\tindex, depth, ok := table.getLCL(exp.Value, table.depth)\n\n\t\/\/ This means it's a local variable.\n\t\/\/ But we only define the instruction when we'll need it.\n\tif ok && g.fsm.Is(keepExp) {\n\t\tis.define(GetLocal, depth, index)\n\t\treturn\n\t}\n\n\t\/\/ otherwise it's a method call\n\tis.define(PutSelf)\n\tis.define(Send, exp.Value, 0)\n}\n\nfunc (g *Generator) compileYieldExpression(is *InstructionSet, exp *ast.YieldExpression, scope *scope, table *localTable) {\n\toldState := g.fsm.Current()\n\tg.fsm.Event(keepExp)\n\n\tis.define(PutSelf)\n\n\tfor _, arg := range exp.Arguments {\n\t\tg.compileExpression(is, arg, scope, table)\n\t}\n\n\tis.define(InvokeBlock, len(exp.Arguments))\n\tg.fsm.Event(oldState)\n}\n\nfunc (g *Generator) compileCallExpression(is *InstructionSet, exp *ast.CallExpression, scope *scope, table *localTable) {\n\toldState := g.fsm.Current()\n\n\t\/\/ We need the receiver expression and argument expressions\n\tg.fsm.Event(keepExp)\n\tg.compileExpression(is, exp.Receiver, scope, table)\n\n\tfor _, arg := range exp.Arguments {\n\t\tg.compileExpression(is, arg, scope, table)\n\t}\n\n\tif exp.Block != nil {\n\t\t\/\/ Inside block should be one level deeper than outside\n\t\tnewTable := newLocalTable(table.depth + 1)\n\t\tnewTable.upper = table\n\t\tblockIndex := g.blockCounter\n\t\tg.blockCounter++\n\t\tg.compileBlockArgExpression(blockIndex, exp, scope, newTable)\n\t\tis.define(Send, exp.Method, len(exp.Arguments), fmt.Sprintf(\"block:%d\", blockIndex))\n\t\treturn\n\t}\n\tis.define(Send, exp.Method, len(exp.Arguments))\n\n\tif exp.Method == \"++\" || exp.Method == \"--\" {\n\t\t\/\/ ++ and -- are methods with side effect and shouldn't return anything\n\t\tis.define(Pop)\n\t}\n\n\tg.fsm.Event(oldState)\n}\n\nfunc (g *Generator) compileAssignExpression(is *InstructionSet, exp *ast.AssignExpression, scope *scope, table *localTable) {\n\toldState := g.fsm.Current()\n\tg.fsm.Event(keepExp)\n\tg.compileExpression(is, exp.Value, scope, table)\n\tg.fsm.Event(oldState)\n\n\tif len(exp.Variables) > 1 {\n\t\tis.define(ExpandArray, len(exp.Variables))\n\t}\n\n\tfor _, v := range exp.Variables {\n\t\tswitch name := v.(type) {\n\t\tcase *ast.Identifier:\n\t\t\tindex, depth := table.setLCL(name.Value, table.depth)\n\n\t\t\tif exp.Optioned != 0 {\n\t\t\t\tis.define(SetLocal, depth, index, exp.Optioned)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tis.define(SetLocal, depth, index)\n\t\tcase *ast.InstanceVariable:\n\t\t\tis.define(SetInstanceVariable, name.Value)\n\t\tcase *ast.Constant:\n\t\t\tis.define(SetConstant, name.Value)\n\t\t}\n\n\t\tif exp.IsStmt && !g.REPL {\n\t\t\tis.define(Pop)\n\t\t}\n\t}\n}\n\nfunc (g *Generator) compileBlockArgExpression(index int, exp *ast.CallExpression, scope *scope, table *localTable) {\n\toldState := g.fsm.Current()\n\t\/\/ We don't need any unused expression inside block\n\tg.fsm.Event(removeExp)\n\n\tis := &InstructionSet{}\n\tis.name = fmt.Sprint(index)\n\tis.isType = Block\n\n\tfor i := 0; i < len(exp.BlockArguments); i++ {\n\t\ttable.set(exp.BlockArguments[i].Value)\n\t}\n\n\tg.compileCodeBlock(is, exp.Block, scope, table)\n\tg.endInstructions(is)\n\tg.instructionSets = append(g.instructionSets, is)\n\n\tg.fsm.Event(oldState)\n}\n\nfunc (g *Generator) compileIfExpression(is *InstructionSet, exp *ast.IfExpression, scope *scope, table *localTable) {\n\toldState := g.fsm.Current()\n\n\t\/\/ Compiles condition so we need every expression\n\tg.fsm.Event(keepExp)\n\tg.compileExpression(is, exp.Condition, scope, table)\n\n\tanchor1 := &anchor{}\n\tanchor2 := &anchor{}\n\n\tis.define(BranchUnless, anchor1)\n\n\t\/\/ We don't need unused expression in consequence block\n\tg.fsm.Event(removeExp)\n\tg.compileCodeBlock(is, exp.Consequence, scope, table)\n\tg.fsm.Event(oldState)\n\n\tanchor1.line = is.count\n\n\t\/\/ This and the PutNull bellow is needed when we need the returned result\n\tif g.fsm.Is(keepExp) {\n\t\t\/\/ BranchIf needs move one more line because the we'll add jump into instructions\n\t\tanchor1.line++\n\t\tis.define(Jump, anchor2)\n\t}\n\n\tif exp.Alternative == nil {\n\t\tif g.fsm.Is(keepExp) {\n\t\t\t\/\/ jump over the `putnil` in false case\n\t\t\tanchor2.line = anchor1.line + 1\n\t\t\tis.define(PutNull)\n\t\t}\n\n\t\treturn\n\t}\n\n\t\/\/ We don't need unused expression in alternative block either\n\tg.fsm.Event(removeExp)\n\tg.compileCodeBlock(is, exp.Alternative, scope, table)\n\tg.fsm.Event(oldState)\n\n\tanchor2.line = is.count\n}\n\nfunc (g *Generator) compilePrefixExpression(is *InstructionSet, exp *ast.PrefixExpression, scope *scope, table *localTable) {\n\tswitch exp.Operator {\n\tcase \"!\":\n\t\tg.compileExpression(is, exp.Right, scope, table)\n\t\tis.define(Send, exp.Operator, 0)\n\tcase \"-\":\n\t\tis.define(PutObject, 0)\n\t\tg.compileExpression(is, exp.Right, scope, table)\n\t\tis.define(Send, exp.Operator, 1)\n\t}\n}\n\nfunc (g *Generator) compileInfixExpression(is *InstructionSet, node *ast.InfixExpression, scope *scope, table *localTable) {\n\tg.compileExpression(is, node.Left, scope, table)\n\tg.compileExpression(is, node.Right, scope, table)\n\n\tif node.Operator != \"::\" {\n\t\tis.define(Send, node.Operator, \"1\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package directory\n\nimport (\n\t\"gob\"\n\t\"os\"\n\t\"path\"\n\t\"rand\"\n\t\"log\"\n\t\"storage\"\n\t\"strconv\"\n\t\"sync\"\n)\n\nconst (\n\tFileIdSaveInterval = 10000\n)\n\ntype MachineInfo struct {\n\tUrl string \/\/<server name\/ip>[:port]\n\tPublicUrl string\n}\ntype Machine struct {\n\tServer MachineInfo\n\tVolumes []storage.VolumeInfo\n}\n\ntype Mapper struct {\n\tdir string\n\tfileName string\n\n\tlock sync.Mutex\n\tMachines []*Machine\n\tvid2machineId map[uint32]int\n\tWriters []int \/\/ transient array of Writers volume id\n\n\tFileIdSequence uint64\n\tfileIdCounter uint64\n\t\n\tvolumeSizeLimit uint32\n}\n\nfunc NewMachine(server, publicUrl string, volumes []storage.VolumeInfo) *Machine {\n\treturn &Machine{Server: MachineInfo{Url: server, PublicUrl: publicUrl}, Volumes: volumes}\n}\n\nfunc NewMapper(dirname string, filename string, volumeSizeLimit uint32) (m *Mapper) {\n\tm = &Mapper{dir: dirname, fileName: filename}\n\tm.vid2machineId = make(map[uint32]int)\n\tm.volumeSizeLimit = volumeSizeLimit\n\tm.Writers = *new([]int)\n\tm.Machines = *new([]*Machine)\n\n\tseqFile, se := os.OpenFile(path.Join(m.dir, m.fileName+\".seq\"), os.O_RDONLY, 0644)\n\tif se != nil {\n\t\tm.FileIdSequence = FileIdSaveInterval\n\t\tlog.Println(\"Setting file id sequence\", m.FileIdSequence)\n\t} else {\n\t\tdecoder := gob.NewDecoder(seqFile)\n\t\tdefer seqFile.Close()\n\t\tdecoder.Decode(&m.FileIdSequence)\n\t\tlog.Println(\"Loading file id sequence\", m.FileIdSequence, \"=>\", m.FileIdSequence+FileIdSaveInterval)\n\t\t\/\/in case the server stops between intervals\n\t\tm.FileIdSequence += FileIdSaveInterval\n\t}\n\treturn\n}\nfunc (m *Mapper) PickForWrite() (string, MachineInfo, os.Error) {\n len_writers := len(m.Writers)\n if len_writers<=0 {\n log.Println(\"No more writable volumes!\")\n return \"\",m.Machines[rand.Intn(len(m.Machines))].Server, os.NewError(\"No more writable volumes!\")\n }\n\tmachine := m.Machines[m.Writers[rand.Intn(len_writers)]]\n\tvid := machine.Volumes[rand.Intn(len(machine.Volumes))].Id\n\treturn NewFileId(vid, m.NextFileId(), rand.Uint32()).String(), machine.Server,nil\n}\nfunc (m *Mapper) NextFileId() uint64 {\n\tif m.fileIdCounter <= 0 {\n\t\tm.fileIdCounter = FileIdSaveInterval\n\t\tm.FileIdSequence += FileIdSaveInterval\n\t\tm.saveSequence()\n\t}\n\tm.fileIdCounter--\n\treturn m.FileIdSequence - m.fileIdCounter\n}\nfunc (m *Mapper) Get(vid uint32) (*Machine, os.Error) {\n machineId := m.vid2machineId[vid]\n if machineId <=0{\n return nil, os.NewError(\"invalid volume id \" + strconv.Uitob64(uint64(vid),10))\n }\n\treturn m.Machines[machineId-1],nil\n}\nfunc (m *Mapper) Add(machine Machine){\n\t\/\/check existing machine, linearly\n\tm.lock.Lock()\n\tfoundExistingMachineId := -1\n\tfor index, entry := range m.Machines {\n\t\tif machine.Server.Url == entry.Server.Url {\n\t\t\tfoundExistingMachineId = index\n\t\t\tbreak\n\t\t}\n\t}\n\tmachineId := foundExistingMachineId\n\tif machineId < 0 {\n\t\tmachineId = len(m.Machines)\n\t\tm.Machines = append(m.Machines, &machine)\n\t}else{\n\t m.Machines[machineId] = &machine\n\t}\n\tm.lock.Unlock()\n\n\t\/\/add to vid2machineId map, and Writers array\n\tfor _, v := range machine.Volumes {\n\t\t\/\/log.Println(\"Setting volume\", v.Id, \"to\", machine.Server.Url)\n\t\tm.vid2machineId[v.Id] = machineId+1 \/\/use base 1 indexed, to detect not found cases\n\t}\n\t\/\/setting Writers, copy-on-write because of possible updating\n\tvar writers []int\n\tfor machine_index, machine_entry := range m.Machines {\n\t\tfor _, v := range machine_entry.Volumes {\n\t\t\tif v.Size < int64(m.volumeSizeLimit) {\n\t\t\t\twriters = append(writers, machine_index)\n\t\t\t}\n\t\t}\n\t}\n\tm.Writers = writers\n}\nfunc (m *Mapper) saveSequence() {\n\tlog.Println(\"Saving file id sequence\", m.FileIdSequence, \"to\", path.Join(m.dir, m.fileName+\".seq\"))\n\tseqFile, e := os.OpenFile(path.Join(m.dir, m.fileName+\".seq\"), os.O_CREATE|os.O_WRONLY, 0644)\n\tif e != nil {\n\t\tlog.Fatalf(\"Sequence File Save [ERROR] %s\\n\", e)\n\t}\n\tdefer seqFile.Close()\n\tencoder := gob.NewEncoder(seqFile)\n\tencoder.Encode(m.FileIdSequence)\n}\n<commit_msg>fix enforcing volume size limit<commit_after>package directory\n\nimport (\n\t\"gob\"\n\t\"os\"\n\t\"path\"\n\t\"rand\"\n\t\"log\"\n\t\"storage\"\n\t\"strconv\"\n\t\"sync\"\n)\n\nconst (\n\tFileIdSaveInterval = 10000\n)\n\ntype MachineInfo struct {\n\tUrl string \/\/<server name\/ip>[:port]\n\tPublicUrl string\n}\ntype Machine struct {\n\tServer MachineInfo\n\tVolumes []storage.VolumeInfo\n}\n\ntype Mapper struct {\n\tdir string\n\tfileName string\n\n\tlock sync.Mutex\n\tMachines []*Machine\n\tvid2machineId map[uint32]int \/\/machineId is +1 of the index of []*Machine, to detect not found entries\n\tWriters []uint32 \/\/ transient array of Writers volume id\n\n\tFileIdSequence uint64\n\tfileIdCounter uint64\n\n\tvolumeSizeLimit uint32\n}\n\nfunc NewMachine(server, publicUrl string, volumes []storage.VolumeInfo) *Machine {\n\treturn &Machine{Server: MachineInfo{Url: server, PublicUrl: publicUrl}, Volumes: volumes}\n}\n\nfunc NewMapper(dirname string, filename string, volumeSizeLimit uint32) (m *Mapper) {\n\tm = &Mapper{dir: dirname, fileName: filename}\n\tm.vid2machineId = make(map[uint32]int)\n\tm.volumeSizeLimit = volumeSizeLimit\n\tm.Writers = *new([]uint32)\n\tm.Machines = *new([]*Machine)\n\n\tseqFile, se := os.OpenFile(path.Join(m.dir, m.fileName+\".seq\"), os.O_RDONLY, 0644)\n\tif se != nil {\n\t\tm.FileIdSequence = FileIdSaveInterval\n\t\tlog.Println(\"Setting file id sequence\", m.FileIdSequence)\n\t} else {\n\t\tdecoder := gob.NewDecoder(seqFile)\n\t\tdefer seqFile.Close()\n\t\tdecoder.Decode(&m.FileIdSequence)\n\t\tlog.Println(\"Loading file id sequence\", m.FileIdSequence, \"=>\", m.FileIdSequence+FileIdSaveInterval)\n\t\t\/\/in case the server stops between intervals\n\t\tm.FileIdSequence += FileIdSaveInterval\n\t}\n\treturn\n}\nfunc (m *Mapper) PickForWrite() (string, MachineInfo, os.Error) {\n\tlen_writers := len(m.Writers)\n\tif len_writers <= 0 {\n\t\tlog.Println(\"No more writable volumes!\")\n\t\treturn \"\", m.Machines[rand.Intn(len(m.Machines))].Server, os.NewError(\"No more writable volumes!\")\n\t}\n\tvid := m.Writers[rand.Intn(len_writers)]\n\tmachine_id := m.vid2machineId[vid]\n\tif machine_id > 0 {\n\t\tmachine := m.Machines[machine_id-1]\n\t\treturn NewFileId(vid, m.NextFileId(), rand.Uint32()).String(), machine.Server, nil\n\t}\n\treturn \"\", m.Machines[rand.Intn(len(m.Machines))].Server, os.NewError(\"Strangely vid \" + strconv.Uitoa64(uint64(vid)) + \" is on no machine!\")\n}\nfunc (m *Mapper) NextFileId() uint64 {\n\tif m.fileIdCounter <= 0 {\n\t\tm.fileIdCounter = FileIdSaveInterval\n\t\tm.FileIdSequence += FileIdSaveInterval\n\t\tm.saveSequence()\n\t}\n\tm.fileIdCounter--\n\treturn m.FileIdSequence - m.fileIdCounter\n}\nfunc (m *Mapper) Get(vid uint32) (*Machine, os.Error) {\n\tmachineId := m.vid2machineId[vid]\n\tif machineId <= 0 {\n\t\treturn nil, os.NewError(\"invalid volume id \" + strconv.Uitob64(uint64(vid), 10))\n\t}\n\treturn m.Machines[machineId-1], nil\n}\nfunc (m *Mapper) Add(machine Machine) {\n\t\/\/check existing machine, linearly\n\tm.lock.Lock()\n\tfoundExistingMachineId := -1\n\tfor index, entry := range m.Machines {\n\t\tif machine.Server.Url == entry.Server.Url {\n\t\t\tfoundExistingMachineId = index\n\t\t\tbreak\n\t\t}\n\t}\n\tmachineId := foundExistingMachineId\n\tif machineId < 0 {\n\t\tmachineId = len(m.Machines)\n\t\tm.Machines = append(m.Machines, &machine)\n\t} else {\n\t\tm.Machines[machineId] = &machine\n\t}\n\tm.lock.Unlock()\n\n\t\/\/add to vid2machineId map, and Writers array\n\tfor _, v := range machine.Volumes {\n\t\t\/\/log.Println(\"Setting volume\", v.Id, \"to\", machine.Server.Url)\n\t\tm.vid2machineId[v.Id] = machineId + 1 \/\/use base 1 indexed, to detect not found cases\n\t}\n\t\/\/setting Writers, copy-on-write because of possible updating\n\tvar writers []uint32\n\tfor _, machine_entry := range m.Machines {\n\t\tfor _, v := range machine_entry.Volumes {\n\t\t\tif v.Size < int64(m.volumeSizeLimit) {\n\t\t\t\twriters = append(writers, v.Id)\n\t\t\t}\n\t\t}\n\t}\n\tm.Writers = writers\n}\nfunc (m *Mapper) saveSequence() {\n\tlog.Println(\"Saving file id sequence\", m.FileIdSequence, \"to\", path.Join(m.dir, m.fileName+\".seq\"))\n\tseqFile, e := os.OpenFile(path.Join(m.dir, m.fileName+\".seq\"), os.O_CREATE|os.O_WRONLY, 0644)\n\tif e != nil {\n\t\tlog.Fatalf(\"Sequence File Save [ERROR] %s\\n\", e)\n\t}\n\tdefer seqFile.Close()\n\tencoder := gob.NewEncoder(seqFile)\n\tencoder.Encode(m.FileIdSequence)\n}\n<|endoftext|>"} {"text":"<commit_before>package ogdatv21\n\nimport (\n\t\"encoding\/csv\"\n\t\"fmt\"\n\t\"github.com\/the42\/ogdat\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"unicode\/utf8\"\n)\n\ntype ISO6392Lang struct {\n\tCode, Identifier string\n}\n\nconst iso639file = \"ISO-639-2_utf-8.txt\"\nconst schema_langauge = \"ger\"\nconst schema_characterset = \"utf8\"\n\nvar isolangfilemap map[string]*ISO6392Lang\n\nfunc loadisolanguagefile(filename string) (isolangfilemap map[string]*ISO6392Lang) {\n\treader, err := os.Open(iso639file)\n\n\tif err == nil {\n\t\tdefer reader.Close()\n\t\tisolangfilemap = make(map[string]*ISO6392Lang)\n\t\tcsvreader := csv.NewReader(reader)\n\t\tcsvreader.Comma = '|'\n\n\t\tfor record, err := csvreader.Read(); err != io.EOF; record, err = csvreader.Read() {\n\t\t\tisorecord := &ISO6392Lang{Code: record[0], Identifier: record[3]}\n\t\t\tisolangfilemap[record[0]] = isorecord\n\t\t\tif len(record[1]) > 0 {\n\t\t\t\tisorecord = &ISO6392Lang{Code: record[1], Identifier: record[3]}\n\t\t\t\tisolangfilemap[record[1]] = isorecord\n\t\t\t}\n\t\t}\n\t\tlog.Printf(\"Info: Read %d ISO language records\", len(isolangfilemap))\n\t} else {\n\t\tlog.Printf(\"Warning: Can not read ISO language records\")\n\t}\n\treturn\n}\n\nfunc min(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nvar regexphtmlcodecheck = regexp.MustCompile(`<\\w+.*('|\"|)>`)\nvar regexphtmlescape = regexp.MustCompile(`&\\w{1,10};|&#\\d{1,6};`)\nvar regexpurlencode = regexp.MustCompile(`%[0-9a-fA-F][0-9a-fA-F]`)\nvar regexpposixescape = regexp.MustCompile(`\\\\n|\\\\b|\\\\v|\\\\t`)\n\n\/\/ return values are:\n\/\/ status: <> 0 indicates sthg. was wrong, \/\/ 1 = Info, 2 = Warning, 3 = Error\n\/\/ position > -1: position of offending input in string, only set if status <> 0\n\/\/ message: clear text of reason why the input string failes to be a correct OGD string\nfunc CheckOGDTextStringForSaneCharacters(str string) (status, position int, message string) {\n\tif !utf8.ValidString(str) {\n\t\treturn 3, -1, \"Zeichenfolge ist nicht als UTF8 kodiert\"\n\t}\n\tif idx := regexphtmlcodecheck.FindIndex([]byte(str)); idx != nil {\n\t\treturn 2, idx[0], fmt.Sprintf(\"Mögliche HTML-Sequenz: '%s'\", str[idx[0]:min(20, idx[1]-idx[0])])\n\t}\n\tif idx := regexphtmlescape.FindIndex([]byte(str)); idx != nil {\n\t\treturn 2, idx[0], fmt.Sprintf(\"Mögliche HTML-Escapes: '%s'\", str[idx[0]:min(15, idx[1]-idx[0])])\n\t}\n\tif idx := regexpurlencode.FindIndex([]byte(str)); idx != nil {\n\t\treturn 2, idx[0], fmt.Sprintf(\"Mögliche Url-Escapes: '%s'\", str[idx[0]:min(8, idx[1]-idx[0])])\n\t}\n\tif idx := regexpposixescape.FindIndex([]byte(str)); idx != nil {\n\t\treturn 2, idx[0], fmt.Sprintf(\"Mögliche Posix-Escapes: '%s'\", str[idx[0]:min(5, idx[1]-idx[0])])\n\t}\n\treturn\n}\n\nfunc (md *MetaData) Check() (message []ogdat.CheckMessage, err error) {\n\tconst pflichtfeldfehlt = \"Pflichtfeld nicht gesetzt\"\n\n\togdset := ogdat.GetOGDSetForVersion(Version)\n\tif ogdset == nil {\n\t\treturn nil, fmt.Errorf(\"Beschreibung für OGD Version %s ist nicht vorhanden, check kann nicht durchgeführt werden\", Version)\n\t}\n\nnextbeschreibung:\n\tfor _, elm := range ogdset.Beschreibung {\n\t\tif elm.IsRequired() && elm.Anzahl != \"N\" { \/\/ check required fields for their existence. However, if the cardinality is defined\n\t\t\t\/\/ as 'N', it may be ok that the field is not present, in which case we check explicitely and issue a warning\n\t\t\tielements := reflect.TypeOf(md).Elem().NumField()\n\t\t\tfor i := 0; i < ielements; i++ {\n\t\t\t\tf := reflect.TypeOf(md).Elem().Field(i)\n\t\t\t\tif ogdat.GetIDFromMetaDataStructField(f) == elm.ID && ogdat.MetaDataStructFieldIsNil(f) {\n\t\t\t\t\tmessage = append(message, ogdat.CheckMessage{Type: 3, OGDID: ogdat.GetIDFromMetaDataStructField(f), Text: pflichtfeldfehlt})\n\t\t\t\t\tbreak nextbeschreibung \/\/ required field is not present - nothing more to check\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tswitch elm.OGD_Kurzname {\n\t\tcase \"metadata_identifier\":\n\t\t\tif md.Extras.Metadata_Identifier.UUID == nil {\n\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\tType: 3,\n\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\tText: fmt.Sprintf(\"Feldwert vom Typ UUID erwartet, Wert ist aber keine UUID: '%s'\", md.Extras.Metadata_Identifier.Raw)})\n\t\t\t}\n\t\tcase \"metadata_modified\":\n\t\t\tif md.Extras.Metadata_Modified.Format != CustomTimeSpecifier2 {\n\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\tType: 3,\n\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\tText: fmt.Sprintf(\"Feldwert vom Typ ÖNORM ISO 8601 YYYY-MM-DD erwartet, Wert entspricht aber nicht diesem Typ: '%s'\", md.Extras.Metadata_Modified.Raw)})\n\t\t\t}\n\t\tcase \"title\":\n\t\t\tif err, idx, msg := CheckOGDTextStringForSaneCharacters(*md.Title); err > 0 {\n\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\tType: err,\n\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\tText: fmt.Sprintf(\"Zeichenfolge enthält potentiell ungeeignete Zeichen ab Position %d: %s\", idx, msg)})\n\t\t\t}\n\t\tcase \"description\":\n\t\t\tif err, idx, msg := CheckOGDTextStringForSaneCharacters(*md.Description); err > 0 {\n\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\tType: err,\n\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\tText: fmt.Sprintf(\"Zeichenfolge enthält potentiell ungeeignete Zeichen ab Position %d: %s\", idx, msg)})\n\t\t\t}\n\t\tcase \"categorization\":\n\t\t\tif cat := md.Extras.Categorization; cat == nil {\n\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\tType: 2,\n\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\tText: \"Kategorisierung darf zwar mit Karidnalität 'N' auftreten, jedoch sollte zumindest eine Zuordnung getroffen werden\"})\n\n\t\t\t} else {\n\t\t\t\tfor _, element := range cat {\n\t\t\t\t\tif element.NumID == -1 {\n\t\t\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\t\t\tType: 3,\n\t\t\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\t\t\tText: fmt.Sprintf(\"Die Kategorie '%s' ist keine normierte OGD-Kategorie\", element.ID)})\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc init() {\n\tisolangfilemap = loadisolanguagefile(iso639file)\n}\n<commit_msg>extend Check<commit_after>package ogdatv21\n\nimport (\n\t\"encoding\/csv\"\n\t\"fmt\"\n\t\"github.com\/the42\/ogdat\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"unicode\/utf8\"\n)\n\ntype ISO6392Lang struct {\n\tCode, Identifier string\n}\n\nconst iso639file = \"ISO-639-2_utf-8.txt\"\nconst schema_langauge = \"ger\"\nconst schema_characterset = \"utf8\"\n\nvar isolangfilemap map[string]*ISO6392Lang\n\nfunc loadisolanguagefile(filename string) (isolangfilemap map[string]*ISO6392Lang) {\n\treader, err := os.Open(iso639file)\n\n\tif err == nil {\n\t\tdefer reader.Close()\n\t\tisolangfilemap = make(map[string]*ISO6392Lang)\n\t\tcsvreader := csv.NewReader(reader)\n\t\tcsvreader.Comma = '|'\n\n\t\tfor record, err := csvreader.Read(); err != io.EOF; record, err = csvreader.Read() {\n\t\t\tisorecord := &ISO6392Lang{Code: record[0], Identifier: record[3]}\n\t\t\tisolangfilemap[record[0]] = isorecord\n\t\t\tif len(record[1]) > 0 {\n\t\t\t\tisorecord = &ISO6392Lang{Code: record[1], Identifier: record[3]}\n\t\t\t\tisolangfilemap[record[1]] = isorecord\n\t\t\t}\n\t\t}\n\t\tlog.Printf(\"Info: Read %d ISO language records\", len(isolangfilemap))\n\t} else {\n\t\tlog.Printf(\"Warning: Can not read ISO language records\")\n\t}\n\treturn\n}\n\nfunc min(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nvar regexphtmlcodecheck = regexp.MustCompile(`<\\w+.*('|\"|)>`)\nvar regexphtmlescape = regexp.MustCompile(`&\\w{1,10};|&#\\d{1,6};`)\nvar regexpurlencode = regexp.MustCompile(`%[0-9a-fA-F][0-9a-fA-F]`)\nvar regexpposixescape = regexp.MustCompile(`\\\\n|\\\\b|\\\\v|\\\\t`)\nvar regexpbboxWKT = regexp.MustCompile(`POLYGON\\s{0,1}\\({1,2}\\s{0,2}[-+]?[0-9]*\\.?[0-9]+\\s{1,2}[-+]?[0-9]*\\.?[0-9]+,\\s{0,2}[-+]?[0-9]*\\.?[0-9]+\\s{1,2}[-+]?[0-9]*\\.?[0-9]+\\s{0,2}\\){1,2}`)\n\n\/\/ return values are:\n\/\/ status: <> 0 indicates sthg. was wrong, \/\/ 1 = Info, 2 = Warning, 3 = Error\n\/\/ position > -1: position of offending input in string, only set if status <> 0\n\/\/ message: clear text of reason why the input string failes to be a correct OGD string\nfunc CheckOGDTextStringForSaneCharacters(str string) (status, position int, message string) {\n\tif !utf8.ValidString(str) {\n\t\treturn 3, -1, \"Zeichenfolge ist nicht durchgängig gültig als UTF8 kodiert\"\n\t}\n\tif idx := regexphtmlcodecheck.FindIndex([]byte(str)); idx != nil {\n\t\treturn 2, idx[0], fmt.Sprintf(\"Mögliche HTML-Sequenz: '%s'\", str[idx[0]:min(20, idx[1]-idx[0])])\n\t}\n\tif idx := regexphtmlescape.FindIndex([]byte(str)); idx != nil {\n\t\treturn 2, idx[0], fmt.Sprintf(\"Mögliche HTML-Escapes: '%s'\", str[idx[0]:min(15, idx[1]-idx[0])])\n\t}\n\tif idx := regexpurlencode.FindIndex([]byte(str)); idx != nil {\n\t\treturn 2, idx[0], fmt.Sprintf(\"Mögliche Url-Escapes: '%s'\", str[idx[0]:min(8, idx[1]-idx[0])])\n\t}\n\tif idx := regexpposixescape.FindIndex([]byte(str)); idx != nil {\n\t\treturn 2, idx[0], fmt.Sprintf(\"Mögliche Posix-Escapes: '%s'\", str[idx[0]:min(5, idx[1]-idx[0])])\n\t}\n\treturn\n}\n\nfunc CheckOGDBBox(str string) (bool, error) {\n\tif !utf8.ValidString(str) {\n\t\treturn false, fmt.Errorf(\"Zeichenfolge ist nicht durchgängig gültig als UTF8 kodiert\")\n\t}\n\tif idx := regexpbboxWKT.FindIndex([]byte(str)); idx == nil {\n\t\treturn false, fmt.Errorf(\"Keine gültige WKT-Angabe einer BoundingBox\")\n\t}\n\treturn true, nil\n}\n\nfunc (md *MetaData) Check() (message []ogdat.CheckMessage, err error) {\n\tconst pflichtfeldfehlt = \"Pflichtfeld nicht gesetzt\"\n\n\togdset := ogdat.GetOGDSetForVersion(Version)\n\tif ogdset == nil {\n\t\treturn nil, fmt.Errorf(\"Beschreibung für OGD Version %s ist nicht vorhanden, check kann nicht durchgeführt werden\", Version)\n\t}\n\nnextbeschreibung:\n\tfor _, elm := range ogdset.Beschreibung {\n\n\t\t\/\/ check required fields for their presence. However, if the\n\t\t\/\/ cardinality on a required fiels is defined as 'N', it may be ok\n\t\t\/\/ that the field is not present, in which case we check\n\t\t\/\/ later explicitely and issue a warning\n\t\tif elm.IsRequired() && elm.Anzahl != \"N\" {\n\t\t\tielements := reflect.TypeOf(md).Elem().NumField()\n\t\t\tfor i := 0; i < ielements; i++ {\n\t\t\t\tf := reflect.TypeOf(md).Elem().Field(i)\n\t\t\t\tif ogdat.GetIDFromMetaDataStructField(f) == elm.ID && ogdat.MetaDataStructFieldIsNil(f) {\n\t\t\t\t\tmessage = append(message, ogdat.CheckMessage{Type: 3, OGDID: ogdat.GetIDFromMetaDataStructField(f), Text: pflichtfeldfehlt})\n\t\t\t\t\tbreak nextbeschreibung \/\/ required field is not present - nothing more to check\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tswitch elm.OGD_Kurzname {\n\t\tcase \"metadata_identifier\":\n\t\t\tif md.Extras.Metadata_Identifier.UUID == nil {\n\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\tType: 3,\n\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\tText: fmt.Sprintf(\"Feldwert vom Typ UUID erwartet, Wert ist aber keine UUID: '%s'\", md.Extras.Metadata_Identifier.Raw)})\n\t\t\t}\n\t\tcase \"metadata_modified\":\n\t\t\tif md.Extras.Metadata_Modified.Format != CustomTimeSpecifier2 {\n\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\tType: 3,\n\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\tText: fmt.Sprintf(\"Feldwert vom Typ ÖNORM ISO 8601 'YYYY-MM-DD' erwartet, Wert entspricht aber nicht diesem Typ: '%s'\", md.Extras.Metadata_Modified.Raw)})\n\t\t\t}\n\t\tcase \"title\":\n\t\t\tif err, idx, msg := CheckOGDTextStringForSaneCharacters(*md.Title); err > 0 {\n\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\tType: err,\n\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\tText: fmt.Sprintf(\"Zeichenfolge enthält potentiell ungeeignete Zeichen ab Position %d: '%s'\", idx, msg)})\n\t\t\t}\n\t\tcase \"description\":\n\t\t\tif err, idx, msg := CheckOGDTextStringForSaneCharacters(*md.Description); err > 0 {\n\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\tType: err,\n\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\tText: fmt.Sprintf(\"Zeichenfolge enthält potentiell ungeeignete Zeichen ab Position %d: '%s'\", idx, msg)})\n\t\t\t}\n\t\tcase \"categorization\":\n\t\t\tif cat := md.Extras.Categorization; cat == nil {\n\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\tType: 2,\n\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\tText: \"Die Kategorisierung darf zwar mit Kardinalität 'N' optional auftreten, jedoch sollte zumindest eine Zuordnung getroffen werden\"})\n\n\t\t\t} else {\n\t\t\t\tfor _, element := range cat {\n\t\t\t\t\tif element.NumID == -1 {\n\t\t\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\t\t\tType: 3,\n\t\t\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\t\t\tText: fmt.Sprintf(\"Die Kategorie '%s' ist keine normierte OGD-Kategorie\", element.ID)})\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"keywords\":\n\t\t\tif keywords := md.Schlagworte; keywords == nil {\n\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\tType: 2,\n\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\tText: \"Schlagworte dürfen zwar mit Kardinalität 'N' optional auftreten, die Angabe von Schlagworten wäre aber wünschenswert\"})\n\n\t\t\t}\n\t\tcase \"maintainer\":\n\t\t\tif err, idx, msg := CheckOGDTextStringForSaneCharacters(*md.Maintainer); err > 0 {\n\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\tType: err,\n\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\tText: fmt.Sprintf(\"Zeichenfolge enthält potentiell ungeeignete Zeichen ab Position %d: '%s'\", idx, msg)})\n\t\t\t}\n\t\tcase \"license\":\n\t\t\tif err, idx, msg := CheckOGDTextStringForSaneCharacters(*md.License); err > 0 {\n\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\tType: err,\n\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\tText: fmt.Sprintf(\"Zeichenfolge enthält potentiell ungeeignete Zeichen ab Position %d: '%s'\", idx, msg)})\n\t\t\t}\n\t\tcase \"begin_datetime\":\n\t\t\tif md.Extras.Begin_DateTime.Format != CustomTimeSpecifier1 {\n\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\tType: 3,\n\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\tText: fmt.Sprintf(\"Feldwert vom Typ ÖNORM ISO 8601 TM_Primitive 'YYYY-MM-DDThh:mm:ss' erwartet, Wert entspricht aber nicht diesem Typ: '%s'\", md.Extras.Begin_DateTime.Raw)})\n\t\t\t}\n\t\t\t\/\/ ###################### OPTIONALE FELDER ######################\n\t\tcase \"schema_name\":\n\t\t\tif schemaname := md.Extras.Schema_Name; schemaname != nil {\n\t\t\t\tif err, idx, msg := CheckOGDTextStringForSaneCharacters(*schemaname); err > 0 {\n\t\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\t\tType: err,\n\t\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\t\tText: fmt.Sprintf(\"Zeichenfolge enthält potentiell ungeeignete Zeichen ab Position %d: '%s'\", idx, msg)})\n\t\t\t\t}\n\t\t\t\tconst ogdschemaspec = \"OGD Austria Metadata 2.0\"\n\t\t\t\tif *schemaname != ogdschemaspec {\n\t\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\t\tType: 1,\n\t\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\t\tText: fmt.Sprintf(\"Schemabezeichnung als '%s' erwartet, der Wert ist aber '%s'\", ogdschemaspec, *schemaname)})\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"schema_language\":\n\t\t\tif lang := md.Extras.Schema_Language; lang != nil {\n\t\t\t\tconst ogdschemalanguage = \"ger\"\n\t\t\t\tif *lang != ogdschemalanguage {\n\t\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\t\tType: 3,\n\t\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\t\tText: fmt.Sprintf(\"Schemasprache als '%s' erwartet, der Wert ist aber '%s'\", ogdschemalanguage, *lang)})\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"schema_characterset\":\n\t\t\tif charset := md.Extras.Schema_Characterset; charset != nil {\n\t\t\t\tconst ogdschemacharacterset = \"utf8\"\n\t\t\t\tif *charset != ogdschemacharacterset {\n\t\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\t\tType: 3,\n\t\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\t\tText: fmt.Sprintf(\"Characterset des Schemas als '%s' erwartet, der Wert ist aber '%s'\", ogdschemacharacterset, *charset)})\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"metadata_linkage\":\n\t\t\tfor _, element := range md.Extras.Metadata_Linkage {\n\t\t\t\tif element.URL == nil {\n\t\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\t\tType: 3,\n\t\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\t\tText: fmt.Sprintf(\"Gültigen Verweis (Link) erwartet, der Wert '%s' stellt keinen gültigen Link dar\", element.Raw)})\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"attribute_description\":\n\t\t\tif desc := md.Extras.Attribute_Description; desc != nil {\n\t\t\t\tconst ogddesclen = 20\n\t\t\t\tif i := len(*desc); i < ogddesclen {\n\t\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\t\tType: 2,\n\t\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\t\tText: fmt.Sprintf(\"Beschreibung enthält weniger als %d Zeichen\", i)})\n\n\t\t\t\t}\n\t\t\t\tif err, idx, msg := CheckOGDTextStringForSaneCharacters(*desc); err > 0 {\n\t\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\t\tType: err,\n\t\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\t\tText: fmt.Sprintf(\"Zeichenfolge enthält potentiell ungeeignete Zeichen ab Position %d: '%s'\", idx, msg)})\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"maintainer_link\":\n\t\t\tif link := md.Extras.Maintainer_Link; link != nil {\n\t\t\t\tif link.URL == nil {\n\t\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\t\tType: 3,\n\t\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\t\tText: fmt.Sprintf(\"Gültigen Verweis (Link) erwartet, der Wert '%s' stellt keinen gültigen Link dar\", link.Raw)})\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"publisher\":\n\t\t\tif publisher := md.Extras.Publisher; publisher != nil {\n\t\t\t\tif err, idx, msg := CheckOGDTextStringForSaneCharacters(*publisher); err > 0 {\n\t\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\t\tType: err,\n\t\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\t\tText: fmt.Sprintf(\"Zeichenfolge enthält potentiell ungeeignete Zeichen ab Position %d: '%s'\", idx, msg)})\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"geographic_toponym\":\n\t\t\tif toponym := md.Extras.Geographich_Toponym; toponym != nil {\n\t\t\t\tif err, idx, msg := CheckOGDTextStringForSaneCharacters(*toponym); err > 0 {\n\t\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\t\tType: err,\n\t\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\t\tText: fmt.Sprintf(\"Zeichenfolge enthält potentiell ungeeignete Zeichen ab Position %d: '%s'\", idx, msg)})\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"geographic_bbox\":\n\t\t\tif bbox := md.Extras.Geographic_BBox; bbox != nil {\n\t\t\t\tif ok, err := CheckOGDBBox(*bbox); !ok {\n\t\t\t\t\tmessage = append(message, ogdat.CheckMessage{\n\t\t\t\t\t\tType: 3,\n\t\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\t\tText: fmt.Sprintf(\"Zeichenfolge enthält keinen gültigen WKT für die örtliche Begrenzung (Boundingbox): '%s'\", err)})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n\n\t\/*\n\t\t \t\tcase \"resource_url\":\n\t\t\t if keywords := md.Resources.Url; keywords == nil {\n\t\t\t message = append(message, ogdat.CheckMessage{\n\t\t\t\t\t\tType: 2,\n\t\t\t\t\t\tOGDID: elm.ID,\n\t\t\t\t\t\tText: \"Schlagworte dürfen zwar mit Karidnalität 'N' optional auftreten, die Angabe von Schlagworten wäre aber wünschenswert\"})\n\t*\/\n}\n\nfunc init() {\n\tisolangfilemap = loadisolanguagefile(iso639file)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage internal\n\nimport (\n\t\"path\"\n\t\"time\"\n\n\t\"golang.org\/x\/mod\/module\"\n\t\"golang.org\/x\/pkgsite\/internal\/licenses\"\n\t\"golang.org\/x\/pkgsite\/internal\/source\"\n\t\"golang.org\/x\/pkgsite\/internal\/stdlib\"\n\t\"golang.org\/x\/pkgsite\/internal\/version\"\n)\n\nconst (\n\t\/\/ LatestVersion signifies the latest available version in requests to the\n\t\/\/ proxy client.\n\tLatestVersion = \"latest\"\n\n\t\/\/ MasterVersion signifies the version at master.\n\tMasterVersion = \"master\"\n\n\t\/\/ UnknownModulePath signifies that the module path for a given package\n\t\/\/ path is ambiguous or not known. This is because requests to the\n\t\/\/ frontend can come in the form of <import-path>[@<version>], and it is\n\t\/\/ not clear which part of the import-path is the module path.\n\tUnknownModulePath = \"unknownModulePath\"\n)\n\n\/\/ ModuleInfo holds metadata associated with a module.\ntype ModuleInfo struct {\n\tModulePath string\n\tVersion string\n\tCommitTime time.Time\n\tReadmeFilePath string\n\tReadmeContents string\n\tVersionType version.Type\n\tIsRedistributable bool\n\tHasGoMod bool \/\/ whether the module zip has a go.mod file\n\tSourceInfo *source.Info\n}\n\n\/\/ VersionMap holds metadata associated with module queries for a version.\ntype VersionMap struct {\n\tModulePath string\n\tRequestedVersion string\n\tResolvedVersion string\n\tGoModPath string\n\tStatus int\n\tError string\n}\n\n\/\/ SeriesPath returns the series path for the module.\n\/\/\n\/\/ A series is a group of modules that share the same base path and are assumed\n\/\/ to be major-version variants.\n\/\/\n\/\/ The series path is the module path without the version. For most modules,\n\/\/ this will be the module path for all module versions with major version 0 or\n\/\/ 1. For gopkg.in modules, the series path does not correspond to any module\n\/\/ version.\n\/\/\n\/\/ Examples:\n\/\/ The module paths \"a\/b\" and \"a\/b\/v2\" both have series path \"a\/b\".\n\/\/ The module paths \"gopkg.in\/yaml.v1\" and \"gopkg.in\/yaml.v2\" both have series path \"gopkg.in\/yaml\".\nfunc (v *ModuleInfo) SeriesPath() string {\n\treturn SeriesPathForModule(v.ModulePath)\n}\n\n\/\/ SeriesPathForModule returns the series path for the provided modulePath.\nfunc SeriesPathForModule(modulePath string) string {\n\tseriesPath, _, _ := module.SplitPathVersion(modulePath)\n\treturn seriesPath\n}\n\n\/\/ V1Path returns the path for version 1 of the package whose path\n\/\/ is modulePath + \"\/\" + suffix. If modulePath is the standard\n\/\/ library, then V1Path returns suffix.\nfunc V1Path(modulePath, suffix string) string {\n\tif modulePath == stdlib.ModulePath {\n\t\treturn suffix\n\t}\n\treturn path.Join(SeriesPathForModule(modulePath), suffix)\n}\n\n\/\/ A Module is a specific, reproducible build of a module.\ntype Module struct {\n\tModuleInfo\n\tPackages []*Package\n\t\/\/ Licenses holds all licenses within this module version, including those\n\t\/\/ that may be contained in nested subdirectories.\n\tLicenses []*licenses.License\n\n\tDirectories []*DirectoryNew\n}\n\n\/\/ A Package is a group of one or more Go source files with the same package\n\/\/ header. Packages are part of a module.\ntype Package struct {\n\tPath string\n\tName string\n\tSynopsis string\n\tIsRedistributable bool\n\tLicenses []*licenses.Metadata \/\/ metadata of applicable version licenses\n\tImports []string\n\tDocumentationHTML string\n\t\/\/ The values of the GOOS and GOARCH environment variables used to parse the package.\n\tGOOS string\n\tGOARCH string\n\n\t\/\/ V1Path is the package path of a package with major version 1 in a given series.\n\tV1Path string\n}\n\n\/\/ VersionedPackage is a Package along with its corresponding module\n\/\/ information.\ntype VersionedPackage struct {\n\tPackage\n\tModuleInfo\n}\n\n\/\/ Directory represents a folder in a module version, and all of the packages\n\/\/ inside that folder.\ntype Directory struct {\n\tModuleInfo\n\tPath string\n\tPackages []*Package\n}\n\n\/\/ VersionedDirectory is a DirectoryNew along with its corresponding module\n\/\/ information.\ntype VersionedDirectory struct {\n\tDirectoryNew\n\tModuleInfo\n}\n\n\/\/ DirectoryNew is a folder in a module version, and all of the packages\n\/\/ inside that folder. It will replace Directory once everything has been migrated.\ntype DirectoryNew struct {\n\tPath string\n\tV1Path string\n\tIsRedistributable bool\n\tLicenses []*licenses.Metadata \/\/ metadata of applicable version licenses\n\tReadme *Readme\n\tPackage *PackageNew\n}\n\n\/\/ PackageNew is a group of one or more Go source files with the same package\n\/\/ header. A PackageNew is part of a directory.\n\/\/ It will replace Package once everything has been migrated.\ntype PackageNew struct {\n\tName string\n\tPath string\n\tDocumentation *Documentation\n\tImports []string\n}\n\n\/\/ VersionedPackageNew is a PackageNew along with associated module information.\ntype VersionedPackageNew struct {\n\tPackageNew\n\tModuleInfo\n}\n\n\/\/ Documentation is the rendered documentation for a given package\n\/\/ for a specific GOOS and GOARCH.\ntype Documentation struct {\n\t\/\/ The values of the GOOS and GOARCH environment variables used to parse the package.\n\tGOOS string\n\tGOARCH string\n\tSynopsis string\n\tHTML string\n}\n\n\/\/ Readme is a README at a given directory.\ntype Readme struct {\n\tFilepath string\n\tContents string\n}\n\n\/\/ IndexVersion holds the version information returned by the module index.\ntype IndexVersion struct {\n\tPath string\n\tVersion string\n\tTimestamp time.Time\n}\n\n\/\/ ModuleVersionState holds a worker module version state.\ntype ModuleVersionState struct {\n\tModulePath string\n\tVersion string\n\n\t\/\/ IndexTimestamp is the timestamp received from the Index for this version,\n\t\/\/ which should correspond to the time this version was committed to the\n\t\/\/ Index.\n\tIndexTimestamp time.Time\n\t\/\/ CreatedAt is the time this version was originally inserted into the\n\t\/\/ module version state table.\n\tCreatedAt time.Time\n\n\t\/\/ Status is the most recent HTTP status code received from the Fetch service\n\t\/\/ for this version, or nil if no request to the fetch service has been made.\n\tStatus int\n\t\/\/ Error is the most recent HTTP response body received from the Fetch\n\t\/\/ service, for a response with an unsuccessful status code. It is used for\n\t\/\/ debugging only, and has no semantic significance.\n\tError string\n\t\/\/ TryCount is the number of times a fetch of this version has been\n\t\/\/ attempted.\n\tTryCount int\n\t\/\/ LastProcessedAt is the last time this version was updated with a result\n\t\/\/ from the fetch service.\n\tLastProcessedAt *time.Time\n\t\/\/ NextProcessedAfter is the next time a fetch for this version should be\n\t\/\/ attempted.\n\tNextProcessedAfter time.Time\n\n\t\/\/ AppVersion is the value of the GAE_VERSION environment variable, which is\n\t\/\/ set by app engine. It is a timestamp in the format 20190709t112655 that\n\t\/\/ is close to, but not the same as, the deployment time. For example, the\n\t\/\/ deployment time for the above timestamp might be Jul 9, 2019, 11:29:59 AM.\n\tAppVersion string\n\n\t\/\/ GoModPath is the path declared in the go.mod file.\n\tGoModPath string\n\n\t\/\/ NumPackages it the number of packages that were processed as part of the\n\t\/\/ module (regardless of whether the processing was successful).\n\tNumPackages *int\n}\n\n\/\/ PackageVersionState holds a worker package version state. It is associated\n\/\/ with a given module version state.\ntype PackageVersionState struct {\n\tPackagePath string\n\tModulePath string\n\tVersion string\n\tStatus int\n\tError string\n}\n\n\/\/ SearchResult represents a single search result from SearchDocuments.\ntype SearchResult struct {\n\tName string\n\tPackagePath string\n\tModulePath string\n\tVersion string\n\tSynopsis string\n\tLicenses []string\n\n\tCommitTime time.Time\n\t\/\/ Score is used to sort items in an array of SearchResult.\n\tScore float64\n\n\t\/\/ NumImportedBy is the number of packages that import Package.\n\tNumImportedBy uint64\n\n\t\/\/ NumResults is the total number of packages that were returned for this search.\n\tNumResults uint64\n\t\/\/ Approximate reports whether NumResults is an approximate count. NumResults\n\t\/\/ can be approximate if search scanned only a subset of documents, and\n\t\/\/ result count is estimated using the hyperloglog algorithm.\n\tApproximate bool\n}\n\n\/\/ A FieldSet is a bit set of struct fields. It is used to avoid reading large\n\/\/ struct fields from the data store. FieldSet is also the type of the\n\/\/ individual bit values. (Think of them as singleton sets.)\n\/\/\n\/\/ NoFields (the zero value) is the empty set. AllFields is the set containing\n\/\/ every field.\n\/\/\n\/\/ FieldSet bits are unique across the entire project, because some types are\n\/\/ concatenations (via embedding) of others. For example, a VersionedPackage\n\/\/ contains the fields of both a ModuleInfo and a Package, so we can't use the\n\/\/ same bits for both ModuleInfo's ReadmeContents field and Package's\n\/\/ DocumentationHTML field.\ntype FieldSet int64\n\n\/\/ MinimalFields is the empty FieldSet.\nconst MinimalFields FieldSet = 0\n\n\/\/ AllFields is the FieldSet that contains all fields.\nconst AllFields FieldSet = -1\n\n\/\/ StringFieldMissing is the value for string fields that are not present\n\/\/ in a struct. We use it to distinguish a (possibly valid) empty string\n\/\/ from a field that was never populated.\nconst StringFieldMissing = \"!MISSING\"\n\n\/\/ FieldSet bits for fields that can be conditionally read from the data store.\nconst (\n\tWithReadmeContents FieldSet = 1 << iota\n\tWithDocumentationHTML\n)\n<commit_msg>internal: remove unused VersionedPackageNew<commit_after>\/\/ Copyright 2019 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage internal\n\nimport (\n\t\"path\"\n\t\"time\"\n\n\t\"golang.org\/x\/mod\/module\"\n\t\"golang.org\/x\/pkgsite\/internal\/licenses\"\n\t\"golang.org\/x\/pkgsite\/internal\/source\"\n\t\"golang.org\/x\/pkgsite\/internal\/stdlib\"\n\t\"golang.org\/x\/pkgsite\/internal\/version\"\n)\n\nconst (\n\t\/\/ LatestVersion signifies the latest available version in requests to the\n\t\/\/ proxy client.\n\tLatestVersion = \"latest\"\n\n\t\/\/ MasterVersion signifies the version at master.\n\tMasterVersion = \"master\"\n\n\t\/\/ UnknownModulePath signifies that the module path for a given package\n\t\/\/ path is ambiguous or not known. This is because requests to the\n\t\/\/ frontend can come in the form of <import-path>[@<version>], and it is\n\t\/\/ not clear which part of the import-path is the module path.\n\tUnknownModulePath = \"unknownModulePath\"\n)\n\n\/\/ ModuleInfo holds metadata associated with a module.\ntype ModuleInfo struct {\n\tModulePath string\n\tVersion string\n\tCommitTime time.Time\n\tReadmeFilePath string\n\tReadmeContents string\n\tVersionType version.Type\n\tIsRedistributable bool\n\tHasGoMod bool \/\/ whether the module zip has a go.mod file\n\tSourceInfo *source.Info\n}\n\n\/\/ VersionMap holds metadata associated with module queries for a version.\ntype VersionMap struct {\n\tModulePath string\n\tRequestedVersion string\n\tResolvedVersion string\n\tGoModPath string\n\tStatus int\n\tError string\n}\n\n\/\/ SeriesPath returns the series path for the module.\n\/\/\n\/\/ A series is a group of modules that share the same base path and are assumed\n\/\/ to be major-version variants.\n\/\/\n\/\/ The series path is the module path without the version. For most modules,\n\/\/ this will be the module path for all module versions with major version 0 or\n\/\/ 1. For gopkg.in modules, the series path does not correspond to any module\n\/\/ version.\n\/\/\n\/\/ Examples:\n\/\/ The module paths \"a\/b\" and \"a\/b\/v2\" both have series path \"a\/b\".\n\/\/ The module paths \"gopkg.in\/yaml.v1\" and \"gopkg.in\/yaml.v2\" both have series path \"gopkg.in\/yaml\".\nfunc (v *ModuleInfo) SeriesPath() string {\n\treturn SeriesPathForModule(v.ModulePath)\n}\n\n\/\/ SeriesPathForModule returns the series path for the provided modulePath.\nfunc SeriesPathForModule(modulePath string) string {\n\tseriesPath, _, _ := module.SplitPathVersion(modulePath)\n\treturn seriesPath\n}\n\n\/\/ V1Path returns the path for version 1 of the package whose path\n\/\/ is modulePath + \"\/\" + suffix. If modulePath is the standard\n\/\/ library, then V1Path returns suffix.\nfunc V1Path(modulePath, suffix string) string {\n\tif modulePath == stdlib.ModulePath {\n\t\treturn suffix\n\t}\n\treturn path.Join(SeriesPathForModule(modulePath), suffix)\n}\n\n\/\/ A Module is a specific, reproducible build of a module.\ntype Module struct {\n\tModuleInfo\n\tPackages []*Package\n\t\/\/ Licenses holds all licenses within this module version, including those\n\t\/\/ that may be contained in nested subdirectories.\n\tLicenses []*licenses.License\n\n\tDirectories []*DirectoryNew\n}\n\n\/\/ A Package is a group of one or more Go source files with the same package\n\/\/ header. Packages are part of a module.\ntype Package struct {\n\tPath string\n\tName string\n\tSynopsis string\n\tIsRedistributable bool\n\tLicenses []*licenses.Metadata \/\/ metadata of applicable version licenses\n\tImports []string\n\tDocumentationHTML string\n\t\/\/ The values of the GOOS and GOARCH environment variables used to parse the package.\n\tGOOS string\n\tGOARCH string\n\n\t\/\/ V1Path is the package path of a package with major version 1 in a given series.\n\tV1Path string\n}\n\n\/\/ VersionedPackage is a Package along with its corresponding module\n\/\/ information.\ntype VersionedPackage struct {\n\tPackage\n\tModuleInfo\n}\n\n\/\/ Directory represents a folder in a module version, and all of the packages\n\/\/ inside that folder.\ntype Directory struct {\n\tModuleInfo\n\tPath string\n\tPackages []*Package\n}\n\n\/\/ VersionedDirectory is a DirectoryNew along with its corresponding module\n\/\/ information.\ntype VersionedDirectory struct {\n\tDirectoryNew\n\tModuleInfo\n}\n\n\/\/ DirectoryNew is a folder in a module version, and all of the packages\n\/\/ inside that folder. It will replace Directory once everything has been migrated.\ntype DirectoryNew struct {\n\tPath string\n\tV1Path string\n\tIsRedistributable bool\n\tLicenses []*licenses.Metadata \/\/ metadata of applicable version licenses\n\tReadme *Readme\n\tPackage *PackageNew\n}\n\n\/\/ PackageNew is a group of one or more Go source files with the same package\n\/\/ header. A PackageNew is part of a directory.\n\/\/ It will replace Package once everything has been migrated.\ntype PackageNew struct {\n\tName string\n\tPath string\n\tDocumentation *Documentation\n\tImports []string\n}\n\n\/\/ Documentation is the rendered documentation for a given package\n\/\/ for a specific GOOS and GOARCH.\ntype Documentation struct {\n\t\/\/ The values of the GOOS and GOARCH environment variables used to parse the package.\n\tGOOS string\n\tGOARCH string\n\tSynopsis string\n\tHTML string\n}\n\n\/\/ Readme is a README at a given directory.\ntype Readme struct {\n\tFilepath string\n\tContents string\n}\n\n\/\/ IndexVersion holds the version information returned by the module index.\ntype IndexVersion struct {\n\tPath string\n\tVersion string\n\tTimestamp time.Time\n}\n\n\/\/ ModuleVersionState holds a worker module version state.\ntype ModuleVersionState struct {\n\tModulePath string\n\tVersion string\n\n\t\/\/ IndexTimestamp is the timestamp received from the Index for this version,\n\t\/\/ which should correspond to the time this version was committed to the\n\t\/\/ Index.\n\tIndexTimestamp time.Time\n\t\/\/ CreatedAt is the time this version was originally inserted into the\n\t\/\/ module version state table.\n\tCreatedAt time.Time\n\n\t\/\/ Status is the most recent HTTP status code received from the Fetch service\n\t\/\/ for this version, or nil if no request to the fetch service has been made.\n\tStatus int\n\t\/\/ Error is the most recent HTTP response body received from the Fetch\n\t\/\/ service, for a response with an unsuccessful status code. It is used for\n\t\/\/ debugging only, and has no semantic significance.\n\tError string\n\t\/\/ TryCount is the number of times a fetch of this version has been\n\t\/\/ attempted.\n\tTryCount int\n\t\/\/ LastProcessedAt is the last time this version was updated with a result\n\t\/\/ from the fetch service.\n\tLastProcessedAt *time.Time\n\t\/\/ NextProcessedAfter is the next time a fetch for this version should be\n\t\/\/ attempted.\n\tNextProcessedAfter time.Time\n\n\t\/\/ AppVersion is the value of the GAE_VERSION environment variable, which is\n\t\/\/ set by app engine. It is a timestamp in the format 20190709t112655 that\n\t\/\/ is close to, but not the same as, the deployment time. For example, the\n\t\/\/ deployment time for the above timestamp might be Jul 9, 2019, 11:29:59 AM.\n\tAppVersion string\n\n\t\/\/ GoModPath is the path declared in the go.mod file.\n\tGoModPath string\n\n\t\/\/ NumPackages it the number of packages that were processed as part of the\n\t\/\/ module (regardless of whether the processing was successful).\n\tNumPackages *int\n}\n\n\/\/ PackageVersionState holds a worker package version state. It is associated\n\/\/ with a given module version state.\ntype PackageVersionState struct {\n\tPackagePath string\n\tModulePath string\n\tVersion string\n\tStatus int\n\tError string\n}\n\n\/\/ SearchResult represents a single search result from SearchDocuments.\ntype SearchResult struct {\n\tName string\n\tPackagePath string\n\tModulePath string\n\tVersion string\n\tSynopsis string\n\tLicenses []string\n\n\tCommitTime time.Time\n\t\/\/ Score is used to sort items in an array of SearchResult.\n\tScore float64\n\n\t\/\/ NumImportedBy is the number of packages that import Package.\n\tNumImportedBy uint64\n\n\t\/\/ NumResults is the total number of packages that were returned for this search.\n\tNumResults uint64\n\t\/\/ Approximate reports whether NumResults is an approximate count. NumResults\n\t\/\/ can be approximate if search scanned only a subset of documents, and\n\t\/\/ result count is estimated using the hyperloglog algorithm.\n\tApproximate bool\n}\n\n\/\/ A FieldSet is a bit set of struct fields. It is used to avoid reading large\n\/\/ struct fields from the data store. FieldSet is also the type of the\n\/\/ individual bit values. (Think of them as singleton sets.)\n\/\/\n\/\/ NoFields (the zero value) is the empty set. AllFields is the set containing\n\/\/ every field.\n\/\/\n\/\/ FieldSet bits are unique across the entire project, because some types are\n\/\/ concatenations (via embedding) of others. For example, a VersionedPackage\n\/\/ contains the fields of both a ModuleInfo and a Package, so we can't use the\n\/\/ same bits for both ModuleInfo's ReadmeContents field and Package's\n\/\/ DocumentationHTML field.\ntype FieldSet int64\n\n\/\/ MinimalFields is the empty FieldSet.\nconst MinimalFields FieldSet = 0\n\n\/\/ AllFields is the FieldSet that contains all fields.\nconst AllFields FieldSet = -1\n\n\/\/ StringFieldMissing is the value for string fields that are not present\n\/\/ in a struct. We use it to distinguish a (possibly valid) empty string\n\/\/ from a field that was never populated.\nconst StringFieldMissing = \"!MISSING\"\n\n\/\/ FieldSet bits for fields that can be conditionally read from the data store.\nconst (\n\tWithReadmeContents FieldSet = 1 << iota\n\tWithDocumentationHTML\n)\n<|endoftext|>"} {"text":"<commit_before>package keys\n\nimport \"fmt\"\n\n\/\/ Kind expresses usage of the ambient internal key.\ntype Kind int\n\nconst (\n\t\/\/ Delete represents deletion of this key.\n\tDelete = 0\n\t\/\/ Value represents value setting of this key.\n\tValue = 1\n\tmaxKind = Value\n\n\t\/\/ Seek is maximum(Value, Delete), which is a valid Kind and\n\t\/\/ serves as termination point for keys with same sequence.\n\tSeek = maxKind\n)\n\nfunc (k Kind) String() string {\n\tswitch k {\n\tcase Delete:\n\t\treturn \"value deletion\"\n\tcase Value:\n\t\treturn \"value setting\"\n\t}\n\treturn fmt.Sprintf(\"unknown kind: %d\", k)\n}\n<commit_msg>Fix document for keys.Kind Seek<commit_after>package keys\n\nimport \"fmt\"\n\n\/\/ Kind expresses usage of the ambient internal key.\ntype Kind int\n\nconst (\n\t\/\/ Delete represents deletion of this key.\n\tDelete = 0\n\t\/\/ Value represents value setting of this key.\n\tValue = 1\n\tmaxKind = Value\n\n\t\/\/ Seek is maximum(Value, Delete), which is a valid Kind and\n\t\/\/ serves as start point for keys with same sequence.\n\t\/\/\n\t\/\/ See InternalComparator.Compare for ordering among internal keys.\n\tSeek = maxKind\n)\n\nfunc (k Kind) String() string {\n\tswitch k {\n\tcase Delete:\n\t\treturn \"value deletion\"\n\tcase Value:\n\t\treturn \"value setting\"\n\t}\n\treturn fmt.Sprintf(\"unknown kind: %d\", k)\n}\n<|endoftext|>"} {"text":"<commit_before>package util\n\nimport (\n\t\"go\/ast\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"github.com\/gedex\/inflector\"\n\t\"github.com\/serenize\/snaker\"\n)\n\nfunc GetFuncsMap() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"snakecase\": snaker.CamelToSnake,\n\t\t\"plural\": inflector.Pluralize,\n\t\t\"unexport\": strings.ToLower,\n\t\t\"exported\": ast.IsExported,\n\t\t\"primitive\": Primitive,\n\t\t\"numerical\": Numerical,\n\t\t\"lower\": strings.ToLower,\n\t\t\"upper\": strings.ToTitle,\n\t\t\"zeroValue\": ZeroValue,\n\t}\n}\n\nfunc Plural(s string) string {\n\treturn inflector.Pluralize(s)\n}\n\nfunc Snakecase(s string) string {\n\treturn snaker.CamelToSnake(s)\n}\n\nfunc GetPath(path string) string {\n\treturn strings.TrimSuffix(path, string(os.PathSeparator))\n}\n\nfunc GetImportPath(path string) string {\n\treturn GetPath(strings.Replace(path, os.Getenv(\"GOPATH\")+string(os.PathSeparator)+\"src\"+string(os.PathSeparator), \"\", 1))\n}\n\nfunc GetFullPath(path string, subpath string) string {\n\treturn path + string(os.PathSeparator) + strings.Replace(subpath, \"\/\", string(os.PathSeparator), -1)\n}\n\nfunc Mkdir(name string) error {\n\treturn os.MkdirAll(name, 0777)\n}\n\nfunc SaveFile(p string, data []byte) error {\n\treturn ioutil.WriteFile(p, data, 0666)\n}\n\nfunc Primitive(a string) bool {\n\tif a == \"uint\" || a == \"uint8\" || a == \"uint16\" || a == \"uint32\" || a == \"uint64\" ||\n\t\ta == \"int\" || a == \"int8\" || a == \"int16\" || a == \"int32\" || a == \"int64\" ||\n\t\ta == \"float32\" || a == \"float64\" ||\n\t\ta == \"complex32\" || a == \"complex64\" ||\n\t\ta == \"byte\" || a == \"[]byte\" ||\n\t\ta == \"time.Time\" ||\n\t\ta == \"rune\" ||\n\t\ta == \"bool\" ||\n\t\ta == \"string\" {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc ZeroValue(a string) string {\n\tif a == \"uint\" || a == \"uint8\" || a == \"uint16\" || a == \"uint32\" || a == \"uint64\" ||\n\t\ta == \"int\" || a == \"int8\" || a == \"int16\" || a == \"int32\" || a == \"int64\" ||\n\t\ta == \"float32\" || a == \"float64\" ||\n\t\ta == \"complex32\" || a == \"complex64\" {\n\t\treturn \"0\"\n\t}\n\tif a == \"byte\" || a == \"[]byte\" || a == \"*time.Time\" {\n\t\treturn \"nil\"\n\t}\n\tif a == \"rune\" {\n\t\treturn \"''\"\n\t}\n\tif a == \"bool\" {\n\t\treturn \"f\"\n\t}\n\tif a == \"string\" {\n\t\treturn \"\\\"\\\"\"\n\t}\n\treturn \"\"\n}\n\nfunc Numerical(a string) bool {\n\tif a == \"uint\" || a == \"uint8\" || a == \"uint16\" || a == \"uint32\" || a == \"uint64\" ||\n\t\ta == \"int\" || a == \"int8\" || a == \"int16\" || a == \"int32\" || a == \"int64\" ||\n\t\ta == \"time.Time\" ||\n\t\ta == \"float32\" || a == \"float64\" ||\n\t\ta == \"complex32\" || a == \"complex64\" {\n\t\treturn true\n\t}\n\treturn false\n}\n<commit_msg>Changed templates funcs<commit_after>package util\n\nimport (\n\t\"go\/ast\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/template\"\n\n\t\"github.com\/gedex\/inflector\"\n\t\"github.com\/serenize\/snaker\"\n)\n\nfunc GetFuncsMap() template.FuncMap {\n\treturn template.FuncMap{\n\t\t\"plural\": inflector.Pluralize,\n\t\t\"unexport\": strings.ToLower,\n\t\t\"exported\": ast.IsExported,\n\t\t\"primitive\": Primitive,\n\t\t\"numerical\": Numerical,\n\t\t\"lower\": strings.ToLower,\n\t\t\"upper\": Upper,\n\t\t\"zeroValue\": ZeroValue,\n\t}\n}\n\nfunc Plural(s string) string {\n\treturn inflector.Pluralize(s)\n}\n\nfunc Upper(s string) string {\n\treturn strings.ToUpper(string(s[0])) + s[1:]\n}\n\nfunc Snakecase(s string) string {\n\treturn snaker.CamelToSnake(s)\n}\n\nfunc GetPath(path string) string {\n\treturn strings.TrimSuffix(path, string(os.PathSeparator))\n}\n\nfunc GetImportPath(path string) string {\n\treturn GetPath(strings.Replace(path, os.Getenv(\"GOPATH\")+string(os.PathSeparator)+\"src\"+string(os.PathSeparator), \"\", 1))\n}\n\nfunc GetFullPath(path string, subpath string) string {\n\treturn path + string(os.PathSeparator) + strings.Replace(subpath, \"\/\", string(os.PathSeparator), -1)\n}\n\nfunc Mkdir(name string) error {\n\treturn os.MkdirAll(name, 0777)\n}\n\nfunc SaveFile(p string, data []byte) error {\n\treturn ioutil.WriteFile(p, data, 0666)\n}\n\nfunc Primitive(a string) bool {\n\tif a == \"uint\" || a == \"uint8\" || a == \"uint16\" || a == \"uint32\" || a == \"uint64\" ||\n\t\ta == \"int\" || a == \"int8\" || a == \"int16\" || a == \"int32\" || a == \"int64\" ||\n\t\ta == \"float32\" || a == \"float64\" ||\n\t\ta == \"complex32\" || a == \"complex64\" ||\n\t\ta == \"byte\" || a == \"[]byte\" ||\n\t\ta == \"time.Time\" ||\n\t\ta == \"rune\" ||\n\t\ta == \"bool\" ||\n\t\ta == \"string\" {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc ZeroValue(a string) string {\n\tif a == \"uint\" || a == \"uint8\" || a == \"uint16\" || a == \"uint32\" || a == \"uint64\" ||\n\t\ta == \"int\" || a == \"int8\" || a == \"int16\" || a == \"int32\" || a == \"int64\" ||\n\t\ta == \"float32\" || a == \"float64\" ||\n\t\ta == \"complex32\" || a == \"complex64\" {\n\t\treturn \"0\"\n\t}\n\tif a == \"byte\" || a == \"[]byte\" || a == \"*time.Time\" {\n\t\treturn \"nil\"\n\t}\n\tif a == \"rune\" {\n\t\treturn \"''\"\n\t}\n\tif a == \"bool\" {\n\t\treturn \"f\"\n\t}\n\tif a == \"string\" {\n\t\treturn \"\\\"\\\"\"\n\t}\n\treturn \"\"\n}\n\nfunc Numerical(a string) bool {\n\tif a == \"uint\" || a == \"uint8\" || a == \"uint16\" || a == \"uint32\" || a == \"uint64\" ||\n\t\ta == \"int\" || a == \"int8\" || a == \"int16\" || a == \"int32\" || a == \"int64\" ||\n\t\ta == \"time.Time\" ||\n\t\ta == \"float32\" || a == \"float64\" ||\n\t\ta == \"complex32\" || a == \"complex64\" {\n\t\treturn true\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package expr\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/grafana\/metrictank\/api\/models\"\n\tschema \"gopkg.in\/raintank\/schema.v1\"\n)\n\ntype FuncAsPercent struct {\n\tin GraphiteFunc\n\ttotalFloat float64\n\ttotalSeries GraphiteFunc\n\tnodes []expr\n}\n\nfunc NewAsPercent() GraphiteFunc {\n\treturn &FuncAsPercent{totalFloat: math.NaN()}\n}\n\nfunc (s *FuncAsPercent) Signature() ([]Arg, []Arg) {\n\treturn []Arg{\n\t\tArgSeriesList{val: &s.in},\n\t\tArgIn{\n\t\t\tkey: \"total\",\n\t\t\topt: true,\n\t\t\targs: []Arg{\n\t\t\t\tArgFloat{val: &s.totalFloat},\n\t\t\t\tArgSeriesList{val: &s.totalSeries},\n\t\t\t},\n\t\t},\n\t\tArgStringsOrInts{val: &s.nodes, opt: true, key: \"nodes\"},\n\t}, []Arg{ArgSeriesList{}}\n}\n\nfunc (s *FuncAsPercent) Context(context Context) Context {\n\treturn context\n}\n\nfunc (s *FuncAsPercent) Exec(cache map[Req][]models.Series) ([]models.Series, error) {\n\tseries, err := s.in.Exec(cache)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar outSeries []models.Series\n\tvar totals []models.Series\n\tif s.totalSeries != nil {\n\t\ttotals, err = s.totalSeries.Exec(cache)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif s.nodes != nil {\n\t\tif !math.IsNaN(s.totalFloat) {\n\t\t\treturn nil, errors.New(\"total must be None or a seriesList\")\n\t\t}\n\t\toutSeries, err = s.execWithNodes(series, totals, cache)\n\t} else {\n\t\tif totals != nil && len(totals) != 1 && len(totals) != len(series) {\n\t\t\treturn nil, errors.New(\"asPercent second argument (total) must be missing, a single digit, reference exactly 1 series or reference the same number of series as the first argument\")\n\t\t}\n\t\toutSeries, err = s.execWithoutNodes(series, totals, cache)\n\t}\n\treturn outSeries, err\n}\n\nfunc (s *FuncAsPercent) execWithNodes(series, totals []models.Series, cache map[Req][]models.Series) ([]models.Series, error) {\n\tvar outSeries []models.Series\n\t\/\/ Set of keys\n\tkeys := make(map[string]struct{})\n\t\/\/ Series grouped by key\n\tmetaSeries := groupSeriesByKey(series, s.nodes, &keys)\n\t\/\/ The totals series for each key\n\tvar totalSeries map[string]models.Series\n\n\t\/\/ calculate the sum\n\tif math.IsNaN(s.totalFloat) && totals == nil {\n\t\ttotalSeries = getTotalSeries(metaSeries, cache)\n\t\t\/\/ calculate sum of totals series\n\t} else if totals != nil {\n\t\ttotalSeriesLists := groupSeriesByKey(totals, s.nodes, &keys)\n\t\ttotalSeries = getTotalSeries(totalSeriesLists, cache)\n\t}\n\n\tvar nones []schema.Point\n\n\tfor key := range keys {\n\t\t\/\/ No input series for a corresponding total series\n\t\tif _, ok := metaSeries[key]; !ok {\n\t\t\tnonesSerie := totalSeries[key]\n\t\t\tnonesSerie.QueryPatt = fmt.Sprintf(\"asPercent(MISSING,%s)\", totalSeries[key].QueryPatt)\n\t\t\tnonesSerie.Target = fmt.Sprintf(\"asPercent(MISSING,%s)\", totalSeries[key].Target)\n\t\t\tnonesSerie.Tags = map[string]string{\"name\": nonesSerie.Target}\n\n\t\t\tif nones == nil {\n\t\t\t\tnones := pointSlicePool.Get().([]schema.Point)\n\t\t\t\tfor _, p := range totalSeries[key].Datapoints {\n\t\t\t\t\tp.Val = math.NaN()\n\t\t\t\t\tnones = append(nones, p)\n\t\t\t\t}\n\t\t\t\tcache[Req{}] = append(cache[Req{}], nonesSerie)\n\t\t\t}\n\n\t\t\tnonesSerie.Datapoints = nones\n\t\t\toutSeries = append(outSeries, nonesSerie)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, serie1 := range metaSeries[key] {\n\t\t\t\/\/ No total series for a corresponding input series\n\t\t\tif _, ok := totalSeries[key]; !ok {\n\t\t\t\tnonesSerie := serie1\n\t\t\t\tnonesSerie.QueryPatt = fmt.Sprintf(\"asPercent(%s,MISSING)\", serie1.QueryPatt)\n\t\t\t\tnonesSerie.Target = fmt.Sprintf(\"asPercent(%s,MISSING)\", serie1.Target)\n\t\t\t\tnonesSerie.Tags = map[string]string{\"name\": nonesSerie.Target}\n\n\t\t\t\tif nones == nil {\n\t\t\t\t\tnones := pointSlicePool.Get().([]schema.Point)\n\t\t\t\t\tfor _, p := range serie1.Datapoints {\n\t\t\t\t\t\tp.Val = math.NaN()\n\t\t\t\t\t\tnones = append(nones, p)\n\t\t\t\t\t}\n\t\t\t\t\tcache[Req{}] = append(cache[Req{}], nonesSerie)\n\t\t\t\t}\n\n\t\t\t\tnonesSerie.Datapoints = nones\n\t\t\t\toutSeries = append(outSeries, nonesSerie)\n\t\t\t} else {\n\t\t\t\t\/\/ key found in both metaSeries and totalSeries\n\t\t\t\tserie1 = serie1.Copy(pointSlicePool.Get().([]schema.Point))\n\t\t\t\tserie2 := totalSeries[key]\n\t\t\t\tserie1.QueryPatt = fmt.Sprintf(\"asPercent(%s,%s)\", serie1.QueryPatt, serie2.QueryPatt)\n\t\t\t\tserie1.Target = fmt.Sprintf(\"asPercent(%s,%s)\", serie1.Target, serie2.Target)\n\t\t\t\tserie1.Tags = map[string]string{\"name\": serie1.Target}\n\t\t\t\tfor i := range serie1.Datapoints {\n\t\t\t\t\tserie1.Datapoints[i].Val = computeAsPercent(serie1.Datapoints[i].Val, serie2.Datapoints[i].Val)\n\t\t\t\t}\n\t\t\t\toutSeries = append(outSeries, serie1)\n\t\t\t\tcache[Req{}] = append(cache[Req{}], serie1)\n\t\t\t}\n\n\t\t}\n\t}\n\treturn outSeries, nil\n}\n\nfunc (s *FuncAsPercent) execWithoutNodes(series, totals []models.Series, cache map[Req][]models.Series) ([]models.Series, error) {\n\tvar outSeries []models.Series\n\tvar totalsSerie models.Series\n\tif math.IsNaN(s.totalFloat) && totals == nil {\n\t\ttotalsSerie = sumSeries(series, cache)\n\t\tif len(series) == 1 {\n\t\t\ttotalsSerie.Target = fmt.Sprintf(\"sumSeries(%s)\", totalsSerie.QueryPatt)\n\t\t\ttotalsSerie.QueryPatt = fmt.Sprintf(\"sumSeries(%s)\", totalsSerie.QueryPatt)\n\t\t\ttotalsSerie.Tags = map[string]string{\"name\": totalsSerie.Target}\n\t\t}\n\t} else if totals != nil {\n\t\tif len(totals) == 1 {\n\t\t\ttotalsSerie = totals[0]\n\t\t} else if len(totals) == len(series) {\n\t\t\t\/\/ Sorted to match the input series with the total series based on Target.\n\t\t\t\/\/ Mimics Graphite's implementation\n\t\t\tsort.Slice(series, func(i, j int) bool {\n\t\t\t\treturn series[i].Target < series[j].Target\n\t\t\t})\n\t\t\tsort.Slice(totals, func(i, j int) bool {\n\t\t\t\treturn totals[i].Target < totals[j].Target\n\t\t\t})\n\t\t}\n\t} else {\n\t\ttotalsSerie.QueryPatt = fmt.Sprint(s.totalFloat)\n\t\ttotalsSerie.Target = fmt.Sprint(s.totalFloat)\n\t}\n\n\tfor i, serie := range series {\n\t\tif len(totals) == len(series) {\n\t\t\ttotalsSerie = totals[i]\n\t\t}\n\t\tserie = serie.Copy(pointSlicePool.Get().([]schema.Point))\n\t\tserie.QueryPatt = fmt.Sprintf(\"asPercent(%s,%s)\", serie.QueryPatt, totalsSerie.QueryPatt)\n\t\tserie.Target = fmt.Sprintf(\"asPercent(%s,%s)\", serie.Target, totalsSerie.Target)\n\t\tserie.Tags = map[string]string{\"name\": serie.Target}\n\t\tfor i := range serie.Datapoints {\n\t\t\tvar totalVal float64\n\t\t\tif len(totalsSerie.Datapoints) > 0 {\n\t\t\t\ttotalVal = totalsSerie.Datapoints[i].Val\n\t\t\t} else {\n\t\t\t\ttotalVal = s.totalFloat\n\t\t\t}\n\t\t\tserie.Datapoints[i].Val = computeAsPercent(serie.Datapoints[i].Val, totalVal)\n\t\t}\n\t\toutSeries = append(outSeries, serie)\n\t\tcache[Req{}] = append(cache[Req{}], serie)\n\t}\n\treturn outSeries, nil\n}\n\nfunc computeAsPercent(in, total float64) float64 {\n\tif math.IsNaN(in) || math.IsNaN(total) {\n\t\treturn math.NaN()\n\t}\n\tif total == 0 {\n\t\treturn math.NaN()\n\t}\n\treturn in \/ total * 100\n}\n\nfunc groupSeriesByKey(series []models.Series, nodes []expr, keys *map[string]struct{}) map[string][]models.Series {\n\tkeyedSeries := make(map[string][]models.Series)\n\tfor _, serie := range series {\n\t\tkey := aggKey(serie, nodes)\n\t\tif _, ok := keyedSeries[key]; !ok {\n\t\t\tkeyedSeries[key] = []models.Series{serie}\n\t\t\t(*keys)[key] = struct{}{}\n\t\t} else {\n\t\t\tkeyedSeries[key] = append(keyedSeries[key], serie)\n\t\t}\n\t}\n\treturn keyedSeries\n}\n\n\/\/ Sums each seriesList in map of seriesLists\nfunc getTotalSeries(totalSeriesLists map[string][]models.Series, cache map[Req][]models.Series) map[string]models.Series {\n\ttotalSeries := make(map[string]models.Series, len(totalSeriesLists))\n\tfor key := range totalSeriesLists {\n\t\ttotalSeries[key] = sumSeries(totalSeriesLists[key], cache)\n\t}\n\treturn totalSeries\n}\n\n\/\/ sumSeries returns a copy-on-write series that is the sum of the inputs\nfunc sumSeries(series []models.Series, cache map[Req][]models.Series) models.Series {\n\tif len(series) == 1 {\n\t\treturn series[0]\n\t}\n\tout := pointSlicePool.Get().([]schema.Point)\n\tcrossSeriesSum(series, &out)\n\tvar queryPatts []string\n\nLoop:\n\tfor _, v := range series {\n\t\t\/\/ avoid duplicates\n\t\tfor _, qp := range queryPatts {\n\t\t\tif qp == v.QueryPatt {\n\t\t\t\tcontinue Loop\n\t\t\t}\n\t\t}\n\t\tqueryPatts = append(queryPatts, v.QueryPatt)\n\t}\n\tname := fmt.Sprintf(\"sumSeries(%s)\", strings.Join(queryPatts, \",\"))\n\tcons, queryCons := summarizeCons(series)\n\tsum := models.Series{\n\t\tTarget: name,\n\t\tQueryPatt: name,\n\t\tDatapoints: out,\n\t\tInterval: series[0].Interval,\n\t\tConsolidator: cons,\n\t\tQueryCons: queryCons,\n\t\tTags: map[string]string{\"name\": name},\n\t}\n\tcache[Req{}] = append(cache[Req{}], sum)\n\treturn sum\n}\n<commit_msg>fix scoping issue<commit_after>package expr\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/grafana\/metrictank\/api\/models\"\n\tschema \"gopkg.in\/raintank\/schema.v1\"\n)\n\ntype FuncAsPercent struct {\n\tin GraphiteFunc\n\ttotalFloat float64\n\ttotalSeries GraphiteFunc\n\tnodes []expr\n}\n\nfunc NewAsPercent() GraphiteFunc {\n\treturn &FuncAsPercent{totalFloat: math.NaN()}\n}\n\nfunc (s *FuncAsPercent) Signature() ([]Arg, []Arg) {\n\treturn []Arg{\n\t\tArgSeriesList{val: &s.in},\n\t\tArgIn{\n\t\t\tkey: \"total\",\n\t\t\topt: true,\n\t\t\targs: []Arg{\n\t\t\t\tArgFloat{val: &s.totalFloat},\n\t\t\t\tArgSeriesList{val: &s.totalSeries},\n\t\t\t},\n\t\t},\n\t\tArgStringsOrInts{val: &s.nodes, opt: true, key: \"nodes\"},\n\t}, []Arg{ArgSeriesList{}}\n}\n\nfunc (s *FuncAsPercent) Context(context Context) Context {\n\treturn context\n}\n\nfunc (s *FuncAsPercent) Exec(cache map[Req][]models.Series) ([]models.Series, error) {\n\tseries, err := s.in.Exec(cache)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar outSeries []models.Series\n\tvar totals []models.Series\n\tif s.totalSeries != nil {\n\t\ttotals, err = s.totalSeries.Exec(cache)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tif s.nodes != nil {\n\t\tif !math.IsNaN(s.totalFloat) {\n\t\t\treturn nil, errors.New(\"total must be None or a seriesList\")\n\t\t}\n\t\toutSeries, err = s.execWithNodes(series, totals, cache)\n\t} else {\n\t\tif totals != nil && len(totals) != 1 && len(totals) != len(series) {\n\t\t\treturn nil, errors.New(\"asPercent second argument (total) must be missing, a single digit, reference exactly 1 series or reference the same number of series as the first argument\")\n\t\t}\n\t\toutSeries, err = s.execWithoutNodes(series, totals, cache)\n\t}\n\treturn outSeries, err\n}\n\nfunc (s *FuncAsPercent) execWithNodes(series, totals []models.Series, cache map[Req][]models.Series) ([]models.Series, error) {\n\tvar outSeries []models.Series\n\t\/\/ Set of keys\n\tkeys := make(map[string]struct{})\n\t\/\/ Series grouped by key\n\tmetaSeries := groupSeriesByKey(series, s.nodes, &keys)\n\t\/\/ The totals series for each key\n\tvar totalSeries map[string]models.Series\n\n\t\/\/ calculate the sum\n\tif math.IsNaN(s.totalFloat) && totals == nil {\n\t\ttotalSeries = getTotalSeries(metaSeries, cache)\n\t\t\/\/ calculate sum of totals series\n\t} else if totals != nil {\n\t\ttotalSeriesLists := groupSeriesByKey(totals, s.nodes, &keys)\n\t\ttotalSeries = getTotalSeries(totalSeriesLists, cache)\n\t}\n\n\tvar nones []schema.Point\n\n\tfor key := range keys {\n\t\t\/\/ No input series for a corresponding total series\n\t\tif _, ok := metaSeries[key]; !ok {\n\t\t\tnonesSerie := totalSeries[key]\n\t\t\tnonesSerie.QueryPatt = fmt.Sprintf(\"asPercent(MISSING,%s)\", totalSeries[key].QueryPatt)\n\t\t\tnonesSerie.Target = fmt.Sprintf(\"asPercent(MISSING,%s)\", totalSeries[key].Target)\n\t\t\tnonesSerie.Tags = map[string]string{\"name\": nonesSerie.Target}\n\n\t\t\tif nones == nil {\n\t\t\t\tnones = pointSlicePool.Get().([]schema.Point)\n\t\t\t\tfor _, p := range totalSeries[key].Datapoints {\n\t\t\t\t\tp.Val = math.NaN()\n\t\t\t\t\tnones = append(nones, p)\n\t\t\t\t}\n\t\t\t\tcache[Req{}] = append(cache[Req{}], nonesSerie)\n\t\t\t}\n\n\t\t\tnonesSerie.Datapoints = nones\n\t\t\toutSeries = append(outSeries, nonesSerie)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, serie1 := range metaSeries[key] {\n\t\t\t\/\/ No total series for a corresponding input series\n\t\t\tif _, ok := totalSeries[key]; !ok {\n\t\t\t\tnonesSerie := serie1\n\t\t\t\tnonesSerie.QueryPatt = fmt.Sprintf(\"asPercent(%s,MISSING)\", serie1.QueryPatt)\n\t\t\t\tnonesSerie.Target = fmt.Sprintf(\"asPercent(%s,MISSING)\", serie1.Target)\n\t\t\t\tnonesSerie.Tags = map[string]string{\"name\": nonesSerie.Target}\n\n\t\t\t\tif nones == nil {\n\t\t\t\t\tnones = pointSlicePool.Get().([]schema.Point)\n\t\t\t\t\tfor _, p := range serie1.Datapoints {\n\t\t\t\t\t\tp.Val = math.NaN()\n\t\t\t\t\t\tnones = append(nones, p)\n\t\t\t\t\t}\n\t\t\t\t\tcache[Req{}] = append(cache[Req{}], nonesSerie)\n\t\t\t\t}\n\n\t\t\t\tnonesSerie.Datapoints = nones\n\t\t\t\toutSeries = append(outSeries, nonesSerie)\n\t\t\t} else {\n\t\t\t\t\/\/ key found in both metaSeries and totalSeries\n\t\t\t\tserie1 = serie1.Copy(pointSlicePool.Get().([]schema.Point))\n\t\t\t\tserie2 := totalSeries[key]\n\t\t\t\tserie1.QueryPatt = fmt.Sprintf(\"asPercent(%s,%s)\", serie1.QueryPatt, serie2.QueryPatt)\n\t\t\t\tserie1.Target = fmt.Sprintf(\"asPercent(%s,%s)\", serie1.Target, serie2.Target)\n\t\t\t\tserie1.Tags = map[string]string{\"name\": serie1.Target}\n\t\t\t\tfor i := range serie1.Datapoints {\n\t\t\t\t\tserie1.Datapoints[i].Val = computeAsPercent(serie1.Datapoints[i].Val, serie2.Datapoints[i].Val)\n\t\t\t\t}\n\t\t\t\toutSeries = append(outSeries, serie1)\n\t\t\t\tcache[Req{}] = append(cache[Req{}], serie1)\n\t\t\t}\n\n\t\t}\n\t}\n\treturn outSeries, nil\n}\n\nfunc (s *FuncAsPercent) execWithoutNodes(series, totals []models.Series, cache map[Req][]models.Series) ([]models.Series, error) {\n\tvar outSeries []models.Series\n\tvar totalsSerie models.Series\n\tif math.IsNaN(s.totalFloat) && totals == nil {\n\t\ttotalsSerie = sumSeries(series, cache)\n\t\tif len(series) == 1 {\n\t\t\ttotalsSerie.Target = fmt.Sprintf(\"sumSeries(%s)\", totalsSerie.QueryPatt)\n\t\t\ttotalsSerie.QueryPatt = fmt.Sprintf(\"sumSeries(%s)\", totalsSerie.QueryPatt)\n\t\t\ttotalsSerie.Tags = map[string]string{\"name\": totalsSerie.Target}\n\t\t}\n\t} else if totals != nil {\n\t\tif len(totals) == 1 {\n\t\t\ttotalsSerie = totals[0]\n\t\t} else if len(totals) == len(series) {\n\t\t\t\/\/ Sorted to match the input series with the total series based on Target.\n\t\t\t\/\/ Mimics Graphite's implementation\n\t\t\tsort.Slice(series, func(i, j int) bool {\n\t\t\t\treturn series[i].Target < series[j].Target\n\t\t\t})\n\t\t\tsort.Slice(totals, func(i, j int) bool {\n\t\t\t\treturn totals[i].Target < totals[j].Target\n\t\t\t})\n\t\t}\n\t} else {\n\t\ttotalsSerie.QueryPatt = fmt.Sprint(s.totalFloat)\n\t\ttotalsSerie.Target = fmt.Sprint(s.totalFloat)\n\t}\n\n\tfor i, serie := range series {\n\t\tif len(totals) == len(series) {\n\t\t\ttotalsSerie = totals[i]\n\t\t}\n\t\tserie = serie.Copy(pointSlicePool.Get().([]schema.Point))\n\t\tserie.QueryPatt = fmt.Sprintf(\"asPercent(%s,%s)\", serie.QueryPatt, totalsSerie.QueryPatt)\n\t\tserie.Target = fmt.Sprintf(\"asPercent(%s,%s)\", serie.Target, totalsSerie.Target)\n\t\tserie.Tags = map[string]string{\"name\": serie.Target}\n\t\tfor i := range serie.Datapoints {\n\t\t\tvar totalVal float64\n\t\t\tif len(totalsSerie.Datapoints) > 0 {\n\t\t\t\ttotalVal = totalsSerie.Datapoints[i].Val\n\t\t\t} else {\n\t\t\t\ttotalVal = s.totalFloat\n\t\t\t}\n\t\t\tserie.Datapoints[i].Val = computeAsPercent(serie.Datapoints[i].Val, totalVal)\n\t\t}\n\t\toutSeries = append(outSeries, serie)\n\t\tcache[Req{}] = append(cache[Req{}], serie)\n\t}\n\treturn outSeries, nil\n}\n\nfunc computeAsPercent(in, total float64) float64 {\n\tif math.IsNaN(in) || math.IsNaN(total) {\n\t\treturn math.NaN()\n\t}\n\tif total == 0 {\n\t\treturn math.NaN()\n\t}\n\treturn in \/ total * 100\n}\n\nfunc groupSeriesByKey(series []models.Series, nodes []expr, keys *map[string]struct{}) map[string][]models.Series {\n\tkeyedSeries := make(map[string][]models.Series)\n\tfor _, serie := range series {\n\t\tkey := aggKey(serie, nodes)\n\t\tif _, ok := keyedSeries[key]; !ok {\n\t\t\tkeyedSeries[key] = []models.Series{serie}\n\t\t\t(*keys)[key] = struct{}{}\n\t\t} else {\n\t\t\tkeyedSeries[key] = append(keyedSeries[key], serie)\n\t\t}\n\t}\n\treturn keyedSeries\n}\n\n\/\/ Sums each seriesList in map of seriesLists\nfunc getTotalSeries(totalSeriesLists map[string][]models.Series, cache map[Req][]models.Series) map[string]models.Series {\n\ttotalSeries := make(map[string]models.Series, len(totalSeriesLists))\n\tfor key := range totalSeriesLists {\n\t\ttotalSeries[key] = sumSeries(totalSeriesLists[key], cache)\n\t}\n\treturn totalSeries\n}\n\n\/\/ sumSeries returns a copy-on-write series that is the sum of the inputs\nfunc sumSeries(series []models.Series, cache map[Req][]models.Series) models.Series {\n\tif len(series) == 1 {\n\t\treturn series[0]\n\t}\n\tout := pointSlicePool.Get().([]schema.Point)\n\tcrossSeriesSum(series, &out)\n\tvar queryPatts []string\n\nLoop:\n\tfor _, v := range series {\n\t\t\/\/ avoid duplicates\n\t\tfor _, qp := range queryPatts {\n\t\t\tif qp == v.QueryPatt {\n\t\t\t\tcontinue Loop\n\t\t\t}\n\t\t}\n\t\tqueryPatts = append(queryPatts, v.QueryPatt)\n\t}\n\tname := fmt.Sprintf(\"sumSeries(%s)\", strings.Join(queryPatts, \",\"))\n\tcons, queryCons := summarizeCons(series)\n\tsum := models.Series{\n\t\tTarget: name,\n\t\tQueryPatt: name,\n\t\tDatapoints: out,\n\t\tInterval: series[0].Interval,\n\t\tConsolidator: cons,\n\t\tQueryCons: queryCons,\n\t\tTags: map[string]string{\"name\": name},\n\t}\n\tcache[Req{}] = append(cache[Req{}], sum)\n\treturn sum\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cidrset\n\nimport (\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\/big\"\n\t\"math\/bits\"\n\t\"net\"\n\t\"sync\"\n)\n\n\/\/ CidrSet manages a set of CIDR ranges from which blocks of IPs can\n\/\/ be allocated from.\ntype CidrSet struct {\n\tsync.Mutex\n\t\/\/ clusterCIDR is the CIDR assigned to the cluster\n\tclusterCIDR *net.IPNet\n\t\/\/ clusterMaskSize is the mask size, in bits, assigned to the cluster\n\t\/\/ caches the mask size to avoid the penalty of calling clusterCIDR.Mask.Size()\n\tclusterMaskSize int\n\t\/\/ nodeMask is the network mask assigned to the nodes\n\tnodeMask net.IPMask\n\t\/\/ nodeMaskSize is the mask size, in bits,assigned to the nodes\n\t\/\/ caches the mask size to avoid the penalty of calling nodeMask.Size()\n\tnodeMaskSize int\n\t\/\/ maxCIDRs is the maximum number of CIDRs that can be allocated\n\tmaxCIDRs int\n\t\/\/ allocatedCIDRs counts the number of CIDRs allocated\n\tallocatedCIDRs int\n\t\/\/ nextCandidate points to the next CIDR that should be free\n\tnextCandidate int\n\t\/\/ used is a bitmap used to track the CIDRs allocated\n\tused big.Int\n\t\/\/ label is used to identify the metrics\n\tlabel string\n}\n\nconst (\n\t\/\/ The subnet mask size cannot be greater than 16 more than the cluster mask size\n\t\/\/ TODO: https:\/\/github.com\/kubernetes\/kubernetes\/issues\/44918\n\t\/\/ clusterSubnetMaxDiff limited to 16 due to the uncompressed bitmap\n\t\/\/ Due to this limitation the subnet mask for IPv6 cluster cidr needs to be >= 48\n\t\/\/ as default mask size for IPv6 is 64.\n\tclusterSubnetMaxDiff = 16\n\t\/\/ halfIPv6Len is the half of the IPv6 length\n\thalfIPv6Len = net.IPv6len \/ 2\n)\n\nvar (\n\t\/\/ ErrCIDRRangeNoCIDRsRemaining occurs when there is no more space\n\t\/\/ to allocate CIDR ranges.\n\tErrCIDRRangeNoCIDRsRemaining = errors.New(\n\t\t\"CIDR allocation failed; there are no remaining CIDRs left to allocate in the accepted range\")\n\t\/\/ ErrCIDRSetSubNetTooBig occurs when the subnet mask size is too\n\t\/\/ big compared to the CIDR mask size.\n\tErrCIDRSetSubNetTooBig = errors.New(\n\t\t\"New CIDR set failed; the node CIDR size is too big\")\n)\n\n\/\/ NewCIDRSet creates a new CidrSet.\nfunc NewCIDRSet(clusterCIDR *net.IPNet, subNetMaskSize int) (*CidrSet, error) {\n\tclusterMask := clusterCIDR.Mask\n\tclusterMaskSize, bits := clusterMask.Size()\n\n\tvar maxCIDRs int\n\tif (clusterCIDR.IP.To4() == nil) && (subNetMaskSize-clusterMaskSize > clusterSubnetMaxDiff) {\n\t\treturn nil, ErrCIDRSetSubNetTooBig\n\t}\n\n\t\/\/ register CidrSet metrics\n\tregisterCidrsetMetrics()\n\n\tmaxCIDRs = 1 << uint32(subNetMaskSize-clusterMaskSize)\n\treturn &CidrSet{\n\t\tclusterCIDR: clusterCIDR,\n\t\tnodeMask: net.CIDRMask(subNetMaskSize, bits),\n\t\tclusterMaskSize: clusterMaskSize,\n\t\tmaxCIDRs: maxCIDRs,\n\t\tnodeMaskSize: subNetMaskSize,\n\t\tlabel: clusterCIDR.String(),\n\t}, nil\n}\n\nfunc (s *CidrSet) indexToCIDRBlock(index int) *net.IPNet {\n\tvar ip []byte\n\tswitch \/*v4 or v6*\/ {\n\tcase s.clusterCIDR.IP.To4() != nil:\n\t\t{\n\t\t\tj := uint32(index) << uint32(32-s.nodeMaskSize)\n\t\t\tipInt := (binary.BigEndian.Uint32(s.clusterCIDR.IP)) | j\n\t\t\tip = make([]byte, net.IPv4len)\n\t\t\tbinary.BigEndian.PutUint32(ip, ipInt)\n\t\t}\n\tcase s.clusterCIDR.IP.To16() != nil:\n\t\t{\n\t\t\t\/\/ leftClusterIP | rightClusterIP\n\t\t\t\/\/ 2001:0DB8:1234:0000:0000:0000:0000:0000\n\t\t\tconst v6NBits = 128\n\t\t\tconst halfV6NBits = v6NBits \/ 2\n\t\t\tleftClusterIP := binary.BigEndian.Uint64(s.clusterCIDR.IP[:halfIPv6Len])\n\t\t\trightClusterIP := binary.BigEndian.Uint64(s.clusterCIDR.IP[halfIPv6Len:])\n\n\t\t\tip = make([]byte, net.IPv6len)\n\n\t\t\tif s.nodeMaskSize <= halfV6NBits {\n\t\t\t\t\/\/ We only care about left side IP\n\t\t\t\tleftClusterIP |= uint64(index) << uint(halfV6NBits-s.nodeMaskSize)\n\t\t\t} else {\n\t\t\t\tif s.clusterMaskSize < halfV6NBits {\n\t\t\t\t\t\/\/ see how many bits are needed to reach the left side\n\t\t\t\t\tbtl := uint(s.nodeMaskSize - halfV6NBits)\n\t\t\t\t\tindexMaxBit := uint(64 - bits.LeadingZeros64(uint64(index)))\n\t\t\t\t\tif indexMaxBit > btl {\n\t\t\t\t\t\tleftClusterIP |= uint64(index) >> btl\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\/\/ the right side will be calculated the same way either the\n\t\t\t\t\/\/ subNetMaskSize affects both left and right sides\n\t\t\t\trightClusterIP |= uint64(index) << uint(v6NBits-s.nodeMaskSize)\n\t\t\t}\n\t\t\tbinary.BigEndian.PutUint64(ip[:halfIPv6Len], leftClusterIP)\n\t\t\tbinary.BigEndian.PutUint64(ip[halfIPv6Len:], rightClusterIP)\n\t\t}\n\t}\n\treturn &net.IPNet{\n\t\tIP: ip,\n\t\tMask: s.nodeMask,\n\t}\n}\n\n\/\/ AllocateNext allocates the next free CIDR range. This will set the range\n\/\/ as occupied and return the allocated range.\nfunc (s *CidrSet) AllocateNext() (*net.IPNet, error) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tif s.allocatedCIDRs == s.maxCIDRs {\n\t\treturn nil, ErrCIDRRangeNoCIDRsRemaining\n\t}\n\tcandidate := s.nextCandidate\n\tvar i int\n\tfor i = 0; i < s.maxCIDRs; i++ {\n\t\tif s.used.Bit(candidate) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tcandidate = (candidate + 1) % s.maxCIDRs\n\t}\n\n\ts.nextCandidate = (candidate + 1) % s.maxCIDRs\n\ts.used.SetBit(&s.used, candidate, 1)\n\ts.allocatedCIDRs++\n\t\/\/ Update metrics\n\tcidrSetAllocations.WithLabelValues(s.label).Inc()\n\tcidrSetAllocationTriesPerRequest.WithLabelValues(s.label).Observe(float64(i))\n\tcidrSetUsage.WithLabelValues(s.label).Set(float64(s.allocatedCIDRs) \/ float64(s.maxCIDRs))\n\n\treturn s.indexToCIDRBlock(candidate), nil\n}\n\nfunc (s *CidrSet) getBeginingAndEndIndices(cidr *net.IPNet) (begin, end int, err error) {\n\tif cidr == nil {\n\t\treturn -1, -1, fmt.Errorf(\"error getting indices for cluster cidr %v, cidr is nil\", s.clusterCIDR)\n\t}\n\tbegin, end = 0, s.maxCIDRs-1\n\tcidrMask := cidr.Mask\n\tmaskSize, _ := cidrMask.Size()\n\tvar ipSize int\n\n\tif !s.clusterCIDR.Contains(cidr.IP.Mask(s.clusterCIDR.Mask)) && !cidr.Contains(s.clusterCIDR.IP.Mask(cidr.Mask)) {\n\t\treturn -1, -1, fmt.Errorf(\"cidr %v is out the range of cluster cidr %v\", cidr, s.clusterCIDR)\n\t}\n\n\tif s.clusterMaskSize < maskSize {\n\n\t\tipSize = net.IPv4len\n\t\tif cidr.IP.To4() == nil {\n\t\t\tipSize = net.IPv6len\n\t\t}\n\t\tbegin, err = s.getIndexForCIDR(&net.IPNet{\n\t\t\tIP: cidr.IP.Mask(s.nodeMask),\n\t\t\tMask: s.nodeMask,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn -1, -1, err\n\t\t}\n\t\tip := make([]byte, ipSize)\n\t\tif cidr.IP.To4() != nil {\n\t\t\tipInt := binary.BigEndian.Uint32(cidr.IP) | (^binary.BigEndian.Uint32(cidr.Mask))\n\t\t\tbinary.BigEndian.PutUint32(ip, ipInt)\n\t\t} else {\n\t\t\t\/\/ ipIntLeft | ipIntRight\n\t\t\t\/\/ 2001:0DB8:1234:0000:0000:0000:0000:0000\n\t\t\tipIntLeft := binary.BigEndian.Uint64(cidr.IP[:net.IPv6len\/2]) | (^binary.BigEndian.Uint64(cidr.Mask[:net.IPv6len\/2]))\n\t\t\tipIntRight := binary.BigEndian.Uint64(cidr.IP[net.IPv6len\/2:]) | (^binary.BigEndian.Uint64(cidr.Mask[net.IPv6len\/2:]))\n\t\t\tbinary.BigEndian.PutUint64(ip[:net.IPv6len\/2], ipIntLeft)\n\t\t\tbinary.BigEndian.PutUint64(ip[net.IPv6len\/2:], ipIntRight)\n\t\t}\n\t\tend, err = s.getIndexForCIDR(&net.IPNet{\n\t\t\tIP: net.IP(ip).Mask(s.nodeMask),\n\t\t\tMask: s.nodeMask,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn -1, -1, err\n\t\t}\n\t}\n\treturn begin, end, nil\n}\n\n\/\/ Release releases the given CIDR range.\nfunc (s *CidrSet) Release(cidr *net.IPNet) error {\n\tbegin, end, err := s.getBeginingAndEndIndices(cidr)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.Lock()\n\tdefer s.Unlock()\n\tfor i := begin; i <= end; i++ {\n\t\t\/\/ Only change the counters if we change the bit to prevent\n\t\t\/\/ double counting.\n\t\tif s.used.Bit(i) != 0 {\n\t\t\ts.used.SetBit(&s.used, i, 0)\n\t\t\ts.allocatedCIDRs--\n\t\t\tcidrSetReleases.WithLabelValues(s.label).Inc()\n\t\t}\n\t}\n\n\tcidrSetUsage.WithLabelValues(s.label).Set(float64(s.allocatedCIDRs) \/ float64(s.maxCIDRs))\n\treturn nil\n}\n\n\/\/ Occupy marks the given CIDR range as used. Occupy succeeds even if the CIDR\n\/\/ range was previously used.\nfunc (s *CidrSet) Occupy(cidr *net.IPNet) (err error) {\n\tbegin, end, err := s.getBeginingAndEndIndices(cidr)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.Lock()\n\tdefer s.Unlock()\n\tfor i := begin; i <= end; i++ {\n\t\t\/\/ Only change the counters if we change the bit to prevent\n\t\t\/\/ double counting.\n\t\tif s.used.Bit(i) == 0 {\n\t\t\ts.used.SetBit(&s.used, i, 1)\n\t\t\ts.allocatedCIDRs++\n\t\t\tcidrSetAllocations.WithLabelValues(s.label).Inc()\n\t\t}\n\t}\n\n\tcidrSetUsage.WithLabelValues(s.label).Set(float64(s.allocatedCIDRs) \/ float64(s.maxCIDRs))\n\treturn nil\n}\n\nfunc (s *CidrSet) getIndexForCIDR(cidr *net.IPNet) (int, error) {\n\treturn s.getIndexForIP(cidr.IP)\n}\n\nfunc (s *CidrSet) getIndexForIP(ip net.IP) (int, error) {\n\tif ip.To4() != nil {\n\t\tcidrIndex := (binary.BigEndian.Uint32(s.clusterCIDR.IP) ^ binary.BigEndian.Uint32(ip.To4())) >> uint32(32-s.nodeMaskSize)\n\t\tif cidrIndex >= uint32(s.maxCIDRs) {\n\t\t\treturn 0, fmt.Errorf(\"CIDR: %v\/%v is out of the range of CIDR allocator\", ip, s.nodeMaskSize)\n\t\t}\n\t\treturn int(cidrIndex), nil\n\t}\n\tif ip.To16() != nil {\n\t\tbigIP := big.NewInt(0).SetBytes(s.clusterCIDR.IP)\n\t\tbigIP = bigIP.Xor(bigIP, big.NewInt(0).SetBytes(ip))\n\t\tcidrIndexBig := bigIP.Rsh(bigIP, uint(net.IPv6len*8-s.nodeMaskSize))\n\t\tcidrIndex := cidrIndexBig.Uint64()\n\t\tif cidrIndex >= uint64(s.maxCIDRs) {\n\t\t\treturn 0, fmt.Errorf(\"CIDR: %v\/%v is out of the range of CIDR allocator\", ip, s.nodeMaskSize)\n\t\t}\n\t\treturn int(cidrIndex), nil\n\t}\n\n\treturn 0, fmt.Errorf(\"invalid IP: %v\", ip)\n}\n<commit_msg>modify incorrect words<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cidrset\n\nimport (\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\/big\"\n\t\"math\/bits\"\n\t\"net\"\n\t\"sync\"\n)\n\n\/\/ CidrSet manages a set of CIDR ranges from which blocks of IPs can\n\/\/ be allocated from.\ntype CidrSet struct {\n\tsync.Mutex\n\t\/\/ clusterCIDR is the CIDR assigned to the cluster\n\tclusterCIDR *net.IPNet\n\t\/\/ clusterMaskSize is the mask size, in bits, assigned to the cluster\n\t\/\/ caches the mask size to avoid the penalty of calling clusterCIDR.Mask.Size()\n\tclusterMaskSize int\n\t\/\/ nodeMask is the network mask assigned to the nodes\n\tnodeMask net.IPMask\n\t\/\/ nodeMaskSize is the mask size, in bits,assigned to the nodes\n\t\/\/ caches the mask size to avoid the penalty of calling nodeMask.Size()\n\tnodeMaskSize int\n\t\/\/ maxCIDRs is the maximum number of CIDRs that can be allocated\n\tmaxCIDRs int\n\t\/\/ allocatedCIDRs counts the number of CIDRs allocated\n\tallocatedCIDRs int\n\t\/\/ nextCandidate points to the next CIDR that should be free\n\tnextCandidate int\n\t\/\/ used is a bitmap used to track the CIDRs allocated\n\tused big.Int\n\t\/\/ label is used to identify the metrics\n\tlabel string\n}\n\nconst (\n\t\/\/ The subnet mask size cannot be greater than 16 more than the cluster mask size\n\t\/\/ TODO: https:\/\/github.com\/kubernetes\/kubernetes\/issues\/44918\n\t\/\/ clusterSubnetMaxDiff limited to 16 due to the uncompressed bitmap\n\t\/\/ Due to this limitation the subnet mask for IPv6 cluster cidr needs to be >= 48\n\t\/\/ as default mask size for IPv6 is 64.\n\tclusterSubnetMaxDiff = 16\n\t\/\/ halfIPv6Len is the half of the IPv6 length\n\thalfIPv6Len = net.IPv6len \/ 2\n)\n\nvar (\n\t\/\/ ErrCIDRRangeNoCIDRsRemaining occurs when there is no more space\n\t\/\/ to allocate CIDR ranges.\n\tErrCIDRRangeNoCIDRsRemaining = errors.New(\n\t\t\"CIDR allocation failed; there are no remaining CIDRs left to allocate in the accepted range\")\n\t\/\/ ErrCIDRSetSubNetTooBig occurs when the subnet mask size is too\n\t\/\/ big compared to the CIDR mask size.\n\tErrCIDRSetSubNetTooBig = errors.New(\n\t\t\"New CIDR set failed; the node CIDR size is too big\")\n)\n\n\/\/ NewCIDRSet creates a new CidrSet.\nfunc NewCIDRSet(clusterCIDR *net.IPNet, subNetMaskSize int) (*CidrSet, error) {\n\tclusterMask := clusterCIDR.Mask\n\tclusterMaskSize, bits := clusterMask.Size()\n\n\tvar maxCIDRs int\n\tif (clusterCIDR.IP.To4() == nil) && (subNetMaskSize-clusterMaskSize > clusterSubnetMaxDiff) {\n\t\treturn nil, ErrCIDRSetSubNetTooBig\n\t}\n\n\t\/\/ register CidrSet metrics\n\tregisterCidrsetMetrics()\n\n\tmaxCIDRs = 1 << uint32(subNetMaskSize-clusterMaskSize)\n\treturn &CidrSet{\n\t\tclusterCIDR: clusterCIDR,\n\t\tnodeMask: net.CIDRMask(subNetMaskSize, bits),\n\t\tclusterMaskSize: clusterMaskSize,\n\t\tmaxCIDRs: maxCIDRs,\n\t\tnodeMaskSize: subNetMaskSize,\n\t\tlabel: clusterCIDR.String(),\n\t}, nil\n}\n\nfunc (s *CidrSet) indexToCIDRBlock(index int) *net.IPNet {\n\tvar ip []byte\n\tswitch \/*v4 or v6*\/ {\n\tcase s.clusterCIDR.IP.To4() != nil:\n\t\t{\n\t\t\tj := uint32(index) << uint32(32-s.nodeMaskSize)\n\t\t\tipInt := (binary.BigEndian.Uint32(s.clusterCIDR.IP)) | j\n\t\t\tip = make([]byte, net.IPv4len)\n\t\t\tbinary.BigEndian.PutUint32(ip, ipInt)\n\t\t}\n\tcase s.clusterCIDR.IP.To16() != nil:\n\t\t{\n\t\t\t\/\/ leftClusterIP | rightClusterIP\n\t\t\t\/\/ 2001:0DB8:1234:0000:0000:0000:0000:0000\n\t\t\tconst v6NBits = 128\n\t\t\tconst halfV6NBits = v6NBits \/ 2\n\t\t\tleftClusterIP := binary.BigEndian.Uint64(s.clusterCIDR.IP[:halfIPv6Len])\n\t\t\trightClusterIP := binary.BigEndian.Uint64(s.clusterCIDR.IP[halfIPv6Len:])\n\n\t\t\tip = make([]byte, net.IPv6len)\n\n\t\t\tif s.nodeMaskSize <= halfV6NBits {\n\t\t\t\t\/\/ We only care about left side IP\n\t\t\t\tleftClusterIP |= uint64(index) << uint(halfV6NBits-s.nodeMaskSize)\n\t\t\t} else {\n\t\t\t\tif s.clusterMaskSize < halfV6NBits {\n\t\t\t\t\t\/\/ see how many bits are needed to reach the left side\n\t\t\t\t\tbtl := uint(s.nodeMaskSize - halfV6NBits)\n\t\t\t\t\tindexMaxBit := uint(64 - bits.LeadingZeros64(uint64(index)))\n\t\t\t\t\tif indexMaxBit > btl {\n\t\t\t\t\t\tleftClusterIP |= uint64(index) >> btl\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t\/\/ the right side will be calculated the same way either the\n\t\t\t\t\/\/ subNetMaskSize affects both left and right sides\n\t\t\t\trightClusterIP |= uint64(index) << uint(v6NBits-s.nodeMaskSize)\n\t\t\t}\n\t\t\tbinary.BigEndian.PutUint64(ip[:halfIPv6Len], leftClusterIP)\n\t\t\tbinary.BigEndian.PutUint64(ip[halfIPv6Len:], rightClusterIP)\n\t\t}\n\t}\n\treturn &net.IPNet{\n\t\tIP: ip,\n\t\tMask: s.nodeMask,\n\t}\n}\n\n\/\/ AllocateNext allocates the next free CIDR range. This will set the range\n\/\/ as occupied and return the allocated range.\nfunc (s *CidrSet) AllocateNext() (*net.IPNet, error) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tif s.allocatedCIDRs == s.maxCIDRs {\n\t\treturn nil, ErrCIDRRangeNoCIDRsRemaining\n\t}\n\tcandidate := s.nextCandidate\n\tvar i int\n\tfor i = 0; i < s.maxCIDRs; i++ {\n\t\tif s.used.Bit(candidate) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tcandidate = (candidate + 1) % s.maxCIDRs\n\t}\n\n\ts.nextCandidate = (candidate + 1) % s.maxCIDRs\n\ts.used.SetBit(&s.used, candidate, 1)\n\ts.allocatedCIDRs++\n\t\/\/ Update metrics\n\tcidrSetAllocations.WithLabelValues(s.label).Inc()\n\tcidrSetAllocationTriesPerRequest.WithLabelValues(s.label).Observe(float64(i))\n\tcidrSetUsage.WithLabelValues(s.label).Set(float64(s.allocatedCIDRs) \/ float64(s.maxCIDRs))\n\n\treturn s.indexToCIDRBlock(candidate), nil\n}\n\nfunc (s *CidrSet) getBeginningAndEndIndices(cidr *net.IPNet) (begin, end int, err error) {\n\tif cidr == nil {\n\t\treturn -1, -1, fmt.Errorf(\"error getting indices for cluster cidr %v, cidr is nil\", s.clusterCIDR)\n\t}\n\tbegin, end = 0, s.maxCIDRs-1\n\tcidrMask := cidr.Mask\n\tmaskSize, _ := cidrMask.Size()\n\tvar ipSize int\n\n\tif !s.clusterCIDR.Contains(cidr.IP.Mask(s.clusterCIDR.Mask)) && !cidr.Contains(s.clusterCIDR.IP.Mask(cidr.Mask)) {\n\t\treturn -1, -1, fmt.Errorf(\"cidr %v is out the range of cluster cidr %v\", cidr, s.clusterCIDR)\n\t}\n\n\tif s.clusterMaskSize < maskSize {\n\n\t\tipSize = net.IPv4len\n\t\tif cidr.IP.To4() == nil {\n\t\t\tipSize = net.IPv6len\n\t\t}\n\t\tbegin, err = s.getIndexForCIDR(&net.IPNet{\n\t\t\tIP: cidr.IP.Mask(s.nodeMask),\n\t\t\tMask: s.nodeMask,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn -1, -1, err\n\t\t}\n\t\tip := make([]byte, ipSize)\n\t\tif cidr.IP.To4() != nil {\n\t\t\tipInt := binary.BigEndian.Uint32(cidr.IP) | (^binary.BigEndian.Uint32(cidr.Mask))\n\t\t\tbinary.BigEndian.PutUint32(ip, ipInt)\n\t\t} else {\n\t\t\t\/\/ ipIntLeft | ipIntRight\n\t\t\t\/\/ 2001:0DB8:1234:0000:0000:0000:0000:0000\n\t\t\tipIntLeft := binary.BigEndian.Uint64(cidr.IP[:net.IPv6len\/2]) | (^binary.BigEndian.Uint64(cidr.Mask[:net.IPv6len\/2]))\n\t\t\tipIntRight := binary.BigEndian.Uint64(cidr.IP[net.IPv6len\/2:]) | (^binary.BigEndian.Uint64(cidr.Mask[net.IPv6len\/2:]))\n\t\t\tbinary.BigEndian.PutUint64(ip[:net.IPv6len\/2], ipIntLeft)\n\t\t\tbinary.BigEndian.PutUint64(ip[net.IPv6len\/2:], ipIntRight)\n\t\t}\n\t\tend, err = s.getIndexForCIDR(&net.IPNet{\n\t\t\tIP: net.IP(ip).Mask(s.nodeMask),\n\t\t\tMask: s.nodeMask,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn -1, -1, err\n\t\t}\n\t}\n\treturn begin, end, nil\n}\n\n\/\/ Release releases the given CIDR range.\nfunc (s *CidrSet) Release(cidr *net.IPNet) error {\n\tbegin, end, err := s.getBeginningAndEndIndices(cidr)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.Lock()\n\tdefer s.Unlock()\n\tfor i := begin; i <= end; i++ {\n\t\t\/\/ Only change the counters if we change the bit to prevent\n\t\t\/\/ double counting.\n\t\tif s.used.Bit(i) != 0 {\n\t\t\ts.used.SetBit(&s.used, i, 0)\n\t\t\ts.allocatedCIDRs--\n\t\t\tcidrSetReleases.WithLabelValues(s.label).Inc()\n\t\t}\n\t}\n\n\tcidrSetUsage.WithLabelValues(s.label).Set(float64(s.allocatedCIDRs) \/ float64(s.maxCIDRs))\n\treturn nil\n}\n\n\/\/ Occupy marks the given CIDR range as used. Occupy succeeds even if the CIDR\n\/\/ range was previously used.\nfunc (s *CidrSet) Occupy(cidr *net.IPNet) (err error) {\n\tbegin, end, err := s.getBeginningAndEndIndices(cidr)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.Lock()\n\tdefer s.Unlock()\n\tfor i := begin; i <= end; i++ {\n\t\t\/\/ Only change the counters if we change the bit to prevent\n\t\t\/\/ double counting.\n\t\tif s.used.Bit(i) == 0 {\n\t\t\ts.used.SetBit(&s.used, i, 1)\n\t\t\ts.allocatedCIDRs++\n\t\t\tcidrSetAllocations.WithLabelValues(s.label).Inc()\n\t\t}\n\t}\n\n\tcidrSetUsage.WithLabelValues(s.label).Set(float64(s.allocatedCIDRs) \/ float64(s.maxCIDRs))\n\treturn nil\n}\n\nfunc (s *CidrSet) getIndexForCIDR(cidr *net.IPNet) (int, error) {\n\treturn s.getIndexForIP(cidr.IP)\n}\n\nfunc (s *CidrSet) getIndexForIP(ip net.IP) (int, error) {\n\tif ip.To4() != nil {\n\t\tcidrIndex := (binary.BigEndian.Uint32(s.clusterCIDR.IP) ^ binary.BigEndian.Uint32(ip.To4())) >> uint32(32-s.nodeMaskSize)\n\t\tif cidrIndex >= uint32(s.maxCIDRs) {\n\t\t\treturn 0, fmt.Errorf(\"CIDR: %v\/%v is out of the range of CIDR allocator\", ip, s.nodeMaskSize)\n\t\t}\n\t\treturn int(cidrIndex), nil\n\t}\n\tif ip.To16() != nil {\n\t\tbigIP := big.NewInt(0).SetBytes(s.clusterCIDR.IP)\n\t\tbigIP = bigIP.Xor(bigIP, big.NewInt(0).SetBytes(ip))\n\t\tcidrIndexBig := bigIP.Rsh(bigIP, uint(net.IPv6len*8-s.nodeMaskSize))\n\t\tcidrIndex := cidrIndexBig.Uint64()\n\t\tif cidrIndex >= uint64(s.maxCIDRs) {\n\t\t\treturn 0, fmt.Errorf(\"CIDR: %v\/%v is out of the range of CIDR allocator\", ip, s.nodeMaskSize)\n\t\t}\n\t\treturn int(cidrIndex), nil\n\t}\n\n\treturn 0, fmt.Errorf(\"invalid IP: %v\", ip)\n}\n<|endoftext|>"} {"text":"<commit_before>package agent\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tmqtt \"git.eclipse.org\/gitroot\/paho\/org.eclipse.paho.mqtt.golang.git\"\n\t\"github.com\/juju\/loggo\"\n)\n\nvar AlreadyConfigured = errors.New(\"Already configured\")\nvar AlreadyUnConfigured = errors.New(\"Already unconfigured\")\n\n\/\/\n\/\/ Acts as a bridge between local and cloud brokers, this includes reconnecting\n\/\/ and emitting status changes.\n\/\/\n\/\/ Once configured and started this will attempt to connect to connect to local\n\/\/ and cloud brokers, if something dies it will reconnect based on the configured\n\/\/ reconnect backoff.\n\/\/\ntype Bridge struct {\n\tconf *Config\n\tlocal *mqtt.MqttClient\n\tremote *mqtt.MqttClient\n\tlog loggo.Logger\n\n\tlocalTopics []replaceTopic\n\tcloudTopics []replaceTopic\n\n\tcloudUrl *url.URL\n\ttoken string\n\n\ttimer *time.Timer\n\treconnectCh chan bool\n\tshutdownCh chan bool\n\n\tConfigured bool\n\tConnected bool\n\tCounter int64\n\n\tIngressCounter int64\n\tEgressCounter int64\n\n\tIngressBytes int64\n\tEgressBytes int64\n\n\tLastError error\n\n\tbridgeLock sync.Mutex\n}\n\ntype replaceTopic struct {\n\ton string\n\treplace string\n\twith string\n}\n\nfunc (r *replaceTopic) updated(originalTopic string) string {\n\treturn strings.Replace(originalTopic, r.replace, r.with, 1)\n}\n\nvar localTopics = []replaceTopic{\n\t\/\/ location related topics (TODO: move to cloud userspace RPC)\n\t{on: \"$location\/calibration\", replace: \"$location\", with: \"$cloud\/location\"},\n\t{on: \"$location\/delete\", replace: \"$location\", with: \"$cloud\/location\"},\n\t{on: \"$device\/+\/+\/rssi\", replace: \"$device\", with: \"$cloud\/device\"},\n\n\t\/\/ module health statistics\n\t\/\/{on: \"$node\/+\/module\/status\", replace: \"$node\", with: \"$cloud\/node\"},\n\n\t\/\/ cloud userspace RPC requests\n\t{on: \"$ninja\/services\/rpc\/+\/+\", replace: \"$ninja\", with: \"$cloud\/ninja\"},\n\t{on: \"$ninja\/services\/+\", replace: \"$ninja\", with: \"$cloud\/ninja\"},\n\n\t\/\/ temporary alternate topic to distinguish remote device replies from local-destined ones\n\t\/\/ used by the phone app for remote actuations\n\t\/\/ the alternate remote_ topic is to prevent a loopback with the below rule in the other direction\n\t\/\/ TODO: use a tag like $mesh-source to prevent loops (never re-proxy msgs with your source)\n\t{on: \"$device\/+\/channel\/+\/reply\", replace: \"$device\", with: \"$cloud\/remote_device\"},\n\n\t\/\/ push up all local RPC methods in case the cloud is responding,\n\t\/\/ this is currently used for the push notification channel\n\t{on: \"$device\/+\/channel\/+\", replace: \"$device\", with: \"$cloud\/device\"},\n\n\t\/\/ push up state changes to the cloud\n\t{on: \"$device\/+\/channel\/+\/event\/state\", replace: \"$device\", with: \"$cloud\/device\"},\n}\n\nvar cloudTopics = []replaceTopic{\n\t\/\/ location related topics\n\t{on: \"$cloud\/location\/calibration\/progress\", replace: \"$cloud\/location\", with: \"$location\"},\n\t{on: \"$cloud\/device\/+\/+\/location\", replace: \"$cloud\/device\", with: \"$device\"},\n\n\t\/\/ cloud userspace RPC replies\n\t{on: \"$cloud\/ninja\/services\/rpc\/+\/+\/reply\", replace: \"$cloud\/ninja\", with: \"$ninja\"},\n\n\t\/\/ see comment for $device\/+\/channel\/+\/reply above\n\t{on: \"$cloud\/remote_device\/+\/channel\/+\", replace: \"$cloud\/remote_device\", with: \"$device\"},\n\n\t\/\/ allow cloud to announce devices and channels (used for phone on 3G and notification subscription channel)\n\t{on: \"$cloud\/device\/+\/event\/announce\", replace: \"$cloud\/device\", with: \"$device\"},\n\t{on: \"$cloud\/device\/+\/channel\/+\/event\/announce\", replace: \"$cloud\/device\", with: \"$device\"},\n\n\t\/\/ retrieve RPC responses from the cloud,\n\t\/\/ this is currently used for the push notification channel\n\t{on: \"$cloud\/device\/+\/channel\/+\/reply\", replace: \"$cloud\/device\", with: \"$device\"},\n}\n\nfunc createBridge(conf *Config) *Bridge {\n\treturn &Bridge{conf: conf, localTopics: localTopics, cloudTopics: cloudTopics, log: loggo.GetLogger(\"bridge\")}\n}\n\nfunc (b *Bridge) start(cloudUrl string, token string) (err error) {\n\n\tif b.Configured {\n\t\tb.log.Warningf(\"Already configured.\")\n\t\treturn AlreadyConfigured\n\t}\n\n\tdefer b.bridgeLock.Unlock()\n\n\tb.bridgeLock.Lock()\n\n\tb.log.Infof(\"Connecting the bridge\")\n\n\tb.Configured = true\n\n\turl, err := url.Parse(cloudUrl)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb.cloudUrl = url\n\tb.token = token\n\n\tb.reconnectCh = make(chan bool, 1)\n\tb.shutdownCh = make(chan bool, 1)\n\n\tif err = b.connect(); err != nil {\n\t\tb.log.Errorf(\"Connect failed %s\", err)\n\t\tb.scheduleReconnect(err)\n\t}\n\n\tgo b.mainBridgeLoop()\n\n\treturn err\n}\n\nfunc (b *Bridge) stop() error {\n\n\tif !b.Configured {\n\t\tb.log.Warningf(\"Already unconfigured.\")\n\t\treturn AlreadyUnConfigured\n\t}\n\n\tdefer b.bridgeLock.Unlock()\n\n\tb.bridgeLock.Lock()\n\n\tb.log.Infof(\"Disconnecting bridge\")\n\n\tif b.Configured {\n\t\t\/\/ tell the worker to shutdown\n\t\tb.shutdownCh <- true\n\n\t\tb.Configured = false\n\t}\n\n\tb.resetTimer()\n\n\tb.disconnectAll()\n\n\treturn nil\n}\n\nfunc (b *Bridge) connect() (err error) {\n\n\tif b.local, err = b.buildClient(b.conf.LocalUrl, \"\"); err != nil {\n\t\tb.Connected = false\n\t\treturn err\n\t}\n\n\tif b.remote, err = b.buildClient(b.cloudUrl.String(), b.token); err != nil {\n\t\tb.Connected = false\n\t\treturn err\n\t}\n\n\tif err = b.subscriptions(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ we are now connected\n\tb.Connected = true\n\n\treturn nil\n}\n\nfunc (b *Bridge) reconnect() (err error) {\n\n\tif b.local, err = b.buildClient(b.conf.LocalUrl, \"\"); err != nil {\n\t\tb.Connected = false\n\t\treturn err\n\t}\n\n\tif b.remote, err = b.buildClient(b.cloudUrl.String(), b.token); err != nil {\n\t\tb.Connected = false\n\t\treturn err\n\t}\n\n\tif err = b.subscriptions(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ we are now connected\n\tb.Connected = true\n\tb.LastError = nil\n\n\treturn nil\n}\n\nfunc (b *Bridge) subscriptions() (err error) {\n\n\tif err = b.subscribe(b.local, b.remote, b.localTopics, \"local\"); err != nil {\n\t\treturn err\n\t}\n\n\tif err = b.subscribe(b.remote, b.local, b.cloudTopics, \"cloud\"); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}\n\nfunc (b *Bridge) disconnectAll() {\n\tb.log.Infof(\"disconnectAll\")\n\t\/\/ we are now disconnected\n\tb.Connected = false\n\tif b.local != nil && b.local.IsConnected() {\n\t\tb.local.Disconnect(100)\n\t}\n\tif b.remote != nil && b.remote.IsConnected() {\n\t\tb.remote.Disconnect(100)\n\t}\n}\n\nfunc (b *Bridge) mainBridgeLoop() {\n\n\tfor {\n\t\tselect {\n\t\tcase <-b.reconnectCh:\n\t\t\tb.log.Infof(\"reconnecting\")\n\t\t\tif err := b.reconnect(); err != nil {\n\t\t\t\tb.log.Errorf(\"Reconnect failed %s\", err)\n\t\t\t\tb.scheduleReconnect(err)\n\t\t\t}\n\t\tcase <-b.shutdownCh:\n\t\t\tb.log.Infof(\"shutting down bridge\")\n\t\t\treturn\n\t\t}\n\n\t}\n\n}\n\nfunc (b *Bridge) buildClient(server string, token string) (*mqtt.MqttClient, error) {\n\n\tb.log.Infof(\"building client for %s\", server)\n\n\topts := mqtt.NewClientOptions().AddBroker(server).SetTlsConfig(&tls.Config{InsecureSkipVerify: true})\n\n\tif token != \"\" {\n\t\topts.SetUsername(token)\n\t}\n\n\topts.SetClientId(fmt.Sprintf(\"%d\", time.Now().Unix()))\n\n\topts.SetKeepAlive(15) \/\/ set a 15 second ping time for ELB\n\n\t\/\/ pretty much log the reason and quit\n\topts.SetOnConnectionLost(b.onConnectionLoss)\n\n\tclient := mqtt.NewClient(opts)\n\t_, err := client.Start()\n\n\treturn client, err\n}\n\nfunc (b *Bridge) subscribe(src *mqtt.MqttClient, dst *mqtt.MqttClient, topics []replaceTopic, tag string) (err error) {\n\n\tfor _, topic := range topics {\n\n\t\ttopicFilter, _ := mqtt.NewTopicFilter(topic.on, 0)\n\t\tb.log.Infof(\"(%s) subscribed to %s\", tag, topic.on)\n\n\t\tif receipt, err := src.StartSubscription(b.buildHandler(topic, tag, dst), topicFilter); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\t<-receipt\n\t\t\tb.log.Infof(\"(%s) subscribed to %+v\", tag, topicFilter)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (b *Bridge) unsubscribe(client *mqtt.MqttClient, topics []replaceTopic, tag string) {\n\n\ttopicNames := []string{}\n\n\tfor _, topic := range topics {\n\t\ttopicNames = append(topicNames, topic.on)\n\t}\n\n\tb.log.Infof(\"(%s) unsubscribed to %s\", tag, topicNames)\n\tclient.EndSubscription(topicNames...)\n}\n\nfunc (b *Bridge) buildHandler(topic replaceTopic, tag string, dst *mqtt.MqttClient) mqtt.MessageHandler {\n\treturn func(src *mqtt.MqttClient, msg mqtt.Message) {\n\t\tif b.log.IsDebugEnabled() {\n\t\t\tb.log.Debugf(\"(%s) topic: %s updated: %s len: %d\", tag, msg.Topic(), topic.updated(msg.Topic()), len(msg.Payload()))\n\t\t}\n\t\tb.updateCounters(tag, msg)\n\t\tpayload := b.updateSource(msg.Payload(), b.buildSource(tag))\n\t\tdst.PublishMessage(topic.updated(msg.Topic()), mqtt.NewMessage(payload))\n\t}\n}\n\nfunc (b *Bridge) scheduleReconnect(reason error) {\n\tb.LastError = reason\n\tb.disconnectAll()\n\tb.resetTimer()\n\n\tswitch reason {\n\tcase mqtt.ErrBadCredentials:\n\t\tb.log.Warningf(\"Reconnect failed trying again in 30s\")\n\n\t\tb.timer = time.AfterFunc(30*time.Second, func() {\n\t\t\tb.reconnectCh <- true\n\t\t})\n\n\tdefault:\n\t\tb.log.Warningf(\"Reconnect failed trying again in 5s\")\n\t\t\/\/ TODO add exponential backoff\n\t\tb.timer = time.AfterFunc(5*time.Second, func() {\n\t\t\tb.reconnectCh <- true\n\t\t})\n\t}\n\n}\n\nfunc (b *Bridge) resetTimer() {\n\tif b.timer != nil {\n\t\tb.timer.Stop()\n\t}\n}\n\nfunc (b *Bridge) onConnectionLoss(client *mqtt.MqttClient, reason error) {\n\tb.log.Errorf(\"Connection failed %s\", reason)\n\n\t\/\/ we are now disconnected\n\tb.Connected = false\n\n\tb.scheduleReconnect(reason)\n\n}\n\nfunc (b *Bridge) IsConnected() bool {\n\tif b.remote == nil || b.local == nil {\n\t\treturn false\n\t}\n\treturn (b.remote.IsConnected() && b.local.IsConnected())\n}\n\nfunc (b *Bridge) buildSource(tag string) string {\n\n\tswitch tag {\n\tcase \"local\":\n\t\treturn b.conf.SerialNo\n\tcase \"cloud\":\n\t\treturn \"cloud-\" + strings.Replace(b.cloudUrl.Host, \".\", \"_\", -1) \/\/ encoded to look less wierd\n\t}\n\n\treturn \"\"\n}\n\nfunc (b *Bridge) updateSource(payload []byte, source string) []byte {\n\n\tif !bytes.Contains(payload, []byte(\"$mesh-source\")) {\n\t\tpayload = bytes.Replace(payload, []byte(\"{\"), []byte(`{\"$mesh-source\":\"`+source+`\", `), 1)\n\t}\n\n\tb.log.Debugf(\"msg %s\", string(payload))\n\n\treturn payload\n}\n\nfunc (b *Bridge) updateCounters(tag string, msg mqtt.Message) {\n\tswitch tag {\n\tcase \"local\":\n\t\tb.EgressCounter++\n\t\tb.EgressBytes += int64(len(msg.Bytes())) \/\/ message size not payload size\n\tcase \"cloud\":\n\t\tb.IngressCounter++\n\t\tb.IngressBytes += int64(len(msg.Bytes())) \/\/ message size not payload size\n\t}\n\n}\n<commit_msg>Add $device\/+\/channel\/+\/event\/controlstate to the cloud so that we can observe these.<commit_after>package agent\n\nimport (\n\t\"bytes\"\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tmqtt \"git.eclipse.org\/gitroot\/paho\/org.eclipse.paho.mqtt.golang.git\"\n\t\"github.com\/juju\/loggo\"\n)\n\nvar AlreadyConfigured = errors.New(\"Already configured\")\nvar AlreadyUnConfigured = errors.New(\"Already unconfigured\")\n\n\/\/\n\/\/ Acts as a bridge between local and cloud brokers, this includes reconnecting\n\/\/ and emitting status changes.\n\/\/\n\/\/ Once configured and started this will attempt to connect to connect to local\n\/\/ and cloud brokers, if something dies it will reconnect based on the configured\n\/\/ reconnect backoff.\n\/\/\ntype Bridge struct {\n\tconf *Config\n\tlocal *mqtt.MqttClient\n\tremote *mqtt.MqttClient\n\tlog loggo.Logger\n\n\tlocalTopics []replaceTopic\n\tcloudTopics []replaceTopic\n\n\tcloudUrl *url.URL\n\ttoken string\n\n\ttimer *time.Timer\n\treconnectCh chan bool\n\tshutdownCh chan bool\n\n\tConfigured bool\n\tConnected bool\n\tCounter int64\n\n\tIngressCounter int64\n\tEgressCounter int64\n\n\tIngressBytes int64\n\tEgressBytes int64\n\n\tLastError error\n\n\tbridgeLock sync.Mutex\n}\n\ntype replaceTopic struct {\n\ton string\n\treplace string\n\twith string\n}\n\nfunc (r *replaceTopic) updated(originalTopic string) string {\n\treturn strings.Replace(originalTopic, r.replace, r.with, 1)\n}\n\nvar localTopics = []replaceTopic{\n\t\/\/ location related topics (TODO: move to cloud userspace RPC)\n\t{on: \"$location\/calibration\", replace: \"$location\", with: \"$cloud\/location\"},\n\t{on: \"$location\/delete\", replace: \"$location\", with: \"$cloud\/location\"},\n\t{on: \"$device\/+\/+\/rssi\", replace: \"$device\", with: \"$cloud\/device\"},\n\n\t\/\/ module health statistics\n\t\/\/{on: \"$node\/+\/module\/status\", replace: \"$node\", with: \"$cloud\/node\"},\n\n\t\/\/ cloud userspace RPC requests\n\t{on: \"$ninja\/services\/rpc\/+\/+\", replace: \"$ninja\", with: \"$cloud\/ninja\"},\n\t{on: \"$ninja\/services\/+\", replace: \"$ninja\", with: \"$cloud\/ninja\"},\n\n\t\/\/ temporary alternate topic to distinguish remote device replies from local-destined ones\n\t\/\/ used by the phone app for remote actuations\n\t\/\/ the alternate remote_ topic is to prevent a loopback with the below rule in the other direction\n\t\/\/ TODO: use a tag like $mesh-source to prevent loops (never re-proxy msgs with your source)\n\t{on: \"$device\/+\/channel\/+\/reply\", replace: \"$device\", with: \"$cloud\/remote_device\"},\n\n\t\/\/ push up all local RPC methods in case the cloud is responding,\n\t\/\/ this is currently used for the push notification channel\n\t{on: \"$device\/+\/channel\/+\", replace: \"$device\", with: \"$cloud\/device\"},\n\n\t\/\/ push up state changes to the cloud\n\t{on: \"$device\/+\/channel\/+\/event\/state\", replace: \"$device\", with: \"$cloud\/device\"},\n\n\t\/\/ push up control state changes to the cloud\n\t{on: \"$device\/+\/channel\/+\/event\/controlstate\", replace: \"$device\", with: \"$cloud\/device\"},\n}\n\nvar cloudTopics = []replaceTopic{\n\t\/\/ location related topics\n\t{on: \"$cloud\/location\/calibration\/progress\", replace: \"$cloud\/location\", with: \"$location\"},\n\t{on: \"$cloud\/device\/+\/+\/location\", replace: \"$cloud\/device\", with: \"$device\"},\n\n\t\/\/ cloud userspace RPC replies\n\t{on: \"$cloud\/ninja\/services\/rpc\/+\/+\/reply\", replace: \"$cloud\/ninja\", with: \"$ninja\"},\n\n\t\/\/ see comment for $device\/+\/channel\/+\/reply above\n\t{on: \"$cloud\/remote_device\/+\/channel\/+\", replace: \"$cloud\/remote_device\", with: \"$device\"},\n\n\t\/\/ allow cloud to announce devices and channels (used for phone on 3G and notification subscription channel)\n\t{on: \"$cloud\/device\/+\/event\/announce\", replace: \"$cloud\/device\", with: \"$device\"},\n\t{on: \"$cloud\/device\/+\/channel\/+\/event\/announce\", replace: \"$cloud\/device\", with: \"$device\"},\n\n\t\/\/ retrieve RPC responses from the cloud,\n\t\/\/ this is currently used for the push notification channel\n\t{on: \"$cloud\/device\/+\/channel\/+\/reply\", replace: \"$cloud\/device\", with: \"$device\"},\n}\n\nfunc createBridge(conf *Config) *Bridge {\n\treturn &Bridge{conf: conf, localTopics: localTopics, cloudTopics: cloudTopics, log: loggo.GetLogger(\"bridge\")}\n}\n\nfunc (b *Bridge) start(cloudUrl string, token string) (err error) {\n\n\tif b.Configured {\n\t\tb.log.Warningf(\"Already configured.\")\n\t\treturn AlreadyConfigured\n\t}\n\n\tdefer b.bridgeLock.Unlock()\n\n\tb.bridgeLock.Lock()\n\n\tb.log.Infof(\"Connecting the bridge\")\n\n\tb.Configured = true\n\n\turl, err := url.Parse(cloudUrl)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb.cloudUrl = url\n\tb.token = token\n\n\tb.reconnectCh = make(chan bool, 1)\n\tb.shutdownCh = make(chan bool, 1)\n\n\tif err = b.connect(); err != nil {\n\t\tb.log.Errorf(\"Connect failed %s\", err)\n\t\tb.scheduleReconnect(err)\n\t}\n\n\tgo b.mainBridgeLoop()\n\n\treturn err\n}\n\nfunc (b *Bridge) stop() error {\n\n\tif !b.Configured {\n\t\tb.log.Warningf(\"Already unconfigured.\")\n\t\treturn AlreadyUnConfigured\n\t}\n\n\tdefer b.bridgeLock.Unlock()\n\n\tb.bridgeLock.Lock()\n\n\tb.log.Infof(\"Disconnecting bridge\")\n\n\tif b.Configured {\n\t\t\/\/ tell the worker to shutdown\n\t\tb.shutdownCh <- true\n\n\t\tb.Configured = false\n\t}\n\n\tb.resetTimer()\n\n\tb.disconnectAll()\n\n\treturn nil\n}\n\nfunc (b *Bridge) connect() (err error) {\n\n\tif b.local, err = b.buildClient(b.conf.LocalUrl, \"\"); err != nil {\n\t\tb.Connected = false\n\t\treturn err\n\t}\n\n\tif b.remote, err = b.buildClient(b.cloudUrl.String(), b.token); err != nil {\n\t\tb.Connected = false\n\t\treturn err\n\t}\n\n\tif err = b.subscriptions(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ we are now connected\n\tb.Connected = true\n\n\treturn nil\n}\n\nfunc (b *Bridge) reconnect() (err error) {\n\n\tif b.local, err = b.buildClient(b.conf.LocalUrl, \"\"); err != nil {\n\t\tb.Connected = false\n\t\treturn err\n\t}\n\n\tif b.remote, err = b.buildClient(b.cloudUrl.String(), b.token); err != nil {\n\t\tb.Connected = false\n\t\treturn err\n\t}\n\n\tif err = b.subscriptions(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ we are now connected\n\tb.Connected = true\n\tb.LastError = nil\n\n\treturn nil\n}\n\nfunc (b *Bridge) subscriptions() (err error) {\n\n\tif err = b.subscribe(b.local, b.remote, b.localTopics, \"local\"); err != nil {\n\t\treturn err\n\t}\n\n\tif err = b.subscribe(b.remote, b.local, b.cloudTopics, \"cloud\"); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}\n\nfunc (b *Bridge) disconnectAll() {\n\tb.log.Infof(\"disconnectAll\")\n\t\/\/ we are now disconnected\n\tb.Connected = false\n\tif b.local != nil && b.local.IsConnected() {\n\t\tb.local.Disconnect(100)\n\t}\n\tif b.remote != nil && b.remote.IsConnected() {\n\t\tb.remote.Disconnect(100)\n\t}\n}\n\nfunc (b *Bridge) mainBridgeLoop() {\n\n\tfor {\n\t\tselect {\n\t\tcase <-b.reconnectCh:\n\t\t\tb.log.Infof(\"reconnecting\")\n\t\t\tif err := b.reconnect(); err != nil {\n\t\t\t\tb.log.Errorf(\"Reconnect failed %s\", err)\n\t\t\t\tb.scheduleReconnect(err)\n\t\t\t}\n\t\tcase <-b.shutdownCh:\n\t\t\tb.log.Infof(\"shutting down bridge\")\n\t\t\treturn\n\t\t}\n\n\t}\n\n}\n\nfunc (b *Bridge) buildClient(server string, token string) (*mqtt.MqttClient, error) {\n\n\tb.log.Infof(\"building client for %s\", server)\n\n\topts := mqtt.NewClientOptions().AddBroker(server).SetTlsConfig(&tls.Config{InsecureSkipVerify: true})\n\n\tif token != \"\" {\n\t\topts.SetUsername(token)\n\t}\n\n\topts.SetClientId(fmt.Sprintf(\"%d\", time.Now().Unix()))\n\n\topts.SetKeepAlive(15) \/\/ set a 15 second ping time for ELB\n\n\t\/\/ pretty much log the reason and quit\n\topts.SetOnConnectionLost(b.onConnectionLoss)\n\n\tclient := mqtt.NewClient(opts)\n\t_, err := client.Start()\n\n\treturn client, err\n}\n\nfunc (b *Bridge) subscribe(src *mqtt.MqttClient, dst *mqtt.MqttClient, topics []replaceTopic, tag string) (err error) {\n\n\tfor _, topic := range topics {\n\n\t\ttopicFilter, _ := mqtt.NewTopicFilter(topic.on, 0)\n\t\tb.log.Infof(\"(%s) subscribed to %s\", tag, topic.on)\n\n\t\tif receipt, err := src.StartSubscription(b.buildHandler(topic, tag, dst), topicFilter); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\t<-receipt\n\t\t\tb.log.Infof(\"(%s) subscribed to %+v\", tag, topicFilter)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (b *Bridge) unsubscribe(client *mqtt.MqttClient, topics []replaceTopic, tag string) {\n\n\ttopicNames := []string{}\n\n\tfor _, topic := range topics {\n\t\ttopicNames = append(topicNames, topic.on)\n\t}\n\n\tb.log.Infof(\"(%s) unsubscribed to %s\", tag, topicNames)\n\tclient.EndSubscription(topicNames...)\n}\n\nfunc (b *Bridge) buildHandler(topic replaceTopic, tag string, dst *mqtt.MqttClient) mqtt.MessageHandler {\n\treturn func(src *mqtt.MqttClient, msg mqtt.Message) {\n\t\tif b.log.IsDebugEnabled() {\n\t\t\tb.log.Debugf(\"(%s) topic: %s updated: %s len: %d\", tag, msg.Topic(), topic.updated(msg.Topic()), len(msg.Payload()))\n\t\t}\n\t\tb.updateCounters(tag, msg)\n\t\tpayload := b.updateSource(msg.Payload(), b.buildSource(tag))\n\t\tdst.PublishMessage(topic.updated(msg.Topic()), mqtt.NewMessage(payload))\n\t}\n}\n\nfunc (b *Bridge) scheduleReconnect(reason error) {\n\tb.LastError = reason\n\tb.disconnectAll()\n\tb.resetTimer()\n\n\tswitch reason {\n\tcase mqtt.ErrBadCredentials:\n\t\tb.log.Warningf(\"Reconnect failed trying again in 30s\")\n\n\t\tb.timer = time.AfterFunc(30*time.Second, func() {\n\t\t\tb.reconnectCh <- true\n\t\t})\n\n\tdefault:\n\t\tb.log.Warningf(\"Reconnect failed trying again in 5s\")\n\t\t\/\/ TODO add exponential backoff\n\t\tb.timer = time.AfterFunc(5*time.Second, func() {\n\t\t\tb.reconnectCh <- true\n\t\t})\n\t}\n\n}\n\nfunc (b *Bridge) resetTimer() {\n\tif b.timer != nil {\n\t\tb.timer.Stop()\n\t}\n}\n\nfunc (b *Bridge) onConnectionLoss(client *mqtt.MqttClient, reason error) {\n\tb.log.Errorf(\"Connection failed %s\", reason)\n\n\t\/\/ we are now disconnected\n\tb.Connected = false\n\n\tb.scheduleReconnect(reason)\n\n}\n\nfunc (b *Bridge) IsConnected() bool {\n\tif b.remote == nil || b.local == nil {\n\t\treturn false\n\t}\n\treturn (b.remote.IsConnected() && b.local.IsConnected())\n}\n\nfunc (b *Bridge) buildSource(tag string) string {\n\n\tswitch tag {\n\tcase \"local\":\n\t\treturn b.conf.SerialNo\n\tcase \"cloud\":\n\t\treturn \"cloud-\" + strings.Replace(b.cloudUrl.Host, \".\", \"_\", -1) \/\/ encoded to look less wierd\n\t}\n\n\treturn \"\"\n}\n\nfunc (b *Bridge) updateSource(payload []byte, source string) []byte {\n\n\tif !bytes.Contains(payload, []byte(\"$mesh-source\")) {\n\t\tpayload = bytes.Replace(payload, []byte(\"{\"), []byte(`{\"$mesh-source\":\"`+source+`\", `), 1)\n\t}\n\n\tb.log.Debugf(\"msg %s\", string(payload))\n\n\treturn payload\n}\n\nfunc (b *Bridge) updateCounters(tag string, msg mqtt.Message) {\n\tswitch tag {\n\tcase \"local\":\n\t\tb.EgressCounter++\n\t\tb.EgressBytes += int64(len(msg.Bytes())) \/\/ message size not payload size\n\tcase \"cloud\":\n\t\tb.IngressCounter++\n\t\tb.IngressBytes += int64(len(msg.Bytes())) \/\/ message size not payload size\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package command\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/mitchellh\/cli\"\n\tv1auth \"github.com\/nerdalize\/nerd\/nerd\/client\/auth\/v1\"\n\t\"github.com\/nerdalize\/nerd\/nerd\/oauth\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/WorkerStartOpts describes command options\ntype WorkerStartOpts struct {\n\tEnv []string `long:\"env\" short:\"e\" description:\"environment variables\"`\n}\n\n\/\/WorkerStart command\ntype WorkerStart struct {\n\t*command\n\topts *WorkerStartOpts\n}\n\n\/\/WorkerStartFactory returns a factory method for the join command\nfunc WorkerStartFactory() (cli.Command, error) {\n\topts := &WorkerStartOpts{}\n\tcomm, err := newCommand(\"nerd worker start <image>\", \"provision a new worker to provide compute\", \"\", opts)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create command\")\n\t}\n\tcmd := &WorkerStart{\n\t\tcommand: comm,\n\t\topts: opts,\n\t}\n\tcmd.runFunc = cmd.DoRun\n\n\treturn cmd, nil\n}\n\n\/\/DoRun is called by run and allows an error to be returned\nfunc (cmd *WorkerStart) DoRun(args []string) (err error) {\n\tif len(args) < 1 {\n\t\treturn fmt.Errorf(\"not enough arguments, see --help\")\n\t}\n\n\t\/\/fetching a worker JWT\n\tauthbase, err := url.Parse(cmd.config.Auth.APIEndpoint)\n\tif err != nil {\n\t\tHandleError(errors.Wrapf(err, \"auth endpoint '%v' is not a valid URL\", cmd.config.Auth.APIEndpoint))\n\t}\n\n\tauthOpsClient := v1auth.NewOpsClient(v1auth.OpsClientConfig{\n\t\tBase: authbase,\n\t\tLogger: logrus.StandardLogger(),\n\t})\n\n\tauthclient := v1auth.NewClient(v1auth.ClientConfig{\n\t\tBase: authbase,\n\t\tLogger: logrus.StandardLogger(),\n\t\tOAuthTokenProvider: oauth.NewConfigProvider(authOpsClient, cmd.config.Auth.ClientID, cmd.session),\n\t})\n\n\tss, err := cmd.session.Read()\n\tif err != nil {\n\t\tHandleError(err)\n\t}\n\n\tworkerJWT, err := authclient.GetWorkerJWT(ss.Project.Name, v1auth.NCEScope)\n\tif err != nil {\n\t\tHandleError(errors.Wrap(err, \"failed to get worker JWT\"))\n\t}\n\n\tbclient, err := NewClient(cmd.ui, cmd.config, cmd.session)\n\tif err != nil {\n\t\tHandleError(err)\n\t}\n\n\twenv := map[string]string{}\n\tfor _, l := range cmd.opts.Env {\n\t\tsplit := strings.SplitN(l, \"=\", 2)\n\t\tif len(split) < 2 {\n\t\t\tHandleError(fmt.Errorf(\"invalid environment variable format, expected 'FOO=bar' fromat, got: %v\", l))\n\t\t}\n\t\twenv[split[0]] = split[1]\n\t}\n\n\twenv[\"NERD_WORKER_TOKEN\"] = workerJWT.Token\n\twenv[\"NERD_WORKER_SECRET\"] = workerJWT.Secret\n\n\tworker, err := bclient.StartWorker(ss.Project.Name, args[0], wenv)\n\tif err != nil {\n\t\tHandleError(err)\n\t}\n\n\tlogrus.Infof(\"Worker Started: %v\", worker)\n\treturn nil\n}\n<commit_msg>worker start now uses the jwt env provider env variables<commit_after>package command\n\nimport (\n\t\"fmt\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/mitchellh\/cli\"\n\tv1auth \"github.com\/nerdalize\/nerd\/nerd\/client\/auth\/v1\"\n\t\"github.com\/nerdalize\/nerd\/nerd\/jwt\"\n\t\"github.com\/nerdalize\/nerd\/nerd\/oauth\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/WorkerStartOpts describes command options\ntype WorkerStartOpts struct {\n\tEnv []string `long:\"env\" short:\"e\" description:\"environment variables\"`\n}\n\n\/\/WorkerStart command\ntype WorkerStart struct {\n\t*command\n\topts *WorkerStartOpts\n}\n\n\/\/WorkerStartFactory returns a factory method for the join command\nfunc WorkerStartFactory() (cli.Command, error) {\n\topts := &WorkerStartOpts{}\n\tcomm, err := newCommand(\"nerd worker start <image>\", \"provision a new worker to provide compute\", \"\", opts)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to create command\")\n\t}\n\tcmd := &WorkerStart{\n\t\tcommand: comm,\n\t\topts: opts,\n\t}\n\tcmd.runFunc = cmd.DoRun\n\n\treturn cmd, nil\n}\n\n\/\/DoRun is called by run and allows an error to be returned\nfunc (cmd *WorkerStart) DoRun(args []string) (err error) {\n\tif len(args) < 1 {\n\t\treturn fmt.Errorf(\"not enough arguments, see --help\")\n\t}\n\n\t\/\/fetching a worker JWT\n\tauthbase, err := url.Parse(cmd.config.Auth.APIEndpoint)\n\tif err != nil {\n\t\tHandleError(errors.Wrapf(err, \"auth endpoint '%v' is not a valid URL\", cmd.config.Auth.APIEndpoint))\n\t}\n\n\tauthOpsClient := v1auth.NewOpsClient(v1auth.OpsClientConfig{\n\t\tBase: authbase,\n\t\tLogger: logrus.StandardLogger(),\n\t})\n\n\tauthclient := v1auth.NewClient(v1auth.ClientConfig{\n\t\tBase: authbase,\n\t\tLogger: logrus.StandardLogger(),\n\t\tOAuthTokenProvider: oauth.NewConfigProvider(authOpsClient, cmd.config.Auth.ClientID, cmd.session),\n\t})\n\n\tss, err := cmd.session.Read()\n\tif err != nil {\n\t\tHandleError(err)\n\t}\n\n\tworkerJWT, err := authclient.GetWorkerJWT(ss.Project.Name, v1auth.NCEScope)\n\tif err != nil {\n\t\tHandleError(errors.Wrap(err, \"failed to get worker JWT\"))\n\t}\n\n\tbclient, err := NewClient(cmd.ui, cmd.config, cmd.session)\n\tif err != nil {\n\t\tHandleError(err)\n\t}\n\n\twenv := map[string]string{}\n\tfor _, l := range cmd.opts.Env {\n\t\tsplit := strings.SplitN(l, \"=\", 2)\n\t\tif len(split) < 2 {\n\t\t\tHandleError(fmt.Errorf(\"invalid environment variable format, expected 'FOO=bar' fromat, got: %v\", l))\n\t\t}\n\t\twenv[split[0]] = split[1]\n\t}\n\n\twenv[jwt.NerdTokenEnvVar] = workerJWT.Token\n\twenv[jwt.NerdSecretEnvVar] = workerJWT.Secret\n\n\tworker, err := bclient.StartWorker(ss.Project.Name, args[0], wenv)\n\tif err != nil {\n\t\tHandleError(err)\n\t}\n\n\tlogrus.Infof(\"Worker Started: %v\", worker)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package openrtb3\n\n\/\/ NoBidReason lists the options for a bidder to signal the exchange as to why it did not offer a bid for the item.\ntype NoBidReason int64\n\n\/\/ NoBidReason options.\n\/\/\n\/\/ Values 500+ are exchange specific values; should be communicated with buyers beforehand.\nconst (\n\tNoBidUnknownError NoBidReason = 0 \/\/ Unknown Error\n\tNoBidTechnicalError NoBidReason = 1 \/\/ Technical Error\n\tNoBidInvalidRequest NoBidReason = 2 \/\/ Invalid Request\n\tNoBidCrawler NoBidReason = 3 \/\/ Known Web Crawler\n\tNoBidNonHuman NoBidReason = 4 \/\/ Suspected Non-Human Traffic\n\tNoBidProxy NoBidReason = 5 \/\/ Cloud, Data Center, or Proxy IP\n\tNoBidUnsupportedDevice NoBidReason = 6 \/\/ Unsupported Device\n\tNoBidBlockedPublisher NoBidReason = 7 \/\/ Blocked Publisher or Site\n\tNoBidUnmatchedUser NoBidReason = 8 \/\/ Unmatched User\n\tNoBidDailyUserCap NoBidReason = 9 \/\/ Daily User Cap Met\n\tNoBidDailyDomainCap NoBidReason = 10 \/\/ Daily Domain Cap Met\n\tNoBidAuthorizationUnavailable NoBidReason = 11 \/\/ Ads.txt Authorization Unavailable\n\tNoBidAuthorizationViolation NoBidReason = 12 \/\/ Ads.txt Authorization Violation\n\tNoBidAuthenticationUnavailable NoBidReason = 13 \/\/ Ads.cert Authentication Unavailable\n\tNoBidAuthenticationViolation NoBidReason = 14 \/\/ Ads.cert Authentication Violation\n\tNoBidInsufficientTime NoBidReason = 15 \/\/ Insufficient Auction Time\n\tNoBidIncompleteSupplyChain NoBidReason = 16 \/\/ Incomplete SupplyChain\n\tNoBidBlockedSupplyChainNode NoBidReason = 17 \/\/ Blocked SupplyChain Node\n)\n<commit_msg>openrtb3.NoBidReason pointer helpers<commit_after>package openrtb3\n\n\/\/ NoBidReason lists the options for a bidder to signal the exchange as to why it did not offer a bid for the item.\ntype NoBidReason int64\n\n\/\/ NoBidReason options.\n\/\/\n\/\/ Values 500+ are exchange specific values; should be communicated with buyers beforehand.\nconst (\n\tNoBidUnknownError NoBidReason = 0 \/\/ Unknown Error\n\tNoBidTechnicalError NoBidReason = 1 \/\/ Technical Error\n\tNoBidInvalidRequest NoBidReason = 2 \/\/ Invalid Request\n\tNoBidCrawler NoBidReason = 3 \/\/ Known Web Crawler\n\tNoBidNonHuman NoBidReason = 4 \/\/ Suspected Non-Human Traffic\n\tNoBidProxy NoBidReason = 5 \/\/ Cloud, Data Center, or Proxy IP\n\tNoBidUnsupportedDevice NoBidReason = 6 \/\/ Unsupported Device\n\tNoBidBlockedPublisher NoBidReason = 7 \/\/ Blocked Publisher or Site\n\tNoBidUnmatchedUser NoBidReason = 8 \/\/ Unmatched User\n\tNoBidDailyUserCap NoBidReason = 9 \/\/ Daily User Cap Met\n\tNoBidDailyDomainCap NoBidReason = 10 \/\/ Daily Domain Cap Met\n\tNoBidAuthorizationUnavailable NoBidReason = 11 \/\/ Ads.txt Authorization Unavailable\n\tNoBidAuthorizationViolation NoBidReason = 12 \/\/ Ads.txt Authorization Violation\n\tNoBidAuthenticationUnavailable NoBidReason = 13 \/\/ Ads.cert Authentication Unavailable\n\tNoBidAuthenticationViolation NoBidReason = 14 \/\/ Ads.cert Authentication Violation\n\tNoBidInsufficientTime NoBidReason = 15 \/\/ Insufficient Auction Time\n\tNoBidIncompleteSupplyChain NoBidReason = 16 \/\/ Incomplete SupplyChain\n\tNoBidBlockedSupplyChainNode NoBidReason = 17 \/\/ Blocked SupplyChain Node\n)\n\n\/\/ Ptr returns pointer to own value.\nfunc (n NoBidReason) Ptr() *NoBidReason {\n\treturn &n\n}\n\n\/\/ Val safely dereferences pointer, returning default value (NoBidUnknownError) for nil.\nfunc (n *NoBidReason) Val() NoBidReason {\n\tif n == nil {\n\t\treturn NoBidUnknownError\n\t}\n\treturn *n\n}\n<|endoftext|>"} {"text":"<commit_before>package terraform\n\nimport (\n\t\"fmt\"\n\t\"log\"\n)\n\n\/\/ EvalCompareDiff is an EvalNode implementation that compares two diffs\n\/\/ and errors if the diffs are not equal.\ntype EvalCompareDiff struct {\n\tInfo *InstanceInfo\n\tOne, Two **InstanceDiff\n}\n\n\/\/ TODO: test\nfunc (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) {\n\tone, two := *n.One, *n.Two\n\n\t\/\/ If either are nil, let them be empty\n\tif one == nil {\n\t\tone = new(InstanceDiff)\n\t\tone.init()\n\t}\n\tif two == nil {\n\t\ttwo = new(InstanceDiff)\n\t\ttwo.init()\n\t}\n\toneId := one.Attributes[\"id\"]\n\ttwoId := two.Attributes[\"id\"]\n\tdelete(one.Attributes, \"id\")\n\tdelete(two.Attributes, \"id\")\n\tdefer func() {\n\t\tif oneId != nil {\n\t\t\tone.Attributes[\"id\"] = oneId\n\t\t}\n\t\tif twoId != nil {\n\t\t\ttwo.Attributes[\"id\"] = twoId\n\t\t}\n\t}()\n\n\tif same, reason := one.Same(two); !same {\n\t\tlog.Printf(\"[ERROR] %s: diffs didn't match\", n.Info.Id)\n\t\tlog.Printf(\"[ERROR] %s: reason: %s\", n.Info.Id, reason)\n\t\tlog.Printf(\"[ERROR] %s: diff one: %#v\", n.Info.Id, one)\n\t\tlog.Printf(\"[ERROR] %s: diff two: %#v\", n.Info.Id, two)\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"%s: diffs didn't match during apply. This is a bug with \"+\n\t\t\t\t\"Terraform and should be reported.\", n.Info.Id)\n\t}\n\n\treturn nil, nil\n}\n\n\/\/ EvalDiff is an EvalNode implementation that does a refresh for\n\/\/ a resource.\ntype EvalDiff struct {\n\tInfo *InstanceInfo\n\tConfig **ResourceConfig\n\tProvider *ResourceProvider\n\tState **InstanceState\n\tOutput **InstanceDiff\n\tOutputState **InstanceState\n}\n\n\/\/ TODO: test\nfunc (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {\n\tstate := *n.State\n\tconfig := *n.Config\n\tprovider := *n.Provider\n\n\t\/\/ Call pre-diff hook\n\terr := ctx.Hook(func(h Hook) (HookAction, error) {\n\t\treturn h.PreDiff(n.Info, state)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ The state for the diff must never be nil\n\tdiffState := state\n\tif diffState == nil {\n\t\tdiffState = new(InstanceState)\n\t}\n\tdiffState.init()\n\n\t\/\/ Diff!\n\tdiff, err := provider.Diff(n.Info, diffState, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif diff == nil {\n\t\tdiff = new(InstanceDiff)\n\t}\n\n\t\/\/ Require a destroy if there is no ID and it requires new.\n\tif diff.RequiresNew() && state != nil && state.ID != \"\" {\n\t\tdiff.Destroy = true\n\t}\n\n\t\/\/ If we're creating a new resource, compute its ID\n\tif diff.RequiresNew() || state == nil || state.ID == \"\" {\n\t\tvar oldID string\n\t\tif state != nil {\n\t\t\toldID = state.Attributes[\"id\"]\n\t\t}\n\n\t\t\/\/ Add diff to compute new ID\n\t\tdiff.init()\n\t\tdiff.Attributes[\"id\"] = &ResourceAttrDiff{\n\t\t\tOld: oldID,\n\t\t\tNewComputed: true,\n\t\t\tRequiresNew: true,\n\t\t\tType: DiffAttrOutput,\n\t\t}\n\t}\n\n\t\/\/ Call post-refresh hook\n\terr = ctx.Hook(func(h Hook) (HookAction, error) {\n\t\treturn h.PostDiff(n.Info, diff)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Update our output\n\t*n.Output = diff\n\n\t\/\/ Update the state if we care\n\tif n.OutputState != nil {\n\t\t*n.OutputState = state\n\n\t\t\/\/ Merge our state so that the state is updated with our plan\n\t\tif !diff.Empty() && n.OutputState != nil {\n\t\t\t*n.OutputState = state.MergeDiff(diff)\n\t\t}\n\t}\n\n\treturn nil, nil\n}\n\n\/\/ EvalDiffDestroy is an EvalNode implementation that returns a plain\n\/\/ destroy diff.\ntype EvalDiffDestroy struct {\n\tInfo *InstanceInfo\n\tState **InstanceState\n\tOutput **InstanceDiff\n}\n\n\/\/ TODO: test\nfunc (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {\n\tstate := *n.State\n\n\t\/\/ If there is no state or we don't have an ID, we're already destroyed\n\tif state == nil || state.ID == \"\" {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Call pre-diff hook\n\terr := ctx.Hook(func(h Hook) (HookAction, error) {\n\t\treturn h.PreDiff(n.Info, state)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ The diff\n\tdiff := &InstanceDiff{Destroy: true}\n\n\t\/\/ Call post-diff hook\n\terr = ctx.Hook(func(h Hook) (HookAction, error) {\n\t\treturn h.PostDiff(n.Info, diff)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Update our output\n\t*n.Output = diff\n\n\treturn nil, nil\n}\n\n\/\/ EvalDiffDestroyModule is an EvalNode implementation that writes the diff to\n\/\/ the full diff.\ntype EvalDiffDestroyModule struct {\n\tPath []string\n}\n\n\/\/ TODO: test\nfunc (n *EvalDiffDestroyModule) Eval(ctx EvalContext) (interface{}, error) {\n\tdiff, lock := ctx.Diff()\n\n\t\/\/ Acquire the lock so that we can do this safely concurrently\n\tlock.Lock()\n\tdefer lock.Unlock()\n\n\t\/\/ Write the diff\n\tmodDiff := diff.ModuleByPath(n.Path)\n\tif modDiff == nil {\n\t\tmodDiff = diff.AddModule(n.Path)\n\t}\n\tmodDiff.Destroy = true\n\n\treturn nil, nil\n}\n\n\/\/ EvalDiffTainted is an EvalNode implementation that writes the diff to\n\/\/ the full diff.\ntype EvalDiffTainted struct {\n\tName string\n\tDiff **InstanceDiff\n}\n\n\/\/ TODO: test\nfunc (n *EvalDiffTainted) Eval(ctx EvalContext) (interface{}, error) {\n\tstate, lock := ctx.State()\n\n\t\/\/ Get a read lock so we can access this instance\n\tlock.RLock()\n\tdefer lock.RUnlock()\n\n\t\/\/ Look for the module state. If we don't have one, then it doesn't matter.\n\tmod := state.ModuleByPath(ctx.Path())\n\tif mod == nil {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Look for the resource state. If we don't have one, then it is okay.\n\trs := mod.Resources[n.Name]\n\tif rs == nil {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ If we have tainted, then mark it on the diff\n\tif len(rs.Tainted) > 0 {\n\t\t(*n.Diff).DestroyTainted = true\n\t}\n\n\treturn nil, nil\n}\n\n\/\/ EvalFilterDiff is an EvalNode implementation that filters the diff\n\/\/ according to some filter.\ntype EvalFilterDiff struct {\n\t\/\/ Input and output\n\tDiff **InstanceDiff\n\tOutput **InstanceDiff\n\n\t\/\/ Destroy, if true, will only include a destroy diff if it is set.\n\tDestroy bool\n}\n\nfunc (n *EvalFilterDiff) Eval(ctx EvalContext) (interface{}, error) {\n\tif *n.Diff == nil {\n\t\treturn nil, nil\n\t}\n\n\tinput := *n.Diff\n\tresult := new(InstanceDiff)\n\n\tif n.Destroy {\n\t\tif input.Destroy || input.RequiresNew() {\n\t\t\tresult.Destroy = true\n\t\t}\n\t}\n\n\tif n.Output != nil {\n\t\t*n.Output = result\n\t}\n\n\treturn nil, nil\n}\n\n\/\/ EvalReadDiff is an EvalNode implementation that writes the diff to\n\/\/ the full diff.\ntype EvalReadDiff struct {\n\tName string\n\tDiff **InstanceDiff\n}\n\nfunc (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) {\n\tdiff, lock := ctx.Diff()\n\n\t\/\/ Acquire the lock so that we can do this safely concurrently\n\tlock.Lock()\n\tdefer lock.Unlock()\n\n\t\/\/ Write the diff\n\tmodDiff := diff.ModuleByPath(ctx.Path())\n\tif modDiff == nil {\n\t\treturn nil, nil\n\t}\n\n\t*n.Diff = modDiff.Resources[n.Name]\n\n\treturn nil, nil\n}\n\n\/\/ EvalWriteDiff is an EvalNode implementation that writes the diff to\n\/\/ the full diff.\ntype EvalWriteDiff struct {\n\tName string\n\tDiff **InstanceDiff\n}\n\n\/\/ TODO: test\nfunc (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) {\n\tdiff, lock := ctx.Diff()\n\n\t\/\/ The diff to write, if its empty it should write nil\n\tvar diffVal *InstanceDiff\n\tif n.Diff != nil {\n\t\tdiffVal = *n.Diff\n\t}\n\tif diffVal.Empty() {\n\t\tdiffVal = nil\n\t}\n\n\t\/\/ Acquire the lock so that we can do this safely concurrently\n\tlock.Lock()\n\tdefer lock.Unlock()\n\n\t\/\/ Write the diff\n\tmodDiff := diff.ModuleByPath(ctx.Path())\n\tif modDiff == nil {\n\t\tmodDiff = diff.AddModule(ctx.Path())\n\t}\n\tif diffVal != nil {\n\t\tmodDiff.Resources[n.Name] = diffVal\n\t} else {\n\t\tdelete(modDiff.Resources, n.Name)\n\t}\n\n\treturn nil, nil\n}\n<commit_msg>core: output \"diffs didn't match\" error details<commit_after>package terraform\n\nimport (\n\t\"fmt\"\n\t\"log\"\n)\n\n\/\/ EvalCompareDiff is an EvalNode implementation that compares two diffs\n\/\/ and errors if the diffs are not equal.\ntype EvalCompareDiff struct {\n\tInfo *InstanceInfo\n\tOne, Two **InstanceDiff\n}\n\n\/\/ TODO: test\nfunc (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) {\n\tone, two := *n.One, *n.Two\n\n\t\/\/ If either are nil, let them be empty\n\tif one == nil {\n\t\tone = new(InstanceDiff)\n\t\tone.init()\n\t}\n\tif two == nil {\n\t\ttwo = new(InstanceDiff)\n\t\ttwo.init()\n\t}\n\toneId := one.Attributes[\"id\"]\n\ttwoId := two.Attributes[\"id\"]\n\tdelete(one.Attributes, \"id\")\n\tdelete(two.Attributes, \"id\")\n\tdefer func() {\n\t\tif oneId != nil {\n\t\t\tone.Attributes[\"id\"] = oneId\n\t\t}\n\t\tif twoId != nil {\n\t\t\ttwo.Attributes[\"id\"] = twoId\n\t\t}\n\t}()\n\n\tif same, reason := one.Same(two); !same {\n\t\tlog.Printf(\"[ERROR] %s: diffs didn't match\", n.Info.Id)\n\t\tlog.Printf(\"[ERROR] %s: reason: %s\", n.Info.Id, reason)\n\t\tlog.Printf(\"[ERROR] %s: diff one: %#v\", n.Info.Id, one)\n\t\tlog.Printf(\"[ERROR] %s: diff two: %#v\", n.Info.Id, two)\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"%s: diffs didn't match during apply. This is a bug with \"+\n\t\t\t\t\"Terraform and should be reported as a GitHub Issue.\\n\"+\n\t\t\t\t\"\\n\"+\n\t\t\t\t\"Please include the following information in your report:\\n\"+\n\t\t\t\t\" Terraform Version: %s\\n\"+\n\t\t\t\t\" Resource ID: %s\\n\"+\n\t\t\t\t\" Mismatch reason: %s\\n\"+\n\t\t\t\t\" Diff One (usually from plan): %#v\\n\"+\n\t\t\t\t\" Diff Two (usually from apply): %#v\\n\"+\n\t\t\t\t\"\\n\"+\n\t\t\t\t\"Also include as much context as you can about your config, state,\\n\"+\n\t\t\t\t\" and the steps you performed to trigger this error.\\n\",\n\t\t\tn.Info.Id, Version, n.Info.Id, reason, one, two)\n\t}\n\n\treturn nil, nil\n}\n\n\/\/ EvalDiff is an EvalNode implementation that does a refresh for\n\/\/ a resource.\ntype EvalDiff struct {\n\tInfo *InstanceInfo\n\tConfig **ResourceConfig\n\tProvider *ResourceProvider\n\tState **InstanceState\n\tOutput **InstanceDiff\n\tOutputState **InstanceState\n}\n\n\/\/ TODO: test\nfunc (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) {\n\tstate := *n.State\n\tconfig := *n.Config\n\tprovider := *n.Provider\n\n\t\/\/ Call pre-diff hook\n\terr := ctx.Hook(func(h Hook) (HookAction, error) {\n\t\treturn h.PreDiff(n.Info, state)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ The state for the diff must never be nil\n\tdiffState := state\n\tif diffState == nil {\n\t\tdiffState = new(InstanceState)\n\t}\n\tdiffState.init()\n\n\t\/\/ Diff!\n\tdiff, err := provider.Diff(n.Info, diffState, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif diff == nil {\n\t\tdiff = new(InstanceDiff)\n\t}\n\n\t\/\/ Require a destroy if there is no ID and it requires new.\n\tif diff.RequiresNew() && state != nil && state.ID != \"\" {\n\t\tdiff.Destroy = true\n\t}\n\n\t\/\/ If we're creating a new resource, compute its ID\n\tif diff.RequiresNew() || state == nil || state.ID == \"\" {\n\t\tvar oldID string\n\t\tif state != nil {\n\t\t\toldID = state.Attributes[\"id\"]\n\t\t}\n\n\t\t\/\/ Add diff to compute new ID\n\t\tdiff.init()\n\t\tdiff.Attributes[\"id\"] = &ResourceAttrDiff{\n\t\t\tOld: oldID,\n\t\t\tNewComputed: true,\n\t\t\tRequiresNew: true,\n\t\t\tType: DiffAttrOutput,\n\t\t}\n\t}\n\n\t\/\/ Call post-refresh hook\n\terr = ctx.Hook(func(h Hook) (HookAction, error) {\n\t\treturn h.PostDiff(n.Info, diff)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Update our output\n\t*n.Output = diff\n\n\t\/\/ Update the state if we care\n\tif n.OutputState != nil {\n\t\t*n.OutputState = state\n\n\t\t\/\/ Merge our state so that the state is updated with our plan\n\t\tif !diff.Empty() && n.OutputState != nil {\n\t\t\t*n.OutputState = state.MergeDiff(diff)\n\t\t}\n\t}\n\n\treturn nil, nil\n}\n\n\/\/ EvalDiffDestroy is an EvalNode implementation that returns a plain\n\/\/ destroy diff.\ntype EvalDiffDestroy struct {\n\tInfo *InstanceInfo\n\tState **InstanceState\n\tOutput **InstanceDiff\n}\n\n\/\/ TODO: test\nfunc (n *EvalDiffDestroy) Eval(ctx EvalContext) (interface{}, error) {\n\tstate := *n.State\n\n\t\/\/ If there is no state or we don't have an ID, we're already destroyed\n\tif state == nil || state.ID == \"\" {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Call pre-diff hook\n\terr := ctx.Hook(func(h Hook) (HookAction, error) {\n\t\treturn h.PreDiff(n.Info, state)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ The diff\n\tdiff := &InstanceDiff{Destroy: true}\n\n\t\/\/ Call post-diff hook\n\terr = ctx.Hook(func(h Hook) (HookAction, error) {\n\t\treturn h.PostDiff(n.Info, diff)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Update our output\n\t*n.Output = diff\n\n\treturn nil, nil\n}\n\n\/\/ EvalDiffDestroyModule is an EvalNode implementation that writes the diff to\n\/\/ the full diff.\ntype EvalDiffDestroyModule struct {\n\tPath []string\n}\n\n\/\/ TODO: test\nfunc (n *EvalDiffDestroyModule) Eval(ctx EvalContext) (interface{}, error) {\n\tdiff, lock := ctx.Diff()\n\n\t\/\/ Acquire the lock so that we can do this safely concurrently\n\tlock.Lock()\n\tdefer lock.Unlock()\n\n\t\/\/ Write the diff\n\tmodDiff := diff.ModuleByPath(n.Path)\n\tif modDiff == nil {\n\t\tmodDiff = diff.AddModule(n.Path)\n\t}\n\tmodDiff.Destroy = true\n\n\treturn nil, nil\n}\n\n\/\/ EvalDiffTainted is an EvalNode implementation that writes the diff to\n\/\/ the full diff.\ntype EvalDiffTainted struct {\n\tName string\n\tDiff **InstanceDiff\n}\n\n\/\/ TODO: test\nfunc (n *EvalDiffTainted) Eval(ctx EvalContext) (interface{}, error) {\n\tstate, lock := ctx.State()\n\n\t\/\/ Get a read lock so we can access this instance\n\tlock.RLock()\n\tdefer lock.RUnlock()\n\n\t\/\/ Look for the module state. If we don't have one, then it doesn't matter.\n\tmod := state.ModuleByPath(ctx.Path())\n\tif mod == nil {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Look for the resource state. If we don't have one, then it is okay.\n\trs := mod.Resources[n.Name]\n\tif rs == nil {\n\t\treturn nil, nil\n\t}\n\n\t\/\/ If we have tainted, then mark it on the diff\n\tif len(rs.Tainted) > 0 {\n\t\t(*n.Diff).DestroyTainted = true\n\t}\n\n\treturn nil, nil\n}\n\n\/\/ EvalFilterDiff is an EvalNode implementation that filters the diff\n\/\/ according to some filter.\ntype EvalFilterDiff struct {\n\t\/\/ Input and output\n\tDiff **InstanceDiff\n\tOutput **InstanceDiff\n\n\t\/\/ Destroy, if true, will only include a destroy diff if it is set.\n\tDestroy bool\n}\n\nfunc (n *EvalFilterDiff) Eval(ctx EvalContext) (interface{}, error) {\n\tif *n.Diff == nil {\n\t\treturn nil, nil\n\t}\n\n\tinput := *n.Diff\n\tresult := new(InstanceDiff)\n\n\tif n.Destroy {\n\t\tif input.Destroy || input.RequiresNew() {\n\t\t\tresult.Destroy = true\n\t\t}\n\t}\n\n\tif n.Output != nil {\n\t\t*n.Output = result\n\t}\n\n\treturn nil, nil\n}\n\n\/\/ EvalReadDiff is an EvalNode implementation that writes the diff to\n\/\/ the full diff.\ntype EvalReadDiff struct {\n\tName string\n\tDiff **InstanceDiff\n}\n\nfunc (n *EvalReadDiff) Eval(ctx EvalContext) (interface{}, error) {\n\tdiff, lock := ctx.Diff()\n\n\t\/\/ Acquire the lock so that we can do this safely concurrently\n\tlock.Lock()\n\tdefer lock.Unlock()\n\n\t\/\/ Write the diff\n\tmodDiff := diff.ModuleByPath(ctx.Path())\n\tif modDiff == nil {\n\t\treturn nil, nil\n\t}\n\n\t*n.Diff = modDiff.Resources[n.Name]\n\n\treturn nil, nil\n}\n\n\/\/ EvalWriteDiff is an EvalNode implementation that writes the diff to\n\/\/ the full diff.\ntype EvalWriteDiff struct {\n\tName string\n\tDiff **InstanceDiff\n}\n\n\/\/ TODO: test\nfunc (n *EvalWriteDiff) Eval(ctx EvalContext) (interface{}, error) {\n\tdiff, lock := ctx.Diff()\n\n\t\/\/ The diff to write, if its empty it should write nil\n\tvar diffVal *InstanceDiff\n\tif n.Diff != nil {\n\t\tdiffVal = *n.Diff\n\t}\n\tif diffVal.Empty() {\n\t\tdiffVal = nil\n\t}\n\n\t\/\/ Acquire the lock so that we can do this safely concurrently\n\tlock.Lock()\n\tdefer lock.Unlock()\n\n\t\/\/ Write the diff\n\tmodDiff := diff.ModuleByPath(ctx.Path())\n\tif modDiff == nil {\n\t\tmodDiff = diff.AddModule(ctx.Path())\n\t}\n\tif diffVal != nil {\n\t\tmodDiff.Resources[n.Name] = diffVal\n\t} else {\n\t\tdelete(modDiff.Resources, n.Name)\n\t}\n\n\treturn nil, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright IBM Corp. 2016 All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"net\"\n\t_ \"net\/http\/pprof\"\n\t\"os\"\n\n\t\"github.com\/hyperledger\/fabric\/orderer\/common\/bootstrap\/provisional\"\n\tlocalconfig \"github.com\/hyperledger\/fabric\/orderer\/localconfig\"\n\t\"github.com\/hyperledger\/fabric\/orderer\/rawledger\/fileledger\"\n\t\"github.com\/hyperledger\/fabric\/orderer\/sbft\"\n\t\"github.com\/hyperledger\/fabric\/orderer\/sbft\/backend\"\n\t\"github.com\/hyperledger\/fabric\/orderer\/sbft\/connection\"\n\t\"github.com\/hyperledger\/fabric\/orderer\/sbft\/persist\"\n\tpb \"github.com\/hyperledger\/fabric\/orderer\/sbft\/simplebft\"\n\tab \"github.com\/hyperledger\/fabric\/protos\/orderer\"\n\t\"github.com\/op\/go-logging\"\n\t\"google.golang.org\/grpc\"\n)\n\ntype consensusStack struct {\n\tpersist *persist.Persist\n\tbackend *backend.Backend\n}\n\ntype flags struct {\n\tlistenAddr string\n\tgrpcAddr string\n\ttelemetryAddr string\n\tcertFile string\n\tkeyFile string\n\tdataDir string\n\tgenesisFile string\n\tverbose string\n\tinit string\n}\n\nvar logger = logging.MustGetLogger(\"orderer\/main\")\n\n\/\/ TODO move to_test after integration with common components\nfunc init() {\n\tlogging.SetLevel(logging.DEBUG, \"\")\n}\n\nfunc main() {\n\tvar c flags\n\n\tflag.StringVar(&c.init, \"init\", \"\", \"initialized instance from pbft config `file`\")\n\tflag.StringVar(&c.listenAddr, \"addr\", \":6100\", \"`addr`ess\/port of service\")\n\tflag.StringVar(&c.grpcAddr, \"gaddr\", \":7100\", \"`addr`ess\/port of GRPC atomic broadcast server\")\n\tflag.StringVar(&c.telemetryAddr, \"telemetry\", \":7100\", \"`addr`ess of telemetry\/profiler\")\n\tflag.StringVar(&c.certFile, \"cert\", \"\", \"certificate `file`\")\n\tflag.StringVar(&c.keyFile, \"key\", \"\", \"key `file`\")\n\tflag.StringVar(&c.dataDir, \"data-dir\", \"\", \"data `dir`ectory\")\n\tflag.StringVar(&c.genesisFile, \"genesis-file\", \"\", \"`gen`esis block file\")\n\tflag.StringVar(&c.verbose, \"verbose\", \"info\", \"set verbosity `level` (critical, error, warning, notice, info, debug)\")\n\n\tflag.Parse()\n\n\tlevel, err := logging.LogLevel(c.verbose)\n\tif err != nil {\n\t\tlogger.Panicf(\"Failed to set loglevel: %s\", err)\n\t}\n\tlogging.SetLevel(level, \"\")\n\n\tif c.init != \"\" {\n\t\terr = initInstance(c)\n\t\tif err != nil {\n\t\t\tlogger.Panicf(\"Failed to initialize SBFT instance: %s\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tserve(c)\n}\n\nfunc initInstance(c flags) error {\n\tconfig, err := sbft.ReadJsonConfig(c.init)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = os.Mkdir(c.dataDir, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp := persist.New(c.dataDir)\n\terr = sbft.SaveConfig(p, config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Infof(\"Initialized new peer: listening at %v GRPC at %v\", c.listenAddr, c.grpcAddr)\n\treturn nil\n}\n\nfunc serve(c flags) {\n\tif c.dataDir == \"\" {\n\t\tlogger.Panic(\"No data directory was given.\")\n\t}\n\n\tpersist := persist.New(c.dataDir)\n\tconfig, err := sbft.RestoreConfig(persist)\n\tif err != nil {\n\t\tlogger.Panicf(\"Failed to restore configuration: %s\", err)\n\t}\n\n\tconn, err := connection.New(c.listenAddr, c.certFile, c.keyFile)\n\tif err != nil {\n\t\tlogger.Panicf(\"Error when trying to connect: %s\", err)\n\t}\n\ts := &consensusStack{\n\t\tpersist: nil,\n\t}\n\n\tlocalConf := localconfig.Load()\n\tlocalConf.General.OrdererType = provisional.ConsensusTypeSbft\n\tgenesisBlock := provisional.New(localConf).GenesisBlock()\n\n\tflf := fileledger.New(c.dataDir)\n\tledger, _ := flf.GetOrCreate(provisional.TestChainID)\n\tledger.Append(genesisBlock)\n\ts.backend, err = backend.NewBackend(config.Peers, conn, ledger, persist)\n\tif err != nil {\n\t\tlogger.Panicf(\"Failed to create a new backend instance: %s\", err)\n\t}\n\n\tsbft, _ := pb.New(s.backend.GetMyId(), config.Consensus, s.backend)\n\ts.backend.SetReceiver(sbft)\n\n\tgrpcServer := grpc.NewServer()\n\tlis, err := net.Listen(\"tcp\", c.grpcAddr)\n\tif err != nil {\n\t\tlogger.Panicf(\"Failed to listen: %s\", err)\n\t}\n\tbroadcastab := backend.NewBackendAB(s.backend)\n\tab.RegisterAtomicBroadcastServer(grpcServer, broadcastab)\n\tgrpcServer.Serve(lis)\n}\n<commit_msg>Add an initial Consenter interface skeleton to SBFT<commit_after>\/*\nCopyright IBM Corp. 2016 All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"net\"\n\t_ \"net\/http\/pprof\"\n\t\"os\"\n\n\t\"github.com\/hyperledger\/fabric\/orderer\/common\/bootstrap\/provisional\"\n\tlocalconfig \"github.com\/hyperledger\/fabric\/orderer\/localconfig\"\n\t\"github.com\/hyperledger\/fabric\/orderer\/multichain\"\n\t\"github.com\/hyperledger\/fabric\/orderer\/rawledger\/fileledger\"\n\t\"github.com\/hyperledger\/fabric\/orderer\/sbft\"\n\t\"github.com\/hyperledger\/fabric\/orderer\/sbft\/backend\"\n\t\"github.com\/hyperledger\/fabric\/orderer\/sbft\/connection\"\n\t\"github.com\/hyperledger\/fabric\/orderer\/sbft\/persist\"\n\tpb \"github.com\/hyperledger\/fabric\/orderer\/sbft\/simplebft\"\n\tcb \"github.com\/hyperledger\/fabric\/protos\/common\"\n\tab \"github.com\/hyperledger\/fabric\/protos\/orderer\"\n\t\"github.com\/op\/go-logging\"\n\t\"google.golang.org\/grpc\"\n)\n\ntype consensusStack struct {\n\tpersist *persist.Persist\n\tbackend *backend.Backend\n}\n\ntype flags struct {\n\tlistenAddr string\n\tgrpcAddr string\n\ttelemetryAddr string\n\tcertFile string\n\tkeyFile string\n\tdataDir string\n\tgenesisFile string\n\tverbose string\n\tinit string\n}\n\nvar logger = logging.MustGetLogger(\"orderer\/main\")\n\n\/\/ TODO move to_test after integration with common components\nfunc init() {\n\tlogging.SetLevel(logging.DEBUG, \"\")\n}\n\n\/\/ Consenter interface implementation for new main application:\n\/\/ This part is Work In Progress\n\ntype consenter struct{}\n\ntype chain struct {\n\tsupport multichain.ConsenterSupport\n}\n\n\/\/ New creates a new consenter for the SBFT consensus scheme.\n\/\/ It accepts messages being delivered via Enqueue, orders them, and then\n\/\/ uses the blockcutter to form the messages into blocks before writing to\n\/\/ the given ledger.\nfunc New() multichain.Consenter {\n\treturn &consenter{}\n}\n\n\/\/ HandleChain creates\/returns a reference to a Chain for the given set of support resources.\nfunc (solo *consenter) HandleChain(support multichain.ConsenterSupport) (multichain.Chain, error) {\n\treturn newChain(support), nil\n}\n\nfunc newChain(support multichain.ConsenterSupport) *chain {\n\treturn &chain{\n\t\tsupport: support,\n\t}\n}\n\n\/\/ Chain interface implementation:\n\n\/\/ TODO\n\/\/ Start allocates whatever resources are needed for staying up to date with the chain\nfunc (ch *chain) Start() {\n\n}\n\n\/\/ TODO\n\/\/ Halt frees the resources which were allocated for this Chain\nfunc (ch *chain) Halt() {\n\n}\n\n\/\/ TODO\n\/\/ Enqueue accepts a message and returns true on acceptance, or false on shutdown\nfunc (ch *chain) Enqueue(env *cb.Envelope) bool {\n\treturn false\n}\n\n\/\/ The \"old\", SBFT only application:\nfunc main() {\n\tvar c flags\n\n\tflag.StringVar(&c.init, \"init\", \"\", \"initialized instance from pbft config `file`\")\n\tflag.StringVar(&c.listenAddr, \"addr\", \":6100\", \"`addr`ess\/port of service\")\n\tflag.StringVar(&c.grpcAddr, \"gaddr\", \":7100\", \"`addr`ess\/port of GRPC atomic broadcast server\")\n\tflag.StringVar(&c.telemetryAddr, \"telemetry\", \":7100\", \"`addr`ess of telemetry\/profiler\")\n\tflag.StringVar(&c.certFile, \"cert\", \"\", \"certificate `file`\")\n\tflag.StringVar(&c.keyFile, \"key\", \"\", \"key `file`\")\n\tflag.StringVar(&c.dataDir, \"data-dir\", \"\", \"data `dir`ectory\")\n\tflag.StringVar(&c.genesisFile, \"genesis-file\", \"\", \"`gen`esis block file\")\n\tflag.StringVar(&c.verbose, \"verbose\", \"info\", \"set verbosity `level` (critical, error, warning, notice, info, debug)\")\n\n\tflag.Parse()\n\n\tlevel, err := logging.LogLevel(c.verbose)\n\tif err != nil {\n\t\tlogger.Panicf(\"Failed to set loglevel: %s\", err)\n\t}\n\tlogging.SetLevel(level, \"\")\n\n\tif c.init != \"\" {\n\t\terr = initInstance(c)\n\t\tif err != nil {\n\t\t\tlogger.Panicf(\"Failed to initialize SBFT instance: %s\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tserve(c)\n}\n\nfunc initInstance(c flags) error {\n\tconfig, err := sbft.ReadJsonConfig(c.init)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = os.Mkdir(c.dataDir, 0755)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp := persist.New(c.dataDir)\n\terr = sbft.SaveConfig(p, config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogger.Infof(\"Initialized new peer: listening at %v GRPC at %v\", c.listenAddr, c.grpcAddr)\n\treturn nil\n}\n\nfunc serve(c flags) {\n\tif c.dataDir == \"\" {\n\t\tlogger.Panic(\"No data directory was given.\")\n\t}\n\n\tpersist := persist.New(c.dataDir)\n\tconfig, err := sbft.RestoreConfig(persist)\n\tif err != nil {\n\t\tlogger.Panicf(\"Failed to restore configuration: %s\", err)\n\t}\n\n\tconn, err := connection.New(c.listenAddr, c.certFile, c.keyFile)\n\tif err != nil {\n\t\tlogger.Panicf(\"Error when trying to connect: %s\", err)\n\t}\n\ts := &consensusStack{\n\t\tpersist: nil,\n\t}\n\n\tlocalConf := localconfig.Load()\n\tlocalConf.General.OrdererType = provisional.ConsensusTypeSbft\n\tgenesisBlock := provisional.New(localConf).GenesisBlock()\n\n\tflf := fileledger.New(c.dataDir)\n\tledger, _ := flf.GetOrCreate(provisional.TestChainID)\n\tledger.Append(genesisBlock)\n\ts.backend, err = backend.NewBackend(config.Peers, conn, ledger, persist)\n\tif err != nil {\n\t\tlogger.Panicf(\"Failed to create a new backend instance: %s\", err)\n\t}\n\n\tsbft, _ := pb.New(s.backend.GetMyId(), config.Consensus, s.backend)\n\ts.backend.SetReceiver(sbft)\n\n\tgrpcServer := grpc.NewServer()\n\tlis, err := net.Listen(\"tcp\", c.grpcAddr)\n\tif err != nil {\n\t\tlogger.Panicf(\"Failed to listen: %s\", err)\n\t}\n\tbroadcastab := backend.NewBackendAB(s.backend)\n\tab.RegisterAtomicBroadcastServer(grpcServer, broadcastab)\n\tgrpcServer.Serve(lis)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"io\"\n)\n\n\/\/ A Writer writes records to a MySQL compatible CSV encoded file.\n\/\/ It is heavily influenced by the std lib encoding\/CSV package.\n\/\/\n\/\/ As returned by NewWriter, a Writer writes fields delimited by a comma, escapes special\n\/\/ characters with a back slash and lines are terminated with a newline. The exported fields\n\/\/ can be changed to customize the details before the first call to Write or WriteAll.\ntype Writer struct {\n\tDelimiter rune \/\/ Field delimiter (set to ',' by NewWriter)\n\tQuote rune \/\/ Quote character\n\tEscape rune \/\/ Escape character\n\tTerminator string \/\/ Character to end each line\n\tw *bufio.Writer\n}\n\n\/\/ NewWriter returns a new Writer that writes to w.\nfunc NewWriter(w io.Writer) *Writer {\n\treturn &Writer{\n\t\tDelimiter: ',',\n\t\tQuote: '\\\\',\n\t\tTerminator: \"\\n\",\n\t\tw: bufio.NewWriter(w),\n\t}\n}\n\n\/\/ Writer writes a single CSV record to w along with any necessary quoting.\n\/\/ A record is a slice of NullRawBytes so NULL's can be detected and escaped.\nfunc (w *Writer) Write(record []NullRawBytes) (buf int, err error) {\n\tfor n, field := range record {\n\t\t\/\/ Shortcut exit for empty strings\n\t\tif n > 0 {\n\t\t\tif _, err = w.w.WriteRune(w.Delimiter); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Check if and escape\/translate if field is NULL\n\t\tif !field.Valid {\n\t\t\t_, err = w.w.WriteString(\"\\\\N\")\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Write quote character if set\n\t\tif w.Quote > 0 {\n\t\t\tif _, err = w.w.WriteRune(w.Quote); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ We need to examine each byte to determine if special characters need to be escaped\n\t\tbytes := field.RawBytes\n\t\tfor _, byte := range bytes {\n\t\t\tswitch rune(byte) {\n\t\t\tcase w.Delimiter:\n\t\t\t\tif w.Quote < 0 {\n\t\t\t\t\t_, err = w.w.WriteString(string(w.Escape) + string(w.Delimiter))\n\t\t\t\t}\n\t\t\tcase w.Quote:\n\t\t\t\t_, err = w.w.WriteString(string(w.Escape) + string(w.Quote))\n\t\t\tcase w.Escape:\n\t\t\t\t_, err = w.w.WriteString(string(w.Escape) + string(w.Escape))\n\t\t\tcase 0x00:\n\t\t\t\t_, err = w.w.WriteString(string(w.Escape) + \"0\")\n\t\t\tdefault:\n\t\t\t\t_, err = w.w.WriteString(string(byte))\n\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Write quote character if set\n\t\tif w.Quote > 0 {\n\t\t\tif _, err = w.w.WriteRune(w.Quote); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Write line terminator\n\t_, err = w.w.WriteString(w.Terminator)\n\n\t\/\/ Return the number of bytes written to the current buffer\n\tbuf = w.w.Buffered()\n\n\treturn buf, err\n}\n\n\/\/ Flush writes any buffered data to the underlying io.Writer.\n\/\/ To check if an error occurred during the Flush, call Error.\nfunc (w *Writer) Flush() {\n\tw.w.Flush()\n}\n\n\/\/ Error reports any error that has occurred during a previous Write or Flush.\nfunc (w *Writer) Error() error {\n\t_, err := w.w.Write(nil)\n\treturn err\n}\n<commit_msg>Fixed writer defaults, added WriteAll method used by tests<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"io\"\n)\n\n\/\/ A Writer writes records to a MySQL compatible CSV encoded file.\n\/\/ It is heavily influenced by the std lib encoding\/CSV package.\n\/\/\n\/\/ As returned by NewWriter, a Writer writes fields delimited by a comma, escapes special\n\/\/ characters with a back slash and lines are terminated with a newline. The exported fields\n\/\/ can be changed to customize the details before the first call to Write or WriteAll.\ntype Writer struct {\n\tDelimiter rune \/\/ Field delimiter (set to ',' by NewWriter)\n\tQuote rune \/\/ Quote character\n\tEscape rune \/\/ Escape character\n\tTerminator string \/\/ Character to end each line\n\tw *bufio.Writer\n}\n\n\/\/ NewWriter returns a new Writer that writes to w.\nfunc NewWriter(w io.Writer) *Writer {\n\treturn &Writer{\n\t\tDelimiter: ',',\n\t\tQuote: '\"',\n\t\tEscape: '\\\\',\n\t\tTerminator: \"\\n\",\n\t\tw: bufio.NewWriter(w),\n\t}\n}\n\n\/\/ Writer writes a single CSV record to w along with any necessary quoting.\n\/\/ A record is a slice of NullRawBytes so NULL's can be detected and escaped.\nfunc (w *Writer) Write(record []NullRawBytes) (buf int, err error) {\n\tfor n, field := range record {\n\t\t\/\/ Shortcut exit for empty strings\n\t\tif n > 0 {\n\t\t\tif _, err = w.w.WriteRune(w.Delimiter); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Check if and escape\/translate if field is NULL\n\t\tif !field.Valid {\n\t\t\t_, err = w.w.WriteString(\"\\\\N\")\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Write quote character if set\n\t\tif w.Quote > 0 {\n\t\t\tif _, err = w.w.WriteRune(w.Quote); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ We need to examine each byte to determine if special characters need to be escaped\n\t\tbytes := field.RawBytes\n\t\tfor _, byte := range bytes {\n\t\t\tswitch rune(byte) {\n\t\t\tcase w.Delimiter:\n\t\t\t\tif w.Quote < 0 {\n\t\t\t\t\t_, err = w.w.WriteString(string(w.Escape) + string(w.Delimiter))\n\t\t\t\t}\n\t\t\tcase w.Quote:\n\t\t\t\t_, err = w.w.WriteString(string(w.Escape) + string(w.Quote))\n\t\t\tcase w.Escape:\n\t\t\t\t_, err = w.w.WriteString(string(w.Escape) + string(w.Escape))\n\t\t\tcase 0x00:\n\t\t\t\t_, err = w.w.WriteString(string(w.Escape) + \"0\")\n\t\t\tdefault:\n\t\t\t\t_, err = w.w.WriteString(string(byte))\n\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Write quote character if set\n\t\tif w.Quote > 0 {\n\t\t\tif _, err = w.w.WriteRune(w.Quote); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Write line terminator\n\t_, err = w.w.WriteString(w.Terminator)\n\n\t\/\/ Return the number of bytes written to the current buffer\n\tbuf = w.w.Buffered()\n\n\treturn buf, err\n}\n\n\/\/ Flush writes any buffered data to the underlying io.Writer.\n\/\/ To check if an error occurred during the Flush, call Error.\nfunc (w *Writer) Flush() {\n\tw.w.Flush()\n}\n\n\/\/ Error reports any error that has occurred during a previous Write or Flush.\nfunc (w *Writer) Error() error {\n\t_, err := w.w.Write(nil)\n\treturn err\n}\n\n\/\/ WriteAll writes multiple CSV records to w using Write and then calls Flush.\nfunc (w *Writer) WriteAll(records [][]NullRawBytes) (err error) {\n\tfor _, record := range records {\n\t\t_, err = w.Write(record)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn w.w.Flush()\n}\n<|endoftext|>"} {"text":"<commit_before>package ergo\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/wlMalk\/ergo\/validation\"\n)\n\ntype Request struct {\n\t*http.Request\n\tInput map[string]validation.Valuer\n\tPathParams map[string]string\n\toperation *Operation \/\/ Operation object\n}\n\nfunc NewRequest(httpRequest *http.Request) *Request {\n\treturn &Request{\n\t\tRequest: httpRequest,\n\t\tPathParams: map[string]string{},\n\t\tInput: map[string]validation.Valuer{},\n\t}\n}\n\n\/\/ Req returns the request.\nfunc (req *Request) Req() *http.Request {\n\treturn req.Request\n}\n\n\/\/ Param returns the input parameter value by its name.\nfunc (req *Request) Param(name string) validation.Valuer {\n\treturn req.Input[name]\n}\n\n\/\/ ParamOk returns the input parameter value by its name.\nfunc (req *Request) ParamOk(name string) (validation.Valuer, bool) {\n\tp, ok := req.Input[name]\n\treturn p, ok\n}\n\n\/\/ Params returns a map of input parameters values by their names.\n\/\/ If no names given then it returns r.Input\nfunc (req *Request) Params(names ...string) map[string]validation.Valuer {\n\tif len(names) == 0 {\n\t\treturn req.Input\n\t}\n\tparams := map[string]validation.Valuer{}\n\tfor _, n := range names {\n\t\tp, ok := req.Input[n]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tparams[n] = p\n\t}\n\treturn params\n}\n\nfunc (req *Request) GetOperation() Operationer {\n\treturn req.operation\n}\n<commit_msg>Moved Operation to Context instead of Request<commit_after>package ergo\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/wlMalk\/ergo\/validation\"\n)\n\ntype Request struct {\n\t*http.Request\n\tInput map[string]validation.Valuer\n\tPathParams map[string]string\n}\n\nfunc NewRequest(httpRequest *http.Request) *Request {\n\treturn &Request{\n\t\tRequest: httpRequest,\n\t\tPathParams: map[string]string{},\n\t\tInput: map[string]validation.Valuer{},\n\t}\n}\n\n\/\/ Req returns the request.\nfunc (req *Request) Req() *http.Request {\n\treturn req.Request\n}\n\n\/\/ Param returns the input parameter value by its name.\nfunc (req *Request) Param(name string) validation.Valuer {\n\treturn req.Input[name]\n}\n\n\/\/ ParamOk returns the input parameter value by its name.\nfunc (req *Request) ParamOk(name string) (validation.Valuer, bool) {\n\tp, ok := req.Input[name]\n\treturn p, ok\n}\n\n\/\/ Params returns a map of input parameters values by their names.\n\/\/ If no names given then it returns r.Input\nfunc (req *Request) Params(names ...string) map[string]validation.Valuer {\n\tif len(names) == 0 {\n\t\treturn req.Input\n\t}\n\tparams := map[string]validation.Valuer{}\n\tfor _, n := range names {\n\t\tp, ok := req.Input[n]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tparams[n] = p\n\t}\n\treturn params\n}\n<|endoftext|>"} {"text":"<commit_before>package filestore\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tds \"github.com\/ipfs\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/ipfs\/go-datastore\"\n\t\"github.com\/ipfs\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/ipfs\/go-datastore\/query\"\n\tk \"github.com\/ipfs\/go-ipfs\/blocks\/key\"\n\t\/\/mh \"gx\/ipfs\/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku\/go-multihash\"\n\tu \"gx\/ipfs\/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1\/go-ipfs-util\"\n)\n\ntype datastore struct {\n\tds ds.Datastore\n\talwaysVerify bool\n}\n\nfunc New(d ds.Datastore, fileStorePath string) (ds.Datastore, error) {\n\treturn &datastore{d, true}, nil\n}\n\nfunc (d *datastore) Put(key ds.Key, value interface{}) (err error) {\n\tval, ok := value.(*DataWOpts)\n\tif !ok {\n\t\tpanic(ds.ErrInvalidType)\n\t}\n\n\taddType, ok := val.AddOpts.(int)\n\tif !ok {\n\t\tpanic(ds.ErrInvalidType)\n\t}\n\tif addType != AddNoCopy {\n\t\treturn errors.New(\"Only \\\"no-copy\\\" mode supported for now.\")\n\t}\n\n\tdataObj, ok := val.DataObj.(*DataObj)\n\tif !ok {\n\t\tpanic(ds.ErrInvalidType)\n\t}\n\n\t\/\/ Make sure the filename is an absolute path\n\tif !filepath.IsAbs(dataObj.FilePath) {\n\t\treturn errors.New(\"datastore put: non-absolute filename: \" + dataObj.FilePath)\n\t}\n\n\t\/\/ Make sure we can read the file as a sanity check\n\tif file, err := os.Open(dataObj.FilePath); err != nil {\n\t\treturn err\n\t} else {\n\t\tfile.Close()\n\t}\n\n\tdata, err := dataObj.Marshal()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn d.ds.Put(key, data)\n}\n\nfunc (d *datastore) Get(key ds.Key) (value interface{}, err error) {\n\tdataObj, err := d.ds.Get(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdata := dataObj.([]byte)\n\tval := new(DataObj)\n\terr = val.Unmarshal(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif val.NoBlockData {\n\t\tfile, err := os.Open(val.FilePath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t_, err = file.Seek(int64(val.Offset), 0)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbuf := make([]byte, val.Size)\n\t\t_, err = io.ReadFull(file, buf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdata, err := reconstruct(val.Data, buf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif d.alwaysVerify {\n\t\t\tnewKey := k.Key(u.Hash(data)).DsKey()\n\t\t\tif newKey != key {\n\t\t\t\treturn nil, errors.New(\"Filestore: Block Verification Failed\")\n\t\t\t}\n\t\t}\n\t\treturn data, nil\n\t} else {\n\t\treturn val.Data, nil\n\t}\n}\n\nfunc (d *datastore) Has(key ds.Key) (exists bool, err error) {\n\treturn d.ds.Has(key)\n}\n\nfunc (d *datastore) Delete(key ds.Key) error {\n\treturn ds.ErrNotFound\n}\n\nfunc (d *datastore) Query(q query.Query) (query.Results, error) {\n\treturn nil, errors.New(\"queries not supported yet\")\n}\n\nfunc (d *datastore) Close() error {\n\tc, ok := d.ds.(io.Closer)\n\tif ok {\n\t\treturn c.Close()\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc (d *datastore) Batch() (ds.Batch, error) {\n\treturn ds.NewBasicBatch(d), nil\n}\n<commit_msg>Add Basic Query and \"Direct\" commands to filestore. Needs Testing.<commit_after>package filestore\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tds \"github.com\/ipfs\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/ipfs\/go-datastore\"\n\t\"github.com\/ipfs\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/ipfs\/go-datastore\/query\"\n\tk \"github.com\/ipfs\/go-ipfs\/blocks\/key\"\n\t\/\/mh \"gx\/ipfs\/QmYf7ng2hG5XBtJA3tN34DQ2GUN5HNksEw1rLDkmr6vGku\/go-multihash\"\n\tu \"gx\/ipfs\/QmZNVWh8LLjAavuQ2JXuFmuYH3C11xo988vSgp7UQrTRj1\/go-ipfs-util\"\n)\n\ntype datastore struct {\n\tds ds.Datastore\n\talwaysVerify bool\n}\n\nfunc New(d ds.Datastore, fileStorePath string) (ds.Datastore, error) {\n\treturn &datastore{d, true}, nil\n}\n\nfunc (d *datastore) Put(key ds.Key, value interface{}) (err error) {\n\tval, ok := value.(*DataWOpts)\n\tif !ok {\n\t\tpanic(ds.ErrInvalidType)\n\t}\n\n\taddType, ok := val.AddOpts.(int)\n\tif !ok {\n\t\tpanic(ds.ErrInvalidType)\n\t}\n\tif addType != AddNoCopy {\n\t\treturn errors.New(\"Only \\\"no-copy\\\" mode supported for now.\")\n\t}\n\n\tdataObj, ok := val.DataObj.(*DataObj)\n\tif !ok {\n\t\tpanic(ds.ErrInvalidType)\n\t}\n\n\t\/\/ Make sure the filename is an absolute path\n\tif !filepath.IsAbs(dataObj.FilePath) {\n\t\treturn errors.New(\"datastore put: non-absolute filename: \" + dataObj.FilePath)\n\t}\n\n\t\/\/ Make sure we can read the file as a sanity check\n\tif file, err := os.Open(dataObj.FilePath); err != nil {\n\t\treturn err\n\t} else {\n\t\tfile.Close()\n\t}\n\n\tdata, err := dataObj.Marshal()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn d.ds.Put(key, data)\n}\n\nfunc (d *datastore) Get(key ds.Key) (value interface{}, err error) {\n\tdataObj, err := d.ds.Get(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tval, err := d.decode(dataObj)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn d.GetData(key, val, d.alwaysVerify)\n}\n\n\/\/ Get the key as a DataObj\nfunc (d *datastore) GetDirect(key ds.Key) (*DataObj, error) {\n\tdataObj, err := d.ds.Get(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn d.decode(dataObj)\n}\n\nfunc (d *datastore) decode(dataObj interface{}) (*DataObj, error) {\n\tdata := dataObj.([]byte)\n\tval := new(DataObj)\n\terr := val.Unmarshal(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn val, nil\n}\n\n\/\/ Get the orignal data out of the DataObj\nfunc (d *datastore) GetData(key ds.Key, val *DataObj, verify bool) ([]byte, error) {\n\tif val.NoBlockData {\n\t\tfile, err := os.Open(val.FilePath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t_, err = file.Seek(int64(val.Offset), 0)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbuf := make([]byte, val.Size)\n\t\t_, err = io.ReadFull(file, buf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdata, err := reconstruct(val.Data, buf)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif verify {\n\t\t\tnewKey := k.Key(u.Hash(data)).DsKey()\n\t\t\tif newKey != key {\n\t\t\t\treturn nil, errors.New(\"Filestore: Block Verification Failed\")\n\t\t\t}\n\t\t}\n\t\treturn data, nil\n\t} else {\n\t\treturn val.Data, nil\n\t}\n}\n\nfunc (d *datastore) Has(key ds.Key) (exists bool, err error) {\n\treturn d.ds.Has(key)\n}\n\nfunc (d *datastore) Delete(key ds.Key) error {\n\treturn ds.ErrNotFound\n}\n\nfunc (d *datastore) DeleteDirect(key ds.Key) error {\n\treturn d.ds.Delete(key)\n}\n\nfunc (d *datastore) Query(q query.Query) (query.Results, error) {\n\tres, err := d.ds.Query(q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif q.KeysOnly {\n\t\treturn res, nil\n\t}\n\treturn nil, errors.New(\"filestore currently only supports keyonly queries\")\n\t\/\/ return &queryResult{res, func(r query.Result) query.Result {\n\t\/\/ \tval, err := d.decode(r.Value)\n\t\/\/ \tif err != nil {\n\t\/\/ \t\treturn query.Result{query.Entry{r.Key, nil}, err}\n\t\/\/ \t}\n\t\/\/ \t\/\/ Note: It should not be necessary to reclean the key\n\t\/\/ \t\/\/ here (by calling ds.NewKey) just to convert the\n\t\/\/ \t\/\/ string back to a ds.Key\n\t\/\/ \tdata, err := d.GetData(ds.NewKey(r.Key), val, d.alwaysVerify)\n\t\/\/ \tif err != nil {\n\t\/\/ \t\treturn query.Result{query.Entry{r.Key, nil}, err}\n\t\/\/ \t}\n\t\/\/ \treturn query.Result{query.Entry{r.Key, data}, r.Error}\n\t\/\/ }}, nil\n}\n\nfunc (d *datastore) QueryDirect(q query.Query) (query.Results, error) {\n\tres, err := d.ds.Query(q)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif q.KeysOnly {\n\t\treturn res, nil\n\t}\n\treturn nil, errors.New(\"filestore currently only supports keyonly queries\")\n\t\/\/ return &queryResult{res, func(r query.Result) query.Result {\n\t\/\/ \tval, err := d.decode(r.Value)\n\t\/\/ \tif err != nil {\n\t\/\/ \t\treturn query.Result{query.Entry{r.Key, nil}, err}\n\t\/\/ \t}\n\t\/\/ \treturn query.Result{query.Entry{r.Key, val}, r.Error}\n\t\/\/ }}, nil\n}\n\n\/\/ type queryResult struct {\n\/\/ \tquery.Results\n\/\/ \tadjResult func(query.Result) query.Result\n\/\/ }\n\n\/\/ func (q *queryResult) Next() <-chan query.Result {\n\/\/ \tin := q.Results.Next()\n\/\/ \tout := make(chan query.Result)\n\/\/ \tgo func() {\n\/\/ \t\tres := <-in\n\/\/ \t\tif res.Error == nil {\n\/\/ \t\t\tout <- res\n\/\/ \t\t}\n\/\/ \t\tout <- q.adjResult(res)\n\/\/ \t}()\n\/\/ \treturn out\n\/\/ }\n\n\/\/ func (q *queryResult) Rest() ([]query.Entry, error) {\n\/\/ \tres, err := q.Results.Rest()\n\/\/ \tif err != nil {\n\/\/ \t\treturn nil, err\n\/\/ \t}\n\/\/ \tfor _, entry := range res {\n\/\/ \t\tnewRes := q.adjResult(query.Result{entry, nil})\n\/\/ \t\tif newRes.Error != nil {\n\/\/ \t\t\treturn nil, newRes.Error\n\/\/ \t\t}\n\/\/ \t\tentry.Value = newRes.Value\n\/\/ \t}\n\/\/ \treturn res, nil\n\/\/ }\n\nfunc (d *datastore) Close() error {\n\tc, ok := d.ds.(io.Closer)\n\tif ok {\n\t\treturn c.Close()\n\t} else {\n\t\treturn nil\n\t}\n}\n\nfunc (d *datastore) Batch() (ds.Batch, error) {\n\treturn ds.NewBasicBatch(d), nil\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Start with the first text field focused<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2017 Pennock Tech, LLC.\n\/\/ All rights reserved, except as granted under license.\n\/\/ Licensed per file LICENSE.txt\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"sync\"\n\n\t\"go.pennock.tech\/smtpdane\/internal\/errorlist\"\n\n\t\"github.com\/miekg\/dns\"\n)\n\nconst EnvKeyDNSResolver = \"DNS_RESOLVER\"\n\nvar dnsSettings struct {\n\tsync.RWMutex\n\tconf *dns.ClientConfig\n\tclient *dns.Client\n}\n\nfunc initDNS() (*dns.ClientConfig, *dns.Client, error) {\n\tdnsSettings.RLock()\n\tif dnsSettings.client != nil {\n\t\tdefer dnsSettings.RUnlock()\n\t\treturn dnsSettings.conf, dnsSettings.client, nil\n\t}\n\tdnsSettings.RUnlock()\n\tdnsSettings.Lock()\n\tdefer dnsSettings.Unlock()\n\tif dnsSettings.client != nil {\n\t\treturn dnsSettings.conf, dnsSettings.client, nil\n\t}\n\n\tvar (\n\t\tconf *dns.ClientConfig\n\t\terr error\n\t)\n\tif os.Getenv(EnvKeyDNSResolver) == \"\" {\n\t\tconf, err = dns.ClientConfigFromFile(\"\/etc\/resolv.conf\")\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t} else {\n\t\t\/\/ we now use the config always, for things like timeouts,\n\t\t\/\/ so construct a skeletal one\n\t\tconf = &dns.ClientConfig{\n\t\t\tAttempts: 3,\n\t\t}\n\t}\n\n\tc := new(dns.Client)\n\n\tdnsSettings.conf = conf\n\tdnsSettings.client = c\n\n\treturn dnsSettings.conf, dnsSettings.client, nil\n}\n\nfunc resolversFromList(input []string, defDNSPort string) []string {\n\tr := make([]string, len(input))\n\tfor i := range input {\n\t\tr[i] = HostPortWithDefaultPort(input[i], defDNSPort)\n\t}\n\treturn r\n}\n\nvar resolverSplitRE *regexp.Regexp\n\nfunc init() {\n\tresolverSplitRE = regexp.MustCompile(`[,\\s]+`)\n}\n\nfunc resolversFromString(input string) []string {\n\treturn resolversFromList(resolverSplitRE.Split(input, -1), \"53\")\n}\n\n\/\/ FIXME: This is not doing DNS validation locally.\n\/\/ It's resolving DNS, delegating trust in validation to the resolver by\n\/\/ trusting the AD bit.\n\/\/ I want to get this working without needing a validating resolver.\n\/\/ This should be a standalone monitoring tool.\nfunc resolveRRSecure(\n\t\/\/ the cbfunc is called the the confirmed RR type and the rr and the rrname;\n\t\/\/ it should return an item to be added to the resolveRRSecure return list,\n\t\/\/ and an error; non-nil error inhibits appending to the list.\n\tcbfunc func(typ uint16, rr dns.RR, rrname string) (interface{}, error),\n\trrname string,\n\ttyplist ...uint16,\n) ([]interface{}, error) {\n\tconfig, c, err := initDNS()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resolvers []string\n\tif r := os.Getenv(EnvKeyDNSResolver); r != \"\" {\n\t\tresolvers = resolversFromString(r)\n\t} else {\n\t\tresolvers = resolversFromList(config.Servers, config.Port)\n\t}\n\n\tresultList := make([]interface{}, 0, 20)\n\terrList := errorlist.New()\n\n\tm := new(dns.Msg)\n\tm.SetEdns0(dns.DefaultMsgSize, true)\n\n\t\/\/ why is this uint16 ipv dns.Type ? Infelicity stuck in API?\nDNS_RRTYPE_LOOP:\n\tfor _, typ := range typlist {\n\t\tm.SetQuestion(rrname, typ)\n\n\t\tvar (\n\t\t\tr *dns.Msg\n\t\t\terr error\n\t\t)\n\n\tDNS_RESOLVER_LOOP:\n\t\tfor _, resolver := range resolvers {\n\t\t\tc.Net = \"udp\"\n\t\tRETRY_DNS_LOOKUP:\n\t\t\tfor i := 0; i < config.Attempts; i++ {\n\t\t\t\tr, _, err = c.Exchange(m, resolver)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif nerr, ok := err.(net.Error); ok && nerr.Timeout() && i < config.Attempts {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\terrList.Add(err)\n\t\t\t\t\tr = nil\n\t\t\t\t\tcontinue DNS_RESOLVER_LOOP\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif r.Rcode != dns.RcodeSuccess {\n\t\t\t\tfailure, known := dns.RcodeToString[r.Rcode]\n\t\t\t\tif !known {\n\t\t\t\t\tfailure = fmt.Sprintf(\"Rcode<%d> (unknown)\", r.Rcode)\n\t\t\t\t}\n\t\t\t\terrList.AddErrorf(\"DNS lookup non-successful [resolver %v]: %v\", resolver, failure)\n\t\t\t\tr = nil\n\t\t\t\tcontinue DNS_RESOLVER_LOOP\n\t\t\t}\n\n\t\t\t\/\/ Check for truncation first, in case some bad servers truncate\n\t\t\t\/\/ the DNSSEC data needed to be AD.\n\t\t\tif r.Truncated {\n\t\t\t\tc.Net = \"tcp\"\n\t\t\t\tgoto RETRY_DNS_LOOKUP\n\t\t\t}\n\n\t\t\t\/\/ Here we depend upon AD bit and so are still secure, assuming secure\n\t\t\t\/\/ link to trusted resolver.\n\t\t\t\/\/ Assume all our resolvers are equivalent for AD\/not, so if not AD, try the\n\t\t\t\/\/ next type (because some DNS servers break horribly on AAAA).\n\t\t\tif !r.AuthenticatedData {\n\t\t\t\terrList.AddErrorf(\"not AD set for results from %v for %q\/%v query\", resolver, rrname, dns.Type(typ))\n\t\t\t\tr = nil\n\t\t\t\tcontinue DNS_RRTYPE_LOOP\n\t\t\t}\n\t\t}\n\n\t\tif r == nil {\n\t\t\terrList.AddErrorf(\"[%q\/%v]: all DNS resolver queries failed, unable to get authentic result\", rrname, dns.Type(typ))\n\t\t\t\/\/ seems likely might be SERVFAIL from broken auth servers for AAAA records\n\t\t\tcontinue DNS_RRTYPE_LOOP\n\t\t}\n\n\t\tfor _, rr := range r.Answer {\n\t\t\t\/\/ TODO: CNAME?\n\t\t\tif rr.Header().Rrtype != typ {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tx, err := cbfunc(typ, dns.Copy(rr), rrname)\n\t\t\tif err != nil {\n\t\t\t\terrList.Add(err)\n\t\t\t} else {\n\t\t\t\tresultList = append(resultList, x)\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(resultList) == 0 {\n\t\terrList.Add(errors.New(\"no results found\"))\n\t\treturn nil, errList\n\t}\n\treturn resultList, errList.Maybe()\n}\n\n\/\/ There's a lot of repetition\/boilerplate in the below.\n\/\/ If we expand beyond where we are at now, then we really should consider reflection; more complexity, less repetition.\n\nfunc cbRRTypeAddr(typ uint16, rr dns.RR, rrname string) (interface{}, error) {\n\tswitch typ {\n\tcase dns.TypeA:\n\t\tif ip, ok := rr.(*dns.A); ok {\n\t\t\treturn ip.A, nil\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"A record failed to cast to *dns.A [%q\/%v]\", rrname, dns.Type(typ))\n\t\t}\n\tcase dns.TypeAAAA:\n\t\tif ip, ok := rr.(*dns.AAAA); ok {\n\t\t\treturn ip.AAAA, nil\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"AAAA record failed to cast to *dns.AAAA [%q\/%v]\", rrname, dns.Type(typ))\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"BUG: cbRRTypeAddr(%v,..,%q) called, expected A\/AAAA\", dns.Type(typ), rrname)\n}\n\nfunc ResolveAddrSecure(hostname string) ([]net.IP, error) {\n\trl, e := resolveRRSecure(cbRRTypeAddr, dns.Fqdn(hostname), dns.TypeAAAA, dns.TypeA)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\taddrList := make([]net.IP, len(rl))\n\tfor i := range rl {\n\t\taddrList[i] = rl[i].(net.IP)\n\t}\n\treturn addrList, nil\n}\n\ntype TLSAset struct {\n\tRRs []*dns.TLSA\n\tname string\n\tfoundName string\n}\n\nfunc cbRRTypeTLSA(typ uint16, rr dns.RR, rrname string) (interface{}, error) {\n\tswitch typ {\n\tcase dns.TypeTLSA:\n\t\tif tlsa, ok := rr.(*dns.TLSA); ok {\n\t\t\treturn tlsa, nil\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"TLSA record failed to cast to *dns.TLSA [%q\/%v]\", rrname, dns.Type(typ))\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"BUG: cbRRTypeTLSA(%v,..,%q) called, expected TLSA\", dns.Type(typ), rrname)\n}\n\nfunc ResolveTLSA(hostname string, port int) (*TLSAset, error) {\n\ttlsaName := fmt.Sprintf(\"_%d._tcp.%s\", port, dns.Fqdn(hostname))\n\trl, e := resolveRRSecure(cbRRTypeTLSA, tlsaName, dns.TypeTLSA)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\tTLSAList := make([]*dns.TLSA, len(rl))\n\tfor i := range rl {\n\t\tTLSAList[i] = rl[i].(*dns.TLSA)\n\t}\n\n\treturn &TLSAset{\n\t\tRRs: TLSAList,\n\t\tname: tlsaName,\n\t\tfoundName: TLSAList[0].Hdr.Name,\n\t}, nil\n}\n\n\/\/ TLSAShortString provides something suitable for output without showing the\n\/\/ full contents; for our uses, we don't need the RR_Header and for\n\/\/ full-certs-in-DNS we don't _want_ to print it all.\n\/\/ Viktor points out that for full certs in DNS, the start of the record will\n\/\/ be less useful, so show the _last_ 16 octets\n\/\/ TLSAShortString is \"enough to probably fit on a line with much other text\".\nfunc TLSAShortString(rr *dns.TLSA) string {\n\toffset := len(rr.Certificate) - 16\n\tprefix := \"...\"\n\tif offset < 0 {\n\t\toffset = 0\n\t\tprefix = \"\"\n\t}\n\treturn strconv.Itoa(int(rr.Usage)) + \" \" +\n\t\tstrconv.Itoa(int(rr.Selector)) + \" \" +\n\t\tstrconv.Itoa(int(rr.MatchingType)) + \" \" +\n\t\tprefix + rr.Certificate[offset:]\n}\n\n\/\/ TLSAMediumString is for where the TLSA record is probably all that's on a line.\n\/\/ Assume 2 leading spaces, 1 digit for each of the three leading fields, a space\n\/\/ after each, that's 8, allow for 70.\nfunc TLSAMediumString(rr *dns.TLSA) string {\n\tvar rest, prefix string\n\tif len(rr.Certificate) <= 70 {\n\t\trest = rr.Certificate\n\t} else {\n\t\tprefix = \"...\"\n\t\trest = rr.Certificate[(len(rr.Certificate) - 67):]\n\t}\n\treturn strconv.Itoa(int(rr.Usage)) + \" \" +\n\t\tstrconv.Itoa(int(rr.Selector)) + \" \" +\n\t\tstrconv.Itoa(int(rr.MatchingType)) + \" \" +\n\t\tprefix + rest\n}\n\nfunc cbRRTypeMX(typ uint16, rr dns.RR, rrname string) (interface{}, error) {\n\tswitch typ {\n\tcase dns.TypeMX:\n\t\tif mx, ok := rr.(*dns.MX); ok {\n\t\t\treturn mx.Mx, nil\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"MX record failed to cast to *dns.MX [%q\/%v]\", rrname, dns.Type(typ))\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"BUG: cbRRTypeMX(%v,..,%q) called, expected MX\", dns.Type(typ), rrname)\n}\n\n\/\/ ResolveMX only returns the hostnames, we don't care about the Preference\nfunc ResolveMX(hostname string) ([]string, error) {\n\trl, e := resolveRRSecure(cbRRTypeMX, dns.Fqdn(hostname), dns.TypeMX)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\thostnameList := make([]string, len(rl))\n\tfor i := range rl {\n\t\thostnameList[i] = rl[i].(string)\n\t}\n\treturn hostnameList, nil\n}\n\nfunc cbRRTypeSRV(typ uint16, rr dns.RR, rrname string) (interface{}, error) {\n\tswitch typ {\n\tcase dns.TypeSRV:\n\t\tif srv, ok := rr.(*dns.SRV); ok {\n\t\t\treturn srv, nil\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"SRV record failed to cast to *dns.SRV [%q\/%v]\", rrname, dns.Type(typ))\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"BUG: cbRRTypeSRV(%v,..,%q) called, expected SRV\", dns.Type(typ), rrname)\n}\n\n\/\/ ResolveSRV returns MX records, we need at least the Port, not just the Target\nfunc ResolveSRV(lookup string) ([]*dns.SRV, error) {\n\trl, e := resolveRRSecure(cbRRTypeSRV, dns.Fqdn(lookup), dns.TypeSRV)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\tsrvList := make([]*dns.SRV, len(rl))\n\tfor i := range rl {\n\t\tsrvList[i] = rl[i].(*dns.SRV)\n\t}\n\treturn srvList, nil\n}\n<commit_msg>Treat NXDOMAIN as definitive, skip other resolvers<commit_after>\/\/ Copyright © 2017 Pennock Tech, LLC.\n\/\/ All rights reserved, except as granted under license.\n\/\/ Licensed per file LICENSE.txt\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"sync\"\n\n\t\"go.pennock.tech\/smtpdane\/internal\/errorlist\"\n\n\t\"github.com\/miekg\/dns\"\n)\n\nconst EnvKeyDNSResolver = \"DNS_RESOLVER\"\n\nvar dnsSettings struct {\n\tsync.RWMutex\n\tconf *dns.ClientConfig\n\tclient *dns.Client\n}\n\nfunc initDNS() (*dns.ClientConfig, *dns.Client, error) {\n\tdnsSettings.RLock()\n\tif dnsSettings.client != nil {\n\t\tdefer dnsSettings.RUnlock()\n\t\treturn dnsSettings.conf, dnsSettings.client, nil\n\t}\n\tdnsSettings.RUnlock()\n\tdnsSettings.Lock()\n\tdefer dnsSettings.Unlock()\n\tif dnsSettings.client != nil {\n\t\treturn dnsSettings.conf, dnsSettings.client, nil\n\t}\n\n\tvar (\n\t\tconf *dns.ClientConfig\n\t\terr error\n\t)\n\tif os.Getenv(EnvKeyDNSResolver) == \"\" {\n\t\tconf, err = dns.ClientConfigFromFile(\"\/etc\/resolv.conf\")\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t} else {\n\t\t\/\/ we now use the config always, for things like timeouts,\n\t\t\/\/ so construct a skeletal one\n\t\tconf = &dns.ClientConfig{\n\t\t\tAttempts: 3,\n\t\t}\n\t}\n\n\tc := new(dns.Client)\n\n\tdnsSettings.conf = conf\n\tdnsSettings.client = c\n\n\treturn dnsSettings.conf, dnsSettings.client, nil\n}\n\nfunc resolversFromList(input []string, defDNSPort string) []string {\n\tr := make([]string, len(input))\n\tfor i := range input {\n\t\tr[i] = HostPortWithDefaultPort(input[i], defDNSPort)\n\t}\n\treturn r\n}\n\nvar resolverSplitRE *regexp.Regexp\n\nfunc init() {\n\tresolverSplitRE = regexp.MustCompile(`[,\\s]+`)\n}\n\nfunc resolversFromString(input string) []string {\n\treturn resolversFromList(resolverSplitRE.Split(input, -1), \"53\")\n}\n\n\/\/ FIXME: This is not doing DNS validation locally.\n\/\/ It's resolving DNS, delegating trust in validation to the resolver by\n\/\/ trusting the AD bit.\n\/\/ I want to get this working without needing a validating resolver.\n\/\/ This should be a standalone monitoring tool.\nfunc resolveRRSecure(\n\t\/\/ the cbfunc is called the the confirmed RR type and the rr and the rrname;\n\t\/\/ it should return an item to be added to the resolveRRSecure return list,\n\t\/\/ and an error; non-nil error inhibits appending to the list.\n\tcbfunc func(typ uint16, rr dns.RR, rrname string) (interface{}, error),\n\trrname string,\n\ttyplist ...uint16,\n) ([]interface{}, error) {\n\tconfig, c, err := initDNS()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resolvers []string\n\tif r := os.Getenv(EnvKeyDNSResolver); r != \"\" {\n\t\tresolvers = resolversFromString(r)\n\t} else {\n\t\tresolvers = resolversFromList(config.Servers, config.Port)\n\t}\n\n\tresultList := make([]interface{}, 0, 20)\n\terrList := errorlist.New()\n\n\tm := new(dns.Msg)\n\tm.SetEdns0(dns.DefaultMsgSize, true)\n\n\t\/\/ why is this uint16 ipv dns.Type ? Infelicity stuck in API?\nDNS_RRTYPE_LOOP:\n\tfor _, typ := range typlist {\n\t\tm.SetQuestion(rrname, typ)\n\n\t\tvar (\n\t\t\tr *dns.Msg\n\t\t\terr error\n\t\t)\n\n\tDNS_RESOLVER_LOOP:\n\t\tfor _, resolver := range resolvers {\n\t\t\tc.Net = \"udp\"\n\t\tRETRY_DNS_LOOKUP:\n\t\t\tfor i := 0; i < config.Attempts; i++ {\n\t\t\t\tr, _, err = c.Exchange(m, resolver)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif nerr, ok := err.(net.Error); ok && nerr.Timeout() && i < config.Attempts {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\terrList.Add(err)\n\t\t\t\t\tr = nil\n\t\t\t\t\tcontinue DNS_RESOLVER_LOOP\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif r.Rcode != dns.RcodeSuccess {\n\t\t\t\tfailure, known := dns.RcodeToString[r.Rcode]\n\t\t\t\tif !known {\n\t\t\t\t\tfailure = fmt.Sprintf(\"Rcode<%d> (unknown)\", r.Rcode)\n\t\t\t\t}\n\t\t\t\terrList.AddErrorf(\"DNS lookup non-successful [resolver %v]: %v\", resolver, failure)\n\t\t\t\trcode := r.Rcode\n\t\t\t\tr = nil\n\t\t\t\t\/\/ There are enough broken server implementations when it comes\n\t\t\t\t\/\/ to AD and unknown types (often including AAAA) that we\n\t\t\t\t\/\/ currently only consider NXDOMAIN definitive.\n\t\t\t\t\/\/ We can expand upon this as needed.\n\t\t\t\tswitch rcode {\n\t\t\t\tcase dns.RcodeNameError:\n\t\t\t\t\tcontinue DNS_RRTYPE_LOOP\n\t\t\t\tdefault:\n\t\t\t\t\tcontinue DNS_RESOLVER_LOOP\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Check for truncation first, in case some bad servers truncate\n\t\t\t\/\/ the DNSSEC data needed to be AD.\n\t\t\tif r.Truncated {\n\t\t\t\tc.Net = \"tcp\"\n\t\t\t\tgoto RETRY_DNS_LOOKUP\n\t\t\t}\n\n\t\t\t\/\/ Here we depend upon AD bit and so are still secure, assuming secure\n\t\t\t\/\/ link to trusted resolver.\n\t\t\t\/\/ Assume all our resolvers are equivalent for AD\/not, so if not AD, try the\n\t\t\t\/\/ next type (because some DNS servers break horribly on AAAA).\n\t\t\tif !r.AuthenticatedData {\n\t\t\t\terrList.AddErrorf(\"not AD set for results from %v for %q\/%v query\", resolver, rrname, dns.Type(typ))\n\t\t\t\tr = nil\n\t\t\t\tcontinue DNS_RRTYPE_LOOP\n\t\t\t}\n\t\t}\n\n\t\tif r == nil {\n\t\t\terrList.AddErrorf(\"[%q\/%v]: all DNS resolver queries failed, unable to get authentic result\", rrname, dns.Type(typ))\n\t\t\t\/\/ seems likely might be SERVFAIL from broken auth servers for AAAA records\n\t\t\tcontinue DNS_RRTYPE_LOOP\n\t\t}\n\n\t\tfor _, rr := range r.Answer {\n\t\t\t\/\/ TODO: CNAME?\n\t\t\tif rr.Header().Rrtype != typ {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tx, err := cbfunc(typ, dns.Copy(rr), rrname)\n\t\t\tif err != nil {\n\t\t\t\terrList.Add(err)\n\t\t\t} else {\n\t\t\t\tresultList = append(resultList, x)\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(resultList) == 0 {\n\t\terrList.Add(errors.New(\"no results found\"))\n\t\treturn nil, errList\n\t}\n\treturn resultList, errList.Maybe()\n}\n\n\/\/ There's a lot of repetition\/boilerplate in the below.\n\/\/ If we expand beyond where we are at now, then we really should consider reflection; more complexity, less repetition.\n\nfunc cbRRTypeAddr(typ uint16, rr dns.RR, rrname string) (interface{}, error) {\n\tswitch typ {\n\tcase dns.TypeA:\n\t\tif ip, ok := rr.(*dns.A); ok {\n\t\t\treturn ip.A, nil\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"A record failed to cast to *dns.A [%q\/%v]\", rrname, dns.Type(typ))\n\t\t}\n\tcase dns.TypeAAAA:\n\t\tif ip, ok := rr.(*dns.AAAA); ok {\n\t\t\treturn ip.AAAA, nil\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"AAAA record failed to cast to *dns.AAAA [%q\/%v]\", rrname, dns.Type(typ))\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"BUG: cbRRTypeAddr(%v,..,%q) called, expected A\/AAAA\", dns.Type(typ), rrname)\n}\n\nfunc ResolveAddrSecure(hostname string) ([]net.IP, error) {\n\trl, e := resolveRRSecure(cbRRTypeAddr, dns.Fqdn(hostname), dns.TypeAAAA, dns.TypeA)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\taddrList := make([]net.IP, len(rl))\n\tfor i := range rl {\n\t\taddrList[i] = rl[i].(net.IP)\n\t}\n\treturn addrList, nil\n}\n\ntype TLSAset struct {\n\tRRs []*dns.TLSA\n\tname string\n\tfoundName string\n}\n\nfunc cbRRTypeTLSA(typ uint16, rr dns.RR, rrname string) (interface{}, error) {\n\tswitch typ {\n\tcase dns.TypeTLSA:\n\t\tif tlsa, ok := rr.(*dns.TLSA); ok {\n\t\t\treturn tlsa, nil\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"TLSA record failed to cast to *dns.TLSA [%q\/%v]\", rrname, dns.Type(typ))\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"BUG: cbRRTypeTLSA(%v,..,%q) called, expected TLSA\", dns.Type(typ), rrname)\n}\n\nfunc ResolveTLSA(hostname string, port int) (*TLSAset, error) {\n\ttlsaName := fmt.Sprintf(\"_%d._tcp.%s\", port, dns.Fqdn(hostname))\n\trl, e := resolveRRSecure(cbRRTypeTLSA, tlsaName, dns.TypeTLSA)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\n\tTLSAList := make([]*dns.TLSA, len(rl))\n\tfor i := range rl {\n\t\tTLSAList[i] = rl[i].(*dns.TLSA)\n\t}\n\n\treturn &TLSAset{\n\t\tRRs: TLSAList,\n\t\tname: tlsaName,\n\t\tfoundName: TLSAList[0].Hdr.Name,\n\t}, nil\n}\n\n\/\/ TLSAShortString provides something suitable for output without showing the\n\/\/ full contents; for our uses, we don't need the RR_Header and for\n\/\/ full-certs-in-DNS we don't _want_ to print it all.\n\/\/ Viktor points out that for full certs in DNS, the start of the record will\n\/\/ be less useful, so show the _last_ 16 octets\n\/\/ TLSAShortString is \"enough to probably fit on a line with much other text\".\nfunc TLSAShortString(rr *dns.TLSA) string {\n\toffset := len(rr.Certificate) - 16\n\tprefix := \"...\"\n\tif offset < 0 {\n\t\toffset = 0\n\t\tprefix = \"\"\n\t}\n\treturn strconv.Itoa(int(rr.Usage)) + \" \" +\n\t\tstrconv.Itoa(int(rr.Selector)) + \" \" +\n\t\tstrconv.Itoa(int(rr.MatchingType)) + \" \" +\n\t\tprefix + rr.Certificate[offset:]\n}\n\n\/\/ TLSAMediumString is for where the TLSA record is probably all that's on a line.\n\/\/ Assume 2 leading spaces, 1 digit for each of the three leading fields, a space\n\/\/ after each, that's 8, allow for 70.\nfunc TLSAMediumString(rr *dns.TLSA) string {\n\tvar rest, prefix string\n\tif len(rr.Certificate) <= 70 {\n\t\trest = rr.Certificate\n\t} else {\n\t\tprefix = \"...\"\n\t\trest = rr.Certificate[(len(rr.Certificate) - 67):]\n\t}\n\treturn strconv.Itoa(int(rr.Usage)) + \" \" +\n\t\tstrconv.Itoa(int(rr.Selector)) + \" \" +\n\t\tstrconv.Itoa(int(rr.MatchingType)) + \" \" +\n\t\tprefix + rest\n}\n\nfunc cbRRTypeMX(typ uint16, rr dns.RR, rrname string) (interface{}, error) {\n\tswitch typ {\n\tcase dns.TypeMX:\n\t\tif mx, ok := rr.(*dns.MX); ok {\n\t\t\treturn mx.Mx, nil\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"MX record failed to cast to *dns.MX [%q\/%v]\", rrname, dns.Type(typ))\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"BUG: cbRRTypeMX(%v,..,%q) called, expected MX\", dns.Type(typ), rrname)\n}\n\n\/\/ ResolveMX only returns the hostnames, we don't care about the Preference\nfunc ResolveMX(hostname string) ([]string, error) {\n\trl, e := resolveRRSecure(cbRRTypeMX, dns.Fqdn(hostname), dns.TypeMX)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\thostnameList := make([]string, len(rl))\n\tfor i := range rl {\n\t\thostnameList[i] = rl[i].(string)\n\t}\n\treturn hostnameList, nil\n}\n\nfunc cbRRTypeSRV(typ uint16, rr dns.RR, rrname string) (interface{}, error) {\n\tswitch typ {\n\tcase dns.TypeSRV:\n\t\tif srv, ok := rr.(*dns.SRV); ok {\n\t\t\treturn srv, nil\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"SRV record failed to cast to *dns.SRV [%q\/%v]\", rrname, dns.Type(typ))\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"BUG: cbRRTypeSRV(%v,..,%q) called, expected SRV\", dns.Type(typ), rrname)\n}\n\n\/\/ ResolveSRV returns MX records, we need at least the Port, not just the Target\nfunc ResolveSRV(lookup string) ([]*dns.SRV, error) {\n\trl, e := resolveRRSecure(cbRRTypeSRV, dns.Fqdn(lookup), dns.TypeSRV)\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\tsrvList := make([]*dns.SRV, len(rl))\n\tfor i := range rl {\n\t\tsrvList[i] = rl[i].(*dns.SRV)\n\t}\n\treturn srvList, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"time\"\n)\n\ntype FileMeta struct {\n\tFileName string\n\tModTime time.Time\n\tLineCount int\n}\n\nfunc main() {\n\tflag.Parse()\n\targs := flag.Args()\n\n\tif len(args) != 2 {\n\t\tfmt.Printf(\"Command line usage: file-watcher <folder> <file-pattern>\\n\")\n\t\treturn\n\t}\n\n\troot := flag.Arg(0)\n\tpattern := flag.Arg(1)\n\n\tvar previousFiles, currentFiles, modifiedFiles map[string]FileMeta\n\n\tpreviousFiles = getFilesMatchingPattern(pattern, root)\n\tcountFilesLines(previousFiles)\n\n\tfor fileName, fileMeta := range previousFiles {\n\t\tfmt.Printf(\"Start values: %s %d\\n\", fileName, fileMeta.LineCount)\n\t}\n\n\tticker := time.NewTicker(1 * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\t\/\/ Get a current listing of the files and their mod times\n\t\t\tcurrentFiles = getFilesMatchingPattern(pattern, root)\n\n\t\t\tnewFiles, deletedFiles := getNewAndDeletedFiles(previousFiles, currentFiles)\n\n\t\t\tfor newFile := range newFiles {\n\t\t\t\tfmt.Printf(\"New file: %s\\n\", newFile)\n\t\t\t}\n\n\t\t\tfor deletedFile := range deletedFiles {\n\t\t\t\tfmt.Printf(\"Deleted file: %s\\n\", deletedFile)\n\t\t\t}\n\n\t\t\t\/\/ Get a list of the file names that have been modified based off the mod time\n\t\t\t\/\/ Go through and get updated line counts for just the modified files\n\t\t\tmodifiedFiles = getModifiedFiles(previousFiles, currentFiles)\n\t\t\tif len(modifiedFiles) > 0 {\n\t\t\t\tcountFilesLines(modifiedFiles)\n\t\t\t\t\/\/ Loop through the modified files and list the line count change compared to the preivous check.\n\t\t\t\t\/\/ Update the LineCount property in the currentFiles list as we go.\n\t\t\t\tfor fileName, fileMeta := range modifiedFiles {\n\t\t\t\t\tpreviousFileMeta, _ := previousFiles[fileName]\n\t\t\t\t\tfmt.Printf(\"Last count: %d new count: %d\\n\", previousFileMeta.LineCount, fileMeta.LineCount)\n\t\t\t\t\tcurrentFiles[fileName] = fileMeta\n\t\t\t\t}\n\n\t\t\t}\n\t\t\tpreviousFiles = currentFiles\n\t\t}\n\t}\n}\n\nfunc getFilesMatchingPattern(pattern string, root string) map[string]FileMeta {\n\tfiles := make(map[string]FileMeta)\n\tvisit := func(fileName string, f os.FileInfo, err error) error {\n\t\tmatch, err := path.Match(pattern, f.Name())\n\t\tif match {\n\t\t\tfiles[fileName] = FileMeta{FileName: fileName, ModTime: f.ModTime()}\n\t\t}\n\t\treturn nil\n\t}\n\tfilepath.Walk(root, visit)\n\treturn files\n}\n\nfunc getModifiedFiles(previousFiles, currentFiles map[string]FileMeta) map[string]FileMeta {\n\tmodifiedFiles := make(map[string]FileMeta)\n\n\tfor fileName, previousFileMeta := range previousFiles {\n\t\tif currentFileMeta, ok := currentFiles[fileName]; ok {\n\t\t\tif currentFileMeta.ModTime.After(previousFileMeta.ModTime) {\n\t\t\t\tmodifiedFiles[fileName] = currentFileMeta\n\t\t\t}\n\t\t}\n\t}\n\treturn modifiedFiles\n}\n\nfunc getNewAndDeletedFiles(previousFiles, currentFiles map[string]FileMeta) ([]string, []string) {\n\tvar newFiles, deletedFiles []string\n\tfor fileName, _ := range previousFiles {\n\t\tif _, ok := currentFiles[fileName]; !ok {\n\t\t\tdeletedFiles = append(deletedFiles, fileName)\n\t\t}\n\t}\n\n\tfor fileName, _ := range currentFiles {\n\t\tif _, ok := previousFiles[fileName]; !ok {\n\t\t\tnewFiles = append(newFiles, fileName)\n\t\t}\n\t}\n\n\treturn newFiles, deletedFiles\n}\n\nfunc countFilesLines(files map[string]FileMeta) {\n\tdoneCountingLinesChan := make(chan int)\n\tfor fileName, fileMeta := range files {\n\t\tgo func(fileName string, fileMeta FileMeta, doneCountingLinesChan chan<- int) {\n\t\t\tcurrentFileMeta, _ := files[fileName]\n\t\t\tcurrentFileMeta.LineCount = countFileLines(fileMeta)\n\t\t\tfiles[fileName] = currentFileMeta\n\t\t\tdoneCountingLinesChan <- 1\n\t\t}(fileName, fileMeta, doneCountingLinesChan)\n\t}\n\tfor i := 0; i < len(files); i++ {\n\t\t<-doneCountingLinesChan\n\t}\n\treturn\n}\n\nfunc countFileLines(fileMeta FileMeta) int {\n\tfile, _ := os.Open(fileMeta.FileName)\n\tscanner := bufio.NewScanner(file)\n\tlineCount := 0\n\n\tfor scanner.Scan() {\n\t\tlineCount++\n\t}\n\n\treturn lineCount\n}\n<commit_msg>Almost finished. Just cleaning up<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"time\"\n)\n\ntype FileStatus struct {\n\tFileName string\n\tModTime time.Time\n\tLineCount int\n}\n\nvar root, pattern string\n\nfunc init() {\n\tflag.Parse()\n\targs := flag.Args()\n\n\tif len(args) != 2 {\n\t\tfmt.Printf(\"Command line usage: file-watcher <folder> <file-pattern>\\n\")\n\t\treturn\n\t}\n\n\troot = args[0]\n\tpattern = args[1]\n}\n\nfunc main() {\n\tvar previousFiles, currentFiles, modifiedFiles map[string]FileStatus\n\n\tpreviousFiles = getFilesMatchingPattern(pattern, root)\n\tcountFilesLines(previousFiles)\n\n\tfor fileName, fileStatus := range previousFiles {\n\t\tfmt.Printf(\"Start values: %s %d\\n\", fileName, fileStatus.LineCount)\n\t}\n\n\tticker := time.NewTicker(1 * time.Second)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\t\/\/ Get a current listing of the files and their mod times\n\t\t\tcurrentFiles = getFilesMatchingPattern(pattern, root)\n\n\t\t\t\/\/ Get a list of new files and deleted files\n\t\t\tnewFiles, deletedFiles := getNewAndDeletedFiles(previousFiles, currentFiles)\n\t\t\tfor fileName, fileStatus := range newFiles {\n\t\t\t\tfmt.Printf(\"%s: %s: %d\\n\", \"New\", fileName, fileStatus.LineCount)\n\t\t\t}\n\n\t\t\tfor _, fileName := range deletedFiles {\n\t\t\t\tfmt.Printf(\"%s: %s\\n\", \"Deleted\", fileName)\n\t\t\t}\n\n\t\t\t\/\/ Get a list of the file names that have been modified based off the mod time\n\t\t\t\/\/ Go through and get updated line counts for just the modified files\n\t\t\tmodifiedFiles = getModifiedFiles(previousFiles, currentFiles)\n\t\t\tif len(modifiedFiles) > 0 {\n\t\t\t\tcountFilesLines(modifiedFiles)\n\t\t\t\t\/\/ Loop through the modified files and list the line count change compared to the previous check.\n\t\t\t\tfor fileName, fileStatus := range modifiedFiles {\n\t\t\t\t\tpreviousFileStatus, _ := previousFiles[fileName]\n\t\t\t\t\tdisplayFileDiff(previousFileStatus, fileStatus)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Make sure new files have line counts, and only include the actual current state of the file system.\n\t\t\tpreviousFiles = syncFileStatus(previousFiles, currentFiles, modifiedFiles, newFiles)\n\t\t}\n\t}\n}\n\nfunc displayFileDiff(previousFileStatus, currentFileStatus FileStatus) {\n\tif lineDiff := currentFileStatus.LineCount - previousFileStatus.LineCount; lineDiff == 0 {\n\t\treturn\n\t}\n\tsign := \"\"\n\tif lineDiff > 0 {\n\t\tsign = \"+\"\n\t}\n\tfmt.Printf(\"%s %s%d\\n\", currentFileStatus.FileName, sign, lineDiff)\n}\n\nfunc getFilesMatchingPattern(pattern string, root string) map[string]FileStatus {\n\tfiles := make(map[string]FileStatus)\n\tvisit := func(fileName string, f os.FileInfo, err error) error {\n\t\tmatch, err := path.Match(pattern, f.Name())\n\t\tif match {\n\t\t\tfiles[fileName] = FileStatus{FileName: fileName, ModTime: f.ModTime()}\n\t\t}\n\t\treturn nil\n\t}\n\tfilepath.Walk(root, visit)\n\treturn files\n}\n\nfunc getModifiedFiles(previousFiles, currentFiles map[string]FileStatus) map[string]FileStatus {\n\tmodifiedFiles := make(map[string]FileStatus)\n\n\tfor fileName, previousFileStatus := range previousFiles {\n\t\tif currentFileStatus, ok := currentFiles[fileName]; ok {\n\t\t\tif currentFileStatus.ModTime.After(previousFileStatus.ModTime) {\n\t\t\t\tmodifiedFiles[fileName] = currentFileStatus\n\t\t\t}\n\t\t}\n\t}\n\treturn modifiedFiles\n}\n\nfunc getNewAndDeletedFiles(previousFiles, currentFiles map[string]FileStatus) (map[string]FileStatus, []string) {\n\tnewFiles := make(map[string]FileStatus)\n\tvar deletedFiles []string\n\n\tfor fileName, fileStatus := range currentFiles {\n\t\tif _, ok := previousFiles[fileName]; !ok {\n\t\t\tnewFiles[fileName] = fileStatus\n\t\t}\n\t}\n\tcountFilesLines(newFiles)\n\n\tfor fileName, _ := range previousFiles {\n\t\tif _, ok := currentFiles[fileName]; !ok {\n\t\t\tdeletedFiles = append(deletedFiles, fileName)\n\t\t}\n\t}\n\n\treturn newFiles, deletedFiles\n}\n\nfunc countFilesLines(files map[string]FileStatus) {\n\tdone := make(chan int)\n\tfor fileName, fileStatus := range files {\n\t\tgo func(fileName string, fileStatus FileStatus, done chan<- int) {\n\t\t\tcurrentFileStatus, _ := files[fileName]\n\t\t\tcurrentFileStatus.LineCount = countFileLines(fileStatus.FileName)\n\t\t\tfiles[fileName] = currentFileStatus\n\t\t\tdone <- 1\n\t\t}(fileName, fileStatus, done)\n\t}\n\tfor i := 0; i < len(files); i++ {\n\t\t<-done\n\t}\n\treturn\n}\n\nfunc countFileLines(fileName string) int {\n\tfile, err := os.Open(fileName)\n\tdefer file.Close()\n\tif err != nil {\n\t\tpanic(fmt.Printf(\"Error opening file: %s\", fileName))\n\t}\n\n\tscanner := bufio.NewScanner(file)\n\tlineCount := 0\n\n\tfor scanner.Scan() {\n\t\tlineCount++\n\t}\n\n\treturn lineCount\n}\n\nfunc syncFileStatus(previousFiles, currentFiles, modifiedFiles, newFiles map[string]FileStatus) map[string]FileStatus {\n\t\/\/ Loop through the current files, and get the FileStatus information from the previous files so we don't have to count the lines again.\n\t\/\/ Only do this for items where the ModTime is the same - otherwise keep the currentFiles copy.\n\tfor fileName, fileStatus := range currentFiles {\n\t\tif previousFileStatus, ok := previousFiles[fileName]; ok {\n\t\t\tif fileStatus.ModTime == previousFileStatus.ModTime {\n\t\t\t\t\/\/ Keep the old state - it has the accurate line count!\n\t\t\t\tcurrentFiles[fileName] = previousFileStatus\n\t\t\t}\n\t\t}\n\t}\n\n\tfor fileName, fileStatus := range modifiedFiles {\n\t\tcurrentFiles[fileName] = fileStatus\n\t}\n\n\tfor fileName, fileStatus := range newFiles {\n\t\tcurrentFiles[fileName] = fileStatus\n\t}\n\n\treturn currentFiles\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n)\n\nfunc newFileStorage(params *params) storager {\n\treturn &fileStorage{params}\n}\n\ntype fileStorage struct {\n\tparams *params\n}\n\nfunc (f fileStorage) isExist() bool {\n\tif _, err := os.Stat(f.pathByParams()); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (f fileStorage) save(from string) error {\n\tif err := os.MkdirAll(f.pathByParams(), 0755); err != nil {\n\t\treturn err\n\t}\n\treturn os.Rename(from, filepath.Join(f.pathByParams(), path.Base(from)))\n}\n\nfunc (f fileStorage) pathByParams() string {\n\treturn filepath.Join(\n\t\tstorageDir,\n\t\tf.params.remote,\n\t\tf.params.owner(),\n\t\tf.params.repo,\n\t\tf.params.goos,\n\t\tf.params.goarch,\n\t\tf.params.version,\n\t)\n}\n\nfunc (f fileStorage) get(file string) (string, error) {\n\treturn filepath.Join(f.pathByParams(), file), nil\n}\n<commit_msg>Use mv command instead of os.Rename because avoid invalid cross-device link error.<commit_after>package main\n\nimport (\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"path\/filepath\"\n)\n\nfunc newFileStorage(params *params) storager {\n\treturn &fileStorage{params}\n}\n\ntype fileStorage struct {\n\tparams *params\n}\n\nfunc (f fileStorage) isExist() bool {\n\tif _, err := os.Stat(f.pathByParams()); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (f fileStorage) save(from string) error {\n\tif err := os.MkdirAll(f.pathByParams(), 0755); err != nil {\n\t\treturn err\n\t}\n\treturn exec.Command(\"mv\", from, filepath.Join(f.pathByParams(), path.Base(from))).Run()\n}\n\nfunc (f fileStorage) pathByParams() string {\n\treturn filepath.Join(\n\t\tstorageDir,\n\t\tf.params.remote,\n\t\tf.params.owner(),\n\t\tf.params.repo,\n\t\tf.params.goos,\n\t\tf.params.goarch,\n\t\tf.params.version,\n\t)\n}\n\nfunc (f fileStorage) get(file string) (string, error) {\n\treturn filepath.Join(f.pathByParams(), file), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package random\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"math\/big\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/errwrap\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/helper\/schema\"\n)\n\nfunc resourceId() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: CreateID,\n\t\tRead: RepopulateEncodings,\n\t\tDelete: schema.RemoveFromState,\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: ImportID,\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"keepers\": {\n\t\t\t\tType: schema.TypeMap,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"byte_length\": {\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"prefix\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"b64\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t\tDeprecated: \"Use b64_url for old behavior, or b64_std for standard base64 encoding\",\n\t\t\t},\n\n\t\t\t\"b64_url\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"b64_std\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"hex\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"dec\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc CreateID(d *schema.ResourceData, meta interface{}) error {\n\tbyteLength := d.Get(\"byte_length\").(int)\n\tbytes := make([]byte, byteLength)\n\n\tn, err := rand.Reader.Read(bytes)\n\tif n != byteLength {\n\t\treturn errors.New(\"generated insufficient random bytes\")\n\t}\n\tif err != nil {\n\t\treturn errwrap.Wrapf(\"error generating random bytes: {{err}}\", err)\n\t}\n\n\tb64Str := base64.RawURLEncoding.EncodeToString(bytes)\n\td.SetId(b64Str)\n\n\treturn RepopulateEncodings(d, meta)\n}\n\nfunc RepopulateEncodings(d *schema.ResourceData, _ interface{}) error {\n\tprefix := d.Get(\"prefix\").(string)\n\tbase64Str := d.Id()\n\n\tbytes, err := base64.RawURLEncoding.DecodeString(base64Str)\n\tif err != nil {\n\t\treturn errwrap.Wrapf(\"Error decoding ID: {{err}}\", err)\n\t}\n\n\tb64StdStr := base64.StdEncoding.EncodeToString(bytes)\n\thexStr := hex.EncodeToString(bytes)\n\n\tbigInt := big.Int{}\n\tbigInt.SetBytes(bytes)\n\tdecStr := bigInt.String()\n\n\td.Set(\"b64\", prefix+base64Str)\n\td.Set(\"b64_url\", prefix+base64Str)\n\td.Set(\"b64_std\", prefix+b64StdStr)\n\n\td.Set(\"hex\", prefix+hexStr)\n\td.Set(\"dec\", prefix+decStr)\n\n\treturn nil\n}\n\nfunc ImportID(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {\n\tid := d.Id()\n\n\tsep := strings.LastIndex(id, \",\")\n\tif sep != -1 {\n\t\td.Set(\"prefix\", id[:sep])\n\t\tid = id[sep+1:]\n\t}\n\n\tbytes, err := base64.RawURLEncoding.DecodeString(id)\n\tif err != nil {\n\t\treturn nil, errwrap.Wrapf(\"Error decoding ID: {{err}}\", err)\n\t}\n\n\td.Set(\"byte_length\", len(bytes))\n\td.SetId(id)\n\n\treturn []*schema.ResourceData{d}, nil\n}\n<commit_msg>Remove deprecated attribute<commit_after>package random\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"math\/big\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/errwrap\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/helper\/schema\"\n)\n\nfunc resourceId() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: CreateID,\n\t\tRead: RepopulateEncodings,\n\t\tDelete: schema.RemoveFromState,\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: ImportID,\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"keepers\": {\n\t\t\t\tType: schema.TypeMap,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"byte_length\": {\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"prefix\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\n\t\t\t\"b64_url\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"b64_std\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"hex\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"dec\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc CreateID(d *schema.ResourceData, meta interface{}) error {\n\tbyteLength := d.Get(\"byte_length\").(int)\n\tbytes := make([]byte, byteLength)\n\n\tn, err := rand.Reader.Read(bytes)\n\tif n != byteLength {\n\t\treturn errors.New(\"generated insufficient random bytes\")\n\t}\n\tif err != nil {\n\t\treturn errwrap.Wrapf(\"error generating random bytes: {{err}}\", err)\n\t}\n\n\tb64Str := base64.RawURLEncoding.EncodeToString(bytes)\n\td.SetId(b64Str)\n\n\treturn RepopulateEncodings(d, meta)\n}\n\nfunc RepopulateEncodings(d *schema.ResourceData, _ interface{}) error {\n\tprefix := d.Get(\"prefix\").(string)\n\tbase64Str := d.Id()\n\n\tbytes, err := base64.RawURLEncoding.DecodeString(base64Str)\n\tif err != nil {\n\t\treturn errwrap.Wrapf(\"Error decoding ID: {{err}}\", err)\n\t}\n\n\tb64StdStr := base64.StdEncoding.EncodeToString(bytes)\n\thexStr := hex.EncodeToString(bytes)\n\n\tbigInt := big.Int{}\n\tbigInt.SetBytes(bytes)\n\tdecStr := bigInt.String()\n\n\td.Set(\"b64_url\", prefix+base64Str)\n\td.Set(\"b64_std\", prefix+b64StdStr)\n\n\td.Set(\"hex\", prefix+hexStr)\n\td.Set(\"dec\", prefix+decStr)\n\n\treturn nil\n}\n\nfunc ImportID(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {\n\tid := d.Id()\n\n\tsep := strings.LastIndex(id, \",\")\n\tif sep != -1 {\n\t\td.Set(\"prefix\", id[:sep])\n\t\tid = id[sep+1:]\n\t}\n\n\tbytes, err := base64.RawURLEncoding.DecodeString(id)\n\tif err != nil {\n\t\treturn nil, errwrap.Wrapf(\"Error decoding ID: {{err}}\", err)\n\t}\n\n\td.Set(\"byte_length\", len(bytes))\n\td.SetId(id)\n\n\treturn []*schema.ResourceData{d}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package revel\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n)\n\n\/\/ Map from \"Controller\" or \"Controller.Method\" to the Filter chain\nvar filterOverrides = make(map[string][]Filter)\n\n\/\/ FilterConfigurator allows the developer configure the filter chain on a\n\/\/ per-controller or per-action basis. The filter configuration is applied by\n\/\/ the FilterConfiguringFilter, which is itself a filter stage. For example,\n\/\/\n\/\/ Assuming:\n\/\/ Filters = []Filter{\n\/\/ RouterFilter,\n\/\/ FilterConfiguringFilter,\n\/\/ SessionFilter,\n\/\/ ActionInvoker,\n\/\/ }\n\/\/\n\/\/ Add:\n\/\/ FilterAction(App.Action).\n\/\/ Add(OtherFilter)\n\/\/\n\/\/ => RouterFilter, FilterConfiguringFilter, SessionFilter, OtherFilter, ActionInvoker\n\/\/\n\/\/ Remove:\n\/\/ FilterAction(App.Action).\n\/\/ Remove(SessionFilter)\n\/\/\n\/\/ => RouterFilter, FilterConfiguringFilter, OtherFilter, ActionInvoker\n\/\/\n\/\/ Insert:\n\/\/ FilterAction(App.Action).\n\/\/ Insert(OtherFilter, revel.BEFORE, SessionFilter)\n\/\/\n\/\/ => RouterFilter, FilterConfiguringFilter, OtherFilter, SessionFilter, ActionInvoker\n\/\/\n\/\/ Filter modifications may be combined between Controller and Action. For example:\n\/\/ FilterController(App{}).\n\/\/ Add(Filter1)\n\/\/ FilterAction(App.Action).\n\/\/ Add(Filter2)\n\/\/\n\/\/ .. would result in App.Action being filtered by both Filter1 and Filter2.\n\/\/\n\/\/ Note: the last filter stage is not subject to the configurator. In\n\/\/ particular, Add() adds a filter to the second-to-last place.\ntype FilterConfigurator struct {\n\tkey string \/\/ e.g. \"App\", \"App.Action\"\n\tcontrollerName string \/\/ e.g. \"App\"\n}\n\nfunc newFilterConfigurator(controllerName, methodName string) FilterConfigurator {\n\tif methodName == \"\" {\n\t\treturn FilterConfigurator{controllerName, controllerName}\n\t}\n\treturn FilterConfigurator{controllerName + \".\" + methodName, controllerName}\n}\n\n\/\/ FilterController returns a configurator for the filters applied to all\n\/\/ actions on the given controller instance. For example:\n\/\/ FilterAction(MyController{})\nfunc FilterController(controllerInstance interface{}) FilterConfigurator {\n\tt := reflect.TypeOf(controllerInstance)\n\tfor t.Kind() == reflect.Ptr {\n\t\tt = t.Elem()\n\t}\n\treturn newFilterConfigurator(t.Name(), \"\")\n}\n\n\/\/ FilterAction returns a configurator for the filters applied to the given\n\/\/ controller method. For example:\n\/\/ FilterAction(MyController.MyAction)\nfunc FilterAction(methodRef interface{}) FilterConfigurator {\n\tvar (\n\t\tmethodValue = reflect.ValueOf(methodRef)\n\t\tmethodType = methodValue.Type()\n\t)\n\tif methodType.Kind() != reflect.Func || methodType.NumIn() == 0 {\n\t\tpanic(\"Expecting a controller method reference (e.g. Controller.Action), got a \" +\n\t\t\tmethodType.String())\n\t}\n\n\tcontrollerType := methodType.In(0)\n\tmethod := FindMethod(controllerType, methodValue)\n\tif method == nil {\n\t\tpanic(\"Action not found on controller \" + controllerType.Name())\n\t}\n\n\tfor controllerType.Kind() == reflect.Ptr {\n\t\tcontrollerType = controllerType.Elem()\n\t}\n\n\treturn newFilterConfigurator(controllerType.Name(), method.Name)\n}\n\n\/\/ Add the given filter in the second-to-last position in the filter chain.\n\/\/ (Second-to-last so that it is before ActionInvoker)\nfunc (conf FilterConfigurator) Add(f Filter) FilterConfigurator {\n\tconf.apply(func(fc []Filter) []Filter {\n\t\treturn conf.addFilter(f, fc)\n\t})\n\treturn conf\n}\n\nfunc (conf FilterConfigurator) addFilter(f Filter, fc []Filter) []Filter {\n\treturn append(fc[:len(fc)-1], f, fc[len(fc)-1])\n}\n\n\/\/ Remove a filter from the filter chain.\nfunc (conf FilterConfigurator) Remove(target Filter) FilterConfigurator {\n\tconf.apply(func(fc []Filter) []Filter {\n\t\treturn conf.rmFilter(target, fc)\n\t})\n\treturn conf\n}\n\nfunc (conf FilterConfigurator) rmFilter(target Filter, fc []Filter) []Filter {\n\tfor i, f := range fc {\n\t\tif FilterEq(f, target) {\n\t\t\treturn append(fc[:i], fc[i+1:]...)\n\t\t}\n\t}\n\treturn fc\n}\n\n\/\/ Insert a filter into the filter chain before or after another.\n\/\/ This may be called with the BEFORE or AFTER constants, for example:\n\/\/ revel.FilterAction(App.Index).\n\/\/ Insert(MyFilter, revel.BEFORE, revel.ActionInvoker).\n\/\/ Insert(MyFilter2, revel.AFTER, revel.PanicFilter)\nfunc (conf FilterConfigurator) Insert(insert Filter, where When, target Filter) FilterConfigurator {\n\tif where != BEFORE && where != AFTER {\n\t\tpanic(\"where must be BEFORE or AFTER\")\n\t}\n\tconf.apply(func(fc []Filter) []Filter {\n\t\treturn conf.insertFilter(insert, where, target, fc)\n\t})\n\treturn conf\n}\n\nfunc (conf FilterConfigurator) insertFilter(insert Filter, where When, target Filter, fc []Filter) []Filter {\n\tfor i, f := range fc {\n\t\tif FilterEq(f, target) {\n\t\t\tif where == BEFORE {\n\t\t\t\treturn append(fc[:i], append([]Filter{insert}, fc[i:]...)...)\n\t\t\t} else {\n\t\t\t\treturn append(fc[:i+1], append([]Filter{insert}, fc[i+1:]...)...)\n\t\t\t}\n\t\t}\n\t}\n\treturn fc\n}\n\n\/\/ getChain returns the filter chain that applies to the given controller or\n\/\/ action. If no overrides are configured, then a copy of the default filter\n\/\/ chain is returned.\nfunc (conf FilterConfigurator) getChain() []Filter {\n\tvar filters []Filter\n\tif filters = getOverrideChain(conf.controllerName, conf.key); filters == nil {\n\t\t\/\/ The override starts with all filters after FilterConfiguringFilter\n\t\tfor i, f := range Filters {\n\t\t\tif FilterEq(f, FilterConfiguringFilter) {\n\t\t\t\tfilters = make([]Filter, len(Filters)-i-1)\n\t\t\t\tcopy(filters, Filters[i+1:])\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif filters == nil {\n\t\t\tpanic(\"FilterConfiguringFilter not found in revel.Filters.\")\n\t\t}\n\t}\n\treturn filters\n}\n\n\/\/ apply applies the given functional change to the filter overrides.\n\/\/ No other function modifies the filterOverrides map.\nfunc (conf FilterConfigurator) apply(f func([]Filter) []Filter) {\n\t\/\/ Updates any actions that have had their filters overridden, if this is a\n\t\/\/ Controller configurator.\n\tif conf.controllerName == conf.key {\n\t\tfor k, v := range filterOverrides {\n\t\t\tif strings.HasPrefix(k, conf.controllerName+\".\") {\n\t\t\t\tfilterOverrides[k] = f(v)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Update the Controller or Action overrides.\n\tfilterOverrides[conf.key] = f(conf.getChain())\n}\n\n\/\/ FilterEq returns true if the two filters reference the same filter.\nfunc FilterEq(a, b Filter) bool {\n\treturn reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer()\n}\n\n\/\/ FilterConfiguringFilter is a filter stage that customizes the remaining\n\/\/ filter chain for the action being invoked.\nfunc FilterConfiguringFilter(c *Controller, fc []Filter) {\n\tif newChain := getOverrideChain(c.Name, c.Action); newChain != nil {\n\t\tnewChain[0](c, newChain[1:])\n\t\treturn\n\t}\n\tfc[0](c, fc[1:])\n}\n\n\/\/ getOverrideChain retrieves the overrides for the action that is set\nfunc getOverrideChain(controllerName, action string) []Filter {\n\tif newChain, ok := filterOverrides[action]; ok {\n\t\treturn newChain\n\t}\n\tif newChain, ok := filterOverrides[controllerName]; ok {\n\t\treturn newChain\n\t}\n\treturn nil\n}\n<commit_msg>fix FilterController example<commit_after>package revel\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n)\n\n\/\/ Map from \"Controller\" or \"Controller.Method\" to the Filter chain\nvar filterOverrides = make(map[string][]Filter)\n\n\/\/ FilterConfigurator allows the developer configure the filter chain on a\n\/\/ per-controller or per-action basis. The filter configuration is applied by\n\/\/ the FilterConfiguringFilter, which is itself a filter stage. For example,\n\/\/\n\/\/ Assuming:\n\/\/ Filters = []Filter{\n\/\/ RouterFilter,\n\/\/ FilterConfiguringFilter,\n\/\/ SessionFilter,\n\/\/ ActionInvoker,\n\/\/ }\n\/\/\n\/\/ Add:\n\/\/ FilterAction(App.Action).\n\/\/ Add(OtherFilter)\n\/\/\n\/\/ => RouterFilter, FilterConfiguringFilter, SessionFilter, OtherFilter, ActionInvoker\n\/\/\n\/\/ Remove:\n\/\/ FilterAction(App.Action).\n\/\/ Remove(SessionFilter)\n\/\/\n\/\/ => RouterFilter, FilterConfiguringFilter, OtherFilter, ActionInvoker\n\/\/\n\/\/ Insert:\n\/\/ FilterAction(App.Action).\n\/\/ Insert(OtherFilter, revel.BEFORE, SessionFilter)\n\/\/\n\/\/ => RouterFilter, FilterConfiguringFilter, OtherFilter, SessionFilter, ActionInvoker\n\/\/\n\/\/ Filter modifications may be combined between Controller and Action. For example:\n\/\/ FilterController(App{}).\n\/\/ Add(Filter1)\n\/\/ FilterAction(App.Action).\n\/\/ Add(Filter2)\n\/\/\n\/\/ .. would result in App.Action being filtered by both Filter1 and Filter2.\n\/\/\n\/\/ Note: the last filter stage is not subject to the configurator. In\n\/\/ particular, Add() adds a filter to the second-to-last place.\ntype FilterConfigurator struct {\n\tkey string \/\/ e.g. \"App\", \"App.Action\"\n\tcontrollerName string \/\/ e.g. \"App\"\n}\n\nfunc newFilterConfigurator(controllerName, methodName string) FilterConfigurator {\n\tif methodName == \"\" {\n\t\treturn FilterConfigurator{controllerName, controllerName}\n\t}\n\treturn FilterConfigurator{controllerName + \".\" + methodName, controllerName}\n}\n\n\/\/ FilterController returns a configurator for the filters applied to all\n\/\/ actions on the given controller instance. For example:\n\/\/ FilterController(MyController{})\nfunc FilterController(controllerInstance interface{}) FilterConfigurator {\n\tt := reflect.TypeOf(controllerInstance)\n\tfor t.Kind() == reflect.Ptr {\n\t\tt = t.Elem()\n\t}\n\treturn newFilterConfigurator(t.Name(), \"\")\n}\n\n\/\/ FilterAction returns a configurator for the filters applied to the given\n\/\/ controller method. For example:\n\/\/ FilterAction(MyController.MyAction)\nfunc FilterAction(methodRef interface{}) FilterConfigurator {\n\tvar (\n\t\tmethodValue = reflect.ValueOf(methodRef)\n\t\tmethodType = methodValue.Type()\n\t)\n\tif methodType.Kind() != reflect.Func || methodType.NumIn() == 0 {\n\t\tpanic(\"Expecting a controller method reference (e.g. Controller.Action), got a \" +\n\t\t\tmethodType.String())\n\t}\n\n\tcontrollerType := methodType.In(0)\n\tmethod := FindMethod(controllerType, methodValue)\n\tif method == nil {\n\t\tpanic(\"Action not found on controller \" + controllerType.Name())\n\t}\n\n\tfor controllerType.Kind() == reflect.Ptr {\n\t\tcontrollerType = controllerType.Elem()\n\t}\n\n\treturn newFilterConfigurator(controllerType.Name(), method.Name)\n}\n\n\/\/ Add the given filter in the second-to-last position in the filter chain.\n\/\/ (Second-to-last so that it is before ActionInvoker)\nfunc (conf FilterConfigurator) Add(f Filter) FilterConfigurator {\n\tconf.apply(func(fc []Filter) []Filter {\n\t\treturn conf.addFilter(f, fc)\n\t})\n\treturn conf\n}\n\nfunc (conf FilterConfigurator) addFilter(f Filter, fc []Filter) []Filter {\n\treturn append(fc[:len(fc)-1], f, fc[len(fc)-1])\n}\n\n\/\/ Remove a filter from the filter chain.\nfunc (conf FilterConfigurator) Remove(target Filter) FilterConfigurator {\n\tconf.apply(func(fc []Filter) []Filter {\n\t\treturn conf.rmFilter(target, fc)\n\t})\n\treturn conf\n}\n\nfunc (conf FilterConfigurator) rmFilter(target Filter, fc []Filter) []Filter {\n\tfor i, f := range fc {\n\t\tif FilterEq(f, target) {\n\t\t\treturn append(fc[:i], fc[i+1:]...)\n\t\t}\n\t}\n\treturn fc\n}\n\n\/\/ Insert a filter into the filter chain before or after another.\n\/\/ This may be called with the BEFORE or AFTER constants, for example:\n\/\/ revel.FilterAction(App.Index).\n\/\/ Insert(MyFilter, revel.BEFORE, revel.ActionInvoker).\n\/\/ Insert(MyFilter2, revel.AFTER, revel.PanicFilter)\nfunc (conf FilterConfigurator) Insert(insert Filter, where When, target Filter) FilterConfigurator {\n\tif where != BEFORE && where != AFTER {\n\t\tpanic(\"where must be BEFORE or AFTER\")\n\t}\n\tconf.apply(func(fc []Filter) []Filter {\n\t\treturn conf.insertFilter(insert, where, target, fc)\n\t})\n\treturn conf\n}\n\nfunc (conf FilterConfigurator) insertFilter(insert Filter, where When, target Filter, fc []Filter) []Filter {\n\tfor i, f := range fc {\n\t\tif FilterEq(f, target) {\n\t\t\tif where == BEFORE {\n\t\t\t\treturn append(fc[:i], append([]Filter{insert}, fc[i:]...)...)\n\t\t\t} else {\n\t\t\t\treturn append(fc[:i+1], append([]Filter{insert}, fc[i+1:]...)...)\n\t\t\t}\n\t\t}\n\t}\n\treturn fc\n}\n\n\/\/ getChain returns the filter chain that applies to the given controller or\n\/\/ action. If no overrides are configured, then a copy of the default filter\n\/\/ chain is returned.\nfunc (conf FilterConfigurator) getChain() []Filter {\n\tvar filters []Filter\n\tif filters = getOverrideChain(conf.controllerName, conf.key); filters == nil {\n\t\t\/\/ The override starts with all filters after FilterConfiguringFilter\n\t\tfor i, f := range Filters {\n\t\t\tif FilterEq(f, FilterConfiguringFilter) {\n\t\t\t\tfilters = make([]Filter, len(Filters)-i-1)\n\t\t\t\tcopy(filters, Filters[i+1:])\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif filters == nil {\n\t\t\tpanic(\"FilterConfiguringFilter not found in revel.Filters.\")\n\t\t}\n\t}\n\treturn filters\n}\n\n\/\/ apply applies the given functional change to the filter overrides.\n\/\/ No other function modifies the filterOverrides map.\nfunc (conf FilterConfigurator) apply(f func([]Filter) []Filter) {\n\t\/\/ Updates any actions that have had their filters overridden, if this is a\n\t\/\/ Controller configurator.\n\tif conf.controllerName == conf.key {\n\t\tfor k, v := range filterOverrides {\n\t\t\tif strings.HasPrefix(k, conf.controllerName+\".\") {\n\t\t\t\tfilterOverrides[k] = f(v)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Update the Controller or Action overrides.\n\tfilterOverrides[conf.key] = f(conf.getChain())\n}\n\n\/\/ FilterEq returns true if the two filters reference the same filter.\nfunc FilterEq(a, b Filter) bool {\n\treturn reflect.ValueOf(a).Pointer() == reflect.ValueOf(b).Pointer()\n}\n\n\/\/ FilterConfiguringFilter is a filter stage that customizes the remaining\n\/\/ filter chain for the action being invoked.\nfunc FilterConfiguringFilter(c *Controller, fc []Filter) {\n\tif newChain := getOverrideChain(c.Name, c.Action); newChain != nil {\n\t\tnewChain[0](c, newChain[1:])\n\t\treturn\n\t}\n\tfc[0](c, fc[1:])\n}\n\n\/\/ getOverrideChain retrieves the overrides for the action that is set\nfunc getOverrideChain(controllerName, action string) []Filter {\n\tif newChain, ok := filterOverrides[action]; ok {\n\t\treturn newChain\n\t}\n\tif newChain, ok := filterOverrides[controllerName]; ok {\n\t\treturn newChain\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package vcsclient\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\n\t\"sort\"\n\n\t\"golang.org\/x\/tools\/godoc\/vfs\"\n\t\"sourcegraph.com\/sourcegraph\/go-vcs\/vcs\"\n\t\"sourcegraph.com\/sqs\/pbtypes\"\n)\n\ntype FileSystem interface {\n\tvfs.FileSystem\n\tGet(path string) (*TreeEntry, error)\n}\n\ntype repositoryFS struct {\n\tat vcs.CommitID\n\trepo *repository\n}\n\nvar _ FileSystem = &repositoryFS{}\n\nfunc (fs *repositoryFS) Open(name string) (vfs.ReadSeekCloser, error) {\n\te, err := fs.Get(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nopCloser{bytes.NewReader(e.Contents)}, nil\n}\n\nfunc (fs *repositoryFS) Lstat(path string) (os.FileInfo, error) {\n\te, err := fs.Get(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn e.Stat()\n}\n\nfunc (fs *repositoryFS) Stat(path string) (os.FileInfo, error) {\n\t\/\/ TODO(sqs): follow symlinks (as Stat specification requires)\n\te, err := fs.Get(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn e.Stat()\n}\n\nfunc (fs *repositoryFS) ReadDir(path string) ([]os.FileInfo, error) {\n\te, err := fs.Get(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfis := make([]os.FileInfo, len(e.Entries))\n\tfor i, e := range e.Entries {\n\t\tfis[i], err = e.Stat()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn fis, nil\n}\n\nfunc (fs *repositoryFS) String() string {\n\treturn fmt.Sprintf(\"repository %s commit %s (client)\", fs.repo.repoPath, fs.at)\n}\n\n\/\/ Get returns the whole TreeEntry struct for a tree entry.\nfunc (fs *repositoryFS) Get(path string) (*TreeEntry, error) {\n\turl, err := fs.url(path, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := fs.repo.client.NewRequest(\"GET\", url.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar entry *TreeEntry\n\t_, err = fs.repo.client.Do(req, &entry)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn entry, nil\n}\n\n\/\/ FileWithRange is returned by GetFileWithOptions and includes the\n\/\/ returned file's TreeEntry as well as the actual range of lines and\n\/\/ bytes returned (based on the GetFileOptions parameters). That is,\n\/\/ if Start\/EndLine are set in GetFileOptions, this struct's\n\/\/ Start\/EndByte will be set to the actual start and end bytes of\n\/\/ those specified lines, and so on for the other fields in\n\/\/ GetFileOptions.\ntype FileWithRange struct {\n\t*TreeEntry\n\tFileRange \/\/ range of actual returned tree entry contents within file\n}\n\n\/\/ GetFileWithOptions gets a file and allows additional configuration\n\/\/ of the range to return, etc.\nfunc (fs *repositoryFS) GetFileWithOptions(path string, opt GetFileOptions) (*FileWithRange, error) {\n\turl, err := fs.url(path, opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := fs.repo.client.NewRequest(\"GET\", url.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar file *FileWithRange\n\t_, err = fs.repo.client.Do(req, &file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn file, nil\n}\n\n\/\/ A FileGetter is a repository FileSystem that can get files with\n\/\/ extended range options (GetFileWithOptions).\n\/\/\n\/\/ It's generally more efficient to use the client's implementation of\n\/\/ the GetFileWithOptions method instead of calling the\n\/\/ vcsclient.GetFileWithOptions func because the former causes only\n\/\/ the requested range to be sent over the network, while the latter\n\/\/ requests the whole file and narrows the range on the client side.\ntype FileGetter interface {\n\tGetFileWithOptions(path string, opt GetFileOptions) (*FileWithRange, error)\n}\n\n\/\/ GetFileWithOptions gets a file and observes the options specified\n\/\/ in opt. If fs implements FileGetter, fs.GetFileWithOptions is\n\/\/ called; otherwise the options are applied on the client side after\n\/\/ fetching the whole file.\nfunc GetFileWithOptions(fs vfs.FileSystem, path string, opt GetFileOptions) (*FileWithRange, error) {\n\tif fg, ok := fs.(FileGetter); ok {\n\t\treturn fg.GetFileWithOptions(path, opt)\n\t}\n\n\tfi, err := fs.Lstat(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\te := newTreeEntry(fi)\n\tfwr := FileWithRange{TreeEntry: e}\n\n\tif fi.Mode().IsDir() {\n\t\tee, err := readDir(fs, path, int(opt.RecurseSingleSubfolder), true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsort.Sort(TreeEntriesByTypeByName(ee))\n\t\te.Entries = ee\n\t} else if fi.Mode().IsRegular() {\n\t\tf, err := fs.Open(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer f.Close()\n\n\t\tcontents, err := ioutil.ReadAll(f)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\te.Contents = contents\n\n\t\tif empty := (GetFileOptions{}); opt != empty {\n\t\t\tfr, _, err := ComputeFileRange(contents, opt)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t\/\/ Trim to only requested range.\n\t\t\te.Contents = e.Contents[fr.StartByte:fr.EndByte]\n\t\t\tfwr.FileRange = *fr\n\t\t}\n\t}\n\n\treturn &fwr, nil\n}\n\n\/\/ readDir uses the passed vfs.FileSystem to read from starting at the base path.\n\/\/ If recurseSingleSubfolder is true, it will descend and include sub-folders\n\/\/ with a single sub-folder inside. first should always be set to true, other values are used internally.\nfunc readDir(fs vfs.FileSystem, base string, recurseSingleSubfolder int, first bool) ([]*TreeEntry, error) {\n\tentries, err := fs.ReadDir(base)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif recurseSingleSubfolder > 0 && !first && !singleSubDir(entries) {\n\t\treturn nil, nil\n\t}\n\tte := make([]*TreeEntry, len(entries))\n\tfor i, fi := range entries {\n\t\tte[i] = newTreeEntry(fi)\n\t}\n\n\tif recurseSingleSubfolder > 0 {\n\t\tdirEntries := make(map[int]os.FileInfo)\n\t\tfor i, fi := range entries {\n\t\t\t\/\/ expand at most recurseSingleSubfolder subdirectories\n\t\t\tif len(dirEntries) > recurseSingleSubfolder {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif fi.Mode().IsDir() {\n\t\t\t\tdirEntries[i] = fi\n\t\t\t}\n\t\t}\n\t\tfor i, fi := range dirEntries {\n\t\t\tee, err := readDir(fs, path.Join(base, fi.Name()), recurseSingleSubfolder, false)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tte[i].Entries = ee\n\t\t}\n\t}\n\n\treturn te, nil\n}\n\nfunc singleSubDir(entries []os.FileInfo) bool {\n\treturn len(entries) == 1 && entries[0].IsDir()\n}\n\nfunc newTreeEntry(fi os.FileInfo) *TreeEntry {\n\te := &TreeEntry{\n\t\tName: fi.Name(),\n\t\tSize: fi.Size(),\n\t\tModTime: pbtypes.NewTimestamp(fi.ModTime()),\n\t}\n\tif fi.Mode().IsDir() {\n\t\te.Type = DirEntry\n\t} else if fi.Mode().IsRegular() {\n\t\te.Type = FileEntry\n\t} else if fi.Mode()&os.ModeSymlink != 0 {\n\t\te.Type = SymlinkEntry\n\t}\n\treturn e\n}\n\n\/\/ url generates the URL to RouteRepoTreeEntry for the given path (all other\n\/\/ route vars are taken from repositoryFS fields).\nfunc (fs *repositoryFS) url(path string, opt interface{}) (*url.URL, error) {\n\treturn fs.repo.url(RouteRepoTreeEntry, map[string]string{\n\t\t\"CommitID\": string(fs.at),\n\t\t\"Path\": path,\n\t}, opt)\n}\n\ntype nopCloser struct {\n\tio.ReadSeeker\n}\n\nfunc (nc nopCloser) Close() error { return nil }\n<commit_msg>Simplify recursionLimit implementation for readDir<commit_after>package vcsclient\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"path\"\n\n\t\"sort\"\n\n\t\"golang.org\/x\/tools\/godoc\/vfs\"\n\t\"sourcegraph.com\/sourcegraph\/go-vcs\/vcs\"\n\t\"sourcegraph.com\/sqs\/pbtypes\"\n)\n\ntype FileSystem interface {\n\tvfs.FileSystem\n\tGet(path string) (*TreeEntry, error)\n}\n\ntype repositoryFS struct {\n\tat vcs.CommitID\n\trepo *repository\n}\n\nvar _ FileSystem = &repositoryFS{}\n\nfunc (fs *repositoryFS) Open(name string) (vfs.ReadSeekCloser, error) {\n\te, err := fs.Get(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nopCloser{bytes.NewReader(e.Contents)}, nil\n}\n\nfunc (fs *repositoryFS) Lstat(path string) (os.FileInfo, error) {\n\te, err := fs.Get(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn e.Stat()\n}\n\nfunc (fs *repositoryFS) Stat(path string) (os.FileInfo, error) {\n\t\/\/ TODO(sqs): follow symlinks (as Stat specification requires)\n\te, err := fs.Get(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn e.Stat()\n}\n\nfunc (fs *repositoryFS) ReadDir(path string) ([]os.FileInfo, error) {\n\te, err := fs.Get(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfis := make([]os.FileInfo, len(e.Entries))\n\tfor i, e := range e.Entries {\n\t\tfis[i], err = e.Stat()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn fis, nil\n}\n\nfunc (fs *repositoryFS) String() string {\n\treturn fmt.Sprintf(\"repository %s commit %s (client)\", fs.repo.repoPath, fs.at)\n}\n\n\/\/ Get returns the whole TreeEntry struct for a tree entry.\nfunc (fs *repositoryFS) Get(path string) (*TreeEntry, error) {\n\turl, err := fs.url(path, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := fs.repo.client.NewRequest(\"GET\", url.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar entry *TreeEntry\n\t_, err = fs.repo.client.Do(req, &entry)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn entry, nil\n}\n\n\/\/ FileWithRange is returned by GetFileWithOptions and includes the\n\/\/ returned file's TreeEntry as well as the actual range of lines and\n\/\/ bytes returned (based on the GetFileOptions parameters). That is,\n\/\/ if Start\/EndLine are set in GetFileOptions, this struct's\n\/\/ Start\/EndByte will be set to the actual start and end bytes of\n\/\/ those specified lines, and so on for the other fields in\n\/\/ GetFileOptions.\ntype FileWithRange struct {\n\t*TreeEntry\n\tFileRange \/\/ range of actual returned tree entry contents within file\n}\n\n\/\/ GetFileWithOptions gets a file and allows additional configuration\n\/\/ of the range to return, etc.\nfunc (fs *repositoryFS) GetFileWithOptions(path string, opt GetFileOptions) (*FileWithRange, error) {\n\turl, err := fs.url(path, opt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := fs.repo.client.NewRequest(\"GET\", url.String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar file *FileWithRange\n\t_, err = fs.repo.client.Do(req, &file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn file, nil\n}\n\n\/\/ A FileGetter is a repository FileSystem that can get files with\n\/\/ extended range options (GetFileWithOptions).\n\/\/\n\/\/ It's generally more efficient to use the client's implementation of\n\/\/ the GetFileWithOptions method instead of calling the\n\/\/ vcsclient.GetFileWithOptions func because the former causes only\n\/\/ the requested range to be sent over the network, while the latter\n\/\/ requests the whole file and narrows the range on the client side.\ntype FileGetter interface {\n\tGetFileWithOptions(path string, opt GetFileOptions) (*FileWithRange, error)\n}\n\n\/\/ GetFileWithOptions gets a file and observes the options specified\n\/\/ in opt. If fs implements FileGetter, fs.GetFileWithOptions is\n\/\/ called; otherwise the options are applied on the client side after\n\/\/ fetching the whole file.\nfunc GetFileWithOptions(fs vfs.FileSystem, path string, opt GetFileOptions) (*FileWithRange, error) {\n\tif fg, ok := fs.(FileGetter); ok {\n\t\treturn fg.GetFileWithOptions(path, opt)\n\t}\n\n\tfi, err := fs.Lstat(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\te := newTreeEntry(fi)\n\tfwr := FileWithRange{TreeEntry: e}\n\n\tif fi.Mode().IsDir() {\n\t\tee, err := readDir(fs, path, int(opt.RecurseSingleSubfolder), true)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsort.Sort(TreeEntriesByTypeByName(ee))\n\t\te.Entries = ee\n\t} else if fi.Mode().IsRegular() {\n\t\tf, err := fs.Open(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer f.Close()\n\n\t\tcontents, err := ioutil.ReadAll(f)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\te.Contents = contents\n\n\t\tif empty := (GetFileOptions{}); opt != empty {\n\t\t\tfr, _, err := ComputeFileRange(contents, opt)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t\/\/ Trim to only requested range.\n\t\t\te.Contents = e.Contents[fr.StartByte:fr.EndByte]\n\t\t\tfwr.FileRange = *fr\n\t\t}\n\t}\n\n\treturn &fwr, nil\n}\n\n\/\/ readDir uses the passed vfs.FileSystem to read from starting at the base path.\n\/\/ If recurseSingleSubfolder is true, it will descend and include sub-folders\n\/\/ with a single sub-folder inside. first should always be set to true, other values are used internally.\nfunc readDir(fs vfs.FileSystem, base string, recurseSingleSubfolder int, first bool) ([]*TreeEntry, error) {\n\tentries, err := fs.ReadDir(base)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif recurseSingleSubfolder > 0 && !first && !singleSubDir(entries) {\n\t\treturn nil, nil\n\t}\n\tte := make([]*TreeEntry, len(entries))\n\tdirCount := 0\n\tfor i, fi := range entries {\n\t\tte[i] = newTreeEntry(fi)\n\t\tif fi.Mode().IsDir() && dirCount < recurseSingleSubfolder {\n\t\t\tdirCount++\n\t\t\tee, err := readDir(fs, path.Join(base, fi.Name()), recurseSingleSubfolder, false)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tte[i].Entries = ee\n\t\t}\n\t}\n\treturn te, nil\n}\n\nfunc singleSubDir(entries []os.FileInfo) bool {\n\treturn len(entries) == 1 && entries[0].IsDir()\n}\n\nfunc newTreeEntry(fi os.FileInfo) *TreeEntry {\n\te := &TreeEntry{\n\t\tName: fi.Name(),\n\t\tSize: fi.Size(),\n\t\tModTime: pbtypes.NewTimestamp(fi.ModTime()),\n\t}\n\tif fi.Mode().IsDir() {\n\t\te.Type = DirEntry\n\t} else if fi.Mode().IsRegular() {\n\t\te.Type = FileEntry\n\t} else if fi.Mode()&os.ModeSymlink != 0 {\n\t\te.Type = SymlinkEntry\n\t}\n\treturn e\n}\n\n\/\/ url generates the URL to RouteRepoTreeEntry for the given path (all other\n\/\/ route vars are taken from repositoryFS fields).\nfunc (fs *repositoryFS) url(path string, opt interface{}) (*url.URL, error) {\n\treturn fs.repo.url(RouteRepoTreeEntry, map[string]string{\n\t\t\"CommitID\": string(fs.at),\n\t\t\"Path\": path,\n\t}, opt)\n}\n\ntype nopCloser struct {\n\tio.ReadSeeker\n}\n\nfunc (nc nopCloser) Close() error { return nil }\n<|endoftext|>"} {"text":"<commit_before>package mackerel\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n)\n\n\/*\n`\/dashboards` Response\nLegacy\n{\n \"dashboards\": [\n\t{\n\t \"id\": \"2c5bLca8d\",\n\t \"title\": \"My Dashboard\",\n\t \"bodyMarkdown\": \"# A test dashboard\",\n\t \"urlPath\": \"2u4PP3TJqbu\",\n\t \"createdAt\": 1439346145003,\n\t \"updatedAt\": 1439346145003,\n\t \"isLegacy\": true\n\t}\n ]\n}\n\nCurrent\n{\n\t\"dashboards\": [\n\t\t{\n\t\t\t\"id\": \"2c5bLca8e\",\n\t\t\t\"title\": \"My Custom Dashboard(Current)\",\n\t\t\t\"urlPath\": \"2u4PP3TJqbv\",\n\t\t\t\"createdAt\": 1552909732,\n\t\t\t\"updatedAt\": 1552992837,\n\t\t\t\"memo\": \"A test Current Dashboard\"\n\t\t}\n\t]\n}\n*\/\n\n\/*\n`\/dashboards\/${ID}` Response`\nLegacy\n{\n\t\"id\": \"2c5bLca8d\",\n\t\"title\": \"My Dashboard\",\n\t\"bodyMarkdown\": \"# A test dashboard\",\n\t\"urlPath\": \"2u4PP3TJqbu\",\n\t\"createdAt\": 1439346145003,\n\t\"updatedAt\": 1439346145003,\n\t\"isLegacy\": true\n}\nCurrent\n{\n \"id\": \"2c5bLca8e\",\n \"createdAt\": 1552909732,\n \"updatedAt\": 1552992837,\n \"title\": \"My Custom Dashboard(Current),\n \"urlPath\": \"2u4PP3TJqbv\",\n \"memo\": \"A test Current Dashboard\",\n \"widgets\": [\n {\n \"type\": \"markdown\",\n \"title\": \"markdown\",\n \"markdown\": \"# body\",\n \"layout\": {\n \"x\": 0,\n \"y\": 0,\n \"width\": 24,\n \"height\": 3\n }\n },\n {\n \"type\": \"graph\",\n \"title\": \"graph\",\n \"graph\": {\n \"type\": \"host\",\n \"hostId\": \"2u4PP3TJqbw\",\n \"name\": \"loadavg.loadavg15\"\n },\n \"layout\": {\n \"x\": 0,\n \"y\": 7,\n \"width\": 8,\n \"height\": 10\n }\n },\n {\n \"type\": \"value\",\n \"title\": \"value\",\n \"metric\": {\n \"type\": \"expression\",\n \"expression\": \"alias(scale(\\nsum(\\n group(\\n host(2u4PP3TJqbx,loadavg.*)\\n )\\n),\\n1\\n), 'test')\"\n },\n \"layout\": {\n \"x\": 0,\n \"y\": 17,\n \"width\": 8,\n \"height\": 5\n }\n }\n ]\n}\n*\/\n\n\/\/ Dashboard information\ntype Dashboard struct {\n\tID string `json:\"id,omitempty\"`\n\tTitle string `json:\"title,omitempty\"`\n\tBodyMarkDown string `json:\"bodyMarkdown,omitempty\"`\n\tURLPath string `json:\"urlPath,omitempty\"`\n\tCreatedAt int64 `json:\"createdAt,omitempty\"`\n\tUpdatedAt int64 `json:\"updatedAt,omitempty\"`\n\tIsLegacy bool `json:\"isLegacy,omitempty\"`\n\tMemo string `json:\"memo,omitempty\"`\n\tWidgets []Widget `json:\"widgets,omitenpty\"`\n}\n\n\/\/ Widget information\ntype Widget struct {\n\tType string `json:\"type,omitempty\"`\n\tTitle string `json:\"title,omitempty\"`\n\tMetric Metric `json:\"metric,omitempty\"`\n\tGraph Graph `json:\"graph,omitempty\"`\n\tLayout Layout `json:\"layout,omitempty\"`\n\tMarkdown string `json:\"markdown,omitempty\"`\n\tRange Range `json:\"range,omitempty\"`\n}\n\n\/\/ Metric information\ntype Metric struct {\n\tType string `json:\"type,omitempty\"`\n\tHostID string `json:\"hostId,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tServiceName string `json:\"seriviceName,omitempty\"`\n\tExpression string `json:\"expression,omitempty\"`\n}\n\n\/\/ Graph information\ntype Graph struct {\n\tType string `json:\"type,omitempty\"`\n\tHostID string `json:\"hostId,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tRoleFullName string `json:\"roleFullname,omitempty\"`\n\tServiceName string `json:\"seriviceName,omitempty\"`\n\tExpression string `json:\"expression,omitempty\"`\n\tIsStacked bool `json:\"isStacked,omitempty\"`\n}\n\n\/\/ Range information\ntype Range struct {\n\tType string `json:\"type,omitempty\"`\n\tPeriod int64 `json:\"period,omitempty\"`\n\tOffset int64 `json:\"offset,omitempty\"`\n\tStart int64 `json:\"start,omitempty\"`\n\tEnd int64 `json:\"end,omitempty\"`\n}\n\n\/\/ Layout information\ntype Layout struct {\n\tX int64 `json:\"x,omitempty\"`\n\tY int64 `json:\"y,omitempty\"`\n\tWidth int64 `json:\"width,omitempty\"`\n\tHeight int64 `json:\"height,omitempty\"`\n}\n\n\/\/ FindDashboards find dashboards\nfunc (c *Client) FindDashboards() ([]*Dashboard, error) {\n\treq, err := http.NewRequest(\"GET\", c.urlFor(\"\/api\/v0\/dashboards\").String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.Request(req)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data struct {\n\t\tDashboards []*Dashboard `json:\"dashboards\"`\n\t}\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn data.Dashboards, err\n}\n\n\/\/ FindDashboard find dashboard\nfunc (c *Client) FindDashboard(dashboardID string) (*Dashboard, error) {\n\treq, err := http.NewRequest(\"GET\", c.urlFor(fmt.Sprintf(\"\/api\/v0\/dashboards\/%s\", dashboardID)).String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.Request(req)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data Dashboard\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &data, err\n}\n\n\/\/ CreateDashboard creating dashboard\nfunc (c *Client) CreateDashboard(param *Dashboard) (*Dashboard, error) {\n\tresp, err := c.PostJSON(\"\/api\/v0\/dashboards\", param)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data Dashboard\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &data, nil\n}\n\n\/\/ UpdateDashboard update dashboard\nfunc (c *Client) UpdateDashboard(dashboardID string, param *Dashboard) (*Dashboard, error) {\n\tresp, err := c.PutJSON(fmt.Sprintf(\"\/api\/v0\/dashboards\/%s\", dashboardID), param)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data Dashboard\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &data, nil\n}\n\n\/\/ DeleteDashboard delete dashboard\nfunc (c *Client) DeleteDashboard(dashboardID string) (*Dashboard, error) {\n\treq, err := http.NewRequest(\n\t\t\"DELETE\",\n\t\tc.urlFor(fmt.Sprintf(\"\/api\/v0\/dashboards\/%s\", dashboardID)).String(),\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\n\tresp, err := c.Request(req)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data Dashboard\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &data, nil\n}\n<commit_msg>fix typo<commit_after>package mackerel\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n)\n\n\/*\n`\/dashboards` Response\nLegacy\n{\n \"dashboards\": [\n\t{\n\t \"id\": \"2c5bLca8d\",\n\t \"title\": \"My Dashboard\",\n\t \"bodyMarkdown\": \"# A test dashboard\",\n\t \"urlPath\": \"2u4PP3TJqbu\",\n\t \"createdAt\": 1439346145003,\n\t \"updatedAt\": 1439346145003,\n\t \"isLegacy\": true\n\t}\n ]\n}\n\nCurrent\n{\n\t\"dashboards\": [\n\t\t{\n\t\t\t\"id\": \"2c5bLca8e\",\n\t\t\t\"title\": \"My Custom Dashboard(Current)\",\n\t\t\t\"urlPath\": \"2u4PP3TJqbv\",\n\t\t\t\"createdAt\": 1552909732,\n\t\t\t\"updatedAt\": 1552992837,\n\t\t\t\"memo\": \"A test Current Dashboard\"\n\t\t}\n\t]\n}\n*\/\n\n\/*\n`\/dashboards\/${ID}` Response`\nLegacy\n{\n\t\"id\": \"2c5bLca8d\",\n\t\"title\": \"My Dashboard\",\n\t\"bodyMarkdown\": \"# A test dashboard\",\n\t\"urlPath\": \"2u4PP3TJqbu\",\n\t\"createdAt\": 1439346145003,\n\t\"updatedAt\": 1439346145003,\n\t\"isLegacy\": true\n}\nCurrent\n{\n \"id\": \"2c5bLca8e\",\n \"createdAt\": 1552909732,\n \"updatedAt\": 1552992837,\n \"title\": \"My Custom Dashboard(Current),\n \"urlPath\": \"2u4PP3TJqbv\",\n \"memo\": \"A test Current Dashboard\",\n \"widgets\": [\n {\n \"type\": \"markdown\",\n \"title\": \"markdown\",\n \"markdown\": \"# body\",\n \"layout\": {\n \"x\": 0,\n \"y\": 0,\n \"width\": 24,\n \"height\": 3\n }\n },\n {\n \"type\": \"graph\",\n \"title\": \"graph\",\n \"graph\": {\n \"type\": \"host\",\n \"hostId\": \"2u4PP3TJqbw\",\n \"name\": \"loadavg.loadavg15\"\n },\n \"layout\": {\n \"x\": 0,\n \"y\": 7,\n \"width\": 8,\n \"height\": 10\n }\n },\n {\n \"type\": \"value\",\n \"title\": \"value\",\n \"metric\": {\n \"type\": \"expression\",\n \"expression\": \"alias(scale(\\nsum(\\n group(\\n host(2u4PP3TJqbx,loadavg.*)\\n )\\n),\\n1\\n), 'test')\"\n },\n \"layout\": {\n \"x\": 0,\n \"y\": 17,\n \"width\": 8,\n \"height\": 5\n }\n }\n ]\n}\n*\/\n\n\/\/ Dashboard information\ntype Dashboard struct {\n\tID string `json:\"id,omitempty\"`\n\tTitle string `json:\"title,omitempty\"`\n\tBodyMarkDown string `json:\"bodyMarkdown,omitempty\"`\n\tURLPath string `json:\"urlPath,omitempty\"`\n\tCreatedAt int64 `json:\"createdAt,omitempty\"`\n\tUpdatedAt int64 `json:\"updatedAt,omitempty\"`\n\tIsLegacy bool `json:\"isLegacy,omitempty\"`\n\tMemo string `json:\"memo,omitempty\"`\n\tWidgets []Widget `json:\"widgets,omitenpty\"`\n}\n\n\/\/ Widget information\ntype Widget struct {\n\tType string `json:\"type,omitempty\"`\n\tTitle string `json:\"title,omitempty\"`\n\tMetric Metric `json:\"metric,omitempty\"`\n\tGraph Graph `json:\"graph,omitempty\"`\n\tLayout Layout `json:\"layout,omitempty\"`\n\tMarkdown string `json:\"markdown,omitempty\"`\n\tRange Range `json:\"range,omitempty\"`\n}\n\n\/\/ Metric information\ntype Metric struct {\n\tType string `json:\"type,omitempty\"`\n\tHostID string `json:\"hostId,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tServiceName string `json:\"serviceName,omitempty\"`\n\tExpression string `json:\"expression,omitempty\"`\n}\n\n\/\/ Graph information\ntype Graph struct {\n\tType string `json:\"type,omitempty\"`\n\tHostID string `json:\"hostId,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tRoleFullName string `json:\"roleFullname,omitempty\"`\n\tServiceName string `json:\"seriviceName,omitempty\"`\n\tExpression string `json:\"expression,omitempty\"`\n\tIsStacked bool `json:\"isStacked,omitempty\"`\n}\n\n\/\/ Range information\ntype Range struct {\n\tType string `json:\"type,omitempty\"`\n\tPeriod int64 `json:\"period,omitempty\"`\n\tOffset int64 `json:\"offset,omitempty\"`\n\tStart int64 `json:\"start,omitempty\"`\n\tEnd int64 `json:\"end,omitempty\"`\n}\n\n\/\/ Layout information\ntype Layout struct {\n\tX int64 `json:\"x,omitempty\"`\n\tY int64 `json:\"y,omitempty\"`\n\tWidth int64 `json:\"width,omitempty\"`\n\tHeight int64 `json:\"height,omitempty\"`\n}\n\n\/\/ FindDashboards find dashboards\nfunc (c *Client) FindDashboards() ([]*Dashboard, error) {\n\treq, err := http.NewRequest(\"GET\", c.urlFor(\"\/api\/v0\/dashboards\").String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.Request(req)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data struct {\n\t\tDashboards []*Dashboard `json:\"dashboards\"`\n\t}\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn data.Dashboards, err\n}\n\n\/\/ FindDashboard find dashboard\nfunc (c *Client) FindDashboard(dashboardID string) (*Dashboard, error) {\n\treq, err := http.NewRequest(\"GET\", c.urlFor(fmt.Sprintf(\"\/api\/v0\/dashboards\/%s\", dashboardID)).String(), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.Request(req)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data Dashboard\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &data, err\n}\n\n\/\/ CreateDashboard creating dashboard\nfunc (c *Client) CreateDashboard(param *Dashboard) (*Dashboard, error) {\n\tresp, err := c.PostJSON(\"\/api\/v0\/dashboards\", param)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data Dashboard\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &data, nil\n}\n\n\/\/ UpdateDashboard update dashboard\nfunc (c *Client) UpdateDashboard(dashboardID string, param *Dashboard) (*Dashboard, error) {\n\tresp, err := c.PutJSON(fmt.Sprintf(\"\/api\/v0\/dashboards\/%s\", dashboardID), param)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data Dashboard\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &data, nil\n}\n\n\/\/ DeleteDashboard delete dashboard\nfunc (c *Client) DeleteDashboard(dashboardID string) (*Dashboard, error) {\n\treq, err := http.NewRequest(\n\t\t\"DELETE\",\n\t\tc.urlFor(fmt.Sprintf(\"\/api\/v0\/dashboards\/%s\", dashboardID)).String(),\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"Content-Type\", \"application\/json\")\n\n\tresp, err := c.Request(req)\n\tdefer closeResponse(resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data Dashboard\n\terr = json.NewDecoder(resp.Body).Decode(&data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &data, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package easyssh\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestGetKeyFile(t *testing.T) {\n\t\/\/ missing file\n\t_, err := getKeyFile(\"abc\")\n\tassert.Error(t, err)\n\tassert.Equal(t, \"open abc: no such file or directory\", err.Error())\n\n\t\/\/ wrong format\n\t_, err = getKeyFile(\"..\/tests\/.ssh\/id_rsa.pub\")\n\tassert.Error(t, err)\n\tassert.Equal(t, \"ssh: no key found\", err.Error())\n\n\t_, err = getKeyFile(\"..\/tests\/.ssh\/id_rsa\")\n\tassert.NoError(t, err)\n}\n<commit_msg>test: add easyssh scp and ssh command testing. (#29)<commit_after>package easyssh\n\nimport (\n\t\"os\"\n\t\"os\/user\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestGetKeyFile(t *testing.T) {\n\t\/\/ missing file\n\t_, err := getKeyFile(\"abc\")\n\tassert.Error(t, err)\n\tassert.Equal(t, \"open abc: no such file or directory\", err.Error())\n\n\t\/\/ wrong format\n\t_, err = getKeyFile(\"..\/tests\/.ssh\/id_rsa.pub\")\n\tassert.Error(t, err)\n\tassert.Equal(t, \"ssh: no key found\", err.Error())\n\n\t_, err = getKeyFile(\"..\/tests\/.ssh\/id_rsa\")\n\tassert.NoError(t, err)\n}\n\nfunc TestRunCommand(t *testing.T) {\n\tssh := &MakeConfig{\n\t\tServer: \"localhost\",\n\t\tUser: \"drone-scp\",\n\t\tPort: \"22\",\n\t\tKeyPath: \"..\/tests\/.ssh\/id_rsa\",\n\t}\n\n\toutput, err := ssh.Run(\"whoami\")\n\tassert.Equal(t, \"drone-scp\\n\", output)\n\tassert.NoError(t, err)\n}\n\nfunc TestSCPCommand(t *testing.T) {\n\tssh := &MakeConfig{\n\t\tServer: \"localhost\",\n\t\tUser: \"drone-scp\",\n\t\tPort: \"22\",\n\t\tKeyPath: \"..\/tests\/.ssh\/id_rsa\",\n\t}\n\n\terr := ssh.Scp(\"..\/tests\/a.txt\")\n\tassert.NoError(t, err)\n\n\tu, err := user.Lookup(\"drone-scp\")\n\tif err != nil {\n\t\tt.Fatalf(\"Lookup: %v\", err)\n\t}\n\n\t\/\/ check file exist\n\tif _, err := os.Stat(u.HomeDir + \"\/a.txt\"); os.IsNotExist(err) {\n\t\tt.Fatalf(\"SCP-error: %v\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2021 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage reader\n\nimport (\n\t\"math\"\n\t\"sort\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestListMetrics(t *testing.T) {\n\treader := NewReader(\"testdata\")\n\tmetrics, err := reader.Metrics()\n\trequire.NoError(t, err)\n\n\tsort.Strings(metrics)\n\texpected := []string{\n\t\t\"test-whisper.load.load.longterm\",\n\t\t\"test-whisper.load.load.shortterm\",\n\t\t\"test-whisper.load.load.midterm\",\n\t}\n\n\tsort.Strings(expected)\n\n\trequire.Equal(t, expected, metrics)\n}\n\nfunc TestGetMinAndMaxTimestamp(t *testing.T) {\n\treader := NewReader(\"testdata\")\n\tmin, max, err := reader.GetMinAndMaxTimestamps()\n\trequire.NoError(t, err)\n\n\trequire.True(t, min > math.MinInt64)\n\trequire.Equal(t, int64(1611068400000), max)\n}\n\nfunc TestGetPoints(t *testing.T) {\n\treader := NewReader(\"testdata\")\n\tpoints, err := reader.Points(\"test-whisper.load.load.longterm\", 1000*math.MinInt32, 1000*math.MaxInt32)\n\trequire.NoError(t, err)\n\n\texpectedLastPoints := []Point{\n\t\t{\n\t\t\tTimestamp: 1611067800000,\n\t\t\tValue: 1.0511666666666666,\n\t\t},\n\t\t{\n\t\t\tTimestamp: 1611068400000,\n\t\t\tValue: 1.0636666666666668,\n\t\t},\n\t}\n\n\tfor i, p := range expectedLastPoints {\n\t\tpos := len(points) - len(expectedLastPoints) + i\n\t\trequire.Equal(t, p, points[pos])\n\t}\n}\n<commit_msg>Fix Whisper reader tests<commit_after>\/\/ Copyright 2021 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage reader\n\nimport (\n\t\"math\"\n\t\"sort\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/go-graphite\/go-whisper\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc init() {\n\t\/\/ Pin go-whisper to a fixed timestamp so that the test data is in the window of retention.\n\twhisper.Now = func() time.Time { return time.Unix(1640000000, 0) }\n}\n\nfunc TestListMetrics(t *testing.T) {\n\treader := NewReader(\"testdata\")\n\tmetrics, err := reader.Metrics()\n\trequire.NoError(t, err)\n\n\tsort.Strings(metrics)\n\texpected := []string{\n\t\t\"test-whisper.load.load.longterm\",\n\t\t\"test-whisper.load.load.shortterm\",\n\t\t\"test-whisper.load.load.midterm\",\n\t}\n\n\tsort.Strings(expected)\n\n\trequire.Equal(t, expected, metrics)\n}\n\nfunc TestGetMinAndMaxTimestamp(t *testing.T) {\n\treader := NewReader(\"testdata\")\n\tmin, max, err := reader.GetMinAndMaxTimestamps()\n\trequire.NoError(t, err)\n\n\trequire.True(t, min > math.MinInt64)\n\trequire.Equal(t, int64(1611068400000), max)\n}\n\nfunc TestGetPoints(t *testing.T) {\n\treader := NewReader(\"testdata\")\n\tpoints, err := reader.Points(\"test-whisper.load.load.longterm\", 1000*math.MinInt32, 1000*math.MaxInt32)\n\trequire.NoError(t, err)\n\n\texpectedLastPoints := []Point{\n\t\t{\n\t\t\tTimestamp: 1611067800000,\n\t\t\tValue: 1.0511666666666666,\n\t\t},\n\t\t{\n\t\t\tTimestamp: 1611068400000,\n\t\t\tValue: 1.0636666666666668,\n\t\t},\n\t}\n\n\tfor i, p := range expectedLastPoints {\n\t\tpos := len(points) - len(expectedLastPoints) + i\n\t\trequire.Equal(t, p, points[pos])\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package platform \/\/ import \"v2ray.com\/core\/common\/platform\"\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype EnvFlag struct {\n\tName string\n\tAltName string\n}\n\nfunc NewEnvFlag(name string) EnvFlag {\n\treturn EnvFlag{\n\t\tName: name,\n\t\tAltName: NormalizeEnvName(name),\n\t}\n}\n\nfunc (f EnvFlag) GetValue(defaultValue func() string) string {\n\tif v, found := os.LookupEnv(f.Name); found {\n\t\treturn v\n\t}\n\tif len(f.AltName) > 0 {\n\t\tif v, found := os.LookupEnv(f.AltName); found {\n\t\t\treturn v\n\t\t}\n\t}\n\n\treturn defaultValue()\n}\n\nfunc (f EnvFlag) GetValueAsInt(defaultValue int) int {\n\tuseDefaultValue := false\n\ts := f.GetValue(func() string {\n\t\tuseDefaultValue = true\n\t\treturn \"\"\n\t})\n\tif useDefaultValue {\n\t\treturn defaultValue\n\t}\n\tv, err := strconv.ParseInt(s, 10, 32)\n\tif err != nil {\n\t\treturn defaultValue\n\t}\n\treturn int(v)\n}\n\nfunc NormalizeEnvName(name string) string {\n\treturn strings.Replace(strings.ToUpper(strings.TrimSpace(name)), \".\", \"_\", -1)\n}\n\nfunc getExecutableDir() string {\n\texec, err := os.Executable()\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn filepath.Dir(exec)\n}\n\nfunc getExecutableSubDir(dir string) func() string {\n\treturn func() string {\n\t\treturn filepath.Join(getExecutableDir(), dir)\n\t}\n}\n\nfunc GetAssetLocation(file string) string {\n\tconst name = \"v2ray.location.asset\"\n\tassetPath := NewEnvFlag(name).GetValue(getExecutableDir)\n\treturn filepath.Join(assetPath, file)\n}\n\nfunc GetPluginDirectory() string {\n\tconst name = \"v2ray.location.plugin\"\n\tpluginDir := NewEnvFlag(name).GetValue(getExecutableSubDir(\"plugins\"))\n\treturn pluginDir\n}\n\nfunc GetConfigurationPath() string {\n\tconst name = \"v2ray.location.config\"\n\tconfigPath := NewEnvFlag(name).GetValue(getExecutableDir)\n\treturn filepath.Join(configPath, \"config.json\")\n}\n\nfunc GetConfDirPath() string {\n\tconst name = \"v2ray.location.confdir\"\n\tconfigPath := NewEnvFlag(name).GetValue(func() string { return \"\" })\n\treturn configPath\n}\n<commit_msg>add comment to please codacy<commit_after>package platform \/\/ import \"v2ray.com\/core\/common\/platform\"\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype EnvFlag struct {\n\tName string\n\tAltName string\n}\n\nfunc NewEnvFlag(name string) EnvFlag {\n\treturn EnvFlag{\n\t\tName: name,\n\t\tAltName: NormalizeEnvName(name),\n\t}\n}\n\nfunc (f EnvFlag) GetValue(defaultValue func() string) string {\n\tif v, found := os.LookupEnv(f.Name); found {\n\t\treturn v\n\t}\n\tif len(f.AltName) > 0 {\n\t\tif v, found := os.LookupEnv(f.AltName); found {\n\t\t\treturn v\n\t\t}\n\t}\n\n\treturn defaultValue()\n}\n\nfunc (f EnvFlag) GetValueAsInt(defaultValue int) int {\n\tuseDefaultValue := false\n\ts := f.GetValue(func() string {\n\t\tuseDefaultValue = true\n\t\treturn \"\"\n\t})\n\tif useDefaultValue {\n\t\treturn defaultValue\n\t}\n\tv, err := strconv.ParseInt(s, 10, 32)\n\tif err != nil {\n\t\treturn defaultValue\n\t}\n\treturn int(v)\n}\n\nfunc NormalizeEnvName(name string) string {\n\treturn strings.Replace(strings.ToUpper(strings.TrimSpace(name)), \".\", \"_\", -1)\n}\n\nfunc getExecutableDir() string {\n\texec, err := os.Executable()\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn filepath.Dir(exec)\n}\n\nfunc getExecutableSubDir(dir string) func() string {\n\treturn func() string {\n\t\treturn filepath.Join(getExecutableDir(), dir)\n\t}\n}\n\nfunc GetAssetLocation(file string) string {\n\tconst name = \"v2ray.location.asset\"\n\tassetPath := NewEnvFlag(name).GetValue(getExecutableDir)\n\treturn filepath.Join(assetPath, file)\n}\n\nfunc GetPluginDirectory() string {\n\tconst name = \"v2ray.location.plugin\"\n\tpluginDir := NewEnvFlag(name).GetValue(getExecutableSubDir(\"plugins\"))\n\treturn pluginDir\n}\n\nfunc GetConfigurationPath() string {\n\tconst name = \"v2ray.location.config\"\n\tconfigPath := NewEnvFlag(name).GetValue(getExecutableDir)\n\treturn filepath.Join(configPath, \"config.json\")\n}\n\n\/\/ GetConfDirPath reads \"v2ray.location.confdir\"\nfunc GetConfDirPath() string {\n\tconst name = \"v2ray.location.confdir\"\n\tconfigPath := NewEnvFlag(name).GetValue(func() string { return \"\" })\n\treturn configPath\n}\n<|endoftext|>"} {"text":"<commit_before>package context\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n)\n\nvar WechatTypeEnum = map[string]int{\n\t\"Private\": 1,\n\t\"Group\": 2,\n\t\"GroupAt\": 3,\n}\n\nfunc PrivateChat(ctx Context) bool {\n\tt := wechatType(ctx)\n\n\treturn t == 1 || t == 3\n}\n\nfunc GroupChat(ctx Context) bool {\n\tt := wechatType(ctx)\n\n\treturn t == 2\n}\n\n\/\/ {\"BotName\":\"客服\",\n\/\/ \"Content\":\"这个新版的微信, 容易挂掉\",\n\/\/ \"MessageId\":\"859f8ba8-4ff7-44ae-8d13-1ed3deedd6a9\",\n\/\/ \"MsgID\":\"5571466961726285032\",\n\/\/ \"MsgType\":\"1\",\n\/\/ \"ReceiptHandle\":\"xxx\",\n\/\/ \"RoomID\":\"@Room\\u003c测试群\\u003e\",\n\/\/ \"RoomName\":\"测试群\",\n\/\/ \"SendId\":\"@f048968daa9e01bb4fbf98067a21ec32\",\n\/\/ \"SenderNickName\":\"胡义\",\n\/\/ \"TaskId\":\"528219641\",\n\/\/ \"WeixinUin\":\"528219641\"}\nfunc wechatType(ctx Context) int {\n\t\/\/ log.Printf(\"[WECHAT GROUP] BEGIN\")\n\tdata, _ := json.Marshal(ctx.Value(\"WECHAT_INFO\"))\n\tlog.Printf(\"[WECHAT GROUP] ctx value: %v\", string(data))\n\t\/\/ log.Printf(\"[WECHAT GROUP] END\")\n\n\twechatInfo := ctx.Value(\"WECHAT_INFO\")\n\n\tif wechatInfo == nil {\n\t\treturn WechatTypeEnum[\"Private\"]\n\t} else {\n\t\tmessage := wechatInfo.(map[string]string)\n\n\t\trooId, ok := message[\"RoomID\"]\n\t\tif ok && rooId != \"\" {\n\t\t\tif bootName, ok := message[\"BotName\"]; ok {\n\t\t\t\tcontent, _ := message[\"Content\"]\n\t\t\t\tif strings.Contains(content, fmt.Sprintf(\"@%v\", bootName)) {\n\t\t\t\t\treturn WechatTypeEnum[\"GroupAt\"]\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn WechatTypeEnum[\"Group\"]\n\t\t} else {\n\t\t\treturn WechatTypeEnum[\"Private\"]\n\t\t}\n\t}\n\n\treturn 0\n}\n<commit_msg>remove debug output<commit_after>package context\n\nimport (\n\t\/\/ \"encoding\/json\"\n\t\"fmt\"\n\t\/\/ \"log\"\n\t\"strings\"\n)\n\nvar WechatTypeEnum = map[string]int{\n\t\"Private\": 1,\n\t\"Group\": 2,\n\t\"GroupAt\": 3,\n}\n\nfunc PrivateChat(ctx Context) bool {\n\tt := wechatType(ctx)\n\n\treturn t == 1 || t == 3\n}\n\nfunc GroupChat(ctx Context) bool {\n\tt := wechatType(ctx)\n\n\treturn t == 2\n}\n\n\/\/ {\"BotName\":\"客服\",\n\/\/ \"Content\":\"这个新版的微信, 容易挂掉\",\n\/\/ \"MessageId\":\"859f8ba8-4ff7-44ae-8d13-1ed3deedd6a9\",\n\/\/ \"MsgID\":\"5571466961726285032\",\n\/\/ \"MsgType\":\"1\",\n\/\/ \"ReceiptHandle\":\"xxx\",\n\/\/ \"RoomID\":\"@Room\\u003c测试群\\u003e\",\n\/\/ \"RoomName\":\"测试群\",\n\/\/ \"SendId\":\"@f048968daa9e01bb4fbf98067a21ec32\",\n\/\/ \"SenderNickName\":\"胡义\",\n\/\/ \"TaskId\":\"528219641\",\n\/\/ \"WeixinUin\":\"528219641\"}\nfunc wechatType(ctx Context) int {\n\t\/\/ log.Printf(\"[WECHAT GROUP] BEGIN\")\n\t\/\/ data, _ := json.Marshal(ctx.Value(\"WECHAT_INFO\"))\n\t\/\/ log.Printf(\"[WECHAT GROUP] ctx value: %v\", string(data))\n\t\/\/ log.Printf(\"[WECHAT GROUP] END\")\n\n\twechatInfo := ctx.Value(\"WECHAT_INFO\")\n\n\tif wechatInfo == nil {\n\t\treturn WechatTypeEnum[\"Private\"]\n\t} else {\n\t\tmessage := wechatInfo.(map[string]string)\n\n\t\trooId, ok := message[\"RoomID\"]\n\t\tif ok && rooId != \"\" {\n\t\t\tif bootName, ok := message[\"BotName\"]; ok {\n\t\t\t\tcontent, _ := message[\"Content\"]\n\t\t\t\tif strings.Contains(content, fmt.Sprintf(\"@%v\", bootName)) {\n\t\t\t\t\treturn WechatTypeEnum[\"GroupAt\"]\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn WechatTypeEnum[\"Group\"]\n\t\t} else {\n\t\t\treturn WechatTypeEnum[\"Private\"]\n\t\t}\n\t}\n\n\treturn 0\n}\n<|endoftext|>"} {"text":"<commit_before>package protobuf_test\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com\/nats-io\/nats\"\n\t\"github.com\/nats-io\/nats\/test\"\n\n\t\"github.com\/nats-io\/nats\/encoders\/protobuf\"\n\tpb \"github.com\/nats-io\/nats\/encoders\/protobuf\/testdata\"\n)\n\nconst TEST_PORT = 8068\n\nfunc NewProtoEncodedConn(tl test.TestLogger) *nats.EncodedConn {\n\tec, err := nats.NewEncodedConn(test.NewConnection(tl, TEST_PORT), protobuf.PROTOBUF_ENCODER)\n\tif err != nil {\n\t\ttl.Fatalf(\"Failed to create an encoded connection: %v\\n\", err)\n\t}\n\treturn ec\n}\n\nfunc TestProtoMarshalStruct(t *testing.T) {\n\ts := test.RunServerOnPort(TEST_PORT)\n\tdefer s.Shutdown()\n\n\tec := NewProtoEncodedConn(t)\n\tdefer ec.Close()\n\tch := make(chan bool)\n\n\tme := &pb.Person{Name: \"derek\", Age: 22, Address: \"140 New Montgomery St\"}\n\tme.Children = make(map[string]*pb.Person)\n\n\tme.Children[\"sam\"] = &pb.Person{Name: \"sam\", Age: 19, Address: \"140 New Montgomery St\"}\n\tme.Children[\"meg\"] = &pb.Person{Name: \"meg\", Age: 17, Address: \"140 New Montgomery St\"}\n\n\tec.Subscribe(\"protobuf_test\", func(p *pb.Person) {\n\t\tif !reflect.DeepEqual(p, me) {\n\t\t\tt.Fatal(\"Did not receive the correct protobuf response\")\n\t\t}\n\t\tch <- true\n\t})\n\n\tec.Publish(\"protobuf_test\", me)\n\tif e := test.Wait(ch); e != nil {\n\t\tt.Fatal(\"Did not receive the message\")\n\t}\n}\n\nfunc BenchmarkProtobufMarshalStruct(b *testing.B) {\n\tme := &pb.Person{Name: \"derek\", Age: 22, Address: \"140 New Montgomery St\"}\n\tme.Children = make(map[string]*pb.Person)\n\n\tme.Children[\"sam\"] = &pb.Person{Name: \"sam\", Age: 19, Address: \"140 New Montgomery St\"}\n\tme.Children[\"meg\"] = &pb.Person{Name: \"meg\", Age: 17, Address: \"140 New Montgomery St\"}\n\n\tencoder := &protobuf.ProtobufEncoder{}\n\tfor n := 0; n < b.N; n++ {\n\t\tif _, err := encoder.Encode(\"protobuf_test\", me); err != nil {\n\t\t\tb.Fatal(\"Couldn't serialize object\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkPublishProtobufStruct(b *testing.B) {\n\t\/\/ stop benchmark for set-up\n\tb.StopTimer()\n\n\ts := test.RunServerOnPort(TEST_PORT)\n\tdefer s.Shutdown()\n\n\tec := NewProtoEncodedConn(b)\n\tdefer ec.Close()\n\tch := make(chan bool)\n\n\tme := &pb.Person{Name: \"derek\", Age: 22, Address: \"140 New Montgomery St\"}\n\tme.Children = make(map[string]*pb.Person)\n\n\tme.Children[\"sam\"] = &pb.Person{Name: \"sam\", Age: 19, Address: \"140 New Montgomery St\"}\n\tme.Children[\"meg\"] = &pb.Person{Name: \"meg\", Age: 17, Address: \"140 New Montgomery St\"}\n\n\tec.Subscribe(\"protobuf_test\", func(p *pb.Person) {\n\t\tif !reflect.DeepEqual(p, me) {\n\t\t\tb.Fatalf(\"Did not receive the correct protobuf response\")\n\t\t}\n\t\tch <- true\n\t})\n\n\t\/\/ resume benchmark\n\tb.StartTimer()\n\n\tfor n := 0; n < b.N; n++ {\n\t\tec.Publish(\"protobuf_test\", me)\n\t\tif e := test.Wait(ch); e != nil {\n\t\t\tb.Fatal(\"Did not receive the message\")\n\t\t}\n\t}\n}\n<commit_msg>Add tests to show failed scenario for proto connection<commit_after>package protobuf_test\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/nats-io\/nats\"\n\t\"github.com\/nats-io\/nats\/test\"\n\n\t\"github.com\/nats-io\/nats\/encoders\/protobuf\"\n\tpb \"github.com\/nats-io\/nats\/encoders\/protobuf\/testdata\"\n)\n\nconst TEST_PORT = 8068\n\nfunc NewProtoEncodedConn(tl test.TestLogger) *nats.EncodedConn {\n\tec, err := nats.NewEncodedConn(test.NewConnection(tl, TEST_PORT), protobuf.PROTOBUF_ENCODER)\n\tif err != nil {\n\t\ttl.Fatalf(\"Failed to create an encoded connection: %v\\n\", err)\n\t}\n\treturn ec\n}\n\nfunc TestProtoMarshalStruct(t *testing.T) {\n\ts := test.RunServerOnPort(TEST_PORT)\n\tdefer s.Shutdown()\n\n\tec := NewProtoEncodedConn(t)\n\tdefer ec.Close()\n\tch := make(chan bool)\n\n\tme := &pb.Person{Name: \"derek\", Age: 22, Address: \"140 New Montgomery St\"}\n\tme.Children = make(map[string]*pb.Person)\n\n\tme.Children[\"sam\"] = &pb.Person{Name: \"sam\", Age: 19, Address: \"140 New Montgomery St\"}\n\tme.Children[\"meg\"] = &pb.Person{Name: \"meg\", Age: 17, Address: \"140 New Montgomery St\"}\n\n\tec.Subscribe(\"protobuf_test\", func(p *pb.Person) {\n\t\tif !reflect.DeepEqual(p, me) {\n\t\t\tt.Fatal(\"Did not receive the correct protobuf response\")\n\t\t}\n\t\tch <- true\n\t})\n\n\tec.Publish(\"protobuf_test\", me)\n\tif e := test.Wait(ch); e != nil {\n\t\tt.Fatal(\"Did not receive the message\")\n\t}\n}\n\nfunc TestProtoNilRequest(t *testing.T) {\n\ts := test.RunServerOnPort(TEST_PORT)\n\tdefer s.Shutdown()\n\n\tec := NewProtoEncodedConn(t)\n\tdefer ec.Close()\n\n\ttestPerson := &pb.Person{Name: \"Anatolii\", Age: 25, Address: \"Ukraine, Nikolaev\"}\n\n\t\/\/Subscribe with empty interface shouldn't failed on empty message\n\tec.Subscribe(\"nil_test\", func(_, reply string, _ interface{}) {\n\t\tec.Publish(reply, testPerson)\n\t})\n\n\tresp := new(pb.Person)\n\n\t\/\/Request with nil argument shouldn't failed with nil argument\n\terr := ec.Request(\"nil_test\", nil, resp, 100*time.Millisecond)\n\tec.Flush()\n\n\tif err != nil {\n\t\tt.Error(\"Fail to send empty message via encoded proto connection\")\n\t}\n\n\tif !reflect.DeepEqual(testPerson, resp) {\n\t\tt.Error(\"Fail to receive encoded response\")\n\t}\n}\n\nfunc BenchmarkProtobufMarshalStruct(b *testing.B) {\n\tme := &pb.Person{Name: \"derek\", Age: 22, Address: \"140 New Montgomery St\"}\n\tme.Children = make(map[string]*pb.Person)\n\n\tme.Children[\"sam\"] = &pb.Person{Name: \"sam\", Age: 19, Address: \"140 New Montgomery St\"}\n\tme.Children[\"meg\"] = &pb.Person{Name: \"meg\", Age: 17, Address: \"140 New Montgomery St\"}\n\n\tencoder := &protobuf.ProtobufEncoder{}\n\tfor n := 0; n < b.N; n++ {\n\t\tif _, err := encoder.Encode(\"protobuf_test\", me); err != nil {\n\t\t\tb.Fatal(\"Couldn't serialize object\", err)\n\t\t}\n\t}\n}\n\nfunc BenchmarkPublishProtobufStruct(b *testing.B) {\n\t\/\/ stop benchmark for set-up\n\tb.StopTimer()\n\n\ts := test.RunServerOnPort(TEST_PORT)\n\tdefer s.Shutdown()\n\n\tec := NewProtoEncodedConn(b)\n\tdefer ec.Close()\n\tch := make(chan bool)\n\n\tme := &pb.Person{Name: \"derek\", Age: 22, Address: \"140 New Montgomery St\"}\n\tme.Children = make(map[string]*pb.Person)\n\n\tme.Children[\"sam\"] = &pb.Person{Name: \"sam\", Age: 19, Address: \"140 New Montgomery St\"}\n\tme.Children[\"meg\"] = &pb.Person{Name: \"meg\", Age: 17, Address: \"140 New Montgomery St\"}\n\n\tec.Subscribe(\"protobuf_test\", func(p *pb.Person) {\n\t\tif !reflect.DeepEqual(p, me) {\n\t\t\tb.Fatalf(\"Did not receive the correct protobuf response\")\n\t\t}\n\t\tch <- true\n\t})\n\n\t\/\/ resume benchmark\n\tb.StartTimer()\n\n\tfor n := 0; n < b.N; n++ {\n\t\tec.Publish(\"protobuf_test\", me)\n\t\tif e := test.Wait(ch); e != nil {\n\t\t\tb.Fatal(\"Did not receive the message\")\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ service for discovering Lantern instances in the local network\npackage localdiscovery\n\nimport (\n\t\"github.com\/getlantern\/flashlight\/ui\"\n\t\"github.com\/getlantern\/golog\"\n\t\"github.com\/getlantern\/multicast\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tmessageType = `LocalDiscovery`\n\tupdatePeriod = 10\n)\n\nvar (\n\tlog = golog.LoggerFor(\"flashlight.localdiscovery\")\n\tservice *ui.Service\n\tmc *multicast.Multicast\n\tlastPeers []multicast.PeerInfo\n\tpeersMutex sync.Mutex\n)\n\nfunc Start(portToAdvertise string) {\n\tif service == nil {\n\t\thelloFn := func(write func(interface{}) error) error {\n\t\t\tlog.Debugf(\"Sending local Lanterns list to the Lantern UI\")\n\t\t\treturn write(buildPeersList())\n\t\t}\n\t\tvar err error\n\t\tservice, err = ui.Register(messageType, nil, helloFn)\n\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Unable to register Local Discovery service: %q\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tmc = multicast.JoinMulticast()\n\n\tmc.Payload = portToAdvertise\n\tmc.AddPeerCallback = func(peer string, peersInfo []multicast.PeerInfo) {\n\t\tpeersMutex.Lock()\n\t\tlastPeers = peersInfo\n\t\tpeersMutex.Unlock()\n\n\t\tservice.Out <- peersInfo\n\t}\n\tmc.RemovePeerCallback = func(peer string, peersInfo []multicast.PeerInfo) {\n\t\tpeersMutex.Lock()\n\t\tlastPeers = peersInfo\n\t\tpeersMutex.Unlock()\n\n\t\tservice.Out <- peersInfo\n\t}\n\n\tmc.StartMulticast()\n\n\tgo func() {\n\t\tc := time.Tick(updatePeriod * time.Second)\n\t\tfor range c {\n\t\t\tservice.Out <- buildPeersList()\n\t\t}\n\t}()\n}\n\nfunc Stop() {\n\tmc.LeaveMulticast()\n}\n\nfunc buildPeersList() []string {\n\tpeersList := make([]string, len(lastPeers))\n\n\tpeersMutex.Lock()\n\tfor i, peer := range lastPeers {\n\t\tpeersList[i] = \"http:\/\/\" + peer.IP.String() + \":\" + peer.Payload\n\t}\n\tpeersMutex.Unlock()\n\n\treturn peersList\n}\n<commit_msg>Fixed wrongly structured UI localdiscovery messages<commit_after>\/\/ service for discovering Lantern instances in the local network\npackage localdiscovery\n\nimport (\n\t\"github.com\/getlantern\/flashlight\/ui\"\n\t\"github.com\/getlantern\/golog\"\n\t\"github.com\/getlantern\/multicast\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tmessageType = `LocalDiscovery`\n\tupdatePeriod = 10\n)\n\nvar (\n\tlog = golog.LoggerFor(\"flashlight.localdiscovery\")\n\tservice *ui.Service\n\tmc *multicast.Multicast\n\tlastPeers []multicast.PeerInfo\n\tpeersMutex sync.Mutex\n)\n\nfunc Start(portToAdvertise string) {\n\tif service == nil {\n\t\tvar err error\n\t\tservice, err = ui.Register(messageType, nil, func(write func(interface{}) error) error {\n\t\t\treturn write(buildPeersList())\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Unable to register Local Discovery service: %q\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tmc = multicast.JoinMulticast()\n\n\tmc.Payload = portToAdvertise\n\tmc.AddPeerCallback = func(peer string, peersInfo []multicast.PeerInfo) {\n\t\tpeersMutex.Lock()\n\t\tlastPeers = peersInfo\n\t\tpeersMutex.Unlock()\n\n\t\tservice.Out <- buildPeersList()\n\t}\n\tmc.RemovePeerCallback = func(peer string, peersInfo []multicast.PeerInfo) {\n\t\tpeersMutex.Lock()\n\t\tlastPeers = peersInfo\n\t\tpeersMutex.Unlock()\n\n\t\tservice.Out <- buildPeersList()\n\t}\n\n\tmc.StartMulticast()\n\n\tgo func() {\n\t\tc := time.Tick(updatePeriod * time.Second)\n\t\tfor range c {\n\t\t\tservice.Out <- buildPeersList()\n\t\t}\n\t}()\n}\n\nfunc Stop() {\n\tmc.LeaveMulticast()\n}\n\nfunc buildPeersList() []string {\n\tpeersList := make([]string, len(lastPeers))\n\n\tpeersMutex.Lock()\n\tfor i, peer := range lastPeers {\n\t\tpeersList[i] = \"http:\/\/\" + peer.IP.String() + \":\" + peer.Payload\n\t}\n\tpeersMutex.Unlock()\n\n\treturn peersList\n}\n<|endoftext|>"} {"text":"<commit_before>package controllers\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/astaxie\/beego\"\n\t\"github.com\/lfq7413\/tomato\/auth\"\n\t\"github.com\/lfq7413\/tomato\/config\"\n\t\"github.com\/lfq7413\/tomato\/orm\"\n\t\"github.com\/lfq7413\/tomato\/rest\"\n\t\"github.com\/lfq7413\/tomato\/utils\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\n\/\/ ObjectsController ...\ntype ObjectsController struct {\n\tbeego.Controller\n\tInfo *RequestInfo\n\tAuth *auth.Auth\n\tClassName string\n\tObjectID string\n}\n\n\/\/ RequestInfo ...\ntype RequestInfo struct {\n\tAppID string\n\tMasterKey string\n\tClientKey string\n\tSessionToken string\n\tInstallationID string\n}\n\n\/\/ Prepare ...\nfunc (o *ObjectsController) Prepare() {\n\t\/\/TODO 1、获取请求头\n\tinfo := &RequestInfo{}\n\tinfo.AppID = o.Ctx.Input.Header(\"X-Parse-Application-Id\")\n\tinfo.MasterKey = o.Ctx.Input.Header(\"X-Parse-Master-Key\")\n\tinfo.ClientKey = o.Ctx.Input.Header(\"X-Parse-Client-Key\")\n\tinfo.SessionToken = o.Ctx.Input.Header(\"X-Parse-Session-Token\")\n\tinfo.InstallationID = o.Ctx.Input.Header(\"X-Parse-Installation-Id\")\n\to.Info = info\n\t\/\/TODO 2、校验头部数据\n\tif info.AppID != config.TConfig.AppID {\n\t\t\/\/TODO AppID 不正确\n\t}\n\tif info.MasterKey == config.TConfig.MasterKey {\n\t\to.Auth = &auth.Auth{InstallationID: info.InstallationID, IsMaster: true}\n\t\treturn\n\t}\n\tif info.ClientKey != config.TConfig.ClientKey {\n\t\t\/\/TODO ClientKey 不正确\n\t}\n\t\/\/TODO 3、生成当前会话用户权限信息\n\tif info.SessionToken == \"\" {\n\t\to.Auth = &auth.Auth{InstallationID: info.InstallationID, IsMaster: false}\n\t} else {\n\t\to.Auth = auth.GetAuthForSessionToken(info.SessionToken, info.InstallationID)\n\t}\n\n}\n\n\/\/ Post ...\n\/\/ @router \/:className [post]\nfunc (o *ObjectsController) Post() {\n\n\tvar object map[string]interface{}\n\tjson.Unmarshal(o.Ctx.Input.RequestBody, &object)\n\n\trest.Create(o.Auth, o.ClassName, object)\n\n\tclassName := o.Ctx.Input.Param(\":className\")\n\n\tvar cls bson.M\n\tjson.Unmarshal(o.Ctx.Input.RequestBody, &cls)\n\n\tobjectId := utils.CreateObjectId()\n\tnow := time.Now().UTC()\n\tcls[\"_id\"] = objectId\n\tcls[\"createdAt\"] = now\n\tcls[\"updatedAt\"] = now\n\n\terr := orm.TomatoDB.Insert(className, cls)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdata := bson.M{}\n\tdata[\"objectId\"] = objectId\n\tdata[\"createdAt\"] = utils.TimetoString(now)\n\n\to.Data[\"json\"] = data\n\to.Ctx.Output.SetStatus(201)\n\to.Ctx.Output.Header(\"Location\", config.TConfig.ServerURL+\"\/classes\/\"+className+\"\/\"+objectId)\n\to.ServeJSON()\n}\n\n\/\/ Get ...\n\/\/ @router \/:className\/:objectId [get]\nfunc (o *ObjectsController) Get() {\n\n\toptions := map[string]interface{}{}\n\twhere := map[string]interface{}{\"objectId\": o.ObjectID}\n\n\trest.Find(o.Auth, o.ClassName, where, options)\n\n\tclassName := o.Ctx.Input.Param(\":className\")\n\tobjectId := o.Ctx.Input.Param(\":objectId\")\n\n\tcls := bson.M{}\n\tcls[\"_id\"] = objectId\n\n\tdata, err := orm.TomatoDB.FindOne(className, cls)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdata[\"objectId\"] = data[\"_id\"]\n\tdelete(data, \"_id\")\n\tif createdAt, ok := data[\"createdAt\"].(time.Time); ok {\n\t\tdata[\"createdAt\"] = utils.TimetoString(createdAt.UTC())\n\t}\n\tif updatedAt, ok := data[\"updatedAt\"].(time.Time); ok {\n\t\tdata[\"updatedAt\"] = utils.TimetoString(updatedAt.UTC())\n\t}\n\n\to.Data[\"json\"] = data\n\to.ServeJSON()\n}\n\n\/\/ Put ...\n\/\/ @router \/:className\/:objectId [put]\nfunc (o *ObjectsController) Put() {\n\tclassName := o.Ctx.Input.Param(\":className\")\n\tobjectId := o.Ctx.Input.Param(\":objectId\")\n\n\tvar cls bson.M\n\tjson.Unmarshal(o.Ctx.Input.RequestBody, &cls)\n\n\tnow := time.Now().UTC()\n\tcls[\"updatedAt\"] = now\n\tupdate := bson.M{\"$set\": cls}\n\n\terr := orm.TomatoDB.Update(className, bson.M{\"_id\": objectId}, update)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdata := bson.M{}\n\tdata[\"updatedAt\"] = utils.TimetoString(now)\n\to.Data[\"json\"] = data\n\to.ServeJSON()\n}\n\n\/\/ GetAll ...\n\/\/ @router \/:className [get]\nfunc (o *ObjectsController) GetAll() {\n\n\t\/\/ TODO 获取查询参数,并组装\n\toptions := map[string]interface{}{}\n\tif o.GetString(\"skip\") != \"\" {\n\t\tif i, err := strconv.Atoi(o.GetString(\"skip\")); err == nil {\n\t\t\toptions[\"skip\"] = i\n\t\t} else {\n\t\t\t\/\/ TODO return error\n\t\t}\n\t}\n\tif o.GetString(\"limit\") != \"\" {\n\t\tif i, err := strconv.Atoi(o.GetString(\"limit\")); err == nil {\n\t\t\toptions[\"limit\"] = i\n\t\t} else {\n\t\t\t\/\/ TODO return error\n\t\t}\n\t} else {\n\t\toptions[\"limit\"] = 100\n\t}\n\tif o.GetString(\"order\") != \"\" {\n\t\toptions[\"order\"] = o.GetString(\"order\")\n\t}\n\tif o.GetString(\"count\") != \"\" {\n\t\toptions[\"count\"] = true\n\t}\n\tif o.GetString(\"keys\") != \"\" {\n\t\toptions[\"keys\"] = o.GetString(\"keys\")\n\t}\n\tif o.GetString(\"include\") != \"\" {\n\t\toptions[\"include\"] = o.GetString(\"include\")\n\t}\n\n\twhere := map[string]interface{}{}\n\tif o.GetString(\"where\") != \"\" {\n\t\terr := json.Unmarshal([]byte(o.GetString(\"where\")), &where)\n\t\tif err != nil {\n\t\t\t\/\/ TODO return err\n\t\t}\n\t}\n\n\trest.Find(o.Auth, o.ClassName, where, options)\n\n\tclassName := o.Ctx.Input.Param(\":className\")\n\n\tcls := bson.M{}\n\n\tdata, err := orm.TomatoDB.Find(className, cls)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, v := range data {\n\t\tv[\"objectId\"] = v[\"_id\"]\n\t\tdelete(v, \"_id\")\n\t\tif createdAt, ok := v[\"createdAt\"].(time.Time); ok {\n\t\t\tv[\"createdAt\"] = utils.TimetoString(createdAt.UTC())\n\t\t}\n\t\tif updatedAt, ok := v[\"updatedAt\"].(time.Time); ok {\n\t\t\tv[\"updatedAt\"] = utils.TimetoString(updatedAt.UTC())\n\t\t}\n\t}\n\to.Data[\"json\"] = bson.M{\"results\": data}\n\to.ServeJSON()\n}\n\n\/\/ Delete ...\n\/\/ @router \/:className\/:objectId [delete]\nfunc (o *ObjectsController) Delete() {\n\tclassName := o.Ctx.Input.Param(\":className\")\n\tobjectId := o.Ctx.Input.Param(\":objectId\")\n\n\tcls := bson.M{}\n\tcls[\"_id\"] = objectId\n\n\terr := orm.TomatoDB.Remove(className, cls)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdata := bson.M{}\n\to.Data[\"json\"] = data\n\to.ServeJSON()\n}\n<commit_msg>Delete() 组装参数<commit_after>package controllers\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/astaxie\/beego\"\n\t\"github.com\/lfq7413\/tomato\/auth\"\n\t\"github.com\/lfq7413\/tomato\/config\"\n\t\"github.com\/lfq7413\/tomato\/orm\"\n\t\"github.com\/lfq7413\/tomato\/rest\"\n\t\"github.com\/lfq7413\/tomato\/utils\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n)\n\n\/\/ ObjectsController ...\ntype ObjectsController struct {\n\tbeego.Controller\n\tInfo *RequestInfo\n\tAuth *auth.Auth\n\tClassName string\n\tObjectID string\n}\n\n\/\/ RequestInfo ...\ntype RequestInfo struct {\n\tAppID string\n\tMasterKey string\n\tClientKey string\n\tSessionToken string\n\tInstallationID string\n}\n\n\/\/ Prepare ...\nfunc (o *ObjectsController) Prepare() {\n\t\/\/TODO 1、获取请求头\n\tinfo := &RequestInfo{}\n\tinfo.AppID = o.Ctx.Input.Header(\"X-Parse-Application-Id\")\n\tinfo.MasterKey = o.Ctx.Input.Header(\"X-Parse-Master-Key\")\n\tinfo.ClientKey = o.Ctx.Input.Header(\"X-Parse-Client-Key\")\n\tinfo.SessionToken = o.Ctx.Input.Header(\"X-Parse-Session-Token\")\n\tinfo.InstallationID = o.Ctx.Input.Header(\"X-Parse-Installation-Id\")\n\to.Info = info\n\t\/\/TODO 2、校验头部数据\n\tif info.AppID != config.TConfig.AppID {\n\t\t\/\/TODO AppID 不正确\n\t}\n\tif info.MasterKey == config.TConfig.MasterKey {\n\t\to.Auth = &auth.Auth{InstallationID: info.InstallationID, IsMaster: true}\n\t\treturn\n\t}\n\tif info.ClientKey != config.TConfig.ClientKey {\n\t\t\/\/TODO ClientKey 不正确\n\t}\n\t\/\/TODO 3、生成当前会话用户权限信息\n\tif info.SessionToken == \"\" {\n\t\to.Auth = &auth.Auth{InstallationID: info.InstallationID, IsMaster: false}\n\t} else {\n\t\to.Auth = auth.GetAuthForSessionToken(info.SessionToken, info.InstallationID)\n\t}\n\n}\n\n\/\/ Post ...\n\/\/ @router \/:className [post]\nfunc (o *ObjectsController) Post() {\n\n\tvar object map[string]interface{}\n\tjson.Unmarshal(o.Ctx.Input.RequestBody, &object)\n\n\trest.Create(o.Auth, o.ClassName, object)\n\n\tclassName := o.Ctx.Input.Param(\":className\")\n\n\tvar cls bson.M\n\tjson.Unmarshal(o.Ctx.Input.RequestBody, &cls)\n\n\tobjectId := utils.CreateObjectId()\n\tnow := time.Now().UTC()\n\tcls[\"_id\"] = objectId\n\tcls[\"createdAt\"] = now\n\tcls[\"updatedAt\"] = now\n\n\terr := orm.TomatoDB.Insert(className, cls)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdata := bson.M{}\n\tdata[\"objectId\"] = objectId\n\tdata[\"createdAt\"] = utils.TimetoString(now)\n\n\to.Data[\"json\"] = data\n\to.Ctx.Output.SetStatus(201)\n\to.Ctx.Output.Header(\"Location\", config.TConfig.ServerURL+\"\/classes\/\"+className+\"\/\"+objectId)\n\to.ServeJSON()\n}\n\n\/\/ Get ...\n\/\/ @router \/:className\/:objectId [get]\nfunc (o *ObjectsController) Get() {\n\n\toptions := map[string]interface{}{}\n\twhere := map[string]interface{}{\"objectId\": o.ObjectID}\n\n\trest.Find(o.Auth, o.ClassName, where, options)\n\n\tclassName := o.Ctx.Input.Param(\":className\")\n\tobjectId := o.Ctx.Input.Param(\":objectId\")\n\n\tcls := bson.M{}\n\tcls[\"_id\"] = objectId\n\n\tdata, err := orm.TomatoDB.FindOne(className, cls)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdata[\"objectId\"] = data[\"_id\"]\n\tdelete(data, \"_id\")\n\tif createdAt, ok := data[\"createdAt\"].(time.Time); ok {\n\t\tdata[\"createdAt\"] = utils.TimetoString(createdAt.UTC())\n\t}\n\tif updatedAt, ok := data[\"updatedAt\"].(time.Time); ok {\n\t\tdata[\"updatedAt\"] = utils.TimetoString(updatedAt.UTC())\n\t}\n\n\to.Data[\"json\"] = data\n\to.ServeJSON()\n}\n\n\/\/ Put ...\n\/\/ @router \/:className\/:objectId [put]\nfunc (o *ObjectsController) Put() {\n\tclassName := o.Ctx.Input.Param(\":className\")\n\tobjectId := o.Ctx.Input.Param(\":objectId\")\n\n\tvar cls bson.M\n\tjson.Unmarshal(o.Ctx.Input.RequestBody, &cls)\n\n\tnow := time.Now().UTC()\n\tcls[\"updatedAt\"] = now\n\tupdate := bson.M{\"$set\": cls}\n\n\terr := orm.TomatoDB.Update(className, bson.M{\"_id\": objectId}, update)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdata := bson.M{}\n\tdata[\"updatedAt\"] = utils.TimetoString(now)\n\to.Data[\"json\"] = data\n\to.ServeJSON()\n}\n\n\/\/ GetAll ...\n\/\/ @router \/:className [get]\nfunc (o *ObjectsController) GetAll() {\n\n\t\/\/ TODO 获取查询参数,并组装\n\toptions := map[string]interface{}{}\n\tif o.GetString(\"skip\") != \"\" {\n\t\tif i, err := strconv.Atoi(o.GetString(\"skip\")); err == nil {\n\t\t\toptions[\"skip\"] = i\n\t\t} else {\n\t\t\t\/\/ TODO return error\n\t\t}\n\t}\n\tif o.GetString(\"limit\") != \"\" {\n\t\tif i, err := strconv.Atoi(o.GetString(\"limit\")); err == nil {\n\t\t\toptions[\"limit\"] = i\n\t\t} else {\n\t\t\t\/\/ TODO return error\n\t\t}\n\t} else {\n\t\toptions[\"limit\"] = 100\n\t}\n\tif o.GetString(\"order\") != \"\" {\n\t\toptions[\"order\"] = o.GetString(\"order\")\n\t}\n\tif o.GetString(\"count\") != \"\" {\n\t\toptions[\"count\"] = true\n\t}\n\tif o.GetString(\"keys\") != \"\" {\n\t\toptions[\"keys\"] = o.GetString(\"keys\")\n\t}\n\tif o.GetString(\"include\") != \"\" {\n\t\toptions[\"include\"] = o.GetString(\"include\")\n\t}\n\n\twhere := map[string]interface{}{}\n\tif o.GetString(\"where\") != \"\" {\n\t\terr := json.Unmarshal([]byte(o.GetString(\"where\")), &where)\n\t\tif err != nil {\n\t\t\t\/\/ TODO return err\n\t\t}\n\t}\n\n\trest.Find(o.Auth, o.ClassName, where, options)\n\n\tclassName := o.Ctx.Input.Param(\":className\")\n\n\tcls := bson.M{}\n\n\tdata, err := orm.TomatoDB.Find(className, cls)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, v := range data {\n\t\tv[\"objectId\"] = v[\"_id\"]\n\t\tdelete(v, \"_id\")\n\t\tif createdAt, ok := v[\"createdAt\"].(time.Time); ok {\n\t\t\tv[\"createdAt\"] = utils.TimetoString(createdAt.UTC())\n\t\t}\n\t\tif updatedAt, ok := v[\"updatedAt\"].(time.Time); ok {\n\t\t\tv[\"updatedAt\"] = utils.TimetoString(updatedAt.UTC())\n\t\t}\n\t}\n\to.Data[\"json\"] = bson.M{\"results\": data}\n\to.ServeJSON()\n}\n\n\/\/ Delete ...\n\/\/ @router \/:className\/:objectId [delete]\nfunc (o *ObjectsController) Delete() {\n\n\trest.Delete(o.Auth, o.ClassName, o.ObjectID)\n\n\tclassName := o.Ctx.Input.Param(\":className\")\n\tobjectId := o.Ctx.Input.Param(\":objectId\")\n\n\tcls := bson.M{}\n\tcls[\"_id\"] = objectId\n\n\terr := orm.TomatoDB.Remove(className, cls)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tdata := bson.M{}\n\to.Data[\"json\"] = data\n\to.ServeJSON()\n}\n<|endoftext|>"} {"text":"<commit_before>package controllers\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/dgrijalva\/jwt-go\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"gopkg.in\/mgo.v2\"\n\n\t\"github.com\/herald-it\/goncord\/models\"\n\t. \"github.com\/herald-it\/goncord\/utils\"\n\t\"github.com\/herald-it\/goncord\/utils\/querying\"\n)\n\ntype ServiceController struct {\n\tsession *mgo.Session\n}\n\nfunc (sc ServiceController) GetDB() *mgo.Database {\n\treturn sc.session.DB(models.Set.Database.DbName)\n}\n\nfunc NewServiceController(s *mgo.Session) *ServiceController {\n\treturn &ServiceController{s}\n}\n\n\/\/ Logout removes the current token from\n\/\/ the database. The next validation\n\/\/ the user is not authorized.\nfunc (sc ServiceController) Logout(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) *HttpError {\n\n\tcollect := sc.GetDB().C(models.Set.Database.TokenTable)\n\ttoken := models.DumpToken{}\n\n\ttokenTmp, httpErr := getToken(r)\n\tif httpErr != nil {\n\t\treturn httpErr\n\t}\n\ttoken.Token = tokenTmp\n\n\tif token.Token == \"\" {\n\t\treturn &HttpError{nil, \"Invalid token value.\", 500}\n\t}\n\n\tif err := collect.Remove(token); err != nil {\n\t\treturn &HttpError{err, \"Delete token error.\", 500}\n\t}\n\n\treturn nil\n}\n\n\/\/ IsValid Check the token for validity.\n\/\/ The token can be a cookie or transferred\n\/\/ post the form. First we checked the cookies.\n\/\/ If the token is valid, the response will contain\n\/\/ user model in json format.\nfunc (sc ServiceController) IsValid(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) *HttpError {\n\n\tcollect := sc.GetDB().C(models.Set.Database.TokenTable)\n\ttoken := &models.DumpToken{}\n\n\ttokenTmp, httpErr := getToken(r)\n\tif httpErr != nil {\n\t\treturn httpErr\n\t}\n\ttoken.Token = tokenTmp\n\n\tif token.Token == \"\" {\n\t\treturn &HttpError{nil, \"Invalid token value.\", 500}\n\t}\n\n\tfindDumpToken, err := querying.FindDumpToken(token, collect)\n\tif err != nil || findDumpToken == nil {\n\t\treturn &HttpError{err, \"Token not found.\", 500}\n\t}\n\n\ttokenParse, err := jwt.Parse(findDumpToken.Token, nil)\n\tif checkLifeTime(tokenParse) {\n\t\tcollect.Remove(findDumpToken)\n\t\treturn &HttpError{nil, \"Time token life has expired.\", 500}\n\t}\n\n\tusr := new(models.User)\n\tusr.ID = findDumpToken.UserId\n\n\tfindUsr, err := querying.FindUserID(usr, sc.GetDB().C(models.Set.Database.UserTable))\n\tif err != nil {\n\t\treturn &HttpError{err, \"User not found.\", 500}\n\t}\n\n\tjsonUsr, err := json.Marshal(findUsr)\n\tif err != nil {\n\t\treturn &HttpError{err, \"User can not convert to json.\", 500}\n\t}\n\n\tw.Write(jsonUsr)\n\treturn nil\n}\n\n\/\/ getToken returns the token from the cookie,\n\/\/ if the cookie is not present in the token, then looking in\n\/\/ post the form if the token is not exist, then returned\n\/\/ an empty string and error code.\nfunc getToken(r *http.Request) (string, *HttpError) {\n\tjwtCookie, err := r.Cookie(\"jwt\")\n\tif err != nil {\n\t\tif err := r.ParseForm(); err != nil {\n\t\t\treturn \"\", &HttpError{err, \"Post form can not be parsed.\", 500}\n\t\t}\n\n\t\ttoken := r.PostForm.Get(\"jwt\")\n\t\treturn token, nil\n\t}\n\n\treturn jwtCookie.Value, nil\n}\n\n\/\/ checkLifeTime checks the token lifetime.\nfunc checkLifeTime(token *jwt.Token) bool {\n\tclaims := token.Claims.(jwt.MapClaims)\n\n\tlifeTime := claims[\"iat\"]\n\ttimeSpan := time.Now().Unix() - int64(lifeTime.(float64))\n\n\treturn timeSpan > (7 * 24 * 60 * 60)\n}\n<commit_msg>Change error info on more detail info.<commit_after>package controllers\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/dgrijalva\/jwt-go\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"gopkg.in\/mgo.v2\"\n\n\t\"github.com\/herald-it\/goncord\/models\"\n\t. \"github.com\/herald-it\/goncord\/utils\"\n\t\"github.com\/herald-it\/goncord\/utils\/querying\"\n)\n\ntype ServiceController struct {\n\tsession *mgo.Session\n}\n\nfunc (sc ServiceController) GetDB() *mgo.Database {\n\treturn sc.session.DB(models.Set.Database.DbName)\n}\n\nfunc NewServiceController(s *mgo.Session) *ServiceController {\n\treturn &ServiceController{s}\n}\n\n\/\/ Logout removes the current token from\n\/\/ the database. The next validation\n\/\/ the user is not authorized.\nfunc (sc ServiceController) Logout(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) *HttpError {\n\n\tcollect := sc.GetDB().C(models.Set.Database.TokenTable)\n\ttoken := models.DumpToken{}\n\n\ttokenTmp, httpErr := getToken(r)\n\tif httpErr != nil {\n\t\treturn httpErr\n\t}\n\ttoken.Token = tokenTmp\n\n\tif token.Token == \"\" {\n\t\treturn &HttpError{nil, \"Empty token value.\", 500}\n\t}\n\n\tif err := collect.Remove(token); err != nil {\n\t\treturn &HttpError{err, \"Delete token error.\", 500}\n\t}\n\n\treturn nil\n}\n\n\/\/ IsValid Check the token for validity.\n\/\/ The token can be a cookie or transferred\n\/\/ post the form. First we checked the cookies.\n\/\/ If the token is valid, the response will contain\n\/\/ user model in json format.\nfunc (sc ServiceController) IsValid(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tps httprouter.Params) *HttpError {\n\n\tcollect := sc.GetDB().C(models.Set.Database.TokenTable)\n\ttoken := &models.DumpToken{}\n\n\ttokenTmp, httpErr := getToken(r)\n\tif httpErr != nil {\n\t\treturn httpErr\n\t}\n\ttoken.Token = tokenTmp\n\n\tif token.Token == \"\" {\n\t\treturn &HttpError{nil, \"Empty token value.\", 500}\n\t}\n\n\tfindDumpToken, err := querying.FindDumpToken(token, collect)\n\tif err != nil || findDumpToken == nil {\n\t\treturn &HttpError{err, \"Token not found.\", 500}\n\t}\n\n\ttokenParse, err := jwt.Parse(findDumpToken.Token, nil)\n\tif checkLifeTime(tokenParse) {\n\t\tcollect.Remove(findDumpToken)\n\t\treturn &HttpError{nil, \"Time token life has expired.\", 500}\n\t}\n\n\tusr := new(models.User)\n\tusr.ID = findDumpToken.UserId\n\n\tfindUsr, err := querying.FindUserID(usr, sc.GetDB().C(models.Set.Database.UserTable))\n\tif err != nil {\n\t\treturn &HttpError{err, \"User not found.\", 500}\n\t}\n\n\tjsonUsr, err := json.Marshal(findUsr)\n\tif err != nil {\n\t\treturn &HttpError{err, \"User can not convert to json.\", 500}\n\t}\n\n\tw.Write(jsonUsr)\n\treturn nil\n}\n\n\/\/ getToken returns the token from the cookie,\n\/\/ if the cookie is not present in the token, then looking in\n\/\/ post the form if the token is not exist, then returned\n\/\/ an empty string and error code.\nfunc getToken(r *http.Request) (string, *HttpError) {\n\tjwtCookie, err := r.Cookie(\"jwt\")\n\tif err != nil {\n\t\tif err := r.ParseForm(); err != nil {\n\t\t\treturn \"\", &HttpError{err, \"Post form can not be parsed.\", 500}\n\t\t}\n\n\t\ttoken := r.PostForm.Get(\"jwt\")\n\t\treturn token, nil\n\t}\n\n\treturn jwtCookie.Value, nil\n}\n\n\/\/ checkLifeTime checks the token lifetime.\nfunc checkLifeTime(token *jwt.Token) bool {\n\tclaims := token.Claims.(jwt.MapClaims)\n\n\tlifeTime := claims[\"iat\"]\n\ttimeSpan := time.Now().Unix() - int64(lifeTime.(float64))\n\n\treturn timeSpan > (7 * 24 * 60 * 60)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Garbage collection benchmark: parse Go packages repeatedly.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/parser\"\n\t\"os\"\n\t\"path\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc isGoFile(dir *os.FileInfo) bool {\n\treturn dir.IsRegular() &&\n\t\t!strings.HasPrefix(dir.Name, \".\") && \/\/ ignore .files\n\t\tpath.Ext(dir.Name) == \".go\"\n}\n\nfunc isPkgFile(dir *os.FileInfo) bool {\n\treturn isGoFile(dir) &&\n\t\t!strings.HasSuffix(dir.Name, \"_test.go\") \/\/ ignore test files\n}\n\nfunc pkgName(filename string) string {\n\tfile, err := parser.ParseFile(filename, nil, nil, parser.PackageClauseOnly)\n\tif err != nil || file == nil {\n\t\treturn \"\"\n\t}\n\treturn file.Name.Name()\n}\n\nfunc parseDir(dirpath string) map[string]*ast.Package {\n\t\/\/ the package name is the directory name within its parent\n\t\/\/ (use dirname instead of path because dirname is clean; i.e. has no trailing '\/')\n\t_, pkgname := path.Split(dirpath)\n\n\t\/\/ filter function to select the desired .go files\n\tfilter := func(d *os.FileInfo) bool {\n\t\tif isPkgFile(d) {\n\t\t\t\/\/ Some directories contain main packages: Only accept\n\t\t\t\/\/ files that belong to the expected package so that\n\t\t\t\/\/ parser.ParsePackage doesn't return \"multiple packages\n\t\t\t\/\/ found\" errors.\n\t\t\t\/\/ Additionally, accept the special package name\n\t\t\t\/\/ fakePkgName if we are looking at cmd documentation.\n\t\t\tname := pkgName(dirpath + \"\/\" + d.Name)\n\t\t\treturn name == pkgname\n\t\t}\n\t\treturn false\n\t}\n\n\t\/\/ get package AST\n\tpkgs, err := parser.ParseDir(dirpath, filter, parser.ParseComments)\n\tif err != nil {\n\t\tprintln(\"parse\", dirpath, err.String())\n\t\tpanic(\"fail\")\n\t}\n\treturn pkgs\n}\n\nfunc main() {\n\tst := &runtime.MemStats\n\tn := flag.Int(\"n\", 4, \"iterations\")\n\tp := flag.Int(\"p\", len(packages), \"# of packages to keep in memory\")\n\tflag.BoolVar(&st.DebugGC, \"d\", st.DebugGC, \"print GC debugging info (pause times)\")\n\tflag.Parse()\n\n\tvar t0 int64\n\tpkgroot := runtime.GOROOT() + \"\/src\/pkg\/\"\n\tfor pass := 0; pass < 2; pass++ {\n\t\t\/\/ Once the heap is grown to full size, reset counters.\n\t\t\/\/ This hides the start-up pauses, which are much smaller\n\t\t\/\/ than the normal pauses and would otherwise make\n\t\t\/\/ the average look much better than it actually is.\n\t\tst.NumGC = 0\n\t\tst.PauseNs = 0\n\t\tt0 = time.Nanoseconds()\n\n\t\tfor i := 0; i < *n; i++ {\n\t\t\tparsed := make([]map[string]*ast.Package, *p)\n\t\t\tfor j := range parsed {\n\t\t\t\tparsed[j] = parseDir(pkgroot + packages[j%len(packages)])\n\t\t\t}\n\t\t}\n\t\truntime.GC()\n\t}\n\tt1 := time.Nanoseconds()\n\n\tfmt.Printf(\"Alloc=%d\/%d Heap=%d Mallocs=%d PauseTime=%.3f\/%d = %.3f\\n\",\n\t\tst.Alloc, st.TotalAlloc,\n\t\tst.Sys,\n\t\tst.Mallocs, float64(st.PauseNs)\/1e9,\n\t\tst.NumGC, float64(st.PauseNs)\/1e9\/float64(st.NumGC))\n\n\tfmt.Printf(\"%10s %10s %10s\\n\", \"size\", \"#alloc\", \"#free\")\n\tfor _, s := range st.BySize {\n\t\tfmt.Printf(\"%10d %10d %10d\\n\", s.Size, s.Mallocs, s.Frees)\n\t}\n\n\t\/\/ Standard gotest benchmark output, collected by build dashboard.\n\tfmt.Printf(\"garbage.BenchmarkParser %d %d ns\/op\\n\", *n, (t1-t0)\/int64(*n))\n\tfmt.Printf(\"garbage.BenchmarkParserPause %d %d ns\/op\\n\", st.NumGC, int64(st.PauseNs)\/int64(st.NumGC))\n}\n\n\nvar packages = []string{\n\t\"archive\/tar\",\n\t\"asn1\",\n\t\"big\",\n\t\"bufio\",\n\t\"bytes\",\n\t\"cmath\",\n\t\"compress\/flate\",\n\t\"compress\/gzip\",\n\t\"compress\/zlib\",\n\t\"container\/heap\",\n\t\"container\/list\",\n\t\"container\/ring\",\n\t\"container\/vector\",\n\t\"crypto\/aes\",\n\t\"crypto\/block\",\n\t\"crypto\/blowfish\",\n\t\"crypto\/hmac\",\n\t\"crypto\/md4\",\n\t\"crypto\/md5\",\n\t\"crypto\/rand\",\n\t\"crypto\/rc4\",\n\t\"crypto\/rsa\",\n\t\"crypto\/sha1\",\n\t\"crypto\/sha256\",\n\t\"crypto\/sha512\",\n\t\"crypto\/subtle\",\n\t\"crypto\/tls\",\n\t\"crypto\/x509\",\n\t\"crypto\/xtea\",\n\t\"debug\/dwarf\",\n\t\"debug\/macho\",\n\t\"debug\/elf\",\n\t\"debug\/gosym\",\n\t\"debug\/proc\",\n\t\"ebnf\",\n\t\"encoding\/ascii85\",\n\t\"encoding\/base64\",\n\t\"encoding\/binary\",\n\t\"encoding\/git85\",\n\t\"encoding\/hex\",\n\t\"encoding\/pem\",\n\t\"exec\",\n\t\"exp\/datafmt\",\n\t\"exp\/draw\",\n\t\"exp\/eval\",\n\t\"exp\/iterable\",\n\t\"expvar\",\n\t\"flag\",\n\t\"fmt\",\n\t\"go\/ast\",\n\t\"go\/doc\",\n\t\"go\/parser\",\n\t\"go\/printer\",\n\t\"go\/scanner\",\n\t\"go\/token\",\n\t\"gob\",\n\t\"hash\",\n\t\"hash\/adler32\",\n\t\"hash\/crc32\",\n\t\"hash\/crc64\",\n\t\"http\",\n\t\"image\",\n\t\"image\/jpeg\",\n\t\"image\/png\",\n\t\"io\",\n\t\"io\/ioutil\",\n\t\"json\",\n\t\"log\",\n\t\"math\",\n\t\"mime\",\n\t\"net\",\n\t\"nntp\",\n\t\"os\",\n\t\"os\/signal\",\n\t\"patch\",\n\t\"path\",\n\t\"rand\",\n\t\"reflect\",\n\t\"regexp\",\n\t\"rpc\",\n\t\"runtime\",\n\t\"scanner\",\n\t\"sort\",\n\t\"strconv\",\n\t\"strings\",\n\t\"sync\",\n\t\"syscall\",\n\t\"syslog\",\n\t\"tabwriter\",\n\t\"template\",\n\t\"testing\",\n\t\"testing\/iotest\",\n\t\"testing\/quick\",\n\t\"testing\/script\",\n\t\"time\",\n\t\"unicode\",\n\t\"utf8\",\n\t\"utf16\",\n\t\"websocket\",\n\t\"xml\",\n}\n<commit_msg>test\/garbage\/parser: sync with recent parser changes<commit_after>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Garbage collection benchmark: parse Go packages repeatedly.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/parser\"\n\t\"os\"\n\t\"path\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc isGoFile(dir *os.FileInfo) bool {\n\treturn dir.IsRegular() &&\n\t\t!strings.HasPrefix(dir.Name, \".\") && \/\/ ignore .files\n\t\tpath.Ext(dir.Name) == \".go\"\n}\n\nfunc isPkgFile(dir *os.FileInfo) bool {\n\treturn isGoFile(dir) &&\n\t\t!strings.HasSuffix(dir.Name, \"_test.go\") \/\/ ignore test files\n}\n\nfunc pkgName(filename string) string {\n\tfile, err := parser.ParseFile(filename, nil, parser.PackageClauseOnly)\n\tif err != nil || file == nil {\n\t\treturn \"\"\n\t}\n\treturn file.Name.Name\n}\n\nfunc parseDir(dirpath string) map[string]*ast.Package {\n\t\/\/ the package name is the directory name within its parent\n\t\/\/ (use dirname instead of path because dirname is clean; i.e. has no trailing '\/')\n\t_, pkgname := path.Split(dirpath)\n\n\t\/\/ filter function to select the desired .go files\n\tfilter := func(d *os.FileInfo) bool {\n\t\tif isPkgFile(d) {\n\t\t\t\/\/ Some directories contain main packages: Only accept\n\t\t\t\/\/ files that belong to the expected package so that\n\t\t\t\/\/ parser.ParsePackage doesn't return \"multiple packages\n\t\t\t\/\/ found\" errors.\n\t\t\t\/\/ Additionally, accept the special package name\n\t\t\t\/\/ fakePkgName if we are looking at cmd documentation.\n\t\t\tname := pkgName(dirpath + \"\/\" + d.Name)\n\t\t\treturn name == pkgname\n\t\t}\n\t\treturn false\n\t}\n\n\t\/\/ get package AST\n\tpkgs, err := parser.ParseDir(dirpath, filter, parser.ParseComments)\n\tif err != nil {\n\t\tprintln(\"parse\", dirpath, err.String())\n\t\tpanic(\"fail\")\n\t}\n\treturn pkgs\n}\n\nfunc main() {\n\tst := &runtime.MemStats\n\tn := flag.Int(\"n\", 4, \"iterations\")\n\tp := flag.Int(\"p\", len(packages), \"# of packages to keep in memory\")\n\tflag.BoolVar(&st.DebugGC, \"d\", st.DebugGC, \"print GC debugging info (pause times)\")\n\tflag.Parse()\n\n\tvar t0 int64\n\tpkgroot := runtime.GOROOT() + \"\/src\/pkg\/\"\n\tfor pass := 0; pass < 2; pass++ {\n\t\t\/\/ Once the heap is grown to full size, reset counters.\n\t\t\/\/ This hides the start-up pauses, which are much smaller\n\t\t\/\/ than the normal pauses and would otherwise make\n\t\t\/\/ the average look much better than it actually is.\n\t\tst.NumGC = 0\n\t\tst.PauseNs = 0\n\t\tt0 = time.Nanoseconds()\n\n\t\tfor i := 0; i < *n; i++ {\n\t\t\tparsed := make([]map[string]*ast.Package, *p)\n\t\t\tfor j := range parsed {\n\t\t\t\tparsed[j] = parseDir(pkgroot + packages[j%len(packages)])\n\t\t\t}\n\t\t}\n\t\truntime.GC()\n\t}\n\tt1 := time.Nanoseconds()\n\n\tfmt.Printf(\"Alloc=%d\/%d Heap=%d Mallocs=%d PauseTime=%.3f\/%d = %.3f\\n\",\n\t\tst.Alloc, st.TotalAlloc,\n\t\tst.Sys,\n\t\tst.Mallocs, float64(st.PauseNs)\/1e9,\n\t\tst.NumGC, float64(st.PauseNs)\/1e9\/float64(st.NumGC))\n\n\tfmt.Printf(\"%10s %10s %10s\\n\", \"size\", \"#alloc\", \"#free\")\n\tfor _, s := range st.BySize {\n\t\tfmt.Printf(\"%10d %10d %10d\\n\", s.Size, s.Mallocs, s.Frees)\n\t}\n\n\t\/\/ Standard gotest benchmark output, collected by build dashboard.\n\tfmt.Printf(\"garbage.BenchmarkParser %d %d ns\/op\\n\", *n, (t1-t0)\/int64(*n))\n\tfmt.Printf(\"garbage.BenchmarkParserPause %d %d ns\/op\\n\", st.NumGC, int64(st.PauseNs)\/int64(st.NumGC))\n}\n\n\nvar packages = []string{\n\t\"archive\/tar\",\n\t\"asn1\",\n\t\"big\",\n\t\"bufio\",\n\t\"bytes\",\n\t\"cmath\",\n\t\"compress\/flate\",\n\t\"compress\/gzip\",\n\t\"compress\/zlib\",\n\t\"container\/heap\",\n\t\"container\/list\",\n\t\"container\/ring\",\n\t\"container\/vector\",\n\t\"crypto\/aes\",\n\t\"crypto\/block\",\n\t\"crypto\/blowfish\",\n\t\"crypto\/hmac\",\n\t\"crypto\/md4\",\n\t\"crypto\/md5\",\n\t\"crypto\/rand\",\n\t\"crypto\/rc4\",\n\t\"crypto\/rsa\",\n\t\"crypto\/sha1\",\n\t\"crypto\/sha256\",\n\t\"crypto\/sha512\",\n\t\"crypto\/subtle\",\n\t\"crypto\/tls\",\n\t\"crypto\/x509\",\n\t\"crypto\/xtea\",\n\t\"debug\/dwarf\",\n\t\"debug\/macho\",\n\t\"debug\/elf\",\n\t\"debug\/gosym\",\n\t\"debug\/proc\",\n\t\"ebnf\",\n\t\"encoding\/ascii85\",\n\t\"encoding\/base64\",\n\t\"encoding\/binary\",\n\t\"encoding\/git85\",\n\t\"encoding\/hex\",\n\t\"encoding\/pem\",\n\t\"exec\",\n\t\"exp\/datafmt\",\n\t\"exp\/draw\",\n\t\"exp\/eval\",\n\t\"exp\/iterable\",\n\t\"expvar\",\n\t\"flag\",\n\t\"fmt\",\n\t\"go\/ast\",\n\t\"go\/doc\",\n\t\"go\/parser\",\n\t\"go\/printer\",\n\t\"go\/scanner\",\n\t\"go\/token\",\n\t\"gob\",\n\t\"hash\",\n\t\"hash\/adler32\",\n\t\"hash\/crc32\",\n\t\"hash\/crc64\",\n\t\"http\",\n\t\"image\",\n\t\"image\/jpeg\",\n\t\"image\/png\",\n\t\"io\",\n\t\"io\/ioutil\",\n\t\"json\",\n\t\"log\",\n\t\"math\",\n\t\"mime\",\n\t\"net\",\n\t\"nntp\",\n\t\"os\",\n\t\"os\/signal\",\n\t\"patch\",\n\t\"path\",\n\t\"rand\",\n\t\"reflect\",\n\t\"regexp\",\n\t\"rpc\",\n\t\"runtime\",\n\t\"scanner\",\n\t\"sort\",\n\t\"strconv\",\n\t\"strings\",\n\t\"sync\",\n\t\"syscall\",\n\t\"syslog\",\n\t\"tabwriter\",\n\t\"template\",\n\t\"testing\",\n\t\"testing\/iotest\",\n\t\"testing\/quick\",\n\t\"testing\/script\",\n\t\"time\",\n\t\"unicode\",\n\t\"utf8\",\n\t\"utf16\",\n\t\"websocket\",\n\t\"xml\",\n}\n<|endoftext|>"} {"text":"<commit_before>package dashboard_graph\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\n\th \"github.com\/Cepave\/open-falcon-backend\/modules\/f2e-api\/app\/helper\"\n\tm \"github.com\/Cepave\/open-falcon-backend\/modules\/f2e-api\/app\/model\/dashboard\"\n\t\"github.com\/gin-gonic\/gin\"\n)\n\ntype APIGraphCreateReqDataWithNewScreenInputs struct {\n\tScreenName string `json:\"screen_name\" form:\"screen_name\" binding:\"required\"`\n\tTitle string `json:\"title\" form:\"title\" binding:\"required\"`\n\tEndpoints []string `json:\"endpoints\" form:\"endpoints\" binding:\"required\"`\n\tCounters []string `json:\"counters\" form:\"counters\" binding:\"required\"`\n\tTimeSpan int64 `json:\"timespan\" form:\"timespan\"`\n\tGraphType string `json:\"graph_type\" form:\"graph_type\" binding:\"required\"`\n\tMethod string `json:\"method\" form:\"method\"`\n\tPosition int64 `json:\"position\" form:\"position\"`\n\tFalconTags string `json:\"falcon_tags\" form:\"falcon_tags\"`\n}\n\nfunc (mine APIGraphCreateReqDataWithNewScreenInputs) Check() (err error) {\n\tsc := m.DashboardScreen{Name: mine.ScreenName}\n\t\/\/ check screen_id\n\tif sc.ExistName() {\n\t\terr = fmt.Errorf(\"screen name:%v already existing\", mine.ScreenName)\n\t\treturn\n\t}\n\n\tif mine.TimeSpan%60 != 0 {\n\t\terr = fmt.Errorf(\"value of timespan is not vaild: %v\", mine.TimeSpan)\n\t\treturn\n\t}\n\n\tif mine.GraphType != \"h\" && mine.GraphType != \"k\" && mine.GraphType != \"a\" {\n\t\terr = fmt.Errorf(\"value of graph_type only accpet 'k' or 'h' or 'a', you typed: %v\", mine.GraphType)\n\t\treturn\n\t}\n\treturn\n}\n\nfunc GraphCreateReqDataWithNewScreen(c *gin.Context) {\n\tinputs := APIGraphCreateReqDataWithNewScreenInputs{}\n\t\/\/ set default value\n\tinputs.TimeSpan = 3600\n\tinputs.GraphType = \"h\"\n\tif err := c.Bind(&inputs); err != nil {\n\t\th.JSONR(c, badstatus, err)\n\t\treturn\n\t}\n\n\tif err := inputs.Check(); err != nil {\n\t\th.JSONR(c, badstatus, err)\n\t\treturn\n\t}\n\n\tdt := db.Dashboard.Begin()\n\tsc := m.DashboardScreen{Name: inputs.ScreenName}\n\tdt = dt.Save(&sc)\n\tif dt.Error != nil {\n\t\th.JSONR(c, badstatus, dt.Error)\n\t\tdt.Rollback()\n\t\treturn\n\t}\n\n\tes := inputs.Endpoints\n\tcs := inputs.Counters\n\tsort.Strings(es)\n\tsort.Strings(cs)\n\tesString := strings.Join(es, TMP_GRAPH_FILED_DELIMITER)\n\tcsString := strings.Join(cs, TMP_GRAPH_FILED_DELIMITER)\n\tuser, _ := h.GetUser(c)\n\n\td := m.DashboardGraph{\n\t\tTitle: inputs.Title,\n\t\tHosts: esString,\n\t\tCounters: csString,\n\t\tScreenId: sc.ID,\n\t\tTimeSpan: inputs.TimeSpan,\n\t\tGraphType: inputs.GraphType,\n\t\tMethod: inputs.Method,\n\t\tPosition: inputs.Position,\n\t\tCreator: user.Name,\n\t}\n\tdt = dt.Save(&d)\n\tif dt.Error != nil {\n\t\tdt.Rollback()\n\t\th.JSONR(c, badstatus, dt.Error)\n\t\treturn\n\t}\n\n\tvar lid []int\n\tdt = dt.Table(d.TableName()).Raw(\"select LAST_INSERT_ID() as id\").Pluck(\"id\", &lid)\n\tif dt.Error != nil {\n\t\tdt.Rollback()\n\t\th.JSONR(c, badstatus, dt.Error)\n\t\treturn\n\t}\n\tdt.Commit()\n\taid := lid[0]\n\n\th.JSONR(c, map[string]interface{}{\"id\": aid, \"screen_id\": d.ScreenId})\n\n}\n<commit_msg>add screen_name response for create_graph_screen api<commit_after>package dashboard_graph\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\n\th \"github.com\/Cepave\/open-falcon-backend\/modules\/f2e-api\/app\/helper\"\n\tm \"github.com\/Cepave\/open-falcon-backend\/modules\/f2e-api\/app\/model\/dashboard\"\n\t\"github.com\/gin-gonic\/gin\"\n)\n\ntype APIGraphCreateReqDataWithNewScreenInputs struct {\n\tScreenName string `json:\"screen_name\" form:\"screen_name\" binding:\"required\"`\n\tTitle string `json:\"title\" form:\"title\" binding:\"required\"`\n\tEndpoints []string `json:\"endpoints\" form:\"endpoints\" binding:\"required\"`\n\tCounters []string `json:\"counters\" form:\"counters\" binding:\"required\"`\n\tTimeSpan int64 `json:\"timespan\" form:\"timespan\"`\n\tGraphType string `json:\"graph_type\" form:\"graph_type\" binding:\"required\"`\n\tMethod string `json:\"method\" form:\"method\"`\n\tPosition int64 `json:\"position\" form:\"position\"`\n\tFalconTags string `json:\"falcon_tags\" form:\"falcon_tags\"`\n}\n\nfunc (mine APIGraphCreateReqDataWithNewScreenInputs) Check() (err error) {\n\tsc := m.DashboardScreen{Name: mine.ScreenName}\n\t\/\/ check screen_id\n\tif sc.ExistName() {\n\t\terr = fmt.Errorf(\"screen name:%v already existing\", mine.ScreenName)\n\t\treturn\n\t}\n\n\tif mine.TimeSpan%60 != 0 {\n\t\terr = fmt.Errorf(\"value of timespan is not vaild: %v\", mine.TimeSpan)\n\t\treturn\n\t}\n\n\tif mine.GraphType != \"h\" && mine.GraphType != \"k\" && mine.GraphType != \"a\" {\n\t\terr = fmt.Errorf(\"value of graph_type only accpet 'k' or 'h' or 'a', you typed: %v\", mine.GraphType)\n\t\treturn\n\t}\n\treturn\n}\n\nfunc GraphCreateReqDataWithNewScreen(c *gin.Context) {\n\tinputs := APIGraphCreateReqDataWithNewScreenInputs{}\n\t\/\/ set default value\n\tinputs.TimeSpan = 3600\n\tinputs.GraphType = \"h\"\n\tif err := c.Bind(&inputs); err != nil {\n\t\th.JSONR(c, badstatus, err)\n\t\treturn\n\t}\n\n\tif err := inputs.Check(); err != nil {\n\t\th.JSONR(c, badstatus, err)\n\t\treturn\n\t}\n\n\tdt := db.Dashboard.Begin()\n\tsc := m.DashboardScreen{Name: inputs.ScreenName}\n\tdt = dt.Save(&sc)\n\tif dt.Error != nil {\n\t\th.JSONR(c, badstatus, dt.Error)\n\t\tdt.Rollback()\n\t\treturn\n\t}\n\n\tes := inputs.Endpoints\n\tcs := inputs.Counters\n\tsort.Strings(es)\n\tsort.Strings(cs)\n\tesString := strings.Join(es, TMP_GRAPH_FILED_DELIMITER)\n\tcsString := strings.Join(cs, TMP_GRAPH_FILED_DELIMITER)\n\tuser, _ := h.GetUser(c)\n\n\td := m.DashboardGraph{\n\t\tTitle: inputs.Title,\n\t\tHosts: esString,\n\t\tCounters: csString,\n\t\tScreenId: sc.ID,\n\t\tTimeSpan: inputs.TimeSpan,\n\t\tGraphType: inputs.GraphType,\n\t\tMethod: inputs.Method,\n\t\tPosition: inputs.Position,\n\t\tCreator: user.Name,\n\t}\n\tdt = dt.Save(&d)\n\tif dt.Error != nil {\n\t\tdt.Rollback()\n\t\th.JSONR(c, badstatus, dt.Error)\n\t\treturn\n\t}\n\n\tvar lid []int\n\tdt = dt.Table(d.TableName()).Raw(\"select LAST_INSERT_ID() as id\").Pluck(\"id\", &lid)\n\tif dt.Error != nil {\n\t\tdt.Rollback()\n\t\th.JSONR(c, badstatus, dt.Error)\n\t\treturn\n\t}\n\tdt.Commit()\n\taid := lid[0]\n\n\th.JSONR(c, map[string]interface{}{\"id\": aid, \"screen_id\": d.ScreenId, \"screen_name\": inputs.ScreenName})\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/Dieterbe\/statsd-go\"\n\t\"github.com\/vimeo\/statsdaemon\"\n\t\"github.com\/vimeo\/statsdaemon\/timers\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\/pprof\"\n\t\"strings\"\n\t\"time\"\n \"runtime\"\n)\n\nvar cpuprofile = flag.String(\"cpuprofile\", \"\", \"write cpu profile to file\")\nvar memprofile = flag.String(\"memprofile\", \"\", \"write memory profile to this file\")\n\nfunc main() {\n runtime.GOMAXPROCS(4)\n\tflag.Parse()\n\tif *cpuprofile != \"\" {\n\t\tf, err := os.Create(*cpuprofile)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tpprof.StartCPUProfile(f)\n\t\tdefer pprof.StopCPUProfile()\n\t}\n\tif *memprofile != \"\" {\n\t\tf, err := os.Create(*memprofile)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdefer f.Close()\n\t\tdefer pprof.WriteHeapProfile(f)\n\t}\n\n\tcl, clerr := statsd.NewClient(true, \"localhost:8125\", \"statsd-tester\")\n\tladdr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:2003\")\n\tif nil != clerr {\n\t\tpanic(clerr)\n\t}\n\tif nil != err {\n\t\tpanic(err)\n\t}\n\tw := NewWatcher(laddr)\n\tgo w.Run()\n\tpct := timers.Percentiles{}\n\tdaemon := statsdaemon.New(\"test\", \":8125\", \":8126\", \":2003\", \"rates.\", \"timers.\", \"gauges.\", pct, 10, 1000, 1000, nil, false)\n\ttick := time.Tick(time.Duration(1) * time.Second)\n\tgo func() {\n\t\tfor range tick {\n\t\t\t\/\/ send 1M packets per second in theory. in practice this takes more than a second\n msg := []byte(\"test.counter:1|c\")\n\t\t\tfor i := 0; i < 1000000; i++ {\n\t\t\t\t\/\/cl.Increment(\"test-counter\")\n cl.SendRaw(msg)\n\t\t\t}\n\t\t}\n\t}()\n\n\tdaemon.Run()\n}\n\ntype watcher struct {\n\tl *net.TCPListener\n\tseen int\n\tvalues chan string\n}\n\nfunc NewWatcher(laddr *net.TCPAddr) *watcher {\n\tl, err := net.ListenTCP(\"tcp\", laddr)\n\tif nil != err {\n\t\tpanic(err)\n\t}\n\treturn &watcher{\n\t\tl,\n\t\t0,\n\t\tmake(chan string),\n\t}\n}\n\nfunc (w *watcher) Run() {\n\tgo w.accept()\n\tcounter_per_s_key := \"service_is_statsdaemon.instance_is_test.direction_is_in.statsd_type_is_counter.target_type_is_rate.unit_is_Metricps\"\n\tfor {\n\t\tselect {\n\t\tcase str := <-w.values:\n\t\t\tif strings.HasPrefix(str, counter_per_s_key) {\n\t\t\t\tvals := strings.Fields(str)\n\t\t\t\tfmt.Println(\"counters received per second:\", vals[1])\n\t\t\t\tw.seen += 1\n\t\t\t\tif w.seen == 10 {\n\t\t\t\t\tos.Exit(0)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (w *watcher) accept() {\n\tfor {\n\t\tc, err := w.l.AcceptTCP()\n\t\tif nil != err {\n\t\t\tpanic(err)\n\t\t}\n\t\tgo w.handle(c)\n\t}\n}\nfunc (w *watcher) handle(c *net.TCPConn) {\n\tdefer c.Close()\n\tr := bufio.NewReaderSize(c, 4096)\n\tfor {\n\t\tbuf, _, err := r.ReadLine()\n\t\tif nil != err {\n\t\t\tif io.EOF != err {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tstr := string(buf)\n\t\tw.values <- str\n\t}\n}\n<commit_msg>gofmt<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/Dieterbe\/statsd-go\"\n\t\"github.com\/vimeo\/statsdaemon\"\n\t\"github.com\/vimeo\/statsdaemon\/timers\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar cpuprofile = flag.String(\"cpuprofile\", \"\", \"write cpu profile to file\")\nvar memprofile = flag.String(\"memprofile\", \"\", \"write memory profile to this file\")\n\nfunc main() {\n\truntime.GOMAXPROCS(4)\n\tflag.Parse()\n\tif *cpuprofile != \"\" {\n\t\tf, err := os.Create(*cpuprofile)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tpprof.StartCPUProfile(f)\n\t\tdefer pprof.StopCPUProfile()\n\t}\n\tif *memprofile != \"\" {\n\t\tf, err := os.Create(*memprofile)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdefer f.Close()\n\t\tdefer pprof.WriteHeapProfile(f)\n\t}\n\n\tcl, clerr := statsd.NewClient(true, \"localhost:8125\", \"statsd-tester\")\n\tladdr, err := net.ResolveTCPAddr(\"tcp\", \"localhost:2003\")\n\tif nil != clerr {\n\t\tpanic(clerr)\n\t}\n\tif nil != err {\n\t\tpanic(err)\n\t}\n\tw := NewWatcher(laddr)\n\tgo w.Run()\n\tpct := timers.Percentiles{}\n\tdaemon := statsdaemon.New(\"test\", \":8125\", \":8126\", \":2003\", \"rates.\", \"timers.\", \"gauges.\", pct, 10, 1000, 1000, nil, false)\n\ttick := time.Tick(time.Duration(1) * time.Second)\n\tgo func() {\n\t\tfor range tick {\n\t\t\t\/\/ send 1M packets per second in theory. in practice this takes more than a second\n\t\t\tmsg := []byte(\"test.counter:1|c\")\n\t\t\tfor i := 0; i < 1000000; i++ {\n\t\t\t\t\/\/cl.Increment(\"test-counter\")\n\t\t\t\tcl.SendRaw(msg)\n\t\t\t}\n\t\t}\n\t}()\n\n\tdaemon.Run()\n}\n\ntype watcher struct {\n\tl *net.TCPListener\n\tseen int\n\tvalues chan string\n}\n\nfunc NewWatcher(laddr *net.TCPAddr) *watcher {\n\tl, err := net.ListenTCP(\"tcp\", laddr)\n\tif nil != err {\n\t\tpanic(err)\n\t}\n\treturn &watcher{\n\t\tl,\n\t\t0,\n\t\tmake(chan string),\n\t}\n}\n\nfunc (w *watcher) Run() {\n\tgo w.accept()\n\tcounter_per_s_key := \"service_is_statsdaemon.instance_is_test.direction_is_in.statsd_type_is_counter.target_type_is_rate.unit_is_Metricps\"\n\tfor {\n\t\tselect {\n\t\tcase str := <-w.values:\n\t\t\tif strings.HasPrefix(str, counter_per_s_key) {\n\t\t\t\tvals := strings.Fields(str)\n\t\t\t\tfmt.Println(\"counters received per second:\", vals[1])\n\t\t\t\tw.seen += 1\n\t\t\t\tif w.seen == 10 {\n\t\t\t\t\tos.Exit(0)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (w *watcher) accept() {\n\tfor {\n\t\tc, err := w.l.AcceptTCP()\n\t\tif nil != err {\n\t\t\tpanic(err)\n\t\t}\n\t\tgo w.handle(c)\n\t}\n}\nfunc (w *watcher) handle(c *net.TCPConn) {\n\tdefer c.Close()\n\tr := bufio.NewReaderSize(c, 4096)\n\tfor {\n\t\tbuf, _, err := r.ReadLine()\n\t\tif nil != err {\n\t\t\tif io.EOF != err {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tstr := string(buf)\n\t\tw.values <- str\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"container\/heap\"\n\t\"context\"\n\t\"time\"\n\n\t\"github.com\/dshearer\/jobber\/common\"\n\t\"github.com\/dshearer\/jobber\/jobfile\"\n)\n\nfunc nextRunTime(job *jobfile.Job, now time.Time) *time.Time {\n\t\/*\n\t * We test every second from now till 2 years from now,\n\t * looking for a time that satisfies the job's schedule\n\t * criteria.\n\t *\/\n\n\tvar year time.Duration = time.Hour * 24 * 365\n\tvar max time.Time = now.Add(2 * year)\n\tfor next := now; next.Before(max); next = next.Add(time.Second) {\n\t\tif job.FullTimeSpec.Satisfied(next) {\n\t\t\treturn &next\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/*\n * jobQueueImpl is a priority queue containing Jobs that sorts\n * them by next run time.\n *\/\ntype jobQueueImpl []*jobfile.Job \/\/ implements heap.Interface\n\nfunc (q jobQueueImpl) Len() int {\n\treturn len(q)\n}\n\nfunc (q jobQueueImpl) Less(i, j int) bool {\n\treturn q[i].NextRunTime.Before(*q[j].NextRunTime)\n}\n\nfunc (q jobQueueImpl) Swap(i, j int) {\n\tq[i], q[j] = q[j], q[i]\n}\n\nfunc (q *jobQueueImpl) Push(x interface{}) {\n\t*q = append(*q, x.(*jobfile.Job))\n}\n\nfunc (q *jobQueueImpl) Pop() interface{} {\n\tn := len(*q)\n\tif n == 0 {\n\t\treturn nil\n\t} else {\n\t\titem := (*q)[n-1]\n\t\t*q = (*q)[0 : n-1]\n\t\treturn item\n\t}\n}\n\n\/*\n * A priority queue containing jobs. It's a public\n * wrapper for an instance of jobQueueImpl.\n *\/\ntype JobQueue struct {\n\tq jobQueueImpl\n}\n\nfunc (jq *JobQueue) SetJobs(now time.Time, jobs []*jobfile.Job) {\n\tjq.q = make(jobQueueImpl, 0)\n\theap.Init(&jq.q)\n\n\tfor i := 0; i < len(jobs); i++ {\n\t\tvar job *jobfile.Job = jobs[i]\n\t\tjob.NextRunTime = nextRunTime(job, now)\n\t\tif job.NextRunTime != nil {\n\t\t\theap.Push(&jq.q, job)\n\t\t}\n\t}\n}\n\nfunc (jq *JobQueue) Empty() bool {\n\treturn jq.q.Len() == 0\n}\n\n\/*!\n * Get the next job to run, after sleeping until the time it's supposed\n * to run.\n *\n * @return The next job to run, or nil if the context has been canceled.\n *\/\nfunc (jq *JobQueue) Pop(ctx context.Context, now time.Time) *jobfile.Job {\n\tif jq.Empty() {\n\t\t\/\/ just wait till the context has been canceled\n\t\tcommon.Logger.Println(\"Queue: waiting...\")\n\t\t<-ctx.Done()\n\t\tcommon.Logger.Println(\"Queue: done\")\n\t\treturn nil\n\n\t} else {\n\t\t\/\/ get next-scheduled job\n\t\tjob := heap.Pop(&jq.q).(*jobfile.Job)\n\n\t\tvar timeFmt = \"Jan _2 15:04:05\"\n\n\t\tcommon.Logger.Printf(\"Next job to run is %v, at %v.\", job.Name,\n\t\t\tjob.NextRunTime.Format(timeFmt))\n\n\t\t\/\/ sleep till it's time to run it\n\t\tfor now.Before(*job.NextRunTime) {\n\t\t\tsleepDur := job.NextRunTime.Sub(now)\n\n\t\t\tcommon.Logger.Printf(\"It is now %v.\", now.Format(timeFmt))\n\t\t\tcommon.Logger.Printf(\"Sleeping for %v.\", sleepDur)\n\n\t\t\tafterChan := time.After(sleepDur)\n\t\t\tselect {\n\t\t\tcase now = <-afterChan:\n\t\t\tcase <-ctx.Done():\n\t\t\t\t\/\/ abort!\n\t\t\t\theap.Push(&jq.q, job)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tcommon.Logger.Printf(\"It is now %v, which is NOT before %v\",\n\t\t\tnow.Format(timeFmt), job.NextRunTime.Format(timeFmt))\n\n\t\t\/\/ schedule this job's next run\n\t\tjob.NextRunTime = nextRunTime(job, now.Add(time.Second))\n\t\tif job.NextRunTime != nil {\n\t\t\theap.Push(&jq.q, job)\n\t\t}\n\n\t\t\/\/ decide whether we really should run this job\n\t\tif job.ShouldRun() {\n\t\t\tcommon.Logger.Printf(\"Running %v\", job.Name)\n\t\t\treturn job\n\t\t} else {\n\t\t\t\/\/ skip this job\n\t\t\tcommon.Logger.Printf(\"Skipping %v\", job.Name)\n\t\t\treturn jq.Pop(ctx, now)\n\t\t}\n\t}\n}\n<commit_msg>(#192) Bugfix in queue.go: Job execed multiple times<commit_after>package main\n\nimport (\n\t\"container\/heap\"\n\t\"context\"\n\t\"time\"\n\n\t\"github.com\/dshearer\/jobber\/common\"\n\t\"github.com\/dshearer\/jobber\/jobfile\"\n)\n\nfunc nextRunTime(job *jobfile.Job, now time.Time) *time.Time {\n\t\/*\n\t * We test every second from now till 2 years from now,\n\t * looking for a time that satisfies the job's schedule\n\t * criteria.\n\t *\/\n\n\tvar year time.Duration = time.Hour * 24 * 365\n\tvar max time.Time = now.Add(2 * year)\n\tfor next := now; next.Before(max); next = next.Add(time.Second) {\n\t\tif job.FullTimeSpec.Satisfied(next) {\n\t\t\treturn &next\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/*\n * jobQueueImpl is a priority queue containing Jobs that sorts\n * them by next run time.\n *\/\ntype jobQueueImpl []*jobfile.Job \/\/ implements heap.Interface\n\nfunc (q jobQueueImpl) Len() int {\n\treturn len(q)\n}\n\nfunc (q jobQueueImpl) Less(i, j int) bool {\n\treturn q[i].NextRunTime.Before(*q[j].NextRunTime)\n}\n\nfunc (q jobQueueImpl) Swap(i, j int) {\n\tq[i], q[j] = q[j], q[i]\n}\n\nfunc (q *jobQueueImpl) Push(x interface{}) {\n\t*q = append(*q, x.(*jobfile.Job))\n}\n\nfunc (q *jobQueueImpl) Pop() interface{} {\n\tn := len(*q)\n\tif n == 0 {\n\t\treturn nil\n\t} else {\n\t\titem := (*q)[n-1]\n\t\t*q = (*q)[0 : n-1]\n\t\treturn item\n\t}\n}\n\n\/*\n * A priority queue containing jobs. It's a public\n * wrapper for an instance of jobQueueImpl.\n *\/\ntype JobQueue struct {\n\tq jobQueueImpl\n}\n\nfunc (jq *JobQueue) SetJobs(now time.Time, jobs []*jobfile.Job) {\n\tjq.q = make(jobQueueImpl, 0)\n\theap.Init(&jq.q)\n\n\tfor i := 0; i < len(jobs); i++ {\n\t\tvar job *jobfile.Job = jobs[i]\n\t\tjob.NextRunTime = nextRunTime(job, now)\n\t\tif job.NextRunTime != nil {\n\t\t\theap.Push(&jq.q, job)\n\t\t}\n\t}\n}\n\nfunc (jq *JobQueue) Empty() bool {\n\treturn jq.q.Len() == 0\n}\n\n\/*!\n * Get the next job to run, after sleeping until the time it's supposed\n * to run.\n *\n * @return The next job to run, or nil if the context has been canceled.\n *\/\nfunc (jq *JobQueue) Pop(ctx context.Context, now time.Time) *jobfile.Job {\n\tif jq.Empty() {\n\t\t\/\/ just wait till the context has been canceled\n\t\tcommon.Logger.Println(\"Queue: waiting...\")\n\t\t<-ctx.Done()\n\t\tcommon.Logger.Println(\"Queue: done\")\n\t\treturn nil\n\n\t} else {\n\t\t\/\/ get next-scheduled job\n\t\tjob := heap.Pop(&jq.q).(*jobfile.Job)\n\n\t\tvar timeFmt = \"Jan _2 15:04:05\"\n\n\t\tcommon.Logger.Printf(\"Next job to run is %v, at %v.\", job.Name,\n\t\t\tjob.NextRunTime.Format(timeFmt))\n\n\t\t\/*\n\t\t\tGolang has a bug in its time package. We must avoid using most of the\n\t\t\tTime methods. Cf. https:\/\/github.com\/golang\/go\/issues\/27090\n\t\t*\/\n\n\t\t\/\/ sleep till it's time to run it\n\t\tfor now.UnixNano() < job.NextRunTime.UnixNano() {\n\t\t\tnanoDiff := job.NextRunTime.UnixNano() - now.UnixNano()\n\t\t\tsleepDur := time.Duration(nanoDiff) * time.Nanosecond\n\n\t\t\tcommon.Logger.Printf(\"It is now %v.\", now.Format(timeFmt))\n\t\t\tcommon.Logger.Printf(\"Sleeping for %v.\", sleepDur)\n\n\t\t\tafterChan := time.After(sleepDur)\n\t\t\tselect {\n\t\t\tcase now = <-afterChan:\n\t\t\tcase <-ctx.Done():\n\t\t\t\t\/\/ abort!\n\t\t\t\theap.Push(&jq.q, job)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\n\t\tcommon.Logger.Printf(\"It is now %v, which is NOT before %v\",\n\t\t\tnow.Format(timeFmt), job.NextRunTime.Format(timeFmt))\n\n\t\t\/\/ schedule this job's next run\n\t\tjob.NextRunTime = nextRunTime(job, now.Add(time.Second))\n\t\tif job.NextRunTime != nil {\n\t\t\theap.Push(&jq.q, job)\n\t\t}\n\n\t\t\/\/ decide whether we really should run this job\n\t\tif job.ShouldRun() {\n\t\t\tcommon.Logger.Printf(\"Running %v\", job.Name)\n\t\t\treturn job\n\t\t} else {\n\t\t\t\/\/ skip this job\n\t\t\tcommon.Logger.Printf(\"Skipping %v\", job.Name)\n\t\t\treturn jq.Pop(ctx, now)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\ntype testCase struct {\n\tXMLName xml.Name `xml:\"testcase\"`\n\tClassName string `xml:\"classname,attr\"`\n\tName string `xml:\"name,attr\"`\n\tTime float64 `xml:\"time,attr\"`\n\tFailure string `xml:\"failure,omitempty\"`\n}\n\ntype TestSuite struct {\n\tXMLName xml.Name `xml:\"testsuite\"`\n\tFailures int `xml:\"failures,attr\"`\n\tTests int `xml:\"tests,attr\"`\n\tTime float64 `xml:\"time,attr\"`\n\tCases []testCase\n}\n\n\/\/ append(errs, err) if err != nil\nfunc appendError(errs []error, err error) []error {\n\tif err != nil {\n\t\treturn append(errs, err)\n\t}\n\treturn errs\n}\n\nfunc writeXML(dump string, start time.Time) {\n\tsuite.Time = time.Since(start).Seconds()\n\tout, err := xml.MarshalIndent(&suite, \"\", \" \")\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not marshal XML: %s\", err)\n\t}\n\tpath := filepath.Join(dump, \"junit_runner.xml\")\n\tf, err := os.Create(path)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not create file: %s\", err)\n\t}\n\tdefer f.Close()\n\tif _, err := f.WriteString(xml.Header); err != nil {\n\t\tlog.Fatalf(\"Error writing XML header: %s\", err)\n\t}\n\tif _, err := f.Write(out); err != nil {\n\t\tlog.Fatalf(\"Error writing XML data: %s\", err)\n\t}\n\tlog.Printf(\"Saved XML output to %s.\", path)\n}\n\n\/\/ return f(), adding junit xml testcase result for name\nfunc xmlWrap(name string, f func() error) error {\n\tstart := time.Now()\n\terr := f()\n\tduration := time.Since(start)\n\tc := testCase{\n\t\tName: name,\n\t\tClassName: \"e2e.go\",\n\t\tTime: duration.Seconds(),\n\t}\n\tif err != nil {\n\t\tc.Failure = err.Error()\n\t\tsuite.Failures++\n\t}\n\tsuite.Cases = append(suite.Cases, c)\n\tsuite.Tests++\n\treturn err\n}\n\nvar (\n\tinterruptTimeout = time.Duration(10 * time.Minute)\n\tterminateTimeout = time.Duration(5 * time.Minute) \/\/ terminate 5 minutes after SIGINT is sent.\n\n\tinterrupt = time.NewTimer(interruptTimeout) \/\/ interrupt testing at this time.\n\tterminate = time.NewTimer(time.Duration(0)) \/\/ terminate testing at this time.\n\n\tsuite TestSuite\n)\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n}\n\nvar letterRunes = []rune(\"abcdefghijklmnopqrstuvwxyz\")\n\n\/\/ return cmd.Output(), potentially timing out in the process.\nfunc output(cmd *exec.Cmd) ([]byte, error) {\n\tinterrupt.Reset(interruptTimeout)\n\tstepName := strings.Join(cmd.Args, \" \")\n\tcmd.Stderr = os.Stderr\n\n\tlog.Printf(\"Running: %v\", stepName)\n\tdefer func(start time.Time) {\n\t\tlog.Printf(\"Step '%s' finished in %s\", stepName, time.Since(start))\n\t}(time.Now())\n\n\tcmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}\n\ttype result struct {\n\t\tbytes []byte\n\t\terr error\n\t}\n\tfinished := make(chan result)\n\tgo func() {\n\t\tb, err := cmd.Output()\n\t\tfinished <- result{b, err}\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase <-terminate.C:\n\t\t\tterminate.Reset(time.Duration(0)) \/\/ Kill subsequent processes immediately.\n\t\t\tsyscall.Kill(-cmd.Process.Pid, syscall.SIGKILL)\n\t\t\tcmd.Process.Kill()\n\t\t\treturn nil, fmt.Errorf(\"Terminate testing after 15m after %s timeout during %s\", interruptTimeout, stepName)\n\t\tcase <-interrupt.C:\n\t\t\tlog.Printf(\"Interrupt testing after %s timeout. Will terminate in another %s\", interruptTimeout, terminateTimeout)\n\t\t\tterminate.Reset(terminateTimeout)\n\t\t\tif err := syscall.Kill(-cmd.Process.Pid, syscall.SIGINT); err != nil {\n\t\t\t\tlog.Printf(\"Failed to interrupt %v. Will terminate immediately: %v\", stepName, err)\n\t\t\t\tsyscall.Kill(-cmd.Process.Pid, syscall.SIGTERM)\n\t\t\t\tcmd.Process.Kill()\n\t\t\t}\n\t\tcase fin := <-finished:\n\t\t\treturn fin.bytes, fin.err\n\t\t}\n\t}\n}\n\nfunc randStringRunes(n int) string {\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letterRunes[rand.Intn(len(letterRunes))]\n\t}\n\treturn string(b)\n}\n\nfunc main() {\n\tmatches, err := filepath.Glob(\"stable\/*\")\n\tdefer writeXML(\"\/workspace\/_artifacts\", time.Now())\n\tif !terminate.Stop() {\n\t\t<-terminate.C \/\/ Drain the value if necessary.\n\t}\n\n\tif !interrupt.Stop() {\n\t\t<-interrupt.C \/\/ Drain value\n\t}\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfor _, dir := range matches {\n\t\tns := randStringRunes(10)\n\t\trel := randStringRunes(3)\n\n\t\txmlWrap(fmt.Sprintf(\"lint %s\", dir), func() error {\n\t\t\t_, execErr := output(exec.Command(\"linux-amd64\/helm\", \"lint\", dir))\n\t\t\treturn execErr\n\t\t})\n\n\t\txmlWrap(fmt.Sprintf(\"install %s\", dir), func() error {\n\t\t\to, execErr := output(exec.Command(\"linux-amd64\/helm\", \"install\", dir, \"--namespace\", ns, \"--name\", rel, \"--wait\"))\n\t\t\tif execErr != nil {\n\t\t\t\treturn fmt.Errorf(\"%s Command output: %s\", execErr, string(o[:]))\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\txmlWrap(fmt.Sprintf(\"test %s\", dir), func() error {\n\t\t\to, execErr := output(exec.Command(\"linux-amd64\/helm\", \"test\", rel))\n\t\t\tif execErr != nil {\n\t\t\t\treturn fmt.Errorf(\"%s Command output: %s\", execErr, string(o[:]))\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\txmlWrap(fmt.Sprintf(\"purge %s\", dir), func() error {\n\t\t\to, execErr := output(exec.Command(\"linux-amd64\/helm\", \"delete\", rel, \"--purge\"))\n\t\t\tif execErr != nil {\n\t\t\t\treturn fmt.Errorf(\"%s Command output: %s\", execErr, string(o[:]))\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n}\n<commit_msg>Update junit file name (#937)<commit_after>\/*\nCopyright 2017 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage main\n\nimport (\n\t\"encoding\/xml\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\ntype testCase struct {\n\tXMLName xml.Name `xml:\"testcase\"`\n\tClassName string `xml:\"classname,attr\"`\n\tName string `xml:\"name,attr\"`\n\tTime float64 `xml:\"time,attr\"`\n\tFailure string `xml:\"failure,omitempty\"`\n}\n\ntype TestSuite struct {\n\tXMLName xml.Name `xml:\"testsuite\"`\n\tFailures int `xml:\"failures,attr\"`\n\tTests int `xml:\"tests,attr\"`\n\tTime float64 `xml:\"time,attr\"`\n\tCases []testCase\n}\n\n\/\/ append(errs, err) if err != nil\nfunc appendError(errs []error, err error) []error {\n\tif err != nil {\n\t\treturn append(errs, err)\n\t}\n\treturn errs\n}\n\nfunc writeXML(dump string, start time.Time) {\n\tsuite.Time = time.Since(start).Seconds()\n\tout, err := xml.MarshalIndent(&suite, \"\", \" \")\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not marshal XML: %s\", err)\n\t}\n\tpath := filepath.Join(dump, \"junit_01.xml\")\n\tf, err := os.Create(path)\n\tif err != nil {\n\t\tlog.Fatalf(\"Could not create file: %s\", err)\n\t}\n\tdefer f.Close()\n\tif _, err := f.WriteString(xml.Header); err != nil {\n\t\tlog.Fatalf(\"Error writing XML header: %s\", err)\n\t}\n\tif _, err := f.Write(out); err != nil {\n\t\tlog.Fatalf(\"Error writing XML data: %s\", err)\n\t}\n\tlog.Printf(\"Saved XML output to %s.\", path)\n}\n\n\/\/ return f(), adding junit xml testcase result for name\nfunc xmlWrap(name string, f func() error) error {\n\tstart := time.Now()\n\terr := f()\n\tduration := time.Since(start)\n\tc := testCase{\n\t\tName: name,\n\t\tClassName: \"e2e.go\",\n\t\tTime: duration.Seconds(),\n\t}\n\tif err != nil {\n\t\tc.Failure = err.Error()\n\t\tsuite.Failures++\n\t}\n\tsuite.Cases = append(suite.Cases, c)\n\tsuite.Tests++\n\treturn err\n}\n\nvar (\n\tinterruptTimeout = time.Duration(10 * time.Minute)\n\tterminateTimeout = time.Duration(5 * time.Minute) \/\/ terminate 5 minutes after SIGINT is sent.\n\n\tinterrupt = time.NewTimer(interruptTimeout) \/\/ interrupt testing at this time.\n\tterminate = time.NewTimer(time.Duration(0)) \/\/ terminate testing at this time.\n\n\tsuite TestSuite\n)\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n}\n\nvar letterRunes = []rune(\"abcdefghijklmnopqrstuvwxyz\")\n\n\/\/ return cmd.Output(), potentially timing out in the process.\nfunc output(cmd *exec.Cmd) ([]byte, error) {\n\tinterrupt.Reset(interruptTimeout)\n\tstepName := strings.Join(cmd.Args, \" \")\n\tcmd.Stderr = os.Stderr\n\n\tlog.Printf(\"Running: %v\", stepName)\n\tdefer func(start time.Time) {\n\t\tlog.Printf(\"Step '%s' finished in %s\", stepName, time.Since(start))\n\t}(time.Now())\n\n\tcmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}\n\ttype result struct {\n\t\tbytes []byte\n\t\terr error\n\t}\n\tfinished := make(chan result)\n\tgo func() {\n\t\tb, err := cmd.Output()\n\t\tfinished <- result{b, err}\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase <-terminate.C:\n\t\t\tterminate.Reset(time.Duration(0)) \/\/ Kill subsequent processes immediately.\n\t\t\tsyscall.Kill(-cmd.Process.Pid, syscall.SIGKILL)\n\t\t\tcmd.Process.Kill()\n\t\t\treturn nil, fmt.Errorf(\"Terminate testing after 15m after %s timeout during %s\", interruptTimeout, stepName)\n\t\tcase <-interrupt.C:\n\t\t\tlog.Printf(\"Interrupt testing after %s timeout. Will terminate in another %s\", interruptTimeout, terminateTimeout)\n\t\t\tterminate.Reset(terminateTimeout)\n\t\t\tif err := syscall.Kill(-cmd.Process.Pid, syscall.SIGINT); err != nil {\n\t\t\t\tlog.Printf(\"Failed to interrupt %v. Will terminate immediately: %v\", stepName, err)\n\t\t\t\tsyscall.Kill(-cmd.Process.Pid, syscall.SIGTERM)\n\t\t\t\tcmd.Process.Kill()\n\t\t\t}\n\t\tcase fin := <-finished:\n\t\t\treturn fin.bytes, fin.err\n\t\t}\n\t}\n}\n\nfunc randStringRunes(n int) string {\n\tb := make([]rune, n)\n\tfor i := range b {\n\t\tb[i] = letterRunes[rand.Intn(len(letterRunes))]\n\t}\n\treturn string(b)\n}\n\nfunc main() {\n\tmatches, err := filepath.Glob(\"stable\/*\")\n\tdefer writeXML(\"\/workspace\/_artifacts\", time.Now())\n\tif !terminate.Stop() {\n\t\t<-terminate.C \/\/ Drain the value if necessary.\n\t}\n\n\tif !interrupt.Stop() {\n\t\t<-interrupt.C \/\/ Drain value\n\t}\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfor _, dir := range matches {\n\t\tns := randStringRunes(10)\n\t\trel := randStringRunes(3)\n\n\t\txmlWrap(fmt.Sprintf(\"lint %s\", dir), func() error {\n\t\t\t_, execErr := output(exec.Command(\"linux-amd64\/helm\", \"lint\", dir))\n\t\t\treturn execErr\n\t\t})\n\n\t\txmlWrap(fmt.Sprintf(\"install %s\", dir), func() error {\n\t\t\to, execErr := output(exec.Command(\"linux-amd64\/helm\", \"install\", dir, \"--namespace\", ns, \"--name\", rel, \"--wait\"))\n\t\t\tif execErr != nil {\n\t\t\t\treturn fmt.Errorf(\"%s Command output: %s\", execErr, string(o[:]))\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\txmlWrap(fmt.Sprintf(\"test %s\", dir), func() error {\n\t\t\to, execErr := output(exec.Command(\"linux-amd64\/helm\", \"test\", rel))\n\t\t\tif execErr != nil {\n\t\t\t\treturn fmt.Errorf(\"%s Command output: %s\", execErr, string(o[:]))\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\n\t\txmlWrap(fmt.Sprintf(\"purge %s\", dir), func() error {\n\t\t\to, execErr := output(exec.Command(\"linux-amd64\/helm\", \"delete\", rel, \"--purge\"))\n\t\t\tif execErr != nil {\n\t\t\t\treturn fmt.Errorf(\"%s Command output: %s\", execErr, string(o[:]))\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package watcher holds tools for loglist-files versioning and updates propagation.\npackage watcher\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/google\/certificate-transparency-go\/x509util\"\n\t\"github.com\/sergi\/go-diff\/diffmatchpatch\"\n)\n\n\/\/ Diff regularly check data at path provided, notifies if changes detected.\ntype Diff struct {\n\tlatest []byte\n\n\tsynced []byte\n\tdiffs []diffmatchpatch.Diff\n\t\/\/ mu guards all data-fields: latest, synced and diffs.\n\tmu sync.Mutex\n\n\t\/\/ Exactly one of url\/filepath fields is specified.\n\turl string\n\tfilepath string\n\n\tcheckInterval time.Duration\n\tticker *time.Ticker\n\n\tevents chan<- DiffEvent\n}\n\n\/\/ DiffEvent refelects diff\/error detection.\ntype DiffEvent struct {\n\tDiffs []diffmatchpatch.Diff\n\tErr error\n}\n\n\/\/ NewDiff is factory for Diff.\nfunc NewDiff(ctx context.Context, path string, isPathURL bool, checkInterval time.Duration, events chan<- DiffEvent) *Diff {\n\tvar d Diff\n\tif isPathURL {\n\t\td.url = path\n\t} else {\n\t\td.filepath = path\n\t}\n\td.checkInterval = checkInterval\n\td.events = events\n\n\td.init(ctx)\n\treturn &d\n}\n\nfunc (d *Diff) init(ctx context.Context) {\n\td.checkUpdate()\n\td.ticker = time.NewTicker(d.checkInterval)\n\tgo func() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\td.ticker.Stop()\n\t\tcase <-d.ticker.C:\n\t\t\td.checkUpdate()\n\t\t}\n\t}()\n}\n\nfunc (d *Diff) checkUpdate() {\n\tvar path string\n\tif len(d.url) > 0 {\n\t\tpath = d.url\n\t} else {\n\t\tpath = d.filepath\n\t}\n\tllData, err := x509util.ReadFileOrURL(path, &http.Client{Timeout: time.Second * 10})\n\tif err != nil {\n\t\td.events <- DiffEvent{Err: err}\n\t\treturn\n\t}\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\td.latest = llData\n\t\/\/ Compare data as strings\n\tdmp := diffmatchpatch.New()\n\tdiffs := dmp.DiffMain(string(d.synced), string(d.latest), false)\n\td.diffs = diffs\n\tif len(diffs) > 0 && d.events != nil {\n\t\td.events <- DiffEvent{Diffs: d.diffs}\n\t}\n}\n\nfunc (d *Diff) sync() {\n\td.mu.Lock()\n\td.synced = d.latest\n\td.diffs = []diffmatchpatch.Diff{}\n\td.mu.Unlock()\n}\n\n\/\/ GetSyncedData provides acces to Diff data.\nfunc (d *Diff) GetSyncedData() []byte {\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\treturn d.synced\n}\n<commit_msg>watcher: silence warning about as-yet unused function<commit_after>\/\/ Copyright 2018 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package watcher holds tools for loglist-files versioning and updates propagation.\npackage watcher\n\nimport (\n\t\"context\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/google\/certificate-transparency-go\/x509util\"\n\t\"github.com\/sergi\/go-diff\/diffmatchpatch\"\n)\n\n\/\/ Diff regularly check data at path provided, notifies if changes detected.\ntype Diff struct {\n\tlatest []byte\n\n\tsynced []byte\n\tdiffs []diffmatchpatch.Diff\n\t\/\/ mu guards all data-fields: latest, synced and diffs.\n\tmu sync.Mutex\n\n\t\/\/ Exactly one of url\/filepath fields is specified.\n\turl string\n\tfilepath string\n\n\tcheckInterval time.Duration\n\tticker *time.Ticker\n\n\tevents chan<- DiffEvent\n}\n\n\/\/ DiffEvent refelects diff\/error detection.\ntype DiffEvent struct {\n\tDiffs []diffmatchpatch.Diff\n\tErr error\n}\n\n\/\/ NewDiff is factory for Diff.\nfunc NewDiff(ctx context.Context, path string, isPathURL bool, checkInterval time.Duration, events chan<- DiffEvent) *Diff {\n\tvar d Diff\n\tif isPathURL {\n\t\td.url = path\n\t} else {\n\t\td.filepath = path\n\t}\n\td.checkInterval = checkInterval\n\td.events = events\n\n\td.init(ctx)\n\treturn &d\n}\n\nfunc (d *Diff) init(ctx context.Context) {\n\td.checkUpdate()\n\td.ticker = time.NewTicker(d.checkInterval)\n\tgo func() {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\td.ticker.Stop()\n\t\tcase <-d.ticker.C:\n\t\t\td.checkUpdate()\n\t\t}\n\t}()\n}\n\nfunc (d *Diff) checkUpdate() {\n\tvar path string\n\tif len(d.url) > 0 {\n\t\tpath = d.url\n\t} else {\n\t\tpath = d.filepath\n\t}\n\tllData, err := x509util.ReadFileOrURL(path, &http.Client{Timeout: time.Second * 10})\n\tif err != nil {\n\t\td.events <- DiffEvent{Err: err}\n\t\treturn\n\t}\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\td.latest = llData\n\t\/\/ Compare data as strings\n\tdmp := diffmatchpatch.New()\n\tdiffs := dmp.DiffMain(string(d.synced), string(d.latest), false)\n\td.diffs = diffs\n\tif len(diffs) > 0 && d.events != nil {\n\t\td.events <- DiffEvent{Diffs: d.diffs}\n\t}\n}\n\nfunc (d *Diff) sync() { \/\/ nolint:unused\n\td.mu.Lock()\n\td.synced = d.latest\n\td.diffs = []diffmatchpatch.Diff{}\n\td.mu.Unlock()\n}\n\n\/\/ GetSyncedData provides acces to Diff data.\nfunc (d *Diff) GetSyncedData() []byte {\n\td.mu.Lock()\n\tdefer d.mu.Unlock()\n\treturn d.synced\n}\n<|endoftext|>"} {"text":"<commit_before>package notify\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc nonil(err ...error) error {\n\tfor _, err := range err {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc strip(e Event) Event {\n\treturn e & ^Recursive\n}\n\nfunc test(t *testing.T, w Watcher, ei []EventInfo, d time.Duration) {\n\tdone, c, fn := make(chan error), make(chan EventInfo, len(ei)), filepath.WalkFunc(nil)\n\twalk, exec, cleanup := Tree.Create(t)\n\tdefer cleanup()\n\tif w.IsRecursive() {\n\t\tvar once sync.Once\n\t\tfn = func(p string, fi os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tonce.Do(func() {\n\t\t\t\terr = w.Watch(p, All)\n\t\t\t})\n\t\t\treturn nonil(err, filepath.SkipDir)\n\t\t}\n\t} else {\n\t\tfn = func(p string, fi os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif fi.IsDir() {\n\t\t\t\terr = w.Watch(p, strip(All))\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\tw.Fanin(c)\n\tif err := walk(fn); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgo func() {\n\t\tfor _, ei := range ei {\n\t\t\texec(ei)\n\t\t\tif err := equal(<-c, ei); err != nil {\n\t\t\t\tdone <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tdone <- nil\n\t}()\n\tselect {\n\tcase <-time.After(d):\n\t\tt.Fatalf(\"test has timed out after %v\", d)\n\tcase err := <-done:\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestWatcher(t *testing.T) {\n\tif global.Watcher == nil {\n\t\tt.Skip(\"no watcher to test\")\n\t}\n\tei := []EventInfo{\n\t\tEv(\"github.com\/rjeczalik\/fs\/fs_test.go\", Create, false),\n\t\tEv(\"github.com\/rjeczalik\/fs\/binfs\", Create, true),\n\t\tEv(\"github.com\/rjeczalik\/fs\/binfs.go\", Create, false),\n\t\tEv(\"github.com\/rjeczalik\/fs\/binfs_test.go\", Create, false),\n\t\tEv(\"github.com\/rjeczalik\/fs\/binfs\", Delete, true),\n\t\tEv(\"github.com\/rjeczalik\/fs\/binfs\", Create, true),\n\t\t\/\/ BUG(OS X): Fsnotify claims, the following is Create not Move.\n\t\t\/\/ Ev(\"github.com\/rjeczalik\/fs\/binfs\", Move, true),\n\t\tEv(\"github.com\/rjeczalik\/fs\/virfs\", Create, false),\n\t\t\/\/ BUG(OS X): When being watched by fsnotify, writing to the newly-created\n\t\t\/\/ file fails with \"bad file descriptor\".\n\t\t\/\/ Ev(\"github.com\/rjeczalik\/fs\/virfs\", Write, false),\n\t\tEv(\"github.com\/rjeczalik\/fs\/virfs\", Delete, false),\n\t\t\/\/ BUG(OS X): The same as above, this time \"bad file descriptor\" on a file\n\t\t\/\/ that was created previously.\n\t\tEv(\"github.com\/rjeczalik\/fs\/LICENSE\", Write, false),\n\t}\n\ttest(t, global.Watcher, ei, time.Second)\n}\n<commit_msg>Rename TestWatcher -> TestGlobalWatcher<commit_after>package notify\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc nonil(err ...error) error {\n\tfor _, err := range err {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc strip(e Event) Event {\n\treturn e & ^Recursive\n}\n\nfunc test(t *testing.T, w Watcher, ei []EventInfo, d time.Duration) {\n\tdone, c, fn := make(chan error), make(chan EventInfo, len(ei)), filepath.WalkFunc(nil)\n\twalk, exec, cleanup := Tree.Create(t)\n\tdefer cleanup()\n\tif w.IsRecursive() {\n\t\tvar once sync.Once\n\t\tfn = func(p string, fi os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tonce.Do(func() {\n\t\t\t\terr = w.Watch(p, All)\n\t\t\t})\n\t\t\treturn nonil(err, filepath.SkipDir)\n\t\t}\n\t} else {\n\t\tfn = func(p string, fi os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif fi.IsDir() {\n\t\t\t\terr = w.Watch(p, strip(All))\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\tw.Fanin(c)\n\tif err := walk(fn); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgo func() {\n\t\tfor _, ei := range ei {\n\t\t\texec(ei)\n\t\t\tif err := equal(<-c, ei); err != nil {\n\t\t\t\tdone <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tdone <- nil\n\t}()\n\tselect {\n\tcase <-time.After(d):\n\t\tt.Fatalf(\"test has timed out after %v\", d)\n\tcase err := <-done:\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc TestGlobalWatcher(t *testing.T) {\n\tif global.Watcher == nil {\n\t\tt.Skip(\"no global watcher to test\")\n\t}\n\tei := []EventInfo{\n\t\tEv(\"github.com\/rjeczalik\/fs\/fs_test.go\", Create, false),\n\t\tEv(\"github.com\/rjeczalik\/fs\/binfs\", Create, true),\n\t\tEv(\"github.com\/rjeczalik\/fs\/binfs.go\", Create, false),\n\t\tEv(\"github.com\/rjeczalik\/fs\/binfs_test.go\", Create, false),\n\t\tEv(\"github.com\/rjeczalik\/fs\/binfs\", Delete, true),\n\t\tEv(\"github.com\/rjeczalik\/fs\/binfs\", Create, true),\n\t\t\/\/ BUG(OS X): Fsnotify claims, the following is Create not Move.\n\t\t\/\/ Ev(\"github.com\/rjeczalik\/fs\/binfs\", Move, true),\n\t\tEv(\"github.com\/rjeczalik\/fs\/virfs\", Create, false),\n\t\t\/\/ BUG(OS X): When being watched by fsnotify, writing to the newly-created\n\t\t\/\/ file fails with \"bad file descriptor\".\n\t\t\/\/ Ev(\"github.com\/rjeczalik\/fs\/virfs\", Write, false),\n\t\tEv(\"github.com\/rjeczalik\/fs\/virfs\", Delete, false),\n\t\t\/\/ BUG(OS X): The same as above, this time \"bad file descriptor\" on a file\n\t\t\/\/ that was created previously.\n\t\tEv(\"github.com\/rjeczalik\/fs\/LICENSE\", Write, false),\n\t}\n\ttest(t, global.Watcher, ei, time.Second)\n}\n<|endoftext|>"} {"text":"<commit_before>package commands\n\nimport (\n\t\"fmt\"\n\tcmds \"github.com\/jbenet\/go-ipfs\/commands\"\n\t\"strings\"\n)\n\ntype TestOutput struct {\n\tFoo string\n\tBar int\n}\n\nvar Root = &cmds.Command{\n\tOptions: []cmds.Option{\n\t\tcmds.Option{[]string{\"config\", \"c\"}, cmds.String},\n\t\tcmds.Option{[]string{\"debug\", \"D\"}, cmds.Bool},\n\t\tcmds.Option{[]string{\"help\", \"h\"}, cmds.Bool},\n\t\tcmds.Option{[]string{\"local\", \"L\"}, cmds.Bool},\n\t},\n\tHelp: `ipfs - global versioned p2p merkledag file system\n\nBasic commands:\n\n init Initialize ipfs local configuration.\n add <path> Add an object to ipfs.\n cat <ref> Show ipfs object data.\n ls <ref> List links from an object.\n refs <ref> List link hashes from an object.\n\nTool commands:\n\n config Manage configuration.\n version Show ipfs version information.\n commands List all available commands.\n\nAdvanced Commands:\n\n mount Mount an ipfs read-only mountpoint.\n serve Serve an interface to ipfs.\n net-diag Print network diagnostic.\n\nUse \"ipfs help <command>\" for more information about a command.\n`,\n\tSubcommands: map[string]*cmds.Command{\n\t\t\"cat\": cat,\n\n\t\t\/\/ test subcommands\n\t\t\/\/ TODO: remove these when we don't need them anymore\n\t\t\"beep\": &cmds.Command{\n\t\t\tRun: func(req cmds.Request, res cmds.Response) {\n\t\t\t\tv := &TestOutput{\"hello, world\", 1337}\n\t\t\t\tres.SetValue(v)\n\t\t\t},\n\t\t\tFormat: func(res cmds.Response) (string, error) {\n\t\t\t\tv := res.Value().(*TestOutput)\n\t\t\t\ts := fmt.Sprintf(\"Foo: %s\\n\", v.Foo)\n\t\t\t\ts += fmt.Sprintf(\"Bar: %v\\n\", v.Bar)\n\t\t\t\treturn s, nil\n\t\t\t},\n\t\t\tType: &TestOutput{},\n\t\t},\n\t\t\"boop\": &cmds.Command{\n\t\t\tRun: func(req cmds.Request, res cmds.Response) {\n\t\t\t\tv := strings.NewReader(\"hello, world\")\n\t\t\t\tres.SetValue(v)\n\t\t\t},\n\t\t},\n\t\t\"warp\": &cmds.Command{\n\t\t\tOptions: []cmds.Option{\n\t\t\t\tcmds.Option{[]string{\"power\", \"p\"}, cmds.Float},\n\t\t\t},\n\t\t\tRun: func(req cmds.Request, res cmds.Response) {\n\t\t\t\tthreshold := 1.21\n\n\t\t\t\tif power, found := req.Option(\"power\"); found && power.(float64) >= threshold {\n\t\t\t\t\tres.SetValue(struct {\n\t\t\t\t\t\tStatus string\n\t\t\t\t\t\tPower float64\n\t\t\t\t\t}{\"Flux capacitor activated!\", power.(float64)})\n\n\t\t\t\t} else {\n\t\t\t\t\terr := fmt.Errorf(\"Insufficient power (%v jiggawatts required)\", threshold)\n\t\t\t\t\tres.SetError(err, cmds.ErrClient)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"args\": &cmds.Command{\n\t\t\tRun: func(req cmds.Request, res cmds.Response) {\n\t\t\t\tres.SetValue(req.Arguments())\n\t\t\t},\n\t\t},\n\t\t\"echo\": &cmds.Command{\n\t\t\tRun: func(req cmds.Request, res cmds.Response) {\n\t\t\t\tres.SetValue(req.Stream())\n\t\t\t},\n\t\t},\n\t},\n}\n<commit_msg>fix(core\/commands2) ipfs help message<commit_after>package commands\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\tcmds \"github.com\/jbenet\/go-ipfs\/commands\"\n)\n\ntype TestOutput struct {\n\tFoo string\n\tBar int\n}\n\nvar Root = &cmds.Command{\n\tOptions: []cmds.Option{\n\t\tcmds.Option{[]string{\"config\", \"c\"}, cmds.String},\n\t\tcmds.Option{[]string{\"debug\", \"D\"}, cmds.Bool},\n\t\tcmds.Option{[]string{\"help\", \"h\"}, cmds.Bool},\n\t\tcmds.Option{[]string{\"local\", \"L\"}, cmds.Bool},\n\t},\n\tHelp: `ipfs - global versioned p2p merkledag file system\n\nBasic commands:\n\n init Initialize ipfs local configuration.\n add <path> Add an object to ipfs.\n cat <ref> Show ipfs object data.\n ls <ref> List links from an object.\n refs <ref> List link hashes from an object.\n\nTool commands:\n\n config Manage configuration.\n update Download and apply go-ipfs updates.\n version Show ipfs version information.\n commands List all available commands.\n\nAdvanced Commands:\n\n mount Mount an ipfs read-only mountpoint.\n serve Serve an interface to ipfs.\n net-diag Print network diagnostic\n\nPlumbing commands:\n\n block Interact with raw blocks in the datastore\n object Interact with raw dag nodes\n\n\nUse \"ipfs help <command>\" for more information about a command.\n`,\n\tSubcommands: map[string]*cmds.Command{\n\t\t\"cat\": cat,\n\n\t\t\/\/ test subcommands\n\t\t\/\/ TODO: remove these when we don't need them anymore\n\t\t\"beep\": &cmds.Command{\n\t\t\tRun: func(req cmds.Request, res cmds.Response) {\n\t\t\t\tv := &TestOutput{\"hello, world\", 1337}\n\t\t\t\tres.SetValue(v)\n\t\t\t},\n\t\t\tFormat: func(res cmds.Response) (string, error) {\n\t\t\t\tv := res.Value().(*TestOutput)\n\t\t\t\ts := fmt.Sprintf(\"Foo: %s\\n\", v.Foo)\n\t\t\t\ts += fmt.Sprintf(\"Bar: %v\\n\", v.Bar)\n\t\t\t\treturn s, nil\n\t\t\t},\n\t\t\tType: &TestOutput{},\n\t\t},\n\t\t\"boop\": &cmds.Command{\n\t\t\tRun: func(req cmds.Request, res cmds.Response) {\n\t\t\t\tv := strings.NewReader(\"hello, world\")\n\t\t\t\tres.SetValue(v)\n\t\t\t},\n\t\t},\n\t\t\"warp\": &cmds.Command{\n\t\t\tOptions: []cmds.Option{\n\t\t\t\tcmds.Option{[]string{\"power\", \"p\"}, cmds.Float},\n\t\t\t},\n\t\t\tRun: func(req cmds.Request, res cmds.Response) {\n\t\t\t\tthreshold := 1.21\n\n\t\t\t\tif power, found := req.Option(\"power\"); found && power.(float64) >= threshold {\n\t\t\t\t\tres.SetValue(struct {\n\t\t\t\t\t\tStatus string\n\t\t\t\t\t\tPower float64\n\t\t\t\t\t}{\"Flux capacitor activated!\", power.(float64)})\n\n\t\t\t\t} else {\n\t\t\t\t\terr := fmt.Errorf(\"Insufficient power (%v jiggawatts required)\", threshold)\n\t\t\t\t\tres.SetError(err, cmds.ErrClient)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"args\": &cmds.Command{\n\t\t\tRun: func(req cmds.Request, res cmds.Response) {\n\t\t\t\tres.SetValue(req.Arguments())\n\t\t\t},\n\t\t},\n\t\t\"echo\": &cmds.Command{\n\t\t\tRun: func(req cmds.Request, res cmds.Response) {\n\t\t\t\tres.SetValue(req.Stream())\n\t\t\t},\n\t\t},\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before>package compute\n\nimport \"fmt\"\n\n\/\/ Entity represents a Cloud Control entity.\ntype Entity interface {\n\t\/\/ GetID retrieves the entity's ID.\n\tGetID() string\n}\n\n\/\/ NamedEntity represents a named Cloud Control entity.\ntype NamedEntity interface {\n\tEntity\n\n\t\/\/ GetName retrieves the entity's name.\n\tGetName() string\n\n\t\/\/ ToEntityReference creates an EntityReference representing the entity.\n\tToEntityReference() EntityReference\n}\n\n\/\/ EntityReference is used to group an entity Id and name together for serialisation \/ deserialisation purposes.\ntype EntityReference struct {\n\t\/\/ The entity Id.\n\tID string `json:\"id\"`\n\t\/\/ The entity name.\n\tName string `json:\"name,omitempty\"`\n}\n\n\/\/ IPRange represents an IPvX range.\ntype IPRange interface {\n\t\/\/ Convert the IPvX range to a display string.\n\tToDisplayString() string\n}\n\n\/\/ IPv4Range represents an IPv4 network (base address and prefix size)\ntype IPv4Range struct {\n\t\/\/ The network base address.\n\tBaseAddress string `json:\"address\"`\n\t\/\/ The network prefix size.\n\tPrefixSize int `json:\"prefixSize\"`\n}\n\n\/\/ ToDisplayString converts the IPv4 range to a display string.\nfunc (network IPv4Range) ToDisplayString() string {\n\treturn fmt.Sprintf(\"%s\/%d\", network.BaseAddress, network.PrefixSize)\n}\n\n\/\/ IPv6Range represents an IPv6 network (base address and prefix size)\ntype IPv6Range struct {\n\t\/\/ The network base address.\n\tBaseAddress string `json:\"address\"`\n\t\/\/ The network prefix size.\n\tPrefixSize int `json:\"prefixSize\"`\n}\n\n\/\/ ToDisplayString converts the IPv6 range to a display string.\nfunc (network IPv6Range) ToDisplayString() string {\n\treturn fmt.Sprintf(\"%s\/%d\", network.BaseAddress, network.PrefixSize)\n}\n\n\/\/ OperatingSystem represents a well-known operating system for virtual machines.\ntype OperatingSystem struct {\n\t\/\/ The operating system Id.\n\tID string `json:\"id\"`\n\n\t\/\/ The operating system type.\n\tFamily string `json:\"family\"`\n\n\t\/\/ The operating system display-name.\n\tDisplayName string `json:\"displayName\"`\n}\n\n\/\/ VirtualMachineCPU represents the CPU configuration for a virtual machine.\ntype VirtualMachineCPU struct {\n\tCount int `json:\"count,omitempty\"`\n\tSpeed string `json:\"speed,omitempty\"`\n\tCoresPerSocket int `json:\"coresPerSocket,omitempty\"`\n}\n\n\/\/ VirtualMachineSCSIController represents the configuration for a SCSI controller in a virtual machine.\ntype VirtualMachineSCSIController struct {\n\tID string `json:\"id,omitempty\"`\n\tBusNumber int `json:\"busNumber\"`\n\tAdapterType string `json:\"adapterType\"`\n\tDisks []VirtualMachineDisk `json:\"disk\"`\n\tState string `json:\"state,omitempty\"`\n}\n\n\/\/ VirtualMachineDisk represents the configuration for disk in a virtual machine.\ntype VirtualMachineDisk struct {\n\tID string `json:\"id,omitempty\"`\n\tSCSIUnitID int `json:\"scsiId\"`\n\tSizeGB int `json:\"sizeGb\"`\n\tSpeed string `json:\"speed\"`\n\tState string `json:\"state,omitempty\"`\n}\n\n\/\/ VirtualMachineNetwork represents the networking configuration for a virtual machine.\ntype VirtualMachineNetwork struct {\n\tNetworkDomainID string `json:\"networkDomainId,omitempty\"`\n\tPrimaryAdapter VirtualMachineNetworkAdapter `json:\"primaryNic\"`\n\tAdditionalNetworkAdapters []VirtualMachineNetworkAdapter `json:\"additionalNic\"`\n}\n\n\/\/ VirtualMachineNetworkAdapter represents the configuration for a virtual machine's network adapter.\n\/\/ If deploying a new VM, exactly one of VLANID \/ PrivateIPv4Address must be specified.\n\/\/\n\/\/ AdapterType (if specified) must be either E1000 or VMXNET3.\ntype VirtualMachineNetworkAdapter struct {\n\tID *string `json:\"id,omitempty\"`\n\tMACAddress *string `json:\"macAddress,omitempty\"` \/\/ CloudControl v2.4 and higher\n\tVLANID *string `json:\"vlanId,omitempty\"`\n\tVLANName *string `json:\"vlanName,omitempty\"`\n\tPrivateIPv4Address *string `json:\"privateIpv4,omitempty\"`\n\tPrivateIPv6Address *string `json:\"ipv6,omitempty\"`\n\tAdapterType *string `json:\"networkAdapter,omitempty\"`\n\tAdapterKey *int `json:\"key,omitempty\"` \/\/ CloudControl v2.4 and higher\n\tState *string `json:\"state,omitempty\"`\n}\n\n\/\/ GetID returns the network adapter's Id.\nfunc (networkAdapter *VirtualMachineNetworkAdapter) GetID() string {\n\tif networkAdapter.ID == nil {\n\t\treturn \"\"\n\t}\n\n\treturn *networkAdapter.ID\n}\n\n\/\/ GetResourceType returns the network domain's resource type.\nfunc (networkAdapter *VirtualMachineNetworkAdapter) GetResourceType() ResourceType {\n\treturn ResourceTypeNetworkAdapter\n}\n\n\/\/ GetName returns the network adapter's name (actually Id, since adapters don't have names).\nfunc (networkAdapter *VirtualMachineNetworkAdapter) GetName() string {\n\treturn networkAdapter.GetID()\n}\n\n\/\/ GetState returns the network adapter's current state.\nfunc (networkAdapter *VirtualMachineNetworkAdapter) GetState() string {\n\tif networkAdapter.State == nil {\n\t\treturn \"\"\n\t}\n\n\treturn *networkAdapter.State\n}\n\n\/\/ IsDeleted determines whether the network adapter has been deleted (is nil).\nfunc (networkAdapter *VirtualMachineNetworkAdapter) IsDeleted() bool {\n\treturn networkAdapter == nil\n}\n\n\/\/ ToEntityReference creates an EntityReference representing the CustomerImage.\nfunc (networkAdapter *VirtualMachineNetworkAdapter) ToEntityReference() EntityReference {\n\tid := \"\"\n\tif networkAdapter.ID != nil {\n\t\tid = *networkAdapter.ID\n\t}\n\tname := \"\"\n\tif networkAdapter.VLANName != nil {\n\t\tname = *networkAdapter.VLANName\n\t}\n\n\treturn EntityReference{\n\t\tID: id,\n\t\tName: name,\n\t}\n}\n\nvar _ Resource = &VirtualMachineNetworkAdapter{}\n<commit_msg>Add \"Key\" field to VirtualMachineSCSIController.<commit_after>package compute\n\nimport \"fmt\"\n\n\/\/ Entity represents a Cloud Control entity.\ntype Entity interface {\n\t\/\/ GetID retrieves the entity's ID.\n\tGetID() string\n}\n\n\/\/ NamedEntity represents a named Cloud Control entity.\ntype NamedEntity interface {\n\tEntity\n\n\t\/\/ GetName retrieves the entity's name.\n\tGetName() string\n\n\t\/\/ ToEntityReference creates an EntityReference representing the entity.\n\tToEntityReference() EntityReference\n}\n\n\/\/ EntityReference is used to group an entity Id and name together for serialisation \/ deserialisation purposes.\ntype EntityReference struct {\n\t\/\/ The entity Id.\n\tID string `json:\"id\"`\n\t\/\/ The entity name.\n\tName string `json:\"name,omitempty\"`\n}\n\n\/\/ IPRange represents an IPvX range.\ntype IPRange interface {\n\t\/\/ Convert the IPvX range to a display string.\n\tToDisplayString() string\n}\n\n\/\/ IPv4Range represents an IPv4 network (base address and prefix size)\ntype IPv4Range struct {\n\t\/\/ The network base address.\n\tBaseAddress string `json:\"address\"`\n\t\/\/ The network prefix size.\n\tPrefixSize int `json:\"prefixSize\"`\n}\n\n\/\/ ToDisplayString converts the IPv4 range to a display string.\nfunc (network IPv4Range) ToDisplayString() string {\n\treturn fmt.Sprintf(\"%s\/%d\", network.BaseAddress, network.PrefixSize)\n}\n\n\/\/ IPv6Range represents an IPv6 network (base address and prefix size)\ntype IPv6Range struct {\n\t\/\/ The network base address.\n\tBaseAddress string `json:\"address\"`\n\t\/\/ The network prefix size.\n\tPrefixSize int `json:\"prefixSize\"`\n}\n\n\/\/ ToDisplayString converts the IPv6 range to a display string.\nfunc (network IPv6Range) ToDisplayString() string {\n\treturn fmt.Sprintf(\"%s\/%d\", network.BaseAddress, network.PrefixSize)\n}\n\n\/\/ OperatingSystem represents a well-known operating system for virtual machines.\ntype OperatingSystem struct {\n\t\/\/ The operating system Id.\n\tID string `json:\"id\"`\n\n\t\/\/ The operating system type.\n\tFamily string `json:\"family\"`\n\n\t\/\/ The operating system display-name.\n\tDisplayName string `json:\"displayName\"`\n}\n\n\/\/ VirtualMachineCPU represents the CPU configuration for a virtual machine.\ntype VirtualMachineCPU struct {\n\tCount int `json:\"count,omitempty\"`\n\tSpeed string `json:\"speed,omitempty\"`\n\tCoresPerSocket int `json:\"coresPerSocket,omitempty\"`\n}\n\n\/\/ VirtualMachineSCSIController represents the configuration for a SCSI controller in a virtual machine.\ntype VirtualMachineSCSIController struct {\n\tID string `json:\"id,omitempty\"`\n\tBusNumber int `json:\"busNumber\"`\n\tKey int `json:\"key\"`\n\tAdapterType string `json:\"adapterType\"`\n\tDisks []VirtualMachineDisk `json:\"disk\"`\n\tState string `json:\"state,omitempty\"`\n}\n\n\/\/ VirtualMachineDisk represents the configuration for disk in a virtual machine.\ntype VirtualMachineDisk struct {\n\tID string `json:\"id,omitempty\"`\n\tSCSIUnitID int `json:\"scsiId\"`\n\tSizeGB int `json:\"sizeGb\"`\n\tSpeed string `json:\"speed\"`\n\tState string `json:\"state,omitempty\"`\n}\n\n\/\/ VirtualMachineNetwork represents the networking configuration for a virtual machine.\ntype VirtualMachineNetwork struct {\n\tNetworkDomainID string `json:\"networkDomainId,omitempty\"`\n\tPrimaryAdapter VirtualMachineNetworkAdapter `json:\"primaryNic\"`\n\tAdditionalNetworkAdapters []VirtualMachineNetworkAdapter `json:\"additionalNic\"`\n}\n\n\/\/ VirtualMachineNetworkAdapter represents the configuration for a virtual machine's network adapter.\n\/\/ If deploying a new VM, exactly one of VLANID \/ PrivateIPv4Address must be specified.\n\/\/\n\/\/ AdapterType (if specified) must be either E1000 or VMXNET3.\ntype VirtualMachineNetworkAdapter struct {\n\tID *string `json:\"id,omitempty\"`\n\tMACAddress *string `json:\"macAddress,omitempty\"` \/\/ CloudControl v2.4 and higher\n\tVLANID *string `json:\"vlanId,omitempty\"`\n\tVLANName *string `json:\"vlanName,omitempty\"`\n\tPrivateIPv4Address *string `json:\"privateIpv4,omitempty\"`\n\tPrivateIPv6Address *string `json:\"ipv6,omitempty\"`\n\tAdapterType *string `json:\"networkAdapter,omitempty\"`\n\tAdapterKey *int `json:\"key,omitempty\"` \/\/ CloudControl v2.4 and higher\n\tState *string `json:\"state,omitempty\"`\n}\n\n\/\/ GetID returns the network adapter's Id.\nfunc (networkAdapter *VirtualMachineNetworkAdapter) GetID() string {\n\tif networkAdapter.ID == nil {\n\t\treturn \"\"\n\t}\n\n\treturn *networkAdapter.ID\n}\n\n\/\/ GetResourceType returns the network domain's resource type.\nfunc (networkAdapter *VirtualMachineNetworkAdapter) GetResourceType() ResourceType {\n\treturn ResourceTypeNetworkAdapter\n}\n\n\/\/ GetName returns the network adapter's name (actually Id, since adapters don't have names).\nfunc (networkAdapter *VirtualMachineNetworkAdapter) GetName() string {\n\treturn networkAdapter.GetID()\n}\n\n\/\/ GetState returns the network adapter's current state.\nfunc (networkAdapter *VirtualMachineNetworkAdapter) GetState() string {\n\tif networkAdapter.State == nil {\n\t\treturn \"\"\n\t}\n\n\treturn *networkAdapter.State\n}\n\n\/\/ IsDeleted determines whether the network adapter has been deleted (is nil).\nfunc (networkAdapter *VirtualMachineNetworkAdapter) IsDeleted() bool {\n\treturn networkAdapter == nil\n}\n\n\/\/ ToEntityReference creates an EntityReference representing the CustomerImage.\nfunc (networkAdapter *VirtualMachineNetworkAdapter) ToEntityReference() EntityReference {\n\tid := \"\"\n\tif networkAdapter.ID != nil {\n\t\tid = *networkAdapter.ID\n\t}\n\tname := \"\"\n\tif networkAdapter.VLANName != nil {\n\t\tname = *networkAdapter.VLANName\n\t}\n\n\treturn EntityReference{\n\t\tID: id,\n\t\tName: name,\n\t}\n}\n\nvar _ Resource = &VirtualMachineNetworkAdapter{}\n<|endoftext|>"} {"text":"<commit_before>package corehttp\n\n\/\/ TODO: move to IPNS\nconst WebUIPath = \"\/ipfs\/QmUnXcWZC5Ve21gUseouJsH5mLAyz5JPp8aHsg8qVUUK8e\"\n\n\/\/ this is a list of all past webUI paths.\nvar WebUIPaths = []string{\n\tWebUIPath,\n\t\"\/ipfs\/QmSDgpiHco5yXdyVTfhKxr3aiJ82ynz8V14QcGKicM3rVh\",\n\t\"\/ipfs\/QmRuvWJz1Fc8B9cTsAYANHTXqGmKR9DVfY5nvMD1uA2WQ8\",\n\t\"\/ipfs\/QmQLXHs7K98JNQdWrBB2cQLJahPhmupbDjRuH1b9ibmwVa\",\n\t\"\/ipfs\/QmXX7YRpU7nNBKfw75VG7Y1c3GwpSAGHRev67XVPgZFv9R\",\n\t\"\/ipfs\/QmXdu7HWdV6CUaUabd9q2ZeA4iHZLVyDRj3Gi4dsJsWjbr\",\n\t\"\/ipfs\/QmaaqrHyAQm7gALkRW8DcfGX3u8q9rWKnxEMmf7m9z515w\",\n\t\"\/ipfs\/QmSHDxWsMPuJQKWmVA1rB5a3NX2Eme5fPqNb63qwaqiqSp\",\n\t\"\/ipfs\/QmctngrQAt9fjpQUZr7Bx3BsXUcif52eZGTizWhvcShsjz\",\n\t\"\/ipfs\/QmS2HL9v5YeKgQkkWMvs1EMnFtUowTEdFfSSeMT4pos1e6\",\n\t\"\/ipfs\/QmR9MzChjp1MdFWik7NjEjqKQMzVmBkdK3dz14A6B5Cupm\",\n\t\"\/ipfs\/QmRyWyKWmphamkMRnJVjUTzSFSAAZowYP4rnbgnfMXC9Mr\",\n\t\"\/ipfs\/QmU3o9bvfenhTKhxUakbYrLDnZU7HezAVxPM6Ehjw9Xjqy\",\n\t\"\/ipfs\/QmPhnvn747LqwPYMJmQVorMaGbMSgA7mRRoyyZYz3DoZRQ\",\n}\n\nvar WebUIOption = RedirectOption(\"webui\", WebUIPath)\n<commit_msg>feat: update to Web UI v2.3.2<commit_after>package corehttp\n\n\/\/ TODO: move to IPNS\nconst WebUIPath = \"\/ipfs\/QmenEBWcAk3tN94fSKpKFtUMwty1qNwSYw3DMDFV6cPBXA\"\n\n\/\/ this is a list of all past webUI paths.\nvar WebUIPaths = []string{\n\tWebUIPath,\n\t\"\/ipfs\/QmUnXcWZC5Ve21gUseouJsH5mLAyz5JPp8aHsg8qVUUK8e\",\n\t\"\/ipfs\/QmSDgpiHco5yXdyVTfhKxr3aiJ82ynz8V14QcGKicM3rVh\",\n\t\"\/ipfs\/QmRuvWJz1Fc8B9cTsAYANHTXqGmKR9DVfY5nvMD1uA2WQ8\",\n\t\"\/ipfs\/QmQLXHs7K98JNQdWrBB2cQLJahPhmupbDjRuH1b9ibmwVa\",\n\t\"\/ipfs\/QmXX7YRpU7nNBKfw75VG7Y1c3GwpSAGHRev67XVPgZFv9R\",\n\t\"\/ipfs\/QmXdu7HWdV6CUaUabd9q2ZeA4iHZLVyDRj3Gi4dsJsWjbr\",\n\t\"\/ipfs\/QmaaqrHyAQm7gALkRW8DcfGX3u8q9rWKnxEMmf7m9z515w\",\n\t\"\/ipfs\/QmSHDxWsMPuJQKWmVA1rB5a3NX2Eme5fPqNb63qwaqiqSp\",\n\t\"\/ipfs\/QmctngrQAt9fjpQUZr7Bx3BsXUcif52eZGTizWhvcShsjz\",\n\t\"\/ipfs\/QmS2HL9v5YeKgQkkWMvs1EMnFtUowTEdFfSSeMT4pos1e6\",\n\t\"\/ipfs\/QmR9MzChjp1MdFWik7NjEjqKQMzVmBkdK3dz14A6B5Cupm\",\n\t\"\/ipfs\/QmRyWyKWmphamkMRnJVjUTzSFSAAZowYP4rnbgnfMXC9Mr\",\n\t\"\/ipfs\/QmU3o9bvfenhTKhxUakbYrLDnZU7HezAVxPM6Ehjw9Xjqy\",\n\t\"\/ipfs\/QmPhnvn747LqwPYMJmQVorMaGbMSgA7mRRoyyZYz3DoZRQ\",\n}\n\nvar WebUIOption = RedirectOption(\"webui\", WebUIPath)\n<|endoftext|>"} {"text":"<commit_before>package web\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"gopkg.in\/mgo.v2\"\n\n\t\"context\"\n\n\t\"github.com\/tuna-timer\/tuna-timer-api\/commands\"\n\t\"github.com\/tuna-timer\/tuna-timer-api\/models\"\n\t\"github.com\/tuna-timer\/tuna-timer-api\/utils\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n\t\"log\"\n\t\/\/\"github.com\/tuna-timer\/tuna-timer-api\/data\"\n\t\"github.com\/tuna-timer\/tuna-timer-api\/data\"\n\t\"github.com\/nlopes\/slack\"\n)\n\n\/\/ Handlers is a collection of net\/http handlers to serve the API\ntype Handlers struct {\n\tenv *utils.Environment\n\tmongoSession *mgo.Session\n\tstatus map[string]string\n\tcommandLookupFunction func(ctx context.Context, slackCommand models.SlackCustomCommand) (commands.SlackCustomCommandHandler, error)\n\tslackOAuth SlackOAuth\n}\n\n\/\/ NewHandlers constructs a Handlers collection\nfunc NewHandlers(env *utils.Environment, mongoSession *mgo.Session) *Handlers {\n\treturn &Handlers{\n\t\tenv: env,\n\t\tmongoSession: mongoSession,\n\t\tstatus: map[string]string{\n\t\t\t\"env\": env.Name,\n\t\t\t\"version\": env.AppVersion,\n\t\t},\n\t\tcommandLookupFunction: commands.LookupHandler,\n\t\tslackOAuth: NewSlackOAuth(),\n\t}\n}\n\n\/\/ Timer handles Slack \/timer command\nfunc (h *Handlers) Timer(w http.ResponseWriter, r *http.Request) {\n\tnow := time.Now()\n\n\tslackCommand := models.SlackCustomCommand{\n\t\tChannelID: r.PostFormValue(\"channel_id\"),\n\t\tChannelName: r.PostFormValue(\"channel_name\"),\n\t\tCommand: r.PostFormValue(\"command\"),\n\t\tResponseURL: r.PostFormValue(\"response_url\"),\n\t\tTeamDomain: r.PostFormValue(\"team_domain\"),\n\t\tTeamID: r.PostFormValue(\"team_id\"),\n\t\tText: r.PostFormValue(\"text\"),\n\t\tToken: r.PostFormValue(\"token\"),\n\t\tUserID: r.PostFormValue(\"user_id\"),\n\t\tUserName: r.PostFormValue(\"user_name\"),\n\t}\n\n\tslackCommand = utils.NormalizeSlackCustomCommand(slackCommand)\n\n\tsession := h.mongoSession.Clone()\n\tdefer session.Close()\n\n\tctx := utils.PutMongoSessionInContext(r.Context(), session)\n\n\tselfBaseURL := utils.GetSelfURLFromRequest(r)\n\tctx = utils.PutSelfBaseURLInContext(ctx, selfBaseURL)\n\n\tcommand, err := h.commandLookupFunction(ctx, slackCommand)\n\tif err != nil { \/\/todo it is going to be a nicely formatted slack message sent back to user\n\t\tw.Write([]byte(fmt.Sprintf(\"Unknown command: %s!\", slackCommand.SubCommand)))\n\t\treturn\n\t}\n\n\tresult := command.Handle(ctx, slackCommand)\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(result.Body)\n\n\t\/\/todo: rather defer it\n\tlog.Printf(\"Timer command took %s\", time.Since(now).String())\n}\n\n\/\/ SlackOauth2Redirect handles the OAuth2 redirect from Slack and exchanges the `code` with `accessToken`\n\/\/ https:\/\/api.slack.com\/methods\/oauth.access\nfunc (h *Handlers) SlackOauth2Redirect(w http.ResponseWriter, r *http.Request) {\n\tcode := r.URL.Query().Get(\"code\")\n\tclientID := h.env.Config.UString(\"slack.client_id\")\n\tclientSecret := h.env.Config.UString(\"slack.client_secret\")\n\n\tif code == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"`code` parameter is either missed or blank!\"))\n\t\treturn\n\t}\n\n\toauthResponse, err := h.slackOAuth.GetOAuthResponse(clientID, clientSecret, code)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Got a failure during getting an access token from Slack: %s\", err)\n\t\tlog.Println(msg)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(msg))\n\t\treturn\n\t}\n\n\tteamService := data.NewTeamService(h.mongoSession)\n\n\terr = teamService.CreateOrUpdateWithSlackOAuthResponse(oauthResponse)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Got a failure during creating a team: %s\", err)\n\t\tlog.Println(msg)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(msg))\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n}\n\n\/\/ Health handles a call for app health request\nfunc (h *Handlers) Health(w http.ResponseWriter, r *http.Request) {\n\tuptime := time.Since(h.env.CreatedAt)\n\th.status[\"uptime\"] = uptime.String() \/\/is it good or not if I modify the map here?\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tjson.NewEncoder(w).Encode(h.status)\n}\n\nfunc (h *Handlers) SendSampleMessageFromBot(w http.ResponseWriter, r *http.Request) {\n\n\tteamRepo := data.NewTeamRepository(h.mongoSession)\n\tteam, _ := teamRepo.FindByExternalID(\"T02BC0MM7\")\n\n\taccessToken := team.SlackOAuth.Bot.BotAccessToken\n\tslackAPI := slack.New(accessToken)\n\n\tslackAPI.PostMessage(\"U02BC0MM9\", \"You're about stopping a timer...\", slack.PostMessageParameters{\n\t\tAttachments: []slack.Attachment {\n\t\t\t{\n\t\t\t\tText: \"Would you like to stop the timer?\",\n\t\t\t\tAuthorName: \"Pavlo\",\n\t\t\t\tActions: []slack.AttachmentAction {\n\t\t\t\t\t{\n\t\t\t\t\t\tText: \"Yes, I'd like to stop it\",\n\t\t\t\t\t\tName: \"yes\",\n\t\t\t\t\t\tType: \"button\",\n\t\t\t\t\t\tStyle: \"danger\",\n\t\t\t\t\t\tConfirm: []slack.ConfirmationField {\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tText: \"Are you sure?\",\n\t\t\t\t\t\t\t\tDismissText: \"Cancel\",\n\t\t\t\t\t\t\t\tOkText: \"Yes!\",\n\t\t\t\t\t\t\t\tTitle: \"Are you sure you want to stop the timer?\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tText: \"I am not sure yet\",\n\t\t\t\t\t\tName: \"not sure\",\n\t\t\t\t\t\tType: \"button\",\n\t\t\t\t\t\tStyle: \"default\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tText: \"No, let's keep it!\",\n\t\t\t\t\t\tName: \"no\",\n\t\t\t\t\t\tType: \"button\",\n\t\t\t\t\t\tStyle: \"primary\",\n\t\t\t\t\t},\n\t\t\t\t},\n\n\n\t\t\t},\n\t\t},\n\t})\n}\n\n\/\/ ClearAllData - is supposed to be called by the QA team during early testing stage\nfunc (h *Handlers) ClearAllData(w http.ResponseWriter, r *http.Request) {\n\tsession := h.mongoSession.Clone()\n\tdefer session.Close()\n\tutils.TruncateTables(session)\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tjson.NewEncoder(w).Encode(bson.M{\"success\": true})\n}\n<commit_msg>playing with sending messages back to slack from the backend (AsUser: true)<commit_after>package web\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"gopkg.in\/mgo.v2\"\n\n\t\"context\"\n\n\t\"github.com\/tuna-timer\/tuna-timer-api\/commands\"\n\t\"github.com\/tuna-timer\/tuna-timer-api\/models\"\n\t\"github.com\/tuna-timer\/tuna-timer-api\/utils\"\n\t\"gopkg.in\/mgo.v2\/bson\"\n\t\"log\"\n\t\/\/\"github.com\/tuna-timer\/tuna-timer-api\/data\"\n\t\"github.com\/tuna-timer\/tuna-timer-api\/data\"\n\t\"github.com\/nlopes\/slack\"\n)\n\n\/\/ Handlers is a collection of net\/http handlers to serve the API\ntype Handlers struct {\n\tenv *utils.Environment\n\tmongoSession *mgo.Session\n\tstatus map[string]string\n\tcommandLookupFunction func(ctx context.Context, slackCommand models.SlackCustomCommand) (commands.SlackCustomCommandHandler, error)\n\tslackOAuth SlackOAuth\n}\n\n\/\/ NewHandlers constructs a Handlers collection\nfunc NewHandlers(env *utils.Environment, mongoSession *mgo.Session) *Handlers {\n\treturn &Handlers{\n\t\tenv: env,\n\t\tmongoSession: mongoSession,\n\t\tstatus: map[string]string{\n\t\t\t\"env\": env.Name,\n\t\t\t\"version\": env.AppVersion,\n\t\t},\n\t\tcommandLookupFunction: commands.LookupHandler,\n\t\tslackOAuth: NewSlackOAuth(),\n\t}\n}\n\n\/\/ Timer handles Slack \/timer command\nfunc (h *Handlers) Timer(w http.ResponseWriter, r *http.Request) {\n\tnow := time.Now()\n\n\tslackCommand := models.SlackCustomCommand{\n\t\tChannelID: r.PostFormValue(\"channel_id\"),\n\t\tChannelName: r.PostFormValue(\"channel_name\"),\n\t\tCommand: r.PostFormValue(\"command\"),\n\t\tResponseURL: r.PostFormValue(\"response_url\"),\n\t\tTeamDomain: r.PostFormValue(\"team_domain\"),\n\t\tTeamID: r.PostFormValue(\"team_id\"),\n\t\tText: r.PostFormValue(\"text\"),\n\t\tToken: r.PostFormValue(\"token\"),\n\t\tUserID: r.PostFormValue(\"user_id\"),\n\t\tUserName: r.PostFormValue(\"user_name\"),\n\t}\n\n\tslackCommand = utils.NormalizeSlackCustomCommand(slackCommand)\n\n\tsession := h.mongoSession.Clone()\n\tdefer session.Close()\n\n\tctx := utils.PutMongoSessionInContext(r.Context(), session)\n\n\tselfBaseURL := utils.GetSelfURLFromRequest(r)\n\tctx = utils.PutSelfBaseURLInContext(ctx, selfBaseURL)\n\n\tcommand, err := h.commandLookupFunction(ctx, slackCommand)\n\tif err != nil { \/\/todo it is going to be a nicely formatted slack message sent back to user\n\t\tw.Write([]byte(fmt.Sprintf(\"Unknown command: %s!\", slackCommand.SubCommand)))\n\t\treturn\n\t}\n\n\tresult := command.Handle(ctx, slackCommand)\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.Write(result.Body)\n\n\t\/\/todo: rather defer it\n\tlog.Printf(\"Timer command took %s\", time.Since(now).String())\n}\n\n\/\/ SlackOauth2Redirect handles the OAuth2 redirect from Slack and exchanges the `code` with `accessToken`\n\/\/ https:\/\/api.slack.com\/methods\/oauth.access\nfunc (h *Handlers) SlackOauth2Redirect(w http.ResponseWriter, r *http.Request) {\n\tcode := r.URL.Query().Get(\"code\")\n\tclientID := h.env.Config.UString(\"slack.client_id\")\n\tclientSecret := h.env.Config.UString(\"slack.client_secret\")\n\n\tif code == \"\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"`code` parameter is either missed or blank!\"))\n\t\treturn\n\t}\n\n\toauthResponse, err := h.slackOAuth.GetOAuthResponse(clientID, clientSecret, code)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Got a failure during getting an access token from Slack: %s\", err)\n\t\tlog.Println(msg)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(msg))\n\t\treturn\n\t}\n\n\tteamService := data.NewTeamService(h.mongoSession)\n\n\terr = teamService.CreateOrUpdateWithSlackOAuthResponse(oauthResponse)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Got a failure during creating a team: %s\", err)\n\t\tlog.Println(msg)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(msg))\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n}\n\n\/\/ Health handles a call for app health request\nfunc (h *Handlers) Health(w http.ResponseWriter, r *http.Request) {\n\tuptime := time.Since(h.env.CreatedAt)\n\th.status[\"uptime\"] = uptime.String() \/\/is it good or not if I modify the map here?\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tjson.NewEncoder(w).Encode(h.status)\n}\n\nfunc (h *Handlers) SendSampleMessageFromBot(w http.ResponseWriter, r *http.Request) {\n\n\tteamRepo := data.NewTeamRepository(h.mongoSession)\n\tteam, _ := teamRepo.FindByExternalID(\"T02BC0MM7\")\n\n\taccessToken := team.SlackOAuth.Bot.BotAccessToken\n\tslackAPI := slack.New(accessToken)\n\n\tslackAPI.PostMessage(\"U02BC0MM9\", \"You're about stopping a timer...\", slack.PostMessageParameters{\n\t\tAsUser: true,\n\t\tAttachments: []slack.Attachment {\n\t\t\t{\n\t\t\t\tText: \"Would you like to stop the timer?\",\n\t\t\t\tAuthorName: \"Pavlo\",\n\t\t\t\tActions: []slack.AttachmentAction {\n\t\t\t\t\t{\n\t\t\t\t\t\tText: \"Yes, I'd like to stop it\",\n\t\t\t\t\t\tName: \"yes\",\n\t\t\t\t\t\tType: \"button\",\n\t\t\t\t\t\tStyle: \"danger\",\n\t\t\t\t\t\tConfirm: []slack.ConfirmationField {\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tText: \"Are you sure?\",\n\t\t\t\t\t\t\t\tDismissText: \"Cancel\",\n\t\t\t\t\t\t\t\tOkText: \"Yes!\",\n\t\t\t\t\t\t\t\tTitle: \"Are you sure you want to stop the timer?\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tText: \"I am not sure yet\",\n\t\t\t\t\t\tName: \"not sure\",\n\t\t\t\t\t\tType: \"button\",\n\t\t\t\t\t\tStyle: \"default\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tText: \"No, let's keep it!\",\n\t\t\t\t\t\tName: \"no\",\n\t\t\t\t\t\tType: \"button\",\n\t\t\t\t\t\tStyle: \"primary\",\n\t\t\t\t\t},\n\t\t\t\t},\n\n\n\t\t\t},\n\t\t},\n\t})\n}\n\n\/\/ ClearAllData - is supposed to be called by the QA team during early testing stage\nfunc (h *Handlers) ClearAllData(w http.ResponseWriter, r *http.Request) {\n\tsession := h.mongoSession.Clone()\n\tdefer session.Close()\n\tutils.TruncateTables(session)\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tjson.NewEncoder(w).Encode(bson.M{\"success\": true})\n}\n<|endoftext|>"} {"text":"<commit_before>package bagman_test\n\nimport (\n \"testing\"\n \"fmt\"\n \"os\"\n \"path\/filepath\"\n\t\"encoding\/base64\"\n\t\"crypto\/md5\"\n \"github.com\/APTrust\/bagman\"\n\t\"github.com\/crowdmob\/goamz\/aws\"\n\t\"github.com\/crowdmob\/goamz\/s3\"\n)\n\nvar skipMessagePrinted bool = false\nvar testBucket string = \"aptrust.test\"\nvar testPreservationBucket string = \"aptrust.test.preservation\"\n\n\/\/ Returns true if the AWS environment variables\n\/\/ AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY\n\/\/ are set, false if not.\nfunc awsEnvAvailable() (envVarsOk bool) {\n _, err := aws.EnvAuth()\n return err == nil\n}\n\n\/\/ This prints a message saying S3 integration tests\n\/\/ will be skipped.\nfunc printSkipMessage() {\n if !skipMessagePrinted {\n fmt.Fprintln(os.Stderr,\n \"Skipping S3 integration tests because environment variables \" +\n \"AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are not set.\")\n skipMessagePrinted = true\n }\n}\n\n\/\/ Test that we can get an S3 client.\nfunc TestNewS3Client(t *testing.T) {\n if !awsEnvAvailable() {\n printSkipMessage()\n return\n }\n _, err := bagman.NewS3Client(aws.APNortheast)\n if err != nil {\n t.Error(\"Cannot create S3 client: %v\\n\", err)\n }\n}\n\n\/\/ Test that we can list the contents of an S3 bucket.\n\/\/ TODO: Test listing a bucket with >1000 items.\nfunc TestListBucket(t *testing.T) {\n if !awsEnvAvailable() {\n printSkipMessage()\n return\n }\n s3Client, err := bagman.NewS3Client(aws.USEast)\n if err != nil {\n t.Error(\"Cannot create S3 client: %v\\n\", err)\n }\n keys, err := s3Client.ListBucket(testBucket, 20)\n if err != nil {\n t.Error(\"Cannot get list of S3 bucket contents: %v\\n\", err)\n }\n if len(keys) < 1 {\n t.Error(\"ListBucket returned empty list\")\n }\n}\n\n\/\/ Test that we can save an S3 file to the local filesystem,\n\/\/ and that the data in the FetchResult is what we expect.\n\/\/ TODO: Test case where md5 sum does not match.\n\/\/ TODO: Test case where md5 sum cannot be verified.\nfunc TestFetchToFile(t *testing.T) {\n if !awsEnvAvailable() {\n printSkipMessage()\n return\n }\n s3Client, err := bagman.NewS3Client(aws.USEast)\n if err != nil {\n t.Error(\"Cannot create S3 client: %v\\n\", err)\n }\n keys, err := s3Client.ListBucket(testBucket, 20)\n if len(keys) < 1 {\n t.Error(\"ListBucket returned empty list\")\n }\n\n var keyToFetch s3.Key\n for _, key := range(keys) {\n if key.Key == \"sample_good.tar\" {\n keyToFetch = key\n break\n }\n }\n if &keyToFetch == nil {\n t.Error(\"Can't run s3 fetch test because aptrust.test\/sample_good.tar is missing\")\n }\n\n \/\/ Fetch the first file from the test bucket and store\n \/\/ it in the testdata directory. Note that testDataPath\n \/\/ is defined in bag_test.go, which is part of the\n \/\/ bagman_test package.\n outputDir := filepath.Join(testDataPath, \"tmp\")\n os.MkdirAll(outputDir, 0755)\n outputFile := filepath.Join(outputDir, keyToFetch.Key)\n outputFileAbs, _ := filepath.Abs(outputFile)\n result := s3Client.FetchToFile(testBucket, keyToFetch, outputFile)\n defer os.Remove(filepath.Join(outputDir, keyToFetch.Key))\n if result.ErrorMessage != \"\" {\n t.Error(\"FetchToFile returned an error: %s\", result.ErrorMessage)\n }\n if result.BucketName != testBucket {\n t.Error(\"Expected bucket name %s, got %s\", testBucket, result.BucketName)\n }\n if result.Key != keyToFetch.Key {\n t.Error(\"Expected key name %s, got %s\", keyToFetch.Key, result.Key)\n }\n if result.LocalTarFile != outputFileAbs {\n t.Error(\"Expected local file name %s, got %s\",\n outputFileAbs, result.LocalTarFile)\n }\n if result.RemoteMd5 != \"22ecc8c4146ad65bd0f9ddb0db32e8b9\" {\n t.Error(\"Expected remote md5 sum %s, got %s\",\n \"22ecc8c4146ad65bd0f9ddb0db32e8b9\", result.RemoteMd5)\n }\n if result.LocalMd5 != \"22ecc8c4146ad65bd0f9ddb0db32e8b9\" {\n t.Error(\"Expected local md5 sum %s, got %s\",\n \"22ecc8c4146ad65bd0f9ddb0db32e8b9\", result.LocalMd5)\n }\n if result.Md5Verified == false {\n t.Error(\"md5 sum should have been verified but was not\")\n }\n if result.Md5Verifiable == false {\n t.Error(\"md5 sum incorrectly marked as not verifiable\")\n }\n if result.Warning != \"\" {\n t.Error(\"Fetch result returned warning: %s\", result.Warning)\n }\n \/\/ Retry should be true, unless file does not exist.\n if result.Retry == false {\n t.Error(\"Fetch result retry was false, but should be true.\")\n }\n}\n\nfunc TestFetchNonExistentFile(t *testing.T) {\n if !awsEnvAvailable() {\n printSkipMessage()\n return\n }\n s3Client, err := bagman.NewS3Client(aws.USEast)\n if err != nil {\n t.Error(\"Cannot create S3 client: %v\\n\", err)\n }\n keys, err := s3Client.ListBucket(testBucket, 20)\n if len(keys) < 1 {\n t.Error(\"ListBucket returned empty list\")\n }\n \/\/ trickery!\n keys[0].Key = \"non_existent_file.tar\"\n outputDir := filepath.Join(testDataPath, \"tmp\")\n os.MkdirAll(outputDir, 0755)\n outputFile := filepath.Join(outputDir, keys[0].Key)\n result := s3Client.FetchToFile(testBucket, keys[0], outputFile)\n\n \/\/ Make sure we have the bucket name and file name, because we\n \/\/ want to know what we failed to fetch.\n if result.BucketName != testBucket {\n t.Error(\"Expected bucket name %s, got %s\", testBucket, result.BucketName)\n }\n if result.Key != keys[0].Key {\n t.Error(\"Expected key name %s, got %s\", keys[0].Key, result.Key)\n }\n if result.ErrorMessage == \"\" {\n t.Error(\"FetchToFile should have returned a 'not found' error, but did not.\")\n }\n if result.ErrorMessage != \"Error retrieving file from receiving bucket: The specified key does not exist.\" {\n t.Error(\"Got unexpected error message: %v\", result.ErrorMessage)\n }\n \/\/ Retry should be false, because file does not exist and we don't\n \/\/ want to waste any more time on it.\n if result.Retry == true {\n t.Error(\"Fetch result retry was true, but should be false.\")\n }\n}\n\nfunc TestSaveToS3(t *testing.T) {\n if !awsEnvAvailable() {\n printSkipMessage()\n return\n }\n\t\/\/ Copy this file from the testdata directory to the\n\t\/\/ test preservation bucket.\n\terr := SaveToS3(\"sample_good.tar\", testPreservationBucket)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestGetKey(t *testing.T) {\n if !awsEnvAvailable() {\n printSkipMessage()\n return\n }\n s3Client, err := bagman.NewS3Client(aws.USEast)\n if err != nil {\n t.Error(\"Cannot create S3 client: %v\\n\", err)\n }\n key, err := s3Client.GetKey(testPreservationBucket, \"sample_good.tar\")\n if err != nil {\n t.Error(err)\n }\n\tif key == nil {\n\t\tt.Error(\"s3Client.GetKey returned nil\")\n\t\treturn\n\t}\n expectedETag := \"\\\"7d5c7c1727fd538888f3eb89658abfdf\\\"\"\n if key.ETag != expectedETag {\n t.Errorf(\"Expected ETag %s, got %s\", expectedETag, key.ETag)\n }\n if key.Size != int64(23552) {\n t.Errorf(\"Expected Size %d, got %d\", int64(23552), key.Size)\n }\n}\n\nfunc TestDeleteFromS3(t *testing.T) {\n if !awsEnvAvailable() {\n printSkipMessage()\n return\n }\n\t\/\/ Make sure we have a file there to delete.\n\terr := SaveToS3(\"sample_good.tar\", testPreservationBucket)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\t\/\/ Now make sure the delete function works.\n s3Client, err := bagman.NewS3Client(aws.USEast)\n if err != nil {\n t.Error(\"Cannot create S3 client: %v\\n\", err)\n }\n\terr = s3Client.Delete(testPreservationBucket, \"test_file.tar\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n\n\/\/ Copies localFile to bucketName on S3. localFile is assumed\n\/\/ to be inside the testdata directory.\nfunc SaveToS3(localFile, bucketName string) (error) {\n s3Client, err := bagman.NewS3Client(aws.USEast)\n if err != nil {\n return fmt.Errorf(\"Cannot create S3 client: %v\\n\", err)\n }\n bagmanHome, err := bagman.BagmanHome()\n if err != nil {\n return err\n }\n path := filepath.Join(bagmanHome, \"testdata\", localFile)\n file, err := os.Open(path)\n if err != nil {\n return fmt.Errorf(\"Error opening local test file: %v\", err)\n }\n defer file.Close()\n fileInfo, err := file.Stat()\n if err != nil {\n return fmt.Errorf(\"Can't stat local test file: %v\", err)\n }\n\tfileBytes := make([]byte, fileInfo.Size())\n\t_, _ = file.Read(fileBytes)\n\t_, _ = file.Seek(0, 0)\n\tmd5Bytes := md5.Sum(fileBytes)\n\tbase64md5 := base64.StdEncoding.EncodeToString(md5Bytes[:])\n\toptions := s3Client.MakeOptions(base64md5, nil)\n url, err := s3Client.SaveToS3(bucketName, localFile,\n \"application\/binary\", file, fileInfo.Size(), options)\n if err != nil {\n return err\n }\n\texpectedUrl := fmt.Sprintf(\"https:\/\/s3.amazonaws.com\/%s\/%s\",\n\t\tbucketName, localFile)\n\tif url != expectedUrl {\n\t\treturn fmt.Errorf(\"Expected url '%s' but got '%s'\", expectedUrl, url)\n\t}\n\treturn nil\n}\n\nfunc TestSaveLargeFileToS3(t *testing.T) {\n if !awsEnvAvailable() {\n printSkipMessage()\n return\n }\n\n\t\/\/ Copy this local file to remote bucket.\n\tlocalFile := \"multi_mb_test_bag.tar\"\n\tbucketName := testPreservationBucket\n\n\n bagmanHome, err := bagman.BagmanHome()\n if err != nil {\n t.Error(err)\n }\n path := filepath.Join(bagmanHome, \"testdata\", localFile)\n\n\t\/\/ Our multi-megabyte test file is not in the github repo\n\t\/\/ and we don't want to perform this test all the time anyway.\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\tfmt.Printf(\"Skipping TestSaveLargeFileToS3 because test file \" +\n\t\t\t\"%s does not exist\", path)\n\t\treturn\n\t}\n\n\n s3Client, err := bagman.NewS3Client(aws.USEast)\n if err != nil {\n t.Errorf(\"Cannot create S3 client: %v\\n\", err)\n }\n\n\t\/\/ Delete the file if it's already there.\n\t_ = s3Client.Delete(bucketName, localFile)\n\n file, err := os.Open(path)\n if err != nil {\n t.Errorf(\"Error opening local test file: %v\", err)\n }\n defer file.Close()\n fileInfo, err := file.Stat()\n if err != nil {\n t.Errorf(\"Can't stat local test file: %v\", err)\n }\n\tfileBytes := make([]byte, fileInfo.Size())\n\t_, _ = file.Read(fileBytes)\n\t_, _ = file.Seek(0, 0)\n\tmd5Bytes := md5.Sum(fileBytes)\n\tbase64md5 := base64.StdEncoding.EncodeToString(md5Bytes[:])\n\n\ts3Metadata := make(map[string][]string)\n\ts3Metadata[\"md5\"] = []string{ \"Test12345678\" }\n\ts3Metadata[\"institution\"] = []string{ \"aptrust.org\" }\n\ts3Metadata[\"bag\"] = []string{ \"test_bag\" }\n\ts3Metadata[\"bagpath\"] = []string{ \"data\/test_file.pdf\" }\n\n\toptions := s3Client.MakeOptions(base64md5, s3Metadata)\n\n\t\/\/ Send the file up in 6mb chunks.\n url, err := s3Client.SaveLargeFileToS3(bucketName, localFile,\n \"application\/binary\", file, fileInfo.Size(), options, int64(6000000))\n if err != nil {\n t.Error(err)\n }\n\n\texpectedUrl := fmt.Sprintf(\"https:\/\/s3.amazonaws.com\/%s\/%s\",\n\t\tbucketName, localFile)\n\tif url != expectedUrl {\n\t\tt.Errorf(\"Expected url '%s' but got '%s'\", expectedUrl, url)\n\t}\n}\n<commit_msg>No changes<commit_after>package bagman_test\n\nimport (\n \"testing\"\n \"fmt\"\n \"os\"\n \"path\/filepath\"\n\t\"encoding\/base64\"\n\t\"crypto\/md5\"\n \"github.com\/APTrust\/bagman\"\n\t\"github.com\/crowdmob\/goamz\/aws\"\n\t\"github.com\/crowdmob\/goamz\/s3\"\n)\n\nvar skipMessagePrinted bool = false\nvar testBucket string = \"aptrust.test\"\nvar testPreservationBucket string = \"aptrust.test.preservation\"\n\n\/\/ Returns true if the AWS environment variables\n\/\/ AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY\n\/\/ are set, false if not.\nfunc awsEnvAvailable() (envVarsOk bool) {\n _, err := aws.EnvAuth()\n return err == nil\n}\n\n\/\/ This prints a message saying S3 integration tests\n\/\/ will be skipped.\nfunc printSkipMessage() {\n if !skipMessagePrinted {\n fmt.Fprintln(os.Stderr,\n \"Skipping S3 integration tests because environment variables \" +\n \"AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are not set.\")\n skipMessagePrinted = true\n }\n}\n\n\/\/ Test that we can get an S3 client.\nfunc TestNewS3Client(t *testing.T) {\n if !awsEnvAvailable() {\n printSkipMessage()\n return\n }\n _, err := bagman.NewS3Client(aws.APNortheast)\n if err != nil {\n t.Error(\"Cannot create S3 client: %v\\n\", err)\n }\n}\n\n\/\/ Test that we can list the contents of an S3 bucket.\n\/\/ TODO: Test listing a bucket with >1000 items.\nfunc TestListBucket(t *testing.T) {\n if !awsEnvAvailable() {\n printSkipMessage()\n return\n }\n s3Client, err := bagman.NewS3Client(aws.USEast)\n if err != nil {\n t.Error(\"Cannot create S3 client: %v\\n\", err)\n }\n keys, err := s3Client.ListBucket(testBucket, 20)\n if err != nil {\n t.Error(\"Cannot get list of S3 bucket contents: %v\\n\", err)\n }\n if len(keys) < 1 {\n t.Error(\"ListBucket returned empty list\")\n }\n}\n\n\/\/ Test that we can save an S3 file to the local filesystem,\n\/\/ and that the data in the FetchResult is what we expect.\n\/\/ TODO: Test case where md5 sum does not match.\n\/\/ TODO: Test case where md5 sum cannot be verified.\nfunc TestFetchToFile(t *testing.T) {\n if !awsEnvAvailable() {\n printSkipMessage()\n return\n }\n s3Client, err := bagman.NewS3Client(aws.USEast)\n if err != nil {\n t.Error(\"Cannot create S3 client: %v\\n\", err)\n }\n keys, err := s3Client.ListBucket(testBucket, 20)\n if len(keys) < 1 {\n t.Error(\"ListBucket returned empty list\")\n }\n\n var keyToFetch s3.Key\n for _, key := range(keys) {\n if key.Key == \"sample_good.tar\" {\n keyToFetch = key\n break\n }\n }\n if &keyToFetch == nil {\n t.Error(\"Can't run s3 fetch test because aptrust.test\/sample_good.tar is missing\")\n }\n\n \/\/ Fetch the first file from the test bucket and store\n \/\/ it in the testdata directory. Note that testDataPath\n \/\/ is defined in bag_test.go, which is part of the\n \/\/ bagman_test package.\n outputDir := filepath.Join(testDataPath, \"tmp\")\n os.MkdirAll(outputDir, 0755)\n outputFile := filepath.Join(outputDir, keyToFetch.Key)\n outputFileAbs, _ := filepath.Abs(outputFile)\n result := s3Client.FetchToFile(testBucket, keyToFetch, outputFile)\n defer os.Remove(filepath.Join(outputDir, keyToFetch.Key))\n if result.ErrorMessage != \"\" {\n t.Error(\"FetchToFile returned an error: %s\", result.ErrorMessage)\n }\n if result.BucketName != testBucket {\n t.Error(\"Expected bucket name %s, got %s\", testBucket, result.BucketName)\n }\n if result.Key != keyToFetch.Key {\n t.Error(\"Expected key name %s, got %s\", keyToFetch.Key, result.Key)\n }\n if result.LocalTarFile != outputFileAbs {\n t.Error(\"Expected local file name %s, got %s\",\n outputFileAbs, result.LocalTarFile)\n }\n if result.RemoteMd5 != \"22ecc8c4146ad65bd0f9ddb0db32e8b9\" {\n t.Error(\"Expected remote md5 sum %s, got %s\",\n \"22ecc8c4146ad65bd0f9ddb0db32e8b9\", result.RemoteMd5)\n }\n if result.LocalMd5 != \"22ecc8c4146ad65bd0f9ddb0db32e8b9\" {\n t.Error(\"Expected local md5 sum %s, got %s\",\n \"22ecc8c4146ad65bd0f9ddb0db32e8b9\", result.LocalMd5)\n }\n if result.Md5Verified == false {\n t.Error(\"md5 sum should have been verified but was not\")\n }\n if result.Md5Verifiable == false {\n t.Error(\"md5 sum incorrectly marked as not verifiable\")\n }\n if result.Warning != \"\" {\n t.Error(\"Fetch result returned warning: %s\", result.Warning)\n }\n \/\/ Retry should be true, unless file does not exist.\n if result.Retry == false {\n t.Error(\"Fetch result retry was false, but should be true.\")\n }\n}\n\nfunc TestFetchNonExistentFile(t *testing.T) {\n if !awsEnvAvailable() {\n printSkipMessage()\n return\n }\n s3Client, err := bagman.NewS3Client(aws.USEast)\n if err != nil {\n t.Error(\"Cannot create S3 client: %v\\n\", err)\n }\n keys, err := s3Client.ListBucket(testBucket, 20)\n if len(keys) < 1 {\n t.Error(\"ListBucket returned empty list\")\n }\n \/\/ trickery!\n keys[0].Key = \"non_existent_file.tar\"\n outputDir := filepath.Join(testDataPath, \"tmp\")\n os.MkdirAll(outputDir, 0755)\n outputFile := filepath.Join(outputDir, keys[0].Key)\n result := s3Client.FetchToFile(testBucket, keys[0], outputFile)\n\n \/\/ Make sure we have the bucket name and file name, because we\n \/\/ want to know what we failed to fetch.\n if result.BucketName != testBucket {\n t.Error(\"Expected bucket name %s, got %s\", testBucket, result.BucketName)\n }\n if result.Key != keys[0].Key {\n t.Error(\"Expected key name %s, got %s\", keys[0].Key, result.Key)\n }\n if result.ErrorMessage == \"\" {\n t.Error(\"FetchToFile should have returned a 'not found' error, but did not.\")\n }\n if result.ErrorMessage != \"Error retrieving file from receiving bucket: The specified key does not exist.\" {\n t.Error(\"Got unexpected error message: %v\", result.ErrorMessage)\n }\n \/\/ Retry should be false, because file does not exist and we don't\n \/\/ want to waste any more time on it.\n if result.Retry == true {\n t.Error(\"Fetch result retry was true, but should be false.\")\n }\n}\n\nfunc TestSaveToS3(t *testing.T) {\n if !awsEnvAvailable() {\n printSkipMessage()\n return\n }\n\t\/\/ Copy this file from the testdata directory to the\n\t\/\/ test preservation bucket.\n\terr := SaveToS3(\"sample_good.tar\", testPreservationBucket)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestGetKey(t *testing.T) {\n if !awsEnvAvailable() {\n printSkipMessage()\n return\n }\n s3Client, err := bagman.NewS3Client(aws.USEast)\n if err != nil {\n t.Error(\"Cannot create S3 client: %v\\n\", err)\n }\n key, err := s3Client.GetKey(testPreservationBucket, \"sample_good.tar\")\n if err != nil {\n t.Error(err)\n }\n\tif key == nil {\n\t\tt.Error(\"s3Client.GetKey returned nil\")\n\t\treturn\n\t}\n expectedETag := \"\\\"7d5c7c1727fd538888f3eb89658abfdf\\\"\"\n if key.ETag != expectedETag {\n t.Errorf(\"Expected ETag %s, got %s\", expectedETag, key.ETag)\n }\n if key.Size != int64(23552) {\n t.Errorf(\"Expected Size %d, got %d\", int64(23552), key.Size)\n }\n}\n\nfunc TestDeleteFromS3(t *testing.T) {\n if !awsEnvAvailable() {\n printSkipMessage()\n return\n }\n\t\/\/ Make sure we have a file there to delete.\n\terr := SaveToS3(\"sample_good.tar\", testPreservationBucket)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\t\/\/ Now make sure the delete function works.\n s3Client, err := bagman.NewS3Client(aws.USEast)\n if err != nil {\n t.Error(\"Cannot create S3 client: %v\\n\", err)\n }\n\terr = s3Client.Delete(testPreservationBucket, \"test_file.tar\")\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}\n\n\/\/ Copies localFile to bucketName on S3. localFile is assumed\n\/\/ to be inside the testdata directory.\nfunc SaveToS3(localFile, bucketName string) (error) {\n s3Client, err := bagman.NewS3Client(aws.USEast)\n if err != nil {\n return fmt.Errorf(\"Cannot create S3 client: %v\\n\", err)\n }\n bagmanHome, err := bagman.BagmanHome()\n if err != nil {\n return err\n }\n path := filepath.Join(bagmanHome, \"testdata\", localFile)\n file, err := os.Open(path)\n if err != nil {\n return fmt.Errorf(\"Error opening local test file: %v\", err)\n }\n defer file.Close()\n fileInfo, err := file.Stat()\n if err != nil {\n return fmt.Errorf(\"Can't stat local test file: %v\", err)\n }\n\tfileBytes := make([]byte, fileInfo.Size())\n\t_, _ = file.Read(fileBytes)\n\t_, _ = file.Seek(0, 0)\n\tmd5Bytes := md5.Sum(fileBytes)\n\tbase64md5 := base64.StdEncoding.EncodeToString(md5Bytes[:])\n\toptions := s3Client.MakeOptions(base64md5, nil)\n url, err := s3Client.SaveToS3(bucketName, localFile,\n \"application\/binary\", file, fileInfo.Size(), options)\n if err != nil {\n return err\n }\n\texpectedUrl := fmt.Sprintf(\"https:\/\/s3.amazonaws.com\/%s\/%s\",\n\t\tbucketName, localFile)\n\tif url != expectedUrl {\n\t\treturn fmt.Errorf(\"Expected url '%s' but got '%s'\", expectedUrl, url)\n\t}\n\treturn nil\n}\n\nfunc TestSaveLargeFileToS3(t *testing.T) {\n if !awsEnvAvailable() {\n printSkipMessage()\n return\n }\n\n\t\/\/ Copy this local file to remote bucket.\n\tlocalFile := \"multi_mb_test_bag.tar\"\n\tbucketName := testPreservationBucket\n\n bagmanHome, err := bagman.BagmanHome()\n if err != nil {\n t.Error(err)\n }\n path := filepath.Join(bagmanHome, \"testdata\", localFile)\n\n\t\/\/ Our multi-megabyte test file is not in the github repo\n\t\/\/ and we don't want to perform this test all the time anyway.\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\tfmt.Printf(\"Skipping TestSaveLargeFileToS3 because test file \" +\n\t\t\t\"%s does not exist\", path)\n\t\treturn\n\t}\n\n s3Client, err := bagman.NewS3Client(aws.USEast)\n if err != nil {\n t.Errorf(\"Cannot create S3 client: %v\\n\", err)\n }\n\n\t\/\/ Delete the file if it's already there.\n\t_ = s3Client.Delete(bucketName, localFile)\n\n file, err := os.Open(path)\n if err != nil {\n t.Errorf(\"Error opening local test file: %v\", err)\n }\n defer file.Close()\n fileInfo, err := file.Stat()\n if err != nil {\n t.Errorf(\"Can't stat local test file: %v\", err)\n }\n\tfileBytes := make([]byte, fileInfo.Size())\n\t_, _ = file.Read(fileBytes)\n\t_, _ = file.Seek(0, 0)\n\tmd5Bytes := md5.Sum(fileBytes)\n\tbase64md5 := base64.StdEncoding.EncodeToString(md5Bytes[:])\n\n\ts3Metadata := make(map[string][]string)\n\ts3Metadata[\"md5\"] = []string{ \"Test12345678\" }\n\ts3Metadata[\"institution\"] = []string{ \"aptrust.org\" }\n\ts3Metadata[\"bag\"] = []string{ \"test_bag\" }\n\ts3Metadata[\"bagpath\"] = []string{ \"data\/test_file.pdf\" }\n\n\toptions := s3Client.MakeOptions(base64md5, s3Metadata)\n\n\t\/\/ Send the file up in 6mb chunks.\n url, err := s3Client.SaveLargeFileToS3(bucketName, localFile,\n \"application\/binary\", file, fileInfo.Size(), options, int64(6000000))\n if err != nil {\n t.Error(err)\n }\n\n\texpectedUrl := fmt.Sprintf(\"https:\/\/s3.amazonaws.com\/%s\/%s\",\n\t\tbucketName, localFile)\n\tif url != expectedUrl {\n\t\tt.Errorf(\"Expected url '%s' but got '%s'\", expectedUrl, url)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package kv\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n\tabci \"github.com\/tendermint\/abci\/types\"\n\t\"github.com\/tendermint\/tendermint\/state\/txindex\"\n\t\"github.com\/tendermint\/tendermint\/types\"\n\tdb \"github.com\/tendermint\/tmlibs\/db\"\n\t\"github.com\/tendermint\/tmlibs\/pubsub\/query\"\n)\n\nfunc TestTxIndex(t *testing.T) {\n\tindexer := NewTxIndex(db.NewMemDB())\n\n\ttx := types.Tx(\"HELLO WORLD\")\n\ttxResult := &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeType_OK, Log: \"\", Tags: []*abci.KVPair{}}}\n\thash := tx.Hash()\n\n\tbatch := txindex.NewBatch(1)\n\tif err := batch.Add(txResult); err != nil {\n\t\tt.Error(err)\n\t}\n\terr := indexer.AddBatch(batch)\n\trequire.NoError(t, err)\n\n\tloadedTxResult, err := indexer.Get(hash)\n\trequire.NoError(t, err)\n\tassert.Equal(t, txResult, loadedTxResult)\n\n\ttx2 := types.Tx(\"BYE BYE WORLD\")\n\ttxResult2 := &types.TxResult{1, 0, tx2, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeType_OK, Log: \"\", Tags: []*abci.KVPair{}}}\n\thash2 := tx2.Hash()\n\n\terr = indexer.Index(txResult2)\n\trequire.NoError(t, err)\n\n\tloadedTxResult2, err := indexer.Get(hash2)\n\trequire.NoError(t, err)\n\tassert.Equal(t, txResult2, loadedTxResult2)\n}\n\nfunc TestTxSearch(t *testing.T) {\n\tallowedTags := []string{\"account.number\", \"account.owner\", \"account.date\"}\n\tindexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags))\n\n\ttx := types.Tx(\"HELLO WORLD\")\n\ttags := []*abci.KVPair{\n\t\t{Key: \"account.number\", ValueType: abci.KVPair_INT, ValueInt: 1},\n\t\t{Key: \"account.owner\", ValueType: abci.KVPair_STRING, ValueString: \"Ivan\"},\n\t\t{Key: \"not_allowed\", ValueType: abci.KVPair_STRING, ValueString: \"Vlad\"},\n\t}\n\ttxResult := &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeType_OK, Log: \"\", Tags: tags}}\n\thash := tx.Hash()\n\n\terr := indexer.Index(txResult)\n\trequire.NoError(t, err)\n\n\ttestCases := []struct {\n\t\tq string\n\t\tresultsLength int\n\t}{\n\t\t\/\/ search by hash\n\t\t{fmt.Sprintf(\"tx.hash = '%X'\", hash), 1},\n\t\t\/\/ search by exact match (one tag)\n\t\t{\"account.number = 1\", 1},\n\t\t\/\/ search by exact match (two tags)\n\t\t{\"account.number = 1 AND account.owner = 'Ivan'\", 1},\n\t\t\/\/ search by exact match (two tags)\n\t\t{\"account.number = 1 AND account.owner = 'Vlad'\", 0},\n\t\t\/\/ search by range\n\t\t{\"account.number >= 1 AND account.number <= 5\", 1},\n\t\t\/\/ search by range (lower bound)\n\t\t{\"account.number >= 1\", 1},\n\t\t\/\/ search by range (upper bound)\n\t\t{\"account.number <= 5\", 1},\n\t\t\/\/ search using not allowed tag\n\t\t{\"not_allowed = 'boom'\", 0},\n\t\t\/\/ search for not existing tx result\n\t\t{\"account.number >= 2 AND account.number <= 5\", 0},\n\t\t\/\/ search using not existing tag\n\t\t{\"account.date >= TIME 2013-05-03T14:45:00Z\", 0},\n\t\t\/\/ search using CONTAINS\n\t\t{\"account.owner CONTAINS 'an'\", 1},\n\t\t\/\/ search using CONTAINS\n\t\t{\"account.owner CONTAINS 'Vlad'\", 0},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.q, func(t *testing.T) {\n\t\t\tresults, err := indexer.Search(query.MustParse(tc.q))\n\t\t\tassert.NoError(t, err)\n\n\t\t\tassert.Len(t, results, tc.resultsLength)\n\t\t\tif tc.resultsLength > 0 {\n\t\t\t\tassert.Equal(t, []*types.TxResult{txResult}, results)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) {\n\tallowedTags := []string{\"account.number\"}\n\tindexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags))\n\n\ttx := types.Tx(\"SAME MULTIPLE TAGS WITH DIFFERENT VALUES\")\n\ttags := []*abci.KVPair{\n\t\t{Key: \"account.number\", ValueType: abci.KVPair_INT, ValueInt: 1},\n\t\t{Key: \"account.number\", ValueType: abci.KVPair_INT, ValueInt: 2},\n\t}\n\ttxResult := &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeType_OK, Log: \"\", Tags: tags}}\n\n\terr := indexer.Index(txResult)\n\trequire.NoError(t, err)\n\n\tresults, err := indexer.Search(query.MustParse(\"account.number >= 1\"))\n\tassert.NoError(t, err)\n\n\tassert.Len(t, results, 1)\n\tassert.Equal(t, []*types.TxResult{txResult}, results)\n}\n\nfunc benchmarkTxIndex(txsCount int, b *testing.B) {\n\ttx := types.Tx(\"HELLO WORLD\")\n\ttxResult := &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeType_OK, Log: \"\", Tags: []*abci.KVPair{}}}\n\n\tdir, err := ioutil.TempDir(\"\", \"tx_index_db\")\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tdefer os.RemoveAll(dir) \/\/ nolint: errcheck\n\n\tstore := db.NewDB(\"tx_index\", \"leveldb\", dir)\n\tindexer := NewTxIndex(store)\n\n\tbatch := txindex.NewBatch(txsCount)\n\tfor i := 0; i < txsCount; i++ {\n\t\tif err := batch.Add(txResult); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\ttxResult.Index += 1\n\t}\n\n\tb.ResetTimer()\n\n\tfor n := 0; n < b.N; n++ {\n\t\terr = indexer.AddBatch(batch)\n\t}\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n}\n\nfunc BenchmarkTxIndex1(b *testing.B) { benchmarkTxIndex(1, b) }\nfunc BenchmarkTxIndex500(b *testing.B) { benchmarkTxIndex(500, b) }\nfunc BenchmarkTxIndex1000(b *testing.B) { benchmarkTxIndex(1000, b) }\nfunc BenchmarkTxIndex2000(b *testing.B) { benchmarkTxIndex(2000, b) }\nfunc BenchmarkTxIndex10000(b *testing.B) { benchmarkTxIndex(10000, b) }\n<commit_msg>TestIndexAllTags (unit)<commit_after>package kv\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n\tabci \"github.com\/tendermint\/abci\/types\"\n\t\"github.com\/tendermint\/tendermint\/state\/txindex\"\n\t\"github.com\/tendermint\/tendermint\/types\"\n\tdb \"github.com\/tendermint\/tmlibs\/db\"\n\t\"github.com\/tendermint\/tmlibs\/pubsub\/query\"\n)\n\nfunc TestTxIndex(t *testing.T) {\n\tindexer := NewTxIndex(db.NewMemDB())\n\n\ttx := types.Tx(\"HELLO WORLD\")\n\ttxResult := &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeType_OK, Log: \"\", Tags: []*abci.KVPair{}}}\n\thash := tx.Hash()\n\n\tbatch := txindex.NewBatch(1)\n\tif err := batch.Add(txResult); err != nil {\n\t\tt.Error(err)\n\t}\n\terr := indexer.AddBatch(batch)\n\trequire.NoError(t, err)\n\n\tloadedTxResult, err := indexer.Get(hash)\n\trequire.NoError(t, err)\n\tassert.Equal(t, txResult, loadedTxResult)\n\n\ttx2 := types.Tx(\"BYE BYE WORLD\")\n\ttxResult2 := &types.TxResult{1, 0, tx2, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeType_OK, Log: \"\", Tags: []*abci.KVPair{}}}\n\thash2 := tx2.Hash()\n\n\terr = indexer.Index(txResult2)\n\trequire.NoError(t, err)\n\n\tloadedTxResult2, err := indexer.Get(hash2)\n\trequire.NoError(t, err)\n\tassert.Equal(t, txResult2, loadedTxResult2)\n}\n\nfunc TestTxSearch(t *testing.T) {\n\tallowedTags := []string{\"account.number\", \"account.owner\", \"account.date\"}\n\tindexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags))\n\n\ttxResult := txResultWithTags([]*abci.KVPair{\n\t\t{Key: \"account.number\", ValueType: abci.KVPair_INT, ValueInt: 1},\n\t\t{Key: \"account.owner\", ValueType: abci.KVPair_STRING, ValueString: \"Ivan\"},\n\t\t{Key: \"not_allowed\", ValueType: abci.KVPair_STRING, ValueString: \"Vlad\"},\n\t})\n\thash := txResult.Tx.Hash()\n\n\terr := indexer.Index(txResult)\n\trequire.NoError(t, err)\n\n\ttestCases := []struct {\n\t\tq string\n\t\tresultsLength int\n\t}{\n\t\t\/\/ search by hash\n\t\t{fmt.Sprintf(\"tx.hash = '%X'\", hash), 1},\n\t\t\/\/ search by exact match (one tag)\n\t\t{\"account.number = 1\", 1},\n\t\t\/\/ search by exact match (two tags)\n\t\t{\"account.number = 1 AND account.owner = 'Ivan'\", 1},\n\t\t\/\/ search by exact match (two tags)\n\t\t{\"account.number = 1 AND account.owner = 'Vlad'\", 0},\n\t\t\/\/ search by range\n\t\t{\"account.number >= 1 AND account.number <= 5\", 1},\n\t\t\/\/ search by range (lower bound)\n\t\t{\"account.number >= 1\", 1},\n\t\t\/\/ search by range (upper bound)\n\t\t{\"account.number <= 5\", 1},\n\t\t\/\/ search using not allowed tag\n\t\t{\"not_allowed = 'boom'\", 0},\n\t\t\/\/ search for not existing tx result\n\t\t{\"account.number >= 2 AND account.number <= 5\", 0},\n\t\t\/\/ search using not existing tag\n\t\t{\"account.date >= TIME 2013-05-03T14:45:00Z\", 0},\n\t\t\/\/ search using CONTAINS\n\t\t{\"account.owner CONTAINS 'an'\", 1},\n\t\t\/\/ search using CONTAINS\n\t\t{\"account.owner CONTAINS 'Vlad'\", 0},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.q, func(t *testing.T) {\n\t\t\tresults, err := indexer.Search(query.MustParse(tc.q))\n\t\t\tassert.NoError(t, err)\n\n\t\t\tassert.Len(t, results, tc.resultsLength)\n\t\t\tif tc.resultsLength > 0 {\n\t\t\t\tassert.Equal(t, []*types.TxResult{txResult}, results)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) {\n\tallowedTags := []string{\"account.number\"}\n\tindexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags))\n\n\ttxResult := txResultWithTags([]*abci.KVPair{\n\t\t{Key: \"account.number\", ValueType: abci.KVPair_INT, ValueInt: 1},\n\t\t{Key: \"account.number\", ValueType: abci.KVPair_INT, ValueInt: 2},\n\t})\n\n\terr := indexer.Index(txResult)\n\trequire.NoError(t, err)\n\n\tresults, err := indexer.Search(query.MustParse(\"account.number >= 1\"))\n\tassert.NoError(t, err)\n\n\tassert.Len(t, results, 1)\n\tassert.Equal(t, []*types.TxResult{txResult}, results)\n}\n\nfunc TestIndexAllTags(t *testing.T) {\n\tindexer := NewTxIndex(db.NewMemDB(), IndexAllTags())\n\n\ttxResult := txResultWithTags([]*abci.KVPair{\n\t\tabci.KVPairString(\"account.owner\", \"Ivan\"),\n\t\tabci.KVPairInt(\"account.number\", 1),\n\t})\n\n\terr := indexer.Index(txResult)\n\trequire.NoError(t, err)\n\n\tresults, err := indexer.Search(query.MustParse(\"account.number >= 1\"))\n\tassert.NoError(t, err)\n\tassert.Len(t, results, 1)\n\tassert.Equal(t, []*types.TxResult{txResult}, results)\n\n\tresults, err = indexer.Search(query.MustParse(\"account.owner = 'Ivan'\"))\n\tassert.NoError(t, err)\n\tassert.Len(t, results, 1)\n\tassert.Equal(t, []*types.TxResult{txResult}, results)\n}\n\nfunc txResultWithTags(tags []*abci.KVPair) *types.TxResult {\n\ttx := types.Tx(\"HELLO WORLD\")\n\treturn &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeType_OK, Log: \"\", Tags: tags}}\n}\n\nfunc benchmarkTxIndex(txsCount int, b *testing.B) {\n\ttx := types.Tx(\"HELLO WORLD\")\n\ttxResult := &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeType_OK, Log: \"\", Tags: []*abci.KVPair{}}}\n\n\tdir, err := ioutil.TempDir(\"\", \"tx_index_db\")\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tdefer os.RemoveAll(dir) \/\/ nolint: errcheck\n\n\tstore := db.NewDB(\"tx_index\", \"leveldb\", dir)\n\tindexer := NewTxIndex(store)\n\n\tbatch := txindex.NewBatch(txsCount)\n\tfor i := 0; i < txsCount; i++ {\n\t\tif err := batch.Add(txResult); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\ttxResult.Index += 1\n\t}\n\n\tb.ResetTimer()\n\n\tfor n := 0; n < b.N; n++ {\n\t\terr = indexer.AddBatch(batch)\n\t}\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n}\n\nfunc BenchmarkTxIndex1(b *testing.B) { benchmarkTxIndex(1, b) }\nfunc BenchmarkTxIndex500(b *testing.B) { benchmarkTxIndex(500, b) }\nfunc BenchmarkTxIndex1000(b *testing.B) { benchmarkTxIndex(1000, b) }\nfunc BenchmarkTxIndex2000(b *testing.B) { benchmarkTxIndex(2000, b) }\nfunc BenchmarkTxIndex10000(b *testing.B) { benchmarkTxIndex(10000, b) }\n<|endoftext|>"} {"text":"<commit_before>package weblogs_test\n\nimport (\n \"bytes\"\n \"fmt\"\n \"github.com\/keep94\/weblogs\"\n \"net\/http\"\n \"net\/url\"\n \"testing\"\n \"time\"\n)\n\nvar (\n kNilResponseWriter nilResponseWriter\n kTime = time.Date(2013, time.March, 23, 13, 14, 15, 123456789, time.UTC)\n)\n\nfunc TestNormalLogs(t *testing.T) {\n buf := &bytes.Buffer{}\n clock := &clock{Time: kTime}\n handler := weblogs.HandlerWithOptions(\n &handler{Status: 321, Clock: clock, ElapsedMillis: 387},\n &weblogs.Options{Writer: buf, Now: clock.Now()})\n handler.ServeHTTP(\n kNilResponseWriter,\n newRequest(\"192.168.5.1\", \"GET\", \"\/foo\/bar?query=tall\"))\n expected := \"03\/23\/2013 13:14:15.123456 192.168.5.1 GET \/foo\/bar?query=tall 321 387\\n\"\n verifyLogs(t, expected, buf.String())\n}\n\nfunc TestCommonLogs(t *testing.T) {\n buf := &bytes.Buffer{}\n clock := &clock{Time: kTime}\n handler := weblogs.HandlerWithOptions(\n &handler{Status: 321, Message: \"1234567\"},\n &weblogs.Options{\n Writer: buf,\n Logger: weblogs.ApacheCommonLogger{},\n Now: clock.Now()})\n request := newRequest(\"192.168.5.1\", \"GET\", \"\/foo\/bar?query=tall\")\n request.URL.User = url.User(\"fred\")\n handler.ServeHTTP(\n kNilResponseWriter,\n request)\n expected := \"192.168.5.1 - fred [23\/Mar\/2013:13:14:15 +0000] \\\"GET \/foo\/bar?query=tall HTTP\/1.0\\\" 321 7\\n\"\n verifyLogs(t, expected, buf.String())\n}\n\nfunc TestApacheUser(t *testing.T) {\n verifyString(t, \"-\", weblogs.ApacheUser(nil))\n verifyString(t, \"-\", weblogs.ApacheUser(url.User(\"\")))\n verifyString(t, \"bill\", weblogs.ApacheUser(url.User(\"bill\")))\n}\n\nfunc TestAppendedLogs(t *testing.T) {\n buf := &bytes.Buffer{}\n clock := &clock{Time: kTime}\n handler := weblogs.HandlerWithOptions(\n &handler{Status: 321, LogExtra: \"behere\"},\n &weblogs.Options{Writer: buf, Now: clock.Now()})\n handler.ServeHTTP(\n kNilResponseWriter,\n newRequest(\"192.168.5.1\", \"GET\", \"\/foo\/bar?query=tall\"))\n expected := \"03\/23\/2013 13:14:15.123456 192.168.5.1 GET \/foo\/bar?query=tall 321 0 behere\\n\"\n verifyLogs(t, expected, buf.String())\n}\n\nfunc TestSend500OnNoOutput(t *testing.T) {\n buf := &bytes.Buffer{}\n clock := &clock{Time: kTime}\n handler := weblogs.HandlerWithOptions(\n &handler{LogExtra: \"behere\", Clock: clock, ElapsedMillis: 23},\n &weblogs.Options{Writer: buf, Now: clock.Now()})\n w := &spyResponseWriter{}\n handler.ServeHTTP(\n w,\n newRequest(\"192.168.5.1\", \"GET\", \"\/foo\/bar?query=tall\"))\n expected := \"03\/23\/2013 13:14:15.123456 192.168.5.1 GET \/foo\/bar?query=tall 500 23 behere\\n\"\n verifyLogs(t, expected, buf.String())\n if w.Status != 500 {\n t.Errorf(\"Expected 500 error to be sent, but %d was sent.\", w.Status)\n }\n}\n\nfunc TestUnwrappedCallToWriter(t *testing.T) {\n \/\/ logging extra should should be silently ignored.\n handler := &handler{LogExtra: \"behere\"}\n handler.ServeHTTP(\n kNilResponseWriter,\n newRequest(\"192.168.5.1\", \"GET\", \"\/foo\/bar?query=tall\"))\n}\n\ntype clock struct {\n Time time.Time\n}\n\nfunc (c *clock) AddMillis(millis int) {\n c.Time = c.Time.Add(time.Duration(millis) * time.Millisecond)\n}\n\nfunc (c *clock) Now() func() time.Time {\n return func() time.Time {\n return c.Time\n }\n}\n\nfunc verifyLogs(t *testing.T, expected, actual string) {\n verifyString(t, expected, actual)\n}\n\nfunc verifyString(t *testing.T, expected, actual string) {\n if expected != actual {\n t.Errorf(\"Want: %s, Got: %s\", expected, actual)\n }\n}\n\nfunc newRequest(remoteAddr, method, urlStr string) *http.Request {\n u, _ := url.Parse(urlStr)\n return &http.Request{\n RemoteAddr: remoteAddr,\n Method: method,\n Proto: \"HTTP\/1.0\",\n URL: u}\n}\n\ntype handler struct {\n Status int\n Message string\n LogExtra string\n Clock *clock\n ElapsedMillis int\n}\n\nfunc (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n if h.Status != 0 {\n w.WriteHeader(h.Status)\n fmt.Fprintf(w, \"%s\", h.Message)\n }\n if h.LogExtra != \"\" {\n fmt.Fprintf(weblogs.Writer(r), \" %s\", h.LogExtra)\n }\n if h.Clock != nil {\n h.Clock.AddMillis(h.ElapsedMillis)\n }\n}\n\ntype nilResponseWriter struct {\n}\n\nfunc (w nilResponseWriter) Write(b []byte) (n int, err error) {\n return len(b), nil\n}\n\nfunc (w nilResponseWriter) WriteHeader(status int) {\n}\n\nfunc (w nilResponseWriter) Header() http.Header {\n return http.Header{}\n}\n\ntype spyResponseWriter struct {\n nilResponseWriter\n Status int\n}\n\nfunc (w *spyResponseWriter) WriteHeader(status int) {\n w.Status = status\n}\n \n<commit_msg>Add test to verify that handlers that mutate request object don't affect logging.<commit_after>package weblogs_test\n\nimport (\n \"bytes\"\n \"fmt\"\n \"github.com\/keep94\/weblogs\"\n \"net\/http\"\n \"net\/url\"\n \"testing\"\n \"time\"\n)\n\nvar (\n kNilResponseWriter nilResponseWriter\n kTime = time.Date(2013, time.March, 23, 13, 14, 15, 123456789, time.UTC)\n)\n\nfunc TestNormalLogs(t *testing.T) {\n buf := &bytes.Buffer{}\n clock := &clock{Time: kTime}\n handler := weblogs.HandlerWithOptions(\n &handler{Status: 321, Clock: clock, ElapsedMillis: 387},\n &weblogs.Options{Writer: buf, Now: clock.Now()})\n handler.ServeHTTP(\n kNilResponseWriter,\n newRequest(\"192.168.5.1\", \"GET\", \"\/foo\/bar?query=tall\"))\n expected := \"03\/23\/2013 13:14:15.123456 192.168.5.1 GET \/foo\/bar?query=tall 321 387\\n\"\n verifyLogs(t, expected, buf.String())\n}\n\nfunc TestCommonLogs(t *testing.T) {\n buf := &bytes.Buffer{}\n clock := &clock{Time: kTime}\n handler := weblogs.HandlerWithOptions(\n &handler{Status: 321, Message: \"1234567\"},\n &weblogs.Options{\n Writer: buf,\n Logger: weblogs.ApacheCommonLogger{},\n Now: clock.Now()})\n request := newRequest(\"192.168.5.1\", \"GET\", \"\/foo\/bar?query=tall\")\n request.URL.User = url.User(\"fred\")\n handler.ServeHTTP(\n kNilResponseWriter,\n request)\n expected := \"192.168.5.1 - fred [23\/Mar\/2013:13:14:15 +0000] \\\"GET \/foo\/bar?query=tall HTTP\/1.0\\\" 321 7\\n\"\n verifyLogs(t, expected, buf.String())\n}\n\nfunc TestApacheUser(t *testing.T) {\n verifyString(t, \"-\", weblogs.ApacheUser(nil))\n verifyString(t, \"-\", weblogs.ApacheUser(url.User(\"\")))\n verifyString(t, \"bill\", weblogs.ApacheUser(url.User(\"bill\")))\n}\n\nfunc TestAppendedLogs(t *testing.T) {\n buf := &bytes.Buffer{}\n clock := &clock{Time: kTime}\n handler := weblogs.HandlerWithOptions(\n &handler{Status: 321, LogExtra: \"behere\"},\n &weblogs.Options{Writer: buf, Now: clock.Now()})\n handler.ServeHTTP(\n kNilResponseWriter,\n newRequest(\"192.168.5.1\", \"GET\", \"\/foo\/bar?query=tall\"))\n expected := \"03\/23\/2013 13:14:15.123456 192.168.5.1 GET \/foo\/bar?query=tall 321 0 behere\\n\"\n verifyLogs(t, expected, buf.String())\n}\n\nfunc TestSend500OnNoOutput(t *testing.T) {\n buf := &bytes.Buffer{}\n clock := &clock{Time: kTime}\n handler := weblogs.HandlerWithOptions(\n &handler{LogExtra: \"behere\", Clock: clock, ElapsedMillis: 23},\n &weblogs.Options{Writer: buf, Now: clock.Now()})\n w := &spyResponseWriter{}\n handler.ServeHTTP(\n w,\n newRequest(\"192.168.5.1\", \"GET\", \"\/foo\/bar?query=tall\"))\n expected := \"03\/23\/2013 13:14:15.123456 192.168.5.1 GET \/foo\/bar?query=tall 500 23 behere\\n\"\n verifyLogs(t, expected, buf.String())\n if w.Status != 500 {\n t.Errorf(\"Expected 500 error to be sent, but %d was sent.\", w.Status)\n }\n}\n\nfunc TestUnwrappedCallToWriter(t *testing.T) {\n \/\/ logging extra should should be silently ignored.\n handler := &handler{LogExtra: \"behere\"}\n handler.ServeHTTP(\n kNilResponseWriter,\n newRequest(\"192.168.5.1\", \"GET\", \"\/foo\/bar?query=tall\"))\n}\n\ntype clock struct {\n Time time.Time\n}\n\nfunc (c *clock) AddMillis(millis int) {\n c.Time = c.Time.Add(time.Duration(millis) * time.Millisecond)\n}\n\nfunc (c *clock) Now() func() time.Time {\n return func() time.Time {\n return c.Time\n }\n}\n\nfunc verifyLogs(t *testing.T, expected, actual string) {\n verifyString(t, expected, actual)\n}\n\nfunc verifyString(t *testing.T, expected, actual string) {\n if expected != actual {\n t.Errorf(\"Want: %s, Got: %s\", expected, actual)\n }\n}\n\nfunc newRequest(remoteAddr, method, urlStr string) *http.Request {\n u, _ := url.Parse(urlStr)\n return &http.Request{\n RemoteAddr: remoteAddr,\n Method: method,\n Proto: \"HTTP\/1.0\",\n URL: u}\n}\n\ntype handler struct {\n Status int\n Message string\n LogExtra string\n Clock *clock\n ElapsedMillis int\n}\n\nfunc (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n \/\/ Misbehave by mutating request object to verify that this does not affect\n \/\/ logs\n r.URL.Path = \"\/HandlerMutatedRequest\"\n if h.Status != 0 {\n w.WriteHeader(h.Status)\n fmt.Fprintf(w, \"%s\", h.Message)\n }\n if h.LogExtra != \"\" {\n fmt.Fprintf(weblogs.Writer(r), \" %s\", h.LogExtra)\n }\n if h.Clock != nil {\n h.Clock.AddMillis(h.ElapsedMillis)\n }\n}\n\ntype nilResponseWriter struct {\n}\n\nfunc (w nilResponseWriter) Write(b []byte) (n int, err error) {\n return len(b), nil\n}\n\nfunc (w nilResponseWriter) WriteHeader(status int) {\n}\n\nfunc (w nilResponseWriter) Header() http.Header {\n return http.Header{}\n}\n\ntype spyResponseWriter struct {\n nilResponseWriter\n Status int\n}\n\nfunc (w *spyResponseWriter) WriteHeader(status int) {\n w.Status = status\n}\n \n<|endoftext|>"} {"text":"<commit_before>package tests\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"gopkg.in\/BTrDB\/btrdb.v4\"\n)\n\ntype QueryFunc func(t *testing.T, ctx context.Context, s *btrdb.Stream, start int64, end int64, count int64) ([]btrdb.StatPoint, uint64, int64)\n\nfunc RunTestQueryWithHoles(t *testing.T, query QueryFunc) {\n\tctx := context.Background()\n\tdb := helperConnect(t, ctx)\n\tstream := helperCreateDefaultStream(t, ctx, db, nil, nil)\n\tstart := int64(1519088910) \/\/ Random unix datetime\n\tmidEnd := start + 1000000\n\tmidStart := midEnd + 100000\n\tfinalEnd := midStart + 1000000\n\tcount := int64(100000)\n\tfirstData := helperRandomDataCount(start, midEnd, count)\n\thelperInsert(t, ctx, stream, firstData)\n\tsecondData := helperRandomDataCount(midStart, finalEnd, count)\n\thelperInsert(t, ctx, stream, secondData)\n\tspts, _, width := query(t, ctx, stream, start, finalEnd, count*2)\n\tallData := make([]btrdb.RawPoint, 0)\n\tallData = append(allData, firstData...)\n\tallData = append(allData, secondData...)\n\terr := helperCheckStatisticalCorrect(allData, spts, int64(width))\n\tif err != nil {\n\t\tt.Fatalf(\"Queried data was invalid: %v\", err)\n\t}\n}\n\nfunc RunTestQueryFlushing(t *testing.T, query QueryFunc) {\n\tctx := context.Background()\n\tdb := helperConnect(t, ctx)\n\tstream := helperCreateDefaultStream(t, ctx, db, nil, nil)\n\tstart := int64(1519088910) \/\/ Random unix datetime\n\tend := start + 1000000\n\tcount := int64(100000)\n\tdata := helperRandomDataCount(start, end, count)\n\terr := stream.Insert(ctx, data)\n\tif err != nil {\n\t\tt.Fatalf(\"Error from insert %v\", err)\n\t}\n\tunflushed, _, _ := query(t, ctx, stream, start, end, count)\n\terr = stream.Flush(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"Error from Flush %v\", err)\n\t}\n\tif len(unflushed) == 0 {\n\t\tt.Fatal(\"Unflushed query was empty\")\n\t}\n\tflushed, _, _ := query(t, ctx, stream, start, end, count)\n\tif len(flushed) == 0 {\n\t\tt.Fatal(\"Flushed query was empty\")\n\t}\n\terr = helperCheckStatisticalEqual(unflushed, flushed)\n\tif err != nil {\n\t\tt.Fatal(\"Flushed and unflushed queries were not equal.\")\n\t}\n}\n\nfunc doWindowsQuery(t *testing.T, ctx context.Context, s *btrdb.Stream, start int64, end int64, count int64) ([]btrdb.StatPoint, uint64, int64) {\n\twidth := int64(end - start)\n\tresult, version := helperWindowQuery(t, ctx, s, start, end+width, uint64(width), 0, 0)\n\treturn result, version, width\n}\n\nfunc doAlignedWindowsQuery(t *testing.T, ctx context.Context, s *btrdb.Stream, start int64, end int64, count int64) ([]btrdb.StatPoint, uint64, int64) {\n\tpwe := uint8(48)\n\twidth := int64(1) << pwe\n\tresult, version := helperStatisticalQuery(t, ctx, s, start, end+width, pwe, 0)\n\treturn result, version, width\n}\n\nfunc TestWindowsQueryWithHole(t *testing.T) {\n\tRunTestQueryWithHoles(t, doWindowsQuery)\n}\n\nfunc TestAlignedWindowsQueryWithHole(t *testing.T) {\n\tRunTestQueryWithHoles(t, doAlignedWindowsQuery)\n}\n\nfunc TestWindowsQueryFlushing(t *testing.T) {\n\tRunTestQueryFlushing(t, doWindowsQuery)\n}\n\nfunc TestAlignedWindowsQueryFlushing(t *testing.T) {\n\tRunTestQueryFlushing(t, doAlignedWindowsQuery)\n}\n<commit_msg>Add checks for statistical correctness of flushed and unflushed queries<commit_after>package tests\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"gopkg.in\/BTrDB\/btrdb.v4\"\n)\n\ntype QueryFunc func(t *testing.T, ctx context.Context, s *btrdb.Stream, start int64, end int64, count int64) ([]btrdb.StatPoint, uint64, int64)\n\nfunc RunTestQueryWithHoles(t *testing.T, query QueryFunc) {\n\tctx := context.Background()\n\tdb := helperConnect(t, ctx)\n\tstream := helperCreateDefaultStream(t, ctx, db, nil, nil)\n\tstart := int64(1519088910) \/\/ Random unix datetime\n\tmidEnd := start + 1000000\n\tmidStart := midEnd + 100000\n\tfinalEnd := midStart + 1000000\n\tcount := int64(100000)\n\tfirstData := helperRandomDataCount(start, midEnd, count)\n\thelperInsert(t, ctx, stream, firstData)\n\tsecondData := helperRandomDataCount(midStart, finalEnd, count)\n\thelperInsert(t, ctx, stream, secondData)\n\tspts, _, width := query(t, ctx, stream, start, finalEnd, count*2)\n\tallData := make([]btrdb.RawPoint, 0)\n\tallData = append(allData, firstData...)\n\tallData = append(allData, secondData...)\n\terr := helperCheckStatisticalCorrect(allData, spts, int64(width))\n\tif err != nil {\n\t\tt.Fatalf(\"Queried data was invalid: %v\", err)\n\t}\n}\n\nfunc RunTestQueryFlushing(t *testing.T, query QueryFunc) {\n\tctx := context.Background()\n\tdb := helperConnect(t, ctx)\n\tstream := helperCreateDefaultStream(t, ctx, db, nil, nil)\n\tstart := int64(1519088910) \/\/ Random unix datetime\n\tend := start + 1000000\n\tcount := int64(100000)\n\tdata := helperRandomDataCount(start, end, count)\n\terr := stream.Insert(ctx, data)\n\tif err != nil {\n\t\tt.Fatalf(\"Error from insert %v\", err)\n\t}\n\tunflushed, _, width := query(t, ctx, stream, start, end, count)\n\terr = stream.Flush(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"Error from Flush %v\", err)\n\t}\n\tif len(unflushed) == 0 {\n\t\tt.Fatal(\"Unflushed query was empty\")\n\t}\n\tflushed, _, _ := query(t, ctx, stream, start, end, count)\n\tif len(flushed) == 0 {\n\t\tt.Fatal(\"Flushed query was empty\")\n\t}\n\terr = helperCheckStatisticalEqual(unflushed, flushed)\n\tif err != nil {\n\t\tt.Fatal(\"Flushed and unflushed queries were not equal.\")\n\t}\n\terr = helperCheckStatisticalCorrect(data, unflushed, width)\n\tif err != nil {\n\t\tt.Fatalf(\"Flushed results did not match generated data: %v\", err)\n\t}\n\terr = helperCheckStatisticalCorrect(data, flushed, width)\n\tif err != nil {\n\t\tt.Fatalf(\"Unflushed results did not match generated data: %v\", err)\n\t}\n}\n\nfunc doWindowsQuery(t *testing.T, ctx context.Context, s *btrdb.Stream, start int64, end int64, count int64) ([]btrdb.StatPoint, uint64, int64) {\n\twidth := int64(end - start)\n\tresult, version := helperWindowQuery(t, ctx, s, start, end+width, uint64(width), 0, 0)\n\treturn result, version, width\n}\n\nfunc doAlignedWindowsQuery(t *testing.T, ctx context.Context, s *btrdb.Stream, start int64, end int64, count int64) ([]btrdb.StatPoint, uint64, int64) {\n\tpwe := uint8(48)\n\twidth := int64(1) << pwe\n\tresult, version := helperStatisticalQuery(t, ctx, s, start, end+width, pwe, 0)\n\treturn result, version, width\n}\n\nfunc TestWindowsQueryWithHole(t *testing.T) {\n\tRunTestQueryWithHoles(t, doWindowsQuery)\n}\n\nfunc TestAlignedWindowsQueryWithHole(t *testing.T) {\n\tRunTestQueryWithHoles(t, doAlignedWindowsQuery)\n}\n\nfunc TestWindowsQueryFlushing(t *testing.T) {\n\tRunTestQueryFlushing(t, doWindowsQuery)\n}\n\nfunc TestAlignedWindowsQueryFlushing(t *testing.T) {\n\tRunTestQueryFlushing(t, doAlignedWindowsQuery)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com\/fjukstad\/kvik\/kompute\"\n\t\"github.com\/fjukstad\/kvik\/pipeline\"\n)\n\nfunc main() {\n\taddr := \"192.168.99.100:32805\"\n\t\/\/addr := \"public.opencpu.org:80\"\n\tusername := \"user\"\n\tpassword := \"password\"\n\n\tk := kompute.NewKompute(addr, username, password)\n\n\tp := pipeline.NewPipeline(\"boots\", k)\n\n\t\/\/ --------------- LOAD DATA ---------------- \/\/\n\tname := \"loaddata\"\n\tfunction := \"syntheticdata\"\n\tpkg := \"github.com\/fjukstad\/boots\"\n\targnames := []string{\"nsamples\", \"class\", \"noisevars\"}\n\targs := []string{\n\t\t\"1050\",\n\t\t\"T\",\n\t\t\"9000\",\n\t\t\/\/\"10\", \"T\", \"10\",\n\t}\n\ts := pipeline.NewStage(name, function, pkg, argnames, args)\n\tp.AddStage(s)\n\n\tname = \"response\"\n\tfunction = \"responses\"\n\tpkg = \"github.com\/fjukstad\/boots\"\n\targnames = []string{\"dataset\"}\n\targs = []string{\n\t\t\"from:loaddata\",\n\t}\n\n\ts = pipeline.NewStage(name, function, pkg, argnames, args)\n\tp.AddStage(s)\n\n\tname = \"predictors\"\n\tfunction = \"predictors\"\n\tpkg = \"github.com\/fjukstad\/boots\"\n\targnames = []string{\"dataset\"}\n\targs = []string{\"from:loaddata\"}\n\n\ts = pipeline.NewStage(name, function, pkg, argnames, args)\n\tp.AddStage(s)\n\n\tnumBoots := 3\n\n\tfor i := 0; i < numBoots; i++ {\n\t\tname = \"boots-\" + strconv.Itoa(i)\n\t\tfunction = \"boots\"\n\t\tpkg = \"github.com\/fjukstad\/boots\"\n\t\targnames = []string{\"X\", \"Y\"}\n\t\targs = []string{\"from:predictors\", \"from:response\"}\n\n\t\ts = pipeline.NewStage(name, function, pkg, argnames, args)\n\t\tp.AddStage(s)\n\n\t\tname = \"results-\" + strconv.Itoa(i)\n\t\tpkg = \"base\"\n\t\tif i > 1 {\n\t\t\tfunction = \"append\"\n\t\t\targnames = []string{\"x\", \"values\"}\n\t\t\targs = []string{\"from:results-\" + strconv.Itoa(i-1), \"from:boots-\" + strconv.Itoa(i)}\n\t\t} else {\n\t\t\tfunction = \"as.vector\"\n\t\t\targnames = []string{\"x\"}\n\t\t\targs = []string{\"from:boots-\" + strconv.Itoa(i)}\n\t\t}\n\n\t\ts = pipeline.NewStage(name, function, pkg, argnames, args)\n\t\tp.AddStage(s)\n\t}\n\n\tp.Run()\n\t\/\/p.Print()\n\tp.Save()\n\n\tjson, _ := p.Results(\"print\")\n\tfmt.Println(\"Final results: \\n\", json)\n\n}\n<commit_msg>rm depr boots<commit_after><|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/spf13\/cobra\"\n\tkapi \"k8s.io\/kubernetes\/pkg\/api\"\n\tkcmd \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\"\n\tkcmdutil \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/util\"\n\t\"k8s.io\/kubernetes\/pkg\/kubectl\/resource\"\n\n\tcmdutil \"github.com\/openshift\/origin\/pkg\/cmd\/util\"\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/util\/clientcmd\"\n)\n\nconst (\n\texposeLong = `\nExpose containers internally as services or externally via routes\n\nThere is also the ability to expose a deployment configuration, replication controller, service, or pod\nas a new service on a specified port. If no labels are specified, the new object will re-use the\nlabels from the object it exposes.`\n\n\texposeExample = ` # Create a route based on service nginx. The new route will re-use nginx's labels\n %[1]s expose service nginx\n\n # Create a route and specify your own label and route name\n %[1]s expose service nginx -l name=myroute --name=fromdowntown\n\n # Create a route and specify a hostname\n %[1]s expose service nginx --hostname=www.example.com\n\n # Expose a deployment configuration as a service and use the specified port\n %[1]s expose dc ruby-hello-world --port=8080\n\n # Expose a service as a route in the specified path\n %[1]s expose service nginx --path=\/nginx`\n)\n\n\/\/ NewCmdExpose is a wrapper for the Kubernetes cli expose command\nfunc NewCmdExpose(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdExposeService(f.Factory, out)\n\tcmd.Short = \"Expose a replicated application as a service or route\"\n\tcmd.Long = exposeLong\n\tcmd.Example = fmt.Sprintf(exposeExample, fullName)\n\t\/\/ Default generator to an empty string so we can get more flexibility\n\t\/\/ when setting defaults based on input resources\n\tcmd.Flags().Set(\"generator\", \"\")\n\tcmd.Flag(\"generator\").Usage = \"The name of the API generator to use.\"\n\tcmd.Flag(\"generator\").DefValue = \"\"\n\t\/\/ Default protocol to an empty string so we can get more flexibility\n\t\/\/ when validating the use of it (invalid for routes)\n\tcmd.Flags().Set(\"protocol\", \"\")\n\tcmd.Flag(\"protocol\").DefValue = \"\"\n\tcmd.Flag(\"protocol\").Changed = false\n\tcmd.Flag(\"port\").Usage = \"The port that the resource should serve on.\"\n\tdefRun := cmd.Run\n\tcmd.Run = func(cmd *cobra.Command, args []string) {\n\t\terr := validate(cmd, f, args)\n\t\tkcmdutil.CheckErr(err)\n\t\tdefRun(cmd, args)\n\t}\n\tcmd.Flags().String(\"hostname\", \"\", \"Set a hostname for the new route\")\n\tcmd.Flags().String(\"path\", \"\", \"Set a path for the new route\")\n\treturn cmd\n}\n\n\/\/ validate adds one layer of validation prior to calling the upstream\n\/\/ expose command.\nfunc validate(cmd *cobra.Command, f *clientcmd.Factory, args []string) error {\n\tnamespace, enforceNamespace, err := f.DefaultNamespace()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, kc, err := f.Clients()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmapper, typer := f.Object(false)\n\tr := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), kapi.Codecs.UniversalDecoder()).\n\t\tContinueOnError().\n\t\tNamespaceParam(namespace).DefaultNamespace().\n\t\tFilenameParam(enforceNamespace, false, kcmdutil.GetFlagStringSlice(cmd, \"filename\")...).\n\t\tResourceTypeOrNameArgs(false, args...).\n\t\tFlatten().\n\t\tDo()\n\tinfos, err := r.Infos()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(infos) > 1 {\n\t\treturn fmt.Errorf(\"multiple resources provided: %v\", args)\n\t}\n\tinfo := infos[0]\n\tmapping := info.ResourceMapping()\n\n\tgenerator := kcmdutil.GetFlagString(cmd, \"generator\")\n\tswitch mapping.GroupVersionKind.GroupKind() {\n\tcase kapi.Kind(\"Service\"):\n\t\tswitch generator {\n\t\tcase \"service\/v1\", \"service\/v2\":\n\t\t\t\/\/ Set default protocol back for generating services\n\t\t\tif len(kcmdutil.GetFlagString(cmd, \"protocol\")) == 0 {\n\t\t\t\tcmd.Flags().Set(\"protocol\", \"TCP\")\n\t\t\t}\n\t\tcase \"\":\n\t\t\t\/\/ Default exposing services as a route\n\t\t\tgenerator = \"route\/v1\"\n\t\t\tcmd.Flags().Set(\"generator\", generator)\n\t\t\tfallthrough\n\t\tcase \"route\/v1\":\n\t\t\troute, err := cmdutil.UnsecuredRoute(kc, namespace, info.Name, info.Name, kcmdutil.GetFlagString(cmd, \"port\"))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif route.Spec.Port != nil {\n\t\t\t\tcmd.Flags().Set(\"port\", route.Spec.Port.TargetPort.String())\n\t\t\t}\n\t\t}\n\n\tdefault:\n\t\tswitch generator {\n\t\tcase \"route\/v1\":\n\t\t\treturn fmt.Errorf(\"cannot expose a %s as a route\", mapping.GroupVersionKind.Kind)\n\t\tcase \"\":\n\t\t\t\/\/ Default exposing everything except services as a service\n\t\t\tgenerator = \"service\/v2\"\n\t\t\tcmd.Flags().Set(\"generator\", generator)\n\t\t\tfallthrough\n\t\tcase \"service\/v1\", \"service\/v2\":\n\t\t\t\/\/ Set default protocol back for generating services\n\t\t\tif len(kcmdutil.GetFlagString(cmd, \"protocol\")) == 0 {\n\t\t\t\tcmd.Flags().Set(\"protocol\", \"TCP\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>suggest-exposable resources in oc expose<commit_after>package cmd\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/spf13\/cobra\"\n\tkapi \"k8s.io\/kubernetes\/pkg\/api\"\n\tkcmd \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\"\n\tkcmdutil \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/util\"\n\t\"k8s.io\/kubernetes\/pkg\/kubectl\/resource\"\n\n\tcmdutil \"github.com\/openshift\/origin\/pkg\/cmd\/util\"\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/util\/clientcmd\"\n)\n\nconst (\n\texposeLong = `\nExpose containers internally as services or externally via routes\n\nThere is also the ability to expose a deployment configuration, replication controller, service, or pod\nas a new service on a specified port. If no labels are specified, the new object will re-use the\nlabels from the object it exposes.`\n\n\texposeExample = ` # Create a route based on service nginx. The new route will re-use nginx's labels\n %[1]s expose service nginx\n\n # Create a route and specify your own label and route name\n %[1]s expose service nginx -l name=myroute --name=fromdowntown\n\n # Create a route and specify a hostname\n %[1]s expose service nginx --hostname=www.example.com\n\n # Expose a deployment configuration as a service and use the specified port\n %[1]s expose dc ruby-hello-world --port=8080\n\n # Expose a service as a route in the specified path\n %[1]s expose service nginx --path=\/nginx`\n)\n\n\/\/ NewCmdExpose is a wrapper for the Kubernetes cli expose command\nfunc NewCmdExpose(fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\tcmd := kcmd.NewCmdExposeService(f.Factory, out)\n\tcmd.Short = \"Expose a replicated application as a service or route\"\n\tcmd.Long = exposeLong\n\tcmd.Example = fmt.Sprintf(exposeExample, fullName)\n\t\/\/ Default generator to an empty string so we can get more flexibility\n\t\/\/ when setting defaults based on input resources\n\tcmd.Flags().Set(\"generator\", \"\")\n\tcmd.Flag(\"generator\").Usage = \"The name of the API generator to use.\"\n\tcmd.Flag(\"generator\").DefValue = \"\"\n\t\/\/ Default protocol to an empty string so we can get more flexibility\n\t\/\/ when validating the use of it (invalid for routes)\n\tcmd.Flags().Set(\"protocol\", \"\")\n\tcmd.Flag(\"protocol\").DefValue = \"\"\n\tcmd.Flag(\"protocol\").Changed = false\n\tcmd.Flag(\"port\").Usage = \"The port that the resource should serve on.\"\n\tdefRun := cmd.Run\n\tcmd.Run = func(cmd *cobra.Command, args []string) {\n\t\terr := validate(cmd, f, args)\n\t\tkcmdutil.CheckErr(err)\n\t\tdefRun(cmd, args)\n\t}\n\tcmd.Flags().String(\"hostname\", \"\", \"Set a hostname for the new route\")\n\tcmd.Flags().String(\"path\", \"\", \"Set a path for the new route\")\n\treturn cmd\n}\n\n\/\/ validate adds one layer of validation prior to calling the upstream\n\/\/ expose command.\nfunc validate(cmd *cobra.Command, f *clientcmd.Factory, args []string) error {\n\tnamespace, enforceNamespace, err := f.DefaultNamespace()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, kc, err := f.Clients()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmapper, typer := f.Object(false)\n\tr := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), kapi.Codecs.UniversalDecoder()).\n\t\tContinueOnError().\n\t\tNamespaceParam(namespace).DefaultNamespace().\n\t\tFilenameParam(enforceNamespace, false, kcmdutil.GetFlagStringSlice(cmd, \"filename\")...).\n\t\tResourceTypeOrNameArgs(false, args...).\n\t\tFlatten().\n\t\tDo()\n\tinfos, err := r.Infos()\n\tif err != nil {\n\t\treturn kcmdutil.UsageError(cmd, err.Error())\n\t}\n\tif len(infos) > 1 {\n\t\treturn fmt.Errorf(\"multiple resources provided: %v\", args)\n\t}\n\tinfo := infos[0]\n\tmapping := info.ResourceMapping()\n\n\tgenerator := kcmdutil.GetFlagString(cmd, \"generator\")\n\tswitch mapping.GroupVersionKind.GroupKind() {\n\tcase kapi.Kind(\"Service\"):\n\t\tswitch generator {\n\t\tcase \"service\/v1\", \"service\/v2\":\n\t\t\t\/\/ Set default protocol back for generating services\n\t\t\tif len(kcmdutil.GetFlagString(cmd, \"protocol\")) == 0 {\n\t\t\t\tcmd.Flags().Set(\"protocol\", \"TCP\")\n\t\t\t}\n\t\tcase \"\":\n\t\t\t\/\/ Default exposing services as a route\n\t\t\tgenerator = \"route\/v1\"\n\t\t\tcmd.Flags().Set(\"generator\", generator)\n\t\t\tfallthrough\n\t\tcase \"route\/v1\":\n\t\t\troute, err := cmdutil.UnsecuredRoute(kc, namespace, info.Name, info.Name, kcmdutil.GetFlagString(cmd, \"port\"))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif route.Spec.Port != nil {\n\t\t\t\tcmd.Flags().Set(\"port\", route.Spec.Port.TargetPort.String())\n\t\t\t}\n\t\t}\n\n\tdefault:\n\t\tswitch generator {\n\t\tcase \"route\/v1\":\n\t\t\treturn fmt.Errorf(\"cannot expose a %s as a route\", mapping.GroupVersionKind.Kind)\n\t\tcase \"\":\n\t\t\t\/\/ Default exposing everything except services as a service\n\t\t\tgenerator = \"service\/v2\"\n\t\t\tcmd.Flags().Set(\"generator\", generator)\n\t\t\tfallthrough\n\t\tcase \"service\/v1\", \"service\/v2\":\n\t\t\t\/\/ Set default protocol back for generating services\n\t\t\tif len(kcmdutil.GetFlagString(cmd, \"protocol\")) == 0 {\n\t\t\t\tcmd.Flags().Set(\"protocol\", \"TCP\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package cmd\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\tkcmdutil \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/util\"\n\n\tosclient \"github.com\/openshift\/origin\/pkg\/client\"\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/templates\"\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/util\/clientcmd\"\n\tuserapi \"github.com\/openshift\/origin\/pkg\/user\/api\"\n)\n\nconst WhoAmIRecommendedCommandName = \"whoami\"\n\nvar whoamiLong = templates.LongDesc(`\n\tShow information about the current session\n\n\tThe default options for this command will return the currently authenticated user name\n\tor an empty string. Other flags support returning the currently used token or the\n\tuser context.`)\n\ntype WhoAmIOptions struct {\n\tUserInterface osclient.UserInterface\n\n\tOut io.Writer\n}\n\nfunc NewCmdWhoAmI(name, fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\to := &WhoAmIOptions{}\n\n\tcmd := &cobra.Command{\n\t\tUse: name,\n\t\tShort: \"Return information about the current session\",\n\t\tLong: whoamiLong,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\terr := RunWhoAmI(f, out, cmd, args, o)\n\t\t\tkcmdutil.CheckErr(err)\n\t\t},\n\t}\n\n\t\/\/ Deprecated in 1.4\n\t\/\/ Remove in 1.5 so we can use --token and --context for building the client again\n\tcmd.Flags().Bool(\"token\", false, \"Deprecated, use --show-token instead\")\n\tcmd.Flags().Bool(\"context\", false, \"Deprecated, use --show-context instead\")\n\tcmd.Flags().MarkDeprecated(\"token\", \"use -t or --show-token instead\")\n\tcmd.Flags().MarkDeprecated(\"context\", \"use -c or --show-context instead\")\n\tcmd.Flags().MarkHidden(\"token\")\n\tcmd.Flags().MarkHidden(\"context\")\n\n\tcmd.Flags().BoolP(\"show-token\", \"t\", false, \"Print the token the current session is using. This will return an error if you are using a different form of authentication.\")\n\tcmd.Flags().BoolP(\"show-context\", \"c\", false, \"Print the current user context name\")\n\tcmd.Flags().Bool(\"show-server\", false, \"Print the current server's REST API URL\")\n\n\treturn cmd\n}\n\nfunc (o WhoAmIOptions) WhoAmI() (*userapi.User, error) {\n\tme, err := o.UserInterface.Get(\"~\")\n\tif err == nil {\n\t\tfmt.Fprintf(o.Out, \"%s\\n\", me.Name)\n\t}\n\n\treturn me, err\n}\n\nfunc RunWhoAmI(f *clientcmd.Factory, out io.Writer, cmd *cobra.Command, args []string, o *WhoAmIOptions) error {\n\tif kcmdutil.GetFlagBool(cmd, \"token\") || kcmdutil.GetFlagBool(cmd, \"show-token\") {\n\t\tcfg, err := f.OpenShiftClientConfig.ClientConfig()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(cfg.BearerToken) == 0 {\n\t\t\treturn fmt.Errorf(\"no token is currently in use for this session\")\n\t\t}\n\t\tfmt.Fprintf(out, \"%s\\n\", cfg.BearerToken)\n\t\treturn nil\n\t}\n\tif kcmdutil.GetFlagBool(cmd, \"context\") || kcmdutil.GetFlagBool(cmd, \"show-context\") {\n\t\tcfg, err := f.OpenShiftClientConfig.RawConfig()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(cfg.CurrentContext) == 0 {\n\t\t\treturn fmt.Errorf(\"no context has been set\")\n\t\t}\n\t\tfmt.Fprintf(out, \"%s\\n\", cfg.CurrentContext)\n\t\treturn nil\n\t}\n\tif kcmdutil.GetFlagBool(cmd, \"show-server\") {\n\t\tcfg, err := f.OpenShiftClientConfig.RawConfig()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, c := range cfg.Clusters {\n\t\t\tfmt.Fprintf(out, \"%s\\n\", c.Server)\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"unable to get clusters. Cannot retrieve server URL.\")\n\t}\n\n\tclient, _, err := f.Clients()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\to.UserInterface = client.Users()\n\to.Out = out\n\n\t_, err = o.WhoAmI()\n\treturn err\n}\n<commit_msg>fix oc whoami --show-server output<commit_after>package cmd\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/spf13\/cobra\"\n\n\tkcmdutil \"k8s.io\/kubernetes\/pkg\/kubectl\/cmd\/util\"\n\n\tosclient \"github.com\/openshift\/origin\/pkg\/client\"\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/templates\"\n\t\"github.com\/openshift\/origin\/pkg\/cmd\/util\/clientcmd\"\n\tuserapi \"github.com\/openshift\/origin\/pkg\/user\/api\"\n)\n\nconst WhoAmIRecommendedCommandName = \"whoami\"\n\nvar whoamiLong = templates.LongDesc(`\n\tShow information about the current session\n\n\tThe default options for this command will return the currently authenticated user name\n\tor an empty string. Other flags support returning the currently used token or the\n\tuser context.`)\n\ntype WhoAmIOptions struct {\n\tUserInterface osclient.UserInterface\n\n\tOut io.Writer\n}\n\nfunc NewCmdWhoAmI(name, fullName string, f *clientcmd.Factory, out io.Writer) *cobra.Command {\n\to := &WhoAmIOptions{}\n\n\tcmd := &cobra.Command{\n\t\tUse: name,\n\t\tShort: \"Return information about the current session\",\n\t\tLong: whoamiLong,\n\t\tRun: func(cmd *cobra.Command, args []string) {\n\t\t\terr := RunWhoAmI(f, out, cmd, args, o)\n\t\t\tkcmdutil.CheckErr(err)\n\t\t},\n\t}\n\n\t\/\/ Deprecated in 1.4\n\t\/\/ Remove in 1.5 so we can use --token and --context for building the client again\n\tcmd.Flags().Bool(\"token\", false, \"Deprecated, use --show-token instead\")\n\tcmd.Flags().Bool(\"context\", false, \"Deprecated, use --show-context instead\")\n\tcmd.Flags().MarkDeprecated(\"token\", \"use -t or --show-token instead\")\n\tcmd.Flags().MarkDeprecated(\"context\", \"use -c or --show-context instead\")\n\tcmd.Flags().MarkHidden(\"token\")\n\tcmd.Flags().MarkHidden(\"context\")\n\n\tcmd.Flags().BoolP(\"show-token\", \"t\", false, \"Print the token the current session is using. This will return an error if you are using a different form of authentication.\")\n\tcmd.Flags().BoolP(\"show-context\", \"c\", false, \"Print the current user context name\")\n\tcmd.Flags().Bool(\"show-server\", false, \"Print the current server's REST API URL\")\n\n\treturn cmd\n}\n\nfunc (o WhoAmIOptions) WhoAmI() (*userapi.User, error) {\n\tme, err := o.UserInterface.Get(\"~\")\n\tif err == nil {\n\t\tfmt.Fprintf(o.Out, \"%s\\n\", me.Name)\n\t}\n\n\treturn me, err\n}\n\nfunc RunWhoAmI(f *clientcmd.Factory, out io.Writer, cmd *cobra.Command, args []string, o *WhoAmIOptions) error {\n\tif kcmdutil.GetFlagBool(cmd, \"token\") || kcmdutil.GetFlagBool(cmd, \"show-token\") {\n\t\tcfg, err := f.OpenShiftClientConfig.ClientConfig()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(cfg.BearerToken) == 0 {\n\t\t\treturn fmt.Errorf(\"no token is currently in use for this session\")\n\t\t}\n\t\tfmt.Fprintf(out, \"%s\\n\", cfg.BearerToken)\n\t\treturn nil\n\t}\n\tif kcmdutil.GetFlagBool(cmd, \"context\") || kcmdutil.GetFlagBool(cmd, \"show-context\") {\n\t\tcfg, err := f.OpenShiftClientConfig.RawConfig()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(cfg.CurrentContext) == 0 {\n\t\t\treturn fmt.Errorf(\"no context has been set\")\n\t\t}\n\t\tfmt.Fprintf(out, \"%s\\n\", cfg.CurrentContext)\n\t\treturn nil\n\t}\n\tif kcmdutil.GetFlagBool(cmd, \"show-server\") {\n\t\tcfg, err := f.OpenShiftClientConfig.ClientConfig()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintf(out, \"%s\\n\", cfg.Host)\n\t\treturn nil\n\t}\n\n\tclient, _, err := f.Clients()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\to.UserInterface = client.Users()\n\to.Out = out\n\n\t_, err = o.WhoAmI()\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package job\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/appscode\/go\/log\"\n\tapi \"github.com\/kubedb\/apimachinery\/apis\/kubedb\/v1alpha1\"\n\t\"github.com\/kubedb\/apimachinery\/client\/typed\/kubedb\/v1alpha1\/util\"\n\t\"github.com\/kubedb\/apimachinery\/pkg\/eventer\"\n\tbatch \"k8s.io\/api\/batch\/v1\"\n\tcore \"k8s.io\/api\/core\/v1\"\n\tkerr \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\nfunc (c *Controller) completeJob(job *batch.Job) error {\n\tdeletePolicy := metav1.DeletePropagationBackground\n\terr := c.Client.BatchV1().Jobs(job.Namespace).Delete(job.Name, &metav1.DeleteOptions{\n\t\tPropagationPolicy: &deletePolicy,\n\t})\n\n\tif err != nil && !kerr.IsNotFound(err) {\n\t\treturn fmt.Errorf(\"failed to delete job: %s, reason: %s\", job.Name, err)\n\t}\n\n\tjobType := job.Annotations[api.AnnotationJobType]\n\tif jobType == api.JobTypeBackup {\n\t\treturn c.handleBackupJob(job)\n\t} else if jobType == api.JobTypeRestore {\n\t\treturn c.handleRestoreJob(job)\n\t}\n\n\treturn nil\n}\n\nfunc (c *Controller) handleBackupJob(job *batch.Job) error {\n\tfor _, o := range job.OwnerReferences {\n\t\tif o.Kind == api.ResourceKindSnapshot {\n\t\t\tsnapshot, err := c.ExtClient.Snapshots(job.Namespace).Get(o.Name, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tjobSucceeded := job.Status.Succeeded > 0\n\n\t\t\t_, _, err = util.PatchSnapshot(c.ExtClient, snapshot, func(in *api.Snapshot) *api.Snapshot {\n\t\t\t\tif jobSucceeded {\n\t\t\t\t\tin.Status.Phase = api.SnapshotPhaseSucceeded\n\t\t\t\t} else {\n\t\t\t\t\tin.Status.Phase = api.SnapshotPhaseFailed\n\t\t\t\t}\n\t\t\t\tt := metav1.Now()\n\t\t\t\tin.Status.CompletionTime = &t\n\t\t\t\tdelete(in.Labels, api.LabelSnapshotStatus)\n\t\t\t\treturn in\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tc.eventRecorder.Eventf(snapshot.ObjectReference(), core.EventTypeWarning, eventer.EventReasonFailedToUpdate, err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\truntimeObj, err := c.snapshotter.GetDatabase(metav1.ObjectMeta{Name: snapshot.Spec.DatabaseName, Namespace: snapshot.Namespace})\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif jobSucceeded {\n\t\t\t\tc.eventRecorder.Event(\n\t\t\t\t\tapi.ObjectReferenceFor(runtimeObj),\n\t\t\t\t\tcore.EventTypeNormal,\n\t\t\t\t\teventer.EventReasonSuccessfulSnapshot,\n\t\t\t\t\t\"Successfully completed snapshot\",\n\t\t\t\t)\n\t\t\t\tc.eventRecorder.Event(\n\t\t\t\t\tsnapshot.ObjectReference(),\n\t\t\t\t\tcore.EventTypeNormal,\n\t\t\t\t\teventer.EventReasonSuccessfulSnapshot,\n\t\t\t\t\t\"Successfully completed snapshot\",\n\t\t\t\t)\n\t\t\t} else {\n\t\t\t\tc.eventRecorder.Event(\n\t\t\t\t\tapi.ObjectReferenceFor(runtimeObj),\n\t\t\t\t\tcore.EventTypeWarning,\n\t\t\t\t\teventer.EventReasonSnapshotFailed,\n\t\t\t\t\t\"Failed to complete snapshot\",\n\t\t\t\t)\n\t\t\t\tc.eventRecorder.Event(\n\t\t\t\t\tsnapshot.ObjectReference(),\n\t\t\t\t\tcore.EventTypeWarning,\n\t\t\t\t\teventer.EventReasonSnapshotFailed,\n\t\t\t\t\t\"Failed to complete snapshot\",\n\t\t\t\t)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tlog.Errorf(`resource Job \"%s\/%s\" doesn't have OwnerReference for Snapshot`, job.Namespace, job.Name)\n\treturn nil\n}\n\nfunc (c *Controller) handleRestoreJob(job *batch.Job) error {\n\tfor _, o := range job.OwnerReferences {\n\t\tif o.Kind == job.Labels[api.LabelDatabaseKind] {\n\t\t\tjobSucceeded := job.Status.Succeeded > 0\n\n\t\t\tvar phase api.DatabasePhase\n\t\t\tvar reason string\n\t\t\tif jobSucceeded {\n\t\t\t\tphase = api.DatabasePhaseRunning\n\t\t\t} else {\n\t\t\t\tphase = api.DatabasePhaseFailed\n\t\t\t\treason = \"Failed to complete initialization\"\n\t\t\t}\n\t\t\tobjectMeta := metav1.ObjectMeta{Name: o.Name, Namespace: job.Namespace}\n\t\t\terr := c.snapshotter.SetDatabaseStatus(objectMeta, phase, reason)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif jobSucceeded {\n\t\t\t\terr = c.snapshotter.UpsertDatabaseAnnotation(objectMeta, map[string]string{\n\t\t\t\t\tapi.AnnotationInitialized: \"\",\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\truntimeObj, err := c.snapshotter.GetDatabase(objectMeta)\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif jobSucceeded {\n\t\t\t\tc.eventRecorder.Event(\n\t\t\t\t\tapi.ObjectReferenceFor(runtimeObj),\n\t\t\t\t\tcore.EventTypeNormal,\n\t\t\t\t\teventer.EventReasonSuccessfulSnapshot,\n\t\t\t\t\t\"Successfully completed initialization\",\n\t\t\t\t)\n\t\t\t} else {\n\t\t\t\tc.eventRecorder.Event(\n\t\t\t\t\tapi.ObjectReferenceFor(runtimeObj),\n\t\t\t\t\tcore.EventTypeWarning,\n\t\t\t\t\teventer.EventReasonSnapshotFailed,\n\t\t\t\t\t\"Failed to complete initialization\",\n\t\t\t\t)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\tlog.Errorf(`resource Job \"%s\/%s\" doesn't have OwnerReference for %s`, job.Namespace, job.Name, job.Labels[api.LabelDatabaseKind])\n\treturn nil\n}\n<commit_msg>Fixed event reason for Restore Job (#187)<commit_after>package job\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/appscode\/go\/log\"\n\tapi \"github.com\/kubedb\/apimachinery\/apis\/kubedb\/v1alpha1\"\n\t\"github.com\/kubedb\/apimachinery\/client\/typed\/kubedb\/v1alpha1\/util\"\n\t\"github.com\/kubedb\/apimachinery\/pkg\/eventer\"\n\tbatch \"k8s.io\/api\/batch\/v1\"\n\tcore \"k8s.io\/api\/core\/v1\"\n\tkerr \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\nfunc (c *Controller) completeJob(job *batch.Job) error {\n\tdeletePolicy := metav1.DeletePropagationBackground\n\terr := c.Client.BatchV1().Jobs(job.Namespace).Delete(job.Name, &metav1.DeleteOptions{\n\t\tPropagationPolicy: &deletePolicy,\n\t})\n\n\tif err != nil && !kerr.IsNotFound(err) {\n\t\treturn fmt.Errorf(\"failed to delete job: %s, reason: %s\", job.Name, err)\n\t}\n\n\tjobType := job.Annotations[api.AnnotationJobType]\n\tif jobType == api.JobTypeBackup {\n\t\treturn c.handleBackupJob(job)\n\t} else if jobType == api.JobTypeRestore {\n\t\treturn c.handleRestoreJob(job)\n\t}\n\n\treturn nil\n}\n\nfunc (c *Controller) handleBackupJob(job *batch.Job) error {\n\tfor _, o := range job.OwnerReferences {\n\t\tif o.Kind == api.ResourceKindSnapshot {\n\t\t\tsnapshot, err := c.ExtClient.Snapshots(job.Namespace).Get(o.Name, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tjobSucceeded := job.Status.Succeeded > 0\n\n\t\t\t_, _, err = util.PatchSnapshot(c.ExtClient, snapshot, func(in *api.Snapshot) *api.Snapshot {\n\t\t\t\tif jobSucceeded {\n\t\t\t\t\tin.Status.Phase = api.SnapshotPhaseSucceeded\n\t\t\t\t} else {\n\t\t\t\t\tin.Status.Phase = api.SnapshotPhaseFailed\n\t\t\t\t}\n\t\t\t\tt := metav1.Now()\n\t\t\t\tin.Status.CompletionTime = &t\n\t\t\t\tdelete(in.Labels, api.LabelSnapshotStatus)\n\t\t\t\treturn in\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tc.eventRecorder.Eventf(snapshot.ObjectReference(), core.EventTypeWarning, eventer.EventReasonFailedToUpdate, err.Error())\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\truntimeObj, err := c.snapshotter.GetDatabase(metav1.ObjectMeta{Name: snapshot.Spec.DatabaseName, Namespace: snapshot.Namespace})\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif jobSucceeded {\n\t\t\t\tc.eventRecorder.Event(\n\t\t\t\t\tapi.ObjectReferenceFor(runtimeObj),\n\t\t\t\t\tcore.EventTypeNormal,\n\t\t\t\t\teventer.EventReasonSuccessfulSnapshot,\n\t\t\t\t\t\"Successfully completed snapshot\",\n\t\t\t\t)\n\t\t\t\tc.eventRecorder.Event(\n\t\t\t\t\tsnapshot.ObjectReference(),\n\t\t\t\t\tcore.EventTypeNormal,\n\t\t\t\t\teventer.EventReasonSuccessfulSnapshot,\n\t\t\t\t\t\"Successfully completed snapshot\",\n\t\t\t\t)\n\t\t\t} else {\n\t\t\t\tc.eventRecorder.Event(\n\t\t\t\t\tapi.ObjectReferenceFor(runtimeObj),\n\t\t\t\t\tcore.EventTypeWarning,\n\t\t\t\t\teventer.EventReasonSnapshotFailed,\n\t\t\t\t\t\"Failed to complete snapshot\",\n\t\t\t\t)\n\t\t\t\tc.eventRecorder.Event(\n\t\t\t\t\tsnapshot.ObjectReference(),\n\t\t\t\t\tcore.EventTypeWarning,\n\t\t\t\t\teventer.EventReasonSnapshotFailed,\n\t\t\t\t\t\"Failed to complete snapshot\",\n\t\t\t\t)\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tlog.Errorf(`resource Job \"%s\/%s\" doesn't have OwnerReference for Snapshot`, job.Namespace, job.Name)\n\treturn nil\n}\n\nfunc (c *Controller) handleRestoreJob(job *batch.Job) error {\n\tfor _, o := range job.OwnerReferences {\n\t\tif o.Kind == job.Labels[api.LabelDatabaseKind] {\n\t\t\tjobSucceeded := job.Status.Succeeded > 0\n\n\t\t\tvar phase api.DatabasePhase\n\t\t\tvar reason string\n\t\t\tif jobSucceeded {\n\t\t\t\tphase = api.DatabasePhaseRunning\n\t\t\t} else {\n\t\t\t\tphase = api.DatabasePhaseFailed\n\t\t\t\treason = \"Failed to complete initialization\"\n\t\t\t}\n\t\t\tobjectMeta := metav1.ObjectMeta{Name: o.Name, Namespace: job.Namespace}\n\t\t\terr := c.snapshotter.SetDatabaseStatus(objectMeta, phase, reason)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif jobSucceeded {\n\t\t\t\terr = c.snapshotter.UpsertDatabaseAnnotation(objectMeta, map[string]string{\n\t\t\t\t\tapi.AnnotationInitialized: \"\",\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\truntimeObj, err := c.snapshotter.GetDatabase(objectMeta)\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif jobSucceeded {\n\t\t\t\tc.eventRecorder.Event(\n\t\t\t\t\tapi.ObjectReferenceFor(runtimeObj),\n\t\t\t\t\tcore.EventTypeNormal,\n\t\t\t\t\teventer.EventReasonSuccessfulInitialize,\n\t\t\t\t\t\"Successfully completed initialization\",\n\t\t\t\t)\n\t\t\t} else {\n\t\t\t\tc.eventRecorder.Event(\n\t\t\t\t\tapi.ObjectReferenceFor(runtimeObj),\n\t\t\t\t\tcore.EventTypeWarning,\n\t\t\t\t\teventer.EventReasonFailedToInitialize,\n\t\t\t\t\t\"Failed to complete initialization\",\n\t\t\t\t)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\tlog.Errorf(`resource Job \"%s\/%s\" doesn't have OwnerReference for %s`, job.Namespace, job.Name, job.Labels[api.LabelDatabaseKind])\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Palantir Technologies, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage dirchecksum\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com\/termie\/go-shutil\"\n)\n\n\/\/ ChecksumForDirAction returns the Checksumset for the state of the target directory after running the provided action\n\/\/ on it. The target directory is returned to its original state after the action is run. Uses the following process:\n\/\/\n\/\/ * Copies the target directory to a temporary location (unique location in the same parent directory)\n\/\/ * Moves the target directory to another temporary location (unique location in the same parent directory)\n\/\/ * Moves the copied directory to the original location\n\/\/ * Runs the provided action\n\/\/ * Computes the checksums for the directory\n\/\/ * Removes the directory\n\/\/ * Moves the original target directory from the temporary location back to its original location\n\/\/\n\/\/ The result of the above is that the checksums are computed for the directory after the action is run, but the target\n\/\/ directory stays in its original state. This function registers a signal handler that restores the state to the\n\/\/ original state on SIGINT or SIGTERM signals and then calls os.Exit(1).\nfunc ChecksumForDirAction(dir string, action func(dir string) error) (ChecksumSet, error) {\n\tvar origDirCopy string\n\tvar origDirMoved string\n\n\tcleanupFn := mustDefer(func() {\n\t\t\/\/ remove copied directory\n\t\tif origDirCopy != \"\" {\n\t\t\t_ = os.RemoveAll(origDirCopy)\n\t\t}\n\t\t\/\/ move original directory back to original location\n\t\tif origDirMoved != \"\" {\n\t\t\t_ = os.RemoveAll(dir)\n\t\t\t_ = os.Rename(origDirMoved, dir)\n\t\t}\n\t})\n\tdefer cleanupFn()\n\n\t\/\/ copy original directory to temporary location\n\tvar err error\n\torigDirCopy, err = createTmpDirPath(path.Dir(dir))\n\tif err != nil {\n\t\treturn ChecksumSet{}, err\n\t}\n\tif err := shutil.CopyTree(dir, origDirCopy, nil); err != nil {\n\t\treturn ChecksumSet{}, fmt.Errorf(\"failed to copy directory: %v\", err)\n\t}\n\n\t\/\/ move original directory to temporary location\n\torigDirMoved, err = createTmpDirPath(path.Dir(dir))\n\tif err != nil {\n\t\treturn ChecksumSet{}, err\n\t}\n\tif err := os.Rename(dir, origDirMoved); err != nil {\n\t\treturn ChecksumSet{}, fmt.Errorf(\"failed to move original output directory to temporary location: %v\", err)\n\t}\n\n\t\/\/ move copied directory to original location\n\tif err := os.Rename(origDirCopy, dir); err != nil {\n\t\treturn ChecksumSet{}, fmt.Errorf(\"failed to move copied output directory to original location: %v\", err)\n\t}\n\torigDirCopy = \"\"\n\n\tif err := action(dir); err != nil {\n\t\treturn ChecksumSet{}, err\n\t}\n\n\tnewChecksums, err := ChecksumsForMatchingPaths(dir, nil)\n\tif err != nil {\n\t\treturn ChecksumSet{}, fmt.Errorf(\"failed to compute new checksums: %v\", err)\n\t}\n\treturn newChecksums, nil\n}\n\nfunc createTmpDirPath(parentDir string) (string, error) {\n\ttmpDir, err := ioutil.TempDir(parentDir, \"amalgomate-verify-\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create temporary directory: %v\", err)\n\t}\n\tif err := os.RemoveAll(tmpDir); err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to remove temporary directory: %v\", err)\n\t}\n\treturn tmpDir, nil\n}\n<commit_msg>Rename function for consistency (#253)<commit_after>\/\/ Copyright 2016 Palantir Technologies, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage dirchecksum\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com\/termie\/go-shutil\"\n)\n\n\/\/ ChecksumsForDirAfterAction returns the Checksumset for the state of the target directory after running the provided\n\/\/ action on it. The target directory is returned to its original state after the action is run. Uses the following\n\/\/ process:\n\/\/\n\/\/ * Copies the target directory to a temporary location (unique location in the same parent directory)\n\/\/ * Moves the target directory to another temporary location (unique location in the same parent directory)\n\/\/ * Moves the copied directory to the original location\n\/\/ * Runs the provided action\n\/\/ * Computes the checksums for the directory\n\/\/ * Removes the directory\n\/\/ * Moves the original target directory from the temporary location back to its original location\n\/\/\n\/\/ The result of the above is that the checksums are computed for the directory after the action is run, but the target\n\/\/ directory stays in its original state. This function registers a signal handler that restores the state to the\n\/\/ original state on SIGINT or SIGTERM signals and then calls os.Exit(1).\nfunc ChecksumsForDirAfterAction(dir string, action func(dir string) error) (ChecksumSet, error) {\n\tvar origDirCopy string\n\tvar origDirMoved string\n\n\tcleanupFn := mustDefer(func() {\n\t\t\/\/ remove copied directory\n\t\tif origDirCopy != \"\" {\n\t\t\t_ = os.RemoveAll(origDirCopy)\n\t\t}\n\t\t\/\/ move original directory back to original location\n\t\tif origDirMoved != \"\" {\n\t\t\t_ = os.RemoveAll(dir)\n\t\t\t_ = os.Rename(origDirMoved, dir)\n\t\t}\n\t})\n\tdefer cleanupFn()\n\n\t\/\/ copy original directory to temporary location\n\tvar err error\n\torigDirCopy, err = createTmpDirPath(path.Dir(dir))\n\tif err != nil {\n\t\treturn ChecksumSet{}, err\n\t}\n\tif err := shutil.CopyTree(dir, origDirCopy, nil); err != nil {\n\t\treturn ChecksumSet{}, fmt.Errorf(\"failed to copy directory: %v\", err)\n\t}\n\n\t\/\/ move original directory to temporary location\n\torigDirMoved, err = createTmpDirPath(path.Dir(dir))\n\tif err != nil {\n\t\treturn ChecksumSet{}, err\n\t}\n\tif err := os.Rename(dir, origDirMoved); err != nil {\n\t\treturn ChecksumSet{}, fmt.Errorf(\"failed to move original output directory to temporary location: %v\", err)\n\t}\n\n\t\/\/ move copied directory to original location\n\tif err := os.Rename(origDirCopy, dir); err != nil {\n\t\treturn ChecksumSet{}, fmt.Errorf(\"failed to move copied output directory to original location: %v\", err)\n\t}\n\torigDirCopy = \"\"\n\n\tif err := action(dir); err != nil {\n\t\treturn ChecksumSet{}, err\n\t}\n\n\tnewChecksums, err := ChecksumsForMatchingPaths(dir, nil)\n\tif err != nil {\n\t\treturn ChecksumSet{}, fmt.Errorf(\"failed to compute new checksums: %v\", err)\n\t}\n\treturn newChecksums, nil\n}\n\nfunc createTmpDirPath(parentDir string) (string, error) {\n\ttmpDir, err := ioutil.TempDir(parentDir, \"amalgomate-verify-\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create temporary directory: %v\", err)\n\t}\n\tif err := os.RemoveAll(tmpDir); err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to remove temporary directory: %v\", err)\n\t}\n\treturn tmpDir, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package kubelego\n\nimport (\n\t\"github.com\/jetstack\/kube-lego\/pkg\/kubelego_const\"\n\n\t\"fmt\"\n\t\"strings\"\n)\n\nfunc (kl *KubeLego) TlsIgnoreDuplicatedSecrets(tlsSlice []kubelego.Tls) []kubelego.Tls {\n\n\ttlsBySecret := map[string][]kubelego.Tls{}\n\n\tfor _, elm := range tlsSlice {\n\t\tkey := fmt.Sprintf(\n\t\t\t\"%s\/%s\",\n\t\t\telm.SecretMetadata().Namespace,\n\t\t\telm.SecretMetadata().Name,\n\t\t)\n\t\ttlsBySecret[key] = append(\n\t\t\ttlsBySecret[key],\n\t\t\telm,\n\t\t)\n\t}\n\n\toutput := []kubelego.Tls{}\n\tfor key, slice := range tlsBySecret {\n\t\tif len(slice) == 1 {\n\t\t\toutput = append(output, slice...)\n\t\t\tcontinue\n\t\t}\n\n\t\ttexts := []string{}\n\t\tfor _, elem := range slice {\n\t\t\ttexts = append(\n\t\t\t\ttexts,\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"ingress %s\/%s (hosts: %s)\",\n\t\t\t\t\telem.IngressMetadata().Namespace,\n\t\t\t\t\telem.IngressMetadata().Name,\n\t\t\t\t\tstrings.Join(elem.Hosts(), \", \"),\n\t\t\t\t),\n\t\t\t)\n\t\t}\n\t\tkl.Log().Warnf(\n\t\t\t\"the secret %s is used multiple times. These linked TLS ingress elements where ignored: %s\",\n\t\t\tkey,\n\t\t\tstrings.Join(texts, \", \"),\n\t\t)\n\t}\n\n\treturn output\n}\n\nfunc (kl *KubeLego) processProvider(ing kubelego.Ingress) (err error) {\n\n\tfor providerName, provider := range kl.legoIngressProvider {\n\t\terr := provider.Reset()\n\t\tif err != nil {\n\t\t\tprovider.Log().Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif providerName == ing.IngressProvider() {\n\t\t\terr = provider.Process(ing)\n\t\t\tif err != nil {\n\t\t\t\tprovider.Log().Error(err)\n\t\t\t}\n\t\t}\n\n\t\terr = provider.Finalize()\n\t\tif err != nil {\n\t\t\tprovider.Log().Error(err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (kl *KubeLego) reconfigure(ing kubelego.Ingress) error {\n\t\/\/ setup providers\n\tkl.processProvider(ing)\n\n\t\/\/ normify tls config\n\t\/\/ NOTE: this no longer performs a global deduplication\n\ttlsSlice := kl.TlsIgnoreDuplicatedSecrets(ing.Tls())\n\n\t\/\/ process certificate validity\n\tkl.Log().Info(\"process certificate requests for ingresses\")\n\terrs := kl.TlsProcessHosts(tlsSlice)\n\tif len(errs) > 0 {\n\t\terrsStr := []string{}\n\t\tfor _, err := range errs {\n\t\t\terrsStr = append(errsStr, fmt.Sprintf(\"%s\", err))\n\t\t}\n\t\tkl.Log().Error(\"Error while processing certificate requests: \", strings.Join(errsStr, \", \"))\n\n\t\t\/\/ request a rerun of reconfigure\n\t\tkl.workQueue.Add(true)\n\t}\n\n\treturn nil\n}\n\nfunc (kl *KubeLego) TlsProcessHosts(tlsSlice []kubelego.Tls) []error {\n\terrs := []error{}\n\tfor _, tlsElem := range tlsSlice {\n\t\terr := tlsElem.Process()\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\treturn errs\n}\n<commit_msg>Remove erroneous call to workQueue.Add<commit_after>package kubelego\n\nimport (\n\t\"github.com\/jetstack\/kube-lego\/pkg\/kubelego_const\"\n\n\t\"fmt\"\n\t\"strings\"\n)\n\nfunc (kl *KubeLego) TlsIgnoreDuplicatedSecrets(tlsSlice []kubelego.Tls) []kubelego.Tls {\n\n\ttlsBySecret := map[string][]kubelego.Tls{}\n\n\tfor _, elm := range tlsSlice {\n\t\tkey := fmt.Sprintf(\n\t\t\t\"%s\/%s\",\n\t\t\telm.SecretMetadata().Namespace,\n\t\t\telm.SecretMetadata().Name,\n\t\t)\n\t\ttlsBySecret[key] = append(\n\t\t\ttlsBySecret[key],\n\t\t\telm,\n\t\t)\n\t}\n\n\toutput := []kubelego.Tls{}\n\tfor key, slice := range tlsBySecret {\n\t\tif len(slice) == 1 {\n\t\t\toutput = append(output, slice...)\n\t\t\tcontinue\n\t\t}\n\n\t\ttexts := []string{}\n\t\tfor _, elem := range slice {\n\t\t\ttexts = append(\n\t\t\t\ttexts,\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"ingress %s\/%s (hosts: %s)\",\n\t\t\t\t\telem.IngressMetadata().Namespace,\n\t\t\t\t\telem.IngressMetadata().Name,\n\t\t\t\t\tstrings.Join(elem.Hosts(), \", \"),\n\t\t\t\t),\n\t\t\t)\n\t\t}\n\t\tkl.Log().Warnf(\n\t\t\t\"the secret %s is used multiple times. These linked TLS ingress elements where ignored: %s\",\n\t\t\tkey,\n\t\t\tstrings.Join(texts, \", \"),\n\t\t)\n\t}\n\n\treturn output\n}\n\nfunc (kl *KubeLego) processProvider(ing kubelego.Ingress) (err error) {\n\n\tfor providerName, provider := range kl.legoIngressProvider {\n\t\terr := provider.Reset()\n\t\tif err != nil {\n\t\t\tprovider.Log().Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif providerName == ing.IngressProvider() {\n\t\t\terr = provider.Process(ing)\n\t\t\tif err != nil {\n\t\t\t\tprovider.Log().Error(err)\n\t\t\t}\n\t\t}\n\n\t\terr = provider.Finalize()\n\t\tif err != nil {\n\t\t\tprovider.Log().Error(err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (kl *KubeLego) reconfigure(ing kubelego.Ingress) error {\n\t\/\/ setup providers\n\tkl.processProvider(ing)\n\n\t\/\/ normify tls config\n\t\/\/ NOTE: this no longer performs a global deduplication\n\ttlsSlice := kl.TlsIgnoreDuplicatedSecrets(ing.Tls())\n\n\t\/\/ process certificate validity\n\tkl.Log().Info(\"process certificate requests for ingresses\")\n\terrs := kl.TlsProcessHosts(tlsSlice)\n\tif len(errs) > 0 {\n\t\terrsStr := []string{}\n\t\tfor _, err := range errs {\n\t\t\terrsStr = append(errsStr, fmt.Sprintf(\"%s\", err))\n\t\t}\n\t\tkl.Log().Error(\"Error while processing certificate requests: \", strings.Join(errsStr, \", \"))\n\t}\n\n\treturn nil\n}\n\nfunc (kl *KubeLego) TlsProcessHosts(tlsSlice []kubelego.Tls) []error {\n\terrs := []error{}\n\tfor _, tlsElem := range tlsSlice {\n\t\terr := tlsElem.Process()\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\treturn errs\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 The OpenPitrix Authors. All rights reserved.\n\/\/ Use of this source code is governed by a Apache license\n\/\/ that can be found in the LICENSE file.\n\npackage logger\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc readBuf(buf *bytes.Buffer) string {\n\tstr := buf.String()\n\tbuf.Reset()\n\treturn str\n}\n\nfunc TestLogger(t *testing.T) {\n\tbuf := new(bytes.Buffer)\n\tSetOutput(buf)\n\n\tDebug(\"debug log, should ignore by default\")\n\tassert.Empty(t, readBuf(buf))\n\n\tInfo(\"info log, should visable\")\n\tassert.Contains(t, readBuf(buf), \"info log, should visable\")\n\n\tInfo(\"format [%d]\", 111)\n\tassert.Contains(t, readBuf(buf), \"format [111]\")\n\n\tSetLevelByString(\"debug\")\n\tDebug(\"debug log, now it becomes visible\")\n\tassert.Contains(t, readBuf(buf), \"debug log, now it becomes visible\")\n\n\tlogger = NewLogger()\n\tlogger.SetPrefix(\"(prefix)\").SetSuffix(\"(suffix)\").SetOutput(buf)\n\n\tlogger.Warn(\"log_content\")\n\tassert.Contains(t, readBuf(buf), \" -WARNING- (prefix)log_content (testing.go:777)(suffix)\")\n}\n<commit_msg>Fix ci error<commit_after>\/\/ Copyright 2018 The OpenPitrix Authors. All rights reserved.\n\/\/ Use of this source code is governed by a Apache license\n\/\/ that can be found in the LICENSE file.\n\npackage logger\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc readBuf(buf *bytes.Buffer) string {\n\tstr := buf.String()\n\tbuf.Reset()\n\treturn str\n}\n\nfunc TestLogger(t *testing.T) {\n\tbuf := new(bytes.Buffer)\n\tSetOutput(buf)\n\n\tDebug(\"debug log, should ignore by default\")\n\tassert.Empty(t, readBuf(buf))\n\n\tInfo(\"info log, should visable\")\n\tassert.Contains(t, readBuf(buf), \"info log, should visable\")\n\n\tInfo(\"format [%d]\", 111)\n\tassert.Contains(t, readBuf(buf), \"format [111]\")\n\n\tSetLevelByString(\"debug\")\n\tDebug(\"debug log, now it becomes visible\")\n\tassert.Contains(t, readBuf(buf), \"debug log, now it becomes visible\")\n\n\tlogger = NewLogger()\n\tlogger.SetPrefix(\"(prefix)\").SetSuffix(\"(suffix)\").SetOutput(buf)\n\n\tlogger.Warn(\"log_content\")\n\tassert.Regexp(t, \" -WARNING- \\\\(prefix\\\\)log_content \\\\(testing.go:\\\\d+\\\\)\\\\(suffix\\\\)\", readBuf(buf))\n}\n<|endoftext|>"} {"text":"<commit_before>package basic\n\nimport (\n\t\"errors\"\n\n\t\"github.com\/hellofresh\/janus\/pkg\/plugin\"\n\t\"github.com\/hellofresh\/janus\/pkg\/proxy\"\n\t\"github.com\/hellofresh\/janus\/pkg\/router\"\n)\n\nvar (\n\trepo Repository\n\tadminRouter router.Router\n)\n\nfunc init() {\n\tplugin.RegisterEventHook(plugin.StartupEvent, onStartup)\n\tplugin.RegisterEventHook(plugin.AdminAPIStartupEvent, onAdminAPIStartup)\n\n\tplugin.RegisterPlugin(\"basic_auth\", plugin.Plugin{\n\t\tAction: setupBasicAuth,\n\t})\n}\n\nfunc setupBasicAuth(def *proxy.RouterDefinition, rawConfig plugin.Config) error {\n\tif repo == nil {\n\t\treturn errors.New(\"the repository was not set by onStartup event\")\n\t}\n\n\tdef.AddMiddleware(NewBasicAuth(repo))\n\treturn nil\n}\n\nfunc onAdminAPIStartup(event interface{}) error {\n\te, ok := event.(plugin.OnAdminAPIStartup)\n\tif !ok {\n\t\treturn errors.New(\"could not convert event to admin startup type\")\n\t}\n\n\tadminRouter = e.Router\n\treturn nil\n}\n\nfunc onStartup(event interface{}) error {\n\tvar err error\n\n\te, ok := event.(plugin.OnStartup)\n\tif !ok {\n\t\treturn errors.New(\"could not convert event to startup type\")\n\t}\n\n\tif e.MongoSession == nil {\n\t\treturn ErrInvalidMongoDBSession\n\t}\n\n\tif adminRouter == nil {\n\t\treturn ErrInvalidAdminRouter\n\t}\n\n\trepo, err = NewMongoRepository(e.MongoSession)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thandlers := NewHandler(repo)\n\tgroup := adminRouter.Group(\"\/credentials\/basic_auth\")\n\t{\n\t\tgroup.GET(\"\/\", handlers.Index())\n\t\tgroup.POST(\"\/\", handlers.Create())\n\t\tgroup.GET(\"\/{username}\", handlers.Show())\n\t\tgroup.PUT(\"\/{username}\", handlers.Update())\n\t\tgroup.DELETE(\"\/{username}\", handlers.Delete())\n\t}\n\n\treturn nil\n}\n<commit_msg>Use memory repository when no mongo connection for basic auth pligin<commit_after>package basic\n\nimport (\n\t\"errors\"\n\n\t\"github.com\/hellofresh\/janus\/pkg\/plugin\"\n\t\"github.com\/hellofresh\/janus\/pkg\/proxy\"\n\t\"github.com\/hellofresh\/janus\/pkg\/router\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nvar (\n\trepo Repository\n\tadminRouter router.Router\n)\n\nfunc init() {\n\tplugin.RegisterEventHook(plugin.StartupEvent, onStartup)\n\tplugin.RegisterEventHook(plugin.AdminAPIStartupEvent, onAdminAPIStartup)\n\n\tplugin.RegisterPlugin(\"basic_auth\", plugin.Plugin{\n\t\tAction: setupBasicAuth,\n\t})\n}\n\nfunc setupBasicAuth(def *proxy.RouterDefinition, rawConfig plugin.Config) error {\n\tif repo == nil {\n\t\treturn errors.New(\"the repository was not set by onStartup event\")\n\t}\n\n\tdef.AddMiddleware(NewBasicAuth(repo))\n\treturn nil\n}\n\nfunc onAdminAPIStartup(event interface{}) error {\n\te, ok := event.(plugin.OnAdminAPIStartup)\n\tif !ok {\n\t\treturn errors.New(\"could not convert event to admin startup type\")\n\t}\n\n\tadminRouter = e.Router\n\treturn nil\n}\n\nfunc onStartup(event interface{}) error {\n\tvar err error\n\n\te, ok := event.(plugin.OnStartup)\n\tif !ok {\n\t\treturn errors.New(\"could not convert event to startup type\")\n\t}\n\n\tvar repo Repository\n\tif e.MongoSession == nil {\n\t\tlog.Debug(\"Mongo session is not set, using memory repository for basic auth plugin\")\n\n\t\trepo = NewInMemoryRepository()\n\t} else {\n\t\tlog.Debug(\"Mongo session is set, using mongo repository for basic auth plugin\")\n\n\t\trepo, err = NewMongoRepository(e.MongoSession)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif adminRouter == nil {\n\t\treturn ErrInvalidAdminRouter\n\t}\n\n\thandlers := NewHandler(repo)\n\tgroup := adminRouter.Group(\"\/credentials\/basic_auth\")\n\t{\n\t\tgroup.GET(\"\/\", handlers.Index())\n\t\tgroup.POST(\"\/\", handlers.Create())\n\t\tgroup.GET(\"\/{username}\", handlers.Show())\n\t\tgroup.PUT(\"\/{username}\", handlers.Update())\n\t\tgroup.DELETE(\"\/{username}\", handlers.Delete())\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015-2016 trivago GmbH\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage core\n\nimport (\n\t\"github.com\/sirupsen\/logrus\"\n\t\"hash\/fnv\"\n\t\"sync\"\n)\n\nvar (\n\t\/\/ GeneratedRouterPrefix is prepended to all generated routers\n\tGeneratedRouterPrefix = \"_GENERATED_\"\n)\n\n\/\/ streamRegistry holds routers mapped by their MessageStreamID as well as a\n\/\/ reverse lookup of MessageStreamID to stream name.\ntype streamRegistry struct {\n\trouters map[MessageStreamID]Router\n\tname map[MessageStreamID]string\n\tnameGuard *sync.RWMutex\n\tstreamGuard *sync.RWMutex\n\twildcard []Producer\n}\n\n\/\/ StreamRegistry is the global instance of streamRegistry used to store the\n\/\/ all registered routers.\nvar StreamRegistry = streamRegistry{\n\trouters: make(map[MessageStreamID]Router),\n\tstreamGuard: new(sync.RWMutex),\n\tname: make(map[MessageStreamID]string),\n\tnameGuard: new(sync.RWMutex),\n}\n\n\/\/ GetStreamID is deprecated\nfunc GetStreamID(stream string) MessageStreamID {\n\treturn StreamRegistry.GetStreamID(stream)\n}\n\n\/\/ GetStreamID returns the integer representation of a given stream name.\nfunc (registry *streamRegistry) GetStreamID(stream string) MessageStreamID {\n\thash := fnv.New64a()\n\thash.Write([]byte(stream))\n\tstreamID := MessageStreamID(hash.Sum64())\n\n\tregistry.nameGuard.Lock()\n\tregistry.name[streamID] = stream\n\tregistry.nameGuard.Unlock()\n\n\treturn streamID\n}\n\n\/\/ GetStreamName does a reverse lookup for a given MessageStreamID and returns\n\/\/ the corresponding name. If the MessageStreamID is not registered, an empty\n\/\/ string is returned.\nfunc (registry streamRegistry) GetStreamName(streamID MessageStreamID) string {\n\tswitch streamID {\n\tcase LogInternalStreamID:\n\t\treturn LogInternalStream\n\n\tcase WildcardStreamID:\n\t\treturn WildcardStream\n\n\tcase InvalidStreamID:\n\t\treturn InvalidStream\n\n\tdefault:\n\t\tregistry.nameGuard.RLock()\n\t\tname, exists := registry.name[streamID]\n\t\tregistry.nameGuard.RUnlock()\n\n\t\tif exists {\n\t\t\treturn name \/\/ ### return, found ###\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ GetRouterByStreamName returns a registered stream by name. See GetRouter.\nfunc (registry streamRegistry) GetRouterByStreamName(name string) Router {\n\tstreamID := registry.GetStreamID(name)\n\treturn registry.GetRouter(streamID)\n}\n\n\/\/ GetRouter returns a registered stream or nil\nfunc (registry streamRegistry) GetRouter(id MessageStreamID) Router {\n\tregistry.streamGuard.RLock()\n\tstream, exists := registry.routers[id]\n\tregistry.streamGuard.RUnlock()\n\n\tif exists {\n\t\treturn stream\n\t}\n\treturn nil\n}\n\n\/\/ IsStreamRegistered returns true if the stream for the given id is registered.\nfunc (registry streamRegistry) IsStreamRegistered(id MessageStreamID) bool {\n\tregistry.streamGuard.RLock()\n\t_, exists := registry.routers[id]\n\tregistry.streamGuard.RUnlock()\n\n\treturn exists\n}\n\n\/\/ ForEachStream loops over all registered routers and calls the given function.\nfunc (registry streamRegistry) ForEachStream(callback func(streamID MessageStreamID, stream Router)) {\n\tregistry.streamGuard.RLock()\n\tdefer registry.streamGuard.RUnlock()\n\n\tfor streamID, router := range registry.routers {\n\t\tcallback(streamID, router)\n\t}\n}\n\n\/\/ WildcardProducersExist returns true if any producer is listening to the\n\/\/ wildcard stream.\nfunc (registry *streamRegistry) WildcardProducersExist() bool {\n\treturn len(registry.wildcard) > 0\n}\n\n\/\/ RegisterWildcardProducer adds a new producer to the list of known wildcard\n\/\/ prodcuers. This list has to be added to new routers upon creation to send\n\/\/ messages to producers listening to *.\n\/\/ Duplicates will be filtered.\n\/\/ This state of this list is undefined during the configuration phase.\nfunc (registry *streamRegistry) RegisterWildcardProducer(producers ...Producer) {\nnextProd:\n\tfor _, prod := range producers {\n\t\tfor _, existing := range registry.wildcard {\n\t\t\tif existing == prod {\n\t\t\t\tcontinue nextProd\n\t\t\t}\n\t\t}\n\t\tregistry.wildcard = append(registry.wildcard, prod)\n\t}\n}\n\n\/\/ AddWildcardProducersToRouter adds all known wildcard producers to a given\n\/\/ router. The state of the wildcard list is undefined during the configuration\n\/\/ phase.\nfunc (registry streamRegistry) AddWildcardProducersToRouter(router Router) {\n\tstreamID := router.GetStreamID()\n\tif streamID != LogInternalStreamID {\n\t\trouter.AddProducer(registry.wildcard...)\n\t}\n}\n\n\/\/ AddAllWildcardProducersToAllRouters executes AddWildcardProducersToRouter on\n\/\/ all currently registered routers\nfunc (registry *streamRegistry) AddAllWildcardProducersToAllRouters() {\n\tregistry.ForEachStream(\n\t\tfunc(streamID MessageStreamID, router Router) {\n\t\t\tregistry.AddWildcardProducersToRouter(router)\n\t\t})\n}\n\n\/\/ Register registers a router plugin to a given stream id\nfunc (registry *streamRegistry) Register(router Router, streamID MessageStreamID) {\n\tregistry.streamGuard.RLock()\n\t_, exists := registry.routers[streamID]\n\tregistry.streamGuard.RUnlock()\n\n\tif exists {\n\t\tlogrus.Warningf(\"%T attaches to an already occupied router (%s)\", router, registry.GetStreamName(streamID))\n\t\treturn \/\/ ### return, double registration ###\n\t}\n\n\tregistry.streamGuard.Lock()\n\tdefer registry.streamGuard.Unlock()\n\n\t\/\/ Test again inside critical section to avoid races\n\tif _, exists := registry.routers[streamID]; !exists {\n\t\tregistry.routers[streamID] = router\n\t\tCountRouters()\n\t}\n}\n\n\/\/ GetRouterOrFallback returns the router for the given streamID if it is registered.\n\/\/ If no router is registered for the given streamID the default router is used.\n\/\/ The default router is equivalent to an unconfigured router.Broadcast with\n\/\/ all wildcard producers already added.\nfunc (registry *streamRegistry) GetRouterOrFallback(streamID MessageStreamID) Router {\n\tif streamID == InvalidStreamID {\n\t\treturn nil \/\/ ### return, invalid stream does not have a router ###\n\t}\n\n\tregistry.streamGuard.RLock()\n\trouter, exists := registry.routers[streamID]\n\tregistry.streamGuard.RUnlock()\n\tif exists {\n\t\treturn router \/\/ ### return, already registered ###\n\t}\n\n\tregistry.streamGuard.Lock()\n\tdefer registry.streamGuard.Unlock()\n\n\t\/\/ Create router, avoid race conditions by check again in ciritical section\n\tif router, exists = registry.routers[streamID]; exists {\n\t\treturn router \/\/ ### return, lost the race ###\n\t}\n\n\tdefaultRouter := registry.createFallback(streamID)\n\tregistry.AddWildcardProducersToRouter(defaultRouter)\n\tregistry.routers[streamID] = defaultRouter\n\n\tCountRouters()\n\tCountFallbackRouters()\n\n\treturn defaultRouter\n}\n\nfunc (registry *streamRegistry) createFallback(streamID MessageStreamID) Router {\n\tstreamName := registry.GetStreamName(streamID)\n\tlogrus.Debug(\"Creating fallback stream for \", streamName)\n\n\tconfig := NewPluginConfig(GeneratedRouterPrefix+streamName, \"router.Broadcast\")\n\tconfig.Override(\"stream\", streamName)\n\n\tplugin, err := NewPluginWithConfig(config)\n\tif err != nil {\n\t\tpanic(err) \/\/ this has to always work, otherwise: panic\n\t}\n\n\tstream := plugin.(Router) \/\/ panic if not!\n\treturn stream\n}\n<commit_msg>added missing TraceInternalStreamID<commit_after>\/\/ Copyright 2015-2016 trivago GmbH\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage core\n\nimport (\n\t\"github.com\/sirupsen\/logrus\"\n\t\"hash\/fnv\"\n\t\"sync\"\n)\n\nvar (\n\t\/\/ GeneratedRouterPrefix is prepended to all generated routers\n\tGeneratedRouterPrefix = \"_GENERATED_\"\n)\n\n\/\/ streamRegistry holds routers mapped by their MessageStreamID as well as a\n\/\/ reverse lookup of MessageStreamID to stream name.\ntype streamRegistry struct {\n\trouters map[MessageStreamID]Router\n\tname map[MessageStreamID]string\n\tnameGuard *sync.RWMutex\n\tstreamGuard *sync.RWMutex\n\twildcard []Producer\n}\n\n\/\/ StreamRegistry is the global instance of streamRegistry used to store the\n\/\/ all registered routers.\nvar StreamRegistry = streamRegistry{\n\trouters: make(map[MessageStreamID]Router),\n\tstreamGuard: new(sync.RWMutex),\n\tname: make(map[MessageStreamID]string),\n\tnameGuard: new(sync.RWMutex),\n}\n\n\/\/ GetStreamID is deprecated\nfunc GetStreamID(stream string) MessageStreamID {\n\treturn StreamRegistry.GetStreamID(stream)\n}\n\n\/\/ GetStreamID returns the integer representation of a given stream name.\nfunc (registry *streamRegistry) GetStreamID(stream string) MessageStreamID {\n\thash := fnv.New64a()\n\thash.Write([]byte(stream))\n\tstreamID := MessageStreamID(hash.Sum64())\n\n\tregistry.nameGuard.Lock()\n\tregistry.name[streamID] = stream\n\tregistry.nameGuard.Unlock()\n\n\treturn streamID\n}\n\n\/\/ GetStreamName does a reverse lookup for a given MessageStreamID and returns\n\/\/ the corresponding name. If the MessageStreamID is not registered, an empty\n\/\/ string is returned.\nfunc (registry streamRegistry) GetStreamName(streamID MessageStreamID) string {\n\tswitch streamID {\n\tcase LogInternalStreamID:\n\t\treturn LogInternalStream\n\n\tcase WildcardStreamID:\n\t\treturn WildcardStream\n\n\tcase InvalidStreamID:\n\t\treturn InvalidStream\n\n\tcase TraceInternalStreamID:\n\t\treturn TraceInternalStream\n\n\tdefault:\n\t\tregistry.nameGuard.RLock()\n\t\tname, exists := registry.name[streamID]\n\t\tregistry.nameGuard.RUnlock()\n\n\t\tif exists {\n\t\t\treturn name \/\/ ### return, found ###\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ GetRouterByStreamName returns a registered stream by name. See GetRouter.\nfunc (registry streamRegistry) GetRouterByStreamName(name string) Router {\n\tstreamID := registry.GetStreamID(name)\n\treturn registry.GetRouter(streamID)\n}\n\n\/\/ GetRouter returns a registered stream or nil\nfunc (registry streamRegistry) GetRouter(id MessageStreamID) Router {\n\tregistry.streamGuard.RLock()\n\tstream, exists := registry.routers[id]\n\tregistry.streamGuard.RUnlock()\n\n\tif exists {\n\t\treturn stream\n\t}\n\treturn nil\n}\n\n\/\/ IsStreamRegistered returns true if the stream for the given id is registered.\nfunc (registry streamRegistry) IsStreamRegistered(id MessageStreamID) bool {\n\tregistry.streamGuard.RLock()\n\t_, exists := registry.routers[id]\n\tregistry.streamGuard.RUnlock()\n\n\treturn exists\n}\n\n\/\/ ForEachStream loops over all registered routers and calls the given function.\nfunc (registry streamRegistry) ForEachStream(callback func(streamID MessageStreamID, stream Router)) {\n\tregistry.streamGuard.RLock()\n\tdefer registry.streamGuard.RUnlock()\n\n\tfor streamID, router := range registry.routers {\n\t\tcallback(streamID, router)\n\t}\n}\n\n\/\/ WildcardProducersExist returns true if any producer is listening to the\n\/\/ wildcard stream.\nfunc (registry *streamRegistry) WildcardProducersExist() bool {\n\treturn len(registry.wildcard) > 0\n}\n\n\/\/ RegisterWildcardProducer adds a new producer to the list of known wildcard\n\/\/ prodcuers. This list has to be added to new routers upon creation to send\n\/\/ messages to producers listening to *.\n\/\/ Duplicates will be filtered.\n\/\/ This state of this list is undefined during the configuration phase.\nfunc (registry *streamRegistry) RegisterWildcardProducer(producers ...Producer) {\nnextProd:\n\tfor _, prod := range producers {\n\t\tfor _, existing := range registry.wildcard {\n\t\t\tif existing == prod {\n\t\t\t\tcontinue nextProd\n\t\t\t}\n\t\t}\n\t\tregistry.wildcard = append(registry.wildcard, prod)\n\t}\n}\n\n\/\/ AddWildcardProducersToRouter adds all known wildcard producers to a given\n\/\/ router. The state of the wildcard list is undefined during the configuration\n\/\/ phase.\nfunc (registry streamRegistry) AddWildcardProducersToRouter(router Router) {\n\tstreamID := router.GetStreamID()\n\tif streamID != LogInternalStreamID {\n\t\trouter.AddProducer(registry.wildcard...)\n\t}\n}\n\n\/\/ AddAllWildcardProducersToAllRouters executes AddWildcardProducersToRouter on\n\/\/ all currently registered routers\nfunc (registry *streamRegistry) AddAllWildcardProducersToAllRouters() {\n\tregistry.ForEachStream(\n\t\tfunc(streamID MessageStreamID, router Router) {\n\t\t\tregistry.AddWildcardProducersToRouter(router)\n\t\t})\n}\n\n\/\/ Register registers a router plugin to a given stream id\nfunc (registry *streamRegistry) Register(router Router, streamID MessageStreamID) {\n\tregistry.streamGuard.RLock()\n\t_, exists := registry.routers[streamID]\n\tregistry.streamGuard.RUnlock()\n\n\tif exists {\n\t\tlogrus.Warningf(\"%T attaches to an already occupied router (%s)\", router, registry.GetStreamName(streamID))\n\t\treturn \/\/ ### return, double registration ###\n\t}\n\n\tregistry.streamGuard.Lock()\n\tdefer registry.streamGuard.Unlock()\n\n\t\/\/ Test again inside critical section to avoid races\n\tif _, exists := registry.routers[streamID]; !exists {\n\t\tregistry.routers[streamID] = router\n\t\tCountRouters()\n\t}\n}\n\n\/\/ GetRouterOrFallback returns the router for the given streamID if it is registered.\n\/\/ If no router is registered for the given streamID the default router is used.\n\/\/ The default router is equivalent to an unconfigured router.Broadcast with\n\/\/ all wildcard producers already added.\nfunc (registry *streamRegistry) GetRouterOrFallback(streamID MessageStreamID) Router {\n\tif streamID == InvalidStreamID {\n\t\treturn nil \/\/ ### return, invalid stream does not have a router ###\n\t}\n\n\tregistry.streamGuard.RLock()\n\trouter, exists := registry.routers[streamID]\n\tregistry.streamGuard.RUnlock()\n\tif exists {\n\t\treturn router \/\/ ### return, already registered ###\n\t}\n\n\tregistry.streamGuard.Lock()\n\tdefer registry.streamGuard.Unlock()\n\n\t\/\/ Create router, avoid race conditions by check again in ciritical section\n\tif router, exists = registry.routers[streamID]; exists {\n\t\treturn router \/\/ ### return, lost the race ###\n\t}\n\n\tdefaultRouter := registry.createFallback(streamID)\n\tregistry.AddWildcardProducersToRouter(defaultRouter)\n\tregistry.routers[streamID] = defaultRouter\n\n\tCountRouters()\n\tCountFallbackRouters()\n\n\treturn defaultRouter\n}\n\nfunc (registry *streamRegistry) createFallback(streamID MessageStreamID) Router {\n\tstreamName := registry.GetStreamName(streamID)\n\tlogrus.Debug(\"Creating fallback stream for \", streamName)\n\n\tconfig := NewPluginConfig(GeneratedRouterPrefix+streamName, \"router.Broadcast\")\n\tconfig.Override(\"stream\", streamName)\n\n\tplugin, err := NewPluginWithConfig(config)\n\tif err != nil {\n\t\tpanic(err) \/\/ this has to always work, otherwise: panic\n\t}\n\n\tstream := plugin.(Router) \/\/ panic if not!\n\treturn stream\n}\n<|endoftext|>"} {"text":"<commit_before>package geojson\n\nimport (\n\t\"encoding\/json\"\n\t\"github.com\/foobaz\/geom\"\n\t\"reflect\"\n)\n\nfunc ToGeoJSON(t geom.T) (interface{}, error) {\n\tswitch g := t.(type) {\n\tcase geom.Point:\n\t\treturn &Geometry{\n\t\t\tType: \"Point\",\n\t\t\tCoordinates: g,\n\t\t}, nil\n\tcase geom.LineString:\n\t\treturn Geometry{\n\t\t\tType: \"LineString\",\n\t\t\tCoordinates: g,\n\t\t}, nil\n\tcase geom.Polygon:\n\t\treturn Geometry{\n\t\t\tType: \"Polygon\",\n\t\t\tCoordinates: g,\n\t\t}, nil\n\tcase geom.MultiPolygon:\n\t\treturn Geometry{\n\t\t\tType: \"MultiPolygon\",\n\t\t\tCoordinates: g,\n\t\t}, nil\n\tcase geom.Feature:\n\t\tserializable, err := ToGeoJSON(g.T)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tgeometry, ok := serializable.(Geometry)\n\t\tif !ok {\n\t\t\treturn nil, &UnsupportedGeometryError{reflect.TypeOf(geometry).String()}\n\t\t}\n\n\t\treturn Feature{\n\t\t\tType: \"Feature\",\n\t\t\tGeometry: geometry,\n\t\t\tProperties: g.Properties,\n\t\t}, nil\n\tcase geom.FeatureCollection:\n\t\tfeatures := make([]Feature, 0, len(g.Features))\n\t\tfor _, geomFeature := range g.Features {\n\t\t\tserializable, err := ToGeoJSON(geomFeature)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tjsonFeature, ok := serializable.(Feature)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfeatures = append(features, jsonFeature)\n\t\t}\n\t\treturn FeatureCollection{\n\t\t\tType: \"FeatureCollection\",\n\t\t\tFeatures: features,\n\t\t\tProperties: g.Properties,\n\t\t}, nil\n\tdefault:\n\t\treturn nil, &UnsupportedGeometryError{reflect.TypeOf(g).String()}\n\t}\n}\n\nfunc Encode(g geom.T) ([]byte, error) {\n\tif object, err := ToGeoJSON(g); err == nil {\n\t\treturn json.Marshal(object)\n\t} else {\n\t\treturn nil, err\n\t}\n}\n<commit_msg>validate geojson more, return errors for invalid geometry<commit_after>package geojson\n\nimport (\n\t\"encoding\/json\"\n\t\"reflect\"\n\n\t\"github.com\/foobaz\/geom\"\n)\n\nfunc ToGeoJSON(t geom.T) (interface{}, error) {\n\tswitch g := t.(type) {\n\tcase geom.Point:\n\t\treturn &Geometry{\n\t\t\tType: \"Point\",\n\t\t\tCoordinates: g,\n\t\t}, nil\n\tcase geom.LineString:\n\t\terr := validateLineString(g)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn Geometry{\n\t\t\tType: \"LineString\",\n\t\t\tCoordinates: g,\n\t\t}, nil\n\tcase geom.MultiLineString:\n\t\terr := validateMultiLineString(g)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn Geometry{\n\t\t\tType: \"MultiLineString\",\n\t\t\tCoordinates: g,\n\t\t}, nil\n\tcase geom.Polygon:\n\t\terr := validatePolygon(g)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn Geometry{\n\t\t\tType: \"Polygon\",\n\t\t\tCoordinates: g,\n\t\t}, nil\n\tcase geom.MultiPolygon:\n\t\terr := validateMultiPolygon(g)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn Geometry{\n\t\t\tType: \"MultiPolygon\",\n\t\t\tCoordinates: g,\n\t\t}, nil\n\tcase geom.Feature:\n\t\tserializable, err := ToGeoJSON(g.T)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tgeometry, ok := serializable.(Geometry)\n\t\tif !ok {\n\t\t\treturn nil, &UnsupportedGeometryError{reflect.TypeOf(geometry).String()}\n\t\t}\n\n\t\treturn Feature{\n\t\t\tType: \"Feature\",\n\t\t\tGeometry: geometry,\n\t\t\tProperties: g.Properties,\n\t\t}, nil\n\tcase geom.FeatureCollection:\n\t\tfeatures := make([]Feature, 0, len(g.Features))\n\t\tfor _, geomFeature := range g.Features {\n\t\t\tserializable, err := ToGeoJSON(geomFeature)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tjsonFeature, ok := serializable.(Feature)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfeatures = append(features, jsonFeature)\n\t\t}\n\t\treturn FeatureCollection{\n\t\t\tType: \"FeatureCollection\",\n\t\t\tFeatures: features,\n\t\t\tProperties: g.Properties,\n\t\t}, nil\n\tdefault:\n\t\treturn nil, &UnsupportedGeometryError{reflect.TypeOf(g).String()}\n\t}\n}\n\nfunc Encode(g geom.T) ([]byte, error) {\n\tif object, err := ToGeoJSON(g); err == nil {\n\t\treturn json.Marshal(object)\n\t} else {\n\t\treturn nil, err\n\t}\n}\n\nfunc validateLineString(l geom.LineString) error {\n\tpointCount := len(l)\n\tif pointCount < 2 {\n\t\treturn InsufficientPointsError{pointCount}\n\t}\n\n\treturn nil\n}\n\nfunc validateMultiLineString(m geom.MultiLineString) error {\n\tfor _, l := range m {\n\t\terr := validateLineString(l)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc validatePolygon(p geom.Polygon) error {\n\tif len(p) == 0 {\n\t\treturn InsufficientPointsError{0}\n\t}\n\n\tfor _, r := range p {\n\t\tpointCount := len(r)\n\t\tif pointCount < 4 {\n\t\t\treturn InsufficientPointsError{pointCount}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc validateMultiPolygon(m geom.MultiPolygon) error {\n\tfor _, p := range m {\n\t\terr := validatePolygon(p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package realtime\n\nimport (\n\t\"encoding\/json\"\n\n\t\"github.com\/cozy\/cozy-stack\/pkg\/logger\"\n\tredis \"github.com\/go-redis\/redis\"\n)\n\nconst eventsRedisKey = \"realtime:events\"\n\ntype redisHub struct {\n\tc *redis.Client\n\tmem *memHub\n\tlocal *topic\n}\n\nfunc newRedisHub(c *redis.Client) *redisHub {\n\tlocal := newTopic(\"*\")\n\tmem := newMemHub()\n\thub := &redisHub{c, mem, local}\n\tgo hub.start()\n\treturn hub\n}\n\ntype jsonDoc struct {\n\tM map[string]interface{}\n\tType string\n}\n\nfunc (j jsonDoc) ID() string { id, _ := j.M[\"_id\"].(string); return id }\nfunc (j jsonDoc) DocType() string { return j.Type }\nfunc (j *jsonDoc) MarshalJSON() ([]byte, error) {\n\tj.M[\"_type\"] = j.Type\n\tdefer delete(j.M, \"_type\")\n\treturn json.Marshal(j.M)\n}\n\nfunc toJSONDoc(d map[string]interface{}) *jsonDoc {\n\tif d == nil {\n\t\treturn nil\n\t}\n\tdoctype, _ := d[\"_type\"].(string)\n\tdelete(d, \"_type\")\n\treturn &jsonDoc{d, doctype}\n}\n\ntype jsonEvent struct {\n\tDomain string\n\tVerb string\n\tDoc *jsonDoc\n\tOld *jsonDoc\n}\n\nfunc (j *jsonEvent) UnmarshalJSON(buf []byte) error {\n\tvar m map[string]interface{}\n\tif err := json.Unmarshal(buf, &m); err != nil {\n\t\treturn err\n\t}\n\tj.Domain, _ = m[\"domain\"].(string)\n\tj.Verb, _ = m[\"verb\"].(string)\n\tif doc, ok := m[\"doc\"].(map[string]interface{}); ok {\n\t\tj.Doc = toJSONDoc(doc)\n\t}\n\tif old, ok := m[\"old\"].(map[string]interface{}); ok {\n\t\tj.Old = toJSONDoc(old)\n\t}\n\treturn nil\n}\n\nfunc (h *redisHub) start() {\n\tsub := h.c.Subscribe(eventsRedisKey)\n\tlog := logger.WithNamespace(\"realtime-redis\")\n\tfor msg := range sub.Channel() {\n\t\tje := jsonEvent{}\n\t\tbuf := []byte(msg.Payload)\n\t\tif err := json.Unmarshal(buf, &je); err != nil {\n\t\t\tlog.Warnf(\"Error on start: %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\te := &Event{\n\t\t\tDomain: je.Domain,\n\t\t\tVerb: je.Verb,\n\t\t\tDoc: je.Doc,\n\t\t\tOldDoc: je.Old,\n\t\t}\n\t\th.mem.Publish(e)\n\t}\n}\n\nfunc (h *redisHub) GetTopic(domain, doctype string) *topic {\n\treturn nil\n}\n\nfunc (h *redisHub) Publish(e *Event) {\n\th.local.broadcast <- e\n\tbuf, err := json.Marshal(e)\n\tif err != nil {\n\t\tlog := logger.WithNamespace(\"realtime-redis\")\n\t\tlog.Warnf(\"Error on publish: %s\", err)\n\t\treturn\n\t}\n\th.c.Publish(eventsRedisKey, string(buf))\n}\n\nfunc (h *redisHub) Subscriber(domain string) *DynamicSubscriber {\n\treturn h.mem.Subscriber(domain)\n}\n\nfunc (h *redisHub) SubscribeLocalAll() *DynamicSubscriber {\n\tds := newDynamicSubscriber(nil, \"\")\n\tds.addTopic(h.local, \"\")\n\treturn ds\n}\n<commit_msg>Preserve doctype when transfering docs in redis for realtime<commit_after>package realtime\n\nimport (\n\t\"encoding\/json\"\n\t\"strings\"\n\n\t\"github.com\/cozy\/cozy-stack\/pkg\/logger\"\n\tredis \"github.com\/go-redis\/redis\"\n)\n\nconst eventsRedisKey = \"realtime:events\"\n\ntype redisHub struct {\n\tc *redis.Client\n\tmem *memHub\n\tlocal *topic\n}\n\nfunc newRedisHub(c *redis.Client) *redisHub {\n\tlocal := newTopic(\"*\")\n\tmem := newMemHub()\n\thub := &redisHub{c, mem, local}\n\tgo hub.start()\n\treturn hub\n}\n\ntype jsonDoc struct {\n\tM map[string]interface{}\n\tType string\n}\n\nfunc (j jsonDoc) ID() string { id, _ := j.M[\"_id\"].(string); return id }\nfunc (j jsonDoc) DocType() string { return j.Type }\nfunc (j *jsonDoc) MarshalJSON() ([]byte, error) {\n\tj.M[\"_type\"] = j.Type\n\tdefer delete(j.M, \"_type\")\n\treturn json.Marshal(j.M)\n}\n\nfunc toJSONDoc(d map[string]interface{}) *jsonDoc {\n\tif d == nil {\n\t\treturn nil\n\t}\n\tdoctype, _ := d[\"_type\"].(string)\n\tdelete(d, \"_type\")\n\treturn &jsonDoc{d, doctype}\n}\n\ntype jsonEvent struct {\n\tDomain string\n\tVerb string\n\tDoc *jsonDoc\n\tOld *jsonDoc\n}\n\nfunc (j *jsonEvent) UnmarshalJSON(buf []byte) error {\n\tvar m map[string]interface{}\n\tif err := json.Unmarshal(buf, &m); err != nil {\n\t\treturn err\n\t}\n\tj.Domain, _ = m[\"domain\"].(string)\n\tj.Verb, _ = m[\"verb\"].(string)\n\tif doc, ok := m[\"doc\"].(map[string]interface{}); ok {\n\t\tj.Doc = toJSONDoc(doc)\n\t}\n\tif old, ok := m[\"old\"].(map[string]interface{}); ok {\n\t\tj.Old = toJSONDoc(old)\n\t}\n\treturn nil\n}\n\nfunc (h *redisHub) start() {\n\tsub := h.c.Subscribe(eventsRedisKey)\n\tlog := logger.WithNamespace(\"realtime-redis\")\n\tfor msg := range sub.Channel() {\n\t\tje := jsonEvent{}\n\t\tparts := strings.SplitN(msg.Payload, \",\", 2)\n\t\tif len(parts) < 2 {\n\t\t\tlog.Warnf(\"Invalid payload: %s\", msg.Payload)\n\t\t\tcontinue\n\t\t}\n\t\tdoctype := parts[0]\n\t\tbuf := []byte(parts[1])\n\t\tif err := json.Unmarshal(buf, &je); err != nil {\n\t\t\tlog.Warnf(\"Error on start: %s\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif je.Doc != nil {\n\t\t\tje.Doc.Type = doctype\n\t\t}\n\t\tif je.Old != nil {\n\t\t\tje.Old.Type = doctype\n\t\t}\n\t\te := &Event{\n\t\t\tDomain: je.Domain,\n\t\t\tVerb: je.Verb,\n\t\t\tDoc: je.Doc,\n\t\t\tOldDoc: je.Old,\n\t\t}\n\t\th.mem.Publish(e)\n\t}\n}\n\nfunc (h *redisHub) GetTopic(domain, doctype string) *topic {\n\treturn nil\n}\n\nfunc (h *redisHub) Publish(e *Event) {\n\th.local.broadcast <- e\n\tbuf, err := json.Marshal(e)\n\tif err != nil {\n\t\tlog := logger.WithNamespace(\"realtime-redis\")\n\t\tlog.Warnf(\"Error on publish: %s\", err)\n\t\treturn\n\t}\n\th.c.Publish(eventsRedisKey, e.Doc.DocType()+\",\"+string(buf))\n}\n\nfunc (h *redisHub) Subscriber(domain string) *DynamicSubscriber {\n\treturn h.mem.Subscriber(domain)\n}\n\nfunc (h *redisHub) SubscribeLocalAll() *DynamicSubscriber {\n\tds := newDynamicSubscriber(nil, \"\")\n\tds.addTopic(h.local, \"\")\n\treturn ds\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage sync\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"path\/filepath\"\n\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/build\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/schema\/latest\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/util\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/watch\"\n\t\"github.com\/pkg\/errors\"\n)\n\ntype Syncer interface {\n\tSync(s *Item) error\n}\n\ntype Item struct {\n\tImage string\n\tCopy map[string]string\n\tDelete map[string]string\n}\n\nfunc NewItem(a *latest.Artifact, e watch.Events, builds []build.Artifact) (*Item, error) {\n\t\/\/ If there are no changes, short circuit and don't sync anything\n\tif !e.HasChanged() || a.Sync == nil || len(a.Sync) == 0 {\n\t\treturn nil, nil\n\t}\n\n\ttoCopy, err := intersect(a.Workspace, a.Sync, append(e.Added, e.Modified...))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"intersecting sync map and added, modified files\")\n\t}\n\n\ttoDelete, err := intersect(a.Workspace, a.Sync, e.Deleted)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"intersecting sync map and added, modified files\")\n\t}\n\n\tif toCopy == nil || toDelete == nil {\n\t\treturn nil, nil\n\t}\n\n\ttag := latestTag(a.ImageName, builds)\n\tif tag == \"\" {\n\t\treturn nil, fmt.Errorf(\"Could not find latest tag for image %s in builds: %s\", a.ImageName, builds)\n\t}\n\n\treturn &Item{\n\t\tImage: tag,\n\t\tCopy: toCopy,\n\t\tDelete: toDelete,\n\t}, nil\n}\n\nfunc latestTag(image string, builds []build.Artifact) string {\n\tfor _, build := range builds {\n\t\tif build.ImageName == image {\n\t\t\treturn build.Tag\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc intersect(context string, syncMap map[string]string, files []string) (map[string]string, error) {\n\tret := map[string]string{}\n\tfor _, f := range files {\n\t\trelPath, err := filepath.Rel(context, f)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"changed file %s can't be found relative to context %s\", f, context)\n\t\t}\n\t\tfor p, dst := range syncMap {\n\t\t\tmatch, err := filepath.Match(p, relPath)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"pattern error for %s\", relPath)\n\t\t\t}\n\t\t\tif !match {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t\t\/\/ If the source has special match characters,\n\t\t\t\/\/ the destination must be a directory\n\t\t\t\/\/ The path package must be used here, since the destination is always\n\t\t\t\/\/ a linux filesystem.\n\t\t\tif util.HasMeta(p) {\n\t\t\t\tdst = path.Join(dst, filepath.Base(relPath))\n\t\t\t}\n\t\t\tret[f] = dst\n\t\t}\n\t}\n\treturn ret, nil\n}\n<commit_msg>Fix errors<commit_after>\/*\nCopyright 2018 The Skaffold Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage sync\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"path\/filepath\"\n\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/build\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/schema\/latest\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/util\"\n\t\"github.com\/GoogleContainerTools\/skaffold\/pkg\/skaffold\/watch\"\n\t\"github.com\/pkg\/errors\"\n)\n\ntype Syncer interface {\n\tSync(s *Item) error\n}\n\ntype Item struct {\n\tImage string\n\tCopy map[string]string\n\tDelete map[string]string\n}\n\nfunc NewItem(a *latest.Artifact, e watch.Events, builds []build.Artifact) (*Item, error) {\n\t\/\/ If there are no changes, short circuit and don't sync anything\n\tif !e.HasChanged() || a.Sync == nil || len(a.Sync) == 0 {\n\t\treturn nil, nil\n\t}\n\n\ttoCopy, err := intersect(a.Workspace, a.Sync, append(e.Added, e.Modified...))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"intersecting sync map and added, modified files\")\n\t}\n\n\ttoDelete, err := intersect(a.Workspace, a.Sync, e.Deleted)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"intersecting sync map and deleted files\")\n\t}\n\n\tif toCopy == nil || toDelete == nil {\n\t\treturn nil, nil\n\t}\n\n\ttag := latestTag(a.ImageName, builds)\n\tif tag == \"\" {\n\t\treturn nil, fmt.Errorf(\"could not find latest tag for image %s in builds: %v\", a.ImageName, builds)\n\t}\n\n\treturn &Item{\n\t\tImage: tag,\n\t\tCopy: toCopy,\n\t\tDelete: toDelete,\n\t}, nil\n}\n\nfunc latestTag(image string, builds []build.Artifact) string {\n\tfor _, build := range builds {\n\t\tif build.ImageName == image {\n\t\t\treturn build.Tag\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc intersect(context string, syncMap map[string]string, files []string) (map[string]string, error) {\n\tret := map[string]string{}\n\tfor _, f := range files {\n\t\trelPath, err := filepath.Rel(context, f)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"changed file %s can't be found relative to context %s\", f, context)\n\t\t}\n\t\tfor p, dst := range syncMap {\n\t\t\tmatch, err := filepath.Match(p, relPath)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"pattern error for %s\", relPath)\n\t\t\t}\n\t\t\tif !match {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t\t\/\/ If the source has special match characters,\n\t\t\t\/\/ the destination must be a directory\n\t\t\t\/\/ The path package must be used here, since the destination is always\n\t\t\t\/\/ a linux filesystem.\n\t\t\tif util.HasMeta(p) {\n\t\t\t\tdst = path.Join(dst, filepath.Base(relPath))\n\t\t\t}\n\t\t\tret[f] = dst\n\t\t}\n\t}\n\treturn ret, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Walter Schulze\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage funcs\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n)\n\nfunc reverse(uniq string) (name string) {\n\tfor n, us := range funcsMap.nameToUniq {\n\t\tfor _, u := range us {\n\t\t\tif uniq == u {\n\t\t\t\treturn n\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}\n\ntype stringer interface {\n\tString() string\n}\n\nfunc isVarConst(p1, p2 interface{}) (string, bool) {\n\tswitch p1.(type) {\n\tcase *varBool, *varBytes, *varDouble, *varInt, *varString, *varUint:\n\t\tswitch p2.(type) {\n\t\tcase *constBool, *constBytes, *constDouble, *constInt, *constString, *constUint:\n\t\t\treturn Sprint(p2), true\n\t\t}\n\t}\n\tswitch p2.(type) {\n\tcase *varBool, *varBytes, *varDouble, *varInt, *varString, *varUint:\n\t\tswitch p1.(type) {\n\t\tcase *constBool, *constBytes, *constDouble, *constInt, *constString, *constUint:\n\t\t\treturn Sprint(p1), true\n\t\t}\n\t}\n\treturn \"\", false\n}\n\nfunc Sprint(i interface{}) string {\n\te := reflect.ValueOf(i).Elem()\n\tuniqName := e.Type().Name()\n\tname := reverse(uniqName)\n\tnumFields := e.NumField()\n\tif numFields == 2 {\n\t\tp1 := e.Field(0).Interface()\n\t\tp2 := e.Field(1).Interface()\n\t\tv, ok := isVarConst(p1, p2)\n\t\tif ok {\n\t\t\tif name == \"eq\" {\n\t\t\t\treturn \"== \" + v\n\t\t\t}\n\t\t\tif name == \"ne\" {\n\t\t\t\treturn \"!= \" + v\n\t\t\t}\n\t\t\tif name == \"gt\" {\n\t\t\t\treturn \"> \" + v\n\t\t\t}\n\t\t\tif name == \"lt\" {\n\t\t\t\treturn \"< \" + v\n\t\t\t}\n\t\t\tif name == \"ge\" {\n\t\t\t\treturn \">= \" + v\n\t\t\t}\n\t\t\tif name == \"le\" {\n\t\t\t\treturn \"<= \" + v\n\t\t\t}\n\t\t}\n\t}\n\treturn sprint(i)\n}\n\nfunc sprint(i interface{}) string {\n\te := reflect.ValueOf(i).Elem()\n\tuniqName := e.Type().Name()\n\tname := reverse(uniqName)\n\tif len(name) == 0 {\n\t\tstrer, ok := i.(stringer)\n\t\tif !ok {\n\t\t\tpanic(\"unknown function without String() string method\")\n\t\t}\n\t\tname = strer.String()\n\t}\n\tif _, ok := i.(Const); ok {\n\t\treturn name\n\t}\n\tif _, ok := i.(Decoder); ok {\n\t\treturn name\n\t}\n\tnumFields := e.NumField()\n\tss := make([]string, 0, numFields)\n\tfor i := 0; i < numFields; i++ {\n\t\tif _, ok := e.Field(i).Type().MethodByName(\"Eval\"); !ok {\n\t\t\tcontinue\n\t\t}\n\t\tss = append(ss, sprint(e.Field(i).Interface()))\n\t}\n\treturn name + \"(\" + strings.Join(ss, \",\") + \")\"\n}\n\nfunc Equal(l, r interface{}) bool {\n\tle := reflect.ValueOf(l).Elem()\n\tlUniqName := le.Type().Name()\n\tre := reflect.ValueOf(r).Elem()\n\trUniqName := re.Type().Name()\n\tif lUniqName != rUniqName {\n\t\treturn false\n\t}\n\tif len(reverse(lUniqName)) == 0 {\n\t\t\/\/TODO maybe this could be done better or we could just always convert functions to strings to compare\n\t\treturn Sprint(l) == Sprint(r)\n\t}\n\tnumFields := le.NumField()\n\tfor i := 0; i < numFields; i++ {\n\t\tif _, ok := le.Field(i).Type().MethodByName(\"Eval\"); !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif !Equal(le.Field(i).Interface(), re.Field(i).Interface()) {\n\t\t\treturn false\n\t\t}\n\t}\n\tif Sprint(l) != Sprint(r) {\n\t\tfmt.Printf(\"wtf %v == %v\\n\", Sprint(l), Sprint(r))\n\t\tpanic(\"two non equal functions are equal\")\n\t}\n\treturn true\n}\n\nfunc Simplify(f Bool) Bool {\n\tif ff, ok := f.(*and); ok {\n\t\tv1 := Simplify(ff.V1)\n\t\tv2 := Simplify(ff.V2)\n\t\tif l, ok := v1.(*constBool); ok {\n\t\t\tif l.v == false {\n\t\t\t\treturn BoolConst(false)\n\t\t\t} else {\n\t\t\t\treturn v2\n\t\t\t}\n\t\t}\n\t\tif r, ok := v2.(*constBool); ok {\n\t\t\tif r.v == false {\n\t\t\t\treturn BoolConst(false)\n\t\t\t} else {\n\t\t\t\treturn v1\n\t\t\t}\n\t\t}\n\t\tif Equal(v1, v2) {\n\t\t\treturn v1\n\t\t}\n\t\tif l, ok := v1.(*not); ok {\n\t\t\tif Equal(l.V1, v2) {\n\t\t\t\treturn BoolConst(false)\n\t\t\t}\n\t\t}\n\t\tif r, ok := v2.(*not); ok {\n\t\t\tif Equal(v1, r.V1) {\n\t\t\t\treturn BoolConst(false)\n\t\t\t}\n\t\t}\n\t\tswitch vv1 := v1.(type) {\n\t\tcase *stringEq:\n\t\t\tif vv2, ok := v2.(*stringEq); ok {\n\t\t\t\tif vvv1, ok1 := isVarConst(vv1.V1, vv1.V2); ok1 {\n\t\t\t\t\tif vvv2, ok2 := isVarConst(vv2.V1, vv2.V2); ok2 {\n\t\t\t\t\t\tif vvv1 != vvv2 {\n\t\t\t\t\t\t\treturn BoolConst(false)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif vv2, ok := v2.(*stringNe); ok {\n\t\t\t\tif vvv1, ok1 := isVarConst(vv1.V1, vv1.V2); ok1 {\n\t\t\t\t\tif vvv2, ok2 := isVarConst(vv2.V1, vv2.V2); ok2 {\n\t\t\t\t\t\tif vvv1 != vvv2 {\n\t\t\t\t\t\t\treturn v1\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\treturn BoolConst(false)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase *stringNe:\n\t\t\tif vv2, ok := v2.(*stringEq); ok {\n\t\t\t\tif vvv1, ok1 := isVarConst(vv1.V1, vv1.V2); ok1 {\n\t\t\t\t\tif vvv2, ok2 := isVarConst(vv2.V1, vv2.V2); ok2 {\n\t\t\t\t\t\tif vvv1 != vvv2 {\n\t\t\t\t\t\t\treturn v2\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\treturn BoolConst(false)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase *intEq:\n\t\t\tif vv2, ok := v2.(*intEq); ok {\n\t\t\t\tif vvv1, ok1 := isVarConst(vv1.V1, vv1.V2); ok1 {\n\t\t\t\t\tif vvv2, ok2 := isVarConst(vv2.V1, vv2.V2); ok2 {\n\t\t\t\t\t\tif vvv1 != vvv2 {\n\t\t\t\t\t\t\treturn BoolConst(false)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif vv2, ok := v2.(*intNe); ok {\n\t\t\t\tif vvv1, ok1 := isVarConst(vv1.V1, vv1.V2); ok1 {\n\t\t\t\t\tif vvv2, ok2 := isVarConst(vv2.V1, vv2.V2); ok2 {\n\t\t\t\t\t\tif vvv1 != vvv2 {\n\t\t\t\t\t\t\treturn v1\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\treturn BoolConst(false)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase *intNe:\n\t\t\tif vv2, ok := v2.(*intEq); ok {\n\t\t\t\tif vvv1, ok1 := isVarConst(vv1.V1, vv1.V2); ok1 {\n\t\t\t\t\tif vvv2, ok2 := isVarConst(vv2.V1, vv2.V2); ok2 {\n\t\t\t\t\t\tif vvv1 != vvv2 {\n\t\t\t\t\t\t\treturn v2\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\treturn BoolConst(false)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn And(v1, v2)\n\t}\n\tif ff, ok := f.(*or); ok {\n\t\tv1 := Simplify(ff.V1)\n\t\tv2 := Simplify(ff.V2)\n\t\tif l, ok := v1.(*constBool); ok {\n\t\t\tif l.v == true {\n\t\t\t\treturn BoolConst(true)\n\t\t\t} else {\n\t\t\t\treturn v2\n\t\t\t}\n\t\t}\n\t\tif r, ok := v2.(*constBool); ok {\n\t\t\tif r.v == true {\n\t\t\t\treturn BoolConst(true)\n\t\t\t} else {\n\t\t\t\treturn v1\n\t\t\t}\n\t\t}\n\t\tif Equal(v1, v2) {\n\t\t\treturn v1\n\t\t}\n\t\tif l, ok := v1.(*not); ok {\n\t\t\tif Equal(l.V1, v2) {\n\t\t\t\treturn BoolConst(true)\n\t\t\t}\n\t\t}\n\t\tif r, ok := v2.(*not); ok {\n\t\t\tif Equal(v1, r.V1) {\n\t\t\t\treturn BoolConst(true)\n\t\t\t}\n\t\t}\n\t\treturn Or(v1, v2)\n\t}\n\tif ff, ok := f.(*not); ok {\n\t\tv1 := Simplify(ff.V1)\n\t\tif vv, ok := v1.(*not); ok {\n\t\t\treturn vv.V1\n\t\t}\n\t\tif vv, ok := v1.(*constBool); ok {\n\t\t\treturn BoolConst(!vv.v)\n\t\t}\n\t\tif vv, ok := v1.(*and); ok {\n\t\t\treturn Simplify(Or(Not(vv.V1), Not(vv.V2)))\n\t\t}\n\t\tif vv, ok := v1.(*or); ok {\n\t\t\treturn Simplify(And(Not(vv.V1), Not(vv.V2)))\n\t\t}\n\t\tswitch vv := v1.(type) {\n\t\tcase *boolEq:\n\t\t\treturn BoolNe(vv.V1, vv.V2)\n\t\tcase *bytesEq:\n\t\t\treturn BytesNe(vv.V1, vv.V2)\n\t\tcase *doubleEq:\n\t\t\treturn DoubleNe(vv.V1, vv.V2)\n\t\tcase *intEq:\n\t\t\treturn IntNe(vv.V1, vv.V2)\n\t\tcase *stringEq:\n\t\t\treturn StringNe(vv.V1, vv.V2)\n\t\tcase *uintEq:\n\t\t\treturn UintNe(vv.V1, vv.V2)\n\t\tcase *boolNe:\n\t\t\treturn BoolEq(vv.V1, vv.V2)\n\t\tcase *bytesNe:\n\t\t\treturn BytesEq(vv.V1, vv.V2)\n\t\tcase *doubleNe:\n\t\t\treturn DoubleEq(vv.V1, vv.V2)\n\t\tcase *intNe:\n\t\t\treturn IntEq(vv.V1, vv.V2)\n\t\tcase *stringNe:\n\t\t\treturn StringEq(vv.V1, vv.V2)\n\t\tcase *uintNe:\n\t\t\treturn UintEq(vv.V1, vv.V2)\n\t\t}\n\t\treturn Not(v1)\n\t}\n\treturn f\n}\n<commit_msg>fixed sprint better<commit_after>\/\/ Copyright 2013 Walter Schulze\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage funcs\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n)\n\nfunc reverse(uniq string) (name string) {\n\tfor n, us := range funcsMap.nameToUniq {\n\t\tfor _, u := range us {\n\t\t\tif uniq == u {\n\t\t\t\treturn n\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}\n\ntype stringer interface {\n\tString() string\n}\n\nfunc isVarConst(p1, p2 interface{}) (string, bool) {\n\tswitch p1.(type) {\n\tcase *varBool, *varBytes, *varDouble, *varInt, *varString, *varUint:\n\t\tswitch p2.(type) {\n\t\tcase *constBool, *constBytes, *constDouble, *constInt, *constString, *constUint:\n\t\t\treturn Sprint(p2), true\n\t\t}\n\t}\n\tswitch p2.(type) {\n\tcase *varBool, *varBytes, *varDouble, *varInt, *varString, *varUint:\n\t\tswitch p1.(type) {\n\t\tcase *constBool, *constBytes, *constDouble, *constInt, *constString, *constUint:\n\t\t\treturn Sprint(p1), true\n\t\t}\n\t}\n\treturn \"\", false\n}\n\nfunc Sprint(i interface{}) string {\n\te := reflect.ValueOf(i).Elem()\n\tuniqName := e.Type().Name()\n\tname := reverse(uniqName)\n\tnumFields := e.NumField()\n\tif numFields == 2 {\n\t\tp1 := e.Field(0).Interface()\n\t\tp2 := e.Field(1).Interface()\n\t\tv, ok := isVarConst(p1, p2)\n\t\tif ok {\n\t\t\tif name == \"eq\" {\n\t\t\t\treturn \"== \" + v\n\t\t\t}\n\t\t\tif name == \"ne\" {\n\t\t\t\treturn \"!= \" + v\n\t\t\t}\n\t\t\tif name == \"gt\" {\n\t\t\t\treturn \"> \" + v\n\t\t\t}\n\t\t\tif name == \"lt\" {\n\t\t\t\treturn \"< \" + v\n\t\t\t}\n\t\t\tif name == \"ge\" {\n\t\t\t\treturn \">= \" + v\n\t\t\t}\n\t\t\tif name == \"le\" {\n\t\t\t\treturn \"<= \" + v\n\t\t\t}\n\t\t}\n\t}\n\treturn sprint(i)\n}\n\nfunc sprint(i interface{}) string {\n\te := reflect.ValueOf(i).Elem()\n\tuniqName := e.Type().Name()\n\tname := reverse(uniqName)\n\tif len(name) == 0 {\n\t\tstrer, ok := i.(stringer)\n\t\tif !ok {\n\t\t\tpanic(\"unknown function without String() string method\")\n\t\t}\n\t\tname = strer.String()\n\t}\n\tswitch i.(type) {\n\tcase Const:\n\t\treturn name\n\tcase Decoder:\n\t\treturn name\n\tcase ListOf:\n\t\treturn name\n\t}\n\tnumFields := e.NumField()\n\tss := make([]string, 0, numFields)\n\tfor i := 0; i < numFields; i++ {\n\t\tif _, ok := e.Field(i).Type().MethodByName(\"Eval\"); !ok {\n\t\t\tcontinue\n\t\t}\n\t\tss = append(ss, sprint(e.Field(i).Interface()))\n\t}\n\treturn name + \"(\" + strings.Join(ss, \",\") + \")\"\n}\n\nfunc Equal(l, r interface{}) bool {\n\tle := reflect.ValueOf(l).Elem()\n\tlUniqName := le.Type().Name()\n\tre := reflect.ValueOf(r).Elem()\n\trUniqName := re.Type().Name()\n\tif lUniqName != rUniqName {\n\t\treturn false\n\t}\n\tif len(reverse(lUniqName)) == 0 {\n\t\t\/\/TODO maybe this could be done better or we could just always convert functions to strings to compare\n\t\treturn Sprint(l) == Sprint(r)\n\t}\n\tnumFields := le.NumField()\n\tfor i := 0; i < numFields; i++ {\n\t\tif _, ok := le.Field(i).Type().MethodByName(\"Eval\"); !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif !Equal(le.Field(i).Interface(), re.Field(i).Interface()) {\n\t\t\treturn false\n\t\t}\n\t}\n\tif Sprint(l) != Sprint(r) {\n\t\tfmt.Printf(\"wtf %v == %v\\n\", Sprint(l), Sprint(r))\n\t\tpanic(\"two non equal functions are equal\")\n\t}\n\treturn true\n}\n\nfunc Simplify(f Bool) Bool {\n\tif ff, ok := f.(*and); ok {\n\t\tv1 := Simplify(ff.V1)\n\t\tv2 := Simplify(ff.V2)\n\t\tif l, ok := v1.(*constBool); ok {\n\t\t\tif l.v == false {\n\t\t\t\treturn BoolConst(false)\n\t\t\t} else {\n\t\t\t\treturn v2\n\t\t\t}\n\t\t}\n\t\tif r, ok := v2.(*constBool); ok {\n\t\t\tif r.v == false {\n\t\t\t\treturn BoolConst(false)\n\t\t\t} else {\n\t\t\t\treturn v1\n\t\t\t}\n\t\t}\n\t\tif Equal(v1, v2) {\n\t\t\treturn v1\n\t\t}\n\t\tif l, ok := v1.(*not); ok {\n\t\t\tif Equal(l.V1, v2) {\n\t\t\t\treturn BoolConst(false)\n\t\t\t}\n\t\t}\n\t\tif r, ok := v2.(*not); ok {\n\t\t\tif Equal(v1, r.V1) {\n\t\t\t\treturn BoolConst(false)\n\t\t\t}\n\t\t}\n\t\tswitch vv1 := v1.(type) {\n\t\tcase *stringEq:\n\t\t\tif vv2, ok := v2.(*stringEq); ok {\n\t\t\t\tif vvv1, ok1 := isVarConst(vv1.V1, vv1.V2); ok1 {\n\t\t\t\t\tif vvv2, ok2 := isVarConst(vv2.V1, vv2.V2); ok2 {\n\t\t\t\t\t\tif vvv1 != vvv2 {\n\t\t\t\t\t\t\treturn BoolConst(false)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif vv2, ok := v2.(*stringNe); ok {\n\t\t\t\tif vvv1, ok1 := isVarConst(vv1.V1, vv1.V2); ok1 {\n\t\t\t\t\tif vvv2, ok2 := isVarConst(vv2.V1, vv2.V2); ok2 {\n\t\t\t\t\t\tif vvv1 != vvv2 {\n\t\t\t\t\t\t\treturn v1\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\treturn BoolConst(false)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase *stringNe:\n\t\t\tif vv2, ok := v2.(*stringEq); ok {\n\t\t\t\tif vvv1, ok1 := isVarConst(vv1.V1, vv1.V2); ok1 {\n\t\t\t\t\tif vvv2, ok2 := isVarConst(vv2.V1, vv2.V2); ok2 {\n\t\t\t\t\t\tif vvv1 != vvv2 {\n\t\t\t\t\t\t\treturn v2\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\treturn BoolConst(false)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase *intEq:\n\t\t\tif vv2, ok := v2.(*intEq); ok {\n\t\t\t\tif vvv1, ok1 := isVarConst(vv1.V1, vv1.V2); ok1 {\n\t\t\t\t\tif vvv2, ok2 := isVarConst(vv2.V1, vv2.V2); ok2 {\n\t\t\t\t\t\tif vvv1 != vvv2 {\n\t\t\t\t\t\t\treturn BoolConst(false)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif vv2, ok := v2.(*intNe); ok {\n\t\t\t\tif vvv1, ok1 := isVarConst(vv1.V1, vv1.V2); ok1 {\n\t\t\t\t\tif vvv2, ok2 := isVarConst(vv2.V1, vv2.V2); ok2 {\n\t\t\t\t\t\tif vvv1 != vvv2 {\n\t\t\t\t\t\t\treturn v1\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\treturn BoolConst(false)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase *intNe:\n\t\t\tif vv2, ok := v2.(*intEq); ok {\n\t\t\t\tif vvv1, ok1 := isVarConst(vv1.V1, vv1.V2); ok1 {\n\t\t\t\t\tif vvv2, ok2 := isVarConst(vv2.V1, vv2.V2); ok2 {\n\t\t\t\t\t\tif vvv1 != vvv2 {\n\t\t\t\t\t\t\treturn v2\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\treturn BoolConst(false)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn And(v1, v2)\n\t}\n\tif ff, ok := f.(*or); ok {\n\t\tv1 := Simplify(ff.V1)\n\t\tv2 := Simplify(ff.V2)\n\t\tif l, ok := v1.(*constBool); ok {\n\t\t\tif l.v == true {\n\t\t\t\treturn BoolConst(true)\n\t\t\t} else {\n\t\t\t\treturn v2\n\t\t\t}\n\t\t}\n\t\tif r, ok := v2.(*constBool); ok {\n\t\t\tif r.v == true {\n\t\t\t\treturn BoolConst(true)\n\t\t\t} else {\n\t\t\t\treturn v1\n\t\t\t}\n\t\t}\n\t\tif Equal(v1, v2) {\n\t\t\treturn v1\n\t\t}\n\t\tif l, ok := v1.(*not); ok {\n\t\t\tif Equal(l.V1, v2) {\n\t\t\t\treturn BoolConst(true)\n\t\t\t}\n\t\t}\n\t\tif r, ok := v2.(*not); ok {\n\t\t\tif Equal(v1, r.V1) {\n\t\t\t\treturn BoolConst(true)\n\t\t\t}\n\t\t}\n\t\treturn Or(v1, v2)\n\t}\n\tif ff, ok := f.(*not); ok {\n\t\tv1 := Simplify(ff.V1)\n\t\tif vv, ok := v1.(*not); ok {\n\t\t\treturn vv.V1\n\t\t}\n\t\tif vv, ok := v1.(*constBool); ok {\n\t\t\treturn BoolConst(!vv.v)\n\t\t}\n\t\tif vv, ok := v1.(*and); ok {\n\t\t\treturn Simplify(Or(Not(vv.V1), Not(vv.V2)))\n\t\t}\n\t\tif vv, ok := v1.(*or); ok {\n\t\t\treturn Simplify(And(Not(vv.V1), Not(vv.V2)))\n\t\t}\n\t\tswitch vv := v1.(type) {\n\t\tcase *boolEq:\n\t\t\treturn BoolNe(vv.V1, vv.V2)\n\t\tcase *bytesEq:\n\t\t\treturn BytesNe(vv.V1, vv.V2)\n\t\tcase *doubleEq:\n\t\t\treturn DoubleNe(vv.V1, vv.V2)\n\t\tcase *intEq:\n\t\t\treturn IntNe(vv.V1, vv.V2)\n\t\tcase *stringEq:\n\t\t\treturn StringNe(vv.V1, vv.V2)\n\t\tcase *uintEq:\n\t\t\treturn UintNe(vv.V1, vv.V2)\n\t\tcase *boolNe:\n\t\t\treturn BoolEq(vv.V1, vv.V2)\n\t\tcase *bytesNe:\n\t\t\treturn BytesEq(vv.V1, vv.V2)\n\t\tcase *doubleNe:\n\t\t\treturn DoubleEq(vv.V1, vv.V2)\n\t\tcase *intNe:\n\t\t\treturn IntEq(vv.V1, vv.V2)\n\t\tcase *stringNe:\n\t\t\treturn StringEq(vv.V1, vv.V2)\n\t\tcase *uintNe:\n\t\t\treturn UintEq(vv.V1, vv.V2)\n\t\t}\n\t\treturn Not(v1)\n\t}\n\treturn f\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013-2017 Aerospike, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage aerospike\n\nimport \"math\"\n\nconst (\n\t\/\/ TTLServerDefault will default to namespace configuration variable \"default-ttl\" on the server.\n\tTTLServerDefault = 0\n\t\/\/ TTLDontExpire will never expire for Aerospike 2 server versions >= 2.7.2 and Aerospike 3 server.\n\tTTLDontExpire = math.MaxUint32\n\t\/\/ TTLDontUpdate will not change the record's ttl when record is written. Supported by Aerospike server versions >= 3.10.1\n\tTTLDontUpdate = math.MaxUint32 - 1\n)\n\n\/\/ WritePolicy encapsulates parameters for policy attributes used in write operations.\n\/\/ This object is passed into methods where database writes can occur.\ntype WritePolicy struct {\n\tBasePolicy\n\n\t\/\/ RecordExistsAction qualifies how to handle writes where the record already exists.\n\tRecordExistsAction RecordExistsAction \/\/= RecordExistsAction.UPDATE;\n\n\t\/\/ GenerationPolicy qualifies how to handle record writes based on record generation. The default (NONE)\n\t\/\/ indicates that the generation is not used to restrict writes.\n\tGenerationPolicy GenerationPolicy \/\/= GenerationPolicy.NONE;\n\n\t\/\/ Desired consistency guarantee when committing a transaction on the server. The default\n\t\/\/ (COMMIT_ALL) indicates that the server should wait for master and all replica commits to\n\t\/\/ be successful before returning success to the client.\n\tCommitLevel CommitLevel \/\/= COMMIT_ALL\n\n\t\/\/ Generation determines expected generation.\n\t\/\/ Generation is the number of times a record has been\n\t\/\/ modified (including creation) on the server.\n\t\/\/ If a write operation is creating a record, the expected generation would be 0.\n\tGeneration uint32\n\n\t\/\/ Expiration determines record expiration in seconds. Also known as TTL (Time-To-Live).\n\t\/\/ Seconds record will live before being removed by the server.\n\t\/\/ Expiration values:\n\t\/\/ TTLServerDefault (0): Default to namespace configuration variable \"default-ttl\" on the server.\n\t\/\/ TTLDontExpire (MaxUint32): Never expire for Aerospike 2 server versions >= 2.7.2 and Aerospike 3 server\n\t\/\/ TTLDontUpdate (MaxUint32 - 1): Do not change ttl when record is written. Supported by Aerospike server versions >= 3.10.1\n\t\/\/ > 0: Actual expiration in seconds.\n\tExpiration uint32\n\n\t\/\/ RespondPerEachOp defines for client.Operate() method, return a result for every operation.\n\t\/\/ Some list operations do not return results by default (ListClearOp() for example).\n\t\/\/ This can sometimes make it difficult to determine the desired result offset in the returned\n\t\/\/ bin's result list.\n\t\/\/\n\t\/\/ Setting RespondPerEachOp to true makes it easier to identify the desired result offset\n\t\/\/ (result offset equals bin's operate sequence). This only makes sense when multiple list\n\t\/\/ operations are used in one operate call and some of those operations do not return results\n\t\/\/ by default.\n\tRespondPerEachOp bool\n\n\t\/\/ DurableDelete leaves a tombstone for the record if the transaction results in a record deletion.\n\t\/\/ This prevents deleted records from reappearing after node failures.\n\t\/\/ Valid for Aerospike Server Enterprise Edition 4+ only.\n\tDurableDelete bool\n}\n\n\/\/ NewWritePolicy initializes a new WritePolicy instance with default parameters.\nfunc NewWritePolicy(generation, expiration uint32) *WritePolicy {\n\tres := &WritePolicy{\n\t\tBasePolicy: *NewPolicy(),\n\t\tRecordExistsAction: UPDATE,\n\t\tGenerationPolicy: NONE,\n\t\tCommitLevel: COMMIT_ALL,\n\t\tGeneration: generation,\n\t\tExpiration: expiration,\n\t}\n\n\t\/\/ Writes may not be idempotent.\n\t\/\/ do not allow retries on writes by default.\n\tres.MaxRetries = 0\n\n\treturn res\n}\n<commit_msg>Minor doc fix<commit_after>\/\/ Copyright 2013-2017 Aerospike, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage aerospike\n\nimport \"math\"\n\nconst (\n\t\/\/ TTLServerDefault will default to namespace configuration variable \"default-ttl\" on the server.\n\tTTLServerDefault = 0\n\t\/\/ TTLDontExpire will never expire for Aerospike 2 server versions >= 2.7.2 and Aerospike 3 server.\n\tTTLDontExpire = math.MaxUint32\n\t\/\/ TTLDontUpdate will not change the record's ttl when record is written. Supported by Aerospike server versions >= 3.10.1\n\tTTLDontUpdate = math.MaxUint32 - 1\n)\n\n\/\/ WritePolicy encapsulates parameters for policy attributes used in write operations.\n\/\/ This object is passed into methods where database writes can occur.\ntype WritePolicy struct {\n\tBasePolicy\n\n\t\/\/ RecordExistsAction qualifies how to handle writes where the record already exists.\n\tRecordExistsAction RecordExistsAction \/\/= RecordExistsAction.UPDATE;\n\n\t\/\/ GenerationPolicy qualifies how to handle record writes based on record generation. The default (NONE)\n\t\/\/ indicates that the generation is not used to restrict writes.\n\tGenerationPolicy GenerationPolicy \/\/= GenerationPolicy.NONE;\n\n\t\/\/ Desired consistency guarantee when committing a transaction on the server. The default\n\t\/\/ (COMMIT_ALL) indicates that the server should wait for master and all replica commits to\n\t\/\/ be successful before returning success to the client.\n\tCommitLevel CommitLevel \/\/= COMMIT_ALL\n\n\t\/\/ Generation determines expected generation.\n\t\/\/ Generation is the number of times a record has been\n\t\/\/ modified (including creation) on the server.\n\t\/\/ If a write operation is creating a record, the expected generation would be 0.\n\tGeneration uint32\n\n\t\/\/ Expiration determines record expiration in seconds. Also known as TTL (Time-To-Live).\n\t\/\/ Seconds record will live before being removed by the server.\n\t\/\/ Expiration values:\n\t\/\/ TTLServerDefault (0): Default to namespace configuration variable \"default-ttl\" on the server.\n\t\/\/ TTLDontExpire (MaxUint32): Never expire for Aerospike 2 server versions >= 2.7.2 and Aerospike 3 server\n\t\/\/ TTLDontUpdate (MaxUint32 - 1): Do not change ttl when record is written. Supported by Aerospike server versions >= 3.10.1\n\t\/\/ > 0: Actual expiration in seconds.\n\tExpiration uint32\n\n\t\/\/ RespondPerEachOp defines for client.Operate() method, return a result for every operation.\n\t\/\/ Some list operations do not return results by default (ListClearOp() for example).\n\t\/\/ This can sometimes make it difficult to determine the desired result offset in the returned\n\t\/\/ bin's result list.\n\t\/\/\n\t\/\/ Setting RespondPerEachOp to true makes it easier to identify the desired result offset\n\t\/\/ (result offset equals bin's operate sequence). This only makes sense when multiple list\n\t\/\/ operations are used in one operate call and some of those operations do not return results\n\t\/\/ by default.\n\tRespondPerEachOp bool\n\n\t\/\/ DurableDelete leaves a tombstone for the record if the transaction results in a record deletion.\n\t\/\/ This prevents deleted records from reappearing after node failures.\n\t\/\/ Valid for Aerospike Server Enterprise Edition 3.10+ only.\n\tDurableDelete bool\n}\n\n\/\/ NewWritePolicy initializes a new WritePolicy instance with default parameters.\nfunc NewWritePolicy(generation, expiration uint32) *WritePolicy {\n\tres := &WritePolicy{\n\t\tBasePolicy: *NewPolicy(),\n\t\tRecordExistsAction: UPDATE,\n\t\tGenerationPolicy: NONE,\n\t\tCommitLevel: COMMIT_ALL,\n\t\tGeneration: generation,\n\t\tExpiration: expiration,\n\t}\n\n\t\/\/ Writes may not be idempotent.\n\t\/\/ do not allow retries on writes by default.\n\tres.MaxRetries = 0\n\n\treturn res\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 syzkaller project authors. All rights reserved.\n\/\/ Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.\n\n\/\/ execprog executes a single program passed via a flag\n\/\/ and prints information about execution.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/google\/syzkaller\/cover\"\n\t\"github.com\/google\/syzkaller\/ipc\"\n\t\"github.com\/google\/syzkaller\/prog\"\n)\n\nvar (\n\tflagExecutor = flag.String(\"executor\", \"\", \"path to executor binary\")\n\tflagProg = flag.String(\"prog\", \"\", \"file with a program to execute\")\n\tflagThreaded = flag.Bool(\"threaded\", false, \"use threaded mode in executor\")\n\tflagDebug = flag.Bool(\"debug\", true, \"debug output from executor\")\n\tflagStrace = flag.Bool(\"strace\", false, \"run executor under strace\")\n\tflagCover = flag.String(\"cover\", \"\", \"collect coverage and write to the file\")\n\tflagDedup = flag.Bool(\"dedup\", false, \"deduplicate coverage in executor\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tdata, err := ioutil.ReadFile(*flagProg)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to read prog file: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tp, err := prog.Deserialize(data)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to deserialize the program: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tvar flags uint64\n\tif *flagThreaded {\n\t\tflags |= ipc.FlagThreaded\n\t}\n\tif *flagDebug {\n\t\tflags |= ipc.FlagDebug\n\t}\n\tif *flagStrace {\n\t\tflags |= ipc.FlagStrace\n\t}\n\tif *flagCover != \"\" {\n\t\tflags |= ipc.FlagCover\n\t}\n\tif *flagDedup {\n\t\tflags |= ipc.FlagDedupCover\n\t}\n\tenv, err := ipc.MakeEnv(*flagExecutor, 3*time.Second, flags)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to create execution environment: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\toutput, strace, cov, failed, hanged, err := env.Exec(p)\n\tfmt.Printf(\"result: failed=%v hanged=%v err=%v\\n\\n%s\", failed, hanged, err, output)\n\tif *flagStrace {\n\t\tfmt.Printf(\"strace output:\\n%s\", strace)\n\t}\n\t\/\/ Coverage is dumped in sanitizer format.\n\t\/\/ github.com\/google\/sanitizers\/tools\/sancov command can be used to dump PCs,\n\t\/\/ then they can be piped via addr2line to symbolize.\n\tfor i, c := range cov {\n\t\tfmt.Printf(\"call #%v: coverage %v\\n\", i, len(c))\n\t\tif len(c) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tbuf := new(bytes.Buffer)\n\t\tbinary.Write(buf, binary.LittleEndian, uint64(0xC0BFFFFFFFFFFF64))\n\t\tfor _, pc := range c {\n\t\t\tbinary.Write(buf, binary.LittleEndian, cover.RestorePC(pc))\n\t\t}\n\t\terr := ioutil.WriteFile(fmt.Sprintf(\"%v.%v\", *flagCover, i), buf.Bytes(), 0660)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"failed to write coverage file: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n<commit_msg>clean up temp files in tools\/execprog<commit_after>\/\/ Copyright 2015 syzkaller project authors. All rights reserved.\n\/\/ Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.\n\n\/\/ execprog executes a single program passed via a flag\n\/\/ and prints information about execution.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/google\/syzkaller\/cover\"\n\t\"github.com\/google\/syzkaller\/ipc\"\n\t\"github.com\/google\/syzkaller\/prog\"\n)\n\nvar (\n\tflagExecutor = flag.String(\"executor\", \"\", \"path to executor binary\")\n\tflagProg = flag.String(\"prog\", \"\", \"file with a program to execute\")\n\tflagThreaded = flag.Bool(\"threaded\", false, \"use threaded mode in executor\")\n\tflagDebug = flag.Bool(\"debug\", true, \"debug output from executor\")\n\tflagStrace = flag.Bool(\"strace\", false, \"run executor under strace\")\n\tflagCover = flag.String(\"cover\", \"\", \"collect coverage and write to the file\")\n\tflagDedup = flag.Bool(\"dedup\", false, \"deduplicate coverage in executor\")\n)\n\nfunc main() {\n\tflag.Parse()\n\tdata, err := ioutil.ReadFile(*flagProg)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to read prog file: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tp, err := prog.Deserialize(data)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to deserialize the program: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tvar flags uint64\n\tif *flagThreaded {\n\t\tflags |= ipc.FlagThreaded\n\t}\n\tif *flagDebug {\n\t\tflags |= ipc.FlagDebug\n\t}\n\tif *flagStrace {\n\t\tflags |= ipc.FlagStrace\n\t}\n\tif *flagCover != \"\" {\n\t\tflags |= ipc.FlagCover\n\t}\n\tif *flagDedup {\n\t\tflags |= ipc.FlagDedupCover\n\t}\n\tenv, err := ipc.MakeEnv(*flagExecutor, 3*time.Second, flags)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to create execution environment: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tdefer env.Close()\n\toutput, strace, cov, failed, hanged, err := env.Exec(p)\n\tfmt.Printf(\"result: failed=%v hanged=%v err=%v\\n\\n%s\", failed, hanged, err, output)\n\tif *flagStrace {\n\t\tfmt.Printf(\"strace output:\\n%s\", strace)\n\t}\n\t\/\/ Coverage is dumped in sanitizer format.\n\t\/\/ github.com\/google\/sanitizers\/tools\/sancov command can be used to dump PCs,\n\t\/\/ then they can be piped via addr2line to symbolize.\n\tfor i, c := range cov {\n\t\tfmt.Printf(\"call #%v: coverage %v\\n\", i, len(c))\n\t\tif len(c) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tbuf := new(bytes.Buffer)\n\t\tbinary.Write(buf, binary.LittleEndian, uint64(0xC0BFFFFFFFFFFF64))\n\t\tfor _, pc := range c {\n\t\t\tbinary.Write(buf, binary.LittleEndian, cover.RestorePC(pc))\n\t\t}\n\t\terr := ioutil.WriteFile(fmt.Sprintf(\"%v.%v\", *flagCover, i), buf.Bytes(), 0660)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"failed to write coverage file: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package tfdiags\n\nimport (\n\t\"github.com\/hashicorp\/hcl2\/hcl\"\n)\n\n\/\/ hclDiagnostic is a Diagnostic implementation that wraps a HCL Diagnostic\ntype hclDiagnostic struct {\n\tdiag *hcl.Diagnostic\n}\n\nvar _ Diagnostic = hclDiagnostic{}\n\nfunc (d hclDiagnostic) Severity() Severity {\n\tswitch d.diag.Severity {\n\tcase hcl.DiagWarning:\n\t\treturn Warning\n\tdefault:\n\t\treturn Error\n\t}\n}\n\nfunc (d hclDiagnostic) Description() Description {\n\treturn Description{\n\t\tSummary: d.diag.Summary,\n\t\tDetail: d.diag.Detail,\n\t}\n}\n\nfunc (d hclDiagnostic) Source() Source {\n\tvar ret Source\n\tif d.diag.Subject != nil {\n\t\tret.Subject = &SourceRange{\n\t\t\tFilename: d.diag.Subject.Filename,\n\t\t\tStart: SourcePos{\n\t\t\t\tLine: d.diag.Subject.Start.Line,\n\t\t\t\tColumn: d.diag.Subject.Start.Column,\n\t\t\t\tByte: d.diag.Subject.Start.Byte,\n\t\t\t},\n\t\t\tEnd: SourcePos{\n\t\t\t\tLine: d.diag.Subject.End.Line,\n\t\t\t\tColumn: d.diag.Subject.End.Column,\n\t\t\t\tByte: d.diag.Subject.End.Byte,\n\t\t\t},\n\t\t}\n\t}\n\tif d.diag.Context != nil {\n\t\tret.Context = &SourceRange{\n\t\t\tFilename: d.diag.Context.Filename,\n\t\t\tStart: SourcePos{\n\t\t\t\tLine: d.diag.Context.Start.Line,\n\t\t\t\tColumn: d.diag.Context.Start.Column,\n\t\t\t\tByte: d.diag.Context.Start.Byte,\n\t\t\t},\n\t\t\tEnd: SourcePos{\n\t\t\t\tLine: d.diag.Context.End.Line,\n\t\t\t\tColumn: d.diag.Context.End.Column,\n\t\t\t\tByte: d.diag.Context.End.Byte,\n\t\t\t},\n\t\t}\n\t}\n\treturn ret\n}\n<commit_msg>tfdiags: Helper to construct SourceRange from hcl.Range<commit_after>package tfdiags\n\nimport (\n\t\"github.com\/hashicorp\/hcl2\/hcl\"\n)\n\n\/\/ hclDiagnostic is a Diagnostic implementation that wraps a HCL Diagnostic\ntype hclDiagnostic struct {\n\tdiag *hcl.Diagnostic\n}\n\nvar _ Diagnostic = hclDiagnostic{}\n\nfunc (d hclDiagnostic) Severity() Severity {\n\tswitch d.diag.Severity {\n\tcase hcl.DiagWarning:\n\t\treturn Warning\n\tdefault:\n\t\treturn Error\n\t}\n}\n\nfunc (d hclDiagnostic) Description() Description {\n\treturn Description{\n\t\tSummary: d.diag.Summary,\n\t\tDetail: d.diag.Detail,\n\t}\n}\n\nfunc (d hclDiagnostic) Source() Source {\n\tvar ret Source\n\tif d.diag.Subject != nil {\n\t\trng := SourceRangeFromHCL(*d.diag.Subject)\n\t\tret.Subject = &rng\n\t}\n\tif d.diag.Context != nil {\n\t\trng := SourceRangeFromHCL(*d.diag.Context)\n\t\tret.Context = &rng\n\t}\n\treturn ret\n}\n\n\/\/ SourceRangeFromHCL constructs a SourceRange from the corresponding range\n\/\/ type within the HCL package.\nfunc SourceRangeFromHCL(hclRange hcl.Range) SourceRange {\n\treturn SourceRange{\n\t\tFilename: hclRange.Filename,\n\t\tStart: SourcePos{\n\t\t\tLine: hclRange.Start.Line,\n\t\t\tColumn: hclRange.Start.Column,\n\t\t\tByte: hclRange.Start.Byte,\n\t\t},\n\t\tEnd: SourcePos{\n\t\t\tLine: hclRange.End.Line,\n\t\t\tColumn: hclRange.End.Column,\n\t\t\tByte: hclRange.End.Byte,\n\t\t},\n\t}\n}\n\n\/\/ ToHCL constructs a HCL Range from the receiving SourceRange. This is the\n\/\/ opposite of SourceRangeFromHCL.\nfunc (r SourceRange) ToHCL() hcl.Range {\n\treturn hcl.Range{\n\t\tFilename: r.Filename,\n\t\tStart: hcl.Pos{\n\t\t\tLine: r.Start.Line,\n\t\t\tColumn: r.Start.Column,\n\t\t\tByte: r.Start.Byte,\n\t\t},\n\t\tEnd: hcl.Pos{\n\t\t\tLine: r.End.Line,\n\t\t\tColumn: r.End.Column,\n\t\t\tByte: r.End.Byte,\n\t\t},\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package nodefs\n\nimport (\n\t\"log\"\n\t\"sync\"\n\t\"unsafe\"\n\n\t\"github.com\/hanwen\/go-fuse\/fuse\"\n)\n\n\/\/ openedFile stores either an open dir or an open file.\ntype openedFile struct {\n\thandled\n\n\tWithFlags\n\n\tdir *connectorDir\n}\n\ntype fileSystemMount struct {\n\t\/\/ Node that we were mounted on.\n\tmountInode *Inode\n\n\t\/\/ Parent to the mountInode.\n\tparentInode *Inode\n\n\t\/\/ Options for the mount.\n\toptions *Options\n\n\t\/\/ Protects Children hashmaps within the mount. treeLock\n\t\/\/ should be acquired before openFilesLock.\n\t\/\/\n\t\/\/ If multiple treeLocks must be acquired, the treeLocks\n\t\/\/ closer to the root must be acquired first.\n\ttreeLock sync.RWMutex\n\n\t\/\/ Manage filehandles of open files.\n\topenFiles handleMap\n\n\tDebug bool\n\n\tconnector *FileSystemConnector\n}\n\n\/\/ Must called with lock for parent held.\nfunc (m *fileSystemMount) mountName() string {\n\tfor k, v := range m.parentInode.children {\n\t\tif m.mountInode == v {\n\t\t\treturn k\n\t\t}\n\t}\n\tpanic(\"not found\")\n\treturn \"\"\n}\n\nfunc (m *fileSystemMount) setOwner(attr *fuse.Attr) {\n\tif m.options.Owner != nil {\n\t\tattr.Owner = *(*fuse.Owner)(m.options.Owner)\n\t}\n}\n\nfunc (m *fileSystemMount) fillEntry(out *fuse.EntryOut) {\n\tsplitDuration(m.options.EntryTimeout, &out.EntryValid, &out.EntryValidNsec)\n\tsplitDuration(m.options.AttrTimeout, &out.AttrValid, &out.AttrValidNsec)\n\tm.setOwner(&out.Attr)\n\tif out.Mode&fuse.S_IFDIR == 0 && out.Nlink == 0 {\n\t\tout.Nlink = 1\n\t}\n}\n\nfunc (m *fileSystemMount) fillAttr(out *fuse.AttrOut, nodeId uint64) {\n\tsplitDuration(m.options.AttrTimeout, &out.AttrValid, &out.AttrValidNsec)\n\tm.setOwner(&out.Attr)\n\tout.Ino = nodeId\n}\n\nfunc (m *fileSystemMount) getOpenedFile(h uint64) *openedFile {\n\tvar b *openedFile\n\tif h != 0 {\n\t\tb = (*openedFile)(unsafe.Pointer(m.openFiles.Decode(h)))\n\t}\n\n\tif b != nil && m.connector.debug && b.WithFlags.Description != \"\" {\n\t\tlog.Printf(\"File %d = %q\", h, b.WithFlags.Description)\n\t}\n\treturn b\n}\n\nfunc (m *fileSystemMount) unregisterFileHandle(handle uint64, node *Inode) *openedFile {\n\t_, obj := m.openFiles.Forget(handle, 1)\n\topened := (*openedFile)(unsafe.Pointer(obj))\n\tnode.openFilesMutex.Lock()\n\tidx := -1\n\tfor i, v := range node.openFiles {\n\t\tif v == opened {\n\t\t\tidx = i\n\t\t\tbreak\n\t\t}\n\t}\n\n\tl := len(node.openFiles)\n\tnode.openFiles[idx] = node.openFiles[l-1]\n\tnode.openFiles = node.openFiles[:l-1]\n\tnode.openFilesMutex.Unlock()\n\n\treturn opened\n}\n\nfunc (m *fileSystemMount) registerFileHandle(node *Inode, dir *connectorDir, f File, flags uint32) (uint64, *openedFile) {\n\tnode.openFilesMutex.Lock()\n\tb := &openedFile{\n\t\tdir: dir,\n\t\tWithFlags: WithFlags{\n\t\t\tFile: f,\n\t\t\tOpenFlags: flags,\n\t\t},\n\t}\n\n\tfor {\n\t\twithFlags, ok := f.(*WithFlags)\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\n\t\tb.WithFlags.File = withFlags.File\n\t\tb.WithFlags.FuseFlags |= withFlags.FuseFlags\n\t\tb.WithFlags.Description += withFlags.Description\n\t\tf = withFlags.File\n\t}\n\n\tif b.WithFlags.File != nil {\n\t\tb.WithFlags.File.SetInode(node)\n\t}\n\tnode.openFiles = append(node.openFiles, b)\n\thandle := m.openFiles.Register(&b.handled)\n\tnode.openFilesMutex.Unlock()\n\treturn handle, b\n}\n\n\/\/ Creates a return entry for a non-existent path.\nfunc (m *fileSystemMount) negativeEntry(out *fuse.EntryOut) bool {\n\tif m.options.NegativeTimeout > 0.0 {\n\t\tout.NodeId = 0\n\t\tsplitDuration(m.options.NegativeTimeout, &out.EntryValid, &out.EntryValidNsec)\n\t\treturn true\n\t}\n\treturn false\n}\n<commit_msg>Fix memory leak when unregistering most recently opened file handle.<commit_after>package nodefs\n\nimport (\n\t\"log\"\n\t\"sync\"\n\t\"unsafe\"\n\n\t\"github.com\/hanwen\/go-fuse\/fuse\"\n)\n\n\/\/ openedFile stores either an open dir or an open file.\ntype openedFile struct {\n\thandled\n\n\tWithFlags\n\n\tdir *connectorDir\n}\n\ntype fileSystemMount struct {\n\t\/\/ Node that we were mounted on.\n\tmountInode *Inode\n\n\t\/\/ Parent to the mountInode.\n\tparentInode *Inode\n\n\t\/\/ Options for the mount.\n\toptions *Options\n\n\t\/\/ Protects Children hashmaps within the mount. treeLock\n\t\/\/ should be acquired before openFilesLock.\n\t\/\/\n\t\/\/ If multiple treeLocks must be acquired, the treeLocks\n\t\/\/ closer to the root must be acquired first.\n\ttreeLock sync.RWMutex\n\n\t\/\/ Manage filehandles of open files.\n\topenFiles handleMap\n\n\tDebug bool\n\n\tconnector *FileSystemConnector\n}\n\n\/\/ Must called with lock for parent held.\nfunc (m *fileSystemMount) mountName() string {\n\tfor k, v := range m.parentInode.children {\n\t\tif m.mountInode == v {\n\t\t\treturn k\n\t\t}\n\t}\n\tpanic(\"not found\")\n\treturn \"\"\n}\n\nfunc (m *fileSystemMount) setOwner(attr *fuse.Attr) {\n\tif m.options.Owner != nil {\n\t\tattr.Owner = *(*fuse.Owner)(m.options.Owner)\n\t}\n}\n\nfunc (m *fileSystemMount) fillEntry(out *fuse.EntryOut) {\n\tsplitDuration(m.options.EntryTimeout, &out.EntryValid, &out.EntryValidNsec)\n\tsplitDuration(m.options.AttrTimeout, &out.AttrValid, &out.AttrValidNsec)\n\tm.setOwner(&out.Attr)\n\tif out.Mode&fuse.S_IFDIR == 0 && out.Nlink == 0 {\n\t\tout.Nlink = 1\n\t}\n}\n\nfunc (m *fileSystemMount) fillAttr(out *fuse.AttrOut, nodeId uint64) {\n\tsplitDuration(m.options.AttrTimeout, &out.AttrValid, &out.AttrValidNsec)\n\tm.setOwner(&out.Attr)\n\tout.Ino = nodeId\n}\n\nfunc (m *fileSystemMount) getOpenedFile(h uint64) *openedFile {\n\tvar b *openedFile\n\tif h != 0 {\n\t\tb = (*openedFile)(unsafe.Pointer(m.openFiles.Decode(h)))\n\t}\n\n\tif b != nil && m.connector.debug && b.WithFlags.Description != \"\" {\n\t\tlog.Printf(\"File %d = %q\", h, b.WithFlags.Description)\n\t}\n\treturn b\n}\n\nfunc (m *fileSystemMount) unregisterFileHandle(handle uint64, node *Inode) *openedFile {\n\t_, obj := m.openFiles.Forget(handle, 1)\n\topened := (*openedFile)(unsafe.Pointer(obj))\n\tnode.openFilesMutex.Lock()\n\tidx := -1\n\tfor i, v := range node.openFiles {\n\t\tif v == opened {\n\t\t\tidx = i\n\t\t\tbreak\n\t\t}\n\t}\n\n\tl := len(node.openFiles)\n\tif idx == l-1 {\n\t\tnode.openFiles[idx] = nil\n\t} else {\n\t\tnode.openFiles[idx] = node.openFiles[l-1]\n\t}\n\tnode.openFiles = node.openFiles[:l-1]\n\tnode.openFilesMutex.Unlock()\n\n\treturn opened\n}\n\nfunc (m *fileSystemMount) registerFileHandle(node *Inode, dir *connectorDir, f File, flags uint32) (uint64, *openedFile) {\n\tnode.openFilesMutex.Lock()\n\tb := &openedFile{\n\t\tdir: dir,\n\t\tWithFlags: WithFlags{\n\t\t\tFile: f,\n\t\t\tOpenFlags: flags,\n\t\t},\n\t}\n\n\tfor {\n\t\twithFlags, ok := f.(*WithFlags)\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\n\t\tb.WithFlags.File = withFlags.File\n\t\tb.WithFlags.FuseFlags |= withFlags.FuseFlags\n\t\tb.WithFlags.Description += withFlags.Description\n\t\tf = withFlags.File\n\t}\n\n\tif b.WithFlags.File != nil {\n\t\tb.WithFlags.File.SetInode(node)\n\t}\n\tnode.openFiles = append(node.openFiles, b)\n\thandle := m.openFiles.Register(&b.handled)\n\tnode.openFilesMutex.Unlock()\n\treturn handle, b\n}\n\n\/\/ Creates a return entry for a non-existent path.\nfunc (m *fileSystemMount) negativeEntry(out *fuse.EntryOut) bool {\n\tif m.options.NegativeTimeout > 0.0 {\n\t\tout.NodeId = 0\n\t\tsplitDuration(m.options.NegativeTimeout, &out.EntryValid, &out.EntryValidNsec)\n\t\treturn true\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package websocket\n\nimport (\n\t\"time\"\n\n\tjsonrpc2websocket \"github.com\/asuleymanov\/jsonrpc2\/websocket\"\n\t\"github.com\/asuleymanov\/websocket\"\n)\n\n\/\/ ObjectStream implements jsonrpc2.ObjectStream that uses a WebSocket.\n\/\/ It extends jsonrpc2\/websocket.ObjectStream with read\/write timeouts.\ntype ObjectStream struct {\n\tconn *websocket.Conn\n\tstream jsonrpc2websocket.ObjectStream\n\n\twriteTimeout time.Duration\n\treadTimeout time.Duration\n}\n\n\/\/NewObjectStream initialised ObjectStream\nfunc NewObjectStream(conn *websocket.Conn, writeTimeout, readTimeout time.Duration) *ObjectStream {\n\treturn &ObjectStream{conn, jsonrpc2websocket.NewObjectStream(conn), writeTimeout, readTimeout}\n}\n\n\/\/WriteObject data record in ObjectStream\nfunc (stream *ObjectStream) WriteObject(v interface{}) error {\n\tstream.conn.SetWriteDeadline(time.Now().Add(stream.writeTimeout))\n\treturn stream.stream.WriteObject(v)\n}\n\n\/\/ReadObject reading data from ObjectStream\nfunc (stream *ObjectStream) ReadObject(v interface{}) error {\n\tstream.conn.SetReadDeadline(time.Now().Add(stream.readTimeout))\n\treturn stream.stream.ReadObject(v)\n}\n\n\/\/Close closing the ObjectStream\nfunc (stream *ObjectStream) Close() error {\n\treturn stream.stream.Close()\n}\n<commit_msg>Update object_stream.go<commit_after>package websocket\n\nimport (\n\t\"time\"\n\n\tjsonrpc2websocket \"github.com\/asuleymanov\/jsonrpc2\/websocket\"\n\t\"github.com\/asuleymanov\/websocket\"\n)\n\n\/\/ ObjectStream implements jsonrpc2.ObjectStream that uses a WebSocket.\n\/\/ It extends jsonrpc2\/websocket.ObjectStream with read\/write timeouts.\ntype ObjectStream struct {\n\tconn *websocket.Conn\n\tstream jsonrpc2websocket.ObjectStream\n\n\twriteTimeout time.Duration\n\treadTimeout time.Duration\n}\n\n\/\/NewObjectStream initialised ObjectStream\nfunc NewObjectStream(conn *websocket.Conn, writeTimeout, readTimeout time.Duration) *ObjectStream {\n\treturn &ObjectStream{conn, jsonrpc2websocket.NewObjectStream(conn), writeTimeout, readTimeout}\n}\n\n\/\/WriteObject data record in ObjectStream\nfunc (stream *ObjectStream) WriteObject(v interface{}) error {\n\terr:=stream.conn.SetWriteDeadline(time.Now().Add(stream.writeTimeout))\n\tif err!=nil {\n\t\treturn err\n\t}\n\treturn stream.stream.WriteObject(v)\n}\n\n\/\/ReadObject reading data from ObjectStream\nfunc (stream *ObjectStream) ReadObject(v interface{}) error {\n\terr:=stream.conn.SetReadDeadline(time.Now().Add(stream.readTimeout))\n\tif err!=nil {\n\t\treturn err\n\t}\n\treturn stream.stream.ReadObject(v)\n}\n\n\/\/Close closing the ObjectStream\nfunc (stream *ObjectStream) Close() error {\n\treturn stream.stream.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package gatherrun\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\tabuseInterval = time.Minute * 5\n\tanalyticsInterval = time.Hour * 24\n\tenvVarName = \"GATHER\"\n)\n\ntype GatherRun struct {\n\tDestFolder string\n\tExporter Exporter\n\tFetcher Fetcher\n\tEnv string\n\tUsername string\n\tScriptType string\n}\n\nfunc Run(env, username string) {\n\tfetcher := &S3Fetcher{\n\t\tBucketName: \"koding-gather\",\n\t\tFileName: \"koding-kernel.tar\",\n\t\tRegion: \"us-east-1\",\n\t}\n\n\texporter := NewKodingExporter()\n\n\tgo func() {\n\t\tNew(fetcher, exporter, env, username, \"abuse\").Run()\n\t\tNew(fetcher, exporter, env, username, \"analytics\").Run()\n\t}()\n\n\tabuseTimer := time.NewTimer(abuseInterval)\n\tanalyticsTimer := time.NewTimer(analyticsInterval)\n\n\tfor {\n\t\tselect {\n\t\tcase <-abuseTimer.C:\n\t\t\tNew(fetcher, exporter, env, username, \"abuse\").Run()\n\t\tcase <-analyticsTimer.C:\n\t\t\tNew(fetcher, exporter, env, username, \"analytics\").Run()\n\t\t}\n\t}\n}\n\nfunc New(fetcher Fetcher, exporter Exporter, env, username, scriptType string) *GatherRun {\n\ttmpDir, err := ioutil.TempDir(\"\/tmp\", \"koding-kernel\")\n\tif err != nil {\n\t\t\/\/ TODO: how to deal with errs\n\t}\n\n\treturn &GatherRun{\n\t\tFetcher: fetcher,\n\t\tExporter: exporter,\n\t\tDestFolder: tmpDir,\n\t\tEnv: env,\n\t\tUsername: username,\n\t\tScriptType: scriptType,\n\t}\n}\n\nfunc (c *GatherRun) Run() (err error) {\n\tdefer func() { err = c.Cleanup() }()\n\n\tbinary, err := c.GetGatherBinary()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = c.Export(binary.Run()); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *GatherRun) GetGatherBinary() (*GatherBinary, error) {\n\tif err := os.MkdirAll(c.DestFolder, 0777); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := c.DownloadScripts(c.DestFolder); err != nil {\n\t\treturn nil, err\n\t}\n\n\ttarFile := filepath.Join(c.DestFolder, c.Fetcher.GetFileName())\n\tif err := untar(tarFile, c.DestFolder); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbinaryPath := strings.TrimSuffix(tarFile, tarSuffix)\n\treturn &GatherBinary{Path: binaryPath, ScriptType: c.ScriptType}, nil\n}\n\nfunc (c *GatherRun) DownloadScripts(folderName string) error {\n\treturn c.Fetcher.Download(folderName)\n}\n\nfunc (c *GatherRun) Export(raw []interface{}, err error) error {\n\tif err != nil {\n\t\treturn c.sendErrors(err)\n\t}\n\n\tvar stats = []GatherSingleStat{}\n\tvar errors = []error{}\n\n\tfor _, r := range raw {\n\t\tbuf := new(bytes.Buffer)\n\t\tif err := json.NewEncoder(buf).Encode(r); err != nil {\n\t\t\terrors = append(errors, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar stat GatherSingleStat\n\t\tif err := json.NewDecoder(buf).Decode(&stat); err != nil {\n\t\t\terrors = append(errors, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tstats = append(stats, stat)\n\t}\n\n\tif len(errors) > 0 {\n\t\tc.sendErrors(errors...)\n\t}\n\n\tif len(stats) > 0 {\n\t\tgStat := &GatherStat{\n\t\t\tEnv: c.Env, Username: c.Username, Stats: stats, Type: c.ScriptType,\n\t\t}\n\t\treturn c.Exporter.SendStats(gStat)\n\t}\n\n\treturn nil\n}\n\nfunc (c *GatherRun) Cleanup() error {\n\treturn os.RemoveAll(c.DestFolder)\n}\n\nfunc (c *GatherRun) sendErrors(errs ...error) error {\n\tgErr := &GatherError{\n\t\tEnv: c.Env, Username: c.Username, Errors: errs,\n\t}\n\n\treturn c.Exporter.SendError(gErr)\n}\n<commit_msg>gather: increase abuse checking to every 15 mins<commit_after>package gatherrun\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\tabuseInterval = time.Minute * 15\n\tanalyticsInterval = time.Hour * 24\n\tenvVarName = \"GATHER\"\n)\n\ntype GatherRun struct {\n\tDestFolder string\n\tExporter Exporter\n\tFetcher Fetcher\n\tEnv string\n\tUsername string\n\tScriptType string\n}\n\nfunc Run(env, username string) {\n\tfetcher := &S3Fetcher{\n\t\tBucketName: \"koding-gather\",\n\t\tFileName: \"koding-kernel.tar\",\n\t\tRegion: \"us-east-1\",\n\t}\n\n\texporter := NewKodingExporter()\n\n\tgo func() {\n\t\tNew(fetcher, exporter, env, username, \"abuse\").Run()\n\t\tNew(fetcher, exporter, env, username, \"analytics\").Run()\n\t}()\n\n\tabuseTimer := time.NewTimer(abuseInterval)\n\tanalyticsTimer := time.NewTimer(analyticsInterval)\n\n\tfor {\n\t\tselect {\n\t\tcase <-abuseTimer.C:\n\t\t\tNew(fetcher, exporter, env, username, \"abuse\").Run()\n\t\tcase <-analyticsTimer.C:\n\t\t\tNew(fetcher, exporter, env, username, \"analytics\").Run()\n\t\t}\n\t}\n}\n\nfunc New(fetcher Fetcher, exporter Exporter, env, username, scriptType string) *GatherRun {\n\ttmpDir, err := ioutil.TempDir(\"\/tmp\", \"koding-kernel\")\n\tif err != nil {\n\t\t\/\/ TODO: how to deal with errs\n\t}\n\n\treturn &GatherRun{\n\t\tFetcher: fetcher,\n\t\tExporter: exporter,\n\t\tDestFolder: tmpDir,\n\t\tEnv: env,\n\t\tUsername: username,\n\t\tScriptType: scriptType,\n\t}\n}\n\nfunc (c *GatherRun) Run() (err error) {\n\tdefer func() { err = c.Cleanup() }()\n\n\tbinary, err := c.GetGatherBinary()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = c.Export(binary.Run()); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *GatherRun) GetGatherBinary() (*GatherBinary, error) {\n\tif err := os.MkdirAll(c.DestFolder, 0777); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := c.DownloadScripts(c.DestFolder); err != nil {\n\t\treturn nil, err\n\t}\n\n\ttarFile := filepath.Join(c.DestFolder, c.Fetcher.GetFileName())\n\tif err := untar(tarFile, c.DestFolder); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbinaryPath := strings.TrimSuffix(tarFile, tarSuffix)\n\treturn &GatherBinary{Path: binaryPath, ScriptType: c.ScriptType}, nil\n}\n\nfunc (c *GatherRun) DownloadScripts(folderName string) error {\n\treturn c.Fetcher.Download(folderName)\n}\n\nfunc (c *GatherRun) Export(raw []interface{}, err error) error {\n\tif err != nil {\n\t\treturn c.sendErrors(err)\n\t}\n\n\tvar stats = []GatherSingleStat{}\n\tvar errors = []error{}\n\n\tfor _, r := range raw {\n\t\tbuf := new(bytes.Buffer)\n\t\tif err := json.NewEncoder(buf).Encode(r); err != nil {\n\t\t\terrors = append(errors, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar stat GatherSingleStat\n\t\tif err := json.NewDecoder(buf).Decode(&stat); err != nil {\n\t\t\terrors = append(errors, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tstats = append(stats, stat)\n\t}\n\n\tif len(errors) > 0 {\n\t\tc.sendErrors(errors...)\n\t}\n\n\tif len(stats) > 0 {\n\t\tgStat := &GatherStat{\n\t\t\tEnv: c.Env, Username: c.Username, Stats: stats, Type: c.ScriptType,\n\t\t}\n\t\treturn c.Exporter.SendStats(gStat)\n\t}\n\n\treturn nil\n}\n\nfunc (c *GatherRun) Cleanup() error {\n\treturn os.RemoveAll(c.DestFolder)\n}\n\nfunc (c *GatherRun) sendErrors(errs ...error) error {\n\tgErr := &GatherError{\n\t\tEnv: c.Env, Username: c.Username, Errors: errs,\n\t}\n\n\treturn c.Exporter.SendError(gErr)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 Crisp IM SARL All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage crisp\n\n\nimport (\n \"fmt\"\n)\n\n\n\/\/ WebsiteBatchConversationsOperation mapping\ntype WebsiteBatchConversationsOperation struct {\n Sessions *[]string `json:\"sessions,omitempty\"`\n}\n\n\/\/ WebsiteBatchPeopleOperation mapping\ntype WebsiteBatchPeopleOperation struct {\n People *WebsiteBatchPeopleOperationInner `json:\"people,omitempty\"`\n}\n\n\/\/ WebsiteBatchPeopleOperationInner mapping\ntype WebsiteBatchPeopleOperationInner struct {\n Profiles *[]string `json:\"profiles,omitempty\"`\n Search *WebsiteBatchPeopleOperationInnerSearch `json:\"search,omitempty\"`\n}\n\n\/\/ WebsiteBatchPeopleOperationInnerSearch mapping\ntype WebsiteBatchPeopleOperationInnerSearch struct {\n Filter []WebsiteFilter `json:\"filter,omitempty\"`\n Operator string `json:\"operator,omitempty\"`\n}\n\n\n\/\/ BatchResolveConversations resolves given (or all) items in website (conversation variant).\nfunc (service *WebsiteService) BatchResolveConversations(websiteID string, sessions []string) (*Response, error) {\n url := fmt.Sprintf(\"website\/%s\/batch\/resolve\", websiteID)\n req, _ := service.client.NewRequest(\"PATCH\", url, WebsiteBatchConversationsOperation{Sessions: &sessions})\n\n return service.client.Do(req, nil)\n}\n\n\n\/\/ BatchReadConversations marks given (or all) items as read in website (conversation variant).\nfunc (service *WebsiteService) BatchReadConversations(websiteID string, sessions []string) (*Response, error) {\n url := fmt.Sprintf(\"website\/%s\/batch\/read\", websiteID)\n req, _ := service.client.NewRequest(\"PATCH\", url, WebsiteBatchConversationsOperation{Sessions: &sessions})\n\n return service.client.Do(req, nil)\n}\n\n\n\/\/ BatchRemoveConversations removes given items in website (conversation variant).\nfunc (service *WebsiteService) BatchRemoveConversations(websiteID string, sessions []string) (*Response, error) {\n url := fmt.Sprintf(\"website\/%s\/batch\/remove\", websiteID)\n req, _ := service.client.NewRequest(\"PATCH\", url, WebsiteBatchConversationsOperation{Sessions: &sessions})\n\n return service.client.Do(req, nil)\n}\n\n\n\/\/ BatchRemovePeople removes given items in website (people variant).\nfunc (service *WebsiteService) BatchRemovePeople(websiteID string, people WebsiteBatchPeopleOperationInner) (*Response, error) {\n url := fmt.Sprintf(\"website\/%s\/batch\/remove\", websiteID)\n req, _ := service.client.NewRequest(\"PATCH\", url, WebsiteBatchPeopleOperation{People: &people})\n\n return service.client.Do(req, nil)\n}\n<commit_msg>Fix Website Batch operations for all sessions<commit_after>\/\/ Copyright 2018 Crisp IM SARL All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage crisp\n\nimport (\n\t\"fmt\"\n)\n\n\/\/ WebsiteBatchConversationsOperation mapping\ntype WebsiteBatchConversationsOperation struct {\n\tSessions []string `json:\"sessions,omitempty\"`\n}\n\n\/\/ WebsiteBatchPeopleOperation mapping\ntype WebsiteBatchPeopleOperation struct {\n\tPeople *WebsiteBatchPeopleOperationInner `json:\"people,omitempty\"`\n}\n\n\/\/ WebsiteBatchPeopleOperationInner mapping\ntype WebsiteBatchPeopleOperationInner struct {\n\tProfiles *[]string `json:\"profiles,omitempty\"`\n\tSearch *WebsiteBatchPeopleOperationInnerSearch `json:\"search,omitempty\"`\n}\n\n\/\/ WebsiteBatchPeopleOperationInnerSearch mapping\ntype WebsiteBatchPeopleOperationInnerSearch struct {\n\tFilter []WebsiteFilter `json:\"filter,omitempty\"`\n\tOperator string `json:\"operator,omitempty\"`\n}\n\n\/\/ BatchResolveConversations resolves given (or all) items in website (conversation variant).\nfunc (service *WebsiteService) BatchResolveConversations(websiteID string, sessions []string) (*Response, error) {\n\turl := fmt.Sprintf(\"website\/%s\/batch\/resolve\", websiteID)\n\treq, _ := service.client.NewRequest(\"PATCH\", url, WebsiteBatchConversationsOperation{Sessions: sessions})\n\n\treturn service.client.Do(req, nil)\n}\n\n\/\/ BatchReadConversations marks given (or all) items as read in website (conversation variant).\nfunc (service *WebsiteService) BatchReadConversations(websiteID string, sessions []string) (*Response, error) {\n\turl := fmt.Sprintf(\"website\/%s\/batch\/read\", websiteID)\n\treq, _ := service.client.NewRequest(\"PATCH\", url, WebsiteBatchConversationsOperation{Sessions: sessions})\n\n\treturn service.client.Do(req, nil)\n}\n\n\/\/ BatchRemoveConversations removes given items in website (conversation variant).\nfunc (service *WebsiteService) BatchRemoveConversations(websiteID string, sessions []string) (*Response, error) {\n\turl := fmt.Sprintf(\"website\/%s\/batch\/remove\", websiteID)\n\treq, _ := service.client.NewRequest(\"PATCH\", url, WebsiteBatchConversationsOperation{Sessions: sessions})\n\n\treturn service.client.Do(req, nil)\n}\n\n\/\/ BatchRemovePeople removes given items in website (people variant).\nfunc (service *WebsiteService) BatchRemovePeople(websiteID string, people WebsiteBatchPeopleOperationInner) (*Response, error) {\n\turl := fmt.Sprintf(\"website\/%s\/batch\/remove\", websiteID)\n\treq, _ := service.client.NewRequest(\"PATCH\", url, WebsiteBatchPeopleOperation{People: &people})\n\n\treturn service.client.Do(req, nil)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The go-toggl AUTHORS. All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage toggl\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n)\n\n\/\/ TasksService handles communication with the tasks related\n\/\/ methods of the Toggl API.\n\/\/\n\/\/ Toggl API docs: https:\/\/github.com\/toggl\/toggl_api_docs\/blob\/master\/chapters\/tasks.md\ntype TasksService struct {\n\tclient *Client\n}\n\n\/\/ Task represents a task.\ntype Task struct {\n\tID int `json:\"id,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tProjectID int `json:\"pid,omitempty\"`\n\tUserID int `json:\"uid,omitempty\"`\n\tWorkspaceID int `json:\"wid,omitempty\"`\n\tEstimatedSeconds int `json:\"estimated_seconds,omitempty\"`\n\tActive bool `json:\"active,omitempty\"`\n\tAt *time.Time `json:\"time,omitempty\"`\n}\n\n\/\/ TaskResponse acts as a response wrapper where response returns\n\/\/ in format of \"data\": Tasks's object.\ntype TaskResponse struct {\n\tData *Task `json:\"data,omitempty\"`\n}\n\n\/\/ TaskMassResponse acts as a response wrapper where response returns\n\/\/ in format of \"data\": [ ... ].\ntype TaskMaskResponse struct {\n\tData []Task `json:\"data,omitempty\"`\n}\n\n\/\/ TaskCreate represents posted data to be sent to task endpoint.\ntype TaskCreate struct {\n\tTask *Task `json:\"task,omitempty\"`\n}\n\n\/\/ Create a task.\n\/\/\n\/\/ Toggl API docs: https:\/\/github.com\/toggl\/toggl_api_docs\/blob\/master\/chapters\/tasks.md#actions-for-single-project-user\nfunc (s *TasksService) Create(t *Task) (*Task, error) {\n\tu := \"tasks\"\n\ttc := &TaskCreate{t}\n\treq, err := s.client.NewRequest(\"POST\", u, tc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := new(TaskResponse)\n\t_, err = s.client.Do(req, data)\n\n\treturn data.Data, err\n}\n\n\/\/ Get task details by task_id.\n\/\/\n\/\/ Toggl API docs: https:\/\/github.com\/toggl\/toggl_api_docs\/blob\/master\/chapters\/tasks.md#get-task-details\nfunc (s *TasksService) Get(id int) (*Task, error) {\n\tu := fmt.Sprintf(\"tasks\/%v\", id)\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := new(TaskResponse)\n\t_, err = s.client.Do(req, data)\n\n\treturn data.Data, err\n}\n\n\/\/ Update a task.\n\/\/\n\/\/ Toggl API docs: https:\/\/github.com\/toggl\/toggl_api_docs\/blob\/master\/chapters\/tasks.md#update-a-task\nfunc (s *TasksService) Update(t *Task) (*Task, error) {\n\tif t == nil {\n\t\treturn nil, errors.New(\"Task cannot be nil\")\n\t}\n\tif t.ID <= 0 {\n\t\treturn nil, errors.New(\"Invalid Task.ID\")\n\t}\n\n\tu := fmt.Sprintf(\"tasks\/%v\", t.ID)\n\n\ttc := &TaskCreate{t}\n\treq, err := s.client.NewRequest(\"PUT\", u, tc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := new(TaskResponse)\n\t_, err = s.client.Do(req, data)\n\n\treturn data.Data, err\n}\n\n\/\/ MassUpdate mass update tasks.\n\/\/\n\/\/ Toggl API docs: https:\/\/github.com\/toggl\/toggl_api_docs\/blob\/master\/chapters\/tasks.md#update-multiple-tasks\nfunc (s *TasksService) MassUpdate(ids string, t *Task) ([]Task, error) {\n\tu := fmt.Sprintf(\"tasks\/%v\", ids)\n\n\ttc := &TaskCreate{t}\n\treq, err := s.client.NewRequest(\"PUT\", u, tc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := new(TaskMaskResponse)\n\t_, err = s.client.Do(req, data)\n\n\treturn data.Data, err\n}\n\n\/\/ Delete a task.\n\/\/\n\/\/ Toggl API docs: https:\/\/github.com\/toggl\/toggl_api_docs\/blob\/master\/chapters\/tasks.md#delete-a-task\nfunc (s *TasksService) Delete(id int) error {\n\tu := fmt.Sprintf(\"tasks\/%v\", id)\n\treq, err := s.client.NewRequest(\"DELETE\", u, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = s.client.Do(req, nil)\n\treturn err\n}\n\n\/\/ MassDelete mass delete tasks.\n\/\/\n\/\/ Toggl API docs: https:\/\/github.com\/toggl\/toggl_api_docs\/blob\/master\/chapters\/tasks.md#delete-multiple-tasks\nfunc (s *TasksService) MassDelete(ids string) error {\n\tu := fmt.Sprintf(\"tasks\/%v\", ids)\n\treq, err := s.client.NewRequest(\"DELETE\", u, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = s.client.Do(req, nil)\n\treturn err\n}\n<commit_msg>Fixed invalid 'at' key's name.<commit_after>\/\/ Copyright 2013 The go-toggl AUTHORS. All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage toggl\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n)\n\n\/\/ TasksService handles communication with the tasks related\n\/\/ methods of the Toggl API.\n\/\/\n\/\/ Toggl API docs: https:\/\/github.com\/toggl\/toggl_api_docs\/blob\/master\/chapters\/tasks.md\ntype TasksService struct {\n\tclient *Client\n}\n\n\/\/ Task represents a task.\ntype Task struct {\n\tID int `json:\"id,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tProjectID int `json:\"pid,omitempty\"`\n\tUserID int `json:\"uid,omitempty\"`\n\tWorkspaceID int `json:\"wid,omitempty\"`\n\tEstimatedSeconds int `json:\"estimated_seconds,omitempty\"`\n\tActive bool `json:\"active,omitempty\"`\n\tAt *time.Time `json:\"at,omitempty\"`\n}\n\n\/\/ TaskResponse acts as a response wrapper where response returns\n\/\/ in format of \"data\": Tasks's object.\ntype TaskResponse struct {\n\tData *Task `json:\"data,omitempty\"`\n}\n\n\/\/ TaskMassResponse acts as a response wrapper where response returns\n\/\/ in format of \"data\": [ ... ].\ntype TaskMaskResponse struct {\n\tData []Task `json:\"data,omitempty\"`\n}\n\n\/\/ TaskCreate represents posted data to be sent to task endpoint.\ntype TaskCreate struct {\n\tTask *Task `json:\"task,omitempty\"`\n}\n\n\/\/ Create a task.\n\/\/\n\/\/ Toggl API docs: https:\/\/github.com\/toggl\/toggl_api_docs\/blob\/master\/chapters\/tasks.md#actions-for-single-project-user\nfunc (s *TasksService) Create(t *Task) (*Task, error) {\n\tu := \"tasks\"\n\ttc := &TaskCreate{t}\n\treq, err := s.client.NewRequest(\"POST\", u, tc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := new(TaskResponse)\n\t_, err = s.client.Do(req, data)\n\n\treturn data.Data, err\n}\n\n\/\/ Get task details by task_id.\n\/\/\n\/\/ Toggl API docs: https:\/\/github.com\/toggl\/toggl_api_docs\/blob\/master\/chapters\/tasks.md#get-task-details\nfunc (s *TasksService) Get(id int) (*Task, error) {\n\tu := fmt.Sprintf(\"tasks\/%v\", id)\n\treq, err := s.client.NewRequest(\"GET\", u, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := new(TaskResponse)\n\t_, err = s.client.Do(req, data)\n\n\treturn data.Data, err\n}\n\n\/\/ Update a task.\n\/\/\n\/\/ Toggl API docs: https:\/\/github.com\/toggl\/toggl_api_docs\/blob\/master\/chapters\/tasks.md#update-a-task\nfunc (s *TasksService) Update(t *Task) (*Task, error) {\n\tif t == nil {\n\t\treturn nil, errors.New(\"Task cannot be nil\")\n\t}\n\tif t.ID <= 0 {\n\t\treturn nil, errors.New(\"Invalid Task.ID\")\n\t}\n\n\tu := fmt.Sprintf(\"tasks\/%v\", t.ID)\n\n\ttc := &TaskCreate{t}\n\treq, err := s.client.NewRequest(\"PUT\", u, tc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := new(TaskResponse)\n\t_, err = s.client.Do(req, data)\n\n\treturn data.Data, err\n}\n\n\/\/ MassUpdate mass update tasks.\n\/\/\n\/\/ Toggl API docs: https:\/\/github.com\/toggl\/toggl_api_docs\/blob\/master\/chapters\/tasks.md#update-multiple-tasks\nfunc (s *TasksService) MassUpdate(ids string, t *Task) ([]Task, error) {\n\tu := fmt.Sprintf(\"tasks\/%v\", ids)\n\n\ttc := &TaskCreate{t}\n\treq, err := s.client.NewRequest(\"PUT\", u, tc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := new(TaskMaskResponse)\n\t_, err = s.client.Do(req, data)\n\n\treturn data.Data, err\n}\n\n\/\/ Delete a task.\n\/\/\n\/\/ Toggl API docs: https:\/\/github.com\/toggl\/toggl_api_docs\/blob\/master\/chapters\/tasks.md#delete-a-task\nfunc (s *TasksService) Delete(id int) error {\n\tu := fmt.Sprintf(\"tasks\/%v\", id)\n\treq, err := s.client.NewRequest(\"DELETE\", u, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = s.client.Do(req, nil)\n\treturn err\n}\n\n\/\/ MassDelete mass delete tasks.\n\/\/\n\/\/ Toggl API docs: https:\/\/github.com\/toggl\/toggl_api_docs\/blob\/master\/chapters\/tasks.md#delete-multiple-tasks\nfunc (s *TasksService) MassDelete(ids string) error {\n\tu := fmt.Sprintf(\"tasks\/%v\", ids)\n\treq, err := s.client.NewRequest(\"DELETE\", u, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = s.client.Do(req, nil)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ EVH is designed to be a single-use file transfer system. Its purpose is to replace\n\/\/ aging methods of sharing files such as FTP. With the advent of services like\n\/\/ DropBox, Box, Google Drive and the like, this type of service is becoming more\n\/\/ commonplace EVH has some differentiating features that make it an especially\n\/\/ good tool for corporations and\/or home use.\n\/\/\n\/\/ EVH runs in two modes: server and client. Server hosts a web server interface for\n\/\/ uploading and downloading files. The Client is for uploading only and runs\n\/\/ in a terminal. This app is designed to run on all platforms that Go supports.\npackage main\n\nimport (\n\t\"flag\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n)\n\n\/\/ Flags\nvar ConfigFileFlag string\nvar DstEmailFlag string\nvar ExpirationFlag string\nvar FileDescrFlag string\nvar FilesFieldFlag string\nvar ProgressFlag bool\nvar ServerFlag bool\nvar SrcEmailFlag string\nvar UrlFlag string\n\n\/\/ Global Variables\nvar UploadUrlPath = \"\/upload\/\"\nvar DownloadUrlPath = \"\/download\/\"\nvar Files []string\nvar HttpProto = \"http\"\nvar SiteDown bool\nvar Templates *template.Template\n\n\/\/ Constants\nconst VERSION = \"2.0.7\"\nconst TimeLayout = \"Jan 2, 2006 at 3:04pm (MST)\"\n\nfunc init() {\n\tflag.StringVar(&ConfigFileFlag, \"c\", \"\", \"Location of the Configuration file\")\n\tflag.BoolVar(&ServerFlag, \"server\", false, \"Listen for incoming file uploads\")\n\n\t\/\/ Client flags\n\tflag.StringVar(&UrlFlag, \"url\", \"\", \"Remote server URL to send files to (client only)\")\n\tflag.StringVar(&FilesFieldFlag, \"field\", \"\", \"Field name of the form (client only)\")\n\tflag.StringVar(&SrcEmailFlag, \"from\", \"\", \"Email address of uploader (client only)\")\n\tflag.StringVar(&DstEmailFlag, \"to\", \"\", \"Comma separated set of email address(es) of file recipient(s) (client only)\")\n\tflag.StringVar(&FileDescrFlag, \"description\", \"\", \"File desription (use quotes) (client only)\")\n\tflag.BoolVar(&ProgressFlag, \"progress\", true, \"Show progress bar during upload (client only)\")\n\tflag.StringVar(&ExpirationFlag, \"expires\", \"\", \"Example 1:d for 1 day (client only)\")\n}\n\nfunc homeHandler(w http.ResponseWriter, r *http.Request) {\n\t\/\/ Redirect to SSL if enabled\n\tif r.TLS == nil && Config.Server.Ssl {\n\t\tredirectToSsl(w, r)\n\t\treturn\n\t}\n\n\tvar page = NewPage()\n\tDisplayPage(w, r, \"home\", page)\n}\n\nfunc adminHandler(w http.ResponseWriter, r *http.Request) {\n\t\/\/ Redirect to SSL if enabled\n\tif r.TLS == nil && Config.Server.Ssl {\n\t\tredirectToSsl(w, r)\n\t\treturn\n\t}\n\n\tvar page = NewPage()\n\tif Config.Server.AllowAdmin {\n\t\tpage.TrackerOfTrackers = NewTrackerOfTrackers()\n\t} else {\n\t\tpage.Message = \"Access denied.\"\n\t}\n\n\tDisplayPage(w, r, \"admin\", page)\n}\n\nfunc redirectToSsl(w http.ResponseWriter, r *http.Request) {\n\thttp.Redirect(w, r, \"https:\/\/\"+Config.Server.Address+\":\"+Config.Server.SslPort+r.RequestURI, http.StatusTemporaryRedirect)\n\treturn\n}\n\nfunc main() {\n\tflag.Parse()\n\n\t\/\/ Load in our Config\n\tConfig = NewConfig(ConfigFileFlag)\n\tConfig.ImportFlags()\n\n\tif ServerFlag {\n\t\t\/\/ Final sanity check\n\t\tif Config.Server.Assets == \"\" {\n\t\t\tlog.Fatal(\"ERROR: Cannot continue without specifying assets path\")\n\t\t}\n\t\tif Config.Server.Templates == \"\" {\n\t\t\tlog.Fatal(\"ERROR: Cannot continue without specifying templates path\")\n\t\t}\n\t\tif Config.Server.ListenAddr == \"\" {\n\t\t\tlog.Fatal(\"ERROR: Cannot continue without specifying listenaddr value\")\n\t\t}\n\t\tif Config.Server.Mailserver == \"\" {\n\t\t\tlog.Println(\"WARNING: cannot send emails, mailserver not set\")\n\t\t}\n\n\t\t\/\/ Set so all generated URLs use https if enabled\n\t\tif Config.Server.Ssl {\n\t\t\tHttpProto = \"https\"\n\t\t}\n\n\t\t\/\/ Setup our assets dir (if it don't already exist)\n\t\terr := os.MkdirAll(Config.Server.Assets, 0700)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Cannot setup assetdir as needed: \" + err.Error())\n\t\t}\n\n\t\t\/\/ Parse our html templates\n\t\tgo RefreshTemplates()\n\t\tgo ScrubDownloads()\n\n\t\t\/\/ Register our handler functions\n\t\thttp.HandleFunc(UploadUrlPath, uploadHandler)\n\t\thttp.HandleFunc(DownloadUrlPath, assetHandler)\n\t\thttp.HandleFunc(\"\/admin\/\", BasicAuth(adminHandler))\n\t\thttp.HandleFunc(\"\/\", homeHandler)\n\n\t\t\/\/ Listen\n\t\tlog.Println(\"Listening...\")\n\n\t\t\/\/ Spawn HTTPS listener in another thread\n\t\tgo func() {\n\t\t\tif Config.Server.Ssl == false || Config.Server.SslPort == \"\" {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvar addrSsl = Config.Server.ListenAddr + \":\" + Config.Server.SslPort\n\t\t\tlistenErrSsl := http.ListenAndServeTLS(addrSsl, Config.Server.CertFile, Config.Server.KeyFile, nil)\n\t\t\tif listenErrSsl != nil {\n\t\t\t\tlog.Fatal(\"ERROR: ssl listen problem: \" + listenErrSsl.Error())\n\t\t\t}\n\t\t}()\n\n\t\t\/\/ Start non-SSL listener\n\t\tvar addrNonSsl = Config.Server.ListenAddr + \":\" + Config.Server.NonSslPort\n\t\tlistenErr := http.ListenAndServe(addrNonSsl, nil)\n\t\tif listenErr != nil {\n\t\t\tlog.Fatal(\"ERROR: non-ssl listen problem: \" + listenErr.Error())\n\t\t}\n\t} else {\n\t\t\/\/ Final sanity check\n\t\tif Config.Client.DestEmail == \"\" {\n\t\t\tlog.Println(\"WARNING: no -destemail value set, cannot send reciever an email\")\n\t\t}\n\t\tif Config.Client.Email == \"\" {\n\t\t\tlog.Println(\"WARNING: no -email value set, cannot send email to uploader\")\n\t\t}\n\t\tif Config.Client.Field == \"\" {\n\t\t\tlog.Println(\"WARNING: no -field value set, using \\\"file\\\" instead\")\n\t\t\tConfig.Client.Field = \"file\"\n\t\t}\n\t\tif Config.Client.Url == \"\" {\n\t\t\tlog.Fatal(\"ERROR: Cannot continue without specifying -url value\")\n\t\t}\n\n\t\t\/\/ All filenames are unflagged arguments, loop through them and uplod the file(s)\n\t\tfor _, fname := range flag.Args() {\n\t\t\tfi, err := os.Stat(fname)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"WARNING: Cannot read file, skipping \", fname, \": \", err.Error())\n\t\t\t} else {\n\t\t\t\tif fi.Mode().IsRegular() {\n\t\t\t\t\tFiles = append(Files, fname)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tUpload(Files)\n\t}\n}\n<commit_msg>Version bump to 2.1.0<commit_after>\/\/ EVH is designed to be a single-use file transfer system. Its purpose is to replace\n\/\/ aging methods of sharing files such as FTP. With the advent of services like\n\/\/ DropBox, Box, Google Drive and the like, this type of service is becoming more\n\/\/ commonplace EVH has some differentiating features that make it an especially\n\/\/ good tool for corporations and\/or home use.\n\/\/\n\/\/ EVH runs in two modes: server and client. Server hosts a web server interface for\n\/\/ uploading and downloading files. The Client is for uploading only and runs\n\/\/ in a terminal. This app is designed to run on all platforms that Go supports.\npackage main\n\nimport (\n\t\"flag\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n)\n\n\/\/ Flags\nvar ConfigFileFlag string\nvar DstEmailFlag string\nvar ExpirationFlag string\nvar FileDescrFlag string\nvar FilesFieldFlag string\nvar ProgressFlag bool\nvar ServerFlag bool\nvar SrcEmailFlag string\nvar UrlFlag string\n\n\/\/ Global Variables\nvar UploadUrlPath = \"\/upload\/\"\nvar DownloadUrlPath = \"\/download\/\"\nvar Files []string\nvar HttpProto = \"http\"\nvar SiteDown bool\nvar Templates *template.Template\n\n\/\/ Constants\nconst VERSION = \"2.1.0\"\nconst TimeLayout = \"Jan 2, 2006 at 3:04pm (MST)\"\n\nfunc init() {\n\tflag.StringVar(&ConfigFileFlag, \"c\", \"\", \"Location of the Configuration file\")\n\tflag.BoolVar(&ServerFlag, \"server\", false, \"Listen for incoming file uploads\")\n\n\t\/\/ Client flags\n\tflag.StringVar(&UrlFlag, \"url\", \"\", \"Remote server URL to send files to (client only)\")\n\tflag.StringVar(&FilesFieldFlag, \"field\", \"\", \"Field name of the form (client only)\")\n\tflag.StringVar(&SrcEmailFlag, \"from\", \"\", \"Email address of uploader (client only)\")\n\tflag.StringVar(&DstEmailFlag, \"to\", \"\", \"Comma separated set of email address(es) of file recipient(s) (client only)\")\n\tflag.StringVar(&FileDescrFlag, \"description\", \"\", \"File desription (use quotes) (client only)\")\n\tflag.BoolVar(&ProgressFlag, \"progress\", true, \"Show progress bar during upload (client only)\")\n\tflag.StringVar(&ExpirationFlag, \"expires\", \"\", \"Example 1:d for 1 day (client only)\")\n}\n\nfunc homeHandler(w http.ResponseWriter, r *http.Request) {\n\t\/\/ Redirect to SSL if enabled\n\tif r.TLS == nil && Config.Server.Ssl {\n\t\tredirectToSsl(w, r)\n\t\treturn\n\t}\n\n\tvar page = NewPage()\n\tDisplayPage(w, r, \"home\", page)\n}\n\nfunc adminHandler(w http.ResponseWriter, r *http.Request) {\n\t\/\/ Redirect to SSL if enabled\n\tif r.TLS == nil && Config.Server.Ssl {\n\t\tredirectToSsl(w, r)\n\t\treturn\n\t}\n\n\tvar page = NewPage()\n\tif Config.Server.AllowAdmin {\n\t\tpage.TrackerOfTrackers = NewTrackerOfTrackers()\n\t} else {\n\t\tpage.Message = \"Access denied.\"\n\t}\n\n\tDisplayPage(w, r, \"admin\", page)\n}\n\nfunc redirectToSsl(w http.ResponseWriter, r *http.Request) {\n\thttp.Redirect(w, r, \"https:\/\/\"+Config.Server.Address+\":\"+Config.Server.SslPort+r.RequestURI, http.StatusTemporaryRedirect)\n\treturn\n}\n\nfunc main() {\n\tflag.Parse()\n\n\t\/\/ Load in our Config\n\tConfig = NewConfig(ConfigFileFlag)\n\tConfig.ImportFlags()\n\n\tif ServerFlag {\n\t\t\/\/ Final sanity check\n\t\tif Config.Server.Assets == \"\" {\n\t\t\tlog.Fatal(\"ERROR: Cannot continue without specifying assets path\")\n\t\t}\n\t\tif Config.Server.Templates == \"\" {\n\t\t\tlog.Fatal(\"ERROR: Cannot continue without specifying templates path\")\n\t\t}\n\t\tif Config.Server.ListenAddr == \"\" {\n\t\t\tlog.Fatal(\"ERROR: Cannot continue without specifying listenaddr value\")\n\t\t}\n\t\tif Config.Server.Mailserver == \"\" {\n\t\t\tlog.Println(\"WARNING: cannot send emails, mailserver not set\")\n\t\t}\n\n\t\t\/\/ Set so all generated URLs use https if enabled\n\t\tif Config.Server.Ssl {\n\t\t\tHttpProto = \"https\"\n\t\t}\n\n\t\t\/\/ Setup our assets dir (if it don't already exist)\n\t\terr := os.MkdirAll(Config.Server.Assets, 0700)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Cannot setup assetdir as needed: \" + err.Error())\n\t\t}\n\n\t\t\/\/ Parse our html templates\n\t\tgo RefreshTemplates()\n\t\tgo ScrubDownloads()\n\n\t\t\/\/ Register our handler functions\n\t\thttp.HandleFunc(UploadUrlPath, uploadHandler)\n\t\thttp.HandleFunc(DownloadUrlPath, assetHandler)\n\t\thttp.HandleFunc(\"\/admin\/\", BasicAuth(adminHandler))\n\t\thttp.HandleFunc(\"\/\", homeHandler)\n\n\t\t\/\/ Listen\n\t\tlog.Println(\"Listening...\")\n\n\t\t\/\/ Spawn HTTPS listener in another thread\n\t\tgo func() {\n\t\t\tif Config.Server.Ssl == false || Config.Server.SslPort == \"\" {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvar addrSsl = Config.Server.ListenAddr + \":\" + Config.Server.SslPort\n\t\t\tlistenErrSsl := http.ListenAndServeTLS(addrSsl, Config.Server.CertFile, Config.Server.KeyFile, nil)\n\t\t\tif listenErrSsl != nil {\n\t\t\t\tlog.Fatal(\"ERROR: ssl listen problem: \" + listenErrSsl.Error())\n\t\t\t}\n\t\t}()\n\n\t\t\/\/ Start non-SSL listener\n\t\tvar addrNonSsl = Config.Server.ListenAddr + \":\" + Config.Server.NonSslPort\n\t\tlistenErr := http.ListenAndServe(addrNonSsl, nil)\n\t\tif listenErr != nil {\n\t\t\tlog.Fatal(\"ERROR: non-ssl listen problem: \" + listenErr.Error())\n\t\t}\n\t} else {\n\t\t\/\/ Final sanity check\n\t\tif Config.Client.DestEmail == \"\" {\n\t\t\tlog.Println(\"WARNING: no -destemail value set, cannot send reciever an email\")\n\t\t}\n\t\tif Config.Client.Email == \"\" {\n\t\t\tlog.Println(\"WARNING: no -email value set, cannot send email to uploader\")\n\t\t}\n\t\tif Config.Client.Field == \"\" {\n\t\t\tlog.Println(\"WARNING: no -field value set, using \\\"file\\\" instead\")\n\t\t\tConfig.Client.Field = \"file\"\n\t\t}\n\t\tif Config.Client.Url == \"\" {\n\t\t\tlog.Fatal(\"ERROR: Cannot continue without specifying -url value\")\n\t\t}\n\n\t\t\/\/ All filenames are unflagged arguments, loop through them and uplod the file(s)\n\t\tfor _, fname := range flag.Args() {\n\t\t\tfi, err := os.Stat(fname)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"WARNING: Cannot read file, skipping \", fname, \": \", err.Error())\n\t\t\t} else {\n\t\t\t\tif fi.Mode().IsRegular() {\n\t\t\t\t\tFiles = append(Files, fname)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tUpload(Files)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The etcd Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage stats\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/raft\"\n)\n\n\/\/ ServerStats encapsulates various statistics about an EtcdServer and its\n\/\/ communication with other members of the cluster\ntype ServerStats struct {\n\tserverStats\n\tsync.Mutex\n}\n\nfunc NewServerStats(name, id string) *ServerStats {\n\tss := &ServerStats{\n\t\tserverStats: serverStats{\n\t\t\tName: name,\n\t\t\tID: id,\n\t\t},\n\t}\n\tnow := time.Now()\n\tss.StartTime = now\n\tss.LeaderInfo.StartTime = now\n\tss.sendRateQueue = &statsQueue{back: -1}\n\tss.recvRateQueue = &statsQueue{back: -1}\n\treturn ss\n}\n\ntype serverStats struct {\n\tName string `json:\"name\"`\n\t\/\/ ID is the raft ID of the node.\n\t\/\/ TODO(jonboulle): use ID instead of name?\n\tID string `json:\"id\"`\n\tState raft.StateType `json:\"state\"`\n\tStartTime time.Time `json:\"startTime\"`\n\n\tLeaderInfo struct {\n\t\tName string `json:\"leader\"`\n\t\tUptime string `json:\"uptime\"`\n\t\tStartTime time.Time `json:\"startTime\"`\n\t} `json:\"leaderInfo\"`\n\n\tRecvAppendRequestCnt uint64 `json:\"recvAppendRequestCnt,\"`\n\tRecvingPkgRate float64 `json:\"recvPkgRate,omitempty\"`\n\tRecvingBandwidthRate float64 `json:\"recvBandwidthRate,omitempty\"`\n\n\tSendAppendRequestCnt uint64 `json:\"sendAppendRequestCnt\"`\n\tSendingPkgRate float64 `json:\"sendPkgRate,omitempty\"`\n\tSendingBandwidthRate float64 `json:\"sendBandwidthRate,omitempty\"`\n\n\tsendRateQueue *statsQueue\n\trecvRateQueue *statsQueue\n}\n\nfunc (ss *ServerStats) JSON() []byte {\n\tss.Lock()\n\tstats := ss.serverStats\n\tss.Unlock()\n\tstats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String()\n\tstats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate()\n\tstats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate()\n\tb, err := json.Marshal(stats)\n\t\/\/ TODO(jonboulle): appropriate error handling?\n\tif err != nil {\n\t\tlog.Printf(\"stats: error marshalling server stats: %v\", err)\n\t}\n\treturn b\n}\n\n\/\/ RecvAppendReq updates the ServerStats in response to an AppendRequest\n\/\/ from the given leader being received\nfunc (ss *ServerStats) RecvAppendReq(leader string, reqSize int) {\n\tss.Lock()\n\tdefer ss.Unlock()\n\n\tnow := time.Now()\n\n\tss.State = raft.StateFollower\n\tif leader != ss.LeaderInfo.Name {\n\t\tss.LeaderInfo.Name = leader\n\t\tss.LeaderInfo.StartTime = now\n\t}\n\n\tss.recvRateQueue.Insert(\n\t\t&RequestStats{\n\t\t\tSendingTime: now,\n\t\t\tSize: reqSize,\n\t\t},\n\t)\n\tss.RecvAppendRequestCnt++\n}\n\n\/\/ SendAppendReq updates the ServerStats in response to an AppendRequest\n\/\/ being sent by this server\nfunc (ss *ServerStats) SendAppendReq(reqSize int) {\n\tss.Lock()\n\tdefer ss.Unlock()\n\n\tss.becomeLeader()\n\n\tss.sendRateQueue.Insert(\n\t\t&RequestStats{\n\t\t\tSendingTime: time.Now(),\n\t\t\tSize: reqSize,\n\t\t},\n\t)\n\n\tss.SendAppendRequestCnt++\n}\n\nfunc (ss *ServerStats) BecomeLeader() {\n\tss.Lock()\n\tdefer ss.Unlock()\n\tss.becomeLeader()\n}\n\nfunc (ss *ServerStats) becomeLeader() {\n\tif ss.State != raft.StateLeader {\n\t\tss.State = raft.StateLeader\n\t\tss.LeaderInfo.Name = ss.ID\n\t\tss.LeaderInfo.StartTime = time.Now()\n\t}\n}\n<commit_msg>etcdserver\/stats: fix stats data race.<commit_after>\/\/ Copyright 2015 The etcd Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage stats\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/raft\"\n)\n\n\/\/ ServerStats encapsulates various statistics about an EtcdServer and its\n\/\/ communication with other members of the cluster\ntype ServerStats struct {\n\tserverStats\n\tsync.Mutex\n}\n\nfunc NewServerStats(name, id string) *ServerStats {\n\tss := &ServerStats{\n\t\tserverStats: serverStats{\n\t\t\tName: name,\n\t\t\tID: id,\n\t\t},\n\t}\n\tnow := time.Now()\n\tss.StartTime = now\n\tss.LeaderInfo.StartTime = now\n\tss.sendRateQueue = &statsQueue{back: -1}\n\tss.recvRateQueue = &statsQueue{back: -1}\n\treturn ss\n}\n\ntype serverStats struct {\n\tName string `json:\"name\"`\n\t\/\/ ID is the raft ID of the node.\n\t\/\/ TODO(jonboulle): use ID instead of name?\n\tID string `json:\"id\"`\n\tState raft.StateType `json:\"state\"`\n\tStartTime time.Time `json:\"startTime\"`\n\n\tLeaderInfo struct {\n\t\tName string `json:\"leader\"`\n\t\tUptime string `json:\"uptime\"`\n\t\tStartTime time.Time `json:\"startTime\"`\n\t} `json:\"leaderInfo\"`\n\n\tRecvAppendRequestCnt uint64 `json:\"recvAppendRequestCnt,\"`\n\tRecvingPkgRate float64 `json:\"recvPkgRate,omitempty\"`\n\tRecvingBandwidthRate float64 `json:\"recvBandwidthRate,omitempty\"`\n\n\tSendAppendRequestCnt uint64 `json:\"sendAppendRequestCnt\"`\n\tSendingPkgRate float64 `json:\"sendPkgRate,omitempty\"`\n\tSendingBandwidthRate float64 `json:\"sendBandwidthRate,omitempty\"`\n\n\tsendRateQueue *statsQueue\n\trecvRateQueue *statsQueue\n}\n\nfunc (ss *ServerStats) JSON() []byte {\n\tss.Lock()\n\tstats := ss.serverStats\n\tstats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate()\n\tstats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate()\n\tss.Unlock()\n\tstats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String()\n\tb, err := json.Marshal(stats)\n\t\/\/ TODO(jonboulle): appropriate error handling?\n\tif err != nil {\n\t\tlog.Printf(\"stats: error marshalling server stats: %v\", err)\n\t}\n\treturn b\n}\n\n\/\/ RecvAppendReq updates the ServerStats in response to an AppendRequest\n\/\/ from the given leader being received\nfunc (ss *ServerStats) RecvAppendReq(leader string, reqSize int) {\n\tss.Lock()\n\tdefer ss.Unlock()\n\n\tnow := time.Now()\n\n\tss.State = raft.StateFollower\n\tif leader != ss.LeaderInfo.Name {\n\t\tss.LeaderInfo.Name = leader\n\t\tss.LeaderInfo.StartTime = now\n\t}\n\n\tss.recvRateQueue.Insert(\n\t\t&RequestStats{\n\t\t\tSendingTime: now,\n\t\t\tSize: reqSize,\n\t\t},\n\t)\n\tss.RecvAppendRequestCnt++\n}\n\n\/\/ SendAppendReq updates the ServerStats in response to an AppendRequest\n\/\/ being sent by this server\nfunc (ss *ServerStats) SendAppendReq(reqSize int) {\n\tss.Lock()\n\tdefer ss.Unlock()\n\n\tss.becomeLeader()\n\n\tss.sendRateQueue.Insert(\n\t\t&RequestStats{\n\t\t\tSendingTime: time.Now(),\n\t\t\tSize: reqSize,\n\t\t},\n\t)\n\n\tss.SendAppendRequestCnt++\n}\n\nfunc (ss *ServerStats) BecomeLeader() {\n\tss.Lock()\n\tdefer ss.Unlock()\n\tss.becomeLeader()\n}\n\nfunc (ss *ServerStats) becomeLeader() {\n\tif ss.State != raft.StateLeader {\n\t\tss.State = raft.StateLeader\n\t\tss.LeaderInfo.Name = ss.ID\n\t\tss.LeaderInfo.StartTime = time.Now()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 The Gorilla Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage scanner\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unicode\/utf8\"\n)\n\n\/\/ tokenType identifies the type of lexical tokens.\ntype tokenType int\n\n\/\/ String returns a string representation of the token type.\nfunc (t tokenType) String() string {\n\treturn tokenNames[t]\n}\n\n\/\/ Token represents a token and the corresponding string.\ntype Token struct {\n\tType tokenType\n\tValue string\n\tLine int\n\tColumn int\n}\n\n\/\/ String returns a string representation of the token.\nfunc (t *Token) String() string {\n\tif len(t.Value) > 10 {\n\t\treturn fmt.Sprintf(\"%s (line: %d, column: %d): %.10q...\",\n\t\t\tt.Type, t.Line, t.Column, t.Value)\n\t}\n\treturn fmt.Sprintf(\"%s (line: %d, column: %d): %q\",\n\t\tt.Type, t.Line, t.Column, t.Value)\n}\n\n\/\/ All tokens -----------------------------------------------------------------\n\n\/\/ The complete list of tokens in CSS3.\nconst (\n\t\/\/ Scanner flags.\n\tTokenError tokenType = iota\n\tTokenEOF\n\t\/\/ From now on, only tokens from the CSS specification.\n\tTokenIdent\n\tTokenAtKeyword\n\tTokenString\n\tTokenHash\n\tTokenNumber\n\tTokenPercentage\n\tTokenDimension\n\tTokenURI\n\tTokenUnicodeRange\n\tTokenCDO\n\tTokenCDC\n\tTokenS\n\tTokenComment\n\tTokenFunction\n\tTokenIncludes\n\tTokenDashMatch\n\tTokenPrefixMatch\n\tTokenSuffixMatch\n\tTokenSubstringMatch\n\tTokenChar\n\tTokenBOM\n)\n\n\/\/ tokenNames maps tokenType's to their names. Used for conversion to string.\nvar tokenNames = map[tokenType]string{\n\tTokenError: \"error\",\n\tTokenEOF: \"EOF\",\n\tTokenIdent: \"IDENT\",\n\tTokenAtKeyword: \"ATKEYWORD\",\n\tTokenString: \"STRING\",\n\tTokenHash: \"HASH\",\n\tTokenNumber: \"NUMBER\",\n\tTokenPercentage: \"PERCENTAGE\",\n\tTokenDimension: \"DIMENSION\",\n\tTokenURI: \"URI\",\n\tTokenUnicodeRange: \"UNICODE-RANGE\",\n\tTokenCDO: \"CDO\",\n\tTokenCDC: \"CDC\",\n\tTokenS: \"S\",\n\tTokenComment: \"COMMENT\",\n\tTokenFunction: \"FUNCTION\",\n\tTokenIncludes: \"INCLUDES\",\n\tTokenDashMatch: \"DASHMATCH\",\n\tTokenPrefixMatch: \"PREFIXMATCH\",\n\tTokenSuffixMatch: \"SUFFIXMATCH\",\n\tTokenSubstringMatch: \"SUBSTRINGMATCH\",\n\tTokenChar: \"CHAR\",\n\tTokenBOM: \"BOM\",\n}\n\n\/\/ Macros and productions -----------------------------------------------------\n\/\/ http:\/\/www.w3.org\/TR\/css3-syntax\/#tokenization\n\nvar macroRegexp = regexp.MustCompile(`\\{[a-z]+\\}`)\n\n\/\/ macros maps macro names to patterns to be expanded.\nvar macros = map[string]string{\n\t\/\/ must be escaped: `\\.+*?()|[]{}^$`\n\t\"ident\": `-?{nmstart}{nmchar}*`,\n\t\"name\": `{nmchar}+`,\n\t\"nmstart\": `[a-zA-Z_]|{nonascii}|{escape}`,\n\t\"nonascii\": \"[\\u0080-\\uD7FF\\uE000-\\uFFFD\\U00010000-\\U0010FFFF]\",\n\t\"unicode\": `\\\\[0-9a-fA-F]{1,6}{wc}?`,\n\t\"escape\": \"{unicode}|\\\\[\\u0020-\\u007E\\u0080-\\uD7FF\\uE000-\\uFFFD\\U00010000-\\U0010FFFF]\",\n\t\"nmchar\": `[a-zA-Z0-9_-]|{nonascii}|{escape}`,\n\t\"num\": `[0-9]+|[0-9]*\\.[0-9]+`,\n\t\"string\": `\"(?:{stringchar}|')*\"|'(?:{stringchar}|\")*'`,\n\t\"stringchar\": `{urlchar}|[ ]|\\\\{nl}`,\n\t\"urlchar\": \"[\\u0009\\u0021\\u0023-\\u0026\\u0027-\\u007E]|{nonascii}|{escape}\",\n\t\"nl\": `[\\n\\r\\f]|\\r\\n`,\n\t\"w\": `{wc}*`,\n\t\"wc\": `[\\t\\n\\f\\r ]`,\n}\n\n\/\/ productions maps the list of tokens to patterns to be expanded.\nvar productions = map[tokenType]string{\n\tTokenIdent: `{ident}`,\n\tTokenAtKeyword: `@{ident}`,\n\tTokenString: `{string}`,\n\tTokenHash: `#{name}`,\n\tTokenNumber: `{num}`,\n\tTokenPercentage: `{num}%`,\n\tTokenDimension: `{num}{ident}`,\n\tTokenURI: `url\\({w}(?:{string}|{urlchar}*){w}\\)`,\n\tTokenUnicodeRange: `U\\+[0-9A-F\\?]{1,6}(?:-[0-9A-F]{1,6})?`,\n\tTokenCDO: `<!--`,\n\tTokenCDC: `-->`,\n\tTokenS: `{wc}+`,\n\tTokenComment: `\/\\*[^\\*]*[\\*]+(?:[^\/][^\\*]*[\\*]+)*\/`,\n\tTokenFunction: `{ident}\\(`,\n\tTokenIncludes: `~=`,\n\tTokenDashMatch: `\\|=`,\n\tTokenPrefixMatch: `\\^=`,\n\tTokenSuffixMatch: `\\$=`,\n\tTokenSubstringMatch: `\\*=`,\n\tTokenChar: `[^\"']`,\n\tTokenBOM: \"\\uFEFF\",\n}\n\n\/\/ matchers maps the list of tokens to compiled regular expressions.\n\/\/\n\/\/ The map is filled on init() using the macros and productions defined in\n\/\/ the CSS specification.\nvar matchers = map[tokenType]*regexp.Regexp{}\n\nvar matchOrder = []tokenType{\n\t\/\/ The ones scanned using first-char shortcut are commented out.\n\t\/\/TokenS,\n\tTokenURI,\n\tTokenFunction,\n\tTokenUnicodeRange,\n\tTokenIdent,\n\tTokenDimension,\n\tTokenPercentage,\n\tTokenNumber,\n\t\/\/TokenHash,\n\tTokenComment,\n\t\/\/TokenString,\n\tTokenAtKeyword,\n\t\/\/TokenIncludes,\n\t\/\/TokenDashMatch,\n\t\/\/TokenPrefixMatch,\n\t\/\/TokenSuffixMatch,\n\t\/\/TokenSubstringMatch,\n\t\/\/TokenCDO,\n\tTokenCDC,\n\tTokenChar,\n\t\/\/TokenBOM\n}\n\nfunc init() {\n\t\/\/ replace macros and compile regexps for productions.\n\treplaceMacro := func(s string) string {\n\t\treturn \"(?:\" + macros[s[1:len(s)-1]] + \")\"\n\t}\n\tfor t, s := range productions {\n\t\tfor macroRegexp.MatchString(s) {\n\t\t\ts = macroRegexp.ReplaceAllStringFunc(s, replaceMacro)\n\t\t}\n\t\tmatchers[t] = regexp.MustCompile(\"^(?:\" + s + \")\")\n\t}\n}\n\n\/\/ Scanner --------------------------------------------------------------------\n\n\/\/ New returns a new CSS scanner for the given input.\nfunc New(input string) *Scanner {\n\t\/\/ Normalize newlines.\n\tinput = strings.Replace(input, \"\\r\\n\", \"\\n\", -1)\n\treturn &Scanner{\n\t\tinput: input,\n\t\tline: 1,\n\t\tcolumn: 1,\n\t}\n}\n\n\/\/ Scanner scans an input and emits tokens following the CSS3 specification.\ntype Scanner struct {\n\tinput string\n\tpos int\n\tline int\n\tcolumn int\n\tlast *Token\n}\n\n\/\/ Next returns the next token from the input.\n\/\/\n\/\/ At the end of the input the token type is TokenEOF.\n\/\/\n\/\/ If the input can't be tokenized the token type is TokenError. This occurs\n\/\/ in case of unclosed quotation marks or comments.\nfunc (s *Scanner) Next() *Token {\n\tif s.last != nil {\n\t\treturn s.last\n\t}\n\tif s.pos >= len(s.input) {\n\t\ts.last = &Token{TokenEOF, \"\", -1, -1}\n\t\treturn s.last\n\t}\n\tinput := s.input[s.pos:]\n\tif s.pos == 0 {\n\t\t\/\/ Test BOM only at the beginning of the file.\n\t\tif strings.HasPrefix(input, \"\\uFEFF\") {\n\t\t\treturn s.emitToken(TokenBOM, \"\\uFEFF\")\n\t\t}\n\t}\n\t\/\/ There's a lot we can guess based on the current rune so we'll take this\n\t\/\/ shortcut before testing multiple regexp's.\n\tr := input[0]\n\tswitch r {\n\tcase '\\t', '\\n', '\\f', '\\r', ' ':\n\t\t\/\/ Whitespace.\n\t\treturn s.emitToken(TokenS, matchers[TokenS].FindString(input))\n\tcase '.':\n\t\t\/\/ Dot is too common to not have a quick check.\n\t\t\/\/ We'll test if this is a Char; if it is followed by a number it is a\n\t\t\/\/ dimension\/percentage\/number, and this will be matched later.\n\t\tif len(input) > 1 && !unicode.IsDigit(rune(input[1])) {\n\t\t\treturn s.emitToken(TokenChar, \".\")\n\t\t}\n\tcase '#':\n\t\t\/\/ Hash is also a common one. If the regexp doesn't match it is a Char.\n\t\tif match := matchers[TokenHash].FindString(input); match != \"\" {\n\t\t\treturn s.emitToken(TokenHash, match)\n\t\t}\n\t\treturn s.emitToken(TokenChar, \"#\")\n\tcase ':', ',', ';', '%', '&', '+', '=', '>', '(', ')', '[', ']', '{', '}':\n\t\t\/\/ Other common chars.\n\t\treturn s.emitToken(TokenChar, string(r))\n\tcase '\"', '\\'':\n\t\t\/\/ String or error.\n\t\tmatch := matchers[TokenString].FindString(input)\n\t\tif match != \"\" {\n\t\t\treturn s.emitToken(TokenString, match)\n\t\t} else {\n\t\t\ts.last = s.emitToken(TokenError, \"unclosed quotation mark\")\n\t\t\treturn s.last\n\t\t}\n\tcase '~':\n\t\t\/\/ Includes or Char.\n\t\treturn s.emitPrefixOrChar(TokenIncludes, \"~=\")\n\tcase '|':\n\t\t\/\/ DashMatch or Char.\n\t\treturn s.emitPrefixOrChar(TokenDashMatch, \"|=\")\n\tcase '^':\n\t\t\/\/ PrefixMatch or Char.\n\t\treturn s.emitPrefixOrChar(TokenPrefixMatch, \"^=\")\n\tcase '$':\n\t\t\/\/ SuffixMatch or Char.\n\t\treturn s.emitPrefixOrChar(TokenSuffixMatch, \"$=\")\n\tcase '*':\n\t\t\/\/ SubstringMatch or Char.\n\t\treturn s.emitPrefixOrChar(TokenSubstringMatch, \"*=\")\n\tcase '<':\n\t\t\/\/ CDO or Char.\n\t\treturn s.emitPrefixOrChar(TokenCDO, \"<!--\")\n\t}\n\t\/\/ Test all regexps, in order.\n\tfor _, token := range matchOrder {\n\t\tif match := matchers[token].FindString(input); match != \"\" {\n\t\t\treturn s.emitToken(token, match)\n\t\t}\n\t}\n\ts.last = s.emitToken(TokenError, \"impossible to tokenize\")\n\treturn s.last\n}\n\n\/\/ updatePosition updates input coordinates based on the consumed text.\nfunc (s *Scanner) updatePosition(text string) {\n\tcount := utf8.RuneCountInString(text)\n\tlines := strings.Count(text, \"\\n\")\n\ts.line += lines\n\tif lines == 0 {\n\t\ts.column += count\n\t} else {\n\t\ts.column = utf8.RuneCountInString(text[strings.LastIndex(text, \"\\n\"):])\n\t}\n\ts.pos += count\n}\n\n\/\/ emitToken returns a Token for the string v and updates the scanner position.\nfunc (s *Scanner) emitToken(t tokenType, v string) *Token {\n\ttoken := &Token{t, v, s.line, s.column}\n\ts.updatePosition(v)\n\treturn token\n}\n\n\/\/ emitPrefixOrChar returns a Token for type t if the current position\n\/\/ matches the given prefix. Otherwise it returns a Char token using the\n\/\/ first character from the prefix.\nfunc (s *Scanner) emitPrefixOrChar(t tokenType, prefix string) *Token {\n\tif strings.HasPrefix(s.input[s.pos:], prefix) {\n\t\treturn s.emitToken(t, prefix)\n\t}\n\treturn s.emitToken(TokenChar, string(prefix[0]))\n}\n<commit_msg>css\/scanner: added first-char shortcut for Comment and AtKeyword.<commit_after>\/\/ Copyright 2012 The Gorilla Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage scanner\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unicode\/utf8\"\n)\n\n\/\/ tokenType identifies the type of lexical tokens.\ntype tokenType int\n\n\/\/ String returns a string representation of the token type.\nfunc (t tokenType) String() string {\n\treturn tokenNames[t]\n}\n\n\/\/ Token represents a token and the corresponding string.\ntype Token struct {\n\tType tokenType\n\tValue string\n\tLine int\n\tColumn int\n}\n\n\/\/ String returns a string representation of the token.\nfunc (t *Token) String() string {\n\tif len(t.Value) > 10 {\n\t\treturn fmt.Sprintf(\"%s (line: %d, column: %d): %.10q...\",\n\t\t\tt.Type, t.Line, t.Column, t.Value)\n\t}\n\treturn fmt.Sprintf(\"%s (line: %d, column: %d): %q\",\n\t\tt.Type, t.Line, t.Column, t.Value)\n}\n\n\/\/ All tokens -----------------------------------------------------------------\n\n\/\/ The complete list of tokens in CSS3.\nconst (\n\t\/\/ Scanner flags.\n\tTokenError tokenType = iota\n\tTokenEOF\n\t\/\/ From now on, only tokens from the CSS specification.\n\tTokenIdent\n\tTokenAtKeyword\n\tTokenString\n\tTokenHash\n\tTokenNumber\n\tTokenPercentage\n\tTokenDimension\n\tTokenURI\n\tTokenUnicodeRange\n\tTokenCDO\n\tTokenCDC\n\tTokenS\n\tTokenComment\n\tTokenFunction\n\tTokenIncludes\n\tTokenDashMatch\n\tTokenPrefixMatch\n\tTokenSuffixMatch\n\tTokenSubstringMatch\n\tTokenChar\n\tTokenBOM\n)\n\n\/\/ tokenNames maps tokenType's to their names. Used for conversion to string.\nvar tokenNames = map[tokenType]string{\n\tTokenError: \"error\",\n\tTokenEOF: \"EOF\",\n\tTokenIdent: \"IDENT\",\n\tTokenAtKeyword: \"ATKEYWORD\",\n\tTokenString: \"STRING\",\n\tTokenHash: \"HASH\",\n\tTokenNumber: \"NUMBER\",\n\tTokenPercentage: \"PERCENTAGE\",\n\tTokenDimension: \"DIMENSION\",\n\tTokenURI: \"URI\",\n\tTokenUnicodeRange: \"UNICODE-RANGE\",\n\tTokenCDO: \"CDO\",\n\tTokenCDC: \"CDC\",\n\tTokenS: \"S\",\n\tTokenComment: \"COMMENT\",\n\tTokenFunction: \"FUNCTION\",\n\tTokenIncludes: \"INCLUDES\",\n\tTokenDashMatch: \"DASHMATCH\",\n\tTokenPrefixMatch: \"PREFIXMATCH\",\n\tTokenSuffixMatch: \"SUFFIXMATCH\",\n\tTokenSubstringMatch: \"SUBSTRINGMATCH\",\n\tTokenChar: \"CHAR\",\n\tTokenBOM: \"BOM\",\n}\n\n\/\/ Macros and productions -----------------------------------------------------\n\/\/ http:\/\/www.w3.org\/TR\/css3-syntax\/#tokenization\n\nvar macroRegexp = regexp.MustCompile(`\\{[a-z]+\\}`)\n\n\/\/ macros maps macro names to patterns to be expanded.\nvar macros = map[string]string{\n\t\/\/ must be escaped: `\\.+*?()|[]{}^$`\n\t\"ident\": `-?{nmstart}{nmchar}*`,\n\t\"name\": `{nmchar}+`,\n\t\"nmstart\": `[a-zA-Z_]|{nonascii}|{escape}`,\n\t\"nonascii\": \"[\\u0080-\\uD7FF\\uE000-\\uFFFD\\U00010000-\\U0010FFFF]\",\n\t\"unicode\": `\\\\[0-9a-fA-F]{1,6}{wc}?`,\n\t\"escape\": \"{unicode}|\\\\[\\u0020-\\u007E\\u0080-\\uD7FF\\uE000-\\uFFFD\\U00010000-\\U0010FFFF]\",\n\t\"nmchar\": `[a-zA-Z0-9_-]|{nonascii}|{escape}`,\n\t\"num\": `[0-9]+|[0-9]*\\.[0-9]+`,\n\t\"string\": `\"(?:{stringchar}|')*\"|'(?:{stringchar}|\")*'`,\n\t\"stringchar\": `{urlchar}|[ ]|\\\\{nl}`,\n\t\"urlchar\": \"[\\u0009\\u0021\\u0023-\\u0026\\u0027-\\u007E]|{nonascii}|{escape}\",\n\t\"nl\": `[\\n\\r\\f]|\\r\\n`,\n\t\"w\": `{wc}*`,\n\t\"wc\": `[\\t\\n\\f\\r ]`,\n}\n\n\/\/ productions maps the list of tokens to patterns to be expanded.\nvar productions = map[tokenType]string{\n\tTokenIdent: `{ident}`,\n\tTokenAtKeyword: `@{ident}`,\n\tTokenString: `{string}`,\n\tTokenHash: `#{name}`,\n\tTokenNumber: `{num}`,\n\tTokenPercentage: `{num}%`,\n\tTokenDimension: `{num}{ident}`,\n\tTokenURI: `url\\({w}(?:{string}|{urlchar}*){w}\\)`,\n\tTokenUnicodeRange: `U\\+[0-9A-F\\?]{1,6}(?:-[0-9A-F]{1,6})?`,\n\tTokenCDO: `<!--`,\n\tTokenCDC: `-->`,\n\tTokenS: `{wc}+`,\n\tTokenComment: `\/\\*[^\\*]*[\\*]+(?:[^\/][^\\*]*[\\*]+)*\/`,\n\tTokenFunction: `{ident}\\(`,\n\tTokenIncludes: `~=`,\n\tTokenDashMatch: `\\|=`,\n\tTokenPrefixMatch: `\\^=`,\n\tTokenSuffixMatch: `\\$=`,\n\tTokenSubstringMatch: `\\*=`,\n\tTokenChar: `[^\"']`,\n\tTokenBOM: \"\\uFEFF\",\n}\n\n\/\/ matchers maps the list of tokens to compiled regular expressions.\n\/\/\n\/\/ The map is filled on init() using the macros and productions defined in\n\/\/ the CSS specification.\nvar matchers = map[tokenType]*regexp.Regexp{}\n\n\/\/ matchOrder is the order to test regexps when first-char shortcuts\n\/\/ can't be used.\nvar matchOrder = []tokenType{\n\tTokenURI,\n\tTokenFunction,\n\tTokenUnicodeRange,\n\tTokenIdent,\n\tTokenDimension,\n\tTokenPercentage,\n\tTokenNumber,\n\tTokenCDC,\n\tTokenChar,\n}\n\nfunc init() {\n\t\/\/ replace macros and compile regexps for productions.\n\treplaceMacro := func(s string) string {\n\t\treturn \"(?:\" + macros[s[1:len(s)-1]] + \")\"\n\t}\n\tfor t, s := range productions {\n\t\tfor macroRegexp.MatchString(s) {\n\t\t\ts = macroRegexp.ReplaceAllStringFunc(s, replaceMacro)\n\t\t}\n\t\tmatchers[t] = regexp.MustCompile(\"^(?:\" + s + \")\")\n\t}\n}\n\n\/\/ Scanner --------------------------------------------------------------------\n\n\/\/ New returns a new CSS scanner for the given input.\nfunc New(input string) *Scanner {\n\t\/\/ Normalize newlines.\n\tinput = strings.Replace(input, \"\\r\\n\", \"\\n\", -1)\n\treturn &Scanner{\n\t\tinput: input,\n\t\tline: 1,\n\t\tcolumn: 1,\n\t}\n}\n\n\/\/ Scanner scans an input and emits tokens following the CSS3 specification.\ntype Scanner struct {\n\tinput string\n\tpos int\n\tline int\n\tcolumn int\n\tlast *Token\n}\n\n\/\/ Next returns the next token from the input.\n\/\/\n\/\/ At the end of the input the token type is TokenEOF.\n\/\/\n\/\/ If the input can't be tokenized the token type is TokenError. This occurs\n\/\/ in case of unclosed quotation marks or comments.\nfunc (s *Scanner) Next() *Token {\n\tif s.last != nil {\n\t\treturn s.last\n\t}\n\tif s.pos >= len(s.input) {\n\t\ts.last = &Token{TokenEOF, \"\", -1, -1}\n\t\treturn s.last\n\t}\n\tinput := s.input[s.pos:]\n\tif s.pos == 0 {\n\t\t\/\/ Test BOM only at the beginning of the file.\n\t\tif strings.HasPrefix(input, \"\\uFEFF\") {\n\t\t\treturn s.emitToken(TokenBOM, \"\\uFEFF\")\n\t\t}\n\t}\n\t\/\/ There's a lot we can guess based on the current rune so we'll take this\n\t\/\/ shortcut before testing multiple regexps.\n\tr := input[0]\n\tswitch r {\n\tcase '\\t', '\\n', '\\f', '\\r', ' ':\n\t\t\/\/ Whitespace.\n\t\treturn s.emitToken(TokenS, matchers[TokenS].FindString(input))\n\tcase '.':\n\t\t\/\/ Dot is too common to not have a quick check.\n\t\t\/\/ We'll test if this is a Char; if it is followed by a number it is a\n\t\t\/\/ dimension\/percentage\/number, and this will be matched later.\n\t\tif len(input) > 1 && !unicode.IsDigit(rune(input[1])) {\n\t\t\treturn s.emitToken(TokenChar, \".\")\n\t\t}\n\tcase '#':\n\t\t\/\/ Hash is also a common one. If the regexp doesn't match it is a Char.\n\t\tif match := matchers[TokenHash].FindString(input); match != \"\" {\n\t\t\treturn s.emitToken(TokenHash, match)\n\t\t}\n\t\treturn s.emitToken(TokenChar, \"#\")\n\tcase '@':\n\t\t\/\/ Another common one. If the regexp doesn't match it is a Char.\n\t\tif match := matchers[TokenAtKeyword].FindString(input); match != \"\" {\n\t\t\treturn s.emitToken(TokenAtKeyword, match)\n\t\t}\n\t\treturn s.emitToken(TokenChar, \"@\")\n\tcase ':', ',', ';', '%', '&', '+', '=', '>', '(', ')', '[', ']', '{', '}':\n\t\t\/\/ More common chars.\n\t\treturn s.emitToken(TokenChar, string(r))\n\tcase '\"', '\\'':\n\t\t\/\/ String or error.\n\t\tmatch := matchers[TokenString].FindString(input)\n\t\tif match != \"\" {\n\t\t\treturn s.emitToken(TokenString, match)\n\t\t} else {\n\t\t\ts.last = s.emitToken(TokenError, \"unclosed quotation mark\")\n\t\t\treturn s.last\n\t\t}\n\tcase '\/':\n\t\tif len(input) > 1 && input[1] == '*' {\n\t\t\t\/\/ Comment or error.\n\t\t\tmatch := matchers[TokenComment].FindString(input)\n\t\t\tif match != \"\" {\n\t\t\t\treturn s.emitToken(TokenComment, match)\n\t\t\t} else {\n\t\t\t\ts.last = s.emitToken(TokenError, \"unclosed comment\")\n\t\t\t\treturn s.last\n\t\t\t}\n\t\t}\n\t\t\/\/ A simple char.\n\t\treturn s.emitToken(TokenChar, \"\/\")\n\tcase '~':\n\t\t\/\/ Includes or Char.\n\t\treturn s.emitPrefixOrChar(TokenIncludes, \"~=\")\n\tcase '|':\n\t\t\/\/ DashMatch or Char.\n\t\treturn s.emitPrefixOrChar(TokenDashMatch, \"|=\")\n\tcase '^':\n\t\t\/\/ PrefixMatch or Char.\n\t\treturn s.emitPrefixOrChar(TokenPrefixMatch, \"^=\")\n\tcase '$':\n\t\t\/\/ SuffixMatch or Char.\n\t\treturn s.emitPrefixOrChar(TokenSuffixMatch, \"$=\")\n\tcase '*':\n\t\t\/\/ SubstringMatch or Char.\n\t\treturn s.emitPrefixOrChar(TokenSubstringMatch, \"*=\")\n\tcase '<':\n\t\t\/\/ CDO or Char.\n\t\treturn s.emitPrefixOrChar(TokenCDO, \"<!--\")\n\t}\n\t\/\/ Test all regexps, in order.\n\tfor _, token := range matchOrder {\n\t\tif match := matchers[token].FindString(input); match != \"\" {\n\t\t\treturn s.emitToken(token, match)\n\t\t}\n\t}\n\ts.last = s.emitToken(TokenError, \"impossible to tokenize\")\n\treturn s.last\n}\n\n\/\/ updatePosition updates input coordinates based on the consumed text.\nfunc (s *Scanner) updatePosition(text string) {\n\tcount := utf8.RuneCountInString(text)\n\tlines := strings.Count(text, \"\\n\")\n\ts.line += lines\n\tif lines == 0 {\n\t\ts.column += count\n\t} else {\n\t\ts.column = utf8.RuneCountInString(text[strings.LastIndex(text, \"\\n\"):])\n\t}\n\ts.pos += count\n}\n\n\/\/ emitToken returns a Token for the string v and updates the scanner position.\nfunc (s *Scanner) emitToken(t tokenType, v string) *Token {\n\ttoken := &Token{t, v, s.line, s.column}\n\ts.updatePosition(v)\n\treturn token\n}\n\n\/\/ emitPrefixOrChar returns a Token for type t if the current position\n\/\/ matches the given prefix. Otherwise it returns a Char token using the\n\/\/ first character from the prefix.\nfunc (s *Scanner) emitPrefixOrChar(t tokenType, prefix string) *Token {\n\tif strings.HasPrefix(s.input[s.pos:], prefix) {\n\t\treturn s.emitToken(t, prefix)\n\t}\n\treturn s.emitToken(TokenChar, string(prefix[0]))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build linux netbsd openbsd dragonfly nacl\n\npackage os\n\nimport (\n\t\"errors\"\n\t\"runtime\"\n)\n\n\/\/ We query the executable path at init time to avoid the problem of\n\/\/ readlink returns a path appended with \" (deleted)\" when the original\n\/\/ binary gets deleted.\nvar executablePath, executablePathErr = func () (string, error) {\n\tvar procfn string\n\tswitch runtime.GOOS {\n\tdefault:\n\t\treturn \"\", errors.New(\"Executable not implemented for \" + runtime.GOOS)\n\tcase \"linux\":\n\t\tprocfn = \"\/proc\/self\/exe\"\n\tcase \"netbsd\":\n\t\tprocfn = \"\/proc\/curproc\/exe\"\n\tcase \"openbsd\":\n\t\tprocfn = \"\/proc\/curproc\/file\"\n\tcase \"dragonfly\":\n\t\tprocfn = \"\/proc\/curproc\/file\"\n\t}\n\treturn Readlink(procfn)\n}()\n\nfunc executable() (string, error) {\n\treturn executablePath, executablePathErr\n}\n<commit_msg>os: gofmt -w -s<commit_after>\/\/ Copyright 2016 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build linux netbsd openbsd dragonfly nacl\n\npackage os\n\nimport (\n\t\"errors\"\n\t\"runtime\"\n)\n\n\/\/ We query the executable path at init time to avoid the problem of\n\/\/ readlink returns a path appended with \" (deleted)\" when the original\n\/\/ binary gets deleted.\nvar executablePath, executablePathErr = func() (string, error) {\n\tvar procfn string\n\tswitch runtime.GOOS {\n\tdefault:\n\t\treturn \"\", errors.New(\"Executable not implemented for \" + runtime.GOOS)\n\tcase \"linux\":\n\t\tprocfn = \"\/proc\/self\/exe\"\n\tcase \"netbsd\":\n\t\tprocfn = \"\/proc\/curproc\/exe\"\n\tcase \"openbsd\":\n\t\tprocfn = \"\/proc\/curproc\/file\"\n\tcase \"dragonfly\":\n\t\tprocfn = \"\/proc\/curproc\/file\"\n\t}\n\treturn Readlink(procfn)\n}()\n\nfunc executable() (string, error) {\n\treturn executablePath, executablePathErr\n}\n<|endoftext|>"} {"text":"<commit_before>package generator\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"path\/filepath\"\n\n\ttl \"github.com\/xlab\/c-for-go\/translator\"\n)\n\nfunc (gen *Generator) writeDefinesGroup(wr io.Writer, defines []*tl.CDecl) (n int) {\n\twriteStartConst(wr)\n\tfor _, decl := range defines {\n\t\tif !decl.IsDefine {\n\t\t\tcontinue\n\t\t}\n\t\tname := gen.tr.TransformName(tl.TargetConst, decl.Name)\n\t\tif decl.Value == nil && string(name) == decl.Expression {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(wr, \"\/\/ %s as defined in %s\\n\", name,\n\t\t\tfilepath.ToSlash(gen.tr.SrcLocation(tl.TargetConst, decl.Name, decl.Pos)))\n\n\t\tif decl.Value != nil {\n\t\t\tfmt.Fprintf(wr, \"%s = %v\", name, decl.Value)\n\t\t} else if len(decl.Expression) > 0 {\n\t\t\tfmt.Fprintf(wr, \"%s = %s\", name, decl.Expression)\n\t\t} else {\n\t\t\tfmt.Fprint(wr, name)\n\t\t}\n\t\twriteSpace(wr, 1)\n\t\tn++\n\t}\n\twriteEndConst(wr)\n\treturn\n}\n\nfunc (gen *Generator) writeConstDeclaration(wr io.Writer, decl *tl.CDecl) {\n\tdeclName := gen.tr.TransformName(tl.TargetConst, decl.Name)\n\tif decl.Value == nil && string(declName) == decl.Expression {\n\t\treturn\n\t}\n\tfmt.Fprintf(wr, \"\/\/ %s as declared in %s\\n\", declName,\n\t\tfilepath.ToSlash(gen.tr.SrcLocation(tl.TargetConst, decl.Name, decl.Pos)))\n\tgoSpec := gen.tr.TranslateSpec(decl.Spec)\n\n\tif decl.Value != nil {\n\t\tfmt.Fprintf(wr, \"const %s %s = %v\", declName, goSpec, decl.Value)\n\t\treturn\n\t} else if len(decl.Expression) > 0 {\n\t\tfmt.Fprintf(wr, \"const %s %s = %s\", declName, goSpec, decl.Expression)\n\t\treturn\n\t}\n\t\/\/ const must have values, otherwise variable\n\tfmt.Fprintf(wr, \"var %s %s\", declName, goSpec)\n}\n\nfunc (gen *Generator) expandEnumAnonymous(wr io.Writer, decl *tl.CDecl, namesSeen map[string]bool) {\n\tvar typeName []byte\n\tvar hasType bool\n\tif decl.IsTypedef {\n\t\tif typeName = gen.tr.TransformName(tl.TargetType, decl.Name); len(typeName) > 0 {\n\t\t\thasType = true\n\t\t}\n\t}\n\n\tspec := decl.Spec.(*tl.CEnumSpec)\n\tif hasType {\n\t\tenumType := gen.tr.TranslateSpec(&spec.Type)\n\t\tfmt.Fprintf(wr, \"\/\/ %s as declared in %s\\n\", typeName,\n\t\t\tfilepath.ToSlash(gen.tr.SrcLocation(tl.TargetConst, decl.Name, decl.Pos)))\n\t\tfmt.Fprintf(wr, \"type %s %s\\n\", typeName, enumType)\n\t\twriteSpace(wr, 1)\n\t\tfmt.Fprintf(wr, \"\/\/ %s enumeration from %s\\n\", typeName,\n\t\t\tfilepath.ToSlash(gen.tr.SrcLocation(tl.TargetConst, decl.Name, decl.Pos)))\n\t}\n\twriteStartConst(wr)\n\tfor i, m := range spec.Members {\n\t\tif !gen.tr.IsAcceptableName(tl.TargetConst, m.Name) {\n\t\t\tcontinue\n\t\t}\n\t\tmName := gen.tr.TransformName(tl.TargetConst, m.Name)\n\t\tif len(mName) == 0 {\n\t\t\tcontinue\n\t\t} else if namesSeen[string(mName)] {\n\t\t\tcontinue\n\t\t} else {\n\t\t\tnamesSeen[string(mName)] = true\n\t\t}\n\t\tif !hasType {\n\t\t\tfmt.Fprintf(wr, \"\/\/ %s as declared in %s\\n\", mName,\n\t\t\t\tfilepath.ToSlash(gen.tr.SrcLocation(tl.TargetConst, m.Name, m.Pos)))\n\t\t}\n\t\tswitch {\n\t\tcase m.Value != nil:\n\t\t\tif hasType {\n\t\t\t\tfmt.Fprintf(wr, \"%s %s = %s\\n\", mName, typeName, iotaOnZero(i, m.Value))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfmt.Fprintf(wr, \"%s = %s\\n\", mName, iotaOnZero(i, m.Value))\n\t\tcase len(m.Expression) > 0:\n\t\t\tif hasType {\n\t\t\t\tfmt.Fprintf(wr, \"%s %s = %s\\n\", mName, typeName, iotaOnZero(i, m.Expression))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfmt.Fprintf(wr, \"%s = %s\\n\", mName, iotaOnZero(i, m.Expression))\n\t\tdefault:\n\t\t\tif i == 0 && hasType {\n\t\t\t\tfmt.Fprintf(wr, \"%s %s = iota\\n\", mName, typeName)\n\t\t\t\tcontinue\n\t\t\t} else if i == 0 {\n\t\t\t\tfmt.Fprintf(wr, \"%s = iota\\n\", mName)\n\t\t\t}\n\t\t\tfmt.Fprintf(wr, \"%s\\n\", mName)\n\t\t}\n\t}\n\twriteEndConst(wr)\n\twriteSpace(wr, 1)\n}\n\nfunc (gen *Generator) expandEnum(wr io.Writer, decl *tl.CDecl, namesSeen map[string]bool) {\n\tvar declName []byte\n\tvar isTypedef bool\n\tif decl.IsTypedef {\n\t\tif declName = gen.tr.TransformName(tl.TargetType, decl.Name); len(declName) > 0 {\n\t\t\tisTypedef = true\n\t\t}\n\t}\n\n\tspec := decl.Spec.(*tl.CEnumSpec)\n\ttagName := gen.tr.TransformName(tl.TargetType, decl.Spec.GetBase())\n\tenumType := gen.tr.TranslateSpec(&spec.Type)\n\tfmt.Fprintf(wr, \"\/\/ %s as declared in %s\\n\", tagName,\n\t\tfilepath.ToSlash(gen.tr.SrcLocation(tl.TargetConst, decl.Name, decl.Pos)))\n\tfmt.Fprintf(wr, \"type %s %s\\n\", tagName, enumType)\n\twriteSpace(wr, 1)\n\tif isTypedef {\n\t\tif !bytes.Equal(tagName, declName) && len(declName) > 0 {\n\t\t\t\/\/ alias type decl name to the tag\n\t\t\tfmt.Fprintf(wr, \"\/\/ %s as declared in %s\\n\", declName,\n\t\t\t\tfilepath.ToSlash(gen.tr.SrcLocation(tl.TargetConst, decl.Name, decl.Pos)))\n\t\t\tfmt.Fprintf(wr, \"type %s %s\", declName, tagName)\n\t\t\twriteSpace(wr, 1)\n\t\t}\n\t}\n\n\tfmt.Fprintf(wr, \"\/\/ %s enumeration from %s\\n\", tagName,\n\t\tfilepath.ToSlash(gen.tr.SrcLocation(tl.TargetConst, decl.Name, decl.Pos)))\n\twriteStartConst(wr)\n\tfor i, m := range spec.Members {\n\t\tif !gen.tr.IsAcceptableName(tl.TargetConst, m.Name) {\n\t\t\tcontinue\n\t\t}\n\t\tmName := gen.tr.TransformName(tl.TargetConst, m.Name)\n\t\tif len(mName) == 0 {\n\t\t\tcontinue\n\t\t} else if namesSeen[string(mName)] {\n\t\t\tcontinue\n\t\t} else {\n\t\t\tnamesSeen[string(mName)] = true\n\t\t}\n\t\tswitch {\n\t\tcase m.Value != nil:\n\t\t\tfmt.Fprintf(wr, \"%s %s = %v\\n\", mName, declName, iotaOnZero(i, m.Value))\n\t\tcase len(m.Expression) > 0:\n\t\t\tfmt.Fprintf(wr, \"%s %s = %v\\n\", mName, declName, iotaOnZero(i, m.Expression))\n\t\tdefault:\n\t\t\tif i == 0 {\n\t\t\t\tfmt.Fprintf(wr, \"%s %s = iota\\n\", mName, declName)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfmt.Fprintf(wr, \"%s\\n\", mName)\n\t\t}\n\t}\n\twriteEndConst(wr)\n\twriteSpace(wr, 1)\n}\n\nfunc iotaOnZero(i int, v interface{}) string {\n\tresult := fmt.Sprintf(\"%v\", v)\n\tif i == 0 {\n\t\tif result == \"0\" {\n\t\t\treturn \"iota\"\n\t\t}\n\t}\n\treturn result\n}\n\nfunc writeStartConst(wr io.Writer) {\n\tfmt.Fprintln(wr, \"const (\")\n}\n\nfunc writeEndConst(wr io.Writer) {\n\tfmt.Fprintln(wr, \")\")\n}\n<commit_msg>Fix parse error for empty constants<commit_after>package generator\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"path\/filepath\"\n\n\ttl \"github.com\/xlab\/c-for-go\/translator\"\n)\n\nfunc (gen *Generator) writeDefinesGroup(wr io.Writer, defines []*tl.CDecl) (n int) {\n\twriteStartConst(wr)\n\tfor _, decl := range defines {\n\t\tif !decl.IsDefine {\n\t\t\tcontinue\n\t\t}\n\t\tname := gen.tr.TransformName(tl.TargetConst, decl.Name)\n\t\tif decl.Value == nil && string(name) == decl.Expression {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(wr, \"\/\/ %s as defined in %s\\n\", name,\n\t\t\tfilepath.ToSlash(gen.tr.SrcLocation(tl.TargetConst, decl.Name, decl.Pos)))\n\n\t\tif decl.Value != nil {\n\t\t\tfmt.Fprintf(wr, \"%s = %v\", name, decl.Value)\n\t\t} else if len(decl.Expression) > 0 {\n\t\t\tfmt.Fprintf(wr, \"%s = %s\", name, decl.Expression)\n\t\t} else {\n\t\t\t\/\/ In this case, it's nil or the expression is zero length.\n\t\t\t\/\/ fmt.Fprint(wr, name)\n\t\t}\n\t\twriteSpace(wr, 1)\n\t\tn++\n\t}\n\twriteEndConst(wr)\n\treturn\n}\n\nfunc (gen *Generator) writeConstDeclaration(wr io.Writer, decl *tl.CDecl) {\n\tdeclName := gen.tr.TransformName(tl.TargetConst, decl.Name)\n\tif decl.Value == nil && string(declName) == decl.Expression {\n\t\treturn\n\t}\n\tfmt.Fprintf(wr, \"\/\/ %s as declared in %s\\n\", declName,\n\t\tfilepath.ToSlash(gen.tr.SrcLocation(tl.TargetConst, decl.Name, decl.Pos)))\n\tgoSpec := gen.tr.TranslateSpec(decl.Spec)\n\n\tif decl.Value != nil {\n\t\tfmt.Fprintf(wr, \"const %s %s = %v\", declName, goSpec, decl.Value)\n\t\treturn\n\t} else if len(decl.Expression) > 0 {\n\t\tfmt.Fprintf(wr, \"const %s %s = %s\", declName, goSpec, decl.Expression)\n\t\treturn\n\t}\n\t\/\/ const must have values, otherwise variable\n\tfmt.Fprintf(wr, \"var %s %s\", declName, goSpec)\n}\n\nfunc (gen *Generator) expandEnumAnonymous(wr io.Writer, decl *tl.CDecl, namesSeen map[string]bool) {\n\tvar typeName []byte\n\tvar hasType bool\n\tif decl.IsTypedef {\n\t\tif typeName = gen.tr.TransformName(tl.TargetType, decl.Name); len(typeName) > 0 {\n\t\t\thasType = true\n\t\t}\n\t}\n\n\tspec := decl.Spec.(*tl.CEnumSpec)\n\tif hasType {\n\t\tenumType := gen.tr.TranslateSpec(&spec.Type)\n\t\tfmt.Fprintf(wr, \"\/\/ %s as declared in %s\\n\", typeName,\n\t\t\tfilepath.ToSlash(gen.tr.SrcLocation(tl.TargetConst, decl.Name, decl.Pos)))\n\t\tfmt.Fprintf(wr, \"type %s %s\\n\", typeName, enumType)\n\t\twriteSpace(wr, 1)\n\t\tfmt.Fprintf(wr, \"\/\/ %s enumeration from %s\\n\", typeName,\n\t\t\tfilepath.ToSlash(gen.tr.SrcLocation(tl.TargetConst, decl.Name, decl.Pos)))\n\t}\n\twriteStartConst(wr)\n\tfor i, m := range spec.Members {\n\t\tif !gen.tr.IsAcceptableName(tl.TargetConst, m.Name) {\n\t\t\tcontinue\n\t\t}\n\t\tmName := gen.tr.TransformName(tl.TargetConst, m.Name)\n\t\tif len(mName) == 0 {\n\t\t\tcontinue\n\t\t} else if namesSeen[string(mName)] {\n\t\t\tcontinue\n\t\t} else {\n\t\t\tnamesSeen[string(mName)] = true\n\t\t}\n\t\tif !hasType {\n\t\t\tfmt.Fprintf(wr, \"\/\/ %s as declared in %s\\n\", mName,\n\t\t\t\tfilepath.ToSlash(gen.tr.SrcLocation(tl.TargetConst, m.Name, m.Pos)))\n\t\t}\n\t\tswitch {\n\t\tcase m.Value != nil:\n\t\t\tif hasType {\n\t\t\t\tfmt.Fprintf(wr, \"%s %s = %s\\n\", mName, typeName, iotaOnZero(i, m.Value))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfmt.Fprintf(wr, \"%s = %s\\n\", mName, iotaOnZero(i, m.Value))\n\t\tcase len(m.Expression) > 0:\n\t\t\tif hasType {\n\t\t\t\tfmt.Fprintf(wr, \"%s %s = %s\\n\", mName, typeName, iotaOnZero(i, m.Expression))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfmt.Fprintf(wr, \"%s = %s\\n\", mName, iotaOnZero(i, m.Expression))\n\t\tdefault:\n\t\t\tif i == 0 && hasType {\n\t\t\t\tfmt.Fprintf(wr, \"%s %s = iota\\n\", mName, typeName)\n\t\t\t\tcontinue\n\t\t\t} else if i == 0 {\n\t\t\t\tfmt.Fprintf(wr, \"%s = iota\\n\", mName)\n\t\t\t}\n\t\t\tfmt.Fprintf(wr, \"%s\\n\", mName)\n\t\t}\n\t}\n\twriteEndConst(wr)\n\twriteSpace(wr, 1)\n}\n\nfunc (gen *Generator) expandEnum(wr io.Writer, decl *tl.CDecl, namesSeen map[string]bool) {\n\tvar declName []byte\n\tvar isTypedef bool\n\tif decl.IsTypedef {\n\t\tif declName = gen.tr.TransformName(tl.TargetType, decl.Name); len(declName) > 0 {\n\t\t\tisTypedef = true\n\t\t}\n\t}\n\n\tspec := decl.Spec.(*tl.CEnumSpec)\n\ttagName := gen.tr.TransformName(tl.TargetType, decl.Spec.GetBase())\n\tenumType := gen.tr.TranslateSpec(&spec.Type)\n\tfmt.Fprintf(wr, \"\/\/ %s as declared in %s\\n\", tagName,\n\t\tfilepath.ToSlash(gen.tr.SrcLocation(tl.TargetConst, decl.Name, decl.Pos)))\n\tfmt.Fprintf(wr, \"type %s %s\\n\", tagName, enumType)\n\twriteSpace(wr, 1)\n\tif isTypedef {\n\t\tif !bytes.Equal(tagName, declName) && len(declName) > 0 {\n\t\t\t\/\/ alias type decl name to the tag\n\t\t\tfmt.Fprintf(wr, \"\/\/ %s as declared in %s\\n\", declName,\n\t\t\t\tfilepath.ToSlash(gen.tr.SrcLocation(tl.TargetConst, decl.Name, decl.Pos)))\n\t\t\tfmt.Fprintf(wr, \"type %s %s\", declName, tagName)\n\t\t\twriteSpace(wr, 1)\n\t\t}\n\t}\n\n\tfmt.Fprintf(wr, \"\/\/ %s enumeration from %s\\n\", tagName,\n\t\tfilepath.ToSlash(gen.tr.SrcLocation(tl.TargetConst, decl.Name, decl.Pos)))\n\twriteStartConst(wr)\n\tfor i, m := range spec.Members {\n\t\tif !gen.tr.IsAcceptableName(tl.TargetConst, m.Name) {\n\t\t\tcontinue\n\t\t}\n\t\tmName := gen.tr.TransformName(tl.TargetConst, m.Name)\n\t\tif len(mName) == 0 {\n\t\t\tcontinue\n\t\t} else if namesSeen[string(mName)] {\n\t\t\tcontinue\n\t\t} else {\n\t\t\tnamesSeen[string(mName)] = true\n\t\t}\n\t\tswitch {\n\t\tcase m.Value != nil:\n\t\t\tfmt.Fprintf(wr, \"%s %s = %v\\n\", mName, declName, iotaOnZero(i, m.Value))\n\t\tcase len(m.Expression) > 0:\n\t\t\tfmt.Fprintf(wr, \"%s %s = %v\\n\", mName, declName, iotaOnZero(i, m.Expression))\n\t\tdefault:\n\t\t\tif i == 0 {\n\t\t\t\tfmt.Fprintf(wr, \"%s %s = iota\\n\", mName, declName)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfmt.Fprintf(wr, \"%s\\n\", mName)\n\t\t}\n\t}\n\twriteEndConst(wr)\n\twriteSpace(wr, 1)\n}\n\nfunc iotaOnZero(i int, v interface{}) string {\n\tresult := fmt.Sprintf(\"%v\", v)\n\tif i == 0 {\n\t\tif result == \"0\" {\n\t\t\treturn \"iota\"\n\t\t}\n\t}\n\treturn result\n}\n\nfunc writeStartConst(wr io.Writer) {\n\tfmt.Fprintln(wr, \"const (\")\n}\n\nfunc writeEndConst(wr io.Writer) {\n\tfmt.Fprintln(wr, \")\")\n}\n<|endoftext|>"} {"text":"<commit_before>package arangolite\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"strings\"\n)\n\n\/\/ Transaction represents an ArangoDB transaction.\ntype Transaction struct {\n\treadCol, writeCol []string\n\tresultVars []string\n\tqueries []Query\n\treturnVar string\n\tbindVars map[string]string\n}\n\n\/\/ NewTransaction returns a new Transaction object.\nfunc NewTransaction(readCol, writeCol []string) *Transaction {\n\tif readCol == nil {\n\t\treadCol = []string{}\n\t}\n\n\tif writeCol == nil {\n\t\twriteCol = []string{}\n\t}\n\n\treturn &Transaction{readCol: readCol, writeCol: writeCol}\n}\n\n\/\/ AddQuery adds a new AQL query to the transaction. The result will be set in\n\/\/ a temp variable named after the value of \"resultVar\".\n\/\/ To use it from elsewhere in the transaction, use the Go templating convention.\n\/\/\n\/\/ e.g. NewTransaction([]string{}, []string{}).\n\/\/ AddQuery(\"var1\", \"FOR d IN documents RETURN d\").\n\/\/ AddQuery(\"var2\", \"FOR d IN {{.var1}} RETURN d._id\").Run(db)\n\/\/\nfunc (t *Transaction) AddQuery(resultVar, aql string, params ...interface{}) *Transaction {\n\tt.resultVars = append(t.resultVars, resultVar)\n\tt.queries = append(t.queries, *NewQuery(toES6Template(aql), params...))\n\treturn t\n}\n\n\/\/ Bind sets the name and value of a bind parameter\n\/\/ Binding parameters prevents AQL injection\n\/\/ Example:\n\/\/ transaction := arangolite.NewTransaction([]string{}, []string{}).\n\/\/ \t\tAddQuery(\"var1\", \"FOR d IN nodes FILTER d._key == @key RETURN d._id\").\n\/\/ \t\tAddQuery(\"var2\", \"FOR n IN nodes FILTER n._id == {{.var1}}[0] RETURN n._key\").Return(\"var2\")\n\/\/ transaction.Bind(\"key\", 123)\n\/\/\nfunc (t *Transaction) Bind(name string, value interface{}) *Transaction {\n\tif t.bindVars == nil {\n\t\tt.bindVars = make(map[string]string)\n\t}\n\tm, _ := json.Marshal(value)\n\tt.bindVars[name] = strings.Replace(string(m), `\"`, \"'\", -1)\n\treturn t\n}\n\n\/\/ Return sets the final \"resultVar\" that is returned at the end of the transaction.\nfunc (t *Transaction) Return(resultVar string) *Transaction {\n\tt.returnVar = resultVar\n\treturn t\n}\n\nfunc (t *Transaction) description() string {\n\treturn \"TRANSACTION\"\n}\n\nfunc (t *Transaction) path() string {\n\treturn \"\/_api\/transaction\"\n}\n\nfunc (t *Transaction) method() string {\n\treturn \"POST\"\n}\n\nfunc (t *Transaction) generate() []byte {\n\ttype TransactionFmt struct {\n\t\tCollections struct {\n\t\t\tRead []string `json:\"read\"`\n\t\t\tWrite []string `json:\"write\"`\n\t\t} `json:\"collections\"`\n\t\tAction string `json:\"action\"`\n\t}\n\n\ttransactionFmt := &TransactionFmt{}\n\ttransactionFmt.Collections.Read = t.readCol\n\ttransactionFmt.Collections.Write = t.writeCol\n\n\tjsFunc := bytes.NewBufferString(\"function () { var db = require('internal').db; \")\n\n\tfor name, value := range t.bindVars {\n\t\tjsFunc.WriteString(\"var \")\n\t\tjsFunc.WriteString(name)\n\t\tjsFunc.WriteString(\" = \")\n\t\tjsFunc.WriteString(value)\n\t\tjsFunc.WriteString(\"; \")\n\t}\n\n\tfor i, query := range t.queries {\n\t\twriteQuery(jsFunc, query.aql, t.resultVars[i])\n\t}\n\n\tif len(t.returnVar) > 0 {\n\t\tjsFunc.WriteString(\"return \")\n\t\tjsFunc.WriteString(t.returnVar)\n\t\tjsFunc.WriteString(\";\")\n\t}\n\n\tjsFunc.WriteRune('}')\n\n\ttransactionFmt.Action = jsFunc.String()\n\tjsonTransaction, _ := json.Marshal(transactionFmt)\n\n\treturn jsonTransaction\n}\n\n\/\/ writeQuery translate a given aql query to bytes\n\/\/ buff the buffer containing the resulting bytes\n\/\/ aql the AQL query\n\/\/ resultVarName the name of the variable that will accept the query result, if any - may be empty\nfunc writeQuery(buff *bytes.Buffer, aql string, resultVarName string) {\n\tif len(resultVarName) > 0 {\n\t\tbuff.WriteString(\"var \")\n\t\tbuff.WriteString(resultVarName)\n\t\tbuff.WriteString(\" = \")\n\t}\n\n\tbuff.WriteString(\"db._query(aqlQuery`\")\n\tbuff.WriteString(aql)\n\tbuff.WriteString(\"`).toArray(); \")\n}\n\nfunc toES6Template(query string) string {\n\tbuf := bytes.NewBuffer(nil)\n\tlookingForEnd := false\n\n\tfor _, b := range query {\n\t\tif lookingForEnd {\n\t\t\tif b == ' ' || b == '\\n' || b == ',' || b == ';' {\n\t\t\t\tlookingForEnd = false\n\t\t\t\tbuf.WriteString(\"} \")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\tif b == '@' {\n\t\t\t\tlookingForEnd = true\n\t\t\t\tbuf.WriteString(\"${\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tbuf.WriteRune(b)\n\t}\n\n\tquery = buf.String()\n\n\tquery = strings.Replace(query, \"{{.\", \"${\", -1)\n\treturn strings.Replace(query, \"}}\", \"}\", -1)\n}\n<commit_msg>Transaction bind parameters fixed.<commit_after>package arangolite\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"strings\"\n)\n\n\/\/ Transaction represents an ArangoDB transaction.\ntype Transaction struct {\n\treadCol, writeCol []string\n\tresultVars []string\n\tqueries []Query\n\treturnVar string\n\tbindVars map[string]string\n}\n\n\/\/ NewTransaction returns a new Transaction object.\nfunc NewTransaction(readCol, writeCol []string) *Transaction {\n\tif readCol == nil {\n\t\treadCol = []string{}\n\t}\n\n\tif writeCol == nil {\n\t\twriteCol = []string{}\n\t}\n\n\treturn &Transaction{readCol: readCol, writeCol: writeCol}\n}\n\n\/\/ AddQuery adds a new AQL query to the transaction. The result will be set in\n\/\/ a temp variable named after the value of \"resultVar\".\n\/\/ To use it from elsewhere in the transaction, use the Go templating convention.\n\/\/\n\/\/ e.g. NewTransaction([]string{}, []string{}).\n\/\/ AddQuery(\"var1\", \"FOR d IN documents RETURN d\").\n\/\/ AddQuery(\"var2\", \"FOR d IN {{.var1}} RETURN d._id\").Run(db)\n\/\/\nfunc (t *Transaction) AddQuery(resultVar, aql string, params ...interface{}) *Transaction {\n\tt.resultVars = append(t.resultVars, resultVar)\n\tt.queries = append(t.queries, *NewQuery(toES6Template(aql), params...))\n\treturn t\n}\n\n\/\/ Bind sets the name and value of a bind parameter\n\/\/ Binding parameters prevents AQL injection\n\/\/ Example:\n\/\/ transaction := arangolite.NewTransaction([]string{}, []string{}).\n\/\/ \t\tAddQuery(\"var1\", \"FOR d IN nodes FILTER d._key == @key RETURN d._id\").\n\/\/ \t\tAddQuery(\"var2\", \"FOR n IN nodes FILTER n._id == {{.var1}}[0] RETURN n._key\").Return(\"var2\")\n\/\/ transaction.Bind(\"key\", 123)\n\/\/\nfunc (t *Transaction) Bind(name string, value interface{}) *Transaction {\n\tif t.bindVars == nil {\n\t\tt.bindVars = make(map[string]string)\n\t}\n\tm, _ := json.Marshal(value)\n\tt.bindVars[name] = strings.Replace(string(m), `\"`, \"'\", -1)\n\treturn t\n}\n\n\/\/ Return sets the final \"resultVar\" that is returned at the end of the transaction.\nfunc (t *Transaction) Return(resultVar string) *Transaction {\n\tt.returnVar = resultVar\n\treturn t\n}\n\nfunc (t *Transaction) description() string {\n\treturn \"TRANSACTION\"\n}\n\nfunc (t *Transaction) path() string {\n\treturn \"\/_api\/transaction\"\n}\n\nfunc (t *Transaction) method() string {\n\treturn \"POST\"\n}\n\nfunc (t *Transaction) generate() []byte {\n\ttype TransactionFmt struct {\n\t\tCollections struct {\n\t\t\tRead []string `json:\"read\"`\n\t\t\tWrite []string `json:\"write\"`\n\t\t} `json:\"collections\"`\n\t\tAction string `json:\"action\"`\n\t}\n\n\ttransactionFmt := &TransactionFmt{}\n\ttransactionFmt.Collections.Read = t.readCol\n\ttransactionFmt.Collections.Write = t.writeCol\n\n\tjsFunc := bytes.NewBufferString(\"function () { var db = require('internal').db; \")\n\n\tfor name, value := range t.bindVars {\n\t\tjsFunc.WriteString(\"var \")\n\t\tjsFunc.WriteString(name)\n\t\tjsFunc.WriteString(\" = \")\n\t\tjsFunc.WriteString(value)\n\t\tjsFunc.WriteString(\"; \")\n\t}\n\n\tfor i, query := range t.queries {\n\t\twriteQuery(jsFunc, query.aql, t.resultVars[i])\n\t}\n\n\tif len(t.returnVar) > 0 {\n\t\tjsFunc.WriteString(\"return \")\n\t\tjsFunc.WriteString(t.returnVar)\n\t\tjsFunc.WriteString(\";\")\n\t}\n\n\tjsFunc.WriteRune('}')\n\n\ttransactionFmt.Action = jsFunc.String()\n\tjsonTransaction, _ := json.Marshal(transactionFmt)\n\n\treturn jsonTransaction\n}\n\n\/\/ writeQuery translate a given aql query to bytes\n\/\/ buff the buffer containing the resulting bytes\n\/\/ aql the AQL query\n\/\/ resultVarName the name of the variable that will accept the query result, if any - may be empty\nfunc writeQuery(buff *bytes.Buffer, aql string, resultVarName string) {\n\tif len(resultVarName) > 0 {\n\t\tbuff.WriteString(\"var \")\n\t\tbuff.WriteString(resultVarName)\n\t\tbuff.WriteString(\" = \")\n\t}\n\n\tbuff.WriteString(\"db._query(aqlQuery`\")\n\tbuff.WriteString(aql)\n\tbuff.WriteString(\"`).toArray(); \")\n}\n\nfunc toES6Template(query string) string {\n\tbuf := bytes.NewBuffer(nil)\n\tlookingForEnd := false\n\n\tfor _, b := range query {\n\t\tif lookingForEnd {\n\t\t\tif b == ' ' || b == '\\n' || b == ',' || b == ';' {\n\t\t\t\tlookingForEnd = false\n\t\t\t\tbuf.WriteRune('}')\n\t\t\t\tbuf.WriteRune(b)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t} else {\n\t\t\tif b == '@' {\n\t\t\t\tlookingForEnd = true\n\t\t\t\tbuf.WriteString(\"${\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tbuf.WriteRune(b)\n\t}\n\n\tquery = buf.String()\n\n\tquery = strings.Replace(query, \"{{.\", \"${\", -1)\n\treturn strings.Replace(query, \"}}\", \"}\", -1)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage rand\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"math\/big\"\n)\n\n\/\/ smallPrimes is a list of small, prime numbers that allows us to rapidly\n\/\/ exclude some fraction of composite candidates when searching for a random\n\/\/ prime. This list is truncated at the point where smallPrimesProduct exceeds\n\/\/ a uint64. It does not include two because we ensure that the candidates are\n\/\/ odd by construction.\nvar smallPrimes = []uint8{\n\t3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53,\n}\n\n\/\/ smallPrimesProduct is the product of the values in smallPrimes and allows us\n\/\/ to reduce a candidate prime by this number and then determine whether it's\n\/\/ coprime to all the elements of smallPrimes without further big.Int\n\/\/ operations.\nvar smallPrimesProduct = new(big.Int).SetUint64(16294579238595022365)\n\n\/\/ Prime returns a number, p, of the given size, such that p is prime\n\/\/ with high probability.\nfunc Prime(rand io.Reader, bits int) (p *big.Int, err error) {\n\tif bits < 1 {\n\t\terr = errors.New(\"crypto\/rand: prime size must be positive\")\n\t}\n\n\tb := uint(bits % 8)\n\tif b == 0 {\n\t\tb = 8\n\t}\n\n\tbytes := make([]byte, (bits+7)\/8)\n\tp = new(big.Int)\n\n\tbigMod := new(big.Int)\n\n\tfor {\n\t\t_, err = io.ReadFull(rand, bytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Clear bits in the first byte to make sure the candidate has a size <= bits.\n\t\tbytes[0] &= uint8(int(1<<b) - 1)\n\t\t\/\/ Don't let the value be too small, i.e, set the most significant two bits.\n\t\t\/\/ Setting the top two bits, rather than just the top bit,\n\t\t\/\/ means that when two of these values are multiplied together,\n\t\t\/\/ the result isn't ever one bit short.\n\t\tif b >= 2 {\n\t\t\tbytes[0] |= 3 << (b - 2)\n\t\t} else {\n\t\t\t\/\/ Here b==1, because b cannot be zero.\n\t\t\tbytes[0] |= 1\n\t\t\tif len(bytes) > 1 {\n\t\t\t\tbytes[1] |= 0x80\n\t\t\t}\n\t\t}\n\t\t\/\/ Make the value odd since an even number this large certainly isn't prime.\n\t\tbytes[len(bytes)-1] |= 1\n\n\t\tp.SetBytes(bytes)\n\n\t\t\/\/ Calculate the value mod the product of smallPrimes. If it's\n\t\t\/\/ a multiple of any of these primes we add two until it isn't.\n\t\t\/\/ The probability of overflowing is minimal and can be ignored\n\t\t\/\/ because we still perform Miller-Rabin tests on the result.\n\t\tbigMod.Mod(p, smallPrimesProduct)\n\t\tmod := bigMod.Uint64()\n\n\tNextDelta:\n\t\tfor delta := uint64(0); delta < 1<<20; delta += 2 {\n\t\t\tm := mod + delta\n\t\t\tfor _, prime := range smallPrimes {\n\t\t\t\tif m%uint64(prime) == 0 {\n\t\t\t\t\tcontinue NextDelta\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif delta > 0 {\n\t\t\t\tbigMod.SetUint64(delta)\n\t\t\t\tp.Add(p, bigMod)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ There is a tiny possibility that, by adding delta, we caused\n\t\t\/\/ the number to be one bit too long. Thus we check BitLen\n\t\t\/\/ here.\n\t\tif p.ProbablyPrime(20) && p.BitLen() == bits {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Int returns a uniform random value in [0, max).\nfunc Int(rand io.Reader, max *big.Int) (n *big.Int, err error) {\n\tk := (max.BitLen() + 7) \/ 8\n\n\t\/\/ b is the number of bits in the most significant byte of max.\n\tb := uint(max.BitLen() % 8)\n\tif b == 0 {\n\t\tb = 8\n\t}\n\n\tbytes := make([]byte, k)\n\tn = new(big.Int)\n\n\tfor {\n\t\t_, err = io.ReadFull(rand, bytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Clear bits in the first byte to increase the probability\n\t\t\/\/ that the candidate is < max.\n\t\tbytes[0] &= uint8(int(1<<b) - 1)\n\n\t\tn.SetBytes(bytes)\n\t\tif n.Cmp(max) < 0 {\n\t\t\treturn\n\t\t}\n\t}\n}\n<commit_msg>crypto\/rand: better panic message for invalid Int argument.<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage rand\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"math\/big\"\n)\n\n\/\/ smallPrimes is a list of small, prime numbers that allows us to rapidly\n\/\/ exclude some fraction of composite candidates when searching for a random\n\/\/ prime. This list is truncated at the point where smallPrimesProduct exceeds\n\/\/ a uint64. It does not include two because we ensure that the candidates are\n\/\/ odd by construction.\nvar smallPrimes = []uint8{\n\t3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53,\n}\n\n\/\/ smallPrimesProduct is the product of the values in smallPrimes and allows us\n\/\/ to reduce a candidate prime by this number and then determine whether it's\n\/\/ coprime to all the elements of smallPrimes without further big.Int\n\/\/ operations.\nvar smallPrimesProduct = new(big.Int).SetUint64(16294579238595022365)\n\n\/\/ Prime returns a number, p, of the given size, such that p is prime\n\/\/ with high probability.\nfunc Prime(rand io.Reader, bits int) (p *big.Int, err error) {\n\tif bits < 1 {\n\t\terr = errors.New(\"crypto\/rand: prime size must be positive\")\n\t}\n\n\tb := uint(bits % 8)\n\tif b == 0 {\n\t\tb = 8\n\t}\n\n\tbytes := make([]byte, (bits+7)\/8)\n\tp = new(big.Int)\n\n\tbigMod := new(big.Int)\n\n\tfor {\n\t\t_, err = io.ReadFull(rand, bytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Clear bits in the first byte to make sure the candidate has a size <= bits.\n\t\tbytes[0] &= uint8(int(1<<b) - 1)\n\t\t\/\/ Don't let the value be too small, i.e, set the most significant two bits.\n\t\t\/\/ Setting the top two bits, rather than just the top bit,\n\t\t\/\/ means that when two of these values are multiplied together,\n\t\t\/\/ the result isn't ever one bit short.\n\t\tif b >= 2 {\n\t\t\tbytes[0] |= 3 << (b - 2)\n\t\t} else {\n\t\t\t\/\/ Here b==1, because b cannot be zero.\n\t\t\tbytes[0] |= 1\n\t\t\tif len(bytes) > 1 {\n\t\t\t\tbytes[1] |= 0x80\n\t\t\t}\n\t\t}\n\t\t\/\/ Make the value odd since an even number this large certainly isn't prime.\n\t\tbytes[len(bytes)-1] |= 1\n\n\t\tp.SetBytes(bytes)\n\n\t\t\/\/ Calculate the value mod the product of smallPrimes. If it's\n\t\t\/\/ a multiple of any of these primes we add two until it isn't.\n\t\t\/\/ The probability of overflowing is minimal and can be ignored\n\t\t\/\/ because we still perform Miller-Rabin tests on the result.\n\t\tbigMod.Mod(p, smallPrimesProduct)\n\t\tmod := bigMod.Uint64()\n\n\tNextDelta:\n\t\tfor delta := uint64(0); delta < 1<<20; delta += 2 {\n\t\t\tm := mod + delta\n\t\t\tfor _, prime := range smallPrimes {\n\t\t\t\tif m%uint64(prime) == 0 {\n\t\t\t\t\tcontinue NextDelta\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif delta > 0 {\n\t\t\t\tbigMod.SetUint64(delta)\n\t\t\t\tp.Add(p, bigMod)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ There is a tiny possibility that, by adding delta, we caused\n\t\t\/\/ the number to be one bit too long. Thus we check BitLen\n\t\t\/\/ here.\n\t\tif p.ProbablyPrime(20) && p.BitLen() == bits {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Int returns a uniform random value in [0, max). It panics if max <= 0.\nfunc Int(rand io.Reader, max *big.Int) (n *big.Int, err error) {\n\tif max.Sign() <= 0 {\n\t\tpanic(\"crypto\/rand: argument to Int is <= 0\")\n\t}\n\tk := (max.BitLen() + 7) \/ 8\n\n\t\/\/ b is the number of bits in the most significant byte of max.\n\tb := uint(max.BitLen() % 8)\n\tif b == 0 {\n\t\tb = 8\n\t}\n\n\tbytes := make([]byte, k)\n\tn = new(big.Int)\n\n\tfor {\n\t\t_, err = io.ReadFull(rand, bytes)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Clear bits in the first byte to increase the probability\n\t\t\/\/ that the candidate is < max.\n\t\tbytes[0] &= uint8(int(1<<b) - 1)\n\n\t\tn.SetBytes(bytes)\n\t\tif n.Cmp(max) < 0 {\n\t\t\treturn\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage sort\n\nimport \"testing\"\n\n\nfunc f(a []int, x int) func(int) bool {\n\treturn func(i int) bool {\n\t\treturn a[i] <= x\n\t}\n}\n\n\nvar data = []int{0: -10, 1: -5, 2: 0, 3: 1, 4: 2, 5: 3, 6: 5, 7: 7, 8: 11, 9: 100, 10: 100, 11: 100, 12: 1000, 13: 10000}\n\nvar tests = []struct {\n\tname string\n\tn int\n\tf func(int) bool\n\ti int\n}{\n\t{\"empty\", 0, nil, 0},\n\t{\"1 1\", 1, func(i int) bool { return i <= 1 }, 0},\n\t{\"1 false\", 1, func(i int) bool { return false }, 0},\n\t{\"1 true\", 1, func(i int) bool { return true }, 0},\n\t{\"1e9 991\", 1e9, func(i int) bool { return i <= 991 }, 991},\n\t{\"1e9 false\", 1e9, func(i int) bool { return false }, 0},\n\t{\"1e9 true\", 1e9, func(i int) bool { return true }, 1e9 - 1},\n\t{\"data -20\", len(data), f(data, -20), 0},\n\t{\"data -10\", len(data), f(data, -10), 0},\n\t{\"data -9\", len(data), f(data, -9), 0},\n\t{\"data -6\", len(data), f(data, -6), 0},\n\t{\"data -5\", len(data), f(data, -5), 1},\n\t{\"data 3\", len(data), f(data, 3), 5},\n\t{\"data 99\", len(data), f(data, 99), 8},\n\t{\"data 100\", len(data), f(data, 100), 11},\n\t{\"data 101\", len(data), f(data, 101), 11},\n\t{\"data 10000\", len(data), f(data, 10000), 13},\n\t{\"data 10001\", len(data), f(data, 10001), 13},\n\t{\"descending a\", 7, func(i int) bool { return []int{99, 99, 59, 42, 7, 0, -1, -1}[i] >= 7 }, 4},\n\t{\"descending 7\", 1e9, func(i int) bool { return 1e9-i >= 7 }, 1e9 - 7},\n}\n\n\nfunc TestSearch(t *testing.T) {\n\tfor _, e := range tests {\n\t\ti := Search(e.n, e.f)\n\t\tif i != e.i {\n\t\t\tt.Errorf(\"%s: expected index %d; got %d\", e.name, e.i, i)\n\t\t}\n\t}\n}\n\n\n\/\/ Smoke tests for convenience wrappers - not comprehensive.\n\nvar fdata = []float{0: -3.14, 1: 0, 2: 1, 3: 2, 4: 1000.7}\nvar sdata = []string{0: \"f\", 1: \"foo\", 2: \"foobar\", 3: \"x\"}\n\nvar wrappertests = []struct {\n\tname string\n\tresult int\n\ti int\n}{\n\t{\"SearchInts\", SearchInts(data, 11), 8},\n\t{\"SearchFloats\", SearchFloats(fdata, 2.1), 3},\n\t{\"SearchStrings\", SearchStrings(sdata, \"\"), 0},\n\t{\"IntArray.Search\", IntArray(data).Search(0), 2},\n\t{\"FloatArray.Search\", FloatArray(fdata).Search(2.0), 3},\n\t{\"StringArray.Search\", StringArray(sdata).Search(\"x\"), 3},\n}\n\n\nfunc TestSearchWrappers(t *testing.T) {\n\tfor _, e := range wrappertests {\n\t\tif e.result != e.i {\n\t\t\tt.Errorf(\"%s: expected index %d; got %d\", e.name, e.i, e.result)\n\t\t}\n\t}\n}\n<commit_msg>sort.Search: added extra test to verify efficiency<commit_after>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage sort\n\nimport \"testing\"\n\n\nfunc f(a []int, x int) func(int) bool {\n\treturn func(i int) bool {\n\t\treturn a[i] <= x\n\t}\n}\n\n\nvar data = []int{0: -10, 1: -5, 2: 0, 3: 1, 4: 2, 5: 3, 6: 5, 7: 7, 8: 11, 9: 100, 10: 100, 11: 100, 12: 1000, 13: 10000}\n\nvar tests = []struct {\n\tname string\n\tn int\n\tf func(int) bool\n\ti int\n}{\n\t{\"empty\", 0, nil, 0},\n\t{\"1 1\", 1, func(i int) bool { return i <= 1 }, 0},\n\t{\"1 false\", 1, func(i int) bool { return false }, 0},\n\t{\"1 true\", 1, func(i int) bool { return true }, 0},\n\t{\"1e9 991\", 1e9, func(i int) bool { return i <= 991 }, 991},\n\t{\"1e9 false\", 1e9, func(i int) bool { return false }, 0},\n\t{\"1e9 true\", 1e9, func(i int) bool { return true }, 1e9 - 1},\n\t{\"data -20\", len(data), f(data, -20), 0},\n\t{\"data -10\", len(data), f(data, -10), 0},\n\t{\"data -9\", len(data), f(data, -9), 0},\n\t{\"data -6\", len(data), f(data, -6), 0},\n\t{\"data -5\", len(data), f(data, -5), 1},\n\t{\"data 3\", len(data), f(data, 3), 5},\n\t{\"data 99\", len(data), f(data, 99), 8},\n\t{\"data 100\", len(data), f(data, 100), 11},\n\t{\"data 101\", len(data), f(data, 101), 11},\n\t{\"data 10000\", len(data), f(data, 10000), 13},\n\t{\"data 10001\", len(data), f(data, 10001), 13},\n\t{\"descending a\", 7, func(i int) bool { return []int{99, 99, 59, 42, 7, 0, -1, -1}[i] >= 7 }, 4},\n\t{\"descending 7\", 1e9, func(i int) bool { return 1e9-i >= 7 }, 1e9 - 7},\n}\n\n\nfunc TestSearch(t *testing.T) {\n\tfor _, e := range tests {\n\t\ti := Search(e.n, e.f)\n\t\tif i != e.i {\n\t\t\tt.Errorf(\"%s: expected index %d; got %d\", e.name, e.i, i)\n\t\t}\n\t}\n}\n\n\n\/\/ log2 computes the binary logarithm of x, rounded up to the next integer.\n\/\/ (log2(0) == 0, log2(1) == 0, log2(2) == 1, log2(3) == 2, etc.)\n\/\/\nfunc log2(x int) int {\n\tn := 0\n\tfor p := 1; p < x; p += p {\n\t\t\/\/ p == 2**n\n\t\tn++\n\t}\n\t\/\/ p\/2 < x <= p == 2**n\n\treturn n\n}\n\n\nfunc TestSearchEfficiency(t *testing.T) {\n\tn := 100\n\tstep := 1\n\tfor exp := 2; exp < 10; exp++ {\n\t\t\/\/ n == 10**exp\n\t\t\/\/ step == 10**(exp-2)\n\t\tmax := log2(n)\n\t\tfor x := 0; x < n; x += step {\n\t\t\tcount := 0\n\t\t\ti := Search(n, func(i int) bool { count++; return i <= x })\n\t\t\tif i != x {\n\t\t\t\tt.Errorf(\"n = %d: expected index %d; got %d\", n, x, i)\n\t\t\t}\n\t\t\tif count > max {\n\t\t\t\tt.Errorf(\"n = %d, x = %d: expected <= %d calls; got %d\", n, x, max, count)\n\t\t\t}\n\t\t}\n\t\tn *= 10\n\t\tstep *= 10\n\t}\n}\n\n\n\/\/ Smoke tests for convenience wrappers - not comprehensive.\n\nvar fdata = []float{0: -3.14, 1: 0, 2: 1, 3: 2, 4: 1000.7}\nvar sdata = []string{0: \"f\", 1: \"foo\", 2: \"foobar\", 3: \"x\"}\n\nvar wrappertests = []struct {\n\tname string\n\tresult int\n\ti int\n}{\n\t{\"SearchInts\", SearchInts(data, 11), 8},\n\t{\"SearchFloats\", SearchFloats(fdata, 2.1), 3},\n\t{\"SearchStrings\", SearchStrings(sdata, \"\"), 0},\n\t{\"IntArray.Search\", IntArray(data).Search(0), 2},\n\t{\"FloatArray.Search\", FloatArray(fdata).Search(2.0), 3},\n\t{\"StringArray.Search\", StringArray(sdata).Search(\"x\"), 3},\n}\n\n\nfunc TestSearchWrappers(t *testing.T) {\n\tfor _, e := range wrappertests {\n\t\tif e.result != e.i {\n\t\t\tt.Errorf(\"%s: expected index %d; got %d\", e.name, e.i, e.result)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main_test\n\nimport (\n\t\"strings\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gexec\"\n\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/generator\"\n\n\t\"time\"\n\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/cf\"\n)\n\nvar _ = Describe(\"Pushing an app\", func() {\n\tIt(\"stops the time for pushing an app\", func() {\n\t\tfor index := 0; index < loopCount; index++ {\n\t\t\tstartTime := time.Now()\n\n\t\t\tappName := generator.PrefixedRandomName(\"APP\")\n\t\t\tExpect(\n\t\t\t\tcf.Cf(\"push\", appName, \"-p\", \"assets\/golang\", \"-b\", \"go_buildpack\").Wait(cfPushTimeout)).\n\t\t\t\tTo(Exit(0))\n\n\t\t\tmetricsService.SendTimingMetric(asSparseMetric(\"cf-push\"), time.Since(startTime))\n\n\t\t\tExpect(\n\t\t\t\tcf.Cf(\"delete\", appName, \"-f\").Wait(cfPushTimeout)).\n\t\t\t\tTo(Exit(0))\n\t\t}\n\n\t\tExpect(metricsService.filename).To(BeAnExistingFile())\n\n\t\tlastLine, err := lastLine(metricsService.filename)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tparts := strings.Split(lastLine, \",\")\n\t\tExpect(parts).To(HaveLen(4))\n\n\t\t_, err = time.Parse(time.RFC3339, parts[0])\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tExpect(parts[1]).To(Equal(\"cf-push_sparse-avg\"))\n\t\tExpect(parts[2]).To(MatchRegexp(\"\\\\d+\"))\n\t\tExpect(parts[3]).To(MatchRegexp(\"ms\"))\n\t})\n})\n\nfunc asSparseMetric(metricName string) string {\n\treturn metricsPrefix + metricName + \"_sparse-avg\"\n}\n<commit_msg>Ignore metrics prefix<commit_after>package main_test\n\nimport (\n\t\"strings\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t. \"github.com\/onsi\/gomega\/gexec\"\n\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/generator\"\n\n\t\"time\"\n\n\t\"github.com\/cloudfoundry-incubator\/cf-test-helpers\/cf\"\n)\n\nvar _ = Describe(\"Pushing an app\", func() {\n\tIt(\"stops the time for pushing an app\", func() {\n\t\tfor index := 0; index < loopCount; index++ {\n\t\t\tstartTime := time.Now()\n\n\t\t\tappName := generator.PrefixedRandomName(\"APP\")\n\t\t\tExpect(\n\t\t\t\tcf.Cf(\"push\", appName, \"-p\", \"assets\/golang\", \"-b\", \"go_buildpack\").Wait(cfPushTimeout)).\n\t\t\t\tTo(Exit(0))\n\n\t\t\tmetricsService.SendTimingMetric(asSparseMetric(\"cf-push\"), time.Since(startTime))\n\n\t\t\tExpect(\n\t\t\t\tcf.Cf(\"delete\", appName, \"-f\").Wait(cfPushTimeout)).\n\t\t\t\tTo(Exit(0))\n\t\t}\n\n\t\tExpect(metricsService.filename).To(BeAnExistingFile())\n\n\t\tlastLine, err := lastLine(metricsService.filename)\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tparts := strings.Split(lastLine, \",\")\n\t\tExpect(parts).To(HaveLen(4))\n\n\t\t_, err = time.Parse(time.RFC3339, parts[0])\n\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\tExpect(parts[1]).To(HaveSuffix(\"cf-push_sparse-avg\"))\n\t\tExpect(parts[2]).To(MatchRegexp(\"\\\\d+\"))\n\t\tExpect(parts[3]).To(MatchRegexp(\"ms\"))\n\t})\n})\n\nfunc asSparseMetric(metricName string) string {\n\treturn metricsPrefix + metricName + \"_sparse-avg\"\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Google Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package repl contains the implementation of the command that prints the\n\/\/ BQL version.\npackage repl\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/google\/badwolf\/bql\/grammar\"\n\t\"github.com\/google\/badwolf\/bql\/planner\"\n\t\"github.com\/google\/badwolf\/bql\/semantic\"\n\t\"github.com\/google\/badwolf\/bql\/table\"\n\t\"github.com\/google\/badwolf\/bql\/version\"\n\t\"github.com\/google\/badwolf\/storage\"\n\t\"github.com\/google\/badwolf\/tools\/vcli\/bw\/command\"\n\t\"github.com\/google\/badwolf\/tools\/vcli\/bw\/export\"\n\tbio \"github.com\/google\/badwolf\/tools\/vcli\/bw\/io\"\n\t\"github.com\/google\/badwolf\/tools\/vcli\/bw\/load\"\n)\n\nconst prompt = \"bql> \"\n\n\/\/ New create the version command.\nfunc New(driver storage.Store, chanSize, bulkSize, builderSize int, rl ReadLiner, done chan bool) *command.Command {\n\treturn &command.Command{\n\t\tRun: func(ctx context.Context, args []string) int {\n\t\t\tREPL(driver, os.Stdin, rl, chanSize, bulkSize, builderSize, done)\n\t\t\treturn 0\n\t\t},\n\t\tUsageLine: \"bql\",\n\t\tShort: \"starts a REPL to run BQL statements.\",\n\t\tLong: \"Starts a REPL from the command line to accept BQL statements. Type quit; to leave the REPL.\",\n\t}\n}\n\n\/\/ ReadLiner returns a channel with the imput to be used for the REPL.\ntype ReadLiner func(done chan bool) <-chan string\n\n\/\/ SimpleReadLine reads a line from the provided file. This does not support\n\/\/ any advanced terminal capabilities.\n\/\/\n\/\/ This function can be replaced with more advanced functionality, as shown\n\/\/ https:\/\/github.com\/xllora\/bwdrivers\/blob\/master\/bw\/main.go.\nfunc SimpleReadLine(done chan bool) <-chan string {\n\tc := make(chan string)\n\tgo func() {\n\t\tdefer close(c)\n\t\tscanner := bufio.NewScanner(os.Stdin)\n\t\tcmd := \"\"\n\t\tfmt.Print(\"bql> \")\n\t\tfor {\n\t\t\tif !scanner.Scan() {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcmd = strings.TrimSpace(cmd + \" \" + strings.TrimSpace(scanner.Text()))\n\t\t\tif strings.HasSuffix(cmd, \";\") {\n\t\t\t\tc <- cmd\n\t\t\t\tif <-done {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tcmd = \"\"\n\t\t\t\tfmt.Print(\"bql> \")\n\t\t\t}\n\t\t}\n\t}()\n\treturn c\n}\n\n\/\/ REPL starts a read-evaluation-print-loop to run BQL commands.\nfunc REPL(driver storage.Store, input *os.File, rl ReadLiner, chanSize, bulkSize, builderSize int, done chan bool) int {\n\tvar tracer io.Writer\n\tctx, isTracingToFile, sessionStart := context.Background(), false, time.Now()\n\n\tstopTracing := func() {\n\t\tif tracer != nil {\n\t\t\tif isTracingToFile {\n\t\t\t\tfmt.Println(\"Closing tracing file.\")\n\t\t\t\ttracer.(*os.File).Close()\n\t\t\t}\n\t\t\ttracer, isTracingToFile = nil, false\n\t\t}\n\t}\n\tdefer stopTracing()\n\n\tfmt.Printf(\"Welcome to BadWolf vCli (%d.%d.%d-%s)\\n\", version.Major, version.Minor, version.Patch, version.Release)\n\tfmt.Printf(\"Using driver %q. Type quit; to exit\\n\", driver.Name(ctx))\n\tfmt.Printf(\"Session started at %v\\n\\n\", sessionStart)\n\tdefer func() {\n\t\tfmt.Printf(\"\\n\\nThanks for all those BQL queries!\\nSession duration: %v\\n\\n\", time.Now().Sub(sessionStart))\n\t}()\n\n\tfor l := range rl(done) {\n\t\tif strings.HasPrefix(l, \"quit\") {\n\t\t\tdone <- true\n\t\t\tbreak\n\t\t}\n\t\tif strings.HasPrefix(l, \"help\") {\n\t\t\tprintHelp()\n\t\t\tdone <- false\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(l, \"start tracing\") {\n\t\t\targs := strings.Split(strings.TrimSpace(l)[:len(l)-1], \" \")\n\t\t\tswitch len(args) {\n\t\t\tcase 2:\n\t\t\t\t\/\/ Start tracing to the console.\n\t\t\t\tstopTracing()\n\t\t\t\ttracer, isTracingToFile = os.Stdout, false\n\t\t\t\tfmt.Println(\"[WARNING] Tracing is on. This may slow your BQL queries.\")\n\t\t\tcase 3:\n\t\t\t\t\/\/ Start tracing to file.\n\t\t\t\tstopTracing()\n\t\t\t\tf, err := os.Create(args[2])\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t} else {\n\t\t\t\t\ttracer, isTracingToFile = f, true\n\t\t\t\t\tfmt.Println(\"[WARNING] Tracing is on. This may slow your BQL queries.\")\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tfmt.Println(\"Invalid syntax\\n\\tstart tracing [trace_file]\")\n\t\t\t}\n\t\t\tdone <- false\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(l, \"stop tracing\") {\n\t\t\tstopTracing()\n\t\t\tfmt.Println(\"Tracing is off.\")\n\t\t\tdone <- false\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(l, \"export\") {\n\t\t\tnow := time.Now()\n\t\t\targs := strings.Split(\"bw \"+strings.TrimSpace(l)[:len(l)-1], \" \")\n\t\t\tusage := \"Wrong syntax\\n\\n\\tload <graph_names_separated_by_commas> <file_path>\\n\"\n\t\t\texport.Eval(ctx, usage, args, driver, bulkSize)\n\t\t\tfmt.Println(\"[OK] Time spent: \", time.Now().Sub(now))\n\t\t\tdone <- false\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(l, \"load\") {\n\t\t\tnow := time.Now()\n\t\t\targs := strings.Split(\"bw \"+strings.TrimSpace(l[:len(l)-1]), \" \")\n\t\t\tusage := \"Wrong syntax\\n\\n\\tload <file_path> <graph_names_separated_by_commas>\\n\"\n\t\t\tload.Eval(ctx, usage, args, driver, bulkSize, builderSize)\n\t\t\tfmt.Println(\"[OK] Time spent: \", time.Now().Sub(now))\n\t\t\tdone <- false\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(l, \"desc\") {\n\t\t\tpln, err := planBQL(ctx, l[4:], driver, chanSize, bulkSize, nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"[ERROR] %s\\n\\n\", err)\n\t\t\t} else {\n\t\t\t\tfmt.Println(pln.String())\n\t\t\t\tfmt.Println(\"[OK]\")\n\t\t\t}\n\t\t\tdone <- false\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(l, \"run\") {\n\t\t\tnow := time.Now()\n\t\t\tpath, cmds, err := runBQLFromFile(ctx, driver, chanSize, bulkSize, strings.TrimSpace(l[:len(l)-1]), tracer)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"[ERROR] %s\\n\\n\", err)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"Loaded %q and run %d BQL commands successfully\\n\\n\", path, cmds)\n\t\t\t}\n\t\t\tfmt.Println(\"Time spent: \", time.Now().Sub(now))\n\t\t\tdone <- false\n\t\t\tcontinue\n\t\t}\n\n\t\tnow := time.Now()\n\t\ttable, err := runBQL(ctx, l, driver, chanSize, bulkSize, tracer)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"[ERROR] %s\\n\", err)\n\t\t\tfmt.Println(\"Time spent: \", time.Now().Sub(now))\n\t\t\tfmt.Println()\n\t\t} else {\n\t\t\tif len(table.Bindings()) > 0 {\n\t\t\t\tfmt.Println(table.String())\n\t\t\t}\n\t\t\tfmt.Printf(\"[OK] %d rows retrived. Time spent: %v.\\n\", table.NumRows(), time.Now().Sub(now))\n\t\t}\n\t\tdone <- false\n\t}\n\treturn 0\n}\n\n\/\/ printHelp prints help for the console commands.\nfunc printHelp() {\n\tfmt.Println(\"help - prints help for the bw console.\")\n\tfmt.Println(\"export <graph_names_separated_by_commas> <file_path> - dumps triples from graphs into a file path.\")\n\tfmt.Println(\"desc <BQL> - prints the execution plan for a BQL statement.\")\n\tfmt.Println(\"load <file_path> <graph_names_separated_by_commas> - load triples into the specified graphs.\")\n\tfmt.Println(\"run <file_with_bql_statements> - runs all the BQL statements in the file.\")\n\tfmt.Println(\"start tracing [trace_file] - starts tracing queries.\")\n\tfmt.Println(\"stop tracing - stops tracing queries.\")\n\tfmt.Println(\"quit - quits the console.\")\n\tfmt.Println()\n}\n\n\/\/ runBQLFromFile loads all the statements in the file and runs them.\nfunc runBQLFromFile(ctx context.Context, driver storage.Store, chanSize, bulkSize int, line string, w io.Writer) (string, int, error) {\n\tss := strings.Split(strings.TrimSpace(line), \" \")\n\tif len(ss) != 2 {\n\t\treturn \"\", 0, fmt.Errorf(\"wrong syntax: run <file_with_bql_statements>\")\n\t}\n\tpath := ss[1]\n\tlines, err := bio.GetStatementsFromFile(path)\n\tif err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"failed to read file %q with error %v on\\n\", path, err)\n\t}\n\tfor idx, stm := range lines {\n\t\tfmt.Printf(\"Processing statement (%d\/%d)\\n\", idx+1, len(lines))\n\t\t_, err := runBQL(ctx, stm, driver, chanSize, bulkSize, w)\n\t\tif err != nil {\n\t\t\treturn \"\", 0, fmt.Errorf(\"%v on\\n%s\\n\", err, stm)\n\t\t}\n\t}\n\tfmt.Println()\n\treturn path, len(lines), nil\n}\n\n\/\/ runBQL attempts to execute the provided query against the given store.\nfunc runBQL(ctx context.Context, bql string, s storage.Store, chanSize, bulkSize int, w io.Writer) (*table.Table, error) {\n\tpln, err := planBQL(ctx, bql, s, chanSize, bulkSize, w)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres, err := pln.Execute(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"planner.Execute: failed to execute query plan with error %v\", err)\n\t}\n\treturn res, nil\n}\n\n\/\/ planBQL attempts to create the execution plan for the provided query against the given store.\nfunc planBQL(ctx context.Context, bql string, s storage.Store, chanSize, bulkSize int, w io.Writer) (planner.Executor, error) {\n\tp, err := grammar.NewParser(grammar.SemanticBQL())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to initilize a valid BQL parser\")\n\t}\n\tstm := &semantic.Statement{}\n\tif err := p.Parse(grammar.NewLLk(bql, 1), stm); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse BQL statement with error %v\", err)\n\t}\n\tpln, err := planner.New(ctx, s, stm, chanSize, bulkSize, w)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"should have not failed to create a plan using memory.DefaultStorage for statement %v with error %v\", stm, err)\n\t}\n\treturn pln, nil\n}\n<commit_msg>Split time shown at the end uf the REPL<commit_after>\/\/ Copyright 2016 Google Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package repl contains the implementation of the command that prints the\n\/\/ BQL version.\npackage repl\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\t\"github.com\/google\/badwolf\/bql\/grammar\"\n\t\"github.com\/google\/badwolf\/bql\/planner\"\n\t\"github.com\/google\/badwolf\/bql\/semantic\"\n\t\"github.com\/google\/badwolf\/bql\/table\"\n\t\"github.com\/google\/badwolf\/bql\/version\"\n\t\"github.com\/google\/badwolf\/storage\"\n\t\"github.com\/google\/badwolf\/tools\/vcli\/bw\/command\"\n\t\"github.com\/google\/badwolf\/tools\/vcli\/bw\/export\"\n\tbio \"github.com\/google\/badwolf\/tools\/vcli\/bw\/io\"\n\t\"github.com\/google\/badwolf\/tools\/vcli\/bw\/load\"\n)\n\nconst prompt = \"bql> \"\n\n\/\/ New create the version command.\nfunc New(driver storage.Store, chanSize, bulkSize, builderSize int, rl ReadLiner, done chan bool) *command.Command {\n\treturn &command.Command{\n\t\tRun: func(ctx context.Context, args []string) int {\n\t\t\tREPL(driver, os.Stdin, rl, chanSize, bulkSize, builderSize, done)\n\t\t\treturn 0\n\t\t},\n\t\tUsageLine: \"bql\",\n\t\tShort: \"starts a REPL to run BQL statements.\",\n\t\tLong: \"Starts a REPL from the command line to accept BQL statements. Type quit; to leave the REPL.\",\n\t}\n}\n\n\/\/ ReadLiner returns a channel with the imput to be used for the REPL.\ntype ReadLiner func(done chan bool) <-chan string\n\n\/\/ SimpleReadLine reads a line from the provided file. This does not support\n\/\/ any advanced terminal capabilities.\n\/\/\n\/\/ This function can be replaced with more advanced functionality, as shown\n\/\/ https:\/\/github.com\/xllora\/bwdrivers\/blob\/master\/bw\/main.go.\nfunc SimpleReadLine(done chan bool) <-chan string {\n\tc := make(chan string)\n\tgo func() {\n\t\tdefer close(c)\n\t\tscanner := bufio.NewScanner(os.Stdin)\n\t\tcmd := \"\"\n\t\tfmt.Print(\"bql> \")\n\t\tfor {\n\t\t\tif !scanner.Scan() {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcmd = strings.TrimSpace(cmd + \" \" + strings.TrimSpace(scanner.Text()))\n\t\t\tif strings.HasSuffix(cmd, \";\") {\n\t\t\t\tc <- cmd\n\t\t\t\tif <-done {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tcmd = \"\"\n\t\t\t\tfmt.Print(\"bql> \")\n\t\t\t}\n\t\t}\n\t}()\n\treturn c\n}\n\n\/\/ REPL starts a read-evaluation-print-loop to run BQL commands.\nfunc REPL(driver storage.Store, input *os.File, rl ReadLiner, chanSize, bulkSize, builderSize int, done chan bool) int {\n\tvar tracer io.Writer\n\tctx, isTracingToFile, sessionStart := context.Background(), false, time.Now()\n\n\tstopTracing := func() {\n\t\tif tracer != nil {\n\t\t\tif isTracingToFile {\n\t\t\t\tfmt.Println(\"Closing tracing file.\")\n\t\t\t\ttracer.(*os.File).Close()\n\t\t\t}\n\t\t\ttracer, isTracingToFile = nil, false\n\t\t}\n\t}\n\tdefer stopTracing()\n\n\tfmt.Printf(\"Welcome to BadWolf vCli (%d.%d.%d-%s)\\n\", version.Major, version.Minor, version.Patch, version.Release)\n\tfmt.Printf(\"Using driver %q. Type quit; to exit\\n\", driver.Name(ctx))\n\tfmt.Printf(\"Session started at %v\\n\\n\", sessionStart)\n\tdefer func() {\n\t\tfmt.Printf(\"\\n\\nThanks for all those BQL queries!\\nSession duration: %v\\n\\n\", time.Now().Sub(sessionStart))\n\t}()\n\n\tfor l := range rl(done) {\n\t\tif strings.HasPrefix(l, \"quit\") {\n\t\t\tdone <- true\n\t\t\tbreak\n\t\t}\n\t\tif strings.HasPrefix(l, \"help\") {\n\t\t\tprintHelp()\n\t\t\tdone <- false\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(l, \"start tracing\") {\n\t\t\targs := strings.Split(strings.TrimSpace(l)[:len(l)-1], \" \")\n\t\t\tswitch len(args) {\n\t\t\tcase 2:\n\t\t\t\t\/\/ Start tracing to the console.\n\t\t\t\tstopTracing()\n\t\t\t\ttracer, isTracingToFile = os.Stdout, false\n\t\t\t\tfmt.Println(\"[WARNING] Tracing is on. This may slow your BQL queries.\")\n\t\t\tcase 3:\n\t\t\t\t\/\/ Start tracing to file.\n\t\t\t\tstopTracing()\n\t\t\t\tf, err := os.Create(args[2])\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t} else {\n\t\t\t\t\ttracer, isTracingToFile = f, true\n\t\t\t\t\tfmt.Println(\"[WARNING] Tracing is on. This may slow your BQL queries.\")\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tfmt.Println(\"Invalid syntax\\n\\tstart tracing [trace_file]\")\n\t\t\t}\n\t\t\tdone <- false\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(l, \"stop tracing\") {\n\t\t\tstopTracing()\n\t\t\tfmt.Println(\"Tracing is off.\")\n\t\t\tdone <- false\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(l, \"export\") {\n\t\t\tnow := time.Now()\n\t\t\targs := strings.Split(\"bw \"+strings.TrimSpace(l)[:len(l)-1], \" \")\n\t\t\tusage := \"Wrong syntax\\n\\n\\tload <graph_names_separated_by_commas> <file_path>\\n\"\n\t\t\texport.Eval(ctx, usage, args, driver, bulkSize)\n\t\t\tfmt.Println(\"[OK] Time spent: \", time.Now().Sub(now))\n\t\t\tdone <- false\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(l, \"load\") {\n\t\t\tnow := time.Now()\n\t\t\targs := strings.Split(\"bw \"+strings.TrimSpace(l[:len(l)-1]), \" \")\n\t\t\tusage := \"Wrong syntax\\n\\n\\tload <file_path> <graph_names_separated_by_commas>\\n\"\n\t\t\tload.Eval(ctx, usage, args, driver, bulkSize, builderSize)\n\t\t\tfmt.Println(\"[OK] Time spent: \", time.Now().Sub(now))\n\t\t\tdone <- false\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(l, \"desc\") {\n\t\t\tpln, err := planBQL(ctx, l[4:], driver, chanSize, bulkSize, nil)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"[ERROR] %s\\n\\n\", err)\n\t\t\t} else {\n\t\t\t\tfmt.Println(pln.String())\n\t\t\t\tfmt.Println(\"[OK]\")\n\t\t\t}\n\t\t\tdone <- false\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(l, \"run\") {\n\t\t\tnow := time.Now()\n\t\t\tpath, cmds, err := runBQLFromFile(ctx, driver, chanSize, bulkSize, strings.TrimSpace(l[:len(l)-1]), tracer)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"[ERROR] %s\\n\\n\", err)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"Loaded %q and run %d BQL commands successfully\\n\\n\", path, cmds)\n\t\t\t}\n\t\t\tfmt.Println(\"Time spent: \", time.Now().Sub(now))\n\t\t\tdone <- false\n\t\t\tcontinue\n\t\t}\n\n\t\tnow := time.Now()\n\t\ttable, err := runBQL(ctx, l, driver, chanSize, bulkSize, tracer)\n\t\tbqlDiff := time.Now().Sub(now)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"[ERROR] %s\\n\", err)\n\t\t\tfmt.Println(\"Time spent: \", time.Now().Sub(now))\n\t\t\tfmt.Println()\n\t\t} else {\n\t\t\tif len(table.Bindings()) > 0 {\n\t\t\t\tfmt.Println(table.String())\n\t\t\t}\n\t\t\tfmt.Printf(\"[OK] %d rows retrived. BQL time: %v. Display time: %v\\n\",\n\t\t\t\ttable.NumRows(), bqlDiff, time.Now().Sub(now)-bqlDiff)\n\t\t}\n\t\tdone <- false\n\t}\n\treturn 0\n}\n\n\/\/ printHelp prints help for the console commands.\nfunc printHelp() {\n\tfmt.Println(\"help - prints help for the bw console.\")\n\tfmt.Println(\"export <graph_names_separated_by_commas> <file_path> - dumps triples from graphs into a file path.\")\n\tfmt.Println(\"desc <BQL> - prints the execution plan for a BQL statement.\")\n\tfmt.Println(\"load <file_path> <graph_names_separated_by_commas> - load triples into the specified graphs.\")\n\tfmt.Println(\"run <file_with_bql_statements> - runs all the BQL statements in the file.\")\n\tfmt.Println(\"start tracing [trace_file] - starts tracing queries.\")\n\tfmt.Println(\"stop tracing - stops tracing queries.\")\n\tfmt.Println(\"quit - quits the console.\")\n\tfmt.Println()\n}\n\n\/\/ runBQLFromFile loads all the statements in the file and runs them.\nfunc runBQLFromFile(ctx context.Context, driver storage.Store, chanSize, bulkSize int, line string, w io.Writer) (string, int, error) {\n\tss := strings.Split(strings.TrimSpace(line), \" \")\n\tif len(ss) != 2 {\n\t\treturn \"\", 0, fmt.Errorf(\"wrong syntax: run <file_with_bql_statements>\")\n\t}\n\tpath := ss[1]\n\tlines, err := bio.GetStatementsFromFile(path)\n\tif err != nil {\n\t\treturn \"\", 0, fmt.Errorf(\"failed to read file %q with error %v on\\n\", path, err)\n\t}\n\tfor idx, stm := range lines {\n\t\tfmt.Printf(\"Processing statement (%d\/%d)\\n\", idx+1, len(lines))\n\t\t_, err := runBQL(ctx, stm, driver, chanSize, bulkSize, w)\n\t\tif err != nil {\n\t\t\treturn \"\", 0, fmt.Errorf(\"%v on\\n%s\\n\", err, stm)\n\t\t}\n\t}\n\tfmt.Println()\n\treturn path, len(lines), nil\n}\n\n\/\/ runBQL attempts to execute the provided query against the given store.\nfunc runBQL(ctx context.Context, bql string, s storage.Store, chanSize, bulkSize int, w io.Writer) (*table.Table, error) {\n\tpln, err := planBQL(ctx, bql, s, chanSize, bulkSize, w)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres, err := pln.Execute(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"planner.Execute: failed to execute query plan with error %v\", err)\n\t}\n\treturn res, nil\n}\n\n\/\/ planBQL attempts to create the execution plan for the provided query against the given store.\nfunc planBQL(ctx context.Context, bql string, s storage.Store, chanSize, bulkSize int, w io.Writer) (planner.Executor, error) {\n\tp, err := grammar.NewParser(grammar.SemanticBQL())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to initilize a valid BQL parser\")\n\t}\n\tstm := &semantic.Statement{}\n\tif err := p.Parse(grammar.NewLLk(bql, 1), stm); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse BQL statement with error %v\", err)\n\t}\n\tpln, err := planner.New(ctx, s, stm, chanSize, bulkSize, w)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"should have not failed to create a plan using memory.DefaultStorage for statement %v with error %v\", stm, err)\n\t}\n\treturn pln, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"log\"\n\t\"regexp\"\n\t\"sort\"\n\n\t\"github.com\/shirou\/gopsutil\/process\"\n)\n\nvar javaRegexp = regexp.MustCompile(`javaw?(\\.exe)?`)\n\ntype byMemoryUsage []*process.Process\n\nfunc (a byMemoryUsage) Len() int { return len(a) }\n\nfunc (a byMemoryUsage) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\nfunc (a byMemoryUsage) Less(i, j int) bool {\n\tiMU, err := a[i].MemoryPercent()\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tjMU, err := a[j].MemoryPercent()\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn iMU < jMU\n}\n\nfunc killJava() {\n\tlog.Println(\"=== killJava ===\")\n\tdefer log.Println(\"================\")\n\n\tpids, err := process.Pids()\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tvar procs []*process.Process\n\tfor _, pid := range pids {\n\t\tproc, err := process.NewProcess(pid)\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\n\t\tname, err := proc.Name()\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\n\t\tif javaRegexp.MatchString(name) {\n\t\t\tprocs = append(procs, proc)\n\t\t}\n\t}\n\n\tsort.Sort(byMemoryUsage(procs))\n\tlog.Println(\"Found\", len(procs), \"Java processes.\")\n\tif len(procs) == 0 {\n\t\treturn\n\t}\n\n\tlog.Println(\"procs:\", procs)\n\tlog.Println(\"Killing top memory user:\", procs[0])\n\tif err := procs[0].Kill(); err != nil {\n\t\tlog.Println(err)\n\t}\n}\n<commit_msg>Fixes client for Windows.<commit_after>package main\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"sort\"\n\n\t\"github.com\/shirou\/gopsutil\/process\"\n)\n\nvar javaRegexp = regexp.MustCompile(`javaw?(\\.exe)?`)\n\ntype byMemoryUsage []*process.Process\n\nfunc (a byMemoryUsage) Len() int { return len(a) }\n\nfunc (a byMemoryUsage) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\n\nfunc (a byMemoryUsage) Less(i, j int) bool {\n\tiMU, err := a[i].MemoryPercent()\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tjMU, err := a[j].MemoryPercent()\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn iMU < jMU\n}\n\nfunc killJava() {\n\tlog.Println(\"=== killJava ===\")\n\tdefer log.Println(\"================\")\n\n\tpids, err := process.Pids()\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tvar procs []*process.Process\n\tfor _, pid := range pids {\n\t\tproc, err := process.NewProcess(pid)\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\n\t\tname, err := proc.Name()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif javaRegexp.MatchString(name) {\n\t\t\tprocs = append(procs, proc)\n\t\t}\n\t}\n\n\tsort.Sort(byMemoryUsage(procs))\n\tlog.Println(\"Found\", len(procs), \"Java processes.\")\n\tif len(procs) == 0 {\n\t\treturn\n\t}\n\n\tlog.Println(\"procs:\", procs)\n\tlog.Println(\"Killing top memory user:\", procs[0])\n\tosProc, err := os.FindProcess(int(procs[0].Pid))\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tif err = osProc.Kill(); err != nil {\n\t\tlog.Panic(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package cwriter\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc TestWriter(t *testing.T) {\n\tb := &bytes.Buffer{}\n\tw := New(b)\n\tfor i := 0; i < 2; i++ {\n\t\tfmt.Fprintln(w, \"foo\")\n\t}\n\tw.Flush()\n\twant := \"foo\\nfoo\\n\"\n\tif b.String() != want {\n\t\tt.Fatalf(\"want %q, got %q\", want, b.String())\n\t}\n}\n<commit_msg>Remove duplicate test<commit_after><|endoftext|>"} {"text":"<commit_before>package git\n\nimport (\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\tgit2go \"github.com\/lucas-clemente\/git2go\"\n\t\"github.com\/lucas-clemente\/treewatch\"\n)\n\n\/\/ GitRepo is a git repository implementing the Repo interface for goldfish.\ntype GitRepo struct {\n\tpath string\n\trepo *git2go.Repository\n\ttw treewatch.TreeWatcher\n}\n\n\/\/ NewGitRepo opens or makes a git repo at the given path\nfunc NewGitRepo(repoPath string) (*GitRepo, error) {\n\trepo, err := git2go.OpenRepository(repoPath)\n\tif err != nil {\n\t\trepo, err = git2go.InitRepository(repoPath, false)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Make empty tree\n\t\tindex, err := repo.Index()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer index.Free()\n\n\t\ttreeID, err := index.WriteTree()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttree, err := repo.LookupTree(treeID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdefer tree.Free()\n\t\tsig := &git2go.Signature{Name: \"system\", Email: \"goldfish@clemente.io\", When: time.Now()}\n\t\t_, err = repo.CreateCommit(\"refs\/heads\/master\", sig, sig, \"initial commit\", tree)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\ttw, err := treewatch.NewTreeWatcher(repoPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := &GitRepo{path: repoPath, repo: repo, tw: tw}\n\n\tgo func() {\n\t\tfor file := range tw.Changes() {\n\t\t\tif strings.Contains(file, \"\/.git\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr := r.addAllAndCommit(\"changed \" + path.Base(file))\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn r, nil\n}\n\n\/\/ StopWatching stops watching for changes in the repo\nfunc (r *GitRepo) StopWatching() {\n\tr.tw.Stop()\n}\n\n\/\/ ReadFile reads a file from the repo\nfunc (r *GitRepo) ReadFile(path string) (io.ReadCloser, error) {\n\treturn os.Open(r.absolutePath(path))\n}\n\n\/\/ StoreFile writes a file to the repo and commits it\nfunc (r *GitRepo) StoreFile(p string, data io.Reader) error {\n\tif err := os.MkdirAll(path.Dir(r.absolutePath(p)), 0755); err != nil {\n\t\treturn err\n\t}\n\n\tfile, err := os.OpenFile(r.absolutePath(p), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\t_, err = io.Copy(file, data)\n\treturn err\n}\n\n\/\/ ListFiles lists the files in a given directory\nfunc (r *GitRepo) ListFiles(prefix string) ([]string, error) {\n\tfileInfos, err := ioutil.ReadDir(r.absolutePath(prefix))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfiles := make([]string, 0, len(fileInfos))\n\n\tfor _, f := range fileInfos {\n\t\tname := f.Name()\n\t\tif name[0] == '.' {\n\t\t\tcontinue\n\t\t}\n\t\tname = prefix + name\n\t\tif f.IsDir() {\n\t\t\tname += \"\/\"\n\t\t}\n\t\tfiles = append(files, name)\n\t}\n\n\treturn files, nil\n}\n\nfunc (r *GitRepo) addAllAndCommit(message string) error {\n\tindex, err := r.repo.Index()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer index.Free()\n\n\tif err := index.AddAll([]string{}, git2go.IndexAddDefault, nil, nil); err != nil {\n\t\treturn err\n\t}\n\n\tif err := index.Write(); err != nil {\n\t\treturn err\n\t}\n\n\ttreeID, err := index.WriteTree()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn r.commit(treeID, message)\n}\n\nfunc (r *GitRepo) headCommit() (*git2go.Commit, error) {\n\theadRef, err := r.repo.Head()\n\tdefer headRef.Free()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\theadID := headRef.Target()\n\treturn r.repo.LookupCommit(headID)\n}\n\nfunc (r *GitRepo) commit(treeID *git2go.Oid, message string) error {\n\ttree, err := r.repo.LookupTree(treeID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tree.Free()\n\n\theadCommit, err := r.headCommit()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer headCommit.Free()\n\n\tif *treeID == *headCommit.TreeId() {\n\t\treturn nil\n\t}\n\n\tsig := &git2go.Signature{Name: \"system\", Email: \"goldfish@clemente.io\", When: time.Now()}\n\t_, err = r.repo.CreateCommit(\"refs\/heads\/master\", sig, sig, message, tree, headCommit)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (r *GitRepo) absolutePath(path string) string {\n\treturn r.path + path\n}\n<commit_msg>fix auto-committing deleted \/ moved files<commit_after>package git\n\nimport (\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\tgit2go \"github.com\/lucas-clemente\/git2go\"\n\t\"github.com\/lucas-clemente\/treewatch\"\n)\n\n\/\/ GitRepo is a git repository implementing the Repo interface for goldfish.\ntype GitRepo struct {\n\tpath string\n\trepo *git2go.Repository\n\ttw treewatch.TreeWatcher\n}\n\n\/\/ NewGitRepo opens or makes a git repo at the given path\nfunc NewGitRepo(repoPath string) (*GitRepo, error) {\n\trepo, err := git2go.OpenRepository(repoPath)\n\tif err != nil {\n\t\trepo, err = git2go.InitRepository(repoPath, false)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ Make empty tree\n\t\tindex, err := repo.Index()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer index.Free()\n\n\t\ttreeID, err := index.WriteTree()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttree, err := repo.LookupTree(treeID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdefer tree.Free()\n\t\tsig := &git2go.Signature{Name: \"system\", Email: \"goldfish@clemente.io\", When: time.Now()}\n\t\t_, err = repo.CreateCommit(\"refs\/heads\/master\", sig, sig, \"initial commit\", tree)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\ttw, err := treewatch.NewTreeWatcher(repoPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := &GitRepo{path: repoPath, repo: repo, tw: tw}\n\n\tgo func() {\n\t\tfor file := range tw.Changes() {\n\t\t\tif strings.Contains(file, \"\/.git\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\terr := r.addAllAndCommit(\"changed \" + path.Base(file))\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(err)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn r, nil\n}\n\n\/\/ StopWatching stops watching for changes in the repo\nfunc (r *GitRepo) StopWatching() {\n\tr.tw.Stop()\n}\n\n\/\/ ReadFile reads a file from the repo\nfunc (r *GitRepo) ReadFile(path string) (io.ReadCloser, error) {\n\treturn os.Open(r.absolutePath(path))\n}\n\n\/\/ StoreFile writes a file to the repo and commits it\nfunc (r *GitRepo) StoreFile(p string, data io.Reader) error {\n\tif err := os.MkdirAll(path.Dir(r.absolutePath(p)), 0755); err != nil {\n\t\treturn err\n\t}\n\n\tfile, err := os.OpenFile(r.absolutePath(p), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\t_, err = io.Copy(file, data)\n\treturn err\n}\n\n\/\/ ListFiles lists the files in a given directory\nfunc (r *GitRepo) ListFiles(prefix string) ([]string, error) {\n\tfileInfos, err := ioutil.ReadDir(r.absolutePath(prefix))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfiles := make([]string, 0, len(fileInfos))\n\n\tfor _, f := range fileInfos {\n\t\tname := f.Name()\n\t\tif name[0] == '.' {\n\t\t\tcontinue\n\t\t}\n\t\tname = prefix + name\n\t\tif f.IsDir() {\n\t\t\tname += \"\/\"\n\t\t}\n\t\tfiles = append(files, name)\n\t}\n\n\treturn files, nil\n}\n\nfunc (r *GitRepo) addAllAndCommit(message string) error {\n\tindex, err := r.repo.Index()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer index.Free()\n\n\tif err := index.AddAll([]string{}, git2go.IndexAddDefault, nil); err != nil {\n\t\treturn err\n\t}\n\n\tif err := index.UpdateAll([]string{}, nil); err != nil {\n\t\treturn err\n\t}\n\n\tif err := index.Write(); err != nil {\n\t\treturn err\n\t}\n\n\ttreeID, err := index.WriteTree()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn r.commit(treeID, message)\n}\n\nfunc (r *GitRepo) headCommit() (*git2go.Commit, error) {\n\theadRef, err := r.repo.Head()\n\tdefer headRef.Free()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\theadID := headRef.Target()\n\treturn r.repo.LookupCommit(headID)\n}\n\nfunc (r *GitRepo) commit(treeID *git2go.Oid, message string) error {\n\ttree, err := r.repo.LookupTree(treeID)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer tree.Free()\n\n\theadCommit, err := r.headCommit()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer headCommit.Free()\n\n\tif *treeID == *headCommit.TreeId() {\n\t\treturn nil\n\t}\n\n\tsig := &git2go.Signature{Name: \"system\", Email: \"goldfish@clemente.io\", When: time.Now()}\n\t_, err = r.repo.CreateCommit(\"refs\/heads\/master\", sig, sig, message, tree, headCommit)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (r *GitRepo) absolutePath(path string) string {\n\treturn r.path + path\n}\n<|endoftext|>"} {"text":"<commit_before>package gps\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/armon\/go-radix\"\n)\n\n\/\/ Typed implementations of radix trees. These are just simple wrappers that let\n\/\/ us avoid having to type assert anywhere else, cleaning up other code a bit.\n\/\/\n\/\/ Some of the more annoying things to implement (like walks) aren't\n\/\/ implemented. They can be added if\/when we actually need them.\n\/\/\n\/\/ Oh generics, where art thou...\n\ntype deducerTrie struct {\n\tt *radix.Tree\n}\n\nfunc newDeducerTrie() deducerTrie {\n\treturn deducerTrie{\n\t\tt: radix.New(),\n\t}\n}\n\n\/\/ Delete is used to delete a key, returning the previous value and if it was deleted\nfunc (t deducerTrie) Delete(s string) (pathDeducer, bool) {\n\tif v, had := t.t.Delete(s); had {\n\t\treturn v.(pathDeducer), had\n\t}\n\treturn nil, false\n}\n\n\/\/ Get is used to lookup a specific key, returning the value and if it was found\nfunc (t deducerTrie) Get(s string) (pathDeducer, bool) {\n\tif v, has := t.t.Get(s); has {\n\t\treturn v.(pathDeducer), has\n\t}\n\treturn nil, false\n}\n\n\/\/ Insert is used to add a newentry or update an existing entry. Returns if updated.\nfunc (t deducerTrie) Insert(s string, v pathDeducer) (pathDeducer, bool) {\n\tif v2, had := t.t.Insert(s, v); had {\n\t\treturn v2.(pathDeducer), had\n\t}\n\treturn nil, false\n}\n\n\/\/ Len is used to return the number of elements in the tree\nfunc (t deducerTrie) Len() int {\n\treturn t.t.Len()\n}\n\n\/\/ LongestPrefix is like Get, but instead of an exact match, it will return the\n\/\/ longest prefix match.\nfunc (t deducerTrie) LongestPrefix(s string) (string, pathDeducer, bool) {\n\tif p, v, has := t.t.LongestPrefix(s); has {\n\t\treturn p, v.(pathDeducer), has\n\t}\n\treturn \"\", nil, false\n}\n\n\/\/ ToMap is used to walk the tree and convert it to a map.\nfunc (t deducerTrie) ToMap() map[string]pathDeducer {\n\tm := make(map[string]pathDeducer)\n\tt.t.Walk(func(s string, v interface{}) bool {\n\t\tm[s] = v.(pathDeducer)\n\t\treturn false\n\t})\n\n\treturn m\n}\n\ntype prTrie struct {\n\tt *radix.Tree\n}\n\nfunc newProjectRootTrie() prTrie {\n\treturn prTrie{\n\t\tt: radix.New(),\n\t}\n}\n\n\/\/ Delete is used to delete a key, returning the previous value and if it was deleted\nfunc (t prTrie) Delete(s string) (ProjectRoot, bool) {\n\tif v, had := t.t.Delete(s); had {\n\t\treturn v.(ProjectRoot), had\n\t}\n\treturn \"\", false\n}\n\n\/\/ Get is used to lookup a specific key, returning the value and if it was found\nfunc (t prTrie) Get(s string) (ProjectRoot, bool) {\n\tif v, has := t.t.Get(s); has {\n\t\treturn v.(ProjectRoot), has\n\t}\n\treturn \"\", false\n}\n\n\/\/ Insert is used to add a newentry or update an existing entry. Returns if updated.\nfunc (t prTrie) Insert(s string, v ProjectRoot) (ProjectRoot, bool) {\n\tif v2, had := t.t.Insert(s, v); had {\n\t\treturn v2.(ProjectRoot), had\n\t}\n\treturn \"\", false\n}\n\n\/\/ Len is used to return the number of elements in the tree\nfunc (t prTrie) Len() int {\n\treturn t.t.Len()\n}\n\n\/\/ LongestPrefix is like Get, but instead of an exact match, it will return the\n\/\/ longest prefix match.\nfunc (t prTrie) LongestPrefix(s string) (string, ProjectRoot, bool) {\n\tif p, v, has := t.t.LongestPrefix(s); has && isPathPrefixOrEqual(p, s) {\n\t\treturn p, v.(ProjectRoot), has\n\t}\n\treturn \"\", \"\", false\n}\n\n\/\/ ToMap is used to walk the tree and convert it to a map.\nfunc (t prTrie) ToMap() map[string]ProjectRoot {\n\tm := make(map[string]ProjectRoot)\n\tt.t.Walk(func(s string, v interface{}) bool {\n\t\tm[s] = v.(ProjectRoot)\n\t\treturn false\n\t})\n\n\treturn m\n}\n\n\/\/ isPathPrefixOrEqual is an additional helper check to ensure that the literal\n\/\/ string prefix returned from a radix tree prefix match is also a tree match.\n\/\/\n\/\/ The radix tree gets it mostly right, but we have to guard against\n\/\/ possibilities like this:\n\/\/\n\/\/ github.com\/sdboyer\/foo\n\/\/ github.com\/sdboyer\/foobar\/baz\n\/\/\n\/\/ The latter would incorrectly be conflated with the former. As we know we're\n\/\/ operating on strings that describe paths, guard against this case by\n\/\/ verifying that either the input is the same length as the match (in which\n\/\/ case we know they're equal), or that the next character is a \"\/\".\nfunc isPathPrefixOrEqual(pre, path string) bool {\n\tprflen, pathlen := len(pre), len(path)\n\tif pathlen == prflen+1 {\n\t\t\/\/ this can never be the case\n\t\treturn false\n\t}\n\n\t\/\/ we assume something else (a trie) has done equality check up to the point\n\t\/\/ of the prefix, so we just check len\n\treturn prflen == pathlen || strings.Index(path[prflen:], \"\/\") == 0\n}\n<commit_msg>Improve some var names and docs in typed_radix<commit_after>package gps\n\nimport (\n\t\"strings\"\n\n\t\"github.com\/armon\/go-radix\"\n)\n\n\/\/ Typed implementations of radix trees. These are just simple wrappers that let\n\/\/ us avoid having to type assert anywhere else, cleaning up other code a bit.\n\/\/\n\/\/ Some of the more annoying things to implement (like walks) aren't\n\/\/ implemented. They can be added if\/when we actually need them.\n\/\/\n\/\/ Oh generics, where art thou...\n\ntype deducerTrie struct {\n\tt *radix.Tree\n}\n\nfunc newDeducerTrie() deducerTrie {\n\treturn deducerTrie{\n\t\tt: radix.New(),\n\t}\n}\n\n\/\/ Delete is used to delete a key, returning the previous value and if it was deleted\nfunc (t deducerTrie) Delete(s string) (pathDeducer, bool) {\n\tif d, had := t.t.Delete(s); had {\n\t\treturn d.(pathDeducer), had\n\t}\n\treturn nil, false\n}\n\n\/\/ Get is used to lookup a specific key, returning the value and if it was found\nfunc (t deducerTrie) Get(s string) (pathDeducer, bool) {\n\tif d, has := t.t.Get(s); has {\n\t\treturn d.(pathDeducer), has\n\t}\n\treturn nil, false\n}\n\n\/\/ Insert is used to add a newentry or update an existing entry. Returns if updated.\nfunc (t deducerTrie) Insert(s string, d pathDeducer) (pathDeducer, bool) {\n\tif d2, had := t.t.Insert(s, d); had {\n\t\treturn d2.(pathDeducer), had\n\t}\n\treturn nil, false\n}\n\n\/\/ Len is used to return the number of elements in the tree\nfunc (t deducerTrie) Len() int {\n\treturn t.t.Len()\n}\n\n\/\/ LongestPrefix is like Get, but instead of an exact match, it will return the\n\/\/ longest prefix match.\nfunc (t deducerTrie) LongestPrefix(s string) (string, pathDeducer, bool) {\n\tif p, d, has := t.t.LongestPrefix(s); has {\n\t\treturn p, d.(pathDeducer), has\n\t}\n\treturn \"\", nil, false\n}\n\n\/\/ ToMap is used to walk the tree and convert it to a map.\nfunc (t deducerTrie) ToMap() map[string]pathDeducer {\n\tm := make(map[string]pathDeducer)\n\tt.t.Walk(func(s string, d interface{}) bool {\n\t\tm[s] = d.(pathDeducer)\n\t\treturn false\n\t})\n\n\treturn m\n}\n\ntype prTrie struct {\n\tt *radix.Tree\n}\n\nfunc newProjectRootTrie() prTrie {\n\treturn prTrie{\n\t\tt: radix.New(),\n\t}\n}\n\n\/\/ Delete is used to delete a key, returning the previous value and if it was deleted\nfunc (t prTrie) Delete(s string) (ProjectRoot, bool) {\n\tif pr, had := t.t.Delete(s); had {\n\t\treturn pr.(ProjectRoot), had\n\t}\n\treturn \"\", false\n}\n\n\/\/ Get is used to lookup a specific key, returning the value and if it was found\nfunc (t prTrie) Get(s string) (ProjectRoot, bool) {\n\tif pr, has := t.t.Get(s); has {\n\t\treturn pr.(ProjectRoot), has\n\t}\n\treturn \"\", false\n}\n\n\/\/ Insert is used to add a newentry or update an existing entry. Returns if updated.\nfunc (t prTrie) Insert(s string, pr ProjectRoot) (ProjectRoot, bool) {\n\tif pr2, had := t.t.Insert(s, pr); had {\n\t\treturn pr2.(ProjectRoot), had\n\t}\n\treturn \"\", false\n}\n\n\/\/ Len is used to return the number of elements in the tree\nfunc (t prTrie) Len() int {\n\treturn t.t.Len()\n}\n\n\/\/ LongestPrefix is like Get, but instead of an exact match, it will return the\n\/\/ longest prefix match.\nfunc (t prTrie) LongestPrefix(s string) (string, ProjectRoot, bool) {\n\tif p, pr, has := t.t.LongestPrefix(s); has && isPathPrefixOrEqual(p, s) {\n\t\treturn p, pr.(ProjectRoot), has\n\t}\n\treturn \"\", \"\", false\n}\n\n\/\/ ToMap is used to walk the tree and convert it to a map.\nfunc (t prTrie) ToMap() map[string]ProjectRoot {\n\tm := make(map[string]ProjectRoot)\n\tt.t.Walk(func(s string, pr interface{}) bool {\n\t\tm[s] = pr.(ProjectRoot)\n\t\treturn false\n\t})\n\n\treturn m\n}\n\n\/\/ isPathPrefixOrEqual is an additional helper check to ensure that the literal\n\/\/ string prefix returned from a radix tree prefix match is also a path tree\n\/\/ match.\n\/\/\n\/\/ The radix tree gets it mostly right, but we have to guard against\n\/\/ possibilities like this:\n\/\/\n\/\/ github.com\/sdboyer\/foo\n\/\/ github.com\/sdboyer\/foobar\/baz\n\/\/\n\/\/ The latter would incorrectly be conflated with the former. As we know we're\n\/\/ operating on strings that describe import paths, guard against this case by\n\/\/ verifying that either the input is the same length as the match (in which\n\/\/ case we know they're equal), or that the next character is a \"\/\". (Import\n\/\/ paths are defined to always use \"\/\", not the OS-specific path separator.)\nfunc isPathPrefixOrEqual(pre, path string) bool {\n\tprflen, pathlen := len(pre), len(path)\n\tif pathlen == prflen+1 {\n\t\t\/\/ this can never be the case\n\t\treturn false\n\t}\n\n\t\/\/ we assume something else (a trie) has done equality check up to the point\n\t\/\/ of the prefix, so we just check len\n\treturn prflen == pathlen || strings.Index(path[prflen:], \"\/\") == 0\n}\n<|endoftext|>"} {"text":"<commit_before>package types\n\nimport \"errors\"\n\ntype RoomDescription struct {\n\tVisibility Visibility `json:\"visibility\"`\n\tAlias *Alias `json:\"room_alias_name\"`\n\tName *string `json:\"name\"`\n\tTopic *string `json:\"topic\"`\n\tInvited []UserId `json:\"invite\"`\n}\n\ntype Visibility int\n\nconst (\n\tVisibilityPrivate Visibility = 0\n\tVisibilityPublic = 1\n)\n\ntype JoinRule int\n\nconst (\n\tJoinRuleNone JoinRule = 0\n\tJoinRulePublic = 1\n\tJoinRuleInvite = 2\n)\n\nfunc (v Visibility) ToJoinRule() JoinRule {\n\tif v == VisibilityPublic {\n\t\treturn JoinRulePublic\n\t} else {\n\t\treturn JoinRuleInvite\n\t}\n}\n\nfunc (v *Visibility) UnmarshalJSON(bytes []byte) error {\n\tstr := string(bytes)\n\tswitch str {\n\tcase \"\\\"private\\\"\":\n\t\t*v = VisibilityPrivate\n\t\treturn nil\n\tcase \"\\\"public\\\"\":\n\t\t*v = VisibilityPublic\n\t\treturn nil\n\t}\n\treturn errors.New(\"invalid visibility identifier: '\" + str + \"'\")\n}\n\nfunc (v Visibility) MarshalJSON() ([]byte, error) {\n\tswitch v {\n\tcase VisibilityPrivate:\n\t\treturn []byte(\"\\\"private\\\"\"), nil\n\tcase VisibilityPublic:\n\t\treturn []byte(\"\\\"public\\\"\"), nil\n\t}\n\treturn nil, errors.New(\"invalid visibility value: '\" + string(v) + \"'\")\n}\n\nfunc (v *JoinRule) UnmarshalJSON(bytes []byte) error {\n\tstr := string(bytes)\n\tswitch str {\n\tcase \"\\\"public\\\"\":\n\t\t*v = JoinRulePublic\n\t\treturn nil\n\tcase \"\\\"invite\\\"\":\n\t\t*v = JoinRuleInvite\n\t\treturn nil\n\t}\n\treturn errors.New(\"invalid join rule identifier: '\" + str + \"'\")\n}\n\nfunc (v JoinRule) MarshalJSON() ([]byte, error) {\n\tswitch v {\n\tcase JoinRuleNone:\n\t\treturn []byte(\"null\"), nil\n\tcase JoinRulePublic:\n\t\treturn []byte(\"\\\"public\\\"\"), nil\n\tcase JoinRuleInvite:\n\t\treturn []byte(\"\\\"invite\\\"\"), nil\n\t}\n\treturn nil, errors.New(\"invalid join rule value: '\" + string(v) + \"'\")\n}\n<commit_msg>types\/rooms: added membership type<commit_after>package types\n\nimport \"errors\"\n\ntype RoomDescription struct {\n\tVisibility Visibility `json:\"visibility\"`\n\tAlias *Alias `json:\"room_alias_name\"`\n\tName *string `json:\"name\"`\n\tTopic *string `json:\"topic\"`\n\tInvited []UserId `json:\"invite\"`\n}\n\ntype Visibility int\n\nconst (\n\tVisibilityPrivate Visibility = 0\n\tVisibilityPublic = 1\n)\n\ntype JoinRule int\n\nconst (\n\tJoinRuleNone JoinRule = 0\n\tJoinRulePublic = 1\n\tJoinRuleInvite = 2\n)\n\ntype Membership int\n\nconst (\n\tMembershipNone Membership = 0\n\tMembershipInvited = 1\n\tMembershipMember = 2\n\tMembershipKnocking = 3\n\tMembershipLeaving = 4\n\tMembershipBanned = 5\n)\n\nfunc (v Visibility) ToJoinRule() JoinRule {\n\tif v == VisibilityPublic {\n\t\treturn JoinRulePublic\n\t} else {\n\t\treturn JoinRuleInvite\n\t}\n}\n\nfunc (v *Visibility) UnmarshalJSON(bytes []byte) error {\n\tstr := string(bytes)\n\tswitch str {\n\tcase \"\\\"private\\\"\":\n\t\t*v = VisibilityPrivate\n\t\treturn nil\n\tcase \"\\\"public\\\"\":\n\t\t*v = VisibilityPublic\n\t\treturn nil\n\t}\n\treturn errors.New(\"invalid visibility identifier: '\" + str + \"'\")\n}\n\nfunc (v Visibility) MarshalJSON() ([]byte, error) {\n\tswitch v {\n\tcase VisibilityPrivate:\n\t\treturn []byte(\"\\\"private\\\"\"), nil\n\tcase VisibilityPublic:\n\t\treturn []byte(\"\\\"public\\\"\"), nil\n\t}\n\treturn nil, errors.New(\"invalid visibility value: '\" + string(v) + \"'\")\n}\n\nfunc (v *JoinRule) UnmarshalJSON(bytes []byte) error {\n\tstr := string(bytes)\n\tswitch str {\n\tcase \"\\\"public\\\"\":\n\t\t*v = JoinRulePublic\n\t\treturn nil\n\tcase \"\\\"invite\\\"\":\n\t\t*v = JoinRuleInvite\n\t\treturn nil\n\t}\n\treturn errors.New(\"invalid join rule identifier: '\" + str + \"'\")\n}\n\nfunc (v JoinRule) MarshalJSON() ([]byte, error) {\n\tswitch v {\n\tcase JoinRuleNone:\n\t\treturn []byte(\"null\"), nil\n\tcase JoinRulePublic:\n\t\treturn []byte(\"\\\"public\\\"\"), nil\n\tcase JoinRuleInvite:\n\t\treturn []byte(\"\\\"invite\\\"\"), nil\n\t}\n\treturn nil, errors.New(\"invalid join rule value: '\" + string(v) + \"'\")\n}\n\nfunc (v *Membership) UnmarshalJSON(bytes []byte) error {\n\tstr := string(bytes)\n\tswitch str {\n\tcase \"null\":\n\t\t*v = MembershipNone\n\t\treturn nil\n\tcase \"\\\"invite\\\"\":\n\t\t*v = MembershipInvited\n\t\treturn nil\n\tcase \"\\\"join\\\"\":\n\t\t*v = MembershipMember\n\t\treturn nil\n\tcase \"\\\"knock\\\"\":\n\t\t*v = MembershipKnocking\n\t\treturn nil\n\tcase \"\\\"leave\\\"\":\n\t\t*v = MembershipLeaving\n\t\treturn nil\n\tcase \"\\\"ban\\\"\":\n\t\t*v = MembershipBanned\n\t\treturn nil\n\t}\n\treturn errors.New(\"invalid membership identifier: '\" + str + \"'\")\n}\n\nfunc (v Membership) MarshalJSON() ([]byte, error) {\n\tswitch v {\n\tcase MembershipNone:\n\t\treturn []byte(\"null\"), nil\n\tcase MembershipInvited:\n\t\treturn []byte(\"\\\"invite\\\"\"), nil\n\tcase MembershipMember:\n\t\treturn []byte(\"\\\"join\\\"\"), nil\n\tcase MembershipKnocking:\n\t\treturn []byte(\"\\\"knock\\\"\"), nil\n\tcase MembershipLeaving:\n\t\treturn []byte(\"\\\"leave\\\"\"), nil\n\tcase MembershipBanned:\n\t\treturn []byte(\"\\\"ban\\\"\"), nil\n\t}\n\treturn nil, errors.New(\"invalid membership value: '\" + string(v) + \"'\")\n}\n<|endoftext|>"} {"text":"<commit_before>package types\n\nimport (\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/docker\/engine-api\/types\/container\"\n\t\"github.com\/docker\/engine-api\/types\/network\"\n\t\"github.com\/docker\/engine-api\/types\/registry\"\n\t\"github.com\/docker\/go-connections\/nat\"\n)\n\n\/\/ ContainerCreateResponse contains the information returned to a client on the\n\/\/ creation of a new container.\ntype ContainerCreateResponse struct {\n\t\/\/ ID is the ID of the created container.\n\tID string `json:\"Id\"`\n\n\t\/\/ Warnings are any warnings encountered during the creation of the container.\n\tWarnings []string `json:\"Warnings\"`\n}\n\n\/\/ ContainerExecCreateResponse contains response of Remote API:\n\/\/ POST \"\/containers\/{name:.*}\/exec\"\ntype ContainerExecCreateResponse struct {\n\t\/\/ ID is the exec ID.\n\tID string `json:\"Id\"`\n}\n\n\/\/ ContainerUpdateResponse contains response of Remote API:\n\/\/ POST \/containers\/{name:.*}\/update\ntype ContainerUpdateResponse struct {\n\t\/\/ Warnings are any warnings encountered during the updating of the container.\n\tWarnings []string `json:\"Warnings\"`\n}\n\n\/\/ AuthResponse contains response of Remote API:\n\/\/ POST \"\/auth\"\ntype AuthResponse struct {\n\t\/\/ Status is the authentication status\n\tStatus string `json:\"Status\"`\n}\n\n\/\/ ContainerWaitResponse contains response of Remote API:\n\/\/ POST \"\/containers\/\"+containerID+\"\/wait\"\ntype ContainerWaitResponse struct {\n\t\/\/ StatusCode is the status code of the wait job\n\tStatusCode int `json:\"StatusCode\"`\n}\n\n\/\/ ContainerCommitResponse contains response of Remote API:\n\/\/ POST \"\/commit?container=\"+containerID\ntype ContainerCommitResponse struct {\n\tID string `json:\"Id\"`\n}\n\n\/\/ ContainerChange contains response of Remote API:\n\/\/ GET \"\/containers\/{name:.*}\/changes\"\ntype ContainerChange struct {\n\tKind int\n\tPath string\n}\n\n\/\/ ImageHistory contains response of Remote API:\n\/\/ GET \"\/images\/{name:.*}\/history\"\ntype ImageHistory struct {\n\tID string `json:\"Id\"`\n\tCreated int64\n\tCreatedBy string\n\tTags []string\n\tSize int64\n\tComment string\n}\n\n\/\/ ImageDelete contains response of Remote API:\n\/\/ DELETE \"\/images\/{name:.*}\"\ntype ImageDelete struct {\n\tUntagged string `json:\",omitempty\"`\n\tDeleted string `json:\",omitempty\"`\n}\n\n\/\/ Image contains response of Remote API:\n\/\/ GET \"\/images\/json\"\ntype Image struct {\n\tID string `json:\"Id\"`\n\tParentID string `json:\"ParentId\"`\n\tRepoTags []string\n\tRepoDigests []string\n\tCreated int64\n\tSize int64\n\tVirtualSize int64\n\tLabels map[string]string\n}\n\n\/\/ GraphDriverData returns Image's graph driver config info\n\/\/ when calling inspect command\ntype GraphDriverData struct {\n\tName string\n\tData map[string]string\n}\n\n\/\/ ImageInspect contains response of Remote API:\n\/\/ GET \"\/images\/{name:.*}\/json\"\ntype ImageInspect struct {\n\tID string `json:\"Id\"`\n\tRepoTags []string\n\tRepoDigests []string\n\tParent string\n\tComment string\n\tCreated string\n\tContainer string\n\tContainerConfig *container.Config\n\tDockerVersion string\n\tAuthor string\n\tConfig *container.Config\n\tArchitecture string\n\tOs string\n\tSize int64\n\tVirtualSize int64\n\tGraphDriver GraphDriverData\n}\n\n\/\/ Port stores open ports info of container\n\/\/ e.g. {\"PrivatePort\": 8080, \"PublicPort\": 80, \"Type\": \"tcp\"}\ntype Port struct {\n\tIP string `json:\",omitempty\"`\n\tPrivatePort int\n\tPublicPort int `json:\",omitempty\"`\n\tType string\n}\n\n\/\/ Container contains response of Remote API:\n\/\/ GET \"\/containers\/json\"\ntype Container struct {\n\tID string `json:\"Id\"`\n\tNames []string\n\tImage string\n\tImageID string\n\tCommand string\n\tCreated int64\n\tPorts []Port\n\tSizeRw int64 `json:\",omitempty\"`\n\tSizeRootFs int64 `json:\",omitempty\"`\n\tLabels map[string]string\n\tState string\n\tStatus string\n\tHostConfig struct {\n\t\tNetworkMode string `json:\",omitempty\"`\n\t}\n\tNetworkSettings *SummaryNetworkSettings\n}\n\n\/\/ CopyConfig contains request body of Remote API:\n\/\/ POST \"\/containers\/\"+containerID+\"\/copy\"\ntype CopyConfig struct {\n\tResource string\n}\n\n\/\/ ContainerPathStat is used to encode the header from\n\/\/ GET \"\/containers\/{name:.*}\/archive\"\n\/\/ \"Name\" is the file or directory name.\ntype ContainerPathStat struct {\n\tName string `json:\"name\"`\n\tSize int64 `json:\"size\"`\n\tMode os.FileMode `json:\"mode\"`\n\tMtime time.Time `json:\"mtime\"`\n\tLinkTarget string `json:\"linkTarget\"`\n}\n\n\/\/ ContainerProcessList contains response of Remote API:\n\/\/ GET \"\/containers\/{name:.*}\/top\"\ntype ContainerProcessList struct {\n\tProcesses [][]string\n\tTitles []string\n}\n\n\/\/ Version contains response of Remote API:\n\/\/ GET \"\/version\"\ntype Version struct {\n\tVersion string\n\tAPIVersion string `json:\"ApiVersion\"`\n\tGitCommit string\n\tGoVersion string\n\tOs string\n\tArch string\n\tKernelVersion string `json:\",omitempty\"`\n\tExperimental bool `json:\",omitempty\"`\n\tBuildTime string `json:\",omitempty\"`\n}\n\n\/\/ Info contains response of Remote API:\n\/\/ GET \"\/info\"\ntype Info struct {\n\tID string\n\tContainers int\n\tContainersRunning int\n\tContainersPaused int\n\tContainersStopped int\n\tImages int\n\tDriver string\n\tDriverStatus [][2]string\n\tSystemStatus [][2]string\n\tPlugins PluginsInfo\n\tMemoryLimit bool\n\tSwapLimit bool\n\tCPUCfsPeriod bool `json:\"CpuCfsPeriod\"`\n\tCPUCfsQuota bool `json:\"CpuCfsQuota\"`\n\tCPUShares bool\n\tCPUSet bool\n\tIPv4Forwarding bool\n\tBridgeNfIptables bool\n\tBridgeNfIP6tables bool `json:\"BridgeNfIp6tables\"`\n\tDebug bool\n\tNFd int\n\tOomKillDisable bool\n\tNGoroutines int\n\tSystemTime string\n\tExecutionDriver string\n\tLoggingDriver string\n\tNEventsListener int\n\tKernelVersion string\n\tOperatingSystem string\n\tOSType string\n\tArchitecture string\n\tIndexServerAddress string\n\tRegistryConfig *registry.ServiceConfig\n\tNCPU int\n\tMemTotal int64\n\tDockerRootDir string\n\tHTTPProxy string `json:\"HttpProxy\"`\n\tHTTPSProxy string `json:\"HttpsProxy\"`\n\tNoProxy string\n\tName string\n\tLabels []string\n\tExperimentalBuild bool\n\tServerVersion string\n\tClusterStore string\n\tClusterAdvertise string\n}\n\n\/\/ PluginsInfo is a temp struct holding Plugins name\n\/\/ registered with docker daemon. It is used by Info struct\ntype PluginsInfo struct {\n\t\/\/ List of Volume plugins registered\n\tVolume []string\n\t\/\/ List of Network plugins registered\n\tNetwork []string\n\t\/\/ List of Authorization plugins registered\n\tAuthorization []string\n}\n\n\/\/ ExecStartCheck is a temp struct used by execStart\n\/\/ Config fields is part of ExecConfig in runconfig package\ntype ExecStartCheck struct {\n\t\/\/ ExecStart will first check if it's detached\n\tDetach bool\n\t\/\/ Check if there's a tty\n\tTty bool\n}\n\n\/\/ ContainerState stores container's running state\n\/\/ it's part of ContainerJSONBase and will return by \"inspect\" command\ntype ContainerState struct {\n\tStatus string\n\tRunning bool\n\tPaused bool\n\tRestarting bool\n\tOOMKilled bool\n\tDead bool\n\tPid int\n\tExitCode int\n\tError string\n\tStartedAt string\n\tFinishedAt string\n}\n\n\/\/ ContainerJSONBase contains response of Remote API:\n\/\/ GET \"\/containers\/{name:.*}\/json\"\ntype ContainerJSONBase struct {\n\tID string `json:\"Id\"`\n\tCreated string\n\tPath string\n\tArgs []string\n\tState *ContainerState\n\tImage string\n\tResolvConfPath string\n\tHostnamePath string\n\tHostsPath string\n\tLogPath string\n\tName string\n\tRestartCount int\n\tDriver string\n\tMountLabel string\n\tProcessLabel string\n\tAppArmorProfile string\n\tExecIDs []string\n\tHostConfig *container.HostConfig\n\tGraphDriver GraphDriverData\n\tSizeRw *int64 `json:\",omitempty\"`\n\tSizeRootFs *int64 `json:\",omitempty\"`\n}\n\n\/\/ ContainerJSON is newly used struct along with MountPoint\ntype ContainerJSON struct {\n\t*ContainerJSONBase\n\tMounts []MountPoint\n\tConfig *container.Config\n\tNetworkSettings *NetworkSettings\n}\n\n\/\/ NetworkSettings exposes the network settings in the api\ntype NetworkSettings struct {\n\tNetworkSettingsBase\n\tDefaultNetworkSettings\n\tNetworks map[string]*network.EndpointSettings\n}\n\n\/\/ SummaryNetworkSettings provides a summary of container's networks\n\/\/ in \/containers\/json\ntype SummaryNetworkSettings struct {\n\tNetworks map[string]*network.EndpointSettings\n}\n\n\/\/ NetworkSettingsBase holds basic information about networks\ntype NetworkSettingsBase struct {\n\tBridge string\n\tSandboxID string\n\tHairpinMode bool\n\tLinkLocalIPv6Address string\n\tLinkLocalIPv6PrefixLen int\n\tPorts nat.PortMap\n\tSandboxKey string\n\tSecondaryIPAddresses []network.Address\n\tSecondaryIPv6Addresses []network.Address\n}\n\n\/\/ DefaultNetworkSettings holds network information\n\/\/ during the 2 release deprecation period.\n\/\/ It will be removed in Docker 1.11.\ntype DefaultNetworkSettings struct {\n\tEndpointID string\n\tGateway string\n\tGlobalIPv6Address string\n\tGlobalIPv6PrefixLen int\n\tIPAddress string\n\tIPPrefixLen int\n\tIPv6Gateway string\n\tMacAddress string\n}\n\n\/\/ MountPoint represents a mount point configuration inside the container.\ntype MountPoint struct {\n\tName string `json:\",omitempty\"`\n\tSource string\n\tDestination string\n\tDriver string `json:\",omitempty\"`\n\tMode string\n\tRW bool\n\tPropagation string\n}\n\n\/\/ Volume represents the configuration of a volume for the remote API\ntype Volume struct {\n\tName string \/\/ Name is the name of the volume\n\tDriver string \/\/ Driver is the Driver name used to create the volume\n\tMountpoint string \/\/ Mountpoint is the location on disk of the volume\n}\n\n\/\/ VolumesListResponse contains the response for the remote API:\n\/\/ GET \"\/volumes\"\ntype VolumesListResponse struct {\n\tVolumes []*Volume \/\/ Volumes is the list of volumes being returned\n\tWarnings []string \/\/ Warnings is a list of warnings that occurred when getting the list from the volume drivers\n}\n\n\/\/ VolumeCreateRequest contains the response for the remote API:\n\/\/ POST \"\/volumes\/create\"\ntype VolumeCreateRequest struct {\n\tName string \/\/ Name is the requested name of the volume\n\tDriver string \/\/ Driver is the name of the driver that should be used to create the volume\n\tDriverOpts map[string]string \/\/ DriverOpts holds the driver specific options to use for when creating the volume.\n}\n\n\/\/ NetworkResource is the body of the \"get network\" http response message\ntype NetworkResource struct {\n\tName string\n\tID string `json:\"Id\"`\n\tScope string\n\tDriver string\n\tEnableIPv6 bool\n\tIPAM network.IPAM\n\tInternal bool\n\tContainers map[string]EndpointResource\n\tOptions map[string]string\n}\n\n\/\/ EndpointResource contains network resources allocated and used for a container in a network\ntype EndpointResource struct {\n\tName string\n\tEndpointID string\n\tMacAddress string\n\tIPv4Address string\n\tIPv6Address string\n}\n\n\/\/ NetworkCreate is the expected body of the \"create network\" http request message\ntype NetworkCreate struct {\n\tName string\n\tCheckDuplicate bool\n\tDriver string\n\tEnableIPv6 bool\n\tIPAM network.IPAM\n\tInternal bool\n\tOptions map[string]string\n}\n\n\/\/ NetworkCreateResponse is the response message sent by the server for network create call\ntype NetworkCreateResponse struct {\n\tID string `json:\"Id\"`\n\tWarning string\n}\n\n\/\/ NetworkConnect represents the data to be used to connect a container to the network\ntype NetworkConnect struct {\n\tContainer string\n\tEndpointConfig *network.EndpointSettings `json:\",omitempty\"`\n}\n\n\/\/ NetworkDisconnect represents the data to be used to disconnect a container from the network\ntype NetworkDisconnect struct {\n\tContainer string\n\tForce bool\n}\n<commit_msg>Expose container mounts in PS structure.<commit_after>package types\n\nimport (\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/docker\/engine-api\/types\/container\"\n\t\"github.com\/docker\/engine-api\/types\/network\"\n\t\"github.com\/docker\/engine-api\/types\/registry\"\n\t\"github.com\/docker\/go-connections\/nat\"\n)\n\n\/\/ ContainerCreateResponse contains the information returned to a client on the\n\/\/ creation of a new container.\ntype ContainerCreateResponse struct {\n\t\/\/ ID is the ID of the created container.\n\tID string `json:\"Id\"`\n\n\t\/\/ Warnings are any warnings encountered during the creation of the container.\n\tWarnings []string `json:\"Warnings\"`\n}\n\n\/\/ ContainerExecCreateResponse contains response of Remote API:\n\/\/ POST \"\/containers\/{name:.*}\/exec\"\ntype ContainerExecCreateResponse struct {\n\t\/\/ ID is the exec ID.\n\tID string `json:\"Id\"`\n}\n\n\/\/ ContainerUpdateResponse contains response of Remote API:\n\/\/ POST \/containers\/{name:.*}\/update\ntype ContainerUpdateResponse struct {\n\t\/\/ Warnings are any warnings encountered during the updating of the container.\n\tWarnings []string `json:\"Warnings\"`\n}\n\n\/\/ AuthResponse contains response of Remote API:\n\/\/ POST \"\/auth\"\ntype AuthResponse struct {\n\t\/\/ Status is the authentication status\n\tStatus string `json:\"Status\"`\n}\n\n\/\/ ContainerWaitResponse contains response of Remote API:\n\/\/ POST \"\/containers\/\"+containerID+\"\/wait\"\ntype ContainerWaitResponse struct {\n\t\/\/ StatusCode is the status code of the wait job\n\tStatusCode int `json:\"StatusCode\"`\n}\n\n\/\/ ContainerCommitResponse contains response of Remote API:\n\/\/ POST \"\/commit?container=\"+containerID\ntype ContainerCommitResponse struct {\n\tID string `json:\"Id\"`\n}\n\n\/\/ ContainerChange contains response of Remote API:\n\/\/ GET \"\/containers\/{name:.*}\/changes\"\ntype ContainerChange struct {\n\tKind int\n\tPath string\n}\n\n\/\/ ImageHistory contains response of Remote API:\n\/\/ GET \"\/images\/{name:.*}\/history\"\ntype ImageHistory struct {\n\tID string `json:\"Id\"`\n\tCreated int64\n\tCreatedBy string\n\tTags []string\n\tSize int64\n\tComment string\n}\n\n\/\/ ImageDelete contains response of Remote API:\n\/\/ DELETE \"\/images\/{name:.*}\"\ntype ImageDelete struct {\n\tUntagged string `json:\",omitempty\"`\n\tDeleted string `json:\",omitempty\"`\n}\n\n\/\/ Image contains response of Remote API:\n\/\/ GET \"\/images\/json\"\ntype Image struct {\n\tID string `json:\"Id\"`\n\tParentID string `json:\"ParentId\"`\n\tRepoTags []string\n\tRepoDigests []string\n\tCreated int64\n\tSize int64\n\tVirtualSize int64\n\tLabels map[string]string\n}\n\n\/\/ GraphDriverData returns Image's graph driver config info\n\/\/ when calling inspect command\ntype GraphDriverData struct {\n\tName string\n\tData map[string]string\n}\n\n\/\/ ImageInspect contains response of Remote API:\n\/\/ GET \"\/images\/{name:.*}\/json\"\ntype ImageInspect struct {\n\tID string `json:\"Id\"`\n\tRepoTags []string\n\tRepoDigests []string\n\tParent string\n\tComment string\n\tCreated string\n\tContainer string\n\tContainerConfig *container.Config\n\tDockerVersion string\n\tAuthor string\n\tConfig *container.Config\n\tArchitecture string\n\tOs string\n\tSize int64\n\tVirtualSize int64\n\tGraphDriver GraphDriverData\n}\n\n\/\/ Port stores open ports info of container\n\/\/ e.g. {\"PrivatePort\": 8080, \"PublicPort\": 80, \"Type\": \"tcp\"}\ntype Port struct {\n\tIP string `json:\",omitempty\"`\n\tPrivatePort int\n\tPublicPort int `json:\",omitempty\"`\n\tType string\n}\n\n\/\/ Container contains response of Remote API:\n\/\/ GET \"\/containers\/json\"\ntype Container struct {\n\tID string `json:\"Id\"`\n\tNames []string\n\tImage string\n\tImageID string\n\tCommand string\n\tCreated int64\n\tPorts []Port\n\tSizeRw int64 `json:\",omitempty\"`\n\tSizeRootFs int64 `json:\",omitempty\"`\n\tLabels map[string]string\n\tState string\n\tStatus string\n\tHostConfig struct {\n\t\tNetworkMode string `json:\",omitempty\"`\n\t}\n\tNetworkSettings *SummaryNetworkSettings\n\tMounts []MountPoint\n}\n\n\/\/ CopyConfig contains request body of Remote API:\n\/\/ POST \"\/containers\/\"+containerID+\"\/copy\"\ntype CopyConfig struct {\n\tResource string\n}\n\n\/\/ ContainerPathStat is used to encode the header from\n\/\/ GET \"\/containers\/{name:.*}\/archive\"\n\/\/ \"Name\" is the file or directory name.\ntype ContainerPathStat struct {\n\tName string `json:\"name\"`\n\tSize int64 `json:\"size\"`\n\tMode os.FileMode `json:\"mode\"`\n\tMtime time.Time `json:\"mtime\"`\n\tLinkTarget string `json:\"linkTarget\"`\n}\n\n\/\/ ContainerProcessList contains response of Remote API:\n\/\/ GET \"\/containers\/{name:.*}\/top\"\ntype ContainerProcessList struct {\n\tProcesses [][]string\n\tTitles []string\n}\n\n\/\/ Version contains response of Remote API:\n\/\/ GET \"\/version\"\ntype Version struct {\n\tVersion string\n\tAPIVersion string `json:\"ApiVersion\"`\n\tGitCommit string\n\tGoVersion string\n\tOs string\n\tArch string\n\tKernelVersion string `json:\",omitempty\"`\n\tExperimental bool `json:\",omitempty\"`\n\tBuildTime string `json:\",omitempty\"`\n}\n\n\/\/ Info contains response of Remote API:\n\/\/ GET \"\/info\"\ntype Info struct {\n\tID string\n\tContainers int\n\tContainersRunning int\n\tContainersPaused int\n\tContainersStopped int\n\tImages int\n\tDriver string\n\tDriverStatus [][2]string\n\tSystemStatus [][2]string\n\tPlugins PluginsInfo\n\tMemoryLimit bool\n\tSwapLimit bool\n\tCPUCfsPeriod bool `json:\"CpuCfsPeriod\"`\n\tCPUCfsQuota bool `json:\"CpuCfsQuota\"`\n\tCPUShares bool\n\tCPUSet bool\n\tIPv4Forwarding bool\n\tBridgeNfIptables bool\n\tBridgeNfIP6tables bool `json:\"BridgeNfIp6tables\"`\n\tDebug bool\n\tNFd int\n\tOomKillDisable bool\n\tNGoroutines int\n\tSystemTime string\n\tExecutionDriver string\n\tLoggingDriver string\n\tNEventsListener int\n\tKernelVersion string\n\tOperatingSystem string\n\tOSType string\n\tArchitecture string\n\tIndexServerAddress string\n\tRegistryConfig *registry.ServiceConfig\n\tNCPU int\n\tMemTotal int64\n\tDockerRootDir string\n\tHTTPProxy string `json:\"HttpProxy\"`\n\tHTTPSProxy string `json:\"HttpsProxy\"`\n\tNoProxy string\n\tName string\n\tLabels []string\n\tExperimentalBuild bool\n\tServerVersion string\n\tClusterStore string\n\tClusterAdvertise string\n}\n\n\/\/ PluginsInfo is a temp struct holding Plugins name\n\/\/ registered with docker daemon. It is used by Info struct\ntype PluginsInfo struct {\n\t\/\/ List of Volume plugins registered\n\tVolume []string\n\t\/\/ List of Network plugins registered\n\tNetwork []string\n\t\/\/ List of Authorization plugins registered\n\tAuthorization []string\n}\n\n\/\/ ExecStartCheck is a temp struct used by execStart\n\/\/ Config fields is part of ExecConfig in runconfig package\ntype ExecStartCheck struct {\n\t\/\/ ExecStart will first check if it's detached\n\tDetach bool\n\t\/\/ Check if there's a tty\n\tTty bool\n}\n\n\/\/ ContainerState stores container's running state\n\/\/ it's part of ContainerJSONBase and will return by \"inspect\" command\ntype ContainerState struct {\n\tStatus string\n\tRunning bool\n\tPaused bool\n\tRestarting bool\n\tOOMKilled bool\n\tDead bool\n\tPid int\n\tExitCode int\n\tError string\n\tStartedAt string\n\tFinishedAt string\n}\n\n\/\/ ContainerJSONBase contains response of Remote API:\n\/\/ GET \"\/containers\/{name:.*}\/json\"\ntype ContainerJSONBase struct {\n\tID string `json:\"Id\"`\n\tCreated string\n\tPath string\n\tArgs []string\n\tState *ContainerState\n\tImage string\n\tResolvConfPath string\n\tHostnamePath string\n\tHostsPath string\n\tLogPath string\n\tName string\n\tRestartCount int\n\tDriver string\n\tMountLabel string\n\tProcessLabel string\n\tAppArmorProfile string\n\tExecIDs []string\n\tHostConfig *container.HostConfig\n\tGraphDriver GraphDriverData\n\tSizeRw *int64 `json:\",omitempty\"`\n\tSizeRootFs *int64 `json:\",omitempty\"`\n}\n\n\/\/ ContainerJSON is newly used struct along with MountPoint\ntype ContainerJSON struct {\n\t*ContainerJSONBase\n\tMounts []MountPoint\n\tConfig *container.Config\n\tNetworkSettings *NetworkSettings\n}\n\n\/\/ NetworkSettings exposes the network settings in the api\ntype NetworkSettings struct {\n\tNetworkSettingsBase\n\tDefaultNetworkSettings\n\tNetworks map[string]*network.EndpointSettings\n}\n\n\/\/ SummaryNetworkSettings provides a summary of container's networks\n\/\/ in \/containers\/json\ntype SummaryNetworkSettings struct {\n\tNetworks map[string]*network.EndpointSettings\n}\n\n\/\/ NetworkSettingsBase holds basic information about networks\ntype NetworkSettingsBase struct {\n\tBridge string\n\tSandboxID string\n\tHairpinMode bool\n\tLinkLocalIPv6Address string\n\tLinkLocalIPv6PrefixLen int\n\tPorts nat.PortMap\n\tSandboxKey string\n\tSecondaryIPAddresses []network.Address\n\tSecondaryIPv6Addresses []network.Address\n}\n\n\/\/ DefaultNetworkSettings holds network information\n\/\/ during the 2 release deprecation period.\n\/\/ It will be removed in Docker 1.11.\ntype DefaultNetworkSettings struct {\n\tEndpointID string\n\tGateway string\n\tGlobalIPv6Address string\n\tGlobalIPv6PrefixLen int\n\tIPAddress string\n\tIPPrefixLen int\n\tIPv6Gateway string\n\tMacAddress string\n}\n\n\/\/ MountPoint represents a mount point configuration inside the container.\ntype MountPoint struct {\n\tName string `json:\",omitempty\"`\n\tSource string\n\tDestination string\n\tDriver string `json:\",omitempty\"`\n\tMode string\n\tRW bool\n\tPropagation string\n}\n\n\/\/ Volume represents the configuration of a volume for the remote API\ntype Volume struct {\n\tName string \/\/ Name is the name of the volume\n\tDriver string \/\/ Driver is the Driver name used to create the volume\n\tMountpoint string \/\/ Mountpoint is the location on disk of the volume\n}\n\n\/\/ VolumesListResponse contains the response for the remote API:\n\/\/ GET \"\/volumes\"\ntype VolumesListResponse struct {\n\tVolumes []*Volume \/\/ Volumes is the list of volumes being returned\n\tWarnings []string \/\/ Warnings is a list of warnings that occurred when getting the list from the volume drivers\n}\n\n\/\/ VolumeCreateRequest contains the response for the remote API:\n\/\/ POST \"\/volumes\/create\"\ntype VolumeCreateRequest struct {\n\tName string \/\/ Name is the requested name of the volume\n\tDriver string \/\/ Driver is the name of the driver that should be used to create the volume\n\tDriverOpts map[string]string \/\/ DriverOpts holds the driver specific options to use for when creating the volume.\n}\n\n\/\/ NetworkResource is the body of the \"get network\" http response message\ntype NetworkResource struct {\n\tName string\n\tID string `json:\"Id\"`\n\tScope string\n\tDriver string\n\tEnableIPv6 bool\n\tIPAM network.IPAM\n\tInternal bool\n\tContainers map[string]EndpointResource\n\tOptions map[string]string\n}\n\n\/\/ EndpointResource contains network resources allocated and used for a container in a network\ntype EndpointResource struct {\n\tName string\n\tEndpointID string\n\tMacAddress string\n\tIPv4Address string\n\tIPv6Address string\n}\n\n\/\/ NetworkCreate is the expected body of the \"create network\" http request message\ntype NetworkCreate struct {\n\tName string\n\tCheckDuplicate bool\n\tDriver string\n\tEnableIPv6 bool\n\tIPAM network.IPAM\n\tInternal bool\n\tOptions map[string]string\n}\n\n\/\/ NetworkCreateResponse is the response message sent by the server for network create call\ntype NetworkCreateResponse struct {\n\tID string `json:\"Id\"`\n\tWarning string\n}\n\n\/\/ NetworkConnect represents the data to be used to connect a container to the network\ntype NetworkConnect struct {\n\tContainer string\n\tEndpointConfig *network.EndpointSettings `json:\",omitempty\"`\n}\n\n\/\/ NetworkDisconnect represents the data to be used to disconnect a container from the network\ntype NetworkDisconnect struct {\n\tContainer string\n\tForce bool\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage portable\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n\t\"math\"\n)\n\nfunc bilinear(src image.Image, x, y float32) color.Color {\n\tswitch src := src.(type) {\n\tcase *image.RGBA:\n\t\treturn bilinearRGBA(src, x, y)\n\tcase *image.Alpha:\n\t\treturn bilinearAlpha(src, x, y)\n\tdefault:\n\t\treturn bilinearGeneral(src, x, y)\n\t}\n}\n\nfunc bilinearGeneral(src image.Image, x, y float32) color.RGBA64 {\n\tp := findLinearSrc(src.Bounds(), x, y)\n\n\tr00, g00, b00, a00 := src.At(p.low.X, p.low.Y).RGBA()\n\tr01, g01, b01, a01 := src.At(p.high.X, p.low.Y).RGBA()\n\tr10, g10, b10, a10 := src.At(p.low.X, p.high.Y).RGBA()\n\tr11, g11, b11, a11 := src.At(p.high.X, p.high.Y).RGBA()\n\n\tfr := float32(r00) * p.frac00\n\tfg := float32(g00) * p.frac00\n\tfb := float32(b00) * p.frac00\n\tfa := float32(a00) * p.frac00\n\n\tfr += float32(r01) * p.frac01\n\tfg += float32(g01) * p.frac01\n\tfb += float32(b01) * p.frac01\n\tfa += float32(a01) * p.frac01\n\n\tfr += float32(r10) * p.frac10\n\tfg += float32(g10) * p.frac10\n\tfb += float32(b10) * p.frac10\n\tfa += float32(a10) * p.frac10\n\n\tfr += float32(r11) * p.frac11\n\tfg += float32(g11) * p.frac11\n\tfb += float32(b11) * p.frac11\n\tfa += float32(a11) * p.frac11\n\n\treturn color.RGBA64{\n\t\tR: uint16(fr + 0.5),\n\t\tG: uint16(fg + 0.5),\n\t\tB: uint16(fb + 0.5),\n\t\tA: uint16(fa + 0.5),\n\t}\n}\n\nfunc bilinearRGBA(src *image.RGBA, x, y float32) color.RGBA {\n\tp := findLinearSrc(src.Bounds(), x, y)\n\n\t\/\/ Slice offsets for the surrounding pixels.\n\toff00 := src.PixOffset(p.low.X, p.low.Y)\n\toff01 := src.PixOffset(p.high.X, p.low.Y)\n\toff10 := src.PixOffset(p.low.X, p.high.Y)\n\toff11 := src.PixOffset(p.high.X, p.high.Y)\n\n\tfr := float32(src.Pix[off00+0]) * p.frac00\n\tfg := float32(src.Pix[off00+1]) * p.frac00\n\tfb := float32(src.Pix[off00+2]) * p.frac00\n\tfa := float32(src.Pix[off00+3]) * p.frac00\n\n\tfr += float32(src.Pix[off01+0]) * p.frac01\n\tfg += float32(src.Pix[off01+1]) * p.frac01\n\tfb += float32(src.Pix[off01+2]) * p.frac01\n\tfa += float32(src.Pix[off01+3]) * p.frac01\n\n\tfr += float32(src.Pix[off10+0]) * p.frac10\n\tfg += float32(src.Pix[off10+1]) * p.frac10\n\tfb += float32(src.Pix[off10+2]) * p.frac10\n\tfa += float32(src.Pix[off10+3]) * p.frac10\n\n\tfr += float32(src.Pix[off11+0]) * p.frac11\n\tfg += float32(src.Pix[off11+1]) * p.frac11\n\tfb += float32(src.Pix[off11+2]) * p.frac11\n\tfa += float32(src.Pix[off11+3]) * p.frac11\n\n\treturn color.RGBA{\n\t\tR: uint8(fr + 0.5),\n\t\tG: uint8(fg + 0.5),\n\t\tB: uint8(fb + 0.5),\n\t\tA: uint8(fa + 0.5),\n\t}\n}\n\nfunc bilinearAlpha(src *image.Alpha, x, y float32) color.Alpha {\n\tp := findLinearSrc(src.Bounds(), x, y)\n\n\t\/\/ Slice offsets for the surrounding pixels.\n\toff00 := src.PixOffset(p.low.X, p.low.Y)\n\toff01 := src.PixOffset(p.high.X, p.low.Y)\n\toff10 := src.PixOffset(p.low.X, p.high.Y)\n\toff11 := src.PixOffset(p.high.X, p.high.Y)\n\n\tfa := float32(src.Pix[off00]) * p.frac00\n\tfa += float32(src.Pix[off01]) * p.frac01\n\tfa += float32(src.Pix[off10]) * p.frac10\n\tfa += float32(src.Pix[off11]) * p.frac11\n\n\treturn color.Alpha{A: uint8(fa + 0.5)}\n}\n\ntype bilinearSrc struct {\n\t\/\/ Top-left and bottom-right interpolation sources\n\tlow, high image.Point\n\t\/\/ Fraction of each pixel to take. The 0 suffix indicates\n\t\/\/ top\/left, and the 1 suffix indicates bottom\/right.\n\tfrac00, frac01, frac10, frac11 float32\n}\n\nfunc floor(x float32) float32 { return float32(math.Floor(float64(x))) }\nfunc ceil(x float32) float32 { return float32(math.Ceil(float64(x))) }\n\nfunc findLinearSrc(b image.Rectangle, sx, sy float32) bilinearSrc {\n\tmaxX := float32(b.Max.X)\n\tmaxY := float32(b.Max.Y)\n\tminX := float32(b.Min.X)\n\tminY := float32(b.Min.Y)\n\tlowX := floor(sx - 0.5)\n\tlowY := floor(sy - 0.5)\n\tif lowX < minX {\n\t\tlowX = minX\n\t}\n\tif lowY < minY {\n\t\tlowY = minY\n\t}\n\n\thighX := ceil(sx - 0.5)\n\thighY := ceil(sy - 0.5)\n\tif highX >= maxX {\n\t\thighX = maxX - 1\n\t}\n\tif highY >= maxY {\n\t\thighY = maxY - 1\n\t}\n\n\t\/\/ In the variables below, the 0 suffix indicates top\/left, and the\n\t\/\/ 1 suffix indicates bottom\/right.\n\n\t\/\/ Center of each surrounding pixel.\n\tx00 := lowX + 0.5\n\ty00 := lowY + 0.5\n\tx01 := highX + 0.5\n\ty01 := lowY + 0.5\n\tx10 := lowX + 0.5\n\ty10 := highY + 0.5\n\tx11 := highX + 0.5\n\ty11 := highY + 0.5\n\n\tp := bilinearSrc{\n\t\tlow: image.Pt(int(lowX), int(lowY)),\n\t\thigh: image.Pt(int(highX), int(highY)),\n\t}\n\n\t\/\/ Literally, edge cases. If we are close enough to the edge of\n\t\/\/ the image, curtail the interpolation sources.\n\tif lowX == highX && lowY == highY {\n\t\tp.frac00 = 1.0\n\t} else if sy-minY <= 0.5 && sx-minX <= 0.5 {\n\t\tp.frac00 = 1.0\n\t} else if maxY-sy <= 0.5 && maxX-sx <= 0.5 {\n\t\tp.frac11 = 1.0\n\t} else if sy-minY <= 0.5 || lowY == highY {\n\t\tp.frac00 = x01 - sx\n\t\tp.frac01 = sx - x00\n\t} else if sx-minX <= 0.5 || lowX == highX {\n\t\tp.frac00 = y10 - sy\n\t\tp.frac10 = sy - y00\n\t} else if maxY-sy <= 0.5 {\n\t\tp.frac10 = x11 - sx\n\t\tp.frac11 = sx - x10\n\t} else if maxX-sx <= 0.5 {\n\t\tp.frac01 = y11 - sy\n\t\tp.frac11 = sy - y01\n\t} else {\n\t\tp.frac00 = (x01 - sx) * (y10 - sy)\n\t\tp.frac01 = (sx - x00) * (y11 - sy)\n\t\tp.frac10 = (x11 - sx) * (sy - y00)\n\t\tp.frac11 = (sx - x10) * (sy - y01)\n\t}\n\n\treturn p\n}\n<commit_msg>go.mobile\/sprite\/portable: bilinear fast path for *image.Uniform<commit_after>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage portable\n\nimport (\n\t\"image\"\n\t\"image\/color\"\n\t\"math\"\n)\n\nfunc bilinear(src image.Image, x, y float32) color.Color {\n\tswitch src := src.(type) {\n\tcase *image.RGBA:\n\t\treturn bilinearRGBA(src, x, y)\n\tcase *image.Alpha:\n\t\treturn bilinearAlpha(src, x, y)\n\tcase *image.Uniform:\n\t\treturn src.C\n\tdefault:\n\t\treturn bilinearGeneral(src, x, y)\n\t}\n}\n\nfunc bilinearGeneral(src image.Image, x, y float32) color.RGBA64 {\n\tp := findLinearSrc(src.Bounds(), x, y)\n\n\tr00, g00, b00, a00 := src.At(p.low.X, p.low.Y).RGBA()\n\tr01, g01, b01, a01 := src.At(p.high.X, p.low.Y).RGBA()\n\tr10, g10, b10, a10 := src.At(p.low.X, p.high.Y).RGBA()\n\tr11, g11, b11, a11 := src.At(p.high.X, p.high.Y).RGBA()\n\n\tfr := float32(r00) * p.frac00\n\tfg := float32(g00) * p.frac00\n\tfb := float32(b00) * p.frac00\n\tfa := float32(a00) * p.frac00\n\n\tfr += float32(r01) * p.frac01\n\tfg += float32(g01) * p.frac01\n\tfb += float32(b01) * p.frac01\n\tfa += float32(a01) * p.frac01\n\n\tfr += float32(r10) * p.frac10\n\tfg += float32(g10) * p.frac10\n\tfb += float32(b10) * p.frac10\n\tfa += float32(a10) * p.frac10\n\n\tfr += float32(r11) * p.frac11\n\tfg += float32(g11) * p.frac11\n\tfb += float32(b11) * p.frac11\n\tfa += float32(a11) * p.frac11\n\n\treturn color.RGBA64{\n\t\tR: uint16(fr + 0.5),\n\t\tG: uint16(fg + 0.5),\n\t\tB: uint16(fb + 0.5),\n\t\tA: uint16(fa + 0.5),\n\t}\n}\n\nfunc bilinearRGBA(src *image.RGBA, x, y float32) color.RGBA {\n\tp := findLinearSrc(src.Bounds(), x, y)\n\n\t\/\/ Slice offsets for the surrounding pixels.\n\toff00 := src.PixOffset(p.low.X, p.low.Y)\n\toff01 := src.PixOffset(p.high.X, p.low.Y)\n\toff10 := src.PixOffset(p.low.X, p.high.Y)\n\toff11 := src.PixOffset(p.high.X, p.high.Y)\n\n\tfr := float32(src.Pix[off00+0]) * p.frac00\n\tfg := float32(src.Pix[off00+1]) * p.frac00\n\tfb := float32(src.Pix[off00+2]) * p.frac00\n\tfa := float32(src.Pix[off00+3]) * p.frac00\n\n\tfr += float32(src.Pix[off01+0]) * p.frac01\n\tfg += float32(src.Pix[off01+1]) * p.frac01\n\tfb += float32(src.Pix[off01+2]) * p.frac01\n\tfa += float32(src.Pix[off01+3]) * p.frac01\n\n\tfr += float32(src.Pix[off10+0]) * p.frac10\n\tfg += float32(src.Pix[off10+1]) * p.frac10\n\tfb += float32(src.Pix[off10+2]) * p.frac10\n\tfa += float32(src.Pix[off10+3]) * p.frac10\n\n\tfr += float32(src.Pix[off11+0]) * p.frac11\n\tfg += float32(src.Pix[off11+1]) * p.frac11\n\tfb += float32(src.Pix[off11+2]) * p.frac11\n\tfa += float32(src.Pix[off11+3]) * p.frac11\n\n\treturn color.RGBA{\n\t\tR: uint8(fr + 0.5),\n\t\tG: uint8(fg + 0.5),\n\t\tB: uint8(fb + 0.5),\n\t\tA: uint8(fa + 0.5),\n\t}\n}\n\nfunc bilinearAlpha(src *image.Alpha, x, y float32) color.Alpha {\n\tp := findLinearSrc(src.Bounds(), x, y)\n\n\t\/\/ Slice offsets for the surrounding pixels.\n\toff00 := src.PixOffset(p.low.X, p.low.Y)\n\toff01 := src.PixOffset(p.high.X, p.low.Y)\n\toff10 := src.PixOffset(p.low.X, p.high.Y)\n\toff11 := src.PixOffset(p.high.X, p.high.Y)\n\n\tfa := float32(src.Pix[off00]) * p.frac00\n\tfa += float32(src.Pix[off01]) * p.frac01\n\tfa += float32(src.Pix[off10]) * p.frac10\n\tfa += float32(src.Pix[off11]) * p.frac11\n\n\treturn color.Alpha{A: uint8(fa + 0.5)}\n}\n\ntype bilinearSrc struct {\n\t\/\/ Top-left and bottom-right interpolation sources\n\tlow, high image.Point\n\t\/\/ Fraction of each pixel to take. The 0 suffix indicates\n\t\/\/ top\/left, and the 1 suffix indicates bottom\/right.\n\tfrac00, frac01, frac10, frac11 float32\n}\n\nfunc floor(x float32) float32 { return float32(math.Floor(float64(x))) }\nfunc ceil(x float32) float32 { return float32(math.Ceil(float64(x))) }\n\nfunc findLinearSrc(b image.Rectangle, sx, sy float32) bilinearSrc {\n\tmaxX := float32(b.Max.X)\n\tmaxY := float32(b.Max.Y)\n\tminX := float32(b.Min.X)\n\tminY := float32(b.Min.Y)\n\tlowX := floor(sx - 0.5)\n\tlowY := floor(sy - 0.5)\n\tif lowX < minX {\n\t\tlowX = minX\n\t}\n\tif lowY < minY {\n\t\tlowY = minY\n\t}\n\n\thighX := ceil(sx - 0.5)\n\thighY := ceil(sy - 0.5)\n\tif highX >= maxX {\n\t\thighX = maxX - 1\n\t}\n\tif highY >= maxY {\n\t\thighY = maxY - 1\n\t}\n\n\t\/\/ In the variables below, the 0 suffix indicates top\/left, and the\n\t\/\/ 1 suffix indicates bottom\/right.\n\n\t\/\/ Center of each surrounding pixel.\n\tx00 := lowX + 0.5\n\ty00 := lowY + 0.5\n\tx01 := highX + 0.5\n\ty01 := lowY + 0.5\n\tx10 := lowX + 0.5\n\ty10 := highY + 0.5\n\tx11 := highX + 0.5\n\ty11 := highY + 0.5\n\n\tp := bilinearSrc{\n\t\tlow: image.Pt(int(lowX), int(lowY)),\n\t\thigh: image.Pt(int(highX), int(highY)),\n\t}\n\n\t\/\/ Literally, edge cases. If we are close enough to the edge of\n\t\/\/ the image, curtail the interpolation sources.\n\tif lowX == highX && lowY == highY {\n\t\tp.frac00 = 1.0\n\t} else if sy-minY <= 0.5 && sx-minX <= 0.5 {\n\t\tp.frac00 = 1.0\n\t} else if maxY-sy <= 0.5 && maxX-sx <= 0.5 {\n\t\tp.frac11 = 1.0\n\t} else if sy-minY <= 0.5 || lowY == highY {\n\t\tp.frac00 = x01 - sx\n\t\tp.frac01 = sx - x00\n\t} else if sx-minX <= 0.5 || lowX == highX {\n\t\tp.frac00 = y10 - sy\n\t\tp.frac10 = sy - y00\n\t} else if maxY-sy <= 0.5 {\n\t\tp.frac10 = x11 - sx\n\t\tp.frac11 = sx - x10\n\t} else if maxX-sx <= 0.5 {\n\t\tp.frac01 = y11 - sy\n\t\tp.frac11 = sy - y01\n\t} else {\n\t\tp.frac00 = (x01 - sx) * (y10 - sy)\n\t\tp.frac01 = (sx - x00) * (y11 - sy)\n\t\tp.frac10 = (x11 - sx) * (sy - y00)\n\t\tp.frac11 = (sx - x10) * (sy - y01)\n\t}\n\n\treturn p\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n)\n\nvar videosPath string\nvar bindAddr string\nvar p Player\n\n\/\/ Page is the HTML page struct\ntype Page struct {\n\tTitle string\n}\n\n\/\/ Video struct contains has two fields:\n\/\/ the filename and base64 the hash of the filepath\ntype Video struct {\n\tFile string `json:\"file\"`\n\tHash string `json:\"hash\"`\n}\n\n\/\/ Index func that serves the HTML for the home page\nfunc Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tp := &Page{Title: \"go-omxremote\"}\n\ttmpl, err := FSString(false, \"\/views\/index.html\")\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n\n\tt, _ := template.New(\"index\").Parse(tmpl)\n\tt.Execute(w, p)\n}\n\n\/\/ List function - outputs json with all video files in the videoPath\nfunc List(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tvar files []*Video\n\tvar root = videosPath\n\t_ = filepath.Walk(root, func(path string, f os.FileInfo, _ error) error {\n\t\tif f.IsDir() == false {\n\t\t\tif filepath.Ext(path) == \".mkv\" || filepath.Ext(path) == \".mp4\" || filepath.Ext(path) == \".avi\" {\n\t\t\t\tfiles = append(files, &Video{File: filepath.Base(path), Hash: base64.StdEncoding.EncodeToString([]byte(path))})\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tencoder := json.NewEncoder(w)\n\tencoder.Encode(files)\n}\n\n\/\/ Start playback http handler\nfunc Start(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tfilename, _ := base64.StdEncoding.DecodeString(ps.ByName(\"name\"))\n\tstringFilename := string(filename[:])\n\n\terr := p.Start(stringFilename)\n\tif err != nil {\n\t\tp.Playing = false\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tlog.Printf(\"Playing media file: %s\\n\", stringFilename)\n\tstartTime := time.Now()\n\terr = p.Command.Wait()\n\n\tlog.Printf(\"Stopped media file: %s after %s\\n\", stringFilename, time.Since(startTime))\n\tp.Playing = false\n\tw.WriteHeader(http.StatusOK)\n}\n\n\/\/ SendCommnd is the HTTP handler for sending a command to the player\nfunc SendCommand(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\terr := p.SendCommand(ps.ByName(\"command\"))\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc main() {\n\tflag.StringVar(&videosPath, \"media\", \".\", \"Path to look for videos in\")\n\tflag.StringVar(&bindAddr, \"bind\", \":31415\", \"Address to bind on.\")\n\tflag.Parse()\n\n\trouter := httprouter.New()\n\trouter.GET(\"\/\", Index)\n\trouter.GET(\"\/files\", List)\n\n\trouter.POST(\"\/start\/:name\", Start)\n\trouter.POST(\"\/file\/:name\/:command\", SendCommand)\n\n\trouter.ServeFiles(\"\/assets\/*filepath\", FS(false))\n\tlog.Fatal(http.ListenAndServe(bindAddr, router))\n}\n<commit_msg>Fix last golint warning<commit_after>package main\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n)\n\nvar videosPath string\nvar bindAddr string\nvar p Player\n\n\/\/ Page is the HTML page struct\ntype Page struct {\n\tTitle string\n}\n\n\/\/ Video struct contains has two fields:\n\/\/ the filename and base64 the hash of the filepath\ntype Video struct {\n\tFile string `json:\"file\"`\n\tHash string `json:\"hash\"`\n}\n\n\/\/ Index func that serves the HTML for the home page\nfunc Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tp := &Page{Title: \"go-omxremote\"}\n\ttmpl, err := FSString(false, \"\/views\/index.html\")\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n\n\tt, _ := template.New(\"index\").Parse(tmpl)\n\tt.Execute(w, p)\n}\n\n\/\/ List function - outputs json with all video files in the videoPath\nfunc List(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tvar files []*Video\n\tvar root = videosPath\n\t_ = filepath.Walk(root, func(path string, f os.FileInfo, _ error) error {\n\t\tif f.IsDir() == false {\n\t\t\tif filepath.Ext(path) == \".mkv\" || filepath.Ext(path) == \".mp4\" || filepath.Ext(path) == \".avi\" {\n\t\t\t\tfiles = append(files, &Video{File: filepath.Base(path), Hash: base64.StdEncoding.EncodeToString([]byte(path))})\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tencoder := json.NewEncoder(w)\n\tencoder.Encode(files)\n}\n\n\/\/ Start playback http handler\nfunc Start(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\tfilename, _ := base64.StdEncoding.DecodeString(ps.ByName(\"name\"))\n\tstringFilename := string(filename[:])\n\n\terr := p.Start(stringFilename)\n\tif err != nil {\n\t\tp.Playing = false\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tlog.Printf(\"Playing media file: %s\\n\", stringFilename)\n\tstartTime := time.Now()\n\terr = p.Command.Wait()\n\n\tlog.Printf(\"Stopped media file: %s after %s\\n\", stringFilename, time.Since(startTime))\n\tp.Playing = false\n\tw.WriteHeader(http.StatusOK)\n}\n\n\/\/ SendCommand is the HTTP handler for sending a command to the player\nfunc SendCommand(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\terr := p.SendCommand(ps.ByName(\"command\"))\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc main() {\n\tflag.StringVar(&videosPath, \"media\", \".\", \"Path to look for videos in\")\n\tflag.StringVar(&bindAddr, \"bind\", \":31415\", \"Address to bind on.\")\n\tflag.Parse()\n\n\trouter := httprouter.New()\n\trouter.GET(\"\/\", Index)\n\trouter.GET(\"\/files\", List)\n\n\trouter.POST(\"\/start\/:name\", Start)\n\trouter.POST(\"\/file\/:name\/:command\", SendCommand)\n\n\trouter.ServeFiles(\"\/assets\/*filepath\", FS(false))\n\tlog.Fatal(http.ListenAndServe(bindAddr, router))\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"image\"\n\t\"log\"\n\t\"rtsengine\"\n\t\"time\"\n)\n\n\/*\n Main entry point for the go rtsengine.\n*\/\n\ntype flags struct {\n\tport *int\n\thost *string\n\tverbose *bool\n\tquiet *bool\n\twidth *int\n\theight *int\n}\n\nfunc main() {\n\n\tvar cargs flags\n\n\tcargs.port = flag.Int(\"port\", 8080, \"port of rts server\")\n\tcargs.host = flag.String(\"host\", \"localhost\", \"hostname of rts server\")\n\tcargs.verbose = flag.Bool(\"verbose\", false, \"Emit excessive progress reporting during rts server execution .\")\n\tcargs.quiet = flag.Bool(\"quiet\", false, \"Silent testing.\")\n\tcargs.width = flag.Int(\"width\", 1000, \"Width of the world.\")\n\tcargs.height = flag.Int(\"height\", 1000, \"Height of the world.\")\n\n\t\/\/ Command line arguments parsinmg\n\tflag.Parse()\n\n\tif !*cargs.quiet {\n\t\tlog.Print(\"GO RTS Engine starting\")\n\t}\n\n\tgame, err := rtsengine.NewGame(\"Game Test\", 10000, 1, 0, 50, 50, *cargs.width, *cargs.height)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn\n\t}\n\n\t\/\/ Construct a fence with the pathing as a simple test.\n\tstart := time.Now()\n\tpathList, err := game.FindPath(&image.Point{10, 10}, &image.Point{45, 45})\n\telapsed := time.Since(start)\n\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn\n\t}\n\n\t\/*\n\t\tfor e := pathList.Front(); e != nil; e = e.Next() {\n\t\t\tsquare := e.Value.(*rtsengine.Square)\n\t\t\tfence := rtsengine.Fence{}\n\t\t\tfence.Initialize()\n\t\t\t_ = game.OurWorld.Grid.Set(&square.Locus, &fence)\n\t\t}\n\t*\/\n\n\tgame.FreeList(pathList)\n\tgame.ItemPool.PrintAllocatedWaypoints()\n\n\tlog.Printf(\"\\n\\nPathfinding took %s\\n\\n\", elapsed)\n\n\terr = game.AcceptNetConnections(*cargs.host, *cargs.port)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn\n\t}\n\n\tgame.Start()\n\n\tselect {} \/\/ wait forever without eating CPU.\n}\n<commit_msg>cleanup<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"image\"\n\t\"log\"\n\t\"rtsengine\"\n\t\"time\"\n)\n\n\/*\n Main entry point for the go rtsengine.\n*\/\n\ntype flags struct {\n\tport *int\n\thost *string\n\tverbose *bool\n\tquiet *bool\n\twidth *int\n\theight *int\n}\n\nfunc main() {\n\n\tvar cargs flags\n\n\tcargs.port = flag.Int(\"port\", 8080, \"port of rts server\")\n\tcargs.host = flag.String(\"host\", \"localhost\", \"hostname of rts server\")\n\tcargs.verbose = flag.Bool(\"verbose\", false, \"Emit excessive progress reporting during rts server execution .\")\n\tcargs.quiet = flag.Bool(\"quiet\", false, \"Silent testing.\")\n\tcargs.width = flag.Int(\"width\", 1000, \"Width of the world.\")\n\tcargs.height = flag.Int(\"height\", 1000, \"Height of the world.\")\n\n\t\/\/ Command line arguments parsinmg\n\tflag.Parse()\n\n\tif !*cargs.quiet {\n\t\tlog.Print(\"GO RTS Engine starting\")\n\t}\n\n\tgame, err := rtsengine.NewGame(\"Game Test\", 10000, 1, 0, 50, 50, *cargs.width, *cargs.height)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn\n\t}\n\n\t\/\/ Construct a fence with the pathing as a simple test.\n\tstart := time.Now()\n\tpathList, err := game.FindPath(&image.Point{10, 10}, &image.Point{45, 45})\n\telapsed := time.Since(start)\n\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn\n\t}\n\n\tgame.FreeList(pathList)\n\tgame.ItemPool.PrintAllocatedWaypoints()\n\n\tlog.Printf(\"\\n\\nPathfinding took %s\\n\\n\", elapsed)\n\n\terr = game.AcceptNetConnections(*cargs.host, *cargs.port)\n\tif err != nil {\n\t\tlog.Print(err)\n\t\treturn\n\t}\n\n\tgame.Start()\n\n\tselect {} \/\/ wait forever without eating CPU.\n}\n<|endoftext|>"} {"text":"<commit_before>package util\n\nimport (\n\t\"strings\"\n\t\"strconv\"\n\t\"time\"\n\t\"unicode\/utf8\"\n)\n\ntype Date struct{\n\tMonth string\n\tDay string\n\tTime string\n}\n\nvar months = [12]string{\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"}\n\n\/\/ Determines time layout for the following:\n\/\/ YYYY-MM-DD, MM-DD-YYYY, MM-DD-YY (the punctuation is variable)\n\/\/ Month and Day can be single digits\n\/\/ The punctuation (-,\/,_,etc) is determined by the second parameter\nfunc parseTimeLayout(s []string, p string) string {\n\tif len(s) == 3 {\n\t\tif utf8.RuneCountInString(s[0]) == 4 {\n\t\t\treturn \"2006\" + p + \"1\" + p + \"_2\"\n\t\t} else {\n\t\t\tif utf8.RuneCountInString(s[2]) == 4 {\n\t\t\t\treturn \"_2\" + p + \"1\" + p + \"2006\"\n\t\t\t} else if utf8.RuneCountInString(s[2])== 2 {\n\t\t\t\treturn \"_2\" + p + \"1\" + p + \"06\"\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ Returns time layout for time.Parse\nfunc returnTimeLayout(t string) string {\n\tsplits := strings.Split(t, \"\/\")\n\tlayout := parseTimeLayout(splits, \"\/\")\n\tif layout == \"\" {\n\t\tsplits = strings.Split(t, \"-\")\n\t\tlayout = parseTimeLayout(splits, \"-\")\n\t}\n\tif layout == \"\" {\n\t\treturn \"1-_2-2006\"\n\t} else {\n\t\treturn layout\n\t}\n}\n\n\/\/ Takes year\/month\/day (in a variety of formats) and HH:MM parameters\n\/\/ Checks if the time is valid and returns error if it is not\n\/\/ Parses in the location of Santiago\nfunc ReturnTime(d string, t string) (time.Time, error) {\n\tloc, err := time.LoadLocation(\"America\/Santiago\")\n\tif err != nil {\n\t\treturn time.Time{}, NewError(err, \"Internal server error\", 500)\n\t}\n\n\tlayout := returnTimeLayout(d)\n\tsplits := strings.Split(t, \":\")\n\tif len(splits) == 1 {\n\t\tif utf8.RuneCountInString(t) == 2 {\n\t\t\tlayout += \" 15\"\n\t\t} else {\n\t\t\tlayout += \" 1500\"\n\t\t}\n\t} else {\n\t\tlayout += \" 15:04\"\n\t}\n\n\ttimeVar, err := time.ParseInLocation(layout, d + \" \" + t, loc)\n\tif err != nil {\n\t\treturn time.Time{}, NewError(nil, \"Invalid time format\", 400)\n\t}\n\n\treturn timeVar, nil\n}\n\n\/\/ Normalizes time format to one of two layouts (machine or human readable)\n\/\/ Checks if the time is valid and returns error if it is not\n\/\/ Parses in the location of Santiago\nfunc ReturnTimeString(humanReadable bool, d string, t string) (string, string, error) {\n\tconst (\n\t\tlayoutHuman = \"2\/1\/2006\"\n\t\tlayoutMachine = \"2006-01-02\"\n\t)\n\tloc, err := time.LoadLocation(\"America\/Santiago\")\n\tif err != nil {\n\t\treturn \"\", \"\", NewError(err, \"Internal server error\", 500)\n\t}\n\n\tlayout := returnTimeLayout(d)\n\tsplits := strings.Split(t, \":\")\n\tif len(splits) == 1 {\n\t\tlayout += \" 15\"\n\t} else {\n\t\tlayout += \" 15:04\"\n\t}\n\ttimeVar, err := time.ParseInLocation(layout, d + \" \" + t, loc)\n\tif err != nil {\n\t\treturn \"\", \"\", NewError(nil, \"Invalid time format\", 400)\n\t}\n\tif humanReadable {\n\t\treturn timeVar.Format(layoutHuman), timeVar.Format(\"15:04\"), nil\n\t} else {\n\t\treturn timeVar.Format(layoutMachine), timeVar.Format(\"15:04\"), nil\n\t}\n}\n\nfunc ReturnCurrentTimeString(rounded bool) (string, string) {\n\tif rounded {\n\t\treturn time.Now().Local().Format(\"2006-01-02\"), time.Now().Local().Format(\"15\") + \":00\"\n\t} else {\n\t\treturn time.Now().Local().Format(\"2006-01-02\"), time.Now().Local().Format(\"15:04\")\n\t}\n}\n\n\/\/FORM yyyy-mm-dd hh:mm:ss Drop the seconds. Parse the rest.\nfunc PrettyDate(timestamp string, suffix bool) (Date, error) {\n\tvar splits []string = strings.Split(timestamp, \"\")\n\tvar date Date\n\tmonth := splits[5] + splits[6]\n\tm, err := strconv.ParseInt(month, 10, 8)\n\tif err != nil {\n\t\treturn date, NewError(err, \"Internal server error\", 500)\n\t}\n\tdate.Month = months[m-1]\n\tvar day string\n\tif splits[8]==\"0\" {\n\t\tday = splits[9]\n\t} else {\n\t\tday = splits[8] + splits[9]\n\t}\n\tif suffix {\n\t\tif splits[9]==\"1\" {\n\t\t\tday+=\"st\"\n\t\t} else if splits[9]==\"2\" {\n\t\t\tday+=\"nd\"\n\t\t} else if splits[9]==\"3\" {\n\t\t\tday+=\"rd\"\n\t\t} else{\n\t\t\tday+=\"th\"\n\t\t}\n\t}\n\tdate.Day = day\n\tdate.Time = splits[11] + splits[12] + \":\" + splits[14] + splits[15]\n\treturn date, nil\n}<commit_msg>-Fixed bug with new time input parameter<commit_after>package util\n\nimport (\n\t\"strings\"\n\t\"strconv\"\n\t\"time\"\n\t\"unicode\/utf8\"\n)\n\ntype Date struct{\n\tMonth string\n\tDay string\n\tTime string\n}\n\nvar months = [12]string{\"Jan\", \"Feb\", \"Mar\", \"Apr\", \"May\", \"Jun\", \"Jul\", \"Aug\", \"Sep\", \"Oct\", \"Nov\", \"Dec\"}\n\n\/\/ Determines time layout for the following:\n\/\/ YYYY-MM-DD, MM-DD-YYYY, MM-DD-YY (the punctuation is variable)\n\/\/ Month and Day can be single digits\n\/\/ The punctuation (-,\/,_,etc) is determined by the second parameter\nfunc parseTimeLayout(s []string, p string) string {\n\tif len(s) == 3 {\n\t\tif utf8.RuneCountInString(s[0]) == 4 {\n\t\t\treturn \"2006\" + p + \"1\" + p + \"_2\"\n\t\t} else {\n\t\t\tif utf8.RuneCountInString(s[2]) == 4 {\n\t\t\t\treturn \"_2\" + p + \"1\" + p + \"2006\"\n\t\t\t} else if utf8.RuneCountInString(s[2])== 2 {\n\t\t\t\treturn \"_2\" + p + \"1\" + p + \"06\"\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ Returns time layout for time.Parse\nfunc returnTimeLayout(t string) string {\n\tsplits := strings.Split(t, \"\/\")\n\tlayout := parseTimeLayout(splits, \"\/\")\n\tif layout == \"\" {\n\t\tsplits = strings.Split(t, \"-\")\n\t\tlayout = parseTimeLayout(splits, \"-\")\n\t}\n\tif layout == \"\" {\n\t\treturn \"1-_2-2006\"\n\t} else {\n\t\treturn layout\n\t}\n}\n\n\/\/ Takes year\/month\/day (in a variety of formats) and HH:MM parameters\n\/\/ Checks if the time is valid and returns error if it is not\n\/\/ Parses in the location of Santiago\nfunc ReturnTime(d string, t string) (time.Time, error) {\n\tloc, err := time.LoadLocation(\"America\/Santiago\")\n\tif err != nil {\n\t\treturn time.Time{}, NewError(err, \"Internal server error\", 500)\n\t}\n\n\tlayout := returnTimeLayout(d)\n\tsplits := strings.Split(t, \":\")\n\tif len(splits) == 1 {\n\t\tif utf8.RuneCountInString(t) == 2 {\n\t\t\tlayout += \" 15\"\n\t\t} else {\n\t\t\tlayout += \" 1504\"\n\t\t}\n\t} else {\n\t\tlayout += \" 15:04\"\n\t}\n\n\ttimeVar, err := time.ParseInLocation(layout, d + \" \" + t, loc)\n\tif err != nil {\n\t\treturn time.Time{}, NewError(nil, \"Invalid time format\", 400)\n\t}\n\n\treturn timeVar, nil\n}\n\n\/\/ Normalizes time format to one of two layouts (machine or human readable)\n\/\/ Checks if the time is valid and returns error if it is not\n\/\/ Parses in the location of Santiago\nfunc ReturnTimeString(humanReadable bool, d string, t string) (string, string, error) {\n\tconst (\n\t\tlayoutHuman = \"2\/1\/2006\"\n\t\tlayoutMachine = \"2006-01-02\"\n\t)\n\tloc, err := time.LoadLocation(\"America\/Santiago\")\n\tif err != nil {\n\t\treturn \"\", \"\", NewError(err, \"Internal server error\", 500)\n\t}\n\n\tlayout := returnTimeLayout(d)\n\tsplits := strings.Split(t, \":\")\n\tif len(splits) == 1 {\n\t\tlayout += \" 15\"\n\t} else {\n\t\tlayout += \" 15:04\"\n\t}\n\ttimeVar, err := time.ParseInLocation(layout, d + \" \" + t, loc)\n\tif err != nil {\n\t\treturn \"\", \"\", NewError(nil, \"Invalid time format\", 400)\n\t}\n\tif humanReadable {\n\t\treturn timeVar.Format(layoutHuman), timeVar.Format(\"15:04\"), nil\n\t} else {\n\t\treturn timeVar.Format(layoutMachine), timeVar.Format(\"15:04\"), nil\n\t}\n}\n\nfunc ReturnCurrentTimeString(rounded bool) (string, string) {\n\tif rounded {\n\t\treturn time.Now().Local().Format(\"2006-01-02\"), time.Now().Local().Format(\"15\") + \":00\"\n\t} else {\n\t\treturn time.Now().Local().Format(\"2006-01-02\"), time.Now().Local().Format(\"15:04\")\n\t}\n}\n\n\/\/FORM yyyy-mm-dd hh:mm:ss Drop the seconds. Parse the rest.\nfunc PrettyDate(timestamp string, suffix bool) (Date, error) {\n\tvar splits []string = strings.Split(timestamp, \"\")\n\tvar date Date\n\tmonth := splits[5] + splits[6]\n\tm, err := strconv.ParseInt(month, 10, 8)\n\tif err != nil {\n\t\treturn date, NewError(err, \"Internal server error\", 500)\n\t}\n\tdate.Month = months[m-1]\n\tvar day string\n\tif splits[8]==\"0\" {\n\t\tday = splits[9]\n\t} else {\n\t\tday = splits[8] + splits[9]\n\t}\n\tif suffix {\n\t\tif splits[9]==\"1\" {\n\t\t\tday+=\"st\"\n\t\t} else if splits[9]==\"2\" {\n\t\t\tday+=\"nd\"\n\t\t} else if splits[9]==\"3\" {\n\t\t\tday+=\"rd\"\n\t\t} else{\n\t\t\tday+=\"th\"\n\t\t}\n\t}\n\tdate.Day = day\n\tdate.Time = splits[11] + splits[12] + \":\" + splits[14] + splits[15]\n\treturn date, nil\n}<|endoftext|>"} {"text":"<commit_before>package goflake\n\nimport (\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestCustomTimestamp(t *testing.T) {\n\n\t\/\/ 2014-04-16 17:49:37 +0100 => 1397666977\n\ttm := time.Unix(1397666977, 0)\n\tnewT := customTimestamp(tm)\n\n\t\/\/ timestamp - epoch = adjusted time\n\t\/\/ 1397666977000 - 1325376000000 = 72290977000\n\tassert.Equal(t, newT, 72290977000, \"Times should match\")\n}\n\nfunc TestValidWorkerId(t *testing.T) {\n\tvalidIds := []uint32{0, 545, 1023}\n\tfor _, v := range validIds {\n\t\t_, err := NewGoFlake(v)\n\t\tassert.Equal(t, err, nil, \"Error should be nil\")\n\t}\n}\n\nfunc TestInvalidWorkerId(t *testing.T) {\n\tinvalidIds := []uint32{1024, 5841, 892347934}\n\tfor _, v := range invalidIds {\n\t\t_, err := NewGoFlake(v)\n\t\tassert.Equal(t, err, ErrInvalidWorkerId, \"Error should match\")\n\t}\n}\n<commit_msg>Add more test cases for time adjustment<commit_after>package goflake\n\nimport (\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestCustomTimestamp(t *testing.T) {\n\n\t\/\/ timestamp - epoch = adjusted time\n\ttestCases := []struct {\n\t\tts int64\n\t\tadjTs int64\n\t}{\n\t\t{1397666977000, 72290977000}, \/\/ Now\n\t\t{1397666978000, 72290978000}, \/\/ in 1 second\n\t\t{1395881056000, 70505056000}, \/\/ 3 weeks ago\n\t\t{1303001162000, -22374838000}, \/\/ 3 years ago\n\t\t{1492390054000, 167014054000}, \/\/ in 3 years\n\t\t{2344466898000, 1019090898000}, \/\/ in 30 years\n\t}\n\n\tfor _, tc := range testCases {\n\t\tadjTs := customTimestamp(time.Unix(tc.ts\/1000, 0))\n\t\tassert.Equal(t, adjTs, tc.adjTs, \"Times should match\")\n\t}\n}\n\nfunc TestValidWorkerId(t *testing.T) {\n\tvalidIds := []uint32{0, 545, 1023}\n\tfor _, v := range validIds {\n\t\t_, err := NewGoFlake(v)\n\t\tassert.Equal(t, err, nil, \"Error should be nil\")\n\t}\n}\n\nfunc TestInvalidWorkerId(t *testing.T) {\n\tinvalidIds := []uint32{1024, 5841, 892347934}\n\tfor _, v := range invalidIds {\n\t\t_, err := NewGoFlake(v)\n\t\tassert.Equal(t, err, ErrInvalidWorkerId, \"Error should match\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package gohinetradio is to get hichannel radio path and with token to play without flash.\npackage gohinetradio\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"runtime\"\n\t\"sync\"\n\t\"text\/tabwriter\"\n)\n\n\/\/ Base URL.\nconst (\n\tPLAYURL string = \"http:\/\/hichannel.hinet.net\/radio\/play.do?id=%s\"\n\tLISTURL string = \"http:\/\/hichannel.hinet.net\/radio\/channelList.do?radioType=&freqType=&freq=&area=&pN=%s\"\n\tLISTPAGE int = 4\n)\n\n\/\/ RadioData is the json of `http:\/\/hichannel.hinet.net\/radio\/play.do?id=232`\ntype RadioData struct {\n\tChannelTitle string `json:\"channel_title\"`\n\tPlayRadio string `json:\"playRadio\"`\n\tProgramName string `json:\"programName\"`\n\tChannelCollect bool `json:\"channel_collect\"`\n}\n\n\/\/ GetURL is getting radio channel url with token.\nfunc GetURL(No string) (r RadioData) {\n\tresp, _ := http.Get(fmt.Sprintf(PLAYURL, No))\n\tdefer resp.Body.Close()\n\tdata, _ := ioutil.ReadAll(resp.Body)\n\tjsonData := json.NewDecoder(bytes.NewReader(data))\n\tjsonData.Decode(&r)\n\treturn\n}\n\n\/\/ RadioListData is the json of `http:\/\/hichannel.hinet.net\/radio\/channelList.do?radioType=&freqType=&freq=&area=&pN=1`\ntype RadioListData struct {\n\tPageNo int `json:\"pageNo\"`\n\tPageSize int `json:\"pageSize\"`\n\tList []RadioListDatas `json:\"list\"`\n}\n\n\/\/RadioListDatas is RadioListData.List type.\ntype RadioListDatas struct {\n\tChannelImage string `json:\"channel_image\"`\n\tChannelTitle string `json:\"channel_title\"`\n\tRadioType string `json:\"radio_type\"`\n\tIsChannel bool `json:\"isChannel\"`\n\tProgramName string `json:\"program_name\"`\n\tChannelID string `json:\"channel_id\"`\n}\n\nfunc getRadioPageList(page int) (r RadioListData) {\n\tresp, _ := http.Get(fmt.Sprintf(LISTURL, page))\n\tdefer resp.Body.Close()\n\tdata, _ := ioutil.ReadAll(resp.Body)\n\tjsonData := json.NewDecoder(bytes.NewReader(data))\n\tjsonData.Decode(&r)\n\treturn\n}\n\n\/\/ GetRadioList is getting all channel list.\nfunc GetRadioList(total int) (result []RadioListDatas) {\n\tqueue := make(chan RadioListData)\n\tvar wg sync.WaitGroup\n\twg.Add(int(LISTPAGE))\n\tfor i := 1; i <= total; i++ {\n\t\tgo func(i int) {\n\t\t\tdefer wg.Done()\n\t\t\truntime.Gosched()\n\t\t\tqueue <- getRadioPageList(i)\n\t\t}(i)\n\t}\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor v := range queue {\n\t\t\tfor _, data := range v.List {\n\t\t\t\tresult = append(result, data)\n\t\t\t}\n\t\t}\n\t}()\n\twg.Wait()\n\treturn\n}\n\n\/\/ GenList is to output table list.\nfunc GenList() {\n\tw := new(tabwriter.Writer)\n\tw.Init(os.Stdout, 0, 8, 0, '\\t', 0)\n\tvar output string\n\tfor no, data := range GetRadioList(LISTPAGE) {\n\t\toutput += fmt.Sprintf(\"%d. [%s] %s\\t\", no+1, data.ChannelID, data.ChannelTitle)\n\t\tif (no+1)%3 == 0 {\n\t\t\tfmt.Fprintln(w, output)\n\t\t\toutput = \"\"\n\t\t}\n\t}\n\tfmt.Fprintln(w, output)\n\tw.Flush()\n}\n\n\/\/ PrintChannel is my fav channel XD.\nfunc PrintChannel() {\n\tfmt.Println(\"[207] 中廣新聞網\")\n\tfmt.Println(\"[205] 中廣流行網 i like\")\n\tfmt.Println(\"[206] 中廣音樂網i radio\")\n\tfmt.Println(\"[232] 飛碟電台\")\n\tfmt.Println(\"[222] HitFm聯播網 Taipei 北部\")\n\tfmt.Println(\"[156] KISS RADIO 大眾廣播電台\")\n\tfmt.Println(\"[308] KISS RADIO 網路音樂台\")\n\tfmt.Println(\"[187] NEWS98新聞網\")\n\tfmt.Println(\"[370] POP Radio 台北流行廣播電台\")\n}\n<commit_msg>Tiny changed.<commit_after>\/\/ Package gohinetradio is to get hichannel radio path and with token to play without flash.\npackage gohinetradio\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"runtime\"\n\t\"sync\"\n\t\"text\/tabwriter\"\n)\n\n\/\/ Base URL.\nconst (\n\tPLAYURL string = \"http:\/\/hichannel.hinet.net\/radio\/play.do?id=%s\"\n\tLISTURL string = \"http:\/\/hichannel.hinet.net\/radio\/channelList.do?radioType=&freqType=&freq=&area=&pN=%s\"\n\tLISTPAGE int = 4\n)\n\n\/\/ RadioData is the json of `http:\/\/hichannel.hinet.net\/radio\/play.do?id=232`\ntype RadioData struct {\n\tChannelTitle string `json:\"channel_title\"`\n\tPlayRadio string `json:\"playRadio\"`\n\tProgramName string `json:\"programName\"`\n\tChannelCollect bool `json:\"channel_collect\"`\n}\n\n\/\/ GetURL is getting radio channel url with token.\nfunc GetURL(No string) (r RadioData) {\n\tresp, _ := http.Get(fmt.Sprintf(PLAYURL, No))\n\tdefer resp.Body.Close()\n\tdata, _ := ioutil.ReadAll(resp.Body)\n\tjsonData := json.NewDecoder(bytes.NewReader(data))\n\tjsonData.Decode(&r)\n\treturn\n}\n\n\/\/ RadioListData is the json of `http:\/\/hichannel.hinet.net\/radio\/channelList.do?radioType=&freqType=&freq=&area=&pN=1`\ntype RadioListData struct {\n\tPageNo int `json:\"pageNo\"`\n\tPageSize int `json:\"pageSize\"`\n\tList []RadioListDatas `json:\"list\"`\n}\n\n\/\/RadioListDatas is RadioListData.List type.\ntype RadioListDatas struct {\n\tChannelImage string `json:\"channel_image\"`\n\tChannelTitle string `json:\"channel_title\"`\n\tRadioType string `json:\"radio_type\"`\n\tIsChannel bool `json:\"isChannel\"`\n\tProgramName string `json:\"program_name\"`\n\tChannelID string `json:\"channel_id\"`\n}\n\nfunc getRadioPageList(page int) (r RadioListData) {\n\tresp, _ := http.Get(fmt.Sprintf(LISTURL, page))\n\tdefer resp.Body.Close()\n\tdata, _ := ioutil.ReadAll(resp.Body)\n\tjsonData := json.NewDecoder(bytes.NewReader(data))\n\tjsonData.Decode(&r)\n\treturn\n}\n\n\/\/ GetRadioList is getting all channel list.\nfunc GetRadioList(total int) (r []RadioListDatas) {\n\tqueue := make(chan RadioListData)\n\tvar wg sync.WaitGroup\n\twg.Add(int(LISTPAGE))\n\tfor i := 1; i <= total; i++ {\n\t\tgo func(i int) {\n\t\t\tdefer wg.Done()\n\t\t\truntime.Gosched()\n\t\t\tqueue <- getRadioPageList(i)\n\t\t}(i)\n\t}\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tfor v := range queue {\n\t\t\tfor _, data := range v.List {\n\t\t\t\tr = append(r, data)\n\t\t\t}\n\t\t}\n\t}()\n\twg.Wait()\n\treturn\n}\n\n\/\/ GenList is to output table list.\nfunc GenList() {\n\tw := new(tabwriter.Writer)\n\tw.Init(os.Stdout, 0, 8, 0, '\\t', 0)\n\tvar output string\n\tfor no, data := range GetRadioList(LISTPAGE) {\n\t\toutput += fmt.Sprintf(\"%d. [%s] %s\\t\", no+1, data.ChannelID, data.ChannelTitle)\n\t\tif (no+1)%3 == 0 {\n\t\t\tfmt.Fprintln(w, output)\n\t\t\toutput = \"\"\n\t\t}\n\t}\n\tfmt.Fprintln(w, output)\n\tw.Flush()\n}\n\n\/\/ PrintChannel is my fav channel XD.\nfunc PrintChannel() {\n\tfmt.Println(\"[207] 中廣新聞網\")\n\tfmt.Println(\"[205] 中廣流行網 i like\")\n\tfmt.Println(\"[206] 中廣音樂網i radio\")\n\tfmt.Println(\"[232] 飛碟電台\")\n\tfmt.Println(\"[222] HitFm聯播網 Taipei 北部\")\n\tfmt.Println(\"[156] KISS RADIO 大眾廣播電台\")\n\tfmt.Println(\"[308] KISS RADIO 網路音樂台\")\n\tfmt.Println(\"[187] NEWS98新聞網\")\n\tfmt.Println(\"[370] POP Radio 台北流行廣播電台\")\n}\n<|endoftext|>"} {"text":"<commit_before>package goinsta\n\nimport (\n\t\"encoding\/json\"\n\t\"os\"\n\t\"strconv\"\n\t\"testing\"\n)\n\nvar (\n\tusername string\n\tpassword string\n\tpullrequest bool\n\tinsta *Instagram\n\tskip bool\n)\n\nfunc TestHandlesNonExistingItems(t *testing.T) {\n\tusername = os.Getenv(\"INSTA_USERNAME\")\n\tpassword = os.Getenv(\"INSTA_PASSWORD\")\n\tpullrequest = os.Getenv(\"INSTA_PULL\") == \"true\"\n\n\tt.Log(\"Pull Request\", pullrequest)\n\n\tif len(username)*len(password) == 0 && !pullrequest {\n\t\tskip = true\n\t\tt.Fatal(\"Username or Password is empty\")\n\t}\n\tskip = pullrequest\n}\n\nfunc TestDeviceID(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\tinsta = New(username, password)\n\tt.Log(insta.Informations.DeviceID)\n}\n\nfunc TestLogin(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\terr := insta.Login()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\tt.Log(\"status : ok\")\n}\n\nfunc TestUserFollowings(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\tresp, err := insta.UserFollowing(insta.Informations.UsernameId, \"\")\n\tif err != nil {\n\t\tt.Log(insta.GetLastJson())\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\tt.Log(resp.Status)\n}\n\nfunc TestUserFollowers(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\tresp, err := insta.UserFollowers(insta.Informations.UsernameId, \"\")\n\tif err != nil {\n\t\tt.Log(insta.GetLastJson())\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\tt.Log(resp.Status)\n}\n\nfunc TestSelfUserFeed(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\tresp, err := insta.UserFeed()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\tt.Log(resp.Status)\n}\n\nfunc TestMediaLikers(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\tresp, err := insta.UserFeed()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tif len(resp.Items) > 0 {\n\t\tresult, err := insta.MediaLikers(resp.Items[0].ID)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t\treturn\n\t\t}\n\t\tt.Log(result.Status)\n\t} else {\n\t\tt.Skip(\"Empty feed\")\n\t}\n}\n\nfunc TestFollow(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\tuser, err := insta.GetUsername(\"elonmusk\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tresp, err := insta.Follow(user.User.StringID())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(resp.Status)\n}\n\nfunc TestUnFollow(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\tuser, err := insta.GetUsername(\"elonmusk\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tresp, err := insta.UnFollow(user.User.StringID())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(resp.Status)\n}\n\nfunc TestLike(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\t_, err := insta.Like(\"1363799876794028707\") \/\/ random image ! from search by tags on pizza\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(\"Finished\")\n}\n\nfunc TestMediaInfo(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\tresp, err := insta.MediaInfo(\"1363799876794028707\") \/\/ random image ! from search by tags on pizza\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(resp.Status)\n}\n\nfunc TestTagFeed(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\tresp, err := insta.TagFeed(\"pizza\") \/\/ one of ahmdrz images\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(resp.Items[0])\n}\n\nfunc TestCommentAndDeleteComment(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\tbytes, err := insta.Comment(\"1363799876794028707\", \"Wow <3 pizza !\") \/\/ random image ! from search by tags on pizza\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(\"Finished\")\n\n\ttype Comment struct {\n\t\tID int64 `json:\"pk\"`\n\t}\n\n\tvar Result struct {\n\t\tComment Comment `json:\"comment\"`\n\t\tStatus string `json:\"status\"`\n\t}\n\n\terr = json.Unmarshal(bytes, &Result)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tif Result.Status != \"ok\" {\n\t\tt.Fatalf(\"Incorrect format for comment\")\n\t\treturn\n\t}\n\n\tbytes, err = insta.DeleteComment(\"1363799876794028707\", strconv.FormatInt(Result.Comment.ID, 10))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(\"Finished\")\n}\n\nfunc TestGetUserID(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\tresp, err := insta.GetUserID(\"17644112\") \/\/ ID of \"elonmusk\"\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif resp.Status != \"ok\" {\n\t\tt.Fatalf(\"Incorrect status\" + resp.Status)\n\t}\n\n\tif resp.User.Username != \"elonmusk\" {\n\t\tt.Fatalf(\"Username mismatch\" + resp.User.Username)\n\t}\n\n\tt.Log(resp.Status)\n}\n\nfunc TestGetUsername(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\tresp, err := insta.GetUsername(\"ahmd.rz\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif resp.Status != \"ok\" {\n\t\tt.Fatalf(\"Incorrect status\" + resp.Status)\n\t}\n\n\tif resp.User.Username != \"ahmd.rz\" {\n\t\tt.Fatalf(\"Incorrect username\" + resp.User.Username)\n\t}\n\n\tt.Log(resp.Status)\n}\n\nfunc TestGetProfileData(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\t_, err := insta.GetProfileData()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tt.Log(\"Finished\")\n}\n\nfunc TestRecentActivity(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\t_, err := insta.GetRecentActivity()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tt.Log(\"Finished\")\n}\n\nfunc TestSearchUsername(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\t_, err := insta.SearchUsername(\"ahmd.rz\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(\"Finished\")\n}\n\nfunc TestSearchTags(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\t_, err := insta.SearchTags(\"instagram\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(\"Finished\")\n}\n\nfunc TestGetLastJson(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\t_, err := insta.SearchTags(\"instagram\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(\"Finished\")\n}\n\nfunc TestGetSessions(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\tm := insta.GetSessions()\n\tfor _, session := range m {\n\t\tfor _, cookie := range session {\n\t\t\tt.Log(generateMD5Hash(cookie.String()))\n\t\t}\n\t}\n}\n\nfunc TestExpose(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\terr := insta.Expose()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(\"status : ok\")\n}\n\n\/\/\/\/\/\/\/\/\/\/\/ logout\n\nfunc TestLogout(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\terr := insta.Logout()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(\"status : ok\")\n}\n<commit_msg>errors on test fixed<commit_after>package goinsta\n\nimport (\n\t\"encoding\/json\"\n\t\"os\"\n\t\"strconv\"\n\t\"testing\"\n)\n\nvar (\n\tusername string\n\tpassword string\n\tpullrequest bool\n\tinsta *Instagram\n\tskip bool\n)\n\nfunc TestHandlesNonExistingItems(t *testing.T) {\n\tusername = os.Getenv(\"INSTA_USERNAME\")\n\tpassword = os.Getenv(\"INSTA_PASSWORD\")\n\tpullrequest = os.Getenv(\"INSTA_PULL\") == \"true\"\n\n\tt.Log(\"Pull Request\", pullrequest)\n\n\tif len(username)*len(password) == 0 && !pullrequest {\n\t\tskip = true\n\t\tt.Fatal(\"Username or Password is empty\")\n\t}\n\tskip = pullrequest\n}\n\nfunc TestDeviceID(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\tinsta = New(username, password)\n\tt.Log(insta.Informations.DeviceID)\n}\n\nfunc TestLogin(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\terr := insta.Login()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\tt.Log(\"status : ok\")\n}\n\nfunc TestUserFollowings(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\tresp, err := insta.UserFollowing(insta.Informations.UsernameId, \"\")\n\tif err != nil {\n\t\tt.Log(insta.GetLastJson())\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\tt.Log(resp.Status)\n}\n\nfunc TestUserFollowers(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\tresp, err := insta.UserFollowers(insta.Informations.UsernameId, \"\")\n\tif err != nil {\n\t\tt.Log(insta.GetLastJson())\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\tt.Log(resp.Status)\n}\n\nfunc TestSelfUserFeed(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\tresp, err := insta.UserFeed()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\tt.Log(resp.Status)\n}\n\nfunc TestMediaLikers(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\tresp, err := insta.UserFeed()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tif len(resp.Items) > 0 {\n\t\tresult, err := insta.MediaLikers(resp.Items[0].ID)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t\treturn\n\t\t}\n\t\tt.Log(result.Status)\n\t} else {\n\t\tt.Skip(\"Empty feed\")\n\t}\n}\n\nfunc TestFollow(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\tuser, err := insta.GetUsername(\"elonmusk\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tresp, err := insta.Follow(strconv.Itoa(user.User.Pk))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(resp.Status)\n}\n\nfunc TestUnFollow(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\tuser, err := insta.GetUsername(\"elonmusk\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tresp, err := insta.UnFollow(strconv.Itoa(user.User.Pk))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(resp.Status)\n}\n\nfunc TestLike(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\t_, err := insta.Like(\"1363799876794028707\") \/\/ random image ! from search by tags on pizza\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(\"Finished\")\n}\n\nfunc TestMediaInfo(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\tresp, err := insta.MediaInfo(\"1363799876794028707\") \/\/ random image ! from search by tags on pizza\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(resp.Status)\n}\n\nfunc TestTagFeed(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\tresp, err := insta.TagFeed(\"pizza\") \/\/ one of ahmdrz images\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(resp.Items[0])\n}\n\nfunc TestCommentAndDeleteComment(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\tbytes, err := insta.Comment(\"1363799876794028707\", \"Wow <3 pizza !\") \/\/ random image ! from search by tags on pizza\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(\"Finished\")\n\n\ttype Comment struct {\n\t\tID int64 `json:\"pk\"`\n\t}\n\n\tvar Result struct {\n\t\tComment Comment `json:\"comment\"`\n\t\tStatus string `json:\"status\"`\n\t}\n\n\terr = json.Unmarshal(bytes, &Result)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tif Result.Status != \"ok\" {\n\t\tt.Fatalf(\"Incorrect format for comment\")\n\t\treturn\n\t}\n\n\tbytes, err = insta.DeleteComment(\"1363799876794028707\", strconv.FormatInt(Result.Comment.ID, 10))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(\"Finished\")\n}\n\nfunc TestGetUserID(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\tresp, err := insta.GetUserID(\"17644112\") \/\/ ID of \"elonmusk\"\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif resp.Status != \"ok\" {\n\t\tt.Fatalf(\"Incorrect status\" + resp.Status)\n\t}\n\n\tif resp.User.Username != \"elonmusk\" {\n\t\tt.Fatalf(\"Username mismatch\" + resp.User.Username)\n\t}\n\n\tt.Log(resp.Status)\n}\n\nfunc TestGetUsername(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\tresp, err := insta.GetUsername(\"ahmd.rz\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif resp.Status != \"ok\" {\n\t\tt.Fatalf(\"Incorrect status\" + resp.Status)\n\t}\n\n\tif resp.User.Username != \"ahmd.rz\" {\n\t\tt.Fatalf(\"Incorrect username\" + resp.User.Username)\n\t}\n\n\tt.Log(resp.Status)\n}\n\nfunc TestGetProfileData(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\t_, err := insta.GetProfileData()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tt.Log(\"Finished\")\n}\n\nfunc TestRecentActivity(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\t_, err := insta.GetRecentActivity()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tt.Log(\"Finished\")\n}\n\nfunc TestSearchUsername(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\t_, err := insta.SearchUsername(\"ahmd.rz\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(\"Finished\")\n}\n\nfunc TestSearchTags(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\t_, err := insta.SearchTags(\"instagram\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(\"Finished\")\n}\n\nfunc TestGetLastJson(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\t_, err := insta.SearchTags(\"instagram\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(\"Finished\")\n}\n\nfunc TestGetSessions(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\tm := insta.GetSessions()\n\tfor _, session := range m {\n\t\tfor _, cookie := range session {\n\t\t\tt.Log(generateMD5Hash(cookie.String()))\n\t\t}\n\t}\n}\n\nfunc TestExpose(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\terr := insta.Expose()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(\"status : ok\")\n}\n\n\/\/\/\/\/\/\/\/\/\/\/ logout\n\nfunc TestLogout(t *testing.T) {\n\tif skip {\n\t\tt.Skip(\"Empty username or password , Skipping ...\")\n\t}\n\n\terr := insta.Logout()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t\treturn\n\t}\n\n\tt.Log(\"status : ok\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"crypto\/rand\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\tMQTT \"github.com\/eclipse\/paho.mqtt.golang\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar usage = `\nUsage here\n`\n\nvar version = \"sample\"\nvar Subscribed map[string]byte\n\nfunc init() {\n\tlog.SetLevel(log.WarnLevel)\n}\n\n\/\/ MQTT operations\nfunc getRandomClientId() string {\n\tconst alphanum = \"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\"\n\tvar bytes = make([]byte, 9)\n\trand.Read(bytes)\n\tfor i, b := range bytes {\n\t\tbytes[i] = alphanum[b%byte(len(alphanum))]\n\t}\n\treturn \"mqttwrk-\" + string(bytes)\n}\n\nfunc Connect(opts *MQTT.ClientOptions) (*MQTT.Client, error) {\n\tm := MQTT.NewClient(opts)\n\n\tlog.Info(\"connecting...\")\n\n\tif token := m.Connect(); token.Wait() && token.Error() != nil {\n\t\treturn m, token.Error()\n\t}\n\treturn m, nil\n}\n\nfunc Publish(m *MQTT.Client, topic string, payload []byte, qos int, retain bool, sync bool) error {\n\ttoken := m.Publish(topic, byte(qos), retain, payload)\n\n\treturn nil\n}\n\nfunc Disconnect(m *MQTT.Client) error {\n\tif m.IsConnected() {\n\t\tm.Disconnect(20)\n\t\tlog.Info(\"client disconnected\")\n\t}\n\treturn nil\n}\n\nfunc SubscribeOnConnect(client *MQTT.Client) {\n\tlog.Infof(\"client connected\")\n\n\tif len(Subscribed) > 0 {\n\t\ttoken := client.SubscribeMultiple(Subscribed, OnMessageReceived)\n\t\ttoken.Wait()\n\t\tif token.Error() != nil {\n\t\t\tlog.Error(token.Error())\n\t\t}\n\t}\n}\n\nfunc ConnectionLost(client *MQTT.Client, reason error) {\n\tlog.Errorf(\"client disconnected: %s\", reason)\n}\n\nfunc OnMessageReceived(client *MQTT.Client, message MQTT.Message) {\n\tlog.Infof(\"topic:%s \/ msg:%s\", message.Topic(), message.Payload())\n\tfmt.Println(string(message.Payload()))\n}\n\n\/\/ connects MQTT broker\nfunc connect(opts *MQTT.ClientOptions, subscribed map[string]byte) (*MQTT.Client, error) {\n\n\tclient := MQTT.NewClient(opts)\n\tclient.Subscribed = subscribed\n\n\topts.SetOnConnectHandler(SubscribeOnConnect)\n\topts.SetConnectionLostHandler(ConnectionLost)\n\n\t_, err := Connect(client)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn client, nil\n}\n\n\/\/ newOption returns ClientOptions via parsing command line options.\nfunc newOption(c *cli.Context) (*MQTT.ClientOptions, error) {\n\topts := MQTT.NewClientOptions()\n\n\thost := c.String(\"host\")\n\tport := c.Int(\"p\")\n\n\tclientId := getRandomClientId()\n\topts.SetClientID(clientId)\n\n\tscheme := \"tcp\"\n\tbrokerUri := fmt.Sprintf(\"%s:\/\/%s:%d\", scheme, host, port)\n\tlog.Infof(\"Broker URI: %s\", brokerUri)\n\topts.AddBroker(brokerUri)\n\n\topts.SetAutoReconnect(true)\n\treturn opts, nil\n}\n\n\/\/ pubsubloop is a func of pub-sub event loop\nfunc pubsubloop(c *cli.Context) error {\n\topts, err := newOption(c)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\tos.Exit(1)\n\t}\n\n\tqos := 0\n\tsubtopic := c.String(\"sub\")\n\tif subtopic == \"\" {\n\t\tlog.Errorf(\"Please specify sub topic\")\n\t\tos.Exit(1)\n\t}\n\tlog.Infof(\"Sub Topic: %s\", subtopic)\n\tpubtopic := c.String(\"pub\")\n\tif pubtopic == \"\" {\n\t\tlog.Errorf(\"Please specify pub topic\")\n\t\tos.Exit(1)\n\t}\n\tlog.Infof(\"Pub Topic: %s\", pubtopic)\n\tretain := c.Bool(\"r\")\n\n\tsubscribed := map[string]byte{\n\t\tsubtopic: byte(0),\n\t}\n\n\tclient, err := connect(c, opts, subscribed)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\tos.Exit(1)\n\t}\n\n\tgo func() {\n\t\t\/\/ Read from Stdin and publish\n\t\tscanner := bufio.NewScanner(os.Stdin)\n\t\tfor scanner.Scan() {\n\t\t\terr = client.Publish(pubtopic, []byte(scanner.Text()), qos, retain, false)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ while loop\n\tfor {\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"mqttworkerforenocean-sample\"\n\tapp.Usage = \"worker -c config-file\"\n\tapp.Version = version\n\n\tapp.Flags = []cli.Flag{\n\t\tcli.IntFlag{\n\t\t\tName: \"port, p\",\n\t\t\tValue: 1883,\n\t\t\tUsage: \"port number of broker\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"host, h\",\n\t\t\tValue: \"localhost\",\n\t\t\tUsage: \"broker hostname\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"u,user\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"provide a username\",\n\t\t\tEnvVar: \"USERNAME\"},\n\t\tcli.StringFlag{\n\t\t\tName: \"P,password\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"password\",\n\t\t\tEnvVar: \"PASSWORD\"},\n\t\tcli.StringFlag{\n\t\t\tName: \"sub\",\n\t\t\tValue: \"prefix\/gateway\/enocean\/publish\",\n\t\t\tUsage: \"subscribe topic\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"pub\",\n\t\t\tValue: \"prefix\/worker\/enocean\/publish\",\n\t\t\tUsage: \"publish parsed data topic\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"q\",\n\t\t\tValue: 0,\n\t\t\tUsage: \"Qos level to publish\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"d\",\n\t\t\tUsage: \"run in verbose mode\",\n\t\t},\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"loop\",\n\t\t\tUsage: \"loop\",\n\t\t\tFlags: app.Flags,\n\t\t\tAction: \"pubsubloop\",\n\t\t},\n\t}\n\n\tapp.Run(os.Args)\n}\n<commit_msg>mqttworker passed compilation<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"crypto\/rand\"\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\tMQTT \"github.com\/eclipse\/paho.mqtt.golang\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar usage = `\ngo run mqttworker.go --host hostname --sub subscribetopic --pub publishtopic\n`\n\nvar version = \"sample\"\nvar Subscribed map[string]byte\n\nfunc init() {\n\tlog.SetLevel(log.WarnLevel)\n}\n\n\/\/ MQTT operations\nfunc getRandomClientId() string {\n\t\/\/ 0, 1, 6, 9 like characters are removed to avoid mis-reading\n\tconst alphanum = \"234578ABCDEFGHJKLMNPQRSTUVWXYZacefghjkrstuvwxyz\"\n\tvar bytes = make([]byte, 9)\n\trand.Read(bytes)\n\tfor i, b := range bytes {\n\t\tbytes[i] = alphanum[b%byte(len(alphanum))]\n\t}\n\treturn \"mqttwrk-\" + string(bytes)\n}\n\n\nfunc Publish(m *MQTT.Client, topic string, payload []byte, qos int, retain bool, sync bool) error {\n\ttoken := m.Publish(topic, byte(qos), retain, payload)\n\ttoken.Wait()\n\n\treturn nil\n}\n\nfunc Disconnect(m *MQTT.Client) error {\n\tif m.IsConnected() {\n\t\tm.Disconnect(20)\n\t\tlog.Info(\"client disconnected\")\n\t}\n\treturn nil\n}\n\nfunc SubscribeOnConnect(client *MQTT.Client) {\n\tlog.Infof(\"client connected\")\n\n\tif len(Subscribed) > 0 {\n\t\ttoken := client.SubscribeMultiple(Subscribed, OnMessageReceived)\n\t\ttoken.Wait()\n\t\tif token.Error() != nil {\n\t\t\tlog.Error(token.Error())\n\t\t}\n\t}\n}\n\nfunc ConnectionLost(client *MQTT.Client, reason error) {\n\tlog.Errorf(\"client disconnected: %s\", reason)\n}\n\nfunc OnMessageReceived(client *MQTT.Client, message MQTT.Message) {\n\tlog.Infof(\"topic:%s \/ msg:%s\", message.Topic(), message.Payload())\n\tfmt.Println(string(message.Payload()))\n}\n\n\/\/ connects MQTT broker\nfunc connect(opts *MQTT.ClientOptions) (*MQTT.Client, error) {\n\n\n\topts.SetOnConnectHandler(SubscribeOnConnect)\n\topts.SetConnectionLostHandler(ConnectionLost)\n\n\tm := MQTT.NewClient(opts)\n\n\tlog.Info(\"connecting...\")\n\n\tif token := m.Connect(); token.Wait() && token.Error() != nil {\n\t\treturn m, token.Error()\n\t}\n\n\treturn m, nil\n}\n\n\/\/ newOption returns ClientOptions via parsing command line options.\nfunc newOption(c *cli.Context) (*MQTT.ClientOptions, error) {\n\topts := MQTT.NewClientOptions()\n\n\thost := c.String(\"host\")\n\tport := c.Int(\"p\")\n\n\tclientId := getRandomClientId()\n\topts.SetClientID(clientId)\n\n\tscheme := \"tcp\"\n\tbrokerUri := fmt.Sprintf(\"%s:\/\/%s:%d\", scheme, host, port)\n\tlog.Infof(\"Broker URI: %s\", brokerUri)\n\topts.AddBroker(brokerUri)\n\n\topts.SetAutoReconnect(true)\n\treturn opts, nil\n}\n\n\/\/ pubsubloop is a func of pub-sub event loop\nfunc pubsubloop(c *cli.Context) error {\n\topts, err := newOption(c)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\tos.Exit(1)\n\t}\n\n\tqos := 0\n\tsubtopic := c.String(\"sub\")\n\tif subtopic == \"\" {\n\t\tlog.Errorf(\"Please specify sub topic\")\n\t\tos.Exit(1)\n\t}\n\tlog.Infof(\"Sub Topic: %s\", subtopic)\n\tpubtopic := c.String(\"pub\")\n\tif pubtopic == \"\" {\n\t\tlog.Errorf(\"Please specify pub topic\")\n\t\tos.Exit(1)\n\t}\n\tlog.Infof(\"Pub Topic: %s\", pubtopic)\n\tretain := c.Bool(\"r\")\n\n\tSubscribed = map[string]byte{\n\t\tsubtopic: byte(0),\n\t}\n\n\tclient, err := connect(opts)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\tos.Exit(1)\n\t}\n\n\tgo func() {\n\t\t\/\/ Read from Stdin and publish\n\t\tscanner := bufio.NewScanner(os.Stdin)\n\t\tfor scanner.Scan() {\n\t\t\terr = Publish(client, pubtopic, []byte(scanner.Text()), qos, retain, false)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ while loop\n\tfor {\n\t\ttime.Sleep(1 * time.Second)\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"mqttworkerforenocean-sample\"\n\tapp.Usage = \"worker -c config-file\"\n\tapp.Version = version\n\n\tcommonFlags := []cli.Flag{\n\t\tcli.IntFlag{\n\t\t\tName: \"port, p\",\n\t\t\tValue: 1883,\n\t\t\tUsage: \"port number of broker\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"host, h\",\n\t\t\tValue: \"localhost\",\n\t\t\tUsage: \"broker hostname\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"u,user\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"provide a username\",\n\t\t\tEnvVar: \"USERNAME\"},\n\t\tcli.StringFlag{\n\t\t\tName: \"P,password\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"password\",\n\t\t\tEnvVar: \"PASSWORD\"},\n\t\tcli.StringFlag{\n\t\t\tName: \"sub\",\n\t\t\tValue: \"prefix\/gateway\/enocean\/publish\",\n\t\t\tUsage: \"subscribe topic\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"pub\",\n\t\t\tValue: \"prefix\/worker\/enocean\/publish\",\n\t\t\tUsage: \"publish parsed data topic\",\n\t\t},\n\t\tcli.IntFlag{\n\t\t\tName: \"q\",\n\t\t\tValue: 0,\n\t\t\tUsage: \"Qos level to publish\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName: \"d\",\n\t\t\tUsage: \"run in verbose mode\",\n\t\t},\n\t}\n\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName: \"loop\",\n\t\t\tUsage: \"loop\",\n\t\t\tFlags:\tcommonFlags,\n\t\t\tAction: pubsubloop,\n\t\t},\n\t}\n\n\tapp.Run(os.Args)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build windows\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"io\"\n\t\"os\"\n\n\t\"github.com\/ying32\/govcl\/vcl\"\n\t\"github.com\/ying32\/govcl\/vcl\/rtl\"\n\t\"github.com\/ying32\/govcl\/vcl\/types\"\n)\n\nvar styleNames = make(map[string]string, 0)\n\nfunc main() {\n\tvcl.Application.SetIconResId(3)\n\tvcl.Application.Initialize()\n\tvcl.Application.SetMainFormOnTaskBar(true)\n\n\tvcl.StyleManager.SetStyle(vcl.StyleManager.LoadFromFile(\"..\\\\..\\\\bin\\\\styles\\\\TabletLight.vsf\"))\n\n\tmainForm := vcl.Application.CreateForm()\n\tmainForm.SetCaption(\"Hello\")\n\tmainForm.SetPosition(types.PoScreenCenter)\n\tmainForm.SetWidth(500)\n\tmainForm.SetHeight(700)\n\n\tvar top int32 = 40\n\t\/\/ TButton\n\tbtn := vcl.NewButton(mainForm)\n\tbtn.SetParent(mainForm)\n\tbtn.SetLeft(10)\n\tbtn.SetTop(top)\n\tbtn.SetWidth(150)\n\tbtn.SetCaption(\"打印已注册的样式\")\n\tbtn.SetOnClick(func(vcl.IObject) {\n\t\tfmt.Println(\"按钮1单击\")\n\t\tstyleNames := vcl.StyleManager.StyleNames()\n\t\tfmt.Println(\"len:\", len(styleNames))\n\t\tfor _, s := range styleNames {\n\t\t\tfmt.Println(\"已注册样式:\", s)\n\t\t}\n\t})\n\n\ttop += btn.Height() + 5\n\n\t\/\/ TEdit\n\tedit := vcl.NewEdit(mainForm)\n\tedit.SetParent(mainForm)\n\tedit.SetLeft(10)\n\tedit.SetTop(top)\n\tedit.SetTextHint(\"提示\")\n\t\/\/edit.SetText(\"文字\")\n\t\/\/\tedit.SetReadOnly(true)\n\tedit.SetOnChange(func(vcl.IObject) {\n\t\tfmt.Println(\"文字改变了\")\n\t})\n\n\ttop += edit.Height() + 5\n\t\/\/ TEdit Password\n\tedit = vcl.NewEdit(mainForm)\n\tedit.SetParent(mainForm)\n\tedit.SetLeft(10)\n\tedit.SetTop(top)\n\tedit.SetText(\"文字\")\n\tedit.SetPasswordChar('*')\n\n\ttop += edit.Height() + 5\n\n\t\/\/ TLabel\n\tlbl := vcl.NewLabel(mainForm)\n\tlbl.SetParent(mainForm)\n\tlbl.SetLeft(10)\n\tlbl.SetTop(top)\n\tlbl.SetCaption(\"标签1\")\n\t\/\/ 解除样式对Label Font的Hook\n\tlbl.SetStyleElements(rtl.Include(0, types.SeClient, types.SeBorder))\n\tlbl.Font().SetColor(255)\n\n\ttop += lbl.Height() + 5\n\n\t\/\/ TCheckBox\n\tchk := vcl.NewCheckBox(mainForm)\n\tchk.SetParent(mainForm)\n\tchk.SetLeft(10)\n\tchk.SetTop(top)\n\tchk.SetCaption(\"选择框1\")\n\tchk.SetOnClick(func(vcl.IObject) {\n\t\tfmt.Println(\"checked: \", chk.Checked())\n\t})\n\n\t\/\/ TStatusBar\n\tstat := vcl.NewStatusBar(mainForm)\n\tstat.SetParent(mainForm)\n\t\/\/stat.SetSizeGrip(false) \/\/ 右解是否有可调的\n\tspnl := stat.Panels().Add()\n\tspnl.SetText(\"第一个\")\n\tspnl.SetWidth(80)\n\n\tspnl = stat.Panels().Add()\n\tspnl.SetText(\"第二个\")\n\tspnl.SetWidth(80)\n\n\t\/\/ TToolBar\n\ttlbar := vcl.NewToolBar(mainForm)\n\ttlbar.SetParent(mainForm)\n\ttlbar.SetShowCaptions(true)\n\n\t\/\/ 倒过来创建\n\ttlbtn := vcl.NewToolButton(mainForm)\n\ttlbtn.SetParent(tlbar)\n\ttlbtn.SetCaption(\"2\")\n\ttlbtn.SetStyle(types.TbsDropDown)\n\n\ttlbtn = vcl.NewToolButton(mainForm)\n\ttlbtn.SetParent(tlbar)\n\ttlbtn.SetStyle(types.TbsSeparator)\n\n\ttlbtn = vcl.NewToolButton(mainForm)\n\ttlbtn.SetParent(tlbar)\n\ttlbtn.SetCaption(\"1\")\n\n\ttop += chk.Height() + 5\n\t\/\/ TRadioButton\n\trd := vcl.NewRadioButton(mainForm)\n\trd.SetParent(mainForm)\n\trd.SetLeft(10)\n\trd.SetTop(top)\n\trd.SetCaption(\"选项1\")\n\n\tvar left int32 = rd.Left() + rd.Width() + 5\n\n\trd = vcl.NewRadioButton(mainForm)\n\trd.SetParent(mainForm)\n\trd.SetLeft(left)\n\trd.SetTop(top)\n\trd.SetCaption(\"选项2\")\n\n\ttop += rd.Height() + 5\n\t\/\/ TMemo\n\tmmo := vcl.NewMemo(mainForm)\n\tmmo.SetParent(mainForm)\n\tmmo.SetBounds(10, top, 167, 50)\n\t\/\/ mmo.Text()\n\tmmo.Lines().Add(\"1\")\n\tmmo.Lines().Add(\"2\")\n\n\ttop += mmo.Height() + 5\n\t\/\/ TComboBox\n\tcb := vcl.NewComboBox(mainForm)\n\tcb.SetParent(mainForm)\n\tcb.SetLeft(10)\n\tcb.SetTop(top)\n\tcb.SetStyle(types.CsDropDownList)\n\tcb.Items().Add(\"1\")\n\tcb.Items().Add(\"2\")\n\tcb.Items().Add(\"3\")\n\tcb.SetItemIndex(0)\n\tcb.SetOnChange(func(vcl.IObject) {\n\t\tif cb.ItemIndex() != -1 {\n\t\t\tfmt.Println(cb.Items().Strings(cb.ItemIndex()))\n\t\t}\n\t})\n\n\t\/\/ TListBox\n\ttop += cb.Height() + 5\n\tlst := vcl.NewListBox(mainForm)\n\tlst.SetParent(mainForm)\n\tlst.SetBounds(10, top, 167, 50)\n\tlst.Items().Add(\"1\")\n\tlst.Items().Add(\"2\")\n\tlst.Items().Add(\"3\")\n\n\t\/\/ TPanel\n\ttop += lst.Height() + 5\n\tpnl := vcl.NewPanel(mainForm)\n\tpnl.SetParent(mainForm)\n\tpnl.SetCaption(\"fff\")\n\t\/\/ pnl.SetShowCaption(false)\n\tpnl.SetBounds(10, top, 167, 50)\n\n\t\/\/ color\n\ttop += pnl.Height() + 5\n\tclr := vcl.NewColorBox(mainForm)\n\tclr.SetParent(mainForm)\n\tclr.SetLeft(10)\n\tclr.SetTop(top)\n\tclr.SetOnChange(func(vcl.IObject) {\n\t\tif clr.ItemIndex() != -1 {\n\t\t\tlbl.Font().SetColor(clr.Selected())\n\t\t}\n\t})\n\n\t\/\/ TPageControl\n\ttop += clr.Height() + 5\n\tpgc := vcl.NewPageControl(mainForm)\n\tpgc.SetParent(mainForm)\n\tpgc.SetBounds(10, top, 167, 100)\n\tpgc.SetOnChange(func(vcl.IObject) {\n\t\tfmt.Println(\"当前索引:\", pgc.ActivePageIndex())\n\t})\n\n\tsheet := vcl.NewTabSheet(mainForm)\n\tsheet.SetPageControl(pgc)\n\tsheet.SetCaption(\"一\")\n\tbtn = vcl.NewButton(mainForm)\n\tbtn.SetParent(sheet)\n\tbtn.SetLeft(10)\n\tbtn.SetTop(10)\n\tbtn.SetCaption(\"按钮1\")\n\n\tsheet = vcl.NewTabSheet(mainForm)\n\tsheet.SetPageControl(pgc)\n\tsheet.SetCaption(\"二\")\n\tbtn = vcl.NewButton(mainForm)\n\tbtn.SetParent(sheet)\n\tbtn.SetLeft(10)\n\tbtn.SetTop(10)\n\tbtn.SetCaption(\"按钮2\")\n\n\tsheet = vcl.NewTabSheet(mainForm)\n\tsheet.SetPageControl(pgc)\n\tsheet.SetCaption(\"三\")\n\tbtn = vcl.NewButton(mainForm)\n\tbtn.SetParent(sheet)\n\tbtn.SetLeft(10)\n\tbtn.SetTop(10)\n\tbtn.SetCaption(\"按钮3\")\n\n\t\/\/ TImage\n\ttop += pgc.Height() + 5\n\timg := vcl.NewImage(mainForm)\n\timg.SetBounds(10, top, 167, 97)\n\timg.SetParent(mainForm)\n\timg.Picture().LoadFromFile(\"1.jpg\")\n\t\/\/img.SetStretch(true)\n\timg.SetProportional(true)\n\n\tleft = 210\n\ttop = 10\n\t\/\/ TTrackBar\n\ttrkbar := vcl.NewTrackBar(mainForm)\n\ttrkbar.SetParent(mainForm)\n\ttrkbar.SetBounds(left, top, 167, 20)\n\ttrkbar.SetMax(100)\n\ttrkbar.SetMin(0)\n\ttrkbar.SetPosition(50)\n\n\t\/\/ TProgressBar\n\ttop += trkbar.Height() + 10\n\tprgbar := vcl.NewProgressBar(mainForm)\n\tprgbar.SetParent(mainForm)\n\tprgbar.SetBounds(left, top, 10, 167)\n\tprgbar.SetMax(100)\n\tprgbar.SetMin(0)\n\tprgbar.SetPosition(1)\n\tprgbar.SetOrientation(types.PbVertical)\n\n\ttrkbar.SetOnChange(func(vcl.IObject) {\n\t\tprgbar.SetPosition(trkbar.Position())\n\t})\n\n\tstylelist := vcl.NewListBox(mainForm)\n\tstylelist.SetParent(mainForm)\n\tstylelist.SetLeft(prgbar.Left() + prgbar.Width() + 10)\n\tstylelist.SetTop(prgbar.Top())\n\tstylelist.SetHeight(prgbar.Height())\n\tstylelist.SetWidth(240)\n\tstylelist.SetOnDblClick(func(vcl.IObject) {\n\t\tindex := stylelist.ItemIndex()\n\t\tif index != -1 {\n\t\t\t\/\/ 这里直接替换是因为原本文件名就是样式名,只是简单下,实际样式名要通过相关函数取得,但这里没有给出相关函数\n\t\t\t\/\/styleName := strings.Replace(stylelist.Items().Strings(stylelist.ItemIndex()), \".vsf\", \"\", 1)\n\t\t\t\/\/styleHandle := vcl.StyleManager.Style(styleName)\n\t\t\ttext := stylelist.Items().Strings(index)\n\t\t\tif name, ok := styleNames[text]; ok {\n\t\t\t\tvcl.StyleManager.SetStyle2(name)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstyleFileName := \"..\\\\..\\\\bin\\\\styles\\\\\" + text\n\t\t\tif rtl.FileExists(styleFileName) {\n\t\t\t\tif ok, name := vcl.StyleManager.IsValidStyle2(styleFileName); ok {\n\t\t\t\t\tstyleNames[text] = name\n\t\t\t\t\tvcl.StyleManager.SetStyleFromFileName(styleFileName)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"样式无效\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"样式文件不存在\")\n\t\t\t}\n\t\t}\n\t})\n\taddStyleFileName(stylelist)\n\n\ttop += prgbar.Height() + 10\n\n\tdtp := vcl.NewDateTimePicker(mainForm)\n\tdtp.SetParent(mainForm)\n\tdtp.SetBounds(left, top, 167, 25)\n\tdtp.SetFormat(\"yyyy-MM-dd HH:mm:ss\")\n\n\t\/\/ 在xp下应用了style需要解除一个样式属性,应该是冲突引起的\n\tdtp.SetStyleElements(rtl.Include(0, types.SeFont))\n\n\ttop += dtp.Height() + 10\n\n\tmdtp := vcl.NewMonthCalendar(mainForm)\n\tmdtp.SetParent(mainForm)\n\tmdtp.SetBounds(left, top, 250, 250)\n\tmdtp.SetOnClick(func(vcl.IObject) {\n\t\tfmt.Println(mdtp.Date())\n\t})\n\n\ttop += mdtp.Height() + 10\n\tdtp.SetDateTime(time.Now().Add(time.Hour * 48))\n\tdtp.SetDate(time.Now().AddDate(1, 0, 0))\n\tfmt.Println(\"time: \", mdtp.Date())\n\n\tbtn = vcl.NewButton(mainForm)\n\tbtn.SetParent(mainForm)\n\tbtn.SetLeft(left)\n\tbtn.SetTop(top)\n\tbtn.SetCaption(\"改变日期\")\n\tbtn.SetOnClick(func(vcl.IObject) {\n\t\tmdtp.SetDate(time.Now().AddDate(-20, 0, 0))\n\t})\n\n\t\/\/ 样式已改变\n\tmainForm.SetOnStyleChanged(func(sender vcl.IObject) {\n\t\tfmt.Println(\"样式已经改变\")\n\t\tmainForm.SetAllowDropFiles(false)\n\t\tmainForm.SetAllowDropFiles(true)\n\t})\n\n\tmainForm.SetAllowDropFiles(true)\n\tmainForm.SetOnDropFiles(func(sender vcl.IObject, aFileNames []string) {\n\t\tfmt.Println(\"当前拖放文件事件执行,文件数:\", len(aFileNames))\n\t\tfor i, s := range aFileNames {\n\t\t\tfmt.Println(\"index:\", i, \", filename:\", s)\n\t\t}\n\t})\n\n\t\/\/ run\n\tvcl.Application.Run()\n}\n\nfunc addStyleFileName(list *vcl.TListBox) {\n\tfd, err := os.Open(\"..\\\\..\\\\bin\\\\styles\\\\\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tif os.IsNotExist(err) {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\tdefer fd.Close()\n\tlist.Items().BeginUpdate()\n\tdefer list.Items().EndUpdate()\n\tfor {\n\t\tfiles, err := fd.Readdir(100)\n\t\tfor _, f := range files {\n\t\t\tif !f.IsDir() {\n\t\t\t\tlist.Items().Add(f.Name())\n\t\t\t}\n\t\t}\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif len(files) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n}\n<commit_msg>update style example<commit_after>\/\/ +build windows\n\n\/\/ +build windows\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"io\"\n\t\"os\"\n\n\t\"github.com\/ying32\/govcl\/vcl\"\n\t\"github.com\/ying32\/govcl\/vcl\/rtl\"\n\t\"github.com\/ying32\/govcl\/vcl\/types\"\n)\n\nvar styleNames = make(map[string]string, 0)\n\nfunc main() {\n\n\tif rtl.LcLLoaded() {\n\t\tvcl.ShowMessage(\"样式不支持liblcl。\")\n\t\treturn\n\t}\n\n\tvcl.Application.SetIconResId(3)\n\tvcl.Application.Initialize()\n\tvcl.Application.SetMainFormOnTaskBar(true)\n\n\tvcl.StyleManager.SetStyle(vcl.StyleManager.LoadFromFile(\"..\\\\..\\\\bin\\\\styles\\\\TabletLight.vsf\"))\n\n\tmainForm := vcl.Application.CreateForm()\n\tmainForm.SetCaption(\"Hello\")\n\tmainForm.SetPosition(types.PoScreenCenter)\n\tmainForm.SetWidth(500)\n\tmainForm.SetHeight(700)\n\n\tvar top int32 = 40\n\t\/\/ TButton\n\tbtn := vcl.NewButton(mainForm)\n\tbtn.SetParent(mainForm)\n\tbtn.SetLeft(10)\n\tbtn.SetTop(top)\n\tbtn.SetWidth(150)\n\tbtn.SetCaption(\"打印已注册的样式\")\n\tbtn.SetOnClick(func(vcl.IObject) {\n\t\tfmt.Println(\"按钮1单击\")\n\t\tstyleNames := vcl.StyleManager.StyleNames()\n\t\tfmt.Println(\"len:\", len(styleNames))\n\t\tfor _, s := range styleNames {\n\t\t\tfmt.Println(\"已注册样式:\", s)\n\t\t}\n\t})\n\n\ttop += btn.Height() + 5\n\n\t\/\/ TEdit\n\tedit := vcl.NewEdit(mainForm)\n\tedit.SetParent(mainForm)\n\tedit.SetLeft(10)\n\tedit.SetTop(top)\n\tedit.SetTextHint(\"提示\")\n\t\/\/edit.SetText(\"文字\")\n\t\/\/\tedit.SetReadOnly(true)\n\tedit.SetOnChange(func(vcl.IObject) {\n\t\tfmt.Println(\"文字改变了\")\n\t})\n\n\ttop += edit.Height() + 5\n\t\/\/ TEdit Password\n\tedit = vcl.NewEdit(mainForm)\n\tedit.SetParent(mainForm)\n\tedit.SetLeft(10)\n\tedit.SetTop(top)\n\tedit.SetText(\"文字\")\n\tedit.SetPasswordChar('*')\n\n\ttop += edit.Height() + 5\n\n\t\/\/ TLabel\n\tlbl := vcl.NewLabel(mainForm)\n\tlbl.SetParent(mainForm)\n\tlbl.SetLeft(10)\n\tlbl.SetTop(top)\n\tlbl.SetCaption(\"标签1\")\n\t\/\/ 解除样式对Label Font的Hook\n\tlbl.SetStyleElements(rtl.Include(0, types.SeClient, types.SeBorder))\n\tlbl.Font().SetColor(255)\n\n\ttop += lbl.Height() + 5\n\n\t\/\/ TCheckBox\n\tchk := vcl.NewCheckBox(mainForm)\n\tchk.SetParent(mainForm)\n\tchk.SetLeft(10)\n\tchk.SetTop(top)\n\tchk.SetCaption(\"选择框1\")\n\tchk.SetOnClick(func(vcl.IObject) {\n\t\tfmt.Println(\"checked: \", chk.Checked())\n\t})\n\n\t\/\/ TStatusBar\n\tstat := vcl.NewStatusBar(mainForm)\n\tstat.SetParent(mainForm)\n\t\/\/stat.SetSizeGrip(false) \/\/ 右解是否有可调的\n\tspnl := stat.Panels().Add()\n\tspnl.SetText(\"第一个\")\n\tspnl.SetWidth(80)\n\n\tspnl = stat.Panels().Add()\n\tspnl.SetText(\"第二个\")\n\tspnl.SetWidth(80)\n\n\t\/\/ TToolBar\n\ttlbar := vcl.NewToolBar(mainForm)\n\ttlbar.SetParent(mainForm)\n\ttlbar.SetShowCaptions(true)\n\n\t\/\/ 倒过来创建\n\ttlbtn := vcl.NewToolButton(mainForm)\n\ttlbtn.SetParent(tlbar)\n\ttlbtn.SetCaption(\"2\")\n\ttlbtn.SetStyle(types.TbsDropDown)\n\n\ttlbtn = vcl.NewToolButton(mainForm)\n\ttlbtn.SetParent(tlbar)\n\ttlbtn.SetStyle(types.TbsSeparator)\n\n\ttlbtn = vcl.NewToolButton(mainForm)\n\ttlbtn.SetParent(tlbar)\n\ttlbtn.SetCaption(\"1\")\n\n\ttop += chk.Height() + 5\n\t\/\/ TRadioButton\n\trd := vcl.NewRadioButton(mainForm)\n\trd.SetParent(mainForm)\n\trd.SetLeft(10)\n\trd.SetTop(top)\n\trd.SetCaption(\"选项1\")\n\n\tvar left int32 = rd.Left() + rd.Width() + 5\n\n\trd = vcl.NewRadioButton(mainForm)\n\trd.SetParent(mainForm)\n\trd.SetLeft(left)\n\trd.SetTop(top)\n\trd.SetCaption(\"选项2\")\n\n\ttop += rd.Height() + 5\n\t\/\/ TMemo\n\tmmo := vcl.NewMemo(mainForm)\n\tmmo.SetParent(mainForm)\n\tmmo.SetBounds(10, top, 167, 50)\n\t\/\/ mmo.Text()\n\tmmo.Lines().Add(\"1\")\n\tmmo.Lines().Add(\"2\")\n\n\ttop += mmo.Height() + 5\n\t\/\/ TComboBox\n\tcb := vcl.NewComboBox(mainForm)\n\tcb.SetParent(mainForm)\n\tcb.SetLeft(10)\n\tcb.SetTop(top)\n\tcb.SetStyle(types.CsDropDownList)\n\tcb.Items().Add(\"1\")\n\tcb.Items().Add(\"2\")\n\tcb.Items().Add(\"3\")\n\tcb.SetItemIndex(0)\n\tcb.SetOnChange(func(vcl.IObject) {\n\t\tif cb.ItemIndex() != -1 {\n\t\t\tfmt.Println(cb.Items().Strings(cb.ItemIndex()))\n\t\t}\n\t})\n\n\t\/\/ TListBox\n\ttop += cb.Height() + 5\n\tlst := vcl.NewListBox(mainForm)\n\tlst.SetParent(mainForm)\n\tlst.SetBounds(10, top, 167, 50)\n\tlst.Items().Add(\"1\")\n\tlst.Items().Add(\"2\")\n\tlst.Items().Add(\"3\")\n\n\t\/\/ TPanel\n\ttop += lst.Height() + 5\n\tpnl := vcl.NewPanel(mainForm)\n\tpnl.SetParent(mainForm)\n\tpnl.SetCaption(\"fff\")\n\t\/\/ pnl.SetShowCaption(false)\n\tpnl.SetBounds(10, top, 167, 50)\n\n\t\/\/ color\n\ttop += pnl.Height() + 5\n\tclr := vcl.NewColorBox(mainForm)\n\tclr.SetParent(mainForm)\n\tclr.SetLeft(10)\n\tclr.SetTop(top)\n\tclr.SetOnChange(func(vcl.IObject) {\n\t\tif clr.ItemIndex() != -1 {\n\t\t\tlbl.Font().SetColor(clr.Selected())\n\t\t}\n\t})\n\n\t\/\/ TPageControl\n\ttop += clr.Height() + 5\n\tpgc := vcl.NewPageControl(mainForm)\n\tpgc.SetParent(mainForm)\n\tpgc.SetBounds(10, top, 167, 100)\n\tpgc.SetOnChange(func(vcl.IObject) {\n\t\tfmt.Println(\"当前索引:\", pgc.ActivePageIndex())\n\t})\n\n\tsheet := vcl.NewTabSheet(mainForm)\n\tsheet.SetPageControl(pgc)\n\tsheet.SetCaption(\"一\")\n\tbtn = vcl.NewButton(mainForm)\n\tbtn.SetParent(sheet)\n\tbtn.SetLeft(10)\n\tbtn.SetTop(10)\n\tbtn.SetCaption(\"按钮1\")\n\n\tsheet = vcl.NewTabSheet(mainForm)\n\tsheet.SetPageControl(pgc)\n\tsheet.SetCaption(\"二\")\n\tbtn = vcl.NewButton(mainForm)\n\tbtn.SetParent(sheet)\n\tbtn.SetLeft(10)\n\tbtn.SetTop(10)\n\tbtn.SetCaption(\"按钮2\")\n\n\tsheet = vcl.NewTabSheet(mainForm)\n\tsheet.SetPageControl(pgc)\n\tsheet.SetCaption(\"三\")\n\tbtn = vcl.NewButton(mainForm)\n\tbtn.SetParent(sheet)\n\tbtn.SetLeft(10)\n\tbtn.SetTop(10)\n\tbtn.SetCaption(\"按钮3\")\n\n\t\/\/ TImage\n\ttop += pgc.Height() + 5\n\timg := vcl.NewImage(mainForm)\n\timg.SetBounds(10, top, 167, 97)\n\timg.SetParent(mainForm)\n\timg.Picture().LoadFromFile(\"1.jpg\")\n\t\/\/img.SetStretch(true)\n\timg.SetProportional(true)\n\n\tleft = 210\n\ttop = 10\n\t\/\/ TTrackBar\n\ttrkbar := vcl.NewTrackBar(mainForm)\n\ttrkbar.SetParent(mainForm)\n\ttrkbar.SetBounds(left, top, 167, 20)\n\ttrkbar.SetMax(100)\n\ttrkbar.SetMin(0)\n\ttrkbar.SetPosition(50)\n\n\t\/\/ TProgressBar\n\ttop += trkbar.Height() + 10\n\tprgbar := vcl.NewProgressBar(mainForm)\n\tprgbar.SetParent(mainForm)\n\tprgbar.SetBounds(left, top, 10, 167)\n\tprgbar.SetMax(100)\n\tprgbar.SetMin(0)\n\tprgbar.SetPosition(1)\n\tprgbar.SetOrientation(types.PbVertical)\n\n\ttrkbar.SetOnChange(func(vcl.IObject) {\n\t\tprgbar.SetPosition(trkbar.Position())\n\t})\n\n\tstylelist := vcl.NewListBox(mainForm)\n\tstylelist.SetParent(mainForm)\n\tstylelist.SetLeft(prgbar.Left() + prgbar.Width() + 10)\n\tstylelist.SetTop(prgbar.Top())\n\tstylelist.SetHeight(prgbar.Height())\n\tstylelist.SetWidth(240)\n\tstylelist.SetOnDblClick(func(vcl.IObject) {\n\t\tindex := stylelist.ItemIndex()\n\t\tif index != -1 {\n\t\t\t\/\/ 这里直接替换是因为原本文件名就是样式名,只是简单下,实际样式名要通过相关函数取得,但这里没有给出相关函数\n\t\t\t\/\/styleName := strings.Replace(stylelist.Items().Strings(stylelist.ItemIndex()), \".vsf\", \"\", 1)\n\t\t\t\/\/styleHandle := vcl.StyleManager.Style(styleName)\n\t\t\ttext := stylelist.Items().Strings(index)\n\t\t\tif name, ok := styleNames[text]; ok {\n\t\t\t\tvcl.StyleManager.SetStyle2(name)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tstyleFileName := \"..\\\\..\\\\bin\\\\styles\\\\\" + text\n\t\t\tif rtl.FileExists(styleFileName) {\n\t\t\t\tif ok, name := vcl.StyleManager.IsValidStyle2(styleFileName); ok {\n\t\t\t\t\tstyleNames[text] = name\n\t\t\t\t\tvcl.StyleManager.SetStyleFromFileName(styleFileName)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Println(\"样式无效\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfmt.Println(\"样式文件不存在\")\n\t\t\t}\n\t\t}\n\t})\n\taddStyleFileName(stylelist)\n\n\ttop += prgbar.Height() + 10\n\n\tdtp := vcl.NewDateTimePicker(mainForm)\n\tdtp.SetParent(mainForm)\n\tdtp.SetBounds(left, top, 167, 25)\n\tdtp.SetFormat(\"yyyy-MM-dd HH:mm:ss\")\n\n\t\/\/ 在xp下应用了style需要解除一个样式属性,应该是冲突引起的\n\tdtp.SetStyleElements(rtl.Include(0, types.SeFont))\n\n\ttop += dtp.Height() + 10\n\n\tmdtp := vcl.NewMonthCalendar(mainForm)\n\tmdtp.SetParent(mainForm)\n\tmdtp.SetBounds(left, top, 250, 250)\n\tmdtp.SetOnClick(func(vcl.IObject) {\n\t\tfmt.Println(mdtp.Date())\n\t})\n\n\ttop += mdtp.Height() + 10\n\tdtp.SetDateTime(time.Now().Add(time.Hour * 48))\n\tdtp.SetDate(time.Now().AddDate(1, 0, 0))\n\tfmt.Println(\"time: \", mdtp.Date())\n\n\tbtn = vcl.NewButton(mainForm)\n\tbtn.SetParent(mainForm)\n\tbtn.SetLeft(left)\n\tbtn.SetTop(top)\n\tbtn.SetCaption(\"改变日期\")\n\tbtn.SetOnClick(func(vcl.IObject) {\n\t\tmdtp.SetDate(time.Now().AddDate(-20, 0, 0))\n\t})\n\n\t\/\/ 样式已改变\n\tmainForm.SetOnStyleChanged(func(sender vcl.IObject) {\n\t\tfmt.Println(\"样式已经改变\")\n\t\tmainForm.SetAllowDropFiles(false)\n\t\tmainForm.SetAllowDropFiles(true)\n\t})\n\n\tmainForm.SetAllowDropFiles(true)\n\tmainForm.SetOnDropFiles(func(sender vcl.IObject, aFileNames []string) {\n\t\tfmt.Println(\"当前拖放文件事件执行,文件数:\", len(aFileNames))\n\t\tfor i, s := range aFileNames {\n\t\t\tfmt.Println(\"index:\", i, \", filename:\", s)\n\t\t}\n\t})\n\n\t\/\/ run\n\tvcl.Application.Run()\n}\n\nfunc addStyleFileName(list *vcl.TListBox) {\n\tfd, err := os.Open(\"..\\\\..\\\\bin\\\\styles\\\\\")\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tif os.IsNotExist(err) {\n\t\t\treturn\n\t\t}\n\t\treturn\n\t}\n\tdefer fd.Close()\n\tlist.Items().BeginUpdate()\n\tdefer list.Items().EndUpdate()\n\tfor {\n\t\tfiles, err := fd.Readdir(100)\n\t\tfor _, f := range files {\n\t\t\tif !f.IsDir() {\n\t\t\t\tlist.Items().Add(f.Name())\n\t\t\t}\n\t\t}\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif len(files) == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/Author Sarah Carroll\n\/\/Date :20\/09\/2017\n\n\/\/ Adapter from: https:\/\/tour.golang.org\/welcome\/1\n\npackage main\n\nimport \"fmt\"\n\nfunc main() {\n\tfmt.Println(\"こんにちは世界\")\n}<commit_msg>additional comments<commit_after>\/\/Author Sarah Carroll\n\/\/g00330821\n\n\/\/ Adapter from: https:\/\/tour.golang.org\/welcome\/1\n\npackage main\n\n\/\/An import path is a string that uniquely identifies a package\nimport \"fmt\"\n\nfunc main() {\n\tfmt.Println(\"こんにちは世界\")\n}<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\tterm \"github.com\/nsf\/termbox-go\"\n\t\"strconv\"\n\t\"unicode\/utf8\"\n)\n\ntype GotoLineMode struct {\n\tlinestr string\n}\n\nfunc (g *GotoLineMode) Handle(ev term.Event, cursor *Cursor, mode *string) {\n\tswitch ev.Key {\n\tcase term.KeyCtrlK:\n\t\tg.linestr = \"\"\n\t\t*mode = \"normal\"\n\tcase term.KeyEnter:\n\t\tn, err := strconv.Atoi(g.linestr)\n\t\tif err != nil {\n\t\t\tpanic(\"cannot convert gotoline string to int\")\n\t\t}\n\t\t\/\/ line number starts with 1.\n\t\t\/\/ but internally it starts with 0.\n\t\t\/\/ so we should n - 1, except 0 will treated as 0.\n\t\tif n != 0 {\n\t\t\tn--\n\t\t}\n\t\tcursor.GotoLine(n)\n\t\tg.linestr = \"\"\n\t\t*mode = \"normal\"\n\tcase term.KeyBackspace, term.KeyBackspace2:\n\t\tif g.linestr == \"\" {\n\t\t\treturn\n\t\t}\n\t\t_, rlen := utf8.DecodeLastRuneInString(g.linestr)\n\t\tg.linestr = g.linestr[:len(g.linestr)-rlen]\n\tdefault:\n\t\tif ev.Ch != 0 {\n\t\t\t_, err := strconv.Atoi(string(ev.Ch))\n\t\t\tif err == nil {\n\t\t\t\tg.linestr += string(ev.Ch)\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>fix 'cannot convert gotoline string to int' panic<commit_after>package main\n\nimport (\n\tterm \"github.com\/nsf\/termbox-go\"\n\t\"strconv\"\n\t\"unicode\/utf8\"\n)\n\ntype GotoLineMode struct {\n\tlinestr string\n}\n\nfunc (g *GotoLineMode) Handle(ev term.Event, cursor *Cursor, mode *string) {\n\tswitch ev.Key {\n\tcase term.KeyCtrlK:\n\t\tg.linestr = \"\"\n\t\t*mode = \"normal\"\n\tcase term.KeyEnter:\n\t\tif g.linestr == \"\" {\n\t\t\t*mode = \"normal\"\n\t\t\treturn\n\t\t}\n\t\tn, err := strconv.Atoi(g.linestr)\n\t\tif err != nil {\n\t\t\tpanic(\"cannot convert gotoline string to int\")\n\t\t}\n\t\t\/\/ line number starts with 1.\n\t\t\/\/ but internally it starts with 0.\n\t\t\/\/ so we should n - 1, except 0 will treated as 0.\n\t\tif n != 0 {\n\t\t\tn--\n\t\t}\n\t\tcursor.GotoLine(n)\n\t\tg.linestr = \"\"\n\t\t*mode = \"normal\"\n\tcase term.KeyBackspace, term.KeyBackspace2:\n\t\tif g.linestr == \"\" {\n\t\t\treturn\n\t\t}\n\t\t_, rlen := utf8.DecodeLastRuneInString(g.linestr)\n\t\tg.linestr = g.linestr[:len(g.linestr)-rlen]\n\tdefault:\n\t\tif ev.Ch != 0 {\n\t\t\t_, err := strconv.Atoi(string(ev.Ch))\n\t\t\tif err == nil {\n\t\t\t\tg.linestr += string(ev.Ch)\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage vm\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/vmware\/govmomi\/find\"\n\t\"github.com\/vmware\/govmomi\/govc\/cli\"\n\t\"github.com\/vmware\/govmomi\/govc\/flags\"\n\t\"github.com\/vmware\/govmomi\/property\"\n\t\"github.com\/vmware\/govmomi\/vim25\/mo\"\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype info struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n\t*flags.SearchFlag\n\n\tWaitForIP bool\n\tExtraConfig bool\n}\n\nfunc init() {\n\tcli.Register(\"vm.info\", &info{})\n}\n\nfunc (cmd *info) Register(f *flag.FlagSet) {\n\tcmd.SearchFlag = flags.NewSearchFlag(flags.SearchVirtualMachines)\n\n\tf.BoolVar(&cmd.WaitForIP, \"waitip\", false, \"Wait for VM to acquire IP address\")\n\tf.BoolVar(&cmd.ExtraConfig, \"e\", false, \"Show ExtraConfig\")\n}\n\nfunc (cmd *info) Process() error { return nil }\n\nfunc (cmd *info) Run(f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvms, err := cmd.VirtualMachines(f.Args())\n\tif err != nil {\n\t\tif _, ok := err.(*find.NotFoundError); ok {\n\t\t\t\/\/ Continue with empty VM slice\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar res infoResult\n\tvar props []string\n\n\tif cmd.OutputFlag.JSON {\n\t\tprops = nil \/\/ Load everything\n\t} else {\n\t\tprops = []string{\"summary\", \"guest.ipAddress\"} \/\/ Load summary\n\t\tif cmd.ExtraConfig {\n\t\t\tprops = append(props, \"config.extraConfig\")\n\t\t}\n\t}\n\n\tfor _, vm := range vms {\n\t\tfor {\n\t\t\tvar mvm mo.VirtualMachine\n\n\t\t\tpc := property.DefaultCollector(c)\n\t\t\terr = pc.RetrieveOne(context.TODO(), vm.Reference(), props, &mvm)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif cmd.WaitForIP && mvm.Guest.IpAddress == \"\" {\n\t\t\t\t_, err = vm.WaitForIP(context.TODO())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t\/\/ Reload virtual machine object\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tres.VirtualMachines = append(res.VirtualMachines, mvm)\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn cmd.WriteResult(&res)\n}\n\ntype infoResult struct {\n\tVirtualMachines []mo.VirtualMachine\n}\n\nfunc (r *infoResult) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)\n\n\tfor _, vm := range r.VirtualMachines {\n\t\ts := vm.Summary\n\n\t\tfmt.Fprintf(tw, \"Name:\\t%s\\n\", s.Config.Name)\n\t\tfmt.Fprintf(tw, \" UUID:\\t%s\\n\", s.Config.Uuid)\n\t\tfmt.Fprintf(tw, \" Guest name:\\t%s\\n\", s.Config.GuestFullName)\n\t\tfmt.Fprintf(tw, \" Memory:\\t%dMB\\n\", s.Config.MemorySizeMB)\n\t\tfmt.Fprintf(tw, \" CPU:\\t%d vCPU(s)\\n\", s.Config.NumCpu)\n\t\tfmt.Fprintf(tw, \" Power state:\\t%s\\n\", s.Runtime.PowerState)\n\t\tfmt.Fprintf(tw, \" Boot time:\\t%s\\n\", s.Runtime.BootTime)\n\t\tfmt.Fprintf(tw, \" IP address:\\t%s\\n\", s.Guest.IpAddress)\n\t\tif vm.Config != nil && vm.Config.ExtraConfig != nil {\n\t\t\tfmt.Fprintf(tw, \" ExtraConfig:\\n\")\n\t\t\tfor _, v := range vm.Config.ExtraConfig {\n\t\t\t\tfmt.Fprintf(tw, \" %s:\\t%s\\n\", v.GetOptionValue().Key, v.GetOptionValue().Value)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn tw.Flush()\n}\n<commit_msg>Add Host information to vm.info<commit_after>\/*\nCopyright (c) 2014 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage vm\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/vmware\/govmomi\/find\"\n\t\"github.com\/vmware\/govmomi\/govc\/cli\"\n\t\"github.com\/vmware\/govmomi\/govc\/flags\"\n\t\"github.com\/vmware\/govmomi\/object\"\n\t\"github.com\/vmware\/govmomi\/property\"\n\t\"github.com\/vmware\/govmomi\/vim25\/mo\"\n\t\"golang.org\/x\/net\/context\"\n)\n\ntype info struct {\n\t*flags.ClientFlag\n\t*flags.OutputFlag\n\t*flags.SearchFlag\n\n\tWaitForIP bool\n\tExtraConfig bool\n}\n\nfunc init() {\n\tcli.Register(\"vm.info\", &info{})\n}\n\nfunc (cmd *info) Register(f *flag.FlagSet) {\n\tcmd.SearchFlag = flags.NewSearchFlag(flags.SearchVirtualMachines)\n\n\tf.BoolVar(&cmd.WaitForIP, \"waitip\", false, \"Wait for VM to acquire IP address\")\n\tf.BoolVar(&cmd.ExtraConfig, \"e\", false, \"Show ExtraConfig\")\n}\n\nfunc (cmd *info) Process() error { return nil }\n\nfunc (cmd *info) Run(f *flag.FlagSet) error {\n\tc, err := cmd.Client()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvms, err := cmd.VirtualMachines(f.Args())\n\tif err != nil {\n\t\tif _, ok := err.(*find.NotFoundError); ok {\n\t\t\t\/\/ Continue with empty VM slice\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar res infoResult\n\tvar props []string\n\n\tif cmd.OutputFlag.JSON {\n\t\tprops = nil \/\/ Load everything\n\t} else {\n\t\tprops = []string{\"summary\", \"guest.ipAddress\"} \/\/ Load summary\n\t\tif cmd.ExtraConfig {\n\t\t\tprops = append(props, \"config.extraConfig\")\n\t\t}\n\t}\n\n\tctx := context.TODO()\n\n\tfor _, vm := range vms {\n\t\tfor {\n\t\t\tvar mvm mo.VirtualMachine\n\n\t\t\tpc := property.DefaultCollector(c)\n\t\t\terr = pc.RetrieveOne(ctx, vm.Reference(), props, &mvm)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif cmd.WaitForIP && mvm.Guest.IpAddress == \"\" {\n\t\t\t\t_, err = vm.WaitForIP(ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t\/\/ Reload virtual machine object\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar hostName string\n\t\t\thostRef := mvm.Summary.Runtime.Host\n\t\t\tif hostRef == nil {\n\t\t\t\thostName = \"<unavailable>\"\n\t\t\t} else {\n\t\t\t\thost := object.NewHostSystem(c, *hostRef)\n\t\t\t\thostName, err = host.Name(ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tres.VmInfos = append(res.VmInfos, vmInfo{mvm, hostName})\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn cmd.WriteResult(&res)\n}\n\ntype vmInfo struct {\n\tmo.VirtualMachine\n\thostName string\n}\n\ntype infoResult struct {\n\tVmInfos []vmInfo\n}\n\nfunc (r *infoResult) Write(w io.Writer) error {\n\ttw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)\n\n\tfor _, vmInfo := range r.VmInfos {\n\t\tvm := vmInfo.VirtualMachine\n\t\ts := vm.Summary\n\n\t\tfmt.Fprintf(tw, \"Name:\\t%s\\n\", s.Config.Name)\n\t\tfmt.Fprintf(tw, \" UUID:\\t%s\\n\", s.Config.Uuid)\n\t\tfmt.Fprintf(tw, \" Guest name:\\t%s\\n\", s.Config.GuestFullName)\n\t\tfmt.Fprintf(tw, \" Memory:\\t%dMB\\n\", s.Config.MemorySizeMB)\n\t\tfmt.Fprintf(tw, \" CPU:\\t%d vCPU(s)\\n\", s.Config.NumCpu)\n\t\tfmt.Fprintf(tw, \" Power state:\\t%s\\n\", s.Runtime.PowerState)\n\t\tfmt.Fprintf(tw, \" Boot time:\\t%s\\n\", s.Runtime.BootTime)\n\t\tfmt.Fprintf(tw, \" IP address:\\t%s\\n\", s.Guest.IpAddress)\n\t\tfmt.Fprintf(tw, \" Host:\\t%s\\n\", vmInfo.hostName)\n\t\tif vm.Config != nil && vm.Config.ExtraConfig != nil {\n\t\t\tfmt.Fprintf(tw, \" ExtraConfig:\\n\")\n\t\t\tfor _, v := range vm.Config.ExtraConfig {\n\t\t\t\tfmt.Fprintf(tw, \" %s:\\t%s\\n\", v.GetOptionValue().Key, v.GetOptionValue().Value)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn tw.Flush()\n}\n<|endoftext|>"} {"text":"<commit_before>package screenshot\n\nimport (\n\t\"code.google.com\/p\/x-go-binding\/xgb\"\n\t\"image\"\n\t\"os\"\n)\n\nfunc ScreenRect() (image.Rectangle, error) {\n\tc, err := xgb.Dial(os.Getenv(\"DISPLAY\"))\n\tif err != nil {\n\t\treturn image.Rectangle{}, err\n\t}\n\tdefer c.Close()\n\tx := c.DefaultScreen().WidthInPixels\n\ty := c.DefaultScreen().HeightInPixels\n\n\treturn image.Rect(0, 0, int(x), int(y)), nil\n}\n\nfunc CaptureScreen() (*image.RGBA, error) {\n\tr, e := ScreenRect()\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn CaptureRect(r)\n}\n\nfunc CaptureRect(rect image.Rectangle) (*image.RGBA, error) {\n\tc, err := xgb.Dial(os.Getenv(\"DISPLAY\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer c.Close()\n\n\tx, y := rect.Dx(), rect.Dy()\n\txImg, err := c.GetImage(xgb.ImageFormatZPixmap, c.DefaultScreen().Root, int16(rect.Min.X), int16(rect.Min.Y), uint16(x), uint16(y), 0xffffffff)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := xImg.Data\n\tfor i := 0; i < len(data); i += 4 {\n\t\tdata[i], data[i+2], data[i+3] = data[i+2], data[i], 255\n\t}\n\n\timg := &image.RGBA{data, 4 * x, image.Rect(0, 0, x, y)}\n\treturn img, nil\n}\n<commit_msg>Switched to new BurntSushi\/xgb package.<commit_after>package screenshot\n\nimport (\n\t\"image\"\n\n\t\"github.com\/BurntSushi\/xgb\"\n\t\"github.com\/BurntSushi\/xgb\/xproto\"\n)\n\nfunc ScreenRect() (image.Rectangle, error) {\n\tc, err := xgb.NewConn()\n\tif err != nil {\n\t\treturn image.Rectangle{}, err\n\t}\n\tdefer c.Close()\n\n\tscreen := xproto.Setup(c).DefaultScreen(c)\n\tx := screen.WidthInPixels\n\ty := screen.HeightInPixels\n\n\treturn image.Rect(0, 0, int(x), int(y)), nil\n}\n\nfunc CaptureScreen() (*image.RGBA, error) {\n\tr, e := ScreenRect()\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn CaptureRect(r)\n}\n\nfunc CaptureRect(rect image.Rectangle) (*image.RGBA, error) {\n\tc, err := xgb.NewConn()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer c.Close()\n\n\tscreen := xproto.Setup(c).DefaultScreen(c)\n\tx, y := rect.Dx(), rect.Dy()\n\txImg, err := xproto.GetImage(c, xproto.ImageFormatZPixmap, xproto.Drawable(screen.Root), int16(rect.Min.X), int16(rect.Min.Y), uint16(x), uint16(y), 0xffffffff).Reply()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := xImg.Data\n\tfor i := 0; i < len(data); i += 4 {\n\t\tdata[i], data[i+2], data[i+3] = data[i+2], data[i], 255\n\t}\n\n\timg := &image.RGBA{data, 4 * x, image.Rect(0, 0, x, y)}\n\treturn img, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package levant\n\nimport (\n\tnomad \"github.com\/hashicorp\/nomad\/api\"\n\t\"github.com\/jrasell\/levant\/logging\"\n)\n\nfunc (l *levantDeployment) autoRevert(jobID *string) {\n\n\tdep, _, err := l.nomad.Jobs().LatestDeployment(*jobID, nil)\n\tif err != nil {\n\t\tlogging.Error(\"levant\/auto_revert: unable to query latest deployment of job %s\", *jobID)\n\t\treturn\n\t}\n\n\tlogging.Info(\"levant\/auto_revert: beginning deployment watcher for job %s\", *jobID)\n\tsuccess := l.deploymentWatcher(dep.ID)\n\n\tif success {\n\t\tlogging.Info(\"levant\/auto_revert: auto-revert of job %s was successful\", *jobID)\n\t} else {\n\t\tlogging.Error(\"levant\/auto_revert: auto-revert of job %s failed; POTENTIAL OUTAGE SITUATION\", *jobID)\n\t\tl.checkFailedDeployment(&dep.ID)\n\t}\n}\n\n\/\/ checkAutoRevert inspects a Nomad deployment to determine if any TashGroups\n\/\/ have been auto-reverted.\nfunc (l *levantDeployment) checkAutoRevert(dep *nomad.Deployment) {\n\n\tvar revert bool\n\n\t\/\/ Identify whether any of the TashGroups are enabled for auto-revert and have\n\t\/\/ therefore caused the job to enter a deployment to revert to a stable\n\t\/\/ version.\n\tfor _, v := range dep.TaskGroups {\n\t\tif v.AutoRevert {\n\t\t\trevert = true\n\t\t}\n\t}\n\n\tif revert {\n\t\tlogging.Info(\"levant\/auto_revert: job %v has entered auto-revert state; launching auto-revert checker\",\n\t\t\tdep.JobID)\n\n\t\t\/\/ Run the levant autoRevert function.\n\t\tl.autoRevert(&dep.JobID)\n\t} else {\n\t\tlogging.Info(\"levant\/auto_revert: job %v is not in auto-revert; POTENTIAL OUTAGE SITUATION\", dep.JobID)\n\t}\n}\n<commit_msg>Fix tiny typo<commit_after>package levant\n\nimport (\n\tnomad \"github.com\/hashicorp\/nomad\/api\"\n\t\"github.com\/jrasell\/levant\/logging\"\n)\n\nfunc (l *levantDeployment) autoRevert(jobID *string) {\n\n\tdep, _, err := l.nomad.Jobs().LatestDeployment(*jobID, nil)\n\tif err != nil {\n\t\tlogging.Error(\"levant\/auto_revert: unable to query latest deployment of job %s\", *jobID)\n\t\treturn\n\t}\n\n\tlogging.Info(\"levant\/auto_revert: beginning deployment watcher for job %s\", *jobID)\n\tsuccess := l.deploymentWatcher(dep.ID)\n\n\tif success {\n\t\tlogging.Info(\"levant\/auto_revert: auto-revert of job %s was successful\", *jobID)\n\t} else {\n\t\tlogging.Error(\"levant\/auto_revert: auto-revert of job %s failed; POTENTIAL OUTAGE SITUATION\", *jobID)\n\t\tl.checkFailedDeployment(&dep.ID)\n\t}\n}\n\n\/\/ checkAutoRevert inspects a Nomad deployment to determine if any TashGroups\n\/\/ have been auto-reverted.\nfunc (l *levantDeployment) checkAutoRevert(dep *nomad.Deployment) {\n\n\tvar revert bool\n\n\t\/\/ Identify whether any of the TaskGroups are enabled for auto-revert and have\n\t\/\/ therefore caused the job to enter a deployment to revert to a stable\n\t\/\/ version.\n\tfor _, v := range dep.TaskGroups {\n\t\tif v.AutoRevert {\n\t\t\trevert = true\n\t\t}\n\t}\n\n\tif revert {\n\t\tlogging.Info(\"levant\/auto_revert: job %v has entered auto-revert state; launching auto-revert checker\",\n\t\t\tdep.JobID)\n\n\t\t\/\/ Run the levant autoRevert function.\n\t\tl.autoRevert(&dep.JobID)\n\t} else {\n\t\tlogging.Info(\"levant\/auto_revert: job %v is not in auto-revert; POTENTIAL OUTAGE SITUATION\", dep.JobID)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Andreas Koch. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage html\n\nimport (\n\t\"encoding\/csv\"\n\t\"fmt\"\n\t\"github.com\/andreaskoch\/allmark\/path\"\n\t\"github.com\/andreaskoch\/allmark\/repository\"\n\t\"github.com\/andreaskoch\/allmark\/util\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar (\n\t\/\/ !csv[*description text*](*file path*)\n\ttablePattern = regexp.MustCompile(`!csv\\[([^\\]]+)\\]\\(([^)]+)\\)`)\n)\n\nfunc NewTableRenderer(markdown string, fileIndex *repository.FileIndex, pathProvider *path.Provider) func(text string) string {\n\treturn func(text string) string {\n\t\treturn renderTable(markdown, fileIndex, pathProvider)\n\t}\n}\n\nfunc renderTable(markdown string, fileIndex *repository.FileIndex, pathProvider *path.Provider) string {\n\n\tfor {\n\n\t\tfound, matches := util.IsMatch(markdown, tablePattern)\n\t\tif !found || (found && len(matches) != 3) {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ parameters\n\t\toriginalText := strings.TrimSpace(matches[0])\n\t\ttitle := strings.TrimSpace(matches[1])\n\t\tpath := strings.TrimSpace(matches[2])\n\n\t\t\/\/ create image gallery code\n\t\tfiles := fileIndex.GetFilesByPath(path, isCSVFile)\n\n\t\tif len(files) == 0 {\n\t\t\t\/\/ file not found remove entry\n\t\t\tmsg := fmt.Sprintf(\"<!-- Cannot render table. The file %q could not be found -->\", path)\n\t\t\tmarkdown = strings.Replace(markdown, originalText, msg, 1)\n\t\t\tcontinue\n\t\t}\n\n\t\ttableData, err := readCSV(files[0].Path())\n\t\tif err != nil {\n\t\t\t\/\/ file not found remove entry\n\t\t\tmsg := fmt.Sprintf(\"<!-- Cannot read csv file %q (Error: %s) -->\", path, err)\n\t\t\tmarkdown = strings.Replace(markdown, originalText, msg, 1)\n\t\t\tcontinue\n\t\t}\n\n\t\ttableCode := fmt.Sprintf(`<table class=\"csv\" summary=\"%s\">\n\t\t`, title)\n\n\t\tfor rowNumber := range tableData {\n\t\t\trow := tableData[rowNumber]\n\n\t\t\tif rowNumber == 0 {\n\t\t\t\ttableCode += `<thead>`\n\t\t\t}\n\n\t\t\tif rowNumber == 1 {\n\t\t\t\ttableCode += `<tbody>`\n\t\t\t}\n\n\t\t\ttableCode += `<tr>`\n\n\t\t\tfor columnNumber := range row {\n\t\t\t\tvalue := row[columnNumber]\n\t\t\t\ttableCode += fmt.Sprintf(`<td>%s<\/td>`, value)\n\t\t\t}\n\n\t\t\ttableCode += `<\/tr>`\n\n\t\t\tif rowNumber == 0 {\n\t\t\t\ttableCode += `<\/thead>`\n\t\t\t}\n\t\t}\n\n\t\ttableCode += `<\/tbody>\n\t\t\t<\/table>`\n\n\t\t\/\/ replace markdown with image gallery\n\t\tmarkdown = strings.Replace(markdown, originalText, tableCode, 1)\n\n\t}\n\n\treturn markdown\n}\n\nfunc readCSV(path string) ([][]string, error) {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer file.Close()\n\n\treader := csv.NewReader(file)\n\treturn reader.ReadAll()\n}\n\nfunc isCSVFile(pather path.Pather) bool {\n\tfileExtension := strings.ToLower(filepath.Ext(pather.Path()))\n\tswitch fileExtension {\n\tcase \".csv\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n\n\tpanic(\"Unreachable\")\n}\n<commit_msg>CSV Table Rendering: The code is now capable of dealing with different seperator characters, not just commas as in the previous version. Which character is used as the delimiter is determined by looking at the first line of the csv file.<commit_after>\/\/ Copyright 2013 Andreas Koch. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage html\n\nimport (\n\t\"bufio\"\n\t\"encoding\/csv\"\n\t\"fmt\"\n\t\"github.com\/andreaskoch\/allmark\/path\"\n\t\"github.com\/andreaskoch\/allmark\/repository\"\n\t\"github.com\/andreaskoch\/allmark\/util\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar (\n\t\/\/ !csv[*description text*](*file path*)\n\ttablePattern = regexp.MustCompile(`!csv\\[([^\\]]+)\\]\\(([^)]+)\\)`)\n)\n\nfunc NewTableRenderer(markdown string, fileIndex *repository.FileIndex, pathProvider *path.Provider) func(text string) string {\n\treturn func(text string) string {\n\t\treturn renderTable(markdown, fileIndex, pathProvider)\n\t}\n}\n\nfunc renderTable(markdown string, fileIndex *repository.FileIndex, pathProvider *path.Provider) string {\n\n\tfor {\n\n\t\tfound, matches := util.IsMatch(markdown, tablePattern)\n\t\tif !found || (found && len(matches) != 3) {\n\t\t\tbreak\n\t\t}\n\n\t\t\/\/ parameters\n\t\toriginalText := strings.TrimSpace(matches[0])\n\t\ttitle := strings.TrimSpace(matches[1])\n\t\tpath := strings.TrimSpace(matches[2])\n\n\t\t\/\/ create image gallery code\n\t\tfiles := fileIndex.GetFilesByPath(path, isCSVFile)\n\n\t\tif len(files) == 0 {\n\t\t\t\/\/ file not found remove entry\n\t\t\tmsg := fmt.Sprintf(\"<!-- Cannot render table. The file %q could not be found -->\", path)\n\t\t\tmarkdown = strings.Replace(markdown, originalText, msg, 1)\n\t\t\tcontinue\n\t\t}\n\n\t\ttableData, err := readCSV(files[0].Path())\n\t\tif err != nil {\n\t\t\t\/\/ file not found remove entry\n\t\t\tmsg := fmt.Sprintf(\"<!-- Cannot read csv file %q (Error: %s) -->\", path, err)\n\t\t\tmarkdown = strings.Replace(markdown, originalText, msg, 1)\n\t\t\tcontinue\n\t\t}\n\n\t\ttableCode := fmt.Sprintf(`<table class=\"csv\" summary=\"%s\">\n\t\t`, title)\n\n\t\tfor rowNumber := range tableData {\n\t\t\trow := tableData[rowNumber]\n\n\t\t\tif rowNumber == 0 {\n\t\t\t\ttableCode += `<thead>`\n\t\t\t}\n\n\t\t\tif rowNumber == 1 {\n\t\t\t\ttableCode += `<tbody>`\n\t\t\t}\n\n\t\t\ttableCode += `<tr>`\n\n\t\t\tfor columnNumber := range row {\n\t\t\t\tvalue := row[columnNumber]\n\t\t\t\ttableCode += fmt.Sprintf(`<td>%s<\/td>`, value)\n\t\t\t}\n\n\t\t\ttableCode += `<\/tr>`\n\n\t\t\tif rowNumber == 0 {\n\t\t\t\ttableCode += `<\/thead>`\n\t\t\t}\n\t\t}\n\n\t\ttableCode += `<\/tbody>\n\t\t\t<\/table>`\n\n\t\t\/\/ replace markdown with image gallery\n\t\tmarkdown = strings.Replace(markdown, originalText, tableCode, 1)\n\n\t}\n\n\treturn markdown\n}\n\nfunc readCSV(path string) ([][]string, error) {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer file.Close()\n\n\t\/\/ determine the separator\n\tseparator := determineCSVColumnSeparator(path, ';')\n\n\t\/\/ read the csv\n\tcsvReader := csv.NewReader(file)\n\tcsvReader.Comma = separator\n\n\treturn csvReader.ReadAll()\n}\n\nfunc determineCSVColumnSeparator(path string, fallback rune) rune {\n\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn fallback\n\t}\n\n\tdefer file.Close()\n\n\treader := bufio.NewReader(file)\n\tline, _, err := reader.ReadLine()\n\tif err != nil {\n\t\treturn fallback\n\t}\n\n\tfor _, character := range line {\n\t\tswitch character {\n\t\tcase ',':\n\t\t\treturn ','\n\t\tcase ';':\n\t\t\treturn ';'\n\t\tcase '\\t':\n\t\t\treturn '\\t'\n\t\t}\n\t}\n\n\treturn fallback\n}\n\nfunc isCSVFile(pather path.Pather) bool {\n\tfileExtension := strings.ToLower(filepath.Ext(pather.Path()))\n\tswitch fileExtension {\n\tcase \".csv\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n\n\tpanic(\"Unreachable\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2017 The Things Network\n\/\/ Use of this source code is governed by the MIT license that can be found in the LICENSE file.\n\npackage handler\n\nimport (\n\t\"encoding\/json\"\n\n\tpb \"github.com\/TheThingsNetwork\/ttn\/api\/handler\"\n\t\"github.com\/TheThingsNetwork\/ttn\/core\/handler\/functions\"\n\t\"github.com\/TheThingsNetwork\/ttn\/utils\/errors\"\n\t\"golang.org\/x\/net\/context\" \/\/ See https:\/\/github.com\/grpc\/grpc-go\/issues\/711\"\n)\n\n\/\/ DryUplink converts the uplink message payload by running the payload\n\/\/ functions that are provided in the DryUplinkMessage, without actually going to the network.\n\/\/ This is helpful for testing the payload functions without having to save them.\nfunc (h *handlerManager) DryUplink(ctx context.Context, in *pb.DryUplinkMessage) (*pb.DryUplinkResult, error) {\n\tapp := in.App\n\n\tlogger := functions.NewEntryLogger()\n\n\tflds := \"\"\n\tvalid := true\n\tif app != nil && app.Decoder != \"\" {\n\t\tfunctions := &UplinkFunctions{\n\t\t\tDecoder: app.Decoder,\n\t\t\tConverter: app.Converter,\n\t\t\tValidator: app.Validator,\n\t\t\tLogger: logger,\n\t\t}\n\n\t\tfields, val, err := functions.Process(in.Payload, uint8(in.Port))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvalid = val\n\n\t\tmarshalled, err := json.Marshal(fields)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tflds = string(marshalled)\n\t}\n\n\treturn &pb.DryUplinkResult{\n\t\tPayload: in.Payload,\n\t\tFields: flds,\n\t\tValid: valid,\n\t\tLogs: logger.Logs,\n\t}, nil\n}\n\n\/\/ DryDownlink converts the downlink message payload by running the payload\n\/\/ functions that are provided in the DryDownlinkMessage, without actually going to the network.\n\/\/ This is helpful for testing the payload functions without having to save them.\nfunc (h *handlerManager) DryDownlink(ctx context.Context, in *pb.DryDownlinkMessage) (*pb.DryDownlinkResult, error) {\n\tapp := in.App\n\n\tif in.Payload != nil {\n\t\tif in.Fields != \"\" {\n\t\t\treturn nil, errors.NewErrInvalidArgument(\"Downlink\", \"Both Fields and Payload provided\")\n\t\t}\n\t\treturn &pb.DryDownlinkResult{\n\t\t\tPayload: in.Payload,\n\t\t}, nil\n\t}\n\n\tif in.Fields == \"\" {\n\t\treturn nil, errors.NewErrInvalidArgument(\"Downlink\", \"Neither Fields nor Payload provided\")\n\t}\n\n\tif app == nil || app.Encoder == \"\" {\n\t\treturn nil, errors.NewErrInvalidArgument(\"Encoder\", \"Not specified\")\n\t}\n\n\tlogger := functions.NewEntryLogger()\n\n\tfunctions := &DownlinkFunctions{\n\t\tEncoder: app.Encoder,\n\t\tLogger: logger,\n\t}\n\n\tvar parsed map[string]interface{}\n\terr := json.Unmarshal([]byte(in.Fields), &parsed)\n\tif err != nil {\n\t\treturn nil, errors.NewErrInvalidArgument(\"Fields\", err.Error())\n\t}\n\n\tpayload, _, err := functions.Process(parsed, uint8(in.Port))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &pb.DryDownlinkResult{\n\t\tPayload: payload,\n\t\tLogs: logger.Logs,\n\t}, nil\n}\n<commit_msg>Make dry run work with custom formatter<commit_after>\/\/ Copyright © 2017 The Things Network\n\/\/ Use of this source code is governed by the MIT license that can be found in the LICENSE file.\n\npackage handler\n\nimport (\n\t\"encoding\/json\"\n\n\tpb \"github.com\/TheThingsNetwork\/ttn\/api\/handler\"\n\t\"github.com\/TheThingsNetwork\/ttn\/core\/handler\/functions\"\n\t\"github.com\/TheThingsNetwork\/ttn\/utils\/errors\"\n\t\"golang.org\/x\/net\/context\" \/\/ See https:\/\/github.com\/grpc\/grpc-go\/issues\/711\"\n)\n\n\/\/ DryUplink converts the uplink message payload by running the payload\n\/\/ functions that are provided in the DryUplinkMessage, without actually going to the network.\n\/\/ This is helpful for testing the payload functions without having to save them.\nfunc (h *handlerManager) DryUplink(ctx context.Context, in *pb.DryUplinkMessage) (*pb.DryUplinkResult, error) {\n\tapp := in.App\n\n\tlogger := functions.NewEntryLogger()\n\n\tflds := \"\"\n\tvalid := true\n\tif app != nil {\n\t\tvar decoder PayloadDecoder\n\t\tdecoder = &CustomUplinkFunctions{\n\t\t\tDecoder: app.Decoder,\n\t\t\tConverter: app.Converter,\n\t\t\tValidator: app.Validator,\n\t\t\tLogger: logger,\n\t\t}\n\n\t\tfields, val, err := decoder.Decode(in.Payload, uint8(in.Port))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tvalid = val\n\n\t\tmarshalled, err := json.Marshal(fields)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tflds = string(marshalled)\n\t}\n\n\treturn &pb.DryUplinkResult{\n\t\tPayload: in.Payload,\n\t\tFields: flds,\n\t\tValid: valid,\n\t\tLogs: logger.Logs,\n\t}, nil\n}\n\n\/\/ DryDownlink converts the downlink message payload by running the payload\n\/\/ functions that are provided in the DryDownlinkMessage, without actually going to the network.\n\/\/ This is helpful for testing the payload functions without having to save them.\nfunc (h *handlerManager) DryDownlink(ctx context.Context, in *pb.DryDownlinkMessage) (*pb.DryDownlinkResult, error) {\n\tapp := in.App\n\n\tif in.Payload != nil {\n\t\tif in.Fields != \"\" {\n\t\t\treturn nil, errors.NewErrInvalidArgument(\"Downlink\", \"Both Fields and Payload provided\")\n\t\t}\n\t\treturn &pb.DryDownlinkResult{\n\t\t\tPayload: in.Payload,\n\t\t}, nil\n\t}\n\n\tif in.Fields == \"\" {\n\t\treturn nil, errors.NewErrInvalidArgument(\"Downlink\", \"Neither Fields nor Payload provided\")\n\t}\n\n\tif app == nil || app.Encoder == \"\" {\n\t\treturn nil, errors.NewErrInvalidArgument(\"Encoder\", \"Not specified\")\n\t}\n\n\tlogger := functions.NewEntryLogger()\n\n\tvar encoder PayloadEncoder\n\tencoder = &CustomDownlinkFunctions{\n\t\tEncoder: app.Encoder,\n\t\tLogger: logger,\n\t}\n\n\tvar parsed map[string]interface{}\n\terr := json.Unmarshal([]byte(in.Fields), &parsed)\n\tif err != nil {\n\t\treturn nil, errors.NewErrInvalidArgument(\"Fields\", err.Error())\n\t}\n\n\tpayload, _, err := encoder.Encode(parsed, uint8(in.Port))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &pb.DryDownlinkResult{\n\t\tPayload: payload,\n\t\tLogs: logger.Logs,\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2017 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage stream\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/google\/gapid\/core\/data\/protoutil\"\n)\n\nvar (\n\t\/\/ U1 represents a 1-bit unsigned integer.\n\tU1 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 1}}}\n\t\/\/ U2 represents a 2-bit unsigned integer.\n\tU2 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 2}}}\n\t\/\/ U4 represents a 4-bit unsigned integer.\n\tU4 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 4}}}\n\t\/\/ U5 represents a 5-bit unsigned integer.\n\tU5 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 5}}}\n\t\/\/ U6 represents a 6-bit unsigned integer.\n\tU6 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 6}}}\n\t\/\/ U8 represents a 8-bit unsigned integer.\n\tU8 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 8}}}\n\t\/\/ U9 represents a 9-bit unsigned integer.\n\tU9 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 9}}}\n\t\/\/ U10 represents a 10-bit unsigned integer.\n\tU10 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 10}}}\n\t\/\/ U11 represents a 11-bit unsigned integer.\n\tU11 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 11}}}\n\t\/\/ U16 represents a 16-bit unsigned integer.\n\tU16 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 16}}}\n\t\/\/ U24 represents a 24-bit unsigned integer.\n\tU24 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 24}}}\n\t\/\/ U32 represents a 32-bit unsigned integer.\n\tU32 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 32}}}\n\t\/\/ U64 represents a 64-bit unsigned integer.\n\tU64 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 64}}}\n\t\/\/ S2 represents a 2-bit signed integer.\n\tS2 = DataType{Signed: true, Kind: &DataType_Integer{&Integer{Bits: 1}}}\n\t\/\/ S8 represents a 8-bit signed integer.\n\tS8 = DataType{Signed: true, Kind: &DataType_Integer{&Integer{Bits: 7}}}\n\t\/\/ S10 represents a 10-bit signed integer.\n\tS10 = DataType{Signed: true, Kind: &DataType_Integer{&Integer{Bits: 9}}}\n\t\/\/ S11 represents a 11-bit signed integer.\n\tS11 = DataType{Signed: true, Kind: &DataType_Integer{&Integer{Bits: 10}}}\n\t\/\/ S16 represents a 16-bit signed integer.\n\tS16 = DataType{Signed: true, Kind: &DataType_Integer{&Integer{Bits: 15}}}\n\t\/\/ S32 represents a 32-bit signed integer.\n\tS32 = DataType{Signed: true, Kind: &DataType_Integer{&Integer{Bits: 31}}}\n\t\/\/ S64 represents a 64-bit signed integer.\n\tS64 = DataType{Signed: true, Kind: &DataType_Integer{&Integer{Bits: 63}}}\n\t\/\/ F10 represents a 10-bit unsigned floating-point number.\n\tF10 = DataType{Signed: false, Kind: &DataType_Float{&Float{ExponentBits: 5, MantissaBits: 5}}}\n\t\/\/ F11 represents a 11-bit unsigned floating-point number.\n\tF11 = DataType{Signed: false, Kind: &DataType_Float{&Float{ExponentBits: 5, MantissaBits: 6}}}\n\t\/\/ F16 represents a 16-bit signed, floating-point number.\n\tF16 = DataType{Signed: true, Kind: &DataType_Float{&Float{ExponentBits: 5, MantissaBits: 10}}}\n\t\/\/ F32 represents a 32-bit signed, floating-point number.\n\tF32 = DataType{Signed: true, Kind: &DataType_Float{&Float{ExponentBits: 7, MantissaBits: 24}}}\n\t\/\/ F64 represents a 64-bit signed, floating-point number.\n\tF64 = DataType{Signed: true, Kind: &DataType_Float{&Float{ExponentBits: 10, MantissaBits: 53}}}\n\t\/\/ S16_16 represents a 16.16 bit signed, fixed-point number.\n\tS16_16 = DataType{Signed: true, Kind: &DataType_Fixed{&Fixed{IntegerBits: 15, FractionalBits: 16}}}\n)\n\n\/\/ Format prints the DataType to f.\nfunc (t DataType) Format(f fmt.State, r rune) {\n\tswitch {\n\tcase t.Is(F10):\n\t\tfmt.Fprintf(f, \"F10\")\n\tcase t.Is(F11):\n\t\tfmt.Fprintf(f, \"F11\")\n\tcase t.Is(F16):\n\t\tfmt.Fprintf(f, \"F16\")\n\tcase t.Is(F32):\n\t\tfmt.Fprintf(f, \"F32\")\n\tcase t.Is(F64):\n\t\tfmt.Fprintf(f, \"F64\")\n\tcase t.IsFloat() && t.Signed:\n\t\tfmt.Fprintf(f, \"F:s:%d:%d\", t.GetFloat().ExponentBits, t.GetFloat().MantissaBits)\n\tcase t.IsFloat() && !t.Signed:\n\t\tfmt.Fprintf(f, \"F:u:%d:%d\", t.GetFloat().ExponentBits, t.GetFloat().MantissaBits)\n\tcase t.IsInteger() && t.Signed:\n\t\tfmt.Fprintf(f, \"S%d\", t.GetInteger().Bits+1)\n\tcase t.IsInteger() && !t.Signed:\n\t\tfmt.Fprintf(f, \"U%d\", t.GetInteger().Bits)\n\tcase t.IsFixed() && t.Signed:\n\t\tfmt.Fprintf(f, \"S%d.%d\", t.GetFixed().IntegerBits+1, t.GetFixed().FractionalBits)\n\tcase t.IsFixed() && !t.Signed:\n\t\tfmt.Fprintf(f, \"U%d.%d\", t.GetFixed().IntegerBits, t.GetFixed().FractionalBits)\n\tdefault:\n\t\tfmt.Fprintf(f, \"<unknown kind %T>\", t.Kind)\n\t}\n}\n\n\/\/ Bits returns the size of the data type in bits.\nfunc (t *DataType) Bits() uint32 {\n\tbits := uint32(0)\n\tswitch k := protoutil.OneOf(t.Kind).(type) {\n\tcase *Integer:\n\t\tbits = k.Bits\n\tcase *Float:\n\t\tbits = k.ExponentBits + k.MantissaBits\n\tcase *Fixed:\n\t\tbits = k.IntegerBits + k.FractionalBits\n\tdefault:\n\t\tpanic(fmt.Errorf(\"Unknown data type kind %T\", k))\n\t}\n\tif t.Signed {\n\t\tbits++\n\t}\n\treturn bits\n}\n\n\/\/ IsInteger returns true if t is an integer.\nfunc (t *DataType) IsInteger() bool { return t.GetInteger() != nil }\n\n\/\/ IsFloat returns true if t is a float.\nfunc (t *DataType) IsFloat() bool { return t.GetFloat() != nil }\n\n\/\/ IsFixed returns true if the DataType is a fixed point number.\nfunc (t *DataType) IsFixed() bool { return t.GetFixed() != nil }\n\n\/\/ Is returns true if t is equivalent to o.\nfunc (t DataType) Is(o DataType) bool { return proto.Equal(&t, &o) }\n<commit_msg>Fix format definition of single and double precision floats<commit_after>\/\/ Copyright (C) 2017 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage stream\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/google\/gapid\/core\/data\/protoutil\"\n)\n\nvar (\n\t\/\/ U1 represents a 1-bit unsigned integer.\n\tU1 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 1}}}\n\t\/\/ U2 represents a 2-bit unsigned integer.\n\tU2 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 2}}}\n\t\/\/ U4 represents a 4-bit unsigned integer.\n\tU4 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 4}}}\n\t\/\/ U5 represents a 5-bit unsigned integer.\n\tU5 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 5}}}\n\t\/\/ U6 represents a 6-bit unsigned integer.\n\tU6 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 6}}}\n\t\/\/ U8 represents a 8-bit unsigned integer.\n\tU8 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 8}}}\n\t\/\/ U9 represents a 9-bit unsigned integer.\n\tU9 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 9}}}\n\t\/\/ U10 represents a 10-bit unsigned integer.\n\tU10 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 10}}}\n\t\/\/ U11 represents a 11-bit unsigned integer.\n\tU11 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 11}}}\n\t\/\/ U16 represents a 16-bit unsigned integer.\n\tU16 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 16}}}\n\t\/\/ U24 represents a 24-bit unsigned integer.\n\tU24 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 24}}}\n\t\/\/ U32 represents a 32-bit unsigned integer.\n\tU32 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 32}}}\n\t\/\/ U64 represents a 64-bit unsigned integer.\n\tU64 = DataType{Signed: false, Kind: &DataType_Integer{&Integer{Bits: 64}}}\n\t\/\/ S2 represents a 2-bit signed integer.\n\tS2 = DataType{Signed: true, Kind: &DataType_Integer{&Integer{Bits: 1}}}\n\t\/\/ S8 represents a 8-bit signed integer.\n\tS8 = DataType{Signed: true, Kind: &DataType_Integer{&Integer{Bits: 7}}}\n\t\/\/ S10 represents a 10-bit signed integer.\n\tS10 = DataType{Signed: true, Kind: &DataType_Integer{&Integer{Bits: 9}}}\n\t\/\/ S11 represents a 11-bit signed integer.\n\tS11 = DataType{Signed: true, Kind: &DataType_Integer{&Integer{Bits: 10}}}\n\t\/\/ S16 represents a 16-bit signed integer.\n\tS16 = DataType{Signed: true, Kind: &DataType_Integer{&Integer{Bits: 15}}}\n\t\/\/ S32 represents a 32-bit signed integer.\n\tS32 = DataType{Signed: true, Kind: &DataType_Integer{&Integer{Bits: 31}}}\n\t\/\/ S64 represents a 64-bit signed integer.\n\tS64 = DataType{Signed: true, Kind: &DataType_Integer{&Integer{Bits: 63}}}\n\t\/\/ F10 represents a 10-bit unsigned floating-point number.\n\tF10 = DataType{Signed: false, Kind: &DataType_Float{&Float{ExponentBits: 5, MantissaBits: 5}}}\n\t\/\/ F11 represents a 11-bit unsigned floating-point number.\n\tF11 = DataType{Signed: false, Kind: &DataType_Float{&Float{ExponentBits: 5, MantissaBits: 6}}}\n\t\/\/ F16 represents a 16-bit signed, floating-point number.\n\tF16 = DataType{Signed: true, Kind: &DataType_Float{&Float{ExponentBits: 5, MantissaBits: 10}}}\n\t\/\/ F32 represents a 32-bit signed, floating-point number.\n\tF32 = DataType{Signed: true, Kind: &DataType_Float{&Float{ExponentBits: 8, MantissaBits: 23}}}\n\t\/\/ F64 represents a 64-bit signed, floating-point number.\n\tF64 = DataType{Signed: true, Kind: &DataType_Float{&Float{ExponentBits: 11, MantissaBits: 52}}}\n\t\/\/ S16_16 represents a 16.16 bit signed, fixed-point number.\n\tS16_16 = DataType{Signed: true, Kind: &DataType_Fixed{&Fixed{IntegerBits: 15, FractionalBits: 16}}}\n)\n\n\/\/ Format prints the DataType to f.\nfunc (t DataType) Format(f fmt.State, r rune) {\n\tswitch {\n\tcase t.Is(F10):\n\t\tfmt.Fprintf(f, \"F10\")\n\tcase t.Is(F11):\n\t\tfmt.Fprintf(f, \"F11\")\n\tcase t.Is(F16):\n\t\tfmt.Fprintf(f, \"F16\")\n\tcase t.Is(F32):\n\t\tfmt.Fprintf(f, \"F32\")\n\tcase t.Is(F64):\n\t\tfmt.Fprintf(f, \"F64\")\n\tcase t.IsFloat() && t.Signed:\n\t\tfmt.Fprintf(f, \"F:s:%d:%d\", t.GetFloat().ExponentBits, t.GetFloat().MantissaBits)\n\tcase t.IsFloat() && !t.Signed:\n\t\tfmt.Fprintf(f, \"F:u:%d:%d\", t.GetFloat().ExponentBits, t.GetFloat().MantissaBits)\n\tcase t.IsInteger() && t.Signed:\n\t\tfmt.Fprintf(f, \"S%d\", t.GetInteger().Bits+1)\n\tcase t.IsInteger() && !t.Signed:\n\t\tfmt.Fprintf(f, \"U%d\", t.GetInteger().Bits)\n\tcase t.IsFixed() && t.Signed:\n\t\tfmt.Fprintf(f, \"S%d.%d\", t.GetFixed().IntegerBits+1, t.GetFixed().FractionalBits)\n\tcase t.IsFixed() && !t.Signed:\n\t\tfmt.Fprintf(f, \"U%d.%d\", t.GetFixed().IntegerBits, t.GetFixed().FractionalBits)\n\tdefault:\n\t\tfmt.Fprintf(f, \"<unknown kind %T>\", t.Kind)\n\t}\n}\n\n\/\/ Bits returns the size of the data type in bits.\nfunc (t *DataType) Bits() uint32 {\n\tbits := uint32(0)\n\tswitch k := protoutil.OneOf(t.Kind).(type) {\n\tcase *Integer:\n\t\tbits = k.Bits\n\tcase *Float:\n\t\tbits = k.ExponentBits + k.MantissaBits\n\tcase *Fixed:\n\t\tbits = k.IntegerBits + k.FractionalBits\n\tdefault:\n\t\tpanic(fmt.Errorf(\"Unknown data type kind %T\", k))\n\t}\n\tif t.Signed {\n\t\tbits++\n\t}\n\treturn bits\n}\n\n\/\/ IsInteger returns true if t is an integer.\nfunc (t *DataType) IsInteger() bool { return t.GetInteger() != nil }\n\n\/\/ IsFloat returns true if t is a float.\nfunc (t *DataType) IsFloat() bool { return t.GetFloat() != nil }\n\n\/\/ IsFixed returns true if the DataType is a fixed point number.\nfunc (t *DataType) IsFixed() bool { return t.GetFixed() != nil }\n\n\/\/ Is returns true if t is equivalent to o.\nfunc (t DataType) Is(o DataType) bool { return proto.Equal(&t, &o) }\n<|endoftext|>"} {"text":"<commit_before>package win32\n\nimport \"sync\"\n\nvar emptyCursor HCURSOR\nvar emptyCursorOnce sync.Once\n\n\/\/ Create a custom cursor at run time.\nfunc GetEmptyCursor() HCURSOR {\n\temptyCursorOnce.Do(func() {\n\t\tandMASK := []byte{\n\t\t\t0xFF, 0xFF, 0xFF, 0xFF,\n\t\t}\n\n\t\txorMASK := []byte{\n\t\t\t0x00, 0x00, 0x00, 0x00,\n\t\t}\n\t\temptyCursor = CreateCursor(hThisInstance, \/\/ app. instance\n\t\t\t0, \/\/ horizontal position of hot spot\n\t\t\t0, \/\/ vertical position of hot spot\n\t\t\t\/\/ 0 width\/height is unsupported in testing\n\t\t\t1, \/\/ cursor width\n\t\t\t1, \/\/ cursor height\n\t\t\tandMASK,\n\t\t\txorMASK)\n\t})\n\treturn emptyCursor\n}\n\n\/\/ TODO: Add image.Image to cursor conversion and setting functionality\n\/\/ this can currently be done in oak by having a image follow the cursor around,\n\/\/ but that will inherently not be as smooth as setting the OS cursor. (but more portable)\n<commit_msg>shiny\/driver\/internal\/win32: add build tags to cursor file<commit_after>\/\/go:build windows\n\/\/ +build windows\n\npackage win32\n\nimport \"sync\"\n\nvar emptyCursor HCURSOR\nvar emptyCursorOnce sync.Once\n\n\/\/ Create a custom cursor at run time.\nfunc GetEmptyCursor() HCURSOR {\n\temptyCursorOnce.Do(func() {\n\t\tandMASK := []byte{\n\t\t\t0xFF, 0xFF, 0xFF, 0xFF,\n\t\t}\n\n\t\txorMASK := []byte{\n\t\t\t0x00, 0x00, 0x00, 0x00,\n\t\t}\n\t\temptyCursor = CreateCursor(hThisInstance, \/\/ app. instance\n\t\t\t0, \/\/ horizontal position of hot spot\n\t\t\t0, \/\/ vertical position of hot spot\n\t\t\t\/\/ 0 width\/height is unsupported in testing\n\t\t\t1, \/\/ cursor width\n\t\t\t1, \/\/ cursor height\n\t\t\tandMASK,\n\t\t\txorMASK)\n\t})\n\treturn emptyCursor\n}\n\n\/\/ TODO: Add image.Image to cursor conversion and setting functionality\n\/\/ this can currently be done in oak by having a image follow the cursor around,\n\/\/ but that will inherently not be as smooth as setting the OS cursor. (but more portable)\n<|endoftext|>"} {"text":"<commit_before>package util\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"strconv\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\n\/\/ Configuration are the available config values\ntype Configuration struct {\n\tListenAddr string `yaml:\"ListenAddr\" env:\"LISTEN_ADDR\"`\n\tBaseURL string `yaml:\"BaseURL\" env:\"BASE_URL\"`\n\tDataDir string `yaml:\"DataDir\" env:\"DATA_DIR\"`\n\tUseSSL bool `yaml:\"EnableSSL\" env:\"USE_SSL\"`\n\tEnableDebugMode bool `yaml:\"EnableDebugMode\" env:\"ENABLE_DEBUG_MODE\"`\n\tShortedIDLength int `yaml:\"ShortedIDLength\" env:\"SHORTED_ID_LENGTH\"`\n\tGoogle oAuthConf `yaml:\"Google\" env:\"GOOGLE\"`\n\tGitHub oAuthConf `yaml:\"GitHub\" env:\"GITHUB\"`\n\tMicrosoft oAuthConf `yaml:\"Microsoft\" env:\"MICROSOFT\"`\n}\n\ntype oAuthConf struct {\n\tClientID string `yaml:\"ClientID\" env:\"CLIENT_ID\"`\n\tClientSecret string `yaml:\"ClientSecret\" env:\"CLIENT_SECRET\"`\n}\n\n\/\/ config contains the default values\nvar config = Configuration{\n\tListenAddr: \":8080\",\n\tBaseURL: \"http:\/\/localhost:3000\",\n\tDataDir: \"data\",\n\tEnableDebugMode: false,\n\tUseSSL: false,\n\tShortedIDLength: 4,\n}\n\n\/\/ ReadInConfig loads the Configuration and other needed folders for further usage\nfunc ReadInConfig() error {\n\tfile, err := ioutil.ReadFile(\"config.yaml\")\n\tif err == nil {\n\t\tif err := yaml.Unmarshal(file, &config); err != nil {\n\t\t\treturn errors.Wrap(err, \"could not unmarshal yaml file\")\n\t\t}\n\t} else if !os.IsNotExist(err) {\n\t\treturn errors.Wrap(err, \"could not read config file\")\n\t} else {\n\t\tlogrus.Info(\"No configuration file found, using defaults with environment variable overrides.\")\n\t}\n\tif err := config.applyEnvironmentConfig(); err != nil {\n\t\treturn errors.Wrap(err, \"could not apply environment configuration\")\n\t}\n\tconfig.DataDir, err = filepath.Abs(config.DataDir)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"could not get relative data dir path\")\n\t}\n\tif _, err = os.Stat(config.DataDir); os.IsNotExist(err) {\n\t\tif err = os.MkdirAll(config.DataDir, 0755); err != nil {\n\t\t\treturn errors.Wrap(err, \"could not create config directory\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *Configuration) applyEnvironmentConfig() error {\n\treturn c.setDefaultValue(reflect.ValueOf(c), reflect.TypeOf(*c), -1, \"GUS\")\n}\n\nfunc (c *Configuration) setDefaultValue(v reflect.Value, t reflect.Type, counter int, prefix string) error {\n\tif v.Kind() != reflect.Ptr {\n\t\treturn errors.New(\"Not a pointer value\")\n\t}\n\tf := reflect.StructField{}\n\tif counter != -1 {\n\t\tf = t.Field(counter)\n\t}\n\tv = reflect.Indirect(v)\n\tfieldEnv, exists := f.Tag.Lookup(\"env\")\n\tenv := os.Getenv(prefix + fieldEnv)\n\tif exists && env != \"\" {\n\t\tswitch v.Kind() {\n\t\tcase reflect.Int:\n\t\t\tenvI, err := strconv.Atoi(env)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Warningf(\"could not parse to int: %v\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tv.SetInt(int64(envI))\n\t\tcase reflect.String:\n\t\t\tv.SetString(env)\n\t\tcase reflect.Bool:\n\t\t\tenvB, err := strconv.ParseBool(env)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Warningf(\"could not parse to bool: %v\", err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tv.SetBool(envB)\n\t\t}\n\t}\n\tif v.Kind() == reflect.Struct {\n\t\tfor i := 0; i < v.NumField(); i++ {\n\t\t\tif err := c.setDefaultValue(v.Field(i).Addr(), v.Type(), i, prefix+fieldEnv+\"_\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (o oAuthConf) Enabled() bool {\n\treturn o.ClientSecret != \"\"\n}\n\n\/\/ GetConfig returns the configuration from the memory\nfunc GetConfig() Configuration {\n\treturn config\n}\n\n\/\/ SetConfig sets the configuration into the memory\nfunc SetConfig(c Configuration) {\n\tconfig = c\n}\n<commit_msg>moved env struct applier to a new package<commit_after>package util\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\tenvstruct \"github.com\/maxibanki\/golang-env-struct\"\n\t\"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\n\/\/ Configuration are the available config values\ntype Configuration struct {\n\tListenAddr string `yaml:\"ListenAddr\" env:\"LISTEN_ADDR\"`\n\tBaseURL string `yaml:\"BaseURL\" env:\"BASE_URL\"`\n\tDataDir string `yaml:\"DataDir\" env:\"DATA_DIR\"`\n\tUseSSL bool `yaml:\"EnableSSL\" env:\"USE_SSL\"`\n\tEnableDebugMode bool `yaml:\"EnableDebugMode\" env:\"ENABLE_DEBUG_MODE\"`\n\tShortedIDLength int `yaml:\"ShortedIDLength\" env:\"SHORTED_ID_LENGTH\"`\n\tGoogle oAuthConf `yaml:\"Google\" env:\"GOOGLE\"`\n\tGitHub oAuthConf `yaml:\"GitHub\" env:\"GITHUB\"`\n\tMicrosoft oAuthConf `yaml:\"Microsoft\" env:\"MICROSOFT\"`\n}\n\ntype oAuthConf struct {\n\tClientID string `yaml:\"ClientID\" env:\"CLIENT_ID\"`\n\tClientSecret string `yaml:\"ClientSecret\" env:\"CLIENT_SECRET\"`\n}\n\n\/\/ config contains the default values\nvar config = Configuration{\n\tListenAddr: \":8080\",\n\tBaseURL: \"http:\/\/localhost:3000\",\n\tDataDir: \"data\",\n\tEnableDebugMode: false,\n\tUseSSL: false,\n\tShortedIDLength: 4,\n}\n\n\/\/ ReadInConfig loads the Configuration and other needed folders for further usage\nfunc ReadInConfig() error {\n\tfile, err := ioutil.ReadFile(\"config.yaml\")\n\tif err == nil {\n\t\tif err := yaml.Unmarshal(file, &config); err != nil {\n\t\t\treturn errors.Wrap(err, \"could not unmarshal yaml file\")\n\t\t}\n\t} else if !os.IsNotExist(err) {\n\t\treturn errors.Wrap(err, \"could not read config file\")\n\t} else {\n\t\tlogrus.Info(\"No configuration file found, using defaults with environment variable overrides.\")\n\t}\n\tif err := envstruct.ApplyEnvVars(&config, \"GUS\"); err != nil {\n\t\treturn errors.Wrap(err, \"could not apply environment configuration\")\n\t}\n\tconfig.DataDir, err = filepath.Abs(config.DataDir)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"could not get relative data dir path\")\n\t}\n\tif _, err = os.Stat(config.DataDir); os.IsNotExist(err) {\n\t\tif err = os.MkdirAll(config.DataDir, 0755); err != nil {\n\t\t\treturn errors.Wrap(err, \"could not create config directory\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (o oAuthConf) Enabled() bool {\n\treturn o.ClientSecret != \"\"\n}\n\n\/\/ GetConfig returns the configuration from the memory\nfunc GetConfig() Configuration {\n\treturn config\n}\n\n\/\/ SetConfig sets the configuration into the memory\nfunc SetConfig(c Configuration) {\n\tconfig = c\n}\n<|endoftext|>"} {"text":"<commit_before>package sakuracloud\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\nfunc TestAccSakuraCloudDataSourceCDROM_Basic(t *testing.T) {\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tPreventPostDestroyRefresh: true,\n\t\tCheckDestroy: testAccCheckSakuraCloudCDROMDataSourceDestroy,\n\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccCheckSakuraCloudDataSourceCDROMConfig,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckSakuraCloudCDROMDataSourceID(\"data.sakuracloud_cdrom.foobar\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"data.sakuracloud_cdrom.foobar\", \"name\", \"Ubuntu server 18.04 LTS 64bit\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"data.sakuracloud_cdrom.foobar\", \"size\", \"5\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"data.sakuracloud_cdrom.foobar\", \"tags.#\", \"5\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"data.sakuracloud_cdrom.foobar\", \"tags.0\", \"arch-64bit\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"data.sakuracloud_cdrom.foobar\", \"tags.1\", \"current-stable\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"data.sakuracloud_cdrom.foobar\", \"tags.2\", \"distro-ubuntu\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"data.sakuracloud_cdrom.foobar\", \"tags.3\", \"distro-ver-18.04\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"data.sakuracloud_cdrom.foobar\", \"tags.4\", \"os-unix\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccCheckSakuraCloudDataSourceCDROMConfig_With_Tag,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckSakuraCloudCDROMDataSourceID(\"data.sakuracloud_cdrom.foobar\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccCheckSakuraCloudDataSourceCDROM_NameSelector_Exists,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckSakuraCloudCDROMDataSourceID(\"data.sakuracloud_cdrom.foobar\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccCheckSakuraCloudDataSourceCDROM_TagSelector_Exists,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckSakuraCloudCDROMDataSourceID(\"data.sakuracloud_cdrom.foobar\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccCheckSakuraCloudDataSourceCDROMConfig_NotExists,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckSakuraCloudCDROMDataSourceNotExists(\"data.sakuracloud_cdrom.foobar\"),\n\t\t\t\t),\n\t\t\t\tDestroy: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccCheckSakuraCloudDataSourceCDROMConfig_With_NotExists_Tag,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckSakuraCloudCDROMDataSourceNotExists(\"data.sakuracloud_cdrom.foobar\"),\n\t\t\t\t),\n\t\t\t\tDestroy: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccCheckSakuraCloudDataSourceCDROM_NameSelector_NotExists,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckSakuraCloudCDROMDataSourceNotExists(\"data.sakuracloud_cdrom.foobar\"),\n\t\t\t\t),\n\t\t\t\tDestroy: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccCheckSakuraCloudDataSourceCDROM_TagSelector_NotExists,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckSakuraCloudCDROMDataSourceNotExists(\"data.sakuracloud_cdrom.foobar\"),\n\t\t\t\t),\n\t\t\t\tDestroy: true,\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc testAccCheckSakuraCloudCDROMDataSourceID(n string) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Can't find CDROM data source: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn errors.New(\"CDROM data source ID not set\")\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckSakuraCloudCDROMDataSourceNotExists(n string) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\t_, ok := s.RootModule().Resources[n]\n\t\tif ok {\n\t\t\treturn fmt.Errorf(\"Found CDROM data source: %s\", n)\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckSakuraCloudCDROMDataSourceDestroy(s *terraform.State) error {\n\tclient := testAccProvider.Meta().(*APIClient)\n\n\tfor _, rs := range s.RootModule().Resources {\n\t\tif rs.Type != \"sakuracloud_cdrom\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err := client.CDROM.Read(toSakuraCloudID(rs.Primary.ID))\n\n\t\tif err == nil {\n\t\t\treturn errors.New(\"CDROM still exists\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nvar testAccCheckSakuraCloudDataSourceCDROMConfig = `\ndata \"sakuracloud_cdrom\" \"foobar\" {\n filter = {\n\tname = \"Name\"\n\tvalues = [\"Ubuntu Server 18\"]\n }\n}`\n\nvar testAccCheckSakuraCloudDataSourceCDROMConfig_With_Tag = `\ndata \"sakuracloud_cdrom\" \"foobar\" {\n filter = {\n\tname = \"Tags\"\n\tvalues = [\"distro-ubuntu\",\"os-unix\"]\n }\n}`\n\nvar testAccCheckSakuraCloudDataSourceCDROMConfig_With_NotExists_Tag = `\ndata \"sakuracloud_cdrom\" \"foobar\" {\n filter = {\n\tname = \"Tags\"\n\tvalues = [\"distro-ubuntu-xxxxxxxxxxx\",\"os-linux-xxxxxxxx\"]\n }\n}`\n\nvar testAccCheckSakuraCloudDataSourceCDROMConfig_NotExists = `\ndata \"sakuracloud_cdrom\" \"foobar\" {\n filter = {\n\tname = \"Name\"\n\tvalues = [\"xxxxxxxxxxxxxxxxxx\"]\n }\n}`\n\nvar testAccCheckSakuraCloudDataSourceCDROM_NameSelector_Exists = `\ndata \"sakuracloud_cdrom\" \"foobar\" {\n name_selectors = [\"Ubuntu\",\"server\",\"18\"]\n}\n`\nvar testAccCheckSakuraCloudDataSourceCDROM_NameSelector_NotExists = `\ndata \"sakuracloud_cdrom\" \"foobar\" {\n name_selectors = [\"xxxxxxxxxx\"]\n}\n`\n\nvar testAccCheckSakuraCloudDataSourceCDROM_TagSelector_Exists = `\ndata \"sakuracloud_cdrom\" \"foobar\" {\n\ttag_selectors = [\"distro-ubuntu\",\"os-unix\"]\n}`\n\nvar testAccCheckSakuraCloudDataSourceCDROM_TagSelector_NotExists = `\ndata \"sakuracloud_cdrom\" \"foobar\" {\n\ttag_selectors = [\"xxxxxxxxxx\"]\n}`\n<commit_msg>fix broken test - sakuracloud_cdrom<commit_after>package sakuracloud\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\nfunc TestAccSakuraCloudDataSourceCDROM_Basic(t *testing.T) {\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tPreventPostDestroyRefresh: true,\n\t\tCheckDestroy: testAccCheckSakuraCloudCDROMDataSourceDestroy,\n\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccCheckSakuraCloudDataSourceCDROMConfig,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckSakuraCloudCDROMDataSourceID(\"data.sakuracloud_cdrom.foobar\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"data.sakuracloud_cdrom.foobar\", \"name\", \"Ubuntu server 18.04.1 LTS 64bit\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"data.sakuracloud_cdrom.foobar\", \"size\", \"5\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"data.sakuracloud_cdrom.foobar\", \"tags.#\", \"5\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"data.sakuracloud_cdrom.foobar\", \"tags.0\", \"arch-64bit\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"data.sakuracloud_cdrom.foobar\", \"tags.1\", \"current-stable\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"data.sakuracloud_cdrom.foobar\", \"tags.2\", \"distro-ubuntu\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"data.sakuracloud_cdrom.foobar\", \"tags.3\", \"distro-ver-18.04.1\"),\n\t\t\t\t\tresource.TestCheckResourceAttr(\"data.sakuracloud_cdrom.foobar\", \"tags.4\", \"os-unix\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccCheckSakuraCloudDataSourceCDROMConfig_With_Tag,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckSakuraCloudCDROMDataSourceID(\"data.sakuracloud_cdrom.foobar\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccCheckSakuraCloudDataSourceCDROM_NameSelector_Exists,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckSakuraCloudCDROMDataSourceID(\"data.sakuracloud_cdrom.foobar\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccCheckSakuraCloudDataSourceCDROM_TagSelector_Exists,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckSakuraCloudCDROMDataSourceID(\"data.sakuracloud_cdrom.foobar\"),\n\t\t\t\t),\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccCheckSakuraCloudDataSourceCDROMConfig_NotExists,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckSakuraCloudCDROMDataSourceNotExists(\"data.sakuracloud_cdrom.foobar\"),\n\t\t\t\t),\n\t\t\t\tDestroy: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccCheckSakuraCloudDataSourceCDROMConfig_With_NotExists_Tag,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckSakuraCloudCDROMDataSourceNotExists(\"data.sakuracloud_cdrom.foobar\"),\n\t\t\t\t),\n\t\t\t\tDestroy: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccCheckSakuraCloudDataSourceCDROM_NameSelector_NotExists,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckSakuraCloudCDROMDataSourceNotExists(\"data.sakuracloud_cdrom.foobar\"),\n\t\t\t\t),\n\t\t\t\tDestroy: true,\n\t\t\t},\n\t\t\t{\n\t\t\t\tConfig: testAccCheckSakuraCloudDataSourceCDROM_TagSelector_NotExists,\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckSakuraCloudCDROMDataSourceNotExists(\"data.sakuracloud_cdrom.foobar\"),\n\t\t\t\t),\n\t\t\t\tDestroy: true,\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc testAccCheckSakuraCloudCDROMDataSourceID(n string) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Can't find CDROM data source: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn errors.New(\"CDROM data source ID not set\")\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckSakuraCloudCDROMDataSourceNotExists(n string) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\t_, ok := s.RootModule().Resources[n]\n\t\tif ok {\n\t\t\treturn fmt.Errorf(\"Found CDROM data source: %s\", n)\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckSakuraCloudCDROMDataSourceDestroy(s *terraform.State) error {\n\tclient := testAccProvider.Meta().(*APIClient)\n\n\tfor _, rs := range s.RootModule().Resources {\n\t\tif rs.Type != \"sakuracloud_cdrom\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err := client.CDROM.Read(toSakuraCloudID(rs.Primary.ID))\n\n\t\tif err == nil {\n\t\t\treturn errors.New(\"CDROM still exists\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nvar testAccCheckSakuraCloudDataSourceCDROMConfig = `\ndata \"sakuracloud_cdrom\" \"foobar\" {\n filter = {\n\tname = \"Name\"\n\tvalues = [\"Ubuntu Server 18\"]\n }\n}`\n\nvar testAccCheckSakuraCloudDataSourceCDROMConfig_With_Tag = `\ndata \"sakuracloud_cdrom\" \"foobar\" {\n filter = {\n\tname = \"Tags\"\n\tvalues = [\"distro-ubuntu\",\"os-unix\"]\n }\n}`\n\nvar testAccCheckSakuraCloudDataSourceCDROMConfig_With_NotExists_Tag = `\ndata \"sakuracloud_cdrom\" \"foobar\" {\n filter = {\n\tname = \"Tags\"\n\tvalues = [\"distro-ubuntu-xxxxxxxxxxx\",\"os-linux-xxxxxxxx\"]\n }\n}`\n\nvar testAccCheckSakuraCloudDataSourceCDROMConfig_NotExists = `\ndata \"sakuracloud_cdrom\" \"foobar\" {\n filter = {\n\tname = \"Name\"\n\tvalues = [\"xxxxxxxxxxxxxxxxxx\"]\n }\n}`\n\nvar testAccCheckSakuraCloudDataSourceCDROM_NameSelector_Exists = `\ndata \"sakuracloud_cdrom\" \"foobar\" {\n name_selectors = [\"Ubuntu\",\"server\",\"18\"]\n}\n`\nvar testAccCheckSakuraCloudDataSourceCDROM_NameSelector_NotExists = `\ndata \"sakuracloud_cdrom\" \"foobar\" {\n name_selectors = [\"xxxxxxxxxx\"]\n}\n`\n\nvar testAccCheckSakuraCloudDataSourceCDROM_TagSelector_Exists = `\ndata \"sakuracloud_cdrom\" \"foobar\" {\n\ttag_selectors = [\"distro-ubuntu\",\"os-unix\"]\n}`\n\nvar testAccCheckSakuraCloudDataSourceCDROM_TagSelector_NotExists = `\ndata \"sakuracloud_cdrom\" \"foobar\" {\n\ttag_selectors = [\"xxxxxxxxxx\"]\n}`\n<|endoftext|>"} {"text":"<commit_before>package const_conf\n\nimport (\n\t\"fmt\"\n)\n\nconst (\n\tServiceScheme = \"https:\/\/\"\n\t\/\/DomainName = \"gre.jolly23.com\"\n\tDomainName = \"\"\n\n\tHttpPort = 8080\n\tHttpsPort = 8090\n\n\tIsEnableHTTP = true\n\tIsEnableHTTPS = false\n\n\tSSLCertFile = \"deploy_conf\/certs\/cert.pem\"\n\tSSLKeyFile = \"deploy_conf\/certs\/cert.key\"\n\n\tBeeGoViewsPath = \"controller_views\"\n\tBeeGoConfiguration = \"deploy_conf\/beego_main.conf\"\n\tBeeGoOrmAlias = \"default\"\n\tBeeGoOrmMaxConn = 30\n\tBeeGoOrmMaxIdle = 15\n\n\tIsEnableXSRF = false\n\tBeeGoXSRFKey = \"ASb&ADGaEmGQnp2XdTEc5NFw0Al0bKx$P1o61eJJF7$2b$1EoETzKXQuYh\"\n\tBeeGoXSRFExpire = 3600\n\n\tRedisPort = \"6379\"\n\tRedisAddress = \"link-docker-redis\" + \":\" + RedisPort\n\tRedisPassword = \"\"\n\tRedisNumber = 0\n\n\tCookieSecure = \"$2m094FKSzyBj1DN27Ib$12$Fw0Al0bKGX9XuarHQzGDmtOSyeLWnfSbEc5N&AD\"\n\tWebCookieName = \"IsLogin\"\n\n\tLogsMethod = \"file\"\n\tLogsConfig = `{\"filename\":\"logs\/site.log\"}`\n\n\tPageSize = 20\n\n\tMarkWordTimeLimit = 10\n)\n\nvar DbSource string = fmt.Sprintf(\"sslmode=%s host=%s port=%s dbname=%s user=%s password=%s\",\n\tPgSslMode,\n\tPgHostAddress,\n\tPgHostPort,\n\tDatabaseName,\n\tPgUserName,\n\tPgPassword,\n)\n<commit_msg>fix:dev:修改配置文件[数据库连接池数量配置]<commit_after>package const_conf\n\nimport (\n\t\"fmt\"\n)\n\nconst (\n\tServiceScheme = \"https:\/\/\"\n\t\/\/DomainName = \"gre.jolly23.com\"\n\tDomainName = \"\"\n\n\tHttpPort = 8080\n\tHttpsPort = 8090\n\n\tIsEnableHTTP = true\n\tIsEnableHTTPS = false\n\n\tSSLCertFile = \"deploy_conf\/certs\/cert.pem\"\n\tSSLKeyFile = \"deploy_conf\/certs\/cert.key\"\n\n\tBeeGoViewsPath = \"controller_views\"\n\tBeeGoConfiguration = \"deploy_conf\/beego_main.conf\"\n\tBeeGoOrmAlias = \"default\"\n\tBeeGoOrmMaxConn = 20\n\tBeeGoOrmMaxIdle = 10\n\n\tIsEnableXSRF = false\n\tBeeGoXSRFKey = \"ASb&ADGaEmGQnp2XdTEc5NFw0Al0bKx$P1o61eJJF7$2b$1EoETzKXQuYh\"\n\tBeeGoXSRFExpire = 3600\n\n\tRedisPort = \"6379\"\n\tRedisAddress = \"link-docker-redis\" + \":\" + RedisPort\n\tRedisPassword = \"\"\n\tRedisNumber = 0\n\n\tCookieSecure = \"$2m094FKSzyBj1DN27Ib$12$Fw0Al0bKGX9XuarHQzGDmtOSyeLWnfSbEc5N&AD\"\n\tWebCookieName = \"IsLogin\"\n\n\tLogsMethod = \"file\"\n\tLogsConfig = `{\"filename\":\"logs\/site.log\"}`\n\n\tPageSize = 20\n\n\tMarkWordTimeLimit = 10\n)\n\nvar DbSource string = fmt.Sprintf(\"sslmode=%s host=%s port=%s dbname=%s user=%s password=%s\",\n\tPgSslMode,\n\tPgHostAddress,\n\tPgHostPort,\n\tDatabaseName,\n\tPgUserName,\n\tPgPassword,\n)\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Handler for Docker containers.\npackage docker\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/google\/cadvisor\/container\"\n\tcontainerlibcontainer \"github.com\/google\/cadvisor\/container\/libcontainer\"\n\t\"github.com\/google\/cadvisor\/fs\"\n\tinfo \"github.com\/google\/cadvisor\/info\/v1\"\n\t\"github.com\/google\/cadvisor\/utils\"\n\n\tdocker \"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/cgroups\"\n\tcgroupfs \"github.com\/opencontainers\/runc\/libcontainer\/cgroups\/fs\"\n\tlibcontainerconfigs \"github.com\/opencontainers\/runc\/libcontainer\/configs\"\n)\n\nconst (\n\t\/\/ Path to aufs dir where all the files exist.\n\t\/\/ aufs\/layers is ignored here since it does not hold a lot of data.\n\t\/\/ aufs\/mnt contains the mount points used to compose the rootfs. Hence it is also ignored.\n\tpathToAufsDir = \"aufs\/diff\"\n\t\/\/ Path to the directory where docker stores log files if the json logging driver is enabled.\n\tpathToContainersDir = \"containers\"\n\t\/\/ Path to the overlayfs storage driver directory.\n\tpathToOverlayDir = \"overlay\"\n)\n\ntype dockerContainerHandler struct {\n\tclient *docker.Client\n\tname string\n\tid string\n\taliases []string\n\tmachineInfoFactory info.MachineInfoFactory\n\n\t\/\/ Absolute path to the cgroup hierarchies of this container.\n\t\/\/ (e.g.: \"cpu\" -> \"\/sys\/fs\/cgroup\/cpu\/test\")\n\tcgroupPaths map[string]string\n\n\t\/\/ Manager of this container's cgroups.\n\tcgroupManager cgroups.Manager\n\n\tstorageDriver storageDriver\n\tfsInfo fs.FsInfo\n\tstorageDirs []string\n\n\t\/\/ Time at which this container was created.\n\tcreationTime time.Time\n\n\t\/\/ Metadata labels associated with the container.\n\tlabels map[string]string\n\n\t\/\/ The container PID used to switch namespaces as required\n\tpid int\n\n\t\/\/ Image name used for this container.\n\timage string\n\n\t\/\/ The host root FS to read\n\trootFs string\n\n\t\/\/ The network mode of the container\n\tnetworkMode string\n\n\t\/\/ Filesystem handler.\n\tfsHandler fsHandler\n}\n\nfunc newDockerContainerHandler(\n\tclient *docker.Client,\n\tname string,\n\tmachineInfoFactory info.MachineInfoFactory,\n\tfsInfo fs.FsInfo,\n\tstorageDriver storageDriver,\n\tcgroupSubsystems *containerlibcontainer.CgroupSubsystems,\n\tinHostNamespace bool,\n) (container.ContainerHandler, error) {\n\t\/\/ Create the cgroup paths.\n\tcgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))\n\tfor key, val := range cgroupSubsystems.MountPoints {\n\t\tcgroupPaths[key] = path.Join(val, name)\n\t}\n\n\t\/\/ Generate the equivalent cgroup manager for this container.\n\tcgroupManager := &cgroupfs.Manager{\n\t\tCgroups: &libcontainerconfigs.Cgroup{\n\t\t\tName: name,\n\t\t},\n\t\tPaths: cgroupPaths,\n\t}\n\n\trootFs := \"\/\"\n\tif !inHostNamespace {\n\t\trootFs = \"\/rootfs\"\n\t}\n\n\tid := ContainerNameToDockerId(name)\n\n\t\/\/ Add the Containers dir where the log files are stored.\n\tstorageDirs := []string{path.Join(*dockerRootDir, pathToContainersDir, id)}\n\n\tswitch storageDriver {\n\tcase aufsStorageDriver:\n\t\t\/\/ Add writable layer for aufs.\n\t\tstorageDirs = append(storageDirs, path.Join(*dockerRootDir, pathToAufsDir, id))\n\tcase overlayStorageDriver:\n\t\tstorageDirs = append(storageDirs, path.Join(*dockerRootDir, pathToOverlayDir, id))\n\t}\n\n\thandler := &dockerContainerHandler{\n\t\tid: id,\n\t\tclient: client,\n\t\tname: name,\n\t\tmachineInfoFactory: machineInfoFactory,\n\t\tcgroupPaths: cgroupPaths,\n\t\tcgroupManager: cgroupManager,\n\t\tstorageDriver: storageDriver,\n\t\tfsInfo: fsInfo,\n\t\trootFs: rootFs,\n\t\tstorageDirs: storageDirs,\n\t\tfsHandler: newFsHandler(time.Minute, storageDirs, fsInfo),\n\t}\n\n\t\/\/ Start the filesystem handler.\n\thandler.fsHandler.start()\n\n\t\/\/ We assume that if Inspect fails then the container is not known to docker.\n\tctnr, err := client.InspectContainer(id)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to inspect container %q: %v\", id, err)\n\t}\n\thandler.creationTime = ctnr.Created\n\thandler.pid = ctnr.State.Pid\n\n\t\/\/ Add the name and bare ID as aliases of the container.\n\thandler.aliases = append(handler.aliases, strings.TrimPrefix(ctnr.Name, \"\/\"), id)\n\thandler.labels = ctnr.Config.Labels\n\thandler.image = ctnr.Config.Image\n\thandler.networkMode = ctnr.HostConfig.NetworkMode\n\n\treturn handler, nil\n}\n\nfunc (self *dockerContainerHandler) ContainerReference() (info.ContainerReference, error) {\n\treturn info.ContainerReference{\n\t\tName: self.name,\n\t\tAliases: self.aliases,\n\t\tNamespace: DockerNamespace,\n\t}, nil\n}\n\nfunc (self *dockerContainerHandler) readLibcontainerConfig() (*libcontainerconfigs.Config, error) {\n\tconfig, err := containerlibcontainer.ReadConfig(*dockerRootDir, *dockerRunDir, self.id)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read libcontainer config: %v\", err)\n\t}\n\n\t\/\/ Replace cgroup parent and name with our own since we may be running in a different context.\n\tif config.Cgroups == nil {\n\t\tconfig.Cgroups = new(libcontainerconfigs.Cgroup)\n\t}\n\tconfig.Cgroups.Name = self.name\n\tconfig.Cgroups.Parent = \"\/\"\n\n\treturn config, nil\n}\n\nfunc libcontainerConfigToContainerSpec(config *libcontainerconfigs.Config, mi *info.MachineInfo) info.ContainerSpec {\n\tvar spec info.ContainerSpec\n\tspec.HasMemory = true\n\tspec.Memory.Limit = math.MaxUint64\n\tspec.Memory.SwapLimit = math.MaxUint64\n\tif config.Cgroups.Memory > 0 {\n\t\tspec.Memory.Limit = uint64(config.Cgroups.Memory)\n\t}\n\tif config.Cgroups.MemorySwap > 0 {\n\t\tspec.Memory.SwapLimit = uint64(config.Cgroups.MemorySwap)\n\t}\n\n\t\/\/ Get CPU info\n\tspec.HasCpu = true\n\tspec.Cpu.Limit = 1024\n\tif config.Cgroups.CpuShares != 0 {\n\t\tspec.Cpu.Limit = uint64(config.Cgroups.CpuShares)\n\t}\n\tspec.Cpu.Mask = utils.FixCpuMask(config.Cgroups.CpusetCpus, mi.NumCores)\n\n\tspec.HasDiskIo = true\n\n\treturn spec\n}\n\nvar (\n\thasNetworkModes = map[string]bool{\n\t\t\"host\": true,\n\t\t\"bridge\": true,\n\t\t\"default\": true,\n\t}\n)\n\nfunc hasNet(networkMode string) bool {\n\treturn hasNetworkModes[networkMode]\n}\n\nfunc (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {\n\tmi, err := self.machineInfoFactory.GetMachineInfo()\n\tif err != nil {\n\t\treturn info.ContainerSpec{}, err\n\t}\n\tlibcontainerConfig, err := self.readLibcontainerConfig()\n\tif err != nil {\n\t\treturn info.ContainerSpec{}, err\n\t}\n\n\tspec := libcontainerConfigToContainerSpec(libcontainerConfig, mi)\n\tspec.CreationTime = self.creationTime\n\n\tspec.HasFilesystem = false\n\tswitch self.storageDriver {\n\tcase aufsStorageDriver:\n\tcase overlayStorageDriver:\n\t\tspec.HasFilesystem = true\n\t}\n\n\tspec.Labels = self.labels\n\tspec.Image = self.image\n\tspec.HasNetwork = hasNet(self.networkMode)\n\n\treturn spec, err\n}\n\nfunc (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {\n\tswitch self.storageDriver {\n\tcase aufsStorageDriver:\n\tcase overlayStorageDriver:\n\t\tbreak\n\tdefault:\n\t\treturn nil\n\t}\n\n\t\/\/ As of now we assume that all the storage dirs are on the same device.\n\t\/\/ The first storage dir will be that of the image layers.\n\tdeviceInfo, err := self.fsInfo.GetDirFsDevice(self.storageDirs[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmi, err := self.machineInfoFactory.GetMachineInfo()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar limit uint64 = 0\n\t\/\/ Docker does not impose any filesystem limits for containers. So use capacity as limit.\n\tfor _, fs := range mi.Filesystems {\n\t\tif fs.Device == deviceInfo.Device {\n\t\t\tlimit = fs.Capacity\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit}\n\n\tfsStat.Usage = self.fsHandler.usage()\n\tstats.Filesystem = append(stats.Filesystem, fsStat)\n\n\treturn nil\n}\n\n\/\/ TODO(vmarmol): Get from libcontainer API instead of cgroup manager when we don't have to support older Dockers.\nfunc (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) {\n\tstats, err := containerlibcontainer.GetStats(self.cgroupManager, self.rootFs, self.pid)\n\tif err != nil {\n\t\treturn stats, err\n\t}\n\t\/\/ Clean up stats for containers that don't have their own network - this\n\t\/\/ includes containers running in Kubernetes pods that use the network of the\n\t\/\/ infrastructure container. This stops metrics being reported multiple times\n\t\/\/ for each container in a pod.\n\tif !hasNet(self.networkMode) {\n\t\tstats.Network = info.NetworkStats{}\n\t}\n\n\t\/\/ Get filesystem stats.\n\terr = self.getFsStats(stats)\n\tif err != nil {\n\t\treturn stats, err\n\t}\n\n\treturn stats, nil\n}\n\nfunc (self *dockerContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {\n\t\/\/ No-op for Docker driver.\n\treturn []info.ContainerReference{}, nil\n}\n\nfunc (self *dockerContainerHandler) GetCgroupPath(resource string) (string, error) {\n\tpath, ok := self.cgroupPaths[resource]\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"could not find path for resource %q for container %q\\n\", resource, self.name)\n\t}\n\treturn path, nil\n}\n\nfunc (self *dockerContainerHandler) ListThreads(listType container.ListType) ([]int, error) {\n\t\/\/ TODO(vmarmol): Implement.\n\treturn nil, nil\n}\n\nfunc (self *dockerContainerHandler) GetContainerLabels() map[string]string {\n\treturn self.labels\n}\n\nfunc (self *dockerContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {\n\treturn containerlibcontainer.GetProcesses(self.cgroupManager)\n}\n\nfunc (self *dockerContainerHandler) WatchSubcontainers(events chan container.SubcontainerEvent) error {\n\treturn fmt.Errorf(\"watch is unimplemented in the Docker container driver\")\n}\n\nfunc (self *dockerContainerHandler) StopWatchingSubcontainers() error {\n\t\/\/ No-op for Docker driver.\n\treturn nil\n}\n\nfunc (self *dockerContainerHandler) Exists() bool {\n\treturn containerlibcontainer.Exists(*dockerRootDir, *dockerRunDir, self.id)\n}\n\nfunc DockerInfo() (map[string]string, error) {\n\tclient, err := Client()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to communicate with docker daemon: %v\", err)\n\t}\n\tinfo, err := client.Info()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn info.Map(), nil\n}\n\nfunc DockerImages() ([]docker.APIImages, error) {\n\tclient, err := Client()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to communicate with docker daemon: %v\", err)\n\t}\n\timages, err := client.ListImages(docker.ListImagesOptions{All: false})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn images, nil\n}\n<commit_msg>Fix case statements dealing with storageDriver<commit_after>\/\/ Copyright 2014 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Handler for Docker containers.\npackage docker\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/google\/cadvisor\/container\"\n\tcontainerlibcontainer \"github.com\/google\/cadvisor\/container\/libcontainer\"\n\t\"github.com\/google\/cadvisor\/fs\"\n\tinfo \"github.com\/google\/cadvisor\/info\/v1\"\n\t\"github.com\/google\/cadvisor\/utils\"\n\n\tdocker \"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/opencontainers\/runc\/libcontainer\/cgroups\"\n\tcgroupfs \"github.com\/opencontainers\/runc\/libcontainer\/cgroups\/fs\"\n\tlibcontainerconfigs \"github.com\/opencontainers\/runc\/libcontainer\/configs\"\n)\n\nconst (\n\t\/\/ Path to aufs dir where all the files exist.\n\t\/\/ aufs\/layers is ignored here since it does not hold a lot of data.\n\t\/\/ aufs\/mnt contains the mount points used to compose the rootfs. Hence it is also ignored.\n\tpathToAufsDir = \"aufs\/diff\"\n\t\/\/ Path to the directory where docker stores log files if the json logging driver is enabled.\n\tpathToContainersDir = \"containers\"\n\t\/\/ Path to the overlayfs storage driver directory.\n\tpathToOverlayDir = \"overlay\"\n)\n\ntype dockerContainerHandler struct {\n\tclient *docker.Client\n\tname string\n\tid string\n\taliases []string\n\tmachineInfoFactory info.MachineInfoFactory\n\n\t\/\/ Absolute path to the cgroup hierarchies of this container.\n\t\/\/ (e.g.: \"cpu\" -> \"\/sys\/fs\/cgroup\/cpu\/test\")\n\tcgroupPaths map[string]string\n\n\t\/\/ Manager of this container's cgroups.\n\tcgroupManager cgroups.Manager\n\n\tstorageDriver storageDriver\n\tfsInfo fs.FsInfo\n\tstorageDirs []string\n\n\t\/\/ Time at which this container was created.\n\tcreationTime time.Time\n\n\t\/\/ Metadata labels associated with the container.\n\tlabels map[string]string\n\n\t\/\/ The container PID used to switch namespaces as required\n\tpid int\n\n\t\/\/ Image name used for this container.\n\timage string\n\n\t\/\/ The host root FS to read\n\trootFs string\n\n\t\/\/ The network mode of the container\n\tnetworkMode string\n\n\t\/\/ Filesystem handler.\n\tfsHandler fsHandler\n}\n\nfunc newDockerContainerHandler(\n\tclient *docker.Client,\n\tname string,\n\tmachineInfoFactory info.MachineInfoFactory,\n\tfsInfo fs.FsInfo,\n\tstorageDriver storageDriver,\n\tcgroupSubsystems *containerlibcontainer.CgroupSubsystems,\n\tinHostNamespace bool,\n) (container.ContainerHandler, error) {\n\t\/\/ Create the cgroup paths.\n\tcgroupPaths := make(map[string]string, len(cgroupSubsystems.MountPoints))\n\tfor key, val := range cgroupSubsystems.MountPoints {\n\t\tcgroupPaths[key] = path.Join(val, name)\n\t}\n\n\t\/\/ Generate the equivalent cgroup manager for this container.\n\tcgroupManager := &cgroupfs.Manager{\n\t\tCgroups: &libcontainerconfigs.Cgroup{\n\t\t\tName: name,\n\t\t},\n\t\tPaths: cgroupPaths,\n\t}\n\n\trootFs := \"\/\"\n\tif !inHostNamespace {\n\t\trootFs = \"\/rootfs\"\n\t}\n\n\tid := ContainerNameToDockerId(name)\n\n\t\/\/ Add the Containers dir where the log files are stored.\n\tstorageDirs := []string{path.Join(*dockerRootDir, pathToContainersDir, id)}\n\n\tswitch storageDriver {\n\tcase aufsStorageDriver:\n\t\t\/\/ Add writable layer for aufs.\n\t\tstorageDirs = append(storageDirs, path.Join(*dockerRootDir, pathToAufsDir, id))\n\tcase overlayStorageDriver:\n\t\tstorageDirs = append(storageDirs, path.Join(*dockerRootDir, pathToOverlayDir, id))\n\t}\n\n\thandler := &dockerContainerHandler{\n\t\tid: id,\n\t\tclient: client,\n\t\tname: name,\n\t\tmachineInfoFactory: machineInfoFactory,\n\t\tcgroupPaths: cgroupPaths,\n\t\tcgroupManager: cgroupManager,\n\t\tstorageDriver: storageDriver,\n\t\tfsInfo: fsInfo,\n\t\trootFs: rootFs,\n\t\tstorageDirs: storageDirs,\n\t\tfsHandler: newFsHandler(time.Minute, storageDirs, fsInfo),\n\t}\n\n\t\/\/ Start the filesystem handler.\n\thandler.fsHandler.start()\n\n\t\/\/ We assume that if Inspect fails then the container is not known to docker.\n\tctnr, err := client.InspectContainer(id)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to inspect container %q: %v\", id, err)\n\t}\n\thandler.creationTime = ctnr.Created\n\thandler.pid = ctnr.State.Pid\n\n\t\/\/ Add the name and bare ID as aliases of the container.\n\thandler.aliases = append(handler.aliases, strings.TrimPrefix(ctnr.Name, \"\/\"), id)\n\thandler.labels = ctnr.Config.Labels\n\thandler.image = ctnr.Config.Image\n\thandler.networkMode = ctnr.HostConfig.NetworkMode\n\n\treturn handler, nil\n}\n\nfunc (self *dockerContainerHandler) ContainerReference() (info.ContainerReference, error) {\n\treturn info.ContainerReference{\n\t\tName: self.name,\n\t\tAliases: self.aliases,\n\t\tNamespace: DockerNamespace,\n\t}, nil\n}\n\nfunc (self *dockerContainerHandler) readLibcontainerConfig() (*libcontainerconfigs.Config, error) {\n\tconfig, err := containerlibcontainer.ReadConfig(*dockerRootDir, *dockerRunDir, self.id)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read libcontainer config: %v\", err)\n\t}\n\n\t\/\/ Replace cgroup parent and name with our own since we may be running in a different context.\n\tif config.Cgroups == nil {\n\t\tconfig.Cgroups = new(libcontainerconfigs.Cgroup)\n\t}\n\tconfig.Cgroups.Name = self.name\n\tconfig.Cgroups.Parent = \"\/\"\n\n\treturn config, nil\n}\n\nfunc libcontainerConfigToContainerSpec(config *libcontainerconfigs.Config, mi *info.MachineInfo) info.ContainerSpec {\n\tvar spec info.ContainerSpec\n\tspec.HasMemory = true\n\tspec.Memory.Limit = math.MaxUint64\n\tspec.Memory.SwapLimit = math.MaxUint64\n\tif config.Cgroups.Memory > 0 {\n\t\tspec.Memory.Limit = uint64(config.Cgroups.Memory)\n\t}\n\tif config.Cgroups.MemorySwap > 0 {\n\t\tspec.Memory.SwapLimit = uint64(config.Cgroups.MemorySwap)\n\t}\n\n\t\/\/ Get CPU info\n\tspec.HasCpu = true\n\tspec.Cpu.Limit = 1024\n\tif config.Cgroups.CpuShares != 0 {\n\t\tspec.Cpu.Limit = uint64(config.Cgroups.CpuShares)\n\t}\n\tspec.Cpu.Mask = utils.FixCpuMask(config.Cgroups.CpusetCpus, mi.NumCores)\n\n\tspec.HasDiskIo = true\n\n\treturn spec\n}\n\nvar (\n\thasNetworkModes = map[string]bool{\n\t\t\"host\": true,\n\t\t\"bridge\": true,\n\t\t\"default\": true,\n\t}\n)\n\nfunc hasNet(networkMode string) bool {\n\treturn hasNetworkModes[networkMode]\n}\n\nfunc (self *dockerContainerHandler) GetSpec() (info.ContainerSpec, error) {\n\tmi, err := self.machineInfoFactory.GetMachineInfo()\n\tif err != nil {\n\t\treturn info.ContainerSpec{}, err\n\t}\n\tlibcontainerConfig, err := self.readLibcontainerConfig()\n\tif err != nil {\n\t\treturn info.ContainerSpec{}, err\n\t}\n\n\tspec := libcontainerConfigToContainerSpec(libcontainerConfig, mi)\n\tspec.CreationTime = self.creationTime\n\n\tswitch self.storageDriver {\n\tcase aufsStorageDriver, overlayStorageDriver:\n\t\tspec.HasFilesystem = true\n\tdefault:\n\t\tspec.HasFilesystem = false\n\t}\n\n\tspec.Labels = self.labels\n\tspec.Image = self.image\n\tspec.HasNetwork = hasNet(self.networkMode)\n\n\treturn spec, err\n}\n\nfunc (self *dockerContainerHandler) getFsStats(stats *info.ContainerStats) error {\n\tswitch self.storageDriver {\n\tcase aufsStorageDriver, overlayStorageDriver:\n\tdefault:\n\t\treturn nil\n\t}\n\n\t\/\/ As of now we assume that all the storage dirs are on the same device.\n\t\/\/ The first storage dir will be that of the image layers.\n\tdeviceInfo, err := self.fsInfo.GetDirFsDevice(self.storageDirs[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmi, err := self.machineInfoFactory.GetMachineInfo()\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar limit uint64 = 0\n\t\/\/ Docker does not impose any filesystem limits for containers. So use capacity as limit.\n\tfor _, fs := range mi.Filesystems {\n\t\tif fs.Device == deviceInfo.Device {\n\t\t\tlimit = fs.Capacity\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfsStat := info.FsStats{Device: deviceInfo.Device, Limit: limit}\n\n\tfsStat.Usage = self.fsHandler.usage()\n\tstats.Filesystem = append(stats.Filesystem, fsStat)\n\n\treturn nil\n}\n\n\/\/ TODO(vmarmol): Get from libcontainer API instead of cgroup manager when we don't have to support older Dockers.\nfunc (self *dockerContainerHandler) GetStats() (*info.ContainerStats, error) {\n\tstats, err := containerlibcontainer.GetStats(self.cgroupManager, self.rootFs, self.pid)\n\tif err != nil {\n\t\treturn stats, err\n\t}\n\t\/\/ Clean up stats for containers that don't have their own network - this\n\t\/\/ includes containers running in Kubernetes pods that use the network of the\n\t\/\/ infrastructure container. This stops metrics being reported multiple times\n\t\/\/ for each container in a pod.\n\tif !hasNet(self.networkMode) {\n\t\tstats.Network = info.NetworkStats{}\n\t}\n\n\t\/\/ Get filesystem stats.\n\terr = self.getFsStats(stats)\n\tif err != nil {\n\t\treturn stats, err\n\t}\n\n\treturn stats, nil\n}\n\nfunc (self *dockerContainerHandler) ListContainers(listType container.ListType) ([]info.ContainerReference, error) {\n\t\/\/ No-op for Docker driver.\n\treturn []info.ContainerReference{}, nil\n}\n\nfunc (self *dockerContainerHandler) GetCgroupPath(resource string) (string, error) {\n\tpath, ok := self.cgroupPaths[resource]\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"could not find path for resource %q for container %q\\n\", resource, self.name)\n\t}\n\treturn path, nil\n}\n\nfunc (self *dockerContainerHandler) ListThreads(listType container.ListType) ([]int, error) {\n\t\/\/ TODO(vmarmol): Implement.\n\treturn nil, nil\n}\n\nfunc (self *dockerContainerHandler) GetContainerLabels() map[string]string {\n\treturn self.labels\n}\n\nfunc (self *dockerContainerHandler) ListProcesses(listType container.ListType) ([]int, error) {\n\treturn containerlibcontainer.GetProcesses(self.cgroupManager)\n}\n\nfunc (self *dockerContainerHandler) WatchSubcontainers(events chan container.SubcontainerEvent) error {\n\treturn fmt.Errorf(\"watch is unimplemented in the Docker container driver\")\n}\n\nfunc (self *dockerContainerHandler) StopWatchingSubcontainers() error {\n\t\/\/ No-op for Docker driver.\n\treturn nil\n}\n\nfunc (self *dockerContainerHandler) Exists() bool {\n\treturn containerlibcontainer.Exists(*dockerRootDir, *dockerRunDir, self.id)\n}\n\nfunc DockerInfo() (map[string]string, error) {\n\tclient, err := Client()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to communicate with docker daemon: %v\", err)\n\t}\n\tinfo, err := client.Info()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn info.Map(), nil\n}\n\nfunc DockerImages() ([]docker.APIImages, error) {\n\tclient, err := Client()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to communicate with docker daemon: %v\", err)\n\t}\n\timages, err := client.ListImages(docker.ListImagesOptions{All: false})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn images, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright 2013 Juliano Martinez <juliano@martinez.io>\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n @author: Juliano Martinez\n*\/\n\npackage utils\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"log\/syslog\"\n)\n\nvar (\n\t_log, s_err = syslog.New(syslog.LOG_ERR, \"cloudboss\")\n)\n\nfunc Check(err error, message string) {\n\tcheck(err, message, false)\n}\n\nfunc CheckPanic(err error, message string) {\n\tcheck(err, message, true)\n}\n\nfunc Log(message string) {\n\tCheckPanic(s_err, \"Unable to write syslog message\")\n\t_log.Info(message)\n\tdefer _log.Close()\n}\n\nfunc check(err error, message string, _panic bool) {\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"%s: %s\", message, err)\n\t\tif s_err != nil {\n\t\t\tlog.Fatalln(\"Unable to write syslog message\")\n\t\t}\n\t\t_log.Warning(msg)\n\t\tdefer _log.Close()\n\t\tlog.Fatalln(msg)\n\t\tif _panic {\n\t\t\tpanic(msg)\n\t\t}\n\t}\n}\n<commit_msg>changed log name<commit_after>\/*\n Copyright 2013 Juliano Martinez <juliano@martinez.io>\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\n @author: Juliano Martinez\n*\/\n\npackage utils\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"log\/syslog\"\n)\n\nvar (\n\t_log, s_err = syslog.New(syslog.LOG_ERR, \"hpr\")\n)\n\nfunc Check(err error, message string) {\n\tcheck(err, message, false)\n}\n\nfunc CheckPanic(err error, message string) {\n\tcheck(err, message, true)\n}\n\nfunc Log(message string) {\n\tCheckPanic(s_err, \"Unable to write syslog message\")\n\t_log.Info(message)\n\tdefer _log.Close()\n}\n\nfunc check(err error, message string, _panic bool) {\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"%s: %s\", message, err)\n\t\tif s_err != nil {\n\t\t\tlog.Fatalln(\"Unable to write syslog message\")\n\t\t}\n\t\t_log.Warning(msg)\n\t\tdefer _log.Close()\n\t\tlog.Fatalln(msg)\n\t\tif _panic {\n\t\t\tpanic(msg)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package utils\n\nimport (\n\t\"sync\"\n\n\t\"github.com\/pborman\/uuid\"\n)\n\n\/\/ GenerateUUID generates a new uuid for a minion\nfunc GenerateUUID(name string) uuid.UUID {\n\tu := uuid.NewSHA1(uuid.NameSpace_DNS, []byte(name))\n\n\treturn u\n}\n\n\/\/ Concurrentmap is a map type that can be safely shared between\n\/\/ goroutines that require read\/write access to a map\ntype ConcurrentMap struct {\n\tsync.RWMutex\n\titems map[string]interface{}\n}\n\n\/\/ ConcurrentMapItem contains a key\/value pair item of a concurrent map\ntype ConcurrentMapItem struct {\n\tKey string\n\tValue interface{}\n}\n\n\/\/ NewConcurrentMap creates a new concurrent map\nfunc NewConcurrentMap() *ConcurrentMap {\n\tcm := &ConcurrentMap{\n\t\titems: make(map[string]interface{}),\n\t}\n\n\treturn cm\n}\n\n\/\/ Set adds an item to a concurrent map\nfunc (cm *ConcurrentMap) Set(key string, value interface{}) {\n\tcm.Lock()\n\tdefer cm.Unlock()\n\n\tcm.items[key] = value\n}\n\n\/\/ Get retrieves the value for a concurrent map item\nfunc (cm *ConcurrentMap) Get(key string) (interface{}, bool) {\n\tcm.Lock()\n\tdefer cm.Unlock()\n\n\tvalue, ok := cm.items[key]\n\n\treturn value, ok\n}\n\n\/\/ Iter iterates over the items in a concurrent map\n\/\/ Each item is sent over a channel, so that\n\/\/ we can iterate over the map using the builtin range keyword\nfunc (cm *ConcurrentMap) Iter() <-chan ConcurrentMapItem {\n\tc := make(chan ConcurrentMapItem)\n\n\tf := func() {\n\t\tcm.Lock()\n\t\tdefer cm.Unlock()\n\n\t\tfor k, v := range cm.items {\n\t\t\tc <- ConcurrentMapItem{k, v}\n\t\t}\n\t\tclose(c)\n\t}\n\tgo f()\n\n\treturn c\n}\n\n\/\/ ConcurrentSlice type that can be safely shared between goroutines\ntype ConcurrentSlice struct {\n\tsync.RWMutex\n\titems []interface{}\n}\n\n\/\/ ConcurrentSliceItem contains the index\/value pair of an item in a\n\/\/ concurrent slice\ntype ConcurrentSliceItem struct {\n\tIndex int\n\tValue interface{}\n}\n\n\/\/ NewConcurrentSlice creates a new concurrent slice\nfunc NewConcurrentSlice() *ConcurrentSlice {\n\tcs := &ConcurrentSlice{\n\t\titems: make([]interface{}, 0),\n\t}\n\n\treturn cs\n}\n\n\/\/ Append adds an item to the concurrent slice\nfunc (cs *ConcurrentSlice) Append(item interface{}) {\n\tcs.Lock()\n\tdefer cs.Unlock()\n\n\tcs.items = append(cs.items, item)\n}\n\n\/\/ Iter iterates over the items in the concurrent slice\n\/\/ Each item is sent over a channel, so that\n\/\/ we can iterate over the slice using the builin range keyword\nfunc (cs *ConcurrentSlice) Iter() <-chan ConcurrentSliceItem {\n\tc := make(chan ConcurrentSliceItem)\n\n\tf := func() {\n\t\tcs.Lock()\n\t\tdefer cs.Lock()\n\t\tfor index, value := range cs.items {\n\t\t\tc <- ConcurrentSliceItem{index, value}\n\t\t}\n\t\tclose(c)\n\t}\n\tgo f()\n\n\treturn c\n}\n<commit_msg>doc comment update<commit_after>package utils\n\nimport (\n\t\"sync\"\n\n\t\"github.com\/pborman\/uuid\"\n)\n\n\/\/ GenerateUUID generates a new uuid for a minion\nfunc GenerateUUID(name string) uuid.UUID {\n\tu := uuid.NewSHA1(uuid.NameSpace_DNS, []byte(name))\n\n\treturn u\n}\n\n\/\/ ConcurrentMap is a map type that can be safely shared between\n\/\/ goroutines that require read\/write access to a map\ntype ConcurrentMap struct {\n\tsync.RWMutex\n\titems map[string]interface{}\n}\n\n\/\/ ConcurrentMapItem contains a key\/value pair item of a concurrent map\ntype ConcurrentMapItem struct {\n\tKey string\n\tValue interface{}\n}\n\n\/\/ NewConcurrentMap creates a new concurrent map\nfunc NewConcurrentMap() *ConcurrentMap {\n\tcm := &ConcurrentMap{\n\t\titems: make(map[string]interface{}),\n\t}\n\n\treturn cm\n}\n\n\/\/ Set adds an item to a concurrent map\nfunc (cm *ConcurrentMap) Set(key string, value interface{}) {\n\tcm.Lock()\n\tdefer cm.Unlock()\n\n\tcm.items[key] = value\n}\n\n\/\/ Get retrieves the value for a concurrent map item\nfunc (cm *ConcurrentMap) Get(key string) (interface{}, bool) {\n\tcm.Lock()\n\tdefer cm.Unlock()\n\n\tvalue, ok := cm.items[key]\n\n\treturn value, ok\n}\n\n\/\/ Iter iterates over the items in a concurrent map\n\/\/ Each item is sent over a channel, so that\n\/\/ we can iterate over the map using the builtin range keyword\nfunc (cm *ConcurrentMap) Iter() <-chan ConcurrentMapItem {\n\tc := make(chan ConcurrentMapItem)\n\n\tf := func() {\n\t\tcm.Lock()\n\t\tdefer cm.Unlock()\n\n\t\tfor k, v := range cm.items {\n\t\t\tc <- ConcurrentMapItem{k, v}\n\t\t}\n\t\tclose(c)\n\t}\n\tgo f()\n\n\treturn c\n}\n\n\/\/ ConcurrentSlice type that can be safely shared between goroutines\ntype ConcurrentSlice struct {\n\tsync.RWMutex\n\titems []interface{}\n}\n\n\/\/ ConcurrentSliceItem contains the index\/value pair of an item in a\n\/\/ concurrent slice\ntype ConcurrentSliceItem struct {\n\tIndex int\n\tValue interface{}\n}\n\n\/\/ NewConcurrentSlice creates a new concurrent slice\nfunc NewConcurrentSlice() *ConcurrentSlice {\n\tcs := &ConcurrentSlice{\n\t\titems: make([]interface{}, 0),\n\t}\n\n\treturn cs\n}\n\n\/\/ Append adds an item to the concurrent slice\nfunc (cs *ConcurrentSlice) Append(item interface{}) {\n\tcs.Lock()\n\tdefer cs.Unlock()\n\n\tcs.items = append(cs.items, item)\n}\n\n\/\/ Iter iterates over the items in the concurrent slice\n\/\/ Each item is sent over a channel, so that\n\/\/ we can iterate over the slice using the builin range keyword\nfunc (cs *ConcurrentSlice) Iter() <-chan ConcurrentSliceItem {\n\tc := make(chan ConcurrentSliceItem)\n\n\tf := func() {\n\t\tcs.Lock()\n\t\tdefer cs.Lock()\n\t\tfor index, value := range cs.items {\n\t\t\tc <- ConcurrentSliceItem{index, value}\n\t\t}\n\t\tclose(c)\n\t}\n\tgo f()\n\n\treturn c\n}\n<|endoftext|>"} {"text":"<commit_before>package utils\n\nimport (\n\t\/\/\"fmt\"\n\t\"fmt\"\n\t\"math\"\n\t\"math\/big\"\n\t\"math\/rand\"\n\t\"time\"\n)\n\ntype TupleInt struct {\n\tA int\n\tB int\n}\n\n\/\/Euclidean modulous\nfunc Mod(a, b int) int {\n\tab := big.NewInt(int64(a))\n\tbb := big.NewInt(int64(b))\n\treturn int(ab.Mod(ab, bb).Int64())\n}\n\n\/\/Dot product\nfunc DotInt(a, b []int) int {\n\tif len(a) != len(b) {\n\t\tpanic(\"Params have differing lengths\")\n\t}\n\tresult := 0\n\tfor i := range a {\n\t\tresult += a[i] * b[i]\n\t}\n\treturn result\n}\n\n\/\/Populates integer slice with index values\nfunc FillSliceWithIdxInt(values []int) {\n\tfor i := range values {\n\t\tvalues[i] = i\n\t}\n}\n\n\/\/Populates float64 slice with specified value\nfunc FillSliceInt(values []int, value int) {\n\tfor i := range values {\n\t\tvalues[i] = value\n\t}\n}\n\n\/\/Populates float64 slice with specified value\nfunc FillSliceFloat64(values []float64, value float64) {\n\tfor i := range values {\n\t\tvalues[i] = value\n\t}\n}\n\n\/\/Populates bool slice with specified value\nfunc FillSliceBool(values []bool, value bool) {\n\tfor i := range values {\n\t\tvalues[i] = value\n\t}\n}\n\n\/\/Populates bool slice with specified value\nfunc FillSliceRangeBool(values []bool, value bool, start, length int) {\n\tfor i := 0; i < length; i++ {\n\t\tvalues[start+i] = value\n\t}\n}\n\n\/\/Returns the subset of values specified by indices\nfunc SubsetSliceInt(values, indices []int) []int {\n\tresult := make([]int, len(indices))\n\tfor i, val := range indices {\n\t\tresult[i] = values[val]\n\t}\n\treturn result\n}\n\n\/\/Returns the subset of values specified by indices\nfunc SubsetSliceFloat64(values []float64, indices []int) []float64 {\n\tresult := make([]float64, len(indices))\n\tfor i, val := range indices {\n\t\tresult[i] = values[val]\n\t}\n\treturn result\n}\n\n\/\/Creates an integer slice with indices containing\n\/\/ the specified initial value\nfunc MakeSliceInt(size, initialValue int) []int {\n\tresult := make([]int, size)\n\tif initialValue != 0 {\n\t\tfor i, _ := range result {\n\t\t\tresult[i] = initialValue\n\t\t}\n\t}\n\treturn result\n}\n\nfunc MakeSliceFloat64(size int, initialValue float64) []float64 {\n\tresult := make([]float64, size)\n\tif initialValue != 0 {\n\t\tfor i, _ := range result {\n\t\t\tresult[i] = initialValue\n\t\t}\n\t}\n\treturn result\n}\n\n\/\/Returns cartesian product of specified\n\/\/2d arrayb\nfunc CartProductInt(values [][]int) [][]int {\n\tpos := make([]int, len(values))\n\tvar result [][]int\n\n\tfor pos[0] < len(values[0]) {\n\t\ttemp := make([]int, len(values))\n\t\tfor j := 0; j < len(values); j++ {\n\t\t\ttemp[j] = values[j][pos[j]]\n\t\t}\n\t\tresult = append(result, temp)\n\t\tpos[len(values)-1]++\n\t\tfor k := len(values) - 1; k >= 1; k-- {\n\t\t\tif pos[k] >= len(values[k]) {\n\t\t\t\tpos[k] = 0\n\t\t\t\tpos[k-1]++\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}\n\n\/\/Searches int slice for specified integer\nfunc ContainsInt(q int, vals []int) bool {\n\tfor _, val := range vals {\n\t\tif val == q {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc ContainsFloat64(q float64, vals []float64) bool {\n\tfor _, val := range vals {\n\t\tif val == q {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ type CompareInt func(int) bool\n\n\/\/ func CountInt(q CompareInt, vals []int) int {\n\/\/ \tcount := 0\n\/\/ \tfor i := range vals {\n\/\/ \t\tif q(i) {\n\/\/ \t\t\tcount++\n\/\/ \t\t}\n\/\/ \t}\n\/\/ \treturn count\n\/\/ }\n\nfunc RandFloatRange(min, max float64) float64 {\n\treturn rand.Float64()*(max-min) + min\n}\n\n\/\/returns max index wise comparison\nfunc MaxInt(a, b []int) []int {\n\tresult := make([]int, len(a))\n\tfor i := 0; i < len(a); i++ {\n\t\tif a[i] > b[i] {\n\t\t\tresult[i] = a[i]\n\t\t} else {\n\t\t\tresult[i] = b[i]\n\t\t}\n\t}\n\n\treturn result\n}\n\n\/\/Returns max value from specified int slice\nfunc MaxSliceInt(values []int) int {\n\tmax := 0\n\tfor i := 0; i < len(values); i++ {\n\t\tif values[i] > max {\n\t\t\tmax = values[i]\n\t\t}\n\t}\n\treturn max\n}\n\n\/\/Returns max value from specified float slice\nfunc MaxSliceFloat64(values []float64) float64 {\n\tmax := 0.0\n\tfor i := 0; i < len(values); i++ {\n\t\tif values[i] > max {\n\t\t\tmax = values[i]\n\t\t}\n\t}\n\treturn max\n}\n\n\/\/Returns product of set of integers\nfunc ProdInt(vals []int) int {\n\tsum := 1\n\tfor x := 0; x < len(vals); x++ {\n\t\tsum *= vals[x]\n\t}\n\n\tif sum == 1 {\n\t\treturn 0\n\t} else {\n\t\treturn sum\n\t}\n}\n\n\/\/Returns cumulative product\nfunc CumProdInt(vals []int) []int {\n\tif len(vals) < 2 {\n\t\treturn vals\n\t}\n\tresult := make([]int, len(vals))\n\tresult[0] = vals[0]\n\tfor x := 1; x < len(vals); x++ {\n\t\tresult[x] = vals[x] * result[x-1]\n\t}\n\n\treturn result\n}\n\n\/\/Returns cumulative product starting from end\nfunc RevCumProdInt(vals []int) []int {\n\tif len(vals) < 2 {\n\t\treturn vals\n\t}\n\tresult := make([]int, len(vals))\n\tresult[len(vals)-1] = vals[len(vals)-1]\n\tfor x := len(vals) - 2; x >= 0; x-- {\n\t\tresult[x] = vals[x] * result[x+1]\n\t}\n\n\treturn result\n}\n\nfunc RoundPrec(x float64, prec int) float64 {\n\tif math.IsNaN(x) || math.IsInf(x, 0) {\n\t\treturn x\n\t}\n\n\tsign := 1.0\n\tif x < 0 {\n\t\tsign = -1\n\t\tx *= -1\n\t}\n\n\tvar rounder float64\n\tpow := math.Pow(10, float64(prec))\n\tintermed := x * pow\n\t_, frac := math.Modf(intermed)\n\n\tif frac >= 0.5 {\n\t\trounder = math.Ceil(intermed)\n\t} else {\n\t\trounder = math.Floor(intermed)\n\t}\n\n\treturn rounder \/ pow * sign\n}\n\n\/\/Helper for unit tests where int literals are easier\n\/\/ to read\nfunc Make2DBool(values [][]int) [][]bool {\n\tresult := make([][]bool, len(values))\n\n\tfor i, val := range values {\n\t\tresult[i] = make([]bool, len(val))\n\t\tfor j, col := range val {\n\t\t\tresult[i][j] = col == 1\n\t\t}\n\t}\n\n\treturn result\n}\n\nfunc Make1DBool(values []int) []bool {\n\tresult := make([]bool, len(values))\n\tfor i, val := range values {\n\t\tresult[i] = val == 1\n\t}\n\treturn result\n}\n\n\/\/Returns number of on bits\nfunc CountInt(values []int, value int) int {\n\tcount := 0\n\tfor _, val := range values {\n\t\tif val == value {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}\n\n\/\/Returns number of on bits\nfunc CountFloat64(values []float64, value float64) int {\n\tcount := 0\n\tfor _, val := range values {\n\t\tif val == value {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}\n\n\/\/Returns number of on bits\nfunc CountTrue(values []bool) int {\n\tcount := 0\n\tfor _, val := range values {\n\t\tif val {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}\n\n\/\/Or's 2 bool slices\nfunc OrBool(a, b []bool) []bool {\n\tresult := make([]bool, len(a))\n\tfor i, val := range a {\n\t\tresult[i] = val || b[i]\n\t}\n\treturn result\n}\n\n\/\/Returns random slice of floats of specified length\nfunc RandomSample(length int) []float64 {\n\tresult := make([]float64, length)\n\n\tfor i, _ := range result {\n\t\tresult[i] = rand.Float64()\n\t}\n\n\treturn result\n}\n\nfunc Bool2Int(s []bool) []int {\n\tresult := make([]int, len(s))\n\tfor idx, val := range s {\n\t\tif val {\n\t\t\tresult[idx] = 1\n\t\t} else {\n\t\t\tresult[idx] = 0\n\t\t}\n\n\t}\n\treturn result\n}\n\nfunc timeTrack(start time.Time, name string) {\n\telapsed := time.Since(start)\n\tfmt.Printf(\"%s took %s \\n\", name, elapsed)\n}\n\nfunc SumSliceFloat64(values []float64) float64 {\n\tresult := 0.0\n\tfor _, val := range values {\n\t\tresult += val\n\t}\n\treturn result\n}\n\n\/\/Returns \"on\" indices\nfunc OnIndices(s []bool) []int {\n\tvar result []int\n\tfor idx, val := range s {\n\t\tif val {\n\t\t\tresult = append(result, idx)\n\t\t}\n\t}\n\treturn result\n}\n\n\/\/ Returns complement of s and t\nfunc Complement(s []int, t []int) []int {\n\tresult := make([]int, 0, len(s))\n\tfor _, val := range s {\n\t\tfound := false\n\t\tfor _, v2 := range t {\n\t\t\tif v2 == val {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tresult = append(result, val)\n\t\t}\n\t}\n\treturn result\n}\n\nfunc Add(s []int, t []int) []int {\n\tresult := make([]int, 0, len(s)+len(t))\n\tresult = append(result, s...)\n\n\tfor _, val := range t {\n\t\tif ContainsInt(val, s) {\n\t\t\tresult = append(result, val)\n\t\t}\n\t}\n\treturn result\n}\n<commit_msg>fix add<commit_after>package utils\n\nimport (\n\t\/\/\"fmt\"\n\t\"fmt\"\n\t\"math\"\n\t\"math\/big\"\n\t\"math\/rand\"\n\t\"time\"\n)\n\ntype TupleInt struct {\n\tA int\n\tB int\n}\n\n\/\/Euclidean modulous\nfunc Mod(a, b int) int {\n\tab := big.NewInt(int64(a))\n\tbb := big.NewInt(int64(b))\n\treturn int(ab.Mod(ab, bb).Int64())\n}\n\n\/\/Dot product\nfunc DotInt(a, b []int) int {\n\tif len(a) != len(b) {\n\t\tpanic(\"Params have differing lengths\")\n\t}\n\tresult := 0\n\tfor i := range a {\n\t\tresult += a[i] * b[i]\n\t}\n\treturn result\n}\n\n\/\/Populates integer slice with index values\nfunc FillSliceWithIdxInt(values []int) {\n\tfor i := range values {\n\t\tvalues[i] = i\n\t}\n}\n\n\/\/Populates float64 slice with specified value\nfunc FillSliceInt(values []int, value int) {\n\tfor i := range values {\n\t\tvalues[i] = value\n\t}\n}\n\n\/\/Populates float64 slice with specified value\nfunc FillSliceFloat64(values []float64, value float64) {\n\tfor i := range values {\n\t\tvalues[i] = value\n\t}\n}\n\n\/\/Populates bool slice with specified value\nfunc FillSliceBool(values []bool, value bool) {\n\tfor i := range values {\n\t\tvalues[i] = value\n\t}\n}\n\n\/\/Populates bool slice with specified value\nfunc FillSliceRangeBool(values []bool, value bool, start, length int) {\n\tfor i := 0; i < length; i++ {\n\t\tvalues[start+i] = value\n\t}\n}\n\n\/\/Returns the subset of values specified by indices\nfunc SubsetSliceInt(values, indices []int) []int {\n\tresult := make([]int, len(indices))\n\tfor i, val := range indices {\n\t\tresult[i] = values[val]\n\t}\n\treturn result\n}\n\n\/\/Returns the subset of values specified by indices\nfunc SubsetSliceFloat64(values []float64, indices []int) []float64 {\n\tresult := make([]float64, len(indices))\n\tfor i, val := range indices {\n\t\tresult[i] = values[val]\n\t}\n\treturn result\n}\n\n\/\/Creates an integer slice with indices containing\n\/\/ the specified initial value\nfunc MakeSliceInt(size, initialValue int) []int {\n\tresult := make([]int, size)\n\tif initialValue != 0 {\n\t\tfor i, _ := range result {\n\t\t\tresult[i] = initialValue\n\t\t}\n\t}\n\treturn result\n}\n\nfunc MakeSliceFloat64(size int, initialValue float64) []float64 {\n\tresult := make([]float64, size)\n\tif initialValue != 0 {\n\t\tfor i, _ := range result {\n\t\t\tresult[i] = initialValue\n\t\t}\n\t}\n\treturn result\n}\n\n\/\/Returns cartesian product of specified\n\/\/2d arrayb\nfunc CartProductInt(values [][]int) [][]int {\n\tpos := make([]int, len(values))\n\tvar result [][]int\n\n\tfor pos[0] < len(values[0]) {\n\t\ttemp := make([]int, len(values))\n\t\tfor j := 0; j < len(values); j++ {\n\t\t\ttemp[j] = values[j][pos[j]]\n\t\t}\n\t\tresult = append(result, temp)\n\t\tpos[len(values)-1]++\n\t\tfor k := len(values) - 1; k >= 1; k-- {\n\t\t\tif pos[k] >= len(values[k]) {\n\t\t\t\tpos[k] = 0\n\t\t\t\tpos[k-1]++\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}\n\n\/\/Searches int slice for specified integer\nfunc ContainsInt(q int, vals []int) bool {\n\tfor _, val := range vals {\n\t\tif val == q {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc ContainsFloat64(q float64, vals []float64) bool {\n\tfor _, val := range vals {\n\t\tif val == q {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ type CompareInt func(int) bool\n\n\/\/ func CountInt(q CompareInt, vals []int) int {\n\/\/ \tcount := 0\n\/\/ \tfor i := range vals {\n\/\/ \t\tif q(i) {\n\/\/ \t\t\tcount++\n\/\/ \t\t}\n\/\/ \t}\n\/\/ \treturn count\n\/\/ }\n\nfunc RandFloatRange(min, max float64) float64 {\n\treturn rand.Float64()*(max-min) + min\n}\n\n\/\/returns max index wise comparison\nfunc MaxInt(a, b []int) []int {\n\tresult := make([]int, len(a))\n\tfor i := 0; i < len(a); i++ {\n\t\tif a[i] > b[i] {\n\t\t\tresult[i] = a[i]\n\t\t} else {\n\t\t\tresult[i] = b[i]\n\t\t}\n\t}\n\n\treturn result\n}\n\n\/\/Returns max value from specified int slice\nfunc MaxSliceInt(values []int) int {\n\tmax := 0\n\tfor i := 0; i < len(values); i++ {\n\t\tif values[i] > max {\n\t\t\tmax = values[i]\n\t\t}\n\t}\n\treturn max\n}\n\n\/\/Returns max value from specified float slice\nfunc MaxSliceFloat64(values []float64) float64 {\n\tmax := 0.0\n\tfor i := 0; i < len(values); i++ {\n\t\tif values[i] > max {\n\t\t\tmax = values[i]\n\t\t}\n\t}\n\treturn max\n}\n\n\/\/Returns product of set of integers\nfunc ProdInt(vals []int) int {\n\tsum := 1\n\tfor x := 0; x < len(vals); x++ {\n\t\tsum *= vals[x]\n\t}\n\n\tif sum == 1 {\n\t\treturn 0\n\t} else {\n\t\treturn sum\n\t}\n}\n\n\/\/Returns cumulative product\nfunc CumProdInt(vals []int) []int {\n\tif len(vals) < 2 {\n\t\treturn vals\n\t}\n\tresult := make([]int, len(vals))\n\tresult[0] = vals[0]\n\tfor x := 1; x < len(vals); x++ {\n\t\tresult[x] = vals[x] * result[x-1]\n\t}\n\n\treturn result\n}\n\n\/\/Returns cumulative product starting from end\nfunc RevCumProdInt(vals []int) []int {\n\tif len(vals) < 2 {\n\t\treturn vals\n\t}\n\tresult := make([]int, len(vals))\n\tresult[len(vals)-1] = vals[len(vals)-1]\n\tfor x := len(vals) - 2; x >= 0; x-- {\n\t\tresult[x] = vals[x] * result[x+1]\n\t}\n\n\treturn result\n}\n\nfunc RoundPrec(x float64, prec int) float64 {\n\tif math.IsNaN(x) || math.IsInf(x, 0) {\n\t\treturn x\n\t}\n\n\tsign := 1.0\n\tif x < 0 {\n\t\tsign = -1\n\t\tx *= -1\n\t}\n\n\tvar rounder float64\n\tpow := math.Pow(10, float64(prec))\n\tintermed := x * pow\n\t_, frac := math.Modf(intermed)\n\n\tif frac >= 0.5 {\n\t\trounder = math.Ceil(intermed)\n\t} else {\n\t\trounder = math.Floor(intermed)\n\t}\n\n\treturn rounder \/ pow * sign\n}\n\n\/\/Helper for unit tests where int literals are easier\n\/\/ to read\nfunc Make2DBool(values [][]int) [][]bool {\n\tresult := make([][]bool, len(values))\n\n\tfor i, val := range values {\n\t\tresult[i] = make([]bool, len(val))\n\t\tfor j, col := range val {\n\t\t\tresult[i][j] = col == 1\n\t\t}\n\t}\n\n\treturn result\n}\n\nfunc Make1DBool(values []int) []bool {\n\tresult := make([]bool, len(values))\n\tfor i, val := range values {\n\t\tresult[i] = val == 1\n\t}\n\treturn result\n}\n\n\/\/Returns number of on bits\nfunc CountInt(values []int, value int) int {\n\tcount := 0\n\tfor _, val := range values {\n\t\tif val == value {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}\n\n\/\/Returns number of on bits\nfunc CountFloat64(values []float64, value float64) int {\n\tcount := 0\n\tfor _, val := range values {\n\t\tif val == value {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}\n\n\/\/Returns number of on bits\nfunc CountTrue(values []bool) int {\n\tcount := 0\n\tfor _, val := range values {\n\t\tif val {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}\n\n\/\/Or's 2 bool slices\nfunc OrBool(a, b []bool) []bool {\n\tresult := make([]bool, len(a))\n\tfor i, val := range a {\n\t\tresult[i] = val || b[i]\n\t}\n\treturn result\n}\n\n\/\/Returns random slice of floats of specified length\nfunc RandomSample(length int) []float64 {\n\tresult := make([]float64, length)\n\n\tfor i, _ := range result {\n\t\tresult[i] = rand.Float64()\n\t}\n\n\treturn result\n}\n\nfunc Bool2Int(s []bool) []int {\n\tresult := make([]int, len(s))\n\tfor idx, val := range s {\n\t\tif val {\n\t\t\tresult[idx] = 1\n\t\t} else {\n\t\t\tresult[idx] = 0\n\t\t}\n\n\t}\n\treturn result\n}\n\nfunc timeTrack(start time.Time, name string) {\n\telapsed := time.Since(start)\n\tfmt.Printf(\"%s took %s \\n\", name, elapsed)\n}\n\nfunc SumSliceFloat64(values []float64) float64 {\n\tresult := 0.0\n\tfor _, val := range values {\n\t\tresult += val\n\t}\n\treturn result\n}\n\n\/\/Returns \"on\" indices\nfunc OnIndices(s []bool) []int {\n\tvar result []int\n\tfor idx, val := range s {\n\t\tif val {\n\t\t\tresult = append(result, idx)\n\t\t}\n\t}\n\treturn result\n}\n\n\/\/ Returns complement of s and t\nfunc Complement(s []int, t []int) []int {\n\tresult := make([]int, 0, len(s))\n\tfor _, val := range s {\n\t\tfound := false\n\t\tfor _, v2 := range t {\n\t\t\tif v2 == val {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tresult = append(result, val)\n\t\t}\n\t}\n\treturn result\n}\n\nfunc Add(s []int, t []int) []int {\n\tresult := make([]int, 0, len(s)+len(t))\n\tresult = append(result, s...)\n\n\tfor _, val := range t {\n\t\tif !ContainsInt(val, s) {\n\t\t\tresult = append(result, val)\n\t\t}\n\t}\n\treturn result\n}\n<|endoftext|>"} {"text":"<commit_before>package utils\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/request\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/cloudformation\"\n)\n\ntype AwsRequest interface {\n\tSend() error\n}\n\ntype RequestHandler interface {\n\tHandleRequest(req AwsRequest) error\n}\n\ntype AwsRequestHandler struct {\n}\n\nfunc (r *AwsRequestHandler) HandleRequest(req AwsRequest) error {\n\ts := 1\n\tvar err error\n\tfor err = req.Send(); err != nil; err = req.Send() {\n\t\tif reqerr, ok := err.(awserr.RequestFailure); ok {\n\t\t\tif reqerr.Code() == \"RequestLimitExceeded\" {\n\t\t\t\ttime.Sleep(time.Duration(s) * time.Second)\n\t\t\t\ts = s * 2\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n\treturn nil\n}\n\ntype Input struct {\n\tSource struct {\n\t\tName string `json:\"name\"`\n\t\tAwsAccessKeyId string `json:\"aws_access_key_id\"`\n\t\tAwsSecretAccessKey string `json:\"aws_secret_access_key\"`\n\t\tRegion string `json:\"region\"`\n\t} `json:\"source\"`\n\tVersion struct {\n\t\tLastUpdatedTime string `json:\"LastUpdatedTime\"`\n\t} `json:\"version\"`\n\tParams struct {\n\t\tTemplate string `json:\"template\"`\n\t\tParameters string `json:\"parameters\"`\n\t\tTags string `json:\"tags\"`\n\t\tCapabilities []string `json:\"capabilities\"`\n\t\tDelete bool `json:\"delete\"`\n\t\tWait bool `json:\"wait\"`\n\t} `json:\"params\"`\n}\n\nfunc GetInput() Input {\n\tbytes, err := ioutil.ReadAll(os.Stdin)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tinput := Input{}\n\terr = json.Unmarshal(bytes, &input)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn input\n}\n\ntype AwsCloudformationSvc interface {\n\tDescribeStacksRequest(input *cloudformation.DescribeStacksInput) (req *request.Request, output *cloudformation.DescribeStacksOutput)\n\tCreateStackRequest(input *cloudformation.CreateStackInput) (req *request.Request, output *cloudformation.CreateStackOutput)\n\tUpdateStackRequest(input *cloudformation.UpdateStackInput) (req *request.Request, output *cloudformation.UpdateStackOutput)\n\tDescribeStackEventsRequest(input *cloudformation.DescribeStackEventsInput) (req *request.Request, output *cloudformation.DescribeStackEventsOutput)\n\tDeleteStackRequest(input *cloudformation.DeleteStackInput) (req *request.Request, output *cloudformation.DeleteStackOutput)\n}\n\nfunc GetCloudformationService(input Input) AwsCloudformationSvc {\n\tcreds := credentials.NewStaticCredentials(input.Source.AwsAccessKeyId, input.Source.AwsSecretAccessKey, \"\")\n\tawsConfig := aws.NewConfig().WithCredentials(creds).WithRegion(input.Source.Region)\n\tsess := session.Must(session.NewSession(awsConfig))\n\tsvc := cloudformation.New(sess)\n\treturn svc\n}\n\nfunc GoToBuildDirectory() {\n\tfiles, err := ioutil.ReadDir(\"\/tmp\/build\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif len(files) != 1 {\n\t\tfmt.Printf(\"Expected only 1 file in \/tmp\/build but found %d: %v\\n\", len(files), files)\n\t\tos.Exit(1)\n\t}\n\n\tos.Chdir(\"\/tmp\/build\/\" + files[0].Name())\n}\n\nfunc Logln(a ...interface{}) {\n\tfmt.Fprintln(os.Stderr, a...)\n}\n\nfunc Logf(format string, a ...interface{}) {\n\tfmt.Fprintf(os.Stderr, format, a)\n}\n<commit_msg>Added some logging<commit_after>package utils\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/request\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/cloudformation\"\n)\n\ntype AwsRequest interface {\n\tSend() error\n}\n\ntype RequestHandler interface {\n\tHandleRequest(req AwsRequest) error\n}\n\ntype AwsRequestHandler struct {\n}\n\nfunc (r *AwsRequestHandler) HandleRequest(req AwsRequest) error {\n\ts := 1\n\tvar err error\n\tfor err = req.Send(); err != nil; err = req.Send() {\n\t\tif reqerr, ok := err.(awserr.RequestFailure); ok {\n\t\t\tif reqerr.Code() == \"RequestLimitExceeded\" {\n\t\t\t\ttime.Sleep(time.Duration(s) * time.Second)\n\t\t\t\ts = s * 2\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tLogln(\"HandleRequest error code:\", reqerr.Code())\n\t\t}\n\t\treturn err\n\t}\n\treturn nil\n}\n\ntype Input struct {\n\tSource struct {\n\t\tName string `json:\"name\"`\n\t\tAwsAccessKeyId string `json:\"aws_access_key_id\"`\n\t\tAwsSecretAccessKey string `json:\"aws_secret_access_key\"`\n\t\tRegion string `json:\"region\"`\n\t} `json:\"source\"`\n\tVersion struct {\n\t\tLastUpdatedTime string `json:\"LastUpdatedTime\"`\n\t} `json:\"version\"`\n\tParams struct {\n\t\tTemplate string `json:\"template\"`\n\t\tParameters string `json:\"parameters\"`\n\t\tTags string `json:\"tags\"`\n\t\tCapabilities []string `json:\"capabilities\"`\n\t\tDelete bool `json:\"delete\"`\n\t\tWait bool `json:\"wait\"`\n\t} `json:\"params\"`\n}\n\nfunc GetInput() Input {\n\tbytes, err := ioutil.ReadAll(os.Stdin)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tinput := Input{}\n\terr = json.Unmarshal(bytes, &input)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn input\n}\n\ntype AwsCloudformationSvc interface {\n\tDescribeStacksRequest(input *cloudformation.DescribeStacksInput) (req *request.Request, output *cloudformation.DescribeStacksOutput)\n\tCreateStackRequest(input *cloudformation.CreateStackInput) (req *request.Request, output *cloudformation.CreateStackOutput)\n\tUpdateStackRequest(input *cloudformation.UpdateStackInput) (req *request.Request, output *cloudformation.UpdateStackOutput)\n\tDescribeStackEventsRequest(input *cloudformation.DescribeStackEventsInput) (req *request.Request, output *cloudformation.DescribeStackEventsOutput)\n\tDeleteStackRequest(input *cloudformation.DeleteStackInput) (req *request.Request, output *cloudformation.DeleteStackOutput)\n}\n\nfunc GetCloudformationService(input Input) AwsCloudformationSvc {\n\tcreds := credentials.NewStaticCredentials(input.Source.AwsAccessKeyId, input.Source.AwsSecretAccessKey, \"\")\n\tawsConfig := aws.NewConfig().WithCredentials(creds).WithRegion(input.Source.Region)\n\tsess := session.Must(session.NewSession(awsConfig))\n\tsvc := cloudformation.New(sess)\n\treturn svc\n}\n\nfunc GoToBuildDirectory() {\n\tfiles, err := ioutil.ReadDir(\"\/tmp\/build\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tif len(files) != 1 {\n\t\tLogf(\"Expected only 1 file in \/tmp\/build but found %d: %v\\n\", len(files), files)\n\t\tos.Exit(1)\n\t}\n\n\tos.Chdir(\"\/tmp\/build\/\" + files[0].Name())\n}\n\nfunc Logln(a ...interface{}) {\n\tfmt.Fprintln(os.Stderr, a...)\n}\n\nfunc Logf(format string, a ...interface{}) {\n\tfmt.Fprintf(os.Stderr, format, a)\n}\n<|endoftext|>"} {"text":"<commit_before>package utils\n\nimport (\n\t\"errors\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n)\n\nfunc Check(err error) {\n\tif err != nil {\n\t\tlog.Fatalf(\"fatal: %v\\n\", err)\n\t}\n}\n\nfunc ConcatPaths(paths ...string) string {\n\treturn strings.Join(paths, \"\/\")\n}\n\nfunc BrowserLauncher() ([]string, error) {\n\tbrowser := os.Getenv(\"BROWSER\")\n\tif browser == \"\" {\n\t\tbrowser = searchBrowserLauncher(runtime.GOOS)\n\t}\n\n\tif browser == \"\" {\n\t\treturn nil, errors.New(\"Please set $BROWSER to a web launcher\")\n\t}\n\n\treturn strings.Split(browser, \" \"), nil\n}\n\nfunc searchBrowserLauncher(goos string) (browser string) {\n\tswitch goos {\n\tcase \"darwin\":\n\t\tbrowser = \"open\"\n\tcase \"windows\":\n\t\tbrowser = \"cmd \/c start\"\n\tdefault:\n\t\tcandidates := []string{\"xdg-open\", \"cygstart\", \"x-www-browser\", \"firefox\",\n\t\t\t\"opera\", \"mozilla\", \"netscape\"}\n\t\tfor _, b := range candidates {\n\t\t\tpath, err := exec.LookPath(b)\n\t\t\tif err == nil {\n\t\t\t\tbrowser = path\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn browser\n}\n\nfunc DirName() (string, error) {\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tname := filepath.Base(dir)\n\tname = strings.Replace(name, \" \", \"-\", -1)\n\treturn name, nil\n}\n\nfunc IsOption(confirm, short, long string) bool {\n\treturn strings.EqualFold(confirm, short) || strings.EqualFold(confirm, long)\n}\n<commit_msg>Print error to stderr instead of using log<commit_after>package utils\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n)\n\nfunc Check(err error) {\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc ConcatPaths(paths ...string) string {\n\treturn strings.Join(paths, \"\/\")\n}\n\nfunc BrowserLauncher() ([]string, error) {\n\tbrowser := os.Getenv(\"BROWSER\")\n\tif browser == \"\" {\n\t\tbrowser = searchBrowserLauncher(runtime.GOOS)\n\t}\n\n\tif browser == \"\" {\n\t\treturn nil, errors.New(\"Please set $BROWSER to a web launcher\")\n\t}\n\n\treturn strings.Split(browser, \" \"), nil\n}\n\nfunc searchBrowserLauncher(goos string) (browser string) {\n\tswitch goos {\n\tcase \"darwin\":\n\t\tbrowser = \"open\"\n\tcase \"windows\":\n\t\tbrowser = \"cmd \/c start\"\n\tdefault:\n\t\tcandidates := []string{\"xdg-open\", \"cygstart\", \"x-www-browser\", \"firefox\",\n\t\t\t\"opera\", \"mozilla\", \"netscape\"}\n\t\tfor _, b := range candidates {\n\t\t\tpath, err := exec.LookPath(b)\n\t\t\tif err == nil {\n\t\t\t\tbrowser = path\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn browser\n}\n\nfunc DirName() (string, error) {\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tname := filepath.Base(dir)\n\tname = strings.Replace(name, \" \", \"-\", -1)\n\treturn name, nil\n}\n\nfunc IsOption(confirm, short, long string) bool {\n\treturn strings.EqualFold(confirm, short) || strings.EqualFold(confirm, long)\n}\n<|endoftext|>"} {"text":"<commit_before>package utils\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"runtime\/debug\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/qor\/qor\"\n\n\t\"strings\"\n)\n\n\/\/ HumanizeString Humanize separates string based on capitalizd letters\n\/\/ e.g. \"OrderItem\" -> \"Order Item\"\nfunc HumanizeString(str string) string {\n\tvar human []rune\n\tfor i, l := range str {\n\t\tif i > 0 && isUppercase(byte(l)) {\n\t\t\tif i > 0 && !isUppercase(str[i-1]) || i+1 < len(str) && !isUppercase(str[i+1]) {\n\t\t\t\thuman = append(human, rune(' '))\n\t\t\t}\n\t\t}\n\t\thuman = append(human, l)\n\t}\n\treturn strings.Title(string(human))\n}\n\nfunc isUppercase(char byte) bool {\n\treturn 'A' <= char && char <= 'Z'\n}\n\nvar upcaseRegexp = regexp.MustCompile(\"[A-Z]{3,}[a-z]\")\n\n\/\/ ToParamString replaces spaces and separates words (by uppercase letters) with\n\/\/ underscores in a string, also downcase it\n\/\/ e.g. ToParamString -> to_param_string, To ParamString -> to_param_string\nfunc ToParamString(str string) string {\n\treturn gorm.ToDBName(strings.Replace(str, \" \", \"_\", -1))\n}\n\n\/\/ PatchURL updates the query part of the current request url. You can\n\/\/ access it in template by `patch_url`.\n\/\/ patch_url \"google.com\" \"key\" \"value\"\nfunc PatchURL(originalURL string, params ...interface{}) (patchedURL string, err error) {\n\turl, err := url.Parse(originalURL)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tquery := url.Query()\n\tfor i := 0; i < len(params)\/2; i++ {\n\t\t\/\/ Check if params is key&value pair\n\t\tkey := fmt.Sprintf(\"%v\", params[i*2])\n\t\tvalue := fmt.Sprintf(\"%v\", params[i*2+1])\n\n\t\tif value == \"\" {\n\t\t\tquery.Del(key)\n\t\t} else {\n\t\t\tquery.Set(key, value)\n\t\t}\n\t}\n\n\turl.RawQuery = query.Encode()\n\tpatchedURL = url.String()\n\treturn\n}\n\n\/\/ GetLocale get locale from request, cookie, after get the locale, will write the locale to the cookie if possible\nfunc GetLocale(context *qor.Context) string {\n\tif locale := context.Request.Header.Get(\"Locale\"); locale != \"\" {\n\t\treturn locale\n\t}\n\n\tif locale := context.Request.URL.Query().Get(\"locale\"); locale != \"\" {\n\t\tif context.Writer != nil {\n\t\t\tcontext.Request.Header.Set(\"Locale\", locale)\n\t\t\tc := http.Cookie{Name: \"locale\", Value: locale, Expires: time.Now().AddDate(1, 0, 0), Path: \"\/\", HttpOnly: true}\n\t\t\thttp.SetCookie(context.Writer, &c)\n\t\t}\n\t\treturn locale\n\t}\n\n\tif locale, err := context.Request.Cookie(\"locale\"); err == nil {\n\t\treturn locale.Value\n\t}\n\n\treturn \"\"\n}\n\n\/\/ Stringify stringify any data, if it is a struct, will try to use its Name, Title, Code field, else will use its primary key\nfunc Stringify(object interface{}) string {\n\tif obj, ok := object.(interface {\n\t\tStringify() string\n\t}); ok {\n\t\treturn obj.Stringify()\n\t}\n\n\tscope := gorm.Scope{Value: object}\n\tfor _, column := range []string{\"Name\", \"Title\", \"Code\"} {\n\t\tif field, ok := scope.FieldByName(column); ok {\n\t\t\treturn fmt.Sprintf(\"%v\", field.Field.Interface())\n\t\t}\n\t}\n\n\tif scope.PrimaryField() != nil {\n\t\tif scope.PrimaryKeyZero() {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn fmt.Sprintf(\"%v#%v\", scope.GetModelStruct().ModelType.Name(), scope.PrimaryKeyValue())\n\t}\n\n\treturn fmt.Sprint(reflect.Indirect(reflect.ValueOf(object)).Interface())\n}\n\n\/\/ ModelType get value's model type\nfunc ModelType(value interface{}) reflect.Type {\n\treflectType := reflect.Indirect(reflect.ValueOf(value)).Type()\n\n\tfor reflectType.Kind() == reflect.Ptr || reflectType.Kind() == reflect.Slice {\n\t\treflectType = reflectType.Elem()\n\t}\n\n\treturn reflectType\n}\n\n\/\/ ParseTagOption parse tag options to hash\nfunc ParseTagOption(str string) map[string]string {\n\ttags := strings.Split(str, \";\")\n\tsetting := map[string]string{}\n\tfor _, value := range tags {\n\t\tv := strings.Split(value, \":\")\n\t\tk := strings.TrimSpace(strings.ToUpper(v[0]))\n\t\tif len(v) == 2 {\n\t\t\tsetting[k] = v[1]\n\t\t} else {\n\t\t\tsetting[k] = k\n\t\t}\n\t}\n\treturn setting\n}\n\n\/\/ ExitWithMsg debug error messages and print stack\nfunc ExitWithMsg(msg interface{}, value ...interface{}) {\n\tfmt.Printf(\"\\n\"+filenameWithLineNum()+\"\\n%v\\n\", append([]interface{}{msg}, value...)...)\n\tdebug.PrintStack()\n}\n\nfunc filenameWithLineNum() string {\n\tvar total = 10\n\tvar results []string\n\tfor i := 2; i < 15; i++ {\n\t\tif _, file, line, ok := runtime.Caller(i); ok {\n\t\t\ttotal--\n\t\t\tresults = append(results[:0],\n\t\t\t\tappend(\n\t\t\t\t\t[]string{fmt.Sprintf(\"%v:%v\", strings.TrimPrefix(file, os.Getenv(\"GOPATH\")+\"src\/\"), line)},\n\t\t\t\t\tresults[0:]...)...)\n\n\t\t\tif total == 0 {\n\t\t\t\treturn strings.Join(results, \"\\n\")\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}\n<commit_msg>Add set cookie method<commit_after>package utils\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"runtime\/debug\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\t\"github.com\/qor\/qor\"\n\n\t\"strings\"\n)\n\n\/\/ HumanizeString Humanize separates string based on capitalizd letters\n\/\/ e.g. \"OrderItem\" -> \"Order Item\"\nfunc HumanizeString(str string) string {\n\tvar human []rune\n\tfor i, l := range str {\n\t\tif i > 0 && isUppercase(byte(l)) {\n\t\t\tif i > 0 && !isUppercase(str[i-1]) || i+1 < len(str) && !isUppercase(str[i+1]) {\n\t\t\t\thuman = append(human, rune(' '))\n\t\t\t}\n\t\t}\n\t\thuman = append(human, l)\n\t}\n\treturn strings.Title(string(human))\n}\n\nfunc isUppercase(char byte) bool {\n\treturn 'A' <= char && char <= 'Z'\n}\n\nvar upcaseRegexp = regexp.MustCompile(\"[A-Z]{3,}[a-z]\")\n\n\/\/ ToParamString replaces spaces and separates words (by uppercase letters) with\n\/\/ underscores in a string, also downcase it\n\/\/ e.g. ToParamString -> to_param_string, To ParamString -> to_param_string\nfunc ToParamString(str string) string {\n\treturn gorm.ToDBName(strings.Replace(str, \" \", \"_\", -1))\n}\n\n\/\/ PatchURL updates the query part of the current request url. You can\n\/\/ access it in template by `patch_url`.\n\/\/ patch_url \"google.com\" \"key\" \"value\"\nfunc PatchURL(originalURL string, params ...interface{}) (patchedURL string, err error) {\n\turl, err := url.Parse(originalURL)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tquery := url.Query()\n\tfor i := 0; i < len(params)\/2; i++ {\n\t\t\/\/ Check if params is key&value pair\n\t\tkey := fmt.Sprintf(\"%v\", params[i*2])\n\t\tvalue := fmt.Sprintf(\"%v\", params[i*2+1])\n\n\t\tif value == \"\" {\n\t\t\tquery.Del(key)\n\t\t} else {\n\t\t\tquery.Set(key, value)\n\t\t}\n\t}\n\n\turl.RawQuery = query.Encode()\n\tpatchedURL = url.String()\n\treturn\n}\n\n\/\/ GetLocale get locale from request, cookie, after get the locale, will write the locale to the cookie if possible\nfunc GetLocale(context *qor.Context) string {\n\tif locale := context.Request.Header.Get(\"Locale\"); locale != \"\" {\n\t\treturn locale\n\t}\n\n\tif locale := context.Request.URL.Query().Get(\"locale\"); locale != \"\" {\n\t\tif context.Writer != nil {\n\t\t\tcontext.Request.Header.Set(\"Locale\", locale)\n\t\t\tc := http.Cookie{Name: \"locale\", Value: locale, Expires: time.Now().AddDate(1, 0, 0), Path: \"\/\", HttpOnly: true}\n\t\t\thttp.SetCookie(context.Writer, &c)\n\t\t}\n\t\treturn locale\n\t}\n\n\tif locale, err := context.Request.Cookie(\"locale\"); err == nil {\n\t\treturn locale.Value\n\t}\n\n\treturn \"\"\n}\n\n\/\/ SetCookie set cookie for context\nfunc SetCookie(cookie http.Cookie, context *qor.Context) string {\n\tcookie.HttpOnly = true\n\n\t\/\/ set https cookie\n\tif context.Request != nil && context.Request.URL.Scheme == \"https\" {\n\t\tcookie.Secure = true\n\t}\n\n\t\/\/ set default path\n\tif cookie.Path == \"\" {\n\t\tcookie.Path = \"\/\"\n\t}\n\n\thttp.SetCookie(context.Writer, &cookie)\n}\n\n\/\/ Stringify stringify any data, if it is a struct, will try to use its Name, Title, Code field, else will use its primary key\nfunc Stringify(object interface{}) string {\n\tif obj, ok := object.(interface {\n\t\tStringify() string\n\t}); ok {\n\t\treturn obj.Stringify()\n\t}\n\n\tscope := gorm.Scope{Value: object}\n\tfor _, column := range []string{\"Name\", \"Title\", \"Code\"} {\n\t\tif field, ok := scope.FieldByName(column); ok {\n\t\t\treturn fmt.Sprintf(\"%v\", field.Field.Interface())\n\t\t}\n\t}\n\n\tif scope.PrimaryField() != nil {\n\t\tif scope.PrimaryKeyZero() {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn fmt.Sprintf(\"%v#%v\", scope.GetModelStruct().ModelType.Name(), scope.PrimaryKeyValue())\n\t}\n\n\treturn fmt.Sprint(reflect.Indirect(reflect.ValueOf(object)).Interface())\n}\n\n\/\/ ModelType get value's model type\nfunc ModelType(value interface{}) reflect.Type {\n\treflectType := reflect.Indirect(reflect.ValueOf(value)).Type()\n\n\tfor reflectType.Kind() == reflect.Ptr || reflectType.Kind() == reflect.Slice {\n\t\treflectType = reflectType.Elem()\n\t}\n\n\treturn reflectType\n}\n\n\/\/ ParseTagOption parse tag options to hash\nfunc ParseTagOption(str string) map[string]string {\n\ttags := strings.Split(str, \";\")\n\tsetting := map[string]string{}\n\tfor _, value := range tags {\n\t\tv := strings.Split(value, \":\")\n\t\tk := strings.TrimSpace(strings.ToUpper(v[0]))\n\t\tif len(v) == 2 {\n\t\t\tsetting[k] = v[1]\n\t\t} else {\n\t\t\tsetting[k] = k\n\t\t}\n\t}\n\treturn setting\n}\n\n\/\/ ExitWithMsg debug error messages and print stack\nfunc ExitWithMsg(msg interface{}, value ...interface{}) {\n\tfmt.Printf(\"\\n\"+filenameWithLineNum()+\"\\n%v\\n\", append([]interface{}{msg}, value...)...)\n\tdebug.PrintStack()\n}\n\nfunc filenameWithLineNum() string {\n\tvar total = 10\n\tvar results []string\n\tfor i := 2; i < 15; i++ {\n\t\tif _, file, line, ok := runtime.Caller(i); ok {\n\t\t\ttotal--\n\t\t\tresults = append(results[:0],\n\t\t\t\tappend(\n\t\t\t\t\t[]string{fmt.Sprintf(\"%v:%v\", strings.TrimPrefix(file, os.Getenv(\"GOPATH\")+\"src\/\"), line)},\n\t\t\t\t\tresults[0:]...)...)\n\n\t\t\tif total == 0 {\n\t\t\t\treturn strings.Join(results, \"\\n\")\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}\n<|endoftext|>"} {"text":"<commit_before>package buffer\n\nimport (\n\t\"sync\"\n\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pkg\/grpcutil\"\n)\n\nvar bufPool = sync.Pool{\n\tNew: func() interface{} {\n\t\treturn make([]byte, grpcutil.MaxMsgSize\/10)\n\t},\n}\n\nfunc Get() []byte {\n\treturn bufPool.Get().([]byte)\n}\n\nfunc Put(buf []byte) {\n\tbufPool.Put(buf)\n}\n<commit_msg>Specify buffer size directly in the buffer package to get rid of import cycle<commit_after>package buffer\n\nimport (\n\t\"sync\"\n)\n\nvar bufPool = sync.Pool{\n\tNew: func() interface{} {\n\t\t\/\/ We use 2MB as the default buffer size because this buffer pool\n\t\t\/\/ is typically used for gRPC and 2MB is:\n\t\t\/\/ 1. Reasonably smaller than the max gRPC size (which is 20MB)\n\t\t\/\/ 2. Small enough that having hundreds of these buffers won't\n\t\t\/\/ overwhelm the node\n\t\t\/\/ 3. Large enough for message-sending to be efficient\n\t\treturn make([]byte, 2*1024*1024)\n\t},\n}\n\nfunc Get() []byte {\n\treturn bufPool.Get().([]byte)\n}\n\nfunc Put(buf []byte) {\n\tbufPool.Put(buf)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cache\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/apimachinery\/pkg\/watch\"\n)\n\nfunc TestMutationDetector(t *testing.T) {\n\tfakeWatch := watch.NewFake()\n\tlw := &testLW{\n\t\tWatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\treturn fakeWatch, nil\n\t\t},\n\t\tListFunc: func(options metav1.ListOptions) (runtime.Object, error) {\n\t\t\treturn &v1.PodList{}, nil\n\t\t},\n\t}\n\tpod := &v1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"anything\",\n\t\t\tLabels: map[string]string{\"check\": \"foo\"},\n\t\t},\n\t}\n\tstopCh := make(chan struct{})\n\tdefer close(stopCh)\n\tmutationFound := make(chan bool)\n\n\tinformer := NewSharedInformer(lw, &v1.Pod{}, 1*time.Second).(*sharedIndexInformer)\n\tdetector := &defaultCacheMutationDetector{\n\t\tname: \"name\",\n\t\tperiod: 1 * time.Second,\n\t\tfailureFunc: func(message string) {\n\t\t\tmutationFound <- true\n\t\t},\n\t}\n\tinformer.cacheMutationDetector = detector\n\tgo informer.Run(stopCh)\n\n\tfakeWatch.Add(pod)\n\n\twait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {\n\t\tdetector.addedObjsLock.Lock()\n\t\tdefer detector.addedObjsLock.Unlock()\n\t\treturn len(detector.addedObjs) > 0, nil\n\t})\n\n\tdetector.compareObjectsLock.Lock()\n\tpod.Labels[\"change\"] = \"true\"\n\tdetector.compareObjectsLock.Unlock()\n\n\tselect {\n\tcase <-mutationFound:\n\t}\n\n}\n<commit_msg>Add test timeout to mutation detector test<commit_after>\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cache\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/apimachinery\/pkg\/watch\"\n)\n\nfunc TestMutationDetector(t *testing.T) {\n\tfakeWatch := watch.NewFake()\n\tlw := &testLW{\n\t\tWatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {\n\t\t\treturn fakeWatch, nil\n\t\t},\n\t\tListFunc: func(options metav1.ListOptions) (runtime.Object, error) {\n\t\t\treturn &v1.PodList{}, nil\n\t\t},\n\t}\n\tpod := &v1.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: \"anything\",\n\t\t\tLabels: map[string]string{\"check\": \"foo\"},\n\t\t},\n\t}\n\tstopCh := make(chan struct{})\n\tdefer close(stopCh)\n\tmutationFound := make(chan bool)\n\n\tinformer := NewSharedInformer(lw, &v1.Pod{}, 1*time.Second).(*sharedIndexInformer)\n\tdetector := &defaultCacheMutationDetector{\n\t\tname: \"name\",\n\t\tperiod: 1 * time.Second,\n\t\tretainDuration: 2 * time.Minute,\n\t\tfailureFunc: func(message string) {\n\t\t\tmutationFound <- true\n\t\t},\n\t}\n\tinformer.cacheMutationDetector = detector\n\tgo informer.Run(stopCh)\n\n\tfakeWatch.Add(pod)\n\n\twait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {\n\t\tdetector.addedObjsLock.Lock()\n\t\tdefer detector.addedObjsLock.Unlock()\n\t\treturn len(detector.addedObjs) > 0, nil\n\t})\n\n\tdetector.compareObjectsLock.Lock()\n\tpod.Labels[\"change\"] = \"true\"\n\tdetector.compareObjectsLock.Unlock()\n\n\tselect {\n\tcase <-mutationFound:\n\tcase <-time.After(wait.ForeverTestTimeout):\n\t\tt.Fatalf(\"failed waiting for mutating detector\")\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/linuxkit\/linuxkit\/src\/cmd\/linuxkit\/pkglib\"\n)\n\nfunc pkgShowTag(args []string) {\n\tflags := flag.NewFlagSet(\"pkg show-tag\", flag.ExitOnError)\n\tflags.Usage = func() {\n\t\tinvoked := filepath.Base(os.Args[0])\n\t\tfmt.Fprintf(os.Stderr, \"USAGE: %s pkg show-tag [options] path\\n\\n\", invoked)\n\t\tfmt.Fprintf(os.Stderr, \"'path' specifies the path to the package source directory.\\n\")\n\t\tfmt.Fprintf(os.Stderr, \"\\n\")\n\t\tflags.PrintDefaults()\n\t}\n\n\tpkgs, err := pkglib.NewFromCLI(flags, args...)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tfor _, p := range pkgs {\n\t\tfmt.Println(p.Tag())\n\t}\n}\n<commit_msg>option to show canonical tag<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/linuxkit\/linuxkit\/src\/cmd\/linuxkit\/pkglib\"\n)\n\nfunc pkgShowTag(args []string) {\n\tflags := flag.NewFlagSet(\"pkg show-tag\", flag.ExitOnError)\n\tflags.Usage = func() {\n\t\tinvoked := filepath.Base(os.Args[0])\n\t\tfmt.Fprintf(os.Stderr, \"USAGE: %s pkg show-tag [options] path\\n\\n\", invoked)\n\t\tfmt.Fprintf(os.Stderr, \"'path' specifies the path to the package source directory.\\n\")\n\t\tfmt.Fprintf(os.Stderr, \"\\n\")\n\t\tflags.PrintDefaults()\n\t}\n\tcanonical := flags.Bool(\"canonical\", false, \"Show canonical name, e.g. docker.io\/linuxkit\/foo:1234, instead of the default, e.g. linuxkit\/foo:1234\")\n\n\tpkgs, err := pkglib.NewFromCLI(flags, args...)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tfor _, p := range pkgs {\n\t\ttag := p.Tag()\n\t\tif *canonical {\n\t\t\ttag = p.FullTag()\n\t\t}\n\t\tfmt.Println(tag)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc loadSyms(t *testing.T) map[string]string {\n\tswitch runtime.GOOS {\n\tcase \"android\", \"nacl\":\n\t\tt.Skipf(\"skipping on %s\", runtime.GOOS)\n\t}\n\n\tcmd := exec.Command(\"go\", \"tool\", \"nm\", os.Args[0])\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"go tool nm %v: %v\\n%s\", os.Args[0], err, string(out))\n\t}\n\tsyms := make(map[string]string)\n\tscanner := bufio.NewScanner(bytes.NewReader(out))\n\tfor scanner.Scan() {\n\t\tf := strings.Fields(scanner.Text())\n\t\tif len(f) < 3 {\n\t\t\tcontinue\n\t\t}\n\t\tsyms[f[2]] = f[0]\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tt.Fatalf(\"error reading symbols: %v\", err)\n\t}\n\treturn syms\n}\n\nfunc runObjDump(t *testing.T, exe, startaddr, endaddr string) (path, lineno string) {\n\tswitch runtime.GOOS {\n\tcase \"android\", \"nacl\":\n\t\tt.Skipf(\"skipping on %s\", runtime.GOOS)\n\t}\n\n\tcmd := exec.Command(exe, os.Args[0], startaddr, endaddr)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"go tool objdump %v: %v\\n%s\", os.Args[0], err, string(out))\n\t}\n\tf := strings.Split(string(out), \"\\n\")\n\tif len(f) < 1 {\n\t\tt.Fatal(\"objdump output must have at least one line\")\n\t}\n\tpathAndLineNo := f[0]\n\tf = strings.Split(pathAndLineNo, \":\")\n\tif runtime.GOOS == \"windows\" {\n\t\tswitch len(f) {\n\t\tcase 2:\n\t\t\treturn f[0], f[1]\n\t\tcase 3:\n\t\t\treturn f[0] + \":\" + f[1], f[2]\n\t\tdefault:\n\t\t\tt.Fatalf(\"no line number found in %q\", pathAndLineNo)\n\t\t}\n\t}\n\tif len(f) != 2 {\n\t\tt.Fatalf(\"no line number found in %q\", pathAndLineNo)\n\t}\n\treturn f[0], f[1]\n}\n\nfunc testObjDump(t *testing.T, exe, startaddr, endaddr string, line int) {\n\tsrcPath, srcLineNo := runObjDump(t, exe, startaddr, endaddr)\n\tfi1, err := os.Stat(\"objdump_test.go\")\n\tif err != nil {\n\t\tt.Fatalf(\"Stat failed: %v\", err)\n\t}\n\tfi2, err := os.Stat(srcPath)\n\tif err != nil {\n\t\tt.Fatalf(\"Stat failed: %v\", err)\n\t}\n\tif !os.SameFile(fi1, fi2) {\n\t\tt.Fatalf(\"objdump_test.go and %s are not same file\", srcPath)\n\t}\n\tif srcLineNo != fmt.Sprint(line) {\n\t\tt.Fatalf(\"line number = %v; want %d\", srcLineNo, line)\n\t}\n}\n\nfunc TestObjDump(t *testing.T) {\n\t_, _, line, _ := runtime.Caller(0)\n\tsyms := loadSyms(t)\n\n\ttmp, exe := buildObjdump(t)\n\tdefer os.RemoveAll(tmp)\n\n\tstartaddr := syms[\"cmd\/objdump.TestObjDump\"]\n\taddr, err := strconv.ParseUint(startaddr, 16, 64)\n\tif err != nil {\n\t\tt.Fatalf(\"invalid start address %v: %v\", startaddr, err)\n\t}\n\tendaddr := fmt.Sprintf(\"%x\", addr+10)\n\ttestObjDump(t, exe, startaddr, endaddr, line-1)\n\ttestObjDump(t, exe, \"0x\"+startaddr, \"0x\"+endaddr, line-1)\n}\n\nfunc buildObjdump(t *testing.T) (tmp, exe string) {\n\tswitch runtime.GOOS {\n\tcase \"android\", \"nacl\":\n\t\tt.Skipf(\"skipping on %s\", runtime.GOOS)\n\t}\n\n\ttmp, err := ioutil.TempDir(\"\", \"TestObjDump\")\n\tif err != nil {\n\t\tt.Fatal(\"TempDir failed: \", err)\n\t}\n\n\texe = filepath.Join(tmp, \"testobjdump.exe\")\n\tout, err := exec.Command(\"go\", \"build\", \"-o\", exe, \"cmd\/objdump\").CombinedOutput()\n\tif err != nil {\n\t\tos.RemoveAll(tmp)\n\t\tt.Fatalf(\"go build -o %v cmd\/objdump: %v\\n%s\", exe, err, string(out))\n\t}\n\treturn\n}\n\nvar x86Need = []string{\n\t\"fmthello.go:6\",\n\t\"TEXT main.main(SB)\",\n\t\"JMP main.main(SB)\",\n\t\"CALL fmt.Println(SB)\",\n\t\"RET\",\n}\n\nvar armNeed = []string{\n\t\"fmthello.go:6\",\n\t\"TEXT main.main(SB)\",\n\t\"B.LS main.main(SB)\",\n\t\"BL fmt.Println(SB)\",\n\t\"RET\",\n}\n\n\/\/ objdump is fully cross platform: it can handle binaries\n\/\/ from any known operating system and architecture.\n\/\/ We could in principle add binaries to testdata and check\n\/\/ all the supported systems during this test. However, the\n\/\/ binaries would be about 1 MB each, and we don't want to\n\/\/ add that much junk to the hg repository. Instead, build a\n\/\/ binary for the current system (only) and test that objdump\n\/\/ can handle that one.\n\nfunc testDisasm(t *testing.T, flags ...string) {\n\ttmp, exe := buildObjdump(t)\n\tdefer os.RemoveAll(tmp)\n\n\thello := filepath.Join(tmp, \"hello.exe\")\n\targs := []string{\"build\", \"-o\", hello}\n\targs = append(args, flags...)\n\targs = append(args, \"testdata\/fmthello.go\")\n\tout, err := exec.Command(\"go\", args...).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"go build fmthello.go: %v\\n%s\", err, out)\n\t}\n\tneed := []string{\n\t\t\"fmthello.go:6\",\n\t\t\"TEXT main.main(SB)\",\n\t}\n\tswitch runtime.GOARCH {\n\tcase \"amd64\", \"386\":\n\t\tneed = append(need, x86Need...)\n\tcase \"arm\":\n\t\tneed = append(need, armNeed...)\n\t}\n\n\tout, err = exec.Command(exe, \"-s\", \"main.main\", hello).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"objdump fmthello.exe: %v\\n%s\", err, out)\n\t}\n\n\ttext := string(out)\n\tok := true\n\tfor _, s := range need {\n\t\tif !strings.Contains(text, s) {\n\t\t\tt.Errorf(\"disassembly missing '%s'\", s)\n\t\t\tok = false\n\t\t}\n\t}\n\tif !ok {\n\t\tt.Logf(\"full disassembly:\\n%s\", text)\n\t}\n}\n\nfunc TestDisasm(t *testing.T) {\n\ttestDisasm(t)\n}\n\nfunc TestDisasmExtld(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"plan9\", \"windows\":\n\t\tt.Skipf(\"skipping on %s\", runtime.GOOS)\n\t}\n\ttestDisasm(t, \"-ldflags=-linkmode=external\")\n}\n<commit_msg>cmd\/objdump: disable test failing on arm5<commit_after>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc loadSyms(t *testing.T) map[string]string {\n\tswitch runtime.GOOS {\n\tcase \"android\", \"nacl\":\n\t\tt.Skipf(\"skipping on %s\", runtime.GOOS)\n\t}\n\n\tcmd := exec.Command(\"go\", \"tool\", \"nm\", os.Args[0])\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"go tool nm %v: %v\\n%s\", os.Args[0], err, string(out))\n\t}\n\tsyms := make(map[string]string)\n\tscanner := bufio.NewScanner(bytes.NewReader(out))\n\tfor scanner.Scan() {\n\t\tf := strings.Fields(scanner.Text())\n\t\tif len(f) < 3 {\n\t\t\tcontinue\n\t\t}\n\t\tsyms[f[2]] = f[0]\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tt.Fatalf(\"error reading symbols: %v\", err)\n\t}\n\treturn syms\n}\n\nfunc runObjDump(t *testing.T, exe, startaddr, endaddr string) (path, lineno string) {\n\tswitch runtime.GOOS {\n\tcase \"android\", \"nacl\":\n\t\tt.Skipf(\"skipping on %s\", runtime.GOOS)\n\t}\n\n\tcmd := exec.Command(exe, os.Args[0], startaddr, endaddr)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"go tool objdump %v: %v\\n%s\", os.Args[0], err, string(out))\n\t}\n\tf := strings.Split(string(out), \"\\n\")\n\tif len(f) < 1 {\n\t\tt.Fatal(\"objdump output must have at least one line\")\n\t}\n\tpathAndLineNo := f[0]\n\tf = strings.Split(pathAndLineNo, \":\")\n\tif runtime.GOOS == \"windows\" {\n\t\tswitch len(f) {\n\t\tcase 2:\n\t\t\treturn f[0], f[1]\n\t\tcase 3:\n\t\t\treturn f[0] + \":\" + f[1], f[2]\n\t\tdefault:\n\t\t\tt.Fatalf(\"no line number found in %q\", pathAndLineNo)\n\t\t}\n\t}\n\tif len(f) != 2 {\n\t\tt.Fatalf(\"no line number found in %q\", pathAndLineNo)\n\t}\n\treturn f[0], f[1]\n}\n\nfunc testObjDump(t *testing.T, exe, startaddr, endaddr string, line int) {\n\tsrcPath, srcLineNo := runObjDump(t, exe, startaddr, endaddr)\n\tfi1, err := os.Stat(\"objdump_test.go\")\n\tif err != nil {\n\t\tt.Fatalf(\"Stat failed: %v\", err)\n\t}\n\tfi2, err := os.Stat(srcPath)\n\tif err != nil {\n\t\tt.Fatalf(\"Stat failed: %v\", err)\n\t}\n\tif !os.SameFile(fi1, fi2) {\n\t\tt.Fatalf(\"objdump_test.go and %s are not same file\", srcPath)\n\t}\n\tif srcLineNo != fmt.Sprint(line) {\n\t\tt.Fatalf(\"line number = %v; want %d\", srcLineNo, line)\n\t}\n}\n\nfunc TestObjDump(t *testing.T) {\n\t_, _, line, _ := runtime.Caller(0)\n\tsyms := loadSyms(t)\n\n\ttmp, exe := buildObjdump(t)\n\tdefer os.RemoveAll(tmp)\n\n\tstartaddr := syms[\"cmd\/objdump.TestObjDump\"]\n\taddr, err := strconv.ParseUint(startaddr, 16, 64)\n\tif err != nil {\n\t\tt.Fatalf(\"invalid start address %v: %v\", startaddr, err)\n\t}\n\tendaddr := fmt.Sprintf(\"%x\", addr+10)\n\ttestObjDump(t, exe, startaddr, endaddr, line-1)\n\ttestObjDump(t, exe, \"0x\"+startaddr, \"0x\"+endaddr, line-1)\n}\n\nfunc buildObjdump(t *testing.T) (tmp, exe string) {\n\tswitch runtime.GOOS {\n\tcase \"android\", \"nacl\":\n\t\tt.Skipf(\"skipping on %s\", runtime.GOOS)\n\t}\n\n\ttmp, err := ioutil.TempDir(\"\", \"TestObjDump\")\n\tif err != nil {\n\t\tt.Fatal(\"TempDir failed: \", err)\n\t}\n\n\texe = filepath.Join(tmp, \"testobjdump.exe\")\n\tout, err := exec.Command(\"go\", \"build\", \"-o\", exe, \"cmd\/objdump\").CombinedOutput()\n\tif err != nil {\n\t\tos.RemoveAll(tmp)\n\t\tt.Fatalf(\"go build -o %v cmd\/objdump: %v\\n%s\", exe, err, string(out))\n\t}\n\treturn\n}\n\nvar x86Need = []string{\n\t\"fmthello.go:6\",\n\t\"TEXT main.main(SB)\",\n\t\"JMP main.main(SB)\",\n\t\"CALL fmt.Println(SB)\",\n\t\"RET\",\n}\n\nvar armNeed = []string{\n\t\"fmthello.go:6\",\n\t\"TEXT main.main(SB)\",\n\t\/\/\"B.LS main.main(SB)\", \/\/ TODO(rsc): restore; golang.org\/issue\/9021\n\t\"BL fmt.Println(SB)\",\n\t\"RET\",\n}\n\n\/\/ objdump is fully cross platform: it can handle binaries\n\/\/ from any known operating system and architecture.\n\/\/ We could in principle add binaries to testdata and check\n\/\/ all the supported systems during this test. However, the\n\/\/ binaries would be about 1 MB each, and we don't want to\n\/\/ add that much junk to the hg repository. Instead, build a\n\/\/ binary for the current system (only) and test that objdump\n\/\/ can handle that one.\n\nfunc testDisasm(t *testing.T, flags ...string) {\n\ttmp, exe := buildObjdump(t)\n\tdefer os.RemoveAll(tmp)\n\n\thello := filepath.Join(tmp, \"hello.exe\")\n\targs := []string{\"build\", \"-o\", hello}\n\targs = append(args, flags...)\n\targs = append(args, \"testdata\/fmthello.go\")\n\tout, err := exec.Command(\"go\", args...).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"go build fmthello.go: %v\\n%s\", err, out)\n\t}\n\tneed := []string{\n\t\t\"fmthello.go:6\",\n\t\t\"TEXT main.main(SB)\",\n\t}\n\tswitch runtime.GOARCH {\n\tcase \"amd64\", \"386\":\n\t\tneed = append(need, x86Need...)\n\tcase \"arm\":\n\t\tneed = append(need, armNeed...)\n\t}\n\n\tout, err = exec.Command(exe, \"-s\", \"main.main\", hello).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"objdump fmthello.exe: %v\\n%s\", err, out)\n\t}\n\n\ttext := string(out)\n\tok := true\n\tfor _, s := range need {\n\t\tif !strings.Contains(text, s) {\n\t\t\tt.Errorf(\"disassembly missing '%s'\", s)\n\t\t\tok = false\n\t\t}\n\t}\n\tif !ok {\n\t\tt.Logf(\"full disassembly:\\n%s\", text)\n\t}\n}\n\nfunc TestDisasm(t *testing.T) {\n\ttestDisasm(t)\n}\n\nfunc TestDisasmExtld(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"plan9\", \"windows\":\n\t\tt.Skipf(\"skipping on %s\", runtime.GOOS)\n\t}\n\ttestDisasm(t, \"-ldflags=-linkmode=external\")\n}\n<|endoftext|>"} {"text":"<commit_before>package search\n\nimport (\n\t\"testing\"\n\n\t\"sort\"\n\n\t\"github.com\/Goita\/go-goita\/goita\"\n)\n\nfunc TestSolve(t *testing.T) {\n\tboard := goita.ParseBoard(\"11244556,12234569,11123378,11113457,s3,371,411,115,2p,3p,4p,145,252,3p,4p,124,2p\")\n\tret := Solve(board)\n\tsort.Slice(ret, func(i, j int) bool {\n\t\treturn ret[i].Move.OrderKey() < ret[j].Move.OrderKey()\n\t})\n\tif len(ret) != 4 || ret[0].Score != -40 || ret[1].Score != -40 || ret[2].Score != -50 || ret[3].Score != -40 {\n\t\tt.Errorf(\"search.Solve() = %v, want [p:-40] [81:-40] [82:-50] [83:-40]\", ret)\n\t}\n\t\/\/[[p:-40 -> 3p,443,1p,2p,3p,415,1p,2p,3p,417] [81:-40 -> 381,413,1p,2p,3p,454,1p,2p,3p,417] [82:-50 -> 382,4p,1p,2p,311,413,1p,232,3p,4p,1p,264,3p,4p,1p,218] [83:-40 -> 383,4p,1p,2p,311,413,1p,234,3p,4p,1p,261,3p,415,1p,2p,3p,447]]\n}\n\nfunc BenchmarkSolve(b *testing.B) {\n\tboard := goita.ParseBoard(\"11244556,12234569,11123378,11113457,s3,371,411,115,2p,3p,4p,145,252,3p,4p,124,2p\")\n\tfor i := 0; i < b.N; i++ {\n\t\tSolve(board)\n\t}\n\t\/\/ fmt.Println(ret)\n}\n<commit_msg>fix: import cycle<commit_after>package search\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com\/Goita\/go-goita\/goita\"\n)\n\nfunc TestSolve(t *testing.T) {\n\tboard := goita.ParseBoard(\"11244556,12234569,11123378,11113457,s3,371,411,115,2p,3p,4p,145,252,3p,4p,124,2p\")\n\tret := Solve(board)\n\tmoves := board.GetPossibleMoves()\n\tresults := make([]*EvaluatedMove, 0, len(moves))\n\tfor r := range ret {\n\t\tresults = append(results, r)\n\t\tfmt.Printf(\"move:[%v] score:[%v] %v\\n\\n\", r.Move, r.Score, r.History.History(board.Turn))\n\t}\n\tif len(results) != 4 || results[0].Score != -40 || results[1].Score != -40 || results[2].Score != -50 || results[3].Score != -40 {\n\t\tt.Errorf(\"search.Solve() = %v, want [p:-40] [81:-40] [82:-50] [83:-40]\", ret)\n\t}\n\t\/\/[[p:-40 -> 3p,443,1p,2p,3p,415,1p,2p,3p,417] [81:-40 -> 381,413,1p,2p,3p,454,1p,2p,3p,417] [82:-50 -> 382,4p,1p,2p,311,413,1p,232,3p,4p,1p,264,3p,4p,1p,218] [83:-40 -> 383,4p,1p,2p,311,413,1p,234,3p,4p,1p,261,3p,415,1p,2p,3p,447]]\n}\n\nfunc BenchmarkSolve(b *testing.B) {\n\tboard := goita.ParseBoard(\"11244556,12234569,11123378,11113457,s3,371,411,115,2p,3p,4p,145,252,3p,4p,124,2p\")\n\tfor i := 0; i < b.N; i++ {\n\t\tSolve(board)\n\t}\n\t\/\/ fmt.Println(ret)\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/suite\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\n\t\"warcluster\/entities\/db\"\n)\n\nconst (\n\tuser = \"{\\\"Command\\\": \\\"login\\\", \\\"Username\\\": \\\"JohnDoe\\\", \\\"TwitterId\\\": \\\"some twitter ID\\\"}\"\n\tsetupParams = \"{\\\"Command\\\": \\\"setup_parameters\\\", \\\"Fraction\\\": 0, \\\"SunTextureId\\\": 0}\"\n)\n\ntype ClientTestSuite struct {\n suite.Suite\n conn redis.Conn\n session *testSession\n}\n\nfunc (suite *ClientTestSuite) SetupTest() {\n suite.conn = db.Pool.Get()\n\tsuite.conn.Do(\"FLUSHDB\")\n\tsuite.session = new(testSession)\n}\n\nfunc (suite *ClientTestSuite) TearDownTest() {\n\tsuite.conn.Close()\n}\n\nfunc (suite *ClientTestSuite) TestRegisterNewUser() {\n\tsuite.session.Send([]byte(user))\n\tsuite.session.Send([]byte(setupParams))\n\n\tplayers_before, err := redis.Strings(suite.conn.Do(\"KEYS\", \"player.*\"))\n\tbefore := len(players_before)\n\n\t_, err = authenticate(suite.session)\n\n\tassert.Nil(suite.T(), err)\n\n\tplayers_after, err := redis.Strings(suite.conn.Do(\"KEYS\", \"player.*\"))\n\tafter := len(players_after)\n\n\tassert.Nil(suite.T(), err)\n\n\tassert.Equal(suite.T(), before + 1, after)\n}\n\nfunc (suite *ClientTestSuite) TestAuthenticateExcistingUser() {\n\tsuite.session.Send([]byte(user))\n\tsuite.session.Send([]byte(setupParams))\n\tsuite.session.Send([]byte(user))\n\n\tplayers_before, err := redis.Strings(suite.conn.Do(\"KEYS\", \"player.*\"))\n\tbefore := len(players_before)\n\n\tauthenticate(suite.session)\n\tauthenticate(suite.session)\n\n\tplayers_after, err := redis.Strings(suite.conn.Do(\"KEYS\", \"player.*\"))\n\tafter := len(players_after)\n\n\tassert.Nil(suite.T(), err)\n\n\tassert.Equal(suite.T(), before + 1, after)\n}\n\nfunc (suite *ClientTestSuite) TestAuthenticateUserWithIncompleteData() {\n\tsuite.session.Send([]byte(\"{\\\"Command\\\": \\\"login\\\", \\\"TwitterId\\\": \\\"some twitter ID\\\"}\"))\n\n\tplayers_before, err := redis.Strings(suite.conn.Do(\"KEYS\", \"player.*\"))\n\tbefore := len(players_before)\n\n\tauthenticate(suite.session)\n\n\tplayers_after, err := redis.Strings(suite.conn.Do(\"KEYS\", \"player.*\"))\n\tafter := len(players_after)\n\n\tassert.Nil(suite.T(), err)\n\n\tassert.Equal(suite.T(), before, after)\n}\n\nfunc (suite *ClientTestSuite) TestAuthenticateUserWithNilData() {\n\tsuite.session.Send(nil)\n\t_, err := authenticate(suite.session)\n\n\tassert.NotNil(suite.T(), err)\n}\n\nfunc (suite *ClientTestSuite) TestAuthenticateUserWithInvalidJSONData() {\n\tsuite.session.Send([]byte(\"panda\"))\n\t_, err := authenticate(suite.session)\n\n\tassert.NotNil(suite.T(), err)\n}\n\nfunc (suite *ClientTestSuite) TestAuthenticateUserWithNilSetupData() {\n\tsuite.session.Send([]byte(user))\n\tsuite.session.Send(nil)\n\t_, err := authenticate(suite.session)\n\n\tassert.NotNil(suite.T(), err)\n}\n\nfunc TestClientTestSuite (t *testing.T) {\n suite.Run(t, new(ClientTestSuite))\n}\n<commit_msg>Gofmt<commit_after>package server\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/suite\"\n\n\t\"warcluster\/entities\/db\"\n)\n\nconst (\n\tuser = \"{\\\"Command\\\": \\\"login\\\", \\\"Username\\\": \\\"JohnDoe\\\", \\\"TwitterId\\\": \\\"some twitter ID\\\"}\"\n\tsetupParams = \"{\\\"Command\\\": \\\"setup_parameters\\\", \\\"Fraction\\\": 0, \\\"SunTextureId\\\": 0}\"\n)\n\ntype ClientTestSuite struct {\n\tsuite.Suite\n\tconn redis.Conn\n\tsession *testSession\n}\n\nfunc (suite *ClientTestSuite) SetupTest() {\n\tsuite.conn = db.Pool.Get()\n\tsuite.conn.Do(\"FLUSHDB\")\n\tsuite.session = new(testSession)\n}\n\nfunc (suite *ClientTestSuite) TearDownTest() {\n\tsuite.conn.Close()\n}\n\nfunc (suite *ClientTestSuite) TestRegisterNewUser() {\n\tsuite.session.Send([]byte(user))\n\tsuite.session.Send([]byte(setupParams))\n\n\tplayers_before, err := redis.Strings(suite.conn.Do(\"KEYS\", \"player.*\"))\n\tbefore := len(players_before)\n\n\t_, err = authenticate(suite.session)\n\n\tassert.Nil(suite.T(), err)\n\n\tplayers_after, err := redis.Strings(suite.conn.Do(\"KEYS\", \"player.*\"))\n\tafter := len(players_after)\n\n\tassert.Nil(suite.T(), err)\n\n\tassert.Equal(suite.T(), before+1, after)\n}\n\nfunc (suite *ClientTestSuite) TestAuthenticateExcistingUser() {\n\tsuite.session.Send([]byte(user))\n\tsuite.session.Send([]byte(setupParams))\n\tsuite.session.Send([]byte(user))\n\n\tplayers_before, err := redis.Strings(suite.conn.Do(\"KEYS\", \"player.*\"))\n\tbefore := len(players_before)\n\n\tauthenticate(suite.session)\n\tauthenticate(suite.session)\n\n\tplayers_after, err := redis.Strings(suite.conn.Do(\"KEYS\", \"player.*\"))\n\tafter := len(players_after)\n\n\tassert.Nil(suite.T(), err)\n\n\tassert.Equal(suite.T(), before+1, after)\n}\n\nfunc (suite *ClientTestSuite) TestAuthenticateUserWithIncompleteData() {\n\tsuite.session.Send([]byte(\"{\\\"Command\\\": \\\"login\\\", \\\"TwitterId\\\": \\\"some twitter ID\\\"}\"))\n\n\tplayers_before, err := redis.Strings(suite.conn.Do(\"KEYS\", \"player.*\"))\n\tbefore := len(players_before)\n\n\tauthenticate(suite.session)\n\n\tplayers_after, err := redis.Strings(suite.conn.Do(\"KEYS\", \"player.*\"))\n\tafter := len(players_after)\n\n\tassert.Nil(suite.T(), err)\n\n\tassert.Equal(suite.T(), before, after)\n}\n\nfunc (suite *ClientTestSuite) TestAuthenticateUserWithNilData() {\n\tsuite.session.Send(nil)\n\t_, err := authenticate(suite.session)\n\n\tassert.NotNil(suite.T(), err)\n}\n\nfunc (suite *ClientTestSuite) TestAuthenticateUserWithInvalidJSONData() {\n\tsuite.session.Send([]byte(\"panda\"))\n\t_, err := authenticate(suite.session)\n\n\tassert.NotNil(suite.T(), err)\n}\n\nfunc (suite *ClientTestSuite) TestAuthenticateUserWithNilSetupData() {\n\tsuite.session.Send([]byte(user))\n\tsuite.session.Send(nil)\n\t_, err := authenticate(suite.session)\n\n\tassert.NotNil(suite.T(), err)\n}\n\nfunc TestClientTestSuite(t *testing.T) {\n\tsuite.Run(t, new(ClientTestSuite))\n}\n<|endoftext|>"} {"text":"<commit_before>package installer\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\n\t\"github.com\/tsuru\/tsuru-client\/tsuru\/installer\/dm\"\n)\n\nvar defaultCompose = `\nversion: \"3\"\n\nservices:\n redis:\n image: redis:latest\n networks:\n - tsuru\n volumes:\n - redis-data:\/data\/db\n \n mongo:\n image: mongo:latest\n networks:\n - tsuru\n volumes:\n - mongo-data:\/data\n\n planb:\n image: tsuru\/planb:latest\n command: --listen :8080 --read-redis-host redis --write-redis-host redis\n ports:\n - 80:8080\n networks:\n - tsuru\n depends_on:\n - redis\n\n registry:\n image: registry:2\n environment:\n - \"REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=\/var\/lib\/registry\"\n - \"REGISTRY_HTTP_TLS_CERTIFICATE=\/certs\/{{CLUSTER_ADDR}}:5000\/registry-cert.pem\"\n - \"REGISTRY_HTTP_TLS_KEY=\/certs\/{{CLUSTER_ADDR}}:5000\/registry-key.pem\"\n volumes:\n - \"\/etc\/docker\/certs.d:\/certs:ro\"\n - registry-data:\/var\/lib\/registry\n ports:\n - 5000:5000\n networks:\n - tsuru\n\n tsuru:\n image: tsuru\/api:v1\n volumes:\n - \"\/etc\/docker\/certs.d:\/certs:ro\"\n ports:\n - 8080:8080\n networks:\n - tsuru\n depends_on:\n - redis\n - mongo\n - registry\n - planb\n environment:\n - MONGODB_ADDR=mongo\n - MONGODB_PORT=27017\n - REDIS_ADDR=redis\n - REDIS_PORT=6379\n - HIPACHE_DOMAIN={{CLUSTER_ADDR}}.nip.io\n - REGISTRY_ADDR={{CLUSTER_PRIVATE_ADDR}}\n - REGISTRY_PORT=5000\n - TSURU_ADDR=http:\/\/{{CLUSTER_ADDR}}\n - TSURU_PORT=8080\n - IAAS_CONF={{IAAS_CONF}}\n\nnetworks:\n tsuru:\n driver: overlay\n ipam:\n driver: default\n config:\n - subnet: 10.0.9.0\/24\n\nvolumes:\n mongo-data:\n redis-data:\n registry-data:\n`\n\nfunc resolveConfig(baseConfig string, customConfigs map[string]string) (string, error) {\n\tif baseConfig == \"\" {\n\t\tbaseConfig = defaultCompose\n\t} else {\n\t\tb, err := ioutil.ReadFile(baseConfig)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tbaseConfig = string(b)\n\t}\n\tfor k, v := range customConfigs {\n\t\tif v != \"\" {\n\t\t\tbaseConfig = strings.Replace(baseConfig, fmt.Sprintf(\"{{%s}}\", k), v, -1)\n\t\t}\n\t}\n\treturn baseConfig, nil\n}\n\nfunc composeDeploy(c ServiceCluster, installConfig *InstallOpts) error {\n\tcomponentsConfig := installConfig.ComponentsConfig\n\tmanager := c.GetManager()\n\tcomponentsConfig.IaaSConfig.Dockermachine.InsecureRegistry = fmt.Sprintf(\"%s:5000\", dm.GetPrivateIP(manager))\n\tiaasConfig, err := json.Marshal(componentsConfig.IaaSConfig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal iaas config: %s\", err)\n\t}\n\tconfigs := map[string]string{\n\t\t\"CLUSTER_ADDR\": manager.Base.Address,\n\t\t\"CLUSTER_PRIVATE_ADDR\": dm.GetPrivateIP(manager),\n\t\t\"IAAS_CONF\": string(iaasConfig),\n\t}\n\tconfig, err := resolveConfig(installConfig.ComposeFile, configs)\n\tif err != nil {\n\t\treturn err\n\t}\n\tremoteWriteCmdFmt := \"printf '%%s' '%s' | sudo tee %s\"\n\t_, err = manager.Host.RunSSHCommand(fmt.Sprintf(remoteWriteCmdFmt, config, \"\/tmp\/compose.yml\"))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to write remote file: %s\", err)\n\t}\n\tfmt.Printf(\"Deploying compose file in cluster manager....\\n\")\n\toutput, err := manager.Host.RunSSHCommand(\"sudo docker deploy -c \/tmp\/compose.yml tsuru\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(output)\n\treturn nil\n}\n<commit_msg>installer: use private IP for registry certificate<commit_after>package installer\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"strings\"\n\n\t\"github.com\/tsuru\/tsuru-client\/tsuru\/installer\/dm\"\n)\n\nvar defaultCompose = `\nversion: \"3\"\n\nservices:\n redis:\n image: redis:latest\n networks:\n - tsuru\n volumes:\n - redis-data:\/data\/db\n \n mongo:\n image: mongo:latest\n networks:\n - tsuru\n volumes:\n - mongo-data:\/data\n\n planb:\n image: tsuru\/planb:latest\n command: --listen :8080 --read-redis-host redis --write-redis-host redis\n ports:\n - 80:8080\n networks:\n - tsuru\n depends_on:\n - redis\n\n registry:\n image: registry:2\n environment:\n - \"REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=\/var\/lib\/registry\"\n - \"REGISTRY_HTTP_TLS_CERTIFICATE=\/certs\/{{CLUSTER_PRIVATE_ADDR}}:5000\/registry-cert.pem\"\n - \"REGISTRY_HTTP_TLS_KEY=\/certs\/{{CLUSTER_PRIVATE_ADDR}}:5000\/registry-key.pem\"\n volumes:\n - \"\/etc\/docker\/certs.d:\/certs:ro\"\n - registry-data:\/var\/lib\/registry\n ports:\n - 5000:5000\n networks:\n - tsuru\n\n tsuru:\n image: tsuru\/api:v1\n volumes:\n - \"\/etc\/docker\/certs.d:\/certs:ro\"\n ports:\n - 8080:8080\n networks:\n - tsuru\n depends_on:\n - redis\n - mongo\n - registry\n - planb\n environment:\n - MONGODB_ADDR=mongo\n - MONGODB_PORT=27017\n - REDIS_ADDR=redis\n - REDIS_PORT=6379\n - HIPACHE_DOMAIN={{CLUSTER_ADDR}}.nip.io\n - REGISTRY_ADDR={{CLUSTER_PRIVATE_ADDR}}\n - REGISTRY_PORT=5000\n - TSURU_ADDR=http:\/\/{{CLUSTER_ADDR}}\n - TSURU_PORT=8080\n - IAAS_CONF={{IAAS_CONF}}\n\nnetworks:\n tsuru:\n driver: overlay\n ipam:\n driver: default\n config:\n - subnet: 10.0.9.0\/24\n\nvolumes:\n mongo-data:\n redis-data:\n registry-data:\n`\n\nfunc resolveConfig(baseConfig string, customConfigs map[string]string) (string, error) {\n\tif baseConfig == \"\" {\n\t\tbaseConfig = defaultCompose\n\t} else {\n\t\tb, err := ioutil.ReadFile(baseConfig)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tbaseConfig = string(b)\n\t}\n\tfor k, v := range customConfigs {\n\t\tif v != \"\" {\n\t\t\tbaseConfig = strings.Replace(baseConfig, fmt.Sprintf(\"{{%s}}\", k), v, -1)\n\t\t}\n\t}\n\treturn baseConfig, nil\n}\n\nfunc composeDeploy(c ServiceCluster, installConfig *InstallOpts) error {\n\tcomponentsConfig := installConfig.ComponentsConfig\n\tmanager := c.GetManager()\n\tcomponentsConfig.IaaSConfig.Dockermachine.InsecureRegistry = fmt.Sprintf(\"%s:5000\", dm.GetPrivateIP(manager))\n\tiaasConfig, err := json.Marshal(componentsConfig.IaaSConfig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to marshal iaas config: %s\", err)\n\t}\n\tconfigs := map[string]string{\n\t\t\"CLUSTER_ADDR\": manager.Base.Address,\n\t\t\"CLUSTER_PRIVATE_ADDR\": dm.GetPrivateIP(manager),\n\t\t\"IAAS_CONF\": string(iaasConfig),\n\t}\n\tconfig, err := resolveConfig(installConfig.ComposeFile, configs)\n\tif err != nil {\n\t\treturn err\n\t}\n\tremoteWriteCmdFmt := \"printf '%%s' '%s' | sudo tee %s\"\n\t_, err = manager.Host.RunSSHCommand(fmt.Sprintf(remoteWriteCmdFmt, config, \"\/tmp\/compose.yml\"))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to write remote file: %s\", err)\n\t}\n\tfmt.Printf(\"Deploying compose file in cluster manager....\\n\")\n\toutput, err := manager.Host.RunSSHCommand(\"sudo docker deploy -c \/tmp\/compose.yml tsuru\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Printf(output)\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package llvm_module\n\nimport (\n\t\"github.com\/grainlang\/grain\/ast\"\n\t\"llvm.org\/llvm\/bindings\/go\/llvm\"\n\t\"strconv\"\n)\n\nfunc CreateLlvmModuleFromFunction(function ast.Function, allFunctions []ast.Function) llvm.Module {\n\tcontext := llvm.GlobalContext()\n\tbuilder := context.NewBuilder()\n\tmodule := context.NewModule(function.Id + \" \" + function.Name)\n\tllvmFunction := createFunctionDeclarationInModule(function, module)\n\tbodyBlock := llvm.AddBasicBlock(llvmFunction, \"body\")\n\tbuilder.SetInsertPoint(bodyBlock, bodyBlock.FirstInstruction())\n\treturnValueToLlvmValue := make(map[string]llvm.Value)\n\tfunctionToLlvmDeclaration := make(map[string]llvm.Value)\n\treturnBindings := make([]llvm.Value, 0)\n\tfor _, body := range function.Body {\n\t\tswitch typedBody := body.(type) {\n\t\tcase ast.NativeFunctionCall:\n\t\t\tnativeFunctionParamTypes := make([]llvm.Type, len(typedBody.Parameters))\n\t\t\tfor i := range typedBody.Parameters {\n\t\t\t\tnativeFunctionParamTypes[i] = llvm.Int32Type()\n\t\t\t}\n\t\t\tvar nativeFunctionReturnType llvm.Type\n\t\t\tif typedBody.ReturnType == ast.NativeValueVoid {\n\t\t\t\tnativeFunctionReturnType = llvm.VoidType()\n\t\t\t} else if typedBody.ReturnType == ast.NativeValueInt {\n\t\t\t\tnativeFunctionReturnType = llvm.Int32Type()\n\t\t\t} else {\n\t\t\t\tpanic(\"Unknown type\")\n\t\t\t}\n\t\t\tnativeFunctionType := llvm.FunctionType(nativeFunctionReturnType, nativeFunctionParamTypes, false)\n\t\t\tnativeFunction := llvm.AddFunction(module, typedBody.Name, nativeFunctionType)\n\t\t\tnativeFunctionParamValues := make([]llvm.Value, len(typedBody.Parameters))\n\t\t\tfor i, nativeFunctionParam := range typedBody.Parameters {\n\t\t\t\tfor index, param := range function.Parameters {\n\t\t\t\t\tif nativeFunctionParam.Id == param.Id {\n\t\t\t\t\t\tnativeFunctionParamValues[i] = llvmFunction.Param(index)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tnativeFunctionReturnValue := builder.CreateCall(nativeFunction, nativeFunctionParamValues, \"ret\")\n\t\t\treturnValueToLlvmValue[typedBody.Id + \" \" + typedBody.ReturnId] = nativeFunctionReturnValue\n\t\tcase ast.BinaryOperationCall:\n\t\t\tvar opcode llvm.Opcode\n\t\t\tif typedBody.Name == \"+\" {\n\t\t\t\topcode = llvm.Add\n\t\t\t} else if typedBody.Name == \"-\" {\n\t\t\t\topcode = llvm.Sub\n\t\t\t} else if typedBody.Name == \"*\" {\n\t\t\t\topcode = llvm.Mul\n\t\t\t} else if typedBody.Name == \"\/\" {\n\t\t\t\topcode = llvm.SDiv\n\t\t\t} else if typedBody.Name == \"%\" {\n\t\t\t\topcode = llvm.SRem\n\t\t\t} else {\n\t\t\t\tpanic(\"Unknown operator: \" + typedBody.Name)\n\t\t\t}\n\t\t\tvar leftParam, rightParam llvm.Value\n\t\t\tfor index, param := range function.Parameters {\n\t\t\t\tif typedBody.LeftParameter.Id == param.Id {\n\t\t\t\t\tleftParam = llvmFunction.Param(index)\n\t\t\t\t} else if typedBody.RightParameter.Id == param.Id {\n\t\t\t\t\trightParam = llvmFunction.Param(index)\n\t\t\t\t}\n\t\t\t}\n\t\t\tbinaryOperationReturnValue := builder.CreateBinOp(opcode, leftParam, rightParam, \"ret\")\n\t\t\treturnValueToLlvmValue[typedBody.Id + \" \" + typedBody.ReturnId] = binaryOperationReturnValue\n\t\tcase ast.FunctionUse:\n\t\t\tconsumingFunction := FindUsedFunction(typedBody, allFunctions)\n\t\t\tvar consumingLlvmFunction llvm.Value\n\t\t\tif foundDeclaration, ok := functionToLlvmDeclaration[consumingFunction.Id]; ok {\n\t\t\t\tconsumingLlvmFunction = foundDeclaration\n\t\t\t} else {\n\t\t\t\tconsumingLlvmFunction = createFunctionDeclarationInModule(consumingFunction, module)\n\t\t\t\tfunctionToLlvmDeclaration[consumingFunction.Id] = consumingLlvmFunction\n\t\t\t}\n\t\t\tllvmParams := make([]llvm.Value, len(typedBody.Bindings))\n\t\t\tfor i, binding := range typedBody.Bindings {\n\t\t\t\tllvmParams[i] = returnValueToLlvmValue[binding.FromId + \" \" + binding.FromReturnValue]\n\t\t\t}\n\t\t\tconsumingFunctionReturnValue := builder.CreateCall(consumingLlvmFunction, llvmParams, \"ret\")\n\t\t\tfor i, returnValue := range consumingFunction.ReturnValues {\n\t\t\t\telement := builder.CreateExtractValue(consumingFunctionReturnValue, i, \"elem\" + strconv.Itoa(i))\n\t\t\t\treturnValueToLlvmValue[typedBody.Id + \" \" + returnValue.Id] = element\n\t\t\t}\n\t\tcase ast.Binding:\n\t\t\treturnBindings = append(returnBindings, returnValueToLlvmValue[typedBody.FromId + \" \" + typedBody.FromReturnValue])\n\t\t}\n\t}\n\tbuilder.CreateAggregateRet(returnBindings)\n\treturn module\n}\n\nfunc createFunctionDeclarationInModule(function ast.Function, module llvm.Module) llvm.Value {\n\tparamTypes := make([]llvm.Type, len(function.Parameters))\n\tfor i, param := range function.Parameters {\n\t\tif param.ValueType == ast.Integer {\n\t\t\tparamTypes[i] = llvm.Int64Type()\n\t\t} else {\n\t\t\tparamTypes[i] = llvm.Int32Type()\n\t\t}\n\t}\n\treturnTypes := make([]llvm.Type, len(function.ReturnValues))\n\tfor i, returnValue := range function.ReturnValues {\n\t\tif returnValue.ValueType == ast.Integer {\n\t\t\treturnTypes[i] = llvm.Int32Type()\n\t\t} else {\n\t\t\treturnTypes[i] = llvm.Int32Type()\n\t\t}\n\t}\n\treturnType := llvm.StructType(returnTypes, false)\n\tllvmFunctionType := llvm.FunctionType(returnType, paramTypes, false)\n\tllvmFunction := llvm.AddFunction(module, \"$\" + function.Id, llvmFunctionType)\n\treturn llvmFunction\n}\n\nfunc FindUsedFunction(bodyPart ast.FunctionUse, allFunctions []ast.Function) ast.Function {\n\tfor _, fn := range allFunctions {\n\t\tif bodyPart.FunctionId == fn.Id {\n\t\t\treturn fn\n\t\t}\n\t}\n\tpanic(\"No such function \" + bodyPart.FunctionId)\n}<commit_msg>Use return value names in LLVM IR.<commit_after>package llvm_module\n\nimport (\n\t\"github.com\/grainlang\/grain\/ast\"\n\t\"llvm.org\/llvm\/bindings\/go\/llvm\"\n\t\"strconv\"\n)\n\nfunc CreateLlvmModuleFromFunction(function ast.Function, allFunctions []ast.Function) llvm.Module {\n\tcontext := llvm.GlobalContext()\n\tbuilder := context.NewBuilder()\n\tmodule := context.NewModule(function.Id + \" \" + function.Name)\n\tllvmFunction := createFunctionDeclarationInModule(function, module)\n\tbodyBlock := llvm.AddBasicBlock(llvmFunction, \"body\")\n\tbuilder.SetInsertPoint(bodyBlock, bodyBlock.FirstInstruction())\n\treturnValueToLlvmValue := make(map[string]llvm.Value)\n\tfunctionToLlvmDeclaration := make(map[string]llvm.Value)\n\treturnBindings := make([]llvm.Value, 0)\n\tfor _, body := range function.Body {\n\t\tswitch typedBody := body.(type) {\n\t\tcase ast.NativeFunctionCall:\n\t\t\tnativeFunctionParamTypes := make([]llvm.Type, len(typedBody.Parameters))\n\t\t\tfor i := range typedBody.Parameters {\n\t\t\t\tnativeFunctionParamTypes[i] = llvm.Int32Type()\n\t\t\t}\n\t\t\tvar nativeFunctionReturnType llvm.Type\n\t\t\tif typedBody.ReturnType == ast.NativeValueVoid {\n\t\t\t\tnativeFunctionReturnType = llvm.VoidType()\n\t\t\t} else if typedBody.ReturnType == ast.NativeValueInt {\n\t\t\t\tnativeFunctionReturnType = llvm.Int32Type()\n\t\t\t} else {\n\t\t\t\tpanic(\"Unknown type\")\n\t\t\t}\n\t\t\tnativeFunctionType := llvm.FunctionType(nativeFunctionReturnType, nativeFunctionParamTypes, false)\n\t\t\tnativeFunction := llvm.AddFunction(module, typedBody.Name, nativeFunctionType)\n\t\t\tnativeFunctionParamValues := make([]llvm.Value, len(typedBody.Parameters))\n\t\t\tfor i, nativeFunctionParam := range typedBody.Parameters {\n\t\t\t\tfor index, param := range function.Parameters {\n\t\t\t\t\tif nativeFunctionParam.Id == param.Id {\n\t\t\t\t\t\tnativeFunctionParamValues[i] = llvmFunction.Param(index)\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tnativeFunctionReturnValue := builder.CreateCall(nativeFunction, nativeFunctionParamValues, \"ret\")\n\t\t\treturnValueToLlvmValue[typedBody.Id + \" \" + typedBody.ReturnId] = nativeFunctionReturnValue\n\t\tcase ast.BinaryOperationCall:\n\t\t\tvar opcode llvm.Opcode\n\t\t\tif typedBody.Name == \"+\" {\n\t\t\t\topcode = llvm.Add\n\t\t\t} else if typedBody.Name == \"-\" {\n\t\t\t\topcode = llvm.Sub\n\t\t\t} else if typedBody.Name == \"*\" {\n\t\t\t\topcode = llvm.Mul\n\t\t\t} else if typedBody.Name == \"\/\" {\n\t\t\t\topcode = llvm.SDiv\n\t\t\t} else if typedBody.Name == \"%\" {\n\t\t\t\topcode = llvm.SRem\n\t\t\t} else {\n\t\t\t\tpanic(\"Unknown operator: \" + typedBody.Name)\n\t\t\t}\n\t\t\tvar leftParam, rightParam llvm.Value\n\t\t\tfor index, param := range function.Parameters {\n\t\t\t\tif typedBody.LeftParameter.Id == param.Id {\n\t\t\t\t\tleftParam = llvmFunction.Param(index)\n\t\t\t\t} else if typedBody.RightParameter.Id == param.Id {\n\t\t\t\t\trightParam = llvmFunction.Param(index)\n\t\t\t\t}\n\t\t\t}\n\t\t\tbinaryOperationReturnValue := builder.CreateBinOp(opcode, leftParam, rightParam, \"ret\")\n\t\t\treturnValueToLlvmValue[typedBody.Id + \" \" + typedBody.ReturnId] = binaryOperationReturnValue\n\t\tcase ast.FunctionUse:\n\t\t\tconsumingFunction := FindUsedFunction(typedBody, allFunctions)\n\t\t\tvar consumingLlvmFunction llvm.Value\n\t\t\tif foundDeclaration, ok := functionToLlvmDeclaration[consumingFunction.Id]; ok {\n\t\t\t\tconsumingLlvmFunction = foundDeclaration\n\t\t\t} else {\n\t\t\t\tconsumingLlvmFunction = createFunctionDeclarationInModule(consumingFunction, module)\n\t\t\t\tfunctionToLlvmDeclaration[consumingFunction.Id] = consumingLlvmFunction\n\t\t\t}\n\t\t\tllvmParams := make([]llvm.Value, len(typedBody.Bindings))\n\t\t\tfor i, binding := range typedBody.Bindings {\n\t\t\t\tllvmParams[i] = returnValueToLlvmValue[binding.FromId + \" \" + binding.FromReturnValue]\n\t\t\t}\n\t\t\tconsumingFunctionReturnValue := builder.CreateCall(consumingLlvmFunction, llvmParams, \"ret\")\n\t\t\tfor i, returnValue := range consumingFunction.ReturnValues {\n\t\t\t\tvar name string\n\t\t\t\tif returnValue.Name != \"\" {\n\t\t\t\t\tname = returnValue.Name\n\t\t\t\t} else {\n\t\t\t\t\tname = \"elem\" + strconv.Itoa(i)\n\t\t\t\t}\n\t\t\t\telement := builder.CreateExtractValue(consumingFunctionReturnValue, i, name)\n\t\t\t\treturnValueToLlvmValue[typedBody.Id + \" \" + returnValue.Id] = element\n\t\t\t}\n\t\tcase ast.Binding:\n\t\t\treturnBindings = append(returnBindings, returnValueToLlvmValue[typedBody.FromId + \" \" + typedBody.FromReturnValue])\n\t\t}\n\t}\n\tbuilder.CreateAggregateRet(returnBindings)\n\treturn module\n}\n\nfunc createFunctionDeclarationInModule(function ast.Function, module llvm.Module) llvm.Value {\n\tparamTypes := make([]llvm.Type, len(function.Parameters))\n\tfor i, param := range function.Parameters {\n\t\tif param.ValueType == ast.Integer {\n\t\t\tparamTypes[i] = llvm.Int64Type()\n\t\t} else {\n\t\t\tparamTypes[i] = llvm.Int32Type()\n\t\t}\n\t}\n\treturnTypes := make([]llvm.Type, len(function.ReturnValues))\n\tfor i, returnValue := range function.ReturnValues {\n\t\tif returnValue.ValueType == ast.Integer {\n\t\t\treturnTypes[i] = llvm.Int32Type()\n\t\t} else {\n\t\t\treturnTypes[i] = llvm.Int32Type()\n\t\t}\n\t}\n\treturnType := llvm.StructType(returnTypes, false)\n\tllvmFunctionType := llvm.FunctionType(returnType, paramTypes, false)\n\tllvmFunction := llvm.AddFunction(module, \"$\" + function.Id, llvmFunctionType)\n\treturn llvmFunction\n}\n\nfunc FindUsedFunction(bodyPart ast.FunctionUse, allFunctions []ast.Function) ast.Function {\n\tfor _, fn := range allFunctions {\n\t\tif bodyPart.FunctionId == fn.Id {\n\t\t\treturn fn\n\t\t}\n\t}\n\tpanic(\"No such function \" + bodyPart.FunctionId)\n}<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"net\"\n\t\"sync\"\n)\n\ntype NodeManager struct {\n\tparams *NodeManagerParams\n\tlogger Logger\n\twg sync.WaitGroup\n\tmu sync.Mutex\n\ttransportManager *TransportManager\n\trooms map[string]struct{}\n}\n\ntype NodeManagerParams struct {\n\tLoggerFactory LoggerFactory\n\tRoomManager *ChannelRoomManager\n\tTracksManager TracksManager\n\tListenAddr *net.UDPAddr\n\tNodes []*net.UDPAddr\n}\n\nfunc NewNodeManager(params NodeManagerParams) (*NodeManager, error) {\n\tconn, err := net.ListenUDP(\"udp\", params.ListenAddr)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttransportManager := NewTransportManager(TransportManagerParams{\n\t\tConn: conn,\n\t\tLoggerFactory: params.LoggerFactory,\n\t})\n\n\tnm := &NodeManager{\n\t\tparams: ¶ms,\n\t\ttransportManager: transportManager,\n\t\tlogger: params.LoggerFactory.GetLogger(\"nodemanager\"),\n\t\trooms: map[string]struct{}{},\n\t}\n\n\tfor _, addr := range params.Nodes {\n\t\tfactory, err := transportManager.GetTransportFactory(addr)\n\t\tif err != nil {\n\t\t\tnm.logger.Println(\"Error creating transport factory for remote addr: %s\", addr)\n\t\t}\n\n\t\tnm.handleServerTransportFactory(factory)\n\t}\n\n\tgo nm.startTransportEventLoop()\n\tgo nm.startRoomEventLoop()\n\n\treturn nm, nil\n}\n\nfunc (nm *NodeManager) startTransportEventLoop() {\n\tfor {\n\t\tfactory, err := nm.transportManager.AcceptTransportFactory()\n\t\tif err != nil {\n\t\t\tnm.logger.Printf(\"Error accepting transport factory: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tnm.handleServerTransportFactory(factory)\n\t}\n}\n\nfunc (nm *NodeManager) handleServerTransportFactory(factory *ServerTransportFactory) {\n\tnm.wg.Add(1)\n\tgo func() {\n\t\tdefer nm.wg.Done()\n\n\t\tdoneChan := make(chan struct{})\n\t\tcloseChannelOnce := sync.Once{}\n\n\t\tdone := func() {\n\t\t\tcloseChannelOnce.Do(func() {\n\t\t\t\tclose(doneChan)\n\t\t\t})\n\t\t}\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-doneChan:\n\t\t\t\tnm.logger.Printf(\"Aborting server transport factory goroutine\")\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\t\t\ttransportPromise := factory.AcceptTransport()\n\t\t\tnm.handleTransportPromise(transportPromise)\n\n\t\t\tnm.wg.Add(1)\n\t\t\tgo func(p *TransportPromise) {\n\t\t\t\tdefer nm.wg.Done()\n\n\t\t\t\t_, err := p.Wait()\n\t\t\t\tif err != nil {\n\t\t\t\t\tnm.logger.Printf(\"Error while waiting for TransportPromise: %s\", err)\n\t\t\t\t\tdone()\n\t\t\t\t}\n\t\t\t}(transportPromise)\n\t\t}\n\t}()\n}\n\nfunc (nm *NodeManager) handleTransportPromise(transportPromise *TransportPromise) {\n\tnm.wg.Add(1)\n\n\tgo func() {\n\t\tdefer nm.wg.Done()\n\n\t\tstreamTransport, err := transportPromise.Wait()\n\n\t\tif err != nil {\n\t\t\tnm.logger.Printf(\"Error waiting for transport promise: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tnm.mu.Lock()\n\t\tdefer nm.mu.Unlock()\n\n\t\tnm.params.TracksManager.Add(transportPromise.StreamID(), streamTransport)\n\t}()\n}\n\nfunc (nm *NodeManager) startRoomEventLoop() {\n\tfor {\n\t\troomEvent, err := nm.params.RoomManager.AcceptEvent()\n\t\tif err != nil {\n\t\t\tnm.logger.Printf(\"Error accepting room event: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tswitch roomEvent.Type {\n\t\tcase RoomEventTypeAdd:\n\t\t\tfor _, factory := range nm.transportManager.Factories() {\n\t\t\t\ttransportPromise := factory.NewTransport(roomEvent.RoomName)\n\t\t\t\tnm.handleTransportPromise(transportPromise)\n\t\t\t}\n\t\tcase RoomEventTypeRemove:\n\t\t\tfor _, factory := range nm.transportManager.Factories() {\n\t\t\t\tfactory.CloseTransport(roomEvent.RoomName)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (nm *NodeManager) Close() error {\n\tnm.params.RoomManager.Close()\n\tnm.transportManager.Close()\n\n\tnm.wg.Wait()\n\n\treturn nil\n}\n<commit_msg>Log listen UDP conn addr<commit_after>package server\n\nimport (\n\t\"net\"\n\t\"sync\"\n)\n\ntype NodeManager struct {\n\tparams *NodeManagerParams\n\tlogger Logger\n\twg sync.WaitGroup\n\tmu sync.Mutex\n\ttransportManager *TransportManager\n\trooms map[string]struct{}\n}\n\ntype NodeManagerParams struct {\n\tLoggerFactory LoggerFactory\n\tRoomManager *ChannelRoomManager\n\tTracksManager TracksManager\n\tListenAddr *net.UDPAddr\n\tNodes []*net.UDPAddr\n}\n\nfunc NewNodeManager(params NodeManagerParams) (*NodeManager, error) {\n\tlogger := params.LoggerFactory.GetLogger(\"nodemanager\")\n\n\tconn, err := net.ListenUDP(\"udp\", params.ListenAddr)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlogger.Printf(\"Listening on UDP port: %s\", conn.LocalAddr().String())\n\n\ttransportManager := NewTransportManager(TransportManagerParams{\n\t\tConn: conn,\n\t\tLoggerFactory: params.LoggerFactory,\n\t})\n\n\tnm := &NodeManager{\n\t\tparams: ¶ms,\n\t\ttransportManager: transportManager,\n\t\tlogger: logger,\n\t\trooms: map[string]struct{}{},\n\t}\n\n\tfor _, addr := range params.Nodes {\n\t\tlogger.Printf(\"Configuring remote node: %s\", addr.String())\n\n\t\tfactory, err := transportManager.GetTransportFactory(addr)\n\t\tif err != nil {\n\t\t\tnm.logger.Println(\"Error creating transport factory for remote addr: %s\", addr)\n\t\t}\n\n\t\tnm.handleServerTransportFactory(factory)\n\t}\n\n\tgo nm.startTransportEventLoop()\n\tgo nm.startRoomEventLoop()\n\n\treturn nm, nil\n}\n\nfunc (nm *NodeManager) startTransportEventLoop() {\n\tfor {\n\t\tfactory, err := nm.transportManager.AcceptTransportFactory()\n\t\tif err != nil {\n\t\t\tnm.logger.Printf(\"Error accepting transport factory: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tnm.handleServerTransportFactory(factory)\n\t}\n}\n\nfunc (nm *NodeManager) handleServerTransportFactory(factory *ServerTransportFactory) {\n\tnm.wg.Add(1)\n\tgo func() {\n\t\tdefer nm.wg.Done()\n\n\t\tdoneChan := make(chan struct{})\n\t\tcloseChannelOnce := sync.Once{}\n\n\t\tdone := func() {\n\t\t\tcloseChannelOnce.Do(func() {\n\t\t\t\tclose(doneChan)\n\t\t\t})\n\t\t}\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-doneChan:\n\t\t\t\tnm.logger.Printf(\"Aborting server transport factory goroutine\")\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\t\t\ttransportPromise := factory.AcceptTransport()\n\t\t\tnm.handleTransportPromise(transportPromise)\n\n\t\t\tnm.wg.Add(1)\n\t\t\tgo func(p *TransportPromise) {\n\t\t\t\tdefer nm.wg.Done()\n\n\t\t\t\t_, err := p.Wait()\n\t\t\t\tif err != nil {\n\t\t\t\t\tnm.logger.Printf(\"Error while waiting for TransportPromise: %s\", err)\n\t\t\t\t\tdone()\n\t\t\t\t}\n\t\t\t}(transportPromise)\n\t\t}\n\t}()\n}\n\nfunc (nm *NodeManager) handleTransportPromise(transportPromise *TransportPromise) {\n\tnm.wg.Add(1)\n\n\tgo func() {\n\t\tdefer nm.wg.Done()\n\n\t\tstreamTransport, err := transportPromise.Wait()\n\n\t\tif err != nil {\n\t\t\tnm.logger.Printf(\"Error waiting for transport promise: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tnm.mu.Lock()\n\t\tdefer nm.mu.Unlock()\n\n\t\tnm.params.TracksManager.Add(transportPromise.StreamID(), streamTransport)\n\t}()\n}\n\nfunc (nm *NodeManager) startRoomEventLoop() {\n\tfor {\n\t\troomEvent, err := nm.params.RoomManager.AcceptEvent()\n\t\tif err != nil {\n\t\t\tnm.logger.Printf(\"Error accepting room event: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tswitch roomEvent.Type {\n\t\tcase RoomEventTypeAdd:\n\t\t\tfor _, factory := range nm.transportManager.Factories() {\n\t\t\t\ttransportPromise := factory.NewTransport(roomEvent.RoomName)\n\t\t\t\tnm.handleTransportPromise(transportPromise)\n\t\t\t}\n\t\tcase RoomEventTypeRemove:\n\t\t\tfor _, factory := range nm.transportManager.Factories() {\n\t\t\t\tfactory.CloseTransport(roomEvent.RoomName)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (nm *NodeManager) Close() error {\n\tnm.params.RoomManager.Close()\n\tnm.transportManager.Close()\n\n\tnm.wg.Wait()\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package shark\n\nimport(\n\ttp \"tritium\/proto\"\n\t\"libxml\/xpath\"\n\t\"rubex\"\n\tproto \"goprotobuf.googlecode.com\/hg\/proto\"\n)\n\ntype Shark struct {\n\tRegexpCache map[string]*rubex.Regexp\n\tXPathCache map[string]*xpath.Expression\n}\n\ntype Ctx struct {\n\tFunctions []*Function\n\tTypes []string\n\tExports [][]string\n\tLogs []string\n\tEnv map[string]string\n\t*Shark\n\t*tp.Transform\n}\n\ntype Function struct {\n\tName string\n\t*tp.Function\n}\n\ntype Scope struct {\n\tValue interface{}\n}\n\n\n\nfunc NewEngine() (*Shark) {\n\te := &Shark{\n\t\tRegexpCache: make(map[string]*rubex.Regexp),\n\t\tXPathCache: make(map[string]*xpath.Expression),\n\t}\n\treturn e\n}\n\nfunc (ctx *Ctx) UsePackage(pkg *tp.Package) {\n\tctx.Types = make([]string, len(pkg.Types))\n\tfor i, t := range(pkg.Types) {\n\t\tctx.Types[i] = proto.GetString(t.Name)\n\t}\n\t\n\tctx.Functions = make([]*Function, len(pkg.Functions))\n\tfor i, f := range(pkg.Functions) {\n\t\tfun := &Function{\n\t\t\tName: proto.GetString(f.Name),\n\t\t\tFunction: f,\n\t\t}\n\t\tctx.Functions[i] = fun\n\t}\n}\n\nfunc (eng *Shark) Run(transform *tp.Transform, input string, vars map[string]string) (data string, exports [][]string, logs []string) {\n\tctx := &Ctx{\n\t\tShark: eng,\n\t\tExports: make([][]string, 0),\n\t\tLogs: make([]string, 0),\n\t\tEnv: make(map[string]string),\n\t\tTransform: transform,\n\t}\n\tctx.UsePackage(transform.Pkg)\n\tscope := &Scope{Value:input}\n\tctx.runInstruction(scope, transform.Objects[0].Root)\n\tdata = scope.Value.(string)\n\treturn\n}\n\nfunc (ctx *Ctx) runInstruction(scope *Scope, ins *tp.Instruction) (returnValue interface{}) {\n\treturnValue = \"\"\n\tswitch *ins.Type {\n\tcase tp.Instruction_BLOCK:\n\t\tfor _, child := range(ins.Children) {\n\t\t\treturnValue = ctx.runInstruction(scope, child)\n\t\t}\n\tcase tp.Instruction_TEXT:\n\t\treturnValue = proto.GetString(ins.Value)\n\tcase tp.Instruction_FUNCTION_CALL:\n\t\tfun := ctx.Functions[int(proto.GetInt32(ins.FunctionId))]\n\t\targs := make([]interface{}, len(ins.Arguments))\n\t\tfor i, argIns := range(ins.Arguments) {\n\t\t\targs[i] = ctx.runInstruction(scope, argIns)\n\t\t}\n\t\tif proto.GetBool(fun.BuiltIn) {\n\t\t\tswitch fun.Name {\n\t\t\tcase \"concat\":\n\t\t\t\treturnValue = args[0].(string) + args[1].(string)\n\t\t\tcase \"var\":\n\t\t\t\tts := &Scope{Value: ctx.Env[args[0].(string)]}\n\t\t\t\tctx.runChildren(ts, ins)\n\t\t\t\treturnValue = ts.Value\n\t\t\t\tctx.Env[args[0].(string)] = returnValue.(string)\n\t\t\tcase \"set\":\n\t\t\t\tscope.Value = args[0]\n\t\t\tcase \"log\":\n\t\t\t\tctx.Logs = append(ctx.Logs, args[0].(string))\n\t\t\tdefault:\n\t\t\t\tprintln(\"Must implement\", fun.Name)\n\t\t\t}\n\t\t\t\n\t\t} else {\n\t\t\tprintln(\"Not Built in!\")\n\t\t}\n\t}\n\treturn\n}\n\nfunc (ctx *Ctx) runChildren(scope *Scope, ins *tp.Instruction) (returnValue interface{}) {\n\tfor _, child := range(ins.Children) {\n\t\treturnValue = ctx.runInstruction(scope, child)\n\t}\n\treturn\n}\n<commit_msg>implementation of export<commit_after>package shark\n\nimport(\n\ttp \"tritium\/proto\"\n\t\"libxml\/xpath\"\n\t\"rubex\"\n\tproto \"goprotobuf.googlecode.com\/hg\/proto\"\n)\n\ntype Shark struct {\n\tRegexpCache map[string]*rubex.Regexp\n\tXPathCache map[string]*xpath.Expression\n}\n\ntype Ctx struct {\n\tFunctions []*Function\n\tTypes []string\n\tExports [][]string\n\tLogs []string\n\tEnv map[string]string\n\tLocalVars map[string]interface{}\n\t*Shark\n\t*tp.Transform\n}\n\ntype Function struct {\n\tName string\n\t*tp.Function\n}\n\ntype Scope struct {\n\tValue interface{}\n}\n\nfunc NewEngine() (*Shark) {\n\te := &Shark{\n\t\tRegexpCache: make(map[string]*rubex.Regexp),\n\t\tXPathCache: make(map[string]*xpath.Expression),\n\t}\n\treturn e\n}\n\nfunc (ctx *Ctx) UsePackage(pkg *tp.Package) {\n\tctx.Types = make([]string, len(pkg.Types))\n\tfor i, t := range(pkg.Types) {\n\t\tctx.Types[i] = proto.GetString(t.Name)\n\t}\n\t\n\tctx.Functions = make([]*Function, len(pkg.Functions))\n\tfor i, f := range(pkg.Functions) {\n\t\tname := proto.GetString(f.Name)\n\t\tfor _, a := range(f.Args) {\n\t\t\ttypeString := ctx.Types[int(proto.GetInt32(a.TypeId))]\n\t\t\tname = name + \".\" + typeString\n\t\t}\n\t\tfun := &Function{\n\t\t\tName: name,\n\t\t\tFunction: f,\n\t\t}\n\t\tctx.Functions[i] = fun\n\t}\n}\n\nfunc (eng *Shark) Run(transform *tp.Transform, input string, vars map[string]string) (data string, exports [][]string, logs []string) {\n\tctx := &Ctx{\n\t\tShark: eng,\n\t\tExports: make([][]string, 0),\n\t\tLogs: make([]string, 0),\n\t\tEnv: make(map[string]string),\n\t\tTransform: transform,\n\t}\n\tctx.UsePackage(transform.Pkg)\n\tscope := &Scope{Value:input}\n\tctx.runInstruction(scope, transform.Objects[0].Root)\n\tdata = scope.Value.(string)\n\treturn\n}\n\nfunc (ctx *Ctx) runInstruction(scope *Scope, ins *tp.Instruction) (returnValue interface{}) {\n\treturnValue = \"\"\n\tswitch *ins.Type {\n\tcase tp.Instruction_BLOCK:\n\t\tfor _, child := range(ins.Children) {\n\t\t\treturnValue = ctx.runInstruction(scope, child)\n\t\t}\n\tcase tp.Instruction_TEXT:\n\t\treturnValue = proto.GetString(ins.Value)\n\tcase tp.Instruction_LOCAL_VAR:\n\t\t\n\tcase tp.Instruction_FUNCTION_CALL:\n\t\tfun := ctx.Functions[int(proto.GetInt32(ins.FunctionId))]\n\t\targs := make([]interface{}, len(ins.Arguments))\n\t\tfor i, argIns := range(ins.Arguments) {\n\t\t\targs[i] = ctx.runInstruction(scope, argIns)\n\t\t}\n\t\tif proto.GetBool(fun.BuiltIn) {\n\t\t\tswitch fun.Name {\n\t\t\tcase \"concat.Text.Text\":\n\t\t\t\treturnValue = args[0].(string) + args[1].(string)\n\t\t\tcase \"var.Text\":\n\t\t\t\tts := &Scope{Value: ctx.Env[args[0].(string)]}\n\t\t\t\tctx.runChildren(ts, ins)\n\t\t\t\treturnValue = ts.Value\n\t\t\t\tctx.Env[args[0].(string)] = returnValue.(string)\n\t\t\tcase \"export.Text\":\n\t\t\t\tval := make([]string, 2)\n\t\t\t\tval[0] = args[0].(string)\n\t\t\t\tts := &Scope{Value:\"\"}\n\t\t\t\tctx.runChildren(ts, ins)\n\t\t\t\tval[1] = ts.Value.(string)\n\t\t\t\tctx.Exports = append(ctx.Exports, val)\n\t\t\tcase \"set.Text\":\n\t\t\t\tscope.Value = args[0]\n\t\t\tcase \"log.Text\":\n\t\t\t\tctx.Logs = append(ctx.Logs, args[0].(string))\n\t\t\tdefault:\n\t\t\t\tprintln(\"Must implement\", fun.Name)\n\t\t\t}\n\t\t\t\n\t\t} else {\n\t\t\tprintln(\"Not Built in!\")\n\t\t}\n\t}\n\treturn\n}\n\nfunc (ctx *Ctx) runChildren(scope *Scope, ins *tp.Instruction) (returnValue interface{}) {\n\tfor _, child := range(ins.Children) {\n\t\treturnValue = ctx.runInstruction(scope, child)\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package sha holds Security Health Analytics finding entities and functions\npackage sha\n\n\/\/ Copyright 2019 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ \thttps:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\nimport (\n\t\"encoding\/json\"\n\n\t\"cloud.google.com\/go\/pubsub\"\n\t\"github.com\/googlecloudplatform\/threat-automation\/entities\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ Finding common attributes, source properties and security marks\n\/\/ to all Security Health Analytics Security Command Center findings\ntype Finding struct {\n\tNotificationConfigName string\n\tFinding struct {\n\t\tName string\n\t\tParent string\n\t\tResourceName string\n\t\tState string\n\t\tCategory string\n\t\tExternalURI string\n\t\tSourceProperties struct {\n\t\t\tReactivationCount float32\n\t\t\tExceptionInstructions string\n\t\t\tSeverityLevel string\n\t\t\tRecommendation string\n\t\t\tProjectID string\n\t\t\tDeactivationReason string\n\t\t\tAssetCreationTime string\n\t\t\tScannerName string\n\t\t\tScanRunID string\n\t\t\tExplanation string\n\t\t}\n\t\tSecurityMarks struct {\n\t\t\tName string\n\t\t\tMarks map[string]string\n\t\t}\n\t\tEventTime string\n\t\tCreateTime string\n\t}\n}\n\n\/\/ NewFinding returns a new ShaFinding.\nfunc NewFinding(m *pubsub.Message) (*Finding, error) {\n\tf := Finding{}\n\n\tif err := json.Unmarshal(m.Data, &f); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to unmarshal\")\n\t}\n\tif f.Finding.ResourceName == \"\" {\n\t\treturn nil, errors.Wrap(entities.ErrValueNotFound, \"does not have a resource name\")\n\t}\n\n\tif f.Finding.Category == \"\" {\n\t\treturn nil, errors.Wrap(entities.ErrValueNotFound, \"does not have a category\")\n\t}\n\n\treturn &f, nil\n}\n\n\/\/ ResourceName returns the finding ResourceName\nfunc (f *Finding) ResourceName() string {\n\treturn f.Finding.ResourceName\n}\n\n\/\/ Category returns the finding Category\nfunc (f *Finding) Category() string {\n\treturn f.Finding.Category\n}\n\n\/\/ ScannerName returns the Security Health Analytics finding ScannerName\nfunc (f *Finding) ScannerName() string {\n\treturn f.Finding.SourceProperties.ScannerName\n}\n\n\/\/ ProjectID returns the Security Health Analytics finding ProjectID\nfunc (f *Finding) ProjectID() string {\n\treturn f.Finding.SourceProperties.ProjectID\n}\n<commit_msg>Add validation func<commit_after>\/\/ Package sha holds Security Health Analytics finding entities and functions\npackage sha\n\n\/\/ Copyright 2019 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ \thttps:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\nimport (\n\t\"encoding\/json\"\n\n\t\"cloud.google.com\/go\/pubsub\"\n\t\"github.com\/googlecloudplatform\/threat-automation\/entities\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ Finding common attributes, source properties and security marks\n\/\/ to all Security Health Analytics Security Command Center findings\ntype Finding struct {\n\tNotificationConfigName string\n\tFinding struct {\n\t\tName string\n\t\tParent string\n\t\tResourceName string\n\t\tState string\n\t\tCategory string\n\t\tExternalURI string\n\t\tSourceProperties struct {\n\t\t\tReactivationCount float32\n\t\t\tExceptionInstructions string\n\t\t\tSeverityLevel string\n\t\t\tRecommendation string\n\t\t\tProjectID string\n\t\t\tDeactivationReason string\n\t\t\tAssetCreationTime string\n\t\t\tScannerName string\n\t\t\tScanRunID string\n\t\t\tExplanation string\n\t\t}\n\t\tSecurityMarks struct {\n\t\t\tName string\n\t\t\tMarks map[string]string\n\t\t}\n\t\tEventTime string\n\t\tCreateTime string\n\t}\n}\n\n\/\/ NewFinding returns a new ShaFinding.\nfunc NewFinding(m *pubsub.Message) (*Finding, error) {\n\tf := Finding{}\n\n\tif err := json.Unmarshal(m.Data, &f); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to unmarshal\")\n\t}\n\n\tif err := f.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &f, nil\n}\n\nfunc (f *Finding) validate() error {\n\n\tif f.Finding.ResourceName == \"\" {\n\t\treturn errors.Wrap(entities.ErrValueNotFound, \"does not have a resource name\")\n\t}\n\n\tif f.Finding.Category == \"\" {\n\t\treturn errors.Wrap(entities.ErrValueNotFound, \"does not have a category\")\n\t}\n\n\treturn nil\n\n}\n\n\/\/ ResourceName returns the finding ResourceName\nfunc (f *Finding) ResourceName() string {\n\treturn f.Finding.ResourceName\n}\n\n\/\/ Category returns the finding Category\nfunc (f *Finding) Category() string {\n\treturn f.Finding.Category\n}\n\n\/\/ ScannerName returns the Security Health Analytics finding ScannerName\nfunc (f *Finding) ScannerName() string {\n\treturn f.Finding.SourceProperties.ScannerName\n}\n\n\/\/ ProjectID returns the Security Health Analytics finding ProjectID\nfunc (f *Finding) ProjectID() string {\n\treturn f.Finding.SourceProperties.ProjectID\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage docker\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"code.google.com\/p\/go.crypto\/ssh\/terminal\"\n\t\"github.com\/tsuru\/tsuru\/cmd\"\n\t\"github.com\/tsuru\/tsuru\/cmd\/tsuru-base\"\n\t\"github.com\/tsuru\/tsuru\/errors\"\n\ttsuruIo \"github.com\/tsuru\/tsuru\/io\"\n\t\"launchpad.net\/gnuflag\"\n)\n\nvar httpHeaderRegexp = regexp.MustCompile(`HTTP\/.*? (\\d+)`)\n\ntype moveContainersCmd struct{}\n\ntype progressFormatter struct{}\n\nfunc (progressFormatter) Format(out io.Writer, data []byte) error {\n\tvar logEntry progressLog\n\terr := json.Unmarshal(data, &logEntry)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintf(out, \"%s\\n\", logEntry.Message)\n\treturn nil\n}\n\nfunc (c *moveContainersCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"containers-move\",\n\t\tUsage: \"containers-move <from host> <to host>\",\n\t\tDesc: \"Move all containers from one host to another.\\nThis command is especially useful for host maintenance.\",\n\t\tMinArgs: 2,\n\t}\n}\n\nfunc (c *moveContainersCmd) Run(context *cmd.Context, client *cmd.Client) error {\n\turl, err := cmd.GetURL(\"\/docker\/containers\/move\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tparams := map[string]string{\n\t\t\"from\": context.Args[0],\n\t\t\"to\": context.Args[1],\n\t}\n\tb, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuffer := bytes.NewBuffer(b)\n\trequest, err := http.NewRequest(\"POST\", url, buffer)\n\tif err != nil {\n\t\treturn err\n\t}\n\trequest.Header.Set(\"Content-Type\", \"application\/json\")\n\tresponse, err := client.Do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\tw := tsuruIo.NewStreamWriter(context.Stdout, progressFormatter{})\n\tfor n := int64(1); n > 0 && err == nil; n, err = io.Copy(w, response.Body) {\n\t}\n\treturn nil\n}\n\ntype fixContainersCmd struct{}\n\nfunc (fixContainersCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"fix-containers\",\n\t\tUsage: \"fix-containers\",\n\t\tDesc: \"Fix containers that are broken in the cluster.\",\n\t}\n}\n\nfunc (fixContainersCmd) Run(context *cmd.Context, client *cmd.Client) error {\n\turl, err := cmd.GetURL(\"\/docker\/fix-containers\")\n\tif err != nil {\n\t\treturn err\n\t}\n\trequest, err := http.NewRequest(\"POST\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = client.Do(request)\n\treturn err\n}\n\ntype moveContainerCmd struct{}\n\nfunc (c *moveContainerCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"container-move\",\n\t\tUsage: \"container-move <container id> <to host>\",\n\t\tDesc: \"Move specified container to another host.\",\n\t\tMinArgs: 2,\n\t}\n}\n\nfunc (c *moveContainerCmd) Run(context *cmd.Context, client *cmd.Client) error {\n\turl, err := cmd.GetURL(fmt.Sprintf(\"\/docker\/container\/%s\/move\", context.Args[0]))\n\tif err != nil {\n\t\treturn err\n\t}\n\tparams := map[string]string{\n\t\t\"to\": context.Args[1],\n\t}\n\tb, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuffer := bytes.NewBuffer(b)\n\trequest, err := http.NewRequest(\"POST\", url, buffer)\n\tif err != nil {\n\t\treturn err\n\t}\n\trequest.Header.Set(\"Content-Type\", \"application\/json\")\n\tresponse, err := client.Do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\tw := tsuruIo.NewStreamWriter(context.Stdout, progressFormatter{})\n\tfor n := int64(1); n > 0 && err == nil; n, err = io.Copy(w, response.Body) {\n\t}\n\treturn nil\n}\n\ntype rebalanceContainersCmd struct {\n\ttsuru.ConfirmationCommand\n\tfs *gnuflag.FlagSet\n\tdry bool\n}\n\nfunc (c *rebalanceContainersCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"containers-rebalance\",\n\t\tUsage: \"containers-rebalance [--dry] [-y\/--assume-yes]\",\n\t\tDesc: \"Move containers creating a more even distribution between docker nodes.\",\n\t\tMinArgs: 0,\n\t}\n}\n\nfunc (c *rebalanceContainersCmd) Run(context *cmd.Context, client *cmd.Client) error {\n\tif !c.dry && !c.Confirm(context, \"Are you sure you want to rebalance containers?\") {\n\t\treturn nil\n\t}\n\turl, err := cmd.GetURL(\"\/docker\/containers\/rebalance\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tparams := map[string]string{\n\t\t\"dry\": fmt.Sprintf(\"%t\", c.dry),\n\t}\n\tb, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuffer := bytes.NewBuffer(b)\n\trequest, err := http.NewRequest(\"POST\", url, buffer)\n\tif err != nil {\n\t\treturn err\n\t}\n\trequest.Header.Set(\"Content-Type\", \"application\/json\")\n\tresponse, err := client.Do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\tw := tsuruIo.NewStreamWriter(context.Stdout, progressFormatter{})\n\tfor n := int64(1); n > 0 && err == nil; n, err = io.Copy(w, response.Body) {\n\t}\n\treturn nil\n}\n\nfunc (c *rebalanceContainersCmd) Flags() *gnuflag.FlagSet {\n\tif c.fs == nil {\n\t\tc.fs = c.ConfirmationCommand.Flags()\n\t\tc.fs.BoolVar(&c.dry, \"dry\", false, \"Dry run, only shows what would be done\")\n\t}\n\treturn c.fs\n}\n\ntype sshToContainerCmd struct{}\n\nfunc (sshToContainerCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"ssh\",\n\t\tUsage: \"ssh <container-id>\",\n\t\tDesc: \"Open a SSH shell to the given container.\",\n\t\tMinArgs: 1,\n\t}\n}\n\nfunc (sshToContainerCmd) Run(context *cmd.Context, _ *cmd.Client) error {\n\tserverURL, err := cmd.GetURL(\"\/docker\/ssh\/\" + context.Args[0])\n\tif err != nil {\n\t\treturn err\n\t}\n\trequest, err := http.NewRequest(\"GET\", serverURL, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\trequest.Close = true\n\ttoken, err := cmd.ReadToken()\n\tif err == nil {\n\t\trequest.Header.Set(\"Authorization\", \"bearer \"+token)\n\t}\n\tparsedURL, _ := url.Parse(serverURL)\n\tconn, err := net.Dial(\"tcp\", parsedURL.Host)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\trequest.Write(conn)\n\tif stdin, ok := context.Stdin.(*os.File); ok {\n\t\tfd := int(stdin.Fd())\n\t\tif terminal.IsTerminal(fd) {\n\t\t\toldState, err := terminal.MakeRaw(fd)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer terminal.Restore(fd, oldState)\n\t\t\tsigChan := make(chan os.Signal, 2)\n\t\t\tgo func(c <-chan os.Signal) {\n\t\t\t\tif _, ok := <-c; ok {\n\t\t\t\t\tterminal.Restore(fd, oldState)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t}(sigChan)\n\t\t\tsignal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL, syscall.SIGQUIT)\n\t\t}\n\t}\n\tbytesLimit := 50\n\tvar readStr string\n\tbyteBuffer := make([]byte, 1)\n\tfor i := 0; i < bytesLimit && byteBuffer[0] != '\\n'; i++ {\n\t\t_, err := conn.Read(byteBuffer)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\treadStr += string(byteBuffer)\n\t}\n\tmatches := httpHeaderRegexp.FindAllStringSubmatch(readStr, -1)\n\tif len(matches) > 0 && len(matches[0]) > 1 {\n\t\tcode, _ := strconv.Atoi(matches[0][1])\n\t\treturn &errors.HTTP{\n\t\t\tCode: code,\n\t\t\tMessage: strings.TrimSpace(readStr),\n\t\t}\n\t} else {\n\t\tcontext.Stdout.Write([]byte(readStr))\n\t}\n\terrs := make(chan error, 2)\n\tquit := make(chan bool)\n\tgo io.Copy(conn, context.Stdin)\n\tgo func() {\n\t\tdefer close(quit)\n\t\t_, err := io.Copy(context.Stdout, conn)\n\t\tif err != nil && err != io.EOF {\n\t\t\terrs <- err\n\t\t}\n\t}()\n\t<-quit\n\tclose(errs)\n\treturn <-errs\n}\n<commit_msg>provision\/docker: ask for size of the terminal and send to the server<commit_after>\/\/ Copyright 2014 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage docker\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"code.google.com\/p\/go.crypto\/ssh\/terminal\"\n\t\"github.com\/tsuru\/tsuru\/cmd\"\n\t\"github.com\/tsuru\/tsuru\/cmd\/tsuru-base\"\n\t\"github.com\/tsuru\/tsuru\/errors\"\n\ttsuruIo \"github.com\/tsuru\/tsuru\/io\"\n\t\"launchpad.net\/gnuflag\"\n)\n\nvar httpHeaderRegexp = regexp.MustCompile(`HTTP\/.*? (\\d+)`)\n\ntype moveContainersCmd struct{}\n\ntype progressFormatter struct{}\n\nfunc (progressFormatter) Format(out io.Writer, data []byte) error {\n\tvar logEntry progressLog\n\terr := json.Unmarshal(data, &logEntry)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Fprintf(out, \"%s\\n\", logEntry.Message)\n\treturn nil\n}\n\nfunc (c *moveContainersCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"containers-move\",\n\t\tUsage: \"containers-move <from host> <to host>\",\n\t\tDesc: \"Move all containers from one host to another.\\nThis command is especially useful for host maintenance.\",\n\t\tMinArgs: 2,\n\t}\n}\n\nfunc (c *moveContainersCmd) Run(context *cmd.Context, client *cmd.Client) error {\n\turl, err := cmd.GetURL(\"\/docker\/containers\/move\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tparams := map[string]string{\n\t\t\"from\": context.Args[0],\n\t\t\"to\": context.Args[1],\n\t}\n\tb, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuffer := bytes.NewBuffer(b)\n\trequest, err := http.NewRequest(\"POST\", url, buffer)\n\tif err != nil {\n\t\treturn err\n\t}\n\trequest.Header.Set(\"Content-Type\", \"application\/json\")\n\tresponse, err := client.Do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\tw := tsuruIo.NewStreamWriter(context.Stdout, progressFormatter{})\n\tfor n := int64(1); n > 0 && err == nil; n, err = io.Copy(w, response.Body) {\n\t}\n\treturn nil\n}\n\ntype fixContainersCmd struct{}\n\nfunc (fixContainersCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"fix-containers\",\n\t\tUsage: \"fix-containers\",\n\t\tDesc: \"Fix containers that are broken in the cluster.\",\n\t}\n}\n\nfunc (fixContainersCmd) Run(context *cmd.Context, client *cmd.Client) error {\n\turl, err := cmd.GetURL(\"\/docker\/fix-containers\")\n\tif err != nil {\n\t\treturn err\n\t}\n\trequest, err := http.NewRequest(\"POST\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = client.Do(request)\n\treturn err\n}\n\ntype moveContainerCmd struct{}\n\nfunc (c *moveContainerCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"container-move\",\n\t\tUsage: \"container-move <container id> <to host>\",\n\t\tDesc: \"Move specified container to another host.\",\n\t\tMinArgs: 2,\n\t}\n}\n\nfunc (c *moveContainerCmd) Run(context *cmd.Context, client *cmd.Client) error {\n\turl, err := cmd.GetURL(fmt.Sprintf(\"\/docker\/container\/%s\/move\", context.Args[0]))\n\tif err != nil {\n\t\treturn err\n\t}\n\tparams := map[string]string{\n\t\t\"to\": context.Args[1],\n\t}\n\tb, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuffer := bytes.NewBuffer(b)\n\trequest, err := http.NewRequest(\"POST\", url, buffer)\n\tif err != nil {\n\t\treturn err\n\t}\n\trequest.Header.Set(\"Content-Type\", \"application\/json\")\n\tresponse, err := client.Do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\tw := tsuruIo.NewStreamWriter(context.Stdout, progressFormatter{})\n\tfor n := int64(1); n > 0 && err == nil; n, err = io.Copy(w, response.Body) {\n\t}\n\treturn nil\n}\n\ntype rebalanceContainersCmd struct {\n\ttsuru.ConfirmationCommand\n\tfs *gnuflag.FlagSet\n\tdry bool\n}\n\nfunc (c *rebalanceContainersCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"containers-rebalance\",\n\t\tUsage: \"containers-rebalance [--dry] [-y\/--assume-yes]\",\n\t\tDesc: \"Move containers creating a more even distribution between docker nodes.\",\n\t\tMinArgs: 0,\n\t}\n}\n\nfunc (c *rebalanceContainersCmd) Run(context *cmd.Context, client *cmd.Client) error {\n\tif !c.dry && !c.Confirm(context, \"Are you sure you want to rebalance containers?\") {\n\t\treturn nil\n\t}\n\turl, err := cmd.GetURL(\"\/docker\/containers\/rebalance\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tparams := map[string]string{\n\t\t\"dry\": fmt.Sprintf(\"%t\", c.dry),\n\t}\n\tb, err := json.Marshal(params)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuffer := bytes.NewBuffer(b)\n\trequest, err := http.NewRequest(\"POST\", url, buffer)\n\tif err != nil {\n\t\treturn err\n\t}\n\trequest.Header.Set(\"Content-Type\", \"application\/json\")\n\tresponse, err := client.Do(request)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\tw := tsuruIo.NewStreamWriter(context.Stdout, progressFormatter{})\n\tfor n := int64(1); n > 0 && err == nil; n, err = io.Copy(w, response.Body) {\n\t}\n\treturn nil\n}\n\nfunc (c *rebalanceContainersCmd) Flags() *gnuflag.FlagSet {\n\tif c.fs == nil {\n\t\tc.fs = c.ConfirmationCommand.Flags()\n\t\tc.fs.BoolVar(&c.dry, \"dry\", false, \"Dry run, only shows what would be done\")\n\t}\n\treturn c.fs\n}\n\ntype sshToContainerCmd struct{}\n\nfunc (sshToContainerCmd) Info() *cmd.Info {\n\treturn &cmd.Info{\n\t\tName: \"ssh\",\n\t\tUsage: \"ssh <container-id>\",\n\t\tDesc: \"Open a SSH shell to the given container.\",\n\t\tMinArgs: 1,\n\t}\n}\n\nfunc (sshToContainerCmd) Run(context *cmd.Context, _ *cmd.Client) error {\n\tvar width, height int\n\tif stdin, ok := context.Stdin.(*os.File); ok {\n\t\tfd := int(stdin.Fd())\n\t\tif terminal.IsTerminal(fd) {\n\t\t\twidth, height, _ = terminal.GetSize(fd)\n\t\t\toldState, err := terminal.MakeRaw(fd)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer terminal.Restore(fd, oldState)\n\t\t\tsigChan := make(chan os.Signal, 2)\n\t\t\tgo func(c <-chan os.Signal) {\n\t\t\t\tif _, ok := <-c; ok {\n\t\t\t\t\tterminal.Restore(fd, oldState)\n\t\t\t\t\tos.Exit(1)\n\t\t\t\t}\n\t\t\t}(sigChan)\n\t\t\tsignal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGKILL, syscall.SIGQUIT)\n\t\t}\n\t}\n\tqueryString := make(url.Values)\n\tqueryString.Set(\"width\", strconv.Itoa(width))\n\tqueryString.Set(\"height\", strconv.Itoa(height))\n\tserverURL, err := cmd.GetURL(\"\/docker\/ssh\/\" + context.Args[0] + \"?\" + queryString.Encode())\n\tif err != nil {\n\t\treturn err\n\t}\n\trequest, err := http.NewRequest(\"GET\", serverURL, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\trequest.Close = true\n\ttoken, err := cmd.ReadToken()\n\tif err == nil {\n\t\trequest.Header.Set(\"Authorization\", \"bearer \"+token)\n\t}\n\tparsedURL, _ := url.Parse(serverURL)\n\tconn, err := net.Dial(\"tcp\", parsedURL.Host)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\trequest.Write(conn)\n\tbytesLimit := 50\n\tvar readStr string\n\tbyteBuffer := make([]byte, 1)\n\tfor i := 0; i < bytesLimit && byteBuffer[0] != '\\n'; i++ {\n\t\t_, err := conn.Read(byteBuffer)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\treadStr += string(byteBuffer)\n\t}\n\tmatches := httpHeaderRegexp.FindAllStringSubmatch(readStr, -1)\n\tif len(matches) > 0 && len(matches[0]) > 1 {\n\t\tcode, _ := strconv.Atoi(matches[0][1])\n\t\treturn &errors.HTTP{\n\t\t\tCode: code,\n\t\t\tMessage: strings.TrimSpace(readStr),\n\t\t}\n\t} else {\n\t\tcontext.Stdout.Write([]byte(readStr))\n\t}\n\terrs := make(chan error, 2)\n\tquit := make(chan bool)\n\tgo io.Copy(conn, context.Stdin)\n\tgo func() {\n\t\tdefer close(quit)\n\t\t_, err := io.Copy(context.Stdout, conn)\n\t\tif err != nil && err != io.EOF {\n\t\t\terrs <- err\n\t\t}\n\t}()\n\t<-quit\n\tclose(errs)\n\treturn <-errs\n}\n<|endoftext|>"} {"text":"<commit_before>package server_test\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/sha256\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"github.com\/WhiteHatCP\/seclab-listener\/server\"\n\t\"testing\"\n\t\"time\"\n)\n\ntype nullBackend struct{}\n\nfunc (b *nullBackend) Open() error {\n\treturn nil\n}\nfunc (b *nullBackend) Close() error {\n\treturn nil\n}\n\ntype errorBackend struct{}\n\nfunc (b *errorBackend) Open() error {\n\treturn errors.New(\"open error\")\n}\nfunc (b *errorBackend) Close() error {\n\treturn errors.New(\"close error\")\n}\n\nfunc getTestInstance() server.Server {\n\treturn server.New([]byte(\"dismykey\"), 10, &nullBackend{})\n}\n\nfunc TestBadSignature(t *testing.T) {\n\tmsg := make([]byte, 41)\n\terr := getTestInstance().CheckMessage(msg)\n\tif err == nil || err.Error() != \"Incorrect HMAC signature\" {\n\t\tt.Error(\"Expected Incorrect HMAC signature, got\", err)\n\t}\n}\n\nfunc TestExpired(t *testing.T) {\n\tpayload := make([]byte, 9)\n\tmac := hmac.New(sha256.New, []byte(\"dismykey\"))\n\tmac.Write(payload)\n\terr := getTestInstance().CheckMessage(mac.Sum(payload))\n\tif err == nil || err.Error() != \"Request expired\" {\n\t\tt.Error(\"Expected Request expired, got\", err)\n\t}\n}\n\nfunc TestGoodCheck(t *testing.T) {\n\tpayload := make([]byte, 9)\n\tpayload[0] = 0xff\n\tnow64 := time.Now().Unix()\n\tbinary.BigEndian.PutUint64(payload[1:9], uint64(now64))\n\tmac := hmac.New(sha256.New, []byte(\"dismykey\"))\n\tmac.Write(payload)\n\tmessage := mac.Sum(payload)\n\tif err := getTestInstance().CheckMessage(message); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestDispatchUnknown(t *testing.T) {\n\t_, err := getTestInstance().DispatchRequest(0x69)\n\tif err == nil || err.Error() != \"Unrecognized status byte: 0x69\" {\n\t\tt.Error(\"Expected Unrecognized status byte: 0x69, got\", err)\n\t}\n}\n\nfunc TestDispatchOpenError(t *testing.T) {\n\ts := server.New(nil, 10, &errorBackend{})\n\t_, err := s.DispatchRequest(0xff)\n\tif err == nil || err.Error() != \"open error\" {\n\t\tt.Error(\"Expected open error, got\", err)\n\t}\n}\n\nfunc TestDispatchCloseError(t *testing.T) {\n\ts := server.New(nil, 10, &errorBackend{})\n\t_, err := s.DispatchRequest(0x00)\n\tif err == nil || err.Error() != \"close error\" {\n\t\tt.Error(\"Expected close error, got\", err)\n\t}\n}\n<commit_msg>Test correct functionality<commit_after>package server_test\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/sha256\"\n\t\"encoding\/binary\"\n\t\"errors\"\n\t\"github.com\/WhiteHatCP\/seclab-listener\/server\"\n\t\"testing\"\n\t\"time\"\n)\n\ntype nullBackend struct{}\n\nfunc (b *nullBackend) Open() error {\n\treturn nil\n}\nfunc (b *nullBackend) Close() error {\n\treturn nil\n}\n\ntype errorBackend struct{}\n\nfunc (b *errorBackend) Open() error {\n\treturn errors.New(\"open error\")\n}\nfunc (b *errorBackend) Close() error {\n\treturn errors.New(\"close error\")\n}\n\nfunc getTestInstance() server.Server {\n\treturn server.New([]byte(\"dismykey\"), 10, &nullBackend{})\n}\n\nfunc TestBadSignature(t *testing.T) {\n\tmsg := make([]byte, 41)\n\terr := getTestInstance().CheckMessage(msg)\n\tif err == nil || err.Error() != \"Incorrect HMAC signature\" {\n\t\tt.Error(\"Expected Incorrect HMAC signature, got\", err)\n\t}\n}\n\nfunc TestExpired(t *testing.T) {\n\tpayload := make([]byte, 9)\n\tmac := hmac.New(sha256.New, []byte(\"dismykey\"))\n\tmac.Write(payload)\n\terr := getTestInstance().CheckMessage(mac.Sum(payload))\n\tif err == nil || err.Error() != \"Request expired\" {\n\t\tt.Error(\"Expected Request expired, got\", err)\n\t}\n}\n\nfunc TestGoodCheck(t *testing.T) {\n\tpayload := make([]byte, 9)\n\tpayload[0] = 0xff\n\tnow64 := time.Now().Unix()\n\tbinary.BigEndian.PutUint64(payload[1:9], uint64(now64))\n\tmac := hmac.New(sha256.New, []byte(\"dismykey\"))\n\tmac.Write(payload)\n\tmessage := mac.Sum(payload)\n\tif err := getTestInstance().CheckMessage(message); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestDispatchUnknown(t *testing.T) {\n\t_, err := getTestInstance().DispatchRequest(0x69)\n\tif err == nil || err.Error() != \"Unrecognized status byte: 0x69\" {\n\t\tt.Error(\"Expected Unrecognized status byte: 0x69, got\", err)\n\t}\n}\n\nfunc TestDispatchOpenError(t *testing.T) {\n\ts := server.New(nil, 10, &errorBackend{})\n\t_, err := s.DispatchRequest(0xff)\n\tif err == nil || err.Error() != \"open error\" {\n\t\tt.Error(\"Expected open error, got\", err)\n\t}\n}\n\nfunc TestDispatchCloseError(t *testing.T) {\n\ts := server.New(nil, 10, &errorBackend{})\n\t_, err := s.DispatchRequest(0x00)\n\tif err == nil || err.Error() != \"close error\" {\n\t\tt.Error(\"Expected close error, got\", err)\n\t}\n}\n\nfunc TestDispatchOpenGood(t *testing.T) {\n\tresp, err := getTestInstance().DispatchRequest(0xff)\n\tif err != nil {\n\t\tt.Error(err)\n\t} else if resp != 0xff {\n\t\tt.Errorf(\"Expected 0xff, got 0x%x\", resp)\n\t}\n}\n\nfunc TestDispatchCloseGood(t *testing.T) {\n\tresp, err := getTestInstance().DispatchRequest(0x00)\n\tif err != nil {\n\t\tt.Error(err)\n\t} else if resp != 0xff {\n\t\tt.Errorf(\"Expected 0xff, got 0x%x\", resp)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package auth\n\n\/*\n * Implements auth middleware for cention applications.\n * Expects to be running in Gin framework\n *\/\n\nimport (\n\twf \"c3\/osm\/webframework\"\n\t\"c3\/web\/controllers\"\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cention-mujibur-rahman\/gobcache\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/gorilla\/securecookie\"\n)\n\nconst (\n\tHTTP_UNAUTHORIZE_ACCESS = 401\n\tHTTP_FORBIDDEN_ACCESS = 403\n\tHTTP_FOUND = 302\n)\n\nvar (\n\thashKey = securecookie.GenerateRandomKey(64)\n\tblockKey = securecookie.GenerateRandomKey(32)\n\tsc = securecookie.New(hashKey, blockKey)\n\tERROR_USER_PASS_MISMATCH = errors.New(\"Username or Password doesnt match!\")\n\tERROR_USER_PASS_EMPTY = errors.New(\"Username or Password is empty!\")\n\tERROR_CACHE_MISSED = errors.New(\"Cache missed somehow!\")\n\tERROR_ON_SECURECOOKIE_HASHKEY = errors.New(\"HashKey doesn't match with secureCookie\")\n\tERROR_COOKIE_NOT_FOUND = errors.New(\"Cookie: cention-suiteSSID=? not found\")\n\tERROR_WF_USER_NULL = errors.New(\"webframework user is null\")\n\tERROR_MEMCACHE_FAILED = errors.New(\"Sessiond not running\")\n)\n\nvar (\n\tsessiond = gobcache.NewCache(\"localhost:11311\")\n)\n\ntype AuthCookieManager struct {\n\tUserId int\n\tLastLoginTime int64\n\tLoggedIn bool\n}\n\nfunc checkingMemcache() bool {\n\tconn, err := net.Dial(\"tcp\", \"localhost:11311\")\n\tif err != nil {\n\t\tlog.Println(\"Sessiond Server is not running! \", err)\n\t\treturn false\n\t}\n\tdefer conn.Close()\n\treturn true\n}\n\nfunc getCookieHashKey(ctx *gin.Context) (string, error) {\n\tcookie, err := ctx.Request.Cookie(\"cention-suiteSSID\")\n\tif err != nil {\n\t\tlog.Println(\"getCookieHashKey(): Cookie is empty - \", err)\n\t\treturn \"\", err\n\t}\n\tif cookie != nil && cookie.Value == \"\" && cookie.Value == \"guest\" {\n\t\treturn \"\", ERROR_CACHE_MISSED\n\t}\n\treturn cookie.Value, nil\n}\n\nfunc decodeCookie(ctx *gin.Context) (string, error) {\n\tcookie, err := getCookieHashKey(ctx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif cookie != \"\" && cookie != \"guest\" {\n\t\treturn cookie, nil\n\t}\n\treturn \"\", ERROR_COOKIE_NOT_FOUND\n}\n\nfunc getCurrentSession(v []byte) (int, int, bool, error) {\n\tsv := string(v)\n\tsValue := strings.Split(sv, \"\/\")\n\tif len(sValue) != 3 {\n\t\treturn 0, 0, false, ERROR_CACHE_MISSED\n\t}\n\tuid, err := strconv.Atoi(sValue[0])\n\tif err != nil {\n\t\tlog.Printf(\"Error on Uid conversion: %v\", err)\n\t\treturn 0, 0, false, err\n\t}\n\tts, err := strconv.Atoi(sValue[1])\n\tif err != nil {\n\t\tlog.Printf(\"Error on timestampLastLogin conversion: %v\", err)\n\t\treturn 0, 0, false, err\n\t}\n\tcurrentlyLoggedIn, err := strconv.ParseBool(sValue[2])\n\tif err != nil {\n\t\tlog.Printf(\"Error on currentlyLoggedIn conversion: %v\", err)\n\t\treturn 0, 0, false, err\n\t}\n\treturn uid, ts, currentlyLoggedIn, nil\n}\n\nfunc fetchFromCache(key string) error {\n\tskey := \"Session_\" + key\n\tsItems, err := sessiond.GetRawFromMemcache(skey)\n\tif err != nil {\n\t\tlog.Println(\"[GetRawFromMemcache] key `Session` is empty!\")\n\t\treturn err\n\t}\n\tif sItems != nil {\n\t\tuid, timestamp, currentlyLogedin, err := getCurrentSession(sItems.Value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/Mujibur: timestamp, thinking how to use it?\n\t\t_ = timestamp\n\t\tif uid != 0 && currentlyLogedin {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn ERROR_CACHE_MISSED\n}\n\nfunc CheckOrCreateAuthCookie(ctx *gin.Context) error {\n\tssid, err := decodeCookie(ctx)\n\tif err != nil {\n\t\terr := createNewAuthCookie(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\tuser := ctx.Request.FormValue(\"username\")\n\tpass := ctx.Request.FormValue(\"password\")\n\tif user == \"\" && pass == \"\" {\n\t\tif checkingMemcache() {\n\t\t\tif err = fetchFromCache(ssid); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\treturn ERROR_USER_PASS_EMPTY\n\t} else {\n\t\tlog.Println(\"!!-- Setting cookie informations to memcache. First time login.\")\n\t\twfUser, err := validateUser(user, pass)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error on CheckOrCreateAuthCookie() - validateUser: \", err)\n\t\t\treturn err\n\t\t}\n\t\tif wfUser != nil {\n\t\t\tlastLoginTime := time.Now().Unix()\n\t\t\tif checkingMemcache() {\n\t\t\t\tsValue := fmt.Sprintf(\"%v\/%v\/%v\", wfUser.Id, lastLoginTime, true)\n\t\t\t\tif err = saveToSessiondCache(ssid, sValue); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err = saveUserIdToCache(wfUser.Id, ssid); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlog.Printf(\"CentionAuth: User `%s` just now Logged In\", wfUser.Username)\n\t\t\t\treturn nil\n\t\t\t} else {\n\t\t\t\treturn ERROR_MEMCACHE_FAILED\n\t\t\t}\n\t\t}\n\t}\n\treturn ERROR_WF_USER_NULL\n}\n\nfunc saveUserIdToCache(key int, value string) error {\n\tsKey := fmt.Sprintf(\"user\/%d\", key)\n\tif err := sessiond.SetRawToMemcache(sKey, value); err != nil {\n\t\tlog.Println(\"[`SetRawToMemcache`] Error on saving:\", err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc saveToSessiondCache(key, value string) error {\n\tsKey := \"Session_\" + key\n\tif err := sessiond.SetRawToMemcache(sKey, value); err != nil {\n\t\tlog.Println(\"[`SetRawToMemcache`] Error on saving:\", err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc validateUser(user, pass string) (*wf.User, error) {\n\twu, err := wf.QueryUser_byLogin(user)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif wu != nil {\n\t\tif wu.Active && wu.Password == encodePassword(pass) {\n\t\t\treturn wu, nil\n\t\t}\n\t}\n\treturn nil, ERROR_USER_PASS_MISMATCH\n}\nfunc encodePassword(p string) string {\n\tep := sha256.New()\n\t_, err := ep.Write([]byte(p))\n\tif err != nil {\n\t\tlog.Println(\"encodePassword() - Error on Sha256: \", err)\n\t\treturn \"\"\n\t}\n\treturn base64.StdEncoding.EncodeToString(ep.Sum(nil))\n}\nfunc createNewAuthCookie(ctx *gin.Context) error {\n\tvalue := map[string]interface{}{\n\t\t\"cookie-set-date\": time.Now().Unix(),\n\t}\n\tencoded, err := sc.Encode(\"cention-suiteSSID\", value)\n\tif err != nil {\n\t\tlog.Printf(\"createNewAuthCookie(): Error %v, creating `guest` cookie\", err)\n\t\tcookie := fmt.Sprintf(\"cention-suiteSSID=%s; Path=\/\", \"guest\")\n\t\tctx.Writer.Header().Add(\"Set-Cookie\", cookie)\n\t\treturn err\n\t}\n\tcookie := fmt.Sprintf(\"cention-suiteSSID=%s; Path=\/\", encoded)\n\tctx.Writer.Header().Add(\"Set-Cookie\", cookie)\n\treturn nil\n}\nfunc CheckAuthCookie(ctx *gin.Context) (bool, error) {\n\tcookie, err := decodeCookie(ctx)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tok, err := validateByBrowserCookie(cookie)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !ok {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\nfunc fetchFromCacheWithValue(key string) (int, int, bool, error) {\n\tskey := \"Session_\" + key\n\tsItems, err := sessiond.GetRawFromMemcache(skey)\n\tif err != nil {\n\t\tlog.Println(\"[GetRawFromMemcache] key `Session` is empty!\")\n\t\treturn 0, 0, false, err\n\t}\n\tif sItems != nil {\n\t\tuid, timestamp, currentlyLogedin, err := getCurrentSession(sItems.Value)\n\t\tif err != nil {\n\t\t\treturn 0, 0, false, err\n\t\t}\n\t\tif uid != 0 && currentlyLogedin {\n\t\t\treturn uid, timestamp, currentlyLogedin, nil\n\t\t}\n\t}\n\treturn 0, 0, false, ERROR_CACHE_MISSED\n}\n\nfunc validateByBrowserCookie(ssid string) (bool, error) {\n\tif checkingMemcache() {\n\t\tuid, _, currentlyLogedin, err := fetchFromCacheWithValue(ssid)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif err := updateTimeStampToCache(ssid, uid, currentlyLogedin); err != nil {\n\t\t\tlog.Printf(\"Error on validateByBrowserCookie(): %v\", err)\n\t\t\treturn false, err\n\t\t}\n\t\tlog.Printf(\"CentionAuth: Cookie has verified with this info: %v\", uid)\n\t\treturn true, nil\n\t}\n\treturn false, ERROR_MEMCACHE_FAILED\n}\n\nfunc destroyAuthCookie(ctx *gin.Context) error {\n\tssid, err := decodeCookie(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif checkingMemcache() {\n\t\tuid, _, _, err := fetchFromCacheWithValue(ssid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := updateTimeStampToCache(ssid, uid, false); err != nil {\n\t\t\tlog.Printf(\"Error on validateByBrowserCookie(): %v\", err)\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\treturn ERROR_MEMCACHE_FAILED\n}\nfunc Logout(ctx *gin.Context) error {\n\treturn destroyAuthCookie(ctx)\n}\n\nfunc fetchCookieFromRequest(r *http.Request) (string, error) {\n\tcookie, err := r.Cookie(\"cention-suiteSSID\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn cookie.Value, nil\n}\nfunc GetWebframeworkUserFromRequest(r *http.Request) int {\n\tssid, err := fetchCookieFromRequest(r)\n\tif err != nil {\n\t\tlog.Printf(\"Error on getting SSID: %v\", err)\n\t\treturn 0\n\t}\n\tif ssid == \"\" {\n\t\treturn 0\n\t}\n\tif checkingMemcache() {\n\t\tuid, _, currentlyLogedin, err := fetchFromCacheWithValue(ssid)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error on fetchFromCacheWithValue(): \", err)\n\t\t\treturn 0\n\t\t}\n\t\tif !currentlyLogedin {\n\t\t\treturn 0\n\t\t}\n\t\t\/\/Update timestamp for every request\n\t\tif err := updateTimeStampToCache(ssid, uid, currentlyLogedin); err != nil {\n\t\t\tlog.Printf(\"Error on %v\", err)\n\t\t\treturn 0\n\t\t}\n\t\treturn uid\n\t}\n\treturn 0\n}\n\nfunc updateTimeStampToCache(ssid string, uid int, loginStatus bool) error {\n\tlastTimeGetRequest := time.Now().Unix()\n\tsvalue := fmt.Sprintf(\"%v\/%v\/%v\", uid, lastTimeGetRequest, loginStatus)\n\tif err := saveToSessiondCache(ssid, svalue); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\nfunc Middleware() func(*gin.Context) {\n\treturn func(ctx *gin.Context) {\n\t\tif strings.HasPrefix(ctx.Request.RequestURI, \"\/debug\/pprof\/\") {\n\t\t\tctx.Next()\n\t\t\treturn\n\t\t}\n\t\twfUserId := GetWebframeworkUserFromRequest(ctx.Request)\n\t\tif wfUserId == 0 {\n\t\t\tctx.AbortWithStatus(HTTP_UNAUTHORIZE_ACCESS)\n\t\t\treturn\n\t\t}\n\t\tcurrUser := controllers.FetchUserObject(wfUserId)\n\t\tctx.Keys = make(map[string]interface{})\n\t\tctx.Keys[\"loggedInUser\"] = currUser\n\t\tctx.Next()\n\t}\n}\n<commit_msg>Reports: needs to get the users lastlogin, lastlogout<commit_after>package auth\n\n\/*\n * Implements auth middleware for cention applications.\n * Expects to be running in Gin framework\n *\/\n\nimport (\n\twf \"c3\/osm\/webframework\"\n\t\"c3\/osm\/workflow\"\n\t\"c3\/web\/controllers\"\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cention-mujibur-rahman\/gobcache\"\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/gorilla\/securecookie\"\n)\n\nconst (\n\tHTTP_UNAUTHORIZE_ACCESS = 401\n\tHTTP_FORBIDDEN_ACCESS = 403\n\tHTTP_FOUND = 302\n)\n\nvar (\n\thashKey = securecookie.GenerateRandomKey(64)\n\tblockKey = securecookie.GenerateRandomKey(32)\n\tsc = securecookie.New(hashKey, blockKey)\n\tERROR_USER_PASS_MISMATCH = errors.New(\"Username or Password doesnt match!\")\n\tERROR_USER_PASS_EMPTY = errors.New(\"Username or Password is empty!\")\n\tERROR_CACHE_MISSED = errors.New(\"Cache missed somehow!\")\n\tERROR_ON_SECURECOOKIE_HASHKEY = errors.New(\"HashKey doesn't match with secureCookie\")\n\tERROR_COOKIE_NOT_FOUND = errors.New(\"Cookie: cention-suiteSSID=? not found\")\n\tERROR_WF_USER_NULL = errors.New(\"webframework user is null\")\n\tERROR_MEMCACHE_FAILED = errors.New(\"Sessiond not running\")\n)\n\nvar (\n\tsessiond = gobcache.NewCache(\"localhost:11311\")\n)\n\ntype AuthCookieManager struct {\n\tUserId int\n\tLastLoginTime int64\n\tLoggedIn bool\n}\n\nfunc checkingMemcache() bool {\n\tconn, err := net.Dial(\"tcp\", \"localhost:11311\")\n\tif err != nil {\n\t\tlog.Println(\"Sessiond Server is not running! \", err)\n\t\treturn false\n\t}\n\tdefer conn.Close()\n\treturn true\n}\n\nfunc getCookieHashKey(ctx *gin.Context) (string, error) {\n\tcookie, err := ctx.Request.Cookie(\"cention-suiteSSID\")\n\tif err != nil {\n\t\tlog.Println(\"getCookieHashKey(): Cookie is empty - \", err)\n\t\treturn \"\", err\n\t}\n\tif cookie != nil && cookie.Value == \"\" && cookie.Value == \"guest\" {\n\t\treturn \"\", ERROR_CACHE_MISSED\n\t}\n\treturn cookie.Value, nil\n}\n\nfunc decodeCookie(ctx *gin.Context) (string, error) {\n\tcookie, err := getCookieHashKey(ctx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif cookie != \"\" && cookie != \"guest\" {\n\t\treturn cookie, nil\n\t}\n\treturn \"\", ERROR_COOKIE_NOT_FOUND\n}\n\nfunc getCurrentSession(v []byte) (int, int, bool, error) {\n\tsv := string(v)\n\tsValue := strings.Split(sv, \"\/\")\n\tif len(sValue) != 3 {\n\t\treturn 0, 0, false, ERROR_CACHE_MISSED\n\t}\n\tuid, err := strconv.Atoi(sValue[0])\n\tif err != nil {\n\t\tlog.Printf(\"Error on Uid conversion: %v\", err)\n\t\treturn 0, 0, false, err\n\t}\n\tts, err := strconv.Atoi(sValue[1])\n\tif err != nil {\n\t\tlog.Printf(\"Error on timestampLastLogin conversion: %v\", err)\n\t\treturn 0, 0, false, err\n\t}\n\tcurrentlyLoggedIn, err := strconv.ParseBool(sValue[2])\n\tif err != nil {\n\t\tlog.Printf(\"Error on currentlyLoggedIn conversion: %v\", err)\n\t\treturn 0, 0, false, err\n\t}\n\treturn uid, ts, currentlyLoggedIn, nil\n}\n\nfunc fetchFromCache(key string) error {\n\tskey := \"Session_\" + key\n\tsItems, err := sessiond.GetRawFromMemcache(skey)\n\tif err != nil {\n\t\tlog.Println(\"[GetRawFromMemcache] key `Session` is empty!\")\n\t\treturn err\n\t}\n\tif sItems != nil {\n\t\tuid, _, currentlyLogedin, err := getCurrentSession(sItems.Value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif uid != 0 && currentlyLogedin {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn ERROR_CACHE_MISSED\n}\n\nfunc CheckOrCreateAuthCookie(ctx *gin.Context) error {\n\tssid, err := decodeCookie(ctx)\n\tif err != nil {\n\t\terr := createNewAuthCookie(ctx)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\tuser := ctx.Request.FormValue(\"username\")\n\tpass := ctx.Request.FormValue(\"password\")\n\tif user == \"\" && pass == \"\" {\n\t\tif checkingMemcache() {\n\t\t\tif err = fetchFromCache(ssid); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\treturn ERROR_USER_PASS_EMPTY\n\t} else {\n\t\tlog.Println(\"!!-- Setting cookie informations to memcache. First time login.\")\n\t\twfUser, err := validateUser(user, pass)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error on CheckOrCreateAuthCookie() - validateUser: \", err)\n\t\t\treturn err\n\t\t}\n\t\tif wfUser != nil {\n\t\t\tlastLoginTime := time.Now().Unix()\n\t\t\tif checkingMemcache() {\n\t\t\t\tsValue := fmt.Sprintf(\"%v\/%v\/%v\", wfUser.Id, lastLoginTime, true)\n\t\t\t\tif err = saveToSessiondCache(ssid, sValue); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err = saveUserIdToCache(wfUser.Id, ssid); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tupdateUserCurrentLoginIn(wfUser.Id)\n\t\t\t\tlog.Printf(\"CentionAuth: User `%s` just now Logged In\", wfUser.Username)\n\t\t\t\treturn nil\n\t\t\t} else {\n\t\t\t\treturn ERROR_MEMCACHE_FAILED\n\t\t\t}\n\t\t}\n\t}\n\treturn ERROR_WF_USER_NULL\n}\nfunc updateUserCurrentLoginIn(wfUId int) {\n\tuser, err := workflow.QueryUser_byWebframeworkUser(wfUId)\n\tif err != nil {\n\t\tlog.Println(\"Error QueryUser_byWebframeworkUser: \", err)\n\t}\n\tuser.SetTimestampLastLogin(time.Now().Unix())\n\tuser.SetCurrentlyLoggedIn(true)\n\tif err := user.Save(); err != nil {\n\t\tlog.Println(\"Error on Save: \", err)\n\t}\n}\nfunc updateUserCurrentLoginOut(wfUId int) {\n\tuser, err := workflow.QueryUser_byWebframeworkUser(wfUId)\n\tif err != nil {\n\t\tlog.Println(\"Error QueryUser_byWebframeworkUser: \", err)\n\t}\n\tuser.SetTimestampLastLogout(time.Now().Unix())\n\tuser.SetCurrentlyLoggedIn(false)\n\tif err := user.Save(); err != nil {\n\t\tlog.Println(\"Error on Save: \", err)\n\t}\n}\nfunc saveUserIdToCache(key int, value string) error {\n\tsKey := fmt.Sprintf(\"user\/%d\", key)\n\tif err := sessiond.SetRawToMemcache(sKey, value); err != nil {\n\t\tlog.Println(\"[`SetRawToMemcache`] Error on saving:\", err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc saveToSessiondCache(key, value string) error {\n\tsKey := \"Session_\" + key\n\tif err := sessiond.SetRawToMemcache(sKey, value); err != nil {\n\t\tlog.Println(\"[`SetRawToMemcache`] Error on saving:\", err)\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc validateUser(user, pass string) (*wf.User, error) {\n\twu, err := wf.QueryUser_byLogin(user)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif wu != nil {\n\t\tif wu.Active && wu.Password == encodePassword(pass) {\n\t\t\treturn wu, nil\n\t\t}\n\t}\n\treturn nil, ERROR_USER_PASS_MISMATCH\n}\nfunc encodePassword(p string) string {\n\tep := sha256.New()\n\t_, err := ep.Write([]byte(p))\n\tif err != nil {\n\t\tlog.Println(\"encodePassword() - Error on Sha256: \", err)\n\t\treturn \"\"\n\t}\n\treturn base64.StdEncoding.EncodeToString(ep.Sum(nil))\n}\nfunc createNewAuthCookie(ctx *gin.Context) error {\n\tvalue := map[string]interface{}{\n\t\t\"cookie-set-date\": time.Now().Unix(),\n\t}\n\tencoded, err := sc.Encode(\"cention-suiteSSID\", value)\n\tif err != nil {\n\t\tlog.Printf(\"createNewAuthCookie(): Error %v, creating `guest` cookie\", err)\n\t\tcookie := fmt.Sprintf(\"cention-suiteSSID=%s; Path=\/\", \"guest\")\n\t\tctx.Writer.Header().Add(\"Set-Cookie\", cookie)\n\t\treturn err\n\t}\n\tcookie := fmt.Sprintf(\"cention-suiteSSID=%s; Path=\/\", encoded)\n\tctx.Writer.Header().Add(\"Set-Cookie\", cookie)\n\treturn nil\n}\nfunc CheckAuthCookie(ctx *gin.Context) (bool, error) {\n\tcookie, err := decodeCookie(ctx)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tok, err := validateByBrowserCookie(cookie)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !ok {\n\t\treturn false, err\n\t}\n\treturn true, nil\n}\n\nfunc fetchFromCacheWithValue(key string) (int, int, bool, error) {\n\tskey := \"Session_\" + key\n\tsItems, err := sessiond.GetRawFromMemcache(skey)\n\tif err != nil {\n\t\tlog.Println(\"[GetRawFromMemcache] key `Session` is empty!\")\n\t\treturn 0, 0, false, err\n\t}\n\tif sItems != nil {\n\t\tuid, timestamp, currentlyLogedin, err := getCurrentSession(sItems.Value)\n\t\tif err != nil {\n\t\t\treturn 0, 0, false, err\n\t\t}\n\t\tif uid != 0 && !currentlyLogedin {\n\t\t\treturn uid, timestamp, !currentlyLogedin, nil\n\t\t}\n\t\tif uid != 0 && currentlyLogedin {\n\t\t\treturn uid, timestamp, currentlyLogedin, nil\n\t\t}\n\t}\n\treturn 0, 0, false, ERROR_CACHE_MISSED\n}\n\nfunc validateByBrowserCookie(ssid string) (bool, error) {\n\tif checkingMemcache() {\n\t\tuid, _, currentlyLogedin, err := fetchFromCacheWithValue(ssid)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif err := updateTimeStampToCache(ssid, uid, currentlyLogedin); err != nil {\n\t\t\tlog.Printf(\"Error on validateByBrowserCookie(): %v\", err)\n\t\t\treturn false, err\n\t\t}\n\t\tlog.Printf(\"CentionAuth: Cookie has verified with this info: %v\", uid)\n\t\treturn true, nil\n\t}\n\treturn false, ERROR_MEMCACHE_FAILED\n}\n\nfunc destroyAuthCookie(ctx *gin.Context) error {\n\tssid, err := decodeCookie(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif checkingMemcache() {\n\t\tuid, _, _, err := fetchFromCacheWithValue(ssid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := updateTimeStampToCache(ssid, uid, false); err != nil {\n\t\t\tlog.Printf(\"Error on validateByBrowserCookie(): %v\", err)\n\t\t\treturn err\n\t\t}\n\t\tupdateUserCurrentLoginOut(uid)\n\t\treturn nil\n\t}\n\treturn ERROR_MEMCACHE_FAILED\n}\nfunc Logout(ctx *gin.Context) error {\n\treturn destroyAuthCookie(ctx)\n}\n\nfunc fetchCookieFromRequest(r *http.Request) (string, error) {\n\tcookie, err := r.Cookie(\"cention-suiteSSID\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn cookie.Value, nil\n}\nfunc GetWebframeworkUserFromRequest(r *http.Request) int {\n\tssid, err := fetchCookieFromRequest(r)\n\tif err != nil {\n\t\tlog.Printf(\"Error on getting SSID: %v\", err)\n\t\treturn 0\n\t}\n\tif ssid == \"\" {\n\t\treturn 0\n\t}\n\tif checkingMemcache() {\n\t\tuid, _, currentlyLogedin, err := fetchFromCacheWithValue(ssid)\n\t\tif err != nil {\n\t\t\tlog.Println(\"Error on fetchFromCacheWithValue(): \", err)\n\t\t\treturn 0\n\t\t}\n\t\tif !currentlyLogedin {\n\t\t\treturn 0\n\t\t}\n\t\t\/\/Update timestamp for every request\n\t\tif err := updateTimeStampToCache(ssid, uid, currentlyLogedin); err != nil {\n\t\t\tlog.Printf(\"Error on %v\", err)\n\t\t\treturn 0\n\t\t}\n\t\treturn uid\n\t}\n\treturn 0\n}\n\nfunc updateTimeStampToCache(ssid string, uid int, loginStatus bool) error {\n\tlastTimeGetRequest := time.Now().Unix()\n\tsvalue := fmt.Sprintf(\"%v\/%v\/%v\", uid, lastTimeGetRequest, loginStatus)\n\tif err := saveToSessiondCache(ssid, svalue); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\nfunc Middleware() func(*gin.Context) {\n\treturn func(ctx *gin.Context) {\n\t\tif strings.HasPrefix(ctx.Request.RequestURI, \"\/debug\/pprof\/\") {\n\t\t\tctx.Next()\n\t\t\treturn\n\t\t}\n\t\twfUserId := GetWebframeworkUserFromRequest(ctx.Request)\n\t\tif wfUserId == 0 {\n\t\t\tctx.AbortWithStatus(HTTP_UNAUTHORIZE_ACCESS)\n\t\t\treturn\n\t\t}\n\t\tcurrUser := controllers.FetchUserObject(wfUserId)\n\t\tctx.Keys = make(map[string]interface{})\n\t\tctx.Keys[\"loggedInUser\"] = currUser\n\t\tctx.Next()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"os\"\n\t\"path\"\n\t\"net\/http\"\n\t\"io\"\n\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/samalba\/dockerclient\"\n)\n\n\/\/ instance of DockerClient allowing for making calls to the docker daemon\n\/\/ remote API\nvar dockerClient *dockerclient.DockerClient\n\n\nfunc main() {\n\n\t\/\/ init docker client object\n\tvar err error\n\tdockerClient, err = dockerclient.NewDockerClient(\"unix:\/\/\/var\/run\/docker.sock\", nil)\n\tif err != nil {\n\t\tlogrus.Fatal(err.Error())\n\t}\n\n\t\/\/ get the version of the docker daemon so we can be sure the corresponding\n\t\/\/ docker client is installed and install it if necessary\n\tversion, err := dockerClient.Version()\n\tif err != nil {\n\t\tlogrus.Fatal(err.Error())\n\t}\n\tdockerDaemonVersion := version.Version\n\n\t\/\/ name of docker binary that is needed \n\tdockerBinaryName := \"docker-\" + dockerDaemonVersion\n\tlogrus.Println(\"looking for docker binary named:\", dockerBinaryName)\n\n\tfilename := path.Join(\"\/bin\", dockerBinaryName)\n\t\n\tif _, err := os.Stat(filename); os.IsNotExist(err) {\n\t\t\n\t\tlogrus.Println(\"docker binary (version \" + dockerDaemonVersion + \") not found.\")\n\t\tlogrus.Println(\"downloading\", dockerBinaryName, \"...\")\n\n\t\tout, err := os.Create(filename)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t\tdefer out.Close()\n\t\tresp, err := http.Get(\"https:\/\/get.docker.com\/builds\/Linux\/x86_64\/docker-\" + dockerDaemonVersion)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\t\n\t\t_, err = io.Copy(out, resp.Body)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\n\t\terr = os.Chmod(filename, 0700)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n }\n}\n<commit_msg>Get the docker client from the tgz file<commit_after>package main\n\nimport (\n\t\"os\"\n\t\"path\"\n\t\"net\/http\"\n\t\"io\"\n\t\"archive\/tar\"\n\t\"compress\/gzip\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/samalba\/dockerclient\"\n)\n\n\/\/ instance of DockerClient allowing for making calls to the docker daemon\n\/\/ remote API\nvar dockerClient *dockerclient.DockerClient\n\n\nfunc main() {\n\n\t\/\/ init docker client object\n\tvar err error\n\tdockerClient, err = dockerclient.NewDockerClient(\"unix:\/\/\/var\/run\/docker.sock\", nil)\n\tif err != nil {\n\t\tlogrus.Fatal(err.Error())\n\t}\n\n\t\/\/ get the version of the docker daemon so we can be sure the corresponding\n\t\/\/ docker client is installed and install it if necessary\n\tversion, err := dockerClient.Version()\n\tif err != nil {\n\t\tlogrus.Fatal(err.Error())\n\t}\n\tdockerDaemonVersion := version.Version\n\n\t\/\/ name of docker binary that is needed \n\tdockerBinaryName := \"docker-\" + dockerDaemonVersion\n\tlogrus.Println(\"looking for docker binary named:\", dockerBinaryName)\n\n filename := path.Join(\"\/bin\", dockerBinaryName)\n\t\n\tif _, err := os.Stat(filename); os.IsNotExist(err) {\n\t\t\n\t\tlogrus.Println(\"docker binary (version \" + dockerDaemonVersion + \") not found.\")\n\t\tlogrus.Println(\"downloading\", dockerBinaryName, \"...\")\n\n\t\tout, err := os.Create(filename)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t\tdefer out.Close()\n\t\tresp, err := http.Get(\"https:\/\/get.docker.com\/builds\/Linux\/x86_64\/docker-\" + dockerDaemonVersion + \".tgz\")\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tgr, err := gzip.NewReader(resp.Body)\n defer gr.Close()\n if err != nil {\n logrus.Fatal(err.Error())\n }\n\n tr := tar.NewReader(gr)\n for {\n hdr, err := tr.Next()\n if err == io.EOF {\n break\n }\n if err != nil {\n logrus.Fatal(err.Error())\n }\n\n if hdr.Typeflag == tar.TypeReg && hdr.Name == \"docker\/docker\" {\n _, err = io.Copy(out, tr)\n if err != nil {\n logrus.Fatal(err.Error())\n\t\t }\n break\n }\n logrus.Println(\"not yet\")\n }\n\n\t\terr = os.Chmod(filename, 0700)\n\t\tif err != nil {\n\t\t\tlogrus.Fatal(err.Error())\n\t\t}\n }\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"github.com\/privacybydesign\/irmago\"\n\tsseclient \"github.com\/sietseringers\/go-sse\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst pollInterval = 1000 * time.Millisecond\n\nfunc WaitStatus(transport *irma.HTTPTransport, initialStatus Status, statuschan chan Status, errorchan chan error) {\n\tif err := subscribeSSE(transport, statuschan, errorchan); err != nil {\n\t\tgo poll(transport, initialStatus, statuschan, errorchan)\n\t}\n}\n\nfunc WaitStatusChanged(transport *irma.HTTPTransport, initialStatus Status, statuschan chan Status, errorchan chan error) {\n\tif err := subscribeSSE(transport, statuschan, errorchan); err != nil {\n\t\tgo pollUntilChange(transport, initialStatus, statuschan, errorchan)\n\t}\n}\n\n\/\/ Start listening for server-sent events\nfunc subscribeSSE(transport *irma.HTTPTransport, statuschan chan Status, errorchan chan error) error {\n\n\tevents := make(chan *sseclient.Event)\n\tgo func() {\n\t\tfor {\n\t\t\tif e := <-events; e != nil && e.Type != \"open\" {\n\t\t\t\tstatus := Status(strings.Trim(string(e.Data), `\"`))\n\t\t\t\tstatuschan <- status\n\t\t\t\tif status.Finished() {\n\t\t\t\t\terrorchan <- nil\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\terr := sseclient.Notify(nil, transport.Server+\"statusevents\", true, events)\n\tif err != nil {\n\t\tclose(events)\n\t}\n\treturn err\n}\n\n\/\/ poll recursively polls the session status until a final status is received.\nfunc poll(transport *irma.HTTPTransport, initialStatus Status, statuschan chan Status, errorchan chan error) {\n\tgo func() {\n\t\tstatus := initialStatus\n\t\tfor {\n\t\t\tstatuschanPolling := make(chan Status)\n\t\t\terrorchanPolling := make(chan error)\n\t\t\tgo pollUntilChange(transport, status, statuschanPolling, errorchanPolling)\n\t\t\tselect {\n\t\t\tcase status = <-statuschanPolling:\n\t\t\t\tstatuschan <- status\n\t\t\t\tif status.Finished() {\n\t\t\t\t\terrorchan <- nil\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\tcase err := <-errorchanPolling:\n\t\t\t\terrorchan <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc pollUntilChange(transport *irma.HTTPTransport, initialStatus Status, statuschan chan Status, errorchan chan error) {\n\t\/\/ First we wait\n\t<-time.NewTimer(pollInterval).C\n\n\t\/\/ Get session status\n\tvar s string\n\tif err := transport.Get(\"status\", &s); err != nil {\n\t\terrorchan <- err\n\t\treturn\n\t}\n\tstatus := Status(strings.Trim(s, `\"`))\n\n\t\/\/ report if status changed\n\tif status != initialStatus {\n\t\tstatuschan <- status\n\t\terrorchan <- nil\n\t\treturn\n\t}\n\n\tgo pollUntilChange(transport, status, statuschan, errorchan)\n}\n<commit_msg>Cancel sse channel when closing it prematurely<commit_after>package server\n\nimport (\n\t\"context\"\n\t\"github.com\/privacybydesign\/irmago\"\n\tsseclient \"github.com\/sietseringers\/go-sse\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst pollInterval = 1000 * time.Millisecond\n\nfunc WaitStatus(transport *irma.HTTPTransport, initialStatus Status, statuschan chan Status, errorchan chan error) {\n\tif err := subscribeSSE(transport, statuschan, errorchan, false); err != nil {\n\t\tgo poll(transport, initialStatus, statuschan, errorchan)\n\t}\n}\n\nfunc WaitStatusChanged(transport *irma.HTTPTransport, initialStatus Status, statuschan chan Status, errorchan chan error) {\n\tif err := subscribeSSE(transport, statuschan, errorchan, true); err != nil {\n\t\tgo pollUntilChange(transport, initialStatus, statuschan, errorchan)\n\t}\n}\n\n\/\/ Start listening for server-sent events\nfunc subscribeSSE(transport *irma.HTTPTransport, statuschan chan Status, errorchan chan error, untilNextOnly bool) error {\n\tctx, cancel := context.WithCancel(context.Background())\n\n\tevents := make(chan *sseclient.Event)\n\tcancelled := false\n\tgo func() {\n\t\tfor {\n\t\t\te := <-events\n\t\t\tif e != nil && e.Type != \"open\" {\n\t\t\t\tstatus := Status(strings.Trim(string(e.Data), `\"`))\n\t\t\t\tstatuschan <- status\n\t\t\t\tif untilNextOnly || status.Finished() {\n\t\t\t\t\terrorchan <- nil\n\t\t\t\t\tcancelled = true\n\t\t\t\t\tcancel()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\terr := sseclient.Notify(ctx, transport.Server+\"statusevents\", true, events)\n\tif !cancelled {\n\t\tclose(events)\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ poll recursively polls the session status until a final status is received.\nfunc poll(transport *irma.HTTPTransport, initialStatus Status, statuschan chan Status, errorchan chan error) {\n\tgo func() {\n\t\tstatus := initialStatus\n\t\tfor {\n\t\t\tstatuschanPolling := make(chan Status)\n\t\t\terrorchanPolling := make(chan error)\n\t\t\tgo pollUntilChange(transport, status, statuschanPolling, errorchanPolling)\n\t\t\tselect {\n\t\t\tcase status = <-statuschanPolling:\n\t\t\t\tstatuschan <- status\n\t\t\t\tif status.Finished() {\n\t\t\t\t\terrorchan <- nil\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\tcase err := <-errorchanPolling:\n\t\t\t\terrorchan <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc pollUntilChange(transport *irma.HTTPTransport, initialStatus Status, statuschan chan Status, errorchan chan error) {\n\t\/\/ First we wait\n\t<-time.NewTimer(pollInterval).C\n\n\t\/\/ Get session status\n\tvar s string\n\tif err := transport.Get(\"status\", &s); err != nil {\n\t\terrorchan <- err\n\t\treturn\n\t}\n\tstatus := Status(strings.Trim(s, `\"`))\n\n\t\/\/ report if status changed\n\tif status != initialStatus {\n\t\tstatuschan <- status\n\t\terrorchan <- nil\n\t\treturn\n\t}\n\n\tgo pollUntilChange(transport, status, statuschan, errorchan)\n}\n<|endoftext|>"} {"text":"<commit_before>package html\n\nimport (\n\t\"html\/template\"\n\t\"time\"\n\n\t\"github.com\/chrisolsen\/ae\"\n\t\"github.com\/russross\/blackfriday\"\n\t\"google.golang.org\/appengine\/datastore\"\n)\n\n\/\/ CSS prevents any custom embedded styles from being encoded to html safe values\nfunc CSS(s string) template.CSS {\n\treturn template.CSS(s)\n}\n\n\/\/ Preview returns a preview of the string\nfunc Preview(size int, val string) string {\n\tif len(val) <= size {\n\t\treturn val\n\t}\n\treturn val[:size-1] + \"...\"\n}\n\n\/\/ Markdown converts text in the markdown syntax to html\nfunc Markdown(input string) interface{} {\n\tout := string(blackfriday.MarkdownCommon([]byte(input)))\n\treturn template.HTML(out)\n}\n\n\/\/ Add adds the numbers\nfunc Add(a, b int) int {\n\treturn a + b\n}\n\n\/\/ EncodeKey encodes a datastore key\nfunc EncodeKey(data interface{}) string {\n\tswitch data.(type) {\n\tcase ae.Model:\n\t\treturn data.(ae.Model).Key.Encode()\n\tcase *datastore.Key:\n\t\treturn data.(*datastore.Key).Encode()\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\n\/\/ Checked returns the checked attribute for positive values.\n\/\/ \t<input type=\"checkbox\" {{IsAdmin | checked}}> => <input type=\"checkbox\" checked=\"checked\">\nfunc Checked(checked bool) template.HTMLAttr {\n\tif checked {\n\t\treturn template.HTMLAttr(\"checked\")\n\t}\n\treturn \"\"\n}\n\nfunc Selected(selected bool) template.HTMLAttr {\n\tif selected {\n\t\treturn template.HTMLAttr(\"selected\")\n\t}\n\treturn \"\"\n}\n\n\/\/ Disabled returns the checked attribute for positive values.\n\/\/ \t<button type=\"submit\" {{HasError | disabled}}>Save<\/button> => <button type=\"submit\" disabled=\"\">Save<\/button>\n\/\/ \tor\n\/\/ \t<button type=\"submit\" {{ValidationError | disabled}}>Save<\/button> => <button type=\"submit\" disabled=\"\">Save<\/button>\nfunc Disabled(err interface{}) template.HTMLAttr {\n\td := template.HTMLAttr(\"disabled\")\n\tswitch err.(type) {\n\tcase string:\n\t\tif len(err.(string)) == 0 {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn d\n\tcase error:\n\t\treturn d\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\n\/\/ Show returns an empty style attr or display:none\n\/\/ \t<div {{IsVisibe | show}}> => <div style=\"display:none\">some hidden text<\/div>\nfunc Show(display bool) template.CSS {\n\tif display {\n\t\treturn template.CSS(\"\")\n\t}\n\treturn template.CSS(\"display:none;\")\n}\n\nfunc Hide(display bool) template.CSS {\n\treturn Show(!display)\n}\n\n\/\/ KeyEqual allow for *datastore.Key comparison\nfunc KeyEqual(a, b *datastore.Key) bool {\n\treturn a.Equal(b)\n}\n\n\/\/ Timestamp formats the time to the RFC3339 layout\nfunc Timestamp(d time.Time) string {\n\treturn d.Format(time.RFC3339)\n}\n<commit_msg>add some common template helpers<commit_after>package html\n\nimport (\n\t\"html\/template\"\n\t\"time\"\n\n\t\"github.com\/chrisolsen\/ae\"\n\t\"github.com\/russross\/blackfriday\"\n\t\"google.golang.org\/appengine\/datastore\"\n)\n\n\/\/ CSS prevents any custom embedded styles from being encoded to html safe values\nfunc CSS(s string) template.CSS {\n\treturn template.CSS(s)\n}\n\n\/\/ Preview returns a preview of the string\nfunc Preview(size int, val string) string {\n\tif len(val) <= size {\n\t\treturn val\n\t}\n\treturn val[:size-1] + \"...\"\n}\n\n\/\/ Markdown converts text in the markdown syntax to html\nfunc Markdown(input string) interface{} {\n\tout := string(blackfriday.MarkdownCommon([]byte(input)))\n\treturn template.HTML(out)\n}\n\n\/\/ Add adds the numbers\nfunc Add(a, b int) int {\n\treturn a + b\n}\n\n\/\/ EncodeKey encodes a datastore key\nfunc EncodeKey(data interface{}) string {\n\tswitch data.(type) {\n\tcase ae.Model:\n\t\treturn data.(ae.Model).Key.Encode()\n\tcase *datastore.Key:\n\t\tkey := data.(*datastore.Key)\n\t\tif key == nil {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn key.Encode()\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\n\/\/ EncodeParentKey encodes the key's parent\nfunc EncodeParentKey(key *datastore.Key) string {\n\treturn key.Parent().Encode()\n}\n\n\/\/ Checked returns the checked attribute for positive values.\n\/\/ \t<input type=\"checkbox\" {{IsAdmin | checked}}> => <input type=\"checkbox\" checked=\"checked\">\nfunc Checked(checked interface{}) template.HTMLAttr {\n\tswitch checked.(type) {\n\tcase bool:\n\t\tif checked.(bool) {\n\t\t\treturn template.HTMLAttr(\"checked\")\n\t\t}\n\t}\n\treturn \"\"\n}\n\nfunc Selected(selected bool) template.HTMLAttr {\n\tif selected {\n\t\treturn template.HTMLAttr(\"selected\")\n\t}\n\treturn \"\"\n}\n\n\/\/ Disabled returns the checked attribute for positive values.\n\/\/ \t<button type=\"submit\" {{HasError | disabled}}>Save<\/button> => <button type=\"submit\" disabled=\"\">Save<\/button>\n\/\/ \tor\n\/\/ \t<button type=\"submit\" {{ValidationError | disabled}}>Save<\/button> => <button type=\"submit\" disabled=\"\">Save<\/button>\nfunc Disabled(obj interface{}) template.HTMLAttr {\n\td := template.HTMLAttr(\"disabled\")\n\tswitch obj.(type) {\n\tcase string:\n\t\tif len(obj.(string)) == 0 {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn d\n\tcase bool:\n\t\tif obj.(bool) {\n\t\t\treturn d\n\t\t}\n\t\treturn \"\"\n\tcase error:\n\t\treturn d\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\n\/\/ Show returns an empty style attr or display:none\n\/\/ \t<div {{IsVisibe | show}}> => <div style=\"display:none\">some hidden text<\/div>\nfunc Show(display bool) template.CSS {\n\tif display {\n\t\treturn template.CSS(\"\")\n\t}\n\treturn template.CSS(\"display:none;\")\n}\n\nfunc Hide(display bool) template.CSS {\n\treturn Show(!display)\n}\n\n\/\/ KeyEqual allow for *datastore.Key comparison\nfunc KeyEqual(a, b *datastore.Key) bool {\n\treturn a.Equal(b)\n}\n\n\/\/ Timestamp formats the time to the RFC3339 layout\nfunc Timestamp(d time.Time) string {\n\treturn d.Format(time.RFC3339)\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n)\n\n\/\/ Cluster represents high-level information about a LXD cluster.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering\ntype Cluster struct {\n\t\/\/ Name of the cluster member answering the request\n\t\/\/ Example: lxd01\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n\n\t\/\/ Whether clustering is enabled\n\t\/\/ Example: true\n\tEnabled bool `json:\"enabled\" yaml:\"enabled\"`\n\n\t\/\/ List of member configuration keys (used during join)\n\t\/\/ Example: []\n\t\/\/\n\t\/\/ API extension: clustering_join\n\tMemberConfig []ClusterMemberConfigKey `json:\"member_config\" yaml:\"member_config\"`\n}\n\n\/\/ ClusterMemberConfigKey represents a single config key that a new member of\n\/\/ the cluster is required to provide when joining.\n\/\/\n\/\/ The Value field is empty when getting clustering information with GET\n\/\/ \/1.0\/cluster, and should be filled by the joining node when performing a PUT\n\/\/ \/1.0\/cluster join request.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_join\ntype ClusterMemberConfigKey struct {\n\t\/\/ The kind of configuration key (network, storage-pool, ...)\n\t\/\/ Example: storage-pool\n\tEntity string `json:\"entity\" yaml:\"entity\"`\n\n\t\/\/ The name of the object requiring this key\n\t\/\/ Example: local\n\tName string `json:\"name\" yaml:\"name\"`\n\n\t\/\/ The name of the key\n\t\/\/ Example: source\n\tKey string `json:\"key\" yaml:\"key\"`\n\n\t\/\/ The value on the answering cluster member\n\t\/\/ Example: \/dev\/sdb\n\tValue string `json:\"value\" yaml:\"value\"`\n\n\t\/\/ A human friendly description key\n\t\/\/ Example: \"source\" property for storage pool \"local\"\n\tDescription string `json:\"description\" yaml:\"description\"`\n}\n\n\/\/ ClusterPut represents the fields required to bootstrap or join a LXD\n\/\/ cluster.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering\ntype ClusterPut struct {\n\tCluster `yaml:\",inline\"`\n\n\t\/\/ The address of the cluster you wish to join\n\t\/\/ Example: 10.0.0.1:8443\n\tClusterAddress string `json:\"cluster_address\" yaml:\"cluster_address\"`\n\n\t\/\/ The expected certificate (X509 PEM encoded) for the cluster\n\t\/\/ Example: X509 PEM certificate\n\tClusterCertificate string `json:\"cluster_certificate\" yaml:\"cluster_certificate\"`\n\n\t\/\/ The local address to use for cluster communication\n\t\/\/ Example: 10.0.0.2:8443\n\t\/\/\n\t\/\/ API extension: clustering_join\n\tServerAddress string `json:\"server_address\" yaml:\"server_address\"`\n\n\t\/\/ The trust password of the cluster you're trying to join\n\t\/\/ Example: blah\n\t\/\/\n\t\/\/ API extension: clustering_join\n\tClusterPassword string `json:\"cluster_password\" yaml:\"cluster_password\"`\n}\n\n\/\/ ClusterMembersPost represents the fields required to request a join token to add a member to the cluster.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_join_token\ntype ClusterMembersPost struct {\n\t\/\/ The name of the new cluster member\n\t\/\/ Example: lxd02\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n}\n\n\/\/ ClusterMemberJoinToken represents the fields contained within an encoded cluster member join token.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_join_token\ntype ClusterMemberJoinToken struct {\n\t\/\/ The name of the new cluster member\n\t\/\/ Example: lxd02\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n\n\t\/\/ The fingerprint of the network certificate\n\t\/\/ Example: 57bb0ff4340b5bb28517e062023101adf788c37846dc8b619eb2c3cb4ef29436\n\tFingerprint string `json:\"fingerprint\" yaml:\"fingerprint\"`\n\n\t\/\/ The addresses of existing online cluster members\n\t\/\/ Example: [\"10.98.30.229:8443\"]\n\tAddresses []string `json:\"addresses\" yaml:\"addresses\"`\n\n\t\/\/ The random join secret.\n\t\/\/ Example: 2b2284d44db32675923fe0d2020477e0e9be11801ff70c435e032b97028c35cd\n\tSecret string `json:\"secret\" yaml:\"secret\"`\n}\n\n\/\/ String encodes the cluster member join token as JSON and then Base64.\nfunc (t *ClusterMemberJoinToken) String() string {\n\tjoinTokenJSON, err := json.Marshal(t)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn base64.StdEncoding.EncodeToString(joinTokenJSON)\n}\n\n\/\/ ClusterMemberPost represents the fields required to rename a LXD node.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering\ntype ClusterMemberPost struct {\n\t\/\/ The new name of the cluster member\n\t\/\/ Example: lxd02\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n}\n\n\/\/ ClusterMember represents the a LXD node in the cluster.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering\ntype ClusterMember struct {\n\tClusterMemberPut `yaml:\",inline\"`\n\n\t\/\/ Name of the cluster member\n\t\/\/ Example: lxd01\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n\n\t\/\/ URL at which the cluster member can be reached\n\t\/\/ Example: https:\/\/10.0.0.1:8443\n\tURL string `json:\"url\" yaml:\"url\"`\n\n\t\/\/ Whether the cluster member is a database server\n\t\/\/ Example: true\n\tDatabase bool `json:\"database\" yaml:\"database\"`\n\n\t\/\/ Current status\n\t\/\/ Example: Online\n\tStatus string `json:\"status\" yaml:\"status\"`\n\n\t\/\/ Additional status information\n\t\/\/ Example: fully operational\n\tMessage string `json:\"message\" yaml:\"message\"`\n\n\t\/\/ The primary architecture of the cluster member\n\t\/\/ Example: x86_64\n\t\/\/\n\t\/\/ API extension: clustering_architecture\n\tArchitecture string `json:\"architecture\" yaml:\"architecture\"`\n}\n\n\/\/ Writable converts a full Profile struct into a ProfilePut struct (filters read-only fields)\nfunc (member *ClusterMember) Writable() ClusterMemberPut {\n\treturn member.ClusterMemberPut\n}\n\n\/\/ ClusterMemberPut represents the the modifiable fields of a LXD cluster member\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_edit_roles\ntype ClusterMemberPut struct {\n\t\/\/ List of roles held by this cluster member\n\t\/\/ Example: [\"database\"]\n\t\/\/\n\t\/\/ API extension: clustering_roles\n\tRoles []string `json:\"roles\" yaml:\"roles\"`\n\n\t\/\/ Name of the failure domain for this cluster member\n\t\/\/ Example: rack1\n\t\/\/\n\t\/\/ API extension: clustering_failure_domains\n\tFailureDomain string `json:\"failure_domain\" yaml:\"failure_domain\"`\n\n\t\/\/ Cluster member description\n\t\/\/ Example: AMD Epyc 32c\/64t\n\t\/\/\n\t\/\/ API extension: clustering_description\n\tDescription string `json:\"description\" yaml:\"description\"`\n\n\t\/\/ Additional configuration information\n\t\/\/ Example: {\"scheduler.instance\": \"all\"}\n\t\/\/\n\t\/\/ API extension: clustering_config\n\tConfig map[string]string `json:\"config\" yaml:\"config\"`\n\n\t\/\/ List of cluster groups this member belongs to\n\t\/\/ Example: [\"group1\", \"group2\"]\n\t\/\/\n\t\/\/ API extension: clustering_groups\n\tGroups []string `json:\"groups\" yaml:\"groups\"`\n}\n\n\/\/ ClusterCertificatePut represents the certificate and key pair for all members in a LXD Cluster\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_update_certs\ntype ClusterCertificatePut struct {\n\t\/\/ The new certificate (X509 PEM encoded) for the cluster\n\t\/\/ Example: X509 PEM certificate\n\tClusterCertificate string `json:\"cluster_certificate\" yaml:\"cluster_certificate\"`\n\n\t\/\/ The new certificate key (X509 PEM encoded) for the cluster\n\t\/\/ Example: X509 PEM certificate key\n\tClusterCertificateKey string `json:\"cluster_certificate_key\" yaml:\"cluster_certificate_key\"`\n}\n\n\/\/ ClusterMemberStatePost represents the fields required to evacuate a cluster member.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_evacuation\ntype ClusterMemberStatePost struct {\n\t\/\/ The action to be performed. Valid actions are \"evacuate\" and \"restore\".\n\t\/\/ Example: evacuate\n\tAction string `json:\"action\" yaml:\"action\"`\n}\n\n\/\/ ClusterGroupsPost represents the fields available for a new cluster group.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_groups\ntype ClusterGroupsPost struct {\n\tClusterGroupPut\n\n\t\/\/ The new name of the cluster group\n\t\/\/ Example: group1\n\tName string `json:\"name\" yaml:\"name\"`\n}\n\n\/\/ ClusterGroup represents a cluster group.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_groups\ntype ClusterGroup struct {\n\tClusterGroupPut `yaml:\",inline\"`\n\tClusterGroupPost `yaml:\",inline\"`\n}\n\n\/\/ ClusterGroupPost represents the fields required to rename a cluster group.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_groups\ntype ClusterGroupPost struct {\n\t\/\/ The new name of the cluster group\n\t\/\/ Example: group1\n\tName string `json:\"name\" yaml:\"name\"`\n}\n\n\/\/ ClusterGroupPut represents the modifiable fields of a cluster group.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_groups\ntype ClusterGroupPut struct {\n\t\/\/ The description of the cluster group\n\t\/\/ Example: amd64 servers\n\tDescription string `json:\"description\" yaml:\"description\"`\n\n\t\/\/ List of members in this group\n\t\/\/ Example: [\"node1\", \"node3\"]\n\tMembers []string `json:\"members\" yaml:\"members\"`\n}\n\n\/\/ Writable converts a full ClusterGroup struct into a ClusterGroupPut struct (filters read-only fields)\nfunc (c *ClusterGroup) Writable() ClusterGroupPut {\n\treturn c.ClusterGroupPut\n}\n<commit_msg>shared\/api\/cluster: s\/Base64\/base64\/<commit_after>package api\n\nimport (\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n)\n\n\/\/ Cluster represents high-level information about a LXD cluster.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering\ntype Cluster struct {\n\t\/\/ Name of the cluster member answering the request\n\t\/\/ Example: lxd01\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n\n\t\/\/ Whether clustering is enabled\n\t\/\/ Example: true\n\tEnabled bool `json:\"enabled\" yaml:\"enabled\"`\n\n\t\/\/ List of member configuration keys (used during join)\n\t\/\/ Example: []\n\t\/\/\n\t\/\/ API extension: clustering_join\n\tMemberConfig []ClusterMemberConfigKey `json:\"member_config\" yaml:\"member_config\"`\n}\n\n\/\/ ClusterMemberConfigKey represents a single config key that a new member of\n\/\/ the cluster is required to provide when joining.\n\/\/\n\/\/ The Value field is empty when getting clustering information with GET\n\/\/ \/1.0\/cluster, and should be filled by the joining node when performing a PUT\n\/\/ \/1.0\/cluster join request.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_join\ntype ClusterMemberConfigKey struct {\n\t\/\/ The kind of configuration key (network, storage-pool, ...)\n\t\/\/ Example: storage-pool\n\tEntity string `json:\"entity\" yaml:\"entity\"`\n\n\t\/\/ The name of the object requiring this key\n\t\/\/ Example: local\n\tName string `json:\"name\" yaml:\"name\"`\n\n\t\/\/ The name of the key\n\t\/\/ Example: source\n\tKey string `json:\"key\" yaml:\"key\"`\n\n\t\/\/ The value on the answering cluster member\n\t\/\/ Example: \/dev\/sdb\n\tValue string `json:\"value\" yaml:\"value\"`\n\n\t\/\/ A human friendly description key\n\t\/\/ Example: \"source\" property for storage pool \"local\"\n\tDescription string `json:\"description\" yaml:\"description\"`\n}\n\n\/\/ ClusterPut represents the fields required to bootstrap or join a LXD\n\/\/ cluster.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering\ntype ClusterPut struct {\n\tCluster `yaml:\",inline\"`\n\n\t\/\/ The address of the cluster you wish to join\n\t\/\/ Example: 10.0.0.1:8443\n\tClusterAddress string `json:\"cluster_address\" yaml:\"cluster_address\"`\n\n\t\/\/ The expected certificate (X509 PEM encoded) for the cluster\n\t\/\/ Example: X509 PEM certificate\n\tClusterCertificate string `json:\"cluster_certificate\" yaml:\"cluster_certificate\"`\n\n\t\/\/ The local address to use for cluster communication\n\t\/\/ Example: 10.0.0.2:8443\n\t\/\/\n\t\/\/ API extension: clustering_join\n\tServerAddress string `json:\"server_address\" yaml:\"server_address\"`\n\n\t\/\/ The trust password of the cluster you're trying to join\n\t\/\/ Example: blah\n\t\/\/\n\t\/\/ API extension: clustering_join\n\tClusterPassword string `json:\"cluster_password\" yaml:\"cluster_password\"`\n}\n\n\/\/ ClusterMembersPost represents the fields required to request a join token to add a member to the cluster.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_join_token\ntype ClusterMembersPost struct {\n\t\/\/ The name of the new cluster member\n\t\/\/ Example: lxd02\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n}\n\n\/\/ ClusterMemberJoinToken represents the fields contained within an encoded cluster member join token.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_join_token\ntype ClusterMemberJoinToken struct {\n\t\/\/ The name of the new cluster member\n\t\/\/ Example: lxd02\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n\n\t\/\/ The fingerprint of the network certificate\n\t\/\/ Example: 57bb0ff4340b5bb28517e062023101adf788c37846dc8b619eb2c3cb4ef29436\n\tFingerprint string `json:\"fingerprint\" yaml:\"fingerprint\"`\n\n\t\/\/ The addresses of existing online cluster members\n\t\/\/ Example: [\"10.98.30.229:8443\"]\n\tAddresses []string `json:\"addresses\" yaml:\"addresses\"`\n\n\t\/\/ The random join secret.\n\t\/\/ Example: 2b2284d44db32675923fe0d2020477e0e9be11801ff70c435e032b97028c35cd\n\tSecret string `json:\"secret\" yaml:\"secret\"`\n}\n\n\/\/ String encodes the cluster member join token as JSON and then base64.\nfunc (t *ClusterMemberJoinToken) String() string {\n\tjoinTokenJSON, err := json.Marshal(t)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn base64.StdEncoding.EncodeToString(joinTokenJSON)\n}\n\n\/\/ ClusterMemberPost represents the fields required to rename a LXD node.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering\ntype ClusterMemberPost struct {\n\t\/\/ The new name of the cluster member\n\t\/\/ Example: lxd02\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n}\n\n\/\/ ClusterMember represents the a LXD node in the cluster.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering\ntype ClusterMember struct {\n\tClusterMemberPut `yaml:\",inline\"`\n\n\t\/\/ Name of the cluster member\n\t\/\/ Example: lxd01\n\tServerName string `json:\"server_name\" yaml:\"server_name\"`\n\n\t\/\/ URL at which the cluster member can be reached\n\t\/\/ Example: https:\/\/10.0.0.1:8443\n\tURL string `json:\"url\" yaml:\"url\"`\n\n\t\/\/ Whether the cluster member is a database server\n\t\/\/ Example: true\n\tDatabase bool `json:\"database\" yaml:\"database\"`\n\n\t\/\/ Current status\n\t\/\/ Example: Online\n\tStatus string `json:\"status\" yaml:\"status\"`\n\n\t\/\/ Additional status information\n\t\/\/ Example: fully operational\n\tMessage string `json:\"message\" yaml:\"message\"`\n\n\t\/\/ The primary architecture of the cluster member\n\t\/\/ Example: x86_64\n\t\/\/\n\t\/\/ API extension: clustering_architecture\n\tArchitecture string `json:\"architecture\" yaml:\"architecture\"`\n}\n\n\/\/ Writable converts a full Profile struct into a ProfilePut struct (filters read-only fields)\nfunc (member *ClusterMember) Writable() ClusterMemberPut {\n\treturn member.ClusterMemberPut\n}\n\n\/\/ ClusterMemberPut represents the the modifiable fields of a LXD cluster member\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_edit_roles\ntype ClusterMemberPut struct {\n\t\/\/ List of roles held by this cluster member\n\t\/\/ Example: [\"database\"]\n\t\/\/\n\t\/\/ API extension: clustering_roles\n\tRoles []string `json:\"roles\" yaml:\"roles\"`\n\n\t\/\/ Name of the failure domain for this cluster member\n\t\/\/ Example: rack1\n\t\/\/\n\t\/\/ API extension: clustering_failure_domains\n\tFailureDomain string `json:\"failure_domain\" yaml:\"failure_domain\"`\n\n\t\/\/ Cluster member description\n\t\/\/ Example: AMD Epyc 32c\/64t\n\t\/\/\n\t\/\/ API extension: clustering_description\n\tDescription string `json:\"description\" yaml:\"description\"`\n\n\t\/\/ Additional configuration information\n\t\/\/ Example: {\"scheduler.instance\": \"all\"}\n\t\/\/\n\t\/\/ API extension: clustering_config\n\tConfig map[string]string `json:\"config\" yaml:\"config\"`\n\n\t\/\/ List of cluster groups this member belongs to\n\t\/\/ Example: [\"group1\", \"group2\"]\n\t\/\/\n\t\/\/ API extension: clustering_groups\n\tGroups []string `json:\"groups\" yaml:\"groups\"`\n}\n\n\/\/ ClusterCertificatePut represents the certificate and key pair for all members in a LXD Cluster\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_update_certs\ntype ClusterCertificatePut struct {\n\t\/\/ The new certificate (X509 PEM encoded) for the cluster\n\t\/\/ Example: X509 PEM certificate\n\tClusterCertificate string `json:\"cluster_certificate\" yaml:\"cluster_certificate\"`\n\n\t\/\/ The new certificate key (X509 PEM encoded) for the cluster\n\t\/\/ Example: X509 PEM certificate key\n\tClusterCertificateKey string `json:\"cluster_certificate_key\" yaml:\"cluster_certificate_key\"`\n}\n\n\/\/ ClusterMemberStatePost represents the fields required to evacuate a cluster member.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_evacuation\ntype ClusterMemberStatePost struct {\n\t\/\/ The action to be performed. Valid actions are \"evacuate\" and \"restore\".\n\t\/\/ Example: evacuate\n\tAction string `json:\"action\" yaml:\"action\"`\n}\n\n\/\/ ClusterGroupsPost represents the fields available for a new cluster group.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_groups\ntype ClusterGroupsPost struct {\n\tClusterGroupPut\n\n\t\/\/ The new name of the cluster group\n\t\/\/ Example: group1\n\tName string `json:\"name\" yaml:\"name\"`\n}\n\n\/\/ ClusterGroup represents a cluster group.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_groups\ntype ClusterGroup struct {\n\tClusterGroupPut `yaml:\",inline\"`\n\tClusterGroupPost `yaml:\",inline\"`\n}\n\n\/\/ ClusterGroupPost represents the fields required to rename a cluster group.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_groups\ntype ClusterGroupPost struct {\n\t\/\/ The new name of the cluster group\n\t\/\/ Example: group1\n\tName string `json:\"name\" yaml:\"name\"`\n}\n\n\/\/ ClusterGroupPut represents the modifiable fields of a cluster group.\n\/\/\n\/\/ swagger:model\n\/\/\n\/\/ API extension: clustering_groups\ntype ClusterGroupPut struct {\n\t\/\/ The description of the cluster group\n\t\/\/ Example: amd64 servers\n\tDescription string `json:\"description\" yaml:\"description\"`\n\n\t\/\/ List of members in this group\n\t\/\/ Example: [\"node1\", \"node3\"]\n\tMembers []string `json:\"members\" yaml:\"members\"`\n}\n\n\/\/ Writable converts a full ClusterGroup struct into a ClusterGroupPut struct (filters read-only fields)\nfunc (c *ClusterGroup) Writable() ClusterGroupPut {\n\treturn c.ClusterGroupPut\n}\n<|endoftext|>"} {"text":"<commit_before>package shared\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\/ec2iface\"\n\t\"github.com\/sneakybeaky\/aws-volumes\/shared\/iface\"\n\t\"github.com\/sneakybeaky\/aws-volumes\/shared\/log\"\n)\n\n\/\/ VolumeTagPrefix prefixes the name of a tag describing an allocated volume\nconst VolumeTagPrefix = \"volume_\"\n\n\/\/ DetachVolumesTag when set to a true value signals volumes can be detached\nconst DetachVolumesTag = \"detach_volumes\"\n\n\/\/ EC2Instance provides metadata about an EC2 instance.\ntype EC2Instance struct {\n\tsvc ec2iface.EC2API\n\tmetadata iface.Metadata\n}\n\nfunc NewEC2Instance(metadata iface.Metadata, svc ec2iface.EC2API) *EC2Instance {\n\n\treturn &EC2Instance{\n\t\tsvc: svc,\n\t\tmetadata: metadata,\n\t}\n\n}\n\nfunc (e EC2Instance) Tags() ([]*ec2.TagDescription, error) {\n\n\tinstanceid, err := e.metadata.InstanceID()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparams := &ec2.DescribeTagsInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t{ \/\/ Required\n\t\t\t\tName: aws.String(\"resource-id\"),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(instanceid), \/\/ Required\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tMaxResults: aws.Int64(1000),\n\t}\n\tresp, err := e.svc.DescribeTags(params)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp.Tags, nil\n}\n\nfunc (e EC2Instance) AllocatedVolumes() ([]*AllocatedVolume, error) {\n\tvar allocated []*AllocatedVolume\n\n\ttags, err := e.Tags()\n\n\tif err != nil {\n\t\treturn allocated, err\n\t}\n\n\tfor _, tag := range tags {\n\t\tif strings.HasPrefix(*tag.Key, VolumeTagPrefix) {\n\n\t\t\tkey := *tag.Key\n\t\t\tdevice := key[len(VolumeTagPrefix):]\n\t\t\tallocated = append(allocated, NewAllocatedVolume(*tag.Value, device, *tag.ResourceId, e.svc))\n\t\t}\n\t}\n\n\treturn allocated, nil\n}\n\nfunc (e EC2Instance) DetachVolumes() error {\n\n\ttags, err := e.Tags()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, tag := range tags {\n\t\tif *tag.Key == DetachVolumesTag {\n\n\t\t\tdetachVolumes, _ := strconv.ParseBool(*tag.Value)\n\n\t\t\tif detachVolumes {\n\t\t\t\te.applyToVolumes(detachVolume)\n\t\t\t} else {\n\t\t\t\tlog.Debug.Printf(\"Tag '%s' value is '%s' - not detaching volumes\", DetachVolumesTag, *tag.Value)\n\t\t\t}\n\n\t\t\tbreak\n\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (e EC2Instance) AttachVolumes() {\n\te.applyToVolumes(attachVolume)\n}\n\nfunc (e EC2Instance) ShowVolumesInfo() {\n\te.applyToVolumes(showVolumeInfo)\n}\n\nvar attachVolume = func(volume *AllocatedVolume) {\n\n\tif err := volume.Attach(); err != nil {\n\t\tlog.Error.Printf(\"Unable to attach volume : %s\\n\", err)\n\t}\n}\n\nvar detachVolume = func(volume *AllocatedVolume) {\n\n\tif err := volume.Detach(); err != nil {\n\t\tlog.Error.Printf(\"Unable to detach volume : %s\\n\", err)\n\t}\n}\n\nvar showVolumeInfo = func(volume *AllocatedVolume) {\n\tbuf := new(bytes.Buffer)\n\n\tif err := volume.Info(buf); err != nil {\n\t\tlog.Error.Printf(\"Unable to get info for volume : %s\\n\", err)\n\t\treturn\n\t}\n\tos.Stdout.WriteString(buf.String())\n\n}\n\nfunc (e EC2Instance) applyToVolumes(action func(volume *AllocatedVolume)) {\n\tif volumes, err := e.AllocatedVolumes(); err != nil {\n\t\tlog.Error.Printf(\"Unable to find allocated volumes : %s\", err)\n\t} else {\n\n\t\tvar wg sync.WaitGroup\n\n\t\tfor _, volume := range volumes {\n\n\t\t\twg.Add(1)\n\t\t\tgo func(action func(volume *AllocatedVolume), volume *AllocatedVolume) {\n\n\t\t\t\tdefer wg.Done()\n\t\t\t\taction(volume)\n\n\t\t\t}(action, volume)\n\n\t\t}\n\n\t\twg.Wait()\n\t}\n\n}\n<commit_msg>Alerts when not detaching volumes because of tag logic. Fixes #2<commit_after>package shared\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\/ec2iface\"\n\t\"github.com\/sneakybeaky\/aws-volumes\/shared\/iface\"\n\t\"github.com\/sneakybeaky\/aws-volumes\/shared\/log\"\n)\n\n\/\/ VolumeTagPrefix prefixes the name of a tag describing an allocated volume\nconst VolumeTagPrefix = \"volume_\"\n\n\/\/ DetachVolumesTag when set to a true value signals volumes can be detached\nconst DetachVolumesTag = \"detach_volumes\"\n\n\/\/ EC2Instance provides metadata about an EC2 instance.\ntype EC2Instance struct {\n\tsvc ec2iface.EC2API\n\tmetadata iface.Metadata\n}\n\nfunc NewEC2Instance(metadata iface.Metadata, svc ec2iface.EC2API) *EC2Instance {\n\n\treturn &EC2Instance{\n\t\tsvc: svc,\n\t\tmetadata: metadata,\n\t}\n\n}\n\nfunc (e EC2Instance) Tags() ([]*ec2.TagDescription, error) {\n\n\tinstanceid, err := e.metadata.InstanceID()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparams := &ec2.DescribeTagsInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t{ \/\/ Required\n\t\t\t\tName: aws.String(\"resource-id\"),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(instanceid), \/\/ Required\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tMaxResults: aws.Int64(1000),\n\t}\n\tresp, err := e.svc.DescribeTags(params)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resp.Tags, nil\n}\n\nfunc (e EC2Instance) AllocatedVolumes() ([]*AllocatedVolume, error) {\n\tvar allocated []*AllocatedVolume\n\n\ttags, err := e.Tags()\n\n\tif err != nil {\n\t\treturn allocated, err\n\t}\n\n\tfor _, tag := range tags {\n\t\tif strings.HasPrefix(*tag.Key, VolumeTagPrefix) {\n\n\t\t\tkey := *tag.Key\n\t\t\tdevice := key[len(VolumeTagPrefix):]\n\t\t\tallocated = append(allocated, NewAllocatedVolume(*tag.Value, device, *tag.ResourceId, e.svc))\n\t\t}\n\t}\n\n\treturn allocated, nil\n}\n\nfunc (e EC2Instance) DetachVolumes() error {\n\n\ttags, err := e.Tags()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, tag := range tags {\n\t\tif *tag.Key == DetachVolumesTag {\n\n\t\t\tdetachVolumes, _ := strconv.ParseBool(*tag.Value)\n\n\t\t\tif detachVolumes {\n\t\t\t\te.applyToVolumes(detachVolume)\n\t\t\t} else {\n\t\t\t\tlog.Info.Printf(\"Tag '%s' value is '%s' - not detaching volumes\", DetachVolumesTag, *tag.Value)\n\t\t\t}\n\n\t\t\tbreak\n\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (e EC2Instance) AttachVolumes() {\n\te.applyToVolumes(attachVolume)\n}\n\nfunc (e EC2Instance) ShowVolumesInfo() {\n\te.applyToVolumes(showVolumeInfo)\n}\n\nvar attachVolume = func(volume *AllocatedVolume) {\n\n\tif err := volume.Attach(); err != nil {\n\t\tlog.Error.Printf(\"Unable to attach volume : %s\\n\", err)\n\t}\n}\n\nvar detachVolume = func(volume *AllocatedVolume) {\n\n\tif err := volume.Detach(); err != nil {\n\t\tlog.Error.Printf(\"Unable to detach volume : %s\\n\", err)\n\t}\n}\n\nvar showVolumeInfo = func(volume *AllocatedVolume) {\n\tbuf := new(bytes.Buffer)\n\n\tif err := volume.Info(buf); err != nil {\n\t\tlog.Error.Printf(\"Unable to get info for volume : %s\\n\", err)\n\t\treturn\n\t}\n\tos.Stdout.WriteString(buf.String())\n\n}\n\nfunc (e EC2Instance) applyToVolumes(action func(volume *AllocatedVolume)) {\n\tif volumes, err := e.AllocatedVolumes(); err != nil {\n\t\tlog.Error.Printf(\"Unable to find allocated volumes : %s\", err)\n\t} else {\n\n\t\tvar wg sync.WaitGroup\n\n\t\tfor _, volume := range volumes {\n\n\t\t\twg.Add(1)\n\t\t\tgo func(action func(volume *AllocatedVolume), volume *AllocatedVolume) {\n\n\t\t\t\tdefer wg.Done()\n\t\t\t\taction(volume)\n\n\t\t\t}(action, volume)\n\n\t\t}\n\n\t\twg.Wait()\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package logpeck\n\nimport (\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n)\n\nfunc NewAddTaskHandler(pecker *Pecker, db *DB) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"AddTaskHandler\")\n\t\tr_str, _ := httputil.DumpRequest(r, false)\n\t\tlog.Printf(\"Request len[%d], body[%s]\", len(r_str), r_str)\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(\"AddTaskHandler Success\\n\"))\n\t}\n}\n\nfunc NewUpdateTaskHandler(pecker *Pecker, db *DB) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"UpdateTaskHandler\")\n\t\tr_str, _ := httputil.DumpRequest(r, false)\n\t\tlog.Printf(\"Request len[%d], body[%s]\", len(r_str), r_str)\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(\"UpdateTaskHandler Success\\n\"))\n\t}\n}\n\nfunc NewPauseTaskHandler(pecker *Pecker, db *DB) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"PauseTaskHandler\")\n\t\tr_str, _ := httputil.DumpRequest(r, false)\n\t\tlog.Printf(\"Request len[%d], body[%s]\", len(r_str), r_str)\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(\"PauseTaskHandler Success\\n\"))\n\t}\n}\n\nfunc NewRemoveTaskHandler(pecker *Pecker, db *DB) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Printf(\"RemoveTaskHandler\")\n\t\tr_str, _ := httputil.DumpRequest(r, false)\n\t\tlog.Printf(\"Request len[%d], body[%s]\", len(r_str), r_str)\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(\"RemoveTaskHandler Success\\n\"))\n\t}\n}\n<commit_msg>update http handler<commit_after>package logpeck\n\nimport (\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n)\n\nfunc logRequest(r *http.Request, prefix string) {\n\tr_str, _ := httputil.DumpRequest(r, true)\n\tlog.Printf(\"[%s] req_len[%d] req[%s]\", prefix, len(r_str), r_str)\n}\n\nfunc NewAddTaskHandler(pecker *Pecker, db *DB) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlogRequest(r, \"AddTaskHandler\")\n\t\tdefer r.Body.Close()\n\n\t\tvar config PeckTaskConfig\n\t\traw, _ := ioutil.ReadAll(r.Body)\n\t\terr := json.Unmarshal(raw, &config)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Parse PeckTaskConfig error, %s\", err)\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tw.Write([]byte(\"Bad Request\\n\"))\n\t\t\treturn\n\t\t}\n\n\t\terr = pecker.AddPeckTask(&config)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"AddTaskConfig error, %s\", err)\n\t\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\t\tw.Write([]byte(\"Add failed, \" + err.Error() + \"\\n\"))\n\t\t\treturn\n\t\t}\n\n\t\terr = db.SaveConfig(&config)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"AddTaskConfig error, save config error, %s\", err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tw.Write([]byte(err.Error() + \"\\n\"))\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(\"OK\\n\"))\n\t\treturn\n\t}\n}\n\nfunc NewUpdateTaskHandler(pecker *Pecker, db *DB) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlogRequest(r, \"UpdateTaskHandler\")\n\t\tdefer r.Body.Close()\n\n\t\tvar config PeckTaskConfig\n\t\traw, _ := ioutil.ReadAll(r.Body)\n\t\terr := json.Unmarshal(raw, &config)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Parse PeckTaskConfig error, %s\", err)\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tw.Write([]byte(\"Bad Request\\n\"))\n\t\t\treturn\n\t\t}\n\n\t\terr = pecker.UpdatePeckTask(&config)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\t\tw.Write([]byte(\"Update failed, \" + err.Error() + \"\\n\"))\n\t\t\treturn\n\t\t}\n\n\t\terr = db.SaveConfig(&config)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"UpdateTaskConfig error, save config error, %s\", err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tw.Write([]byte(err.Error() + \"\\n\"))\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(\"UpdateTaskHandler Success\\n\"))\n\t}\n}\n\nfunc NewStartTaskHandler(pecker *Pecker, db *DB) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlogRequest(r, \"StartTaskHandler\")\n\t\tdefer r.Body.Close()\n\n\t\tvar config PeckTaskConfig\n\t\traw, _ := ioutil.ReadAll(r.Body)\n\t\terr := json.Unmarshal(raw, &config)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Start PeckTaskConfig error, %s\", err)\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tw.Write([]byte(\"Bad Request\\n\"))\n\t\t\treturn\n\t\t}\n\n\t\tconfig_w, c_err := db.GetConfig(&config)\n\t\tif c_err != nil {\n\t\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\t\tw.Write([]byte(\"Start failed, \" + c_err.Error() + \"\\n\"))\n\t\t\treturn\n\t\t}\n\n\t\terr = pecker.StartPeckTask(config_w)\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\t\tw.Write([]byte(\"Update failed, \" + err.Error() + \"\\n\"))\n\t\t\treturn\n\t\t}\n\n\t\terr = db.SaveConfig(&config)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"UpdateTaskConfig error, save config error, %s\", err)\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tw.Write([]byte(err.Error() + \"\\n\"))\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(\"PauseTaskHandler Success\\n\"))\n\t}\n}\n\nfunc NewPauseTaskHandler(pecker *Pecker, db *DB) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlogRequest(r, \"PauseTaskHandler\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(\"PauseTaskHandler Success\\n\"))\n\t}\n}\n\nfunc NewRemoveTaskHandler(pecker *Pecker, db *DB) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlogRequest(r, \"RemoveTaskHandler\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Write([]byte(\"RemoveTaskHandler Success\\n\"))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package itchio\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/mitchellh\/mapstructure\"\n\t\"github.com\/pkg\/errors\"\n)\n\nfunc getDebugLevel() int64 {\n\tval, err := strconv.ParseInt(os.Getenv(\"GO_ITCHIO_DEBUG\"), 10, 64)\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn val\n}\n\nvar (\n\tdebugLevel = getDebugLevel()\n\tlogRequests = debugLevel >= 1\n\tdumpAPICalls = debugLevel >= 2\n)\n\n\/\/ Get performs an HTTP GET request to the API\nfunc (c *Client) Get(ctx context.Context, url string) (*http.Response, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq = req.WithContext(ctx)\n\treturn c.Do(req)\n}\n\n\/\/ GetResponse performs an HTTP GET request and parses the API response.\nfunc (c *Client) GetResponse(ctx context.Context, url string, dst interface{}) error {\n\tresp, err := c.Get(ctx, url)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\terr = ParseAPIResponse(dst, resp)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\treturn nil\n}\n\n\/\/ PostForm performs an HTTP POST request to the API, with url-encoded parameters\nfunc (c *Client) PostForm(ctx context.Context, url string, data url.Values) (*http.Response, error) {\n\treq, err := http.NewRequest(\"POST\", url, strings.NewReader(data.Encode()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq = req.WithContext(ctx)\n\treq.Header.Set(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\treturn c.Do(req)\n}\n\n\/\/ PostFormResponse performs an HTTP POST request to the API *and* parses the API response.\nfunc (c *Client) PostFormResponse(ctx context.Context, url string, data url.Values, dst interface{}) error {\n\tresp, err := c.PostForm(ctx, url, data)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\terr = ParseAPIResponse(dst, resp)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\treturn nil\n}\n\n\/\/ Do performs a request (any method). It takes care of JWT or API key\n\/\/ authentication, sets the proper user agent, has built-in retry,\nfunc (c *Client) Do(req *http.Request) (*http.Response, error) {\n\tc.Limiter.Wait(req.Context())\n\treq.Header.Add(\"Authorization\", c.Key)\n\treq.Header.Set(\"User-Agent\", c.UserAgent)\n\treq.Header.Set(\"Accept-Language\", c.AcceptedLanguage)\n\treq.Header.Set(\"Accept\", \"application\/vnd.itch.v2\")\n\n\tvar res *http.Response\n\tvar err error\n\n\tif logRequests {\n\t\tfmt.Fprintf(os.Stderr, \"%s %s [request]\\n\", req.Method, req.URL)\n\t}\n\n\tif dumpAPICalls {\n\t\tfor k, vv := range req.Header {\n\t\t\tfor _, v := range vv {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"[request] %s: %s\\n\", k, v)\n\t\t\t}\n\t\t}\n\t}\n\n\tretryPatterns := append(c.RetryPatterns, time.Millisecond)\n\n\tfor _, sleepTime := range retryPatterns {\n\t\tif c.onOutgoingRequest != nil {\n\t\t\tc.onOutgoingRequest(req)\n\t\t}\n\t\tres, err = c.HTTPClient.Do(req)\n\t\tif err != nil {\n\t\t\tif strings.Contains(err.Error(), \"TLS handshake timeout\") {\n\t\t\t\ttime.Sleep(sleepTime + time.Duration(rand.Int()%1000)*time.Millisecond)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif res.StatusCode == 503 {\n\t\t\tres.Body.Close()\n\n\t\t\t\/\/ Rate limited, try again according to patterns.\n\t\t\t\/\/ following https:\/\/cloud.google.com\/storage\/docs\/json_api\/v1\/how-tos\/upload#exp-backoff to the letter\n\t\t\tactualSleepTime := sleepTime + time.Duration(rand.Int()%1000)*time.Millisecond\n\t\t\tif c.onRateLimited != nil {\n\t\t\t\tc.onRateLimited(req, res)\n\t\t\t}\n\t\t\tif logRequests {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%s %s [rate limited, sleeping %v]\\n\", req.Method, req.URL, actualSleepTime)\n\t\t\t}\n\t\t\ttime.Sleep(actualSleepTime)\n\t\t\tcontinue\n\t\t}\n\n\t\tbreak\n\t}\n\n\treturn res, err\n}\n\n\/\/ MakePath crafts an API url from our configured base URL\nfunc (c *Client) MakePath(format string, a ...interface{}) string {\n\treturn c.MakeValuesPath(nil, format, a...)\n}\n\n\/\/ MakeValuesPath crafts an API url from our configured base URL\nfunc (c *Client) MakeValuesPath(values url.Values, format string, a ...interface{}) string {\n\tbase := strings.Trim(c.BaseURL, \"\/\")\n\tsubPath := strings.Trim(fmt.Sprintf(format, a...), \"\/\")\n\tpath := fmt.Sprintf(\"%s\/%s\", base, subPath)\n\tif len(values) == 0 {\n\t\treturn path\n\t}\n\treturn fmt.Sprintf(\"%s?%s\", path, values.Encode())\n}\n\nfunc asHTTPCodeError(res *http.Response) error {\n\tif res.StatusCode\/100 != 2 {\n\t\terr := fmt.Errorf(\"Server error: HTTP %s for %s\", res.Status, res.Request.URL.Path)\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ ParseAPIResponse unmarshals an HTTP response into one of out response\n\/\/ data structures\nfunc ParseAPIResponse(dst interface{}, res *http.Response) error {\n\tif res == nil || res.Body == nil {\n\t\treturn fmt.Errorf(\"No response from server\")\n\t}\n\n\tbodyReader := res.Body\n\tdefer bodyReader.Close()\n\n\tbody, err := ioutil.ReadAll(bodyReader)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tif dumpAPICalls {\n\t\tfmt.Fprintf(os.Stderr, \"[response] %s\\n\", string(body))\n\t}\n\n\tintermediate := make(map[string]interface{})\n\n\terr = json.NewDecoder(bytes.NewReader(body)).Decode(&intermediate)\n\tif err != nil {\n\t\tif he := asHTTPCodeError(res); he != nil {\n\t\t\treturn he\n\t\t}\n\n\t\tmsg := fmt.Sprintf(\"JSON decode error: %s\\n\\nBody: %s\\n\\n\", err.Error(), string(body))\n\t\treturn errors.New(msg)\n\t}\n\n\tif errorsField, ok := intermediate[\"errors\"]; ok {\n\t\tif errorsList, ok := errorsField.([]interface{}); ok {\n\t\t\tvar messages []string\n\t\t\tfor _, el := range errorsList {\n\t\t\t\tif errorMessage, ok := el.(string); ok {\n\t\t\t\t\tmessages = append(messages, errorMessage)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(messages) > 0 {\n\t\t\t\treturn &APIError{Messages: messages, StatusCode: res.StatusCode, Path: res.Request.URL.Path}\n\t\t\t}\n\t\t}\n\t}\n\n\tif he := asHTTPCodeError(res); he != nil {\n\t\treturn he\n\t}\n\n\tintermediate = camelifyMap(intermediate)\n\n\tif dumpAPICalls {\n\t\tenc := json.NewEncoder(os.Stderr)\n\t\tenc.SetIndent(\"[intermediate] \", \" \")\n\t\tenc.Encode(intermediate)\n\t}\n\n\tdecoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{\n\t\tTagName: \"json\",\n\t\tResult: dst,\n\t\t\/\/ see https:\/\/github.com\/itchio\/itch\/issues\/1549\n\t\tWeaklyTypedInput: true,\n\t\tDecodeHook: mapstructure.ComposeDecodeHookFunc(\n\t\t\tmapstructure.StringToTimeHookFunc(time.RFC3339Nano),\n\t\t\tGameHookFunc,\n\t\t\tUploadHookFunc,\n\t\t),\n\t})\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\terr = decoder.Decode(intermediate)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"mapstructure decode error: %s\\n\\nBody: %#v\\n\\n\", err.Error(), intermediate)\n\t\treturn errors.New(msg)\n\t}\n\n\tif res.StatusCode != 200 {\n\t\treturn errors.Errorf(\"HTTP %v\", res.StatusCode)\n\t}\n\n\treturn nil\n}\n\n\/\/ FindBuildFile looks for an uploaded file of the right type\n\/\/ in a list of file. Returns nil if it can't find one.\nfunc FindBuildFile(fileType BuildFileType, files []*BuildFile) *BuildFile {\n\tfor _, f := range files {\n\t\tif f.Type == fileType && f.State == BuildFileStateUploaded {\n\t\t\treturn f\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ FindBuildFileEx looks for an uploaded file of the right type\n\/\/ and subtype in a list of file. Returns nil if it can't find one.\nfunc FindBuildFileEx(fileType BuildFileType, fileSubType BuildFileSubType, files []*BuildFile) *BuildFile {\n\tfor _, f := range files {\n\t\tif f.Type == fileType && f.SubType == fileSubType && f.State == BuildFileStateUploaded {\n\t\t\treturn f\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>Call limiter *inside* retry loop<commit_after>package itchio\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/mitchellh\/mapstructure\"\n\t\"github.com\/pkg\/errors\"\n)\n\nfunc getDebugLevel() int64 {\n\tval, err := strconv.ParseInt(os.Getenv(\"GO_ITCHIO_DEBUG\"), 10, 64)\n\tif err != nil {\n\t\treturn 0\n\t}\n\treturn val\n}\n\nvar (\n\tdebugLevel = getDebugLevel()\n\tlogRequests = debugLevel >= 1\n\tdumpAPICalls = debugLevel >= 2\n)\n\n\/\/ Get performs an HTTP GET request to the API\nfunc (c *Client) Get(ctx context.Context, url string) (*http.Response, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq = req.WithContext(ctx)\n\treturn c.Do(req)\n}\n\n\/\/ GetResponse performs an HTTP GET request and parses the API response.\nfunc (c *Client) GetResponse(ctx context.Context, url string, dst interface{}) error {\n\tresp, err := c.Get(ctx, url)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\terr = ParseAPIResponse(dst, resp)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\treturn nil\n}\n\n\/\/ PostForm performs an HTTP POST request to the API, with url-encoded parameters\nfunc (c *Client) PostForm(ctx context.Context, url string, data url.Values) (*http.Response, error) {\n\treq, err := http.NewRequest(\"POST\", url, strings.NewReader(data.Encode()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq = req.WithContext(ctx)\n\treq.Header.Set(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\treturn c.Do(req)\n}\n\n\/\/ PostFormResponse performs an HTTP POST request to the API *and* parses the API response.\nfunc (c *Client) PostFormResponse(ctx context.Context, url string, data url.Values, dst interface{}) error {\n\tresp, err := c.PostForm(ctx, url, data)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\terr = ParseAPIResponse(dst, resp)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\treturn nil\n}\n\n\/\/ Do performs a request (any method). It takes care of JWT or API key\n\/\/ authentication, sets the proper user agent, has built-in retry,\nfunc (c *Client) Do(req *http.Request) (*http.Response, error) {\n\treq.Header.Add(\"Authorization\", c.Key)\n\treq.Header.Set(\"User-Agent\", c.UserAgent)\n\treq.Header.Set(\"Accept-Language\", c.AcceptedLanguage)\n\treq.Header.Set(\"Accept\", \"application\/vnd.itch.v2\")\n\n\tvar res *http.Response\n\tvar err error\n\n\tif logRequests {\n\t\tfmt.Fprintf(os.Stderr, \"%s %s [request]\\n\", req.Method, req.URL)\n\t}\n\n\tif dumpAPICalls {\n\t\tfor k, vv := range req.Header {\n\t\t\tfor _, v := range vv {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"[request] %s: %s\\n\", k, v)\n\t\t\t}\n\t\t}\n\t}\n\n\tretryPatterns := append(c.RetryPatterns, time.Millisecond)\n\n\tfor _, sleepTime := range retryPatterns {\n\t\tc.Limiter.Wait(req.Context())\n\t\tif c.onOutgoingRequest != nil {\n\t\t\tc.onOutgoingRequest(req)\n\t\t}\n\t\tres, err = c.HTTPClient.Do(req)\n\t\tif err != nil {\n\t\t\tif strings.Contains(err.Error(), \"TLS handshake timeout\") {\n\t\t\t\ttime.Sleep(sleepTime + time.Duration(rand.Int()%1000)*time.Millisecond)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif res.StatusCode == 503 {\n\t\t\tres.Body.Close()\n\n\t\t\t\/\/ Rate limited, try again according to patterns.\n\t\t\t\/\/ following https:\/\/cloud.google.com\/storage\/docs\/json_api\/v1\/how-tos\/upload#exp-backoff to the letter\n\t\t\tactualSleepTime := sleepTime + time.Duration(rand.Int()%1000)*time.Millisecond\n\t\t\tif c.onRateLimited != nil {\n\t\t\t\tc.onRateLimited(req, res)\n\t\t\t}\n\t\t\tif logRequests {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"%s %s [rate limited, sleeping %v]\\n\", req.Method, req.URL, actualSleepTime)\n\t\t\t}\n\t\t\ttime.Sleep(actualSleepTime)\n\t\t\tcontinue\n\t\t}\n\n\t\tbreak\n\t}\n\n\treturn res, err\n}\n\n\/\/ MakePath crafts an API url from our configured base URL\nfunc (c *Client) MakePath(format string, a ...interface{}) string {\n\treturn c.MakeValuesPath(nil, format, a...)\n}\n\n\/\/ MakeValuesPath crafts an API url from our configured base URL\nfunc (c *Client) MakeValuesPath(values url.Values, format string, a ...interface{}) string {\n\tbase := strings.Trim(c.BaseURL, \"\/\")\n\tsubPath := strings.Trim(fmt.Sprintf(format, a...), \"\/\")\n\tpath := fmt.Sprintf(\"%s\/%s\", base, subPath)\n\tif len(values) == 0 {\n\t\treturn path\n\t}\n\treturn fmt.Sprintf(\"%s?%s\", path, values.Encode())\n}\n\nfunc asHTTPCodeError(res *http.Response) error {\n\tif res.StatusCode\/100 != 2 {\n\t\terr := fmt.Errorf(\"Server error: HTTP %s for %s\", res.Status, res.Request.URL.Path)\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ ParseAPIResponse unmarshals an HTTP response into one of out response\n\/\/ data structures\nfunc ParseAPIResponse(dst interface{}, res *http.Response) error {\n\tif res == nil || res.Body == nil {\n\t\treturn fmt.Errorf(\"No response from server\")\n\t}\n\n\tbodyReader := res.Body\n\tdefer bodyReader.Close()\n\n\tbody, err := ioutil.ReadAll(bodyReader)\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\tif dumpAPICalls {\n\t\tfmt.Fprintf(os.Stderr, \"[response] %s\\n\", string(body))\n\t}\n\n\tintermediate := make(map[string]interface{})\n\n\terr = json.NewDecoder(bytes.NewReader(body)).Decode(&intermediate)\n\tif err != nil {\n\t\tif he := asHTTPCodeError(res); he != nil {\n\t\t\treturn he\n\t\t}\n\n\t\tmsg := fmt.Sprintf(\"JSON decode error: %s\\n\\nBody: %s\\n\\n\", err.Error(), string(body))\n\t\treturn errors.New(msg)\n\t}\n\n\tif errorsField, ok := intermediate[\"errors\"]; ok {\n\t\tif errorsList, ok := errorsField.([]interface{}); ok {\n\t\t\tvar messages []string\n\t\t\tfor _, el := range errorsList {\n\t\t\t\tif errorMessage, ok := el.(string); ok {\n\t\t\t\t\tmessages = append(messages, errorMessage)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(messages) > 0 {\n\t\t\t\treturn &APIError{Messages: messages, StatusCode: res.StatusCode, Path: res.Request.URL.Path}\n\t\t\t}\n\t\t}\n\t}\n\n\tif he := asHTTPCodeError(res); he != nil {\n\t\treturn he\n\t}\n\n\tintermediate = camelifyMap(intermediate)\n\n\tif dumpAPICalls {\n\t\tenc := json.NewEncoder(os.Stderr)\n\t\tenc.SetIndent(\"[intermediate] \", \" \")\n\t\tenc.Encode(intermediate)\n\t}\n\n\tdecoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{\n\t\tTagName: \"json\",\n\t\tResult: dst,\n\t\t\/\/ see https:\/\/github.com\/itchio\/itch\/issues\/1549\n\t\tWeaklyTypedInput: true,\n\t\tDecodeHook: mapstructure.ComposeDecodeHookFunc(\n\t\t\tmapstructure.StringToTimeHookFunc(time.RFC3339Nano),\n\t\t\tGameHookFunc,\n\t\t\tUploadHookFunc,\n\t\t),\n\t})\n\tif err != nil {\n\t\treturn errors.WithStack(err)\n\t}\n\n\terr = decoder.Decode(intermediate)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"mapstructure decode error: %s\\n\\nBody: %#v\\n\\n\", err.Error(), intermediate)\n\t\treturn errors.New(msg)\n\t}\n\n\tif res.StatusCode != 200 {\n\t\treturn errors.Errorf(\"HTTP %v\", res.StatusCode)\n\t}\n\n\treturn nil\n}\n\n\/\/ FindBuildFile looks for an uploaded file of the right type\n\/\/ in a list of file. Returns nil if it can't find one.\nfunc FindBuildFile(fileType BuildFileType, files []*BuildFile) *BuildFile {\n\tfor _, f := range files {\n\t\tif f.Type == fileType && f.State == BuildFileStateUploaded {\n\t\t\treturn f\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ FindBuildFileEx looks for an uploaded file of the right type\n\/\/ and subtype in a list of file. Returns nil if it can't find one.\nfunc FindBuildFileEx(fileType BuildFileType, fileSubType BuildFileSubType, files []*BuildFile) *BuildFile {\n\tfor _, f := range files {\n\t\tif f.Type == fileType && f.SubType == fileSubType && f.State == BuildFileStateUploaded {\n\t\t\treturn f\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nvar shellGodownloader = `#!\/bin\/sh\nset -e\n# Code generated by godownloader. DO NOT EDIT.\n#\n\nusage() {\n this=$1\n cat <<EOF\n$this: download go binaries for {{ $.Release.GitHub.Owner }}\/{{ $.Release.GitHub.Name }}\n\nUsage: $this [version]\n where [version] is 'latest' or a version number from\n https:\/\/github.com\/{{ $.Release.GitHub.Owner }}\/{{ $.Release.GitHub.Name }}\/releases\n\nGenerated by godownloader\n https:\/\/github.com\/goreleaser\/godownloader\n\nEOF\n}\n` + shellfn + `\nis_supported() {\n platform=$1\n found=0\n case \"$platform\" in \n\t {{- range $goos := $.Build.Goos }}{{ range $goarch := $.Build.Goarch }}\n\t {{ $goos }}\/{{ $goarch }}) found=1 ;;\n\t {{- end }}{{ end }}\n esac\n {{- if $.Build.Ignore }}\n case \"$platform\" in \n {{- range $ignore := $.Build.Ignore }}\n {{ $ignore.Goos }}\/{{ $ignore.Goarch }}) found=0 ;; \n {{- end }}\n esac\n {{- end }}\n return $found\n}\n\nOWNER={{ $.Release.GitHub.Owner }}\nREPO={{ $.Release.GitHub.Name }}\nBINARY={{ .Build.Binary }}\nFORMAT={{ .Archive.Format }}\nBINDIR=${BINDIR:-.\/bin}\n\nuname_os_check\nuname_arch_check\n\nVERSION=$1\ncase \"${VERSION}\" in\n latest)\n VERSION=\"\"\n ;;\n -h|-?|*help*)\n usage \"$0\"\n exit 1\n ;;\nesac\n\n# check to see if request version is supported\n# based on goreleaser.yml file\n\nPREFIX=\"$OWNER\/$REPO\"\n\nif [ -z \"${VERSION}\" ]; then\n echo \"$PREFIX: checking GitHub for latest version\"\n VERSION=$(github_last_release \"$OWNER\/$REPO\")\nfi\n# if version starts with 'v', remove it\nVERSION=${VERSION#v}\n\nOS=$(uname_os)\nARCH=$(uname_arch)\n\n# change format (tar.gz or zip) based on ARCH\n{{- with .Archive.FormatOverrides }}\ncase ${ARCH} in\n{{- range . }}\n{{ .Goos }}) FORMAT={{ .Format }} ;;\nesac\n{{- end }}\n{{- end }}\n\n# adjust archive name based on OS\n{{- with .Archive.Replacements }}\ncase ${OS} in\n{{- range $k, $v := . }}\n{{ $k }}) OS={{ $v }} ;;\n{{- end }}\nesac\n\n# adjust archive name based on ARCH\ncase ${ARCH} in\n{{- range $k, $v := . }}\n{{ $k }}) ARCH={{ $v }} ;;\n{{- end }}\nesac\n{{- end }}\n\necho \"$PREFIX: found version ${VERSION} for ${OS}\/${ARCH}\"\n\n{{ .Archive.NameTemplate }}\nTARBALL=${NAME}.${FORMAT}\nTARBALL_URL=https:\/\/github.com\/${OWNER}\/${REPO}\/releases\/download\/v${VERSION}\/${TARBALL}\nCHECKSUM=${REPO}_checksums.txt\nCHECKSUM_URL=https:\/\/github.com\/${OWNER}\/${REPO}\/releases\/download\/v${VERSION}\/${CHECKSUM}\n\n# this function wraps all the destructive operations\n# if a curl|bash cuts off the end of the script due to\n# network, either nothing will happen or will syntax error\n# out preventing half-done work\nexecute() {\n TMPDIR=$(mktmpdir)\n echo \"$PREFIX: downloading ${TARBALL_URL}\"\n http_download \"${TMPDIR}\/${TARBALL}\" \"${TARBALL_URL}\"\n\n echo \"$PREFIX: verifying checksums\"\n http_download \"${TMPDIR}\/${CHECKSUM}\" \"${CHECKSUM_URL}\"\n hash_sha256_verify \"${TMPDIR}\/${TARBALL}\" \"${TMPDIR}\/${CHECKSUM}\"\n\n (cd \"${TMPDIR}\" && untar \"${TARBALL}\")\n install -d \"${BINDIR}\"\n install \"${TMPDIR}\/${BINARY}\" \"${BINDIR}\/\"\n echo \"$PREFIX: installed as ${BINDIR}\/${BINARY}\"\n}\n\nexecute\n`\n<commit_msg>Issue #20 - warn if platform is not supported<commit_after>package main\n\nvar shellGodownloader = `#!\/bin\/sh\nset -e\n# Code generated by godownloader. DO NOT EDIT.\n#\n\nusage() {\n this=$1\n cat <<EOF\n$this: download go binaries for {{ $.Release.GitHub.Owner }}\/{{ $.Release.GitHub.Name }}\n\nUsage: $this [version]\n where [version] is 'latest' or a version number from\n https:\/\/github.com\/{{ $.Release.GitHub.Owner }}\/{{ $.Release.GitHub.Name }}\/releases\n\nGenerated by godownloader\n https:\/\/github.com\/goreleaser\/godownloader\n\nEOF\n}\n` + shellfn + `\nis_supported_platform() {\n platform=$1\n found=1\n case \"$platform\" in \n {{- range $goos := $.Build.Goos }}{{ range $goarch := $.Build.Goarch }}\n {{ $goos }}\/{{ $goarch }}) found=0 ;;\n {{- end }}{{ end }}\n esac\n {{- if $.Build.Ignore }}\n case \"$platform\" in \n {{- range $ignore := $.Build.Ignore }}\n {{ $ignore.Goos }}\/{{ $ignore.Goarch }}) found=1 ;; \n {{- end }}\n esac\n {{- end }}\n return $found\n}\n\nOWNER={{ $.Release.GitHub.Owner }}\nREPO={{ $.Release.GitHub.Name }}\nBINARY={{ .Build.Binary }}\nFORMAT={{ .Archive.Format }}\nBINDIR=${BINDIR:-.\/bin}\n\nVERSION=$1\ncase \"${VERSION}\" in\n latest)\n VERSION=\"\"\n ;;\n -h|-?|*help*)\n usage \"$0\"\n exit 1\n ;;\nesac\n\nuname_os_check\nuname_arch_check\n\nOS=$(uname_os)\nARCH=$(uname_arch)\nPREFIX=\"$OWNER\/$REPO\"\nPLATFORM=\"${OS}\/${ARCH}\"\nif is_supported_platform \"$PLATFORM\"; then\n # optional logging goes here\n true\nelse\n echo \"${PREFIX}: platform $PLATFORM is not supported. Make sure this script is up-to-date and file request at https:\/\/github.com\/${PREFIX}\/issues\/new\"\n exit 1\nfi\n\nif [ -z \"${VERSION}\" ]; then\n echo \"$PREFIX: checking GitHub for latest version\"\n VERSION=$(github_last_release \"$OWNER\/$REPO\")\nfi\n# if version starts with 'v', remove it\nVERSION=${VERSION#v}\n\n\n# change format (tar.gz or zip) based on ARCH\n{{- with .Archive.FormatOverrides }}\ncase ${ARCH} in\n{{- range . }}\n{{ .Goos }}) FORMAT={{ .Format }} ;;\nesac\n{{- end }}\n{{- end }}\n\n# adjust archive name based on OS\n{{- with .Archive.Replacements }}\ncase ${OS} in\n{{- range $k, $v := . }}\n{{ $k }}) OS={{ $v }} ;;\n{{- end }}\nesac\n\n# adjust archive name based on ARCH\ncase ${ARCH} in\n{{- range $k, $v := . }}\n{{ $k }}) ARCH={{ $v }} ;;\n{{- end }}\nesac\n{{- end }}\n\necho \"$PREFIX: found version ${VERSION} for ${OS}\/${ARCH}\"\n\n{{ .Archive.NameTemplate }}\nTARBALL=${NAME}.${FORMAT}\nTARBALL_URL=https:\/\/github.com\/${OWNER}\/${REPO}\/releases\/download\/v${VERSION}\/${TARBALL}\nCHECKSUM=${REPO}_checksums.txt\nCHECKSUM_URL=https:\/\/github.com\/${OWNER}\/${REPO}\/releases\/download\/v${VERSION}\/${CHECKSUM}\n\n# this function wraps all the destructive operations\n# if a curl|bash cuts off the end of the script due to\n# network, either nothing will happen or will syntax error\n# out preventing half-done work\nexecute() {\n TMPDIR=$(mktmpdir)\n echo \"$PREFIX: downloading ${TARBALL_URL}\"\n http_download \"${TMPDIR}\/${TARBALL}\" \"${TARBALL_URL}\"\n\n echo \"$PREFIX: verifying checksums\"\n http_download \"${TMPDIR}\/${CHECKSUM}\" \"${CHECKSUM_URL}\"\n hash_sha256_verify \"${TMPDIR}\/${TARBALL}\" \"${TMPDIR}\/${CHECKSUM}\"\n\n (cd \"${TMPDIR}\" && untar \"${TARBALL}\")\n install -d \"${BINDIR}\"\n install \"${TMPDIR}\/${BINARY}\" \"${BINDIR}\/\"\n echo \"$PREFIX: installed as ${BINDIR}\/${BINARY}\"\n}\n\nexecute\n`\n<|endoftext|>"} {"text":"<commit_before>package beater\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\tcfg \"github.com\/elastic\/beats\/filebeat\/config\"\n\t\"github.com\/elastic\/beats\/filebeat\/input\"\n\t\"github.com\/elastic\/beats\/libbeat\/logp\"\n)\n\nvar debugf = logp.MakeDebug(\"spooler\")\n\n\/\/ channelSize is the number of events Channel can buffer before blocking will occur.\nconst channelSize = 16\n\n\/\/ Spooler aggregates the events and sends the aggregated data to the publisher.\ntype Spooler struct {\n\tChannel chan *input.FileEvent \/\/ Channel is the input to the Spooler.\n\n\t\/\/ Config\n\tidleTimeout time.Duration \/\/ How often to flush the spooler if spoolSize is not reached.\n\tspoolSize uint64 \/\/ Maximum number of events that are stored before a flush occurs.\n\n\texit chan struct{} \/\/ Channel used to signal shutdown.\n\tnextFlushTime time.Time \/\/ Scheduled time of the next flush.\n\tpublisher chan<- []*input.FileEvent \/\/ Channel used to publish events.\n\tspool []*input.FileEvent \/\/ FileEvents being held by the Spooler.\n\twg sync.WaitGroup \/\/ WaitGroup used to control the shutdown.\n}\n\n\/\/ NewSpooler creates and returns a new Spooler. The returned Spooler must be\n\/\/ started by calling Start before it can be used.\nfunc NewSpooler(\n\tconfig cfg.FilebeatConfig,\n\tpublisher chan<- []*input.FileEvent,\n) *Spooler {\n\tspoolSize := config.SpoolSize\n\tif spoolSize <= 0 {\n\t\tspoolSize = cfg.DefaultSpoolSize\n\t\tdebugf(\"Spooler will use the default spool_size of %d\", spoolSize)\n\t}\n\n\tidleTimeout := config.IdleTimeout\n\tif idleTimeout <= 0 {\n\t\tidleTimeout = cfg.DefaultIdleTimeout\n\t\tdebugf(\"Spooler will use the default idle_timeout of %s\", idleTimeout)\n\t}\n\n\treturn &Spooler{\n\t\tChannel: make(chan *input.FileEvent, channelSize),\n\t\tidleTimeout: idleTimeout,\n\t\tspoolSize: spoolSize,\n\t\texit: make(chan struct{}),\n\t\tnextFlushTime: time.Now().Add(idleTimeout),\n\t\tpublisher: publisher,\n\t\tspool: make([]*input.FileEvent, 0, spoolSize),\n\t}\n}\n\n\/\/ Start starts the Spooler. Stop must be called to stop the Spooler.\nfunc (s *Spooler) Start() {\n\ts.wg.Add(1)\n\tgo s.run()\n}\n\n\/\/ run queues events that it reads from Channel and flushes them when either the\n\/\/ queue reaches its capacity (which is spoolSize) or a timeout period elapses.\nfunc (s *Spooler) run() {\n\tdefer s.wg.Done()\n\n\tticker := time.NewTicker(s.idleTimeout \/ 2)\n\n\tlogp.Info(\"Starting spooler: spool_size: %v; idle_timeout: %s\",\n\t\ts.spoolSize, s.idleTimeout)\n\nloop:\n\tfor {\n\t\tselect {\n\t\tcase <-s.exit:\n\t\t\tticker.Stop()\n\t\t\tbreak loop\n\t\tcase event := <-s.Channel:\n\t\t\tif event != nil {\n\t\t\t\ts.queue(event)\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\ts.timedFlush()\n\t\t}\n\t}\n\n\t\/\/ Drain any events that may remain in Channel.\n\tfor e := range s.Channel {\n\t\ts.queue(e)\n\t}\n\tdebugf(\"Flushing events from spooler at shutdown\")\n\ts.flush()\n}\n\n\/\/ Stop stops this Spooler. This method blocks until all events have been\n\/\/ flushed to the publisher. The method should only be invoked one time after\n\/\/ Start has been invoked.\nfunc (s *Spooler) Stop() {\n\tlogp.Info(\"Stopping spooler\")\n\n\t\/\/ Signal to the run method that it should stop.\n\tclose(s.exit)\n\n\t\/\/ Stop accepting writes. Any events in the channel will be flushed.\n\tclose(s.Channel)\n\n\t\/\/ Wait for the flush to complete.\n\ts.wg.Wait()\n\tdebugf(\"Spooler has stopped\")\n}\n\n\/\/ queue queues a single event to be spooled. If the queue reaches spoolSize\n\/\/ while calling this method then all events in the queue will be flushed to\n\/\/ the publisher.\nfunc (s *Spooler) queue(event *input.FileEvent) {\n\ts.spool = append(s.spool, event)\n\tif len(s.spool) == cap(s.spool) {\n\t\tdebugf(\"Flushing spooler because spooler full. Events flushed: %v\", len(s.spool))\n\t\ts.flush()\n\t}\n}\n\n\/\/ timedFlush flushes the events in the queue if a flush has not occurred\n\/\/ for a period of time greater than idleTimeout.\nfunc (s *Spooler) timedFlush() {\n\tif time.Now().After(s.nextFlushTime) {\n\t\tdebugf(\"Flushing spooler because of timeout. Events flushed: %v\", len(s.spool))\n\t\ts.flush()\n\t}\n}\n\n\/\/ flush flushes all events to the publisher.\nfunc (s *Spooler) flush() {\n\tif len(s.spool) > 0 {\n\t\t\/\/ copy buffer\n\t\ttmpCopy := make([]*input.FileEvent, len(s.spool))\n\t\tcopy(tmpCopy, s.spool)\n\n\t\t\/\/ clear buffer\n\t\ts.spool = s.spool[:0]\n\n\t\t\/\/ send\n\t\ts.publisher <- tmpCopy\n\t}\n\ts.nextFlushTime = time.Now().Add(s.idleTimeout)\n}\n<commit_msg>Fix spooler shutdown blocking<commit_after>package beater\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\tcfg \"github.com\/elastic\/beats\/filebeat\/config\"\n\t\"github.com\/elastic\/beats\/filebeat\/input\"\n\t\"github.com\/elastic\/beats\/libbeat\/logp\"\n)\n\nvar debugf = logp.MakeDebug(\"spooler\")\n\n\/\/ channelSize is the number of events Channel can buffer before blocking will occur.\nconst channelSize = 16\n\n\/\/ Spooler aggregates the events and sends the aggregated data to the publisher.\ntype Spooler struct {\n\tChannel chan *input.FileEvent \/\/ Channel is the input to the Spooler.\n\n\t\/\/ Config\n\tidleTimeout time.Duration \/\/ How often to flush the spooler if spoolSize is not reached.\n\tspoolSize uint64 \/\/ Maximum number of events that are stored before a flush occurs.\n\n\texit chan struct{} \/\/ Channel used to signal shutdown.\n\tnextFlushTime time.Time \/\/ Scheduled time of the next flush.\n\tpublisher chan<- []*input.FileEvent \/\/ Channel used to publish events.\n\tspool []*input.FileEvent \/\/ FileEvents being held by the Spooler.\n\twg sync.WaitGroup \/\/ WaitGroup used to control the shutdown.\n}\n\n\/\/ NewSpooler creates and returns a new Spooler. The returned Spooler must be\n\/\/ started by calling Start before it can be used.\nfunc NewSpooler(\n\tconfig cfg.FilebeatConfig,\n\tpublisher chan<- []*input.FileEvent,\n) *Spooler {\n\tspoolSize := config.SpoolSize\n\tif spoolSize <= 0 {\n\t\tspoolSize = cfg.DefaultSpoolSize\n\t\tdebugf(\"Spooler will use the default spool_size of %d\", spoolSize)\n\t}\n\n\tidleTimeout := config.IdleTimeout\n\tif idleTimeout <= 0 {\n\t\tidleTimeout = cfg.DefaultIdleTimeout\n\t\tdebugf(\"Spooler will use the default idle_timeout of %s\", idleTimeout)\n\t}\n\n\treturn &Spooler{\n\t\tChannel: make(chan *input.FileEvent, channelSize),\n\t\tidleTimeout: idleTimeout,\n\t\tspoolSize: spoolSize,\n\t\texit: make(chan struct{}),\n\t\tnextFlushTime: time.Now().Add(idleTimeout),\n\t\tpublisher: publisher,\n\t\tspool: make([]*input.FileEvent, 0, spoolSize),\n\t}\n}\n\n\/\/ Start starts the Spooler. Stop must be called to stop the Spooler.\nfunc (s *Spooler) Start() {\n\ts.wg.Add(1)\n\tgo s.run()\n}\n\n\/\/ run queues events that it reads from Channel and flushes them when either the\n\/\/ queue reaches its capacity (which is spoolSize) or a timeout period elapses.\nfunc (s *Spooler) run() {\n\tdefer s.wg.Done()\n\n\tticker := time.NewTicker(s.idleTimeout \/ 2)\n\n\tlogp.Info(\"Starting spooler: spool_size: %v; idle_timeout: %s\",\n\t\ts.spoolSize, s.idleTimeout)\n\nloop:\n\tfor {\n\t\tselect {\n\t\tcase <-s.exit:\n\t\t\tticker.Stop()\n\t\t\tbreak loop\n\t\tcase event := <-s.Channel:\n\t\t\tif event != nil {\n\t\t\t\ts.queue(event)\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\ts.timedFlush()\n\t\t}\n\t}\n\n\t\/\/ Drain any events that may remain in Channel.\n\tfor e := range s.Channel {\n\t\ts.queue(e)\n\t}\n\tdebugf(\"Flushing events from spooler at shutdown\")\n\ts.flush()\n}\n\n\/\/ Stop stops this Spooler. This method blocks until all events have been\n\/\/ flushed to the publisher. The method should only be invoked one time after\n\/\/ Start has been invoked.\nfunc (s *Spooler) Stop() {\n\tlogp.Info(\"Stopping spooler\")\n\n\t\/\/ Signal to the run method that it should stop.\n\tclose(s.exit)\n\n\t\/\/ Stop accepting writes. Any events in the channel will be flushed.\n\tclose(s.Channel)\n\n\t\/\/ Wait for the flush to complete.\n\ts.wg.Wait()\n\tdebugf(\"Spooler has stopped\")\n}\n\n\/\/ queue queues a single event to be spooled. If the queue reaches spoolSize\n\/\/ while calling this method then all events in the queue will be flushed to\n\/\/ the publisher.\nfunc (s *Spooler) queue(event *input.FileEvent) {\n\ts.spool = append(s.spool, event)\n\tif len(s.spool) == cap(s.spool) {\n\t\tdebugf(\"Flushing spooler because spooler full. Events flushed: %v\", len(s.spool))\n\t\ts.flush()\n\t}\n}\n\n\/\/ timedFlush flushes the events in the queue if a flush has not occurred\n\/\/ for a period of time greater than idleTimeout.\nfunc (s *Spooler) timedFlush() {\n\tif time.Now().After(s.nextFlushTime) {\n\t\tdebugf(\"Flushing spooler because of timeout. Events flushed: %v\", len(s.spool))\n\t\ts.flush()\n\t}\n}\n\n\/\/ flush flushes all events to the publisher.\nfunc (s *Spooler) flush() {\n\tif len(s.spool) > 0 {\n\t\t\/\/ copy buffer\n\t\ttmpCopy := make([]*input.FileEvent, len(s.spool))\n\t\tcopy(tmpCopy, s.spool)\n\n\t\t\/\/ clear buffer\n\t\ts.spool = s.spool[:0]\n\n\t\tselect {\n\t\tcase <-s.exit:\n\t\tcase s.publisher <- tmpCopy: \/\/ send\n\t\t}\n\t}\n\ts.nextFlushTime = time.Now().Add(s.idleTimeout)\n}\n<|endoftext|>"} {"text":"<commit_before>package socket\n\nimport (\n\t\"testing\"\n)\n\nfunc TestPacketString(t *testing.T) {\n\tvar p = NewPacket(nil)\n\tp.SetSeq(21)\n\tp.SetPtype(3)\n\tp.SetSize(300)\n\tp.SetBody(map[string]int{\"a\": 1})\n\tp.SetUri(\"uri\/b\")\n\tp.SetBodyCodec(5)\n\tp.Meta().Set(\"key\", \"value\")\n\tt.Logf(\"%%s:%s\", p.String())\n\tt.Logf(\"%%v:%v\", p)\n\tt.Logf(\"%%#v:%#v\", p)\n\tt.Logf(\"%%+v:%+v\", p)\n}\n<commit_msg>Update TestPacketString<commit_after>package socket\n\nimport (\n\t\"testing\"\n)\n\nfunc TestPacketString(t *testing.T) {\n\tvar p = NewPacket()\n\tp.SetSeq(21)\n\tp.XferPipe().Append('g')\n\tp.SetPtype(3)\n\tp.SetSize(300)\n\tp.SetBody(map[string]int{\"a\": 1})\n\tp.SetUri(\"uri\/b\")\n\tp.SetBodyCodec(5)\n\tp.Meta().Set(\"key\", \"value\")\n\tt.Logf(\"%%s:%s\", p.String())\n\tt.Logf(\"%%v:%v\", p)\n\tt.Logf(\"%%#v:%#v\", p)\n\tt.Logf(\"%%+v:%+v\", p)\n}\n<|endoftext|>"} {"text":"<commit_before>package multiaddr\n\nimport \"testing\"\n\nfunc expectVarint(t *testing.T, x, expected int) {\n\tsize := VarintSize(x)\n\tif size != expected {\n\t\tt.Fatalf(\"expected varintsize of %d to be %d, got %d\", x, expected, size)\n\t}\n}\n\nfunc TestVarintSize(t *testing.T) {\n\texpectVarint(t, (1<<7)-1, 1)\n\texpectVarint(t, 0, 1)\n\texpectVarint(t, 1<<7, 2)\n}\n<commit_msg>test: test all varints less than 2**16 againt VarintSize<commit_after>package multiaddr\n\nimport (\n\t\"encoding\/binary\"\n\t\"testing\"\n)\n\nfunc checkVarint(t *testing.T, x int) {\n\tbuf := make([]byte, binary.MaxVarintLen64)\n\texpected := binary.PutUvarint(buf, uint64(x))\n\n\tsize := VarintSize(x)\n\tif size != expected {\n\t\tt.Fatalf(\"expected varintsize of %d to be %d, got %d\", x, expected, size)\n\t}\n}\n\nfunc TestVarintSize(t *testing.T) {\n\tmax := 1 << 16\n\tfor x := 0; x < max; x++ {\n\t\tcheckVarint(t, x)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016 Pulcy.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage vault\n\nimport (\n\t\"crypto\/x509\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/vault\/api\"\n\t\"github.com\/juju\/errgo\"\n\t\"github.com\/mitchellh\/go-homedir\"\n\t\"github.com\/op\/go-logging\"\n)\n\ntype VaultConfig struct {\n\tVaultAddr string\n\tVaultCACert string\n\tVaultCAPath string\n}\n\ntype Vault struct {\n\tvaultClient *api.Client\n\tlog *logging.Logger\n}\n\nfunc NewVault(srvCfg VaultConfig, log *logging.Logger) (*Vault, error) {\n\t\/\/ Create a vault client\n\tconfig := api.DefaultConfig()\n\tif err := config.ReadEnvironment(); err != nil {\n\t\treturn nil, maskAny(err)\n\t}\n\tif srvCfg.VaultAddr != \"\" {\n\t\tconfig.Address = srvCfg.VaultAddr\n\t}\n\tif srvCfg.VaultCACert != \"\" || srvCfg.VaultCAPath != \"\" {\n\t\tvar newCertPool *x509.CertPool\n\t\tvar err error\n\t\tif srvCfg.VaultCACert != \"\" {\n\t\t\tlog.Debugf(\"Loading CA cert: %s\", srvCfg.VaultCACert)\n\t\t\tnewCertPool, err = api.LoadCACert(srvCfg.VaultCACert)\n\t\t} else {\n\t\t\tlog.Debugf(\"Loading CA certs from: %s\", srvCfg.VaultCAPath)\n\t\t\tnewCertPool, err = api.LoadCAPath(srvCfg.VaultCAPath)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, maskAny(err)\n\t\t}\n\t\tclientTLSConfig := config.HttpClient.Transport.(*http.Transport).TLSClientConfig\n\t\tclientTLSConfig.RootCAs = newCertPool\n\t}\n\tclient, err := api.NewClient(config)\n\tif err != nil {\n\t\treturn nil, maskAny(err)\n\t}\n\n\treturn &Vault{\n\t\tlog: log,\n\t\tvaultClient: client,\n\t}, nil\n\n}\n\ntype GithubLoginData struct {\n\tGithubToken string\n\tGithubTokenPath string\n\tMount string \/\/ defaults to \"github\"\n}\n\n\/\/ GithubLogin performs a standard Github authentication and initializes the vaultClient with the resulting token.\nfunc (s *Vault) GithubLogin(data GithubLoginData) error {\n\t\/\/ Read token\n\tvar err error\n\tdata.GithubToken, err = s.readGithubToken(data)\n\tif err != nil {\n\t\treturn maskAny(err)\n\t}\n\t\/\/ Perform login\n\ts.vaultClient.ClearToken()\n\tlogical := s.vaultClient.Logical()\n\tloginData := make(map[string]interface{})\n\tloginData[\"token\"] = data.GithubToken\n\tif data.Mount == \"\" {\n\t\tdata.Mount = \"github\"\n\t}\n\tpath := fmt.Sprintf(\"auth\/%s\/login\", data.Mount)\n\tif loginSecret, err := logical.Write(path, loginData); err != nil {\n\t\treturn maskAny(err)\n\t} else if loginSecret.Auth == nil {\n\t\treturn maskAny(errgo.WithCausef(nil, VaultError, \"missing authentication in secret response\"))\n\t} else {\n\t\t\/\/ Use token\n\t\ts.vaultClient.SetToken(loginSecret.Auth.ClientToken)\n\t}\n\n\t\/\/ We're done\n\treturn nil\n}\n\nfunc (s *Vault) readGithubToken(data GithubLoginData) (string, error) {\n\tif data.GithubToken != \"\" {\n\t\treturn data.GithubToken, nil\n\t}\n\tif data.GithubTokenPath == \"\" {\n\t\treturn \"\", maskAny(errgo.WithCausef(nil, InvalidArgumentError, \"No github token path set\"))\n\t}\n\tpath, err := homedir.Expand(data.GithubTokenPath)\n\tif err != nil {\n\t\treturn \"\", maskAny(err)\n\t}\n\traw, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn \"\", maskAny(err)\n\t}\n\treturn strings.TrimSpace(string(raw)), nil\n}\n\n\/\/ extractSecret extracts a secret based on given variables\n\/\/ Call a login method before calling this method.\nfunc (s *Vault) Extract(secretPath, secretField string) (string, error) {\n\tif secretPath == \"\" {\n\t\treturn \"\", maskAny(errgo.WithCausef(nil, InvalidArgumentError, \"path not set\"))\n\t}\n\tif secretField == \"\" {\n\t\treturn \"\", maskAny(errgo.WithCausef(nil, InvalidArgumentError, \"field not set\"))\n\t}\n\n\t\/\/ Load secret\n\ts.log.Infof(\"Read %s#%s\", secretPath, secretField)\n\tsecret, err := s.vaultClient.Logical().Read(secretPath)\n\tif err != nil {\n\t\treturn \"\", maskAny(errgo.WithCausef(nil, VaultError, \"error reading %s: %s\", secretPath, err))\n\t}\n\tif secret == nil {\n\t\treturn \"\", maskAny(errgo.WithCausef(nil, VaultError, \"no value found at %s\", secretPath))\n\t}\n\n\tif value, ok := secret.Data[secretField]; !ok {\n\t\treturn \"\", maskAny(errgo.WithCausef(nil, VaultError, \"no field '%s' found at %s\", secretField, secretPath))\n\t} else {\n\t\treturn value.(string), nil\n\t}\n}\n<commit_msg>Fixed SSL certificate error regarding IP addresses missing from certificate<commit_after>\/\/ Copyright (c) 2016 Pulcy.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage vault\n\nimport (\n\t\"crypto\/x509\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/vault\/api\"\n\t\"github.com\/juju\/errgo\"\n\t\"github.com\/mitchellh\/go-homedir\"\n\t\"github.com\/op\/go-logging\"\n)\n\ntype VaultConfig struct {\n\tVaultAddr string\n\tVaultCACert string\n\tVaultCAPath string\n}\n\ntype Vault struct {\n\tvaultClient *api.Client\n\tlog *logging.Logger\n}\n\nfunc NewVault(srvCfg VaultConfig, log *logging.Logger) (*Vault, error) {\n\t\/\/ Create a vault client\n\tconfig := api.DefaultConfig()\n\tif err := config.ReadEnvironment(); err != nil {\n\t\treturn nil, maskAny(err)\n\t}\n\tvar serverName string\n\tif srvCfg.VaultAddr != \"\" {\n\t\tlog.Debugf(\"Setting vault address to %s\", srvCfg.VaultAddr)\n\t\tconfig.Address = srvCfg.VaultAddr\n\t\turl, err := url.Parse(config.Address)\n\t\tif err != nil {\n\t\t\treturn nil, maskAny(err)\n\t\t}\n\t\thost, _, err := net.SplitHostPort(url.Host)\n\t\tif err != nil {\n\t\t\treturn nil, maskAny(err)\n\t\t}\n\t\tserverName = host\n\t}\n\tif srvCfg.VaultCACert != \"\" || srvCfg.VaultCAPath != \"\" {\n\t\tvar newCertPool *x509.CertPool\n\t\tvar err error\n\t\tif srvCfg.VaultCACert != \"\" {\n\t\t\tlog.Debugf(\"Loading CA cert: %s\", srvCfg.VaultCACert)\n\t\t\tnewCertPool, err = api.LoadCACert(srvCfg.VaultCACert)\n\t\t} else {\n\t\t\tlog.Debugf(\"Loading CA certs from: %s\", srvCfg.VaultCAPath)\n\t\t\tnewCertPool, err = api.LoadCAPath(srvCfg.VaultCAPath)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, maskAny(err)\n\t\t}\n\t\tclientTLSConfig := config.HttpClient.Transport.(*http.Transport).TLSClientConfig\n\t\tclientTLSConfig.RootCAs = newCertPool\n\t\tclientTLSConfig.ServerName = serverName\n\t}\n\tclient, err := api.NewClient(config)\n\tif err != nil {\n\t\treturn nil, maskAny(err)\n\t}\n\n\treturn &Vault{\n\t\tlog: log,\n\t\tvaultClient: client,\n\t}, nil\n\n}\n\ntype GithubLoginData struct {\n\tGithubToken string\n\tGithubTokenPath string\n\tMount string \/\/ defaults to \"github\"\n}\n\n\/\/ GithubLogin performs a standard Github authentication and initializes the vaultClient with the resulting token.\nfunc (s *Vault) GithubLogin(data GithubLoginData) error {\n\t\/\/ Read token\n\tvar err error\n\tdata.GithubToken, err = s.readGithubToken(data)\n\tif err != nil {\n\t\treturn maskAny(err)\n\t}\n\t\/\/ Perform login\n\ts.vaultClient.ClearToken()\n\tlogical := s.vaultClient.Logical()\n\tloginData := make(map[string]interface{})\n\tloginData[\"token\"] = data.GithubToken\n\tif data.Mount == \"\" {\n\t\tdata.Mount = \"github\"\n\t}\n\tpath := fmt.Sprintf(\"auth\/%s\/login\", data.Mount)\n\tif loginSecret, err := logical.Write(path, loginData); err != nil {\n\t\treturn maskAny(err)\n\t} else if loginSecret.Auth == nil {\n\t\treturn maskAny(errgo.WithCausef(nil, VaultError, \"missing authentication in secret response\"))\n\t} else {\n\t\t\/\/ Use token\n\t\ts.vaultClient.SetToken(loginSecret.Auth.ClientToken)\n\t}\n\n\t\/\/ We're done\n\treturn nil\n}\n\nfunc (s *Vault) readGithubToken(data GithubLoginData) (string, error) {\n\tif data.GithubToken != \"\" {\n\t\treturn data.GithubToken, nil\n\t}\n\tif data.GithubTokenPath == \"\" {\n\t\treturn \"\", maskAny(errgo.WithCausef(nil, InvalidArgumentError, \"No github token path set\"))\n\t}\n\tpath, err := homedir.Expand(data.GithubTokenPath)\n\tif err != nil {\n\t\treturn \"\", maskAny(err)\n\t}\n\traw, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn \"\", maskAny(err)\n\t}\n\treturn strings.TrimSpace(string(raw)), nil\n}\n\n\/\/ extractSecret extracts a secret based on given variables\n\/\/ Call a login method before calling this method.\nfunc (s *Vault) Extract(secretPath, secretField string) (string, error) {\n\tif secretPath == \"\" {\n\t\treturn \"\", maskAny(errgo.WithCausef(nil, InvalidArgumentError, \"path not set\"))\n\t}\n\tif secretField == \"\" {\n\t\treturn \"\", maskAny(errgo.WithCausef(nil, InvalidArgumentError, \"field not set\"))\n\t}\n\n\t\/\/ Load secret\n\ts.log.Infof(\"Read %s#%s\", secretPath, secretField)\n\tsecret, err := s.vaultClient.Logical().Read(secretPath)\n\tif err != nil {\n\t\treturn \"\", maskAny(errgo.WithCausef(nil, VaultError, \"error reading %s: %s\", secretPath, err))\n\t}\n\tif secret == nil {\n\t\treturn \"\", maskAny(errgo.WithCausef(nil, VaultError, \"no value found at %s\", secretPath))\n\t}\n\n\tif value, ok := secret.Data[secretField]; !ok {\n\t\treturn \"\", maskAny(errgo.WithCausef(nil, VaultError, \"no field '%s' found at %s\", secretField, secretPath))\n\t} else {\n\t\treturn value.(string), nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package vault\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\n\tvaultapi \"github.com\/hashicorp\/vault\/api\"\n\t\"k8s.io\/client-go\/rest\"\n)\n\n\/\/ Client is a Vault client with Kubernetes support and token automatic renewing\ntype Client struct {\n\tclient *vaultapi.Client\n\tlogical *vaultapi.Logical\n\ttokenRenewer *vaultapi.Renewer\n}\n\n\/\/ NewClient creates a new Vault client\nfunc NewClient(role string) (*Client, error) {\n\treturn NewClientWithConfig(vaultapi.DefaultConfig(), role)\n}\n\n\/\/ NewClientWithConfig creates a new Vault client with custom configuration\nfunc NewClientWithConfig(config *vaultapi.Config, role string) (*Client, error) {\n\tclient, err := vaultapi.NewClient(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogical := client.Logical()\n\tvar tokenRenewer *vaultapi.Renewer\n\n\tif client.Token() == \"\" {\n\n\t\ttoken, err := ioutil.ReadFile(os.Getenv(\"HOME\") + \"\/.vault-token\")\n\n\t\tif err == nil {\n\n\t\t\tclient.SetToken(string(token))\n\n\t\t} else {\n\t\t\t\/\/ If VAULT_TOKEN or ~\/.vault-token wasn't provided let's suppose\n\t\t\t\/\/ we are in Kubernetes and try to get one with the ServiceAccount token\n\n\t\t\tk8sconfig, err := rest.InClusterConfig()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tinitialTokenArrived := make(chan string, 1)\n\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tdata := map[string]interface{}{\"jwt\": k8sconfig.BearerToken, \"role\": role}\n\t\t\t\t\tsecret, err := logical.Write(\"auth\/kubernetes\/login\", data)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err.Error())\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tlog.Println(\"Received new Vault token\")\n\n\t\t\t\t\t\/\/ Set the first token from the response\n\t\t\t\t\tclient.SetToken(secret.Auth.ClientToken)\n\n\t\t\t\t\tinitialTokenArrived <- secret.LeaseID\n\n\t\t\t\t\t\/\/ Start the renewing process\n\t\t\t\t\ttokenRenewer, err = client.NewRenewer(&vaultapi.RenewerInput{Secret: secret})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err.Error())\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tgo tokenRenewer.Renew()\n\n\t\t\t\t\trunRenewChecker(tokenRenewer)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\t<-initialTokenArrived\n\t\t}\n\t}\n\n\treturn &Client{client: client, logical: logical, tokenRenewer: tokenRenewer}, nil\n}\n\nfunc runRenewChecker(tokenRenewer *vaultapi.Renewer) {\n\tfor {\n\t\tselect {\n\t\tcase err := <-tokenRenewer.DoneCh():\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Renew error:\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\tcase renewal := <-tokenRenewer.RenewCh():\n\t\t\tlog.Printf(\"Successfully renewed at: %s\", renewal.RenewedAt)\n\t\t}\n\t}\n}\n\n\/\/ Vault returns the underlying hashicorp Vault client\nfunc (client *Client) Vault() *vaultapi.Client {\n\treturn client.client\n}\n\n\/\/ Close stops the token renewing process of this client\nfunc (client *Client) Close() {\n\tif client.tokenRenewer != nil {\n\t\tlog.Println(\"Stopped Vault tokenRenewer\")\n\t\tclient.tokenRenewer.Stop()\n\t}\n}\n<commit_msg>Replace old tokenRenewer in case of a new one is created<commit_after>package vault\n\nimport (\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\n\tvaultapi \"github.com\/hashicorp\/vault\/api\"\n\t\"k8s.io\/client-go\/rest\"\n)\n\n\/\/ Client is a Vault client with Kubernetes support and token automatic renewing\ntype Client struct {\n\tclient *vaultapi.Client\n\tlogical *vaultapi.Logical\n\ttokenRenewer *vaultapi.Renewer\n}\n\n\/\/ NewClient creates a new Vault client\nfunc NewClient(role string) (*Client, error) {\n\treturn NewClientWithConfig(vaultapi.DefaultConfig(), role)\n}\n\n\/\/ NewClientWithConfig creates a new Vault client with custom configuration\nfunc NewClientWithConfig(config *vaultapi.Config, role string) (*Client, error) {\n\trawClient, err := vaultapi.NewClient(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogical := rawClient.Logical()\n\tvar tokenRenewer *vaultapi.Renewer\n\n\tclient := &Client{client: rawClient, logical: logical}\n\n\tif rawClient.Token() == \"\" {\n\n\t\ttoken, err := ioutil.ReadFile(os.Getenv(\"HOME\") + \"\/.vault-token\")\n\n\t\tif err == nil {\n\n\t\t\trawClient.SetToken(string(token))\n\n\t\t} else {\n\t\t\t\/\/ If VAULT_TOKEN or ~\/.vault-token wasn't provided let's suppose\n\t\t\t\/\/ we are in Kubernetes and try to get one with the ServiceAccount token\n\n\t\t\tk8sconfig, err := rest.InClusterConfig()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tinitialTokenArrived := make(chan string, 1)\n\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\tdata := map[string]interface{}{\"jwt\": k8sconfig.BearerToken, \"role\": role}\n\t\t\t\t\tsecret, err := logical.Write(\"auth\/kubernetes\/login\", data)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err.Error())\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tlog.Println(\"Received new Vault token\")\n\n\t\t\t\t\t\/\/ Set the first token from the response\n\t\t\t\t\trawClient.SetToken(secret.Auth.ClientToken)\n\n\t\t\t\t\tinitialTokenArrived <- secret.LeaseID\n\n\t\t\t\t\t\/\/ Start the renewing process\n\t\t\t\t\ttokenRenewer, err = rawClient.NewRenewer(&vaultapi.RenewerInput{Secret: secret})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tlog.Println(err.Error())\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tclient.tokenRenewer = tokenRenewer\n\n\t\t\t\t\tgo tokenRenewer.Renew()\n\n\t\t\t\t\trunRenewChecker(tokenRenewer)\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\t<-initialTokenArrived\n\t\t}\n\t}\n\n\treturn client, nil\n}\n\nfunc runRenewChecker(tokenRenewer *vaultapi.Renewer) {\n\tfor {\n\t\tselect {\n\t\tcase err := <-tokenRenewer.DoneCh():\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Renew error:\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\tcase renewal := <-tokenRenewer.RenewCh():\n\t\t\tlog.Printf(\"Successfully renewed at: %s\", renewal.RenewedAt)\n\t\t}\n\t}\n}\n\n\/\/ Vault returns the underlying hashicorp Vault client\nfunc (client *Client) Vault() *vaultapi.Client {\n\treturn client.client\n}\n\n\/\/ Close stops the token renewing process of this client\nfunc (client *Client) Close() {\n\tif client.tokenRenewer != nil {\n\t\tlog.Println(\"Stopped Vault tokenRenewer\")\n\t\tclient.tokenRenewer.Stop()\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package schema\n\nimport (\n\t\"encoding\/json\"\n\n\t\"github.com\/zclconf\/go-cty\/cty\"\n\tctyjson \"github.com\/zclconf\/go-cty\/cty\/json\"\n\n\t\"github.com\/hashicorp\/terraform\/configs\/configschema\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\n\/\/ DiffFromValues takes the current state and desired state as cty.Values and\n\/\/ derives a terraform.InstanceDiff to give to the legacy providers. This is\n\/\/ used to take the states provided by the new ApplyResourceChange method and\n\/\/ convert them to a state+diff required for the legacy Apply method.\nfunc DiffFromValues(prior, planned cty.Value, res *Resource) (*terraform.InstanceDiff, error) {\n\treturn diffFromValues(prior, planned, res, nil)\n}\n\n\/\/ diffFromValues takes an additional CustomizeDiffFunc, so we can generate our\n\/\/ test fixtures from the legacy tests. In the new provider protocol the diff\n\/\/ only needs to be created for the apply operation, and any customizations\n\/\/ have already been done.\nfunc diffFromValues(prior, planned cty.Value, res *Resource, cust CustomizeDiffFunc) (*terraform.InstanceDiff, error) {\n\tinstanceState, err := res.ShimInstanceStateFromValue(prior)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfigSchema := res.CoreConfigSchema()\n\n\tcfg := terraform.NewResourceConfigShimmed(planned, configSchema)\n\n\tdiff, err := schemaMap(res.Schema).Diff(instanceState, cfg, cust, nil, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn diff, err\n}\n\n\/\/ ApplyDiff takes a cty.Value state and applies a terraform.InstanceDiff to\n\/\/ get a new cty.Value state. This is used to convert the diff returned from\n\/\/ the legacy provider Diff method to the state required for the new\n\/\/ PlanResourceChange method.\nfunc ApplyDiff(base cty.Value, d *terraform.InstanceDiff, schema *configschema.Block) (cty.Value, error) {\n\treturn d.ApplyToValue(base, schema)\n}\n\n\/\/ StateValueToJSONMap converts a cty.Value to generic JSON map via the cty JSON\n\/\/ encoding.\nfunc StateValueToJSONMap(val cty.Value, ty cty.Type) (map[string]interface{}, error) {\n\tjs, err := ctyjson.Marshal(val, ty)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar m map[string]interface{}\n\tif err := json.Unmarshal(js, &m); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn m, nil\n}\n\n\/\/ JSONMapToStateValue takes a generic json map[string]interface{} and converts it\n\/\/ to the specific type, ensuring that the values conform to the schema.\nfunc JSONMapToStateValue(m map[string]interface{}, block *configschema.Block) (cty.Value, error) {\n\tvar val cty.Value\n\n\tjs, err := json.Marshal(m)\n\tif err != nil {\n\t\treturn val, err\n\t}\n\n\tval, err = ctyjson.Unmarshal(js, block.ImpliedType())\n\tif err != nil {\n\t\treturn val, err\n\t}\n\n\treturn block.CoerceValue(val)\n}\n\n\/\/ StateValueFromInstanceState converts a terraform.InstanceState to a\n\/\/ cty.Value as described by the provided cty.Type, and maintains the resource\n\/\/ ID as the \"id\" attribute.\nfunc StateValueFromInstanceState(is *terraform.InstanceState, ty cty.Type) (cty.Value, error) {\n\treturn is.AttrsAsObjectValue(ty)\n}\n\n\/\/ LegacyResourceSchema takes a *Resource and returns a deep copy with 0.12 specific\n\/\/ features removed. This is used by the shims to get a configschema that\n\/\/ directly matches the structure of the schema.Resource.\nfunc LegacyResourceSchema(r *Resource) *Resource {\n\tif r == nil {\n\t\treturn nil\n\t}\n\t\/\/ start with a shallow copy\n\tnewResource := new(Resource)\n\t*newResource = *r\n\tnewResource.Schema = map[string]*Schema{}\n\n\tfor k, s := range r.Schema {\n\t\tnewResource.Schema[k] = LegacySchema(s)\n\t}\n\n\treturn newResource\n}\n\n\/\/ LegacySchema takes a *Schema and returns a deep copy with 0.12 specific\n\/\/ features removed. This is used by the shims to get a configschema that\n\/\/ directly matches the structure of the schema.Resource.\nfunc LegacySchema(s *Schema) *Schema {\n\tif s == nil {\n\t\treturn nil\n\t}\n\t\/\/ start with a shallow copy\n\tnewSchema := new(Schema)\n\t*newSchema = *s\n\tnewSchema.ConfigMode = SchemaConfigModeAuto\n\tnewSchema.PromoteSingle = false\n\tnewSchema.SkipCoreTypeCheck = false\n\n\tswitch e := newSchema.Elem.(type) {\n\tcase *Schema:\n\t\tnewSchema.Elem = LegacySchema(e)\n\tcase *Resource:\n\t\tnewSchema.Elem = LegacyResourceSchema(e)\n\t}\n\n\treturn newSchema\n}\n<commit_msg>PromoteSingle is used in 0.11 mode<commit_after>package schema\n\nimport (\n\t\"encoding\/json\"\n\n\t\"github.com\/zclconf\/go-cty\/cty\"\n\tctyjson \"github.com\/zclconf\/go-cty\/cty\/json\"\n\n\t\"github.com\/hashicorp\/terraform\/configs\/configschema\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\n\/\/ DiffFromValues takes the current state and desired state as cty.Values and\n\/\/ derives a terraform.InstanceDiff to give to the legacy providers. This is\n\/\/ used to take the states provided by the new ApplyResourceChange method and\n\/\/ convert them to a state+diff required for the legacy Apply method.\nfunc DiffFromValues(prior, planned cty.Value, res *Resource) (*terraform.InstanceDiff, error) {\n\treturn diffFromValues(prior, planned, res, nil)\n}\n\n\/\/ diffFromValues takes an additional CustomizeDiffFunc, so we can generate our\n\/\/ test fixtures from the legacy tests. In the new provider protocol the diff\n\/\/ only needs to be created for the apply operation, and any customizations\n\/\/ have already been done.\nfunc diffFromValues(prior, planned cty.Value, res *Resource, cust CustomizeDiffFunc) (*terraform.InstanceDiff, error) {\n\tinstanceState, err := res.ShimInstanceStateFromValue(prior)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfigSchema := res.CoreConfigSchema()\n\n\tcfg := terraform.NewResourceConfigShimmed(planned, configSchema)\n\n\tdiff, err := schemaMap(res.Schema).Diff(instanceState, cfg, cust, nil, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn diff, err\n}\n\n\/\/ ApplyDiff takes a cty.Value state and applies a terraform.InstanceDiff to\n\/\/ get a new cty.Value state. This is used to convert the diff returned from\n\/\/ the legacy provider Diff method to the state required for the new\n\/\/ PlanResourceChange method.\nfunc ApplyDiff(base cty.Value, d *terraform.InstanceDiff, schema *configschema.Block) (cty.Value, error) {\n\treturn d.ApplyToValue(base, schema)\n}\n\n\/\/ StateValueToJSONMap converts a cty.Value to generic JSON map via the cty JSON\n\/\/ encoding.\nfunc StateValueToJSONMap(val cty.Value, ty cty.Type) (map[string]interface{}, error) {\n\tjs, err := ctyjson.Marshal(val, ty)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar m map[string]interface{}\n\tif err := json.Unmarshal(js, &m); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn m, nil\n}\n\n\/\/ JSONMapToStateValue takes a generic json map[string]interface{} and converts it\n\/\/ to the specific type, ensuring that the values conform to the schema.\nfunc JSONMapToStateValue(m map[string]interface{}, block *configschema.Block) (cty.Value, error) {\n\tvar val cty.Value\n\n\tjs, err := json.Marshal(m)\n\tif err != nil {\n\t\treturn val, err\n\t}\n\n\tval, err = ctyjson.Unmarshal(js, block.ImpliedType())\n\tif err != nil {\n\t\treturn val, err\n\t}\n\n\treturn block.CoerceValue(val)\n}\n\n\/\/ StateValueFromInstanceState converts a terraform.InstanceState to a\n\/\/ cty.Value as described by the provided cty.Type, and maintains the resource\n\/\/ ID as the \"id\" attribute.\nfunc StateValueFromInstanceState(is *terraform.InstanceState, ty cty.Type) (cty.Value, error) {\n\treturn is.AttrsAsObjectValue(ty)\n}\n\n\/\/ LegacyResourceSchema takes a *Resource and returns a deep copy with 0.12 specific\n\/\/ features removed. This is used by the shims to get a configschema that\n\/\/ directly matches the structure of the schema.Resource.\nfunc LegacyResourceSchema(r *Resource) *Resource {\n\tif r == nil {\n\t\treturn nil\n\t}\n\t\/\/ start with a shallow copy\n\tnewResource := new(Resource)\n\t*newResource = *r\n\tnewResource.Schema = map[string]*Schema{}\n\n\tfor k, s := range r.Schema {\n\t\tnewResource.Schema[k] = LegacySchema(s)\n\t}\n\n\treturn newResource\n}\n\n\/\/ LegacySchema takes a *Schema and returns a deep copy with 0.12 specific\n\/\/ features removed. This is used by the shims to get a configschema that\n\/\/ directly matches the structure of the schema.Resource.\nfunc LegacySchema(s *Schema) *Schema {\n\tif s == nil {\n\t\treturn nil\n\t}\n\t\/\/ start with a shallow copy\n\tnewSchema := new(Schema)\n\t*newSchema = *s\n\tnewSchema.ConfigMode = SchemaConfigModeAuto\n\tnewSchema.SkipCoreTypeCheck = false\n\n\tswitch e := newSchema.Elem.(type) {\n\tcase *Schema:\n\t\tnewSchema.Elem = LegacySchema(e)\n\tcase *Resource:\n\t\tnewSchema.Elem = LegacyResourceSchema(e)\n\t}\n\n\treturn newSchema\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package configutil provides an interface for loading and validating configuration\n\/\/ data from YAML files.\n\/\/\n\/\/ Other YAML files could be included via the following directive:\n\/\/\n\/\/ production.yaml:\n\/\/ extends: base.yaml\n\/\/\n\/\/ There is no multiple inheritance supported. Dependency tree suppossed to\n\/\/ form a directed acyclic graph(DAG).\n\/\/\n\/\/\n\/\/ Values from multiple configurations within the same hierarchy are deep merged\n\/\/\n\/\/ Package support multiple configuraton directories potentially having multiple files\n\/\/ with the the same name. In this the we just follow the path in extends and load all\n\/\/ the file accroding to the relative directory, i.e configA: base.yaml production.yaml(extends base.yaml), configB: base.yaml the load sequance\n\/\/ will be the following: configA(base.yaml), configA(production.yaml)\n\/\/\n\/\/ Note regarding configuration merging:\n\/\/ Array defined in YAML will be overriden based on load sequence.\n\/\/ e.g. in the base.yaml:\n\/\/ sports:\n\/\/ - football\n\/\/ in the development.yaml:\n\/\/ extends: base.yaml\n\/\/ sports:\n\/\/ - basketball\n\/\/ after the merge:\n\/\/ sports:\n\/\/ - basketball \/\/ only keep the latest one\n\/\/\n\/\/ Map defined in YAML will be merged together based on load sequence.\n\/\/ e.g. in the base.yaml:\n\/\/ sports:\n\/\/ football: true\n\/\/ in the development.yaml:\n\/\/ extends: base.yaml\n\/\/ sports:\n\/\/ basketball: true\n\/\/ after the merge:\n\/\/ sports: \/\/ combine all the map fields\n\/\/ football: true\n\/\/ basketball: true\n\/\/\npackage configutil\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"code.uber.internal\/infra\/kraken\/utils\/stringset\"\n\n\t\"gopkg.in\/validator.v2\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nconst (\n\tconfigDirKey = \"UBER_CONFIG_DIR\"\n)\n\nconst (\n\tconfigDir = \"config\"\n\tsecretsFile = \"secrets.yaml\"\n\tconfigSeparator = \":\"\n)\n\n\/\/ ErrNoFilesToLoad is returned when you attemp to call LoadFiles with no file paths\nvar ErrNoFilesToLoad = errors.New(\"attempt to load configuration with no files\")\n\n\/\/ ErrCycleRef is returned when there are circular dependencies detected in\n\/\/ configuraiton files extending each other\nvar ErrCycleRef = errors.New(\"cyclic reference in configuration extends detected\")\n\n\/\/ Extends define a keywoword in config for extending a base configuration file\ntype Extends struct {\n\tExtends string `yaml:\"extends\"`\n}\n\n\/\/ ValidationError is the returned when a configuration fails to pass validation\ntype ValidationError struct {\n\terrorMap validator.ErrorMap\n}\n\n\/\/ ErrForField returns the validation error for the given field\nfunc (e ValidationError) ErrForField(name string) error {\n\treturn e.errorMap[name]\n}\n\nfunc (e ValidationError) Error() string {\n\tvar w bytes.Buffer\n\n\tfmt.Fprintf(&w, \"validation failed\")\n\tfor f, err := range e.errorMap {\n\t\tfmt.Fprintf(&w, \" %s: %v\\n\", f, err)\n\t}\n\n\treturn w.String()\n}\n\n\/\/ FilterCandidates filters candidate config files into only the ones that exist\nfunc FilterCandidates(fname string) ([]string, error) {\n\trealConfigDirs := []string{configDir}\n\t\/\/ Allow overriding the directory config is loaded from, useful for tests\n\t\/\/ inside subdirectories when the config\/ dir is in the top-level of a project.\n\tif configRoot := os.Getenv(configDirKey); configRoot != \"\" {\n\t\trealConfigDirs = strings.Split(configRoot, configSeparator)\n\t}\n\treturn filterCandidatesFromDirs(fname, realConfigDirs)\n}\n\nfunc readExtend(configFile string) (string, error) {\n\tvar cfg Extends\n\n\tdata, err := ioutil.ReadFile(configFile)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := yaml.Unmarshal(data, &cfg); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn cfg.Extends, nil\n}\n\nfunc getCandidate(fname string, dirs []string) string {\n\tcandidate := \"\"\n\tfor _, realConfigDir := range dirs {\n\t\tconfigFile := path.Join(realConfigDir, fname)\n\t\tif _, err := os.Stat(configFile); err == nil {\n\t\t\tcandidate = configFile\n\t\t}\n\t}\n\treturn candidate\n}\n\nfunc filterCandidatesFromDirs(fname string, dirs []string) ([]string, error) {\n\n\tvar paths []string\n\tcSet := make(stringset.Set)\n\n\t\/\/ Go through all the 'extends' hierarchy until\n\t\/\/ there is no base anymore or some reference cycles\n\t\/\/ been detected.\n\tcSet.Add(fname)\n\n\tcandidate := getCandidate(fname, dirs)\n\tfmt.Fprintf(os.Stderr, \"candidate: %s\\n\", candidate)\n\tif candidate == \"\" {\n\t\treturn nil, fmt.Errorf(\"file %s not found in %s\", fname, dirs)\n\t}\n\n\tfor {\n\t\textends, err := readExtend(candidate)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpaths = append([]string{candidate}, paths...)\n\t\tif extends != \"\" {\n\t\t\t\/\/ prevent cycle references\n\t\t\tif !cSet.Has(extends) {\n\t\t\t\tcandidate = path.Join(filepath.Dir(candidate), extends)\n\t\t\t\tcSet.Add(extends)\n\t\t\t} else {\n\t\t\t\treturn nil, ErrCycleRef\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ append secrets\n\tcandidate = getCandidate(secretsFile, dirs)\n\tif candidate != \"\" {\n\t\tpaths = append(paths, candidate)\n\t}\n\n\treturn paths, nil\n}\n\n\/\/ Load loads configuration based on environment variables\nfunc Load(fname string, config interface{}) error {\n\tcandidates, err := FilterCandidates(fname)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn LoadFiles(config, candidates...)\n}\n\n\/\/ LoadFile loads configuration based on config directory\n\/\/ where the input file is located\nfunc LoadFile(fname string, config interface{}) error {\n\tcandidates, err := filterCandidatesFromDirs(\n\t\tfilepath.Base(fname), []string{filepath.Dir(fname)})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn LoadFiles(config, candidates...)\n}\n\n\/\/ LoadFiles loads a list of files, deep-merging values.\n\/\/ This function is exposed for using from tests. For production it's recommended\n\/\/ to use the default resolution and the Load() method.\nfunc LoadFiles(config interface{}, fnames ...string) error {\n\tif len(fnames) == 0 {\n\t\treturn ErrNoFilesToLoad\n\t}\n\tfor _, fname := range fnames {\n\t\tdata, err := ioutil.ReadFile(fname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := yaml.Unmarshal(data, config); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Validate on the merged config at the end.\n\tif err := validator.Validate(config); err != nil {\n\t\treturn ValidationError{\n\t\t\terrorMap: err.(validator.ErrorMap),\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Fix proxy config<commit_after>\/\/ Package configutil provides an interface for loading and validating configuration\n\/\/ data from YAML files.\n\/\/\n\/\/ Other YAML files could be included via the following directive:\n\/\/\n\/\/ production.yaml:\n\/\/ extends: base.yaml\n\/\/\n\/\/ There is no multiple inheritance supported. Dependency tree suppossed to\n\/\/ form a directed acyclic graph(DAG).\n\/\/\n\/\/\n\/\/ Values from multiple configurations within the same hierarchy are deep merged\n\/\/\n\/\/ Package support multiple configuraton directories potentially having multiple files\n\/\/ with the the same name. In this the we just follow the path in extends and load all\n\/\/ the file accroding to the relative directory, i.e configA: base.yaml production.yaml(extends base.yaml), configB: base.yaml the load sequance\n\/\/ will be the following: configA(base.yaml), configA(production.yaml)\n\/\/\n\/\/ Note regarding configuration merging:\n\/\/ Array defined in YAML will be overriden based on load sequence.\n\/\/ e.g. in the base.yaml:\n\/\/ sports:\n\/\/ - football\n\/\/ in the development.yaml:\n\/\/ extends: base.yaml\n\/\/ sports:\n\/\/ - basketball\n\/\/ after the merge:\n\/\/ sports:\n\/\/ - basketball \/\/ only keep the latest one\n\/\/\n\/\/ Map defined in YAML will be merged together based on load sequence.\n\/\/ e.g. in the base.yaml:\n\/\/ sports:\n\/\/ football: true\n\/\/ in the development.yaml:\n\/\/ extends: base.yaml\n\/\/ sports:\n\/\/ basketball: true\n\/\/ after the merge:\n\/\/ sports: \/\/ combine all the map fields\n\/\/ football: true\n\/\/ basketball: true\n\/\/\npackage configutil\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"code.uber.internal\/infra\/kraken\/utils\/stringset\"\n\n\t\"gopkg.in\/validator.v2\"\n\t\"gopkg.in\/yaml.v2\"\n)\n\nconst (\n\tconfigDirKey = \"UBER_CONFIG_DIR\"\n)\n\nconst (\n\tconfigDir = \"config\"\n\tsecretsFile = \"secrets.yaml\"\n\tconfigSeparator = \":\"\n)\n\n\/\/ ErrNoFilesToLoad is returned when you attemp to call LoadFiles with no file paths\nvar ErrNoFilesToLoad = errors.New(\"attempt to load configuration with no files\")\n\n\/\/ ErrCycleRef is returned when there are circular dependencies detected in\n\/\/ configuraiton files extending each other\nvar ErrCycleRef = errors.New(\"cyclic reference in configuration extends detected\")\n\n\/\/ Extends define a keywoword in config for extending a base configuration file\ntype Extends struct {\n\tExtends string `yaml:\"extends\"`\n}\n\n\/\/ ValidationError is the returned when a configuration fails to pass validation\ntype ValidationError struct {\n\terrorMap validator.ErrorMap\n}\n\n\/\/ ErrForField returns the validation error for the given field\nfunc (e ValidationError) ErrForField(name string) error {\n\treturn e.errorMap[name]\n}\n\nfunc (e ValidationError) Error() string {\n\tvar w bytes.Buffer\n\n\tfmt.Fprintf(&w, \"validation failed\")\n\tfor f, err := range e.errorMap {\n\t\tfmt.Fprintf(&w, \" %s: %v\\n\", f, err)\n\t}\n\n\treturn w.String()\n}\n\n\/\/ FilterCandidates filters candidate config files into only the ones that exist\nfunc FilterCandidates(fname string) ([]string, error) {\n\trealConfigDirs := []string{configDir}\n\t\/\/ Allow overriding the directory config is loaded from, useful for tests\n\t\/\/ inside subdirectories when the config\/ dir is in the top-level of a project.\n\tif configRoot := os.Getenv(configDirKey); configRoot != \"\" {\n\t\trealConfigDirs = strings.Split(configRoot, configSeparator)\n\t}\n\treturn filterCandidatesFromDirs(fname, realConfigDirs)\n}\n\nfunc readExtend(configFile string) (string, error) {\n\tvar cfg Extends\n\n\tdata, err := ioutil.ReadFile(configFile)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := yaml.Unmarshal(data, &cfg); err != nil {\n\t\treturn \"\", fmt.Errorf(\"unmarshal %s: %s\", configFile, err)\n\t}\n\treturn cfg.Extends, nil\n}\n\nfunc getCandidate(fname string, dirs []string) string {\n\tcandidate := \"\"\n\tfor _, realConfigDir := range dirs {\n\t\tconfigFile := path.Join(realConfigDir, fname)\n\t\tif _, err := os.Stat(configFile); err == nil {\n\t\t\tcandidate = configFile\n\t\t}\n\t}\n\treturn candidate\n}\n\nfunc filterCandidatesFromDirs(fname string, dirs []string) ([]string, error) {\n\n\tvar paths []string\n\tcSet := make(stringset.Set)\n\n\t\/\/ Go through all the 'extends' hierarchy until\n\t\/\/ there is no base anymore or some reference cycles\n\t\/\/ been detected.\n\tcSet.Add(fname)\n\n\tcandidate := getCandidate(fname, dirs)\n\tfmt.Fprintf(os.Stderr, \"candidate: %s\\n\", candidate)\n\tif candidate == \"\" {\n\t\treturn nil, fmt.Errorf(\"file %s not found in %s\", fname, dirs)\n\t}\n\n\tfor {\n\t\textends, err := readExtend(candidate)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpaths = append([]string{candidate}, paths...)\n\t\tif extends != \"\" {\n\t\t\t\/\/ prevent cycle references\n\t\t\tif !cSet.Has(extends) {\n\t\t\t\tcandidate = path.Join(filepath.Dir(candidate), extends)\n\t\t\t\tcSet.Add(extends)\n\t\t\t} else {\n\t\t\t\treturn nil, ErrCycleRef\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ append secrets\n\tcandidate = getCandidate(secretsFile, dirs)\n\tif candidate != \"\" {\n\t\tpaths = append(paths, candidate)\n\t}\n\n\treturn paths, nil\n}\n\n\/\/ Load loads configuration based on environment variables\nfunc Load(fname string, config interface{}) error {\n\tcandidates, err := FilterCandidates(fname)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn LoadFiles(config, candidates...)\n}\n\n\/\/ LoadFile loads configuration based on config directory\n\/\/ where the input file is located\nfunc LoadFile(fname string, config interface{}) error {\n\tcandidates, err := filterCandidatesFromDirs(\n\t\tfilepath.Base(fname), []string{filepath.Dir(fname)})\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn LoadFiles(config, candidates...)\n}\n\n\/\/ LoadFiles loads a list of files, deep-merging values.\n\/\/ This function is exposed for using from tests. For production it's recommended\n\/\/ to use the default resolution and the Load() method.\nfunc LoadFiles(config interface{}, fnames ...string) error {\n\tif len(fnames) == 0 {\n\t\treturn ErrNoFilesToLoad\n\t}\n\tfor _, fname := range fnames {\n\t\tdata, err := ioutil.ReadFile(fname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := yaml.Unmarshal(data, config); err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshal %s: %s\", fname, err)\n\t\t}\n\t}\n\n\t\/\/ Validate on the merged config at the end.\n\tif err := validator.Validate(config); err != nil {\n\t\treturn ValidationError{\n\t\t\terrorMap: err.(validator.ErrorMap),\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/: ----------------------------------------------------------------------------\n\/\/: Copyright (C) 2017 Verizon. All Rights Reserved.\n\/\/: All Rights Reserved\n\/\/:\n\/\/: file: ipfix.go\n\/\/: details: ipfix decoders handler\n\/\/: author: Mehrdad Arshad Rad\n\/\/: date: 02\/01\/2017\n\/\/:\n\/\/: Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/: you may not use this file except in compliance with the License.\n\/\/: You may obtain a copy of the License at\n\/\/:\n\/\/: http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/:\n\/\/: Unless required by applicable law or agreed to in writing, software\n\/\/: distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/: See the License for the specific language governing permissions and\n\/\/: limitations under the License.\n\/\/: ----------------------------------------------------------------------------\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"net\"\n\t\"strconv\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/VerizonDigital\/vflow\/ipfix\"\n\t\"github.com\/VerizonDigital\/vflow\/producer\"\n)\n\n\/\/ IPFIX represents IPFIX collector\ntype IPFIX struct {\n\tport int\n\taddr string\n\tworkers int\n\tstop bool\n\tstats IPFIXStats\n\tpool chan chan struct{}\n}\n\n\/\/ IPFIXUDPMsg represents IPFIX UDP data\ntype IPFIXUDPMsg struct {\n\traddr *net.UDPAddr\n\tbody []byte\n}\n\n\/\/ IPFIXStats represents IPFIX stats\ntype IPFIXStats struct {\n\tUDPQueue int\n\tUDPMirrorQueue int\n\tMessageQueue int\n\tUDPCount uint64\n\tDecodedCount uint64\n\tMQErrorCount uint64\n\tWorkers int32\n}\n\nvar (\n\tipfixUDPCh = make(chan IPFIXUDPMsg, 1000)\n\tipfixMCh = make(chan IPFIXUDPMsg, 1000)\n\tipfixMQCh = make(chan []byte, 1000)\n\tipfixMirrorEnabled bool\n\n\t\/\/ templates memory cache\n\tmCache ipfix.MemCache\n\n\t\/\/ ipfix udp payload pool\n\tipfixBuffer = &sync.Pool{\n\t\tNew: func() interface{} {\n\t\t\treturn make([]byte, opts.IPFIXUDPSize)\n\t\t},\n\t}\n)\n\n\/\/ NewIPFIX constructs IPFIX\nfunc NewIPFIX() *IPFIX {\n\treturn &IPFIX{\n\t\tport: opts.IPFIXPort,\n\t\tworkers: opts.IPFIXWorkers,\n\t\tpool: make(chan chan struct{}, maxWorkers),\n\t}\n}\n\nfunc (i *IPFIX) run() {\n\t\/\/ exit if the ipfix is disabled\n\tif !opts.IPFIXEnabled {\n\t\tlogger.Println(\"ipfix has been disabled\")\n\t\treturn\n\t}\n\n\thostPort := net.JoinHostPort(i.addr, strconv.Itoa(i.port))\n\tudpAddr, _ := net.ResolveUDPAddr(\"udp\", hostPort)\n\n\tconn, err := net.ListenUDP(\"udp\", udpAddr)\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\n\tatomic.AddInt32(&i.stats.Workers, int32(i.workers))\n\tfor n := 0; n < i.workers; n++ {\n\t\tgo func() {\n\t\t\twQuit := make(chan struct{})\n\t\t\ti.pool <- wQuit\n\t\t\ti.ipfixWorker(wQuit)\n\t\t}()\n\t}\n\n\tlogger.Printf(\"ipfix is running (workers#: %d)\", i.workers)\n\n\tmCache = ipfix.GetCache(opts.IPFIXTplCacheFile)\n\tgo ipfix.RPC(mCache, &ipfix.RPCConfig{\n\t\tEnabled: opts.IPFIXRPCEnabled,\n\t\tLogger: logger,\n\t})\n\n\tgo mirrorIPFIXDispatcher(ipfixMCh)\n\n\tgo func() {\n\t\tp := producer.NewProducer(opts.MQName)\n\n\t\tp.MQConfigFile = opts.MQConfigFile\n\t\tp.MQErrorCount = &i.stats.MQErrorCount\n\t\tp.Logger = logger\n\t\tp.Chan = ipfixMQCh\n\t\tp.Topic = opts.IPFIXTopic\n\n\t\tif err := p.Run(); err != nil {\n\t\t\tlogger.Fatal(err)\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tif !opts.DynWorkers {\n\t\t\tlogger.Println(\"IPFIX dynamic worker disabled\")\n\t\t\treturn\n\t\t}\n\n\t\ti.dynWorkers()\n\t}()\n\n\tfor !i.stop {\n\t\tb := ipfixBuffer.Get().([]byte)\n\t\tconn.SetReadDeadline(time.Now().Add(1e9))\n\t\tn, raddr, err := conn.ReadFromUDP(b)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tatomic.AddUint64(&i.stats.UDPCount, 1)\n\t\tipfixUDPCh <- IPFIXUDPMsg{raddr, b[:n]}\n\t}\n\n}\n\nfunc (i *IPFIX) shutdown() {\n\t\/\/ exit if the ipfix is disabled\n\tif !opts.IPFIXEnabled {\n\t\tlogger.Println(\"ipfix disabled\")\n\t\treturn\n\t}\n\n\t\/\/ stop reading from UDP listener\n\ti.stop = true\n\tlogger.Println(\"stopping ipfix service gracefully ...\")\n\ttime.Sleep(1 * time.Second)\n\n\t\/\/ dump the templates to storage\n\tif err := mCache.Dump(opts.IPFIXTplCacheFile); err != nil {\n\t\tlogger.Println(\"couldn't not dump template\", err)\n\t}\n\n\t\/\/ logging and close UDP channel\n\tlogger.Println(\"ipfix has been shutdown\")\n\tclose(ipfixUDPCh)\n}\n\nfunc (i *IPFIX) ipfixWorker(wQuit chan struct{}) {\n\tvar (\n\t\tdecodedMsg *ipfix.Message\n\t\tmirror IPFIXUDPMsg\n\t\tmsg = IPFIXUDPMsg{body: ipfixBuffer.Get().([]byte)}\n\t\tbuf = new(bytes.Buffer)\n\t\terr error\n\t\tok bool\n\t\tb []byte\n\t)\n\nLOOP:\n\tfor {\n\n\t\tipfixBuffer.Put(msg.body[:opts.IPFIXUDPSize])\n\t\tbuf.Reset()\n\n\t\tselect {\n\t\tcase <-wQuit:\n\t\t\tbreak LOOP\n\t\tcase msg, ok = <-ipfixUDPCh:\n\t\t\tif !ok {\n\t\t\t\tbreak LOOP\n\t\t\t}\n\t\t}\n\n\t\tif opts.Verbose {\n\t\t\tlogger.Printf(\"rcvd ipfix data from: %s, size: %d bytes\",\n\t\t\t\tmsg.raddr, len(msg.body))\n\t\t}\n\n\t\tif ipfixMirrorEnabled {\n\t\t\tmirror.body = ipfixBuffer.Get().([]byte)\n\t\t\tmirror.raddr = msg.raddr\n\t\t\tmirror.body = append(mirror.body[:0], msg.body...)\n\n\t\t\tselect {\n\t\t\tcase ipfixMCh <- mirror:\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\n\t\td := ipfix.NewDecoder(msg.raddr.IP, msg.body)\n\t\tif decodedMsg, err = d.Decode(mCache); err != nil {\n\t\t\tlogger.Println(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tatomic.AddUint64(&i.stats.DecodedCount, 1)\n\n\t\tif len(decodedMsg.DataSets) > 0 {\n\t\t\tb, err = decodedMsg.JSONMarshal(buf)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase ipfixMQCh <- append([]byte{}, b...):\n\t\t\tdefault:\n\t\t\t}\n\n\t\t\tif opts.Verbose {\n\t\t\t\tlogger.Println(string(b))\n\t\t\t}\n\t\t}\n\n\t}\n}\n\nfunc (i *IPFIX) status() *IPFIXStats {\n\treturn &IPFIXStats{\n\t\tUDPQueue: len(ipfixUDPCh),\n\t\tUDPMirrorQueue: len(ipfixMCh),\n\t\tMessageQueue: len(ipfixMQCh),\n\t\tUDPCount: atomic.LoadUint64(&i.stats.UDPCount),\n\t\tDecodedCount: atomic.LoadUint64(&i.stats.DecodedCount),\n\t\tMQErrorCount: atomic.LoadUint64(&i.stats.MQErrorCount),\n\t\tWorkers: atomic.LoadInt32(&i.stats.Workers),\n\t}\n}\n\nfunc (i *IPFIX) dynWorkers() {\n\tvar load, nSeq, newWorkers, workers, n int\n\n\ttick := time.Tick(120 * time.Second)\n\n\tfor {\n\t\t<-tick\n\t\tload = 0\n\n\t\tfor n = 0; n < 30; n++ {\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tload += len(ipfixUDPCh)\n\t\t}\n\n\t\tif load > 15 {\n\n\t\t\tswitch {\n\t\t\tcase load > 300:\n\t\t\t\tnewWorkers = 100\n\t\t\tcase load > 200:\n\t\t\t\tnewWorkers = 60\n\t\t\tcase load > 100:\n\t\t\t\tnewWorkers = 40\n\t\t\tdefault:\n\t\t\t\tnewWorkers = 30\n\t\t\t}\n\n\t\t\tworkers = int(atomic.LoadInt32(&i.stats.Workers))\n\t\t\tif workers+newWorkers > maxWorkers {\n\t\t\t\tlogger.Println(\"ipfix :: max out workers\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor n = 0; n < newWorkers; n++ {\n\t\t\t\tgo func() {\n\t\t\t\t\tatomic.AddInt32(&i.stats.Workers, 1)\n\t\t\t\t\twQuit := make(chan struct{})\n\t\t\t\t\ti.pool <- wQuit\n\t\t\t\t\ti.ipfixWorker(wQuit)\n\t\t\t\t}()\n\t\t\t}\n\n\t\t}\n\n\t\tif load == 0 {\n\t\t\tnSeq++\n\t\t} else {\n\t\t\tnSeq = 0\n\t\t\tcontinue\n\t\t}\n\n\t\tif nSeq > 15 {\n\t\t\tfor n = 0; n < 10; n++ {\n\t\t\t\tif len(i.pool) > i.workers {\n\t\t\t\t\tatomic.AddInt32(&i.stats.Workers, -1)\n\t\t\t\t\twQuit := <-i.pool\n\t\t\t\t\tclose(wQuit)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tnSeq = 0\n\t\t}\n\t}\n}\n<commit_msg>prevent continue loop once we have at least one data set<commit_after>\/\/: ----------------------------------------------------------------------------\n\/\/: Copyright (C) 2017 Verizon. All Rights Reserved.\n\/\/: All Rights Reserved\n\/\/:\n\/\/: file: ipfix.go\n\/\/: details: ipfix decoders handler\n\/\/: author: Mehrdad Arshad Rad\n\/\/: date: 02\/01\/2017\n\/\/:\n\/\/: Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/: you may not use this file except in compliance with the License.\n\/\/: You may obtain a copy of the License at\n\/\/:\n\/\/: http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/:\n\/\/: Unless required by applicable law or agreed to in writing, software\n\/\/: distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/: See the License for the specific language governing permissions and\n\/\/: limitations under the License.\n\/\/: ----------------------------------------------------------------------------\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"net\"\n\t\"strconv\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/VerizonDigital\/vflow\/ipfix\"\n\t\"github.com\/VerizonDigital\/vflow\/producer\"\n)\n\n\/\/ IPFIX represents IPFIX collector\ntype IPFIX struct {\n\tport int\n\taddr string\n\tworkers int\n\tstop bool\n\tstats IPFIXStats\n\tpool chan chan struct{}\n}\n\n\/\/ IPFIXUDPMsg represents IPFIX UDP data\ntype IPFIXUDPMsg struct {\n\traddr *net.UDPAddr\n\tbody []byte\n}\n\n\/\/ IPFIXStats represents IPFIX stats\ntype IPFIXStats struct {\n\tUDPQueue int\n\tUDPMirrorQueue int\n\tMessageQueue int\n\tUDPCount uint64\n\tDecodedCount uint64\n\tMQErrorCount uint64\n\tWorkers int32\n}\n\nvar (\n\tipfixUDPCh = make(chan IPFIXUDPMsg, 1000)\n\tipfixMCh = make(chan IPFIXUDPMsg, 1000)\n\tipfixMQCh = make(chan []byte, 1000)\n\tipfixMirrorEnabled bool\n\n\t\/\/ templates memory cache\n\tmCache ipfix.MemCache\n\n\t\/\/ ipfix udp payload pool\n\tipfixBuffer = &sync.Pool{\n\t\tNew: func() interface{} {\n\t\t\treturn make([]byte, opts.IPFIXUDPSize)\n\t\t},\n\t}\n)\n\n\/\/ NewIPFIX constructs IPFIX\nfunc NewIPFIX() *IPFIX {\n\treturn &IPFIX{\n\t\tport: opts.IPFIXPort,\n\t\tworkers: opts.IPFIXWorkers,\n\t\tpool: make(chan chan struct{}, maxWorkers),\n\t}\n}\n\nfunc (i *IPFIX) run() {\n\t\/\/ exit if the ipfix is disabled\n\tif !opts.IPFIXEnabled {\n\t\tlogger.Println(\"ipfix has been disabled\")\n\t\treturn\n\t}\n\n\thostPort := net.JoinHostPort(i.addr, strconv.Itoa(i.port))\n\tudpAddr, _ := net.ResolveUDPAddr(\"udp\", hostPort)\n\n\tconn, err := net.ListenUDP(\"udp\", udpAddr)\n\tif err != nil {\n\t\tlogger.Fatal(err)\n\t}\n\n\tatomic.AddInt32(&i.stats.Workers, int32(i.workers))\n\tfor n := 0; n < i.workers; n++ {\n\t\tgo func() {\n\t\t\twQuit := make(chan struct{})\n\t\t\ti.pool <- wQuit\n\t\t\ti.ipfixWorker(wQuit)\n\t\t}()\n\t}\n\n\tlogger.Printf(\"ipfix is running (workers#: %d)\", i.workers)\n\n\tmCache = ipfix.GetCache(opts.IPFIXTplCacheFile)\n\tgo ipfix.RPC(mCache, &ipfix.RPCConfig{\n\t\tEnabled: opts.IPFIXRPCEnabled,\n\t\tLogger: logger,\n\t})\n\n\tgo mirrorIPFIXDispatcher(ipfixMCh)\n\n\tgo func() {\n\t\tp := producer.NewProducer(opts.MQName)\n\n\t\tp.MQConfigFile = opts.MQConfigFile\n\t\tp.MQErrorCount = &i.stats.MQErrorCount\n\t\tp.Logger = logger\n\t\tp.Chan = ipfixMQCh\n\t\tp.Topic = opts.IPFIXTopic\n\n\t\tif err := p.Run(); err != nil {\n\t\t\tlogger.Fatal(err)\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tif !opts.DynWorkers {\n\t\t\tlogger.Println(\"IPFIX dynamic worker disabled\")\n\t\t\treturn\n\t\t}\n\n\t\ti.dynWorkers()\n\t}()\n\n\tfor !i.stop {\n\t\tb := ipfixBuffer.Get().([]byte)\n\t\tconn.SetReadDeadline(time.Now().Add(1e9))\n\t\tn, raddr, err := conn.ReadFromUDP(b)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tatomic.AddUint64(&i.stats.UDPCount, 1)\n\t\tipfixUDPCh <- IPFIXUDPMsg{raddr, b[:n]}\n\t}\n\n}\n\nfunc (i *IPFIX) shutdown() {\n\t\/\/ exit if the ipfix is disabled\n\tif !opts.IPFIXEnabled {\n\t\tlogger.Println(\"ipfix disabled\")\n\t\treturn\n\t}\n\n\t\/\/ stop reading from UDP listener\n\ti.stop = true\n\tlogger.Println(\"stopping ipfix service gracefully ...\")\n\ttime.Sleep(1 * time.Second)\n\n\t\/\/ dump the templates to storage\n\tif err := mCache.Dump(opts.IPFIXTplCacheFile); err != nil {\n\t\tlogger.Println(\"couldn't not dump template\", err)\n\t}\n\n\t\/\/ logging and close UDP channel\n\tlogger.Println(\"ipfix has been shutdown\")\n\tclose(ipfixUDPCh)\n}\n\nfunc (i *IPFIX) ipfixWorker(wQuit chan struct{}) {\n\tvar (\n\t\tdecodedMsg *ipfix.Message\n\t\tmirror IPFIXUDPMsg\n\t\tmsg = IPFIXUDPMsg{body: ipfixBuffer.Get().([]byte)}\n\t\tbuf = new(bytes.Buffer)\n\t\terr error\n\t\tok bool\n\t\tb []byte\n\t)\n\nLOOP:\n\tfor {\n\n\t\tipfixBuffer.Put(msg.body[:opts.IPFIXUDPSize])\n\t\tbuf.Reset()\n\n\t\tselect {\n\t\tcase <-wQuit:\n\t\t\tbreak LOOP\n\t\tcase msg, ok = <-ipfixUDPCh:\n\t\t\tif !ok {\n\t\t\t\tbreak LOOP\n\t\t\t}\n\t\t}\n\n\t\tif opts.Verbose {\n\t\t\tlogger.Printf(\"rcvd ipfix data from: %s, size: %d bytes\",\n\t\t\t\tmsg.raddr, len(msg.body))\n\t\t}\n\n\t\tif ipfixMirrorEnabled {\n\t\t\tmirror.body = ipfixBuffer.Get().([]byte)\n\t\t\tmirror.raddr = msg.raddr\n\t\t\tmirror.body = append(mirror.body[:0], msg.body...)\n\n\t\t\tselect {\n\t\t\tcase ipfixMCh <- mirror:\n\t\t\tdefault:\n\t\t\t}\n\t\t}\n\n\t\td := ipfix.NewDecoder(msg.raddr.IP, msg.body)\n\t\tif decodedMsg, err = d.Decode(mCache); err != nil {\n\t\t\tlogger.Println(err)\n\t\t\t\/\/ in case ipfix message header couldn't decode\n\t\t\tif decodedMsg == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tatomic.AddUint64(&i.stats.DecodedCount, 1)\n\n\t\tif len(decodedMsg.DataSets) > 0 {\n\t\t\tb, err = decodedMsg.JSONMarshal(buf)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Println(err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase ipfixMQCh <- append([]byte{}, b...):\n\t\t\tdefault:\n\t\t\t}\n\n\t\t\tif opts.Verbose {\n\t\t\t\tlogger.Println(string(b))\n\t\t\t}\n\t\t}\n\n\t}\n}\n\nfunc (i *IPFIX) status() *IPFIXStats {\n\treturn &IPFIXStats{\n\t\tUDPQueue: len(ipfixUDPCh),\n\t\tUDPMirrorQueue: len(ipfixMCh),\n\t\tMessageQueue: len(ipfixMQCh),\n\t\tUDPCount: atomic.LoadUint64(&i.stats.UDPCount),\n\t\tDecodedCount: atomic.LoadUint64(&i.stats.DecodedCount),\n\t\tMQErrorCount: atomic.LoadUint64(&i.stats.MQErrorCount),\n\t\tWorkers: atomic.LoadInt32(&i.stats.Workers),\n\t}\n}\n\nfunc (i *IPFIX) dynWorkers() {\n\tvar load, nSeq, newWorkers, workers, n int\n\n\ttick := time.Tick(120 * time.Second)\n\n\tfor {\n\t\t<-tick\n\t\tload = 0\n\n\t\tfor n = 0; n < 30; n++ {\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tload += len(ipfixUDPCh)\n\t\t}\n\n\t\tif load > 15 {\n\n\t\t\tswitch {\n\t\t\tcase load > 300:\n\t\t\t\tnewWorkers = 100\n\t\t\tcase load > 200:\n\t\t\t\tnewWorkers = 60\n\t\t\tcase load > 100:\n\t\t\t\tnewWorkers = 40\n\t\t\tdefault:\n\t\t\t\tnewWorkers = 30\n\t\t\t}\n\n\t\t\tworkers = int(atomic.LoadInt32(&i.stats.Workers))\n\t\t\tif workers+newWorkers > maxWorkers {\n\t\t\t\tlogger.Println(\"ipfix :: max out workers\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor n = 0; n < newWorkers; n++ {\n\t\t\t\tgo func() {\n\t\t\t\t\tatomic.AddInt32(&i.stats.Workers, 1)\n\t\t\t\t\twQuit := make(chan struct{})\n\t\t\t\t\ti.pool <- wQuit\n\t\t\t\t\ti.ipfixWorker(wQuit)\n\t\t\t\t}()\n\t\t\t}\n\n\t\t}\n\n\t\tif load == 0 {\n\t\t\tnSeq++\n\t\t} else {\n\t\t\tnSeq = 0\n\t\t\tcontinue\n\t\t}\n\n\t\tif nSeq > 15 {\n\t\t\tfor n = 0; n < 10; n++ {\n\t\t\t\tif len(i.pool) > i.workers {\n\t\t\t\t\tatomic.AddInt32(&i.stats.Workers, -1)\n\t\t\t\t\twQuit := <-i.pool\n\t\t\t\t\tclose(wQuit)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tnSeq = 0\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 Google Inc. All Rights Reserved.\n\/\/ This file is available under the Apache license.\n\n\/\/ Build the parser:\n\/\/go:generate go tool yacc -v y.output -o parser.go -p mtail parser.y\n\npackage vm\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\n\t\"github.com\/google\/mtail\/metrics\"\n)\n\ntype compiler struct {\n\tname string \/\/ Name of the program.\n\n\terrors ErrorList \/\/ Compile errors.\n\tprog []instr \/\/ The emitted program.\n\tstr []string \/\/ Static strings.\n\tre []*regexp.Regexp \/\/ Static regular expressions.\n\tm []*metrics.Metric \/\/ Metrics accessible to this program.\n\n\tdecos []*decoNode \/\/ Decorator stack to unwind\n\n\tsymtab *scope\n}\n\n\/\/ Compile compiles a program from the input into a virtual machine or a list\n\/\/ of compile errors. It takes the program's name and the metric store as\n\/\/ additional arguments to build the virtual machine.\nfunc Compile(name string, input io.Reader, ms *metrics.Store, compileOnly bool, syslogUseCurrentYear bool) (*VM, error) {\n\tname = filepath.Base(name)\n\tp := newParser(name, input, ms)\n\tr := mtailParse(p)\n\tif r != 0 || p == nil || p.errors != nil {\n\t\treturn nil, p.errors\n\t}\n\tc := &compiler{name: name, symtab: p.s}\n\tc.compile(p.root)\n\tif len(c.errors) > 0 {\n\t\treturn nil, c.errors\n\t}\n\tif compileOnly {\n\t\treturn nil, nil\n\t}\n\n\tvm := New(name, c.re, c.str, c.m, c.prog, syslogUseCurrentYear)\n\treturn vm, nil\n}\n\nfunc (c *compiler) errorf(format string, args ...interface{}) {\n\te := fmt.Sprintf(format, args...)\n\tc.errors.Add(position{filename: c.name}, e)\n}\n\nfunc (c *compiler) emit(i instr) {\n\tc.prog = append(c.prog, i)\n}\n\nfunc (c *compiler) compile(untypedNode node) {\n\tswitch n := untypedNode.(type) {\n\tcase *stmtlistNode:\n\t\tfor _, child := range n.children {\n\t\t\tc.compile(child)\n\t\t}\n\n\tcase *exprlistNode:\n\t\tfor _, child := range n.children {\n\t\t\tc.compile(child)\n\t\t}\n\n\tcase *declNode:\n\t\t\/\/ Build the list of addressable metrics for this program, and set the symbol's address.\n\t\tn.sym.addr = len(c.m)\n\t\tc.m = append(c.m, n.sym.binding.(*metrics.Metric))\n\n\tcase *condNode:\n\t\tif n.cond != nil {\n\t\t\tc.compile(n.cond)\n\t\t}\n\t\t\/\/ Save PC of previous jump instruction\n\t\t\/\/ (see regexNode and relNode cases, which will emit a jump)\n\t\tpc := len(c.prog) - 1\n\t\tfor _, child := range n.children {\n\t\t\tc.compile(child)\n\t\t}\n\t\t\/\/ Rewrite jump target to jump to instruction after block.\n\t\tc.prog[pc].opnd = len(c.prog)\n\n\tcase *regexNode:\n\t\tif n.re == nil {\n\t\t\tre, err := regexp.Compile(n.pattern)\n\t\t\tif err != nil {\n\t\t\t\tc.errorf(\"%s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tc.re = append(c.re, re)\n\t\t\tn.re = re\n\t\t\t\/\/ Store the location of this regular expression in the regexNode\n\t\t\tn.addr = len(c.re) - 1\n\t\t}\n\t\tc.emit(instr{match, n.addr})\n\t\tc.emit(instr{op: jnm})\n\n\tcase *binaryExprNode:\n\t\tc.compile(n.lhs)\n\t\tc.compile(n.rhs)\n\t\tswitch n.op {\n\t\tcase LT:\n\t\t\tc.emit(instr{cmp, -1})\n\t\t\tc.emit(instr{op: jnm})\n\t\tcase GT:\n\t\t\tc.emit(instr{cmp, 1})\n\t\t\tc.emit(instr{op: jnm})\n\t\tcase LE:\n\t\t\tc.emit(instr{cmp, 1})\n\t\t\tc.emit(instr{op: jm})\n\t\tcase GE:\n\t\t\tc.emit(instr{cmp, -1})\n\t\t\tc.emit(instr{op: jm})\n\t\tcase EQ:\n\t\t\tc.emit(instr{cmp, 0})\n\t\t\tc.emit(instr{op: jnm})\n\t\tcase NE:\n\t\t\tc.emit(instr{cmp, 0})\n\t\t\tc.emit(instr{op: jm})\n\t\tcase '+':\n\t\t\tc.emit(instr{op: add})\n\t\tcase '-':\n\t\t\tc.emit(instr{op: sub})\n\t\tcase '*':\n\t\t\tc.emit(instr{op: mul})\n\t\tcase '\/':\n\t\t\tc.emit(instr{op: div})\n\t\tcase AND:\n\t\t\tc.emit(instr{op: and})\n\t\tcase OR:\n\t\t\tc.emit(instr{op: or})\n\t\tcase XOR:\n\t\t\tc.emit(instr{op: xor})\n\t\tcase ASSIGN:\n\t\t\tc.emit(instr{op: set})\n\t\tcase ADD_ASSIGN:\n\t\t\tc.emit(instr{inc, 1})\n\t\tcase SHL:\n\t\t\tc.emit(instr{op: shl})\n\t\tcase SHR:\n\t\t\tc.emit(instr{op: shr})\n\t\tcase POW:\n\t\t\tc.emit(instr{op: pow})\n\t\t}\n\n\tcase *unaryExprNode:\n\t\tc.compile(n.lhs)\n\t\tswitch n.op {\n\t\tcase INC:\n\t\t\tc.emit(instr{op: inc})\n\t\tcase NOT:\n\t\t\tc.emit(instr{op: not})\n\t\t}\n\n\tcase *indexedExprNode:\n\t\tc.compile(n.index)\n\t\tc.compile(n.lhs)\n\n\tcase *numericExprNode:\n\t\tc.emit(instr{push, int(n.i)})\n\n\tcase *stringNode:\n\t\tc.str = append(c.str, n.text)\n\t\tc.emit(instr{str, len(c.str) - 1})\n\n\tcase *idNode:\n\t\tc.emit(instr{mload, n.sym.addr})\n\t\tm := n.sym.binding.(*metrics.Metric)\n\t\tc.emit(instr{dload, len(m.Keys)})\n\n\tcase *caprefNode:\n\t\trn := n.sym.binding.(*regexNode)\n\t\t\/\/ rn.addr contains the index of the regular expression object,\n\t\t\/\/ which correlates to storage on the re heap\n\t\tc.emit(instr{push, rn.addr})\n\t\tc.emit(instr{capref, n.sym.addr})\n\n\tcase *builtinNode:\n\t\tif n.args != nil {\n\t\t\tc.compile(n.args)\n\t\t\tc.emit(instr{builtin[n.name], len(n.args.(*exprlistNode).children)})\n\t\t} else {\n\t\t\tc.emit(instr{op: builtin[n.name]})\n\t\t}\n\n\tcase *defNode:\n\t\t\/\/ Do nothing, defs are inlined.\n\n\tcase *decoNode:\n\t\t\/\/ Put the current block on the stack\n\t\tc.decos = append(c.decos, n)\n\t\t\/\/ then iterate over the decorator's nodes\n\t\tfor _, child := range n.def.children {\n\t\t\tc.compile(child)\n\t\t}\n\t\t\/\/ Pop the block off\n\t\tc.decos = c.decos[:len(c.decos)-1]\n\n\tcase *nextNode:\n\t\t\/\/ Visit the 'next' block on the decorated block stack\n\t\tdeco := c.decos[len(c.decos)-1]\n\t\tfor _, child := range deco.children {\n\t\t\tc.compile(child)\n\t\t}\n\n\tdefault:\n\t\tc.errorf(\"undefined node type %T (%q)6\", untypedNode, untypedNode)\n\t}\n}\n<commit_msg>Don't cast the numeric expr because we accept interfaces as opnd.<commit_after>\/\/ Copyright 2011 Google Inc. All Rights Reserved.\n\/\/ This file is available under the Apache license.\n\n\/\/ Build the parser:\n\/\/go:generate go tool yacc -v y.output -o parser.go -p mtail parser.y\n\npackage vm\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\n\t\"github.com\/google\/mtail\/metrics\"\n)\n\ntype compiler struct {\n\tname string \/\/ Name of the program.\n\n\terrors ErrorList \/\/ Compile errors.\n\tprog []instr \/\/ The emitted program.\n\tstr []string \/\/ Static strings.\n\tre []*regexp.Regexp \/\/ Static regular expressions.\n\tm []*metrics.Metric \/\/ Metrics accessible to this program.\n\n\tdecos []*decoNode \/\/ Decorator stack to unwind\n\n\tsymtab *scope\n}\n\n\/\/ Compile compiles a program from the input into a virtual machine or a list\n\/\/ of compile errors. It takes the program's name and the metric store as\n\/\/ additional arguments to build the virtual machine.\nfunc Compile(name string, input io.Reader, ms *metrics.Store, compileOnly bool, syslogUseCurrentYear bool) (*VM, error) {\n\tname = filepath.Base(name)\n\tp := newParser(name, input, ms)\n\tr := mtailParse(p)\n\tif r != 0 || p == nil || p.errors != nil {\n\t\treturn nil, p.errors\n\t}\n\tc := &compiler{name: name, symtab: p.s}\n\tc.compile(p.root)\n\tif len(c.errors) > 0 {\n\t\treturn nil, c.errors\n\t}\n\tif compileOnly {\n\t\treturn nil, nil\n\t}\n\n\tvm := New(name, c.re, c.str, c.m, c.prog, syslogUseCurrentYear)\n\treturn vm, nil\n}\n\nfunc (c *compiler) errorf(format string, args ...interface{}) {\n\te := fmt.Sprintf(format, args...)\n\tc.errors.Add(position{filename: c.name}, e)\n}\n\nfunc (c *compiler) emit(i instr) {\n\tc.prog = append(c.prog, i)\n}\n\nfunc (c *compiler) compile(untypedNode node) {\n\tswitch n := untypedNode.(type) {\n\tcase *stmtlistNode:\n\t\tfor _, child := range n.children {\n\t\t\tc.compile(child)\n\t\t}\n\n\tcase *exprlistNode:\n\t\tfor _, child := range n.children {\n\t\t\tc.compile(child)\n\t\t}\n\n\tcase *declNode:\n\t\t\/\/ Build the list of addressable metrics for this program, and set the symbol's address.\n\t\tn.sym.addr = len(c.m)\n\t\tc.m = append(c.m, n.sym.binding.(*metrics.Metric))\n\n\tcase *condNode:\n\t\tif n.cond != nil {\n\t\t\tc.compile(n.cond)\n\t\t}\n\t\t\/\/ Save PC of previous jump instruction\n\t\t\/\/ (see regexNode and relNode cases, which will emit a jump)\n\t\tpc := len(c.prog) - 1\n\t\tfor _, child := range n.children {\n\t\t\tc.compile(child)\n\t\t}\n\t\t\/\/ Rewrite jump target to jump to instruction after block.\n\t\tc.prog[pc].opnd = len(c.prog)\n\n\tcase *regexNode:\n\t\tif n.re == nil {\n\t\t\tre, err := regexp.Compile(n.pattern)\n\t\t\tif err != nil {\n\t\t\t\tc.errorf(\"%s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tc.re = append(c.re, re)\n\t\t\tn.re = re\n\t\t\t\/\/ Store the location of this regular expression in the regexNode\n\t\t\tn.addr = len(c.re) - 1\n\t\t}\n\t\tc.emit(instr{match, n.addr})\n\t\tc.emit(instr{op: jnm})\n\n\tcase *binaryExprNode:\n\t\tc.compile(n.lhs)\n\t\tc.compile(n.rhs)\n\t\tswitch n.op {\n\t\tcase LT:\n\t\t\tc.emit(instr{cmp, -1})\n\t\t\tc.emit(instr{op: jnm})\n\t\tcase GT:\n\t\t\tc.emit(instr{cmp, 1})\n\t\t\tc.emit(instr{op: jnm})\n\t\tcase LE:\n\t\t\tc.emit(instr{cmp, 1})\n\t\t\tc.emit(instr{op: jm})\n\t\tcase GE:\n\t\t\tc.emit(instr{cmp, -1})\n\t\t\tc.emit(instr{op: jm})\n\t\tcase EQ:\n\t\t\tc.emit(instr{cmp, 0})\n\t\t\tc.emit(instr{op: jnm})\n\t\tcase NE:\n\t\t\tc.emit(instr{cmp, 0})\n\t\t\tc.emit(instr{op: jm})\n\t\tcase '+':\n\t\t\tc.emit(instr{op: add})\n\t\tcase '-':\n\t\t\tc.emit(instr{op: sub})\n\t\tcase '*':\n\t\t\tc.emit(instr{op: mul})\n\t\tcase '\/':\n\t\t\tc.emit(instr{op: div})\n\t\tcase AND:\n\t\t\tc.emit(instr{op: and})\n\t\tcase OR:\n\t\t\tc.emit(instr{op: or})\n\t\tcase XOR:\n\t\t\tc.emit(instr{op: xor})\n\t\tcase ASSIGN:\n\t\t\tc.emit(instr{op: set})\n\t\tcase ADD_ASSIGN:\n\t\t\tc.emit(instr{inc, 1})\n\t\tcase SHL:\n\t\t\tc.emit(instr{op: shl})\n\t\tcase SHR:\n\t\t\tc.emit(instr{op: shr})\n\t\tcase POW:\n\t\t\tc.emit(instr{op: pow})\n\t\t}\n\n\tcase *unaryExprNode:\n\t\tc.compile(n.lhs)\n\t\tswitch n.op {\n\t\tcase INC:\n\t\t\tc.emit(instr{op: inc})\n\t\tcase NOT:\n\t\t\tc.emit(instr{op: not})\n\t\t}\n\n\tcase *indexedExprNode:\n\t\tc.compile(n.index)\n\t\tc.compile(n.lhs)\n\n\tcase *numericExprNode:\n\t\tc.emit(instr{push, n.i})\n\n\tcase *stringNode:\n\t\tc.str = append(c.str, n.text)\n\t\tc.emit(instr{str, len(c.str) - 1})\n\n\tcase *idNode:\n\t\tc.emit(instr{mload, n.sym.addr})\n\t\tm := n.sym.binding.(*metrics.Metric)\n\t\tc.emit(instr{dload, len(m.Keys)})\n\n\tcase *caprefNode:\n\t\trn := n.sym.binding.(*regexNode)\n\t\t\/\/ rn.addr contains the index of the regular expression object,\n\t\t\/\/ which correlates to storage on the re heap\n\t\tc.emit(instr{push, rn.addr})\n\t\tc.emit(instr{capref, n.sym.addr})\n\n\tcase *builtinNode:\n\t\tif n.args != nil {\n\t\t\tc.compile(n.args)\n\t\t\tc.emit(instr{builtin[n.name], len(n.args.(*exprlistNode).children)})\n\t\t} else {\n\t\t\tc.emit(instr{op: builtin[n.name]})\n\t\t}\n\n\tcase *defNode:\n\t\t\/\/ Do nothing, defs are inlined.\n\n\tcase *decoNode:\n\t\t\/\/ Put the current block on the stack\n\t\tc.decos = append(c.decos, n)\n\t\t\/\/ then iterate over the decorator's nodes\n\t\tfor _, child := range n.def.children {\n\t\t\tc.compile(child)\n\t\t}\n\t\t\/\/ Pop the block off\n\t\tc.decos = c.decos[:len(c.decos)-1]\n\n\tcase *nextNode:\n\t\t\/\/ Visit the 'next' block on the decorated block stack\n\t\tdeco := c.decos[len(c.decos)-1]\n\t\tfor _, child := range deco.children {\n\t\t\tc.compile(child)\n\t\t}\n\n\tdefault:\n\t\tc.errorf(\"undefined node type %T (%q)6\", untypedNode, untypedNode)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Joel Scoble and The JoeFriday authors.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package cpuinfo handles processing of \/proc\/cpuinfo. The Info struct will\n\/\/ have one entry per processor.\npackage cpuinfo\n\nimport (\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/SermoDigital\/helpers\"\n\tjoe \"github.com\/mohae\/joefriday\"\n)\n\nconst procFile = \"\/proc\/cpuinfo\"\n\n\/\/ CPUInfo holds information about the system's cpus; CPU will have one entry\n\/\/ per processor.\ntype CPUInfo struct {\n\tTimestamp int64\n\tCPU []CPU `json:\"cpus\"`\n}\n\n\/\/ CPU holds the \/proc\/cpuinfo for a single processor.\ntype CPU struct {\n\tProcessor int16 `json:\"processor\"`\n\tVendorID string `json:\"vendor_id\"`\n\tCPUFamily string `json:\"cpu_family\"`\n\tModel string `json:\"model\"`\n\tModelName string `json:\"model_name\"`\n\tStepping string `json:\"stepping\"`\n\tMicrocode string `json:\"microcode\"`\n\tCPUMHz float32 `json:\"cpu_mhz\"`\n\tCacheSize string `json:\"cache_size\"`\n\tPhysicalID int16 `json:\"physical_id\"`\n\tSiblings int16 `json:\"siblings\"`\n\tCoreID int16 `json:\"core_id\"`\n\tCPUCores int16 `json:\"cpu_cores\"`\n\tApicID int16 `json:\"apicid\"`\n\tInitialApicID int16 `json:\"initial_apicid\"`\n\tFPU string `json:\"fpu\"`\n\tFPUException string `json:\"fpu_exception\"`\n\tCPUIDLevel string `json:\"cpuid_level\"`\n\tWP string `json:\"wp\"`\n\tFlags []string `json:\"flags\"`\n\tBogoMIPS float32 `json:\"bogomips\"`\n\tCLFlushSize string `json:\"clflush_size\"`\n\tCacheAlignment string `json:\"cache_alignment\"`\n\tAddressSizes string `json:\"address_sizes\"`\n\tPowerManagement string `json:\"power_management\"`\n}\n\n\/\/ Profiler is used to process the \/proc\/cpuinfo file.\ntype Profiler struct {\n\tjoe.Procer\n\tLine []byte\n\tVal []byte\n}\n\n\/\/ Returns an initialized Profiler; ready to use.\nfunc NewProfiler() (prof *Profiler, err error) {\n\tproc, err := joe.New(procFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Profiler{Procer: proc, Val: make([]byte, 0, 32)}, nil\n}\n\n\/\/ Reset resources; after reset the profiler is ready to be used again.\nfunc (prof *Profiler) Reset() error {\n\tprof.Val = prof.Val[:0]\n\treturn prof.Procer.Reset()\n}\n\n\/\/ Get returns the current cpuinfo.\nfunc (prof *Profiler) Get() (inf *CPUInfo, err error) {\n\tvar (\n\t\tcpuCnt, i, pos, nameLen int\n\t\tn uint64\n\t\tv byte\n\t\tcpu CPU\n\t)\n\terr = prof.Reset()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinf = &CPUInfo{Timestamp: time.Now().UTC().UnixNano()}\n\tfor {\n\t\tprof.Line, err = prof.ReadSlice('\\n')\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn nil, &joe.ReadError{Err: err}\n\t\t}\n\t\tprof.Val = prof.Val[:0]\n\t\t\/\/ First grab the attribute name; everything up to the ':'. The key may have\n\t\t\/\/ spaces and has trailing spaces; that gets trimmed.\n\t\tfor i, v = range prof.Line {\n\t\t\tif v == 0x3A {\n\t\t\t\tprof.Val = prof.Line[:i]\n\t\t\t\tpos = i + 1\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t\/\/prof.Val = append(prof.Val, v)\n\t\t}\n\t\tprof.Val = joe.TrimTrailingSpaces(prof.Val[:])\n\t\tnameLen = len(prof.Val)\n\t\t\/\/ if there's no name; skip.\n\t\tif nameLen == 0 {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ if there's anything left, the value is everything else; trim spaces\n\t\tif pos+1 < len(prof.Line) {\n\t\t\tprof.Val = append(prof.Val, joe.TrimTrailingSpaces(prof.Line[pos+1:])...)\n\t\t}\n\t\tv = prof.Val[0]\n\t\tif v == 'a' {\n\t\t\tv = prof.Val[1]\n\t\t\tif v == 'd' { \/\/ address sizes\n\t\t\t\tcpu.AddressSizes = string(prof.Val[nameLen:])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == 'p' { \/\/ apicid\n\t\t\t\tn, err = helpers.ParseUint(prof.Val[nameLen:])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, &joe.ParseError{Info: string(prof.Val[:nameLen]), Err: err}\n\t\t\t\t}\n\t\t\t\tcpu.ApicID = int16(n)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif v == 'c' {\n\t\t\tv = prof.Val[1]\n\t\t\tif v == 'p' {\n\t\t\t\tv = prof.Val[4]\n\t\t\t\tif v == 'c' { \/\/ cpu cores\n\t\t\t\t\tn, err = helpers.ParseUint(prof.Val[nameLen:])\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, &joe.ParseError{Info: string(prof.Val[:nameLen]), Err: err}\n\t\t\t\t\t}\n\t\t\t\t\tcpu.CPUCores = int16(n)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif v == 'f' { \/\/ cpu family\n\t\t\t\t\tcpu.CPUFamily = string(prof.Val[nameLen:])\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif v == 'M' { \/\/ cpu MHz\n\t\t\t\t\tf, err := strconv.ParseFloat(string(prof.Val[nameLen:]), 32)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, &joe.ParseError{Info: string(prof.Val[:nameLen]), Err: err}\n\t\t\t\t\t}\n\t\t\t\t\tcpu.CPUMHz = float32(f)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif v == 'd' { \/\/ cpuid level\n\t\t\t\t\tcpu.CPUIDLevel = string(prof.Val[nameLen:])\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tv = prof.Val[5]\n\t\t\tif v == '_' { \/\/ cache_alignment\n\t\t\t\tcpu.CacheAlignment = string(prof.Val[nameLen:])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == ' ' { \/\/ cache size\n\t\t\t\tcpu.CacheSize = string(prof.Val[nameLen:])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == 's' { \/\/ clflush size\n\t\t\t\tcpu.CLFlushSize = string(prof.Val[nameLen:])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == 'i' { \/\/ core id\n\t\t\t\tn, err = helpers.ParseUint(prof.Val[nameLen:])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, &joe.ParseError{Info: string(prof.Val[:nameLen]), Err: err}\n\t\t\t\t}\n\t\t\t\tcpu.CoreID = int16(n)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif v == 'f' {\n\t\t\tv = prof.Val[1]\n\t\t\tif v == 'l' { \/\/ flags\n\t\t\t\tcpu.Flags = strings.Split(string(prof.Val[nameLen:]), \" \")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == 'p' {\n\t\t\t\tif nameLen == 3 { \/\/ fpu\n\t\t\t\t\tcpu.FPU = string(prof.Val[nameLen:])\n\t\t\t\t} else { \/\/ fpu_exception\n\t\t\t\t\tcpu.FPUException = string(prof.Val[nameLen:])\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif v == 'm' {\n\t\t\tv = prof.Val[1]\n\t\t\tif v == 'i' { \/\/ microcode\n\t\t\t\tcpu.Microcode = string(prof.Val[nameLen:])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == 'o' {\n\t\t\t\tif nameLen == 5 { \/\/ model\n\t\t\t\t\tcpu.Model = string(prof.Val[nameLen:])\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcpu.ModelName = string(prof.Val[nameLen:])\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif v == 'p' {\n\t\t\tv = prof.Val[1]\n\t\t\tif v == 'h' { \/\/ physical id\n\t\t\t\tn, err = helpers.ParseUint(prof.Val[nameLen:])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, &joe.ParseError{Info: string(prof.Val[:nameLen]), Err: err}\n\t\t\t\t}\n\t\t\t\tcpu.PhysicalID = int16(n)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == 'o' { \/\/ power management\n\t\t\t\tcpu.PowerManagement = string(prof.Val[nameLen:])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ processor starts information about a processor.\n\t\t\tif v == 'r' { \/\/ processor\n\t\t\t\tif cpuCnt > 0 {\n\t\t\t\t\tinf.CPU = append(inf.CPU, cpu)\n\t\t\t\t}\n\t\t\t\tcpuCnt++\n\t\t\t\tn, err = helpers.ParseUint(prof.Val[nameLen:])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, &joe.ParseError{Info: string(prof.Val[:nameLen]), Err: err}\n\t\t\t\t}\n\t\t\t\tcpu = CPU{Processor: int16(n)}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif v == 's' {\n\t\t\tv = prof.Val[1]\n\t\t\tif v == 'i' { \/\/ siblings\n\t\t\t\tn, err = helpers.ParseUint(prof.Val[nameLen:])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, &joe.ParseError{Info: string(prof.Val[:nameLen]), Err: err}\n\t\t\t\t}\n\t\t\t\tcpu.Siblings = int16(n)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == 't' { \/\/ stepping\n\t\t\t\tcpu.Stepping = string(prof.Val[nameLen:])\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ also check 2nd name pos for o as some output also have a bugs line.\n\t\tif v == 'b' && prof.Val[1] == 'o' { \/\/ bogomips\n\t\t\tf, err := strconv.ParseFloat(string(prof.Val[nameLen:]), 32)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, &joe.ParseError{Info: string(prof.Val[:nameLen]), Err: err}\n\t\t\t}\n\t\t\tcpu.BogoMIPS = float32(f)\n\t\t\tcontinue\n\t\t}\n\t\tif v == 'i' { \/\/ initial apicid\n\t\t\tn, err = helpers.ParseUint(prof.Val[nameLen:])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, &joe.ParseError{Info: string(prof.Val[:nameLen]), Err: err}\n\t\t\t}\n\t\t\tcpu.InitialApicID = int16(n)\n\t\t\tcontinue\n\t\t}\n\t\tif v == 'W' { \/\/ WP\n\t\t\tcpu.WP = string(prof.Val[nameLen:])\n\t\t\tcontinue\n\t\t}\n\t\tif v == 'v' { \/\/ vendor_id\n\t\t\tcpu.VendorID = string(prof.Val[nameLen:])\n\t\t}\n\t}\n\t\/\/ append the current processor informatin\n\tinf.CPU = append(inf.CPU, cpu)\n\treturn inf, nil\n}\n\nvar std *Profiler\nvar stdMu sync.Mutex\n\n\/\/ Get returns the current cpuinfo using the package's global Profiler.\nfunc Get() (inf *CPUInfo, err error) {\n\tstdMu.Lock()\n\tdefer stdMu.Unlock()\n\tif std == nil {\n\t\tstd, err = NewProfiler()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn std.Get()\n}\n<commit_msg>check for wp should look for w not W<commit_after>\/\/ Copyright 2016 Joel Scoble and The JoeFriday authors.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package cpuinfo handles processing of \/proc\/cpuinfo. The Info struct will\n\/\/ have one entry per processor.\npackage cpuinfo\n\nimport (\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/SermoDigital\/helpers\"\n\tjoe \"github.com\/mohae\/joefriday\"\n)\n\nconst procFile = \"\/proc\/cpuinfo\"\n\n\/\/ CPUInfo holds information about the system's cpus; CPU will have one entry\n\/\/ per processor.\ntype CPUInfo struct {\n\tTimestamp int64\n\tCPU []CPU `json:\"cpus\"`\n}\n\n\/\/ CPU holds the \/proc\/cpuinfo for a single processor.\ntype CPU struct {\n\tProcessor int16 `json:\"processor\"`\n\tVendorID string `json:\"vendor_id\"`\n\tCPUFamily string `json:\"cpu_family\"`\n\tModel string `json:\"model\"`\n\tModelName string `json:\"model_name\"`\n\tStepping string `json:\"stepping\"`\n\tMicrocode string `json:\"microcode\"`\n\tCPUMHz float32 `json:\"cpu_mhz\"`\n\tCacheSize string `json:\"cache_size\"`\n\tPhysicalID int16 `json:\"physical_id\"`\n\tSiblings int16 `json:\"siblings\"`\n\tCoreID int16 `json:\"core_id\"`\n\tCPUCores int16 `json:\"cpu_cores\"`\n\tApicID int16 `json:\"apicid\"`\n\tInitialApicID int16 `json:\"initial_apicid\"`\n\tFPU string `json:\"fpu\"`\n\tFPUException string `json:\"fpu_exception\"`\n\tCPUIDLevel string `json:\"cpuid_level\"`\n\tWP string `json:\"wp\"`\n\tFlags []string `json:\"flags\"`\n\tBogoMIPS float32 `json:\"bogomips\"`\n\tCLFlushSize string `json:\"clflush_size\"`\n\tCacheAlignment string `json:\"cache_alignment\"`\n\tAddressSizes string `json:\"address_sizes\"`\n\tPowerManagement string `json:\"power_management\"`\n}\n\n\/\/ Profiler is used to process the \/proc\/cpuinfo file.\ntype Profiler struct {\n\tjoe.Procer\n\tLine []byte\n\tVal []byte\n}\n\n\/\/ Returns an initialized Profiler; ready to use.\nfunc NewProfiler() (prof *Profiler, err error) {\n\tproc, err := joe.New(procFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Profiler{Procer: proc, Val: make([]byte, 0, 32)}, nil\n}\n\n\/\/ Reset resources; after reset the profiler is ready to be used again.\nfunc (prof *Profiler) Reset() error {\n\tprof.Val = prof.Val[:0]\n\treturn prof.Procer.Reset()\n}\n\n\/\/ Get returns the current cpuinfo.\nfunc (prof *Profiler) Get() (inf *CPUInfo, err error) {\n\tvar (\n\t\tcpuCnt, i, pos, nameLen int\n\t\tn uint64\n\t\tv byte\n\t\tcpu CPU\n\t)\n\terr = prof.Reset()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tinf = &CPUInfo{Timestamp: time.Now().UTC().UnixNano()}\n\tfor {\n\t\tprof.Line, err = prof.ReadSlice('\\n')\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn nil, &joe.ReadError{Err: err}\n\t\t}\n\t\tprof.Val = prof.Val[:0]\n\t\t\/\/ First grab the attribute name; everything up to the ':'. The key may have\n\t\t\/\/ spaces and has trailing spaces; that gets trimmed.\n\t\tfor i, v = range prof.Line {\n\t\t\tif v == 0x3A {\n\t\t\t\tprof.Val = prof.Line[:i]\n\t\t\t\tpos = i + 1\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t\/\/prof.Val = append(prof.Val, v)\n\t\t}\n\t\tprof.Val = joe.TrimTrailingSpaces(prof.Val[:])\n\t\tnameLen = len(prof.Val)\n\t\t\/\/ if there's no name; skip.\n\t\tif nameLen == 0 {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ if there's anything left, the value is everything else; trim spaces\n\t\tif pos+1 < len(prof.Line) {\n\t\t\tprof.Val = append(prof.Val, joe.TrimTrailingSpaces(prof.Line[pos+1:])...)\n\t\t}\n\t\tv = prof.Val[0]\n\t\tif v == 'a' {\n\t\t\tv = prof.Val[1]\n\t\t\tif v == 'd' { \/\/ address sizes\n\t\t\t\tcpu.AddressSizes = string(prof.Val[nameLen:])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == 'p' { \/\/ apicid\n\t\t\t\tn, err = helpers.ParseUint(prof.Val[nameLen:])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, &joe.ParseError{Info: string(prof.Val[:nameLen]), Err: err}\n\t\t\t\t}\n\t\t\t\tcpu.ApicID = int16(n)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif v == 'c' {\n\t\t\tv = prof.Val[1]\n\t\t\tif v == 'p' {\n\t\t\t\tv = prof.Val[4]\n\t\t\t\tif v == 'c' { \/\/ cpu cores\n\t\t\t\t\tn, err = helpers.ParseUint(prof.Val[nameLen:])\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, &joe.ParseError{Info: string(prof.Val[:nameLen]), Err: err}\n\t\t\t\t\t}\n\t\t\t\t\tcpu.CPUCores = int16(n)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif v == 'f' { \/\/ cpu family\n\t\t\t\t\tcpu.CPUFamily = string(prof.Val[nameLen:])\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif v == 'M' { \/\/ cpu MHz\n\t\t\t\t\tf, err := strconv.ParseFloat(string(prof.Val[nameLen:]), 32)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, &joe.ParseError{Info: string(prof.Val[:nameLen]), Err: err}\n\t\t\t\t\t}\n\t\t\t\t\tcpu.CPUMHz = float32(f)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif v == 'd' { \/\/ cpuid level\n\t\t\t\t\tcpu.CPUIDLevel = string(prof.Val[nameLen:])\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tv = prof.Val[5]\n\t\t\tif v == '_' { \/\/ cache_alignment\n\t\t\t\tcpu.CacheAlignment = string(prof.Val[nameLen:])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == ' ' { \/\/ cache size\n\t\t\t\tcpu.CacheSize = string(prof.Val[nameLen:])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == 's' { \/\/ clflush size\n\t\t\t\tcpu.CLFlushSize = string(prof.Val[nameLen:])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == 'i' { \/\/ core id\n\t\t\t\tn, err = helpers.ParseUint(prof.Val[nameLen:])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, &joe.ParseError{Info: string(prof.Val[:nameLen]), Err: err}\n\t\t\t\t}\n\t\t\t\tcpu.CoreID = int16(n)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif v == 'f' {\n\t\t\tv = prof.Val[1]\n\t\t\tif v == 'l' { \/\/ flags\n\t\t\t\tcpu.Flags = strings.Split(string(prof.Val[nameLen:]), \" \")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == 'p' {\n\t\t\t\tif nameLen == 3 { \/\/ fpu\n\t\t\t\t\tcpu.FPU = string(prof.Val[nameLen:])\n\t\t\t\t} else { \/\/ fpu_exception\n\t\t\t\t\tcpu.FPUException = string(prof.Val[nameLen:])\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif v == 'm' {\n\t\t\tv = prof.Val[1]\n\t\t\tif v == 'i' { \/\/ microcode\n\t\t\t\tcpu.Microcode = string(prof.Val[nameLen:])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == 'o' {\n\t\t\t\tif nameLen == 5 { \/\/ model\n\t\t\t\t\tcpu.Model = string(prof.Val[nameLen:])\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcpu.ModelName = string(prof.Val[nameLen:])\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif v == 'p' {\n\t\t\tv = prof.Val[1]\n\t\t\tif v == 'h' { \/\/ physical id\n\t\t\t\tn, err = helpers.ParseUint(prof.Val[nameLen:])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, &joe.ParseError{Info: string(prof.Val[:nameLen]), Err: err}\n\t\t\t\t}\n\t\t\t\tcpu.PhysicalID = int16(n)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == 'o' { \/\/ power management\n\t\t\t\tcpu.PowerManagement = string(prof.Val[nameLen:])\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ processor starts information about a processor.\n\t\t\tif v == 'r' { \/\/ processor\n\t\t\t\tif cpuCnt > 0 {\n\t\t\t\t\tinf.CPU = append(inf.CPU, cpu)\n\t\t\t\t}\n\t\t\t\tcpuCnt++\n\t\t\t\tn, err = helpers.ParseUint(prof.Val[nameLen:])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, &joe.ParseError{Info: string(prof.Val[:nameLen]), Err: err}\n\t\t\t\t}\n\t\t\t\tcpu = CPU{Processor: int16(n)}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif v == 's' {\n\t\t\tv = prof.Val[1]\n\t\t\tif v == 'i' { \/\/ siblings\n\t\t\t\tn, err = helpers.ParseUint(prof.Val[nameLen:])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, &joe.ParseError{Info: string(prof.Val[:nameLen]), Err: err}\n\t\t\t\t}\n\t\t\t\tcpu.Siblings = int16(n)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif v == 't' { \/\/ stepping\n\t\t\t\tcpu.Stepping = string(prof.Val[nameLen:])\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ also check 2nd name pos for o as some output also have a bugs line.\n\t\tif v == 'b' && prof.Val[1] == 'o' { \/\/ bogomips\n\t\t\tf, err := strconv.ParseFloat(string(prof.Val[nameLen:]), 32)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, &joe.ParseError{Info: string(prof.Val[:nameLen]), Err: err}\n\t\t\t}\n\t\t\tcpu.BogoMIPS = float32(f)\n\t\t\tcontinue\n\t\t}\n\t\tif v == 'i' { \/\/ initial apicid\n\t\t\tn, err = helpers.ParseUint(prof.Val[nameLen:])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, &joe.ParseError{Info: string(prof.Val[:nameLen]), Err: err}\n\t\t\t}\n\t\t\tcpu.InitialApicID = int16(n)\n\t\t\tcontinue\n\t\t}\n\t\tif v == 'w' { \/\/ WP\n\t\t\tcpu.WP = string(prof.Val[nameLen:])\n\t\t\tcontinue\n\t\t}\n\t\tif v == 'v' { \/\/ vendor_id\n\t\t\tcpu.VendorID = string(prof.Val[nameLen:])\n\t\t}\n\t}\n\t\/\/ append the current processor informatin\n\tinf.CPU = append(inf.CPU, cpu)\n\treturn inf, nil\n}\n\nvar std *Profiler\nvar stdMu sync.Mutex\n\n\/\/ Get returns the current cpuinfo using the package's global Profiler.\nfunc Get() (inf *CPUInfo, err error) {\n\tstdMu.Lock()\n\tdefer stdMu.Unlock()\n\tif std == nil {\n\t\tstd, err = NewProfiler()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn std.Get()\n}\n<|endoftext|>"} {"text":"<commit_before>package common\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst taskIDSize = 20\n\nvar (\n\tTracker = &TaskTracker{tasks: map[string]*Task{}}\n\tTaskStatusUnknown = &TaskStatus{Status: StatusUnknown}\n)\n\nfunc MaintenanceChecker(file string, interval time.Duration) {\n\tgo Tracker.MaintenanceChecker(file, interval)\n}\n\nfunc NewTask(name string, executor TaskExecutor) *Task {\n\ttask := &Task{Tracker: Tracker, Executor: executor}\n\ttask.Status = StatusInit\n\ttask.StatusTime = time.Now()\n\ttask.Name = name\n\ttask.Description = executor.Description()\n\ttask.Request = executor.Request()\n\treturn task\n}\n\ntype TaskTracker struct {\n\tsync.RWMutex\n\tResultDuration time.Duration\n\tMaintenance bool\n\ttasks map[string]*Task\n}\n\ntype Task struct {\n\tsync.RWMutex\n\tTaskStatus\n\tErr error\n\tTracker *TaskTracker\n\tID string\n\tExecutor TaskExecutor\n\tRequest interface{}\n\tResult interface{}\n}\n\ntype TaskExecutor interface {\n\tRequest() interface{}\n\tResult() interface{}\n\tDescription() string\n\tExecute(t *Task) error\n\tAuthorize() error\n}\n\ntype TaskMaintenanceExecutor interface {\n\tAllowDuringMaintenance() bool\n}\n\nfunc createTaskID() string {\n\treturn CreateRandomID(taskIDSize)\n}\n\nfunc (t *TaskTracker) ListIDs() []string {\n\tt.Lock()\n\tids := make([]string, len(t.tasks))\n\ti := 0\n\tfor id, _ := range t.tasks {\n\t\tids[i] = id\n\t\ti++\n\t}\n\tt.Unlock()\n\treturn ids\n}\n\nfunc (t *TaskTracker) SetMaintenance(on bool) {\n\tt.Lock()\n\tt.Maintenance = on\n\tt.Unlock()\n}\n\nfunc (t *TaskTracker) UnderMaintenance() bool {\n\tt.RLock()\n\tmaint := t.Maintenance\n\tt.RUnlock()\n\treturn maint\n}\n\nfunc (t *TaskTracker) MaintenanceChecker(file string, interval time.Duration) {\n\tfor {\n\t\tif _, err := os.Stat(file); err == nil {\n\t\t\t\/\/ maintenance file exists\n\t\t\tif !t.UnderMaintenance() {\n\t\t\t\tlog.Println(\"Begin Maintenance\")\n\t\t\t\tt.SetMaintenance(true)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ maintenance file doesn't exist or there is an error looking for it\n\t\t\tif t.UnderMaintenance() {\n\t\t\t\tlog.Println(\"End Maintenance\")\n\t\t\t\tt.SetMaintenance(false)\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(interval)\n\t}\n}\n\nfunc (t *TaskTracker) Idle(checkTask *Task) bool {\n\tidle := true\n\tt.RLock()\n\tfor _, task := range t.tasks {\n\t\tif task != checkTask && !task.Done {\n\t\t\tidle = false\n\t\t\tbreak\n\t\t}\n\t}\n\tt.RUnlock()\n\treturn idle\n}\n\nfunc (t *TaskTracker) ReserveTaskID(task *Task) string {\n\tt.Lock()\n\trequestID := createTaskID()\n\tfor _, present := t.tasks[requestID]; present; _, present = t.tasks[requestID] {\n\t\trequestID = createTaskID()\n\t}\n\tt.tasks[requestID] = task \/\/ reserve request id\n\tt.Unlock()\n\ttask.Lock()\n\ttask.ID = requestID\n\ttask.Unlock()\n\treturn requestID\n}\n\nfunc (t *TaskTracker) ReleaseTaskID(id string) {\n\tt.Lock()\n\tdelete(t.tasks, id)\n\tt.Unlock()\n}\n\nfunc (t *TaskTracker) Status(id string) (*TaskStatus, error) {\n\tt.RLock()\n\ttask := t.tasks[id]\n\tt.RUnlock()\n\tif task != nil {\n\t\ttask.RLock()\n\t\tstatus := task.CopyTaskStatus()\n\t\terr := task.Err\n\t\ttask.RUnlock()\n\t\treturn status, err\n\t}\n\treturn TaskStatusUnknown, errors.New(\"Unknown Task Status\")\n}\n\nfunc (t *TaskTracker) Result(id string) interface{} {\n\tt.RLock()\n\ttask := t.tasks[id]\n\tt.RUnlock()\n\tif task != nil {\n\t\ttask.RLock()\n\t\tresult := task.Result\n\t\ttask.RUnlock()\n\t\treturn result\n\t}\n\treturn nil\n}\n\nfunc (t *Task) Authorize() error {\n\treturn t.Executor.Authorize()\n}\n\nfunc (t *Task) Run() error {\n\tif t.Tracker.UnderMaintenance() {\n\t\texecutor, ok := t.Executor.(TaskMaintenanceExecutor)\n\t\tif !ok || !executor.AllowDuringMaintenance() {\n\t\t\treturn t.End(errors.New(\"Under Maintenance\"), false)\n\t\t}\n\t}\n\tt.Tracker.ReserveTaskID(t)\n\tt.Log(\"Begin %s\", t.Description)\n\tt.Lock()\n\tt.StartTime = time.Now()\n\tt.Unlock()\n\terr := t.Executor.Authorize()\n\tif err != nil {\n\t\treturn t.End(err, false)\n\t}\n\treturn t.End(t.Executor.Execute(t), false)\n}\n\nfunc (t *Task) RunAsync(r *AsyncReply) error {\n\tif t.Tracker.UnderMaintenance() {\n\t\texecutor, ok := t.Executor.(TaskMaintenanceExecutor)\n\t\tif !ok || !executor.AllowDuringMaintenance() {\n\t\t\treturn t.End(errors.New(\"Under Maintenance\"), false)\n\t\t}\n\t}\n\tt.Tracker.ReserveTaskID(t)\n\tt.RLock()\n\tr.ID = t.ID\n\tt.RUnlock()\n\tgo func() error {\n\t\tt.Log(\"Begin %s\", t.Description)\n\t\tt.Lock()\n\t\tt.StartTime = time.Now()\n\t\tt.Unlock()\n\t\terr := t.Executor.Authorize()\n\t\tif err != nil {\n\t\t\treturn t.End(err, true)\n\t\t}\n\t\tt.End(t.Executor.Execute(t), true)\n\t\treturn nil\n\t}()\n\treturn nil\n}\n\nfunc (t *Task) End(err error, async bool) error {\n\tlogString := fmt.Sprintf(\"End %s\", t.Description)\n\tt.Lock()\n\tt.Result = t.Executor.Result()\n\tt.EndTime = time.Now()\n\tt.StatusTime = t.EndTime\n\tif err == nil {\n\t\tt.Status = StatusDone\n\t\tt.Done = true\n\t} else {\n\t\tt.Status = StatusError\n\t\tt.Err = err\n\t\tt.Done = true\n\t\tlogString += fmt.Sprintf(\" - Error: %s\", err.Error())\n\t}\n\tt.Unlock()\n\tt.Log(logString)\n\tif async {\n\t\ttime.AfterFunc(t.Tracker.ResultDuration, func() {\n\t\t\t\/\/ keep result around for 30 min in case someone wants to check on it\n\t\t\tt.Tracker.ReleaseTaskID(t.ID)\n\t\t})\n\t} else {\n\t\tt.Tracker.ReleaseTaskID(t.ID)\n\t}\n\treturn err\n}\n\nfunc (t *Task) Log(format string, args ...interface{}) {\n\tt.RLock()\n\tlog.Printf(\"[RPC][\"+t.Name+\"][\"+t.ID+\"] \"+format, args...)\n\tt.RUnlock()\n}\n\nfunc (t *Task) LogStatus(format string, args ...interface{}) {\n\tt.Log(format, args...)\n\tt.Lock()\n\tt.StatusTime = time.Now()\n\tt.Status = fmt.Sprintf(format, args...)\n\tt.Unlock()\n}\n\nfunc (t *Task) AddWarning(warn string) {\n\tt.Lock()\n\tif t.Warnings == nil {\n\t\tt.Warnings = []string{warn}\n\t} else {\n\t\tt.Warnings = append(t.Warnings, warn)\n\t}\n\tt.Unlock()\n\tt.Log(\"WARNING: %s\", warn)\n}\n\ntype TaskStatus struct {\n\tName string\n\tDescription string\n\tStatus string\n\tWarnings []string\n\tDone bool\n\tStartTime time.Time\n\tStatusTime time.Time\n\tEndTime time.Time\n}\n\nfunc (t *TaskStatus) Map() map[string]interface{} {\n\treturn map[string]interface{}{\n\t\t\"Name\": t.Name,\n\t\t\"Description\": t.Description,\n\t\t\"Status\": t.Status,\n\t\t\"Warnings\": t.Warnings,\n\t\t\"Done\": t.Done,\n\t\t\"StartTime\": t.StartTime,\n\t\t\"StatusTime\": t.StatusTime,\n\t\t\"EndTime\": t.EndTime,\n\t}\n}\n\nfunc (t *TaskStatus) String() string {\n\treturn fmt.Sprintf(`%s\nDescription : %s\nStatus : %s\nWarnings : %v\nDone : %t\nStartTime : %s\nStatusTime : %s\nEndTime : %s`, t.Name, t.Description, t.Status, t.Warnings, t.Done, t.StartTime, t.StatusTime,\n\t\tt.EndTime)\n}\n\nfunc (t *TaskStatus) CopyTaskStatus() *TaskStatus {\n\treturn &TaskStatus{t.Name, t.Description, t.Status, t.Warnings, t.Done, t.StartTime, t.StatusTime,\n\t\tt.EndTime}\n}\n<commit_msg>specify task name in list<commit_after>package common\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst taskIDSize = 20\n\nvar (\n\tTracker = &TaskTracker{tasks: map[string]*Task{}}\n\tTaskStatusUnknown = &TaskStatus{Status: StatusUnknown}\n)\n\nfunc MaintenanceChecker(file string, interval time.Duration) {\n\tgo Tracker.MaintenanceChecker(file, interval)\n}\n\nfunc NewTask(name string, executor TaskExecutor) *Task {\n\ttask := &Task{Tracker: Tracker, Executor: executor}\n\ttask.Status = StatusInit\n\ttask.StatusTime = time.Now()\n\ttask.Name = name\n\ttask.Description = executor.Description()\n\ttask.Request = executor.Request()\n\treturn task\n}\n\ntype TaskTracker struct {\n\tsync.RWMutex\n\tResultDuration time.Duration\n\tMaintenance bool\n\ttasks map[string]*Task\n}\n\ntype Task struct {\n\tsync.RWMutex\n\tTaskStatus\n\tErr error\n\tTracker *TaskTracker\n\tID string\n\tExecutor TaskExecutor\n\tRequest interface{}\n\tResult interface{}\n}\n\ntype TaskExecutor interface {\n\tRequest() interface{}\n\tResult() interface{}\n\tDescription() string\n\tExecute(t *Task) error\n\tAuthorize() error\n}\n\ntype TaskMaintenanceExecutor interface {\n\tAllowDuringMaintenance() bool\n}\n\nfunc createTaskID() string {\n\treturn CreateRandomID(taskIDSize)\n}\n\nfunc (t *TaskTracker) ListIDs(typ string) []string {\n\tt.Lock()\n\tids := make([]string, len(t.tasks))\n\ti := 0\n\tfor id, task := range t.tasks {\n\t\tif task.Name != typ {\n\t\t\tcontinue\n\t\t}\n\t\tids[i] = id\n\t\ti++\n\t}\n\tt.Unlock()\n\treturn ids\n}\n\nfunc (t *TaskTracker) SetMaintenance(on bool) {\n\tt.Lock()\n\tt.Maintenance = on\n\tt.Unlock()\n}\n\nfunc (t *TaskTracker) UnderMaintenance() bool {\n\tt.RLock()\n\tmaint := t.Maintenance\n\tt.RUnlock()\n\treturn maint\n}\n\nfunc (t *TaskTracker) MaintenanceChecker(file string, interval time.Duration) {\n\tfor {\n\t\tif _, err := os.Stat(file); err == nil {\n\t\t\t\/\/ maintenance file exists\n\t\t\tif !t.UnderMaintenance() {\n\t\t\t\tlog.Println(\"Begin Maintenance\")\n\t\t\t\tt.SetMaintenance(true)\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ maintenance file doesn't exist or there is an error looking for it\n\t\t\tif t.UnderMaintenance() {\n\t\t\t\tlog.Println(\"End Maintenance\")\n\t\t\t\tt.SetMaintenance(false)\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(interval)\n\t}\n}\n\nfunc (t *TaskTracker) Idle(checkTask *Task) bool {\n\tidle := true\n\tt.RLock()\n\tfor _, task := range t.tasks {\n\t\tif task != checkTask && !task.Done {\n\t\t\tidle = false\n\t\t\tbreak\n\t\t}\n\t}\n\tt.RUnlock()\n\treturn idle\n}\n\nfunc (t *TaskTracker) ReserveTaskID(task *Task) string {\n\tt.Lock()\n\trequestID := createTaskID()\n\tfor _, present := t.tasks[requestID]; present; _, present = t.tasks[requestID] {\n\t\trequestID = createTaskID()\n\t}\n\tt.tasks[requestID] = task \/\/ reserve request id\n\tt.Unlock()\n\ttask.Lock()\n\ttask.ID = requestID\n\ttask.Unlock()\n\treturn requestID\n}\n\nfunc (t *TaskTracker) ReleaseTaskID(id string) {\n\tt.Lock()\n\tdelete(t.tasks, id)\n\tt.Unlock()\n}\n\nfunc (t *TaskTracker) Status(id string) (*TaskStatus, error) {\n\tt.RLock()\n\ttask := t.tasks[id]\n\tt.RUnlock()\n\tif task != nil {\n\t\ttask.RLock()\n\t\tstatus := task.CopyTaskStatus()\n\t\terr := task.Err\n\t\ttask.RUnlock()\n\t\treturn status, err\n\t}\n\treturn TaskStatusUnknown, errors.New(\"Unknown Task Status\")\n}\n\nfunc (t *TaskTracker) Result(id string) interface{} {\n\tt.RLock()\n\ttask := t.tasks[id]\n\tt.RUnlock()\n\tif task != nil {\n\t\ttask.RLock()\n\t\tresult := task.Result\n\t\ttask.RUnlock()\n\t\treturn result\n\t}\n\treturn nil\n}\n\nfunc (t *Task) Authorize() error {\n\treturn t.Executor.Authorize()\n}\n\nfunc (t *Task) Run() error {\n\tif t.Tracker.UnderMaintenance() {\n\t\texecutor, ok := t.Executor.(TaskMaintenanceExecutor)\n\t\tif !ok || !executor.AllowDuringMaintenance() {\n\t\t\treturn t.End(errors.New(\"Under Maintenance\"), false)\n\t\t}\n\t}\n\tt.Tracker.ReserveTaskID(t)\n\tt.Log(\"Begin %s\", t.Description)\n\tt.Lock()\n\tt.StartTime = time.Now()\n\tt.Unlock()\n\terr := t.Executor.Authorize()\n\tif err != nil {\n\t\treturn t.End(err, false)\n\t}\n\treturn t.End(t.Executor.Execute(t), false)\n}\n\nfunc (t *Task) RunAsync(r *AsyncReply) error {\n\tif t.Tracker.UnderMaintenance() {\n\t\texecutor, ok := t.Executor.(TaskMaintenanceExecutor)\n\t\tif !ok || !executor.AllowDuringMaintenance() {\n\t\t\treturn t.End(errors.New(\"Under Maintenance\"), false)\n\t\t}\n\t}\n\tt.Tracker.ReserveTaskID(t)\n\tt.RLock()\n\tr.ID = t.ID\n\tt.RUnlock()\n\tgo func() error {\n\t\tt.Log(\"Begin %s\", t.Description)\n\t\tt.Lock()\n\t\tt.StartTime = time.Now()\n\t\tt.Unlock()\n\t\terr := t.Executor.Authorize()\n\t\tif err != nil {\n\t\t\treturn t.End(err, true)\n\t\t}\n\t\tt.End(t.Executor.Execute(t), true)\n\t\treturn nil\n\t}()\n\treturn nil\n}\n\nfunc (t *Task) End(err error, async bool) error {\n\tlogString := fmt.Sprintf(\"End %s\", t.Description)\n\tt.Lock()\n\tt.Result = t.Executor.Result()\n\tt.EndTime = time.Now()\n\tt.StatusTime = t.EndTime\n\tif err == nil {\n\t\tt.Status = StatusDone\n\t\tt.Done = true\n\t} else {\n\t\tt.Status = StatusError\n\t\tt.Err = err\n\t\tt.Done = true\n\t\tlogString += fmt.Sprintf(\" - Error: %s\", err.Error())\n\t}\n\tt.Unlock()\n\tt.Log(logString)\n\tif async {\n\t\ttime.AfterFunc(t.Tracker.ResultDuration, func() {\n\t\t\t\/\/ keep result around for 30 min in case someone wants to check on it\n\t\t\tt.Tracker.ReleaseTaskID(t.ID)\n\t\t})\n\t} else {\n\t\tt.Tracker.ReleaseTaskID(t.ID)\n\t}\n\treturn err\n}\n\nfunc (t *Task) Log(format string, args ...interface{}) {\n\tt.RLock()\n\tlog.Printf(\"[RPC][\"+t.Name+\"][\"+t.ID+\"] \"+format, args...)\n\tt.RUnlock()\n}\n\nfunc (t *Task) LogStatus(format string, args ...interface{}) {\n\tt.Log(format, args...)\n\tt.Lock()\n\tt.StatusTime = time.Now()\n\tt.Status = fmt.Sprintf(format, args...)\n\tt.Unlock()\n}\n\nfunc (t *Task) AddWarning(warn string) {\n\tt.Lock()\n\tif t.Warnings == nil {\n\t\tt.Warnings = []string{warn}\n\t} else {\n\t\tt.Warnings = append(t.Warnings, warn)\n\t}\n\tt.Unlock()\n\tt.Log(\"WARNING: %s\", warn)\n}\n\ntype TaskStatus struct {\n\tName string\n\tDescription string\n\tStatus string\n\tWarnings []string\n\tDone bool\n\tStartTime time.Time\n\tStatusTime time.Time\n\tEndTime time.Time\n}\n\nfunc (t *TaskStatus) Map() map[string]interface{} {\n\treturn map[string]interface{}{\n\t\t\"Name\": t.Name,\n\t\t\"Description\": t.Description,\n\t\t\"Status\": t.Status,\n\t\t\"Warnings\": t.Warnings,\n\t\t\"Done\": t.Done,\n\t\t\"StartTime\": t.StartTime,\n\t\t\"StatusTime\": t.StatusTime,\n\t\t\"EndTime\": t.EndTime,\n\t}\n}\n\nfunc (t *TaskStatus) String() string {\n\treturn fmt.Sprintf(`%s\nDescription : %s\nStatus : %s\nWarnings : %v\nDone : %t\nStartTime : %s\nStatusTime : %s\nEndTime : %s`, t.Name, t.Description, t.Status, t.Warnings, t.Done, t.StartTime, t.StatusTime,\n\t\tt.EndTime)\n}\n\nfunc (t *TaskStatus) CopyTaskStatus() *TaskStatus {\n\treturn &TaskStatus{t.Name, t.Description, t.Status, t.Warnings, t.Done, t.StartTime, t.StatusTime,\n\t\tt.EndTime}\n}\n<|endoftext|>"} {"text":"<commit_before>package homecloud\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"reflect\"\n\t\"sync\"\n\n\t\"github.com\/ninjasphere\/go-ninja\/api\"\n\t\"github.com\/ninjasphere\/go-ninja\/logger\"\n\t\"github.com\/ninjasphere\/go-ninja\/model\"\n\t\"github.com\/ninjasphere\/redigo\/redis\"\n)\n\ntype SiteModel struct {\n\tbaseModel\n}\n\nfunc NewSiteModel(pool *redis.Pool, conn *ninja.Connection) *SiteModel {\n\treturn &SiteModel{\n\t\tbaseModel{\n\t\t\tsyncing: &sync.WaitGroup{},\n\t\t\tpool: pool,\n\t\t\tidType: \"site\",\n\t\t\tobjType: reflect.TypeOf(model.Site{}),\n\t\t\tconn: conn,\n\t\t\tlog: logger.GetLogger(\"SiteModel\"),\n\t\t},\n\t}\n}\n\nfunc (m *SiteModel) Fetch(siteID string) (*model.Site, error) {\n\tm.syncing.Wait()\n\n\tsite := &model.Site{}\n\n\tif err := m.fetch(siteID, site, false); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn site, nil\n}\n\nfunc (m *SiteModel) FetchAll() (*[]*model.Site, error) {\n\tm.syncing.Wait()\n\n\tids, err := m.fetchIds()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsites := make([]*model.Site, len(ids))\n\n\tfor i, id := range ids {\n\t\tsites[i], err = m.Fetch(id)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &sites, nil\n}\n\nfunc (m *SiteModel) Create(site *model.Site) error {\n\tm.syncing.Wait()\n\t\/\/defer m.sync()\n\n\tlog.Debugf(\"Saving site %s\", site.ID)\n\n\tupdated, err := m.save(site.ID, site)\n\n\tlog.Debugf(\"Site was updated? %t\", updated)\n\n\treturn err\n}\n\nfunc (m *SiteModel) Delete(id string) error {\n\tm.syncing.Wait()\n\t\/\/defer m.sync()\n\n\treturn m.delete(id)\n}\n\nfunc (m *SiteModel) Update(id string, site *model.Site) error {\n\tm.syncing.Wait()\n\t\/\/defer m.sync()\n\n\toldSite := &model.Site{}\n\n\tif err := m.fetch(id, oldSite, false); err != nil {\n\t\treturn fmt.Errorf(\"Failed to fetch site (id:%s): %s\", id, err)\n\t}\n\n\toldSite.Name = site.Name\n\toldSite.Type = site.Type\n\n\tif (oldSite.Latitude == nil || oldSite.Longitude == nil) || (*oldSite.Latitude != *site.Latitude || *oldSite.Longitude != *site.Longitude) {\n\t\toldSite.Latitude = site.Latitude\n\t\toldSite.Longitude = site.Longitude\n\n\t\ttz, err := getTimezone(*site.Latitude, *site.Longitude)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to get timezone: %s\", err)\n\t\t}\n\n\t\toldSite.TimeZoneID = tz.TimeZoneID\n\t\toldSite.TimeZoneName = tz.TimeZoneName\n\t\toldSite.TimeZoneOffset = tz.RawOffset\n\t\t\/\/ TODO: Not handling DST\n\t}\n\n\tif _, err := m.save(id, oldSite); err != nil {\n\t\treturn fmt.Errorf(\"Failed to update site (id:%s): %s\", id, err)\n\t}\n\n\treturn nil\n}\n\ntype googleTimezone struct {\n\tDstOffset *int `json:\"dstOffset,omitempty\"`\n\tRawOffset *int `json:\"rawOffset,omitempty\"`\n\tStatus *string `json:\"status,omitempty\"`\n\tTimeZoneID *string `json:\"timeZoneId,omitempty\"`\n\tTimeZoneName *string `json:\"timeZoneName,omitempty\"`\n}\n\nfunc getTimezone(latitude, longitude float64) (*googleTimezone, error) {\n\n\t\/\/ TODO: Send proper timestamp to get the dst... or...?\n\turl := fmt.Sprintf(\"https:\/\/maps.googleapis.com\/maps\/api\/timezone\/json?location=%f,%f×tamp=1414645501\", latitude, longitude)\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, errors.New(\"Could not access schema \" + resp.Status)\n\t}\n\n\tbodyBuff, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar tz googleTimezone\n\terr = json.Unmarshal(bodyBuff, &tz)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif *tz.Status != \"OK\" {\n\t\treturn nil, fmt.Errorf(\"Failed to get timezone: %s\", *tz.Status)\n\t}\n\n\t\/*\n\n\t req := &geocode.Request{\n\t Region: \"us\",\n\t Provider: geocode.GOOGLE,\n\t Location: &geocode.Point{-33.86, 151.20},\n\t }*\/\n\n\treturn &tz, nil\n}\n<commit_msg>Update linux timezone when setting site lat\/long<commit_after>package homecloud\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\/exec\"\n\t\"reflect\"\n\t\"sync\"\n\n\t\"github.com\/ninjasphere\/go-ninja\/api\"\n\t\"github.com\/ninjasphere\/go-ninja\/logger\"\n\t\"github.com\/ninjasphere\/go-ninja\/model\"\n\t\"github.com\/ninjasphere\/redigo\/redis\"\n)\n\ntype SiteModel struct {\n\tbaseModel\n}\n\nfunc NewSiteModel(pool *redis.Pool, conn *ninja.Connection) *SiteModel {\n\treturn &SiteModel{\n\t\tbaseModel{\n\t\t\tsyncing: &sync.WaitGroup{},\n\t\t\tpool: pool,\n\t\t\tidType: \"site\",\n\t\t\tobjType: reflect.TypeOf(model.Site{}),\n\t\t\tconn: conn,\n\t\t\tlog: logger.GetLogger(\"SiteModel\"),\n\t\t},\n\t}\n}\n\nfunc (m *SiteModel) Fetch(siteID string) (*model.Site, error) {\n\tm.syncing.Wait()\n\n\tsite := &model.Site{}\n\n\tif err := m.fetch(siteID, site, false); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn site, nil\n}\n\nfunc (m *SiteModel) FetchAll() (*[]*model.Site, error) {\n\tm.syncing.Wait()\n\n\tids, err := m.fetchIds()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsites := make([]*model.Site, len(ids))\n\n\tfor i, id := range ids {\n\t\tsites[i], err = m.Fetch(id)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &sites, nil\n}\n\nfunc (m *SiteModel) Create(site *model.Site) error {\n\tm.syncing.Wait()\n\t\/\/defer m.sync()\n\n\tlog.Debugf(\"Saving site %s\", site.ID)\n\n\tupdated, err := m.save(site.ID, site)\n\n\tlog.Debugf(\"Site was updated? %t\", updated)\n\n\treturn err\n}\n\nfunc (m *SiteModel) Delete(id string) error {\n\tm.syncing.Wait()\n\t\/\/defer m.sync()\n\n\treturn m.delete(id)\n}\n\nfunc (m *SiteModel) Update(id string, site *model.Site) error {\n\tm.syncing.Wait()\n\t\/\/defer m.sync()\n\n\toldSite := &model.Site{}\n\n\tif err := m.fetch(id, oldSite, false); err != nil {\n\t\treturn fmt.Errorf(\"Failed to fetch site (id:%s): %s\", id, err)\n\t}\n\n\toldSite.Name = site.Name\n\toldSite.Type = site.Type\n\n\tif (oldSite.Latitude == nil || oldSite.Longitude == nil) || (*oldSite.Latitude != *site.Latitude || *oldSite.Longitude != *site.Longitude) {\n\t\toldSite.Latitude = site.Latitude\n\t\toldSite.Longitude = site.Longitude\n\n\t\ttz, err := getTimezone(*site.Latitude, *site.Longitude)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to get timezone: %s\", err)\n\t\t}\n\n\t\toldSite.TimeZoneID = tz.TimeZoneID\n\t\toldSite.TimeZoneName = tz.TimeZoneName\n\t\toldSite.TimeZoneOffset = tz.RawOffset\n\t\t\/\/ TODO: Not handling DST\n\n\t\tif tz.TimeZoneID != nil && *tz.TimeZoneID != \"\" {\n\t\t\terr = setTimezone(*tz.TimeZoneID)\n\t\t\tif err != nil {\n\t\t\t\tlog.Warningf(\"Failed to set timezone: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\tif _, err := m.save(id, oldSite); err != nil {\n\t\treturn fmt.Errorf(\"Failed to update site (id:%s): %s\", id, err)\n\t}\n\n\treturn nil\n}\n\nfunc setTimezone(zone string) error {\n\t\/\/ln -s \/usr\/share\/zoneinfo\/Etc\/GMT$offset \/etc\/localtime\n\n\tcmd := exec.Command(\"ln\", \"-s\", \"-f\", \"\/usr\/share\/zoneinfo\/\"+zone, \"\/etc\/localtime\")\n\t_, err := cmd.Output()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\ntype googleTimezone struct {\n\tDstOffset *int `json:\"dstOffset,omitempty\"`\n\tRawOffset *int `json:\"rawOffset,omitempty\"`\n\tStatus *string `json:\"status,omitempty\"`\n\tTimeZoneID *string `json:\"timeZoneId,omitempty\"`\n\tTimeZoneName *string `json:\"timeZoneName,omitempty\"`\n}\n\nfunc getTimezone(latitude, longitude float64) (*googleTimezone, error) {\n\n\t\/\/ TODO: Send proper timestamp to get the dst... or...?\n\turl := fmt.Sprintf(\"https:\/\/maps.googleapis.com\/maps\/api\/timezone\/json?location=%f,%f×tamp=1414645501\", latitude, longitude)\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, errors.New(\"Could not access schema \" + resp.Status)\n\t}\n\n\tbodyBuff, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar tz googleTimezone\n\terr = json.Unmarshal(bodyBuff, &tz)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif *tz.Status != \"OK\" {\n\t\treturn nil, fmt.Errorf(\"Failed to get timezone: %s\", *tz.Status)\n\t}\n\n\t\/*\n\n\t req := &geocode.Request{\n\t Region: \"us\",\n\t Provider: geocode.GOOGLE,\n\t Location: &geocode.Point{-33.86, 151.20},\n\t }*\/\n\n\treturn &tz, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/lxc\/lxd\/lxd\/cluster\"\n\t\"github.com\/lxc\/lxd\/lxd\/db\"\n\t\"github.com\/lxc\/lxd\/lxd\/network\"\n\t\"github.com\/lxc\/lxd\/lxd\/project\"\n\t\"github.com\/lxc\/lxd\/lxd\/state\"\n\t\"github.com\/lxc\/lxd\/shared\/logger\"\n)\n\nfunc networkAutoAttach(cluster *db.Cluster, devName string) error {\n\t_, dbInfo, err := cluster.GetNetworkWithInterface(devName)\n\tif err != nil {\n\t\t\/\/ No match found, move on\n\t\treturn nil\n\t}\n\n\treturn network.AttachInterface(dbInfo.Name, devName)\n}\n\n\/\/ networkUpdateForkdnsServersTask runs every 30s and refreshes the forkdns servers list.\nfunc networkUpdateForkdnsServersTask(s *state.State, heartbeatData *cluster.APIHeartbeat) error {\n\t\/\/ Use project.Default here as forkdns (fan bridge) networks don't support projects.\n\tprojectName := project.Default\n\n\t\/\/ Get a list of managed networks\n\tnetworks, err := s.Cluster.GetCreatedNetworks(projectName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, name := range networks {\n\t\tn, err := network.LoadByName(s, projectName, name)\n\t\tif err != nil {\n\t\t\tlogger.Errorf(\"Failed to load network %q from project %q for heartbeat\", name, projectName)\n\t\t\tcontinue\n\t\t}\n\n\t\tif n.Type() == \"bridge\" && n.Config()[\"bridge.mode\"] == \"fan\" {\n\t\t\terr := n.HandleHeartbeat(heartbeatData)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>lxd\/networks\/utils: Log forkdns refresh task starting in networkUpdateForkdnsServersTask<commit_after>package main\n\nimport (\n\t\"github.com\/lxc\/lxd\/lxd\/cluster\"\n\t\"github.com\/lxc\/lxd\/lxd\/db\"\n\t\"github.com\/lxc\/lxd\/lxd\/network\"\n\t\"github.com\/lxc\/lxd\/lxd\/project\"\n\t\"github.com\/lxc\/lxd\/lxd\/state\"\n\t\"github.com\/lxc\/lxd\/shared\/logger\"\n)\n\nfunc networkAutoAttach(cluster *db.Cluster, devName string) error {\n\t_, dbInfo, err := cluster.GetNetworkWithInterface(devName)\n\tif err != nil {\n\t\t\/\/ No match found, move on\n\t\treturn nil\n\t}\n\n\treturn network.AttachInterface(dbInfo.Name, devName)\n}\n\n\/\/ networkUpdateForkdnsServersTask runs every 30s and refreshes the forkdns servers list.\nfunc networkUpdateForkdnsServersTask(s *state.State, heartbeatData *cluster.APIHeartbeat) error {\n\tlogger.Debug(\"Refreshing forkdns servers\")\n\n\t\/\/ Use project.Default here as forkdns (fan bridge) networks don't support projects.\n\tprojectName := project.Default\n\n\t\/\/ Get a list of managed networks\n\tnetworks, err := s.Cluster.GetCreatedNetworks(projectName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, name := range networks {\n\t\tn, err := network.LoadByName(s, projectName, name)\n\t\tif err != nil {\n\t\t\tlogger.Errorf(\"Failed to load network %q from project %q for heartbeat\", name, projectName)\n\t\t\tcontinue\n\t\t}\n\n\t\tif n.Type() == \"bridge\" && n.Config()[\"bridge.mode\"] == \"fan\" {\n\t\t\terr := n.HandleHeartbeat(heartbeatData)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package management\n\nvar (\n\tsdkVersion = \"8.0.0-beta\"\n)\n<commit_msg>Updating reported management version.<commit_after>package management\n\nvar (\n\tsdkVersion = \"9.0.0-beta\"\n)\n<|endoftext|>"} {"text":"<commit_before>package pkg\n\nimport (\n\tgopath \"path\"\n\n\tbosherr \"github.com\/cloudfoundry\/bosh-utils\/errors\"\n\tboshsys \"github.com\/cloudfoundry\/bosh-utils\/system\"\n\n\t. \"github.com\/cloudfoundry\/bosh-init\/release\/pkg\/manifest\"\n\t. \"github.com\/cloudfoundry\/bosh-init\/release\/resource\"\n)\n\ntype DirReaderImpl struct {\n\tarchiveFactory ArchiveFunc\n\n\tsrcDirPath string\n\tblobsDirPath string\n\n\tfs boshsys.FileSystem\n}\n\nfunc NewDirReaderImpl(\n\tarchiveFactory ArchiveFunc,\n\tsrcDirPath string,\n\tblobsDirPath string,\n\tfs boshsys.FileSystem,\n) DirReaderImpl {\n\treturn DirReaderImpl{\n\t\tarchiveFactory: archiveFactory,\n\t\tsrcDirPath: srcDirPath,\n\t\tblobsDirPath: blobsDirPath,\n\t\tfs: fs,\n\t}\n}\n\nfunc (r DirReaderImpl) Read(path string) (*Package, error) {\n\tmanifest, files, prepFiles, err := r.collectFiles(path)\n\tif err != nil {\n\t\treturn nil, bosherr.WrapErrorf(err, \"Collecting package files\")\n\t}\n\n\t\/\/ Note that files do not include package's spec file,\n\t\/\/ but rather specify dependencies as additional chunks for the fingerprint.\n\tarchive := r.archiveFactory(files, prepFiles, manifest.Dependencies)\n\n\tfp, err := archive.Fingerprint()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresource := NewResource(manifest.Name, fp, archive)\n\n\treturn NewPackage(resource, manifest.Dependencies), nil\n}\n\nfunc (r DirReaderImpl) collectFiles(path string) (Manifest, []File, []File, error) {\n\tvar files, prepFiles []File\n\n\tspecPath := gopath.Join(path, \"spec\")\n\n\tmanifest, err := NewManifestFromPath(specPath, r.fs)\n\tif err != nil {\n\t\treturn Manifest{}, nil, nil, err\n\t}\n\n\tpackagingPath := gopath.Join(path, \"packaging\")\n\n\tif r.fs.FileExists(packagingPath) {\n\t\tfile := NewFile(packagingPath, path)\n\t\tfile.ExcludeMode = true\n\t\tfiles = append(files, file)\n\t} else {\n\t\treturn manifest, nil, nil, bosherr.Errorf(\n\t\t\t\"Expected to find '%s' for package '%s'\", packagingPath, manifest.Name)\n\t}\n\n\tprePackagingPath := gopath.Join(path, \"pre_packaging\")\n\n\tif r.fs.FileExists(prePackagingPath) {\n\t\tfile := NewFile(prePackagingPath, path)\n\t\tfile.ExcludeMode = true\n\t\tfiles = append(files, file)\n\t\tprepFiles = append(prepFiles, file)\n\t}\n\n\tfilesByRelPath := map[string]File{}\n\n\tfor _, glob := range manifest.Files {\n\t\tsrcDirMatches, err := r.fs.Glob(gopath.Join(r.srcDirPath, glob))\n\t\tif err != nil {\n\t\t\treturn manifest, nil, nil, bosherr.WrapErrorf(err, \"Listing package files in src\")\n\t\t}\n\n\t\tfor _, path := range srcDirMatches {\n\t\t\tfile := NewFile(path, r.srcDirPath)\n\t\t\tif _, found := filesByRelPath[file.RelativePath]; !found {\n\t\t\t\tfilesByRelPath[file.RelativePath] = file\n\t\t\t}\n\t\t}\n\n\t\tblobsDirMatches, err := r.fs.Glob(gopath.Join(r.blobsDirPath, glob))\n\t\tif err != nil {\n\t\t\treturn manifest, nil, nil, bosherr.WrapErrorf(err, \"Listing package files in blobs\")\n\t\t}\n\n\t\tfor _, path := range blobsDirMatches {\n\t\t\tfile := NewFile(path, r.blobsDirPath)\n\t\t\tif _, found := filesByRelPath[file.RelativePath]; !found {\n\t\t\t\tfilesByRelPath[file.RelativePath] = file\n\t\t\t}\n\t\t}\n\t}\n\n\tvar excludedFiles []File\n\n\tfor _, glob := range manifest.ExcludedFiles {\n\t\tsrcDirMatches, err := r.fs.Glob(gopath.Join(r.srcDirPath, glob))\n\t\tif err != nil {\n\t\t\treturn manifest, nil, nil, bosherr.WrapErrorf(err, \"Listing package excluded files in src\")\n\t\t}\n\n\t\tfor _, path := range srcDirMatches {\n\t\t\tfile := NewFile(path, r.srcDirPath)\n\t\t\texcludedFiles = append(excludedFiles, file)\n\t\t}\n\n\t\tblobsDirMatches, err := r.fs.Glob(gopath.Join(r.blobsDirPath, glob))\n\t\tif err != nil {\n\t\t\treturn manifest, nil, nil, bosherr.WrapErrorf(err, \"Listing package excluded files in blobs\")\n\t\t}\n\n\t\tfor _, path := range blobsDirMatches {\n\t\t\tfile := NewFile(path, r.blobsDirPath)\n\t\t\texcludedFiles = append(excludedFiles, file)\n\t\t}\n\t}\n\n\tfor _, excludedFile := range excludedFiles {\n\t\tdelete(filesByRelPath, excludedFile.RelativePath)\n\t}\n\n\tfor _, specialFileName := range []string{\"packaging\", \"pre_packaging\"} {\n\t\tif _, ok := filesByRelPath[specialFileName]; ok {\n\t\t\terrMsg := \"Expected special '%s' file to not be included via 'files' key for package '%s'\"\n\t\t\treturn manifest, nil, nil, bosherr.Errorf(errMsg, specialFileName, manifest.Name)\n\t\t}\n\t}\n\n\tfor _, file := range filesByRelPath {\n\t\tfiles = append(files, file)\n\t}\n\n\treturn manifest, files, prepFiles, nil\n}\n<commit_msg>Use recursive glob function for pkg dir reader to support **<commit_after>package pkg\n\nimport (\n\tgopath \"path\"\n\n\tbosherr \"github.com\/cloudfoundry\/bosh-utils\/errors\"\n\tboshsys \"github.com\/cloudfoundry\/bosh-utils\/system\"\n\n\t. \"github.com\/cloudfoundry\/bosh-init\/release\/pkg\/manifest\"\n\t. \"github.com\/cloudfoundry\/bosh-init\/release\/resource\"\n)\n\ntype DirReaderImpl struct {\n\tarchiveFactory ArchiveFunc\n\n\tsrcDirPath string\n\tblobsDirPath string\n\n\tfs boshsys.FileSystem\n}\n\nfunc NewDirReaderImpl(\n\tarchiveFactory ArchiveFunc,\n\tsrcDirPath string,\n\tblobsDirPath string,\n\tfs boshsys.FileSystem,\n) DirReaderImpl {\n\treturn DirReaderImpl{\n\t\tarchiveFactory: archiveFactory,\n\t\tsrcDirPath: srcDirPath,\n\t\tblobsDirPath: blobsDirPath,\n\t\tfs: fs,\n\t}\n}\n\nfunc (r DirReaderImpl) Read(path string) (*Package, error) {\n\tmanifest, files, prepFiles, err := r.collectFiles(path)\n\tif err != nil {\n\t\treturn nil, bosherr.WrapErrorf(err, \"Collecting package files\")\n\t}\n\n\t\/\/ Note that files do not include package's spec file,\n\t\/\/ but rather specify dependencies as additional chunks for the fingerprint.\n\tarchive := r.archiveFactory(files, prepFiles, manifest.Dependencies)\n\n\tfp, err := archive.Fingerprint()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresource := NewResource(manifest.Name, fp, archive)\n\n\treturn NewPackage(resource, manifest.Dependencies), nil\n}\n\nfunc (r DirReaderImpl) collectFiles(path string) (Manifest, []File, []File, error) {\n\tvar files, prepFiles []File\n\n\tspecPath := gopath.Join(path, \"spec\")\n\n\tmanifest, err := NewManifestFromPath(specPath, r.fs)\n\tif err != nil {\n\t\treturn Manifest{}, nil, nil, err\n\t}\n\n\tpackagingPath := gopath.Join(path, \"packaging\")\n\n\tif r.fs.FileExists(packagingPath) {\n\t\tfile := NewFile(packagingPath, path)\n\t\tfile.ExcludeMode = true\n\t\tfiles = append(files, file)\n\t} else {\n\t\treturn manifest, nil, nil, bosherr.Errorf(\n\t\t\t\"Expected to find '%s' for package '%s'\", packagingPath, manifest.Name)\n\t}\n\n\tprePackagingPath := gopath.Join(path, \"pre_packaging\")\n\n\tif r.fs.FileExists(prePackagingPath) {\n\t\tfile := NewFile(prePackagingPath, path)\n\t\tfile.ExcludeMode = true\n\t\tfiles = append(files, file)\n\t\tprepFiles = append(prepFiles, file)\n\t}\n\n\tfilesByRelPath := map[string]File{}\n\n\tfor _, glob := range manifest.Files {\n\t\tsrcDirMatches, err := r.fs.RecursiveGlob(gopath.Join(r.srcDirPath, glob))\n\t\tif err != nil {\n\t\t\treturn manifest, nil, nil, bosherr.WrapErrorf(err, \"Listing package files in src\")\n\t\t}\n\n\t\tfor _, path := range srcDirMatches {\n\t\t\tfile := NewFile(path, r.srcDirPath)\n\t\t\tif _, found := filesByRelPath[file.RelativePath]; !found {\n\t\t\t\tfilesByRelPath[file.RelativePath] = file\n\t\t\t}\n\t\t}\n\n\t\tblobsDirMatches, err := r.fs.RecursiveGlob(gopath.Join(r.blobsDirPath, glob))\n\t\tif err != nil {\n\t\t\treturn manifest, nil, nil, bosherr.WrapErrorf(err, \"Listing package files in blobs\")\n\t\t}\n\n\t\tfor _, path := range blobsDirMatches {\n\t\t\tfile := NewFile(path, r.blobsDirPath)\n\t\t\tif _, found := filesByRelPath[file.RelativePath]; !found {\n\t\t\t\tfilesByRelPath[file.RelativePath] = file\n\t\t\t}\n\t\t}\n\t}\n\n\tvar excludedFiles []File\n\n\tfor _, glob := range manifest.ExcludedFiles {\n\t\tsrcDirMatches, err := r.fs.RecursiveGlob(gopath.Join(r.srcDirPath, glob))\n\t\tif err != nil {\n\t\t\treturn manifest, nil, nil, bosherr.WrapErrorf(err, \"Listing package excluded files in src\")\n\t\t}\n\n\t\tfor _, path := range srcDirMatches {\n\t\t\tfile := NewFile(path, r.srcDirPath)\n\t\t\texcludedFiles = append(excludedFiles, file)\n\t\t}\n\n\t\tblobsDirMatches, err := r.fs.RecursiveGlob(gopath.Join(r.blobsDirPath, glob))\n\t\tif err != nil {\n\t\t\treturn manifest, nil, nil, bosherr.WrapErrorf(err, \"Listing package excluded files in blobs\")\n\t\t}\n\n\t\tfor _, path := range blobsDirMatches {\n\t\t\tfile := NewFile(path, r.blobsDirPath)\n\t\t\texcludedFiles = append(excludedFiles, file)\n\t\t}\n\t}\n\n\tfor _, excludedFile := range excludedFiles {\n\t\tdelete(filesByRelPath, excludedFile.RelativePath)\n\t}\n\n\tfor _, specialFileName := range []string{\"packaging\", \"pre_packaging\"} {\n\t\tif _, ok := filesByRelPath[specialFileName]; ok {\n\t\t\terrMsg := \"Expected special '%s' file to not be included via 'files' key for package '%s'\"\n\t\t\treturn manifest, nil, nil, bosherr.Errorf(errMsg, specialFileName, manifest.Name)\n\t\t}\n\t}\n\n\tfor _, file := range filesByRelPath {\n\t\tfiles = append(files, file)\n\t}\n\n\treturn manifest, files, prepFiles, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2016 caicloud authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage provider\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/caicloud\/cyclone\/api\"\n\t\"github.com\/caicloud\/cyclone\/pkg\/log\"\n\t\"github.com\/caicloud\/cyclone\/pkg\/osutil\"\n\t\"github.com\/caicloud\/cyclone\/store\"\n\t\"github.com\/google\/go-github\/github\"\n\t\"golang.org\/x\/oauth2\"\n)\n\nconst CYCLONE_SERVER_HOST = \"CYCLONE_SERVER_HOST\"\n\n\/\/ GitHub is the type for Github remote provider.\ntype GitHub struct {\n}\n\n\/\/ NewGitHub returns a new GitHub remoter.\nfunc NewGitHub() *GitHub {\n\treturn &GitHub{}\n}\n\n\/\/ Pack the information into oauth.config that is used to get token\n\/\/ ClientID、ClientSecret,these values use to assemble the token request url and\n\/\/ there values come from github or other by registering some information.\nfunc (g *GitHub) getConf() (*oauth2.Config, error) {\n\t\/\/cyclonePath http request listen address\n\tcyclonePath := osutil.GetStringEnv(CYCLONE_SERVER_HOST, \"http:\/\/localhost:7099\")\n\tclientID := osutil.GetStringEnv(\"CLIENTID\", \"\")\n\tclientSecret := osutil.GetStringEnv(\"CLIENTIDSECRET\", \"\")\n\treturn &oauth2.Config{\n\t\tClientID: clientID,\n\t\tClientSecret: clientSecret,\n\t\tRedirectURL: fmt.Sprintf(\"%s\/api\/%s\/remotes\/%s\/authcallback\", cyclonePath, api.APIVersion, \"github\"),\n\t\tScopes: []string{\"repo\"},\n\t\tEndpoint: oauth2.Endpoint{\n\t\t\tAuthURL: \"https:\/\/github.com\/login\/oauth\/authorize\",\n\t\t\tTokenURL: \"https:\/\/github.com\/login\/oauth\/access_token\",\n\t\t},\n\t}, nil\n}\n\n\/\/ GetTokenQuestURL gets the URL for token request.\nfunc (g *GitHub) GetTokenQuestURL(userID string) (string, error) {\n\t\/\/ Get a object to request token.\n\tconf, err := g.getConf()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t\/\/ Get the request url and send to the github or other.\n\turl := conf.AuthCodeURL(userID) \/\/ Use userid as state.\n\tlog.InfoWithFields(\"cyclone receives creating token request\",\n\t\tlog.Fields{\"request url\": url})\n\n\tif !strings.Contains(url, \"github\") {\n\t\tlog.ErrorWithFields(\"Unable to get the url\", log.Fields{\"user_id\": userID})\n\t\treturn \"\", fmt.Errorf(\"Unable to get the url\")\n\t}\n\treturn url, nil\n}\n\n\/\/ Authcallback is the callback handler.\nfunc (g *GitHub) Authcallback(code, state string) (string, error) {\n\tif code == \"\" || state == \"\" {\n\t\treturn \"\", fmt.Errorf(\"code or state is nil\")\n\t}\n\n\t\/\/ Caicloud web address,eg caicloud.io\n\tuiPath := osutil.GetStringEnv(\"CONSOLE_WEB_ENDPOINT\", \"http:\/\/localhost:8000\")\n\tredirectURL := fmt.Sprintf(\"%s\/cyclone\/add?type=github&code=%s&state=%s\", uiPath, code, state)\n\n\t\/\/ Sync to get token.\n\tgo g.getToken(code, state)\n\treturn redirectURL, nil\n}\n\n\/\/ get token by using code from github.\nfunc (g *GitHub) getToken(code, state string) error {\n\tif code == \"\" || state == \"\" {\n\t\tlog.ErrorWithFields(\"code or state is nil\", log.Fields{\"code\": code, \"state\": state})\n\t\treturn fmt.Errorf(\"code or state is nil\")\n\t}\n\tlog.InfoWithFields(\"cyclone receives auth code\", log.Fields{\"request code\": code})\n\n\t\/\/ Get a object to request token.\n\tconf, err := g.getConf()\n\tif err != nil {\n\t\tlog.Warnf(\"Unable to get the conf according coderepository\")\n\t\treturn err\n\t}\n\n\t\/\/ To communication with githubo or other vcs to get token.\n\tvar tok *oauth2.Token\n\ttok, err = conf.Exchange(oauth2.NoContext, code) \/\/ Post a token request and receive toeken.\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\n\tif !tok.Valid() {\n\t\tlog.Fatalf(\"Token invalid. Got: %#v\", tok)\n\t\treturn err\n\t}\n\tlog.Info(\"get the token successfully!\")\n\n\t\/\/ Create service in database (but not ready to be used yet).\n\tvcstoken := api.VscToken{\n\t\tUserID: state,\n\t\tVsc: \"github\",\n\t\tVsctoken: *tok,\n\t}\n\n\tds := store.NewStore()\n\tdefer ds.Close()\n\n\t_, err = ds.FindtokenByUserID(state, \"github\")\n\tif err != nil {\n\t\terr = ds.NewTokenDocument(&vcstoken)\n\t\tif err != nil {\n\t\t\tlog.ErrorWithFields(\"NewTokenDocument\", log.Fields{\"user_id\": state,\n\t\t\t\t\"token\": tok, \"error\": err})\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\terr = ds.UpdateToken(&vcstoken)\n\t\tif err != nil {\n\t\t\tlog.ErrorWithFields(\"UpdateToken\", log.Fields{\"user_id\": state,\n\t\t\t\t\"token\": tok, \"error\": err})\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ GetRepos gets token by using code from github.\nfunc (g *GitHub) GetRepos(userID string) (Repos []api.Repo, username, avatarURL string, err error) {\n\tds := store.NewStore()\n\tdefer ds.Close()\n\n\ttok, err := ds.FindtokenByUserID(userID, \"github\")\n\tif err != nil {\n\t\tlog.ErrorWithFields(\"find token failed\", log.Fields{\"user_id\": userID, \"error\": err})\n\t\treturn Repos, username, avatarURL, err\n\t}\n\n\t\/\/ Use token to get repo list.\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: tok.Vsctoken.AccessToken},\n\t)\n\ttc := oauth2.NewClient(oauth2.NoContext, ts)\n\n\tclient := github.NewClient(tc)\n\n\t\/\/ List all repositories for the authenticated user.\n\topt := &github.RepositoryListOptions{\n\t\tListOptions: github.ListOptions{PerPage: 30},\n\t}\n\t\/\/ Get all pages of results.\n\tvar allRepos []*github.Repository\n\tfor {\n\t\trepos, resp, err := client.Repositories.List(\"\", opt)\n\t\tif err != nil {\n\t\t\tmessage := \"Unable to list repo by token\"\n\t\t\tlog.ErrorWithFields(message, log.Fields{\"user_id\": userID, \"token\": tok, \"error\": err})\n\t\t\treturn Repos, username, avatarURL, fmt.Errorf(\"Unable to list repo by token\")\n\t\t}\n\t\tallRepos = append(allRepos, repos...)\n\t\tif resp.NextPage == 0 {\n\t\t\tbreak\n\t\t}\n\t\topt.ListOptions.Page = resp.NextPage\n\t}\n\n\tRepos = make([]api.Repo, len(allRepos))\n\tfor i, repo := range allRepos {\n\t\tRepos[i].Name = *repo.Name\n\t\tRepos[i].URL = *repo.CloneURL\n\t\tRepos[i].Owner = *repo.Owner.Login\n\t}\n\n\tuser, _, err := client.Users.Get(\"\")\n\tif err != nil {\n\t\tlog.ErrorWithFields(\"Users.Get returned error\", log.Fields{\"user_id\": userID,\n\t\t\t\"token\": tok, \"error\": err})\n\t\treturn Repos, username, avatarURL, err\n\t}\n\tusername = *user.Login\n\tavatarURL = *user.AvatarURL\n\n\treturn Repos, username, avatarURL, nil\n}\n\n\/\/ LogOut logs out.\nfunc (g *GitHub) LogOut(userID string) error {\n\t\/\/ Fiind the token by userid and code repository.\n\tds := store.NewStore()\n\tdefer ds.Close()\n\ttok, err := ds.FindtokenByUserID(userID, \"github\")\n\tif err != nil {\n\t\tlog.ErrorWithFields(\"find token failed\", log.Fields{\"user_id\": userID, \"error\": err})\n\t\treturn err\n\t}\n\n\tconf, err := g.getConf()\n\tif err != nil {\n\t\tlog.ErrorWithFields(\"Unable to get the conf according coderepository\",\n\t\t\tlog.Fields{\"user_id\": userID, \"error\": err})\n\t\treturn err\n\t}\n\n\ttp := github.BasicAuthTransport{\n\t\tUsername: conf.ClientID,\n\t\tPassword: conf.ClientSecret,\n\t}\n\n\tclient := github.NewClient(tp.Client())\n\t_, err = client.Authorizations.Revoke(conf.ClientID, tok.Vsctoken.AccessToken)\n\tif err != nil {\n\t\tlog.ErrorWithFields(\"revoke failed\", log.Fields{\"user_id\": userID, \"error\": err})\n\t\treturn err\n\t}\n\n\t\/\/ Remove the token saved in DB.\n\terr = ds.RemoveTokeninDB(userID, \"github\")\n\tif err != nil {\n\t\tlog.ErrorWithFields(\"remove token failed\", log.Fields{\"user_id\": userID, \"error\": err})\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ CreateHook is a helper to register webhook.\nfunc (g *GitHub) CreateHook(service *api.Service) error {\n\twebhooktype := service.Repository.Webhook\n\tif webhooktype == \"\" {\n\t\treturn fmt.Errorf(\"no need webhook registry\")\n\t}\n\n\tif webhooktype == api.GITHUB {\n\t\turl := getHookURL(webhooktype, service.ServiceID)\n\t\tif url == \"\" {\n\t\t\tlog.Infof(\"url is empty\", log.Fields{\"user_id\": service.UserID})\n\t\t\treturn nil\n\t\t}\n\n\t\tds := store.NewStore()\n\t\tdefer ds.Close()\n\n\t\ttok, err := ds.FindtokenByUserID(service.UserID, api.GITHUB)\n\t\tif err != nil {\n\t\t\tlog.ErrorWithFields(\"find token failed\", log.Fields{\"user_id\": service.UserID, \"error\": err})\n\t\t\treturn err\n\t\t}\n\n\t\tts := oauth2.StaticTokenSource(\n\t\t\t&oauth2.Token{AccessToken: tok.Vsctoken.AccessToken},\n\t\t)\n\t\ttc := oauth2.NewClient(oauth2.NoContext, ts)\n\t\tclient := github.NewClient(tc)\n\n\t\tvar hook github.Hook\n\t\thook.Name = github.String(\"web\")\n\t\thook.Events = []string{\"push\", \"pull_request\"}\n\t\thook.Config = map[string]interface{}{}\n\t\thook.Config[\"url\"] = url\n\t\thook.Config[\"content_type\"] = \"json\"\n\t\tonwer, name := parseURL(service.Repository.URL)\n\t\t_, _, err = client.Repositories.CreateHook(onwer, name, &hook)\n\t\treturn err\n\t}\n\tlog.WarnWithFields(\"not support vcs repository\", log.Fields{\"vcs repository\": webhooktype})\n\treturn fmt.Errorf(\"not support vcs repository in create webhook\")\n}\n\n\/\/ DeleteHook is a helper to unregister webhook.\nfunc (g *GitHub) DeleteHook(service *api.Service) error {\n\twebhooktype := service.Repository.Webhook\n\tif webhooktype == \"\" {\n\t\treturn fmt.Errorf(\"no need webhook registry\")\n\t}\n\n\tif webhooktype == api.GITHUB {\n\t\turl := getHookURL(webhooktype, service.ServiceID)\n\t\tif url == \"\" {\n\t\t\tlog.Infof(\"url is empty\", log.Fields{\"user_id\": service.UserID})\n\t\t\treturn nil\n\t\t}\n\n\t\tds := store.NewStore()\n\t\tdefer ds.Close()\n\n\t\ttok, err := ds.FindtokenByUserID(service.UserID, api.GITHUB)\n\t\tif err != nil {\n\t\t\tlog.ErrorWithFields(\"find token failed\", log.Fields{\"user_id\": service.UserID, \"error\": err})\n\t\t\treturn err\n\t\t}\n\n\t\tts := oauth2.StaticTokenSource(\n\t\t\t&oauth2.Token{AccessToken: tok.Vsctoken.AccessToken},\n\t\t)\n\t\ttc := oauth2.NewClient(oauth2.NoContext, ts)\n\t\tclient := github.NewClient(tc)\n\n\t\towner, name := parseURL(service.Repository.URL)\n\t\thooks, _, err := client.Repositories.ListHooks(owner, name, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar hook *github.Hook\n\t\thasFoundHook := false\n\t\tfor _, hook = range hooks {\n\t\t\thookurl, ok := hook.Config[\"url\"].(string)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif strings.HasPrefix(hookurl, url) {\n\t\t\t\thasFoundHook = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif hasFoundHook {\n\t\t\t_, err = client.Repositories.DeleteHook(owner, name, *hook.ID)\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\tlog.WarnWithFields(\"not support vcs repository\", log.Fields{\"vcs repository\": webhooktype})\n\treturn fmt.Errorf(\"not support vcs repository in delete webhook\")\n}\n\n\/\/ PostCommitStatus posts Commit Status To Github.\nfunc (g *GitHub) PostCommitStatus(service *api.Service, version *api.Version) error {\n\t\/\/ Check if github webhook has set.\n\tif service.Repository.Webhook != api.GITHUB {\n\t\treturn fmt.Errorf(\"vcs github webhook hasn't set\")\n\t}\n\n\t\/\/ Check if has set commitID.\n\tif version.Commit == \"\" {\n\t\treturn fmt.Errorf(\"commit hasn't set\")\n\t}\n\n\t\/\/ Get token.\n\tds := store.NewStore()\n\tdefer ds.Close()\n\ttok, err := ds.FindtokenByUserID(service.UserID, api.GITHUB)\n\tif err != nil {\n\t\tlog.ErrorWithFields(\"find token failed\", log.Fields{\"user_id\": service.UserID, \"error\": err})\n\t\treturn err\n\t}\n\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: tok.Vsctoken.AccessToken},\n\t)\n\ttc := oauth2.NewClient(oauth2.NoContext, ts)\n\tclient := github.NewClient(tc)\n\n\t\/\/ Post commit status.\n\towner, repo := parseURL(service.Repository.URL)\n\turlHost := osutil.GetStringEnv(CYCLONE_SERVER_HOST, \"https:\/\/fornax-canary.caicloud.io\")\n\n\tvar state string\n\tif version.Status == api.VersionHealthy {\n\t\tstate = api.CISuccess\n\t} else if version.Status == api.VersionFailed || version.Status == api.VersionCancel {\n\t\tstate = api.CIFailure\n\t} else {\n\t\tstate = api.CIPending\n\t}\n\n\tlog.Infof(\"Now, version status is %s, post %s to github\", version.Status, state)\n\turlLog := fmt.Sprintf(\"%s\/log?user=%s&service=%s&version=%s\", urlHost, service.UserID,\n\t\tservice.ServiceID, version.VersionID)\n\tlog.Infof(\"Log getting url: %s\", urlLog)\n\tstatus := &github.RepoStatus{\n\t\tState: github.String(state),\n\t\tTargetURL: github.String(urlLog),\n\t\tDescription: github.String(version.ErrorMessage),\n\t\tContext: github.String(\"Cyclone\"),\n\t}\n\n\t_, _, err = client.Repositories.CreateStatus(owner, repo, version.Commit, status)\n\treturn err\n}\n<commit_msg>Add service name to description of GitHub PR status.<commit_after>\/*\nCopyright 2016 caicloud authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage provider\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/caicloud\/cyclone\/api\"\n\t\"github.com\/caicloud\/cyclone\/pkg\/log\"\n\t\"github.com\/caicloud\/cyclone\/pkg\/osutil\"\n\t\"github.com\/caicloud\/cyclone\/store\"\n\t\"github.com\/google\/go-github\/github\"\n\t\"golang.org\/x\/oauth2\"\n)\n\nconst CYCLONE_SERVER_HOST = \"CYCLONE_SERVER_HOST\"\n\n\/\/ GitHub is the type for Github remote provider.\ntype GitHub struct {\n}\n\n\/\/ NewGitHub returns a new GitHub remoter.\nfunc NewGitHub() *GitHub {\n\treturn &GitHub{}\n}\n\n\/\/ Pack the information into oauth.config that is used to get token\n\/\/ ClientID、ClientSecret,these values use to assemble the token request url and\n\/\/ there values come from github or other by registering some information.\nfunc (g *GitHub) getConf() (*oauth2.Config, error) {\n\t\/\/cyclonePath http request listen address\n\tcyclonePath := osutil.GetStringEnv(CYCLONE_SERVER_HOST, \"http:\/\/localhost:7099\")\n\tclientID := osutil.GetStringEnv(\"CLIENTID\", \"\")\n\tclientSecret := osutil.GetStringEnv(\"CLIENTIDSECRET\", \"\")\n\treturn &oauth2.Config{\n\t\tClientID: clientID,\n\t\tClientSecret: clientSecret,\n\t\tRedirectURL: fmt.Sprintf(\"%s\/api\/%s\/remotes\/%s\/authcallback\", cyclonePath, api.APIVersion, \"github\"),\n\t\tScopes: []string{\"repo\"},\n\t\tEndpoint: oauth2.Endpoint{\n\t\t\tAuthURL: \"https:\/\/github.com\/login\/oauth\/authorize\",\n\t\t\tTokenURL: \"https:\/\/github.com\/login\/oauth\/access_token\",\n\t\t},\n\t}, nil\n}\n\n\/\/ GetTokenQuestURL gets the URL for token request.\nfunc (g *GitHub) GetTokenQuestURL(userID string) (string, error) {\n\t\/\/ Get a object to request token.\n\tconf, err := g.getConf()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t\/\/ Get the request url and send to the github or other.\n\turl := conf.AuthCodeURL(userID) \/\/ Use userid as state.\n\tlog.InfoWithFields(\"cyclone receives creating token request\",\n\t\tlog.Fields{\"request url\": url})\n\n\tif !strings.Contains(url, \"github\") {\n\t\tlog.ErrorWithFields(\"Unable to get the url\", log.Fields{\"user_id\": userID})\n\t\treturn \"\", fmt.Errorf(\"Unable to get the url\")\n\t}\n\treturn url, nil\n}\n\n\/\/ Authcallback is the callback handler.\nfunc (g *GitHub) Authcallback(code, state string) (string, error) {\n\tif code == \"\" || state == \"\" {\n\t\treturn \"\", fmt.Errorf(\"code or state is nil\")\n\t}\n\n\t\/\/ Caicloud web address,eg caicloud.io\n\tuiPath := osutil.GetStringEnv(\"CONSOLE_WEB_ENDPOINT\", \"http:\/\/localhost:8000\")\n\tredirectURL := fmt.Sprintf(\"%s\/cyclone\/add?type=github&code=%s&state=%s\", uiPath, code, state)\n\n\t\/\/ Sync to get token.\n\tgo g.getToken(code, state)\n\treturn redirectURL, nil\n}\n\n\/\/ get token by using code from github.\nfunc (g *GitHub) getToken(code, state string) error {\n\tif code == \"\" || state == \"\" {\n\t\tlog.ErrorWithFields(\"code or state is nil\", log.Fields{\"code\": code, \"state\": state})\n\t\treturn fmt.Errorf(\"code or state is nil\")\n\t}\n\tlog.InfoWithFields(\"cyclone receives auth code\", log.Fields{\"request code\": code})\n\n\t\/\/ Get a object to request token.\n\tconf, err := g.getConf()\n\tif err != nil {\n\t\tlog.Warnf(\"Unable to get the conf according coderepository\")\n\t\treturn err\n\t}\n\n\t\/\/ To communication with githubo or other vcs to get token.\n\tvar tok *oauth2.Token\n\ttok, err = conf.Exchange(oauth2.NoContext, code) \/\/ Post a token request and receive toeken.\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\n\tif !tok.Valid() {\n\t\tlog.Fatalf(\"Token invalid. Got: %#v\", tok)\n\t\treturn err\n\t}\n\tlog.Info(\"get the token successfully!\")\n\n\t\/\/ Create service in database (but not ready to be used yet).\n\tvcstoken := api.VscToken{\n\t\tUserID: state,\n\t\tVsc: \"github\",\n\t\tVsctoken: *tok,\n\t}\n\n\tds := store.NewStore()\n\tdefer ds.Close()\n\n\t_, err = ds.FindtokenByUserID(state, \"github\")\n\tif err != nil {\n\t\terr = ds.NewTokenDocument(&vcstoken)\n\t\tif err != nil {\n\t\t\tlog.ErrorWithFields(\"NewTokenDocument\", log.Fields{\"user_id\": state,\n\t\t\t\t\"token\": tok, \"error\": err})\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\terr = ds.UpdateToken(&vcstoken)\n\t\tif err != nil {\n\t\t\tlog.ErrorWithFields(\"UpdateToken\", log.Fields{\"user_id\": state,\n\t\t\t\t\"token\": tok, \"error\": err})\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ GetRepos gets token by using code from github.\nfunc (g *GitHub) GetRepos(userID string) (Repos []api.Repo, username, avatarURL string, err error) {\n\tds := store.NewStore()\n\tdefer ds.Close()\n\n\ttok, err := ds.FindtokenByUserID(userID, \"github\")\n\tif err != nil {\n\t\tlog.ErrorWithFields(\"find token failed\", log.Fields{\"user_id\": userID, \"error\": err})\n\t\treturn Repos, username, avatarURL, err\n\t}\n\n\t\/\/ Use token to get repo list.\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: tok.Vsctoken.AccessToken},\n\t)\n\ttc := oauth2.NewClient(oauth2.NoContext, ts)\n\n\tclient := github.NewClient(tc)\n\n\t\/\/ List all repositories for the authenticated user.\n\topt := &github.RepositoryListOptions{\n\t\tListOptions: github.ListOptions{PerPage: 30},\n\t}\n\t\/\/ Get all pages of results.\n\tvar allRepos []*github.Repository\n\tfor {\n\t\trepos, resp, err := client.Repositories.List(\"\", opt)\n\t\tif err != nil {\n\t\t\tmessage := \"Unable to list repo by token\"\n\t\t\tlog.ErrorWithFields(message, log.Fields{\"user_id\": userID, \"token\": tok, \"error\": err})\n\t\t\treturn Repos, username, avatarURL, fmt.Errorf(\"Unable to list repo by token\")\n\t\t}\n\t\tallRepos = append(allRepos, repos...)\n\t\tif resp.NextPage == 0 {\n\t\t\tbreak\n\t\t}\n\t\topt.ListOptions.Page = resp.NextPage\n\t}\n\n\tRepos = make([]api.Repo, len(allRepos))\n\tfor i, repo := range allRepos {\n\t\tRepos[i].Name = *repo.Name\n\t\tRepos[i].URL = *repo.CloneURL\n\t\tRepos[i].Owner = *repo.Owner.Login\n\t}\n\n\tuser, _, err := client.Users.Get(\"\")\n\tif err != nil {\n\t\tlog.ErrorWithFields(\"Users.Get returned error\", log.Fields{\"user_id\": userID,\n\t\t\t\"token\": tok, \"error\": err})\n\t\treturn Repos, username, avatarURL, err\n\t}\n\tusername = *user.Login\n\tavatarURL = *user.AvatarURL\n\n\treturn Repos, username, avatarURL, nil\n}\n\n\/\/ LogOut logs out.\nfunc (g *GitHub) LogOut(userID string) error {\n\t\/\/ Fiind the token by userid and code repository.\n\tds := store.NewStore()\n\tdefer ds.Close()\n\ttok, err := ds.FindtokenByUserID(userID, \"github\")\n\tif err != nil {\n\t\tlog.ErrorWithFields(\"find token failed\", log.Fields{\"user_id\": userID, \"error\": err})\n\t\treturn err\n\t}\n\n\tconf, err := g.getConf()\n\tif err != nil {\n\t\tlog.ErrorWithFields(\"Unable to get the conf according coderepository\",\n\t\t\tlog.Fields{\"user_id\": userID, \"error\": err})\n\t\treturn err\n\t}\n\n\ttp := github.BasicAuthTransport{\n\t\tUsername: conf.ClientID,\n\t\tPassword: conf.ClientSecret,\n\t}\n\n\tclient := github.NewClient(tp.Client())\n\t_, err = client.Authorizations.Revoke(conf.ClientID, tok.Vsctoken.AccessToken)\n\tif err != nil {\n\t\tlog.ErrorWithFields(\"revoke failed\", log.Fields{\"user_id\": userID, \"error\": err})\n\t\treturn err\n\t}\n\n\t\/\/ Remove the token saved in DB.\n\terr = ds.RemoveTokeninDB(userID, \"github\")\n\tif err != nil {\n\t\tlog.ErrorWithFields(\"remove token failed\", log.Fields{\"user_id\": userID, \"error\": err})\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ CreateHook is a helper to register webhook.\nfunc (g *GitHub) CreateHook(service *api.Service) error {\n\twebhooktype := service.Repository.Webhook\n\tif webhooktype == \"\" {\n\t\treturn fmt.Errorf(\"no need webhook registry\")\n\t}\n\n\tif webhooktype == api.GITHUB {\n\t\turl := getHookURL(webhooktype, service.ServiceID)\n\t\tif url == \"\" {\n\t\t\tlog.Infof(\"url is empty\", log.Fields{\"user_id\": service.UserID})\n\t\t\treturn nil\n\t\t}\n\n\t\tds := store.NewStore()\n\t\tdefer ds.Close()\n\n\t\ttok, err := ds.FindtokenByUserID(service.UserID, api.GITHUB)\n\t\tif err != nil {\n\t\t\tlog.ErrorWithFields(\"find token failed\", log.Fields{\"user_id\": service.UserID, \"error\": err})\n\t\t\treturn err\n\t\t}\n\n\t\tts := oauth2.StaticTokenSource(\n\t\t\t&oauth2.Token{AccessToken: tok.Vsctoken.AccessToken},\n\t\t)\n\t\ttc := oauth2.NewClient(oauth2.NoContext, ts)\n\t\tclient := github.NewClient(tc)\n\n\t\tvar hook github.Hook\n\t\thook.Name = github.String(\"web\")\n\t\thook.Events = []string{\"push\", \"pull_request\"}\n\t\thook.Config = map[string]interface{}{}\n\t\thook.Config[\"url\"] = url\n\t\thook.Config[\"content_type\"] = \"json\"\n\t\tonwer, name := parseURL(service.Repository.URL)\n\t\t_, _, err = client.Repositories.CreateHook(onwer, name, &hook)\n\t\treturn err\n\t}\n\tlog.WarnWithFields(\"not support vcs repository\", log.Fields{\"vcs repository\": webhooktype})\n\treturn fmt.Errorf(\"not support vcs repository in create webhook\")\n}\n\n\/\/ DeleteHook is a helper to unregister webhook.\nfunc (g *GitHub) DeleteHook(service *api.Service) error {\n\twebhooktype := service.Repository.Webhook\n\tif webhooktype == \"\" {\n\t\treturn fmt.Errorf(\"no need webhook registry\")\n\t}\n\n\tif webhooktype == api.GITHUB {\n\t\turl := getHookURL(webhooktype, service.ServiceID)\n\t\tif url == \"\" {\n\t\t\tlog.Infof(\"url is empty\", log.Fields{\"user_id\": service.UserID})\n\t\t\treturn nil\n\t\t}\n\n\t\tds := store.NewStore()\n\t\tdefer ds.Close()\n\n\t\ttok, err := ds.FindtokenByUserID(service.UserID, api.GITHUB)\n\t\tif err != nil {\n\t\t\tlog.ErrorWithFields(\"find token failed\", log.Fields{\"user_id\": service.UserID, \"error\": err})\n\t\t\treturn err\n\t\t}\n\n\t\tts := oauth2.StaticTokenSource(\n\t\t\t&oauth2.Token{AccessToken: tok.Vsctoken.AccessToken},\n\t\t)\n\t\ttc := oauth2.NewClient(oauth2.NoContext, ts)\n\t\tclient := github.NewClient(tc)\n\n\t\towner, name := parseURL(service.Repository.URL)\n\t\thooks, _, err := client.Repositories.ListHooks(owner, name, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tvar hook *github.Hook\n\t\thasFoundHook := false\n\t\tfor _, hook = range hooks {\n\t\t\thookurl, ok := hook.Config[\"url\"].(string)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif strings.HasPrefix(hookurl, url) {\n\t\t\t\thasFoundHook = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif hasFoundHook {\n\t\t\t_, err = client.Repositories.DeleteHook(owner, name, *hook.ID)\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\tlog.WarnWithFields(\"not support vcs repository\", log.Fields{\"vcs repository\": webhooktype})\n\treturn fmt.Errorf(\"not support vcs repository in delete webhook\")\n}\n\n\/\/ PostCommitStatus posts Commit Status To Github.\nfunc (g *GitHub) PostCommitStatus(service *api.Service, version *api.Version) error {\n\t\/\/ Check if github webhook has set.\n\tif service.Repository.Webhook != api.GITHUB {\n\t\treturn fmt.Errorf(\"vcs github webhook hasn't set\")\n\t}\n\n\t\/\/ Check if has set commitID.\n\tif version.Commit == \"\" {\n\t\treturn fmt.Errorf(\"commit hasn't set\")\n\t}\n\n\t\/\/ Get token.\n\tds := store.NewStore()\n\tdefer ds.Close()\n\ttok, err := ds.FindtokenByUserID(service.UserID, api.GITHUB)\n\tif err != nil {\n\t\tlog.ErrorWithFields(\"find token failed\", log.Fields{\"user_id\": service.UserID, \"error\": err})\n\t\treturn err\n\t}\n\n\tts := oauth2.StaticTokenSource(\n\t\t&oauth2.Token{AccessToken: tok.Vsctoken.AccessToken},\n\t)\n\ttc := oauth2.NewClient(oauth2.NoContext, ts)\n\tclient := github.NewClient(tc)\n\n\t\/\/ Post commit status.\n\towner, repo := parseURL(service.Repository.URL)\n\turlHost := osutil.GetStringEnv(CYCLONE_SERVER_HOST, \"https:\/\/fornax-canary.caicloud.io\")\n\n\tvar state string\n\tif version.Status == api.VersionHealthy {\n\t\tstate = api.CISuccess\n\t} else if version.Status == api.VersionFailed || version.Status == api.VersionCancel {\n\t\tstate = api.CIFailure\n\t} else {\n\t\tstate = api.CIPending\n\t}\n\n\tlog.Infof(\"Now, version status is %s, post %s to github\", version.Status, state)\n\turlLog := fmt.Sprintf(\"%s\/log?user=%s&service=%s&version=%s\", urlHost, service.UserID,\n\t\tservice.ServiceID, version.VersionID)\n\tlog.Infof(\"Log getting url: %s\", urlLog)\n\tstatus := &github.RepoStatus{\n\t\tState: github.String(state),\n\t\tTargetURL: github.String(urlLog),\n\t\tDescription: github.String(service.Name + \" \" + version.ErrorMessage),\n\t\tContext: github.String(\"Cyclone\"),\n\t}\n\n\t_, _, err = client.Repositories.CreateStatus(owner, repo, version.Commit, status)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package webui\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/braintree\/manners\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"github.com\/gocraft\/web\"\n\t\"github.com\/gocraft\/work\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"sync\"\n)\n\ntype WebUIServer struct {\n\tnamespace string\n\tpool *redis.Pool\n\tclient *work.Client\n\thostPort string\n\tserver *manners.GracefulServer\n\twg sync.WaitGroup\n\trouter *web.Router\n}\n\ntype context struct {\n\t*WebUIServer\n}\n\nfunc NewServer(namespace string, pool *redis.Pool, hostPort string) *WebUIServer {\n\trouter := web.New(context{})\n\tserver := &WebUIServer{\n\t\tnamespace: namespace,\n\t\tpool: pool,\n\t\tclient: work.NewClient(namespace, pool),\n\t\thostPort: hostPort,\n\t\tserver: manners.NewServer(),\n\t\trouter: router,\n\t}\n\n\trouter.Middleware(func(c *context, rw web.ResponseWriter, r *web.Request, next web.NextMiddlewareFunc) {\n\t\tc.WebUIServer = server\n\t\tnext(rw, r)\n\t})\n\trouter.Middleware(func(rw web.ResponseWriter, r *web.Request, next web.NextMiddlewareFunc) {\n\t\trw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\tnext(rw, r)\n\t})\n\trouter.Get(\"\/queues\", (*context).queues)\n\trouter.Get(\"\/worker_pools\", (*context).workerPools)\n\trouter.Get(\"\/busy_workers\", (*context).busyWorkers)\n\trouter.Get(\"\/retry_jobs\", (*context).retryJobs)\n\trouter.Get(\"\/scheduled_jobs\", (*context).scheduledJobs)\n\trouter.Get(\"\/dead_jobs\", (*context).deadJobs)\n\n\treturn server\n}\n\nfunc (w *WebUIServer) Start() {\n\tw.wg.Add(1)\n\tgo func(w *WebUIServer) {\n\t\tw.server.ListenAndServe(w.hostPort, w.router)\n\t\tw.wg.Done()\n\t}(w)\n}\n\nfunc (w *WebUIServer) Stop() {\n\tw.server.Shutdown <- true\n\tw.wg.Wait()\n}\n\nfunc (c *context) queues(rw web.ResponseWriter, r *web.Request) {\n\tresponse, err := c.client.Queues()\n\trender(rw, response, err)\n}\n\nfunc (c *context) workerPools(rw web.ResponseWriter, r *web.Request) {\n\tresponse, err := c.client.WorkerPoolHeartbeats()\n\trender(rw, response, err)\n}\n\nfunc (c *context) busyWorkers(rw web.ResponseWriter, r *web.Request) {\n\tobservations, err := c.client.WorkerObservations()\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\n\tvar busyObservations []*work.WorkerObservation\n\tfor _, ob := range observations {\n\t\tif ob.IsBusy {\n\t\t\tbusyObservations = append(busyObservations, ob)\n\t\t}\n\t}\n\n\trender(rw, busyObservations, err)\n}\n\nfunc (c *context) retryJobs(rw web.ResponseWriter, r *web.Request) {\n\tpage, err := parsePage(r)\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\n\tjobs, count, err := c.client.RetryJobs(page)\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\n\tresponse := struct {\n\t\tCount int64\n\t\tJobs []*work.RetryJob\n\t}{Count: count, Jobs: jobs}\n\n\trender(rw, response, err)\n}\n\nfunc (c *context) scheduledJobs(rw web.ResponseWriter, r *web.Request) {\n\tpage, err := parsePage(r)\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\n\tjobs, count, err := c.client.ScheduledJobs(page)\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\n\tresponse := struct {\n\t\tCount int64\n\t\tJobs []*work.ScheduledJob\n\t}{Count: count, Jobs: jobs}\n\n\trender(rw, response, err)\n}\n\nfunc (c *context) deadJobs(rw web.ResponseWriter, r *web.Request) {\n\tpage, err := parsePage(r)\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\n\tjobs, count, err := c.client.DeadJobs(page)\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\n\tresponse := struct {\n\t\tCount int64\n\t\tJobs []*work.DeadJob\n\t}{Count: count, Jobs: jobs}\n\trender(rw, response, err)\n}\n\nfunc render(rw web.ResponseWriter, jsonable interface{}, err error) {\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\n\tjsonData, err := json.MarshalIndent(jsonable, \"\", \"\\t\")\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\trw.Write(jsonData)\n}\n\nfunc renderError(rw http.ResponseWriter, err error) {\n\trw.WriteHeader(500)\n\tfmt.Fprintf(rw, `{\"error\": \"%s\"}`, err.Error())\n}\n\nfunc parsePage(r *web.Request) (uint, error) {\n\terr := r.ParseForm()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tpageStr := r.Form.Get(\"page\")\n\tif pageStr == \"\" {\n\t\tpageStr = \"1\"\n\t}\n\n\tpage, err := strconv.ParseUint(pageStr, 10, 0)\n\treturn uint(page), err\n}\n<commit_msg>Add endpoints Add endpoints for deleting and retrying dead jobs<commit_after>package webui\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/braintree\/manners\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"github.com\/gocraft\/web\"\n\t\"github.com\/gocraft\/work\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"sync\"\n)\n\ntype WebUIServer struct {\n\tnamespace string\n\tpool *redis.Pool\n\tclient *work.Client\n\thostPort string\n\tserver *manners.GracefulServer\n\twg sync.WaitGroup\n\trouter *web.Router\n}\n\ntype context struct {\n\t*WebUIServer\n}\n\nfunc NewServer(namespace string, pool *redis.Pool, hostPort string) *WebUIServer {\n\trouter := web.New(context{})\n\tserver := &WebUIServer{\n\t\tnamespace: namespace,\n\t\tpool: pool,\n\t\tclient: work.NewClient(namespace, pool),\n\t\thostPort: hostPort,\n\t\tserver: manners.NewServer(),\n\t\trouter: router,\n\t}\n\n\trouter.Middleware(func(c *context, rw web.ResponseWriter, r *web.Request, next web.NextMiddlewareFunc) {\n\t\tc.WebUIServer = server\n\t\tnext(rw, r)\n\t})\n\trouter.Middleware(func(rw web.ResponseWriter, r *web.Request, next web.NextMiddlewareFunc) {\n\t\trw.Header().Set(\"Content-Type\", \"application\/json; charset=utf-8\")\n\t\tnext(rw, r)\n\t})\n\trouter.Get(\"\/queues\", (*context).queues)\n\trouter.Get(\"\/worker_pools\", (*context).workerPools)\n\trouter.Get(\"\/busy_workers\", (*context).busyWorkers)\n\trouter.Get(\"\/retry_jobs\", (*context).retryJobs)\n\trouter.Get(\"\/scheduled_jobs\", (*context).scheduledJobs)\n\trouter.Get(\"\/dead_jobs\", (*context).deadJobs)\n\trouter.Post(\"\/delete_dead_job\/:died_at:\\\\d.*\/:job_id\", (*context).deleteDeadJob)\n\trouter.Post(\"\/retry_dead_job\/:died_at:\\\\d.*\/:job_id\", (*context).retryDeadJob)\n\n\treturn server\n}\n\nfunc (w *WebUIServer) Start() {\n\tw.wg.Add(1)\n\tgo func(w *WebUIServer) {\n\t\tw.server.ListenAndServe(w.hostPort, w.router)\n\t\tw.wg.Done()\n\t}(w)\n}\n\nfunc (w *WebUIServer) Stop() {\n\tw.server.Shutdown <- true\n\tw.wg.Wait()\n}\n\nfunc (c *context) queues(rw web.ResponseWriter, r *web.Request) {\n\tresponse, err := c.client.Queues()\n\trender(rw, response, err)\n}\n\nfunc (c *context) workerPools(rw web.ResponseWriter, r *web.Request) {\n\tresponse, err := c.client.WorkerPoolHeartbeats()\n\trender(rw, response, err)\n}\n\nfunc (c *context) busyWorkers(rw web.ResponseWriter, r *web.Request) {\n\tobservations, err := c.client.WorkerObservations()\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\n\tvar busyObservations []*work.WorkerObservation\n\tfor _, ob := range observations {\n\t\tif ob.IsBusy {\n\t\t\tbusyObservations = append(busyObservations, ob)\n\t\t}\n\t}\n\n\trender(rw, busyObservations, err)\n}\n\nfunc (c *context) retryJobs(rw web.ResponseWriter, r *web.Request) {\n\tpage, err := parsePage(r)\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\n\tjobs, count, err := c.client.RetryJobs(page)\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\n\tresponse := struct {\n\t\tCount int64\n\t\tJobs []*work.RetryJob\n\t}{Count: count, Jobs: jobs}\n\n\trender(rw, response, err)\n}\n\nfunc (c *context) scheduledJobs(rw web.ResponseWriter, r *web.Request) {\n\tpage, err := parsePage(r)\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\n\tjobs, count, err := c.client.ScheduledJobs(page)\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\n\tresponse := struct {\n\t\tCount int64\n\t\tJobs []*work.ScheduledJob\n\t}{Count: count, Jobs: jobs}\n\n\trender(rw, response, err)\n}\n\nfunc (c *context) deadJobs(rw web.ResponseWriter, r *web.Request) {\n\tpage, err := parsePage(r)\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\n\tjobs, count, err := c.client.DeadJobs(page)\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\n\tresponse := struct {\n\t\tCount int64\n\t\tJobs []*work.DeadJob\n\t}{Count: count, Jobs: jobs}\n\t\n\trender(rw, response, err)\n}\n\nfunc (c *context) deleteDeadJob(rw web.ResponseWriter, r *web.Request) {\n\tdiedAt, err := strconv.ParseInt(r.PathParams[\"died_at\"], 10, 64)\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\t\n\tjob := &work.DeadJob{\n\t\tDiedAt: diedAt,\n\t\tJob: &work.Job{ID: r.PathParams[\"job_id\"]},\n\t}\n\t\n\terr = c.client.DeleteDeadJob(job)\n\t\n\trender(rw, map[string]string{\"status\": \"ok\"}, err)\n}\n\nfunc (c *context) retryDeadJob(rw web.ResponseWriter, r *web.Request) {\n\tdiedAt, err := strconv.ParseInt(r.PathParams[\"died_at\"], 10, 64)\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\t\n\tjob := &work.DeadJob{\n\t\tDiedAt: diedAt,\n\t\tJob: &work.Job{ID: r.PathParams[\"job_id\"]},\n\t}\n\t\n\terr = c.client.RetryDeadJob(job)\n\t\n\trender(rw, map[string]string{\"status\": \"ok\"}, err)\n}\n\nfunc render(rw web.ResponseWriter, jsonable interface{}, err error) {\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\n\tjsonData, err := json.MarshalIndent(jsonable, \"\", \"\\t\")\n\tif err != nil {\n\t\trenderError(rw, err)\n\t\treturn\n\t}\n\trw.Write(jsonData)\n}\n\nfunc renderError(rw http.ResponseWriter, err error) {\n\trw.WriteHeader(500)\n\tfmt.Fprintf(rw, `{\"error\": \"%s\"}`, err.Error())\n}\n\nfunc parsePage(r *web.Request) (uint, error) {\n\terr := r.ParseForm()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tpageStr := r.Form.Get(\"page\")\n\tif pageStr == \"\" {\n\t\tpageStr = \"1\"\n\t}\n\n\tpage, err := strconv.ParseUint(pageStr, 10, 0)\n\treturn uint(page), err\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/scriptnull\/badgeit\/worker\/downloader\"\n\t\"github.com\/streadway\/amqp\"\n)\n\nfunc main() {\n\tlog.Println(\"Booting Badgeit worker\")\n\n\tlog.Printf(\"Setting up connection to badgeit queue\")\n\tusername := os.Getenv(\"RABBIT_USERNAME\")\n\tpassword := os.Getenv(\"RABBIT_PASSWORD\")\n\thostname := os.Getenv(\"RABBIT_HOSTNAME\")\n\tport := os.Getenv(\"RABBIT_PORT\")\n\tconStr := fmt.Sprintf(\"amqp:\/\/%s:%s@%s:%s\/\", username, password, hostname, port)\n\tconn, err := amqp.Dial(conStr)\n\tfailOnError(err, \"Failed to connect to RabbitMQ\")\n\tdefer conn.Close()\n\n\tch, err := conn.Channel()\n\tfailOnError(err, \"Failed to open a channel\")\n\tdefer ch.Close()\n\n\tq, err := ch.QueueDeclare(\n\t\t\"badgeit.worker\", \/\/ name\n\t\ttrue, \/\/ durable\n\t\tfalse, \/\/ delete when unused\n\t\tfalse, \/\/ exclusive\n\t\tfalse, \/\/ no-wait\n\t\tnil, \/\/ arguments\n\t)\n\tfailOnError(err, \"Failed to declare a queue\")\n\n\terr = ch.Qos(\n\t\t1, \/\/ prefetch count\n\t\t0, \/\/ prefetch size\n\t\tfalse, \/\/ global\n\t)\n\tfailOnError(err, \"Failed to set QoS\")\n\n\tmsgs, err := ch.Consume(\n\t\tq.Name, \/\/ queue\n\t\t\"\", \/\/ consumer\n\t\tfalse, \/\/ auto-ack\n\t\tfalse, \/\/ exclusive\n\t\tfalse, \/\/ no-local\n\t\tfalse, \/\/ no-wait\n\t\tnil, \/\/ args\n\t)\n\tfailOnError(err, \"Failed to register a consumer\")\n\n\tforever := make(chan bool)\n\n\tgo func() {\n\t\tfor d := range msgs {\n\t\t\tlog.Printf(\"Starting Task for message: %s\", d.Body)\n\t\t\texecuteTask(d.Body)\n\t\t\tlog.Printf(\"Finished Task for message: %s\", d.Body)\n\t\t\td.Ack(false)\n\t\t}\n\t}()\n\n\tlog.Printf(\"Booted Badgeit Worker. To exit press CTRL+C\")\n\t<-forever\n}\n\nfunc failOnError(err error, msg string) {\n\tif err != nil {\n\t\tlog.Fatalf(\"%s: %s\", msg, err)\n\t\tpanic(fmt.Sprintf(\"%s: %s\", msg, err))\n\t}\n}\n\nfunc executeTask(message []byte) {\n\tpayload := struct {\n\t\tRemote string\n\t\tDownload string\n\t\tCallback string\n\t}{}\n\terr := json.Unmarshal(message, &payload)\n\tif err != nil {\n\t\tlog.Printf(\"Error Parsing the payload %d\", err)\n\t\treturn\n\t}\n\n\tdir, err := ioutil.TempDir(\"\", \"repo\")\n\tif err != nil {\n\t\tlog.Fatalln(\"Error creating temporary folder: \", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\td := downloader.NewDownloader(downloader.DownloaderOptions{\n\t\tType: payload.Download,\n\t\tRemote: payload.Remote,\n\t\tPath: dir,\n\t})\n\tlog.Println(\"Downloading the repository: \", payload.Remote)\n\terr = d.Download()\n\tif err != nil {\n\t\tlog.Println(\"Error Downloading repository: \", err)\n\t\t\/\/ report error\n\t}\n\tlog.Println(\"Downloading complete @ \", dir)\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Println(\"Error Getting Working Directory: \", err)\n\t\t\/\/ report error\n\t}\n\n\tresult, err := exec.Command(filepath.Join(wd, \"badgeit\"), \"-f\", \"all-json\", dir).Output()\n\tif err != nil {\n\t\tlog.Println(\"Error Executing badgeit: \", err)\n\t\t\/\/ report error\n\t}\n\n\terr = callback(payload.Callback, result)\n\tif err != nil {\n\t\tlog.Println(\"Error While Posting callback: \", err)\n\t}\n}\n\nfunc callback(responseURL string, buf []byte) error {\n\tjsonPayload, err := json.Marshal(map[string]string{\n\t\t\"badges\": string(buf),\n\t\t\"error\": \"hello\",\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = http.Post(responseURL, \"application\/json\", strings.NewReader(string(jsonPayload)))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<commit_msg>adds error reporting in worker<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/scriptnull\/badgeit\/worker\/downloader\"\n\t\"github.com\/streadway\/amqp\"\n)\n\nfunc main() {\n\tlog.Println(\"Booting Badgeit worker\")\n\n\tlog.Printf(\"Setting up connection to badgeit queue\")\n\tusername := os.Getenv(\"RABBIT_USERNAME\")\n\tpassword := os.Getenv(\"RABBIT_PASSWORD\")\n\thostname := os.Getenv(\"RABBIT_HOSTNAME\")\n\tport := os.Getenv(\"RABBIT_PORT\")\n\tconStr := fmt.Sprintf(\"amqp:\/\/%s:%s@%s:%s\/\", username, password, hostname, port)\n\tconn, err := amqp.Dial(conStr)\n\tfailOnError(err, \"Failed to connect to RabbitMQ\")\n\tdefer conn.Close()\n\n\tch, err := conn.Channel()\n\tfailOnError(err, \"Failed to open a channel\")\n\tdefer ch.Close()\n\n\tq, err := ch.QueueDeclare(\n\t\t\"badgeit.worker\", \/\/ name\n\t\ttrue, \/\/ durable\n\t\tfalse, \/\/ delete when unused\n\t\tfalse, \/\/ exclusive\n\t\tfalse, \/\/ no-wait\n\t\tnil, \/\/ arguments\n\t)\n\tfailOnError(err, \"Failed to declare a queue\")\n\n\terr = ch.Qos(\n\t\t1, \/\/ prefetch count\n\t\t0, \/\/ prefetch size\n\t\tfalse, \/\/ global\n\t)\n\tfailOnError(err, \"Failed to set QoS\")\n\n\tmsgs, err := ch.Consume(\n\t\tq.Name, \/\/ queue\n\t\t\"\", \/\/ consumer\n\t\tfalse, \/\/ auto-ack\n\t\tfalse, \/\/ exclusive\n\t\tfalse, \/\/ no-local\n\t\tfalse, \/\/ no-wait\n\t\tnil, \/\/ args\n\t)\n\tfailOnError(err, \"Failed to register a consumer\")\n\n\tforever := make(chan bool)\n\n\tgo func() {\n\t\tfor d := range msgs {\n\t\t\tlog.Printf(\"Starting Task for message: %s\", d.Body)\n\t\t\texecuteTask(d.Body)\n\t\t\tlog.Printf(\"Finished Task for message: %s\", d.Body)\n\t\t\td.Ack(false)\n\t\t}\n\t}()\n\n\tlog.Printf(\"Booted Badgeit Worker. To exit press CTRL+C\")\n\t<-forever\n}\n\nfunc failOnError(err error, msg string) {\n\tif err != nil {\n\t\tlog.Fatalf(\"%s: %s\", msg, err)\n\t\tpanic(fmt.Sprintf(\"%s: %s\", msg, err))\n\t}\n}\n\ntype taskResult struct {\n\tCallbackURL string\n\t\/\/ TODO: add callback headers\n\tBadges string\n\tError string\n}\n\nfunc executeTask(message []byte) {\n\n\t\/\/ Parse input message\n\tpayload := struct {\n\t\tRemote string\n\t\tDownload string\n\t\tCallback string\n\t}{}\n\terr := json.Unmarshal(message, &payload)\n\tif err != nil {\n\t\tlog.Printf(\"Error Parsing the payload %d\", err)\n\t\treturn\n\t}\n\n\t\/\/ Create temporary directory for download operation\n\tdir, err := ioutil.TempDir(\"\", \"repo\")\n\tif err != nil {\n\t\tlog.Fatalln(\"Error creating temporary folder: \", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\t\/\/ Initiate taskResult for reporting back to the callback server\\\n\tcallbackResponse := taskResult{\n\t\tCallbackURL: payload.Callback,\n\t}\n\n\t\/\/ Download the repository\n\td := downloader.NewDownloader(downloader.DownloaderOptions{\n\t\tType: payload.Download,\n\t\tRemote: payload.Remote,\n\t\tPath: dir,\n\t})\n\tlog.Println(\"Downloading the repository: \", payload.Remote)\n\terr = d.Download()\n\tif err != nil {\n\t\terrorStr := fmt.Sprintln(\"Error Downloading repository: \", err)\n\t\tcallbackResponse.Error = errorStr\n\t\tcallback(callbackResponse)\n\t\treturn\n\t}\n\tlog.Println(\"Downloading complete @ \", dir)\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\terrorStr := fmt.Sprintln(\"Error Getting Working Directory: \", err)\n\t\tcallbackResponse.Error = errorStr\n\t\tcallback(callbackResponse)\n\t\treturn\n\t}\n\n\tresult, err := exec.Command(filepath.Join(wd, \"badgeit\"), \"-f\", \"all-json\", dir).Output()\n\tif err != nil {\n\t\terrorStr := fmt.Sprintln(\"Error Executing badgeit: \", err)\n\t\tcallbackResponse.Error = errorStr\n\t\tcallback(callbackResponse)\n\t\treturn\n\t}\n\n\tcallbackResponse.Badges = string(result)\n\terr = callback(callbackResponse)\n\tif err != nil {\n\t\tlog.Println(\"Error While Posting callback: \", err)\n\t}\n}\n\nfunc callback(result taskResult) error {\n\tif result.Error == \"\" {\n\t\tlog.Print(result.Error)\n\t}\n\tjsonPayload, err := json.Marshal(map[string]interface{}{\n\t\t\"badges\": result.Badges,\n\t\t\"error\": result.Error,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = http.Post(result.CallbackURL, \"application\/json\", strings.NewReader(string(jsonPayload)))\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package processors_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/dailyburn\/ratchet\"\n\t\"github.com\/dailyburn\/ratchet\/data\"\n\t\"github.com\/dailyburn\/ratchet\/logger\"\n\t\"github.com\/dailyburn\/ratchet\/processors\"\n)\n\nfunc ExampleGetRequest() {\n\tlogger.LogLevel = logger.LevelSilent\n\n\tgetGoogle, err := processors.NewHTTPRequest(\"GET\", \"http:\/\/www.google.com\", nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t\/\/ this is just a really basic checking function so we can have\n\t\/\/ determinable example output.\n\tcheckHTML := processors.NewFuncTransformer(func(d data.JSON) data.JSON {\n\t\toutput := \"Got HTML?\\n\"\n\t\tif strings.Contains(strings.ToLower(string(d)), \"html\") {\n\t\t\toutput += \"YES\\n\"\n\t\t} else {\n\t\t\toutput += \"NO\\n\"\n\t\t}\n\t\toutput += \"HTML contains Search Google?\\n\"\n\t\tif strings.Contains(string(d), \"Google Search\") {\n\t\t\toutput += \"YES\\n\"\n\t\t} else {\n\t\t\toutput += \"NO\\n\"\n\t\t}\n\t\treturn data.JSON(output)\n\t})\n\tstdout := processors.NewIoWriter(os.Stdout)\n\tpipeline := ratchet.NewPipeline(getGoogle, checkHTML, stdout)\n\n\terr = <-pipeline.Run()\n\n\tif err != nil {\n\t\tfmt.Println(\"An error occurred in the ratchet pipeline:\", err.Error())\n\t}\n\n\t\/\/ Output:\n\t\/\/ Got HTML?\n\t\/\/ YES\n\t\/\/ HTML contains Search Google?\n\t\/\/ YES\n}\n<commit_msg>Fix typo in http_request_test<commit_after>package processors_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/dailyburn\/ratchet\"\n\t\"github.com\/dailyburn\/ratchet\/data\"\n\t\"github.com\/dailyburn\/ratchet\/logger\"\n\t\"github.com\/dailyburn\/ratchet\/processors\"\n)\n\nfunc ExampleGetRequest() {\n\tlogger.LogLevel = logger.LevelSilent\n\n\tgetGoogle, err := processors.NewHTTPRequest(\"GET\", \"http:\/\/www.google.com\", nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t\/\/ this is just a really basic checking function so we can have\n\t\/\/ determinable example output.\n\tcheckHTML := processors.NewFuncTransformer(func(d data.JSON) data.JSON {\n\t\toutput := \"Got HTML?\\n\"\n\t\tif strings.Contains(strings.ToLower(string(d)), \"html\") {\n\t\t\toutput += \"YES\\n\"\n\t\t} else {\n\t\t\toutput += \"NO\\n\"\n\t\t}\n\t\toutput += \"HTML contains Google Search?\\n\"\n\t\tif strings.Contains(string(d), \"Google Search\") {\n\t\t\toutput += \"YES\\n\"\n\t\t} else {\n\t\t\toutput += \"NO\\n\"\n\t\t}\n\t\treturn data.JSON(output)\n\t})\n\tstdout := processors.NewIoWriter(os.Stdout)\n\tpipeline := ratchet.NewPipeline(getGoogle, checkHTML, stdout)\n\n\terr = <-pipeline.Run()\n\n\tif err != nil {\n\t\tfmt.Println(\"An error occurred in the ratchet pipeline:\", err.Error())\n\t}\n\n\t\/\/ Output:\n\t\/\/ Got HTML?\n\t\/\/ YES\n\t\/\/ HTML contains Google Search?\n\t\/\/ YES\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ (c) 2019-2020, Ava Labs, Inc. All rights reserved.\n\/\/ See the file LICENSE for licensing terms.\n\n\/\/ Package state manages the meta-data required by consensus for an avalanche\n\/\/ dag.\npackage state\n\nimport (\n\t\"errors\"\n\n\t\"github.com\/ava-labs\/avalanchego\/cache\"\n\t\"github.com\/ava-labs\/avalanchego\/database\"\n\t\"github.com\/ava-labs\/avalanchego\/database\/versiondb\"\n\t\"github.com\/ava-labs\/avalanchego\/ids\"\n\t\"github.com\/ava-labs\/avalanchego\/snow\"\n\t\"github.com\/ava-labs\/avalanchego\/snow\/choices\"\n\t\"github.com\/ava-labs\/avalanchego\/snow\/consensus\/avalanche\"\n\t\"github.com\/ava-labs\/avalanchego\/snow\/consensus\/snowstorm\"\n\t\"github.com\/ava-labs\/avalanchego\/snow\/engine\/avalanche\/vertex\"\n\t\"github.com\/ava-labs\/avalanchego\/utils\/hashing\"\n\t\"github.com\/ava-labs\/avalanchego\/utils\/math\"\n)\n\nconst (\n\tdbCacheSize = 10000\n\tidCacheSize = 1000\n)\n\nvar (\n\terrUnknownVertex = errors.New(\"unknown vertex\")\n\terrWrongChainID = errors.New(\"wrong ChainID in vertex\")\n)\n\n\/\/ Serializer manages the state of multiple vertices\ntype Serializer struct {\n\tctx *snow.Context\n\tvm vertex.DAGVM\n\tstate *prefixedState\n\tdb *versiondb.Database\n\tedge ids.Set\n}\n\n\/\/ Initialize implements the avalanche.State interface\nfunc (s *Serializer) Initialize(ctx *snow.Context, vm vertex.DAGVM, db database.Database) {\n\ts.ctx = ctx\n\ts.vm = vm\n\n\tvdb := versiondb.New(db)\n\tdbCache := &cache.LRU{Size: dbCacheSize}\n\trawState := &state{\n\t\tserializer: s,\n\t\tdbCache: dbCache,\n\t\tdb: vdb,\n\t}\n\ts.state = newPrefixedState(rawState, idCacheSize)\n\ts.db = vdb\n\n\ts.edge.Add(s.state.Edge()...)\n}\n\n\/\/ ParseVertex implements the avalanche.State interface\nfunc (s *Serializer) ParseVertex(b []byte) (avalanche.Vertex, error) {\n\tvtx, err := s.parseVertex(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := vtx.Verify(); err != nil {\n\t\treturn nil, err\n\t}\n\tuVtx := &uniqueVertex{\n\t\tserializer: s,\n\t\tvtxID: vtx.ID(),\n\t}\n\tif uVtx.Status() == choices.Unknown {\n\t\tif err := uVtx.setVertex(vtx); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn uVtx, s.db.Commit()\n}\n\n\/\/ BuildVertex implements the avalanche.State interface\nfunc (s *Serializer) BuildVertex(parentSet ids.Set, txs []snowstorm.Tx) (avalanche.Vertex, error) {\n\tif len(txs) == 0 {\n\t\treturn nil, errNoTxs\n\t}\n\n\tparentIDs := parentSet.List()\n\tids.SortIDs(parentIDs)\n\tsortTxs(txs)\n\n\theight := uint64(0)\n\tfor _, parentID := range parentIDs {\n\t\tparent, err := s.getVertex(parentID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\theight = math.Max64(height, parent.v.vtx.height)\n\t}\n\n\tvtx := &innerVertex{\n\t\tchainID: s.ctx.ChainID,\n\t\theight: height + 1,\n\t\tparentIDs: parentIDs,\n\t\ttxs: txs,\n\t}\n\n\tbytes, err := vtx.Marshal()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvtx.bytes = bytes\n\tvtx.id = ids.NewID(hashing.ComputeHash256Array(vtx.bytes))\n\n\tuVtx := &uniqueVertex{\n\t\tserializer: s,\n\t\tvtxID: vtx.ID(),\n\t}\n\t\/\/ It is possible this vertex already exists in the database, even though we\n\t\/\/ just made it.\n\tif uVtx.Status() == choices.Unknown {\n\t\tif err := uVtx.setVertex(vtx); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn uVtx, s.db.Commit()\n}\n\n\/\/ GetVertex implements the avalanche.State interface\nfunc (s *Serializer) GetVertex(vtxID ids.ID) (avalanche.Vertex, error) { return s.getVertex(vtxID) }\n\n\/\/ Edge implements the avalanche.State interface\nfunc (s *Serializer) Edge() []ids.ID { return s.edge.List() }\n\nfunc (s *Serializer) parseVertex(b []byte) (*innerVertex, error) {\n\tvtx := &innerVertex{}\n\tif err := vtx.Unmarshal(b, s.vm); err != nil {\n\t\treturn nil, err\n\t} else if !vtx.chainID.Equals(s.ctx.ChainID) {\n\t\treturn nil, errWrongChainID\n\t}\n\treturn vtx, nil\n}\n\nfunc (s *Serializer) getVertex(vtxID ids.ID) (*uniqueVertex, error) {\n\tvtx := &uniqueVertex{\n\t\tserializer: s,\n\t\tvtxID: vtxID,\n\t}\n\tif vtx.Status() == choices.Unknown {\n\t\treturn nil, errUnknownVertex\n\t}\n\treturn vtx, nil\n}\n<commit_msg>enforce max txs and max parents in BuildVertex<commit_after>\/\/ (c) 2019-2020, Ava Labs, Inc. All rights reserved.\n\/\/ See the file LICENSE for licensing terms.\n\n\/\/ Package state manages the meta-data required by consensus for an avalanche\n\/\/ dag.\npackage state\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com\/ava-labs\/avalanchego\/cache\"\n\t\"github.com\/ava-labs\/avalanchego\/database\"\n\t\"github.com\/ava-labs\/avalanchego\/database\/versiondb\"\n\t\"github.com\/ava-labs\/avalanchego\/ids\"\n\t\"github.com\/ava-labs\/avalanchego\/snow\"\n\t\"github.com\/ava-labs\/avalanchego\/snow\/choices\"\n\t\"github.com\/ava-labs\/avalanchego\/snow\/consensus\/avalanche\"\n\t\"github.com\/ava-labs\/avalanchego\/snow\/consensus\/snowstorm\"\n\t\"github.com\/ava-labs\/avalanchego\/snow\/engine\/avalanche\/vertex\"\n\t\"github.com\/ava-labs\/avalanchego\/utils\/hashing\"\n\t\"github.com\/ava-labs\/avalanchego\/utils\/math\"\n)\n\nconst (\n\tdbCacheSize = 10000\n\tidCacheSize = 1000\n)\n\nvar (\n\terrUnknownVertex = errors.New(\"unknown vertex\")\n\terrWrongChainID = errors.New(\"wrong ChainID in vertex\")\n)\n\n\/\/ Serializer manages the state of multiple vertices\ntype Serializer struct {\n\tctx *snow.Context\n\tvm vertex.DAGVM\n\tstate *prefixedState\n\tdb *versiondb.Database\n\tedge ids.Set\n}\n\n\/\/ Initialize implements the avalanche.State interface\nfunc (s *Serializer) Initialize(ctx *snow.Context, vm vertex.DAGVM, db database.Database) {\n\ts.ctx = ctx\n\ts.vm = vm\n\n\tvdb := versiondb.New(db)\n\tdbCache := &cache.LRU{Size: dbCacheSize}\n\trawState := &state{\n\t\tserializer: s,\n\t\tdbCache: dbCache,\n\t\tdb: vdb,\n\t}\n\ts.state = newPrefixedState(rawState, idCacheSize)\n\ts.db = vdb\n\n\ts.edge.Add(s.state.Edge()...)\n}\n\n\/\/ ParseVertex implements the avalanche.State interface\nfunc (s *Serializer) ParseVertex(b []byte) (avalanche.Vertex, error) {\n\tvtx, err := s.parseVertex(b)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := vtx.Verify(); err != nil {\n\t\treturn nil, err\n\t}\n\tuVtx := &uniqueVertex{\n\t\tserializer: s,\n\t\tvtxID: vtx.ID(),\n\t}\n\tif uVtx.Status() == choices.Unknown {\n\t\tif err := uVtx.setVertex(vtx); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn uVtx, s.db.Commit()\n}\n\n\/\/ BuildVertex implements the avalanche.State interface\nfunc (s *Serializer) BuildVertex(parentSet ids.Set, txs []snowstorm.Tx) (avalanche.Vertex, error) {\n\tif len(txs) == 0 {\n\t\treturn nil, errNoTxs\n\t} else if l := len(txs); l > maxTxsPerVtx {\n\t\treturn nil, fmt.Errorf(\"number of txs (%d) exceeds max (%d)\", l, maxTxsPerVtx)\n\t} else if l := parentSet.Len(); l > maxNumParents {\n\t\treturn nil, fmt.Errorf(\"number of parents (%d) exceeds max (%d)\", l, maxNumParents)\n\t}\n\n\tparentIDs := parentSet.List()\n\tids.SortIDs(parentIDs)\n\tsortTxs(txs)\n\n\theight := uint64(0)\n\tfor _, parentID := range parentIDs {\n\t\tparent, err := s.getVertex(parentID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\theight = math.Max64(height, parent.v.vtx.height)\n\t}\n\n\tvtx := &innerVertex{\n\t\tchainID: s.ctx.ChainID,\n\t\theight: height + 1,\n\t\tparentIDs: parentIDs,\n\t\ttxs: txs,\n\t}\n\n\tbytes, err := vtx.Marshal()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvtx.bytes = bytes\n\tvtx.id = ids.NewID(hashing.ComputeHash256Array(vtx.bytes))\n\n\tuVtx := &uniqueVertex{\n\t\tserializer: s,\n\t\tvtxID: vtx.ID(),\n\t}\n\t\/\/ It is possible this vertex already exists in the database, even though we\n\t\/\/ just made it.\n\tif uVtx.Status() == choices.Unknown {\n\t\tif err := uVtx.setVertex(vtx); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn uVtx, s.db.Commit()\n}\n\n\/\/ GetVertex implements the avalanche.State interface\nfunc (s *Serializer) GetVertex(vtxID ids.ID) (avalanche.Vertex, error) { return s.getVertex(vtxID) }\n\n\/\/ Edge implements the avalanche.State interface\nfunc (s *Serializer) Edge() []ids.ID { return s.edge.List() }\n\nfunc (s *Serializer) parseVertex(b []byte) (*innerVertex, error) {\n\tvtx := &innerVertex{}\n\tif err := vtx.Unmarshal(b, s.vm); err != nil {\n\t\treturn nil, err\n\t} else if !vtx.chainID.Equals(s.ctx.ChainID) {\n\t\treturn nil, errWrongChainID\n\t}\n\treturn vtx, nil\n}\n\nfunc (s *Serializer) getVertex(vtxID ids.ID) (*uniqueVertex, error) {\n\tvtx := &uniqueVertex{\n\t\tserializer: s,\n\t\tvtxID: vtxID,\n\t}\n\tif vtx.Status() == choices.Unknown {\n\t\treturn nil, errUnknownVertex\n\t}\n\treturn vtx, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package softlayer\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/softlayer\/softlayer-go\/datatypes\"\n\t\"github.com\/softlayer\/softlayer-go\/filter\"\n\t\"github.com\/softlayer\/softlayer-go\/helpers\/product\"\n\t\"github.com\/softlayer\/softlayer-go\/services\"\n\t\"github.com\/softlayer\/softlayer-go\/session\"\n\t\"github.com\/softlayer\/softlayer-go\/sl\"\n)\n\nconst (\n\tAdditionalServicesGlobalIpAddressesPackageType = \"ADDITIONAL_SERVICES_GLOBAL_IP_ADDRESSES\"\n\n\tGlobalIpMask = \"id,ipAddress[ipAddress],destinationIpAddress[ipAddress]\"\n)\n\nfunc resourceSoftLayerGlobalIp() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceSoftLayerGlobalIpCreate,\n\t\tRead: resourceSoftLayerGlobalIpRead,\n\t\tUpdate: resourceSoftLayerGlobalIpUpdate,\n\t\tDelete: resourceSoftLayerGlobalIpDelete,\n\t\tExists: resourceSoftLayerGlobalIpExists,\n\t\tImporter: &schema.ResourceImporter{},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"id\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"ip_address\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"routes_to\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {\n\t\t\t\t\taddress := v.(string)\n\t\t\t\t\tif net.ParseIP(address) == nil {\n\t\t\t\t\t\terrors = append(errors, fmt.Errorf(\"Invalid IP format: %s\", address))\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t},\n\t\t\t\tDiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool {\n\t\t\t\t\treturn net.ParseIP(o).String() == net.ParseIP(n).String()\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceSoftLayerGlobalIpCreate(d *schema.ResourceData, meta interface{}) error {\n\tsess := meta.(ProviderConfig).SoftLayerSession()\n\n\t\/\/ Find price items with AdditionalServicesGlobalIpAddresses\n\tproductOrderContainer, err := buildGlobalIpProductOrderContainer(d, sess, AdditionalServicesGlobalIpAddressesPackageType)\n\tif err != nil {\n\t\t\/\/ Find price items with AdditionalServices\n\t\tproductOrderContainer, err = buildGlobalIpProductOrderContainer(d, sess, AdditionalServicesPackageType)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error creating global ip: %s\", err)\n\t\t}\n\t}\n\n\tlog.Println(\"[INFO] Creating global ip\")\n\n\treceipt, err := services.GetProductOrderService(sess).\n\t\tPlaceOrder(productOrderContainer, sl.Bool(false))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error during creation of global ip: %s\", err)\n\t}\n\n\tglobalIp, err := findGlobalIpByOrderId(sess, *receipt.OrderId)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error during creation of global ip: %s\", err)\n\t}\n\n\td.SetId(fmt.Sprintf(\"%d\", *globalIp.Id))\n\td.Set(\"ip_address\", *globalIp.IpAddress.IpAddress)\n\n\treturn resourceSoftLayerGlobalIpUpdate(d, meta)\n}\n\nfunc resourceSoftLayerGlobalIpRead(d *schema.ResourceData, meta interface{}) error {\n\tsess := meta.(ProviderConfig).SoftLayerSession()\n\tservice := services.GetNetworkSubnetIpAddressGlobalService(sess)\n\n\tglobalIpId, err := strconv.Atoi(d.Id())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Not a valid global ip ID, must be an integer: %s\", err)\n\t}\n\n\tglobalIp, err := service.Id(globalIpId).Mask(GlobalIpMask).GetObject()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error retrieving Global Ip: %s\", err)\n\t}\n\n\td.Set(\"id\", *globalIp.Id)\n\td.Set(\"ip_address\", *globalIp.IpAddress.IpAddress)\n\tif globalIp.DestinationIpAddress != nil {\n\t\td.Set(\"routes_to\", *globalIp.DestinationIpAddress.IpAddress)\n\t}\n\treturn nil\n}\n\nfunc resourceSoftLayerGlobalIpUpdate(d *schema.ResourceData, meta interface{}) error {\n\tsess := meta.(ProviderConfig).SoftLayerSession()\n\tservice := services.GetNetworkSubnetIpAddressGlobalService(sess)\n\n\tglobalIpId, err := strconv.Atoi(d.Id())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Not a valid global ip ID, must be an integer: %s\", err)\n\t}\n\n\troutes_to := d.Get(\"routes_to\").(string)\n\tif strings.Contains(routes_to, \":\") && len(routes_to) != 39 {\n\t\tparts := strings.Split(routes_to, \":\")\n\t\tfor x, s := range parts {\n\t\t\tif s == \"\" {\n\t\t\t\tzeroes := 9 - len(parts)\n\t\t\t\tparts[x] = strings.Repeat(\"0000:\", zeroes)[:(zeroes*4)+(zeroes-1)]\n\t\t\t} else {\n\t\t\t\tparts[x] = fmt.Sprintf(\"%04s\", s)\n\t\t\t}\n\t\t}\n\n\t\troutes_to = strings.Join(parts, \":\")\n\t\td.Set(\"routes_to\", routes_to)\n\t}\n\n\t_, err = service.Id(globalIpId).Route(sl.String(routes_to))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error editing Global Ip: %s\", err)\n\t}\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"pending\"},\n\t\tTarget: []string{\"complete\"},\n\t\tRefresh: func() (interface{}, string, error) {\n\t\t\ttransaction, err := service.Id(globalIpId).GetActiveTransaction()\n\t\t\tif err != nil {\n\t\t\t\treturn datatypes.Network_Subnet_IpAddress_Global{}, \"pending\", err\n\t\t\t}\n\t\t\tif transaction.Id == nil {\n\t\t\t\treturn datatypes.Network_Subnet_IpAddress_Global{}, \"complete\", nil\n\t\t\t}\n\t\t\treturn datatypes.Network_Subnet_IpAddress_Global{}, \"pending\", nil\n\t\t},\n\t\tTimeout: 10 * time.Minute,\n\t\tDelay: 5 * time.Second,\n\t\tMinTimeout: 3 * time.Second,\n\t}\n\n\tpendingResult, err := stateConf.WaitForState()\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error waiting for global ip destination ip address to become active: %s\", err)\n\t}\n\n\tif _, ok := pendingResult.(datatypes.Network_Subnet_IpAddress_Global); ok {\n\t\treturn nil\n\t}\n\n\treturn nil\n}\n\nfunc resourceSoftLayerGlobalIpDelete(d *schema.ResourceData, meta interface{}) error {\n\tsess := meta.(ProviderConfig).SoftLayerSession()\n\tservice := services.GetNetworkSubnetIpAddressGlobalService(sess)\n\n\tglobalIpId, err := strconv.Atoi(d.Id())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Not a valid global ip ID, must be an integer: %s\", err)\n\t}\n\n\tbillingItem, err := service.Id(globalIpId).GetBillingItem()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error deleting global ip: %s\", err)\n\t}\n\n\tif billingItem.Id == nil {\n\t\treturn nil\n\t}\n\n\t_, err = services.GetBillingItemService(sess).Id(*billingItem.Id).CancelService()\n\n\treturn err\n}\n\nfunc resourceSoftLayerGlobalIpExists(d *schema.ResourceData, meta interface{}) (bool, error) {\n\tsess := meta.(ProviderConfig).SoftLayerSession()\n\tservice := services.GetNetworkSubnetIpAddressGlobalService(sess)\n\n\tglobalIpId, err := strconv.Atoi(d.Id())\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"Not a valid ID, must be an integer: %s\", err)\n\t}\n\n\tresult, err := service.Id(globalIpId).GetObject()\n\tif err != nil {\n\t\tif apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Error retrieving global ip: %s\", err)\n\t}\n\treturn result.Id != nil && *result.Id == globalIpId, nil\n}\n\nfunc findGlobalIpByOrderId(sess *session.Session, orderId int) (datatypes.Network_Subnet_IpAddress_Global, error) {\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"pending\"},\n\t\tTarget: []string{\"complete\"},\n\t\tRefresh: func() (interface{}, string, error) {\n\t\t\tglobalIps, err := services.GetAccountService(sess).\n\t\t\t\tFilter(filter.Path(\"globalIpRecords.billingItem.orderItem.order.id\").\n\t\t\t\t\tEq(strconv.Itoa(orderId)).Build()).\n\t\t\t\tMask(\"id,ipAddress[ipAddress]\").\n\t\t\t\tGetGlobalIpRecords()\n\t\t\tif err != nil {\n\t\t\t\treturn datatypes.Network_Subnet_IpAddress_Global{}, \"\", err\n\t\t\t}\n\n\t\t\tif len(globalIps) == 1 && globalIps[0].IpAddress != nil {\n\t\t\t\treturn globalIps[0], \"complete\", nil\n\t\t\t} else if len(globalIps) == 0 || len(globalIps) == 1 {\n\t\t\t\treturn nil, \"pending\", nil\n\t\t\t} else {\n\t\t\t\treturn nil, \"\", fmt.Errorf(\"Expected one global ip: %s\", err)\n\t\t\t}\n\t\t},\n\t\tTimeout: 10 * time.Minute,\n\t\tDelay: 5 * time.Second,\n\t\tMinTimeout: 3 * time.Second,\n\t}\n\n\tpendingResult, err := stateConf.WaitForState()\n\n\tif err != nil {\n\t\treturn datatypes.Network_Subnet_IpAddress_Global{}, err\n\t}\n\n\tif result, ok := pendingResult.(datatypes.Network_Subnet_IpAddress_Global); ok {\n\t\treturn result, nil\n\t}\n\n\treturn datatypes.Network_Subnet_IpAddress_Global{},\n\t\tfmt.Errorf(\"Cannot find global ip with order id '%d'\", orderId)\n}\n\nfunc buildGlobalIpProductOrderContainer(d *schema.ResourceData, sess *session.Session, packageType string) (\n\t*datatypes.Container_Product_Order_Network_Subnet, error) {\n\n\t\/\/ 1. Get a package\n\tpkg, err := product.GetPackageByType(sess, packageType)\n\tif err != nil {\n\t\treturn &datatypes.Container_Product_Order_Network_Subnet{}, err\n\t}\n\n\t\/\/ 2. Get all prices for the package\n\tproductItems, err := product.GetPackageProducts(sess, *pkg.Id)\n\tif err != nil {\n\t\treturn &datatypes.Container_Product_Order_Network_Subnet{}, err\n\t}\n\n\t\/\/ 3. Find global ip prices\n\tglobalIpKeyname := \"GLOBAL_IPV4\"\n\tif strings.Contains(d.Get(\"routes_to\").(string), \":\") {\n\t\tglobalIpKeyname = \"GLOBAL_IPV6\"\n\t}\n\n\t\/\/ 4. Select items with a matching keyname\n\tglobalIpItems := []datatypes.Product_Item{}\n\tfor _, item := range productItems {\n\t\tif *item.KeyName == globalIpKeyname {\n\t\t\tglobalIpItems = append(globalIpItems, item)\n\t\t}\n\t}\n\n\tif len(globalIpItems) == 0 {\n\t\treturn &datatypes.Container_Product_Order_Network_Subnet{},\n\t\t\tfmt.Errorf(\"No product items matching %s could be found\", globalIpKeyname)\n\t}\n\n\tproductOrderContainer := datatypes.Container_Product_Order_Network_Subnet{\n\t\tContainer_Product_Order: datatypes.Container_Product_Order{\n\t\t\tPackageId: pkg.Id,\n\t\t\tPrices: []datatypes.Product_Item_Price{\n\t\t\t\t{\n\t\t\t\t\tId: globalIpItems[0].Prices[0].Id,\n\t\t\t\t},\n\t\t\t},\n\t\t\tQuantity: sl.Int(1),\n\t\t},\n\t}\n\n\treturn &productOrderContainer, nil\n}\n<commit_msg>Check if n has the appropriate IPv6 format in DiffSuppressFunc of globalip. (#114)<commit_after>package softlayer\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n\t\"github.com\/softlayer\/softlayer-go\/datatypes\"\n\t\"github.com\/softlayer\/softlayer-go\/filter\"\n\t\"github.com\/softlayer\/softlayer-go\/helpers\/product\"\n\t\"github.com\/softlayer\/softlayer-go\/services\"\n\t\"github.com\/softlayer\/softlayer-go\/session\"\n\t\"github.com\/softlayer\/softlayer-go\/sl\"\n)\n\nconst (\n\tAdditionalServicesGlobalIpAddressesPackageType = \"ADDITIONAL_SERVICES_GLOBAL_IP_ADDRESSES\"\n\n\tGlobalIpMask = \"id,ipAddress[ipAddress],destinationIpAddress[ipAddress]\"\n)\n\nfunc resourceSoftLayerGlobalIp() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceSoftLayerGlobalIpCreate,\n\t\tRead: resourceSoftLayerGlobalIpRead,\n\t\tUpdate: resourceSoftLayerGlobalIpUpdate,\n\t\tDelete: resourceSoftLayerGlobalIpDelete,\n\t\tExists: resourceSoftLayerGlobalIpExists,\n\t\tImporter: &schema.ResourceImporter{},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"id\": &schema.Schema{\n\t\t\t\tType: schema.TypeInt,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"ip_address\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"routes_to\": &schema.Schema{\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {\n\t\t\t\t\taddress := v.(string)\n\t\t\t\t\tif net.ParseIP(address) == nil {\n\t\t\t\t\t\terrors = append(errors, fmt.Errorf(\"Invalid IP format: %s\", address))\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\t\t\t\t},\n\t\t\t\tDiffSuppressFunc: func(k, o, n string, d *schema.ResourceData) bool {\n\t\t\t\t\tnewRoutesTo := net.ParseIP(n)\n\t\t\t\t\t\/\/ Return true when n has the appropriate IPv6 format and\n\t\t\t\t\t\/\/ the compressed value of n equals the compressed value of o.\n\t\t\t\t\treturn newRoutesTo != nil && (newRoutesTo.String() == net.ParseIP(o).String())\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc resourceSoftLayerGlobalIpCreate(d *schema.ResourceData, meta interface{}) error {\n\tsess := meta.(ProviderConfig).SoftLayerSession()\n\n\t\/\/ Find price items with AdditionalServicesGlobalIpAddresses\n\tproductOrderContainer, err := buildGlobalIpProductOrderContainer(d, sess, AdditionalServicesGlobalIpAddressesPackageType)\n\tif err != nil {\n\t\t\/\/ Find price items with AdditionalServices\n\t\tproductOrderContainer, err = buildGlobalIpProductOrderContainer(d, sess, AdditionalServicesPackageType)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error creating global ip: %s\", err)\n\t\t}\n\t}\n\n\tlog.Println(\"[INFO] Creating global ip\")\n\n\treceipt, err := services.GetProductOrderService(sess).\n\t\tPlaceOrder(productOrderContainer, sl.Bool(false))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error during creation of global ip: %s\", err)\n\t}\n\n\tglobalIp, err := findGlobalIpByOrderId(sess, *receipt.OrderId)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error during creation of global ip: %s\", err)\n\t}\n\n\td.SetId(fmt.Sprintf(\"%d\", *globalIp.Id))\n\td.Set(\"ip_address\", *globalIp.IpAddress.IpAddress)\n\n\treturn resourceSoftLayerGlobalIpUpdate(d, meta)\n}\n\nfunc resourceSoftLayerGlobalIpRead(d *schema.ResourceData, meta interface{}) error {\n\tsess := meta.(ProviderConfig).SoftLayerSession()\n\tservice := services.GetNetworkSubnetIpAddressGlobalService(sess)\n\n\tglobalIpId, err := strconv.Atoi(d.Id())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Not a valid global ip ID, must be an integer: %s\", err)\n\t}\n\n\tglobalIp, err := service.Id(globalIpId).Mask(GlobalIpMask).GetObject()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error retrieving Global Ip: %s\", err)\n\t}\n\n\td.Set(\"id\", *globalIp.Id)\n\td.Set(\"ip_address\", *globalIp.IpAddress.IpAddress)\n\tif globalIp.DestinationIpAddress != nil {\n\t\td.Set(\"routes_to\", *globalIp.DestinationIpAddress.IpAddress)\n\t}\n\treturn nil\n}\n\nfunc resourceSoftLayerGlobalIpUpdate(d *schema.ResourceData, meta interface{}) error {\n\tsess := meta.(ProviderConfig).SoftLayerSession()\n\tservice := services.GetNetworkSubnetIpAddressGlobalService(sess)\n\n\tglobalIpId, err := strconv.Atoi(d.Id())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Not a valid global ip ID, must be an integer: %s\", err)\n\t}\n\n\troutes_to := d.Get(\"routes_to\").(string)\n\tif strings.Contains(routes_to, \":\") && len(routes_to) != 39 {\n\t\tparts := strings.Split(routes_to, \":\")\n\t\tfor x, s := range parts {\n\t\t\tif s == \"\" {\n\t\t\t\tzeroes := 9 - len(parts)\n\t\t\t\tparts[x] = strings.Repeat(\"0000:\", zeroes)[:(zeroes*4)+(zeroes-1)]\n\t\t\t} else {\n\t\t\t\tparts[x] = fmt.Sprintf(\"%04s\", s)\n\t\t\t}\n\t\t}\n\n\t\troutes_to = strings.Join(parts, \":\")\n\t\td.Set(\"routes_to\", routes_to)\n\t}\n\n\t_, err = service.Id(globalIpId).Route(sl.String(routes_to))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error editing Global Ip: %s\", err)\n\t}\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"pending\"},\n\t\tTarget: []string{\"complete\"},\n\t\tRefresh: func() (interface{}, string, error) {\n\t\t\ttransaction, err := service.Id(globalIpId).GetActiveTransaction()\n\t\t\tif err != nil {\n\t\t\t\treturn datatypes.Network_Subnet_IpAddress_Global{}, \"pending\", err\n\t\t\t}\n\t\t\tif transaction.Id == nil {\n\t\t\t\treturn datatypes.Network_Subnet_IpAddress_Global{}, \"complete\", nil\n\t\t\t}\n\t\t\treturn datatypes.Network_Subnet_IpAddress_Global{}, \"pending\", nil\n\t\t},\n\t\tTimeout: 10 * time.Minute,\n\t\tDelay: 5 * time.Second,\n\t\tMinTimeout: 3 * time.Second,\n\t}\n\n\tpendingResult, err := stateConf.WaitForState()\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error waiting for global ip destination ip address to become active: %s\", err)\n\t}\n\n\tif _, ok := pendingResult.(datatypes.Network_Subnet_IpAddress_Global); ok {\n\t\treturn nil\n\t}\n\n\treturn nil\n}\n\nfunc resourceSoftLayerGlobalIpDelete(d *schema.ResourceData, meta interface{}) error {\n\tsess := meta.(ProviderConfig).SoftLayerSession()\n\tservice := services.GetNetworkSubnetIpAddressGlobalService(sess)\n\n\tglobalIpId, err := strconv.Atoi(d.Id())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Not a valid global ip ID, must be an integer: %s\", err)\n\t}\n\n\tbillingItem, err := service.Id(globalIpId).GetBillingItem()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error deleting global ip: %s\", err)\n\t}\n\n\tif billingItem.Id == nil {\n\t\treturn nil\n\t}\n\n\t_, err = services.GetBillingItemService(sess).Id(*billingItem.Id).CancelService()\n\n\treturn err\n}\n\nfunc resourceSoftLayerGlobalIpExists(d *schema.ResourceData, meta interface{}) (bool, error) {\n\tsess := meta.(ProviderConfig).SoftLayerSession()\n\tservice := services.GetNetworkSubnetIpAddressGlobalService(sess)\n\n\tglobalIpId, err := strconv.Atoi(d.Id())\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"Not a valid ID, must be an integer: %s\", err)\n\t}\n\n\tresult, err := service.Id(globalIpId).GetObject()\n\tif err != nil {\n\t\tif apiErr, ok := err.(sl.Error); ok && apiErr.StatusCode == 404 {\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, fmt.Errorf(\"Error retrieving global ip: %s\", err)\n\t}\n\treturn result.Id != nil && *result.Id == globalIpId, nil\n}\n\nfunc findGlobalIpByOrderId(sess *session.Session, orderId int) (datatypes.Network_Subnet_IpAddress_Global, error) {\n\tstateConf := &resource.StateChangeConf{\n\t\tPending: []string{\"pending\"},\n\t\tTarget: []string{\"complete\"},\n\t\tRefresh: func() (interface{}, string, error) {\n\t\t\tglobalIps, err := services.GetAccountService(sess).\n\t\t\t\tFilter(filter.Path(\"globalIpRecords.billingItem.orderItem.order.id\").\n\t\t\t\t\tEq(strconv.Itoa(orderId)).Build()).\n\t\t\t\tMask(\"id,ipAddress[ipAddress]\").\n\t\t\t\tGetGlobalIpRecords()\n\t\t\tif err != nil {\n\t\t\t\treturn datatypes.Network_Subnet_IpAddress_Global{}, \"\", err\n\t\t\t}\n\n\t\t\tif len(globalIps) == 1 && globalIps[0].IpAddress != nil {\n\t\t\t\treturn globalIps[0], \"complete\", nil\n\t\t\t} else if len(globalIps) == 0 || len(globalIps) == 1 {\n\t\t\t\treturn nil, \"pending\", nil\n\t\t\t} else {\n\t\t\t\treturn nil, \"\", fmt.Errorf(\"Expected one global ip: %s\", err)\n\t\t\t}\n\t\t},\n\t\tTimeout: 10 * time.Minute,\n\t\tDelay: 5 * time.Second,\n\t\tMinTimeout: 3 * time.Second,\n\t}\n\n\tpendingResult, err := stateConf.WaitForState()\n\n\tif err != nil {\n\t\treturn datatypes.Network_Subnet_IpAddress_Global{}, err\n\t}\n\n\tif result, ok := pendingResult.(datatypes.Network_Subnet_IpAddress_Global); ok {\n\t\treturn result, nil\n\t}\n\n\treturn datatypes.Network_Subnet_IpAddress_Global{},\n\t\tfmt.Errorf(\"Cannot find global ip with order id '%d'\", orderId)\n}\n\nfunc buildGlobalIpProductOrderContainer(d *schema.ResourceData, sess *session.Session, packageType string) (\n\t*datatypes.Container_Product_Order_Network_Subnet, error) {\n\n\t\/\/ 1. Get a package\n\tpkg, err := product.GetPackageByType(sess, packageType)\n\tif err != nil {\n\t\treturn &datatypes.Container_Product_Order_Network_Subnet{}, err\n\t}\n\n\t\/\/ 2. Get all prices for the package\n\tproductItems, err := product.GetPackageProducts(sess, *pkg.Id)\n\tif err != nil {\n\t\treturn &datatypes.Container_Product_Order_Network_Subnet{}, err\n\t}\n\n\t\/\/ 3. Find global ip prices\n\tglobalIpKeyname := \"GLOBAL_IPV4\"\n\tif strings.Contains(d.Get(\"routes_to\").(string), \":\") {\n\t\tglobalIpKeyname = \"GLOBAL_IPV6\"\n\t}\n\n\t\/\/ 4. Select items with a matching keyname\n\tglobalIpItems := []datatypes.Product_Item{}\n\tfor _, item := range productItems {\n\t\tif *item.KeyName == globalIpKeyname {\n\t\t\tglobalIpItems = append(globalIpItems, item)\n\t\t}\n\t}\n\n\tif len(globalIpItems) == 0 {\n\t\treturn &datatypes.Container_Product_Order_Network_Subnet{},\n\t\t\tfmt.Errorf(\"No product items matching %s could be found\", globalIpKeyname)\n\t}\n\n\tproductOrderContainer := datatypes.Container_Product_Order_Network_Subnet{\n\t\tContainer_Product_Order: datatypes.Container_Product_Order{\n\t\t\tPackageId: pkg.Id,\n\t\t\tPrices: []datatypes.Product_Item_Price{\n\t\t\t\t{\n\t\t\t\t\tId: globalIpItems[0].Prices[0].Id,\n\t\t\t\t},\n\t\t\t},\n\t\t\tQuantity: sl.Int(1),\n\t\t},\n\t}\n\n\treturn &productOrderContainer, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package ingestor\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"code.cloudfoundry.org\/lager\"\n\t\"github.com\/google\/go-github\/github\"\n)\n\ntype handler struct {\n\tlogger lager.Logger\n\tsecretKey []byte\n\tingestor Ingestor\n}\n\nfunc NewHandler(logger lager.Logger, ingestor Ingestor, secretKey string) *handler {\n\treturn &handler{\n\t\tlogger: logger.Session(\"webhook-handler\"),\n\t\tsecretKey: []byte(secretKey),\n\t\tingestor: ingestor,\n\t}\n}\n\nfunc (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\th.logger.Debug(\"starting\")\n\n\tpayload, err := github.ValidatePayload(r, h.secretKey)\n\tif err != nil {\n\t\th.logger.Error(\"invalid-payload\", err)\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\treturn\n\t}\n\n\tvar event github.PushEvent\n\tif err := json.Unmarshal(payload, &event); err != nil {\n\t\th.logger.Error(\"unmarshal-failed\", err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tscan, valid := extractPushScanFromEvent(event)\n\tif !valid {\n\t\th.logger.Info(\"invalid-event-dropped\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn\n\t}\n\n\th.logger.Info(\"handling-webhook-payload\", lager.Data{\n\t\t\"before\": scan.From,\n\t\t\"after\": scan.To,\n\t\t\"owner\": scan.Owner,\n\t\t\"repo\": scan.Repository,\n\t\t\"private\": scan.Private,\n\t})\n\n\terr = h.ingestor.IngestPushScan(h.logger, scan, r.Header.Get(\"X-GitHub-Delivery\"))\n\tif err != nil {\n\t\th.logger.Error(\"ingest-push-scan-failed\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\th.logger.Debug(\"done\")\n\tw.WriteHeader(http.StatusOK)\n}\n<commit_msg>ingestor.NewHandler returns http.Handler<commit_after>package ingestor\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"code.cloudfoundry.org\/lager\"\n\t\"github.com\/google\/go-github\/github\"\n)\n\ntype handler struct {\n\tlogger lager.Logger\n\tsecretKey []byte\n\tingestor Ingestor\n}\n\nfunc NewHandler(logger lager.Logger, ingestor Ingestor, secretKey string) http.Handler {\n\treturn &handler{\n\t\tlogger: logger.Session(\"webhook-handler\"),\n\t\tsecretKey: []byte(secretKey),\n\t\tingestor: ingestor,\n\t}\n}\n\nfunc (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\th.logger.Debug(\"starting\")\n\n\tpayload, err := github.ValidatePayload(r, h.secretKey)\n\tif err != nil {\n\t\th.logger.Error(\"invalid-payload\", err)\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\treturn\n\t}\n\n\tvar event github.PushEvent\n\tif err := json.Unmarshal(payload, &event); err != nil {\n\t\th.logger.Error(\"unmarshal-failed\", err)\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tscan, valid := extractPushScanFromEvent(event)\n\tif !valid {\n\t\th.logger.Info(\"invalid-event-dropped\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn\n\t}\n\n\th.logger.Info(\"handling-webhook-payload\", lager.Data{\n\t\t\"before\": scan.From,\n\t\t\"after\": scan.To,\n\t\t\"owner\": scan.Owner,\n\t\t\"repo\": scan.Repository,\n\t\t\"private\": scan.Private,\n\t})\n\n\terr = h.ingestor.IngestPushScan(h.logger, scan, r.Header.Get(\"X-GitHub-Delivery\"))\n\tif err != nil {\n\t\th.logger.Error(\"ingest-push-scan-failed\", err)\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\th.logger.Debug(\"done\")\n\tw.WriteHeader(http.StatusOK)\n}\n<|endoftext|>"} {"text":"<commit_before>package admin\n\nimport (\n\t\"diskette\/util\"\n\t\"errors\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/labstack\/echo\"\n\t\"labix.org\/v2\/mgo\/bson\"\n)\n\n\/\/ http POST localhost:5025\/admin\/remove-users userIds:='[\"56bf19d65a1d18b704000001\", \"56be731d5a1d18accd000001\"]' X-Diskette-Session-Token:<session_token>\nfunc (service *serviceImpl) SignoutUsers(c *echo.Context) error {\n\tvar request struct {\n\t\tUserIds []string `json:\"userIds\"`\n\t}\n\tc.Bind(&request)\n\n\tif request.UserIds == nil {\n\t\treturn c.JSON(http.StatusBadRequest, util.CreateErrResponse(errors.New(\"Missing parameter 'userIds'\")))\n\t}\n\n\tobjectIds := []bson.ObjectId{}\n\tfor _, userId := range request.UserIds {\n\t\tobjectIds = append(objectIds, bson.ObjectIdHex(userId))\n\t}\n\n\tinfo, err := service.userCollection.UpdateAll(\n\t\tbson.M{\n\t\t\t\"_id\": bson.M{\n\t\t\t\t\"$in\": objectIds,\n\t\t\t},\n\t\t},\n\t\tbson.M{\n\t\t\t\"$set\": bson.M{\n\t\t\t\t\"signedOutAt\": time.Now(),\n\t\t\t},\n\t\t},\n\t)\n\n\tif err != nil {\n\t\treturn c.JSON(http.StatusInternalServerError, util.CreateErrResponse(err))\n\t}\n\n\treturn c.JSON(http.StatusOK, util.CreateOkResponse(info.Updated))\n}\n<commit_msg>update<commit_after>package admin\n\nimport (\n\t\"diskette\/util\"\n\t\"errors\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/labstack\/echo\"\n\t\"labix.org\/v2\/mgo\/bson\"\n)\n\n\/\/ http POST localhost:5025\/admin\/signout-users userIds:='[\"56bf19d65a1d18b704000001\", \"56be731d5a1d18accd000001\"]' X-Diskette-Session-Token:<session_token>\nfunc (service *serviceImpl) SignoutUsers(c *echo.Context) error {\n\tvar request struct {\n\t\tUserIds []string `json:\"userIds\"`\n\t}\n\tc.Bind(&request)\n\n\tif request.UserIds == nil {\n\t\treturn c.JSON(http.StatusBadRequest, util.CreateErrResponse(errors.New(\"Missing parameter 'userIds'\")))\n\t}\n\n\tobjectIds := []bson.ObjectId{}\n\tfor _, userId := range request.UserIds {\n\t\tobjectIds = append(objectIds, bson.ObjectIdHex(userId))\n\t}\n\n\tinfo, err := service.userCollection.UpdateAll(\n\t\tbson.M{\n\t\t\t\"_id\": bson.M{\n\t\t\t\t\"$in\": objectIds,\n\t\t\t},\n\t\t},\n\t\tbson.M{\n\t\t\t\"$set\": bson.M{\n\t\t\t\t\"signedOutAt\": time.Now(),\n\t\t\t},\n\t\t},\n\t)\n\n\tif err != nil {\n\t\treturn c.JSON(http.StatusInternalServerError, util.CreateErrResponse(err))\n\t}\n\n\treturn c.JSON(http.StatusOK, util.CreateOkResponse(info.Updated))\n}\n<|endoftext|>"} {"text":"<commit_before>package match\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/ast\"\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/debug\"\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/gensym\"\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/scalar\"\n)\n\ntype desugarer struct {\n\tletBoundNames, lets []interface{}\n}\n\nfunc newDesugarer() *desugarer {\n\treturn &desugarer{nil, nil}\n}\n\nfunc (d *desugarer) desugar(x interface{}) interface{} {\n\tswitch x := x.(type) {\n\tcase ast.App:\n\t\treturn ast.NewApp(\n\t\t\td.desugar(x.Function()),\n\t\t\td.desugar(x.Arguments()).(ast.Arguments),\n\t\t\tx.DebugInfo())\n\tcase ast.Arguments:\n\t\tps := make([]ast.PositionalArgument, 0, len(x.Positionals()))\n\n\t\tfor _, p := range x.Positionals() {\n\t\t\tps = append(ps, d.desugar(p).(ast.PositionalArgument))\n\t\t}\n\n\t\tks := make([]ast.KeywordArgument, 0, len(x.Keywords()))\n\n\t\tfor _, k := range x.Keywords() {\n\t\t\tks = append(ks, d.desugar(k).(ast.KeywordArgument))\n\t\t}\n\n\t\tds := make([]interface{}, 0, len(x.ExpandedDicts()))\n\n\t\tfor _, dict := range x.ExpandedDicts() {\n\t\t\tds = append(ds, d.desugar(dict))\n\t\t}\n\n\t\treturn ast.NewArguments(ps, ks, ds)\n\tcase ast.KeywordArgument:\n\t\treturn ast.NewKeywordArgument(x.Name(), d.desugar(x.Value()))\n\tcase ast.LetFunction:\n\t\tls := make([]interface{}, 0, len(x.Lets()))\n\n\t\tfor _, l := range x.Lets() {\n\t\t\tl := d.desugar(l)\n\t\t\tls = append(ls, append(d.takeLets(), l)...)\n\t\t}\n\n\t\tb := d.desugar(x.Body())\n\n\t\treturn ast.NewLetFunction(\n\t\t\tx.Name(),\n\t\t\tx.Signature(),\n\t\t\tappend(ls, d.takeLets()...),\n\t\t\tb,\n\t\t\tx.DebugInfo())\n\tcase ast.LetVar:\n\t\treturn ast.NewLetVar(x.Name(), d.desugar(x.Expr()))\n\tcase ast.Match:\n\t\tcs := make([]ast.MatchCase, 0, len(x.Cases()))\n\n\t\tfor _, c := range x.Cases() {\n\t\t\tcs = append(cs, renameBoundNamesInCase(ast.NewMatchCase(c.Pattern(), d.desugar(c.Value()))))\n\t\t}\n\n\t\treturn d.resultApp(d.createMatchFunction(cs), d.desugar(x.Value()))\n\tcase ast.MutualRecursion:\n\t\tfs := make([]ast.LetFunction, 0, len(x.LetFunctions()))\n\n\t\tfor _, f := range x.LetFunctions() {\n\t\t\tfs = append(fs, d.desugar(f).(ast.LetFunction))\n\t\t}\n\n\t\treturn ast.NewMutualRecursion(fs, x.DebugInfo())\n\tcase ast.Output:\n\t\treturn ast.NewOutput(d.desugar(x.Expr()), x.Expanded())\n\tcase ast.PositionalArgument:\n\t\treturn ast.NewPositionalArgument(d.desugar(x.Value()), x.Expanded())\n\tdefault:\n\t\treturn x\n\t}\n}\n\nfunc (d *desugarer) takeLets() []interface{} {\n\tls := append(d.letBoundNames, d.lets...)\n\td.letBoundNames = nil\n\td.lets = nil\n\treturn ls\n}\n\nfunc (d *desugarer) letTempVar(v interface{}) string {\n\ts := gensym.GenSym(\"match\", \"tmp\")\n\td.lets = append(d.lets, ast.NewLetVar(s, v))\n\treturn s\n}\n\nfunc (d *desugarer) bindName(p interface{}, v interface{}) string {\n\ts := generalNamePatternToName(p)\n\td.letBoundNames = append(d.letBoundNames, ast.NewLetVar(s, v))\n\treturn s\n}\n\n\/\/ matchedApp applies a function to arguments and creates a matched value of\n\/\/ match expression.\nfunc (d *desugarer) matchedApp(f interface{}, args ...interface{}) string {\n\treturn d.bindName(gensym.GenSym(\"match\", \"app\"), app(f, args...))\n}\n\n\/\/ resultApp applies a function to arguments and creates a result value of match\n\/\/ expression.\nfunc (d *desugarer) resultApp(f interface{}, args ...interface{}) string {\n\treturn d.letTempVar(app(f, args...))\n}\n\nfunc (d *desugarer) createMatchFunction(cs []ast.MatchCase) interface{} {\n\targ := gensym.GenSym(\"match\", \"argument\")\n\tbody := d.desugarCases(arg, cs, \"$matchError\")\n\n\tf := ast.NewLetFunction(\n\t\tgensym.GenSym(\"match\", \"function\"),\n\t\tast.NewSignature([]string{arg}, nil, \"\", nil, nil, \"\"),\n\t\td.takeLets(),\n\t\tbody,\n\t\tdebug.NewGoInfo(0))\n\n\td.lets = append(d.lets, f)\n\n\treturn f.Name()\n}\n\nfunc (d *desugarer) desugarCases(v interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\tcss := groupCases(cs)\n\n\tif cs, ok := css[namePattern]; ok {\n\t\tc := cs[0]\n\t\td.bindName(c.Pattern().(string), v)\n\t\tdc = c.Value()\n\t}\n\n\tks := []ast.SwitchCase{}\n\n\tif cs, ok := css[listPattern]; ok {\n\t\tks = append(ks, ast.NewSwitchCase(\"\\\"list\\\"\", d.desugarListCases(v, cs, dc)))\n\t}\n\n\tif cs, ok := css[dictPattern]; ok {\n\t\tks = append(ks, ast.NewSwitchCase(\"\\\"dict\\\"\", d.desugarDictCases(v, cs, dc)))\n\t}\n\n\tif cs, ok := css[scalarPattern]; ok {\n\t\tdc = d.desugarScalarCases(v, cs, dc)\n\t}\n\n\treturn newSwitch(d.resultApp(\"$typeOf\", v), ks, dc)\n}\n\nfunc groupCases(cs []ast.MatchCase) map[patternType][]ast.MatchCase {\n\tcss := map[patternType][]ast.MatchCase{}\n\n\tfor i, c := range cs {\n\t\tt := getPatternType(c.Pattern())\n\n\t\tif t == namePattern && i < len(cs)-1 {\n\t\t\tpanic(\"A wildcard pattern is found while some patterns are left\")\n\t\t}\n\n\t\tcss[t] = append(css[t], c)\n\t}\n\n\treturn css\n}\n\nfunc getPatternType(p interface{}) patternType {\n\tswitch x := p.(type) {\n\tcase string:\n\t\tif scalar.Defined(x) {\n\t\t\treturn scalarPattern\n\t\t}\n\n\t\treturn namePattern\n\tcase ast.App:\n\t\tswitch x.Function().(string) {\n\t\tcase \"$list\":\n\t\t\treturn listPattern\n\t\tcase \"$dict\":\n\t\t\treturn dictPattern\n\t\t}\n\t}\n\n\tpanic(fmt.Errorf(\"Invalid pattern: %#v\", p))\n}\n\nfunc isGeneralNamePattern(p interface{}) bool {\n\tswitch x := p.(type) {\n\tcase string:\n\t\tif scalar.Defined(x) {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\tcase ast.App:\n\t\tps := x.Arguments().Positionals()\n\t\tok := len(ps) == 1 && ps[0].Expanded()\n\n\t\tswitch x.Function().(string) {\n\t\tcase \"$list\":\n\t\t\treturn ok\n\t\tcase \"$dict\":\n\t\t\treturn ok\n\t\t}\n\t}\n\n\tpanic(fmt.Errorf(\"Invalid pattern: %#v\", p))\n}\n\nfunc (d *desugarer) desugarListCases(list interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\ttype group struct {\n\t\tfirst interface{}\n\t\tcases []ast.MatchCase\n\t}\n\n\tgs := []group{}\n\tfirst := d.matchedApp(\"$first\", list)\n\trest := d.matchedApp(\"$rest\", list)\n\n\tfor i, c := range cs {\n\t\tps := c.Pattern().(ast.App).Arguments().Positionals()\n\n\t\tif len(ps) == 0 {\n\t\t\tdc = d.resultApp(\"$if\", app(\"$=\", list, \"$emptyList\"), c.Value(), dc)\n\t\t\tcontinue\n\t\t}\n\n\t\tv := ps[0].Value()\n\n\t\tif ps[0].Expanded() {\n\t\t\td.bindName(v.(string), list)\n\t\t\tdc = c.Value()\n\t\t\tbreak\n\t\t}\n\n\t\tc = ast.NewMatchCase(\n\t\t\tast.NewApp(\"$list\", ast.NewArguments(ps[1:], nil, nil), debug.NewGoInfo(0)),\n\t\t\tc.Value())\n\n\t\tif isGeneralNamePattern(v) {\n\t\t\td.bindName(v, first)\n\n\t\t\tif ks := cs[i+1:]; len(ks) > 0 {\n\t\t\t\tdc = d.desugarListCases(list, ks, dc)\n\t\t\t}\n\n\t\t\tnext := d.desugarCases(rest, []ast.MatchCase{c}, dc)\n\n\t\t\tswitch getPatternType(v) {\n\t\t\tcase namePattern:\n\t\t\t\tdc = next\n\t\t\tcase listPattern:\n\t\t\t\tdc = d.ifType(first, \"list\", next, dc)\n\t\t\tcase dictPattern:\n\t\t\t\tdc = d.ifType(first, \"dict\", next, dc)\n\t\t\tdefault:\n\t\t\t\tpanic(\"Unreachable\")\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\n\t\tgroupExist := false\n\n\t\tfor i, g := range gs {\n\t\t\tif equalPatterns(v, g.first) {\n\t\t\t\tgroupExist = true\n\t\t\t\tgs[i].cases = append(gs[i].cases, c)\n\t\t\t}\n\t\t}\n\n\t\tif !groupExist {\n\t\t\tgs = append(gs, group{v, []ast.MatchCase{c}})\n\t\t}\n\t}\n\n\tks := make([]ast.MatchCase, 0, len(gs))\n\tdc = d.letTempVar(dc)\n\n\tfor _, g := range gs {\n\t\tks = append(ks, ast.NewMatchCase(g.first, d.desugarCases(rest, g.cases, dc)))\n\t}\n\n\treturn d.desugarCases(first, ks, dc)\n}\n\nfunc (d *desugarer) ifType(v interface{}, t string, then, els interface{}) interface{} {\n\treturn d.resultApp(\"$if\", app(\"$=\", app(\"$typeOf\", v), \"\\\"\"+t+\"\\\"\"), then, els)\n}\n\nfunc (d *desugarer) desugarDictCases(dict interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\ttype group struct {\n\t\tkey interface{}\n\t\tcases []ast.MatchCase\n\t}\n\n\tgs := []group{}\n\n\tfor _, c := range cs {\n\t\tps := c.Pattern().(ast.App).Arguments().Positionals()\n\n\t\tif len(ps) == 0 {\n\t\t\tdc = d.resultApp(\"$if\", app(\"$=\", dict, \"$emptyDict\"), c.Value(), dc)\n\t\t\tcontinue\n\t\t}\n\n\t\tk := ps[0].Value()\n\n\t\tif ps[0].Expanded() {\n\t\t\td.bindName(k.(string), dict)\n\t\t\tdc = c.Value()\n\t\t\tbreak\n\t\t}\n\n\t\tg := group{k, []ast.MatchCase{c}}\n\n\t\tif len(gs) == 0 {\n\t\t\tgs = append(gs, g)\n\t\t} else if last := len(gs) - 1; equalPatterns(g.key, gs[last].key) {\n\t\t\tgs[last].cases = append(gs[last].cases, c)\n\t\t} else {\n\t\t\tgs = append(gs, g)\n\t\t}\n\t}\n\n\tfor i := len(gs) - 1; i >= 0; i-- {\n\t\tg := gs[i]\n\t\tdc = d.resultApp(\"$if\",\n\t\t\tapp(\"$include\", dict, g.key),\n\t\t\td.desugarDictCasesOfSameKey(dict, g.cases, dc),\n\t\t\tdc)\n\t}\n\n\treturn dc\n}\n\nfunc (d *desugarer) desugarDictCasesOfSameKey(dict interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\ttype group struct {\n\t\tvalue interface{}\n\t\tcases []ast.MatchCase\n\t}\n\n\tkey := cs[0].Pattern().(ast.App).Arguments().Positionals()[0].Value()\n\tvalue := d.matchedApp(dict, key)\n\tnewDict := d.matchedApp(\"delete\", dict, key)\n\tgs := []group{}\n\n\tfor i, c := range cs {\n\t\tps := c.Pattern().(ast.App).Arguments().Positionals()\n\t\tv := ps[1].Value()\n\n\t\tc = ast.NewMatchCase(\n\t\t\tast.NewApp(\"$dict\", ast.NewArguments(ps[2:], nil, nil), debug.NewGoInfo(0)),\n\t\t\tc.Value())\n\n\t\tif isGeneralNamePattern(v) {\n\t\t\td.bindName(v, value)\n\n\t\t\tif ks := cs[i+1:]; len(ks) != 0 {\n\t\t\t\tdc = d.desugarDictCasesOfSameKey(dict, ks, dc)\n\t\t\t}\n\n\t\t\tnext := d.desugarCases(newDict, []ast.MatchCase{c}, dc)\n\n\t\t\tswitch getPatternType(v) {\n\t\t\tcase namePattern:\n\t\t\t\tdc = next\n\t\t\tcase listPattern:\n\t\t\t\tdc = d.ifType(value, \"list\", next, dc)\n\t\t\tcase dictPattern:\n\t\t\t\tdc = d.ifType(value, \"dict\", next, dc)\n\t\t\tdefault:\n\t\t\t\tpanic(\"Unreachable\")\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\n\t\tgroupExist := false\n\n\t\tfor i, g := range gs {\n\t\t\tif equalPatterns(v, g.value) {\n\t\t\t\tgroupExist = true\n\t\t\t\tgs[i].cases = append(gs[i].cases, c)\n\t\t\t}\n\t\t}\n\n\t\tif !groupExist {\n\t\t\tgs = append(gs, group{v, []ast.MatchCase{c}})\n\t\t}\n\t}\n\n\tcs = make([]ast.MatchCase, 0, len(gs))\n\tdc = d.letTempVar(dc)\n\n\tfor _, g := range gs {\n\t\tcs = append(\n\t\t\tcs,\n\t\t\tast.NewMatchCase(g.value, d.desugarCases(newDict, g.cases, dc)))\n\t}\n\n\treturn d.desugarCases(value, cs, dc)\n}\n\nfunc (d *desugarer) desugarScalarCases(v interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\tks := []ast.SwitchCase{}\n\n\tfor _, c := range cs {\n\t\tks = append(ks, ast.NewSwitchCase(c.Pattern().(string), c.Value()))\n\t}\n\n\treturn newSwitch(v, ks, dc)\n}\n\nfunc renameBoundNamesInCase(c ast.MatchCase) ast.MatchCase {\n\tp, ns := newPatternRenamer().rename(c.Pattern())\n\treturn ast.NewMatchCase(p, newValueRenamer(ns).rename(c.Value()))\n}\n\nfunc equalPatterns(p, q interface{}) bool {\n\tswitch x := p.(type) {\n\tcase string:\n\t\ty, ok := q.(string)\n\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\n\t\treturn x == y\n\tcase ast.App:\n\t\ty, ok := q.(ast.App)\n\n\t\tif !ok ||\n\t\t\tx.Function().(string) != y.Function().(string) ||\n\t\t\tlen(x.Arguments().Positionals()) != len(y.Arguments().Positionals()) {\n\t\t\treturn false\n\t\t}\n\n\t\tfor i := range x.Arguments().Positionals() {\n\t\t\tp := x.Arguments().Positionals()[i]\n\t\t\tq := y.Arguments().Positionals()[i]\n\n\t\t\tif p.Expanded() != q.Expanded() || !equalPatterns(p.Value(), q.Value()) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\tpanic(fmt.Errorf(\"Invalid pattern: %#v, %#v\", p, q))\n}\n\nfunc generalNamePatternToName(p interface{}) string {\n\tswitch x := p.(type) {\n\tcase string:\n\t\treturn x\n\tcase ast.App:\n\t\tif ps := x.Arguments().Positionals(); len(ps) == 1 && ps[0].Expanded() {\n\t\t\treturn ps[0].Value().(string)\n\t\t}\n\t}\n\n\tpanic(fmt.Errorf(\"Invalid pattern: %#v\", p))\n}\n<commit_msg>Refactor match\/desugarer.go<commit_after>package match\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/ast\"\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/debug\"\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/gensym\"\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/scalar\"\n)\n\ntype desugarer struct {\n\tletBoundNames, lets []interface{}\n}\n\nfunc newDesugarer() *desugarer {\n\treturn &desugarer{nil, nil}\n}\n\nfunc (d *desugarer) desugar(x interface{}) interface{} {\n\tswitch x := x.(type) {\n\tcase ast.App:\n\t\treturn ast.NewApp(\n\t\t\td.desugar(x.Function()),\n\t\t\td.desugar(x.Arguments()).(ast.Arguments),\n\t\t\tx.DebugInfo())\n\tcase ast.Arguments:\n\t\tps := make([]ast.PositionalArgument, 0, len(x.Positionals()))\n\n\t\tfor _, p := range x.Positionals() {\n\t\t\tps = append(ps, d.desugar(p).(ast.PositionalArgument))\n\t\t}\n\n\t\tks := make([]ast.KeywordArgument, 0, len(x.Keywords()))\n\n\t\tfor _, k := range x.Keywords() {\n\t\t\tks = append(ks, d.desugar(k).(ast.KeywordArgument))\n\t\t}\n\n\t\tds := make([]interface{}, 0, len(x.ExpandedDicts()))\n\n\t\tfor _, dict := range x.ExpandedDicts() {\n\t\t\tds = append(ds, d.desugar(dict))\n\t\t}\n\n\t\treturn ast.NewArguments(ps, ks, ds)\n\tcase ast.KeywordArgument:\n\t\treturn ast.NewKeywordArgument(x.Name(), d.desugar(x.Value()))\n\tcase ast.LetFunction:\n\t\tls := make([]interface{}, 0, len(x.Lets()))\n\n\t\tfor _, l := range x.Lets() {\n\t\t\tl := d.desugar(l)\n\t\t\tls = append(ls, append(d.takeLets(), l)...)\n\t\t}\n\n\t\tb := d.desugar(x.Body())\n\n\t\treturn ast.NewLetFunction(\n\t\t\tx.Name(),\n\t\t\tx.Signature(),\n\t\t\tappend(ls, d.takeLets()...),\n\t\t\tb,\n\t\t\tx.DebugInfo())\n\tcase ast.LetVar:\n\t\treturn ast.NewLetVar(x.Name(), d.desugar(x.Expr()))\n\tcase ast.Match:\n\t\tcs := make([]ast.MatchCase, 0, len(x.Cases()))\n\n\t\tfor _, c := range x.Cases() {\n\t\t\tcs = append(cs, renameBoundNamesInCase(ast.NewMatchCase(c.Pattern(), d.desugar(c.Value()))))\n\t\t}\n\n\t\treturn d.resultApp(d.createMatchFunction(cs), d.desugar(x.Value()))\n\tcase ast.MutualRecursion:\n\t\tfs := make([]ast.LetFunction, 0, len(x.LetFunctions()))\n\n\t\tfor _, f := range x.LetFunctions() {\n\t\t\tfs = append(fs, d.desugar(f).(ast.LetFunction))\n\t\t}\n\n\t\treturn ast.NewMutualRecursion(fs, x.DebugInfo())\n\tcase ast.Output:\n\t\treturn ast.NewOutput(d.desugar(x.Expr()), x.Expanded())\n\tcase ast.PositionalArgument:\n\t\treturn ast.NewPositionalArgument(d.desugar(x.Value()), x.Expanded())\n\tdefault:\n\t\treturn x\n\t}\n}\n\nfunc (d *desugarer) takeLets() []interface{} {\n\tls := append(d.letBoundNames, d.lets...)\n\td.letBoundNames = nil\n\td.lets = nil\n\treturn ls\n}\n\nfunc (d *desugarer) letTempVar(v interface{}) string {\n\ts := gensym.GenSym(\"match\", \"tmp\")\n\td.lets = append(d.lets, ast.NewLetVar(s, v))\n\treturn s\n}\n\nfunc (d *desugarer) bindName(p interface{}, v interface{}) string {\n\ts := generalNamePatternToName(p)\n\td.letBoundNames = append(d.letBoundNames, ast.NewLetVar(s, v))\n\treturn s\n}\n\n\/\/ matchedApp applies a function to arguments and creates a matched value of\n\/\/ match expression.\nfunc (d *desugarer) matchedApp(f interface{}, args ...interface{}) string {\n\treturn d.bindName(gensym.GenSym(\"match\", \"app\"), app(f, args...))\n}\n\n\/\/ resultApp applies a function to arguments and creates a result value of match\n\/\/ expression.\nfunc (d *desugarer) resultApp(f interface{}, args ...interface{}) string {\n\treturn d.letTempVar(app(f, args...))\n}\n\nfunc (d *desugarer) createMatchFunction(cs []ast.MatchCase) interface{} {\n\targ := gensym.GenSym(\"match\", \"argument\")\n\tbody := d.desugarCases(arg, cs, \"$matchError\")\n\n\tf := ast.NewLetFunction(\n\t\tgensym.GenSym(\"match\", \"function\"),\n\t\tast.NewSignature([]string{arg}, nil, \"\", nil, nil, \"\"),\n\t\td.takeLets(),\n\t\tbody,\n\t\tdebug.NewGoInfo(0))\n\n\td.lets = append(d.lets, f)\n\n\treturn f.Name()\n}\n\nfunc (d *desugarer) desugarCases(v interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\tcss := groupCases(cs)\n\n\tif cs, ok := css[namePattern]; ok {\n\t\tc := cs[0]\n\t\td.bindName(c.Pattern().(string), v)\n\t\tdc = c.Value()\n\t}\n\n\tks := []ast.SwitchCase{}\n\n\tif cs, ok := css[listPattern]; ok {\n\t\tks = append(ks, ast.NewSwitchCase(\"\\\"list\\\"\", d.desugarListCases(v, cs, dc)))\n\t}\n\n\tif cs, ok := css[dictPattern]; ok {\n\t\tks = append(ks, ast.NewSwitchCase(\"\\\"dict\\\"\", d.desugarDictCases(v, cs, dc)))\n\t}\n\n\tif cs, ok := css[scalarPattern]; ok {\n\t\tdc = d.desugarScalarCases(v, cs, dc)\n\t}\n\n\treturn newSwitch(d.resultApp(\"$typeOf\", v), ks, dc)\n}\n\nfunc groupCases(cs []ast.MatchCase) map[patternType][]ast.MatchCase {\n\tcss := map[patternType][]ast.MatchCase{}\n\n\tfor i, c := range cs {\n\t\tt := getPatternType(c.Pattern())\n\n\t\tif t == namePattern && i < len(cs)-1 {\n\t\t\tpanic(\"A wildcard pattern is found while some patterns are left\")\n\t\t}\n\n\t\tcss[t] = append(css[t], c)\n\t}\n\n\treturn css\n}\n\nfunc getPatternType(p interface{}) patternType {\n\tswitch x := p.(type) {\n\tcase string:\n\t\tif scalar.Defined(x) {\n\t\t\treturn scalarPattern\n\t\t}\n\n\t\treturn namePattern\n\tcase ast.App:\n\t\tswitch x.Function().(string) {\n\t\tcase \"$list\":\n\t\t\treturn listPattern\n\t\tcase \"$dict\":\n\t\t\treturn dictPattern\n\t\t}\n\t}\n\n\tpanic(fmt.Errorf(\"Invalid pattern: %#v\", p))\n}\n\nfunc isGeneralNamePattern(p interface{}) bool {\n\tswitch x := p.(type) {\n\tcase string:\n\t\tif scalar.Defined(x) {\n\t\t\treturn false\n\t\t}\n\n\t\treturn true\n\tcase ast.App:\n\t\tps := x.Arguments().Positionals()\n\t\tok := len(ps) == 1 && ps[0].Expanded()\n\n\t\tswitch x.Function().(string) {\n\t\tcase \"$list\":\n\t\t\treturn ok\n\t\tcase \"$dict\":\n\t\t\treturn ok\n\t\t}\n\t}\n\n\tpanic(fmt.Errorf(\"Invalid pattern: %#v\", p))\n}\n\nfunc (d *desugarer) desugarListCases(list interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\ttype group struct {\n\t\tfirst interface{}\n\t\tcases []ast.MatchCase\n\t}\n\n\tgs := []group{}\n\tfirst := d.matchedApp(\"$first\", list)\n\trest := d.matchedApp(\"$rest\", list)\n\n\tfor i, c := range cs {\n\t\tps := c.Pattern().(ast.App).Arguments().Positionals()\n\n\t\tif len(ps) == 0 {\n\t\t\tdc = d.resultApp(\"$if\", app(\"$=\", list, \"$emptyList\"), c.Value(), dc)\n\t\t\tcontinue\n\t\t}\n\n\t\tv := ps[0].Value()\n\n\t\tif ps[0].Expanded() {\n\t\t\td.bindName(v.(string), list)\n\t\t\tdc = c.Value()\n\t\t\tbreak\n\t\t}\n\n\t\tc = ast.NewMatchCase(\n\t\t\tast.NewApp(\"$list\", ast.NewArguments(ps[1:], nil, nil), debug.NewGoInfo(0)),\n\t\t\tc.Value())\n\n\t\tif isGeneralNamePattern(v) {\n\t\t\td.bindName(v, first)\n\n\t\t\tif cs := cs[i+1:]; len(cs) > 0 {\n\t\t\t\tdc = d.desugarListCases(list, cs, dc)\n\t\t\t}\n\n\t\t\tnext := d.desugarCases(rest, []ast.MatchCase{c}, dc)\n\n\t\t\tswitch getPatternType(v) {\n\t\t\tcase namePattern:\n\t\t\t\tdc = next\n\t\t\tcase listPattern:\n\t\t\t\tdc = d.ifType(first, \"list\", next, dc)\n\t\t\tcase dictPattern:\n\t\t\t\tdc = d.ifType(first, \"dict\", next, dc)\n\t\t\tdefault:\n\t\t\t\tpanic(\"Unreachable\")\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\n\t\tgroupExist := false\n\n\t\tfor i, g := range gs {\n\t\t\tif equalPatterns(v, g.first) {\n\t\t\t\tgroupExist = true\n\t\t\t\tgs[i].cases = append(gs[i].cases, c)\n\t\t\t}\n\t\t}\n\n\t\tif !groupExist {\n\t\t\tgs = append(gs, group{v, []ast.MatchCase{c}})\n\t\t}\n\t}\n\n\tcs = make([]ast.MatchCase, 0, len(gs))\n\tdc = d.letTempVar(dc)\n\n\tfor _, g := range gs {\n\t\tcs = append(cs, ast.NewMatchCase(g.first, d.desugarCases(rest, g.cases, dc)))\n\t}\n\n\treturn d.desugarCases(first, cs, dc)\n}\n\nfunc (d *desugarer) ifType(v interface{}, t string, then, els interface{}) interface{} {\n\treturn d.resultApp(\"$if\", app(\"$=\", app(\"$typeOf\", v), \"\\\"\"+t+\"\\\"\"), then, els)\n}\n\nfunc (d *desugarer) desugarDictCases(dict interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\ttype group struct {\n\t\tkey interface{}\n\t\tcases []ast.MatchCase\n\t}\n\n\tgs := []group{}\n\n\tfor _, c := range cs {\n\t\tps := c.Pattern().(ast.App).Arguments().Positionals()\n\n\t\tif len(ps) == 0 {\n\t\t\tdc = d.resultApp(\"$if\", app(\"$=\", dict, \"$emptyDict\"), c.Value(), dc)\n\t\t\tcontinue\n\t\t}\n\n\t\tk := ps[0].Value()\n\n\t\tif ps[0].Expanded() {\n\t\t\td.bindName(k.(string), dict)\n\t\t\tdc = c.Value()\n\t\t\tbreak\n\t\t}\n\n\t\tg := group{k, []ast.MatchCase{c}}\n\n\t\tif len(gs) == 0 {\n\t\t\tgs = append(gs, g)\n\t\t} else if last := len(gs) - 1; equalPatterns(g.key, gs[last].key) {\n\t\t\tgs[last].cases = append(gs[last].cases, c)\n\t\t} else {\n\t\t\tgs = append(gs, g)\n\t\t}\n\t}\n\n\tfor i := len(gs) - 1; i >= 0; i-- {\n\t\tg := gs[i]\n\t\tdc = d.resultApp(\"$if\",\n\t\t\tapp(\"$include\", dict, g.key),\n\t\t\td.desugarDictCasesOfSameKey(dict, g.cases, dc),\n\t\t\tdc)\n\t}\n\n\treturn dc\n}\n\nfunc (d *desugarer) desugarDictCasesOfSameKey(dict interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\ttype group struct {\n\t\tvalue interface{}\n\t\tcases []ast.MatchCase\n\t}\n\n\tkey := cs[0].Pattern().(ast.App).Arguments().Positionals()[0].Value()\n\tvalue := d.matchedApp(dict, key)\n\tnewDict := d.matchedApp(\"delete\", dict, key)\n\tgs := []group{}\n\n\tfor i, c := range cs {\n\t\tps := c.Pattern().(ast.App).Arguments().Positionals()\n\t\tv := ps[1].Value()\n\n\t\tc = ast.NewMatchCase(\n\t\t\tast.NewApp(\"$dict\", ast.NewArguments(ps[2:], nil, nil), debug.NewGoInfo(0)),\n\t\t\tc.Value())\n\n\t\tif isGeneralNamePattern(v) {\n\t\t\td.bindName(v, value)\n\n\t\t\tif cs := cs[i+1:]; len(cs) != 0 {\n\t\t\t\tdc = d.desugarDictCasesOfSameKey(dict, cs, dc)\n\t\t\t}\n\n\t\t\tnext := d.desugarCases(newDict, []ast.MatchCase{c}, dc)\n\n\t\t\tswitch getPatternType(v) {\n\t\t\tcase namePattern:\n\t\t\t\tdc = next\n\t\t\tcase listPattern:\n\t\t\t\tdc = d.ifType(value, \"list\", next, dc)\n\t\t\tcase dictPattern:\n\t\t\t\tdc = d.ifType(value, \"dict\", next, dc)\n\t\t\tdefault:\n\t\t\t\tpanic(\"Unreachable\")\n\t\t\t}\n\n\t\t\tbreak\n\t\t}\n\n\t\tgroupExist := false\n\n\t\tfor i, g := range gs {\n\t\t\tif equalPatterns(v, g.value) {\n\t\t\t\tgroupExist = true\n\t\t\t\tgs[i].cases = append(gs[i].cases, c)\n\t\t\t}\n\t\t}\n\n\t\tif !groupExist {\n\t\t\tgs = append(gs, group{v, []ast.MatchCase{c}})\n\t\t}\n\t}\n\n\tcs = make([]ast.MatchCase, 0, len(gs))\n\tdc = d.letTempVar(dc)\n\n\tfor _, g := range gs {\n\t\tcs = append(\n\t\t\tcs,\n\t\t\tast.NewMatchCase(g.value, d.desugarCases(newDict, g.cases, dc)))\n\t}\n\n\treturn d.desugarCases(value, cs, dc)\n}\n\nfunc (d *desugarer) desugarScalarCases(v interface{}, cs []ast.MatchCase, dc interface{}) interface{} {\n\tks := []ast.SwitchCase{}\n\n\tfor _, c := range cs {\n\t\tks = append(ks, ast.NewSwitchCase(c.Pattern().(string), c.Value()))\n\t}\n\n\treturn newSwitch(v, ks, dc)\n}\n\nfunc renameBoundNamesInCase(c ast.MatchCase) ast.MatchCase {\n\tp, ns := newPatternRenamer().rename(c.Pattern())\n\treturn ast.NewMatchCase(p, newValueRenamer(ns).rename(c.Value()))\n}\n\nfunc equalPatterns(p, q interface{}) bool {\n\tswitch x := p.(type) {\n\tcase string:\n\t\ty, ok := q.(string)\n\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\n\t\treturn x == y\n\tcase ast.App:\n\t\ty, ok := q.(ast.App)\n\n\t\tif !ok ||\n\t\t\tx.Function().(string) != y.Function().(string) ||\n\t\t\tlen(x.Arguments().Positionals()) != len(y.Arguments().Positionals()) {\n\t\t\treturn false\n\t\t}\n\n\t\tfor i := range x.Arguments().Positionals() {\n\t\t\tp := x.Arguments().Positionals()[i]\n\t\t\tq := y.Arguments().Positionals()[i]\n\n\t\t\tif p.Expanded() != q.Expanded() || !equalPatterns(p.Value(), q.Value()) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\t\treturn true\n\t}\n\n\tpanic(fmt.Errorf(\"Invalid pattern: %#v, %#v\", p, q))\n}\n\nfunc generalNamePatternToName(p interface{}) string {\n\tswitch x := p.(type) {\n\tcase string:\n\t\treturn x\n\tcase ast.App:\n\t\tif ps := x.Arguments().Positionals(); len(ps) == 1 && ps[0].Expanded() {\n\t\t\treturn ps[0].Value().(string)\n\t\t}\n\t}\n\n\tpanic(fmt.Errorf(\"Invalid pattern: %#v\", p))\n}\n<|endoftext|>"} {"text":"<commit_before>package match\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/ast\"\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/debug\"\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/gensym\"\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/scalar\"\n)\n\ntype desugarer struct {\n\tlets []interface{}\n}\n\nfunc newDesugarer() *desugarer {\n\treturn &desugarer{nil}\n}\n\nfunc (d *desugarer) desugarMatchExpression(x interface{}) interface{} {\n\tswitch x := x.(type) {\n\tcase ast.App:\n\t\treturn ast.NewApp(\n\t\t\td.desugarMatchExpression(x.Function()),\n\t\t\td.desugarMatchExpression(x.Arguments()).(ast.Arguments),\n\t\t\tx.DebugInfo())\n\tcase ast.Arguments:\n\t\tps := make([]ast.PositionalArgument, 0, len(x.Positionals()))\n\n\t\tfor _, p := range x.Positionals() {\n\t\t\tps = append(ps, d.desugarMatchExpression(p).(ast.PositionalArgument))\n\t\t}\n\n\t\tks := make([]ast.KeywordArgument, 0, len(x.Keywords()))\n\n\t\tfor _, k := range x.Keywords() {\n\t\t\tks = append(ks, d.desugarMatchExpression(k).(ast.KeywordArgument))\n\t\t}\n\n\t\tdicts := make([]interface{}, 0, len(x.ExpandedDicts()))\n\n\t\tfor _, dict := range x.ExpandedDicts() {\n\t\t\tdicts = append(dicts, d.desugarMatchExpression(dict))\n\t\t}\n\n\t\treturn ast.NewArguments(ps, ks, dicts)\n\tcase ast.KeywordArgument:\n\t\treturn ast.NewKeywordArgument(x.Name(), d.desugarMatchExpression(x.Value()))\n\tcase ast.LetFunction:\n\t\tls := make([]interface{}, 0, len(x.Lets()))\n\n\t\tfor _, l := range x.Lets() {\n\t\t\tl := d.desugarMatchExpression(l)\n\t\t\tls = append(ls, append(d.lets, l)...)\n\t\t\td.lets = nil\n\t\t}\n\n\t\treturn ast.NewLetFunction(\n\t\t\tx.Name(),\n\t\t\tx.Signature(),\n\t\t\tls,\n\t\t\td.desugarMatchExpression(x.Body()),\n\t\t\tx.DebugInfo())\n\tcase ast.LetVar:\n\t\treturn ast.NewLetVar(x.Name(), d.desugarMatchExpression(x.Expr()))\n\tcase ast.Match:\n\t\treturn app(d.createMatchFunction(x.Cases()), d.desugarMatchExpression(x.Value()))\n\tcase ast.Output:\n\t\treturn ast.NewOutput(d.desugarMatchExpression(x.Expr()), x.Expanded())\n\tcase ast.PositionalArgument:\n\t\treturn ast.NewPositionalArgument(d.desugarMatchExpression(x.Value()), x.Expanded())\n\tdefault:\n\t\treturn x\n\t}\n}\n\nfunc (d *desugarer) letVar(v interface{}) string {\n\ts := gensym.GenSym(\"match\", \"intermediate\")\n\n\td.lets = append(d.lets, ast.NewLetVar(s, v))\n\n\treturn s\n}\n\nfunc (d *desugarer) createMatchFunction(cs []ast.Case) interface{} {\n\targ := gensym.GenSym(\"match\", \"argument\")\n\tbody := d.casesToBody(arg, cs)\n\n\tf := ast.NewLetFunction(\n\t\tgensym.GenSym(\"match\", \"function\"),\n\t\tast.NewSignature([]string{arg}, nil, \"\", nil, nil, \"\"),\n\t\td.lets,\n\t\tbody,\n\t\tdebug.NewGoInfo(0))\n\n\td.lets = []interface{}{f}\n\n\treturn f.Name()\n}\n\nfunc (d *desugarer) casesToBody(arg string, cs []ast.Case) interface{} {\n\tcs = renameBoundNamesInCases(cs)\n\tbody := app(\"error\", \"MatchError\", \"\\\"Failed to match a value with patterns.\\\"\")\n\n\tfor _, cs := range groupCases(cs) {\n\t\tresult, ok := d.matchCasesOfSamePatterns(arg, cs)\n\t\tbody = app(\"if\", ok, result, body)\n\t}\n\n\treturn body\n}\n\nfunc renameBoundNamesInCases(cs []ast.Case) []ast.Case {\n\tpanic(\"Not implemented\")\n}\n\nfunc app(f interface{}, args ...interface{}) interface{} {\n\treturn ast.NewPApp(f, args, debug.NewGoInfo(0))\n}\n\nfunc (d *desugarer) matchCasesOfSamePatterns(v string, cs []ast.Case) (interface{}, interface{}) {\n\t\/\/ TODO: Implement this function.\n\n\tswitch getPatternType(cs[0].Pattern()) {\n\tcase listPattern:\n\t\tpanic(\"Not implemented\")\n\tcase dictPattern:\n\t\tpanic(\"Not implemented\")\n\tcase scalarPattern:\n\t\tss := make([]interface{}, 0, 2*len(cs))\n\n\t\tfor _, c := range cs {\n\t\t\tss = append(ss, c.Pattern(), c.Value())\n\t\t}\n\n\t\tdict := d.letVar(app(\"dict\", ss...))\n\n\t\treturn app(dict, v), app(\"include\", dict, v)\n\tcase namePattern:\n\t\tpanic(\"Not implemented\")\n\t}\n\n\tpanic(fmt.Errorf(\"Invalid cases: %#v\", cs))\n}\n\nfunc matchType(v string, typ string) interface{} {\n\treturn app(\"=\", app(\"typeOf\", v), typ)\n}\n\nfunc groupCases(cs []ast.Case) map[patternType][]ast.Case {\n\tm := map[patternType][]ast.Case{}\n\n\tfor _, c := range cs {\n\t\tp := getPatternType(c.Pattern())\n\t\tm[p] = append(m[p], c)\n\t}\n\n\treturn m\n}\n\nfunc getPatternType(p interface{}) patternType {\n\tswitch x := p.(type) {\n\tcase string:\n\t\tif scalar.Defined(x) {\n\t\t\treturn scalarPattern\n\t\t}\n\n\t\treturn namePattern\n\tcase ast.App:\n\t\tswitch x.Function().(string) {\n\t\tcase \"$list\":\n\t\t\treturn listPattern\n\t\tcase \"$dict\":\n\t\t\treturn dictPattern\n\t\t}\n\t}\n\n\tpanic(fmt.Errorf(\"Invalid pattern: %#v\", p))\n}\n<commit_msg>Add mock of renameBoundNamesInCases<commit_after>package match\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/ast\"\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/debug\"\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/gensym\"\n\t\"github.com\/tisp-lang\/tisp\/src\/lib\/scalar\"\n)\n\ntype desugarer struct {\n\tlets []interface{}\n}\n\nfunc newDesugarer() *desugarer {\n\treturn &desugarer{nil}\n}\n\nfunc (d *desugarer) desugarMatchExpression(x interface{}) interface{} {\n\tswitch x := x.(type) {\n\tcase ast.App:\n\t\treturn ast.NewApp(\n\t\t\td.desugarMatchExpression(x.Function()),\n\t\t\td.desugarMatchExpression(x.Arguments()).(ast.Arguments),\n\t\t\tx.DebugInfo())\n\tcase ast.Arguments:\n\t\tps := make([]ast.PositionalArgument, 0, len(x.Positionals()))\n\n\t\tfor _, p := range x.Positionals() {\n\t\t\tps = append(ps, d.desugarMatchExpression(p).(ast.PositionalArgument))\n\t\t}\n\n\t\tks := make([]ast.KeywordArgument, 0, len(x.Keywords()))\n\n\t\tfor _, k := range x.Keywords() {\n\t\t\tks = append(ks, d.desugarMatchExpression(k).(ast.KeywordArgument))\n\t\t}\n\n\t\tdicts := make([]interface{}, 0, len(x.ExpandedDicts()))\n\n\t\tfor _, dict := range x.ExpandedDicts() {\n\t\t\tdicts = append(dicts, d.desugarMatchExpression(dict))\n\t\t}\n\n\t\treturn ast.NewArguments(ps, ks, dicts)\n\tcase ast.KeywordArgument:\n\t\treturn ast.NewKeywordArgument(x.Name(), d.desugarMatchExpression(x.Value()))\n\tcase ast.LetFunction:\n\t\tls := make([]interface{}, 0, len(x.Lets()))\n\n\t\tfor _, l := range x.Lets() {\n\t\t\tl := d.desugarMatchExpression(l)\n\t\t\tls = append(ls, append(d.lets, l)...)\n\t\t\td.lets = nil\n\t\t}\n\n\t\treturn ast.NewLetFunction(\n\t\t\tx.Name(),\n\t\t\tx.Signature(),\n\t\t\tls,\n\t\t\td.desugarMatchExpression(x.Body()),\n\t\t\tx.DebugInfo())\n\tcase ast.LetVar:\n\t\treturn ast.NewLetVar(x.Name(), d.desugarMatchExpression(x.Expr()))\n\tcase ast.Match:\n\t\treturn app(d.createMatchFunction(x.Cases()), d.desugarMatchExpression(x.Value()))\n\tcase ast.Output:\n\t\treturn ast.NewOutput(d.desugarMatchExpression(x.Expr()), x.Expanded())\n\tcase ast.PositionalArgument:\n\t\treturn ast.NewPositionalArgument(d.desugarMatchExpression(x.Value()), x.Expanded())\n\tdefault:\n\t\treturn x\n\t}\n}\n\nfunc (d *desugarer) letVar(v interface{}) string {\n\ts := gensym.GenSym(\"match\", \"intermediate\")\n\n\td.lets = append(d.lets, ast.NewLetVar(s, v))\n\n\treturn s\n}\n\nfunc (d *desugarer) createMatchFunction(cs []ast.Case) interface{} {\n\targ := gensym.GenSym(\"match\", \"argument\")\n\tbody := d.casesToBody(arg, cs)\n\n\tf := ast.NewLetFunction(\n\t\tgensym.GenSym(\"match\", \"function\"),\n\t\tast.NewSignature([]string{arg}, nil, \"\", nil, nil, \"\"),\n\t\td.lets,\n\t\tbody,\n\t\tdebug.NewGoInfo(0))\n\n\td.lets = []interface{}{f}\n\n\treturn f.Name()\n}\n\nfunc (d *desugarer) casesToBody(arg string, cs []ast.Case) interface{} {\n\tcs = renameBoundNamesInCases(cs)\n\tbody := app(\"error\", \"MatchError\", \"\\\"Failed to match a value with patterns.\\\"\")\n\n\tfor _, cs := range groupCases(cs) {\n\t\tresult, ok := d.matchCasesOfSamePatterns(arg, cs)\n\t\tbody = app(\"if\", ok, result, body)\n\t}\n\n\treturn body\n}\n\nfunc renameBoundNamesInCases(cs []ast.Case) []ast.Case {\n\t\/\/ TODO: Implement this function.\n\treturn cs\n}\n\nfunc app(f interface{}, args ...interface{}) interface{} {\n\treturn ast.NewPApp(f, args, debug.NewGoInfo(0))\n}\n\nfunc (d *desugarer) matchCasesOfSamePatterns(v string, cs []ast.Case) (interface{}, interface{}) {\n\tswitch getPatternType(cs[0].Pattern()) {\n\tcase listPattern:\n\t\tpanic(\"Not implemented\")\n\tcase dictPattern:\n\t\tpanic(\"Not implemented\")\n\tcase scalarPattern:\n\t\tss := make([]interface{}, 0, 2*len(cs))\n\n\t\tfor _, c := range cs {\n\t\t\tss = append(ss, c.Pattern(), c.Value())\n\t\t}\n\n\t\tdict := d.letVar(app(\"dict\", ss...))\n\n\t\treturn app(dict, v), app(\"include\", dict, v)\n\tcase namePattern:\n\t\tpanic(\"Not implemented\")\n\t}\n\n\tpanic(fmt.Errorf(\"Invalid cases: %#v\", cs))\n}\n\nfunc matchType(v string, typ string) interface{} {\n\treturn app(\"=\", app(\"typeOf\", v), typ)\n}\n\nfunc groupCases(cs []ast.Case) map[patternType][]ast.Case {\n\tm := map[patternType][]ast.Case{}\n\n\tfor _, c := range cs {\n\t\tp := getPatternType(c.Pattern())\n\t\tm[p] = append(m[p], c)\n\t}\n\n\treturn m\n}\n\nfunc getPatternType(p interface{}) patternType {\n\tswitch x := p.(type) {\n\tcase string:\n\t\tif scalar.Defined(x) {\n\t\t\treturn scalarPattern\n\t\t}\n\n\t\treturn namePattern\n\tcase ast.App:\n\t\tswitch x.Function().(string) {\n\t\tcase \"$list\":\n\t\t\treturn listPattern\n\t\tcase \"$dict\":\n\t\t\treturn dictPattern\n\t\t}\n\t}\n\n\tpanic(fmt.Errorf(\"Invalid pattern: %#v\", p))\n}\n<|endoftext|>"} {"text":"<commit_before>package flooding_nr\n\ntype position struct {\n\tx int\n\ty int\n}\n\nfunc Flood(terrain *[][]rune, x, y int) {\n\tsize := 0\n\tfor _, line := range *terrain {\n\t\tsize += len(line)\n\t}\n\n\tc := make(chan position, size)\n\n\tc <- position{x, y}\n\n\tfor len(c) > 0 {\n\t\tpos := <-c\n\n\t\tif (*terrain)[pos.y][pos.x] == '.' {\n\t\t\t(*terrain)[pos.y][pos.x] = 'X'\n\t\t\tif validatePos(terrain, pos.x+1, pos.y) {\n\t\t\t\tc <- position{pos.x + 1, pos.y}\n\t\t\t}\n\t\t\tif validatePos(terrain, pos.x-1, pos.y) {\n\t\t\t\tc <- position{pos.x - 1, pos.y}\n\t\t\t}\n\t\t\tif validatePos(terrain, pos.x, pos.y-1) {\n\t\t\t\tc <- position{pos.x, pos.y - 1}\n\t\t\t}\n\t\t\tif validatePos(terrain, pos.x, pos.y+1) {\n\t\t\t\tc <- position{pos.x, pos.y + 1}\n\t\t\t}\n\n\t\t}\n\t}\n}\n\nfunc validatePos(terrain *[][]rune, x, y int) bool {\n\tif y < 0 || y >= len(*terrain) || x < 0 || x >= len((*terrain)[y]) {\n\t\treturn false\n\t}\n\treturn (*terrain)[y][x] == '.'\n}\n<commit_msg>comment<commit_after>package flooding_nr\n\ntype position struct {\n\tx int\n\ty int\n}\n\nfunc Flood(terrain *[][]rune, x, y int) {\n\tsize := 0\n\tfor _, line := range *terrain {\n\t\tsize += len(line)\n\t}\n\n\t\/\/ TODO: use real FIFO\n\t\/\/ using a channel instead of a FIFO forces me to set the buffer\n\t\/\/ to the maximum possible number of items for it not to hang\n\tc := make(chan position, size)\n\n\tc <- position{x, y}\n\n\tfor len(c) > 0 {\n\t\tpos := <-c\n\n\t\tif (*terrain)[pos.y][pos.x] == '.' {\n\t\t\t(*terrain)[pos.y][pos.x] = 'X'\n\t\t\tif validatePos(terrain, pos.x+1, pos.y) {\n\t\t\t\tc <- position{pos.x + 1, pos.y}\n\t\t\t}\n\t\t\tif validatePos(terrain, pos.x-1, pos.y) {\n\t\t\t\tc <- position{pos.x - 1, pos.y}\n\t\t\t}\n\t\t\tif validatePos(terrain, pos.x, pos.y-1) {\n\t\t\t\tc <- position{pos.x, pos.y - 1}\n\t\t\t}\n\t\t\tif validatePos(terrain, pos.x, pos.y+1) {\n\t\t\t\tc <- position{pos.x, pos.y + 1}\n\t\t\t}\n\n\t\t}\n\t}\n}\n\nfunc validatePos(terrain *[][]rune, x, y int) bool {\n\tif y < 0 || y >= len(*terrain) || x < 0 || x >= len((*terrain)[y]) {\n\t\treturn false\n\t}\n\treturn (*terrain)[y][x] == '.'\n}\n<|endoftext|>"} {"text":"<commit_before>package fritz\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n)\n\n\/\/ Config stores client configuration of your FRITZ!Box\ntype Config struct {\n\tProtocol string `json:\"protocol\"`\n\tHost string `json:\"host\"`\n\tLoginURL string `json:\"loginURL\"`\n\tUsername string `json:\"username\"`\n\tPassword string `json:\"password\"`\n}\n\n\/\/ FromFile creates a new Config by reading from a file.\nfunc FromFile(filestr string) (*Config, error) {\n\tlog.Printf(\"Reading config from '%s'\", filestr)\n\tfile, errOpen := os.Open(filestr)\n\tif errOpen != nil {\n\t\treturn nil, errors.New(\"Cannot open configuration file '\" + filestr + \"'. Nested error is: \" + errOpen.Error())\n\t}\n\tdecoder := json.NewDecoder(file)\n\tconf := Config{}\n\terrDecode := decoder.Decode(&conf)\n\tif errDecode != nil {\n\t\treturn nil, errors.New(\"Unable to parse configuration file '\" + filestr + \"'. Nested error is: \" + errDecode.Error())\n\t}\n\treturn &conf, nil\n}\n\n\/\/ GetLoginURL returns the URL that is queried for the login challenge\nfunc (config *Config) GetLoginURL() string {\n\treturn fmt.Sprintf(\"%s:\/\/%s%s\", config.Protocol, config.Host, config.LoginURL)\n}\n\n\/\/ GetLoginResponseURL returns the URL that is queried for the login challenge\nfunc (config *Config) GetLoginResponseURL(response string) string {\n\treturn fmt.Sprintf(\"%s?response=%s&username=%s\", config.GetLoginURL(), response, config.Username)\n}\n<commit_msg>use fluent one-liner for decoding<commit_after>package fritz\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n)\n\n\/\/ Config stores client configuration of your FRITZ!Box\ntype Config struct {\n\tProtocol string `json:\"protocol\"`\n\tHost string `json:\"host\"`\n\tLoginURL string `json:\"loginURL\"`\n\tUsername string `json:\"username\"`\n\tPassword string `json:\"password\"`\n}\n\n\/\/ FromFile creates a new Config by reading from a file.\nfunc FromFile(filestr string) (*Config, error) {\n\tlog.Printf(\"Reading config from '%s'\", filestr)\n\tfile, errOpen := os.Open(filestr)\n\tif errOpen != nil {\n\t\treturn nil, errors.New(\"Cannot open configuration file '\" + filestr + \"'. Nested error is: \" + errOpen.Error())\n\t}\n\tconf := Config{}\n\terrDecode := json.NewDecoder(file).Decode(&conf)\n\tif errDecode != nil {\n\t\treturn nil, errors.New(\"Unable to parse configuration file '\" + filestr + \"'. Nested error is: \" + errDecode.Error())\n\t}\n\treturn &conf, nil\n}\n\n\/\/ GetLoginURL returns the URL that is queried for the login challenge\nfunc (config *Config) GetLoginURL() string {\n\treturn fmt.Sprintf(\"%s:\/\/%s%s\", config.Protocol, config.Host, config.LoginURL)\n}\n\n\/\/ GetLoginResponseURL returns the URL that is queried for the login challenge\nfunc (config *Config) GetLoginResponseURL(response string) string {\n\treturn fmt.Sprintf(\"%s?response=%s&username=%s\", config.GetLoginURL(), response, config.Username)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n *\n * k6 - a next-generation load testing tool\n * Copyright (C) 2016 Load Impact\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as\n * published by the Free Software Foundation, either version 3 of the\n * License, or (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\n *\/\n\npackage influxdb\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/influxdata\/influxdb\/client\/v2\"\n\t\"github.com\/loadimpact\/k6\/lib\"\n\t\"github.com\/loadimpact\/k6\/stats\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nconst (\n\tpushInterval = 1 * time.Second\n)\n\nvar _ lib.Collector = &Collector{}\n\ntype Collector struct {\n\tClient client.Client\n\tConfig Config\n\tBatchConf client.BatchPointsConfig\n\n\tbuffer []stats.Sample\n\tbufferLock sync.Mutex\n}\n\nfunc New(conf Config) (*Collector, error) {\n\tcl, err := MakeClient(conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbatchConf := MakeBatchConfig(conf)\n\treturn &Collector{\n\t\tClient: cl,\n\t\tConfig: conf,\n\t\tBatchConf: batchConf,\n\t}, nil\n}\n\nfunc (c *Collector) Init() error {\n\t\/\/ Try to create the database if it doesn't exist. Failure to do so is USUALLY harmless; it\n\t\/\/ usually means we're either a non-admin user to an existing DB or connecting over UDP.\n\t_, err := c.Client.Query(client.NewQuery(\"CREATE DATABASE \"+c.BatchConf.Database, \"\", \"\"))\n\tif err != nil {\n\t\tlog.WithError(err).Debug(\"InfluxDB: Couldn't create database; most likely harmless\")\n\t}\n\n\treturn nil\n}\n\nfunc (c *Collector) Run(ctx context.Context) {\n\tlog.Debug(\"InfluxDB: Running!\")\n\tticker := time.NewTicker(pushInterval)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tc.commit()\n\t\tcase <-ctx.Done():\n\t\t\tc.commit()\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *Collector) Collect(samples []stats.Sample) {\n\tc.bufferLock.Lock()\n\tc.buffer = append(c.buffer, samples...)\n\tc.bufferLock.Unlock()\n}\n\nfunc (c *Collector) Link() string {\n\treturn c.Config.Addr\n}\n\nfunc (c *Collector) commit() {\n\tc.bufferLock.Lock()\n\tsamples := c.buffer\n\tc.buffer = nil\n\tc.bufferLock.Unlock()\n\n\tlog.Debug(\"InfluxDB: Committing...\")\n\tbatch, err := client.NewBatchPoints(c.BatchConf)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"InfluxDB: Couldn't make a batch\")\n\t\treturn\n\t}\n\n\tfor _, sample := range samples {\n\t\ttags := sample.Tags.CloneTags() \/\/TODO: optimize when implementing https:\/\/github.com\/loadimpact\/k6\/issues\/569\n\t\tvalues := c.extractFields(tags, sample.Value)\n\t\tp, err := client.NewPoint(\n\t\t\tsample.Metric.Name,\n\t\t\ttags,\n\t\t\tvalues,\n\t\t\tsample.Time,\n\t\t)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"InfluxDB: Couldn't make point from sample!\")\n\t\t\treturn\n\t\t}\n\t\tbatch.AddPoint(p)\n\t}\n\n\tlog.WithField(\"points\", len(batch.Points())).Debug(\"InfluxDB: Writing...\")\n\tstartTime := time.Now()\n\tif err := c.Client.Write(batch); err != nil {\n\t\tlog.WithError(err).Error(\"InfluxDB: Couldn't write stats\")\n\t}\n\tt := time.Since(startTime)\n\tlog.WithField(\"t\", t).Debug(\"InfluxDB: Batch written!\")\n}\n\nfunc (c *Collector) extractFields(tags map[string]string, value interface{}) map[string]interface{} {\n\tfields := make(map[string]interface{})\n\tfields[\"value\"] = value\n\tfor _, tag := range c.Config.TagsAsFields {\n\t\tif val, ok := tags[tag]; ok {\n\t\t\tfields[tag] = val\n\t\t\tdelete(tags, tag)\n\t\t}\n\t}\n\treturn fields\n}\n\n\/\/ GetRequiredSystemTags returns which sample tags are needed by this collector\nfunc (c *Collector) GetRequiredSystemTags() lib.TagSet {\n\treturn lib.TagSet{} \/\/ There are no required tags for this collector\n}\n<commit_msg>Optimize the InfluxDB commit function by caching tags and values.<commit_after>\/*\n *\n * k6 - a next-generation load testing tool\n * Copyright (C) 2016 Load Impact\n *\n * This program is free software: you can redistribute it and\/or modify\n * it under the terms of the GNU Affero General Public License as\n * published by the Free Software Foundation, either version 3 of the\n * License, or (at your option) any later version.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU Affero General Public License for more details.\n *\n * You should have received a copy of the GNU Affero General Public License\n * along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n *\n *\/\n\npackage influxdb\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/influxdata\/influxdb\/client\/v2\"\n\t\"github.com\/loadimpact\/k6\/lib\"\n\t\"github.com\/loadimpact\/k6\/stats\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\nconst (\n\tpushInterval = 1 * time.Second\n)\n\nvar _ lib.Collector = &Collector{}\n\ntype Collector struct {\n\tClient client.Client\n\tConfig Config\n\tBatchConf client.BatchPointsConfig\n\n\tbuffer []stats.Sample\n\tbufferLock sync.Mutex\n}\n\nfunc New(conf Config) (*Collector, error) {\n\tcl, err := MakeClient(conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbatchConf := MakeBatchConfig(conf)\n\treturn &Collector{\n\t\tClient: cl,\n\t\tConfig: conf,\n\t\tBatchConf: batchConf,\n\t}, nil\n}\n\nfunc (c *Collector) Init() error {\n\t\/\/ Try to create the database if it doesn't exist. Failure to do so is USUALLY harmless; it\n\t\/\/ usually means we're either a non-admin user to an existing DB or connecting over UDP.\n\t_, err := c.Client.Query(client.NewQuery(\"CREATE DATABASE \"+c.BatchConf.Database, \"\", \"\"))\n\tif err != nil {\n\t\tlog.WithError(err).Debug(\"InfluxDB: Couldn't create database; most likely harmless\")\n\t}\n\n\treturn nil\n}\n\nfunc (c *Collector) Run(ctx context.Context) {\n\tlog.Debug(\"InfluxDB: Running!\")\n\tticker := time.NewTicker(pushInterval)\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tc.commit()\n\t\tcase <-ctx.Done():\n\t\t\tc.commit()\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *Collector) Collect(samples []stats.Sample) {\n\tc.bufferLock.Lock()\n\tc.buffer = append(c.buffer, samples...)\n\tc.bufferLock.Unlock()\n}\n\nfunc (c *Collector) Link() string {\n\treturn c.Config.Addr\n}\n\nfunc (c *Collector) commit() {\n\tc.bufferLock.Lock()\n\tsamples := c.buffer\n\tc.buffer = nil\n\tc.bufferLock.Unlock()\n\n\tlog.Debug(\"InfluxDB: Committing...\")\n\tbatch, err := client.NewBatchPoints(c.BatchConf)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"InfluxDB: Couldn't make a batch\")\n\t\treturn\n\t}\n\n\tcache := map[*stats.SampleTags]struct {\n\t\ttags map[string]string\n\t\tvalues map[string]interface{}\n\t}{}\n\n\tvar tags map[string]string\n\tvar values map[string]interface{}\n\tfor _, sample := range samples {\n\t\tif cached, ok := cache[sample.Tags]; ok {\n\t\t\ttags = cached.tags\n\t\t\tvalues = cached.values\n\t\t\tvalues[\"value\"] = sample.Value \/\/ Overwrite the \"value\" field\n\t\t} else {\n\t\t\ttags = sample.Tags.CloneTags()\n\t\t\tvalues = c.extractFields(tags)\n\t\t\tvalues[\"value\"] = sample.Value \/\/ Ok since the \"value\" field will always be overwritten\n\t\t\tcache[sample.Tags] = struct {\n\t\t\t\ttags map[string]string\n\t\t\t\tvalues map[string]interface{}\n\t\t\t}{sample.Tags.CloneTags(), values}\n\t\t}\n\t\tp, err := client.NewPoint(\n\t\t\tsample.Metric.Name,\n\t\t\ttags,\n\t\t\tvalues,\n\t\t\tsample.Time,\n\t\t)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Error(\"InfluxDB: Couldn't make point from sample!\")\n\t\t\treturn\n\t\t}\n\t\tbatch.AddPoint(p)\n\t}\n\n\tlog.WithField(\"points\", len(batch.Points())).Debug(\"InfluxDB: Writing...\")\n\tstartTime := time.Now()\n\tif err := c.Client.Write(batch); err != nil {\n\t\tlog.WithError(err).Error(\"InfluxDB: Couldn't write stats\")\n\t}\n\tt := time.Since(startTime)\n\tlog.WithField(\"t\", t).Debug(\"InfluxDB: Batch written!\")\n}\n\nfunc (c *Collector) extractFields(tags map[string]string) map[string]interface{} {\n\tfields := make(map[string]interface{})\n\tfor _, tag := range c.Config.TagsAsFields {\n\t\tif val, ok := tags[tag]; ok {\n\t\t\tfields[tag] = val\n\t\t\tdelete(tags, tag)\n\t\t}\n\t}\n\treturn fields\n}\n\n\/\/ GetRequiredSystemTags returns which sample tags are needed by this collector\nfunc (c *Collector) GetRequiredSystemTags() lib.TagSet {\n\treturn lib.TagSet{} \/\/ There are no required tags for this collector\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 go-swagger maintainers\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage middleware\n\nimport (\n\t\"net\/http\"\n\n\t\"github.com\/go-swagger\/go-swagger\/errors\"\n\t\"github.com\/go-swagger\/go-swagger\/httpkit\"\n\t\"github.com\/go-swagger\/go-swagger\/httpkit\/middleware\/untyped\"\n\t\"github.com\/go-swagger\/go-swagger\/spec\"\n\t\"github.com\/go-swagger\/go-swagger\/strfmt\"\n\t\"github.com\/golang\/gddo\/httputil\"\n\t\"github.com\/gorilla\/context\"\n)\n\n\/\/ RequestBinder is an interface for types to implement\n\/\/ when they want to be able to bind from a request\ntype RequestBinder interface {\n\tBindRequest(*http.Request, *MatchedRoute) error\n}\n\n\/\/ Responder is an interface for types to implement\n\/\/ when they want to be considered for writing HTTP responses\ntype Responder interface {\n\tWriteResponse(http.ResponseWriter, httpkit.Producer)\n}\n\n\/\/ Context is a type safe wrapper around an untyped request context\n\/\/ used throughout to store request context with the gorilla context module\ntype Context struct {\n\tspec *spec.Document\n\tapi RoutableAPI\n\trouter Router\n\tformats strfmt.Registry\n}\n\ntype routableUntypedAPI struct {\n\tapi *untyped.API\n\thandlers map[string]http.Handler\n\tdefaultConsumes string\n\tdefaultProduces string\n}\n\nfunc newRoutableUntypedAPI(spec *spec.Document, api *untyped.API, context *Context) *routableUntypedAPI {\n\tvar handlers map[string]http.Handler\n\tif spec == nil || api == nil {\n\t\treturn nil\n\t}\n\tfor _, hls := range spec.Operations() {\n\t\tfor _, op := range hls {\n\t\t\tschemes := spec.SecurityDefinitionsFor(op)\n\n\t\t\tif oh, ok := api.OperationHandlerFor(op.ID); ok {\n\t\t\t\tif handlers == nil {\n\t\t\t\t\thandlers = make(map[string]http.Handler)\n\t\t\t\t}\n\n\t\t\t\thandlers[op.ID] = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\t\t\/\/ lookup route info in the context\n\t\t\t\t\troute, _ := context.RouteInfo(r)\n\n\t\t\t\t\t\/\/ bind and validate the request using reflection\n\t\t\t\t\tbound, validation := context.BindAndValidate(r, route)\n\t\t\t\t\tif validation != nil {\n\t\t\t\t\t\tcontext.Respond(w, r, route.Produces, route, validation)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ actually handle the request\n\t\t\t\t\tresult, err := oh.Handle(bound)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\/\/ respond with failure\n\t\t\t\t\t\tcontext.Respond(w, r, route.Produces, route, err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ respond with success\n\t\t\t\t\tcontext.Respond(w, r, route.Produces, route, result)\n\t\t\t\t})\n\n\t\t\t\tif len(schemes) > 0 {\n\t\t\t\t\thandlers[op.ID] = newSecureAPI(context, handlers[op.ID])\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &routableUntypedAPI{\n\t\tapi: api,\n\t\thandlers: handlers,\n\t\tdefaultProduces: api.DefaultProduces,\n\t\tdefaultConsumes: api.DefaultConsumes,\n\t}\n}\n\nfunc (r *routableUntypedAPI) HandlerFor(operationID string) (http.Handler, bool) {\n\thandler, ok := r.handlers[operationID]\n\treturn handler, ok\n}\nfunc (r *routableUntypedAPI) ServeErrorFor(operationID string) func(http.ResponseWriter, *http.Request, error) {\n\treturn r.api.ServeError\n}\nfunc (r *routableUntypedAPI) ConsumersFor(mediaTypes []string) map[string]httpkit.Consumer {\n\treturn r.api.ConsumersFor(mediaTypes)\n}\nfunc (r *routableUntypedAPI) ProducersFor(mediaTypes []string) map[string]httpkit.Producer {\n\treturn r.api.ProducersFor(mediaTypes)\n}\nfunc (r *routableUntypedAPI) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]httpkit.Authenticator {\n\treturn r.api.AuthenticatorsFor(schemes)\n}\nfunc (r *routableUntypedAPI) Formats() strfmt.Registry {\n\treturn r.api.Formats()\n}\n\nfunc (r *routableUntypedAPI) DefaultProduces() string {\n\treturn r.defaultProduces\n}\n\nfunc (r *routableUntypedAPI) DefaultConsumes() string {\n\treturn r.defaultConsumes\n}\n\n\/\/ NewRoutableContext creates a new context for a routable API\nfunc NewRoutableContext(spec *spec.Document, routableAPI RoutableAPI, routes Router) *Context {\n\tctx := &Context{spec: spec, api: routableAPI}\n\treturn ctx\n}\n\n\/\/ NewContext creates a new context wrapper\nfunc NewContext(spec *spec.Document, api *untyped.API, routes Router) *Context {\n\tctx := &Context{spec: spec}\n\tctx.api = newRoutableUntypedAPI(spec, api, ctx)\n\treturn ctx\n}\n\n\/\/ Serve serves the specified spec with the specified api registrations as a http.Handler\nfunc Serve(spec *spec.Document, api *untyped.API) http.Handler {\n\tcontext := NewContext(spec, api, nil)\n\treturn context.APIHandler()\n}\n\ntype contextKey int8\n\nconst (\n\t_ contextKey = iota\n\tctxContentType\n\tctxResponseFormat\n\tctxMatchedRoute\n\tctxAllowedMethods\n\tctxBoundParams\n\tctxSecurityPrincipal\n\n\tctxConsumer\n)\n\ntype contentTypeValue struct {\n\tMediaType string\n\tCharset string\n}\n\n\/\/ BasePath returns the base path for this API\nfunc (c *Context) BasePath() string {\n\treturn c.spec.BasePath()\n}\n\n\/\/ RequiredProduces returns the accepted content types for responses\nfunc (c *Context) RequiredProduces() []string {\n\treturn c.spec.RequiredProduces()\n}\n\n\/\/ BindValidRequest binds a params object to a request but only when the request is valid\n\/\/ if the request is not valid an error will be returned\nfunc (c *Context) BindValidRequest(request *http.Request, route *MatchedRoute, binder RequestBinder) error {\n\tvar res []error\n\n\t\/\/ check and validate content type, select consumer\n\tif httpkit.CanHaveBody(request.Method) {\n\t\tct, _, err := httpkit.ContentType(request.Header)\n\t\tif err != nil {\n\t\t\tres = append(res, err)\n\t\t} else {\n\t\t\tif err := validateContentType(route.Consumes, ct); err != nil {\n\t\t\t\tres = append(res, err)\n\t\t\t}\n\t\t\troute.Consumer = route.Consumers[ct]\n\t\t}\n\t}\n\n\t\/\/ check and validate the response format\n\tif len(res) == 0 {\n\t\tif str := httputil.NegotiateContentType(request, route.Produces, \"\"); str == \"\" {\n\t\t\tres = append(res, errors.InvalidResponseFormat(request.Header.Get(httpkit.HeaderAccept), route.Produces))\n\t\t}\n\t}\n\n\t\/\/ now bind the request with the provided binder\n\t\/\/ it's assumed the binder will also validate the request and return an error if the\n\t\/\/ request is invalid\n\tif binder != nil && len(res) == 0 {\n\t\tif err := binder.BindRequest(request, route); err != nil {\n\t\t\tres = append(res, err)\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}\n\n\/\/ ContentType gets the parsed value of a content type\nfunc (c *Context) ContentType(request *http.Request) (string, string, *errors.ParseError) {\n\tif v, ok := context.GetOk(request, ctxContentType); ok {\n\t\tif val, ok := v.(*contentTypeValue); ok {\n\t\t\treturn val.MediaType, val.Charset, nil\n\t\t}\n\t}\n\n\tmt, cs, err := httpkit.ContentType(request.Header)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tcontext.Set(request, ctxContentType, &contentTypeValue{mt, cs})\n\treturn mt, cs, nil\n}\n\n\/\/ LookupRoute looks a route up and returns true when it is found\nfunc (c *Context) LookupRoute(request *http.Request) (*MatchedRoute, bool) {\n\tif route, ok := c.router.Lookup(request.Method, request.URL.Path); ok {\n\t\treturn route, ok\n\t}\n\treturn nil, false\n}\n\n\/\/ RouteInfo tries to match a route for this request\nfunc (c *Context) RouteInfo(request *http.Request) (*MatchedRoute, bool) {\n\tif v, ok := context.GetOk(request, ctxMatchedRoute); ok {\n\t\tif val, ok := v.(*MatchedRoute); ok {\n\t\t\treturn val, ok\n\t\t}\n\t}\n\n\tif route, ok := c.LookupRoute(request); ok {\n\t\tcontext.Set(request, ctxMatchedRoute, route)\n\t\treturn route, ok\n\t}\n\n\treturn nil, false\n}\n\n\/\/ ResponseFormat negotiates the response content type\nfunc (c *Context) ResponseFormat(r *http.Request, offers []string) string {\n\tif v, ok := context.GetOk(r, ctxResponseFormat); ok {\n\t\tif val, ok := v.(string); ok {\n\t\t\treturn val\n\t\t}\n\t}\n\n\tformat := httputil.NegotiateContentType(r, offers, \"\")\n\tif format != \"\" {\n\t\tcontext.Set(r, ctxResponseFormat, format)\n\t}\n\treturn format\n}\n\n\/\/ AllowedMethods gets the allowed methods for the path of this request\nfunc (c *Context) AllowedMethods(request *http.Request) []string {\n\treturn c.router.OtherMethods(request.Method, request.URL.Path)\n}\n\n\/\/ Authorize authorizes the request\nfunc (c *Context) Authorize(request *http.Request, route *MatchedRoute) (interface{}, error) {\n\tif len(route.Authenticators) == 0 {\n\t\treturn nil, nil\n\t}\n\tif v, ok := context.GetOk(request, ctxSecurityPrincipal); ok {\n\t\treturn v, nil\n\t}\n\n\tfor _, authenticator := range route.Authenticators {\n\t\tapplies, usr, err := authenticator.Authenticate(request)\n\t\tif !applies || err != nil || usr == nil {\n\t\t\tcontinue\n\t\t}\n\t\tcontext.Set(request, ctxSecurityPrincipal, usr)\n\t\treturn usr, nil\n\t}\n\n\treturn nil, errors.Unauthenticated(\"invalid credentials\")\n}\n\n\/\/ BindAndValidate binds and validates the request\nfunc (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute) (interface{}, error) {\n\tif v, ok := context.GetOk(request, ctxBoundParams); ok {\n\t\tif val, ok := v.(*validation); ok {\n\t\t\tif len(val.result) > 0 {\n\t\t\t\treturn val.bound, errors.CompositeValidationError(val.result...)\n\t\t\t}\n\t\t\treturn val.bound, nil\n\t\t}\n\t}\n\tresult := validateRequest(c, request, matched)\n\tif result != nil {\n\t\tcontext.Set(request, ctxBoundParams, result)\n\t}\n\tif len(result.result) > 0 {\n\t\treturn result.bound, errors.CompositeValidationError(result.result...)\n\t}\n\treturn result.bound, nil\n}\n\n\/\/ NotFound the default not found responder for when no route has been matched yet\nfunc (c *Context) NotFound(rw http.ResponseWriter, r *http.Request) {\n\tc.Respond(rw, r, []string{c.api.DefaultProduces()}, nil, errors.NotFound(\"not found\"))\n}\n\n\/\/ Respond renders the response after doing some content negotiation\nfunc (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []string, route *MatchedRoute, data interface{}) {\n\toffers := []string{c.api.DefaultProduces()}\n\tfor _, mt := range produces {\n\t\tif mt != c.api.DefaultProduces() {\n\t\t\toffers = append(offers, mt)\n\t\t}\n\t}\n\n\tformat := c.ResponseFormat(r, offers)\n\trw.Header().Set(httpkit.HeaderContentType, format)\n\n\tif resp, ok := data.(Responder); ok {\n\t\tproducers := route.Producers\n\t\tprod, ok := producers[format]\n\t\tif !ok {\n\t\t\tpanic(errors.New(http.StatusInternalServerError, \"can't find a producer for \"+format))\n\t\t}\n\t\tresp.WriteResponse(rw, prod)\n\t\treturn\n\t}\n\n\tif err, ok := data.(error); ok {\n\t\tif format == \"\" {\n\t\t\trw.Header().Set(httpkit.HeaderContentType, httpkit.JSONMime)\n\t\t}\n\t\tif route == nil || route.Operation == nil {\n\t\t\tc.api.ServeErrorFor(\"\")(rw, r, err)\n\t\t\treturn\n\t\t}\n\t\tc.api.ServeErrorFor(route.Operation.ID)(rw, r, err)\n\t\treturn\n\t}\n\n\tif route == nil || route.Operation == nil {\n\t\trw.WriteHeader(200)\n\t\tif r.Method == \"HEAD\" {\n\t\t\treturn\n\t\t}\n\t\tproducers := c.api.ProducersFor(offers)\n\t\tprod, ok := producers[format]\n\t\tif !ok {\n\t\t\tpanic(errors.New(http.StatusInternalServerError, \"can't find a producer for \"+format))\n\t\t}\n\t\tif err := prod.Produce(rw, data); err != nil {\n\t\t\tpanic(err) \/\/ let the recovery middleware deal with this\n\t\t}\n\t\treturn\n\t}\n\n\tif _, code, ok := route.Operation.SuccessResponse(); ok {\n\t\trw.WriteHeader(code)\n\t\tif code == 204 || r.Method == \"HEAD\" {\n\t\t\treturn\n\t\t}\n\n\t\tproducers := route.Producers\n\t\tprod, ok := producers[format]\n\t\tif !ok {\n\t\t\tpanic(errors.New(http.StatusInternalServerError, \"can't find a producer for \"+format))\n\t\t}\n\t\tif err := prod.Produce(rw, data); err != nil {\n\t\t\tpanic(err) \/\/ let the recovery middleware deal with this\n\t\t}\n\t\treturn\n\t}\n\n\tc.api.ServeErrorFor(route.Operation.ID)(rw, r, errors.New(http.StatusInternalServerError, \"can't produce response\"))\n}\n\n\/\/ APIHandler returns a handler to serve\nfunc (c *Context) APIHandler() http.Handler {\n\treturn specMiddleware(c, newRouter(c, newOperationExecutor(c)))\n}\n<commit_msg>fixes enum validation fail<commit_after>\/\/ Copyright 2015 go-swagger maintainers\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage middleware\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/go-swagger\/go-swagger\/errors\"\n\t\"github.com\/go-swagger\/go-swagger\/httpkit\"\n\t\"github.com\/go-swagger\/go-swagger\/httpkit\/middleware\/untyped\"\n\t\"github.com\/go-swagger\/go-swagger\/spec\"\n\t\"github.com\/go-swagger\/go-swagger\/strfmt\"\n\t\"github.com\/golang\/gddo\/httputil\"\n\t\"github.com\/gorilla\/context\"\n)\n\n\/\/ RequestBinder is an interface for types to implement\n\/\/ when they want to be able to bind from a request\ntype RequestBinder interface {\n\tBindRequest(*http.Request, *MatchedRoute) error\n}\n\n\/\/ Responder is an interface for types to implement\n\/\/ when they want to be considered for writing HTTP responses\ntype Responder interface {\n\tWriteResponse(http.ResponseWriter, httpkit.Producer)\n}\n\n\/\/ Context is a type safe wrapper around an untyped request context\n\/\/ used throughout to store request context with the gorilla context module\ntype Context struct {\n\tspec *spec.Document\n\tapi RoutableAPI\n\trouter Router\n\tformats strfmt.Registry\n}\n\ntype routableUntypedAPI struct {\n\tapi *untyped.API\n\thandlers map[string]http.Handler\n\tdefaultConsumes string\n\tdefaultProduces string\n}\n\nfunc newRoutableUntypedAPI(spec *spec.Document, api *untyped.API, context *Context) *routableUntypedAPI {\n\tvar handlers map[string]http.Handler\n\tif spec == nil || api == nil {\n\t\treturn nil\n\t}\n\tfor _, hls := range spec.Operations() {\n\t\tfor _, op := range hls {\n\t\t\tschemes := spec.SecurityDefinitionsFor(op)\n\n\t\t\tif oh, ok := api.OperationHandlerFor(op.ID); ok {\n\t\t\t\tif handlers == nil {\n\t\t\t\t\thandlers = make(map[string]http.Handler)\n\t\t\t\t}\n\n\t\t\t\thandlers[op.ID] = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\t\t\/\/ lookup route info in the context\n\t\t\t\t\troute, _ := context.RouteInfo(r)\n\n\t\t\t\t\t\/\/ bind and validate the request using reflection\n\t\t\t\t\tbound, validation := context.BindAndValidate(r, route)\n\t\t\t\t\tif validation != nil {\n\t\t\t\t\t\tcontext.Respond(w, r, route.Produces, route, validation)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ actually handle the request\n\t\t\t\t\tresult, err := oh.Handle(bound)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\/\/ respond with failure\n\t\t\t\t\t\tcontext.Respond(w, r, route.Produces, route, err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ respond with success\n\t\t\t\t\tcontext.Respond(w, r, route.Produces, route, result)\n\t\t\t\t})\n\n\t\t\t\tif len(schemes) > 0 {\n\t\t\t\t\thandlers[op.ID] = newSecureAPI(context, handlers[op.ID])\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &routableUntypedAPI{\n\t\tapi: api,\n\t\thandlers: handlers,\n\t\tdefaultProduces: api.DefaultProduces,\n\t\tdefaultConsumes: api.DefaultConsumes,\n\t}\n}\n\nfunc (r *routableUntypedAPI) HandlerFor(operationID string) (http.Handler, bool) {\n\thandler, ok := r.handlers[operationID]\n\treturn handler, ok\n}\nfunc (r *routableUntypedAPI) ServeErrorFor(operationID string) func(http.ResponseWriter, *http.Request, error) {\n\treturn r.api.ServeError\n}\nfunc (r *routableUntypedAPI) ConsumersFor(mediaTypes []string) map[string]httpkit.Consumer {\n\treturn r.api.ConsumersFor(mediaTypes)\n}\nfunc (r *routableUntypedAPI) ProducersFor(mediaTypes []string) map[string]httpkit.Producer {\n\treturn r.api.ProducersFor(mediaTypes)\n}\nfunc (r *routableUntypedAPI) AuthenticatorsFor(schemes map[string]spec.SecurityScheme) map[string]httpkit.Authenticator {\n\treturn r.api.AuthenticatorsFor(schemes)\n}\nfunc (r *routableUntypedAPI) Formats() strfmt.Registry {\n\treturn r.api.Formats()\n}\n\nfunc (r *routableUntypedAPI) DefaultProduces() string {\n\treturn r.defaultProduces\n}\n\nfunc (r *routableUntypedAPI) DefaultConsumes() string {\n\treturn r.defaultConsumes\n}\n\n\/\/ NewRoutableContext creates a new context for a routable API\nfunc NewRoutableContext(spec *spec.Document, routableAPI RoutableAPI, routes Router) *Context {\n\tctx := &Context{spec: spec, api: routableAPI}\n\treturn ctx\n}\n\n\/\/ NewContext creates a new context wrapper\nfunc NewContext(spec *spec.Document, api *untyped.API, routes Router) *Context {\n\tctx := &Context{spec: spec}\n\tctx.api = newRoutableUntypedAPI(spec, api, ctx)\n\treturn ctx\n}\n\n\/\/ Serve serves the specified spec with the specified api registrations as a http.Handler\nfunc Serve(spec *spec.Document, api *untyped.API) http.Handler {\n\tcontext := NewContext(spec, api, nil)\n\treturn context.APIHandler()\n}\n\ntype contextKey int8\n\nconst (\n\t_ contextKey = iota\n\tctxContentType\n\tctxResponseFormat\n\tctxMatchedRoute\n\tctxAllowedMethods\n\tctxBoundParams\n\tctxSecurityPrincipal\n\n\tctxConsumer\n)\n\ntype contentTypeValue struct {\n\tMediaType string\n\tCharset string\n}\n\n\/\/ BasePath returns the base path for this API\nfunc (c *Context) BasePath() string {\n\treturn c.spec.BasePath()\n}\n\n\/\/ RequiredProduces returns the accepted content types for responses\nfunc (c *Context) RequiredProduces() []string {\n\treturn c.spec.RequiredProduces()\n}\n\n\/\/ BindValidRequest binds a params object to a request but only when the request is valid\n\/\/ if the request is not valid an error will be returned\nfunc (c *Context) BindValidRequest(request *http.Request, route *MatchedRoute, binder RequestBinder) error {\n\tvar res []error\n\n\t\/\/ check and validate content type, select consumer\n\tif httpkit.CanHaveBody(request.Method) {\n\t\tct, _, err := httpkit.ContentType(request.Header)\n\t\tif err != nil {\n\t\t\tres = append(res, err)\n\t\t} else {\n\t\t\tif err := validateContentType(route.Consumes, ct); err != nil {\n\t\t\t\tres = append(res, err)\n\t\t\t}\n\t\t\troute.Consumer = route.Consumers[ct]\n\t\t}\n\t}\n\n\t\/\/ check and validate the response format\n\tif len(res) == 0 {\n\t\tif str := httputil.NegotiateContentType(request, route.Produces, \"\"); str == \"\" {\n\t\t\tres = append(res, errors.InvalidResponseFormat(request.Header.Get(httpkit.HeaderAccept), route.Produces))\n\t\t}\n\t}\n\n\t\/\/ now bind the request with the provided binder\n\t\/\/ it's assumed the binder will also validate the request and return an error if the\n\t\/\/ request is invalid\n\tif binder != nil && len(res) == 0 {\n\t\tif err := binder.BindRequest(request, route); err != nil {\n\t\t\tres = append(res, err)\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}\n\n\/\/ ContentType gets the parsed value of a content type\nfunc (c *Context) ContentType(request *http.Request) (string, string, *errors.ParseError) {\n\tif v, ok := context.GetOk(request, ctxContentType); ok {\n\t\tif val, ok := v.(*contentTypeValue); ok {\n\t\t\treturn val.MediaType, val.Charset, nil\n\t\t}\n\t}\n\n\tmt, cs, err := httpkit.ContentType(request.Header)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tcontext.Set(request, ctxContentType, &contentTypeValue{mt, cs})\n\treturn mt, cs, nil\n}\n\n\/\/ LookupRoute looks a route up and returns true when it is found\nfunc (c *Context) LookupRoute(request *http.Request) (*MatchedRoute, bool) {\n\tif route, ok := c.router.Lookup(request.Method, request.URL.Path); ok {\n\t\treturn route, ok\n\t}\n\treturn nil, false\n}\n\n\/\/ RouteInfo tries to match a route for this request\nfunc (c *Context) RouteInfo(request *http.Request) (*MatchedRoute, bool) {\n\tif v, ok := context.GetOk(request, ctxMatchedRoute); ok {\n\t\tif val, ok := v.(*MatchedRoute); ok {\n\t\t\treturn val, ok\n\t\t}\n\t}\n\n\tif route, ok := c.LookupRoute(request); ok {\n\t\tcontext.Set(request, ctxMatchedRoute, route)\n\t\treturn route, ok\n\t}\n\n\treturn nil, false\n}\n\n\/\/ ResponseFormat negotiates the response content type\nfunc (c *Context) ResponseFormat(r *http.Request, offers []string) string {\n\tif v, ok := context.GetOk(r, ctxResponseFormat); ok {\n\t\tif val, ok := v.(string); ok {\n\t\t\treturn val\n\t\t}\n\t}\n\n\tformat := httputil.NegotiateContentType(r, offers, \"\")\n\tif format != \"\" {\n\t\tcontext.Set(r, ctxResponseFormat, format)\n\t}\n\treturn format\n}\n\n\/\/ AllowedMethods gets the allowed methods for the path of this request\nfunc (c *Context) AllowedMethods(request *http.Request) []string {\n\treturn c.router.OtherMethods(request.Method, request.URL.Path)\n}\n\n\/\/ Authorize authorizes the request\nfunc (c *Context) Authorize(request *http.Request, route *MatchedRoute) (interface{}, error) {\n\tif len(route.Authenticators) == 0 {\n\t\treturn nil, nil\n\t}\n\tif v, ok := context.GetOk(request, ctxSecurityPrincipal); ok {\n\t\treturn v, nil\n\t}\n\n\tfor _, authenticator := range route.Authenticators {\n\t\tapplies, usr, err := authenticator.Authenticate(request)\n\t\tif !applies || err != nil || usr == nil {\n\t\t\tcontinue\n\t\t}\n\t\tcontext.Set(request, ctxSecurityPrincipal, usr)\n\t\treturn usr, nil\n\t}\n\n\treturn nil, errors.Unauthenticated(\"invalid credentials\")\n}\n\n\/\/ BindAndValidate binds and validates the request\nfunc (c *Context) BindAndValidate(request *http.Request, matched *MatchedRoute) (interface{}, error) {\n\tif v, ok := context.GetOk(request, ctxBoundParams); ok {\n\t\tif val, ok := v.(*validation); ok {\n\t\t\tif len(val.result) > 0 {\n\t\t\t\treturn val.bound, errors.CompositeValidationError(val.result...)\n\t\t\t}\n\t\t\treturn val.bound, nil\n\t\t}\n\t}\n\tresult := validateRequest(c, request, matched)\n\tif result != nil {\n\t\tcontext.Set(request, ctxBoundParams, result)\n\t}\n\tif len(result.result) > 0 {\n\t\treturn result.bound, errors.CompositeValidationError(result.result...)\n\t}\n\treturn result.bound, nil\n}\n\n\/\/ NotFound the default not found responder for when no route has been matched yet\nfunc (c *Context) NotFound(rw http.ResponseWriter, r *http.Request) {\n\tc.Respond(rw, r, []string{c.api.DefaultProduces()}, nil, errors.NotFound(\"not found\"))\n}\n\n\/\/ Respond renders the response after doing some content negotiation\nfunc (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []string, route *MatchedRoute, data interface{}) {\n\toffers := []string{c.api.DefaultProduces()}\n\tfor _, mt := range produces {\n\t\tif mt != c.api.DefaultProduces() {\n\t\t\toffers = append(offers, mt)\n\t\t}\n\t}\n\n\tformat := c.ResponseFormat(r, offers)\n\trw.Header().Set(httpkit.HeaderContentType, format)\n\n\tif resp, ok := data.(Responder); ok {\n\t\tproducers := route.Producers\n\t\tprod, ok := producers[format]\n\t\tif !ok {\n\t\t\tpanic(errors.New(http.StatusInternalServerError, \"can't find a producer for \"+format))\n\t\t}\n\t\tresp.WriteResponse(rw, prod)\n\t\treturn\n\t}\n\n\tif err, ok := data.(error); ok {\n\t\tif format == \"\" {\n\t\t\trw.Header().Set(httpkit.HeaderContentType, httpkit.JSONMime)\n\t\t}\n\t\tif route == nil || route.Operation == nil {\n\t\t\tc.api.ServeErrorFor(\"\")(rw, r, err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Println(\"ERROR:\", err)\n\t\tc.api.ServeErrorFor(route.Operation.ID)(rw, r, err)\n\t\treturn\n\t}\n\n\tif route == nil || route.Operation == nil {\n\t\trw.WriteHeader(200)\n\t\tif r.Method == \"HEAD\" {\n\t\t\treturn\n\t\t}\n\t\tproducers := c.api.ProducersFor(offers)\n\t\tprod, ok := producers[format]\n\t\tif !ok {\n\t\t\tpanic(errors.New(http.StatusInternalServerError, \"can't find a producer for \"+format))\n\t\t}\n\t\tif err := prod.Produce(rw, data); err != nil {\n\t\t\tpanic(err) \/\/ let the recovery middleware deal with this\n\t\t}\n\t\treturn\n\t}\n\n\tif _, code, ok := route.Operation.SuccessResponse(); ok {\n\t\trw.WriteHeader(code)\n\t\tif code == 204 || r.Method == \"HEAD\" {\n\t\t\treturn\n\t\t}\n\n\t\tproducers := route.Producers\n\t\tprod, ok := producers[format]\n\t\tif !ok {\n\t\t\tpanic(errors.New(http.StatusInternalServerError, \"can't find a producer for \"+format))\n\t\t}\n\t\tif err := prod.Produce(rw, data); err != nil {\n\t\t\tpanic(err) \/\/ let the recovery middleware deal with this\n\t\t}\n\t\treturn\n\t}\n\n\tc.api.ServeErrorFor(route.Operation.ID)(rw, r, errors.New(http.StatusInternalServerError, \"can't produce response\"))\n}\n\n\/\/ APIHandler returns a handler to serve\nfunc (c *Context) APIHandler() http.Handler {\n\treturn specMiddleware(c, newRouter(c, newOperationExecutor(c)))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Hugo Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage hugolib\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/gohugoio\/hugo\/output\"\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/gohugoio\/hugo\/resources\/page\"\n\t\"github.com\/gohugoio\/hugo\/resources\/page\/pagemeta\"\n)\n\ntype siteRenderContext struct {\n\tcfg *BuildCfg\n\n\t\/\/ Zero based index for all output formats combined.\n\tsitesOutIdx int\n\n\t\/\/ Zero based index of the output formats configured within a Site.\n\toutIdx int\n\n\tmultihost bool\n}\n\n\/\/ Whether to render 404.html, robotsTXT.txt which usually is rendered\n\/\/ once only in the site root.\nfunc (s siteRenderContext) renderSingletonPages() bool {\n\tif s.multihost {\n\t\t\/\/ 1 per site\n\t\treturn s.outIdx == 0\n\t}\n\n\t\/\/ 1 for all sites\n\treturn s.sitesOutIdx == 0\n\n}\n\n\/\/ renderPages renders pages each corresponding to a markdown file.\n\/\/ TODO(bep np doc\nfunc (s *Site) renderPages(ctx *siteRenderContext) error {\n\n\tresults := make(chan error)\n\tpages := make(chan *pageState)\n\terrs := make(chan error)\n\n\tgo s.errorCollator(results, errs)\n\n\tnumWorkers := getGoMaxProcs() * 4\n\n\twg := &sync.WaitGroup{}\n\n\tfor i := 0; i < numWorkers; i++ {\n\t\twg.Add(1)\n\t\tgo pageRenderer(ctx, s, pages, results, wg)\n\t}\n\n\tcfg := ctx.cfg\n\n\tif !cfg.PartialReRender && ctx.outIdx == 0 && len(s.headlessPages) > 0 {\n\t\twg.Add(1)\n\t\tgo headlessPagesPublisher(s, wg)\n\t}\n\nL:\n\tfor _, page := range s.workAllPages {\n\t\tif cfg.shouldRender(page) {\n\t\t\tselect {\n\t\t\tcase <-s.h.Done():\n\t\t\t\tbreak L\n\t\t\tdefault:\n\t\t\t\tpages <- page\n\t\t\t}\n\t\t}\n\t}\n\n\tclose(pages)\n\n\twg.Wait()\n\n\tclose(results)\n\n\terr := <-errs\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to render pages\")\n\t}\n\treturn nil\n}\n\nfunc headlessPagesPublisher(s *Site, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tfor _, p := range s.headlessPages {\n\t\tif err := p.renderResources(); err != nil {\n\t\t\ts.SendError(p.errorf(err, \"failed to render page resources\"))\n\t\t}\n\t}\n}\n\nfunc pageRenderer(\n\tctx *siteRenderContext,\n\ts *Site,\n\tpages <-chan *pageState,\n\tresults chan<- error,\n\twg *sync.WaitGroup) {\n\n\tdefer wg.Done()\n\n\tfor p := range pages {\n\t\tf := p.outputFormat()\n\n\t\t\/\/ TODO(bep) get rid of this odd construct. RSS is an output format.\n\t\tif f.Name == \"RSS\" && !s.isEnabled(kindRSS) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif ctx.outIdx == 0 {\n\t\t\tif err := p.renderResources(); err != nil {\n\t\t\t\ts.SendError(p.errorf(err, \"failed to render page resources\"))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tlayouts, err := p.getLayouts()\n\t\tif err != nil {\n\t\t\ts.Log.ERROR.Printf(\"Failed to resolve layout for output %q for page %q: %s\", f.Name, p, err)\n\t\t\tcontinue\n\t\t}\n\n\t\ttargetPath := p.targetPaths().TargetFilename\n\n\t\tif targetPath == \"\" {\n\t\t\ts.Log.ERROR.Printf(\"Failed to create target path for output %q for page %q: %s\", f.Name, p, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := s.renderAndWritePage(&s.PathSpec.ProcessingStats.Pages, \"page \"+p.Title(), targetPath, p, layouts...); err != nil {\n\t\t\tresults <- err\n\t\t}\n\n\t\tif p.paginator != nil && p.paginator.current != nil {\n\t\t\tif err := s.renderPaginator(p, layouts); err != nil {\n\t\t\t\tresults <- err\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ renderPaginator must be run after the owning Page has been rendered.\nfunc (s *Site) renderPaginator(p *pageState, layouts []string) error {\n\n\tpaginatePath := s.Cfg.GetString(\"paginatePath\")\n\n\td := p.targetPathDescriptor\n\tf := p.s.rc.Format\n\td.Type = f\n\n\t\/\/ Rewind\n\tp.paginator.current = p.paginator.current.First()\n\n\t\/\/ Write alias for page 1\n\td.Addends = fmt.Sprintf(\"\/%s\/%d\", paginatePath, 1)\n\ttargetPaths := page.CreateTargetPaths(d)\n\n\tif err := s.writeDestAlias(targetPaths.TargetFilename, p.Permalink(), f, nil); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Render pages for the rest\n\tfor current := p.paginator.current.Next(); current != nil; current = current.Next() {\n\n\t\tp.paginator.current = current\n\t\td.Addends = fmt.Sprintf(\"\/%s\/%d\", paginatePath, current.PageNumber())\n\t\ttargetPaths := page.CreateTargetPaths(d)\n\n\t\tif err := s.renderAndWritePage(\n\t\t\t&s.PathSpec.ProcessingStats.PaginatorPages,\n\t\t\tp.Title(),\n\t\t\ttargetPaths.TargetFilename, p, layouts...); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\treturn nil\n}\n\nfunc (s *Site) render404() error {\n\tif !s.isEnabled(kind404) {\n\t\treturn nil\n\t}\n\n\tp, err := newPageStandalone(&pageMeta{\n\t\ts: s,\n\t\tkind: kind404,\n\t\turlPaths: pagemeta.URLPath{\n\t\t\tURL: \"404.html\",\n\t\t},\n\t},\n\t\toutput.HTMLFormat,\n\t)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnfLayouts := []string{\"404.html\"}\n\n\ttargetPath := p.targetPaths().TargetFilename\n\n\tif targetPath == \"\" {\n\t\treturn errors.New(\"failed to create targetPath for 404 page\")\n\t}\n\n\treturn s.renderAndWritePage(&s.PathSpec.ProcessingStats.Pages, \"404 page\", targetPath, p, nfLayouts...)\n}\n\nfunc (s *Site) renderSitemap() error {\n\tif !s.isEnabled(kindSitemap) {\n\t\treturn nil\n\t}\n\n\tp, err := newPageStandalone(&pageMeta{\n\t\ts: s,\n\t\tkind: kindSitemap,\n\t\turlPaths: pagemeta.URLPath{\n\t\t\tURL: s.siteCfg.sitemap.Filename,\n\t\t}},\n\t\toutput.HTMLFormat,\n\t)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttargetPath := p.targetPaths().TargetFilename\n\n\tif targetPath == \"\" {\n\t\treturn errors.New(\"failed to create targetPath for sitemap\")\n\t}\n\n\tsmLayouts := []string{\"sitemap.xml\", \"_default\/sitemap.xml\", \"_internal\/_default\/sitemap.xml\"}\n\n\treturn s.renderAndWriteXML(&s.PathSpec.ProcessingStats.Sitemaps, \"sitemap\", targetPath, p, smLayouts...)\n}\n\nfunc (s *Site) renderRobotsTXT() error {\n\tif !s.isEnabled(kindRobotsTXT) {\n\t\treturn nil\n\t}\n\n\tif !s.Cfg.GetBool(\"enableRobotsTXT\") {\n\t\treturn nil\n\t}\n\n\tp, err := newPageStandalone(&pageMeta{\n\t\ts: s,\n\t\tkind: kindRobotsTXT,\n\t\turlPaths: pagemeta.URLPath{\n\t\t\tURL: \"robots.txt\",\n\t\t},\n\t},\n\t\toutput.RobotsTxtFormat)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trLayouts := []string{\"robots.txt\", \"_default\/robots.txt\", \"_internal\/_default\/robots.txt\"}\n\n\treturn s.renderAndWritePage(&s.PathSpec.ProcessingStats.Pages, \"Robots Txt\", p.targetPaths().TargetFilename, p, rLayouts...)\n\n}\n\n\/\/ renderAliases renders shell pages that simply have a redirect in the header.\nfunc (s *Site) renderAliases() error {\n\tfor _, p := range s.workAllPages {\n\n\t\tif len(p.Aliases()) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, of := range p.OutputFormats() {\n\t\t\tif !of.Format.IsHTML {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tplink := of.Permalink()\n\t\t\tf := of.Format\n\n\t\t\tfor _, a := range p.Aliases() {\n\t\t\t\tisRelative := !strings.HasPrefix(a, \"\/\")\n\n\t\t\t\tif isRelative {\n\t\t\t\t\t\/\/ Make alias relative, where \".\" will be on the\n\t\t\t\t\t\/\/ same directory level as the current page.\n\t\t\t\t\t\/\/ TODO(bep) ugly URLs doesn't seem to be supported in\n\t\t\t\t\t\/\/ aliases, I'm not sure why not.\n\t\t\t\t\tbasePath := of.RelPermalink()\n\t\t\t\t\tif strings.HasSuffix(basePath, \"\/\") {\n\t\t\t\t\t\tbasePath = path.Join(basePath, \"..\")\n\t\t\t\t\t}\n\t\t\t\t\ta = path.Join(basePath, a)\n\n\t\t\t\t} else if f.Path != \"\" {\n\t\t\t\t\t\/\/ Make sure AMP and similar doesn't clash with regular aliases.\n\t\t\t\t\ta = path.Join(f.Path, a)\n\t\t\t\t}\n\n\t\t\t\tlang := p.Language().Lang\n\n\t\t\t\tif s.h.multihost && !strings.HasPrefix(a, \"\/\"+lang) {\n\t\t\t\t\t\/\/ These need to be in its language root.\n\t\t\t\t\ta = path.Join(lang, a)\n\t\t\t\t}\n\n\t\t\t\tif err := s.writeDestAlias(a, plink, f, p); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ renderMainLanguageRedirect creates a redirect to the main language home,\n\/\/ depending on if it lives in sub folder (e.g. \/en) or not.\nfunc (s *Site) renderMainLanguageRedirect() error {\n\n\tif !s.h.multilingual.enabled() || s.h.IsMultihost() {\n\t\t\/\/ No need for a redirect\n\t\treturn nil\n\t}\n\n\thtml, found := s.outputFormatsConfig.GetByName(\"HTML\")\n\tif found {\n\t\tmainLang := s.h.multilingual.DefaultLang\n\t\tif s.Info.defaultContentLanguageInSubdir {\n\t\t\tmainLangURL := s.PathSpec.AbsURL(mainLang.Lang, false)\n\t\t\ts.Log.DEBUG.Printf(\"Write redirect to main language %s: %s\", mainLang, mainLangURL)\n\t\t\tif err := s.publishDestAlias(true, \"\/\", mainLangURL, html, nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tmainLangURL := s.PathSpec.AbsURL(\"\", false)\n\t\t\ts.Log.DEBUG.Printf(\"Write redirect to main language %s: %s\", mainLang, mainLangURL)\n\t\t\tif err := s.publishDestAlias(true, mainLang.Lang, mainLangURL, html, nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>hugolib: Buffer the render pages chan<commit_after>\/\/ Copyright 2019 The Hugo Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage hugolib\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/gohugoio\/hugo\/output\"\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/gohugoio\/hugo\/resources\/page\"\n\t\"github.com\/gohugoio\/hugo\/resources\/page\/pagemeta\"\n)\n\ntype siteRenderContext struct {\n\tcfg *BuildCfg\n\n\t\/\/ Zero based index for all output formats combined.\n\tsitesOutIdx int\n\n\t\/\/ Zero based index of the output formats configured within a Site.\n\toutIdx int\n\n\tmultihost bool\n}\n\n\/\/ Whether to render 404.html, robotsTXT.txt which usually is rendered\n\/\/ once only in the site root.\nfunc (s siteRenderContext) renderSingletonPages() bool {\n\tif s.multihost {\n\t\t\/\/ 1 per site\n\t\treturn s.outIdx == 0\n\t}\n\n\t\/\/ 1 for all sites\n\treturn s.sitesOutIdx == 0\n\n}\n\n\/\/ renderPages renders pages each corresponding to a markdown file.\n\/\/ TODO(bep np doc\nfunc (s *Site) renderPages(ctx *siteRenderContext) error {\n\n\tnumWorkers := getGoMaxProcs() * 4\n\n\tresults := make(chan error)\n\tpages := make(chan *pageState, numWorkers) \/\/ buffered for performance\n\terrs := make(chan error)\n\n\tgo s.errorCollator(results, errs)\n\n\twg := &sync.WaitGroup{}\n\n\tfor i := 0; i < numWorkers; i++ {\n\t\twg.Add(1)\n\t\tgo pageRenderer(ctx, s, pages, results, wg)\n\t}\n\n\tcfg := ctx.cfg\n\n\tif !cfg.PartialReRender && ctx.outIdx == 0 && len(s.headlessPages) > 0 {\n\t\twg.Add(1)\n\t\tgo headlessPagesPublisher(s, wg)\n\t}\n\nL:\n\tfor _, page := range s.workAllPages {\n\t\tif cfg.shouldRender(page) {\n\t\t\tselect {\n\t\t\tcase <-s.h.Done():\n\t\t\t\tbreak L\n\t\t\tdefault:\n\t\t\t\tpages <- page\n\t\t\t}\n\t\t}\n\t}\n\n\tclose(pages)\n\n\twg.Wait()\n\n\tclose(results)\n\n\terr := <-errs\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to render pages\")\n\t}\n\treturn nil\n}\n\nfunc headlessPagesPublisher(s *Site, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\tfor _, p := range s.headlessPages {\n\t\tif err := p.renderResources(); err != nil {\n\t\t\ts.SendError(p.errorf(err, \"failed to render page resources\"))\n\t\t}\n\t}\n}\n\nfunc pageRenderer(\n\tctx *siteRenderContext,\n\ts *Site,\n\tpages <-chan *pageState,\n\tresults chan<- error,\n\twg *sync.WaitGroup) {\n\n\tdefer wg.Done()\n\n\tfor p := range pages {\n\t\tf := p.outputFormat()\n\n\t\t\/\/ TODO(bep) get rid of this odd construct. RSS is an output format.\n\t\tif f.Name == \"RSS\" && !s.isEnabled(kindRSS) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif ctx.outIdx == 0 {\n\t\t\tif err := p.renderResources(); err != nil {\n\t\t\t\ts.SendError(p.errorf(err, \"failed to render page resources\"))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tlayouts, err := p.getLayouts()\n\t\tif err != nil {\n\t\t\ts.Log.ERROR.Printf(\"Failed to resolve layout for output %q for page %q: %s\", f.Name, p, err)\n\t\t\tcontinue\n\t\t}\n\n\t\ttargetPath := p.targetPaths().TargetFilename\n\n\t\tif targetPath == \"\" {\n\t\t\ts.Log.ERROR.Printf(\"Failed to create target path for output %q for page %q: %s\", f.Name, p, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := s.renderAndWritePage(&s.PathSpec.ProcessingStats.Pages, \"page \"+p.Title(), targetPath, p, layouts...); err != nil {\n\t\t\tresults <- err\n\t\t}\n\n\t\tif p.paginator != nil && p.paginator.current != nil {\n\t\t\tif err := s.renderPaginator(p, layouts); err != nil {\n\t\t\t\tresults <- err\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ renderPaginator must be run after the owning Page has been rendered.\nfunc (s *Site) renderPaginator(p *pageState, layouts []string) error {\n\n\tpaginatePath := s.Cfg.GetString(\"paginatePath\")\n\n\td := p.targetPathDescriptor\n\tf := p.s.rc.Format\n\td.Type = f\n\n\t\/\/ Rewind\n\tp.paginator.current = p.paginator.current.First()\n\n\t\/\/ Write alias for page 1\n\td.Addends = fmt.Sprintf(\"\/%s\/%d\", paginatePath, 1)\n\ttargetPaths := page.CreateTargetPaths(d)\n\n\tif err := s.writeDestAlias(targetPaths.TargetFilename, p.Permalink(), f, nil); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Render pages for the rest\n\tfor current := p.paginator.current.Next(); current != nil; current = current.Next() {\n\n\t\tp.paginator.current = current\n\t\td.Addends = fmt.Sprintf(\"\/%s\/%d\", paginatePath, current.PageNumber())\n\t\ttargetPaths := page.CreateTargetPaths(d)\n\n\t\tif err := s.renderAndWritePage(\n\t\t\t&s.PathSpec.ProcessingStats.PaginatorPages,\n\t\t\tp.Title(),\n\t\t\ttargetPaths.TargetFilename, p, layouts...); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\treturn nil\n}\n\nfunc (s *Site) render404() error {\n\tif !s.isEnabled(kind404) {\n\t\treturn nil\n\t}\n\n\tp, err := newPageStandalone(&pageMeta{\n\t\ts: s,\n\t\tkind: kind404,\n\t\turlPaths: pagemeta.URLPath{\n\t\t\tURL: \"404.html\",\n\t\t},\n\t},\n\t\toutput.HTMLFormat,\n\t)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnfLayouts := []string{\"404.html\"}\n\n\ttargetPath := p.targetPaths().TargetFilename\n\n\tif targetPath == \"\" {\n\t\treturn errors.New(\"failed to create targetPath for 404 page\")\n\t}\n\n\treturn s.renderAndWritePage(&s.PathSpec.ProcessingStats.Pages, \"404 page\", targetPath, p, nfLayouts...)\n}\n\nfunc (s *Site) renderSitemap() error {\n\tif !s.isEnabled(kindSitemap) {\n\t\treturn nil\n\t}\n\n\tp, err := newPageStandalone(&pageMeta{\n\t\ts: s,\n\t\tkind: kindSitemap,\n\t\turlPaths: pagemeta.URLPath{\n\t\t\tURL: s.siteCfg.sitemap.Filename,\n\t\t}},\n\t\toutput.HTMLFormat,\n\t)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttargetPath := p.targetPaths().TargetFilename\n\n\tif targetPath == \"\" {\n\t\treturn errors.New(\"failed to create targetPath for sitemap\")\n\t}\n\n\tsmLayouts := []string{\"sitemap.xml\", \"_default\/sitemap.xml\", \"_internal\/_default\/sitemap.xml\"}\n\n\treturn s.renderAndWriteXML(&s.PathSpec.ProcessingStats.Sitemaps, \"sitemap\", targetPath, p, smLayouts...)\n}\n\nfunc (s *Site) renderRobotsTXT() error {\n\tif !s.isEnabled(kindRobotsTXT) {\n\t\treturn nil\n\t}\n\n\tif !s.Cfg.GetBool(\"enableRobotsTXT\") {\n\t\treturn nil\n\t}\n\n\tp, err := newPageStandalone(&pageMeta{\n\t\ts: s,\n\t\tkind: kindRobotsTXT,\n\t\turlPaths: pagemeta.URLPath{\n\t\t\tURL: \"robots.txt\",\n\t\t},\n\t},\n\t\toutput.RobotsTxtFormat)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trLayouts := []string{\"robots.txt\", \"_default\/robots.txt\", \"_internal\/_default\/robots.txt\"}\n\n\treturn s.renderAndWritePage(&s.PathSpec.ProcessingStats.Pages, \"Robots Txt\", p.targetPaths().TargetFilename, p, rLayouts...)\n\n}\n\n\/\/ renderAliases renders shell pages that simply have a redirect in the header.\nfunc (s *Site) renderAliases() error {\n\tfor _, p := range s.workAllPages {\n\n\t\tif len(p.Aliases()) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, of := range p.OutputFormats() {\n\t\t\tif !of.Format.IsHTML {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tplink := of.Permalink()\n\t\t\tf := of.Format\n\n\t\t\tfor _, a := range p.Aliases() {\n\t\t\t\tisRelative := !strings.HasPrefix(a, \"\/\")\n\n\t\t\t\tif isRelative {\n\t\t\t\t\t\/\/ Make alias relative, where \".\" will be on the\n\t\t\t\t\t\/\/ same directory level as the current page.\n\t\t\t\t\t\/\/ TODO(bep) ugly URLs doesn't seem to be supported in\n\t\t\t\t\t\/\/ aliases, I'm not sure why not.\n\t\t\t\t\tbasePath := of.RelPermalink()\n\t\t\t\t\tif strings.HasSuffix(basePath, \"\/\") {\n\t\t\t\t\t\tbasePath = path.Join(basePath, \"..\")\n\t\t\t\t\t}\n\t\t\t\t\ta = path.Join(basePath, a)\n\n\t\t\t\t} else if f.Path != \"\" {\n\t\t\t\t\t\/\/ Make sure AMP and similar doesn't clash with regular aliases.\n\t\t\t\t\ta = path.Join(f.Path, a)\n\t\t\t\t}\n\n\t\t\t\tlang := p.Language().Lang\n\n\t\t\t\tif s.h.multihost && !strings.HasPrefix(a, \"\/\"+lang) {\n\t\t\t\t\t\/\/ These need to be in its language root.\n\t\t\t\t\ta = path.Join(lang, a)\n\t\t\t\t}\n\n\t\t\t\tif err := s.writeDestAlias(a, plink, f, p); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ renderMainLanguageRedirect creates a redirect to the main language home,\n\/\/ depending on if it lives in sub folder (e.g. \/en) or not.\nfunc (s *Site) renderMainLanguageRedirect() error {\n\n\tif !s.h.multilingual.enabled() || s.h.IsMultihost() {\n\t\t\/\/ No need for a redirect\n\t\treturn nil\n\t}\n\n\thtml, found := s.outputFormatsConfig.GetByName(\"HTML\")\n\tif found {\n\t\tmainLang := s.h.multilingual.DefaultLang\n\t\tif s.Info.defaultContentLanguageInSubdir {\n\t\t\tmainLangURL := s.PathSpec.AbsURL(mainLang.Lang, false)\n\t\t\ts.Log.DEBUG.Printf(\"Write redirect to main language %s: %s\", mainLang, mainLangURL)\n\t\t\tif err := s.publishDestAlias(true, \"\/\", mainLangURL, html, nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tmainLangURL := s.PathSpec.AbsURL(\"\", false)\n\t\t\ts.Log.DEBUG.Printf(\"Write redirect to main language %s: %s\", mainLang, mainLangURL)\n\t\t\tif err := s.publishDestAlias(true, mainLang.Lang, mainLangURL, html, nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Hugo Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage hugolib\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/spf13\/hugo\/helpers\"\n\n\t\"github.com\/spf13\/hugo\/output\"\n\n\tbp \"github.com\/spf13\/hugo\/bufferpool\"\n)\n\n\/\/ renderPages renders pages each corresponding to a markdown file.\n\/\/ TODO(bep np doc\nfunc (s *Site) renderPages() error {\n\n\tresults := make(chan error)\n\tpages := make(chan *Page)\n\terrs := make(chan error)\n\n\tgo errorCollator(results, errs)\n\n\tnumWorkers := getGoMaxProcs() * 4\n\n\twg := &sync.WaitGroup{}\n\n\tfor i := 0; i < numWorkers; i++ {\n\t\twg.Add(1)\n\t\tgo pageRenderer(s, pages, results, wg)\n\t}\n\n\tfor _, page := range s.Pages {\n\t\tpages <- page\n\t}\n\n\tclose(pages)\n\n\twg.Wait()\n\n\tclose(results)\n\n\terr := <-errs\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error(s) rendering pages: %s\", err)\n\t}\n\treturn nil\n}\n\nfunc pageRenderer(s *Site, pages <-chan *Page, results chan<- error, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\n\tfor page := range pages {\n\n\t\tfor i, outFormat := range page.outputFormats {\n\n\t\t\tvar (\n\t\t\t\tpageOutput *PageOutput\n\t\t\t\terr error\n\t\t\t)\n\n\t\t\tif i == 0 {\n\t\t\t\tpageOutput, err = newPageOutput(page, false, outFormat)\n\t\t\t\tpage.mainPageOutput = pageOutput\n\t\t\t}\n\n\t\t\tif outFormat != page.s.rc.Format {\n\t\t\t\t\/\/ Will be rendered ... later.\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif pageOutput == nil {\n\t\t\t\tpageOutput, err = page.mainPageOutput.copyWithFormat(outFormat)\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\ts.Log.ERROR.Printf(\"Failed to create output page for type %q for page %q: %s\", outFormat.Name, page, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar layouts []string\n\n\t\t\tif page.selfLayout != \"\" {\n\t\t\t\tlayouts = []string{page.selfLayout}\n\t\t\t} else {\n\t\t\t\tlayouts, err = s.layouts(pageOutput)\n\t\t\t\tif err != nil {\n\t\t\t\t\ts.Log.ERROR.Printf(\"Failed to resolve layout output %q for page %q: %s\", outFormat.Name, page, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tswitch pageOutput.outputFormat.Name {\n\n\t\t\tcase \"RSS\":\n\t\t\t\tif err := s.renderRSS(pageOutput); err != nil {\n\t\t\t\t\tresults <- err\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\ttargetPath, err := pageOutput.targetPath()\n\t\t\t\tif err != nil {\n\t\t\t\t\ts.Log.ERROR.Printf(\"Failed to create target path for output %q for page %q: %s\", outFormat.Name, page, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\ts.Log.DEBUG.Printf(\"Render %s to %q with layouts %q\", pageOutput.Kind, targetPath, layouts)\n\n\t\t\t\tif err := s.renderAndWritePage(\"page \"+pageOutput.FullFilePath(), targetPath, pageOutput, layouts...); err != nil {\n\t\t\t\t\tresults <- err\n\t\t\t\t}\n\n\t\t\t\tif pageOutput.IsNode() {\n\t\t\t\t\tif err := s.renderPaginator(pageOutput); err != nil {\n\t\t\t\t\t\tresults <- err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}\n}\n\n\/\/ renderPaginator must be run after the owning Page has been rendered.\nfunc (s *Site) renderPaginator(p *PageOutput) error {\n\tif p.paginator != nil {\n\t\ts.Log.DEBUG.Printf(\"Render paginator for page %q\", p.Path())\n\t\tpaginatePath := s.Cfg.GetString(\"paginatePath\")\n\n\t\t\/\/ write alias for page 1\n\t\taddend := fmt.Sprintf(\"\/%s\/%d\", paginatePath, 1)\n\t\ttarget, err := p.createTargetPath(p.outputFormat, addend)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ TODO(bep) do better\n\t\tlink := newOutputFormat(p.Page, p.outputFormat).Permalink()\n\t\tif err := s.writeDestAlias(target, link, nil); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpagers := p.paginator.Pagers()\n\n\t\tfor i, pager := range pagers {\n\t\t\tif i == 0 {\n\t\t\t\t\/\/ already created\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tpagerNode, err := p.copy()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tpagerNode.origOnCopy = p.Page\n\n\t\t\tpagerNode.paginator = pager\n\t\t\tif pager.TotalPages() > 0 {\n\t\t\t\tfirst, _ := pager.page(0)\n\t\t\t\tpagerNode.Date = first.Date\n\t\t\t\tpagerNode.Lastmod = first.Lastmod\n\t\t\t}\n\n\t\t\tpageNumber := i + 1\n\t\t\taddend := fmt.Sprintf(\"\/%s\/%d\", paginatePath, pageNumber)\n\t\t\ttargetPath, _ := p.targetPath(addend)\n\t\t\tlayouts, err := p.layouts()\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := s.renderAndWritePage(\n\t\t\t\tpagerNode.Title,\n\t\t\t\ttargetPath, pagerNode, layouts...); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *Site) renderRSS(p *PageOutput) error {\n\n\tif !s.isEnabled(kindRSS) {\n\t\treturn nil\n\t}\n\n\tif s.Cfg.GetBool(\"disableRSS\") {\n\t\treturn nil\n\t}\n\n\tp.Kind = kindRSS\n\n\t\/\/ TODO(bep) we zero the date here to get the number of diffs down in\n\t\/\/ testing. But this should be set back later; the RSS feed should\n\t\/\/ inherit the publish date from the node it represents.\n\tif p.Kind == KindTaxonomy {\n\t\tvar zeroDate time.Time\n\t\tp.Date = zeroDate\n\t}\n\n\tlimit := s.Cfg.GetInt(\"rssLimit\")\n\tif limit >= 0 && len(p.Pages) > limit {\n\t\tp.Pages = p.Pages[:limit]\n\t\tp.Data[\"Pages\"] = p.Pages\n\t}\n\n\tlayouts, err := s.layoutHandler.For(\n\t\tp.layoutDescriptor,\n\t\t\"\",\n\t\tp.outputFormat)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttargetPath, err := p.targetPath()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn s.renderAndWriteXML(p.Title,\n\t\ttargetPath, p, layouts...)\n}\n\nfunc (s *Site) render404() error {\n\tif !s.isEnabled(kind404) {\n\t\treturn nil\n\t}\n\n\tif s.Cfg.GetBool(\"disable404\") {\n\t\treturn nil\n\t}\n\n\tif s.owner.multilingual.enabled() && (s.Language.Lang != s.owner.multilingual.DefaultLang.Lang) {\n\t\treturn nil\n\t}\n\n\tp := s.newNodePage(kind404)\n\n\tp.Title = \"404 Page not found\"\n\tp.Data[\"Pages\"] = s.Pages\n\tp.Pages = s.Pages\n\tp.URLPath.URL = \"404.html\"\n\n\tif err := p.initTargetPathDescriptor(); err != nil {\n\t\treturn err\n\t}\n\n\tnfLayouts := []string{\"404.html\"}\n\n\tpageOutput, err := newPageOutput(p, false, output.HTMLFormat)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn s.renderAndWritePage(\"404 page\", \"404.html\", pageOutput, s.appendThemeTemplates(nfLayouts)...)\n\n}\n\nfunc (s *Site) renderSitemap() error {\n\tif !s.isEnabled(kindSitemap) {\n\t\treturn nil\n\t}\n\n\tif s.Cfg.GetBool(\"disableSitemap\") {\n\t\treturn nil\n\t}\n\n\tsitemapDefault := parseSitemap(s.Cfg.GetStringMap(\"sitemap\"))\n\n\tn := s.newNodePage(kindSitemap)\n\n\t\/\/ Include all pages (regular, home page, taxonomies etc.)\n\tpages := s.Pages\n\n\tpage := s.newNodePage(kindSitemap)\n\tpage.URLPath.URL = \"\"\n\tif err := page.initTargetPathDescriptor(); err != nil {\n\t\treturn err\n\t}\n\tpage.Sitemap.ChangeFreq = sitemapDefault.ChangeFreq\n\tpage.Sitemap.Priority = sitemapDefault.Priority\n\tpage.Sitemap.Filename = sitemapDefault.Filename\n\n\tn.Data[\"Pages\"] = pages\n\tn.Pages = pages\n\n\t\/\/ TODO(bep) we have several of these\n\tif err := page.initTargetPathDescriptor(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ TODO(bep) this should be done somewhere else\n\tfor _, page := range pages {\n\t\tif page.Sitemap.ChangeFreq == \"\" {\n\t\t\tpage.Sitemap.ChangeFreq = sitemapDefault.ChangeFreq\n\t\t}\n\n\t\tif page.Sitemap.Priority == -1 {\n\t\t\tpage.Sitemap.Priority = sitemapDefault.Priority\n\t\t}\n\n\t\tif page.Sitemap.Filename == \"\" {\n\t\t\tpage.Sitemap.Filename = sitemapDefault.Filename\n\t\t}\n\t}\n\n\tsmLayouts := []string{\"sitemap.xml\", \"_default\/sitemap.xml\", \"_internal\/_default\/sitemap.xml\"}\n\taddLanguagePrefix := n.Site.IsMultiLingual()\n\n\treturn s.renderAndWriteXML(\"sitemap\",\n\t\tn.addLangPathPrefixIfFlagSet(page.Sitemap.Filename, addLanguagePrefix), n, s.appendThemeTemplates(smLayouts)...)\n}\n\nfunc (s *Site) renderRobotsTXT() error {\n\tif !s.isEnabled(kindRobotsTXT) {\n\t\treturn nil\n\t}\n\n\tif !s.Cfg.GetBool(\"enableRobotsTXT\") {\n\t\treturn nil\n\t}\n\n\tn := s.newNodePage(kindRobotsTXT)\n\tif err := n.initTargetPathDescriptor(); err != nil {\n\t\treturn err\n\t}\n\tn.Data[\"Pages\"] = s.Pages\n\tn.Pages = s.Pages\n\n\trLayouts := []string{\"robots.txt\", \"_default\/robots.txt\", \"_internal\/_default\/robots.txt\"}\n\toutBuffer := bp.GetBuffer()\n\tdefer bp.PutBuffer(outBuffer)\n\tif err := s.renderForLayouts(\"robots\", n, outBuffer, s.appendThemeTemplates(rLayouts)...); err != nil {\n\t\thelpers.DistinctWarnLog.Println(err)\n\t\treturn nil\n\t}\n\n\tif outBuffer.Len() == 0 {\n\t\treturn nil\n\t}\n\n\treturn s.publish(\"robots.txt\", outBuffer)\n}\n\n\/\/ renderAliases renders shell pages that simply have a redirect in the header.\nfunc (s *Site) renderAliases() error {\n\tfor _, p := range s.Pages {\n\t\tif len(p.Aliases) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, f := range p.outputFormats {\n\t\t\tif !f.IsHTML {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\to := newOutputFormat(p, f)\n\t\t\tplink := o.Permalink()\n\n\t\t\tfor _, a := range p.Aliases {\n\t\t\t\tif f.Path != \"\" {\n\t\t\t\t\t\/\/ Make sure AMP and similar doesn't clash with regular aliases.\n\t\t\t\t\ta = path.Join(a, f.Path)\n\t\t\t\t}\n\n\t\t\t\tif err := s.writeDestAlias(a, plink, p); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif s.owner.multilingual.enabled() {\n\t\tmainLang := s.owner.multilingual.DefaultLang\n\t\tif s.Info.defaultContentLanguageInSubdir {\n\t\t\tmainLangURL := s.PathSpec.AbsURL(mainLang.Lang, false)\n\t\t\ts.Log.DEBUG.Printf(\"Write redirect to main language %s: %s\", mainLang, mainLangURL)\n\t\t\tif err := s.publishDestAlias(true, \"\/\", mainLangURL, nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tmainLangURL := s.PathSpec.AbsURL(\"\", false)\n\t\t\ts.Log.DEBUG.Printf(\"Write redirect to main language %s: %s\", mainLang, mainLangURL)\n\t\t\tif err := s.publishDestAlias(true, mainLang.Lang, mainLangURL, nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>hugolib: Make the RSS feed use the date for the node it represents<commit_after>\/\/ Copyright 2016 The Hugo Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage hugolib\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"sync\"\n\n\t\"github.com\/spf13\/hugo\/helpers\"\n\n\t\"github.com\/spf13\/hugo\/output\"\n\n\tbp \"github.com\/spf13\/hugo\/bufferpool\"\n)\n\n\/\/ renderPages renders pages each corresponding to a markdown file.\n\/\/ TODO(bep np doc\nfunc (s *Site) renderPages() error {\n\n\tresults := make(chan error)\n\tpages := make(chan *Page)\n\terrs := make(chan error)\n\n\tgo errorCollator(results, errs)\n\n\tnumWorkers := getGoMaxProcs() * 4\n\n\twg := &sync.WaitGroup{}\n\n\tfor i := 0; i < numWorkers; i++ {\n\t\twg.Add(1)\n\t\tgo pageRenderer(s, pages, results, wg)\n\t}\n\n\tfor _, page := range s.Pages {\n\t\tpages <- page\n\t}\n\n\tclose(pages)\n\n\twg.Wait()\n\n\tclose(results)\n\n\terr := <-errs\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Error(s) rendering pages: %s\", err)\n\t}\n\treturn nil\n}\n\nfunc pageRenderer(s *Site, pages <-chan *Page, results chan<- error, wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\n\tfor page := range pages {\n\n\t\tfor i, outFormat := range page.outputFormats {\n\n\t\t\tvar (\n\t\t\t\tpageOutput *PageOutput\n\t\t\t\terr error\n\t\t\t)\n\n\t\t\tif i == 0 {\n\t\t\t\tpageOutput, err = newPageOutput(page, false, outFormat)\n\t\t\t\tpage.mainPageOutput = pageOutput\n\t\t\t}\n\n\t\t\tif outFormat != page.s.rc.Format {\n\t\t\t\t\/\/ Will be rendered ... later.\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif pageOutput == nil {\n\t\t\t\tpageOutput, err = page.mainPageOutput.copyWithFormat(outFormat)\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\ts.Log.ERROR.Printf(\"Failed to create output page for type %q for page %q: %s\", outFormat.Name, page, err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar layouts []string\n\n\t\t\tif page.selfLayout != \"\" {\n\t\t\t\tlayouts = []string{page.selfLayout}\n\t\t\t} else {\n\t\t\t\tlayouts, err = s.layouts(pageOutput)\n\t\t\t\tif err != nil {\n\t\t\t\t\ts.Log.ERROR.Printf(\"Failed to resolve layout output %q for page %q: %s\", outFormat.Name, page, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tswitch pageOutput.outputFormat.Name {\n\n\t\t\tcase \"RSS\":\n\t\t\t\tif err := s.renderRSS(pageOutput); err != nil {\n\t\t\t\t\tresults <- err\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\ttargetPath, err := pageOutput.targetPath()\n\t\t\t\tif err != nil {\n\t\t\t\t\ts.Log.ERROR.Printf(\"Failed to create target path for output %q for page %q: %s\", outFormat.Name, page, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\ts.Log.DEBUG.Printf(\"Render %s to %q with layouts %q\", pageOutput.Kind, targetPath, layouts)\n\n\t\t\t\tif err := s.renderAndWritePage(\"page \"+pageOutput.FullFilePath(), targetPath, pageOutput, layouts...); err != nil {\n\t\t\t\t\tresults <- err\n\t\t\t\t}\n\n\t\t\t\tif pageOutput.IsNode() {\n\t\t\t\t\tif err := s.renderPaginator(pageOutput); err != nil {\n\t\t\t\t\t\tresults <- err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\t}\n}\n\n\/\/ renderPaginator must be run after the owning Page has been rendered.\nfunc (s *Site) renderPaginator(p *PageOutput) error {\n\tif p.paginator != nil {\n\t\ts.Log.DEBUG.Printf(\"Render paginator for page %q\", p.Path())\n\t\tpaginatePath := s.Cfg.GetString(\"paginatePath\")\n\n\t\t\/\/ write alias for page 1\n\t\taddend := fmt.Sprintf(\"\/%s\/%d\", paginatePath, 1)\n\t\ttarget, err := p.createTargetPath(p.outputFormat, addend)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ TODO(bep) do better\n\t\tlink := newOutputFormat(p.Page, p.outputFormat).Permalink()\n\t\tif err := s.writeDestAlias(target, link, nil); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpagers := p.paginator.Pagers()\n\n\t\tfor i, pager := range pagers {\n\t\t\tif i == 0 {\n\t\t\t\t\/\/ already created\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tpagerNode, err := p.copy()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tpagerNode.origOnCopy = p.Page\n\n\t\t\tpagerNode.paginator = pager\n\t\t\tif pager.TotalPages() > 0 {\n\t\t\t\tfirst, _ := pager.page(0)\n\t\t\t\tpagerNode.Date = first.Date\n\t\t\t\tpagerNode.Lastmod = first.Lastmod\n\t\t\t}\n\n\t\t\tpageNumber := i + 1\n\t\t\taddend := fmt.Sprintf(\"\/%s\/%d\", paginatePath, pageNumber)\n\t\t\ttargetPath, _ := p.targetPath(addend)\n\t\t\tlayouts, err := p.layouts()\n\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := s.renderAndWritePage(\n\t\t\t\tpagerNode.Title,\n\t\t\t\ttargetPath, pagerNode, layouts...); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *Site) renderRSS(p *PageOutput) error {\n\n\tif !s.isEnabled(kindRSS) {\n\t\treturn nil\n\t}\n\n\tif s.Cfg.GetBool(\"disableRSS\") {\n\t\treturn nil\n\t}\n\n\tp.Kind = kindRSS\n\n\tlimit := s.Cfg.GetInt(\"rssLimit\")\n\tif limit >= 0 && len(p.Pages) > limit {\n\t\tp.Pages = p.Pages[:limit]\n\t\tp.Data[\"Pages\"] = p.Pages\n\t}\n\n\tlayouts, err := s.layoutHandler.For(\n\t\tp.layoutDescriptor,\n\t\t\"\",\n\t\tp.outputFormat)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttargetPath, err := p.targetPath()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn s.renderAndWriteXML(p.Title,\n\t\ttargetPath, p, layouts...)\n}\n\nfunc (s *Site) render404() error {\n\tif !s.isEnabled(kind404) {\n\t\treturn nil\n\t}\n\n\tif s.Cfg.GetBool(\"disable404\") {\n\t\treturn nil\n\t}\n\n\tif s.owner.multilingual.enabled() && (s.Language.Lang != s.owner.multilingual.DefaultLang.Lang) {\n\t\treturn nil\n\t}\n\n\tp := s.newNodePage(kind404)\n\n\tp.Title = \"404 Page not found\"\n\tp.Data[\"Pages\"] = s.Pages\n\tp.Pages = s.Pages\n\tp.URLPath.URL = \"404.html\"\n\n\tif err := p.initTargetPathDescriptor(); err != nil {\n\t\treturn err\n\t}\n\n\tnfLayouts := []string{\"404.html\"}\n\n\tpageOutput, err := newPageOutput(p, false, output.HTMLFormat)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn s.renderAndWritePage(\"404 page\", \"404.html\", pageOutput, s.appendThemeTemplates(nfLayouts)...)\n\n}\n\nfunc (s *Site) renderSitemap() error {\n\tif !s.isEnabled(kindSitemap) {\n\t\treturn nil\n\t}\n\n\tif s.Cfg.GetBool(\"disableSitemap\") {\n\t\treturn nil\n\t}\n\n\tsitemapDefault := parseSitemap(s.Cfg.GetStringMap(\"sitemap\"))\n\n\tn := s.newNodePage(kindSitemap)\n\n\t\/\/ Include all pages (regular, home page, taxonomies etc.)\n\tpages := s.Pages\n\n\tpage := s.newNodePage(kindSitemap)\n\tpage.URLPath.URL = \"\"\n\tif err := page.initTargetPathDescriptor(); err != nil {\n\t\treturn err\n\t}\n\tpage.Sitemap.ChangeFreq = sitemapDefault.ChangeFreq\n\tpage.Sitemap.Priority = sitemapDefault.Priority\n\tpage.Sitemap.Filename = sitemapDefault.Filename\n\n\tn.Data[\"Pages\"] = pages\n\tn.Pages = pages\n\n\t\/\/ TODO(bep) we have several of these\n\tif err := page.initTargetPathDescriptor(); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ TODO(bep) this should be done somewhere else\n\tfor _, page := range pages {\n\t\tif page.Sitemap.ChangeFreq == \"\" {\n\t\t\tpage.Sitemap.ChangeFreq = sitemapDefault.ChangeFreq\n\t\t}\n\n\t\tif page.Sitemap.Priority == -1 {\n\t\t\tpage.Sitemap.Priority = sitemapDefault.Priority\n\t\t}\n\n\t\tif page.Sitemap.Filename == \"\" {\n\t\t\tpage.Sitemap.Filename = sitemapDefault.Filename\n\t\t}\n\t}\n\n\tsmLayouts := []string{\"sitemap.xml\", \"_default\/sitemap.xml\", \"_internal\/_default\/sitemap.xml\"}\n\taddLanguagePrefix := n.Site.IsMultiLingual()\n\n\treturn s.renderAndWriteXML(\"sitemap\",\n\t\tn.addLangPathPrefixIfFlagSet(page.Sitemap.Filename, addLanguagePrefix), n, s.appendThemeTemplates(smLayouts)...)\n}\n\nfunc (s *Site) renderRobotsTXT() error {\n\tif !s.isEnabled(kindRobotsTXT) {\n\t\treturn nil\n\t}\n\n\tif !s.Cfg.GetBool(\"enableRobotsTXT\") {\n\t\treturn nil\n\t}\n\n\tn := s.newNodePage(kindRobotsTXT)\n\tif err := n.initTargetPathDescriptor(); err != nil {\n\t\treturn err\n\t}\n\tn.Data[\"Pages\"] = s.Pages\n\tn.Pages = s.Pages\n\n\trLayouts := []string{\"robots.txt\", \"_default\/robots.txt\", \"_internal\/_default\/robots.txt\"}\n\toutBuffer := bp.GetBuffer()\n\tdefer bp.PutBuffer(outBuffer)\n\tif err := s.renderForLayouts(\"robots\", n, outBuffer, s.appendThemeTemplates(rLayouts)...); err != nil {\n\t\thelpers.DistinctWarnLog.Println(err)\n\t\treturn nil\n\t}\n\n\tif outBuffer.Len() == 0 {\n\t\treturn nil\n\t}\n\n\treturn s.publish(\"robots.txt\", outBuffer)\n}\n\n\/\/ renderAliases renders shell pages that simply have a redirect in the header.\nfunc (s *Site) renderAliases() error {\n\tfor _, p := range s.Pages {\n\t\tif len(p.Aliases) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, f := range p.outputFormats {\n\t\t\tif !f.IsHTML {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\to := newOutputFormat(p, f)\n\t\t\tplink := o.Permalink()\n\n\t\t\tfor _, a := range p.Aliases {\n\t\t\t\tif f.Path != \"\" {\n\t\t\t\t\t\/\/ Make sure AMP and similar doesn't clash with regular aliases.\n\t\t\t\t\ta = path.Join(a, f.Path)\n\t\t\t\t}\n\n\t\t\t\tif err := s.writeDestAlias(a, plink, p); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif s.owner.multilingual.enabled() {\n\t\tmainLang := s.owner.multilingual.DefaultLang\n\t\tif s.Info.defaultContentLanguageInSubdir {\n\t\t\tmainLangURL := s.PathSpec.AbsURL(mainLang.Lang, false)\n\t\t\ts.Log.DEBUG.Printf(\"Write redirect to main language %s: %s\", mainLang, mainLangURL)\n\t\t\tif err := s.publishDestAlias(true, \"\/\", mainLangURL, nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tmainLangURL := s.PathSpec.AbsURL(\"\", false)\n\t\t\ts.Log.DEBUG.Printf(\"Write redirect to main language %s: %s\", mainLang, mainLangURL)\n\t\t\tif err := s.publishDestAlias(true, mainLang.Lang, mainLangURL, nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License. See the AUTHORS file\n\/\/ for names of contributors.\n\/\/\n\/\/ Author: Ben Darnell\n\npackage storage\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/cockroachdb\/cockroach\/client\"\n\t\"github.com\/cockroachdb\/cockroach\/config\"\n\t\"github.com\/cockroachdb\/cockroach\/gossip\"\n\t\"github.com\/cockroachdb\/cockroach\/keys\"\n\t\"github.com\/cockroachdb\/cockroach\/roachpb\"\n\t\"github.com\/cockroachdb\/cockroach\/util\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/log\"\n)\n\nconst (\n\t\/\/ replicaGCQueueMaxSize is the max size of the gc queue.\n\treplicaGCQueueMaxSize = 100\n\n\t\/\/ replicaGCQueueTimerDuration is the duration between GCs of queued replicas.\n\treplicaGCQueueTimerDuration = 10 * time.Second\n\n\t\/\/ ReplicaGCQueueInactivityThreshold is the inactivity duration after which\n\t\/\/ a range will be considered for garbage collection. Exported for testing.\n\tReplicaGCQueueInactivityThreshold = 10 * 24 * time.Hour \/\/ 10 days\n)\n\n\/\/ replicaGCQueue manages a queue of replicas to be considered for garbage\n\/\/ collections. The GC process asynchronously removes local data for\n\/\/ ranges that have been rebalanced away from this store.\ntype replicaGCQueue struct {\n\tbaseQueue\n\tdb *client.DB\n\tlocker sync.Locker\n}\n\n\/\/ newReplicaGCQueue returns a new instance of replicaGCQueue.\nfunc newReplicaGCQueue(db *client.DB, gossip *gossip.Gossip, locker sync.Locker) *replicaGCQueue {\n\tq := &replicaGCQueue{\n\t\tdb: db,\n\t\tlocker: locker,\n\t}\n\tq.baseQueue = makeBaseQueue(\"replicaGC\", q, gossip, replicaGCQueueMaxSize)\n\treturn q\n}\n\nfunc (*replicaGCQueue) needsLeaderLease() bool {\n\treturn false\n}\n\nfunc (*replicaGCQueue) acceptsUnsplitRanges() bool {\n\treturn true\n}\n\n\/\/ shouldQueue determines whether a replica should be queued for GC, and\n\/\/ if so at what priority. Replicas which have been inactive for longer\n\/\/ than ReplicaGCQueueInactivityThreshold are considered for possible GC\n\/\/ at equal priority.\nfunc (*replicaGCQueue) shouldQueue(now roachpb.Timestamp, rng *Replica,\n\t_ *config.SystemConfig) (bool, float64) {\n\n\tif l := rng.getLease(); l.Expiration.Add(ReplicaGCQueueInactivityThreshold.Nanoseconds(), 0).Less(now) {\n\t\treturn true, 0\n\t}\n\treturn false, 0\n}\n\n\/\/ process performs a consistent lookup on the range descriptor to see if we are\n\/\/ still a member of the range.\nfunc (q *replicaGCQueue) process(now roachpb.Timestamp, rng *Replica, _ *config.SystemConfig) error {\n\tdesc := rng.Desc()\n\n\t\/\/ Calls to RangeLookup typically use inconsistent reads, but we\n\t\/\/ want to do a consistent read here. This is important when we are\n\t\/\/ considering one of the metadata ranges: we must not do an\n\t\/\/ inconsistent lookup in our own copy of the range.\n\tb := &client.Batch{}\n\tb.InternalAddRequest(&roachpb.RangeLookupRequest{\n\t\tSpan: roachpb.Span{\n\t\t\tKey: keys.RangeMetaKey(desc.StartKey),\n\t\t},\n\t\tMaxRanges: 1,\n\t})\n\tbr, err := q.db.RunWithResponse(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\treply := br.Responses[0].GetInner().(*roachpb.RangeLookupResponse)\n\n\tif len(reply.Ranges) != 1 {\n\t\treturn util.Errorf(\"expected 1 range descriptor, got %d\", len(reply.Ranges))\n\t}\n\n\treplyDesc := reply.Ranges[0]\n\tcurrentMember := false\n\tif me := rng.GetReplica(); me != nil {\n\t\tfor _, rep := range replyDesc.Replicas {\n\t\t\tif rep.StoreID == me.StoreID {\n\t\t\t\tcurrentMember = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif !currentMember {\n\t\t\/\/ We are no longer a member of this range; clean up our local data.\n\t\tif log.V(1) {\n\t\t\tlog.Infof(\"destroying local data from range %d\", desc.RangeID)\n\t\t}\n\t\tif err := rng.store.RemoveReplica(rng); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Lock the store to prevent a new replica of the range from being\n\t\t\/\/ added while we're deleting the previous one. We'd really like\n\t\t\/\/ to do this before calling RemoveReplica, but this could\n\t\t\/\/ deadlock with other work on the Store.processRaft goroutine.\n\t\t\/\/ Instead, we check after acquiring the lock to make sure the\n\t\t\/\/ range is still absent.\n\t\tq.locker.Lock()\n\t\tdefer q.locker.Unlock()\n\n\t\tif _, err := rng.store.GetReplica(desc.RangeID); err == nil {\n\t\t\tlog.Infof(\"replica recreated during deletion; aborting deletion\")\n\t\t\treturn nil\n\t\t}\n\n\t\tif err := rng.Destroy(); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if desc.RangeID != desc.RangeID {\n\t\t\/\/ If we get a different range ID back, then the range has been merged\n\t\t\/\/ away. But currentMember is true, so we are still a member of the\n\t\t\/\/ subsuming range. Shut down raft processing for the former range\n\t\t\/\/ and delete any remaining metadata, but do not delete the data.\n\t\tif log.V(1) {\n\t\t\tlog.Infof(\"removing merged range %d\", desc.RangeID)\n\t\t}\n\t\tif err := rng.store.RemoveReplica(rng); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ TODO(bdarnell): remove raft logs and other metadata (while leaving a\n\t\t\/\/ tombstone). Add tests for GC of merged ranges.\n\t} else {\n\t\t\/\/ This range is a current member of the raft group. Acquire the lease\n\t\t\/\/ to avoid processing this range again before the next inactivity threshold.\n\t\tif err := rng.requestLeaderLease(now); err != nil {\n\t\t\tif _, ok := err.(*roachpb.LeaseRejectedError); !ok {\n\t\t\t\tif log.V(1) {\n\t\t\t\t\tlog.Infof(\"unable to acquire lease from valid range %s: %s\", rng, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (*replicaGCQueue) timer() time.Duration {\n\treturn replicaGCQueueTimerDuration\n}\n<commit_msg>storage: remove branch<commit_after>\/\/ Copyright 2015 The Cockroach Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n\/\/ implied. See the License for the specific language governing\n\/\/ permissions and limitations under the License. See the AUTHORS file\n\/\/ for names of contributors.\n\/\/\n\/\/ Author: Ben Darnell\n\npackage storage\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/cockroachdb\/cockroach\/client\"\n\t\"github.com\/cockroachdb\/cockroach\/config\"\n\t\"github.com\/cockroachdb\/cockroach\/gossip\"\n\t\"github.com\/cockroachdb\/cockroach\/keys\"\n\t\"github.com\/cockroachdb\/cockroach\/roachpb\"\n\t\"github.com\/cockroachdb\/cockroach\/util\"\n\t\"github.com\/cockroachdb\/cockroach\/util\/log\"\n)\n\nconst (\n\t\/\/ replicaGCQueueMaxSize is the max size of the gc queue.\n\treplicaGCQueueMaxSize = 100\n\n\t\/\/ replicaGCQueueTimerDuration is the duration between GCs of queued replicas.\n\treplicaGCQueueTimerDuration = 10 * time.Second\n\n\t\/\/ ReplicaGCQueueInactivityThreshold is the inactivity duration after which\n\t\/\/ a range will be considered for garbage collection. Exported for testing.\n\tReplicaGCQueueInactivityThreshold = 10 * 24 * time.Hour \/\/ 10 days\n)\n\n\/\/ replicaGCQueue manages a queue of replicas to be considered for garbage\n\/\/ collections. The GC process asynchronously removes local data for\n\/\/ ranges that have been rebalanced away from this store.\ntype replicaGCQueue struct {\n\tbaseQueue\n\tdb *client.DB\n\tlocker sync.Locker\n}\n\n\/\/ newReplicaGCQueue returns a new instance of replicaGCQueue.\nfunc newReplicaGCQueue(db *client.DB, gossip *gossip.Gossip, locker sync.Locker) *replicaGCQueue {\n\tq := &replicaGCQueue{\n\t\tdb: db,\n\t\tlocker: locker,\n\t}\n\tq.baseQueue = makeBaseQueue(\"replicaGC\", q, gossip, replicaGCQueueMaxSize)\n\treturn q\n}\n\nfunc (*replicaGCQueue) needsLeaderLease() bool {\n\treturn false\n}\n\nfunc (*replicaGCQueue) acceptsUnsplitRanges() bool {\n\treturn true\n}\n\n\/\/ shouldQueue determines whether a replica should be queued for GC, and\n\/\/ if so at what priority. Replicas which have been inactive for longer\n\/\/ than ReplicaGCQueueInactivityThreshold are considered for possible GC\n\/\/ at equal priority.\nfunc (*replicaGCQueue) shouldQueue(now roachpb.Timestamp, rng *Replica,\n\t_ *config.SystemConfig) (bool, float64) {\n\n\treturn rng.getLease().Expiration.Add(\n\t\tReplicaGCQueueInactivityThreshold.Nanoseconds(), 0,\n\t).Less(now), 0\n}\n\n\/\/ process performs a consistent lookup on the range descriptor to see if we are\n\/\/ still a member of the range.\nfunc (q *replicaGCQueue) process(now roachpb.Timestamp, rng *Replica, _ *config.SystemConfig) error {\n\tdesc := rng.Desc()\n\n\t\/\/ Calls to RangeLookup typically use inconsistent reads, but we\n\t\/\/ want to do a consistent read here. This is important when we are\n\t\/\/ considering one of the metadata ranges: we must not do an\n\t\/\/ inconsistent lookup in our own copy of the range.\n\tb := &client.Batch{}\n\tb.InternalAddRequest(&roachpb.RangeLookupRequest{\n\t\tSpan: roachpb.Span{\n\t\t\tKey: keys.RangeMetaKey(desc.StartKey),\n\t\t},\n\t\tMaxRanges: 1,\n\t})\n\tbr, err := q.db.RunWithResponse(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\treply := br.Responses[0].GetInner().(*roachpb.RangeLookupResponse)\n\n\tif len(reply.Ranges) != 1 {\n\t\treturn util.Errorf(\"expected 1 range descriptor, got %d\", len(reply.Ranges))\n\t}\n\n\treplyDesc := reply.Ranges[0]\n\tcurrentMember := false\n\tif me := rng.GetReplica(); me != nil {\n\t\tfor _, rep := range replyDesc.Replicas {\n\t\t\tif rep.StoreID == me.StoreID {\n\t\t\t\tcurrentMember = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tif !currentMember {\n\t\t\/\/ We are no longer a member of this range; clean up our local data.\n\t\tif log.V(1) {\n\t\t\tlog.Infof(\"destroying local data from range %d\", desc.RangeID)\n\t\t}\n\t\tif err := rng.store.RemoveReplica(rng); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Lock the store to prevent a new replica of the range from being\n\t\t\/\/ added while we're deleting the previous one. We'd really like\n\t\t\/\/ to do this before calling RemoveReplica, but this could\n\t\t\/\/ deadlock with other work on the Store.processRaft goroutine.\n\t\t\/\/ Instead, we check after acquiring the lock to make sure the\n\t\t\/\/ range is still absent.\n\t\tq.locker.Lock()\n\t\tdefer q.locker.Unlock()\n\n\t\tif _, err := rng.store.GetReplica(desc.RangeID); err == nil {\n\t\t\tlog.Infof(\"replica recreated during deletion; aborting deletion\")\n\t\t\treturn nil\n\t\t}\n\n\t\tif err := rng.Destroy(); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else if desc.RangeID != desc.RangeID {\n\t\t\/\/ If we get a different range ID back, then the range has been merged\n\t\t\/\/ away. But currentMember is true, so we are still a member of the\n\t\t\/\/ subsuming range. Shut down raft processing for the former range\n\t\t\/\/ and delete any remaining metadata, but do not delete the data.\n\t\tif log.V(1) {\n\t\t\tlog.Infof(\"removing merged range %d\", desc.RangeID)\n\t\t}\n\t\tif err := rng.store.RemoveReplica(rng); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ TODO(bdarnell): remove raft logs and other metadata (while leaving a\n\t\t\/\/ tombstone). Add tests for GC of merged ranges.\n\t} else {\n\t\t\/\/ This range is a current member of the raft group. Acquire the lease\n\t\t\/\/ to avoid processing this range again before the next inactivity threshold.\n\t\tif err := rng.requestLeaderLease(now); err != nil {\n\t\t\tif _, ok := err.(*roachpb.LeaseRejectedError); !ok {\n\t\t\t\tif log.V(1) {\n\t\t\t\t\tlog.Infof(\"unable to acquire lease from valid range %s: %s\", rng, err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (*replicaGCQueue) timer() time.Duration {\n\treturn replicaGCQueueTimerDuration\n}\n<|endoftext|>"} {"text":"<commit_before>package requestHandler\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/zwirec\/TGChatScanner\/TGBotApi\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"log\"\n)\n\nfunc BotUpdateHanlder(w http.ResponseWriter, req *http.Request) {\n\tbody, err := ioutil.ReadAll(req.Body)\n\tlogger := req.Context().Value(loggerContextKey).(*log.Logger)\n\tif err != nil {\n\t\tlogger.Printf(\"Error during handling request on %s : %s\", req.URL.String(), err)\n\t\treturn\n\t}\n\n\tvar update TGBotApi.Update\n\terr = json.Unmarshal(body, &update)\n\ttoLog,_ := json.MarshalIndent(body,\"\",\" \")\n\tlogger.Printf(\"%+v\", toLog)\n\tif err != nil {\n\t\tlogger.Printf(\"Error during unmarshaling request on %s : %s\", req.URL.String(), err)\n\t\treturn\n\t}\n\tif pl := len(update.Message.Photo);pl != 0 {\n\t\tphoto := update.Message.Photo[pl - 1]\n\t\tctx := map[string]interface{}{}\n\t\tctx[\"From\"] = update.Message.From\n\t\tfb := &FileBasic{\n\t\t\tFileId:photo.FileId,\n\t\t\tType:\"photo\",\n\t\t\tContext:ctx,\n\t\t}\n\t\tappContext.DownloadRequests <- fb\n\t} else if update.Message.Entities[0].Type == \"bot_command\" {\n\t\tif err := AddSubsription(update.Message, appContext.Cache); err != nil {\n\t\t\tlogger.Println(err)\n\t\t\treturn\n\t\t}\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc AddSubsription(message TGBotApi.Message, cache *MemoryCache) error {\n\tr := regexp.MustCompile(`\\\/(startgroup|start)?\\s+(?P<token>[[:alnum:]]+)`)\n\tcommand := r.FindStringSubmatch(message.Text)\n\tif len(command) == 0 {\n\t\treturn fmt.Errorf(\"unexpected command %s\", message.Text)\n\t}\n\n\tuserKey := command[2]\n\tuserId, ok := cache.Get(userKey)\n\tif !ok {\n\t\treturn fmt.Errorf(\"user not found, key %s\", userKey)\n\t}\n\n\tchatId := message.From.Id\n\n\t\/\/TODO: store subscripton in database\n\treturn fmt.Errorf(\"New subscription: %s, $s\", userId, chatId)\n}\n<commit_msg>debug stuff removed<commit_after>package requestHandler\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/zwirec\/TGChatScanner\/TGBotApi\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"log\"\n)\n\nfunc BotUpdateHanlder(w http.ResponseWriter, req *http.Request) {\n\tbody, err := ioutil.ReadAll(req.Body)\n\tlogger := req.Context().Value(loggerContextKey).(*log.Logger)\n\tif err != nil {\n\t\tlogger.Printf(\"Error during handling request on %s : %s\", req.URL.String(), err)\n\t\treturn\n\t}\n\n\tvar update TGBotApi.Update\n\terr = json.Unmarshal(body, &update)\n\tif err != nil {\n\t\tlogger.Printf(\"Error during unmarshaling request on %s : %s\", req.URL.String(), err)\n\t\treturn\n\t}\n\tif pl := len(update.Message.Photo);pl != 0 {\n\t\tphoto := update.Message.Photo[pl - 1]\n\t\tctx := map[string]interface{}{}\n\t\tctx[\"From\"] = update.Message.From\n\t\tfb := &FileBasic{\n\t\t\tFileId:photo.FileId,\n\t\t\tType:\"photo\",\n\t\t\tContext:ctx,\n\t\t}\n\t\tappContext.DownloadRequests <- fb\n\t} else if update.Message.Entities[0].Type == \"bot_command\" {\n\t\tif err := AddSubsription(update.Message, appContext.Cache); err != nil {\n\t\t\tlogger.Println(err)\n\t\t\treturn\n\t\t}\n\t}\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc AddSubsription(message TGBotApi.Message, cache *MemoryCache) error {\n\tr := regexp.MustCompile(`\\\/(startgroup|start)?\\s+(?P<token>[[:alnum:]]+)`)\n\tcommand := r.FindStringSubmatch(message.Text)\n\tif len(command) == 0 {\n\t\treturn fmt.Errorf(\"unexpected command %s\", message.Text)\n\t}\n\n\tuserKey := command[2]\n\tuserId, ok := cache.Get(userKey)\n\tif !ok {\n\t\treturn fmt.Errorf(\"user not found, key %s\", userKey)\n\t}\n\n\tchatId := message.From.Id\n\n\t\/\/TODO: store subscripton in database\n\treturn fmt.Errorf(\"New subscription: %s, $s\", userId, chatId)\n}\n<|endoftext|>"} {"text":"<commit_before>package rest\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\n\/\/ TODO Future improvements:\n\/\/ * support %{strftime}t ?\n\/\/ * support %{<header>}o to print headers\n\n\/\/ AccessLogFormat defines the format of the access log record.\n\/\/ This implementation is a subset of Apache mod_log_config.\n\/\/ (See http:\/\/httpd.apache.org\/docs\/2.0\/mod\/mod_log_config.html)\n\/\/\n\/\/ %b content length in bytes, - if 0\n\/\/ %B content length in bytes\n\/\/ %D response elapsed time in microseconds\n\/\/ %h remote address\n\/\/ %H server protocol\n\/\/ %l identd logname, not supported, -\n\/\/ %m http method\n\/\/ %P process id\n\/\/ %q query string\n\/\/ %r first line of the request\n\/\/ %s status code\n\/\/ %S status code preceeded by a terminal color\n\/\/ %t time of the request\n\/\/ %T response elapsed time in seconds, 3 decimals\n\/\/ %u remote user, - if missing\n\/\/ %{User-Agent}i user agent, - if missing\n\/\/ %{Referer}i referer, - is missing\n\/\/\n\/\/ Some predefined formats are provided as contants.\ntype AccessLogFormat string\n\nconst (\n\t\/\/ CommonLogFormat is the Common Log Format (CLF).\n\tCommonLogFormat = \"%h %l %u %t \\\"%r\\\" %s %b\"\n\n\t\/\/ CombinedLogFormat is the NCSA extended\/combined log format.\n\tCombinedLogFormat = \"%h %l %u %t \\\"%r\\\" %s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\"\n\n\t\/\/ DefaultLogFormat is the default format, colored output and response time, convenient for development.\n\tDefaultLogFormat = \"%t %S\\033[0m \\033[36;1m%Dμs\\033[0m \\\"%r\\\" \\033[1;30m%u \\\"%{User-Agent}i\\\"\\033[0m\"\n)\n\n\/\/ AccessLogApacheMiddleware produces the access log following a format inspired by Apache\n\/\/ mod_log_config. It depends on TimerMiddleware and RecorderMiddleware that should be in the wrapped\n\/\/ middlewares. It also uses request.Env[\"REMOTE_USER\"].(string) set by the auth middlewares.\ntype AccessLogApacheMiddleware struct {\n\n\t\/\/ Logger points to the logger object used by this middleware, it defaults to\n\t\/\/ log.New(os.Stderr, \"\", 0).\n\tLogger *log.Logger\n\n\t\/\/ Format defines the format of the access log record. See AccessLogFormat for the details.\n\t\/\/ It defaults to DefaultLogFormat.\n\tFormat AccessLogFormat\n\n\ttextTemplate *template.Template\n}\n\n\/\/ MiddlewareFunc makes AccessLogApacheMiddleware implement the Middleware interface.\nfunc (mw *AccessLogApacheMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc {\n\n\t\/\/ set the default Logger\n\tif mw.Logger == nil {\n\t\tmw.Logger = log.New(os.Stderr, \"\", 0)\n\t}\n\n\t\/\/ set default format\n\tif mw.Format == \"\" {\n\t\tmw.Format = DefaultLogFormat\n\t}\n\n\tmw.convertFormat()\n\n\treturn func(w ResponseWriter, r *Request) {\n\n\t\t\/\/ call the handler\n\t\th(w, r)\n\n\t\tutil := &accessLogUtil{w, r}\n\n\t\tmw.Logger.Print(mw.executeTextTemplate(util))\n\t}\n}\n\nvar apacheAdapter = strings.NewReplacer(\n\t\"%b\", \"{{.BytesWritten | dashIf0}}\",\n\t\"%B\", \"{{.BytesWritten}}\",\n\t\"%D\", \"{{.ResponseTime | microseconds}}\",\n\t\"%h\", \"{{.ApacheRemoteAddr}}\",\n\t\"%H\", \"{{.R.Proto}}\",\n\t\"%l\", \"-\",\n\t\"%m\", \"{{.R.Method}}\",\n\t\"%P\", \"{{.Pid}}\",\n\t\"%q\", \"{{.ApacheQueryString}}\",\n\t\"%r\", \"{{.R.Method}} {{.R.URL.RequestURI}} {{.R.Proto}}\",\n\t\"%s\", \"{{.StatusCode}}\",\n\t\"%S\", \"\\033[{{.StatusCode | statusCodeColor}}m{{.StatusCode}}\",\n\t\"%t\", \"{{if .StartTime}}{{.StartTime.Format \\\"02\/Jan\/2006:15:04:05 -0700\\\"}}{{end}}\",\n\t\"%T\", \"{{if .ResponseTime}}{{.ResponseTime.Seconds | printf \\\"%.3f\\\"}}{{end}}\",\n\t\"%u\", \"{{.RemoteUser | dashIfEmptyStr}}\",\n\t\"%{User-Agent}i\", \"{{.R.UserAgent | dashIfEmptyStr}}\",\n\t\"%{Referer}i\", \"{{.R.Referer | dashIfEmptyStr}}\",\n)\n\n\/\/ Convert the Apache access log format into a text\/template\nfunc (mw *AccessLogApacheMiddleware) convertFormat() {\n\n\ttmplText := apacheAdapter.Replace(string(mw.Format))\n\n\tfuncMap := template.FuncMap{\n\t\t\"dashIfEmptyStr\": func(value string) string {\n\t\t\tif value == \"\" {\n\t\t\t\treturn \"-\"\n\t\t\t}\n\t\t\treturn value\n\t\t},\n\t\t\"dashIf0\": func(value int64) string {\n\t\t\tif value == 0 {\n\t\t\t\treturn \"-\"\n\t\t\t}\n\t\t\treturn fmt.Sprintf(\"%d\", value)\n\t\t},\n\t\t\"microseconds\": func(dur *time.Duration) string {\n\t\t\treturn fmt.Sprintf(\"%d\", dur.Nanoseconds()\/1000)\n\t\t},\n\t\t\"statusCodeColor\": func(statusCode int) string {\n\t\t\tif statusCode >= 400 && statusCode < 500 {\n\t\t\t\treturn \"1;33\"\n\t\t\t} else if statusCode >= 500 {\n\t\t\t\treturn \"0;31\"\n\t\t\t}\n\t\t\treturn \"0;32\"\n\t\t},\n\t}\n\n\tvar err error\n\tmw.textTemplate, err = template.New(\"accessLog\").Funcs(funcMap).Parse(tmplText)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ Execute the text template with the data derived from the request, and return a string.\nfunc (mw *AccessLogApacheMiddleware) executeTextTemplate(util *accessLogUtil) string {\n\tbuf := bytes.NewBufferString(\"\")\n\terr := mw.textTemplate.Execute(buf, util)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn buf.String()\n}\n\n\/\/ accessLogUtil provides a collection of utility functions that devrive data from the Request object.\n\/\/ This object is used to provide data to the Apache Style template and the the JSON log record.\ntype accessLogUtil struct {\n\tW ResponseWriter\n\tR *Request\n}\n\n\/\/ As stored by the auth middlewares.\nfunc (u *accessLogUtil) RemoteUser() string {\n\tif u.R.Env[\"REMOTE_USER\"] != nil {\n\t\treturn u.R.Env[\"REMOTE_USER\"].(string)\n\t}\n\treturn \"\"\n}\n\n\/\/ If qs exists then return it with a leadin \"?\", apache log style.\nfunc (u *accessLogUtil) ApacheQueryString() string {\n\tif u.R.URL.RawQuery != \"\" {\n\t\treturn \"?\" + u.R.URL.RawQuery\n\t}\n\treturn \"\"\n}\n\n\/\/ When the request entered the timer middleware.\nfunc (u *accessLogUtil) StartTime() *time.Time {\n\tif u.R.Env[\"START_TIME\"] != nil {\n\t\treturn u.R.Env[\"START_TIME\"].(*time.Time)\n\t}\n\treturn nil\n}\n\n\/\/ If remoteAddr is set then return is without the port number, apache log style.\nfunc (u *accessLogUtil) ApacheRemoteAddr() string {\n\tremoteAddr := u.R.RemoteAddr\n\tif remoteAddr != \"\" {\n\t\tif ip, _, err := net.SplitHostPort(remoteAddr); err == nil {\n\t\t\treturn ip\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ As recorded by the recorder middleware.\nfunc (u *accessLogUtil) StatusCode() int {\n\tif u.R.Env[\"STATUS_CODE\"] != nil {\n\t\treturn u.R.Env[\"STATUS_CODE\"].(int)\n\t}\n\treturn 0\n}\n\n\/\/ As mesured by the timer middleware.\nfunc (u *accessLogUtil) ResponseTime() *time.Duration {\n\tif u.R.Env[\"ELAPSED_TIME\"] != nil {\n\t\treturn u.R.Env[\"ELAPSED_TIME\"].(*time.Duration)\n\t}\n\treturn nil\n}\n\n\/\/ Process id.\nfunc (u *accessLogUtil) Pid() int {\n\treturn os.Getpid()\n}\n\n\/\/ As recorded by the recorder middleware.\nfunc (u *accessLogUtil) BytesWritten() int64 {\n\tif u.R.Env[\"BYTES_WRITTEN\"] != nil {\n\t\treturn u.R.Env[\"BYTES_WRITTEN\"].(int64)\n\t}\n\treturn 0\n}\n<commit_msg>Fix Apache microsecond logging<commit_after>package rest\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\n\/\/ TODO Future improvements:\n\/\/ * support %{strftime}t ?\n\/\/ * support %{<header>}o to print headers\n\n\/\/ AccessLogFormat defines the format of the access log record.\n\/\/ This implementation is a subset of Apache mod_log_config.\n\/\/ (See http:\/\/httpd.apache.org\/docs\/2.0\/mod\/mod_log_config.html)\n\/\/\n\/\/ %b content length in bytes, - if 0\n\/\/ %B content length in bytes\n\/\/ %D response elapsed time in microseconds\n\/\/ %h remote address\n\/\/ %H server protocol\n\/\/ %l identd logname, not supported, -\n\/\/ %m http method\n\/\/ %P process id\n\/\/ %q query string\n\/\/ %r first line of the request\n\/\/ %s status code\n\/\/ %S status code preceeded by a terminal color\n\/\/ %t time of the request\n\/\/ %T response elapsed time in seconds, 3 decimals\n\/\/ %u remote user, - if missing\n\/\/ %{User-Agent}i user agent, - if missing\n\/\/ %{Referer}i referer, - is missing\n\/\/\n\/\/ Some predefined formats are provided as contants.\ntype AccessLogFormat string\n\nconst (\n\t\/\/ CommonLogFormat is the Common Log Format (CLF).\n\tCommonLogFormat = \"%h %l %u %t \\\"%r\\\" %s %b\"\n\n\t\/\/ CombinedLogFormat is the NCSA extended\/combined log format.\n\tCombinedLogFormat = \"%h %l %u %t \\\"%r\\\" %s %b \\\"%{Referer}i\\\" \\\"%{User-Agent}i\\\"\"\n\n\t\/\/ DefaultLogFormat is the default format, colored output and response time, convenient for development.\n\tDefaultLogFormat = \"%t %S\\033[0m \\033[36;1m%Dμs\\033[0m \\\"%r\\\" \\033[1;30m%u \\\"%{User-Agent}i\\\"\\033[0m\"\n)\n\n\/\/ AccessLogApacheMiddleware produces the access log following a format inspired by Apache\n\/\/ mod_log_config. It depends on TimerMiddleware and RecorderMiddleware that should be in the wrapped\n\/\/ middlewares. It also uses request.Env[\"REMOTE_USER\"].(string) set by the auth middlewares.\ntype AccessLogApacheMiddleware struct {\n\n\t\/\/ Logger points to the logger object used by this middleware, it defaults to\n\t\/\/ log.New(os.Stderr, \"\", 0).\n\tLogger *log.Logger\n\n\t\/\/ Format defines the format of the access log record. See AccessLogFormat for the details.\n\t\/\/ It defaults to DefaultLogFormat.\n\tFormat AccessLogFormat\n\n\ttextTemplate *template.Template\n}\n\n\/\/ MiddlewareFunc makes AccessLogApacheMiddleware implement the Middleware interface.\nfunc (mw *AccessLogApacheMiddleware) MiddlewareFunc(h HandlerFunc) HandlerFunc {\n\n\t\/\/ set the default Logger\n\tif mw.Logger == nil {\n\t\tmw.Logger = log.New(os.Stderr, \"\", 0)\n\t}\n\n\t\/\/ set default format\n\tif mw.Format == \"\" {\n\t\tmw.Format = DefaultLogFormat\n\t}\n\n\tmw.convertFormat()\n\n\treturn func(w ResponseWriter, r *Request) {\n\n\t\t\/\/ call the handler\n\t\th(w, r)\n\n\t\tutil := &accessLogUtil{w, r}\n\n\t\tmw.Logger.Print(mw.executeTextTemplate(util))\n\t}\n}\n\nvar apacheAdapter = strings.NewReplacer(\n\t\"%b\", \"{{.BytesWritten | dashIf0}}\",\n\t\"%B\", \"{{.BytesWritten}}\",\n\t\"%D\", \"{{.ResponseTime | microseconds}}\",\n\t\"%h\", \"{{.ApacheRemoteAddr}}\",\n\t\"%H\", \"{{.R.Proto}}\",\n\t\"%l\", \"-\",\n\t\"%m\", \"{{.R.Method}}\",\n\t\"%P\", \"{{.Pid}}\",\n\t\"%q\", \"{{.ApacheQueryString}}\",\n\t\"%r\", \"{{.R.Method}} {{.R.URL.RequestURI}} {{.R.Proto}}\",\n\t\"%s\", \"{{.StatusCode}}\",\n\t\"%S\", \"\\033[{{.StatusCode | statusCodeColor}}m{{.StatusCode}}\",\n\t\"%t\", \"{{if .StartTime}}{{.StartTime.Format \\\"02\/Jan\/2006:15:04:05 -0700\\\"}}{{end}}\",\n\t\"%T\", \"{{if .ResponseTime}}{{.ResponseTime.Seconds | printf \\\"%.3f\\\"}}{{end}}\",\n\t\"%u\", \"{{.RemoteUser | dashIfEmptyStr}}\",\n\t\"%{User-Agent}i\", \"{{.R.UserAgent | dashIfEmptyStr}}\",\n\t\"%{Referer}i\", \"{{.R.Referer | dashIfEmptyStr}}\",\n)\n\n\/\/ Convert the Apache access log format into a text\/template\nfunc (mw *AccessLogApacheMiddleware) convertFormat() {\n\n\ttmplText := apacheAdapter.Replace(string(mw.Format))\n\n\tfuncMap := template.FuncMap{\n\t\t\"dashIfEmptyStr\": func(value string) string {\n\t\t\tif value == \"\" {\n\t\t\t\treturn \"-\"\n\t\t\t}\n\t\t\treturn value\n\t\t},\n\t\t\"dashIf0\": func(value int64) string {\n\t\t\tif value == 0 {\n\t\t\t\treturn \"-\"\n\t\t\t}\n\t\t\treturn fmt.Sprintf(\"%d\", value)\n\t\t},\n\t\t\"microseconds\": func(dur *time.Duration) string {\n\t\t\tif dur != nil {\n\t\t\t\treturn fmt.Sprintf(\"%d\", dur.Nanoseconds()\/1000)\n\t\t\t}\n\t\t\treturn \"\"\n\t\t},\n\t\t\"statusCodeColor\": func(statusCode int) string {\n\t\t\tif statusCode >= 400 && statusCode < 500 {\n\t\t\t\treturn \"1;33\"\n\t\t\t} else if statusCode >= 500 {\n\t\t\t\treturn \"0;31\"\n\t\t\t}\n\t\t\treturn \"0;32\"\n\t\t},\n\t}\n\n\tvar err error\n\tmw.textTemplate, err = template.New(\"accessLog\").Funcs(funcMap).Parse(tmplText)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\n\/\/ Execute the text template with the data derived from the request, and return a string.\nfunc (mw *AccessLogApacheMiddleware) executeTextTemplate(util *accessLogUtil) string {\n\tbuf := bytes.NewBufferString(\"\")\n\terr := mw.textTemplate.Execute(buf, util)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn buf.String()\n}\n\n\/\/ accessLogUtil provides a collection of utility functions that devrive data from the Request object.\n\/\/ This object is used to provide data to the Apache Style template and the the JSON log record.\ntype accessLogUtil struct {\n\tW ResponseWriter\n\tR *Request\n}\n\n\/\/ As stored by the auth middlewares.\nfunc (u *accessLogUtil) RemoteUser() string {\n\tif u.R.Env[\"REMOTE_USER\"] != nil {\n\t\treturn u.R.Env[\"REMOTE_USER\"].(string)\n\t}\n\treturn \"\"\n}\n\n\/\/ If qs exists then return it with a leadin \"?\", apache log style.\nfunc (u *accessLogUtil) ApacheQueryString() string {\n\tif u.R.URL.RawQuery != \"\" {\n\t\treturn \"?\" + u.R.URL.RawQuery\n\t}\n\treturn \"\"\n}\n\n\/\/ When the request entered the timer middleware.\nfunc (u *accessLogUtil) StartTime() *time.Time {\n\tif u.R.Env[\"START_TIME\"] != nil {\n\t\treturn u.R.Env[\"START_TIME\"].(*time.Time)\n\t}\n\treturn nil\n}\n\n\/\/ If remoteAddr is set then return is without the port number, apache log style.\nfunc (u *accessLogUtil) ApacheRemoteAddr() string {\n\tremoteAddr := u.R.RemoteAddr\n\tif remoteAddr != \"\" {\n\t\tif ip, _, err := net.SplitHostPort(remoteAddr); err == nil {\n\t\t\treturn ip\n\t\t}\n\t}\n\treturn \"\"\n}\n\n\/\/ As recorded by the recorder middleware.\nfunc (u *accessLogUtil) StatusCode() int {\n\tif u.R.Env[\"STATUS_CODE\"] != nil {\n\t\treturn u.R.Env[\"STATUS_CODE\"].(int)\n\t}\n\treturn 0\n}\n\n\/\/ As mesured by the timer middleware.\nfunc (u *accessLogUtil) ResponseTime() *time.Duration {\n\tif u.R.Env[\"ELAPSED_TIME\"] != nil {\n\t\treturn u.R.Env[\"ELAPSED_TIME\"].(*time.Duration)\n\t}\n\treturn nil\n}\n\n\/\/ Process id.\nfunc (u *accessLogUtil) Pid() int {\n\treturn os.Getpid()\n}\n\n\/\/ As recorded by the recorder middleware.\nfunc (u *accessLogUtil) BytesWritten() int64 {\n\tif u.R.Env[\"BYTES_WRITTEN\"] != nil {\n\t\treturn u.R.Env[\"BYTES_WRITTEN\"].(int64)\n\t}\n\treturn 0\n}\n<|endoftext|>"} {"text":"<commit_before>package robots\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com\/gistia\/slackbot\/db\"\n\t\"github.com\/gistia\/slackbot\/pivotal\"\n\t\"github.com\/gistia\/slackbot\/robots\"\n\t\"github.com\/gistia\/slackbot\/utils\"\n)\n\ntype bot struct {\n\thandler utils.SlackHandler\n}\n\nfunc init() {\n\thandler := utils.NewSlackHandler(\"Pivotal\", \":triangular_ruler:\")\n\ts := &bot{handler: handler}\n\trobots.RegisterRobot(\"pvt\", s)\n}\n\nfunc (r bot) Run(p *robots.Payload) string {\n\tgo r.DeferredAction(p)\n\treturn \"\"\n}\n\nfunc (r bot) DeferredAction(p *robots.Payload) {\n\tch := utils.NewCmdHandler(p, r.handler, \"pvt\")\n\tch.Handle(\"projects\", r.sendProjects)\n\tch.Handle(\"stories\", r.sendStories)\n\tch.Handle(\"mystories\", r.sendMyStories)\n\tch.Handle(\"auth\", r.sendAuth)\n\tch.Handle(\"users\", r.users)\n\n\tcmds := []string{\"start\", \"unstart\", \"finish\", \"accept\", \"reject\", \"deliver\"}\n\tch.HandleMany(cmds, r.setStoryState)\n\n\tch.Process(p.Text)\n}\n\nfunc (r bot) users(p *robots.Payload, cmd utils.Command) error {\n\tprojectId := cmd.Arg(0)\n\tif projectId == \"\" {\n\t\tr.handler.Send(p, \"Missing project id. Use !pvt users <project-id>\")\n\t\treturn nil\n\t}\n\n\tpvt, err := conn(p.UserName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tproject, err := pvt.GetProject(projectId)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif project == nil {\n\t\tr.handler.Send(p, \"Project with id \"+projectId+\" doesn't exist.\")\n\t\treturn nil\n\t}\n\n\tmemberships, err := pvt.GetProjectMemberships(projectId)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts := \"Current users for project *\" + project.Name + \"*:\\n\"\n\tfor _, m := range memberships {\n\t\tpp := m.Person\n\t\ts += fmt.Sprintf(\"%d - %s (%s)\\n\", pp.Id, pp.Name, pp.Email)\n\t}\n\n\tr.handler.Send(p, s)\n\treturn nil\n}\n\nfunc (r bot) sendProjects(payload *robots.Payload, cmd utils.Command) error {\n\tvar ps []pivotal.Project\n\tvar err error\n\n\tterm := cmd.Arg(0)\n\n\tpvt, err := conn(payload.UserName)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts := \"Projects\"\n\n\tif len(term) > 0 {\n\t\tfmt.Printf(\"Retrieving projects with term \\\"%s\\\"...\\n\\n\", term)\n\t\ts += fmt.Sprintf(\" matching '%s':\\n\", term)\n\t\t\/\/ ps, err = pvt.SearchProject(term)\n\t} else {\n\t\ts += \":\\n\"\n\t\tfmt.Println(\"Retrieving projects...\\n\")\n\t\tps, err = pvt.Projects()\n\t}\n\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Error: %s\", err.Error())\n\t\tr.handler.Send(payload, msg)\n\t\treturn nil\n\t}\n\n\tr.handler.Send(payload, s+projectTable(ps))\n\treturn nil\n}\n\nfunc (r bot) sendStories(p *robots.Payload, cmd utils.Command) error {\n\tproject := cmd.Arg(0)\n\tpvt, err := conn(p.UserName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstories, err := pvt.Stories(project)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstr := \"\"\n\tfor _, s := range stories {\n\t\tstr += fmt.Sprintf(\"%d - %s\\n\", s.Id, s.Name)\n\t}\n\n\tr.handler.Send(p, str)\n\treturn nil\n}\n\nfunc (r bot) sendMyStories(p *robots.Payload, cmd utils.Command) error {\n\targs, err := cmd.ParseArgs(\"pvt-project-id\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tproject := args[0]\n\tpvt, err := conn(p.UserName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tuser, err := db.GetUserByName(p.UserName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfilter := map[string]string{\n\t\t\"owned_by\": user.StrPivotalId(),\n\t\t\"state\": \"started,finished,delivered\",\n\t}\n\tstories, err := pvt.FilteredStories(project, filter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(stories) < 1 {\n\t\tr.handler.Send(p, \"No open stories for *\"+p.UserName+\"*\")\n\t\treturn nil\n\t}\n\n\tstr := \"Current stories for *\" + p.UserName + \"*:\\n\"\n\tfor _, s := range stories {\n\t\tstr += fmt.Sprintf(\"%d - %s\\n\", s.Id, s.Name)\n\t}\n\n\tr.handler.Send(p, str)\n\treturn nil\n}\n\nfunc (r bot) setStoryState(p *robots.Payload, cmd utils.Command) error {\n\tstate := cmd.Command\n\tid := cmd.Arg(0)\n\tpvt, err := conn(p.UserName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstate = fmt.Sprintf(\"%sed\", state)\n\tstory, err := pvt.SetStoryState(id, state)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.handler.Send(p, fmt.Sprintf(\"Story %s - %s %s successfully\",\n\t\tid, story.Name, state))\n\treturn nil\n}\n\nfunc (r bot) sendAuth(p *robots.Payload, cmd utils.Command) error {\n\ts, err := db.GetSetting(p.UserName, \"PIVOTAL_TOKEN\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif s != nil {\n\t\tr.handler.Send(p, \"You are already connected with Pivotal.\")\n\t\treturn nil\n\t}\n\n\tmsg := `*Authenticating with Pivotal Tracker*\n1. Visit your profile here <https:\/\/www.pivotaltracker.com\/profile>\n2. Copy your API token at the bottom of the page\n3. Run the command:\n ` + \"`\/store set PIVOTAL_TOKEN=<token>`\"\n\tr.handler.Send(p, msg)\n\treturn nil\n}\n\nfunc conn(user string) (*pivotal.Pivotal, error) {\n\ttoken, err := db.GetSetting(user, \"PIVOTAL_TOKEN\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif token == nil {\n\t\treturn nil, errors.New(\"No PIVOTAL_TOKEN set for @\" + user)\n\t}\n\tcon := pivotal.NewPivotal(token.Value, false)\n\treturn con, nil\n}\n\nfunc projectTable(ps []pivotal.Project) string {\n\ts := \"\"\n\n\tfor _, p := range ps {\n\t\ts += fmt.Sprintf(\"%d - %s\\n\", p.Id, p.Name)\n\t}\n\n\treturn s\n}\n\nfunc (r bot) Description() (description string) {\n\treturn \"Pivotal bot\\n\\tUsage: !pvt <command>\\n\"\n}\n<commit_msg>Added link to stories in mystories command<commit_after>package robots\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com\/gistia\/slackbot\/db\"\n\t\"github.com\/gistia\/slackbot\/pivotal\"\n\t\"github.com\/gistia\/slackbot\/robots\"\n\t\"github.com\/gistia\/slackbot\/utils\"\n)\n\ntype bot struct {\n\thandler utils.SlackHandler\n}\n\nfunc init() {\n\thandler := utils.NewSlackHandler(\"Pivotal\", \":triangular_ruler:\")\n\ts := &bot{handler: handler}\n\trobots.RegisterRobot(\"pvt\", s)\n}\n\nfunc (r bot) Run(p *robots.Payload) string {\n\tgo r.DeferredAction(p)\n\treturn \"\"\n}\n\nfunc (r bot) DeferredAction(p *robots.Payload) {\n\tch := utils.NewCmdHandler(p, r.handler, \"pvt\")\n\tch.Handle(\"projects\", r.sendProjects)\n\tch.Handle(\"stories\", r.sendStories)\n\tch.Handle(\"mystories\", r.sendMyStories)\n\tch.Handle(\"auth\", r.sendAuth)\n\tch.Handle(\"users\", r.users)\n\n\tcmds := []string{\"start\", \"unstart\", \"finish\", \"accept\", \"reject\", \"deliver\"}\n\tch.HandleMany(cmds, r.setStoryState)\n\n\tch.Process(p.Text)\n}\n\nfunc (r bot) users(p *robots.Payload, cmd utils.Command) error {\n\tprojectId := cmd.Arg(0)\n\tif projectId == \"\" {\n\t\tr.handler.Send(p, \"Missing project id. Use !pvt users <project-id>\")\n\t\treturn nil\n\t}\n\n\tpvt, err := conn(p.UserName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tproject, err := pvt.GetProject(projectId)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif project == nil {\n\t\tr.handler.Send(p, \"Project with id \"+projectId+\" doesn't exist.\")\n\t\treturn nil\n\t}\n\n\tmemberships, err := pvt.GetProjectMemberships(projectId)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts := \"Current users for project *\" + project.Name + \"*:\\n\"\n\tfor _, m := range memberships {\n\t\tpp := m.Person\n\t\ts += fmt.Sprintf(\"%d - %s (%s)\\n\", pp.Id, pp.Name, pp.Email)\n\t}\n\n\tr.handler.Send(p, s)\n\treturn nil\n}\n\nfunc (r bot) sendProjects(payload *robots.Payload, cmd utils.Command) error {\n\tvar ps []pivotal.Project\n\tvar err error\n\n\tterm := cmd.Arg(0)\n\n\tpvt, err := conn(payload.UserName)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts := \"Projects\"\n\n\tif len(term) > 0 {\n\t\tfmt.Printf(\"Retrieving projects with term \\\"%s\\\"...\\n\\n\", term)\n\t\ts += fmt.Sprintf(\" matching '%s':\\n\", term)\n\t\t\/\/ ps, err = pvt.SearchProject(term)\n\t} else {\n\t\ts += \":\\n\"\n\t\tfmt.Println(\"Retrieving projects...\\n\")\n\t\tps, err = pvt.Projects()\n\t}\n\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Error: %s\", err.Error())\n\t\tr.handler.Send(payload, msg)\n\t\treturn nil\n\t}\n\n\tr.handler.Send(payload, s+projectTable(ps))\n\treturn nil\n}\n\nfunc (r bot) sendStories(p *robots.Payload, cmd utils.Command) error {\n\tproject := cmd.Arg(0)\n\tpvt, err := conn(p.UserName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstories, err := pvt.Stories(project)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstr := \"\"\n\tfor _, s := range stories {\n\t\tstr += fmt.Sprintf(\"%d - %s\\n\", s.Id, s.Name)\n\t}\n\n\tr.handler.Send(p, str)\n\treturn nil\n}\n\nfunc (r bot) sendMyStories(p *robots.Payload, cmd utils.Command) error {\n\targs, err := cmd.ParseArgs(\"pvt-project-id\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tproject := args[0]\n\tpvt, err := conn(p.UserName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tuser, err := db.GetUserByName(p.UserName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfilter := map[string]string{\n\t\t\"owned_by\": user.StrPivotalId(),\n\t\t\"state\": \"started,finished,delivered\",\n\t}\n\tstories, err := pvt.FilteredStories(project, filter)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(stories) < 1 {\n\t\tr.handler.Send(p, \"No open stories for *\"+p.UserName+\"*\")\n\t\treturn nil\n\t}\n\n\tstr := \"Current stories for *\" + p.UserName + \"*:\\n\"\n\tatts := []robots.Attachment{}\n\tfor _, s := range stories {\n\t\tfallback := fmt.Sprintf(\"%d - %s - %s\\n\", s.Id, s.Name, s.State)\n\t\ttitle := fmt.Sprintf(\"%d - %s\\n\", s.Id, s.Name)\n\t\ta := utils.FmtAttachment(fallback, title, s.Url, s.State)\n\t\tatts = append(atts, a)\n\t}\n\n\tr.handler.SendWithAttachments(p, str, atts)\n\treturn nil\n}\n\nfunc (r bot) setStoryState(p *robots.Payload, cmd utils.Command) error {\n\tstate := cmd.Command\n\tid := cmd.Arg(0)\n\tpvt, err := conn(p.UserName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstate = fmt.Sprintf(\"%sed\", state)\n\tstory, err := pvt.SetStoryState(id, state)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr.handler.Send(p, fmt.Sprintf(\"Story %s - %s %s successfully\",\n\t\tid, story.Name, state))\n\treturn nil\n}\n\nfunc (r bot) sendAuth(p *robots.Payload, cmd utils.Command) error {\n\ts, err := db.GetSetting(p.UserName, \"PIVOTAL_TOKEN\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif s != nil {\n\t\tr.handler.Send(p, \"You are already connected with Pivotal.\")\n\t\treturn nil\n\t}\n\n\tmsg := `*Authenticating with Pivotal Tracker*\n1. Visit your profile here <https:\/\/www.pivotaltracker.com\/profile>\n2. Copy your API token at the bottom of the page\n3. Run the command:\n ` + \"`\/store set PIVOTAL_TOKEN=<token>`\"\n\tr.handler.Send(p, msg)\n\treturn nil\n}\n\nfunc conn(user string) (*pivotal.Pivotal, error) {\n\ttoken, err := db.GetSetting(user, \"PIVOTAL_TOKEN\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif token == nil {\n\t\treturn nil, errors.New(\"No PIVOTAL_TOKEN set for @\" + user)\n\t}\n\tcon := pivotal.NewPivotal(token.Value, false)\n\treturn con, nil\n}\n\nfunc projectTable(ps []pivotal.Project) string {\n\ts := \"\"\n\n\tfor _, p := range ps {\n\t\ts += fmt.Sprintf(\"%d - %s\\n\", p.Id, p.Name)\n\t}\n\n\treturn s\n}\n\nfunc (r bot) Description() (description string) {\n\treturn \"Pivotal bot\\n\\tUsage: !pvt <command>\\n\"\n}\n<|endoftext|>"} {"text":"<commit_before>package routing\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/lightningnetwork\/lnd\/channeldb\"\n\t\"github.com\/lightningnetwork\/lnd\/channeldb\/kvdb\"\n\t\"github.com\/lightningnetwork\/lnd\/lnwire\"\n\t\"github.com\/lightningnetwork\/lnd\/routing\/route\"\n)\n\nconst (\n\t\/\/ DefaultPenaltyHalfLife is the default half-life duration. The\n\t\/\/ half-life duration defines after how much time a penalized node or\n\t\/\/ channel is back at 50% probability.\n\tDefaultPenaltyHalfLife = time.Hour\n\n\t\/\/ minSecondChanceInterval is the minimum time required between\n\t\/\/ second-chance failures.\n\t\/\/\n\t\/\/ If nodes return a channel policy related failure, they may get a\n\t\/\/ second chance to forward the payment. It could be that the channel\n\t\/\/ policy that we are aware of is not up to date. This is especially\n\t\/\/ important in case of mobile apps that are mostly offline.\n\t\/\/\n\t\/\/ However, we don't want to give nodes the option to endlessly return\n\t\/\/ new channel updates so that we are kept busy trying to route through\n\t\/\/ that node until the payment loop times out.\n\t\/\/\n\t\/\/ Therefore we only grant a second chance to a node if the previous\n\t\/\/ second chance is sufficiently long ago. This is what\n\t\/\/ minSecondChanceInterval defines. If a second policy failure comes in\n\t\/\/ within that interval, we will apply a penalty.\n\t\/\/\n\t\/\/ Second chances granted are tracked on the level of node pairs. This\n\t\/\/ means that if a node has multiple channels to the same peer, they\n\t\/\/ will only get a single second chance to route to that peer again.\n\t\/\/ Nodes forward non-strict, so it isn't necessary to apply a less\n\t\/\/ restrictive channel level tracking scheme here.\n\tminSecondChanceInterval = time.Minute\n\n\t\/\/ DefaultMaxMcHistory is the default maximum history size.\n\tDefaultMaxMcHistory = 1000\n\n\t\/\/ prevSuccessProbability is the assumed probability for node pairs that\n\t\/\/ successfully relayed the previous attempt.\n\tprevSuccessProbability = 0.95\n\n\t\/\/ DefaultAprioriWeight is the default a priori weight. See\n\t\/\/ MissionControlConfig for further explanation.\n\tDefaultAprioriWeight = 0.5\n\n\t\/\/ DefaultMinFailureRelaxInterval is the default minimum time that must\n\t\/\/ have passed since the previously recorded failure before the failure\n\t\/\/ amount may be raised.\n\tDefaultMinFailureRelaxInterval = time.Minute\n)\n\nvar (\n\t\/\/ ErrInvalidMcHistory is returned if we get a negative mission control\n\t\/\/ history count.\n\tErrInvalidMcHistory = errors.New(\"mission control history must be \" +\n\t\t\">= 0\")\n\n\t\/\/ ErrInvalidFailureInterval is returned if we get an invalid failure\n\t\/\/ interval.\n\tErrInvalidFailureInterval = errors.New(\"failure interval must be >= 0\")\n)\n\n\/\/ NodeResults contains previous results from a node to its peers.\ntype NodeResults map[route.Vertex]TimedPairResult\n\n\/\/ MissionControl contains state which summarizes the past attempts of HTLC\n\/\/ routing by external callers when sending payments throughout the network. It\n\/\/ acts as a shared memory during routing attempts with the goal to optimize the\n\/\/ payment attempt success rate.\n\/\/\n\/\/ Failed payment attempts are reported to mission control. These reports are\n\/\/ used to track the time of the last node or channel level failure. The time\n\/\/ since the last failure is used to estimate a success probability that is fed\n\/\/ into the path finding process for subsequent payment attempts.\ntype MissionControl struct {\n\t\/\/ state is the internal mission control state that is input for\n\t\/\/ probability estimation.\n\tstate *missionControlState\n\n\t\/\/ now is expected to return the current time. It is supplied as an\n\t\/\/ external function to enable deterministic unit tests.\n\tnow func() time.Time\n\n\t\/\/ selfNode is our pubkey.\n\tselfNode route.Vertex\n\n\tstore *missionControlStore\n\n\t\/\/ estimator is the probability estimator that is used with the payment\n\t\/\/ results that mission control collects.\n\testimator *probabilityEstimator\n\n\tsync.Mutex\n\n\t\/\/ TODO(roasbeef): further counters, if vertex continually unavailable,\n\t\/\/ add to another generation\n\n\t\/\/ TODO(roasbeef): also add favorable metrics for nodes\n}\n\n\/\/ MissionControlConfig defines parameters that control mission control\n\/\/ behaviour.\ntype MissionControlConfig struct {\n\t\/\/ ProbabilityEstimatorConfig is the config we will use for probability\n\t\/\/ calculations.\n\tProbabilityEstimatorCfg\n\n\t\/\/ MaxMcHistory defines the maximum number of payment results that are\n\t\/\/ held on disk.\n\tMaxMcHistory int\n\n\t\/\/ MinFailureRelaxInterval is the minimum time that must have passed\n\t\/\/ since the previously recorded failure before the failure amount may\n\t\/\/ be raised.\n\tMinFailureRelaxInterval time.Duration\n}\n\nfunc (c *MissionControlConfig) validate() error {\n\tif err := c.ProbabilityEstimatorCfg.validate(); err != nil {\n\t\treturn err\n\t}\n\n\tif c.MaxMcHistory < 0 {\n\t\treturn ErrInvalidMcHistory\n\t}\n\n\tif c.MinFailureRelaxInterval < 0 {\n\t\treturn ErrInvalidFailureInterval\n\t}\n\n\treturn nil\n}\n\n\/\/ String returns a string representation of a mission control config.\nfunc (c *MissionControlConfig) String() string {\n\treturn fmt.Sprintf(\"Penalty Half Life: %v, Apriori Hop \"+\n\t\t\"Probablity: %v, Maximum History: %v, Apriori Weight: %v, \"+\n\t\t\"Minimum Failure Relax Interval: %v\", c.PenaltyHalfLife,\n\t\tc.AprioriHopProbability, c.MaxMcHistory, c.AprioriWeight,\n\t\tc.MinFailureRelaxInterval)\n}\n\n\/\/ TimedPairResult describes a timestamped pair result.\ntype TimedPairResult struct {\n\t\/\/ FailTime is the time of the last failure.\n\tFailTime time.Time\n\n\t\/\/ FailAmt is the amount of the last failure. This amount may be pushed\n\t\/\/ up if a later success is higher than the last failed amount.\n\tFailAmt lnwire.MilliSatoshi\n\n\t\/\/ SuccessTime is the time of the last success.\n\tSuccessTime time.Time\n\n\t\/\/ SuccessAmt is the highest amount that successfully forwarded. This\n\t\/\/ isn't necessarily the last success amount. The value of this field\n\t\/\/ may also be pushed down if a later failure is lower than the highest\n\t\/\/ success amount. Because of this, SuccessAmt may not match\n\t\/\/ SuccessTime.\n\tSuccessAmt lnwire.MilliSatoshi\n}\n\n\/\/ MissionControlSnapshot contains a snapshot of the current state of mission\n\/\/ control.\ntype MissionControlSnapshot struct {\n\t\/\/ Pairs is a list of channels for which specific information is\n\t\/\/ logged.\n\tPairs []MissionControlPairSnapshot\n}\n\n\/\/ MissionControlPairSnapshot contains a snapshot of the current node pair\n\/\/ state in mission control.\ntype MissionControlPairSnapshot struct {\n\t\/\/ Pair is the node pair of which the state is described.\n\tPair DirectedNodePair\n\n\t\/\/ TimedPairResult contains the data for this pair.\n\tTimedPairResult\n}\n\n\/\/ paymentResult is the information that becomes available when a payment\n\/\/ attempt completes.\ntype paymentResult struct {\n\tid uint64\n\ttimeFwd, timeReply time.Time\n\troute *route.Route\n\tsuccess bool\n\tfailureSourceIdx *int\n\tfailure lnwire.FailureMessage\n}\n\n\/\/ NewMissionControl returns a new instance of missionControl.\nfunc NewMissionControl(db kvdb.Backend, self route.Vertex,\n\tcfg *MissionControlConfig) (*MissionControl, error) {\n\n\tlog.Debugf(\"Instantiating mission control with config: %v\", cfg)\n\n\tif err := cfg.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tstore, err := newMissionControlStore(db, cfg.MaxMcHistory)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\testimator := &probabilityEstimator{\n\t\tProbabilityEstimatorCfg: cfg.ProbabilityEstimatorCfg,\n\t\tprevSuccessProbability: prevSuccessProbability,\n\t}\n\n\tmc := &MissionControl{\n\t\tstate: newMissionControlState(cfg.MinFailureRelaxInterval),\n\t\tnow: time.Now,\n\t\tselfNode: self,\n\t\tstore: store,\n\t\testimator: estimator,\n\t}\n\n\tif err := mc.init(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn mc, nil\n}\n\n\/\/ init initializes mission control with historical data.\nfunc (m *MissionControl) init() error {\n\tlog.Debugf(\"Mission control state reconstruction started\")\n\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tstart := time.Now()\n\n\tresults, err := m.store.fetchAll()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, result := range results {\n\t\tm.applyPaymentResult(result)\n\t}\n\n\tlog.Debugf(\"Mission control state reconstruction finished: \"+\n\t\t\"n=%v, time=%v\", len(results), time.Since(start))\n\n\treturn nil\n}\n\n\/\/ GetConfig returns the config that mission control is currently configured\n\/\/ with. All fields are copied by value, so we do not need to worry about\n\/\/ mutation.\nfunc (m *MissionControl) GetConfig() *MissionControlConfig {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\treturn &MissionControlConfig{\n\t\tProbabilityEstimatorCfg: m.estimator.ProbabilityEstimatorCfg,\n\t\tMaxMcHistory: m.store.maxRecords,\n\t\tMinFailureRelaxInterval: m.state.minFailureRelaxInterval,\n\t}\n}\n\n\/\/ SetConfig validates the config provided and updates mission control's config\n\/\/ if it is valid.\nfunc (m *MissionControl) SetConfig(cfg *MissionControlConfig) error {\n\tif cfg == nil {\n\t\treturn errors.New(\"nil mission control config\")\n\t}\n\n\tif err := cfg.validate(); err != nil {\n\t\treturn err\n\t}\n\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tlog.Infof(\"Updating mission control cfg: %v\", cfg)\n\n\tm.store.maxRecords = cfg.MaxMcHistory\n\tm.state.minFailureRelaxInterval = cfg.MinFailureRelaxInterval\n\tm.estimator.ProbabilityEstimatorCfg = cfg.ProbabilityEstimatorCfg\n\n\treturn nil\n}\n\n\/\/ ResetHistory resets the history of MissionControl returning it to a state as\n\/\/ if no payment attempts have been made.\nfunc (m *MissionControl) ResetHistory() error {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tif err := m.store.clear(); err != nil {\n\t\treturn err\n\t}\n\n\tm.state.resetHistory()\n\n\tlog.Debugf(\"Mission control history cleared\")\n\n\treturn nil\n}\n\n\/\/ GetProbability is expected to return the success probability of a payment\n\/\/ from fromNode along edge.\nfunc (m *MissionControl) GetProbability(fromNode, toNode route.Vertex,\n\tamt lnwire.MilliSatoshi) float64 {\n\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tnow := m.now()\n\tresults, _ := m.state.getLastPairResult(fromNode)\n\n\t\/\/ Use a distinct probability estimation function for local channels.\n\tif fromNode == m.selfNode {\n\t\treturn m.estimator.getLocalPairProbability(now, results, toNode)\n\t}\n\n\treturn m.estimator.getPairProbability(now, results, toNode, amt)\n}\n\n\/\/ GetHistorySnapshot takes a snapshot from the current mission control state\n\/\/ and actual probability estimates.\nfunc (m *MissionControl) GetHistorySnapshot() *MissionControlSnapshot {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tlog.Debugf(\"Requesting history snapshot from mission control\")\n\n\treturn m.state.getSnapshot()\n}\n\n\/\/ GetPairHistorySnapshot returns the stored history for a given node pair.\nfunc (m *MissionControl) GetPairHistorySnapshot(\n\tfromNode, toNode route.Vertex) TimedPairResult {\n\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tresults, ok := m.state.getLastPairResult(fromNode)\n\tif !ok {\n\t\treturn TimedPairResult{}\n\t}\n\n\tresult, ok := results[toNode]\n\tif !ok {\n\t\treturn TimedPairResult{}\n\t}\n\n\treturn result\n}\n\n\/\/ ReportPaymentFail reports a failed payment to mission control as input for\n\/\/ future probability estimates. The failureSourceIdx argument indicates the\n\/\/ failure source. If it is nil, the failure source is unknown. This function\n\/\/ returns a reason if this failure is a final failure. In that case no further\n\/\/ payment attempts need to be made.\nfunc (m *MissionControl) ReportPaymentFail(paymentID uint64, rt *route.Route,\n\tfailureSourceIdx *int, failure lnwire.FailureMessage) (\n\t*channeldb.FailureReason, error) {\n\n\ttimestamp := m.now()\n\n\tresult := &paymentResult{\n\t\tsuccess: false,\n\t\ttimeFwd: timestamp,\n\t\ttimeReply: timestamp,\n\t\tid: paymentID,\n\t\tfailureSourceIdx: failureSourceIdx,\n\t\tfailure: failure,\n\t\troute: rt,\n\t}\n\n\treturn m.processPaymentResult(result)\n}\n\n\/\/ ReportPaymentSuccess reports a successful payment to mission control as input\n\/\/ for future probability estimates.\nfunc (m *MissionControl) ReportPaymentSuccess(paymentID uint64,\n\trt *route.Route) error {\n\n\ttimestamp := m.now()\n\n\tresult := &paymentResult{\n\t\ttimeFwd: timestamp,\n\t\ttimeReply: timestamp,\n\t\tid: paymentID,\n\t\tsuccess: true,\n\t\troute: rt,\n\t}\n\n\t_, err := m.processPaymentResult(result)\n\treturn err\n}\n\n\/\/ processPaymentResult stores a payment result in the mission control store and\n\/\/ updates mission control's in-memory state.\nfunc (m *MissionControl) processPaymentResult(result *paymentResult) (\n\t*channeldb.FailureReason, error) {\n\n\t\/\/ Store complete result in database.\n\tif err := m.store.AddResult(result); err != nil {\n\t\treturn nil, err\n\t}\n\n\tm.Lock()\n\tdefer m.Unlock()\n\n\t\/\/ Apply result to update mission control state.\n\treason := m.applyPaymentResult(result)\n\n\treturn reason, nil\n}\n\n\/\/ applyPaymentResult applies a payment result as input for future probability\n\/\/ estimates. It returns a bool indicating whether this error is a final error\n\/\/ and no further payment attempts need to be made.\nfunc (m *MissionControl) applyPaymentResult(\n\tresult *paymentResult) *channeldb.FailureReason {\n\n\t\/\/ Interpret result.\n\ti := interpretResult(\n\t\tresult.route, result.success, result.failureSourceIdx,\n\t\tresult.failure,\n\t)\n\n\tif i.policyFailure != nil {\n\t\tif m.state.requestSecondChance(\n\t\t\tresult.timeReply,\n\t\t\ti.policyFailure.From, i.policyFailure.To,\n\t\t) {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ If there is a node-level failure, record a failure for every tried\n\t\/\/ connection of that node. A node-level failure can be considered as a\n\t\/\/ failure that would have occurred with any of the node's channels.\n\t\/\/\n\t\/\/ Ideally we'd also record the failure for the untried connections of\n\t\/\/ the node. Unfortunately this would require access to the graph and\n\t\/\/ adding this dependency and db calls does not outweigh the benefits.\n\t\/\/\n\t\/\/ Untried connections will fall back to the node probability. After the\n\t\/\/ call to setAllPairResult below, the node probability will be equal to\n\t\/\/ the probability of the tried channels except that the a priori\n\t\/\/ probability is mixed in too. This effect is controlled by the\n\t\/\/ aprioriWeight parameter. If that parameter isn't set to an extreme\n\t\/\/ and there are a few known connections, there shouldn't be much of a\n\t\/\/ difference. The largest difference occurs when aprioriWeight is 1. In\n\t\/\/ that case, a node-level failure would not be applied to untried\n\t\/\/ channels.\n\tif i.nodeFailure != nil {\n\t\tlog.Debugf(\"Reporting node failure to Mission Control: \"+\n\t\t\t\"node=%v\", *i.nodeFailure)\n\n\t\tm.state.setAllFail(*i.nodeFailure, result.timeReply)\n\t}\n\n\tfor pair, pairResult := range i.pairResults {\n\t\tpairResult := pairResult\n\n\t\tif pairResult.success {\n\t\t\tlog.Debugf(\"Reporting pair success to Mission \"+\n\t\t\t\t\"Control: pair=%v, amt=%v\",\n\t\t\t\tpair, pairResult.amt)\n\t\t} else {\n\t\t\tlog.Debugf(\"Reporting pair failure to Mission \"+\n\t\t\t\t\"Control: pair=%v, amt=%v\",\n\t\t\t\tpair, pairResult.amt)\n\t\t}\n\n\t\tm.state.setLastPairResult(\n\t\t\tpair.From, pair.To, result.timeReply, &pairResult,\n\t\t)\n\t}\n\n\treturn i.finalFailureReason\n}\n<commit_msg>routing: remove unnecessary lock in mission control init<commit_after>package routing\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/lightningnetwork\/lnd\/channeldb\"\n\t\"github.com\/lightningnetwork\/lnd\/channeldb\/kvdb\"\n\t\"github.com\/lightningnetwork\/lnd\/lnwire\"\n\t\"github.com\/lightningnetwork\/lnd\/routing\/route\"\n)\n\nconst (\n\t\/\/ DefaultPenaltyHalfLife is the default half-life duration. The\n\t\/\/ half-life duration defines after how much time a penalized node or\n\t\/\/ channel is back at 50% probability.\n\tDefaultPenaltyHalfLife = time.Hour\n\n\t\/\/ minSecondChanceInterval is the minimum time required between\n\t\/\/ second-chance failures.\n\t\/\/\n\t\/\/ If nodes return a channel policy related failure, they may get a\n\t\/\/ second chance to forward the payment. It could be that the channel\n\t\/\/ policy that we are aware of is not up to date. This is especially\n\t\/\/ important in case of mobile apps that are mostly offline.\n\t\/\/\n\t\/\/ However, we don't want to give nodes the option to endlessly return\n\t\/\/ new channel updates so that we are kept busy trying to route through\n\t\/\/ that node until the payment loop times out.\n\t\/\/\n\t\/\/ Therefore we only grant a second chance to a node if the previous\n\t\/\/ second chance is sufficiently long ago. This is what\n\t\/\/ minSecondChanceInterval defines. If a second policy failure comes in\n\t\/\/ within that interval, we will apply a penalty.\n\t\/\/\n\t\/\/ Second chances granted are tracked on the level of node pairs. This\n\t\/\/ means that if a node has multiple channels to the same peer, they\n\t\/\/ will only get a single second chance to route to that peer again.\n\t\/\/ Nodes forward non-strict, so it isn't necessary to apply a less\n\t\/\/ restrictive channel level tracking scheme here.\n\tminSecondChanceInterval = time.Minute\n\n\t\/\/ DefaultMaxMcHistory is the default maximum history size.\n\tDefaultMaxMcHistory = 1000\n\n\t\/\/ prevSuccessProbability is the assumed probability for node pairs that\n\t\/\/ successfully relayed the previous attempt.\n\tprevSuccessProbability = 0.95\n\n\t\/\/ DefaultAprioriWeight is the default a priori weight. See\n\t\/\/ MissionControlConfig for further explanation.\n\tDefaultAprioriWeight = 0.5\n\n\t\/\/ DefaultMinFailureRelaxInterval is the default minimum time that must\n\t\/\/ have passed since the previously recorded failure before the failure\n\t\/\/ amount may be raised.\n\tDefaultMinFailureRelaxInterval = time.Minute\n)\n\nvar (\n\t\/\/ ErrInvalidMcHistory is returned if we get a negative mission control\n\t\/\/ history count.\n\tErrInvalidMcHistory = errors.New(\"mission control history must be \" +\n\t\t\">= 0\")\n\n\t\/\/ ErrInvalidFailureInterval is returned if we get an invalid failure\n\t\/\/ interval.\n\tErrInvalidFailureInterval = errors.New(\"failure interval must be >= 0\")\n)\n\n\/\/ NodeResults contains previous results from a node to its peers.\ntype NodeResults map[route.Vertex]TimedPairResult\n\n\/\/ MissionControl contains state which summarizes the past attempts of HTLC\n\/\/ routing by external callers when sending payments throughout the network. It\n\/\/ acts as a shared memory during routing attempts with the goal to optimize the\n\/\/ payment attempt success rate.\n\/\/\n\/\/ Failed payment attempts are reported to mission control. These reports are\n\/\/ used to track the time of the last node or channel level failure. The time\n\/\/ since the last failure is used to estimate a success probability that is fed\n\/\/ into the path finding process for subsequent payment attempts.\ntype MissionControl struct {\n\t\/\/ state is the internal mission control state that is input for\n\t\/\/ probability estimation.\n\tstate *missionControlState\n\n\t\/\/ now is expected to return the current time. It is supplied as an\n\t\/\/ external function to enable deterministic unit tests.\n\tnow func() time.Time\n\n\t\/\/ selfNode is our pubkey.\n\tselfNode route.Vertex\n\n\tstore *missionControlStore\n\n\t\/\/ estimator is the probability estimator that is used with the payment\n\t\/\/ results that mission control collects.\n\testimator *probabilityEstimator\n\n\tsync.Mutex\n\n\t\/\/ TODO(roasbeef): further counters, if vertex continually unavailable,\n\t\/\/ add to another generation\n\n\t\/\/ TODO(roasbeef): also add favorable metrics for nodes\n}\n\n\/\/ MissionControlConfig defines parameters that control mission control\n\/\/ behaviour.\ntype MissionControlConfig struct {\n\t\/\/ ProbabilityEstimatorConfig is the config we will use for probability\n\t\/\/ calculations.\n\tProbabilityEstimatorCfg\n\n\t\/\/ MaxMcHistory defines the maximum number of payment results that are\n\t\/\/ held on disk.\n\tMaxMcHistory int\n\n\t\/\/ MinFailureRelaxInterval is the minimum time that must have passed\n\t\/\/ since the previously recorded failure before the failure amount may\n\t\/\/ be raised.\n\tMinFailureRelaxInterval time.Duration\n}\n\nfunc (c *MissionControlConfig) validate() error {\n\tif err := c.ProbabilityEstimatorCfg.validate(); err != nil {\n\t\treturn err\n\t}\n\n\tif c.MaxMcHistory < 0 {\n\t\treturn ErrInvalidMcHistory\n\t}\n\n\tif c.MinFailureRelaxInterval < 0 {\n\t\treturn ErrInvalidFailureInterval\n\t}\n\n\treturn nil\n}\n\n\/\/ String returns a string representation of a mission control config.\nfunc (c *MissionControlConfig) String() string {\n\treturn fmt.Sprintf(\"Penalty Half Life: %v, Apriori Hop \"+\n\t\t\"Probablity: %v, Maximum History: %v, Apriori Weight: %v, \"+\n\t\t\"Minimum Failure Relax Interval: %v\", c.PenaltyHalfLife,\n\t\tc.AprioriHopProbability, c.MaxMcHistory, c.AprioriWeight,\n\t\tc.MinFailureRelaxInterval)\n}\n\n\/\/ TimedPairResult describes a timestamped pair result.\ntype TimedPairResult struct {\n\t\/\/ FailTime is the time of the last failure.\n\tFailTime time.Time\n\n\t\/\/ FailAmt is the amount of the last failure. This amount may be pushed\n\t\/\/ up if a later success is higher than the last failed amount.\n\tFailAmt lnwire.MilliSatoshi\n\n\t\/\/ SuccessTime is the time of the last success.\n\tSuccessTime time.Time\n\n\t\/\/ SuccessAmt is the highest amount that successfully forwarded. This\n\t\/\/ isn't necessarily the last success amount. The value of this field\n\t\/\/ may also be pushed down if a later failure is lower than the highest\n\t\/\/ success amount. Because of this, SuccessAmt may not match\n\t\/\/ SuccessTime.\n\tSuccessAmt lnwire.MilliSatoshi\n}\n\n\/\/ MissionControlSnapshot contains a snapshot of the current state of mission\n\/\/ control.\ntype MissionControlSnapshot struct {\n\t\/\/ Pairs is a list of channels for which specific information is\n\t\/\/ logged.\n\tPairs []MissionControlPairSnapshot\n}\n\n\/\/ MissionControlPairSnapshot contains a snapshot of the current node pair\n\/\/ state in mission control.\ntype MissionControlPairSnapshot struct {\n\t\/\/ Pair is the node pair of which the state is described.\n\tPair DirectedNodePair\n\n\t\/\/ TimedPairResult contains the data for this pair.\n\tTimedPairResult\n}\n\n\/\/ paymentResult is the information that becomes available when a payment\n\/\/ attempt completes.\ntype paymentResult struct {\n\tid uint64\n\ttimeFwd, timeReply time.Time\n\troute *route.Route\n\tsuccess bool\n\tfailureSourceIdx *int\n\tfailure lnwire.FailureMessage\n}\n\n\/\/ NewMissionControl returns a new instance of missionControl.\nfunc NewMissionControl(db kvdb.Backend, self route.Vertex,\n\tcfg *MissionControlConfig) (*MissionControl, error) {\n\n\tlog.Debugf(\"Instantiating mission control with config: %v\", cfg)\n\n\tif err := cfg.validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tstore, err := newMissionControlStore(db, cfg.MaxMcHistory)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\testimator := &probabilityEstimator{\n\t\tProbabilityEstimatorCfg: cfg.ProbabilityEstimatorCfg,\n\t\tprevSuccessProbability: prevSuccessProbability,\n\t}\n\n\tmc := &MissionControl{\n\t\tstate: newMissionControlState(cfg.MinFailureRelaxInterval),\n\t\tnow: time.Now,\n\t\tselfNode: self,\n\t\tstore: store,\n\t\testimator: estimator,\n\t}\n\n\tif err := mc.init(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn mc, nil\n}\n\n\/\/ init initializes mission control with historical data.\nfunc (m *MissionControl) init() error {\n\tlog.Debugf(\"Mission control state reconstruction started\")\n\n\tstart := time.Now()\n\n\tresults, err := m.store.fetchAll()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, result := range results {\n\t\tm.applyPaymentResult(result)\n\t}\n\n\tlog.Debugf(\"Mission control state reconstruction finished: \"+\n\t\t\"n=%v, time=%v\", len(results), time.Since(start))\n\n\treturn nil\n}\n\n\/\/ GetConfig returns the config that mission control is currently configured\n\/\/ with. All fields are copied by value, so we do not need to worry about\n\/\/ mutation.\nfunc (m *MissionControl) GetConfig() *MissionControlConfig {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\treturn &MissionControlConfig{\n\t\tProbabilityEstimatorCfg: m.estimator.ProbabilityEstimatorCfg,\n\t\tMaxMcHistory: m.store.maxRecords,\n\t\tMinFailureRelaxInterval: m.state.minFailureRelaxInterval,\n\t}\n}\n\n\/\/ SetConfig validates the config provided and updates mission control's config\n\/\/ if it is valid.\nfunc (m *MissionControl) SetConfig(cfg *MissionControlConfig) error {\n\tif cfg == nil {\n\t\treturn errors.New(\"nil mission control config\")\n\t}\n\n\tif err := cfg.validate(); err != nil {\n\t\treturn err\n\t}\n\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tlog.Infof(\"Updating mission control cfg: %v\", cfg)\n\n\tm.store.maxRecords = cfg.MaxMcHistory\n\tm.state.minFailureRelaxInterval = cfg.MinFailureRelaxInterval\n\tm.estimator.ProbabilityEstimatorCfg = cfg.ProbabilityEstimatorCfg\n\n\treturn nil\n}\n\n\/\/ ResetHistory resets the history of MissionControl returning it to a state as\n\/\/ if no payment attempts have been made.\nfunc (m *MissionControl) ResetHistory() error {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tif err := m.store.clear(); err != nil {\n\t\treturn err\n\t}\n\n\tm.state.resetHistory()\n\n\tlog.Debugf(\"Mission control history cleared\")\n\n\treturn nil\n}\n\n\/\/ GetProbability is expected to return the success probability of a payment\n\/\/ from fromNode along edge.\nfunc (m *MissionControl) GetProbability(fromNode, toNode route.Vertex,\n\tamt lnwire.MilliSatoshi) float64 {\n\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tnow := m.now()\n\tresults, _ := m.state.getLastPairResult(fromNode)\n\n\t\/\/ Use a distinct probability estimation function for local channels.\n\tif fromNode == m.selfNode {\n\t\treturn m.estimator.getLocalPairProbability(now, results, toNode)\n\t}\n\n\treturn m.estimator.getPairProbability(now, results, toNode, amt)\n}\n\n\/\/ GetHistorySnapshot takes a snapshot from the current mission control state\n\/\/ and actual probability estimates.\nfunc (m *MissionControl) GetHistorySnapshot() *MissionControlSnapshot {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tlog.Debugf(\"Requesting history snapshot from mission control\")\n\n\treturn m.state.getSnapshot()\n}\n\n\/\/ GetPairHistorySnapshot returns the stored history for a given node pair.\nfunc (m *MissionControl) GetPairHistorySnapshot(\n\tfromNode, toNode route.Vertex) TimedPairResult {\n\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tresults, ok := m.state.getLastPairResult(fromNode)\n\tif !ok {\n\t\treturn TimedPairResult{}\n\t}\n\n\tresult, ok := results[toNode]\n\tif !ok {\n\t\treturn TimedPairResult{}\n\t}\n\n\treturn result\n}\n\n\/\/ ReportPaymentFail reports a failed payment to mission control as input for\n\/\/ future probability estimates. The failureSourceIdx argument indicates the\n\/\/ failure source. If it is nil, the failure source is unknown. This function\n\/\/ returns a reason if this failure is a final failure. In that case no further\n\/\/ payment attempts need to be made.\nfunc (m *MissionControl) ReportPaymentFail(paymentID uint64, rt *route.Route,\n\tfailureSourceIdx *int, failure lnwire.FailureMessage) (\n\t*channeldb.FailureReason, error) {\n\n\ttimestamp := m.now()\n\n\tresult := &paymentResult{\n\t\tsuccess: false,\n\t\ttimeFwd: timestamp,\n\t\ttimeReply: timestamp,\n\t\tid: paymentID,\n\t\tfailureSourceIdx: failureSourceIdx,\n\t\tfailure: failure,\n\t\troute: rt,\n\t}\n\n\treturn m.processPaymentResult(result)\n}\n\n\/\/ ReportPaymentSuccess reports a successful payment to mission control as input\n\/\/ for future probability estimates.\nfunc (m *MissionControl) ReportPaymentSuccess(paymentID uint64,\n\trt *route.Route) error {\n\n\ttimestamp := m.now()\n\n\tresult := &paymentResult{\n\t\ttimeFwd: timestamp,\n\t\ttimeReply: timestamp,\n\t\tid: paymentID,\n\t\tsuccess: true,\n\t\troute: rt,\n\t}\n\n\t_, err := m.processPaymentResult(result)\n\treturn err\n}\n\n\/\/ processPaymentResult stores a payment result in the mission control store and\n\/\/ updates mission control's in-memory state.\nfunc (m *MissionControl) processPaymentResult(result *paymentResult) (\n\t*channeldb.FailureReason, error) {\n\n\t\/\/ Store complete result in database.\n\tif err := m.store.AddResult(result); err != nil {\n\t\treturn nil, err\n\t}\n\n\tm.Lock()\n\tdefer m.Unlock()\n\n\t\/\/ Apply result to update mission control state.\n\treason := m.applyPaymentResult(result)\n\n\treturn reason, nil\n}\n\n\/\/ applyPaymentResult applies a payment result as input for future probability\n\/\/ estimates. It returns a bool indicating whether this error is a final error\n\/\/ and no further payment attempts need to be made.\nfunc (m *MissionControl) applyPaymentResult(\n\tresult *paymentResult) *channeldb.FailureReason {\n\n\t\/\/ Interpret result.\n\ti := interpretResult(\n\t\tresult.route, result.success, result.failureSourceIdx,\n\t\tresult.failure,\n\t)\n\n\tif i.policyFailure != nil {\n\t\tif m.state.requestSecondChance(\n\t\t\tresult.timeReply,\n\t\t\ti.policyFailure.From, i.policyFailure.To,\n\t\t) {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ If there is a node-level failure, record a failure for every tried\n\t\/\/ connection of that node. A node-level failure can be considered as a\n\t\/\/ failure that would have occurred with any of the node's channels.\n\t\/\/\n\t\/\/ Ideally we'd also record the failure for the untried connections of\n\t\/\/ the node. Unfortunately this would require access to the graph and\n\t\/\/ adding this dependency and db calls does not outweigh the benefits.\n\t\/\/\n\t\/\/ Untried connections will fall back to the node probability. After the\n\t\/\/ call to setAllPairResult below, the node probability will be equal to\n\t\/\/ the probability of the tried channels except that the a priori\n\t\/\/ probability is mixed in too. This effect is controlled by the\n\t\/\/ aprioriWeight parameter. If that parameter isn't set to an extreme\n\t\/\/ and there are a few known connections, there shouldn't be much of a\n\t\/\/ difference. The largest difference occurs when aprioriWeight is 1. In\n\t\/\/ that case, a node-level failure would not be applied to untried\n\t\/\/ channels.\n\tif i.nodeFailure != nil {\n\t\tlog.Debugf(\"Reporting node failure to Mission Control: \"+\n\t\t\t\"node=%v\", *i.nodeFailure)\n\n\t\tm.state.setAllFail(*i.nodeFailure, result.timeReply)\n\t}\n\n\tfor pair, pairResult := range i.pairResults {\n\t\tpairResult := pairResult\n\n\t\tif pairResult.success {\n\t\t\tlog.Debugf(\"Reporting pair success to Mission \"+\n\t\t\t\t\"Control: pair=%v, amt=%v\",\n\t\t\t\tpair, pairResult.amt)\n\t\t} else {\n\t\t\tlog.Debugf(\"Reporting pair failure to Mission \"+\n\t\t\t\t\"Control: pair=%v, amt=%v\",\n\t\t\t\tpair, pairResult.amt)\n\t\t}\n\n\t\tm.state.setLastPairResult(\n\t\t\tpair.From, pair.To, result.timeReply, &pairResult,\n\t\t)\n\t}\n\n\treturn i.finalFailureReason\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ p_s - library routines for pstop.\n\/\/\n\/\/ This file contains the library routines for managing the\n\/\/ table_io_waits_by_table table.\npackage table_io_waits_summary_by_table\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/sjmudd\/pstop\/lib\"\n\t\"github.com\/sjmudd\/pstop\/p_s\"\n)\n\n\/\/ a table of rows\ntype Table_io_waits_summary_by_table struct {\n\tp_s.RelativeStats\n\tp_s.InitialTime\n\twant_latency bool\n\tinitial table_io_waits_summary_by_table_rows \/\/ initial data for relative values\n\tcurrent table_io_waits_summary_by_table_rows \/\/ last loaded values\n\tresults table_io_waits_summary_by_table_rows \/\/ results (maybe with subtraction)\n\ttotals table_io_waits_summary_by_table_row \/\/ totals of results\n}\n\nfunc (t *Table_io_waits_summary_by_table) SetWantsLatency(want_latency bool) {\n\tt.want_latency = want_latency\n}\n\nfunc (t Table_io_waits_summary_by_table) WantsLatency() bool {\n\treturn t.want_latency\n}\n\n\/\/ Collect() collects data from the db, updating initial\n\/\/ values if needed, and then subtracting initial values if we want\n\/\/ relative values, after which it stores totals.\nfunc (t *Table_io_waits_summary_by_table) Collect(dbh *sql.DB) {\n\tstart := time.Now()\n\t\/\/ lib.Logger.Println(\"Table_io_waits_summary_by_table.Collect() BEGIN\")\n\tt.current = select_tiwsbt_rows(dbh)\n\tlib.Logger.Println(\"t.current collected\", len(t.current), \"row(s) from SELECT\")\n\n\tif len(t.initial) == 0 && len(t.current) > 0 {\n\t\tlib.Logger.Println(\"t.initial: copying from t.current (initial setup)\")\n\t\tt.initial = make(table_io_waits_summary_by_table_rows, len(t.current))\n\t\tcopy(t.initial, t.current)\n\t}\n\n\t\/\/ check for reload initial characteristics\n\tif t.initial.needs_refresh(t.current) {\n\t\tlib.Logger.Println(\"t.initial: copying from t.current (data needs refreshing)\")\n\t\tt.initial = make(table_io_waits_summary_by_table_rows, len(t.current))\n\t\tcopy(t.initial, t.current)\n\t}\n\n\tt.make_results()\n\n\t\/\/ lib.Logger.Println( \"t.initial:\", t.initial )\n\t\/\/ lib.Logger.Println( \"t.current:\", t.current )\n\tlib.Logger.Println(\"t.initial.totals():\", t.initial.totals())\n\tlib.Logger.Println(\"t.current.totals():\", t.current.totals())\n\t\/\/ lib.Logger.Println(\"t.results:\", t.results)\n\t\/\/ lib.Logger.Println(\"t.totals:\", t.totals)\n\tlib.Logger.Println(\"Table_io_waits_summary_by_table.Collect() END, took:\", time.Duration(time.Since(start)).String())\n}\n\nfunc (t *Table_io_waits_summary_by_table) make_results() {\n\t\/\/ lib.Logger.Println( \"- t.results set from t.current\" )\n\tt.results = make(table_io_waits_summary_by_table_rows, len(t.current))\n\tcopy(t.results, t.current)\n\tif t.WantRelativeStats() {\n\t\t\/\/ lib.Logger.Println( \"- subtracting t.initial from t.results as WantRelativeStats()\" )\n\t\tt.results.subtract(t.initial)\n\t}\n\n\t\/\/ lib.Logger.Println( \"- sorting t.results\" )\n\tt.results.Sort(t.want_latency)\n\t\/\/ lib.Logger.Println( \"- collecting t.totals from t.results\" )\n\tt.totals = t.results.totals()\n}\n\n\/\/ reset the statistics to current values\nfunc (t *Table_io_waits_summary_by_table) SyncReferenceValues() {\n\t\/\/ lib.Logger.Println( \"Table_io_waits_summary_by_table.SyncReferenceValues() BEGIN\" )\n\n\tt.initial = make(table_io_waits_summary_by_table_rows, len(t.current))\n\tcopy(t.initial, t.current)\n\n\tt.make_results()\n\n\t\/\/ lib.Logger.Println( \"Table_io_waits_summary_by_table.SyncReferenceValues() END\" )\n}\n\nfunc (t *Table_io_waits_summary_by_table) Headings() string {\n\tif t.want_latency {\n\t\treturn t.latencyHeadings()\n\t} else {\n\t\treturn t.opsHeadings()\n\t}\n}\n\nfunc (t Table_io_waits_summary_by_table) RowContent(max_rows int) []string {\n\tif t.want_latency {\n\t\treturn t.latencyRowContent(max_rows)\n\t} else {\n\t\treturn t.opsRowContent(max_rows)\n\t}\n}\n\nfunc (t Table_io_waits_summary_by_table) EmptyRowContent() string {\n\tif t.want_latency {\n\t\treturn t.emptyLatencyRowContent()\n\t} else {\n\t\treturn t.emptyOpsRowContent()\n\t}\n}\n\nfunc (t Table_io_waits_summary_by_table) TotalRowContent() string {\n\tif t.want_latency {\n\t\treturn t.totalLatencyRowContent()\n\t} else {\n\t\treturn t.totalOpsRowContent()\n\t}\n}\n\nfunc (t Table_io_waits_summary_by_table) Description() string {\n\tif t.want_latency {\n\t\treturn t.latencyDescription()\n\t} else {\n\t\treturn t.opsDescription()\n\t}\n}\n\nfunc (t *Table_io_waits_summary_by_table) latencyHeadings() string {\n\tvar r table_io_waits_summary_by_table_row\n\n\treturn r.latency_headings()\n}\n\nfunc (t *Table_io_waits_summary_by_table) opsHeadings() string {\n\tvar r table_io_waits_summary_by_table_row\n\n\treturn r.ops_headings()\n}\n\nfunc (t Table_io_waits_summary_by_table) opsRowContent(max_rows int) []string {\n\trows := make([]string, 0, max_rows)\n\n\tfor i := range t.results {\n\t\tif i < max_rows {\n\t\t\trows = append(rows, t.results[i].ops_row_content(t.totals))\n\t\t}\n\t}\n\n\treturn rows\n}\n\nfunc (t Table_io_waits_summary_by_table) latencyRowContent(max_rows int) []string {\n\trows := make([]string, 0, max_rows)\n\n\tfor i := range t.results {\n\t\tif i < max_rows {\n\t\t\trows = append(rows, t.results[i].latency_row_content(t.totals))\n\t\t}\n\t}\n\n\treturn rows\n}\n\nfunc (t Table_io_waits_summary_by_table) emptyOpsRowContent() string {\n\tvar r table_io_waits_summary_by_table_row\n\n\treturn r.ops_row_content(r)\n}\n\nfunc (t Table_io_waits_summary_by_table) emptyLatencyRowContent() string {\n\tvar r table_io_waits_summary_by_table_row\n\n\treturn r.latency_row_content(r)\n}\n\nfunc (t Table_io_waits_summary_by_table) totalOpsRowContent() string {\n\treturn t.totals.ops_row_content(t.totals)\n}\n\nfunc (t Table_io_waits_summary_by_table) totalLatencyRowContent() string {\n\treturn t.totals.latency_row_content(t.totals)\n}\n\nfunc (t Table_io_waits_summary_by_table) latencyDescription() string {\n\tcount := t.count_rows()\n\treturn fmt.Sprintf(\"Latency by Table Name (table_io_waits_summary_by_table) %d rows\", count)\n}\n\nfunc (t Table_io_waits_summary_by_table) opsDescription() string {\n\tcount := t.count_rows()\n\treturn fmt.Sprintf(\"Operations by Table Name (table_io_waits_summary_by_table) %d rows\", count)\n}\n\nfunc (t Table_io_waits_summary_by_table) count_rows() int {\n\tvar count int\n\tfor row := range t.results {\n\t\tif t.results[row].SUM_TIMER_WAIT > 0 {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}\n<commit_msg>Bugfix: resetting values did not reset the timestamp for table_io_waits_summary_by_table<commit_after>\/\/ p_s - library routines for pstop.\n\/\/\n\/\/ This file contains the library routines for managing the\n\/\/ table_io_waits_by_table table.\npackage table_io_waits_summary_by_table\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/sjmudd\/pstop\/lib\"\n\t\"github.com\/sjmudd\/pstop\/p_s\"\n)\n\n\/\/ a table of rows\ntype Table_io_waits_summary_by_table struct {\n\tp_s.RelativeStats\n\tp_s.InitialTime\n\twant_latency bool\n\tinitial table_io_waits_summary_by_table_rows \/\/ initial data for relative values\n\tcurrent table_io_waits_summary_by_table_rows \/\/ last loaded values\n\tresults table_io_waits_summary_by_table_rows \/\/ results (maybe with subtraction)\n\ttotals table_io_waits_summary_by_table_row \/\/ totals of results\n}\n\nfunc (t *Table_io_waits_summary_by_table) SetWantsLatency(want_latency bool) {\n\tt.want_latency = want_latency\n}\n\nfunc (t Table_io_waits_summary_by_table) WantsLatency() bool {\n\treturn t.want_latency\n}\n\n\/\/ Collect() collects data from the db, updating initial\n\/\/ values if needed, and then subtracting initial values if we want\n\/\/ relative values, after which it stores totals.\nfunc (t *Table_io_waits_summary_by_table) Collect(dbh *sql.DB) {\n\tstart := time.Now()\n\t\/\/ lib.Logger.Println(\"Table_io_waits_summary_by_table.Collect() BEGIN\")\n\tt.current = select_tiwsbt_rows(dbh)\n\tlib.Logger.Println(\"t.current collected\", len(t.current), \"row(s) from SELECT\")\n\n\tif len(t.initial) == 0 && len(t.current) > 0 {\n\t\tlib.Logger.Println(\"t.initial: copying from t.current (initial setup)\")\n\t\tt.initial = make(table_io_waits_summary_by_table_rows, len(t.current))\n\t\tcopy(t.initial, t.current)\n\t}\n\n\t\/\/ check for reload initial characteristics\n\tif t.initial.needs_refresh(t.current) {\n\t\tlib.Logger.Println(\"t.initial: copying from t.current (data needs refreshing)\")\n\t\tt.initial = make(table_io_waits_summary_by_table_rows, len(t.current))\n\t\tcopy(t.initial, t.current)\n\t}\n\n\tt.make_results()\n\n\t\/\/ lib.Logger.Println( \"t.initial:\", t.initial )\n\t\/\/ lib.Logger.Println( \"t.current:\", t.current )\n\tlib.Logger.Println(\"t.initial.totals():\", t.initial.totals())\n\tlib.Logger.Println(\"t.current.totals():\", t.current.totals())\n\t\/\/ lib.Logger.Println(\"t.results:\", t.results)\n\t\/\/ lib.Logger.Println(\"t.totals:\", t.totals)\n\tlib.Logger.Println(\"Table_io_waits_summary_by_table.Collect() END, took:\", time.Duration(time.Since(start)).String())\n}\n\nfunc (t *Table_io_waits_summary_by_table) make_results() {\n\t\/\/ lib.Logger.Println( \"- t.results set from t.current\" )\n\tt.results = make(table_io_waits_summary_by_table_rows, len(t.current))\n\tcopy(t.results, t.current)\n\tif t.WantRelativeStats() {\n\t\t\/\/ lib.Logger.Println( \"- subtracting t.initial from t.results as WantRelativeStats()\" )\n\t\tt.results.subtract(t.initial)\n\t}\n\n\t\/\/ lib.Logger.Println( \"- sorting t.results\" )\n\tt.results.Sort(t.want_latency)\n\t\/\/ lib.Logger.Println( \"- collecting t.totals from t.results\" )\n\tt.totals = t.results.totals()\n}\n\n\/\/ reset the statistics to current values\nfunc (t *Table_io_waits_summary_by_table) SyncReferenceValues() {\n\t\/\/ lib.Logger.Println( \"Table_io_waits_summary_by_table.SyncReferenceValues() BEGIN\" )\n\n\tt.SetNow()\n\tt.initial = make(table_io_waits_summary_by_table_rows, len(t.current))\n\tcopy(t.initial, t.current)\n\n\tt.make_results()\n\n\t\/\/ lib.Logger.Println( \"Table_io_waits_summary_by_table.SyncReferenceValues() END\" )\n}\n\nfunc (t *Table_io_waits_summary_by_table) Headings() string {\n\tif t.want_latency {\n\t\treturn t.latencyHeadings()\n\t} else {\n\t\treturn t.opsHeadings()\n\t}\n}\n\nfunc (t Table_io_waits_summary_by_table) RowContent(max_rows int) []string {\n\tif t.want_latency {\n\t\treturn t.latencyRowContent(max_rows)\n\t} else {\n\t\treturn t.opsRowContent(max_rows)\n\t}\n}\n\nfunc (t Table_io_waits_summary_by_table) EmptyRowContent() string {\n\tif t.want_latency {\n\t\treturn t.emptyLatencyRowContent()\n\t} else {\n\t\treturn t.emptyOpsRowContent()\n\t}\n}\n\nfunc (t Table_io_waits_summary_by_table) TotalRowContent() string {\n\tif t.want_latency {\n\t\treturn t.totalLatencyRowContent()\n\t} else {\n\t\treturn t.totalOpsRowContent()\n\t}\n}\n\nfunc (t Table_io_waits_summary_by_table) Description() string {\n\tif t.want_latency {\n\t\treturn t.latencyDescription()\n\t} else {\n\t\treturn t.opsDescription()\n\t}\n}\n\nfunc (t *Table_io_waits_summary_by_table) latencyHeadings() string {\n\tvar r table_io_waits_summary_by_table_row\n\n\treturn r.latency_headings()\n}\n\nfunc (t *Table_io_waits_summary_by_table) opsHeadings() string {\n\tvar r table_io_waits_summary_by_table_row\n\n\treturn r.ops_headings()\n}\n\nfunc (t Table_io_waits_summary_by_table) opsRowContent(max_rows int) []string {\n\trows := make([]string, 0, max_rows)\n\n\tfor i := range t.results {\n\t\tif i < max_rows {\n\t\t\trows = append(rows, t.results[i].ops_row_content(t.totals))\n\t\t}\n\t}\n\n\treturn rows\n}\n\nfunc (t Table_io_waits_summary_by_table) latencyRowContent(max_rows int) []string {\n\trows := make([]string, 0, max_rows)\n\n\tfor i := range t.results {\n\t\tif i < max_rows {\n\t\t\trows = append(rows, t.results[i].latency_row_content(t.totals))\n\t\t}\n\t}\n\n\treturn rows\n}\n\nfunc (t Table_io_waits_summary_by_table) emptyOpsRowContent() string {\n\tvar r table_io_waits_summary_by_table_row\n\n\treturn r.ops_row_content(r)\n}\n\nfunc (t Table_io_waits_summary_by_table) emptyLatencyRowContent() string {\n\tvar r table_io_waits_summary_by_table_row\n\n\treturn r.latency_row_content(r)\n}\n\nfunc (t Table_io_waits_summary_by_table) totalOpsRowContent() string {\n\treturn t.totals.ops_row_content(t.totals)\n}\n\nfunc (t Table_io_waits_summary_by_table) totalLatencyRowContent() string {\n\treturn t.totals.latency_row_content(t.totals)\n}\n\nfunc (t Table_io_waits_summary_by_table) latencyDescription() string {\n\tcount := t.count_rows()\n\treturn fmt.Sprintf(\"Latency by Table Name (table_io_waits_summary_by_table) %d rows\", count)\n}\n\nfunc (t Table_io_waits_summary_by_table) opsDescription() string {\n\tcount := t.count_rows()\n\treturn fmt.Sprintf(\"Operations by Table Name (table_io_waits_summary_by_table) %d rows\", count)\n}\n\nfunc (t Table_io_waits_summary_by_table) count_rows() int {\n\tvar count int\n\tfor row := range t.results {\n\t\tif t.results[row].SUM_TIMER_WAIT > 0 {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Cutehacks AS. All rights reserved.\n\/\/ License can be found in the LICENSE file.\n\npackage commands\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"qpm.io\/common\"\n\tmsg \"qpm.io\/common\/messages\"\n\t\"qpm.io\/qpm\/core\"\n)\n\ntype PublishCommand struct {\n\tBaseCommand\n\tPackageName string\n}\n\nfunc NewPublishCommand(ctx core.Context) *PublishCommand {\n\treturn &PublishCommand{\n\t\tBaseCommand: BaseCommand{\n\t\t\tCtx: ctx,\n\t\t},\n\t}\n}\n\nfunc (p PublishCommand) Description() string {\n\treturn \"Publishes a new module\"\n}\n\nfunc (p *PublishCommand) RegisterFlags(flags *flag.FlagSet) {\n\n}\n\nfunc get(name string, echoOff bool) string {\n\tvar val string\n\tfor {\n\t\tif echoOff {\n\t\t\tval = <-PromptPassword(name + \":\")\n\t\t} else {\n\t\t\tval = <-Prompt(name+\":\", \"\")\n\t\t}\n\t\tif val == \"\" {\n\t\t\tfmt.Printf(\"ERROR: Must enter a %s\\n\", name)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn val\n}\n\nfunc LoginPrompt(ctx context.Context, client msg.QpmClient) (string, error) {\n\n\temail := get(\"email\", false)\n\tpassword := get(\"password\", true)\n\n\tloginRequest := &msg.LoginRequest{\n\t\tEmail: email,\n\t\tPassword: password,\n\t\tCreate: false,\n\t}\n\n\tloginResp, err := client.Login(context.Background(), loginRequest)\n\n\tif err != nil {\n\t\tif grpc.Code(err) == codes.NotFound {\n\t\t\tfmt.Println(\"User not found. Confirm password to create a new user.\")\n\t\t\tconfirm := get(\"password\", true)\n\t\t\tif password != confirm {\n\t\t\t\treturn \"\", fmt.Errorf(\"Passwords do not match.\")\n\t\t\t}\n\n\t\t\tloginRequest.Create = true\n\t\t\tif loginResp, err = client.Login(context.Background(), loginRequest); err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t} else {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn loginResp.Token, nil\n}\n\nfunc (p *PublishCommand) Run() error {\n\n\ttoken, err := LoginPrompt(context.Background(), p.Ctx.Client)\n\n\tif err != nil {\n\t\tfmt.Printf(\"ERROR: %v\\n\", err)\n\t\treturn err\n\t}\n\n\tfmt.Println(\"Publishing\")\n\twrapper, err := common.LoadPackage(\"\")\n\n\tif err != nil {\n\t\tp.Fatal(\"Cannot read \" + core.PackageFile + \": \" + err.Error())\n\t}\n\n\t_, err = p.Ctx.Client.Publish(context.Background(), &msg.PublishRequest{\n\t\tToken: token,\n\t\tPackageDescription: wrapper.Package,\n\t})\n\n\tif err != nil {\n\t\tp.Fatal(\"ERROR:\" + err.Error())\n\t}\n\n\treturn nil\n}\n<commit_msg>Check packages before publishing them.<commit_after>\/\/ Copyright 2015 Cutehacks AS. All rights reserved.\n\/\/ License can be found in the LICENSE file.\n\npackage commands\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"qpm.io\/common\"\n\tmsg \"qpm.io\/common\/messages\"\n\t\"qpm.io\/qpm\/core\"\n)\n\ntype PublishCommand struct {\n\tBaseCommand\n\tPackageName string\n}\n\nfunc NewPublishCommand(ctx core.Context) *PublishCommand {\n\treturn &PublishCommand{\n\t\tBaseCommand: BaseCommand{\n\t\t\tCtx: ctx,\n\t\t},\n\t}\n}\n\nfunc (p PublishCommand) Description() string {\n\treturn \"Publishes a new module\"\n}\n\nfunc (p *PublishCommand) RegisterFlags(flags *flag.FlagSet) {\n\n}\n\nfunc get(name string, echoOff bool) string {\n\tvar val string\n\tfor {\n\t\tif echoOff {\n\t\t\tval = <-PromptPassword(name + \":\")\n\t\t} else {\n\t\t\tval = <-Prompt(name+\":\", \"\")\n\t\t}\n\t\tif val == \"\" {\n\t\t\tfmt.Printf(\"ERROR: Must enter a %s\\n\", name)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn val\n}\n\nfunc LoginPrompt(ctx context.Context, client msg.QpmClient) (string, error) {\n\n\temail := get(\"email\", false)\n\tpassword := get(\"password\", true)\n\n\tloginRequest := &msg.LoginRequest{\n\t\tEmail: email,\n\t\tPassword: password,\n\t\tCreate: false,\n\t}\n\n\tloginResp, err := client.Login(context.Background(), loginRequest)\n\n\tif err != nil {\n\t\tif grpc.Code(err) == codes.NotFound {\n\t\t\tfmt.Println(\"User not found. Confirm password to create a new user.\")\n\t\t\tconfirm := get(\"password\", true)\n\t\t\tif password != confirm {\n\t\t\t\treturn \"\", fmt.Errorf(\"Passwords do not match.\")\n\t\t\t}\n\n\t\t\tloginRequest.Create = true\n\t\t\tif loginResp, err = client.Login(context.Background(), loginRequest); err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t} else {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn loginResp.Token, nil\n}\n\nfunc (p *PublishCommand) Run() error {\n\n\ttoken, err := LoginPrompt(context.Background(), p.Ctx.Client)\n\n\tif err != nil {\n\t\tfmt.Printf(\"ERROR: %v\\n\", err)\n\t\treturn err\n\t}\n\n\tfmt.Println(\"Running check\")\n\tif err := NewCheckCommand(p.Ctx).Run(); err != nil {\n\t\tp.Fatal(err.Error())\n\t}\n\n\tfmt.Println(\"Publishing\")\n\twrapper, err := common.LoadPackage(\"\")\n\n\tif err != nil {\n\t\tp.Fatal(\"Cannot read \" + core.PackageFile + \": \" + err.Error())\n\t}\n\n\t_, err = p.Ctx.Client.Publish(context.Background(), &msg.PublishRequest{\n\t\tToken: token,\n\t\tPackageDescription: wrapper.Package,\n\t})\n\n\tif err != nil {\n\t\tp.Fatal(\"ERROR:\" + err.Error())\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/go:build windows\n\/\/ +build windows\n\npackage runtime_test\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"internal\/testenv\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"testing\"\n)\n\nfunc TestVectoredHandlerDontCrashOnLibrary(t *testing.T) {\n\tif *flagQuick {\n\t\tt.Skip(\"-quick\")\n\t}\n\tif runtime.GOARCH != \"amd64\" {\n\t\tt.Skip(\"this test can only run on windows\/amd64\")\n\t}\n\ttestenv.MustHaveGoBuild(t)\n\ttestenv.MustHaveExecPath(t, \"gcc\")\n\ttestprog.Lock()\n\tdefer testprog.Unlock()\n\tdir := t.TempDir()\n\n\t\/\/ build go dll\n\tdll := filepath.Join(dir, \"testwinlib.dll\")\n\tcmd := exec.Command(testenv.GoToolPath(t), \"build\", \"-o\", dll, \"--buildmode\", \"c-shared\", \"testdata\/testwinlib\/main.go\")\n\tout, err := testenv.CleanCmdEnv(cmd).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to build go library: %s\\n%s\", err, out)\n\t}\n\n\t\/\/ build c program\n\texe := filepath.Join(dir, \"test.exe\")\n\tcmd = exec.Command(\"gcc\", \"-L\"+dir, \"-I\"+dir, \"-ltestwinlib\", \"-o\", exe, \"testdata\/testwinlib\/main.c\")\n\tout, err = testenv.CleanCmdEnv(cmd).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to build c exe: %s\\n%s\", err, out)\n\t}\n\n\t\/\/ run test program\n\tcmd = exec.Command(exe)\n\tout, err = testenv.CleanCmdEnv(cmd).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"failure while running executable: %s\\n%s\", err, out)\n\t}\n\texpectedOutput := \"exceptionCount: 1\\ncontinueCount: 1\\n\"\n\t\/\/ cleaning output\n\tcleanedOut := strings.ReplaceAll(string(out), \"\\r\\n\", \"\\n\")\n\tif cleanedOut != expectedOutput {\n\t\tt.Errorf(\"expected output %q, got %q\", expectedOutput, cleanedOut)\n\t}\n}\n\nfunc sendCtrlBreak(pid int) error {\n\tkernel32, err := syscall.LoadDLL(\"kernel32.dll\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"LoadDLL: %v\\n\", err)\n\t}\n\tgenerateEvent, err := kernel32.FindProc(\"GenerateConsoleCtrlEvent\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"FindProc: %v\\n\", err)\n\t}\n\tresult, _, err := generateEvent.Call(syscall.CTRL_BREAK_EVENT, uintptr(pid))\n\tif result == 0 {\n\t\treturn fmt.Errorf(\"GenerateConsoleCtrlEvent: %v\\n\", err)\n\t}\n\treturn nil\n}\n\n\/\/ TestCtrlHandler tests that Go can gracefully handle closing the console window.\n\/\/ See https:\/\/golang.org\/issues\/41884.\nfunc TestCtrlHandler(t *testing.T) {\n\ttestenv.MustHaveGoBuild(t)\n\tt.Parallel()\n\n\t\/\/ build go program\n\texe := filepath.Join(t.TempDir(), \"test.exe\")\n\tcmd := exec.Command(testenv.GoToolPath(t), \"build\", \"-o\", exe, \"testdata\/testwinsignal\/main.go\")\n\tout, err := testenv.CleanCmdEnv(cmd).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to build go exe: %v\\n%s\", err, out)\n\t}\n\n\t\/\/ run test program\n\tcmd = exec.Command(exe)\n\tvar stderr bytes.Buffer\n\tcmd.Stderr = &stderr\n\toutPipe, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create stdout pipe: %v\", err)\n\t}\n\toutReader := bufio.NewReader(outPipe)\n\n\t\/\/ in a new command window\n\tconst _CREATE_NEW_CONSOLE = 0x00000010\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tCreationFlags: _CREATE_NEW_CONSOLE,\n\t\tHideWindow: true,\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\tt.Fatalf(\"Start failed: %v\", err)\n\t}\n\tdefer func() {\n\t\tcmd.Process.Kill()\n\t\tcmd.Wait()\n\t}()\n\n\t\/\/ wait for child to be ready to receive signals\n\tif line, err := outReader.ReadString('\\n'); err != nil {\n\t\tt.Fatalf(\"could not read stdout: %v\", err)\n\t} else if strings.TrimSpace(line) != \"ready\" {\n\t\tt.Fatalf(\"unexpected message: %s\", line)\n\t}\n\n\t\/\/ gracefully kill pid, this closes the command window\n\tif err := exec.Command(\"taskkill.exe\", \"\/pid\", strconv.Itoa(cmd.Process.Pid)).Run(); err != nil {\n\t\tt.Fatalf(\"failed to kill: %v\", err)\n\t}\n\n\t\/\/ check child received, handled SIGTERM\n\tif line, err := outReader.ReadString('\\n'); err != nil {\n\t\tt.Fatalf(\"could not read stdout: %v\", err)\n\t} else if expected, got := syscall.SIGTERM.String(), strings.TrimSpace(line); expected != got {\n\t\tt.Fatalf(\"Expected '%s' got: %s\", expected, got)\n\t}\n\n\t\/\/ check child exited gracefully, did not timeout\n\tif err := cmd.Wait(); err != nil {\n\t\tt.Fatalf(\"Program exited with error: %v\\n%s\", err, &stderr)\n\t}\n}\n\n\/\/ TestLibraryCtrlHandler tests that Go DLL allows calling program to handle console control events.\n\/\/ See https:\/\/golang.org\/issues\/35965.\nfunc TestLibraryCtrlHandler(t *testing.T) {\n\tif *flagQuick {\n\t\tt.Skip(\"-quick\")\n\t}\n\tif runtime.GOARCH != \"amd64\" {\n\t\tt.Skip(\"this test can only run on windows\/amd64\")\n\t}\n\ttestenv.MustHaveGoBuild(t)\n\ttestenv.MustHaveExecPath(t, \"gcc\")\n\ttestprog.Lock()\n\tdefer testprog.Unlock()\n\tdir := t.TempDir()\n\n\t\/\/ build go dll\n\tdll := filepath.Join(dir, \"dummy.dll\")\n\tcmd := exec.Command(testenv.GoToolPath(t), \"build\", \"-o\", dll, \"--buildmode\", \"c-shared\", \"testdata\/testwinlibsignal\/dummy.go\")\n\tout, err := testenv.CleanCmdEnv(cmd).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to build go library: %s\\n%s\", err, out)\n\t}\n\n\t\/\/ build c program\n\texe := filepath.Join(dir, \"test.exe\")\n\tcmd = exec.Command(\"gcc\", \"-o\", exe, \"testdata\/testwinlibsignal\/main.c\")\n\tout, err = testenv.CleanCmdEnv(cmd).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to build c exe: %s\\n%s\", err, out)\n\t}\n\n\t\/\/ run test program\n\tcmd = exec.Command(exe)\n\tvar stderr bytes.Buffer\n\tcmd.Stderr = &stderr\n\toutPipe, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create stdout pipe: %v\", err)\n\t}\n\toutReader := bufio.NewReader(outPipe)\n\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tCreationFlags: syscall.CREATE_NEW_PROCESS_GROUP,\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\tt.Fatalf(\"Start failed: %v\", err)\n\t}\n\n\terrCh := make(chan error, 1)\n\tgo func() {\n\t\tif line, err := outReader.ReadString('\\n'); err != nil {\n\t\t\terrCh <- fmt.Errorf(\"could not read stdout: %v\", err)\n\t\t} else if strings.TrimSpace(line) != \"ready\" {\n\t\t\terrCh <- fmt.Errorf(\"unexpected message: %v\", line)\n\t\t} else {\n\t\t\terrCh <- sendCtrlBreak(cmd.Process.Pid)\n\t\t}\n\t}()\n\n\tif err := <-errCh; err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := cmd.Wait(); err != nil {\n\t\tt.Fatalf(\"Program exited with error: %v\\n%s\", err, &stderr)\n\t}\n}\n<commit_msg>runtime: replace --buildmode with -buildmode in tests<commit_after>\/\/go:build windows\n\/\/ +build windows\n\npackage runtime_test\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"internal\/testenv\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\t\"testing\"\n)\n\nfunc TestVectoredHandlerDontCrashOnLibrary(t *testing.T) {\n\tif *flagQuick {\n\t\tt.Skip(\"-quick\")\n\t}\n\tif runtime.GOARCH != \"amd64\" {\n\t\tt.Skip(\"this test can only run on windows\/amd64\")\n\t}\n\ttestenv.MustHaveGoBuild(t)\n\ttestenv.MustHaveExecPath(t, \"gcc\")\n\ttestprog.Lock()\n\tdefer testprog.Unlock()\n\tdir := t.TempDir()\n\n\t\/\/ build go dll\n\tdll := filepath.Join(dir, \"testwinlib.dll\")\n\tcmd := exec.Command(testenv.GoToolPath(t), \"build\", \"-o\", dll, \"-buildmode\", \"c-shared\", \"testdata\/testwinlib\/main.go\")\n\tout, err := testenv.CleanCmdEnv(cmd).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to build go library: %s\\n%s\", err, out)\n\t}\n\n\t\/\/ build c program\n\texe := filepath.Join(dir, \"test.exe\")\n\tcmd = exec.Command(\"gcc\", \"-L\"+dir, \"-I\"+dir, \"-ltestwinlib\", \"-o\", exe, \"testdata\/testwinlib\/main.c\")\n\tout, err = testenv.CleanCmdEnv(cmd).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to build c exe: %s\\n%s\", err, out)\n\t}\n\n\t\/\/ run test program\n\tcmd = exec.Command(exe)\n\tout, err = testenv.CleanCmdEnv(cmd).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"failure while running executable: %s\\n%s\", err, out)\n\t}\n\texpectedOutput := \"exceptionCount: 1\\ncontinueCount: 1\\n\"\n\t\/\/ cleaning output\n\tcleanedOut := strings.ReplaceAll(string(out), \"\\r\\n\", \"\\n\")\n\tif cleanedOut != expectedOutput {\n\t\tt.Errorf(\"expected output %q, got %q\", expectedOutput, cleanedOut)\n\t}\n}\n\nfunc sendCtrlBreak(pid int) error {\n\tkernel32, err := syscall.LoadDLL(\"kernel32.dll\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"LoadDLL: %v\\n\", err)\n\t}\n\tgenerateEvent, err := kernel32.FindProc(\"GenerateConsoleCtrlEvent\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"FindProc: %v\\n\", err)\n\t}\n\tresult, _, err := generateEvent.Call(syscall.CTRL_BREAK_EVENT, uintptr(pid))\n\tif result == 0 {\n\t\treturn fmt.Errorf(\"GenerateConsoleCtrlEvent: %v\\n\", err)\n\t}\n\treturn nil\n}\n\n\/\/ TestCtrlHandler tests that Go can gracefully handle closing the console window.\n\/\/ See https:\/\/golang.org\/issues\/41884.\nfunc TestCtrlHandler(t *testing.T) {\n\ttestenv.MustHaveGoBuild(t)\n\tt.Parallel()\n\n\t\/\/ build go program\n\texe := filepath.Join(t.TempDir(), \"test.exe\")\n\tcmd := exec.Command(testenv.GoToolPath(t), \"build\", \"-o\", exe, \"testdata\/testwinsignal\/main.go\")\n\tout, err := testenv.CleanCmdEnv(cmd).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to build go exe: %v\\n%s\", err, out)\n\t}\n\n\t\/\/ run test program\n\tcmd = exec.Command(exe)\n\tvar stderr bytes.Buffer\n\tcmd.Stderr = &stderr\n\toutPipe, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create stdout pipe: %v\", err)\n\t}\n\toutReader := bufio.NewReader(outPipe)\n\n\t\/\/ in a new command window\n\tconst _CREATE_NEW_CONSOLE = 0x00000010\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tCreationFlags: _CREATE_NEW_CONSOLE,\n\t\tHideWindow: true,\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\tt.Fatalf(\"Start failed: %v\", err)\n\t}\n\tdefer func() {\n\t\tcmd.Process.Kill()\n\t\tcmd.Wait()\n\t}()\n\n\t\/\/ wait for child to be ready to receive signals\n\tif line, err := outReader.ReadString('\\n'); err != nil {\n\t\tt.Fatalf(\"could not read stdout: %v\", err)\n\t} else if strings.TrimSpace(line) != \"ready\" {\n\t\tt.Fatalf(\"unexpected message: %s\", line)\n\t}\n\n\t\/\/ gracefully kill pid, this closes the command window\n\tif err := exec.Command(\"taskkill.exe\", \"\/pid\", strconv.Itoa(cmd.Process.Pid)).Run(); err != nil {\n\t\tt.Fatalf(\"failed to kill: %v\", err)\n\t}\n\n\t\/\/ check child received, handled SIGTERM\n\tif line, err := outReader.ReadString('\\n'); err != nil {\n\t\tt.Fatalf(\"could not read stdout: %v\", err)\n\t} else if expected, got := syscall.SIGTERM.String(), strings.TrimSpace(line); expected != got {\n\t\tt.Fatalf(\"Expected '%s' got: %s\", expected, got)\n\t}\n\n\t\/\/ check child exited gracefully, did not timeout\n\tif err := cmd.Wait(); err != nil {\n\t\tt.Fatalf(\"Program exited with error: %v\\n%s\", err, &stderr)\n\t}\n}\n\n\/\/ TestLibraryCtrlHandler tests that Go DLL allows calling program to handle console control events.\n\/\/ See https:\/\/golang.org\/issues\/35965.\nfunc TestLibraryCtrlHandler(t *testing.T) {\n\tif *flagQuick {\n\t\tt.Skip(\"-quick\")\n\t}\n\tif runtime.GOARCH != \"amd64\" {\n\t\tt.Skip(\"this test can only run on windows\/amd64\")\n\t}\n\ttestenv.MustHaveGoBuild(t)\n\ttestenv.MustHaveExecPath(t, \"gcc\")\n\ttestprog.Lock()\n\tdefer testprog.Unlock()\n\tdir := t.TempDir()\n\n\t\/\/ build go dll\n\tdll := filepath.Join(dir, \"dummy.dll\")\n\tcmd := exec.Command(testenv.GoToolPath(t), \"build\", \"-o\", dll, \"-buildmode\", \"c-shared\", \"testdata\/testwinlibsignal\/dummy.go\")\n\tout, err := testenv.CleanCmdEnv(cmd).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to build go library: %s\\n%s\", err, out)\n\t}\n\n\t\/\/ build c program\n\texe := filepath.Join(dir, \"test.exe\")\n\tcmd = exec.Command(\"gcc\", \"-o\", exe, \"testdata\/testwinlibsignal\/main.c\")\n\tout, err = testenv.CleanCmdEnv(cmd).CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to build c exe: %s\\n%s\", err, out)\n\t}\n\n\t\/\/ run test program\n\tcmd = exec.Command(exe)\n\tvar stderr bytes.Buffer\n\tcmd.Stderr = &stderr\n\toutPipe, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create stdout pipe: %v\", err)\n\t}\n\toutReader := bufio.NewReader(outPipe)\n\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tCreationFlags: syscall.CREATE_NEW_PROCESS_GROUP,\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\tt.Fatalf(\"Start failed: %v\", err)\n\t}\n\n\terrCh := make(chan error, 1)\n\tgo func() {\n\t\tif line, err := outReader.ReadString('\\n'); err != nil {\n\t\t\terrCh <- fmt.Errorf(\"could not read stdout: %v\", err)\n\t\t} else if strings.TrimSpace(line) != \"ready\" {\n\t\t\terrCh <- fmt.Errorf(\"unexpected message: %v\", line)\n\t\t} else {\n\t\t\terrCh <- sendCtrlBreak(cmd.Process.Pid)\n\t\t}\n\t}()\n\n\tif err := <-errCh; err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := cmd.Wait(); err != nil {\n\t\tt.Fatalf(\"Program exited with error: %v\\n%s\", err, &stderr)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package infrastructure\n\nimport (\n\t\"io\/ioutil\"\n\n\t\"gopkg.in\/yaml.v2\"\n)\n\ntype Configuration struct {\n\tPort string `yaml:\"port\"`\n\tClientID string `yaml:\"clientID\"`\n\tClientSecret string `yaml:\"clientSecret\"`\n\tSalt string `yaml:\"salt\"`\n\tHost string `yaml:\"host\"`\n\tScopes []string `yaml:\"scopes,flow\"`\n}\n\nfunc GetConfiguration(path string) (*Configuration, error) {\n\n\tdata, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconf := &Configuration{}\n\n\terr = yaml.Unmarshal(data, conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn conf, nil\n\n}\n<commit_msg>Rename Host to APIHost<commit_after>package infrastructure\n\nimport (\n\t\"io\/ioutil\"\n\n\t\"gopkg.in\/yaml.v2\"\n)\n\n\/\/ Configuration stores the fields to configure the application\ntype Configuration struct {\n\tPort string `yaml:\"port\"`\n\tClientID string `yaml:\"clientID\"`\n\tClientSecret string `yaml:\"clientSecret\"`\n\tSalt string `yaml:\"salt\"`\n\tScopes []string `yaml:\"scopes,flow\"`\n\tAPIHost string\n}\n\n\/\/ GetConfiguration returns the configuration stored in a file\nfunc GetConfiguration(path string) (*Configuration, error) {\n\n\tdata, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconf := &Configuration{}\n\n\terr = yaml.Unmarshal(data, conf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn conf, nil\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2014 Daniel Harrison\n\npackage hfile\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"encoding\/hex\"\n\t\"log\"\n)\n\ntype Scanner struct {\n\treader *Reader\n\tidx int\n\tblock []byte\n\tpos *int\n\tbuf []byte\n\tOrderedOps\n}\n\nfunc NewScanner(r *Reader) *Scanner {\n\tvar buf []byte\n\tif r.compressionCodec > CompressionNone {\n\t\tbuf = make([]byte, int(float64(r.totalUncompressedDataBytes\/uint64(len(r.index)))*1.5))\n\t}\n\treturn &Scanner{r, 0, nil, nil, buf, OrderedOps{nil}}\n}\n\nfunc (s *Scanner) Reset() {\n\ts.idx = 0\n\ts.block = nil\n\ts.pos = nil\n\ts.ResetState()\n}\n\nfunc (s *Scanner) blockFor(key []byte) ([]byte, error, bool) {\n\terr := s.CheckIfKeyOutOfOrder(key)\n\tif err != nil {\n\t\treturn nil, err, false\n\t}\n\n\tif s.reader.index[s.idx].IsAfter(key) {\n\t\tif s.reader.Debug {\n\t\t\tlog.Printf(\"[Scanner.blockFor] curBlock after key %s (cur: %d, start: %s)\\n\",\n\t\t\t\thex.EncodeToString(key),\n\t\t\t\ts.idx,\n\t\t\t\thex.EncodeToString(s.reader.index[s.idx].firstKeyBytes),\n\t\t\t)\n\t\t}\n\t\treturn nil, nil, false\n\t}\n\n\tidx := s.reader.FindBlock(s.idx, key)\n\tif s.reader.Debug {\n\t\tlog.Printf(\"[Scanner.blockFor] findBlock (key: %s) picked %d (starts: %s). Cur: %d (starts: %s)\\n\",\n\t\t\thex.EncodeToString(key),\n\t\t\tidx,\n\t\t\thex.EncodeToString(s.reader.index[idx].firstKeyBytes),\n\t\t\ts.idx,\n\t\t\thex.EncodeToString(s.reader.index[s.idx].firstKeyBytes),\n\t\t)\n\t}\n\n\tif idx != s.idx || s.block == nil { \/\/ need to load a new block\n\t\tdata, err := s.reader.GetBlockBuf(idx, s.buf)\n\t\tif err != nil {\n\t\t\tif s.reader.Debug {\n\t\t\t\tlog.Printf(\"[Scanner.blockFor] read err %s (key: %s, idx: %d, start: %s)\\n\",\n\t\t\t\t\terr,\n\t\t\t\t\thex.EncodeToString(key),\n\t\t\t\t\tidx,\n\t\t\t\t\thex.EncodeToString(s.reader.index[idx].firstKeyBytes),\n\t\t\t\t)\n\t\t\t}\n\t\t\treturn nil, err, false\n\t\t}\n\t\ti := 8\n\t\ts.pos = &i\n\t\ts.idx = idx\n\t\ts.block = data\n\t} else {\n\t\tif s.reader.Debug {\n\t\t\tlog.Println(\"[Scanner.blockFor] Re-using current block\")\n\t\t}\n\t}\n\n\treturn s.block, nil, true\n}\n\nfunc (s *Scanner) GetFirst(key []byte) ([]byte, error, bool) {\n\tdata, err, ok := s.blockFor(key)\n\n\tif !ok {\n\t\tif s.reader.Debug {\n\t\t\tlog.Printf(\"[Scanner.GetFirst] No Block for key: %s (err: %s, found: %s)\\n\", hex.EncodeToString(key), err, ok)\n\t\t}\n\t\treturn nil, err, ok\n\t}\n\n\tif s.reader.Debug {\n\t\tlog.Printf(\"[Scanner.GetFirst] Searching Block for key: %s (pos: %d)\\n\", hex.EncodeToString(key), *s.pos)\n\t}\n\tvalue, _, found := s.getValuesFromBuffer(data, s.pos, key, true)\n\tif s.reader.Debug {\n\t\tlog.Printf(\"[Scanner.GetFirst] After pos pos: %d\\n\", *s.pos)\n\t}\n\treturn value, nil, found\n}\n\nfunc (s *Scanner) GetAll(key []byte) ([][]byte, error) {\n\tdata, err, ok := s.blockFor(key)\n\n\tif !ok {\n\t\tif s.reader.Debug {\n\t\t\tlog.Printf(\"[Scanner.GetAll] No Block for key: %s (err: %s, found: %s)\\n\", hex.EncodeToString(key), err, ok)\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t_, found, _ := s.getValuesFromBuffer(data, s.pos, key, false)\n\treturn found, err\n}\n\nfunc (s *Scanner) getValuesFromBuffer(buf []byte, pos *int, key []byte, first bool) ([]byte, [][]byte, bool) {\n\tvar acc [][]byte\n\n\ti := *pos\n\n\tif s.reader.Debug {\n\t\tlog.Printf(\"[Scanner.getValuesFromBuffer] buf before %d \/ %d\\n\", i, len(buf))\n\t}\n\n\tfor len(buf)-i > 8 {\n\t\tkeyLen := int(binary.BigEndian.Uint32(buf[i : i+4]))\n\t\tvalLen := int(binary.BigEndian.Uint32(buf[i+4 : i+8]))\n\n\t\tcmp := bytes.Compare(buf[i+8:i+8+keyLen], key)\n\n\t\tswitch {\n\t\tcase cmp == 0:\n\t\t\ti += 8 + keyLen\n\t\t\tret := make([]byte, valLen)\n\t\t\tcopy(ret, buf[i:i+valLen])\n\n\t\t\tif first {\n\t\t\t\t*pos = i + valLen\n\t\t\t\treturn ret, nil, true\n\t\t\t} else {\n\t\t\t\tacc = append(acc, ret)\n\t\t\t\ti += valLen \/\/ now on next length pair\n\t\t\t}\n\t\tcase cmp > 0:\n\t\t\t*pos = i\n\t\t\treturn nil, acc, len(acc) > 0\n\t\tdefault:\n\t\t\ti += 8 + keyLen + valLen\n\t\t}\n\t}\n\n\t*pos = i\n\n\tif s.reader.Debug {\n\t\tlog.Printf(\"[Scanner.getValuesFromBuffer] walked off block\\n\")\n\t}\n\treturn nil, acc, len(acc) > 0\n}\n\nfunc (s *Scanner) Release() {\n\ts.Reset()\n\tselect {\n\tcase s.reader.scannerCache <- s:\n\tdefault:\n\t}\n}\n<commit_msg>consolidate offset change<commit_after>\/\/ Copyright (C) 2014 Daniel Harrison\n\npackage hfile\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"encoding\/hex\"\n\t\"log\"\n)\n\ntype Scanner struct {\n\treader *Reader\n\tidx int\n\tblock []byte\n\tpos *int\n\tbuf []byte\n\tOrderedOps\n}\n\nfunc NewScanner(r *Reader) *Scanner {\n\tvar buf []byte\n\tif r.compressionCodec > CompressionNone {\n\t\tbuf = make([]byte, int(float64(r.totalUncompressedDataBytes\/uint64(len(r.index)))*1.5))\n\t}\n\treturn &Scanner{r, 0, nil, nil, buf, OrderedOps{nil}}\n}\n\nfunc (s *Scanner) Reset() {\n\ts.idx = 0\n\ts.block = nil\n\ts.pos = nil\n\ts.ResetState()\n}\n\nfunc (s *Scanner) blockFor(key []byte) ([]byte, error, bool) {\n\terr := s.CheckIfKeyOutOfOrder(key)\n\tif err != nil {\n\t\treturn nil, err, false\n\t}\n\n\tif s.reader.index[s.idx].IsAfter(key) {\n\t\tif s.reader.Debug {\n\t\t\tlog.Printf(\"[Scanner.blockFor] curBlock after key %s (cur: %d, start: %s)\\n\",\n\t\t\t\thex.EncodeToString(key),\n\t\t\t\ts.idx,\n\t\t\t\thex.EncodeToString(s.reader.index[s.idx].firstKeyBytes),\n\t\t\t)\n\t\t}\n\t\treturn nil, nil, false\n\t}\n\n\tidx := s.reader.FindBlock(s.idx, key)\n\tif s.reader.Debug {\n\t\tlog.Printf(\"[Scanner.blockFor] findBlock (key: %s) picked %d (starts: %s). Cur: %d (starts: %s)\\n\",\n\t\t\thex.EncodeToString(key),\n\t\t\tidx,\n\t\t\thex.EncodeToString(s.reader.index[idx].firstKeyBytes),\n\t\t\ts.idx,\n\t\t\thex.EncodeToString(s.reader.index[s.idx].firstKeyBytes),\n\t\t)\n\t}\n\n\tif idx != s.idx || s.block == nil { \/\/ need to load a new block\n\t\tdata, err := s.reader.GetBlockBuf(idx, s.buf)\n\t\tif err != nil {\n\t\t\tif s.reader.Debug {\n\t\t\t\tlog.Printf(\"[Scanner.blockFor] read err %s (key: %s, idx: %d, start: %s)\\n\",\n\t\t\t\t\terr,\n\t\t\t\t\thex.EncodeToString(key),\n\t\t\t\t\tidx,\n\t\t\t\t\thex.EncodeToString(s.reader.index[idx].firstKeyBytes),\n\t\t\t\t)\n\t\t\t}\n\t\t\treturn nil, err, false\n\t\t}\n\t\ti := 8\n\t\ts.pos = &i\n\t\ts.idx = idx\n\t\ts.block = data\n\t} else {\n\t\tif s.reader.Debug {\n\t\t\tlog.Println(\"[Scanner.blockFor] Re-using current block\")\n\t\t}\n\t}\n\n\treturn s.block, nil, true\n}\n\nfunc (s *Scanner) GetFirst(key []byte) ([]byte, error, bool) {\n\tdata, err, ok := s.blockFor(key)\n\n\tif !ok {\n\t\tif s.reader.Debug {\n\t\t\tlog.Printf(\"[Scanner.GetFirst] No Block for key: %s (err: %s, found: %s)\\n\", hex.EncodeToString(key), err, ok)\n\t\t}\n\t\treturn nil, err, ok\n\t}\n\n\tif s.reader.Debug {\n\t\tlog.Printf(\"[Scanner.GetFirst] Searching Block for key: %s (pos: %d)\\n\", hex.EncodeToString(key), *s.pos)\n\t}\n\tvalue, _, found := s.getValuesFromBuffer(data, s.pos, key, true)\n\tif s.reader.Debug {\n\t\tlog.Printf(\"[Scanner.GetFirst] After pos pos: %d\\n\", *s.pos)\n\t}\n\treturn value, nil, found\n}\n\nfunc (s *Scanner) GetAll(key []byte) ([][]byte, error) {\n\tdata, err, ok := s.blockFor(key)\n\n\tif !ok {\n\t\tif s.reader.Debug {\n\t\t\tlog.Printf(\"[Scanner.GetAll] No Block for key: %s (err: %s, found: %s)\\n\", hex.EncodeToString(key), err, ok)\n\t\t}\n\t\treturn nil, err\n\t}\n\n\t_, found, _ := s.getValuesFromBuffer(data, s.pos, key, false)\n\treturn found, err\n}\n\nfunc (s *Scanner) getValuesFromBuffer(buf []byte, pos *int, key []byte, first bool) ([]byte, [][]byte, bool) {\n\tvar acc [][]byte\n\n\ti := *pos\n\n\tif s.reader.Debug {\n\t\tlog.Printf(\"[Scanner.getValuesFromBuffer] buf before %d \/ %d\\n\", i, len(buf))\n\t}\n\n\tfor len(buf)-i > 8 {\n\t\tkeyLen := int(binary.BigEndian.Uint32(buf[i : i+4]))\n\t\tvalLen := int(binary.BigEndian.Uint32(buf[i+4 : i+8]))\n\n\t\tcmp := bytes.Compare(buf[i+8:i+8+keyLen], key)\n\n\t\tswitch {\n\t\tcase cmp == 0:\n\t\t\ti += 8 + keyLen\n\n\t\t\tret := make([]byte, valLen)\n\t\t\tcopy(ret, buf[i:i+valLen])\n\n\t\t\ti += valLen \/\/ now on next length pair\n\n\t\t\tif first {\n\t\t\t\t*pos = i\n\t\t\t\treturn ret, nil, true\n\t\t\t}\n\t\t\tacc = append(acc, ret)\n\t\tcase cmp > 0:\n\t\t\t*pos = i\n\t\t\treturn nil, acc, len(acc) > 0\n\t\tdefault:\n\t\t\ti += 8 + keyLen + valLen\n\t\t}\n\t}\n\n\t*pos = i\n\n\tif s.reader.Debug {\n\t\tlog.Printf(\"[Scanner.getValuesFromBuffer] walked off block\\n\")\n\t}\n\treturn nil, acc, len(acc) > 0\n}\n\nfunc (s *Scanner) Release() {\n\ts.Reset()\n\tselect {\n\tcase s.reader.scannerCache <- s:\n\tdefault:\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package wuxia\n\nfunc entryScript() string {\n\treturn `\nvar System=sys();\nvar Tpl={};\nTpl.funcs={};\nTpl.getTplFuncs=function(){\n\tvar rst=[];\n\tfor (var prop in Tpl.funcs){\n\t\tif (Tpl.funcs.hasOwnProperty(prop)){\n\t\t\trst.push(prop);\n\t\t}\n\t}\n\treturn rst;\n}\n`\n}\n<commit_msg>Update entryScript<commit_after>package wuxia\n\nfunc entryScript() string {\n\treturn `\nvar System=sys();\nvar Tpl={};\nTpl.funcs={};\nTpl.getTplFuncs=function(){\n\tvar rst=[];\n\tfor (var prop in Tpl.funcs){\n\t\tif (Tpl.funcs.hasOwnProperty(prop)){\n\t\t\trst.push(prop);\n\t\t}\n\t}\n\treturn rst;\n}\n\nfunction getCurrentSys(){\n\treturn System;\n}\n`\n}\n<|endoftext|>"} {"text":"<commit_before>package sde\n\nimport (\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n)\n\nvar (\n\tErrTypeDoesNotExist = fmt.Errorf(\"sde: type does not exist\")\n\tErrSDEIsNil = fmt.Errorf(\"sde: SDE struct was nil\")\n\tErrTypeIsNil = fmt.Errorf(\"sde: SDEType struct was nil\")\n)\n\n\/\/ Load loads an encoding\/gob encoded SDE object from file\nfunc Load(filename string) (*SDE, error) {\n\tif f, err := os.OpenFile(filename, os.O_RDONLY, 0777); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\ts := &SDE{}\n\t\tdec := gob.NewDecoder(f)\n\t\tif err := dec.Decode(s); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn s, nil\n\t}\n\treturn nil, nil\n}\n\n\/\/ LoadReader returns an SDE pointer given an io.Reader to read from\nfunc LoadReader(r io.Reader) (*SDE, error) {\n\ts := &SDE{}\n\tdec := gob.NewDecoder(r)\n\tif err := dec.Decode(s); err != nil {\n\t\treturn nil, err\n\t}\n\treturn s, nil\n}\n\n\/\/ Save saves a provided SDE object to disk\nfunc Save(filename string, s *SDE) error {\n\tif f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0777); err != nil {\n\t\treturn err\n\t} else {\n\t\tenc := gob.NewEncoder(f)\n\t\tif err := enc.Encode(s); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/*\n\tSDE is a struct that owns every type for a given SDE.\n \t\t@TODO:\n\t\tAdd more old methods:\n\t\t\tGetTypeByName\n\t\t\tGetTypeByTag\n\t\t\t...\n\t\tAdd lookups:\n\t\t\tTypeName\n\t\t\tAttrribute[\"mDiplsayName\"]\n\t\t\tUse a map that isn't gobed and generate on load(use goroutine)\n*\/\ntype SDE struct {\n\tVersion string\n\tOfficial bool\n\tTypes map[int]*SDEType\n\tCache *Cache\n}\n\n\/\/ Cache is a struct that is included within SDE.\n\/\/\n\/\/ Whenever an SDE file is loaded we populate this and whenever an SDE is\n\/\/ saved we make the pointer nil. The struct is supposed to provide\n\/\/ faster lookups for things like TypeName and mDisplayName\ntype Cache struct {\n\tTypeNameLookup map[string]*SDEType\n\tDisplayNameLookup map[string]*SDEType\n}\n\n\/\/ GetType returns a pointer to an SDEType or nil and an error\nfunc (s *SDE) GetType(id int) (sdetype *SDEType, err error) {\n\tif s == nil {\n\t\treturn nil, ErrSDEIsNil\n\t}\n\tif v, ok := s.Types[id]; ok {\n\t\tif v == nil {\n\t\t\treturn nil, ErrTypeIsNil\n\t\t}\n\t\treturn v, nil\n\t} else {\n\t\treturn nil, ErrTypeDoesNotExist\n\t}\n\n}\n\n\/\/ Search checks for the existance of ss in mDisplayName or TypeName in every type and returns\n\/\/ a slice of pointers to SDETypes\nfunc (s *SDE) Search(ss string) (sdetypes []*SDEType, err error) {\n\tout := make([]*SDEType, 0)\n\tfor _, v := range s.Types {\n\t\tif strings.Contains(strings.ToLower(v.GetName()), strings.ToLower(ss)) || strings.Contains(strings.ToLower(v.TypeName), strings.ToLower(ss)) {\n\t\t\tout = append(out, v)\n\t\t}\n\t}\n\treturn out, nil\n}\n\n\/\/ VerifySDEPrint prints the entire list of types\/typeids to check for DB corruption\nfunc (s *SDE) VerifySDEPrint() {\n\tfor k, v := range s.Types {\n\t\tfmt.Printf(\" [%v][%v] %v at %p\\n\", k, v.TypeID, v.GetName(), v)\n\t}\n}\n\n\/\/ FindTypeThatReferences returns any time that refers to the given type\n\/\/\n\/\/ Suprising how fast this method runs\n\/\/\n\/\/ @TODO:\n\/\/\tWhen our caching system is finished update this to not iterate all ~3400 types lol\nfunc (s *SDE) FindTypesThatReference(t *SDEType) ([]*SDEType, error) {\n\tout := make([]*SDEType, 0)\n\tfor _, v := range s.Types {\n\t\tfor _, attr := range v.Attributes {\n\t\t\tswitch tid := attr.(type) {\n\t\t\tcase int:\n\t\t\t\tif tid == t.TypeID && !sdeslicecontains(out, tid) {\n\t\t\t\t\tout = append(out, v)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn out, nil\n}\n\n\/\/ Size estimates the memory usage of the SDE instance.\nfunc (s *SDE) Size() int {\n\tbase := int(reflect.ValueOf(*s).Type().Size())\n\tfor _, v := range s.Types {\n\t\tvv := int(reflect.ValueOf(*v).Type().Size())\n\t\tfor _, a := range v.Attributes {\n\t\t\tswitch reflect.TypeOf(a).Kind() {\n\t\t\tcase reflect.String:\n\t\t\t\tvv += len(a.(string))\n\t\t\t\tfallthrough\n\t\t\tdefault:\n\t\t\t\tvv += int(reflect.ValueOf(a).Type().Size())\n\t\t\t}\n\t\t}\n\t\tbase += vv\n\t}\n\treturn base\n}\n\n\/\/ Internal methods\n\n\/\/ Use whenever possible. Benchmarks have shown it takes roughly the same\n\/\/ amount of time to generate the cache as it does to perform one SDEType\n\/\/ level lookup. Let alone one that looks into SDEType.Attributes\nfunc (s *SDE) generateCache() {\n\ts.Cache = &Cache{}\n\ts.Cache.TypeNameLookup = make(map[string]*SDEType)\n\tfor _, v := range s.Types {\n\t\ts.Cache.TypeNameLookup[v.TypeName] = v\n\t}\n}\n\nfunc (s *SDE) lookupByTypeName(typeName string) (*SDEType, error) {\n\tif s.Cache != nil { \/\/ Fast lookup\n\t\tif v, ok := s.Cache.TypeNameLookup[typeName]; ok {\n\t\t\treturn v, nil\n\t\t} else {\n\t\t\treturn nil, ErrTypeDoesNotExist\n\t\t}\n\t}\n\t\/\/ Default to slow lookup if cache is nil\n\n\tfor _, v := range s.Types {\n\t\tif v.TypeName == typeName {\n\t\t\treturn v, nil\n\t\t}\n\t}\n\treturn nil, ErrTypeDoesNotExist\n}\n\n\/*\n\tSDEType is a struct representing a single individual type in an SDE.\n\t@TODO:\n\t\tAdd old methods.\n\t\tMake some cleaner way than before of checking for the existance of *.*... atributes:\n\t\tOptions:\n\t\t\t1) Substruct them out and create a parser for each(yuck)\n\t\t\t2) Map[string]interface{} parser(ehh)\n*\/\ntype SDEType struct {\n\tTypeID int\n\tTypeName string\n\tAttributes map[string]interface{}\n}\n\n\/\/ GetName returns the string value of Attributes[\"mDisplayName\"] if it exists. Otherwise we return TypeName\nfunc (s *SDEType) GetName() string {\n\tif v, ok := s.Attributes[\"mDisplayName\"]; ok {\n\t\treturn v.(string)\n\t}\n\treturn s.TypeName\n}\n\nfunc (s *SDEType) GetAttribute(attr string) interface{} {\n\treturn s.Attributes[attr]\n}\n\n\/\/ CompareTo prints the differences between two types\nfunc (s *SDEType) CompareTo(t *SDEType) {\n\t\/\/ @TODO: Print differences between typenames\/typeid\n\tfor key, value := range s.Attributes {\n\t\tif v, ok := t.Attributes[key]; ok {\n\t\t\tif value != v {\n\t\t\t\tfmt.Printf(\"CHANGE: %v: %v\\n\", value, v)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Printf(\"ADD: %v: %v\\n\", key, value)\n\t\t}\n\t}\n\tfor key, value := range t.Attributes {\n\t\tif _, ok := s.Attributes[key]; ok {\n\t\t} else {\n\t\t\tfmt.Printf(\"REMOVE: %v: %v\\n\", key, value)\n\t\t}\n\t}\n}\n\n\/*\n\tHelpers\n*\/\n\nfunc sdeslicecontains(s []*SDEType, tid int) bool {\n\tfor _, v := range s {\n\t\tif v.TypeID == tid {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc init() {\n\tgob.Register(SDE{})\n\tgob.Register(SDEType{})\n}\n<commit_msg>SDE Parents; GetAttribute checks projectiles<commit_after>package sde\n\nimport (\n\t\"encoding\/gob\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n)\n\nvar (\n\tErrTypeDoesNotExist = fmt.Errorf(\"sde: type does not exist\")\n\tErrSDEIsNil = fmt.Errorf(\"sde: SDE struct was nil\")\n\tErrTypeIsNil = fmt.Errorf(\"sde: SDEType struct was nil\")\n)\n\n\/\/ Load loads an encoding\/gob encoded SDE object from file\nfunc Load(filename string) (*SDE, error) {\n\tif f, err := os.OpenFile(filename, os.O_RDONLY, 0777); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\ts := &SDE{}\n\t\tdec := gob.NewDecoder(f)\n\t\tif err := dec.Decode(s); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn s, nil\n\t}\n\treturn nil, nil\n}\n\n\/\/ LoadReader returns an SDE pointer given an io.Reader to read from\nfunc LoadReader(r io.Reader) (*SDE, error) {\n\ts := &SDE{}\n\tdec := gob.NewDecoder(r)\n\tif err := dec.Decode(s); err != nil {\n\t\treturn nil, err\n\t}\n\treturn s, nil\n}\n\n\/\/ Save saves a provided SDE object to disk\nfunc Save(filename string, s *SDE) error {\n\tif f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0777); err != nil {\n\t\treturn err\n\t} else {\n\t\tenc := gob.NewEncoder(f)\n\t\tif err := enc.Encode(s); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n\/*\n\tSDE is a struct that owns every type for a given SDE.\n \t\t@TODO:\n\t\tAdd more old methods:\n\t\t\tGetTypeByName\n\t\t\tGetTypeByTag\n\t\t\t...\n\t\tAdd lookups:\n\t\t\tTypeName\n\t\t\tAttrribute[\"mDiplsayName\"]\n\t\t\tUse a map that isn't gobed and generate on load(use goroutine)\n*\/\ntype SDE struct {\n\tVersion string\n\tOfficial bool\n\tTypes map[int]*SDEType\n\tCache *Cache\n}\n\n\/\/ Cache is a struct that is included within SDE.\n\/\/\n\/\/ Whenever an SDE file is loaded we populate this and whenever an SDE is\n\/\/ saved we make the pointer nil. The struct is supposed to provide\n\/\/ faster lookups for things like TypeName and mDisplayName\ntype Cache struct {\n\tTypeNameLookup map[string]*SDEType\n\tDisplayNameLookup map[string]*SDEType\n}\n\n\/\/ GetType returns a pointer to an SDEType or nil and an error\nfunc (s *SDE) GetType(id int) (sdetype *SDEType, err error) {\n\tif s == nil {\n\t\treturn nil, ErrSDEIsNil\n\t}\n\tif v, ok := s.Types[id]; ok {\n\t\tif v == nil {\n\t\t\treturn nil, ErrTypeIsNil\n\t\t}\n\t\tv.Parent = s\n\t\treturn v, nil\n\t} else {\n\t\treturn nil, ErrTypeDoesNotExist\n\t}\n\n}\n\n\/\/ Search checks for the existance of ss in mDisplayName or TypeName in every type and returns\n\/\/ a slice of pointers to SDETypes\nfunc (s *SDE) Search(ss string) (sdetypes []*SDEType, err error) {\n\tout := make([]*SDEType, 0)\n\tfor _, v := range s.Types {\n\t\tif strings.Contains(strings.ToLower(v.GetName()), strings.ToLower(ss)) || strings.Contains(strings.ToLower(v.TypeName), strings.ToLower(ss)) {\n\t\t\tout = append(out, v)\n\t\t}\n\t}\n\treturn out, nil\n}\n\n\/\/ VerifySDEPrint prints the entire list of types\/typeids to check for DB corruption\nfunc (s *SDE) VerifySDEPrint() {\n\tfor k, v := range s.Types {\n\t\tfmt.Printf(\" [%v][%v] %v at %p\\n\", k, v.TypeID, v.GetName(), v)\n\t}\n}\n\n\/\/ FindTypeThatReferences returns any time that refers to the given type\n\/\/\n\/\/ Suprising how fast this method runs\n\/\/\n\/\/ @TODO:\n\/\/\tWhen our caching system is finished update this to not iterate all ~3400 types lol\nfunc (s *SDE) FindTypesThatReference(t *SDEType) ([]*SDEType, error) {\n\tout := make([]*SDEType, 0)\n\tfor _, v := range s.Types {\n\t\tfor _, attr := range v.Attributes {\n\t\t\tswitch tid := attr.(type) {\n\t\t\tcase int:\n\t\t\t\tif tid == t.TypeID && !sdeslicecontains(out, tid) {\n\t\t\t\t\tout = append(out, v)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn out, nil\n}\n\n\/\/ Size estimates the memory usage of the SDE instance.\nfunc (s *SDE) Size() int {\n\tbase := int(reflect.ValueOf(*s).Type().Size())\n\tfor _, v := range s.Types {\n\t\tvv := int(reflect.ValueOf(*v).Type().Size())\n\t\tfor _, a := range v.Attributes {\n\t\t\tswitch reflect.TypeOf(a).Kind() {\n\t\t\tcase reflect.String:\n\t\t\t\tvv += len(a.(string))\n\t\t\t\tfallthrough\n\t\t\tdefault:\n\t\t\t\tvv += int(reflect.ValueOf(a).Type().Size())\n\t\t\t}\n\t\t}\n\t\tbase += vv\n\t}\n\treturn base\n}\n\n\/\/ Internal methods\n\n\/\/ Use whenever possible. Benchmarks have shown it takes roughly the same\n\/\/ amount of time to generate the cache as it does to perform one SDEType\n\/\/ level lookup. Let alone one that looks into SDEType.Attributes\nfunc (s *SDE) generateCache() {\n\ts.Cache = &Cache{}\n\ts.Cache.TypeNameLookup = make(map[string]*SDEType)\n\tfor _, v := range s.Types {\n\t\ts.Cache.TypeNameLookup[v.TypeName] = v\n\t}\n}\n\nfunc (s *SDE) lookupByTypeName(typeName string) (*SDEType, error) {\n\tif s.Cache != nil { \/\/ Fast lookup\n\t\tif v, ok := s.Cache.TypeNameLookup[typeName]; ok {\n\t\t\treturn v, nil\n\t\t} else {\n\t\t\treturn nil, ErrTypeDoesNotExist\n\t\t}\n\t}\n\t\/\/ Default to slow lookup if cache is nil\n\n\tfor _, v := range s.Types {\n\t\tif v.TypeName == typeName {\n\t\t\treturn v, nil\n\t\t}\n\t}\n\treturn nil, ErrTypeDoesNotExist\n}\n\n\/*\n\tSDEType is a struct representing a single individual type in an SDE.\n\t@TODO:\n\t\tAdd old methods.\n\t\tMake some cleaner way than before of checking for the existance of *.*... atributes:\n\t\tOptions:\n\t\t\t1) Substruct them out and create a parser for each(yuck)\n\t\t\t2) Map[string]interface{} parser(ehh)\n*\/\ntype SDEType struct {\n\tTypeID int\n\tTypeName string\n\tAttributes map[string]interface{}\n\tParent *SDE\n}\n\n\/\/ GetName returns the string value of Attributes[\"mDisplayName\"] if it exists. Otherwise we return TypeName\nfunc (s *SDEType) GetName() string {\n\tif v, ok := s.Attributes[\"mDisplayName\"]; ok {\n\t\treturn v.(string)\n\t}\n\treturn s.TypeName\n}\n\n\/\/ GetAttribute checks if the type has the attribute and returns it. If it doesn't exist we lookup the weapons projectile type\nfunc (s *SDEType) GetAttribute(attr string) interface{} {\n\tif v, ok := s.Attributes[attr]; ok {\n\t\treturn v\n\t} else {\n\t\tif v, ok := s.Attributes[\"mFireMode0.projectileType\"]; ok {\n\t\t\tv, _ := v.(int)\n\t\t\tt, _ := s.Parent.GetType(v) \/\/ Ditching error because we don't return an error. I don't want to break SDETool things yet\n\t\t\tif v, ok := t.Attributes[attr]; ok {\n\t\t\t\treturn v\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ CompareTo prints the differences between two types\nfunc (s *SDEType) CompareTo(t *SDEType) {\n\t\/\/ @TODO: Print differences between typenames\/typeid\n\tfor key, value := range s.Attributes {\n\t\tif v, ok := t.Attributes[key]; ok {\n\t\t\tif value != v {\n\t\t\t\tfmt.Printf(\"CHANGE: %v: %v\\n\", value, v)\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Printf(\"ADD: %v: %v\\n\", key, value)\n\t\t}\n\t}\n\tfor key, value := range t.Attributes {\n\t\tif _, ok := s.Attributes[key]; ok {\n\t\t} else {\n\t\t\tfmt.Printf(\"REMOVE: %v: %v\\n\", key, value)\n\t\t}\n\t}\n}\n\n\/*\n\tHelpers\n*\/\n\nfunc sdeslicecontains(s []*SDEType, tid int) bool {\n\tfor _, v := range s {\n\t\tif v.TypeID == tid {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc init() {\n\tgob.Register(SDE{})\n\tgob.Register(SDEType{})\n}\n<|endoftext|>"} {"text":"<commit_before>package sec\n\nimport (\n\t\"bytes\"\n\t\"crypto\/hmac\"\n\t\"crypto\/rand\"\n\t\"crypto\/sha1\"\n\t\"crypto\/sha256\"\n\t\"crypto\/sha512\"\n\t\"encoding\/base32\"\n\t\"encoding\/base64\"\n\t\"encoding\/binary\"\n\t\"encoding\/hex\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst (\n\t\/\/dSecurityKey\t\t = \"\\x69\\x51\\xe8\\x41\\x50\\x83\\x19\\xa4\\xf0\\x2f\\xac\\x7d\\x99\\xb7\\x5e\\xbe\\x7e\\x32\\xf5\\xa5\\xf7\\x1f\\x43\\x04\\x96\\xdd\\x1b\\xf0\\x93\\x4e\\xc5\\x44\"\n\t\/\/dCSRFToken\t\t\t= \"\\xc7\\x58\\xa7\\xf2\\x15\\x79\\x54\\x34\\x24\\xeb\\x45\\x50\\x33\\x0f\\xa5\\x52\\x95\\x36\\x06\\xb0\\xb7\\xdb\\x5d\\xa7\\x07\\xcf\\xa5\\x1c\\x10\\xe7\\x4b\\xd4\"\n\t\/\/dHashSalt\t\t\t= \"\\x5d\\xfb\\xcf\\x47\\x30\\xce\\x2e\\x43\\xfa\\x1c\\x5f\\xee\\x76\\x0f\\xd7\\x31\\x14\\x07\\x24\\xa8\\xbf\\xd0\\x3c\\x88\\xfc\\xa3\\xdc\\x3b\\xae\\xaa\\x3a\\x15\"\n\t\/\/dCSRFTokenLife\t\t= 14400\n\tCSRFTokenMinLife = 3600\n\tCSRFDefaultTokenLife = 14400\n\tCSRFDefaultCookieName = \"XSRF-TOKEN\"\n\tCSRFDefaultRequestName = \"X-XSRF-TOKEN\"\n\tCSRFTimestampLen = 5\n\tCSRFRandTokenLen = 16\n)\n\n\/\/ the resultant hash length should be longer than or equals to sessEntropy\nvar sessHash = sha256.New\n\n\/\/ use a higher entropy (bytes) to prevent brute force session attack\nvar sessEnthropy = 24\n\ntype CSRFGate struct {\n\tconfig CSRFGateConfig\n}\n\ntype CSRFGateConfig struct {\n\tSunnyConfig bool `config.namespace:\"sunnified.sec.csrf\"`\n\tKey []byte\n\tToken []byte\n\tTokenlife int `config.default:\"14400\"`\n\tCookiename string `config.default:\"XSRF-TOKEN\"`\n\tReqname string `config.default:\"X-XSRF-TOKEN\"`\n}\n\nfunc NewCSRFGate(settings CSRFGateConfig) *CSRFGate {\n\tif settings.Key == nil || settings.Token == nil {\n\t\treturn nil\n\t}\n\n\tif settings.Tokenlife == 0 {\n\t\tsettings.Tokenlife = CSRFDefaultTokenLife\n\t}\n\tif settings.Reqname == \"\" {\n\t\tsettings.Reqname = CSRFDefaultRequestName\n\t}\n\tif settings.Cookiename == \"\" {\n\t\tsettings.Cookiename = CSRFDefaultCookieName\n\t}\n\n\treturn &CSRFGate{config: settings}\n}\n\ntype CSRFRequestBody struct {\n\tName string\n\tValue string\n\tCookie *http.Cookie\n\tOk bool\n}\n\n\/\/ SetCSRFToken returns a CsrfRequestBody containing the name and value to be used\n\/\/ as a query string or form input that can be verified by VerifyCSRFToken.\n\/\/ Additionally, a cookie will be set (if ResponseWriter is not nil) to cross authenticate validity of token data if non exists\nfunc (cg *CSRFGate) CSRFToken(w http.ResponseWriter, r *http.Request) (crb CSRFRequestBody) {\n\tvar (\n\t\trandToken []byte\n\t\tmsg []byte\n\t\twriteCookie = false\n\t\ttstamp = time.Now().Unix()\n\t\t\/\/ the current rolling global token.\n\t\t\/\/ this token is the share for the entire application\n\t\t\/\/ it rolls over to a new token every \"csrf-token-life\"\n\t\tcurrentToken = cg.csrfCurrentToken(tstamp)\n\t\tckie, err = r.Cookie(cg.config.Cookiename)\n\t)\n\n\t\/\/ gets the cookie containing the random token generated\n\t\/\/ the random token will be shared for all requests from the same machine\/browser\n\t\/\/ this is a very simple mechanism for unique user identification\n\tif err == nil {\n\t\trandToken, err = AesCtrDecryptBase64(cg.config.Key, ckie.Value)\n\t}\n\t\/\/ if there are no random token from the cookie,\n\t\/\/ generate a new one ourselves.\n\tif err != nil || len(randToken) != CSRFRandTokenLen {\n\t\trandToken = GenRandomBytes(CSRFRandTokenLen)\n\n\t\tif randToken == nil {\n\t\t\t\/\/ the randomness of this token is not as critical to security\n\t\t\tlenToFill := CSRFRandTokenLen\n\t\t\tmsgToHash := []byte(strconv.FormatInt(tstamp, 10))\n\t\t\tmsgToHash = append(msgToHash, currentToken...)\n\t\t\trandToken = make([]byte, 0, CSRFRandTokenLen)\n\n\t\t\t\/\/ fill the random token slice using sha512 checksum\n\t\t\t\/\/ if random token exceeds 64 bytes(len of sha512),\n\t\t\t\/\/ it loops and generate more checksum to fill\n\t\t\tfor lenToFill > 0 {\n\t\t\t\th := sha512.Sum512(msgToHash)\n\n\t\t\t\tfillLen := lenToFill\n\t\t\t\tif fillLen > 64 {\n\t\t\t\t\tfillLen = 64\n\t\t\t\t}\n\t\t\t\tlenToFill = lenToFill - fillLen\n\n\t\t\t\trandToken = append(randToken, h[0:fillLen]...)\n\t\t\t\tmsgToHash = h[:]\n\t\t\t}\n\t\t}\n\n\t\t\/\/ a new random token is generated, set the cookie to update it\n\t\twriteCookie = true\n\t}\n\n\tmsg = make([]byte, 0, CSRFTimestampLen+CSRFRandTokenLen+len(currentToken))\n\tbuf := bytes.NewBuffer(msg)\n\tbinary.Write(buf, binary.LittleEndian, tstamp)\n\n\t\/\/ the csrf token consists of timestamp(5 bytes),\n\t\/\/ random bytes(16 bytes),\n\t\/\/ rolling global token(20bytes [sha1 checksum])\n\tmsg = append(msg[0:CSRFTimestampLen], randToken...)\n\tmsg = append(msg, currentToken...)\n\n\tif value, err := AesCtrEncryptBase64(cg.config.Key, msg); err == nil {\n\t\tif writeCookie {\n\t\t\tenc, err := AesCtrEncryptBase64(cg.config.Key, randToken)\n\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tckie = &http.Cookie{\n\t\t\t\tName: cg.config.Cookiename,\n\t\t\t\tValue: enc,\n\t\t\t\tPath: \"\/\",\n\t\t\t}\n\t\t\tif w != nil {\n\t\t\t\thttp.SetCookie(w, ckie)\n\t\t\t}\n\t\t}\n\n\t\tcrb.Name = cg.config.Reqname\n\t\tcrb.Value = value\n\t\tcrb.Cookie = ckie\n\t\tcrb.Ok = true\n\t}\n\n\treturn\n}\n\n\/\/ VerifyCSRFToken checks whether the request r includes a valid CSRF token\nfunc (cg *CSRFGate) VerifyCSRFToken(r *http.Request) (valid bool) {\n\tvar token string\n\n\tif token = r.Header.Get(cg.config.Reqname); token != \"\" {\n\t\t\/\/ TODO: for cross domain, the request will first perform an OPTIONS\n\t\t\/\/ with Access-Control-Request-Headers: X-XSRF-TOKEN\n\t\t\/\/ we gotten respond with Access-Control-Allow-Headers: X-XSRF-TOKEN somehow\n\t\t\/\/ if router doesn't respond by mirroring the request\n\t\tif ckie, err := r.Cookie(cg.config.Cookiename); err == nil {\n\t\t\tvalid = token == ckie.Value\n\t\t}\n\t} else {\n\t\tr.ParseForm()\n\t\ttoken = r.Form.Get(cg.config.Reqname)\n\n\t\tif token == \"\" {\n\t\t\treturn\n\t\t}\n\n\t\tresult, err := AesCtrDecryptBase64(cg.config.Key, token)\n\n\t\tif err != nil || len(result) <= (CSRFTimestampLen+CSRFRandTokenLen) {\n\t\t\treturn\n\t\t}\n\n\t\tlenTNC := CSRFTimestampLen + CSRFRandTokenLen\n\n\t\ttcreatedcap := CSRFTimestampLen\n\t\tif tcreatedcap < 8 {\n\t\t\ttcreatedcap = 8\n\t\t}\n\n\t\ttcreated := make([]byte, CSRFTimestampLen, tcreatedcap)\n\n\t\t\/\/ copy into a new slice, append overwrites original slice data\n\t\tcopy(tcreated, result[0:CSRFTimestampLen])\n\t\tckietoken := make([]byte, CSRFRandTokenLen)\n\t\tcopy(ckietoken, result[CSRFTimestampLen:lenTNC])\n\t\treqtoken := make([]byte, len(result)-lenTNC)\n\t\tcopy(reqtoken, result[lenTNC:])\n\n\t\tif CSRFTimestampLen < 8 {\n\t\t\tfiller := make([]byte, 8-CSRFTimestampLen)\n\t\t\ttcreated = append(tcreated, filler...)\n\t\t}\n\n\t\tvar tcreated64 int64\n\t\tbinary.Read(bytes.NewBuffer(tcreated), binary.LittleEndian, &tcreated64)\n\t\ttstamp := time.Now().Unix()\n\n\t\t\/\/ check whether request token has already expired\n\t\tif (tcreated64+int64(cg.config.Tokenlife)) < tstamp || tcreated64 > tstamp {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ cookie authentication of csrf token is needed to ensure each machine has unique token\n\t\tif ckie, err := r.Cookie(cg.config.Cookiename); err == nil {\n\t\t\tdec, err := AesCtrDecryptBase64(cg.config.Key, ckie.Value)\n\n\t\t\tif err != nil || !bytes.Equal(dec, ckietoken) {\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\treturn\n\t\t}\n\n\t\tvalid = bytes.Equal(reqtoken, cg.csrfCurrentToken(tstamp)) || bytes.Equal(reqtoken, cg.csrfPrevToken(tstamp))\n\t}\n\n\treturn\n}\n\nfunc (cg *CSRFGate) csrfCurrentToken(t ...int64) []byte {\n\tvar tnow int64\n\n\tif len(t) > 0 {\n\t\ttnow = t[0]\n\t} else {\n\t\ttnow = time.Now().Unix()\n\t}\n\titeration := tnow \/ int64(cg.config.Tokenlife)\n\treturn cg.csrfIterToken(iteration)\n}\n\nfunc (cg *CSRFGate) csrfCurrentTokenString(t ...int64) string {\n\treturn string(cg.csrfCurrentToken(t...))\n}\n\nfunc (cg *CSRFGate) csrfPrevToken(t ...int64) []byte {\n\tvar tnow int64\n\n\tif len(t) > 0 {\n\t\ttnow = t[0]\n\t} else {\n\t\ttnow = time.Now().Unix()\n\t}\n\n\titeration := tnow \/ int64(cg.config.Tokenlife)\n\treturn cg.csrfIterToken(iteration - 1)\n}\n\nfunc (cg *CSRFGate) csrfPrevTokenString(t ...int64) string {\n\treturn string(cg.csrfPrevToken(t...))\n}\n\nfunc (cg *CSRFGate) csrfIterToken(iteration int64) []byte {\n\titertoken := strconv.FormatInt(iteration, 10)\n\th := hmac.New(sha1.New, cg.config.Token)\n\th.Write([]byte(itertoken))\n\thash := make([]byte, 0, h.Size())\n\thash = h.Sum(hash)\n\treturn hash\n}\n\nfunc (cg *CSRFGate) csrfIterTokenString(iteration int64) string {\n\treturn string(cg.csrfIterToken(iteration))\n}\n\n\/\/ GenRandomBytes return a slice of random bytes of length l\nfunc GenRandomBytes(l int) (rb []byte) {\n\trb = make([]byte, l)\n\n\t\/\/ rand.Read() is blocking\n\tif _, err := rand.Read(rb); err != nil {\n\t\trb = nil\n\t}\n\n\treturn\n}\n\nfunc GenRandomString(l int) string {\n\trb := GenRandomBytes(l)\n\n\tif rb != nil {\n\t\treturn string(rb)\n\t}\n\n\treturn \"\"\n}\n\nfunc GenRandomBase64String(l int) string {\n\treturn base64.StdEncoding.EncodeToString(GenRandomBytes(l))\n}\n\nfunc GenRandomHexString(l int) string {\n\treturn hex.EncodeToString(GenRandomBytes(l))\n}\n\nfunc genSessionID(l ...int) []byte {\n\tvar size = sessEnthropy\n\tif len(l) > 0 && l[0] > 0 && l[0] < 1035 {\n\t\tsize = l[0]\n\t}\n\n\tvar h = sessHash()\n\th.Write(GenRandomBytes(size))\n\th.Write([]byte(strconv.FormatInt(time.Now().Unix(), 10)))\n\tid := make([]byte, 0, h.Size())\n\tid = h.Sum(id)\n\treturn id\n}\n\nfunc GenSessionID(l ...int) string {\n\treturn base64.StdEncoding.EncodeToString(genSessionID(l...))\n}\n\nfunc GenSessionIDBase32(l ...int) string {\n\treturn base32.StdEncoding.EncodeToString(genSessionID(l...))\n}\n<commit_msg>Added GenPassword(length) for random password string generation in sec package<commit_after>package sec\n\nimport (\n\t\"bytes\"\n\t\"crypto\/hmac\"\n\t\"crypto\/rand\"\n\t\"crypto\/sha1\"\n\t\"crypto\/sha256\"\n\t\"crypto\/sha512\"\n\t\"encoding\/base32\"\n\t\"encoding\/base64\"\n\t\"encoding\/binary\"\n\t\"encoding\/hex\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"time\"\n)\n\nconst (\n\t\/\/dSecurityKey\t\t = \"\\x69\\x51\\xe8\\x41\\x50\\x83\\x19\\xa4\\xf0\\x2f\\xac\\x7d\\x99\\xb7\\x5e\\xbe\\x7e\\x32\\xf5\\xa5\\xf7\\x1f\\x43\\x04\\x96\\xdd\\x1b\\xf0\\x93\\x4e\\xc5\\x44\"\n\t\/\/dCSRFToken\t\t\t= \"\\xc7\\x58\\xa7\\xf2\\x15\\x79\\x54\\x34\\x24\\xeb\\x45\\x50\\x33\\x0f\\xa5\\x52\\x95\\x36\\x06\\xb0\\xb7\\xdb\\x5d\\xa7\\x07\\xcf\\xa5\\x1c\\x10\\xe7\\x4b\\xd4\"\n\t\/\/dHashSalt\t\t\t= \"\\x5d\\xfb\\xcf\\x47\\x30\\xce\\x2e\\x43\\xfa\\x1c\\x5f\\xee\\x76\\x0f\\xd7\\x31\\x14\\x07\\x24\\xa8\\xbf\\xd0\\x3c\\x88\\xfc\\xa3\\xdc\\x3b\\xae\\xaa\\x3a\\x15\"\n\t\/\/dCSRFTokenLife\t\t= 14400\n\tCSRFTokenMinLife = 3600\n\tCSRFDefaultTokenLife = 14400\n\tCSRFDefaultCookieName = \"XSRF-TOKEN\"\n\tCSRFDefaultRequestName = \"X-XSRF-TOKEN\"\n\tCSRFTimestampLen = 5\n\tCSRFRandTokenLen = 16\n)\n\n\/\/ the resultant hash length should be longer than or equals to sessEntropy\nvar sessHash = sha256.New\n\n\/\/ use a higher entropy (bytes) to prevent brute force session attack\nvar sessEnthropy = 24\n\n\/\/ characters for password generation\nvar pwdchars = [40]byte{'A', 'C', 'E', 'F', 'H', 'J', 'M', 'N', 'P', 'R', 'T', 'Y',\n\t'a', 'b', 'c', 'd', 'e', 'f', 'h', 'm', 'n', 'p', 'q', 'r', 'y',\n\t'3', '4', '7', '@', '#', '%', '&', '-', '_', '=', '?', '*', '\/', '^', '+'}\n\ntype CSRFGate struct {\n\tconfig CSRFGateConfig\n}\n\ntype CSRFGateConfig struct {\n\tSunnyConfig bool `config.namespace:\"sunnified.sec.csrf\"`\n\tKey []byte\n\tToken []byte\n\tTokenlife int `config.default:\"14400\"`\n\tCookiename string `config.default:\"XSRF-TOKEN\"`\n\tReqname string `config.default:\"X-XSRF-TOKEN\"`\n}\n\nfunc NewCSRFGate(settings CSRFGateConfig) *CSRFGate {\n\tif settings.Key == nil || settings.Token == nil {\n\t\treturn nil\n\t}\n\n\tif settings.Tokenlife == 0 {\n\t\tsettings.Tokenlife = CSRFDefaultTokenLife\n\t}\n\tif settings.Reqname == \"\" {\n\t\tsettings.Reqname = CSRFDefaultRequestName\n\t}\n\tif settings.Cookiename == \"\" {\n\t\tsettings.Cookiename = CSRFDefaultCookieName\n\t}\n\n\treturn &CSRFGate{config: settings}\n}\n\ntype CSRFRequestBody struct {\n\tName string\n\tValue string\n\tCookie *http.Cookie\n\tOk bool\n}\n\n\/\/ SetCSRFToken returns a CsrfRequestBody containing the name and value to be used\n\/\/ as a query string or form input that can be verified by VerifyCSRFToken.\n\/\/ Additionally, a cookie will be set (if ResponseWriter is not nil) to cross authenticate validity of token data if non exists\nfunc (cg *CSRFGate) CSRFToken(w http.ResponseWriter, r *http.Request) (crb CSRFRequestBody) {\n\tvar (\n\t\trandToken []byte\n\t\tmsg []byte\n\t\twriteCookie = false\n\t\ttstamp = time.Now().Unix()\n\t\t\/\/ the current rolling global token.\n\t\t\/\/ this token is the share for the entire application\n\t\t\/\/ it rolls over to a new token every \"csrf-token-life\"\n\t\tcurrentToken = cg.csrfCurrentToken(tstamp)\n\t\tckie, err = r.Cookie(cg.config.Cookiename)\n\t)\n\n\t\/\/ gets the cookie containing the random token generated\n\t\/\/ the random token will be shared for all requests from the same machine\/browser\n\t\/\/ this is a very simple mechanism for unique user identification\n\tif err == nil {\n\t\trandToken, err = AesCtrDecryptBase64(cg.config.Key, ckie.Value)\n\t}\n\t\/\/ if there are no random token from the cookie,\n\t\/\/ generate a new one ourselves.\n\tif err != nil || len(randToken) != CSRFRandTokenLen {\n\t\trandToken = GenRandomBytes(CSRFRandTokenLen)\n\n\t\tif randToken == nil {\n\t\t\t\/\/ the randomness of this token is not as critical to security\n\t\t\tlenToFill := CSRFRandTokenLen\n\t\t\tmsgToHash := []byte(strconv.FormatInt(tstamp, 10))\n\t\t\tmsgToHash = append(msgToHash, currentToken...)\n\t\t\trandToken = make([]byte, 0, CSRFRandTokenLen)\n\n\t\t\t\/\/ fill the random token slice using sha512 checksum\n\t\t\t\/\/ if random token exceeds 64 bytes(len of sha512),\n\t\t\t\/\/ it loops and generate more checksum to fill\n\t\t\tfor lenToFill > 0 {\n\t\t\t\th := sha512.Sum512(msgToHash)\n\n\t\t\t\tfillLen := lenToFill\n\t\t\t\tif fillLen > 64 {\n\t\t\t\t\tfillLen = 64\n\t\t\t\t}\n\t\t\t\tlenToFill = lenToFill - fillLen\n\n\t\t\t\trandToken = append(randToken, h[0:fillLen]...)\n\t\t\t\tmsgToHash = h[:]\n\t\t\t}\n\t\t}\n\n\t\t\/\/ a new random token is generated, set the cookie to update it\n\t\twriteCookie = true\n\t}\n\n\tmsg = make([]byte, 0, CSRFTimestampLen+CSRFRandTokenLen+len(currentToken))\n\tbuf := bytes.NewBuffer(msg)\n\tbinary.Write(buf, binary.LittleEndian, tstamp)\n\n\t\/\/ the csrf token consists of timestamp(5 bytes),\n\t\/\/ random bytes(16 bytes),\n\t\/\/ rolling global token(20bytes [sha1 checksum])\n\tmsg = append(msg[0:CSRFTimestampLen], randToken...)\n\tmsg = append(msg, currentToken...)\n\n\tif value, err := AesCtrEncryptBase64(cg.config.Key, msg); err == nil {\n\t\tif writeCookie {\n\t\t\tenc, err := AesCtrEncryptBase64(cg.config.Key, randToken)\n\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tckie = &http.Cookie{\n\t\t\t\tName: cg.config.Cookiename,\n\t\t\t\tValue: enc,\n\t\t\t\tPath: \"\/\",\n\t\t\t}\n\t\t\tif w != nil {\n\t\t\t\thttp.SetCookie(w, ckie)\n\t\t\t}\n\t\t}\n\n\t\tcrb.Name = cg.config.Reqname\n\t\tcrb.Value = value\n\t\tcrb.Cookie = ckie\n\t\tcrb.Ok = true\n\t}\n\n\treturn\n}\n\n\/\/ VerifyCSRFToken checks whether the request r includes a valid CSRF token\nfunc (cg *CSRFGate) VerifyCSRFToken(r *http.Request) (valid bool) {\n\tvar token string\n\n\tif token = r.Header.Get(cg.config.Reqname); token != \"\" {\n\t\t\/\/ TODO: for cross domain, the request will first perform an OPTIONS\n\t\t\/\/ with Access-Control-Request-Headers: X-XSRF-TOKEN\n\t\t\/\/ we gotten respond with Access-Control-Allow-Headers: X-XSRF-TOKEN somehow\n\t\t\/\/ if router doesn't respond by mirroring the request\n\t\tif ckie, err := r.Cookie(cg.config.Cookiename); err == nil {\n\t\t\tvalid = token == ckie.Value\n\t\t}\n\t} else {\n\t\tr.ParseForm()\n\t\ttoken = r.Form.Get(cg.config.Reqname)\n\n\t\tif token == \"\" {\n\t\t\treturn\n\t\t}\n\n\t\tresult, err := AesCtrDecryptBase64(cg.config.Key, token)\n\n\t\tif err != nil || len(result) <= (CSRFTimestampLen+CSRFRandTokenLen) {\n\t\t\treturn\n\t\t}\n\n\t\tlenTNC := CSRFTimestampLen + CSRFRandTokenLen\n\n\t\ttcreatedcap := CSRFTimestampLen\n\t\tif tcreatedcap < 8 {\n\t\t\ttcreatedcap = 8\n\t\t}\n\n\t\ttcreated := make([]byte, CSRFTimestampLen, tcreatedcap)\n\n\t\t\/\/ copy into a new slice, append overwrites original slice data\n\t\tcopy(tcreated, result[0:CSRFTimestampLen])\n\t\tckietoken := make([]byte, CSRFRandTokenLen)\n\t\tcopy(ckietoken, result[CSRFTimestampLen:lenTNC])\n\t\treqtoken := make([]byte, len(result)-lenTNC)\n\t\tcopy(reqtoken, result[lenTNC:])\n\n\t\tif CSRFTimestampLen < 8 {\n\t\t\tfiller := make([]byte, 8-CSRFTimestampLen)\n\t\t\ttcreated = append(tcreated, filler...)\n\t\t}\n\n\t\tvar tcreated64 int64\n\t\tbinary.Read(bytes.NewBuffer(tcreated), binary.LittleEndian, &tcreated64)\n\t\ttstamp := time.Now().Unix()\n\n\t\t\/\/ check whether request token has already expired\n\t\tif (tcreated64+int64(cg.config.Tokenlife)) < tstamp || tcreated64 > tstamp {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ cookie authentication of csrf token is needed to ensure each machine has unique token\n\t\tif ckie, err := r.Cookie(cg.config.Cookiename); err == nil {\n\t\t\tdec, err := AesCtrDecryptBase64(cg.config.Key, ckie.Value)\n\n\t\t\tif err != nil || !bytes.Equal(dec, ckietoken) {\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\treturn\n\t\t}\n\n\t\tvalid = bytes.Equal(reqtoken, cg.csrfCurrentToken(tstamp)) || bytes.Equal(reqtoken, cg.csrfPrevToken(tstamp))\n\t}\n\n\treturn\n}\n\nfunc (cg *CSRFGate) csrfCurrentToken(t ...int64) []byte {\n\tvar tnow int64\n\n\tif len(t) > 0 {\n\t\ttnow = t[0]\n\t} else {\n\t\ttnow = time.Now().Unix()\n\t}\n\titeration := tnow \/ int64(cg.config.Tokenlife)\n\treturn cg.csrfIterToken(iteration)\n}\n\nfunc (cg *CSRFGate) csrfCurrentTokenString(t ...int64) string {\n\treturn string(cg.csrfCurrentToken(t...))\n}\n\nfunc (cg *CSRFGate) csrfPrevToken(t ...int64) []byte {\n\tvar tnow int64\n\n\tif len(t) > 0 {\n\t\ttnow = t[0]\n\t} else {\n\t\ttnow = time.Now().Unix()\n\t}\n\n\titeration := tnow \/ int64(cg.config.Tokenlife)\n\treturn cg.csrfIterToken(iteration - 1)\n}\n\nfunc (cg *CSRFGate) csrfPrevTokenString(t ...int64) string {\n\treturn string(cg.csrfPrevToken(t...))\n}\n\nfunc (cg *CSRFGate) csrfIterToken(iteration int64) []byte {\n\titertoken := strconv.FormatInt(iteration, 10)\n\th := hmac.New(sha1.New, cg.config.Token)\n\th.Write([]byte(itertoken))\n\thash := make([]byte, 0, h.Size())\n\thash = h.Sum(hash)\n\treturn hash\n}\n\nfunc (cg *CSRFGate) csrfIterTokenString(iteration int64) string {\n\treturn string(cg.csrfIterToken(iteration))\n}\n\n\/\/ GenRandomBytes return a slice of random bytes of length l\nfunc GenRandomBytes(l int) (rb []byte) {\n\trb = make([]byte, l)\n\n\t\/\/ rand.Read() is blocking\n\tif _, err := rand.Read(rb); err != nil {\n\t\trb = nil\n\t}\n\n\treturn\n}\n\nfunc GenRandomString(l int) string {\n\trb := GenRandomBytes(l)\n\n\tif rb != nil {\n\t\treturn string(rb)\n\t}\n\n\treturn \"\"\n}\n\nfunc GenRandomBase64String(l int) string {\n\treturn base64.StdEncoding.EncodeToString(GenRandomBytes(l))\n}\n\nfunc GenRandomHexString(l int) string {\n\treturn hex.EncodeToString(GenRandomBytes(l))\n}\n\nfunc genSessionID(l ...int) []byte {\n\tvar size = sessEnthropy\n\tif len(l) > 0 && l[0] > 0 && l[0] < 1035 {\n\t\tsize = l[0]\n\t}\n\n\tvar h = sessHash()\n\th.Write(GenRandomBytes(size))\n\th.Write([]byte(strconv.FormatInt(time.Now().Unix(), 10)))\n\tid := make([]byte, 0, h.Size())\n\tid = h.Sum(id)\n\treturn id\n}\n\nfunc GenSessionID(l ...int) string {\n\treturn base64.StdEncoding.EncodeToString(genSessionID(l...))\n}\n\nfunc GenSessionIDBase32(l ...int) string {\n\treturn base32.StdEncoding.EncodeToString(genSessionID(l...))\n}\n\nfunc GenPassword(l int) string {\n\tif l < 1 {\n\t\treturn \"\"\n\t}\n\n\tvar (\n\t\tb = GenRandomBytes(l)\n\t\tpwd = make([]byte, l)\n\t\tcsetl = len(pwdchars)\n\t\tmax byte = byte(256 - (256 % csetl) - 1)\n\t)\n\n\tfor i := 0; i < 10; i++ {\n\t\tfor b[i] > max {\n\t\t\tb[i] = GenRandomBytes(1)[0]\n\t\t}\n\t\tpwd[i] = pwdchars[b[i]%csetl]\n\t}\n\n\treturn string(pwd)\n}\n<|endoftext|>"} {"text":"<commit_before>package gosseract\n\nimport (\n\t\"image\"\n\t\"image\/png\"\n\t\"os\"\n)\n\n\/\/ Servant of gosseract providing interactive setting\ntype Servant struct {\n\tsource source\n\tlang\tlang\n\toptions options\n}\ntype source struct {\n\tFilePath string\n\tisTmp\tbool\n\t\/\/ TODO: accept multiple image formats\n}\ntype lang struct {\n\tValue\t string\n\tAvailables []string\n}\ntype options struct {\n\tUseFile bool\n\tFilePath string\n\tDigest map[string]string\n}\ntype VersionInfo struct {\n\tTesseractVersion string\n\tGosseractVersion string\n}\n\n\/\/ Provide new servant instance\nfunc SummonServant() Servant {\n\n\tif !tesseractInstalled() {\n\t\tpanic(\"Missin `tesseract` command!! install tessearct at first.\")\n\t}\n\n\tlang := lang{}\n\tlang.init()\n\topts := options{}\n\topts.init()\n\treturn Servant{\n\t\tlang:\tlang,\n\t\toptions: opts,\n\t}\n}\n\n\/\/ Check information of tesseract and gosseract\nfunc (s *Servant) Info() VersionInfo {\n\ttessVersion := getTesseractVersion()\n\tinfo := VersionInfo{\n\t\tTesseractVersion: tessVersion,\n\t\tGosseractVersion: VERSION,\n\t}\n\treturn info\n}\n\n\/\/ Give source file to servant by file path\nfunc (s *Servant) Target(filepath string) *Servant {\n\t\/\/ TODO: check existence of this file\n\ts.source.FilePath = filepath\n\treturn s\n}\n\n\/\/ Give source file to servant by image.Image\nfunc (s *Servant) Eat(img image.Image) *Servant {\n\tfilepath := genTmpFilePath()\n\tf, e := os.Create(filepath)\n\tif e != nil {\n\t\tpanic(e)\n\t}\n\tdefer f.Close()\n\tpng.Encode(f, img)\n\n\ts.source.FilePath = filepath\n\ts.source.isTmp = true\n\n\treturn s\n}\n\n\/\/ Get result (or error?)\nfunc (s *Servant) Out() (string, error) {\n\tresult := execute(s.source.FilePath, s.buildArguments())\n\t\/\/ TODO? : should make `gosseract.servant` package?\n\n\tif !s.options.UseFile {\n\t\t_ = os.Remove(s.options.FilePath)\n\t}\n\tif s.source.isTmp {\n\t\t_ = os.Remove(s.source.FilePath)\n\t}\n\n\t\/\/ TODO: handle errors\n\treturn result, nil\n}\n\n\/\/ Make up arguments appropriate to tesseract command\nfunc (s *Servant) buildArguments() []string {\n\tvar args []string\n\targs = append(args, \"-l\", s.lang.Value)\n\tif !s.options.UseFile {\n\t\ts.options.FilePath = makeUpOptionFile(s.options.Digest)\n\t}\n\targs = append(args, s.options.FilePath)\n\treturn args\n}\n\/\/ Make up option file for tesseract command.\n\/\/ (is needless if tesseract accepts such options by cli options)\nfunc makeUpOptionFile(digestMap map[string]string) (fpath string) {\n\tfpath = \"\"\n\tvar digestFileContents string\n\tfor k, v := range digestMap {\n\t\tdigestFileContents = digestFileContents + k + \" \" + v + \"\\n\"\n\t}\n\tif digestFileContents == \"\" {\n\t\treturn fpath\n\t}\n\tfpath = genTmpFilePath()\n\tf, _ := os.Create(fpath)\n\tdefer f.Close()\n\t_, _ = f.WriteString(digestFileContents)\n\treturn fpath\n}\n<commit_msg>Execute `go fmt`<commit_after>package gosseract\n\nimport (\n\t\"image\"\n\t\"image\/png\"\n\t\"os\"\n)\n\n\/\/ Servant of gosseract providing interactive setting\ntype Servant struct {\n\tsource source\n\tlang lang\n\toptions options\n}\ntype source struct {\n\tFilePath string\n\tisTmp bool\n\t\/\/ TODO: accept multiple image formats\n}\ntype lang struct {\n\tValue string\n\tAvailables []string\n}\ntype options struct {\n\tUseFile bool\n\tFilePath string\n\tDigest map[string]string\n}\ntype VersionInfo struct {\n\tTesseractVersion string\n\tGosseractVersion string\n}\n\n\/\/ Provide new servant instance\nfunc SummonServant() Servant {\n\n\tif !tesseractInstalled() {\n\t\tpanic(\"Missin `tesseract` command!! install tessearct at first.\")\n\t}\n\n\tlang := lang{}\n\tlang.init()\n\topts := options{}\n\topts.init()\n\treturn Servant{\n\t\tlang: lang,\n\t\toptions: opts,\n\t}\n}\n\n\/\/ Check information of tesseract and gosseract\nfunc (s *Servant) Info() VersionInfo {\n\ttessVersion := getTesseractVersion()\n\tinfo := VersionInfo{\n\t\tTesseractVersion: tessVersion,\n\t\tGosseractVersion: VERSION,\n\t}\n\treturn info\n}\n\n\/\/ Give source file to servant by file path\nfunc (s *Servant) Target(filepath string) *Servant {\n\t\/\/ TODO: check existence of this file\n\ts.source.FilePath = filepath\n\treturn s\n}\n\n\/\/ Give source file to servant by image.Image\nfunc (s *Servant) Eat(img image.Image) *Servant {\n\tfilepath := genTmpFilePath()\n\tf, e := os.Create(filepath)\n\tif e != nil {\n\t\tpanic(e)\n\t}\n\tdefer f.Close()\n\tpng.Encode(f, img)\n\n\ts.source.FilePath = filepath\n\ts.source.isTmp = true\n\n\treturn s\n}\n\n\/\/ Get result (or error?)\nfunc (s *Servant) Out() (string, error) {\n\tresult := execute(s.source.FilePath, s.buildArguments())\n\t\/\/ TODO? : should make `gosseract.servant` package?\n\n\tif !s.options.UseFile {\n\t\t_ = os.Remove(s.options.FilePath)\n\t}\n\tif s.source.isTmp {\n\t\t_ = os.Remove(s.source.FilePath)\n\t}\n\n\t\/\/ TODO: handle errors\n\treturn result, nil\n}\n\n\/\/ Make up arguments appropriate to tesseract command\nfunc (s *Servant) buildArguments() []string {\n\tvar args []string\n\targs = append(args, \"-l\", s.lang.Value)\n\tif !s.options.UseFile {\n\t\ts.options.FilePath = makeUpOptionFile(s.options.Digest)\n\t}\n\targs = append(args, s.options.FilePath)\n\treturn args\n}\n\n\/\/ Make up option file for tesseract command.\n\/\/ (is needless if tesseract accepts such options by cli options)\nfunc makeUpOptionFile(digestMap map[string]string) (fpath string) {\n\tfpath = \"\"\n\tvar digestFileContents string\n\tfor k, v := range digestMap {\n\t\tdigestFileContents = digestFileContents + k + \" \" + v + \"\\n\"\n\t}\n\tif digestFileContents == \"\" {\n\t\treturn fpath\n\t}\n\tfpath = genTmpFilePath()\n\tf, _ := os.Create(fpath)\n\tdefer f.Close()\n\t_, _ = f.WriteString(digestFileContents)\n\treturn fpath\n}\n<|endoftext|>"} {"text":"<commit_before>package rkv\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/relab\/raft\"\n)\n\n\/\/ Service exposes the Store api as a http service.\ntype Service struct {\n\tstore *Store\n}\n\n\/\/ NewService creates a new Service backed by store.\nfunc NewService(store *Store) *Service {\n\treturn &Service{\n\t\tstore: store,\n\t}\n}\n\n\/\/ ServeHTTP implements the http.Handler interface.\nfunc (s *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tpath := strings.Split(r.URL.Path, \"\/\")\n\n\tif len(path) < 2 {\n\t\thttp.Error(w, \"400 Bad Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tswitch path[1] {\n\tcase \"register\":\n\t\tid, err := s.store.Register()\n\n\t\tif err != nil {\n\t\t\traftError(w, r, err)\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Fprintln(w, id)\n\t\treturn\n\tcase \"store\":\n\t\tif len(path) != 3 {\n\t\t\thttp.Error(w, \"400 Bad Request\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tkey := path[2]\n\n\t\tif len(key) < 1 {\n\t\t\thttp.Error(w, \"400 Bad Request\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tswitch r.Method {\n\t\tcase http.MethodGet:\n\t\t\tvalue, err := s.store.Lookup(key, false)\n\n\t\t\tif err != nil {\n\t\t\t\traftError(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfmt.Fprintln(w, value)\n\n\t\tcase http.MethodPut:\n\t\t\tvalue, err := ioutil.ReadAll(r.Body)\n\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, \"400 Bad Request\", http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tquery := r.URL.Query()\n\t\t\t\/\/ TODO Bound check.\n\t\t\tid := query[\"id\"][0]\n\t\t\tseq := query[\"seq\"][0]\n\t\t\tsequ, err := strconv.ParseUint(seq, 10, 64)\n\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, \"400 Bad Request\", http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terr = s.store.Insert(id, sequ, key, string(value))\n\n\t\t\tif err != nil {\n\t\t\t\traftError(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ TODO Change to StatusOK when we actually verify commitment.\n\t\t\tw.WriteHeader(http.StatusAccepted)\n\t\t}\n\tdefault:\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n}\n\nfunc raftError(w http.ResponseWriter, r *http.Request, err error) {\n\tswitch err := err.(type) {\n\tcase raft.ErrNotLeader:\n\t\t\/\/ TODO Assumes a valid addr is returned.\n\t\t\/\/ Try random server if err.LeaderAddr == \"\".\n\t\thost, port, _ := net.SplitHostPort(err.LeaderAddr)\n\n\t\tif host == \"\" {\n\t\t\thost = \"localhost\"\n\t\t}\n\n\t\t\/\/ TODO Hack. Since LeaderAddr is the Raft port, we just assume\n\t\t\/\/ the application is using Raft port - 100. Fix means changing\n\t\t\/\/ Raft to put the application port into LeaderAddr, however we\n\t\t\/\/ don't have a way of knowing the application ports, as they\n\t\t\/\/ are set locally.\n\t\tp, _ := strconv.Atoi(port)\n\t\tport = strconv.Itoa(p - 100)\n\n\t\taddr := net.JoinHostPort(host, port)\n\n\t\thttp.Redirect(w, r, \"http:\/\/\"+addr+r.URL.RequestURI(), http.StatusTemporaryRedirect)\n\tdefault:\n\t\thttp.Error(w, \"503 Service Unavailable\", http.StatusServiceUnavailable)\n\t}\n}\n<commit_msg>service.go: Handle missing leader<commit_after>package rkv\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/relab\/raft\"\n)\n\n\/\/ Service exposes the Store api as a http service.\ntype Service struct {\n\tstore *Store\n}\n\n\/\/ NewService creates a new Service backed by store.\nfunc NewService(store *Store) *Service {\n\treturn &Service{\n\t\tstore: store,\n\t}\n}\n\n\/\/ ServeHTTP implements the http.Handler interface.\nfunc (s *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tpath := strings.Split(r.URL.Path, \"\/\")\n\n\tif len(path) < 2 {\n\t\thttp.Error(w, \"400 Bad Request\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tswitch path[1] {\n\tcase \"register\":\n\t\tid, err := s.store.Register()\n\n\t\tif err != nil {\n\t\t\traftError(w, r, err)\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Fprintln(w, id)\n\t\treturn\n\tcase \"store\":\n\t\tif len(path) != 3 {\n\t\t\thttp.Error(w, \"400 Bad Request\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tkey := path[2]\n\n\t\tif len(key) < 1 {\n\t\t\thttp.Error(w, \"400 Bad Request\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tswitch r.Method {\n\t\tcase http.MethodGet:\n\t\t\tvalue, err := s.store.Lookup(key, false)\n\n\t\t\tif err != nil {\n\t\t\t\traftError(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfmt.Fprintln(w, value)\n\n\t\tcase http.MethodPut:\n\t\t\tvalue, err := ioutil.ReadAll(r.Body)\n\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, \"400 Bad Request\", http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tquery := r.URL.Query()\n\t\t\t\/\/ TODO Bound check.\n\t\t\tid := query[\"id\"][0]\n\t\t\tseq := query[\"seq\"][0]\n\t\t\tsequ, err := strconv.ParseUint(seq, 10, 64)\n\n\t\t\tif err != nil {\n\t\t\t\thttp.Error(w, \"400 Bad Request\", http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terr = s.store.Insert(id, sequ, key, string(value))\n\n\t\t\tif err != nil {\n\t\t\t\traftError(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t\/\/ TODO Change to StatusOK when we actually verify commitment.\n\t\t\tw.WriteHeader(http.StatusAccepted)\n\t\t}\n\tdefault:\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n}\n\nfunc raftError(w http.ResponseWriter, r *http.Request, err error) {\n\tswitch err := err.(type) {\n\tcase raft.ErrNotLeader:\n\t\tif err.LeaderAddr == \"\" {\n\t\t\t\/\/ TODO Document that this means the client should\n\t\t\t\/\/ change to a random server.\n\t\t\tw.Header().Set(\"Retry-After\", \"-1\")\n\t\t\thttp.Error(w, \"503 Service Unavailable\", http.StatusServiceUnavailable)\n\t\t}\n\n\t\thost, port, erri := net.SplitHostPort(err.LeaderAddr)\n\n\t\tif erri != nil {\n\t\t\thttp.Error(w, \"500 Internal Server Error\", http.StatusInternalServerError)\n\t\t}\n\n\t\tif host == \"\" {\n\t\t\thost = \"localhost\"\n\t\t}\n\n\t\t\/\/ TODO Hack. Since LeaderAddr is the Raft port, we just assume\n\t\t\/\/ the application is using Raft port - 100. Fix means changing\n\t\t\/\/ Raft to put the application port into LeaderAddr, however we\n\t\t\/\/ don't have a way of knowing the application ports, as they\n\t\t\/\/ are set locally.\n\t\tp, _ := strconv.Atoi(port)\n\t\tport = strconv.Itoa(p - 100)\n\n\t\taddr := net.JoinHostPort(host, port)\n\n\t\thttp.Redirect(w, r, \"http:\/\/\"+addr+r.URL.RequestURI(), http.StatusTemporaryRedirect)\n\tdefault:\n\t\thttp.Error(w, \"503 Service Unavailable\", http.StatusServiceUnavailable)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package siesta\n\nimport (\n\t\"net\/http\"\n\t\"path\"\n\t\"regexp\"\n\t\"strings\"\n)\n\n\/\/ Registered services keyed by base URI.\nvar services = map[string]*Service{}\n\n\/\/ A Service is a container for routes with a common base URI.\n\/\/ It also has two middleware chains, named \"pre\" and \"post\".\n\/\/\n\/\/ The \"pre\" chain is run before the main handler. The first\n\/\/ handler in the \"pre\" chain is guaranteed to run, but execution\n\/\/ may quit anywhere else in the chain.\n\/\/\n\/\/ If the \"pre\" chain executes completely, the main handler is executed.\n\/\/ It is skipped otherwise.\n\/\/\n\/\/ The \"post\" chain runs after the main handler, whether it is skipped\n\/\/ or not. The first handler in the \"post\" chain is guaranteed to run, but\n\/\/ execution may quit anywhere else in the chain if the quit function\n\/\/ is called.\ntype Service struct {\n\tbaseURI string\n\n\tpre []contextHandler\n\tpost []contextHandler\n\n\thandlers map[*regexp.Regexp]contextHandler\n\n\troutes map[string]*node\n\n\tnotFound contextHandler\n}\n\n\/\/ NewService returns a new Service with the given base URI\n\/\/ or panics if the base URI has already been registered.\nfunc NewService(baseURI string) *Service {\n\tif services[baseURI] != nil {\n\t\tpanic(\"service already registered\")\n\t}\n\n\treturn &Service{\n\t\tbaseURI: path.Join(\"\/\", baseURI, \"\/\"),\n\t\thandlers: make(map[*regexp.Regexp]contextHandler),\n\t\troutes: map[string]*node{},\n\t}\n}\n\nfunc addToChain(f interface{}, chain []contextHandler) []contextHandler {\n\tm := toContextHandler(f)\n\treturn append(chain, m)\n}\n\n\/\/ AddPre adds f to the end of the \"pre\" chain.\n\/\/ It panics if f cannot be converted to a contextHandler (see Service.Route).\nfunc (s *Service) AddPre(f interface{}) {\n\ts.pre = addToChain(f, s.pre)\n}\n\n\/\/ AddPost adds f to the end of the \"post\" chain.\n\/\/ It panics if f cannot be converted to a contextHandler (see Service.Route).\nfunc (s *Service) AddPost(f interface{}) {\n\ts.post = addToChain(f, s.post)\n}\n\n\/\/ Service satisfies the http.Handler interface.\nfunc (s *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\ts.ServeHTTPInContext(NewSiestaContext(), w, r)\n}\n\n\/\/ ServiceHTTPInContext serves an HTTP request within the Context c.\n\/\/ A Service will run through both of its internal chains, quitting\n\/\/ when requested.\nfunc (s *Service) ServeHTTPInContext(c Context, w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\n\tquit := false\n\tfor _, m := range s.pre {\n\t\tm(c, w, r, func() {\n\t\t\tquit = true\n\t\t})\n\n\t\tif quit {\n\t\t\t\/\/ Break out of the \"pre\" loop, but\n\t\t\t\/\/ continue on.\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !quit {\n\t\t\/\/ The main handler is only run if we have not\n\t\t\/\/ been signaled to quit.\n\n\t\tif r.URL.Path != \"\/\" {\n\t\t\tr.URL.Path = strings.TrimRight(r.URL.Path, \"\/\")\n\t\t}\n\n\t\tvar (\n\t\t\thandler contextHandler\n\t\t\tparams routeParams\n\t\t)\n\n\t\t\/\/ Lookup the tree for this method\n\t\trouteNode, ok := s.routes[r.Method]\n\n\t\tif ok {\n\t\t\thandler, params, _ = routeNode.getValue(r.URL.Path)\n\t\t}\n\n\t\tif handler == nil {\n\t\t\tif s.notFound != nil {\n\t\t\t\t\/\/ Use user-defined handler.\n\t\t\t\ts.notFound(c, w, r, func() {})\n\t\t\t} else {\n\t\t\t\t\/\/ Default to the net\/http NotFoundHandler.\n\t\t\t\thttp.NotFoundHandler().ServeHTTP(w, r)\n\t\t\t}\n\t\t} else {\n\t\t\tfor _, p := range params {\n\t\t\t\tr.Form.Set(p.Key, p.Value)\n\t\t\t}\n\n\t\t\thandler(c, w, r, func() {\n\t\t\t\tquit = true\n\t\t\t})\n\t\t}\n\t}\n\n\tquit = false\n\tfor _, m := range s.post {\n\t\tm(c, w, r, func() {\n\t\t\tquit = true\n\t\t})\n\n\t\tif quit {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Route adds a new route to the Service.\n\/\/ f must be a function with one of the following signatures:\n\/\/\n\/\/ func(http.ResponseWriter, *http.Request)\n\/\/ func(http.ResponseWriter, *http.Request, func())\n\/\/ func(Context, http.ResponseWriter, *http.Request)\n\/\/ func(Context, http.ResponseWriter, *http.Request, func())\n\/\/\n\/\/ Note that Context is an interface type defined in this package.\n\/\/ The last argument is a function which is called to signal the\n\/\/ quitting of the current execution sequence.\nfunc (s *Service) Route(verb, uriPath, usage string, f interface{}) {\n\thandler := toContextHandler(f)\n\n\tif n := s.routes[verb]; n == nil {\n\t\ts.routes[verb] = &node{}\n\t}\n\n\ts.routes[verb].addRoute(path.Join(s.baseURI, strings.TrimRight(uriPath, \"\/\")), handler)\n}\n\n\/\/ SetNotFound sets the handler for all paths that do not\n\/\/ match any existing routes. It accepts the same function\n\/\/ signatures that Route does with the addition of `nil`.\nfunc (s *Service) SetNotFound(f interface{}) {\n\tif f == nil {\n\t\ts.notFound = nil\n\t\treturn\n\t}\n\n\thandler := toContextHandler(f)\n\ts.notFound = handler\n}\n\n\/\/ Register registers s by adding it as a handler to the\n\/\/ DefaultServeMux in the net\/http package.\nfunc (s *Service) Register() {\n\thttp.Handle(s.baseURI, s)\n}\n<commit_msg>remove dead regexp code<commit_after>package siesta\n\nimport (\n\t\"net\/http\"\n\t\"path\"\n\t\"strings\"\n)\n\n\/\/ Registered services keyed by base URI.\nvar services = map[string]*Service{}\n\n\/\/ A Service is a container for routes with a common base URI.\n\/\/ It also has two middleware chains, named \"pre\" and \"post\".\n\/\/\n\/\/ The \"pre\" chain is run before the main handler. The first\n\/\/ handler in the \"pre\" chain is guaranteed to run, but execution\n\/\/ may quit anywhere else in the chain.\n\/\/\n\/\/ If the \"pre\" chain executes completely, the main handler is executed.\n\/\/ It is skipped otherwise.\n\/\/\n\/\/ The \"post\" chain runs after the main handler, whether it is skipped\n\/\/ or not. The first handler in the \"post\" chain is guaranteed to run, but\n\/\/ execution may quit anywhere else in the chain if the quit function\n\/\/ is called.\ntype Service struct {\n\tbaseURI string\n\n\tpre []contextHandler\n\tpost []contextHandler\n\n\troutes map[string]*node\n\n\tnotFound contextHandler\n}\n\n\/\/ NewService returns a new Service with the given base URI\n\/\/ or panics if the base URI has already been registered.\nfunc NewService(baseURI string) *Service {\n\tif services[baseURI] != nil {\n\t\tpanic(\"service already registered\")\n\t}\n\n\treturn &Service{\n\t\tbaseURI: path.Join(\"\/\", baseURI, \"\/\"),\n\t\troutes: map[string]*node{},\n\t}\n}\n\nfunc addToChain(f interface{}, chain []contextHandler) []contextHandler {\n\tm := toContextHandler(f)\n\treturn append(chain, m)\n}\n\n\/\/ AddPre adds f to the end of the \"pre\" chain.\n\/\/ It panics if f cannot be converted to a contextHandler (see Service.Route).\nfunc (s *Service) AddPre(f interface{}) {\n\ts.pre = addToChain(f, s.pre)\n}\n\n\/\/ AddPost adds f to the end of the \"post\" chain.\n\/\/ It panics if f cannot be converted to a contextHandler (see Service.Route).\nfunc (s *Service) AddPost(f interface{}) {\n\ts.post = addToChain(f, s.post)\n}\n\n\/\/ Service satisfies the http.Handler interface.\nfunc (s *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\ts.ServeHTTPInContext(NewSiestaContext(), w, r)\n}\n\n\/\/ ServiceHTTPInContext serves an HTTP request within the Context c.\n\/\/ A Service will run through both of its internal chains, quitting\n\/\/ when requested.\nfunc (s *Service) ServeHTTPInContext(c Context, w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\n\tquit := false\n\tfor _, m := range s.pre {\n\t\tm(c, w, r, func() {\n\t\t\tquit = true\n\t\t})\n\n\t\tif quit {\n\t\t\t\/\/ Break out of the \"pre\" loop, but\n\t\t\t\/\/ continue on.\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !quit {\n\t\t\/\/ The main handler is only run if we have not\n\t\t\/\/ been signaled to quit.\n\n\t\tif r.URL.Path != \"\/\" {\n\t\t\tr.URL.Path = strings.TrimRight(r.URL.Path, \"\/\")\n\t\t}\n\n\t\tvar (\n\t\t\thandler contextHandler\n\t\t\tparams routeParams\n\t\t)\n\n\t\t\/\/ Lookup the tree for this method\n\t\trouteNode, ok := s.routes[r.Method]\n\n\t\tif ok {\n\t\t\thandler, params, _ = routeNode.getValue(r.URL.Path)\n\t\t}\n\n\t\tif handler == nil {\n\t\t\tif s.notFound != nil {\n\t\t\t\t\/\/ Use user-defined handler.\n\t\t\t\ts.notFound(c, w, r, func() {})\n\t\t\t} else {\n\t\t\t\t\/\/ Default to the net\/http NotFoundHandler.\n\t\t\t\thttp.NotFoundHandler().ServeHTTP(w, r)\n\t\t\t}\n\t\t} else {\n\t\t\tfor _, p := range params {\n\t\t\t\tr.Form.Set(p.Key, p.Value)\n\t\t\t}\n\n\t\t\thandler(c, w, r, func() {\n\t\t\t\tquit = true\n\t\t\t})\n\t\t}\n\t}\n\n\tquit = false\n\tfor _, m := range s.post {\n\t\tm(c, w, r, func() {\n\t\t\tquit = true\n\t\t})\n\n\t\tif quit {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ Route adds a new route to the Service.\n\/\/ f must be a function with one of the following signatures:\n\/\/\n\/\/ func(http.ResponseWriter, *http.Request)\n\/\/ func(http.ResponseWriter, *http.Request, func())\n\/\/ func(Context, http.ResponseWriter, *http.Request)\n\/\/ func(Context, http.ResponseWriter, *http.Request, func())\n\/\/\n\/\/ Note that Context is an interface type defined in this package.\n\/\/ The last argument is a function which is called to signal the\n\/\/ quitting of the current execution sequence.\nfunc (s *Service) Route(verb, uriPath, usage string, f interface{}) {\n\thandler := toContextHandler(f)\n\n\tif n := s.routes[verb]; n == nil {\n\t\ts.routes[verb] = &node{}\n\t}\n\n\ts.routes[verb].addRoute(path.Join(s.baseURI, strings.TrimRight(uriPath, \"\/\")), handler)\n}\n\n\/\/ SetNotFound sets the handler for all paths that do not\n\/\/ match any existing routes. It accepts the same function\n\/\/ signatures that Route does with the addition of `nil`.\nfunc (s *Service) SetNotFound(f interface{}) {\n\tif f == nil {\n\t\ts.notFound = nil\n\t\treturn\n\t}\n\n\thandler := toContextHandler(f)\n\ts.notFound = handler\n}\n\n\/\/ Register registers s by adding it as a handler to the\n\/\/ DefaultServeMux in the net\/http package.\nfunc (s *Service) Register() {\n\thttp.Handle(s.baseURI, s)\n}\n<|endoftext|>"} {"text":"<commit_before>package session\n\nimport (\n\t_ \"net\/http\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype Session struct {\n\tid string\n\tstore Store\n\tmutex sync.Mutex\n\tdata map[string]interface{}\n\texpiresAt time.Time\n}\n\nfunc (s *Session) Id() string {\n\treturn s.id\n}\n\nfunc (s *Session) Has(key string) bool {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\t_, ok := s.data[key]\n\treturn ok\n}\n\nfunc (s *Session) Get(key string) interface{} {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\treturn s.data[key]\n}\n\nfunc (s *Session) Set(key string, value interface{}) {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\ts.data[key] = value\n}\n\nfunc (s *Session) Unset(key string) {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\tdelete(s.data, key)\n}\n\nfunc (s *Session) ExpiresAt() time.Time {\n\treturn s.expiresAt\n}\n\nfunc NewSession(id string) *Session {\n\treturn &Session{\n\t\tid: id,\n\t\tdata: make(map[string]interface{}),\n\t}\n}\n\ntype Store interface {\n\t\/\/ Check session existence\n\tHas(id string) (bool, error)\n\n\t\/\/ Load a session by id, if not found, return nil\n\tLoad(id string) (*Session, error)\n\n\t\/\/ Delete a session\n\tRevoke(id string) error\n\n\t\/\/ Extend session expiry\n\tRenew(session *Session) error\n\n\t\/\/ Save a session. Session expiry gets updated too\n\tSave(session *Session) error\n\n\t\/\/ Clean up\n\tClose() error\n}\n<commit_msg>Remve unused package.<commit_after>package session\n\nimport (\n\t\"sync\"\n\t\"time\"\n)\n\ntype Session struct {\n\tid string\n\tstore Store\n\tmutex sync.Mutex\n\tdata map[string]interface{}\n\texpiresAt time.Time\n}\n\nfunc (s *Session) Id() string {\n\treturn s.id\n}\n\nfunc (s *Session) Has(key string) bool {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\t_, ok := s.data[key]\n\treturn ok\n}\n\nfunc (s *Session) Get(key string) interface{} {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\treturn s.data[key]\n}\n\nfunc (s *Session) Set(key string, value interface{}) {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\ts.data[key] = value\n}\n\nfunc (s *Session) Unset(key string) {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\tdelete(s.data, key)\n}\n\nfunc (s *Session) ExpiresAt() time.Time {\n\treturn s.expiresAt\n}\n\nfunc NewSession(id string) *Session {\n\treturn &Session{\n\t\tid: id,\n\t\tdata: make(map[string]interface{}),\n\t}\n}\n\ntype Store interface {\n\t\/\/ Check session existence\n\tHas(id string) (bool, error)\n\n\t\/\/ Load a session by id, if not found, return nil\n\tLoad(id string) (*Session, error)\n\n\t\/\/ Delete a session\n\tRevoke(id string) error\n\n\t\/\/ Extend session expiry\n\tRenew(session *Session) error\n\n\t\/\/ Save a session. Session expiry gets updated too\n\tSave(session *Session) error\n\n\t\/\/ Clean up\n\tClose() error\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n)\n\ntype session struct {\n\tsp *subProcess\n\tconn *connection\n\tmessages chan string\n}\n\nfunc NewSession(conn *connection) (s *session, err error) {\n\ts = &session{\n\t\tconn: conn,\n\t}\n\n\ts.sendMessage(\"hilo\")\n\n\tsp, err := NewSubProcess(\"bash\")\n\tif err != nil {\n\t\treturn\n\t}\n\ts.sp = sp\n\n\tgo func() {\n\t\tline := \"\"\n\t\tfor {\n\t\t\tmessage := conn.Read()\n\t\t\tif len(message) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif message[0] == 13 {\n\t\t\t\tfmt.Println(\"message received: \", line)\n\t\t\t\tsp.input <- line + \"\\n\"\n\t\t\t\tline = \"\"\n\t\t\t} else {\n\t\t\t\tmsg := string(message)\n\t\t\t\tline += msg\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor line := range sp.output {\n\t\t\tfmt.Print(line)\n\t\t\tconn.Write(line)\n\t\t}\n\t}()\n\n\treturn\n}\n\nfunc (s *session) sendMessage(message string) {\n\ts.conn.Write(message)\n}\n\nfunc (s *session) End() {\n\ts.sp.kill()\n}\n<commit_msg>terminate session when client disappears<commit_after>package main\n\nimport (\n\t\"fmt\"\n)\n\ntype session struct {\n\tsp *subProcess\n\tconn *connection\n\tmessages chan string\n}\n\nfunc NewSession(conn *connection) (s *session, err error) {\n\ts = &session{\n\t\tconn: conn,\n\t}\n\n\ts.sendMessage(\"hilo\")\n\n\tsp, err := NewSubProcess(\"bash\")\n\tif err != nil {\n\t\treturn\n\t}\n\ts.sp = sp\n\n\tgo func() {\n\t\tline := \"\"\n\t\tfor {\n\t\t\tmessage := conn.Read()\n\t\t\tif len(message) == 0 {\n\t\t\t\tfmt.Println(\"connection closed, terminating subprocess\")\n\t\t\t\tsp.kill()\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif message[0] == 13 {\n\t\t\t\tfmt.Println(\"message received: \", line)\n\t\t\t\tsp.input <- line + \"\\n\"\n\t\t\t\tline = \"\"\n\t\t\t} else {\n\t\t\t\tmsg := string(message)\n\t\t\t\tline += msg\n\t\t\t}\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tfor line := range sp.output {\n\t\t\tfmt.Print(line)\n\t\t\tconn.Write(line)\n\t\t}\n\t}()\n\n\treturn\n}\n\nfunc (s *session) sendMessage(message string) {\n\ts.conn.Write(message)\n}\n\nfunc (s *session) End() {\n\ts.sp.kill()\n}\n<|endoftext|>"} {"text":"<commit_before>package qb\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n)\n\n\/\/ NewSession generates a new Session given engine and returns session pointer\nfunc NewSession(metadata *MetaData) *Session {\n\treturn &Session{\n\t\tqueries: []*Query{},\n\t\tmapper: NewMapper(metadata.Engine().Driver()),\n\t\tmetadata: metadata,\n\t}\n}\n\n\/\/ Session is the composition of engine connection & orm mappings\ntype Session struct {\n\tqueries []*Query\n\tmapper *Mapper\n\tmetadata *MetaData\n\ttx *sql.Tx\n}\n\nfunc (s *Session) add(query *Query) {\n\tvar err error\n\tif s.tx == nil {\n\t\ts.tx, err = s.metadata.Engine().DB().Begin()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\ts.queries = append(s.queries, query)\n}\n\n\/\/ Delete adds a single delete query to the session\nfunc (s *Session) Delete(model interface{}) {\n\n\tkv := s.mapper.ConvertStructToMap(model)\n\n\td := s.metadata.Table(s.mapper.ModelName(model)).Delete()\n\n\tands := []string{}\n\tbindings := []interface{}{}\n\tfor k, v := range kv {\n\t\tands = append(ands, fmt.Sprintf(\"%s = %s\", s.mapper.ColName(k), d.Placeholder()))\n\t\tbindings = append(bindings, v)\n\t}\n\n\tdel := d.Where(d.And(ands...), bindings...).Query()\n\ts.add(del)\n}\n\n\/\/ Add adds a single query to the session. The query must be insert or update\nfunc (s *Session) Add(model interface{}) {\n\n\trawMap := s.mapper.ConvertStructToMap(model)\n\n\tkv := map[string]interface{}{}\n\n\tfor k, v := range rawMap {\n\t\tkv[s.mapper.ColName(k)] = v\n\t}\n\n\tq := s.metadata.Table(s.mapper.ModelName(model)).Insert(kv).Query()\n\ts.add(q)\n}\n\n\/\/ AddAll adds multiple models an adds an insert statement to current queries\nfunc (s *Session) AddAll(models ...interface{}) {\n\tfor _, m := range models {\n\t\ts.Add(m)\n\t}\n}\n\n\/\/ Commit commits the current transaction with queries\nfunc (s *Session) Commit() error {\n\n\tfor _, q := range s.queries {\n\t\t_, err := s.tx.Exec(q.SQL(), q.Bindings()...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn s.tx.Commit()\n}\n\n\/\/ Select makers\n<commit_msg>fix a bug that causes error when multiple transactions are executed in order<commit_after>package qb\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n)\n\n\/\/ NewSession generates a new Session given engine and returns session pointer\nfunc NewSession(metadata *MetaData) *Session {\n\treturn &Session{\n\t\tqueries: []*Query{},\n\t\tmapper: NewMapper(metadata.Engine().Driver()),\n\t\tmetadata: metadata,\n\t}\n}\n\n\/\/ Session is the composition of engine connection & orm mappings\ntype Session struct {\n\tqueries []*Query\n\tmapper *Mapper\n\tmetadata *MetaData\n\ttx *sql.Tx\n}\n\nfunc (s *Session) add(query *Query) {\n\tvar err error\n\tif s.tx == nil {\n\t\ts.queries = []*Query{}\n\t\ts.tx, err = s.metadata.Engine().DB().Begin()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\ts.queries = append(s.queries, query)\n}\n\n\/\/ Delete adds a single delete query to the session\nfunc (s *Session) Delete(model interface{}) {\n\n\tkv := s.mapper.ConvertStructToMap(model)\n\n\ttName := s.mapper.ModelName(model)\n\n\td := s.metadata.Table(tName).Delete()\n\tands := []string{}\n\tbindings := []interface{}{}\n\n\tpcols := s.metadata.Table(tName).PrimaryKey()\n\n\t\/\/ if table has primary key\n\tif len(pcols) > 0 {\n\n\t\tfor _, pk := range pcols {\n\t\t\t\/\/ find\n\t\t\tb := kv[pk]\n\t\t\tands = append(ands, fmt.Sprintf(\"%s = %s\", pk, d.Placeholder()))\n\t\t\tbindings = append(bindings, b)\n\t\t}\n\n\t} else {\n\t\tfor k, v := range kv {\n\t\t\tands = append(ands, fmt.Sprintf(\"%s = %s\", s.mapper.ColName(k), d.Placeholder()))\n\t\t\tbindings = append(bindings, v)\n\t\t}\n\t}\n\n\tdel := d.Where(d.And(ands...), bindings...).Query()\n\ts.add(del)\n}\n\n\/\/ Add adds a single query to the session. The query must be insert or update\nfunc (s *Session) Add(model interface{}) {\n\n\trawMap := s.mapper.ConvertStructToMap(model)\n\n\tkv := map[string]interface{}{}\n\n\tfor k, v := range rawMap {\n\t\tkv[s.mapper.ColName(k)] = v\n\t}\n\n\tq := s.metadata.Table(s.mapper.ModelName(model)).Insert(kv).Query()\n\ts.add(q)\n}\n\n\/\/ AddAll adds multiple models an adds an insert statement to current queries\nfunc (s *Session) AddAll(models ...interface{}) {\n\tfor _, m := range models {\n\t\ts.Add(m)\n\t}\n}\n\n\/\/ Commit commits the current transaction with queries\nfunc (s *Session) Commit() error {\n\n\tfor _, q := range s.queries {\n\t\t_, err := s.tx.Exec(q.SQL(), q.Bindings()...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr := s.tx.Commit()\n\ts.tx = nil\n\treturn err\n}\n\n\/\/ Select makers\n<|endoftext|>"} {"text":"<commit_before>\/\/ sudo hexdump -v -e '5\/1 \"%02x \" \"\\n\"' \/dev\/hidraw4\n\/\/ lsusb\n\/\/ http:\/\/stackoverflow.com\/questions\/15949163\/read-from-dev-input\n\/\/ http:\/\/reactivated.net\/writing_udev_rules.html\n\/\/ \/lib\/udev\/rules.d\/\n\/\/ sudo service udev restart\n\/\/ sudo udevadm control --reload-rules\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"time\"\n)\n\ntype ReadValue struct {\n\tJog int8\n\tWheel uint8\n\tWheelDelta int8\n\tButtons [5]bool\n}\n\ntype Mode int\n\nconst (\n\tModeScroll Mode = iota\n\tModeTab\n\tModeSelect\n)\n\nvar (\n\tmode = ModeScroll\n\tjog int8 = 0\n)\n\nfunc ToReadValue(buf []byte) *ReadValue {\n\tb := [5]bool{}\n\tb[0] = buf[3]&0x10 != 0\n\tb[1] = buf[3]&0x20 != 0\n\tb[2] = buf[3]&0x40 != 0\n\tb[3] = buf[3]&0x80 != 0\n\tb[4] = buf[4]&0x01 != 0\n\treturn &ReadValue{\n\t\tJog: int8(buf[0]),\n\t\tWheel: uint8(buf[1]),\n\t\tButtons: b,\n\t}\n}\n\nfunc findDevice() string {\n\treturn \"\/dev\/hidraw5\"\n}\n\nfunc action(v *ReadValue) {\n\tfmt.Printf(\"val: %#v\\n\", v)\n\tfmt.Printf(\"mode: %#v\\n\", mode)\n\n\tswitch {\n\tcase v.Buttons[0]:\n\t\texec.Command(\"xdotool\", \"key\", \"Return\").Run()\n\tcase v.Buttons[1]:\n\t\tmode = ModeScroll\n\tcase v.Buttons[2]:\n\t\tmode = ModeTab\n\tcase v.Buttons[3]:\n\t\tmode = ModeSelect\n\tcase v.Buttons[4]:\n\t\texec.Command(\"xdotool\", \"key\", \"Return\").Run()\n\t}\n\n\tif v.WheelDelta > 0 {\n\t\tswitch mode {\n\t\tcase ModeScroll:\n\t\t\texec.Command(\"xdotool\", \"click\", \"5\").Run()\n\t\tcase ModeTab:\n\t\t\texec.Command(\"xdotool\", \"key\", \"Ctrl+Tab\").Run()\n\t\tcase ModeSelect:\n\t\t\texec.Command(\"xdotool\", \"key\", \"Tab\").Run()\n\t\t}\n\t}\n\tif v.WheelDelta < 0 {\n\t\tswitch mode {\n\t\tcase ModeScroll:\n\t\t\texec.Command(\"xdotool\", \"click\", \"4\").Run()\n\t\tcase ModeTab:\n\t\t\texec.Command(\"xdotool\", \"key\", \"Ctrl+Shift+Tab\").Run()\n\t\tcase ModeSelect:\n\t\t\texec.Command(\"xdotool\", \"key\", \"Shift+Tab\").Run()\n\t\t}\n\t}\n\tjog = v.Jog\n}\n\nfunc loop(c <-chan *ReadValue) {\n\tp := <-c\n\tfor {\n\t\tselect {\n\t\tcase v := <-c:\n\t\t\tv.WheelDelta = int8(v.Wheel) - int8(p.Wheel)\n\t\t\taction(v)\n\t\t\tp = v\n\t\t}\n\t}\n}\n\nfunc jogLoop() {\n\tt := time.NewTicker(10 * time.Millisecond)\n\ti := 0\n\tfor _ = range t.C {\n\t\ti++\n\t\tthr := (1 << 4) - (1 << uint(abs(int(jog))))\n\t\tif i > thr {\n\t\t\tif jog > 0 {\n\t\t\t\tlog.Printf(\"tick %d %d\/%d\", jog, i, thr)\n\t\t\t\texec.Command(\"xdotool\", \"click\", \"5\").Run()\n\t\t\t}\n\t\t\tif jog < 0 {\n\t\t\t\tlog.Printf(\"tick %d %d\/%d\", jog, i, thr)\n\t\t\t\texec.Command(\"xdotool\", \"click\", \"4\").Run()\n\t\t\t}\n\t\t\ti = 0\n\t\t}\n\t}\n}\n\nfunc abs(x int) int {\n\tif x >= 0 {\n\t\treturn x\n\t} else {\n\t\treturn -x\n\t}\n}\n\nfunc generate(c chan<- *ReadValue) {\n\tf, err := os.Open(findDevice())\n\tif err != nil {\n\t\tlog.Fatalf(\"invalid file name: %s\", findDevice())\n\t\treturn\n\t}\n\n\tbuf := make([]byte, 5)\n\tfor {\n\t\tf.Read(buf)\n\t\tv := ToReadValue(buf)\n\t\tc <- v\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tgo jogLoop()\n\n\tc := make(chan *ReadValue)\n\tgo generate(c)\n\tloop(c)\n\n}\n<commit_msg>use evdev -- not working yet<commit_after>\/\/ sudo hexdump -v -e '5\/1 \"%02x \" \"\\n\"' \/dev\/hidraw4\n\/\/ lsusb\n\/\/ http:\/\/stackoverflow.com\/questions\/15949163\/read-from-dev-input\n\/\/ http:\/\/reactivated.net\/writing_udev_rules.html\n\/\/ \/lib\/udev\/rules.d\/\n\/\/ sudo service udev restart\n\/\/ sudo udevadm control --reload-rules\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/jteeuwen\/evdev\"\n\t\"log\"\n\t\"os\/exec\"\n\t\"time\"\n)\n\ntype ReadValue struct {\n\tJog int8\n\tWheel uint8\n\tWheelDelta int8\n\tButtons [5]bool\n}\n\ntype Mode int\n\nconst (\n\tModeScroll Mode = iota\n\tModeTab\n\tModeSelect\n)\n\nvar (\n\tmode = ModeScroll\n\tjog int8 = 0\n)\n\nfunc findDevice() string {\n\treturn \"\/dev\/input\/by-id\/usb-Contour_Design_ShuttleXpress-event-if00\"\n}\n\nfunc action(v *ReadValue) {\n\tfmt.Printf(\"val: %#v\\n\", v)\n\tfmt.Printf(\"mode: %#v\\n\", mode)\n\n\tswitch {\n\tcase v.Buttons[0]:\n\t\texec.Command(\"xdotool\", \"key\", \"Return\").Run()\n\tcase v.Buttons[1]:\n\t\tmode = ModeScroll\n\tcase v.Buttons[2]:\n\t\tmode = ModeTab\n\tcase v.Buttons[3]:\n\t\tmode = ModeSelect\n\tcase v.Buttons[4]:\n\t\texec.Command(\"xdotool\", \"key\", \"Return\").Run()\n\t}\n\n\tif v.WheelDelta > 0 {\n\t\tswitch mode {\n\t\tcase ModeScroll:\n\t\t\texec.Command(\"xdotool\", \"click\", \"5\").Run()\n\t\tcase ModeTab:\n\t\t\texec.Command(\"xdotool\", \"key\", \"Ctrl+Tab\").Run()\n\t\tcase ModeSelect:\n\t\t\texec.Command(\"xdotool\", \"key\", \"Tab\").Run()\n\t\t}\n\t}\n\tif v.WheelDelta < 0 {\n\t\tswitch mode {\n\t\tcase ModeScroll:\n\t\t\texec.Command(\"xdotool\", \"click\", \"4\").Run()\n\t\tcase ModeTab:\n\t\t\texec.Command(\"xdotool\", \"key\", \"Ctrl+Shift+Tab\").Run()\n\t\tcase ModeSelect:\n\t\t\texec.Command(\"xdotool\", \"key\", \"Shift+Tab\").Run()\n\t\t}\n\t}\n\tjog = v.Jog\n}\n\nfunc loop(c <-chan *ReadValue) {\n\tp := <-c\n\tfor {\n\t\tselect {\n\t\tcase v := <-c:\n\t\t\tv.WheelDelta = int8(v.Wheel) - int8(p.Wheel)\n\t\t\taction(v)\n\t\t\tp = v\n\t\t}\n\t}\n}\n\nfunc jogLoop() {\n\tt := time.NewTicker(10 * time.Millisecond)\n\ti := 0\n\tfor _ = range t.C {\n\t\ti++\n\t\tthr := (1 << 4) - (1 << uint(abs(int(jog))))\n\t\tif i > thr {\n\t\t\tif jog > 0 {\n\t\t\t\tlog.Printf(\"tick %d %d\/%d\", jog, i, thr)\n\t\t\t\texec.Command(\"xdotool\", \"click\", \"5\").Run()\n\t\t\t}\n\t\t\tif jog < 0 {\n\t\t\t\tlog.Printf(\"tick %d %d\/%d\", jog, i, thr)\n\t\t\t\texec.Command(\"xdotool\", \"click\", \"4\").Run()\n\t\t\t}\n\t\t\ti = 0\n\t\t}\n\t}\n}\n\nfunc abs(x int) int {\n\tif x >= 0 {\n\t\treturn x\n\t} else {\n\t\treturn -x\n\t}\n}\n\nfunc generate(c chan<- *ReadValue) {\n\tdev, err := evdev.Open(findDevice())\n\tif err != nil {\n\t\tlog.Fatalf(\"error opening device: %v\", err)\n\t\treturn\n\t}\n\n\tfor evt := range dev.Inbox {\n\t\tlog.Printf(\"Input event: %#v\", evt)\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tgo jogLoop()\n\n\tc := make(chan *ReadValue)\n\tgo generate(c)\n\tloop(c)\n\n}\n<|endoftext|>"} {"text":"<commit_before>package sitemap\n\nimport (\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"time\"\n)\n\n\/\/ Index is a structure of <sitemapindex>\ntype Index struct {\n\tXMLName xml.Name `xml:\"sitemapindex\"`\n\tSitemap []parts `xml:\"sitemap\"`\n}\n\n\/\/ parts is a structure of <sitemap> in <sitemapindex>\ntype parts struct {\n\tLoc string `xml:\"loc\"`\n\tLastMod string `xml:\"lastmod\"`\n}\n\n\/\/ Sitemap is a structure of <sitemap>\ntype Sitemap struct {\n\tXMLName xml.Name `xml:\"urlset\"`\n\tURL []URL `xml:\"url\"`\n}\n\n\/\/ URL is a structure of <url> in <sitemap>\ntype URL struct {\n\tLoc string `xml:\"loc\"`\n\tLastMod string `xml:\"lastmod\"`\n\tChangeFreq string `xml:\"changefreq\"`\n\tPriority float32 `xml:\"priority\"`\n}\n\n\/\/ fetch is page acquisition function\nvar fetch = func(URL string, options interface{}) ([]byte, error) {\n\tvar body []byte\n\n\tres, err := http.Get(URL)\n\tif err != nil {\n\t\treturn body, err\n\t}\n\tdefer res.Body.Close()\n\n\treturn ioutil.ReadAll(res.Body)\n}\n\n\/\/ Time interval to be used in Index.get\nvar interval = time.Second\n\n\/\/ Get sitemap data from URL\nfunc Get(URL string, options interface{}) (Sitemap, error) {\n\tdata, err := fetch(URL, options)\n\tif err != nil {\n\t\treturn Sitemap{}, err\n\t}\n\n\tidx, idxErr := ParseIndex(data)\n\tsmap, smapErr := Parse(data)\n\n\tif idxErr != nil && smapErr != nil {\n\t\terr = errors.New(\"URL is not a sitemap or sitemapindex\")\n\t\treturn Sitemap{}, err\n\t}\n\n\tif idxErr == nil {\n\t\tsmap, err = idx.get(data, options)\n\t\tif err != nil {\n\t\t\treturn Sitemap{}, err\n\t\t}\n\t}\n\n\treturn smap, err\n}\n\n\/\/ Get Sitemap data from sitemapindex file\nfunc (s *Index) get(data []byte, options interface{}) (Sitemap, error) {\n\tidx, err := ParseIndex(data)\n\tif err != nil {\n\t\treturn Sitemap{}, err\n\t}\n\n\tvar smap Sitemap\n\tfor _, s := range idx.Sitemap {\n\t\ttime.Sleep(interval)\n\t\tdata, err := fetch(s.Loc, options)\n\t\tif err != nil {\n\t\t\treturn smap, err\n\t\t}\n\n\t\terr = xml.Unmarshal(data, &smap)\n\t\tif err != nil {\n\t\t\treturn smap, err\n\t\t}\n\t}\n\n\treturn smap, err\n}\n\n\/\/ Parse create Sitemap data from text\nfunc Parse(data []byte) (smap Sitemap, err error) {\n\terr = xml.Unmarshal(data, &smap)\n\treturn\n}\n\n\/\/ ParseIndex create Index data from text\nfunc ParseIndex(data []byte) (idx Index, err error) {\n\terr = xml.Unmarshal(data, &idx)\n\treturn\n}\n\n\/\/ SetInterval change Time interval to be used in Index.get\nfunc SetInterval(time time.Duration) {\n\tinterval = time\n}\n\n\/\/ SetFetch change fetch closure\nfunc SetFetch(f func(URL string, options interface{}) ([]byte, error)) {\n\tfetch = f\n}\n<commit_msg>Refactor Get function<commit_after>package sitemap\n\nimport (\n\t\"encoding\/xml\"\n\t\"errors\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"time\"\n)\n\n\/\/ Index is a structure of <sitemapindex>\ntype Index struct {\n\tXMLName xml.Name `xml:\"sitemapindex\"`\n\tSitemap []parts `xml:\"sitemap\"`\n}\n\n\/\/ parts is a structure of <sitemap> in <sitemapindex>\ntype parts struct {\n\tLoc string `xml:\"loc\"`\n\tLastMod string `xml:\"lastmod\"`\n}\n\n\/\/ Sitemap is a structure of <sitemap>\ntype Sitemap struct {\n\tXMLName xml.Name `xml:\"urlset\"`\n\tURL []URL `xml:\"url\"`\n}\n\n\/\/ URL is a structure of <url> in <sitemap>\ntype URL struct {\n\tLoc string `xml:\"loc\"`\n\tLastMod string `xml:\"lastmod\"`\n\tChangeFreq string `xml:\"changefreq\"`\n\tPriority float32 `xml:\"priority\"`\n}\n\n\/\/ fetch is page acquisition function\nvar fetch = func(URL string, options interface{}) ([]byte, error) {\n\tvar body []byte\n\n\tres, err := http.Get(URL)\n\tif err != nil {\n\t\treturn body, err\n\t}\n\tdefer res.Body.Close()\n\n\treturn ioutil.ReadAll(res.Body)\n}\n\n\/\/ Time interval to be used in Index.get\nvar interval = time.Second\n\n\/\/ Get sitemap data from URL\nfunc Get(URL string, options interface{}) (Sitemap, error) {\n\tdata, err := fetch(URL, options)\n\tif err != nil {\n\t\treturn Sitemap{}, err\n\t}\n\n\tidx, idxErr := ParseIndex(data)\n\tsmap, smapErr := Parse(data)\n\n\tif idxErr != nil && smapErr != nil {\n\t\treturn Sitemap{}, errors.New(\"URL is not a sitemap or sitemapindex\")\n\t} else if idxErr != nil {\n\t\treturn smap, nil\n\t}\n\n\tsmap, err = idx.get(data, options)\n\tif err != nil {\n\t\treturn Sitemap{}, err\n\t}\n\n\treturn smap, nil\n}\n\n\/\/ Get Sitemap data from sitemapindex file\nfunc (s *Index) get(data []byte, options interface{}) (Sitemap, error) {\n\tidx, err := ParseIndex(data)\n\tif err != nil {\n\t\treturn Sitemap{}, err\n\t}\n\n\tvar smap Sitemap\n\tfor _, s := range idx.Sitemap {\n\t\ttime.Sleep(interval)\n\t\tdata, err := fetch(s.Loc, options)\n\t\tif err != nil {\n\t\t\treturn smap, err\n\t\t}\n\n\t\terr = xml.Unmarshal(data, &smap)\n\t\tif err != nil {\n\t\t\treturn smap, err\n\t\t}\n\t}\n\n\treturn smap, err\n}\n\n\/\/ Parse create Sitemap data from text\nfunc Parse(data []byte) (smap Sitemap, err error) {\n\terr = xml.Unmarshal(data, &smap)\n\treturn\n}\n\n\/\/ ParseIndex create Index data from text\nfunc ParseIndex(data []byte) (idx Index, err error) {\n\terr = xml.Unmarshal(data, &idx)\n\treturn\n}\n\n\/\/ SetInterval change Time interval to be used in Index.get\nfunc SetInterval(time time.Duration) {\n\tinterval = time\n}\n\n\/\/ SetFetch change fetch closure\nfunc SetFetch(f func(URL string, options interface{}) ([]byte, error)) {\n\tfetch = f\n}\n<|endoftext|>"} {"text":"<commit_before>package local \/\/ import \"github.com\/docker\/docker\/volume\/local\"\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/docker\/docker\/pkg\/idtools\"\n\t\"github.com\/docker\/docker\/pkg\/mount\"\n)\n\nfunc TestGetAddress(t *testing.T) {\n\tcases := map[string]string{\n\t\t\"addr=11.11.11.1\": \"11.11.11.1\",\n\t\t\" \": \"\",\n\t\t\"addr=\": \"\",\n\t\t\"addr=2001:db8::68\": \"2001:db8::68\",\n\t}\n\tfor name, success := range cases {\n\t\tv := getAddress(name)\n\t\tif v != success {\n\t\t\tt.Errorf(\"Test case failed for %s actual: %s expected : %s\", name, v, success)\n\t\t}\n\t}\n\n}\n\nfunc TestRemove(t *testing.T) {\n\t\/\/ TODO Windows: Investigate why this test fails on Windows under CI\n\t\/\/ but passes locally.\n\tif runtime.GOOS == \"windows\" {\n\t\tt.Skip(\"Test failing on Windows CI\")\n\t}\n\trootDir, err := ioutil.TempDir(\"\", \"local-volume-test\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(rootDir)\n\n\tr, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvol, err := r.Create(\"testing\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := r.Remove(vol); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvol, err = r.Create(\"testing2\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := os.RemoveAll(vol.Path()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := r.Remove(vol); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err := os.Stat(vol.Path()); err != nil && !os.IsNotExist(err) {\n\t\tt.Fatal(\"volume dir not removed\")\n\t}\n\n\tif l, _ := r.List(); len(l) != 0 {\n\t\tt.Fatal(\"expected there to be no volumes\")\n\t}\n}\n\nfunc TestInitializeWithVolumes(t *testing.T) {\n\trootDir, err := ioutil.TempDir(\"\", \"local-volume-test\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(rootDir)\n\n\tr, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvol, err := r.Create(\"testing\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tr, err = New(rootDir, idtools.IDPair{UID: 0, GID: 0})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tv, err := r.Get(vol.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif v.Path() != vol.Path() {\n\t\tt.Fatal(\"expected to re-initialize root with existing volumes\")\n\t}\n}\n\nfunc TestCreate(t *testing.T) {\n\trootDir, err := ioutil.TempDir(\"\", \"local-volume-test\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(rootDir)\n\n\tr, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcases := map[string]bool{\n\t\t\"name\": true,\n\t\t\"name-with-dash\": true,\n\t\t\"name_with_underscore\": true,\n\t\t\"name\/with\/slash\": false,\n\t\t\"name\/with\/..\/..\/slash\": false,\n\t\t\".\/name\": false,\n\t\t\"..\/name\": false,\n\t\t\".\/\": false,\n\t\t\"..\/\": false,\n\t\t\"~\": false,\n\t\t\".\": false,\n\t\t\"..\": false,\n\t\t\"...\": false,\n\t}\n\n\tfor name, success := range cases {\n\t\tv, err := r.Create(name, nil)\n\t\tif success {\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif v.Name() != name {\n\t\t\t\tt.Fatalf(\"Expected volume with name %s, got %s\", name, v.Name())\n\t\t\t}\n\t\t} else {\n\t\t\tif err == nil {\n\t\t\t\tt.Fatalf(\"Expected error creating volume with name %s, got nil\", name)\n\t\t\t}\n\t\t}\n\t}\n\n\tr, err = New(rootDir, idtools.IDPair{UID: 0, GID: 0})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestValidateName(t *testing.T) {\n\tr := &Root{}\n\tnames := map[string]bool{\n\t\t\"x\": false,\n\t\t\"\/testvol\": false,\n\t\t\"thing.d\": true,\n\t\t\"hello-world\": true,\n\t\t\".\/hello\": false,\n\t\t\".hello\": false,\n\t}\n\n\tfor vol, expected := range names {\n\t\terr := r.validateName(vol)\n\t\tif expected && err != nil {\n\t\t\tt.Fatalf(\"expected %s to be valid got %v\", vol, err)\n\t\t}\n\t\tif !expected && err == nil {\n\t\t\tt.Fatalf(\"expected %s to be invalid\", vol)\n\t\t}\n\t}\n}\n\nfunc TestCreateWithOpts(t *testing.T) {\n\tif runtime.GOOS == \"windows\" {\n\t\tt.Skip()\n\t}\n\trootDir, err := ioutil.TempDir(\"\", \"local-volume-test\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(rootDir)\n\n\tr, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err := r.Create(\"test\", map[string]string{\"invalidopt\": \"notsupported\"}); err == nil {\n\t\tt.Fatal(\"expected invalid opt to cause error\")\n\t}\n\n\tvol, err := r.Create(\"test\", map[string]string{\"device\": \"tmpfs\", \"type\": \"tmpfs\", \"o\": \"size=1m,uid=1000\"})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tv := vol.(*localVolume)\n\n\tdir, err := v.Mount(\"1234\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() {\n\t\tif err := v.Unmount(\"1234\"); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\tmountInfos, err := mount.GetMounts(nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar found bool\n\tfor _, info := range mountInfos {\n\t\tif info.Mountpoint == dir {\n\t\t\tfound = true\n\t\t\tif info.Fstype != \"tmpfs\" {\n\t\t\t\tt.Fatalf(\"expected tmpfs mount, got %q\", info.Fstype)\n\t\t\t}\n\t\t\tif info.Source != \"tmpfs\" {\n\t\t\t\tt.Fatalf(\"expected tmpfs mount, got %q\", info.Source)\n\t\t\t}\n\t\t\tif !strings.Contains(info.VfsOpts, \"uid=1000\") {\n\t\t\t\tt.Fatalf(\"expected mount info to have uid=1000: %q\", info.VfsOpts)\n\t\t\t}\n\t\t\tif !strings.Contains(info.VfsOpts, \"size=1024k\") {\n\t\t\t\tt.Fatalf(\"expected mount info to have size=1024k: %q\", info.VfsOpts)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !found {\n\t\tt.Fatal(\"mount not found\")\n\t}\n\n\tif v.active.count != 1 {\n\t\tt.Fatalf(\"Expected active mount count to be 1, got %d\", v.active.count)\n\t}\n\n\t\/\/ test double mount\n\tif _, err := v.Mount(\"1234\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif v.active.count != 2 {\n\t\tt.Fatalf(\"Expected active mount count to be 2, got %d\", v.active.count)\n\t}\n\n\tif err := v.Unmount(\"1234\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif v.active.count != 1 {\n\t\tt.Fatalf(\"Expected active mount count to be 1, got %d\", v.active.count)\n\t}\n\n\tmounted, err := mount.Mounted(v.path)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !mounted {\n\t\tt.Fatal(\"expected mount to still be active\")\n\t}\n\n\tr, err = New(rootDir, idtools.IDPair{UID: 0, GID: 0})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tv2, exists := r.volumes[\"test\"]\n\tif !exists {\n\t\tt.Fatal(\"missing volume on restart\")\n\t}\n\n\tif !reflect.DeepEqual(v.opts, v2.opts) {\n\t\tt.Fatal(\"missing volume options on restart\")\n\t}\n}\n\nfunc TestRealodNoOpts(t *testing.T) {\n\trootDir, err := ioutil.TempDir(\"\", \"volume-test-reload-no-opts\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(rootDir)\n\n\tr, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err := r.Create(\"test1\", nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := r.Create(\"test2\", nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ make sure a file with `null` (.e.g. empty opts map from older daemon) is ok\n\tif err := ioutil.WriteFile(filepath.Join(rootDir, \"test2\"), []byte(\"null\"), 600); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err := r.Create(\"test3\", nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ make sure an empty opts file doesn't break us too\n\tif err := ioutil.WriteFile(filepath.Join(rootDir, \"test3\"), nil, 600); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err := r.Create(\"test4\", map[string]string{}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tr, err = New(rootDir, idtools.IDPair{UID: 0, GID: 0})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, name := range []string{\"test1\", \"test2\", \"test3\", \"test4\"} {\n\t\tv, err := r.Get(name)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tlv, ok := v.(*localVolume)\n\t\tif !ok {\n\t\t\tt.Fatalf(\"expected *localVolume got: %v\", reflect.TypeOf(v))\n\t\t}\n\t\tif lv.opts != nil {\n\t\t\tt.Fatalf(\"expected opts to be nil, got: %v\", lv.opts)\n\t\t}\n\t\tif _, err := lv.Mount(\"1234\"); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n<commit_msg>volume\/local\/TestCreateWithOpts(): use mount filter<commit_after>package local \/\/ import \"github.com\/docker\/docker\/volume\/local\"\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/docker\/docker\/pkg\/idtools\"\n\t\"github.com\/docker\/docker\/pkg\/mount\"\n)\n\nfunc TestGetAddress(t *testing.T) {\n\tcases := map[string]string{\n\t\t\"addr=11.11.11.1\": \"11.11.11.1\",\n\t\t\" \": \"\",\n\t\t\"addr=\": \"\",\n\t\t\"addr=2001:db8::68\": \"2001:db8::68\",\n\t}\n\tfor name, success := range cases {\n\t\tv := getAddress(name)\n\t\tif v != success {\n\t\t\tt.Errorf(\"Test case failed for %s actual: %s expected : %s\", name, v, success)\n\t\t}\n\t}\n\n}\n\nfunc TestRemove(t *testing.T) {\n\t\/\/ TODO Windows: Investigate why this test fails on Windows under CI\n\t\/\/ but passes locally.\n\tif runtime.GOOS == \"windows\" {\n\t\tt.Skip(\"Test failing on Windows CI\")\n\t}\n\trootDir, err := ioutil.TempDir(\"\", \"local-volume-test\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(rootDir)\n\n\tr, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvol, err := r.Create(\"testing\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := r.Remove(vol); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvol, err = r.Create(\"testing2\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := os.RemoveAll(vol.Path()); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := r.Remove(vol); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err := os.Stat(vol.Path()); err != nil && !os.IsNotExist(err) {\n\t\tt.Fatal(\"volume dir not removed\")\n\t}\n\n\tif l, _ := r.List(); len(l) != 0 {\n\t\tt.Fatal(\"expected there to be no volumes\")\n\t}\n}\n\nfunc TestInitializeWithVolumes(t *testing.T) {\n\trootDir, err := ioutil.TempDir(\"\", \"local-volume-test\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(rootDir)\n\n\tr, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvol, err := r.Create(\"testing\", nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tr, err = New(rootDir, idtools.IDPair{UID: 0, GID: 0})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tv, err := r.Get(vol.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif v.Path() != vol.Path() {\n\t\tt.Fatal(\"expected to re-initialize root with existing volumes\")\n\t}\n}\n\nfunc TestCreate(t *testing.T) {\n\trootDir, err := ioutil.TempDir(\"\", \"local-volume-test\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(rootDir)\n\n\tr, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tcases := map[string]bool{\n\t\t\"name\": true,\n\t\t\"name-with-dash\": true,\n\t\t\"name_with_underscore\": true,\n\t\t\"name\/with\/slash\": false,\n\t\t\"name\/with\/..\/..\/slash\": false,\n\t\t\".\/name\": false,\n\t\t\"..\/name\": false,\n\t\t\".\/\": false,\n\t\t\"..\/\": false,\n\t\t\"~\": false,\n\t\t\".\": false,\n\t\t\"..\": false,\n\t\t\"...\": false,\n\t}\n\n\tfor name, success := range cases {\n\t\tv, err := r.Create(name, nil)\n\t\tif success {\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif v.Name() != name {\n\t\t\t\tt.Fatalf(\"Expected volume with name %s, got %s\", name, v.Name())\n\t\t\t}\n\t\t} else {\n\t\t\tif err == nil {\n\t\t\t\tt.Fatalf(\"Expected error creating volume with name %s, got nil\", name)\n\t\t\t}\n\t\t}\n\t}\n\n\tr, err = New(rootDir, idtools.IDPair{UID: 0, GID: 0})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestValidateName(t *testing.T) {\n\tr := &Root{}\n\tnames := map[string]bool{\n\t\t\"x\": false,\n\t\t\"\/testvol\": false,\n\t\t\"thing.d\": true,\n\t\t\"hello-world\": true,\n\t\t\".\/hello\": false,\n\t\t\".hello\": false,\n\t}\n\n\tfor vol, expected := range names {\n\t\terr := r.validateName(vol)\n\t\tif expected && err != nil {\n\t\t\tt.Fatalf(\"expected %s to be valid got %v\", vol, err)\n\t\t}\n\t\tif !expected && err == nil {\n\t\t\tt.Fatalf(\"expected %s to be invalid\", vol)\n\t\t}\n\t}\n}\n\nfunc TestCreateWithOpts(t *testing.T) {\n\tif runtime.GOOS == \"windows\" {\n\t\tt.Skip()\n\t}\n\tif os.Getuid() != 0 {\n\t\tt.Skip(\"root required\")\n\t}\n\n\trootDir, err := ioutil.TempDir(\"\", \"local-volume-test\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(rootDir)\n\n\tr, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err := r.Create(\"test\", map[string]string{\"invalidopt\": \"notsupported\"}); err == nil {\n\t\tt.Fatal(\"expected invalid opt to cause error\")\n\t}\n\n\tvol, err := r.Create(\"test\", map[string]string{\"device\": \"tmpfs\", \"type\": \"tmpfs\", \"o\": \"size=1m,uid=1000\"})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tv := vol.(*localVolume)\n\n\tdir, err := v.Mount(\"1234\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer func() {\n\t\tif err := v.Unmount(\"1234\"); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}()\n\n\tmountInfos, err := mount.GetMounts(mount.SingleEntryFilter(dir))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif len(mountInfos) != 1 {\n\t\tt.Fatalf(\"expected 1 mount, found %d: %+v\", len(mountInfos), mountInfos)\n\t}\n\n\tinfo := mountInfos[0]\n\tt.Logf(\"%+v\", info)\n\tif info.Fstype != \"tmpfs\" {\n\t\tt.Fatalf(\"expected tmpfs mount, got %q\", info.Fstype)\n\t}\n\tif info.Source != \"tmpfs\" {\n\t\tt.Fatalf(\"expected tmpfs mount, got %q\", info.Source)\n\t}\n\tif !strings.Contains(info.VfsOpts, \"uid=1000\") {\n\t\tt.Fatalf(\"expected mount info to have uid=1000: %q\", info.VfsOpts)\n\t}\n\tif !strings.Contains(info.VfsOpts, \"size=1024k\") {\n\t\tt.Fatalf(\"expected mount info to have size=1024k: %q\", info.VfsOpts)\n\t}\n\n\tif v.active.count != 1 {\n\t\tt.Fatalf(\"Expected active mount count to be 1, got %d\", v.active.count)\n\t}\n\n\t\/\/ test double mount\n\tif _, err := v.Mount(\"1234\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif v.active.count != 2 {\n\t\tt.Fatalf(\"Expected active mount count to be 2, got %d\", v.active.count)\n\t}\n\n\tif err := v.Unmount(\"1234\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif v.active.count != 1 {\n\t\tt.Fatalf(\"Expected active mount count to be 1, got %d\", v.active.count)\n\t}\n\n\tmounted, err := mount.Mounted(v.path)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !mounted {\n\t\tt.Fatal(\"expected mount to still be active\")\n\t}\n\n\tr, err = New(rootDir, idtools.IDPair{UID: 0, GID: 0})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tv2, exists := r.volumes[\"test\"]\n\tif !exists {\n\t\tt.Fatal(\"missing volume on restart\")\n\t}\n\n\tif !reflect.DeepEqual(v.opts, v2.opts) {\n\t\tt.Fatal(\"missing volume options on restart\")\n\t}\n}\n\nfunc TestRealodNoOpts(t *testing.T) {\n\trootDir, err := ioutil.TempDir(\"\", \"volume-test-reload-no-opts\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(rootDir)\n\n\tr, err := New(rootDir, idtools.IDPair{UID: 0, GID: 0})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err := r.Create(\"test1\", nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif _, err := r.Create(\"test2\", nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ make sure a file with `null` (.e.g. empty opts map from older daemon) is ok\n\tif err := ioutil.WriteFile(filepath.Join(rootDir, \"test2\"), []byte(\"null\"), 600); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err := r.Create(\"test3\", nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ make sure an empty opts file doesn't break us too\n\tif err := ioutil.WriteFile(filepath.Join(rootDir, \"test3\"), nil, 600); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif _, err := r.Create(\"test4\", map[string]string{}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tr, err = New(rootDir, idtools.IDPair{UID: 0, GID: 0})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, name := range []string{\"test1\", \"test2\", \"test3\", \"test4\"} {\n\t\tv, err := r.Get(name)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tlv, ok := v.(*localVolume)\n\t\tif !ok {\n\t\t\tt.Fatalf(\"expected *localVolume got: %v\", reflect.TypeOf(v))\n\t\t}\n\t\tif lv.opts != nil {\n\t\t\tt.Fatalf(\"expected opts to be nil, got: %v\", lv.opts)\n\t\t}\n\t\tif _, err := lv.Mount(\"1234\"); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package vspherecli\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/enaml-ops\/enaml\"\n\t\"github.com\/enaml-ops\/omg-cli\/plugins\/products\/bosh-init\"\n\t\"github.com\/enaml-ops\/omg-cli\/utils\"\n)\n\n\/\/ GetFlags returns the available CLI flags\nfunc GetFlags() []cli.Flag {\n\tboshdefaults := boshinit.GetVSphereDefaults()\n\n\tboshFlags := boshinit.BoshFlags(boshdefaults)\n\tvsphereFlags := []cli.Flag{\n\t\t\/\/ vsphere specific flags\n\t\tcli.StringFlag{Name: \"vsphere-address\", Value: \"\", Usage: \"IP of the vCenter\"},\n\t\tcli.StringFlag{Name: \"vsphere-user\", Value: \"\", Usage: \"vSphere user\"},\n\t\tcli.StringFlag{Name: \"vsphere-password\", Value: \"\", Usage: \"vSphere user's password\"},\n\t\tcli.StringFlag{Name: \"vsphere-datacenter-name\", Value: \"\", Usage: \"name of the datacenter the Director will use for VM creation\"},\n\t\tcli.StringFlag{Name: \"vsphere-vm-folder\", Value: \"\", Usage: \"name of the folder created to hold VMs\"},\n\t\tcli.StringFlag{Name: \"vsphere-template-folder\", Value: \"\", Usage: \"the name of the folder created to hold stemcells\"},\n\t\tcli.StringFlag{Name: \"vsphere-datastore\", Value: \"\", Usage: \"name of the datastore the Director will use for storing VMs\"},\n\t\tcli.StringFlag{Name: \"vsphere-disk-path\", Value: \"\", Usage: \"name of the VMs folder, disk folder will be automatically created in the chosen datastore.\"},\n\t\tcli.StringSliceFlag{Name: \"vsphere-clusters\", Value: &cli.StringSlice{\"\"}, Usage: \"one or more vSphere datacenter cluster names\"},\n\t\tcli.StringFlag{Name: \"vsphere-resource-pool\", Value: \"\", Usage: \"Name of resource pool for vsphere cluster\"},\n\t\t\/\/ vsphere subnet1 flags\n\t\tcli.StringFlag{Name: \"vsphere-subnet1-name\", Usage: \"name of the vSphere network for subnet1\"},\n\t\tcli.StringFlag{Name: \"vsphere-subnet1-range\", Usage: \"CIDR range for subnet1\"},\n\t\tcli.StringFlag{Name: \"vsphere-subnet1-gateway\", Usage: \"IP of the default gateway for subnet1\"},\n\t\tcli.StringSliceFlag{Name: \"vsphere-subnet1-dns\", Usage: \"IP of the DNS server(s) for subnet1\"},\n\t}\n\tfor _, flag := range vsphereFlags {\n\t\tboshFlags = append(boshFlags, flag)\n\t}\n\treturn boshFlags\n}\n\n\/\/ GetAction returns a function action that can be registered with the CLI\nfunc GetAction(boshInitDeploy func(string)) func(c *cli.Context) error {\n\treturn func(c *cli.Context) (e error) {\n\t\tvar boshBase *boshinit.BoshBase\n\t\tif boshBase, e = boshinit.NewBoshBase(c); e != nil {\n\t\t\treturn\n\t\t}\n\t\tutils.CheckRequired(c, \"vsphere-address\", \"vsphere-user\", \"vsphere-password\", \"vsphere-datacenter-name\",\n\t\t\t\"vsphere-vm-folder\", \"vsphere-template-folder\", \"vsphere-datastore\", \"vsphere-disk-path\",\n\t\t\t\"vsphere-clusters\", \"vsphere-resource-pool\", \"vsphere-subnet1-name\", \"vsphere-subnet1-range\", \"vsphere-subnet1-range\", \"vsphere-subnet1-dns\")\n\n\t\tmanifest := boshinit.NewVSphereBosh(boshinit.VSphereInitConfig{\n\t\t\t\/\/ vsphere specific\n\t\t\tVSphereAddress: c.String(\"vsphere-address\"),\n\t\t\tVSphereUser: c.String(\"vsphere-user\"),\n\t\t\tVSpherePassword: c.String(\"vsphere-password\"),\n\t\t\tVSphereDatacenterName: c.String(\"vsphere-datacenter-name\"),\n\t\t\tVSphereVMFolder: c.String(\"vsphere-vm-folder\"),\n\t\t\tVSphereTemplateFolder: c.String(\"vsphere-template-folder\"),\n\t\t\tVSphereDataStore: c.String(\"vsphere-datastore\"),\n\t\t\tVSphereDiskPath: c.String(\"vsphere-disk-path\"),\n\t\t\tVSphereClusters: utils.ClearDefaultStringSliceValue(c.StringSlice(\"vsphere-clusters\")...),\n\t\t\tVSphereResourcePool: c.String(\"vsphere-resource-pool\"),\n\t\t\tVSphereNetworks: []boshinit.Network{boshinit.Network{\n\t\t\t\tName: c.String(\"vsphere-subnet1-name\"),\n\t\t\t\tRange: c.String(\"vsphere-subnet1-range\"),\n\t\t\t\tGateway: c.String(\"vsphere-subnet1-range\"),\n\t\t\t\tDNS: utils.ClearDefaultStringSliceValue(c.StringSlice(\"vsphere-subnet1-dns\")...),\n\t\t\t}},\n\t\t}, boshBase)\n\n\t\tif yamlString, err := enaml.Paint(manifest); err == nil {\n\n\t\t\tif c.Bool(\"print-manifest\") {\n\t\t\t\tfmt.Println(yamlString)\n\n\t\t\t} else {\n\t\t\t\tutils.DeployYaml(yamlString, boshInitDeploy)\n\t\t\t}\n\t\t} else {\n\t\t\te = err\n\t\t}\n\t\treturn\n\t}\n}\n<commit_msg>fixed bug with typo in vSphere bosh-init<commit_after>package vspherecli\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/codegangsta\/cli\"\n\t\"github.com\/enaml-ops\/enaml\"\n\t\"github.com\/enaml-ops\/omg-cli\/plugins\/products\/bosh-init\"\n\t\"github.com\/enaml-ops\/omg-cli\/utils\"\n)\n\n\/\/ GetFlags returns the available CLI flags\nfunc GetFlags() []cli.Flag {\n\tboshdefaults := boshinit.GetVSphereDefaults()\n\n\tboshFlags := boshinit.BoshFlags(boshdefaults)\n\tvsphereFlags := []cli.Flag{\n\t\t\/\/ vsphere specific flags\n\t\tcli.StringFlag{Name: \"vsphere-address\", Value: \"\", Usage: \"IP of the vCenter\"},\n\t\tcli.StringFlag{Name: \"vsphere-user\", Value: \"\", Usage: \"vSphere user\"},\n\t\tcli.StringFlag{Name: \"vsphere-password\", Value: \"\", Usage: \"vSphere user's password\"},\n\t\tcli.StringFlag{Name: \"vsphere-datacenter-name\", Value: \"\", Usage: \"name of the datacenter the Director will use for VM creation\"},\n\t\tcli.StringFlag{Name: \"vsphere-vm-folder\", Value: \"\", Usage: \"name of the folder created to hold VMs\"},\n\t\tcli.StringFlag{Name: \"vsphere-template-folder\", Value: \"\", Usage: \"the name of the folder created to hold stemcells\"},\n\t\tcli.StringFlag{Name: \"vsphere-datastore\", Value: \"\", Usage: \"name of the datastore the Director will use for storing VMs\"},\n\t\tcli.StringFlag{Name: \"vsphere-disk-path\", Value: \"\", Usage: \"name of the VMs folder, disk folder will be automatically created in the chosen datastore.\"},\n\t\tcli.StringSliceFlag{Name: \"vsphere-clusters\", Value: &cli.StringSlice{\"\"}, Usage: \"one or more vSphere datacenter cluster names\"},\n\t\tcli.StringFlag{Name: \"vsphere-resource-pool\", Value: \"\", Usage: \"Name of resource pool for vsphere cluster\"},\n\t\t\/\/ vsphere subnet1 flags\n\t\tcli.StringFlag{Name: \"vsphere-subnet1-name\", Usage: \"name of the vSphere network for subnet1\"},\n\t\tcli.StringFlag{Name: \"vsphere-subnet1-range\", Usage: \"CIDR range for subnet1\"},\n\t\tcli.StringFlag{Name: \"vsphere-subnet1-gateway\", Usage: \"IP of the default gateway for subnet1\"},\n\t\tcli.StringSliceFlag{Name: \"vsphere-subnet1-dns\", Usage: \"IP of the DNS server(s) for subnet1\"},\n\t}\n\tfor _, flag := range vsphereFlags {\n\t\tboshFlags = append(boshFlags, flag)\n\t}\n\treturn boshFlags\n}\n\n\/\/ GetAction returns a function action that can be registered with the CLI\nfunc GetAction(boshInitDeploy func(string)) func(c *cli.Context) error {\n\treturn func(c *cli.Context) (e error) {\n\t\tvar boshBase *boshinit.BoshBase\n\t\tif boshBase, e = boshinit.NewBoshBase(c); e != nil {\n\t\t\treturn\n\t\t}\n\t\tutils.CheckRequired(c, \"vsphere-address\", \"vsphere-user\", \"vsphere-password\", \"vsphere-datacenter-name\",\n\t\t\t\"vsphere-vm-folder\", \"vsphere-template-folder\", \"vsphere-datastore\", \"vsphere-disk-path\",\n\t\t\t\"vsphere-clusters\", \"vsphere-resource-pool\", \"vsphere-subnet1-name\", \"vsphere-subnet1-range\", \"vsphere-subnet1-range\", \"vsphere-subnet1-dns\")\n\n\t\tmanifest := boshinit.NewVSphereBosh(boshinit.VSphereInitConfig{\n\t\t\t\/\/ vsphere specific\n\t\t\tVSphereAddress: c.String(\"vsphere-address\"),\n\t\t\tVSphereUser: c.String(\"vsphere-user\"),\n\t\t\tVSpherePassword: c.String(\"vsphere-password\"),\n\t\t\tVSphereDatacenterName: c.String(\"vsphere-datacenter-name\"),\n\t\t\tVSphereVMFolder: c.String(\"vsphere-vm-folder\"),\n\t\t\tVSphereTemplateFolder: c.String(\"vsphere-template-folder\"),\n\t\t\tVSphereDataStore: c.String(\"vsphere-datastore\"),\n\t\t\tVSphereDiskPath: c.String(\"vsphere-disk-path\"),\n\t\t\tVSphereClusters: utils.ClearDefaultStringSliceValue(c.StringSlice(\"vsphere-clusters\")...),\n\t\t\tVSphereResourcePool: c.String(\"vsphere-resource-pool\"),\n\t\t\tVSphereNetworks: []boshinit.Network{boshinit.Network{\n\t\t\t\tName: c.String(\"vsphere-subnet1-name\"),\n\t\t\t\tRange: c.String(\"vsphere-subnet1-range\"),\n\t\t\t\tGateway: c.String(\"vsphere-subnet1-gateway\"),\n\t\t\t\tDNS: utils.ClearDefaultStringSliceValue(c.StringSlice(\"vsphere-subnet1-dns\")...),\n\t\t\t}},\n\t\t}, boshBase)\n\n\t\tif yamlString, err := enaml.Paint(manifest); err == nil {\n\n\t\t\tif c.Bool(\"print-manifest\") {\n\t\t\t\tfmt.Println(yamlString)\n\n\t\t\t} else {\n\t\t\t\tutils.DeployYaml(yamlString, boshInitDeploy)\n\t\t\t}\n\t\t} else {\n\t\t\te = err\n\t\t}\n\t\treturn\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package instances\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/instance\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/logger\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/oauth\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/permissions\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/utils\"\n\t\"github.com\/cozy\/cozy-stack\/web\/jsonapi\"\n\t\"github.com\/cozy\/echo\"\n)\n\ntype apiInstance struct {\n\t*instance.Instance\n}\n\nfunc (i *apiInstance) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(i.Instance)\n}\n\n\/\/ Links is used to generate a JSON-API link for the instance\nfunc (i *apiInstance) Links() *jsonapi.LinksList {\n\treturn &jsonapi.LinksList{Self: \"\/instances\/\" + i.Instance.DocID}\n}\n\n\/\/ Relationships is used to generate the content relationship in JSON-API format\nfunc (i *apiInstance) Relationships() jsonapi.RelationshipMap {\n\treturn jsonapi.RelationshipMap{}\n}\n\n\/\/ Included is part of the jsonapi.Object interface\nfunc (i *apiInstance) Included() []jsonapi.Object {\n\treturn nil\n}\n\nfunc createHandler(c echo.Context) error {\n\tvar diskQuota int64\n\tif c.QueryParam(\"DiskQuota\") != \"\" {\n\t\tvar err error\n\t\tdiskQuota, err = strconv.ParseInt(c.QueryParam(\"DiskQuota\"), 10, 64)\n\t\tif err != nil {\n\t\t\treturn wrapError(err)\n\t\t}\n\t}\n\tvar settings couchdb.JSONDoc\n\tsettings.M = make(map[string]interface{})\n\tfor _, setting := range strings.Split(c.QueryParam(\"Settings\"), \",\") {\n\t\tif parts := strings.SplitN(setting, \":\", 2); len(parts) == 2 {\n\t\t\tsettings.M[parts[0]] = parts[1]\n\t\t}\n\t}\n\tif tz := c.QueryParam(\"Timezone\"); tz != \"\" {\n\t\tsettings.M[\"tz\"] = tz\n\t}\n\tif email := c.QueryParam(\"Email\"); email != \"\" {\n\t\tsettings.M[\"email\"] = email\n\t}\n\tif name := c.QueryParam(\"PublicName\"); name != \"\" {\n\t\tsettings.M[\"public_name\"] = name\n\t}\n\tin, err := instance.Create(&instance.Options{\n\t\tDomain: c.QueryParam(\"Domain\"),\n\t\tLocale: c.QueryParam(\"Locale\"),\n\t\tDiskQuota: diskQuota,\n\t\tSettings: settings,\n\t\tApps: utils.SplitTrimString(c.QueryParam(\"Apps\"), \",\"),\n\t\tDev: (c.QueryParam(\"Dev\") == \"true\"),\n\t})\n\tif err != nil {\n\t\treturn wrapError(err)\n\t}\n\tin.OAuthSecret = nil\n\tin.SessionSecret = nil\n\tin.PassphraseHash = nil\n\tpass := c.QueryParam(\"Passphrase\")\n\tif pass != \"\" {\n\t\tif err = in.RegisterPassphrase([]byte(pass), in.RegisterToken); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn jsonapi.Data(c, http.StatusCreated, &apiInstance{in}, nil)\n}\n\nfunc showHandler(c echo.Context) error {\n\tdomain := c.Param(\"domain\")\n\ti, err := instance.Get(domain)\n\tif err != nil {\n\t\treturn wrapError(err)\n\t}\n\treturn jsonapi.Data(c, http.StatusOK, &apiInstance{i}, nil)\n}\n\nfunc modifyHandler(c echo.Context) error {\n\tdomain := c.Param(\"domain\")\n\ti, err := instance.Get(domain)\n\tif err != nil {\n\t\treturn wrapError(err)\n\t}\n\tvar shouldUpdate bool\n\tif quota := c.QueryParam(\"DiskQuota\"); quota != \"\" {\n\t\tvar diskQuota int64\n\t\tdiskQuota, err = strconv.ParseInt(quota, 10, 64)\n\t\tif err != nil {\n\t\t\treturn wrapError(err)\n\t\t}\n\t\ti.BytesDiskQuota = diskQuota\n\t\tshouldUpdate = true\n\t}\n\tif locale := c.QueryParam(\"Locale\"); locale != \"\" {\n\t\ti.Locale = locale\n\t\tshouldUpdate = true\n\t}\n\tif shouldUpdate {\n\t\tif err = instance.Update(i); err != nil {\n\t\t\treturn wrapError(err)\n\t\t}\n\t}\n\tif debug, err := strconv.ParseBool(c.QueryParam(\"Debug\")); err == nil {\n\t\tif debug {\n\t\t\terr = logger.AddDebugDomain(domain)\n\t\t} else {\n\t\t\terr = logger.RemoveDebugDomain(domain)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn wrapError(err)\n\t\t}\n\t}\n\treturn jsonapi.Data(c, http.StatusOK, &apiInstance{i}, nil)\n}\n\nfunc listHandler(c echo.Context) error {\n\tis, err := instance.List()\n\tif err != nil {\n\t\tif couchdb.IsNoDatabaseError(err) {\n\t\t\treturn jsonapi.DataList(c, http.StatusOK, nil, nil)\n\t\t}\n\t\treturn wrapError(err)\n\t}\n\n\tobjs := make([]jsonapi.Object, len(is))\n\tfor i, in := range is {\n\t\tobjs[i] = &apiInstance{in}\n\t}\n\n\treturn jsonapi.DataList(c, http.StatusOK, objs, nil)\n}\n\nfunc deleteHandler(c echo.Context) error {\n\tdomain := c.Param(\"domain\")\n\terr := instance.Destroy(domain)\n\tif err != nil {\n\t\treturn wrapError(err)\n\t}\n\treturn c.NoContent(http.StatusNoContent)\n}\n\nfunc createToken(c echo.Context) error {\n\tdomain := c.QueryParam(\"Domain\")\n\taudience := c.QueryParam(\"Audience\")\n\tscope := c.QueryParam(\"Scope\")\n\tsubject := c.QueryParam(\"Subject\")\n\texpire := c.QueryParam(\"Expire\")\n\tin, err := instance.Get(domain)\n\tif err != nil {\n\t\treturn wrapError(err)\n\t}\n\tswitch audience {\n\tcase \"app\":\n\t\taudience = permissions.AppAudience\n\tcase \"konn\", \"konnector\":\n\t\taudience = permissions.KonnectorAudience\n\tcase \"access-token\":\n\t\taudience = permissions.AccessTokenAudience\n\tcase \"cli\":\n\t\taudience = permissions.CLIAudience\n\tdefault:\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, \"Unknown audience %s\", audience)\n\t}\n\tissuedAt := time.Now()\n\tif expire != \"\" && expire != \"0s\" {\n\t\tvar duration time.Duration\n\t\tif duration, err = time.ParseDuration(expire); err == nil {\n\t\t\tissuedAt = issuedAt.Add(duration - permissions.TokenValidityDuration)\n\t\t}\n\t}\n\ttoken, err := in.MakeJWT(audience, subject, scope, issuedAt)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.String(http.StatusOK, token)\n}\n\nfunc registerClient(c echo.Context) error {\n\tin, err := instance.Get(c.QueryParam(\"Domain\"))\n\tif err != nil {\n\t\treturn wrapError(err)\n\t}\n\tclient := oauth.Client{\n\t\tRedirectURIs: []string{c.QueryParam(\"RedirectURI\")},\n\t\tClientName: c.QueryParam(\"ClientName\"),\n\t\tSoftwareID: c.QueryParam(\"SoftwareID\"),\n\t}\n\tif regErr := client.Create(in); regErr != nil {\n\t\treturn c.String(http.StatusBadRequest, regErr.Description)\n\t}\n\treturn c.String(http.StatusOK, client.ClientID)\n}\n\nfunc wrapError(err error) error {\n\tswitch err {\n\tcase instance.ErrNotFound:\n\t\treturn jsonapi.NotFound(err)\n\tcase instance.ErrExists:\n\t\treturn jsonapi.Conflict(err)\n\tcase instance.ErrIllegalDomain:\n\t\treturn jsonapi.InvalidParameter(\"domain\", err)\n\tcase instance.ErrMissingToken:\n\t\treturn jsonapi.BadRequest(err)\n\tcase instance.ErrInvalidToken:\n\t\treturn jsonapi.BadRequest(err)\n\tcase instance.ErrMissingPassphrase:\n\t\treturn jsonapi.BadRequest(err)\n\tcase instance.ErrInvalidPassphrase:\n\t\treturn jsonapi.BadRequest(err)\n\t}\n\treturn err\n}\n\n\/\/ Routes sets the routing for the instances service\nfunc Routes(router *echo.Group) {\n\trouter.GET(\"\", listHandler)\n\trouter.POST(\"\", createHandler)\n\trouter.GET(\"\/:domain\", showHandler)\n\trouter.PATCH(\"\/:domain\", modifyHandler)\n\trouter.DELETE(\"\/:domain\", deleteHandler)\n\trouter.POST(\"\/token\", createToken)\n\trouter.POST(\"\/oauth_client\", registerClient)\n}\n<commit_msg>Continue to filter out secrets from admin api<commit_after>package instances\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cozy\/cozy-stack\/pkg\/couchdb\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/instance\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/logger\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/oauth\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/permissions\"\n\t\"github.com\/cozy\/cozy-stack\/pkg\/utils\"\n\t\"github.com\/cozy\/cozy-stack\/web\/jsonapi\"\n\t\"github.com\/cozy\/echo\"\n)\n\ntype apiInstance struct {\n\t*instance.Instance\n}\n\nfunc (i *apiInstance) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(i.Instance)\n}\n\n\/\/ Links is used to generate a JSON-API link for the instance\nfunc (i *apiInstance) Links() *jsonapi.LinksList {\n\treturn &jsonapi.LinksList{Self: \"\/instances\/\" + i.Instance.DocID}\n}\n\n\/\/ Relationships is used to generate the content relationship in JSON-API format\nfunc (i *apiInstance) Relationships() jsonapi.RelationshipMap {\n\treturn jsonapi.RelationshipMap{}\n}\n\n\/\/ Included is part of the jsonapi.Object interface\nfunc (i *apiInstance) Included() []jsonapi.Object {\n\treturn nil\n}\n\nfunc createHandler(c echo.Context) error {\n\tvar diskQuota int64\n\tif c.QueryParam(\"DiskQuota\") != \"\" {\n\t\tvar err error\n\t\tdiskQuota, err = strconv.ParseInt(c.QueryParam(\"DiskQuota\"), 10, 64)\n\t\tif err != nil {\n\t\t\treturn wrapError(err)\n\t\t}\n\t}\n\tvar settings couchdb.JSONDoc\n\tsettings.M = make(map[string]interface{})\n\tfor _, setting := range strings.Split(c.QueryParam(\"Settings\"), \",\") {\n\t\tif parts := strings.SplitN(setting, \":\", 2); len(parts) == 2 {\n\t\t\tsettings.M[parts[0]] = parts[1]\n\t\t}\n\t}\n\tif tz := c.QueryParam(\"Timezone\"); tz != \"\" {\n\t\tsettings.M[\"tz\"] = tz\n\t}\n\tif email := c.QueryParam(\"Email\"); email != \"\" {\n\t\tsettings.M[\"email\"] = email\n\t}\n\tif name := c.QueryParam(\"PublicName\"); name != \"\" {\n\t\tsettings.M[\"public_name\"] = name\n\t}\n\tin, err := instance.Create(&instance.Options{\n\t\tDomain: c.QueryParam(\"Domain\"),\n\t\tLocale: c.QueryParam(\"Locale\"),\n\t\tDiskQuota: diskQuota,\n\t\tSettings: settings,\n\t\tApps: utils.SplitTrimString(c.QueryParam(\"Apps\"), \",\"),\n\t\tDev: (c.QueryParam(\"Dev\") == \"true\"),\n\t})\n\tif err != nil {\n\t\treturn wrapError(err)\n\t}\n\tin.OAuthSecret = nil\n\tin.SessionSecret = nil\n\tpass := c.QueryParam(\"Passphrase\")\n\tif pass != \"\" {\n\t\tif err = in.RegisterPassphrase([]byte(pass), in.RegisterToken); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn jsonapi.Data(c, http.StatusCreated, &apiInstance{in}, nil)\n}\n\nfunc showHandler(c echo.Context) error {\n\tdomain := c.Param(\"domain\")\n\ti, err := instance.Get(domain)\n\tif err != nil {\n\t\treturn wrapError(err)\n\t}\n\treturn jsonapi.Data(c, http.StatusOK, &apiInstance{i}, nil)\n}\n\nfunc modifyHandler(c echo.Context) error {\n\tdomain := c.Param(\"domain\")\n\ti, err := instance.Get(domain)\n\tif err != nil {\n\t\treturn wrapError(err)\n\t}\n\tvar shouldUpdate bool\n\tif quota := c.QueryParam(\"DiskQuota\"); quota != \"\" {\n\t\tvar diskQuota int64\n\t\tdiskQuota, err = strconv.ParseInt(quota, 10, 64)\n\t\tif err != nil {\n\t\t\treturn wrapError(err)\n\t\t}\n\t\ti.BytesDiskQuota = diskQuota\n\t\tshouldUpdate = true\n\t}\n\tif locale := c.QueryParam(\"Locale\"); locale != \"\" {\n\t\ti.Locale = locale\n\t\tshouldUpdate = true\n\t}\n\tif shouldUpdate {\n\t\tif err = instance.Update(i); err != nil {\n\t\t\treturn wrapError(err)\n\t\t}\n\t}\n\tif debug, err := strconv.ParseBool(c.QueryParam(\"Debug\")); err == nil {\n\t\tif debug {\n\t\t\terr = logger.AddDebugDomain(domain)\n\t\t} else {\n\t\t\terr = logger.RemoveDebugDomain(domain)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn wrapError(err)\n\t\t}\n\t}\n\treturn jsonapi.Data(c, http.StatusOK, &apiInstance{i}, nil)\n}\n\nfunc listHandler(c echo.Context) error {\n\tis, err := instance.List()\n\tif err != nil {\n\t\tif couchdb.IsNoDatabaseError(err) {\n\t\t\treturn jsonapi.DataList(c, http.StatusOK, nil, nil)\n\t\t}\n\t\treturn wrapError(err)\n\t}\n\n\tobjs := make([]jsonapi.Object, len(is))\n\tfor i, in := range is {\n\t\tin.OAuthSecret = nil\n\t\tin.SessionSecret = nil\n\t\tobjs[i] = &apiInstance{in}\n\t}\n\n\treturn jsonapi.DataList(c, http.StatusOK, objs, nil)\n}\n\nfunc deleteHandler(c echo.Context) error {\n\tdomain := c.Param(\"domain\")\n\terr := instance.Destroy(domain)\n\tif err != nil {\n\t\treturn wrapError(err)\n\t}\n\treturn c.NoContent(http.StatusNoContent)\n}\n\nfunc createToken(c echo.Context) error {\n\tdomain := c.QueryParam(\"Domain\")\n\taudience := c.QueryParam(\"Audience\")\n\tscope := c.QueryParam(\"Scope\")\n\tsubject := c.QueryParam(\"Subject\")\n\texpire := c.QueryParam(\"Expire\")\n\tin, err := instance.Get(domain)\n\tif err != nil {\n\t\treturn wrapError(err)\n\t}\n\tswitch audience {\n\tcase \"app\":\n\t\taudience = permissions.AppAudience\n\tcase \"konn\", \"konnector\":\n\t\taudience = permissions.KonnectorAudience\n\tcase \"access-token\":\n\t\taudience = permissions.AccessTokenAudience\n\tcase \"cli\":\n\t\taudience = permissions.CLIAudience\n\tdefault:\n\t\treturn echo.NewHTTPError(http.StatusBadRequest, \"Unknown audience %s\", audience)\n\t}\n\tissuedAt := time.Now()\n\tif expire != \"\" && expire != \"0s\" {\n\t\tvar duration time.Duration\n\t\tif duration, err = time.ParseDuration(expire); err == nil {\n\t\t\tissuedAt = issuedAt.Add(duration - permissions.TokenValidityDuration)\n\t\t}\n\t}\n\ttoken, err := in.MakeJWT(audience, subject, scope, issuedAt)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn c.String(http.StatusOK, token)\n}\n\nfunc registerClient(c echo.Context) error {\n\tin, err := instance.Get(c.QueryParam(\"Domain\"))\n\tif err != nil {\n\t\treturn wrapError(err)\n\t}\n\tclient := oauth.Client{\n\t\tRedirectURIs: []string{c.QueryParam(\"RedirectURI\")},\n\t\tClientName: c.QueryParam(\"ClientName\"),\n\t\tSoftwareID: c.QueryParam(\"SoftwareID\"),\n\t}\n\tif regErr := client.Create(in); regErr != nil {\n\t\treturn c.String(http.StatusBadRequest, regErr.Description)\n\t}\n\treturn c.String(http.StatusOK, client.ClientID)\n}\n\nfunc wrapError(err error) error {\n\tswitch err {\n\tcase instance.ErrNotFound:\n\t\treturn jsonapi.NotFound(err)\n\tcase instance.ErrExists:\n\t\treturn jsonapi.Conflict(err)\n\tcase instance.ErrIllegalDomain:\n\t\treturn jsonapi.InvalidParameter(\"domain\", err)\n\tcase instance.ErrMissingToken:\n\t\treturn jsonapi.BadRequest(err)\n\tcase instance.ErrInvalidToken:\n\t\treturn jsonapi.BadRequest(err)\n\tcase instance.ErrMissingPassphrase:\n\t\treturn jsonapi.BadRequest(err)\n\tcase instance.ErrInvalidPassphrase:\n\t\treturn jsonapi.BadRequest(err)\n\t}\n\treturn err\n}\n\n\/\/ Routes sets the routing for the instances service\nfunc Routes(router *echo.Group) {\n\trouter.GET(\"\", listHandler)\n\trouter.POST(\"\", createHandler)\n\trouter.GET(\"\/:domain\", showHandler)\n\trouter.PATCH(\"\/:domain\", modifyHandler)\n\trouter.DELETE(\"\/:domain\", deleteHandler)\n\trouter.POST(\"\/token\", createToken)\n\trouter.POST(\"\/oauth_client\", registerClient)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build !windows\n\n\/*\n Copyright 2020 Padduck, LLC\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n \thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage tty\n\nimport (\n\t\"fmt\"\n\t\"github.com\/creack\/pty\"\n\t\"github.com\/pufferpanel\/pufferpanel\/v2\"\n\t\"github.com\/pufferpanel\/pufferpanel\/v2\/logging\"\n\t\"github.com\/pufferpanel\/pufferpanel\/v2\/messages\"\n\t\"github.com\/shirou\/gopsutil\/process\"\n\t\"github.com\/spf13\/cast\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\ntype tty struct {\n\t*pufferpanel.BaseEnvironment\n\tmainProcess *exec.Cmd\n\tstdInWriter io.Writer\n}\n\nfunc (t *tty) ttyExecuteAsync(steps pufferpanel.ExecutionData) (err error) {\n\trunning, err := t.IsRunning()\n\tif err != nil {\n\t\treturn\n\t}\n\tif running {\n\t\terr = pufferpanel.ErrProcessRunning\n\t\treturn\n\t}\n\tt.Wait.Wait()\n\n\tpr := exec.Command(steps.Command, steps.Arguments...)\n\tpr.Dir = steps.WorkingDirectory\n\tpr.Env = append(os.Environ(), \"HOME=\"+s.RootDirectory, \"TERM=xterm-256color\")\n\tfor k, v := range steps.Environment {\n\t\tpr.Env = append(pr.Env, fmt.Sprintf(\"%s=%s\", k, v))\n\t}\n\n\twrapper := t.CreateWrapper()\n\tt.Wait.Add(1)\n\tpr.SysProcAttr = &syscall.SysProcAttr{Setctty: true, Setsid: true}\n\tt.mainProcess = pr\n\tlogging.Info().Printf(\"Starting process: %s %s\", t.mainProcess.Path, strings.Join(t.mainProcess.Args[1:], \" \"))\n\n\tmsg := messages.Status{Running:true}\n\t_ = t.WSManager.WriteMessage(msg)\n\n\ttty, err := pty.Start(pr)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tt.stdInWriter = tty\n\n\tgo func(proxy io.Writer) {\n\t\t_, _ = io.Copy(proxy, tty)\n\t}(wrapper)\n\n\tgo t.handleClose(steps.Callback)\n\treturn\n}\n\nfunc (t *tty) ExecuteInMainProcess(cmd string) (err error) {\n\trunning, err := t.IsRunning()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !running {\n\t\terr = pufferpanel.ErrServerOffline\n\t\treturn\n\t}\n\tstdIn := t.stdInWriter\n\t_, err = io.WriteString(stdIn, cmd+\"\\n\")\n\treturn\n}\n\nfunc (t *tty) Kill() (err error) {\n\trunning, err := t.IsRunning()\n\tif err != nil {\n\t\treturn\n\t}\n\tif !running {\n\t\treturn\n\t}\n\treturn t.mainProcess.Process.Kill()\n}\n\nfunc (t *tty) IsRunning() (isRunning bool, err error) {\n\tisRunning = t.mainProcess != nil && t.mainProcess.Process != nil\n\tif isRunning {\n\t\tpr, pErr := os.FindProcess(t.mainProcess.Process.Pid)\n\t\tif pr == nil || pErr != nil {\n\t\t\tisRunning = false\n\t\t} else if pr.Signal(syscall.Signal(0)) != nil {\n\t\t\tisRunning = false\n\t\t}\n\t}\n\treturn\n}\n\nfunc (t *tty) GetStats() (*pufferpanel.ServerStats, error) {\n\trunning, err := t.IsRunning()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !running {\n\t\treturn nil, pufferpanel.ErrServerOffline\n\t}\n\tpr, err := process.NewProcess(int32(t.mainProcess.Process.Pid))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmemMap, _ := pr.MemoryInfo()\n\tcpu, _ := pr.Percent(time.Second * 1)\n\n\treturn &pufferpanel.ServerStats{\n\t\tCpu: cpu,\n\t\tMemory: cast.ToFloat64(memMap.RSS),\n\t}, nil\n}\n\nfunc (t *tty) Create() error {\n\treturn os.Mkdir(t.RootDirectory, 0755)\n}\n\nfunc (t *tty) WaitForMainProcess() error {\n\treturn t.WaitForMainProcessFor(0)\n}\n\nfunc (t *tty) WaitForMainProcessFor(timeout int) (err error) {\n\trunning, err := t.IsRunning()\n\tif err != nil {\n\t\treturn\n\t}\n\tif running {\n\t\tif timeout > 0 {\n\t\t\tvar timer = time.AfterFunc(time.Duration(timeout)*time.Millisecond, func() {\n\t\t\t\terr = t.Kill()\n\t\t\t})\n\t\t\tt.Wait.Wait()\n\t\t\ttimer.Stop()\n\t\t} else {\n\t\t\tt.Wait.Wait()\n\t\t}\n\t}\n\treturn\n}\n\nfunc (t *tty) SendCode(code int) error {\n\trunning, err := t.IsRunning()\n\n\tif err != nil || !running {\n\t\treturn err\n\t}\n\n\treturn t.mainProcess.Process.Signal(syscall.Signal(code))\n}\n\nfunc (t *tty) handleClose(callback func(graceful bool)) {\n\terr := t.mainProcess.Wait()\n\n\tvar success bool\n\tif t.mainProcess == nil || t.mainProcess.ProcessState == nil || err != nil {\n\t\tsuccess = false\n\t} else {\n\t\tsuccess = t.mainProcess.ProcessState.Success()\n\t}\n\n\tif t.mainProcess != nil && t.mainProcess.Process != nil {\n\t\t_ = t.mainProcess.Process.Release()\n\t}\n\tt.mainProcess = nil\n\tt.Wait.Done()\n\n\tmsg := messages.Status{Running:false}\n\t_ = t.WSManager.WriteMessage(msg)\n\n\tif callback != nil {\n\t\tcallback(success)\n\t}\n}\n<commit_msg>Fix build<commit_after>\/\/ +build !windows\n\n\/*\n Copyright 2020 Padduck, LLC\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n \thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage tty\n\nimport (\n\t\"fmt\"\n\t\"github.com\/creack\/pty\"\n\t\"github.com\/pufferpanel\/pufferpanel\/v2\"\n\t\"github.com\/pufferpanel\/pufferpanel\/v2\/logging\"\n\t\"github.com\/pufferpanel\/pufferpanel\/v2\/messages\"\n\t\"github.com\/shirou\/gopsutil\/process\"\n\t\"github.com\/spf13\/cast\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\ntype tty struct {\n\t*pufferpanel.BaseEnvironment\n\tmainProcess *exec.Cmd\n\tstdInWriter io.Writer\n}\n\nfunc (t *tty) ttyExecuteAsync(steps pufferpanel.ExecutionData) (err error) {\n\trunning, err := t.IsRunning()\n\tif err != nil {\n\t\treturn\n\t}\n\tif running {\n\t\terr = pufferpanel.ErrProcessRunning\n\t\treturn\n\t}\n\tt.Wait.Wait()\n\n\tpr := exec.Command(steps.Command, steps.Arguments...)\n\tpr.Dir = steps.WorkingDirectory\n\tpr.Env = append(os.Environ(), \"HOME=\"+t.RootDirectory, \"TERM=xterm-256color\")\n\tfor k, v := range steps.Environment {\n\t\tpr.Env = append(pr.Env, fmt.Sprintf(\"%s=%s\", k, v))\n\t}\n\n\twrapper := t.CreateWrapper()\n\tt.Wait.Add(1)\n\tpr.SysProcAttr = &syscall.SysProcAttr{Setctty: true, Setsid: true}\n\tt.mainProcess = pr\n\tlogging.Info().Printf(\"Starting process: %s %s\", t.mainProcess.Path, strings.Join(t.mainProcess.Args[1:], \" \"))\n\n\tmsg := messages.Status{Running:true}\n\t_ = t.WSManager.WriteMessage(msg)\n\n\ttty, err := pty.Start(pr)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tt.stdInWriter = tty\n\n\tgo func(proxy io.Writer) {\n\t\t_, _ = io.Copy(proxy, tty)\n\t}(wrapper)\n\n\tgo t.handleClose(steps.Callback)\n\treturn\n}\n\nfunc (t *tty) ExecuteInMainProcess(cmd string) (err error) {\n\trunning, err := t.IsRunning()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !running {\n\t\terr = pufferpanel.ErrServerOffline\n\t\treturn\n\t}\n\tstdIn := t.stdInWriter\n\t_, err = io.WriteString(stdIn, cmd+\"\\n\")\n\treturn\n}\n\nfunc (t *tty) Kill() (err error) {\n\trunning, err := t.IsRunning()\n\tif err != nil {\n\t\treturn\n\t}\n\tif !running {\n\t\treturn\n\t}\n\treturn t.mainProcess.Process.Kill()\n}\n\nfunc (t *tty) IsRunning() (isRunning bool, err error) {\n\tisRunning = t.mainProcess != nil && t.mainProcess.Process != nil\n\tif isRunning {\n\t\tpr, pErr := os.FindProcess(t.mainProcess.Process.Pid)\n\t\tif pr == nil || pErr != nil {\n\t\t\tisRunning = false\n\t\t} else if pr.Signal(syscall.Signal(0)) != nil {\n\t\t\tisRunning = false\n\t\t}\n\t}\n\treturn\n}\n\nfunc (t *tty) GetStats() (*pufferpanel.ServerStats, error) {\n\trunning, err := t.IsRunning()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !running {\n\t\treturn nil, pufferpanel.ErrServerOffline\n\t}\n\tpr, err := process.NewProcess(int32(t.mainProcess.Process.Pid))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmemMap, _ := pr.MemoryInfo()\n\tcpu, _ := pr.Percent(time.Second * 1)\n\n\treturn &pufferpanel.ServerStats{\n\t\tCpu: cpu,\n\t\tMemory: cast.ToFloat64(memMap.RSS),\n\t}, nil\n}\n\nfunc (t *tty) Create() error {\n\treturn os.Mkdir(t.RootDirectory, 0755)\n}\n\nfunc (t *tty) WaitForMainProcess() error {\n\treturn t.WaitForMainProcessFor(0)\n}\n\nfunc (t *tty) WaitForMainProcessFor(timeout int) (err error) {\n\trunning, err := t.IsRunning()\n\tif err != nil {\n\t\treturn\n\t}\n\tif running {\n\t\tif timeout > 0 {\n\t\t\tvar timer = time.AfterFunc(time.Duration(timeout)*time.Millisecond, func() {\n\t\t\t\terr = t.Kill()\n\t\t\t})\n\t\t\tt.Wait.Wait()\n\t\t\ttimer.Stop()\n\t\t} else {\n\t\t\tt.Wait.Wait()\n\t\t}\n\t}\n\treturn\n}\n\nfunc (t *tty) SendCode(code int) error {\n\trunning, err := t.IsRunning()\n\n\tif err != nil || !running {\n\t\treturn err\n\t}\n\n\treturn t.mainProcess.Process.Signal(syscall.Signal(code))\n}\n\nfunc (t *tty) handleClose(callback func(graceful bool)) {\n\terr := t.mainProcess.Wait()\n\n\tvar success bool\n\tif t.mainProcess == nil || t.mainProcess.ProcessState == nil || err != nil {\n\t\tsuccess = false\n\t} else {\n\t\tsuccess = t.mainProcess.ProcessState.Success()\n\t}\n\n\tif t.mainProcess != nil && t.mainProcess.Process != nil {\n\t\t_ = t.mainProcess.Process.Release()\n\t}\n\tt.mainProcess = nil\n\tt.Wait.Done()\n\n\tmsg := messages.Status{Running:false}\n\t_ = t.WSManager.WriteMessage(msg)\n\n\tif callback != nil {\n\t\tcallback(success)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage oglematchers\n\nimport (\n)\n\n\/\/ IdenticalTo(x) returns a matcher that matches values v such that all of the\n\/\/ following hold:\n\/\/\n\/\/ * v and x have identical types.\n\/\/\n\/\/ * If v and x are of a reference type (slice, map, function, channel), then\n\/\/ they are either both nil or are references to the same object.\n\/\/\n\/\/ * If v and x are not of a reference type, then v == x.\n\/\/\n\/\/ This function will panic if x is of a value type that is not comparable. For\n\/\/ example, x cannot be an array of functions.\nfunc IdenticalTo(x interface{}) Matcher {\n\t\/\/ TODO\n\treturn &hasSubstrMatcher{\"asd\"}\n}\n<commit_msg>Added the skeleton of a type check.<commit_after>\/\/ Copyright 2012 Aaron Jacobs. All Rights Reserved.\n\/\/ Author: aaronjjacobs@gmail.com (Aaron Jacobs)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage oglematchers\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n)\n\n\/\/ Should the supplied type be allowed as an argument to IdenticalTo?\nfunc isLegalForIdenticalTo(t reflect.Type) (bool, error) {\n\treturn false, errors.New(\"TODO\")\n}\n\n\/\/ IdenticalTo(x) returns a matcher that matches values v such that all of the\n\/\/ following hold:\n\/\/\n\/\/ * v and x have identical types.\n\/\/\n\/\/ * If v and x are of a reference type (slice, map, function, channel), then\n\/\/ they are either both nil or are references to the same object.\n\/\/\n\/\/ * If v and x are not of a reference type, then v == x.\n\/\/\n\/\/ This function will panic if x is of a value type that is not comparable. For\n\/\/ example, x cannot be an array of functions.\nfunc IdenticalTo(x interface{}) Matcher {\n\tt := reflect.TypeOf(x)\n\n\t\/\/ Reject illegal arguments.\n\tif ok, err := isLegalForIdenticalTo(t); !ok {\n\t\tpanic(\"IdenticalTo: \" + err.Error())\n\t}\n\n\t\/\/ TODO\n\treturn &hasSubstrMatcher{\"asd\"}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 by caixw, All rights reserved.\n\/\/ Use of this source code is governed by a MIT\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package syntax 提供对代码块的语法进行解析\npackage syntax\n\nimport (\n\t\"log\"\n\n\t\"github.com\/caixw\/apidoc\/locale\"\n\t\"github.com\/caixw\/apidoc\/types\"\n\t\"github.com\/caixw\/apidoc\/vars\"\n\n\t\"github.com\/issue9\/is\"\n)\n\n\/\/ Input 输入的数据\ntype Input struct {\n\tFile string\n\tLine int\n\tData []rune\n\tError *log.Logger\n\tWarn *log.Logger\n}\n\n\/\/ Parse 分析一段代码,并将结果保存到 d 中。\nfunc Parse(d *types.Doc, input *Input) {\n\tl := newLexer(input)\n\n\tfor {\n\t\tswitch {\n\t\tcase l.matchTag(vars.APIDoc):\n\t\t\tif !l.scanAPIDoc(d) {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase l.matchTag(vars.API):\n\t\t\tif api, ok := l.scanAPI(); ok {\n\t\t\t\tif api == nil { \/\/ @apiIgnore\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\td.NewAPI(api)\n\t\t\t} else {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase l.match(vars.API):\n\t\t\tl.backup()\n\t\t\tword := l.readWord()\n\t\t\tif input.Warn != nil {\n\t\t\t\terr := &types.SyntaxError{\n\t\t\t\t\tFile: input.File,\n\t\t\t\t\tLine: input.Line,\n\t\t\t\t\tMessage: locale.Sprintf(locale.ErrUnknownTag, word),\n\t\t\t\t}\n\t\t\t\tinput.Warn.Println(err)\n\t\t\t}\n\t\t\tl.readTag() \/\/ 指针移到下一个标签处\n\t\tdefault:\n\t\t\tif l.atEOF() {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tl.pos++ \/\/ 去掉无用的字符。\n\t\t}\n\t} \/\/ end for\n}\n\n\/\/ 解析 @apidoc 及其子标签\n\/\/\n\/\/ @apidoc title of doc\n\/\/ @apiVersion 2.0\n\/\/ @apiBaseURL https:\/\/api.caixw.io\n\/\/ @apiLicense MIT https:\/\/opensource.org\/licenses\/MIT\n\/\/\n\/\/ @apiContent\n\/\/ content1\n\/\/ content2\nfunc (l *lexer) scanAPIDoc(d *types.Doc) bool {\n\tif len(d.Title) > 0 || len(d.Version) > 0 {\n\t\tl.syntaxError(locale.ErrDuplicateTag, vars.APIDoc)\n\t\treturn false\n\t}\n\n\tt := l.readTag()\n\td.Title = t.readLine()\n\tif len(d.Title) == 0 {\n\t\tl.syntaxError(locale.ErrTagArgNotEnough, vars.APIDoc)\n\t\treturn false\n\t}\n\tif !t.atEOF() {\n\t\tl.syntaxError(locale.ErrTagArgTooMuch, vars.APIDoc)\n\t\treturn false\n\t}\n\n\tfor {\n\t\tswitch {\n\t\tcase l.matchTag(vars.APIVersion):\n\t\t\tt := l.readTag()\n\t\t\td.Version = t.readLine()\n\t\t\tif len(d.Version) == 0 {\n\t\t\t\tt.syntaxError(locale.ErrTagArgNotEnough, vars.APIVersion)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif !t.atEOF() {\n\t\t\t\tt.syntaxError(locale.ErrTagArgTooMuch, vars.APIVersion)\n\t\t\t\treturn false\n\t\t\t}\n\t\tcase l.matchTag(vars.APIBaseURL):\n\t\t\tt := l.readTag()\n\t\t\td.BaseURL = t.readLine()\n\t\t\tif len(d.BaseURL) == 0 {\n\t\t\t\tt.syntaxError(locale.ErrTagArgNotEnough, vars.APIBaseURL)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif !t.atEOF() {\n\t\t\t\tt.syntaxError(locale.ErrTagArgTooMuch, vars.APIBaseURL)\n\t\t\t\treturn false\n\t\t\t}\n\t\tcase l.matchTag(vars.APILicense):\n\t\t\tt := l.readTag()\n\t\t\td.LicenseName = t.readWord()\n\t\t\td.LicenseURL = t.readLine()\n\t\t\tif len(d.LicenseName) == 0 {\n\t\t\t\tt.syntaxError(locale.ErrTagArgNotEnough, vars.APILicense)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif len(d.LicenseURL) > 0 && !is.URL(d.LicenseURL) {\n\t\t\t\tt.syntaxError(locale.ErrSecondArgMustURL)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif !t.atEOF() {\n\t\t\t\tt.syntaxError(locale.ErrTagArgTooMuch, vars.APILicense)\n\t\t\t\treturn false\n\t\t\t}\n\t\tcase l.matchTag(vars.APIContent):\n\t\t\td.Content = l.readEnd()\n\t\tcase l.match(vars.API): \/\/ 不认识的标签\n\t\t\tl.backup()\n\t\t\tl.syntaxError(locale.ErrUnknownTag, l.readWord())\n\t\t\treturn false\n\t\tdefault:\n\t\t\tif l.atEOF() {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tl.pos++ \/\/ 去掉无用的字符。\n\t\t}\n\t} \/\/ end for\n}\n\n\/\/ 解析 @api 及其子标签\nfunc (l *lexer) scanAPI() (*types.API, bool) {\n\tapi := &types.API{}\n\tt := l.readTag()\n\tapi.Method = t.readWord()\n\tapi.URL = t.readWord()\n\tapi.Summary = t.readLine()\n\n\tif len(api.Method) == 0 || len(api.URL) == 0 || len(api.Summary) == 0 {\n\t\tt.syntaxError(locale.ErrTagArgNotEnough, vars.API)\n\t\treturn nil, false\n\t}\n\n\tapi.Description = t.readEnd()\nLOOP:\n\tfor {\n\t\tswitch {\n\t\tcase l.matchTag(vars.APIIgnore):\n\t\t\treturn nil, true\n\t\tcase l.matchTag(vars.APIGroup):\n\t\t\tif !l.scanGroup(api) {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\tcase l.matchTag(vars.APIQuery):\n\t\t\tif !l.scanAPIQueries(api) {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\tcase l.matchTag(vars.APIParam):\n\t\t\tif !l.scanAPIParams(api) {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\tcase l.matchTag(vars.APIRequest):\n\t\t\treq, ok := l.scanAPIRequest()\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tapi.Request = req\n\t\tcase l.matchTag(vars.APIError):\n\t\t\tresp, ok := l.scanResponse(vars.APIError)\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tapi.Error = resp\n\t\tcase l.matchTag(vars.APISuccess):\n\t\t\tresp, ok := l.scanResponse(vars.APISuccess)\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tapi.Success = resp\n\t\tcase l.match(vars.API): \/\/ 不认识的标签\n\t\t\tl.backup()\n\t\t\tl.syntaxWarn(locale.ErrUnknownTag, l.readWord())\n\t\t\tl.readTag() \/\/ 指针移到下一个标签处\n\t\tdefault:\n\t\t\tif l.atEOF() {\n\t\t\t\tbreak LOOP\n\t\t\t}\n\t\t\tl.pos++ \/\/ 去掉无用的字符。\n\t\t}\n\t} \/\/ end for\n\n\tif api.Success == nil {\n\t\tl.syntaxError(locale.ErrSuccessNotEmpty)\n\t\treturn nil, false\n\t}\n\n\tif len(api.Group) == 0 {\n\t\tapi.Group = vars.DefaultGroupName\n\t}\n\n\treturn api, true\n}\n\nfunc (l *lexer) scanGroup(api *types.API) bool {\n\tt := l.readTag()\n\n\tapi.Group = t.readWord()\n\tif len(api.Group) == 0 {\n\t\tt.syntaxError(locale.ErrTagArgNotEnough, vars.APIGroup)\n\t\treturn false\n\t}\n\n\tif !t.atEOF() {\n\t\tt.syntaxError(locale.ErrTagArgTooMuch, vars.APIGroup)\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (l *lexer) scanAPIQueries(api *types.API) bool {\n\tif api.Queries == nil {\n\t\tapi.Queries = make([]*types.Param, 0, 1)\n\t}\n\n\tif p, ok := l.scanAPIParam(vars.APIQuery); ok {\n\t\tapi.Queries = append(api.Queries, p)\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (l *lexer) scanAPIParams(api *types.API) bool {\n\tif api.Params == nil {\n\t\tapi.Params = make([]*types.Param, 0, 1)\n\t}\n\n\tif p, ok := l.scanAPIParam(vars.APIParam); ok {\n\t\tapi.Params = append(api.Params, p)\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ 解析 @apiRequest 及其子标签\nfunc (l *lexer) scanAPIRequest() (*types.Request, bool) {\n\tt := l.readTag()\n\tr := &types.Request{\n\t\tType: t.readLine(),\n\t\tHeaders: map[string]string{},\n\t\tParams: []*types.Param{},\n\t\tExamples: []*types.Example{},\n\t}\n\tif !t.atEOF() {\n\t\tt.syntaxError(locale.ErrTagArgTooMuch, vars.APIRequest)\n\t\treturn nil, false\n\t}\n\nLOOP:\n\tfor {\n\t\tswitch {\n\t\tcase l.matchTag(vars.APIHeader):\n\t\t\tt := l.readTag()\n\t\t\tkey := t.readWord()\n\t\t\tval := t.readLine()\n\t\t\tif len(key) == 0 || len(val) == 0 {\n\t\t\t\tt.syntaxError(locale.ErrTagArgNotEnough, vars.APIHeader)\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tif !t.atEOF() {\n\t\t\t\tt.syntaxError(locale.ErrTagArgTooMuch, vars.APIHeader)\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tr.Headers[string(key)] = string(val)\n\t\tcase l.matchTag(vars.APIParam):\n\t\t\tp, ok := l.scanAPIParam(vars.APIParam)\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tr.Params = append(r.Params, p)\n\t\tcase l.matchTag(vars.APIExample):\n\t\t\te, ok := l.scanAPIExample()\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tr.Examples = append(r.Examples, e)\n\t\tcase l.match(vars.API): \/\/ 其它 api*,退出。\n\t\t\tl.backup()\n\t\t\tbreak LOOP\n\t\tdefault:\n\t\t\tif l.atEOF() {\n\t\t\t\tbreak LOOP\n\t\t\t}\n\t\t\tl.pos++ \/\/ 去掉无用的字符。\n\n\t\t} \/\/ end switch\n\t} \/\/ end for\n\n\treturn r, true\n}\n\n\/\/ 解析 @apiSuccess 或是 @apiError 及其子标签。\nfunc (l *lexer) scanResponse(tagName string) (*types.Response, bool) {\n\ttag := l.readTag()\n\tresp := &types.Response{\n\t\tCode: tag.readWord(),\n\t\tSummary: tag.readLine(),\n\t\tHeaders: map[string]string{},\n\t\tParams: []*types.Param{},\n\t\tExamples: []*types.Example{},\n\t}\n\n\tif len(resp.Code) == 0 || len(resp.Summary) == 0 {\n\t\ttag.syntaxError(locale.ErrTagArgNotEnough, tagName)\n\t\treturn nil, false\n\t}\n\tif !tag.atEOF() {\n\t\ttag.syntaxError(locale.ErrTagArgTooMuch, tagName)\n\t\treturn nil, false\n\t}\n\nLOOP:\n\tfor {\n\t\tswitch {\n\t\tcase l.matchTag(vars.APIHeader):\n\t\t\tt := l.readTag()\n\t\t\tkey := t.readWord()\n\t\t\tval := t.readLine()\n\t\t\tif len(key) == 0 || len(val) == 0 {\n\t\t\t\tt.syntaxError(locale.ErrTagArgNotEnough, vars.APIHeader)\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tif !t.atEOF() {\n\t\t\t\tt.syntaxError(locale.ErrTagArgTooMuch, vars.APIHeader)\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tresp.Headers[key] = val\n\t\tcase l.matchTag(vars.APIParam):\n\t\t\tp, ok := l.scanAPIParam(vars.APIParam)\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tresp.Params = append(resp.Params, p)\n\t\tcase l.matchTag(vars.APIExample):\n\t\t\te, ok := l.scanAPIExample()\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tresp.Examples = append(resp.Examples, e)\n\t\tcase l.match(vars.API): \/\/ 其它 api*,退出。\n\t\t\tl.backup()\n\t\t\tbreak LOOP\n\t\tdefault:\n\t\t\tif l.atEOF() {\n\t\t\t\tbreak LOOP\n\t\t\t}\n\t\t\tl.pos++ \/\/ 去掉无用的字符。\n\t\t}\n\t}\n\n\treturn resp, true\n}\n\n\/\/ 解析 @apiExample 标签\nfunc (l *lexer) scanAPIExample() (*types.Example, bool) {\n\ttag := l.readTag()\n\texample := &types.Example{\n\t\tType: tag.readWord(),\n\t\tCode: tag.readEnd(),\n\t}\n\n\tif len(example.Type) == 0 || len(example.Code) == 0 {\n\t\ttag.syntaxError(locale.ErrTagArgNotEnough, vars.APIExample)\n\t\treturn nil, false\n\t}\n\n\treturn example, true\n}\n\n\/\/ 解析 @apiParam 标签\nfunc (l *lexer) scanAPIParam(tagName string) (*types.Param, bool) {\n\tp := &types.Param{}\n\n\ttag := l.readTag()\n\tp.Name = tag.readWord()\n\tp.Type = tag.readWord()\n\tp.Summary = tag.readEnd()\n\tif len(p.Name) == 0 || len(p.Type) == 0 || len(p.Summary) == 0 {\n\t\ttag.syntaxError(locale.ErrTagArgNotEnough, tagName)\n\t\treturn nil, false\n\t}\n\treturn p, true\n}\n<commit_msg>复用 lexer.syntaxError 函数<commit_after>\/\/ Copyright 2016 by caixw, All rights reserved.\n\/\/ Use of this source code is governed by a MIT\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package syntax 负责对标签语法的解析操作。\npackage syntax\n\nimport (\n\t\"log\"\n\n\t\"github.com\/caixw\/apidoc\/locale\"\n\t\"github.com\/caixw\/apidoc\/types\"\n\t\"github.com\/caixw\/apidoc\/vars\"\n\n\t\"github.com\/issue9\/is\"\n)\n\n\/\/ Input 由外界提供的与标签语法相关的内容。\ntype Input struct {\n\tFile string \/\/ 该段代码所在的文件\n\tLine int \/\/ 该段代码在文件中的行号\n\tData []rune \/\/ 需要解析的代码段\n\tError *log.Logger \/\/ 出错时的输出通道\n\tWarn *log.Logger \/\/ 警告信息的输出通道\n}\n\n\/\/ Parse 分析一段代码,并将结果保存到 d 中。\nfunc Parse(d *types.Doc, input *Input) {\n\tl := newLexer(input)\n\n\tfor {\n\t\tswitch {\n\t\tcase l.matchTag(vars.APIDoc):\n\t\t\tif !l.scanAPIDoc(d) {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase l.matchTag(vars.API):\n\t\t\tif api, ok := l.scanAPI(); ok {\n\t\t\t\tif api == nil { \/\/ @apiIgnore\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\td.NewAPI(api)\n\t\t\t} else {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase l.match(vars.API):\n\t\t\tl.backup()\n\t\t\tl.syntaxWarn(locale.ErrUnknownTag, l.readWord())\n\t\t\tl.readTag() \/\/ 指针移到下一个标签处\n\t\tdefault:\n\t\t\tif l.atEOF() {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tl.pos++ \/\/ 去掉无用的字符。\n\t\t}\n\t} \/\/ end for\n}\n\n\/\/ 解析 @apidoc 及其子标签\n\/\/\n\/\/ @apidoc title of doc\n\/\/ @apiVersion 2.0\n\/\/ @apiBaseURL https:\/\/api.caixw.io\n\/\/ @apiLicense MIT https:\/\/opensource.org\/licenses\/MIT\n\/\/\n\/\/ @apiContent\n\/\/ content1\n\/\/ content2\nfunc (l *lexer) scanAPIDoc(d *types.Doc) bool {\n\tif len(d.Title) > 0 || len(d.Version) > 0 {\n\t\tl.syntaxError(locale.ErrDuplicateTag, vars.APIDoc)\n\t\treturn false\n\t}\n\n\tt := l.readTag()\n\td.Title = t.readLine()\n\tif len(d.Title) == 0 {\n\t\tl.syntaxError(locale.ErrTagArgNotEnough, vars.APIDoc)\n\t\treturn false\n\t}\n\tif !t.atEOF() {\n\t\tl.syntaxError(locale.ErrTagArgTooMuch, vars.APIDoc)\n\t\treturn false\n\t}\n\n\tfor {\n\t\tswitch {\n\t\tcase l.matchTag(vars.APIVersion):\n\t\t\tt := l.readTag()\n\t\t\td.Version = t.readLine()\n\t\t\tif len(d.Version) == 0 {\n\t\t\t\tt.syntaxError(locale.ErrTagArgNotEnough, vars.APIVersion)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif !t.atEOF() {\n\t\t\t\tt.syntaxError(locale.ErrTagArgTooMuch, vars.APIVersion)\n\t\t\t\treturn false\n\t\t\t}\n\t\tcase l.matchTag(vars.APIBaseURL):\n\t\t\tt := l.readTag()\n\t\t\td.BaseURL = t.readLine()\n\t\t\tif len(d.BaseURL) == 0 {\n\t\t\t\tt.syntaxError(locale.ErrTagArgNotEnough, vars.APIBaseURL)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif !t.atEOF() {\n\t\t\t\tt.syntaxError(locale.ErrTagArgTooMuch, vars.APIBaseURL)\n\t\t\t\treturn false\n\t\t\t}\n\t\tcase l.matchTag(vars.APILicense):\n\t\t\tt := l.readTag()\n\t\t\td.LicenseName = t.readWord()\n\t\t\td.LicenseURL = t.readLine()\n\t\t\tif len(d.LicenseName) == 0 {\n\t\t\t\tt.syntaxError(locale.ErrTagArgNotEnough, vars.APILicense)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif len(d.LicenseURL) > 0 && !is.URL(d.LicenseURL) {\n\t\t\t\tt.syntaxError(locale.ErrSecondArgMustURL)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif !t.atEOF() {\n\t\t\t\tt.syntaxError(locale.ErrTagArgTooMuch, vars.APILicense)\n\t\t\t\treturn false\n\t\t\t}\n\t\tcase l.matchTag(vars.APIContent):\n\t\t\td.Content = l.readEnd()\n\t\tcase l.match(vars.API): \/\/ 不认识的标签\n\t\t\tl.backup()\n\t\t\tl.syntaxError(locale.ErrUnknownTag, l.readWord())\n\t\t\treturn false\n\t\tdefault:\n\t\t\tif l.atEOF() {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tl.pos++ \/\/ 去掉无用的字符。\n\t\t}\n\t} \/\/ end for\n}\n\n\/\/ 解析 @api 及其子标签\nfunc (l *lexer) scanAPI() (*types.API, bool) {\n\tapi := &types.API{}\n\tt := l.readTag()\n\tapi.Method = t.readWord()\n\tapi.URL = t.readWord()\n\tapi.Summary = t.readLine()\n\n\tif len(api.Method) == 0 || len(api.URL) == 0 || len(api.Summary) == 0 {\n\t\tt.syntaxError(locale.ErrTagArgNotEnough, vars.API)\n\t\treturn nil, false\n\t}\n\n\tapi.Description = t.readEnd()\nLOOP:\n\tfor {\n\t\tswitch {\n\t\tcase l.matchTag(vars.APIIgnore):\n\t\t\treturn nil, true\n\t\tcase l.matchTag(vars.APIGroup):\n\t\t\tif !l.scanGroup(api) {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\tcase l.matchTag(vars.APIQuery):\n\t\t\tif !l.scanAPIQueries(api) {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\tcase l.matchTag(vars.APIParam):\n\t\t\tif !l.scanAPIParams(api) {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\tcase l.matchTag(vars.APIRequest):\n\t\t\treq, ok := l.scanAPIRequest()\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tapi.Request = req\n\t\tcase l.matchTag(vars.APIError):\n\t\t\tresp, ok := l.scanResponse(vars.APIError)\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tapi.Error = resp\n\t\tcase l.matchTag(vars.APISuccess):\n\t\t\tresp, ok := l.scanResponse(vars.APISuccess)\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tapi.Success = resp\n\t\tcase l.match(vars.API): \/\/ 不认识的标签\n\t\t\tl.backup()\n\t\t\tl.syntaxWarn(locale.ErrUnknownTag, l.readWord())\n\t\t\tl.readTag() \/\/ 指针移到下一个标签处\n\t\tdefault:\n\t\t\tif l.atEOF() {\n\t\t\t\tbreak LOOP\n\t\t\t}\n\t\t\tl.pos++ \/\/ 去掉无用的字符。\n\t\t}\n\t} \/\/ end for\n\n\tif api.Success == nil {\n\t\tl.syntaxError(locale.ErrSuccessNotEmpty)\n\t\treturn nil, false\n\t}\n\n\tif len(api.Group) == 0 {\n\t\tapi.Group = vars.DefaultGroupName\n\t}\n\n\treturn api, true\n}\n\nfunc (l *lexer) scanGroup(api *types.API) bool {\n\tt := l.readTag()\n\n\tapi.Group = t.readWord()\n\tif len(api.Group) == 0 {\n\t\tt.syntaxError(locale.ErrTagArgNotEnough, vars.APIGroup)\n\t\treturn false\n\t}\n\n\tif !t.atEOF() {\n\t\tt.syntaxError(locale.ErrTagArgTooMuch, vars.APIGroup)\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (l *lexer) scanAPIQueries(api *types.API) bool {\n\tif api.Queries == nil {\n\t\tapi.Queries = make([]*types.Param, 0, 1)\n\t}\n\n\tif p, ok := l.scanAPIParam(vars.APIQuery); ok {\n\t\tapi.Queries = append(api.Queries, p)\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (l *lexer) scanAPIParams(api *types.API) bool {\n\tif api.Params == nil {\n\t\tapi.Params = make([]*types.Param, 0, 1)\n\t}\n\n\tif p, ok := l.scanAPIParam(vars.APIParam); ok {\n\t\tapi.Params = append(api.Params, p)\n\t\treturn true\n\t}\n\treturn false\n}\n\n\/\/ 解析 @apiRequest 及其子标签\nfunc (l *lexer) scanAPIRequest() (*types.Request, bool) {\n\tt := l.readTag()\n\tr := &types.Request{\n\t\tType: t.readLine(),\n\t\tHeaders: map[string]string{},\n\t\tParams: []*types.Param{},\n\t\tExamples: []*types.Example{},\n\t}\n\tif !t.atEOF() {\n\t\tt.syntaxError(locale.ErrTagArgTooMuch, vars.APIRequest)\n\t\treturn nil, false\n\t}\n\nLOOP:\n\tfor {\n\t\tswitch {\n\t\tcase l.matchTag(vars.APIHeader):\n\t\t\tt := l.readTag()\n\t\t\tkey := t.readWord()\n\t\t\tval := t.readLine()\n\t\t\tif len(key) == 0 || len(val) == 0 {\n\t\t\t\tt.syntaxError(locale.ErrTagArgNotEnough, vars.APIHeader)\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tif !t.atEOF() {\n\t\t\t\tt.syntaxError(locale.ErrTagArgTooMuch, vars.APIHeader)\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tr.Headers[string(key)] = string(val)\n\t\tcase l.matchTag(vars.APIParam):\n\t\t\tp, ok := l.scanAPIParam(vars.APIParam)\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tr.Params = append(r.Params, p)\n\t\tcase l.matchTag(vars.APIExample):\n\t\t\te, ok := l.scanAPIExample()\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tr.Examples = append(r.Examples, e)\n\t\tcase l.match(vars.API): \/\/ 其它 api*,退出。\n\t\t\tl.backup()\n\t\t\tbreak LOOP\n\t\tdefault:\n\t\t\tif l.atEOF() {\n\t\t\t\tbreak LOOP\n\t\t\t}\n\t\t\tl.pos++ \/\/ 去掉无用的字符。\n\n\t\t} \/\/ end switch\n\t} \/\/ end for\n\n\treturn r, true\n}\n\n\/\/ 解析 @apiSuccess 或是 @apiError 及其子标签。\nfunc (l *lexer) scanResponse(tagName string) (*types.Response, bool) {\n\ttag := l.readTag()\n\tresp := &types.Response{\n\t\tCode: tag.readWord(),\n\t\tSummary: tag.readLine(),\n\t\tHeaders: map[string]string{},\n\t\tParams: []*types.Param{},\n\t\tExamples: []*types.Example{},\n\t}\n\n\tif len(resp.Code) == 0 || len(resp.Summary) == 0 {\n\t\ttag.syntaxError(locale.ErrTagArgNotEnough, tagName)\n\t\treturn nil, false\n\t}\n\tif !tag.atEOF() {\n\t\ttag.syntaxError(locale.ErrTagArgTooMuch, tagName)\n\t\treturn nil, false\n\t}\n\nLOOP:\n\tfor {\n\t\tswitch {\n\t\tcase l.matchTag(vars.APIHeader):\n\t\t\tt := l.readTag()\n\t\t\tkey := t.readWord()\n\t\t\tval := t.readLine()\n\t\t\tif len(key) == 0 || len(val) == 0 {\n\t\t\t\tt.syntaxError(locale.ErrTagArgNotEnough, vars.APIHeader)\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tif !t.atEOF() {\n\t\t\t\tt.syntaxError(locale.ErrTagArgTooMuch, vars.APIHeader)\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tresp.Headers[key] = val\n\t\tcase l.matchTag(vars.APIParam):\n\t\t\tp, ok := l.scanAPIParam(vars.APIParam)\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tresp.Params = append(resp.Params, p)\n\t\tcase l.matchTag(vars.APIExample):\n\t\t\te, ok := l.scanAPIExample()\n\t\t\tif !ok {\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tresp.Examples = append(resp.Examples, e)\n\t\tcase l.match(vars.API): \/\/ 其它 api*,退出。\n\t\t\tl.backup()\n\t\t\tbreak LOOP\n\t\tdefault:\n\t\t\tif l.atEOF() {\n\t\t\t\tbreak LOOP\n\t\t\t}\n\t\t\tl.pos++ \/\/ 去掉无用的字符。\n\t\t}\n\t}\n\n\treturn resp, true\n}\n\n\/\/ 解析 @apiExample 标签\nfunc (l *lexer) scanAPIExample() (*types.Example, bool) {\n\ttag := l.readTag()\n\texample := &types.Example{\n\t\tType: tag.readWord(),\n\t\tCode: tag.readEnd(),\n\t}\n\n\tif len(example.Type) == 0 || len(example.Code) == 0 {\n\t\ttag.syntaxError(locale.ErrTagArgNotEnough, vars.APIExample)\n\t\treturn nil, false\n\t}\n\n\treturn example, true\n}\n\n\/\/ 解析 @apiParam 标签\nfunc (l *lexer) scanAPIParam(tagName string) (*types.Param, bool) {\n\tp := &types.Param{}\n\n\ttag := l.readTag()\n\tp.Name = tag.readWord()\n\tp.Type = tag.readWord()\n\tp.Summary = tag.readEnd()\n\tif len(p.Name) == 0 || len(p.Type) == 0 || len(p.Summary) == 0 {\n\t\ttag.syntaxError(locale.ErrTagArgNotEnough, tagName)\n\t\treturn nil, false\n\t}\n\treturn p, true\n}\n<|endoftext|>"} {"text":"<commit_before>package mount\n\nimport (\n\t\"github.com\/hanwen\/go-fuse\/v2\/fuse\"\n\tsys \"golang.org\/x\/sys\/unix\"\n\t\"runtime\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nconst (\n\t\/\/ https:\/\/man7.org\/linux\/man-pages\/man7\/xattr.7.html#:~:text=The%20VFS%20imposes%20limitations%20that,in%20listxattr(2)).\n\tMAX_XATTR_NAME_SIZE = 255\n\tMAX_XATTR_VALUE_SIZE = 65536\n\tXATTR_PREFIX = \"xattr-\" \/\/ same as filer\n)\n\n\/\/ GetXAttr reads an extended attribute, and should return the\n\/\/ number of bytes. If the buffer is too small, return ERANGE,\n\/\/ with the required buffer size.\nfunc (wfs *WFS) GetXAttr(cancel <-chan struct{}, header *fuse.InHeader, attr string, dest []byte) (size uint32, code fuse.Status) {\n\n\t\/\/validate attr name\n\tif len(attr) > MAX_XATTR_NAME_SIZE {\n\t\tif runtime.GOOS == \"darwin\" {\n\t\t\treturn 0, fuse.EPERM\n\t\t} else {\n\t\t\treturn 0, fuse.ERANGE\n\t\t}\n\t}\n\tif len(attr) == 0 {\n\t\treturn 0, fuse.EINVAL\n\t}\n\n\t_, _, entry, status := wfs.maybeReadEntry(header.NodeId)\n\tif status != fuse.OK {\n\t\treturn 0, status\n\t}\n\tif entry == nil {\n\t\treturn 0, fuse.ENOENT\n\t}\n\tif entry.Extended == nil {\n\t\treturn 0, fuse.ENOATTR\n\t}\n\tdata, found := entry.Extended[XATTR_PREFIX+attr]\n\tif !found {\n\t\treturn 0, fuse.ENOATTR\n\t}\n\tif len(dest) < len(data) {\n\t\treturn uint32(len(data)), fuse.ERANGE\n\t}\n\tcopy(dest, data)\n\n\treturn uint32(len(data)), fuse.OK\n}\n\n\/\/ SetXAttr writes an extended attribute.\n\/\/ https:\/\/man7.org\/linux\/man-pages\/man2\/setxattr.2.html\n\/\/ By default (i.e., flags is zero), the extended attribute will be\n\/\/ created if it does not exist, or the value will be replaced if\n\/\/ the attribute already exists. To modify these semantics, one of\n\/\/ the following values can be specified in flags:\n\/\/\n\/\/ XATTR_CREATE\n\/\/ Perform a pure create, which fails if the named attribute\n\/\/ exists already.\n\/\/\n\/\/ XATTR_REPLACE\n\/\/ Perform a pure replace operation, which fails if the named\n\/\/ attribute does not already exist.\nfunc (wfs *WFS) SetXAttr(cancel <-chan struct{}, input *fuse.SetXAttrIn, attr string, data []byte) fuse.Status {\n\t\/\/validate attr name\n\tif len(attr) > MAX_XATTR_NAME_SIZE {\n\t\tif runtime.GOOS == \"darwin\" {\n\t\t\treturn fuse.EPERM\n\t\t} else {\n\t\t\treturn fuse.ERANGE\n\t\t}\n\t}\n\tif len(attr) == 0 {\n\t\treturn fuse.EINVAL\n\t}\n\t\/\/validate attr value\n\tif len(data) > MAX_XATTR_VALUE_SIZE {\n\t\tif runtime.GOOS == \"darwin\" {\n\t\t\treturn fuse.Status(syscall.E2BIG)\n\t\t} else {\n\t\t\treturn fuse.ERANGE\n\t\t}\n\t}\n\n\tpath, _, entry, status := wfs.maybeReadEntry(input.NodeId)\n\tif status != fuse.OK {\n\t\treturn status\n\t}\n\tif entry.Extended == nil {\n\t\tentry.Extended = make(map[string][]byte)\n\t}\n\toldData, _ := entry.Extended[XATTR_PREFIX+attr]\n\tswitch input.Flags {\n\tcase sys.XATTR_CREATE:\n\t\tif len(oldData) > 0 {\n\t\t\tbreak\n\t\t}\n\t\tfallthrough\n\tcase sys.XATTR_REPLACE:\n\t\tfallthrough\n\tdefault:\n\t\tentry.Extended[XATTR_PREFIX+attr] = data\n\t}\n\n\treturn wfs.saveEntry(path, entry)\n\n}\n\n\/\/ ListXAttr lists extended attributes as '\\0' delimited byte\n\/\/ slice, and return the number of bytes. If the buffer is too\n\/\/ small, return ERANGE, with the required buffer size.\nfunc (wfs *WFS) ListXAttr(cancel <-chan struct{}, header *fuse.InHeader, dest []byte) (n uint32, code fuse.Status) {\n\t_, _, entry, status := wfs.maybeReadEntry(header.NodeId)\n\tif status != fuse.OK {\n\t\treturn 0, status\n\t}\n\tif entry == nil {\n\t\treturn 0, fuse.ENOENT\n\t}\n\tif entry.Extended == nil {\n\t\treturn 0, fuse.ENOATTR\n\t}\n\n\tvar data []byte\n\tfor k := range entry.Extended {\n\t\tif strings.HasPrefix(k, XATTR_PREFIX) {\n\t\t\tdata = append(data, k[len(XATTR_PREFIX):]...)\n\t\t\tdata = append(data, 0)\n\t\t}\n\t}\n\tif len(dest) < len(data) {\n\t\treturn uint32(len(data)), fuse.ERANGE\n\t}\n\n\tcopy(dest, data)\n\n\treturn uint32(len(data)), fuse.OK\n}\n\n\/\/ RemoveXAttr removes an extended attribute.\nfunc (wfs *WFS) RemoveXAttr(cancel <-chan struct{}, header *fuse.InHeader, attr string) fuse.Status {\n\tif len(attr) == 0 {\n\t\treturn fuse.EINVAL\n\t}\n\tpath, _, entry, status := wfs.maybeReadEntry(header.NodeId)\n\tif status != fuse.OK {\n\t\treturn status\n\t}\n\tif entry.Extended == nil {\n\t\treturn fuse.ENOATTR\n\t}\n\t_, found := entry.Extended[XATTR_PREFIX+attr]\n\n\tif !found {\n\t\treturn fuse.ENOATTR\n\t}\n\n\tdelete(entry.Extended, XATTR_PREFIX+attr)\n\n\treturn wfs.saveEntry(path, entry)\n}\n<commit_msg>mount2: listXattr return ok if xattr is empty<commit_after>package mount\n\nimport (\n\t\"github.com\/hanwen\/go-fuse\/v2\/fuse\"\n\tsys \"golang.org\/x\/sys\/unix\"\n\t\"runtime\"\n\t\"strings\"\n\t\"syscall\"\n)\n\nconst (\n\t\/\/ https:\/\/man7.org\/linux\/man-pages\/man7\/xattr.7.html#:~:text=The%20VFS%20imposes%20limitations%20that,in%20listxattr(2)).\n\tMAX_XATTR_NAME_SIZE = 255\n\tMAX_XATTR_VALUE_SIZE = 65536\n\tXATTR_PREFIX = \"xattr-\" \/\/ same as filer\n)\n\n\/\/ GetXAttr reads an extended attribute, and should return the\n\/\/ number of bytes. If the buffer is too small, return ERANGE,\n\/\/ with the required buffer size.\nfunc (wfs *WFS) GetXAttr(cancel <-chan struct{}, header *fuse.InHeader, attr string, dest []byte) (size uint32, code fuse.Status) {\n\n\t\/\/validate attr name\n\tif len(attr) > MAX_XATTR_NAME_SIZE {\n\t\tif runtime.GOOS == \"darwin\" {\n\t\t\treturn 0, fuse.EPERM\n\t\t} else {\n\t\t\treturn 0, fuse.ERANGE\n\t\t}\n\t}\n\tif len(attr) == 0 {\n\t\treturn 0, fuse.EINVAL\n\t}\n\n\t_, _, entry, status := wfs.maybeReadEntry(header.NodeId)\n\tif status != fuse.OK {\n\t\treturn 0, status\n\t}\n\tif entry == nil {\n\t\treturn 0, fuse.ENOENT\n\t}\n\tif entry.Extended == nil {\n\t\treturn 0, fuse.ENOATTR\n\t}\n\tdata, found := entry.Extended[XATTR_PREFIX+attr]\n\tif !found {\n\t\treturn 0, fuse.ENOATTR\n\t}\n\tif len(dest) < len(data) {\n\t\treturn uint32(len(data)), fuse.ERANGE\n\t}\n\tcopy(dest, data)\n\n\treturn uint32(len(data)), fuse.OK\n}\n\n\/\/ SetXAttr writes an extended attribute.\n\/\/ https:\/\/man7.org\/linux\/man-pages\/man2\/setxattr.2.html\n\/\/ By default (i.e., flags is zero), the extended attribute will be\n\/\/ created if it does not exist, or the value will be replaced if\n\/\/ the attribute already exists. To modify these semantics, one of\n\/\/ the following values can be specified in flags:\n\/\/\n\/\/ XATTR_CREATE\n\/\/ Perform a pure create, which fails if the named attribute\n\/\/ exists already.\n\/\/\n\/\/ XATTR_REPLACE\n\/\/ Perform a pure replace operation, which fails if the named\n\/\/ attribute does not already exist.\nfunc (wfs *WFS) SetXAttr(cancel <-chan struct{}, input *fuse.SetXAttrIn, attr string, data []byte) fuse.Status {\n\t\/\/validate attr name\n\tif len(attr) > MAX_XATTR_NAME_SIZE {\n\t\tif runtime.GOOS == \"darwin\" {\n\t\t\treturn fuse.EPERM\n\t\t} else {\n\t\t\treturn fuse.ERANGE\n\t\t}\n\t}\n\tif len(attr) == 0 {\n\t\treturn fuse.EINVAL\n\t}\n\t\/\/validate attr value\n\tif len(data) > MAX_XATTR_VALUE_SIZE {\n\t\tif runtime.GOOS == \"darwin\" {\n\t\t\treturn fuse.Status(syscall.E2BIG)\n\t\t} else {\n\t\t\treturn fuse.ERANGE\n\t\t}\n\t}\n\n\tpath, _, entry, status := wfs.maybeReadEntry(input.NodeId)\n\tif status != fuse.OK {\n\t\treturn status\n\t}\n\tif entry.Extended == nil {\n\t\tentry.Extended = make(map[string][]byte)\n\t}\n\toldData, _ := entry.Extended[XATTR_PREFIX+attr]\n\tswitch input.Flags {\n\tcase sys.XATTR_CREATE:\n\t\tif len(oldData) > 0 {\n\t\t\tbreak\n\t\t}\n\t\tfallthrough\n\tcase sys.XATTR_REPLACE:\n\t\tfallthrough\n\tdefault:\n\t\tentry.Extended[XATTR_PREFIX+attr] = data\n\t}\n\n\treturn wfs.saveEntry(path, entry)\n\n}\n\n\/\/ ListXAttr lists extended attributes as '\\0' delimited byte\n\/\/ slice, and return the number of bytes. If the buffer is too\n\/\/ small, return ERANGE, with the required buffer size.\nfunc (wfs *WFS) ListXAttr(cancel <-chan struct{}, header *fuse.InHeader, dest []byte) (n uint32, code fuse.Status) {\n\t_, _, entry, status := wfs.maybeReadEntry(header.NodeId)\n\tif status != fuse.OK {\n\t\treturn 0, status\n\t}\n\tif entry == nil {\n\t\treturn 0, fuse.ENOENT\n\t}\n\tif entry.Extended == nil {\n\t\treturn 0, fuse.OK\n\t}\n\n\tvar data []byte\n\tfor k := range entry.Extended {\n\t\tif strings.HasPrefix(k, XATTR_PREFIX) {\n\t\t\tdata = append(data, k[len(XATTR_PREFIX):]...)\n\t\t\tdata = append(data, 0)\n\t\t}\n\t}\n\tif len(dest) < len(data) {\n\t\treturn uint32(len(data)), fuse.ERANGE\n\t}\n\n\tcopy(dest, data)\n\n\treturn uint32(len(data)), fuse.OK\n}\n\n\/\/ RemoveXAttr removes an extended attribute.\nfunc (wfs *WFS) RemoveXAttr(cancel <-chan struct{}, header *fuse.InHeader, attr string) fuse.Status {\n\tif len(attr) == 0 {\n\t\treturn fuse.EINVAL\n\t}\n\tpath, _, entry, status := wfs.maybeReadEntry(header.NodeId)\n\tif status != fuse.OK {\n\t\treturn status\n\t}\n\tif entry.Extended == nil {\n\t\treturn fuse.ENOATTR\n\t}\n\t_, found := entry.Extended[XATTR_PREFIX+attr]\n\n\tif !found {\n\t\treturn fuse.ENOATTR\n\t}\n\n\tdelete(entry.Extended, XATTR_PREFIX+attr)\n\n\treturn wfs.saveEntry(path, entry)\n}\n<|endoftext|>"} {"text":"<commit_before>package imageunpacker\n\nconst (\n\tStatusStreamIdle = 0\n\tStatusStreamScanning = 1\n\tStatusStreamScanned = 2\n\tStatusStreamFetching = 3\n\tStatusStreamUpdating = 4\n\tStatusStreamPreparing = 5\n)\n\ntype DeviceInfo struct {\n\tDeviceName string\n\tSize uint64\n\tStreamName string\n}\n\n\/\/ The AddDevice() RPC is an exclusive transaction following this sequence:\n\/\/ - Server sends string \"\\n\" if Client should proceed with attaching a device\n\/\/ to the server, else it sends a string indicating an error\n\/\/ - Client sends string containing the DeviceID that was just attached\n\/\/ - Server sends \"\\n\" if device was found, else an error message.\n\/\/ - End of transaction. Method completes.\n\ntype GetStatusRequest struct{}\n\ntype GetStatusResponse struct {\n\tDevices map[string]DeviceInfo \/\/ Key: DeviceId.\n\tImageStreams map[string]ImageStreamInfo \/\/ Key: StreamName.\n}\n\ntype ImageStreamInfo struct {\n\tDeviceId string\n\tStatus StreamStatus\n}\n\ntype PrepareForCaptureRequest struct {\n\tStreamName string\n}\n\ntype PrepareForCaptureResponse struct{}\n\ntype PrepareForUnpackRequest struct {\n\tStreamName string\n}\n\ntype PrepareForUnpackResponse struct{}\n\ntype StreamStatus uint\n\nfunc (status StreamStatus) String() string {\n\treturn status.string()\n}\n\ntype UnpackImageRequest struct {\n\tStreamName string\n\tImageLeafName string\n}\n\ntype UnpackImageResponse struct{}\n<commit_msg>Add ImageUnpacker.AssociateStreamWithDevice SRPC protocol messages.<commit_after>package imageunpacker\n\nconst (\n\tStatusStreamIdle = 0\n\tStatusStreamScanning = 1\n\tStatusStreamScanned = 2\n\tStatusStreamFetching = 3\n\tStatusStreamUpdating = 4\n\tStatusStreamPreparing = 5\n)\n\ntype DeviceInfo struct {\n\tDeviceName string\n\tSize uint64\n\tStreamName string\n}\n\n\/\/ The AddDevice() RPC is an exclusive transaction following this sequence:\n\/\/ - Server sends string \"\\n\" if Client should proceed with attaching a device\n\/\/ to the server, else it sends a string indicating an error\n\/\/ - Client sends string containing the DeviceID that was just attached\n\/\/ - Server sends \"\\n\" if device was found, else an error message.\n\/\/ - End of transaction. Method completes.\n\ntype AssociateStreamWithDeviceRequest struct {\n\tStreamName string\n\tDeviceId string\n}\n\ntype AssociateStreamWithDeviceResponse struct{}\n\ntype GetStatusRequest struct{}\n\ntype GetStatusResponse struct {\n\tDevices map[string]DeviceInfo \/\/ Key: DeviceId.\n\tImageStreams map[string]ImageStreamInfo \/\/ Key: StreamName.\n}\n\ntype ImageStreamInfo struct {\n\tDeviceId string\n\tStatus StreamStatus\n}\n\ntype PrepareForCaptureRequest struct {\n\tStreamName string\n}\n\ntype PrepareForCaptureResponse struct{}\n\ntype PrepareForUnpackRequest struct {\n\tStreamName string\n}\n\ntype PrepareForUnpackResponse struct{}\n\ntype StreamStatus uint\n\nfunc (status StreamStatus) String() string {\n\treturn status.string()\n}\n\ntype UnpackImageRequest struct {\n\tStreamName string\n\tImageLeafName string\n}\n\ntype UnpackImageResponse struct{}\n<|endoftext|>"} {"text":"<commit_before>package funscript\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/funjack\/launchcontrol\/protocol\"\n)\n\nconst (\n\t\/\/ SpeedLimitMin is the slowest movement command possible. The Launch\n\t\/\/ crashes on very slow speeds.\n\tSpeedLimitMin = 20\n\t\/\/ SpeedLimitMax is the fasts movement command possible. The Launch\n\t\/\/ makes weird 'clicking' noises when moving at very fast speeds.\n\tSpeedLimitMax = 80\n\t\/\/ PositionMin is the lowest position possible.\n\tPositionMin = 5\n\t\/\/ PositionMax is the hight position possible.\n\tPositionMax = 95\n\t\/\/ Threshold is the minimum amount of time between actions.\n\tThreshold = 100 * time.Millisecond\n)\n\n\/\/ Script is the Funscript container type holding Launch data.\ntype Script struct {\n\t\/\/ Version of Launchscript\n\tVersion string\n\t\/\/ Inverted causes up and down movement to be flipped.\n\tInverted bool `json:\"inverted,omitempty\"`\n\t\/\/ Range is the percentage of a full stroke to use.\n\tRange Range `json:\"range,omitempty\"`\n\t\/\/ Actions are the timed moves.\n\tActions []Action\n}\n\n\/\/ Action is a move at a specific time.\ntype Action struct {\n\t\/\/ At time in milliseconds the action should fire.\n\tAt int64\n\t\/\/ Pos is the place in percent to move to.\n\tPos int\n}\n\n\/\/ Range in percent.\ntype Range int\n\n\/\/ Position returns the position for p limited within the range.\nfunc (r Range) Position(p int) int {\n\tif r > 0 {\n\t\treturn int(float64(p) \/ 100 * float64(r))\n\t}\n\treturn p\n}\n\n\/\/ Stats when generated script.\ntype Stats struct {\n\tCount uint64 \/\/ Amount of actions generated.\n\tDistanceTotal uint64 \/\/ Total distance that will be traveled.\n\tSpeedTotal uint64 \/\/ Accumulation of all commands speed param.\n\tSpeedOverrideFast int \/\/ Number of times generated speed was too fast.\n\tSpeedOverrideSlow int \/\/ Number of times generated speed was too slow.\n\tDelayed int \/\/ Number of actions that will be thresholded.\n}\n\n\/\/ String returns a formatted.\nfunc (s Stats) String() string {\n\tvar fastPct, slowPct float64\n\toverrideTotal := s.SpeedOverrideFast + s.SpeedOverrideSlow\n\tif overrideTotal > 0 {\n\t\tfastPct = float64(s.SpeedOverrideFast) \/\n\t\t\tfloat64(overrideTotal) * 100\n\t\tslowPct = float64(s.SpeedOverrideSlow) \/\n\t\t\tfloat64(overrideTotal) * 100\n\t}\n\tvar avgSpeed uint64\n\tif s.Count > 0 {\n\t\tavgSpeed = s.SpeedTotal \/ s.Count\n\t}\n\treturn fmt.Sprintf(\"actions=%d (avgspeed=%d%%), delayed=%d, \"+\n\t\t\"speedoverrides=%d (fast=%.2f%%,slow=%.2f%%)\",\n\t\ts.Count, avgSpeed, s.Delayed, overrideTotal,\n\t\tfastPct, slowPct)\n}\n\n\/\/ TimedActions creates timed Launch actions from the Scripts timed positions.\n\/\/ The minspd\/maxspd arguments are Launch speed limits in percent. The\n\/\/ minpos\/maxpos specify the position limits in percent.\n\/\/ The second return value are statistics on the script generation.\nfunc (fs Script) TimedActions(minspd, maxspd, minpos, maxpos int) (s protocol.TimedActions, stat Stats) {\n\tif minspd < SpeedLimitMin {\n\t\tminspd = SpeedLimitMin\n\t}\n\tif maxspd > SpeedLimitMax {\n\t\tmaxspd = SpeedLimitMax\n\t}\n\tif minpos < PositionMin {\n\t\tminpos = PositionMin\n\t}\n\tif maxpos > PositionMax {\n\t\tmaxpos = PositionMax\n\t}\n\tr := Range(maxpos - minpos)\n\tif fs.Range != 0 && r > fs.Range {\n\t\tr = fs.Range\n\t}\n\n\ts = make(protocol.TimedActions, 1, len(fs.Actions)+1)\n\ts[0].Time = 0\n\tif fs.Inverted {\n\t\ts[0].Position = maxpos \/\/ Init at top\n\t} else {\n\n\t\ts[0].Position = minpos \/\/ Init at bottom\n\t}\n\ts[0].Speed = SpeedLimitMin\n\n\tpreviousPosition := s[0].Position\n\tprevious := Action{\n\t\tAt: 0,\n\t\tPos: 0,\n\t}\n\tfor _, e := range fs.Actions {\n\t\tif e.Pos == previous.Pos {\n\t\t\tprevious = e\n\t\t\tcontinue\n\t\t}\n\t\ttimediff := time.Duration(e.At-previous.At) * time.Millisecond\n\t\tif timediff < Threshold {\n\t\t\tstat.Delayed++\n\t\t}\n\t\tposition := e.Pos\n\t\tif fs.Inverted {\n\t\t\tposition = 100 - e.Pos\n\t\t}\n\t\tposition = r.Position(position) + minpos\n\t\tdistance := position - previousPosition\n\t\tif distance < 0 {\n\t\t\tdistance = -distance\n\t\t}\n\t\tstat.DistanceTotal = stat.DistanceTotal + uint64(distance)\n\t\tspeed := Speed(distance, timediff)\n\t\tif speed > maxspd {\n\t\t\tspeed = maxspd\n\t\t\tstat.SpeedOverrideFast++\n\t\t} else if speed < minspd {\n\t\t\tspeed = minspd\n\t\t\tstat.SpeedOverrideSlow++\n\t\t}\n\t\tstat.SpeedTotal = stat.SpeedTotal + uint64(speed)\n\t\tta := protocol.TimedAction{\n\t\t\tTime: time.Duration(previous.At) * time.Millisecond,\n\t\t\tAction: protocol.Action{\n\t\t\t\tPosition: position,\n\t\t\t\tSpeed: speed,\n\t\t\t},\n\t\t}\n\t\ts = append(s, ta)\n\t\tstat.Count++\n\t\tprevious = e\n\t\tpreviousPosition = ta.Position\n\t}\n\treturn\n}\n<commit_msg>Include json tags on funscript structs<commit_after>package funscript\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/funjack\/launchcontrol\/protocol\"\n)\n\nconst (\n\t\/\/ SpeedLimitMin is the slowest movement command possible. The Launch\n\t\/\/ crashes on very slow speeds.\n\tSpeedLimitMin = 20\n\t\/\/ SpeedLimitMax is the fasts movement command possible. The Launch\n\t\/\/ makes weird 'clicking' noises when moving at very fast speeds.\n\tSpeedLimitMax = 80\n\t\/\/ PositionMin is the lowest position possible.\n\tPositionMin = 5\n\t\/\/ PositionMax is the hight position possible.\n\tPositionMax = 95\n\t\/\/ Threshold is the minimum amount of time between actions.\n\tThreshold = 100 * time.Millisecond\n)\n\n\/\/ Script is the Funscript container type holding Launch data.\ntype Script struct {\n\t\/\/ Version of Launchscript\n\tVersion string `json:\"version\"`\n\t\/\/ Inverted causes up and down movement to be flipped.\n\tInverted bool `json:\"inverted,omitempty\"`\n\t\/\/ Range is the percentage of a full stroke to use.\n\tRange Range `json:\"range,omitempty\"`\n\t\/\/ Actions are the timed moves.\n\tActions []Action `json:\"actions\"`\n}\n\n\/\/ Action is a move at a specific time.\ntype Action struct {\n\t\/\/ At time in milliseconds the action should fire.\n\tAt int64 `json:\"at\"`\n\t\/\/ Pos is the place in percent to move to.\n\tPos int `json:\"pos\"`\n}\n\n\/\/ Range in percent.\ntype Range int\n\n\/\/ Position returns the position for p limited within the range.\nfunc (r Range) Position(p int) int {\n\tif r > 0 {\n\t\treturn int(float64(p) \/ 100 * float64(r))\n\t}\n\treturn p\n}\n\n\/\/ Stats when generated script.\ntype Stats struct {\n\tCount uint64 \/\/ Amount of actions generated.\n\tDistanceTotal uint64 \/\/ Total distance that will be traveled.\n\tSpeedTotal uint64 \/\/ Accumulation of all commands speed param.\n\tSpeedOverrideFast int \/\/ Number of times generated speed was too fast.\n\tSpeedOverrideSlow int \/\/ Number of times generated speed was too slow.\n\tDelayed int \/\/ Number of actions that will be thresholded.\n}\n\n\/\/ String returns a formatted.\nfunc (s Stats) String() string {\n\tvar fastPct, slowPct float64\n\toverrideTotal := s.SpeedOverrideFast + s.SpeedOverrideSlow\n\tif overrideTotal > 0 {\n\t\tfastPct = float64(s.SpeedOverrideFast) \/\n\t\t\tfloat64(overrideTotal) * 100\n\t\tslowPct = float64(s.SpeedOverrideSlow) \/\n\t\t\tfloat64(overrideTotal) * 100\n\t}\n\tvar avgSpeed uint64\n\tif s.Count > 0 {\n\t\tavgSpeed = s.SpeedTotal \/ s.Count\n\t}\n\treturn fmt.Sprintf(\"actions=%d (avgspeed=%d%%), delayed=%d, \"+\n\t\t\"speedoverrides=%d (fast=%.2f%%,slow=%.2f%%)\",\n\t\ts.Count, avgSpeed, s.Delayed, overrideTotal,\n\t\tfastPct, slowPct)\n}\n\n\/\/ TimedActions creates timed Launch actions from the Scripts timed positions.\n\/\/ The minspd\/maxspd arguments are Launch speed limits in percent. The\n\/\/ minpos\/maxpos specify the position limits in percent.\n\/\/ The second return value are statistics on the script generation.\nfunc (fs Script) TimedActions(minspd, maxspd, minpos, maxpos int) (s protocol.TimedActions, stat Stats) {\n\tif minspd < SpeedLimitMin {\n\t\tminspd = SpeedLimitMin\n\t}\n\tif maxspd > SpeedLimitMax {\n\t\tmaxspd = SpeedLimitMax\n\t}\n\tif minpos < PositionMin {\n\t\tminpos = PositionMin\n\t}\n\tif maxpos > PositionMax {\n\t\tmaxpos = PositionMax\n\t}\n\tr := Range(maxpos - minpos)\n\tif fs.Range != 0 && r > fs.Range {\n\t\tr = fs.Range\n\t}\n\n\ts = make(protocol.TimedActions, 1, len(fs.Actions)+1)\n\ts[0].Time = 0\n\tif fs.Inverted {\n\t\ts[0].Position = maxpos \/\/ Init at top\n\t} else {\n\n\t\ts[0].Position = minpos \/\/ Init at bottom\n\t}\n\ts[0].Speed = SpeedLimitMin\n\n\tpreviousPosition := s[0].Position\n\tprevious := Action{\n\t\tAt: 0,\n\t\tPos: 0,\n\t}\n\tfor _, e := range fs.Actions {\n\t\tif e.Pos == previous.Pos {\n\t\t\tprevious = e\n\t\t\tcontinue\n\t\t}\n\t\ttimediff := time.Duration(e.At-previous.At) * time.Millisecond\n\t\tif timediff < Threshold {\n\t\t\tstat.Delayed++\n\t\t}\n\t\tposition := e.Pos\n\t\tif fs.Inverted {\n\t\t\tposition = 100 - e.Pos\n\t\t}\n\t\tposition = r.Position(position) + minpos\n\t\tdistance := position - previousPosition\n\t\tif distance < 0 {\n\t\t\tdistance = -distance\n\t\t}\n\t\tstat.DistanceTotal = stat.DistanceTotal + uint64(distance)\n\t\tspeed := Speed(distance, timediff)\n\t\tif speed > maxspd {\n\t\t\tspeed = maxspd\n\t\t\tstat.SpeedOverrideFast++\n\t\t} else if speed < minspd {\n\t\t\tspeed = minspd\n\t\t\tstat.SpeedOverrideSlow++\n\t\t}\n\t\tstat.SpeedTotal = stat.SpeedTotal + uint64(speed)\n\t\tta := protocol.TimedAction{\n\t\t\tTime: time.Duration(previous.At) * time.Millisecond,\n\t\t\tAction: protocol.Action{\n\t\t\t\tPosition: position,\n\t\t\t\tSpeed: speed,\n\t\t\t},\n\t\t}\n\t\ts = append(s, ta)\n\t\tstat.Count++\n\t\tprevious = e\n\t\tpreviousPosition = ta.Position\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Terraformer Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage aws\n\nimport (\n\t\"context\"\n\t\"github.com\/GoogleCloudPlatform\/terraformer\/terraform_utils\"\n\t\"github.com\/aws\/aws-sdk-go-v2\/aws\"\n\t\"github.com\/aws\/aws-sdk-go-v2\/service\/cloudformation\"\n\t\"regexp\"\n)\n\nvar cloudFormationAllowEmptyValues = []string{\"tags.\"}\n\ntype CloudFormationGenerator struct {\n\tAWSService\n}\n\nfunc (g *CloudFormationGenerator) InitResources() error {\n\tconfig, e := g.generateConfig()\n\tif e != nil {\n\t\treturn e\n\t}\n\tsvc := cloudformation.New(config)\n\tp := cloudformation.NewListStacksPaginator(svc.ListStacksRequest(&cloudformation.ListStacksInput{}))\n\tfor p.Next(context.Background()) {\n\t\tfor _, stackSummary := range p.CurrentPage().StackSummaries {\n\t\t\tif stackSummary.StackStatus == cloudformation.StackStatusDeleteComplete {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tg.Resources = append(g.Resources, terraform_utils.NewSimpleResource(\n\t\t\t\t*stackSummary.StackName,\n\t\t\t\t*stackSummary.StackName,\n\t\t\t\t\"aws_cloudformation_stack\",\n\t\t\t\t\"aws\",\n\t\t\t\tcloudFormationAllowEmptyValues,\n\t\t\t))\n\t\t}\n\t}\n\tif err := p.Err(); err != nil {\n\t\treturn err\n\t}\n\tstackSets, err := svc.ListStackSetsRequest(&cloudformation.ListStackSetsInput{}).Send(context.Background())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, stackSetSummary := range stackSets.Summaries {\n\t\tg.Resources = append(g.Resources, terraform_utils.NewSimpleResource(\n\t\t\t*stackSetSummary.StackSetName,\n\t\t\t*stackSetSummary.StackSetName,\n\t\t\t\"aws_cloudformation_stack_set\",\n\t\t\t\"aws\",\n\t\t\tcloudFormationAllowEmptyValues,\n\t\t))\n\n\t\tstackSetInstances, err := svc.ListStackInstancesRequest(&cloudformation.ListStackInstancesInput{\n\t\t\tStackSetName: stackSetSummary.StackSetName,\n\t\t}).Send(context.Background())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, stackSetI := range stackSetInstances.Summaries {\n\t\t\tid := aws.StringValue(stackSetI.StackSetId) + \",\" + aws.StringValue(stackSetI.Account) + \",\" + aws.StringValue(stackSetI.Region)\n\n\t\t\tg.Resources = append(g.Resources, terraform_utils.NewSimpleResource(\n\t\t\t\tid,\n\t\t\t\tid,\n\t\t\t\t\"aws_cloudformation_stack_set_instance\",\n\t\t\t\t\"aws\",\n\t\t\t\tcloudFormationAllowEmptyValues,\n\t\t\t))\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (g *CloudFormationGenerator) PostConvertHook() error {\n\tcfInterpolation := regexp.MustCompile(`(\\${[0-9A-Za-z:]+})`)\n\tfor _, resource := range g.Resources {\n\t\tif resource.InstanceInfo.Type == \"aws_cloudformation_stack\" {\n\t\t\tdelete(resource.Item, \"outputs\")\n\t\t\tif templateBody, ok := resource.InstanceState.Attributes[\"template_body\"]; ok {\n\t\t\t\tstr := cfInterpolation.ReplaceAllString(templateBody, \"$$$1\")\n\t\t\t\tresource.Item[\"template_body\"] = str\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>#423 Bring back reference to CF resource by its identifier<commit_after>\/\/ Copyright 2019 The Terraformer Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage aws\n\nimport (\n\t\"context\"\n\t\"github.com\/GoogleCloudPlatform\/terraformer\/terraform_utils\"\n\t\"github.com\/aws\/aws-sdk-go-v2\/aws\"\n\t\"github.com\/aws\/aws-sdk-go-v2\/service\/cloudformation\"\n\t\"regexp\"\n)\n\nvar cloudFormationAllowEmptyValues = []string{\"tags.\"}\n\ntype CloudFormationGenerator struct {\n\tAWSService\n}\n\nfunc (g *CloudFormationGenerator) InitResources() error {\n\tconfig, e := g.generateConfig()\n\tif e != nil {\n\t\treturn e\n\t}\n\tsvc := cloudformation.New(config)\n\tp := cloudformation.NewListStacksPaginator(svc.ListStacksRequest(&cloudformation.ListStacksInput{}))\n\tfor p.Next(context.Background()) {\n\t\tfor _, stackSummary := range p.CurrentPage().StackSummaries {\n\t\t\tif stackSummary.StackStatus == cloudformation.StackStatusDeleteComplete {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tg.Resources = append(g.Resources, terraform_utils.NewSimpleResource(\n\t\t\t\t*stackSummary.StackId,\n\t\t\t\t*stackSummary.StackName,\n\t\t\t\t\"aws_cloudformation_stack\",\n\t\t\t\t\"aws\",\n\t\t\t\tcloudFormationAllowEmptyValues,\n\t\t\t))\n\t\t}\n\t}\n\tif err := p.Err(); err != nil {\n\t\treturn err\n\t}\n\tstackSets, err := svc.ListStackSetsRequest(&cloudformation.ListStackSetsInput{}).Send(context.Background())\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, stackSetSummary := range stackSets.Summaries {\n\t\tif stackSetSummary.Status == cloudformation.StackSetStatusDeleted {\n\t\t\tcontinue\n\t\t}\n\t\tg.Resources = append(g.Resources, terraform_utils.NewSimpleResource(\n\t\t\t*stackSetSummary.StackSetId,\n\t\t\t*stackSetSummary.StackSetName,\n\t\t\t\"aws_cloudformation_stack_set\",\n\t\t\t\"aws\",\n\t\t\tcloudFormationAllowEmptyValues,\n\t\t))\n\n\t\tstackSetInstances, err := svc.ListStackInstancesRequest(&cloudformation.ListStackInstancesInput{\n\t\t\tStackSetName: stackSetSummary.StackSetName,\n\t\t}).Send(context.Background())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, stackSetI := range stackSetInstances.Summaries {\n\t\t\tid := aws.StringValue(stackSetI.StackSetId) + \",\" + aws.StringValue(stackSetI.Account) + \",\" + aws.StringValue(stackSetI.Region)\n\n\t\t\tg.Resources = append(g.Resources, terraform_utils.NewSimpleResource(\n\t\t\t\tid,\n\t\t\t\tid,\n\t\t\t\t\"aws_cloudformation_stack_set_instance\",\n\t\t\t\t\"aws\",\n\t\t\t\tcloudFormationAllowEmptyValues,\n\t\t\t))\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (g *CloudFormationGenerator) PostConvertHook() error {\n\tcfInterpolation := regexp.MustCompile(`(\\${[0-9A-Za-z:]+})`)\n\tfor _, resource := range g.Resources {\n\t\tif resource.InstanceInfo.Type == \"aws_cloudformation_stack\" {\n\t\t\tdelete(resource.Item, \"outputs\")\n\t\t\tif templateBody, ok := resource.InstanceState.Attributes[\"template_body\"]; ok {\n\t\t\t\tstr := cfInterpolation.ReplaceAllString(templateBody, \"$$$1\")\n\t\t\t\tresource.Item[\"template_body\"] = str\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package bundled\n\nconst epmElv = `use re\n\n# Verbosity configuration\ndebug-mode = $false\n\n# Configuration for common domains\n-default-domain-config = [\n &\"github.com\"= [\n &method= git\n &protocol= https\n &levels= 2\n ]\n &\"bitbucket.org\"= [\n &method= git\n &protocol= https\n &levels= 2\n ]\n &\"gitlab.com\"= [\n &method= git\n &protocol= https\n &levels= 2\n ]\n]\n\n# Internal configuration\n-data-dir = ~\/.elvish\n-lib-dir = $-data-dir\/lib\n\n# General utility functions\n\nfn -debug [text]{\n if $debug-mode {\n print (styled '=> ' blue)\n echo $text\n }\n}\n\nfn -info [text]{\n print (styled '=> ' green)\n echo $text\n}\n\nfn -warn [text]{\n print (styled '=> ' yellow)\n echo $text\n}\n\nfn -error [text]{\n print (styled '=> ' red)\n echo $text\n}\n\nfn dest [pkg]{\n put $-lib-dir\/$pkg\n}\n\nfn is-installed [pkg]{\n bool ?(test -e (dest $pkg))\n}\n\nfn -package-domain [pkg]{\n splits &max=2 \/ $pkg | take 1\n}\n\nfn -package-without-domain [pkg]{\n splits &max=2 \/ $pkg | drop 1 | joins ''\n}\n\n# Merge two maps\nfn -merge [a b]{\n keys $b | each [k]{ a[$k] = $b[$k] }\n put $a\n}\n\n# Uppercase first letter of a string\nfn -first-upper [s]{\n put (echo $s[0] | tr '[:lower:]' '[:upper:]')$s[(count $s[0]):]\n}\n\n# Expand tilde at the beginning of a string to the home dir\nfn -tilde-expand [p]{\n re:replace \"^~\" $E:HOME $p\n}\n\n# Known method handlers. Each entry is indexed by method name (the\n# value of the \"method\" key in the domain configs), and must contain\n# two keys: install and upgrade, each one must be a closure that\n# received two arguments: package name and the domain config entry\n#\n# - Method 'git' requires the key 'protocol' in the domain config,\n# which has to be 'http' or 'https'\n# - Method 'rsync' requires the key 'location' in the domain config,\n# which has to contain the directory where the domain files are\n# stored. It can be any source location understood by the rsync\n# command.\n-method-handler = [\n &git= [\n &src= [pkg dom-cfg]{\n put $dom-cfg[protocol]\":\/\/\"$pkg\n }\n\n &install= [pkg dom-cfg]{\n dest = (dest $pkg)\n -info \"Installing \"$pkg\n mkdir -p $dest\n git clone ($-method-handler[git][src] $pkg $dom-cfg) $dest\n }\n\n &upgrade= [pkg dom-cfg]{\n dest = (dest $pkg)\n -info \"Updating \"$pkg\n try {\n git -C $dest pull\n } except _ {\n -error \"Something failed, please check error above and retry.\"\n }\n }\n ]\n\n &rsync= [\n &src= [pkg dom-cfg]{\n put (-tilde-expand $dom-cfg[location])\/(-package-without-domain $pkg)\/\n }\n\n &install= [pkg dom-cfg]{\n dest = (dest $pkg)\n pkgd = (-package-without-domain $pkg)\n -info \"Installing \"$pkg\n rsync -av ($-method-handler[rsync][src] $pkg $dom-cfg) $dest\n }\n\n &upgrade= [pkg dom-cfg]{\n dest = (dest $pkg)\n pkgd = (-package-without-domain $pkg)\n if (not (is-installed $pkg)) {\n -error \"Package \"$pkg\" is not installed.\"\n return\n }\n -info \"Updating \"$pkg\n rsync -av ($-method-handler[rsync][src] $pkg $dom-cfg) $dest\n }\n ]\n]\n\n# Return the filename of the domain config file for the given domain\n# (regardless of whether it exists)\nfn -domain-config-file [dom]{\n put $-lib-dir\/$dom\/epm-domain.cfg\n}\n\n# Return the filename of the metadata file for the given package\n# (regardless of whether it exists)\nfn -package-metadata-file [pkg]{\n put (dest $pkg)\/metadata.json\n}\n\nfn -write-domain-config [dom]{\n cfgfile = (-domain-config-file $dom)\n mkdir -p (dirname $cfgfile)\n if (has-key $-default-domain-config $dom) {\n put $-default-domain-config[$dom] | to-json > $cfgfile\n } else {\n -error \"No default config exists for domain \"$dom\".\"\n }\n}\n\n# Returns the domain config for a given domain, as a JSON data\n# structure. If the file does not exist but we have a built-in\n# definition, then we return the default. Otherwise we return $false,\n# so the result can always be checked with 'if'.\nfn -domain-config [dom]{\n cfgfile = (-domain-config-file $dom)\n cfg = $false\n if ?(test -f $cfgfile) {\n # If the config file exists, read it...\n cfg = (cat $cfgfile | from-json)\n -debug \"Read domain config for \"$dom\": \"(to-string $cfg)\n } else {\n # ...otherwise check if we have a default config for the domain, and save it\n if (has-key $-default-domain-config $dom) {\n cfg = $-default-domain-config[$dom]\n -debug \"No existing config for \"$dom\", using the default: \"(to-string $cfg)\n } else {\n -debug \"No existing config for \"$dom\" and no default available.\"\n }\n }\n put $cfg\n}\n\n\n# Return the method by which a package is installed\nfn -package-method [pkg]{\n dom = (-package-domain $pkg)\n cfg = (-domain-config $dom)\n if $cfg {\n put $cfg[method]\n } else {\n put $false\n }\n}\n\n# Invoke package operations defined in $-method-handler above\nfn -package-op [pkg what]{\n dom = (-package-domain $pkg)\n cfg = (-domain-config $dom)\n if $cfg {\n method = $cfg[method]\n if (has-key $-method-handler $method) {\n if (has-key $-method-handler[$method] $what) {\n $-method-handler[$method][$what] $pkg $cfg\n } else {\n fail \"Unknown operation '\"$what\"' for package \"$pkg\n }\n } else {\n fail \"Unknown method '\"$method\"', specified in in config file \"(-domain-config-file $dom)\n }\n } else {\n -error \"No config for domain '\"$dom\"'.\"\n }\n}\n\n# Uninstall a single package by removing its directory\nfn -uninstall-package [pkg]{\n if (not (is-installed $pkg)) {\n -error \"Package \"$pkg\" is not installed.\"\n return\n }\n dest = (dest $pkg)\n -info \"Removing package \"$pkg\n rm -rf $dest\n}\n\n######################################################################\n# Main user-facing functions\n\n# Read and parse the package metadata, if it exists\nfn metadata [pkg]{\n # Base metadata attributes\n res = [\n &name= $pkg\n &method= (-package-method $pkg)\n &src= (-package-op $pkg src)\n &dst= (dest $pkg)\n &installed= (is-installed $pkg)\n ]\n # Merge with package-specified attributes, if any\n file = (-package-metadata-file $pkg)\n if (and (is-installed $pkg) ?(test -f $file)) {\n res = (-merge (cat $file | from-json) $res)\n }\n put $res\n}\n\n# Print out information about a package\nfn query [pkg]{\n data = (metadata $pkg)\n special-keys = [name method installed src dst]\n echo (styled \"Package \"$data[name] cyan)\n if $data[installed] {\n echo (styled \"Installed at \"$data[dst] green)\n } else {\n echo (styled \"Not installed\" red)\n }\n echo (styled \"Source:\" blue) $data[method] $data[src]\n keys $data | each [key]{\n if (not (has-value $special-keys $key)) {\n val = $data[$key]\n if (eq (kind-of $val) list) {\n val = (joins \", \" $val)\n }\n echo (styled (-first-upper $key)\":\" blue) $val\n }\n }\n}\n\n# List installed packages\nfn installed {\n put $-lib-dir\/*[nomatch-ok] | each [dir]{\n dom = (replaces $-lib-dir\/ '' $dir)\n cfg = (-domain-config $dom)\n # Only list domains for which we know the config, so that the user\n # can have his own non-package directories under ~\/.elvish\/lib\n # without conflicts.\n if $cfg {\n lvl = $cfg[levels]\n pat = '^\\Q'$-lib-dir'\/\\E('(repeat (+ $lvl 1) '[^\/]+' | joins '\/')')\/$'\n put (each [d]{ re:find $pat $d } [ $-lib-dir\/$dom\/**[nomatch-ok]\/ ] )[groups][1][text]\n }\n }\n}\n\n# epm:list is an alias for epm:installed\nfn list { installed }\n\n# Install and upgrade are method-specific, so we call the\n# corresponding functions using -package-op\nfn install [&silent-if-installed=$false @pkgs]{\n if (eq $pkgs []) {\n -error \"You must specify at least one package.\"\n return\n }\n for pkg $pkgs {\n if (is-installed $pkg) {\n if (not $silent-if-installed) {\n -info \"Package \"$pkg\" is already installed.\"\n }\n } else {\n -package-op $pkg install\n # Check if there are any dependencies to install\n metadata = (metadata $pkg)\n if (has-key $metadata dependencies) {\n deps = $metadata[dependencies]\n -info \"Installing dependencies: \"(joins \" \" $deps)\n # If the installation of dependencies fails, uninstall the\n # target package (leave any already-installed dependencies in\n # place)\n try {\n install $@deps\n } except e {\n -error \"Dependency installation failed. Uninstalling \"$pkg\", please check the errors above and try again.\"\n -uninstall-package $pkg\n }\n }\n }\n }\n}\n\nfn upgrade [@pkgs]{\n if (eq $pkgs []) {\n pkgs = [(installed)]\n -info 'Upgrading all installed packages'\n }\n for pkg $pkgs {\n if (not (is-installed $pkg)) {\n -error \"Package \"$pkg\" is not installed.\"\n } else {\n -package-op $pkg upgrade\n }\n }\n}\n\n# Uninstall is the same for everyone, just remove the directory\nfn uninstall [@pkgs]{\n if (eq $pkgs []) {\n -error 'You must specify at least one package.'\n return\n }\n for pkg $pkgs {\n -uninstall-package $pkg\n }\n}`\n<commit_msg>Fix typo in comments: received -> receives (#839)<commit_after>package bundled\n\nconst epmElv = `use re\n\n# Verbosity configuration\ndebug-mode = $false\n\n# Configuration for common domains\n-default-domain-config = [\n &\"github.com\"= [\n &method= git\n &protocol= https\n &levels= 2\n ]\n &\"bitbucket.org\"= [\n &method= git\n &protocol= https\n &levels= 2\n ]\n &\"gitlab.com\"= [\n &method= git\n &protocol= https\n &levels= 2\n ]\n]\n\n# Internal configuration\n-data-dir = ~\/.elvish\n-lib-dir = $-data-dir\/lib\n\n# General utility functions\n\nfn -debug [text]{\n if $debug-mode {\n print (styled '=> ' blue)\n echo $text\n }\n}\n\nfn -info [text]{\n print (styled '=> ' green)\n echo $text\n}\n\nfn -warn [text]{\n print (styled '=> ' yellow)\n echo $text\n}\n\nfn -error [text]{\n print (styled '=> ' red)\n echo $text\n}\n\nfn dest [pkg]{\n put $-lib-dir\/$pkg\n}\n\nfn is-installed [pkg]{\n bool ?(test -e (dest $pkg))\n}\n\nfn -package-domain [pkg]{\n splits &max=2 \/ $pkg | take 1\n}\n\nfn -package-without-domain [pkg]{\n splits &max=2 \/ $pkg | drop 1 | joins ''\n}\n\n# Merge two maps\nfn -merge [a b]{\n keys $b | each [k]{ a[$k] = $b[$k] }\n put $a\n}\n\n# Uppercase first letter of a string\nfn -first-upper [s]{\n put (echo $s[0] | tr '[:lower:]' '[:upper:]')$s[(count $s[0]):]\n}\n\n# Expand tilde at the beginning of a string to the home dir\nfn -tilde-expand [p]{\n re:replace \"^~\" $E:HOME $p\n}\n\n# Known method handlers. Each entry is indexed by method name (the\n# value of the \"method\" key in the domain configs), and must contain\n# two keys: install and upgrade, each one must be a closure that\n# receives two arguments: package name and the domain config entry\n#\n# - Method 'git' requires the key 'protocol' in the domain config,\n# which has to be 'http' or 'https'\n# - Method 'rsync' requires the key 'location' in the domain config,\n# which has to contain the directory where the domain files are\n# stored. It can be any source location understood by the rsync\n# command.\n-method-handler = [\n &git= [\n &src= [pkg dom-cfg]{\n put $dom-cfg[protocol]\":\/\/\"$pkg\n }\n\n &install= [pkg dom-cfg]{\n dest = (dest $pkg)\n -info \"Installing \"$pkg\n mkdir -p $dest\n git clone ($-method-handler[git][src] $pkg $dom-cfg) $dest\n }\n\n &upgrade= [pkg dom-cfg]{\n dest = (dest $pkg)\n -info \"Updating \"$pkg\n try {\n git -C $dest pull\n } except _ {\n -error \"Something failed, please check error above and retry.\"\n }\n }\n ]\n\n &rsync= [\n &src= [pkg dom-cfg]{\n put (-tilde-expand $dom-cfg[location])\/(-package-without-domain $pkg)\/\n }\n\n &install= [pkg dom-cfg]{\n dest = (dest $pkg)\n pkgd = (-package-without-domain $pkg)\n -info \"Installing \"$pkg\n rsync -av ($-method-handler[rsync][src] $pkg $dom-cfg) $dest\n }\n\n &upgrade= [pkg dom-cfg]{\n dest = (dest $pkg)\n pkgd = (-package-without-domain $pkg)\n if (not (is-installed $pkg)) {\n -error \"Package \"$pkg\" is not installed.\"\n return\n }\n -info \"Updating \"$pkg\n rsync -av ($-method-handler[rsync][src] $pkg $dom-cfg) $dest\n }\n ]\n]\n\n# Return the filename of the domain config file for the given domain\n# (regardless of whether it exists)\nfn -domain-config-file [dom]{\n put $-lib-dir\/$dom\/epm-domain.cfg\n}\n\n# Return the filename of the metadata file for the given package\n# (regardless of whether it exists)\nfn -package-metadata-file [pkg]{\n put (dest $pkg)\/metadata.json\n}\n\nfn -write-domain-config [dom]{\n cfgfile = (-domain-config-file $dom)\n mkdir -p (dirname $cfgfile)\n if (has-key $-default-domain-config $dom) {\n put $-default-domain-config[$dom] | to-json > $cfgfile\n } else {\n -error \"No default config exists for domain \"$dom\".\"\n }\n}\n\n# Returns the domain config for a given domain, as a JSON data\n# structure. If the file does not exist but we have a built-in\n# definition, then we return the default. Otherwise we return $false,\n# so the result can always be checked with 'if'.\nfn -domain-config [dom]{\n cfgfile = (-domain-config-file $dom)\n cfg = $false\n if ?(test -f $cfgfile) {\n # If the config file exists, read it...\n cfg = (cat $cfgfile | from-json)\n -debug \"Read domain config for \"$dom\": \"(to-string $cfg)\n } else {\n # ...otherwise check if we have a default config for the domain, and save it\n if (has-key $-default-domain-config $dom) {\n cfg = $-default-domain-config[$dom]\n -debug \"No existing config for \"$dom\", using the default: \"(to-string $cfg)\n } else {\n -debug \"No existing config for \"$dom\" and no default available.\"\n }\n }\n put $cfg\n}\n\n\n# Return the method by which a package is installed\nfn -package-method [pkg]{\n dom = (-package-domain $pkg)\n cfg = (-domain-config $dom)\n if $cfg {\n put $cfg[method]\n } else {\n put $false\n }\n}\n\n# Invoke package operations defined in $-method-handler above\nfn -package-op [pkg what]{\n dom = (-package-domain $pkg)\n cfg = (-domain-config $dom)\n if $cfg {\n method = $cfg[method]\n if (has-key $-method-handler $method) {\n if (has-key $-method-handler[$method] $what) {\n $-method-handler[$method][$what] $pkg $cfg\n } else {\n fail \"Unknown operation '\"$what\"' for package \"$pkg\n }\n } else {\n fail \"Unknown method '\"$method\"', specified in in config file \"(-domain-config-file $dom)\n }\n } else {\n -error \"No config for domain '\"$dom\"'.\"\n }\n}\n\n# Uninstall a single package by removing its directory\nfn -uninstall-package [pkg]{\n if (not (is-installed $pkg)) {\n -error \"Package \"$pkg\" is not installed.\"\n return\n }\n dest = (dest $pkg)\n -info \"Removing package \"$pkg\n rm -rf $dest\n}\n\n######################################################################\n# Main user-facing functions\n\n# Read and parse the package metadata, if it exists\nfn metadata [pkg]{\n # Base metadata attributes\n res = [\n &name= $pkg\n &method= (-package-method $pkg)\n &src= (-package-op $pkg src)\n &dst= (dest $pkg)\n &installed= (is-installed $pkg)\n ]\n # Merge with package-specified attributes, if any\n file = (-package-metadata-file $pkg)\n if (and (is-installed $pkg) ?(test -f $file)) {\n res = (-merge (cat $file | from-json) $res)\n }\n put $res\n}\n\n# Print out information about a package\nfn query [pkg]{\n data = (metadata $pkg)\n special-keys = [name method installed src dst]\n echo (styled \"Package \"$data[name] cyan)\n if $data[installed] {\n echo (styled \"Installed at \"$data[dst] green)\n } else {\n echo (styled \"Not installed\" red)\n }\n echo (styled \"Source:\" blue) $data[method] $data[src]\n keys $data | each [key]{\n if (not (has-value $special-keys $key)) {\n val = $data[$key]\n if (eq (kind-of $val) list) {\n val = (joins \", \" $val)\n }\n echo (styled (-first-upper $key)\":\" blue) $val\n }\n }\n}\n\n# List installed packages\nfn installed {\n put $-lib-dir\/*[nomatch-ok] | each [dir]{\n dom = (replaces $-lib-dir\/ '' $dir)\n cfg = (-domain-config $dom)\n # Only list domains for which we know the config, so that the user\n # can have his own non-package directories under ~\/.elvish\/lib\n # without conflicts.\n if $cfg {\n lvl = $cfg[levels]\n pat = '^\\Q'$-lib-dir'\/\\E('(repeat (+ $lvl 1) '[^\/]+' | joins '\/')')\/$'\n put (each [d]{ re:find $pat $d } [ $-lib-dir\/$dom\/**[nomatch-ok]\/ ] )[groups][1][text]\n }\n }\n}\n\n# epm:list is an alias for epm:installed\nfn list { installed }\n\n# Install and upgrade are method-specific, so we call the\n# corresponding functions using -package-op\nfn install [&silent-if-installed=$false @pkgs]{\n if (eq $pkgs []) {\n -error \"You must specify at least one package.\"\n return\n }\n for pkg $pkgs {\n if (is-installed $pkg) {\n if (not $silent-if-installed) {\n -info \"Package \"$pkg\" is already installed.\"\n }\n } else {\n -package-op $pkg install\n # Check if there are any dependencies to install\n metadata = (metadata $pkg)\n if (has-key $metadata dependencies) {\n deps = $metadata[dependencies]\n -info \"Installing dependencies: \"(joins \" \" $deps)\n # If the installation of dependencies fails, uninstall the\n # target package (leave any already-installed dependencies in\n # place)\n try {\n install $@deps\n } except e {\n -error \"Dependency installation failed. Uninstalling \"$pkg\", please check the errors above and try again.\"\n -uninstall-package $pkg\n }\n }\n }\n }\n}\n\nfn upgrade [@pkgs]{\n if (eq $pkgs []) {\n pkgs = [(installed)]\n -info 'Upgrading all installed packages'\n }\n for pkg $pkgs {\n if (not (is-installed $pkg)) {\n -error \"Package \"$pkg\" is not installed.\"\n } else {\n -package-op $pkg upgrade\n }\n }\n}\n\n# Uninstall is the same for everyone, just remove the directory\nfn uninstall [@pkgs]{\n if (eq $pkgs []) {\n -error 'You must specify at least one package.'\n return\n }\n for pkg $pkgs {\n -uninstall-package $pkg\n }\n}`\n<|endoftext|>"} {"text":"<commit_before>package algo\n\nimport (\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc assertMatch(t *testing.T, fun func(bool, bool, []rune, []rune) (int, int), caseSensitive bool, forward bool, input string, pattern string, sidx int, eidx int) {\n\tif !caseSensitive {\n\t\tpattern = strings.ToLower(pattern)\n\t}\n\ts, e := fun(caseSensitive, forward, []rune(input), []rune(pattern))\n\tif s != sidx {\n\t\tt.Errorf(\"Invalid start index: %d (expected: %d, %s \/ %s)\", s, sidx, input, pattern)\n\t}\n\tif e != eidx {\n\t\tt.Errorf(\"Invalid end index: %d (expected: %d, %s \/ %s)\", e, eidx, input, pattern)\n\t}\n}\n\nfunc TestFuzzyMatch(t *testing.T) {\n\tassertMatch(t, FuzzyMatch, false, true, \"fooBarbaz\", \"oBZ\", 2, 9)\n\tassertMatch(t, FuzzyMatch, true, true, \"fooBarbaz\", \"oBZ\", -1, -1)\n\tassertMatch(t, FuzzyMatch, true, true, \"fooBarbaz\", \"oBz\", 2, 9)\n\tassertMatch(t, FuzzyMatch, true, true, \"fooBarbaz\", \"fooBarbazz\", -1, -1)\n}\n\nfunc TestFuzzyMatchBackward(t *testing.T) {\n\tassertMatch(t, FuzzyMatch, false, true, \"foobar fb\", \"fb\", 0, 4)\n\tassertMatch(t, FuzzyMatch, false, false, \"foobar fb\", \"fb\", 7, 9)\n}\n\nfunc TestExactMatchNaive(t *testing.T) {\n\tfor _, dir := range []bool{true, false} {\n\t\tassertMatch(t, ExactMatchNaive, false, dir, \"fooBarbaz\", \"oBA\", 2, 5)\n\t\tassertMatch(t, ExactMatchNaive, true, dir, \"fooBarbaz\", \"oBA\", -1, -1)\n\t\tassertMatch(t, ExactMatchNaive, true, dir, \"fooBarbaz\", \"fooBarbazz\", -1, -1)\n\t}\n}\n\nfunc TestExactMatchNaiveBackward(t *testing.T) {\n\tassertMatch(t, FuzzyMatch, false, true, \"foobar foob\", \"oo\", 1, 3)\n\tassertMatch(t, FuzzyMatch, false, false, \"foobar foob\", \"oo\", 8, 10)\n}\n\nfunc TestPrefixMatch(t *testing.T) {\n\tfor _, dir := range []bool{true, false} {\n\t\tassertMatch(t, PrefixMatch, false, dir, \"fooBarbaz\", \"Foo\", 0, 3)\n\t\tassertMatch(t, PrefixMatch, true, dir, \"fooBarbaz\", \"Foo\", -1, -1)\n\t\tassertMatch(t, PrefixMatch, false, dir, \"fooBarbaz\", \"baz\", -1, -1)\n\t}\n}\n\nfunc TestSuffixMatch(t *testing.T) {\n\tfor _, dir := range []bool{true, false} {\n\t\tassertMatch(t, SuffixMatch, false, dir, \"fooBarbaz\", \"Foo\", -1, -1)\n\t\tassertMatch(t, SuffixMatch, false, dir, \"fooBarbaz\", \"baz\", 6, 9)\n\t\tassertMatch(t, SuffixMatch, true, dir, \"fooBarbaz\", \"Baz\", -1, -1)\n\t}\n}\n\nfunc TestEmptyPattern(t *testing.T) {\n\tfor _, dir := range []bool{true, false} {\n\t\tassertMatch(t, FuzzyMatch, true, dir, \"foobar\", \"\", 0, 0)\n\t\tassertMatch(t, ExactMatchNaive, true, dir, \"foobar\", \"\", 0, 0)\n\t\tassertMatch(t, PrefixMatch, true, dir, \"foobar\", \"\", 0, 0)\n\t\tassertMatch(t, SuffixMatch, true, dir, \"foobar\", \"\", 6, 6)\n\t}\n}\n<commit_msg>Fix algorithm tests<commit_after>package algo\n\nimport (\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc assertMatch(t *testing.T, fun func(bool, bool, []rune, []rune) (int, int), caseSensitive bool, forward bool, input string, pattern string, sidx int, eidx int) {\n\tif !caseSensitive {\n\t\tpattern = strings.ToLower(pattern)\n\t}\n\ts, e := fun(caseSensitive, forward, []rune(input), []rune(pattern))\n\tif s != sidx {\n\t\tt.Errorf(\"Invalid start index: %d (expected: %d, %s \/ %s)\", s, sidx, input, pattern)\n\t}\n\tif e != eidx {\n\t\tt.Errorf(\"Invalid end index: %d (expected: %d, %s \/ %s)\", e, eidx, input, pattern)\n\t}\n}\n\nfunc TestFuzzyMatch(t *testing.T) {\n\tassertMatch(t, FuzzyMatch, false, true, \"fooBarbaz\", \"oBZ\", 2, 9)\n\tassertMatch(t, FuzzyMatch, true, true, \"fooBarbaz\", \"oBZ\", -1, -1)\n\tassertMatch(t, FuzzyMatch, true, true, \"fooBarbaz\", \"oBz\", 2, 9)\n\tassertMatch(t, FuzzyMatch, true, true, \"fooBarbaz\", \"fooBarbazz\", -1, -1)\n}\n\nfunc TestFuzzyMatchBackward(t *testing.T) {\n\tassertMatch(t, FuzzyMatch, false, true, \"foobar fb\", \"fb\", 0, 4)\n\tassertMatch(t, FuzzyMatch, false, false, \"foobar fb\", \"fb\", 7, 9)\n}\n\nfunc TestExactMatchNaive(t *testing.T) {\n\tfor _, dir := range []bool{true, false} {\n\t\tassertMatch(t, ExactMatchNaive, false, dir, \"fooBarbaz\", \"oBA\", 2, 5)\n\t\tassertMatch(t, ExactMatchNaive, true, dir, \"fooBarbaz\", \"oBA\", -1, -1)\n\t\tassertMatch(t, ExactMatchNaive, true, dir, \"fooBarbaz\", \"fooBarbazz\", -1, -1)\n\t}\n}\n\nfunc TestExactMatchNaiveBackward(t *testing.T) {\n\tassertMatch(t, ExactMatchNaive, false, true, \"foobar foob\", \"oo\", 1, 3)\n\tassertMatch(t, ExactMatchNaive, false, false, \"foobar foob\", \"oo\", 8, 10)\n}\n\nfunc TestPrefixMatch(t *testing.T) {\n\tfor _, dir := range []bool{true, false} {\n\t\tassertMatch(t, PrefixMatch, false, dir, \"fooBarbaz\", \"Foo\", 0, 3)\n\t\tassertMatch(t, PrefixMatch, true, dir, \"fooBarbaz\", \"Foo\", -1, -1)\n\t\tassertMatch(t, PrefixMatch, false, dir, \"fooBarbaz\", \"baz\", -1, -1)\n\t}\n}\n\nfunc TestSuffixMatch(t *testing.T) {\n\tfor _, dir := range []bool{true, false} {\n\t\tassertMatch(t, SuffixMatch, false, dir, \"fooBarbaz\", \"Foo\", -1, -1)\n\t\tassertMatch(t, SuffixMatch, false, dir, \"fooBarbaz\", \"baz\", 6, 9)\n\t\tassertMatch(t, SuffixMatch, true, dir, \"fooBarbaz\", \"Baz\", -1, -1)\n\t}\n}\n\nfunc TestEmptyPattern(t *testing.T) {\n\tfor _, dir := range []bool{true, false} {\n\t\tassertMatch(t, FuzzyMatch, true, dir, \"foobar\", \"\", 0, 0)\n\t\tassertMatch(t, ExactMatchNaive, true, dir, \"foobar\", \"\", 0, 0)\n\t\tassertMatch(t, PrefixMatch, true, dir, \"foobar\", \"\", 0, 0)\n\t\tassertMatch(t, SuffixMatch, true, dir, \"foobar\", \"\", 6, 6)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ ----------------------------------------------------------------------------\n\/\/\n\/\/ *** AUTO GENERATED CODE *** Type: MMv1 ***\n\/\/\n\/\/ ----------------------------------------------------------------------------\n\/\/\n\/\/ This file is automatically generated by Magic Modules and manual\n\/\/ changes will be clobbered when the file is regenerated.\n\/\/\n\/\/ Please read more about how to change this file in\n\/\/ .github\/CONTRIBUTING.md.\n\/\/\n\/\/ ----------------------------------------------------------------------------\n\npackage google\n\nimport \"reflect\"\n\nconst VertexAIFeaturestoreEntitytypeAssetType string = \"{{region}}-aiplatform.googleapis.com\/FeaturestoreEntitytype\"\n\nfunc resourceConverterVertexAIFeaturestoreEntitytype() ResourceConverter {\n\treturn ResourceConverter{\n\t\tAssetType: VertexAIFeaturestoreEntitytypeAssetType,\n\t\tConvert: GetVertexAIFeaturestoreEntitytypeCaiObject,\n\t}\n}\n\nfunc GetVertexAIFeaturestoreEntitytypeCaiObject(d TerraformResourceData, config *Config) ([]Asset, error) {\n\tname, err := assetName(d, config, \"\/\/{{region}}-aiplatform.googleapis.com\/{{featurestore}}\/entityTypes\/{{name}}\")\n\tif err != nil {\n\t\treturn []Asset{}, err\n\t}\n\tif obj, err := GetVertexAIFeaturestoreEntitytypeApiObject(d, config); err == nil {\n\t\treturn []Asset{{\n\t\t\tName: name,\n\t\t\tType: VertexAIFeaturestoreEntitytypeAssetType,\n\t\t\tResource: &AssetResource{\n\t\t\t\tVersion: \"v1\",\n\t\t\t\tDiscoveryDocumentURI: \"https:\/\/www.googleapis.com\/discovery\/v1\/apis\/{{region}}-aiplatform\/v1\/rest\",\n\t\t\t\tDiscoveryName: \"FeaturestoreEntitytype\",\n\t\t\t\tData: obj,\n\t\t\t},\n\t\t}}, nil\n\t} else {\n\t\treturn []Asset{}, err\n\t}\n}\n\nfunc GetVertexAIFeaturestoreEntitytypeApiObject(d TerraformResourceData, config *Config) (map[string]interface{}, error) {\n\tobj := make(map[string]interface{})\n\tlabelsProp, err := expandVertexAIFeaturestoreEntitytypeLabels(d.Get(\"labels\"), d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if v, ok := d.GetOkExists(\"labels\"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) {\n\t\tobj[\"labels\"] = labelsProp\n\t}\n\tmonitoringConfigProp, err := expandVertexAIFeaturestoreEntitytypeMonitoringConfig(d.Get(\"monitoring_config\"), d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if v, ok := d.GetOkExists(\"monitoring_config\"); !isEmptyValue(reflect.ValueOf(monitoringConfigProp)) && (ok || !reflect.DeepEqual(v, monitoringConfigProp)) {\n\t\tobj[\"monitoringConfig\"] = monitoringConfigProp\n\t}\n\n\treturn obj, nil\n}\n\nfunc expandVertexAIFeaturestoreEntitytypeLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) {\n\tif v == nil {\n\t\treturn map[string]string{}, nil\n\t}\n\tm := make(map[string]string)\n\tfor k, val := range v.(map[string]interface{}) {\n\t\tm[k] = val.(string)\n\t}\n\treturn m, nil\n}\n\nfunc expandVertexAIFeaturestoreEntitytypeMonitoringConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\tl := v.([]interface{})\n\tif len(l) == 0 || l[0] == nil {\n\t\treturn nil, nil\n\t}\n\traw := l[0]\n\toriginal := raw.(map[string]interface{})\n\ttransformed := make(map[string]interface{})\n\n\ttransformedSnapshotAnalysis, err := expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysis(original[\"snapshot_analysis\"], d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if val := reflect.ValueOf(transformedSnapshotAnalysis); val.IsValid() && !isEmptyValue(val) {\n\t\ttransformed[\"snapshotAnalysis\"] = transformedSnapshotAnalysis\n\t}\n\n\treturn transformed, nil\n}\n\nfunc expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysis(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\tl := v.([]interface{})\n\tif len(l) == 0 || l[0] == nil {\n\t\treturn nil, nil\n\t}\n\traw := l[0]\n\toriginal := raw.(map[string]interface{})\n\ttransformed := make(map[string]interface{})\n\n\ttransformedDisabled, err := expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisDisabled(original[\"disabled\"], d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) {\n\t\ttransformed[\"disabled\"] = transformedDisabled\n\t}\n\n\treturn transformed, nil\n}\n\nfunc expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\treturn v, nil\n}\n<commit_msg>fix: added parameter region to google_vertex_ai_featurestore_entitytype (#6588) (#1089)<commit_after>\/\/ ----------------------------------------------------------------------------\n\/\/\n\/\/ *** AUTO GENERATED CODE *** Type: MMv1 ***\n\/\/\n\/\/ ----------------------------------------------------------------------------\n\/\/\n\/\/ This file is automatically generated by Magic Modules and manual\n\/\/ changes will be clobbered when the file is regenerated.\n\/\/\n\/\/ Please read more about how to change this file in\n\/\/ .github\/CONTRIBUTING.md.\n\/\/\n\/\/ ----------------------------------------------------------------------------\n\npackage google\n\nimport (\n\t\"reflect\"\n\t\"regexp\"\n)\n\nconst VertexAIFeaturestoreEntitytypeAssetType string = \"{{region}}-aiplatform.googleapis.com\/FeaturestoreEntitytype\"\n\nfunc resourceConverterVertexAIFeaturestoreEntitytype() ResourceConverter {\n\treturn ResourceConverter{\n\t\tAssetType: VertexAIFeaturestoreEntitytypeAssetType,\n\t\tConvert: GetVertexAIFeaturestoreEntitytypeCaiObject,\n\t}\n}\n\nfunc GetVertexAIFeaturestoreEntitytypeCaiObject(d TerraformResourceData, config *Config) ([]Asset, error) {\n\tname, err := assetName(d, config, \"\/\/{{region}}-aiplatform.googleapis.com\/{{featurestore}}\/entityTypes\/{{name}}\")\n\tif err != nil {\n\t\treturn []Asset{}, err\n\t}\n\tif obj, err := GetVertexAIFeaturestoreEntitytypeApiObject(d, config); err == nil {\n\t\treturn []Asset{{\n\t\t\tName: name,\n\t\t\tType: VertexAIFeaturestoreEntitytypeAssetType,\n\t\t\tResource: &AssetResource{\n\t\t\t\tVersion: \"v1\",\n\t\t\t\tDiscoveryDocumentURI: \"https:\/\/www.googleapis.com\/discovery\/v1\/apis\/{{region}}-aiplatform\/v1\/rest\",\n\t\t\t\tDiscoveryName: \"FeaturestoreEntitytype\",\n\t\t\t\tData: obj,\n\t\t\t},\n\t\t}}, nil\n\t} else {\n\t\treturn []Asset{}, err\n\t}\n}\n\nfunc GetVertexAIFeaturestoreEntitytypeApiObject(d TerraformResourceData, config *Config) (map[string]interface{}, error) {\n\tobj := make(map[string]interface{})\n\tlabelsProp, err := expandVertexAIFeaturestoreEntitytypeLabels(d.Get(\"labels\"), d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if v, ok := d.GetOkExists(\"labels\"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) {\n\t\tobj[\"labels\"] = labelsProp\n\t}\n\tmonitoringConfigProp, err := expandVertexAIFeaturestoreEntitytypeMonitoringConfig(d.Get(\"monitoring_config\"), d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if v, ok := d.GetOkExists(\"monitoring_config\"); !isEmptyValue(reflect.ValueOf(monitoringConfigProp)) && (ok || !reflect.DeepEqual(v, monitoringConfigProp)) {\n\t\tobj[\"monitoringConfig\"] = monitoringConfigProp\n\t}\n\n\treturn resourceVertexAIFeaturestoreEntitytypeEncoder(d, config, obj)\n}\n\nfunc resourceVertexAIFeaturestoreEntitytypeEncoder(d TerraformResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) {\n\tif v, ok := d.GetOk(\"featurestore\"); ok {\n\t\tre := regexp.MustCompile(\"projects\/(.+)\/locations\/(.+)\/featurestores\/(.+)$\")\n\t\tif parts := re.FindStringSubmatch(v.(string)); parts != nil {\n\t\t\td.Set(\"region\", parts[2])\n\t\t}\n\t}\n\n\treturn obj, nil\n}\n\nfunc expandVertexAIFeaturestoreEntitytypeLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) {\n\tif v == nil {\n\t\treturn map[string]string{}, nil\n\t}\n\tm := make(map[string]string)\n\tfor k, val := range v.(map[string]interface{}) {\n\t\tm[k] = val.(string)\n\t}\n\treturn m, nil\n}\n\nfunc expandVertexAIFeaturestoreEntitytypeMonitoringConfig(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\tl := v.([]interface{})\n\tif len(l) == 0 || l[0] == nil {\n\t\treturn nil, nil\n\t}\n\traw := l[0]\n\toriginal := raw.(map[string]interface{})\n\ttransformed := make(map[string]interface{})\n\n\ttransformedSnapshotAnalysis, err := expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysis(original[\"snapshot_analysis\"], d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if val := reflect.ValueOf(transformedSnapshotAnalysis); val.IsValid() && !isEmptyValue(val) {\n\t\ttransformed[\"snapshotAnalysis\"] = transformedSnapshotAnalysis\n\t}\n\n\treturn transformed, nil\n}\n\nfunc expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysis(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\tl := v.([]interface{})\n\tif len(l) == 0 || l[0] == nil {\n\t\treturn nil, nil\n\t}\n\traw := l[0]\n\toriginal := raw.(map[string]interface{})\n\ttransformed := make(map[string]interface{})\n\n\ttransformedDisabled, err := expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisDisabled(original[\"disabled\"], d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if val := reflect.ValueOf(transformedDisabled); val.IsValid() && !isEmptyValue(val) {\n\t\ttransformed[\"disabled\"] = transformedDisabled\n\t}\n\n\treturn transformed, nil\n}\n\nfunc expandVertexAIFeaturestoreEntitytypeMonitoringConfigSnapshotAnalysisDisabled(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\treturn v, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/* This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/. *\/\n\npackage simplepush\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n)\n\nfunc NewEndpointHandler() (h *EndpointHandler) {\n\th = &EndpointHandler{mux: mux.NewRouter()}\n\th.mux.HandleFunc(\"\/update\/{key}\", h.UpdateHandler)\n\treturn h\n}\n\ntype EndpointHandlerConfig struct {\n\tMaxDataLen int `toml:\"max_data_len\" env:\"max_data_len\"`\n\tAlwaysRoute bool `toml:\"always_route\" env:\"always_route\"`\n\tListener TCPListenerConfig\n}\n\ntype EndpointHandler struct {\n\tapp *Application\n\tlogger *SimpleLogger\n\tmetrics Statistician\n\tstore Store\n\trouter Router\n\tpinger PropPinger\n\tbalancer Balancer\n\thostname string\n\ttokenKey []byte\n\tlistener net.Listener\n\tserver *ServeCloser\n\tmux *mux.Router\n\turl string\n\tmaxConns int\n\tmaxDataLen int\n\talwaysRoute bool\n\tcloseOnce Once\n}\n\nfunc (h *EndpointHandler) ConfigStruct() interface{} {\n\treturn &EndpointHandlerConfig{\n\t\tMaxDataLen: 4096,\n\t\tAlwaysRoute: false,\n\t\tListener: TCPListenerConfig{\n\t\t\tAddr: \":8081\",\n\t\t\tMaxConns: 1000,\n\t\t\tKeepAlivePeriod: \"3m\",\n\t\t},\n\t}\n}\n\nfunc (h *EndpointHandler) Init(app *Application, config interface{}) (err error) {\n\tconf := config.(*EndpointHandlerConfig)\n\th.setApp(app)\n\n\tif h.listener, err = conf.Listener.Listen(); err != nil {\n\t\th.logger.Panic(\"handlers_endpoint\", \"Could not attach update listener\",\n\t\t\tLogFields{\"error\": err.Error()})\n\t\treturn err\n\t}\n\n\tvar scheme string\n\tif conf.Listener.UseTLS() {\n\t\tscheme = \"https\"\n\t} else {\n\t\tscheme = \"http\"\n\t}\n\thost, port := HostPort(h.listener, app)\n\th.url = CanonicalURL(scheme, host, port)\n\n\th.maxConns = conf.Listener.MaxConns\n\th.setMaxDataLen(conf.MaxDataLen)\n\th.alwaysRoute = conf.AlwaysRoute\n\n\treturn nil\n}\n\nfunc (h *EndpointHandler) Listener() net.Listener { return h.listener }\nfunc (h *EndpointHandler) MaxConns() int { return h.maxConns }\nfunc (h *EndpointHandler) URL() string { return h.url }\nfunc (h *EndpointHandler) ServeMux() ServeMux { return (*RouteMux)(h.mux) }\n\n\/\/ setApp sets the parent application for this update handler.\nfunc (h *EndpointHandler) setApp(app *Application) {\n\th.app = app\n\th.logger = app.Logger()\n\th.metrics = app.Metrics()\n\th.store = app.Store()\n\th.router = app.Router()\n\th.pinger = app.PropPinger()\n\th.tokenKey = app.TokenKey()\n\th.server = NewServeCloser(&http.Server{\n\t\tConnState: func(c net.Conn, state http.ConnState) {\n\t\t\tif state == http.StateNew {\n\t\t\t\th.metrics.Increment(\"endpoint.socket.connect\")\n\t\t\t} else if state == http.StateClosed {\n\t\t\t\th.metrics.Increment(\"endpoint.socket.disconnect\")\n\t\t\t}\n\t\t},\n\t\tHandler: &LogHandler{h.mux, h.logger},\n\t\tErrorLog: log.New(&LogWriter{\n\t\t\tLogger: h.logger,\n\t\t\tName: \"handlers_endpoint\",\n\t\t\tLevel: ERROR,\n\t\t}, \"\", 0),\n\t})\n}\n\n\/\/ setMaxDataLen sets the maximum data length to v\nfunc (h *EndpointHandler) setMaxDataLen(v int) {\n\th.maxDataLen = v\n}\n\nfunc (h *EndpointHandler) Start(errChan chan<- error) {\n\tif h.logger.ShouldLog(INFO) {\n\t\th.logger.Info(\"handlers_endpoint\", \"Starting update server\",\n\t\t\tLogFields{\"url\": h.url})\n\t}\n\terrChan <- h.server.Serve(h.listener)\n}\n\nfunc (h *EndpointHandler) decodePK(token string) (key string, err error) {\n\tif len(token) == 0 {\n\t\treturn \"\", fmt.Errorf(\"Missing primary key\")\n\t}\n\tif len(h.tokenKey) == 0 {\n\t\treturn token, nil\n\t}\n\tbpk, err := Decode(h.tokenKey, token)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(bytes.TrimSpace(bpk)), nil\n}\n\nfunc (h *EndpointHandler) resolvePK(token string) (uaid, chid string, err error) {\n\tpk, err := h.decodePK(token)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error decoding primary key: %s\", err)\n\t\treturn \"\", \"\", err\n\t}\n\tif !validPK(pk) {\n\t\terr = fmt.Errorf(\"Invalid primary key: %q\", pk)\n\t\treturn \"\", \"\", err\n\t}\n\tif uaid, chid, err = h.store.KeyToIDs(pk); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\treturn uaid, chid, nil\n}\n\nfunc (h *EndpointHandler) doPropPing(uaid string, version int64, data string) (ok bool, err error) {\n\tif h.pinger == nil {\n\t\treturn false, nil\n\t}\n\tif ok, err = h.pinger.Send(uaid, version, data); err != nil {\n\t\treturn false, fmt.Errorf(\"Could not send proprietary ping: %s\", err)\n\t}\n\tif !ok {\n\t\treturn false, nil\n\t}\n\t\/* if this is a GCM connected host, boot vers immediately to GCM\n\t *\/\n\treturn h.pinger.CanBypassWebsocket(), nil\n}\n\n\/\/ getUpdateParams extracts the update version and data from req.\nfunc (h *EndpointHandler) getUpdateParams(req *http.Request) (version int64, data string, err error) {\n\tif req.Header.Get(\"Content-Type\") == \"\" {\n\t\treq.Header.Set(\"Content-Type\",\n\t\t\t\"application\/x-www-form-urlencoded\")\n\t}\n\tsvers := req.FormValue(\"version\")\n\tif svers != \"\" {\n\t\tif version, err = strconv.ParseInt(svers, 10, 64); err != nil || version < 0 {\n\t\t\treturn 0, \"\", ErrBadVersion\n\t\t}\n\t} else {\n\t\tversion = timeNow().UTC().Unix()\n\t}\n\n\tdata = req.FormValue(\"data\")\n\tif len(data) > h.maxDataLen {\n\t\treturn 0, \"\", ErrDataTooLong\n\t}\n\treturn\n}\n\n\/\/ -- REST\nfunc (h *EndpointHandler) UpdateHandler(resp http.ResponseWriter, req *http.Request) {\n\t\/\/ Handle the version updates.\n\ttimer := timeNow()\n\trequestID := req.Header.Get(HeaderID)\n\tlogWarning := h.logger.ShouldLog(WARNING)\n\tvar (\n\t\terr error\n\t\tupdateSent bool\n\t\tversion int64\n\t\tuaid, chid string\n\t)\n\n\tdefer func() {\n\t\tnow := timeNow()\n\t\tif h.logger.ShouldLog(DEBUG) {\n\t\t\th.logger.Debug(\"handlers_endpoint\", \"+++++++++++++ DONE +++\",\n\t\t\t\tLogFields{\"rid\": requestID})\n\t\t}\n\t\tif h.logger.ShouldLog(INFO) {\n\t\t\th.logger.Info(\"handlers_endpoint\", \"Client Update complete\", LogFields{\n\t\t\t\t\"rid\": requestID,\n\t\t\t\t\"uaid\": uaid,\n\t\t\t\t\"chid\": chid,\n\t\t\t\t\"successful\": strconv.FormatBool(updateSent)})\n\t\t}\n\t\tif updateSent {\n\t\t\th.metrics.Timer(\"updates.handled\", now.Sub(timer))\n\t\t}\n\t}()\n\n\tif h.logger.ShouldLog(INFO) {\n\t\th.logger.Info(\"handlers_endpoint\", \"Handling Update\",\n\t\t\tLogFields{\"rid\": requestID})\n\t}\n\n\tif req.Method != \"PUT\" {\n\t\twriteJSON(resp, http.StatusMethodNotAllowed, []byte(`\"Method Not Allowed\"`))\n\t\th.metrics.Increment(\"updates.appserver.invalid\")\n\t\treturn\n\t}\n\n\tversion, data, err := h.getUpdateParams(req)\n\tif err != nil {\n\t\tif err == ErrDataTooLong {\n\t\t\tif logWarning {\n\t\t\t\th.logger.Warn(\"handlers_endpoint\", \"Data too large, rejecting request\",\n\t\t\t\t\tLogFields{\"rid\": requestID})\n\t\t\t}\n\t\t\twriteJSON(resp, http.StatusRequestEntityTooLarge, []byte(fmt.Sprintf(\n\t\t\t\t`\"Data exceeds max length of %d bytes\"`, h.maxDataLen)))\n\t\t\th.metrics.Increment(\"updates.appserver.toolong\")\n\t\t\treturn\n\t\t}\n\t\twriteJSON(resp, http.StatusBadRequest, []byte(`\"Invalid Version\"`))\n\t\th.metrics.Increment(\"updates.appserver.invalid\")\n\t\treturn\n\t}\n\n\t\/\/ TODO:\n\t\/\/ is there a magic flag for proxyable endpoints?\n\t\/\/ e.g. update\/p\/gcm\/LSoC or something?\n\t\/\/ (Note, this would allow us to use smarter FE proxies.)\n\ttoken := mux.Vars(req)[\"key\"]\n\tif uaid, chid, err = h.resolvePK(token); err != nil {\n\t\tif logWarning {\n\t\t\th.logger.Warn(\"handlers_endpoint\", \"Invalid primary key for update\",\n\t\t\t\tLogFields{\"error\": err.Error(), \"rid\": requestID, \"token\": token})\n\t\t}\n\t\twriteJSON(resp, http.StatusNotFound, []byte(`\"Invalid Token\"`))\n\t\th.metrics.Increment(\"updates.appserver.invalid\")\n\t\treturn\n\t}\n\n\t\/\/ At this point we should have a valid endpoint in the URL\n\th.metrics.Increment(\"updates.appserver.incoming\")\n\n\t\/\/ is there a Proprietary Ping for this?\n\tupdateSent, err = h.doPropPing(uaid, version, data)\n\tif err != nil {\n\t\tif logWarning {\n\t\t\th.logger.Warn(\"handlers_endpoint\", \"Could not send proprietary ping\",\n\t\t\t\tLogFields{\"rid\": requestID, \"uaid\": uaid, \"error\": err.Error()})\n\t\t}\n\t} else if updateSent {\n\t\t\/\/ Neat! Might as well return.\n\t\th.metrics.Increment(\"updates.appserver.received\")\n\t\twriteSuccess(resp)\n\t\treturn\n\t}\n\n\tif h.logger.ShouldLog(INFO) {\n\t\th.logger.Info(\"handlers_endpoint\", \"setting version for ChannelID\",\n\t\t\tLogFields{\"rid\": requestID, \"uaid\": uaid, \"chid\": chid,\n\t\t\t\t\"version\": strconv.FormatInt(version, 10)})\n\t}\n\n\tif err = h.store.Update(uaid, chid, version); err != nil {\n\t\tif logWarning {\n\t\t\th.logger.Warn(\"handlers_endpoint\", \"Could not update channel\", LogFields{\n\t\t\t\t\"rid\": requestID,\n\t\t\t\t\"uaid\": uaid,\n\t\t\t\t\"chid\": chid,\n\t\t\t\t\"version\": strconv.FormatInt(version, 10),\n\t\t\t\t\"error\": err.Error()})\n\t\t}\n\t\tstatus, _ := ErrToStatus(err)\n\t\th.metrics.Increment(\"updates.appserver.error\")\n\t\twriteJSON(resp, status, []byte(`\"Could not update channel version\"`))\n\t\treturn\n\t}\n\n\tcn, _ := resp.(http.CloseNotifier)\n\tif !h.deliver(cn, uaid, chid, version, requestID, data) {\n\t\t\/\/ We've accepted the valid endpoint, stored the data for\n\t\t\/\/ eventual pickup by the client, but failed to deliver to\n\t\t\/\/ the client via routing.\n\t\twriteJSON(resp, http.StatusAccepted, []byte(\"{}\"))\n\t\treturn\n\t}\n\n\twriteSuccess(resp)\n\tupdateSent = true\n\treturn\n}\n\n\/\/ deliver routes an incoming update to the appropriate server.\nfunc (h *EndpointHandler) deliver(cn http.CloseNotifier, uaid, chid string,\n\tversion int64, requestID string, data string) (delivered bool) {\n\n\tworker, workerConnected := h.app.GetWorker(uaid)\n\tvar routingTime time.Duration\n\n\t\/\/ Always route to other servers first, in case we're holding open a stale\n\t\/\/ connection and the client has already reconnected to a different server.\n\tif h.alwaysRoute || !workerConnected {\n\t\th.metrics.Increment(\"updates.routed.outgoing\")\n\t\t\/\/ Abort routing if the connection goes away.\n\t\tvar cancelSignal <-chan bool\n\t\tif cn != nil {\n\t\t\tcancelSignal = cn.CloseNotify()\n\t\t}\n\t\t\/\/ Route the update.\n\t\tstartTime := timeNow().UTC()\n\t\tdelivered, _ = h.router.Route(cancelSignal, uaid, chid, version,\n\t\t\tstartTime, requestID, data)\n\t\troutingTime = timeNow().UTC().Sub(startTime)\n\t}\n\n\t\/\/ If we delivered the message\n\tif delivered {\n\t\th.metrics.Increment(\"router.broadcast.hit\")\n\t\th.metrics.Timer(\"updates.routed.hits\", routingTime)\n\t\t\/\/ If we're not always routing, we're done now\n\t\tif !h.alwaysRoute {\n\t\t\th.metrics.Increment(\"updates.appserver.received\")\n\t\t\treturn true\n\t\t}\n\t} else if !workerConnected {\n\t\t\/\/ Worker is not connected and routing failed\n\t\th.metrics.Increment(\"router.broadcast.miss\")\n\t\th.metrics.Timer(\"updates.routed.misses\", routingTime)\n\t\th.metrics.Increment(\"updates.appserver.rejected\")\n\t\treturn false\n\t}\n\n\t\/\/ Possible conditions at this point:\n\t\/\/ Router delivered, but alwaysRoute is true\n\t\/\/ Router didn't deliver, alwaysRoute true or false\n\n\t\/\/ Try local delivery\n\tif err := worker.Send(chid, version, data); err == nil {\n\t\t\/\/ Local delivery worked, avoid incrementing received if we\n\t\t\/\/ also already delivered it\n\t\tif !delivered {\n\t\t\th.metrics.Increment(\"updates.appserver.received\")\n\t\t}\n\t\tdelivered = true\n\t} else if !delivered {\n\t\t\/\/ Local delivery failed *and* routing failed\n\t\th.metrics.Increment(\"updates.appserver.rejected\")\n\t}\n\treturn delivered\n}\n\nfunc (h *EndpointHandler) Close() error {\n\treturn h.closeOnce.Do(h.close)\n}\n\nfunc (h *EndpointHandler) close() (err error) {\n\tif h.logger.ShouldLog(INFO) {\n\t\th.logger.Info(\"handlers_endpoint\", \"Closing update handler\",\n\t\t\tLogFields{\"url\": h.url})\n\t}\n\tif err = h.listener.Close(); err != nil && h.logger.ShouldLog(ERROR) {\n\t\th.logger.Error(\"handlers_endpoint\", \"Error closing update listener\",\n\t\t\tLogFields{\"error\": err.Error(), \"url\": h.url})\n\t}\n\th.server.Close()\n\treturn\n}\n\nfunc validPK(pk string) bool {\n\tfor i := 0; i < len(pk); i++ {\n\t\tb := pk[i]\n\t\tif b >= 'A' && b <= 'Z' {\n\t\t\tb += 'a' - 'A'\n\t\t}\n\t\t\/\/ Accept bin64 && UUID encoding\n\t\tif (b < 'a' || b > 'z') && (b < '0' || b > '9') && b != '_' && b != '.' && b != '=' && b != '-' {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc writeJSON(resp http.ResponseWriter, status int, data []byte) {\n\tresp.Header().Set(\"Content-Type\", \"application\/json\")\n\tresp.WriteHeader(status)\n\tresp.Write(data)\n}\n\nfunc writeSuccess(resp http.ResponseWriter) {\n\twriteJSON(resp, http.StatusOK, []byte(\"{}\"))\n}\n\n\/\/ o4fs\n\/\/ vim: set tabstab=4 softtabstop=4 shiftwidth=4 noexpandtab\n<commit_msg>Clean up logic<commit_after>\/* This Source Code Form is subject to the terms of the Mozilla Public\n * License, v. 2.0. If a copy of the MPL was not distributed with this\n * file, You can obtain one at http:\/\/mozilla.org\/MPL\/2.0\/. *\/\n\npackage simplepush\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/mux\"\n)\n\nfunc NewEndpointHandler() (h *EndpointHandler) {\n\th = &EndpointHandler{mux: mux.NewRouter()}\n\th.mux.HandleFunc(\"\/update\/{key}\", h.UpdateHandler)\n\treturn h\n}\n\ntype EndpointHandlerConfig struct {\n\tMaxDataLen int `toml:\"max_data_len\" env:\"max_data_len\"`\n\tAlwaysRoute bool `toml:\"always_route\" env:\"always_route\"`\n\tListener TCPListenerConfig\n}\n\ntype EndpointHandler struct {\n\tapp *Application\n\tlogger *SimpleLogger\n\tmetrics Statistician\n\tstore Store\n\trouter Router\n\tpinger PropPinger\n\tbalancer Balancer\n\thostname string\n\ttokenKey []byte\n\tlistener net.Listener\n\tserver *ServeCloser\n\tmux *mux.Router\n\turl string\n\tmaxConns int\n\tmaxDataLen int\n\talwaysRoute bool\n\tcloseOnce Once\n}\n\nfunc (h *EndpointHandler) ConfigStruct() interface{} {\n\treturn &EndpointHandlerConfig{\n\t\tMaxDataLen: 4096,\n\t\tAlwaysRoute: false,\n\t\tListener: TCPListenerConfig{\n\t\t\tAddr: \":8081\",\n\t\t\tMaxConns: 1000,\n\t\t\tKeepAlivePeriod: \"3m\",\n\t\t},\n\t}\n}\n\nfunc (h *EndpointHandler) Init(app *Application, config interface{}) (err error) {\n\tconf := config.(*EndpointHandlerConfig)\n\th.setApp(app)\n\n\tif h.listener, err = conf.Listener.Listen(); err != nil {\n\t\th.logger.Panic(\"handlers_endpoint\", \"Could not attach update listener\",\n\t\t\tLogFields{\"error\": err.Error()})\n\t\treturn err\n\t}\n\n\tvar scheme string\n\tif conf.Listener.UseTLS() {\n\t\tscheme = \"https\"\n\t} else {\n\t\tscheme = \"http\"\n\t}\n\thost, port := HostPort(h.listener, app)\n\th.url = CanonicalURL(scheme, host, port)\n\n\th.maxConns = conf.Listener.MaxConns\n\th.setMaxDataLen(conf.MaxDataLen)\n\th.alwaysRoute = conf.AlwaysRoute\n\n\treturn nil\n}\n\nfunc (h *EndpointHandler) Listener() net.Listener { return h.listener }\nfunc (h *EndpointHandler) MaxConns() int { return h.maxConns }\nfunc (h *EndpointHandler) URL() string { return h.url }\nfunc (h *EndpointHandler) ServeMux() ServeMux { return (*RouteMux)(h.mux) }\n\n\/\/ setApp sets the parent application for this update handler.\nfunc (h *EndpointHandler) setApp(app *Application) {\n\th.app = app\n\th.logger = app.Logger()\n\th.metrics = app.Metrics()\n\th.store = app.Store()\n\th.router = app.Router()\n\th.pinger = app.PropPinger()\n\th.tokenKey = app.TokenKey()\n\th.server = NewServeCloser(&http.Server{\n\t\tConnState: func(c net.Conn, state http.ConnState) {\n\t\t\tif state == http.StateNew {\n\t\t\t\th.metrics.Increment(\"endpoint.socket.connect\")\n\t\t\t} else if state == http.StateClosed {\n\t\t\t\th.metrics.Increment(\"endpoint.socket.disconnect\")\n\t\t\t}\n\t\t},\n\t\tHandler: &LogHandler{h.mux, h.logger},\n\t\tErrorLog: log.New(&LogWriter{\n\t\t\tLogger: h.logger,\n\t\t\tName: \"handlers_endpoint\",\n\t\t\tLevel: ERROR,\n\t\t}, \"\", 0),\n\t})\n}\n\n\/\/ setMaxDataLen sets the maximum data length to v\nfunc (h *EndpointHandler) setMaxDataLen(v int) {\n\th.maxDataLen = v\n}\n\nfunc (h *EndpointHandler) Start(errChan chan<- error) {\n\tif h.logger.ShouldLog(INFO) {\n\t\th.logger.Info(\"handlers_endpoint\", \"Starting update server\",\n\t\t\tLogFields{\"url\": h.url})\n\t}\n\terrChan <- h.server.Serve(h.listener)\n}\n\nfunc (h *EndpointHandler) decodePK(token string) (key string, err error) {\n\tif len(token) == 0 {\n\t\treturn \"\", fmt.Errorf(\"Missing primary key\")\n\t}\n\tif len(h.tokenKey) == 0 {\n\t\treturn token, nil\n\t}\n\tbpk, err := Decode(h.tokenKey, token)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(bytes.TrimSpace(bpk)), nil\n}\n\nfunc (h *EndpointHandler) resolvePK(token string) (uaid, chid string, err error) {\n\tpk, err := h.decodePK(token)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Error decoding primary key: %s\", err)\n\t\treturn \"\", \"\", err\n\t}\n\tif !validPK(pk) {\n\t\terr = fmt.Errorf(\"Invalid primary key: %q\", pk)\n\t\treturn \"\", \"\", err\n\t}\n\tif uaid, chid, err = h.store.KeyToIDs(pk); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\treturn uaid, chid, nil\n}\n\nfunc (h *EndpointHandler) doPropPing(uaid string, version int64, data string) (ok bool, err error) {\n\tif h.pinger == nil {\n\t\treturn false, nil\n\t}\n\tif ok, err = h.pinger.Send(uaid, version, data); err != nil {\n\t\treturn false, fmt.Errorf(\"Could not send proprietary ping: %s\", err)\n\t}\n\tif !ok {\n\t\treturn false, nil\n\t}\n\t\/* if this is a GCM connected host, boot vers immediately to GCM\n\t *\/\n\treturn h.pinger.CanBypassWebsocket(), nil\n}\n\n\/\/ getUpdateParams extracts the update version and data from req.\nfunc (h *EndpointHandler) getUpdateParams(req *http.Request) (version int64, data string, err error) {\n\tif req.Header.Get(\"Content-Type\") == \"\" {\n\t\treq.Header.Set(\"Content-Type\",\n\t\t\t\"application\/x-www-form-urlencoded\")\n\t}\n\tsvers := req.FormValue(\"version\")\n\tif svers != \"\" {\n\t\tif version, err = strconv.ParseInt(svers, 10, 64); err != nil || version < 0 {\n\t\t\treturn 0, \"\", ErrBadVersion\n\t\t}\n\t} else {\n\t\tversion = timeNow().UTC().Unix()\n\t}\n\n\tdata = req.FormValue(\"data\")\n\tif len(data) > h.maxDataLen {\n\t\treturn 0, \"\", ErrDataTooLong\n\t}\n\treturn\n}\n\n\/\/ -- REST\nfunc (h *EndpointHandler) UpdateHandler(resp http.ResponseWriter, req *http.Request) {\n\t\/\/ Handle the version updates.\n\ttimer := timeNow()\n\trequestID := req.Header.Get(HeaderID)\n\tlogWarning := h.logger.ShouldLog(WARNING)\n\tvar (\n\t\terr error\n\t\tupdateSent bool\n\t\tversion int64\n\t\tuaid, chid string\n\t)\n\n\tdefer func() {\n\t\tnow := timeNow()\n\t\tif h.logger.ShouldLog(DEBUG) {\n\t\t\th.logger.Debug(\"handlers_endpoint\", \"+++++++++++++ DONE +++\",\n\t\t\t\tLogFields{\"rid\": requestID})\n\t\t}\n\t\tif h.logger.ShouldLog(INFO) {\n\t\t\th.logger.Info(\"handlers_endpoint\", \"Client Update complete\", LogFields{\n\t\t\t\t\"rid\": requestID,\n\t\t\t\t\"uaid\": uaid,\n\t\t\t\t\"chid\": chid,\n\t\t\t\t\"successful\": strconv.FormatBool(updateSent)})\n\t\t}\n\t\tif updateSent {\n\t\t\th.metrics.Timer(\"updates.handled\", now.Sub(timer))\n\t\t}\n\t}()\n\n\tif h.logger.ShouldLog(INFO) {\n\t\th.logger.Info(\"handlers_endpoint\", \"Handling Update\",\n\t\t\tLogFields{\"rid\": requestID})\n\t}\n\n\tif req.Method != \"PUT\" {\n\t\twriteJSON(resp, http.StatusMethodNotAllowed, []byte(`\"Method Not Allowed\"`))\n\t\th.metrics.Increment(\"updates.appserver.invalid\")\n\t\treturn\n\t}\n\n\tversion, data, err := h.getUpdateParams(req)\n\tif err != nil {\n\t\tif err == ErrDataTooLong {\n\t\t\tif logWarning {\n\t\t\t\th.logger.Warn(\"handlers_endpoint\", \"Data too large, rejecting request\",\n\t\t\t\t\tLogFields{\"rid\": requestID})\n\t\t\t}\n\t\t\twriteJSON(resp, http.StatusRequestEntityTooLarge, []byte(fmt.Sprintf(\n\t\t\t\t`\"Data exceeds max length of %d bytes\"`, h.maxDataLen)))\n\t\t\th.metrics.Increment(\"updates.appserver.toolong\")\n\t\t\treturn\n\t\t}\n\t\twriteJSON(resp, http.StatusBadRequest, []byte(`\"Invalid Version\"`))\n\t\th.metrics.Increment(\"updates.appserver.invalid\")\n\t\treturn\n\t}\n\n\t\/\/ TODO:\n\t\/\/ is there a magic flag for proxyable endpoints?\n\t\/\/ e.g. update\/p\/gcm\/LSoC or something?\n\t\/\/ (Note, this would allow us to use smarter FE proxies.)\n\ttoken := mux.Vars(req)[\"key\"]\n\tif uaid, chid, err = h.resolvePK(token); err != nil {\n\t\tif logWarning {\n\t\t\th.logger.Warn(\"handlers_endpoint\", \"Invalid primary key for update\",\n\t\t\t\tLogFields{\"error\": err.Error(), \"rid\": requestID, \"token\": token})\n\t\t}\n\t\twriteJSON(resp, http.StatusNotFound, []byte(`\"Invalid Token\"`))\n\t\th.metrics.Increment(\"updates.appserver.invalid\")\n\t\treturn\n\t}\n\n\t\/\/ At this point we should have a valid endpoint in the URL\n\th.metrics.Increment(\"updates.appserver.incoming\")\n\n\t\/\/ is there a Proprietary Ping for this?\n\tupdateSent, err = h.doPropPing(uaid, version, data)\n\tif err != nil {\n\t\tif logWarning {\n\t\t\th.logger.Warn(\"handlers_endpoint\", \"Could not send proprietary ping\",\n\t\t\t\tLogFields{\"rid\": requestID, \"uaid\": uaid, \"error\": err.Error()})\n\t\t}\n\t} else if updateSent {\n\t\t\/\/ Neat! Might as well return.\n\t\th.metrics.Increment(\"updates.appserver.received\")\n\t\twriteSuccess(resp)\n\t\treturn\n\t}\n\n\tif h.logger.ShouldLog(INFO) {\n\t\th.logger.Info(\"handlers_endpoint\", \"setting version for ChannelID\",\n\t\t\tLogFields{\"rid\": requestID, \"uaid\": uaid, \"chid\": chid,\n\t\t\t\t\"version\": strconv.FormatInt(version, 10)})\n\t}\n\n\tif err = h.store.Update(uaid, chid, version); err != nil {\n\t\tif logWarning {\n\t\t\th.logger.Warn(\"handlers_endpoint\", \"Could not update channel\", LogFields{\n\t\t\t\t\"rid\": requestID,\n\t\t\t\t\"uaid\": uaid,\n\t\t\t\t\"chid\": chid,\n\t\t\t\t\"version\": strconv.FormatInt(version, 10),\n\t\t\t\t\"error\": err.Error()})\n\t\t}\n\t\tstatus, _ := ErrToStatus(err)\n\t\th.metrics.Increment(\"updates.appserver.error\")\n\t\twriteJSON(resp, status, []byte(`\"Could not update channel version\"`))\n\t\treturn\n\t}\n\n\tcn, _ := resp.(http.CloseNotifier)\n\tif !h.deliver(cn, uaid, chid, version, requestID, data) {\n\t\t\/\/ We've accepted the valid endpoint, stored the data for\n\t\t\/\/ eventual pickup by the client, but failed to deliver to\n\t\t\/\/ the client via routing.\n\t\twriteJSON(resp, http.StatusAccepted, []byte(\"{}\"))\n\t\treturn\n\t}\n\n\twriteSuccess(resp)\n\tupdateSent = true\n\treturn\n}\n\n\/\/ deliver routes an incoming update to the appropriate server.\nfunc (h *EndpointHandler) deliver(cn http.CloseNotifier, uaid, chid string,\n\tversion int64, requestID string, data string) (delivered bool) {\n\n\tworker, workerConnected := h.app.GetWorker(uaid)\n\tvar routingTime time.Duration\n\n\tshouldRoute := h.alwaysRoute || !workerConnected\n\n\t\/\/ Always route to other servers first, in case we're holding open a stale\n\t\/\/ connection and the client has already reconnected to a different server.\n\tif shouldRoute {\n\t\th.metrics.Increment(\"updates.routed.outgoing\")\n\t\t\/\/ Abort routing if the connection goes away.\n\t\tvar cancelSignal <-chan bool\n\t\tif cn != nil {\n\t\t\tcancelSignal = cn.CloseNotify()\n\t\t}\n\t\t\/\/ Route the update.\n\t\tstartTime := timeNow().UTC()\n\t\tdelivered, _ = h.router.Route(cancelSignal, uaid, chid, version,\n\t\t\tstartTime, requestID, data)\n\t\troutingTime = timeNow().UTC().Sub(startTime)\n\n\t\t\/\/ Increment appropriate metrics\n\t\tif delivered {\n\t\t\th.metrics.Increment(\"router.broadcast.hit\")\n\t\t\th.metrics.Timer(\"updates.routed.hits\", routingTime)\n\t\t} else {\n\t\t\th.metrics.Increment(\"router.broadcast.miss\")\n\t\t\th.metrics.Timer(\"updates.routed.misses\", routingTime)\n\t\t}\n\t}\n\n\t\/\/ Should we attempt local delivery? Only if the worker is connected\n\t\/\/ and we either always route, or failed to remote deliver\n\tshouldLocalDeliver := workerConnected && (h.alwaysRoute || !delivered)\n\n\tif shouldLocalDeliver {\n\t\tif err := worker.Send(chid, version, data); err == nil {\n\t\t\tdelivered = true\n\t\t}\n\t}\n\n\t\/\/ Increment the appropriate final metric whether deliver did or\n\t\/\/ did not work\n\tif delivered {\n\t\th.metrics.Increment(\"updates.appserver.received\")\n\t} else {\n\t\th.metrics.Increment(\"updates.appserver.rejected\")\n\t}\n\n\treturn delivered\n}\n\nfunc (h *EndpointHandler) Close() error {\n\treturn h.closeOnce.Do(h.close)\n}\n\nfunc (h *EndpointHandler) close() (err error) {\n\tif h.logger.ShouldLog(INFO) {\n\t\th.logger.Info(\"handlers_endpoint\", \"Closing update handler\",\n\t\t\tLogFields{\"url\": h.url})\n\t}\n\tif err = h.listener.Close(); err != nil && h.logger.ShouldLog(ERROR) {\n\t\th.logger.Error(\"handlers_endpoint\", \"Error closing update listener\",\n\t\t\tLogFields{\"error\": err.Error(), \"url\": h.url})\n\t}\n\th.server.Close()\n\treturn\n}\n\nfunc validPK(pk string) bool {\n\tfor i := 0; i < len(pk); i++ {\n\t\tb := pk[i]\n\t\tif b >= 'A' && b <= 'Z' {\n\t\t\tb += 'a' - 'A'\n\t\t}\n\t\t\/\/ Accept bin64 && UUID encoding\n\t\tif (b < 'a' || b > 'z') && (b < '0' || b > '9') && b != '_' && b != '.' && b != '=' && b != '-' {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc writeJSON(resp http.ResponseWriter, status int, data []byte) {\n\tresp.Header().Set(\"Content-Type\", \"application\/json\")\n\tresp.WriteHeader(status)\n\tresp.Write(data)\n}\n\nfunc writeSuccess(resp http.ResponseWriter) {\n\twriteJSON(resp, http.StatusOK, []byte(\"{}\"))\n}\n\n\/\/ o4fs\n\/\/ vim: set tabstab=4 softtabstop=4 shiftwidth=4 noexpandtab\n<|endoftext|>"} {"text":"<commit_before>package integration_test\n\nimport (\n\t\"github.com\/cloudfoundry\/libbuildpack\/cutlass\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"pushing a rails51 webpacker app with sprockets\", func() {\n\tvar app *cutlass.App\n\tAfterEach(func() { app = DestroyApp(app) })\n\n\tBeforeEach(func() {\n\t\tapp = cutlass.New(Fixtures(\"rails51_webpacker\"))\n\t})\n\n\tIt(\"compiles assets with webpacker\", func() {\n\t\tPushAppAndConfirm(app)\n\t\tExpect(app.Stdout.String()).To(ContainSubstring(\"Webpacker is installed\"))\n\t\tExpect(app.Stdout.String()).To(ContainSubstring(\"Asset precompilation completed\"))\n\n\t\tExpect(app.GetBody(\"\/\")).To(ContainSubstring(\"Welcome to Rails51 Webpacker!\"))\n\t\tEventually(app.Stdout.String()).Should(ContainSubstring(\"Cleaning assets\"))\n\t})\n})\n<commit_msg>Bumps disk quota on Rails 5.1 app with sprockets<commit_after>package integration_test\n\nimport (\n\t\"github.com\/cloudfoundry\/libbuildpack\/cutlass\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"pushing a rails51 webpacker app with sprockets\", func() {\n\tvar app *cutlass.App\n\tAfterEach(func() { app = DestroyApp(app) })\n\n\tBeforeEach(func() {\n\t\tapp = cutlass.New(Fixtures(\"rails51_webpacker\"))\n\t\tapp.Disk = \"1G\"\n\t})\n\n\tIt(\"compiles assets with webpacker\", func() {\n\t\tPushAppAndConfirm(app)\n\t\tExpect(app.Stdout.String()).To(ContainSubstring(\"Webpacker is installed\"))\n\t\tExpect(app.Stdout.String()).To(ContainSubstring(\"Asset precompilation completed\"))\n\n\t\tExpect(app.GetBody(\"\/\")).To(ContainSubstring(\"Welcome to Rails51 Webpacker!\"))\n\t\tEventually(app.Stdout.String()).Should(ContainSubstring(\"Cleaning assets\"))\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package termite\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/hanwen\/termite\/attr\"\n\t\"github.com\/hanwen\/termite\/cba\"\n\t\"github.com\/hanwen\/termite\/stats\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/rpc\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype mirrorConnection struct {\n\tworkerAddr string \/\/ key in map.\n\trpcClient *rpc.Client\n\tconnection net.Conn\n\n\t\/\/ For serving the Fileserver.\n\treverseConnection net.Conn\n\n\t\/\/ Protected by mirrorConnections.Mutex.\n\tmaxJobs int\n\tavailableJobs int\n\n\tmaster *Master\n\tfileSetWaiter *attr.FileSetWaiter\n}\n\nfunc (me *mirrorConnection) Id() string {\n\treturn me.workerAddr\n}\n\nfunc (me *mirrorConnection) innerFetch(start, end int, hash string) ([]byte, error) {\n\treq := &cba.ContentRequest{\n\t\tHash: hash,\n\t\tStart: start,\n\t\tEnd: end,\n\t}\n\trep := &cba.ContentResponse{}\n\terr := me.rpcClient.Call(\"Mirror.FileContent\", req, rep)\n\treturn rep.Chunk, err\n}\n\nfunc (me *mirrorConnection) replay(fset attr.FileSet) error {\n\t\/\/ Must get data before we modify the file-system, so we don't\n\t\/\/ leave the FS in a half-finished state.\n\tfor _, info := range fset.Files {\n\t\tif info.Hash != \"\" && !me.master.cache.HasHash(info.Hash) {\n\t\t\tsaved, err := me.master.cache.Fetch(\n\t\t\t\tfunc(start, end int) ([]byte, error) {\n\t\t\t\t\treturn me.innerFetch(start, end, info.Hash)\n\t\t\t\t})\n\t\t\tif err == nil && saved != info.Hash {\n\t\t\t\tlog.Fatalf(\"mirrorConnection.replay: fetch corruption got %x want %x\", saved, info.Hash)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tme.master.replay(fset)\n\treturn nil\n}\n\nfunc (me *mirrorConnection) Send(files []*attr.FileAttr) error {\n\treq := UpdateRequest{\n\t\tFiles: files,\n\t}\n\trep := UpdateResponse{}\n\terr := me.rpcClient.Call(\"Mirror.Update\", &req, &rep)\n\tif err != nil {\n\t\tlog.Println(\"Mirror.Update failure\", err)\n\t\treturn err\n\t}\n\tlog.Printf(\"Sent pending changes to %s\", me.workerAddr)\n\treturn nil\n}\n\n\/\/ mirrorConnection manages connections from the master to the mirrors\n\/\/ on the workers.\ntype mirrorConnections struct {\n\tmaster *Master\n\tcoordinator string\n\n\tkeepAlive time.Duration\n\n\twantedMaxJobs int\n\n\tstats *stats.ServerStats\n\n\t\/\/ Protects all of the below.\n\tsync.Mutex\n\tworkers map[string]bool\n\tmirrors map[string]*mirrorConnection\n\tlastActionTime time.Time\n}\n\nfunc (me *mirrorConnections) fetchWorkers() (newMap map[string]bool) {\n\tnewMap = map[string]bool{}\n\tclient, err := rpc.DialHTTP(\"tcp\", me.coordinator)\n\tif err != nil {\n\t\tlog.Println(\"fetchWorkers: dialing coordinator:\", err)\n\t\treturn newMap\n\t}\n\tdefer client.Close()\n\treq := 0\n\trep := Registered{}\n\terr = client.Call(\"Coordinator.List\", &req, &rep)\n\tif err != nil {\n\t\tlog.Println(\"coordinator rpc error:\", err)\n\t\treturn newMap\n\t}\n\n\tfor _, v := range rep.Registrations {\n\t\tnewMap[v.Address] = true\n\t}\n\tif len(newMap) == 0 {\n\t\tlog.Println(\"coordinator has no workers for us.\")\n\t}\n\treturn newMap\n}\n\nfunc (me *mirrorConnections) refreshWorkers() {\n\tnewWorkers := me.fetchWorkers()\n\tif len(newWorkers) > 0 {\n\t\tme.Mutex.Lock()\n\t\tdefer me.Mutex.Unlock()\n\t\tme.workers = newWorkers\n\t}\n}\n\nfunc newMirrorConnections(m *Master, coordinator string, maxJobs int) *mirrorConnections {\n\tme := &mirrorConnections{\n\t\tmaster: m,\n\t\twantedMaxJobs: maxJobs,\n\t\tworkers: make(map[string]bool),\n\t\tmirrors: make(map[string]*mirrorConnection),\n\t\tcoordinator: coordinator,\n\t\tkeepAlive: time.Minute,\n\t}\n\tme.refreshStats()\n\treturn me\n}\n\nfunc (me *mirrorConnections) refreshStats() {\n\tme.stats = stats.NewServerStats()\n\tme.stats.PhaseOrder = []string{\"run\", \"send\", \"remote\", \"filewait\"}\n}\n\nfunc (me *mirrorConnections) periodicHouseholding() {\n\tme.refreshWorkers()\n\tme.maybeDropConnections()\n}\n\n\/\/ Must be called with lock held.\nfunc (me *mirrorConnections) availableJobs() int {\n\ta := 0\n\tfor _, mc := range me.mirrors {\n\t\tif mc.availableJobs > 0 {\n\t\t\ta += mc.availableJobs\n\t\t}\n\t}\n\treturn a\n}\n\n\/\/ Must be called with lock held.\nfunc (me *mirrorConnections) maxJobs() int {\n\ta := 0\n\tfor _, mc := range me.mirrors {\n\t\ta += mc.maxJobs\n\t}\n\treturn a\n}\n\nfunc (me *mirrorConnections) maybeDropConnections() {\n\tme.Mutex.Lock()\n\tdefer me.Mutex.Unlock()\n\n\t\/\/ Already dropped everything.\n\tif len(me.mirrors) == 0 {\n\t\treturn\n\t}\n\n\t\/\/ Something is running.\n\tif me.availableJobs() < me.maxJobs() {\n\t\treturn\n\t}\n\n\tif me.lastActionTime.Add(me.keepAlive).After(time.Now()) {\n\t\treturn\n\t}\n\n\tlog.Println(\"master inactive too long. Dropping connections.\")\n\tme.dropConnections()\n}\n\nfunc (me *mirrorConnections) dropConnections() {\n\tfor _, mc := range me.mirrors {\n\t\tmc.rpcClient.Close()\n\t\tmc.connection.Close()\n\t\tmc.reverseConnection.Close()\n\t\tme.master.attributes.RmClient(mc)\n\t}\n\tme.mirrors = make(map[string]*mirrorConnection)\n\tme.refreshStats()\n}\n\n\/\/ Gets a mirrorConnection to run on. Will block if none available\nfunc (me *mirrorConnections) find(name string) (*mirrorConnection, error) {\n\tme.Mutex.Lock()\n\tdefer me.Mutex.Unlock()\n\n\tvar found *mirrorConnection\n\tfor nm, v := range me.mirrors {\n\t\tif strings.Contains(nm, name) {\n\t\t\tfound = v\n\t\t\tbreak\n\t\t}\n\t}\n\tif found == nil {\n\t\treturn nil, fmt.Errorf(\"No worker with name: %q. Have %v\", name, me.mirrors)\n\t}\n\tfound.availableJobs--\n\treturn found, nil\n}\n\nfunc (me *mirrorConnections) pick() (*mirrorConnection, error) {\n\tme.Mutex.Lock()\n\tdefer me.Mutex.Unlock()\n\n\tif me.availableJobs() <= 0 {\n\t\tif len(me.workers) == 0 {\n\t\t\tme.workers = me.fetchWorkers()\n\t\t}\n\t\tme.tryConnect()\n\n\t\tif me.maxJobs() == 0 {\n\t\t\t\/\/ Didn't connect to anything. Should\n\t\t\t\/\/ probably direct the wrapper to compile\n\t\t\t\/\/ locally.\n\t\t\treturn nil, errors.New(\"No workers found at all.\")\n\t\t}\n\t}\n\n\tj := len(me.mirrors)\n\tif me.availableJobs() == 0 {\n\t\t\/\/ All workers full: schedule on a random one.\n\t\tj = rand.Intn(j)\n\t}\n\n\tvar found *mirrorConnection\n\tfor _, v := range me.mirrors {\n\t\tif j <= 0 || v.availableJobs > 0 {\n\t\t\tfound = v\n\t\t\tbreak\n\t\t}\n\t\tj--\n\t}\n\tfound.availableJobs--\n\treturn found, nil\n}\n\nfunc (me *mirrorConnections) drop(mc *mirrorConnection, err error) {\n\tme.master.fileServer.attributes.RmClient(mc)\n\n\tme.Mutex.Lock()\n\tdefer me.Mutex.Unlock()\n\tlog.Printf(\"Dropping mirror %s. Reason: %s\", mc.workerAddr, err)\n\tmc.connection.Close()\n\tmc.reverseConnection.Close()\n\tdelete(me.mirrors, mc.workerAddr)\n\tdelete(me.workers, mc.workerAddr)\n}\n\nfunc (me *mirrorConnections) jobDone(mc *mirrorConnection) {\n\tme.Mutex.Lock()\n\tdefer me.Mutex.Unlock()\n\n\tme.lastActionTime = time.Now()\n\tmc.availableJobs++\n}\n\nfunc (me *mirrorConnections) idleWorkerAddress() string {\n\tcands := []string{}\n\tfor addr := range me.workers {\n\t\t_, ok := me.mirrors[addr]\n\t\tif ok {\n\t\t\tcontinue\n\t\t}\n\t\tcands = append(cands, addr)\n\t}\n\n\tif len(cands) == 0 {\n\t\treturn \"\"\n\t}\n\treturn cands[rand.Intn(len(cands))]\n}\n\n\/\/ Tries to connect to one extra worker. Must already hold mutex.\nfunc (me *mirrorConnections) tryConnect() {\n\t\/\/ We want to max out capacity of each worker, as that helps\n\t\/\/ with cache hit rates on the worker.\n\twanted := me.wantedMaxJobs - me.maxJobs()\n\tif wanted <= 0 {\n\t\treturn\n\t}\n\n\tfor {\n\t\taddr := me.idleWorkerAddress()\n\t\tif addr == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tme.Mutex.Unlock()\n\t\tlog.Printf(\"Creating mirror on %v, requesting %d jobs\", addr, wanted)\n\t\tmc, err := me.master.createMirror(addr, wanted)\n\t\tme.Mutex.Lock()\n\t\tif err != nil {\n\t\t\tdelete(me.workers, addr)\n\t\t\tlog.Println(\"nonfatal error creating mirror:\", err)\n\t\t} else {\n\t\t\t\/\/ This could happen in the unlikely event of\n\t\t\t\/\/ the workers having more capacity than our\n\t\t\t\/\/ parallelism.\n\t\t\tif _, ok := me.mirrors[addr]; ok {\n\t\t\t\tlog.Panicf(\"already have this mirror: %v\", addr)\n\t\t\t}\n\t\t\tmc.workerAddr = addr\n\t\t\tme.mirrors[addr] = mc\n\t\t\tme.master.fileServer.attributes.AddClient(mc)\n\t\t}\n\t}\n}\n<commit_msg>Run jobs on least-loaded mirror.<commit_after>package termite\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/hanwen\/termite\/attr\"\n\t\"github.com\/hanwen\/termite\/cba\"\n\t\"github.com\/hanwen\/termite\/stats\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"net\/rpc\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\ntype mirrorConnection struct {\n\tworkerAddr string \/\/ key in map.\n\trpcClient *rpc.Client\n\tconnection net.Conn\n\n\t\/\/ For serving the Fileserver.\n\treverseConnection net.Conn\n\n\t\/\/ Protected by mirrorConnections.Mutex.\n\tmaxJobs int\n\tavailableJobs int\n\n\tmaster *Master\n\tfileSetWaiter *attr.FileSetWaiter\n}\n\nfunc (me *mirrorConnection) Id() string {\n\treturn me.workerAddr\n}\n\nfunc (me *mirrorConnection) innerFetch(start, end int, hash string) ([]byte, error) {\n\treq := &cba.ContentRequest{\n\t\tHash: hash,\n\t\tStart: start,\n\t\tEnd: end,\n\t}\n\trep := &cba.ContentResponse{}\n\terr := me.rpcClient.Call(\"Mirror.FileContent\", req, rep)\n\treturn rep.Chunk, err\n}\n\nfunc (me *mirrorConnection) replay(fset attr.FileSet) error {\n\t\/\/ Must get data before we modify the file-system, so we don't\n\t\/\/ leave the FS in a half-finished state.\n\tfor _, info := range fset.Files {\n\t\tif info.Hash != \"\" && !me.master.cache.HasHash(info.Hash) {\n\t\t\tsaved, err := me.master.cache.Fetch(\n\t\t\t\tfunc(start, end int) ([]byte, error) {\n\t\t\t\t\treturn me.innerFetch(start, end, info.Hash)\n\t\t\t\t})\n\t\t\tif err == nil && saved != info.Hash {\n\t\t\t\tlog.Fatalf(\"mirrorConnection.replay: fetch corruption got %x want %x\", saved, info.Hash)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tme.master.replay(fset)\n\treturn nil\n}\n\nfunc (me *mirrorConnection) Send(files []*attr.FileAttr) error {\n\treq := UpdateRequest{\n\t\tFiles: files,\n\t}\n\trep := UpdateResponse{}\n\terr := me.rpcClient.Call(\"Mirror.Update\", &req, &rep)\n\tif err != nil {\n\t\tlog.Println(\"Mirror.Update failure\", err)\n\t\treturn err\n\t}\n\tlog.Printf(\"Sent pending changes to %s\", me.workerAddr)\n\treturn nil\n}\n\n\/\/ mirrorConnection manages connections from the master to the mirrors\n\/\/ on the workers.\ntype mirrorConnections struct {\n\tmaster *Master\n\tcoordinator string\n\n\tkeepAlive time.Duration\n\n\twantedMaxJobs int\n\n\tstats *stats.ServerStats\n\n\t\/\/ Protects all of the below.\n\tsync.Mutex\n\tworkers map[string]bool\n\tmirrors map[string]*mirrorConnection\n\tlastActionTime time.Time\n}\n\nfunc (me *mirrorConnections) fetchWorkers() (newMap map[string]bool) {\n\tnewMap = map[string]bool{}\n\tclient, err := rpc.DialHTTP(\"tcp\", me.coordinator)\n\tif err != nil {\n\t\tlog.Println(\"fetchWorkers: dialing coordinator:\", err)\n\t\treturn newMap\n\t}\n\tdefer client.Close()\n\treq := 0\n\trep := Registered{}\n\terr = client.Call(\"Coordinator.List\", &req, &rep)\n\tif err != nil {\n\t\tlog.Println(\"coordinator rpc error:\", err)\n\t\treturn newMap\n\t}\n\n\tfor _, v := range rep.Registrations {\n\t\tnewMap[v.Address] = true\n\t}\n\tif len(newMap) == 0 {\n\t\tlog.Println(\"coordinator has no workers for us.\")\n\t}\n\treturn newMap\n}\n\nfunc (me *mirrorConnections) refreshWorkers() {\n\tnewWorkers := me.fetchWorkers()\n\tif len(newWorkers) > 0 {\n\t\tme.Mutex.Lock()\n\t\tdefer me.Mutex.Unlock()\n\t\tme.workers = newWorkers\n\t}\n}\n\nfunc newMirrorConnections(m *Master, coordinator string, maxJobs int) *mirrorConnections {\n\tme := &mirrorConnections{\n\t\tmaster: m,\n\t\twantedMaxJobs: maxJobs,\n\t\tworkers: make(map[string]bool),\n\t\tmirrors: make(map[string]*mirrorConnection),\n\t\tcoordinator: coordinator,\n\t\tkeepAlive: time.Minute,\n\t}\n\tme.refreshStats()\n\treturn me\n}\n\nfunc (me *mirrorConnections) refreshStats() {\n\tme.stats = stats.NewServerStats()\n\tme.stats.PhaseOrder = []string{\"run\", \"send\", \"remote\", \"filewait\"}\n}\n\nfunc (me *mirrorConnections) periodicHouseholding() {\n\tme.refreshWorkers()\n\tme.maybeDropConnections()\n}\n\n\/\/ Must be called with lock held.\nfunc (me *mirrorConnections) availableJobs() int {\n\ta := 0\n\tfor _, mc := range me.mirrors {\n\t\tif mc.availableJobs > 0 {\n\t\t\ta += mc.availableJobs\n\t\t}\n\t}\n\treturn a\n}\n\n\/\/ Must be called with lock held.\nfunc (me *mirrorConnections) maxJobs() int {\n\ta := 0\n\tfor _, mc := range me.mirrors {\n\t\ta += mc.maxJobs\n\t}\n\treturn a\n}\n\nfunc (me *mirrorConnections) maybeDropConnections() {\n\tme.Mutex.Lock()\n\tdefer me.Mutex.Unlock()\n\n\t\/\/ Already dropped everything.\n\tif len(me.mirrors) == 0 {\n\t\treturn\n\t}\n\n\t\/\/ Something is running.\n\tif me.availableJobs() < me.maxJobs() {\n\t\treturn\n\t}\n\n\tif me.lastActionTime.Add(me.keepAlive).After(time.Now()) {\n\t\treturn\n\t}\n\n\tlog.Println(\"master inactive too long. Dropping connections.\")\n\tme.dropConnections()\n}\n\nfunc (me *mirrorConnections) dropConnections() {\n\tfor _, mc := range me.mirrors {\n\t\tmc.rpcClient.Close()\n\t\tmc.connection.Close()\n\t\tmc.reverseConnection.Close()\n\t\tme.master.attributes.RmClient(mc)\n\t}\n\tme.mirrors = make(map[string]*mirrorConnection)\n\tme.refreshStats()\n}\n\n\/\/ Gets a mirrorConnection to run on. Will block if none available\nfunc (me *mirrorConnections) find(name string) (*mirrorConnection, error) {\n\tme.Mutex.Lock()\n\tdefer me.Mutex.Unlock()\n\n\tvar found *mirrorConnection\n\tfor nm, v := range me.mirrors {\n\t\tif strings.Contains(nm, name) {\n\t\t\tfound = v\n\t\t\tbreak\n\t\t}\n\t}\n\tif found == nil {\n\t\treturn nil, fmt.Errorf(\"No worker with name: %q. Have %v\", name, me.mirrors)\n\t}\n\tfound.availableJobs--\n\treturn found, nil\n}\n\nfunc (me *mirrorConnections) pick() (*mirrorConnection, error) {\n\tme.Mutex.Lock()\n\tdefer me.Mutex.Unlock()\n\n\tif me.availableJobs() <= 0 {\n\t\tif len(me.workers) == 0 {\n\t\t\tme.workers = me.fetchWorkers()\n\t\t}\n\t\tme.tryConnect()\n\n\t\tif me.maxJobs() == 0 {\n\t\t\t\/\/ Didn't connect to anything. Should\n\t\t\t\/\/ probably direct the wrapper to compile\n\t\t\t\/\/ locally.\n\t\t\treturn nil, errors.New(\"No workers found at all.\")\n\t\t}\n\t}\n\n\tmaxAvail := -1e9\n\tvar maxAvailMirror *mirrorConnection\n\tfor _, v := range me.mirrors {\n\t\tif v.availableJobs > 0 {\n\t\t\tv.availableJobs--\n\t\t\treturn v, nil\n\t\t}\n\t\tl := float64(v.availableJobs) \/ float64(v.maxJobs)\n\t\tif l > maxAvail {\n\t\t\tmaxAvailMirror = v\n\t\t\tmaxAvail = l\n\t\t}\n\t}\n\n\tmaxAvailMirror.availableJobs--\n\treturn maxAvailMirror, nil\n}\n\nfunc (me *mirrorConnections) drop(mc *mirrorConnection, err error) {\n\tme.master.fileServer.attributes.RmClient(mc)\n\n\tme.Mutex.Lock()\n\tdefer me.Mutex.Unlock()\n\tlog.Printf(\"Dropping mirror %s. Reason: %s\", mc.workerAddr, err)\n\tmc.connection.Close()\n\tmc.reverseConnection.Close()\n\tdelete(me.mirrors, mc.workerAddr)\n\tdelete(me.workers, mc.workerAddr)\n}\n\nfunc (me *mirrorConnections) jobDone(mc *mirrorConnection) {\n\tme.Mutex.Lock()\n\tdefer me.Mutex.Unlock()\n\n\tme.lastActionTime = time.Now()\n\tmc.availableJobs++\n}\n\nfunc (me *mirrorConnections) idleWorkerAddress() string {\n\tcands := []string{}\n\tfor addr := range me.workers {\n\t\t_, ok := me.mirrors[addr]\n\t\tif ok {\n\t\t\tcontinue\n\t\t}\n\t\tcands = append(cands, addr)\n\t}\n\n\tif len(cands) == 0 {\n\t\treturn \"\"\n\t}\n\treturn cands[rand.Intn(len(cands))]\n}\n\n\/\/ Tries to connect to one extra worker. Must already hold mutex.\nfunc (me *mirrorConnections) tryConnect() {\n\t\/\/ We want to max out capacity of each worker, as that helps\n\t\/\/ with cache hit rates on the worker.\n\twanted := me.wantedMaxJobs - me.maxJobs()\n\tif wanted <= 0 {\n\t\treturn\n\t}\n\n\tfor {\n\t\taddr := me.idleWorkerAddress()\n\t\tif addr == \"\" {\n\t\t\tbreak\n\t\t}\n\t\tme.Mutex.Unlock()\n\t\tlog.Printf(\"Creating mirror on %v, requesting %d jobs\", addr, wanted)\n\t\tmc, err := me.master.createMirror(addr, wanted)\n\t\tme.Mutex.Lock()\n\t\tif err != nil {\n\t\t\tdelete(me.workers, addr)\n\t\t\tlog.Println(\"nonfatal error creating mirror:\", err)\n\t\t} else {\n\t\t\t\/\/ This could happen in the unlikely event of\n\t\t\t\/\/ the workers having more capacity than our\n\t\t\t\/\/ parallelism.\n\t\t\tif _, ok := me.mirrors[addr]; ok {\n\t\t\t\tlog.Panicf(\"already have this mirror: %v\", addr)\n\t\t\t}\n\t\t\tmc.workerAddr = addr\n\t\t\tme.mirrors[addr] = mc\n\t\t\tme.master.fileServer.attributes.AddClient(mc)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package command\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/util\"\n)\n\ntype ServerOptions struct {\n\tcpuprofile *string\n\tv VolumeServerOptions\n}\n\nvar (\n\tserverOptions ServerOptions\n\tmasterOptions MasterOptions\n\tfilerOptions FilerOptions\n\ts3Options S3Options\n\tmsgBrokerOptions MessageBrokerOptions\n)\n\nfunc init() {\n\tcmdServer.Run = runServer \/\/ break init cycle\n}\n\nvar cmdServer = &Command{\n\tUsageLine: \"server -dir=\/tmp -volume.max=5 -ip=server_name\",\n\tShort: \"start a master server, a volume server, and optionally a filer and a S3 gateway\",\n\tLong: `start both a volume server to provide storage spaces\n and a master server to provide volume=>location mapping service and sequence number of file ids\n\n This is provided as a convenient way to start both volume server and master server.\n The servers acts exactly the same as starting them separately.\n So other volume servers can connect to this master server also.\n\n Optionally, a filer server can be started.\n Also optionally, a S3 gateway can be started.\n\n `,\n}\n\nvar (\n\tserverIp = cmdServer.Flag.String(\"ip\", util.DetectedHostAddress(), \"ip or server name\")\n\tserverBindIp = cmdServer.Flag.String(\"ip.bind\", \"0.0.0.0\", \"ip address to bind to\")\n\tserverTimeout = cmdServer.Flag.Int(\"idleTimeout\", 30, \"connection idle seconds\")\n\tserverDataCenter = cmdServer.Flag.String(\"dataCenter\", \"\", \"current volume server's data center name\")\n\tserverRack = cmdServer.Flag.String(\"rack\", \"\", \"current volume server's rack name\")\n\tserverWhiteListOption = cmdServer.Flag.String(\"whiteList\", \"\", \"comma separated Ip addresses having write permission. No limit if empty.\")\n\tserverDisableHttp = cmdServer.Flag.Bool(\"disableHttp\", false, \"disable http requests, only gRPC operations are allowed.\")\n\tvolumeDataFolders = cmdServer.Flag.String(\"dir\", os.TempDir(), \"directories to store data files. dir[,dir]...\")\n\tvolumeMaxDataVolumeCounts = cmdServer.Flag.String(\"volume.max\", \"8\", \"maximum numbers of volumes, count[,count]... If set to zero, the limit will be auto configured.\")\n\tvolumeMinFreeSpacePercent = cmdServer.Flag.String(\"volume.minFreeSpacePercent\", \"1\", \"minimum free disk space (default to 1%). Low disk space will mark all volumes as ReadOnly.\")\n\n\t\/\/ pulseSeconds = cmdServer.Flag.Int(\"pulseSeconds\", 5, \"number of seconds between heartbeats\")\n\tisStartingFiler = cmdServer.Flag.Bool(\"filer\", false, \"whether to start filer\")\n\tisStartingS3 = cmdServer.Flag.Bool(\"s3\", false, \"whether to start S3 gateway\")\n\tisStartingMsgBroker = cmdServer.Flag.Bool(\"msgBroker\", false, \"whether to start message broker\")\n\n\tserverWhiteList []string\n\n\tFalse = false\n)\n\nfunc init() {\n\tserverOptions.cpuprofile = cmdServer.Flag.String(\"cpuprofile\", \"\", \"cpu profile output file\")\n\n\tmasterOptions.port = cmdServer.Flag.Int(\"master.port\", 9333, \"master server http listen port\")\n\tmasterOptions.metaFolder = cmdServer.Flag.String(\"master.dir\", \"\", \"data directory to store meta data, default to same as -dir specified\")\n\tmasterOptions.peers = cmdServer.Flag.String(\"master.peers\", \"\", \"all master nodes in comma separated ip:masterPort list\")\n\tmasterOptions.volumeSizeLimitMB = cmdServer.Flag.Uint(\"master.volumeSizeLimitMB\", 30*1000, \"Master stops directing writes to oversized volumes.\")\n\tmasterOptions.volumePreallocate = cmdServer.Flag.Bool(\"master.volumePreallocate\", false, \"Preallocate disk space for volumes.\")\n\tmasterOptions.defaultReplication = cmdServer.Flag.String(\"master.defaultReplication\", \"000\", \"Default replication type if not specified.\")\n\tmasterOptions.garbageThreshold = cmdServer.Flag.Float64(\"garbageThreshold\", 0.3, \"threshold to vacuum and reclaim spaces\")\n\tmasterOptions.metricsAddress = cmdServer.Flag.String(\"metrics.address\", \"\", \"Prometheus gateway address\")\n\tmasterOptions.metricsIntervalSec = cmdServer.Flag.Int(\"metrics.intervalSeconds\", 15, \"Prometheus push interval in seconds\")\n\n\tfilerOptions.collection = cmdServer.Flag.String(\"filer.collection\", \"\", \"all data will be stored in this collection\")\n\tfilerOptions.port = cmdServer.Flag.Int(\"filer.port\", 8888, \"filer server http listen port\")\n\tfilerOptions.publicPort = cmdServer.Flag.Int(\"filer.port.public\", 0, \"filer server public http listen port\")\n\tfilerOptions.defaultReplicaPlacement = cmdServer.Flag.String(\"filer.defaultReplicaPlacement\", \"\", \"Default replication type if not specified during runtime.\")\n\tfilerOptions.disableDirListing = cmdServer.Flag.Bool(\"filer.disableDirListing\", false, \"turn off directory listing\")\n\tfilerOptions.maxMB = cmdServer.Flag.Int(\"filer.maxMB\", 32, \"split files larger than the limit\")\n\tfilerOptions.dirListingLimit = cmdServer.Flag.Int(\"filer.dirListLimit\", 1000, \"limit sub dir listing size\")\n\tfilerOptions.cipher = cmdServer.Flag.Bool(\"filer.encryptVolumeData\", false, \"encrypt data on volume servers\")\n\tfilerOptions.peers = cmdServer.Flag.String(\"filer.peers\", \"\", \"all filers sharing the same filer store in comma separated ip:port list\")\n\tfilerOptions.metricsHttpPort = cmdServer.Flag.Int(\"filer.metricsPort\", 0, \"Prometheus metrics listen port\")\n\n\tserverOptions.v.port = cmdServer.Flag.Int(\"volume.port\", 8080, \"volume server http listen port\")\n\tserverOptions.v.publicPort = cmdServer.Flag.Int(\"volume.port.public\", 0, \"volume server public port\")\n\tserverOptions.v.indexType = cmdServer.Flag.String(\"volume.index\", \"memory\", \"Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.\")\n\tserverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool(\"volume.images.fix.orientation\", false, \"Adjust jpg orientation when uploading.\")\n\tserverOptions.v.readRedirect = cmdServer.Flag.Bool(\"volume.read.redirect\", true, \"Redirect moved or non-local volumes.\")\n\tserverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int(\"volume.compactionMBps\", 0, \"limit compaction speed in mega bytes per second\")\n\tserverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int(\"volume.fileSizeLimitMB\", 1024, \"limit file size to avoid out of memory\")\n\tserverOptions.v.publicUrl = cmdServer.Flag.String(\"volume.publicUrl\", \"\", \"publicly accessible address\")\n\tserverOptions.v.preStopSeconds = cmdServer.Flag.Int(\"volume.preStopSeconds\", 10, \"number of seconds between stop send heartbeats and stop volume server\")\n\tserverOptions.v.pprof = cmdServer.Flag.Bool(\"volume.pprof\", false, \"enable pprof http handlers. precludes --memprofile and --cpuprofile\")\n\tserverOptions.v.metricsHttpPort = cmdServer.Flag.Int(\"volume.metricsPort\", 0, \"Prometheus metrics listen port\")\n\n\ts3Options.port = cmdServer.Flag.Int(\"s3.port\", 8333, \"s3 server http listen port\")\n\ts3Options.domainName = cmdServer.Flag.String(\"s3.domainName\", \"\", \"suffix of the host name, {bucket}.{domainName}\")\n\ts3Options.tlsPrivateKey = cmdServer.Flag.String(\"s3.key.file\", \"\", \"path to the TLS private key file\")\n\ts3Options.tlsCertificate = cmdServer.Flag.String(\"s3.cert.file\", \"\", \"path to the TLS certificate file\")\n\ts3Options.config = cmdServer.Flag.String(\"s3.config\", \"\", \"path to the config file\")\n\ts3Options.metricsHttpPort = cmdServer.Flag.Int(\"s3.metricsPort\", 0, \"Prometheus metrics listen port\")\n\n\tmsgBrokerOptions.port = cmdServer.Flag.Int(\"msgBroker.port\", 17777, \"broker gRPC listen port\")\n\n}\n\nfunc runServer(cmd *Command, args []string) bool {\n\n\tutil.LoadConfiguration(\"security\", false)\n\tutil.LoadConfiguration(\"master\", false)\n\n\tif *serverOptions.cpuprofile != \"\" {\n\t\tf, err := os.Create(*serverOptions.cpuprofile)\n\t\tif err != nil {\n\t\t\tglog.Fatal(err)\n\t\t}\n\t\tpprof.StartCPUProfile(f)\n\t\tdefer pprof.StopCPUProfile()\n\t}\n\n\tif *isStartingS3 {\n\t\t*isStartingFiler = true\n\t}\n\tif *isStartingMsgBroker {\n\t\t*isStartingFiler = true\n\t}\n\n\t_, peerList := checkPeers(*serverIp, *masterOptions.port, *masterOptions.peers)\n\tpeers := strings.Join(peerList, \",\")\n\tmasterOptions.peers = &peers\n\n\tmasterOptions.ip = serverIp\n\tmasterOptions.ipBind = serverBindIp\n\tfilerOptions.masters = &peers\n\tfilerOptions.ip = serverIp\n\tfilerOptions.bindIp = serverBindIp\n\tserverOptions.v.ip = serverIp\n\tserverOptions.v.bindIp = serverBindIp\n\tserverOptions.v.masters = &peers\n\tserverOptions.v.idleConnectionTimeout = serverTimeout\n\tserverOptions.v.dataCenter = serverDataCenter\n\tserverOptions.v.rack = serverRack\n\tmsgBrokerOptions.ip = serverIp\n\n\t\/\/ serverOptions.v.pulseSeconds = pulseSeconds\n\t\/\/ masterOptions.pulseSeconds = pulseSeconds\n\n\tmasterOptions.whiteList = serverWhiteListOption\n\n\tfilerOptions.dataCenter = serverDataCenter\n\tfilerOptions.disableHttp = serverDisableHttp\n\tmasterOptions.disableHttp = serverDisableHttp\n\n\tfilerAddress := fmt.Sprintf(\"%s:%d\", *serverIp, *filerOptions.port)\n\ts3Options.filer = &filerAddress\n\tmsgBrokerOptions.filer = &filerAddress\n\n\tif *filerOptions.defaultReplicaPlacement == \"\" {\n\t\t*filerOptions.defaultReplicaPlacement = *masterOptions.defaultReplication\n\t}\n\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tfolders := strings.Split(*volumeDataFolders, \",\")\n\n\tif *masterOptions.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 {\n\t\tglog.Fatalf(\"masterVolumeSizeLimitMB should be less than 30000\")\n\t}\n\n\tif *masterOptions.metaFolder == \"\" {\n\t\t*masterOptions.metaFolder = folders[0]\n\t}\n\tif err := util.TestFolderWritable(util.ResolvePath(*masterOptions.metaFolder)); err != nil {\n\t\tglog.Fatalf(\"Check Meta Folder (-mdir=\\\"%s\\\") Writable: %s\", *masterOptions.metaFolder, err)\n\t}\n\tfilerOptions.defaultLevelDbDirectory = masterOptions.metaFolder\n\n\tif *serverWhiteListOption != \"\" {\n\t\tserverWhiteList = strings.Split(*serverWhiteListOption, \",\")\n\t}\n\n\tif *isStartingFiler {\n\t\tgo func() {\n\t\t\ttime.Sleep(1 * time.Second)\n\n\t\t\tfilerOptions.startFiler()\n\n\t\t}()\n\t}\n\n\tif *isStartingS3 {\n\t\tgo func() {\n\t\t\ttime.Sleep(2 * time.Second)\n\n\t\t\ts3Options.startS3Server()\n\n\t\t}()\n\t}\n\n\tif *isStartingMsgBroker {\n\t\tgo func() {\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t\tmsgBrokerOptions.startQueueServer()\n\t\t}()\n\t}\n\n\t\/\/ start volume server\n\t{\n\t\tgo serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption, *volumeMinFreeSpacePercent)\n\n\t}\n\n\tstartMaster(masterOptions, serverWhiteList)\n\n\treturn true\n}\n<commit_msg>consolidate to one metricsPort in \"weed server\" mode<commit_after>package command\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"runtime\/pprof\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/glog\"\n\t\"github.com\/chrislusf\/seaweedfs\/weed\/util\"\n)\n\ntype ServerOptions struct {\n\tcpuprofile *string\n\tv VolumeServerOptions\n}\n\nvar (\n\tserverOptions ServerOptions\n\tmasterOptions MasterOptions\n\tfilerOptions FilerOptions\n\ts3Options S3Options\n\tmsgBrokerOptions MessageBrokerOptions\n)\n\nfunc init() {\n\tcmdServer.Run = runServer \/\/ break init cycle\n}\n\nvar cmdServer = &Command{\n\tUsageLine: \"server -dir=\/tmp -volume.max=5 -ip=server_name\",\n\tShort: \"start a master server, a volume server, and optionally a filer and a S3 gateway\",\n\tLong: `start both a volume server to provide storage spaces\n and a master server to provide volume=>location mapping service and sequence number of file ids\n\n This is provided as a convenient way to start both volume server and master server.\n The servers acts exactly the same as starting them separately.\n So other volume servers can connect to this master server also.\n\n Optionally, a filer server can be started.\n Also optionally, a S3 gateway can be started.\n\n `,\n}\n\nvar (\n\tserverIp = cmdServer.Flag.String(\"ip\", util.DetectedHostAddress(), \"ip or server name\")\n\tserverBindIp = cmdServer.Flag.String(\"ip.bind\", \"0.0.0.0\", \"ip address to bind to\")\n\tserverTimeout = cmdServer.Flag.Int(\"idleTimeout\", 30, \"connection idle seconds\")\n\tserverDataCenter = cmdServer.Flag.String(\"dataCenter\", \"\", \"current volume server's data center name\")\n\tserverRack = cmdServer.Flag.String(\"rack\", \"\", \"current volume server's rack name\")\n\tserverWhiteListOption = cmdServer.Flag.String(\"whiteList\", \"\", \"comma separated Ip addresses having write permission. No limit if empty.\")\n\tserverDisableHttp = cmdServer.Flag.Bool(\"disableHttp\", false, \"disable http requests, only gRPC operations are allowed.\")\n\tvolumeDataFolders = cmdServer.Flag.String(\"dir\", os.TempDir(), \"directories to store data files. dir[,dir]...\")\n\tvolumeMaxDataVolumeCounts = cmdServer.Flag.String(\"volume.max\", \"8\", \"maximum numbers of volumes, count[,count]... If set to zero, the limit will be auto configured.\")\n\tvolumeMinFreeSpacePercent = cmdServer.Flag.String(\"volume.minFreeSpacePercent\", \"1\", \"minimum free disk space (default to 1%). Low disk space will mark all volumes as ReadOnly.\")\n\tserverMetricsHttpPort = cmdServer.Flag.Int(\"metricsPort\", 0, \"Prometheus metrics listen port\")\n\n\t\/\/ pulseSeconds = cmdServer.Flag.Int(\"pulseSeconds\", 5, \"number of seconds between heartbeats\")\n\tisStartingFiler = cmdServer.Flag.Bool(\"filer\", false, \"whether to start filer\")\n\tisStartingS3 = cmdServer.Flag.Bool(\"s3\", false, \"whether to start S3 gateway\")\n\tisStartingMsgBroker = cmdServer.Flag.Bool(\"msgBroker\", false, \"whether to start message broker\")\n\n\tserverWhiteList []string\n\n\tFalse = false\n)\n\nfunc init() {\n\tserverOptions.cpuprofile = cmdServer.Flag.String(\"cpuprofile\", \"\", \"cpu profile output file\")\n\n\tmasterOptions.port = cmdServer.Flag.Int(\"master.port\", 9333, \"master server http listen port\")\n\tmasterOptions.metaFolder = cmdServer.Flag.String(\"master.dir\", \"\", \"data directory to store meta data, default to same as -dir specified\")\n\tmasterOptions.peers = cmdServer.Flag.String(\"master.peers\", \"\", \"all master nodes in comma separated ip:masterPort list\")\n\tmasterOptions.volumeSizeLimitMB = cmdServer.Flag.Uint(\"master.volumeSizeLimitMB\", 30*1000, \"Master stops directing writes to oversized volumes.\")\n\tmasterOptions.volumePreallocate = cmdServer.Flag.Bool(\"master.volumePreallocate\", false, \"Preallocate disk space for volumes.\")\n\tmasterOptions.defaultReplication = cmdServer.Flag.String(\"master.defaultReplication\", \"000\", \"Default replication type if not specified.\")\n\tmasterOptions.garbageThreshold = cmdServer.Flag.Float64(\"garbageThreshold\", 0.3, \"threshold to vacuum and reclaim spaces\")\n\tmasterOptions.metricsAddress = cmdServer.Flag.String(\"metrics.address\", \"\", \"Prometheus gateway address\")\n\tmasterOptions.metricsIntervalSec = cmdServer.Flag.Int(\"metrics.intervalSeconds\", 15, \"Prometheus push interval in seconds\")\n\n\tfilerOptions.collection = cmdServer.Flag.String(\"filer.collection\", \"\", \"all data will be stored in this collection\")\n\tfilerOptions.port = cmdServer.Flag.Int(\"filer.port\", 8888, \"filer server http listen port\")\n\tfilerOptions.publicPort = cmdServer.Flag.Int(\"filer.port.public\", 0, \"filer server public http listen port\")\n\tfilerOptions.defaultReplicaPlacement = cmdServer.Flag.String(\"filer.defaultReplicaPlacement\", \"\", \"Default replication type if not specified during runtime.\")\n\tfilerOptions.disableDirListing = cmdServer.Flag.Bool(\"filer.disableDirListing\", false, \"turn off directory listing\")\n\tfilerOptions.maxMB = cmdServer.Flag.Int(\"filer.maxMB\", 32, \"split files larger than the limit\")\n\tfilerOptions.dirListingLimit = cmdServer.Flag.Int(\"filer.dirListLimit\", 1000, \"limit sub dir listing size\")\n\tfilerOptions.cipher = cmdServer.Flag.Bool(\"filer.encryptVolumeData\", false, \"encrypt data on volume servers\")\n\tfilerOptions.peers = cmdServer.Flag.String(\"filer.peers\", \"\", \"all filers sharing the same filer store in comma separated ip:port list\")\n\n\tserverOptions.v.port = cmdServer.Flag.Int(\"volume.port\", 8080, \"volume server http listen port\")\n\tserverOptions.v.publicPort = cmdServer.Flag.Int(\"volume.port.public\", 0, \"volume server public port\")\n\tserverOptions.v.indexType = cmdServer.Flag.String(\"volume.index\", \"memory\", \"Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.\")\n\tserverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool(\"volume.images.fix.orientation\", false, \"Adjust jpg orientation when uploading.\")\n\tserverOptions.v.readRedirect = cmdServer.Flag.Bool(\"volume.read.redirect\", true, \"Redirect moved or non-local volumes.\")\n\tserverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int(\"volume.compactionMBps\", 0, \"limit compaction speed in mega bytes per second\")\n\tserverOptions.v.fileSizeLimitMB = cmdServer.Flag.Int(\"volume.fileSizeLimitMB\", 1024, \"limit file size to avoid out of memory\")\n\tserverOptions.v.publicUrl = cmdServer.Flag.String(\"volume.publicUrl\", \"\", \"publicly accessible address\")\n\tserverOptions.v.preStopSeconds = cmdServer.Flag.Int(\"volume.preStopSeconds\", 10, \"number of seconds between stop send heartbeats and stop volume server\")\n\tserverOptions.v.pprof = cmdServer.Flag.Bool(\"volume.pprof\", false, \"enable pprof http handlers. precludes --memprofile and --cpuprofile\")\n\n\ts3Options.port = cmdServer.Flag.Int(\"s3.port\", 8333, \"s3 server http listen port\")\n\ts3Options.domainName = cmdServer.Flag.String(\"s3.domainName\", \"\", \"suffix of the host name, {bucket}.{domainName}\")\n\ts3Options.tlsPrivateKey = cmdServer.Flag.String(\"s3.key.file\", \"\", \"path to the TLS private key file\")\n\ts3Options.tlsCertificate = cmdServer.Flag.String(\"s3.cert.file\", \"\", \"path to the TLS certificate file\")\n\ts3Options.config = cmdServer.Flag.String(\"s3.config\", \"\", \"path to the config file\")\n\n\tmsgBrokerOptions.port = cmdServer.Flag.Int(\"msgBroker.port\", 17777, \"broker gRPC listen port\")\n\n}\n\nfunc runServer(cmd *Command, args []string) bool {\n\n\tutil.LoadConfiguration(\"security\", false)\n\tutil.LoadConfiguration(\"master\", false)\n\n\tif *serverOptions.cpuprofile != \"\" {\n\t\tf, err := os.Create(*serverOptions.cpuprofile)\n\t\tif err != nil {\n\t\t\tglog.Fatal(err)\n\t\t}\n\t\tpprof.StartCPUProfile(f)\n\t\tdefer pprof.StopCPUProfile()\n\t}\n\n\tif *isStartingS3 {\n\t\t*isStartingFiler = true\n\t}\n\tif *isStartingMsgBroker {\n\t\t*isStartingFiler = true\n\t}\n\n\t_, peerList := checkPeers(*serverIp, *masterOptions.port, *masterOptions.peers)\n\tpeers := strings.Join(peerList, \",\")\n\tmasterOptions.peers = &peers\n\n\t\/\/ ip address\n\tmasterOptions.ip = serverIp\n\tmasterOptions.ipBind = serverBindIp\n\tfilerOptions.masters = &peers\n\tfilerOptions.ip = serverIp\n\tfilerOptions.bindIp = serverBindIp\n\tserverOptions.v.ip = serverIp\n\tserverOptions.v.bindIp = serverBindIp\n\tserverOptions.v.masters = &peers\n\tserverOptions.v.idleConnectionTimeout = serverTimeout\n\tserverOptions.v.dataCenter = serverDataCenter\n\tserverOptions.v.rack = serverRack\n\tmsgBrokerOptions.ip = serverIp\n\n\t\/\/ metrics port\n\tfilerOptions.metricsHttpPort = serverMetricsHttpPort\n\tserverOptions.v.metricsHttpPort = serverMetricsHttpPort\n\ts3Options.metricsHttpPort = serverMetricsHttpPort\n\n\t\/\/ serverOptions.v.pulseSeconds = pulseSeconds\n\t\/\/ masterOptions.pulseSeconds = pulseSeconds\n\n\tmasterOptions.whiteList = serverWhiteListOption\n\n\tfilerOptions.dataCenter = serverDataCenter\n\tfilerOptions.disableHttp = serverDisableHttp\n\tmasterOptions.disableHttp = serverDisableHttp\n\n\tfilerAddress := fmt.Sprintf(\"%s:%d\", *serverIp, *filerOptions.port)\n\ts3Options.filer = &filerAddress\n\tmsgBrokerOptions.filer = &filerAddress\n\n\tif *filerOptions.defaultReplicaPlacement == \"\" {\n\t\t*filerOptions.defaultReplicaPlacement = *masterOptions.defaultReplication\n\t}\n\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\n\tfolders := strings.Split(*volumeDataFolders, \",\")\n\n\tif *masterOptions.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 {\n\t\tglog.Fatalf(\"masterVolumeSizeLimitMB should be less than 30000\")\n\t}\n\n\tif *masterOptions.metaFolder == \"\" {\n\t\t*masterOptions.metaFolder = folders[0]\n\t}\n\tif err := util.TestFolderWritable(util.ResolvePath(*masterOptions.metaFolder)); err != nil {\n\t\tglog.Fatalf(\"Check Meta Folder (-mdir=\\\"%s\\\") Writable: %s\", *masterOptions.metaFolder, err)\n\t}\n\tfilerOptions.defaultLevelDbDirectory = masterOptions.metaFolder\n\n\tif *serverWhiteListOption != \"\" {\n\t\tserverWhiteList = strings.Split(*serverWhiteListOption, \",\")\n\t}\n\n\tif *isStartingFiler {\n\t\tgo func() {\n\t\t\ttime.Sleep(1 * time.Second)\n\n\t\t\tfilerOptions.startFiler()\n\n\t\t}()\n\t}\n\n\tif *isStartingS3 {\n\t\tgo func() {\n\t\t\ttime.Sleep(2 * time.Second)\n\n\t\t\ts3Options.startS3Server()\n\n\t\t}()\n\t}\n\n\tif *isStartingMsgBroker {\n\t\tgo func() {\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t\tmsgBrokerOptions.startQueueServer()\n\t\t}()\n\t}\n\n\t\/\/ start volume server\n\t{\n\t\tgo serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption, *volumeMinFreeSpacePercent)\n\n\t}\n\n\tstartMaster(masterOptions, serverWhiteList)\n\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>package util\n\nconst (\n\tVERSION = \"0.71 beta\"\n)\n<commit_msg>change to 0.71 version<commit_after>package util\n\nconst (\n\tVERSION = \"0.71\"\n)\n<|endoftext|>"} {"text":"<commit_before><commit_msg>replicant-call benchmark<commit_after><|endoftext|>"} {"text":"<commit_before>package etcd\n\nimport (\n\t\"log\"\n\t\"time\"\n\n\t\"strings\"\n\n\t\"github.com\/coreos\/etcd\/client\"\n\t\"github.com\/fsouza\/go-dockerclient\/external\/golang.org\/x\/net\/context\"\n\t\"github.com\/vsco\/dcdr\/cli\/api\/stores\"\n\t\"github.com\/vsco\/dcdr\/config\"\n)\n\ntype ETCDStore struct {\n\tkv client.KeysAPI\n\tctx context.Context\n\tgetOpts *client.GetOptions\n\tsetOpts *client.SetOptions\n\tdeleteOpts *client.DeleteOptions\n}\n\nvar DefaultEndpoints = []string{\"http:\/\/127.0.0.1:2379\"}\n\nfunc DefaultETCDlStore(cfg *config.Config) (client.KeysAPI, error) {\n\tendpoints := DefaultEndpoints\n\n\tif len(cfg.Etcd.Endpoints) > 0 {\n\t\tendpoints = cfg.Etcd.Endpoints\n\t}\n\n\tecfg := client.Config{\n\t\tEndpoints: endpoints,\n\t\tTransport: client.DefaultTransport,\n\t\tHeaderTimeoutPerRequest: time.Second,\n\t}\n\n\tc, err := client.New(ecfg)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn client.NewKeysAPI(c), nil\n}\n\nfunc New(cfg *config.Config) stores.StoreIFace {\n\tkv, _ := DefaultETCDlStore(cfg)\n\n\tes := &ETCDStore{\n\t\tkv: kv,\n\t\tctx: context.Background(),\n\t}\n\n\treturn es\n}\n\nfunc (s *ETCDStore) Get(key string) (*stores.KVByte, error) {\n\tresp, err := s.kv.Get(s.ctx, key, s.getOpts)\n\n\tif err != nil {\n\t\tswitch err.(client.Error).Code {\n\t\tcase client.ErrorCodeKeyNotFound:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn toKVByte(resp.Node), nil\n}\n\nfunc (s *ETCDStore) Set(key string, bts []byte) error {\n\t_, err := s.kv.Set(s.ctx, key, string(bts), s.setOpts)\n\n\treturn err\n}\n\nfunc (s *ETCDStore) Delete(key string) error {\n\t_, err := s.kv.Delete(s.ctx, key, s.deleteOpts)\n\n\tswitch err.(client.Error).Code {\n\tcase client.ErrorCodeKeyNotFound:\n\t\treturn nil\n\tdefault:\n\t\treturn err\n\t}\n}\n\nfunc (s *ETCDStore) List(prefix string) (stores.KVBytes, error) {\n\topts := &client.GetOptions{\n\t\tRecursive: true,\n\t\tSort: true,\n\t\tQuorum: true,\n\t}\n\n\tresp, err := s.kv.Get(s.ctx, prefix, opts)\n\n\tif err != nil {\n\t\tswitch err.(client.Error).Code {\n\t\tcase client.ErrorCodeKeyNotFound:\n\t\t\treturn nil, nil\n\t\tdefault:\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tkvbs := FlattenToKVBytes(resp.Node, make(stores.KVBytes, 0))\n\n\treturn kvbs, nil\n}\n\nfunc FlattenToKVBytes(n *client.Node, nodes stores.KVBytes) stores.KVBytes {\n\tif n.Dir {\n\t\tfor _, nd := range n.Nodes {\n\t\t\tnodes = FlattenToKVBytes(nd, nodes)\n\t\t}\n\t} else {\n\t\tnodes = append(nodes, toKVByte(n))\n\t}\n\n\treturn nodes\n}\n\nfunc toKVByte(n *client.Node) *stores.KVByte {\n\treturn &stores.KVByte{\n\t\t\/\/ remove leading slash as it adds an empty\n\t\t\/\/ hash entry when exploded to JSON.\n\t\tKey: strings.TrimPrefix(n.Key, \"\/\"),\n\t\tBytes: []byte(n.Value),\n\t}\n}\n<commit_msg>Handle etcd cluster errors gracefully<commit_after>package etcd\n\nimport (\n\t\"log\"\n\t\"time\"\n\n\t\"strings\"\n\n\t\"github.com\/coreos\/etcd\/client\"\n\t\"github.com\/fsouza\/go-dockerclient\/external\/golang.org\/x\/net\/context\"\n\t\"github.com\/vsco\/dcdr\/cli\/api\/stores\"\n\t\"github.com\/vsco\/dcdr\/config\"\n)\n\ntype ETCDStore struct {\n\tkv client.KeysAPI\n\tctx context.Context\n\tgetOpts *client.GetOptions\n\tsetOpts *client.SetOptions\n\tdeleteOpts *client.DeleteOptions\n}\n\nvar DefaultEndpoints = []string{\"http:\/\/127.0.0.1:2379\"}\n\nfunc DefaultETCDlStore(cfg *config.Config) (client.KeysAPI, error) {\n\tendpoints := DefaultEndpoints\n\n\tif len(cfg.Etcd.Endpoints) > 0 {\n\t\tendpoints = cfg.Etcd.Endpoints\n\t}\n\n\tecfg := client.Config{\n\t\tEndpoints: endpoints,\n\t\tTransport: client.DefaultTransport,\n\t\tHeaderTimeoutPerRequest: time.Second,\n\t}\n\n\tc, err := client.New(ecfg)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treturn client.NewKeysAPI(c), nil\n}\n\nfunc New(cfg *config.Config) stores.StoreIFace {\n\tkv, _ := DefaultETCDlStore(cfg)\n\n\tes := &ETCDStore{\n\t\tkv: kv,\n\t\tctx: context.Background(),\n\t}\n\n\treturn es\n}\n\nfunc (s *ETCDStore) Get(key string) (*stores.KVByte, error) {\n\tresp, err := s.kv.Get(s.ctx, key, s.getOpts)\n\n\tif err != nil {\n\t\treturn nil, etcdError(err)\n\t}\n\n\treturn toKVByte(resp.Node), nil\n}\n\nfunc (s *ETCDStore) Set(key string, bts []byte) error {\n\t_, err := s.kv.Set(s.ctx, key, string(bts), s.setOpts)\n\n\treturn err\n}\n\nfunc (s *ETCDStore) Delete(key string) error {\n\t_, err := s.kv.Delete(s.ctx, key, s.deleteOpts)\n\n\treturn etcdError(err)\n}\n\nfunc (s *ETCDStore) List(prefix string) (stores.KVBytes, error) {\n\topts := &client.GetOptions{\n\t\tRecursive: true,\n\t\tSort: true,\n\t\tQuorum: true,\n\t}\n\n\tresp, err := s.kv.Get(s.ctx, prefix, opts)\n\n\tif err != nil {\n\t\treturn nil, etcdError(err)\n\t}\n\n\tkvbs := FlattenToKVBytes(resp.Node, make(stores.KVBytes, 0))\n\n\treturn kvbs, nil\n}\n\nfunc FlattenToKVBytes(n *client.Node, nodes stores.KVBytes) stores.KVBytes {\n\tif n.Dir {\n\t\tfor _, nd := range n.Nodes {\n\t\t\tnodes = FlattenToKVBytes(nd, nodes)\n\t\t}\n\t} else {\n\t\tnodes = append(nodes, toKVByte(n))\n\t}\n\n\treturn nodes\n}\n\nfunc toKVByte(n *client.Node) *stores.KVByte {\n\treturn &stores.KVByte{\n\t\t\/\/ remove leading slash as it adds an empty\n\t\t\/\/ hash entry when exploded to JSON.\n\t\tKey: strings.TrimPrefix(n.Key, \"\/\"),\n\t\tBytes: []byte(n.Value),\n\t}\n}\n\nfunc etcdError(err error) error {\n\tswitch err.(type) {\n\tcase client.Error:\n\t\tif err.(client.Error).Code == client.ErrorCodeKeyNotFound {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn err\n\tdefault:\n\t\treturn err\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"os\"\n\t\"sync\"\n\n\t\"git.mayflower.de\/vaillant-team\/docker-ls\/cli\/docker-ls\/response\"\n\t\"git.mayflower.de\/vaillant-team\/docker-ls\/lib\"\n)\n\ntype repositoriesCmd struct {\n\tflags *flag.FlagSet\n\tcfg *Config\n}\n\nfunc (r *repositoriesCmd) execute(argv []string) (err error) {\n\tlibCfg := lib.NewConfig()\n\tlibCfg.BindToFlags(r.flags)\n\n\tr.cfg = newConfig()\n\tr.cfg.bindToFlags(r.flags, OPTIONS_FULL)\n\n\terr = r.flags.Parse(argv)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(r.flags.Args()) != 0 {\n\t\tr.flags.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tregistryApi := lib.NewRegistryApi(libCfg)\n\tvar resp sortable\n\n\tswitch {\n\tcase r.cfg.recursionLevel >= 1:\n\t\tresp, err = r.listLevel1(registryApi)\n\n\tcase r.cfg.recursionLevel == 0:\n\t\tresp, err = r.listLevel0(registryApi)\n\t}\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresp.Sort()\n\terr = serializeToStdout(resp, r.cfg)\n\n\tif r.cfg.statistics {\n\t\tdumpStatistics(registryApi.GetStatistics())\n\t}\n\n\treturn\n}\n\nfunc (r *repositoriesCmd) listLevel0(api lib.RegistryApi) (resp *response.RepositoriesL0, err error) {\n\tprogress := NewProgressIndicator(r.cfg)\n\tprogress.Start(\"requesting list\")\n\n\tresult := api.ListRepositories()\n\tresp = response.NewRepositoriesL0()\n\n\tprogress.Progress()\n\n\tfor repository := range result.Repositories() {\n\t\tresp.AddRepository(repository)\n\t}\n\n\terr = result.LastError()\n\n\tprogress.Finish(\"done\")\n\treturn\n}\n\nfunc (r *repositoriesCmd) listLevel1(api lib.RegistryApi) (resp *response.RepositoriesL1, err error) {\n\tprogress := NewProgressIndicator(r.cfg)\n\tprogress.Start(\"requesting list\")\n\n\trepositoriesResult := api.ListRepositories()\n\tresp = response.NewRepositoriesL1()\n\tprogress.Progress()\n\n\terrors := make(chan error)\n\n\tgo func() {\n\t\tvar wait sync.WaitGroup\n\n\t\tfor repository := range repositoriesResult.Repositories() {\n\t\t\twait.Add(1)\n\n\t\t\tgo func(repository lib.Repository) {\n\t\t\t\ttagsResult := api.ListTags(repository.Name())\n\t\t\t\tprogress.Progress()\n\t\t\t\ttagsL0 := response.NewTagsL0(repository.Name())\n\n\t\t\t\tfor tag := range tagsResult.Tags() {\n\t\t\t\t\ttagsL0.AddTag(tag)\n\t\t\t\t}\n\n\t\t\t\tresp.AddTags(tagsL0)\n\n\t\t\t\tif err := tagsResult.LastError(); err != nil {\n\t\t\t\t\terrors <- err\n\t\t\t\t}\n\n\t\t\t\twait.Done()\n\t\t\t}(repository)\n\t\t}\n\n\t\tif err := repositoriesResult.LastError(); err != nil {\n\t\t\terrors <- err\n\t\t}\n\n\t\twait.Wait()\n\n\t\tclose(errors)\n\t}()\n\n\tfor nextError := range errors {\n\t\tif err == nil {\n\t\t\terr = nextError\n\t\t}\n\t}\n\n\tprogress.Finish(\"done\")\n\treturn\n}\n\nfunc newRepositoriesCmd(name string) (cmd *repositoriesCmd) {\n\tcmd = &repositoriesCmd{\n\t\tflags: flag.NewFlagSet(name, flag.ExitOnError),\n\t}\n\n\tcmd.flags.Usage = commandUsage(name, \"\", \"List all repositories.\", cmd.flags)\n\n\treturn\n}\n<commit_msg>More accurate progress reporting für repository listings.<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"os\"\n\t\"sync\"\n\n\t\"git.mayflower.de\/vaillant-team\/docker-ls\/cli\/docker-ls\/response\"\n\t\"git.mayflower.de\/vaillant-team\/docker-ls\/lib\"\n)\n\ntype repositoriesCmd struct {\n\tflags *flag.FlagSet\n\tcfg *Config\n}\n\nfunc (r *repositoriesCmd) execute(argv []string) (err error) {\n\tlibCfg := lib.NewConfig()\n\tlibCfg.BindToFlags(r.flags)\n\n\tr.cfg = newConfig()\n\tr.cfg.bindToFlags(r.flags, OPTIONS_FULL)\n\n\terr = r.flags.Parse(argv)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif len(r.flags.Args()) != 0 {\n\t\tr.flags.Usage()\n\t\tos.Exit(1)\n\t}\n\n\tregistryApi := lib.NewRegistryApi(libCfg)\n\tvar resp sortable\n\n\tswitch {\n\tcase r.cfg.recursionLevel >= 1:\n\t\tresp, err = r.listLevel1(registryApi)\n\n\tcase r.cfg.recursionLevel == 0:\n\t\tresp, err = r.listLevel0(registryApi)\n\t}\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tresp.Sort()\n\terr = serializeToStdout(resp, r.cfg)\n\n\tif r.cfg.statistics {\n\t\tdumpStatistics(registryApi.GetStatistics())\n\t}\n\n\treturn\n}\n\nfunc (r *repositoriesCmd) listLevel0(api lib.RegistryApi) (resp *response.RepositoriesL0, err error) {\n\tprogress := NewProgressIndicator(r.cfg)\n\tprogress.Start(\"requesting list\")\n\n\tresult := api.ListRepositories()\n\tresp = response.NewRepositoriesL0()\n\n\tprogress.Progress()\n\n\tfor repository := range result.Repositories() {\n\t\tresp.AddRepository(repository)\n\t}\n\n\terr = result.LastError()\n\n\tprogress.Finish(\"done\")\n\treturn\n}\n\nfunc (r *repositoriesCmd) listLevel1(api lib.RegistryApi) (resp *response.RepositoriesL1, err error) {\n\tprogress := NewProgressIndicator(r.cfg)\n\tprogress.Start(\"requesting list\")\n\n\trepositoriesResult := api.ListRepositories()\n\tresp = response.NewRepositoriesL1()\n\tprogress.Progress()\n\n\terrors := make(chan error)\n\n\tgo func() {\n\t\tvar wait sync.WaitGroup\n\n\t\tfor repository := range repositoriesResult.Repositories() {\n\t\t\twait.Add(1)\n\n\t\t\tgo func(repository lib.Repository) {\n\t\t\t\ttagsResult := api.ListTags(repository.Name())\n\t\t\t\ttagsL0 := response.NewTagsL0(repository.Name())\n\n\t\t\t\tfor tag := range tagsResult.Tags() {\n\t\t\t\t\ttagsL0.AddTag(tag)\n\t\t\t\t}\n\n\t\t\t\tprogress.Progress()\n\t\t\t\tresp.AddTags(tagsL0)\n\n\t\t\t\tif err := tagsResult.LastError(); err != nil {\n\t\t\t\t\terrors <- err\n\t\t\t\t}\n\n\t\t\t\twait.Done()\n\t\t\t}(repository)\n\t\t}\n\n\t\tif err := repositoriesResult.LastError(); err != nil {\n\t\t\terrors <- err\n\t\t}\n\n\t\twait.Wait()\n\n\t\tclose(errors)\n\t}()\n\n\tfor nextError := range errors {\n\t\tif err == nil {\n\t\t\terr = nextError\n\t\t}\n\t}\n\n\tprogress.Finish(\"done\")\n\treturn\n}\n\nfunc newRepositoriesCmd(name string) (cmd *repositoriesCmd) {\n\tcmd = &repositoriesCmd{\n\t\tflags: flag.NewFlagSet(name, flag.ExitOnError),\n\t}\n\n\tcmd.flags.Usage = commandUsage(name, \"\", \"List all repositories.\", cmd.flags)\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package tlcore\n\nimport (\n\t\"os\"\n\t\"testing\"\n)\nimport \"bytes\"\n\n\/*\nThis file contains all core testing functions.\n\n*\/\n\nconst numChunks = 8\n\n\/\/Test a simple put & get\nfunc TestSimple(t *testing.T) {\n\tm := NewMap(\"testdb\/\", numChunks)\n\tdefer os.RemoveAll(\"testdb\/\")\n\tdefer m.Close()\n\n\tm.Set([]byte(\"k1\"), []byte(\"v1\"))\n\trval, _ := m.Get([]byte(\"k1\"))\n\tif !bytes.Equal([]byte(\"v1\"), rval) {\n\t\tt.Fatal(\"Error: value mismatch\")\n\t}\n}\n\n\/\/Test a simple put & get (after a map close())\nfunc TestSimpleRestore(t *testing.T) {\n\tdefer os.RemoveAll(\"testdb\/\")\n\t{\n\t\tm := NewMap(\"testdb\/\", numChunks)\n\t\tm.Set([]byte(\"k1\"), []byte(\"v1\"))\n\t\tm.Close()\n\t}\n\t{\n\t\tm := OpenMap(\"testdb\/\")\n\t\trval, _ := m.Get([]byte(\"k1\"))\n\t\tif !bytes.Equal([]byte(\"v1\"), rval) {\n\t\t\tt.Fatal(\"Error: value mismatch\")\n\t\t}\n\t\tm.Close()\n\t}\n}\n\n\/*\n\/\/Test lots of simple put & get\nfunc TestSimpleLots(t *testing.T) {\n\tdefer os.RemoveAll(\"testdb\/\")\n\tdefer os.Chdir(\"..\/\")\n\tk := make([]byte, 4)\n\tv := make([]byte, 4)\n\t{\n\t\tdb := Create(\"testdb\")\n\t\tm1, _ := db.AllocMap(\"mapa1\")\n\n\t\tfor i := 0; i < 128*1024; i++ {\n\t\t\tbinary.LittleEndian.PutUint32(k, uint32(i))\n\t\t\tbinary.LittleEndian.PutUint32(v, uint32(i))\n\t\t\tm1.Put(k, v)\n\t\t}\n\n\t\tfor i := 0; i < 128*1024; i++ {\n\t\t\tbinary.LittleEndian.PutUint32(k, uint32(i))\n\t\t\tbinary.LittleEndian.PutUint32(v, uint32(i))\n\t\t\trval, _ := m1.Get(k)\n\t\t\tif !bytes.Equal(v, rval) {\n\t\t\t\tt.Fatal(\"Err 1: mismatch\")\n\t\t\t}\n\t\t}\n\n\t\tdb.Close()\n\t}\n\t{\n\t\tdb := Open(\"testdb\")\n\t\tm1 := db.mapsByName[\"mapa1\"]\n\t\tfor i := 0; i < 128*1024; i++ {\n\t\t\tbinary.LittleEndian.PutUint32(k, uint32(i))\n\t\t\tbinary.LittleEndian.PutUint32(v, uint32(i))\n\t\t\trval, _ := m1.Get(k)\n\t\t\tif !bytes.Equal(v, rval) {\n\t\t\t\tt.Fatal(\"Err 2: mismatch\")\n\t\t\t}\n\t\t}\n\t\tdb.Close()\n\t}\n}\n\n\n\n\/\/Test lots of simple put & get, in a parallel way, test multi thread safety\nfunc TestParSimpleLots(t *testing.T) {\n\tdefer os.RemoveAll(\"testdb\/\")\n\tdefer os.Chdir(\"..\/\")\n\tk := make([]byte, 4)\n\tv := make([]byte, 4)\n\t{\n\t\tdb := Create(\"testdb\")\n\t\tm1, _ := db.AllocMap(\"mapa1\")\n\n\t\tfor tid := 0; tid < 27; tid++ {\n\t\t\tfor i := 0; i < 1024; i++ {\n\t\t\t\tbinary.LittleEndian.PutUint32(k, uint32(i*27+tid))\n\t\t\t\tbinary.LittleEndian.PutUint32(v, uint32(i*27+tid))\n\t\t\t\tm1.Put(k, v)\n\t\t\t}\n\t\t}\n\n\t\tfor i := 0; i < 27*1024; i++ {\n\t\t\tbinary.LittleEndian.PutUint32(k, uint32(i))\n\t\t\tbinary.LittleEndian.PutUint32(v, uint32(i))\n\t\t\trval, _ := m1.Get(k)\n\t\t\tif !bytes.Equal(v, rval) {\n\t\t\t\tt.Fatal(\"Err 1: mismatch\")\n\t\t\t}\n\t\t}\n\n\t\tdb.Close()\n\t}\n\t{\n\t\tdb := Open(\"testdb\")\n\t\tm1 := db.mapsByName[\"mapa1\"]\n\t\tfor i := 0; i < 27*1024; i++ {\n\t\t\tbinary.LittleEndian.PutUint32(k, uint32(i))\n\t\t\tbinary.LittleEndian.PutUint32(v, uint32(i))\n\t\t\trval, _ := m1.Get(k)\n\t\t\tif !bytes.Equal(v, rval) {\n\t\t\t\tt.Fatal(\"Err 2: mismatch\")\n\t\t\t}\n\t\t}\n\t\tdb.Close()\n\t}\n}\n\n\/\/Test simple set\n\/\/Test simple del\n\n\/\/Test complex get, put, set, del mix\n\n\/\/Common functions test\nfunc TestCmplx1(t *testing.T) {\n\tcoreTest(100000, 11, 129, 64)\n}\n\n\/\/Low key size: test delete operation\nfunc TestCmplx2(t *testing.T) {\n\tcoreTest(100000, 2, 129, 64)\n}\n\n\/\/Large key size\nfunc TestCmplx3(t *testing.T) {\n\tcoreTest(10000, 130, 129, 64)\n}\n\n\/\/Large value size\nfunc TestCmplx4(t *testing.T) {\n\tcoreTest(10000, 11, 555, 64)\n}\n\n\/\/Test low value size\nfunc TestCmplx5(t *testing.T) {\n\tcoreTest(10000, 11, 2, 64)\n}\n\nfunc operate() {\n\n}\nfunc goOperate() {\n\n}\nfunc dbOperate() {\n\n}\nfunc checkDB() {\n\n}\n\nfunc coreTest(numOperations, maxKeySize, maxValueSize, threads int) {\n\t\/\/Operate on built-in map\n\n\tgoMap := make(map[string][]byte)\n\tvar goDeletes []([]byte)\n\tfor core := 0; core < threads; core++ {\n\t\tr := rand.New(rand.NewSource(int64(core)))\n\t\tbase := make([]byte, 4)\n\t\tbase2 := make([]byte, 4)\n\t\tfor i := 0; i < numOperations; i++ {\n\t\t\topType := 1 + r.Intn(3)\n\t\t\topKeySize := r.Intn(maxKeySize-1) + 1\n\t\t\topValueSize := r.Intn(maxValueSize-1) + 1\n\t\t\tbinary.LittleEndian.PutUint32(base, uint32(r.Int31()*64)+uint32(core))\n\t\t\tbinary.LittleEndian.PutUint32(base2, uint32(i*64+core))\n\t\t\tkey := bytes.Repeat([]byte(base), opKeySize)\n\t\t\tvalue := bytes.Repeat([]byte(base), opValueSize)\n\t\t\t\/\/fmt.Println(\"gomap\", opType, key, value)\n\t\t\tswitch opType {\n\t\t\tcase OpPut:\n\t\t\t\tif _, ok := goMap[string(key)]; !ok {\n\t\t\t\t\tgoMap[string(key)] = value\n\t\t\t\t}\n\t\t\tcase OpDel:\n\t\t\t\tif _, ok := goMap[string(key)]; ok {\n\t\t\t\t\tdelete(goMap, string(key))\n\t\t\t\t\tgoDeletes = append(goDeletes, key)\n\t\t\t\t}\n\t\t\tcase OpSet:\n\t\t\t\tif _, ok := goMap[string(key)]; ok {\n\t\t\t\t\tgoMap[string(key)] = value\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\/\/Operate on DB\n\tdb := Create(\"testdb2\")\n\tdefer os.RemoveAll(\"testdb2\/\")\n\tdefer os.Chdir(\"..\/\")\n\n\tm1, _ := db.AllocMap(\"mapa1\")\n\n\tvar w sync.WaitGroup\n\tw.Add(threads)\n\tfor core := 0; core < threads; core++ {\n\t\tgo func(core int) {\n\t\t\tr := rand.New(rand.NewSource(int64(core)))\n\t\t\tbase := make([]byte, 4)\n\t\t\tbase2 := make([]byte, 4)\n\t\t\tfor i := 0; i < numOperations; i++ {\n\t\t\t\topType := 1 + r.Intn(3)\n\t\t\t\topKeySize := r.Intn(maxKeySize-1) + 1\n\t\t\t\topValueSize := r.Intn(maxValueSize-1) + 1\n\t\t\t\tbinary.LittleEndian.PutUint32(base, uint32(r.Int31()*64)+uint32(core))\n\t\t\t\tbinary.LittleEndian.PutUint32(base2, uint32(i*64+core))\n\t\t\t\tkey := bytes.Repeat([]byte(base), opKeySize)[0:opKeySize]\n\t\t\t\tvalue := bytes.Repeat([]byte(base2), opValueSize)[0:opValueSize]\n\t\t\t\t\/\/fmt.Println(\"db \", opType, key, value)\n\t\t\t\tswitch opType {\n\t\t\t\tcase OpPut:\n\t\t\t\t\tm1.Put(key, value)\n\t\t\t\tcase OpDel:\n\t\t\t\t\tm1.Delete(key)\n\t\t\t\tcase OpSet:\n\t\t\t\t\tm1.Set(key, value)\n\t\t\t\t}\n\t\t\t}\n\t\t\tw.Done()\n\t\t}(core)\n\t}\n\tw.Wait()\n\t\/\/Check map is in DB\n\tfor key, value := range goMap {\n\t\trval, err := m1.Get([]byte(key))\n\t\t\/\/fmt.Println([]byte(key), value)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif !bytes.Equal(rval, value) {\n\t\t\tpanic(1)\n\t\t}\n\t}\n\n\t\/\/Check deleteds aren't in DB\n\tfmt.Println(\"Tested deletes:\", len(goDeletes))\n\tfor i := 0; i < len(goDeletes); i++ {\n\t\tkey := goDeletes[i]\n\t\t_, err := m1.Get([]byte(key))\n\t\tif err == nil {\n\t\t\tpanic(2)\n\t\t}\n\t}\n\t\/\/Close DB\n\tdb.Close()\n\n\t{\n\t\t\/\/Restore DB\n\t\tdb := Open(\"testdb2\")\n\t\tm1 := db.mapsByName[\"mapa1\"]\n\t\t\/\/Check again\n\t\t\/\/Check map is in DB\n\t\tfor key, value := range goMap {\n\t\t\trval, err := m1.Get([]byte(key))\n\t\t\t\/\/fmt.Println([]byte(key), value)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tif !bytes.Equal(rval, value) {\n\t\t\t\tpanic(1)\n\t\t\t}\n\t\t}\n\n\t\t\/\/Check deleteds aren't in DB\n\t\tfmt.Println(\"Tested deletes:\", len(goDeletes))\n\t\tfor i := 0; i < len(goDeletes); i++ {\n\t\t\tkey := goDeletes[i]\n\t\t\t_, err := m1.Get([]byte(key))\n\t\t\tif err == nil {\n\t\t\t\tpanic(2)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/Test parallel complex get, put, set, del mix\n\n\/\/Tests operational limits: size\n\n\/\/Test sync\/nosync file\n\n\/\/Bench with diferents sizes\n\n\/\/Bench lots of gets\nfunc BenchmarkGet(b *testing.B) {\n\tdefer os.RemoveAll(\"benchdb\/\")\n\tdefer os.Chdir(\"..\/\")\n\tif testing.Verbose() {\n\t\tfmt.Println(\"\\tInserting\", b.N, \"keys...\")\n\t}\n\tdb := Create(\"benchdb\/\")\n\tm, _ := db.AllocMap(\"mapA\")\n\tkey := make([]byte, 4)\n\tlenValue := 100\n\tvalue := bytes.Repeat([]byte(\"X\"), lenValue)\n\tfor i := 0; i < b.N\/32+1; i++ {\n\t\tbinary.LittleEndian.PutUint32(key, uint32(3*i))\n\t\tbinary.LittleEndian.PutUint32(value, uint32(3*i))\n\t\terr := m.Put(key, value)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tgid := uint64(0)\n\tfmt.Println(\"get...\")\n\tb.ResetTimer()\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tsum := 0\n\t\tkey := make([]byte, 4)\n\t\tid := uint32(atomic.AddUint64(&gid, 1))\n\t\tfmt.Println(id)\n\t\tr := rand.New(rand.NewSource(int64(id)))\n\t\tfor i := 0; pb.Next(); i++ {\n\t\t\tbinary.LittleEndian.PutUint32(key, uint32(3*r.Intn(b.N\/32+1)))\n\t\t\tv, err := m.Get(key)\n\t\t\tsum += int(v[len(v)-1])\n\t\t\tif err != nil || !bytes.Equal(v, v) {\n\t\t\t\tb.Fatal(\"Key not present\", key, id, i, v, sum)\n\t\t\t}\n\t\t}\n\t})\n\tb.StopTimer()\n\tdb.Close()\n}\n\n\/\/Bench lots of gets, in a parallel way\n\n\/\/Bench lots of puts\n\n\/\/Bench lots of puts, in a parallel way\n*\/\n<commit_msg>core - TestSimpleLots recuperado<commit_after>package tlcore\n\nimport (\n\t\"encoding\/binary\"\n\t\"os\"\n\t\"testing\"\n)\nimport \"bytes\"\n\n\/*\nThis file contains all core testing functions.\n\n*\/\n\nconst numChunks = 8\n\n\/\/Test a simple put & get\nfunc TestSimple(t *testing.T) {\n\tm := NewMap(\"testdb\/\", numChunks)\n\tdefer os.RemoveAll(\"testdb\/\")\n\tdefer m.Close()\n\n\tm.Set([]byte(\"k1\"), []byte(\"v1\"))\n\trval, _ := m.Get([]byte(\"k1\"))\n\tif !bytes.Equal([]byte(\"v1\"), rval) {\n\t\tt.Fatal(\"Error: value mismatch\")\n\t}\n}\n\n\/\/Test a simple put & get (after a map close())\nfunc TestSimpleRestore(t *testing.T) {\n\tdefer os.RemoveAll(\"testdb\/\")\n\t{\n\t\tm := NewMap(\"testdb\/\", numChunks)\n\t\tm.Set([]byte(\"k1\"), []byte(\"v1\"))\n\t\tm.Close()\n\t}\n\t{\n\t\tm := OpenMap(\"testdb\/\")\n\t\trval, _ := m.Get([]byte(\"k1\"))\n\t\tif !bytes.Equal([]byte(\"v1\"), rval) {\n\t\t\tt.Fatal(\"Error: value mismatch\")\n\t\t}\n\t\tm.Close()\n\t}\n}\n\n\/\/Test lots of simple put & get before and after closing the map\nfunc TestSimpleLots(t *testing.T) {\n\tdefer os.RemoveAll(\"testdb\/\")\n\tk := make([]byte, 4)\n\tv := make([]byte, 4)\n\t{\n\t\tm1 := NewMap(\"testdb\/\", numChunks)\n\t\tfor i := 0; i < 128*1024; i++ {\n\t\t\tbinary.LittleEndian.PutUint32(k, uint32(i))\n\t\t\tbinary.LittleEndian.PutUint32(v, uint32(i))\n\t\t\tm1.Set(k, v)\n\t\t}\n\t\tfor i := 0; i < 128*1024; i++ {\n\t\t\tbinary.LittleEndian.PutUint32(k, uint32(i))\n\t\t\tbinary.LittleEndian.PutUint32(v, uint32(i))\n\t\t\trval, _ := m1.Get(k)\n\t\t\tif !bytes.Equal(v, rval) {\n\t\t\t\tt.Fatal(\"Error 1: value mismatch\")\n\t\t\t}\n\t\t}\n\t\tm1.Close()\n\t}\n\t{\n\t\tm2 := OpenMap(\"testdb\/\")\n\t\tfor i := 0; i < 128*1024; i++ {\n\t\t\tbinary.LittleEndian.PutUint32(k, uint32(i))\n\t\t\tbinary.LittleEndian.PutUint32(v, uint32(i))\n\t\t\trval, _ := m2.Get(k)\n\t\t\tif !bytes.Equal(v, rval) {\n\t\t\t\tt.Fatal(\"Error 2: value mismatch\")\n\t\t\t}\n\t\t}\n\t\tm2.Close()\n\t}\n}\n\n\/*\n\/\/Test lots of simple put & get, in a parallel way, test multi thread safety\nfunc TestParSimpleLots(t *testing.T) {\n\tdefer os.RemoveAll(\"testdb\/\")\n\tdefer os.Chdir(\"..\/\")\n\tk := make([]byte, 4)\n\tv := make([]byte, 4)\n\t{\n\t\tdb := Create(\"testdb\")\n\t\tm1, _ := db.AllocMap(\"mapa1\")\n\n\t\tfor tid := 0; tid < 27; tid++ {\n\t\t\tfor i := 0; i < 1024; i++ {\n\t\t\t\tbinary.LittleEndian.PutUint32(k, uint32(i*27+tid))\n\t\t\t\tbinary.LittleEndian.PutUint32(v, uint32(i*27+tid))\n\t\t\t\tm1.Put(k, v)\n\t\t\t}\n\t\t}\n\n\t\tfor i := 0; i < 27*1024; i++ {\n\t\t\tbinary.LittleEndian.PutUint32(k, uint32(i))\n\t\t\tbinary.LittleEndian.PutUint32(v, uint32(i))\n\t\t\trval, _ := m1.Get(k)\n\t\t\tif !bytes.Equal(v, rval) {\n\t\t\t\tt.Fatal(\"Err 1: mismatch\")\n\t\t\t}\n\t\t}\n\n\t\tdb.Close()\n\t}\n\t{\n\t\tdb := Open(\"testdb\")\n\t\tm1 := db.mapsByName[\"mapa1\"]\n\t\tfor i := 0; i < 27*1024; i++ {\n\t\t\tbinary.LittleEndian.PutUint32(k, uint32(i))\n\t\t\tbinary.LittleEndian.PutUint32(v, uint32(i))\n\t\t\trval, _ := m1.Get(k)\n\t\t\tif !bytes.Equal(v, rval) {\n\t\t\t\tt.Fatal(\"Err 2: mismatch\")\n\t\t\t}\n\t\t}\n\t\tdb.Close()\n\t}\n}\n\n\/\/Test simple set\n\/\/Test simple del\n\n\/\/Test complex get, put, set, del mix\n\n\/\/Common functions test\nfunc TestCmplx1(t *testing.T) {\n\tcoreTest(100000, 11, 129, 64)\n}\n\n\/\/Low key size: test delete operation\nfunc TestCmplx2(t *testing.T) {\n\tcoreTest(100000, 2, 129, 64)\n}\n\n\/\/Large key size\nfunc TestCmplx3(t *testing.T) {\n\tcoreTest(10000, 130, 129, 64)\n}\n\n\/\/Large value size\nfunc TestCmplx4(t *testing.T) {\n\tcoreTest(10000, 11, 555, 64)\n}\n\n\/\/Test low value size\nfunc TestCmplx5(t *testing.T) {\n\tcoreTest(10000, 11, 2, 64)\n}\n\nfunc operate() {\n\n}\nfunc goOperate() {\n\n}\nfunc dbOperate() {\n\n}\nfunc checkDB() {\n\n}\n\nfunc coreTest(numOperations, maxKeySize, maxValueSize, threads int) {\n\t\/\/Operate on built-in map\n\n\tgoMap := make(map[string][]byte)\n\tvar goDeletes []([]byte)\n\tfor core := 0; core < threads; core++ {\n\t\tr := rand.New(rand.NewSource(int64(core)))\n\t\tbase := make([]byte, 4)\n\t\tbase2 := make([]byte, 4)\n\t\tfor i := 0; i < numOperations; i++ {\n\t\t\topType := 1 + r.Intn(3)\n\t\t\topKeySize := r.Intn(maxKeySize-1) + 1\n\t\t\topValueSize := r.Intn(maxValueSize-1) + 1\n\t\t\tbinary.LittleEndian.PutUint32(base, uint32(r.Int31()*64)+uint32(core))\n\t\t\tbinary.LittleEndian.PutUint32(base2, uint32(i*64+core))\n\t\t\tkey := bytes.Repeat([]byte(base), opKeySize)\n\t\t\tvalue := bytes.Repeat([]byte(base), opValueSize)\n\t\t\t\/\/fmt.Println(\"gomap\", opType, key, value)\n\t\t\tswitch opType {\n\t\t\tcase OpPut:\n\t\t\t\tif _, ok := goMap[string(key)]; !ok {\n\t\t\t\t\tgoMap[string(key)] = value\n\t\t\t\t}\n\t\t\tcase OpDel:\n\t\t\t\tif _, ok := goMap[string(key)]; ok {\n\t\t\t\t\tdelete(goMap, string(key))\n\t\t\t\t\tgoDeletes = append(goDeletes, key)\n\t\t\t\t}\n\t\t\tcase OpSet:\n\t\t\t\tif _, ok := goMap[string(key)]; ok {\n\t\t\t\t\tgoMap[string(key)] = value\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\/\/Operate on DB\n\tdb := Create(\"testdb2\")\n\tdefer os.RemoveAll(\"testdb2\/\")\n\tdefer os.Chdir(\"..\/\")\n\n\tm1, _ := db.AllocMap(\"mapa1\")\n\n\tvar w sync.WaitGroup\n\tw.Add(threads)\n\tfor core := 0; core < threads; core++ {\n\t\tgo func(core int) {\n\t\t\tr := rand.New(rand.NewSource(int64(core)))\n\t\t\tbase := make([]byte, 4)\n\t\t\tbase2 := make([]byte, 4)\n\t\t\tfor i := 0; i < numOperations; i++ {\n\t\t\t\topType := 1 + r.Intn(3)\n\t\t\t\topKeySize := r.Intn(maxKeySize-1) + 1\n\t\t\t\topValueSize := r.Intn(maxValueSize-1) + 1\n\t\t\t\tbinary.LittleEndian.PutUint32(base, uint32(r.Int31()*64)+uint32(core))\n\t\t\t\tbinary.LittleEndian.PutUint32(base2, uint32(i*64+core))\n\t\t\t\tkey := bytes.Repeat([]byte(base), opKeySize)[0:opKeySize]\n\t\t\t\tvalue := bytes.Repeat([]byte(base2), opValueSize)[0:opValueSize]\n\t\t\t\t\/\/fmt.Println(\"db \", opType, key, value)\n\t\t\t\tswitch opType {\n\t\t\t\tcase OpPut:\n\t\t\t\t\tm1.Put(key, value)\n\t\t\t\tcase OpDel:\n\t\t\t\t\tm1.Delete(key)\n\t\t\t\tcase OpSet:\n\t\t\t\t\tm1.Set(key, value)\n\t\t\t\t}\n\t\t\t}\n\t\t\tw.Done()\n\t\t}(core)\n\t}\n\tw.Wait()\n\t\/\/Check map is in DB\n\tfor key, value := range goMap {\n\t\trval, err := m1.Get([]byte(key))\n\t\t\/\/fmt.Println([]byte(key), value)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif !bytes.Equal(rval, value) {\n\t\t\tpanic(1)\n\t\t}\n\t}\n\n\t\/\/Check deleteds aren't in DB\n\tfmt.Println(\"Tested deletes:\", len(goDeletes))\n\tfor i := 0; i < len(goDeletes); i++ {\n\t\tkey := goDeletes[i]\n\t\t_, err := m1.Get([]byte(key))\n\t\tif err == nil {\n\t\t\tpanic(2)\n\t\t}\n\t}\n\t\/\/Close DB\n\tdb.Close()\n\n\t{\n\t\t\/\/Restore DB\n\t\tdb := Open(\"testdb2\")\n\t\tm1 := db.mapsByName[\"mapa1\"]\n\t\t\/\/Check again\n\t\t\/\/Check map is in DB\n\t\tfor key, value := range goMap {\n\t\t\trval, err := m1.Get([]byte(key))\n\t\t\t\/\/fmt.Println([]byte(key), value)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tif !bytes.Equal(rval, value) {\n\t\t\t\tpanic(1)\n\t\t\t}\n\t\t}\n\n\t\t\/\/Check deleteds aren't in DB\n\t\tfmt.Println(\"Tested deletes:\", len(goDeletes))\n\t\tfor i := 0; i < len(goDeletes); i++ {\n\t\t\tkey := goDeletes[i]\n\t\t\t_, err := m1.Get([]byte(key))\n\t\t\tif err == nil {\n\t\t\t\tpanic(2)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/Test parallel complex get, put, set, del mix\n\n\/\/Tests operational limits: size\n\n\/\/Test sync\/nosync file\n\n\/\/Bench with diferents sizes\n\n\/\/Bench lots of gets\nfunc BenchmarkGet(b *testing.B) {\n\tdefer os.RemoveAll(\"benchdb\/\")\n\tdefer os.Chdir(\"..\/\")\n\tif testing.Verbose() {\n\t\tfmt.Println(\"\\tInserting\", b.N, \"keys...\")\n\t}\n\tdb := Create(\"benchdb\/\")\n\tm, _ := db.AllocMap(\"mapA\")\n\tkey := make([]byte, 4)\n\tlenValue := 100\n\tvalue := bytes.Repeat([]byte(\"X\"), lenValue)\n\tfor i := 0; i < b.N\/32+1; i++ {\n\t\tbinary.LittleEndian.PutUint32(key, uint32(3*i))\n\t\tbinary.LittleEndian.PutUint32(value, uint32(3*i))\n\t\terr := m.Put(key, value)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tgid := uint64(0)\n\tfmt.Println(\"get...\")\n\tb.ResetTimer()\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tsum := 0\n\t\tkey := make([]byte, 4)\n\t\tid := uint32(atomic.AddUint64(&gid, 1))\n\t\tfmt.Println(id)\n\t\tr := rand.New(rand.NewSource(int64(id)))\n\t\tfor i := 0; pb.Next(); i++ {\n\t\t\tbinary.LittleEndian.PutUint32(key, uint32(3*r.Intn(b.N\/32+1)))\n\t\t\tv, err := m.Get(key)\n\t\t\tsum += int(v[len(v)-1])\n\t\t\tif err != nil || !bytes.Equal(v, v) {\n\t\t\t\tb.Fatal(\"Key not present\", key, id, i, v, sum)\n\t\t\t}\n\t\t}\n\t})\n\tb.StopTimer()\n\tdb.Close()\n}\n\n\/\/Bench lots of gets, in a parallel way\n\n\/\/Bench lots of puts\n\n\/\/Bench lots of puts, in a parallel way\n*\/\n<|endoftext|>"} {"text":"<commit_before>package wikifier\n<commit_msg>html{}<commit_after>package wikifier\n\ntype htmlBlock struct {\n\t*parserBlock\n}\n\nfunc newHTMLBlock(name string, b *parserBlock) block {\n\treturn &fmtBlock{parserBlock: b}\n}\n\nfunc (b *htmlBlock) html(page *Page, el element) {\n\tel.setMeta(\"noIndent\", true)\n\tel.setMeta(\"noTags\", true)\n\tfor _, item := range b.visiblePosContent() {\n\t\tel.add(item.content)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\n\/\/ Validates the execution of a process\n\ntype ExecutionValidation struct {\n\tFatal bool \/\/ If matched, should we abort the (sequence of) operation(s)?\n\tMustContain bool \/\/ Should this be in there?\n\tOutputStream int \/\/ 1 = standard output, 2 error output\n\tText string \/\/ Text to match\n}\n\n\/\/ Must contain XYZ\nfunc newExecutionValidationStandardOutputMustContain(txt string) *ExecutionValidation {\n\treturn &ExecutionValidation{\n\t\tFatal: true,\n\t\tMustContain: true,\n\t\tText: txt,\n\t\tOutputStream: 1,\n\t}\n}\n<commit_msg>Abstract constructor<commit_after>package main\n\n\/\/ Validates the execution of a process\n\ntype ExecutionValidation struct {\n\tFatal bool \/\/ If matched, should we abort the (sequence of) operation(s)?\n\tMustContain bool \/\/ Should this be in there?\n\tOutputStream int \/\/ 1 = standard output, 2 error output\n\tText string \/\/ Text to match\n}\n\n\/\/ Must contain XYZ\nfunc newExecutionValidation(txt string, fatal bool, mustContain bool, outputStream int) *ExecutionValidation {\n\t\/\/ Validate stream\n\tif outputStream != 1 && outputStream != 2 {\n\t\treturn nil\n\t}\n\n\t\/\/ Must have text\n\tif len(txt) < 1 {\n\t\treturn nil\n\t}\n\n\treturn &ExecutionValidation{\n\t\tFatal: true,\n\t\tMustContain: true,\n\t\tText: txt,\n\t\tOutputStream: 1,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage executor\n\nimport (\n\t\"math\/rand\"\n\n\t. \"github.com\/pingcap\/check\"\n\t\"github.com\/pingcap\/tidb\/parser\/mysql\"\n\t\"github.com\/pingcap\/tidb\/planner\/core\"\n\t\"github.com\/pingcap\/tidb\/types\"\n\t\"github.com\/pingcap\/tidb\/util\/chunk\"\n)\n\nvar _ = Suite(&testSuiteJoiner{})\n\ntype testSuiteJoiner struct{}\n\nfunc (s *testSuiteJoiner) SetUpSuite(c *C) {\n}\n\nfunc (s *testSuiteJoiner) TestRequiredRows(c *C) {\n\tjoinTypes := []core.JoinType{core.InnerJoin, core.LeftOuterJoin, core.RightOuterJoin}\n\tlTypes := [][]byte{\n\t\t{mysql.TypeLong},\n\t\t{mysql.TypeFloat},\n\t\t{mysql.TypeLong, mysql.TypeFloat},\n\t}\n\trTypes := lTypes\n\n\tconvertTypes := func(mysqlTypes []byte) []*types.FieldType {\n\t\tfieldTypes := make([]*types.FieldType, 0, len(mysqlTypes))\n\t\tfor _, t := range mysqlTypes {\n\t\t\tfieldTypes = append(fieldTypes, types.NewFieldType(t))\n\t\t}\n\t\treturn fieldTypes\n\t}\n\n\tfor _, joinType := range joinTypes {\n\t\tfor _, ltype := range lTypes {\n\t\t\tfor _, rtype := range rTypes {\n\t\t\t\tmaxChunkSize := defaultCtx().GetSessionVars().MaxChunkSize\n\t\t\t\tlfields := convertTypes(ltype)\n\t\t\t\trfields := convertTypes(rtype)\n\t\t\t\touterRow := genTestChunk(maxChunkSize, 1, lfields).GetRow(0)\n\t\t\t\tinnerChk := genTestChunk(maxChunkSize, maxChunkSize, rfields)\n\t\t\t\tvar defaultInner []types.Datum\n\t\t\t\tfor i, f := range rfields {\n\t\t\t\t\tdefaultInner = append(defaultInner, innerChk.GetRow(0).GetDatum(i, f))\n\t\t\t\t}\n\t\t\t\tjoiner := newJoiner(defaultCtx(), joinType, false, defaultInner, nil, lfields, rfields, nil)\n\n\t\t\t\tfields := make([]*types.FieldType, 0, len(lfields)+len(rfields))\n\t\t\t\tfields = append(fields, rfields...)\n\t\t\t\tfields = append(fields, lfields...)\n\t\t\t\tresult := chunk.New(fields, maxChunkSize, maxChunkSize)\n\n\t\t\t\tfor i := 0; i < 10; i++ {\n\t\t\t\t\trequired := rand.Int()%maxChunkSize + 1\n\t\t\t\t\tresult.SetRequiredRows(required, maxChunkSize)\n\t\t\t\t\tresult.Reset()\n\t\t\t\t\tit := chunk.NewIterator4Chunk(innerChk)\n\t\t\t\t\tit.Begin()\n\t\t\t\t\t_, _, err := joiner.tryToMatchInners(outerRow, it, result)\n\t\t\t\t\tc.Assert(err, IsNil)\n\t\t\t\t\tc.Assert(result.NumRows(), Equals, required)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc genTestChunk(maxChunkSize int, numRows int, fields []*types.FieldType) *chunk.Chunk {\n\tchk := chunk.New(fields, maxChunkSize, maxChunkSize)\n\tfor numRows > 0 {\n\t\tnumRows--\n\t\tfor col, field := range fields {\n\t\t\tswitch field.Tp {\n\t\t\tcase mysql.TypeLong:\n\t\t\t\tchk.AppendInt64(col, 0)\n\t\t\tcase mysql.TypeFloat:\n\t\t\t\tchk.AppendFloat32(col, 0)\n\t\t\tdefault:\n\t\t\t\tpanic(\"not support\")\n\t\t\t}\n\t\t}\n\t}\n\treturn chk\n}\n<commit_msg>executor: migrate test-infra to testify for joiner_test.go (#32454)<commit_after>\/\/ Copyright 2019 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage executor\n\nimport (\n\t\"math\/rand\"\n\t\"testing\"\n\n\t\"github.com\/pingcap\/tidb\/parser\/mysql\"\n\t\"github.com\/pingcap\/tidb\/planner\/core\"\n\t\"github.com\/pingcap\/tidb\/types\"\n\t\"github.com\/pingcap\/tidb\/util\/chunk\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestRequiredRows(t *testing.T) {\n\tjoinTypes := []core.JoinType{core.InnerJoin, core.LeftOuterJoin, core.RightOuterJoin}\n\tlTypes := [][]byte{\n\t\t{mysql.TypeLong},\n\t\t{mysql.TypeFloat},\n\t\t{mysql.TypeLong, mysql.TypeFloat},\n\t}\n\trTypes := lTypes\n\n\tconvertTypes := func(mysqlTypes []byte) []*types.FieldType {\n\t\tfieldTypes := make([]*types.FieldType, 0, len(mysqlTypes))\n\t\tfor _, t := range mysqlTypes {\n\t\t\tfieldTypes = append(fieldTypes, types.NewFieldType(t))\n\t\t}\n\t\treturn fieldTypes\n\t}\n\n\tfor _, joinType := range joinTypes {\n\t\tfor _, ltype := range lTypes {\n\t\t\tfor _, rtype := range rTypes {\n\t\t\t\tmaxChunkSize := defaultCtx().GetSessionVars().MaxChunkSize\n\t\t\t\tlfields := convertTypes(ltype)\n\t\t\t\trfields := convertTypes(rtype)\n\t\t\t\touterRow := genTestChunk(maxChunkSize, 1, lfields).GetRow(0)\n\t\t\t\tinnerChk := genTestChunk(maxChunkSize, maxChunkSize, rfields)\n\t\t\t\tvar defaultInner []types.Datum\n\t\t\t\tfor i, f := range rfields {\n\t\t\t\t\tdefaultInner = append(defaultInner, innerChk.GetRow(0).GetDatum(i, f))\n\t\t\t\t}\n\t\t\t\tjoiner := newJoiner(defaultCtx(), joinType, false, defaultInner, nil, lfields, rfields, nil)\n\n\t\t\t\tfields := make([]*types.FieldType, 0, len(lfields)+len(rfields))\n\t\t\t\tfields = append(fields, rfields...)\n\t\t\t\tfields = append(fields, lfields...)\n\t\t\t\tresult := chunk.New(fields, maxChunkSize, maxChunkSize)\n\n\t\t\t\tfor i := 0; i < 10; i++ {\n\t\t\t\t\trequired := rand.Int()%maxChunkSize + 1\n\t\t\t\t\tresult.SetRequiredRows(required, maxChunkSize)\n\t\t\t\t\tresult.Reset()\n\t\t\t\t\tit := chunk.NewIterator4Chunk(innerChk)\n\t\t\t\t\tit.Begin()\n\t\t\t\t\t_, _, err := joiner.tryToMatchInners(outerRow, it, result)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\trequire.Equal(t, required, result.NumRows())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc genTestChunk(maxChunkSize int, numRows int, fields []*types.FieldType) *chunk.Chunk {\n\tchk := chunk.New(fields, maxChunkSize, maxChunkSize)\n\tfor numRows > 0 {\n\t\tnumRows--\n\t\tfor col, field := range fields {\n\t\t\tswitch field.Tp {\n\t\t\tcase mysql.TypeLong:\n\t\t\t\tchk.AppendInt64(col, 0)\n\t\t\tcase mysql.TypeFloat:\n\t\t\t\tchk.AppendFloat32(col, 0)\n\t\t\tdefault:\n\t\t\t\tpanic(\"not support\")\n\t\t\t}\n\t\t}\n\t}\n\treturn chk\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cache\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/google\/trillian\/merkle\/compact\"\n\t\"github.com\/google\/trillian\/merkle\/hashers\"\n\t\"github.com\/google\/trillian\/storage\"\n\t\"github.com\/google\/trillian\/storage\/storagepb\"\n)\n\n\/\/ NewLogSubtreeCache creates and returns a SubtreeCache appropriate for use with a log\n\/\/ tree. The caller must supply the strata depths to be used and a suitable LogHasher.\nfunc NewLogSubtreeCache(logStrata []int, hasher hashers.LogHasher) SubtreeCache {\n\treturn NewSubtreeCache(logStrata, populateLogSubtreeNodes(hasher), prepareLogSubtreeWrite())\n}\n\n\/\/ LogPopulateFunc obtains a log storage population function based on a supplied LogHasher.\n\/\/ This is intended for use by storage utilities.\nfunc LogPopulateFunc(hasher hashers.LogHasher) storage.PopulateSubtreeFunc {\n\treturn populateLogSubtreeNodes(hasher)\n}\n\n\/\/ populateLogSubtreeNodes re-creates a Log subtree's InternalNodes from the\n\/\/ subtree Leaves map.\n\/\/\n\/\/ This uses the compact Merkle tree to repopulate internal nodes, and so will\n\/\/ handle imperfect (but left-hand dense) subtrees. Note that we only rebuild internal\n\/\/ nodes when the subtree is fully populated. For an explanation of why see the comments\n\/\/ below for PrepareLogSubtreeWrite.\nfunc populateLogSubtreeNodes(hasher hashers.LogHasher) storage.PopulateSubtreeFunc {\n\treturn func(st *storagepb.SubtreeProto) error {\n\t\tcmt := compact.NewTree(hasher)\n\t\tif st.Depth < 1 {\n\t\t\treturn fmt.Errorf(\"populate log subtree with invalid depth: %d\", st.Depth)\n\t\t}\n\t\t\/\/ maxLeaves is the number of leaves that fully populates a subtree of the depth we are\n\t\t\/\/ working with.\n\t\tmaxLeaves := 1 << uint(st.Depth)\n\n\t\t\/\/ If the subtree is fully populated then the internal node map is expected to be nil but in\n\t\t\/\/ case it isn't we recreate it as we're about to rebuild the contents. We'll check\n\t\t\/\/ below that the number of nodes is what we expected to have.\n\t\tif st.InternalNodes == nil || len(st.Leaves) == maxLeaves {\n\t\t\tst.InternalNodes = make(map[string][]byte)\n\t\t}\n\n\t\t\/\/ We need to update the subtree root hash regardless of whether it's fully populated\n\t\tfor leafIndex := int64(0); leafIndex < int64(len(st.Leaves)); leafIndex++ {\n\t\t\tnodeID := storage.NewNodeIDFromPrefix(st.Prefix, logStrataDepth, leafIndex, logStrataDepth, maxLogDepth)\n\t\t\t_, sfx := nodeID.Split(len(st.Prefix), int(st.Depth))\n\t\t\tsfxKey := sfx.String()\n\t\t\th := st.Leaves[sfxKey]\n\t\t\tif h == nil {\n\t\t\t\treturn fmt.Errorf(\"unexpectedly got nil for subtree leaf suffix %s\", sfx)\n\t\t\t}\n\t\t\tseq, err := cmt.AddLeafHash(h, func(height int, index int64, h []byte) error {\n\t\t\t\tif height == logStrataDepth && index == 0 {\n\t\t\t\t\t\/\/ no space for the root in the node cache\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tsubDepth := logStrataDepth - height\n\t\t\t\tnodeID := storage.NewNodeIDFromPrefix(st.Prefix, subDepth, index, logStrataDepth, maxLogDepth)\n\t\t\t\t_, sfx := nodeID.Split(len(st.Prefix), int(st.Depth))\n\t\t\t\tsfxKey := sfx.String()\n\t\t\t\t\/\/ Don't put leaves into the internal map and only update if we're rebuilding internal\n\t\t\t\t\/\/ nodes. If the subtree was saved with internal nodes then we don't touch the map.\n\t\t\t\tif height > 0 && len(st.Leaves) == maxLeaves {\n\t\t\t\t\tst.InternalNodes[sfxKey] = h\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif got, expected := seq, leafIndex; got != expected {\n\t\t\t\treturn fmt.Errorf(\"got seq of %d, but expected %d\", got, expected)\n\t\t\t}\n\t\t}\n\t\tst.RootHash = cmt.CurrentRoot()\n\n\t\t\/\/ Additional check - after population we should have the same number of internal nodes\n\t\t\/\/ as before the subtree was written to storage. Either because they were loaded from\n\t\t\/\/ storage or just rebuilt above.\n\t\tif got, want := uint32(len(st.InternalNodes)), st.InternalNodeCount; got != want {\n\t\t\t\/\/ TODO(Martin2112): Possibly replace this with stronger checks on the data in\n\t\t\t\/\/ subtrees on disk so we can detect corruption.\n\t\t\treturn fmt.Errorf(\"log repop got: %d internal nodes, want: %d\", got, want)\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\n\/\/ prepareLogSubtreeWrite prepares a log subtree for writing. If the subtree is fully\n\/\/ populated the internal nodes are cleared. Otherwise they are written.\n\/\/\n\/\/ To see why this is necessary consider the case where a tree has a single full subtree\n\/\/ and then an additional leaf is added.\n\/\/\n\/\/ This causes an extra level to be added to the tree with an internal node that is a hash\n\/\/ of the root of the left full subtree and the new leaf. Note that the leaves remain at\n\/\/ level zero in the overall tree coordinate space but they are now in a lower subtree stratum\n\/\/ than they were before the last node was added as the tree has grown above them.\n\/\/\n\/\/ Thus in the case just discussed the internal nodes cannot be correctly reconstructed\n\/\/ in isolation when the tree is reloaded because of the dependency on another subtree.\n\/\/\n\/\/ Fully populated subtrees don't have this problem because by definition they can only\n\/\/ contain internal nodes built from their own contents.\nfunc prepareLogSubtreeWrite() storage.PrepareSubtreeWriteFunc {\n\treturn func(st *storagepb.SubtreeProto) error {\n\t\tst.InternalNodeCount = uint32(len(st.InternalNodes))\n\t\tif st.Depth < 1 {\n\t\t\treturn fmt.Errorf(\"prepare subtree for log write invalid depth: %d\", st.Depth)\n\t\t}\n\t\tmaxLeaves := 1 << uint(st.Depth)\n\t\t\/\/ If the subtree is fully populated we can safely clear the internal nodes\n\t\tif len(st.Leaves) == maxLeaves {\n\t\t\tst.InternalNodes = nil\n\t\t}\n\t\treturn nil\n\t}\n}\n<commit_msg>Small subtree cache optimization.<commit_after>\/\/ Copyright 2017 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cache\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/google\/trillian\/merkle\/compact\"\n\t\"github.com\/google\/trillian\/merkle\/hashers\"\n\t\"github.com\/google\/trillian\/storage\"\n\t\"github.com\/google\/trillian\/storage\/storagepb\"\n)\n\n\/\/ NewLogSubtreeCache creates and returns a SubtreeCache appropriate for use with a log\n\/\/ tree. The caller must supply the strata depths to be used and a suitable LogHasher.\nfunc NewLogSubtreeCache(logStrata []int, hasher hashers.LogHasher) SubtreeCache {\n\treturn NewSubtreeCache(logStrata, populateLogSubtreeNodes(hasher), prepareLogSubtreeWrite())\n}\n\n\/\/ LogPopulateFunc obtains a log storage population function based on a supplied LogHasher.\n\/\/ This is intended for use by storage utilities.\nfunc LogPopulateFunc(hasher hashers.LogHasher) storage.PopulateSubtreeFunc {\n\treturn populateLogSubtreeNodes(hasher)\n}\n\n\/\/ populateLogSubtreeNodes re-creates a Log subtree's InternalNodes from the\n\/\/ subtree Leaves map.\n\/\/\n\/\/ This uses the compact Merkle tree to repopulate internal nodes, and so will\n\/\/ handle imperfect (but left-hand dense) subtrees. Note that we only rebuild internal\n\/\/ nodes when the subtree is fully populated. For an explanation of why see the comments\n\/\/ below for PrepareLogSubtreeWrite.\nfunc populateLogSubtreeNodes(hasher hashers.LogHasher) storage.PopulateSubtreeFunc {\n\treturn func(st *storagepb.SubtreeProto) error {\n\t\tcmt := compact.NewTree(hasher)\n\t\tif st.Depth < 1 {\n\t\t\treturn fmt.Errorf(\"populate log subtree with invalid depth: %d\", st.Depth)\n\t\t}\n\t\t\/\/ maxLeaves is the number of leaves that fully populates a subtree of the depth we are\n\t\t\/\/ working with.\n\t\tmaxLeaves := 1 << uint(st.Depth)\n\n\t\t\/\/ If the subtree is fully populated then the internal node map is expected to be nil but in\n\t\t\/\/ case it isn't we recreate it as we're about to rebuild the contents. We'll check\n\t\t\/\/ below that the number of nodes is what we expected to have.\n\t\tif st.InternalNodes == nil || len(st.Leaves) == maxLeaves {\n\t\t\tst.InternalNodes = make(map[string][]byte)\n\t\t}\n\n\t\t\/\/ We need to update the subtree root hash regardless of whether it's fully populated\n\t\tfor leafIndex := int64(0); leafIndex < int64(len(st.Leaves)); leafIndex++ {\n\t\t\tnodeID := storage.NewNodeIDFromPrefix(st.Prefix, logStrataDepth, leafIndex, logStrataDepth, maxLogDepth)\n\t\t\t_, sfx := nodeID.Split(len(st.Prefix), int(st.Depth))\n\t\t\tsfxKey := sfx.String()\n\t\t\th := st.Leaves[sfxKey]\n\t\t\tif h == nil {\n\t\t\t\treturn fmt.Errorf(\"unexpectedly got nil for subtree leaf suffix %s\", sfx)\n\t\t\t}\n\t\t\tseq, err := cmt.AddLeafHash(h, func(height int, index int64, h []byte) error {\n\t\t\t\tif height == logStrataDepth && index == 0 {\n\t\t\t\t\t\/\/ no space for the root in the node cache\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\t\/\/ Don't put leaves into the internal map and only update if we're rebuilding internal\n\t\t\t\t\/\/ nodes. If the subtree was saved with internal nodes then we don't touch the map.\n\t\t\t\tif height > 0 && len(st.Leaves) == maxLeaves {\n\t\t\t\t\tsubDepth := logStrataDepth - height\n\t\t\t\t\tnodeID := storage.NewNodeIDFromPrefix(st.Prefix, subDepth, index, logStrataDepth, maxLogDepth)\n\t\t\t\t\t_, sfx := nodeID.Split(len(st.Prefix), int(st.Depth))\n\t\t\t\t\tsfxKey := sfx.String()\n\t\t\t\t\tst.InternalNodes[sfxKey] = h\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif got, expected := seq, leafIndex; got != expected {\n\t\t\t\treturn fmt.Errorf(\"got seq of %d, but expected %d\", got, expected)\n\t\t\t}\n\t\t}\n\t\tst.RootHash = cmt.CurrentRoot()\n\n\t\t\/\/ Additional check - after population we should have the same number of internal nodes\n\t\t\/\/ as before the subtree was written to storage. Either because they were loaded from\n\t\t\/\/ storage or just rebuilt above.\n\t\tif got, want := uint32(len(st.InternalNodes)), st.InternalNodeCount; got != want {\n\t\t\t\/\/ TODO(Martin2112): Possibly replace this with stronger checks on the data in\n\t\t\t\/\/ subtrees on disk so we can detect corruption.\n\t\t\treturn fmt.Errorf(\"log repop got: %d internal nodes, want: %d\", got, want)\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\n\/\/ prepareLogSubtreeWrite prepares a log subtree for writing. If the subtree is fully\n\/\/ populated the internal nodes are cleared. Otherwise they are written.\n\/\/\n\/\/ To see why this is necessary consider the case where a tree has a single full subtree\n\/\/ and then an additional leaf is added.\n\/\/\n\/\/ This causes an extra level to be added to the tree with an internal node that is a hash\n\/\/ of the root of the left full subtree and the new leaf. Note that the leaves remain at\n\/\/ level zero in the overall tree coordinate space but they are now in a lower subtree stratum\n\/\/ than they were before the last node was added as the tree has grown above them.\n\/\/\n\/\/ Thus in the case just discussed the internal nodes cannot be correctly reconstructed\n\/\/ in isolation when the tree is reloaded because of the dependency on another subtree.\n\/\/\n\/\/ Fully populated subtrees don't have this problem because by definition they can only\n\/\/ contain internal nodes built from their own contents.\nfunc prepareLogSubtreeWrite() storage.PrepareSubtreeWriteFunc {\n\treturn func(st *storagepb.SubtreeProto) error {\n\t\tst.InternalNodeCount = uint32(len(st.InternalNodes))\n\t\tif st.Depth < 1 {\n\t\t\treturn fmt.Errorf(\"prepare subtree for log write invalid depth: %d\", st.Depth)\n\t\t}\n\t\tmaxLeaves := 1 << uint(st.Depth)\n\t\t\/\/ If the subtree is fully populated we can safely clear the internal nodes\n\t\tif len(st.Leaves) == maxLeaves {\n\t\t\tst.InternalNodes = nil\n\t\t}\n\t\treturn nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2017 The OpenEBS Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage command\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/openebs\/maya\/pkg\/apis\/openebs.io\/v1alpha1\"\n\t\"github.com\/openebs\/maya\/pkg\/client\/mapiserver\"\n\t\"github.com\/openebs\/maya\/pkg\/util\"\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ VolumeInfo stores the volume information\ntype VolumeInfo struct {\n\tVolume v1alpha1.CASVolume\n}\n\n\/\/ CmdVolumeOptions stores information of volume being operated\ntype CmdVolumeOptions struct {\n\tvolName string\n\tsourceVolumeName string\n\tsnapshotName string\n\tsize string\n\tnamespace string\n\tjson string\n}\n\n\/\/ CASType is engine type\ntype CASType string\n\nconst (\n\t\/\/ VolumeAPIPath is the api path to get volume information\n\tVolumeAPIPath = \"\/latest\/volumes\/\"\n\tcontrollerStatusOk = \"running\"\n\tvolumeStatusOK = \"Running\"\n\t\/\/ JivaStorageEngine is constant for jiva engine\n\tJivaStorageEngine CASType = \"jiva\"\n\t\/\/ CstorStorageEngine is constant for cstor engine\n\tCstorStorageEngine CASType = \"cstor\"\n\ttimeout = 5 * time.Second\n)\n\nvar (\n\tvolumeCommandHelpText = `\nThe following commands helps in operating a Volume such as create, list, and so on.\n\nUsage: mayactl volume <subcommand> [options] [args]\n\nExamples:\n\n # Create a Volume:\n $ mayactl volume create --volname <vol> --size <size>\n\n # List Volumes:\n $ mayactl volume list\n\n # Delete a Volume:\n $ mayactl volume delete --volname <vol>\n\n # Delete a Volume created in 'test' namespace:\n $ mayactl volume delete --volname <vol> --namespace test\n\n # Statistics of a Volume:\n $ mayactl volume stats --volname <vol>\n\n # Statistics of a Volume created in 'test' namespace:\n $ mayactl volume stats --volname <vol> --namespace test\n\n # Info of a Volume:\n $ mayactl volume info --volname <vol>\n\n # Info of a Volume created in 'test' namespace:\n $ mayactl volume info --volname <vol> --namespace test\n`\n\toptions = &CmdVolumeOptions{\n\t\tnamespace: \"default\",\n\t}\n)\n\n\/\/ NewCmdVolume provides options for managing OpenEBS Volume\nfunc NewCmdVolume() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"volume\",\n\t\tShort: \"Provides operations related to a Volume\",\n\t\tLong: volumeCommandHelpText,\n\t}\n\n\tcmd.AddCommand(\n\t\tNewCmdVolumeCreate(),\n\t\tNewCmdVolumesList(),\n\t\tNewCmdVolumeDelete(),\n\t\tNewCmdVolumeStats(),\n\t\tNewCmdVolumeInfo(),\n\t)\n\tcmd.PersistentFlags().StringVarP(&options.namespace, \"namespace\", \"n\", options.namespace,\n\t\t\"namespace name, required if volume is not in the default namespace\")\n\n\tcmd.PersistentFlags().AddGoFlagSet(flag.CommandLine)\n\tflag.CommandLine.Parse([]string{})\n\treturn cmd\n}\n\n\/\/ Validate verifies whether a volume name,source name or snapshot name is provided or not followed by\n\/\/ stats command. It returns nil and proceeds to execute the command if there is\n\/\/ no error and returns an error if it is missing.\nfunc (c *CmdVolumeOptions) Validate(cmd *cobra.Command, snapshotnameverify, sourcenameverify, volnameverify bool) error {\n\tif snapshotnameverify {\n\t\tif len(c.snapshotName) == 0 {\n\t\t\treturn errors.New(\"--snapname is missing. Please provide a snapshotname\")\n\t\t}\n\t}\n\tif sourcenameverify {\n\t\tif len(c.sourceVolumeName) == 0 {\n\t\t\treturn errors.New(\"--sourcevol is missing. Please specify a sourcevolumename\")\n\t\t}\n\t}\n\tif volnameverify {\n\t\tif len(c.volName) == 0 {\n\t\t\treturn errors.New(\"--volname is missing. Please specify a unique volumename\")\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ NewVolumeInfo fetches and fills CASVolume structure from URL given to it\nfunc NewVolumeInfo(URL string, volname string, namespace string) (volInfo *VolumeInfo, err error) {\n\turl := URL\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\treq.Header.Set(\"namespace\", namespace)\n\n\tc := &http.Client{\n\t\tTimeout: timeout,\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\tfmt.Printf(\"Can't get a response, error found: %v\", err)\n\t\treturn\n\t}\n\tif resp != nil && resp.StatusCode != 200 {\n\t\tif resp.StatusCode == 500 {\n\t\t\tfmt.Printf(\"Volume: %s not found at namespace: %q\\n\", volname, namespace)\n\t\t\terr = util.InternalServerError\n\t\t} else if resp.StatusCode == 503 {\n\t\t\tfmt.Printf(\"maya apiservice not reachable at %q\\n\", mapiserver.GetURL())\n\t\t\terr = util.ServerUnavailable\n\t\t} else if resp.StatusCode == 404 {\n\t\t\tfmt.Printf(\"Volume: %s not found at namespace: %q error: %s\\n\", volname, namespace, http.StatusText(resp.StatusCode))\n\t\t\terr = util.PageNotFound\n\t\t}\n\t\tfmt.Printf(\"Received an error from maya apiservice: statuscode: %d\", resp.StatusCode)\n\t\terr = fmt.Errorf(\"Received an error from maya apiservice: statuscode: %d\", resp.StatusCode)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tcasVol := v1alpha1.CASVolume{}\n\terr = json.NewDecoder(resp.Body).Decode(&casVol)\n\tif err != nil {\n\t\tfmt.Printf(\"Response decode failed: error '%+v'\", err)\n\t\treturn\n\t}\n\tif casVol.Status.Reason == \"pending\" {\n\t\tfmt.Println(\"VOLUME status Unknown to maya apiservice\")\n\t\terr = fmt.Errorf(\"VOLUME status Unknown to maya apiservice\")\n\t\treturn\n\t}\n\tvolInfo = &VolumeInfo{\n\t\tVolume: casVol,\n\t}\n\treturn\n}\n\n\/\/ GetCASType returns the CASType of the volume in lowercase\nfunc (volInfo *VolumeInfo) GetCASType() string {\n\tif len(volInfo.Volume.Spec.CasType) == 0 {\n\t\treturn string(JivaStorageEngine)\n\t}\n\treturn strings.ToLower(volInfo.Volume.Spec.CasType)\n}\n\n\/\/ GetClusterIP returns the ClusterIP of the cluster\nfunc (volInfo *VolumeInfo) GetClusterIP() string {\n\tif val, ok := volInfo.Volume.ObjectMeta.Annotations[\"openebs.io\/cluster-ips\"]; ok {\n\t\treturn val\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"vsm.openebs.io\/cluster-ips\"]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n}\n\n\/\/ GetControllerStatus returns the status of the volume controller\nfunc (volInfo *VolumeInfo) GetControllerStatus() string {\n\tif val, ok := volInfo.Volume.ObjectMeta.Annotations[\"openebs.io\/controller-status\"]; ok {\n\t\treturn val\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"vsm.openebs.io\/controller-status\"]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n}\n\n\/\/ GetIQN returns the IQN of the volume\nfunc (volInfo *VolumeInfo) GetIQN() string {\n\tif len(volInfo.Volume.Spec.Iqn) > 0 {\n\t\treturn volInfo.Volume.Spec.Iqn\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"openebs.io\/iqn\"]; ok {\n\t\treturn val\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"vsm.openebs.io\/iqn\"]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n\n}\n\n\/\/ GetVolumeName returns the volume name\nfunc (volInfo *VolumeInfo) GetVolumeName() string {\n\treturn volInfo.Volume.ObjectMeta.Name\n}\n\n\/\/ GetTargetPortal returns the TargetPortal of the volume\nfunc (volInfo *VolumeInfo) GetTargetPortal() string {\n\tif len(volInfo.Volume.Spec.TargetPortal) > 0 {\n\t\treturn volInfo.Volume.Spec.TargetPortal\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"openebs.io\/targetportals\"]; ok {\n\t\treturn val\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"vsm.openebs.io\/targetportals\"]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n}\n\n\/\/ GetVolumeSize returns the capacity of the volume\nfunc (volInfo *VolumeInfo) GetVolumeSize() string {\n\tif len(volInfo.Volume.Spec.Capacity) > 0 {\n\t\treturn volInfo.Volume.Spec.Capacity\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"openebs.io\/volume-size\"]; ok {\n\t\treturn val\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"vsm.openebs.io\/volume-size\"]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n}\n\n\/\/ GetReplicaCount returns the volume replica count\nfunc (volInfo *VolumeInfo) GetReplicaCount() string {\n\tif len(volInfo.Volume.Spec.Replicas) > 0 {\n\t\treturn volInfo.Volume.Spec.Replicas\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"openebs.io\/replica-count\"]; ok {\n\t\treturn val\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"vsm.openebs.io\/replica-count\"]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n}\n\n\/\/ GetReplicaStatus returns the replica status of the volume replica\nfunc (volInfo *VolumeInfo) GetReplicaStatus() string {\n\tif val, ok := volInfo.Volume.ObjectMeta.Annotations[\"openebs.io\/replica-status\"]; ok {\n\t\treturn val\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"vsm.openebs.io\/replica-status\"]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n}\n\n\/\/ GetReplicaIP returns the IP of volume replica\nfunc (volInfo *VolumeInfo) GetReplicaIP() string {\n\tif val, ok := volInfo.Volume.ObjectMeta.Annotations[\"openebs.io\/replica-ips\"]; ok {\n\t\treturn val\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"vsm.openebs.io\/replica-ips\"]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n}\n<commit_msg>Disabled volume create and delete command in mayactl<commit_after>\/*\nCopyright 2017 The OpenEBS Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage command\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/openebs\/maya\/pkg\/apis\/openebs.io\/v1alpha1\"\n\t\"github.com\/openebs\/maya\/pkg\/client\/mapiserver\"\n\t\"github.com\/openebs\/maya\/pkg\/util\"\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ VolumeInfo stores the volume information\ntype VolumeInfo struct {\n\tVolume v1alpha1.CASVolume\n}\n\n\/\/ CmdVolumeOptions stores information of volume being operated\ntype CmdVolumeOptions struct {\n\tvolName string\n\tsourceVolumeName string\n\tsnapshotName string\n\tsize string\n\tnamespace string\n\tjson string\n}\n\n\/\/ CASType is engine type\ntype CASType string\n\nconst (\n\t\/\/ VolumeAPIPath is the api path to get volume information\n\tVolumeAPIPath = \"\/latest\/volumes\/\"\n\tcontrollerStatusOk = \"running\"\n\tvolumeStatusOK = \"Running\"\n\t\/\/ JivaStorageEngine is constant for jiva engine\n\tJivaStorageEngine CASType = \"jiva\"\n\t\/\/ CstorStorageEngine is constant for cstor engine\n\tCstorStorageEngine CASType = \"cstor\"\n\ttimeout = 5 * time.Second\n)\n\n\/\/ # Create a Volume:\n\/\/ $ mayactl volume create --volname <vol> --size <size>\n\nvar (\n\tvolumeCommandHelpText = `\nThe following commands helps in operating a Volume such as create, list, and so on.\n\nUsage: mayactl volume <subcommand> [options] [args]\n\nExamples:\n # List Volumes:\n $ mayactl volume list\n\n # Statistics of a Volume:\n $ mayactl volume stats --volname <vol>\n\n # Statistics of a Volume created in 'test' namespace:\n $ mayactl volume stats --volname <vol> --namespace test\n\n # Info of a Volume:\n $ mayactl volume info --volname <vol>\n\n # Info of a Volume created in 'test' namespace:\n $ mayactl volume info --volname <vol> --namespace test\n\n # Delete a Volume:\n $ mayactl volume delete --volname <vol>\n\n # Delete a Volume created in 'test' namespace:\n $ mayactl volume delete --volname <vol> --namespace test\n`\n\toptions = &CmdVolumeOptions{\n\t\tnamespace: \"default\",\n\t}\n)\n\n\/\/ NewCmdVolume provides options for managing OpenEBS Volume\nfunc NewCmdVolume() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"volume\",\n\t\tShort: \"Provides operations related to a Volume\",\n\t\tLong: volumeCommandHelpText,\n\t}\n\n\tcmd.AddCommand(\n\t\t\/\/ NewCmdVolumeCreate(),\n\t\tNewCmdVolumesList(),\n\t\tNewCmdVolumeDelete(),\n\t\tNewCmdVolumeStats(),\n\t\tNewCmdVolumeInfo(),\n\t)\n\tcmd.PersistentFlags().StringVarP(&options.namespace, \"namespace\", \"n\", options.namespace,\n\t\t\"namespace name, required if volume is not in the default namespace\")\n\n\tcmd.PersistentFlags().AddGoFlagSet(flag.CommandLine)\n\tflag.CommandLine.Parse([]string{})\n\treturn cmd\n}\n\n\/\/ Validate verifies whether a volume name,source name or snapshot name is provided or not followed by\n\/\/ stats command. It returns nil and proceeds to execute the command if there is\n\/\/ no error and returns an error if it is missing.\nfunc (c *CmdVolumeOptions) Validate(cmd *cobra.Command, snapshotnameverify, sourcenameverify, volnameverify bool) error {\n\tif snapshotnameverify {\n\t\tif len(c.snapshotName) == 0 {\n\t\t\treturn errors.New(\"--snapname is missing. Please provide a snapshotname\")\n\t\t}\n\t}\n\tif sourcenameverify {\n\t\tif len(c.sourceVolumeName) == 0 {\n\t\t\treturn errors.New(\"--sourcevol is missing. Please specify a sourcevolumename\")\n\t\t}\n\t}\n\tif volnameverify {\n\t\tif len(c.volName) == 0 {\n\t\t\treturn errors.New(\"--volname is missing. Please specify a unique volumename\")\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ NewVolumeInfo fetches and fills CASVolume structure from URL given to it\nfunc NewVolumeInfo(URL string, volname string, namespace string) (volInfo *VolumeInfo, err error) {\n\turl := URL\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\treq.Header.Set(\"namespace\", namespace)\n\n\tc := &http.Client{\n\t\tTimeout: timeout,\n\t}\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\tfmt.Printf(\"Can't get a response, error found: %v\", err)\n\t\treturn\n\t}\n\tif resp != nil && resp.StatusCode != 200 {\n\t\tif resp.StatusCode == 500 {\n\t\t\tfmt.Printf(\"Volume: %s not found at namespace: %q\\n\", volname, namespace)\n\t\t\terr = util.InternalServerError\n\t\t} else if resp.StatusCode == 503 {\n\t\t\tfmt.Printf(\"maya apiservice not reachable at %q\\n\", mapiserver.GetURL())\n\t\t\terr = util.ServerUnavailable\n\t\t} else if resp.StatusCode == 404 {\n\t\t\tfmt.Printf(\"Volume: %s not found at namespace: %q error: %s\\n\", volname, namespace, http.StatusText(resp.StatusCode))\n\t\t\terr = util.PageNotFound\n\t\t}\n\t\tfmt.Printf(\"Received an error from maya apiservice: statuscode: %d\", resp.StatusCode)\n\t\terr = fmt.Errorf(\"Received an error from maya apiservice: statuscode: %d\", resp.StatusCode)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tcasVol := v1alpha1.CASVolume{}\n\terr = json.NewDecoder(resp.Body).Decode(&casVol)\n\tif err != nil {\n\t\tfmt.Printf(\"Response decode failed: error '%+v'\", err)\n\t\treturn\n\t}\n\tif casVol.Status.Reason == \"pending\" {\n\t\tfmt.Println(\"VOLUME status Unknown to maya apiservice\")\n\t\terr = fmt.Errorf(\"VOLUME status Unknown to maya apiservice\")\n\t\treturn\n\t}\n\tvolInfo = &VolumeInfo{\n\t\tVolume: casVol,\n\t}\n\treturn\n}\n\n\/\/ GetCASType returns the CASType of the volume in lowercase\nfunc (volInfo *VolumeInfo) GetCASType() string {\n\tif len(volInfo.Volume.Spec.CasType) == 0 {\n\t\treturn string(JivaStorageEngine)\n\t}\n\treturn strings.ToLower(volInfo.Volume.Spec.CasType)\n}\n\n\/\/ GetClusterIP returns the ClusterIP of the cluster\nfunc (volInfo *VolumeInfo) GetClusterIP() string {\n\tif val, ok := volInfo.Volume.ObjectMeta.Annotations[\"openebs.io\/cluster-ips\"]; ok {\n\t\treturn val\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"vsm.openebs.io\/cluster-ips\"]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n}\n\n\/\/ GetControllerStatus returns the status of the volume controller\nfunc (volInfo *VolumeInfo) GetControllerStatus() string {\n\tif val, ok := volInfo.Volume.ObjectMeta.Annotations[\"openebs.io\/controller-status\"]; ok {\n\t\treturn val\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"vsm.openebs.io\/controller-status\"]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n}\n\n\/\/ GetIQN returns the IQN of the volume\nfunc (volInfo *VolumeInfo) GetIQN() string {\n\tif len(volInfo.Volume.Spec.Iqn) > 0 {\n\t\treturn volInfo.Volume.Spec.Iqn\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"openebs.io\/iqn\"]; ok {\n\t\treturn val\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"vsm.openebs.io\/iqn\"]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n\n}\n\n\/\/ GetVolumeName returns the volume name\nfunc (volInfo *VolumeInfo) GetVolumeName() string {\n\treturn volInfo.Volume.ObjectMeta.Name\n}\n\n\/\/ GetTargetPortal returns the TargetPortal of the volume\nfunc (volInfo *VolumeInfo) GetTargetPortal() string {\n\tif len(volInfo.Volume.Spec.TargetPortal) > 0 {\n\t\treturn volInfo.Volume.Spec.TargetPortal\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"openebs.io\/targetportals\"]; ok {\n\t\treturn val\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"vsm.openebs.io\/targetportals\"]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n}\n\n\/\/ GetVolumeSize returns the capacity of the volume\nfunc (volInfo *VolumeInfo) GetVolumeSize() string {\n\tif len(volInfo.Volume.Spec.Capacity) > 0 {\n\t\treturn volInfo.Volume.Spec.Capacity\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"openebs.io\/volume-size\"]; ok {\n\t\treturn val\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"vsm.openebs.io\/volume-size\"]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n}\n\n\/\/ GetReplicaCount returns the volume replica count\nfunc (volInfo *VolumeInfo) GetReplicaCount() string {\n\tif len(volInfo.Volume.Spec.Replicas) > 0 {\n\t\treturn volInfo.Volume.Spec.Replicas\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"openebs.io\/replica-count\"]; ok {\n\t\treturn val\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"vsm.openebs.io\/replica-count\"]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n}\n\n\/\/ GetReplicaStatus returns the replica status of the volume replica\nfunc (volInfo *VolumeInfo) GetReplicaStatus() string {\n\tif val, ok := volInfo.Volume.ObjectMeta.Annotations[\"openebs.io\/replica-status\"]; ok {\n\t\treturn val\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"vsm.openebs.io\/replica-status\"]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n}\n\n\/\/ GetReplicaIP returns the IP of volume replica\nfunc (volInfo *VolumeInfo) GetReplicaIP() string {\n\tif val, ok := volInfo.Volume.ObjectMeta.Annotations[\"openebs.io\/replica-ips\"]; ok {\n\t\treturn val\n\t} else if val, ok := volInfo.Volume.ObjectMeta.Annotations[\"vsm.openebs.io\/replica-ips\"]; ok {\n\t\treturn val\n\t}\n\treturn \"\"\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage wav\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/hajimehoshi\/ebiten\/exp\/audio\"\n)\n\ntype Stream struct {\n\tsrc audio.ReadSeekCloser\n\theaderSize int64\n\tdataSize int64\n}\n\nfunc (s *Stream) Read(p []byte) (int, error) {\n\treturn s.src.Read(p)\n}\n\nfunc (s *Stream) Seek(offset int64, whence int) (int64, error) {\n\tif whence == 0 {\n\t\toffset += s.headerSize\n\t}\n\treturn s.src.Seek(offset, whence)\n}\n\nfunc (s *Stream) Close() error {\n\treturn s.src.Close()\n}\n\nfunc (s *Stream) Size() int64 {\n\treturn s.dataSize\n}\n\nfunc Decode(context *audio.Context, src audio.ReadSeekCloser) (*Stream, error) {\n\tbuf := make([]byte, 12)\n\tn, err := io.ReadFull(src, buf)\n\tif n != len(buf) {\n\t\treturn nil, fmt.Errorf(\"wav: invalid header\")\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !bytes.Equal(buf[0:4], []byte(\"RIFF\")) {\n\t\treturn nil, fmt.Errorf(\"wav: invalid header: 'RIFF' not found\")\n\t}\n\tif !bytes.Equal(buf[8:12], []byte(\"WAVE\")) {\n\t\treturn nil, fmt.Errorf(\"wav: invalid header: 'WAVE' not found\")\n\t}\n\n\t\/\/ Read chunks\n\tdataSize := int64(0)\n\theaderSize := int64(0)\nchunks:\n\tfor {\n\t\tbuf := make([]byte, 8)\n\t\tn, err := io.ReadFull(src, buf)\n\t\tif n != len(buf) {\n\t\t\treturn nil, fmt.Errorf(\"wav: invalid header\")\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\theaderSize += 8\n\t\tsize := int64(buf[4]) | int64(buf[5])<<8 | int64(buf[6])<<16 | int64(buf[7]<<24)\n\t\tswitch {\n\t\tcase bytes.Equal(buf[0:4], []byte(\"fmt \")):\n\t\t\tif size != 16 {\n\t\t\t\treturn nil, fmt.Errorf(\"wav: invalid header: maybe non-PCM file?\")\n\t\t\t}\n\t\t\tbuf := make([]byte, size)\n\t\t\tn, err := io.ReadFull(src, buf)\n\t\t\tif n != len(buf) {\n\t\t\t\treturn nil, fmt.Errorf(\"wav: invalid header\")\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\t\/\/ TODO: Remove this magic number\n\t\t\tif buf[2] != 2 {\n\t\t\t\treturn nil, fmt.Errorf(\"wav: invalid header: channel num must be 2\")\n\t\t\t}\n\t\t\t\/\/ TODO: Remove this magic number\n\t\t\tif buf[14] != 16 {\n\t\t\t\treturn nil, fmt.Errorf(\"wav: invalid header: depth must be 16\")\n\t\t\t}\n\t\t\tsampleRate := int64(buf[4]) | int64(buf[5])<<8 | int64(buf[6])<<16 | int64(buf[7]<<24)\n\t\t\tif int64(context.SampleRate()) != sampleRate {\n\t\t\t\treturn nil, fmt.Errorf(\"wav: sample rate must be %d but %d\", context.SampleRate(), sampleRate)\n\t\t\t}\n\t\t\theaderSize += size\n\t\tcase bytes.Equal(buf[0:4], []byte(\"data\")):\n\t\t\tdataSize = size\n\t\t\tbreak chunks\n\t\tdefault:\n\t\t\tbuf := make([]byte, size)\n\t\t\tn, err := io.ReadFull(src, buf)\n\t\t\tif n != len(buf) {\n\t\t\t\treturn nil, fmt.Errorf(\"wav: invalid header\")\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\theaderSize += size\n\t\t}\n\t}\n\ts := &Stream{\n\t\tsrc: src,\n\t\theaderSize: headerSize,\n\t\tdataSize: dataSize,\n\t}\n\treturn s, nil\n}\n<commit_msg>audio\/wav: Parse header more accurately<commit_after>\/\/ Copyright 2016 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage wav\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com\/hajimehoshi\/ebiten\/exp\/audio\"\n)\n\ntype Stream struct {\n\tsrc audio.ReadSeekCloser\n\theaderSize int64\n\tdataSize int64\n}\n\nfunc (s *Stream) Read(p []byte) (int, error) {\n\treturn s.src.Read(p)\n}\n\nfunc (s *Stream) Seek(offset int64, whence int) (int64, error) {\n\tif whence == 0 {\n\t\toffset += s.headerSize\n\t}\n\treturn s.src.Seek(offset, whence)\n}\n\nfunc (s *Stream) Close() error {\n\treturn s.src.Close()\n}\n\nfunc (s *Stream) Size() int64 {\n\treturn s.dataSize\n}\n\nfunc Decode(context *audio.Context, src audio.ReadSeekCloser) (*Stream, error) {\n\tbuf := make([]byte, 12)\n\tn, err := io.ReadFull(src, buf)\n\tif n != len(buf) {\n\t\treturn nil, fmt.Errorf(\"wav: invalid header\")\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !bytes.Equal(buf[0:4], []byte(\"RIFF\")) {\n\t\treturn nil, fmt.Errorf(\"wav: invalid header: 'RIFF' not found\")\n\t}\n\tif !bytes.Equal(buf[8:12], []byte(\"WAVE\")) {\n\t\treturn nil, fmt.Errorf(\"wav: invalid header: 'WAVE' not found\")\n\t}\n\n\t\/\/ Read chunks\n\tdataSize := int64(0)\n\theaderSize := int64(0)\nchunks:\n\tfor {\n\t\tbuf := make([]byte, 8)\n\t\tn, err := io.ReadFull(src, buf)\n\t\tif n != len(buf) {\n\t\t\treturn nil, fmt.Errorf(\"wav: invalid header\")\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\theaderSize += 8\n\t\tsize := int64(buf[4]) | int64(buf[5])<<8 | int64(buf[6])<<16 | int64(buf[7]<<24)\n\t\tswitch {\n\t\tcase bytes.Equal(buf[0:4], []byte(\"fmt \")):\n\t\t\tif size != 16 {\n\t\t\t\treturn nil, fmt.Errorf(\"wav: invalid header: maybe non-PCM file?\")\n\t\t\t}\n\t\t\tbuf := make([]byte, size)\n\t\t\tn, err := io.ReadFull(src, buf)\n\t\t\tif n != len(buf) {\n\t\t\t\treturn nil, fmt.Errorf(\"wav: invalid header\")\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tformat := int(buf[0]) | int(buf[1])<<8\n\t\t\tif format != 1 {\n\t\t\t\treturn nil, fmt.Errorf(\"wav: format must be linear PCM\")\n\t\t\t}\n\t\t\tchannelNum := int(buf[2]) | int(buf[3])<<8\n\t\t\t\/\/ TODO: Remove this magic number\n\t\t\tif channelNum != 2 {\n\t\t\t\treturn nil, fmt.Errorf(\"wav: channel num must be 2\")\n\t\t\t}\n\t\t\tbitsPerSample := int(buf[14]) | int(buf[15])<<8\n\t\t\t\/\/ TODO: Remove this magic number\n\t\t\tif bitsPerSample != 16 {\n\t\t\t\treturn nil, fmt.Errorf(\"wav: bits per sample must be 16\")\n\t\t\t}\n\t\t\tsampleRate := int64(buf[4]) | int64(buf[5])<<8 | int64(buf[6])<<16 | int64(buf[7]<<24)\n\t\t\tif int64(context.SampleRate()) != sampleRate {\n\t\t\t\treturn nil, fmt.Errorf(\"wav: sample rate must be %d but %d\", context.SampleRate(), sampleRate)\n\t\t\t}\n\t\t\theaderSize += size\n\t\tcase bytes.Equal(buf[0:4], []byte(\"data\")):\n\t\t\tdataSize = size\n\t\t\tbreak chunks\n\t\tdefault:\n\t\t\tbuf := make([]byte, size)\n\t\t\tn, err := io.ReadFull(src, buf)\n\t\t\tif n != len(buf) {\n\t\t\t\treturn nil, fmt.Errorf(\"wav: invalid header\")\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\theaderSize += size\n\t\t}\n\t}\n\ts := &Stream{\n\t\tsrc: src,\n\t\theaderSize: headerSize,\n\t\tdataSize: dataSize,\n\t}\n\treturn s, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013-2014 Apcera Inc. All rights reserved.\n\npackage test\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/nats-io\/nats\"\n)\n\nfunc TestServerRestartReSliceIssue(t *testing.T) {\n\tsrvA, srvB, optsA, optsB := runServers(t)\n\tdefer srvA.Shutdown()\n\n\turlA := fmt.Sprintf(\"nats:\/\/%s:%d\/\", optsA.Host, optsA.Port)\n\turlB := fmt.Sprintf(\"nats:\/\/%s:%d\/\", optsB.Host, optsB.Port)\n\n\t\/\/ msg to send..\n\tmsg := []byte(\"Hello World\")\n\n\tservers := []string{urlA, urlB}\n\n\topts := nats.DefaultOptions\n\topts.Timeout = (5 * time.Second)\n\topts.ReconnectWait = (50 * time.Millisecond)\n\topts.MaxReconnect = 1000\n\n\treconnects := int32(0)\n\treconnectsDone := make(chan bool)\n\topts.ReconnectedCB = func(nc *nats.Conn) {\n\t\tatomic.AddInt32(&reconnects, 1)\n\t\treconnectsDone <- true\n\t}\n\n\t\/\/ Create 20 random clients.\n\t\/\/ Half connected to A and half to B..\n\tnumClients := 20\n\tfor i := 0; i < numClients; i++ {\n\t\topts.Url = servers[i%2]\n\t\tnc, err := opts.Connect()\n\t\tdefer nc.Close()\n\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to create connection: %v\\n\", err)\n\t\t}\n\t\t\/\/ Create 10 subscriptions each..\n\t\tfor x := 0; x < 10; x++ {\n\t\t\tsubject := fmt.Sprintf(\"foo.%d\", (rand.Int()%50)+1)\n\t\t\tnc.Subscribe(subject, func(m *nats.Msg) {\n\t\t\t\t\/\/ Just eat it..\n\t\t\t})\n\t\t}\n\t\t\/\/ Pick one subject to send to..\n\t\tsubject := fmt.Sprintf(\"foo.%d\", (rand.Int()%50)+1)\n\t\tgo func() {\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\tfor i := 1; 1 <= 100; i++ {\n\t\t\t\tif err := nc.Publish(subject, msg); err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif i%10 == 0 {\n\t\t\t\t\ttime.Sleep(time.Millisecond)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\t\/\/ Wait for a short bit..\n\ttime.Sleep(20 * time.Millisecond)\n\n\t\/\/ Restart SrvB\n\tsrvB.Shutdown()\n\tsrvB = RunServer(optsB)\n\tdefer srvB.Shutdown()\n\n\tselect {\n\tcase <-reconnectsDone:\n\t\tbreak\n\tcase <-time.After(2 * time.Second):\n\t\tt.Fatalf(\"Expected %d reconnects, got %d\\n\", numClients\/2, reconnects)\n\t}\n}\n\n\/\/ This will test queue subscriber semantics across a cluster in the presence\n\/\/ of server restarts.\nfunc TestServerRestartAndQueueSubs(t *testing.T) {\n\tsrvA, srvB, optsA, optsB := runServers(t)\n\n\turlA := fmt.Sprintf(\"nats:\/\/%s:%d\/\", optsA.Host, optsA.Port)\n\turlB := fmt.Sprintf(\"nats:\/\/%s:%d\/\", optsB.Host, optsB.Port)\n\n\t\/\/ Client options\n\topts := nats.DefaultOptions\n\topts.Timeout = (5 * time.Second)\n\topts.ReconnectWait = (50 * time.Millisecond)\n\topts.MaxReconnect = 1000\n\topts.NoRandomize = true\n\n\t\/\/ Allow us to block on a reconnect completion.\n\treconnectsDone := make(chan bool)\n\topts.ReconnectedCB = func(nc *nats.Conn) {\n\t\treconnectsDone <- true\n\t}\n\n\t\/\/ Helper to wait on a reconnect.\n\twaitOnReconnect := func() {\n\t\tvar rcs int64\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-reconnectsDone:\n\t\t\t\tatomic.AddInt64(&rcs, 1)\n\t\t\t\tif rcs >= 2 {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-time.After(2 * time.Second):\n\t\t\t\tt.Fatalf(\"Expected a reconnect, timedout!\\n\")\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Create two clients..\n\topts.Servers = []string{urlA}\n\tnc1, err := opts.Connect()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create connection for nc1: %v\\n\", err)\n\t}\n\n\topts.Servers = []string{urlB}\n\tnc2, err := opts.Connect()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create connection for nc2: %v\\n\", err)\n\t}\n\n\tc1, _ := nats.NewEncodedConn(nc1, \"json\")\n\tdefer c1.Close()\n\tc2, _ := nats.NewEncodedConn(nc2, \"json\")\n\tdefer c2.Close()\n\n\t\/\/ Flusher helper function.\n\tflush := func() {\n\t\t\/\/ Wait for processing.\n\t\tc1.Flush()\n\t\tc2.Flush()\n\t\t\/\/ Wait for a short bit for cluster propogation.\n\t\ttime.Sleep(50 * time.Millisecond)\n\t}\n\n\t\/\/ To hold queue results.\n\tresults := make(map[int]int)\n\tvar mu sync.Mutex\n\n\t\/\/ This corresponds to the subsriptions below.\n\tconst ExpectedMsgCount = 3\n\n\t\/\/ Make sure we got what we needed, 1 msg only and all seqnos accounted for..\n\tcheckResults := func(numSent int) {\n\t\tmu.Lock()\n\t\tdefer mu.Unlock()\n\n\t\tfor i := 0; i < numSent; i++ {\n\t\t\tif results[i] != ExpectedMsgCount {\n\t\t\t\tt.Fatalf(\"Received incorrect number of messages, [%d] for seq: %d\\n\", results[i], i)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Auto reset results map\n\t\tresults = make(map[int]int)\n\t}\n\n\tsubj := \"foo.bar\"\n\tqgroup := \"workers\"\n\n\tcb := func(seqno int) {\n\t\tmu.Lock()\n\t\tdefer mu.Unlock()\n\t\tresults[seqno] = results[seqno] + 1\n\t}\n\n\t\/\/ Create queue subscribers\n\tc1.QueueSubscribe(subj, qgroup, cb)\n\tc2.QueueSubscribe(subj, qgroup, cb)\n\n\t\/\/ Do a wildcard subscription.\n\tc1.Subscribe(\"foo.*\", cb)\n\tc2.Subscribe(\"foo.*\", cb)\n\n\t\/\/ Wait for processing.\n\tflush()\n\n\tsendAndCheckMsgs := func(numToSend int) {\n\t\tfor i := 0; i < numToSend; i++ {\n\t\t\tif i%2 == 0 {\n\t\t\t\tc1.Publish(subj, i)\n\t\t\t} else {\n\t\t\t\tc2.Publish(subj, i)\n\t\t\t}\n\t\t}\n\t\t\/\/ Wait for processing.\n\t\tflush()\n\t\t\/\/ Check Results\n\t\tcheckResults(numToSend)\n\t}\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Base Test\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ Now send 10 messages, from each client..\n\tsendAndCheckMsgs(10)\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Now restart SrvA and srvB, re-run test\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tsrvA.Shutdown()\n\tsrvA = RunServer(optsA)\n\tdefer srvA.Shutdown()\n\n\tsrvB.Shutdown()\n\tsrvB = RunServer(optsB)\n\tdefer srvB.Shutdown()\n\n\twaitOnReconnect()\n\n\ttime.Sleep(50 * time.Millisecond)\n\n\t\/\/ Now send another 10 messages, from each client..\n\tsendAndCheckMsgs(10)\n}\n\n\/\/ This will test request semantics across a route\nfunc TestRequestsAcrossRoutes(t *testing.T) {\n\tsrvA, srvB, optsA, optsB := runServers(t)\n\tdefer srvA.Shutdown()\n\tdefer srvB.Shutdown()\n\n\turlA := fmt.Sprintf(\"nats:\/\/%s:%d\/\", optsA.Host, optsA.Port)\n\turlB := fmt.Sprintf(\"nats:\/\/%s:%d\/\", optsB.Host, optsB.Port)\n\n\tnc1, err := nats.Connect(urlA)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create connection for nc1: %v\\n\", err)\n\t}\n\tdefer nc1.Close()\n\n\tnc2, err := nats.Connect(urlB)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create connection for nc2: %v\\n\", err)\n\t}\n\tdefer nc2.Close()\n\n\tec2, _ := nats.NewEncodedConn(nc2, nats.JSON_ENCODER)\n\n\tresponse := []byte(\"I will help you\")\n\n\t\/\/ Connect responder to srvA\n\tnc1.Subscribe(\"foo-req\", func(m *nats.Msg) {\n\t\tnc1.Publish(m.Reply, response)\n\t})\n\t\/\/ Make sure the route and the subscription are propogated.\n\tnc1.Flush()\n\n\tvar resp string\n\n\tfor i := 0; i < 100; i++ {\n\t\tif err := ec2.Request(\"foo-req\", i, &resp, 100*time.Millisecond); err != nil {\n\t\t\tt.Fatalf(\"Received an error on Request test [%d]: %s\", i, err)\n\t\t}\n\t}\n}\n\n\/\/ This will test request semantics across a route to queues\nfunc TestRequestsAcrossRoutesToQueues(t *testing.T) {\n\tsrvA, srvB, optsA, optsB := runServers(t)\n\tdefer srvA.Shutdown()\n\tdefer srvB.Shutdown()\n\n\turlA := fmt.Sprintf(\"nats:\/\/%s:%d\/\", optsA.Host, optsA.Port)\n\turlB := fmt.Sprintf(\"nats:\/\/%s:%d\/\", optsB.Host, optsB.Port)\n\n\tnc1, err := nats.Connect(urlA)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create connection for nc1: %v\\n\", err)\n\t}\n\tdefer nc1.Close()\n\n\tnc2, err := nats.Connect(urlB)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create connection for nc2: %v\\n\", err)\n\t}\n\tdefer nc2.Close()\n\n\tec1, _ := nats.NewEncodedConn(nc1, nats.JSON_ENCODER)\n\tec2, _ := nats.NewEncodedConn(nc2, nats.JSON_ENCODER)\n\n\tresponse := []byte(\"I will help you\")\n\n\t\/\/ Connect one responder to srvA\n\tnc1.QueueSubscribe(\"foo-req\", \"booboo\", func(m *nats.Msg) {\n\t\tnc1.Publish(m.Reply, response)\n\t})\n\t\/\/ Make sure the route and the subscription are propogated.\n\tnc1.Flush()\n\n\t\/\/ Connect the other responder to srvB\n\tnc2.QueueSubscribe(\"foo-req\", \"booboo\", func(m *nats.Msg) {\n\t\tnc2.Publish(m.Reply, response)\n\t})\n\n\tvar resp string\n\n\tfor i := 0; i < 100; i++ {\n\t\tif err := ec2.Request(\"foo-req\", i, &resp, 100*time.Millisecond); err != nil {\n\t\t\tt.Fatalf(\"Received an error on Request test [%d]: %s\", i, err)\n\t\t}\n\t}\n\n\tfor i := 0; i < 100; i++ {\n\t\tif err := ec1.Request(\"foo-req\", i, &resp, 100*time.Millisecond); err != nil {\n\t\t\tt.Fatalf(\"Received an error on Request test [%d]: %s\", i, err)\n\t\t}\n\t}\n}\n<commit_msg>Fix test that was failing on Windows.<commit_after>\/\/ Copyright 2013-2014 Apcera Inc. All rights reserved.\n\npackage test\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/nats-io\/nats\"\n)\n\nfunc TestServerRestartReSliceIssue(t *testing.T) {\n\tsrvA, srvB, optsA, optsB := runServers(t)\n\tdefer srvA.Shutdown()\n\n\turlA := fmt.Sprintf(\"nats:\/\/%s:%d\/\", optsA.Host, optsA.Port)\n\turlB := fmt.Sprintf(\"nats:\/\/%s:%d\/\", optsB.Host, optsB.Port)\n\n\t\/\/ msg to send..\n\tmsg := []byte(\"Hello World\")\n\n\tservers := []string{urlA, urlB}\n\n\topts := nats.DefaultOptions\n\topts.Timeout = (5 * time.Second)\n\topts.ReconnectWait = (50 * time.Millisecond)\n\topts.MaxReconnect = 1000\n\n\treconnects := int32(0)\n\treconnectsDone := make(chan bool)\n\topts.ReconnectedCB = func(nc *nats.Conn) {\n\t\tatomic.AddInt32(&reconnects, 1)\n\t\treconnectsDone <- true\n\t}\n\n\t\/\/ Create 20 random clients.\n\t\/\/ Half connected to A and half to B..\n\tnumClients := 20\n\n\tclients := make([]*nats.Conn, numClients)\n\n\tfor i := 0; i < numClients; i++ {\n\t\topts.Url = servers[i%2]\n\t\tnc, err := opts.Connect()\n\t\tdefer nc.Close()\n\t\tclients = append(clients, nc)\n\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to create connection: %v\\n\", err)\n\t\t}\n\t\t\/\/ Create 10 subscriptions each..\n\t\tfor x := 0; x < 10; x++ {\n\t\t\tsubject := fmt.Sprintf(\"foo.%d\", (rand.Int()%50)+1)\n\t\t\tnc.Subscribe(subject, func(m *nats.Msg) {\n\t\t\t\t\/\/ Just eat it..\n\t\t\t})\n\t\t}\n\t\t\/\/ Pick one subject to send to..\n\t\tsubject := fmt.Sprintf(\"foo.%d\", (rand.Int()%50)+1)\n\t\tgo func() {\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\tfor i := 1; i <= 100; i++ {\n\t\t\t\tif err := nc.Publish(subject, msg); err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif i%10 == 0 {\n\t\t\t\t\ttime.Sleep(time.Millisecond)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\t\/\/ Wait for a short bit..\n\ttime.Sleep(20 * time.Millisecond)\n\n\t\/\/ Restart SrvB\n\tsrvB.Shutdown()\n\tsrvB = RunServer(optsB)\n\tdefer srvB.Shutdown()\n\n\tselect {\n\tcase <-reconnectsDone:\n\t\tbreak\n\tcase <-time.After(2 * time.Second):\n\t\tt.Fatalf(\"Expected %d reconnects, got %d\\n\", numClients\/2, reconnects)\n\t}\n\n\t\/\/ On windows, as of go 1.5.2, the test does not exit until we close\n\t\/\/ the connections...\n\tfor _, nc := range clients {\n\t\tif nc != nil {\n\t\t\tnc.Close()\n\t\t}\n\t}\n}\n\n\/\/ This will test queue subscriber semantics across a cluster in the presence\n\/\/ of server restarts.\nfunc TestServerRestartAndQueueSubs(t *testing.T) {\n\tsrvA, srvB, optsA, optsB := runServers(t)\n\n\turlA := fmt.Sprintf(\"nats:\/\/%s:%d\/\", optsA.Host, optsA.Port)\n\turlB := fmt.Sprintf(\"nats:\/\/%s:%d\/\", optsB.Host, optsB.Port)\n\n\t\/\/ Client options\n\topts := nats.DefaultOptions\n\topts.Timeout = (5 * time.Second)\n\topts.ReconnectWait = (50 * time.Millisecond)\n\topts.MaxReconnect = 1000\n\topts.NoRandomize = true\n\n\t\/\/ Allow us to block on a reconnect completion.\n\treconnectsDone := make(chan bool)\n\topts.ReconnectedCB = func(nc *nats.Conn) {\n\t\treconnectsDone <- true\n\t}\n\n\t\/\/ Helper to wait on a reconnect.\n\twaitOnReconnect := func() {\n\t\tvar rcs int64\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-reconnectsDone:\n\t\t\t\tatomic.AddInt64(&rcs, 1)\n\t\t\t\tif rcs >= 2 {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase <-time.After(2 * time.Second):\n\t\t\t\tt.Fatalf(\"Expected a reconnect, timedout!\\n\")\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Create two clients..\n\topts.Servers = []string{urlA}\n\tnc1, err := opts.Connect()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create connection for nc1: %v\\n\", err)\n\t}\n\n\topts.Servers = []string{urlB}\n\tnc2, err := opts.Connect()\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create connection for nc2: %v\\n\", err)\n\t}\n\n\tc1, _ := nats.NewEncodedConn(nc1, \"json\")\n\tdefer c1.Close()\n\tc2, _ := nats.NewEncodedConn(nc2, \"json\")\n\tdefer c2.Close()\n\n\t\/\/ Flusher helper function.\n\tflush := func() {\n\t\t\/\/ Wait for processing.\n\t\tc1.Flush()\n\t\tc2.Flush()\n\t\t\/\/ Wait for a short bit for cluster propogation.\n\t\ttime.Sleep(50 * time.Millisecond)\n\t}\n\n\t\/\/ To hold queue results.\n\tresults := make(map[int]int)\n\tvar mu sync.Mutex\n\n\t\/\/ This corresponds to the subsriptions below.\n\tconst ExpectedMsgCount = 3\n\n\t\/\/ Make sure we got what we needed, 1 msg only and all seqnos accounted for..\n\tcheckResults := func(numSent int) {\n\t\tmu.Lock()\n\t\tdefer mu.Unlock()\n\n\t\tfor i := 0; i < numSent; i++ {\n\t\t\tif results[i] != ExpectedMsgCount {\n\t\t\t\tt.Fatalf(\"Received incorrect number of messages, [%d] for seq: %d\\n\", results[i], i)\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Auto reset results map\n\t\tresults = make(map[int]int)\n\t}\n\n\tsubj := \"foo.bar\"\n\tqgroup := \"workers\"\n\n\tcb := func(seqno int) {\n\t\tmu.Lock()\n\t\tdefer mu.Unlock()\n\t\tresults[seqno] = results[seqno] + 1\n\t}\n\n\t\/\/ Create queue subscribers\n\tc1.QueueSubscribe(subj, qgroup, cb)\n\tc2.QueueSubscribe(subj, qgroup, cb)\n\n\t\/\/ Do a wildcard subscription.\n\tc1.Subscribe(\"foo.*\", cb)\n\tc2.Subscribe(\"foo.*\", cb)\n\n\t\/\/ Wait for processing.\n\tflush()\n\n\tsendAndCheckMsgs := func(numToSend int) {\n\t\tfor i := 0; i < numToSend; i++ {\n\t\t\tif i%2 == 0 {\n\t\t\t\tc1.Publish(subj, i)\n\t\t\t} else {\n\t\t\t\tc2.Publish(subj, i)\n\t\t\t}\n\t\t}\n\t\t\/\/ Wait for processing.\n\t\tflush()\n\t\t\/\/ Check Results\n\t\tcheckResults(numToSend)\n\t}\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Base Test\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ Now send 10 messages, from each client..\n\tsendAndCheckMsgs(10)\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Now restart SrvA and srvB, re-run test\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\tsrvA.Shutdown()\n\tsrvA = RunServer(optsA)\n\tdefer srvA.Shutdown()\n\n\tsrvB.Shutdown()\n\tsrvB = RunServer(optsB)\n\tdefer srvB.Shutdown()\n\n\twaitOnReconnect()\n\n\ttime.Sleep(50 * time.Millisecond)\n\n\t\/\/ Now send another 10 messages, from each client..\n\tsendAndCheckMsgs(10)\n}\n\n\/\/ This will test request semantics across a route\nfunc TestRequestsAcrossRoutes(t *testing.T) {\n\tsrvA, srvB, optsA, optsB := runServers(t)\n\tdefer srvA.Shutdown()\n\tdefer srvB.Shutdown()\n\n\turlA := fmt.Sprintf(\"nats:\/\/%s:%d\/\", optsA.Host, optsA.Port)\n\turlB := fmt.Sprintf(\"nats:\/\/%s:%d\/\", optsB.Host, optsB.Port)\n\n\tnc1, err := nats.Connect(urlA)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create connection for nc1: %v\\n\", err)\n\t}\n\tdefer nc1.Close()\n\n\tnc2, err := nats.Connect(urlB)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create connection for nc2: %v\\n\", err)\n\t}\n\tdefer nc2.Close()\n\n\tec2, _ := nats.NewEncodedConn(nc2, nats.JSON_ENCODER)\n\n\tresponse := []byte(\"I will help you\")\n\n\t\/\/ Connect responder to srvA\n\tnc1.Subscribe(\"foo-req\", func(m *nats.Msg) {\n\t\tnc1.Publish(m.Reply, response)\n\t})\n\t\/\/ Make sure the route and the subscription are propogated.\n\tnc1.Flush()\n\n\tvar resp string\n\n\tfor i := 0; i < 100; i++ {\n\t\tif err := ec2.Request(\"foo-req\", i, &resp, 100*time.Millisecond); err != nil {\n\t\t\tt.Fatalf(\"Received an error on Request test [%d]: %s\", i, err)\n\t\t}\n\t}\n}\n\n\/\/ This will test request semantics across a route to queues\nfunc TestRequestsAcrossRoutesToQueues(t *testing.T) {\n\tsrvA, srvB, optsA, optsB := runServers(t)\n\tdefer srvA.Shutdown()\n\tdefer srvB.Shutdown()\n\n\turlA := fmt.Sprintf(\"nats:\/\/%s:%d\/\", optsA.Host, optsA.Port)\n\turlB := fmt.Sprintf(\"nats:\/\/%s:%d\/\", optsB.Host, optsB.Port)\n\n\tnc1, err := nats.Connect(urlA)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create connection for nc1: %v\\n\", err)\n\t}\n\tdefer nc1.Close()\n\n\tnc2, err := nats.Connect(urlB)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create connection for nc2: %v\\n\", err)\n\t}\n\tdefer nc2.Close()\n\n\tec1, _ := nats.NewEncodedConn(nc1, nats.JSON_ENCODER)\n\tec2, _ := nats.NewEncodedConn(nc2, nats.JSON_ENCODER)\n\n\tresponse := []byte(\"I will help you\")\n\n\t\/\/ Connect one responder to srvA\n\tnc1.QueueSubscribe(\"foo-req\", \"booboo\", func(m *nats.Msg) {\n\t\tnc1.Publish(m.Reply, response)\n\t})\n\t\/\/ Make sure the route and the subscription are propogated.\n\tnc1.Flush()\n\n\t\/\/ Connect the other responder to srvB\n\tnc2.QueueSubscribe(\"foo-req\", \"booboo\", func(m *nats.Msg) {\n\t\tnc2.Publish(m.Reply, response)\n\t})\n\n\tvar resp string\n\n\tfor i := 0; i < 100; i++ {\n\t\tif err := ec2.Request(\"foo-req\", i, &resp, 100*time.Millisecond); err != nil {\n\t\t\tt.Fatalf(\"Received an error on Request test [%d]: %s\", i, err)\n\t\t}\n\t}\n\n\tfor i := 0; i < 100; i++ {\n\t\tif err := ec1.Request(\"foo-req\", i, &resp, 100*time.Millisecond); err != nil {\n\t\t\tt.Fatalf(\"Received an error on Request test [%d]: %s\", i, err)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package agozon\n\n\/\/ Locale Information\ntype LocaleSearchIndex struct {\n\tBrowseNode int64 `json:\"BrowseNode\"`\n\tSortValues []string `json:\"SortValues\"`\n\tItemSearchParameters []string `json:\"ItemSearchParameters\"`\n}\n\nvar LocaleInformation = []map[string]LocaleSearchIndex{\n\tLocaleBRMap, LocaleCAMap, LocaleCNMap, LocaleDEMap, LocaleESMap, LocaleFRMap, LocaleINMap,\n\tLocaleITMap, LocaleJPMap, LocaleUKMap, LocaleUSMap,\n}\n<commit_msg>update localeinformation to map[string]map[string]LocaleSearchIndex<commit_after>package agozon\n\n\/\/ Locale Information\ntype LocaleSearchIndex struct {\n\tBrowseNode int64 `json:\"BrowseNode\"`\n\tSortValues []string `json:\"SortValues\"`\n\tItemSearchParameters []string `json:\"ItemSearchParameters\"`\n}\n\nvar LocaleInformation = map[string]map[string]LocaleSearchIndex{\n\t\"BR\": LocaleBRMap,\n\t\"CA\": LocaleCAMap,\n\t\"CN\": LocaleCNMap,\n\t\"DE\": LocaleDEMap,\n\t\"ES\": LocaleESMap,\n\t\"FR\": LocaleFRMap,\n\t\"IN\": LocaleINMap,\n\t\"IT\": LocaleITMap,\n\t\"JP\": LocaleJPMap,\n\t\"UK\": LocaleUKMap,\n\t\"US\": LocaleUSMap,\n}\n<|endoftext|>"} {"text":"<commit_before>package rest\n\nimport (\n\t\"connectordb\/plugins\/rest\/restcore\"\n\t\"connectordb\/streamdb\/datastream\"\n\t\"connectordb\/streamdb\/operator\"\n\t\"connectordb\/streamdb\/operator\/messenger\"\n\t\"connectordb\/streamdb\/query\"\n\t\"connectordb\/streamdb\/query\/transforms\"\n\t\"errors\"\n\t\"io\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/nats-io\/nats\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\nconst (\n\t\/\/The max size of a websocket message\n\tmessageSizeLimit = 1 * restcore.Mb\n\n\t\/\/The time allowed to write a message\n\twriteWait = 2 * time.Second\n\n\t\/\/Ping pong stuff - making sure that the connection still exists\n\tpongWait = 60 * time.Second\n\tpingPeriod = (pongWait * 9) \/ 10\n\n\t\/\/The number of messages to buffer\n\tmessageBuffer = 3\n\n\twebSocketClosed = \"EXIT\"\n\twebSocketClosedNonClean = \"@EXIT\"\n)\n\n\/\/The websocket upgrader\nvar (\n\tupgrader = websocket.Upgrader{\n\t\tReadBufferSize: 1024,\n\t\tWriteBufferSize: 1024,\n\t\t\/\/ Allow from all origins\n\t\tCheckOrigin: func(r *http.Request) bool { return true },\n\t}\n\n\t\/\/websocketWaitGroup is the WaitGroup of websockets that are currently open\n\twebsocketWaitGroup = sync.WaitGroup{}\n)\n\ntype Subscription struct {\n\tsync.Mutex \/\/The transform mutex\n\n\tnats *nats.Subscription \/\/The nats subscription\n\n\ttransform map[string]transforms.DatapointTransform \/\/the transforms associated with the subscription - this allows us to run transforms on the data!\n}\n\nfunc NewSubscription(subs *nats.Subscription) *Subscription {\n\treturn &Subscription{\n\t\tnats: subs,\n\t\ttransform: make(map[string]transforms.DatapointTransform),\n\t}\n}\n\n\/\/Close shuts down the subscription\nfunc (s *Subscription) Close() {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.nats.Unsubscribe()\n}\n\n\/\/Size is the number of subscriptions to the stream (using different transforms)\nfunc (s *Subscription) Size() int {\n\ts.Lock()\n\tdefer s.Unlock()\n\treturn len(s.transform)\n}\n\n\/\/Add a transform subscription to the string\nfunc (s *Subscription) AddTransform(transform string) (err error) {\n\tif _, ok := s.transform[transform]; ok {\n\t\treturn errors.New(\"Subscription to the transform already exists\")\n\t}\n\n\t\/\/First, attempt to generate the transform\n\tvar t transforms.DatapointTransform\n\tif transform != \"\" {\n\t\tt, err = transforms.NewTransformPipeline(transform)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ts.Lock()\n\ts.transform[transform] = t\n\ts.Unlock()\n\n\treturn nil\n}\n\n\/\/RemTransform deletes a transform from the subscriptions\nfunc (s *Subscription) RemTransform(transform string) (err error) {\n\ts.Lock()\n\tdelete(s.transform, transform)\n\ts.Unlock()\n\treturn nil\n}\n\n\/\/WebsocketConnection is the general connection with a websocket that is run.\n\/\/Loosely based on github.com\/gorilla\/websocket\/blob\/master\/examples\/chat\/conn.go\n\/\/No need for mutex because only reader reads and implements commands\ntype WebsocketConnection struct {\n\tws *websocket.Conn\n\n\tsubscriptions map[string]*Subscription\n\n\tc chan messenger.Message\n\n\tlogger *log.Entry \/\/logrus uses a mutex internally\n\to operator.Operator\n}\n\n\/\/NewWebsocketConnection creates a new websocket connection based on the operators and stuff\nfunc NewWebsocketConnection(o operator.Operator, writer http.ResponseWriter, request *http.Request, logger *log.Entry) (*WebsocketConnection, error) {\n\n\tws, err := upgrader.Upgrade(writer, request, nil)\n\tif err != nil {\n\t\tlogger.Errorln(err)\n\t\treturn nil, err\n\t}\n\n\tws.SetReadLimit(messageSizeLimit)\n\n\treturn &WebsocketConnection{ws, make(map[string]*Subscription), make(chan messenger.Message, messageBuffer), logger, o}, nil\n}\n\nfunc (c *WebsocketConnection) write(obj interface{}) error {\n\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\treturn c.ws.WriteJSON(obj)\n}\n\n\/\/Close the websocket connection\nfunc (c *WebsocketConnection) Close() {\n\tc.UnsubscribeAll()\n\tclose(c.c)\n\tc.ws.Close()\n\tc.logger.WithField(\"cmd\", \"close\").Debugln()\n}\n\n\/\/Insert a datapoint using the websocket\nfunc (c *WebsocketConnection) Insert(ws *websocketCommand) {\n\tlogger := c.logger.WithFields(log.Fields{\"cmd\": \"insert\", \"arg\": ws.Arg})\n\tlogger.Debugln(\"Inserting\", len(ws.D), \"dp\")\n\terr := c.o.InsertStream(ws.Arg, ws.D, true)\n\tif err != nil {\n\t\t\/\/TODO: Notify user of insert failure\n\t\tlogger.Warn(err.Error())\n\t} else {\n\t\tatomic.AddUint32(&restcore.StatsInserts, uint32(len(ws.D)))\n\t}\n}\n\n\/\/Subscribe to the given data stream\nfunc (c *WebsocketConnection) Subscribe(s, transform string) {\n\tlogger := c.logger.WithFields(log.Fields{\"cmd\": \"subscribe\", \"arg\": s})\n\n\t\/\/Next check if nats is subscribed\n\tif _, ok := c.subscriptions[s]; !ok {\n\t\tsubs, err := c.o.Subscribe(s, c.c)\n\t\tif err != nil {\n\t\t\tlogger.Warningln(err)\n\t\t} else {\n\n\t\t\tlogger.Debugln(\"Initializing subscription\")\n\t\t\tc.subscriptions[s] = NewSubscription(subs)\n\t\t}\n\t}\n\n\terr := c.subscriptions[s].AddTransform(transform)\n\tif err != nil {\n\t\tlogger.Warningln(err)\n\t}\n}\n\n\/\/Unsubscribe from the given data stream\nfunc (c *WebsocketConnection) Unsubscribe(s, transform string) {\n\tlogger := c.logger.WithFields(log.Fields{\"cmd\": \"unsubscribe\", \"arg\": s})\n\tif val, ok := c.subscriptions[s]; ok {\n\t\tval.RemTransform(transform)\n\t\tif val.Size() == 0 {\n\t\t\tlogger.Debugln(\"Full unsubscribe\")\n\t\t\tval.Close()\n\t\t\tdelete(c.subscriptions, s)\n\t\t} else {\n\t\t\tlogger.Debugln(\"transform subscription removed\")\n\t\t}\n\n\t} else {\n\t\tlogger.Warningln(\"subscription DNE\")\n\t}\n}\n\n\/\/UnsubscribeAll from all streams of data\nfunc (c *WebsocketConnection) UnsubscribeAll() {\n\tc.logger.WithField(\"cmd\", \"unsubscribeALL\").Debugln()\n\tfor _, val := range c.subscriptions {\n\t\tval.Close()\n\t}\n\tc.subscriptions = make(map[string]*Subscription)\n}\n\n\/\/A command is a cmd and the arg operation\ntype websocketCommand struct {\n\tCmd string\n\tArg string\n\tTransform string \/\/Allows subscribing with a transform\n\n\tD []datastream.Datapoint \/\/If the command is \"insert\", it needs an additional datapoint\n}\n\n\/\/RunReader runs the reading routine. It also maps the commands to actual subscriptions\nfunc (c *WebsocketConnection) RunReader(readmessenger chan string) {\n\n\t\/\/Set up the heartbeat reader(makes sure that sockets are alive)\n\tc.ws.SetReadDeadline(time.Now().Add(pongWait))\n\tc.ws.SetPongHandler(func(string) error {\n\t\t\/\/c.logger.WithField(\"cmd\", \"PingPong\").Debugln()\n\t\tc.ws.SetReadDeadline(time.Now().Add(pongWait))\n\t\treturn nil\n\t})\n\n\tvar cmd websocketCommand\n\tfor {\n\t\terr := c.ws.ReadJSON(&cmd)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\treadmessenger <- webSocketClosed\n\t\t\t\treturn \/\/On EOF, do nothing - it is just a close\n\t\t\t}\n\t\t\tc.logger.Warningln(err)\n\t\t\tbreak\n\t\t}\n\t\tswitch cmd.Cmd {\n\t\tdefault:\n\t\t\tc.logger.Warningln(\"Command not recognized:\", cmd.Cmd)\n\t\t\t\/\/Do nothing - the command is not recognized\n\t\tcase \"insert\":\n\t\t\tc.Insert(&cmd)\n\t\tcase \"subscribe\":\n\t\t\tc.Subscribe(cmd.Arg, cmd.Transform)\n\t\tcase \"unsubscribe\":\n\t\t\tc.Unsubscribe(cmd.Arg, cmd.Transform)\n\t\tcase \"unsubscribe_all\":\n\t\t\tc.UnsubscribeAll()\n\t\t}\n\t}\n\t\/\/Since the reader is exiting, notify the writer to send close message\n\treadmessenger <- webSocketClosedNonClean\n}\n\n\/\/RunWriter writes the subscription data as well as the heartbeat pings.\nfunc (c *WebsocketConnection) RunWriter(readmessenger chan string, exitchan chan bool) {\n\tticker := time.NewTicker(pingPeriod)\n\tdefer ticker.Stop()\nloop:\n\tfor {\n\t\tselect {\n\t\tcase dp, ok := <-c.c:\n\t\t\tif !ok {\n\t\t\t\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\t\tc.ws.WriteMessage(websocket.CloseMessage, []byte{})\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\tc.logger.WithFields(log.Fields{\"cmd\": \"MSG\", \"arg\": dp.Stream}).Debugln()\n\n\t\t\t\/\/Now loop through all transforms for the datapoint array\n\t\t\tsubs, ok := c.subscriptions[dp.Stream]\n\t\t\tif ok {\n\t\t\t\tsubs.Lock()\n\t\t\t\tfor transform, tf := range subs.transform {\n\t\t\t\t\tif transform == \"\" {\n\t\t\t\t\t\tlog.Debugf(\"wrote (no transform)\")\n\t\t\t\t\t\tif err := c.write(dp); err != nil {\n\t\t\t\t\t\t\tbreak loop\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tdpa, err := query.TransformArray(tf, &dp.Data)\n\t\t\t\t\t\tlog.Debugf(\"Wrote: %s\", transform)\n\t\t\t\t\t\tif err == nil && dpa.Length() > 0 {\n\t\t\t\t\t\t\tif err := c.write(messenger.Message{\n\t\t\t\t\t\t\t\tStream: dp.Stream,\n\t\t\t\t\t\t\t\tTransform: transform,\n\t\t\t\t\t\t\t\tData: *dpa,\n\t\t\t\t\t\t\t}); err != nil {\n\t\t\t\t\t\t\t\tbreak loop\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t\tsubs.Unlock()\n\t\t\t}\n\t\t\tif err := c.write(dp); err != nil {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\t\/\/c.logger.WithField(\"cmd\", \"PING\").Debugln()\n\t\t\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tif err := c.ws.WriteMessage(websocket.PingMessage, []byte{}); err != nil {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\tcase msg := <-readmessenger:\n\t\t\tif msg == webSocketClosed {\n\t\t\t\tbreak loop\n\t\t\t} else if msg == webSocketClosedNonClean {\n\t\t\t\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\t\tc.ws.WriteMessage(websocket.CloseMessage, []byte{})\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tc.ws.WriteMessage(websocket.TextMessage, []byte(msg))\n\t\tcase <-restcore.ShutdownChannel:\n\t\t\trestcore.ShutdownChannel <- true\n\t\t\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tc.ws.WriteMessage(websocket.CloseMessage, []byte{})\n\t\t\tbreak loop\n\t\t}\n\t}\n\texitchan <- true\n}\n\n\/\/Run the websocket operations\nfunc (c *WebsocketConnection) Run() error {\n\tc.logger.Debugln(\"Running websocket...\")\n\twebsocketWaitGroup.Add(1)\n\n\t\/\/The reader can communicate with the writer through the channel\n\tmsgchn := make(chan string, 1)\n\texitchan := make(chan bool, 1)\n\tgo c.RunWriter(msgchn, exitchan)\n\tc.RunReader(msgchn)\n\t\/\/Wait for writer to exit, or for the exit timeout to happen\n\tgo func() {\n\t\ttime.Sleep(writeWait)\n\t\texitchan <- false\n\t}()\n\n\tif !<-exitchan {\n\t\tc.logger.Error(\"writer exit timeout\")\n\t}\n\twebsocketWaitGroup.Done()\n\treturn nil\n}\n\n\/\/RunWebsocket runs the websocket handler\nfunc RunWebsocket(o operator.Operator, writer http.ResponseWriter, request *http.Request, logger *log.Entry) (int, string) {\n\tconn, err := NewWebsocketConnection(o, writer, request, logger)\n\tif err != nil {\n\t\twriter.WriteHeader(http.StatusInternalServerError)\n\t\treturn 3, err.Error()\n\t}\n\tdefer conn.Close()\n\terr = conn.Run()\n\tif err != nil {\n\t\treturn 2, err.Error()\n\t}\n\treturn 0, \"\"\n}\n<commit_msg>Small fixes to websocket<commit_after>package rest\n\nimport (\n\t\"connectordb\/plugins\/rest\/restcore\"\n\t\"connectordb\/streamdb\/datastream\"\n\t\"connectordb\/streamdb\/operator\"\n\t\"connectordb\/streamdb\/operator\/messenger\"\n\t\"connectordb\/streamdb\/query\"\n\t\"connectordb\/streamdb\/query\/transforms\"\n\t\"errors\"\n\t\"io\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/nats-io\/nats\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n)\n\nconst (\n\t\/\/The max size of a websocket message\n\tmessageSizeLimit = 1 * restcore.Mb\n\n\t\/\/The time allowed to write a message\n\twriteWait = 2 * time.Second\n\n\t\/\/Ping pong stuff - making sure that the connection still exists\n\tpongWait = 60 * time.Second\n\tpingPeriod = (pongWait * 9) \/ 10\n\n\t\/\/The number of messages to buffer\n\tmessageBuffer = 3\n\n\twebSocketClosed = \"EXIT\"\n\twebSocketClosedNonClean = \"@EXIT\"\n)\n\n\/\/The websocket upgrader\nvar (\n\tupgrader = websocket.Upgrader{\n\t\tReadBufferSize: 1024,\n\t\tWriteBufferSize: 1024,\n\t\t\/\/ Allow from all origins\n\t\tCheckOrigin: func(r *http.Request) bool { return true },\n\t}\n\n\t\/\/websocketWaitGroup is the WaitGroup of websockets that are currently open\n\twebsocketWaitGroup = sync.WaitGroup{}\n)\n\ntype Subscription struct {\n\tsync.Mutex \/\/The transform mutex\n\n\tnats *nats.Subscription \/\/The nats subscription\n\n\ttransform map[string]transforms.DatapointTransform \/\/the transforms associated with the subscription - this allows us to run transforms on the data!\n}\n\nfunc NewSubscription(subs *nats.Subscription) *Subscription {\n\treturn &Subscription{\n\t\tnats: subs,\n\t\ttransform: make(map[string]transforms.DatapointTransform),\n\t}\n}\n\n\/\/Close shuts down the subscription\nfunc (s *Subscription) Close() {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.nats.Unsubscribe()\n}\n\n\/\/Size is the number of subscriptions to the stream (using different transforms)\nfunc (s *Subscription) Size() int {\n\ts.Lock()\n\tdefer s.Unlock()\n\treturn len(s.transform)\n}\n\n\/\/Add a transform subscription to the string\nfunc (s *Subscription) AddTransform(transform string) (err error) {\n\tif _, ok := s.transform[transform]; ok {\n\t\treturn errors.New(\"Subscription to the transform already exists\")\n\t}\n\n\t\/\/First, attempt to generate the transform\n\tvar t transforms.DatapointTransform\n\tif transform != \"\" {\n\t\tt, err = transforms.NewTransformPipeline(transform)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ts.Lock()\n\ts.transform[transform] = t\n\ts.Unlock()\n\n\treturn nil\n}\n\n\/\/RemTransform deletes a transform from the subscriptions\nfunc (s *Subscription) RemTransform(transform string) (err error) {\n\ts.Lock()\n\tdelete(s.transform, transform)\n\ts.Unlock()\n\treturn nil\n}\n\n\/\/WebsocketConnection is the general connection with a websocket that is run.\n\/\/Loosely based on github.com\/gorilla\/websocket\/blob\/master\/examples\/chat\/conn.go\n\/\/No need for mutex because only reader reads and implements commands\ntype WebsocketConnection struct {\n\tws *websocket.Conn\n\n\tsubscriptions map[string]*Subscription\n\n\tc chan messenger.Message\n\n\tlogger *log.Entry \/\/logrus uses a mutex internally\n\to operator.Operator\n}\n\n\/\/NewWebsocketConnection creates a new websocket connection based on the operators and stuff\nfunc NewWebsocketConnection(o operator.Operator, writer http.ResponseWriter, request *http.Request, logger *log.Entry) (*WebsocketConnection, error) {\n\n\tws, err := upgrader.Upgrade(writer, request, nil)\n\tif err != nil {\n\t\tlogger.Errorln(err)\n\t\treturn nil, err\n\t}\n\n\tws.SetReadLimit(messageSizeLimit)\n\n\treturn &WebsocketConnection{ws, make(map[string]*Subscription), make(chan messenger.Message, messageBuffer), logger, o}, nil\n}\n\nfunc (c *WebsocketConnection) write(obj interface{}) error {\n\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\treturn c.ws.WriteJSON(obj)\n}\n\n\/\/Close the websocket connection\nfunc (c *WebsocketConnection) Close() {\n\tc.UnsubscribeAll()\n\tclose(c.c)\n\tc.ws.Close()\n\tc.logger.WithField(\"cmd\", \"close\").Debugln()\n}\n\n\/\/Insert a datapoint using the websocket\nfunc (c *WebsocketConnection) Insert(ws *websocketCommand) {\n\tlogger := c.logger.WithFields(log.Fields{\"cmd\": \"insert\", \"arg\": ws.Arg})\n\tlogger.Debugln(\"-> insert \", len(ws.D), \"dp\")\n\terr := c.o.InsertStream(ws.Arg, ws.D, true)\n\tif err != nil {\n\t\t\/\/TODO: Notify user of insert failure\n\t\tlogger.Warn(err.Error())\n\t} else {\n\t\tatomic.AddUint32(&restcore.StatsInserts, uint32(len(ws.D)))\n\t}\n}\n\n\/\/Subscribe to the given data stream\nfunc (c *WebsocketConnection) Subscribe(s, transform string) {\n\tlogger := c.logger.WithFields(log.Fields{\"cmd\": \"subscribe\", \"arg\": s})\n\n\t\/\/Next check if nats is subscribed\n\tif _, ok := c.subscriptions[s]; !ok {\n\t\tsubs, err := c.o.Subscribe(s, c.c)\n\t\tif err != nil {\n\t\t\tlogger.Warningln(err)\n\t\t} else {\n\n\t\t\tlogger.Debugln(\"Initializing subscription\")\n\t\t\tc.subscriptions[s] = NewSubscription(subs)\n\t\t}\n\t}\n\n\terr := c.subscriptions[s].AddTransform(transform)\n\tif err != nil {\n\t\tlogger.Warningln(err)\n\t}\n}\n\n\/\/Unsubscribe from the given data stream\nfunc (c *WebsocketConnection) Unsubscribe(s, transform string) {\n\tlogger := c.logger.WithFields(log.Fields{\"cmd\": \"unsubscribe\", \"arg\": s})\n\tif val, ok := c.subscriptions[s]; ok {\n\t\tval.RemTransform(transform)\n\t\tif val.Size() == 0 {\n\t\t\tlogger.Debugln(\"stop subscription\")\n\t\t\tval.Close()\n\t\t\tdelete(c.subscriptions, s)\n\t\t} else {\n\t\t\tlogger.Debugln()\n\t\t}\n\n\t} else {\n\t\tlogger.Warningln(\"subscription DNE\")\n\t}\n}\n\n\/\/UnsubscribeAll from all streams of data\nfunc (c *WebsocketConnection) UnsubscribeAll() {\n\tfor key, val := range c.subscriptions {\n\t\tc.logger.Debugf(\"Unsubscribe: %s\", key)\n\t\tval.Close()\n\t}\n\tc.subscriptions = make(map[string]*Subscription)\n}\n\n\/\/A command is a cmd and the arg operation\ntype websocketCommand struct {\n\tCmd string\n\tArg string\n\tTransform string \/\/Allows subscribing with a transform\n\n\tD []datastream.Datapoint \/\/If the command is \"insert\", it needs an additional datapoint\n}\n\n\/\/RunReader runs the reading routine. It also maps the commands to actual subscriptions\nfunc (c *WebsocketConnection) RunReader(readmessenger chan string) {\n\n\t\/\/Set up the heartbeat reader(makes sure that sockets are alive)\n\tc.ws.SetReadDeadline(time.Now().Add(pongWait))\n\tc.ws.SetPongHandler(func(string) error {\n\t\t\/\/c.logger.WithField(\"cmd\", \"PingPong\").Debugln()\n\t\tc.ws.SetReadDeadline(time.Now().Add(pongWait))\n\t\treturn nil\n\t})\n\n\tvar cmd websocketCommand\n\tfor {\n\t\terr := c.ws.ReadJSON(&cmd)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\treadmessenger <- webSocketClosed\n\t\t\t\treturn \/\/On EOF, do nothing - it is just a close\n\t\t\t}\n\t\t\tc.logger.Warningln(err)\n\t\t\tbreak\n\t\t}\n\t\tswitch cmd.Cmd {\n\t\tdefault:\n\t\t\tc.logger.Warningln(\"Command not recognized:\", cmd.Cmd)\n\t\t\t\/\/Do nothing - the command is not recognized\n\t\tcase \"insert\":\n\t\t\tc.Insert(&cmd)\n\t\tcase \"subscribe\":\n\t\t\tc.Subscribe(cmd.Arg, cmd.Transform)\n\t\tcase \"unsubscribe\":\n\t\t\tc.Unsubscribe(cmd.Arg, cmd.Transform)\n\t\tcase \"unsubscribe_all\":\n\t\t\tc.UnsubscribeAll()\n\t\t}\n\t}\n\t\/\/Since the reader is exiting, notify the writer to send close message\n\treadmessenger <- webSocketClosedNonClean\n}\n\n\/\/RunWriter writes the subscription data as well as the heartbeat pings.\nfunc (c *WebsocketConnection) RunWriter(readmessenger chan string, exitchan chan bool) {\n\tticker := time.NewTicker(pingPeriod)\n\tdefer ticker.Stop()\nloop:\n\tfor {\n\t\tselect {\n\t\tcase dp, ok := <-c.c:\n\t\t\tif !ok {\n\t\t\t\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\t\tc.ws.WriteMessage(websocket.CloseMessage, []byte{})\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\tlogger := c.logger.WithFields(log.Fields{\"stream\": dp.Stream})\n\n\t\t\t\/\/Now loop through all transforms for the datapoint array\n\t\t\tsubs, ok := c.subscriptions[dp.Stream]\n\t\t\tif ok {\n\t\t\t\tsubs.Lock()\n\t\t\t\tfor transform, tf := range subs.transform {\n\t\t\t\t\tif transform == \"\" {\n\t\t\t\t\t\tlogger.Debugln(\"<- send\")\n\t\t\t\t\t\tif err := c.write(dp); err != nil {\n\t\t\t\t\t\t\tbreak loop\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tdpa, err := query.TransformArray(tf, &dp.Data)\n\t\t\t\t\t\tlogger.Debugf(\"<- send %s\", transform)\n\t\t\t\t\t\tif err == nil && dpa.Length() > 0 {\n\t\t\t\t\t\t\tif err := c.write(messenger.Message{\n\t\t\t\t\t\t\t\tStream: dp.Stream,\n\t\t\t\t\t\t\t\tTransform: transform,\n\t\t\t\t\t\t\t\tData: *dpa,\n\t\t\t\t\t\t\t}); err != nil {\n\t\t\t\t\t\t\t\tbreak loop\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t\tsubs.Unlock()\n\t\t\t}\n\t\t\tif err := c.write(dp); err != nil {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\tcase <-ticker.C:\n\t\t\t\/\/c.logger.WithField(\"cmd\", \"PING\").Debugln()\n\t\t\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tif err := c.ws.WriteMessage(websocket.PingMessage, []byte{}); err != nil {\n\t\t\t\tbreak loop\n\t\t\t}\n\t\tcase msg := <-readmessenger:\n\t\t\tif msg == webSocketClosed {\n\t\t\t\tbreak loop\n\t\t\t} else if msg == webSocketClosedNonClean {\n\t\t\t\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\t\tc.ws.WriteMessage(websocket.CloseMessage, []byte{})\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tc.ws.WriteMessage(websocket.TextMessage, []byte(msg))\n\t\tcase <-restcore.ShutdownChannel:\n\t\t\trestcore.ShutdownChannel <- true\n\t\t\tc.ws.SetWriteDeadline(time.Now().Add(writeWait))\n\t\t\tc.ws.WriteMessage(websocket.CloseMessage, []byte{})\n\t\t\tbreak loop\n\t\t}\n\t}\n\texitchan <- true\n}\n\n\/\/Run the websocket operations\nfunc (c *WebsocketConnection) Run() error {\n\tc.logger.Debugln(\"Running websocket...\")\n\twebsocketWaitGroup.Add(1)\n\n\t\/\/The reader can communicate with the writer through the channel\n\tmsgchn := make(chan string, 1)\n\texitchan := make(chan bool, 1)\n\tgo c.RunWriter(msgchn, exitchan)\n\tc.RunReader(msgchn)\n\t\/\/Wait for writer to exit, or for the exit timeout to happen\n\tgo func() {\n\t\ttime.Sleep(writeWait)\n\t\texitchan <- false\n\t}()\n\n\tif !<-exitchan {\n\t\tc.logger.Error(\"writer exit timeout\")\n\t}\n\twebsocketWaitGroup.Done()\n\treturn nil\n}\n\n\/\/RunWebsocket runs the websocket handler\nfunc RunWebsocket(o operator.Operator, writer http.ResponseWriter, request *http.Request, logger *log.Entry) (int, string) {\n\tconn, err := NewWebsocketConnection(o, writer, request, logger)\n\tif err != nil {\n\t\twriter.WriteHeader(http.StatusInternalServerError)\n\t\treturn 3, err.Error()\n\t}\n\tdefer conn.Close()\n\terr = conn.Run()\n\tif err != nil {\n\t\treturn 2, err.Error()\n\t}\n\treturn 0, \"Websocket closed\"\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ errorcheck\n\n\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Issue 4468: go\/defer calls may not be parenthesized.\n\npackage p\n\nfunc F() {\n\tgo (F())\t\/\/ ERROR \"must be function call\"\n\tdefer (F())\t\/\/ ERROR \"must be function call\"\n}\n<commit_msg>test: add some tests where go\/defer arg starts with parenthesis<commit_after>\/\/ errorcheck\n\n\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Issue 4468: go\/defer calls may not be parenthesized.\n\npackage p\n\ntype T int\n\nfunc (t *T) F() T {\n\treturn *t\n}\n\ntype S struct {\n\tt T\n}\n\nfunc F() {\n\tgo (F())\t\/\/ ERROR \"must be function call\"\n\tdefer (F())\t\/\/ ERROR \"must be function call\"\n\tvar s S\n\t(&s.t).F()\n\tgo (&s.t).F()\n\tdefer (&s.t).F()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Mesosphere, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage agent\n\nimport (\n\t\"time\"\n\n\t\"github.com\/dcos\/dcos-metrics\/producers\"\n)\n\nconst (\n\t\/\/ Container tagging constants\n\tCONTAINERID = \"container_id\"\n\tSOURCE = \"source\"\n\tFRAMEWORKID = \"framework_id\"\n\tEXECUTORID = \"executor_id\"\n\tEXECUTORNAME = \"executor_name\"\n\n\t\/\/ Container unit constants\n\tSECONDS = \"seconds\"\n\tCOUNT = \"count\"\n\tBYTES = \"bytes\"\n)\n\nfunc (c *Collector) createContainerDatapoints(container agentContainer) []producers.Datapoint {\n\tts := thisTime()\n\tdps := []producers.Datapoint{}\n\n\tdpTags := map[string]string{\n\t\tCONTAINERID: container.ContainerID,\n\t\tSOURCE: container.Source,\n\t\tFRAMEWORKID: container.FrameworkID,\n\t\tEXECUTORID: container.ExecutorID,\n\t\tEXECUTORNAME: container.ExecutorName,\n\t}\n\n\tc.log.Debugf(\"Adding tags for container %s:\\n%+v\", container.ContainerID, dpTags)\n\n\taddDps := []producers.Datapoint{\n\t\tproducers.Datapoint{\n\t\t\tName: \"cpus.user.time\",\n\t\t\tValue: container.Statistics.CpusUserTimeSecs,\n\t\t\tUnit: SECONDS,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"cpus.system.time\",\n\t\t\tValue: container.Statistics.CpusSystemTimeSecs,\n\t\t\tUnit: SECONDS,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"cpus.limit\",\n\t\t\tValue: container.Statistics.CpusLimit,\n\t\t\tUnit: COUNT,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"cpus.throttled.time\",\n\t\t\tValue: container.Statistics.CpusThrottledTimeSecs,\n\t\t\tUnit: SECONDS,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"mem.total\",\n\t\t\tValue: container.Statistics.MemTotalBytes,\n\t\t\tUnit: BYTES,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"mem.limit\",\n\t\t\tValue: container.Statistics.MemLimitBytes,\n\t\t\tUnit: BYTES,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"disk.limit\",\n\t\t\tValue: container.Statistics.DiskLimitBytes,\n\t\t\tUnit: BYTES,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"disk.used\",\n\t\t\tValue: container.Statistics.DiskUsedBytes,\n\t\t\tUnit: BYTES,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"net.rx.packets\",\n\t\t\tValue: container.Statistics.NetRxPackets,\n\t\t\tUnit: COUNT,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"net.rx.bytes\",\n\t\t\tValue: container.Statistics.NetRxBytes,\n\t\t\tUnit: BYTES,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"net.rx.errors\",\n\t\t\tValue: container.Statistics.NetRxErrors,\n\t\t\tUnit: COUNT,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"net.rx.dropped\",\n\t\t\tValue: container.Statistics.NetRxDropped,\n\t\t\tUnit: COUNT,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"net.tx.packets\",\n\t\t\tValue: container.Statistics.NetRxPackets,\n\t\t\tUnit: COUNT,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"net.tx.bytes\",\n\t\t\tValue: container.Statistics.NetRxBytes,\n\t\t\tUnit: BYTES,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"net.tx.errors\",\n\t\t\tValue: container.Statistics.NetRxErrors,\n\t\t\tUnit: COUNT,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"net.tx.dropped\",\n\t\t\tValue: container.Statistics.NetRxDropped,\n\t\t\tUnit: COUNT,\n\t\t\tTags: dpTags,\n\t\t},\n\t}\n\n\tfor _, dp := range addDps {\n\t\tdp.Timestamp = ts\n\t\tdps = append(dps, dp)\n\t}\n\n\treturn dps\n}\n\n\/\/ -- helpers\n\nfunc thisTime() string {\n\treturn time.Now().UTC().Format(time.RFC3339Nano)\n}\n<commit_msg>Fixed linter issues reported for collectors\/mesos\/agent\/metrics.go.<commit_after>\/\/ Copyright 2016 Mesosphere, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage agent\n\nimport (\n\t\"time\"\n\n\t\"github.com\/dcos\/dcos-metrics\/producers\"\n)\n\nconst (\n\t\/\/ Container tagging constants\n\n\tcontainerID = \"container_id\"\n\tsource = \"source\"\n\tframeworkID = \"framework_id\"\n\texecutorID = \"executor_id\"\n\texecutorName = \"executor_name\"\n\n\t\/\/ Container unit constants\n\tseconds = \"seconds\"\n\tcount = \"count\"\n\tbytes = \"bytes\"\n)\n\nfunc (c *Collector) createContainerDatapoints(container agentContainer) []producers.Datapoint {\n\tts := thisTime()\n\tdps := []producers.Datapoint{}\n\n\tdpTags := map[string]string{\n\t\tcontainerID: container.ContainerID,\n\t\tsource: container.Source,\n\t\tframeworkID: container.FrameworkID,\n\t\texecutorID: container.ExecutorID,\n\t\texecutorName: container.ExecutorName,\n\t}\n\n\tc.log.Debugf(\"Adding tags for container %s:\\n%+v\", container.ContainerID, dpTags)\n\n\taddDps := []producers.Datapoint{\n\t\tproducers.Datapoint{\n\t\t\tName: \"cpus.user.time\",\n\t\t\tValue: container.Statistics.CpusUserTimeSecs,\n\t\t\tUnit: seconds,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"cpus.system.time\",\n\t\t\tValue: container.Statistics.CpusSystemTimeSecs,\n\t\t\tUnit: seconds,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"cpus.limit\",\n\t\t\tValue: container.Statistics.CpusLimit,\n\t\t\tUnit: count,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"cpus.throttled.time\",\n\t\t\tValue: container.Statistics.CpusThrottledTimeSecs,\n\t\t\tUnit: seconds,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"mem.total\",\n\t\t\tValue: container.Statistics.MemTotalBytes,\n\t\t\tUnit: bytes,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"mem.limit\",\n\t\t\tValue: container.Statistics.MemLimitBytes,\n\t\t\tUnit: bytes,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"disk.limit\",\n\t\t\tValue: container.Statistics.DiskLimitBytes,\n\t\t\tUnit: bytes,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"disk.used\",\n\t\t\tValue: container.Statistics.DiskUsedBytes,\n\t\t\tUnit: bytes,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"net.rx.packets\",\n\t\t\tValue: container.Statistics.NetRxPackets,\n\t\t\tUnit: count,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"net.rx.bytes\",\n\t\t\tValue: container.Statistics.NetRxBytes,\n\t\t\tUnit: bytes,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"net.rx.errors\",\n\t\t\tValue: container.Statistics.NetRxErrors,\n\t\t\tUnit: count,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"net.rx.dropped\",\n\t\t\tValue: container.Statistics.NetRxDropped,\n\t\t\tUnit: count,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"net.tx.packets\",\n\t\t\tValue: container.Statistics.NetRxPackets,\n\t\t\tUnit: count,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"net.tx.bytes\",\n\t\t\tValue: container.Statistics.NetRxBytes,\n\t\t\tUnit: bytes,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"net.tx.errors\",\n\t\t\tValue: container.Statistics.NetRxErrors,\n\t\t\tUnit: count,\n\t\t\tTags: dpTags,\n\t\t},\n\t\tproducers.Datapoint{\n\t\t\tName: \"net.tx.dropped\",\n\t\t\tValue: container.Statistics.NetRxDropped,\n\t\t\tUnit: count,\n\t\t\tTags: dpTags,\n\t\t},\n\t}\n\n\tfor _, dp := range addDps {\n\t\tdp.Timestamp = ts\n\t\tdps = append(dps, dp)\n\t}\n\n\treturn dps\n}\n\n\/\/ -- helpers\n\nfunc thisTime() string {\n\treturn time.Now().UTC().Format(time.RFC3339Nano)\n}\n<|endoftext|>"} {"text":"<commit_before>package client\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"github.com\/Clever\/inter-service-api-testing\/codegen-poc\/generated\/models\"\n\t\"golang.org\/x\/net\/context\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar _ = json.Marshal\nvar _ = strings.Replace\n\nvar _ = strconv.FormatInt\n\ntype Client struct {\n\tBasePath string\n\trequestDoer doer\n\ttransport *http.Transport\n\t\/\/ Keep the retry doer around so that we can set the number of retries\n\tretryDoer retryDoer\n}\n\n\/\/ NewClient creates a new client. The base path and http transport are configurable\nfunc NewClient(basePath string) Client {\n\tbase := baseDoer{}\n\ttracing := tracingDoer{d: base}\n\tretry := retryDoer{d: tracing, defaultRetries: 1}\n\n\treturn Client{requestDoer: retry, retryDoer: retry, transport: nil, BasePath: basePath}\n}\n\nfunc (c Client) WithRetries(retries int) Client {\n\tc.retryDoer.defaultRetries = retries\n\treturn c\n}\n\nfunc (c Client) GetBooks(ctx context.Context, i *models.GetBooksInput) (models.GetBooksOutput, error) {\n\tpath := c.BasePath + \"\/v1\/books\"\n\turlVals := url.Values{}\n\tvar body []byte\n\n\turlVals.Add(\"author\", i.Author)\n\turlVals.Add(\"available\", strconv.FormatBool(i.Available))\n\turlVals.Add(\"maxPages\", strconv.FormatFloat(i.MaxPages, 'E', -1, 64))\n\tpath = path + \"?\" + urlVals.Encode()\n\n\tclient := &http.Client{Transport: c.transport}\n\treq, _ := http.NewRequest(\"GET\", path, bytes.NewBuffer(body))\n\n\t\/\/ Add the opname for doers like tracing\n\tctx = context.WithValue(ctx, opNameCtx{}, \"getBooks\")\n\tresp, err := c.requestDoer.Do(ctx, client, req)\n\tif err != nil {\n\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t}\n\tswitch resp.StatusCode {\n\tcase 200:\n\n\t\tvar output models.GetBooks200Output\n\t\tif err := json.NewDecoder(resp.Body).Decode(&output); err != nil {\n\t\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t\t}\n\t\treturn output, nil\n\tcase 400:\n\n\t\tvar output models.DefaultBadRequest\n\t\tif err := json.NewDecoder(resp.Body).Decode(&output); err != nil {\n\t\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t\t}\n\t\treturn nil, output\n\tcase 500:\n\n\t\tvar output models.DefaultInternalError\n\t\tif err := json.NewDecoder(resp.Body).Decode(&output); err != nil {\n\t\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t\t}\n\t\treturn nil, output\n\tdefault:\n\t\treturn nil, models.DefaultInternalError{Msg: \"Unknown response\"}\n\t}\n}\n\nfunc (c Client) GetBookByID(ctx context.Context, i *models.GetBookByIDInput) (models.GetBookByIDOutput, error) {\n\tpath := c.BasePath + \"\/v1\/books\/{bookID}\"\n\turlVals := url.Values{}\n\tvar body []byte\n\n\tpath = strings.Replace(path, \"{bookID}\", strconv.FormatInt(i.BookID, 10), -1)\n\tpath = path + \"?\" + urlVals.Encode()\n\n\tclient := &http.Client{Transport: c.transport}\n\treq, _ := http.NewRequest(\"GET\", path, bytes.NewBuffer(body))\n\treq.Header.Set(\"authorization\", i.Authorization)\n\n\t\/\/ Add the opname for doers like tracing\n\tctx = context.WithValue(ctx, opNameCtx{}, \"getBookByID\")\n\tresp, err := c.requestDoer.Do(ctx, client, req)\n\tif err != nil {\n\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t}\n\tswitch resp.StatusCode {\n\tcase 200:\n\n\t\tvar output models.GetBookByID200Output\n\t\tif err := json.NewDecoder(resp.Body).Decode(&output); err != nil {\n\t\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t\t}\n\t\treturn output, nil\n\tcase 204:\n\t\tvar output models.GetBookByID204Output\n\t\treturn output, nil\n\tcase 401:\n\t\tvar output models.GetBookByID401Output\n\t\treturn nil, output\n\tcase 404:\n\t\treturn nil, models.GetBookByID404Output{}\n\tcase 400:\n\n\t\tvar output models.DefaultBadRequest\n\t\tif err := json.NewDecoder(resp.Body).Decode(&output); err != nil {\n\t\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t\t}\n\t\treturn nil, output\n\tcase 500:\n\n\t\tvar output models.DefaultInternalError\n\t\tif err := json.NewDecoder(resp.Body).Decode(&output); err != nil {\n\t\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t\t}\n\t\treturn nil, output\n\tdefault:\n\t\treturn nil, models.DefaultInternalError{Msg: \"Unknown response\"}\n\t}\n}\n\nfunc (c Client) CreateBook(ctx context.Context, i *models.CreateBookInput) (models.CreateBookOutput, error) {\n\tpath := c.BasePath + \"\/v1\/books\/{bookID}\"\n\turlVals := url.Values{}\n\tvar body []byte\n\n\tpath = path + \"?\" + urlVals.Encode()\n\n\tbody, _ = json.Marshal(i.NewBook)\n\n\tclient := &http.Client{Transport: c.transport}\n\treq, _ := http.NewRequest(\"POST\", path, bytes.NewBuffer(body))\n\n\t\/\/ Add the opname for doers like tracing\n\tctx = context.WithValue(ctx, opNameCtx{}, \"createBook\")\n\tresp, err := c.requestDoer.Do(ctx, client, req)\n\tif err != nil {\n\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t}\n\tswitch resp.StatusCode {\n\tcase 200:\n\n\t\tvar output models.CreateBook200Output\n\t\tif err := json.NewDecoder(resp.Body).Decode(&output); err != nil {\n\t\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t\t}\n\t\treturn output, nil\n\tcase 400:\n\n\t\tvar output models.DefaultBadRequest\n\t\tif err := json.NewDecoder(resp.Body).Decode(&output); err != nil {\n\t\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t\t}\n\t\treturn nil, output\n\tcase 500:\n\n\t\tvar output models.DefaultInternalError\n\t\tif err := json.NewDecoder(resp.Body).Decode(&output); err != nil {\n\t\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t\t}\n\t\treturn nil, output\n\tdefault:\n\t\treturn nil, models.DefaultInternalError{Msg: \"Unknown response\"}\n\t}\n}\n<commit_msg>Just client.New not client.NewClient<commit_after>package client\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/Clever\/inter-service-api-testing\/codegen-poc\/generated\/models\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar _ = json.Marshal\nvar _ = strings.Replace\n\nvar _ = strconv.FormatInt\n\ntype Client struct {\n\tBasePath string\n\trequestDoer doer\n\ttransport *http.Transport\n\t\/\/ Keep the retry doer around so that we can set the number of retries\n\tretryDoer retryDoer\n}\n\n\/\/ New creates a new client. The base path and http transport are configurable\nfunc New(basePath string) Client {\n\tbase := baseDoer{}\n\ttracing := tracingDoer{d: base}\n\tretry := retryDoer{d: tracing, defaultRetries: 1}\n\n\treturn Client{requestDoer: retry, retryDoer: retry, transport: nil, BasePath: basePath}\n}\n\nfunc (c Client) WithRetries(retries int) Client {\n\tc.retryDoer.defaultRetries = retries\n\treturn c\n}\n\nfunc (c Client) GetBooks(ctx context.Context, i *models.GetBooksInput) (models.GetBooksOutput, error) {\n\tpath := c.BasePath + \"\/v1\/books\"\n\turlVals := url.Values{}\n\tvar body []byte\n\n\turlVals.Add(\"author\", i.Author)\n\turlVals.Add(\"available\", strconv.FormatBool(i.Available))\n\turlVals.Add(\"maxPages\", strconv.FormatFloat(i.MaxPages, 'E', -1, 64))\n\tpath = path + \"?\" + urlVals.Encode()\n\n\tclient := &http.Client{Transport: c.transport}\n\treq, _ := http.NewRequest(\"GET\", path, bytes.NewBuffer(body))\n\n\t\/\/ Add the opname for doers like tracing\n\tctx = context.WithValue(ctx, opNameCtx{}, \"getBooks\")\n\tresp, err := c.requestDoer.Do(ctx, client, req)\n\tif err != nil {\n\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t}\n\tswitch resp.StatusCode {\n\tcase 200:\n\n\t\tvar output models.GetBooks200Output\n\t\tif err := json.NewDecoder(resp.Body).Decode(&output); err != nil {\n\t\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t\t}\n\t\treturn output, nil\n\tcase 400:\n\n\t\tvar output models.DefaultBadRequest\n\t\tif err := json.NewDecoder(resp.Body).Decode(&output); err != nil {\n\t\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t\t}\n\t\treturn nil, output\n\tcase 500:\n\n\t\tvar output models.DefaultInternalError\n\t\tif err := json.NewDecoder(resp.Body).Decode(&output); err != nil {\n\t\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t\t}\n\t\treturn nil, output\n\tdefault:\n\t\treturn nil, models.DefaultInternalError{Msg: \"Unknown response\"}\n\t}\n}\n\nfunc (c Client) GetBookByID(ctx context.Context, i *models.GetBookByIDInput) (models.GetBookByIDOutput, error) {\n\tpath := c.BasePath + \"\/v1\/books\/{bookID}\"\n\turlVals := url.Values{}\n\tvar body []byte\n\n\tpath = strings.Replace(path, \"{bookID}\", strconv.FormatInt(i.BookID, 10), -1)\n\tpath = path + \"?\" + urlVals.Encode()\n\n\tclient := &http.Client{Transport: c.transport}\n\treq, _ := http.NewRequest(\"GET\", path, bytes.NewBuffer(body))\n\treq.Header.Set(\"authorization\", i.Authorization)\n\n\t\/\/ Add the opname for doers like tracing\n\tctx = context.WithValue(ctx, opNameCtx{}, \"getBookByID\")\n\tresp, err := c.requestDoer.Do(ctx, client, req)\n\tif err != nil {\n\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t}\n\tswitch resp.StatusCode {\n\tcase 200:\n\n\t\tvar output models.GetBookByID200Output\n\t\tif err := json.NewDecoder(resp.Body).Decode(&output); err != nil {\n\t\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t\t}\n\t\treturn output, nil\n\tcase 204:\n\t\tvar output models.GetBookByID204Output\n\t\treturn output, nil\n\tcase 401:\n\t\tvar output models.GetBookByID401Output\n\t\treturn nil, output\n\tcase 404:\n\t\treturn nil, models.GetBookByID404Output{}\n\tcase 400:\n\n\t\tvar output models.DefaultBadRequest\n\t\tif err := json.NewDecoder(resp.Body).Decode(&output); err != nil {\n\t\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t\t}\n\t\treturn nil, output\n\tcase 500:\n\n\t\tvar output models.DefaultInternalError\n\t\tif err := json.NewDecoder(resp.Body).Decode(&output); err != nil {\n\t\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t\t}\n\t\treturn nil, output\n\tdefault:\n\t\treturn nil, models.DefaultInternalError{Msg: \"Unknown response\"}\n\t}\n}\n\nfunc (c Client) CreateBook(ctx context.Context, i *models.CreateBookInput) (models.CreateBookOutput, error) {\n\tpath := c.BasePath + \"\/v1\/books\/{bookID}\"\n\turlVals := url.Values{}\n\tvar body []byte\n\n\tpath = path + \"?\" + urlVals.Encode()\n\n\tbody, _ = json.Marshal(i.NewBook)\n\n\tclient := &http.Client{Transport: c.transport}\n\treq, _ := http.NewRequest(\"POST\", path, bytes.NewBuffer(body))\n\n\t\/\/ Add the opname for doers like tracing\n\tctx = context.WithValue(ctx, opNameCtx{}, \"createBook\")\n\tresp, err := c.requestDoer.Do(ctx, client, req)\n\tif err != nil {\n\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t}\n\tswitch resp.StatusCode {\n\tcase 200:\n\n\t\tvar output models.CreateBook200Output\n\t\tif err := json.NewDecoder(resp.Body).Decode(&output); err != nil {\n\t\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t\t}\n\t\treturn output, nil\n\tcase 400:\n\n\t\tvar output models.DefaultBadRequest\n\t\tif err := json.NewDecoder(resp.Body).Decode(&output); err != nil {\n\t\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t\t}\n\t\treturn nil, output\n\tcase 500:\n\n\t\tvar output models.DefaultInternalError\n\t\tif err := json.NewDecoder(resp.Body).Decode(&output); err != nil {\n\t\t\treturn nil, models.DefaultInternalError{Msg: err.Error()}\n\t\t}\n\t\treturn nil, output\n\tdefault:\n\t\treturn nil, models.DefaultInternalError{Msg: \"Unknown response\"}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package irc\n\nimport (\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc (irc *Connection) AddCallback(eventcode string, callback func(*Event)) {\n\teventcode = strings.ToUpper(eventcode)\n\n\tif _, ok := irc.events[eventcode]; ok {\n\t\tirc.events[eventcode] = append(irc.events[eventcode], callback)\n\n\t} else {\n\t\tirc.events[eventcode] = make([]func(*Event), 1)\n\t\tirc.events[eventcode][0] = callback\n\t}\n}\n\nfunc (irc *Connection) ReplaceCallback(eventcode string, i int, callback func(*Event)) {\n\teventcode = strings.ToUpper(eventcode)\n\n\tif event, ok := irc.events[eventcode]; ok {\n\t\tif i < len(event) {\n\t\t\tevent[i] = callback\n\t\t\treturn\n\t\t}\n\t\tirc.Log.Printf(\"Event found, but no callback found at index %d. Use AddCallback\\n\", i)\n\t\treturn\n\t}\n\tirc.Log.Printf(\"Event not found. Use AddCallBack\\n\")\n}\n\nfunc (irc *Connection) RunCallbacks(event *Event) {\n\tif event.Code == \"PRIVMSG\" && len(event.Message) > 0 && event.Message[0] == '\\x01' {\n\t\tevent.Code = \"CTCP\" \/\/Unknown CTCP\n\n\t\tif i := strings.LastIndex(event.Message, \"\\x01\"); i > -1 {\n\t\t\tevent.Message = event.Message[1:i]\n\t\t}\n\n\t\tif event.Message == \"VERSION\" {\n\t\t\tevent.Code = \"CTCP_VERSION\"\n\n\t\t} else if event.Message == \"TIME\" {\n\t\t\tevent.Code = \"CTCP_TIME\"\n\n\t\t} else if event.Message[0:4] == \"PING\" {\n\t\t\tevent.Code = \"CTCP_PING\"\n\n\t\t} else if event.Message == \"USERINFO\" {\n\t\t\tevent.Code = \"CTCP_USERINFO\"\n\n\t\t} else if event.Message == \"CLIENTINFO\" {\n\t\t\tevent.Code = \"CTCP_CLIENTINFO\"\n\n\t\t} else if event.Message[0:6] == \"ACTION\" {\n\t\t\tevent.Code = \"CTCP_ACTION\"\n\t\t\tevent.Message = event.Message[7:]\n\n\t\t}\n\t}\n\n\tif callbacks, ok := irc.events[event.Code]; ok {\n\t\tif irc.VerboseCallbackHandler {\n\t\t\tirc.Log.Printf(\"%v (%v) >> %#v\\n\", event.Code, len(callbacks), event)\n\t\t}\n\n\t\tfor _, callback := range callbacks {\n\t\t\tgo callback(event)\n\t\t}\n\n\t} else if irc.VerboseCallbackHandler {\n\t\tirc.Log.Printf(\"%v (0) >> %#v\\n\", event.Code, event)\n\t}\n}\n\nfunc (irc *Connection) setupCallbacks() {\n\tirc.events = make(map[string][]func(*Event))\n\n\t\/\/Handle ping events\n\tirc.AddCallback(\"PING\", func(e *Event) { irc.SendRaw(\"PONG :\" + e.Message) })\n\n\t\/\/Version handler\n\tirc.AddCallback(\"CTCP_VERSION\", func(e *Event) {\n\t\tirc.SendRawf(\"NOTICE %s :\\x01VERSION %s\\x01\", e.Nick, irc.Version)\n\t})\n\n\tirc.AddCallback(\"CTCP_USERINFO\", func(e *Event) {\n\t\tirc.SendRawf(\"NOTICE %s :\\x01USERINFO %s\\x01\", e.Nick, irc.user)\n\t})\n\n\tirc.AddCallback(\"CTCP_CLIENTINFO\", func(e *Event) {\n\t\tirc.SendRawf(\"NOTICE %s :\\x01CLIENTINFO PING VERSION TIME USERINFO CLIENTINFO\\x01\", e.Nick)\n\t})\n\n\tirc.AddCallback(\"CTCP_TIME\", func(e *Event) {\n\t\tltime := time.Now()\n\t\tirc.SendRawf(\"NOTICE %s :\\x01TIME %s\\x01\", e.Nick, ltime.String())\n\t})\n\n\tirc.AddCallback(\"CTCP_PING\", func(e *Event) { irc.SendRawf(\"NOTICE %s :\\x01%s\\x01\", e.Nick, e.Message) })\n\n\tirc.AddCallback(\"437\", func(e *Event) {\n\t\tirc.nickcurrent = irc.nickcurrent + \"_\"\n\t\tirc.SendRawf(\"NICK %s\", irc.nickcurrent)\n\t})\n\n\tirc.AddCallback(\"433\", func(e *Event) {\n\t\tif len(irc.nickcurrent) > 8 {\n\t\t\tirc.nickcurrent = \"_\" + irc.nickcurrent\n\n\t\t} else {\n\t\t\tirc.nickcurrent = irc.nickcurrent + \"_\"\n\t\t}\n\t\tirc.SendRawf(\"NICK %s\", irc.nickcurrent)\n\t})\n\n\tirc.AddCallback(\"PONG\", func(e *Event) {\n\t\tns, _ := strconv.ParseInt(e.Message, 10, 64)\n\t\tdelta := time.Duration(time.Now().UnixNano() - ns)\n\t\tif irc.Debug {\n\t\t\tirc.Log.Printf(\"Lag: %vs\\n\", delta)\n\t\t}\n\t})\n\n\tirc.AddCallback(\"NICK\", func(e *Event) {\n\t\tif e.Nick == irc.nick {\n\t\t\tirc.nickcurrent = e.Message\n\t\t}\n\t})\n\n\tirc.AddCallback(\"001\", func(e *Event) {\n\t\tirc.nickcurrent = e.Arguments[0]\n\t})\n}\n<commit_msg>Added ability to get the index of a callback in the callback registry. Ability to process events against a 'wildcard' handler<commit_after>package irc\n\nimport (\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc (irc *Connection) AddCallback(eventcode string, callback func(*Event)) (idx int) {\n\teventcode = strings.ToUpper(eventcode)\n\n\tif _, ok := irc.events[eventcode]; ok {\n\t\tirc.events[eventcode] = append(irc.events[eventcode], callback)\n\t\tidx = len(irc.events[eventcode]) - 1\n\t} else {\n\t\tirc.events[eventcode] = make([]func(*Event), 1)\n\t\tirc.events[eventcode][0] = callback\n\t\tidx = 0\n\t}\n\treturn\n}\n\nfunc (irc *Connection) RemoveCallback(eventcode string, i int) {\n\teventcode = strings.ToUpper(eventcode)\n\n\tif event, ok := irc.events[eventcode]; ok {\n\t\tif i < len(event) {\n\t\t\tirc.events[eventcode] = append(event[:i], event[i+1:]...)\n\t\t\treturn\n\t\t}\n\t\tirc.Log.Printf(\"Event found, but no callback found at index %d.\\n\", i)\n\t\treturn\n\t}\n\tirc.Log.Printf(\"Event not found\\n\")\n}\n\nfunc (irc *Connection) ReplaceCallback(eventcode string, i int, callback func(*Event)) {\n\teventcode = strings.ToUpper(eventcode)\n\n\tif event, ok := irc.events[eventcode]; ok {\n\t\tif i < len(event) {\n\t\t\tevent[i] = callback\n\t\t\treturn\n\t\t}\n\t\tirc.Log.Printf(\"Event found, but no callback found at index %d. Use AddCallback\\n\", i)\n\t\treturn\n\t}\n\tirc.Log.Printf(\"Event not found. Use AddCallBack\\n\")\n}\n\nfunc (irc *Connection) RunCallbacks(event *Event) {\n\tif event.Code == \"PRIVMSG\" && len(event.Message) > 0 && event.Message[0] == '\\x01' {\n\t\tevent.Code = \"CTCP\" \/\/Unknown CTCP\n\n\t\tif i := strings.LastIndex(event.Message, \"\\x01\"); i > -1 {\n\t\t\tevent.Message = event.Message[1:i]\n\t\t}\n\n\t\tif event.Message == \"VERSION\" {\n\t\t\tevent.Code = \"CTCP_VERSION\"\n\n\t\t} else if event.Message == \"TIME\" {\n\t\t\tevent.Code = \"CTCP_TIME\"\n\n\t\t} else if event.Message[0:4] == \"PING\" {\n\t\t\tevent.Code = \"CTCP_PING\"\n\n\t\t} else if event.Message == \"USERINFO\" {\n\t\t\tevent.Code = \"CTCP_USERINFO\"\n\n\t\t} else if event.Message == \"CLIENTINFO\" {\n\t\t\tevent.Code = \"CTCP_CLIENTINFO\"\n\n\t\t} else if event.Message[0:6] == \"ACTION\" {\n\t\t\tevent.Code = \"CTCP_ACTION\"\n\t\t\tevent.Message = event.Message[7:]\n\n\t\t}\n\t}\n\n\tif callbacks, ok := irc.events[event.Code]; ok {\n\t\tif irc.VerboseCallbackHandler {\n\t\t\tirc.Log.Printf(\"%v (%v) >> %#v\\n\", event.Code, len(callbacks), event)\n\t\t}\n\n\t\tfor _, callback := range callbacks {\n\t\t\tgo callback(event)\n\t\t}\n\t} else if irc.VerboseCallbackHandler {\n\t\tirc.Log.Printf(\"%v (0) >> %#v\\n\", event.Code, event)\n\t}\n\n\tif callbacks, ok := irc.events[\"*\"]; ok {\n\t\tif irc.VerboseCallbackHandler {\n\t\t\tirc.Log.Printf(\"Wildcard %v (%v) >> %#v\\n\", event.Code, len(callbacks), event)\n\t\t}\n\n\t\tfor _, callback := range callbacks {\n\t\t\tgo callback(event)\n\t\t}\n\t}\n}\n\nfunc (irc *Connection) setupCallbacks() {\n\tirc.events = make(map[string][]func(*Event))\n\n\t\/\/Handle ping events\n\tirc.AddCallback(\"PING\", func(e *Event) { irc.SendRaw(\"PONG :\" + e.Message) })\n\n\t\/\/Version handler\n\tirc.AddCallback(\"CTCP_VERSION\", func(e *Event) {\n\t\tirc.SendRawf(\"NOTICE %s :\\x01VERSION %s\\x01\", e.Nick, irc.Version)\n\t})\n\n\tirc.AddCallback(\"CTCP_USERINFO\", func(e *Event) {\n\t\tirc.SendRawf(\"NOTICE %s :\\x01USERINFO %s\\x01\", e.Nick, irc.user)\n\t})\n\n\tirc.AddCallback(\"CTCP_CLIENTINFO\", func(e *Event) {\n\t\tirc.SendRawf(\"NOTICE %s :\\x01CLIENTINFO PING VERSION TIME USERINFO CLIENTINFO\\x01\", e.Nick)\n\t})\n\n\tirc.AddCallback(\"CTCP_TIME\", func(e *Event) {\n\t\tltime := time.Now()\n\t\tirc.SendRawf(\"NOTICE %s :\\x01TIME %s\\x01\", e.Nick, ltime.String())\n\t})\n\n\tirc.AddCallback(\"CTCP_PING\", func(e *Event) { irc.SendRawf(\"NOTICE %s :\\x01%s\\x01\", e.Nick, e.Message) })\n\n\tirc.AddCallback(\"437\", func(e *Event) {\n\t\tirc.nickcurrent = irc.nickcurrent + \"_\"\n\t\tirc.SendRawf(\"NICK %s\", irc.nickcurrent)\n\t})\n\n\tirc.AddCallback(\"433\", func(e *Event) {\n\t\tif len(irc.nickcurrent) > 8 {\n\t\t\tirc.nickcurrent = \"_\" + irc.nickcurrent\n\n\t\t} else {\n\t\t\tirc.nickcurrent = irc.nickcurrent + \"_\"\n\t\t}\n\t\tirc.SendRawf(\"NICK %s\", irc.nickcurrent)\n\t})\n\n\tirc.AddCallback(\"PONG\", func(e *Event) {\n\t\tns, _ := strconv.ParseInt(e.Message, 10, 64)\n\t\tdelta := time.Duration(time.Now().UnixNano() - ns)\n\t\tif irc.Debug {\n\t\t\tirc.Log.Printf(\"Lag: %vs\\n\", delta)\n\t\t}\n\t})\n\n\tirc.AddCallback(\"NICK\", func(e *Event) {\n\t\tif e.Nick == irc.nick {\n\t\t\tirc.nickcurrent = e.Message\n\t\t}\n\t})\n\n\tirc.AddCallback(\"001\", func(e *Event) {\n\t\tirc.nickcurrent = e.Arguments[0]\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage sqltypes\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\tquerypb \"vitess.io\/vitess\/go\/vt\/proto\/query\"\n\tvtrpcpb \"vitess.io\/vitess\/go\/vt\/proto\/vtrpc\"\n\t\"vitess.io\/vitess\/go\/vt\/vterrors\"\n)\n\nfunc TestResult(t *testing.T) {\n\tfields := []*querypb.Field{{\n\t\tName: \"col1\",\n\t\tType: VarChar,\n\t}, {\n\t\tName: \"col2\",\n\t\tType: Int64,\n\t}, {\n\t\tName: \"col3\",\n\t\tType: Float64,\n\t}}\n\tsqlResult := &Result{\n\t\tFields: fields,\n\t\tInsertID: 1,\n\t\tRowsAffected: 2,\n\t\tRows: [][]Value{{\n\t\t\tTestValue(VarChar, \"aa\"),\n\t\t\tTestValue(Int64, \"1\"),\n\t\t\tTestValue(Float64, \"2\"),\n\t\t}, {\n\t\t\tMakeTrusted(VarChar, []byte(\"bb\")),\n\t\t\tNULL,\n\t\t\tNULL,\n\t\t}},\n\t}\n\tp3Result := &querypb.QueryResult{\n\t\tFields: fields,\n\t\tInsertId: 1,\n\t\tRowsAffected: 2,\n\t\tRows: []*querypb.Row{{\n\t\t\tLengths: []int64{2, 1, 1},\n\t\t\tValues: []byte(\"aa12\"),\n\t\t}, {\n\t\t\tLengths: []int64{2, -1, -1},\n\t\t\tValues: []byte(\"bb\"),\n\t\t}},\n\t}\n\tp3converted := ResultToProto3(sqlResult)\n\tif !proto.Equal(p3converted, p3Result) {\n\t\tt.Errorf(\"P3:\\n%v, want\\n%v\", p3converted, p3Result)\n\t}\n\n\treverse := Proto3ToResult(p3Result)\n\tif !reverse.Equal(sqlResult) {\n\t\tt.Errorf(\"reverse:\\n%#v, want\\n%#v\", reverse, sqlResult)\n\t}\n\n\t\/\/ Test custom fields.\n\tfields[1].Type = VarBinary\n\tsqlResult.Rows[0][1] = TestValue(VarBinary, \"1\")\n\treverse = CustomProto3ToResult(fields, p3Result)\n\tif !reverse.Equal(sqlResult) {\n\t\tt.Errorf(\"reverse:\\n%#v, want\\n%#v\", reverse, sqlResult)\n\t}\n}\n\nfunc TestResults(t *testing.T) {\n\tfields1 := []*querypb.Field{{\n\t\tName: \"col1\",\n\t\tType: VarChar,\n\t}, {\n\t\tName: \"col2\",\n\t\tType: Int64,\n\t}, {\n\t\tName: \"col3\",\n\t\tType: Float64,\n\t}}\n\tfields2 := []*querypb.Field{{\n\t\tName: \"col11\",\n\t\tType: VarChar,\n\t}, {\n\t\tName: \"col12\",\n\t\tType: Int64,\n\t}, {\n\t\tName: \"col13\",\n\t\tType: Float64,\n\t}}\n\tsqlResults := []Result{{\n\t\tFields: fields1,\n\t\tInsertID: 1,\n\t\tRowsAffected: 2,\n\t\tRows: [][]Value{{\n\t\t\tTestValue(VarChar, \"aa\"),\n\t\t\tTestValue(Int64, \"1\"),\n\t\t\tTestValue(Float64, \"2\"),\n\t\t}},\n\t}, {\n\t\tFields: fields2,\n\t\tInsertID: 3,\n\t\tRowsAffected: 4,\n\t\tRows: [][]Value{{\n\t\t\tTestValue(VarChar, \"bb\"),\n\t\t\tTestValue(Int64, \"3\"),\n\t\t\tTestValue(Float64, \"4\"),\n\t\t}},\n\t}}\n\tp3Results := []*querypb.QueryResult{{\n\t\tFields: fields1,\n\t\tInsertId: 1,\n\t\tRowsAffected: 2,\n\t\tRows: []*querypb.Row{{\n\t\t\tLengths: []int64{2, 1, 1},\n\t\t\tValues: []byte(\"aa12\"),\n\t\t}},\n\t}, {\n\t\tFields: fields2,\n\t\tInsertId: 3,\n\t\tRowsAffected: 4,\n\t\tRows: []*querypb.Row{{\n\t\t\tLengths: []int64{2, 1, 1},\n\t\t\tValues: []byte(\"bb34\"),\n\t\t}},\n\t}}\n\tp3converted := ResultsToProto3(sqlResults)\n\tif !Proto3ResultsEqual(p3converted, p3Results) {\n\t\tt.Errorf(\"P3:\\n%v, want\\n%v\", p3converted, p3Results)\n\t}\n\n\treverse := Proto3ToResults(p3Results)\n\tif !ResultsEqual(reverse, sqlResults) {\n\t\tt.Errorf(\"reverse:\\n%#v, want\\n%#v\", reverse, sqlResults)\n\t}\n}\n\nfunc TestQueryReponses(t *testing.T) {\n\tfields1 := []*querypb.Field{{\n\t\tName: \"col1\",\n\t\tType: VarChar,\n\t}, {\n\t\tName: \"col2\",\n\t\tType: Int64,\n\t}, {\n\t\tName: \"col3\",\n\t\tType: Float64,\n\t}}\n\tfields2 := []*querypb.Field{{\n\t\tName: \"col11\",\n\t\tType: VarChar,\n\t}, {\n\t\tName: \"col12\",\n\t\tType: Int64,\n\t}, {\n\t\tName: \"col13\",\n\t\tType: Float64,\n\t}}\n\n\tqueryResponses := []QueryResponse{\n\t\t{\n\t\t\tQueryResult: &Result{\n\t\t\t\tFields: fields1,\n\t\t\t\tInsertID: 1,\n\t\t\t\tRowsAffected: 2,\n\t\t\t\tRows: [][]Value{{\n\t\t\t\t\tTestValue(VarChar, \"aa\"),\n\t\t\t\t\tTestValue(Int64, \"1\"),\n\t\t\t\t\tTestValue(Float64, \"2\"),\n\t\t\t\t}},\n\t\t\t},\n\t\t\tQueryError: nil,\n\t\t}, {\n\t\t\tQueryResult: &Result{\n\t\t\t\tFields: fields2,\n\t\t\t\tInsertID: 3,\n\t\t\t\tRowsAffected: 4,\n\t\t\t\tRows: [][]Value{{\n\t\t\t\t\tTestValue(VarChar, \"bb\"),\n\t\t\t\t\tTestValue(Int64, \"3\"),\n\t\t\t\t\tTestValue(Float64, \"4\"),\n\t\t\t\t}},\n\t\t\t},\n\t\t\tQueryError: nil,\n\t\t}, {\n\t\t\tQueryResult: nil,\n\t\t\tQueryError: vterrors.New(vtrpcpb.Code_DEADLINE_EXCEEDED, \"deadline exceeded\"),\n\t\t},\n\t}\n\n\tp3ResultWithError := []*querypb.ResultWithError{\n\t\t{\n\t\t\tError: nil,\n\t\t\tResult: &querypb.QueryResult{\n\t\t\t\tFields: fields1,\n\t\t\t\tInsertId: 1,\n\t\t\t\tRowsAffected: 2,\n\t\t\t\tRows: []*querypb.Row{{\n\t\t\t\t\tLengths: []int64{2, 1, 1},\n\t\t\t\t\tValues: []byte(\"aa12\"),\n\t\t\t\t}},\n\t\t\t},\n\t\t}, {\n\t\t\tError: nil,\n\t\t\tResult: &querypb.QueryResult{\n\t\t\t\tFields: fields2,\n\t\t\t\tInsertId: 3,\n\t\t\t\tRowsAffected: 4,\n\t\t\t\tRows: []*querypb.Row{{\n\t\t\t\t\tLengths: []int64{2, 1, 1},\n\t\t\t\t\tValues: []byte(\"bb34\"),\n\t\t\t\t}},\n\t\t\t},\n\t\t}, {\n\t\t\tError: &vtrpcpb.RPCError{\n\t\t\t\tLegacyCode: vtrpcpb.LegacyErrorCode_DEADLINE_EXCEEDED_LEGACY,\n\t\t\t\tMessage: \"deadline exceeded\",\n\t\t\t\tCode: vtrpcpb.Code_DEADLINE_EXCEEDED,\n\t\t\t},\n\t\t\tResult: nil,\n\t\t},\n\t}\n\tp3converted := QueryResponsesToProto3(queryResponses)\n\tif !Proto3QueryResponsesEqual(p3converted, p3ResultWithError) {\n\t\tt.Errorf(\"P3:\\n%v, want\\n%v\", p3converted, p3ResultWithError)\n\t}\n\n\treverse := Proto3ToQueryReponses(p3ResultWithError)\n\tif !QueryResponsesEqual(reverse, queryResponses) {\n\t\tt.Errorf(\"reverse:\\n%#v, want\\n%#v\", reverse, queryResponses)\n\t}\n}\n<commit_msg>Add unit test for func Proto3ValuesEqual.<commit_after>\/*\nCopyright 2019 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage sqltypes\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/stretchr\/testify\/require\"\n\tquerypb \"vitess.io\/vitess\/go\/vt\/proto\/query\"\n\tvtrpcpb \"vitess.io\/vitess\/go\/vt\/proto\/vtrpc\"\n\t\"vitess.io\/vitess\/go\/vt\/vterrors\"\n)\n\nfunc TestResult(t *testing.T) {\n\tfields := []*querypb.Field{{\n\t\tName: \"col1\",\n\t\tType: VarChar,\n\t}, {\n\t\tName: \"col2\",\n\t\tType: Int64,\n\t}, {\n\t\tName: \"col3\",\n\t\tType: Float64,\n\t}}\n\tsqlResult := &Result{\n\t\tFields: fields,\n\t\tInsertID: 1,\n\t\tRowsAffected: 2,\n\t\tRows: [][]Value{{\n\t\t\tTestValue(VarChar, \"aa\"),\n\t\t\tTestValue(Int64, \"1\"),\n\t\t\tTestValue(Float64, \"2\"),\n\t\t}, {\n\t\t\tMakeTrusted(VarChar, []byte(\"bb\")),\n\t\t\tNULL,\n\t\t\tNULL,\n\t\t}},\n\t}\n\tp3Result := &querypb.QueryResult{\n\t\tFields: fields,\n\t\tInsertId: 1,\n\t\tRowsAffected: 2,\n\t\tRows: []*querypb.Row{{\n\t\t\tLengths: []int64{2, 1, 1},\n\t\t\tValues: []byte(\"aa12\"),\n\t\t}, {\n\t\t\tLengths: []int64{2, -1, -1},\n\t\t\tValues: []byte(\"bb\"),\n\t\t}},\n\t}\n\tp3converted := ResultToProto3(sqlResult)\n\tif !proto.Equal(p3converted, p3Result) {\n\t\tt.Errorf(\"P3:\\n%v, want\\n%v\", p3converted, p3Result)\n\t}\n\n\treverse := Proto3ToResult(p3Result)\n\tif !reverse.Equal(sqlResult) {\n\t\tt.Errorf(\"reverse:\\n%#v, want\\n%#v\", reverse, sqlResult)\n\t}\n\n\t\/\/ Test custom fields.\n\tfields[1].Type = VarBinary\n\tsqlResult.Rows[0][1] = TestValue(VarBinary, \"1\")\n\treverse = CustomProto3ToResult(fields, p3Result)\n\tif !reverse.Equal(sqlResult) {\n\t\tt.Errorf(\"reverse:\\n%#v, want\\n%#v\", reverse, sqlResult)\n\t}\n}\n\nfunc TestResults(t *testing.T) {\n\tfields1 := []*querypb.Field{{\n\t\tName: \"col1\",\n\t\tType: VarChar,\n\t}, {\n\t\tName: \"col2\",\n\t\tType: Int64,\n\t}, {\n\t\tName: \"col3\",\n\t\tType: Float64,\n\t}}\n\tfields2 := []*querypb.Field{{\n\t\tName: \"col11\",\n\t\tType: VarChar,\n\t}, {\n\t\tName: \"col12\",\n\t\tType: Int64,\n\t}, {\n\t\tName: \"col13\",\n\t\tType: Float64,\n\t}}\n\tsqlResults := []Result{{\n\t\tFields: fields1,\n\t\tInsertID: 1,\n\t\tRowsAffected: 2,\n\t\tRows: [][]Value{{\n\t\t\tTestValue(VarChar, \"aa\"),\n\t\t\tTestValue(Int64, \"1\"),\n\t\t\tTestValue(Float64, \"2\"),\n\t\t}},\n\t}, {\n\t\tFields: fields2,\n\t\tInsertID: 3,\n\t\tRowsAffected: 4,\n\t\tRows: [][]Value{{\n\t\t\tTestValue(VarChar, \"bb\"),\n\t\t\tTestValue(Int64, \"3\"),\n\t\t\tTestValue(Float64, \"4\"),\n\t\t}},\n\t}}\n\tp3Results := []*querypb.QueryResult{{\n\t\tFields: fields1,\n\t\tInsertId: 1,\n\t\tRowsAffected: 2,\n\t\tRows: []*querypb.Row{{\n\t\t\tLengths: []int64{2, 1, 1},\n\t\t\tValues: []byte(\"aa12\"),\n\t\t}},\n\t}, {\n\t\tFields: fields2,\n\t\tInsertId: 3,\n\t\tRowsAffected: 4,\n\t\tRows: []*querypb.Row{{\n\t\t\tLengths: []int64{2, 1, 1},\n\t\t\tValues: []byte(\"bb34\"),\n\t\t}},\n\t}}\n\tp3converted := ResultsToProto3(sqlResults)\n\tif !Proto3ResultsEqual(p3converted, p3Results) {\n\t\tt.Errorf(\"P3:\\n%v, want\\n%v\", p3converted, p3Results)\n\t}\n\n\treverse := Proto3ToResults(p3Results)\n\tif !ResultsEqual(reverse, sqlResults) {\n\t\tt.Errorf(\"reverse:\\n%#v, want\\n%#v\", reverse, sqlResults)\n\t}\n}\n\nfunc TestQueryReponses(t *testing.T) {\n\tfields1 := []*querypb.Field{{\n\t\tName: \"col1\",\n\t\tType: VarChar,\n\t}, {\n\t\tName: \"col2\",\n\t\tType: Int64,\n\t}, {\n\t\tName: \"col3\",\n\t\tType: Float64,\n\t}}\n\tfields2 := []*querypb.Field{{\n\t\tName: \"col11\",\n\t\tType: VarChar,\n\t}, {\n\t\tName: \"col12\",\n\t\tType: Int64,\n\t}, {\n\t\tName: \"col13\",\n\t\tType: Float64,\n\t}}\n\n\tqueryResponses := []QueryResponse{\n\t\t{\n\t\t\tQueryResult: &Result{\n\t\t\t\tFields: fields1,\n\t\t\t\tInsertID: 1,\n\t\t\t\tRowsAffected: 2,\n\t\t\t\tRows: [][]Value{{\n\t\t\t\t\tTestValue(VarChar, \"aa\"),\n\t\t\t\t\tTestValue(Int64, \"1\"),\n\t\t\t\t\tTestValue(Float64, \"2\"),\n\t\t\t\t}},\n\t\t\t},\n\t\t\tQueryError: nil,\n\t\t}, {\n\t\t\tQueryResult: &Result{\n\t\t\t\tFields: fields2,\n\t\t\t\tInsertID: 3,\n\t\t\t\tRowsAffected: 4,\n\t\t\t\tRows: [][]Value{{\n\t\t\t\t\tTestValue(VarChar, \"bb\"),\n\t\t\t\t\tTestValue(Int64, \"3\"),\n\t\t\t\t\tTestValue(Float64, \"4\"),\n\t\t\t\t}},\n\t\t\t},\n\t\t\tQueryError: nil,\n\t\t}, {\n\t\t\tQueryResult: nil,\n\t\t\tQueryError: vterrors.New(vtrpcpb.Code_DEADLINE_EXCEEDED, \"deadline exceeded\"),\n\t\t},\n\t}\n\n\tp3ResultWithError := []*querypb.ResultWithError{\n\t\t{\n\t\t\tError: nil,\n\t\t\tResult: &querypb.QueryResult{\n\t\t\t\tFields: fields1,\n\t\t\t\tInsertId: 1,\n\t\t\t\tRowsAffected: 2,\n\t\t\t\tRows: []*querypb.Row{{\n\t\t\t\t\tLengths: []int64{2, 1, 1},\n\t\t\t\t\tValues: []byte(\"aa12\"),\n\t\t\t\t}},\n\t\t\t},\n\t\t}, {\n\t\t\tError: nil,\n\t\t\tResult: &querypb.QueryResult{\n\t\t\t\tFields: fields2,\n\t\t\t\tInsertId: 3,\n\t\t\t\tRowsAffected: 4,\n\t\t\t\tRows: []*querypb.Row{{\n\t\t\t\t\tLengths: []int64{2, 1, 1},\n\t\t\t\t\tValues: []byte(\"bb34\"),\n\t\t\t\t}},\n\t\t\t},\n\t\t}, {\n\t\t\tError: &vtrpcpb.RPCError{\n\t\t\t\tLegacyCode: vtrpcpb.LegacyErrorCode_DEADLINE_EXCEEDED_LEGACY,\n\t\t\t\tMessage: \"deadline exceeded\",\n\t\t\t\tCode: vtrpcpb.Code_DEADLINE_EXCEEDED,\n\t\t\t},\n\t\t\tResult: nil,\n\t\t},\n\t}\n\tp3converted := QueryResponsesToProto3(queryResponses)\n\tif !Proto3QueryResponsesEqual(p3converted, p3ResultWithError) {\n\t\tt.Errorf(\"P3:\\n%v, want\\n%v\", p3converted, p3ResultWithError)\n\t}\n\n\treverse := Proto3ToQueryReponses(p3ResultWithError)\n\tif !QueryResponsesEqual(reverse, queryResponses) {\n\t\tt.Errorf(\"reverse:\\n%#v, want\\n%#v\", reverse, queryResponses)\n\t}\n}\n\nfunc TestProto3ValuesEqual(t *testing.T) {\n\tfor _, tc := range []struct {\n\t\tv1, v2 []*querypb.Value\n\t\texpected bool\n\t}{\n\t\t{\n\t\t\tv1: []*querypb.Value{\n\t\t\t\t{\n\t\t\t\t\tType: 0,\n\t\t\t\t\tValue: []byte{0, 1},\n\t\t\t\t},\n\t\t\t},\n\t\t\tv2: []*querypb.Value{\n\t\t\t\t{\n\t\t\t\t\tType: 0,\n\t\t\t\t\tValue: []byte{0, 1},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: 1,\n\t\t\t\t\tValue: []byte{0, 1, 2},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tv1: []*querypb.Value{\n\t\t\t\t{\n\t\t\t\t\tType: 0,\n\t\t\t\t\tValue: []byte{0, 1},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: 1,\n\t\t\t\t\tValue: []byte{0, 1, 2},\n\t\t\t\t},\n\t\t\t},\n\t\t\tv2: []*querypb.Value{\n\t\t\t\t{\n\t\t\t\t\tType: 0,\n\t\t\t\t\tValue: []byte{0, 1},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: 1,\n\t\t\t\t\tValue: []byte{0, 1, 2},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: true,\n\t\t},\n\t\t{\n\t\t\tv1: []*querypb.Value{\n\t\t\t\t{\n\t\t\t\t\tType: 0,\n\t\t\t\t\tValue: []byte{0, 1},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: 1,\n\t\t\t\t\tValue: []byte{0, 1},\n\t\t\t\t},\n\t\t\t},\n\t\t\tv2: []*querypb.Value{\n\t\t\t\t{\n\t\t\t\t\tType: 0,\n\t\t\t\t\tValue: []byte{0, 1},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tType: 1,\n\t\t\t\t\tValue: []byte{0, 1, 2},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: false,\n\t\t},\n\t} {\n\t\trequire.Equal(t, tc.expected, Proto3ValuesEqual(tc.v1, tc.v2))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package gocd\n\nimport (\n\t\"github.com\/h2non\/gock\"\n\t\"testing\"\n)\n\ntype MockClient struct {\n}\n\n\/\/func TestConfigurationService(t *testing.T) {\n\/\/\tdefer gock.Off() \/\/ Flush pending mocks after test execution\n\/\/\tcs := ConfigurationService{}\n\/\/}\n<commit_msg>Removed unused imports<commit_after>package gocd\n\ntype MockClient struct {\n}\n\n\/\/func TestConfigurationService(t *testing.T) {\n\/\/\tdefer gock.Off() \/\/ Flush pending mocks after test execution\n\/\/\tcs := ConfigurationService{}\n\/\/}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build ignore\n\n\/\/ Command makestatic reads a set of files and writes a Go source file to \"static.go\"\n\/\/ that declares a map of string constants containing contents of the input files.\n\/\/ It is intended to be invoked via \"go generate\" (directive in \"gen.go\").\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/format\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"time\"\n\t\"unicode\/utf8\"\n)\n\nvar files = []string{\n\t\"analysis\/call3.png\",\n\t\"analysis\/call-eg.png\",\n\t\"analysis\/callers1.png\",\n\t\"analysis\/callers2.png\",\n\t\"analysis\/chan1.png\",\n\t\"analysis\/chan2a.png\",\n\t\"analysis\/chan2b.png\",\n\t\"analysis\/error1.png\",\n\t\"analysis\/help.html\",\n\t\"analysis\/ident-def.png\",\n\t\"analysis\/ident-field.png\",\n\t\"analysis\/ident-func.png\",\n\t\"analysis\/ipcg-func.png\",\n\t\"analysis\/ipcg-pkg.png\",\n\t\"analysis\/typeinfo-pkg.png\",\n\t\"analysis\/typeinfo-src.png\",\n\t\"callgraph.html\",\n\t\"codewalk.html\",\n\t\"codewalkdir.html\",\n\t\"dirlist.html\",\n\t\"error.html\",\n\t\"example.html\",\n\t\"godoc.html\",\n\t\"godocs.js\",\n\t\"images\/minus.gif\",\n\t\"images\/plus.gif\",\n\t\"images\/treeview-black-line.gif\",\n\t\"images\/treeview-black.gif\",\n\t\"images\/treeview-default-line.gif\",\n\t\"images\/treeview-default.gif\",\n\t\"images\/treeview-gray-line.gif\",\n\t\"images\/treeview-gray.gif\",\n\t\"implements.html\",\n\t\"jquery.js\",\n\t\"jquery.treeview.css\",\n\t\"jquery.treeview.edit.js\",\n\t\"jquery.treeview.js\",\n\t\"methodset.html\",\n\t\"opensearch.xml\",\n\t\"package.html\",\n\t\"packageroot.html\",\n\t\"package.txt\",\n\t\"play.js\",\n\t\"playground.js\",\n\t\"search.html\",\n\t\"search.txt\",\n\t\"searchcode.html\",\n\t\"searchdoc.html\",\n\t\"searchtxt.html\",\n\t\"style.css\",\n}\n\nfunc main() {\n\tif err := makestatic(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc makestatic() error {\n\tf, err := os.Create(\"static.go\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\tbuf := new(bytes.Buffer)\n\tfmt.Fprintf(buf, \"%v\\n\\n%v\\n\\npackage static\\n\\n\", license, warning)\n\tfmt.Fprintf(buf, \"var Files = map[string]string{\\n\")\n\tfor _, fn := range files {\n\t\tb, err := ioutil.ReadFile(fn)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintf(buf, \"\\t%q: \", fn)\n\t\tif utf8.Valid(b) {\n\t\t\tfmt.Fprintf(buf, \"`%s`\", sanitize(b))\n\t\t} else {\n\t\t\tfmt.Fprintf(buf, \"%q\", b)\n\t\t}\n\t\tfmt.Fprintln(buf, \",\\n\")\n\t}\n\tfmt.Fprintln(buf, \"}\")\n\tfmtbuf, err := format.Source(buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(\"static.go\", fmtbuf, 0666)\n}\n\n\/\/ sanitize prepares a valid UTF-8 string as a raw string constant.\nfunc sanitize(b []byte) []byte {\n\t\/\/ Replace ` with `+\"`\"+`\n\tb = bytes.Replace(b, []byte(\"`\"), []byte(\"`+\\\"`\\\"+`\"), -1)\n\n\t\/\/ Replace BOM with `+\"\\xEF\\xBB\\xBF\"+`\n\t\/\/ (A BOM is valid UTF-8 but not permitted in Go source files.\n\t\/\/ I wouldn't bother handling this, but for some insane reason\n\t\/\/ jquery.js has a BOM somewhere in the middle.)\n\treturn bytes.Replace(b, []byte(\"\\xEF\\xBB\\xBF\"), []byte(\"`+\\\"\\\\xEF\\\\xBB\\\\xBF\\\"+`\"), -1)\n}\n\nconst warning = `\/\/ Code generated by \"makestatic\"; DO NOT EDIT.`\n\nvar license = fmt.Sprintf(`\/\/ Copyright %d The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.`, time.Now().UTC().Year())\n<commit_msg>godoc\/static: cleanup unused lines in makestatic.go<commit_after>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build ignore\n\n\/\/ Command makestatic reads a set of files and writes a Go source file to \"static.go\"\n\/\/ that declares a map of string constants containing contents of the input files.\n\/\/ It is intended to be invoked via \"go generate\" (directive in \"gen.go\").\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/format\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"time\"\n\t\"unicode\/utf8\"\n)\n\nvar files = []string{\n\t\"analysis\/call3.png\",\n\t\"analysis\/call-eg.png\",\n\t\"analysis\/callers1.png\",\n\t\"analysis\/callers2.png\",\n\t\"analysis\/chan1.png\",\n\t\"analysis\/chan2a.png\",\n\t\"analysis\/chan2b.png\",\n\t\"analysis\/error1.png\",\n\t\"analysis\/help.html\",\n\t\"analysis\/ident-def.png\",\n\t\"analysis\/ident-field.png\",\n\t\"analysis\/ident-func.png\",\n\t\"analysis\/ipcg-func.png\",\n\t\"analysis\/ipcg-pkg.png\",\n\t\"analysis\/typeinfo-pkg.png\",\n\t\"analysis\/typeinfo-src.png\",\n\t\"callgraph.html\",\n\t\"codewalk.html\",\n\t\"codewalkdir.html\",\n\t\"dirlist.html\",\n\t\"error.html\",\n\t\"example.html\",\n\t\"godoc.html\",\n\t\"godocs.js\",\n\t\"images\/minus.gif\",\n\t\"images\/plus.gif\",\n\t\"images\/treeview-black-line.gif\",\n\t\"images\/treeview-black.gif\",\n\t\"images\/treeview-default-line.gif\",\n\t\"images\/treeview-default.gif\",\n\t\"images\/treeview-gray-line.gif\",\n\t\"images\/treeview-gray.gif\",\n\t\"implements.html\",\n\t\"jquery.js\",\n\t\"jquery.treeview.css\",\n\t\"jquery.treeview.edit.js\",\n\t\"jquery.treeview.js\",\n\t\"methodset.html\",\n\t\"opensearch.xml\",\n\t\"package.html\",\n\t\"packageroot.html\",\n\t\"package.txt\",\n\t\"play.js\",\n\t\"playground.js\",\n\t\"search.html\",\n\t\"search.txt\",\n\t\"searchcode.html\",\n\t\"searchdoc.html\",\n\t\"searchtxt.html\",\n\t\"style.css\",\n}\n\nfunc main() {\n\tif err := makestatic(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc makestatic() error {\n\tbuf := new(bytes.Buffer)\n\tfmt.Fprintf(buf, \"%v\\n\\n%v\\n\\npackage static\\n\\n\", license, warning)\n\tfmt.Fprintf(buf, \"var Files = map[string]string{\\n\")\n\tfor _, fn := range files {\n\t\tb, err := ioutil.ReadFile(fn)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintf(buf, \"\\t%q: \", fn)\n\t\tif utf8.Valid(b) {\n\t\t\tfmt.Fprintf(buf, \"`%s`\", sanitize(b))\n\t\t} else {\n\t\t\tfmt.Fprintf(buf, \"%q\", b)\n\t\t}\n\t\tfmt.Fprintln(buf, \",\\n\")\n\t}\n\tfmt.Fprintln(buf, \"}\")\n\tfmtbuf, err := format.Source(buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(\"static.go\", fmtbuf, 0666)\n}\n\n\/\/ sanitize prepares a valid UTF-8 string as a raw string constant.\nfunc sanitize(b []byte) []byte {\n\t\/\/ Replace ` with `+\"`\"+`\n\tb = bytes.Replace(b, []byte(\"`\"), []byte(\"`+\\\"`\\\"+`\"), -1)\n\n\t\/\/ Replace BOM with `+\"\\xEF\\xBB\\xBF\"+`\n\t\/\/ (A BOM is valid UTF-8 but not permitted in Go source files.\n\t\/\/ I wouldn't bother handling this, but for some insane reason\n\t\/\/ jquery.js has a BOM somewhere in the middle.)\n\treturn bytes.Replace(b, []byte(\"\\xEF\\xBB\\xBF\"), []byte(\"`+\\\"\\\\xEF\\\\xBB\\\\xBF\\\"+`\"), -1)\n}\n\nconst warning = `\/\/ Code generated by \"makestatic\"; DO NOT EDIT.`\n\nvar license = fmt.Sprintf(`\/\/ Copyright %d The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.`, time.Now().UTC().Year())\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"os\"\n)\n\ntype Installations struct {\n\tInstallationItems []installationItem `json:\"installations\"`\n}\n\ntype installationItem struct {\n\tLabel string `json:\"label\"`\n\tServices []ServiceItem `json:\"services\"`\n\tLambdas []string `json:\"lambdas\"`\n}\n\ntype CredentialsItem struct {\n\tProfile string `json:\"profile\"`\n\tAwsKey string `json:\"awsKey\"`\n\tAwsSecret string `json:\"awsSecret\"`\n}\n\ntype ServiceItem struct {\n\tLabel string `json:\"label\"`\n\tCluster string `json:\"cluster\"`\n\tService string `json:\"service\"`\n}\n\ntype Output struct {\n\tInstallations []OutputTemplate\n}\n\ntype OutputTemplate struct {\n\tLabel string\n\tServices []OutputItem\n\tLambdas []LambdaOutputItem\n}\n\ntype OutputItem struct {\n\tLabel string\n\tTaskDefName string\n\tImage string\n\tVersion string\n\tDesiredCount int64\n\tRunningCount int64\n}\n\ntype LambdaOutputItem struct {\n\tLabel string\n\tVersion string\n\tDescription string\n}\n\nfunc GenerateReport(jsonData []byte, templateFile string) {\n\tvar config Installations\n\n\terr := json.Unmarshal(jsonData, &config)\n\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n\n\toutput := Output{}\n\n\tfor i := 0; i < len(config.InstallationItems); i++ {\n\t\tinstallation := config.InstallationItems[i];\n\n\t\toutputTemplate := OutputTemplate{\n\t\t\tLabel : installation.Label,\n\t\t}\n\n\t\tfor j := 0; j < len(installation.Services); j++ {\n\n\t\t\tservice := installation.Services[j]\n\t\t\tclusterArn := GetClusterArn(service.Cluster);\n\t\t\tserviceArn := GetServiceArn(clusterArn, service.Service);\n\n\t\t\tserviceDescription := _describeService(clusterArn, serviceArn)\n\n\t\t\tfor k := 0; k < len(serviceDescription.Services); k++ {\n\n\t\t\t\trealService := serviceDescription.Services[k];\n\n\t\t\t\ttaskDefinition := _describeTaskDefinition(*realService.TaskDefinition);\n\t\t\t\tversion, image := ExtractVersion(*taskDefinition.TaskDefinition.ContainerDefinitions[0].Image)\n\n\n\n\t\t\t\tfor l := 0; l < len(realService.Deployments); l++ {\n\t\t\t\t\tdeployment := realService.Deployments[l];\n\t\t\t\t\toutputItem := OutputItem{\n\t\t\t\t\t\tVersion: version,\n\t\t\t\t\t\tImage:ExtractImageName(image),\n\t\t\t\t\t\tTaskDefName:ExtractName(deployment.TaskDefinition),\n\t\t\t\t\t\tLabel:service.Label,\n\t\t\t\t\t\tRunningCount:*deployment.RunningCount,\n\t\t\t\t\t\tDesiredCount:*deployment.DesiredCount,\n\t\t\t\t\t}\n\t\t\t\t\toutputTemplate.Services = append(outputTemplate.Services, outputItem)\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t\tfor k := 0; k < len(installation.Lambdas); k++ {\n\t\t\tlambdaFunction := installation.Lambdas[k]\n\n\t\t\taliasInfo := _getLambdaFunctionAliasInfo(lambdaFunction, \"PRIMARY\")\n\n\t\t\tfunctionInfo := _getLambdaFunctionInfo(lambdaFunction, *aliasInfo.FunctionVersion);\n\n\t\t\toutputItem := LambdaOutputItem{\n\t\t\t\tDescription: *aliasInfo.Description,\n\t\t\t\tVersion: *functionInfo.Version,\n\t\t\t\tLabel: *functionInfo.Description,\n\t\t\t}\n\n\t\t\toutputTemplate.Lambdas = append(outputTemplate.Lambdas, outputItem)\n\t\t}\n\n\t\toutput.Installations = append(output.Installations, outputTemplate)\n\t}\n\n\treportTemplate, err := template.New(\"report\").Parse(templateFile)\n\n\tif err != nil {\n\t\terrState(err.Error())\n\t}\n\n\terr = reportTemplate.Execute(os.Stdout, output)\n\n\tif err != nil {\n\t\terrState(err.Error())\n\t}\n}\n\n<commit_msg>Fix for label in lambda output in reporting<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"os\"\n)\n\ntype Installations struct {\n\tInstallationItems []installationItem `json:\"installations\"`\n}\n\ntype installationItem struct {\n\tLabel string `json:\"label\"`\n\tServices []ServiceItem `json:\"services\"`\n\tLambdas []string `json:\"lambdas\"`\n}\n\ntype CredentialsItem struct {\n\tProfile string `json:\"profile\"`\n\tAwsKey string `json:\"awsKey\"`\n\tAwsSecret string `json:\"awsSecret\"`\n}\n\ntype ServiceItem struct {\n\tLabel string `json:\"label\"`\n\tCluster string `json:\"cluster\"`\n\tService string `json:\"service\"`\n}\n\ntype Output struct {\n\tInstallations []OutputTemplate\n}\n\ntype OutputTemplate struct {\n\tLabel string\n\tServices []OutputItem\n\tLambdas []LambdaOutputItem\n}\n\ntype OutputItem struct {\n\tLabel string\n\tTaskDefName string\n\tImage string\n\tVersion string\n\tDesiredCount int64\n\tRunningCount int64\n}\n\ntype LambdaOutputItem struct {\n\tLabel string\n\tVersion string\n\tDescription string\n}\n\nfunc GenerateReport(jsonData []byte, templateFile string) {\n\tvar config Installations\n\n\terr := json.Unmarshal(jsonData, &config)\n\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n\n\toutput := Output{}\n\n\tfor i := 0; i < len(config.InstallationItems); i++ {\n\t\tinstallation := config.InstallationItems[i];\n\n\t\toutputTemplate := OutputTemplate{\n\t\t\tLabel : installation.Label,\n\t\t}\n\n\t\tfor j := 0; j < len(installation.Services); j++ {\n\n\t\t\tservice := installation.Services[j]\n\t\t\tclusterArn := GetClusterArn(service.Cluster);\n\t\t\tserviceArn := GetServiceArn(clusterArn, service.Service);\n\n\t\t\tserviceDescription := _describeService(clusterArn, serviceArn)\n\n\t\t\tfor k := 0; k < len(serviceDescription.Services); k++ {\n\n\t\t\t\trealService := serviceDescription.Services[k];\n\n\t\t\t\ttaskDefinition := _describeTaskDefinition(*realService.TaskDefinition);\n\t\t\t\tversion, image := ExtractVersion(*taskDefinition.TaskDefinition.ContainerDefinitions[0].Image)\n\n\n\n\t\t\t\tfor l := 0; l < len(realService.Deployments); l++ {\n\t\t\t\t\tdeployment := realService.Deployments[l];\n\t\t\t\t\toutputItem := OutputItem{\n\t\t\t\t\t\tVersion: version,\n\t\t\t\t\t\tImage:ExtractImageName(image),\n\t\t\t\t\t\tTaskDefName:ExtractName(deployment.TaskDefinition),\n\t\t\t\t\t\tLabel:service.Label,\n\t\t\t\t\t\tRunningCount:*deployment.RunningCount,\n\t\t\t\t\t\tDesiredCount:*deployment.DesiredCount,\n\t\t\t\t\t}\n\t\t\t\t\toutputTemplate.Services = append(outputTemplate.Services, outputItem)\n\t\t\t\t}\n\t\t\t}\n\n\t\t}\n\n\t\tfor k := 0; k < len(installation.Lambdas); k++ {\n\t\t\tlambdaFunction := installation.Lambdas[k]\n\n\t\t\taliasInfo := _getLambdaFunctionAliasInfo(lambdaFunction, \"PRIMARY\")\n\n\t\t\tfunctionInfo := _getLambdaFunctionInfo(lambdaFunction, *aliasInfo.FunctionVersion);\n\n\t\t\toutputItem := LambdaOutputItem{\n\t\t\t\tDescription: *aliasInfo.Description,\n\t\t\t\tVersion: *functionInfo.Version,\n\t\t\t\tLabel: lambdaFunction,\n\t\t\t}\n\n\t\t\toutputTemplate.Lambdas = append(outputTemplate.Lambdas, outputItem)\n\t\t}\n\n\t\toutput.Installations = append(output.Installations, outputTemplate)\n\t}\n\n\treportTemplate, err := template.New(\"report\").Parse(templateFile)\n\n\tif err != nil {\n\t\terrState(err.Error())\n\t}\n\n\terr = reportTemplate.Execute(os.Stdout, output)\n\n\tif err != nil {\n\t\terrState(err.Error())\n\t}\n}\n\n<|endoftext|>"} {"text":"<commit_before>package tvdbapi\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n)\n\ntype searchData struct {\n\tSeries []Series `json:\"data\"`\n}\n\ntype SearchQuery struct {\n\tName string\n\tImdbId string\n\tZap2itId string\n\tAcceptLanguage string\n}\n\ntype AiredTime struct {\n\ttime.Time\n}\n\nconst ctLayout = \"2006-01-02\"\n\nfunc (ct *AiredTime) UnmarshalJSON(b []byte) (err error) {\n\tif b[0] == '\"' && b[len(b)-1] == '\"' {\n\t\tb = b[1 : len(b)-1]\n\t}\n\tct.Time, err = time.Parse(ctLayout, string(b))\n\treturn\n}\n\ntype Series struct {\n\tId int `json:\"id\"`\n\tSeriesName string `json:\"seriesName\"`\n\tAliases []string `json:\"aliases\"`\n\tBanner string `json:\"banner\"`\n\tSeriesId string `json:\"seriesId\"`\n\tStatus string `json:\"status\"`\n\tFirstAired AiredTime `json:\"firstAired\"`\n\tNetwork string `json:\"network\"`\n\tNetworkId string `json:\"networkId\"`\n\tRuntime string `json:\"runtime\"`\n\tGenre []string `json:\"genre\"`\n\tOverview string `json:\"overview\"`\n\tLastUpdated int `json:\"lastUpdated\"`\n\tAirsDayOfWeek string `json:\"airsDayOfWeek\"`\n\tAirsTime string `json:\"airsTime\"`\n\tRating string `json:\"rating\"`\n\tImdbId string `json:\"imdbId\"`\n\tZap2itId string `json:\"zap2itId\"`\n\tAdded string `json:\"added\"`\n\tSiteRating float32 `json:\"siteRating\"`\n\tSiteRatingCount int `json:\"siteRatingCount\"`\n}\n\ntype seriesInfoData struct {\n\tSeries Series `json:\"data\"`\n}\n\nfunc (client Client) Search(query SearchQuery) []Series {\n\tresult := searchData{}\n\tvalues := url.Values{}\n\n\tif query.Name != \"\" {\n\t\tvalues.Add(\"name\", query.Name)\n\t}\n\n\tif query.ImdbId != \"\" {\n\t\tvalues.Add(\"imdbId\", query.ImdbId)\n\t}\n\n\tif query.Zap2itId != \"\" {\n\t\tvalues.Add(\"zap2itId\", query.Zap2itId)\n\t}\n\n\turl := fmt.Sprintf(\"https:\/\/api.thetvdb.com\/search\/series?%s\", values.Encode())\n\n\treq, _ := http.NewRequest(\"GET\", url, nil)\n\n\treq.Header.Add(\"authorization\", \"Bearer \"+client.ApiToken)\n\tif query.AcceptLanguage != \"\" {\n\t\treq.Header.Add(\"Accept-Language\", query.AcceptLanguage)\n\t}\n\n\tres, _ := http.DefaultClient.Do(req)\n\tdefer res.Body.Close()\n\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\terr := json.Unmarshal(body, &result)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn result.Series\n\t}\n\n\tlog.Println(\"search completed successfully\")\n\tlog.Println(fmt.Sprintf(\"Total found: %v\", len(result.Series)))\n\n\treturn result.Series\n}\n\nfunc (client Client) GetSeriesInfo(series Series) Series {\n\treturn client.GetSeriesInfoById(series.Id)\n}\n\nfunc (client Client) GetSeriesInfoById(seriesId int) Series {\n\tresult := seriesInfoData{}\n\n\turl := fmt.Sprintf(\"https:\/\/api.thetvdb.com\/series\/%v\", seriesId)\n\n\treq, _ := http.NewRequest(\"GET\", url, nil)\n\n\treq.Header.Add(\"authorization\", \"Bearer \"+client.ApiToken)\n\n\tres, _ := http.DefaultClient.Do(req)\n\tdefer res.Body.Close()\n\n\tbody, _ := ioutil.ReadAll(res.Body)\n\n\terr := json.Unmarshal(body, &result)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn result.Series\n\t}\n\n\tlog.Println(\"get series info completed successfully\")\n\tlog.Println(fmt.Sprintf(\"Series: %s; ImdbId: %s; LastUpdated: %s; Zip2itid: %s; First Aired: %v\",\n\t\tresult.Series.SeriesName,\n\t\tresult.Series.ImdbId,\n\t\tresult.Series.LastUpdated,\n\t\tresult.Series.Zap2itId,\n\t\tresult.Series.FirstAired))\n\n\treturn result.Series\n}\n<commit_msg>Fix empty AiredDate<commit_after>package tvdbapi\r\rimport (\r\t\"encoding\/json\"\r\t\"fmt\"\r\t\"io\/ioutil\"\r\t\"log\"\r\t\"net\/http\"\r\t\"net\/url\"\r\t\"time\"\r)\r\rtype searchData struct {\r\tSeries []Series `json:\"data\"`\r}\r\rtype SearchQuery struct {\r\tName string\r\tImdbId string\r\tZap2itId string\r\tAcceptLanguage string\r}\r\rtype AiredTime struct {\r\ttime.Time\r}\r\rconst ctLayout = \"2006-01-02\"\r\rfunc (ct *AiredTime) UnmarshalJSON(b []byte) (err error) {\r\tif b[0] == '\"' && b[len(b)-1] == '\"' {\r\t\tb = b[1 : len(b)-1]\r\t}\r\tif len(b) == 0 {\r\t\t\/\/ sometimes AiredDate is empty. Lets it will be 1900-01-01\r\t\tct.Time, err = time.Parse(ctLayout, \"1900-01-01\")\r\t} else {\r\t\tct.Time, err = time.Parse(ctLayout, string(b))\r\t}\r\treturn\r}\r\rtype Series struct {\r\tId int `json:\"id\"`\r\tSeriesName string `json:\"seriesName\"`\r\tAliases []string `json:\"aliases\"`\r\tBanner string `json:\"banner\"`\r\tSeriesId string `json:\"seriesId\"`\r\tStatus string `json:\"status\"`\r\tFirstAired AiredTime `json:\"firstAired\"`\r\tNetwork string `json:\"network\"`\r\tNetworkId string `json:\"networkId\"`\r\tRuntime string `json:\"runtime\"`\r\tGenre []string `json:\"genre\"`\r\tOverview string `json:\"overview\"`\r\tLastUpdated int `json:\"lastUpdated\"`\r\tAirsDayOfWeek string `json:\"airsDayOfWeek\"`\r\tAirsTime string `json:\"airsTime\"`\r\tRating string `json:\"rating\"`\r\tImdbId string `json:\"imdbId\"`\r\tZap2itId string `json:\"zap2itId\"`\r\tAdded string `json:\"added\"`\r\tSiteRating float32 `json:\"siteRating\"`\r\tSiteRatingCount int `json:\"siteRatingCount\"`\r}\r\rtype seriesInfoData struct {\r\tSeries Series `json:\"data\"`\r}\r\rfunc (client Client) Search(query SearchQuery) []Series {\r\tresult := searchData{}\r\tvalues := url.Values{}\r\r\tif query.Name != \"\" {\r\t\tvalues.Add(\"name\", query.Name)\r\t}\r\r\tif query.ImdbId != \"\" {\r\t\tvalues.Add(\"imdbId\", query.ImdbId)\r\t}\r\r\tif query.Zap2itId != \"\" {\r\t\tvalues.Add(\"zap2itId\", query.Zap2itId)\r\t}\r\r\turl := fmt.Sprintf(\"https:\/\/api.thetvdb.com\/search\/series?%s\", values.Encode())\r\r\treq, _ := http.NewRequest(\"GET\", url, nil)\r\r\treq.Header.Add(\"authorization\", \"Bearer \"+client.ApiToken)\r\tif query.AcceptLanguage != \"\" {\r\t\treq.Header.Add(\"Accept-Language\", query.AcceptLanguage)\r\t}\r\r\tres, _ := http.DefaultClient.Do(req)\r\tdefer res.Body.Close()\r\r\tbody, _ := ioutil.ReadAll(res.Body)\r\r\terr := json.Unmarshal(body, &result)\r\r\tif err != nil {\r\t\tlog.Fatal(err)\r\t\treturn result.Series\r\t}\r\r\tlog.Println(\"search completed successfully\")\r\tlog.Println(fmt.Sprintf(\"Total found: %v\", len(result.Series)))\r\r\treturn result.Series\r}\r\rfunc (client Client) GetSeriesInfo(series Series) Series {\r\treturn client.GetSeriesInfoById(series.Id)\r}\r\rfunc (client Client) GetSeriesInfoById(seriesId int) Series {\r\tresult := seriesInfoData{}\r\r\turl := fmt.Sprintf(\"https:\/\/api.thetvdb.com\/series\/%v\", seriesId)\r\r\treq, _ := http.NewRequest(\"GET\", url, nil)\r\r\treq.Header.Add(\"authorization\", \"Bearer \"+client.ApiToken)\r\r\tres, _ := http.DefaultClient.Do(req)\r\tdefer res.Body.Close()\r\r\tbody, _ := ioutil.ReadAll(res.Body)\r\r\terr := json.Unmarshal(body, &result)\r\r\tif err != nil {\r\t\tlog.Fatal(err)\r\t\treturn result.Series\r\t}\r\r\tlog.Println(\"get series info completed successfully\")\r\tlog.Println(fmt.Sprintf(\"Series: %s; ImdbId: %s; LastUpdated: %s; Zip2itid: %s; First Aired: %v\",\r\t\tresult.Series.SeriesName,\r\t\tresult.Series.ImdbId,\r\t\tresult.Series.LastUpdated,\r\t\tresult.Series.Zap2itId,\r\t\tresult.Series.FirstAired))\r\r\treturn result.Series\r}\r<|endoftext|>"} {"text":"<commit_before>package googledrive\n\nimport (\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"golang.org\/x\/oauth2\"\n\t\"golang.org\/x\/oauth2\/google\"\n\t\"google.golang.org\/api\/drive\/v2\"\n)\n\nvar (\n\tconfig *oauth2.Config\n)\n\n\/\/ AuthURL() returns a URL to the Google OAuth2 login page\nfunc AuthURL() string {\n\treturn config.AuthCodeURL(\"google\", oauth2.AccessTypeOffline)\n}\n\n\/\/ Config() reads the information from the client_secret.json file and\n\/\/ parses it into the global config object, so the other functions can\n\/\/ access it.\nfunc Config(filepath string) error {\n\tsecret, err := ioutil.ReadFile(filepath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconfig, err = google.ConfigFromJSON(secret, drive.DriveScope)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Validate() validates an access code against the oauth2.config object. It\n\/\/ then returns the real token togehter with an expiry date.\nfunc Validate(code string) (string, time.Time, error) {\n\ttoken, err := config.Exchange(oauth2.NoContext, code)\n\treturn token.AccessToken, token.Expiry, err\n}\n<commit_msg>minor syntax change for golint<commit_after>package googledrive\n\nimport (\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"golang.org\/x\/oauth2\"\n\t\"golang.org\/x\/oauth2\/google\"\n\t\"google.golang.org\/api\/drive\/v2\"\n)\n\nvar (\n\tconfig *oauth2.Config\n)\n\n\/\/ AuthURL returns a URL to the Google OAuth2 login page\nfunc AuthURL() string {\n\treturn config.AuthCodeURL(\"google\", oauth2.AccessTypeOffline)\n}\n\n\/\/ Config reads the information from the client_secret.json file and\n\/\/ parses it into the global config object, so the other functions can\n\/\/ access it.\nfunc Config(filepath string) error {\n\tsecret, err := ioutil.ReadFile(filepath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconfig, err = google.ConfigFromJSON(secret, drive.DriveScope)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ Validate validates an access code against the oauth2.config object. It\n\/\/ then returns the real token togehter with an expiry date.\nfunc Validate(code string) (string, time.Time, error) {\n\ttoken, err := config.Exchange(oauth2.NoContext, code)\n\treturn token.AccessToken, token.Expiry, err\n}\n<|endoftext|>"} {"text":"<commit_before>package gqt_test\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\n\t\"code.cloudfoundry.org\/garden\"\n\t\"code.cloudfoundry.org\/guardian\/gqt\/runner\"\n\t\"code.cloudfoundry.org\/guardian\/rundmc\/runrunc\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n)\n\nvar _ = Describe(\"Runtime Plugin\", func() {\n\tvar (\n\t\tclient *runner.RunningGarden\n\t)\n\n\tJustBeforeEach(func() {\n\t\tclient = runner.Start(config)\n\t})\n\n\tAfterEach(func() {\n\t\tExpect(client.DestroyAndStop()).To(Succeed())\n\t})\n\n\tContext(\"when a runtime plugin is provided\", func() {\n\t\tBeforeEach(func() {\n\t\t\tconfig.RuntimePluginBin = binaries.RuntimePlugin\n\t\t\tconfig.NetworkPluginBin = binaries.NetworkPlugin\n\t\t\tconfig.ImagePluginBin = binaries.NoopPlugin\n\t\t\tconfig.RuntimePluginExtraArgs = []string{\n\t\t\t\t`\"--image-store\"`, `some-image-store`,\n\t\t\t}\n\t\t})\n\n\t\tDescribe(\"creating a container\", func() {\n\t\t\tvar (\n\t\t\t\thandle string\n\t\t\t\targsFilepath string\n\t\t\t)\n\n\t\t\tJustBeforeEach(func() {\n\t\t\t\thandle = fmt.Sprintf(\"runtime-plugin-test-handle-%s\", config.Tag)\n\t\t\t})\n\n\t\t\t\/\/ The \"on Linux \/ on Windows\" Contexts here are only temporary.\n\t\t\t\/\/ Right now winc does not support \"winc run -d\" so we have to branch\n\t\t\t\/\/ the logic.\n\t\t\t\/\/ The winc team are aware and have a story to add support:\n\t\t\t\/\/ https:\/\/www.pivotaltracker.com\/n\/projects\/1156164\/stories\/153062983\n\t\t\tContext(\"on Linux\", func() {\n\t\t\t\tJustBeforeEach(func() {\n\t\t\t\t\tonlyOnLinux()\n\t\t\t\t\targsFilepath = filepath.Join(client.TmpDir, \"run-args\")\n\t\t\t\t})\n\n\t\t\t\tIt(\"executes the plugin, passing the correct args for create\", func() {\n\t\t\t\t\t_, err := client.Create(garden.ContainerSpec{Handle: handle})\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\t\tExpect(readPluginArgs(argsFilepath)).To(ConsistOf(\n\t\t\t\t\t\tbinaries.RuntimePlugin,\n\t\t\t\t\t\t\"--debug\",\n\t\t\t\t\t\t\"--log\", HaveSuffix(filepath.Join(\"containers\", handle, \"create.log\")),\n\t\t\t\t\t\t\"--log-format\", \"json\",\n\t\t\t\t\t\t\"--image-store\", \"some-image-store\",\n\t\t\t\t\t\t\"run\",\n\t\t\t\t\t\t\"--detach\",\n\t\t\t\t\t\t\"--no-new-keyring\",\n\t\t\t\t\t\t\"--bundle\", HaveSuffix(filepath.Join(\"containers\", handle)),\n\t\t\t\t\t\t\"--pid-file\", HaveSuffix(filepath.Join(\"containers\", handle, \"pidfile\")),\n\t\t\t\t\t\thandle,\n\t\t\t\t\t))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"on Windows\", func() {\n\t\t\t\tJustBeforeEach(func() {\n\t\t\t\t\tonlyOnWindows()\n\t\t\t\t\targsFilepath = filepath.Join(client.TmpDir, \"create-args\")\n\t\t\t\t})\n\n\t\t\t\tIt(\"executes the plugin, passing the correct args for create\", func() {\n\t\t\t\t\t_, err := client.Create(garden.ContainerSpec{Handle: handle})\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\t\tExpect(readPluginArgs(argsFilepath)).To(ConsistOf(\n\t\t\t\t\t\tbinaries.RuntimePlugin,\n\t\t\t\t\t\t\"--debug\",\n\t\t\t\t\t\t\"--log\", HaveSuffix(filepath.Join(\"containers\", handle, \"create.log\")),\n\t\t\t\t\t\t\"--log-format\", \"json\",\n\t\t\t\t\t\t\"--image-store\", \"some-image-store\",\n\t\t\t\t\t\t\"create\",\n\t\t\t\t\t\t\"--no-new-keyring\",\n\t\t\t\t\t\t\"--bundle\", HaveSuffix(filepath.Join(\"containers\", handle)),\n\t\t\t\t\t\t\"--pid-file\", HaveSuffix(filepath.Join(\"containers\", handle, \"pidfile\")),\n\t\t\t\t\t\thandle,\n\t\t\t\t\t))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when the network plugin returns configuration\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tpluginReturn := `{\n\t\t\t\t\t\"properties\":{\n\t\t\t\t\t\t\"foo\":\"bar\",\n\t\t\t\t\t\t\"kawasaki.mtu\":\"1499\",\n\t\t\t\t\t\t\"garden.network.container-ip\":\"10.255.10.10\",\n\t\t\t\t\t\t\"garden.network.host-ip\":\"255.255.255.255\"\n\t\t\t\t\t},\n\t\t\t\t\t\"dns_servers\": [\n\t\t\t\t\t\t\"1.2.3.4\",\n\t\t\t\t\t\t\"1.2.3.5\"\n\t\t\t\t\t]\n\t\t\t }`\n\t\t\t\t\tconfig.NetworkPluginExtraArgs = []string{\n\t\t\t\t\t\tos.DevNull,\n\t\t\t\t\t\tos.DevNull,\n\t\t\t\t\t\tpluginReturn,\n\t\t\t\t\t}\n\t\t\t\t})\n\n\t\t\t\tIt(\"succeeds\", func() {\n\t\t\t\t\t_, err := client.Create(garden.ContainerSpec{Handle: handle})\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when runtime plugin state returns a pid of zero for a created container\", func() {\n\t\t\t\tvar networkPluginArgsFile string\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tnetworkPluginArgsFile = filepath.Join(config.TmpDir, fmt.Sprintf(\"network-a-%d\", GinkgoParallelNode()))\n\t\t\t\t\tconfig.NetworkPluginExtraArgs = []string{networkPluginArgsFile, os.DevNull, \"unused\"}\n\t\t\t\t})\n\n\t\t\t\tAfterEach(func() {\n\t\t\t\t\tExpect(os.Remove(networkPluginArgsFile)).To(Succeed())\n\t\t\t\t})\n\n\t\t\t\tJustBeforeEach(func() {\n\t\t\t\t\tExpect(ioutil.WriteFile(\n\t\t\t\t\t\tfilepath.Join(client.TmpDir, \"runtime-state-output\"),\n\t\t\t\t\t\t[]byte(`{\"pid\": 0, \"status\": \"created\"}`),\n\t\t\t\t\t\t0600,\n\t\t\t\t\t)).To(Succeed())\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an error without calling the network plugin\", func() {\n\t\t\t\t\t_, err := client.Create(garden.ContainerSpec{Handle: handle})\n\t\t\t\t\tExpect(err).To(HaveOccurred())\n\t\t\t\t\tExpect(readFile(networkPluginArgsFile)).NotTo(ContainSubstring(\"up\"))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"starting a process\", func() {\n\t\t\t\tvar (\n\t\t\t\t\truntimePluginExitCode int\n\t\t\t\t\tstdoutContents string\n\t\t\t\t\tstderrContents string\n\n\t\t\t\t\tprocess garden.Process\n\t\t\t\t\tstdoutWriter *gbytes.Buffer\n\t\t\t\t\tstderrWriter *gbytes.Buffer\n\t\t\t\t\trunErr error\n\t\t\t\t)\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\truntimePluginExitCode = 0\n\n\t\t\t\t\tstdoutWriter = gbytes.NewBuffer()\n\t\t\t\t\tstderrWriter = gbytes.NewBuffer()\n\t\t\t\t})\n\n\t\t\t\tJustBeforeEach(func() {\n\t\t\t\t\targsFilepath = filepath.Join(client.TmpDir, \"exec-args\")\n\n\t\t\t\t\tcontainer, err := client.Create(garden.ContainerSpec{Handle: handle})\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\t\tprocess, runErr = container.Run(garden.ProcessSpec{\n\t\t\t\t\t\tPath: \"some-idiosyncratic-binary\",\n\t\t\t\t\t\tArgs: []string{fmt.Sprintf(\"%d\", runtimePluginExitCode), stdoutContents, stderrContents},\n\t\t\t\t\t}, garden.ProcessIO{\n\t\t\t\t\t\tStdout: io.MultiWriter(GinkgoWriter, stdoutWriter),\n\t\t\t\t\t\tStderr: io.MultiWriter(GinkgoWriter, stderrWriter),\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns no error\", func() {\n\t\t\t\t\tExpect(runErr).NotTo(HaveOccurred())\n\t\t\t\t})\n\n\t\t\t\tIt(\"executes the plugin, passing the correct args for exec\", func() {\n\t\t\t\t\tlogfileMatcher := MatchRegexp(\".*\")\n\t\t\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\t\t\tlogfileMatcher = HaveSuffix(\"exec.log\")\n\t\t\t\t\t}\n\n\t\t\t\t\tpluginArgs := []interface{}{\n\t\t\t\t\t\tbinaries.RuntimePlugin,\n\t\t\t\t\t\t\"--debug\",\n\t\t\t\t\t\t\"--log\", logfileMatcher,\n\t\t\t\t\t\t\"--log-format\", \"json\",\n\t\t\t\t\t\t\"exec\",\n\t\t\t\t\t\t\"-p\", MatchRegexp(\".*\"),\n\t\t\t\t\t\t\"--pid-file\", MatchRegexp(\".*\"),\n\t\t\t\t\t\thandle,\n\t\t\t\t\t}\n\t\t\t\t\tif runtime.GOOS != \"windows\" {\n\t\t\t\t\t\tpluginArgs = append(pluginArgs, \"-d\")\n\t\t\t\t\t}\n\t\t\t\t\tExpect(readPluginArgs(argsFilepath)).To(ConsistOf(pluginArgs))\n\n\t\t\t\t\t_, err := process.Wait() \/\/ ensure plugin has finished running before asserting on output(s)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t})\n\n\t\t\t\tIt(\"passes the spec serialised into a file\", func() {\n\t\t\t\t\tprocessSpecFilePath := filepath.Join(client.TmpDir, \"exec-process-spec\")\n\t\t\t\t\tEventually(processSpecFilePath).Should(BeAnExistingFile())\n\n\t\t\t\t\tvar processSpec runrunc.PreparedSpec\n\t\t\t\t\tprocessSpecContent, err := ioutil.ReadFile(processSpecFilePath)\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t\tExpect(json.Unmarshal(processSpecContent, &processSpec)).To(Succeed())\n\n\t\t\t\t\tExpect(processSpec.Process.Args[0]).To(Equal(\"some-idiosyncratic-binary\"))\n\t\t\t\t})\n\n\t\t\t\tDescribe(\"runtime plugin stdio\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tstdoutContents = \"some stdout content\"\n\t\t\t\t\t\tstderrContents = \"some stderr content\"\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns the runtime plugin's stdout\", func() {\n\t\t\t\t\t\tEventually(stdoutWriter).Should(gbytes.Say(stdoutContents))\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns the runtime plugin's stderr\", func() {\n\t\t\t\t\t\tEventually(stderrWriter).Should(gbytes.Say(stderrContents))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when the runtime plugin exits with 32\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\truntimePluginExitCode = 32\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns an error because the runtime plugin exits non-zero on Linux\", func() {\n\t\t\t\t\t\tonlyOnLinux()\n\t\t\t\t\t\tExpect(runErr).To(MatchError(ContainSubstring(\"exit status 32\")))\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns no error on Windows\", func() {\n\t\t\t\t\t\tonlyOnWindows()\n\t\t\t\t\t\tExpect(runErr).NotTo(HaveOccurred())\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tDescribe(\"the returned process\", func() {\n\t\t\t\t\tDescribe(\"Wait\", func() {\n\t\t\t\t\t\tIt(\"returns the exit status of the runtime plugin\", func() {\n\t\t\t\t\t\t\tExpect(process.Wait()).To(Equal(0))\n\t\t\t\t\t\t})\n\n\t\t\t\t\t\tContext(\"when the runtime plugin exits with 42\", func() {\n\t\t\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\t\t\truntimePluginExitCode = 42\n\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\tIt(\"returns the exit status of the runtime plugin on Windows\", func() {\n\t\t\t\t\t\t\t\tonlyOnWindows()\n\t\t\t\t\t\t\t\tExpect(process.Wait()).To(Equal(42))\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t})\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"destroying a container\", func() {\n\t\t\t\tvar (\n\t\t\t\t\targsFilepath string\n\t\t\t\t)\n\n\t\t\t\tJustBeforeEach(func() {\n\t\t\t\t\thandle = fmt.Sprintf(\"runtime-plugin-test-handle-%s\", config.Tag)\n\t\t\t\t\targsFilepath = filepath.Join(client.TmpDir, \"delete-args\")\n\n\t\t\t\t\t_, err := client.Create(garden.ContainerSpec{Handle: handle})\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t})\n\n\t\t\t\tIt(\"executes the plugin, passing the correct args for delete\", func() {\n\t\t\t\t\tExpect(client.Destroy(handle)).To(Succeed())\n\n\t\t\t\t\tExpect(readPluginArgs(argsFilepath)).To(ConsistOf(\n\t\t\t\t\t\tbinaries.RuntimePlugin,\n\t\t\t\t\t\t\"--debug\",\n\t\t\t\t\t\t\"--log\", MatchRegexp(\".*\"),\n\t\t\t\t\t\t\"--log-format\", \"json\",\n\t\t\t\t\t\t\"delete\",\n\t\t\t\t\t\thandle,\n\t\t\t\t\t))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n\nfunc readPluginArgs(argsFilePath string) []string {\n\tEventually(argsFilePath).Should(BeAnExistingFile())\n\tpluginArgsBytes, err := ioutil.ReadFile(argsFilePath)\n\tExpect(err).ToNot(HaveOccurred())\n\treturn strings.Split(string(pluginArgsBytes), \" \")\n}\n\nfunc onlyOnLinux() {\n\tonlyOn(\"linux\")\n}\n\nfunc onlyOnWindows() {\n\tonlyOn(\"windows\")\n}\n\nfunc onlyOn(goos string) {\n\tif runtime.GOOS != goos {\n\t\tSkip(goos + \" only\")\n\t}\n}\n<commit_msg>Add debug to flaky runtime test<commit_after>package gqt_test\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\n\t\"code.cloudfoundry.org\/garden\"\n\t\"code.cloudfoundry.org\/guardian\/gqt\/runner\"\n\t\"code.cloudfoundry.org\/guardian\/rundmc\/runrunc\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/gbytes\"\n)\n\nvar _ = Describe(\"Runtime Plugin\", func() {\n\tvar (\n\t\tclient *runner.RunningGarden\n\t)\n\n\tJustBeforeEach(func() {\n\t\tclient = runner.Start(config)\n\t})\n\n\tAfterEach(func() {\n\t\tExpect(client.DestroyAndStop()).To(Succeed())\n\t})\n\n\tContext(\"when a runtime plugin is provided\", func() {\n\t\tBeforeEach(func() {\n\t\t\tconfig.RuntimePluginBin = binaries.RuntimePlugin\n\t\t\tconfig.NetworkPluginBin = binaries.NetworkPlugin\n\t\t\tconfig.ImagePluginBin = binaries.NoopPlugin\n\t\t\tconfig.RuntimePluginExtraArgs = []string{\n\t\t\t\t`\"--image-store\"`, `some-image-store`,\n\t\t\t}\n\t\t})\n\n\t\tDescribe(\"creating a container\", func() {\n\t\t\tvar (\n\t\t\t\thandle string\n\t\t\t\targsFilepath string\n\t\t\t)\n\n\t\t\tJustBeforeEach(func() {\n\t\t\t\thandle = fmt.Sprintf(\"runtime-plugin-test-handle-%s\", config.Tag)\n\t\t\t})\n\n\t\t\t\/\/ The \"on Linux \/ on Windows\" Contexts here are only temporary.\n\t\t\t\/\/ Right now winc does not support \"winc run -d\" so we have to branch\n\t\t\t\/\/ the logic.\n\t\t\t\/\/ The winc team are aware and have a story to add support:\n\t\t\t\/\/ https:\/\/www.pivotaltracker.com\/n\/projects\/1156164\/stories\/153062983\n\t\t\tContext(\"on Linux\", func() {\n\t\t\t\tJustBeforeEach(func() {\n\t\t\t\t\tonlyOnLinux()\n\t\t\t\t\targsFilepath = filepath.Join(client.TmpDir, \"run-args\")\n\t\t\t\t})\n\n\t\t\t\tIt(\"executes the plugin, passing the correct args for create\", func() {\n\t\t\t\t\t_, err := client.Create(garden.ContainerSpec{Handle: handle})\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\t\tExpect(readPluginArgs(argsFilepath)).To(ConsistOf(\n\t\t\t\t\t\tbinaries.RuntimePlugin,\n\t\t\t\t\t\t\"--debug\",\n\t\t\t\t\t\t\"--log\", HaveSuffix(filepath.Join(\"containers\", handle, \"create.log\")),\n\t\t\t\t\t\t\"--log-format\", \"json\",\n\t\t\t\t\t\t\"--image-store\", \"some-image-store\",\n\t\t\t\t\t\t\"run\",\n\t\t\t\t\t\t\"--detach\",\n\t\t\t\t\t\t\"--no-new-keyring\",\n\t\t\t\t\t\t\"--bundle\", HaveSuffix(filepath.Join(\"containers\", handle)),\n\t\t\t\t\t\t\"--pid-file\", HaveSuffix(filepath.Join(\"containers\", handle, \"pidfile\")),\n\t\t\t\t\t\thandle,\n\t\t\t\t\t))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"on Windows\", func() {\n\t\t\t\tJustBeforeEach(func() {\n\t\t\t\t\tonlyOnWindows()\n\t\t\t\t\targsFilepath = filepath.Join(client.TmpDir, \"create-args\")\n\t\t\t\t})\n\n\t\t\t\tIt(\"executes the plugin, passing the correct args for create\", func() {\n\t\t\t\t\t_, err := client.Create(garden.ContainerSpec{Handle: handle})\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\t\tExpect(readPluginArgs(argsFilepath)).To(ConsistOf(\n\t\t\t\t\t\tbinaries.RuntimePlugin,\n\t\t\t\t\t\t\"--debug\",\n\t\t\t\t\t\t\"--log\", HaveSuffix(filepath.Join(\"containers\", handle, \"create.log\")),\n\t\t\t\t\t\t\"--log-format\", \"json\",\n\t\t\t\t\t\t\"--image-store\", \"some-image-store\",\n\t\t\t\t\t\t\"create\",\n\t\t\t\t\t\t\"--no-new-keyring\",\n\t\t\t\t\t\t\"--bundle\", HaveSuffix(filepath.Join(\"containers\", handle)),\n\t\t\t\t\t\t\"--pid-file\", HaveSuffix(filepath.Join(\"containers\", handle, \"pidfile\")),\n\t\t\t\t\t\thandle,\n\t\t\t\t\t))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when the network plugin returns configuration\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tpluginReturn := `{\n\t\t\t\t\t\"properties\":{\n\t\t\t\t\t\t\"foo\":\"bar\",\n\t\t\t\t\t\t\"kawasaki.mtu\":\"1499\",\n\t\t\t\t\t\t\"garden.network.container-ip\":\"10.255.10.10\",\n\t\t\t\t\t\t\"garden.network.host-ip\":\"255.255.255.255\"\n\t\t\t\t\t},\n\t\t\t\t\t\"dns_servers\": [\n\t\t\t\t\t\t\"1.2.3.4\",\n\t\t\t\t\t\t\"1.2.3.5\"\n\t\t\t\t\t]\n\t\t\t }`\n\t\t\t\t\tconfig.NetworkPluginExtraArgs = []string{\n\t\t\t\t\t\tos.DevNull,\n\t\t\t\t\t\tos.DevNull,\n\t\t\t\t\t\tpluginReturn,\n\t\t\t\t\t}\n\t\t\t\t})\n\n\t\t\t\tIt(\"succeeds\", func() {\n\t\t\t\t\t_, err := client.Create(garden.ContainerSpec{Handle: handle})\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when runtime plugin state returns a pid of zero for a created container\", func() {\n\t\t\t\tvar networkPluginArgsFile string\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tnetworkPluginArgsFile = filepath.Join(config.TmpDir, fmt.Sprintf(\"network-a-%d\", GinkgoParallelNode()))\n\t\t\t\t\tconfig.NetworkPluginExtraArgs = []string{networkPluginArgsFile, os.DevNull, \"unused\"}\n\t\t\t\t})\n\n\t\t\t\tAfterEach(func() {\n\t\t\t\t\tExpect(os.Remove(networkPluginArgsFile)).To(Succeed())\n\t\t\t\t})\n\n\t\t\t\tJustBeforeEach(func() {\n\t\t\t\t\tExpect(ioutil.WriteFile(\n\t\t\t\t\t\tfilepath.Join(client.TmpDir, \"runtime-state-output\"),\n\t\t\t\t\t\t[]byte(`{\"pid\": 0, \"status\": \"created\"}`),\n\t\t\t\t\t\t0600,\n\t\t\t\t\t)).To(Succeed())\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an error without calling the network plugin\", func() {\n\t\t\t\t\t_, err := client.Create(garden.ContainerSpec{Handle: handle})\n\t\t\t\t\tExpect(err).To(HaveOccurred())\n\t\t\t\t\tExpect(readFile(networkPluginArgsFile)).NotTo(ContainSubstring(\"up\"))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"starting a process\", func() {\n\t\t\t\tvar (\n\t\t\t\t\truntimePluginExitCode int\n\t\t\t\t\tstdoutContents string\n\t\t\t\t\tstderrContents string\n\n\t\t\t\t\tprocess garden.Process\n\t\t\t\t\tstdoutWriter *gbytes.Buffer\n\t\t\t\t\tstderrWriter *gbytes.Buffer\n\t\t\t\t\trunErr error\n\t\t\t\t)\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\truntimePluginExitCode = 0\n\n\t\t\t\t\tstdoutWriter = gbytes.NewBuffer()\n\t\t\t\t\tstderrWriter = gbytes.NewBuffer()\n\t\t\t\t})\n\n\t\t\t\tJustBeforeEach(func() {\n\t\t\t\t\targsFilepath = filepath.Join(client.TmpDir, \"exec-args\")\n\n\t\t\t\t\tcontainer, err := client.Create(garden.ContainerSpec{Handle: handle})\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\t\t\t\tprocess, runErr = container.Run(garden.ProcessSpec{\n\t\t\t\t\t\tPath: \"some-idiosyncratic-binary\",\n\t\t\t\t\t\tArgs: []string{fmt.Sprintf(\"%d\", runtimePluginExitCode), stdoutContents, stderrContents},\n\t\t\t\t\t}, garden.ProcessIO{\n\t\t\t\t\t\tStdout: io.MultiWriter(GinkgoWriter, stdoutWriter),\n\t\t\t\t\t\tStderr: io.MultiWriter(GinkgoWriter, stderrWriter),\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns no error\", func() {\n\t\t\t\t\tExpect(runErr).NotTo(HaveOccurred())\n\t\t\t\t})\n\n\t\t\t\tIt(\"executes the plugin, passing the correct args for exec\", func() {\n\t\t\t\t\tlogfileMatcher := MatchRegexp(\".*\")\n\t\t\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\t\t\tlogfileMatcher = HaveSuffix(\"exec.log\")\n\t\t\t\t\t}\n\n\t\t\t\t\tpluginArgs := []interface{}{\n\t\t\t\t\t\tbinaries.RuntimePlugin,\n\t\t\t\t\t\t\"--debug\",\n\t\t\t\t\t\t\"--log\", logfileMatcher,\n\t\t\t\t\t\t\"--log-format\", \"json\",\n\t\t\t\t\t\t\"exec\",\n\t\t\t\t\t\t\"-p\", MatchRegexp(\".*\"),\n\t\t\t\t\t\t\"--pid-file\", MatchRegexp(\".*\"),\n\t\t\t\t\t\thandle,\n\t\t\t\t\t}\n\t\t\t\t\tif runtime.GOOS != \"windows\" {\n\t\t\t\t\t\tpluginArgs = append(pluginArgs, \"-d\")\n\t\t\t\t\t}\n\t\t\t\t\tExpect(readPluginArgs(argsFilepath)).To(ConsistOf(pluginArgs))\n\n\t\t\t\t\t_, err := process.Wait() \/\/ ensure plugin has finished running before asserting on output(s)\n\t\t\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\t\t})\n\n\t\t\t\tIt(\"passes the spec serialised into a file\", func() {\n\t\t\t\t\tprocessSpecFilePath := filepath.Join(client.TmpDir, \"exec-process-spec\")\n\t\t\t\t\tEventually(processSpecFilePath).Should(BeAnExistingFile())\n\n\t\t\t\t\tvar processSpec runrunc.PreparedSpec\n\t\t\t\t\tprocessSpecContent, err := ioutil.ReadFile(processSpecFilePath)\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t\tExpect(json.Unmarshal(processSpecContent, &processSpec)).To(Succeed(), fmt.Sprintf(\"Process Spec: %s\\n\", string(processSpecContent)))\n\n\t\t\t\t\tExpect(processSpec.Process.Args[0]).To(Equal(\"some-idiosyncratic-binary\"))\n\t\t\t\t})\n\n\t\t\t\tDescribe(\"runtime plugin stdio\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tstdoutContents = \"some stdout content\"\n\t\t\t\t\t\tstderrContents = \"some stderr content\"\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns the runtime plugin's stdout\", func() {\n\t\t\t\t\t\tEventually(stdoutWriter).Should(gbytes.Say(stdoutContents))\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns the runtime plugin's stderr\", func() {\n\t\t\t\t\t\tEventually(stderrWriter).Should(gbytes.Say(stderrContents))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when the runtime plugin exits with 32\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\truntimePluginExitCode = 32\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns an error because the runtime plugin exits non-zero on Linux\", func() {\n\t\t\t\t\t\tonlyOnLinux()\n\t\t\t\t\t\tExpect(runErr).To(MatchError(ContainSubstring(\"exit status 32\")))\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns no error on Windows\", func() {\n\t\t\t\t\t\tonlyOnWindows()\n\t\t\t\t\t\tExpect(runErr).NotTo(HaveOccurred())\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tDescribe(\"the returned process\", func() {\n\t\t\t\t\tDescribe(\"Wait\", func() {\n\t\t\t\t\t\tIt(\"returns the exit status of the runtime plugin\", func() {\n\t\t\t\t\t\t\tExpect(process.Wait()).To(Equal(0))\n\t\t\t\t\t\t})\n\n\t\t\t\t\t\tContext(\"when the runtime plugin exits with 42\", func() {\n\t\t\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\t\t\truntimePluginExitCode = 42\n\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\tIt(\"returns the exit status of the runtime plugin on Windows\", func() {\n\t\t\t\t\t\t\t\tonlyOnWindows()\n\t\t\t\t\t\t\t\tExpect(process.Wait()).To(Equal(42))\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t})\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"destroying a container\", func() {\n\t\t\t\tvar (\n\t\t\t\t\targsFilepath string\n\t\t\t\t)\n\n\t\t\t\tJustBeforeEach(func() {\n\t\t\t\t\thandle = fmt.Sprintf(\"runtime-plugin-test-handle-%s\", config.Tag)\n\t\t\t\t\targsFilepath = filepath.Join(client.TmpDir, \"delete-args\")\n\n\t\t\t\t\t_, err := client.Create(garden.ContainerSpec{Handle: handle})\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t})\n\n\t\t\t\tIt(\"executes the plugin, passing the correct args for delete\", func() {\n\t\t\t\t\tExpect(client.Destroy(handle)).To(Succeed())\n\n\t\t\t\t\tExpect(readPluginArgs(argsFilepath)).To(ConsistOf(\n\t\t\t\t\t\tbinaries.RuntimePlugin,\n\t\t\t\t\t\t\"--debug\",\n\t\t\t\t\t\t\"--log\", MatchRegexp(\".*\"),\n\t\t\t\t\t\t\"--log-format\", \"json\",\n\t\t\t\t\t\t\"delete\",\n\t\t\t\t\t\thandle,\n\t\t\t\t\t))\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n\nfunc readPluginArgs(argsFilePath string) []string {\n\tEventually(argsFilePath).Should(BeAnExistingFile())\n\tpluginArgsBytes, err := ioutil.ReadFile(argsFilePath)\n\tExpect(err).ToNot(HaveOccurred())\n\treturn strings.Split(string(pluginArgsBytes), \" \")\n}\n\nfunc onlyOnLinux() {\n\tonlyOn(\"linux\")\n}\n\nfunc onlyOnWindows() {\n\tonlyOn(\"windows\")\n}\n\nfunc onlyOn(goos string) {\n\tif runtime.GOOS != goos {\n\t\tSkip(goos + \" only\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"reflect\"\n\t\"testing\"\n)\n\ntype testUnmarshalCase struct {\n\tok bool\n\tin []byte\n\twant interface{}\n}\n\nfunc testUnmarshalCases() map[string]testUnmarshalCase {\n\tc := map[string]testUnmarshalCase{\n\t\t\"ok\": {\n\t\t\tok: true,\n\t\t\tin: []byte(`{\"i\":123, \"f\":123.456, \"b\":true,\"s\":\"abc\"}`),\n\t\t\twant: T{I: int64(123), F: float64(123.456), B: true, S: []byte{'a', 'b', 'c'}},\n\t\t},\n\t}\n\treturn c\n}\n\nfunc TestUnmarshal(t *testing.T) {\n\tfor name, tc := range testUnmarshalCases() {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\ttestUnmarshal(t, tc)\n\t\t})\n\t}\n}\n\nfunc testUnmarshal(t *testing.T, tc testUnmarshalCase) {\n\tvar got T\n\terr := json.Unmarshal(tc.in, &got)\n\tif !tc.ok {\n\t\tif !reflect.DeepEqual(got, tc.want) || err == nil {\n\t\t\tt.Errorf(\"got %+v, %v; want %+v, <error>\", got, err, tc.want)\n\t\t}\n\t\treturn\n\t}\n\tif !reflect.DeepEqual(got, tc.want) || err != nil {\n\t\tt.Errorf(\"got %+v, %+v; want %+v, <nil>\", got, err, tc.want)\n\t}\n}\n<commit_msg>ff: fix tests<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"reflect\"\n\t\"testing\"\n)\n\ntype testUnmarshalCase struct {\n\tok bool\n\tin []byte\n\twant interface{}\n}\n\nfunc testUnmarshalCases() map[string]testUnmarshalCase {\n\tc := map[string]testUnmarshalCase{\n\t\t\"ok\": {\n\t\t\tok: true,\n\t\t\tin: []byte(`{\"i\":123, \"f\":123.456, \"b\":true,\"s\":\"abc\"}`),\n\t\t\twant: One{I: int64(123), F: float64(123.456), B: true, S: \"abc\"},\n\t\t},\n\t}\n\treturn c\n}\n\nfunc TestUnmarshal(t *testing.T) {\n\tfor name, tc := range testUnmarshalCases() {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\ttestUnmarshal(t, tc)\n\t\t})\n\t}\n}\n\nfunc testUnmarshal(t *testing.T, tc testUnmarshalCase) {\n\tvar got One\n\terr := json.Unmarshal(tc.in, &got)\n\tif !tc.ok {\n\t\tif !reflect.DeepEqual(got, tc.want) || err == nil {\n\t\t\tt.Errorf(\"got %+v, %v; want %+v, <error>\", got, err, tc.want)\n\t\t}\n\t\treturn\n\t}\n\tif !reflect.DeepEqual(got, tc.want) || err != nil {\n\t\tt.Errorf(\"got %+v, %+v; want %+v, <nil>\", got, err, tc.want)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package fdcount\n\nimport (\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/getlantern\/testify\/assert\"\n)\n\nfunc TestTCP(t *testing.T) {\n\t\/\/ Lower maxAssertAttempts to keep this test from running too long\n\tmaxAssertAttempts = 2\n\n\tl0, err := net.Listen(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer l0.Close()\n\n\tstart, fdc, err := Matching(\"TCP\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tassert.Equal(t, 1, start, \"Starting count should have been 1\")\n\n\terr = fdc.AssertDelta(0)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tassert.NoError(t, err, \"Initial TCP count should be 0\")\n\n\tl, err := net.Listen(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer l.Close()\n\t_, middle, err := Matching(\"TCP\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = fdc.AssertDelta(0)\n\tif assert.Error(t, err, \"Asserting wrong count should fail\") {\n\t\tassert.Contains(t, err.Error(), \"Expected 0, have 1\")\n\t\tassert.True(t, len(err.Error()) > 100)\n\t}\n\terr = fdc.AssertDelta(1)\n\tassert.NoError(t, err, \"Ending TCP count should be 1\")\n\n\terr = fdc.AssertDelta(0)\n\tif assert.Error(t, err, \"Asserting wrong count should fail\") {\n\t\tassert.Contains(t, err.Error(), \"Expected 0, have 1\")\n\t\tassert.Contains(t, err.Error(), \"New\")\n\t\tassert.True(t, len(err.Error()) > 100)\n\t}\n\n\tl.Close()\n\terr = middle.AssertDelta(0)\n\tif assert.Error(t, err, \"Asserting wrong count should fail\") {\n\t\tassert.Contains(t, err.Error(), \"Expected 0, have -1\")\n\t\tassert.Contains(t, err.Error(), \"Removed\")\n\t\tassert.True(t, len(err.Error()) > 100)\n\t}\n}\n\nfunc TestWaitUntilNoneMatchOK(t *testing.T) {\n\tconn, err := net.Dial(\"tcp\", \"www.google.com:80\")\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to dial google: %v\", err)\n\t}\n\tdefer conn.Close()\n\n\twait := 250 * time.Millisecond\n\tstart := time.Now()\n\tgo func() {\n\t\ttime.Sleep(wait)\n\t\tconn.Close()\n\t}()\n\n\terr = WaitUntilNoneMatch(\"TCP\", wait*2)\n\telapsed := time.Now().Sub(start)\n\tassert.NoError(t, err, \"Waiting should have succeeded\")\n\tassert.True(t, elapsed >= wait, \"Should have waited a while\")\n}\n\nfunc TestWaitUntilNoneMatchTimeout(t *testing.T) {\n\tconn, err := net.Dial(\"tcp\", \"www.google.com:80\")\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to dial google: %v\", err)\n\t}\n\tdefer conn.Close()\n\n\twait := 250 * time.Millisecond\n\tstart := time.Now()\n\tgo func() {\n\t\ttime.Sleep(wait)\n\t\tconn.Close()\n\t}()\n\n\terr = WaitUntilNoneMatch(\"TCP\", wait\/2)\n\telapsed := time.Now().Sub(start)\n\tassert.Error(t, err, \"Waiting should have failed\")\n\tassert.True(t, elapsed < wait, \"Should have waited less than time to close conn\")\n}\n<commit_msg>double wait time in TestWaitUntilNoneMatchTimeout<commit_after>package fdcount\n\nimport (\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/getlantern\/testify\/assert\"\n)\n\nfunc TestTCP(t *testing.T) {\n\t\/\/ Lower maxAssertAttempts to keep this test from running too long\n\tmaxAssertAttempts = 2\n\n\tl0, err := net.Listen(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer l0.Close()\n\n\tstart, fdc, err := Matching(\"TCP\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tassert.Equal(t, 1, start, \"Starting count should have been 1\")\n\n\terr = fdc.AssertDelta(0)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tassert.NoError(t, err, \"Initial TCP count should be 0\")\n\n\tl, err := net.Listen(\"tcp\", \"localhost:0\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer l.Close()\n\t_, middle, err := Matching(\"TCP\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\terr = fdc.AssertDelta(0)\n\tif assert.Error(t, err, \"Asserting wrong count should fail\") {\n\t\tassert.Contains(t, err.Error(), \"Expected 0, have 1\")\n\t\tassert.True(t, len(err.Error()) > 100)\n\t}\n\terr = fdc.AssertDelta(1)\n\tassert.NoError(t, err, \"Ending TCP count should be 1\")\n\n\terr = fdc.AssertDelta(0)\n\tif assert.Error(t, err, \"Asserting wrong count should fail\") {\n\t\tassert.Contains(t, err.Error(), \"Expected 0, have 1\")\n\t\tassert.Contains(t, err.Error(), \"New\")\n\t\tassert.True(t, len(err.Error()) > 100)\n\t}\n\n\tl.Close()\n\terr = middle.AssertDelta(0)\n\tif assert.Error(t, err, \"Asserting wrong count should fail\") {\n\t\tassert.Contains(t, err.Error(), \"Expected 0, have -1\")\n\t\tassert.Contains(t, err.Error(), \"Removed\")\n\t\tassert.True(t, len(err.Error()) > 100)\n\t}\n}\n\nfunc TestWaitUntilNoneMatchOK(t *testing.T) {\n\tconn, err := net.Dial(\"tcp\", \"www.google.com:80\")\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to dial google: %v\", err)\n\t}\n\tdefer conn.Close()\n\n\twait := 250 * time.Millisecond\n\tstart := time.Now()\n\tgo func() {\n\t\ttime.Sleep(wait)\n\t\tconn.Close()\n\t}()\n\n\terr = WaitUntilNoneMatch(\"TCP\", wait*2)\n\telapsed := time.Now().Sub(start)\n\tassert.NoError(t, err, \"Waiting should have succeeded\")\n\tassert.True(t, elapsed >= wait, \"Should have waited a while\")\n}\n\nfunc TestWaitUntilNoneMatchTimeout(t *testing.T) {\n\tconn, err := net.Dial(\"tcp\", \"www.google.com:80\")\n\tif err != nil {\n\t\tt.Fatalf(\"Unable to dial google: %v\", err)\n\t}\n\tdefer conn.Close()\n\n\twait := 500 * time.Millisecond\n\tstart := time.Now()\n\tgo func() {\n\t\ttime.Sleep(wait)\n\t\tconn.Close()\n\t}()\n\n\terr = WaitUntilNoneMatch(\"TCP\", wait\/2)\n\telapsed := time.Now().Sub(start)\n\tassert.Error(t, err, \"Waiting should have failed\")\n\tassert.True(t, elapsed < wait, \"Should have waited less than time to close conn\")\n}\n<|endoftext|>"} {"text":"<commit_before>package receiver\n\nimport (\n\t\"github.com\/tgermain\/grandRepositorySky\/communicator\"\n\t\"github.com\/tgermain\/grandRepositorySky\/communicator\/sender\"\n\t\"github.com\/tgermain\/grandRepositorySky\/node\"\n\t\"github.com\/tgermain\/grandRepositorySky\/shared\"\n\t\"net\"\n\t\"runtime\"\n\t\/\/ \"time\"\n)\n\n\/\/Objects parts ---------------------------------------------------------\n\ntype ReceiverLink struct {\n\tnode *node.DHTnode\n\tsender *sender.SenderLink\n}\n\n\/\/Private methods -------------------------------------------------------\nfunc (r *ReceiverLink) handleRequest(payload []byte) {\n\t\/\/unmarshall message\n\tmsg := communicator.UnmarshallMessage(payload)\n\t\/\/switch depending of message type\n\tshared.Logger.Debug(\"Handle Request receive something : %#v\", msg)\n\tswitch {\n\tcase msg.TypeOfMsg == communicator.LOOKUP:\n\t\t{\n\t\t\tr.receiveLookup(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.LOOKUPRESPONSE:\n\t\t{\n\t\t\tr.receiveLookupResponse(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.JOINRING:\n\t\t{\n\t\t\tr.receiveJoinRing(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.UPDATESUCCESSOR:\n\t\t{\n\t\t\tr.receiveUpdateSuccessor(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.UPDATEPREDECESSOR:\n\t\t{\n\t\t\tr.receiveUpdatePredecessor(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.PRINTRING:\n\t\t{\n\t\t\tr.receivePrintRing(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.UPDATEFINGERTABLE:\n\t\t{\n\t\t\tr.receiveUpdateFingerTable(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.AREYOUALIVE:\n\t\t{\n\t\t\tr.receiveHeartBeat(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.IAMALIVE:\n\t\t{\n\t\t\tr.receiveHeartBeatResponse(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.GETSUCCESORE:\n\t\t{\n\t\t\tr.receiveGetSuccesor(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.GETSUCCESORERESPONSE:\n\t\t{\n\t\t\tr.receiveGetSuccesorResponse(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.GETDATA:\n\t\t{\n\t\t\tr.receiveGetData(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.GETDATARESPONSE:\n\t\t{\n\t\t\tr.receiveGetDataResponse(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.DELETEDATA:\n\t\t{\n\t\t\tr.receiveDeleteData(&msg)\n\t\t}\n\tdefault:\n\t\t{\n\t\t\t\/\/rejected mesage\n\t\t\tshared.Logger.Error(\"Message rejected\")\n\t\t}\n\t}\n\t\/\/ multiple launch a go routine\n}\n\n\/\/========RECEIVE\nfunc (r *ReceiverLink) receiveUpdatePredecessor(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"newNodeID\", \"newNodeIp\", \"newNodePort\") {\n\t\tnewNodeID, _ := msg.Parameters[\"newNodeID\"]\n\t\tnewNodeIp, _ := msg.Parameters[\"newNodeIp\"]\n\t\tnewNodePort, _ := msg.Parameters[\"newNodePort\"]\n\t\tshared.Logger.Info(\"Receive an update Predecessor to %s\", newNodeID)\n\n\t\tr.node.UpdatePredecessor(&shared.DistantNode{\n\t\t\tnewNodeID,\n\t\t\tnewNodeIp,\n\t\t\tnewNodePort,\n\t\t})\n\n\t}\n}\n\nfunc (r *ReceiverLink) receiveUpdateSuccessor(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"newNodeID\", \"newNodeIp\", \"newNodePort\") {\n\t\tnewNodeID, _ := msg.Parameters[\"newNodeID\"]\n\t\tnewNodeIp, _ := msg.Parameters[\"newNodeIp\"]\n\t\tnewNodePort, _ := msg.Parameters[\"newNodePort\"]\n\t\tshared.Logger.Info(\"Receive an update Successor %s\", newNodeID)\n\n\t\tr.node.UpdateSuccessor(&shared.DistantNode{\n\t\t\tnewNodeID,\n\t\t\tnewNodeIp,\n\t\t\tnewNodePort,\n\t\t})\n\n\t}\n}\n\nfunc (r *ReceiverLink) receivePrintRing(msg *communicator.Message) {\n\t\/\/write your info and if the successor is the origine of the communicator.Message, send it back to him\n\tif checkRequiredParams(msg.Parameters, \"currentString\") {\n\t\tcurrentString, _ := msg.Parameters[\"currentString\"]\n\n\t\tshared.Logger.Info(\"Receiving a print ring request from %s\", msg.Origin.Id)\n\t\tif shared.LocalId == msg.Origin.Id {\n\t\t\tshared.Logger.Info(\"And %s is me !\", msg.Origin.Id)\n\t\t\t\/\/I launch this request know print the result\n\t\t\tshared.Logger.Info(\"The ring is like :\\n%s\", currentString)\n\t\t} else {\n\t\t\t\/\/pass the request around\n\t\t\tr.node.PrintNodeName(¤tString)\n\t\t\tmsg.Parameters[\"currentString\"] = currentString\n\t\t\tgo r.sender.RelayPrintRing(r.node.GetSuccesor(), msg)\n\t\t}\n\t}\n}\n\nfunc (r *ReceiverLink) receiveJoinRing(msg *communicator.Message) {\n\tshared.Logger.Info(\"Receiving join ring message from %s\", msg.Origin)\n\tgo r.node.AddToRing(&msg.Origin)\n}\n\nfunc (r *ReceiverLink) receiveLookup(msg *communicator.Message) {\n\t\/\/check if the parameter are correct\n\tif checkRequiredParams(msg.Parameters, \"idAnswer\", \"idSearched\") {\n\t\tidSearched, _ := msg.Parameters[\"idSearched\"]\n\t\tidAnswer, _ := msg.Parameters[\"idAnswer\"]\n\t\tshared.Logger.Info(\"Receive a lookup for : %s\", idSearched)\n\n\t\t\/\/Am I responsible for the key requested ?\n\t\tif r.node.IsResponsible(idSearched) {\n\t\t\tshared.Logger.Info(\"I'm responsible !\")\n\t\t\tgo r.sender.SendLookupResponse(&msg.Origin, idAnswer, idSearched)\n\t\t} else {\n\t\t\t\/\/no -> sending the request to the closest node\n\t\t\tshared.Logger.Info(\"relay the lookup\")\n\t\t\tgo r.sender.RelayLookup(r.node.FindClosestNode(idSearched), msg)\n\t\t}\n\n\t}\n\n}\n\nfunc (r *ReceiverLink) receiveLookupResponse(msg *communicator.Message) {\n\t\/\/heck if everything required is here\n\tif checkRequiredParams(msg.Parameters, \"idAnswer\", \"idSearched\") {\n\t\tidSearched, _ := msg.Parameters[\"idSearched\"]\n\t\tidAnswer, _ := msg.Parameters[\"idAnswer\"]\n\n\t\tshared.Logger.Info(\"Receive a lookup response for : %s\", idSearched)\n\n\t\tchanResp, ok2 := communicator.PendingLookups[idAnswer]\n\t\tif ok2 {\n\t\t\tchanResp <- msg.Origin\n\t\t}\n\t}\n}\n\nfunc (r *ReceiverLink) receiveUpdateFingerTable(msg *communicator.Message) {\n\tshared.Logger.Info(\"Receiving update finger table from %s\", msg.Origin.Id)\n\tr.node.UpdateFingerTable()\n}\n\nfunc (r *ReceiverLink) receiveHeartBeat(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"idAnswer\") {\n\t\tshared.Logger.Info(\"Receiving a heartBeat from %s\", msg.Origin.Id)\n\t\tidAnswer, _ := msg.Parameters[\"idAnswer\"]\n\n\t\tgo r.sender.SendHeartBeatResponse(&msg.Origin, idAnswer)\n\t}\n}\n\nfunc (r *ReceiverLink) receiveHeartBeatResponse(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"idAnswer\") {\n\t\tidAnswer, _ := msg.Parameters[\"idAnswer\"]\n\t\tshared.Logger.Info(\"Receiving a heartBeat response from %s for %s\", msg.Origin.Id, idAnswer)\n\n\t\tchanResp, ok2 := communicator.PendingHearBeat[idAnswer]\n\t\tif ok2 {\n\t\t\tchanResp <- msg.Origin\n\t\t}\n\t}\n}\n\nfunc (r *ReceiverLink) receiveGetSuccesor(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"idAnswer\") {\n\t\tshared.Logger.Info(\"Receiving a get successor from %s\", msg.Origin.Id)\n\t\tidAnswer, _ := msg.Parameters[\"idAnswer\"]\n\n\t\tgo r.sender.SendGetSuccResponse(&msg.Origin, idAnswer, r.node.GetSuccesor())\n\t}\n}\n\nfunc (r *ReceiverLink) receiveGetSuccesorResponse(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"idAnswer\", \"succSuccID\",\n\t\t\"succSuccIp\",\n\t\t\"succSuccPort\") {\n\t\tidAnswer, _ := msg.Parameters[\"idAnswer\"]\n\t\tsuccSuccID, _ := msg.Parameters[\"succSuccID\"]\n\t\tsuccSuccIp, _ := msg.Parameters[\"succSuccIp\"]\n\t\tsuccSuccPort, _ := msg.Parameters[\"succSuccPort\"]\n\n\t\tshared.Logger.Info(\"Receiving a GetSuccesor response from %s for %s\", msg.Origin.Id, idAnswer)\n\n\t\tsuccSucc := shared.DistantNode{\n\t\t\tsuccSuccID,\n\t\t\tsuccSuccIp,\n\t\t\tsuccSuccPort,\n\t\t}\n\n\t\tchanResp, ok2 := communicator.PendingGetSucc[idAnswer]\n\t\tif ok2 {\n\t\t\tchanResp <- succSucc\n\t\t}\n\t}\n}\n\nfunc (r *ReceiverLink) receiveGetData(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"keySearched\", \"idAnswer\") {\n\t\tshared.Logger.Info(\"Receiving a get data from %s\", msg.Origin.Id)\n\t\tkeySearched, _ := msg.Parameters[\"keySearched\"]\n\t\tidAnswer, _ := msg.Parameters[\"idAnswer\"]\n\t\t_, forced := msg.Parameters[\"forced\"]\n\n\t\tvar result string\n\t\tif forced {\n\t\t\tresult = r.node.GetLocalData(keySearched)\n\t\t} else {\n\t\t\tresult = r.node.GetData(keySearched)\n\t\t}\n\t\tr.sender.SendGetDataResponse(&msg.Origin, idAnswer, result)\n\t}\n}\n\nfunc (r *ReceiverLink) receiveGetDataResponse(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"idAnswer\", \"value\") {\n\t\tidAnswer, _ := msg.Parameters[\"idAnswer\"]\n\t\tvalue, _ := msg.Parameters[\"value\"]\n\n\t\tshared.Logger.Info(\"Receiving a get data response from %s for %s\", msg.Origin.Id, idAnswer)\n\n\t\tchanResp, ok2 := communicator.PendingGetData[idAnswer]\n\t\tif ok2 {\n\t\t\tchanResp <- value\n\t\t}\n\t}\n}\n\nfunc (r *ReceiverLink) receiveSetData(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"key\", \"value\") {\n\t\tkey, _ := msg.Parameters[\"key\"]\n\t\tvalue, _ := msg.Parameters[\"value\"]\n\t\ttag, forced := msg.Parameters[\"forced\"]\n\t\tshared.Logger.Info(\"Receiving a set data from %s with data %s\", msg.Origin.Id, value)\n\n\t\tif forced {\n\t\t\tr.node.SetLocalData(key, value, tag)\n\t\t} else {\n\t\t\tr.node.SetData(key, value)\n\t\t}\n\t}\n}\n\nfunc (r *ReceiverLink) receiveDeleteData(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"key\") {\n\t\tkey, _ := msg.Parameters[\"key\"]\n\t\tshared.Logger.Info(\"Receiving a delete data from %s for key %s\", msg.Origin.Id, key)\n\n\t\tr.node.DeleteLocalData(key)\n\t}\n}\n\nfunc checkRequiredParams(params map[string]string, p ...string) bool {\n\tfor _, v := range p {\n\t\t_, ok := params[v]\n\t\tif !ok {\n\t\t\tshared.Logger.Error(\"missing parameter %s\", v)\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/Exported methods ------------------------------------------------------\n\nfunc (r *ReceiverLink) StartAndListen() {\n\n\t\/\/launch a go routine and start to listen on local address\n\t\/\/handle incoming communicator.Message\n\n\taddr, err := net.ResolveUDPAddr(\"udp\", (shared.LocalIp + \":\" + shared.LocalPort))\n\tif err != nil {\n\t\tshared.Logger.Critical(\"error when resolving udp address:\", err)\n\t\tpanic(err)\n\t}\n\tconn, err := net.ListenUDP(\"udp\", addr)\n\tif err != nil {\n\t\tshared.Logger.Critical(\"error when connecting to udp:\", err)\n\t\tpanic(err)\n\t}\n\t\/\/ defer conn.Close()\n\tgo func() {\n\t\tshared.Logger.Info(\"Receiver starting to listen on address [%s]\", addr)\n\t\tfor {\n\t\t\t\/\/multiple goroutine ! work !\n\t\t\tbuffer := make([]byte, 1024)\n\t\t\tbytesReads, err := conn.Read(buffer)\n\t\t\tif err != nil {\n\t\t\t\tshared.Logger.Critical(\"error while reading:\", err)\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tpayload := buffer[0:bytesReads]\n\t\t\tgo r.handleRequest(payload)\n\t\t\t\/\/ time.Sleep(time.Millisecond * 10)\n\t\t\truntime.Gosched()\n\t\t}\n\t}()\n\n}\n\nfunc MakeReceiver(n *node.DHTnode, s *sender.SenderLink) *ReceiverLink {\n\treturn &ReceiverLink{\n\t\tn,\n\t\ts,\n\t}\n}\n<commit_msg>forgeting to handle a type of message<commit_after>package receiver\n\nimport (\n\t\"github.com\/tgermain\/grandRepositorySky\/communicator\"\n\t\"github.com\/tgermain\/grandRepositorySky\/communicator\/sender\"\n\t\"github.com\/tgermain\/grandRepositorySky\/node\"\n\t\"github.com\/tgermain\/grandRepositorySky\/shared\"\n\t\"net\"\n\t\"runtime\"\n\t\/\/ \"time\"\n)\n\n\/\/Objects parts ---------------------------------------------------------\n\ntype ReceiverLink struct {\n\tnode *node.DHTnode\n\tsender *sender.SenderLink\n}\n\n\/\/Private methods -------------------------------------------------------\nfunc (r *ReceiverLink) handleRequest(payload []byte) {\n\t\/\/unmarshall message\n\tmsg := communicator.UnmarshallMessage(payload)\n\t\/\/switch depending of message type\n\tshared.Logger.Debug(\"Handle Request receive something : %#v\", msg)\n\tswitch {\n\tcase msg.TypeOfMsg == communicator.LOOKUP:\n\t\t{\n\t\t\tr.receiveLookup(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.LOOKUPRESPONSE:\n\t\t{\n\t\t\tr.receiveLookupResponse(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.JOINRING:\n\t\t{\n\t\t\tr.receiveJoinRing(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.UPDATESUCCESSOR:\n\t\t{\n\t\t\tr.receiveUpdateSuccessor(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.UPDATEPREDECESSOR:\n\t\t{\n\t\t\tr.receiveUpdatePredecessor(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.PRINTRING:\n\t\t{\n\t\t\tr.receivePrintRing(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.UPDATEFINGERTABLE:\n\t\t{\n\t\t\tr.receiveUpdateFingerTable(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.AREYOUALIVE:\n\t\t{\n\t\t\tr.receiveHeartBeat(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.IAMALIVE:\n\t\t{\n\t\t\tr.receiveHeartBeatResponse(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.GETSUCCESORE:\n\t\t{\n\t\t\tr.receiveGetSuccesor(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.GETSUCCESORERESPONSE:\n\t\t{\n\t\t\tr.receiveGetSuccesorResponse(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.GETDATA:\n\t\t{\n\t\t\tr.receiveGetData(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.GETDATARESPONSE:\n\t\t{\n\t\t\tr.receiveGetDataResponse(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.DELETEDATA:\n\t\t{\n\t\t\tr.receiveDeleteData(&msg)\n\t\t}\n\tcase msg.TypeOfMsg == communicator.SETDATA:\n\t\t{\n\t\t\tr.receiveSetData(&msg)\n\t\t}\n\tdefault:\n\t\t{\n\t\t\t\/\/rejected mesage\n\t\t\tshared.Logger.Error(\"Message rejected\")\n\t\t\tshared.Logger.Error(\"Handle Request receive something : %#v\", msg)\n\t\t}\n\t}\n\t\/\/ multiple launch a go routine\n}\n\n\/\/========RECEIVE\nfunc (r *ReceiverLink) receiveUpdatePredecessor(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"newNodeID\", \"newNodeIp\", \"newNodePort\") {\n\t\tnewNodeID, _ := msg.Parameters[\"newNodeID\"]\n\t\tnewNodeIp, _ := msg.Parameters[\"newNodeIp\"]\n\t\tnewNodePort, _ := msg.Parameters[\"newNodePort\"]\n\t\tshared.Logger.Error(\"Receive an update Predecessor to %s from %s : %s\", newNodeID, msg.Origin.Ip, msg.Origin.Port)\n\n\t\tr.node.UpdatePredecessor(&shared.DistantNode{\n\t\t\tnewNodeID,\n\t\t\tnewNodeIp,\n\t\t\tnewNodePort,\n\t\t})\n\n\t}\n}\n\nfunc (r *ReceiverLink) receiveUpdateSuccessor(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"newNodeID\", \"newNodeIp\", \"newNodePort\") {\n\t\tnewNodeID, _ := msg.Parameters[\"newNodeID\"]\n\t\tnewNodeIp, _ := msg.Parameters[\"newNodeIp\"]\n\t\tnewNodePort, _ := msg.Parameters[\"newNodePort\"]\n\t\tshared.Logger.Error(\"Receive an update Successor %s from %s : %s\", newNodeID, msg.Origin.Ip, msg.Origin.Port)\n\n\t\tr.node.UpdateSuccessor(&shared.DistantNode{\n\t\t\tnewNodeID,\n\t\t\tnewNodeIp,\n\t\t\tnewNodePort,\n\t\t})\n\n\t}\n}\n\nfunc (r *ReceiverLink) receivePrintRing(msg *communicator.Message) {\n\t\/\/write your info and if the successor is the origine of the communicator.Message, send it back to him\n\tif checkRequiredParams(msg.Parameters, \"currentString\") {\n\t\tcurrentString, _ := msg.Parameters[\"currentString\"]\n\n\t\tshared.Logger.Info(\"Receiving a print ring request from %s\", msg.Origin.Id)\n\t\tif shared.LocalId == msg.Origin.Id {\n\t\t\tshared.Logger.Info(\"And %s is me !\", msg.Origin.Id)\n\t\t\t\/\/I launch this request know print the result\n\t\t\tshared.Logger.Info(\"The ring is like :\\n%s\", currentString)\n\t\t} else {\n\t\t\t\/\/pass the request around\n\t\t\tr.node.PrintNodeName(¤tString)\n\t\t\tmsg.Parameters[\"currentString\"] = currentString\n\t\t\tgo r.sender.RelayPrintRing(r.node.GetSuccesor(), msg)\n\t\t}\n\t}\n}\n\nfunc (r *ReceiverLink) receiveJoinRing(msg *communicator.Message) {\n\tshared.Logger.Info(\"Receiving join ring message from %s\", msg.Origin)\n\tgo r.node.AddToRing(&msg.Origin)\n}\n\nfunc (r *ReceiverLink) receiveLookup(msg *communicator.Message) {\n\t\/\/check if the parameter are correct\n\tif checkRequiredParams(msg.Parameters, \"idAnswer\", \"idSearched\") {\n\t\tidSearched, _ := msg.Parameters[\"idSearched\"]\n\t\tidAnswer, _ := msg.Parameters[\"idAnswer\"]\n\t\tshared.Logger.Info(\"Receive a lookup for : %s\", idSearched)\n\n\t\t\/\/Am I responsible for the key requested ?\n\t\tif r.node.IsResponsible(idSearched) {\n\t\t\tshared.Logger.Info(\"I'm responsible !\")\n\t\t\tgo r.sender.SendLookupResponse(&msg.Origin, idAnswer, idSearched)\n\t\t} else {\n\t\t\t\/\/no -> sending the request to the closest node\n\t\t\tshared.Logger.Info(\"relay the lookup\")\n\t\t\tgo r.sender.RelayLookup(r.node.FindClosestNode(idSearched), msg)\n\t\t}\n\n\t}\n\n}\n\nfunc (r *ReceiverLink) receiveLookupResponse(msg *communicator.Message) {\n\t\/\/heck if everything required is here\n\tif checkRequiredParams(msg.Parameters, \"idAnswer\", \"idSearched\") {\n\t\tidSearched, _ := msg.Parameters[\"idSearched\"]\n\t\tidAnswer, _ := msg.Parameters[\"idAnswer\"]\n\n\t\tshared.Logger.Info(\"Receive a lookup response for : %s\", idSearched)\n\n\t\tchanResp, ok2 := communicator.PendingLookups[idAnswer]\n\t\tif ok2 {\n\t\t\tchanResp <- msg.Origin\n\t\t}\n\t}\n}\n\nfunc (r *ReceiverLink) receiveUpdateFingerTable(msg *communicator.Message) {\n\tshared.Logger.Info(\"Receiving update finger table from %s\", msg.Origin.Id)\n\tr.node.UpdateFingerTable()\n}\n\nfunc (r *ReceiverLink) receiveHeartBeat(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"idAnswer\") {\n\t\tshared.Logger.Info(\"Receiving a heartBeat from %s\", msg.Origin.Id)\n\t\tidAnswer, _ := msg.Parameters[\"idAnswer\"]\n\n\t\tgo r.sender.SendHeartBeatResponse(&msg.Origin, idAnswer)\n\t}\n}\n\nfunc (r *ReceiverLink) receiveHeartBeatResponse(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"idAnswer\") {\n\t\tidAnswer, _ := msg.Parameters[\"idAnswer\"]\n\t\tshared.Logger.Info(\"Receiving a heartBeat response from %s for %s\", msg.Origin.Id, idAnswer)\n\n\t\tchanResp, ok2 := communicator.PendingHearBeat[idAnswer]\n\t\tif ok2 {\n\t\t\tchanResp <- msg.Origin\n\t\t}\n\t}\n}\n\nfunc (r *ReceiverLink) receiveGetSuccesor(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"idAnswer\") {\n\t\tshared.Logger.Info(\"Receiving a get successor from %s\", msg.Origin.Id)\n\t\tidAnswer, _ := msg.Parameters[\"idAnswer\"]\n\n\t\tgo r.sender.SendGetSuccResponse(&msg.Origin, idAnswer, r.node.GetSuccesor())\n\t}\n}\n\nfunc (r *ReceiverLink) receiveGetSuccesorResponse(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"idAnswer\", \"succSuccID\",\n\t\t\"succSuccIp\",\n\t\t\"succSuccPort\") {\n\t\tidAnswer, _ := msg.Parameters[\"idAnswer\"]\n\t\tsuccSuccID, _ := msg.Parameters[\"succSuccID\"]\n\t\tsuccSuccIp, _ := msg.Parameters[\"succSuccIp\"]\n\t\tsuccSuccPort, _ := msg.Parameters[\"succSuccPort\"]\n\n\t\tshared.Logger.Info(\"Receiving a GetSuccesor response from %s for %s\", msg.Origin.Id, idAnswer)\n\n\t\tsuccSucc := shared.DistantNode{\n\t\t\tsuccSuccID,\n\t\t\tsuccSuccIp,\n\t\t\tsuccSuccPort,\n\t\t}\n\n\t\tchanResp, ok2 := communicator.PendingGetSucc[idAnswer]\n\t\tif ok2 {\n\t\t\tchanResp <- succSucc\n\t\t}\n\t}\n}\n\nfunc (r *ReceiverLink) receiveGetData(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"keySearched\", \"idAnswer\") {\n\t\tshared.Logger.Info(\"Receiving a get data from %s\", msg.Origin.Id)\n\t\tkeySearched, _ := msg.Parameters[\"keySearched\"]\n\t\tidAnswer, _ := msg.Parameters[\"idAnswer\"]\n\t\t_, forced := msg.Parameters[\"forced\"]\n\n\t\tvar result string\n\t\tif forced {\n\t\t\tresult = r.node.GetLocalData(keySearched)\n\t\t} else {\n\t\t\tresult = r.node.GetData(keySearched)\n\t\t}\n\t\tr.sender.SendGetDataResponse(&msg.Origin, idAnswer, result)\n\t}\n}\n\nfunc (r *ReceiverLink) receiveGetDataResponse(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"idAnswer\", \"value\") {\n\t\tidAnswer, _ := msg.Parameters[\"idAnswer\"]\n\t\tvalue, _ := msg.Parameters[\"value\"]\n\n\t\tshared.Logger.Info(\"Receiving a get data response from %s for %s\", msg.Origin.Id, idAnswer)\n\n\t\tchanResp, ok2 := communicator.PendingGetData[idAnswer]\n\t\tif ok2 {\n\t\t\tchanResp <- value\n\t\t}\n\t}\n}\n\nfunc (r *ReceiverLink) receiveSetData(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"key\", \"value\") {\n\t\tkey, _ := msg.Parameters[\"key\"]\n\t\tvalue, _ := msg.Parameters[\"value\"]\n\t\ttag, forced := msg.Parameters[\"forced\"]\n\t\tshared.Logger.Info(\"Receiving a set data from %s with data %s\", msg.Origin.Id, value)\n\n\t\tif forced {\n\t\t\tr.node.SetLocalData(key, value, tag)\n\t\t} else {\n\t\t\tr.node.SetData(key, value)\n\t\t}\n\t}\n}\n\nfunc (r *ReceiverLink) receiveDeleteData(msg *communicator.Message) {\n\tif checkRequiredParams(msg.Parameters, \"key\") {\n\t\tkey, _ := msg.Parameters[\"key\"]\n\t\tshared.Logger.Info(\"Receiving a delete data from %s for key %s\", msg.Origin.Id, key)\n\n\t\tr.node.DeleteLocalData(key)\n\t}\n}\n\nfunc checkRequiredParams(params map[string]string, p ...string) bool {\n\tfor _, v := range p {\n\t\t_, ok := params[v]\n\t\tif !ok {\n\t\t\tshared.Logger.Error(\"missing parameter %s\", v)\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/Exported methods ------------------------------------------------------\n\nfunc (r *ReceiverLink) StartAndListen() {\n\n\t\/\/launch a go routine and start to listen on local address\n\t\/\/handle incoming communicator.Message\n\n\taddr, err := net.ResolveUDPAddr(\"udp\", (\"\" + \":\" + shared.LocalPort))\n\tif err != nil {\n\t\tshared.Logger.Critical(\"error when resolving udp address:\", err)\n\t\tpanic(err)\n\t}\n\tconn, err := net.ListenUDP(\"udp\", addr)\n\tif err != nil {\n\t\tshared.Logger.Critical(\"error when connecting to udp:\", err)\n\t\tpanic(err)\n\t}\n\t\/\/ defer conn.Close()\n\tgo func() {\n\t\tshared.Logger.Info(\"Receiver starting to listen on address [%s]\", addr)\n\t\tfor {\n\t\t\t\/\/multiple goroutine ! work !\n\t\t\tbuffer := make([]byte, 1024)\n\t\t\tbytesReads, err := conn.Read(buffer)\n\t\t\tif err != nil {\n\t\t\t\tshared.Logger.Critical(\"error while reading:\", err)\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tpayload := buffer[0:bytesReads]\n\t\t\tgo r.handleRequest(payload)\n\t\t\t\/\/ time.Sleep(time.Millisecond * 10)\n\t\t\truntime.Gosched()\n\t\t}\n\t}()\n\n}\n\nfunc MakeReceiver(n *node.DHTnode, s *sender.SenderLink) *ReceiverLink {\n\treturn &ReceiverLink{\n\t\tn,\n\t\ts,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package p4\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Describe struct {\n\tChange int\n\tUser string\n\tClient string\n\tTime string\n\tDescription string\n\tStatus string\n\tFiles []DescribeFile\n}\n\ntype DescribeFile struct {\n\tPath string\n\tVersion int\n\tAction string\n}\n\ntype Review struct {\n\tChange int\n\tUser string\n\tEmail string\n\tName string\n}\n\nvar newlineRegexp = regexp.MustCompile(\"\\r\\n|\\r|\\n\")\n\nvar countersRegexp = regexp.MustCompile(\"(?m)^(.+) = (.+)$\")\nvar describeRegexp = regexp.MustCompile(\"\\\\AChange (\\\\d+) by (.+)@(.+) on (.+)((?: *pending*)?)\\n\\n((?:\\t.*\\n)*)\\nAffected files ...\\n\\n((?:... (?:.+) (?:[\\\\w\/]+)\\n)*)\\n\\\\z\")\nvar describeAffectedRegexp = regexp.MustCompile(\"(?m)^... (.+)#(\\\\d+) ([\\\\w\/]+)$\")\nvar printRegexp = regexp.MustCompile(\"(?m)\\\\A(.+)(@|#)(\\\\d+)(?: - | )(.+)$\")\nvar reviewRegexp = regexp.MustCompile(\"(?m)^Change (\\\\d+) (.+) <(.+)> \\\\((.+)\\\\)$\")\n\nfunc (c *Connection) Counters() (map[string]string, error) {\n\tcounters := map[string]string{}\n\n\tif data, err := c.execP4(\"counters\"); err == nil {\n\t\tsubmatch := countersRegexp.FindAllSubmatch(data, 1000000)\n\n\t\tfor _, counter := range submatch {\n\t\t\tcounters[string(counter[1])] = string(counter[2])\n\t\t}\n\n\t\treturn counters, nil\n\t} else {\n\t\treturn nil, err\n\t}\n}\n\nfunc (c *Connection) Describe(change int) (Describe, error) {\n\tvar describe Describe\n\n\tif data, err := c.execP4(\"describe\", \"-s\", strconv.Itoa(change)); err == nil {\n\t\tsubmatch := describeRegexp.FindSubmatch(data)\n\t\tintChange, err := strconv.Atoi(string(submatch[1]))\n\n\t\tif err != nil {\n\t\t\treturn describe, err\n\t\t}\n\n\t\tstatus := \"submitted\"\n\n\t\tif string(submatch[5]) == \" *pending\" {\n\t\t\tstatus = \"pending\"\n\t\t}\n\n\t\tdescribe = Describe{\n\t\t\tChange: intChange,\n\t\t\tUser: string(submatch[2]),\n\t\t\tClient: string(submatch[3]),\n\t\t\tTime: string(submatch[4]),\n\t\t\tDescription: string(submatch[6]),\n\t\t\tStatus: status,\n\t\t}\n\n\t\taffectedSubmatch := describeAffectedRegexp.FindAllSubmatch(submatch[7], 10000000)\n\n\t\tfor _, m := range affectedSubmatch {\n\t\t\tintVersion, err := strconv.Atoi(string(m[2]))\n\n\t\t\tif err != nil {\n\t\t\t\treturn describe, err\n\t\t\t}\n\n\t\t\tdescribe.Files = append(describe.Files, DescribeFile{\n\t\t\t\tPath: string(m[1]),\n\t\t\t\tVersion: intVersion,\n\t\t\t\tAction: string(m[3]),\n\t\t\t})\n\t\t}\n\n\t\treturn describe, nil\n\t} else {\n\t\treturn describe, err\n\t}\n}\n\nfunc (c *Connection) GetCounter(counter string) (string, error) {\n\tif data, err := c.execP4(\"counter\", counter); err == nil {\n\t\treturn strings.TrimRight(string(data), \"\\n\"), nil\n\t} else {\n\t\treturn \"\", err\n\t}\n}\n\nfunc (c *Connection) Print(path string, clNumber int) ([]byte, error) {\n\t\/\/ We can not use p4's -q flag here, as that leaves us with no method of\n\t\/\/ distinguishing an actual error from a file happens to contain an error\n\t\/\/ message. The process exits with a status code of 0 in both cases.\n\t\/\/\n\t\/\/ The first line of output differs slightly between a successful request\n\t\/\/ and an error:\n\t\/\/\n\t\/\/ * On error, the line begins with the path followed by `@` and the\n\t\/\/ changelist number.\n\t\/\/ * On success, the line begins with the path followed by `#` and the\n\t\/\/ file revision.\n\t\/\/\n\t\/\/ Another limitation of p4's print is automatic line-ending conversions\n\t\/\/ on text files. This can not be disabled. It also can not be determined\n\t\/\/ if the file is treated as text or binary by Perforce.\n\t\/\/\n\t\/\/ No attempt is made to correct this anomaly. Whatever p4 gives us, we\n\t\/\/ give you.\n\n\turl := fmt.Sprintf(\"%s@%d\", path, clNumber)\n\n\tif data, err := c.execP4(\"print\", url); err == nil {\n\t\tlines := newlineRegexp.Split(string(data), 2)\n\n\t\tif len(lines) != 2 {\n\t\t\treturn nil, errors.New(\"no newlines found in p4's output\")\n\t\t}\n\n\t\tsubmatch := printRegexp.FindSubmatch([]byte(lines[0]))\n\n\t\tif len(submatch) == 0 {\n\t\t\treturn nil, errors.New(\"first line from p4 print of invalid format\")\n\t\t}\n\n\t\tif submatch[2][0] != '#' {\n\t\t\treturn nil, P4Error{\n\t\t\t\terrors.New(string(submatch[4])),\n\t\t\t\t[]string{\"p4\", \"print\", url},\n\t\t\t\tdata,\n\t\t\t}\n\t\t}\n\n\t\treturn []byte(lines[1]), nil\n\t} else {\n\t\treturn nil, err\n\t}\n}\n\nfunc (c *Connection) ReviewByChangelist(clNumber int) ([]Review, error) {\n\treturn c.review(\"review\", \"-c\", strconv.Itoa(clNumber))\n}\n\nfunc (c *Connection) ReviewByCounter(counter string) ([]Review, error) {\n\treturn c.review(\"review\", \"-t\", counter)\n}\n\nfunc (c *Connection) review(arguments ...string) ([]Review, error) {\n\treviews := []Review{}\n\n\tif data, err := c.execP4(arguments...); err == nil {\n\t\tsubmatch := reviewRegexp.FindAllSubmatch(data, 10000000)\n\n\t\tfor _, review := range submatch {\n\t\t\tintChange, err := strconv.Atoi(string(review[1]))\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\treviews = append(reviews, Review{\n\t\t\t\tChange: intChange,\n\t\t\t\tUser: string(review[2]),\n\t\t\t\tEmail: string(review[3]),\n\t\t\t\tName: string(review[4]),\n\t\t\t})\n\t\t}\n\n\t\treturn reviews, nil\n\t} else {\n\t\treturn nil, err\n\t}\n}\n\nfunc (c *Connection) SetCounter(counter string, value string) error {\n\tif _, err := c.execP4(\"counter\", counter, value); err == nil {\n\t\treturn nil\n\t} else {\n\t\treturn err\n\t}\n}\n\nfunc (c *Connection) Sync(path string, clNumber int) error {\n\tif _, err := c.execP4(\"sync\", \"-f\", fmt.Sprintf(\"%s@%d\", path, clNumber)); err == nil {\n\t\treturn nil\n\t} else {\n\t\treturn err\n\t}\n}\n<commit_msg>Add `user` command<commit_after>package p4\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Describe struct {\n\tChange int\n\tUser string\n\tClient string\n\tTime string\n\tDescription string\n\tStatus string\n\tFiles []DescribeFile\n}\n\ntype DescribeFile struct {\n\tPath string\n\tVersion int\n\tAction string\n}\n\ntype Review struct {\n\tChange int\n\tUser string\n\tEmail string\n\tName string\n}\n\ntype User struct {\n\tUser string\n\tEmail string\n\tFullName string\n}\n\nvar newlineRegexp = regexp.MustCompile(\"\\r\\n|\\r|\\n\")\n\nvar countersRegexp = regexp.MustCompile(\"(?m)^(.+) = (.+)$\")\nvar describeRegexp = regexp.MustCompile(\"\\\\AChange (\\\\d+) by (.+)@(.+) on (.+)((?: *pending*)?)\\n\\n((?:\\t.*\\n)*)\\nAffected files ...\\n\\n((?:... (?:.+) (?:[\\\\w\/]+)\\n)*)\\n\\\\z\")\nvar describeAffectedRegexp = regexp.MustCompile(\"(?m)^... (.+)#(\\\\d+) ([\\\\w\/]+)$\")\nvar printRegexp = regexp.MustCompile(\"(?m)\\\\A(.+)(@|#)(\\\\d+)(?: - | )(.+)$\")\nvar reviewRegexp = regexp.MustCompile(\"(?m)^Change (\\\\d+) (.+) <(.+)> \\\\((.+)\\\\)$\")\n\nfunc (c *Connection) Counters() (map[string]string, error) {\n\tcounters := map[string]string{}\n\n\tif data, err := c.execP4(\"counters\"); err == nil {\n\t\tsubmatch := countersRegexp.FindAllSubmatch(data, 1000000)\n\n\t\tfor _, counter := range submatch {\n\t\t\tcounters[string(counter[1])] = string(counter[2])\n\t\t}\n\n\t\treturn counters, nil\n\t} else {\n\t\treturn nil, err\n\t}\n}\n\nfunc (c *Connection) Describe(change int) (Describe, error) {\n\tvar describe Describe\n\n\tif data, err := c.execP4(\"describe\", \"-s\", strconv.Itoa(change)); err == nil {\n\t\tsubmatch := describeRegexp.FindSubmatch(data)\n\t\tintChange, err := strconv.Atoi(string(submatch[1]))\n\n\t\tif err != nil {\n\t\t\treturn describe, err\n\t\t}\n\n\t\tstatus := \"submitted\"\n\n\t\tif string(submatch[5]) == \" *pending\" {\n\t\t\tstatus = \"pending\"\n\t\t}\n\n\t\tdescribe = Describe{\n\t\t\tChange: intChange,\n\t\t\tUser: string(submatch[2]),\n\t\t\tClient: string(submatch[3]),\n\t\t\tTime: string(submatch[4]),\n\t\t\tDescription: string(submatch[6]),\n\t\t\tStatus: status,\n\t\t}\n\n\t\taffectedSubmatch := describeAffectedRegexp.FindAllSubmatch(submatch[7], 10000000)\n\n\t\tfor _, m := range affectedSubmatch {\n\t\t\tintVersion, err := strconv.Atoi(string(m[2]))\n\n\t\t\tif err != nil {\n\t\t\t\treturn describe, err\n\t\t\t}\n\n\t\t\tdescribe.Files = append(describe.Files, DescribeFile{\n\t\t\t\tPath: string(m[1]),\n\t\t\t\tVersion: intVersion,\n\t\t\t\tAction: string(m[3]),\n\t\t\t})\n\t\t}\n\n\t\treturn describe, nil\n\t} else {\n\t\treturn describe, err\n\t}\n}\n\nvar emailRegex = regexp.MustCompile(`(?m)^Email:\\s+(.*)$`)\nvar fullNameRegex = regexp.MustCompile(`(?m)^FullName:\\s+(.*)$`)\n\nfunc (c *Connection) User(username string) (User, error) {\n\tif data, err := c.execP4(\"user\", \"-o\", username); err == nil {\n\t\tuser := User{\n\t\t\tUser: username,\n\t\t\tEmail: string(emailRegex.FindSubmatch(data)[1]),\n\t\t\tFullName: string(fullNameRegex.FindSubmatch(data)[1]),\n\t\t}\n\t\treturn user, nil\n\t} else {\n\t\treturn User{}, err\n\t}\n}\n\nfunc (c *Connection) GetCounter(counter string) (string, error) {\n\tif data, err := c.execP4(\"counter\", counter); err == nil {\n\t\treturn strings.TrimRight(string(data), \"\\n\"), nil\n\t} else {\n\t\treturn \"\", err\n\t}\n}\n\nfunc (c *Connection) Print(path string, clNumber int) ([]byte, error) {\n\t\/\/ We can not use p4's -q flag here, as that leaves us with no method of\n\t\/\/ distinguishing an actual error from a file happens to contain an error\n\t\/\/ message. The process exits with a status code of 0 in both cases.\n\t\/\/\n\t\/\/ The first line of output differs slightly between a successful request\n\t\/\/ and an error:\n\t\/\/\n\t\/\/ * On error, the line begins with the path followed by `@` and the\n\t\/\/ changelist number.\n\t\/\/ * On success, the line begins with the path followed by `#` and the\n\t\/\/ file revision.\n\t\/\/\n\t\/\/ Another limitation of p4's print is automatic line-ending conversions\n\t\/\/ on text files. This can not be disabled. It also can not be determined\n\t\/\/ if the file is treated as text or binary by Perforce.\n\t\/\/\n\t\/\/ No attempt is made to correct this anomaly. Whatever p4 gives us, we\n\t\/\/ give you.\n\n\turl := fmt.Sprintf(\"%s@%d\", path, clNumber)\n\n\tif data, err := c.execP4(\"print\", url); err == nil {\n\t\tlines := newlineRegexp.Split(string(data), 2)\n\n\t\tif len(lines) != 2 {\n\t\t\treturn nil, errors.New(\"no newlines found in p4's output\")\n\t\t}\n\n\t\tsubmatch := printRegexp.FindSubmatch([]byte(lines[0]))\n\n\t\tif len(submatch) == 0 {\n\t\t\treturn nil, errors.New(\"first line from p4 print of invalid format\")\n\t\t}\n\n\t\tif submatch[2][0] != '#' {\n\t\t\treturn nil, P4Error{\n\t\t\t\terrors.New(string(submatch[4])),\n\t\t\t\t[]string{\"p4\", \"print\", url},\n\t\t\t\tdata,\n\t\t\t}\n\t\t}\n\n\t\treturn []byte(lines[1]), nil\n\t} else {\n\t\treturn nil, err\n\t}\n}\n\nfunc (c *Connection) ReviewByChangelist(clNumber int) ([]Review, error) {\n\treturn c.review(\"review\", \"-c\", strconv.Itoa(clNumber))\n}\n\nfunc (c *Connection) ReviewByCounter(counter string) ([]Review, error) {\n\treturn c.review(\"review\", \"-t\", counter)\n}\n\nfunc (c *Connection) review(arguments ...string) ([]Review, error) {\n\treviews := []Review{}\n\n\tif data, err := c.execP4(arguments...); err == nil {\n\t\tsubmatch := reviewRegexp.FindAllSubmatch(data, 10000000)\n\n\t\tfor _, review := range submatch {\n\t\t\tintChange, err := strconv.Atoi(string(review[1]))\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\treviews = append(reviews, Review{\n\t\t\t\tChange: intChange,\n\t\t\t\tUser: string(review[2]),\n\t\t\t\tEmail: string(review[3]),\n\t\t\t\tName: string(review[4]),\n\t\t\t})\n\t\t}\n\n\t\treturn reviews, nil\n\t} else {\n\t\treturn nil, err\n\t}\n}\n\nfunc (c *Connection) SetCounter(counter string, value string) error {\n\tif _, err := c.execP4(\"counter\", counter, value); err == nil {\n\t\treturn nil\n\t} else {\n\t\treturn err\n\t}\n}\n\nfunc (c *Connection) Sync(path string, clNumber int) error {\n\tif _, err := c.execP4(\"sync\", \"-f\", fmt.Sprintf(\"%s@%d\", path, clNumber)); err == nil {\n\t\treturn nil\n\t} else {\n\t\treturn err\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\n\/\/ Package tlogger provides a zapcore.Core that is capable of writing log\n\/\/ messages to a *testing.T and *testing.B. It may be used from Go tests or\n\/\/ benchmarks to have log messages printed only if a test failed, or if the\n\/\/ `-v` flag was passed to `go test`.\npackage tlogger \/\/ import \"go.uber.org\/zap\/zaptest\/tlogger\"\n\nimport (\n\t\"testing\"\n\n\t\"go.uber.org\/zap\"\n\t\"go.uber.org\/zap\/zapcore\"\n)\n\n\/\/ New builds a new Core that logs all messages to the given testing.TB.\n\/\/\n\/\/ Use this with a *testing.T or *testing.B to get logs which get printed only\n\/\/ if a test fails or if you ran go test -v.\n\/\/\n\/\/ logger := zap.New(tlogger.New(t))\nfunc New(t testing.TB) zapcore.Core {\n\treturn NewAt(t, zapcore.DebugLevel)\n}\n\n\/\/ NewAt builds a new Core that logs messages to the given testing.TB if the\n\/\/ given LevelEnabler allows it.\n\/\/\n\/\/ Use this with a *testing.T or *testing.B to get logs which get printed only\n\/\/ if a test fails or if you ran go test -v.\n\/\/\n\/\/ logger := zap.New(tlogger.NewAt(t, zap.InfoLevel))\nfunc NewAt(t testing.TB, enab zapcore.LevelEnabler) zapcore.Core {\n\treturn zapcore.NewCore(\n\t\tzapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),\n\t\ttestingWriter{t},\n\t\tenab,\n\t)\n}\n\n\/\/ testingWriter is a WriteSyncer that writes to the given testing.TB.\ntype testingWriter struct{ t testing.TB }\n\nfunc (w testingWriter) Write(p []byte) (n int, err error) {\n\ts := string(p)\n\n\t\/\/ Strip trailing newline because t.Log always adds one.\n\tif s[len(s)-1] == '\\n' {\n\t\ts = s[:len(s)-1]\n\t}\n\n\t\/\/ Note: t.Log is safe for concurrent use.\n\tw.t.Log(s)\n\treturn len(p), nil\n}\n\nfunc (w testingWriter) Sync() error {\n\treturn nil\n}\n<commit_msg>Remove backticks<commit_after>\/\/ Copyright (c) 2017 Uber Technologies, Inc.\n\/\/\n\/\/ Permission is hereby granted, free of charge, to any person obtaining a copy\n\/\/ of this software and associated documentation files (the \"Software\"), to deal\n\/\/ in the Software without restriction, including without limitation the rights\n\/\/ to use, copy, modify, merge, publish, distribute, sublicense, and\/or sell\n\/\/ copies of the Software, and to permit persons to whom the Software is\n\/\/ furnished to do so, subject to the following conditions:\n\/\/\n\/\/ The above copyright notice and this permission notice shall be included in\n\/\/ all copies or substantial portions of the Software.\n\/\/\n\/\/ THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n\/\/ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n\/\/ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n\/\/ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n\/\/ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n\/\/ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n\/\/ THE SOFTWARE.\n\n\/\/ Package tlogger provides a zapcore.Core that is capable of writing log\n\/\/ messages to a *testing.T and *testing.B. It may be used from Go tests or\n\/\/ benchmarks to have log messages printed only if a test failed, or if the\n\/\/ -v flag was passed to go test.\npackage tlogger \/\/ import \"go.uber.org\/zap\/zaptest\/tlogger\"\n\nimport (\n\t\"testing\"\n\n\t\"go.uber.org\/zap\"\n\t\"go.uber.org\/zap\/zapcore\"\n)\n\n\/\/ New builds a new Core that logs all messages to the given testing.TB.\n\/\/\n\/\/ Use this with a *testing.T or *testing.B to get logs which get printed only\n\/\/ if a test fails or if you ran go test -v.\n\/\/\n\/\/ logger := zap.New(tlogger.New(t))\nfunc New(t testing.TB) zapcore.Core {\n\treturn NewAt(t, zapcore.DebugLevel)\n}\n\n\/\/ NewAt builds a new Core that logs messages to the given testing.TB if the\n\/\/ given LevelEnabler allows it.\n\/\/\n\/\/ Use this with a *testing.T or *testing.B to get logs which get printed only\n\/\/ if a test fails or if you ran go test -v.\n\/\/\n\/\/ logger := zap.New(tlogger.NewAt(t, zap.InfoLevel))\nfunc NewAt(t testing.TB, enab zapcore.LevelEnabler) zapcore.Core {\n\treturn zapcore.NewCore(\n\t\tzapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),\n\t\ttestingWriter{t},\n\t\tenab,\n\t)\n}\n\n\/\/ testingWriter is a WriteSyncer that writes to the given testing.TB.\ntype testingWriter struct{ t testing.TB }\n\nfunc (w testingWriter) Write(p []byte) (n int, err error) {\n\ts := string(p)\n\n\t\/\/ Strip trailing newline because t.Log always adds one.\n\tif s[len(s)-1] == '\\n' {\n\t\ts = s[:len(s)-1]\n\t}\n\n\t\/\/ Note: t.Log is safe for concurrent use.\n\tw.t.Log(s)\n\treturn len(p), nil\n}\n\nfunc (w testingWriter) Sync() error {\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage lifecycle\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\tclientset \"k8s.io\/client-go\/kubernetes\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/common\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nconst resizeNodeReadyTimeout = 2 * time.Minute\n\nfunc resizeRC(c clientset.Interface, ns, name string, replicas int32) error {\n\trc, err := c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\t*(rc.Spec.Replicas) = replicas\n\t_, err = c.CoreV1().ReplicationControllers(rc.Namespace).Update(rc)\n\treturn err\n}\n\nvar _ = SIGDescribe(\"Nodes [Disruptive]\", func() {\n\tf := framework.NewDefaultFramework(\"resize-nodes\")\n\tvar systemPodsNo int32\n\tvar c clientset.Interface\n\tvar ns string\n\tignoreLabels := framework.ImagePullerLabels\n\tvar group string\n\n\tBeforeEach(func() {\n\t\tc = f.ClientSet\n\t\tns = f.Namespace.Name\n\t\tsystemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tsystemPodsNo = int32(len(systemPods))\n\t\tif strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, \",\") >= 0 {\n\t\t\tframework.Failf(\"Test dose not support cluster setup with more than one MIG: %s\", framework.TestContext.CloudConfig.NodeInstanceGroup)\n\t\t} else {\n\t\t\tgroup = framework.TestContext.CloudConfig.NodeInstanceGroup\n\t\t}\n\t})\n\n\t\/\/ Slow issue #13323 (8 min)\n\tDescribe(\"Resize [Slow]\", func() {\n\t\tvar skipped bool\n\n\t\tBeforeEach(func() {\n\t\t\tskipped = true\n\t\t\tframework.SkipUnlessProviderIs(\"gce\", \"gke\", \"aws\")\n\t\t\tframework.SkipUnlessNodeCountIsAtLeast(2)\n\t\t\tskipped = false\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tif skipped {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tBy(\"restoring the original node instance group size\")\n\t\t\tif err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {\n\t\t\t\tframework.Failf(\"Couldn't restore the original node instance group size: %v\", err)\n\t\t\t}\n\t\t\t\/\/ In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a\n\t\t\t\/\/ rebooted\/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.\n\t\t\t\/\/ Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs\n\t\t\t\/\/ right before a test that tries to get logs, for example, we may get unlucky and try to use a\n\t\t\t\/\/ closed tunnel to a node that was recently rebooted. There's no good way to framework.Poll for proxies\n\t\t\t\/\/ being closed, so we sleep.\n\t\t\t\/\/\n\t\t\t\/\/ TODO(cjcullen) reduce this sleep (#19314)\n\t\t\tif framework.ProviderIs(\"gke\") {\n\t\t\t\tBy(\"waiting 5 minutes for all dead tunnels to be dropped\")\n\t\t\t\ttime.Sleep(5 * time.Minute)\n\t\t\t}\n\t\t\tif err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {\n\t\t\t\tframework.Failf(\"Couldn't restore the original node instance group size: %v\", err)\n\t\t\t}\n\t\t\tif err := framework.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {\n\t\t\t\tframework.Failf(\"Couldn't restore the original cluster size: %v\", err)\n\t\t\t}\n\t\t\t\/\/ Many e2e tests assume that the cluster is fully healthy before they start. Wait until\n\t\t\t\/\/ the cluster is restored to health.\n\t\t\tBy(\"waiting for system pods to successfully restart\")\n\t\t\terr := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, ignoreLabels)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tBy(\"waiting for image prepulling pods to complete\")\n\t\t\tframework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout)\n\t\t})\n\n\t\tIt(\"should be able to delete nodes\", func() {\n\t\t\t\/\/ Create a replication controller for a service that serves its hostname.\n\t\t\t\/\/ The source for the Docker container kubernetes\/serve_hostname is in contrib\/for-demos\/serve_hostname\n\t\t\tname := \"my-hostname-delete-node\"\n\t\t\tnumNodes, err := framework.NumberOfRegisteredNodes(c)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\treplicas := int32(numNodes)\n\t\t\tcommon.NewRCByName(c, ns, name, replicas, nil)\n\t\t\terr = framework.VerifyPods(c, ns, name, true, replicas)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\ttargetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1)\n\t\t\tBy(fmt.Sprintf(\"decreasing cluster size to %d\", targetNumNodes))\n\t\t\terr = framework.ResizeGroup(group, targetNumNodes)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\terr = framework.WaitForGroupSize(group, targetNumNodes)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\terr = framework.WaitForReadyNodes(c, int(replicas-1), 10*time.Minute)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tBy(\"waiting 1 minute for the watch in the podGC to catch up, remove any pods scheduled on \" +\n\t\t\t\t\"the now non-existent node and the RC to recreate it\")\n\t\t\ttime.Sleep(time.Minute)\n\n\t\t\tBy(\"verifying whether the pods from the removed node are recreated\")\n\t\t\terr = framework.VerifyPods(c, ns, name, true, replicas)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t})\n\n\t\t\/\/ TODO: Bug here - testName is not correct\n\t\tIt(\"should be able to add nodes\", func() {\n\t\t\t\/\/ Create a replication controller for a service that serves its hostname.\n\t\t\t\/\/ The source for the Docker container kubernetes\/serve_hostname is in contrib\/for-demos\/serve_hostname\n\t\t\tname := \"my-hostname-add-node\"\n\t\t\tcommon.NewSVCByName(c, ns, name)\n\t\t\tnumNodes, err := framework.NumberOfRegisteredNodes(c)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\treplicas := int32(numNodes)\n\t\t\tcommon.NewRCByName(c, ns, name, replicas, nil)\n\t\t\terr = framework.VerifyPods(c, ns, name, true, replicas)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\ttargetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes + 1)\n\t\t\tBy(fmt.Sprintf(\"increasing cluster size to %d\", targetNumNodes))\n\t\t\terr = framework.ResizeGroup(group, targetNumNodes)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\terr = framework.WaitForGroupSize(group, targetNumNodes)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\terr = framework.WaitForReadyNodes(c, int(replicas+1), 10*time.Minute)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tBy(fmt.Sprintf(\"increasing size of the replication controller to %d and verifying all pods are running\", replicas+1))\n\t\t\terr = resizeRC(c, ns, name, replicas+1)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\terr = framework.VerifyPods(c, ns, name, true, replicas+1)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t})\n\t})\n})\n<commit_msg>Fix resize test for Regional Clusters<commit_after>\/*\nCopyright 2015 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage lifecycle\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\tclientset \"k8s.io\/client-go\/kubernetes\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/common\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nconst resizeNodeReadyTimeout = 2 * time.Minute\n\nfunc resizeRC(c clientset.Interface, ns, name string, replicas int32) error {\n\trc, err := c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\t*(rc.Spec.Replicas) = replicas\n\t_, err = c.CoreV1().ReplicationControllers(rc.Namespace).Update(rc)\n\treturn err\n}\n\nvar _ = SIGDescribe(\"Nodes [Disruptive]\", func() {\n\tf := framework.NewDefaultFramework(\"resize-nodes\")\n\tvar systemPodsNo int32\n\tvar c clientset.Interface\n\tvar ns string\n\tignoreLabels := framework.ImagePullerLabels\n\tvar group string\n\n\tBeforeEach(func() {\n\t\tc = f.ClientSet\n\t\tns = f.Namespace.Name\n\t\tsystemPods, err := framework.GetPodsInNamespace(c, ns, ignoreLabels)\n\t\tExpect(err).NotTo(HaveOccurred())\n\t\tsystemPodsNo = int32(len(systemPods))\n\t\tif strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, \",\") >= 0 {\n\t\t\tframework.Failf(\"Test dose not support cluster setup with more than one MIG: %s\", framework.TestContext.CloudConfig.NodeInstanceGroup)\n\t\t} else {\n\t\t\tgroup = framework.TestContext.CloudConfig.NodeInstanceGroup\n\t\t}\n\t})\n\n\t\/\/ Slow issue #13323 (8 min)\n\tDescribe(\"Resize [Slow]\", func() {\n\t\tvar originalNodeCount int32\n\t\tvar skipped bool\n\n\t\tBeforeEach(func() {\n\t\t\tskipped = true\n\t\t\tframework.SkipUnlessProviderIs(\"gce\", \"gke\", \"aws\")\n\t\t\tframework.SkipUnlessNodeCountIsAtLeast(2)\n\t\t\tskipped = false\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tif skipped {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tBy(\"restoring the original node instance group size\")\n\t\t\tif err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {\n\t\t\t\tframework.Failf(\"Couldn't restore the original node instance group size: %v\", err)\n\t\t\t}\n\t\t\t\/\/ In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a\n\t\t\t\/\/ rebooted\/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.\n\t\t\t\/\/ Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs\n\t\t\t\/\/ right before a test that tries to get logs, for example, we may get unlucky and try to use a\n\t\t\t\/\/ closed tunnel to a node that was recently rebooted. There's no good way to framework.Poll for proxies\n\t\t\t\/\/ being closed, so we sleep.\n\t\t\t\/\/\n\t\t\t\/\/ TODO(cjcullen) reduce this sleep (#19314)\n\t\t\tif framework.ProviderIs(\"gke\") {\n\t\t\t\tBy(\"waiting 5 minutes for all dead tunnels to be dropped\")\n\t\t\t\ttime.Sleep(5 * time.Minute)\n\t\t\t}\n\t\t\tif err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {\n\t\t\t\tframework.Failf(\"Couldn't restore the original node instance group size: %v\", err)\n\t\t\t}\n\n\t\t\tif err := framework.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil {\n\t\t\t\tframework.Failf(\"Couldn't restore the original cluster size: %v\", err)\n\t\t\t}\n\t\t\t\/\/ Many e2e tests assume that the cluster is fully healthy before they start. Wait until\n\t\t\t\/\/ the cluster is restored to health.\n\t\t\tBy(\"waiting for system pods to successfully restart\")\n\t\t\terr := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, ignoreLabels)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\tBy(\"waiting for image prepulling pods to complete\")\n\t\t\tframework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout)\n\t\t})\n\n\t\tIt(\"should be able to delete nodes\", func() {\n\t\t\t\/\/ Create a replication controller for a service that serves its hostname.\n\t\t\t\/\/ The source for the Docker container kubernetes\/serve_hostname is in contrib\/for-demos\/serve_hostname\n\t\t\tname := \"my-hostname-delete-node\"\n\t\t\tnumNodes, err := framework.NumberOfRegisteredNodes(c)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\toriginalNodeCount = int32(numNodes)\n\t\t\tcommon.NewRCByName(c, ns, name, originalNodeCount, nil)\n\t\t\terr = framework.VerifyPods(c, ns, name, true, originalNodeCount)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\ttargetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1)\n\t\t\tBy(fmt.Sprintf(\"decreasing cluster size to %d\", targetNumNodes))\n\t\t\terr = framework.ResizeGroup(group, targetNumNodes)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\terr = framework.WaitForGroupSize(group, targetNumNodes)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\terr = framework.WaitForReadyNodes(c, int(originalNodeCount-1), 10*time.Minute)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tBy(\"waiting 1 minute for the watch in the podGC to catch up, remove any pods scheduled on \" +\n\t\t\t\t\"the now non-existent node and the RC to recreate it\")\n\t\t\ttime.Sleep(time.Minute)\n\n\t\t\tBy(\"verifying whether the pods from the removed node are recreated\")\n\t\t\terr = framework.VerifyPods(c, ns, name, true, originalNodeCount)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t})\n\n\t\t\/\/ TODO: Bug here - testName is not correct\n\t\tIt(\"should be able to add nodes\", func() {\n\t\t\t\/\/ Create a replication controller for a service that serves its hostname.\n\t\t\t\/\/ The source for the Docker container kubernetes\/serve_hostname is in contrib\/for-demos\/serve_hostname\n\t\t\tname := \"my-hostname-add-node\"\n\t\t\tcommon.NewSVCByName(c, ns, name)\n\t\t\tnumNodes, err := framework.NumberOfRegisteredNodes(c)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\toriginalNodeCount = int32(numNodes)\n\t\t\tcommon.NewRCByName(c, ns, name, originalNodeCount, nil)\n\t\t\terr = framework.VerifyPods(c, ns, name, true, originalNodeCount)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\ttargetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes + 1)\n\t\t\tBy(fmt.Sprintf(\"increasing cluster size to %d\", targetNumNodes))\n\t\t\terr = framework.ResizeGroup(group, targetNumNodes)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\terr = framework.WaitForGroupSize(group, targetNumNodes)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\terr = framework.WaitForReadyNodes(c, int(originalNodeCount+1), 10*time.Minute)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\n\t\t\tBy(fmt.Sprintf(\"increasing size of the replication controller to %d and verifying all pods are running\", originalNodeCount+1))\n\t\t\terr = resizeRC(c, ns, name, originalNodeCount+1)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t\terr = framework.VerifyPods(c, ns, name, true, originalNodeCount+1)\n\t\t\tExpect(err).NotTo(HaveOccurred())\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/6xiao\/esl4ElasticSearch\/EasySearch\"\n\t\"github.com\/6xiao\/go\/Common\"\n\tes \"github.com\/mattbaird\/elastigo\/lib\"\n\t\"log\"\n\t\"runtime\"\n)\n\nvar (\n\tflgRpc = flag.String(\"rpc\", \":12311\", \"rpc server port\")\n\tflgEs = flag.String(\"elasticsearch\", \"192.168.248.16\", \"elastic search\")\n)\n\nfunc Search(ess EasySearch.EasySearch, res *map[string][]byte) error {\n\tdefer Common.CheckPanic()\n\n\tfilter, err := ParseEsl(ess.ESL)\n\tif filter == nil || err != nil {\n\t\tlog.Println(\"error parse esl :\", err, ess.ESL)\n\t\treturn err\n\t}\n\n\tswitch filter.(type) {\n\tcase *es.FilterWrap:\n\t\tb, e := filter.(*es.FilterWrap).MarshalJSON()\n\t\tfmt.Println(\"filter:\", string(b), e)\n\n\tcase *es.FilterOp:\n\t\tb, e := es.CompoundFilter(filter).MarshalJSON()\n\t\tfmt.Println(\"filter:\", string(b), e)\n\n\tdefault:\n\t\treturn errors.New(\"esl parse error\")\n\t}\n\n\tc := es.NewConn()\n\tc.Domain = *flgEs\n\tse := es.Query().Term(\"appkey\", ess.Appkey)\n\tre := es.Search(ess.Index).Type(ess.Type).Fields(ess.Fields...)\n\trsp, err := re.Size(\"65536\").Scroll(\"1\").Query(se).Filter(filter).Result(c)\n\tif err != nil {\n\t\tlog.Println(\"error:\", err)\n\t\treturn err\n\t}\n\n\tparam := make(map[string]interface{})\n\tparam[\"scroll\"] = \"1\"\n\n\tfor len(*res) < rsp.Hits.Total && len(rsp.Hits.Hits) > 0 {\n\t\tfor _, item := range rsp.Hits.Hits {\n\t\t\tif item.Fields != nil {\n\t\t\t\t(*res)[item.Id], _ = item.Fields.MarshalJSON()\n\t\t\t} else if item.Source != nil {\n\t\t\t\t(*res)[item.Id], _ = item.Source.MarshalJSON()\n\t\t\t} else {\n\t\t\t\t(*res)[item.Id] = nil\n\t\t\t}\n\t\t}\n\n\t\t*rsp, err = c.Scroll(param, rsp.ScrollId)\n\t\tif err != nil {\n\t\t\tlog.Println(\"error:\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc init() {\n\tflag.Parse()\n\tflag.Usage()\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tlog.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)\n}\n\nfunc main() {\n\ttrpc := EasySearch.NewEsRpc(Search)\n\tCommon.ListenRpc(*flgRpc, trpc, nil)\n\tlog.Fatal(\"exit ...\")\n}\n<commit_msg>remove atexit function<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/6xiao\/esl4ElasticSearch\/EasySearch\"\n\t\"github.com\/6xiao\/go\/Common\"\n\tes \"github.com\/mattbaird\/elastigo\/lib\"\n\t\"log\"\n\t\"runtime\"\n)\n\nvar (\n\tflgRpc = flag.String(\"rpc\", \":12311\", \"rpc server port\")\n\tflgEs = flag.String(\"elasticsearch\", \"192.168.248.16\", \"elastic search\")\n)\n\nfunc Search(ess EasySearch.EasySearch, res *map[string][]byte) error {\n\tdefer Common.CheckPanic()\n\n\tfilter, err := ParseEsl(ess.ESL)\n\tif filter == nil || err != nil {\n\t\tlog.Println(\"error parse esl :\", err, ess.ESL)\n\t\treturn err\n\t}\n\n\tswitch filter.(type) {\n\tcase *es.FilterWrap:\n\t\tb, e := filter.(*es.FilterWrap).MarshalJSON()\n\t\tfmt.Println(\"filter:\", string(b), e)\n\n\tcase *es.FilterOp:\n\t\tb, e := es.CompoundFilter(filter).MarshalJSON()\n\t\tfmt.Println(\"filter:\", string(b), e)\n\n\tdefault:\n\t\treturn errors.New(\"esl parse error\")\n\t}\n\n\tc := es.NewConn()\n\tc.Domain = *flgEs\n\tse := es.Query().Term(\"appkey\", ess.Appkey)\n\tre := es.Search(ess.Index).Type(ess.Type).Fields(ess.Fields...)\n\trsp, err := re.Size(\"65536\").Scroll(\"1\").Query(se).Filter(filter).Result(c)\n\tif err != nil {\n\t\tlog.Println(\"error:\", err)\n\t\treturn err\n\t}\n\n\tparam := make(map[string]interface{})\n\tparam[\"scroll\"] = \"1\"\n\n\tfor len(*res) < rsp.Hits.Total && len(rsp.Hits.Hits) > 0 {\n\t\tfor _, item := range rsp.Hits.Hits {\n\t\t\tif item.Fields != nil {\n\t\t\t\t(*res)[item.Id], _ = item.Fields.MarshalJSON()\n\t\t\t} else if item.Source != nil {\n\t\t\t\t(*res)[item.Id], _ = item.Source.MarshalJSON()\n\t\t\t} else {\n\t\t\t\t(*res)[item.Id] = nil\n\t\t\t}\n\t\t}\n\n\t\t*rsp, err = c.Scroll(param, rsp.ScrollId)\n\t\tif err != nil {\n\t\t\tlog.Println(\"error:\", err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc init() {\n\tflag.Parse()\n\tflag.Usage()\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tlog.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)\n}\n\nfunc main() {\n\ttrpc := EasySearch.NewEsRpc(Search)\n\tCommon.ListenRpc(*flgRpc, trpc)\n\tlog.Fatal(\"exit ...\")\n}\n<|endoftext|>"} {"text":"<commit_before>package zip\n\nimport (\n\t. \"jvmgo\/any\"\n\t\"jvmgo\/jvm\/rtda\"\n\trtc \"jvmgo\/jvm\/rtda\/class\"\n\t\"jvmgo\/util\"\n)\n\nconst (\n\tJZENTRY_NAME = 0\n\tJZENTRY_EXTRA = 1\n\tJZENTRY_COMMENT = 2\n)\n\nfunc init() {\n\t_zf(initIDs, \"initIDs\", \"()V\")\n\t_zf(getEntryBytes, \"getEntryBytes\", \"(JI)[B\")\n\t_zf(getEntryFlag, \"getEntryFlag\", \"(J)I\")\n\t_zf(getEntryTime, \"getEntryTime\", \"(J)J\")\n\t_zf(getNextEntry, \"getNextEntry\", \"(JI)J\")\n\t_zf(getTotal, \"getTotal\", \"(J)I\")\n\t_zf(open, \"open\", \"(Ljava\/lang\/String;IJZ)J\")\n\t_zf(startsWithLOC, \"startsWithLOC\", \"(J)Z\")\n}\n\nfunc _zf(method Any, name, desc string) {\n\trtc.RegisterNativeMethod(\"java\/util\/zip\/ZipFile\", name, desc, method)\n}\n\n\/\/ private static native void initIDs();\n\/\/ ()V\nfunc initIDs(frame *rtda.Frame) {\n\t\/\/ todo\n}\n\n\/\/ private static native long open(String name, int mode, long lastModified,\n\/\/ boolean usemmap) throws IOException;\n\/\/ (Ljava\/lang\/String;IJZ)J\nfunc open(frame *rtda.Frame) {\n\tvars := frame.LocalVars()\n\tnameObj := vars.GetRef(0)\n\n\tname := rtda.GoString(nameObj)\n\tjzfile, err := openZip(name)\n\tif err != nil {\n\t\t\/\/ todo\n\t\tpanic(\"IOException\")\n\t}\n\n\tstack := frame.OperandStack()\n\tstack.PushLong(jzfile)\n}\n\n\/\/ private static native boolean startsWithLOC(long jzfile);\n\/\/ (J)Z\nfunc startsWithLOC(frame *rtda.Frame) {\n\t\/\/ todo\n\tstack := frame.OperandStack()\n\tstack.PushBoolean(true)\n}\n\n\/\/ private static native int getTotal(long jzfile);\n\/\/ (J)I\nfunc getTotal(frame *rtda.Frame) {\n\tvars := frame.LocalVars()\n\tjzfile := vars.GetLong(0)\n\n\ttotal := getEntryCount(jzfile)\n\n\tstack := frame.OperandStack()\n\tstack.PushInt(total)\n}\n\n\/\/ private static native long getNextEntry(long jzfile, int i);\n\/\/ (JI)J\nfunc getNextEntry(frame *rtda.Frame) {\n\tvars := frame.LocalVars()\n\tjzfile := vars.GetLong(0)\n\ti := vars.GetInt(2)\n\n\tjzentry := getJzentry(jzfile, i)\n\n\tstack := frame.OperandStack()\n\tstack.PushLong(jzentry)\n}\n\n\/\/ private static native byte[] getEntryBytes(long jzentry, int type);\n\/\/ (JI)[B\nfunc getEntryBytes(frame *rtda.Frame) {\n\tvars := frame.LocalVars()\n\tjzentry := vars.GetLong(0)\n\t_type := vars.GetInt(2)\n\n\tgoBytes := _getEntryBytes(jzentry, _type)\n\tjBytes := util.CastUint8sToInt8s(goBytes)\n\tbyteArr := rtc.NewByteArray(jBytes, frame.ClassLoader())\n\n\tstack := frame.OperandStack()\n\tstack.PushRef(byteArr)\n}\n\nfunc _getEntryBytes(jzentry int64, _type int32) []byte {\n\tentry := getEntry(jzentry)\n\tswitch _type {\n\tcase JZENTRY_NAME:\n\t\treturn []byte(entry.Name)\n\tcase JZENTRY_EXTRA:\n\t\treturn entry.Extra\n\tcase JZENTRY_COMMENT:\n\t\treturn []byte(entry.Comment)\n\t}\n\tutil.Panicf(\"BAD type: %v\", _type)\n\treturn nil\n}\n\n\/\/ private static native int getEntryFlag(long jzentry);\n\/\/ (J)I\nfunc getEntryFlag(frame *rtda.Frame) {\n\tvars := frame.LocalVars()\n\tjzentry := vars.GetLong(0)\n\n\tentry := getEntry(jzentry)\n\tflag := int32(entry.Flags)\n\n\tstack := frame.OperandStack()\n\tstack.PushInt(flag)\n}\n\n\/\/ private static native long getEntryTime(long jzentry);\n\/\/ (J)J\nfunc getEntryTime(frame *rtda.Frame) {\n\tvars := frame.LocalVars()\n\tjzentry := vars.GetLong(0)\n\n\tentry := getEntry(jzentry)\n\tmodDate := entry.ModifiedDate\n\tmodTime := entry.ModifiedTime\n\ttime := int64(modDate)<<16 | int64(modTime)\n\n\tstack := frame.OperandStack()\n\tstack.PushLong(time)\n}\n<commit_msg>code refactor<commit_after>package zip\n\nimport (\n\tgozip \"archive\/zip\"\n\t. \"jvmgo\/any\"\n\t\"jvmgo\/jvm\/rtda\"\n\trtc \"jvmgo\/jvm\/rtda\/class\"\n\t\"jvmgo\/util\"\n)\n\nconst (\n\tJZENTRY_NAME = 0\n\tJZENTRY_EXTRA = 1\n\tJZENTRY_COMMENT = 2\n)\n\nfunc init() {\n\t_zf(initIDs, \"initIDs\", \"()V\")\n\t_zf(getEntryBytes, \"getEntryBytes\", \"(JI)[B\")\n\t_zf(getEntryFlag, \"getEntryFlag\", \"(J)I\")\n\t_zf(getEntryTime, \"getEntryTime\", \"(J)J\")\n\t_zf(getNextEntry, \"getNextEntry\", \"(JI)J\")\n\t_zf(getTotal, \"getTotal\", \"(J)I\")\n\t_zf(open, \"open\", \"(Ljava\/lang\/String;IJZ)J\")\n\t_zf(startsWithLOC, \"startsWithLOC\", \"(J)Z\")\n}\n\nfunc _zf(method Any, name, desc string) {\n\trtc.RegisterNativeMethod(\"java\/util\/zip\/ZipFile\", name, desc, method)\n}\n\n\/\/ private static native void initIDs();\n\/\/ ()V\nfunc initIDs(frame *rtda.Frame) {\n\t\/\/ todo\n}\n\n\/\/ private static native long open(String name, int mode, long lastModified,\n\/\/ boolean usemmap) throws IOException;\n\/\/ (Ljava\/lang\/String;IJZ)J\nfunc open(frame *rtda.Frame) {\n\tvars := frame.LocalVars()\n\tnameObj := vars.GetRef(0)\n\n\tname := rtda.GoString(nameObj)\n\tjzfile, err := openZip(name)\n\tif err != nil {\n\t\t\/\/ todo\n\t\tpanic(\"IOException\")\n\t}\n\n\tstack := frame.OperandStack()\n\tstack.PushLong(jzfile)\n}\n\n\/\/ private static native boolean startsWithLOC(long jzfile);\n\/\/ (J)Z\nfunc startsWithLOC(frame *rtda.Frame) {\n\t\/\/ todo\n\tstack := frame.OperandStack()\n\tstack.PushBoolean(true)\n}\n\n\/\/ private static native int getTotal(long jzfile);\n\/\/ (J)I\nfunc getTotal(frame *rtda.Frame) {\n\tvars := frame.LocalVars()\n\tjzfile := vars.GetLong(0)\n\n\ttotal := getEntryCount(jzfile)\n\n\tstack := frame.OperandStack()\n\tstack.PushInt(total)\n}\n\n\/\/ private static native long getNextEntry(long jzfile, int i);\n\/\/ (JI)J\nfunc getNextEntry(frame *rtda.Frame) {\n\tvars := frame.LocalVars()\n\tjzfile := vars.GetLong(0)\n\ti := vars.GetInt(2)\n\n\tjzentry := getJzentry(jzfile, i)\n\n\tstack := frame.OperandStack()\n\tstack.PushLong(jzentry)\n}\n\n\/\/ private static native byte[] getEntryBytes(long jzentry, int type);\n\/\/ (JI)[B\nfunc getEntryBytes(frame *rtda.Frame) {\n\tvars := frame.LocalVars()\n\tjzentry := vars.GetLong(0)\n\t_type := vars.GetInt(2)\n\n\tgoBytes := _getEntryBytes(jzentry, _type)\n\tjBytes := util.CastUint8sToInt8s(goBytes)\n\tbyteArr := rtc.NewByteArray(jBytes, frame.ClassLoader())\n\n\tstack := frame.OperandStack()\n\tstack.PushRef(byteArr)\n}\n\nfunc _getEntryBytes(jzentry int64, _type int32) []byte {\n\tentry := getEntry(jzentry)\n\tswitch _type {\n\tcase JZENTRY_NAME:\n\t\treturn []byte(entry.Name)\n\tcase JZENTRY_EXTRA:\n\t\treturn entry.Extra\n\tcase JZENTRY_COMMENT:\n\t\treturn []byte(entry.Comment)\n\t}\n\tutil.Panicf(\"BAD type: %v\", _type)\n\treturn nil\n}\n\n\/\/ private static native int getEntryFlag(long jzentry);\n\/\/ (J)I\nfunc getEntryFlag(frame *rtda.Frame) {\n\tentry := _getEntryPop(frame)\n\tflag := int32(entry.Flags)\n\n\tstack := frame.OperandStack()\n\tstack.PushInt(flag)\n}\n\n\/\/ private static native long getEntryTime(long jzentry);\n\/\/ (J)J\nfunc getEntryTime(frame *rtda.Frame) {\n\tentry := _getEntryPop(frame)\n\tmodDate := entry.ModifiedDate\n\tmodTime := entry.ModifiedTime\n\ttime := int64(modDate)<<16 | int64(modTime)\n\n\tstack := frame.OperandStack()\n\tstack.PushLong(time)\n}\n\nfunc _getEntryPop(frame *rtda.Frame) *gozip.File {\n\tvars := frame.LocalVars()\n\tjzentry := vars.GetLong(0)\n\n\tentry := getEntry(jzentry)\n\treturn entry\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage goroutines\n\nimport (\n\t\"bytes\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc wrappedWaitForIt(wg *sync.WaitGroup, wait chan struct{}, n int64) {\n\tif n == 0 {\n\t\twaitForIt(wg, wait)\n\t} else {\n\t\twrappedWaitForIt(wg, wait, n-1)\n\t}\n}\n\nfunc waitForIt(wg *sync.WaitGroup, wait chan struct{}) {\n\twg.Done()\n\t<-wait\n}\n\nfunc runGoA(wg *sync.WaitGroup, wait chan struct{}) {\n\tgo waitForIt(wg, wait)\n}\n\nfunc runGoB(wg *sync.WaitGroup, wait chan struct{}) {\n\tgo wrappedWaitForIt(wg, wait, 3)\n}\n\nfunc runGoC(wg *sync.WaitGroup, wait chan struct{}) {\n\tgo func() {\n\t\twg.Done()\n\t\t<-wait\n\t}()\n}\n\nfunc TestGet(t *testing.T) {\n\tvar wg sync.WaitGroup\n\twg.Add(3)\n\twait := make(chan struct{})\n\trunGoA(&wg, wait)\n\trunGoB(&wg, wait)\n\trunGoC(&wg, wait)\n\twg.Wait()\n\tgs, err := Get()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tclose(wait)\n\n\tif len(gs) < 4 {\n\t\tt.Errorf(\"Got %d goroutines, expected at least 4\", len(gs))\n\t}\n\tbycreator := map[string]*Goroutine{}\n\tfor _, g := range gs {\n\t\tkey := \"\"\n\t\tif g.Creator != nil {\n\t\t\tkey = g.Creator.Call\n\t\t}\n\t\tbycreator[key] = g\n\t}\n\ta := bycreator[\"v.io\/x\/ref\/test\/goroutines.runGoA\"]\n\tif a == nil {\n\t\tt.Errorf(\"runGoA is missing\")\n\t} else if len(a.Stack) != 1 {\n\t\tt.Errorf(\"got %d expected 1: %#v\", len(a.Stack), a.Stack)\n\t} else if !strings.HasPrefix(a.Stack[0].Call, \"v.io\/x\/ref\/test\/goroutines.waitForIt\") {\n\t\tt.Errorf(\"got %s, wanted it to start with v.io\/x\/ref\/test\/goroutines.waitForIt\",\n\t\t\ta.Stack[0].Call)\n\t}\n\tb := bycreator[\"v.io\/x\/ref\/test\/goroutines.runGoB\"]\n\tif b == nil {\n\t\tt.Errorf(\"runGoB is missing\")\n\t} else if len(b.Stack) != 5 {\n\t\tt.Errorf(\"got %d expected 1: %#v\", len(b.Stack), b.Stack)\n\t}\n\tc := bycreator[\"v.io\/x\/ref\/test\/goroutines.runGoC\"]\n\tif c == nil {\n\t\tt.Errorf(\"runGoC is missing\")\n\t} else if len(c.Stack) != 1 {\n\t\tt.Errorf(\"got %d expected 1: %#v\", len(c.Stack), c.Stack)\n\t} else if !strings.HasPrefix(c.Stack[0].Call, \"v.io\/x\/ref\/test\/goroutines.\") {\n\t\tt.Errorf(\"got %s, wanted it to start with v.io\/x\/ref\/test\/goroutines.\",\n\t\t\tc.Stack[0].Call)\n\t}\n}\n\nfunc TestFormat(t *testing.T) {\n\tvar wg sync.WaitGroup\n\twg.Add(3)\n\twait := make(chan struct{})\n\trunGoA(&wg, wait)\n\trunGoB(&wg, wait)\n\trunGoC(&wg, wait)\n\twg.Wait()\n\n\tbuf := make([]byte, 1<<20)\n\tbuf = buf[:runtime.Stack(buf, true)]\n\tclose(wait)\n\n\tgs, err := Parse(buf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif formatted := Format(gs...); !bytes.Equal(buf, formatted) {\n\t\tt.Errorf(\"got:\\n%s\\nwanted:\\n%s\\n\", string(formatted), string(buf))\n\t}\n}\n\ntype fakeErrorReporter struct {\n\tcalls int\n\textra int\n\tformatted string\n}\n\nfunc (f *fakeErrorReporter) Errorf(format string, args ...interface{}) {\n\tf.calls++\n\tf.extra = args[0].(int)\n\tf.formatted = args[1].(string)\n}\n\nfunc TestNoLeaks(t *testing.T) {\n\ter := &fakeErrorReporter{}\n\tf := NoLeaks(er, 100*time.Millisecond)\n\n\tvar wg sync.WaitGroup\n\twg.Add(3)\n\twait := make(chan struct{})\n\trunGoA(&wg, wait)\n\trunGoB(&wg, wait)\n\trunGoC(&wg, wait)\n\twg.Wait()\n\n\tf()\n\tif er.calls != 1 {\n\t\tt.Errorf(\"got %d, wanted 1: %s\", er.calls, er.formatted)\n\t}\n\tif er.extra != 3 {\n\t\tt.Errorf(\"got %d, wanted 3: %s\", er.extra, er.formatted)\n\t}\n\tclose(wait)\n\n\t*er = fakeErrorReporter{}\n\tf()\n\tif er.calls != 0 {\n\t\tt.Errorf(\"got %d, wanted 0: %s\", er.calls, er.formatted)\n\t}\n\tif er.extra != 0 {\n\t\tt.Errorf(\"got %d, wanted 0: %s\", er.extra, er.formatted)\n\t}\n}\n<commit_msg>TBR: ref\/test\/goroutines: Prevent leaked goroutines from breaking the test.<commit_after>\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage goroutines\n\nimport (\n\t\"bytes\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc wrappedWaitForIt(wg *sync.WaitGroup, wait chan struct{}, n int64) {\n\tif n == 0 {\n\t\twaitForIt(wg, wait)\n\t} else {\n\t\twrappedWaitForIt(wg, wait, n-1)\n\t}\n}\n\nfunc waitForIt(wg *sync.WaitGroup, wait chan struct{}) {\n\twg.Done()\n\t<-wait\n}\n\nfunc runGoA(wg *sync.WaitGroup, wait chan struct{}) {\n\tgo waitForIt(wg, wait)\n}\n\nfunc runGoB(wg *sync.WaitGroup, wait chan struct{}) {\n\tgo wrappedWaitForIt(wg, wait, 3)\n}\n\nfunc runGoC(wg *sync.WaitGroup, wait chan struct{}) {\n\tgo func() {\n\t\twg.Done()\n\t\t<-wait\n\t}()\n}\n\nfunc TestGet(t *testing.T) {\n\tdefer NoLeaks(t, 100*time.Millisecond)()\n\tvar wg sync.WaitGroup\n\twg.Add(3)\n\twait := make(chan struct{})\n\trunGoA(&wg, wait)\n\trunGoB(&wg, wait)\n\trunGoC(&wg, wait)\n\twg.Wait()\n\tgs, err := Get()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tclose(wait)\n\n\tif len(gs) < 4 {\n\t\tt.Errorf(\"Got %d goroutines, expected at least 4\", len(gs))\n\t}\n\tbycreator := map[string]*Goroutine{}\n\tfor _, g := range gs {\n\t\tkey := \"\"\n\t\tif g.Creator != nil {\n\t\t\tkey = g.Creator.Call\n\t\t}\n\t\tbycreator[key] = g\n\t}\n\ta := bycreator[\"v.io\/x\/ref\/test\/goroutines.runGoA\"]\n\tif a == nil {\n\t\tt.Errorf(\"runGoA is missing\")\n\t} else if len(a.Stack) != 1 {\n\t\tt.Errorf(\"got %d expected 1: %#v\", len(a.Stack), a.Stack)\n\t} else if !strings.HasPrefix(a.Stack[0].Call, \"v.io\/x\/ref\/test\/goroutines.waitForIt\") {\n\t\tt.Errorf(\"got %s, wanted it to start with v.io\/x\/ref\/test\/goroutines.waitForIt\",\n\t\t\ta.Stack[0].Call)\n\t}\n\tb := bycreator[\"v.io\/x\/ref\/test\/goroutines.runGoB\"]\n\tif b == nil {\n\t\tt.Errorf(\"runGoB is missing\")\n\t} else if len(b.Stack) != 5 {\n\t\tt.Errorf(\"got %d expected 1: %#v\", len(b.Stack), b.Stack)\n\t}\n\tc := bycreator[\"v.io\/x\/ref\/test\/goroutines.runGoC\"]\n\tif c == nil {\n\t\tt.Errorf(\"runGoC is missing\")\n\t} else if len(c.Stack) != 1 {\n\t\tt.Errorf(\"got %d expected 1: %#v\", len(c.Stack), c.Stack)\n\t} else if !strings.HasPrefix(c.Stack[0].Call, \"v.io\/x\/ref\/test\/goroutines.\") {\n\t\tt.Errorf(\"got %s, wanted it to start with v.io\/x\/ref\/test\/goroutines.\",\n\t\t\tc.Stack[0].Call)\n\t}\n}\n\nfunc TestFormat(t *testing.T) {\n\tdefer NoLeaks(t, 100*time.Millisecond)()\n\n\tvar wg sync.WaitGroup\n\twg.Add(3)\n\twait := make(chan struct{})\n\trunGoA(&wg, wait)\n\trunGoB(&wg, wait)\n\trunGoC(&wg, wait)\n\twg.Wait()\n\n\tbuf := make([]byte, 1<<20)\n\tbuf = buf[:runtime.Stack(buf, true)]\n\tclose(wait)\n\n\tgs, err := Parse(buf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif formatted := Format(gs...); !bytes.Equal(buf, formatted) {\n\t\tt.Errorf(\"got:\\n%s\\nwanted:\\n%s\\n\", string(formatted), string(buf))\n\t}\n}\n\ntype fakeErrorReporter struct {\n\tcalls int\n\textra int\n\tformatted string\n}\n\nfunc (f *fakeErrorReporter) Errorf(format string, args ...interface{}) {\n\tf.calls++\n\tf.extra = args[0].(int)\n\tf.formatted = args[1].(string)\n}\n\nfunc TestNoLeaks(t *testing.T) {\n\ter := &fakeErrorReporter{}\n\tf := NoLeaks(er, 100*time.Millisecond)\n\n\tvar wg sync.WaitGroup\n\twg.Add(3)\n\twait := make(chan struct{})\n\trunGoA(&wg, wait)\n\trunGoB(&wg, wait)\n\trunGoC(&wg, wait)\n\twg.Wait()\n\n\tf()\n\tif er.calls != 1 {\n\t\tt.Errorf(\"got %d, wanted 1: %s\", er.calls, er.formatted)\n\t}\n\tif er.extra != 3 {\n\t\tt.Errorf(\"got %d, wanted 3: %s\", er.extra, er.formatted)\n\t}\n\tclose(wait)\n\n\t*er = fakeErrorReporter{}\n\tf()\n\tif er.calls != 0 {\n\t\tt.Errorf(\"got %d, wanted 0: %s\", er.calls, er.formatted)\n\t}\n\tif er.extra != 0 {\n\t\tt.Errorf(\"got %d, wanted 0: %s\", er.extra, er.formatted)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package rtda\n\nimport rtc \"jvmgo\/jvm\/rtda\/class\"\n\n\/\/ stack frame\ntype Frame struct {\n thread *Thread\n nextPC int\n localVars *LocalVars\n operandStack *OperandStack\n method *rtc.Method\n onPopAction func()\n}\n\nfunc newFrame(thread *Thread, method *rtc.Method) (*Frame) {\n localVars := newLocalVars(method.MaxLocals())\n operandStack := newOperandStack(method.MaxStack())\n return &Frame{thread, 0, localVars, operandStack, method, nil}\n}\n\n\/\/ getters & setters\nfunc (self *Frame) Thread() (*Thread) {\n return self.thread\n}\nfunc (self *Frame) NextPC() (int) {\n return self.nextPC\n}\nfunc (self *Frame) SetNextPC(nextPC int) {\n self.nextPC = nextPC\n}\nfunc (self *Frame) LocalVars() (*LocalVars) {\n return self.localVars\n}\nfunc (self *Frame) OperandStack() (*OperandStack) {\n return self.operandStack\n}\nfunc (self *Frame) Method() (*rtc.Method) {\n return self.method\n}\nfunc (self *Frame) SetOnPopAction(f func()) {\n self.onPopAction = f\n}\n<commit_msg>code refactor<commit_after>package rtda\n\nimport rtc \"jvmgo\/jvm\/rtda\/class\"\n\n\/\/ stack frame\ntype Frame struct {\n lower *Frame \/\/ stack is implemented as linked list\n thread *Thread\n method *rtc.Method\n localVars *LocalVars\n operandStack *OperandStack\n nextPC int\n onPopAction func()\n}\n\nfunc newFrame(thread *Thread, method *rtc.Method) (*Frame) {\n frame := &Frame{}\n frame.thread = thread\n frame.method = method\n frame.localVars = newLocalVars(method.MaxLocals())\n frame.operandStack = newOperandStack(method.MaxStack())\n return frame\n}\n\n\/\/ getters & setters\nfunc (self *Frame) Thread() (*Thread) {\n return self.thread\n}\nfunc (self *Frame) NextPC() (int) {\n return self.nextPC\n}\nfunc (self *Frame) SetNextPC(nextPC int) {\n self.nextPC = nextPC\n}\nfunc (self *Frame) LocalVars() (*LocalVars) {\n return self.localVars\n}\nfunc (self *Frame) OperandStack() (*OperandStack) {\n return self.operandStack\n}\nfunc (self *Frame) Method() (*rtc.Method) {\n return self.method\n}\nfunc (self *Frame) SetOnPopAction(f func()) {\n self.onPopAction = f\n}\n<|endoftext|>"} {"text":"<commit_before>package zclsyntax\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/apparentlymart\/go-textseg\/textseg\"\n\t\"github.com\/zclconf\/go-zcl\/zcl\"\n)\n\n\/\/ Token represents a sequence of bytes from some zcl code that has been\n\/\/ tagged with a type and its range within the source file.\ntype Token struct {\n\tType TokenType\n\tBytes []byte\n\tRange zcl.Range\n}\n\n\/\/ TokenType is an enumeration used for the Type field on Token.\ntype TokenType rune\n\n\/\/go:generate stringer -type TokenType -output token_type_string.go\n\nconst (\n\t\/\/ Single-character tokens are represented by their own character, for\n\t\/\/ convenience in producing these within the scanner. However, the values\n\t\/\/ are otherwise arbitrary and just intended to be mnemonic for humans\n\t\/\/ who might see them in debug output.\n\n\tTokenOBrace TokenType = '{'\n\tTokenCBrace TokenType = '}'\n\tTokenOBrack TokenType = '['\n\tTokenCBrack TokenType = ']'\n\tTokenOParen TokenType = '('\n\tTokenCParen TokenType = ')'\n\tTokenOQuote TokenType = '«'\n\tTokenCQuote TokenType = '»'\n\n\tTokenDot TokenType = '.'\n\tTokenStar TokenType = '*'\n\tTokenSlash TokenType = '\/'\n\tTokenPlus TokenType = '+'\n\tTokenMinus TokenType = '-'\n\n\tTokenEqual TokenType = '='\n\tTokenNotEqual TokenType = '≠'\n\tTokenLessThan TokenType = '<'\n\tTokenLessThanEq TokenType = '≤'\n\tTokenGreaterThan TokenType = '>'\n\tTokenGreaterThanEq TokenType = '≥'\n\n\tTokenAnd TokenType = '∧'\n\tTokenOr TokenType = '∨'\n\tTokenBang TokenType = '!'\n\n\tTokenQuestion TokenType = '?'\n\tTokenColon TokenType = ':'\n\n\tTokenTemplateInterp TokenType = '∫'\n\tTokenTemplateControl TokenType = 'λ'\n\tTokenTemplateSeqEnd TokenType = '∎'\n\n\tTokenStringLit TokenType = 'S'\n\tTokenHeredoc TokenType = 'H'\n\tTokenNumberLit TokenType = 'N'\n\tTokenIdent TokenType = 'I'\n\n\tTokenNewline TokenType = '\\n'\n\tTokenEOF TokenType = '␄'\n\n\t\/\/ The rest are not used in the language but recognized by the scanner so\n\t\/\/ we can generate good diagnostics in the parser when users try to write\n\t\/\/ things that might work in other languages they are familiar with, or\n\t\/\/ simply make incorrect assumptions about the zcl language.\n\n\tTokenBitwiseAnd TokenType = '&'\n\tTokenBitwiseOr TokenType = '|'\n\tTokenBitwiseNot TokenType = '~'\n\tTokenBitwiseXor TokenType = '^'\n\tTokenStarStar TokenType = '➚'\n\tTokenBacktick TokenType = '`'\n\tTokenSemicolon TokenType = ';'\n\tTokenTabs TokenType = '␉'\n\tTokenInvalid TokenType = '�'\n\tTokenBadUTF8 TokenType = '💩'\n)\n\nfunc (t TokenType) GoString() string {\n\treturn fmt.Sprintf(\"zclsyntax.%s\", t.String())\n}\n\ntype tokenAccum struct {\n\tFilename string\n\tBytes []byte\n\tPos zcl.Pos\n\tTokens []Token\n}\n\nfunc (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) {\n\t\/\/ Walk through our buffer to figure out how much we need to adjust\n\t\/\/ the start pos to get our end pos.\n\n\tstart := f.Pos\n\tstart.Column += startOfs - f.Pos.Byte \/\/ Safe because only ASCII spaces can be in the offset\n\tstart.Byte = startOfs\n\n\tend := start\n\tend.Byte = endOfs\n\tb := f.Bytes[startOfs:endOfs]\n\tfor len(b) > 0 {\n\t\tadvance, seq, _ := textseg.ScanGraphemeClusters(b, true)\n\t\tif len(seq) == 1 && seq[0] == '\\n' {\n\t\t\tend.Line++\n\t\t\tend.Column = 1\n\t\t} else {\n\t\t\tend.Column++\n\t\t}\n\t\tb = b[advance:]\n\t}\n\n\tf.Pos = end\n\n\tf.Tokens = append(f.Tokens, Token{\n\t\tType: ty,\n\t\tBytes: f.Bytes[startOfs:endOfs],\n\t\tRange: zcl.Range{\n\t\t\tFilename: f.Filename,\n\t\t\tStart: start,\n\t\t\tEnd: end,\n\t\t},\n\t})\n}\n<commit_msg>zclsyntax: heredoc to be separate start\/end tokens<commit_after>package zclsyntax\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/apparentlymart\/go-textseg\/textseg\"\n\t\"github.com\/zclconf\/go-zcl\/zcl\"\n)\n\n\/\/ Token represents a sequence of bytes from some zcl code that has been\n\/\/ tagged with a type and its range within the source file.\ntype Token struct {\n\tType TokenType\n\tBytes []byte\n\tRange zcl.Range\n}\n\n\/\/ TokenType is an enumeration used for the Type field on Token.\ntype TokenType rune\n\n\/\/go:generate stringer -type TokenType -output token_type_string.go\n\nconst (\n\t\/\/ Single-character tokens are represented by their own character, for\n\t\/\/ convenience in producing these within the scanner. However, the values\n\t\/\/ are otherwise arbitrary and just intended to be mnemonic for humans\n\t\/\/ who might see them in debug output.\n\n\tTokenOBrace TokenType = '{'\n\tTokenCBrace TokenType = '}'\n\tTokenOBrack TokenType = '['\n\tTokenCBrack TokenType = ']'\n\tTokenOParen TokenType = '('\n\tTokenCParen TokenType = ')'\n\tTokenOQuote TokenType = '«'\n\tTokenCQuote TokenType = '»'\n\tTokenOHeredoc TokenType = 'H'\n\tTokenCHeredoc TokenType = 'h'\n\n\tTokenDot TokenType = '.'\n\tTokenStar TokenType = '*'\n\tTokenSlash TokenType = '\/'\n\tTokenPlus TokenType = '+'\n\tTokenMinus TokenType = '-'\n\n\tTokenEqual TokenType = '='\n\tTokenNotEqual TokenType = '≠'\n\tTokenLessThan TokenType = '<'\n\tTokenLessThanEq TokenType = '≤'\n\tTokenGreaterThan TokenType = '>'\n\tTokenGreaterThanEq TokenType = '≥'\n\n\tTokenAnd TokenType = '∧'\n\tTokenOr TokenType = '∨'\n\tTokenBang TokenType = '!'\n\n\tTokenQuestion TokenType = '?'\n\tTokenColon TokenType = ':'\n\n\tTokenTemplateInterp TokenType = '∫'\n\tTokenTemplateControl TokenType = 'λ'\n\tTokenTemplateSeqEnd TokenType = '∎'\n\n\tTokenStringLit TokenType = 'S'\n\tTokenNumberLit TokenType = 'N'\n\tTokenIdent TokenType = 'I'\n\n\tTokenNewline TokenType = '\\n'\n\tTokenEOF TokenType = '␄'\n\n\t\/\/ The rest are not used in the language but recognized by the scanner so\n\t\/\/ we can generate good diagnostics in the parser when users try to write\n\t\/\/ things that might work in other languages they are familiar with, or\n\t\/\/ simply make incorrect assumptions about the zcl language.\n\n\tTokenBitwiseAnd TokenType = '&'\n\tTokenBitwiseOr TokenType = '|'\n\tTokenBitwiseNot TokenType = '~'\n\tTokenBitwiseXor TokenType = '^'\n\tTokenStarStar TokenType = '➚'\n\tTokenBacktick TokenType = '`'\n\tTokenSemicolon TokenType = ';'\n\tTokenTabs TokenType = '␉'\n\tTokenInvalid TokenType = '�'\n\tTokenBadUTF8 TokenType = '💩'\n)\n\nfunc (t TokenType) GoString() string {\n\treturn fmt.Sprintf(\"zclsyntax.%s\", t.String())\n}\n\ntype tokenAccum struct {\n\tFilename string\n\tBytes []byte\n\tPos zcl.Pos\n\tTokens []Token\n}\n\nfunc (f *tokenAccum) emitToken(ty TokenType, startOfs, endOfs int) {\n\t\/\/ Walk through our buffer to figure out how much we need to adjust\n\t\/\/ the start pos to get our end pos.\n\n\tstart := f.Pos\n\tstart.Column += startOfs - f.Pos.Byte \/\/ Safe because only ASCII spaces can be in the offset\n\tstart.Byte = startOfs\n\n\tend := start\n\tend.Byte = endOfs\n\tb := f.Bytes[startOfs:endOfs]\n\tfor len(b) > 0 {\n\t\tadvance, seq, _ := textseg.ScanGraphemeClusters(b, true)\n\t\tif len(seq) == 1 && seq[0] == '\\n' {\n\t\t\tend.Line++\n\t\t\tend.Column = 1\n\t\t} else {\n\t\t\tend.Column++\n\t\t}\n\t\tb = b[advance:]\n\t}\n\n\tf.Pos = end\n\n\tf.Tokens = append(f.Tokens, Token{\n\t\tType: ty,\n\t\tBytes: f.Bytes[startOfs:endOfs],\n\t\tRange: zcl.Range{\n\t\t\tFilename: f.Filename,\n\t\t\tStart: start,\n\t\t\tEnd: end,\n\t\t},\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package resource\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/go-getter\"\n\t\"github.com\/hashicorp\/terraform\/config\/module\"\n\t\"github.com\/hashicorp\/terraform\/helper\/logging\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\nconst TestEnvVar = \"TF_ACC\"\n\n\/\/ TestCheckFunc is the callback type used with acceptance tests to check\n\/\/ the state of a resource. The state passed in is the latest state known,\n\/\/ or in the case of being after a destroy, it is the last known state when\n\/\/ it was created.\ntype TestCheckFunc func(*terraform.State) error\n\n\/\/ TestCase is a single acceptance test case used to test the apply\/destroy\n\/\/ lifecycle of a resource in a specific configuration.\n\/\/\n\/\/ When the destroy plan is executed, the config from the last TestStep\n\/\/ is used to plan it.\ntype TestCase struct {\n\t\/\/ PreCheck, if non-nil, will be called before any test steps are\n\t\/\/ executed. It will only be executed in the case that the steps\n\t\/\/ would run, so it can be used for some validation before running\n\t\/\/ acceptance tests, such as verifying that keys are setup.\n\tPreCheck func()\n\n\t\/\/ Providers is the ResourceProvider that will be under test.\n\t\/\/\n\t\/\/ Alternately, ProviderFactories can be specified for the providers\n\t\/\/ that are valid. This takes priority over Providers.\n\t\/\/\n\t\/\/ The end effect of each is the same: specifying the providers that\n\t\/\/ are used within the tests.\n\tProviders map[string]terraform.ResourceProvider\n\tProviderFactories map[string]terraform.ResourceProviderFactory\n\n\t\/\/ CheckDestroy is called after the resource is finally destroyed\n\t\/\/ to allow the tester to test that the resource is truly gone.\n\tCheckDestroy TestCheckFunc\n\n\t\/\/ Steps are the apply sequences done within the context of the\n\t\/\/ same state. Each step can have its own check to verify correctness.\n\tSteps []TestStep\n}\n\n\/\/ TestStep is a single apply sequence of a test, done within the\n\/\/ context of a state.\n\/\/\n\/\/ Multiple TestSteps can be sequenced in a Test to allow testing\n\/\/ potentially complex update logic. In general, simply create\/destroy\n\/\/ tests will only need one step.\ntype TestStep struct {\n\t\/\/ PreConfig is called before the Config is applied to perform any per-step\n\t\/\/ setup that needs to happen\n\tPreConfig func()\n\n\t\/\/ Config a string of the configuration to give to Terraform.\n\tConfig string\n\n\t\/\/ Check is called after the Config is applied. Use this step to\n\t\/\/ make your own API calls to check the status of things, and to\n\t\/\/ inspect the format of the ResourceState itself.\n\t\/\/\n\t\/\/ If an error is returned, the test will fail. In this case, a\n\t\/\/ destroy plan will still be attempted.\n\t\/\/\n\t\/\/ If this is nil, no check is done on this step.\n\tCheck TestCheckFunc\n\n\t\/\/ Destroy will create a destroy plan if set to true.\n\tDestroy bool\n}\n\n\/\/ Test performs an acceptance test on a resource.\n\/\/\n\/\/ Tests are not run unless an environmental variable \"TF_ACC\" is\n\/\/ set to some non-empty value. This is to avoid test cases surprising\n\/\/ a user by creating real resources.\n\/\/\n\/\/ Tests will fail unless the verbose flag (`go test -v`, or explicitly\n\/\/ the \"-test.v\" flag) is set. Because some acceptance tests take quite\n\/\/ long, we require the verbose flag so users are able to see progress\n\/\/ output.\nfunc Test(t TestT, c TestCase) {\n\t\/\/ We only run acceptance tests if an env var is set because they're\n\t\/\/ slow and generally require some outside configuration.\n\tif os.Getenv(TestEnvVar) == \"\" {\n\t\tt.Skip(fmt.Sprintf(\n\t\t\t\"Acceptance tests skipped unless env '%s' set\",\n\t\t\tTestEnvVar))\n\t\treturn\n\t}\n\n\tlogWriter, err := logging.LogOutput()\n\tif err != nil {\n\t\tt.Error(fmt.Errorf(\"error setting up logging: %s\", err))\n\t}\n\tlog.SetOutput(logWriter)\n\n\t\/\/ We require verbose mode so that the user knows what is going on.\n\tif !testTesting && !testing.Verbose() {\n\t\tt.Fatal(\"Acceptance tests must be run with the -v flag on tests\")\n\t\treturn\n\t}\n\n\t\/\/ Run the PreCheck if we have it\n\tif c.PreCheck != nil {\n\t\tc.PreCheck()\n\t}\n\n\t\/\/ Build our context options that we can\n\tctxProviders := c.ProviderFactories\n\tif ctxProviders == nil {\n\t\tctxProviders = make(map[string]terraform.ResourceProviderFactory)\n\t\tfor k, p := range c.Providers {\n\t\t\tctxProviders[k] = terraform.ResourceProviderFactoryFixed(p)\n\t\t}\n\t}\n\topts := terraform.ContextOpts{Providers: ctxProviders}\n\n\t\/\/ A single state variable to track the lifecycle, starting with no state\n\tvar state *terraform.State\n\n\t\/\/ Go through each step and run it\n\tfor i, step := range c.Steps {\n\t\tvar err error\n\t\tlog.Printf(\"[WARN] Test: Executing step %d\", i)\n\t\tstate, err = testStep(opts, state, step)\n\t\tif err != nil {\n\t\t\tt.Error(fmt.Sprintf(\n\t\t\t\t\"Step %d error: %s\", i, err))\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ If we have a state, then run the destroy\n\tif state != nil {\n\t\tdestroyStep := TestStep{\n\t\t\tConfig: c.Steps[len(c.Steps)-1].Config,\n\t\t\tCheck: c.CheckDestroy,\n\t\t\tDestroy: true,\n\t\t}\n\n\t\tlog.Printf(\"[WARN] Test: Executing destroy step\")\n\t\tstate, err := testStep(opts, state, destroyStep)\n\t\tif err != nil {\n\t\t\tt.Error(fmt.Sprintf(\n\t\t\t\t\"Error destroying resource! WARNING: Dangling resources\\n\"+\n\t\t\t\t\t\"may exist. The full state and error is shown below.\\n\\n\"+\n\t\t\t\t\t\"Error: %s\\n\\nState: %s\",\n\t\t\t\terr,\n\t\t\t\tstate))\n\t\t}\n\t} else {\n\t\tlog.Printf(\"[WARN] Skipping destroy test since there is no state.\")\n\t}\n}\n\nfunc testStep(\n\topts terraform.ContextOpts,\n\tstate *terraform.State,\n\tstep TestStep) (*terraform.State, error) {\n\tif step.PreConfig != nil {\n\t\tstep.PreConfig()\n\t}\n\n\tcfgPath, err := ioutil.TempDir(\"\", \"tf-test\")\n\tif err != nil {\n\t\treturn state, fmt.Errorf(\n\t\t\t\"Error creating temporary directory for config: %s\", err)\n\t}\n\tdefer os.RemoveAll(cfgPath)\n\n\t\/\/ Write the configuration\n\tcfgF, err := os.Create(filepath.Join(cfgPath, \"main.tf\"))\n\tif err != nil {\n\t\treturn state, fmt.Errorf(\n\t\t\t\"Error creating temporary file for config: %s\", err)\n\t}\n\n\t_, err = io.Copy(cfgF, strings.NewReader(step.Config))\n\tcfgF.Close()\n\tif err != nil {\n\t\treturn state, fmt.Errorf(\n\t\t\t\"Error creating temporary file for config: %s\", err)\n\t}\n\n\t\/\/ Parse the configuration\n\tmod, err := module.NewTreeModule(\"\", cfgPath)\n\tif err != nil {\n\t\treturn state, fmt.Errorf(\n\t\t\t\"Error loading configuration: %s\", err)\n\t}\n\n\t\/\/ Load the modules\n\tmodStorage := &getter.FolderStorage{\n\t\tStorageDir: filepath.Join(cfgPath, \".tfmodules\"),\n\t}\n\terr = mod.Load(modStorage, module.GetModeGet)\n\tif err != nil {\n\t\treturn state, fmt.Errorf(\"Error downloading modules: %s\", err)\n\t}\n\n\t\/\/ Build the context\n\topts.Module = mod\n\topts.State = state\n\topts.Destroy = step.Destroy\n\tctx := terraform.NewContext(&opts)\n\tif ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 {\n\t\tif len(es) > 0 {\n\t\t\testrs := make([]string, len(es))\n\t\t\tfor i, e := range es {\n\t\t\t\testrs[i] = e.Error()\n\t\t\t}\n\t\t\treturn state, fmt.Errorf(\n\t\t\t\t\"Configuration is invalid.\\n\\nWarnings: %#v\\n\\nErrors: %#v\",\n\t\t\t\tws, estrs)\n\t\t}\n\t\tlog.Printf(\"[WARN] Config warnings: %#v\", ws)\n\t}\n\n\t\/\/ Refresh!\n\tstate, err = ctx.Refresh()\n\tif err != nil {\n\t\treturn state, fmt.Errorf(\n\t\t\t\"Error refreshing: %s\", err)\n\t}\n\n\t\/\/ Plan!\n\tif p, err := ctx.Plan(); err != nil {\n\t\treturn state, fmt.Errorf(\n\t\t\t\"Error planning: %s\", err)\n\t} else {\n\t\tlog.Printf(\"[WARN] Test: Step plan: %s\", p)\n\t}\n\n\t\/\/ Apply!\n\tstate, err = ctx.Apply()\n\tif err != nil {\n\t\treturn state, fmt.Errorf(\"Error applying: %s\", err)\n\t}\n\n\t\/\/ Check! Excitement!\n\tif step.Check != nil {\n\t\tif err := step.Check(state); err != nil {\n\t\t\treturn state, fmt.Errorf(\"Check failed: %s\", err)\n\t\t}\n\t}\n\n\t\/\/ Now, verify that Plan is now empty and we don't have a perpetual diff issue\n\t\/\/ We do this with TWO plans. One without a refresh.\n\tif p, err := ctx.Plan(); err != nil {\n\t\treturn state, fmt.Errorf(\"Error on follow-up plan: %s\", err)\n\t} else {\n\t\tif p.Diff != nil && !p.Diff.Empty() {\n\t\t\treturn state, fmt.Errorf(\n\t\t\t\t\"After applying this step, the plan was not empty:\\n\\n%s\", p)\n\t\t}\n\t}\n\n\t\/\/ And another after a Refresh.\n\tstate, err = ctx.Refresh()\n\tif err != nil {\n\t\treturn state, fmt.Errorf(\n\t\t\t\"Error on follow-up refresh: %s\", err)\n\t}\n\tif p, err := ctx.Plan(); err != nil {\n\t\treturn state, fmt.Errorf(\"Error on second follow-up plan: %s\", err)\n\t} else {\n\t\tif p.Diff != nil && !p.Diff.Empty() {\n\t\t\treturn state, fmt.Errorf(\n\t\t\t\t\"After applying this step and refreshing, the plan was not empty:\\n\\n%s\", p)\n\t\t}\n\t}\n\n\t\/\/ Made it here? Good job test step!\n\treturn state, nil\n}\n\n\/\/ ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into\n\/\/ a single TestCheckFunc.\n\/\/\n\/\/ As a user testing their provider, this lets you decompose your checks\n\/\/ into smaller pieces more easily.\nfunc ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\tfor _, f := range fs {\n\t\t\tif err := f(s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\nfunc TestCheckResourceAttr(name, key, value string) TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\tms := s.RootModule()\n\t\trs, ok := ms.Resources[name]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", name)\n\t\t}\n\n\t\tis := rs.Primary\n\t\tif is == nil {\n\t\t\treturn fmt.Errorf(\"No primary instance: %s\", name)\n\t\t}\n\n\t\tif is.Attributes[key] != value {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"%s: Attribute '%s' expected %#v, got %#v\",\n\t\t\t\tname,\n\t\t\t\tkey,\n\t\t\t\tvalue,\n\t\t\t\tis.Attributes[key])\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\nfunc TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\tms := s.RootModule()\n\t\trs, ok := ms.Resources[name]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", name)\n\t\t}\n\n\t\tis := rs.Primary\n\t\tif is == nil {\n\t\t\treturn fmt.Errorf(\"No primary instance: %s\", name)\n\t\t}\n\n\t\tif !r.MatchString(is.Attributes[key]) {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"%s: Attribute '%s' didn't match %q, got %#v\",\n\t\t\t\tname,\n\t\t\t\tkey,\n\t\t\t\tr.String(),\n\t\t\t\tis.Attributes[key])\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\n\/\/ TestCheckResourceAttrPtr is like TestCheckResourceAttr except the\n\/\/ value is a pointer so that it can be updated while the test is running.\n\/\/ It will only be dereferenced at the point this step is run.\nfunc TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\treturn TestCheckResourceAttr(name, key, *value)(s)\n\t}\n}\n\n\/\/ TestT is the interface used to handle the test lifecycle of a test.\n\/\/\n\/\/ Users should just use a *testing.T object, which implements this.\ntype TestT interface {\n\tError(args ...interface{})\n\tFatal(args ...interface{})\n\tSkip(args ...interface{})\n}\n\n\/\/ This is set to true by unit tests to alter some behavior\nvar testTesting = false\n<commit_msg>testing: Use a copy of pre-destroy state in destroy check<commit_after>package resource\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/hashicorp\/go-getter\"\n\t\"github.com\/hashicorp\/terraform\/config\/module\"\n\t\"github.com\/hashicorp\/terraform\/helper\/logging\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\nconst TestEnvVar = \"TF_ACC\"\n\n\/\/ TestCheckFunc is the callback type used with acceptance tests to check\n\/\/ the state of a resource. The state passed in is the latest state known,\n\/\/ or in the case of being after a destroy, it is the last known state when\n\/\/ it was created.\ntype TestCheckFunc func(*terraform.State) error\n\n\/\/ TestCase is a single acceptance test case used to test the apply\/destroy\n\/\/ lifecycle of a resource in a specific configuration.\n\/\/\n\/\/ When the destroy plan is executed, the config from the last TestStep\n\/\/ is used to plan it.\ntype TestCase struct {\n\t\/\/ PreCheck, if non-nil, will be called before any test steps are\n\t\/\/ executed. It will only be executed in the case that the steps\n\t\/\/ would run, so it can be used for some validation before running\n\t\/\/ acceptance tests, such as verifying that keys are setup.\n\tPreCheck func()\n\n\t\/\/ Providers is the ResourceProvider that will be under test.\n\t\/\/\n\t\/\/ Alternately, ProviderFactories can be specified for the providers\n\t\/\/ that are valid. This takes priority over Providers.\n\t\/\/\n\t\/\/ The end effect of each is the same: specifying the providers that\n\t\/\/ are used within the tests.\n\tProviders map[string]terraform.ResourceProvider\n\tProviderFactories map[string]terraform.ResourceProviderFactory\n\n\t\/\/ CheckDestroy is called after the resource is finally destroyed\n\t\/\/ to allow the tester to test that the resource is truly gone.\n\tCheckDestroy TestCheckFunc\n\n\t\/\/ Steps are the apply sequences done within the context of the\n\t\/\/ same state. Each step can have its own check to verify correctness.\n\tSteps []TestStep\n}\n\n\/\/ TestStep is a single apply sequence of a test, done within the\n\/\/ context of a state.\n\/\/\n\/\/ Multiple TestSteps can be sequenced in a Test to allow testing\n\/\/ potentially complex update logic. In general, simply create\/destroy\n\/\/ tests will only need one step.\ntype TestStep struct {\n\t\/\/ PreConfig is called before the Config is applied to perform any per-step\n\t\/\/ setup that needs to happen\n\tPreConfig func()\n\n\t\/\/ Config a string of the configuration to give to Terraform.\n\tConfig string\n\n\t\/\/ Check is called after the Config is applied. Use this step to\n\t\/\/ make your own API calls to check the status of things, and to\n\t\/\/ inspect the format of the ResourceState itself.\n\t\/\/\n\t\/\/ If an error is returned, the test will fail. In this case, a\n\t\/\/ destroy plan will still be attempted.\n\t\/\/\n\t\/\/ If this is nil, no check is done on this step.\n\tCheck TestCheckFunc\n\n\t\/\/ Destroy will create a destroy plan if set to true.\n\tDestroy bool\n}\n\n\/\/ Test performs an acceptance test on a resource.\n\/\/\n\/\/ Tests are not run unless an environmental variable \"TF_ACC\" is\n\/\/ set to some non-empty value. This is to avoid test cases surprising\n\/\/ a user by creating real resources.\n\/\/\n\/\/ Tests will fail unless the verbose flag (`go test -v`, or explicitly\n\/\/ the \"-test.v\" flag) is set. Because some acceptance tests take quite\n\/\/ long, we require the verbose flag so users are able to see progress\n\/\/ output.\nfunc Test(t TestT, c TestCase) {\n\t\/\/ We only run acceptance tests if an env var is set because they're\n\t\/\/ slow and generally require some outside configuration.\n\tif os.Getenv(TestEnvVar) == \"\" {\n\t\tt.Skip(fmt.Sprintf(\n\t\t\t\"Acceptance tests skipped unless env '%s' set\",\n\t\t\tTestEnvVar))\n\t\treturn\n\t}\n\n\tlogWriter, err := logging.LogOutput()\n\tif err != nil {\n\t\tt.Error(fmt.Errorf(\"error setting up logging: %s\", err))\n\t}\n\tlog.SetOutput(logWriter)\n\n\t\/\/ We require verbose mode so that the user knows what is going on.\n\tif !testTesting && !testing.Verbose() {\n\t\tt.Fatal(\"Acceptance tests must be run with the -v flag on tests\")\n\t\treturn\n\t}\n\n\t\/\/ Run the PreCheck if we have it\n\tif c.PreCheck != nil {\n\t\tc.PreCheck()\n\t}\n\n\t\/\/ Build our context options that we can\n\tctxProviders := c.ProviderFactories\n\tif ctxProviders == nil {\n\t\tctxProviders = make(map[string]terraform.ResourceProviderFactory)\n\t\tfor k, p := range c.Providers {\n\t\t\tctxProviders[k] = terraform.ResourceProviderFactoryFixed(p)\n\t\t}\n\t}\n\topts := terraform.ContextOpts{Providers: ctxProviders}\n\n\t\/\/ A single state variable to track the lifecycle, starting with no state\n\tvar state *terraform.State\n\n\t\/\/ Go through each step and run it\n\tfor i, step := range c.Steps {\n\t\tvar err error\n\t\tlog.Printf(\"[WARN] Test: Executing step %d\", i)\n\t\tstate, err = testStep(opts, state, step)\n\t\tif err != nil {\n\t\t\tt.Error(fmt.Sprintf(\n\t\t\t\t\"Step %d error: %s\", i, err))\n\t\t\tbreak\n\t\t}\n\t}\n\n\t\/\/ If we have a state, then run the destroy\n\tif state != nil {\n\t\tdestroyStep := TestStep{\n\t\t\tConfig: c.Steps[len(c.Steps)-1].Config,\n\t\t\tCheck: c.CheckDestroy,\n\t\t\tDestroy: true,\n\t\t}\n\n\t\tlog.Printf(\"[WARN] Test: Executing destroy step\")\n\t\tstate, err := testStep(opts, state, destroyStep)\n\t\tif err != nil {\n\t\t\tt.Error(fmt.Sprintf(\n\t\t\t\t\"Error destroying resource! WARNING: Dangling resources\\n\"+\n\t\t\t\t\t\"may exist. The full state and error is shown below.\\n\\n\"+\n\t\t\t\t\t\"Error: %s\\n\\nState: %s\",\n\t\t\t\terr,\n\t\t\t\tstate))\n\t\t}\n\t} else {\n\t\tlog.Printf(\"[WARN] Skipping destroy test since there is no state.\")\n\t}\n}\n\nfunc testStep(\n\topts terraform.ContextOpts,\n\tstate *terraform.State,\n\tstep TestStep) (*terraform.State, error) {\n\tif step.PreConfig != nil {\n\t\tstep.PreConfig()\n\t}\n\n\tcfgPath, err := ioutil.TempDir(\"\", \"tf-test\")\n\tif err != nil {\n\t\treturn state, fmt.Errorf(\n\t\t\t\"Error creating temporary directory for config: %s\", err)\n\t}\n\tdefer os.RemoveAll(cfgPath)\n\n\t\/\/ Write the configuration\n\tcfgF, err := os.Create(filepath.Join(cfgPath, \"main.tf\"))\n\tif err != nil {\n\t\treturn state, fmt.Errorf(\n\t\t\t\"Error creating temporary file for config: %s\", err)\n\t}\n\n\t_, err = io.Copy(cfgF, strings.NewReader(step.Config))\n\tcfgF.Close()\n\tif err != nil {\n\t\treturn state, fmt.Errorf(\n\t\t\t\"Error creating temporary file for config: %s\", err)\n\t}\n\n\t\/\/ Parse the configuration\n\tmod, err := module.NewTreeModule(\"\", cfgPath)\n\tif err != nil {\n\t\treturn state, fmt.Errorf(\n\t\t\t\"Error loading configuration: %s\", err)\n\t}\n\n\t\/\/ Load the modules\n\tmodStorage := &getter.FolderStorage{\n\t\tStorageDir: filepath.Join(cfgPath, \".tfmodules\"),\n\t}\n\terr = mod.Load(modStorage, module.GetModeGet)\n\tif err != nil {\n\t\treturn state, fmt.Errorf(\"Error downloading modules: %s\", err)\n\t}\n\n\t\/\/ Build the context\n\topts.Module = mod\n\topts.State = state\n\topts.Destroy = step.Destroy\n\tctx := terraform.NewContext(&opts)\n\tif ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 {\n\t\tif len(es) > 0 {\n\t\t\testrs := make([]string, len(es))\n\t\t\tfor i, e := range es {\n\t\t\t\testrs[i] = e.Error()\n\t\t\t}\n\t\t\treturn state, fmt.Errorf(\n\t\t\t\t\"Configuration is invalid.\\n\\nWarnings: %#v\\n\\nErrors: %#v\",\n\t\t\t\tws, estrs)\n\t\t}\n\t\tlog.Printf(\"[WARN] Config warnings: %#v\", ws)\n\t}\n\n\t\/\/ Refresh!\n\tstate, err = ctx.Refresh()\n\tif err != nil {\n\t\treturn state, fmt.Errorf(\n\t\t\t\"Error refreshing: %s\", err)\n\t}\n\n\t\/\/ Plan!\n\tif p, err := ctx.Plan(); err != nil {\n\t\treturn state, fmt.Errorf(\n\t\t\t\"Error planning: %s\", err)\n\t} else {\n\t\tlog.Printf(\"[WARN] Test: Step plan: %s\", p)\n\t}\n\n\t\/\/ We need to keep a copy of the state prior to destroying\n\t\/\/ such that destroy steps can verify their behaviour in the check\n\t\/\/ function\n\tstateBeforeApplication := state.DeepCopy()\n\n\t\/\/ Apply!\n\tstate, err = ctx.Apply()\n\tif err != nil {\n\t\treturn state, fmt.Errorf(\"Error applying: %s\", err)\n\t}\n\n\t\/\/ Check! Excitement!\n\tif step.Check != nil {\n\t\tif step.Destroy {\n\t\t\tif err := step.Check(stateBeforeApplication); err != nil {\n\t\t\t\treturn state, fmt.Errorf(\"Check failed: %s\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tif err := step.Check(state); err != nil {\n\t\t\t\treturn state, fmt.Errorf(\"Check failed: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Now, verify that Plan is now empty and we don't have a perpetual diff issue\n\t\/\/ We do this with TWO plans. One without a refresh.\n\tif p, err := ctx.Plan(); err != nil {\n\t\treturn state, fmt.Errorf(\"Error on follow-up plan: %s\", err)\n\t} else {\n\t\tif p.Diff != nil && !p.Diff.Empty() {\n\t\t\treturn state, fmt.Errorf(\n\t\t\t\t\"After applying this step, the plan was not empty:\\n\\n%s\", p)\n\t\t}\n\t}\n\n\t\/\/ And another after a Refresh.\n\tstate, err = ctx.Refresh()\n\tif err != nil {\n\t\treturn state, fmt.Errorf(\n\t\t\t\"Error on follow-up refresh: %s\", err)\n\t}\n\tif p, err := ctx.Plan(); err != nil {\n\t\treturn state, fmt.Errorf(\"Error on second follow-up plan: %s\", err)\n\t} else {\n\t\tif p.Diff != nil && !p.Diff.Empty() {\n\t\t\treturn state, fmt.Errorf(\n\t\t\t\t\"After applying this step and refreshing, the plan was not empty:\\n\\n%s\", p)\n\t\t}\n\t}\n\n\t\/\/ Made it here? Good job test step!\n\treturn state, nil\n}\n\n\/\/ ComposeTestCheckFunc lets you compose multiple TestCheckFuncs into\n\/\/ a single TestCheckFunc.\n\/\/\n\/\/ As a user testing their provider, this lets you decompose your checks\n\/\/ into smaller pieces more easily.\nfunc ComposeTestCheckFunc(fs ...TestCheckFunc) TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\tfor _, f := range fs {\n\t\t\tif err := f(s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\nfunc TestCheckResourceAttr(name, key, value string) TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\tms := s.RootModule()\n\t\trs, ok := ms.Resources[name]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", name)\n\t\t}\n\n\t\tis := rs.Primary\n\t\tif is == nil {\n\t\t\treturn fmt.Errorf(\"No primary instance: %s\", name)\n\t\t}\n\n\t\tif is.Attributes[key] != value {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"%s: Attribute '%s' expected %#v, got %#v\",\n\t\t\t\tname,\n\t\t\t\tkey,\n\t\t\t\tvalue,\n\t\t\t\tis.Attributes[key])\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\nfunc TestMatchResourceAttr(name, key string, r *regexp.Regexp) TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\tms := s.RootModule()\n\t\trs, ok := ms.Resources[name]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", name)\n\t\t}\n\n\t\tis := rs.Primary\n\t\tif is == nil {\n\t\t\treturn fmt.Errorf(\"No primary instance: %s\", name)\n\t\t}\n\n\t\tif !r.MatchString(is.Attributes[key]) {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"%s: Attribute '%s' didn't match %q, got %#v\",\n\t\t\t\tname,\n\t\t\t\tkey,\n\t\t\t\tr.String(),\n\t\t\t\tis.Attributes[key])\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\n\/\/ TestCheckResourceAttrPtr is like TestCheckResourceAttr except the\n\/\/ value is a pointer so that it can be updated while the test is running.\n\/\/ It will only be dereferenced at the point this step is run.\nfunc TestCheckResourceAttrPtr(name string, key string, value *string) TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\treturn TestCheckResourceAttr(name, key, *value)(s)\n\t}\n}\n\n\/\/ TestT is the interface used to handle the test lifecycle of a test.\n\/\/\n\/\/ Users should just use a *testing.T object, which implements this.\ntype TestT interface {\n\tError(args ...interface{})\n\tFatal(args ...interface{})\n\tSkip(args ...interface{})\n}\n\n\/\/ This is set to true by unit tests to alter some behavior\nvar testTesting = false\n<|endoftext|>"} {"text":"<commit_before>0ddd19ae-2e56-11e5-9284-b827eb9e62be<commit_msg>0de265a8-2e56-11e5-9284-b827eb9e62be<commit_after>0de265a8-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>a6170df8-2e54-11e5-9284-b827eb9e62be<commit_msg>a61c36fc-2e54-11e5-9284-b827eb9e62be<commit_after>a61c36fc-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>0c44400e-2e56-11e5-9284-b827eb9e62be<commit_msg>0c49b372-2e56-11e5-9284-b827eb9e62be<commit_after>0c49b372-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>318ab036-2e57-11e5-9284-b827eb9e62be<commit_msg>318fd2f0-2e57-11e5-9284-b827eb9e62be<commit_after>318fd2f0-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>0d9a37d4-2e55-11e5-9284-b827eb9e62be<commit_msg>0d9f8a36-2e55-11e5-9284-b827eb9e62be<commit_after>0d9f8a36-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>cd1ec6de-2e54-11e5-9284-b827eb9e62be<commit_msg>cd24027a-2e54-11e5-9284-b827eb9e62be<commit_after>cd24027a-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>c7f49a6c-2e54-11e5-9284-b827eb9e62be<commit_msg>c7f9c9a6-2e54-11e5-9284-b827eb9e62be<commit_after>c7f9c9a6-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>c079870a-2e56-11e5-9284-b827eb9e62be<commit_msg>c07ea5f0-2e56-11e5-9284-b827eb9e62be<commit_after>c07ea5f0-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>0b619b3c-2e56-11e5-9284-b827eb9e62be<commit_msg>0b66e8ee-2e56-11e5-9284-b827eb9e62be<commit_after>0b66e8ee-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>1094f136-2e55-11e5-9284-b827eb9e62be<commit_msg>109a7732-2e55-11e5-9284-b827eb9e62be<commit_after>109a7732-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>b7b67e40-2e54-11e5-9284-b827eb9e62be<commit_msg>b7bbb798-2e54-11e5-9284-b827eb9e62be<commit_after>b7bbb798-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>c1e68f18-2e54-11e5-9284-b827eb9e62be<commit_msg>c1ebdce8-2e54-11e5-9284-b827eb9e62be<commit_after>c1ebdce8-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>3400a056-2e56-11e5-9284-b827eb9e62be<commit_msg>34060118-2e56-11e5-9284-b827eb9e62be<commit_after>34060118-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>0f66c580-2e57-11e5-9284-b827eb9e62be<commit_msg>0f6bfdd4-2e57-11e5-9284-b827eb9e62be<commit_after>0f6bfdd4-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>7442069c-2e55-11e5-9284-b827eb9e62be<commit_msg>74472db6-2e55-11e5-9284-b827eb9e62be<commit_after>74472db6-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>0606bd44-2e55-11e5-9284-b827eb9e62be<commit_msg>060c0f42-2e55-11e5-9284-b827eb9e62be<commit_after>060c0f42-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>a65f5464-2e54-11e5-9284-b827eb9e62be<commit_msg>a66465d0-2e54-11e5-9284-b827eb9e62be<commit_after>a66465d0-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>ee0676a6-2e56-11e5-9284-b827eb9e62be<commit_msg>ee0ba446-2e56-11e5-9284-b827eb9e62be<commit_after>ee0ba446-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>1c5f7932-2e55-11e5-9284-b827eb9e62be<commit_msg>1c64cb1c-2e55-11e5-9284-b827eb9e62be<commit_after>1c64cb1c-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>90073668-2e55-11e5-9284-b827eb9e62be<commit_msg>900c5648-2e55-11e5-9284-b827eb9e62be<commit_after>900c5648-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>2b9c7dfe-2e56-11e5-9284-b827eb9e62be<commit_msg>2ba1f8b0-2e56-11e5-9284-b827eb9e62be<commit_after>2ba1f8b0-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>d60c688c-2e54-11e5-9284-b827eb9e62be<commit_msg>d6118c2c-2e54-11e5-9284-b827eb9e62be<commit_after>d6118c2c-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>b0edf40c-2e55-11e5-9284-b827eb9e62be<commit_msg>b0f31978-2e55-11e5-9284-b827eb9e62be<commit_after>b0f31978-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>41cd8fd8-2e55-11e5-9284-b827eb9e62be<commit_msg>41d2bbde-2e55-11e5-9284-b827eb9e62be<commit_after>41d2bbde-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>eb00dbf4-2e56-11e5-9284-b827eb9e62be<commit_msg>eb060d5e-2e56-11e5-9284-b827eb9e62be<commit_after>eb060d5e-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>c1231e78-2e56-11e5-9284-b827eb9e62be<commit_msg>c12838d6-2e56-11e5-9284-b827eb9e62be<commit_after>c12838d6-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>226fd31a-2e57-11e5-9284-b827eb9e62be<commit_msg>227511ea-2e57-11e5-9284-b827eb9e62be<commit_after>227511ea-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>ea610c56-2e55-11e5-9284-b827eb9e62be<commit_msg>ea6631b8-2e55-11e5-9284-b827eb9e62be<commit_after>ea6631b8-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>7ad72be0-2e55-11e5-9284-b827eb9e62be<commit_msg>7adc6380-2e55-11e5-9284-b827eb9e62be<commit_after>7adc6380-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>61ecd836-2e56-11e5-9284-b827eb9e62be<commit_msg>61f1ed4e-2e56-11e5-9284-b827eb9e62be<commit_after>61f1ed4e-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>fce549e6-2e55-11e5-9284-b827eb9e62be<commit_msg>fceabcd2-2e55-11e5-9284-b827eb9e62be<commit_after>fceabcd2-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>25960f5c-2e55-11e5-9284-b827eb9e62be<commit_msg>259b39aa-2e55-11e5-9284-b827eb9e62be<commit_after>259b39aa-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>cf48105e-2e55-11e5-9284-b827eb9e62be<commit_msg>cf4d3458-2e55-11e5-9284-b827eb9e62be<commit_after>cf4d3458-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>479fdd62-2e55-11e5-9284-b827eb9e62be<commit_msg>47a5263c-2e55-11e5-9284-b827eb9e62be<commit_after>47a5263c-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>fc6f6dde-2e55-11e5-9284-b827eb9e62be<commit_msg>fc74fcf4-2e55-11e5-9284-b827eb9e62be<commit_after>fc74fcf4-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>d8ff8096-2e55-11e5-9284-b827eb9e62be<commit_msg>d904a224-2e55-11e5-9284-b827eb9e62be<commit_after>d904a224-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>a854c934-2e54-11e5-9284-b827eb9e62be<commit_msg>a85a007a-2e54-11e5-9284-b827eb9e62be<commit_after>a85a007a-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f7f0614c-2e54-11e5-9284-b827eb9e62be<commit_msg>f7f59536-2e54-11e5-9284-b827eb9e62be<commit_after>f7f59536-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>371ed11e-2e55-11e5-9284-b827eb9e62be<commit_msg>3724036e-2e55-11e5-9284-b827eb9e62be<commit_after>3724036e-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>a86d8798-2e55-11e5-9284-b827eb9e62be<commit_msg>a872a020-2e55-11e5-9284-b827eb9e62be<commit_after>a872a020-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f2b9229e-2e55-11e5-9284-b827eb9e62be<commit_msg>f2be6c72-2e55-11e5-9284-b827eb9e62be<commit_after>f2be6c72-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>2fdb03aa-2e55-11e5-9284-b827eb9e62be<commit_msg>2fe03b5e-2e55-11e5-9284-b827eb9e62be<commit_after>2fe03b5e-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>aa9c9e1a-2e54-11e5-9284-b827eb9e62be<commit_msg>aaa1bcc4-2e54-11e5-9284-b827eb9e62be<commit_after>aaa1bcc4-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>91bfdb6c-2e56-11e5-9284-b827eb9e62be<commit_msg>91c4f8a4-2e56-11e5-9284-b827eb9e62be<commit_after>91c4f8a4-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>2447ef20-2e56-11e5-9284-b827eb9e62be<commit_msg>244d2e04-2e56-11e5-9284-b827eb9e62be<commit_after>244d2e04-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f6afd560-2e54-11e5-9284-b827eb9e62be<commit_msg>f6b50814-2e54-11e5-9284-b827eb9e62be<commit_after>f6b50814-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017-present The Hugo Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package releaser implements a set of utilities and a wrapper around Goreleaser\n\/\/ to help automate the Hugo release process.\npackage releaser\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\nconst (\n\tissueLinkTemplate = \"[#%d](https:\/\/github.com\/gohugoio\/hugo\/issues\/%d)\"\n\tlinkTemplate = \"[%s](%s)\"\n\treleaseNotesMarkdownTemplate = `\n{{- $patchRelease := isPatch . -}}\n{{- $contribsPerAuthor := .All.ContribCountPerAuthor -}}\n{{- $docsContribsPerAuthor := .Docs.ContribCountPerAuthor -}}\n{{- if $patchRelease }}\n{{ if eq (len .All) 1 }}\nThis is a bug-fix release with one important fix.\n{{ else }}\nThis is a bug-fix release with a couple of important fixes.\n{{ end }}\n{{ else }}\nThis release represents **{{ len .All }} contributions by {{ len $contribsPerAuthor }} contributors** to the main Hugo code base.\n{{ end -}}\n\n{{- if gt (len $contribsPerAuthor) 3 -}}\n{{- $u1 := index $contribsPerAuthor 0 -}}\n{{- $u2 := index $contribsPerAuthor 1 -}}\n{{- $u3 := index $contribsPerAuthor 2 -}}\n{{- $u4 := index $contribsPerAuthor 3 -}}\n{{- $u1.AuthorLink }} leads the Hugo development with a significant amount of contributions, but also a big shoutout to {{ $u2.AuthorLink }}, {{ $u3.AuthorLink }}, and {{ $u4.AuthorLink }} for their ongoing contributions.\nAnd a big thanks to [@digitalcraftsman](https:\/\/github.com\/digitalcraftsman) and [@onedrawingperday](https:\/\/github.com\/onedrawingperday) for their relentless work on keeping the themes site in pristine condition and to [@kaushalmodi](https:\/\/github.com\/kaushalmodi) for his great work on the documentation site.\n{{ end }}\n{{- if not $patchRelease }}\nMany have also been busy writing and fixing the documentation in [hugoDocs](https:\/\/github.com\/gohugoio\/hugoDocs), \nwhich has received **{{ len .Docs }} contributions by {{ len $docsContribsPerAuthor }} contributors**.\n{{- if gt (len $docsContribsPerAuthor) 3 -}}\n{{- $u1 := index $docsContribsPerAuthor 0 -}}\n{{- $u2 := index $docsContribsPerAuthor 1 -}}\n{{- $u3 := index $docsContribsPerAuthor 2 -}}\n{{- $u4 := index $docsContribsPerAuthor 3 }} A special thanks to {{ $u1.AuthorLink }}, {{ $u2.AuthorLink }}, {{ $u3.AuthorLink }}, and {{ $u4.AuthorLink }} for their work on the documentation site.\n{{ end }}\n{{ end }}\nHugo now has:\n\n{{ with .Repo -}}\n* {{ .Stars }}+ [stars](https:\/\/github.com\/gohugoio\/hugo\/stargazers)\n* {{ len .Contributors }}+ [contributors](https:\/\/github.com\/gohugoio\/hugo\/graphs\/contributors)\n{{- end -}}\n{{ with .ThemeCount }}\n* {{ . }}+ [themes](http:\/\/themes.gohugo.io\/)\n{{ end }}\n{{ with .Notes }}\n## Notes\n{{ template \"change-section\" . }}\n{{- end -}}\n## Enhancements\n{{ template \"change-headers\" .Enhancements -}}\n## Fixes\n{{ template \"change-headers\" .Fixes -}}\n\n{{ define \"change-headers\" }}\n{{ $tmplChanges := index . \"templateChanges\" -}}\n{{- $outChanges := index . \"outChanges\" -}}\n{{- $coreChanges := index . \"coreChanges\" -}}\n{{- $otherChanges := index . \"otherChanges\" -}}\n{{- with $tmplChanges -}}\n### Templates\n{{ template \"change-section\" . }}\n{{- end -}}\n{{- with $outChanges -}}\n### Output\n{{ template \"change-section\" . }}\n{{- end -}}\n{{- with $coreChanges -}}\n### Core\n{{ template \"change-section\" . }}\n{{- end -}}\n{{- with $otherChanges -}}\n### Other\n{{ template \"change-section\" . }}\n{{- end -}}\n{{ end }}\n\n\n{{ define \"change-section\" }}\n{{ range . }}\n{{- if .GitHubCommit -}}\n* {{ .Subject }} {{ . | commitURL }} {{ . | authorURL }} {{ range .Issues }}{{ . | issue }}{{ end }}\n{{ else -}}\n* {{ .Subject }} {{ range .Issues }}{{ . | issue }}{{ end }}\n{{ end -}}\n{{- end }}\n{{ end }}\n`\n)\n\nvar templateFuncs = template.FuncMap{\n\t\"isPatch\": func(c changeLog) bool {\n\t\treturn !strings.HasSuffix(c.Version, \"0\")\n\t},\n\t\"issue\": func(id int) string {\n\t\treturn fmt.Sprintf(issueLinkTemplate, id, id)\n\t},\n\t\"commitURL\": func(info gitInfo) string {\n\t\tif info.GitHubCommit.HTMLURL == \"\" {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn fmt.Sprintf(linkTemplate, info.Hash, info.GitHubCommit.HTMLURL)\n\t},\n\t\"authorURL\": func(info gitInfo) string {\n\t\tif info.GitHubCommit.Author.Login == \"\" {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn fmt.Sprintf(linkTemplate, \"@\"+info.GitHubCommit.Author.Login, info.GitHubCommit.Author.HTMLURL)\n\t},\n}\n\nfunc writeReleaseNotes(version string, infosMain, infosDocs gitInfos, to io.Writer) error {\n\tclient := newGitHubAPI(\"hugo\")\n\tchanges := gitInfosToChangeLog(infosMain, infosDocs)\n\tchanges.Version = version\n\trepo, err := client.fetchRepo()\n\tif err == nil {\n\t\tchanges.Repo = &repo\n\t}\n\tthemeCount, err := fetchThemeCount()\n\tif err == nil {\n\t\tchanges.ThemeCount = themeCount\n\t}\n\n\ttmpl, err := template.New(\"\").Funcs(templateFuncs).Parse(releaseNotesMarkdownTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = tmpl.Execute(to, changes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n\n}\n\nfunc fetchThemeCount() (int, error) {\n\tresp, err := http.Get(\"https:\/\/raw.githubusercontent.com\/gohugoio\/hugoThemes\/master\/.gitmodules\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer resp.Body.Close()\n\n\tb, _ := ioutil.ReadAll(resp.Body)\n\treturn bytes.Count(b, []byte(\"submodule\")), nil\n}\n\nfunc writeReleaseNotesToTmpFile(version string, infosMain, infosDocs gitInfos) (string, error) {\n\tf, err := ioutil.TempFile(\"\", \"hugorelease\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer f.Close()\n\n\tif err := writeReleaseNotes(version, infosMain, infosDocs, f); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn f.Name(), nil\n}\n\nfunc getReleaseNotesDocsTempDirAndName(version string, final bool) (string, string) {\n\tif final {\n\t\treturn hugoFilepath(\"temp\"), fmt.Sprintf(\"%s-relnotes-ready.md\", version)\n\t}\n\treturn hugoFilepath(\"temp\"), fmt.Sprintf(\"%s-relnotes.md\", version)\n}\n\nfunc getReleaseNotesDocsTempFilename(version string, final bool) string {\n\treturn filepath.Join(getReleaseNotesDocsTempDirAndName(version, final))\n}\n\nfunc (r *ReleaseHandler) releaseNotesState(version string) (releaseNotesState, error) {\n\tdocsTempPath, name := getReleaseNotesDocsTempDirAndName(version, false)\n\t_, err := os.Stat(filepath.Join(docsTempPath, name))\n\n\tif err == nil {\n\t\treturn releaseNotesCreated, nil\n\t}\n\n\tdocsTempPath, name = getReleaseNotesDocsTempDirAndName(version, true)\n\t_, err = os.Stat(filepath.Join(docsTempPath, name))\n\n\tif err == nil {\n\t\treturn releaseNotesReady, nil\n\t}\n\n\tif !os.IsNotExist(err) {\n\t\treturn releaseNotesNone, err\n\t}\n\n\treturn releaseNotesNone, nil\n\n}\n\nfunc (r *ReleaseHandler) writeReleaseNotesToTemp(version string, infosMain, infosDocs gitInfos) (string, error) {\n\n\tdocsTempPath, name := getReleaseNotesDocsTempDirAndName(version, false)\n\n\tvar (\n\t\tw io.WriteCloser\n\t)\n\n\tif !r.try {\n\t\tos.Mkdir(docsTempPath, os.ModePerm)\n\n\t\tf, err := os.Create(filepath.Join(docsTempPath, name))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tname = f.Name()\n\n\t\tdefer f.Close()\n\n\t\tw = f\n\n\t} else {\n\t\tw = os.Stdout\n\t}\n\n\tif err := writeReleaseNotes(version, infosMain, infosDocs, w); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn name, nil\n\n}\n\nfunc (r *ReleaseHandler) writeReleaseNotesToDocs(title, sourceFilename string) (string, error) {\n\ttargetFilename := \"index.md\"\n\tbundleDir := strings.TrimSuffix(filepath.Base(sourceFilename), \"-ready.md\")\n\tcontentDir := hugoFilepath(\"docs\/content\/en\/news\/\" + bundleDir)\n\ttargetFullFilename := filepath.Join(contentDir, targetFilename)\n\n\tif r.try {\n\t\tfmt.Printf(\"Write release notes to \/docs: Bundle %q Dir: %q\\n\", bundleDir, contentDir)\n\t\treturn targetFullFilename, nil\n\t}\n\n\tif err := os.MkdirAll(contentDir, os.ModePerm); err != nil {\n\t\treturn \"\", nil\n\t}\n\n\tb, err := ioutil.ReadFile(sourceFilename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tf, err := os.Create(targetFullFilename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer f.Close()\n\n\tfmTail := \"\"\n\tif strings.Count(title, \".\") > 1 {\n\t\t\/\/ Bug fix release\n\t\tfmTail = `\nimages:\n- images\/blog\/hugo-bug-poster.png\n`\n\t}\n\n\tif _, err := f.WriteString(fmt.Sprintf(`\n---\ndate: %s\ntitle: %q\ndescription: %q\ncategories: [\"Releases\"]%s\n---\n\n\t`, time.Now().Format(\"2006-01-02\"), title, title, fmTail)); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif _, err := f.Write(b); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn targetFullFilename, nil\n\n}\n<commit_msg>releaser: Adjust patch vs images logic<commit_after>\/\/ Copyright 2017-present The Hugo Authors. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Package releaser implements a set of utilities and a wrapper around Goreleaser\n\/\/ to help automate the Hugo release process.\npackage releaser\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\nconst (\n\tissueLinkTemplate = \"[#%d](https:\/\/github.com\/gohugoio\/hugo\/issues\/%d)\"\n\tlinkTemplate = \"[%s](%s)\"\n\treleaseNotesMarkdownTemplate = `\n{{- $patchRelease := isPatch . -}}\n{{- $contribsPerAuthor := .All.ContribCountPerAuthor -}}\n{{- $docsContribsPerAuthor := .Docs.ContribCountPerAuthor -}}\n{{- if $patchRelease }}\n{{ if eq (len .All) 1 }}\nThis is a bug-fix release with one important fix.\n{{ else }}\nThis is a bug-fix release with a couple of important fixes.\n{{ end }}\n{{ else }}\nThis release represents **{{ len .All }} contributions by {{ len $contribsPerAuthor }} contributors** to the main Hugo code base.\n{{ end -}}\n\n{{- if gt (len $contribsPerAuthor) 3 -}}\n{{- $u1 := index $contribsPerAuthor 0 -}}\n{{- $u2 := index $contribsPerAuthor 1 -}}\n{{- $u3 := index $contribsPerAuthor 2 -}}\n{{- $u4 := index $contribsPerAuthor 3 -}}\n{{- $u1.AuthorLink }} leads the Hugo development with a significant amount of contributions, but also a big shoutout to {{ $u2.AuthorLink }}, {{ $u3.AuthorLink }}, and {{ $u4.AuthorLink }} for their ongoing contributions.\nAnd a big thanks to [@digitalcraftsman](https:\/\/github.com\/digitalcraftsman) and [@onedrawingperday](https:\/\/github.com\/onedrawingperday) for their relentless work on keeping the themes site in pristine condition and to [@kaushalmodi](https:\/\/github.com\/kaushalmodi) for his great work on the documentation site.\n{{ end }}\n{{- if not $patchRelease }}\nMany have also been busy writing and fixing the documentation in [hugoDocs](https:\/\/github.com\/gohugoio\/hugoDocs), \nwhich has received **{{ len .Docs }} contributions by {{ len $docsContribsPerAuthor }} contributors**.\n{{- if gt (len $docsContribsPerAuthor) 3 -}}\n{{- $u1 := index $docsContribsPerAuthor 0 -}}\n{{- $u2 := index $docsContribsPerAuthor 1 -}}\n{{- $u3 := index $docsContribsPerAuthor 2 -}}\n{{- $u4 := index $docsContribsPerAuthor 3 }} A special thanks to {{ $u1.AuthorLink }}, {{ $u2.AuthorLink }}, {{ $u3.AuthorLink }}, and {{ $u4.AuthorLink }} for their work on the documentation site.\n{{ end }}\n{{ end }}\nHugo now has:\n\n{{ with .Repo -}}\n* {{ .Stars }}+ [stars](https:\/\/github.com\/gohugoio\/hugo\/stargazers)\n* {{ len .Contributors }}+ [contributors](https:\/\/github.com\/gohugoio\/hugo\/graphs\/contributors)\n{{- end -}}\n{{ with .ThemeCount }}\n* {{ . }}+ [themes](http:\/\/themes.gohugo.io\/)\n{{ end }}\n{{ with .Notes }}\n## Notes\n{{ template \"change-section\" . }}\n{{- end -}}\n## Enhancements\n{{ template \"change-headers\" .Enhancements -}}\n## Fixes\n{{ template \"change-headers\" .Fixes -}}\n\n{{ define \"change-headers\" }}\n{{ $tmplChanges := index . \"templateChanges\" -}}\n{{- $outChanges := index . \"outChanges\" -}}\n{{- $coreChanges := index . \"coreChanges\" -}}\n{{- $otherChanges := index . \"otherChanges\" -}}\n{{- with $tmplChanges -}}\n### Templates\n{{ template \"change-section\" . }}\n{{- end -}}\n{{- with $outChanges -}}\n### Output\n{{ template \"change-section\" . }}\n{{- end -}}\n{{- with $coreChanges -}}\n### Core\n{{ template \"change-section\" . }}\n{{- end -}}\n{{- with $otherChanges -}}\n### Other\n{{ template \"change-section\" . }}\n{{- end -}}\n{{ end }}\n\n\n{{ define \"change-section\" }}\n{{ range . }}\n{{- if .GitHubCommit -}}\n* {{ .Subject }} {{ . | commitURL }} {{ . | authorURL }} {{ range .Issues }}{{ . | issue }}{{ end }}\n{{ else -}}\n* {{ .Subject }} {{ range .Issues }}{{ . | issue }}{{ end }}\n{{ end -}}\n{{- end }}\n{{ end }}\n`\n)\n\nvar templateFuncs = template.FuncMap{\n\t\"isPatch\": func(c changeLog) bool {\n\t\treturn !strings.HasSuffix(c.Version, \"0\")\n\t},\n\t\"issue\": func(id int) string {\n\t\treturn fmt.Sprintf(issueLinkTemplate, id, id)\n\t},\n\t\"commitURL\": func(info gitInfo) string {\n\t\tif info.GitHubCommit.HTMLURL == \"\" {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn fmt.Sprintf(linkTemplate, info.Hash, info.GitHubCommit.HTMLURL)\n\t},\n\t\"authorURL\": func(info gitInfo) string {\n\t\tif info.GitHubCommit.Author.Login == \"\" {\n\t\t\treturn \"\"\n\t\t}\n\t\treturn fmt.Sprintf(linkTemplate, \"@\"+info.GitHubCommit.Author.Login, info.GitHubCommit.Author.HTMLURL)\n\t},\n}\n\nfunc writeReleaseNotes(version string, infosMain, infosDocs gitInfos, to io.Writer) error {\n\tclient := newGitHubAPI(\"hugo\")\n\tchanges := gitInfosToChangeLog(infosMain, infosDocs)\n\tchanges.Version = version\n\trepo, err := client.fetchRepo()\n\tif err == nil {\n\t\tchanges.Repo = &repo\n\t}\n\tthemeCount, err := fetchThemeCount()\n\tif err == nil {\n\t\tchanges.ThemeCount = themeCount\n\t}\n\n\ttmpl, err := template.New(\"\").Funcs(templateFuncs).Parse(releaseNotesMarkdownTemplate)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = tmpl.Execute(to, changes)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n\n}\n\nfunc fetchThemeCount() (int, error) {\n\tresp, err := http.Get(\"https:\/\/raw.githubusercontent.com\/gohugoio\/hugoThemes\/master\/.gitmodules\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer resp.Body.Close()\n\n\tb, _ := ioutil.ReadAll(resp.Body)\n\treturn bytes.Count(b, []byte(\"submodule\")), nil\n}\n\nfunc writeReleaseNotesToTmpFile(version string, infosMain, infosDocs gitInfos) (string, error) {\n\tf, err := ioutil.TempFile(\"\", \"hugorelease\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer f.Close()\n\n\tif err := writeReleaseNotes(version, infosMain, infosDocs, f); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn f.Name(), nil\n}\n\nfunc getReleaseNotesDocsTempDirAndName(version string, final bool) (string, string) {\n\tif final {\n\t\treturn hugoFilepath(\"temp\"), fmt.Sprintf(\"%s-relnotes-ready.md\", version)\n\t}\n\treturn hugoFilepath(\"temp\"), fmt.Sprintf(\"%s-relnotes.md\", version)\n}\n\nfunc getReleaseNotesDocsTempFilename(version string, final bool) string {\n\treturn filepath.Join(getReleaseNotesDocsTempDirAndName(version, final))\n}\n\nfunc (r *ReleaseHandler) releaseNotesState(version string) (releaseNotesState, error) {\n\tdocsTempPath, name := getReleaseNotesDocsTempDirAndName(version, false)\n\t_, err := os.Stat(filepath.Join(docsTempPath, name))\n\n\tif err == nil {\n\t\treturn releaseNotesCreated, nil\n\t}\n\n\tdocsTempPath, name = getReleaseNotesDocsTempDirAndName(version, true)\n\t_, err = os.Stat(filepath.Join(docsTempPath, name))\n\n\tif err == nil {\n\t\treturn releaseNotesReady, nil\n\t}\n\n\tif !os.IsNotExist(err) {\n\t\treturn releaseNotesNone, err\n\t}\n\n\treturn releaseNotesNone, nil\n\n}\n\nfunc (r *ReleaseHandler) writeReleaseNotesToTemp(version string, infosMain, infosDocs gitInfos) (string, error) {\n\n\tdocsTempPath, name := getReleaseNotesDocsTempDirAndName(version, false)\n\n\tvar (\n\t\tw io.WriteCloser\n\t)\n\n\tif !r.try {\n\t\tos.Mkdir(docsTempPath, os.ModePerm)\n\n\t\tf, err := os.Create(filepath.Join(docsTempPath, name))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tname = f.Name()\n\n\t\tdefer f.Close()\n\n\t\tw = f\n\n\t} else {\n\t\tw = os.Stdout\n\t}\n\n\tif err := writeReleaseNotes(version, infosMain, infosDocs, w); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn name, nil\n\n}\n\nfunc (r *ReleaseHandler) writeReleaseNotesToDocs(title, sourceFilename string) (string, error) {\n\ttargetFilename := \"index.md\"\n\tbundleDir := strings.TrimSuffix(filepath.Base(sourceFilename), \"-ready.md\")\n\tcontentDir := hugoFilepath(\"docs\/content\/en\/news\/\" + bundleDir)\n\ttargetFullFilename := filepath.Join(contentDir, targetFilename)\n\n\tif r.try {\n\t\tfmt.Printf(\"Write release notes to \/docs: Bundle %q Dir: %q\\n\", bundleDir, contentDir)\n\t\treturn targetFullFilename, nil\n\t}\n\n\tif err := os.MkdirAll(contentDir, os.ModePerm); err != nil {\n\t\treturn \"\", nil\n\t}\n\n\tb, err := ioutil.ReadFile(sourceFilename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tf, err := os.Create(targetFullFilename)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer f.Close()\n\n\tfmTail := \"\"\n\tif !strings.HasSuffix(title, \".0\") {\n\t\t\/\/ Bug fix release\n\t\tfmTail = `\nimages:\n- images\/blog\/hugo-bug-poster.png\n`\n\t}\n\n\tif _, err := f.WriteString(fmt.Sprintf(`\n---\ndate: %s\ntitle: %q\ndescription: %q\ncategories: [\"Releases\"]%s\n---\n\n\t`, time.Now().Format(\"2006-01-02\"), title, title, fmTail)); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif _, err := f.Write(b); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn targetFullFilename, nil\n\n}\n<|endoftext|>"} {"text":"<commit_before>3b58fa70-2e55-11e5-9284-b827eb9e62be<commit_msg>3b5e2e78-2e55-11e5-9284-b827eb9e62be<commit_after>3b5e2e78-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>073b52cc-2e57-11e5-9284-b827eb9e62be<commit_msg>0740bbcc-2e57-11e5-9284-b827eb9e62be<commit_after>0740bbcc-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>e607c914-2e56-11e5-9284-b827eb9e62be<commit_msg>e60cdd5a-2e56-11e5-9284-b827eb9e62be<commit_after>e60cdd5a-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>a019743e-2e56-11e5-9284-b827eb9e62be<commit_msg>a01e9964-2e56-11e5-9284-b827eb9e62be<commit_after>a01e9964-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>7ac27060-2e55-11e5-9284-b827eb9e62be<commit_msg>7ac79e64-2e55-11e5-9284-b827eb9e62be<commit_after>7ac79e64-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>cad70350-2e54-11e5-9284-b827eb9e62be<commit_msg>cadc305a-2e54-11e5-9284-b827eb9e62be<commit_after>cadc305a-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f37c2e0a-2e56-11e5-9284-b827eb9e62be<commit_msg>f38147dc-2e56-11e5-9284-b827eb9e62be<commit_after>f38147dc-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>e0224e34-2e56-11e5-9284-b827eb9e62be<commit_msg>e0277c24-2e56-11e5-9284-b827eb9e62be<commit_after>e0277c24-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>be430ed6-2e54-11e5-9284-b827eb9e62be<commit_msg>be485e40-2e54-11e5-9284-b827eb9e62be<commit_after>be485e40-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>6aec91ec-2e56-11e5-9284-b827eb9e62be<commit_msg>6af1dee0-2e56-11e5-9284-b827eb9e62be<commit_after>6af1dee0-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>853932da-2e56-11e5-9284-b827eb9e62be<commit_msg>853e4748-2e56-11e5-9284-b827eb9e62be<commit_after>853e4748-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>b02f4ace-2e56-11e5-9284-b827eb9e62be<commit_msg>b0346536-2e56-11e5-9284-b827eb9e62be<commit_after>b0346536-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>8ea31f8a-2e55-11e5-9284-b827eb9e62be<commit_msg>8ea83880-2e55-11e5-9284-b827eb9e62be<commit_after>8ea83880-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f327f88c-2e54-11e5-9284-b827eb9e62be<commit_msg>f32d2c62-2e54-11e5-9284-b827eb9e62be<commit_after>f32d2c62-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>d090d2ca-2e55-11e5-9284-b827eb9e62be<commit_msg>d0960560-2e55-11e5-9284-b827eb9e62be<commit_after>d0960560-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>7ee090f4-2e56-11e5-9284-b827eb9e62be<commit_msg>7ee5c4b6-2e56-11e5-9284-b827eb9e62be<commit_after>7ee5c4b6-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f7bb2bca-2e55-11e5-9284-b827eb9e62be<commit_msg>f7c0684c-2e55-11e5-9284-b827eb9e62be<commit_after>f7c0684c-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>1c811e74-2e56-11e5-9284-b827eb9e62be<commit_msg>1c9fad4e-2e56-11e5-9284-b827eb9e62be<commit_after>1c9fad4e-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>93de6190-2e54-11e5-9284-b827eb9e62be<commit_msg>93e37c34-2e54-11e5-9284-b827eb9e62be<commit_after>93e37c34-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>be235302-2e54-11e5-9284-b827eb9e62be<commit_msg>be288ade-2e54-11e5-9284-b827eb9e62be<commit_after>be288ade-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>618a31be-2e55-11e5-9284-b827eb9e62be<commit_msg>618f4442-2e55-11e5-9284-b827eb9e62be<commit_after>618f4442-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>0b815be2-2e57-11e5-9284-b827eb9e62be<commit_msg>0b869968-2e57-11e5-9284-b827eb9e62be<commit_after>0b869968-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>1f5f75ce-2e55-11e5-9284-b827eb9e62be<commit_msg>1f64bf7a-2e55-11e5-9284-b827eb9e62be<commit_after>1f64bf7a-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>3fa94788-2e55-11e5-9284-b827eb9e62be<commit_msg>3fae6e16-2e55-11e5-9284-b827eb9e62be<commit_after>3fae6e16-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f916a248-2e54-11e5-9284-b827eb9e62be<commit_msg>f91c001c-2e54-11e5-9284-b827eb9e62be<commit_after>f91c001c-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>9f2b0912-2e55-11e5-9284-b827eb9e62be<commit_msg>9f303d92-2e55-11e5-9284-b827eb9e62be<commit_after>9f303d92-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>daea644e-2e54-11e5-9284-b827eb9e62be<commit_msg>daef9b12-2e54-11e5-9284-b827eb9e62be<commit_after>daef9b12-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>c85787bc-2e54-11e5-9284-b827eb9e62be<commit_msg>c85cbbec-2e54-11e5-9284-b827eb9e62be<commit_after>c85cbbec-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>2ef1d166-2e56-11e5-9284-b827eb9e62be<commit_msg>2ef7041a-2e56-11e5-9284-b827eb9e62be<commit_after>2ef7041a-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>7cb532e4-2e56-11e5-9284-b827eb9e62be<commit_msg>7cba617e-2e56-11e5-9284-b827eb9e62be<commit_after>7cba617e-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>1d917b8c-2e57-11e5-9284-b827eb9e62be<commit_msg>1d96aa3a-2e57-11e5-9284-b827eb9e62be<commit_after>1d96aa3a-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>efcd5fba-2e54-11e5-9284-b827eb9e62be<commit_msg>efd2952a-2e54-11e5-9284-b827eb9e62be<commit_after>efd2952a-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f346ec82-2e55-11e5-9284-b827eb9e62be<commit_msg>f34c2670-2e55-11e5-9284-b827eb9e62be<commit_after>f34c2670-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>5894e3a0-2e56-11e5-9284-b827eb9e62be<commit_msg>589a0588-2e56-11e5-9284-b827eb9e62be<commit_after>589a0588-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>fead904a-2e54-11e5-9284-b827eb9e62be<commit_msg>feb2c394-2e54-11e5-9284-b827eb9e62be<commit_after>feb2c394-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>6be72cb0-2e56-11e5-9284-b827eb9e62be<commit_msg>6bec77ba-2e56-11e5-9284-b827eb9e62be<commit_after>6bec77ba-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>d70b4cce-2e56-11e5-9284-b827eb9e62be<commit_msg>d7106c9a-2e56-11e5-9284-b827eb9e62be<commit_after>d7106c9a-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>2e260784-2e56-11e5-9284-b827eb9e62be<commit_msg>2e2b4faa-2e56-11e5-9284-b827eb9e62be<commit_after>2e2b4faa-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>d460ce5a-2e55-11e5-9284-b827eb9e62be<commit_msg>d465e5f2-2e55-11e5-9284-b827eb9e62be<commit_after>d465e5f2-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f410b8ec-2e54-11e5-9284-b827eb9e62be<commit_msg>f42cca82-2e54-11e5-9284-b827eb9e62be<commit_after>f42cca82-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>e591ec4e-2e56-11e5-9284-b827eb9e62be<commit_msg>e5970904-2e56-11e5-9284-b827eb9e62be<commit_after>e5970904-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>1720d590-2e57-11e5-9284-b827eb9e62be<commit_msg>17264962-2e57-11e5-9284-b827eb9e62be<commit_after>17264962-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>045bf666-2e56-11e5-9284-b827eb9e62be<commit_msg>04612c26-2e56-11e5-9284-b827eb9e62be<commit_after>04612c26-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>bb33e90c-2e56-11e5-9284-b827eb9e62be<commit_msg>bb392354-2e56-11e5-9284-b827eb9e62be<commit_after>bb392354-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>db3d7ebc-2e55-11e5-9284-b827eb9e62be<commit_msg>db429ac8-2e55-11e5-9284-b827eb9e62be<commit_after>db429ac8-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>6e772624-2e56-11e5-9284-b827eb9e62be<commit_msg>6e7c415e-2e56-11e5-9284-b827eb9e62be<commit_after>6e7c415e-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>b9d2fdd8-2e55-11e5-9284-b827eb9e62be<commit_msg>b9d81f7a-2e55-11e5-9284-b827eb9e62be<commit_after>b9d81f7a-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f94476ae-2e55-11e5-9284-b827eb9e62be<commit_msg>f949b484-2e55-11e5-9284-b827eb9e62be<commit_after>f949b484-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>8e65d0da-2e55-11e5-9284-b827eb9e62be<commit_msg>8e6ae85e-2e55-11e5-9284-b827eb9e62be<commit_after>8e6ae85e-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>875c0574-2e56-11e5-9284-b827eb9e62be<commit_msg>87612856-2e56-11e5-9284-b827eb9e62be<commit_after>87612856-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>23f47350-2e55-11e5-9284-b827eb9e62be<commit_msg>23f9c7ba-2e55-11e5-9284-b827eb9e62be<commit_after>23f9c7ba-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>c4ed4008-2e54-11e5-9284-b827eb9e62be<commit_msg>c4f2c00a-2e54-11e5-9284-b827eb9e62be<commit_after>c4f2c00a-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>36a7409e-2e56-11e5-9284-b827eb9e62be<commit_msg>36acd20c-2e56-11e5-9284-b827eb9e62be<commit_after>36acd20c-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>9eb81b60-2e54-11e5-9284-b827eb9e62be<commit_msg>9ebd4054-2e54-11e5-9284-b827eb9e62be<commit_after>9ebd4054-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f2684a80-2e56-11e5-9284-b827eb9e62be<commit_msg>f26d613c-2e56-11e5-9284-b827eb9e62be<commit_after>f26d613c-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>399b0d4e-2e56-11e5-9284-b827eb9e62be<commit_msg>39a04340-2e56-11e5-9284-b827eb9e62be<commit_after>39a04340-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>eb3540d8-2e56-11e5-9284-b827eb9e62be<commit_msg>eb3aa244-2e56-11e5-9284-b827eb9e62be<commit_after>eb3aa244-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>aec8b4a0-2e55-11e5-9284-b827eb9e62be<commit_msg>aecdd2be-2e55-11e5-9284-b827eb9e62be<commit_after>aecdd2be-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>8e898bec-2e55-11e5-9284-b827eb9e62be<commit_msg>8e8ea5a0-2e55-11e5-9284-b827eb9e62be<commit_after>8e8ea5a0-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>5914e154-2e56-11e5-9284-b827eb9e62be<commit_msg>5919fd92-2e56-11e5-9284-b827eb9e62be<commit_after>5919fd92-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>53796440-2e56-11e5-9284-b827eb9e62be<commit_msg>537ea8a6-2e56-11e5-9284-b827eb9e62be<commit_after>537ea8a6-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>0fe1fc5c-2e55-11e5-9284-b827eb9e62be<commit_msg>0fe72fb0-2e55-11e5-9284-b827eb9e62be<commit_after>0fe72fb0-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>db61413a-2e55-11e5-9284-b827eb9e62be<commit_msg>db6656a2-2e55-11e5-9284-b827eb9e62be<commit_after>db6656a2-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>5787a128-2e56-11e5-9284-b827eb9e62be<commit_msg>578cb988-2e56-11e5-9284-b827eb9e62be<commit_after>578cb988-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>09908b82-2e57-11e5-9284-b827eb9e62be<commit_msg>0995a680-2e57-11e5-9284-b827eb9e62be<commit_after>0995a680-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>ba10bbf6-2e54-11e5-9284-b827eb9e62be<commit_msg>ba15e928-2e54-11e5-9284-b827eb9e62be<commit_after>ba15e928-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>43d2697a-2e55-11e5-9284-b827eb9e62be<commit_msg>43d7b1a0-2e55-11e5-9284-b827eb9e62be<commit_after>43d7b1a0-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>ff6af6b6-2e55-11e5-9284-b827eb9e62be<commit_msg>ff7032fc-2e55-11e5-9284-b827eb9e62be<commit_after>ff7032fc-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>cd8d0706-2e55-11e5-9284-b827eb9e62be<commit_msg>cd922d62-2e55-11e5-9284-b827eb9e62be<commit_after>cd922d62-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>a55b2f10-2e55-11e5-9284-b827eb9e62be<commit_msg>a560540e-2e55-11e5-9284-b827eb9e62be<commit_after>a560540e-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>0a712c7e-2e56-11e5-9284-b827eb9e62be<commit_msg>0a7681a6-2e56-11e5-9284-b827eb9e62be<commit_after>0a7681a6-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>aab35456-2e55-11e5-9284-b827eb9e62be<commit_msg>aab87242-2e55-11e5-9284-b827eb9e62be<commit_after>aab87242-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>b1120c02-2e55-11e5-9284-b827eb9e62be<commit_msg>b11725e8-2e55-11e5-9284-b827eb9e62be<commit_after>b11725e8-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>1b051402-2e55-11e5-9284-b827eb9e62be<commit_msg>1b0aadf4-2e55-11e5-9284-b827eb9e62be<commit_after>1b0aadf4-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>528dae92-2e56-11e5-9284-b827eb9e62be<commit_msg>5292e0ce-2e56-11e5-9284-b827eb9e62be<commit_after>5292e0ce-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>a214a9b2-2e55-11e5-9284-b827eb9e62be<commit_msg>a219d75c-2e55-11e5-9284-b827eb9e62be<commit_after>a219d75c-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>99f79a8c-2e55-11e5-9284-b827eb9e62be<commit_msg>99fcb31e-2e55-11e5-9284-b827eb9e62be<commit_after>99fcb31e-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>ce132a94-2e54-11e5-9284-b827eb9e62be<commit_msg>ce18474a-2e54-11e5-9284-b827eb9e62be<commit_after>ce18474a-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>d5425050-2e55-11e5-9284-b827eb9e62be<commit_msg>d547703a-2e55-11e5-9284-b827eb9e62be<commit_after>d547703a-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>e712110c-2e56-11e5-9284-b827eb9e62be<commit_msg>e7172be2-2e56-11e5-9284-b827eb9e62be<commit_after>e7172be2-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>bda1ecc6-2e55-11e5-9284-b827eb9e62be<commit_msg>bda70990-2e55-11e5-9284-b827eb9e62be<commit_after>bda70990-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>1cd04450-2e55-11e5-9284-b827eb9e62be<commit_msg>1cd59680-2e55-11e5-9284-b827eb9e62be<commit_after>1cd59680-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f24f8438-2e55-11e5-9284-b827eb9e62be<commit_msg>f255d770-2e55-11e5-9284-b827eb9e62be<commit_after>f255d770-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>eaf5fe92-2e55-11e5-9284-b827eb9e62be<commit_msg>eafb241c-2e55-11e5-9284-b827eb9e62be<commit_after>eafb241c-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>7a0aedb8-2e56-11e5-9284-b827eb9e62be<commit_msg>7a100e42-2e56-11e5-9284-b827eb9e62be<commit_after>7a100e42-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>b2d8f5d8-2e54-11e5-9284-b827eb9e62be<commit_msg>b2de1072-2e54-11e5-9284-b827eb9e62be<commit_after>b2de1072-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>d7fe4254-2e55-11e5-9284-b827eb9e62be<commit_msg>d8037abc-2e55-11e5-9284-b827eb9e62be<commit_after>d8037abc-2e55-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>e59a9a94-2e54-11e5-9284-b827eb9e62be<commit_msg>e59fae08-2e54-11e5-9284-b827eb9e62be<commit_after>e59fae08-2e54-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>f1e31482-2e56-11e5-9284-b827eb9e62be<commit_msg>f1e83110-2e56-11e5-9284-b827eb9e62be<commit_after>f1e83110-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>565da61c-2e56-11e5-9284-b827eb9e62be<commit_msg>5662f0c2-2e56-11e5-9284-b827eb9e62be<commit_after>5662f0c2-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>2850a606-2e57-11e5-9284-b827eb9e62be<commit_msg>2855c0aa-2e57-11e5-9284-b827eb9e62be<commit_after>2855c0aa-2e57-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>cd1cd87c-2e56-11e5-9284-b827eb9e62be<commit_msg>cd21ffdc-2e56-11e5-9284-b827eb9e62be<commit_after>cd21ffdc-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>81df7f86-2e56-11e5-9284-b827eb9e62be<commit_msg>81e4ae7a-2e56-11e5-9284-b827eb9e62be<commit_after>81e4ae7a-2e56-11e5-9284-b827eb9e62be<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Chihaya Authors. All rights reserved.\n\/\/ Use of this source code is governed by the BSD 2-Clause license,\n\/\/ which can be found in the LICENSE file.\n\npackage bytepool\n\nimport \"sync\"\n\n\/\/ BytePool is a cached pool of reusable byte slices.\ntype BytePool struct {\n\tsync.Pool\n}\n\n\/\/ New allocates a new BytePool with slices of the provided capacity.\nfunc New(length, capacity int) *BytePool {\n\tvar bp BytePool\n\tbp.Pool.New = func() interface{} {\n\t\treturn make([]byte, length, capacity)\n\t}\n\treturn &bp\n}\n\n\/\/ Get returns a byte slice from the pool.\nfunc (bp *BytePool) Get() []byte {\n\treturn bp.Pool.Get().([]byte)\n}\n\n\/\/ Put returns a byte slice to the pool.\nfunc (bp *BytePool) Put(b []byte) {\n\t\/\/ Zero out the bytes.\n\tfor i := 0; i < cap(b); i++ {\n\t\tb[i] = 0x0\n\t}\n\tbp.Pool.Put(b)\n}\n<commit_msg>fix bytepool out of range panic<commit_after>\/\/ Copyright 2016 The Chihaya Authors. All rights reserved.\n\/\/ Use of this source code is governed by the BSD 2-Clause license,\n\/\/ which can be found in the LICENSE file.\n\npackage bytepool\n\nimport \"sync\"\n\n\/\/ BytePool is a cached pool of reusable byte slices.\ntype BytePool struct {\n\tsync.Pool\n}\n\n\/\/ New allocates a new BytePool with slices of the provided capacity.\nfunc New(length, capacity int) *BytePool {\n\tvar bp BytePool\n\tbp.Pool.New = func() interface{} {\n\t\treturn make([]byte, length, capacity)\n\t}\n\treturn &bp\n}\n\n\/\/ Get returns a byte slice from the pool.\nfunc (bp *BytePool) Get() []byte {\n\treturn bp.Pool.Get().([]byte)\n}\n\n\/\/ Put returns a byte slice to the pool.\nfunc (bp *BytePool) Put(b []byte) {\n\tb = b[:cap(b)]\n\t\/\/ Zero out the bytes.\n\t\/\/ Apparently this specific expression is optimized by the compiler, see\n\t\/\/ github.com\/golang\/go\/issues\/5373.\n\tfor i := range b {\n\t\tb[i] = 0\n\t}\n\tbp.Pool.Put(b)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 gf Author(https:\/\/github.com\/gogf\/gf). All Rights Reserved.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the MIT License.\n\/\/ If a copy of the MIT was not distributed with this file,\n\/\/ You can obtain one at https:\/\/github.com\/gogf\/gf.\n\npackage gbinary_test\n\nimport (\n\t\"github.com\/gogf\/gf\/g\/encoding\/gbinary\"\n\t\"github.com\/gogf\/gf\/g\/test\/gtest\"\n\t\"math\"\n\t\"testing\"\n)\n\nvar testData = map[string]interface{}{\n\t\/\/\"nil\": nil,\n\t\"int\": int(123),\n\t\"int8\": int8(-99),\n\t\"int8.max\": math.MaxInt8,\n\t\"int16\": int16(123),\n\t\"int16.max\": math.MaxInt16,\n\t\"int32\": int32(-199),\n\t\"int32.max\": math.MaxInt32,\n\t\"int64\": int64(123),\n\t\"int64.max\": math.MaxInt64,\n\t\"uint\": uint(123),\n\t\"uint8\": uint8(123),\n\t\"uint8.max\": math.MaxUint8,\n\t\"uint16\": uint16(9999),\n\t\"uint16.max\": math.MaxUint16,\n\t\"uint32\": uint32(123),\n\t\"uint32.max\": math.MaxUint32,\n\t\"uint64\": uint64(123),\n\t\"uint64.max\": math.MaxUint32 + 1,\n\t\"bool.true\": true,\n\t\"bool.false\": false,\n\t\"string\": \"hehe haha\",\n\t\"byte\": []byte(\"hehe haha\"),\n\t\"float32\": float32(123.456),\n\t\"float32.max\": math.MaxFloat32,\n\t\"float64\": float64(123.456),\n\t\"float64.max\": math.MaxFloat64,\n}\n\nfunc TestEncodeAndDecode(t *testing.T) {\n\tfor k, v := range testData {\n\t\tve := gbinary.Encode(v)\n\t\tve1 := gbinary.EncodeByLength(len(ve), v)\n\n\t\t\/\/t.Logf(\"%s:%v, encoded:%v\\n\", k, v, ve)\n\t\tswitch v.(type) {\n\t\tcase int:\n\t\t\tgtest.Assert(gbinary.DecodeToInt(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToInt(ve1), v)\n\t\tcase int8:\n\t\t\tgtest.Assert(gbinary.DecodeToInt8(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToInt8(ve1), v)\n\t\tcase int16:\n\t\t\tgtest.Assert(gbinary.DecodeToInt16(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToInt16(ve1), v)\n\t\tcase int32:\n\t\t\tgtest.Assert(gbinary.DecodeToInt32(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToInt32(ve1), v)\n\t\tcase int64:\n\t\t\tgtest.Assert(gbinary.DecodeToInt64(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToInt64(ve1), v)\n\t\tcase uint:\n\t\t\tgtest.Assert(gbinary.DecodeToUint(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToUint(ve1), v)\n\t\tcase uint8:\n\t\t\tgtest.Assert(gbinary.DecodeToUint8(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToUint8(ve1), v)\n\t\tcase uint16:\n\t\t\tgtest.Assert(gbinary.DecodeToUint16(ve1), v)\n\t\t\tgtest.Assert(gbinary.DecodeToUint16(ve), v)\n\t\tcase uint32:\n\t\t\tgtest.Assert(gbinary.DecodeToUint32(ve1), v)\n\t\t\tgtest.Assert(gbinary.DecodeToUint32(ve), v)\n\t\tcase uint64:\n\t\t\tgtest.Assert(gbinary.DecodeToUint64(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToUint64(ve1), v)\n\t\tcase bool:\n\t\t\tgtest.Assert(gbinary.DecodeToBool(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToBool(ve1), v)\n\t\tcase string:\n\t\t\tgtest.Assert(gbinary.DecodeToString(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToString(ve1), v)\n\t\tcase float32:\n\t\t\tgtest.Assert(gbinary.DecodeToFloat32(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToFloat32(ve1), v)\n\t\tcase float64:\n\t\t\tgtest.Assert(gbinary.DecodeToFloat64(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToFloat64(ve1), v)\n\t\tdefault:\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tres := make([]byte, len(ve))\n\t\t\terr := gbinary.Decode(ve, res)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"test data: %s, %v, error:%v\", k, v, err)\n\t\t\t}\n\t\t\tgtest.Assert(res, v)\n\t\t}\n\t}\n}\n\ntype User struct {\n\tName string\n\tAge int\n\tUrl string\n}\n\nfunc TestEncodeStruct(t *testing.T) {\n\tuser := User{\"wenzi1\", 999, \"www.baidu.com\"}\n\tve := gbinary.Encode(user)\n\ts := gbinary.DecodeToString(ve)\n\tgtest.Assert(string(s), s)\n}\n\nvar testBitData = []int{0, 99, 122, 129, 222, 999, 22322}\n\nfunc TestBits(t *testing.T) {\n\tfor i := range testBitData {\n\t\tbits := make([]gbinary.Bit, 0)\n\t\tres := gbinary.EncodeBits(bits, testBitData[i], 64)\n\n\t\tgtest.Assert(gbinary.DecodeBits(res), testBitData[i])\n\t\tgtest.Assert(gbinary.DecodeBitsToUint(res), uint(testBitData[i]))\n\n\t\tgtest.Assert(gbinary.DecodeBytesToBits(gbinary.EncodeBitsToBytes(res)), res)\n\t}\n\n}\n<commit_msg>add unit test<commit_after>\/\/ Copyright 2017 gf Author(https:\/\/github.com\/gogf\/gf). All Rights Reserved.\n\/\/\n\/\/ This Source Code Form is subject to the terms of the MIT License.\n\/\/ If a copy of the MIT was not distributed with this file,\n\/\/ You can obtain one at https:\/\/github.com\/gogf\/gf.\n\npackage gbinary_test\n\nimport (\n\t\"github.com\/gogf\/gf\/g\/encoding\/gbinary\"\n\t\"github.com\/gogf\/gf\/g\/test\/gtest\"\n\t\"math\"\n\t\"testing\"\n)\n\nvar testData = map[string]interface{}{\n\t\/\/\"nil\": nil,\n\t\"int\": int(123),\n\t\"int8\": int8(-99),\n\t\"int8.max\": math.MaxInt8,\n\t\"int16\": int16(123),\n\t\"int16.max\": math.MaxInt16,\n\t\"int32\": int32(-199),\n\t\"int32.max\": math.MaxInt32,\n\t\"int64\": int64(123),\n\t\"uint\": uint(123),\n\t\"uint8\": uint8(123),\n\t\"uint8.max\": math.MaxUint8,\n\t\"uint16\": uint16(9999),\n\t\"uint16.max\": math.MaxUint16,\n\t\"uint32\": uint32(123),\n\t\"uint64\": uint64(123),\n\t\"bool.true\": true,\n\t\"bool.false\": false,\n\t\"string\": \"hehe haha\",\n\t\"byte\": []byte(\"hehe haha\"),\n\t\"float32\": float32(123.456),\n\t\"float32.max\": math.MaxFloat32,\n\t\"float64\": float64(123.456),\n}\n\nfunc TestEncodeAndDecode(t *testing.T) {\n\tfor k, v := range testData {\n\t\tve := gbinary.Encode(v)\n\t\tve1 := gbinary.EncodeByLength(len(ve), v)\n\n\t\t\/\/t.Logf(\"%s:%v, encoded:%v\\n\", k, v, ve)\n\t\tswitch v.(type) {\n\t\tcase int:\n\t\t\tgtest.Assert(gbinary.DecodeToInt(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToInt(ve1), v)\n\t\tcase int8:\n\t\t\tgtest.Assert(gbinary.DecodeToInt8(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToInt8(ve1), v)\n\t\tcase int16:\n\t\t\tgtest.Assert(gbinary.DecodeToInt16(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToInt16(ve1), v)\n\t\tcase int32:\n\t\t\tgtest.Assert(gbinary.DecodeToInt32(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToInt32(ve1), v)\n\t\tcase int64:\n\t\t\tgtest.Assert(gbinary.DecodeToInt64(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToInt64(ve1), v)\n\t\tcase uint:\n\t\t\tgtest.Assert(gbinary.DecodeToUint(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToUint(ve1), v)\n\t\tcase uint8:\n\t\t\tgtest.Assert(gbinary.DecodeToUint8(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToUint8(ve1), v)\n\t\tcase uint16:\n\t\t\tgtest.Assert(gbinary.DecodeToUint16(ve1), v)\n\t\t\tgtest.Assert(gbinary.DecodeToUint16(ve), v)\n\t\tcase uint32:\n\t\t\tgtest.Assert(gbinary.DecodeToUint32(ve1), v)\n\t\t\tgtest.Assert(gbinary.DecodeToUint32(ve), v)\n\t\tcase uint64:\n\t\t\tgtest.Assert(gbinary.DecodeToUint64(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToUint64(ve1), v)\n\t\tcase bool:\n\t\t\tgtest.Assert(gbinary.DecodeToBool(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToBool(ve1), v)\n\t\tcase string:\n\t\t\tgtest.Assert(gbinary.DecodeToString(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToString(ve1), v)\n\t\tcase float32:\n\t\t\tgtest.Assert(gbinary.DecodeToFloat32(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToFloat32(ve1), v)\n\t\tcase float64:\n\t\t\tgtest.Assert(gbinary.DecodeToFloat64(ve), v)\n\t\t\tgtest.Assert(gbinary.DecodeToFloat64(ve1), v)\n\t\tdefault:\n\t\t\tif v == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tres := make([]byte, len(ve))\n\t\t\terr := gbinary.Decode(ve, res)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"test data: %s, %v, error:%v\", k, v, err)\n\t\t\t}\n\t\t\tgtest.Assert(res, v)\n\t\t}\n\t}\n}\n\ntype User struct {\n\tName string\n\tAge int\n\tUrl string\n}\n\nfunc TestEncodeStruct(t *testing.T) {\n\tuser := User{\"wenzi1\", 999, \"www.baidu.com\"}\n\tve := gbinary.Encode(user)\n\ts := gbinary.DecodeToString(ve)\n\tgtest.Assert(string(s), s)\n}\n\nvar testBitData = []int{0, 99, 122, 129, 222, 999, 22322}\n\nfunc TestBits(t *testing.T) {\n\tfor i := range testBitData {\n\t\tbits := make([]gbinary.Bit, 0)\n\t\tres := gbinary.EncodeBits(bits, testBitData[i], 64)\n\n\t\tgtest.Assert(gbinary.DecodeBits(res), testBitData[i])\n\t\tgtest.Assert(gbinary.DecodeBitsToUint(res), uint(testBitData[i]))\n\n\t\tgtest.Assert(gbinary.DecodeBytesToBits(gbinary.EncodeBitsToBytes(res)), res)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package db\n\nimport (\n\t\"io\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/gtfierro\/hod\/config\"\n\tturtle \"github.com\/gtfierro\/hod\/goraptor\"\n\t\"github.com\/gtfierro\/hod\/query\"\n)\n\nfunc TestDBQuery(t *testing.T) {\n\tcfg, err := config.ReadConfig(\"testhodconfig.yaml\")\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tcfg.DBPath = \"test_databases\/testdb\"\n\tdb, err := NewDB(cfg)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tfor _, test := range []struct {\n\t\tquery string\n\t\tresults []ResultMap\n\t}{\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?x rdf:type brick:Room . };\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#room_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?ahu rdf:type brick:AHU . ?ahu bf:feeds ?x .};\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#vav_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?ahu rdf:type brick:AHU . ?ahu bf:feeds+ ?x .};\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#hvaczone_1\")}, {\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#vav_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?ahu rdf:type brick:AHU . ?x bf:isFedBy+ ?ahu .};\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#hvaczone_1\")}, {\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#vav_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?ahu rdf:type brick:AHU . ?ahu bf:feeds\/bf:feeds ?x .};\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#hvaczone_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?ahu rdf:type brick:AHU . ?ahu bf:feeds\/bf:feeds+ ?x .};\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#hvaczone_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?ahu rdf:type brick:AHU . ?ahu bf:feeds\/bf:feeds? ?x .};\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#hvaczone_1\")}, {\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#vav_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?ahu rdf:type brick:AHU . ?x bf:isFedBy\/bf:isFedBy? ?ahu .};\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#hvaczone_1\")}, {\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#vav_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?ahu rdf:type brick:AHU . ?ahu bf:feeds* ?x .};\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#hvaczone_1\")}, {\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#vav_1\")}, {\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#vav_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?ahu rdf:type brick:AHU . ?x bf:isFedBy* ?ahu .};\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#hvaczone_1\")}, {\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#vav_1\")}, {\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#vav_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?vav ?room WHERE { ?vav rdf:type brick:VAV . ?room rdf:type brick:Room . ?zone rdf:type brick:HVAC_Zone . ?vav bf:feeds+ ?zone . ?room bf:isPartOf ?zone . }; \",\n\t\t\t[]ResultMap{{\"?room\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#room_1\"), \"?vav\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#vav_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?sensor WHERE { ?sensor rdf:type\/rdfs:subClassOf* brick:Zone_Temperature_Sensor . };\",\n\t\t\t[]ResultMap{{\"?sensor\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#ztemp_1\")}},\n\t\t},\n\t} {\n\t\tq, e := query.Parse(strings.NewReader(test.query))\n\t\tif e != nil {\n\t\t\tt.Error(test.query, e)\n\t\t\tcontinue\n\t\t}\n\t\tresult := db.RunQuery(q)\n\t\tif !compareResultMapList(test.results, result.Rows) {\n\t\t\tt.Errorf(\"Results for %s had\\n %+v\\nexpected\\n %+v\", test.query, result.Rows, test.results)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc BenchmarkQueryPerformance1(b *testing.B) {\n\tcfg, err := config.ReadConfig(\"testhodconfig.yaml\")\n\tif err != nil {\n\t\tb.Error(err)\n\t\treturn\n\t}\n\tcfg.DBPath = \"test_databases\/berkeleytestdb\"\n\tdb, err := NewDB(cfg)\n\tif err != nil {\n\t\tb.Error(err)\n\t\treturn\n\t}\n\tbenchmarks := []struct {\n\t\tname string\n\t\tquery io.Reader\n\t}{\n\t\t{\"SimpleSubjectVarTriple\", strings.NewReader(\"SELECT ?x WHERE { ?x rdf:type brick:Room . };\")},\n\t\t{\"LongerQuery1\", strings.NewReader(\"SELECT ?vav ?room WHERE { ?vav rdf:type brick:VAV . ?room rdf:type brick:Room . ?zone rdf:type brick:HVAC_Zone . ?vav bf:feeds+ ?zone . ?room bf:isPartOf ?zone . }; \")},\n\t}\n\n\tfor _, bm := range benchmarks {\n\t\tb.Run(bm.name, func(b *testing.B) {\n\t\t\tb.ReportAllocs()\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tq, _ := query.Parse(bm.query)\n\t\t\t\tdb.RunQuery(q)\n\t\t\t}\n\t\t})\n\t}\n}\n<commit_msg>correct the queries<commit_after>package db\n\nimport (\n\t\"io\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/gtfierro\/hod\/config\"\n\tturtle \"github.com\/gtfierro\/hod\/goraptor\"\n\t\"github.com\/gtfierro\/hod\/query\"\n)\n\nfunc TestDBQuery(t *testing.T) {\n\tcfg, err := config.ReadConfig(\"testhodconfig.yaml\")\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tcfg.DBPath = \"test_databases\/testdb\"\n\tdb, err := NewDB(cfg)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\tfor _, test := range []struct {\n\t\tquery string\n\t\tresults []ResultMap\n\t}{\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?x rdf:type brick:Room . };\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#room_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?ahu rdf:type brick:AHU . ?ahu bf:feeds ?x .};\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#vav_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?ahu rdf:type brick:AHU . ?ahu bf:feeds+ ?x .};\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#hvaczone_1\")}, {\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#vav_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?ahu rdf:type brick:AHU . ?x bf:isFedBy+ ?ahu .};\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#hvaczone_1\")}, {\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#vav_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?ahu rdf:type brick:AHU . ?ahu bf:feeds\/bf:feeds ?x .};\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#hvaczone_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?ahu rdf:type brick:AHU . ?ahu bf:feeds\/bf:feeds+ ?x .};\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#hvaczone_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?ahu rdf:type brick:AHU . ?ahu bf:feeds\/bf:feeds? ?x .};\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#hvaczone_1\")}, {\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#vav_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?ahu rdf:type brick:AHU . ?x bf:isFedBy\/bf:isFedBy? ?ahu .};\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#hvaczone_1\")}, {\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#vav_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?ahu rdf:type brick:AHU . ?ahu bf:feeds* ?x .};\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#hvaczone_1\")}, {\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#vav_1\")}, {\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#ahu_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?x WHERE { ?ahu rdf:type brick:AHU . ?x bf:isFedBy* ?ahu .};\",\n\t\t\t[]ResultMap{{\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#hvaczone_1\")}, {\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#vav_1\")}, {\"?x\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#ahu_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?vav ?room WHERE { ?vav rdf:type brick:VAV . ?room rdf:type brick:Room . ?zone rdf:type brick:HVAC_Zone . ?vav bf:feeds+ ?zone . ?room bf:isPartOf ?zone . }; \",\n\t\t\t[]ResultMap{{\"?room\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#room_1\"), \"?vav\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#vav_1\")}},\n\t\t},\n\t\t{\n\t\t\t\"SELECT ?sensor WHERE { ?sensor rdf:type\/rdfs:subClassOf* brick:Zone_Temperature_Sensor . };\",\n\t\t\t[]ResultMap{{\"?sensor\": turtle.ParseURI(\"http:\/\/buildsys.org\/ontologies\/building_example#ztemp_1\")}},\n\t\t},\n\t} {\n\t\tq, e := query.Parse(strings.NewReader(test.query))\n\t\tif e != nil {\n\t\t\tt.Error(test.query, e)\n\t\t\tcontinue\n\t\t}\n\t\tresult := db.RunQuery(q)\n\t\tif !compareResultMapList(test.results, result.Rows) {\n\t\t\tt.Errorf(\"Results for %s had\\n %+v\\nexpected\\n %+v\", test.query, result.Rows, test.results)\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc BenchmarkQueryPerformance1(b *testing.B) {\n\tcfg, err := config.ReadConfig(\"testhodconfig.yaml\")\n\tif err != nil {\n\t\tb.Error(err)\n\t\treturn\n\t}\n\tcfg.DBPath = \"test_databases\/berkeleytestdb\"\n\tdb, err := NewDB(cfg)\n\tif err != nil {\n\t\tb.Error(err)\n\t\treturn\n\t}\n\tbenchmarks := []struct {\n\t\tname string\n\t\tquery io.Reader\n\t}{\n\t\t{\"SimpleSubjectVarTriple\", strings.NewReader(\"SELECT ?x WHERE { ?x rdf:type brick:Room . };\")},\n\t\t{\"LongerQuery1\", strings.NewReader(\"SELECT ?vav ?room WHERE { ?vav rdf:type brick:VAV . ?room rdf:type brick:Room . ?zone rdf:type brick:HVAC_Zone . ?vav bf:feeds+ ?zone . ?room bf:isPartOf ?zone . }; \")},\n\t}\n\n\tfor _, bm := range benchmarks {\n\t\tb.Run(bm.name, func(b *testing.B) {\n\t\t\tb.ReportAllocs()\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tq, _ := query.Parse(bm.query)\n\t\t\t\tdb.RunQuery(q)\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package resources\n\nimport (\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/codebuild\"\n)\n\ntype CodeBuildProject struct {\n\tsvc *codebuild.CodeBuild\n\tprojectName *string\n}\n\nfunc init() {\n\tregister(\"CodeBuildProject\", ListCodeBuildProjects)\n}\n\nfunc ListCodeBuildProjects(sess *session.Session) ([]Resource, error) {\n\tsvc := codebuild.New(sess)\n\tresources := []Resource{}\n\n\tparams := &codebuild.ListProjectsInput{}\n\n\tfor {\n\t\tresp, err := svc.ListProjects(params)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, project := range resp.Projects {\n\t\t\tresources = append(resources, &CodeBuildProject{\n\t\t\t\tsvc: svc,\n\t\t\t\tprojectName: project,\n\t\t\t})\n\t\t}\n\n\t\tif resp.NextToken == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tparams.NextToken = resp.NextToken\n\t}\n\n\treturn resources, nil\n}\n\nfunc (f *CodeBuildProject) Remove() error {\n\n\t_, err := f.svc.DeleteProject(&codebuild.DeleteProjectInput{\n\t\tName: f.projectName,\n\t})\n\n\treturn err\n}\n\nfunc (f *CodeBuildProject) String() string {\n\treturn *f.projectName\n}\n<commit_msg>Add tag based filtering for codebuild projects (#648)<commit_after>package resources\n\nimport (\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/codebuild\"\n\t\"github.com\/rebuy-de\/aws-nuke\/pkg\/types\"\n)\n\ntype CodeBuildProject struct {\n\tsvc *codebuild.CodeBuild\n\tprojectName *string\n\ttags map[string]*string\n}\n\nfunc init() {\n\tregister(\"CodeBuildProject\", ListCodeBuildProjects)\n}\n\nfunc GetTags(svc *codebuild.CodeBuild, project *string) map[string]*string {\n\ttags := make(map[string]*string)\n\tbatchResult, _ := svc.BatchGetProjects(&codebuild.BatchGetProjectsInput{Names: []*string{project}})\n\n\tfor _, project := range batchResult.Projects {\n\t\tif len(project.Tags) > 0 {\n\n\t\t\tfor _, v := range project.Tags {\n\t\t\t\ttags[*v.Key] = v.Value\n\t\t\t}\n\n\t\t\treturn tags\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc ListCodeBuildProjects(sess *session.Session) ([]Resource, error) {\n\tsvc := codebuild.New(sess)\n\tresources := []Resource{}\n\n\tparams := &codebuild.ListProjectsInput{}\n\n\tfor {\n\t\tresp, err := svc.ListProjects(params)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, project := range resp.Projects {\n\t\t\tresources = append(resources, &CodeBuildProject{\n\t\t\t\tsvc: svc,\n\t\t\t\tprojectName: project,\n\t\t\t\ttags: GetTags(svc, project),\n\t\t\t})\n\t\t}\n\n\t\tif resp.NextToken == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tparams.NextToken = resp.NextToken\n\t}\n\n\treturn resources, nil\n}\n\nfunc (f *CodeBuildProject) Remove() error {\n\n\t_, err := f.svc.DeleteProject(&codebuild.DeleteProjectInput{\n\t\tName: f.projectName,\n\t})\n\n\treturn err\n}\n\nfunc (f *CodeBuildProject) String() string {\n\treturn *f.projectName\n}\n\nfunc (f *CodeBuildProject) Properties() types.Properties {\n\tproperties := types.NewProperties()\n\tfor key, tag := range f.tags {\n\t\tproperties.SetTag(&key, tag)\n\t}\n\tproperties.\n\t\tSet(\"ProjectName\", f.projectName)\n\treturn properties\n}\n<|endoftext|>"} {"text":"<commit_before>package main_test\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\n\tcli \"github.com\/heroku\/cli\"\n\n\t\"github.com\/lunixbochs\/vtclean\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"debug\", func() {\n\tconst butt = \"<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>\\n\"\n\tvar stdout string\n\tBeforeEach(func() {\n\t\tcli.Stdout = new(bytes.Buffer)\n\t\tf, err := os.Create(cli.ErrLogPath)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tf.WriteString(butt)\n\t\tf.Close()\n\t\tcli.Start(\"heroku\", \"debug:errlog\")\n\t\tstdout = vtclean.Clean(cli.Stdout.(*bytes.Buffer).String(), false)\n\t\tcli.ExitFn = func(code int) {}\n\t})\n\tAfterEach(func() { cli.Stdout = os.Stdout })\n\n\tIt(\"shows the error log\", func() {\n\t\tExpect(stdout).To(ContainSubstring(butt))\n\t})\n})\n<commit_msg>fix debug test<commit_after>package main_test\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\n\tcli \"github.com\/heroku\/cli\"\n\n\t\"github.com\/lunixbochs\/vtclean\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"debug\", func() {\n\tconst butt = \"<<<<<<<<<<<<<<>>>>>>>>>>>>>>>>\\n\"\n\tvar stdout string\n\tBeforeEach(func() {\n\t\tcli.Stdout = new(bytes.Buffer)\n\t\tf, err := os.Create(cli.ErrLogPath)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tf.WriteString(butt)\n\t\tf.Close()\n\t\tcli.ExitFn = func(code int) {}\n\t\tcli.Start(\"heroku\", \"debug:errlog\")\n\t\tstdout = vtclean.Clean(cli.Stdout.(*bytes.Buffer).String(), false)\n\t})\n\tAfterEach(func() { cli.Stdout = os.Stdout })\n\n\tIt(\"shows the error log\", func() {\n\t\tExpect(stdout).To(ContainSubstring(butt))\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package jsonunroller\n\nimport (\n\t\/\/\t\"appengine\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"strconv\"\n)\n\nfunc dumpobj(prefix string, x interface{}) string {\n\n\ts := \"\"\n\tswitch t := x.(type) {\n\tcase map[string]interface{}:\n\t\tfor k, v := range t {\n\t\t\ts += dumpobj(prefix+\".\"+k, v)\n\t\t}\n\tcase []interface{}:\n\t\tfor i, v := range t {\n\t\t\ts += dumpobj(prefix+\"[\"+strconv.Itoa(i)+\"]\", v)\n\t\t}\n\tcase string:\n\t\ts += fmt.Sprintf(\"%s = %q\\n\", prefix, t)\n\tdefault:\n\t\tfmt.Printf(\"Unhandled: %T\\n\", t)\n\t}\n\treturn s\n}\n\nfunc init() {\n\thttp.HandleFunc(\"\/\", root)\n\thttp.HandleFunc(\"\/unroll\", unroll)\n}\n\nfunc root(w http.ResponseWriter, r *http.Request) {\n\tfmt.Fprint(w, postForm)\n}\n\nconst postForm = `\n<html>\n<body>\n<form action=\"\/unroll\" method=\"post\">\n<div><textarea style=\"box-sizing: border-box; height: 60%; width: 100%;\" name=\"content\"><\/textarea><\/div>\n<div><input type=\"submit\"><\/div>\n<pre>curl --data-urlencode content@foobar.json http:\/\/jsonunroller.appspot.com\/unroll<pre>\n<p><a href=https:\/\/github.com\/kaihendry\/GAE-jsonunroller>MIT source code<\/a><\/p>\n<\/form>\n<\/body>\n<\/html>\n`\n\nfunc unroll(w http.ResponseWriter, r *http.Request) {\n\n\tf := r.FormValue(\"content\")\n\tif f == \"\" {\n\t\terr := errors.New(\"No content!\")\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tvar pj interface{}\n\terr := json.Unmarshal([]byte(f), &pj)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\ts := dumpobj(\"this\", pj)\n\n\tw.Header().Set(\"Content-Type\", \"text\/plain; charset=utf-8\")\n\tio.WriteString(w, s)\n\n}\n<commit_msg>http fetch<commit_after>package jsonunroller\n\nimport (\n\t\"appengine\"\n\t\"appengine\/urlfetch\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc dumpobj(prefix string, x interface{}) string {\n\n\ts := \"\"\n\tswitch t := x.(type) {\n\tcase map[string]interface{}:\n\t\tfor k, v := range t {\n\t\t\ts += dumpobj(prefix+\".\"+k, v)\n\t\t}\n\tcase []interface{}:\n\t\tfor i, v := range t {\n\t\t\ts += dumpobj(prefix+\"[\"+strconv.Itoa(i)+\"]\", v)\n\t\t}\n\tcase string:\n\t\ts += fmt.Sprintf(\"%s = %q\\n\", prefix, t)\n\tdefault:\n\t\tfmt.Printf(\"Unhandled: %T\\n\", t)\n\t}\n\treturn s\n}\n\nfunc init() {\n\thttp.HandleFunc(\"\/\", root)\n\thttp.HandleFunc(\"\/unroll\", unroll)\n}\n\nfunc root(w http.ResponseWriter, r *http.Request) {\n\tu := r.FormValue(\"u\")\n\tlog.Println(\"URL:\", u)\n\tif u == \"\" {\n\t\tfmt.Fprint(w, postForm)\n\t\treturn\n\t}\n\n\tpu, err := url.ParseRequestURI(u)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif pu.IsAbs() != true {\n\t\terr = errors.New(\"Not absolute URL\")\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tc := appengine.NewContext(r)\n\tclient := urlfetch.Client(c)\n\tres, err := client.Get(pu.String())\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tcontenttype := res.Header.Get(\"Content-Type\")\n\tif strings.Contains(contenttype, \"application\/json\") != true {\n\t\terr = errors.New(\"Not application\/json: \" + contenttype)\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tj, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\t\/\/ res.Body.Close()\n\n\tvar pj interface{}\n\terr = json.Unmarshal([]byte(j), &pj)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\ts := dumpobj(\"this\", pj)\n\n\tw.Header().Set(\"Content-Type\", \"text\/plain; charset=utf-8\")\n\tio.WriteString(w, s)\n\n}\n\nconst postForm = `\n<html>\n<body>\n<form action=\"\/\" method=\"get\">\n<div><input type=url name=u size=100><\/input><\/div>\n<div><input type=\"submit\" value=\"Unroll JSON URL\"><\/div>\n<\/form>\n<form action=\"\/unroll\" method=\"post\">\n<div><textarea style=\"box-sizing: border-box; height: 60%; width: 100%;\" name=\"content\"><\/textarea><\/div>\n<div><input type=\"submit\"><\/div>\n<pre>curl --data-urlencode content@foobar.json http:\/\/jsonunroller.appspot.com\/unroll<pre>\n<p><a href=https:\/\/github.com\/kaihendry\/GAE-jsonunroller>MIT source code<\/a><\/p>\n<\/form>\n<\/body>\n<\/html>\n`\n\nfunc unroll(w http.ResponseWriter, r *http.Request) {\n\n\tf := r.FormValue(\"content\")\n\tif f == \"\" {\n\t\terr := errors.New(\"No content!\")\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tvar pj interface{}\n\terr := json.Unmarshal([]byte(f), &pj)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\ts := dumpobj(\"this\", pj)\n\n\tw.Header().Set(\"Content-Type\", \"text\/plain; charset=utf-8\")\n\tio.WriteString(w, s)\n\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build ignore\n\n\/\/ This program computes the value of rng_cooked in rng.go,\n\/\/ which is used for seeding all instances of rand.Source.\n\/\/ a 64bit and a 63bit version of the array is printed to\n\/\/ the standard output.\n\npackage main\n\nimport \"fmt\"\n\nconst (\n\tlength = 607\n\ttap = 273\n\tmask = (1 << 63) - 1\n\ta = 48271\n\tm = (1 << 31) - 1\n\tq = 44488\n\tr = 3399\n)\n\nvar (\n\trngVec [length]int64\n\trngTap, rngFeed int\n)\n\nfunc seedrand(x int32) int32 {\n\thi := x \/ q\n\tlo := x % q\n\tx = a*lo - r*hi\n\tif x < 0 {\n\t\tx += m\n\t}\n\treturn x\n}\n\nfunc srand(seed int32) {\n\trngTap = 0\n\trngFeed = length - tap\n\tseed %= m\n\tif seed < 0 {\n\t\tseed += m\n\t} else if seed == 0 {\n\t\tseed = 89482311\n\t}\n\tx := seed\n\tfor i := -20; i < length; i++ {\n\t\tx = seedrand(x)\n\t\tif i >= 0 {\n\t\t\tvar u int64\n\t\t\tu = int64(x) << 20\n\t\t\tx = seedrand(x)\n\t\t\tu ^= int64(x) << 10\n\t\t\tx = seedrand(x)\n\t\t\tu ^= int64(x)\n\t\t\trngVec[i] = u\n\t\t}\n\t}\n}\n\nfunc vrand() int64 {\n\trngTap--\n\tif rngTap < 0 {\n\t\trngTap += length\n\t}\n\trngFeed--\n\tif rngFeed < 0 {\n\t\trngFeed += length\n\t}\n\tx := (rngVec[rngFeed] + rngVec[rngTap])\n\trngVec[rngFeed] = x\n\treturn x\n}\n\nfunc main() {\n\tsrand(1)\n\tfor i := uint64(0); i < 7.8e12; i++ {\n\t\tvrand()\n\t}\n\tfmt.Printf(\"rngVec after 7.8e12 calls to vrand:\\n%#v\\n\", rngVec)\n\tfor i := range rngVec {\n\t\trngVec[i] &= mask\n\t}\n\tfmt.Printf(\"lower 63bit of rngVec after 7.8e12 calls to vrand:\\n%#v\\n\", rngVec)\n}\n<commit_msg>math\/rand: fix typo in comment<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build ignore\n\n\/\/ This program computes the value of rngCooked in rng.go,\n\/\/ which is used for seeding all instances of rand.Source.\n\/\/ a 64bit and a 63bit version of the array is printed to\n\/\/ the standard output.\n\npackage main\n\nimport \"fmt\"\n\nconst (\n\tlength = 607\n\ttap = 273\n\tmask = (1 << 63) - 1\n\ta = 48271\n\tm = (1 << 31) - 1\n\tq = 44488\n\tr = 3399\n)\n\nvar (\n\trngVec [length]int64\n\trngTap, rngFeed int\n)\n\nfunc seedrand(x int32) int32 {\n\thi := x \/ q\n\tlo := x % q\n\tx = a*lo - r*hi\n\tif x < 0 {\n\t\tx += m\n\t}\n\treturn x\n}\n\nfunc srand(seed int32) {\n\trngTap = 0\n\trngFeed = length - tap\n\tseed %= m\n\tif seed < 0 {\n\t\tseed += m\n\t} else if seed == 0 {\n\t\tseed = 89482311\n\t}\n\tx := seed\n\tfor i := -20; i < length; i++ {\n\t\tx = seedrand(x)\n\t\tif i >= 0 {\n\t\t\tvar u int64\n\t\t\tu = int64(x) << 20\n\t\t\tx = seedrand(x)\n\t\t\tu ^= int64(x) << 10\n\t\t\tx = seedrand(x)\n\t\t\tu ^= int64(x)\n\t\t\trngVec[i] = u\n\t\t}\n\t}\n}\n\nfunc vrand() int64 {\n\trngTap--\n\tif rngTap < 0 {\n\t\trngTap += length\n\t}\n\trngFeed--\n\tif rngFeed < 0 {\n\t\trngFeed += length\n\t}\n\tx := (rngVec[rngFeed] + rngVec[rngTap])\n\trngVec[rngFeed] = x\n\treturn x\n}\n\nfunc main() {\n\tsrand(1)\n\tfor i := uint64(0); i < 7.8e12; i++ {\n\t\tvrand()\n\t}\n\tfmt.Printf(\"rngVec after 7.8e12 calls to vrand:\\n%#v\\n\", rngVec)\n\tfor i := range rngVec {\n\t\trngVec[i] &= mask\n\t}\n\tfmt.Printf(\"lower 63bit of rngVec after 7.8e12 calls to vrand:\\n%#v\\n\", rngVec)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage retrieval\n<commit_msg>Add basic test for TargetManager.targetSet<commit_after>\/\/ Copyright 2013 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage retrieval\n\nimport (\n\t\"testing\"\n\n\t\"golang.org\/x\/net\/context\"\n\t\"gopkg.in\/yaml.v2\"\n\n\t\"github.com\/prometheus\/prometheus\/config\"\n\t\"github.com\/prometheus\/prometheus\/storage\/local\"\n)\n\nfunc TestTargetSetRecreatesTargetGroupsEveryRun(t *testing.T) {\n\tscrapeConfig := &config.ScrapeConfig{}\n\n\tsOne := `\njob_name: \"foo\"\ndns_sd_configs:\n- names:\n - \"srv.name.one.example.org\"\n`\n\tif err := yaml.Unmarshal([]byte(sOne), scrapeConfig); err != nil {\n\t\tt.Fatalf(\"Unable to load YAML config sOne: %s\", err)\n\t}\n\n\t\/\/ Not properly setting it up, but that seems okay\n\tmss := &local.MemorySeriesStorage{}\n\n\tts := newTargetSet(scrapeConfig, mss)\n\n\tts.runProviders(context.Background(), providersFromConfig(scrapeConfig))\n\n\tverifyPresence(t, ts.tgroups, \"dns\/0\/srv.name.one.example.org\", true)\n\n\tsTwo := `\njob_name: \"foo\"\ndns_sd_configs:\n- names:\n - \"srv.name.two.example.org\"\n`\n\tif err := yaml.Unmarshal([]byte(sTwo), scrapeConfig); err != nil {\n\t\tt.Fatalf(\"Unable to load YAML config sTwo: %s\", err)\n\t}\n\n\tts.runProviders(context.Background(), providersFromConfig(scrapeConfig))\n\n\tverifyPresence(t, ts.tgroups, \"dns\/0\/srv.name.one.example.org\", false)\n\tverifyPresence(t, ts.tgroups, \"dns\/0\/srv.name.two.example.org\", true)\n\n}\n\nfunc verifyPresence(t *testing.T, tgroups map[string][]*Target, name string, present bool) {\n\tif _, ok := tgroups[name]; ok != present {\n\t\tmsg := \"\"\n\t\tif !present {\n\t\t\tmsg = \"not \"\n\t\t}\n\t\tt.Fatalf(\"'%s' should %sbe present in TargetSet.tgroups: %s\", name, msg, tgroups)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package richcontent\n\nimport (\n\t\"fmt\"\n\t\"html\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc FindUrl(ctx context.Context, input []byte) ([]RichContent, error) {\n\trcs := make([]RichContent, 0, 4)\n\tfor _, u := range FindAllUrlsIndex(input) {\n\t\turlBytes := input[u[0]:u[1]]\n\t\tvar components []Component\n\t\tfor _, p := range defaultUrlPatterns {\n\t\t\tif match := p.Pattern.FindSubmatchIndex(urlBytes); match != nil {\n\t\t\t\tif c, err := p.Handler(ctx, urlBytes, MatchIndices(match)); err == nil {\n\t\t\t\t\tcomponents = c\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\trcs = append(rcs, MakeRichContent(u[0], u[1], string(urlBytes), components))\n\t}\n\treturn rcs, nil\n}\n\ntype UrlPatternHandler func(ctx context.Context, urlBytes []byte, match MatchIndices) ([]Component, error)\n\ntype UrlPattern struct {\n\tPattern *regexp.Regexp\n\tHandler UrlPatternHandler\n}\n\nvar defaultUrlPatterns = []*UrlPattern{\n\tnewUrlPattern(`^https?:\/\/(?:www\\.youtube\\.com\/watch\\?(?:.+&)*v=|youtu\\.be\/)([\\w\\-]+)`, handleYoutube),\n\tnewUrlPattern(`^https?:\/\/imgur\\.com\/([,\\w]+)(?:\\#(\\d+))?[^\/]*$`, handleImgur),\n\tnewUrlPattern(`^http:\/\/picmoe\\.net\/d\\.php\\?id=(\\d+)`, handlePicmoe),\n\tnewUrlPattern(`\\.(?i:png|jpg|gif)$`, handleGenericImage),\n}\n\nfunc newUrlPattern(pattern string, handler UrlPatternHandler) *UrlPattern {\n\treturn &UrlPattern{\n\t\tPattern: regexp.MustCompile(pattern),\n\t\tHandler: handler,\n\t}\n}\n\nfunc imageHtmlTag(urlString string) string {\n\treturn fmt.Sprintf(`<img src=\"%s\" alt=\"\" \/>`, html.EscapeString(urlString))\n}\n\n\/\/ Handlers\n\nfunc handleYoutube(ctx context.Context, urlBytes []byte, match MatchIndices) ([]Component, error) {\n\treturn []Component{MakeComponent(fmt.Sprintf(\n\t\t`<iframe class=\"youtube-player\" type=\"text\/html\" width=\"640\" height=\"385\" src=\"\/\/www.youtube.com\/embed\/%s\" frameborder=\"0\"><\/iframe>`,\n\t\tstring(match.ByteSliceOf(urlBytes, 1))))}, nil\n}\n\nfunc handleImgur(ctx context.Context, urlBytes []byte, match MatchIndices) ([]Component, error) {\n\tvar comps []Component\n\tfor _, id := range strings.Split(string(match.ByteSliceOf(urlBytes, 1)), \",\") {\n\t\tlink := fmt.Sprintf(`\/\/i.imgur.com\/%s.jpg`, id)\n\t\tcomps = append(comps, MakeComponent(imageHtmlTag(link)))\n\t}\n\treturn comps, nil\n}\n\nfunc handlePicmoe(ctx context.Context, urlBytes []byte, match MatchIndices) ([]Component, error) {\n\tlink := fmt.Sprintf(`http:\/\/picmoe.net\/src\/%ss.jpg`, string(match.ByteSliceOf(urlBytes, 1)))\n\treturn []Component{MakeComponent(imageHtmlTag(link))}, nil\n}\n\nfunc handleGenericImage(ctx context.Context, urlBytes []byte, match MatchIndices) ([]Component, error) {\n\treturn []Component{MakeComponent(imageHtmlTag(string(urlBytes)))}, nil\n}\n<commit_msg>richcontent: special handle imgur's direct link.<commit_after>package richcontent\n\nimport (\n\t\"fmt\"\n\t\"html\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc FindUrl(ctx context.Context, input []byte) ([]RichContent, error) {\n\trcs := make([]RichContent, 0, 4)\n\tfor _, u := range FindAllUrlsIndex(input) {\n\t\turlBytes := input[u[0]:u[1]]\n\t\tvar components []Component\n\t\tfor _, p := range defaultUrlPatterns {\n\t\t\tif match := p.Pattern.FindSubmatchIndex(urlBytes); match != nil {\n\t\t\t\tif c, err := p.Handler(ctx, urlBytes, MatchIndices(match)); err == nil {\n\t\t\t\t\tcomponents = c\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\trcs = append(rcs, MakeRichContent(u[0], u[1], string(urlBytes), components))\n\t}\n\treturn rcs, nil\n}\n\ntype UrlPatternHandler func(ctx context.Context, urlBytes []byte, match MatchIndices) ([]Component, error)\n\ntype UrlPattern struct {\n\tPattern *regexp.Regexp\n\tHandler UrlPatternHandler\n}\n\nvar defaultUrlPatterns = []*UrlPattern{\n\tnewUrlPattern(`^https?:\/\/(?:www\\.youtube\\.com\/watch\\?(?:.+&)*v=|youtu\\.be\/)([\\w\\-]+)`, handleYoutube),\n\tnewUrlPattern(`^https?:(\/\/i\\.imgur\\.com\/[\\.\\w]+)$`, handleSameSchemeImage), \/\/ Note: cuz some users use http\n\tnewUrlPattern(`^https?:\/\/imgur\\.com\/([,\\w]+)(?:\\#(\\d+))?[^\/]*$`, handleImgur),\n\tnewUrlPattern(`^http:\/\/picmoe\\.net\/d\\.php\\?id=(\\d+)`, handlePicmoe),\n\tnewUrlPattern(`\\.(?i:png|jpg|gif)$`, handleGenericImage),\n}\n\nfunc newUrlPattern(pattern string, handler UrlPatternHandler) *UrlPattern {\n\treturn &UrlPattern{\n\t\tPattern: regexp.MustCompile(pattern),\n\t\tHandler: handler,\n\t}\n}\n\nfunc imageHtmlTag(urlString string) string {\n\treturn fmt.Sprintf(`<img src=\"%s\" alt=\"\" \/>`, html.EscapeString(urlString))\n}\n\n\/\/ Handlers\n\nfunc handleYoutube(ctx context.Context, urlBytes []byte, match MatchIndices) ([]Component, error) {\n\treturn []Component{MakeComponent(fmt.Sprintf(\n\t\t`<iframe class=\"youtube-player\" type=\"text\/html\" width=\"640\" height=\"385\" src=\"\/\/www.youtube.com\/embed\/%s\" frameborder=\"0\"><\/iframe>`,\n\t\tstring(match.ByteSliceOf(urlBytes, 1))))}, nil\n}\n\nfunc handleSameSchemeImage(ctx context.Context, urlBytes []byte, match MatchIndices) ([]Component, error) {\n\treturn []Component{MakeComponent(imageHtmlTag(string(match.ByteSliceOf(urlBytes, 1))))}, nil\n}\n\nfunc handleImgur(ctx context.Context, urlBytes []byte, match MatchIndices) ([]Component, error) {\n\tvar comps []Component\n\tfor _, id := range strings.Split(string(match.ByteSliceOf(urlBytes, 1)), \",\") {\n\t\tlink := fmt.Sprintf(`\/\/i.imgur.com\/%s.jpg`, id)\n\t\tcomps = append(comps, MakeComponent(imageHtmlTag(link)))\n\t}\n\treturn comps, nil\n}\n\nfunc handlePicmoe(ctx context.Context, urlBytes []byte, match MatchIndices) ([]Component, error) {\n\tlink := fmt.Sprintf(`http:\/\/picmoe.net\/src\/%ss.jpg`, string(match.ByteSliceOf(urlBytes, 1)))\n\treturn []Component{MakeComponent(imageHtmlTag(link))}, nil\n}\n\nfunc handleGenericImage(ctx context.Context, urlBytes []byte, match MatchIndices) ([]Component, error) {\n\treturn []Component{MakeComponent(imageHtmlTag(string(urlBytes)))}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package models\n\nimport (\n\t\"database\/sql\"\n\t\"time\"\n)\n\n\/*\n\tThese are more functions for querying the \"races\" table,\n\tbut these functions are only used for the website\n*\/\n\n\/\/ RaceHistory gets the history for each race in the database\ntype RaceHistory struct {\n\tRaceID int\n\tRaceType string\n\tRaceFormat string\n\tRaceChar string\n\tRaceGoal string\n\tRaceDateStart time.Time\n\tRaceDateFinished time.Time\n\tRaceParticipants []RaceHistoryParticipants\n}\n\n\/\/ RaceHistoryParticipants gets the user stats for each racer in each race\ntype RaceHistoryParticipants struct {\n\tRacerName string\n\tRacerPlace int\n\tRacerTime string\n\tRacerComment string\n}\n\n\/\/ GetRacesHistory gets all data for all races\nfunc (*Races) GetRacesHistory(currentPage int, racesPerPage int, raceOffset int) ([]RaceHistory, int, error) {\n\tvar rows *sql.Rows\n\tif v, err := db.Query(`\n\t\tSELECT\n\t\t\tid,\n\t\t\tranked,\n\t\t\tformat,\n\t\t\tplayer_type,\n\t\t\tgoal,\n\t\t\tdatetime_created,\n\t\t\tdatetime_finished\n\t\tFROM\n\t\t\traces\n\t\tWHERE\n\t\t\tfinished = 1\n\t\tGROUP BY\n\t\t\tid\n\t\tORDER BY\n\t\t\tdatetime_created DESC\n\t\tLIMIT\n\t\t\t?\n\t\tOFFSET\n\t\t\t?\n\t`, racesPerPage, raceOffset); err != nil {\n\t\treturn nil, 0, err\n\t} else {\n\t\trows = v\n\t}\n\tdefer rows.Close()\n\n\traceHistory := make([]RaceHistory, 0)\n\tfor rows.Next() {\n\t\tvar race RaceHistory\n\t\tif err := rows.Scan(\n\t\t\t&race.RaceID,\n\t\t\t&race.RaceType,\n\t\t\t&race.RaceFormat,\n\t\t\t&race.RaceChar,\n\t\t\t&race.RaceGoal,\n\t\t\t&race.RaceDateStart,\n\t\t\t&race.RaceDateFinished,\n\t\t); err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\t\trace.RaceParticipants = nil\n\n\t\tvar rows2 *sql.Rows\n\t\tif v, err := db.Query(`\n\t\t\tSELECT\n\t\t\t u.username,\n\t\t\t rp.place,\n\t\t\t CONCAT(LPAD(FLOOR(rp.run_time\/1000\/60),2,0), \":\", LPAD(FLOOR(rp.run_time\/1000%60),2,0)),\t\t\t rp.comment\n\t\t\tFROM\n\t\t\t race_participants rp\n\t\t\tLEFT JOIN\n\t\t\t users u\n\t\t\t ON u.id = rp.user_id\n\t\t\tWHERE\n\t\t\t rp.race_id = ?\n\t\t\tORDER BY\n\t\t\t CASE WHEN rp.place = -1 THEN 1 ELSE 0 END,\n\t\t\t rp.place,\n rp.run_time;\n\t\t`, race.RaceID); err != nil {\n\t\t\treturn nil, 0, err\n\t\t} else {\n\t\t\trows2 = v\n\t\t}\n\t\tdefer rows2.Close()\n\n\t\traceRacers := make([]RaceHistoryParticipants, 0)\n\t\tfor rows2.Next() {\n\t\t\tvar racer RaceHistoryParticipants\n\t\t\tif err := rows2.Scan(\n\t\t\t\t&racer.RacerName,\n\t\t\t\t&racer.RacerPlace,\n\t\t\t\t&racer.RacerTime,\n\t\t\t\t&racer.RacerComment,\n\t\t\t); err != nil {\n\t\t\t\treturn nil, 0, err\n\t\t\t}\n\t\t\traceRacers = append(raceRacers, racer)\n\t\t}\n\t\trace.RaceParticipants = raceRacers\n\t\traceHistory = append(raceHistory, race)\n\t}\n\n\tvar allRaceCount int\n\tif err := db.QueryRow(`\n\t\tSELECT count(id)\n\t\tFROM races\n\t\tWHERE finished = 1\n\t`).Scan(&allRaceCount); err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn raceHistory, allRaceCount, nil\n}\n\n\/\/ GetRaceHistory gets race history for a single race\nfunc (*Races) GetRaceHistory(raceID int) ([]RaceHistory, error) {\n\tvar rows *sql.Rows\n\tif v, err := db.Query(`\n\t\tSELECT\n\t\t\tid,\n\t\t\tranked,\n\t\t\tformat,\n\t\t\tplayer_type,\n\t\t\tgoal,\n\t\t\tdatetime_created,\n\t\t\tdatetime_finished\n\t\tFROM\n\t\t\traces\n\t\tWHERE\n\t\t\tid = ?\n\t\tGROUP BY\n\t\t\tid\n\t\tORDER BY\n\t\t\tdatetime_created DESC\n\t`, raceID); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\trows = v\n\t}\n\tdefer rows.Close()\n\n\traceHistory := make([]RaceHistory, 0)\n\tfor rows.Next() {\n\t\tvar race RaceHistory\n\t\tif err := rows.Scan(\n\t\t\t&race.RaceID,\n\t\t\t&race.RaceType,\n\t\t\t&race.RaceFormat,\n\t\t\t&race.RaceChar,\n\t\t\t&race.RaceGoal,\n\t\t\t&race.RaceDateStart,\n\t\t\t&race.RaceDateFinished,\n\t\t); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trace.RaceParticipants = nil\n\n\t\tvar rows2 *sql.Rows\n\t\tif v, err := db.Query(`\n\t\t\tSELECT\n\t\t\t u.username,\n\t\t\t rp.place,\n\t\t\t CONCAT(LPAD(FLOOR(rp.run_time\/1000\/60),2,0), \":\", LPAD(FLOOR(rp.run_time\/1000%60),2,0)),\n\t\t\t rp.comment\n\t\t\tFROM\n\t\t\t race_participants rp\n\t\t\tLEFT JOIN\n\t\t\t users u\n\t\t\t ON u.id = rp.user_id\n\t\t\tWHERE\n\t\t\t rp.race_id = ?\n\t\t\tORDER BY\n\t\t\t CASE WHEN rp.place = -1 THEN 1 ELSE 0 END,\n\t\t\t rp.place,\n rp.run_time;\n\t\t`, race.RaceID); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\trows2 = v\n\t\t}\n\t\tdefer rows2.Close()\n\n\t\traceRacers := make([]RaceHistoryParticipants, 0)\n\t\tfor rows2.Next() {\n\t\t\tvar racer RaceHistoryParticipants\n\t\t\tif err := rows2.Scan(\n\t\t\t\t&racer.RacerName,\n\t\t\t\t&racer.RacerPlace,\n\t\t\t\t&racer.RacerTime,\n\t\t\t\t&racer.RacerComment,\n\t\t\t); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\traceRacers = append(raceRacers, racer)\n\t\t}\n\t\trace.RaceParticipants = raceRacers\n\t\traceHistory = append(raceHistory, race)\n\t}\n\treturn raceHistory, nil\n}\n\n\/\/ GetRaceProfileHistory gets the race data for the profile page\nfunc (*Races) GetRaceProfileHistory(user string, racesPerPage int) ([]RaceHistory, error) {\n\tvar rows *sql.Rows\n\tif v, err := db.Query(`\n\t\tSELECT\n\t\t\tr.id,\n\t\t\tr.ranked,\n\t\t\tr.format,\n\t\t\tr.player_type,\n\t\t\tr.goal,\n\t\t\tr.datetime_created,\n\t\t\tr.datetime_finished\n\t\tFROM\n\t\t\traces r\n\t\tLEFT JOIN\n\t\t\trace_participants rp\n\t\t\tON rp.race_id = r.id\n\t\tLEFT JOIN\n\t\t\tusers u\n\t\t\tON u.id = rp.user_id\n\n\t\tWHERE\n\t\t\tr.finished = 1\n\t\t\tAND u.username = ?\n\t\tGROUP BY\n\t\t\tid\n\t\tORDER BY\n\t\t\tdatetime_created DESC\n\t\tLIMIT\n\t\t\t?\n\t`, user, racesPerPage); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\trows = v\n\t}\n\tdefer rows.Close()\n\n\traceHistory := make([]RaceHistory, 0)\n\tfor rows.Next() {\n\t\tvar race RaceHistory\n\t\tif err := rows.Scan(\n\t\t\t&race.RaceID,\n\t\t\t&race.RaceType,\n\t\t\t&race.RaceFormat,\n\t\t\t&race.RaceChar,\n\t\t\t&race.RaceGoal,\n\t\t\t&race.RaceDateStart,\n\t\t\t&race.RaceDateFinished,\n\t\t); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trace.RaceParticipants = nil\n\n\t\tvar rows2 *sql.Rows\n\t\tif v, err := db.Query(`\n\t\t\tSELECT\n\t\t\t u.username,\n\t\t\t rp.place,\n\t\t\t CONCAT(LPAD(FLOOR(rp.run_time\/1000\/60),2,0), \":\", LPAD(FLOOR(rp.run_time\/1000%60),2,0)),\n\t\t\t rp.comment\n\t\t\tFROM\n\t\t\t race_participants rp\n\t\t\tLEFT JOIN\n\t\t\t users u\n\t\t\t ON u.id = rp.user_id\n\t\t\tWHERE\n\t\t\t rp.race_id = ?\n\t\t\tORDER BY\n\t\t\t CASE WHEN rp.place = -1 THEN 1 ELSE 0 END,\n\t\t\t rp.place,\n rp.run_time;\n\t\t`, race.RaceID); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\trows2 = v\n\t\t}\n\t\tdefer rows2.Close()\n\n\t\traceRacers := make([]RaceHistoryParticipants, 0)\n\t\tfor rows2.Next() {\n\t\t\tvar racer RaceHistoryParticipants\n\t\t\tif err := rows2.Scan(\n\t\t\t\t&racer.RacerName,\n\t\t\t\t&racer.RacerPlace,\n\t\t\t\t&racer.RacerTime,\n\t\t\t\t&racer.RacerComment,\n\t\t\t); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\traceRacers = append(raceRacers, racer)\n\t\t}\n\t\trace.RaceParticipants = raceRacers\n\t\traceHistory = append(raceHistory, race)\n\t}\n\n\tvar allRaceCount int\n\tif err := db.QueryRow(`\n\t\tSELECT count(id)\n\t\tFROM races\n\t\tWHERE finished = 1\n\t`).Scan(&allRaceCount); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn raceHistory, nil\n}\n<commit_msg>Hehe new lines<commit_after>package models\n\nimport (\n\t\"database\/sql\"\n\t\"time\"\n)\n\n\/*\n\tThese are more functions for querying the \"races\" table,\n\tbut these functions are only used for the website\n*\/\n\n\/\/ RaceHistory gets the history for each race in the database\ntype RaceHistory struct {\n\tRaceID int\n\tRaceType string\n\tRaceFormat string\n\tRaceChar string\n\tRaceGoal string\n\tRaceDateStart time.Time\n\tRaceDateFinished time.Time\n\tRaceParticipants []RaceHistoryParticipants\n}\n\n\/\/ RaceHistoryParticipants gets the user stats for each racer in each race\ntype RaceHistoryParticipants struct {\n\tRacerName string\n\tRacerPlace int\n\tRacerTime string\n\tRacerComment string\n}\n\n\/\/ GetRacesHistory gets all data for all races\nfunc (*Races) GetRacesHistory(currentPage int, racesPerPage int, raceOffset int) ([]RaceHistory, int, error) {\n\tvar rows *sql.Rows\n\tif v, err := db.Query(`\n\t\tSELECT\n\t\t\tid,\n\t\t\tranked,\n\t\t\tformat,\n\t\t\tplayer_type,\n\t\t\tgoal,\n\t\t\tdatetime_created,\n\t\t\tdatetime_finished\n\t\tFROM\n\t\t\traces\n\t\tWHERE\n\t\t\tfinished = 1\n\t\tGROUP BY\n\t\t\tid\n\t\tORDER BY\n\t\t\tdatetime_created DESC\n\t\tLIMIT\n\t\t\t?\n\t\tOFFSET\n\t\t\t?\n\t`, racesPerPage, raceOffset); err != nil {\n\t\treturn nil, 0, err\n\t} else {\n\t\trows = v\n\t}\n\tdefer rows.Close()\n\n\traceHistory := make([]RaceHistory, 0)\n\tfor rows.Next() {\n\t\tvar race RaceHistory\n\t\tif err := rows.Scan(\n\t\t\t&race.RaceID,\n\t\t\t&race.RaceType,\n\t\t\t&race.RaceFormat,\n\t\t\t&race.RaceChar,\n\t\t\t&race.RaceGoal,\n\t\t\t&race.RaceDateStart,\n\t\t\t&race.RaceDateFinished,\n\t\t); err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\t\trace.RaceParticipants = nil\n\n\t\tvar rows2 *sql.Rows\n\t\tif v, err := db.Query(`\n\t\t\tSELECT\n\t\t\t u.username,\n\t\t\t rp.place,\n\t\t\t CONCAT(LPAD(FLOOR(rp.run_time\/1000\/60),2,0), \":\", LPAD(FLOOR(rp.run_time\/1000%60),2,0)),\t\t\t \n\t\t\t rp.comment\n\t\t\tFROM\n\t\t\t race_participants rp\n\t\t\tLEFT JOIN\n\t\t\t users u\n\t\t\t ON u.id = rp.user_id\n\t\t\tWHERE\n\t\t\t rp.race_id = ?\n\t\t\tORDER BY\n\t\t\t CASE WHEN rp.place = -1 THEN 1 ELSE 0 END,\n\t\t\t rp.place,\n rp.run_time;\n\t\t`, race.RaceID); err != nil {\n\t\t\treturn nil, 0, err\n\t\t} else {\n\t\t\trows2 = v\n\t\t}\n\t\tdefer rows2.Close()\n\n\t\traceRacers := make([]RaceHistoryParticipants, 0)\n\t\tfor rows2.Next() {\n\t\t\tvar racer RaceHistoryParticipants\n\t\t\tif err := rows2.Scan(\n\t\t\t\t&racer.RacerName,\n\t\t\t\t&racer.RacerPlace,\n\t\t\t\t&racer.RacerTime,\n\t\t\t\t&racer.RacerComment,\n\t\t\t); err != nil {\n\t\t\t\treturn nil, 0, err\n\t\t\t}\n\t\t\traceRacers = append(raceRacers, racer)\n\t\t}\n\t\trace.RaceParticipants = raceRacers\n\t\traceHistory = append(raceHistory, race)\n\t}\n\n\tvar allRaceCount int\n\tif err := db.QueryRow(`\n\t\tSELECT count(id)\n\t\tFROM races\n\t\tWHERE finished = 1\n\t`).Scan(&allRaceCount); err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn raceHistory, allRaceCount, nil\n}\n\n\/\/ GetRaceHistory gets race history for a single race\nfunc (*Races) GetRaceHistory(raceID int) ([]RaceHistory, error) {\n\tvar rows *sql.Rows\n\tif v, err := db.Query(`\n\t\tSELECT\n\t\t\tid,\n\t\t\tranked,\n\t\t\tformat,\n\t\t\tplayer_type,\n\t\t\tgoal,\n\t\t\tdatetime_created,\n\t\t\tdatetime_finished\n\t\tFROM\n\t\t\traces\n\t\tWHERE\n\t\t\tid = ?\n\t\tGROUP BY\n\t\t\tid\n\t\tORDER BY\n\t\t\tdatetime_created DESC\n\t`, raceID); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\trows = v\n\t}\n\tdefer rows.Close()\n\n\traceHistory := make([]RaceHistory, 0)\n\tfor rows.Next() {\n\t\tvar race RaceHistory\n\t\tif err := rows.Scan(\n\t\t\t&race.RaceID,\n\t\t\t&race.RaceType,\n\t\t\t&race.RaceFormat,\n\t\t\t&race.RaceChar,\n\t\t\t&race.RaceGoal,\n\t\t\t&race.RaceDateStart,\n\t\t\t&race.RaceDateFinished,\n\t\t); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trace.RaceParticipants = nil\n\n\t\tvar rows2 *sql.Rows\n\t\tif v, err := db.Query(`\n\t\t\tSELECT\n\t\t\t u.username,\n\t\t\t rp.place,\n\t\t\t CONCAT(LPAD(FLOOR(rp.run_time\/1000\/60),2,0), \":\", LPAD(FLOOR(rp.run_time\/1000%60),2,0)),\n\t\t\t rp.comment\n\t\t\tFROM\n\t\t\t race_participants rp\n\t\t\tLEFT JOIN\n\t\t\t users u\n\t\t\t ON u.id = rp.user_id\n\t\t\tWHERE\n\t\t\t rp.race_id = ?\n\t\t\tORDER BY\n\t\t\t CASE WHEN rp.place = -1 THEN 1 ELSE 0 END,\n\t\t\t rp.place,\n rp.run_time;\n\t\t`, race.RaceID); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\trows2 = v\n\t\t}\n\t\tdefer rows2.Close()\n\n\t\traceRacers := make([]RaceHistoryParticipants, 0)\n\t\tfor rows2.Next() {\n\t\t\tvar racer RaceHistoryParticipants\n\t\t\tif err := rows2.Scan(\n\t\t\t\t&racer.RacerName,\n\t\t\t\t&racer.RacerPlace,\n\t\t\t\t&racer.RacerTime,\n\t\t\t\t&racer.RacerComment,\n\t\t\t); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\traceRacers = append(raceRacers, racer)\n\t\t}\n\t\trace.RaceParticipants = raceRacers\n\t\traceHistory = append(raceHistory, race)\n\t}\n\treturn raceHistory, nil\n}\n\n\/\/ GetRaceProfileHistory gets the race data for the profile page\nfunc (*Races) GetRaceProfileHistory(user string, racesPerPage int) ([]RaceHistory, error) {\n\tvar rows *sql.Rows\n\tif v, err := db.Query(`\n\t\tSELECT\n\t\t\tr.id,\n\t\t\tr.ranked,\n\t\t\tr.format,\n\t\t\tr.player_type,\n\t\t\tr.goal,\n\t\t\tr.datetime_created,\n\t\t\tr.datetime_finished\n\t\tFROM\n\t\t\traces r\n\t\tLEFT JOIN\n\t\t\trace_participants rp\n\t\t\tON rp.race_id = r.id\n\t\tLEFT JOIN\n\t\t\tusers u\n\t\t\tON u.id = rp.user_id\n\n\t\tWHERE\n\t\t\tr.finished = 1\n\t\t\tAND u.username = ?\n\t\tGROUP BY\n\t\t\tid\n\t\tORDER BY\n\t\t\tdatetime_created DESC\n\t\tLIMIT\n\t\t\t?\n\t`, user, racesPerPage); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\trows = v\n\t}\n\tdefer rows.Close()\n\n\traceHistory := make([]RaceHistory, 0)\n\tfor rows.Next() {\n\t\tvar race RaceHistory\n\t\tif err := rows.Scan(\n\t\t\t&race.RaceID,\n\t\t\t&race.RaceType,\n\t\t\t&race.RaceFormat,\n\t\t\t&race.RaceChar,\n\t\t\t&race.RaceGoal,\n\t\t\t&race.RaceDateStart,\n\t\t\t&race.RaceDateFinished,\n\t\t); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trace.RaceParticipants = nil\n\n\t\tvar rows2 *sql.Rows\n\t\tif v, err := db.Query(`\n\t\t\tSELECT\n\t\t\t u.username,\n\t\t\t rp.place,\n\t\t\t CONCAT(LPAD(FLOOR(rp.run_time\/1000\/60),2,0), \":\", LPAD(FLOOR(rp.run_time\/1000%60),2,0)),\n\t\t\t rp.comment\n\t\t\tFROM\n\t\t\t race_participants rp\n\t\t\tLEFT JOIN\n\t\t\t users u\n\t\t\t ON u.id = rp.user_id\n\t\t\tWHERE\n\t\t\t rp.race_id = ?\n\t\t\tORDER BY\n\t\t\t CASE WHEN rp.place = -1 THEN 1 ELSE 0 END,\n\t\t\t rp.place,\n rp.run_time;\n\t\t`, race.RaceID); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\trows2 = v\n\t\t}\n\t\tdefer rows2.Close()\n\n\t\traceRacers := make([]RaceHistoryParticipants, 0)\n\t\tfor rows2.Next() {\n\t\t\tvar racer RaceHistoryParticipants\n\t\t\tif err := rows2.Scan(\n\t\t\t\t&racer.RacerName,\n\t\t\t\t&racer.RacerPlace,\n\t\t\t\t&racer.RacerTime,\n\t\t\t\t&racer.RacerComment,\n\t\t\t); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\traceRacers = append(raceRacers, racer)\n\t\t}\n\t\trace.RaceParticipants = raceRacers\n\t\traceHistory = append(raceHistory, race)\n\t}\n\n\tvar allRaceCount int\n\tif err := db.QueryRow(`\n\t\tSELECT count(id)\n\t\tFROM races\n\t\tWHERE finished = 1\n\t`).Scan(&allRaceCount); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn raceHistory, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package catalog\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/rancher\/norman\/api\/access\"\n\t\"github.com\/rancher\/norman\/httperror\"\n\t\"github.com\/rancher\/norman\/types\"\n\thelmlib \"github.com\/rancher\/rancher\/pkg\/catalog\/helm\"\n\tcatUtil \"github.com\/rancher\/rancher\/pkg\/catalog\/utils\"\n\thcommon \"github.com\/rancher\/rancher\/pkg\/controllers\/user\/helm\/common\"\n\tv3 \"github.com\/rancher\/types\/apis\/management.cattle.io\/v3\"\n\tmanagementschema \"github.com\/rancher\/types\/apis\/management.cattle.io\/v3\/schema\"\n\tclient \"github.com\/rancher\/types\/client\/management\/v3\"\n)\n\ntype TemplateWrapper struct {\n\tCatalogLister v3.CatalogLister\n\tClusterCatalogLister v3.ClusterCatalogLister\n\tProjectCatalogLister v3.ProjectCatalogLister\n\tCatalogTemplateVersionLister v3.CatalogTemplateVersionLister\n}\n\nfunc (t TemplateWrapper) TemplateFormatter(apiContext *types.APIContext, resource *types.RawResource) {\n\tvar prjCatalogName, clusterCatalogName string\n\t\/\/ version links\n\tresource.Values[\"versionLinks\"] = t.extractVersionLinks(apiContext, resource)\n\n\t\/\/icon\n\tdelete(resource.Values, \"icon\")\n\tresource.Links[\"icon\"] = apiContext.URLBuilder.Link(\"icon\", resource)\n\n\tval := resource.Values\n\tif val[client.CatalogTemplateFieldCatalogID] != nil {\n\t\t\/\/catalog link\n\t\tcatalogSchema := apiContext.Schemas.Schema(&managementschema.Version, client.CatalogType)\n\t\tcatalogName := strings.Split(resource.ID, \"-\")[0]\n\t\tresource.Links[\"catalog\"] = apiContext.URLBuilder.ResourceLinkByID(catalogSchema, catalogName)\n\t}\n\n\tif val[client.CatalogTemplateFieldProjectCatalogID] != nil {\n\t\tprjCatID, ok := val[client.CatalogTemplateFieldProjectCatalogID].(string)\n\t\tif ok {\n\t\t\tprjCatalogName = prjCatID\n\t\t}\n\t\t\/\/project catalog link\n\t\tprjCatalogSchema := apiContext.Schemas.Schema(&managementschema.Version, client.ProjectCatalogType)\n\t\tresource.Links[\"projectCatalog\"] = apiContext.URLBuilder.ResourceLinkByID(prjCatalogSchema, prjCatalogName)\n\t}\n\n\tif val[client.CatalogTemplateFieldClusterCatalogID] != nil {\n\t\tclusterCatID, ok := val[client.CatalogTemplateFieldClusterCatalogID].(string)\n\t\tif ok {\n\t\t\tclusterCatalogName = clusterCatID\n\t\t}\n\t\t\/\/cluster catalog link\n\t\tclCatalogSchema := apiContext.Schemas.Schema(&managementschema.Version, client.ClusterCatalogType)\n\t\tresource.Links[\"clusterCatalog\"] = apiContext.URLBuilder.ResourceLinkByID(clCatalogSchema, clusterCatalogName)\n\t}\n\n\t\/\/ delete category\n\tdelete(resource.Values, \"category\")\n\n\t\/\/ delete versions\n\tdelete(resource.Values, \"versions\")\n}\n\nfunc (t TemplateWrapper) extractVersionLinks(apiContext *types.APIContext, resource *types.RawResource) map[string]string {\n\tschema := apiContext.Schemas.Schema(&managementschema.Version, client.TemplateVersionType)\n\tr := map[string]string{}\n\tversionMap, ok := resource.Values[\"versions\"].([]interface{})\n\tif ok {\n\t\tfor _, version := range versionMap {\n\t\t\trevision := \"\"\n\t\t\tif v, ok := version.(map[string]interface{})[\"revision\"].(int64); ok {\n\t\t\t\trevision = strconv.FormatInt(v, 10)\n\t\t\t}\n\t\t\tversionString := version.(map[string]interface{})[\"version\"].(string)\n\t\t\tversionID := fmt.Sprintf(\"%v-%v\", resource.ID, versionString)\n\t\t\tif revision != \"\" {\n\t\t\t\tversionID = fmt.Sprintf(\"%v-%v\", resource.ID, revision)\n\t\t\t}\n\t\t\tif t.templateVersionForRancherVersion(apiContext, version.(map[string]interface{})[\"externalId\"].(string)) {\n\t\t\t\tr[versionString] = apiContext.URLBuilder.ResourceLinkByID(schema, versionID)\n\t\t\t}\n\t\t}\n\t}\n\treturn r\n}\n\nfunc (t TemplateWrapper) TemplateIconHandler(apiContext *types.APIContext, next types.RequestHandler) error {\n\tswitch apiContext.Link {\n\tcase \"icon\":\n\t\ttemplate := &client.Template{}\n\t\tif err := access.ByID(apiContext, apiContext.Version, apiContext.Type, apiContext.ID, template); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif template.Icon == \"\" {\n\t\t\thttp.Error(apiContext.Response, \"\", http.StatusNoContent)\n\t\t\treturn nil\n\t\t}\n\n\t\tvar (\n\t\t\tcatalogType string\n\t\t\tcatalogName string\n\t\t\ticonBytes []byte\n\t\t\terr error\n\t\t)\n\n\t\tif template.CatalogID != \"\" {\n\t\t\tcatalogType = client.CatalogType\n\t\t\tcatalogName = template.CatalogID\n\t\t} else if template.ClusterCatalogID != \"\" {\n\t\t\tcatalogType = client.ClusterCatalogType\n\t\t\tcatalogName = template.ClusterCatalogID\n\t\t} else if template.ProjectCatalogID != \"\" {\n\t\t\tcatalogType = client.ProjectCatalogType\n\t\t\tcatalogName = template.ProjectCatalogID\n\t\t}\n\n\t\tnamespace, name := helmlib.SplitNamespaceAndName(catalogName)\n\t\tcatalog, err := helmlib.GetCatalog(catalogType, namespace, name, t.CatalogLister, t.ClusterCatalogLister, t.ProjectCatalogLister)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\thelm, err := helmlib.New(catalog)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ticonBytes, err = helm.LoadIcon(template.IconFilename, template.Icon)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tt, err := time.Parse(time.RFC3339, template.Created)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ticonReader := bytes.NewReader(iconBytes)\n\t\tapiContext.Response.Header().Set(\"Cache-Control\", \"private, max-age=604800\")\n\t\thttp.ServeContent(apiContext.Response, apiContext.Request, template.IconFilename, t, iconReader)\n\t\treturn nil\n\tdefault:\n\t\treturn httperror.NewAPIError(httperror.NotFound, \"not found\")\n\t}\n}\n\n\/\/ templateVersionForRancherVersion indicates if a templateVersion works with the rancher server version\n\/\/ In the error case it will always return true - if a template is actually invalid for that rancher version\n\/\/ API validation will handle the rejection\nfunc (t TemplateWrapper) templateVersionForRancherVersion(apiContext *types.APIContext, externalID string) bool {\n\tvar rancherVersion string\n\tfor query, fields := range apiContext.Query {\n\t\tif query == \"rancherVersion\" {\n\t\t\trancherVersion = fields[0]\n\t\t}\n\t}\n\n\tif !catUtil.ReleaseServerVersion(rancherVersion) {\n\t\treturn true\n\t}\n\n\ttemplateVersionID, namespace, err := hcommon.ParseExternalID(externalID)\n\tif err != nil {\n\t\treturn true\n\t}\n\n\ttemplate, err := t.CatalogTemplateVersionLister.Get(namespace, templateVersionID)\n\tif err != nil {\n\t\treturn true\n\t}\n\n\terr = catUtil.ValidateRancherVersion(template)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n<commit_msg>Modify template formatter to not use proxy<commit_after>package catalog\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/rancher\/norman\/api\/access\"\n\t\"github.com\/rancher\/norman\/httperror\"\n\t\"github.com\/rancher\/norman\/types\"\n\thelmlib \"github.com\/rancher\/rancher\/pkg\/catalog\/helm\"\n\tcatUtil \"github.com\/rancher\/rancher\/pkg\/catalog\/utils\"\n\thcommon \"github.com\/rancher\/rancher\/pkg\/controllers\/user\/helm\/common\"\n\tv3 \"github.com\/rancher\/types\/apis\/management.cattle.io\/v3\"\n\tmanagementschema \"github.com\/rancher\/types\/apis\/management.cattle.io\/v3\/schema\"\n\tclient \"github.com\/rancher\/types\/client\/management\/v3\"\n)\n\ntype TemplateWrapper struct {\n\tCatalogLister v3.CatalogLister\n\tClusterCatalogLister v3.ClusterCatalogLister\n\tProjectCatalogLister v3.ProjectCatalogLister\n\tCatalogTemplateVersionLister v3.CatalogTemplateVersionLister\n}\n\nfunc (t TemplateWrapper) TemplateFormatter(apiContext *types.APIContext, resource *types.RawResource) {\n\tvar prjCatalogName, clusterCatalogName string\n\t\/\/ version links\n\tresource.Values[\"versionLinks\"] = t.extractVersionLinks(apiContext, resource)\n\n\t\/\/icon\n\tic, ok := resource.Values[\"icon\"]\n\tif ok {\n\t\tif strings.HasPrefix(ic.(string), \"file:\") {\n\t\t\tdelete(resource.Values, \"icon\")\n\t\t\tresource.Links[\"icon\"] = apiContext.URLBuilder.Link(\"icon\", resource)\n\n\t\t} else {\n\t\t\tdelete(resource.Values, \"icon\")\n\t\t\tresource.Links[\"icon\"] = ic.(string)\n\t\t}\n\t} else {\n\t\tdelete(resource.Values, \"icon\")\n\t\tresource.Links[\"icon\"] = apiContext.URLBuilder.Link(\"icon\", resource)\n\t}\n\n\tval := resource.Values\n\tif val[client.CatalogTemplateFieldCatalogID] != nil {\n\t\t\/\/catalog link\n\t\tcatalogSchema := apiContext.Schemas.Schema(&managementschema.Version, client.CatalogType)\n\t\tcatalogName := strings.Split(resource.ID, \"-\")[0]\n\t\tresource.Links[\"catalog\"] = apiContext.URLBuilder.ResourceLinkByID(catalogSchema, catalogName)\n\t}\n\n\tif val[client.CatalogTemplateFieldProjectCatalogID] != nil {\n\t\tprjCatID, ok := val[client.CatalogTemplateFieldProjectCatalogID].(string)\n\t\tif ok {\n\t\t\tprjCatalogName = prjCatID\n\t\t}\n\t\t\/\/project catalog link\n\t\tprjCatalogSchema := apiContext.Schemas.Schema(&managementschema.Version, client.ProjectCatalogType)\n\t\tresource.Links[\"projectCatalog\"] = apiContext.URLBuilder.ResourceLinkByID(prjCatalogSchema, prjCatalogName)\n\t}\n\n\tif val[client.CatalogTemplateFieldClusterCatalogID] != nil {\n\t\tclusterCatID, ok := val[client.CatalogTemplateFieldClusterCatalogID].(string)\n\t\tif ok {\n\t\t\tclusterCatalogName = clusterCatID\n\t\t}\n\t\t\/\/cluster catalog link\n\t\tclCatalogSchema := apiContext.Schemas.Schema(&managementschema.Version, client.ClusterCatalogType)\n\t\tresource.Links[\"clusterCatalog\"] = apiContext.URLBuilder.ResourceLinkByID(clCatalogSchema, clusterCatalogName)\n\t}\n\n\t\/\/ delete category\n\tdelete(resource.Values, \"category\")\n\n\t\/\/ delete versions\n\tdelete(resource.Values, \"versions\")\n}\n\nfunc (t TemplateWrapper) extractVersionLinks(apiContext *types.APIContext, resource *types.RawResource) map[string]string {\n\tschema := apiContext.Schemas.Schema(&managementschema.Version, client.TemplateVersionType)\n\tr := map[string]string{}\n\tversionMap, ok := resource.Values[\"versions\"].([]interface{})\n\tif ok {\n\t\tfor _, version := range versionMap {\n\t\t\trevision := \"\"\n\t\t\tif v, ok := version.(map[string]interface{})[\"revision\"].(int64); ok {\n\t\t\t\trevision = strconv.FormatInt(v, 10)\n\t\t\t}\n\t\t\tversionString := version.(map[string]interface{})[\"version\"].(string)\n\t\t\tversionID := fmt.Sprintf(\"%v-%v\", resource.ID, versionString)\n\t\t\tif revision != \"\" {\n\t\t\t\tversionID = fmt.Sprintf(\"%v-%v\", resource.ID, revision)\n\t\t\t}\n\t\t\tif t.templateVersionForRancherVersion(apiContext, version.(map[string]interface{})[\"externalId\"].(string)) {\n\t\t\t\tr[versionString] = apiContext.URLBuilder.ResourceLinkByID(schema, versionID)\n\t\t\t}\n\t\t}\n\t}\n\treturn r\n}\n\nfunc (t TemplateWrapper) TemplateIconHandler(apiContext *types.APIContext, next types.RequestHandler) error {\n\tswitch apiContext.Link {\n\tcase \"icon\":\n\t\ttemplate := &client.Template{}\n\t\tif err := access.ByID(apiContext, apiContext.Version, apiContext.Type, apiContext.ID, template); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif template.Icon == \"\" {\n\t\t\thttp.Error(apiContext.Response, \"\", http.StatusNoContent)\n\t\t\treturn nil\n\t\t}\n\n\t\tvar (\n\t\t\tcatalogType string\n\t\t\tcatalogName string\n\t\t\ticonBytes []byte\n\t\t\terr error\n\t\t)\n\n\t\tif template.CatalogID != \"\" {\n\t\t\tcatalogType = client.CatalogType\n\t\t\tcatalogName = template.CatalogID\n\t\t} else if template.ClusterCatalogID != \"\" {\n\t\t\tcatalogType = client.ClusterCatalogType\n\t\t\tcatalogName = template.ClusterCatalogID\n\t\t} else if template.ProjectCatalogID != \"\" {\n\t\t\tcatalogType = client.ProjectCatalogType\n\t\t\tcatalogName = template.ProjectCatalogID\n\t\t}\n\n\t\tnamespace, name := helmlib.SplitNamespaceAndName(catalogName)\n\t\tcatalog, err := helmlib.GetCatalog(catalogType, namespace, name, t.CatalogLister, t.ClusterCatalogLister, t.ProjectCatalogLister)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\thelm, err := helmlib.New(catalog)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ticonBytes, err = helm.LoadIcon(template.IconFilename, template.Icon)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tt, err := time.Parse(time.RFC3339, template.Created)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ticonReader := bytes.NewReader(iconBytes)\n\t\tapiContext.Response.Header().Set(\"Cache-Control\", \"private, max-age=604800\")\n\t\t\/\/ add security headers (similar to raw.githubusercontent)\n\t\tapiContext.Response.Header().Set(\"Content-Security-Policy\", \"default-src 'none'; style-src 'unsafe-inline'; sandbox\")\n\t\tapiContext.Response.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\t\thttp.ServeContent(apiContext.Response, apiContext.Request, template.IconFilename, t, iconReader)\n\t\treturn nil\n\tdefault:\n\t\treturn httperror.NewAPIError(httperror.NotFound, \"not found\")\n\t}\n}\n\n\/\/ templateVersionForRancherVersion indicates if a templateVersion works with the rancher server version\n\/\/ In the error case it will always return true - if a template is actually invalid for that rancher version\n\/\/ API validation will handle the rejection\nfunc (t TemplateWrapper) templateVersionForRancherVersion(apiContext *types.APIContext, externalID string) bool {\n\tvar rancherVersion string\n\tfor query, fields := range apiContext.Query {\n\t\tif query == \"rancherVersion\" {\n\t\t\trancherVersion = fields[0]\n\t\t}\n\t}\n\n\tif !catUtil.ReleaseServerVersion(rancherVersion) {\n\t\treturn true\n\t}\n\n\ttemplateVersionID, namespace, err := hcommon.ParseExternalID(externalID)\n\tif err != nil {\n\t\treturn true\n\t}\n\n\ttemplate, err := t.CatalogTemplateVersionLister.Get(namespace, templateVersionID)\n\tif err != nil {\n\t\treturn true\n\t}\n\n\terr = catUtil.ValidateRancherVersion(template)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 The Knative Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage v1alpha1\n\nimport (\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"knative.dev\/pkg\/apis\"\n\tduckv1beta1 \"knative.dev\/pkg\/apis\/duck\/v1beta1\"\n\t\"knative.dev\/pkg\/kmeta\"\n\tnet \"knative.dev\/serving\/pkg\/apis\/networking\"\n)\n\n\/\/ +genclient\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\n\/\/ PodAutoscaler is a Knative abstraction that encapsulates the interface by which Knative\n\/\/ components instantiate autoscalers. This definition is an abstraction that may be backed\n\/\/ by multiple definitions. For more information, see the Knative Pluggability presentation:\n\/\/ https:\/\/docs.google.com\/presentation\/d\/10KWynvAJYuOEWy69VBa6bHJVCqIsz1TNdEKosNvcpPY\/edit\ntype PodAutoscaler struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\t\/\/ +optional\n\tmetav1.ObjectMeta `json:\"metadata,omitempty\"`\n\n\t\/\/ Spec holds the desired state of the PodAutoscaler (from the client).\n\t\/\/ +optional\n\tSpec PodAutoscalerSpec `json:\"spec,omitempty\"`\n\n\t\/\/ Status communicates the observed state of the PodAutoscaler (from the controller).\n\t\/\/ +optional\n\tStatus PodAutoscalerStatus `json:\"status,omitempty\"`\n}\n\n\/\/ Verify that PodAutoscaler adheres to the appropriate interfaces.\nvar (\n\t\/\/ Check that PodAutoscaler can be validated and can be defaulted.\n\t_ apis.Validatable = (*PodAutoscaler)(nil)\n\t_ apis.Defaultable = (*PodAutoscaler)(nil)\n\n\t\/\/ Check that we can create OwnerReferences to a PodAutoscaler.\n\t_ kmeta.OwnerRefable = (*PodAutoscaler)(nil)\n)\n\n\/\/ ReachabilityType is the enumeration type for the different states of reachability\n\/\/ to the `ScaleTarget` of a `PodAutoscaler`\ntype ReachabilityType string\n\nconst (\n\t\/\/ ReachabilityUnknown means the reachability of the `ScaleTarget` is unknown.\n\t\/\/ Used when the reachability cannot be determined, eg. during activation.\n\tReachabilityUnknown ReachabilityType = \"\"\n\n\t\/\/ ReachabilityReachable means the `ScaleTarget` is reachable, ie. it has an active route.\n\tReachabilityReachable ReachabilityType = \"Reachable\"\n\n\t\/\/ ReachabilityReachable means the `ScaleTarget` is not reachable, ie. it does not have an active route.\n\tReachabilityUnreachable ReachabilityType = \"Unreachable\"\n)\n\n\/\/ PodAutoscalerSpec holds the desired state of the PodAutoscaler (from the client).\ntype PodAutoscalerSpec struct {\n\t\/\/ DeprecatedGeneration was used prior in Kubernetes versions <1.11\n\t\/\/ when metadata.generation was not being incremented by the api server\n\t\/\/\n\t\/\/ This property will be dropped in future Knative releases and should\n\t\/\/ not be used - use metadata.generation\n\t\/\/\n\t\/\/ Tracking issue: https:\/\/knative.dev\/serving\/issues\/643\n\t\/\/\n\t\/\/ +optional\n\tDeprecatedGeneration int64 `json:\"generation,omitempty\"`\n\n\t\/\/ ContainerConcurrency specifies the maximum allowed\n\t\/\/ in-flight (concurrent) requests per container of the Revision.\n\t\/\/ Defaults to `0` which means unlimited concurrency.\n\t\/\/ +optional\n\tContainerConcurrency int64 `json:\"containerConcurrency,omitempty\"`\n\n\t\/\/ ScaleTargetRef defines the \/scale-able resource that this PodAutoscaler\n\t\/\/ is responsible for quickly right-sizing.\n\tScaleTargetRef corev1.ObjectReference `json:\"scaleTargetRef\"`\n\n\t\/\/ Reachable specifies whether or not the `ScaleTargetRef` can be reached (ie. has a route).\n\t\/\/ Defaults to `ReachabilityUnknown`\n\t\/\/ +optional\n\tReachability ReachabilityType `json:\"reachable,omitempty\"`\n\n\t\/\/ DeprecatedServiceName holds the name of a core Kubernetes Service resource that\n\t\/\/ load balances over the pods referenced by the ScaleTargetRef.\n\tDeprecatedServiceName string `json:\"serviceName\"`\n\n\t\/\/ The application-layer protocol. Matches `ProtocolType` inferred from the revision spec.\n\tProtocolType net.ProtocolType `json:\"protocolType\"`\n}\n\nconst (\n\t\/\/ PodAutoscalerConditionReady is set when the revision is starting to materialize\n\t\/\/ runtime resources, and becomes true when those resources are ready.\n\tPodAutoscalerConditionReady = apis.ConditionReady\n\t\/\/ PodAutoscalerConditionActive is set when the PodAutoscaler's ScaleTargetRef is receiving traffic.\n\tPodAutoscalerConditionActive apis.ConditionType = \"Active\"\n)\n\n\/\/ PodAutoscalerStatus communicates the observed state of the PodAutoscaler (from the controller).\ntype PodAutoscalerStatus struct {\n\tduckv1beta1.Status\n\n\t\/\/ ServiceName is the K8s Service name that serves the revision, scaled by this PA.\n\t\/\/ The service is created and owned by the ServerlessService object owned by this PA.\n\tServiceName string `json:\"serviceName\"`\n\n\t\/\/ MetricsServiceName is the K8s Service name that provides revision metrics.\n\t\/\/ The service is managed by the PA object.\n\tMetricsServiceName string `json:\"metricsServiceName\"`\n\n\t\/\/ DesiredScale shows the current desired number of replicas for the revision.\n\tDesiredScale *int32 `json:\"desiredScale,omitempty\"`\n\n\t\/\/ ActualScale shows the actual number of replicas for the revision.\n\tActualScale *int32 `json:\"actualScale,omitempty\"`\n}\n\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\n\/\/ PodAutoscalerList is a list of PodAutoscaler resources\ntype PodAutoscalerList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\n\tItems []PodAutoscaler `json:\"items\"`\n}\n<commit_msg>Fix `Reachability` json struct tag (#5251)<commit_after>\/*\nCopyright 2018 The Knative Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage v1alpha1\n\nimport (\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"knative.dev\/pkg\/apis\"\n\tduckv1beta1 \"knative.dev\/pkg\/apis\/duck\/v1beta1\"\n\t\"knative.dev\/pkg\/kmeta\"\n\tnet \"knative.dev\/serving\/pkg\/apis\/networking\"\n)\n\n\/\/ +genclient\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\n\/\/ PodAutoscaler is a Knative abstraction that encapsulates the interface by which Knative\n\/\/ components instantiate autoscalers. This definition is an abstraction that may be backed\n\/\/ by multiple definitions. For more information, see the Knative Pluggability presentation:\n\/\/ https:\/\/docs.google.com\/presentation\/d\/10KWynvAJYuOEWy69VBa6bHJVCqIsz1TNdEKosNvcpPY\/edit\ntype PodAutoscaler struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\t\/\/ +optional\n\tmetav1.ObjectMeta `json:\"metadata,omitempty\"`\n\n\t\/\/ Spec holds the desired state of the PodAutoscaler (from the client).\n\t\/\/ +optional\n\tSpec PodAutoscalerSpec `json:\"spec,omitempty\"`\n\n\t\/\/ Status communicates the observed state of the PodAutoscaler (from the controller).\n\t\/\/ +optional\n\tStatus PodAutoscalerStatus `json:\"status,omitempty\"`\n}\n\n\/\/ Verify that PodAutoscaler adheres to the appropriate interfaces.\nvar (\n\t\/\/ Check that PodAutoscaler can be validated and can be defaulted.\n\t_ apis.Validatable = (*PodAutoscaler)(nil)\n\t_ apis.Defaultable = (*PodAutoscaler)(nil)\n\n\t\/\/ Check that we can create OwnerReferences to a PodAutoscaler.\n\t_ kmeta.OwnerRefable = (*PodAutoscaler)(nil)\n)\n\n\/\/ ReachabilityType is the enumeration type for the different states of reachability\n\/\/ to the `ScaleTarget` of a `PodAutoscaler`\ntype ReachabilityType string\n\nconst (\n\t\/\/ ReachabilityUnknown means the reachability of the `ScaleTarget` is unknown.\n\t\/\/ Used when the reachability cannot be determined, eg. during activation.\n\tReachabilityUnknown ReachabilityType = \"\"\n\n\t\/\/ ReachabilityReachable means the `ScaleTarget` is reachable, ie. it has an active route.\n\tReachabilityReachable ReachabilityType = \"Reachable\"\n\n\t\/\/ ReachabilityReachable means the `ScaleTarget` is not reachable, ie. it does not have an active route.\n\tReachabilityUnreachable ReachabilityType = \"Unreachable\"\n)\n\n\/\/ PodAutoscalerSpec holds the desired state of the PodAutoscaler (from the client).\ntype PodAutoscalerSpec struct {\n\t\/\/ DeprecatedGeneration was used prior in Kubernetes versions <1.11\n\t\/\/ when metadata.generation was not being incremented by the api server\n\t\/\/\n\t\/\/ This property will be dropped in future Knative releases and should\n\t\/\/ not be used - use metadata.generation\n\t\/\/\n\t\/\/ Tracking issue: https:\/\/knative.dev\/serving\/issues\/643\n\t\/\/\n\t\/\/ +optional\n\tDeprecatedGeneration int64 `json:\"generation,omitempty\"`\n\n\t\/\/ ContainerConcurrency specifies the maximum allowed\n\t\/\/ in-flight (concurrent) requests per container of the Revision.\n\t\/\/ Defaults to `0` which means unlimited concurrency.\n\t\/\/ +optional\n\tContainerConcurrency int64 `json:\"containerConcurrency,omitempty\"`\n\n\t\/\/ ScaleTargetRef defines the \/scale-able resource that this PodAutoscaler\n\t\/\/ is responsible for quickly right-sizing.\n\tScaleTargetRef corev1.ObjectReference `json:\"scaleTargetRef\"`\n\n\t\/\/ Reachable specifies whether or not the `ScaleTargetRef` can be reached (ie. has a route).\n\t\/\/ Defaults to `ReachabilityUnknown`\n\t\/\/ +optional\n\tReachability ReachabilityType `json:\"reachability,omitempty\"`\n\n\t\/\/ DeprecatedServiceName holds the name of a core Kubernetes Service resource that\n\t\/\/ load balances over the pods referenced by the ScaleTargetRef.\n\tDeprecatedServiceName string `json:\"serviceName\"`\n\n\t\/\/ The application-layer protocol. Matches `ProtocolType` inferred from the revision spec.\n\tProtocolType net.ProtocolType `json:\"protocolType\"`\n}\n\nconst (\n\t\/\/ PodAutoscalerConditionReady is set when the revision is starting to materialize\n\t\/\/ runtime resources, and becomes true when those resources are ready.\n\tPodAutoscalerConditionReady = apis.ConditionReady\n\t\/\/ PodAutoscalerConditionActive is set when the PodAutoscaler's ScaleTargetRef is receiving traffic.\n\tPodAutoscalerConditionActive apis.ConditionType = \"Active\"\n)\n\n\/\/ PodAutoscalerStatus communicates the observed state of the PodAutoscaler (from the controller).\ntype PodAutoscalerStatus struct {\n\tduckv1beta1.Status\n\n\t\/\/ ServiceName is the K8s Service name that serves the revision, scaled by this PA.\n\t\/\/ The service is created and owned by the ServerlessService object owned by this PA.\n\tServiceName string `json:\"serviceName\"`\n\n\t\/\/ MetricsServiceName is the K8s Service name that provides revision metrics.\n\t\/\/ The service is managed by the PA object.\n\tMetricsServiceName string `json:\"metricsServiceName\"`\n\n\t\/\/ DesiredScale shows the current desired number of replicas for the revision.\n\tDesiredScale *int32 `json:\"desiredScale,omitempty\"`\n\n\t\/\/ ActualScale shows the actual number of replicas for the revision.\n\tActualScale *int32 `json:\"actualScale,omitempty\"`\n}\n\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\n\/\/ PodAutoscalerList is a list of PodAutoscaler resources\ntype PodAutoscalerList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\n\tItems []PodAutoscaler `json:\"items\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package demoinfocs\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\tproto \"github.com\/gogo\/protobuf\/proto\"\n\n\tmsg \"github.com\/markus-wa\/demoinfocs-golang\/msg\"\n)\n\nvar byteSlicePool sync.Pool = sync.Pool{\n\tNew: func() interface{} {\n\t\ts := make([]byte, 0, 256)\n\t\treturn &s\n\t},\n}\n\nfunc (p *Parser) parsePacket() {\n\t\/\/ Booooring\n\t\/\/ 152 bytes CommandInfo, 4 bytes SeqNrIn, 4 bytes SeqNrOut\n\t\/\/ See at the bottom what the CommandInfo would contain if you are interested.\n\tp.bitReader.Skip((152 + 4 + 4) << 3)\n\n\t\/\/ Here we go\n\tp.bitReader.BeginChunk(p.bitReader.ReadSignedInt(32) << 3)\n\tfor !p.bitReader.ChunkFinished() {\n\t\tcmd := int(p.bitReader.ReadVarInt32())\n\t\tsize := int(p.bitReader.ReadVarInt32())\n\n\t\tp.bitReader.BeginChunk(size << 3)\n\t\tvar m proto.Message\n\t\tswitch cmd {\n\t\tcase int(msg.SVC_Messages_svc_PacketEntities):\n\t\t\t\/\/ TODO: Find a way to pool SVC_Messages_svc_PacketEntities\n\t\t\t\/\/ Need to make sure the message was consumed before pooling\n\t\t\t\/\/ and the message's contents will be overridden (either by protobuf or manually)\n\t\t\tm = new(msg.CSVCMsg_PacketEntities)\n\n\t\tcase int(msg.SVC_Messages_svc_GameEventList):\n\t\t\tm = new(msg.CSVCMsg_GameEventList)\n\n\t\tcase int(msg.SVC_Messages_svc_GameEvent):\n\t\t\tm = new(msg.CSVCMsg_GameEvent)\n\n\t\tcase int(msg.SVC_Messages_svc_CreateStringTable):\n\t\t\tm = new(msg.CSVCMsg_CreateStringTable)\n\n\t\tcase int(msg.SVC_Messages_svc_UpdateStringTable):\n\t\t\tm = new(msg.CSVCMsg_UpdateStringTable)\n\n\t\tcase int(msg.SVC_Messages_svc_UserMessage):\n\t\t\tm = new(msg.CSVCMsg_UserMessage)\n\n\t\tdefault:\n\t\t\tif p.warn != nil || isDebug {\n\t\t\t\tvar name string\n\t\t\t\tif cmd < 8 || cmd >= 100 {\n\t\t\t\t\tname = msg.NET_Messages_name[int32(cmd)]\n\t\t\t\t} else {\n\t\t\t\t\tname = msg.SVC_Messages_name[int32(cmd)]\n\t\t\t\t}\n\n\t\t\t\tdebugUnhandledMessage(cmd, name)\n\n\t\t\t\tif p.warn != nil {\n\t\t\t\t\tif name == \"\" {\n\t\t\t\t\t\t\/\/ Send a warning if the command is unknown\n\t\t\t\t\t\t\/\/ This might mean our proto files are out of date\n\t\t\t\t\t\tp.warn(fmt.Sprintf(\"Unknown message command %q\", cmd))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ On to the next one\n\t\t\tp.bitReader.EndChunk()\n\t\t\tcontinue\n\t\t}\n\n\t\tb := byteSlicePool.Get().(*[]byte)\n\t\tp.bitReader.ReadBytesInto(b, size)\n\n\t\tif proto.Unmarshal(*b, m) != nil {\n\t\t\t\/\/ TODO: Don't crash here, happens with demos that work in gotv\n\t\t\tpanic(fmt.Sprintf(\"Failed to unmarshal cmd %d\", cmd))\n\t\t}\n\t\tp.msgQueue <- m\n\n\t\t\/\/ Reset to 0 length and pool\n\t\t*b = (*b)[:0]\n\t\tbyteSlicePool.Put(b)\n\n\t\tp.bitReader.EndChunk()\n\t}\n\tp.bitReader.EndChunk()\n}\n\n\/*\nFormat of 'CommandInfos' - I honestly have no clue what they are good for.\nIf you find a use for this please let me know!\n\nHere is all i know:\n\nCommandInfo [152 bytes]\n- [2]Split\n\nSplit [76 bytes]\n- flags [4 bytes]\n- viewOrigin [12 bytes]\n- viewAngles [12 bytes]\n- localViewAngles [12 bytes]\n- viewOrigin2 [12 bytes]\n- viewAngles2 [12 bytes]\n- localViewAngles2 [12 bytes]\n\nOrigin [12 bytes]\n- X [4 bytes]\n- Y [4 bytes]\n- Z [4 bytes]\n\nAngle [12 bytes]\n- X [4 bytes]\n- Y [4 bytes]\n- Z [4 bytes]\n\nThey are parsed in the following order:\nsplit1.flags\nsplit1.viewOrigin.x\nsplit1.viewOrigin.y\nsplit1.viewOrigin.z\nsplit1.viewAngles.x\nsplit1.viewAngles.y\nsplit1.viewAngles.z\nsplit1.localViewAngles.x\nsplit1.localViewAngles.y\nsplit1.localViewAngles.z\nsplit1.viewOrigin2...\nsplit1.viewAngles2...\nsplit1.localViewAngles2...\nsplit2.flags\n...\n\nOr just check this file's history for an example on how to parse them\n*\/\n<commit_msg>Clarified why we don't pool PacketEntities<commit_after>package demoinfocs\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\tproto \"github.com\/gogo\/protobuf\/proto\"\n\n\tmsg \"github.com\/markus-wa\/demoinfocs-golang\/msg\"\n)\n\nvar byteSlicePool sync.Pool = sync.Pool{\n\tNew: func() interface{} {\n\t\ts := make([]byte, 0, 256)\n\t\treturn &s\n\t},\n}\n\nfunc (p *Parser) parsePacket() {\n\t\/\/ Booooring\n\t\/\/ 152 bytes CommandInfo, 4 bytes SeqNrIn, 4 bytes SeqNrOut\n\t\/\/ See at the bottom what the CommandInfo would contain if you are interested.\n\tp.bitReader.Skip((152 + 4 + 4) << 3)\n\n\t\/\/ Here we go\n\tp.bitReader.BeginChunk(p.bitReader.ReadSignedInt(32) << 3)\n\tfor !p.bitReader.ChunkFinished() {\n\t\tcmd := int(p.bitReader.ReadVarInt32())\n\t\tsize := int(p.bitReader.ReadVarInt32())\n\n\t\tp.bitReader.BeginChunk(size << 3)\n\t\tvar m proto.Message\n\t\tswitch cmd {\n\t\tcase int(msg.SVC_Messages_svc_PacketEntities):\n\t\t\t\/\/ We could pool CSVCMsg_PacketEntities as they take up A LOT of the allocations\n\t\t\t\/\/ but unless we're on a system that's doing a lot of concurrent parsing there isn't really a point\n\t\t\t\/\/ as handling packets is a lot slower than creating them and we can't pool until they are handled.\n\t\t\tm = new(msg.CSVCMsg_PacketEntities)\n\n\t\tcase int(msg.SVC_Messages_svc_GameEventList):\n\t\t\tm = new(msg.CSVCMsg_GameEventList)\n\n\t\tcase int(msg.SVC_Messages_svc_GameEvent):\n\t\t\tm = new(msg.CSVCMsg_GameEvent)\n\n\t\tcase int(msg.SVC_Messages_svc_CreateStringTable):\n\t\t\tm = new(msg.CSVCMsg_CreateStringTable)\n\n\t\tcase int(msg.SVC_Messages_svc_UpdateStringTable):\n\t\t\tm = new(msg.CSVCMsg_UpdateStringTable)\n\n\t\tcase int(msg.SVC_Messages_svc_UserMessage):\n\t\t\tm = new(msg.CSVCMsg_UserMessage)\n\n\t\tdefault:\n\t\t\tif p.warn != nil || isDebug {\n\t\t\t\tvar name string\n\t\t\t\tif cmd < 8 || cmd >= 100 {\n\t\t\t\t\tname = msg.NET_Messages_name[int32(cmd)]\n\t\t\t\t} else {\n\t\t\t\t\tname = msg.SVC_Messages_name[int32(cmd)]\n\t\t\t\t}\n\n\t\t\t\tdebugUnhandledMessage(cmd, name)\n\n\t\t\t\tif p.warn != nil {\n\t\t\t\t\tif name == \"\" {\n\t\t\t\t\t\t\/\/ Send a warning if the command is unknown\n\t\t\t\t\t\t\/\/ This might mean our proto files are out of date\n\t\t\t\t\t\tp.warn(fmt.Sprintf(\"Unknown message command %q\", cmd))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ On to the next one\n\t\t\tp.bitReader.EndChunk()\n\t\t\tcontinue\n\t\t}\n\n\t\tb := byteSlicePool.Get().(*[]byte)\n\t\tp.bitReader.ReadBytesInto(b, size)\n\n\t\tif proto.Unmarshal(*b, m) != nil {\n\t\t\t\/\/ TODO: Don't crash here, happens with demos that work in gotv\n\t\t\tpanic(fmt.Sprintf(\"Failed to unmarshal cmd %d\", cmd))\n\t\t}\n\t\tp.msgQueue <- m\n\n\t\t\/\/ Reset to 0 length and pool\n\t\t*b = (*b)[:0]\n\t\tbyteSlicePool.Put(b)\n\n\t\tp.bitReader.EndChunk()\n\t}\n\tp.bitReader.EndChunk()\n}\n\n\/*\nFormat of 'CommandInfos' - I honestly have no clue what they are good for.\nIf you find a use for this please let me know!\n\nHere is all i know:\n\nCommandInfo [152 bytes]\n- [2]Split\n\nSplit [76 bytes]\n- flags [4 bytes]\n- viewOrigin [12 bytes]\n- viewAngles [12 bytes]\n- localViewAngles [12 bytes]\n- viewOrigin2 [12 bytes]\n- viewAngles2 [12 bytes]\n- localViewAngles2 [12 bytes]\n\nOrigin [12 bytes]\n- X [4 bytes]\n- Y [4 bytes]\n- Z [4 bytes]\n\nAngle [12 bytes]\n- X [4 bytes]\n- Y [4 bytes]\n- Z [4 bytes]\n\nThey are parsed in the following order:\nsplit1.flags\nsplit1.viewOrigin.x\nsplit1.viewOrigin.y\nsplit1.viewOrigin.z\nsplit1.viewAngles.x\nsplit1.viewAngles.y\nsplit1.viewAngles.z\nsplit1.localViewAngles.x\nsplit1.localViewAngles.y\nsplit1.localViewAngles.z\nsplit1.viewOrigin2...\nsplit1.viewAngles2...\nsplit1.localViewAngles2...\nsplit2.flags\n...\n\nOr just check this file's history for an example on how to parse them\n*\/\n<|endoftext|>"} {"text":"<commit_before>package http_server\n\nimport (\n\t\"crypto\/tls\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/tedsuo\/ifrit\"\n)\n\nconst (\n\tTCP = \"tcp\"\n\tUNIX = \"unix\"\n)\n\ntype httpServer struct {\n\tprotocol string\n\taddress string\n\thandler http.Handler\n\n\tconnectionWaitGroup *sync.WaitGroup\n\tinactiveConnections map[net.Conn]struct{}\n\tinactiveConnectionsMu *sync.Mutex\n\tstoppingChan chan struct{}\n\n\ttlsConfig *tls.Config\n}\n\nfunc newServerWithListener(protocol, address string, handler http.Handler, tlsConfig *tls.Config) ifrit.Runner {\n\treturn &httpServer{\n\t\taddress: address,\n\t\thandler: handler,\n\t\ttlsConfig: tlsConfig,\n\t\tprotocol: protocol,\n\t}\n}\n\nfunc NewUnixServer(address string, handler http.Handler) ifrit.Runner {\n\treturn newServerWithListener(UNIX, address, handler, nil)\n}\n\nfunc New(address string, handler http.Handler) ifrit.Runner {\n\treturn newServerWithListener(TCP, address, handler, nil)\n}\n\nfunc NewUnixTLSServer(address string, handler http.Handler, tlsConfig *tls.Config) ifrit.Runner {\n\treturn newServerWithListener(UNIX, address, handler, tlsConfig)\n}\n\nfunc NewTLSServer(address string, handler http.Handler, tlsConfig *tls.Config) ifrit.Runner {\n\treturn newServerWithListener(TCP, address, handler, tlsConfig)\n}\n\nfunc (s *httpServer) Run(signals <-chan os.Signal, ready chan<- struct{}) error {\n\ts.connectionWaitGroup = new(sync.WaitGroup)\n\ts.inactiveConnectionsMu = new(sync.Mutex)\n\ts.inactiveConnections = make(map[net.Conn]struct{})\n\ts.stoppingChan = make(chan struct{})\n\n\tserver := http.Server{\n\t\tHandler: s.handler,\n\t\tTLSConfig: s.tlsConfig,\n\t\tConnState: func(conn net.Conn, state http.ConnState) {\n\t\t\tswitch state {\n\t\t\tcase http.StateNew:\n\t\t\t\ts.connectionWaitGroup.Add(1)\n\t\t\t\ts.addInactiveConnection(conn)\n\n\t\t\tcase http.StateIdle:\n\t\t\t\ts.addInactiveConnection(conn)\n\n\t\t\tcase http.StateActive:\n\t\t\t\ts.removeInactiveConnection(conn)\n\n\t\t\tcase http.StateHijacked, http.StateClosed:\n\t\t\t\ts.removeInactiveConnection(conn)\n\t\t\t\ts.connectionWaitGroup.Done()\n\t\t\t}\n\t\t},\n\t}\n\n\tlistener, err := s.getListener(server.TLSConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tserverErrChan := make(chan error, 1)\n\tgo func() {\n\t\tserverErrChan <- server.Serve(listener)\n\t}()\n\n\tclose(ready)\n\n\tfor {\n\t\tselect {\n\t\tcase err = <-serverErrChan:\n\t\t\treturn err\n\n\t\tcase <-signals:\n\t\t\tclose(s.stoppingChan)\n\n\t\t\tlistener.Close()\n\n\t\t\ts.inactiveConnectionsMu.Lock()\n\t\t\tfor c := range s.inactiveConnections {\n\t\t\t\tc.Close()\n\t\t\t}\n\t\t\ts.inactiveConnectionsMu.Unlock()\n\n\t\t\ts.connectionWaitGroup.Wait()\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (s *httpServer) getListener(tlsConfig *tls.Config) (net.Listener, error) {\n\tlistener, err := net.Listen(s.protocol, s.address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif tlsConfig == nil {\n\t\treturn listener, nil\n\t}\n\tswitch s.protocol {\n\tcase TCP:\n\t\tlistener = tls.NewListener(tcpKeepAliveListener{listener.(*net.TCPListener)}, tlsConfig)\n\tdefault:\n\t\tlistener = tls.NewListener(listener, tlsConfig)\n\t}\n\n\treturn listener, nil\n}\n\nfunc (s *httpServer) addInactiveConnection(conn net.Conn) {\n\tselect {\n\tcase <-s.stoppingChan:\n\t\tconn.Close()\n\tdefault:\n\t\ts.inactiveConnectionsMu.Lock()\n\t\ts.inactiveConnections[conn] = struct{}{}\n\t\ts.inactiveConnectionsMu.Unlock()\n\t}\n}\n\nfunc (s *httpServer) removeInactiveConnection(conn net.Conn) {\n\ts.inactiveConnectionsMu.Lock()\n\tdelete(s.inactiveConnections, conn)\n\ts.inactiveConnectionsMu.Unlock()\n}\n\ntype tcpKeepAliveListener struct {\n\t*net.TCPListener\n}\n\nfunc (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {\n\ttc, err := ln.AcceptTCP()\n\tif err != nil {\n\t\treturn\n\t}\n\ttc.SetKeepAlive(true)\n\ttc.SetKeepAlivePeriod(3 * time.Minute)\n\treturn tc, nil\n}\n<commit_msg>replace a racy usage of WaitGroups with channels<commit_after>package http_server\n\nimport (\n\t\"crypto\/tls\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/tedsuo\/ifrit\"\n)\n\nconst (\n\tTCP = \"tcp\"\n\tUNIX = \"unix\"\n)\n\ntype httpServer struct {\n\tprotocol string\n\taddress string\n\thandler http.Handler\n\n\tconnectionWaitGroup *sync.WaitGroup\n\tinactiveConnections map[net.Conn]struct{}\n\tinactiveConnectionsMu *sync.Mutex\n\tstoppingChan chan struct{}\n\n\ttlsConfig *tls.Config\n}\n\nfunc newServerWithListener(protocol, address string, handler http.Handler, tlsConfig *tls.Config) ifrit.Runner {\n\treturn &httpServer{\n\t\taddress: address,\n\t\thandler: handler,\n\t\ttlsConfig: tlsConfig,\n\t\tprotocol: protocol,\n\t}\n}\n\nfunc NewUnixServer(address string, handler http.Handler) ifrit.Runner {\n\treturn newServerWithListener(UNIX, address, handler, nil)\n}\n\nfunc New(address string, handler http.Handler) ifrit.Runner {\n\treturn newServerWithListener(TCP, address, handler, nil)\n}\n\nfunc NewUnixTLSServer(address string, handler http.Handler, tlsConfig *tls.Config) ifrit.Runner {\n\treturn newServerWithListener(UNIX, address, handler, tlsConfig)\n}\n\nfunc NewTLSServer(address string, handler http.Handler, tlsConfig *tls.Config) ifrit.Runner {\n\treturn newServerWithListener(TCP, address, handler, tlsConfig)\n}\n\nfunc (s *httpServer) Run(signals <-chan os.Signal, ready chan<- struct{}) error {\n\ts.connectionWaitGroup = new(sync.WaitGroup)\n\ts.inactiveConnectionsMu = new(sync.Mutex)\n\ts.inactiveConnections = make(map[net.Conn]struct{})\n\ts.stoppingChan = make(chan struct{})\n\n\tconnCountCh := make(chan int)\n\n\tserver := http.Server{\n\t\tHandler: s.handler,\n\t\tTLSConfig: s.tlsConfig,\n\t\tConnState: func(conn net.Conn, state http.ConnState) {\n\t\t\tswitch state {\n\t\t\tcase http.StateNew:\n\t\t\t\tconnCountCh <- 1\n\t\t\t\ts.addInactiveConnection(conn)\n\n\t\t\tcase http.StateIdle:\n\t\t\t\ts.addInactiveConnection(conn)\n\n\t\t\tcase http.StateActive:\n\t\t\t\ts.removeInactiveConnection(conn)\n\n\t\t\tcase http.StateHijacked, http.StateClosed:\n\t\t\t\ts.removeInactiveConnection(conn)\n\t\t\t\tconnCountCh <- -1\n\t\t\t}\n\t\t},\n\t}\n\n\tlistener, err := s.getListener(server.TLSConfig)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tserverErrChan := make(chan error, 1)\n\tgo func() {\n\t\tserverErrChan <- server.Serve(listener)\n\t}()\n\n\tclose(ready)\n\n\tconnCount := 0\n\tfor {\n\t\tselect {\n\t\tcase err = <-serverErrChan:\n\t\t\treturn err\n\n\t\tcase delta := <-connCountCh:\n\t\t\tconnCount += delta\n\n\t\tcase <-signals:\n\t\t\tclose(s.stoppingChan)\n\n\t\t\tlistener.Close()\n\n\t\t\ts.inactiveConnectionsMu.Lock()\n\t\t\tfor c := range s.inactiveConnections {\n\t\t\t\tc.Close()\n\t\t\t}\n\t\t\ts.inactiveConnectionsMu.Unlock()\n\n\t\t\tfor connCount != 0 {\n\t\t\t\tdelta := <-connCountCh\n\t\t\t\tconnCount += delta\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (s *httpServer) getListener(tlsConfig *tls.Config) (net.Listener, error) {\n\tlistener, err := net.Listen(s.protocol, s.address)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif tlsConfig == nil {\n\t\treturn listener, nil\n\t}\n\tswitch s.protocol {\n\tcase TCP:\n\t\tlistener = tls.NewListener(tcpKeepAliveListener{listener.(*net.TCPListener)}, tlsConfig)\n\tdefault:\n\t\tlistener = tls.NewListener(listener, tlsConfig)\n\t}\n\n\treturn listener, nil\n}\n\nfunc (s *httpServer) addInactiveConnection(conn net.Conn) {\n\tselect {\n\tcase <-s.stoppingChan:\n\t\tconn.Close()\n\tdefault:\n\t\ts.inactiveConnectionsMu.Lock()\n\t\ts.inactiveConnections[conn] = struct{}{}\n\t\ts.inactiveConnectionsMu.Unlock()\n\t}\n}\n\nfunc (s *httpServer) removeInactiveConnection(conn net.Conn) {\n\ts.inactiveConnectionsMu.Lock()\n\tdelete(s.inactiveConnections, conn)\n\ts.inactiveConnectionsMu.Unlock()\n}\n\ntype tcpKeepAliveListener struct {\n\t*net.TCPListener\n}\n\nfunc (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {\n\ttc, err := ln.AcceptTCP()\n\tif err != nil {\n\t\treturn\n\t}\n\ttc.SetKeepAlive(true)\n\ttc.SetKeepAlivePeriod(3 * time.Minute)\n\treturn tc, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage exec_test\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\/exec\"\n\t\"strings\"\n)\n\nfunc ExampleLookPath() {\n\tpath, err := exec.LookPath(\"fortune\")\n\tif err != nil {\n\t\tlog.Fatal(\"installing fortune is in your future\")\n\t}\n\tfmt.Printf(\"fortune is available at %s\\n\", path)\n}\n\nfunc ExampleCommand() {\n\tcmd := exec.Command(\"tr\", \"a-z\", \"A-Z\")\n\tcmd.Stdin = strings.NewReader(\"some input\")\n\tvar out bytes.Buffer\n\tcmd.Stdout = &out\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"in all caps: %q\\n\", out.String())\n}\n\nfunc ExampleCmd_Output() {\n\tout, err := exec.Command(\"date\").Output()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"The date is %s\\n\", out)\n}\n\nfunc ExampleCmd_Start() {\n\tcmd := exec.Command(\"sleep\", \"5\")\n\terr := cmd.Start()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Printf(\"Waiting for command to finish...\")\n\terr = cmd.Wait()\n\tlog.Printf(\"Command finished with error: %v\", err)\n}\n\nfunc ExampleCmd_StdoutPipe() {\n\tcmd := exec.Command(\"echo\", \"-n\", `{\"Name\": \"Bob\", \"Age\": 32}`)\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar person struct {\n\t\tName string\n\t\tAge int\n\t}\n\tif err := json.NewDecoder(stdout).Decode(&person); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif err := cmd.Wait(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s is %d years old\\n\", person.Name, person.Age)\n}\n\nfunc ExampleCmd_StdinPipe() {\n\tcmd := exec.Command(\"cat\")\n\tstdin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgo func() {\n\t\tdefer stdin.Close()\n\t\tio.WriteString(stdin, \"values written to stdin are passed to cmd's standard input\")\n\t}()\n\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Printf(\"%s\\n\", out)\n}\n\nfunc ExampleCmd_StderrPipe() {\n\tcmd := exec.Command(\"sh\", \"-c\", \"echo stdout; echo 1>&2 stderr\")\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif err := cmd.Start(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tslurp, _ := ioutil.ReadAll(stderr)\n\tfmt.Printf(\"%s\\n\", slurp)\n\n\tif err := cmd.Wait(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc ExampleCmd_CombinedOutput() {\n\tcmd := exec.Command(\"sh\", \"-c\", \"echo stdout; echo 1>&2 stderr\")\n\tstdoutStderr, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s\\n\", stdoutStderr)\n}\n<commit_msg>os\/exec: add example for CommandContext<commit_after>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage exec_test\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n)\n\nfunc ExampleLookPath() {\n\tpath, err := exec.LookPath(\"fortune\")\n\tif err != nil {\n\t\tlog.Fatal(\"installing fortune is in your future\")\n\t}\n\tfmt.Printf(\"fortune is available at %s\\n\", path)\n}\n\nfunc ExampleCommand() {\n\tcmd := exec.Command(\"tr\", \"a-z\", \"A-Z\")\n\tcmd.Stdin = strings.NewReader(\"some input\")\n\tvar out bytes.Buffer\n\tcmd.Stdout = &out\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"in all caps: %q\\n\", out.String())\n}\n\nfunc ExampleCmd_Output() {\n\tout, err := exec.Command(\"date\").Output()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"The date is %s\\n\", out)\n}\n\nfunc ExampleCmd_Start() {\n\tcmd := exec.Command(\"sleep\", \"5\")\n\terr := cmd.Start()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Printf(\"Waiting for command to finish...\")\n\terr = cmd.Wait()\n\tlog.Printf(\"Command finished with error: %v\", err)\n}\n\nfunc ExampleCmd_StdoutPipe() {\n\tcmd := exec.Command(\"echo\", \"-n\", `{\"Name\": \"Bob\", \"Age\": 32}`)\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar person struct {\n\t\tName string\n\t\tAge int\n\t}\n\tif err := json.NewDecoder(stdout).Decode(&person); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif err := cmd.Wait(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s is %d years old\\n\", person.Name, person.Age)\n}\n\nfunc ExampleCmd_StdinPipe() {\n\tcmd := exec.Command(\"cat\")\n\tstdin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgo func() {\n\t\tdefer stdin.Close()\n\t\tio.WriteString(stdin, \"values written to stdin are passed to cmd's standard input\")\n\t}()\n\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Printf(\"%s\\n\", out)\n}\n\nfunc ExampleCmd_StderrPipe() {\n\tcmd := exec.Command(\"sh\", \"-c\", \"echo stdout; echo 1>&2 stderr\")\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif err := cmd.Start(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tslurp, _ := ioutil.ReadAll(stderr)\n\tfmt.Printf(\"%s\\n\", slurp)\n\n\tif err := cmd.Wait(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc ExampleCmd_CombinedOutput() {\n\tcmd := exec.Command(\"sh\", \"-c\", \"echo stdout; echo 1>&2 stderr\")\n\tstdoutStderr, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"%s\\n\", stdoutStderr)\n}\n\nfunc ExampleCommandContext() {\n\tctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)\n\tdefer cancel()\n\n\tif err := exec.CommandContext(ctx, \"sleep\", \"5\").Run(); err != nil {\n\t\t\/\/ This will fail after 100 milliseconds. The 5 second sleep\n\t\t\/\/ will be interrupted.\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package keyring\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\tjose \"github.com\/dvsekhvalnov\/jose2go\"\n\t\"github.com\/mitchellh\/go-homedir\"\n\t\"golang.org\/x\/crypto\/ssh\/terminal\"\n)\n\ntype passwordFunc func(string) (string, error)\n\nfunc terminalPrompt(prompt string) (string, error) {\n\tif password := os.Getenv(\"AWS_VAULT_FILE_PASSPHRASE\"); password != \"\" {\n\t\treturn password, nil\n\t}\n\n\tfmt.Printf(\"%s: \", prompt)\n\tb, err := terminal.ReadPassword(1)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfmt.Println()\n\treturn string(b), nil\n}\n\nfunc init() {\n\tsupportedBackends[FileBackend] = opener(func(name string) (Keyring, error) {\n\t\treturn &fileKeyring{\n\t\t\tPasswordFunc: terminalPrompt,\n\t\t}, nil\n\t})\n}\n\ntype fileKeyring struct {\n\tDir string\n\tPasswordFunc passwordFunc\n\tpassword string\n}\n\nfunc (k *fileKeyring) dir() (string, error) {\n\tdir := k.Dir\n\tif dir == \"\" {\n\t\thome, err := homedir.Dir()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tdir = filepath.Join(home, \"\/.awsvault\/keys\/\")\n\t}\n\n\tstat, err := os.Stat(dir)\n\tif os.IsNotExist(err) {\n\t\tos.MkdirAll(dir, 0700)\n\t} else if err != nil && !stat.IsDir() {\n\t\terr = fmt.Errorf(\"%s is a file, not a directory\", dir)\n\t}\n\n\treturn dir, nil\n}\n\nfunc (k *fileKeyring) unlock() error {\n\tdir, err := k.dir()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif k.password == \"\" {\n\t\tpwd, err := k.PasswordFunc(fmt.Sprintf(\"Enter passphrase to unlock %s\", dir))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tk.password = pwd\n\t}\n\n\treturn nil\n}\n\nfunc (k *fileKeyring) Get(key string) (Item, error) {\n\tdir, err := k.dir()\n\tif err != nil {\n\t\treturn Item{}, err\n\t}\n\n\tbytes, err := ioutil.ReadFile(filepath.Join(dir, key))\n\tif os.IsNotExist(err) {\n\t\treturn Item{}, ErrKeyNotFound\n\t} else if err != nil {\n\t\treturn Item{}, err\n\t}\n\n\tif err = k.unlock(); err != nil {\n\t\treturn Item{}, err\n\t}\n\n\tpayload, _, err := jose.Decode(string(bytes), k.password)\n\tif err != nil {\n\t\treturn Item{}, err\n\t}\n\n\tvar decoded Item\n\terr = json.Unmarshal([]byte(payload), &decoded)\n\n\treturn decoded, err\n}\n\nfunc (k *fileKeyring) Set(i Item) error {\n\tbytes, err := json.Marshal(i)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdir, err := k.dir()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = k.unlock(); err != nil {\n\t\treturn err\n\t}\n\n\ttoken, err := jose.Encrypt(string(bytes), jose.PBES2_HS256_A128KW, jose.A256GCM, k.password,\n\t\tjose.Headers(map[string]interface{}{\n\t\t\t\"created\": time.Now().String(),\n\t\t}))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ioutil.WriteFile(filepath.Join(dir, i.Key), []byte(token), 0600)\n}\n\nfunc (k *fileKeyring) Remove(key string) error {\n\tdir, err := k.dir()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn os.Remove(filepath.Join(dir, key))\n}\n\nfunc (k *fileKeyring) Keys() ([]string, error) {\n\tdir, err := k.dir()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar keys = []string{}\n\tfiles, _ := ioutil.ReadDir(dir)\n\tfor _, f := range files {\n\t\tkeys = append(keys, f.Name())\n\t}\n\n\treturn keys, nil\n}\n<commit_msg>Use the file descriptor from os.Stdin instead of 1<commit_after>package keyring\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\tjose \"github.com\/dvsekhvalnov\/jose2go\"\n\t\"github.com\/mitchellh\/go-homedir\"\n\t\"golang.org\/x\/crypto\/ssh\/terminal\"\n)\n\ntype passwordFunc func(string) (string, error)\n\nfunc terminalPrompt(prompt string) (string, error) {\n\tif password := os.Getenv(\"AWS_VAULT_FILE_PASSPHRASE\"); password != \"\" {\n\t\treturn password, nil\n\t}\n\n\tfmt.Printf(\"%s: \", prompt)\n\tb, err := terminal.ReadPassword(int(os.Stdin.Fd()))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tfmt.Println()\n\treturn string(b), nil\n}\n\nfunc init() {\n\tsupportedBackends[FileBackend] = opener(func(name string) (Keyring, error) {\n\t\treturn &fileKeyring{\n\t\t\tPasswordFunc: terminalPrompt,\n\t\t}, nil\n\t})\n}\n\ntype fileKeyring struct {\n\tDir string\n\tPasswordFunc passwordFunc\n\tpassword string\n}\n\nfunc (k *fileKeyring) dir() (string, error) {\n\tdir := k.Dir\n\tif dir == \"\" {\n\t\thome, err := homedir.Dir()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tdir = filepath.Join(home, \"\/.awsvault\/keys\/\")\n\t}\n\n\tstat, err := os.Stat(dir)\n\tif os.IsNotExist(err) {\n\t\tos.MkdirAll(dir, 0700)\n\t} else if err != nil && !stat.IsDir() {\n\t\terr = fmt.Errorf(\"%s is a file, not a directory\", dir)\n\t}\n\n\treturn dir, nil\n}\n\nfunc (k *fileKeyring) unlock() error {\n\tdir, err := k.dir()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif k.password == \"\" {\n\t\tpwd, err := k.PasswordFunc(fmt.Sprintf(\"Enter passphrase to unlock %s\", dir))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tk.password = pwd\n\t}\n\n\treturn nil\n}\n\nfunc (k *fileKeyring) Get(key string) (Item, error) {\n\tdir, err := k.dir()\n\tif err != nil {\n\t\treturn Item{}, err\n\t}\n\n\tbytes, err := ioutil.ReadFile(filepath.Join(dir, key))\n\tif os.IsNotExist(err) {\n\t\treturn Item{}, ErrKeyNotFound\n\t} else if err != nil {\n\t\treturn Item{}, err\n\t}\n\n\tif err = k.unlock(); err != nil {\n\t\treturn Item{}, err\n\t}\n\n\tpayload, _, err := jose.Decode(string(bytes), k.password)\n\tif err != nil {\n\t\treturn Item{}, err\n\t}\n\n\tvar decoded Item\n\terr = json.Unmarshal([]byte(payload), &decoded)\n\n\treturn decoded, err\n}\n\nfunc (k *fileKeyring) Set(i Item) error {\n\tbytes, err := json.Marshal(i)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdir, err := k.dir()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = k.unlock(); err != nil {\n\t\treturn err\n\t}\n\n\ttoken, err := jose.Encrypt(string(bytes), jose.PBES2_HS256_A128KW, jose.A256GCM, k.password,\n\t\tjose.Headers(map[string]interface{}{\n\t\t\t\"created\": time.Now().String(),\n\t\t}))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ioutil.WriteFile(filepath.Join(dir, i.Key), []byte(token), 0600)\n}\n\nfunc (k *fileKeyring) Remove(key string) error {\n\tdir, err := k.dir()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn os.Remove(filepath.Join(dir, key))\n}\n\nfunc (k *fileKeyring) Keys() ([]string, error) {\n\tdir, err := k.dir()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar keys = []string{}\n\tfiles, _ := ioutil.ReadDir(dir)\n\tfor _, f := range files {\n\t\tkeys = append(keys, f.Name())\n\t}\n\n\treturn keys, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package fetch\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/spf13\/viper\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/temoto\/robotstxt\"\n)\n\nfunc TestIsRobotsTxt(t *testing.T) {\n\tassert.Equal(t, false, IsRobotsTxt(\"http:\/\/google.com\/robots.txst\"))\n\tassert.Equal(t, true, IsRobotsTxt(\"http:\/\/google.com\/robots.txt\"))\n\n}\n\nfunc TestRobotstxtData(t *testing.T) {\n\t\/\/test AllowedByRobots func\n\trobots, err := robotstxt.FromString(robotstxtData)\n\tassert.NoError(t, err, \"No error returned\")\n\tassert.Equal(t, true, AllowedByRobots(\"http:\/\/\"+addr+\"\/allowed\", robots), \"Test allowed url\")\n\tassert.Equal(t, false, AllowedByRobots(\"http:\/\/\"+addr+\"\/disallowed\", robots), \"Test disallowed url\")\n\tassert.Equal(t, time.Duration(0), GetCrawlDelay(robots))\n\trobots = nil\n\tassert.Equal(t, true, AllowedByRobots(\"http:\/\/\"+addr+\"\/allowed\", robots), \"Test allowed url\")\n\n\n\tviper.Set(\"DFK_FETCH\", \"http:\/\/127.0.0.1:8000\")\n\t\/\/rd, err := RobotstxtData(\"http:\/\/\" + addr)\n\trd, err := RobotstxtData(\"https:\/\/google.com\")\n\tassert.NoError(t, err, \"No error returned\")\n\tassert.NotNil(t, rd, \"No error returned\")\n\n\trd, err = RobotstxtData(\"invalid_host\")\n\tassert.Error(t, err, \"No error returned\")\n}<commit_msg>fix typos<commit_after>package fetch\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/spf13\/viper\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/temoto\/robotstxt\"\n)\n\nfunc TestIsRobotsTxt(t *testing.T) {\n\tassert.Equal(t, false, IsRobotsTxt(\"http:\/\/google.com\/robots.txst\"))\n\tassert.Equal(t, true, IsRobotsTxt(\"http:\/\/google.com\/robots.txt\"))\n\n}\n\nfunc TestRobotstxtData(t *testing.T) {\n\t\/\/test AllowedByRobots func\n\trobots, err := robotstxt.FromString(robotstxtData)\n\tassert.NoError(t, err, \"No error returned\")\n\tassert.Equal(t, true, AllowedByRobots(\"http:\/\/\"+addr+\"\/allowed\", robots), \"Test allowed url\")\n\tassert.Equal(t, false, AllowedByRobots(\"http:\/\/\"+addr+\"\/disallowed\", robots), \"Test disallowed url\")\n\tassert.Equal(t, time.Duration(0), GetCrawlDelay(robots))\n\trobots = nil\n\tassert.Equal(t, true, AllowedByRobots(\"http:\/\/\"+addr+\"\/allowed\", robots), \"Test allowed url\")\n\n\n\tviper.Set(\"DFK_FETCH\", \"http:\/\/127.0.0.1:8000\")\n\t\/\/rd, err := RobotstxtData(\"http:\/\/\" + addr)\n\trd, err := RobotstxtData(\"https:\/\/google.com\")\n\tassert.NoError(t, err, \"No error returned\")\n\tassert.NotNil(t, rd, \"Not nil returned\")\n\n\trd, err = RobotstxtData(\"invalid_host\")\n\tassert.Error(t, err, \"error returned\")\n}<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2017 Circonus, Inc. <support@circonus.com>\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\/\/\n\npackage agent\n\nimport (\n\t\"context\"\n\t\"os\"\n\t\"os\/signal\"\n\n\t\"github.com\/circonus-labs\/circonus-agent\/internal\/plugins\"\n\t\"github.com\/circonus-labs\/circonus-agent\/internal\/release\"\n\t\"github.com\/circonus-labs\/circonus-agent\/internal\/reverse\"\n\t\"github.com\/circonus-labs\/circonus-agent\/internal\/server\"\n\t\"github.com\/circonus-labs\/circonus-agent\/internal\/statsd\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/rs\/zerolog\/log\"\n)\n\n\/\/ New returns a new agent instance\nfunc New() (*Agent, error) {\n\ta := Agent{\n\t\terrCh: make(chan error, 10),\n\t\tsignalCh: make(chan os.Signal, 10),\n\t}\n\n\t\/\/ Handle shutdown via a.shutdownCtx\n\tsignalNotifySetup(a.signalCh)\n\n\ta.shutdownCtx, a.shutdown = context.WithCancel(context.Background())\n\n\tvar err error\n\n\ta.plugins = plugins.New(a.shutdownCtx)\n\terr = a.plugins.Scan()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ta.statsdServer, err = statsd.New()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ta.reverseConn, err = reverse.New()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ta.listenServer, err = server.New(a.shutdownCtx, a.plugins, a.statsdServer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &a, nil\n}\n\n\/\/ Start the agent\nfunc (a *Agent) Start() {\n\n\tgo a.handleSignals()\n\n\tgo func() {\n\t\tif err := a.statsdServer.Start(a.shutdownCtx); err != nil {\n\t\t\ta.errCh <- errors.Wrap(err, \"Starting StatsD listener\")\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tif err := a.reverseConn.Start(a.shutdownCtx); err != nil {\n\t\t\ta.errCh <- errors.Wrap(err, \"Unable to start reverse connection\")\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tif err := a.listenServer.Start(); err != nil {\n\t\t\ta.errCh <- errors.Wrap(err, \"Starting server\")\n\t\t}\n\t}()\n}\n\n\/\/ Stop cleans up and shuts down the Agent\nfunc (a *Agent) Stop() {\n\ta.stopSignalHandler()\n\ta.plugins.Stop()\n\ta.statsdServer.Stop()\n\ta.reverseConn.Stop()\n\ta.listenServer.Stop()\n\ta.shutdown()\n\n\tlog.Debug().\n\t\tInt(\"pid\", os.Getpid()).\n\t\tStr(\"name\", release.NAME).\n\t\tStr(\"ver\", release.VERSION).Msg(\"Stopped\")\n\n\tos.Exit(0)\n}\n\n\/\/ Wait blocks until shutdown\nfunc (a *Agent) Wait() error {\n\tlog.Debug().\n\t\tInt(\"pid\", os.Getpid()).\n\t\tStr(\"name\", release.NAME).\n\t\tStr(\"ver\", release.VERSION).Msg(\"Starting wait\")\n\tselect {\n\tcase <-a.shutdownCtx.Done():\n\tcase err := <-a.errCh:\n\t\tlog.Error().Err(err).Msg(\"Shutting down agent due to errors\")\n\t\ta.Stop()\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ stopSignalHandler disables the signal handler\nfunc (a *Agent) stopSignalHandler() {\n\tsignal.Stop(a.signalCh)\n}\n<commit_msg>remove ctx for statsd and reverse<commit_after>\/\/ Copyright © 2017 Circonus, Inc. <support@circonus.com>\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\/\/\n\npackage agent\n\nimport (\n\t\"context\"\n\t\"os\"\n\t\"os\/signal\"\n\n\t\"github.com\/circonus-labs\/circonus-agent\/internal\/plugins\"\n\t\"github.com\/circonus-labs\/circonus-agent\/internal\/release\"\n\t\"github.com\/circonus-labs\/circonus-agent\/internal\/reverse\"\n\t\"github.com\/circonus-labs\/circonus-agent\/internal\/server\"\n\t\"github.com\/circonus-labs\/circonus-agent\/internal\/statsd\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/rs\/zerolog\/log\"\n)\n\n\/\/ New returns a new agent instance\nfunc New() (*Agent, error) {\n\ta := Agent{\n\t\terrCh: make(chan error, 10),\n\t\tsignalCh: make(chan os.Signal, 10),\n\t}\n\n\t\/\/ Handle shutdown via a.shutdownCtx\n\tsignalNotifySetup(a.signalCh)\n\n\ta.shutdownCtx, a.shutdown = context.WithCancel(context.Background())\n\n\tvar err error\n\n\ta.plugins = plugins.New(a.shutdownCtx)\n\terr = a.plugins.Scan()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ta.statsdServer, err = statsd.New()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ta.reverseConn, err = reverse.New()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ta.listenServer, err = server.New(a.shutdownCtx, a.plugins, a.statsdServer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &a, nil\n}\n\n\/\/ Start the agent\nfunc (a *Agent) Start() {\n\n\tgo a.handleSignals()\n\n\tgo func() {\n\t\tif err := a.statsdServer.Start(); err != nil {\n\t\t\ta.errCh <- errors.Wrap(err, \"Starting StatsD listener\")\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tif err := a.reverseConn.Start(); err != nil {\n\t\t\ta.errCh <- errors.Wrap(err, \"Unable to start reverse connection\")\n\t\t}\n\t}()\n\n\tgo func() {\n\t\tif err := a.listenServer.Start(); err != nil {\n\t\t\ta.errCh <- errors.Wrap(err, \"Starting server\")\n\t\t}\n\t}()\n}\n\n\/\/ Stop cleans up and shuts down the Agent\nfunc (a *Agent) Stop() {\n\ta.stopSignalHandler()\n\ta.plugins.Stop()\n\ta.statsdServer.Stop()\n\ta.reverseConn.Stop()\n\ta.listenServer.Stop()\n\ta.shutdown()\n\n\tlog.Debug().\n\t\tInt(\"pid\", os.Getpid()).\n\t\tStr(\"name\", release.NAME).\n\t\tStr(\"ver\", release.VERSION).Msg(\"Stopped\")\n\n\tos.Exit(0)\n}\n\n\/\/ Wait blocks until shutdown\nfunc (a *Agent) Wait() error {\n\tlog.Debug().\n\t\tInt(\"pid\", os.Getpid()).\n\t\tStr(\"name\", release.NAME).\n\t\tStr(\"ver\", release.VERSION).Msg(\"Starting wait\")\n\tselect {\n\tcase <-a.shutdownCtx.Done():\n\tcase err := <-a.errCh:\n\t\tlog.Error().Err(err).Msg(\"Shutting down agent due to error\")\n\t\ta.Stop()\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ stopSignalHandler disables the signal handler\nfunc (a *Agent) stopSignalHandler() {\n\tsignal.Stop(a.signalCh)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage http\n\nimport (\n\t\"bytes\"\n\t\"reflect\"\n\t\"testing\"\n)\n\n\nvar writeSetCookiesTests = []struct {\n\tCookies []*Cookie\n\tRaw string\n}{\n\t{\n\t\t[]*Cookie{&Cookie{Name: \"cookie-1\", Value: \"v$1\", MaxAge: -1}},\n\t\t\"Set-Cookie: cookie-1=v$1\\r\\n\",\n\t},\n}\n\nfunc TestWriteSetCookies(t *testing.T) {\n\tfor i, tt := range writeSetCookiesTests {\n\t\tvar w bytes.Buffer\n\t\twriteSetCookies(&w, tt.Cookies)\n\t\tseen := string(w.Bytes())\n\t\tif seen != tt.Raw {\n\t\t\tt.Errorf(\"Test %d, expecting:\\n%s\\nGot:\\n%s\\n\", i, tt.Raw, seen)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nvar writeCookiesTests = []struct {\n\tCookies []*Cookie\n\tRaw string\n}{\n\t{\n\t\t[]*Cookie{&Cookie{Name: \"cookie-1\", Value: \"v$1\", MaxAge: -1}},\n\t\t\"Cookie: cookie-1=v$1\\r\\n\",\n\t},\n}\n\nfunc TestWriteCookies(t *testing.T) {\n\tfor i, tt := range writeCookiesTests {\n\t\tvar w bytes.Buffer\n\t\twriteCookies(&w, tt.Cookies)\n\t\tseen := string(w.Bytes())\n\t\tif seen != tt.Raw {\n\t\t\tt.Errorf(\"Test %d, expecting:\\n%s\\nGot:\\n%s\\n\", i, tt.Raw, seen)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nvar readSetCookiesTests = []struct {\n\tHeader Header\n\tCookies []*Cookie\n}{\n\t{\n\t\tHeader{\"Set-Cookie\": {\"Cookie-1=v$1\"}},\n\t\t[]*Cookie{&Cookie{Name: \"Cookie-1\", Value: \"v$1\", MaxAge: -1, Raw: \"Cookie-1=v$1\"}},\n\t},\n}\n\nfunc TestReadSetCookies(t *testing.T) {\n\tfor i, tt := range readSetCookiesTests {\n\t\tc := readSetCookies(tt.Header)\n\t\tif !reflect.DeepEqual(c, tt.Cookies) {\n\t\t\tt.Errorf(\"#%d readSetCookies: have\\n%#v\\nwant\\n%#v\\n\", i, c, tt.Cookies)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nvar readCookiesTests = []struct {\n\tHeader Header\n\tCookies []*Cookie\n}{\n\t{\n\t\tHeader{\"Cookie\": {\"Cookie-1=v$1\"}},\n\t\t[]*Cookie{&Cookie{Name: \"Cookie-1\", Value: \"v$1\", MaxAge: -1, Raw: \"Cookie-1=v$1\"}},\n\t},\n}\n\nfunc TestReadCookies(t *testing.T) {\n\tfor i, tt := range readCookiesTests {\n\t\tc := readCookies(tt.Header)\n\t\tif !reflect.DeepEqual(c, tt.Cookies) {\n\t\t\tt.Errorf(\"#%d readCookies: have\\n%#v\\nwant\\n%#v\\n\", i, c, tt.Cookies)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n<commit_msg>http: fix cookie_test<commit_after>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage http\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"json\"\n\t\"reflect\"\n\t\"testing\"\n)\n\n\nvar writeSetCookiesTests = []struct {\n\tCookies []*Cookie\n\tRaw string\n}{\n\t{\n\t\t[]*Cookie{\n\t\t\t&Cookie{Name: \"cookie-1\", Value: \"v$1\"},\n\t\t\t&Cookie{Name: \"cookie-2\", Value: \"two\", MaxAge: 3600},\n\t\t},\n\t\t\"Set-Cookie: cookie-1=v$1\\r\\n\" +\n\t\t\t\"Set-Cookie: cookie-2=two; Max-Age=3600\\r\\n\",\n\t},\n}\n\nfunc TestWriteSetCookies(t *testing.T) {\n\tfor i, tt := range writeSetCookiesTests {\n\t\tvar w bytes.Buffer\n\t\twriteSetCookies(&w, tt.Cookies)\n\t\tseen := string(w.Bytes())\n\t\tif seen != tt.Raw {\n\t\t\tt.Errorf(\"Test %d, expecting:\\n%s\\nGot:\\n%s\\n\", i, tt.Raw, seen)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nvar writeCookiesTests = []struct {\n\tCookies []*Cookie\n\tRaw string\n}{\n\t{\n\t\t[]*Cookie{&Cookie{Name: \"cookie-1\", Value: \"v$1\"}},\n\t\t\"Cookie: cookie-1=v$1\\r\\n\",\n\t},\n}\n\nfunc TestWriteCookies(t *testing.T) {\n\tfor i, tt := range writeCookiesTests {\n\t\tvar w bytes.Buffer\n\t\twriteCookies(&w, tt.Cookies)\n\t\tseen := string(w.Bytes())\n\t\tif seen != tt.Raw {\n\t\t\tt.Errorf(\"Test %d, expecting:\\n%s\\nGot:\\n%s\\n\", i, tt.Raw, seen)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nvar readSetCookiesTests = []struct {\n\tHeader Header\n\tCookies []*Cookie\n}{\n\t{\n\t\tHeader{\"Set-Cookie\": {\"Cookie-1=v$1\"}},\n\t\t[]*Cookie{&Cookie{Name: \"Cookie-1\", Value: \"v$1\", Raw: \"Cookie-1=v$1\"}},\n\t},\n}\n\nfunc toJSON(v interface{}) string {\n\tb, err := json.Marshal(v)\n\tif err != nil {\n\t\treturn fmt.Sprintf(\"%#v\", v)\n\t}\n\treturn string(b)\n}\n\nfunc TestReadSetCookies(t *testing.T) {\n\tfor i, tt := range readSetCookiesTests {\n\t\tc := readSetCookies(tt.Header)\n\t\tif !reflect.DeepEqual(c, tt.Cookies) {\n\t\t\tt.Errorf(\"#%d readSetCookies: have\\n%s\\nwant\\n%s\\n\", i, toJSON(c), toJSON(tt.Cookies))\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nvar readCookiesTests = []struct {\n\tHeader Header\n\tCookies []*Cookie\n}{\n\t{\n\t\tHeader{\"Cookie\": {\"Cookie-1=v$1\"}},\n\t\t[]*Cookie{&Cookie{Name: \"Cookie-1\", Value: \"v$1\"}},\n\t},\n}\n\nfunc TestReadCookies(t *testing.T) {\n\tfor i, tt := range readCookiesTests {\n\t\tc := readCookies(tt.Header)\n\t\tif !reflect.DeepEqual(c, tt.Cookies) {\n\t\t\tt.Errorf(\"#%d readCookies: have\\n%s\\nwant\\n%s\\n\", i, toJSON(c), toJSON(tt.Cookies))\n\t\t\tcontinue\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage internal\n\nimport (\n\t\"context\"\n\n\t\"golang.org\/x\/pkgsite\/internal\/licenses\"\n)\n\n\/\/ DataSource is the interface used by the frontend to interact with module data.\ntype DataSource interface {\n\t\/\/ See the internal\/postgres package for further documentation of these\n\t\/\/ methods, particularly as they pertain to the main postgres implementation.\n\n\t\/\/ GetDirectory returns information about a directory, which may also be a module and\/or package.\n\t\/\/ The module and version must both be known.\n\tGetDirectory(ctx context.Context, dirPath, modulePath, version string, pathID int, fields ...FieldSet) (_ *Directory, err error)\n\t\/\/ GetDirectoryMeta returns information about a directory.\n\tGetDirectoryMeta(ctx context.Context, dirPath, modulePath, version string) (_ *DirectoryMeta, err error)\n\t\/\/ GetImports returns a slice of import paths imported by the package\n\t\/\/ specified by path and version.\n\tGetImports(ctx context.Context, pkgPath, modulePath, version string) ([]string, error)\n\t\/\/ GetLicenses returns licenses at the given path for given modulePath and version.\n\tGetLicenses(ctx context.Context, fullPath, modulePath, resolvedVersion string) ([]*licenses.License, error)\n\t\/\/ GetModuleInfo returns the ModuleInfo corresponding to modulePath and\n\t\/\/ version.\n\tGetModuleInfo(ctx context.Context, modulePath, version string) (*ModuleInfo, error)\n\t\/\/ GetPathInfo returns information about a path.\n\tGetPathInfo(ctx context.Context, path, inModulePath, inVersion string) (outModulePath, outVersion string, isPackage bool, err error)\n\n\t\/\/ TODO(golang\/go#39629): Deprecate these methods.\n\t\/\/\n\t\/\/ LegacyGetDirectory returns packages whose import path is in a (possibly\n\t\/\/ nested) subdirectory of the given directory path. When multiple\n\t\/\/ package paths satisfy this query, it should prefer the module with\n\t\/\/ the longest path.\n\tLegacyGetDirectory(ctx context.Context, dirPath, modulePath, version string, fields FieldSet) (_ *LegacyDirectory, err error)\n\t\/\/ LegacyGetModuleInfo returns the LegacyModuleInfo corresponding to modulePath and\n\t\/\/ version.\n\tLegacyGetModuleInfo(ctx context.Context, modulePath, version string) (*LegacyModuleInfo, error)\n\t\/\/ LegacyGetModuleLicenses returns all top-level Licenses for the given modulePath\n\t\/\/ and version. (i.e., Licenses contained in the module root directory)\n\tLegacyGetModuleLicenses(ctx context.Context, modulePath, version string) ([]*licenses.License, error)\n\t\/\/ LegacyGetPackage returns the LegacyVersionedPackage corresponding to the given package\n\t\/\/ pkgPath, modulePath, and version. When multiple package paths satisfy this query, it\n\t\/\/ should prefer the module with the longest path.\n\tLegacyGetPackage(ctx context.Context, pkgPath, modulePath, version string) (*LegacyVersionedPackage, error)\n\t\/\/ LegacyGetPackagesInModule returns LegacyPackages contained in the module version\n\t\/\/ specified by modulePath and version.\n\tLegacyGetPackagesInModule(ctx context.Context, modulePath, version string) ([]*LegacyPackage, error)\n\t\/\/ LegacyGetPackageLicenses returns all Licenses that apply to pkgPath, within the\n\t\/\/ module version specified by modulePath and version.\n\tLegacyGetPackageLicenses(ctx context.Context, pkgPath, modulePath, version string) ([]*licenses.License, error)\n\t\/\/ LegacyGetPsuedoVersionsForModule returns ModuleInfo for all known\n\t\/\/ pseudo-versions for the module corresponding to modulePath.\n\tLegacyGetPsuedoVersionsForModule(ctx context.Context, modulePath string) ([]*ModuleInfo, error)\n\t\/\/ LegacyGetPsuedoVersionsForModule returns ModuleInfo for all known\n\t\/\/ pseudo-versions for any module containing a package with the given import\n\t\/\/ path.\n\tLegacyGetPsuedoVersionsForPackageSeries(ctx context.Context, pkgPath string) ([]*ModuleInfo, error)\n\t\/\/ LegacyGetTaggedVersionsForModule returns ModuleInfo for all known tagged\n\t\/\/ versions for the module corresponding to modulePath.\n\tLegacyGetTaggedVersionsForModule(ctx context.Context, modulePath string) ([]*ModuleInfo, error)\n\t\/\/ LegacyGetTaggedVersionsForModule returns ModuleInfo for all known tagged\n\t\/\/ versions for any module containing a package with the given import path.\n\tLegacyGetTaggedVersionsForPackageSeries(ctx context.Context, pkgPath string) ([]*ModuleInfo, error)\n}\n<commit_msg>internal: remove unused GetModuleInfo method from DataSource<commit_after>\/\/ Copyright 2019 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage internal\n\nimport (\n\t\"context\"\n\n\t\"golang.org\/x\/pkgsite\/internal\/licenses\"\n)\n\n\/\/ DataSource is the interface used by the frontend to interact with module data.\ntype DataSource interface {\n\t\/\/ See the internal\/postgres package for further documentation of these\n\t\/\/ methods, particularly as they pertain to the main postgres implementation.\n\n\t\/\/ GetDirectory returns information about a directory, which may also be a module and\/or package.\n\t\/\/ The module and version must both be known.\n\tGetDirectory(ctx context.Context, dirPath, modulePath, version string, pathID int, fields ...FieldSet) (_ *Directory, err error)\n\t\/\/ GetDirectoryMeta returns information about a directory.\n\tGetDirectoryMeta(ctx context.Context, dirPath, modulePath, version string) (_ *DirectoryMeta, err error)\n\t\/\/ GetImports returns a slice of import paths imported by the package\n\t\/\/ specified by path and version.\n\tGetImports(ctx context.Context, pkgPath, modulePath, version string) ([]string, error)\n\t\/\/ GetLicenses returns licenses at the given path for given modulePath and version.\n\tGetLicenses(ctx context.Context, fullPath, modulePath, resolvedVersion string) ([]*licenses.License, error)\n\t\/\/ GetPathInfo returns information about a path.\n\tGetPathInfo(ctx context.Context, path, inModulePath, inVersion string) (outModulePath, outVersion string, isPackage bool, err error)\n\n\t\/\/ TODO(golang\/go#39629): Deprecate these methods.\n\t\/\/\n\t\/\/ LegacyGetDirectory returns packages whose import path is in a (possibly\n\t\/\/ nested) subdirectory of the given directory path. When multiple\n\t\/\/ package paths satisfy this query, it should prefer the module with\n\t\/\/ the longest path.\n\tLegacyGetDirectory(ctx context.Context, dirPath, modulePath, version string, fields FieldSet) (_ *LegacyDirectory, err error)\n\t\/\/ LegacyGetModuleInfo returns the LegacyModuleInfo corresponding to modulePath and\n\t\/\/ version.\n\tLegacyGetModuleInfo(ctx context.Context, modulePath, version string) (*LegacyModuleInfo, error)\n\t\/\/ LegacyGetModuleLicenses returns all top-level Licenses for the given modulePath\n\t\/\/ and version. (i.e., Licenses contained in the module root directory)\n\tLegacyGetModuleLicenses(ctx context.Context, modulePath, version string) ([]*licenses.License, error)\n\t\/\/ LegacyGetPackage returns the LegacyVersionedPackage corresponding to the given package\n\t\/\/ pkgPath, modulePath, and version. When multiple package paths satisfy this query, it\n\t\/\/ should prefer the module with the longest path.\n\tLegacyGetPackage(ctx context.Context, pkgPath, modulePath, version string) (*LegacyVersionedPackage, error)\n\t\/\/ LegacyGetPackagesInModule returns LegacyPackages contained in the module version\n\t\/\/ specified by modulePath and version.\n\tLegacyGetPackagesInModule(ctx context.Context, modulePath, version string) ([]*LegacyPackage, error)\n\t\/\/ LegacyGetPackageLicenses returns all Licenses that apply to pkgPath, within the\n\t\/\/ module version specified by modulePath and version.\n\tLegacyGetPackageLicenses(ctx context.Context, pkgPath, modulePath, version string) ([]*licenses.License, error)\n\t\/\/ LegacyGetPsuedoVersionsForModule returns ModuleInfo for all known\n\t\/\/ pseudo-versions for the module corresponding to modulePath.\n\tLegacyGetPsuedoVersionsForModule(ctx context.Context, modulePath string) ([]*ModuleInfo, error)\n\t\/\/ LegacyGetPsuedoVersionsForModule returns ModuleInfo for all known\n\t\/\/ pseudo-versions for any module containing a package with the given import\n\t\/\/ path.\n\tLegacyGetPsuedoVersionsForPackageSeries(ctx context.Context, pkgPath string) ([]*ModuleInfo, error)\n\t\/\/ LegacyGetTaggedVersionsForModule returns ModuleInfo for all known tagged\n\t\/\/ versions for the module corresponding to modulePath.\n\tLegacyGetTaggedVersionsForModule(ctx context.Context, modulePath string) ([]*ModuleInfo, error)\n\t\/\/ LegacyGetTaggedVersionsForModule returns ModuleInfo for all known tagged\n\t\/\/ versions for any module containing a package with the given import path.\n\tLegacyGetTaggedVersionsForPackageSeries(ctx context.Context, pkgPath string) ([]*ModuleInfo, error)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package gif implements a GIF image decoder.\n\/\/\n\/\/ The GIF specification is at http:\/\/www.w3.org\/Graphics\/GIF\/spec-gif89a.txt.\npackage gif\n\nimport (\n\t\"bufio\"\n\t\"compress\/lzw\"\n\t\"fmt\"\n\t\"image\"\n\t\"io\"\n\t\"os\"\n)\n\n\/\/ If the io.Reader does not also have ReadByte, then decode will introduce its own buffering.\ntype reader interface {\n\tio.Reader\n\tio.ByteReader\n}\n\n\/\/ Masks etc.\nconst (\n\t\/\/ Fields.\n\tfColorMapFollows = 1 << 7\n\n\t\/\/ Image fields.\n\tifInterlace = 1 << 6\n\n\t\/\/ Graphic control flags.\n\tgcTransparentColorSet = 1 << 0\n)\n\n\/\/ Section indicators.\nconst (\n\tsExtension = 0x21\n\tsImageDescriptor = 0x2C\n\tsTrailer = 0x3B\n)\n\n\/\/ Extensions.\nconst (\n\teText = 0x01 \/\/ Plain Text\n\teGraphicControl = 0xF9 \/\/ Graphic Control\n\teComment = 0xFE \/\/ Comment\n\teApplication = 0xFF \/\/ Application\n)\n\n\/\/ decoder is the type used to decode a GIF file.\ntype decoder struct {\n\tr reader\n\n\t\/\/ From header.\n\tvers string\n\twidth int\n\theight int\n\tflags byte\n\theaderFields byte\n\tbackgroundIndex byte\n\tloopCount int\n\tdelayTime int\n\n\t\/\/ Unused from header.\n\taspect byte\n\n\t\/\/ From image descriptor.\n\timageFields byte\n\n\t\/\/ Computed.\n\tpixelSize uint\n\tglobalColorMap image.PalettedColorModel\n\n\t\/\/ Computed but unused (TODO).\n\ttransparentIndex byte\n\n\t\/\/ Used when decoding.\n\tdelay []int\n\timage []*image.Paletted\n\ttmp [1024]byte \/\/ must be at least 768 so we can read color map\n}\n\n\/\/ blockReader parses the block structure of GIF image data, which\n\/\/ comprises (n, (n bytes)) blocks, with 1 <= n <= 255. It is the\n\/\/ reader given to the LZW decoder, which is thus immune to the\n\/\/ blocking. After the LZW decoder completes, there will be a 0-byte\n\/\/ block remaining (0, ()), but under normal execution blockReader\n\/\/ doesn't consume it, so it is handled in decode.\ntype blockReader struct {\n\tr reader\n\tslice []byte\n\ttmp [256]byte\n}\n\nfunc (b *blockReader) Read(p []byte) (n int, err os.Error) {\n\tif len(p) == 0 {\n\t\treturn\n\t}\n\tif len(b.slice) > 0 {\n\t\tn = copy(p, b.slice)\n\t\tb.slice = b.slice[n:]\n\t\treturn\n\t}\n\tvar blockLen uint8\n\tblockLen, err = b.r.ReadByte()\n\tif err != nil {\n\t\treturn\n\t}\n\tif blockLen == 0 {\n\t\treturn 0, os.EOF\n\t}\n\tb.slice = b.tmp[0:blockLen]\n\tif _, err = io.ReadFull(b.r, b.slice); err != nil {\n\t\treturn\n\t}\n\treturn b.Read(p)\n}\n\n\/\/ decode reads a GIF image from r and stores the result in d.\nfunc (d *decoder) decode(r io.Reader, configOnly bool) os.Error {\n\t\/\/ Add buffering if r does not provide ReadByte.\n\tif rr, ok := r.(reader); ok {\n\t\td.r = rr\n\t} else {\n\t\td.r = bufio.NewReader(r)\n\t}\n\n\terr := d.readHeaderAndScreenDescriptor()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif configOnly {\n\t\treturn nil\n\t}\n\n\tif d.headerFields&fColorMapFollows != 0 {\n\t\tif d.globalColorMap, err = d.readColorMap(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\td.image = nil\n\nLoop:\n\tfor err == nil {\n\t\tvar c byte\n\t\tc, err = d.r.ReadByte()\n\t\tif err == os.EOF {\n\t\t\tbreak\n\t\t}\n\t\tswitch c {\n\t\tcase sExtension:\n\t\t\terr = d.readExtension()\n\n\t\tcase sImageDescriptor:\n\t\t\tvar m *image.Paletted\n\t\t\tm, err = d.newImageFromDescriptor()\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif d.imageFields&fColorMapFollows != 0 {\n\t\t\t\tm.Palette, err = d.readColorMap()\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tm.Palette = d.globalColorMap\n\t\t\t}\n\t\t\tvar litWidth uint8\n\t\t\tlitWidth, err = d.r.ReadByte()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif litWidth > 8 {\n\t\t\t\treturn fmt.Errorf(\"gif: pixel size in decode out of range: %d\", litWidth)\n\t\t\t}\n\t\t\t\/\/ A wonderfully Go-like piece of magic. Unfortunately it's only at its\n\t\t\t\/\/ best for 8-bit pixels.\n\t\t\tlzwr := lzw.NewReader(&blockReader{r: d.r}, lzw.LSB, int(litWidth))\n\t\t\tif _, err = io.ReadFull(lzwr, m.Pix); err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t\/\/ There should be a \"0\" block remaining; drain that.\n\t\t\tc, err = d.r.ReadByte()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif c != 0 {\n\t\t\t\treturn os.ErrorString(\"gif: extra data after image\")\n\t\t\t}\n\t\t\td.image = append(d.image, m)\n\t\t\td.delay = append(d.delay, d.delayTime)\n\t\t\td.delayTime = 0 \/\/ TODO: is this correct, or should we hold on to the value?\n\n\t\tcase sTrailer:\n\t\t\tbreak Loop\n\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"gif: unknown block type: 0x%.2x\", c)\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(d.image) == 0 {\n\t\treturn io.ErrUnexpectedEOF\n\t}\n\treturn nil\n}\n\nfunc (d *decoder) readHeaderAndScreenDescriptor() os.Error {\n\t_, err := io.ReadFull(d.r, d.tmp[0:13])\n\tif err != nil {\n\t\treturn err\n\t}\n\td.vers = string(d.tmp[0:6])\n\tif d.vers != \"GIF87a\" && d.vers != \"GIF89a\" {\n\t\treturn fmt.Errorf(\"gif: can't recognize format %s\", d.vers)\n\t}\n\td.width = int(d.tmp[6]) + int(d.tmp[7])<<8\n\td.height = int(d.tmp[8]) + int(d.tmp[9])<<8\n\td.headerFields = d.tmp[10]\n\td.backgroundIndex = d.tmp[11]\n\td.aspect = d.tmp[12]\n\td.loopCount = -1\n\td.pixelSize = uint(d.headerFields&7) + 1\n\treturn nil\n}\n\nfunc (d *decoder) readColorMap() (image.PalettedColorModel, os.Error) {\n\tif d.pixelSize > 8 {\n\t\treturn nil, fmt.Errorf(\"gif: can't handle %d bits per pixel\", d.pixelSize)\n\t}\n\tnumColors := 1 << d.pixelSize\n\tnumValues := 3 * numColors\n\t_, err := io.ReadFull(d.r, d.tmp[0:numValues])\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"gif: short read on color map: %s\", err)\n\t}\n\tcolorMap := make(image.PalettedColorModel, numColors)\n\tj := 0\n\tfor i := range colorMap {\n\t\tcolorMap[i] = image.RGBAColor{d.tmp[j+0], d.tmp[j+1], d.tmp[j+2], 0xFF}\n\t\tj += 3\n\t}\n\treturn colorMap, nil\n}\n\nfunc (d *decoder) readExtension() os.Error {\n\textension, err := d.r.ReadByte()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsize := 0\n\tswitch extension {\n\tcase eText:\n\t\tsize = 13\n\tcase eGraphicControl:\n\t\treturn d.readGraphicControl()\n\tcase eComment:\n\t\t\/\/ nothing to do but read the data.\n\tcase eApplication:\n\t\tb, err := d.r.ReadByte()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ The spec requires size be 11, but Adobe sometimes uses 10.\n\t\tsize = int(b)\n\tdefault:\n\t\treturn fmt.Errorf(\"gif: unknown extension 0x%.2x\", extension)\n\t}\n\tif size > 0 {\n\t\tif _, err := d.r.Read(d.tmp[0:size]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Application Extension with \"NETSCAPE2.0\" as string and 1 in data means\n\t\/\/ this extension defines a loop count.\n\tif extension == eApplication && string(d.tmp[:size]) == \"NETSCAPE2.0\" {\n\t\tn, err := d.readBlock()\n\t\tif n == 0 || err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif n == 3 && d.tmp[0] == 1 {\n\t\t\td.loopCount = int(d.tmp[1]) | int(d.tmp[2])<<8\n\t\t}\n\t}\n\tfor {\n\t\tn, err := d.readBlock()\n\t\tif n == 0 || err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc (d *decoder) readGraphicControl() os.Error {\n\tif _, err := io.ReadFull(d.r, d.tmp[0:6]); err != nil {\n\t\treturn fmt.Errorf(\"gif: can't read graphic control: %s\", err)\n\t}\n\td.flags = d.tmp[1]\n\td.delayTime = int(d.tmp[2]) | int(d.tmp[3])<<8\n\tif d.flags&gcTransparentColorSet != 0 {\n\t\td.transparentIndex = d.tmp[4]\n\t\treturn os.ErrorString(\"gif: can't handle transparency\")\n\t}\n\treturn nil\n}\n\nfunc (d *decoder) newImageFromDescriptor() (*image.Paletted, os.Error) {\n\tif _, err := io.ReadFull(d.r, d.tmp[0:9]); err != nil {\n\t\treturn nil, fmt.Errorf(\"gif: can't read image descriptor: %s\", err)\n\t}\n\t_ = int(d.tmp[0]) + int(d.tmp[1])<<8 \/\/ TODO: honor left value\n\t_ = int(d.tmp[2]) + int(d.tmp[3])<<8 \/\/ TODO: honor top value\n\twidth := int(d.tmp[4]) + int(d.tmp[5])<<8\n\theight := int(d.tmp[6]) + int(d.tmp[7])<<8\n\td.imageFields = d.tmp[8]\n\tif d.imageFields&ifInterlace != 0 {\n\t\treturn nil, os.ErrorString(\"gif: can't handle interlaced images\")\n\t}\n\treturn image.NewPaletted(width, height, nil), nil\n}\n\nfunc (d *decoder) readBlock() (int, os.Error) {\n\tn, err := d.r.ReadByte()\n\tif n == 0 || err != nil {\n\t\treturn 0, err\n\t}\n\treturn io.ReadFull(d.r, d.tmp[0:n])\n}\n\n\/\/ Decode reads a GIF image from r and returns the first embedded\n\/\/ image as an image.Image.\n\/\/ Limitation: The file must be 8 bits per pixel and have no interlacing\n\/\/ or transparency.\nfunc Decode(r io.Reader) (image.Image, os.Error) {\n\tvar d decoder\n\tif err := d.decode(r, false); err != nil {\n\t\treturn nil, err\n\t}\n\treturn d.image[0], nil\n}\n\n\/\/ GIF represents the possibly multiple images stored in a GIF file.\ntype GIF struct {\n\tImage []*image.Paletted \/\/ The successive images.\n\tDelay []int \/\/ The successive delay times, one per frame, in 100ths of a second.\n\tLoopCount int \/\/ The loop count.\n}\n\n\/\/ DecodeAll reads a GIF image from r and returns the sequential frames\n\/\/ and timing information.\n\/\/ Limitation: The file must be 8 bits per pixel and have no interlacing\n\/\/ or transparency.\nfunc DecodeAll(r io.Reader) (*GIF, os.Error) {\n\tvar d decoder\n\tif err := d.decode(r, false); err != nil {\n\t\treturn nil, err\n\t}\n\tgif := &GIF{\n\t\tImage: d.image,\n\t\tLoopCount: d.loopCount,\n\t\tDelay: d.delay,\n\t}\n\treturn gif, nil\n}\n\n\/\/ DecodeConfig returns the color model and dimensions of a GIF image without\n\/\/ decoding the entire image.\nfunc DecodeConfig(r io.Reader) (image.Config, os.Error) {\n\tvar d decoder\n\tif err := d.decode(r, true); err != nil {\n\t\treturn image.Config{}, err\n\t}\n\tcolorMap := d.globalColorMap\n\treturn image.Config{colorMap, d.width, d.height}, nil\n}\n\nfunc init() {\n\timage.RegisterFormat(\"gif\", \"GIF8?a\", Decode, DecodeConfig)\n}\n<commit_msg>image\/gif: implement transparency.<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package gif implements a GIF image decoder.\n\/\/\n\/\/ The GIF specification is at http:\/\/www.w3.org\/Graphics\/GIF\/spec-gif89a.txt.\npackage gif\n\nimport (\n\t\"bufio\"\n\t\"compress\/lzw\"\n\t\"fmt\"\n\t\"image\"\n\t\"io\"\n\t\"os\"\n)\n\n\/\/ If the io.Reader does not also have ReadByte, then decode will introduce its own buffering.\ntype reader interface {\n\tio.Reader\n\tio.ByteReader\n}\n\n\/\/ Masks etc.\nconst (\n\t\/\/ Fields.\n\tfColorMapFollows = 1 << 7\n\n\t\/\/ Image fields.\n\tifInterlace = 1 << 6\n\n\t\/\/ Graphic control flags.\n\tgcTransparentColorSet = 1 << 0\n)\n\n\/\/ Section indicators.\nconst (\n\tsExtension = 0x21\n\tsImageDescriptor = 0x2C\n\tsTrailer = 0x3B\n)\n\n\/\/ Extensions.\nconst (\n\teText = 0x01 \/\/ Plain Text\n\teGraphicControl = 0xF9 \/\/ Graphic Control\n\teComment = 0xFE \/\/ Comment\n\teApplication = 0xFF \/\/ Application\n)\n\n\/\/ decoder is the type used to decode a GIF file.\ntype decoder struct {\n\tr reader\n\n\t\/\/ From header.\n\tvers string\n\twidth int\n\theight int\n\tflags byte\n\theaderFields byte\n\tbackgroundIndex byte\n\tloopCount int\n\tdelayTime int\n\n\t\/\/ Unused from header.\n\taspect byte\n\n\t\/\/ From image descriptor.\n\timageFields byte\n\n\t\/\/ From graphics control.\n\ttransparentIndex byte\n\n\t\/\/ Computed.\n\tpixelSize uint\n\tglobalColorMap image.PalettedColorModel\n\n\t\/\/ Used when decoding.\n\tdelay []int\n\timage []*image.Paletted\n\ttmp [1024]byte \/\/ must be at least 768 so we can read color map\n}\n\n\/\/ blockReader parses the block structure of GIF image data, which\n\/\/ comprises (n, (n bytes)) blocks, with 1 <= n <= 255. It is the\n\/\/ reader given to the LZW decoder, which is thus immune to the\n\/\/ blocking. After the LZW decoder completes, there will be a 0-byte\n\/\/ block remaining (0, ()), but under normal execution blockReader\n\/\/ doesn't consume it, so it is handled in decode.\ntype blockReader struct {\n\tr reader\n\tslice []byte\n\ttmp [256]byte\n}\n\nfunc (b *blockReader) Read(p []byte) (n int, err os.Error) {\n\tif len(p) == 0 {\n\t\treturn\n\t}\n\tif len(b.slice) > 0 {\n\t\tn = copy(p, b.slice)\n\t\tb.slice = b.slice[n:]\n\t\treturn\n\t}\n\tvar blockLen uint8\n\tblockLen, err = b.r.ReadByte()\n\tif err != nil {\n\t\treturn\n\t}\n\tif blockLen == 0 {\n\t\treturn 0, os.EOF\n\t}\n\tb.slice = b.tmp[0:blockLen]\n\tif _, err = io.ReadFull(b.r, b.slice); err != nil {\n\t\treturn\n\t}\n\treturn b.Read(p)\n}\n\n\/\/ decode reads a GIF image from r and stores the result in d.\nfunc (d *decoder) decode(r io.Reader, configOnly bool) os.Error {\n\t\/\/ Add buffering if r does not provide ReadByte.\n\tif rr, ok := r.(reader); ok {\n\t\td.r = rr\n\t} else {\n\t\td.r = bufio.NewReader(r)\n\t}\n\n\terr := d.readHeaderAndScreenDescriptor()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif configOnly {\n\t\treturn nil\n\t}\n\n\tif d.headerFields&fColorMapFollows != 0 {\n\t\tif d.globalColorMap, err = d.readColorMap(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\td.image = nil\n\nLoop:\n\tfor err == nil {\n\t\tvar c byte\n\t\tc, err = d.r.ReadByte()\n\t\tif err == os.EOF {\n\t\t\tbreak\n\t\t}\n\t\tswitch c {\n\t\tcase sExtension:\n\t\t\terr = d.readExtension()\n\n\t\tcase sImageDescriptor:\n\t\t\tvar m *image.Paletted\n\t\t\tm, err = d.newImageFromDescriptor()\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif d.imageFields&fColorMapFollows != 0 {\n\t\t\t\tm.Palette, err = d.readColorMap()\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\t\/\/ TODO: do we set transparency in this map too? That would be\n\t\t\t\t\/\/ d.setTransparency(m.Palette)\n\t\t\t} else {\n\t\t\t\tm.Palette = d.globalColorMap\n\t\t\t}\n\t\t\tvar litWidth uint8\n\t\t\tlitWidth, err = d.r.ReadByte()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif litWidth > 8 {\n\t\t\t\treturn fmt.Errorf(\"gif: pixel size in decode out of range: %d\", litWidth)\n\t\t\t}\n\t\t\t\/\/ A wonderfully Go-like piece of magic. Unfortunately it's only at its\n\t\t\t\/\/ best for 8-bit pixels.\n\t\t\tlzwr := lzw.NewReader(&blockReader{r: d.r}, lzw.LSB, int(litWidth))\n\t\t\tif _, err = io.ReadFull(lzwr, m.Pix); err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t\/\/ There should be a \"0\" block remaining; drain that.\n\t\t\tc, err = d.r.ReadByte()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif c != 0 {\n\t\t\t\treturn os.ErrorString(\"gif: extra data after image\")\n\t\t\t}\n\t\t\td.image = append(d.image, m)\n\t\t\td.delay = append(d.delay, d.delayTime)\n\t\t\td.delayTime = 0 \/\/ TODO: is this correct, or should we hold on to the value?\n\n\t\tcase sTrailer:\n\t\t\tbreak Loop\n\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"gif: unknown block type: 0x%.2x\", c)\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(d.image) == 0 {\n\t\treturn io.ErrUnexpectedEOF\n\t}\n\treturn nil\n}\n\nfunc (d *decoder) readHeaderAndScreenDescriptor() os.Error {\n\t_, err := io.ReadFull(d.r, d.tmp[0:13])\n\tif err != nil {\n\t\treturn err\n\t}\n\td.vers = string(d.tmp[0:6])\n\tif d.vers != \"GIF87a\" && d.vers != \"GIF89a\" {\n\t\treturn fmt.Errorf(\"gif: can't recognize format %s\", d.vers)\n\t}\n\td.width = int(d.tmp[6]) + int(d.tmp[7])<<8\n\td.height = int(d.tmp[8]) + int(d.tmp[9])<<8\n\td.headerFields = d.tmp[10]\n\td.backgroundIndex = d.tmp[11]\n\td.aspect = d.tmp[12]\n\td.loopCount = -1\n\td.pixelSize = uint(d.headerFields&7) + 1\n\treturn nil\n}\n\nfunc (d *decoder) readColorMap() (image.PalettedColorModel, os.Error) {\n\tif d.pixelSize > 8 {\n\t\treturn nil, fmt.Errorf(\"gif: can't handle %d bits per pixel\", d.pixelSize)\n\t}\n\tnumColors := 1 << d.pixelSize\n\tnumValues := 3 * numColors\n\t_, err := io.ReadFull(d.r, d.tmp[0:numValues])\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"gif: short read on color map: %s\", err)\n\t}\n\tcolorMap := make(image.PalettedColorModel, numColors)\n\tj := 0\n\tfor i := range colorMap {\n\t\tcolorMap[i] = image.RGBAColor{d.tmp[j+0], d.tmp[j+1], d.tmp[j+2], 0xFF}\n\t\tj += 3\n\t}\n\treturn colorMap, nil\n}\n\nfunc (d *decoder) readExtension() os.Error {\n\textension, err := d.r.ReadByte()\n\tif err != nil {\n\t\treturn err\n\t}\n\tsize := 0\n\tswitch extension {\n\tcase eText:\n\t\tsize = 13\n\tcase eGraphicControl:\n\t\treturn d.readGraphicControl()\n\tcase eComment:\n\t\t\/\/ nothing to do but read the data.\n\tcase eApplication:\n\t\tb, err := d.r.ReadByte()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ The spec requires size be 11, but Adobe sometimes uses 10.\n\t\tsize = int(b)\n\tdefault:\n\t\treturn fmt.Errorf(\"gif: unknown extension 0x%.2x\", extension)\n\t}\n\tif size > 0 {\n\t\tif _, err := d.r.Read(d.tmp[0:size]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Application Extension with \"NETSCAPE2.0\" as string and 1 in data means\n\t\/\/ this extension defines a loop count.\n\tif extension == eApplication && string(d.tmp[:size]) == \"NETSCAPE2.0\" {\n\t\tn, err := d.readBlock()\n\t\tif n == 0 || err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif n == 3 && d.tmp[0] == 1 {\n\t\t\td.loopCount = int(d.tmp[1]) | int(d.tmp[2])<<8\n\t\t}\n\t}\n\tfor {\n\t\tn, err := d.readBlock()\n\t\tif n == 0 || err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc (d *decoder) readGraphicControl() os.Error {\n\tif _, err := io.ReadFull(d.r, d.tmp[0:6]); err != nil {\n\t\treturn fmt.Errorf(\"gif: can't read graphic control: %s\", err)\n\t}\n\td.flags = d.tmp[1]\n\td.delayTime = int(d.tmp[2]) | int(d.tmp[3])<<8\n\tif d.flags&gcTransparentColorSet == 0 {\n\t\td.transparentIndex = d.tmp[4]\n\t\td.setTransparency(d.globalColorMap)\n\t}\n\treturn nil\n}\n\nfunc (d *decoder) setTransparency(colorMap image.PalettedColorModel) {\n\tif int(d.transparentIndex) < len(colorMap) {\n\t\tcolorMap[d.transparentIndex] = image.RGBAColor{}\n\t}\n}\n\nfunc (d *decoder) newImageFromDescriptor() (*image.Paletted, os.Error) {\n\tif _, err := io.ReadFull(d.r, d.tmp[0:9]); err != nil {\n\t\treturn nil, fmt.Errorf(\"gif: can't read image descriptor: %s\", err)\n\t}\n\t_ = int(d.tmp[0]) + int(d.tmp[1])<<8 \/\/ TODO: honor left value\n\t_ = int(d.tmp[2]) + int(d.tmp[3])<<8 \/\/ TODO: honor top value\n\twidth := int(d.tmp[4]) + int(d.tmp[5])<<8\n\theight := int(d.tmp[6]) + int(d.tmp[7])<<8\n\td.imageFields = d.tmp[8]\n\tif d.imageFields&ifInterlace != 0 {\n\t\treturn nil, os.ErrorString(\"gif: can't handle interlaced images\")\n\t}\n\treturn image.NewPaletted(width, height, nil), nil\n}\n\nfunc (d *decoder) readBlock() (int, os.Error) {\n\tn, err := d.r.ReadByte()\n\tif n == 0 || err != nil {\n\t\treturn 0, err\n\t}\n\treturn io.ReadFull(d.r, d.tmp[0:n])\n}\n\n\/\/ Decode reads a GIF image from r and returns the first embedded\n\/\/ image as an image.Image.\n\/\/ Limitation: The file must be 8 bits per pixel and have no interlacing.\nfunc Decode(r io.Reader) (image.Image, os.Error) {\n\tvar d decoder\n\tif err := d.decode(r, false); err != nil {\n\t\treturn nil, err\n\t}\n\treturn d.image[0], nil\n}\n\n\/\/ GIF represents the possibly multiple images stored in a GIF file.\ntype GIF struct {\n\tImage []*image.Paletted \/\/ The successive images.\n\tDelay []int \/\/ The successive delay times, one per frame, in 100ths of a second.\n\tLoopCount int \/\/ The loop count.\n}\n\n\/\/ DecodeAll reads a GIF image from r and returns the sequential frames\n\/\/ and timing information.\n\/\/ Limitation: The file must be 8 bits per pixel and have no interlacing.\nfunc DecodeAll(r io.Reader) (*GIF, os.Error) {\n\tvar d decoder\n\tif err := d.decode(r, false); err != nil {\n\t\treturn nil, err\n\t}\n\tgif := &GIF{\n\t\tImage: d.image,\n\t\tLoopCount: d.loopCount,\n\t\tDelay: d.delay,\n\t}\n\treturn gif, nil\n}\n\n\/\/ DecodeConfig returns the color model and dimensions of a GIF image without\n\/\/ decoding the entire image.\nfunc DecodeConfig(r io.Reader) (image.Config, os.Error) {\n\tvar d decoder\n\tif err := d.decode(r, true); err != nil {\n\t\treturn image.Config{}, err\n\t}\n\tcolorMap := d.globalColorMap\n\treturn image.Config{colorMap, d.width, d.height}, nil\n}\n\nfunc init() {\n\timage.RegisterFormat(\"gif\", \"GIF8?a\", Decode, DecodeConfig)\n}\n<|endoftext|>"} {"text":"<commit_before>package plans\n\nimport (\n\t\"sort\"\n\n\t\"github.com\/hashicorp\/terraform\/internal\/addrs\"\n\t\"github.com\/hashicorp\/terraform\/internal\/configs\/configschema\"\n\t\"github.com\/hashicorp\/terraform\/internal\/lang\/globalref\"\n\t\"github.com\/hashicorp\/terraform\/internal\/states\"\n\t\"github.com\/zclconf\/go-cty\/cty\"\n)\n\n\/\/ Plan is the top-level type representing a planned set of changes.\n\/\/\n\/\/ A plan is a summary of the set of changes required to move from a current\n\/\/ state to a goal state derived from configuration. The described changes\n\/\/ are not applied directly, but contain an approximation of the final\n\/\/ result that will be completed during apply by resolving any values that\n\/\/ cannot be predicted.\n\/\/\n\/\/ A plan must always be accompanied by the configuration it was built from,\n\/\/ since the plan does not itself include all of the information required to\n\/\/ make the changes indicated.\ntype Plan struct {\n\t\/\/ Mode is the mode under which this plan was created.\n\t\/\/\n\t\/\/ This is only recorded to allow for UI differences when presenting plans\n\t\/\/ to the end-user, and so it must not be used to influence apply-time\n\t\/\/ behavior. The actions during apply must be described entirely by\n\t\/\/ the Changes field, regardless of how the plan was created.\n\tUIMode Mode\n\n\tVariableValues map[string]DynamicValue\n\tChanges *Changes\n\tDriftedResources []*ResourceInstanceChangeSrc\n\tTargetAddrs []addrs.Targetable\n\tForceReplaceAddrs []addrs.AbsResourceInstance\n\tBackend Backend\n\n\t\/\/ Checks captures a snapshot of the (probably-incomplete) check results\n\t\/\/ at the end of the planning process.\n\t\/\/\n\t\/\/ If this plan is applyable (that is, if the planning process completed\n\t\/\/ without errors) then the set of checks here should be complete even\n\t\/\/ though some of them will likely have StatusUnknown where the check\n\t\/\/ condition depends on values we won't know until the apply step.\n\tChecks *states.CheckResults\n\n\t\/\/ RelevantAttributes is a set of resource instance addresses and\n\t\/\/ attributes that are either directly affected by proposed changes or may\n\t\/\/ have indirectly contributed to them via references in expressions.\n\t\/\/\n\t\/\/ This is the result of a heuristic and is intended only as a hint to\n\t\/\/ the UI layer in case it wants to emphasize or de-emphasize certain\n\t\/\/ resources. Don't use this to drive any non-cosmetic behavior, especially\n\t\/\/ including anything that would be subject to compatibility constraints.\n\tRelevantAttributes []globalref.ResourceAttr\n\n\t\/\/ PrevRunState and PriorState both describe the situation that the plan\n\t\/\/ was derived from:\n\t\/\/\n\t\/\/ PrevRunState is a representation of the outcome of the previous\n\t\/\/ Terraform operation, without any updates from the remote system but\n\t\/\/ potentially including some changes that resulted from state upgrade\n\t\/\/ actions.\n\t\/\/\n\t\/\/ PriorState is a representation of the current state of remote objects,\n\t\/\/ which will differ from PrevRunState if the \"refresh\" step returned\n\t\/\/ different data, which might reflect drift.\n\t\/\/\n\t\/\/ PriorState is the main snapshot we use for actions during apply.\n\t\/\/ PrevRunState is only here so that we can diff PriorState against it in\n\t\/\/ order to report to the user any out-of-band changes we've detected.\n\tPrevRunState *states.State\n\tPriorState *states.State\n}\n\n\/\/ CanApply returns true if and only if the recieving plan includes content\n\/\/ that would make sense to apply. If it returns false, the plan operation\n\/\/ should indicate that there's nothing to do and Terraform should exit\n\/\/ without prompting the user to confirm the changes.\n\/\/\n\/\/ This function represents our main business logic for making the decision\n\/\/ about whether a given plan represents meaningful \"changes\", and so its\n\/\/ exact definition may change over time; the intent is just to centralize the\n\/\/ rules for that rather than duplicating different versions of it at various\n\/\/ locations in the UI code.\nfunc (p *Plan) CanApply() bool {\n\tswitch {\n\tcase !p.Changes.Empty():\n\t\t\/\/ \"Empty\" means that everything in the changes is a \"NoOp\", so if\n\t\t\/\/ not empty then there's at least one non-NoOp change.\n\t\treturn true\n\n\tcase !p.PriorState.ManagedResourcesEqual(p.PrevRunState):\n\t\t\/\/ If there are no changes planned but we detected some\n\t\t\/\/ outside-Terraform changes while refreshing then we consider\n\t\t\/\/ that applyable in isolation only if this was a refresh-only\n\t\t\/\/ plan where we expect updating the state to include these\n\t\t\/\/ changes was the intended goal.\n\t\t\/\/\n\t\t\/\/ (We don't treat a \"refresh only\" plan as applyable in normal\n\t\t\/\/ planning mode because historically the refresh result wasn't\n\t\t\/\/ considered part of a plan at all, and so it would be\n\t\t\/\/ a disruptive breaking change if refreshing alone suddenly\n\t\t\/\/ became applyable in the normal case and an existing configuration\n\t\t\/\/ was relying on ignore_changes in order to be convergent in spite\n\t\t\/\/ of intentional out-of-band operations.)\n\t\treturn p.UIMode == RefreshOnlyMode\n\n\tdefault:\n\t\t\/\/ Otherwise, there are either no changes to apply or they are changes\n\t\t\/\/ our cases above don't consider as worthy of applying in isolation.\n\t\treturn false\n\t}\n}\n\n\/\/ ProviderAddrs returns a list of all of the provider configuration addresses\n\/\/ referenced throughout the receiving plan.\n\/\/\n\/\/ The result is de-duplicated so that each distinct address appears only once.\nfunc (p *Plan) ProviderAddrs() []addrs.AbsProviderConfig {\n\tif p == nil || p.Changes == nil {\n\t\treturn nil\n\t}\n\n\tm := map[string]addrs.AbsProviderConfig{}\n\tfor _, rc := range p.Changes.Resources {\n\t\tm[rc.ProviderAddr.String()] = rc.ProviderAddr\n\t}\n\tif len(m) == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ This is mainly just so we'll get stable results for testing purposes.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\tret := make([]addrs.AbsProviderConfig, len(keys))\n\tfor i, key := range keys {\n\t\tret[i] = m[key]\n\t}\n\n\treturn ret\n}\n\n\/\/ Backend represents the backend-related configuration and other data as it\n\/\/ existed when a plan was created.\ntype Backend struct {\n\t\/\/ Type is the type of backend that the plan will apply against.\n\tType string\n\n\t\/\/ Config is the configuration of the backend, whose schema is decided by\n\t\/\/ the backend Type.\n\tConfig DynamicValue\n\n\t\/\/ Workspace is the name of the workspace that was active when the plan\n\t\/\/ was created. It is illegal to apply a plan created for one workspace\n\t\/\/ to the state of another workspace.\n\t\/\/ (This constraint is already enforced by the statefile lineage mechanism,\n\t\/\/ but storing this explicitly allows us to return a better error message\n\t\/\/ in the situation where the user has the wrong workspace selected.)\n\tWorkspace string\n}\n\nfunc NewBackend(typeName string, config cty.Value, configSchema *configschema.Block, workspaceName string) (*Backend, error) {\n\tdv, err := NewDynamicValue(config, configSchema.ImpliedType())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Backend{\n\t\tType: typeName,\n\t\tConfig: dv,\n\t\tWorkspace: workspaceName,\n\t}, nil\n}\n<commit_msg>update UIMode comment<commit_after>package plans\n\nimport (\n\t\"sort\"\n\n\t\"github.com\/hashicorp\/terraform\/internal\/addrs\"\n\t\"github.com\/hashicorp\/terraform\/internal\/configs\/configschema\"\n\t\"github.com\/hashicorp\/terraform\/internal\/lang\/globalref\"\n\t\"github.com\/hashicorp\/terraform\/internal\/states\"\n\t\"github.com\/zclconf\/go-cty\/cty\"\n)\n\n\/\/ Plan is the top-level type representing a planned set of changes.\n\/\/\n\/\/ A plan is a summary of the set of changes required to move from a current\n\/\/ state to a goal state derived from configuration. The described changes\n\/\/ are not applied directly, but contain an approximation of the final\n\/\/ result that will be completed during apply by resolving any values that\n\/\/ cannot be predicted.\n\/\/\n\/\/ A plan must always be accompanied by the configuration it was built from,\n\/\/ since the plan does not itself include all of the information required to\n\/\/ make the changes indicated.\ntype Plan struct {\n\t\/\/ Mode is the mode under which this plan was created.\n\t\/\/\n\t\/\/ This is only recorded to allow for UI differences when presenting plans\n\t\/\/ to the end-user, and so it must not be used to influence apply-time\n\t\/\/ behavior. The actions during apply must be described entirely by\n\t\/\/ the Changes field, regardless of how the plan was created.\n\t\/\/\n\t\/\/ FIXME: destroy operations still rely on DestroyMode being set, because\n\t\/\/ there is no other source of this information in the plan. New behavior\n\t\/\/ should not be added based on this flag, and changing the flag should be\n\t\/\/ checked carefully against existing destroy behaviors.\n\tUIMode Mode\n\n\tVariableValues map[string]DynamicValue\n\tChanges *Changes\n\tDriftedResources []*ResourceInstanceChangeSrc\n\tTargetAddrs []addrs.Targetable\n\tForceReplaceAddrs []addrs.AbsResourceInstance\n\tBackend Backend\n\n\t\/\/ Checks captures a snapshot of the (probably-incomplete) check results\n\t\/\/ at the end of the planning process.\n\t\/\/\n\t\/\/ If this plan is applyable (that is, if the planning process completed\n\t\/\/ without errors) then the set of checks here should be complete even\n\t\/\/ though some of them will likely have StatusUnknown where the check\n\t\/\/ condition depends on values we won't know until the apply step.\n\tChecks *states.CheckResults\n\n\t\/\/ RelevantAttributes is a set of resource instance addresses and\n\t\/\/ attributes that are either directly affected by proposed changes or may\n\t\/\/ have indirectly contributed to them via references in expressions.\n\t\/\/\n\t\/\/ This is the result of a heuristic and is intended only as a hint to\n\t\/\/ the UI layer in case it wants to emphasize or de-emphasize certain\n\t\/\/ resources. Don't use this to drive any non-cosmetic behavior, especially\n\t\/\/ including anything that would be subject to compatibility constraints.\n\tRelevantAttributes []globalref.ResourceAttr\n\n\t\/\/ PrevRunState and PriorState both describe the situation that the plan\n\t\/\/ was derived from:\n\t\/\/\n\t\/\/ PrevRunState is a representation of the outcome of the previous\n\t\/\/ Terraform operation, without any updates from the remote system but\n\t\/\/ potentially including some changes that resulted from state upgrade\n\t\/\/ actions.\n\t\/\/\n\t\/\/ PriorState is a representation of the current state of remote objects,\n\t\/\/ which will differ from PrevRunState if the \"refresh\" step returned\n\t\/\/ different data, which might reflect drift.\n\t\/\/\n\t\/\/ PriorState is the main snapshot we use for actions during apply.\n\t\/\/ PrevRunState is only here so that we can diff PriorState against it in\n\t\/\/ order to report to the user any out-of-band changes we've detected.\n\tPrevRunState *states.State\n\tPriorState *states.State\n}\n\n\/\/ CanApply returns true if and only if the recieving plan includes content\n\/\/ that would make sense to apply. If it returns false, the plan operation\n\/\/ should indicate that there's nothing to do and Terraform should exit\n\/\/ without prompting the user to confirm the changes.\n\/\/\n\/\/ This function represents our main business logic for making the decision\n\/\/ about whether a given plan represents meaningful \"changes\", and so its\n\/\/ exact definition may change over time; the intent is just to centralize the\n\/\/ rules for that rather than duplicating different versions of it at various\n\/\/ locations in the UI code.\nfunc (p *Plan) CanApply() bool {\n\tswitch {\n\tcase !p.Changes.Empty():\n\t\t\/\/ \"Empty\" means that everything in the changes is a \"NoOp\", so if\n\t\t\/\/ not empty then there's at least one non-NoOp change.\n\t\treturn true\n\n\tcase !p.PriorState.ManagedResourcesEqual(p.PrevRunState):\n\t\t\/\/ If there are no changes planned but we detected some\n\t\t\/\/ outside-Terraform changes while refreshing then we consider\n\t\t\/\/ that applyable in isolation only if this was a refresh-only\n\t\t\/\/ plan where we expect updating the state to include these\n\t\t\/\/ changes was the intended goal.\n\t\t\/\/\n\t\t\/\/ (We don't treat a \"refresh only\" plan as applyable in normal\n\t\t\/\/ planning mode because historically the refresh result wasn't\n\t\t\/\/ considered part of a plan at all, and so it would be\n\t\t\/\/ a disruptive breaking change if refreshing alone suddenly\n\t\t\/\/ became applyable in the normal case and an existing configuration\n\t\t\/\/ was relying on ignore_changes in order to be convergent in spite\n\t\t\/\/ of intentional out-of-band operations.)\n\t\treturn p.UIMode == RefreshOnlyMode\n\n\tdefault:\n\t\t\/\/ Otherwise, there are either no changes to apply or they are changes\n\t\t\/\/ our cases above don't consider as worthy of applying in isolation.\n\t\treturn false\n\t}\n}\n\n\/\/ ProviderAddrs returns a list of all of the provider configuration addresses\n\/\/ referenced throughout the receiving plan.\n\/\/\n\/\/ The result is de-duplicated so that each distinct address appears only once.\nfunc (p *Plan) ProviderAddrs() []addrs.AbsProviderConfig {\n\tif p == nil || p.Changes == nil {\n\t\treturn nil\n\t}\n\n\tm := map[string]addrs.AbsProviderConfig{}\n\tfor _, rc := range p.Changes.Resources {\n\t\tm[rc.ProviderAddr.String()] = rc.ProviderAddr\n\t}\n\tif len(m) == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ This is mainly just so we'll get stable results for testing purposes.\n\tkeys := make([]string, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\tret := make([]addrs.AbsProviderConfig, len(keys))\n\tfor i, key := range keys {\n\t\tret[i] = m[key]\n\t}\n\n\treturn ret\n}\n\n\/\/ Backend represents the backend-related configuration and other data as it\n\/\/ existed when a plan was created.\ntype Backend struct {\n\t\/\/ Type is the type of backend that the plan will apply against.\n\tType string\n\n\t\/\/ Config is the configuration of the backend, whose schema is decided by\n\t\/\/ the backend Type.\n\tConfig DynamicValue\n\n\t\/\/ Workspace is the name of the workspace that was active when the plan\n\t\/\/ was created. It is illegal to apply a plan created for one workspace\n\t\/\/ to the state of another workspace.\n\t\/\/ (This constraint is already enforced by the statefile lineage mechanism,\n\t\/\/ but storing this explicitly allows us to return a better error message\n\t\/\/ in the situation where the user has the wrong workspace selected.)\n\tWorkspace string\n}\n\nfunc NewBackend(typeName string, config cty.Value, configSchema *configschema.Block, workspaceName string) (*Backend, error) {\n\tdv, err := NewDynamicValue(config, configSchema.ImpliedType())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Backend{\n\t\tType: typeName,\n\t\tConfig: dv,\n\t\tWorkspace: workspaceName,\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage ghmetrics\n\nimport (\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ ghTokenUntilResetGaugeVec provides the 'github_token_reset' gauge that\n\/\/ enables keeping track of GitHub reset times.\nvar ghTokenUntilResetGaugeVec = prometheus.NewGaugeVec(\n\tprometheus.GaugeOpts{\n\t\tName: \"github_token_reset\",\n\t\tHelp: \"Last reported GitHub token reset time.\",\n\t},\n\t[]string{\"token_hash\", \"api_version\", \"ratelimit_resource\"},\n)\n\n\/\/ ghTokenUsageGaugeVec provides the 'github_token_usage' gauge that\n\/\/ enables keeping track of GitHub calls and quotas.\nvar ghTokenUsageGaugeVec = prometheus.NewGaugeVec(\n\tprometheus.GaugeOpts{\n\t\tName: \"github_token_usage\",\n\t\tHelp: \"How many GitHub token requets are remaining for the current hour.\",\n\t},\n\t[]string{\"token_hash\", \"api_version\", \"ratelimit_resource\"},\n)\n\n\/\/ ghRequestDurationHistVec provides the 'github_request_duration' histogram that keeps track\n\/\/ of the duration of GitHub requests by API path.\nvar ghRequestDurationHistVec = prometheus.NewHistogramVec(\n\tprometheus.HistogramOpts{\n\t\tName: \"github_request_duration\",\n\t\tHelp: \"GitHub request duration by API path.\",\n\t\tBuckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},\n\t},\n\t[]string{\"token_hash\", \"path\", \"status\", \"user_agent\"},\n)\n\n\/\/ ghRequestDurationHistVec provides the 'github_request_duration' histogram that keeps track\n\/\/ of the duration of GitHub requests by API path.\nvar ghRequestWaitDurationHistVec = prometheus.NewHistogramVec(\n\tprometheus.HistogramOpts{\n\t\tName: \"github_request_wait_duration_seconds\",\n\t\tHelp: \"GitHub request wait duration before sending to API in seconds\",\n\t\tBuckets: []float64{0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 7.5, 10, 15, 20, 25, 30, 45, 60},\n\t},\n\t[]string{\"token_hash\", \"request_type\", \"api\"},\n)\n\n\/\/ cacheCounter provides the 'ghcache_responses' counter vec that is indexed\n\/\/ by the cache response mode.\nvar cacheCounter = prometheus.NewCounterVec(\n\tprometheus.CounterOpts{\n\t\tName: \"ghcache_responses\",\n\t\tHelp: \"How many cache responses of each cache response mode there are.\",\n\t},\n\t[]string{\"mode\", \"path\", \"user_agent\", \"token_hash\"},\n)\n\n\/\/ timeoutDuration provides the 'github_request_timeouts' histogram that keeps\n\/\/ track of the timeouts of GitHub requests by API path.\nvar timeoutDuration = prometheus.NewHistogramVec(\n\tprometheus.HistogramOpts{\n\t\tName: \"github_request_timeouts\",\n\t\tHelp: \"GitHub request timeout by API path.\",\n\t\tBuckets: []float64{45, 60, 90, 120, 300},\n\t},\n\t[]string{\"token_hash\", \"path\", \"user_agent\"},\n)\n\n\/\/ cacheEntryAge tells us about the age of responses\n\/\/ that came from the cache.\nvar cacheEntryAge = prometheus.NewHistogramVec(\n\tprometheus.HistogramOpts{\n\t\tName: \"ghcache_cache_entry_age_seconds\",\n\t\tHelp: \"The age of cache entries by API path.\",\n\t\tBuckets: []float64{5, 900, 1800, 3600, 7200, 14400},\n\t},\n\t[]string{\"token_hash\", \"path\", \"user_agent\"},\n)\n\nvar muxTokenUsage sync.Mutex\nvar lastGitHubResponse time.Time\n\nfunc init() {\n\tprometheus.MustRegister(ghTokenUntilResetGaugeVec)\n\tprometheus.MustRegister(ghTokenUsageGaugeVec)\n\tprometheus.MustRegister(ghRequestDurationHistVec)\n\tprometheus.MustRegister(ghRequestWaitDurationHistVec)\n\tprometheus.MustRegister(cacheCounter)\n\tprometheus.MustRegister(timeoutDuration)\n\tprometheus.MustRegister(cacheEntryAge)\n}\n\n\/\/ CollectGitHubTokenMetrics publishes the rate limits of the github api to\n\/\/ `github_token_usage` as well as `github_token_reset` on prometheus.\nfunc CollectGitHubTokenMetrics(tokenHash, apiVersion string, headers http.Header, reqStartTime, responseTime time.Time) {\n\tremaining := headers.Get(\"X-RateLimit-Remaining\")\n\tif remaining == \"\" {\n\t\treturn\n\t}\n\tresource := headers.Get(\"X-RateLimit-Resource\")\n\ttimeUntilReset := timestampStringToTime(headers.Get(\"X-RateLimit-Reset\"))\n\tdurationUntilReset := timeUntilReset.Sub(reqStartTime)\n\n\tremainingFloat, err := strconv.ParseFloat(remaining, 64)\n\tif err != nil {\n\t\tlogrus.WithError(err).Infof(\"Couldn't convert number of remaining token requests into gauge value (float)\")\n\t}\n\tif remainingFloat == 0 {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"header\": remaining,\n\t\t\t\"user-agent\": headers.Get(\"User-Agent\"),\n\t\t}).Debug(\"Parsed GitHub header as indicating no remaining rate-limit.\")\n\t}\n\n\tmuxTokenUsage.Lock()\n\tisAfter := lastGitHubResponse.After(responseTime)\n\tif !isAfter {\n\t\tlastGitHubResponse = responseTime\n\t}\n\tmuxTokenUsage.Unlock()\n\tif isAfter {\n\t\tlogrus.WithField(\"last-github-response\", lastGitHubResponse).WithField(\"response-time\", responseTime).Debug(\"Previously pushed metrics of a newer response, skipping old metrics\")\n\t} else {\n\t\tghTokenUntilResetGaugeVec.With(prometheus.Labels{\"token_hash\": tokenHash, \"api_version\": apiVersion, \"ratelimit_resource\": resource}).Set(float64(durationUntilReset.Nanoseconds()))\n\t\tghTokenUsageGaugeVec.With(prometheus.Labels{\"token_hash\": tokenHash, \"api_version\": apiVersion, \"ratelimit_resource\": resource}).Set(remainingFloat)\n\t}\n}\n\n\/\/ CollectGitHubRequestMetrics publishes the number of requests by API path to\n\/\/ `github_requests` on prometheus.\nfunc CollectGitHubRequestMetrics(tokenHash, path, statusCode, userAgent string, roundTripTime float64) {\n\tghRequestDurationHistVec.With(prometheus.Labels{\"token_hash\": tokenHash, \"path\": simplifier.Simplify(path), \"status\": statusCode, \"user_agent\": userAgentWithoutVersion(userAgent)}).Observe(roundTripTime)\n}\n\n\/\/ timestampStringToTime takes a unix timestamp and returns a `time.Time`\n\/\/ from the given time.\nfunc timestampStringToTime(tstamp string) time.Time {\n\ttimestamp, err := strconv.ParseInt(tstamp, 10, 64)\n\tif err != nil {\n\t\tlogrus.WithField(\"timestamp\", tstamp).Info(\"Couldn't convert unix timestamp\")\n\t}\n\treturn time.Unix(timestamp, 0)\n}\n\n\/\/ userAgentWithouVersion formats a user agent without the version to reduce label cardinality\nfunc userAgentWithoutVersion(userAgent string) string {\n\tif !strings.Contains(userAgent, \"\/\") {\n\t\treturn userAgent\n\t}\n\treturn strings.SplitN(userAgent, \"\/\", 2)[0]\n}\n\n\/\/ CollectCacheRequestMetrics records a cache outcome for a specific path\nfunc CollectCacheRequestMetrics(mode, path, userAgent, tokenHash string) {\n\tcacheCounter.With(prometheus.Labels{\"mode\": mode, \"path\": simplifier.Simplify(path), \"user_agent\": userAgentWithoutVersion(userAgent), \"token_hash\": tokenHash}).Inc()\n}\n\nfunc CollectCacheEntryAgeMetrics(age float64, path, userAgent, tokenHash string) {\n\tcacheEntryAge.With(prometheus.Labels{\"path\": simplifier.Simplify(path), \"user_agent\": userAgentWithoutVersion(userAgent), \"token_hash\": tokenHash}).Observe(age)\n}\n\n\/\/ CollectRequestTimeoutMetrics publishes the duration of timed-out requests by\n\/\/ API path to 'github_request_timeouts' on prometheus.\nfunc CollectRequestTimeoutMetrics(tokenHash, path, userAgent string, reqStartTime, responseTime time.Time) {\n\ttimeoutDuration.With(prometheus.Labels{\"token_hash\": tokenHash, \"path\": simplifier.Simplify(path), \"user_agent\": userAgentWithoutVersion(userAgent)}).Observe(float64(responseTime.Sub(reqStartTime).Seconds()))\n}\n\n\/\/ CollectGitHubRequestWaitDurationMetrics publishes the wait duration of requests\n\/\/ before sending to respective GitHub API on prometheus.\nfunc CollectGitHubRequestWaitDurationMetrics(tokenHash, requestType, api string, duration time.Duration) {\n\tghRequestWaitDurationHistVec.With(prometheus.Labels{\"token_hash\": tokenHash, \"request_type\": requestType, \"api\": api}).Observe(duration.Seconds())\n}\n<commit_msg>More realistic buckets for GH Rate limiting algorithm<commit_after>\/*\nCopyright 2019 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage ghmetrics\n\nimport (\n\t\"net\/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ ghTokenUntilResetGaugeVec provides the 'github_token_reset' gauge that\n\/\/ enables keeping track of GitHub reset times.\nvar ghTokenUntilResetGaugeVec = prometheus.NewGaugeVec(\n\tprometheus.GaugeOpts{\n\t\tName: \"github_token_reset\",\n\t\tHelp: \"Last reported GitHub token reset time.\",\n\t},\n\t[]string{\"token_hash\", \"api_version\", \"ratelimit_resource\"},\n)\n\n\/\/ ghTokenUsageGaugeVec provides the 'github_token_usage' gauge that\n\/\/ enables keeping track of GitHub calls and quotas.\nvar ghTokenUsageGaugeVec = prometheus.NewGaugeVec(\n\tprometheus.GaugeOpts{\n\t\tName: \"github_token_usage\",\n\t\tHelp: \"How many GitHub token requets are remaining for the current hour.\",\n\t},\n\t[]string{\"token_hash\", \"api_version\", \"ratelimit_resource\"},\n)\n\n\/\/ ghRequestDurationHistVec provides the 'github_request_duration' histogram that keeps track\n\/\/ of the duration of GitHub requests by API path.\nvar ghRequestDurationHistVec = prometheus.NewHistogramVec(\n\tprometheus.HistogramOpts{\n\t\tName: \"github_request_duration\",\n\t\tHelp: \"GitHub request duration by API path.\",\n\t\tBuckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},\n\t},\n\t[]string{\"token_hash\", \"path\", \"status\", \"user_agent\"},\n)\n\n\/\/ ghRequestDurationHistVec provides the 'github_request_duration' histogram that keeps track\n\/\/ of the duration of GitHub requests by API path.\nvar ghRequestWaitDurationHistVec = prometheus.NewHistogramVec(\n\tprometheus.HistogramOpts{\n\t\tName: \"github_request_wait_duration_seconds\",\n\t\tHelp: \"GitHub request wait duration before sending to API in seconds\",\n\t\tBuckets: []float64{0.1, 0.25, 0.5, 1, 2.5, 5, 7.5, 10, 15, 20, 25, 30, 45, 60, 90, 120, 150, 180},\n\t},\n\t[]string{\"token_hash\", \"request_type\", \"api\"},\n)\n\n\/\/ cacheCounter provides the 'ghcache_responses' counter vec that is indexed\n\/\/ by the cache response mode.\nvar cacheCounter = prometheus.NewCounterVec(\n\tprometheus.CounterOpts{\n\t\tName: \"ghcache_responses\",\n\t\tHelp: \"How many cache responses of each cache response mode there are.\",\n\t},\n\t[]string{\"mode\", \"path\", \"user_agent\", \"token_hash\"},\n)\n\n\/\/ timeoutDuration provides the 'github_request_timeouts' histogram that keeps\n\/\/ track of the timeouts of GitHub requests by API path.\nvar timeoutDuration = prometheus.NewHistogramVec(\n\tprometheus.HistogramOpts{\n\t\tName: \"github_request_timeouts\",\n\t\tHelp: \"GitHub request timeout by API path.\",\n\t\tBuckets: []float64{45, 60, 90, 120, 300},\n\t},\n\t[]string{\"token_hash\", \"path\", \"user_agent\"},\n)\n\n\/\/ cacheEntryAge tells us about the age of responses\n\/\/ that came from the cache.\nvar cacheEntryAge = prometheus.NewHistogramVec(\n\tprometheus.HistogramOpts{\n\t\tName: \"ghcache_cache_entry_age_seconds\",\n\t\tHelp: \"The age of cache entries by API path.\",\n\t\tBuckets: []float64{5, 900, 1800, 3600, 7200, 14400},\n\t},\n\t[]string{\"token_hash\", \"path\", \"user_agent\"},\n)\n\nvar muxTokenUsage sync.Mutex\nvar lastGitHubResponse time.Time\n\nfunc init() {\n\tprometheus.MustRegister(ghTokenUntilResetGaugeVec)\n\tprometheus.MustRegister(ghTokenUsageGaugeVec)\n\tprometheus.MustRegister(ghRequestDurationHistVec)\n\tprometheus.MustRegister(ghRequestWaitDurationHistVec)\n\tprometheus.MustRegister(cacheCounter)\n\tprometheus.MustRegister(timeoutDuration)\n\tprometheus.MustRegister(cacheEntryAge)\n}\n\n\/\/ CollectGitHubTokenMetrics publishes the rate limits of the github api to\n\/\/ `github_token_usage` as well as `github_token_reset` on prometheus.\nfunc CollectGitHubTokenMetrics(tokenHash, apiVersion string, headers http.Header, reqStartTime, responseTime time.Time) {\n\tremaining := headers.Get(\"X-RateLimit-Remaining\")\n\tif remaining == \"\" {\n\t\treturn\n\t}\n\tresource := headers.Get(\"X-RateLimit-Resource\")\n\ttimeUntilReset := timestampStringToTime(headers.Get(\"X-RateLimit-Reset\"))\n\tdurationUntilReset := timeUntilReset.Sub(reqStartTime)\n\n\tremainingFloat, err := strconv.ParseFloat(remaining, 64)\n\tif err != nil {\n\t\tlogrus.WithError(err).Infof(\"Couldn't convert number of remaining token requests into gauge value (float)\")\n\t}\n\tif remainingFloat == 0 {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"header\": remaining,\n\t\t\t\"user-agent\": headers.Get(\"User-Agent\"),\n\t\t}).Debug(\"Parsed GitHub header as indicating no remaining rate-limit.\")\n\t}\n\n\tmuxTokenUsage.Lock()\n\tisAfter := lastGitHubResponse.After(responseTime)\n\tif !isAfter {\n\t\tlastGitHubResponse = responseTime\n\t}\n\tmuxTokenUsage.Unlock()\n\tif isAfter {\n\t\tlogrus.WithField(\"last-github-response\", lastGitHubResponse).WithField(\"response-time\", responseTime).Debug(\"Previously pushed metrics of a newer response, skipping old metrics\")\n\t} else {\n\t\tghTokenUntilResetGaugeVec.With(prometheus.Labels{\"token_hash\": tokenHash, \"api_version\": apiVersion, \"ratelimit_resource\": resource}).Set(float64(durationUntilReset.Nanoseconds()))\n\t\tghTokenUsageGaugeVec.With(prometheus.Labels{\"token_hash\": tokenHash, \"api_version\": apiVersion, \"ratelimit_resource\": resource}).Set(remainingFloat)\n\t}\n}\n\n\/\/ CollectGitHubRequestMetrics publishes the number of requests by API path to\n\/\/ `github_requests` on prometheus.\nfunc CollectGitHubRequestMetrics(tokenHash, path, statusCode, userAgent string, roundTripTime float64) {\n\tghRequestDurationHistVec.With(prometheus.Labels{\"token_hash\": tokenHash, \"path\": simplifier.Simplify(path), \"status\": statusCode, \"user_agent\": userAgentWithoutVersion(userAgent)}).Observe(roundTripTime)\n}\n\n\/\/ timestampStringToTime takes a unix timestamp and returns a `time.Time`\n\/\/ from the given time.\nfunc timestampStringToTime(tstamp string) time.Time {\n\ttimestamp, err := strconv.ParseInt(tstamp, 10, 64)\n\tif err != nil {\n\t\tlogrus.WithField(\"timestamp\", tstamp).Info(\"Couldn't convert unix timestamp\")\n\t}\n\treturn time.Unix(timestamp, 0)\n}\n\n\/\/ userAgentWithouVersion formats a user agent without the version to reduce label cardinality\nfunc userAgentWithoutVersion(userAgent string) string {\n\tif !strings.Contains(userAgent, \"\/\") {\n\t\treturn userAgent\n\t}\n\treturn strings.SplitN(userAgent, \"\/\", 2)[0]\n}\n\n\/\/ CollectCacheRequestMetrics records a cache outcome for a specific path\nfunc CollectCacheRequestMetrics(mode, path, userAgent, tokenHash string) {\n\tcacheCounter.With(prometheus.Labels{\"mode\": mode, \"path\": simplifier.Simplify(path), \"user_agent\": userAgentWithoutVersion(userAgent), \"token_hash\": tokenHash}).Inc()\n}\n\nfunc CollectCacheEntryAgeMetrics(age float64, path, userAgent, tokenHash string) {\n\tcacheEntryAge.With(prometheus.Labels{\"path\": simplifier.Simplify(path), \"user_agent\": userAgentWithoutVersion(userAgent), \"token_hash\": tokenHash}).Observe(age)\n}\n\n\/\/ CollectRequestTimeoutMetrics publishes the duration of timed-out requests by\n\/\/ API path to 'github_request_timeouts' on prometheus.\nfunc CollectRequestTimeoutMetrics(tokenHash, path, userAgent string, reqStartTime, responseTime time.Time) {\n\ttimeoutDuration.With(prometheus.Labels{\"token_hash\": tokenHash, \"path\": simplifier.Simplify(path), \"user_agent\": userAgentWithoutVersion(userAgent)}).Observe(float64(responseTime.Sub(reqStartTime).Seconds()))\n}\n\n\/\/ CollectGitHubRequestWaitDurationMetrics publishes the wait duration of requests\n\/\/ before sending to respective GitHub API on prometheus.\nfunc CollectGitHubRequestWaitDurationMetrics(tokenHash, requestType, api string, duration time.Duration) {\n\tghRequestWaitDurationHistVec.With(prometheus.Labels{\"token_hash\": tokenHash, \"request_type\": requestType, \"api\": api}).Observe(duration.Seconds())\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The go-github AUTHORS. All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage github\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/google\/go-cmp\/cmp\"\n)\n\nfunc TestIssuesService_ListComments_allIssues(t *testing.T) {\n\tclient, mux, _, teardown := setup()\n\tdefer teardown()\n\n\tmux.HandleFunc(\"\/repos\/o\/r\/issues\/comments\", func(w http.ResponseWriter, r *http.Request) {\n\t\ttestMethod(t, r, \"GET\")\n\t\ttestHeader(t, r, \"Accept\", mediaTypeReactionsPreview)\n\t\ttestFormValues(t, r, values{\n\t\t\t\"sort\": \"updated\",\n\t\t\t\"direction\": \"desc\",\n\t\t\t\"since\": \"2002-02-10T15:30:00Z\",\n\t\t\t\"page\": \"2\",\n\t\t})\n\t\tfmt.Fprint(w, `[{\"id\":1}]`)\n\t})\n\n\tsince := time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC)\n\topt := &IssueListCommentsOptions{\n\t\tSort: String(\"updated\"),\n\t\tDirection: String(\"desc\"),\n\t\tSince: &since,\n\t\tListOptions: ListOptions{Page: 2},\n\t}\n\tctx := context.Background()\n\tcomments, _, err := client.Issues.ListComments(ctx, \"o\", \"r\", 0, opt)\n\tif err != nil {\n\t\tt.Errorf(\"Issues.ListComments returned error: %v\", err)\n\t}\n\n\twant := []*IssueComment{{ID: Int64(1)}}\n\tif !cmp.Equal(comments, want) {\n\t\tt.Errorf(\"Issues.ListComments returned %+v, want %+v\", comments, want)\n\t}\n\n\tconst methodName = \"ListComments\"\n\ttestBadOptions(t, methodName, func() (err error) {\n\t\t_, _, err = client.Issues.ListComments(ctx, \"\\n\", \"\\n\", -1, opt)\n\t\treturn err\n\t})\n\n\ttestNewRequestAndDoFailure(t, methodName, client, func() (*Response, error) {\n\t\tgot, resp, err := client.Issues.ListComments(ctx, \"o\", \"r\", 0, opt)\n\t\tif got != nil {\n\t\t\tt.Errorf(\"testNewRequestAndDoFailure %v = %#v, want nil\", methodName, got)\n\t\t}\n\t\treturn resp, err\n\t})\n}\n\nfunc TestIssuesService_ListComments_specificIssue(t *testing.T) {\n\tclient, mux, _, teardown := setup()\n\tdefer teardown()\n\n\tmux.HandleFunc(\"\/repos\/o\/r\/issues\/1\/comments\", func(w http.ResponseWriter, r *http.Request) {\n\t\ttestMethod(t, r, \"GET\")\n\t\ttestHeader(t, r, \"Accept\", mediaTypeReactionsPreview)\n\t\tfmt.Fprint(w, `[{\"id\":1}]`)\n\t})\n\n\tctx := context.Background()\n\tcomments, _, err := client.Issues.ListComments(ctx, \"o\", \"r\", 1, nil)\n\tif err != nil {\n\t\tt.Errorf(\"Issues.ListComments returned error: %v\", err)\n\t}\n\n\twant := []*IssueComment{{ID: Int64(1)}}\n\tif !cmp.Equal(comments, want) {\n\t\tt.Errorf(\"Issues.ListComments returned %+v, want %+v\", comments, want)\n\t}\n\n\tconst methodName = \"ListComments\"\n\ttestBadOptions(t, methodName, func() (err error) {\n\t\t_, _, err = client.Issues.ListComments(ctx, \"\\n\", \"\\n\", -1, nil)\n\t\treturn err\n\t})\n\n\ttestNewRequestAndDoFailure(t, methodName, client, func() (*Response, error) {\n\t\tgot, resp, err := client.Issues.ListComments(ctx, \"o\", \"r\", 1, nil)\n\t\tif got != nil {\n\t\t\tt.Errorf(\"testNewRequestAndDoFailure %v = %#v, want nil\", methodName, got)\n\t\t}\n\t\treturn resp, err\n\t})\n}\n\nfunc TestIssuesService_ListComments_invalidOwner(t *testing.T) {\n\tclient, _, _, teardown := setup()\n\tdefer teardown()\n\n\tctx := context.Background()\n\t_, _, err := client.Issues.ListComments(ctx, \"%\", \"r\", 1, nil)\n\ttestURLParseError(t, err)\n}\n\nfunc TestIssuesService_GetComment(t *testing.T) {\n\tclient, mux, _, teardown := setup()\n\tdefer teardown()\n\n\tmux.HandleFunc(\"\/repos\/o\/r\/issues\/comments\/1\", func(w http.ResponseWriter, r *http.Request) {\n\t\ttestMethod(t, r, \"GET\")\n\t\ttestHeader(t, r, \"Accept\", mediaTypeReactionsPreview)\n\t\tfmt.Fprint(w, `{\"id\":1}`)\n\t})\n\n\tctx := context.Background()\n\tcomment, _, err := client.Issues.GetComment(ctx, \"o\", \"r\", 1)\n\tif err != nil {\n\t\tt.Errorf(\"Issues.GetComment returned error: %v\", err)\n\t}\n\n\twant := &IssueComment{ID: Int64(1)}\n\tif !cmp.Equal(comment, want) {\n\t\tt.Errorf(\"Issues.GetComment returned %+v, want %+v\", comment, want)\n\t}\n\n\tconst methodName = \"GetComment\"\n\ttestBadOptions(t, methodName, func() (err error) {\n\t\t_, _, err = client.Issues.GetComment(ctx, \"\\n\", \"\\n\", -1)\n\t\treturn err\n\t})\n\n\ttestNewRequestAndDoFailure(t, methodName, client, func() (*Response, error) {\n\t\tgot, resp, err := client.Issues.GetComment(ctx, \"o\", \"r\", 1)\n\t\tif got != nil {\n\t\t\tt.Errorf(\"testNewRequestAndDoFailure %v = %#v, want nil\", methodName, got)\n\t\t}\n\t\treturn resp, err\n\t})\n}\n\nfunc TestIssuesService_GetComment_invalidOrg(t *testing.T) {\n\tclient, _, _, teardown := setup()\n\tdefer teardown()\n\n\tctx := context.Background()\n\t_, _, err := client.Issues.GetComment(ctx, \"%\", \"r\", 1)\n\ttestURLParseError(t, err)\n}\n\nfunc TestIssuesService_CreateComment(t *testing.T) {\n\tclient, mux, _, teardown := setup()\n\tdefer teardown()\n\n\tinput := &IssueComment{Body: String(\"b\")}\n\n\tmux.HandleFunc(\"\/repos\/o\/r\/issues\/1\/comments\", func(w http.ResponseWriter, r *http.Request) {\n\t\tv := new(IssueComment)\n\t\tjson.NewDecoder(r.Body).Decode(v)\n\n\t\ttestMethod(t, r, \"POST\")\n\t\tif !cmp.Equal(v, input) {\n\t\t\tt.Errorf(\"Request body = %+v, want %+v\", v, input)\n\t\t}\n\n\t\tfmt.Fprint(w, `{\"id\":1}`)\n\t})\n\n\tctx := context.Background()\n\tcomment, _, err := client.Issues.CreateComment(ctx, \"o\", \"r\", 1, input)\n\tif err != nil {\n\t\tt.Errorf(\"Issues.CreateComment returned error: %v\", err)\n\t}\n\n\twant := &IssueComment{ID: Int64(1)}\n\tif !cmp.Equal(comment, want) {\n\t\tt.Errorf(\"Issues.CreateComment returned %+v, want %+v\", comment, want)\n\t}\n\n\tconst methodName = \"CreateComment\"\n\ttestBadOptions(t, methodName, func() (err error) {\n\t\t_, _, err = client.Issues.CreateComment(ctx, \"\\n\", \"\\n\", -1, input)\n\t\treturn err\n\t})\n\n\ttestNewRequestAndDoFailure(t, methodName, client, func() (*Response, error) {\n\t\tgot, resp, err := client.Issues.CreateComment(ctx, \"o\", \"r\", 1, input)\n\t\tif got != nil {\n\t\t\tt.Errorf(\"testNewRequestAndDoFailure %v = %#v, want nil\", methodName, got)\n\t\t}\n\t\treturn resp, err\n\t})\n}\n\nfunc TestIssuesService_CreateComment_invalidOrg(t *testing.T) {\n\tclient, _, _, teardown := setup()\n\tdefer teardown()\n\n\tctx := context.Background()\n\t_, _, err := client.Issues.CreateComment(ctx, \"%\", \"r\", 1, nil)\n\ttestURLParseError(t, err)\n}\n\nfunc TestIssuesService_EditComment(t *testing.T) {\n\tclient, mux, _, teardown := setup()\n\tdefer teardown()\n\n\tinput := &IssueComment{Body: String(\"b\")}\n\n\tmux.HandleFunc(\"\/repos\/o\/r\/issues\/comments\/1\", func(w http.ResponseWriter, r *http.Request) {\n\t\tv := new(IssueComment)\n\t\tjson.NewDecoder(r.Body).Decode(v)\n\n\t\ttestMethod(t, r, \"PATCH\")\n\t\tif !cmp.Equal(v, input) {\n\t\t\tt.Errorf(\"Request body = %+v, want %+v\", v, input)\n\t\t}\n\n\t\tfmt.Fprint(w, `{\"id\":1}`)\n\t})\n\n\tctx := context.Background()\n\tcomment, _, err := client.Issues.EditComment(ctx, \"o\", \"r\", 1, input)\n\tif err != nil {\n\t\tt.Errorf(\"Issues.EditComment returned error: %v\", err)\n\t}\n\n\twant := &IssueComment{ID: Int64(1)}\n\tif !cmp.Equal(comment, want) {\n\t\tt.Errorf(\"Issues.EditComment returned %+v, want %+v\", comment, want)\n\t}\n\n\tconst methodName = \"EditComment\"\n\ttestBadOptions(t, methodName, func() (err error) {\n\t\t_, _, err = client.Issues.EditComment(ctx, \"\\n\", \"\\n\", -1, input)\n\t\treturn err\n\t})\n\n\ttestNewRequestAndDoFailure(t, methodName, client, func() (*Response, error) {\n\t\tgot, resp, err := client.Issues.EditComment(ctx, \"o\", \"r\", 1, input)\n\t\tif got != nil {\n\t\t\tt.Errorf(\"testNewRequestAndDoFailure %v = %#v, want nil\", methodName, got)\n\t\t}\n\t\treturn resp, err\n\t})\n}\n\nfunc TestIssuesService_EditComment_invalidOwner(t *testing.T) {\n\tclient, _, _, teardown := setup()\n\tdefer teardown()\n\n\tctx := context.Background()\n\t_, _, err := client.Issues.EditComment(ctx, \"%\", \"r\", 1, nil)\n\ttestURLParseError(t, err)\n}\n\nfunc TestIssuesService_DeleteComment(t *testing.T) {\n\tclient, mux, _, teardown := setup()\n\tdefer teardown()\n\n\tmux.HandleFunc(\"\/repos\/o\/r\/issues\/comments\/1\", func(w http.ResponseWriter, r *http.Request) {\n\t\ttestMethod(t, r, \"DELETE\")\n\t})\n\n\tctx := context.Background()\n\t_, err := client.Issues.DeleteComment(ctx, \"o\", \"r\", 1)\n\tif err != nil {\n\t\tt.Errorf(\"Issues.DeleteComments returned error: %v\", err)\n\t}\n\n\tconst methodName = \"DeleteComment\"\n\ttestBadOptions(t, methodName, func() (err error) {\n\t\t_, err = client.Issues.DeleteComment(ctx, \"\\n\", \"\\n\", -1)\n\t\treturn err\n\t})\n\n\ttestNewRequestAndDoFailure(t, methodName, client, func() (*Response, error) {\n\t\treturn client.Issues.DeleteComment(ctx, \"o\", \"r\", 1)\n\t})\n}\n\nfunc TestIssuesService_DeleteComment_invalidOwner(t *testing.T) {\n\tclient, _, _, teardown := setup()\n\tdefer teardown()\n\n\tctx := context.Background()\n\t_, err := client.Issues.DeleteComment(ctx, \"%\", \"r\", 1)\n\ttestURLParseError(t, err)\n}\n<commit_msg>Add test cases for JSON resource marshaling (#1977)<commit_after>\/\/ Copyright 2013 The go-github AUTHORS. All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage github\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/google\/go-cmp\/cmp\"\n)\n\nfunc TestIssuesService_ListComments_allIssues(t *testing.T) {\n\tclient, mux, _, teardown := setup()\n\tdefer teardown()\n\n\tmux.HandleFunc(\"\/repos\/o\/r\/issues\/comments\", func(w http.ResponseWriter, r *http.Request) {\n\t\ttestMethod(t, r, \"GET\")\n\t\ttestHeader(t, r, \"Accept\", mediaTypeReactionsPreview)\n\t\ttestFormValues(t, r, values{\n\t\t\t\"sort\": \"updated\",\n\t\t\t\"direction\": \"desc\",\n\t\t\t\"since\": \"2002-02-10T15:30:00Z\",\n\t\t\t\"page\": \"2\",\n\t\t})\n\t\tfmt.Fprint(w, `[{\"id\":1}]`)\n\t})\n\n\tsince := time.Date(2002, time.February, 10, 15, 30, 0, 0, time.UTC)\n\topt := &IssueListCommentsOptions{\n\t\tSort: String(\"updated\"),\n\t\tDirection: String(\"desc\"),\n\t\tSince: &since,\n\t\tListOptions: ListOptions{Page: 2},\n\t}\n\tctx := context.Background()\n\tcomments, _, err := client.Issues.ListComments(ctx, \"o\", \"r\", 0, opt)\n\tif err != nil {\n\t\tt.Errorf(\"Issues.ListComments returned error: %v\", err)\n\t}\n\n\twant := []*IssueComment{{ID: Int64(1)}}\n\tif !cmp.Equal(comments, want) {\n\t\tt.Errorf(\"Issues.ListComments returned %+v, want %+v\", comments, want)\n\t}\n\n\tconst methodName = \"ListComments\"\n\ttestBadOptions(t, methodName, func() (err error) {\n\t\t_, _, err = client.Issues.ListComments(ctx, \"\\n\", \"\\n\", -1, opt)\n\t\treturn err\n\t})\n\n\ttestNewRequestAndDoFailure(t, methodName, client, func() (*Response, error) {\n\t\tgot, resp, err := client.Issues.ListComments(ctx, \"o\", \"r\", 0, opt)\n\t\tif got != nil {\n\t\t\tt.Errorf(\"testNewRequestAndDoFailure %v = %#v, want nil\", methodName, got)\n\t\t}\n\t\treturn resp, err\n\t})\n}\n\nfunc TestIssuesService_ListComments_specificIssue(t *testing.T) {\n\tclient, mux, _, teardown := setup()\n\tdefer teardown()\n\n\tmux.HandleFunc(\"\/repos\/o\/r\/issues\/1\/comments\", func(w http.ResponseWriter, r *http.Request) {\n\t\ttestMethod(t, r, \"GET\")\n\t\ttestHeader(t, r, \"Accept\", mediaTypeReactionsPreview)\n\t\tfmt.Fprint(w, `[{\"id\":1}]`)\n\t})\n\n\tctx := context.Background()\n\tcomments, _, err := client.Issues.ListComments(ctx, \"o\", \"r\", 1, nil)\n\tif err != nil {\n\t\tt.Errorf(\"Issues.ListComments returned error: %v\", err)\n\t}\n\n\twant := []*IssueComment{{ID: Int64(1)}}\n\tif !cmp.Equal(comments, want) {\n\t\tt.Errorf(\"Issues.ListComments returned %+v, want %+v\", comments, want)\n\t}\n\n\tconst methodName = \"ListComments\"\n\ttestBadOptions(t, methodName, func() (err error) {\n\t\t_, _, err = client.Issues.ListComments(ctx, \"\\n\", \"\\n\", -1, nil)\n\t\treturn err\n\t})\n\n\ttestNewRequestAndDoFailure(t, methodName, client, func() (*Response, error) {\n\t\tgot, resp, err := client.Issues.ListComments(ctx, \"o\", \"r\", 1, nil)\n\t\tif got != nil {\n\t\t\tt.Errorf(\"testNewRequestAndDoFailure %v = %#v, want nil\", methodName, got)\n\t\t}\n\t\treturn resp, err\n\t})\n}\n\nfunc TestIssuesService_ListComments_invalidOwner(t *testing.T) {\n\tclient, _, _, teardown := setup()\n\tdefer teardown()\n\n\tctx := context.Background()\n\t_, _, err := client.Issues.ListComments(ctx, \"%\", \"r\", 1, nil)\n\ttestURLParseError(t, err)\n}\n\nfunc TestIssuesService_GetComment(t *testing.T) {\n\tclient, mux, _, teardown := setup()\n\tdefer teardown()\n\n\tmux.HandleFunc(\"\/repos\/o\/r\/issues\/comments\/1\", func(w http.ResponseWriter, r *http.Request) {\n\t\ttestMethod(t, r, \"GET\")\n\t\ttestHeader(t, r, \"Accept\", mediaTypeReactionsPreview)\n\t\tfmt.Fprint(w, `{\"id\":1}`)\n\t})\n\n\tctx := context.Background()\n\tcomment, _, err := client.Issues.GetComment(ctx, \"o\", \"r\", 1)\n\tif err != nil {\n\t\tt.Errorf(\"Issues.GetComment returned error: %v\", err)\n\t}\n\n\twant := &IssueComment{ID: Int64(1)}\n\tif !cmp.Equal(comment, want) {\n\t\tt.Errorf(\"Issues.GetComment returned %+v, want %+v\", comment, want)\n\t}\n\n\tconst methodName = \"GetComment\"\n\ttestBadOptions(t, methodName, func() (err error) {\n\t\t_, _, err = client.Issues.GetComment(ctx, \"\\n\", \"\\n\", -1)\n\t\treturn err\n\t})\n\n\ttestNewRequestAndDoFailure(t, methodName, client, func() (*Response, error) {\n\t\tgot, resp, err := client.Issues.GetComment(ctx, \"o\", \"r\", 1)\n\t\tif got != nil {\n\t\t\tt.Errorf(\"testNewRequestAndDoFailure %v = %#v, want nil\", methodName, got)\n\t\t}\n\t\treturn resp, err\n\t})\n}\n\nfunc TestIssuesService_GetComment_invalidOrg(t *testing.T) {\n\tclient, _, _, teardown := setup()\n\tdefer teardown()\n\n\tctx := context.Background()\n\t_, _, err := client.Issues.GetComment(ctx, \"%\", \"r\", 1)\n\ttestURLParseError(t, err)\n}\n\nfunc TestIssuesService_CreateComment(t *testing.T) {\n\tclient, mux, _, teardown := setup()\n\tdefer teardown()\n\n\tinput := &IssueComment{Body: String(\"b\")}\n\n\tmux.HandleFunc(\"\/repos\/o\/r\/issues\/1\/comments\", func(w http.ResponseWriter, r *http.Request) {\n\t\tv := new(IssueComment)\n\t\tjson.NewDecoder(r.Body).Decode(v)\n\n\t\ttestMethod(t, r, \"POST\")\n\t\tif !cmp.Equal(v, input) {\n\t\t\tt.Errorf(\"Request body = %+v, want %+v\", v, input)\n\t\t}\n\n\t\tfmt.Fprint(w, `{\"id\":1}`)\n\t})\n\n\tctx := context.Background()\n\tcomment, _, err := client.Issues.CreateComment(ctx, \"o\", \"r\", 1, input)\n\tif err != nil {\n\t\tt.Errorf(\"Issues.CreateComment returned error: %v\", err)\n\t}\n\n\twant := &IssueComment{ID: Int64(1)}\n\tif !cmp.Equal(comment, want) {\n\t\tt.Errorf(\"Issues.CreateComment returned %+v, want %+v\", comment, want)\n\t}\n\n\tconst methodName = \"CreateComment\"\n\ttestBadOptions(t, methodName, func() (err error) {\n\t\t_, _, err = client.Issues.CreateComment(ctx, \"\\n\", \"\\n\", -1, input)\n\t\treturn err\n\t})\n\n\ttestNewRequestAndDoFailure(t, methodName, client, func() (*Response, error) {\n\t\tgot, resp, err := client.Issues.CreateComment(ctx, \"o\", \"r\", 1, input)\n\t\tif got != nil {\n\t\t\tt.Errorf(\"testNewRequestAndDoFailure %v = %#v, want nil\", methodName, got)\n\t\t}\n\t\treturn resp, err\n\t})\n}\n\nfunc TestIssuesService_CreateComment_invalidOrg(t *testing.T) {\n\tclient, _, _, teardown := setup()\n\tdefer teardown()\n\n\tctx := context.Background()\n\t_, _, err := client.Issues.CreateComment(ctx, \"%\", \"r\", 1, nil)\n\ttestURLParseError(t, err)\n}\n\nfunc TestIssuesService_EditComment(t *testing.T) {\n\tclient, mux, _, teardown := setup()\n\tdefer teardown()\n\n\tinput := &IssueComment{Body: String(\"b\")}\n\n\tmux.HandleFunc(\"\/repos\/o\/r\/issues\/comments\/1\", func(w http.ResponseWriter, r *http.Request) {\n\t\tv := new(IssueComment)\n\t\tjson.NewDecoder(r.Body).Decode(v)\n\n\t\ttestMethod(t, r, \"PATCH\")\n\t\tif !cmp.Equal(v, input) {\n\t\t\tt.Errorf(\"Request body = %+v, want %+v\", v, input)\n\t\t}\n\n\t\tfmt.Fprint(w, `{\"id\":1}`)\n\t})\n\n\tctx := context.Background()\n\tcomment, _, err := client.Issues.EditComment(ctx, \"o\", \"r\", 1, input)\n\tif err != nil {\n\t\tt.Errorf(\"Issues.EditComment returned error: %v\", err)\n\t}\n\n\twant := &IssueComment{ID: Int64(1)}\n\tif !cmp.Equal(comment, want) {\n\t\tt.Errorf(\"Issues.EditComment returned %+v, want %+v\", comment, want)\n\t}\n\n\tconst methodName = \"EditComment\"\n\ttestBadOptions(t, methodName, func() (err error) {\n\t\t_, _, err = client.Issues.EditComment(ctx, \"\\n\", \"\\n\", -1, input)\n\t\treturn err\n\t})\n\n\ttestNewRequestAndDoFailure(t, methodName, client, func() (*Response, error) {\n\t\tgot, resp, err := client.Issues.EditComment(ctx, \"o\", \"r\", 1, input)\n\t\tif got != nil {\n\t\t\tt.Errorf(\"testNewRequestAndDoFailure %v = %#v, want nil\", methodName, got)\n\t\t}\n\t\treturn resp, err\n\t})\n}\n\nfunc TestIssuesService_EditComment_invalidOwner(t *testing.T) {\n\tclient, _, _, teardown := setup()\n\tdefer teardown()\n\n\tctx := context.Background()\n\t_, _, err := client.Issues.EditComment(ctx, \"%\", \"r\", 1, nil)\n\ttestURLParseError(t, err)\n}\n\nfunc TestIssuesService_DeleteComment(t *testing.T) {\n\tclient, mux, _, teardown := setup()\n\tdefer teardown()\n\n\tmux.HandleFunc(\"\/repos\/o\/r\/issues\/comments\/1\", func(w http.ResponseWriter, r *http.Request) {\n\t\ttestMethod(t, r, \"DELETE\")\n\t})\n\n\tctx := context.Background()\n\t_, err := client.Issues.DeleteComment(ctx, \"o\", \"r\", 1)\n\tif err != nil {\n\t\tt.Errorf(\"Issues.DeleteComments returned error: %v\", err)\n\t}\n\n\tconst methodName = \"DeleteComment\"\n\ttestBadOptions(t, methodName, func() (err error) {\n\t\t_, err = client.Issues.DeleteComment(ctx, \"\\n\", \"\\n\", -1)\n\t\treturn err\n\t})\n\n\ttestNewRequestAndDoFailure(t, methodName, client, func() (*Response, error) {\n\t\treturn client.Issues.DeleteComment(ctx, \"o\", \"r\", 1)\n\t})\n}\n\nfunc TestIssuesService_DeleteComment_invalidOwner(t *testing.T) {\n\tclient, _, _, teardown := setup()\n\tdefer teardown()\n\n\tctx := context.Background()\n\t_, err := client.Issues.DeleteComment(ctx, \"%\", \"r\", 1)\n\ttestURLParseError(t, err)\n}\n\nfunc TestIssueComment_Marshal(t *testing.T) {\n\ttestJSONMarshal(t, &IssueComment{}, \"{}\")\n\n\tu := &IssueComment{\n\t\tID: Int64(1),\n\t\tNodeID: String(\"nid\"),\n\t\tBody: String(\"body\"),\n\t\tUser: &User{\n\t\t\tLogin: String(\"l\"),\n\t\t\tID: Int64(1),\n\t\t\tURL: String(\"u\"),\n\t\t\tAvatarURL: String(\"a\"),\n\t\t\tGravatarID: String(\"g\"),\n\t\t\tName: String(\"n\"),\n\t\t\tCompany: String(\"c\"),\n\t\t\tBlog: String(\"b\"),\n\t\t\tLocation: String(\"l\"),\n\t\t\tEmail: String(\"e\"),\n\t\t\tHireable: Bool(true),\n\t\t\tBio: String(\"b\"),\n\t\t\tTwitterUsername: String(\"t\"),\n\t\t\tPublicRepos: Int(1),\n\t\t\tFollowers: Int(1),\n\t\t\tFollowing: Int(1),\n\t\t\tCreatedAt: &Timestamp{referenceTime},\n\t\t\tSuspendedAt: &Timestamp{referenceTime},\n\t\t},\n\t\tReactions: &Reactions{TotalCount: Int(1)},\n\t\tCreatedAt: &referenceTime,\n\t\tUpdatedAt: &referenceTime,\n\t\tAuthorAssociation: String(\"aa\"),\n\t\tURL: String(\"url\"),\n\t\tHTMLURL: String(\"hurl\"),\n\t\tIssueURL: String(\"iurl\"),\n\t}\n\n\twant := `{\n\t\t\"id\": 1,\n\t\t\"node_id\": \"nid\",\n\t\t\"body\": \"body\",\n\t\t\"user\": {\n\t\t\t\"login\": \"l\",\n\t\t\t\"id\": 1,\n\t\t\t\"avatar_url\": \"a\",\n\t\t\t\"gravatar_id\": \"g\",\n\t\t\t\"name\": \"n\",\n\t\t\t\"company\": \"c\",\n\t\t\t\"blog\": \"b\",\n\t\t\t\"location\": \"l\",\n\t\t\t\"email\": \"e\",\n\t\t\t\"hireable\": true,\n\t\t\t\"bio\": \"b\",\n\t\t\t\"twitter_username\": \"t\",\n\t\t\t\"public_repos\": 1,\n\t\t\t\"followers\": 1,\n\t\t\t\"following\": 1,\n\t\t\t\"created_at\": ` + referenceTimeStr + `,\n\t\t\t\"suspended_at\": ` + referenceTimeStr + `,\n\t\t\t\"url\": \"u\"\n\t\t},\n\t\t\"reactions\": {\n\t\t\t\"total_count\": 1\n\t\t},\n\t\t\"created_at\": ` + referenceTimeStr + `,\n\t\t\"updated_at\": ` + referenceTimeStr + `,\n\t\t\"author_association\": \"aa\",\n\t\t\"url\": \"url\",\n\t\t\"html_url\": \"hurl\",\n\t\t\"issue_url\": \"iurl\"\n\t}`\n\n\ttestJSONMarshal(t, u, want)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * Copyright (c) 2013 ASMlover. All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * * Redistributions of source code must retain the above copyright\n * notice, this list ofconditions and the following disclaimer.\n *\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in\n * the documentation and\/or other materialsprovided with the\n * distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n *\/\npackage stack\n\n\n\ntype node_t struct {\n next *node_t\n value interface{}\n}\n\ntype stack_t struct {\n head *node_t\n size uint\n}\n\n\n\n\nfunc New() *stack_t {\n return &stack_t{nil, 0}\n}\n\nfunc (s *stack_t) Empty() bool {\n return s.head == nil\n}\n\nfunc (s *stack_t) Push(val interface{}) {\n n := node_t{nil, val}\n\n n.next = s.head\n s.head = &n\n s.size++\n}\n\nfunc (s *stack_t) Pop() interface{} {\n if s.head == nil {\n return nil\n }\n\n value := s.head.value\n s.head = s.head.next\n\n return value\n}\n<commit_msg>fix bug of stack module<commit_after>\/*\n * Copyright (c) 2013 ASMlover. All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n *\n * * Redistributions of source code must retain the above copyright\n * notice, this list ofconditions and the following disclaimer.\n *\n * * Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in\n * the documentation and\/or other materialsprovided with the\n * distribution.\n *\n * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,\n * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n * POSSIBILITY OF SUCH DAMAGE.\n *\/\npackage stack\n\n\n\ntype node_t struct {\n next *node_t\n value interface{}\n}\n\ntype stack_t struct {\n head *node_t\n}\n\n\n\n\nfunc New() *stack_t {\n return &stack_t{nil}\n}\n\nfunc (s *stack_t) Empty() bool {\n return s.head == nil\n}\n\nfunc (s *stack_t) Push(val interface{}) {\n n := node_t{s.head, val}\n\n s.head = &n\n}\n\nfunc (s *stack_t) Pop() interface{} {\n if s.head == nil {\n return nil\n }\n\n value := s.head.value\n s.head = s.head.next\n\n return value\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2019 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage mysql\n\nimport (\n\t\"fmt\"\n\n\treplicationdatapb \"vitess.io\/vitess\/go\/vt\/proto\/replicationdata\"\n\t\"vitess.io\/vitess\/go\/vt\/vterrors\"\n)\n\n\/\/ ReplicationStatus holds replication information from SHOW SLAVE STATUS.\ntype ReplicationStatus struct {\n\tPosition Position\n\t\/\/ RelayLogPosition is the Position that the replica would be at if it\n\t\/\/ were to finish executing everything that's currently in its relay log.\n\t\/\/ However, some MySQL flavors don't expose this information,\n\t\/\/ in which case RelayLogPosition.IsZero() will be true.\n\tRelayLogPosition Position\n\tFilePosition Position\n\tFileRelayLogPosition Position\n\tSourceServerID uint\n\tIOThreadRunning bool\n\tSQLThreadRunning bool\n\tReplicationLagSeconds uint\n\tSourceHost string\n\tSourcePort int\n\tConnectRetry int\n\tSourceUUID SID\n}\n\n\/\/ ReplicationRunning returns true iff both the IO and SQL threads are\n\/\/ running.\nfunc (s *ReplicationStatus) ReplicationRunning() bool {\n\treturn s.IOThreadRunning && s.SQLThreadRunning\n}\n\n\/\/ ReplicationStatusToProto translates a Status to proto3.\nfunc ReplicationStatusToProto(s ReplicationStatus) *replicationdatapb.Status {\n\treturn &replicationdatapb.Status{\n\t\tPosition: EncodePosition(s.Position),\n\t\tRelayLogPosition: EncodePosition(s.RelayLogPosition),\n\t\tFilePosition: EncodePosition(s.FilePosition),\n\t\tFileRelayLogPosition: EncodePosition(s.FileRelayLogPosition),\n\t\tSourceServerId: uint32(s.SourceServerID),\n\t\tIoThreadRunning: s.IOThreadRunning,\n\t\tSqlThreadRunning: s.SQLThreadRunning,\n\t\tReplicationLagSeconds: uint32(s.ReplicationLagSeconds),\n\t\tSourceHost: s.SourceHost,\n\t\tSourcePort: int32(s.SourcePort),\n\t\tConnectRetry: int32(s.ConnectRetry),\n\t\tSourceUuid: s.SourceUUID.String(),\n\t}\n}\n\n\/\/ ProtoToReplicationStatus translates a proto Status, or panics.\nfunc ProtoToReplicationStatus(s *replicationdatapb.Status) ReplicationStatus {\n\tpos, err := DecodePosition(s.Position)\n\tif err != nil {\n\t\tpanic(vterrors.Wrapf(err, \"cannot decode Position\"))\n\t}\n\trelayPos, err := DecodePosition(s.RelayLogPosition)\n\tif err != nil {\n\t\tpanic(vterrors.Wrapf(err, \"cannot decode RelayLogPosition\"))\n\t}\n\tfilePos, err := DecodePosition(s.FilePosition)\n\tif err != nil {\n\t\tpanic(vterrors.Wrapf(err, \"cannot decode FilePosition\"))\n\t}\n\tfileRelayPos, err := DecodePosition(s.FileRelayLogPosition)\n\tif err != nil {\n\t\tpanic(vterrors.Wrapf(err, \"cannot decode FileRelayLogPosition\"))\n\t}\n\tvar sid SID\n\tif s.SourceUuid != \"\" {\n\t\tsid, err = ParseSID(s.SourceUuid)\n\t\tif err != nil {\n\t\t\tpanic(vterrors.Wrapf(err, \"cannot decode SourceUUID\"))\n\t\t}\n\t}\n\treturn ReplicationStatus{\n\t\tPosition: pos,\n\t\tRelayLogPosition: relayPos,\n\t\tFilePosition: filePos,\n\t\tFileRelayLogPosition: fileRelayPos,\n\t\tSourceServerID: uint(s.SourceServerId),\n\t\tIOThreadRunning: s.IoThreadRunning,\n\t\tSQLThreadRunning: s.SqlThreadRunning,\n\t\tReplicationLagSeconds: uint(s.ReplicationLagSeconds),\n\t\tSourceHost: s.SourceHost,\n\t\tSourcePort: int(s.SourcePort),\n\t\tConnectRetry: int(s.ConnectRetry),\n\t\tSourceUUID: sid,\n\t}\n}\n\n\/\/ FindErrantGTIDs can be used to find errant GTIDs in the receiver's relay log, by comparing it against all known replicas,\n\/\/ provided as a list of ReplicationStatus's. This method only works if the flavor for all retrieved ReplicationStatus's is MySQL.\n\/\/ The result is returned as a Mysql56GTIDSet, each of whose elements is a found errant GTID.\nfunc (s *ReplicationStatus) FindErrantGTIDs(otherReplicaStatuses []*ReplicationStatus) (Mysql56GTIDSet, error) {\n\tset, ok := s.RelayLogPosition.GTIDSet.(Mysql56GTIDSet)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"errant GTIDs can only be computed on the MySQL flavor\")\n\t}\n\n\totherSets := make([]Mysql56GTIDSet, 0, len(otherReplicaStatuses))\n\tfor _, status := range otherReplicaStatuses {\n\t\totherSet, ok := status.RelayLogPosition.GTIDSet.(Mysql56GTIDSet)\n\t\tif !ok {\n\t\t\tpanic(\"The receiver ReplicationStatus contained a Mysql56GTIDSet in its relay log, but a replica's ReplicationStatus is of another flavor. This should never happen.\")\n\t\t}\n\t\t\/\/ Copy and throw out primary SID from consideration, so we don't mutate input.\n\t\totherSetNoPrimarySID := make(Mysql56GTIDSet, len(otherSet))\n\t\tfor sid, intervals := range otherSet {\n\t\t\tif sid == status.SourceUUID {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\totherSetNoPrimarySID[sid] = intervals\n\t\t}\n\n\t\totherSets = append(otherSets, otherSetNoPrimarySID)\n\t}\n\n\t\/\/ Copy set for final diffSet so we don't mutate receiver.\n\tdiffSet := make(Mysql56GTIDSet, len(set))\n\tfor sid, intervals := range set {\n\t\tif sid == s.SourceUUID {\n\t\t\tcontinue\n\t\t}\n\t\tdiffSet[sid] = intervals\n\t}\n\n\tfor _, otherSet := range otherSets {\n\t\tdiffSet = diffSet.Difference(otherSet)\n\t}\n\n\tif len(diffSet) == 0 {\n\t\t\/\/ If diffSet is empty, then we have no errant GTIDs.\n\t\treturn nil, nil\n\t}\n\n\treturn diffSet, nil\n}\n<commit_msg>Make it clearer which GTIDSet we are using.<commit_after>\/*\nCopyright 2019 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage mysql\n\nimport (\n\t\"fmt\"\n\n\treplicationdatapb \"vitess.io\/vitess\/go\/vt\/proto\/replicationdata\"\n\t\"vitess.io\/vitess\/go\/vt\/vterrors\"\n)\n\n\/\/ ReplicationStatus holds replication information from SHOW SLAVE STATUS.\ntype ReplicationStatus struct {\n\tPosition Position\n\t\/\/ RelayLogPosition is the Position that the replica would be at if it\n\t\/\/ were to finish executing everything that's currently in its relay log.\n\t\/\/ However, some MySQL flavors don't expose this information,\n\t\/\/ in which case RelayLogPosition.IsZero() will be true.\n\tRelayLogPosition Position\n\tFilePosition Position\n\tFileRelayLogPosition Position\n\tSourceServerID uint\n\tIOThreadRunning bool\n\tSQLThreadRunning bool\n\tReplicationLagSeconds uint\n\tSourceHost string\n\tSourcePort int\n\tConnectRetry int\n\tSourceUUID SID\n}\n\n\/\/ ReplicationRunning returns true iff both the IO and SQL threads are\n\/\/ running.\nfunc (s *ReplicationStatus) ReplicationRunning() bool {\n\treturn s.IOThreadRunning && s.SQLThreadRunning\n}\n\n\/\/ ReplicationStatusToProto translates a Status to proto3.\nfunc ReplicationStatusToProto(s ReplicationStatus) *replicationdatapb.Status {\n\treturn &replicationdatapb.Status{\n\t\tPosition: EncodePosition(s.Position),\n\t\tRelayLogPosition: EncodePosition(s.RelayLogPosition),\n\t\tFilePosition: EncodePosition(s.FilePosition),\n\t\tFileRelayLogPosition: EncodePosition(s.FileRelayLogPosition),\n\t\tSourceServerId: uint32(s.SourceServerID),\n\t\tIoThreadRunning: s.IOThreadRunning,\n\t\tSqlThreadRunning: s.SQLThreadRunning,\n\t\tReplicationLagSeconds: uint32(s.ReplicationLagSeconds),\n\t\tSourceHost: s.SourceHost,\n\t\tSourcePort: int32(s.SourcePort),\n\t\tConnectRetry: int32(s.ConnectRetry),\n\t\tSourceUuid: s.SourceUUID.String(),\n\t}\n}\n\n\/\/ ProtoToReplicationStatus translates a proto Status, or panics.\nfunc ProtoToReplicationStatus(s *replicationdatapb.Status) ReplicationStatus {\n\tpos, err := DecodePosition(s.Position)\n\tif err != nil {\n\t\tpanic(vterrors.Wrapf(err, \"cannot decode Position\"))\n\t}\n\trelayPos, err := DecodePosition(s.RelayLogPosition)\n\tif err != nil {\n\t\tpanic(vterrors.Wrapf(err, \"cannot decode RelayLogPosition\"))\n\t}\n\tfilePos, err := DecodePosition(s.FilePosition)\n\tif err != nil {\n\t\tpanic(vterrors.Wrapf(err, \"cannot decode FilePosition\"))\n\t}\n\tfileRelayPos, err := DecodePosition(s.FileRelayLogPosition)\n\tif err != nil {\n\t\tpanic(vterrors.Wrapf(err, \"cannot decode FileRelayLogPosition\"))\n\t}\n\tvar sid SID\n\tif s.SourceUuid != \"\" {\n\t\tsid, err = ParseSID(s.SourceUuid)\n\t\tif err != nil {\n\t\t\tpanic(vterrors.Wrapf(err, \"cannot decode SourceUUID\"))\n\t\t}\n\t}\n\treturn ReplicationStatus{\n\t\tPosition: pos,\n\t\tRelayLogPosition: relayPos,\n\t\tFilePosition: filePos,\n\t\tFileRelayLogPosition: fileRelayPos,\n\t\tSourceServerID: uint(s.SourceServerId),\n\t\tIOThreadRunning: s.IoThreadRunning,\n\t\tSQLThreadRunning: s.SqlThreadRunning,\n\t\tReplicationLagSeconds: uint(s.ReplicationLagSeconds),\n\t\tSourceHost: s.SourceHost,\n\t\tSourcePort: int(s.SourcePort),\n\t\tConnectRetry: int(s.ConnectRetry),\n\t\tSourceUUID: sid,\n\t}\n}\n\n\/\/ FindErrantGTIDs can be used to find errant GTIDs in the receiver's relay log, by comparing it against all known replicas,\n\/\/ provided as a list of ReplicationStatus's. This method only works if the flavor for all retrieved ReplicationStatus's is MySQL.\n\/\/ The result is returned as a Mysql56GTIDSet, each of whose elements is a found errant GTID.\nfunc (s *ReplicationStatus) FindErrantGTIDs(otherReplicaStatuses []*ReplicationStatus) (Mysql56GTIDSet, error) {\n\trelayLogSet, ok := s.RelayLogPosition.GTIDSet.(Mysql56GTIDSet)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"errant GTIDs can only be computed on the MySQL flavor\")\n\t}\n\n\totherSets := make([]Mysql56GTIDSet, 0, len(otherReplicaStatuses))\n\tfor _, status := range otherReplicaStatuses {\n\t\totherSet, ok := status.RelayLogPosition.GTIDSet.(Mysql56GTIDSet)\n\t\tif !ok {\n\t\t\tpanic(\"The receiver ReplicationStatus contained a Mysql56GTIDSet in its relay log, but a replica's ReplicationStatus is of another flavor. This should never happen.\")\n\t\t}\n\t\t\/\/ Copy and throw out primary SID from consideration, so we don't mutate input.\n\t\totherSetNoPrimarySID := make(Mysql56GTIDSet, len(otherSet))\n\t\tfor sid, intervals := range otherSet {\n\t\t\tif sid == status.SourceUUID {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\totherSetNoPrimarySID[sid] = intervals\n\t\t}\n\n\t\totherSets = append(otherSets, otherSetNoPrimarySID)\n\t}\n\n\t\/\/ Copy set for final diffSet so we don't mutate receiver.\n\tdiffSet := make(Mysql56GTIDSet, len(relayLogSet))\n\tfor sid, intervals := range relayLogSet {\n\t\tif sid == s.SourceUUID {\n\t\t\tcontinue\n\t\t}\n\t\tdiffSet[sid] = intervals\n\t}\n\n\tfor _, otherSet := range otherSets {\n\t\tdiffSet = diffSet.Difference(otherSet)\n\t}\n\n\tif len(diffSet) == 0 {\n\t\t\/\/ If diffSet is empty, then we have no errant GTIDs.\n\t\treturn nil, nil\n\t}\n\n\treturn diffSet, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2020 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cluster\n\nimport (\n\t\"regexp\"\n\t\"strings\"\n\n\t\"vitess.io\/vitess\/go\/vt\/log\"\n)\n\n\/\/ FlagsByImpl groups a set of flags by discovery implementation. Its mapping is\n\/\/ impl_name=>flag=>value.\ntype FlagsByImpl map[string]map[string]string\n\n\/\/ Merge applies the flags in the parameter to the receiver, conflicts are\n\/\/ resolved in favor of the parameter and not the receiver.\nfunc (base *FlagsByImpl) Merge(override map[string]map[string]string) {\n\tif (*base) == nil {\n\t\t*base = map[string]map[string]string{}\n\t}\n\n\tfor impl, flags := range override {\n\t\t_, ok := (*base)[impl]\n\t\tif !ok {\n\t\t\t(*base)[impl] = map[string]string{}\n\t\t}\n\n\t\tfor k, v := range flags {\n\t\t\t(*base)[impl][k] = v\n\t\t}\n\t}\n}\n\n\/\/ ClustersFlag implements flag.Value allowing multiple occurrences of a flag to\n\/\/ be accumulated into a map.\ntype ClustersFlag map[string]Config\n\n\/\/ String is part of the flag.Value interface.\nfunc (cf *ClustersFlag) String() string {\n\tbuf := strings.Builder{}\n\n\tbuf.WriteString(\"[\")\n\n\ti := 0\n\n\tfor _, cfg := range *cf {\n\t\tbuf.WriteString(cfg.String())\n\n\t\tif i < len(*cf)-1 {\n\t\t\tbuf.WriteString(\" \")\n\t\t}\n\n\t\ti++\n\t}\n\n\tbuf.WriteString(\"]\")\n\n\treturn buf.String()\n}\n\n\/\/ Type is part of the pflag.Value interface.\nfunc (cf *ClustersFlag) Type() string {\n\treturn \"cluster.ClustersFlag\"\n}\n\n\/\/ Set is part of the flag.Value interface. It merges the parsed config into the\n\/\/ map, allowing ClustersFlag to power a repeated flag. See (*Config).Set for\n\/\/ details on flag parsing.\nfunc (cf *ClustersFlag) Set(value string) error {\n\tif (*cf) == nil {\n\t\t(*cf) = map[string]Config{}\n\t}\n\n\tcfg := Config{\n\t\tDiscoveryFlagsByImpl: map[string]map[string]string{},\n\t}\n\n\tif err := parseFlag(&cfg, value); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Merge a potentially existing config for the same cluster ID.\n\tc, ok := (*cf)[cfg.ID]\n\tif !ok {\n\t\t\/\/ If we don't have an existing config, create an empty one to \"merge\"\n\t\t\/\/ into.\n\t\tc = Config{}\n\t}\n\n\t(*cf)[cfg.ID] = cfg.Merge(c)\n\n\treturn nil\n}\n\n\/\/ nolint:gochecknoglobals\nvar discoveryFlagRegexp = regexp.MustCompile(`^discovery-(?P<impl>\\w+)-(?P<flag>.+)$`)\n\nfunc parseFlag(cfg *Config, value string) error {\n\targs := strings.Split(value, \",\")\n\tfor _, arg := range args {\n\t\tvar (\n\t\t\tname string\n\t\t\tval string\n\t\t)\n\n\t\tif strings.Contains(arg, \"=\") {\n\t\t\tparts := strings.Split(arg, \"=\")\n\t\t\tname = parts[0]\n\t\t\tval = strings.Join(parts[1:], \"=\")\n\t\t} else {\n\t\t\tname = arg\n\t\t\tval = \"true\"\n\t\t}\n\n\t\tif err := parseOne(cfg, name, val); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc parseOne(cfg *Config, name string, val string) error {\n\tswitch name {\n\tcase \"id\":\n\t\tcfg.ID = val\n\tcase \"name\":\n\t\tcfg.Name = val\n\tcase \"discovery\":\n\t\tcfg.DiscoveryImpl = val\n\tcase \"tablet-fqdn-tmpl\":\n\t\tcfg.TabletFQDNTmplStr = val\n\tdefault:\n\t\tif strings.HasPrefix(name, \"vtsql-\") {\n\t\t\tif cfg.VtSQLFlags == nil {\n\t\t\t\tcfg.VtSQLFlags = map[string]string{}\n\t\t\t}\n\n\t\t\tcfg.VtSQLFlags[strings.TrimPrefix(name, \"vtsql-\")] = val\n\n\t\t\treturn nil\n\t\t} else if strings.HasPrefix(name, \"vtctld-\") {\n\t\t\tif cfg.VtctldFlags == nil {\n\t\t\t\tcfg.VtctldFlags = map[string]string{}\n\t\t\t}\n\n\t\t\tcfg.VtctldFlags[strings.TrimPrefix(name, \"vtctld-\")] = val\n\n\t\t\treturn nil\n\t\t}\n\n\t\tmatch := discoveryFlagRegexp.FindStringSubmatch(name)\n\t\tif match == nil {\n\t\t\t\/\/ not a discovery flag\n\t\t\tlog.Warningf(\"Attempted to parse %q as a discovery flag, ignoring ...\", name)\n\t\t\treturn nil\n\t\t}\n\n\t\tvar impl, flag string\n\n\t\tfor i, g := range discoveryFlagRegexp.SubexpNames() {\n\t\t\tswitch g {\n\t\t\tcase \"impl\":\n\t\t\t\timpl = match[i]\n\t\t\tcase \"flag\":\n\t\t\t\tflag = match[i]\n\t\t\t}\n\t\t}\n\n\t\tif cfg.DiscoveryFlagsByImpl == nil {\n\t\t\tcfg.DiscoveryFlagsByImpl = map[string]map[string]string{}\n\t\t}\n\n\t\tif cfg.DiscoveryFlagsByImpl[impl] == nil {\n\t\t\tcfg.DiscoveryFlagsByImpl[impl] = map[string]string{}\n\t\t}\n\n\t\tcfg.DiscoveryFlagsByImpl[impl][flag] = val\n\t}\n\n\treturn nil\n}\n<commit_msg>Restructure parseOne to remove the if\/else chain<commit_after>\/*\nCopyright 2020 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage cluster\n\nimport (\n\t\"regexp\"\n\t\"strings\"\n\n\t\"vitess.io\/vitess\/go\/vt\/log\"\n)\n\n\/\/ FlagsByImpl groups a set of flags by discovery implementation. Its mapping is\n\/\/ impl_name=>flag=>value.\ntype FlagsByImpl map[string]map[string]string\n\n\/\/ Merge applies the flags in the parameter to the receiver, conflicts are\n\/\/ resolved in favor of the parameter and not the receiver.\nfunc (base *FlagsByImpl) Merge(override map[string]map[string]string) {\n\tif (*base) == nil {\n\t\t*base = map[string]map[string]string{}\n\t}\n\n\tfor impl, flags := range override {\n\t\t_, ok := (*base)[impl]\n\t\tif !ok {\n\t\t\t(*base)[impl] = map[string]string{}\n\t\t}\n\n\t\tfor k, v := range flags {\n\t\t\t(*base)[impl][k] = v\n\t\t}\n\t}\n}\n\n\/\/ ClustersFlag implements flag.Value allowing multiple occurrences of a flag to\n\/\/ be accumulated into a map.\ntype ClustersFlag map[string]Config\n\n\/\/ String is part of the flag.Value interface.\nfunc (cf *ClustersFlag) String() string {\n\tbuf := strings.Builder{}\n\n\tbuf.WriteString(\"[\")\n\n\ti := 0\n\n\tfor _, cfg := range *cf {\n\t\tbuf.WriteString(cfg.String())\n\n\t\tif i < len(*cf)-1 {\n\t\t\tbuf.WriteString(\" \")\n\t\t}\n\n\t\ti++\n\t}\n\n\tbuf.WriteString(\"]\")\n\n\treturn buf.String()\n}\n\n\/\/ Type is part of the pflag.Value interface.\nfunc (cf *ClustersFlag) Type() string {\n\treturn \"cluster.ClustersFlag\"\n}\n\n\/\/ Set is part of the flag.Value interface. It merges the parsed config into the\n\/\/ map, allowing ClustersFlag to power a repeated flag. See (*Config).Set for\n\/\/ details on flag parsing.\nfunc (cf *ClustersFlag) Set(value string) error {\n\tif (*cf) == nil {\n\t\t(*cf) = map[string]Config{}\n\t}\n\n\tcfg := Config{\n\t\tDiscoveryFlagsByImpl: map[string]map[string]string{},\n\t}\n\n\tif err := parseFlag(&cfg, value); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Merge a potentially existing config for the same cluster ID.\n\tc, ok := (*cf)[cfg.ID]\n\tif !ok {\n\t\t\/\/ If we don't have an existing config, create an empty one to \"merge\"\n\t\t\/\/ into.\n\t\tc = Config{}\n\t}\n\n\t(*cf)[cfg.ID] = cfg.Merge(c)\n\n\treturn nil\n}\n\n\/\/ nolint:gochecknoglobals\nvar discoveryFlagRegexp = regexp.MustCompile(`^discovery-(?P<impl>\\w+)-(?P<flag>.+)$`)\n\nfunc parseFlag(cfg *Config, value string) error {\n\targs := strings.Split(value, \",\")\n\tfor _, arg := range args {\n\t\tvar (\n\t\t\tname string\n\t\t\tval string\n\t\t)\n\n\t\tif strings.Contains(arg, \"=\") {\n\t\t\tparts := strings.Split(arg, \"=\")\n\t\t\tname = parts[0]\n\t\t\tval = strings.Join(parts[1:], \"=\")\n\t\t} else {\n\t\t\tname = arg\n\t\t\tval = \"true\"\n\t\t}\n\n\t\tif err := parseOne(cfg, name, val); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc parseOne(cfg *Config, name string, val string) error {\n\tswitch name {\n\tcase \"id\":\n\t\tcfg.ID = val\n\tcase \"name\":\n\t\tcfg.Name = val\n\tcase \"discovery\":\n\t\tcfg.DiscoveryImpl = val\n\tcase \"tablet-fqdn-tmpl\":\n\t\tcfg.TabletFQDNTmplStr = val\n\tdefault:\n\t\tswitch {\n\t\tcase strings.HasPrefix(name, \"vtsql-\"):\n\t\t\tif cfg.VtSQLFlags == nil {\n\t\t\t\tcfg.VtSQLFlags = map[string]string{}\n\t\t\t}\n\n\t\t\tcfg.VtSQLFlags[strings.TrimPrefix(name, \"vtsql-\")] = val\n\t\tcase strings.HasPrefix(name, \"vtctld-\"):\n\t\t\tif cfg.VtctldFlags == nil {\n\t\t\t\tcfg.VtctldFlags = map[string]string{}\n\t\t\t}\n\n\t\t\tcfg.VtctldFlags[strings.TrimPrefix(name, \"vtctld-\")] = val\n\t\tdefault:\n\t\t\tmatch := discoveryFlagRegexp.FindStringSubmatch(name)\n\t\t\tif match == nil {\n\t\t\t\t\/\/ not a discovery flag\n\t\t\t\tlog.Warningf(\"Attempted to parse %q as a discovery flag, ignoring ...\", name)\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tvar impl, flag string\n\n\t\t\tfor i, g := range discoveryFlagRegexp.SubexpNames() {\n\t\t\t\tswitch g {\n\t\t\t\tcase \"impl\":\n\t\t\t\t\timpl = match[i]\n\t\t\t\tcase \"flag\":\n\t\t\t\t\tflag = match[i]\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif cfg.DiscoveryFlagsByImpl == nil {\n\t\t\t\tcfg.DiscoveryFlagsByImpl = map[string]map[string]string{}\n\t\t\t}\n\n\t\t\tif cfg.DiscoveryFlagsByImpl[impl] == nil {\n\t\t\t\tcfg.DiscoveryFlagsByImpl[impl] = map[string]string{}\n\t\t\t}\n\n\t\t\tcfg.DiscoveryFlagsByImpl[impl][flag] = val\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package flake\n\nimport (\n\t\"encoding\/binary\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nvar testHardwareID HardwareID\n\nfunc TestMain(m *testing.M) {\n\ttestHardwareID = make([]byte, 8)\n\tbinary.LittleEndian.PutUint64(testHardwareID, 0x112233445566)\n\n\tretCode := m.Run()\n\n\tos.Exit(retCode)\n}\n\nfunc TestGenerateID(t *testing.T) {\n\t\/\/ Create a generator\n\tgen := NewOvertoneEpochGenerator(testHardwareID)\n\n\tassert.NotNil(t, gen, \"Expecting generator to be allocated\")\n\t\/\/assert.Equal(t, OvertoneEpochMs, gen.Epoch(), \"Expecting generator.Epoch() to == OvertoneEpochMS\")\n\t\/\/assert.Equal(t, os.Getpid(), gen.ProcessID(), \"Expecting generator.ProcessID() to == os.Getpid()\")\n\t\/\/assert.Equal(t, testHardwareID, gen.HardwareID(), \"Expecting generator.HardwareID() to == testHardwareID\")\n\t\/\/ assert.Equal(t, OvertFlakeIDLength, gen.IDSize(), \"Expecting generator.IDSize() to == OvertFlakeIDLength\")\n\n\t\/\/ remember when we start the gen so we can compare the timestamp in the id for >=\n\t\/\/startTime := time.Now().UTC().UnixNano() \/ 1e6\n\n\tid, err := gen.Generate(1)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, id, \"Expecting id to be non-nill if err == nil\")\n\tassert.Equal(t, OvertFlakeIDLength, len(id), \"Expecting id length to == %d, not %d\", OvertFlakeIDLength, len(id))\n\n\tassert.Condition(t, func() bool {\n\t\treturn gen.LastAllocatedTime() > 0\n\t})\n\n\tupper := binary.BigEndian.Uint64(id[0:8])\n\t\/\/lower := binary.BigEndian.Uint64(id[8:16])\n\n\t\/*assert.Equal(t,\n\tgen.(*generator).machineID,\n\tlower,\n\t\"expecting lower 64-bits of id to == generator.machineID (%d), not %d\",\n\tgen.(*generator).machineID,\n\tlower)*\/\n\n\t\/\/ because we are generating 1, and we know that we are only requestor then\n\t\/\/ we also know none have been allocated so the interval should be 0\n\tassert.Condition(t, func() bool {\n\t\treturn (upper & 0xFFFF) == 0\n\t})\n\n\t\/*assert.Condition(t, func() bool {\n\t\ttimestamp := upper >> 16\n\t\tbeginDelta := uint64(startTime - gen.Epoch())\n\t\treturn timestamp >= beginDelta\n\t}, \"Expecting upper %d >= %d\", upper>>16, startTime-gen.Epoch())*\/\n}\n\nfunc TestGenerateStreamIDs(t *testing.T) {\n\t\/\/ Create a generator\n\tgen := NewOvertFlakeGenerator(OvertoneEpochMs, testHardwareID, 42, 0)\n\n\t\/\/ Create a buffer which forces the stream to provide them 1 at a time\n\tbuffer := make([]byte, OvertFlakeIDLength)\n\tvar called int\n\ttotalAllocated, err := gen.GenerateAsStream(3, buffer, func(allocated int, ids []byte) error {\n\t\tcalled++\n\t\treturn nil\n\t})\n\tassert.NoError(t, err)\n\tassert.Equal(t, 3, totalAllocated, \"Expecting total # of ids generated to == %d, not %d\", 3, totalAllocated)\n\tassert.Equal(t, 3, called, \"Expecting total # of callbacks to be %d, not %d\", 3, called)\n\n\t\/\/ Create a buffer which forces the stream to provide them 2 at a time\n\tbuffer = make([]byte, OvertFlakeIDLength*2)\n\tcalled = 0\n\n\t\/\/ We are requesting 3 with a buffer that can hold 2, so two callbacks are expected with\n\ttotalAllocated, err = gen.GenerateAsStream(3, buffer, func(allocated int, ids []byte) error {\n\t\tswitch called {\n\t\tcase 0:\n\t\t\tassert.Equal(t, 2, allocated, \"Expecting 1st callback to have % ids, not %d\", 2, allocated)\n\t\tcase 1:\n\t\t\tassert.Equal(t, 1, allocated, \"Expecting last callback to have % ids, not %d\", 1, allocated)\n\t\t}\n\t\tcalled++\n\t\treturn nil\n\t})\n\tassert.NoError(t, err)\n\tassert.Equal(t, 3, totalAllocated, \"Expecting total # of ids generated to == %d, not %d\", 3, totalAllocated)\n\tassert.Equal(t, 2, called, \"Expecting total # of callbacks to be %d, not %d\", 3, called)\n\n}\n<commit_msg>add a test of a larger buffer<commit_after>package flake\n\nimport (\n\t\"encoding\/binary\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nvar testHardwareID HardwareID\n\nfunc TestMain(m *testing.M) {\n\ttestHardwareID = make([]byte, 8)\n\tbinary.LittleEndian.PutUint64(testHardwareID, 0x112233445566)\n\n\tretCode := m.Run()\n\n\tos.Exit(retCode)\n}\n\nfunc TestGenerateID(t *testing.T) {\n\t\/\/ Create a generator\n\tgen := NewOvertoneEpochGenerator(testHardwareID)\n\n\tassert.NotNil(t, gen, \"Expecting generator to be allocated\")\n\t\/\/assert.Equal(t, OvertoneEpochMs, gen.Epoch(), \"Expecting generator.Epoch() to == OvertoneEpochMS\")\n\t\/\/assert.Equal(t, os.Getpid(), gen.ProcessID(), \"Expecting generator.ProcessID() to == os.Getpid()\")\n\t\/\/assert.Equal(t, testHardwareID, gen.HardwareID(), \"Expecting generator.HardwareID() to == testHardwareID\")\n\t\/\/ assert.Equal(t, OvertFlakeIDLength, gen.IDSize(), \"Expecting generator.IDSize() to == OvertFlakeIDLength\")\n\n\t\/\/ remember when we start the gen so we can compare the timestamp in the id for >=\n\t\/\/startTime := time.Now().UTC().UnixNano() \/ 1e6\n\n\tid, err := gen.Generate(1)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, id, \"Expecting id to be non-nill if err == nil\")\n\tassert.Equal(t, OvertFlakeIDLength, len(id), \"Expecting id length to == %d, not %d\", OvertFlakeIDLength, len(id))\n\n\tassert.Condition(t, func() bool {\n\t\treturn gen.LastAllocatedTime() > 0\n\t})\n\n\tupper := binary.BigEndian.Uint64(id[0:8])\n\t\/\/lower := binary.BigEndian.Uint64(id[8:16])\n\n\t\/*assert.Equal(t,\n\tgen.(*generator).machineID,\n\tlower,\n\t\"expecting lower 64-bits of id to == generator.machineID (%d), not %d\",\n\tgen.(*generator).machineID,\n\tlower)*\/\n\n\t\/\/ because we are generating 1, and we know that we are only requestor then\n\t\/\/ we also know none have been allocated so the interval should be 0\n\tassert.Condition(t, func() bool {\n\t\treturn (upper & 0xFFFF) == 0\n\t})\n\n\t\/*assert.Condition(t, func() bool {\n\t\ttimestamp := upper >> 16\n\t\tbeginDelta := uint64(startTime - gen.Epoch())\n\t\treturn timestamp >= beginDelta\n\t}, \"Expecting upper %d >= %d\", upper>>16, startTime-gen.Epoch())*\/\n}\n\nfunc TestGenerateStreamIDs(t *testing.T) {\n\t\/\/ Create a generator\n\tgen := NewOvertFlakeGenerator(OvertoneEpochMs, testHardwareID, 42, 0)\n\n\t\/\/ Create a buffer which forces the stream to provide them 1 at a time\n\tbuffer := make([]byte, OvertFlakeIDLength)\n\tvar called int\n\ttotalAllocated, err := gen.GenerateAsStream(3, buffer, func(allocated int, ids []byte) error {\n\t\tcalled++\n\t\treturn nil\n\t})\n\tassert.NoError(t, err)\n\tassert.Equal(t, 3, totalAllocated, \"Expecting total # of ids generated to == %d, not %d\", 3, totalAllocated)\n\tassert.Equal(t, 3, called, \"Expecting total # of callbacks to be %d, not %d\", 3, called)\n\n\t\/\/ Create a buffer which forces the stream to provide them 2 at a time\n\tbuffer = make([]byte, OvertFlakeIDLength*2)\n\tcalled = 0\n\n\t\/\/ We are requesting 3 with a buffer that can hold 2, so two callbacks are expected with\n\ttotalAllocated, err = gen.GenerateAsStream(3, buffer, func(allocated int, ids []byte) error {\n\t\tswitch called {\n\t\tcase 0:\n\t\t\tassert.Equal(t, 2, allocated, \"Expecting 1st callback to have % ids, not %d\", 2, allocated)\n\t\tcase 1:\n\t\t\tassert.Equal(t, 1, allocated, \"Expecting last callback to have % ids, not %d\", 1, allocated)\n\t\t}\n\t\tcalled++\n\t\treturn nil\n\t})\n\tassert.NoError(t, err)\n\tassert.Equal(t, 3, totalAllocated, \"Expecting total # of ids generated to == %d, not %d\", 3, totalAllocated)\n\tassert.Equal(t, 2, called, \"Expecting total # of callbacks to be %d, not %d\", 3, called)\n\n\tbuffer = make([]byte, OvertFlakeIDLength*64)\n\tcalled = 0\n\n\t\/\/ We are requesting 3 with a buffer that can hold 2, so two callbacks are expected with\n\ttotalAllocated, err = gen.GenerateAsStream(64, buffer, func(allocated int, ids []byte) error {\n\t\tswitch called {\n\t\tcase 0:\n\t\t\tassert.Equal(t, 64, allocated, \"Expecting 1st callback to have % ids, not %d\", 64, allocated)\n\t\tcase 1:\n\t\t\tassert.Fail(t, \"Not expecting 2nd callback\")\n\t\t}\n\t\tcalled++\n\t\treturn nil\n\t})\n\tassert.NoError(t, err)\n\tassert.Equal(t, 64, totalAllocated, \"Expecting total # of ids generated to == %d, not %d\", 64, totalAllocated)\n\tassert.Equal(t, 1, called, \"Expecting total # of callbacks to be %d, not %d\", 1, called)\n}\n<|endoftext|>"} {"text":"<commit_before>package mailp\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/HouzuoGuo\/laitos\/bridge\"\n\t\"github.com\/HouzuoGuo\/laitos\/email\"\n\t\"github.com\/HouzuoGuo\/laitos\/feature\"\n\t\"github.com\/HouzuoGuo\/laitos\/frontend\/common\"\n\t\"github.com\/HouzuoGuo\/laitos\/global\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/*\nLook for feature commands from an incoming mail, run them and reply the sender with command results.\nUsually used in combination of laitos' own SMTP daemon, but it can also work independently in something like\npostfix's \"forward-mail-to-program\" mechanism.\n*\/\ntype MailProcessor struct {\n\tCommandTimeoutSec int `json:\"CommandTimeoutSec\"` \/\/ Commands get time out error after this number of seconds\n\tUndocumented1 Undocumented1 `json:\"Undocumented1\"` \/\/ Intentionally undocumented he he he he\n\tProcessor *common.CommandProcessor `json:\"-\"` \/\/ Feature configuration\n\tReplyMailer email.Mailer `json:\"-\"` \/\/ To deliver Email replies\n\tLogger global.Logger `json:\"-\"` \/\/ Logger\n}\n\n\/\/ Run a health check on mailer and \"undocumented\" things.\nfunc (mailproc *MailProcessor) SelfTest() error {\n\tret := make([]error, 0, 0)\n\tretMutex := &sync.Mutex{}\n\twait := &sync.WaitGroup{}\n\t\/\/ One mailer and one undocumented\n\twait.Add(2)\n\tgo func() {\n\t\terr := mailproc.ReplyMailer.SelfTest()\n\t\tif err != nil {\n\t\t\tretMutex.Lock()\n\t\t\tret = append(ret, err)\n\t\t\tretMutex.Unlock()\n\t\t}\n\t}()\n\tgo func() {\n\t\terr := mailproc.Undocumented1.SelfTest()\n\t\tif err != nil {\n\t\t\tretMutex.Lock()\n\t\t\tret = append(ret, err)\n\t\t\tretMutex.Unlock()\n\t\t}\n\t}()\n\twait.Done()\n\tif len(ret) == 0 {\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"%v\", ret)\n}\n\n\/*\nMake sure mail processor is sane before processing the incoming mail.\nProcess only one command (if found) in the incoming mail. If reply addresses are specified, send command result\nto the specified addresses. If they are not specified, use the incoming mail sender's address as reply address.\n*\/\nfunc (mailproc *MailProcessor) Process(mailContent []byte, replyAddresses ...string) error {\n\tmailproc.Logger = global.Logger{ComponentName: \"MailProcessor\", ComponentID: strconv.Itoa(mailproc.CommandTimeoutSec)}\n\tmailproc.Processor.SetLogger(mailproc.Logger)\n\tif global.EmergencyLockDown {\n\t\treturn global.ErrEmergencyLockDown\n\t}\n\tif errs := mailproc.Processor.IsSaneForInternet(); len(errs) > 0 {\n\t\treturn fmt.Errorf(\"MailProcessor.Process: %+v\", errs)\n\t}\n\tvar commandIsProcessed bool\n\twalkErr := email.WalkMessage(mailContent, func(prop email.BasicProperties, body []byte) (bool, error) {\n\t\t\/\/ Avoid recursive processing\n\t\tif strings.Contains(prop.Subject, email.OutgoingMailSubjectKeyword) {\n\t\t\treturn false, errors.New(\"Ignore email sent by this program itself\")\n\t\t}\n\t\tmailproc.Logger.Printf(\"Process\", prop.FromAddress, nil, \"process message of type %s, subject \\\"%s\\\"\", prop.ContentType, prop.Subject)\n\t\t\/\/ By contract, PIN processor finds command among input lines.\n\t\tresult := mailproc.Processor.Process(feature.Command{\n\t\t\tContent: string(body),\n\t\t\tTimeoutSec: mailproc.CommandTimeoutSec,\n\t\t})\n\t\t\/\/ If this part does not have a PIN\/shortcut match, simply move on to the next part.\n\t\tif result.Error == bridge.ErrPINAndShortcutNotFound {\n\t\t\t\/\/ Move on, do not return error.\n\t\t\treturn true, nil\n\t\t} else if result.Error != nil {\n\t\t\t\/\/ In case of command processing error, do not move on, return the error.\n\t\t\treturn false, result.Error\n\t\t}\n\t\t\/\/ A command has been processed, now work on the reply.\n\t\tcommandIsProcessed = true\n\t\t\/\/ Normally the result should be sent as Email reply, but there are undocumented scenarios.\n\t\tif mailproc.Undocumented1.IsConfigured() {\n\t\t\t\/\/ The undocumented scenario is triggered by an Email address suffix\n\t\t\tif mailproc.Undocumented1.Addr1 != \"\" && strings.HasSuffix(prop.ReplyAddress, mailproc.Undocumented1.Addr1) {\n\t\t\t\t\/\/ Let the undocumented scenario take care of delivering the result\n\t\t\t\tif undoc1Err := mailproc.Undocumented1.SendMessage(result.CombinedOutput); undoc1Err == nil {\n\t\t\t\t\treturn false, nil\n\t\t\t\t} else {\n\t\t\t\t\treturn false, undoc1Err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\/\/ The Email address suffix did not satisfy undocumented scenario, so send the result as a normal Email reply.\n\t\tif !mailproc.ReplyMailer.IsConfigured() {\n\t\t\treturn false, errors.New(\"The reply has to be sent via Email but configuration is missing\")\n\t\t}\n\t\trecipients := replyAddresses\n\t\tif recipients == nil || len(recipients) == 0 {\n\t\t\trecipients = []string{prop.ReplyAddress}\n\t\t}\n\t\treturn false, mailproc.ReplyMailer.Send(email.OutgoingMailSubjectKeyword+\"-reply-\"+result.Command.Content, result.CombinedOutput, recipients...)\n\t})\n\tif walkErr != nil {\n\t\treturn walkErr\n\t}\n\t\/\/ If all parts have been visited but no command is found, return the PIN mismatch error.\n\tif !commandIsProcessed {\n\t\treturn bridge.ErrPINAndShortcutNotFound\n\t}\n\treturn nil\n}\n\nvar TestUndocumented1Message = \"\" \/\/ Content is set by init_mail_test.go\nvar TestUndocumented1Wolfram = feature.WolframAlpha{} \/\/ Details are set by init_mail_test.go\n<commit_msg>fix malfunctioned self test of mail processor<commit_after>package mailp\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"github.com\/HouzuoGuo\/laitos\/bridge\"\n\t\"github.com\/HouzuoGuo\/laitos\/email\"\n\t\"github.com\/HouzuoGuo\/laitos\/feature\"\n\t\"github.com\/HouzuoGuo\/laitos\/frontend\/common\"\n\t\"github.com\/HouzuoGuo\/laitos\/global\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\n\/*\nLook for feature commands from an incoming mail, run them and reply the sender with command results.\nUsually used in combination of laitos' own SMTP daemon, but it can also work independently in something like\npostfix's \"forward-mail-to-program\" mechanism.\n*\/\ntype MailProcessor struct {\n\tCommandTimeoutSec int `json:\"CommandTimeoutSec\"` \/\/ Commands get time out error after this number of seconds\n\tUndocumented1 Undocumented1 `json:\"Undocumented1\"` \/\/ Intentionally undocumented he he he he\n\tProcessor *common.CommandProcessor `json:\"-\"` \/\/ Feature configuration\n\tReplyMailer email.Mailer `json:\"-\"` \/\/ To deliver Email replies\n\tLogger global.Logger `json:\"-\"` \/\/ Logger\n}\n\n\/\/ Run a health check on mailer and \"undocumented\" things.\nfunc (mailproc *MailProcessor) SelfTest() error {\n\tret := make([]error, 0, 0)\n\tretMutex := &sync.Mutex{}\n\twait := &sync.WaitGroup{}\n\t\/\/ One mailer and one undocumented\n\twait.Add(2)\n\tgo func() {\n\t\terr := mailproc.ReplyMailer.SelfTest()\n\t\tif err != nil {\n\t\t\tretMutex.Lock()\n\t\t\tret = append(ret, err)\n\t\t\tretMutex.Unlock()\n\t\t}\n\t\twait.Done()\n\t}()\n\tgo func() {\n\t\terr := mailproc.Undocumented1.SelfTest()\n\t\tif err != nil {\n\t\t\tretMutex.Lock()\n\t\t\tret = append(ret, err)\n\t\t\tretMutex.Unlock()\n\t\t}\n\t\twait.Done()\n\t}()\n\twait.Wait()\n\tif len(ret) == 0 {\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"%v\", ret)\n}\n\n\/*\nMake sure mail processor is sane before processing the incoming mail.\nProcess only one command (if found) in the incoming mail. If reply addresses are specified, send command result\nto the specified addresses. If they are not specified, use the incoming mail sender's address as reply address.\n*\/\nfunc (mailproc *MailProcessor) Process(mailContent []byte, replyAddresses ...string) error {\n\tmailproc.Logger = global.Logger{ComponentName: \"MailProcessor\", ComponentID: strconv.Itoa(mailproc.CommandTimeoutSec)}\n\tmailproc.Processor.SetLogger(mailproc.Logger)\n\tif global.EmergencyLockDown {\n\t\treturn global.ErrEmergencyLockDown\n\t}\n\tif errs := mailproc.Processor.IsSaneForInternet(); len(errs) > 0 {\n\t\treturn fmt.Errorf(\"MailProcessor.Process: %+v\", errs)\n\t}\n\tvar commandIsProcessed bool\n\twalkErr := email.WalkMessage(mailContent, func(prop email.BasicProperties, body []byte) (bool, error) {\n\t\t\/\/ Avoid recursive processing\n\t\tif strings.Contains(prop.Subject, email.OutgoingMailSubjectKeyword) {\n\t\t\treturn false, errors.New(\"Ignore email sent by this program itself\")\n\t\t}\n\t\tmailproc.Logger.Printf(\"Process\", prop.FromAddress, nil, \"process message of type %s, subject \\\"%s\\\"\", prop.ContentType, prop.Subject)\n\t\t\/\/ By contract, PIN processor finds command among input lines.\n\t\tresult := mailproc.Processor.Process(feature.Command{\n\t\t\tContent: string(body),\n\t\t\tTimeoutSec: mailproc.CommandTimeoutSec,\n\t\t})\n\t\t\/\/ If this part does not have a PIN\/shortcut match, simply move on to the next part.\n\t\tif result.Error == bridge.ErrPINAndShortcutNotFound {\n\t\t\t\/\/ Move on, do not return error.\n\t\t\treturn true, nil\n\t\t} else if result.Error != nil {\n\t\t\t\/\/ In case of command processing error, do not move on, return the error.\n\t\t\treturn false, result.Error\n\t\t}\n\t\t\/\/ A command has been processed, now work on the reply.\n\t\tcommandIsProcessed = true\n\t\t\/\/ Normally the result should be sent as Email reply, but there are undocumented scenarios.\n\t\tif mailproc.Undocumented1.IsConfigured() {\n\t\t\t\/\/ The undocumented scenario is triggered by an Email address suffix\n\t\t\tif mailproc.Undocumented1.Addr1 != \"\" && strings.HasSuffix(prop.ReplyAddress, mailproc.Undocumented1.Addr1) {\n\t\t\t\t\/\/ Let the undocumented scenario take care of delivering the result\n\t\t\t\tif undoc1Err := mailproc.Undocumented1.SendMessage(result.CombinedOutput); undoc1Err == nil {\n\t\t\t\t\treturn false, nil\n\t\t\t\t} else {\n\t\t\t\t\treturn false, undoc1Err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t\/\/ The Email address suffix did not satisfy undocumented scenario, so send the result as a normal Email reply.\n\t\tif !mailproc.ReplyMailer.IsConfigured() {\n\t\t\treturn false, errors.New(\"The reply has to be sent via Email but configuration is missing\")\n\t\t}\n\t\trecipients := replyAddresses\n\t\tif recipients == nil || len(recipients) == 0 {\n\t\t\trecipients = []string{prop.ReplyAddress}\n\t\t}\n\t\treturn false, mailproc.ReplyMailer.Send(email.OutgoingMailSubjectKeyword+\"-reply-\"+result.Command.Content, result.CombinedOutput, recipients...)\n\t})\n\tif walkErr != nil {\n\t\treturn walkErr\n\t}\n\t\/\/ If all parts have been visited but no command is found, return the PIN mismatch error.\n\tif !commandIsProcessed {\n\t\treturn bridge.ErrPINAndShortcutNotFound\n\t}\n\treturn nil\n}\n\nvar TestUndocumented1Message = \"\" \/\/ Content is set by init_mail_test.go\nvar TestUndocumented1Wolfram = feature.WolframAlpha{} \/\/ Details are set by init_mail_test.go\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage util\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/coreos\/fleet\/Godeps\/_workspace\/src\/github.com\/pborman\/uuid\"\n)\n\nvar fleetctlBinPath string\n\nfunc init() {\n\tfleetctlBinPath = os.Getenv(\"FLEETCTL_BIN\")\n\tif fleetctlBinPath == \"\" {\n\t\tfmt.Println(\"FLEETCTL_BIN environment variable must be set\")\n\t\tos.Exit(1)\n\t} else if _, err := os.Stat(fleetctlBinPath); err != nil {\n\t\tfmt.Printf(\"%v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tif os.Getenv(\"SSH_AUTH_SOCK\") == \"\" {\n\t\tfmt.Println(\"SSH_AUTH_SOCK environment variable must be set\")\n\t\tos.Exit(1)\n\t}\n}\n\ntype fleetfunc func(args ...string) (string, string, error)\n\nfunc RunFleetctl(args ...string) (string, string, error) {\n\tlog.Printf(\"%s %s\", fleetctlBinPath, strings.Join(args, \" \"))\n\tvar stdoutBytes, stderrBytes bytes.Buffer\n\tcmd := exec.Command(fleetctlBinPath, args...)\n\tcmd.Stdout = &stdoutBytes\n\tcmd.Stderr = &stderrBytes\n\terr := cmd.Run()\n\treturn stdoutBytes.String(), stderrBytes.String(), err\n}\n\nfunc RunFleetctlWithInput(input string, args ...string) (string, string, error) {\n\tlog.Printf(\"%s %s\", fleetctlBinPath, strings.Join(args, \" \"))\n\tvar stdoutBytes, stderrBytes bytes.Buffer\n\tcmd := exec.Command(fleetctlBinPath, args...)\n\tcmd.Stdout = &stdoutBytes\n\tcmd.Stderr = &stderrBytes\n\tstdin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tif err = cmd.Start(); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tstdin.Write([]byte(input))\n\tstdin.Close()\n\terr = cmd.Wait()\n\n\treturn stdoutBytes.String(), stderrBytes.String(), err\n}\n\n\/\/ ActiveToSingleStates takes a map of active states (such as that returned by\n\/\/ WaitForNActiveUnits) and ensures that each unit has at most a single active\n\/\/ state. It returns a mapping of unit name to a single UnitState.\nfunc ActiveToSingleStates(active map[string][]UnitState) (map[string]UnitState, error) {\n\tstates := make(map[string]UnitState)\n\tfor name, us := range active {\n\t\tif len(us) != 1 {\n\t\t\treturn nil, fmt.Errorf(\"unit %s running in multiple locations: %v\", name, us)\n\t\t}\n\t\tstates[name] = us[0]\n\t}\n\treturn states, nil\n}\n\ntype UnitState struct {\n\tName string\n\tActiveState string\n\tMachine string\n}\n\ntype UnitFileState struct {\n\tName string\n\tDesiredState string\n\tState string\n}\n\nfunc ParseUnitStates(units []string) (states []UnitState) {\n\tfor _, unit := range units {\n\t\tcols := strings.Fields(unit)\n\t\tif len(cols) == 3 {\n\t\t\tmachine := strings.SplitN(cols[2], \"\/\", 2)[0]\n\t\t\tstates = append(states, UnitState{cols[0], cols[1], machine})\n\t\t}\n\t}\n\treturn states\n}\n\nfunc ParseUnitFileStates(units []string) (states []UnitFileState) {\n\tfor _, unit := range units {\n\t\tcols := strings.Fields(unit)\n\t\tif len(cols) == 3 {\n\t\t\tstates = append(states, UnitFileState{cols[0], cols[1], cols[2]})\n\t\t}\n\t}\n\treturn states\n}\n\nfunc FilterActiveUnits(states []UnitState) (filtered []UnitState) {\n\tfor _, state := range states {\n\t\tif state.ActiveState == \"active\" {\n\t\t\tfiltered = append(filtered, state)\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ tempUnit creates a local unit file with the given contents, returning\n\/\/ the name of the file\nfunc TempUnit(contents string) (string, error) {\n\ttmp, err := ioutil.TempFile(os.TempDir(), \"fleet-test-unit-\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttmp.Write([]byte(contents))\n\ttmp.Close()\n\n\tsvc := fmt.Sprintf(\"%s.service\", tmp.Name())\n\terr = os.Rename(tmp.Name(), svc)\n\tif err != nil {\n\t\tos.Remove(tmp.Name())\n\t\treturn \"\", err\n\t}\n\n\treturn svc, nil\n}\n\nfunc WaitForState(stateCheckFunc func() bool) (time.Duration, error) {\n\ttimeout := 15 * time.Second\n\talarm := time.After(timeout)\n\tticker := time.Tick(250 * time.Millisecond)\n\tfor {\n\t\tselect {\n\t\tcase <-alarm:\n\t\t\t\/\/ Generic message. Callers can build more specific ones using the returned timeout value.\n\t\t\treturn timeout, fmt.Errorf(\"Failed to reach expected state within %v.\", timeout)\n\t\tcase <-ticker:\n\t\t\tif stateCheckFunc() {\n\t\t\t\treturn timeout, nil\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc NewMachineID() string {\n\t\/\/ drop the standard separators to match systemd\n\treturn strings.Replace(uuid.New(), \"-\", \"\", -1)\n}\n<commit_msg>functional: introduce new helpers for testing the replace option<commit_after>\/\/ Copyright 2014 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage util\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/coreos\/fleet\/Godeps\/_workspace\/src\/github.com\/pborman\/uuid\"\n)\n\nvar fleetctlBinPath string\n\nfunc init() {\n\tfleetctlBinPath = os.Getenv(\"FLEETCTL_BIN\")\n\tif fleetctlBinPath == \"\" {\n\t\tfmt.Println(\"FLEETCTL_BIN environment variable must be set\")\n\t\tos.Exit(1)\n\t} else if _, err := os.Stat(fleetctlBinPath); err != nil {\n\t\tfmt.Printf(\"%v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tif os.Getenv(\"SSH_AUTH_SOCK\") == \"\" {\n\t\tfmt.Println(\"SSH_AUTH_SOCK environment variable must be set\")\n\t\tos.Exit(1)\n\t}\n}\n\ntype fleetfunc func(args ...string) (string, string, error)\n\nfunc RunFleetctl(args ...string) (string, string, error) {\n\tlog.Printf(\"%s %s\", fleetctlBinPath, strings.Join(args, \" \"))\n\tvar stdoutBytes, stderrBytes bytes.Buffer\n\tcmd := exec.Command(fleetctlBinPath, args...)\n\tcmd.Stdout = &stdoutBytes\n\tcmd.Stderr = &stderrBytes\n\terr := cmd.Run()\n\treturn stdoutBytes.String(), stderrBytes.String(), err\n}\n\nfunc RunFleetctlWithInput(input string, args ...string) (string, string, error) {\n\tlog.Printf(\"%s %s\", fleetctlBinPath, strings.Join(args, \" \"))\n\tvar stdoutBytes, stderrBytes bytes.Buffer\n\tcmd := exec.Command(fleetctlBinPath, args...)\n\tcmd.Stdout = &stdoutBytes\n\tcmd.Stderr = &stderrBytes\n\tstdin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tif err = cmd.Start(); err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tstdin.Write([]byte(input))\n\tstdin.Close()\n\terr = cmd.Wait()\n\n\treturn stdoutBytes.String(), stderrBytes.String(), err\n}\n\n\/\/ ActiveToSingleStates takes a map of active states (such as that returned by\n\/\/ WaitForNActiveUnits) and ensures that each unit has at most a single active\n\/\/ state. It returns a mapping of unit name to a single UnitState.\nfunc ActiveToSingleStates(active map[string][]UnitState) (map[string]UnitState, error) {\n\tstates := make(map[string]UnitState)\n\tfor name, us := range active {\n\t\tif len(us) != 1 {\n\t\t\treturn nil, fmt.Errorf(\"unit %s running in multiple locations: %v\", name, us)\n\t\t}\n\t\tstates[name] = us[0]\n\t}\n\treturn states, nil\n}\n\ntype UnitState struct {\n\tName string\n\tActiveState string\n\tMachine string\n}\n\ntype UnitFileState struct {\n\tName string\n\tDesiredState string\n\tState string\n}\n\nfunc ParseUnitStates(units []string) (states []UnitState) {\n\tfor _, unit := range units {\n\t\tcols := strings.Fields(unit)\n\t\tif len(cols) == 3 {\n\t\t\tmachine := strings.SplitN(cols[2], \"\/\", 2)[0]\n\t\t\tstates = append(states, UnitState{cols[0], cols[1], machine})\n\t\t}\n\t}\n\treturn states\n}\n\nfunc ParseUnitFileStates(units []string) (states []UnitFileState) {\n\tfor _, unit := range units {\n\t\tcols := strings.Fields(unit)\n\t\tif len(cols) == 3 {\n\t\t\tstates = append(states, UnitFileState{cols[0], cols[1], cols[2]})\n\t\t}\n\t}\n\treturn states\n}\n\nfunc FilterActiveUnits(states []UnitState) (filtered []UnitState) {\n\tfor _, state := range states {\n\t\tif state.ActiveState == \"active\" {\n\t\t\tfiltered = append(filtered, state)\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ tempUnit creates a local unit file with the given contents, returning\n\/\/ the name of the file\nfunc TempUnit(contents string) (string, error) {\n\ttmp, err := ioutil.TempFile(os.TempDir(), \"fleet-test-unit-\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttmp.Write([]byte(contents))\n\ttmp.Close()\n\n\tsvc := fmt.Sprintf(\"%s.service\", tmp.Name())\n\terr = os.Rename(tmp.Name(), svc)\n\tif err != nil {\n\t\tos.Remove(tmp.Name())\n\t\treturn \"\", err\n\t}\n\n\treturn svc, nil\n}\n\nfunc WaitForState(stateCheckFunc func() bool) (time.Duration, error) {\n\ttimeout := 15 * time.Second\n\talarm := time.After(timeout)\n\tticker := time.Tick(250 * time.Millisecond)\n\tfor {\n\t\tselect {\n\t\tcase <-alarm:\n\t\t\t\/\/ Generic message. Callers can build more specific ones using the returned timeout value.\n\t\t\treturn timeout, fmt.Errorf(\"Failed to reach expected state within %v.\", timeout)\n\t\tcase <-ticker:\n\t\t\tif stateCheckFunc() {\n\t\t\t\treturn timeout, nil\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc NewMachineID() string {\n\t\/\/ drop the standard separators to match systemd\n\treturn strings.Replace(uuid.New(), \"-\", \"\", -1)\n}\n\n\/\/ CopyFile()\nfunc CopyFile(newFile, oldFile string) error {\n\tinput, err := ioutil.ReadFile(oldFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(newFile, []byte(input), 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ GenNewFleetService() is a helper for generating a temporary fleet service\n\/\/ that reads from oldFile, replaces oldVal with newVal, and stores the result\n\/\/ to newFile.\nfunc GenNewFleetService(newFile, oldFile, newVal, oldVal string) error {\n\tinput, err := ioutil.ReadFile(oldFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlines := strings.Split(string(input), \"\\n\")\n\n\tfor i, line := range lines {\n\t\tif strings.Contains(line, oldVal) {\n\t\t\tlines[i] = strings.Replace(line, oldVal, newVal, len(oldVal))\n\t\t}\n\t}\n\toutput := strings.Join(lines, \"\\n\")\n\terr = ioutil.WriteFile(newFile, []byte(output), 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2017 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage client\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/google\/gapid\/core\/app\/auth\"\n\t\"github.com\/google\/gapid\/core\/fault\/cause\"\n\t\"github.com\/google\/gapid\/core\/log\"\n\t\"github.com\/google\/gapid\/core\/net\/grpcutil\"\n\t\"github.com\/google\/gapid\/core\/os\/file\"\n\t\"github.com\/google\/gapid\/core\/os\/process\"\n\t\"github.com\/google\/gapid\/framework\/binary\/registry\"\n\t\"github.com\/google\/gapid\/framework\/binary\/schema\"\n\t\"google.golang.org\/grpc\"\n)\n\nconst (\n\tBinaryName = \"gapis\"\n)\n\nvar (\n\t\/\/ GapisPath is the full filepath to the gapir executable.\n\tGapisPath file.Path\n)\n\nfunc init() {\n\t\/\/ Search directory that this executable is in.\n\tif path, err := file.FindExecutable(file.ExecutablePath().Parent().Join(BinaryName).System()); err == nil {\n\t\tGapisPath = path\n\t\treturn\n\t}\n\t\/\/ Search $PATH.\n\tif path, err := file.FindExecutable(BinaryName); err == nil {\n\t\tGapisPath = path\n\t\treturn\n\t}\n}\n\ntype Config struct {\n\tPath *file.Path\n\tPort int\n\tArgs []string\n\tToken auth.Token\n}\n\n\/\/ Connect attempts to connect to a GAPIS process.\n\/\/ If port is zero, a new GAPIS server will be started, otherwise a connection\n\/\/ will be made to the specified port.\nfunc Connect(ctx log.Context, cfg Config) (Client, *schema.Message, error) {\n\tif cfg.Path == nil {\n\t\tcfg.Path = &GapisPath\n\t}\n\n\tvar err error\n\tif cfg.Port == 0 || len(cfg.Args) > 0 {\n\t\tif ll := logLevel(ctx); ll != \"\" {\n\t\t\tcfg.Args = append(cfg.Args, \"--log-level\", ll)\n\t\t}\n\t\tif cfg.Token != auth.NoAuth {\n\t\t\tcfg.Args = append(cfg.Args, \"--gapis-auth-token\", string(cfg.Token))\n\t\t}\n\t\tcfg.Port, err = process.Start(ctx, cfg.Path.System(), nil, cfg.Args...)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\ttarget := fmt.Sprintf(\"localhost:%d\", cfg.Port)\n\n\tconn, err := grpcutil.Dial(ctx, target,\n\t\tgrpc.WithInsecure(),\n\t\tgrpc.WithUnaryInterceptor(auth.ClientInterceptor(cfg.Token)))\n\tif err != nil {\n\t\treturn nil, nil, cause.Explain(ctx, err, \"Dialing GAPIS\")\n\t}\n\tclient := Bind(conn)\n\n\tmessage, err := client.GetSchema(ctx)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"Error resolving schema: %v\", err)\n\t}\n\n\tfor _, entity := range message.Entities {\n\t\tregistry.Global.Add((*schema.ObjectClass)(entity))\n\t}\n\n\treturn client, message, nil\n}\n\nfunc logLevel(ctx log.Context) string {\n\tswitch {\n\tcase ctx.Debug().Active():\n\t\treturn \"Debug\"\n\tcase ctx.Info().Active():\n\t\treturn \"Info\"\n\tcase ctx.Notice().Active():\n\t\treturn \"Notice\"\n\tcase ctx.Warning().Active():\n\t\treturn \"Warning\"\n\tcase ctx.Error().Active():\n\t\treturn \"Error\"\n\tcase ctx.Critical().Active():\n\t\treturn \"Critical\"\n\tcase ctx.Alert().Active():\n\t\treturn \"Alert\"\n\tcase ctx.Emergency().Active():\n\t\treturn \"Emergency\"\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n<commit_msg>Add package style path searching to gapit looking for gapis<commit_after>\/\/ Copyright (C) 2017 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage client\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/google\/gapid\/core\/app\/auth\"\n\t\"github.com\/google\/gapid\/core\/fault\/cause\"\n\t\"github.com\/google\/gapid\/core\/log\"\n\t\"github.com\/google\/gapid\/core\/net\/grpcutil\"\n\t\"github.com\/google\/gapid\/core\/os\/device\"\n\t\"github.com\/google\/gapid\/core\/os\/file\"\n\t\"github.com\/google\/gapid\/core\/os\/process\"\n\t\"github.com\/google\/gapid\/framework\/binary\/registry\"\n\t\"github.com\/google\/gapid\/framework\/binary\/schema\"\n\t\"google.golang.org\/grpc\"\n)\n\nconst (\n\tBinaryName = \"gapis\"\n)\n\nvar (\n\t\/\/ GapisPath is the full filepath to the gapir executable.\n\tGapisPath file.Path\n)\n\nfunc init() {\n\t\/\/ Search directory that this executable is in.\n\tif path, err := file.FindExecutable(file.ExecutablePath().Parent().Join(BinaryName).System()); err == nil {\n\t\tGapisPath = path\n\t\treturn\n\t}\n\t\/\/ Search standard package structure\n\tpackagePath := file.Abs(\".\")\n\tswitch device.Host(log.Background()).Configuration.OS.Kind {\n\tcase device.Windows:\n\t\tpackagePath = packagePath.Join(\"windows\")\n\tcase device.OSX:\n\t\tpackagePath = packagePath.Join(\"osx\")\n\tcase device.Linux:\n\t\tpackagePath = packagePath.Join(\"linux\")\n\t}\n\tpackagePath = packagePath.Join(\"x86_64\")\n\tif path, err := file.FindExecutable(packagePath.Join(BinaryName).System()); err == nil {\n\t\tGapisPath = path\n\t\treturn\n\t}\n\t\/\/ Search $PATH.\n\tif path, err := file.FindExecutable(BinaryName); err == nil {\n\t\tGapisPath = path\n\t\treturn\n\t}\n}\n\ntype Config struct {\n\tPath *file.Path\n\tPort int\n\tArgs []string\n\tToken auth.Token\n}\n\n\/\/ Connect attempts to connect to a GAPIS process.\n\/\/ If port is zero, a new GAPIS server will be started, otherwise a connection\n\/\/ will be made to the specified port.\nfunc Connect(ctx log.Context, cfg Config) (Client, *schema.Message, error) {\n\tif cfg.Path == nil {\n\t\tcfg.Path = &GapisPath\n\t}\n\n\tvar err error\n\tif cfg.Port == 0 || len(cfg.Args) > 0 {\n\t\tif ll := logLevel(ctx); ll != \"\" {\n\t\t\tcfg.Args = append(cfg.Args, \"--log-level\", ll)\n\t\t}\n\t\tif cfg.Token != auth.NoAuth {\n\t\t\tcfg.Args = append(cfg.Args, \"--gapis-auth-token\", string(cfg.Token))\n\t\t}\n\t\tcfg.Port, err = process.Start(ctx, cfg.Path.System(), nil, cfg.Args...)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\ttarget := fmt.Sprintf(\"localhost:%d\", cfg.Port)\n\n\tconn, err := grpcutil.Dial(ctx, target,\n\t\tgrpc.WithInsecure(),\n\t\tgrpc.WithUnaryInterceptor(auth.ClientInterceptor(cfg.Token)))\n\tif err != nil {\n\t\treturn nil, nil, cause.Explain(ctx, err, \"Dialing GAPIS\")\n\t}\n\tclient := Bind(conn)\n\n\tmessage, err := client.GetSchema(ctx)\n\tif err != nil {\n\t\treturn nil, nil, fmt.Errorf(\"Error resolving schema: %v\", err)\n\t}\n\n\tfor _, entity := range message.Entities {\n\t\tregistry.Global.Add((*schema.ObjectClass)(entity))\n\t}\n\n\treturn client, message, nil\n}\n\nfunc logLevel(ctx log.Context) string {\n\tswitch {\n\tcase ctx.Debug().Active():\n\t\treturn \"Debug\"\n\tcase ctx.Info().Active():\n\t\treturn \"Info\"\n\tcase ctx.Notice().Active():\n\t\treturn \"Notice\"\n\tcase ctx.Warning().Active():\n\t\treturn \"Warning\"\n\tcase ctx.Error().Active():\n\t\treturn \"Error\"\n\tcase ctx.Critical().Active():\n\t\treturn \"Critical\"\n\tcase ctx.Alert().Active():\n\t\treturn \"Alert\"\n\tcase ctx.Emergency().Active():\n\t\treturn \"Emergency\"\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package gateway\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/TykTechnologies\/tyk\/config\"\n\t\"github.com\/TykTechnologies\/tyk\/storage\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\ntype (\n\tHealthCheckStatus string\n\n\tHealthCheckComponentType string\n)\n\nconst (\n\tPass HealthCheckStatus = \"pass\"\n\tFail = \"fail\"\n\tWarn = \"warn\"\n\n\tComponent HealthCheckComponentType = \"component\"\n\tDatastore = \"datastore\"\n\tSystem = \"system\"\n)\n\nvar (\n\thealthCheckInfo atomic.Value\n\thealthCheckLock sync.Mutex\n)\n\nfunc setCurrentHealthCheckInfo(h map[string]HealthCheckItem) {\n\thealthCheckLock.Lock()\n\thealthCheckInfo.Store(h)\n\thealthCheckLock.Unlock()\n}\n\nfunc getHealthCheckInfo() map[string]HealthCheckItem {\n\thealthCheckLock.Lock()\n\tret := healthCheckInfo.Load().(map[string]HealthCheckItem)\n\thealthCheckLock.Unlock()\n\treturn ret\n}\n\ntype HealthCheckResponse struct {\n\tStatus HealthCheckStatus `json:\"status\"`\n\tVersion string `json:\"version,omitempty\"`\n\tOutput string `json:\"output,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tDetails map[string]HealthCheckItem `json:\"details,omitempty\"`\n}\n\ntype HealthCheckItem struct {\n\tStatus HealthCheckStatus `json:\"status\"`\n\tOutput string `json:\"output,omitempty\"`\n\tComponentType string `json:\"componentType,omitempty\"`\n\tComponentID string `json:\"componentId,omitempty\"`\n\tTime string `json:\"time\"`\n}\n\nfunc initHealthCheck(ctx context.Context) {\n\tsetCurrentHealthCheckInfo(make(map[string]HealthCheckItem, 3))\n\n\tgo func(ctx context.Context) {\n\t\tvar n = config.Global().LivenessCheck.CheckDuration\n\n\t\tif n == 0 {\n\t\t\tn = 10\n\t\t}\n\n\t\tticker := time.NewTicker(time.Second * n)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\n\t\t\t\tticker.Stop()\n\t\t\t\tmainLog.WithFields(logrus.Fields{\n\t\t\t\t\t\"prefix\": \"health-check\",\n\t\t\t\t}).Debug(\"Stopping Health checks for all components\")\n\t\t\t\treturn\n\n\t\t\tcase <-ticker.C:\n\t\t\t\tgatherHealthChecks()\n\t\t\t}\n\t\t}\n\t}(ctx)\n}\n\nfunc gatherHealthChecks() {\n\n\tallInfos := make(map[string]HealthCheckItem, 3)\n\n\tredisStore := storage.RedisCluster{KeyPrefix: \"livenesscheck-\"}\n\n\tkey := \"tyk-liveness-probe\"\n\n\tvar wg sync.WaitGroup\n\n\tgo func() {\n\n\t\twg.Add(1)\n\n\t\tdefer wg.Done()\n\n\t\tvar checkItem = HealthCheckItem{\n\t\t\tStatus: Pass,\n\t\t\tComponentType: Datastore,\n\t\t\tTime: time.Now().Format(time.RFC3339),\n\t\t}\n\n\t\terr := redisStore.SetRawKey(key, key, 10)\n\t\tif err != nil {\n\t\t\tmainLog.WithField(\"liveness-check\", true).WithError(err).Error(\"Redis health check failed\")\n\t\t\tcheckItem.Output = err.Error()\n\t\t\tcheckItem.Status = Fail\n\t\t}\n\n\t\tallInfos[\"redis\"] = checkItem\n\t}()\n\n\tif config.Global().UseDBAppConfigs {\n\n\t\twg.Add(1)\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\tvar checkItem = HealthCheckItem{\n\t\t\t\tStatus: Pass,\n\t\t\t\tComponentType: Datastore,\n\t\t\t\tTime: time.Now().Format(time.RFC3339),\n\t\t\t}\n\n\t\t\tif err := DashService.Ping(); err != nil {\n\t\t\t\tmainLog.WithField(\"liveness-check\", true).Error(err)\n\t\t\t\tcheckItem.Output = err.Error()\n\t\t\t\tcheckItem.Status = Fail\n\t\t\t}\n\n\t\t\tcheckItem.ComponentType = System\n\t\t\tallInfos[\"dashboard\"] = checkItem\n\t\t}()\n\t}\n\n\tif config.Global().Policies.PolicySource == \"rpc\" {\n\n\t\twg.Add(1)\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\tvar checkItem = HealthCheckItem{\n\t\t\t\tStatus: Pass,\n\t\t\t\tComponentType: Datastore,\n\t\t\t\tTime: time.Now().Format(time.RFC3339),\n\t\t\t}\n\n\t\t\trpcStore := RPCStorageHandler{KeyPrefix: \"livenesscheck-\"}\n\n\t\t\tif !rpcStore.Connect() {\n\t\t\t\tcheckItem.Output = \"Could not connect to RPC\"\n\t\t\t\tcheckItem.Status = Fail\n\t\t\t}\n\n\t\t\tcheckItem.ComponentType = System\n\n\t\t\tallInfos[\"rpc\"] = checkItem\n\t\t}()\n\t}\n\n\twg.Wait()\n\n\tsetCurrentHealthCheckInfo(allInfos)\n}\n\nfunc liveCheckHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodGet {\n\t\tdoJSONWrite(w, http.StatusMethodNotAllowed, apiError(http.StatusText(http.StatusMethodNotAllowed)))\n\t\treturn\n\t}\n\n\tchecks := getHealthCheckInfo()\n\n\tres := HealthCheckResponse{\n\t\tStatus: Pass,\n\t\tVersion: VERSION,\n\t\tDescription: \"Tyk GW\",\n\t\tDetails: checks,\n\t}\n\n\tvar failCount int\n\n\tfor _, v := range checks {\n\t\tif v.Status == Fail {\n\t\t\tfailCount++\n\t\t}\n\t}\n\n\tvar status HealthCheckStatus\n\n\tswitch failCount {\n\tcase 0:\n\t\tstatus = Pass\n\n\tcase len(checks):\n\t\tstatus = Fail\n\n\tdefault:\n\t\tstatus = Warn\n\t}\n\n\tres.Status = status\n\n\tw.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(w).Encode(res)\n}\n<commit_msg>Fix race (#2881)<commit_after>package gateway\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/TykTechnologies\/tyk\/config\"\n\t\"github.com\/TykTechnologies\/tyk\/storage\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\ntype (\n\tHealthCheckStatus string\n\n\tHealthCheckComponentType string\n)\n\nconst (\n\tPass HealthCheckStatus = \"pass\"\n\tFail = \"fail\"\n\tWarn = \"warn\"\n\n\tComponent HealthCheckComponentType = \"component\"\n\tDatastore = \"datastore\"\n\tSystem = \"system\"\n)\n\nvar (\n\thealthCheckInfo atomic.Value\n\thealthCheckLock sync.Mutex\n)\n\nfunc setCurrentHealthCheckInfo(h map[string]HealthCheckItem) {\n\thealthCheckLock.Lock()\n\thealthCheckInfo.Store(h)\n\thealthCheckLock.Unlock()\n}\n\nfunc getHealthCheckInfo() map[string]HealthCheckItem {\n\thealthCheckLock.Lock()\n\tret := healthCheckInfo.Load().(map[string]HealthCheckItem)\n\thealthCheckLock.Unlock()\n\treturn ret\n}\n\ntype HealthCheckResponse struct {\n\tStatus HealthCheckStatus `json:\"status\"`\n\tVersion string `json:\"version,omitempty\"`\n\tOutput string `json:\"output,omitempty\"`\n\tDescription string `json:\"description,omitempty\"`\n\tDetails map[string]HealthCheckItem `json:\"details,omitempty\"`\n}\n\ntype HealthCheckItem struct {\n\tStatus HealthCheckStatus `json:\"status\"`\n\tOutput string `json:\"output,omitempty\"`\n\tComponentType string `json:\"componentType,omitempty\"`\n\tComponentID string `json:\"componentId,omitempty\"`\n\tTime string `json:\"time\"`\n}\n\nfunc initHealthCheck(ctx context.Context) {\n\tsetCurrentHealthCheckInfo(make(map[string]HealthCheckItem, 3))\n\n\tgo func(ctx context.Context) {\n\t\tvar n = config.Global().LivenessCheck.CheckDuration\n\n\t\tif n == 0 {\n\t\t\tn = 10\n\t\t}\n\n\t\tticker := time.NewTicker(time.Second * n)\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\n\t\t\t\tticker.Stop()\n\t\t\t\tmainLog.WithFields(logrus.Fields{\n\t\t\t\t\t\"prefix\": \"health-check\",\n\t\t\t\t}).Debug(\"Stopping Health checks for all components\")\n\t\t\t\treturn\n\n\t\t\tcase <-ticker.C:\n\t\t\t\tgatherHealthChecks()\n\t\t\t}\n\t\t}\n\t}(ctx)\n}\n\nfunc gatherHealthChecks() {\n\n\tallInfos := make(map[string]HealthCheckItem, 3)\n\n\tredisStore := storage.RedisCluster{KeyPrefix: \"livenesscheck-\"}\n\n\tkey := \"tyk-liveness-probe\"\n\n\tvar wg sync.WaitGroup\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tvar checkItem = HealthCheckItem{\n\t\t\tStatus: Pass,\n\t\t\tComponentType: Datastore,\n\t\t\tTime: time.Now().Format(time.RFC3339),\n\t\t}\n\n\t\terr := redisStore.SetRawKey(key, key, 10)\n\t\tif err != nil {\n\t\t\tmainLog.WithField(\"liveness-check\", true).WithError(err).Error(\"Redis health check failed\")\n\t\t\tcheckItem.Output = err.Error()\n\t\t\tcheckItem.Status = Fail\n\t\t}\n\n\t\tallInfos[\"redis\"] = checkItem\n\t}()\n\n\tif config.Global().UseDBAppConfigs {\n\t\twg.Add(1)\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\tvar checkItem = HealthCheckItem{\n\t\t\t\tStatus: Pass,\n\t\t\t\tComponentType: Datastore,\n\t\t\t\tTime: time.Now().Format(time.RFC3339),\n\t\t\t}\n\n\t\t\tif err := DashService.Ping(); err != nil {\n\t\t\t\tmainLog.WithField(\"liveness-check\", true).Error(err)\n\t\t\t\tcheckItem.Output = err.Error()\n\t\t\t\tcheckItem.Status = Fail\n\t\t\t}\n\n\t\t\tcheckItem.ComponentType = System\n\t\t\tallInfos[\"dashboard\"] = checkItem\n\t\t}()\n\t}\n\n\tif config.Global().Policies.PolicySource == \"rpc\" {\n\n\t\twg.Add(1)\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\n\t\t\tvar checkItem = HealthCheckItem{\n\t\t\t\tStatus: Pass,\n\t\t\t\tComponentType: Datastore,\n\t\t\t\tTime: time.Now().Format(time.RFC3339),\n\t\t\t}\n\n\t\t\trpcStore := RPCStorageHandler{KeyPrefix: \"livenesscheck-\"}\n\n\t\t\tif !rpcStore.Connect() {\n\t\t\t\tcheckItem.Output = \"Could not connect to RPC\"\n\t\t\t\tcheckItem.Status = Fail\n\t\t\t}\n\n\t\t\tcheckItem.ComponentType = System\n\n\t\t\tallInfos[\"rpc\"] = checkItem\n\t\t}()\n\t}\n\n\twg.Wait()\n\n\tsetCurrentHealthCheckInfo(allInfos)\n}\n\nfunc liveCheckHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodGet {\n\t\tdoJSONWrite(w, http.StatusMethodNotAllowed, apiError(http.StatusText(http.StatusMethodNotAllowed)))\n\t\treturn\n\t}\n\n\tchecks := getHealthCheckInfo()\n\n\tres := HealthCheckResponse{\n\t\tStatus: Pass,\n\t\tVersion: VERSION,\n\t\tDescription: \"Tyk GW\",\n\t\tDetails: checks,\n\t}\n\n\tvar failCount int\n\n\tfor _, v := range checks {\n\t\tif v.Status == Fail {\n\t\t\tfailCount++\n\t\t}\n\t}\n\n\tvar status HealthCheckStatus\n\n\tswitch failCount {\n\tcase 0:\n\t\tstatus = Pass\n\n\tcase len(checks):\n\t\tstatus = Fail\n\n\tdefault:\n\t\tstatus = Warn\n\t}\n\n\tres.Status = status\n\n\tw.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(w).Encode(res)\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"io\"\n\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/peer-calls\/peer-calls\/server\/identifiers\"\n\t\"github.com\/peer-calls\/peer-calls\/server\/logger\"\n\t\"github.com\/peer-calls\/peer-calls\/server\/sfu\"\n\t\"github.com\/pion\/webrtc\/v3\"\n)\n\ntype DataTransceiver struct {\n\tlog logger.Logger\n\n\tclientID identifiers.ClientID\n\tpeerConnection *webrtc.PeerConnection\n\n\t\/\/ dataChannelChan will receive DataChannels from peer connection\n\t\/\/ OnDataChannel handler.\n\tdataChannelChan chan *webrtc.DataChannel\n\n\t\/\/ privateRecvMessagesChan is never closed and it is there to prevent panics\n\t\/\/ when a message is received, but the recvMessagesChan has already been\n\t\/\/ closed.\n\tprivateRecvMessagesChan chan webrtc.DataChannelMessage\n\n\t\/\/ recvMessagesChan contains received messages. It will be closed on\n\t\/\/ teardown.\n\trecvMessagesChan chan webrtc.DataChannelMessage\n\n\t\/\/ sendMessagesChan contains messages to be sent. It is never closed.\n\tsendMessagesChan chan dataTransceiverMessageSend\n\n\t\/\/ teardownChan will initiate a teardown as soon as it receives a message.\n\tteardownChan chan struct{}\n\n\t\/\/ torndownChan will be closed as soon as teardown is complete.\n\ttorndownChan chan struct{}\n}\n\nfunc NewDataTransceiver(\n\tlog logger.Logger,\n\tclientID identifiers.ClientID,\n\tdataChannel *webrtc.DataChannel,\n\tpeerConnection *webrtc.PeerConnection,\n) *DataTransceiver {\n\td := &DataTransceiver{\n\t\tlog: log.WithNamespaceAppended(\"datatransceiver\").WithCtx(logger.Ctx{\n\t\t\t\"client_id\": clientID,\n\t\t}),\n\t\tclientID: clientID,\n\t\tpeerConnection: peerConnection,\n\n\t\tdataChannelChan: make(chan *webrtc.DataChannel),\n\t\tprivateRecvMessagesChan: make(chan webrtc.DataChannelMessage),\n\t\trecvMessagesChan: make(chan webrtc.DataChannelMessage),\n\t\tsendMessagesChan: make(chan dataTransceiverMessageSend),\n\t\tteardownChan: make(chan struct{}),\n\t\ttorndownChan: make(chan struct{}),\n\t}\n\n\tgo d.start()\n\n\tif dataChannel != nil {\n\t\td.handleDataChannel(dataChannel)\n\t}\n\n\tpeerConnection.OnDataChannel(d.handleDataChannel)\n\n\treturn d\n}\n\nfunc (d *DataTransceiver) handleDataChannel(dataChannel *webrtc.DataChannel) {\n\tif dataChannel.Label() == sfu.DataChannelName {\n\t\td.dataChannelChan <- dataChannel\n\n\t\tdataChannel.OnMessage(func(message webrtc.DataChannelMessage) {\n\t\t\td.log.Info(\"DataTransceiver.handleMessage\", nil)\n\n\t\t\tselect {\n\t\t\tcase <-d.torndownChan:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase d.privateRecvMessagesChan <- message:\n\t\t\t\t\/\/ Successfully sent.\n\t\t\tcase <-d.torndownChan:\n\t\t\t\t\/\/ DataTransceiver has been torn down.\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc (d *DataTransceiver) start() {\n\tdefer func() {\n\t\tclose(d.recvMessagesChan)\n\t\tclose(d.torndownChan)\n\t}()\n\n\tvar dataChannel *webrtc.DataChannel\n\n\thandleSendMessage := func(message webrtc.DataChannelMessage) error {\n\t\tif dataChannel == nil {\n\t\t\treturn errors.Errorf(\"data channel is nil\")\n\t\t}\n\n\t\tif message.IsString {\n\t\t\treturn errors.Annotate(dataChannel.SendText(string(message.Data)), \"send text\")\n\t\t}\n\n\t\treturn errors.Annotate(dataChannel.Send(message.Data), \"send bytes\")\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase dc := <-d.dataChannelChan:\n\t\t\tdataChannel = dc\n\t\tcase msg := <-d.privateRecvMessagesChan:\n\t\t\td.recvMessagesChan <- msg\n\t\tcase msgFuture := <-d.sendMessagesChan:\n\t\t\terr := handleSendMessage(msgFuture.message)\n\t\t\tif err != nil {\n\t\t\t\td.log.Error(\"Send error\", errors.Trace(err), nil)\n\n\t\t\t\tmsgFuture.errCh <- errors.Trace(err)\n\t\t\t}\n\t\tcase <-d.teardownChan:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (d *DataTransceiver) MessagesChannel() <-chan webrtc.DataChannelMessage {\n\treturn d.recvMessagesChan\n}\n\nfunc (d *DataTransceiver) Close() {\n\td.log.Trace(\"DataTransceiver.Close\", nil)\n\n\tselect {\n\tcase d.teardownChan <- struct{}{}:\n\tcase <-d.torndownChan:\n\t}\n\n\t<-d.torndownChan\n}\n\nfunc (d *DataTransceiver) Send(message webrtc.DataChannelMessage) <-chan error {\n\terrCh := make(chan error, 1)\n\n\tselect {\n\tcase d.sendMessagesChan <- dataTransceiverMessageSend{\n\t\terrCh: errCh,\n\t\tmessage: message,\n\t}:\n\tcase <-d.torndownChan:\n\t\terrCh <- errors.Trace(io.ErrClosedPipe)\n\t}\n\n\tclose(errCh)\n\n\treturn errCh\n}\n\ntype dataTransceiverMessageSend struct {\n\t\/\/ errCh will have error written to it if it occurrs. It will be closed once\n\t\/\/ the message sending has finished.\n\terrCh chan<- error\n\t\/\/ message to send.\n\tmessage webrtc.DataChannelMessage\n}\n<commit_msg>Fix panic in DataTransceiver (#218)<commit_after>package server\n\nimport (\n\t\"io\"\n\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/peer-calls\/peer-calls\/server\/identifiers\"\n\t\"github.com\/peer-calls\/peer-calls\/server\/logger\"\n\t\"github.com\/peer-calls\/peer-calls\/server\/sfu\"\n\t\"github.com\/pion\/webrtc\/v3\"\n)\n\ntype DataTransceiver struct {\n\tlog logger.Logger\n\n\tclientID identifiers.ClientID\n\tpeerConnection *webrtc.PeerConnection\n\n\t\/\/ dataChannelChan will receive DataChannels from peer connection\n\t\/\/ OnDataChannel handler.\n\tdataChannelChan chan *webrtc.DataChannel\n\n\t\/\/ privateRecvMessagesChan is never closed and it is there to prevent panics\n\t\/\/ when a message is received, but the recvMessagesChan has already been\n\t\/\/ closed.\n\tprivateRecvMessagesChan chan webrtc.DataChannelMessage\n\n\t\/\/ recvMessagesChan contains received messages. It will be closed on\n\t\/\/ teardown.\n\trecvMessagesChan chan webrtc.DataChannelMessage\n\n\t\/\/ sendMessagesChan contains messages to be sent. It is never closed.\n\tsendMessagesChan chan dataTransceiverMessageSend\n\n\t\/\/ teardownChan will initiate a teardown as soon as it receives a message.\n\tteardownChan chan struct{}\n\n\t\/\/ torndownChan will be closed as soon as teardown is complete.\n\ttorndownChan chan struct{}\n}\n\nfunc NewDataTransceiver(\n\tlog logger.Logger,\n\tclientID identifiers.ClientID,\n\tdataChannel *webrtc.DataChannel,\n\tpeerConnection *webrtc.PeerConnection,\n) *DataTransceiver {\n\td := &DataTransceiver{\n\t\tlog: log.WithNamespaceAppended(\"datatransceiver\").WithCtx(logger.Ctx{\n\t\t\t\"client_id\": clientID,\n\t\t}),\n\t\tclientID: clientID,\n\t\tpeerConnection: peerConnection,\n\n\t\tdataChannelChan: make(chan *webrtc.DataChannel),\n\t\tprivateRecvMessagesChan: make(chan webrtc.DataChannelMessage),\n\t\trecvMessagesChan: make(chan webrtc.DataChannelMessage),\n\t\tsendMessagesChan: make(chan dataTransceiverMessageSend),\n\t\tteardownChan: make(chan struct{}),\n\t\ttorndownChan: make(chan struct{}),\n\t}\n\n\tgo d.start()\n\n\tif dataChannel != nil {\n\t\td.handleDataChannel(dataChannel)\n\t}\n\n\tpeerConnection.OnDataChannel(d.handleDataChannel)\n\n\treturn d\n}\n\nfunc (d *DataTransceiver) handleDataChannel(dataChannel *webrtc.DataChannel) {\n\tif dataChannel.Label() == sfu.DataChannelName {\n\t\td.dataChannelChan <- dataChannel\n\n\t\tdataChannel.OnMessage(func(message webrtc.DataChannelMessage) {\n\t\t\td.log.Info(\"DataTransceiver.handleMessage\", nil)\n\n\t\t\tselect {\n\t\t\tcase <-d.torndownChan:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase d.privateRecvMessagesChan <- message:\n\t\t\t\t\/\/ Successfully sent.\n\t\t\tcase <-d.torndownChan:\n\t\t\t\t\/\/ DataTransceiver has been torn down.\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc (d *DataTransceiver) start() {\n\tdefer func() {\n\t\tclose(d.recvMessagesChan)\n\t\tclose(d.torndownChan)\n\t}()\n\n\tvar dataChannel *webrtc.DataChannel\n\n\thandleSendMessage := func(message webrtc.DataChannelMessage) error {\n\t\tif dataChannel == nil {\n\t\t\treturn errors.Errorf(\"data channel is nil\")\n\t\t}\n\n\t\tif message.IsString {\n\t\t\treturn errors.Annotate(dataChannel.SendText(string(message.Data)), \"send text\")\n\t\t}\n\n\t\treturn errors.Annotate(dataChannel.Send(message.Data), \"send bytes\")\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase dc := <-d.dataChannelChan:\n\t\t\tdataChannel = dc\n\t\tcase msg := <-d.privateRecvMessagesChan:\n\t\t\td.recvMessagesChan <- msg\n\t\tcase msgFuture := <-d.sendMessagesChan:\n\t\t\terr := handleSendMessage(msgFuture.message)\n\t\t\tif err != nil {\n\t\t\t\td.log.Error(\"Send error\", errors.Trace(err), nil)\n\n\t\t\t\tmsgFuture.errCh <- errors.Trace(err)\n\t\t\t}\n\n\t\t\tclose(msgFuture.errCh)\n\t\tcase <-d.teardownChan:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (d *DataTransceiver) MessagesChannel() <-chan webrtc.DataChannelMessage {\n\treturn d.recvMessagesChan\n}\n\nfunc (d *DataTransceiver) Close() {\n\td.log.Trace(\"DataTransceiver.Close\", nil)\n\n\tselect {\n\tcase d.teardownChan <- struct{}{}:\n\tcase <-d.torndownChan:\n\t}\n\n\t<-d.torndownChan\n}\n\nfunc (d *DataTransceiver) Send(message webrtc.DataChannelMessage) <-chan error {\n\terrCh := make(chan error, 1)\n\n\tselect {\n\tcase d.sendMessagesChan <- dataTransceiverMessageSend{\n\t\terrCh: errCh,\n\t\tmessage: message,\n\t}:\n\tcase <-d.torndownChan:\n\t\terrCh <- errors.Trace(io.ErrClosedPipe)\n\t\tclose(errCh)\n\t}\n\n\treturn errCh\n}\n\ntype dataTransceiverMessageSend struct {\n\t\/\/ errCh will have error written to it if it occurrs. It will be closed once\n\t\/\/ the message sending has finished.\n\terrCh chan<- error\n\t\/\/ message to send.\n\tmessage webrtc.DataChannelMessage\n}\n<|endoftext|>"} {"text":"<commit_before>package handlers\n\nimport (\n\t\"fmt\"\n\t\"github.com\/APTrust\/easy-store\/db\/models\"\n\t\"github.com\/APTrust\/easy-store\/util\/testutil\"\n\t\"github.com\/APTrust\/go-form-it\"\n\t\"github.com\/APTrust\/go-form-it\/fields\"\n\t\"github.com\/gorilla\/schema\"\n\t\"github.com\/jinzhu\/gorm\"\n\t_ \"github.com\/jinzhu\/gorm\/dialects\/sqlite\"\n\t_ \"github.com\/mattn\/go-sqlite3\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"strconv\"\n)\n\n\/\/ The following vars are shared among all files in this package.\nvar templates *template.Template\nvar decoder = schema.NewDecoder()\nvar db *gorm.DB\n\nfunc HandleRootRequest(w http.ResponseWriter, r *http.Request) {\n\tdata := make(map[string]interface{})\n\terr := templates.ExecuteTemplate(w, \"index\", data)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\nfunc CompileTemplates(pathToServerRoot string) {\n\ttemplateDir, _ := filepath.Abs(filepath.Join(pathToServerRoot, \"templates\", \"*.html\"))\n\tlog.Println(\"Loading templates:\", templateDir)\n\ttemplates = template.Must(template.ParseGlob(templateDir))\n}\n\n\/\/ TODO: This is also used by the easy_store_setup app.\n\/\/ Put it on one place, and don't rely on testutil.GetPathToSchema()\n\/\/ as that file and directory exist in dev mode only, and users\n\/\/ won't have them.\nfunc InitDBConnection() {\n\tschemaPath, err := testutil.GetPathToSchema()\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tdbFilePath := filepath.Join(filepath.Dir(schemaPath), \"..\", \"..\", \"easy-store.db\")\n\t\/\/ This sets the main global var db.\n\tdb, err = gorm.Open(\"sqlite3\", dbFilePath)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tdb.LogMode(true)\n}\n\nfunc GetOptions(modelName string) map[string][]fields.InputChoice {\n\t\/\/ BagItProfile, StorageService\n\tchoices := make([]fields.InputChoice, 1)\n\tchoices[0] = fields.InputChoice{Id: \"\", Val: \"\"}\n\tif modelName == \"BagItProfile\" {\n\t\tprofiles := make([]models.BagItProfile, 0)\n\t\tdb.Select(\"id, name\").Find(&profiles).Order(\"name\")\n\t\tfor _, profile := range profiles {\n\t\t\tchoices = append(choices, fields.InputChoice{\n\t\t\t\tId: strconv.FormatUint(uint64(profile.ID), 10),\n\t\t\t\tVal: profile.Name})\n\t\t}\n\t} else if modelName == \"StorageService\" {\n\t\tservices := make([]models.StorageService, 0)\n\t\tdb.Select(\"id, name\").Find(&services).Order(\"name\")\n\t\tfor _, service := range services {\n\t\t\tchoices = append(choices, fields.InputChoice{\n\t\t\t\tId: strconv.FormatUint(uint64(service.ID), 10),\n\t\t\t\tVal: service.Name})\n\t\t}\n\t} else if modelName == \"Workflow\" {\n\t\tworkflows := make([]models.Workflow, 0)\n\t\tdb.Select(\"id, name\").Find(&workflows).Order(\"name\")\n\t\tfor _, workflow := range workflows {\n\t\t\tchoices = append(choices, fields.InputChoice{\n\t\t\t\tId: strconv.FormatUint(uint64(workflow.ID), 10),\n\t\t\t\tVal: workflow.Name})\n\t\t}\n\t} else if modelName == \"SerializationFormat\" {\n\t\tchoices = append(choices, fields.InputChoice{Id: \"gzip\", Val: \"gzip\"})\n\t\tchoices = append(choices, fields.InputChoice{Id: \"tar\", Val: \"tar\"})\n\t\tchoices = append(choices, fields.InputChoice{Id: \"zip\", Val: \"zip\"})\n\t} else if modelName == \"Protocol\" {\n\t\tchoices = append(choices, fields.InputChoice{Id: \"ftp\", Val: \"ftp\"})\n\t\tchoices = append(choices, fields.InputChoice{Id: \"rsync\", Val: \"rsync\"})\n\t\tchoices = append(choices, fields.InputChoice{Id: \"s3\", Val: \"s3\"})\n\t\tchoices = append(choices, fields.InputChoice{Id: \"scp\", Val: \"scp\"})\n\t}\n\toptions := make(map[string][]fields.InputChoice)\n\toptions[\"\"] = choices\n\treturn options\n}\n\n\/\/ AddTagValueFields adds tag value form fields for a BagItProfile to\n\/\/ the given form. Set the last param, hideNonEmpty, to true if you want\n\/\/ to hide the fields that have non-empty values. Do this on the job form,\n\/\/ for example, where the user fills in only those tags that don't already\n\/\/ have default values.\nfunc AddTagValueFields(profile models.BagItProfile, form *forms.Form, hideNonEmpty bool) error {\n\tprofileDef, err := profile.Profile()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, relFilePath := range profileDef.SortedTagFilesRequired() {\n\t\thasVisibleFields := false\n\t\tfieldsInSet := make([]fields.FieldInterface, 0)\n\t\tmapOfRequiredTags := profileDef.TagFilesRequired[relFilePath]\n\t\tfor _, tagname := range profileDef.SortedTagNames(relFilePath) {\n\t\t\t\/\/ This tag is a basic part of the BagIt spec and will\n\t\t\t\/\/ always be set by the system, not the user.\n\t\t\tif tagname == \"Payload-Oxum\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttagdef := mapOfRequiredTags[tagname]\n\t\t\tdefaultTags := profile.GetDefaultTagValues(relFilePath, tagname)\n\t\t\tdefaultValue := \"\"\n\t\t\tdefaultTagId := uint(0)\n\t\t\tif len(defaultTags) > 0 {\n\t\t\t\tdefaultValue = defaultTags[0].TagValue\n\t\t\t\tdefaultTagId = defaultTags[0].ID\n\t\t\t}\n\t\t\tfieldName := fmt.Sprintf(\"%s|%s|%d\", relFilePath, tagname, defaultTagId)\n\t\t\tfieldLabel := tagname\n\n\t\t\tformField := fields.TextField(fieldName)\n\t\t\tif len(tagdef.Values) > 0 {\n\t\t\t\toptions := make(map[string][]fields.InputChoice)\n\t\t\t\toptions[\"\"] = make([]fields.InputChoice, len(tagdef.Values)+1)\n\t\t\t\toptions[\"\"][0] = fields.InputChoice{Id: \"\", Val: \"\"}\n\t\t\t\tfor i, val := range tagdef.Values {\n\t\t\t\t\toptions[\"\"][i+1] = fields.InputChoice{Id: val, Val: val}\n\t\t\t\t}\n\t\t\t\tformField = fields.SelectField(fieldName, options)\n\t\t\t}\n\t\t\tformField.SetLabel(fieldLabel)\n\t\t\tformField.SetValue(defaultValue)\n\t\t\tif hideNonEmpty && defaultValue != \"\" {\n\t\t\t\tformField.AddClass(\"hidden\")\n\t\t\t\tformField.AddClass(\"show-hide\")\n\t\t\t} else {\n\t\t\t\thasVisibleFields = true\n\t\t\t}\n\t\t\tfieldsInSet = append(fieldsInSet, formField)\n\t\t}\n\t\tlegend := fmt.Sprintf(\"Default values for %s\", relFilePath)\n\t\tfieldSet := forms.FieldSet(relFilePath, legend, fieldsInSet...)\n\t\tform.Elements(fieldSet)\n\t\tif !hasVisibleFields {\n\t\t\tfieldSet.AddClass(\"hidden\")\n\t\t\tfieldSet.AddClass(\"show-hide\")\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Don't let user set bag size. System will do it.<commit_after>package handlers\n\nimport (\n\t\"fmt\"\n\t\"github.com\/APTrust\/easy-store\/db\/models\"\n\t\"github.com\/APTrust\/easy-store\/util\/testutil\"\n\t\"github.com\/APTrust\/go-form-it\"\n\t\"github.com\/APTrust\/go-form-it\/fields\"\n\t\"github.com\/gorilla\/schema\"\n\t\"github.com\/jinzhu\/gorm\"\n\t_ \"github.com\/jinzhu\/gorm\/dialects\/sqlite\"\n\t_ \"github.com\/mattn\/go-sqlite3\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"strconv\"\n)\n\n\/\/ The following vars are shared among all files in this package.\nvar templates *template.Template\nvar decoder = schema.NewDecoder()\nvar db *gorm.DB\n\nfunc HandleRootRequest(w http.ResponseWriter, r *http.Request) {\n\tdata := make(map[string]interface{})\n\terr := templates.ExecuteTemplate(w, \"index\", data)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\nfunc CompileTemplates(pathToServerRoot string) {\n\ttemplateDir, _ := filepath.Abs(filepath.Join(pathToServerRoot, \"templates\", \"*.html\"))\n\tlog.Println(\"Loading templates:\", templateDir)\n\ttemplates = template.Must(template.ParseGlob(templateDir))\n}\n\n\/\/ TODO: This is also used by the easy_store_setup app.\n\/\/ Put it on one place, and don't rely on testutil.GetPathToSchema()\n\/\/ as that file and directory exist in dev mode only, and users\n\/\/ won't have them.\nfunc InitDBConnection() {\n\tschemaPath, err := testutil.GetPathToSchema()\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tdbFilePath := filepath.Join(filepath.Dir(schemaPath), \"..\", \"..\", \"easy-store.db\")\n\t\/\/ This sets the main global var db.\n\tdb, err = gorm.Open(\"sqlite3\", dbFilePath)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\tdb.LogMode(true)\n}\n\nfunc GetOptions(modelName string) map[string][]fields.InputChoice {\n\t\/\/ BagItProfile, StorageService\n\tchoices := make([]fields.InputChoice, 1)\n\tchoices[0] = fields.InputChoice{Id: \"\", Val: \"\"}\n\tif modelName == \"BagItProfile\" {\n\t\tprofiles := make([]models.BagItProfile, 0)\n\t\tdb.Select(\"id, name\").Find(&profiles).Order(\"name\")\n\t\tfor _, profile := range profiles {\n\t\t\tchoices = append(choices, fields.InputChoice{\n\t\t\t\tId: strconv.FormatUint(uint64(profile.ID), 10),\n\t\t\t\tVal: profile.Name})\n\t\t}\n\t} else if modelName == \"StorageService\" {\n\t\tservices := make([]models.StorageService, 0)\n\t\tdb.Select(\"id, name\").Find(&services).Order(\"name\")\n\t\tfor _, service := range services {\n\t\t\tchoices = append(choices, fields.InputChoice{\n\t\t\t\tId: strconv.FormatUint(uint64(service.ID), 10),\n\t\t\t\tVal: service.Name})\n\t\t}\n\t} else if modelName == \"Workflow\" {\n\t\tworkflows := make([]models.Workflow, 0)\n\t\tdb.Select(\"id, name\").Find(&workflows).Order(\"name\")\n\t\tfor _, workflow := range workflows {\n\t\t\tchoices = append(choices, fields.InputChoice{\n\t\t\t\tId: strconv.FormatUint(uint64(workflow.ID), 10),\n\t\t\t\tVal: workflow.Name})\n\t\t}\n\t} else if modelName == \"SerializationFormat\" {\n\t\tchoices = append(choices, fields.InputChoice{Id: \"gzip\", Val: \"gzip\"})\n\t\tchoices = append(choices, fields.InputChoice{Id: \"tar\", Val: \"tar\"})\n\t\tchoices = append(choices, fields.InputChoice{Id: \"zip\", Val: \"zip\"})\n\t} else if modelName == \"Protocol\" {\n\t\tchoices = append(choices, fields.InputChoice{Id: \"ftp\", Val: \"ftp\"})\n\t\tchoices = append(choices, fields.InputChoice{Id: \"rsync\", Val: \"rsync\"})\n\t\tchoices = append(choices, fields.InputChoice{Id: \"s3\", Val: \"s3\"})\n\t\tchoices = append(choices, fields.InputChoice{Id: \"scp\", Val: \"scp\"})\n\t}\n\toptions := make(map[string][]fields.InputChoice)\n\toptions[\"\"] = choices\n\treturn options\n}\n\n\/\/ AddTagValueFields adds tag value form fields for a BagItProfile to\n\/\/ the given form. Set the last param, hideNonEmpty, to true if you want\n\/\/ to hide the fields that have non-empty values. Do this on the job form,\n\/\/ for example, where the user fills in only those tags that don't already\n\/\/ have default values.\nfunc AddTagValueFields(profile models.BagItProfile, form *forms.Form, hideNonEmpty bool) error {\n\tprofileDef, err := profile.Profile()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, relFilePath := range profileDef.SortedTagFilesRequired() {\n\t\thasVisibleFields := false\n\t\tfieldsInSet := make([]fields.FieldInterface, 0)\n\t\tmapOfRequiredTags := profileDef.TagFilesRequired[relFilePath]\n\t\tfor _, tagname := range profileDef.SortedTagNames(relFilePath) {\n\t\t\t\/\/ Payload-Oxum is a basic part of the BagIt spec and will\n\t\t\t\/\/ always be set by the system, not the user. Bag-Size is\n\t\t\t\/\/ a DPN tag that should also be set by the system.\n\t\t\tif tagname == \"Payload-Oxum\" || tagname == \"Bag-Size\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttagdef := mapOfRequiredTags[tagname]\n\t\t\tdefaultTags := profile.GetDefaultTagValues(relFilePath, tagname)\n\t\t\tdefaultValue := \"\"\n\t\t\tdefaultTagId := uint(0)\n\t\t\tif len(defaultTags) > 0 {\n\t\t\t\tdefaultValue = defaultTags[0].TagValue\n\t\t\t\tdefaultTagId = defaultTags[0].ID\n\t\t\t}\n\t\t\tfieldName := fmt.Sprintf(\"%s|%s|%d\", relFilePath, tagname, defaultTagId)\n\t\t\tfieldLabel := tagname\n\n\t\t\tformField := fields.TextField(fieldName)\n\t\t\tif len(tagdef.Values) > 0 {\n\t\t\t\toptions := make(map[string][]fields.InputChoice)\n\t\t\t\toptions[\"\"] = make([]fields.InputChoice, len(tagdef.Values)+1)\n\t\t\t\toptions[\"\"][0] = fields.InputChoice{Id: \"\", Val: \"\"}\n\t\t\t\tfor i, val := range tagdef.Values {\n\t\t\t\t\toptions[\"\"][i+1] = fields.InputChoice{Id: val, Val: val}\n\t\t\t\t}\n\t\t\t\tformField = fields.SelectField(fieldName, options)\n\t\t\t}\n\t\t\tformField.SetLabel(fieldLabel)\n\t\t\tformField.SetValue(defaultValue)\n\t\t\tif hideNonEmpty && defaultValue != \"\" {\n\t\t\t\tformField.AddClass(\"hidden\")\n\t\t\t\tformField.AddClass(\"show-hide\")\n\t\t\t} else {\n\t\t\t\thasVisibleFields = true\n\t\t\t}\n\t\t\tfieldsInSet = append(fieldsInSet, formField)\n\t\t}\n\t\tlegend := fmt.Sprintf(\"Default values for %s\", relFilePath)\n\t\tfieldSet := forms.FieldSet(relFilePath, legend, fieldsInSet...)\n\t\tform.Elements(fieldSet)\n\t\tif !hasVisibleFields {\n\t\t\tfieldSet.AddClass(\"hidden\")\n\t\t\tfieldSet.AddClass(\"show-hide\")\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package snakes\n\nimport (\n \"math\/big\"\n)\n\n\nfunc MontgomeryLadderExp(a, b, c *big.Int) *big.Int {\n zero := big.NewInt(0)\n if a.Cmp(zero) == 0 && b.Cmp(zero) == 0 {\n return big.NewInt(1)\n }\n a1 := a\n a2 := new(big.Int).Mul(a, a)\n for pos := b.BitLen() - 1; pos >= 0; pos -- {\n if b.Bit(pos) == 0 {\n a2 = new(big.Int).Mul(a2, a1)\n a1 = new(big.Int).Mul(a1, a1)\n } else {\n a1 = new(big.Int).Mul(a1, a2)\n a2 = new(big.Int).Mul(a2, a2)\n }\n\n a1 = new(big.Int).Mod(a1, c)\n a2 = new(big.Int).Mod(a2, c)\n }\n return a1\n}\n<commit_msg>Document<commit_after>package snakes\n\nimport (\n \"math\/big\"\n)\n\n\/\/ Compute a ** b % c, just like big.Int.Exp, but do it in a way that is more\n\/\/ constant time. This is useful for crypto stuff.\nfunc MontgomeryLadderExp(a, b, c *big.Int) *big.Int {\n zero := big.NewInt(0)\n if a.Cmp(zero) == 0 && b.Cmp(zero) == 0 {\n return big.NewInt(1)\n }\n a1 := a\n a2 := new(big.Int).Mul(a, a)\n for pos := b.BitLen() - 1; pos >= 0; pos -- {\n if b.Bit(pos) == 0 {\n a2 = new(big.Int).Mul(a2, a1)\n a1 = new(big.Int).Mul(a1, a1)\n } else {\n a1 = new(big.Int).Mul(a1, a2)\n a2 = new(big.Int).Mul(a2, a2)\n }\n\n a1 = new(big.Int).Mod(a1, c)\n a2 = new(big.Int).Mod(a2, c)\n }\n return a1\n}\n<|endoftext|>"} {"text":"<commit_before>package server\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\tqt \"github.com\/frankban\/quicktest\"\n\t\"github.com\/google\/go-cmp\/cmp\"\n\t\"github.com\/google\/uuid\"\n\t\"github.com\/rs\/zerolog\"\n\t\"golang.org\/x\/oauth2\"\n\n\t\"github.com\/gilcrest\/go-api-basic\/domain\/app\"\n\t\"github.com\/gilcrest\/go-api-basic\/domain\/auth\"\n\t\"github.com\/gilcrest\/go-api-basic\/domain\/errs\"\n\t\"github.com\/gilcrest\/go-api-basic\/domain\/logger\"\n\t\"github.com\/gilcrest\/go-api-basic\/domain\/org\"\n)\n\ntype mockFindAppService struct{}\n\nfunc (m mockFindAppService) FindAppByAPIKey(ctx context.Context, realm string, appExtlID string, apiKey string) (app.App, error) {\n\treturn app.App{\n\t\tID: uuid.UUID{},\n\t\tExternalID: []byte(\"so random\"),\n\t\tOrg: org.Org{},\n\t\tName: \"\",\n\t\tDescription: \"\",\n\t\tCreateAppID: uuid.UUID{},\n\t\tCreateUserID: uuid.UUID{},\n\t\tCreateTime: time.Time{},\n\t\tUpdateAppID: uuid.UUID{},\n\t\tUpdateUserID: uuid.UUID{},\n\t\tUpdateTime: time.Time{},\n\t\tAPIKeys: nil,\n\t}, nil\n}\n\nfunc TestJSONContentTypeResponseHandler(t *testing.T) {\n\n\ts := Server{}\n\n\treq, err := http.NewRequest(\"GET\", \"\/ping\", nil)\n\tif err != nil {\n\t\tt.Fatalf(\"http.NewRequest() error = %v\", err)\n\t}\n\n\ttestJSONContentTypeResponseHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tcontentType := w.Header().Get(\"Content-Type\")\n\t\tif contentType != \"application\/json\" {\n\t\t\tt.Fatalf(\"Content-Type %s is invalid\", contentType)\n\t\t}\n\t})\n\n\trr := httptest.NewRecorder()\n\n\thandlers := s.jsonContentTypeResponseHandler(testJSONContentTypeResponseHandler)\n\thandlers.ServeHTTP(rr, req)\n}\n\n\/\/ TODO - add typical - with database test to actually query db. Requires quite a bit of data setup, but is appropriate and will get to this.\nfunc TestServer_appHandler(t *testing.T) {\n\tt.Run(\"typical - mock database\", func(t *testing.T) {\n\t\tc := qt.New(t)\n\n\t\treq, err := http.NewRequest(\"GET\", \"\/ping\", nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"http.NewRequest() error = %v\", err)\n\t\t}\n\t\treq.Header.Add(appIDHeaderKey, \"test_app_extl_id\")\n\t\treq.Header.Add(apiKeyHeaderKey, \"test_app_api_key\")\n\n\t\ttestAppHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\ta, err := app.FromRequest(r)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"app.FromRequest() error\", err)\n\t\t\t}\n\t\t\twantApp := app.App{\n\t\t\t\tID: uuid.UUID{},\n\t\t\t\tExternalID: []byte(\"so random\"),\n\t\t\t\tOrg: org.Org{},\n\t\t\t\tName: \"\",\n\t\t\t\tDescription: \"\",\n\t\t\t\tCreateAppID: uuid.UUID{},\n\t\t\t\tCreateUserID: uuid.UUID{},\n\t\t\t\tCreateTime: time.Time{},\n\t\t\t\tUpdateAppID: uuid.UUID{},\n\t\t\t\tUpdateUserID: uuid.UUID{},\n\t\t\t\tUpdateTime: time.Time{},\n\t\t\t\tAPIKeys: nil,\n\t\t\t}\n\t\t\tc.Assert(a, qt.DeepEquals, wantApp)\n\t\t})\n\n\t\trr := httptest.NewRecorder()\n\n\t\tlgr := logger.NewLogger(os.Stdout, zerolog.DebugLevel, true)\n\n\t\ts := New(NewMuxRouter(), NewDriver(), lgr)\n\t\ts.FindAppService = mockFindAppService{}\n\n\t\thandlers := s.appHandler(testAppHandler)\n\t\thandlers.ServeHTTP(rr, req)\n\n\t\t\/\/ If there is any issues with the Access Token, the body\n\t\t\/\/ should be empty and the status code should be 401\n\t\tc.Assert(rr.Code, qt.Equals, http.StatusOK)\n\t})\n}\n\nfunc TestXHeader(t *testing.T) {\n\tt.Run(\"x-app-id\", func(t *testing.T) {\n\t\tc := qt.New(t)\n\t\thdr := http.Header{}\n\t\thdr.Add(appIDHeaderKey, \"appologies\")\n\n\t\tappID, err := xHeader(defaultRealm, hdr, appIDHeaderKey)\n\t\tc.Assert(err, qt.IsNil)\n\t\tc.Assert(appID, qt.Equals, \"appologies\")\n\t})\n\tt.Run(\"no header error\", func(t *testing.T) {\n\t\tc := qt.New(t)\n\t\thdr := http.Header{}\n\n\t\t_, err := xHeader(defaultRealm, hdr, appIDHeaderKey)\n\t\tc.Assert(err, qt.CmpEquals(cmp.Comparer(errs.Match)), errs.E(errs.Unauthenticated, errs.Realm(defaultRealm), fmt.Sprintf(\"unauthenticated: no %s header sent\", appIDHeaderKey)))\n\t})\n\tt.Run(\"too many values error\", func(t *testing.T) {\n\t\tc := qt.New(t)\n\t\thdr := http.Header{}\n\t\thdr.Add(appIDHeaderKey, \"value1\")\n\t\thdr.Add(appIDHeaderKey, \"value2\")\n\n\t\t_, err := xHeader(defaultRealm, hdr, appIDHeaderKey)\n\t\tc.Assert(err, qt.CmpEquals(cmp.Comparer(errs.Match)), errs.E(errs.Unauthenticated, errs.Realm(defaultRealm), fmt.Sprintf(\"%s header value > 1\", appIDHeaderKey)))\n\t})\n\tt.Run(\"empty value error\", func(t *testing.T) {\n\t\tc := qt.New(t)\n\t\thdr := http.Header{}\n\t\thdr.Add(appIDHeaderKey, \"\")\n\n\t\t_, err := xHeader(defaultRealm, hdr, appIDHeaderKey)\n\t\tc.Assert(err, qt.CmpEquals(cmp.Comparer(errs.Match)), errs.E(errs.Unauthenticated, errs.Realm(defaultRealm), fmt.Sprintf(\"unauthenticated: %s header value not found\", appIDHeaderKey)))\n\t})\n}\n\nfunc Test_authHeader(t *testing.T) {\n\tc := qt.New(t)\n\n\tconst reqHeader string = \"Authorization\"\n\n\ttype args struct {\n\t\trealm string\n\t\theader http.Header\n\t}\n\n\thdr := http.Header{}\n\thdr.Add(reqHeader, \"Bearer foobarbbq\")\n\n\temptyHdr := http.Header{}\n\temptyHdrErr := errs.E(errs.Unauthenticated, errs.Realm(defaultRealm), \"unauthenticated: no Authorization header sent\")\n\n\ttooManyValues := http.Header{}\n\ttooManyValues.Add(reqHeader, \"value1\")\n\ttooManyValues.Add(reqHeader, \"value2\")\n\ttooManyValuesErr := errs.E(errs.Unauthenticated, errs.Realm(defaultRealm), \"header value > 1\")\n\n\tnoBearer := http.Header{}\n\tnoBearer.Add(reqHeader, \"xyz\")\n\tnoBearerErr := errs.E(errs.Unauthenticated, errs.Realm(defaultRealm), \"unauthenticated: Bearer authentication scheme not found\")\n\n\thdrSpacesBearer := http.Header{}\n\thdrSpacesBearer.Add(\"Authorization\", \"Bearer \")\n\tspacesHdrErr := errs.E(errs.Unauthenticated, errs.Realm(defaultRealm), \"unauthenticated: Authorization header sent with Bearer scheme, but no token found\")\n\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twantToken oauth2.Token\n\t\twantErr error\n\t}{\n\t\t{\"typical\", args{realm: defaultRealm, header: hdr}, oauth2.Token{AccessToken: \"foobarbbq\", TokenType: auth.BearerTokenType}, nil},\n\t\t{\"no authorization header error\", args{realm: defaultRealm, header: emptyHdr}, oauth2.Token{}, emptyHdrErr},\n\t\t{\"too many values error\", args{realm: defaultRealm, header: tooManyValues}, oauth2.Token{}, tooManyValuesErr},\n\t\t{\"no bearer scheme error\", args{realm: defaultRealm, header: noBearer}, oauth2.Token{}, noBearerErr},\n\t\t{\"spaces as token error\", args{realm: defaultRealm, header: hdrSpacesBearer}, oauth2.Token{}, spacesHdrErr},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgotToken, err := authHeader(tt.args.realm, tt.args.header)\n\t\t\tif (err != nil) && (tt.wantErr == nil) {\n\t\t\t\tt.Errorf(\"authHeader() error = %v, nil expected\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tc.Assert(err, qt.CmpEquals(cmp.Comparer(errs.Match)), tt.wantErr)\n\t\t\tc.Assert(gotToken, qt.Equals, tt.wantToken)\n\t\t})\n\t}\n}\n<commit_msg>updated tests<commit_after>package server\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"testing\"\n\n\tqt \"github.com\/frankban\/quicktest\"\n\t\"github.com\/google\/go-cmp\/cmp\"\n\t\"github.com\/google\/uuid\"\n\t\"github.com\/rs\/zerolog\"\n\t\"golang.org\/x\/oauth2\"\n\n\t\"github.com\/gilcrest\/go-api-basic\/domain\/app\"\n\t\"github.com\/gilcrest\/go-api-basic\/domain\/auth\"\n\t\"github.com\/gilcrest\/go-api-basic\/domain\/errs\"\n\t\"github.com\/gilcrest\/go-api-basic\/domain\/logger\"\n\t\"github.com\/gilcrest\/go-api-basic\/domain\/org\"\n)\n\ntype mockFindAppService struct{}\n\nfunc (m mockFindAppService) FindAppByAPIKey(ctx context.Context, realm string, appExtlID string, apiKey string) (app.App, error) {\n\treturn app.App{\n\t\tID: uuid.UUID{},\n\t\tExternalID: []byte(\"so random\"),\n\t\tOrg: org.Org{},\n\t\tName: \"\",\n\t\tDescription: \"\",\n\t\tAPIKeys: nil,\n\t}, nil\n}\n\nfunc TestJSONContentTypeResponseHandler(t *testing.T) {\n\n\ts := Server{}\n\n\treq, err := http.NewRequest(\"GET\", \"\/ping\", nil)\n\tif err != nil {\n\t\tt.Fatalf(\"http.NewRequest() error = %v\", err)\n\t}\n\n\ttestJSONContentTypeResponseHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tcontentType := w.Header().Get(\"Content-Type\")\n\t\tif contentType != \"application\/json\" {\n\t\t\tt.Fatalf(\"Content-Type %s is invalid\", contentType)\n\t\t}\n\t})\n\n\trr := httptest.NewRecorder()\n\n\thandlers := s.jsonContentTypeResponseHandler(testJSONContentTypeResponseHandler)\n\thandlers.ServeHTTP(rr, req)\n}\n\n\/\/ TODO - add typical - with database test to actually query db. Requires quite a bit of data setup, but is appropriate and will get to this.\nfunc TestServer_appHandler(t *testing.T) {\n\tt.Run(\"typical - mock database\", func(t *testing.T) {\n\t\tc := qt.New(t)\n\n\t\treq, err := http.NewRequest(\"GET\", \"\/ping\", nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"http.NewRequest() error = %v\", err)\n\t\t}\n\t\treq.Header.Add(appIDHeaderKey, \"test_app_extl_id\")\n\t\treq.Header.Add(apiKeyHeaderKey, \"test_app_api_key\")\n\n\t\ttestAppHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\ta, err := app.FromRequest(r)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(\"app.FromRequest() error\", err)\n\t\t\t}\n\t\t\twantApp := app.App{\n\t\t\t\tID: uuid.UUID{},\n\t\t\t\tExternalID: []byte(\"so random\"),\n\t\t\t\tOrg: org.Org{},\n\t\t\t\tName: \"\",\n\t\t\t\tDescription: \"\",\n\t\t\t\tAPIKeys: nil,\n\t\t\t}\n\t\t\tc.Assert(a, qt.DeepEquals, wantApp)\n\t\t})\n\n\t\trr := httptest.NewRecorder()\n\n\t\tlgr := logger.NewLogger(os.Stdout, zerolog.DebugLevel, true)\n\n\t\ts := New(NewMuxRouter(), NewDriver(), lgr)\n\t\ts.FindAppService = mockFindAppService{}\n\n\t\thandlers := s.appHandler(testAppHandler)\n\t\thandlers.ServeHTTP(rr, req)\n\n\t\t\/\/ If there is any issues with the Access Token, the body\n\t\t\/\/ should be empty and the status code should be 401\n\t\tc.Assert(rr.Code, qt.Equals, http.StatusOK)\n\t})\n}\n\nfunc TestXHeader(t *testing.T) {\n\tt.Run(\"x-app-id\", func(t *testing.T) {\n\t\tc := qt.New(t)\n\t\thdr := http.Header{}\n\t\thdr.Add(appIDHeaderKey, \"appologies\")\n\n\t\tappID, err := xHeader(defaultRealm, hdr, appIDHeaderKey)\n\t\tc.Assert(err, qt.IsNil)\n\t\tc.Assert(appID, qt.Equals, \"appologies\")\n\t})\n\tt.Run(\"no header error\", func(t *testing.T) {\n\t\tc := qt.New(t)\n\t\thdr := http.Header{}\n\n\t\t_, err := xHeader(defaultRealm, hdr, appIDHeaderKey)\n\t\tc.Assert(err, qt.CmpEquals(cmp.Comparer(errs.Match)), errs.E(errs.Unauthenticated, errs.Realm(defaultRealm), fmt.Sprintf(\"unauthenticated: no %s header sent\", appIDHeaderKey)))\n\t})\n\tt.Run(\"too many values error\", func(t *testing.T) {\n\t\tc := qt.New(t)\n\t\thdr := http.Header{}\n\t\thdr.Add(appIDHeaderKey, \"value1\")\n\t\thdr.Add(appIDHeaderKey, \"value2\")\n\n\t\t_, err := xHeader(defaultRealm, hdr, appIDHeaderKey)\n\t\tc.Assert(err, qt.CmpEquals(cmp.Comparer(errs.Match)), errs.E(errs.Unauthenticated, errs.Realm(defaultRealm), fmt.Sprintf(\"%s header value > 1\", appIDHeaderKey)))\n\t})\n\tt.Run(\"empty value error\", func(t *testing.T) {\n\t\tc := qt.New(t)\n\t\thdr := http.Header{}\n\t\thdr.Add(appIDHeaderKey, \"\")\n\n\t\t_, err := xHeader(defaultRealm, hdr, appIDHeaderKey)\n\t\tc.Assert(err, qt.CmpEquals(cmp.Comparer(errs.Match)), errs.E(errs.Unauthenticated, errs.Realm(defaultRealm), fmt.Sprintf(\"unauthenticated: %s header value not found\", appIDHeaderKey)))\n\t})\n}\n\nfunc Test_authHeader(t *testing.T) {\n\tc := qt.New(t)\n\n\tconst reqHeader string = \"Authorization\"\n\n\ttype args struct {\n\t\trealm string\n\t\theader http.Header\n\t}\n\n\thdr := http.Header{}\n\thdr.Add(reqHeader, \"Bearer foobarbbq\")\n\n\temptyHdr := http.Header{}\n\temptyHdrErr := errs.E(errs.Unauthenticated, errs.Realm(defaultRealm), \"unauthenticated: no Authorization header sent\")\n\n\ttooManyValues := http.Header{}\n\ttooManyValues.Add(reqHeader, \"value1\")\n\ttooManyValues.Add(reqHeader, \"value2\")\n\ttooManyValuesErr := errs.E(errs.Unauthenticated, errs.Realm(defaultRealm), \"header value > 1\")\n\n\tnoBearer := http.Header{}\n\tnoBearer.Add(reqHeader, \"xyz\")\n\tnoBearerErr := errs.E(errs.Unauthenticated, errs.Realm(defaultRealm), \"unauthenticated: Bearer authentication scheme not found\")\n\n\thdrSpacesBearer := http.Header{}\n\thdrSpacesBearer.Add(\"Authorization\", \"Bearer \")\n\tspacesHdrErr := errs.E(errs.Unauthenticated, errs.Realm(defaultRealm), \"unauthenticated: Authorization header sent with Bearer scheme, but no token found\")\n\n\ttests := []struct {\n\t\tname string\n\t\targs args\n\t\twantToken oauth2.Token\n\t\twantErr error\n\t}{\n\t\t{\"typical\", args{realm: defaultRealm, header: hdr}, oauth2.Token{AccessToken: \"foobarbbq\", TokenType: auth.BearerTokenType}, nil},\n\t\t{\"no authorization header error\", args{realm: defaultRealm, header: emptyHdr}, oauth2.Token{}, emptyHdrErr},\n\t\t{\"too many values error\", args{realm: defaultRealm, header: tooManyValues}, oauth2.Token{}, tooManyValuesErr},\n\t\t{\"no bearer scheme error\", args{realm: defaultRealm, header: noBearer}, oauth2.Token{}, noBearerErr},\n\t\t{\"spaces as token error\", args{realm: defaultRealm, header: hdrSpacesBearer}, oauth2.Token{}, spacesHdrErr},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgotToken, err := authHeader(tt.args.realm, tt.args.header)\n\t\t\tif (err != nil) && (tt.wantErr == nil) {\n\t\t\t\tt.Errorf(\"authHeader() error = %v, nil expected\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tc.Assert(err, qt.CmpEquals(cmp.Comparer(errs.Match)), tt.wantErr)\n\t\t\tc.Assert(gotToken, qt.Equals, tt.wantToken)\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package ops\n\nimport (\n\t\"errors\"\n\t\"github.com\/phzfi\/RIC\/server\/images\"\n\t\"github.com\/phzfi\/RIC\/server\/logging\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n)\n\ntype dim [2]int\ntype idToSize map[string]dim\n\ntype ImageSource struct {\n\troots []string\n\tsizes idToSize\n\tmutex *sync.RWMutex\n}\n\nfunc MakeImageSource() ImageSource {\n\treturn ImageSource{\n\t\tsizes: make(idToSize),\n\t\tmutex: new(sync.RWMutex),\n\t}\n}\n\nfunc (i ImageSource) LoadImageOp(id string) Operation {\n\treturn loadImageOp{&i, id}\n}\n\n\/\/ Search root for an image. Returned image should be destroyed by image.Destroy, image.Resized or image.ToBlob or other.\nfunc (i ImageSource) searchRoots(filename string, img images.Image) (err error) {\n\tif len(i.roots) == 0 {\n\t\tlogging.Debug(\"No roots\")\n\t\terr = os.ErrNotExist\n\t\treturn\n\t}\n\t\/\/ Extract requested type\/extension and id from filename\n\text := strings.TrimLeft(filepath.Ext(filename), \".\")\n\tid := strings.TrimRight(filename[0:len(filename)-len(ext)], \".\")\n\t\/\/ Search requested image from all roots by trial and error\n\tfor _, root := range i.roots {\n\t\t\/\/ TODO: Fix escape vulnerability (sanitize filename from at least \"..\" etc)\n\t\t\/\/ Assume image is stored as .jpg -> change extension to .jpg\n\t\ttrial := filepath.Join(root, id) + \".jpg\"\n\t\terr = img.FromFile(trial)\n\t\tif err == nil {\n\t\t\tlogging.Debug(\"Found: \" + trial)\n\t\t\tbreak\n\t\t}\n\t\tlogging.Debug(\"Not found: \" + trial)\n\t}\n\treturn\n}\n\n\/\/ TODO: This is a temp solution for ImageSize creating too many Images.\n\/\/ Limit to creating only one at time for finding the image size\n\nfunc (i ImageSource) ImageSize(fn string) (w int, h int, err error) {\n\ti.mutex.RLock()\n\ts, ok := i.sizes[fn]\n\ti.mutex.RUnlock()\n\n\tif ok {\n\t\treturn s[0], s[1], nil\n\t}\n\n\timage := images.NewImage()\n\tdefer image.Destroy()\n\n\terr = i.searchRoots(fn, image)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tw = image.GetWidth()\n\th = image.GetHeight()\n\n\ti.mutex.Lock()\n\ti.sizes[fn] = dim{w, h}\n\ti.mutex.Unlock()\n\n\treturn\n}\n\n\/\/ A very trivial (and inefficient way to handle roots)\n\/\/ Can be used for development work, however.\nfunc (i *ImageSource) AddRoot(root string) error {\n\tabspath, err := filepath.Abs(root)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogging.Debug(\"Adding root: \" + root + \" -> \" + abspath)\n\tfor _, path := range i.roots {\n\t\tif path == abspath {\n\t\t\treturn errors.New(\"Root is already served\")\n\t\t}\n\t}\n\n\ti.roots = append(i.roots, abspath)\n\treturn nil\n}\n\n\/\/ A very trivial (and inefficient way to handle roots)\n\/\/ Can be used for development work, however.\nfunc (is *ImageSource) RemoveRoot(root string) error {\n\tabspath, err := filepath.Abs(root)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i, path := range is.roots {\n\t\tif path == abspath {\n\t\t\tis.roots = append(is.roots[:i], is.roots[i+1:]...)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn errors.New(\"Root not found\")\n}\n<commit_msg>ImageSize no longer reads the whole file<commit_after>package ops\n\nimport (\n\t\"errors\"\n\t\"github.com\/phzfi\/RIC\/server\/images\"\n\t\"github.com\/phzfi\/RIC\/server\/logging\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n)\n\ntype dim [2]int\ntype idToSize map[string]dim\n\ntype ImageSource struct {\n\troots []string\n\tsizes idToSize\n\tmutex *sync.RWMutex\n}\n\nfunc MakeImageSource() ImageSource {\n\treturn ImageSource{\n\t\tsizes: make(idToSize),\n\t\tmutex: new(sync.RWMutex),\n\t}\n}\n\nfunc (i ImageSource) LoadImageOp(id string) Operation {\n\treturn loadImageOp{&i, id}\n}\n\n\/\/ Searches root for an image. If found loads the image to img. Otherwise does nothing and returns an error.\nfunc (i ImageSource) searchRoots(fn string, img images.Image) (err error) {\n\treturn i.searchRootsCustomTrialFunc(fn, img.FromFile)\n}\n\n\n\/\/ Searches root for an image. Calls the given trialFunc with the given fn for every root until trialFunc does not return an error. Returns if trialFunc succeeds. returns with error if no trialFunc succeeds.\nfunc (i ImageSource) searchRootsCustomTrialFunc(fn string, trialFunc func (fn string) (err error)) (err error) {\n\tif len(i.roots) == 0 {\n\t\tlogging.Debug(\"No roots\")\n\t\terr = os.ErrNotExist\n\t\treturn\n\t}\n\t\/\/ Extract requested type\/extension and id from filename\n\text := strings.TrimLeft(filepath.Ext(fn), \".\")\n\tid := strings.TrimRight(fn[0:len(fn)-len(ext)], \".\")\n\t\/\/ Search requested image from all roots by trial and error\n\tfor _, root := range i.roots {\n\t\t\/\/ TODO: Fix escape vulnerability (sanitize filename from at least \"..\" etc)\n\t\t\/\/ Assume image is stored as .jpg -> change extension to .jpg\n\t\ttrial := filepath.Join(root, id) + \".jpg\"\n\t\terr = trialFunc(trial)\n\t\tif err == nil {\n\t\t\tlogging.Debug(\"Found: \" + trial)\n\t\t\tbreak\n\t\t}\n\t\tlogging.Debug(\"Not found: \" + trial)\n\t}\n\treturn\n}\n\n\/\/ Get image size\nfunc (i ImageSource) ImageSize(fn string) (w int, h int, err error) {\n\ti.mutex.RLock()\n\ts, ok := i.sizes[fn]\n\ti.mutex.RUnlock()\n\n\tif ok {\n\t\treturn s[0], s[1], nil\n\t}\n\n\timage := images.NewImage()\n\tdefer image.Destroy()\n\n\terr = i.pingRoots(fn, image)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tw = image.GetWidth()\n\th = image.GetHeight()\n\n\ti.mutex.Lock()\n\ti.sizes[fn] = dim{w, h}\n\ti.mutex.Unlock()\n\n\treturn\n}\n\n\/\/ Searches root for an image. If found, loads only the image metadata to img. Otherwise does nothing and returns an error.\nfunc (i ImageSource) pingRoots(fn string, img images.Image) (err error) {\n\treturn i.searchRootsCustomTrialFunc(fn, img.PingImage)\n}\n\n\/\/ A very trivial (and inefficient way to handle roots)\n\/\/ Can be used for development work, however.\nfunc (i *ImageSource) AddRoot(root string) error {\n\tabspath, err := filepath.Abs(root)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogging.Debug(\"Adding root: \" + root + \" -> \" + abspath)\n\tfor _, path := range i.roots {\n\t\tif path == abspath {\n\t\t\treturn errors.New(\"Root is already served\")\n\t\t}\n\t}\n\n\ti.roots = append(i.roots, abspath)\n\treturn nil\n}\n\n\/\/ A very trivial (and inefficient way to handle roots)\n\/\/ Can be used for development work, however.\nfunc (is *ImageSource) RemoveRoot(root string) error {\n\tabspath, err := filepath.Abs(root)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i, path := range is.roots {\n\t\tif path == abspath {\n\t\t\tis.roots = append(is.roots[:i], is.roots[i+1:]...)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn errors.New(\"Root not found\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage http\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/google\/cloudprober\/logger\"\n\t\"github.com\/google\/cloudprober\/metrics\"\n\t\"github.com\/google\/cloudprober\/targets\/lameduck\"\n\t\"google3\/go\/context\/context\"\n)\n\nconst testExportInterval = 2 * time.Second\n\ntype fakeLameduckLister struct {\n\tlameducked []string\n\terr error\n}\n\nfunc (f *fakeLameduckLister) List() ([]string, error) {\n\treturn f.lameducked, f.err\n}\n\nfunc TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}\n\nfunc testServer(ctx context.Context, t *testing.T, insName string, ldLister lameduck.Lister) (*Server, chan *metrics.EventMetrics) {\n\tln, err := net.Listen(\"tcp\", \":0\")\n\tif err != nil {\n\t\tt.Fatalf(\"Listen error: %v.\", err)\n\t}\n\n\tdataChan := make(chan *metrics.EventMetrics, 10)\n\ts := &Server{\n\t\tl: &logger.Logger{},\n\t\tln: ln,\n\t\tstatsInterval: 2 * time.Second,\n\t\tinstanceName: insName,\n\t\tldLister: ldLister,\n\t\tstaticURLResTable: map[string]string{\n\t\t\t\"\/\": OK,\n\t\t\t\"\/instance\": insName,\n\t\t},\n\t}\n\n\tgo func() {\n\t\ts.Start(ctx, dataChan)\n\t}()\n\n\treturn s, dataChan\n}\n\n\/\/ get preforms HTTP GET request and return the response body and status\nfunc get(t *testing.T, ln net.Listener, path string) (string, string) {\n\tt.Helper()\n\tresp, err := http.Get(fmt.Sprintf(\"http:\/\/%s\/%s\", listenerAddr(ln), path))\n\tif err != nil {\n\t\tt.Errorf(\"HTTP server returned an error for the URL '\/%s'. Err: %v\", path, err)\n\t\treturn \"\", \"\"\n\t}\n\tstatus := resp.Status\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Errorf(\"Error while reading response for the URL '\/%s': Err: %v\", path, err)\n\t\treturn \"\", status\n\t}\n\treturn string(body), status\n}\n\nfunc listenerAddr(ln net.Listener) string {\n\treturn fmt.Sprintf(\"localhost:%d\", ln.Addr().(*net.TCPAddr).Port)\n}\n\nfunc TestListenAndServeStats(t *testing.T) {\n\ttestIns := \"testInstance\"\n\tctx, cancelFunc := context.WithCancel(context.Background())\n\ts, dataChan := testServer(ctx, t, testIns, &fakeLameduckLister{})\n\tdefer cancelFunc()\n\n\turlsAndExpectedResponse := map[string]string{\n\t\t\"\/\": OK,\n\t\t\"\/instance\": \"testInstance\",\n\t\t\"\/lameduck\": \"false\",\n\t\t\"\/healthcheck\": OK,\n\t}\n\tfor url, expectedResponse := range urlsAndExpectedResponse {\n\t\tif response, _ := get(t, s.ln, url); response != expectedResponse {\n\t\t\tt.Errorf(\"Didn't get the expected response for URL '%s'. Got: %s, Expected: %s\", url, response, expectedResponse)\n\t\t}\n\t}\n\t\/\/ Sleep for the export interval and a second extra to allow for the stats to\n\t\/\/ come in.\n\ttime.Sleep(s.statsInterval)\n\ttime.Sleep(time.Second)\n\n\t\/\/ Build a map of expected URL stats\n\texpectedURLStats := make(map[string]int64)\n\tfor url := range urlsAndExpectedResponse {\n\t\texpectedURLStats[url]++\n\t}\n\tif len(dataChan) != 1 {\n\t\tt.Errorf(\"Wrong number of stats on the stats channel. Got: %d, Expected: %d\", len(dataChan), 1)\n\t}\n\tem := <-dataChan\n\n\t\/\/ See if we got stats for the all URLs\n\tfor url, expectedCount := range expectedURLStats {\n\t\tcount := em.Metric(\"req\").(*metrics.Map).GetKey(url).Int64()\n\t\tif count != expectedCount {\n\t\t\tt.Errorf(\"Didn't get the expected stats for the URL: %s. Got: %d, Expected: %d\", url, count, expectedCount)\n\t\t}\n\t}\n}\n\nfunc TestLameduckingTestInstance(t *testing.T) {\n\tctx, cancelFunc := context.WithCancel(context.Background())\n\ts, _ := testServer(ctx, t, \"testInstance\", &fakeLameduckLister{})\n\tdefer cancelFunc()\n\n\tif resp, _ := get(t, s.ln, \"lameduck\"); !strings.Contains(resp, \"false\") {\n\t\tt.Errorf(\"Didn't get the expected response for the URL '\/lameduck'. got: %q, want it to contain: %q\", resp, \"false\")\n\t}\n\tif resp, status := get(t, s.ln, \"healthcheck\"); resp != OK || status != \"200 OK\" {\n\t\tt.Errorf(\"Didn't get the expected response for the URL '\/healthcheck'. got: %q, %q , want: %q, %q\", resp, status, OK, \"200 OK\")\n\t}\n\n\ts.ldLister = &fakeLameduckLister{[]string{\"testInstance\"}, nil}\n\n\tif resp, _ := get(t, s.ln, \"lameduck\"); !strings.Contains(resp, \"true\") {\n\t\tt.Errorf(\"Didn't get the expected response for the URL '\/lameduck'. got: %q, want it to contain: %q\", resp, \"true\")\n\t}\n\tif _, status := get(t, s.ln, \"healthcheck\"); status != \"503 Service Unavailable\" {\n\t\tt.Errorf(\"Didn't get the expected response for the URL '\/healthcheck'. got: %q , want: %q\", status, \"200 OK\")\n\t}\n}\n\nfunc TestLameduckListerNil(t *testing.T) {\n\texpectedErrMsg := \"not initialized\"\n\n\tctx, cancelFunc := context.WithCancel(context.Background())\n\ts, _ := testServer(ctx, t, \"testInstance\", nil)\n\tdefer cancelFunc()\n\n\tif resp, status := get(t, s.ln, \"lameduck\"); !strings.Contains(resp, expectedErrMsg) || status != \"200 OK\" {\n\t\tt.Errorf(\"Didn't get the expected response for the URL '\/lameduck'. got: %q, %q. want it to contain: %q, %q\", resp, status, expectedErrMsg, \"200 OK\")\n\t}\n\tif resp, status := get(t, s.ln, \"healthcheck\"); resp != OK || status != \"200 OK\" {\n\t\tt.Errorf(\"Didn't get the expected response for the URL '\/healthcheck'. got: %q, %q , want: %q, %q\", resp, status, OK, \"200 OK\")\n\t}\n}\n\nfunc TestErrorToGetLameduckList(t *testing.T) {\n\texpectedErrMsg := \"fake lameduck error message\"\n\n\tctx, cancelFunc := context.WithCancel(context.Background())\n\ts, _ := testServer(ctx, t, \"testInstance\", &fakeLameduckLister{nil, errors.New(expectedErrMsg)})\n\tdefer cancelFunc()\n\n\tif resp, status := get(t, s.ln, \"lameduck\"); !strings.Contains(resp, expectedErrMsg) || status != \"200 OK\" {\n\t\tt.Errorf(\"Didn't get the expected response for the URL '\/lameduck'. got: %q, %q. want it to contain: %q, %q\", resp, status, expectedErrMsg, \"200 OK\")\n\t}\n\tif resp, status := get(t, s.ln, \"healthcheck\"); resp != OK || status != \"200 OK\" {\n\t\tt.Errorf(\"Didn't get the expected response for the URL '\/healthcheck'. got: %q, %q , want: %q, %q\", resp, status, OK, \"200 OK\")\n\t}\n}\n<commit_msg>Use standard context package in http_test.<commit_after>\/\/ Copyright 2017 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage http\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/google\/cloudprober\/logger\"\n\t\"github.com\/google\/cloudprober\/metrics\"\n\t\"github.com\/google\/cloudprober\/targets\/lameduck\"\n)\n\nconst testExportInterval = 2 * time.Second\n\ntype fakeLameduckLister struct {\n\tlameducked []string\n\terr error\n}\n\nfunc (f *fakeLameduckLister) List() ([]string, error) {\n\treturn f.lameducked, f.err\n}\n\nfunc TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}\n\nfunc testServer(ctx context.Context, t *testing.T, insName string, ldLister lameduck.Lister) (*Server, chan *metrics.EventMetrics) {\n\tln, err := net.Listen(\"tcp\", \":0\")\n\tif err != nil {\n\t\tt.Fatalf(\"Listen error: %v.\", err)\n\t}\n\n\tdataChan := make(chan *metrics.EventMetrics, 10)\n\ts := &Server{\n\t\tl: &logger.Logger{},\n\t\tln: ln,\n\t\tstatsInterval: 2 * time.Second,\n\t\tinstanceName: insName,\n\t\tldLister: ldLister,\n\t\tstaticURLResTable: map[string]string{\n\t\t\t\"\/\": OK,\n\t\t\t\"\/instance\": insName,\n\t\t},\n\t}\n\n\tgo func() {\n\t\ts.Start(ctx, dataChan)\n\t}()\n\n\treturn s, dataChan\n}\n\n\/\/ get preforms HTTP GET request and return the response body and status\nfunc get(t *testing.T, ln net.Listener, path string) (string, string) {\n\tt.Helper()\n\tresp, err := http.Get(fmt.Sprintf(\"http:\/\/%s\/%s\", listenerAddr(ln), path))\n\tif err != nil {\n\t\tt.Errorf(\"HTTP server returned an error for the URL '\/%s'. Err: %v\", path, err)\n\t\treturn \"\", \"\"\n\t}\n\tstatus := resp.Status\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tt.Errorf(\"Error while reading response for the URL '\/%s': Err: %v\", path, err)\n\t\treturn \"\", status\n\t}\n\treturn string(body), status\n}\n\nfunc listenerAddr(ln net.Listener) string {\n\treturn fmt.Sprintf(\"localhost:%d\", ln.Addr().(*net.TCPAddr).Port)\n}\n\nfunc TestListenAndServeStats(t *testing.T) {\n\ttestIns := \"testInstance\"\n\tctx, cancelFunc := context.WithCancel(context.Background())\n\ts, dataChan := testServer(ctx, t, testIns, &fakeLameduckLister{})\n\tdefer cancelFunc()\n\n\turlsAndExpectedResponse := map[string]string{\n\t\t\"\/\": OK,\n\t\t\"\/instance\": \"testInstance\",\n\t\t\"\/lameduck\": \"false\",\n\t\t\"\/healthcheck\": OK,\n\t}\n\tfor url, expectedResponse := range urlsAndExpectedResponse {\n\t\tif response, _ := get(t, s.ln, url); response != expectedResponse {\n\t\t\tt.Errorf(\"Didn't get the expected response for URL '%s'. Got: %s, Expected: %s\", url, response, expectedResponse)\n\t\t}\n\t}\n\t\/\/ Sleep for the export interval and a second extra to allow for the stats to\n\t\/\/ come in.\n\ttime.Sleep(s.statsInterval)\n\ttime.Sleep(time.Second)\n\n\t\/\/ Build a map of expected URL stats\n\texpectedURLStats := make(map[string]int64)\n\tfor url := range urlsAndExpectedResponse {\n\t\texpectedURLStats[url]++\n\t}\n\tif len(dataChan) != 1 {\n\t\tt.Errorf(\"Wrong number of stats on the stats channel. Got: %d, Expected: %d\", len(dataChan), 1)\n\t}\n\tem := <-dataChan\n\n\t\/\/ See if we got stats for the all URLs\n\tfor url, expectedCount := range expectedURLStats {\n\t\tcount := em.Metric(\"req\").(*metrics.Map).GetKey(url).Int64()\n\t\tif count != expectedCount {\n\t\t\tt.Errorf(\"Didn't get the expected stats for the URL: %s. Got: %d, Expected: %d\", url, count, expectedCount)\n\t\t}\n\t}\n}\n\nfunc TestLameduckingTestInstance(t *testing.T) {\n\tctx, cancelFunc := context.WithCancel(context.Background())\n\ts, _ := testServer(ctx, t, \"testInstance\", &fakeLameduckLister{})\n\tdefer cancelFunc()\n\n\tif resp, _ := get(t, s.ln, \"lameduck\"); !strings.Contains(resp, \"false\") {\n\t\tt.Errorf(\"Didn't get the expected response for the URL '\/lameduck'. got: %q, want it to contain: %q\", resp, \"false\")\n\t}\n\tif resp, status := get(t, s.ln, \"healthcheck\"); resp != OK || status != \"200 OK\" {\n\t\tt.Errorf(\"Didn't get the expected response for the URL '\/healthcheck'. got: %q, %q , want: %q, %q\", resp, status, OK, \"200 OK\")\n\t}\n\n\ts.ldLister = &fakeLameduckLister{[]string{\"testInstance\"}, nil}\n\n\tif resp, _ := get(t, s.ln, \"lameduck\"); !strings.Contains(resp, \"true\") {\n\t\tt.Errorf(\"Didn't get the expected response for the URL '\/lameduck'. got: %q, want it to contain: %q\", resp, \"true\")\n\t}\n\tif _, status := get(t, s.ln, \"healthcheck\"); status != \"503 Service Unavailable\" {\n\t\tt.Errorf(\"Didn't get the expected response for the URL '\/healthcheck'. got: %q , want: %q\", status, \"200 OK\")\n\t}\n}\n\nfunc TestLameduckListerNil(t *testing.T) {\n\texpectedErrMsg := \"not initialized\"\n\n\tctx, cancelFunc := context.WithCancel(context.Background())\n\ts, _ := testServer(ctx, t, \"testInstance\", nil)\n\tdefer cancelFunc()\n\n\tif resp, status := get(t, s.ln, \"lameduck\"); !strings.Contains(resp, expectedErrMsg) || status != \"200 OK\" {\n\t\tt.Errorf(\"Didn't get the expected response for the URL '\/lameduck'. got: %q, %q. want it to contain: %q, %q\", resp, status, expectedErrMsg, \"200 OK\")\n\t}\n\tif resp, status := get(t, s.ln, \"healthcheck\"); resp != OK || status != \"200 OK\" {\n\t\tt.Errorf(\"Didn't get the expected response for the URL '\/healthcheck'. got: %q, %q , want: %q, %q\", resp, status, OK, \"200 OK\")\n\t}\n}\n\nfunc TestErrorToGetLameduckList(t *testing.T) {\n\texpectedErrMsg := \"fake lameduck error message\"\n\n\tctx, cancelFunc := context.WithCancel(context.Background())\n\ts, _ := testServer(ctx, t, \"testInstance\", &fakeLameduckLister{nil, errors.New(expectedErrMsg)})\n\tdefer cancelFunc()\n\n\tif resp, status := get(t, s.ln, \"lameduck\"); !strings.Contains(resp, expectedErrMsg) || status != \"200 OK\" {\n\t\tt.Errorf(\"Didn't get the expected response for the URL '\/lameduck'. got: %q, %q. want it to contain: %q, %q\", resp, status, expectedErrMsg, \"200 OK\")\n\t}\n\tif resp, status := get(t, s.ln, \"healthcheck\"); resp != OK || status != \"200 OK\" {\n\t\tt.Errorf(\"Didn't get the expected response for the URL '\/healthcheck'. got: %q, %q , want: %q, %q\", resp, status, OK, \"200 OK\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package create\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\tawssession \"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/kms\"\n\t\"github.com\/giantswarm\/awstpr\"\n\tawsinfo \"github.com\/giantswarm\/awstpr\/aws\"\n\t\"github.com\/giantswarm\/clustertpr\/node\"\n\t\"github.com\/giantswarm\/k8scloudconfig\"\n\tmicroerror \"github.com\/giantswarm\/microkit\/error\"\n\tmicrologger \"github.com\/giantswarm\/microkit\/logger\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\t\"k8s.io\/client-go\/pkg\/api\"\n\t\"k8s.io\/client-go\/pkg\/runtime\"\n\t\"k8s.io\/client-go\/pkg\/watch\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\n\tawsutil \"github.com\/giantswarm\/aws-operator\/client\/aws\"\n\tk8sutil \"github.com\/giantswarm\/aws-operator\/client\/k8s\"\n)\n\nconst (\n\tClusterListAPIEndpoint string = \"\/apis\/cluster.giantswarm.io\/v1\/awses\"\n\tClusterWatchAPIEndpoint string = \"\/apis\/cluster.giantswarm.io\/v1\/watch\/awses\"\n\t\/\/ The format of instance's name is \"[name of cluster]-[prefix ('master' or 'worker')]-[number]\".\n\tinstanceNameFormat string = \"%s-%s-%d\"\n\t\/\/ Period or re-synchronizing the list of objects in k8s watcher. 0 means that re-sync will be\n\t\/\/ delayed as long as possible, until the watch will be closed or timed out.\n\tresyncPeriod time.Duration = 0\n\t\/\/ Prefixes used for machine names.\n\tprefixMaster string = \"master\"\n\tprefixWorker string = \"worker\"\n\t\/\/ EC2 instance tag keys.\n\ttagKeyName string = \"Name\"\n\ttagKeyCluster string = \"Cluster\"\n)\n\ntype EC2StateCode int\n\nconst (\n\t\/\/ http:\/\/docs.aws.amazon.com\/sdk-for-go\/api\/service\/ec2\/#InstanceState\n\tEC2PendingState EC2StateCode = 0\n\tEC2RunningState EC2StateCode = 16\n\tEC2ShuttingDownState EC2StateCode = 32\n\tEC2TerminatedState EC2StateCode = 48\n\tEC2StoppingState EC2StateCode = 64\n\tEC2StoppedState EC2StateCode = 80\n)\n\n\/\/ Config represents the configuration used to create a version service.\ntype Config struct {\n\t\/\/ Dependencies.\n\tAwsConfig awsutil.Config\n\tK8sClient kubernetes.Interface\n\tLogger micrologger.Logger\n\tCertsDir string\n}\n\n\/\/ DefaultConfig provides a default configuration to create a new version service\n\/\/ by best effort.\nfunc DefaultConfig() Config {\n\treturn Config{\n\t\t\/\/ Dependencies.\n\t\tK8sClient: nil,\n\t\tLogger: nil,\n\t\tCertsDir: \"\",\n\t}\n}\n\n\/\/ New creates a new configured version service.\nfunc New(config Config) (*Service, error) {\n\t\/\/ Dependencies.\n\tif config.Logger == nil {\n\t\treturn nil, microerror.MaskAnyf(invalidConfigError, \"logger must not be empty\")\n\t}\n\n\tnewService := &Service{\n\t\t\/\/ Dependencies.\n\t\tawsConfig: config.AwsConfig,\n\t\tk8sClient: config.K8sClient,\n\t\tlogger: config.Logger,\n\n\t\t\/\/ AWS certificates options.\n\t\tcertsDir: config.CertsDir,\n\n\t\t\/\/ Internals\n\t\tbootOnce: sync.Once{},\n\t}\n\n\treturn newService, nil\n}\n\n\/\/ Service implements the version service interface.\ntype Service struct {\n\t\/\/ Dependencies.\n\tawsConfig awsutil.Config\n\tk8sClient kubernetes.Interface\n\tlogger micrologger.Logger\n\n\t\/\/ AWS certificates options.\n\tcertsDir string\n\n\t\/\/ Internals.\n\tbootOnce sync.Once\n}\n\ntype Event struct {\n\tType string\n\tObject *awstpr.CustomObject\n}\n\nfunc (s *Service) newClusterListWatch() *cache.ListWatch {\n\tclient := s.k8sClient.Core().RESTClient()\n\n\tlistWatch := &cache.ListWatch{\n\t\tListFunc: func(options api.ListOptions) (runtime.Object, error) {\n\t\t\treq := client.Get().AbsPath(ClusterListAPIEndpoint)\n\t\t\tb, err := req.DoRaw()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tvar c awstpr.List\n\t\t\tif err := json.Unmarshal(b, &c); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\treturn &c, nil\n\t\t},\n\n\t\tWatchFunc: func(options api.ListOptions) (watch.Interface, error) {\n\t\t\treq := client.Get().AbsPath(ClusterWatchAPIEndpoint)\n\t\t\tstream, err := req.Stream()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\twatcher := watch.NewStreamWatcher(&k8sutil.ClusterDecoder{\n\t\t\t\tStream: stream,\n\t\t\t})\n\n\t\t\treturn watcher, nil\n\t\t},\n\t}\n\n\treturn listWatch\n}\n\nfunc (s *Service) Boot() {\n\ts.bootOnce.Do(func() {\n\t\tif err := s.createTPR(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\ts.logger.Log(\"info\", \"successfully created third-party resource\")\n\n\t\t_, clusterInformer := cache.NewInformer(\n\t\t\ts.newClusterListWatch(),\n\t\t\t&awstpr.CustomObject{},\n\t\t\tresyncPeriod,\n\t\t\tcache.ResourceEventHandlerFuncs{\n\t\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\t\tcluster := obj.(*awstpr.CustomObject)\n\t\t\t\t\ts.logger.Log(\"info\", fmt.Sprintf(\"creating cluster '%s'\", cluster.Name))\n\n\t\t\t\t\tif err := s.createClusterNamespace(cluster.Spec.Cluster); err != nil {\n\t\t\t\t\t\ts.logger.Log(\"error\", fmt.Sprintf(\"could not create cluster namespace: %s\", err))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Create AWS client\n\t\t\t\t\ts.awsConfig.Region = cluster.Spec.AWS.Region\n\t\t\t\t\tawsSession, ec2Client := awsutil.NewClient(s.awsConfig)\n\n\t\t\t\t\t\/\/ Create KMS key\n\t\t\t\t\tkmsSvc := kms.New(awsSession)\n\t\t\t\t\tkey, err := kmsSvc.CreateKey(&kms.CreateKeyInput{})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ts.logger.Log(\"error\", fmt.Sprintf(\"could not create KMS service client: %s\", err))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Encode TLS assets\n\t\t\t\t\ttlsAssets, err := s.encodeTLSAssets(awsSession, *key.KeyMetadata.Arn)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ts.logger.Log(\"error\", fmt.Sprintf(\"could not encode TLS assets: %s\", err))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Run masters\n\t\t\t\t\tif err := s.runMachines(runMachinesInput{\n\t\t\t\t\t\tawsSession: awsSession,\n\t\t\t\t\t\tec2Client: ec2Client,\n\t\t\t\t\t\tspec: cluster.Spec,\n\t\t\t\t\t\ttlsAssets: tlsAssets,\n\t\t\t\t\t\tclusterName: cluster.Name,\n\t\t\t\t\t\tprefix: prefixMaster,\n\t\t\t\t\t}); err != nil {\n\t\t\t\t\t\ts.logger.Log(\"error\", microerror.MaskAny(err))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Run workers\n\t\t\t\t\tif err := s.runMachines(runMachinesInput{\n\t\t\t\t\t\tawsSession: awsSession,\n\t\t\t\t\t\tec2Client: ec2Client,\n\t\t\t\t\t\tspec: cluster.Spec,\n\t\t\t\t\t\ttlsAssets: tlsAssets,\n\t\t\t\t\t\tclusterName: cluster.Name,\n\t\t\t\t\t\tprefix: prefixWorker,\n\t\t\t\t\t}); err != nil {\n\t\t\t\t\t\ts.logger.Log(\"error\", microerror.MaskAny(err))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\ts.logger.Log(\"info\", fmt.Sprintf(\"cluster '%s' processed\", cluster.Name))\n\t\t\t\t},\n\t\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\t\tcluster := obj.(*awstpr.CustomObject)\n\t\t\t\t\ts.logger.Log(\"info\", fmt.Sprintf(\"cluster '%s' deleted\", cluster.Name))\n\n\t\t\t\t\tif err := s.deleteClusterNamespace(cluster.Spec.Cluster); err != nil {\n\t\t\t\t\t\ts.logger.Log(\"error\", \"could not delete cluster namespace:\", err)\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t)\n\n\t\ts.logger.Log(\"info\", \"starting watch\")\n\n\t\t\/\/ Cluster informer lifecycle can be interrupted by putting a value into a \"stop channel\".\n\t\t\/\/ We aren't currently using that functionality, so we are passing a nil here.\n\t\tclusterInformer.Run(nil)\n\t})\n}\n\ntype runMachinesInput struct {\n\tawsSession *awssession.Session\n\tec2Client *ec2.EC2\n\tspec awstpr.Spec\n\ttlsAssets *cloudconfig.CompactTLSAssets\n\tclusterName string\n\tprefix string\n}\n\nfunc (s *Service) runMachines(input runMachinesInput) error {\n\tvar (\n\t\tmachines []node.Node\n\t\tawsMachines []awsinfo.Node\n\t)\n\n\tswitch input.prefix {\n\tcase prefixMaster:\n\t\tmachines = input.spec.Cluster.Masters\n\t\tawsMachines = input.spec.AWS.Masters\n\tcase prefixWorker:\n\t\tmachines = input.spec.Cluster.Workers\n\t\tawsMachines = input.spec.AWS.Workers\n\t}\n\n\t\/\/ TODO(nhlfr): Create a separate module for validating specs and execute on the earlier stages.\n\tif len(machines) != len(awsMachines) {\n\t\treturn microerror.MaskAny(fmt.Errorf(\"mismatched number of %s machines in the 'spec' and 'aws' sections: %d != %d\",\n\t\t\tinput.prefix,\n\t\t\tlen(machines),\n\t\t\tlen(awsMachines)))\n\t}\n\n\tfor i := 0; i < len(machines); i++ {\n\t\tname := fmt.Sprintf(instanceNameFormat, input.clusterName, input.prefix, i)\n\t\tif err := s.runMachine(runMachineInput{\n\t\t\tawsSession: input.awsSession,\n\t\t\tec2Client: input.ec2Client,\n\t\t\tspec: input.spec,\n\t\t\tmachine: machines[i],\n\t\t\tawsNode: awsMachines[i],\n\t\t\ttlsAssets: input.tlsAssets,\n\t\t\tclusterName: input.clusterName,\n\t\t\tname: name,\n\t\t\tprefix: input.prefix,\n\t\t}); err != nil {\n\t\t\treturn microerror.MaskAny(err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc allExistingInstancesMatch(instances *ec2.DescribeInstancesOutput, state EC2StateCode) bool {\n\t\/\/ If the instance doesn't exist, then the Reservation field should be nil.\n\t\/\/ Otherwise, it will contain a slice of instances (which is going to contain our one instance we queried for).\n\t\/\/ TODO(nhlfr): Check whether the instance has correct parameters. That will be most probably done when we\n\t\/\/ will introduce the interface for creating, deleting and updating resources.\n\tif instances.Reservations != nil {\n\t\tfor _, r := range instances.Reservations {\n\t\t\tfor _, i := range r.Instances {\n\t\t\t\tif *i.State.Code != int64(state) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\ntype runMachineInput struct {\n\tawsSession *awssession.Session\n\tec2Client *ec2.EC2\n\tspec awstpr.Spec\n\tmachine node.Node\n\tawsNode awsinfo.Node\n\ttlsAssets *cloudconfig.CompactTLSAssets\n\tclusterName string\n\tname string\n\tprefix string\n}\n\nfunc (s *Service) runMachine(input runMachineInput) error {\n\tinstances, err := input.ec2Client.DescribeInstances(&ec2.DescribeInstancesInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t&ec2.Filter{\n\t\t\t\tName: aws.String(fmt.Sprintf(\"tag:%s\", tagKeyName)),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(input.name),\n\t\t\t\t},\n\t\t\t},\n\t\t\t&ec2.Filter{\n\t\t\t\tName: aws.String(fmt.Sprintf(\"tag:%s\", tagKeyCluster)),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(input.clusterName),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn microerror.MaskAny(err)\n\t}\n\n\tcloudConfigParams := cloudconfig.CloudConfigTemplateParams{\n\t\tCluster: input.spec.Cluster,\n\t\tNode: input.machine,\n\t\tTLSAssets: *input.tlsAssets,\n\t}\n\n\tcloudConfig, err := s.cloudConfig(input.prefix, cloudConfigParams)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !allExistingInstancesMatch(instances, EC2TerminatedState) {\n\t\ts.logger.Log(\"info\", fmt.Sprintf(\"instance '%s' already exists\", input.name))\n\t\treturn nil\n\t}\n\n\treservation, err := input.ec2Client.RunInstances(&ec2.RunInstancesInput{\n\t\tImageId: aws.String(input.awsNode.ImageID),\n\t\tInstanceType: aws.String(input.awsNode.InstanceType),\n\t\tMinCount: aws.Int64(int64(1)),\n\t\tMaxCount: aws.Int64(int64(1)),\n\t\tUserData: aws.String(cloudConfig),\n\t})\n\tif err != nil {\n\t\treturn microerror.MaskAny(err)\n\t}\n\n\ts.logger.Log(\"info\", fmt.Sprintf(\"instance '%s' reserved\", input.name))\n\n\tif _, err := input.ec2Client.CreateTags(&ec2.CreateTagsInput{\n\t\tResources: []*string{reservation.Instances[0].InstanceId},\n\t\tTags: []*ec2.Tag{\n\t\t\t{\n\t\t\t\tKey: aws.String(tagKeyName),\n\t\t\t\tValue: aws.String(input.name),\n\t\t\t},\n\t\t\t{\n\t\t\t\tKey: aws.String(tagKeyCluster),\n\t\t\t\tValue: aws.String(input.clusterName),\n\t\t\t},\n\t\t},\n\t}); err != nil {\n\t\treturn microerror.MaskAny(err)\n\t}\n\n\ts.logger.Log(\"info\", fmt.Sprintf(\"instance '%s' tagged\", input.name))\n\n\treturn nil\n}\n<commit_msg>create: pass instance profile to new instances (#34)<commit_after>package create\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\tawssession \"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ec2\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/kms\"\n\t\"github.com\/giantswarm\/awstpr\"\n\tawsinfo \"github.com\/giantswarm\/awstpr\/aws\"\n\t\"github.com\/giantswarm\/clustertpr\/node\"\n\t\"github.com\/giantswarm\/k8scloudconfig\"\n\tmicroerror \"github.com\/giantswarm\/microkit\/error\"\n\tmicrologger \"github.com\/giantswarm\/microkit\/logger\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\t\"k8s.io\/client-go\/pkg\/api\"\n\t\"k8s.io\/client-go\/pkg\/runtime\"\n\t\"k8s.io\/client-go\/pkg\/watch\"\n\t\"k8s.io\/client-go\/tools\/cache\"\n\n\tawsutil \"github.com\/giantswarm\/aws-operator\/client\/aws\"\n\tk8sutil \"github.com\/giantswarm\/aws-operator\/client\/k8s\"\n)\n\nconst (\n\tClusterListAPIEndpoint string = \"\/apis\/cluster.giantswarm.io\/v1\/awses\"\n\tClusterWatchAPIEndpoint string = \"\/apis\/cluster.giantswarm.io\/v1\/watch\/awses\"\n\t\/\/ The format of instance's name is \"[name of cluster]-[prefix ('master' or 'worker')]-[number]\".\n\tinstanceNameFormat string = \"%s-%s-%d\"\n\t\/\/ Period or re-synchronizing the list of objects in k8s watcher. 0 means that re-sync will be\n\t\/\/ delayed as long as possible, until the watch will be closed or timed out.\n\tresyncPeriod time.Duration = 0\n\t\/\/ Prefixes used for machine names.\n\tprefixMaster string = \"master\"\n\tprefixWorker string = \"worker\"\n\t\/\/ EC2 instance tag keys.\n\ttagKeyName string = \"Name\"\n\ttagKeyCluster string = \"Cluster\"\n)\n\ntype EC2StateCode int\n\nconst (\n\t\/\/ http:\/\/docs.aws.amazon.com\/sdk-for-go\/api\/service\/ec2\/#InstanceState\n\tEC2PendingState EC2StateCode = 0\n\tEC2RunningState EC2StateCode = 16\n\tEC2ShuttingDownState EC2StateCode = 32\n\tEC2TerminatedState EC2StateCode = 48\n\tEC2StoppingState EC2StateCode = 64\n\tEC2StoppedState EC2StateCode = 80\n)\n\n\/\/ Config represents the configuration used to create a version service.\ntype Config struct {\n\t\/\/ Dependencies.\n\tAwsConfig awsutil.Config\n\tK8sClient kubernetes.Interface\n\tLogger micrologger.Logger\n\tCertsDir string\n}\n\n\/\/ DefaultConfig provides a default configuration to create a new version service\n\/\/ by best effort.\nfunc DefaultConfig() Config {\n\treturn Config{\n\t\t\/\/ Dependencies.\n\t\tK8sClient: nil,\n\t\tLogger: nil,\n\t\tCertsDir: \"\",\n\t}\n}\n\n\/\/ New creates a new configured version service.\nfunc New(config Config) (*Service, error) {\n\t\/\/ Dependencies.\n\tif config.Logger == nil {\n\t\treturn nil, microerror.MaskAnyf(invalidConfigError, \"logger must not be empty\")\n\t}\n\n\tnewService := &Service{\n\t\t\/\/ Dependencies.\n\t\tawsConfig: config.AwsConfig,\n\t\tk8sClient: config.K8sClient,\n\t\tlogger: config.Logger,\n\n\t\t\/\/ AWS certificates options.\n\t\tcertsDir: config.CertsDir,\n\n\t\t\/\/ Internals\n\t\tbootOnce: sync.Once{},\n\t}\n\n\treturn newService, nil\n}\n\n\/\/ Service implements the version service interface.\ntype Service struct {\n\t\/\/ Dependencies.\n\tawsConfig awsutil.Config\n\tk8sClient kubernetes.Interface\n\tlogger micrologger.Logger\n\n\t\/\/ AWS certificates options.\n\tcertsDir string\n\n\t\/\/ Internals.\n\tbootOnce sync.Once\n}\n\ntype Event struct {\n\tType string\n\tObject *awstpr.CustomObject\n}\n\nfunc (s *Service) newClusterListWatch() *cache.ListWatch {\n\tclient := s.k8sClient.Core().RESTClient()\n\n\tlistWatch := &cache.ListWatch{\n\t\tListFunc: func(options api.ListOptions) (runtime.Object, error) {\n\t\t\treq := client.Get().AbsPath(ClusterListAPIEndpoint)\n\t\t\tb, err := req.DoRaw()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tvar c awstpr.List\n\t\t\tif err := json.Unmarshal(b, &c); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\treturn &c, nil\n\t\t},\n\n\t\tWatchFunc: func(options api.ListOptions) (watch.Interface, error) {\n\t\t\treq := client.Get().AbsPath(ClusterWatchAPIEndpoint)\n\t\t\tstream, err := req.Stream()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\twatcher := watch.NewStreamWatcher(&k8sutil.ClusterDecoder{\n\t\t\t\tStream: stream,\n\t\t\t})\n\n\t\t\treturn watcher, nil\n\t\t},\n\t}\n\n\treturn listWatch\n}\n\nfunc (s *Service) Boot() {\n\ts.bootOnce.Do(func() {\n\t\tif err := s.createTPR(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\ts.logger.Log(\"info\", \"successfully created third-party resource\")\n\n\t\t_, clusterInformer := cache.NewInformer(\n\t\t\ts.newClusterListWatch(),\n\t\t\t&awstpr.CustomObject{},\n\t\t\tresyncPeriod,\n\t\t\tcache.ResourceEventHandlerFuncs{\n\t\t\t\tAddFunc: func(obj interface{}) {\n\t\t\t\t\tcluster := obj.(*awstpr.CustomObject)\n\t\t\t\t\ts.logger.Log(\"info\", fmt.Sprintf(\"creating cluster '%s'\", cluster.Name))\n\n\t\t\t\t\tif err := s.createClusterNamespace(cluster.Spec.Cluster); err != nil {\n\t\t\t\t\t\ts.logger.Log(\"error\", fmt.Sprintf(\"could not create cluster namespace: %s\", err))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Create AWS client\n\t\t\t\t\ts.awsConfig.Region = cluster.Spec.AWS.Region\n\t\t\t\t\tawsSession, ec2Client := awsutil.NewClient(s.awsConfig)\n\n\t\t\t\t\t\/\/ Create KMS key\n\t\t\t\t\tkmsSvc := kms.New(awsSession)\n\t\t\t\t\tkey, err := kmsSvc.CreateKey(&kms.CreateKeyInput{})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ts.logger.Log(\"error\", fmt.Sprintf(\"could not create KMS service client: %s\", err))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Encode TLS assets\n\t\t\t\t\ttlsAssets, err := s.encodeTLSAssets(awsSession, *key.KeyMetadata.Arn)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\ts.logger.Log(\"error\", fmt.Sprintf(\"could not encode TLS assets: %s\", err))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Run masters\n\t\t\t\t\tif err := s.runMachines(runMachinesInput{\n\t\t\t\t\t\tawsSession: awsSession,\n\t\t\t\t\t\tec2Client: ec2Client,\n\t\t\t\t\t\tspec: cluster.Spec,\n\t\t\t\t\t\ttlsAssets: tlsAssets,\n\t\t\t\t\t\tclusterName: cluster.Name,\n\t\t\t\t\t\tprefix: prefixMaster,\n\t\t\t\t\t}); err != nil {\n\t\t\t\t\t\ts.logger.Log(\"error\", microerror.MaskAny(err))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t\/\/ Run workers\n\t\t\t\t\tif err := s.runMachines(runMachinesInput{\n\t\t\t\t\t\tawsSession: awsSession,\n\t\t\t\t\t\tec2Client: ec2Client,\n\t\t\t\t\t\tspec: cluster.Spec,\n\t\t\t\t\t\ttlsAssets: tlsAssets,\n\t\t\t\t\t\tclusterName: cluster.Name,\n\t\t\t\t\t\tprefix: prefixWorker,\n\t\t\t\t\t}); err != nil {\n\t\t\t\t\t\ts.logger.Log(\"error\", microerror.MaskAny(err))\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\ts.logger.Log(\"info\", fmt.Sprintf(\"cluster '%s' processed\", cluster.Name))\n\t\t\t\t},\n\t\t\t\tDeleteFunc: func(obj interface{}) {\n\t\t\t\t\tcluster := obj.(*awstpr.CustomObject)\n\t\t\t\t\ts.logger.Log(\"info\", fmt.Sprintf(\"cluster '%s' deleted\", cluster.Name))\n\n\t\t\t\t\tif err := s.deleteClusterNamespace(cluster.Spec.Cluster); err != nil {\n\t\t\t\t\t\ts.logger.Log(\"error\", \"could not delete cluster namespace:\", err)\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t},\n\t\t)\n\n\t\ts.logger.Log(\"info\", \"starting watch\")\n\n\t\t\/\/ Cluster informer lifecycle can be interrupted by putting a value into a \"stop channel\".\n\t\t\/\/ We aren't currently using that functionality, so we are passing a nil here.\n\t\tclusterInformer.Run(nil)\n\t})\n}\n\ntype runMachinesInput struct {\n\tawsSession *awssession.Session\n\tec2Client *ec2.EC2\n\tspec awstpr.Spec\n\ttlsAssets *cloudconfig.CompactTLSAssets\n\tclusterName string\n\tprefix string\n}\n\nfunc (s *Service) runMachines(input runMachinesInput) error {\n\tvar (\n\t\tmachines []node.Node\n\t\tawsMachines []awsinfo.Node\n\t)\n\n\tswitch input.prefix {\n\tcase prefixMaster:\n\t\tmachines = input.spec.Cluster.Masters\n\t\tawsMachines = input.spec.AWS.Masters\n\tcase prefixWorker:\n\t\tmachines = input.spec.Cluster.Workers\n\t\tawsMachines = input.spec.AWS.Workers\n\t}\n\n\t\/\/ TODO(nhlfr): Create a separate module for validating specs and execute on the earlier stages.\n\tif len(machines) != len(awsMachines) {\n\t\treturn microerror.MaskAny(fmt.Errorf(\"mismatched number of %s machines in the 'spec' and 'aws' sections: %d != %d\",\n\t\t\tinput.prefix,\n\t\t\tlen(machines),\n\t\t\tlen(awsMachines)))\n\t}\n\n\tfor i := 0; i < len(machines); i++ {\n\t\tname := fmt.Sprintf(instanceNameFormat, input.clusterName, input.prefix, i)\n\t\tif err := s.runMachine(runMachineInput{\n\t\t\tawsSession: input.awsSession,\n\t\t\tec2Client: input.ec2Client,\n\t\t\tspec: input.spec,\n\t\t\tmachine: machines[i],\n\t\t\tawsNode: awsMachines[i],\n\t\t\ttlsAssets: input.tlsAssets,\n\t\t\tclusterName: input.clusterName,\n\t\t\tname: name,\n\t\t\tprefix: input.prefix,\n\t\t}); err != nil {\n\t\t\treturn microerror.MaskAny(err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc allExistingInstancesMatch(instances *ec2.DescribeInstancesOutput, state EC2StateCode) bool {\n\t\/\/ If the instance doesn't exist, then the Reservation field should be nil.\n\t\/\/ Otherwise, it will contain a slice of instances (which is going to contain our one instance we queried for).\n\t\/\/ TODO(nhlfr): Check whether the instance has correct parameters. That will be most probably done when we\n\t\/\/ will introduce the interface for creating, deleting and updating resources.\n\tif instances.Reservations != nil {\n\t\tfor _, r := range instances.Reservations {\n\t\t\tfor _, i := range r.Instances {\n\t\t\t\tif *i.State.Code != int64(state) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\ntype runMachineInput struct {\n\tawsSession *awssession.Session\n\tec2Client *ec2.EC2\n\tspec awstpr.Spec\n\tmachine node.Node\n\tawsNode awsinfo.Node\n\ttlsAssets *cloudconfig.CompactTLSAssets\n\tclusterName string\n\tname string\n\tprefix string\n}\n\nfunc (s *Service) runMachine(input runMachineInput) error {\n\tinstances, err := input.ec2Client.DescribeInstances(&ec2.DescribeInstancesInput{\n\t\tFilters: []*ec2.Filter{\n\t\t\t&ec2.Filter{\n\t\t\t\tName: aws.String(fmt.Sprintf(\"tag:%s\", tagKeyName)),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(input.name),\n\t\t\t\t},\n\t\t\t},\n\t\t\t&ec2.Filter{\n\t\t\t\tName: aws.String(fmt.Sprintf(\"tag:%s\", tagKeyCluster)),\n\t\t\t\tValues: []*string{\n\t\t\t\t\taws.String(input.clusterName),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn microerror.MaskAny(err)\n\t}\n\n\tcloudConfigParams := cloudconfig.CloudConfigTemplateParams{\n\t\tCluster: input.spec.Cluster,\n\t\tNode: input.machine,\n\t\tTLSAssets: *input.tlsAssets,\n\t}\n\n\tcloudConfig, err := s.cloudConfig(input.prefix, cloudConfigParams)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !allExistingInstancesMatch(instances, EC2TerminatedState) {\n\t\ts.logger.Log(\"info\", fmt.Sprintf(\"instance '%s' already exists\", input.name))\n\t\treturn nil\n\t}\n\n\treservation, err := input.ec2Client.RunInstances(&ec2.RunInstancesInput{\n\t\tImageId: aws.String(input.awsNode.ImageID),\n\t\tInstanceType: aws.String(input.awsNode.InstanceType),\n\t\tMinCount: aws.Int64(int64(1)),\n\t\tMaxCount: aws.Int64(int64(1)),\n\t\tUserData: aws.String(cloudConfig),\n\t\tIamInstanceProfile: &ec2.IamInstanceProfileSpecification{\n\t\t\tName: aws.String(ProfileName),\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn microerror.MaskAny(err)\n\t}\n\n\ts.logger.Log(\"info\", fmt.Sprintf(\"instance '%s' reserved\", input.name))\n\n\tif _, err := input.ec2Client.CreateTags(&ec2.CreateTagsInput{\n\t\tResources: []*string{reservation.Instances[0].InstanceId},\n\t\tTags: []*ec2.Tag{\n\t\t\t{\n\t\t\t\tKey: aws.String(tagKeyName),\n\t\t\t\tValue: aws.String(input.name),\n\t\t\t},\n\t\t\t{\n\t\t\t\tKey: aws.String(tagKeyCluster),\n\t\t\t\tValue: aws.String(input.clusterName),\n\t\t\t},\n\t\t},\n\t}); err != nil {\n\t\treturn microerror.MaskAny(err)\n\t}\n\n\ts.logger.Log(\"info\", fmt.Sprintf(\"instance '%s' tagged\", input.name))\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package smf\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\t\"github.com\/gomidi\/midi\/internal\/midilib\"\n\n\t\"github.com\/gomidi\/midi\"\n)\n\nvar (\n\t_ midi.Writer = Writer(nil)\n\t_ midi.Reader = Reader(nil)\n)\n\n\/\/ Writer writes midi messages to a standard midi file (SMF)\n\/\/ Writer is also a midi.Writer\ntype Writer interface {\n\n\t\/\/ Header returns the header\n\tHeader() Header\n\n\t\/\/ WriteHeader writes the midi header\n\t\/\/ If WriteHeader was not called before the first run of Write,\n\t\/\/ it will implicitly be called when calling Write.\n\tWriteHeader() (int, error)\n\n\t\/\/ Write writes a midi message to the SMF file.\n\t\/\/\n\t\/\/ Due to the nature of SMF files there is some maybe surprising behavior.\n\t\/\/ - If the header has not been written yet, it will be written before writing the first message.\n\t\/\/ - The first message will be written to track 0 which will be implicitly created.\n\t\/\/ - All messages of a track will be buffered inside the track and only be written if an EndOfTrack\n\t\/\/ message is written.\n\t\/\/ - The number of tracks that are written will never execeed the NumTracks that have been defined when creating the writer.\n\t\/\/ If the last track has been written, io.EOF will be returned. (Also for any further attempt to write).\n\t\/\/ - It is the responsibility of the caller to make sure the provided NumTracks (which defaults to 1) is not\n\t\/\/ larger as the number of tracks in the file.\n\t\/\/ Keep the above in mind when examinating the written nbytes that are returned. They reflect the number of bytes\n\t\/\/ that have been physically written at that point in time.\n\t\/\/ any error stops the writing, is tracked and prohibits further writing.\n\t\/\/ this last error is returned from Error()\n\tWrite(midi.Message) (nBytes int, err error)\n\n\t\/\/ SetDelta sets a time distance between the last written and the following message in ticks.\n\t\/\/ The meaning of a tick depends on the time format that is set in the header of the SMF file.\n\tSetDelta(ticks uint32)\n}\n\n\/\/ Reader reads midi messages from a standard midi file (SMF)\n\/\/ Reader is also a midi.Reader\ntype Reader interface {\n\n\t\/\/ ReadHeader reads the header of the SMF file. If Header is called before ReadHeader, it will panic.\n\t\/\/ ReadHeader is also implicitly called with the first call of Read() (if it has not been run before)\n\tReadHeader() error\n\n\t\/\/ Read reads a MIDI message from a SMF file.\n\t\/\/ any error will be tracked and stops reading and prevents any other attempt to read.\n\t\/\/ this first and last error is returned from Error()\n\tRead() (midi.Message, error)\n\n\t\/\/ Header returns the header of SMF file\n\t\/\/ if the header is not yet read, it will be read before\n\t\/\/ if any error occurred during reading of header, it can be found with Error()\n\tHeader() Header\n\n\t\/\/ Delta returns the time distance between the last read midi message and the message before in ticks.\n\t\/\/ The meaning of a tick depends on the time format that is set in the header of the SMF file.\n\tDelta() (ticks uint32)\n\n\t\/\/ Track returns the number of the track of the last read midi message (starting with 0)\n\t\/\/ It returns -1 if no message has been read yet.\n\tTrack() int16\n}\n\n\/\/ Header represents the header of a SMF file.\ntype Header struct {\n\n\t\/\/ Format is the SMF file format: SMF0, SMF1 or SMF2\n\tFormat\n\n\t\/\/ NumTracks is the number of tracks (always > 0)\n\tNumTracks uint16\n\n\t\/\/ TimeFormat is the time format (either MetricTicks or TimeCode)\n\tTimeFormat\n}\n\nfunc (h Header) String() string {\n\treturn fmt.Sprintf(\"<Format: %v, NumTracks: %v, TimeFormat: %v>\", h.Format, h.NumTracks, h.TimeFormat)\n}\n\nconst (\n\t\/\/ SMF0 represents the singletrack SMF format (0)\n\tSMF0 = format(0)\n\n\t\/\/ SMF1 represents the multitrack SMF format (1)\n\tSMF1 = format(1)\n\n\t\/\/ SMF2 represents the sequential track SMF format (2)\n\tSMF2 = format(2)\n)\n\ntype Chunk struct {\n\ttyp []byte \/\/ must always be 4 bytes long, to avoid conversions everytime, we take []byte here instead of [4]byte\n\tdata []byte\n}\n\n\/\/ Chunk returns the length of the chunk body\nfunc (c *Chunk) Len() int {\n\treturn len(c.data)\n}\n\n\/\/ SetType sets the type of the chunk\nfunc (c *Chunk) SetType(typ [4]byte) {\n\tc.typ = make([]byte, 4)\n\tc.typ[0] = typ[0]\n\tc.typ[1] = typ[1]\n\tc.typ[2] = typ[2]\n\tc.typ[3] = typ[3]\n}\n\nfunc (c *Chunk) Type() string {\n\tvar bf bytes.Buffer\n\tbf.Write(c.typ)\n\treturn bf.String()\n}\n\n\/\/ Clear removes all data but keeps the typ\nfunc (c *Chunk) Clear() {\n\tc.data = nil\n}\n\n\/\/ WriteTo writes the content of the chunk to the given writer\nfunc (c *Chunk) WriteTo(wr io.Writer) (int64, error) {\n\tif len(c.typ) != 4 {\n\t\treturn 0, fmt.Errorf(\"chunk header not set properly\")\n\t}\n\n\tvar bf bytes.Buffer\n\tbf.Write(c.typ)\n\tbinary.Write(&bf, binary.BigEndian, int32(c.Len()))\n\tbf.Write(c.data)\n\ti, err := wr.Write(bf.Bytes())\n\treturn int64(i), err\n}\n\n\/\/ ReadHeader reads the header from the given reader\n\/\/ returns the length of the following body\n\/\/ for errors, length of 0 is returned\nfunc (c *Chunk) ReadHeader(rd io.Reader) (length uint32, err error) {\n\tc.typ, err = midilib.ReadNBytes(4, rd)\n\n\tif err != nil {\n\t\tc.typ = nil\n\t\treturn\n\t}\n\n\treturn midilib.ReadUint32(rd)\n}\n\n\/\/ Write writes the given bytes to the body of the chunk\nfunc (c *Chunk) Write(b []byte) (int, error) {\n\tc.data = append(c.data, b...)\n\treturn len(b), nil\n}\n\nvar (\n\t_ TimeFormat = MetricTicks(0)\n\t_ TimeFormat = TimeCode{}\n\t_ Format = SMF0\n)\n\n\/\/ TimeCode is the SMPTE time format.\n\/\/ It can be comfortable created with the SMPTE* functions.\ntype TimeCode struct {\n\tFramesPerSecond uint8\n\tSubFrames uint8\n}\n\n\/\/ String represents the TimeCode as a string.\nfunc (t TimeCode) String() string {\n\n\tswitch t.FramesPerSecond {\n\tcase 29:\n\t\treturn fmt.Sprintf(\"SMPTE30DropFrame %v subframes\", t.SubFrames)\n\tdefault:\n\t\treturn fmt.Sprintf(\"SMPTE%v %v subframes\", t.FramesPerSecond, t.SubFrames)\n\t}\n\n}\n\nfunc (t TimeCode) timeformat() {}\n\n\/\/ SMPTE24 returns a SMPTE24 TimeCode with the given subframes\nfunc SMPTE24(subframes uint8) TimeCode {\n\treturn TimeCode{24, subframes}\n}\n\n\/\/ SMPTE25 returns a SMPTE25 TimeCode with the given subframes\nfunc SMPTE25(subframes uint8) TimeCode {\n\treturn TimeCode{25, subframes}\n}\n\n\/\/ SMPTE30DropFrame returns a SMPTE30 drop frame TimeCode with the given subframes\nfunc SMPTE30DropFrame(subframes uint8) TimeCode {\n\treturn TimeCode{29, subframes}\n}\n\n\/\/ SMPTE30 returns a SMPTE30 TimeCode with the given subframes\nfunc SMPTE30(subframes uint8) TimeCode {\n\treturn TimeCode{30, subframes}\n}\n\n\/\/ MetricTicks represents the \"ticks per quarter note\" (metric) time format\n\/\/ It defaults to 960 (i.e. 0 is treated as if it where 960 ticks per quarter note)\ntype MetricTicks uint16\n\n\/\/ Duration returns the time.Duration for a number of ticks at a certain tempo (in BPM)\nfunc (q MetricTicks) Duration(tempoBPM uint32, deltaTicks uint32) time.Duration {\n\t\/\/ (60000 \/ T) * (d \/ R) = D[ms]\n\tdurQnMilli := 60000 \/ float64(tempoBPM)\n\t_4thticks := float64(deltaTicks) \/ float64(uint16(q))\n\treturn time.Duration(roundFloat(durQnMilli*_4thticks, 0)) * time.Millisecond\n}\n\n\/\/ Ticks returns the ticks for a given time.Duration at a certain tempo (in BPM)\nfunc (q MetricTicks) Ticks(tempoBPM uint32, d time.Duration) (ticks uint32) {\n\t\/\/ d = (D[ms] * R * T) \/ 60000\n\tticks = uint32(roundFloat((float64(d.Nanoseconds())\/1000000*float64(uint16(q))*float64(tempoBPM))\/60000, 0))\n\treturn ticks\n}\n\nfunc (q MetricTicks) div(d float64) uint32 {\n\treturn uint32(roundFloat(float64(q.Ticks4th())\/d, 0))\n}\n\n\/\/ Number returns the number of the metric ticks (ticks for a quarter note, defaults to 960)\nfunc (q MetricTicks) Number() uint16 {\n\treturn q.Ticks4th()\n}\n\n\/\/ Ticks4th returns the ticks for a quarter note\nfunc (q MetricTicks) Ticks4th() uint16 {\n\tif uint16(q) == 0 {\n\t\treturn 960 \/\/ default\n\t}\n\treturn uint16(q)\n}\n\n\/\/ Ticks8th returns the ticks for a quaver note\nfunc (q MetricTicks) Ticks8th() uint32 {\n\treturn q.div(2)\n}\n\n\/\/ Ticks16th returns the ticks for a 16th note\nfunc (q MetricTicks) Ticks16th() uint32 {\n\treturn q.div(4)\n}\n\n\/\/ Ticks32th returns the ticks for a 32th note\nfunc (q MetricTicks) Ticks32th() uint32 {\n\treturn q.div(8)\n}\n\n\/\/ Ticks64th returns the ticks for a 64th note\nfunc (q MetricTicks) Ticks64th() uint32 {\n\treturn q.div(16)\n}\n\n\/\/ Ticks128th returns the ticks for a 128th note\nfunc (q MetricTicks) Ticks128th() uint32 {\n\treturn q.div(32)\n}\n\n\/\/ Ticks256th returns the ticks for a 256th note\nfunc (q MetricTicks) Ticks256th() uint32 {\n\treturn q.div(64)\n}\n\n\/\/ Ticks512th returns the ticks for a 512th note\nfunc (q MetricTicks) Ticks512th() uint32 {\n\treturn q.div(128)\n}\n\n\/\/ Ticks1024th returns the ticks for a 1024th note\nfunc (q MetricTicks) Ticks1024th() uint32 {\n\treturn q.div(256)\n}\n\n\/\/ String returns the string representation of the quarter note resolution\nfunc (q MetricTicks) String() string {\n\treturn fmt.Sprintf(\"%v MetricTicks\", q.Ticks4th())\n}\n\nfunc (q MetricTicks) timeformat() {}\n\n\/\/ Format is the common interface of all SMF file formats\ntype Format interface {\n\n\t\/\/ String returns the string representation of the SMF format.\n\tString() string\n\n\t\/\/ Type returns the type of the SMF file: 0 for SMF0, 1 for SMF1 and 2 for SMF2\n\tType() uint16\n\n\tsmfformat() \/\/ make the implementation exclusive to this package\n}\n\n\/\/ TimeFormat is the common interface of all SMF time formats\ntype TimeFormat interface {\n\tString() string\n\ttimeformat() \/\/ make the implementation exclusive to this package\n}\n\n\/\/ format is an implementation of Format\ntype format uint16\n\nfunc (f format) Type() uint16 {\n\treturn uint16(f)\n}\n\nfunc (f format) smfformat() {}\n\nfunc (f format) String() string {\n\tswitch f {\n\tcase SMF0:\n\t\treturn \"SMF0 (singletrack)\"\n\tcase SMF1:\n\t\treturn \"SMF1 (multitrack)\"\n\tcase SMF2:\n\t\treturn \"SMF2 (sequential tracks)\"\n\t}\n\tpanic(\"unreachable\")\n}\n<commit_msg>make Ticks4th consistent with other Ticks* functions (fixes #10)<commit_after>package smf\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\t\"github.com\/gomidi\/midi\/internal\/midilib\"\n\n\t\"github.com\/gomidi\/midi\"\n)\n\nvar (\n\t_ midi.Writer = Writer(nil)\n\t_ midi.Reader = Reader(nil)\n)\n\n\/\/ Writer writes midi messages to a standard midi file (SMF)\n\/\/ Writer is also a midi.Writer\ntype Writer interface {\n\n\t\/\/ Header returns the header\n\tHeader() Header\n\n\t\/\/ WriteHeader writes the midi header\n\t\/\/ If WriteHeader was not called before the first run of Write,\n\t\/\/ it will implicitly be called when calling Write.\n\tWriteHeader() (int, error)\n\n\t\/\/ Write writes a midi message to the SMF file.\n\t\/\/\n\t\/\/ Due to the nature of SMF files there is some maybe surprising behavior.\n\t\/\/ - If the header has not been written yet, it will be written before writing the first message.\n\t\/\/ - The first message will be written to track 0 which will be implicitly created.\n\t\/\/ - All messages of a track will be buffered inside the track and only be written if an EndOfTrack\n\t\/\/ message is written.\n\t\/\/ - The number of tracks that are written will never execeed the NumTracks that have been defined when creating the writer.\n\t\/\/ If the last track has been written, io.EOF will be returned. (Also for any further attempt to write).\n\t\/\/ - It is the responsibility of the caller to make sure the provided NumTracks (which defaults to 1) is not\n\t\/\/ larger as the number of tracks in the file.\n\t\/\/ Keep the above in mind when examinating the written nbytes that are returned. They reflect the number of bytes\n\t\/\/ that have been physically written at that point in time.\n\t\/\/ any error stops the writing, is tracked and prohibits further writing.\n\t\/\/ this last error is returned from Error()\n\tWrite(midi.Message) (nBytes int, err error)\n\n\t\/\/ SetDelta sets a time distance between the last written and the following message in ticks.\n\t\/\/ The meaning of a tick depends on the time format that is set in the header of the SMF file.\n\tSetDelta(ticks uint32)\n}\n\n\/\/ Reader reads midi messages from a standard midi file (SMF)\n\/\/ Reader is also a midi.Reader\ntype Reader interface {\n\n\t\/\/ ReadHeader reads the header of the SMF file. If Header is called before ReadHeader, it will panic.\n\t\/\/ ReadHeader is also implicitly called with the first call of Read() (if it has not been run before)\n\tReadHeader() error\n\n\t\/\/ Read reads a MIDI message from a SMF file.\n\t\/\/ any error will be tracked and stops reading and prevents any other attempt to read.\n\t\/\/ this first and last error is returned from Error()\n\tRead() (midi.Message, error)\n\n\t\/\/ Header returns the header of SMF file\n\t\/\/ if the header is not yet read, it will be read before\n\t\/\/ if any error occurred during reading of header, it can be found with Error()\n\tHeader() Header\n\n\t\/\/ Delta returns the time distance between the last read midi message and the message before in ticks.\n\t\/\/ The meaning of a tick depends on the time format that is set in the header of the SMF file.\n\tDelta() (ticks uint32)\n\n\t\/\/ Track returns the number of the track of the last read midi message (starting with 0)\n\t\/\/ It returns -1 if no message has been read yet.\n\tTrack() int16\n}\n\n\/\/ Header represents the header of a SMF file.\ntype Header struct {\n\n\t\/\/ Format is the SMF file format: SMF0, SMF1 or SMF2\n\tFormat\n\n\t\/\/ NumTracks is the number of tracks (always > 0)\n\tNumTracks uint16\n\n\t\/\/ TimeFormat is the time format (either MetricTicks or TimeCode)\n\tTimeFormat\n}\n\nfunc (h Header) String() string {\n\treturn fmt.Sprintf(\"<Format: %v, NumTracks: %v, TimeFormat: %v>\", h.Format, h.NumTracks, h.TimeFormat)\n}\n\nconst (\n\t\/\/ SMF0 represents the singletrack SMF format (0)\n\tSMF0 = format(0)\n\n\t\/\/ SMF1 represents the multitrack SMF format (1)\n\tSMF1 = format(1)\n\n\t\/\/ SMF2 represents the sequential track SMF format (2)\n\tSMF2 = format(2)\n)\n\ntype Chunk struct {\n\ttyp []byte \/\/ must always be 4 bytes long, to avoid conversions everytime, we take []byte here instead of [4]byte\n\tdata []byte\n}\n\n\/\/ Chunk returns the length of the chunk body\nfunc (c *Chunk) Len() int {\n\treturn len(c.data)\n}\n\n\/\/ SetType sets the type of the chunk\nfunc (c *Chunk) SetType(typ [4]byte) {\n\tc.typ = make([]byte, 4)\n\tc.typ[0] = typ[0]\n\tc.typ[1] = typ[1]\n\tc.typ[2] = typ[2]\n\tc.typ[3] = typ[3]\n}\n\nfunc (c *Chunk) Type() string {\n\tvar bf bytes.Buffer\n\tbf.Write(c.typ)\n\treturn bf.String()\n}\n\n\/\/ Clear removes all data but keeps the typ\nfunc (c *Chunk) Clear() {\n\tc.data = nil\n}\n\n\/\/ WriteTo writes the content of the chunk to the given writer\nfunc (c *Chunk) WriteTo(wr io.Writer) (int64, error) {\n\tif len(c.typ) != 4 {\n\t\treturn 0, fmt.Errorf(\"chunk header not set properly\")\n\t}\n\n\tvar bf bytes.Buffer\n\tbf.Write(c.typ)\n\tbinary.Write(&bf, binary.BigEndian, int32(c.Len()))\n\tbf.Write(c.data)\n\ti, err := wr.Write(bf.Bytes())\n\treturn int64(i), err\n}\n\n\/\/ ReadHeader reads the header from the given reader\n\/\/ returns the length of the following body\n\/\/ for errors, length of 0 is returned\nfunc (c *Chunk) ReadHeader(rd io.Reader) (length uint32, err error) {\n\tc.typ, err = midilib.ReadNBytes(4, rd)\n\n\tif err != nil {\n\t\tc.typ = nil\n\t\treturn\n\t}\n\n\treturn midilib.ReadUint32(rd)\n}\n\n\/\/ Write writes the given bytes to the body of the chunk\nfunc (c *Chunk) Write(b []byte) (int, error) {\n\tc.data = append(c.data, b...)\n\treturn len(b), nil\n}\n\nvar (\n\t_ TimeFormat = MetricTicks(0)\n\t_ TimeFormat = TimeCode{}\n\t_ Format = SMF0\n)\n\n\/\/ TimeCode is the SMPTE time format.\n\/\/ It can be comfortable created with the SMPTE* functions.\ntype TimeCode struct {\n\tFramesPerSecond uint8\n\tSubFrames uint8\n}\n\n\/\/ String represents the TimeCode as a string.\nfunc (t TimeCode) String() string {\n\n\tswitch t.FramesPerSecond {\n\tcase 29:\n\t\treturn fmt.Sprintf(\"SMPTE30DropFrame %v subframes\", t.SubFrames)\n\tdefault:\n\t\treturn fmt.Sprintf(\"SMPTE%v %v subframes\", t.FramesPerSecond, t.SubFrames)\n\t}\n\n}\n\nfunc (t TimeCode) timeformat() {}\n\n\/\/ SMPTE24 returns a SMPTE24 TimeCode with the given subframes\nfunc SMPTE24(subframes uint8) TimeCode {\n\treturn TimeCode{24, subframes}\n}\n\n\/\/ SMPTE25 returns a SMPTE25 TimeCode with the given subframes\nfunc SMPTE25(subframes uint8) TimeCode {\n\treturn TimeCode{25, subframes}\n}\n\n\/\/ SMPTE30DropFrame returns a SMPTE30 drop frame TimeCode with the given subframes\nfunc SMPTE30DropFrame(subframes uint8) TimeCode {\n\treturn TimeCode{29, subframes}\n}\n\n\/\/ SMPTE30 returns a SMPTE30 TimeCode with the given subframes\nfunc SMPTE30(subframes uint8) TimeCode {\n\treturn TimeCode{30, subframes}\n}\n\n\/\/ MetricTicks represents the \"ticks per quarter note\" (metric) time format\n\/\/ It defaults to 960 (i.e. 0 is treated as if it where 960 ticks per quarter note)\ntype MetricTicks uint16\n\n\/\/ Duration returns the time.Duration for a number of ticks at a certain tempo (in BPM)\nfunc (q MetricTicks) Duration(tempoBPM uint32, deltaTicks uint32) time.Duration {\n\t\/\/ (60000 \/ T) * (d \/ R) = D[ms]\n\tdurQnMilli := 60000 \/ float64(tempoBPM)\n\t_4thticks := float64(deltaTicks) \/ float64(uint16(q))\n\treturn time.Duration(roundFloat(durQnMilli*_4thticks, 0)) * time.Millisecond\n}\n\n\/\/ Ticks returns the ticks for a given time.Duration at a certain tempo (in BPM)\nfunc (q MetricTicks) Ticks(tempoBPM uint32, d time.Duration) (ticks uint32) {\n\t\/\/ d = (D[ms] * R * T) \/ 60000\n\tticks = uint32(roundFloat((float64(d.Nanoseconds())\/1000000*float64(uint16(q))*float64(tempoBPM))\/60000, 0))\n\treturn ticks\n}\n\nfunc (q MetricTicks) div(d float64) uint32 {\n\treturn uint32(roundFloat(float64(q.Number())\/d, 0))\n}\n\n\/\/ Number returns the number of the metric ticks (ticks for a quarter note, defaults to 960)\nfunc (q MetricTicks) Number() uint16 {\n\tif q == 0 {\n\t\treturn 960 \/\/ default\n\t}\n\treturn uint16(q)\n}\n\n\/\/ Ticks4th returns the ticks for a quarter note\nfunc (q MetricTicks) Ticks4th() uint32 {\n\treturn uint32(q.Number())\n}\n\n\/\/ Ticks8th returns the ticks for a quaver note\nfunc (q MetricTicks) Ticks8th() uint32 {\n\treturn q.div(2)\n}\n\n\/\/ Ticks16th returns the ticks for a 16th note\nfunc (q MetricTicks) Ticks16th() uint32 {\n\treturn q.div(4)\n}\n\n\/\/ Ticks32th returns the ticks for a 32th note\nfunc (q MetricTicks) Ticks32th() uint32 {\n\treturn q.div(8)\n}\n\n\/\/ Ticks64th returns the ticks for a 64th note\nfunc (q MetricTicks) Ticks64th() uint32 {\n\treturn q.div(16)\n}\n\n\/\/ Ticks128th returns the ticks for a 128th note\nfunc (q MetricTicks) Ticks128th() uint32 {\n\treturn q.div(32)\n}\n\n\/\/ Ticks256th returns the ticks for a 256th note\nfunc (q MetricTicks) Ticks256th() uint32 {\n\treturn q.div(64)\n}\n\n\/\/ Ticks512th returns the ticks for a 512th note\nfunc (q MetricTicks) Ticks512th() uint32 {\n\treturn q.div(128)\n}\n\n\/\/ Ticks1024th returns the ticks for a 1024th note\nfunc (q MetricTicks) Ticks1024th() uint32 {\n\treturn q.div(256)\n}\n\n\/\/ String returns the string representation of the quarter note resolution\nfunc (q MetricTicks) String() string {\n\treturn fmt.Sprintf(\"%v MetricTicks\", q.Ticks4th())\n}\n\nfunc (q MetricTicks) timeformat() {}\n\n\/\/ Format is the common interface of all SMF file formats\ntype Format interface {\n\n\t\/\/ String returns the string representation of the SMF format.\n\tString() string\n\n\t\/\/ Type returns the type of the SMF file: 0 for SMF0, 1 for SMF1 and 2 for SMF2\n\tType() uint16\n\n\tsmfformat() \/\/ make the implementation exclusive to this package\n}\n\n\/\/ TimeFormat is the common interface of all SMF time formats\ntype TimeFormat interface {\n\tString() string\n\ttimeformat() \/\/ make the implementation exclusive to this package\n}\n\n\/\/ format is an implementation of Format\ntype format uint16\n\nfunc (f format) Type() uint16 {\n\treturn uint16(f)\n}\n\nfunc (f format) smfformat() {}\n\nfunc (f format) String() string {\n\tswitch f {\n\tcase SMF0:\n\t\treturn \"SMF0 (singletrack)\"\n\tcase SMF1:\n\t\treturn \"SMF1 (multitrack)\"\n\tcase SMF2:\n\t\treturn \"SMF2 (sequential tracks)\"\n\t}\n\tpanic(\"unreachable\")\n}\n<|endoftext|>"} {"text":"<commit_before>package smtpapi\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"unicode\/utf16\"\n)\n\nconst Version = \"0.4.2\"\n\n\/\/ SMTPAPIHeader will be used to set up X-SMTPAPI params\ntype SMTPAPIHeader struct {\n\tTo []string `json:\"to,omitempty\"`\n\tSub map[string][]string `json:\"sub,omitempty\"`\n\tSection map[string]string `json:\"section,omitempty\"`\n\tCategory []string `json:\"category,omitempty\"`\n\tUniqueArgs map[string]string `json:\"unique_args,omitempty\"`\n\tFilters map[string]Filter `json:\"filters,omitempty\"`\n\tASMGroupID int `json:\"asm_group_id,omitempty\"`\n\tSendAt int64 `json:\"send_at,omitempty\"`\n\tSendEachAt []int64 `json:\"send_each_at,omitempty\"`\n\tIpPool string `json:\"ip_pool,omitempty\"`\n\tBatchID string `json:\"batch_id,omitempty\"`\n}\n\n\/\/ Filter represents an App\/Filter and its settings\ntype Filter struct {\n\tSettings map[string]interface{} `json:\"settings,omitempty\"`\n}\n\n\/\/ NewSMTPAPIHeader creates a new header struct\nfunc NewSMTPAPIHeader() *SMTPAPIHeader {\n\treturn &SMTPAPIHeader{}\n}\n\n\/\/ AddTo appends a single email to the To header\nfunc (h *SMTPAPIHeader) AddTo(email string) {\n\th.To = append(h.To, email)\n}\n\n\/\/ AddTos appends multiple emails to the To header\nfunc (h *SMTPAPIHeader) AddTos(emails []string) {\n\tfor i := 0; i < len(emails); i++ {\n\t\th.AddTo(emails[i])\n\t}\n}\n\n\/\/ SetTos sets the value of the To header\nfunc (h *SMTPAPIHeader) SetTos(emails []string) {\n\th.To = emails\n}\n\n\/\/ AddSubstitution adds a new substitution to a specific key\nfunc (h *SMTPAPIHeader) AddSubstitution(key, sub string) {\n\tif h.Sub == nil {\n\t\th.Sub = make(map[string][]string)\n\t}\n\th.Sub[key] = append(h.Sub[key], sub)\n}\n\n\/\/ AddSubstitutions adds a multiple substitutions to a specific key\nfunc (h *SMTPAPIHeader) AddSubstitutions(key string, subs []string) {\n\tfor i := 0; i < len(subs); i++ {\n\t\th.AddSubstitution(key, subs[i])\n\t}\n}\n\n\/\/ SetSubstitutions sets the value of the substitutions on the Sub header\nfunc (h *SMTPAPIHeader) SetSubstitutions(sub map[string][]string) {\n\th.Sub = sub\n}\n\n\/\/ AddSection sets the value for a specific section\nfunc (h *SMTPAPIHeader) AddSection(section, value string) {\n\tif h.Section == nil {\n\t\th.Section = make(map[string]string)\n\t}\n\th.Section[section] = value\n}\n\n\/\/ SetSections sets the value for the Section header\nfunc (h *SMTPAPIHeader) SetSections(sections map[string]string) {\n\th.Section = sections\n}\n\n\/\/ AddCategory adds a new category to the Category header\nfunc (h *SMTPAPIHeader) AddCategory(category string) {\n\th.Category = append(h.Category, category)\n}\n\n\/\/ AddCategories adds multiple categories to the Category header\nfunc (h *SMTPAPIHeader) AddCategories(categories []string) {\n\tfor i := 0; i < len(categories); i++ {\n\t\th.AddCategory(categories[i])\n\t}\n}\n\n\/\/ SetCategories will set the value of the Categories field\nfunc (h *SMTPAPIHeader) SetCategories(categories []string) {\n\th.Category = categories\n}\n\n\/\/ SetASMGroupID will set the value of the ASMGroupID field\nfunc (h *SMTPAPIHeader) SetASMGroupID(groupID int) {\n\th.ASMGroupID = groupID\n}\n\n\/\/ AddUniqueArg will set the value of a specific argument\nfunc (h *SMTPAPIHeader) AddUniqueArg(arg, value string) {\n\tif h.UniqueArgs == nil {\n\t\th.UniqueArgs = make(map[string]string)\n\t}\n\th.UniqueArgs[arg] = value\n}\n\n\/\/ SetUniqueArgs will set the value of the Unique_args header\nfunc (h *SMTPAPIHeader) SetUniqueArgs(args map[string]string) {\n\th.UniqueArgs = args\n}\n\n\/\/ AddFilter will set the specific setting for a filter\nfunc (h *SMTPAPIHeader) AddFilter(filter, setting string, value interface{}) {\n\tif h.Filters == nil {\n\t\th.Filters = make(map[string]Filter)\n\t}\n\tif _, ok := h.Filters[filter]; !ok {\n\t\th.Filters[filter] = Filter{\n\t\t\tSettings: make(map[string]interface{}),\n\t\t}\n\t}\n\th.Filters[filter].Settings[setting] = value\n}\n\n\/\/ SetFilter takes in a Filter struct with predetermined settings and sets it for such Filter key\nfunc (h *SMTPAPIHeader) SetFilter(filter string, value *Filter) {\n\tif h.Filters == nil {\n\t\th.Filters = make(map[string]Filter)\n\t}\n\th.Filters[filter] = *value\n}\n\n\/\/ SetSendAt takes in a timestamp which determines when the email will be sent\nfunc (h *SMTPAPIHeader) SetSendAt(sendAt int64) {\n\th.SendAt = sendAt\n}\n\n\/\/ AddSendEachAt takes in a timestamp and pushes it into a list Must match length of To emails\nfunc (h *SMTPAPIHeader) AddSendEachAt(sendEachAt int64) {\n\th.SendEachAt = append(h.SendEachAt, sendEachAt)\n}\n\n\/\/ SetSendEachAt takes an array of timestamps. Must match length of To emails\nfunc (h *SMTPAPIHeader) SetSendEachAt(sendEachAt []int64) {\n\th.SendEachAt = sendEachAt\n}\n\n\/\/ SetIpPool takes a strings and sets the IpPool field\nfunc (h *SMTPAPIHeader) SetIpPool(ipPool string) {\n\th.IpPool = ipPool\n}\n\n\/\/ Unicode escape\nfunc escapeUnicode(input string) string {\n\t\/\/var buffer bytes.Buffer\n\tbuffer := bytes.NewBufferString(\"\")\n\tfor _, r := range input {\n\t\tif r > 65535 {\n\t\t\t\/\/ surrogate pair\n\t\t\tvar r1, r2 = utf16.EncodeRune(r)\n\t\t\tvar s = fmt.Sprintf(\"\\\\u%x\\\\u%x\", r1, r2)\n\t\t\tbuffer.WriteString(s)\n\t\t} else if r > 127 {\n\t\t\tvar s = fmt.Sprintf(\"\\\\u%04x\", r)\n\t\t\tbuffer.WriteString(s)\n\t\t} else {\n\t\t\tvar s = fmt.Sprintf(\"%c\", r)\n\t\t\tbuffer.WriteString(s)\n\t\t}\n\t}\n\treturn buffer.String()\n}\n\n\/\/ JSONString returns the representation of the Header\nfunc (h *SMTPAPIHeader) JSONString() (string, error) {\n\theaders, e := json.Marshal(h)\n\treturn escapeUnicode(string(headers)), e\n}\n\n\/\/ Load allows you to load a pre-formed x-smtpapi header\nfunc (h *SMTPAPIHeader) Load(b []byte) error {\n\treturn json.Unmarshal(b, h)\n}\n<commit_msg>QEA-1700: support asm_groups_to_display<commit_after>package smtpapi\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"unicode\/utf16\"\n)\n\nconst Version = \"0.4.2\"\n\n\/\/ SMTPAPIHeader will be used to set up X-SMTPAPI params\ntype SMTPAPIHeader struct {\n\tTo []string `json:\"to,omitempty\"`\n\tSub map[string][]string `json:\"sub,omitempty\"`\n\tSection map[string]string `json:\"section,omitempty\"`\n\tCategory []string `json:\"category,omitempty\"`\n\tUniqueArgs map[string]string `json:\"unique_args,omitempty\"`\n\tFilters map[string]Filter `json:\"filters,omitempty\"`\n\tASMGroupID int `json:\"asm_group_id,omitempty\"`\n\tASMGroups []int `json:\"asm_groups_to_display,omitempty\"`\n\tSendAt int64 `json:\"send_at,omitempty\"`\n\tSendEachAt []int64 `json:\"send_each_at,omitempty\"`\n\tIpPool string `json:\"ip_pool,omitempty\"`\n\tBatchID string `json:\"batch_id,omitempty\"`\n}\n\n\/\/ Filter represents an App\/Filter and its settings\ntype Filter struct {\n\tSettings map[string]interface{} `json:\"settings,omitempty\"`\n}\n\n\/\/ NewSMTPAPIHeader creates a new header struct\nfunc NewSMTPAPIHeader() *SMTPAPIHeader {\n\treturn &SMTPAPIHeader{}\n}\n\n\/\/ AddTo appends a single email to the To header\nfunc (h *SMTPAPIHeader) AddTo(email string) {\n\th.To = append(h.To, email)\n}\n\n\/\/ AddTos appends multiple emails to the To header\nfunc (h *SMTPAPIHeader) AddTos(emails []string) {\n\tfor i := 0; i < len(emails); i++ {\n\t\th.AddTo(emails[i])\n\t}\n}\n\n\/\/ SetTos sets the value of the To header\nfunc (h *SMTPAPIHeader) SetTos(emails []string) {\n\th.To = emails\n}\n\n\/\/ AddSubstitution adds a new substitution to a specific key\nfunc (h *SMTPAPIHeader) AddSubstitution(key, sub string) {\n\tif h.Sub == nil {\n\t\th.Sub = make(map[string][]string)\n\t}\n\th.Sub[key] = append(h.Sub[key], sub)\n}\n\n\/\/ AddSubstitutions adds a multiple substitutions to a specific key\nfunc (h *SMTPAPIHeader) AddSubstitutions(key string, subs []string) {\n\tfor i := 0; i < len(subs); i++ {\n\t\th.AddSubstitution(key, subs[i])\n\t}\n}\n\n\/\/ SetSubstitutions sets the value of the substitutions on the Sub header\nfunc (h *SMTPAPIHeader) SetSubstitutions(sub map[string][]string) {\n\th.Sub = sub\n}\n\n\/\/ AddSection sets the value for a specific section\nfunc (h *SMTPAPIHeader) AddSection(section, value string) {\n\tif h.Section == nil {\n\t\th.Section = make(map[string]string)\n\t}\n\th.Section[section] = value\n}\n\n\/\/ SetSections sets the value for the Section header\nfunc (h *SMTPAPIHeader) SetSections(sections map[string]string) {\n\th.Section = sections\n}\n\n\/\/ AddCategory adds a new category to the Category header\nfunc (h *SMTPAPIHeader) AddCategory(category string) {\n\th.Category = append(h.Category, category)\n}\n\n\/\/ AddCategories adds multiple categories to the Category header\nfunc (h *SMTPAPIHeader) AddCategories(categories []string) {\n\tfor i := 0; i < len(categories); i++ {\n\t\th.AddCategory(categories[i])\n\t}\n}\n\n\/\/ SetCategories will set the value of the Categories field\nfunc (h *SMTPAPIHeader) SetCategories(categories []string) {\n\th.Category = categories\n}\n\n\/\/ SetASMGroupID will set the value of the ASMGroupID field\nfunc (h *SMTPAPIHeader) SetASMGroupID(groupID int) {\n\th.ASMGroupID = groupID\n}\n\n\/\/ AddASMGroupToDisplay adds a new ASM group ID to be displayed\nfunc (h *SMTPAPIHeader) AddASMGroupToDisplay(groupID int) {\n\th.ASMGroups = append(h.ASMGroups, groupID)\n}\n\n\/\/ AddASMGroupsToDisplay adds multiple ASM group IDs to be displayed\nfunc (h *SMTPAPIHeader) AddASMGroupsToDisplay(groupIDs []int) {\n\tfor i := 0; i < len(groupIDs); i++ {\n\t\th.AddASMGroupToDisplay(groupIDs[i])\n\t}\n}\n\n\/\/ SetASMGroupsToDisplay will set the value of the ASMGroups field\nfunc (h *SMTPAPIHeader) SetASMGroupsToDisplay(groups []int) {\n\th.ASMGroups = groups\n}\n\n\/\/ AddUniqueArg will set the value of a specific argument\nfunc (h *SMTPAPIHeader) AddUniqueArg(arg, value string) {\n\tif h.UniqueArgs == nil {\n\t\th.UniqueArgs = make(map[string]string)\n\t}\n\th.UniqueArgs[arg] = value\n}\n\n\/\/ SetUniqueArgs will set the value of the Unique_args header\nfunc (h *SMTPAPIHeader) SetUniqueArgs(args map[string]string) {\n\th.UniqueArgs = args\n}\n\n\/\/ AddFilter will set the specific setting for a filter\nfunc (h *SMTPAPIHeader) AddFilter(filter, setting string, value interface{}) {\n\tif h.Filters == nil {\n\t\th.Filters = make(map[string]Filter)\n\t}\n\tif _, ok := h.Filters[filter]; !ok {\n\t\th.Filters[filter] = Filter{\n\t\t\tSettings: make(map[string]interface{}),\n\t\t}\n\t}\n\th.Filters[filter].Settings[setting] = value\n}\n\n\/\/ SetFilter takes in a Filter struct with predetermined settings and sets it for such Filter key\nfunc (h *SMTPAPIHeader) SetFilter(filter string, value *Filter) {\n\tif h.Filters == nil {\n\t\th.Filters = make(map[string]Filter)\n\t}\n\th.Filters[filter] = *value\n}\n\n\/\/ SetSendAt takes in a timestamp which determines when the email will be sent\nfunc (h *SMTPAPIHeader) SetSendAt(sendAt int64) {\n\th.SendAt = sendAt\n}\n\n\/\/ AddSendEachAt takes in a timestamp and pushes it into a list Must match length of To emails\nfunc (h *SMTPAPIHeader) AddSendEachAt(sendEachAt int64) {\n\th.SendEachAt = append(h.SendEachAt, sendEachAt)\n}\n\n\/\/ SetSendEachAt takes an array of timestamps. Must match length of To emails\nfunc (h *SMTPAPIHeader) SetSendEachAt(sendEachAt []int64) {\n\th.SendEachAt = sendEachAt\n}\n\n\/\/ SetIpPool takes a strings and sets the IpPool field\nfunc (h *SMTPAPIHeader) SetIpPool(ipPool string) {\n\th.IpPool = ipPool\n}\n\n\/\/ Unicode escape\nfunc escapeUnicode(input string) string {\n\t\/\/var buffer bytes.Buffer\n\tbuffer := bytes.NewBufferString(\"\")\n\tfor _, r := range input {\n\t\tif r > 65535 {\n\t\t\t\/\/ surrogate pair\n\t\t\tvar r1, r2 = utf16.EncodeRune(r)\n\t\t\tvar s = fmt.Sprintf(\"\\\\u%x\\\\u%x\", r1, r2)\n\t\t\tbuffer.WriteString(s)\n\t\t} else if r > 127 {\n\t\t\tvar s = fmt.Sprintf(\"\\\\u%04x\", r)\n\t\t\tbuffer.WriteString(s)\n\t\t} else {\n\t\t\tvar s = fmt.Sprintf(\"%c\", r)\n\t\t\tbuffer.WriteString(s)\n\t\t}\n\t}\n\treturn buffer.String()\n}\n\n\/\/ JSONString returns the representation of the Header\nfunc (h *SMTPAPIHeader) JSONString() (string, error) {\n\theaders, e := json.Marshal(h)\n\treturn escapeUnicode(string(headers)), e\n}\n\n\/\/ Load allows you to load a pre-formed x-smtpapi header\nfunc (h *SMTPAPIHeader) Load(b []byte) error {\n\treturn json.Unmarshal(b, h)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nSoultion for Project Euler Problem #17 - https:\/\/projecteuler.net\/problem=17\n\n(c) 2016 dpetker\n*\/\npackage main\n\nimport (\n\t\"fmt\"\n)\n\nvar baseDict = map[int]int{\n\t1: len(\"one\"),\n\t2: len(\"two\"),\n\t3: len(\"three\"),\n\t4: len(\"four\"),\n\t5: len(\"five\"),\n\t6: len(\"six\"),\n\t7: len(\"seven\"),\n\t8: len(\"eight\"),\n\t9: len(\"nine\"),\n\t10: len(\"ten\"),\n\t11: len(\"eleven\"),\n\t12: len(\"twelve\"),\n\t13: len(\"thirteen\"),\n\t14: len(\"fourteen\"),\n\t15: len(\"fifteen\"),\n\t16: len(\"sixteen\"),\n\t17: len(\"seventeen\"),\n\t18: len(\"eighteen\"),\n\t19: len(\"nineteen\"),\n\t20: len(\"twenty\"),\n\t30: len(\"thirty\"),\n\t40: len(\"fourty\"),\n\t50: len(\"fifty\"),\n\t60: len(\"sixty\"),\n\t70: len(\"seventy\"),\n\t80: len(\"eighty\"),\n\t90: len(\"ninety\"),\n\t100: len(\"hundred\"),\n\t1000: len(\"thousand\"),\n}\n\nfunc main() {\n\tsum := 0\n\n\tfor i := 1; i < 1001; i++ {\n\t\tsum += findCharCount(i)\n\t}\n\n\tfmt.Println(sum)\n}\n\nfunc findCharCount(num int) int {\n\tif num <= 20 {\n\t\treturn baseDict[num]\n\t}\n\n\treturn 0\n}\n<commit_msg>Solution for problem 17<commit_after>\/*\nSoultion for Project Euler Problem #17 - https:\/\/projecteuler.net\/problem=17\n\n(c) 2016 dpetker\n*\/\npackage main\n\nimport (\n\t\"fmt\"\n)\n\nvar baseDict = map[int]int{\n\t0: 0,\n\t1: len(\"one\"),\n\t2: len(\"two\"),\n\t3: len(\"three\"),\n\t4: len(\"four\"),\n\t5: len(\"five\"),\n\t6: len(\"six\"),\n\t7: len(\"seven\"),\n\t8: len(\"eight\"),\n\t9: len(\"nine\"),\n\t10: len(\"ten\"),\n\t11: len(\"eleven\"),\n\t12: len(\"twelve\"),\n\t13: len(\"thirteen\"),\n\t14: len(\"fourteen\"),\n\t15: len(\"fifteen\"),\n\t16: len(\"sixteen\"),\n\t17: len(\"seventeen\"),\n\t18: len(\"eighteen\"),\n\t19: len(\"nineteen\"),\n\t20: len(\"twenty\"),\n\t30: len(\"thirty\"),\n\t40: len(\"forty\"),\n\t50: len(\"fifty\"),\n\t60: len(\"sixty\"),\n\t70: len(\"seventy\"),\n\t80: len(\"eighty\"),\n\t90: len(\"ninety\"),\n\t100: len(\"hundred\"),\n\t1000: len(\"onethousand\"),\n}\n\nfunc main() {\n\tsum := 0\n\n\tfor i := 1; i < 1001; i++ {\n\t\tsum += findCharCount(i)\n\t}\n\n\tfmt.Println(sum)\n}\n\nfunc findCharCount(num int) int {\n\tswitch {\n\tcase num <= 20 || num == 1000:\n\t\treturn baseDict[num]\n\tcase num > 20 && num < 100:\n\t\ttens := (num \/ 10) * 10\n\t\tones := num % 10\n\t\treturn baseDict[tens] + baseDict[ones]\n\tcase num >= 100 && num < 1000:\n\t\thundreds := num \/ 100\n\t\tremainder := num % 100\n\n\t\t\/\/ +3 letters for \"and\" as in, \"one hundred and forty two\"\n\t\ttempSum := baseDict[hundreds] + baseDict[100] + 3 + findCharCount(remainder)\n\n\t\tif remainder == 0 {\n\t\t\t\/\/ No \"and\" when we're right on a hundred\n\t\t\ttempSum -= 3\n\t\t}\n\n\t\treturn tempSum\n\t}\n\n\treturn 0\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/papplampe\/euler\/sieve\"\n\t\"strconv\"\n)\n\nvar (\n\tprimes []bool\n)\n\nconst (\n\tN = 1000000\n)\n\nfunc rotateBytes(s []byte) {\n\tlast := s[len(s)-1]\n\tfor i := len(s) - 1; i > 0; i-- {\n\t\ts[i] = s[i-1]\n\t}\n\ts[0] = last\n}\n\nfunc isCircularPrime(n uint64) bool {\n\tif !primes[n] {\n\t\treturn false\n\t}\n\tt := []byte(fmt.Sprint(n))\n\tif len(t) == 1 {\n\t\treturn true\n\t}\n\tfor i := 0; i < len(t); i++ {\n\t\trotateBytes(t)\n\t\tx, _ := strconv.Atoi(string(t))\n\t\tif !primes[x] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc main() {\n\tvar i, s uint64\n\tprimes = sieve.Sieve(N)\n\tfor i = 1; i < N; i++ {\n\t\tif isCircularPrime(i) {\n\t\t\tfmt.Println(i)\n\t\t\ts++\n\t\t}\n\t}\n\tfmt.Println(s)\n}\n<commit_msg>35 blah<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/papplampe\/euler\/sieve\"\n\t\"strconv\"\n)\n\nvar (\n\tprimes []bool\n)\n\nconst (\n\tN = 1000000\n)\n\nfunc rotateBytes(s []byte) {\n\tlast := s[len(s)-1]\n\tfor i := len(s) - 1; i > 0; i-- {\n\t\ts[i] = s[i-1]\n\t}\n\ts[0] = last\n}\n\nfunc isCircularPrime(n uint64) bool {\n\tif !primes[n] {\n\t\treturn false\n\t}\n\tt := []byte(fmt.Sprint(n))\n\tif len(t) == 1 {\n\t\treturn true\n\t}\n\tfor i := 0; i < len(t); i++ {\n\t\trotateBytes(t)\n\t\tx, _ := strconv.Atoi(string(t))\n\t\tif !primes[x] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc main() {\n\tvar i, s uint64\n\tprimes = sieve.Sieve(N)\n\tfor i = 1; i < N; i++ {\n\t\tif isCircularPrime(i) {\n\t\t\ts++\n\t\t}\n\t}\n\tfmt.Println(s)\n}\n<|endoftext|>"} {"text":"<commit_before>package libytcd\n\nimport (\n\t\"libGFC\"\n)\n\ntype Server struct {\n\tports []Port\n\ttransaction chan TransactionError\n\tblocks chan BlockError\n\tstate *libGFC.GFCChain\n}\n\nfunc NewServer(ports []Port) (s *Server) {\n\ts = new(Server)\n\ts.ports = ports\n\ts.state = libGFC.NewGFCChain()\n\n\ts.blocks = make(chan BlockError)\n\ts.transaction = make(chan TransactionError)\n\n\tgo s.handleChannels()\n\n\tfor _, p := range ports {\n\t\tp.AddTransactionChannel(s.transaction)\n\t\tp.AddBlockChannel(s.blocks)\n\t}\n\n\treturn\n}\n\nfunc (s *Server) handleChannels() {\n\tfor {\n\t\tselect {\n\t\tcase c := <-s.transaction:\n\t\t\tupdate := c.BlockMessage\n\t\t\terr := update.Verify(s.state)\n\t\t\tif err != nil {\n\t\t\t\tc.error <- err\n\t\t\t} else {\n\t\t\t\tfor _, p := range s.ports {\n\t\t\t\t\tif p != c.Source {\n\t\t\t\t\t\tp.AddTransaction(update)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase block := <-s.blocks:\n\t\t\tfor _, v := range block.BlockMessage {\n\t\t\t\terr := v.Verify(s.state)\n\t\t\t\tif err != nil {\n\t\t\t\t\tblock.error <- err\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tv.Apply(s.state)\n\t\t\t}\n\n\t\t\tfor _, p := range s.ports {\n\t\t\t\tif p != block.Source {\n\t\t\t\t\tp.AddBlock(block.BlockMessage)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>Fix blocking bug in ytcd<commit_after>package libytcd\n\nimport (\n\t\"libGFC\"\n)\n\ntype Server struct {\n\tports []Port\n\ttransaction chan TransactionError\n\tblocks chan BlockError\n\tstate *libGFC.GFCChain\n}\n\nfunc NewServer(ports []Port) (s *Server) {\n\ts = new(Server)\n\ts.ports = ports\n\ts.state = libGFC.NewGFCChain()\n\n\ts.blocks = make(chan BlockError)\n\ts.transaction = make(chan TransactionError)\n\n\tgo s.handleChannels()\n\n\tfor _, p := range ports {\n\t\tp.AddTransactionChannel(s.transaction)\n\t\tp.AddBlockChannel(s.blocks)\n\t}\n\n\treturn\n}\n\nfunc (s *Server) handleChannels() {\n\tfor {\n\t\tselect {\n\t\tcase c := <-s.transaction:\n\t\t\tupdate := c.BlockMessage\n\t\t\terr := update.Verify(s.state)\n\t\t\tif err != nil {\n\t\t\t\tc.error <- err\n\t\t\t} else {\n\t\t\t\tc.error <- nil\n\t\t\t\tfor _, p := range s.ports {\n\t\t\t\t\tif p != c.Source {\n\t\t\t\t\t\tp.AddTransaction(update)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase block := <-s.blocks:\n\t\t\tvar err error = nil\n\t\t\tfor _, v := range block.BlockMessage {\n\t\t\t\terr = v.Verify(s.state)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tv.Apply(s.state)\n\t\t\t}\n\t\t\tblock.error <- err\n\n\t\t\tfor _, p := range s.ports {\n\t\t\t\tif p != block.Source {\n\t\t\t\t\tp.AddBlock(block.BlockMessage)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 Brad Fitzpatrick. All rights reserved. See LICENSE file.\n\/\/\n\/\/ This program mirrors FotoBilder galleries & pictures. FotoBilder\n\/\/ is the software the runs LiveJournal's photo galleries, and\n\/\/ previously ran picpix.com, which is now apparently shutting down.\n\/\/\n\/\/ This tool fetches public galleries & photos only when passwords\n\/\/ aren't handy.\n\/\/\n\/\/ If you have the passwords, use fotoup.pl and its --backup mode,\n\/\/ which uses the authenticated API.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"http\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"sync\"\n\t\"time\"\n\t\"xml\"\n)\n\nimport _ \"http\/pprof\"\n\nvar flagBase *string = flag.String(\"base\", \"\",\n\t\"e.g. http:\/\/www.picpix.com\/username (no trailing slash)\")\n\nvar flagDest *string = flag.String(\"dest\", \"\", \"Destination backup root\")\n\nvar flagSloppy *bool = flag.Bool(\"sloppy\", false, \"Continue on errors\")\nvar flagMaxNetwork *int = flag.Int(\"concurrency\", 20, \"Max concurrent requests\")\n\nvar flagProfile *string = flag.String(\"profile\", \"\",\n\t\"the listen address to run a webserver for profiling; empty to leave disabled\")\n\nvar galleryMutex sync.Mutex\nvar galleryMap map[string]*Gallery = make(map[string]*Gallery)\n\nvar picMutex sync.Mutex\nvar picMap map[string]*MediaSetItem = make(map[string]*MediaSetItem)\n\nvar networkOpGate chan bool\n\n\/\/ Consult ulimit -n; you may have to up your\n\/\/ \/etc\/security\/limits.conf's nofile.\nvar localOpGate chan bool = make(chan bool, 10000)\n\nvar opsMutex sync.Mutex\nvar opsInFlight int\n\nvar errorMutex sync.Mutex\nvar errors []string = make([]string, 0)\n\nvar galleryPattern *regexp.Regexp = regexp.MustCompile(\n\t\"\/gallery\/([0-9a-z][0-9a-z][0-9a-z][0-9a-z][0-9a-z][0-9a-z][0-9a-z][0-9a-z])\")\nvar picPattern *regexp.Regexp = regexp.MustCompile(\n\t\"\/pic\/([0-9a-z][0-9a-z][0-9a-z][0-9a-z][0-9a-z][0-9a-z][0-9a-z][0-9a-z])\")\n\nfunc addError(msg string) {\n\terrorMutex.Lock()\n\tdefer errorMutex.Unlock()\n\terrors = append(errors, msg)\n\tif *flagSloppy {\n\t\tlog.Printf(\"ERROR: %s\", msg)\n\t} else {\n\t\tlog.Exitf(\"ERROR: %s\", msg)\n\t}\n}\n\ntype Operation interface {\n\tDone()\n}\n\ntype NetworkOperation int\ntype LocalOperation int\n\nfunc NewLocalOperation() Operation {\n\topsMutex.Lock()\n\topsInFlight++\n\topsMutex.Unlock()\n\tlocalOpGate <- true \/\/ buffer-limited, may\/should block\n\treturn LocalOperation(0)\n}\n\nfunc NewNetworkOperation() Operation {\n\topsMutex.Lock()\n\topsInFlight++\n\topsMutex.Unlock()\n\tnetworkOpGate <- true\n\treturn NetworkOperation(0)\n}\n\nfunc (o LocalOperation) Done() {\n\t<-localOpGate\n\topsMutex.Lock()\n\tdefer opsMutex.Unlock()\n\topsInFlight--\n}\n\nfunc (o NetworkOperation) Done() {\n\t<-networkOpGate\n\topsMutex.Lock()\n\tdefer opsMutex.Unlock()\n\topsInFlight--\n}\n\nfunc OperationsInFlight() int {\n\topsMutex.Lock()\n\tdefer opsMutex.Unlock()\n\treturn opsInFlight\n}\n\nfunc fetchUrlToFile(url, filename string, expectedSize int64) bool {\n\tfi, statErr := os.Stat(filename)\n\tif statErr == nil &&\n\t\t(expectedSize == -1 && fi.Size > 0 ||\n\t\t\texpectedSize == fi.Size) {\n\t\t\/\/ TODO: re-fetch mode?\n\t\treturn true\n\t}\n\n\tnetop := NewNetworkOperation()\n\tdefer netop.Done()\n\n\tres, _, err := http.Get(url)\n\tif err != nil {\n\t\taddError(fmt.Sprintf(\"Error fetching %s: %v\", url, err))\n\t\treturn false\n\t}\n\tdefer res.Body.Close()\n\n\tfileBytes, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\taddError(fmt.Sprintf(\"Error reading XML from %s: %v\", url, err))\n\t\treturn false\n\t}\n\n\terr = ioutil.WriteFile(filename, fileBytes, 0600)\n\tif err != nil {\n\t\taddError(fmt.Sprintf(\"Error writing file %s: %v\", filename, err))\n\t\treturn false\n\t}\n\treturn true\n}\n\ntype Gallery struct {\n\tkey string\n}\n\nfunc (g *Gallery) XmlUrl() string {\n\treturn fmt.Sprintf(\"%s\/gallery\/%s.xml\", *flagBase, g.key)\n}\n\nfunc (g *Gallery) Fetch(op Operation) {\n\tdefer op.Done()\n\n\tgalXmlFilename := fmt.Sprintf(\"%s\/gallery-%s.xml\", *flagDest, g.key)\n\tif fetchUrlToFile(g.XmlUrl(), galXmlFilename, -1) {\n\t\tgo fetchPhotosInGallery(galXmlFilename, NewLocalOperation())\n\t}\n}\n\ntype DigestInfo struct {\n\tXMLName xml.Name \"digest\"\n\tType string \"attr\"\n\tValue string \"chardata\"\n}\n\ntype MediaFile struct {\n\tXMLName xml.Name \"file\"\n\tDigest DigestInfo\n\tMime string\n\tWidth int\n\tHeight int\n\tBytes int64\n\tUrl string \/\/ the raw URL\n}\n\ntype MediaSetItem struct {\n\tXMLName xml.Name \"mediaSetItem\"\n\tTitle string\n\tDescription string\n\tInfoURL string \/\/ the xml URL\n\tFile MediaFile\n\n\t\/\/ Not in the XML:\n\tkey string \/\/ the 8 chars\n}\n\nfunc (p *MediaSetItem) XmlUrl() string {\n\treturn fmt.Sprintf(\"%s\/pic\/%s.xml\", *flagBase, p.key)\n}\n\nfunc (p *MediaSetItem) BlobUrl() string {\n\treturn fmt.Sprintf(\"%s\/pic\/%s\", *flagBase, p.key)\n}\n\nfunc (p *MediaSetItem) XmlBackupFilename() string {\n\treturn fmt.Sprintf(\"%s\/pic-%s.xml\", *flagDest, p.key)\n}\n\nfunc (p *MediaSetItem) BlobBackupFilename() string {\n\tvar ext string\n\tswitch p.File.Mime {\n\tcase \"image\/jpeg\":\n\t\text = \"jpg\"\n\tcase \"image\/png\":\n\t\text = \"png\"\n\tcase \"image\/gif\":\n\t\text = \"gif\"\n\t}\n\treturn fmt.Sprintf(\"%s\/pic-%s.%s\", *flagDest, p.key, ext)\n}\n\nfunc (p *MediaSetItem) Fetch(op Operation) {\n\tdefer op.Done()\n\tif !fetchUrlToFile(p.XmlUrl(), p.XmlBackupFilename(), -1) {\n\t\treturn\n\t}\n\n\tif p.File.Bytes <= 0 {\n\t\tpanic(\"expected file to have some known file size\")\n\t}\n\tfetchUrlToFile(p.BlobUrl(), p.BlobBackupFilename(), p.File.Bytes)\n}\n\ntype MediaSetItemsWrapper struct {\n\tXMLName xml.Name \"mediaSetItems\"\n\tMediaSetItem []MediaSetItem\n}\n\ntype LinkedFromSet struct {\n\tXMLName xml.Name \"linkedFrom\"\n\tInfoURL []string \/\/ xml gallery URLs of 'parent' galleries (not a DAG)\n}\n\ntype LinkedToSet struct {\n\tXMLName xml.Name \"linkedTo\"\n\tInfoURL []string \/\/ xml gallery URLs of 'children' galleries (not a DAG)\n}\n\ntype MediaSet struct {\n\tXMLName xml.Name \"mediaSet\"\n\tMediaSetItems MediaSetItemsWrapper\n\tLinkedFrom LinkedFromSet\n\tLinkedTo LinkedToSet\n}\n\nfunc fetchPhotosInGallery(filename string, op Operation) {\n\tdefer op.Done()\n\n\tf, err := os.Open(filename, os.O_RDONLY, 0)\n\tif err != nil {\n\t\taddError(fmt.Sprintf(\"Failed to open %s: %v\", filename, err))\n\t\treturn\n\t}\n\tdefer f.Close()\n\tmediaSet := new(MediaSet)\n\terr = xml.Unmarshal(f, mediaSet)\n\tif err != nil {\n\t\taddError(fmt.Sprintf(\"Failed to unmarshal %s: %v\", filename, err))\n\t\treturn\n\t}\n\n\t\/\/ Learn about new galleries, potentially?\n\tfor _, url := range mediaSet.LinkedFrom.InfoURL {\n\t\tnoteGallery(url)\n\t}\n\tfor _, url := range mediaSet.LinkedTo.InfoURL {\n\t\tnoteGallery(url)\n\t}\n\n\t\/\/log.Printf(\"Parse of %s is: %q\", filename, mediaSet)\n\tfor _, item := range mediaSet.MediaSetItems.MediaSetItem {\n\t\titem.key = findKey(item.InfoURL, picPattern)\n\t\t\/\/log.Printf(\" pic: %s\", item.InfoURL)\n\t\tnotePhoto(&item)\n\t}\n}\n\nfunc knownGalleries() int {\n\tgalleryMutex.Lock()\n\tdefer galleryMutex.Unlock()\n\treturn len(galleryMap)\n}\n\nfunc findKey(keyOrUrl string, pattern *regexp.Regexp) string {\n\tif len(keyOrUrl) == 8 {\n\t\treturn keyOrUrl\n\t}\n\n\tmatches := pattern.FindStringSubmatch(keyOrUrl)\n\tif matches == nil {\n\t\tpanic(\"Failed to parse: \" + keyOrUrl)\n\t}\n\tif len(matches[1]) != 8 {\n\t\tpanic(\"Expected match of 8 chars in \" + keyOrUrl)\n\t}\n\treturn matches[1]\n}\n\nfunc noteGallery(keyOrUrl string) {\n\tkey := findKey(keyOrUrl, galleryPattern)\n\tgalleryMutex.Lock()\n\tdefer galleryMutex.Unlock()\n\tif _, known := galleryMap[key]; known {\n\t\treturn\n\t}\n\tgallery := &Gallery{key}\n\tgalleryMap[key] = gallery\n\tlog.Printf(\"Gallery: %s\", gallery.XmlUrl())\n\tgo gallery.Fetch(NewLocalOperation())\n}\n\nfunc notePhoto(pic *MediaSetItem) {\n\tpicMutex.Lock()\n\tdefer picMutex.Unlock()\n\tif _, known := picMap[pic.key]; known {\n\t\treturn\n\t}\n\tpicMap[pic.key] = pic\n\tlog.Printf(\"Photo: %s\", pic.XmlUrl())\n\tgo pic.Fetch(NewLocalOperation())\n}\n\nfunc fetchGalleryPage(page int) {\n\tlog.Printf(\"Fetching gallery page %d\", page)\n\tres, finalUrl, err := http.Get(fmt.Sprintf(\"%s\/?sort=alpha&page=%d\",\n\t\t*flagBase, page))\n\tif err != nil {\n\t\tlog.Exitf(\"Error fetching gallery page %d: %v\", page, err)\n\t}\n\tlog.Printf(\"Fetched page %d: %s\", page, finalUrl)\n\thtmlBytes, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tlog.Exitf(\"Error reading gallery page %d's HTML: %v\", page, err)\n\t}\n\tres.Body.Close()\n\n\thtml := string(htmlBytes)\n\tlog.Printf(\"read %d bytes\", len(html))\n\n\tmatches := galleryPattern.FindAllStringSubmatch(html, -1)\n\tfor _, match := range matches {\n\t\tnoteGallery(match[1])\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif *flagDest == \"\" {\n\t\tlog.Exitf(\"No --dest given.\")\n\t}\n\tif *flagBase == \"\" {\n\t\tlog.Exitf(\"No --base URL given.\")\n\t}\n\n\tnetworkOpGate = make(chan bool, *flagMaxNetwork)\n\n\tlog.Printf(\"Starting.\")\n\n\tif *flagProfile != \"\" {\n\t\tlog.Printf(\"Listening on http:\/\/%s\", *flagProfile)\n\t\tgo http.ListenAndServe(*flagProfile, nil)\n\t}\n\n\tpage := 1\n\tfor {\n\t\tcountBefore := knownGalleries()\n\t\tfetchGalleryPage(page)\n\t\tcountAfter := knownGalleries()\n\t\tlog.Printf(\"Galleries known: %d\", countAfter)\n\t\tif countAfter == countBefore {\n\t\t\tlog.Printf(\"No new galleries, stopping.\")\n\t\t\tbreak\n\t\t}\n\t\tpage++\n\t}\n\n\tfor {\n\t\tn := OperationsInFlight()\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\t\tlog.Printf(\"%d Operations in-flight. Waiting.\", n)\n\t\ttime.Sleep(5 * 1e9)\n\t}\n\tlog.Printf(\"Done.\")\n}\n<commit_msg>remove stuttering<commit_after>\/\/ Copyright 2010 Brad Fitzpatrick. All rights reserved. See LICENSE file.\n\/\/\n\/\/ This program mirrors FotoBilder galleries & pictures. FotoBilder\n\/\/ is the software the runs LiveJournal's photo galleries, and\n\/\/ previously ran picpix.com, which is now apparently shutting down.\n\/\/\n\/\/ This tool fetches public galleries & photos only when passwords\n\/\/ aren't handy.\n\/\/\n\/\/ If you have the passwords, use fotoup.pl and its --backup mode,\n\/\/ which uses the authenticated API.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"http\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"sync\"\n\t\"time\"\n\t\"xml\"\n)\n\nimport _ \"http\/pprof\"\n\nvar flagBase = flag.String(\"base\", \"\",\n\t\"e.g. http:\/\/www.picpix.com\/username (no trailing slash)\")\n\nvar flagDest = flag.String(\"dest\", \"\", \"Destination backup root\")\n\nvar flagSloppy = flag.Bool(\"sloppy\", false, \"Continue on errors\")\nvar flagMaxNetwork = flag.Int(\"concurrency\", 20, \"Max concurrent requests\")\n\nvar flagProfile = flag.String(\"profile\", \"\",\n\t\"the listen address to run a webserver for profiling; empty to leave disabled\")\n\nvar galleryMutex sync.Mutex\nvar galleryMap = make(map[string]*Gallery)\n\nvar picMutex sync.Mutex\nvar picMap = make(map[string]*MediaSetItem)\n\nvar networkOpGate chan bool\n\n\/\/ Consult ulimit -n; you may have to up your\n\/\/ \/etc\/security\/limits.conf's nofile.\nvar localOpGate = make(chan bool, 10000)\n\nvar opsMutex sync.Mutex\nvar opsInFlight int\n\nvar errorMutex sync.Mutex\nvar errors = make([]string, 0)\n\nvar galleryPattern = regexp.MustCompile(\n\t\"\/gallery\/([0-9a-z][0-9a-z][0-9a-z][0-9a-z][0-9a-z][0-9a-z][0-9a-z][0-9a-z])\")\nvar picPattern = regexp.MustCompile(\n\t\"\/pic\/([0-9a-z][0-9a-z][0-9a-z][0-9a-z][0-9a-z][0-9a-z][0-9a-z][0-9a-z])\")\n\nfunc addError(msg string) {\n\terrorMutex.Lock()\n\tdefer errorMutex.Unlock()\n\terrors = append(errors, msg)\n\tif *flagSloppy {\n\t\tlog.Printf(\"ERROR: %s\", msg)\n\t} else {\n\t\tlog.Exitf(\"ERROR: %s\", msg)\n\t}\n}\n\ntype Operation interface {\n\tDone()\n}\n\ntype NetworkOperation int\ntype LocalOperation int\n\nfunc NewLocalOperation() Operation {\n\topsMutex.Lock()\n\topsInFlight++\n\topsMutex.Unlock()\n\tlocalOpGate <- true \/\/ buffer-limited, may\/should block\n\treturn LocalOperation(0)\n}\n\nfunc NewNetworkOperation() Operation {\n\topsMutex.Lock()\n\topsInFlight++\n\topsMutex.Unlock()\n\tnetworkOpGate <- true\n\treturn NetworkOperation(0)\n}\n\nfunc (o LocalOperation) Done() {\n\t<-localOpGate\n\topsMutex.Lock()\n\tdefer opsMutex.Unlock()\n\topsInFlight--\n}\n\nfunc (o NetworkOperation) Done() {\n\t<-networkOpGate\n\topsMutex.Lock()\n\tdefer opsMutex.Unlock()\n\topsInFlight--\n}\n\nfunc OperationsInFlight() int {\n\topsMutex.Lock()\n\tdefer opsMutex.Unlock()\n\treturn opsInFlight\n}\n\nfunc fetchUrlToFile(url, filename string, expectedSize int64) bool {\n\tfi, statErr := os.Stat(filename)\n\tif statErr == nil &&\n\t\t(expectedSize == -1 && fi.Size > 0 ||\n\t\t\texpectedSize == fi.Size) {\n\t\t\/\/ TODO: re-fetch mode?\n\t\treturn true\n\t}\n\n\tnetop := NewNetworkOperation()\n\tdefer netop.Done()\n\n\tres, _, err := http.Get(url)\n\tif err != nil {\n\t\taddError(fmt.Sprintf(\"Error fetching %s: %v\", url, err))\n\t\treturn false\n\t}\n\tdefer res.Body.Close()\n\n\tfileBytes, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\taddError(fmt.Sprintf(\"Error reading XML from %s: %v\", url, err))\n\t\treturn false\n\t}\n\n\terr = ioutil.WriteFile(filename, fileBytes, 0600)\n\tif err != nil {\n\t\taddError(fmt.Sprintf(\"Error writing file %s: %v\", filename, err))\n\t\treturn false\n\t}\n\treturn true\n}\n\ntype Gallery struct {\n\tkey string\n}\n\nfunc (g *Gallery) XmlUrl() string {\n\treturn fmt.Sprintf(\"%s\/gallery\/%s.xml\", *flagBase, g.key)\n}\n\nfunc (g *Gallery) Fetch(op Operation) {\n\tdefer op.Done()\n\n\tgalXmlFilename := fmt.Sprintf(\"%s\/gallery-%s.xml\", *flagDest, g.key)\n\tif fetchUrlToFile(g.XmlUrl(), galXmlFilename, -1) {\n\t\tgo fetchPhotosInGallery(galXmlFilename, NewLocalOperation())\n\t}\n}\n\ntype DigestInfo struct {\n\tXMLName xml.Name \"digest\"\n\tType string \"attr\"\n\tValue string \"chardata\"\n}\n\ntype MediaFile struct {\n\tXMLName xml.Name \"file\"\n\tDigest DigestInfo\n\tMime string\n\tWidth int\n\tHeight int\n\tBytes int64\n\tUrl string \/\/ the raw URL\n}\n\ntype MediaSetItem struct {\n\tXMLName xml.Name \"mediaSetItem\"\n\tTitle string\n\tDescription string\n\tInfoURL string \/\/ the xml URL\n\tFile MediaFile\n\n\t\/\/ Not in the XML:\n\tkey string \/\/ the 8 chars\n}\n\nfunc (p *MediaSetItem) XmlUrl() string {\n\treturn fmt.Sprintf(\"%s\/pic\/%s.xml\", *flagBase, p.key)\n}\n\nfunc (p *MediaSetItem) BlobUrl() string {\n\treturn fmt.Sprintf(\"%s\/pic\/%s\", *flagBase, p.key)\n}\n\nfunc (p *MediaSetItem) XmlBackupFilename() string {\n\treturn fmt.Sprintf(\"%s\/pic-%s.xml\", *flagDest, p.key)\n}\n\nfunc (p *MediaSetItem) BlobBackupFilename() string {\n\tvar ext string\n\tswitch p.File.Mime {\n\tcase \"image\/jpeg\":\n\t\text = \"jpg\"\n\tcase \"image\/png\":\n\t\text = \"png\"\n\tcase \"image\/gif\":\n\t\text = \"gif\"\n\t}\n\treturn fmt.Sprintf(\"%s\/pic-%s.%s\", *flagDest, p.key, ext)\n}\n\nfunc (p *MediaSetItem) Fetch(op Operation) {\n\tdefer op.Done()\n\tif !fetchUrlToFile(p.XmlUrl(), p.XmlBackupFilename(), -1) {\n\t\treturn\n\t}\n\n\tif p.File.Bytes <= 0 {\n\t\tpanic(\"expected file to have some known file size\")\n\t}\n\tfetchUrlToFile(p.BlobUrl(), p.BlobBackupFilename(), p.File.Bytes)\n}\n\ntype MediaSetItemsWrapper struct {\n\tXMLName xml.Name \"mediaSetItems\"\n\tMediaSetItem []MediaSetItem\n}\n\ntype LinkedFromSet struct {\n\tXMLName xml.Name \"linkedFrom\"\n\tInfoURL []string \/\/ xml gallery URLs of 'parent' galleries (not a DAG)\n}\n\ntype LinkedToSet struct {\n\tXMLName xml.Name \"linkedTo\"\n\tInfoURL []string \/\/ xml gallery URLs of 'children' galleries (not a DAG)\n}\n\ntype MediaSet struct {\n\tXMLName xml.Name \"mediaSet\"\n\tMediaSetItems MediaSetItemsWrapper\n\tLinkedFrom LinkedFromSet\n\tLinkedTo LinkedToSet\n}\n\nfunc fetchPhotosInGallery(filename string, op Operation) {\n\tdefer op.Done()\n\n\tf, err := os.Open(filename, os.O_RDONLY, 0)\n\tif err != nil {\n\t\taddError(fmt.Sprintf(\"Failed to open %s: %v\", filename, err))\n\t\treturn\n\t}\n\tdefer f.Close()\n\tmediaSet := new(MediaSet)\n\terr = xml.Unmarshal(f, mediaSet)\n\tif err != nil {\n\t\taddError(fmt.Sprintf(\"Failed to unmarshal %s: %v\", filename, err))\n\t\treturn\n\t}\n\n\t\/\/ Learn about new galleries, potentially?\n\tfor _, url := range mediaSet.LinkedFrom.InfoURL {\n\t\tnoteGallery(url)\n\t}\n\tfor _, url := range mediaSet.LinkedTo.InfoURL {\n\t\tnoteGallery(url)\n\t}\n\n\t\/\/log.Printf(\"Parse of %s is: %q\", filename, mediaSet)\n\tfor _, item := range mediaSet.MediaSetItems.MediaSetItem {\n\t\titem.key = findKey(item.InfoURL, picPattern)\n\t\t\/\/log.Printf(\" pic: %s\", item.InfoURL)\n\t\tnotePhoto(&item)\n\t}\n}\n\nfunc knownGalleries() int {\n\tgalleryMutex.Lock()\n\tdefer galleryMutex.Unlock()\n\treturn len(galleryMap)\n}\n\nfunc findKey(keyOrUrl string, pattern *regexp.Regexp) string {\n\tif len(keyOrUrl) == 8 {\n\t\treturn keyOrUrl\n\t}\n\n\tmatches := pattern.FindStringSubmatch(keyOrUrl)\n\tif matches == nil {\n\t\tpanic(\"Failed to parse: \" + keyOrUrl)\n\t}\n\tif len(matches[1]) != 8 {\n\t\tpanic(\"Expected match of 8 chars in \" + keyOrUrl)\n\t}\n\treturn matches[1]\n}\n\nfunc noteGallery(keyOrUrl string) {\n\tkey := findKey(keyOrUrl, galleryPattern)\n\tgalleryMutex.Lock()\n\tdefer galleryMutex.Unlock()\n\tif _, known := galleryMap[key]; known {\n\t\treturn\n\t}\n\tgallery := &Gallery{key}\n\tgalleryMap[key] = gallery\n\tlog.Printf(\"Gallery: %s\", gallery.XmlUrl())\n\tgo gallery.Fetch(NewLocalOperation())\n}\n\nfunc notePhoto(pic *MediaSetItem) {\n\tpicMutex.Lock()\n\tdefer picMutex.Unlock()\n\tif _, known := picMap[pic.key]; known {\n\t\treturn\n\t}\n\tpicMap[pic.key] = pic\n\tlog.Printf(\"Photo: %s\", pic.XmlUrl())\n\tgo pic.Fetch(NewLocalOperation())\n}\n\nfunc fetchGalleryPage(page int) {\n\tlog.Printf(\"Fetching gallery page %d\", page)\n\tres, finalUrl, err := http.Get(fmt.Sprintf(\"%s\/?sort=alpha&page=%d\",\n\t\t*flagBase, page))\n\tif err != nil {\n\t\tlog.Exitf(\"Error fetching gallery page %d: %v\", page, err)\n\t}\n\tlog.Printf(\"Fetched page %d: %s\", page, finalUrl)\n\thtmlBytes, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tlog.Exitf(\"Error reading gallery page %d's HTML: %v\", page, err)\n\t}\n\tres.Body.Close()\n\n\thtml := string(htmlBytes)\n\tlog.Printf(\"read %d bytes\", len(html))\n\n\tmatches := galleryPattern.FindAllStringSubmatch(html, -1)\n\tfor _, match := range matches {\n\t\tnoteGallery(match[1])\n\t}\n}\n\nfunc main() {\n\tflag.Parse()\n\n\tif *flagDest == \"\" {\n\t\tlog.Exitf(\"No --dest given.\")\n\t}\n\tif *flagBase == \"\" {\n\t\tlog.Exitf(\"No --base URL given.\")\n\t}\n\n\tnetworkOpGate = make(chan bool, *flagMaxNetwork)\n\n\tlog.Printf(\"Starting.\")\n\n\tif *flagProfile != \"\" {\n\t\tlog.Printf(\"Listening on http:\/\/%s\", *flagProfile)\n\t\tgo http.ListenAndServe(*flagProfile, nil)\n\t}\n\n\tpage := 1\n\tfor {\n\t\tcountBefore := knownGalleries()\n\t\tfetchGalleryPage(page)\n\t\tcountAfter := knownGalleries()\n\t\tlog.Printf(\"Galleries known: %d\", countAfter)\n\t\tif countAfter == countBefore {\n\t\t\tlog.Printf(\"No new galleries, stopping.\")\n\t\t\tbreak\n\t\t}\n\t\tpage++\n\t}\n\n\tfor {\n\t\tn := OperationsInFlight()\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\t\tlog.Printf(\"%d Operations in-flight. Waiting.\", n)\n\t\ttime.Sleep(5 * 1e9)\n\t}\n\tlog.Printf(\"Done.\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2021 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage aggregation\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"vitess.io\/vitess\/go\/test\/endtoend\/vtgate\/utils\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n\n\t\"vitess.io\/vitess\/go\/mysql\"\n\t\"vitess.io\/vitess\/go\/test\/endtoend\/cluster\"\n)\n\nfunc TestAggregateTypes(t *testing.T) {\n\tdefer cluster.PanicHandler(t)\n\tctx := context.Background()\n\tconn, err := mysql.Connect(ctx, &vtParams)\n\trequire.Nil(t, err)\n\tdefer conn.Close()\n\tutils.Exec(t, conn, \"insert into aggr_test(id, val1, val2) values(1,'a',1), (2,'A',1), (3,'b',1), (4,'c',3), (5,'c',4)\")\n\tutils.Exec(t, conn, \"insert into aggr_test(id, val1, val2) values(6,'d',null), (7,'e',null), (8,'E',1)\")\n\tutils.AssertMatches(t, conn, \"select val1, count(distinct val2), count(*) from aggr_test group by val1\", `[[VARCHAR(\"a\") INT64(1) INT64(2)] [VARCHAR(\"b\") INT64(1) INT64(1)] [VARCHAR(\"c\") INT64(2) INT64(2)] [VARCHAR(\"d\") INT64(0) INT64(1)] [VARCHAR(\"e\") INT64(1) INT64(2)]]`)\n\tutils.AssertMatches(t, conn, \"select val1, sum(distinct val2), sum(val2) from aggr_test group by val1\", `[[VARCHAR(\"a\") DECIMAL(1) DECIMAL(2)] [VARCHAR(\"b\") DECIMAL(1) DECIMAL(1)] [VARCHAR(\"c\") DECIMAL(7) DECIMAL(7)] [VARCHAR(\"d\") NULL NULL] [VARCHAR(\"e\") DECIMAL(1) DECIMAL(1)]]`)\n\tutils.AssertMatches(t, conn, \"select val1, count(distinct val2) k, count(*) from aggr_test group by val1 order by k desc, val1\", `[[VARCHAR(\"c\") INT64(2) INT64(2)] [VARCHAR(\"a\") INT64(1) INT64(2)] [VARCHAR(\"b\") INT64(1) INT64(1)] [VARCHAR(\"e\") INT64(1) INT64(2)] [VARCHAR(\"d\") INT64(0) INT64(1)]]`)\n\tutils.AssertMatches(t, conn, \"select val1, count(distinct val2) k, count(*) from aggr_test group by val1 order by k desc, val1 limit 4\", `[[VARCHAR(\"c\") INT64(2) INT64(2)] [VARCHAR(\"a\") INT64(1) INT64(2)] [VARCHAR(\"b\") INT64(1) INT64(1)] [VARCHAR(\"e\") INT64(1) INT64(2)]]`)\n\tutils.AssertMatches(t, conn, \"select ascii(val1) as a, count(*) from aggr_test group by a\", `[[INT64(65) INT64(1)] [INT64(69) INT64(1)] [INT64(97) INT64(1)] [INT64(98) INT64(1)] [INT64(99) INT64(2)] [INT64(100) INT64(1)] [INT64(101) INT64(1)]]`)\n\tutils.AssertMatches(t, conn, \"select ascii(val1) as a, count(*) from aggr_test group by a order by a\", `[[INT64(65) INT64(1)] [INT64(69) INT64(1)] [INT64(97) INT64(1)] [INT64(98) INT64(1)] [INT64(99) INT64(2)] [INT64(100) INT64(1)] [INT64(101) INT64(1)]]`)\n\tutils.AssertMatches(t, conn, \"select ascii(val1) as a, count(*) from aggr_test group by a order by 2, a\", `[[INT64(65) INT64(1)] [INT64(69) INT64(1)] [INT64(97) INT64(1)] [INT64(98) INT64(1)] [INT64(100) INT64(1)] [INT64(101) INT64(1)] [INT64(99) INT64(2)]]`)\n\tutils.AssertMatches(t, conn, \"select val1 as a, count(*) from aggr_test group by a\", `[[VARCHAR(\"a\") INT64(2)] [VARCHAR(\"b\") INT64(1)] [VARCHAR(\"c\") INT64(2)] [VARCHAR(\"d\") INT64(1)] [VARCHAR(\"e\") INT64(2)]]`)\n\tutils.AssertMatches(t, conn, \"select val1 as a, count(*) from aggr_test group by a order by a\", `[[VARCHAR(\"a\") INT64(2)] [VARCHAR(\"b\") INT64(1)] [VARCHAR(\"c\") INT64(2)] [VARCHAR(\"d\") INT64(1)] [VARCHAR(\"e\") INT64(2)]]`)\n\tutils.AssertMatches(t, conn, \"select val1 as a, count(*) from aggr_test group by a order by 2, a\", `[[VARCHAR(\"b\") INT64(1)] [VARCHAR(\"d\") INT64(1)] [VARCHAR(\"a\") INT64(2)] [VARCHAR(\"c\") INT64(2)] [VARCHAR(\"e\") INT64(2)]]`)\n\tutils.Exec(t, conn, \"delete from aggr_test\")\n}\n\nfunc TestGroupBy(t *testing.T) {\n\tdefer cluster.PanicHandler(t)\n\tctx := context.Background()\n\tconn, err := mysql.Connect(ctx, &vtParams)\n\trequire.Nil(t, err)\n\tdefer conn.Close()\n\tutils.Exec(t, conn, \"insert into t3(id5, id6, id7) values(1,1,2), (2,2,4), (3,2,4), (4,1,2), (5,1,2), (6,3,6)\")\n\t\/\/ test ordering and group by int column\n\tutils.AssertMatches(t, conn, \"select id6, id7, count(*) k from t3 group by id6, id7 order by k\", `[[INT64(3) INT64(6) INT64(1)] [INT64(2) INT64(4) INT64(2)] [INT64(1) INT64(2) INT64(3)]]`)\n\n\tdefer func() {\n\t\tutils.Exec(t, conn, \"set workload = oltp\")\n\t\tutils.Exec(t, conn, \"delete from t3\")\n\t}()\n\t\/\/ Test the same queries in streaming mode\n\tutils.Exec(t, conn, \"set workload = olap\")\n\tutils.AssertMatches(t, conn, \"select id6, id7, count(*) k from t3 group by id6, id7 order by k\", `[[INT64(3) INT64(6) INT64(1)] [INT64(2) INT64(4) INT64(2)] [INT64(1) INT64(2) INT64(3)]]`)\n}\n\nfunc TestDistinct(t *testing.T) {\n\tdefer cluster.PanicHandler(t)\n\n\tctx := context.Background()\n\tconn, err := mysql.Connect(ctx, &vtParams)\n\trequire.Nil(t, err)\n\tdefer conn.Close()\n\tutils.Exec(t, conn, \"insert into t3(id5,id6,id7) values(1,3,3), (2,3,4), (3,3,6), (4,5,7), (5,5,6)\")\n\tutils.Exec(t, conn, \"insert into t7_xxhash(uid,phone) values('1',4), ('2',4), ('3',3), ('4',1), ('5',1)\")\n\tutils.Exec(t, conn, \"insert into aggr_test(id, val1, val2) values(1,'a',1), (2,'A',1), (3,'b',1), (4,'c',3), (5,'c',4)\")\n\tutils.Exec(t, conn, \"insert into aggr_test(id, val1, val2) values(6,'d',null), (7,'e',null), (8,'E',1)\")\n\tutils.AssertMatches(t, conn, \"select distinct val2, count(*) from aggr_test group by val2\", `[[NULL INT64(2)] [INT64(1) INT64(4)] [INT64(3) INT64(1)] [INT64(4) INT64(1)]]`)\n\tutils.AssertMatches(t, conn, \"select distinct id6 from t3 join t7_xxhash on t3.id5 = t7_xxhash.phone\", `[[INT64(3)] [INT64(5)]]`)\n\tutils.Exec(t, conn, \"delete from t3\")\n\tutils.Exec(t, conn, \"delete from t7_xxhash\")\n\tutils.Exec(t, conn, \"delete from aggr_test\")\n}\n<commit_msg>Addition of an end-to-end test for equality filter on scatter<commit_after>\/*\nCopyright 2021 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage aggregation\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"vitess.io\/vitess\/go\/test\/endtoend\/vtgate\/utils\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n\n\t\"vitess.io\/vitess\/go\/mysql\"\n\t\"vitess.io\/vitess\/go\/test\/endtoend\/cluster\"\n)\n\nfunc TestAggregateTypes(t *testing.T) {\n\tdefer cluster.PanicHandler(t)\n\tctx := context.Background()\n\tconn, err := mysql.Connect(ctx, &vtParams)\n\trequire.Nil(t, err)\n\tdefer conn.Close()\n\tutils.Exec(t, conn, \"insert into aggr_test(id, val1, val2) values(1,'a',1), (2,'A',1), (3,'b',1), (4,'c',3), (5,'c',4)\")\n\tutils.Exec(t, conn, \"insert into aggr_test(id, val1, val2) values(6,'d',null), (7,'e',null), (8,'E',1)\")\n\tutils.AssertMatches(t, conn, \"select val1, count(distinct val2), count(*) from aggr_test group by val1\", `[[VARCHAR(\"a\") INT64(1) INT64(2)] [VARCHAR(\"b\") INT64(1) INT64(1)] [VARCHAR(\"c\") INT64(2) INT64(2)] [VARCHAR(\"d\") INT64(0) INT64(1)] [VARCHAR(\"e\") INT64(1) INT64(2)]]`)\n\tutils.AssertMatches(t, conn, \"select val1, sum(distinct val2), sum(val2) from aggr_test group by val1\", `[[VARCHAR(\"a\") DECIMAL(1) DECIMAL(2)] [VARCHAR(\"b\") DECIMAL(1) DECIMAL(1)] [VARCHAR(\"c\") DECIMAL(7) DECIMAL(7)] [VARCHAR(\"d\") NULL NULL] [VARCHAR(\"e\") DECIMAL(1) DECIMAL(1)]]`)\n\tutils.AssertMatches(t, conn, \"select val1, count(distinct val2) k, count(*) from aggr_test group by val1 order by k desc, val1\", `[[VARCHAR(\"c\") INT64(2) INT64(2)] [VARCHAR(\"a\") INT64(1) INT64(2)] [VARCHAR(\"b\") INT64(1) INT64(1)] [VARCHAR(\"e\") INT64(1) INT64(2)] [VARCHAR(\"d\") INT64(0) INT64(1)]]`)\n\tutils.AssertMatches(t, conn, \"select val1, count(distinct val2) k, count(*) from aggr_test group by val1 order by k desc, val1 limit 4\", `[[VARCHAR(\"c\") INT64(2) INT64(2)] [VARCHAR(\"a\") INT64(1) INT64(2)] [VARCHAR(\"b\") INT64(1) INT64(1)] [VARCHAR(\"e\") INT64(1) INT64(2)]]`)\n\tutils.AssertMatches(t, conn, \"select ascii(val1) as a, count(*) from aggr_test group by a\", `[[INT64(65) INT64(1)] [INT64(69) INT64(1)] [INT64(97) INT64(1)] [INT64(98) INT64(1)] [INT64(99) INT64(2)] [INT64(100) INT64(1)] [INT64(101) INT64(1)]]`)\n\tutils.AssertMatches(t, conn, \"select ascii(val1) as a, count(*) from aggr_test group by a order by a\", `[[INT64(65) INT64(1)] [INT64(69) INT64(1)] [INT64(97) INT64(1)] [INT64(98) INT64(1)] [INT64(99) INT64(2)] [INT64(100) INT64(1)] [INT64(101) INT64(1)]]`)\n\tutils.AssertMatches(t, conn, \"select ascii(val1) as a, count(*) from aggr_test group by a order by 2, a\", `[[INT64(65) INT64(1)] [INT64(69) INT64(1)] [INT64(97) INT64(1)] [INT64(98) INT64(1)] [INT64(100) INT64(1)] [INT64(101) INT64(1)] [INT64(99) INT64(2)]]`)\n\tutils.AssertMatches(t, conn, \"select val1 as a, count(*) from aggr_test group by a\", `[[VARCHAR(\"a\") INT64(2)] [VARCHAR(\"b\") INT64(1)] [VARCHAR(\"c\") INT64(2)] [VARCHAR(\"d\") INT64(1)] [VARCHAR(\"e\") INT64(2)]]`)\n\tutils.AssertMatches(t, conn, \"select val1 as a, count(*) from aggr_test group by a order by a\", `[[VARCHAR(\"a\") INT64(2)] [VARCHAR(\"b\") INT64(1)] [VARCHAR(\"c\") INT64(2)] [VARCHAR(\"d\") INT64(1)] [VARCHAR(\"e\") INT64(2)]]`)\n\tutils.AssertMatches(t, conn, \"select val1 as a, count(*) from aggr_test group by a order by 2, a\", `[[VARCHAR(\"b\") INT64(1)] [VARCHAR(\"d\") INT64(1)] [VARCHAR(\"a\") INT64(2)] [VARCHAR(\"c\") INT64(2)] [VARCHAR(\"e\") INT64(2)]]`)\n\tutils.Exec(t, conn, \"delete from aggr_test\")\n}\n\nfunc TestGroupBy(t *testing.T) {\n\tdefer cluster.PanicHandler(t)\n\tctx := context.Background()\n\tconn, err := mysql.Connect(ctx, &vtParams)\n\trequire.Nil(t, err)\n\tdefer conn.Close()\n\tutils.Exec(t, conn, \"insert into t3(id5, id6, id7) values(1,1,2), (2,2,4), (3,2,4), (4,1,2), (5,1,2), (6,3,6)\")\n\t\/\/ test ordering and group by int column\n\tutils.AssertMatches(t, conn, \"select id6, id7, count(*) k from t3 group by id6, id7 order by k\", `[[INT64(3) INT64(6) INT64(1)] [INT64(2) INT64(4) INT64(2)] [INT64(1) INT64(2) INT64(3)]]`)\n\n\tdefer func() {\n\t\tutils.Exec(t, conn, \"set workload = oltp\")\n\t\tutils.Exec(t, conn, \"delete from t3\")\n\t}()\n\t\/\/ Test the same queries in streaming mode\n\tutils.Exec(t, conn, \"set workload = olap\")\n\tutils.AssertMatches(t, conn, \"select id6, id7, count(*) k from t3 group by id6, id7 order by k\", `[[INT64(3) INT64(6) INT64(1)] [INT64(2) INT64(4) INT64(2)] [INT64(1) INT64(2) INT64(3)]]`)\n}\n\nfunc TestDistinct(t *testing.T) {\n\tdefer cluster.PanicHandler(t)\n\n\tctx := context.Background()\n\tconn, err := mysql.Connect(ctx, &vtParams)\n\trequire.Nil(t, err)\n\tdefer conn.Close()\n\tutils.Exec(t, conn, \"insert into t3(id5,id6,id7) values(1,3,3), (2,3,4), (3,3,6), (4,5,7), (5,5,6)\")\n\tutils.Exec(t, conn, \"insert into t7_xxhash(uid,phone) values('1',4), ('2',4), ('3',3), ('4',1), ('5',1)\")\n\tutils.Exec(t, conn, \"insert into aggr_test(id, val1, val2) values(1,'a',1), (2,'A',1), (3,'b',1), (4,'c',3), (5,'c',4)\")\n\tutils.Exec(t, conn, \"insert into aggr_test(id, val1, val2) values(6,'d',null), (7,'e',null), (8,'E',1)\")\n\tutils.AssertMatches(t, conn, \"select distinct val2, count(*) from aggr_test group by val2\", `[[NULL INT64(2)] [INT64(1) INT64(4)] [INT64(3) INT64(1)] [INT64(4) INT64(1)]]`)\n\tutils.AssertMatches(t, conn, \"select distinct id6 from t3 join t7_xxhash on t3.id5 = t7_xxhash.phone\", `[[INT64(3)] [INT64(5)]]`)\n\tutils.Exec(t, conn, \"delete from t3\")\n\tutils.Exec(t, conn, \"delete from t7_xxhash\")\n\tutils.Exec(t, conn, \"delete from aggr_test\")\n}\n\nfunc TestEqualityFilterOnScatter(t *testing.T) {\n\tdefer cluster.PanicHandler(t)\n\tctx := context.Background()\n\tconn, err := mysql.Connect(ctx, &vtParams)\n\trequire.Nil(t, err)\n\tdefer conn.Close()\n\n\tutils.Exec(t, conn, \"insert into aggr_test(id, val1, val2) values(1,'a',1), (2,'b',2), (3,'c',3), (4,'d',4), (5,'e',5)\")\n\n\tdefer func() {\n\t\tutils.Exec(t, conn, \"delete from aggr_test\")\n\t}()\n\n\tutils.AssertMatches(t, conn, \"select \/* GEN4_COMPARE_ONLY_GEN4 *\/ count(*) as a from aggr_test having 1 = 1\", `[[INT64(5)]]`) \/\/ where clause\n\tutils.AssertMatches(t, conn, \"select \/* GEN4_COMPARE_ONLY_GEN4 *\/ count(*) as a from aggr_test having a = 5\", `[[INT64(5)]]`) \/\/ having clause\n\tutils.AssertMatches(t, conn, \"select \/* GEN4_COMPARE_ONLY_GEN4 *\/ count(*) as a from aggr_test having 5 = a\", `[[INT64(5)]]`) \/\/ having clause\n\tutils.AssertMatches(t, conn, \"select \/* GEN4_COMPARE_ONLY_GEN4 *\/ count(*) as a from aggr_test having a = a\", `[[INT64(5)]]`) \/\/ having clause\n\tutils.AssertMatches(t, conn, \"select \/* GEN4_COMPARE_ONLY_GEN4 *\/ count(*) as a from aggr_test having a = 3+2\", `[[INT64(5)]]`) \/\/ having clause\n\tutils.AssertMatches(t, conn, \"select \/* GEN4_COMPARE_ONLY_GEN4 *\/ count(*) as a from aggr_test having 1+4 = 3+2\", `[[INT64(5)]]`) \/\/ where clause\n\tutils.AssertMatches(t, conn, \"select \/* GEN4_COMPARE_ONLY_GEN4 *\/ count(*) as a from aggr_test having a = 1\", `[]`) \/\/ having clause\n\t\/\/ utils.AssertMatches(t, conn, \"select \/* GEN4_COMPARE_ONLY_GEN4 *\/ count(*) as a from aggr_test having 0 = 1\", `[]`) \/\/ where clause, still returns one row with a value of 0\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012-2015 Oliver Eilhard. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-license.\n\/\/ See http:\/\/olivere.mit-license.org\/license.txt for details.\n\npackage elastic_test\n\nimport (\n\t\"encoding\/json\"\n\t\"testing\"\n\n\t\"gopkg.in\/olivere\/elastic.v3-unstable\"\n)\n\nfunc ExampleWildcardQuery() {\n\t\/\/ Get a client to the local Elasticsearch instance.\n\tclient, err := elastic.NewClient()\n\tif err != nil {\n\t\t\/\/ Handle error\n\t\tpanic(err)\n\t}\n\n\t\/\/ Define wildcard query\n\tq := elastic.NewWildcardQuery(\"user\", \"oli*er?\").Boost(1.2)\n\tsearchResult, err := client.Search().\n\t\tIndex(\"twitter\"). \/\/ search in index \"twitter\"\n\t\tQuery(q). \/\/ use wildcard query defined above\n\t\tDo() \/\/ execute\n\tif err != nil {\n\t\t\/\/ Handle error\n\t\tpanic(err)\n\t}\n\t_ = searchResult\n}\n\nfunc TestWildcardQuery(t *testing.T) {\n\tq := elastic.NewWildcardQuery(\"user\", \"ki*y??\")\n\tsrc, err := q.Source()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdata, err := json.Marshal(src)\n\tif err != nil {\n\t\tt.Fatalf(\"marshaling to JSON failed: %v\", err)\n\t}\n\tgot := string(data)\n\texpected := `{\"wildcard\":{\"user\":{\"wildcard\":\"ki*y??\"}}}`\n\tif got != expected {\n\t\tt.Errorf(\"expected\\n%s\\n,got:\\n%s\", expected, got)\n\t}\n}\n\nfunc TestWildcardQueryWithBoost(t *testing.T) {\n\tq := elastic.NewWildcardQuery(\"user\", \"ki*y??\").Boost(1.2)\n\tsrc, err := q.Source()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdata, err := json.Marshal(src)\n\tif err != nil {\n\t\tt.Fatalf(\"marshaling to JSON failed: %v\", err)\n\t}\n\tgot := string(data)\n\texpected := `{\"wildcard\":{\"user\":{\"boost\":1.2,\"wildcard\":\"ki*y??\"}}}`\n\tif got != expected {\n\t\tt.Errorf(\"expected\\n%s\\n,got:\\n%s\", expected, got)\n\t}\n}\n<commit_msg>Fix package path in cluster-test<commit_after>\/\/ Copyright 2012-2015 Oliver Eilhard. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-license.\n\/\/ See http:\/\/olivere.mit-license.org\/license.txt for details.\n\npackage elastic_test\n\nimport (\n\t\"encoding\/json\"\n\t\"testing\"\n\n\t\"gopkg.in\/olivere\/elastic.v3\"\n)\n\nfunc ExampleWildcardQuery() {\n\t\/\/ Get a client to the local Elasticsearch instance.\n\tclient, err := elastic.NewClient()\n\tif err != nil {\n\t\t\/\/ Handle error\n\t\tpanic(err)\n\t}\n\n\t\/\/ Define wildcard query\n\tq := elastic.NewWildcardQuery(\"user\", \"oli*er?\").Boost(1.2)\n\tsearchResult, err := client.Search().\n\t\tIndex(\"twitter\"). \/\/ search in index \"twitter\"\n\t\tQuery(q). \/\/ use wildcard query defined above\n\t\tDo() \/\/ execute\n\tif err != nil {\n\t\t\/\/ Handle error\n\t\tpanic(err)\n\t}\n\t_ = searchResult\n}\n\nfunc TestWildcardQuery(t *testing.T) {\n\tq := elastic.NewWildcardQuery(\"user\", \"ki*y??\")\n\tsrc, err := q.Source()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdata, err := json.Marshal(src)\n\tif err != nil {\n\t\tt.Fatalf(\"marshaling to JSON failed: %v\", err)\n\t}\n\tgot := string(data)\n\texpected := `{\"wildcard\":{\"user\":{\"wildcard\":\"ki*y??\"}}}`\n\tif got != expected {\n\t\tt.Errorf(\"expected\\n%s\\n,got:\\n%s\", expected, got)\n\t}\n}\n\nfunc TestWildcardQueryWithBoost(t *testing.T) {\n\tq := elastic.NewWildcardQuery(\"user\", \"ki*y??\").Boost(1.2)\n\tsrc, err := q.Source()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdata, err := json.Marshal(src)\n\tif err != nil {\n\t\tt.Fatalf(\"marshaling to JSON failed: %v\", err)\n\t}\n\tgot := string(data)\n\texpected := `{\"wildcard\":{\"user\":{\"boost\":1.2,\"wildcard\":\"ki*y??\"}}}`\n\tif got != expected {\n\t\tt.Errorf(\"expected\\n%s\\n,got:\\n%s\", expected, got)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package handler\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/micro\/go-micro\/broker\"\n\t\"github.com\/micro\/go-micro\/cmd\"\n)\n\nconst (\n\tpingTime = (readDeadline * 9) \/ 10\n\treadLimit = 16384\n\treadDeadline = 60 * time.Second\n\twriteDeadline = 10 * time.Second\n)\n\ntype conn struct {\n\tcType string\n\ttopic string\n\tqueue string\n\texit chan bool\n\n\tsync.Mutex\n\tws *websocket.Conn\n}\n\nvar (\n\tonce sync.Once\n\n\tupgrader = websocket.Upgrader{\n\t\tReadBufferSize: 1024,\n\t\tWriteBufferSize: 1024,\n\t}\n\n\tcontentType = \"text\/plain\"\n)\n\nfunc (c *conn) readLoop() {\n\tdefer func() {\n\t\tclose(c.exit)\n\t\tc.ws.Close()\n\t}()\n\n\tc.ws.SetReadLimit(readLimit)\n\tc.ws.SetReadDeadline(time.Now().Add(readDeadline))\n\tc.ws.SetPongHandler(func(string) error {\n\t\tc.ws.SetReadDeadline(time.Now().Add(readDeadline))\n\t\treturn nil\n\t})\n\n\tfor {\n\t\t_, message, err := c.ws.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t(*cmd.DefaultOptions().Broker).Publish(c.topic, &broker.Message{\n\t\t\tHeader: map[string]string{\"Content-Type\": c.cType},\n\t\t\tBody: message,\n\t\t})\n\t}\n}\n\nfunc (c *conn) write(mType int, data []byte) error {\n\tc.Lock()\n\tc.ws.SetWriteDeadline(time.Now().Add(writeDeadline))\n\terr := c.ws.WriteMessage(mType, data)\n\tc.Unlock()\n\treturn err\n}\n\nfunc (c *conn) writeLoop() {\n\tticker := time.NewTicker(pingTime)\n\n\tvar opts []broker.SubscribeOption\n\n\tif len(c.queue) > 0 {\n\t\topts = append(opts, broker.Queue(c.queue))\n\t}\n\n\tsubscriber, err := (*cmd.DefaultOptions().Broker).Subscribe(c.topic, func(p broker.Publication) error {\n\t\tb, err := json.Marshal(p.Message())\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn c.write(websocket.TextMessage, b)\n\t}, opts...)\n\n\tdefer func() {\n\t\tsubscriber.Unsubscribe()\n\t\tticker.Stop()\n\t\tc.ws.Close()\n\t}()\n\n\tif err != nil {\n\t\tlog.Print(err.Error())\n\t\treturn\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif err := c.write(websocket.PingMessage, []byte{}); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-c.exit:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc Broker(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\thttp.Error(w, \"Method not allowed\", 405)\n\t\treturn\n\t}\n\n\tr.ParseForm()\n\ttopic := r.Form.Get(\"topic\")\n\tif len(topic) == 0 {\n\t\thttp.Error(w, \"Topic not specified\", 400)\n\t\treturn\n\t}\n\tqueue := r.Form.Get(\"queue\")\n\n\tws, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlog.Print(err.Error())\n\t\treturn\n\t}\n\n\tonce.Do(func() {\n\t\t(*cmd.DefaultOptions().Broker).Init()\n\t\t(*cmd.DefaultOptions().Broker).Connect()\n\t})\n\n\tcType := r.Header.Get(\"Content-Type\")\n\tif len(cType) == 0 {\n\t\tcType = contentType\n\t}\n\n\tc := &conn{\n\t\tcType: cType,\n\t\ttopic: topic,\n\t\tqueue: queue,\n\t\texit: make(chan bool),\n\t\tws: ws,\n\t}\n\n\tgo c.writeLoop()\n\tc.readLoop()\n}\n<commit_msg>Handle connection close<commit_after>package handler\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\t\"net\/http\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/micro\/go-micro\/broker\"\n\t\"github.com\/micro\/go-micro\/cmd\"\n)\n\nconst (\n\tpingTime = (readDeadline * 9) \/ 10\n\treadLimit = 16384\n\treadDeadline = 60 * time.Second\n\twriteDeadline = 10 * time.Second\n)\n\ntype conn struct {\n\tcType string\n\ttopic string\n\tqueue string\n\texit chan bool\n\n\tsync.Mutex\n\tws *websocket.Conn\n}\n\nvar (\n\tonce sync.Once\n\n\tupgrader = websocket.Upgrader{\n\t\tReadBufferSize: 1024,\n\t\tWriteBufferSize: 1024,\n\t}\n\n\tcontentType = \"text\/plain\"\n)\n\nfunc (c *conn) close() {\n\tselect {\n\tcase <-c.exit:\n\t\treturn\n\tdefault:\n\t\tclose(c.exit)\n\t}\n}\n\nfunc (c *conn) readLoop() {\n\tdefer func() {\n\t\tc.close()\n\t\tc.ws.Close()\n\t}()\n\n\t\/\/ set read limit\/deadline\n\tc.ws.SetReadLimit(readLimit)\n\tc.ws.SetReadDeadline(time.Now().Add(readDeadline))\n\n\t\/\/ set close handler\n\tch := c.ws.CloseHandler()\n\tc.ws.SetCloseHandler(func(code int, text string) error {\n\t\terr := ch(code, text)\n\t\tc.close()\n\t\treturn err\n\t})\n\n\t\/\/ set pong handler\n\tc.ws.SetPongHandler(func(string) error {\n\t\tc.ws.SetReadDeadline(time.Now().Add(readDeadline))\n\t\treturn nil\n\t})\n\n\tfor {\n\t\t_, message, err := c.ws.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t(*cmd.DefaultOptions().Broker).Publish(c.topic, &broker.Message{\n\t\t\tHeader: map[string]string{\"Content-Type\": c.cType},\n\t\t\tBody: message,\n\t\t})\n\t}\n}\n\nfunc (c *conn) write(mType int, data []byte) error {\n\tc.Lock()\n\tc.ws.SetWriteDeadline(time.Now().Add(writeDeadline))\n\terr := c.ws.WriteMessage(mType, data)\n\tc.Unlock()\n\treturn err\n}\n\nfunc (c *conn) writeLoop() {\n\tticker := time.NewTicker(pingTime)\n\n\tvar opts []broker.SubscribeOption\n\n\tif len(c.queue) > 0 {\n\t\topts = append(opts, broker.Queue(c.queue))\n\t}\n\n\tsubscriber, err := (*cmd.DefaultOptions().Broker).Subscribe(c.topic, func(p broker.Publication) error {\n\t\tb, err := json.Marshal(p.Message())\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn c.write(websocket.TextMessage, b)\n\t}, opts...)\n\n\tdefer func() {\n\t\tsubscriber.Unsubscribe()\n\t\tticker.Stop()\n\t\tc.ws.Close()\n\t}()\n\n\tif err != nil {\n\t\tlog.Print(err.Error())\n\t\treturn\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tif err := c.write(websocket.PingMessage, []byte{}); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-c.exit:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc Broker(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != \"GET\" {\n\t\thttp.Error(w, \"Method not allowed\", 405)\n\t\treturn\n\t}\n\n\tr.ParseForm()\n\ttopic := r.Form.Get(\"topic\")\n\tif len(topic) == 0 {\n\t\thttp.Error(w, \"Topic not specified\", 400)\n\t\treturn\n\t}\n\tqueue := r.Form.Get(\"queue\")\n\n\tws, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\tlog.Print(err.Error())\n\t\treturn\n\t}\n\n\tonce.Do(func() {\n\t\t(*cmd.DefaultOptions().Broker).Init()\n\t\t(*cmd.DefaultOptions().Broker).Connect()\n\t})\n\n\tcType := r.Header.Get(\"Content-Type\")\n\tif len(cType) == 0 {\n\t\tcType = contentType\n\t}\n\n\tc := &conn{\n\t\tcType: cType,\n\t\ttopic: topic,\n\t\tqueue: queue,\n\t\texit: make(chan bool),\n\t\tws: ws,\n\t}\n\n\tgo c.writeLoop()\n\tc.readLoop()\n}\n<|endoftext|>"} {"text":"<commit_before>package interpreter\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n)\n\ntype CommandNotFound struct {\n\tName string\n\tErr error\n}\n\nfunc (this CommandNotFound) Stringer() string {\n\treturn fmt.Sprintf(\"'%s' is not recognized as an internal or external command,\\noperable program or batch file\", this.Name)\n}\n\nfunc (this CommandNotFound) Error() string {\n\treturn this.Stringer()\n}\n\ntype NextT int\n\nconst (\n\tTHROUGH NextT = 0\n\tCONTINUE NextT = 1\n\tSHUTDOWN NextT = 2\n)\n\nfunc (this NextT) String() string {\n\tswitch this {\n\tcase THROUGH:\n\t\treturn \"THROUGH\"\n\tcase CONTINUE:\n\t\treturn \"CONTINUE\"\n\tcase SHUTDOWN:\n\t\treturn \"SHUTDOWN\"\n\tdefault:\n\t\treturn \"UNKNOWN\"\n\t}\n}\n\ntype Interpreter struct {\n\texec.Cmd\n\tStdio [3]*os.File\n\tHookCount int\n\tCloser []io.Closer\n\tTag interface{}\n}\n\nfunc (this *Interpreter) closeAtEnd() {\n\tif this.Closer != nil {\n\t\tfor _, c := range this.Closer {\n\t\t\tc.Close()\n\t\t}\n\t\tthis.Closer = nil\n\t}\n}\n\nfunc New() *Interpreter {\n\tthis := Interpreter{\n\t\tStdio: [3]*os.File{os.Stdin, os.Stdout, os.Stderr},\n\t}\n\tthis.Stdin = os.Stdin\n\tthis.Stdout = os.Stdout\n\tthis.Stderr = os.Stderr\n\tthis.Tag = nil\n\treturn &this\n}\n\nfunc (this *Interpreter) SetStdin(f *os.File) {\n\tthis.Stdio[0] = f\n\tthis.Stdin = f\n}\nfunc (this *Interpreter) SetStdout(f *os.File) {\n\tthis.Stdio[1] = f\n\tthis.Stdout = f\n}\nfunc (this *Interpreter) SetStderr(f *os.File) {\n\tthis.Stdio[2] = f\n\tthis.Stderr = f\n}\n\nfunc (this *Interpreter) Clone() *Interpreter {\n\trv := new(Interpreter)\n\trv.Stdio[0] = this.Stdio[0]\n\trv.Stdio[1] = this.Stdio[1]\n\trv.Stdio[2] = this.Stdio[2]\n\trv.Stdin = this.Stdin\n\trv.Stdout = this.Stdout\n\trv.Stderr = this.Stderr\n\trv.HookCount = this.HookCount\n\trv.Tag = this.Tag\n\trv.Closer = nil\n\treturn rv\n}\n\ntype ArgsHookT func(it *Interpreter, args []string) []string\n\nvar argsHook = func(it *Interpreter, args []string) []string {\n\treturn args\n}\n\nfunc SetArgsHook(argsHook_ ArgsHookT) (rv ArgsHookT) {\n\trv, argsHook = argsHook, argsHook_\n\treturn\n}\n\ntype HookT func(*Interpreter) (NextT, error)\n\nvar hook = func(*Interpreter) (NextT, error) {\n\treturn THROUGH, nil\n}\n\nfunc SetHook(hook_ HookT) (rv HookT) {\n\trv, hook = hook, hook_\n\treturn\n}\n\nvar OnCommandNotFound = func(this *Interpreter, err error) error {\n\terr = &CommandNotFound{this.Args[0], err}\n\treturn err\n}\n\nvar errorStatusPattern = regexp.MustCompile(\"^exit status ([0-9]+)\")\nvar ErrorLevel string\n\nfunc nvl(a *os.File, b *os.File) *os.File {\n\tif a != nil {\n\t\treturn a\n\t} else {\n\t\treturn b\n\t}\n}\n\nfunc (this *Interpreter) Spawnvp() (NextT, error) {\n\tvar whatToDo NextT = CONTINUE\n\tvar err error = nil\n\n\tif len(this.Args) > 0 {\n\t\twhatToDo, err = hook(this)\n\t\tif whatToDo == THROUGH {\n\t\t\tthis.Path, err = exec.LookPath(this.Args[0])\n\t\t\tif err == nil {\n\t\t\t\terr = this.Run()\n\t\t\t} else {\n\t\t\t\terr = OnCommandNotFound(this, err)\n\t\t\t}\n\t\t\twhatToDo = CONTINUE\n\t\t}\n\t}\n\treturn whatToDo, err\n}\n\ntype result_t struct {\n\tNextValue NextT\n\tError error\n}\n\nfunc (this *Interpreter) Interpret(text string) (NextT, error) {\n\tstatements, statementsErr := Parse(text)\n\tif statementsErr != nil {\n\t\treturn CONTINUE, statementsErr\n\t}\n\tif argsHook != nil {\n\t\tfor _, pipeline := range statements {\n\t\t\tfor _, state := range pipeline {\n\t\t\t\tstate.Argv = argsHook(this, state.Argv)\n\t\t\t}\n\t\t}\n\t}\n\tvar result chan result_t = nil\n\tfor _, pipeline := range statements {\n\t\tvar pipeOut *os.File = nil\n\t\tfor i := len(pipeline) - 1; i >= 0; i-- {\n\t\t\tstate := pipeline[i]\n\n\t\t\tcmd := new(Interpreter)\n\t\t\tcmd.Tag = this.Tag\n\t\t\tcmd.HookCount = this.HookCount\n\t\t\tcmd.SetStdin(nvl(this.Stdio[0], os.Stdin))\n\t\t\tcmd.SetStdout(nvl(this.Stdio[1], os.Stdout))\n\t\t\tcmd.SetStderr(nvl(this.Stdio[2], os.Stderr))\n\n\t\t\tvar err error = nil\n\n\t\t\tif state.Term[0] == '|' {\n\t\t\t\tcmd.SetStdout(pipeOut)\n\t\t\t\tif state.Term == \"|&\" {\n\t\t\t\t\tcmd.SetStderr(pipeOut)\n\t\t\t\t}\n\t\t\t\tcmd.Closer = append(cmd.Closer, pipeOut)\n\t\t\t}\n\n\t\t\tif i > 0 && pipeline[i-1].Term[0] == '|' {\n\t\t\t\tvar pipeIn *os.File\n\t\t\t\tpipeIn, pipeOut, err = os.Pipe()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn CONTINUE, err\n\t\t\t\t}\n\t\t\t\tcmd.SetStdin(pipeIn)\n\t\t\t\tcmd.Closer = append(cmd.Closer, pipeIn)\n\t\t\t} else {\n\t\t\t\tpipeOut = nil\n\t\t\t}\n\n\t\t\tfor _, red := range state.Redirect {\n\t\t\t\terr = red.OpenOn(cmd)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn CONTINUE, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tcmd.Args = state.Argv\n\t\t\tif i == len(pipeline)-1 && state.Term != \"&\" {\n\t\t\t\tresult = make(chan result_t)\n\t\t\t\tgo func() {\n\t\t\t\t\twhatToDo, err := cmd.Spawnvp()\n\t\t\t\t\tcmd.closeAtEnd()\n\t\t\t\t\tresult <- result_t{whatToDo, err}\n\t\t\t\t}()\n\t\t\t} else {\n\t\t\t\tgo func() {\n\t\t\t\t\tcmd.Spawnvp()\n\t\t\t\t\tcmd.closeAtEnd()\n\t\t\t\t}()\n\t\t\t}\n\t\t}\n\t}\n\tif result != nil {\n\t\tresultValue := <-result\n\t\tif resultValue.Error != nil {\n\t\t\tm := errorStatusPattern.FindStringSubmatch(\n\t\t\t\tresultValue.Error.Error())\n\t\t\tif m != nil {\n\t\t\t\tErrorLevel = m[1]\n\t\t\t\tresultValue.Error = nil\n\t\t\t} else {\n\t\t\t\tErrorLevel = \"-1\"\n\t\t\t}\n\t\t} else {\n\t\t\tErrorLevel = \"0\"\n\t\t}\n\t\treturn resultValue.NextValue, resultValue.Error\n\t} else {\n\t\treturn CONTINUE, nil\n\t}\n}\n<commit_msg>Fixed 'A ; B' was same as 'A & B'<commit_after>package interpreter\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n)\n\ntype CommandNotFound struct {\n\tName string\n\tErr error\n}\n\nfunc (this CommandNotFound) Stringer() string {\n\treturn fmt.Sprintf(\"'%s' is not recognized as an internal or external command,\\noperable program or batch file\", this.Name)\n}\n\nfunc (this CommandNotFound) Error() string {\n\treturn this.Stringer()\n}\n\ntype NextT int\n\nconst (\n\tTHROUGH NextT = 0\n\tCONTINUE NextT = 1\n\tSHUTDOWN NextT = 2\n)\n\nfunc (this NextT) String() string {\n\tswitch this {\n\tcase THROUGH:\n\t\treturn \"THROUGH\"\n\tcase CONTINUE:\n\t\treturn \"CONTINUE\"\n\tcase SHUTDOWN:\n\t\treturn \"SHUTDOWN\"\n\tdefault:\n\t\treturn \"UNKNOWN\"\n\t}\n}\n\ntype Interpreter struct {\n\texec.Cmd\n\tStdio [3]*os.File\n\tHookCount int\n\tCloser []io.Closer\n\tTag interface{}\n}\n\nfunc (this *Interpreter) closeAtEnd() {\n\tif this.Closer != nil {\n\t\tfor _, c := range this.Closer {\n\t\t\tc.Close()\n\t\t}\n\t\tthis.Closer = nil\n\t}\n}\n\nfunc New() *Interpreter {\n\tthis := Interpreter{\n\t\tStdio: [3]*os.File{os.Stdin, os.Stdout, os.Stderr},\n\t}\n\tthis.Stdin = os.Stdin\n\tthis.Stdout = os.Stdout\n\tthis.Stderr = os.Stderr\n\tthis.Tag = nil\n\treturn &this\n}\n\nfunc (this *Interpreter) SetStdin(f *os.File) {\n\tthis.Stdio[0] = f\n\tthis.Stdin = f\n}\nfunc (this *Interpreter) SetStdout(f *os.File) {\n\tthis.Stdio[1] = f\n\tthis.Stdout = f\n}\nfunc (this *Interpreter) SetStderr(f *os.File) {\n\tthis.Stdio[2] = f\n\tthis.Stderr = f\n}\n\nfunc (this *Interpreter) Clone() *Interpreter {\n\trv := new(Interpreter)\n\trv.Stdio[0] = this.Stdio[0]\n\trv.Stdio[1] = this.Stdio[1]\n\trv.Stdio[2] = this.Stdio[2]\n\trv.Stdin = this.Stdin\n\trv.Stdout = this.Stdout\n\trv.Stderr = this.Stderr\n\trv.HookCount = this.HookCount\n\trv.Tag = this.Tag\n\trv.Closer = nil\n\treturn rv\n}\n\ntype ArgsHookT func(it *Interpreter, args []string) []string\n\nvar argsHook = func(it *Interpreter, args []string) []string {\n\treturn args\n}\n\nfunc SetArgsHook(argsHook_ ArgsHookT) (rv ArgsHookT) {\n\trv, argsHook = argsHook, argsHook_\n\treturn\n}\n\ntype HookT func(*Interpreter) (NextT, error)\n\nvar hook = func(*Interpreter) (NextT, error) {\n\treturn THROUGH, nil\n}\n\nfunc SetHook(hook_ HookT) (rv HookT) {\n\trv, hook = hook, hook_\n\treturn\n}\n\nvar OnCommandNotFound = func(this *Interpreter, err error) error {\n\terr = &CommandNotFound{this.Args[0], err}\n\treturn err\n}\n\nvar errorStatusPattern = regexp.MustCompile(\"^exit status ([0-9]+)\")\nvar ErrorLevel string\n\nfunc nvl(a *os.File, b *os.File) *os.File {\n\tif a != nil {\n\t\treturn a\n\t} else {\n\t\treturn b\n\t}\n}\n\nfunc (this *Interpreter) Spawnvp() (NextT, error) {\n\tvar whatToDo NextT = CONTINUE\n\tvar err error = nil\n\n\tif len(this.Args) > 0 {\n\t\twhatToDo, err = hook(this)\n\t\tif whatToDo == THROUGH {\n\t\t\tthis.Path, err = exec.LookPath(this.Args[0])\n\t\t\tif err == nil {\n\t\t\t\terr = this.Run()\n\t\t\t} else {\n\t\t\t\terr = OnCommandNotFound(this, err)\n\t\t\t}\n\t\t\twhatToDo = CONTINUE\n\t\t}\n\t}\n\tthis.Stdio[1].Sync()\n\tthis.Stdio[2].Sync()\n\treturn whatToDo, err\n}\n\ntype result_t struct {\n\tNextValue NextT\n\tError error\n}\n\nfunc (this *Interpreter) Interpret(text string) (next NextT, err error) {\n\tnext = CONTINUE\n\terr = nil\n\n\tstatements, statementsErr := Parse(text)\n\tif statementsErr != nil {\n\t\treturn CONTINUE, statementsErr\n\t}\n\tif argsHook != nil {\n\t\tfor _, pipeline := range statements {\n\t\t\tfor _, state := range pipeline {\n\t\t\t\tstate.Argv = argsHook(this, state.Argv)\n\t\t\t}\n\t\t}\n\t}\n\tfor _, pipeline := range statements {\n\t\tvar result chan result_t = nil\n\t\tvar pipeOut *os.File = nil\n\t\tfor i := len(pipeline) - 1; i >= 0; i-- {\n\t\t\tstate := pipeline[i]\n\n\t\t\tcmd := new(Interpreter)\n\t\t\tcmd.Tag = this.Tag\n\t\t\tcmd.HookCount = this.HookCount\n\t\t\tcmd.SetStdin(nvl(this.Stdio[0], os.Stdin))\n\t\t\tcmd.SetStdout(nvl(this.Stdio[1], os.Stdout))\n\t\t\tcmd.SetStderr(nvl(this.Stdio[2], os.Stderr))\n\n\t\t\tvar err error = nil\n\n\t\t\tif state.Term[0] == '|' {\n\t\t\t\tcmd.SetStdout(pipeOut)\n\t\t\t\tif state.Term == \"|&\" {\n\t\t\t\t\tcmd.SetStderr(pipeOut)\n\t\t\t\t}\n\t\t\t\tcmd.Closer = append(cmd.Closer, pipeOut)\n\t\t\t}\n\n\t\t\tif i > 0 && pipeline[i-1].Term[0] == '|' {\n\t\t\t\tvar pipeIn *os.File\n\t\t\t\tpipeIn, pipeOut, err = os.Pipe()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn CONTINUE, err\n\t\t\t\t}\n\t\t\t\tcmd.SetStdin(pipeIn)\n\t\t\t\tcmd.Closer = append(cmd.Closer, pipeIn)\n\t\t\t} else {\n\t\t\t\tpipeOut = nil\n\t\t\t}\n\n\t\t\tfor _, red := range state.Redirect {\n\t\t\t\terr = red.OpenOn(cmd)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn CONTINUE, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tcmd.Args = state.Argv\n\t\t\tif i == len(pipeline)-1 && state.Term != \"&\" {\n\t\t\t\tresult = make(chan result_t)\n\t\t\t\tgo func() {\n\t\t\t\t\twhatToDo, err := cmd.Spawnvp()\n\t\t\t\t\tcmd.closeAtEnd()\n\t\t\t\t\tresult <- result_t{whatToDo, err}\n\t\t\t\t}()\n\t\t\t} else {\n\t\t\t\tgo func() {\n\t\t\t\t\tcmd.Spawnvp()\n\t\t\t\t\tcmd.closeAtEnd()\n\t\t\t\t}()\n\t\t\t}\n\t\t}\n\t\tif result != nil {\n\t\t\tresultValue := <-result\n\t\t\tif resultValue.Error != nil {\n\t\t\t\tm := errorStatusPattern.FindStringSubmatch(\n\t\t\t\t\tresultValue.Error.Error())\n\t\t\t\tif m != nil {\n\t\t\t\t\tErrorLevel = m[1]\n\t\t\t\t\tresultValue.Error = nil\n\t\t\t\t} else {\n\t\t\t\t\tErrorLevel = \"-1\"\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tErrorLevel = \"0\"\n\t\t\t}\n\t\t\tnext = resultValue.NextValue\n\t\t\terr = resultValue.Error\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Chihaya Authors. All rights reserved.\n\/\/ Use of this source code is governed by the BSD 2-Clause license,\n\/\/ which can be found in the LICENSE file.\n\n\/\/ Package prometheus implements a chihaya Server for serving metrics to\n\/\/ Prometheus.\npackage prometheus\n\nimport (\n\t\"errors\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/tylerb\/graceful\"\n\t\"gopkg.in\/yaml.v2\"\n\n\t\"github.com\/chihaya\/chihaya\"\n\t\"github.com\/chihaya\/chihaya\/server\"\n\t\"github.com\/chihaya\/chihaya\/tracker\"\n)\n\nfunc init() {\n\tserver.Register(\"prometheus\", constructor)\n}\n\nfunc constructor(srvcfg *chihaya.ServerConfig, tkr *tracker.Tracker) (server.Server, error) {\n\tcfg, err := NewServerConfig(srvcfg)\n\tif err != nil {\n\t\treturn nil, errors.New(\"prometheus: invalid config: \" + err.Error())\n\t}\n\n\treturn &Server{\n\t\tcfg: cfg,\n\t}, nil\n}\n\n\/\/ ServerConfig represents the configuration options for a\n\/\/ PrometheusServer.\ntype ServerConfig struct {\n\tAddr string `yaml:\"addr\"`\n\tShutdownTimeout time.Duration `yaml:\"shutdown_timeout\"`\n\tReadTimeout time.Duration `yaml:\"read_timeout\"`\n\tWriteTimeout time.Duration `yaml:\"write_timeout\"`\n}\n\n\/\/ NewServerConfig marshals a chihaya.ServerConfig and unmarshals it\n\/\/ into a more specific prometheus ServerConfig.\nfunc NewServerConfig(srvcfg *chihaya.ServerConfig) (*ServerConfig, error) {\n\tbytes, err := yaml.Marshal(srvcfg.Config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar cfg ServerConfig\n\terr = yaml.Unmarshal(bytes, &cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &cfg, nil\n}\n\n\/\/ Server implements a chihaya Server for serving metrics to Prometheus.\ntype Server struct {\n\tcfg *ServerConfig\n\tgrace *graceful.Server\n\tstopped bool\n}\n\nvar _ server.Server = &Server{}\n\nfunc (s *Server) Start() {\n\ts.grace = &graceful.Server{\n\t\tServer: &http.Server{\n\t\t\tAddr: s.cfg.Addr,\n\t\t\tHandler: prometheus.Handler(),\n\t\t\tReadTimeout: s.cfg.ReadTimeout,\n\t\t\tWriteTimeout: s.cfg.WriteTimeout,\n\t\t},\n\t\tTimeout: s.cfg.ShutdownTimeout,\n\t\tNoSignalHandling: true,\n\t}\n}\n\nfunc (s *Server) Stop() {\n\ts.grace.Stop(s.cfg.ShutdownTimeout)\n\tstopChan := s.grace.StopChan()\n\n\t\/\/ Block until the graceful server shuts down and closes this channel.\n\tfor range stopChan {\n\t}\n}\n<commit_msg>server\/prom: get rid of unneeded graceful call<commit_after>\/\/ Copyright 2016 The Chihaya Authors. All rights reserved.\n\/\/ Use of this source code is governed by the BSD 2-Clause license,\n\/\/ which can be found in the LICENSE file.\n\n\/\/ Package prometheus implements a chihaya Server for serving metrics to\n\/\/ Prometheus.\npackage prometheus\n\nimport (\n\t\"errors\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/tylerb\/graceful\"\n\t\"gopkg.in\/yaml.v2\"\n\n\t\"github.com\/chihaya\/chihaya\"\n\t\"github.com\/chihaya\/chihaya\/server\"\n\t\"github.com\/chihaya\/chihaya\/tracker\"\n)\n\nfunc init() {\n\tserver.Register(\"prometheus\", constructor)\n}\n\nfunc constructor(srvcfg *chihaya.ServerConfig, tkr *tracker.Tracker) (server.Server, error) {\n\tcfg, err := NewServerConfig(srvcfg)\n\tif err != nil {\n\t\treturn nil, errors.New(\"prometheus: invalid config: \" + err.Error())\n\t}\n\n\treturn &Server{\n\t\tcfg: cfg,\n\t}, nil\n}\n\n\/\/ ServerConfig represents the configuration options for a\n\/\/ PrometheusServer.\ntype ServerConfig struct {\n\tAddr string `yaml:\"addr\"`\n\tShutdownTimeout time.Duration `yaml:\"shutdown_timeout\"`\n\tReadTimeout time.Duration `yaml:\"read_timeout\"`\n\tWriteTimeout time.Duration `yaml:\"write_timeout\"`\n}\n\n\/\/ NewServerConfig marshals a chihaya.ServerConfig and unmarshals it\n\/\/ into a more specific prometheus ServerConfig.\nfunc NewServerConfig(srvcfg *chihaya.ServerConfig) (*ServerConfig, error) {\n\tbytes, err := yaml.Marshal(srvcfg.Config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar cfg ServerConfig\n\terr = yaml.Unmarshal(bytes, &cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &cfg, nil\n}\n\n\/\/ Server implements a chihaya Server for serving metrics to Prometheus.\ntype Server struct {\n\tcfg *ServerConfig\n\tgrace *graceful.Server\n}\n\nvar _ server.Server = &Server{}\n\nfunc (s *Server) Start() {\n\ts.grace = &graceful.Server{\n\t\tServer: &http.Server{\n\t\t\tAddr: s.cfg.Addr,\n\t\t\tHandler: prometheus.Handler(),\n\t\t\tReadTimeout: s.cfg.ReadTimeout,\n\t\t\tWriteTimeout: s.cfg.WriteTimeout,\n\t\t},\n\t\tTimeout: s.cfg.ShutdownTimeout,\n\t\tNoSignalHandling: true,\n\t}\n}\n\nfunc (s *Server) Stop() {\n\ts.grace.Stop(s.cfg.ShutdownTimeout)\n}\n<|endoftext|>"} {"text":"<commit_before>package ask\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/byuoitav\/common\/db\"\n\t\"github.com\/byuoitav\/common\/log\"\n\t\"github.com\/byuoitav\/common\/nerr\"\n\t\"github.com\/byuoitav\/common\/structs\"\n\t\"github.com\/byuoitav\/common\/v2\/events\"\n\t\"github.com\/byuoitav\/device-monitoring\/localsystem\"\n)\n\nconst (\n\t\/\/ TODO change this command name to match whatever command we need\n\thardwareInfoCommandID = \"HardwareInfo\"\n)\n\n\/\/ DeviceHardwareJob gets hardware information from devices in the room and pushes it up\ntype DeviceHardwareJob struct{}\n\n\/\/ Run runs the job\nfunc (j *DeviceHardwareJob) Run(ctx interface{}, eventWrite chan events.Event) interface{} {\n\tlog.L.Infof(\"Getting hardware info for devices in room\")\n\n\t\/\/ get list of devices from database\n\troomID, err := localsystem.RoomID()\n\tif err != nil {\n\t\treturn err.Addf(\"failed to get hardware info about devices\")\n\t}\n\n\tdevices, gerr := db.GetDB().GetDevicesByRoom(roomID)\n\tif gerr != nil {\n\t\treturn nerr.Translate(gerr).Addf(\"failed to get hardware info about devices in %s\", roomID)\n\t}\n\n\twg := sync.WaitGroup{}\n\thardwareInfo := make(map[string]structs.HardwareInfo)\n\n\tfor i := range devices {\n\t\t\/\/ skip the pi's\n\t\tif devices[i].Type.ID == \"Pi3\" {\n\t\t\tcontinue\n\t\t}\n\t\twg.Add(1)\n\n\t\tgo func(idx int) {\n\t\t\tdefer wg.Done()\n\n\t\t\tinfo := getHardwareInfo(&devices[idx])\n\t\t\tif info != nil {\n\t\t\t\tsendHardwareInfo(devices[idx].ID, info, eventWrite)\n\t\t\t\thardwareInfo[devices[idx].ID] = *info\n\t\t\t}\n\t\t}(i)\n\t}\n\n\twg.Wait()\n\n\treturn hardwareInfo\n}\n\nfunc getHardwareInfo(device *structs.Device) *structs.HardwareInfo {\n\tif device == nil {\n\t\tlog.L.Errorf(\"device to get hardware info from cannot be null\")\n\t\treturn nil\n\t}\n\n\taddress := device.GetCommandByID(hardwareInfoCommandID).BuildCommandAddress()\n\tif len(address) == 0 {\n\t\tlog.L.Infof(\"%s doesn't have a %s command, so I can't get any hardware info about it\", device.ID, hardwareInfoCommandID)\n\t\treturn nil\n\t}\n\n\tlog.L.Infof(\"Getting hardware info for %s\", device.ID)\n\n\taddress = strings.Replace(address, \":address\", device.Address, 1)\n\n\tclient := &http.Client{\n\t\tTimeout: 20 * time.Second,\n\t}\n\n\tlog.L.Debugf(\"Sending GET request to: %s\", address)\n\n\t\/\/ get hardware info about device\n\tresp, err := client.Get(address)\n\tif err != nil {\n\t\tlog.L.Warnf(\"failed to get hardware info for %s: %s\", device.ID, err)\n\t\treturn nil\n\t}\n\tdefer resp.Body.Close()\n\n\tbytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.L.Warnf(\"failed to get hardware info for %s: %s\", device.ID, err)\n\t\treturn nil\n\t}\n\n\tret := &structs.HardwareInfo{}\n\n\terr = json.Unmarshal(bytes, ret)\n\tif err != nil {\n\t\tlog.L.Warnf(\"failed to get hardware info for %s: %s\", device.ID, err)\n\t\treturn nil\n\t}\n\n\treturn ret\n}\n\nfunc sendHardwareInfo(deviceID string, info *structs.HardwareInfo, eventWrite chan events.Event) {\n\t\/\/ push up events about device\n\ttargetDevice := events.GenerateBasicDeviceInfo(deviceID)\n\tevent := events.Event{\n\t\tGeneratingSystem: localsystem.MustSystemID(),\n\t\tTimestamp: time.Now(),\n\t\tEventTags: []string{\n\t\t\tevents.HardwareInfo,\n\t\t\tevents.DetailState,\n\t\t},\n\t\tTargetDevice: targetDevice,\n\t\tAffectedRoom: events.GenerateBasicRoomInfo(targetDevice.RoomID),\n\t\tKey: \"hardware-info\",\n\t\tData: info,\n\t}\n\teventWrite <- event \/\/ dump up all the hardware info\n\n\t\/\/ reset the data\/key\n\tevent.Data = nil\n\tevent.Key = \"\"\n\n\t\/\/ push up hostname\n\tif len(info.Hostname) > 0 {\n\t\tevent.Key = \"hostname\"\n\t\tevent.Value = info.Hostname\n\t\teventWrite <- event\n\t}\n\n\tif len(info.ModelName) > 0 {\n\t\tevent.Key = \"model-name\"\n\t\tevent.Value = info.ModelName\n\t\teventWrite <- event\n\t}\n\n\tif len(info.SerialNumber) > 0 {\n\t\tevent.Key = \"serial-number\"\n\t\tevent.Value = info.SerialNumber\n\t\teventWrite <- event\n\t}\n\n\tif len(info.FirmwareVersion) > 0 {\n\t\tevent.Key = \"firmware-version\"\n\t\t\/\/ TODO what kind of interface{}...?\n\t\tevent.Value = fmt.Sprintf(\"%v\", info.FirmwareVersion)\n\t\teventWrite <- event\n\t}\n\n\tif len(info.FilterStatus) > 0 {\n\t\tevent.Key = \"filter-status\"\n\t\tevent.Value = info.FilterStatus\n\t\teventWrite <- event\n\t}\n\n\tif len(info.WarningStatus) > 0 {\n\t\tevent.Key = \"warning-status\"\n\n\t\tstr := \"\"\n\n\t\tfor i := range info.WarningStatus {\n\t\t\tstr += info.WarningStatus[i]\n\t\t}\n\n\t\tevent.Value = str\n\t\teventWrite <- event\n\t}\n\n\tif len(info.ErrorStatus) > 0 {\n\t\tevent.Key = \"error-status\"\n\t\tstr := \"\"\n\n\t\tfor i := range info.ErrorStatus {\n\t\t\tstr += info.WarningStatus[i]\n\t\t}\n\n\t\tevent.Value = str\n\t\teventWrite <- event\n\t}\n\n\tif len(info.PowerStatus) > 0 {\n\t\tevent.Key = \"power-status\"\n\t\tevent.Value = info.PowerStatus\n\t\teventWrite <- event\n\t}\n\n\tif info.TimerInfo != nil {\n\t\tevent.Key = \"timer-info\"\n\n\t\t\/\/ TODO what kind of interface{}?\n\t\tevent.Value = fmt.Sprintf(\"%v\", info.TimerInfo)\n\t\teventWrite <- event\n\t}\n\n\tif len(info.NetworkInfo.IPAddress) > 0 {\n\t\tevent.Key = \"ip-address\"\n\t\tevent.Value = info.NetworkInfo.IPAddress\n\t\teventWrite <- event\n\t}\n\n\tif len(info.NetworkInfo.MACAddress) > 0 {\n\t\tevent.Key = \"mac-address\"\n\t\tevent.Value = info.NetworkInfo.MACAddress\n\t\teventWrite <- event\n\t}\n\n\tif len(info.NetworkInfo.Gateway) > 0 {\n\t\tevent.Key = \"default-gateway\"\n\t\tevent.Value = info.NetworkInfo.Gateway\n\t\teventWrite <- event\n\t}\n\n\tif len(info.NetworkInfo.DNS) > 0 {\n\t\tevent.Key = \"dns-addresses\"\n\t\tbuilder := strings.Builder{}\n\n\t\tfor i := range info.NetworkInfo.DNS {\n\t\t\tbuilder.WriteString(info.NetworkInfo.DNS[i])\n\n\t\t\tif i != len(info.NetworkInfo.DNS)-1 {\n\t\t\t\tbuilder.WriteString(\", \")\n\t\t\t}\n\t\t}\n\n\t\tevent.Value = builder.String()\n\t\teventWrite <- event\n\t}\n}\n<commit_msg>only send detail state for some events<commit_after>package ask\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/byuoitav\/common\/db\"\n\t\"github.com\/byuoitav\/common\/log\"\n\t\"github.com\/byuoitav\/common\/nerr\"\n\t\"github.com\/byuoitav\/common\/structs\"\n\t\"github.com\/byuoitav\/common\/v2\/events\"\n\t\"github.com\/byuoitav\/device-monitoring\/localsystem\"\n)\n\nconst (\n\t\/\/ TODO change this command name to match whatever command we need\n\thardwareInfoCommandID = \"HardwareInfo\"\n)\n\n\/\/ DeviceHardwareJob gets hardware information from devices in the room and pushes it up\ntype DeviceHardwareJob struct{}\n\n\/\/ Run runs the job\nfunc (j *DeviceHardwareJob) Run(ctx interface{}, eventWrite chan events.Event) interface{} {\n\tlog.L.Infof(\"Getting hardware info for devices in room\")\n\n\t\/\/ get list of devices from database\n\troomID, err := localsystem.RoomID()\n\tif err != nil {\n\t\treturn err.Addf(\"failed to get hardware info about devices\")\n\t}\n\n\tdevices, gerr := db.GetDB().GetDevicesByRoom(roomID)\n\tif gerr != nil {\n\t\treturn nerr.Translate(gerr).Addf(\"failed to get hardware info about devices in %s\", roomID)\n\t}\n\n\twg := sync.WaitGroup{}\n\thardwareInfo := make(map[string]structs.HardwareInfo)\n\n\tfor i := range devices {\n\t\t\/\/ skip the pi's\n\t\tif devices[i].Type.ID == \"Pi3\" {\n\t\t\tcontinue\n\t\t}\n\t\twg.Add(1)\n\n\t\tgo func(idx int) {\n\t\t\tdefer wg.Done()\n\n\t\t\tinfo := getHardwareInfo(&devices[idx])\n\t\t\tif info != nil {\n\t\t\t\tsendHardwareInfo(devices[idx].ID, info, eventWrite)\n\t\t\t\thardwareInfo[devices[idx].ID] = *info\n\t\t\t}\n\t\t}(i)\n\t}\n\n\twg.Wait()\n\n\treturn hardwareInfo\n}\n\nfunc getHardwareInfo(device *structs.Device) *structs.HardwareInfo {\n\tif device == nil {\n\t\tlog.L.Errorf(\"device to get hardware info from cannot be null\")\n\t\treturn nil\n\t}\n\n\taddress := device.GetCommandByID(hardwareInfoCommandID).BuildCommandAddress()\n\tif len(address) == 0 {\n\t\tlog.L.Infof(\"%s doesn't have a %s command, so I can't get any hardware info about it\", device.ID, hardwareInfoCommandID)\n\t\treturn nil\n\t}\n\n\tlog.L.Infof(\"Getting hardware info for %s\", device.ID)\n\n\taddress = strings.Replace(address, \":address\", device.Address, 1)\n\n\tclient := &http.Client{\n\t\tTimeout: 20 * time.Second,\n\t}\n\n\tlog.L.Debugf(\"Sending GET request to: %s\", address)\n\n\t\/\/ get hardware info about device\n\tresp, err := client.Get(address)\n\tif err != nil {\n\t\tlog.L.Warnf(\"failed to get hardware info for %s: %s\", device.ID, err)\n\t\treturn nil\n\t}\n\tdefer resp.Body.Close()\n\n\tbytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.L.Warnf(\"failed to get hardware info for %s: %s\", device.ID, err)\n\t\treturn nil\n\t}\n\n\tret := &structs.HardwareInfo{}\n\n\terr = json.Unmarshal(bytes, ret)\n\tif err != nil {\n\t\tlog.L.Warnf(\"failed to get hardware info for %s: %s\", device.ID, err)\n\t\treturn nil\n\t}\n\n\treturn ret\n}\n\nfunc sendHardwareInfo(deviceID string, info *structs.HardwareInfo, eventWrite chan events.Event) {\n\t\/\/ push up events about device\n\ttargetDevice := events.GenerateBasicDeviceInfo(deviceID)\n\tevent := events.Event{\n\t\tGeneratingSystem: localsystem.MustSystemID(),\n\t\tTimestamp: time.Now(),\n\t\tEventTags: []string{\n\t\t\tevents.HardwareInfo,\n\t\t},\n\t\tTargetDevice: targetDevice,\n\t\tAffectedRoom: events.GenerateBasicRoomInfo(targetDevice.RoomID),\n\t\tKey: \"hardware-info\",\n\t\tData: info,\n\t}\n\teventWrite <- event \/\/ dump up all the hardware info\n\tevent.Data = nil\n\n\t\/\/ push up hostname\n\tif len(info.Hostname) > 0 {\n\t\ttmp := event\n\t\ttmp.AddToTags(events.DetailState)\n\t\ttmp.Key = \"hostname\"\n\t\ttmp.Value = info.Hostname\n\t\teventWrite <- tmp\n\t}\n\n\tif len(info.ModelName) > 0 {\n\t\ttmp := event\n\t\ttmp.AddToTags(events.DetailState)\n\t\ttmp.Key = \"model-name\"\n\t\ttmp.Value = info.ModelName\n\t\teventWrite <- tmp\n\t}\n\n\tif len(info.SerialNumber) > 0 {\n\t\ttmp := event\n\t\ttmp.AddToTags(events.DetailState)\n\t\ttmp.Key = \"serial-number\"\n\t\ttmp.Value = info.SerialNumber\n\t\teventWrite <- tmp\n\t}\n\n\tif len(info.FirmwareVersion) > 0 {\n\t\ttmp := event\n\t\ttmp.AddToTags(events.DetailState)\n\t\ttmp.Key = \"firmware-version\"\n\t\t\/\/ TODO what kind of interface{}...?\n\t\ttmp.Value = fmt.Sprintf(\"%v\", info.FirmwareVersion)\n\t\teventWrite <- tmp\n\t}\n\n\tif len(info.FilterStatus) > 0 {\n\t\ttmp := event\n\t\ttmp.AddToTags(events.DetailState)\n\t\ttmp.Key = \"filter-status\"\n\t\ttmp.Value = info.FilterStatus\n\t\teventWrite <- tmp\n\t}\n\n\tif len(info.WarningStatus) > 0 {\n\t\ttmp := event\n\t\ttmp.AddToTags(events.DetailState)\n\t\ttmp.Key = \"warning-status\"\n\n\t\tstr := \"\"\n\n\t\tfor i := range info.WarningStatus {\n\t\t\tstr += info.WarningStatus[i]\n\t\t}\n\n\t\ttmp.Value = str\n\t\teventWrite <- tmp\n\t}\n\n\tif len(info.ErrorStatus) > 0 {\n\t\ttmp := event\n\t\ttmp.AddToTags(events.DetailState)\n\t\ttmp.Key = \"error-status\"\n\t\tstr := \"\"\n\n\t\tfor i := range info.ErrorStatus {\n\t\t\tstr += info.WarningStatus[i]\n\t\t}\n\n\t\ttmp.Value = str\n\t\teventWrite <- tmp\n\t}\n\n\tif len(info.PowerStatus) > 0 {\n\t\tevent.Key = \"power-status\"\n\t\tevent.Value = info.PowerStatus\n\t\teventWrite <- event\n\t}\n\n\tif info.TimerInfo != nil {\n\t\tevent.Key = \"timer-info\"\n\n\t\t\/\/ TODO what kind of interface{}?\n\t\tevent.Value = fmt.Sprintf(\"%v\", info.TimerInfo)\n\t\teventWrite <- event\n\t}\n\n\tif len(info.NetworkInfo.IPAddress) > 0 {\n\t\tevent.Key = \"ip-address\"\n\t\tevent.Value = info.NetworkInfo.IPAddress\n\t\teventWrite <- event\n\t}\n\n\tif len(info.NetworkInfo.MACAddress) > 0 {\n\t\tevent.Key = \"mac-address\"\n\t\tevent.Value = info.NetworkInfo.MACAddress\n\t\teventWrite <- event\n\t}\n\n\tif len(info.NetworkInfo.Gateway) > 0 {\n\t\tevent.Key = \"default-gateway\"\n\t\tevent.Value = info.NetworkInfo.Gateway\n\t\teventWrite <- event\n\t}\n\n\tif len(info.NetworkInfo.DNS) > 0 {\n\t\tevent.Key = \"dns-addresses\"\n\t\tbuilder := strings.Builder{}\n\n\t\tfor i := range info.NetworkInfo.DNS {\n\t\t\tbuilder.WriteString(info.NetworkInfo.DNS[i])\n\n\t\t\tif i != len(info.NetworkInfo.DNS)-1 {\n\t\t\t\tbuilder.WriteString(\", \")\n\t\t\t}\n\t\t}\n\n\t\tevent.Value = builder.String()\n\t\teventWrite <- event\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2015 The Things Network\n\/\/ Use of this source code is governed by the MIT license that can be found in the LICENSE file.\n\npackage gateway\n\nimport (\n\t\"fmt\"\n\t\"github.com\/thethingsnetwork\/core\/lorawan\/semtech\"\n\t\"github.com\/thethingsnetwork\/core\/utils\/pointer\"\n\t\"io\"\n\t\"time\"\n)\n\ntype Forwarder struct {\n\tId [8]byte \/\/ Gateway's Identifier\n\talti int \/\/ GPS altitude in RX meters\n\tupnb uint \/\/ Number of upstream datagrams sent\n\tackn uint \/\/ Number of upstream datagrams that were acknowledged\n\tdwnb uint \/\/ Number of downlink datagrams received\n\tlati float64 \/\/ GPS latitude, North is +\n\tlong float64 \/\/ GPS longitude, East is +\n\trxfw uint \/\/ Number of radio packets forwarded\n\trxnb uint \/\/ Number of radio packets received\n\tadapters []io.ReadWriteCloser \/\/ List of downlink adapters\n\tpackets []semtech.Packet \/\/ Downlink packets received\n\tcommands chan command \/\/ Concurrent access on gateway stats\n\tErrors chan error \/\/ Done channel\n}\n\ntype commandName string\ntype command struct {\n\tname commandName\n\tdata interface{}\n}\n\nconst (\n\tcmd_ACK commandName = \"Acknowledged\"\n\tcmd_EMIT commandName = \"Emitted\"\n\tcmd_RECVUP commandName = \"Radio Packet Received\"\n\tcmd_RECVDWN commandName = \"Dowlink Datagram Received\"\n\tcmd_FWD commandName = \"Forwarded\"\n\tcmd_FLUSH commandName = \"Flush\"\n\tcmd_STATS commandName = \"Stats\"\n)\n\n\/\/ NewForwarder create a forwarder instance bound to a set of routers.\nfunc NewForwarder(id [8]byte, adapters ...io.ReadWriteCloser) (*Forwarder, error) {\n\tif len(adapters) == 0 {\n\t\treturn nil, fmt.Errorf(\"At least one adapter must be supplied\")\n\t}\n\n\tfwd := &Forwarder{\n\t\tId: id,\n\t\talti: 120,\n\t\tlati: 53.3702,\n\t\tlong: 4.8952,\n\t\tadapters: adapters,\n\t\tcommands: make(chan command),\n\t\tErrors: make(chan error, len(adapters)),\n\t}\n\n\tgo fwd.handleCommands()\n\n\t\/\/ Star listening to each adapter Read() method\n\tfor _, adapter := range fwd.adapters {\n\t\tgo fwd.listenAdapter(adapter)\n\t}\n\n\treturn fwd, nil\n}\n\n\/\/ listenAdapter listen to incoming datagrams from an adapter. Non-valid packets are ignored.\nfunc (fwd Forwarder) listenAdapter(adapter io.ReadWriteCloser) {\n\tacks := make(map[[3]byte]uint) \/\/ adapterIndex | packet.Identifier | packet.Token\n\tfor {\n\t\tbuf := make([]byte, 1024)\n\t\tfmt.Printf(\"Forwarder listens to downlink datagrams\\n\")\n\t\tn, err := adapter.Read(buf)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tfwd.Errors <- err\n\t\t\treturn \/\/ Error on reading, we assume the connection is closed \/ lost\n\t\t}\n\t\tfmt.Printf(\"Forwarder unmarshals datagram %x\\n\", buf[:n])\n\t\tpacket, err := semtech.Unmarshal(buf[:n])\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tcontinue\n\t\t}\n\n\t\ttoken := [3]byte{packet.Identifier, packet.Token[0], packet.Token[1]}\n\t\tswitch packet.Identifier {\n\t\tcase semtech.PUSH_ACK, semtech.PULL_ACK:\n\t\t\tif acks[token] > 0 {\n\t\t\t\tacks[token] -= 1\n\t\t\t\tfwd.commands <- command{cmd_ACK, nil}\n\t\t\t}\n\t\tcase semtech.PULL_RESP:\n\t\t\tfwd.commands <- command{cmd_RECVDWN, packet}\n\t\tdefault:\n\t\t\tfmt.Printf(\"Forwarder ignores contingent packet %+v\\n\", packet)\n\t\t}\n\n\t}\n}\n\n\/\/ handleCommands acts as a mediator between all goroutines that attempt to modify the forwarder\n\/\/ attributes. All sensitive operations are done by commands send though an appropriate channel.\n\/\/ This method consume commands from the channel until it's closed.\nfunc (fwd *Forwarder) handleCommands() {\n\tfor cmd := range fwd.commands {\n\t\tfmt.Printf(\"Fowarder executes command: %v\\n\", cmd.name)\n\t\tswitch cmd.name {\n\t\tcase cmd_ACK:\n\t\t\tfwd.ackn += 1\n\t\tcase cmd_FWD:\n\t\t\tfwd.rxfw += 1\n\t\tcase cmd_EMIT:\n\t\t\tfwd.upnb += 1\n\t\tcase cmd_RECVUP:\n\t\t\tfwd.rxnb += 1\n\t\tcase cmd_RECVDWN:\n\t\t\tfwd.dwnb += 1\n\t\t\tfwd.packets = append(fwd.packets, cmd.data.(semtech.Packet))\n\t\tcase cmd_FLUSH:\n\t\t\tcmd.data.(chan []semtech.Packet) <- fwd.packets\n\t\t\tfwd.packets = make([]semtech.Packet, 0)\n\t\tcase cmd_STATS:\n\t\t\tvar ackr float64\n\t\t\tif fwd.upnb != 0 {\n\t\t\t\tackr = float64(fwd.ackn) \/ float64(fwd.upnb)\n\t\t\t}\n\n\t\t\tcmd.data.(chan semtech.Stat) <- semtech.Stat{\n\t\t\t\tAckr: &ackr,\n\t\t\t\tAlti: pointer.Int(fwd.alti),\n\t\t\t\tDwnb: pointer.Uint(fwd.dwnb),\n\t\t\t\tLati: pointer.Float64(fwd.lati),\n\t\t\t\tLong: pointer.Float64(fwd.long),\n\t\t\t\tRxfw: pointer.Uint(fwd.rxfw),\n\t\t\t\tRxnb: pointer.Uint(fwd.rxnb),\n\t\t\t\tRxok: pointer.Uint(fwd.rxnb),\n\t\t\t\tTime: pointer.Time(time.Now()),\n\t\t\t\tTxnb: pointer.Uint(0),\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Forward dispatch a packet to all connected routers.\nfunc (fwd Forwarder) Forward(packet semtech.Packet) error {\n\tfwd.commands <- command{cmd_RECVUP, nil}\n\tif packet.Identifier != semtech.PUSH_DATA {\n\t\treturn fmt.Errorf(\"Unable to forward with identifier %x\", packet.Identifier)\n\t}\n\n\traw, err := semtech.Marshal(packet)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, adapter := range fwd.adapters {\n\t\tn, err := adapter.Write(raw)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif n < len(raw) {\n\t\t\treturn fmt.Errorf(\"Packet was too long\")\n\t\t}\n\t\tfwd.commands <- command{cmd_EMIT, nil}\n\t}\n\n\tfwd.commands <- command{cmd_FWD, nil}\n\treturn nil\n}\n\n\/\/ Flush spits out all downlink packet received by the forwarder since the last flush.\nfunc (fwd Forwarder) Flush() []semtech.Packet {\n\tchpkt := make(chan []semtech.Packet)\n\tfwd.commands <- command{cmd_FLUSH, chpkt}\n\treturn <-chpkt\n}\n\n\/\/ Stats computes and return the forwarder statistics since it was created\nfunc (fwd Forwarder) Stats() semtech.Stat {\n\tchstats := make(chan semtech.Stat)\n\tfwd.commands <- command{cmd_STATS, chstats}\n\treturn <-chstats\n}\n\n\/\/ Stop terminate the forwarder activity. Closing all routers connections\nfunc (fwd Forwarder) Stop() error {\n\tvar errors []error\n\n\t\/\/ Close the uplink adapters\n\tfor _, adapter := range fwd.adapters {\n\t\terr := adapter.Close()\n\t\tif err != nil {\n\t\t\terrors = append(errors, err)\n\t\t}\n\t}\n\n\tif len(errors) > 0 {\n\t\treturn fmt.Errorf(\"Unable to stop the forwarder: %+v\", errors)\n\t}\n\n\treturn nil\n}\n<commit_msg>[simulators.gateway] Add debug variable. To be removed in the future<commit_after>\/\/ Copyright © 2015 The Things Network\n\/\/ Use of this source code is governed by the MIT license that can be found in the LICENSE file.\n\npackage gateway\n\nimport (\n\t\"fmt\"\n\t\"github.com\/thethingsnetwork\/core\/lorawan\/semtech\"\n\t\"github.com\/thethingsnetwork\/core\/utils\/pointer\"\n\t\"io\"\n\t\"time\"\n)\n\ntype Forwarder struct {\n\tId [8]byte \/\/ Gateway's Identifier\n\tdebug bool\n\talti int \/\/ GPS altitude in RX meters\n\tupnb uint \/\/ Number of upstream datagrams sent\n\tackn uint \/\/ Number of upstream datagrams that were acknowledged\n\tdwnb uint \/\/ Number of downlink datagrams received\n\tlati float64 \/\/ GPS latitude, North is +\n\tlong float64 \/\/ GPS longitude, East is +\n\trxfw uint \/\/ Number of radio packets forwarded\n\trxnb uint \/\/ Number of radio packets received\n\tadapters []io.ReadWriteCloser \/\/ List of downlink adapters\n\tpackets []semtech.Packet \/\/ Downlink packets received\n\tcommands chan command \/\/ Concurrent access on gateway stats\n\tErrors chan error \/\/ Done channel\n}\n\ntype commandName string\ntype command struct {\n\tname commandName\n\tdata interface{}\n}\n\nconst (\n\tcmd_ACK commandName = \"Acknowledged\"\n\tcmd_EMIT commandName = \"Emitted\"\n\tcmd_RECVUP commandName = \"Radio Packet Received\"\n\tcmd_RECVDWN commandName = \"Dowlink Datagram Received\"\n\tcmd_FWD commandName = \"Forwarded\"\n\tcmd_FLUSH commandName = \"Flush\"\n\tcmd_STATS commandName = \"Stats\"\n)\n\n\/\/ NewForwarder create a forwarder instance bound to a set of routers.\nfunc NewForwarder(id [8]byte, adapters ...io.ReadWriteCloser) (*Forwarder, error) {\n\tif len(adapters) == 0 {\n\t\treturn nil, fmt.Errorf(\"At least one adapter must be supplied\")\n\t}\n\n\tfwd := &Forwarder{\n\t\tId: id,\n\t\tdebug: false,\n\t\talti: 120,\n\t\tlati: 53.3702,\n\t\tlong: 4.8952,\n\t\tadapters: adapters,\n\t\tpackets: make([]semtech.Packet, 0),\n\t\tcommands: make(chan command),\n\t\tErrors: make(chan error, len(adapters)),\n\t}\n\n\tgo fwd.handleCommands()\n\n\t\/\/ Star listening to each adapter Read() method\n\tfor _, adapter := range fwd.adapters {\n\t\tgo fwd.listenAdapter(adapter)\n\t}\n\n\treturn fwd, nil\n}\n\n\/\/ listenAdapter listen to incoming datagrams from an adapter. Non-valid packets are ignored.\nfunc (fwd Forwarder) listenAdapter(adapter io.ReadWriteCloser) {\n\tacks := make(map[[3]byte]uint) \/\/ adapterIndex | packet.Identifier | packet.Token\n\tfor {\n\t\tbuf := make([]byte, 1024)\n\t\tif fwd.debug {\n\t\t\tfmt.Printf(\"Forwarder listens to downlink datagrams\\n\")\n\t\t}\n\t\tn, err := adapter.Read(buf)\n\t\tif err != nil {\n\t\t\tif fwd.debug {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\tfwd.Errors <- err\n\t\t\treturn \/\/ Error on reading, we assume the connection is closed \/ lost\n\t\t}\n\t\tif fwd.debug {\n\t\t\tfmt.Printf(\"Forwarder unmarshals datagram %x\\n\", buf[:n])\n\t\t}\n\t\tpacket, err := semtech.Unmarshal(buf[:n])\n\t\tif err != nil {\n\t\t\tif fwd.debug {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\ttoken := [3]byte{packet.Identifier, packet.Token[0], packet.Token[1]}\n\t\tswitch packet.Identifier {\n\t\tcase semtech.PUSH_ACK, semtech.PULL_ACK:\n\t\t\tif acks[token] > 0 {\n\t\t\t\tacks[token] -= 1\n\t\t\t\tfwd.commands <- command{cmd_ACK, nil}\n\t\t\t}\n\t\tcase semtech.PULL_RESP:\n\t\t\tfwd.commands <- command{cmd_RECVDWN, packet}\n\t\tdefault:\n\t\t\tif fwd.debug {\n\t\t\t\tfmt.Printf(\"Forwarder ignores contingent packet %+v\\n\", packet)\n\t\t\t}\n\t\t}\n\n\t}\n}\n\n\/\/ handleCommands acts as a mediator between all goroutines that attempt to modify the forwarder\n\/\/ attributes. All sensitive operations are done by commands send though an appropriate channel.\n\/\/ This method consume commands from the channel until it's closed.\nfunc (fwd *Forwarder) handleCommands() {\n\tfor cmd := range fwd.commands {\n\t\tif fwd.debug {\n\t\t\tfmt.Printf(\"Fowarder executes command: %v\\n\", cmd.name)\n\t\t}\n\n\t\tswitch cmd.name {\n\t\tcase cmd_ACK:\n\t\t\tfwd.ackn += 1\n\t\tcase cmd_FWD:\n\t\t\tfwd.rxfw += 1\n\t\tcase cmd_EMIT:\n\t\t\tfwd.upnb += 1\n\t\tcase cmd_RECVUP:\n\t\t\tfwd.rxnb += 1\n\t\tcase cmd_RECVDWN:\n\t\t\tfwd.dwnb += 1\n\t\t\tfwd.packets = append(fwd.packets, *cmd.data.(*semtech.Packet))\n\t\tcase cmd_FLUSH:\n\t\t\tcmd.data.(chan []semtech.Packet) <- fwd.packets\n\t\t\tfwd.packets = make([]semtech.Packet, 0)\n\t\tcase cmd_STATS:\n\t\t\tvar ackr float64\n\t\t\tif fwd.upnb != 0 {\n\t\t\t\tackr = float64(fwd.ackn) \/ float64(fwd.upnb)\n\t\t\t}\n\n\t\t\tcmd.data.(chan semtech.Stat) <- semtech.Stat{\n\t\t\t\tAckr: &ackr,\n\t\t\t\tAlti: pointer.Int(fwd.alti),\n\t\t\t\tDwnb: pointer.Uint(fwd.dwnb),\n\t\t\t\tLati: pointer.Float64(fwd.lati),\n\t\t\t\tLong: pointer.Float64(fwd.long),\n\t\t\t\tRxfw: pointer.Uint(fwd.rxfw),\n\t\t\t\tRxnb: pointer.Uint(fwd.rxnb),\n\t\t\t\tRxok: pointer.Uint(fwd.rxnb),\n\t\t\t\tTime: pointer.Time(time.Now()),\n\t\t\t\tTxnb: pointer.Uint(0),\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Forward dispatch a packet to all connected routers.\nfunc (fwd Forwarder) Forward(packet semtech.Packet) error {\n\tfwd.commands <- command{cmd_RECVUP, nil}\n\tif packet.Identifier != semtech.PUSH_DATA {\n\t\treturn fmt.Errorf(\"Unable to forward with identifier %x\", packet.Identifier)\n\t}\n\n\traw, err := semtech.Marshal(packet)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, adapter := range fwd.adapters {\n\t\tn, err := adapter.Write(raw)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif n < len(raw) {\n\t\t\treturn fmt.Errorf(\"Packet was too long\")\n\t\t}\n\t\tfwd.commands <- command{cmd_EMIT, nil}\n\t}\n\n\tfwd.commands <- command{cmd_FWD, nil}\n\treturn nil\n}\n\n\/\/ Flush spits out all downlink packet received by the forwarder since the last flush.\nfunc (fwd Forwarder) Flush() []semtech.Packet {\n\tchpkt := make(chan []semtech.Packet)\n\tfwd.commands <- command{cmd_FLUSH, chpkt}\n\treturn <-chpkt\n}\n\n\/\/ Stats computes and return the forwarder statistics since it was created\nfunc (fwd Forwarder) Stats() semtech.Stat {\n\tchstats := make(chan semtech.Stat)\n\tfwd.commands <- command{cmd_STATS, chstats}\n\treturn <-chstats\n}\n\n\/\/ Stop terminate the forwarder activity. Closing all routers connections\nfunc (fwd Forwarder) Stop() error {\n\tvar errors []error\n\n\t\/\/ Close the uplink adapters\n\tfor _, adapter := range fwd.adapters {\n\t\terr := adapter.Close()\n\t\tif err != nil {\n\t\t\terrors = append(errors, err)\n\t\t}\n\t}\n\n\tif len(errors) > 0 {\n\t\treturn fmt.Errorf(\"Unable to stop the forwarder: %+v\", errors)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ When using FileStore, we will have yamls files to store the data.\n\n\/\/ FIXME:\n\/\/ Since I am not planning to use this in production (I prefer etcd as store)\n\/\/ I would NOT be using any optimizations to cache the process file\n\/\/ contents (ie, result of yaml parser) Please give me a patch (one way to clear\n\/\/ the cache is by sending some signals to reread the file and update the cache.\n\/\/ Also we should make sure we reread only the specific files in question).\n\npackage rangestore\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n)\n\nvar _config = \"cluster.yaml\"\n\ntype FileStore struct {\n\tStorePath string\n\tMaxDepth int\n}\n\n\/\/ check whether the StorePath Exists, etc\nfunc ConnectFileStore(dir string, depth int) (f *FileStore, err error) {\n\t\/\/ removing trailing path seperator\n\tif os.IsPathSeparator(dir[len(dir)-1]) {\n\t\tdir = dir[:len(dir)-1]\n\t}\n\tvar fi os.FileInfo\n\t\/\/ check whether the dir exists\n\tfi, err = os.Stat(dir)\n\tif err != nil {\n\t\treturn nil, errors.New(fmt.Sprintf(\"Path [%s] is not a FileStore directory (ERROR: %s)\", dir, err))\n\t}\n\t\/\/ check whether it is a dir\n\tif !fi.IsDir() {\n\t\treturn nil, errors.New(fmt.Sprintf(\"Path [%s] is not a directory\", dir))\n\t}\n\tf = &FileStore{StorePath: dir, MaxDepth: depth}\n\treturn f, nil\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ LOOKUP CLUSTER \/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ LOGIC\n\/\/ -----\n\/\/ * for the first element in cluster create results array\n\/\/ * check whether the cluster is a leaf node\n\/\/ * if yes, call KeyLookup, with key == NODES\n\/\/ * if not, call listClusters\n\/\/ * if more elements are there, repeat the above\n\/\/ but do an ArraytoSet with the results array\nfunc (f *FileStore) ClusterLookup(cluster *[]string) (*[]string, error) {\n\t\/\/ store the resuls\n\tvar results = make([]string, 0)\n\t\/\/ for each cluster, do a lookup\n\t\/\/ (this will only happen only for nested lookups eg, %%..)\n\tfor _, elem := range *cluster {\n\t\tvar err error\n\t\tisLeaf, err := f.checkIsLeafNode(elem)\n\t\tif err != nil {\n\t\t\treturn &[]string{}, err\n\t\t}\n\t\t\/\/ if it is a leaf node, we need do a KeyLookup (NODES)\n\t\tif isLeaf {\n\t\t\t\/\/ by default, lookup for NODES\n\t\t\tresult, err := f.KeyLookup(&[]string{elem}, \"NODES\")\n\t\t\tif err != nil {\n\t\t\t\treturn &[]string{}, err\n\t\t\t}\n\t\t\tresults = append(results, *result...)\n\t\t} else { \/\/ we need to return the children\n\t\t\tresult, err := f.listClusters(elem)\n\t\t\tif err != nil {\n\t\t\t\treturn &[]string{}, err\n\t\t\t}\n\t\t\tresults = append(results, result...)\n\t\t}\n\n\t}\n\n\treturn &results, nil\n}\n\nfunc (f *FileStore) KeyLookup(cluster *[]string, key string) (*[]string, error) {\n\t\/\/ store the resuls\n\tvar results = make([]string, 0)\n\t\/\/ this will most likely be single element arrays\n\t\/\/ can't think of a reason otherwise\n\tfor _, elem := range *cluster {\n\t\t\/\/ 1. read the config\n\t\t\/\/ 2. do a key lookup\n\t\t\/\/ 3. append the result\n\t\tcontent, err := f.readClusterConfig(elem)\n\t\tif err != nil {\n\t\t\treturn &[]string{}, errors.New(fmt.Sprintf(\"KeyLookup for [%s] Failed (Error: %s)\", elem, err))\n\t\t}\n\t\tresult, err := yamlKeyLookup(content, key)\n\t\tif err != nil {\n\t\t\treturn &[]string{}, errors.New(fmt.Sprintf(\"KeyLookup for [%s] Failed (Error: %s)\", elem, err))\n\t\t}\n\t\tresults = append(results, *result...)\n\t}\n\n\treturn &results, nil\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ LOOKUP REVERSE \/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nfunc (f *FileStore) KeyReverseLookup(key string) (*[]string, error) {\n\treturn &[]string{}, errors.New(\"KeyReverseLookup Failed, returning empty\")\n}\n\nfunc (f *FileStore) KeyReverseLookupAttr(key string, attr string) (*[]string, error) {\n\treturn &[]string{}, errors.New(\"KeyReverseLookupAttr Failed, returning empty\")\n}\n\nfunc (f *FileStore) KeyReverseLookupHint(key string, attr string, hint string) (*[]string, error) {\n\treturn &[]string{}, errors.New(\"KeyReverseLookupHint Failed, returning empty\")\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Internal Functions \/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ given a cluster name, it will convert to cluster\n\/\/ in the file system\nfunc (f *FileStore) clusterToPath(cluster string) string {\n\treturn fmt.Sprintf(\"%s\/%s\", f.StorePath, strings.Replace(cluster, \"-\", \"\/\", -1))\n}\n\n\/\/ reads the child clusters of this cluster.\n\/\/ returns only those nodes for which this cluster is parent\nfunc (f *FileStore) listClusters(cluster string) ([]string, error) {\n\tvar dir = f.clusterToPath(cluster)\n\tvar children = make([]string, 0)\n\tfiles, err := ioutil.ReadDir(dir)\n\t\/\/ if there is an error, return err\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\tfor _, f := range files {\n\t\tif f.IsDir() {\n\t\t\tchildren = append(children, f.Name())\n\t\t}\n\t}\n\treturn children, nil\n}\n\n\/\/ Checks whether the cluster is in leaf or not\n\/\/ It will return error if the cluster doesn't exist,\n\/\/ false if not a leaf node, true otherwise\nfunc (f *FileStore) checkIsLeafNode(cluster string) (bool, error) {\n\tvar err error\n\tvar dir = f.clusterToPath(cluster)\n\tvar fi os.FileInfo\n\t\/\/ check whether it is a dir\n\tfi, err = os.Stat(dir)\n\tif err != nil {\n\t\treturn false, errors.New(fmt.Sprintf(\"cluser [%s] is NOT FOUND in FileStore [%s w.r.t %s] (ERROR: %s)\", cluster, dir, f.StorePath, err))\n\t}\n\tif !fi.IsDir() {\n\t\treturn false, errors.New(fmt.Sprintf(\"cluser [%s] is NOT A DIRECTORY in FileStore [%s w.r.t %s] (ERROR: %s)\", cluster, dir, f.StorePath, err))\n\t}\n\n\t\/\/ now check whether this dir has \"cluster.yaml\" as its direct child\n\t_, err = os.Stat(fmt.Sprintf(\"%s\/%s\", dir, _config))\n\t\/\/ if err is nil, it means file exists\n\tif err == nil {\n\t\treturn true, nil\n\t} else if os.IsNotExist(err) {\n\t\treturn false, nil\n\t} else {\n\t\treturn false, errors.New(fmt.Sprintf(\"cluser [%s] is NEITHER a LeafNode or a Cluster Dir in FileStore [%s w.r.t %s] (ERROR: %s)\", cluster, dir, f.StorePath, err))\n\t}\n\n\t\/\/ not a dir\n\treturn true, nil\n}\n\n\/\/ Given a cluster name, it will read the corresponding cluster config\n\/\/ and return the file content as a string\nfunc (f *FileStore) readClusterConfig(cluster string) (content []byte, err error) {\n\tvar dir = f.clusterToPath(cluster)\n\tcontent, err = ioutil.ReadFile(fmt.Sprintf(\"%s\/%s\", dir, _config))\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\treturn content, nil\n}\n<commit_msg>reverse lookup + merging yaml parsing<commit_after>\/\/ When using FileStore, we will have yamls files to store the data.\n\n\/\/ FIXME:\n\/\/ Since I am not planning to use this in production (I prefer etcd as store)\n\/\/ I would NOT be using any optimizations to cache the process file\n\/\/ contents (ie, result of yaml parser) Please give me a patch (one way to clear\n\/\/ the cache is by sending some signals to reread the file and update the cache.\n\/\/ Also we should make sure we reread only the specific files in question).\n\npackage rangestore\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"gopkg.in\/yaml.v2\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\nvar _config = \"cluster.yaml\"\n\ntype FileStore struct {\n\tStorePath string \/\/ directory where yamls are stored\n\tMaxDepth int \/\/ TODO: we could use this for reverse lookup to limit nested look down\n\tFastLookup bool \/\/ fast return, will return the first match\n}\n\n\/\/ check whether the StorePath Exists, etc\nfunc ConnectFileStore(dir string, depth int, fast bool) (f *FileStore, err error) {\n\t\/\/ removing trailing path seperator\n\tif os.IsPathSeparator(dir[len(dir)-1]) {\n\t\tdir = dir[:len(dir)-1]\n\t}\n\tvar fi os.FileInfo\n\t\/\/ check whether the dir exists\n\tfi, err = os.Stat(dir)\n\tif err != nil {\n\t\treturn nil, errors.New(fmt.Sprintf(\"Path [%s] is not a FileStore directory (ERROR: %s)\", dir, err))\n\t}\n\t\/\/ check whether it is a dir\n\tif !fi.IsDir() {\n\t\treturn nil, errors.New(fmt.Sprintf(\"Path [%s] is not a directory\", dir))\n\t}\n\tf = &FileStore{StorePath: dir, MaxDepth: depth, FastLookup: fast}\n\treturn f, nil\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ LOOKUP CLUSTER \/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ LOGIC\n\/\/ -----\n\/\/ * for the first element in cluster create results array\n\/\/ * check whether the cluster is a leaf node\n\/\/ * if yes, call KeyLookup, with key == NODES\n\/\/ * if not, call listClusters\n\/\/ * if more elements are there, repeat the above\n\/\/ but do an ArraytoSet with the results array\nfunc (f *FileStore) ClusterLookup(cluster *[]string) (*[]string, error) {\n\t\/\/ store the resuls\n\tvar results = make([]string, 0)\n\t\/\/ for each cluster, do a lookup\n\t\/\/ (this will only happen only for nested lookups eg, %%..)\n\tfor _, elem := range *cluster {\n\t\tvar err error\n\t\tisLeaf, err := f.checkIsLeafNode(elem)\n\t\tif err != nil {\n\t\t\treturn &[]string{}, err\n\t\t}\n\t\t\/\/ if it is a leaf node, we need do a KeyLookup (NODES)\n\t\tif isLeaf {\n\t\t\t\/\/ by default, lookup for NODES\n\t\t\tresult, err := f.KeyLookup(&[]string{elem}, \"NODES\")\n\t\t\tif err != nil {\n\t\t\t\treturn &[]string{}, err\n\t\t\t}\n\t\t\tresults = append(results, *result...)\n\t\t} else { \/\/ we need to return the children\n\t\t\tresult, err := f.listClusters(elem)\n\t\t\tif err != nil {\n\t\t\t\treturn &[]string{}, err\n\t\t\t}\n\t\t\tresults = append(results, result...)\n\t\t}\n\n\t}\n\n\treturn &results, nil\n}\n\nfunc (f *FileStore) KeyLookup(cluster *[]string, key string) (*[]string, error) {\n\t\/\/ store the resuls\n\tvar results = make([]string, 0)\n\t\/\/ this will most likely be single element arrays\n\t\/\/ can't think of a reason otherwise\n\tfor _, elem := range *cluster {\n\t\t\/\/ 1. read the config\n\t\t\/\/ 2. do a key lookup\n\t\t\/\/ 3. append the result\n\t\tcontent, err := f.readClusterConfig(elem)\n\t\tif err != nil {\n\t\t\treturn &[]string{}, errors.New(fmt.Sprintf(\"KeyLookup for [%s] Failed (Error: %s)\", elem, err))\n\t\t}\n\t\tresult, err := yamlKeyLookup(content, key)\n\t\tif err != nil {\n\t\t\treturn &[]string{}, errors.New(fmt.Sprintf(\"KeyLookup for [%s] Failed (Error: %s)\", elem, err))\n\t\t}\n\t\tresults = append(results, *result...)\n\t}\n\n\treturn &results, nil\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ LOOKUP REVERSE \/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ same as KeyReverseLookupAttr where attr == NODES\nfunc (f *FileStore) KeyReverseLookup(key string) (*[]string, error) {\n\treturn f.KeyReverseLookupAttr(key, \"NODES\")\n}\n\n\/\/ same as KeyReverseLookupAttr where attr == NODES and hint == \"\"\nfunc (f *FileStore) KeyReverseLookupAttr(key string, attr string) (*[]string, error) {\n\treturn f.KeyReverseLookupHint(key, attr, \"\")\n}\n\n\/\/ given a key, it will serach for the cluster where the attr has that key\n\/\/ hint is to limit the scope of search\nfunc (f *FileStore) KeyReverseLookupHint(key string, attr string, hint string) (*[]string, error) {\n\tvar clusters *[]string\n\tvar err error\n\tvar results = make([]string, 0)\n\tvar seen bool\n\n\tclusters, err = f.getAllLeafNodes(hint)\n\tif err != nil {\n\t\treturn &results, nil\n\t}\n\n\tfor _, elem := range *clusters {\n\t\t\/\/ get the cluster config\n\t\tcontent, err := f.readClusterConfig(elem)\n\t\tif err != nil {\n\t\t\treturn &[]string{}, errors.New(fmt.Sprintf(\"KeyLookup for [%s] Failed (Error: %s)\", elem, err))\n\t\t}\n\t\t\/\/ look whether the attr exists\n\t\tresult, err := yamlKeyLookup(content, attr)\n\t\tif err != nil {\n\t\t\tcontinue \/\/ looks like we didn't find the key\n\t\t} else {\n\t\t\tfor _, i := range *result {\n\t\t\t\tif i == key {\n\t\t\t\t\tresults = append(results, elem)\n\t\t\t\t\tseen = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif seen && f.FastLookup {\n\t\t\t\treturn &results, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &results, nil\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\/\/ Internal Functions \/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ given a cluster name, it will convert to cluster\n\/\/ in the file system\nfunc (f *FileStore) clusterToPath(cluster string) string {\n\treturn fmt.Sprintf(\"%s\/%s\", f.StorePath, strings.Replace(cluster, \"-\", \"\/\", -1))\n}\n\n\/\/ reads the child clusters of this cluster.\n\/\/ returns only those nodes for which this cluster is parent\nfunc (f *FileStore) listClusters(cluster string) ([]string, error) {\n\tvar dir = f.clusterToPath(cluster)\n\tvar children = make([]string, 0)\n\tfiles, err := ioutil.ReadDir(dir)\n\t\/\/ if there is an error, return err\n\tif err != nil {\n\t\treturn []string{}, err\n\t}\n\tfor _, f := range files {\n\t\tif f.IsDir() {\n\t\t\tchildren = append(children, f.Name())\n\t\t}\n\t}\n\treturn children, nil\n}\n\n\/\/ Checks whether the cluster is in leaf or not\n\/\/ It will return error if the cluster doesn't exist,\n\/\/ false if not a leaf node, true otherwise\nfunc (f *FileStore) checkIsLeafNode(cluster string) (bool, error) {\n\tvar err error\n\tvar dir = f.clusterToPath(cluster)\n\tvar fi os.FileInfo\n\t\/\/ check whether it is a dir\n\tfi, err = os.Stat(dir)\n\tif err != nil {\n\t\treturn false, errors.New(fmt.Sprintf(\"cluser [%s] is NOT FOUND in FileStore [%s w.r.t %s] (ERROR: %s)\", cluster, dir, f.StorePath, err))\n\t}\n\tif !fi.IsDir() {\n\t\treturn false, errors.New(fmt.Sprintf(\"cluser [%s] is NOT A DIRECTORY in FileStore [%s w.r.t %s] (ERROR: %s)\", cluster, dir, f.StorePath, err))\n\t}\n\n\t\/\/ now check whether this dir has \"cluster.yaml\" as its direct child\n\t_, err = os.Stat(fmt.Sprintf(\"%s\/%s\", dir, _config))\n\t\/\/ if err is nil, it means file exists\n\tif err == nil {\n\t\treturn true, nil\n\t} else if os.IsNotExist(err) {\n\t\treturn false, nil\n\t} else {\n\t\treturn false, errors.New(fmt.Sprintf(\"cluser [%s] is NEITHER a LeafNode or a Cluster Dir in FileStore [%s w.r.t %s] (ERROR: %s)\", cluster, dir, f.StorePath, err))\n\t}\n\n\t\/\/ not a dir\n\treturn true, nil\n}\n\n\/\/ Given a cluster name, it will read the corresponding cluster config\n\/\/ and return the file content as a string\nfunc (f *FileStore) readClusterConfig(cluster string) (content []byte, err error) {\n\tvar dir = f.clusterToPath(cluster)\n\tcontent, err = ioutil.ReadFile(fmt.Sprintf(\"%s\/%s\", dir, _config))\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\treturn content, nil\n}\n\n\/\/ Get all the leaf cluster nodes for a given dir\n\/\/ NOTE: This code is not efficient as \"path\/filepath\" Walk() function\n\/\/ is not efficient since it does the walk in lexical order\nfunc (f *FileStore) getAllLeafNodes(root string) (*[]string, error) {\n\tvar leafs = make([]string, 0)\n\tvar err error\n\t\/\/ if root is given, append to localize the lookup\n\troot = f.clusterToPath(root)\n\t\/\/ do a Clean to remove weirdness in path\n\ttrimPath := fmt.Sprintf(\"%s\/\", filepath.Clean(f.StorePath))\n\t\/\/ do the walk\n\terr = filepath.Walk(\n\t\troot,\n\t\t\/\/ append only the name matches _config\n\t\tfunc(path string, fi os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif fi.Name() == _config {\n\t\t\t\tleafs = append(leafs, strings.Replace(strings.TrimPrefix(filepath.Dir(path), trimPath), \"\/\", \"-\", -1))\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t)\n\n\tif err != nil {\n\t\treturn &[]string{}, errors.New(fmt.Sprintf(\"filepath.Walk Failed for ROOT dir [%s]\", root))\n\t}\n\n\treturn &leafs, nil\n}\n\n\/\/ We expect the YAML data to be in key value, where value is\n\/\/ an array. Incase value is not an array, we will still return\n\/\/ as an array\nfunc yamlKeyLookup(content []byte, key string) (*[]string, error) {\n\tvar u map[string]interface{}\n\tvar err error\n\terr = yaml.Unmarshal(content, &u)\n\t\/\/ if unmarshal fails, return early with error\n\tif err != nil {\n\t\treturn &[]string{}, err\n\t}\n\n\t\/\/ handle KEYS seperately\n\t\/\/ returns all the KEYS of a cluster\n\tif key == \"KEYS\" {\n\t\tvar results = make([]string, 0)\n\t\tfor k := range u {\n\t\t\tresults = append(results, k)\n\t\t}\n\t\treturn &results, nil\n\t}\n\n\t\/\/ check whether the map has the key we are looking for\n\tvalue, ok := u[key]\n\tif !ok {\n\t\treturn &[]string{}, errors.New(fmt.Sprintf(\"Cannot find Key [%s]\", key))\n\t}\n\n\t\/\/ try to return result pointer to an array of strings\n\tswitch value.(type) {\n\t\/\/ if it is an array\n\tcase []interface{}:\n\t\tvar results = make([]string, 0)\n\t\tfor _, elem := range value.([]interface{}) {\n\t\t\tswitch elem.(type) {\n\t\t\tcase string:\n\t\t\t\tresults = append(results, elem.(string))\n\t\t\tcase int:\n\t\t\t\tresults = append(results, fmt.Sprintf(\"%d\", elem.(int)))\n\t\t\tcase bool:\n\t\t\t\tresults = append(results, fmt.Sprintf(\"%t\", elem.(bool)))\n\t\t\t}\n\t\t}\n\t\treturn &results, nil\n\t\t\/\/ if not an array, make it an array\n\tcase string:\n\t\treturn &[]string{value.(string)}, nil\n\tcase int:\n\t\treturn &[]string{fmt.Sprintf(\"%d\", value.(int))}, nil\n\tcase bool:\n\t\treturn &[]string{fmt.Sprintf(\"%t\", value.(bool))}, nil\n\t}\n\n\treturn &[]string{}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\/exec\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/vmarmol\/vertigo\/let\/api\"\n)\n\ntype DockerTaskManager struct {\n\tclient *docker.Client\n\tlock sync.Mutex\n}\n\nvar endpoint = \"unix:\/\/\/var\/run\/docker.sock\"\n\ntype TaskManager interface {\n\tRunTask(runspec *api.RunSpec) (containerSpec *api.ContainerSpec, err error)\n}\n\nfunc NewDockerTaskManager() (TaskManager, error) {\n\tclient, err := docker.NewClient(endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &DockerTaskManager{\n\t\tclient: client,\n\t}, nil\n}\n\nfunc (self *DockerTaskManager) pull(image string) error {\n\tself.lock.Lock()\n\tdefer self.lock.Unlock()\n\tcmd := exec.Command(\"docker\", \"pull\", image)\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn cmd.Wait()\n}\n\nfunc randomUniqString() string {\n\tvar d [8]byte\n\tio.ReadFull(rand.Reader, d[:])\n\tstr := hex.EncodeToString(d[:])\n\treturn fmt.Sprintf(\"%x-%v\", time.Now().Unix(), str)\n}\n\nfunc (self *DockerTaskManager) RunTask(runspec *api.RunSpec) (containerSpec *api.ContainerSpec, err error) {\n\terr = self.pull(runspec.Image)\n\tif err != nil {\n\t\treturn\n\t}\n\n\texposedPorts := make(map[docker.Port]struct{}, len(runspec.Ports))\n\tportBindings := make(map[docker.Port][]docker.PortBinding, len(runspec.Ports))\n\tfor _, port := range runspec.Ports {\n\t\tif port.HostPort != port.ContainerPort {\n\t\t\terr = fmt.Errorf(\"host port != container port: %+v\", port)\n\t\t\treturn\n\t\t}\n\t\tdport := docker.Port(fmt.Sprintf(\"%v\/tcp\", port.ContainerPort))\n\t\texposedPorts[dport] = struct{}{}\n\t\tportBindings[dport] = []docker.PortBinding{\n\t\t\tdocker.PortBinding{\n\t\t\t\tHostPort: fmt.Sprintf(\"%v\", port.HostPort),\n\t\t\t},\n\t\t}\n\t}\n\n\tname := randomUniqString()\n\tenv := make([]string, 0, len(runspec.Env))\n\tfor _, e := range runspec.Env {\n\t\tenv = append(env, e.String())\n\t}\n\n\tcmd := make([]string, 0, 1+len(runspec.Args))\n\tcmd = append(cmd, runspec.Cmd)\n\tif len(runspec.Args) > 0 {\n\t\tcmd = append(cmd, runspec.Args...)\n\t}\n\n\topts := docker.CreateContainerOptions{\n\t\tName: name,\n\t\tConfig: &docker.Config{\n\t\t\tImage: runspec.Image,\n\t\t\tExposedPorts: exposedPorts,\n\t\t\tEnv: env,\n\t\t\tCmd: cmd,\n\t\t},\n\t}\n\tcontainer, err := self.client.CreateContainer(opts)\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = self.client.StartContainer(container.ID, &docker.HostConfig{\n\t\tPortBindings: portBindings,\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\tcontainerSpec = &api.ContainerSpec{\n\t\tId: container.ID,\n\t}\n\treturn\n}\n<commit_msg>tested<commit_after>package main\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\/exec\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/vmarmol\/vertigo\/let\/api\"\n)\n\ntype DockerTaskManager struct {\n\tclient *docker.Client\n\tlock sync.Mutex\n}\n\nvar endpoint = \"unix:\/\/\/var\/run\/docker.sock\"\n\ntype TaskManager interface {\n\tRunTask(runspec *api.RunSpec) (containerSpec *api.ContainerSpec, err error)\n}\n\nfunc NewDockerTaskManager() (TaskManager, error) {\n\tclient, err := docker.NewClient(endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &DockerTaskManager{\n\t\tclient: client,\n\t}, nil\n}\n\nfunc (self *DockerTaskManager) pull(image string) error {\n\tself.lock.Lock()\n\tdefer self.lock.Unlock()\n\tcmd := exec.Command(\"docker\", \"pull\", image)\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn cmd.Wait()\n}\n\nfunc randomUniqString() string {\n\tvar d [8]byte\n\tio.ReadFull(rand.Reader, d[:])\n\tstr := hex.EncodeToString(d[:])\n\treturn fmt.Sprintf(\"%x-%v\", time.Now().Unix(), str)\n}\n\nfunc (self *DockerTaskManager) RunTask(runspec *api.RunSpec) (containerSpec *api.ContainerSpec, err error) {\n\terr = self.pull(runspec.Image)\n\tif err != nil {\n\t\treturn\n\t}\n\tlog.Println(\"pulled image\")\n\n\texposedPorts := make(map[docker.Port]struct{}, len(runspec.Ports))\n\tportBindings := make(map[docker.Port][]docker.PortBinding, len(runspec.Ports))\n\tfor _, port := range runspec.Ports {\n\t\tif port.HostPort != port.ContainerPort {\n\t\t\terr = fmt.Errorf(\"host port != container port: %+v\", port)\n\t\t\treturn\n\t\t}\n\t\tdport := docker.Port(fmt.Sprintf(\"%v\/tcp\", port.ContainerPort))\n\t\texposedPorts[dport] = struct{}{}\n\t\tportBindings[dport] = []docker.PortBinding{\n\t\t\tdocker.PortBinding{\n\t\t\t\tHostPort: fmt.Sprintf(\"%v\", port.HostPort),\n\t\t\t},\n\t\t}\n\t}\n\n\tname := randomUniqString()\n\tenv := make([]string, 0, len(runspec.Env))\n\tfor _, e := range runspec.Env {\n\t\tenv = append(env, e.String())\n\t}\n\n\tcmd := make([]string, 0, 1+len(runspec.Args))\n\tcmd = append(cmd, runspec.Cmd)\n\tif len(runspec.Args) > 0 {\n\t\tcmd = append(cmd, runspec.Args...)\n\t}\n\n\topts := docker.CreateContainerOptions{\n\t\tName: name,\n\t\tConfig: &docker.Config{\n\t\t\tImage: runspec.Image,\n\t\t\tExposedPorts: exposedPorts,\n\t\t\tEnv: env,\n\t\t\tCmd: cmd,\n\t\t},\n\t}\n\tlog.Printf(\"creating container %+v\\n\", opts)\n\tcontainer, err := self.client.CreateContainer(opts)\n\tif err != nil {\n\t\treturn\n\t}\n\tlog.Printf(\"created container %+v\\n\", container)\n\n\terr = self.client.StartContainer(container.ID, &docker.HostConfig{\n\t\tPortBindings: portBindings,\n\t})\n\tif err != nil {\n\t\treturn\n\t}\n\tlog.Printf(\"started container\\n\")\n\tcontainerSpec = &api.ContainerSpec{\n\t\tId: container.ID,\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build darwin dragonfly freebsd linux netbsd openbsd solaris\n\npackage runtime\n\nimport \"runtime\/internal\/sys\"\n\nconst (\n\t_SIG_DFL uintptr = 0\n\t_SIG_IGN uintptr = 1\n)\n\n\/\/ Stores the signal handlers registered before Go installed its own.\n\/\/ These signal handlers will be invoked in cases where Go doesn't want to\n\/\/ handle a particular signal (e.g., signal occurred on a non-Go thread).\n\/\/ See sigfwdgo() for more information on when the signals are forwarded.\n\/\/\n\/\/ Signal forwarding is currently available only on Darwin and Linux.\nvar fwdSig [_NSIG]uintptr\n\n\/\/ sigmask represents a general signal mask compatible with the GOOS\n\/\/ specific sigset types: the signal numbered x is represented by bit x-1\n\/\/ to match the representation expected by sigprocmask.\ntype sigmask [(_NSIG + 31) \/ 32]uint32\n\n\/\/ channels for synchronizing signal mask updates with the signal mask\n\/\/ thread\nvar (\n\tdisableSigChan chan uint32\n\tenableSigChan chan uint32\n\tmaskUpdatedChan chan struct{}\n)\n\nfunc initsig() {\n\t\/\/ _NSIG is the number of signals on this operating system.\n\t\/\/ sigtable should describe what to do for all the possible signals.\n\tif len(sigtable) != _NSIG {\n\t\tprint(\"runtime: len(sigtable)=\", len(sigtable), \" _NSIG=\", _NSIG, \"\\n\")\n\t\tthrow(\"initsig\")\n\t}\n\n\t\/\/ First call: basic setup.\n\tfor i := int32(0); i < _NSIG; i++ {\n\t\tt := &sigtable[i]\n\t\tif t.flags == 0 || t.flags&_SigDefault != 0 {\n\t\t\tcontinue\n\t\t}\n\t\tfwdSig[i] = getsig(i)\n\t\t\/\/ For some signals, we respect an inherited SIG_IGN handler\n\t\t\/\/ rather than insist on installing our own default handler.\n\t\t\/\/ Even these signals can be fetched using the os\/signal package.\n\t\tswitch i {\n\t\tcase _SIGHUP, _SIGINT:\n\t\t\tif getsig(i) == _SIG_IGN {\n\t\t\t\tt.flags = _SigNotify | _SigIgnored\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif t.flags&_SigSetStack != 0 {\n\t\t\tsetsigstack(i)\n\t\t\tcontinue\n\t\t}\n\n\t\tt.flags |= _SigHandling\n\t\tsetsig(i, funcPC(sighandler), true)\n\t}\n}\n\nfunc sigenable(sig uint32) {\n\tif sig >= uint32(len(sigtable)) {\n\t\treturn\n\t}\n\n\tt := &sigtable[sig]\n\tif t.flags&_SigNotify != 0 {\n\t\tensureSigM()\n\t\tenableSigChan <- sig\n\t\t<-maskUpdatedChan\n\t\tif t.flags&_SigHandling == 0 {\n\t\t\tt.flags |= _SigHandling\n\t\t\tif getsig(int32(sig)) == _SIG_IGN {\n\t\t\t\tt.flags |= _SigIgnored\n\t\t\t}\n\t\t\tsetsig(int32(sig), funcPC(sighandler), true)\n\t\t}\n\t}\n}\n\nfunc sigdisable(sig uint32) {\n\tif sig >= uint32(len(sigtable)) {\n\t\treturn\n\t}\n\n\tt := &sigtable[sig]\n\tif t.flags&_SigNotify != 0 {\n\t\tensureSigM()\n\t\tdisableSigChan <- sig\n\t\t<-maskUpdatedChan\n\t\tif t.flags&_SigHandling != 0 {\n\t\t\tt.flags &^= _SigHandling\n\t\t\tif t.flags&_SigIgnored != 0 {\n\t\t\t\tsetsig(int32(sig), _SIG_IGN, true)\n\t\t\t} else {\n\t\t\t\tsetsig(int32(sig), _SIG_DFL, true)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc sigignore(sig uint32) {\n\tif sig >= uint32(len(sigtable)) {\n\t\treturn\n\t}\n\n\tt := &sigtable[sig]\n\tif t.flags&_SigNotify != 0 {\n\t\tt.flags &^= _SigHandling\n\t\tsetsig(int32(sig), _SIG_IGN, true)\n\t}\n}\n\nfunc resetcpuprofiler(hz int32) {\n\tvar it itimerval\n\tif hz == 0 {\n\t\tsetitimer(_ITIMER_PROF, &it, nil)\n\t} else {\n\t\tit.it_interval.tv_sec = 0\n\t\tit.it_interval.set_usec(1000000 \/ hz)\n\t\tit.it_value = it.it_interval\n\t\tsetitimer(_ITIMER_PROF, &it, nil)\n\t}\n\t_g_ := getg()\n\t_g_.m.profilehz = hz\n}\n\nfunc sigpipe() {\n\tsetsig(_SIGPIPE, _SIG_DFL, false)\n\traise(_SIGPIPE)\n}\n\n\/\/ raisebadsignal is called when a signal is received on a non-Go\n\/\/ thread, and the Go program does not want to handle it (that is, the\n\/\/ program has not called os\/signal.Notify for the signal).\nfunc raisebadsignal(sig int32) {\n\tif sig == _SIGPROF {\n\t\t\/\/ Ignore profiling signals that arrive on non-Go threads.\n\t\treturn\n\t}\n\n\tvar handler uintptr\n\tif sig >= _NSIG {\n\t\thandler = _SIG_DFL\n\t} else {\n\t\thandler = fwdSig[sig]\n\t}\n\n\t\/\/ Reset the signal handler and raise the signal.\n\t\/\/ We are currently running inside a signal handler, so the\n\t\/\/ signal is blocked. We need to unblock it before raising the\n\t\/\/ signal, or the signal we raise will be ignored until we return\n\t\/\/ from the signal handler. We know that the signal was unblocked\n\t\/\/ before entering the handler, or else we would not have received\n\t\/\/ it. That means that we don't have to worry about blocking it\n\t\/\/ again.\n\tunblocksig(sig)\n\tsetsig(sig, handler, false)\n\traise(sig)\n\n\t\/\/ If the signal didn't cause the program to exit, restore the\n\t\/\/ Go signal handler and carry on.\n\t\/\/\n\t\/\/ We may receive another instance of the signal before we\n\t\/\/ restore the Go handler, but that is not so bad: we know\n\t\/\/ that the Go program has been ignoring the signal.\n\tsetsig(sig, funcPC(sighandler), true)\n}\n\nfunc crash() {\n\tif GOOS == \"darwin\" {\n\t\t\/\/ OS X core dumps are linear dumps of the mapped memory,\n\t\t\/\/ from the first virtual byte to the last, with zeros in the gaps.\n\t\t\/\/ Because of the way we arrange the address space on 64-bit systems,\n\t\t\/\/ this means the OS X core file will be >128 GB and even on a zippy\n\t\t\/\/ workstation can take OS X well over an hour to write (uninterruptible).\n\t\t\/\/ Save users from making that mistake.\n\t\tif sys.PtrSize == 8 {\n\t\t\treturn\n\t\t}\n\t}\n\n\tupdatesigmask(sigmask{})\n\tsetsig(_SIGABRT, _SIG_DFL, false)\n\traise(_SIGABRT)\n}\n\n\/\/ ensureSigM starts one global, sleeping thread to make sure at least one thread\n\/\/ is available to catch signals enabled for os\/signal.\nfunc ensureSigM() {\n\tif maskUpdatedChan != nil {\n\t\treturn\n\t}\n\tmaskUpdatedChan = make(chan struct{})\n\tdisableSigChan = make(chan uint32)\n\tenableSigChan = make(chan uint32)\n\tgo func() {\n\t\t\/\/ Signal masks are per-thread, so make sure this goroutine stays on one\n\t\t\/\/ thread.\n\t\tLockOSThread()\n\t\tdefer UnlockOSThread()\n\t\t\/\/ The sigBlocked mask contains the signals not active for os\/signal,\n\t\t\/\/ initially all signals except the essential. When signal.Notify()\/Stop is called,\n\t\t\/\/ sigenable\/sigdisable in turn notify this thread to update its signal\n\t\t\/\/ mask accordingly.\n\t\tvar sigBlocked sigmask\n\t\tfor i := range sigBlocked {\n\t\t\tsigBlocked[i] = ^uint32(0)\n\t\t}\n\t\tfor i := range sigtable {\n\t\t\tif sigtable[i].flags&_SigUnblock != 0 {\n\t\t\t\tsigBlocked[(i-1)\/32] &^= 1 << ((uint32(i) - 1) & 31)\n\t\t\t}\n\t\t}\n\t\tupdatesigmask(sigBlocked)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase sig := <-enableSigChan:\n\t\t\t\tif b := sig - 1; b >= 0 {\n\t\t\t\t\tsigBlocked[b\/32] &^= (1 << (b & 31))\n\t\t\t\t}\n\t\t\tcase sig := <-disableSigChan:\n\t\t\t\tif b := sig - 1; b >= 0 {\n\t\t\t\t\tsigBlocked[b\/32] |= (1 << (b & 31))\n\t\t\t\t}\n\t\t\t}\n\t\t\tupdatesigmask(sigBlocked)\n\t\t\tmaskUpdatedChan <- struct{}{}\n\t\t}\n\t}()\n}\n<commit_msg>runtime: fix integer comparison in signal handling<commit_after>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build darwin dragonfly freebsd linux netbsd openbsd solaris\n\npackage runtime\n\nimport \"runtime\/internal\/sys\"\n\nconst (\n\t_SIG_DFL uintptr = 0\n\t_SIG_IGN uintptr = 1\n)\n\n\/\/ Stores the signal handlers registered before Go installed its own.\n\/\/ These signal handlers will be invoked in cases where Go doesn't want to\n\/\/ handle a particular signal (e.g., signal occurred on a non-Go thread).\n\/\/ See sigfwdgo() for more information on when the signals are forwarded.\n\/\/\n\/\/ Signal forwarding is currently available only on Darwin and Linux.\nvar fwdSig [_NSIG]uintptr\n\n\/\/ sigmask represents a general signal mask compatible with the GOOS\n\/\/ specific sigset types: the signal numbered x is represented by bit x-1\n\/\/ to match the representation expected by sigprocmask.\ntype sigmask [(_NSIG + 31) \/ 32]uint32\n\n\/\/ channels for synchronizing signal mask updates with the signal mask\n\/\/ thread\nvar (\n\tdisableSigChan chan uint32\n\tenableSigChan chan uint32\n\tmaskUpdatedChan chan struct{}\n)\n\nfunc initsig() {\n\t\/\/ _NSIG is the number of signals on this operating system.\n\t\/\/ sigtable should describe what to do for all the possible signals.\n\tif len(sigtable) != _NSIG {\n\t\tprint(\"runtime: len(sigtable)=\", len(sigtable), \" _NSIG=\", _NSIG, \"\\n\")\n\t\tthrow(\"initsig\")\n\t}\n\n\t\/\/ First call: basic setup.\n\tfor i := int32(0); i < _NSIG; i++ {\n\t\tt := &sigtable[i]\n\t\tif t.flags == 0 || t.flags&_SigDefault != 0 {\n\t\t\tcontinue\n\t\t}\n\t\tfwdSig[i] = getsig(i)\n\t\t\/\/ For some signals, we respect an inherited SIG_IGN handler\n\t\t\/\/ rather than insist on installing our own default handler.\n\t\t\/\/ Even these signals can be fetched using the os\/signal package.\n\t\tswitch i {\n\t\tcase _SIGHUP, _SIGINT:\n\t\t\tif getsig(i) == _SIG_IGN {\n\t\t\t\tt.flags = _SigNotify | _SigIgnored\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif t.flags&_SigSetStack != 0 {\n\t\t\tsetsigstack(i)\n\t\t\tcontinue\n\t\t}\n\n\t\tt.flags |= _SigHandling\n\t\tsetsig(i, funcPC(sighandler), true)\n\t}\n}\n\nfunc sigenable(sig uint32) {\n\tif sig >= uint32(len(sigtable)) {\n\t\treturn\n\t}\n\n\tt := &sigtable[sig]\n\tif t.flags&_SigNotify != 0 {\n\t\tensureSigM()\n\t\tenableSigChan <- sig\n\t\t<-maskUpdatedChan\n\t\tif t.flags&_SigHandling == 0 {\n\t\t\tt.flags |= _SigHandling\n\t\t\tif getsig(int32(sig)) == _SIG_IGN {\n\t\t\t\tt.flags |= _SigIgnored\n\t\t\t}\n\t\t\tsetsig(int32(sig), funcPC(sighandler), true)\n\t\t}\n\t}\n}\n\nfunc sigdisable(sig uint32) {\n\tif sig >= uint32(len(sigtable)) {\n\t\treturn\n\t}\n\n\tt := &sigtable[sig]\n\tif t.flags&_SigNotify != 0 {\n\t\tensureSigM()\n\t\tdisableSigChan <- sig\n\t\t<-maskUpdatedChan\n\t\tif t.flags&_SigHandling != 0 {\n\t\t\tt.flags &^= _SigHandling\n\t\t\tif t.flags&_SigIgnored != 0 {\n\t\t\t\tsetsig(int32(sig), _SIG_IGN, true)\n\t\t\t} else {\n\t\t\t\tsetsig(int32(sig), _SIG_DFL, true)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc sigignore(sig uint32) {\n\tif sig >= uint32(len(sigtable)) {\n\t\treturn\n\t}\n\n\tt := &sigtable[sig]\n\tif t.flags&_SigNotify != 0 {\n\t\tt.flags &^= _SigHandling\n\t\tsetsig(int32(sig), _SIG_IGN, true)\n\t}\n}\n\nfunc resetcpuprofiler(hz int32) {\n\tvar it itimerval\n\tif hz == 0 {\n\t\tsetitimer(_ITIMER_PROF, &it, nil)\n\t} else {\n\t\tit.it_interval.tv_sec = 0\n\t\tit.it_interval.set_usec(1000000 \/ hz)\n\t\tit.it_value = it.it_interval\n\t\tsetitimer(_ITIMER_PROF, &it, nil)\n\t}\n\t_g_ := getg()\n\t_g_.m.profilehz = hz\n}\n\nfunc sigpipe() {\n\tsetsig(_SIGPIPE, _SIG_DFL, false)\n\traise(_SIGPIPE)\n}\n\n\/\/ raisebadsignal is called when a signal is received on a non-Go\n\/\/ thread, and the Go program does not want to handle it (that is, the\n\/\/ program has not called os\/signal.Notify for the signal).\nfunc raisebadsignal(sig int32) {\n\tif sig == _SIGPROF {\n\t\t\/\/ Ignore profiling signals that arrive on non-Go threads.\n\t\treturn\n\t}\n\n\tvar handler uintptr\n\tif sig >= _NSIG {\n\t\thandler = _SIG_DFL\n\t} else {\n\t\thandler = fwdSig[sig]\n\t}\n\n\t\/\/ Reset the signal handler and raise the signal.\n\t\/\/ We are currently running inside a signal handler, so the\n\t\/\/ signal is blocked. We need to unblock it before raising the\n\t\/\/ signal, or the signal we raise will be ignored until we return\n\t\/\/ from the signal handler. We know that the signal was unblocked\n\t\/\/ before entering the handler, or else we would not have received\n\t\/\/ it. That means that we don't have to worry about blocking it\n\t\/\/ again.\n\tunblocksig(sig)\n\tsetsig(sig, handler, false)\n\traise(sig)\n\n\t\/\/ If the signal didn't cause the program to exit, restore the\n\t\/\/ Go signal handler and carry on.\n\t\/\/\n\t\/\/ We may receive another instance of the signal before we\n\t\/\/ restore the Go handler, but that is not so bad: we know\n\t\/\/ that the Go program has been ignoring the signal.\n\tsetsig(sig, funcPC(sighandler), true)\n}\n\nfunc crash() {\n\tif GOOS == \"darwin\" {\n\t\t\/\/ OS X core dumps are linear dumps of the mapped memory,\n\t\t\/\/ from the first virtual byte to the last, with zeros in the gaps.\n\t\t\/\/ Because of the way we arrange the address space on 64-bit systems,\n\t\t\/\/ this means the OS X core file will be >128 GB and even on a zippy\n\t\t\/\/ workstation can take OS X well over an hour to write (uninterruptible).\n\t\t\/\/ Save users from making that mistake.\n\t\tif sys.PtrSize == 8 {\n\t\t\treturn\n\t\t}\n\t}\n\n\tupdatesigmask(sigmask{})\n\tsetsig(_SIGABRT, _SIG_DFL, false)\n\traise(_SIGABRT)\n}\n\n\/\/ ensureSigM starts one global, sleeping thread to make sure at least one thread\n\/\/ is available to catch signals enabled for os\/signal.\nfunc ensureSigM() {\n\tif maskUpdatedChan != nil {\n\t\treturn\n\t}\n\tmaskUpdatedChan = make(chan struct{})\n\tdisableSigChan = make(chan uint32)\n\tenableSigChan = make(chan uint32)\n\tgo func() {\n\t\t\/\/ Signal masks are per-thread, so make sure this goroutine stays on one\n\t\t\/\/ thread.\n\t\tLockOSThread()\n\t\tdefer UnlockOSThread()\n\t\t\/\/ The sigBlocked mask contains the signals not active for os\/signal,\n\t\t\/\/ initially all signals except the essential. When signal.Notify()\/Stop is called,\n\t\t\/\/ sigenable\/sigdisable in turn notify this thread to update its signal\n\t\t\/\/ mask accordingly.\n\t\tvar sigBlocked sigmask\n\t\tfor i := range sigBlocked {\n\t\t\tsigBlocked[i] = ^uint32(0)\n\t\t}\n\t\tfor i := range sigtable {\n\t\t\tif sigtable[i].flags&_SigUnblock != 0 {\n\t\t\t\tsigBlocked[(i-1)\/32] &^= 1 << ((uint32(i) - 1) & 31)\n\t\t\t}\n\t\t}\n\t\tupdatesigmask(sigBlocked)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase sig := <-enableSigChan:\n\t\t\t\tif b := sig - 1; sig > 0 {\n\t\t\t\t\tsigBlocked[b\/32] &^= (1 << (b & 31))\n\t\t\t\t}\n\t\t\tcase sig := <-disableSigChan:\n\t\t\t\tif b := sig - 1; sig > 0 {\n\t\t\t\t\tsigBlocked[b\/32] |= (1 << (b & 31))\n\t\t\t\t}\n\t\t\t}\n\t\t\tupdatesigmask(sigBlocked)\n\t\t\tmaskUpdatedChan <- struct{}{}\n\t\t}\n\t}()\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n)\n\nfunc brainfuck(src string) []uint8 {\n\ttape := []uint8{0}\n\ttapeIndex := 0\n\n\tsrcLength := len(src)\n\n\tfor srcIndex := 0; srcIndex < srcLength; srcIndex++ {\n\n\t\tswitch src[srcIndex] {\n\t\tcase '>':\n\t\t\ttapeIndex += 1\n\t\t\tif len(tape) <= tapeIndex {\n\t\t\t\ttape = append(tape, 0)\n\t\t\t}\n\n\t\tcase '<':\n\t\t\tif tapeIndex > 0 {\n\t\t\t\ttapeIndex -= 1\n\t\t\t}\n\n\t\tcase '+':\n\t\t\ttape[tapeIndex] += 1\n\n\t\tcase '-':\n\t\t\ttape[tapeIndex] -= 1\n\n\t\tcase '.':\n\t\t\tfmt.Print(string(tape[tapeIndex]))\n\n\t\tcase ',':\n\t\t\tb := make([]byte, 1)\n\t\t\tos.Stdin.Read(b)\n\t\t\ttape[tapeIndex] = b[0]\n\n\t\tcase '[':\n\t\t\tif tape[tapeIndex] == 0 {\n\t\t\t\tdepth := 1\n\t\t\t\tfor depth > 0 {\n\t\t\t\t\tsrcIndex++\n\t\t\t\t\tsrcCharacter := src[srcIndex]\n\t\t\t\t\tif srcCharacter == '[' {\n\t\t\t\t\t\tdepth++\n\t\t\t\t\t} else if srcCharacter == ']' {\n\t\t\t\t\t\tdepth--\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase ']':\n\t\t\tdepth := 1\n\t\t\tfor depth > 0 {\n\t\t\t\tsrcIndex--\n\t\t\t\tsrcCharacter := src[srcIndex]\n\t\t\t\tif srcCharacter == '[' {\n\t\t\t\t\tdepth--\n\t\t\t\t} else if srcCharacter == ']' {\n\t\t\t\t\tdepth++\n\t\t\t\t}\n\t\t\t}\n\t\t\tsrcIndex--\n\t\t}\n\t}\n\n\treturn tape\n}\n\nfunc main() {\n\tcontent, err := ioutil.ReadFile(os.Args[1])\n\tif err != nil {\n\t\tfmt.Println(\"Error reading file\")\n\t\tfmt.Println(err)\n\t} else {\n\t\tvar src = string(content)\n\n\t\tbrainfuck(src)\n\t}\n}\n<commit_msg>declare looping 'depth' IN the for statement<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n)\n\nfunc brainfuck(src string) []uint8 {\n\ttape := []uint8{0}\n\ttapeIndex := 0\n\n\tsrcLength := len(src)\n\n\tfor srcIndex := 0; srcIndex < srcLength; srcIndex++ {\n\n\t\tswitch src[srcIndex] {\n\t\tcase '>':\n\t\t\ttapeIndex += 1\n\t\t\tif len(tape) <= tapeIndex {\n\t\t\t\ttape = append(tape, 0)\n\t\t\t}\n\n\t\tcase '<':\n\t\t\tif tapeIndex > 0 {\n\t\t\t\ttapeIndex -= 1\n\t\t\t}\n\n\t\tcase '+':\n\t\t\ttape[tapeIndex] += 1\n\n\t\tcase '-':\n\t\t\ttape[tapeIndex] -= 1\n\n\t\tcase '.':\n\t\t\tfmt.Print(string(tape[tapeIndex]))\n\n\t\tcase ',':\n\t\t\tb := make([]byte, 1)\n\t\t\tos.Stdin.Read(b)\n\t\t\ttape[tapeIndex] = b[0]\n\n\t\tcase '[':\n\t\t\tif tape[tapeIndex] == 0 {\n\t\t\t\tfor depth := 1; depth > 0; {\n\t\t\t\t\tsrcIndex++\n\t\t\t\t\tsrcCharacter := src[srcIndex]\n\t\t\t\t\tif srcCharacter == '[' {\n\t\t\t\t\t\tdepth++\n\t\t\t\t\t} else if srcCharacter == ']' {\n\t\t\t\t\t\tdepth--\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase ']':\n\t\t\tfor depth := 1; depth > 0; {\n\t\t\t\tsrcIndex--\n\t\t\t\tsrcCharacter := src[srcIndex]\n\t\t\t\tif srcCharacter == '[' {\n\t\t\t\t\tdepth--\n\t\t\t\t} else if srcCharacter == ']' {\n\t\t\t\t\tdepth++\n\t\t\t\t}\n\t\t\t}\n\t\t\tsrcIndex--\n\t\t}\n\t}\n\n\treturn tape\n}\n\nfunc main() {\n\tcontent, err := ioutil.ReadFile(os.Args[1])\n\tif err != nil {\n\t\tfmt.Println(\"Error reading file\")\n\t\tfmt.Println(err)\n\t} else {\n\t\tvar src = string(content)\n\n\t\tbrainfuck(src)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package config\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n)\n\ntype Broker struct {\n\tName string\n\tServiceGenericName string\n\tIP string\n\tPort int\n\tCertFile string\n\tKeyFile string\n\tAuthExchange string\n\tAuthAllExchange string\n\tWebProtocol string\n}\n\ntype Config struct {\n\tAws struct {\n\t\tKey string\n\t\tSecret string\n\t}\n\tBuildNumber int\n\tEnvironment string\n\tRegions struct {\n\t\tVagrant string\n\t\tSJ string\n\t\tAWS string\n\t\tPremium string\n\t}\n\tProjectRoot string\n\tUserSitesDomain string\n\tContainerSubnet string\n\tVmPool string\n\tVersion string\n\tClient struct {\n\t\tStaticFilesBaseUrl string\n\t\tRuntimeOptions RuntimeOptions\n\t}\n\tMongo string\n\tMongoKontrol string\n\tMongoMinWrites int\n\tMq struct {\n\t\tHost string\n\t\tPort int\n\t\tLogin string\n\t\tPassword string\n\t\tVhost string\n\t\tLogLevel string\n\t}\n\tNeo4j struct {\n\t\tRead string\n\t\tWrite string\n\t\tPort int\n\t\tEnabled bool\n\t}\n\tGoLogLevel string\n\tBroker Broker\n\tPremiumBroker Broker\n\tBrokerKite Broker\n\tPremiumBrokerKite Broker\n\tLoggr struct {\n\t\tPush bool\n\t\tUrl string\n\t\tApiKey string\n\t}\n\tLibrato struct {\n\t\tPush bool\n\t\tEmail string\n\t\tToken string\n\t\tInterval int\n\t}\n\tOpsview struct {\n\t\tPush bool\n\t\tHost string\n\t}\n\tElasticSearch struct {\n\t\tHost string\n\t\tPort int\n\t\tQueue string\n\t}\n\tNewKites struct {\n\t\tUseTLS bool\n\t\tCertFile string\n\t\tKeyFile string\n\t}\n\tNewKontrol struct {\n\t\tPort int\n\t\tUseTLS bool\n\t\tCertFile string\n\t\tKeyFile string\n\t\tPublicKeyFile string\n\t\tPrivateKeyFile string\n\t}\n\tProxyKite struct {\n\t\tDomain string\n\t\tCertFile string\n\t\tKeyFile string\n\t}\n\tEtcd []struct {\n\t\tHost string\n\t\tPort int\n\t}\n\tKontrold struct {\n\t\tVhost string\n\t\tOverview struct {\n\t\t\tApiPort int\n\t\t\tApiHost string\n\t\t\tPort int\n\t\t\tKodingHost string\n\t\t\tSocialHost string\n\t\t}\n\t\tApi struct {\n\t\t\tPort int\n\t\t\tURL string\n\t\t}\n\t\tProxy struct {\n\t\t\tPort int\n\t\t\tPortSSL int\n\t\t\tFTPIP string\n\t\t}\n\t}\n\tFollowFeed struct {\n\t\tHost string\n\t\tPort int\n\t\tComponentUser string\n\t\tPassword string\n\t\tVhost string\n\t}\n\tStatsd struct {\n\t\tUse bool\n\t\tIp string\n\t\tPort int\n\t}\n\tTopicModifier struct {\n\t\tCronSchedule string\n\t}\n\tSlack struct {\n\t\tToken string\n\t\tChannel string\n\t}\n\tGraphite struct {\n\t\tUse bool\n\t\tHost string\n\t\tPort int\n\t}\n\tLogLevel map[string]string\n\tRedis string\n\tSubscriptionEndpoint string\n\tGowebserver struct {\n\t\tPort int\n\t}\n\tRerouting struct {\n\t\tPort int\n\t}\n\tSocialApi struct {\n\t\tProxyUrl string\n\t\tCustomDomain struct {\n\t\t\tPublic string\n\t\t\tLocal string\n\t\t}\n\t}\n\tVmwatcher struct {\n\t\tPort string\n\t\tAwsKey string\n\t\tAwsSecret string\n\t\tKloudSecretKey string\n\t\tKloudAddr string\n\t}\n\tSegment string\n\tGatherIngestor struct {\n\t\tPort int\n\t}\n\tSendEventsToSegment bool `json:\"sendEventsToSegment\"`\n\tMailgun struct {\n\t\tDomain string\n\t\tPrivateKey string\n\t\tPublicKey string\n\t}\n}\n\ntype RuntimeOptions struct {\n\tKites struct {\n\t\tDisableWebSocketByDefault bool `json:\"disableWebSocketByDefault\"`\n\t\tStack struct {\n\t\t\tForce bool `json:\"force\"`\n\t\t\tNewKites bool `json:\"newKites\"`\n\t\t} `json:\"stack\"`\n\t\tKontrol struct {\n\t\t\tUsername string `json:\"username\"`\n\t\t} `json:\"kontrol\"`\n\t\tOs struct {\n\t\t\tVersion string `json:\"version\"`\n\t\t} `json:\"os\"`\n\t\tTerminal struct {\n\t\t\tVersion string `json:\"version\"`\n\t\t} `json:\"terminal\"`\n\t\tKlient struct {\n\t\t\tVersion string `json:\"version\"`\n\t\t} `json:\"klient\"`\n\t\tKloud struct {\n\t\t\tVersion string `json:\"version\"`\n\t\t} `json:\"kloud\"`\n\t} `json:\"kites\"`\n\tAlgolia struct {\n\t\tAppId string `json:\"appId\"`\n\t\tApiKey string `json:\"apiKey\"`\n\t\tIndexSuffix string `json:\"indexSuffix\"`\n\t} `json:\"algolia\"`\n\tLogToExternal bool `json:\"logToExternal\"`\n\tSuppressLogs bool `json:\"suppressLogs\"`\n\tLogToInternal bool `json:\"logToInternal\"`\n\tAuthExchange string `json:\"authExchange\"`\n\tEnvironment string `json:\"environment\"`\n\tVersion string `json:\"version\"`\n\tResourceName string `json:\"resourceName\"`\n\tUserSitesDomain string `json:\"userSitesDomain\"`\n\tLogResourceName string `json:\"logResourceName\"`\n\tSocialApiUri string `json:\"socialApiUri\"`\n\tApiUri string `json:\"apiUri\"`\n\tMainUri string `json:\"mainUri\"`\n\tSourceMapsUri string `json:\"sourceMapsUri\"`\n\tBroker struct {\n\t\tUri string `json:\"uri\"`\n\t} `json:\"broker\"`\n\tAppsUri string `json:\"appsUri\"`\n\tUploadsUri string `json:\"uploadsUri\"`\n\tUploadsUriForGroup string `json:\"uploadsUriForGroup\"`\n\tFileFetchTimeout int `json:\"fileFetchTimeout\"`\n\tUserIdleMs int `json:\"userIdleMs\"`\n\tEmbedly struct {\n\t\tApiKey string `json:\"apiKey\"`\n\t} `json:\"embedly\"`\n\tGithub struct {\n\t\tClientId string `json:\"clientId\"`\n\t} `json:\"github\"`\n\tNewkontrol struct {\n\t\tUrl string `json:\"url\"`\n\t} `json:\"newkontrol\"`\n\tSessionCookie struct {\n\t\tMaxAge int `json:\"maxAge\"`\n\t\tSecure bool `json:\"secure\"`\n\t} `json:\"sessionCookie\"`\n\tTroubleshoot struct {\n\t\tIdleTime int `json:\"idleTime\"`\n\t\tExternalUrl string `json:\"externalUrl\"`\n\t} `json:\"troubleshoot\"`\n\tStripe struct {\n\t\tToken string `json:\"token\"`\n\t} `json:\"stripe\"`\n\tExternalProfiles struct {\n\t\tGoogle struct {\n\t\t\tNicename string `json:\"nicename\"`\n\t\t} `json:\"google\"`\n\t\tLinkedin struct {\n\t\t\tNicename string `json:\"nicename\"`\n\t\t} `json:\"linkedin\"`\n\t\tTwitter struct {\n\t\t\tNicename string `json:\"nicename\"`\n\t\t} `json:\"twitter\"`\n\t\tOdesk struct {\n\t\t\tNicename string `json:\"nicename\"`\n\t\t\tUrlLocation string `json:\"urlLocation\"`\n\t\t} `json:\"odesk\"`\n\t\tFacebook struct {\n\t\t\tNicename string `json:\"nicename\"`\n\t\t\tUrlLocation string `json:\"urlLocation\"`\n\t\t} `json:\"facebook\"`\n\t\tGithub struct {\n\t\t\tNicename string `json:\"nicename\"`\n\t\t\tUrlLocation string `json:\"urlLocation\"`\n\t\t} `json:\"github\"`\n\t} `json:\"externalProfiles\"`\n\tEntryPoint struct {\n\t\tSlug string `json:\"slug\"`\n\t\tType string `json:\"type\"`\n\t} `json:\"entryPoint\"`\n\tRoles []string `json:\"roles\"`\n\tPermissions []interface{} `json:\"permissions\"`\n\tSiftScience string `json:\"siftScience\"`\n\tPaypal struct {\n\t\tFormUrl string `json:\"formUrl\"`\n\t} `json:\"paypal\"`\n\tPubnub struct {\n\t\tSubscribeKey string `json:\"subscribekey\"`\n\t\tEnabled bool `json:\"enabled\"`\n\t\tSSL bool `json:\"ssl\"`\n\t} `json:\"pubnub\"`\n\tCollaboration struct {\n\t\tTimeout int `json:\"timeout\"`\n\t} `json:\"collaboration\"`\n\tPaymentBlockDuration float64 `json:\"paymentBlockDuration\"`\n\tTokbox struct {\n\t\tApiKey string `json:\"apiKey\"`\n\t} `json:\"tokbox\"`\n\tDisabledFeatures struct {\n\t\tModeration bool `json:\"moderation\"`\n\t\tTeams bool `json:\"teams\"`\n\t\tBotChannel bool `json:\"botchannel\"`\n\t} `json:\"disabledFeatures\"`\n\tContentRotatorUrl string `json:\"contentRotatorUrl\"`\n\tIntegration struct {\n\t\tUrl string `json:\"url\"`\n\t} `json:\"integration\"`\n\tWebhookMiddleware struct {\n\t\tUrl string `json:\"url\"`\n\t} `json:\"webhookMiddleware\"`\n\tGoogle struct {\n\t\tApiKey string `json:\"apiKey\"`\n\t} `json:\"google\"`\n\tRecaptcha struct {\n\t\tKey string `json:\"key\"`\n\t\tEnabled bool `json:\"enabled\"`\n\t} `json:\"recaptcha\"`\n\tSendEventsToSegment bool `json:\"sendEventsToSegment\"`\n}\n\n\/\/ TODO: THIS IS ADDED SO ALL GO PACKAGES CLEANLY EXIT EVEN WHEN\n\/\/ RUN WITH RERUN\n\nfunc init() {\n\n\tgo func() {\n\t\tsignals := make(chan os.Signal, 1)\n\t\tsignal.Notify(signals)\n\t\tfor {\n\t\t\tsignal := <-signals\n\t\t\tswitch signal {\n\t\t\tcase syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGSTOP:\n\t\t\t\tos.Exit(0)\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc MustConfig(profile string) *Config {\n\tconf, err := readConfig(\"\", profile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn conf\n}\n\n\/\/ MustEnv is like Env, but panics if the Config cannot be read successfully.\nfunc MustEnv() *Config {\n\tconf, err := Env()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn conf\n}\n\n\/\/ Env reads from the KONFIG_JSON environment variable and intitializes the\n\/\/ Config struct\nfunc Env() (*Config, error) {\n\treturn readConfig(\"\", \"\")\n}\n\n\/\/ TODO: Fix this shit below where dir and profile is not even used ...\nfunc MustConfigDir(dir, profile string) *Config {\n\tconf, err := readConfig(dir, profile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn conf\n}\n\nfunc readConfig(configDir, profile string) (*Config, error) {\n\tjsonData := os.Getenv(\"KONFIG_JSON\")\n\tif jsonData == \"\" {\n\t\treturn nil, errors.New(\"KONFIG_JSON is not set\")\n\t}\n\n\tconf := new(Config)\n\terr := json.Unmarshal([]byte(jsonData), &conf)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Configuration error, make sure KONFIG_JSON is set: %s\\nConfiguration source output:\\n%s\\n\",\n\t\t\terr.Error(), string(jsonData))\n\t}\n\n\treturn conf, nil\n}\n<commit_msg>Config: update config schema for gowebserver<commit_after>package config\n\nimport (\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n)\n\ntype Broker struct {\n\tName string\n\tServiceGenericName string\n\tIP string\n\tPort int\n\tCertFile string\n\tKeyFile string\n\tAuthExchange string\n\tAuthAllExchange string\n\tWebProtocol string\n}\n\ntype Config struct {\n\tAws struct {\n\t\tKey string\n\t\tSecret string\n\t}\n\tBuildNumber int\n\tEnvironment string\n\tRegions struct {\n\t\tVagrant string\n\t\tSJ string\n\t\tAWS string\n\t\tPremium string\n\t}\n\tProjectRoot string\n\tUserSitesDomain string\n\tContainerSubnet string\n\tVmPool string\n\tVersion string\n\tClient struct {\n\t\tStaticFilesBaseUrl string\n\t\tRuntimeOptions RuntimeOptions\n\t}\n\tMongo string\n\tMongoKontrol string\n\tMongoMinWrites int\n\tMq struct {\n\t\tHost string\n\t\tPort int\n\t\tLogin string\n\t\tPassword string\n\t\tVhost string\n\t\tLogLevel string\n\t}\n\tNeo4j struct {\n\t\tRead string\n\t\tWrite string\n\t\tPort int\n\t\tEnabled bool\n\t}\n\tGoLogLevel string\n\tBroker Broker\n\tPremiumBroker Broker\n\tBrokerKite Broker\n\tPremiumBrokerKite Broker\n\tLoggr struct {\n\t\tPush bool\n\t\tUrl string\n\t\tApiKey string\n\t}\n\tLibrato struct {\n\t\tPush bool\n\t\tEmail string\n\t\tToken string\n\t\tInterval int\n\t}\n\tOpsview struct {\n\t\tPush bool\n\t\tHost string\n\t}\n\tElasticSearch struct {\n\t\tHost string\n\t\tPort int\n\t\tQueue string\n\t}\n\tNewKites struct {\n\t\tUseTLS bool\n\t\tCertFile string\n\t\tKeyFile string\n\t}\n\tNewKontrol struct {\n\t\tPort int\n\t\tUseTLS bool\n\t\tCertFile string\n\t\tKeyFile string\n\t\tPublicKeyFile string\n\t\tPrivateKeyFile string\n\t}\n\tProxyKite struct {\n\t\tDomain string\n\t\tCertFile string\n\t\tKeyFile string\n\t}\n\tEtcd []struct {\n\t\tHost string\n\t\tPort int\n\t}\n\tKontrold struct {\n\t\tVhost string\n\t\tOverview struct {\n\t\t\tApiPort int\n\t\t\tApiHost string\n\t\t\tPort int\n\t\t\tKodingHost string\n\t\t\tSocialHost string\n\t\t}\n\t\tApi struct {\n\t\t\tPort int\n\t\t\tURL string\n\t\t}\n\t\tProxy struct {\n\t\t\tPort int\n\t\t\tPortSSL int\n\t\t\tFTPIP string\n\t\t}\n\t}\n\tFollowFeed struct {\n\t\tHost string\n\t\tPort int\n\t\tComponentUser string\n\t\tPassword string\n\t\tVhost string\n\t}\n\tStatsd struct {\n\t\tUse bool\n\t\tIp string\n\t\tPort int\n\t}\n\tTopicModifier struct {\n\t\tCronSchedule string\n\t}\n\tSlack struct {\n\t\tToken string\n\t\tChannel string\n\t}\n\tGraphite struct {\n\t\tUse bool\n\t\tHost string\n\t\tPort int\n\t}\n\tLogLevel map[string]string\n\tRedis string\n\tSubscriptionEndpoint string\n\tGowebserver struct {\n\t\tPort int\n\t}\n\tRerouting struct {\n\t\tPort int\n\t}\n\tSocialApi struct {\n\t\tProxyUrl string\n\t\tCustomDomain struct {\n\t\t\tPublic string\n\t\t\tLocal string\n\t\t}\n\t}\n\tVmwatcher struct {\n\t\tPort string\n\t\tAwsKey string\n\t\tAwsSecret string\n\t\tKloudSecretKey string\n\t\tKloudAddr string\n\t}\n\tSegment string\n\tGatherIngestor struct {\n\t\tPort int\n\t}\n\tSendEventsToSegment bool `json:\"sendEventsToSegment\"`\n\tMailgun struct {\n\t\tDomain string\n\t\tPrivateKey string\n\t\tPublicKey string\n\t}\n}\n\ntype RuntimeOptions struct {\n\tKites struct {\n\t\tDisableWebSocketByDefault bool `json:\"disableWebSocketByDefault\"`\n\t\tStack struct {\n\t\t\tForce bool `json:\"force\"`\n\t\t\tNewKites bool `json:\"newKites\"`\n\t\t} `json:\"stack\"`\n\t\tKontrol struct {\n\t\t\tUsername string `json:\"username\"`\n\t\t} `json:\"kontrol\"`\n\t\tOs struct {\n\t\t\tVersion string `json:\"version\"`\n\t\t} `json:\"os\"`\n\t\tTerminal struct {\n\t\t\tVersion string `json:\"version\"`\n\t\t} `json:\"terminal\"`\n\t\tKlient struct {\n\t\t\tVersion string `json:\"version\"`\n\t\t} `json:\"klient\"`\n\t\tKloud struct {\n\t\t\tVersion string `json:\"version\"`\n\t\t} `json:\"kloud\"`\n\t} `json:\"kites\"`\n\tAlgolia struct {\n\t\tAppId string `json:\"appId\"`\n\t\tApiKey string `json:\"apiKey\"`\n\t\tIndexSuffix string `json:\"indexSuffix\"`\n\t} `json:\"algolia\"`\n\tLogToExternal bool `json:\"logToExternal\"`\n\tSuppressLogs bool `json:\"suppressLogs\"`\n\tLogToInternal bool `json:\"logToInternal\"`\n\tAuthExchange string `json:\"authExchange\"`\n\tEnvironment string `json:\"environment\"`\n\tVersion string `json:\"version\"`\n\tResourceName string `json:\"resourceName\"`\n\tUserSitesDomain string `json:\"userSitesDomain\"`\n\tLogResourceName string `json:\"logResourceName\"`\n\tSocialApiUri string `json:\"socialApiUri\"`\n\tApiUri string `json:\"apiUri\"`\n\tMainUri string `json:\"mainUri\"`\n\tSourceMapsUri string `json:\"sourceMapsUri\"`\n\tBroker struct {\n\t\tUri string `json:\"uri\"`\n\t} `json:\"broker\"`\n\tAppsUri string `json:\"appsUri\"`\n\tUploadsUri string `json:\"uploadsUri\"`\n\tUploadsUriForGroup string `json:\"uploadsUriForGroup\"`\n\tFileFetchTimeout int `json:\"fileFetchTimeout\"`\n\tUserIdleMs int `json:\"userIdleMs\"`\n\tEmbedly struct {\n\t\tApiKey string `json:\"apiKey\"`\n\t} `json:\"embedly\"`\n\tGithub struct {\n\t\tClientId string `json:\"clientId\"`\n\t} `json:\"github\"`\n\tNewkontrol struct {\n\t\tUrl string `json:\"url\"`\n\t} `json:\"newkontrol\"`\n\tSessionCookie struct {\n\t\tMaxAge int `json:\"maxAge\"`\n\t\tSecure bool `json:\"secure\"`\n\t} `json:\"sessionCookie\"`\n\tTroubleshoot struct {\n\t\tIdleTime int `json:\"idleTime\"`\n\t\tExternalUrl string `json:\"externalUrl\"`\n\t} `json:\"troubleshoot\"`\n\tStripe struct {\n\t\tToken string `json:\"token\"`\n\t} `json:\"stripe\"`\n\tExternalProfiles struct {\n\t\tGoogle struct {\n\t\t\tNicename string `json:\"nicename\"`\n\t\t} `json:\"google\"`\n\t\tLinkedin struct {\n\t\t\tNicename string `json:\"nicename\"`\n\t\t} `json:\"linkedin\"`\n\t\tTwitter struct {\n\t\t\tNicename string `json:\"nicename\"`\n\t\t} `json:\"twitter\"`\n\t\tOdesk struct {\n\t\t\tNicename string `json:\"nicename\"`\n\t\t\tUrlLocation string `json:\"urlLocation\"`\n\t\t} `json:\"odesk\"`\n\t\tFacebook struct {\n\t\t\tNicename string `json:\"nicename\"`\n\t\t\tUrlLocation string `json:\"urlLocation\"`\n\t\t} `json:\"facebook\"`\n\t\tGithub struct {\n\t\t\tNicename string `json:\"nicename\"`\n\t\t\tUrlLocation string `json:\"urlLocation\"`\n\t\t} `json:\"github\"`\n\t} `json:\"externalProfiles\"`\n\tEntryPoint struct {\n\t\tSlug string `json:\"slug\"`\n\t\tType string `json:\"type\"`\n\t} `json:\"entryPoint\"`\n\tRoles []string `json:\"roles\"`\n\tPermissions []interface{} `json:\"permissions\"`\n\tSiftScience string `json:\"siftScience\"`\n\tPaypal struct {\n\t\tFormUrl string `json:\"formUrl\"`\n\t} `json:\"paypal\"`\n\tPubnub struct {\n\t\tSubscribeKey string `json:\"subscribekey\"`\n\t\tEnabled bool `json:\"enabled\"`\n\t\tSSL bool `json:\"ssl\"`\n\t} `json:\"pubnub\"`\n\tCollaboration struct {\n\t\tTimeout int `json:\"timeout\"`\n\t} `json:\"collaboration\"`\n\tPaymentBlockDuration float64 `json:\"paymentBlockDuration\"`\n\tTokbox struct {\n\t\tApiKey string `json:\"apiKey\"`\n\t} `json:\"tokbox\"`\n\tDisabledFeatures struct {\n\t\tModeration bool `json:\"moderation\"`\n\t\tTeams bool `json:\"teams\"`\n\t\tBotChannel bool `json:\"botchannel\"`\n\t} `json:\"disabledFeatures\"`\n\tContentRotatorUrl string `json:\"contentRotatorUrl\"`\n\tIntegration struct {\n\t\tUrl string `json:\"url\"`\n\t} `json:\"integration\"`\n\tWebhookMiddleware struct {\n\t\tUrl string `json:\"url\"`\n\t} `json:\"webhookMiddleware\"`\n\tGoogle struct {\n\t\tApiKey string `json:\"apiKey\"`\n\t} `json:\"google\"`\n\tRecaptcha struct {\n\t\tKey string `json:\"key\"`\n\t\tEnabled bool `json:\"enabled\"`\n\t} `json:\"recaptcha\"`\n\tSendEventsToSegment bool `json:\"sendEventsToSegment\"`\n\tDomains struct {\n\t\tBase string `json:\"base\"`\n\t\tMail string `json:\"mail\"`\n\t\tMain string `json:\"main\"`\n\t\tPort string `json:\"port\"`\n\t} `json:\"domains\"`\n}\n\n\/\/ TODO: THIS IS ADDED SO ALL GO PACKAGES CLEANLY EXIT EVEN WHEN\n\/\/ RUN WITH RERUN\n\nfunc init() {\n\n\tgo func() {\n\t\tsignals := make(chan os.Signal, 1)\n\t\tsignal.Notify(signals)\n\t\tfor {\n\t\t\tsignal := <-signals\n\t\t\tswitch signal {\n\t\t\tcase syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGSTOP:\n\t\t\t\tos.Exit(0)\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc MustConfig(profile string) *Config {\n\tconf, err := readConfig(\"\", profile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn conf\n}\n\n\/\/ MustEnv is like Env, but panics if the Config cannot be read successfully.\nfunc MustEnv() *Config {\n\tconf, err := Env()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn conf\n}\n\n\/\/ Env reads from the KONFIG_JSON environment variable and intitializes the\n\/\/ Config struct\nfunc Env() (*Config, error) {\n\treturn readConfig(\"\", \"\")\n}\n\n\/\/ TODO: Fix this shit below where dir and profile is not even used ...\nfunc MustConfigDir(dir, profile string) *Config {\n\tconf, err := readConfig(dir, profile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn conf\n}\n\nfunc readConfig(configDir, profile string) (*Config, error) {\n\tjsonData := os.Getenv(\"KONFIG_JSON\")\n\tif jsonData == \"\" {\n\t\treturn nil, errors.New(\"KONFIG_JSON is not set\")\n\t}\n\n\tconf := new(Config)\n\terr := json.Unmarshal([]byte(jsonData), &conf)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Configuration error, make sure KONFIG_JSON is set: %s\\nConfiguration source output:\\n%s\\n\",\n\t\t\terr.Error(), string(jsonData))\n\t}\n\n\treturn conf, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package sequential\n\nimport (\n\t\"math\/rand\"\n)\n\nfunc (model *Model) init_board_strain() {\n\t\/\/ init on the metal\n\tmodel.Board_strain = make([][]int, model.Parameters.Board_Size)\n\tfor row_i := range model.Board_strain {\n\t\tmodel.Board_strain[row_i] = make([]int, model.Parameters.Board_Size)\n\t}\n\n\t\/\/ init in the model\n\tfor row_i := range model.Board_strain {\n\t\tfor col_i := range model.Board_strain[row_i] {\n\t\t\tif rand.Float64() < model.Parameters.R_Init_Odds {\n\t\t\t\tmodel.Board_strain[row_i][col_i] += 1\n\t\t\t}\n\t\t\tif rand.Float64() < model.Parameters.S_Init_Odds {\n\t\t\t\tmodel.Board_strain[row_i][col_i] += 2\n\t\t\t}\n\n\t\t}\n\t}\n}\n\nfunc (model *Model) init_board_signal_num() {\n\t\/\/ init on the metal\n\tmodel.Board_signal_num = make([][][]int, 2)\n\tfor strain_i := range model.Board_signal_num {\n\t\tmodel.Board_signal_num[strain_i] = make([][]int, model.Parameters.Board_Size)\n\t\tfor row_i := range model.Board_signal_num[strain_i] {\n\t\t\tmodel.Board_signal_num[strain_i][row_i] = make([]int, model.Parameters.Board_Size)\n\t\t}\n\t}\n\n\t\/\/ init in the model\n\tcenter_coord := Coordinate{}\n\tfor center_coord.r = 0; center_coord.r < model.Parameters.Board_Size; center_coord.r++ {\n\t\tfor center_coord.c = 0; center_coord.c < model.Parameters.Board_Size; center_coord.c++ {\n\t\t\trad_i := Coordinate{}\n\t\t\tfor rad_i.r = center_coord.r - model.Parameters.S_Radius; rad_i.r <= center_coord.r+model.Parameters.S_Radius; rad_i.r++ {\n\t\t\t\tfor rad_i.c = center_coord.c - model.Parameters.S_Radius; rad_i.c <= center_coord.c+model.Parameters.S_Radius; rad_i.c++ {\n\t\t\t\t\t\/\/ here we count the number of signals for each cell\n\n\t\t\t\t\t\/\/ get strain at rad_i\n\t\t\t\t\tstrain_at_rad := model.get_cell_strain(rad_i.get_toroid_coordinates(model.Parameters.Board_Size))\n\n\t\t\t\t\t\/\/ the allele of signal of the strain at rad_i\n\t\t\t\t\tsignal_strain_at_rad := s4strain[strain_at_rad]\n\n\t\t\t\t\t\/\/ add one signal at center_coord of the signal allele from rad_i\n\t\t\t\t\tmodel.Add_To_Cell_Signal_Num(center_coord, signal_strain_at_rad, 1)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (model *Model) init_board_prod() {\n\t\/\/ init on the metal\n\tmodel.Board_prod = make([][]bool, model.Parameters.Board_Size)\n\tfor i0 := range model.Board_prod {\n\t\tmodel.Board_prod[i0] = make([]bool, model.Parameters.Board_Size)\n\t}\n\n\t\/\/ init in the model\n\tcenter_coord := Coordinate{}\n\tfor center_coord.r = range model.Board_prod {\n\t\tfor center_coord.c = range model.Board_prod[center_coord.r] {\n\t\t\tstrain_at_center_coord := model.get_cell_strain(center_coord)\n\t\t\treceptor_allele_at_center_coord := r4strain[strain_at_center_coord]\n\t\t\tif model.get_cell_signal_num(center_coord, receptor_allele_at_center_coord) >= model.Parameters.Signal_Threshold {\n\t\t\t\tmodel.set_cell_prod(center_coord, 1 > 0)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (model *Model) init_board_pg_num() {\n\tmodel.Board_pg_num = make([][]int, model.Parameters.Board_Size)\n\tfor row_i := range model.Board_pg_num {\n\t\tmodel.Board_pg_num[row_i] = make([]int, model.Parameters.Board_Size)\n\t}\n\n\tcenter_coord := Coordinate{}\n\tfor center_coord.r = range model.Board_pg_num {\n\t\tfor center_coord.c = range model.Board_pg_num[center_coord.r] {\n\t\t\trad_i := Coordinate{}\n\t\t\tfor rad_i.r = center_coord.r - model.Parameters.PG_Radius; rad_i.r < center_coord.r+model.Parameters.PG_Radius+1; rad_i.r++ {\n\t\t\t\tfor rad_i.c = center_coord.c - model.Parameters.PG_Radius; rad_i.c < center_coord.c+model.Parameters.PG_Radius+1; rad_i.c++ {\n\t\t\t\t\trad_i_t := rad_i.get_toroid_coordinates(model.Parameters.Board_Size)\n\t\t\t\t\tif model.get_cell_prod(rad_i_t) {\n\t\t\t\t\t\tmodel.Add_To_Cell_PG_Num(center_coord, 1)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (model *Model) init_boards() {\n\tmodel.init_board_strain()\n\tmodel.init_board_signal_num()\n\tmodel.init_board_prod()\n\tmodel.init_board_pg_num()\n}\n\n\/\/ TODO this comment sucks. initialize all the data samples\nfunc (model *Model) init_data_samples() {\n\tmodel.init_data_samples_snapshots()\n\tmodel.init_data_samples_frequencies()\n\tmodel.init_data_samples_neighborhood_frequencies()\n}\n\n\/\/ Initialize the board snapshots samples\nfunc (model *Model) init_data_samples_snapshots() {\n\tif model.Settings.Snapshots_sample_num != 0 {\n\t\tif model.Parameters.Generations%model.Settings.Snapshots_sample_num == 0 {\n\t\t\t\/\/ the case in which the last snapshot is the same as the last generation\n\t\t\tmodel.Data_samples.Snapshots = make([]Snapshot, 1, model.Settings.Snapshots_sample_num)\n\t\t} else {\n\t\t\t\/\/ the case in which the last snapshot isn't the same as the last generation\n\t\t\tmodel.Data_samples.Snapshots = make([]Snapshot, 1, model.Settings.Snapshots_sample_num+1)\n\t\t}\n\t\tfor sample_i := range model.Data_samples.Snapshots {\n\t\t\tdata := make([][]int, model.Parameters.Board_Size)\n\t\t\tfor row := range data {\n\t\t\t\tdata[row] = make([]int, model.Parameters.Board_Size)\n\t\t\t}\n\t\t\tmodel.Data_samples.Snapshots[sample_i].Data = data\n\t\t}\n\t}\n}\n\n\/\/ Initialize the strain frequencies samples\nfunc (model *Model) init_data_samples_frequencies() {\n\tif model.Settings.Frequencies_sample_num != 0 {\n\t\tif model.Parameters.Generations%model.Settings.Frequencies_sample_num == 0 {\n\t\t\t\/\/ the case in which the last sample is the same as the last generation\n\t\t\tmodel.Data_samples.Frequencies = make([]Frequency, 1, model.Settings.Frequencies_sample_num)\n\t\t} else {\n\t\t\t\/\/ the case in which the last sample isn't the same as the last generation\n\t\t\tmodel.Data_samples.Frequencies = make([]Frequency, 1, model.Settings.Frequencies_sample_num+1)\n\t\t}\n\t\tfor sample_i := range model.Data_samples.Frequencies {\n\t\t\tmodel.Data_samples.Frequencies[sample_i].Data = make([]int, 8)\n\t\t}\n\t}\n}\n\n\/\/ Initialize the neighbors frequencies samples\nfunc (model *Model) init_data_samples_neighborhood_frequencies() {\n\tif model.Settings.Neighborhood_frequencies_sample_num != 0 {\n\t\tif model.Parameters.Generations%model.Settings.Neighborhood_frequencies_sample_num == 0 {\n\t\t\t\/\/ the case in which the last sample is the same as the last generation\n\t\t\tmodel.Data_samples.Neighbors_frequencies = make([]Neighbors_frequency, model.Settings.Neighborhood_frequencies_sample_num)\n\t\t} else {\n\t\t\t\/\/ the case in which the last sample isn't the same as the last generation\n\t\t\tmodel.Data_samples.Neighbors_frequencies = make([]Neighbors_frequency, model.Settings.Neighborhood_frequencies_sample_num+1)\n\t\t}\n\t\tfor sample_i := range model.Data_samples.Neighbors_frequencies {\n\t\t\tdata := make([][]int, 8)\n\t\t\t\/\/ for each strain we'll count how many strains are around it.\n\t\t\tfor strain_i := range data {\n\t\t\t\tdata[strain_i] = make([]int, 8)\n\t\t\t}\n\t\t\tmodel.Data_samples.Neighbors_frequencies[sample_i].Data = data\n\t\t}\n\t}\n\tmodel.Data_samples.Neighbors_frequencies = model.Data_samples.Neighbors_frequencies[0:1]\n}\n<commit_msg>Golangifying names and solving the 0 sample numbers bug.<commit_after>package sequential\n\nimport ()\n\n\/\/ We need only one init function for both models\nfunc (model *Model) InitBoardSignalNum() {\n\t\/\/ init on the metal\n\tmodel.BoardSignalNum = make([][][]int, 2)\n\tfor strain_i := range model.BoardSignalNum {\n\t\tmodel.BoardSignalNum[strain_i] = make([][]int, model.Parameters.BoardSize)\n\t\tfor row_i := range model.BoardSignalNum[strain_i] {\n\t\t\tmodel.BoardSignalNum[strain_i][row_i] = make([]int, model.Parameters.BoardSize)\n\t\t}\n\t}\n\n\t\/\/ init in the model\n\tcenter_coord := Coordinate{}\n\tfor center_coord.r = 0; center_coord.r < model.Parameters.BoardSize; center_coord.r++ {\n\t\tfor center_coord.c = 0; center_coord.c < model.Parameters.BoardSize; center_coord.c++ {\n\t\t\trad_coord := Coordinate{}\n\t\t\tfor rad_coord.r = center_coord.r - model.Parameters.SRadius; rad_coord.r <= center_coord.r+model.Parameters.SRadius; rad_coord.r++ {\n\t\t\t\tfor rad_coord.c = center_coord.c - model.Parameters.SRadius; rad_coord.c <= center_coord.c+model.Parameters.SRadius; rad_coord.c++ {\n\t\t\t\t\t\/\/ here we count the number of signals for each cell\n\n\t\t\t\t\t\/\/ get strain at rad_coord\n\t\t\t\t\tstrain_at_rad := model.CellStrain(rad_coord.ToroidCoordinates(model.Parameters.BoardSize))\n\n\t\t\t\t\t\/\/ the allele of signal of the strain at rad_coord\n\t\t\t\t\tsignal_strain_at_rad := s4strain[strain_at_rad]\n\n\t\t\t\t\t\/\/ add one signal at center_coord of the signal allele from rad_coord\n\t\t\t\t\tmodel.AddToCellSignalNum(center_coord, signal_strain_at_rad, 1)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ TODO this comment sucks. initialize all the data samples\nfunc (model *Model) initDataSamples() {\n\tmodel.initDataSamplesSnapshots()\n\tmodel.initDataSamplesFrequencies()\n\tmodel.initDataSamplesNeighborhoodFrequencies()\n}\n\n\/\/ Initialize the board snapshots samples\nfunc (model *Model) initDataSamplesSnapshots() {\n\tif model.Settings.SnapshotsSampleNum != 0 {\n\t\tif model.Parameters.Generations%model.Settings.SnapshotsSampleNum == 0 {\n\t\t\t\/\/ the case in which the last snapshot is the same as the last generation\n\t\t\tmodel.DataSamples.Snapshots = make([]Snapshot, model.Settings.SnapshotsSampleNum)\n\t\t} else {\n\t\t\t\/\/ the case in which the last snapshot isn't the same as the last generation\n\t\t\tmodel.DataSamples.Snapshots = make([]Snapshot, model.Settings.SnapshotsSampleNum+1)\n\t\t}\n\t\tfor sample_i := range model.DataSamples.Snapshots {\n\t\t\tmodel.DataSamples.Snapshots[sample_i].Data = make([][]int, model.Parameters.BoardSize)\n\t\t\tfor row := range model.DataSamples.Snapshots[sample_i].Data {\n\t\t\t\tmodel.DataSamples.Snapshots[sample_i].Data[row] = make([]int, model.Parameters.BoardSize)\n\t\t\t}\n\t\t\t\/\/fmt.Println(sample_i, model.DataSamples.Snapshots[sample_i])\n\t\t}\n\t\tmodel.DataSamples.Snapshots = model.DataSamples.Snapshots[0:1]\n\t}\n}\n\n\/\/ Initialize the strain frequencies samples\nfunc (model *Model) initDataSamplesFrequencies() {\n\tif model.Settings.FrequenciesSampleNum != 0 {\n\t\tif model.Parameters.Generations%model.Settings.FrequenciesSampleNum == 0 {\n\t\t\t\/\/ the case in which the last sample is the same as the last generation\n\t\t\tmodel.DataSamples.Frequencies = make([]Frequency, model.Settings.FrequenciesSampleNum)\n\t\t} else {\n\t\t\t\/\/ the case in which the last sample isn't the same as the last generation\n\t\t\tmodel.DataSamples.Frequencies = make([]Frequency, model.Settings.FrequenciesSampleNum+1)\n\t\t}\n\t\tfor sample_i := range model.DataSamples.Frequencies {\n\t\t\tmodel.DataSamples.Frequencies[sample_i].Data = make([]int, 8)\n\t\t}\n\t\tmodel.DataSamples.Frequencies = model.DataSamples.Frequencies[0:1]\n\t}\n}\n\n\/\/ Initialize the neighbors frequencies samples\nfunc (model *Model) initDataSamplesNeighborhoodFrequencies() {\n\tif model.Settings.NeighborhoodFrequenciesSampleNum != 0 {\n\t\tif model.Parameters.Generations%model.Settings.NeighborhoodFrequenciesSampleNum == 0 {\n\t\t\t\/\/ the case in which the last sample is the same as the last generation\n\t\t\tmodel.DataSamples.NeighborsFrequencies = make([]NeighborsFrequency, model.Settings.NeighborhoodFrequenciesSampleNum)\n\t\t} else {\n\t\t\t\/\/ the case in which the last sample isn't the same as the last generation\n\t\t\tmodel.DataSamples.NeighborsFrequencies = make([]NeighborsFrequency, model.Settings.NeighborhoodFrequenciesSampleNum+1)\n\t\t}\n\t\tfor sample_i := range model.DataSamples.NeighborsFrequencies {\n\t\t\tdata := make([][]int, 8)\n\t\t\t\/\/ for each strain we'll count how many strains are around it.\n\t\t\tfor strain_i := range data {\n\t\t\t\tdata[strain_i] = make([]int, 8)\n\t\t\t}\n\t\t\tmodel.DataSamples.NeighborsFrequencies[sample_i].Data = data\n\t\t}\n\t\tmodel.DataSamples.NeighborsFrequencies = model.DataSamples.NeighborsFrequencies[0:1]\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ ----------------------------------------------------------------------------\n\/\/\n\/\/ *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***\n\/\/\n\/\/ ----------------------------------------------------------------------------\n\/\/\n\/\/ This file is automatically generated by Magic Modules and manual\n\/\/ changes will be clobbered when the file is regenerated.\n\/\/\n\/\/ Please read more about how to change this file in\n\/\/ .github\/CONTRIBUTING.md.\n\/\/\n\/\/ ----------------------------------------------------------------------------\n\npackage google\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"regexp\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nfunc defaultBinaryAuthorizationPolicy(project string) map[string]interface{} {\n\treturn map[string]interface{}{\n\t\t\"name\": fmt.Sprintf(\"projects\/%s\/policy\", project),\n\t\t\"admissionWhitelistPatterns\": []interface{}{\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"namePattern\": \"gcr.io\/google_containers\/*\",\n\t\t\t},\n\t\t},\n\t\t\"defaultAdmissionRule\": map[string]interface{}{\n\t\t\t\"evaluationMode\": \"ALWAYS_ALLOW\",\n\t\t\t\"enforcementMode\": \"ENFORCED_BLOCK_AND_AUDIT_LOG\",\n\t\t},\n\t}\n}\n\nfunc GetBinaryAuthorizationPolicyCaiObject(d TerraformResourceData, config *Config) (Asset, error) {\n\tname, err := assetName(d, config, \"\/\/binaryauthorization.googleapis.com\/projects\/{{project}}\/policy\")\n\tif err != nil {\n\t\treturn Asset{}, err\n\t}\n\tif obj, err := GetBinaryAuthorizationPolicyApiObject(d, config); err == nil {\n\t\treturn Asset{\n\t\t\tName: name,\n\t\t\tType: \"binaryauthorization.googleapis.com\/Policy\",\n\t\t\tResource: &AssetResource{\n\t\t\t\tVersion: \"v1\",\n\t\t\t\tDiscoveryDocumentURI: \"https:\/\/www.googleapis.com\/discovery\/v1\/apis\/binaryauthorization\/v1\/rest\",\n\t\t\t\tDiscoveryName: \"Policy\",\n\t\t\t\tData: obj,\n\t\t\t},\n\t\t}, nil\n\t} else {\n\t\treturn Asset{}, err\n\t}\n}\n\nfunc GetBinaryAuthorizationPolicyApiObject(d TerraformResourceData, config *Config) (map[string]interface{}, error) {\n\tobj := make(map[string]interface{})\n\tdescriptionProp, err := expandBinaryAuthorizationPolicyDescription(d.Get(\"description\"), d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if v, ok := d.GetOkExists(\"description\"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {\n\t\tobj[\"description\"] = descriptionProp\n\t}\n\tadmissionWhitelistPatternsProp, err := expandBinaryAuthorizationPolicyAdmissionWhitelistPatterns(d.Get(\"admission_whitelist_patterns\"), d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if v, ok := d.GetOkExists(\"admission_whitelist_patterns\"); !isEmptyValue(reflect.ValueOf(admissionWhitelistPatternsProp)) && (ok || !reflect.DeepEqual(v, admissionWhitelistPatternsProp)) {\n\t\tobj[\"admissionWhitelistPatterns\"] = admissionWhitelistPatternsProp\n\t}\n\tclusterAdmissionRulesProp, err := expandBinaryAuthorizationPolicyClusterAdmissionRules(d.Get(\"cluster_admission_rules\"), d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if v, ok := d.GetOkExists(\"cluster_admission_rules\"); !isEmptyValue(reflect.ValueOf(clusterAdmissionRulesProp)) && (ok || !reflect.DeepEqual(v, clusterAdmissionRulesProp)) {\n\t\tobj[\"clusterAdmissionRules\"] = clusterAdmissionRulesProp\n\t}\n\tdefaultAdmissionRuleProp, err := expandBinaryAuthorizationPolicyDefaultAdmissionRule(d.Get(\"default_admission_rule\"), d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if v, ok := d.GetOkExists(\"default_admission_rule\"); !isEmptyValue(reflect.ValueOf(defaultAdmissionRuleProp)) && (ok || !reflect.DeepEqual(v, defaultAdmissionRuleProp)) {\n\t\tobj[\"defaultAdmissionRule\"] = defaultAdmissionRuleProp\n\t}\n\n\treturn obj, nil\n}\n\nfunc expandBinaryAuthorizationPolicyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\treturn v, nil\n}\n\nfunc expandBinaryAuthorizationPolicyAdmissionWhitelistPatterns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\tl := v.([]interface{})\n\treq := make([]interface{}, 0, len(l))\n\tfor _, raw := range l {\n\t\tif raw == nil {\n\t\t\tcontinue\n\t\t}\n\t\toriginal := raw.(map[string]interface{})\n\t\ttransformed := make(map[string]interface{})\n\n\t\ttransformedNamePattern, err := expandBinaryAuthorizationPolicyAdmissionWhitelistPatternsNamePattern(original[\"name_pattern\"], d, config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if val := reflect.ValueOf(transformedNamePattern); val.IsValid() && !isEmptyValue(val) {\n\t\t\ttransformed[\"namePattern\"] = transformedNamePattern\n\t\t}\n\n\t\treq = append(req, transformed)\n\t}\n\treturn req, nil\n}\n\nfunc expandBinaryAuthorizationPolicyAdmissionWhitelistPatternsNamePattern(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\treturn v, nil\n}\n\nfunc expandBinaryAuthorizationPolicyClusterAdmissionRules(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) {\n\tif v == nil {\n\t\treturn map[string]interface{}{}, nil\n\t}\n\tm := make(map[string]interface{})\n\tfor _, raw := range v.(*schema.Set).List() {\n\t\toriginal := raw.(map[string]interface{})\n\t\ttransformed := make(map[string]interface{})\n\n\t\ttransformedEvaluationMode, err := expandBinaryAuthorizationPolicyClusterAdmissionRulesEvaluationMode(original[\"evaluation_mode\"], d, config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttransformed[\"evaluationMode\"] = transformedEvaluationMode\n\t\ttransformedRequireAttestationsBy, err := expandBinaryAuthorizationPolicyClusterAdmissionRulesRequireAttestationsBy(original[\"require_attestations_by\"], d, config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttransformed[\"requireAttestationsBy\"] = transformedRequireAttestationsBy\n\t\ttransformedEnforcementMode, err := expandBinaryAuthorizationPolicyClusterAdmissionRulesEnforcementMode(original[\"enforcement_mode\"], d, config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttransformed[\"enforcementMode\"] = transformedEnforcementMode\n\n\t\tm[original[\"cluster\"].(string)] = transformed\n\t}\n\treturn m, nil\n}\n\nfunc expandBinaryAuthorizationPolicyClusterAdmissionRulesEvaluationMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\treturn v, nil\n}\n\nfunc expandBinaryAuthorizationPolicyClusterAdmissionRulesRequireAttestationsBy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\tr := regexp.MustCompile(\"projects\/(.+)\/attestors\/(.+)\")\n\n\t\/\/ It's possible that all entries in the list will specify a project, in\n\t\/\/ which case the user wouldn't necessarily have to specify a provider\n\t\/\/ project.\n\tvar project string\n\tvar err error\n\tfor _, s := range v.(*schema.Set).List() {\n\t\tif !r.MatchString(s.(string)) {\n\t\t\tproject, err = getProject(d, config)\n\t\t\tif err != nil {\n\t\t\t\treturn []interface{}{}, err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn convertAndMapStringArr(v.(*schema.Set).List(), func(s string) string {\n\t\tif r.MatchString(s) {\n\t\t\treturn s\n\t\t}\n\n\t\treturn fmt.Sprintf(\"projects\/%s\/attestors\/%s\", project, s)\n\t}), nil\n}\n\nfunc expandBinaryAuthorizationPolicyClusterAdmissionRulesEnforcementMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\treturn v, nil\n}\n\nfunc expandBinaryAuthorizationPolicyDefaultAdmissionRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\tl := v.([]interface{})\n\tif len(l) == 0 || l[0] == nil {\n\t\treturn nil, nil\n\t}\n\traw := l[0]\n\toriginal := raw.(map[string]interface{})\n\ttransformed := make(map[string]interface{})\n\n\ttransformedEvaluationMode, err := expandBinaryAuthorizationPolicyDefaultAdmissionRuleEvaluationMode(original[\"evaluation_mode\"], d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if val := reflect.ValueOf(transformedEvaluationMode); val.IsValid() && !isEmptyValue(val) {\n\t\ttransformed[\"evaluationMode\"] = transformedEvaluationMode\n\t}\n\n\ttransformedRequireAttestationsBy, err := expandBinaryAuthorizationPolicyDefaultAdmissionRuleRequireAttestationsBy(original[\"require_attestations_by\"], d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if val := reflect.ValueOf(transformedRequireAttestationsBy); val.IsValid() && !isEmptyValue(val) {\n\t\ttransformed[\"requireAttestationsBy\"] = transformedRequireAttestationsBy\n\t}\n\n\ttransformedEnforcementMode, err := expandBinaryAuthorizationPolicyDefaultAdmissionRuleEnforcementMode(original[\"enforcement_mode\"], d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if val := reflect.ValueOf(transformedEnforcementMode); val.IsValid() && !isEmptyValue(val) {\n\t\ttransformed[\"enforcementMode\"] = transformedEnforcementMode\n\t}\n\n\treturn transformed, nil\n}\n\nfunc expandBinaryAuthorizationPolicyDefaultAdmissionRuleEvaluationMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\treturn v, nil\n}\n\nfunc expandBinaryAuthorizationPolicyDefaultAdmissionRuleRequireAttestationsBy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\tr := regexp.MustCompile(\"projects\/(.+)\/attestors\/(.+)\")\n\n\t\/\/ It's possible that all entries in the list will specify a project, in\n\t\/\/ which case the user wouldn't necessarily have to specify a provider\n\t\/\/ project.\n\tvar project string\n\tvar err error\n\tfor _, s := range v.(*schema.Set).List() {\n\t\tif !r.MatchString(s.(string)) {\n\t\t\tproject, err = getProject(d, config)\n\t\t\tif err != nil {\n\t\t\t\treturn []interface{}{}, err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn convertAndMapStringArr(v.(*schema.Set).List(), func(s string) string {\n\t\tif r.MatchString(s) {\n\t\t\treturn s\n\t\t}\n\n\t\treturn fmt.Sprintf(\"projects\/%s\/attestors\/%s\", project, s)\n\t}), nil\n}\n\nfunc expandBinaryAuthorizationPolicyDefaultAdmissionRuleEnforcementMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\treturn v, nil\n}\n<commit_msg>Binary Authorization: globalPolicyEvaluationMode (#137)<commit_after>\/\/ ----------------------------------------------------------------------------\n\/\/\n\/\/ *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***\n\/\/\n\/\/ ----------------------------------------------------------------------------\n\/\/\n\/\/ This file is automatically generated by Magic Modules and manual\n\/\/ changes will be clobbered when the file is regenerated.\n\/\/\n\/\/ Please read more about how to change this file in\n\/\/ .github\/CONTRIBUTING.md.\n\/\/\n\/\/ ----------------------------------------------------------------------------\n\npackage google\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"regexp\"\n\n\t\"github.com\/hashicorp\/terraform\/helper\/schema\"\n)\n\nfunc defaultBinaryAuthorizationPolicy(project string) map[string]interface{} {\n\treturn map[string]interface{}{\n\t\t\"name\": fmt.Sprintf(\"projects\/%s\/policy\", project),\n\t\t\"admissionWhitelistPatterns\": []interface{}{\n\t\t\tmap[string]interface{}{\n\t\t\t\t\"namePattern\": \"gcr.io\/google_containers\/*\",\n\t\t\t},\n\t\t},\n\t\t\"defaultAdmissionRule\": map[string]interface{}{\n\t\t\t\"evaluationMode\": \"ALWAYS_ALLOW\",\n\t\t\t\"enforcementMode\": \"ENFORCED_BLOCK_AND_AUDIT_LOG\",\n\t\t},\n\t}\n}\n\nfunc GetBinaryAuthorizationPolicyCaiObject(d TerraformResourceData, config *Config) (Asset, error) {\n\tname, err := assetName(d, config, \"\/\/binaryauthorization.googleapis.com\/projects\/{{project}}\/policy\")\n\tif err != nil {\n\t\treturn Asset{}, err\n\t}\n\tif obj, err := GetBinaryAuthorizationPolicyApiObject(d, config); err == nil {\n\t\treturn Asset{\n\t\t\tName: name,\n\t\t\tType: \"binaryauthorization.googleapis.com\/Policy\",\n\t\t\tResource: &AssetResource{\n\t\t\t\tVersion: \"v1\",\n\t\t\t\tDiscoveryDocumentURI: \"https:\/\/www.googleapis.com\/discovery\/v1\/apis\/binaryauthorization\/v1\/rest\",\n\t\t\t\tDiscoveryName: \"Policy\",\n\t\t\t\tData: obj,\n\t\t\t},\n\t\t}, nil\n\t} else {\n\t\treturn Asset{}, err\n\t}\n}\n\nfunc GetBinaryAuthorizationPolicyApiObject(d TerraformResourceData, config *Config) (map[string]interface{}, error) {\n\tobj := make(map[string]interface{})\n\tdescriptionProp, err := expandBinaryAuthorizationPolicyDescription(d.Get(\"description\"), d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if v, ok := d.GetOkExists(\"description\"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) {\n\t\tobj[\"description\"] = descriptionProp\n\t}\n\tglobalPolicyEvaluationModeProp, err := expandBinaryAuthorizationPolicyGlobalPolicyEvaluationMode(d.Get(\"global_policy_evaluation_mode\"), d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if v, ok := d.GetOkExists(\"global_policy_evaluation_mode\"); !isEmptyValue(reflect.ValueOf(globalPolicyEvaluationModeProp)) && (ok || !reflect.DeepEqual(v, globalPolicyEvaluationModeProp)) {\n\t\tobj[\"globalPolicyEvaluationMode\"] = globalPolicyEvaluationModeProp\n\t}\n\tadmissionWhitelistPatternsProp, err := expandBinaryAuthorizationPolicyAdmissionWhitelistPatterns(d.Get(\"admission_whitelist_patterns\"), d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if v, ok := d.GetOkExists(\"admission_whitelist_patterns\"); !isEmptyValue(reflect.ValueOf(admissionWhitelistPatternsProp)) && (ok || !reflect.DeepEqual(v, admissionWhitelistPatternsProp)) {\n\t\tobj[\"admissionWhitelistPatterns\"] = admissionWhitelistPatternsProp\n\t}\n\tclusterAdmissionRulesProp, err := expandBinaryAuthorizationPolicyClusterAdmissionRules(d.Get(\"cluster_admission_rules\"), d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if v, ok := d.GetOkExists(\"cluster_admission_rules\"); !isEmptyValue(reflect.ValueOf(clusterAdmissionRulesProp)) && (ok || !reflect.DeepEqual(v, clusterAdmissionRulesProp)) {\n\t\tobj[\"clusterAdmissionRules\"] = clusterAdmissionRulesProp\n\t}\n\tdefaultAdmissionRuleProp, err := expandBinaryAuthorizationPolicyDefaultAdmissionRule(d.Get(\"default_admission_rule\"), d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if v, ok := d.GetOkExists(\"default_admission_rule\"); !isEmptyValue(reflect.ValueOf(defaultAdmissionRuleProp)) && (ok || !reflect.DeepEqual(v, defaultAdmissionRuleProp)) {\n\t\tobj[\"defaultAdmissionRule\"] = defaultAdmissionRuleProp\n\t}\n\n\treturn obj, nil\n}\n\nfunc expandBinaryAuthorizationPolicyDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\treturn v, nil\n}\n\nfunc expandBinaryAuthorizationPolicyGlobalPolicyEvaluationMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\treturn v, nil\n}\n\nfunc expandBinaryAuthorizationPolicyAdmissionWhitelistPatterns(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\tl := v.([]interface{})\n\treq := make([]interface{}, 0, len(l))\n\tfor _, raw := range l {\n\t\tif raw == nil {\n\t\t\tcontinue\n\t\t}\n\t\toriginal := raw.(map[string]interface{})\n\t\ttransformed := make(map[string]interface{})\n\n\t\ttransformedNamePattern, err := expandBinaryAuthorizationPolicyAdmissionWhitelistPatternsNamePattern(original[\"name_pattern\"], d, config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t} else if val := reflect.ValueOf(transformedNamePattern); val.IsValid() && !isEmptyValue(val) {\n\t\t\ttransformed[\"namePattern\"] = transformedNamePattern\n\t\t}\n\n\t\treq = append(req, transformed)\n\t}\n\treturn req, nil\n}\n\nfunc expandBinaryAuthorizationPolicyAdmissionWhitelistPatternsNamePattern(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\treturn v, nil\n}\n\nfunc expandBinaryAuthorizationPolicyClusterAdmissionRules(v interface{}, d TerraformResourceData, config *Config) (map[string]interface{}, error) {\n\tif v == nil {\n\t\treturn map[string]interface{}{}, nil\n\t}\n\tm := make(map[string]interface{})\n\tfor _, raw := range v.(*schema.Set).List() {\n\t\toriginal := raw.(map[string]interface{})\n\t\ttransformed := make(map[string]interface{})\n\n\t\ttransformedEvaluationMode, err := expandBinaryAuthorizationPolicyClusterAdmissionRulesEvaluationMode(original[\"evaluation_mode\"], d, config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttransformed[\"evaluationMode\"] = transformedEvaluationMode\n\t\ttransformedRequireAttestationsBy, err := expandBinaryAuthorizationPolicyClusterAdmissionRulesRequireAttestationsBy(original[\"require_attestations_by\"], d, config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttransformed[\"requireAttestationsBy\"] = transformedRequireAttestationsBy\n\t\ttransformedEnforcementMode, err := expandBinaryAuthorizationPolicyClusterAdmissionRulesEnforcementMode(original[\"enforcement_mode\"], d, config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttransformed[\"enforcementMode\"] = transformedEnforcementMode\n\n\t\tm[original[\"cluster\"].(string)] = transformed\n\t}\n\treturn m, nil\n}\n\nfunc expandBinaryAuthorizationPolicyClusterAdmissionRulesEvaluationMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\treturn v, nil\n}\n\nfunc expandBinaryAuthorizationPolicyClusterAdmissionRulesRequireAttestationsBy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\tr := regexp.MustCompile(\"projects\/(.+)\/attestors\/(.+)\")\n\n\t\/\/ It's possible that all entries in the list will specify a project, in\n\t\/\/ which case the user wouldn't necessarily have to specify a provider\n\t\/\/ project.\n\tvar project string\n\tvar err error\n\tfor _, s := range v.(*schema.Set).List() {\n\t\tif !r.MatchString(s.(string)) {\n\t\t\tproject, err = getProject(d, config)\n\t\t\tif err != nil {\n\t\t\t\treturn []interface{}{}, err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn convertAndMapStringArr(v.(*schema.Set).List(), func(s string) string {\n\t\tif r.MatchString(s) {\n\t\t\treturn s\n\t\t}\n\n\t\treturn fmt.Sprintf(\"projects\/%s\/attestors\/%s\", project, s)\n\t}), nil\n}\n\nfunc expandBinaryAuthorizationPolicyClusterAdmissionRulesEnforcementMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\treturn v, nil\n}\n\nfunc expandBinaryAuthorizationPolicyDefaultAdmissionRule(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\tl := v.([]interface{})\n\tif len(l) == 0 || l[0] == nil {\n\t\treturn nil, nil\n\t}\n\traw := l[0]\n\toriginal := raw.(map[string]interface{})\n\ttransformed := make(map[string]interface{})\n\n\ttransformedEvaluationMode, err := expandBinaryAuthorizationPolicyDefaultAdmissionRuleEvaluationMode(original[\"evaluation_mode\"], d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if val := reflect.ValueOf(transformedEvaluationMode); val.IsValid() && !isEmptyValue(val) {\n\t\ttransformed[\"evaluationMode\"] = transformedEvaluationMode\n\t}\n\n\ttransformedRequireAttestationsBy, err := expandBinaryAuthorizationPolicyDefaultAdmissionRuleRequireAttestationsBy(original[\"require_attestations_by\"], d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if val := reflect.ValueOf(transformedRequireAttestationsBy); val.IsValid() && !isEmptyValue(val) {\n\t\ttransformed[\"requireAttestationsBy\"] = transformedRequireAttestationsBy\n\t}\n\n\ttransformedEnforcementMode, err := expandBinaryAuthorizationPolicyDefaultAdmissionRuleEnforcementMode(original[\"enforcement_mode\"], d, config)\n\tif err != nil {\n\t\treturn nil, err\n\t} else if val := reflect.ValueOf(transformedEnforcementMode); val.IsValid() && !isEmptyValue(val) {\n\t\ttransformed[\"enforcementMode\"] = transformedEnforcementMode\n\t}\n\n\treturn transformed, nil\n}\n\nfunc expandBinaryAuthorizationPolicyDefaultAdmissionRuleEvaluationMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\treturn v, nil\n}\n\nfunc expandBinaryAuthorizationPolicyDefaultAdmissionRuleRequireAttestationsBy(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\tr := regexp.MustCompile(\"projects\/(.+)\/attestors\/(.+)\")\n\n\t\/\/ It's possible that all entries in the list will specify a project, in\n\t\/\/ which case the user wouldn't necessarily have to specify a provider\n\t\/\/ project.\n\tvar project string\n\tvar err error\n\tfor _, s := range v.(*schema.Set).List() {\n\t\tif !r.MatchString(s.(string)) {\n\t\t\tproject, err = getProject(d, config)\n\t\t\tif err != nil {\n\t\t\t\treturn []interface{}{}, err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn convertAndMapStringArr(v.(*schema.Set).List(), func(s string) string {\n\t\tif r.MatchString(s) {\n\t\t\treturn s\n\t\t}\n\n\t\treturn fmt.Sprintf(\"projects\/%s\/attestors\/%s\", project, s)\n\t}), nil\n}\n\nfunc expandBinaryAuthorizationPolicyDefaultAdmissionRuleEnforcementMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) {\n\treturn v, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 Peter H. Froehlich. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Database API for Go.\n\/\/\n\/\/ Terminology:\n\/\/\n\/\/ Database systems are pieces of software (usually outside of Go)\n\/\/ that allow storage and retrieval of data. We try not to imply\n\/\/ \"relational\" at the level of this API.\n\/\/\n\/\/ Database drivers are pieces of software (usually written in Go)\n\/\/ that allow Go programs to interact with database systems through\n\/\/ some query language. We try not to imply \"SQL\" at the level of\n\/\/ this API.\n\/\/\n\/\/ Goals:\n\/\/\n\/\/ The API described here is a set of conventions that should be\n\/\/ followed by database drivers. Obviously there are levels of\n\/\/ compliance, but every database driver should at least implement\n\/\/ the core of the API: the functions Version() and Open() as well\n\/\/ as the interfaces TODO Connection, Statement, and Cursor.\npackage db\n\nimport \"os\"\nimport \"strings\"\n\n\/\/ Database drivers must provide the Version() function to allow\n\/\/ careful clients to configure themselves appropriately for the\n\/\/ database system in question. There are a number of well-known\n\/\/ keys in the map returned by Version():\n\/\/\n\/\/\tKey\t\tDescription\n\/\/\n\/\/\tversion\t\tgeneric version (if client\/server doesn't apply)\n\/\/\tclient\t\tclient version\n\/\/\tserver\t\tserver version\n\/\/\tprotocol\tprotocol version\n\/\/\tdriver\t\tdatabase driver version\n\/\/\n\/\/ Database drivers decide which of these keys to return. For\n\/\/ example, the sqlite3 driver returns \"version\" and \"driver\";\n\/\/ the mysql driver should probably return all keys except\n\/\/ \"version\" instead.\n\/\/\n\/\/ Database drivers can also return additional keys provided\n\/\/ they prefix them with the package name of the driver. The\n\/\/ sqlite3 driver, for example, returns \"sqlite3.sourceid\" in\n\/\/ addition to \"version\" and \"driver\".\ntype VersionSignature func() (map[string]string, os.Error)\n\n\/\/ Database drivers must provide the Open() function to allow\n\/\/ clients to establish connections to a database system. The\n\/\/ parameter to Open() is a URL of the following form:\n\/\/\n\/\/\tdriver:\/\/username:password@host:port\/database?key=value;key=value\n\/\/\n\/\/ Most parts of this URL are optional. The sqlite3 database\n\/\/ driver for example interprets \"sqlite3:\/\/test.db\" as the\n\/\/ database \"test.db\" in the current directory. Actually, it\n\/\/ also interprets \"test.db\" by itself that way. If a driver\n\/\/ is specified in the URL, it has to match the driver whose\n\/\/ Open() function is called. For example the sqlite3 driver\n\/\/ will fail if asked to open \"mysql:\/\/somedb\". There can be\n\/\/ as many key\/value pairs as necessary to configure special\n\/\/ features of the particular database driver. Here are more\n\/\/ examples:\n\/\/\n\/\/\tc, e := mysql.Open(\"mysql:\/\/phf:wow@example.com:7311\/mydb\");\n\/\/\tc, e := sqlite3.Open(\"test.db?flags=0x00020001\");\n\/\/\n\/\/ Note that defaults for all optional components are specific\n\/\/ to the database driver in question and should be documented\n\/\/ there.\n\/\/\n\/\/ The Open() function is free to ignore components that it\n\/\/ has no use for. For example, the sqlite3 driver ignores\n\/\/ username, password, host, and port.\n\/\/\n\/\/ A successful call to Open() results in a connection to the\n\/\/ database system. Specific database drivers will return\n\/\/ connection objects conforming to one or more of the following\n\/\/ interfaces which represent different levels of functionality.\ntype OpenSignature func(url string) (conn Connection, err os.Error)\n\n\/\/ The most basic type of database connection.\n\/\/\n\/\/ The choice to separate Prepare() and Execute() is deliberate:\n\/\/ It leaves the database driver the most flexibilty for achieving\n\/\/ good performance without requiring additional caching schemes.\n\/\/\n\/\/ Prepare() accepts a query language string and returns\n\/\/ a precompiled statement that can be executed after any\n\/\/ remaining parameters have been bound. The format of\n\/\/ parameters in the query string is dependent on the\n\/\/ database driver in question.\n\/\/\n\/\/ Execute() accepts a precompiled statement, binds the\n\/\/ given parameters, and then executes the statement.\n\/\/ If the statement produces results, Execute() returns\n\/\/ a cursor; otherwise it returns nil. Specific database\n\/\/ driver will return cursor objects conforming to one\n\/\/ or more of the following interfaces which represent\n\/\/ different levels of functionality.\n\/\/\n\/\/ Iterate() is an experimental variant of Execute()\n\/\/ that returns a channel of Result objects instead\n\/\/ of a Cursor. XXX: Is this any good?\n\/\/\n\/\/ Close() ends the connection to the database system\n\/\/ and frees up all internal resources associated with\n\/\/ it. Note that you must close all Statement and Cursor\n\/\/ objects created through a connection before closing\n\/\/ the connection itself. After a connection has been\n\/\/ closed, no further operations are allowed on it.\ntype Connection interface {\n\tPrepare(query string) (Statement, os.Error);\n\tExecute(statement Statement, parameters ...) (Cursor, os.Error);\n\tIterate(statement Statement, parameters ...) (<-chan Result, os.Error);\n\tClose() os.Error;\n}\n\n\/\/ The most basic type of result.\n\/\/\n\/\/ Data() returns the data for this result as an array\n\/\/ of generic objects. The database driver in question\n\/\/ defines what concrete types are returned depending\n\/\/ on the types used by the database system.\n\/\/\n\/\/ Error() returns the error that occurred when this\n\/\/ result was fetched, or nil if no error occurred.\ntype Result interface {\n\tData() []interface{};\n\tError() os.Error;\n}\n\n\/\/ InformativeResults supply useful but optional information.\n\/\/\n\/\/ Fields() returns the names of each item of data in the\n\/\/ result.\n\/\/\n\/\/ Types() returns the names of the types of each item in\n\/\/ the result.\ntype InformativeResult interface {\n\tResult;\n\tFields() []string;\n\tTypes() []string;\n}\n\n\/\/ FancyResults provide an alternate way of processing results.\n\/\/\n\/\/ DataMap() returns a map from item names to item values. As\n\/\/ for Data() the concrete types have to be defined by the\n\/\/ database driver in question.\n\/\/\n\/\/ TypeMap() returns a map from item names to the names of the\n\/\/ types of each item.\ntype FancyResult interface {\n\tResult;\n\tDataMap() map[string]interface{};\n\tTypeMap() map[string]string;\n}\n\n\/\/ InformativeConnections supply useful but optional information.\n\/\/\n\/\/ Changes() returns the number of changes the last query made\n\/\/ to the database. Note that the database driver has to explain\n\/\/ what exactly constitutes a \"change\" for a given database system\n\/\/ and query.\ntype InformativeConnection interface {\n\tConnection;\n\tChanges() (int, os.Error);\n}\n\n\/\/ TransactionalConnections support transactions. Note that\n\/\/ the database driver in question may be in \"auto commit\"\n\/\/ mode by default. Once you call Begin(), \"auto commit\" will\n\/\/ be disabled for that connection until you either Commit()\n\/\/ or Rollback() successfully.\n\/\/\n\/\/ Begin() starts a transaction.\n\/\/\n\/\/ Commit() tries to push all changes made as part of the\n\/\/ current transaction to the database.\n\/\/\n\/\/ Rollback() tries to undo all changes made as part of the\n\/\/ current transaction.\ntype TransactionalConnection interface {\n\tConnection;\n\tBegin() os.Error;\n\tCommit() os.Error;\n\tRollback() os.Error;\n}\n\n\/\/ Statements are precompiled queries, possibly with remaining\n\/\/ parameter slots that need to be filled before execution.\n\/\/ TODO: include parameter binding API? or subsume in Execute()?\n\/\/ what about resetting the statement or clearing parameter\n\/\/ bindings?\ntype Statement interface {\n\tClose() os.Error;\n}\n\n\/\/ The most basic type of database cursor.\n\/\/ TODO: base on exp\/iterable instead? Iter() <-chan interface{};\n\/\/\n\/\/ MoreResults() returns true if there are more results\n\/\/ to be fetched.\n\/\/\n\/\/ FetchOne() returns the next result from the database.\n\/\/ Each result is returned as an array of generic objects.\n\/\/ The database driver in question has to define what\n\/\/ concrete types are returned depending on the types\n\/\/ used by the database system.\n\/\/\n\/\/ FetchMany() returns at most count results.\n\/\/ XXX: FetchMany() MAY GO AWAY SOON.\n\/\/\n\/\/ FetchAll() returns all (remaining) results.\n\/\/ XXX: FetchAll() MAY GO AWAY SOON.\n\/\/\n\/\/ Close() frees the cursor. After a cursor has been\n\/\/ closed, no further operations are allowed on it.\ntype Cursor interface {\n\tMoreResults() bool;\n\tFetchOne() ([]interface{}, os.Error);\n\tFetchMany(count int) ([][]interface{}, os.Error);\n\tFetchAll() ([][]interface{}, os.Error);\n\tClose() os.Error;\n}\n\n\/\/ InformativeCursors supply useful but optional information.\n\/\/\n\/\/ Description() returns a map from (the name of) a field to\n\/\/ (the name of) its type. The exact format of field and type\n\/\/ names is specified by the database driver in question.\n\/\/\n\/\/ Results() returns the number of results remaining to be\n\/\/ fetched.\ntype InformativeCursor interface {\n\tCursor;\n\tDescription() (map[string]string, os.Error);\n\tResults() int;\n}\n\n\/\/ PythonicCursors fetch results as maps from field names to\n\/\/ values instead of just slices of values.\n\/\/\n\/\/ TODO: find a better name for this!\n\/\/\n\/\/ FetchDict() is similar to FetchOne().\n\/\/ FetchDictMany() is similar to FetchMany().\n\/\/ FetchDictAll() is similar to FetchAll().\ntype PythonicCursor interface {\n\tCursor;\n\tFetchDict() (data map[string]interface{}, error os.Error);\n\tFetchManyDicts(count int) (data []map[string]interface{}, error os.Error);\n\tFetchAllDicts() (data []map[string]interface{}, error os.Error);\n}\n\n\/\/ ExecuteDirectly is a convenience function for \"one-off\" queries.\n\/\/ It's particularly convenient for queries that don't produce any\n\/\/ results.\n\/\/\n\/\/ If you need more control, for example to rebind parameters over\n\/\/ and over again, to get results one by one, or to access metadata\n\/\/ about the results, you should use the Prepare() and Execute()\n\/\/ methods explicitly instead.\nfunc ExecuteDirectly(conn Connection, query string, params ...) (results [][]interface{}, err os.Error) {\n\tvar s Statement;\n\ts, err = conn.Prepare(query);\n\tif err != nil || s == nil {\n\t\treturn\n\t}\n\tdefer s.Close();\n\n\tvar c Cursor;\n\tc, err = conn.Execute(s, params);\n\tif err != nil || c == nil {\n\t\treturn\n\t}\n\tdefer c.Close();\n\n\tresults, err = c.FetchAll();\n\treturn;\n}\n\n\/\/ ParseQueryURL() helps database drivers parse URLs passed to\n\/\/ Open(). It takes a string of the form\n\/\/\n\/\/\tkey=value{;key=value;...;key=value}]\n\/\/\n\/\/ and returns a map from keys to values. Empty strings yield\n\/\/ an empty map; malformed strings yield a nil map instead.\nfunc ParseQueryURL(str string) (opt map[string]string) {\n\topt = make(map[string]string);\n\tif len(str) == 0 {\n\t\treturn opt\n\t}\n\n\tpairs := strings.Split(str, \";\", 0);\n\tif len(pairs) == 0 {\n\t\treturn nil\n\t}\n\n\tfor _, p := range pairs {\n\t\tpieces := strings.Split(p, \"=\", 0);\n\t\tif len(pieces) == 2 {\n\t\t\topt[pieces[0]] = pieces[1]\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn;\n}\n<commit_msg>Got rid of PythonicCursor.<commit_after>\/\/ Copyright 2009 Peter H. Froehlich. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Database API for Go.\n\/\/\n\/\/ Terminology:\n\/\/\n\/\/ Database systems are pieces of software (usually outside of Go)\n\/\/ that allow storage and retrieval of data. We try not to imply\n\/\/ \"relational\" at the level of this API.\n\/\/\n\/\/ Database drivers are pieces of software (usually written in Go)\n\/\/ that allow Go programs to interact with database systems through\n\/\/ some query language. We try not to imply \"SQL\" at the level of\n\/\/ this API.\n\/\/\n\/\/ Goals:\n\/\/\n\/\/ The API described here is a set of conventions that should be\n\/\/ followed by database drivers. Obviously there are levels of\n\/\/ compliance, but every database driver should at least implement\n\/\/ the core of the API: the functions Version() and Open() as well\n\/\/ as the interfaces Connection, Statement, and Result.\npackage db\n\nimport \"os\"\nimport \"strings\"\n\n\/\/ Database drivers must provide the Version() function to allow\n\/\/ careful clients to configure themselves appropriately for the\n\/\/ database system in question. There are a number of well-known\n\/\/ keys in the map returned by Version():\n\/\/\n\/\/\tKey\t\tDescription\n\/\/\n\/\/\tversion\t\tgeneric version (if client\/server doesn't apply)\n\/\/\tclient\t\tclient version\n\/\/\tserver\t\tserver version\n\/\/\tprotocol\tprotocol version\n\/\/\tdriver\t\tdatabase driver version\n\/\/\n\/\/ Database drivers decide which of these keys to return. For\n\/\/ example, the sqlite3 driver returns \"version\" and \"driver\";\n\/\/ the mysql driver should probably return all keys except\n\/\/ \"version\" instead.\n\/\/\n\/\/ Database drivers can also return additional keys provided\n\/\/ they prefix them with the package name of the driver. The\n\/\/ sqlite3 driver, for example, returns \"sqlite3.sourceid\" in\n\/\/ addition to \"version\" and \"driver\".\ntype VersionSignature func() (map[string]string, os.Error)\n\n\/\/ Database drivers must provide the Open() function to allow\n\/\/ clients to establish connections to a database system. The\n\/\/ parameter to Open() is a URL of the following form:\n\/\/\n\/\/\tdriver:\/\/username:password@host:port\/database?key=value;key=value\n\/\/\n\/\/ Most parts of this URL are optional. The sqlite3 database\n\/\/ driver for example interprets \"sqlite3:\/\/test.db\" as the\n\/\/ database \"test.db\" in the current directory. Actually, it\n\/\/ also interprets \"test.db\" by itself that way. If a driver\n\/\/ is specified in the URL, it has to match the driver whose\n\/\/ Open() function is called. For example the sqlite3 driver\n\/\/ will fail if asked to open \"mysql:\/\/somedb\". There can be\n\/\/ as many key\/value pairs as necessary to configure special\n\/\/ features of the particular database driver. Here are more\n\/\/ examples:\n\/\/\n\/\/\tc, e := mysql.Open(\"mysql:\/\/phf:wow@example.com:7311\/mydb\");\n\/\/\tc, e := sqlite3.Open(\"test.db?flags=0x00020001\");\n\/\/\n\/\/ Note that defaults for all optional components are specific\n\/\/ to the database driver in question and should be documented\n\/\/ there.\n\/\/\n\/\/ The Open() function is free to ignore components that it\n\/\/ has no use for. For example, the sqlite3 driver ignores\n\/\/ username, password, host, and port.\n\/\/\n\/\/ A successful call to Open() results in a connection to the\n\/\/ database system. Specific database drivers will return\n\/\/ connection objects conforming to one or more of the following\n\/\/ interfaces which represent different levels of functionality.\ntype OpenSignature func(url string) (conn Connection, err os.Error)\n\n\/\/ The most basic type of database connection.\n\/\/\n\/\/ The choice to separate Prepare() and Execute() is deliberate:\n\/\/ It leaves the database driver the most flexibilty for achieving\n\/\/ good performance without requiring additional caching schemes.\n\/\/\n\/\/ Prepare() accepts a query language string and returns\n\/\/ a precompiled statement that can be executed after any\n\/\/ remaining parameters have been bound. The format of\n\/\/ parameters in the query string is dependent on the\n\/\/ database driver in question.\n\/\/\n\/\/ Execute() accepts a precompiled statement, binds the\n\/\/ given parameters, and then executes the statement.\n\/\/ If the statement produces results, Execute() returns\n\/\/ a cursor; otherwise it returns nil. Specific database\n\/\/ driver will return cursor objects conforming to one\n\/\/ or more of the following interfaces which represent\n\/\/ different levels of functionality.\n\/\/\n\/\/ Iterate() is an experimental variant of Execute()\n\/\/ that returns a channel of Result objects instead\n\/\/ of a Cursor. XXX: Is this any good?\n\/\/\n\/\/ Close() ends the connection to the database system\n\/\/ and frees up all internal resources associated with\n\/\/ it. Note that you must close all Statement and Cursor\n\/\/ objects created through a connection before closing\n\/\/ the connection itself. After a connection has been\n\/\/ closed, no further operations are allowed on it.\ntype Connection interface {\n\tPrepare(query string) (Statement, os.Error);\n\tExecute(statement Statement, parameters ...) (Cursor, os.Error);\n\tIterate(statement Statement, parameters ...) (<-chan Result, os.Error);\n\tClose() os.Error;\n}\n\n\/\/ The most basic type of result.\n\/\/\n\/\/ Data() returns the data for this result as an array\n\/\/ of generic objects. The database driver in question\n\/\/ defines what concrete types are returned depending\n\/\/ on the types used by the database system.\n\/\/\n\/\/ Error() returns the error that occurred when this\n\/\/ result was fetched, or nil if no error occurred.\ntype Result interface {\n\tData() []interface{};\n\tError() os.Error;\n}\n\n\/\/ InformativeResults supply useful but optional information.\n\/\/\n\/\/ Fields() returns the names of each item of data in the\n\/\/ result.\n\/\/\n\/\/ Types() returns the names of the types of each item in\n\/\/ the result.\ntype InformativeResult interface {\n\tResult;\n\tFields() []string;\n\tTypes() []string;\n}\n\n\/\/ FancyResults provide an alternate way of processing results.\n\/\/\n\/\/ DataMap() returns a map from item names to item values. As\n\/\/ for Data() the concrete types have to be defined by the\n\/\/ database driver in question.\n\/\/\n\/\/ TypeMap() returns a map from item names to the names of the\n\/\/ types of each item.\ntype FancyResult interface {\n\tResult;\n\tDataMap() map[string]interface{};\n\tTypeMap() map[string]string;\n}\n\n\/\/ InformativeConnections supply useful but optional information.\n\/\/\n\/\/ Changes() returns the number of changes the last query made\n\/\/ to the database. Note that the database driver has to explain\n\/\/ what exactly constitutes a \"change\" for a given database system\n\/\/ and query.\ntype InformativeConnection interface {\n\tConnection;\n\tChanges() (int, os.Error);\n}\n\n\/\/ TransactionalConnections support transactions. Note that\n\/\/ the database driver in question may be in \"auto commit\"\n\/\/ mode by default. Once you call Begin(), \"auto commit\" will\n\/\/ be disabled for that connection until you either Commit()\n\/\/ or Rollback() successfully.\n\/\/\n\/\/ Begin() starts a transaction.\n\/\/\n\/\/ Commit() tries to push all changes made as part of the\n\/\/ current transaction to the database.\n\/\/\n\/\/ Rollback() tries to undo all changes made as part of the\n\/\/ current transaction.\ntype TransactionalConnection interface {\n\tConnection;\n\tBegin() os.Error;\n\tCommit() os.Error;\n\tRollback() os.Error;\n}\n\n\/\/ Statements are precompiled queries, possibly with remaining\n\/\/ parameter slots that need to be filled before execution.\n\/\/ TODO: include parameter binding API? or subsume in Execute()?\n\/\/ what about resetting the statement or clearing parameter\n\/\/ bindings?\ntype Statement interface {\n\tClose() os.Error;\n}\n\n\/\/ The most basic type of database cursor.\n\/\/ TODO: base on exp\/iterable instead? Iter() <-chan interface{};\n\/\/\n\/\/ MoreResults() returns true if there are more results\n\/\/ to be fetched.\n\/\/\n\/\/ FetchOne() returns the next result from the database.\n\/\/ Each result is returned as an array of generic objects.\n\/\/ The database driver in question has to define what\n\/\/ concrete types are returned depending on the types\n\/\/ used by the database system.\n\/\/\n\/\/ FetchMany() returns at most count results.\n\/\/ XXX: FetchMany() MAY GO AWAY SOON.\n\/\/\n\/\/ FetchAll() returns all (remaining) results.\n\/\/ XXX: FetchAll() MAY GO AWAY SOON.\n\/\/\n\/\/ Close() frees the cursor. After a cursor has been\n\/\/ closed, no further operations are allowed on it.\ntype Cursor interface {\n\tMoreResults() bool;\n\tFetchOne() ([]interface{}, os.Error);\n\tFetchMany(count int) ([][]interface{}, os.Error);\n\tFetchAll() ([][]interface{}, os.Error);\n\tClose() os.Error;\n}\n\n\/\/ InformativeCursors supply useful but optional information.\n\/\/\n\/\/ Description() returns a map from (the name of) a field to\n\/\/ (the name of) its type. The exact format of field and type\n\/\/ names is specified by the database driver in question.\n\/\/\n\/\/ Results() returns the number of results remaining to be\n\/\/ fetched.\ntype InformativeCursor interface {\n\tCursor;\n\tDescription() (map[string]string, os.Error);\n\tResults() int;\n}\n\n\/\/ ExecuteDirectly is a convenience function for \"one-off\" queries.\n\/\/ It's particularly convenient for queries that don't produce any\n\/\/ results.\n\/\/\n\/\/ If you need more control, for example to rebind parameters over\n\/\/ and over again, to get results one by one, or to access metadata\n\/\/ about the results, you should use the Prepare() and Execute()\n\/\/ methods explicitly instead.\nfunc ExecuteDirectly(conn Connection, query string, params ...) (results [][]interface{}, err os.Error) {\n\tvar s Statement;\n\ts, err = conn.Prepare(query);\n\tif err != nil || s == nil {\n\t\treturn\n\t}\n\tdefer s.Close();\n\n\tvar c Cursor;\n\tc, err = conn.Execute(s, params);\n\tif err != nil || c == nil {\n\t\treturn\n\t}\n\tdefer c.Close();\n\n\tresults, err = c.FetchAll();\n\treturn;\n}\n\n\/\/ ParseQueryURL() helps database drivers parse URLs passed to\n\/\/ Open(). It takes a string of the form\n\/\/\n\/\/\tkey=value{;key=value;...;key=value}]\n\/\/\n\/\/ and returns a map from keys to values. Empty strings yield\n\/\/ an empty map; malformed strings yield a nil map instead.\nfunc ParseQueryURL(str string) (opt map[string]string) {\n\topt = make(map[string]string);\n\tif len(str) == 0 {\n\t\treturn opt\n\t}\n\n\tpairs := strings.Split(str, \";\", 0);\n\tif len(pairs) == 0 {\n\t\treturn nil\n\t}\n\n\tfor _, p := range pairs {\n\t\tpieces := strings.Split(p, \"=\", 0);\n\t\tif len(pieces) == 2 {\n\t\t\topt[pieces[0]] = pieces[1]\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn;\n}\n<|endoftext|>"} {"text":"<commit_before>package s3\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\t\"strconv\"\n\n\t\"github.com\/gobwas\/glob\"\n\t\"github.com\/gogo\/protobuf\/types\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pfs\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/errutil\"\n)\n\nconst defaultMaxKeys int = 1000\n\n\/\/ the raw XML returned for a request to get the location of a bucket\nconst locationSource = `<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<LocationConstraint xmlns=\"http:\/\/s3.amazonaws.com\/doc\/2006-03-01\/\">PACHYDERM<\/LocationConstraint>`\n\n\/\/ ListBucketResult is an XML-encodable listing of files\/objects in a\n\/\/ repo\/bucket\ntype ListBucketResult struct {\n\tContents []Contents `xml:\"Contents\"`\n\tCommonPrefixes []CommonPrefixes `xml:\"CommonPrefixes\"`\n\tDelimiter string `xml:\"Delimiter`\n\tIsTruncated bool `xml:\"IsTruncated\"`\n\tMarker string `xml:\"Marker\"`\n\tMaxKeys int `xml:\"MaxKeys\"`\n\tName string `xml:\"Name\"`\n\tNextMarker string `xml:\"NextMarker,omitempty\"`\n\tPrefix string `xml:\"Prefix\"`\n}\n\nfunc (r *ListBucketResult) isFull() bool {\n\treturn len(r.Contents)+len(r.CommonPrefixes) >= r.MaxKeys\n}\n\n\/\/ Contents is an individual file\/object\ntype Contents struct {\n\tKey string `xml:\"Key\"`\n\tLastModified time.Time `xml:\"LastModified\"`\n\tETag string `xml:\"ETag\"`\n\tSize uint64 `xml:\"Size\"`\n\tStorageClass string `xml:\"StorageClass\"`\n\tOwner User `xml:\"Owner\"`\n}\n\nfunc newContents(fileInfo *pfs.FileInfo, etag string) (Contents, error) {\n\tt, err := types.TimestampFromProto(fileInfo.Committed)\n\tif err != nil {\n\t\treturn Contents{}, err\n\t}\n\n\treturn Contents{\n\t\tKey: fileInfo.File.Path,\n\t\tLastModified: t,\n\t\tETag: etag,\n\t\tSize: fileInfo.SizeBytes,\n\t\tStorageClass: storageClass,\n\t\tOwner: defaultUser,\n\t}, nil\n}\n\n\/\/ CommonPrefixes is an individual PFS directory\ntype CommonPrefixes struct {\n\tPrefix string `xml:\"Prefix\"`\n\tOwner User `xml:\"Owner\"`\n}\n\nfunc newCommonPrefixes(dir string) CommonPrefixes {\n\treturn CommonPrefixes{\n\t\tPrefix: fmt.Sprintf(\"%s\/\", dir),\n\t\tOwner: defaultUser,\n\t}\n}\n\ntype bucketHandler struct {\n\tpc *client.APIClient\n}\n\nfunc newBucketHandler(pc *client.APIClient) bucketHandler {\n\treturn bucketHandler{pc: pc}\n}\n\nfunc (h bucketHandler) location(w http.ResponseWriter, r *http.Request) {\n\trepo, branch := bucketArgs(w, r)\n\t\n\t_, err := h.pc.InspectBranch(repo, branch)\n\tif err != nil {\n\t\tnotFoundError(w, r, err)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/xml\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(locationSource))\n}\n\nfunc (h bucketHandler) get(w http.ResponseWriter, r *http.Request) {\n\trepo, branch := bucketArgs(w, r)\n\n\t\/\/ ensure the branch exists and has a head\n\tbranchInfo, err := h.pc.InspectBranch(repo, branch)\n\tif err != nil {\n\t\tnotFoundError(w, r, err)\n\t\treturn\n\t}\n\n\tmaxKeys := defaultMaxKeys\n\tmaxKeysStr := r.FormValue(\"max-keys\")\n\tif maxKeysStr != \"\" {\n\t\ti, err := strconv.Atoi(maxKeysStr)\n\t\tif err != nil || i < 0 || i > defaultMaxKeys {\n\t\t\tinvalidArgument(w, r)\n\t\t\treturn\n\t\t}\n\t\tmaxKeys = i\t\n\t}\n\n\tdelimiter := r.FormValue(\"delimiter\")\n\tif delimiter != \"\" && delimiter != \"\/\" {\n\t\tinvalidDelimiterError(w, r)\n\t\treturn\n\t}\n\n\tresult := &ListBucketResult{\n\t\tName: repo,\n\t\tPrefix: r.FormValue(\"prefix\"),\n\t\tMarker: r.FormValue(\"marker\"),\n\t\tDelimiter: delimiter,\n\t\tMaxKeys: maxKeys,\n\t\tIsTruncated: false,\n\t}\n\n\tif branchInfo.Head == nil {\n\t\t\/\/ if there's no head commit, just print an empty list of files\n\t\twriteXML(w, http.StatusOK, result)\n\t} else if delimiter == \"\" {\n\t\th.listRecursive(w, r, result, branch)\n\t} else {\n\t\th.list(w, r, result, branch)\n\t}\n}\n\nfunc (h bucketHandler) listRecursive(w http.ResponseWriter, r *http.Request, result *ListBucketResult, branch string) {\n\terr := h.pc.Walk(result.Name, branch, filepath.Dir(result.Prefix), func(fileInfo *pfs.FileInfo) error {\n\t\torigFilePath := fileInfo.File.Path\n\t\tfileInfo = updateFileInfo(branch, result.Marker, result.Prefix, fileInfo)\n\t\tif fileInfo == nil || fileInfo.FileType == pfs.FileType_DIR {\n\t\t\treturn nil\n\t\t}\n\t\tif result.isFull() {\n\t\t\tif result.MaxKeys > 0 {\n\t\t\t\tresult.IsTruncated = true\n\t\t\t}\n\t\t\treturn errutil.ErrBreak\n\t\t}\n\n\t\tmeta, err := getMeta(h.pc, result.Name, branch, origFilePath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tetag := \"\"\n\t\tif meta != nil {\n\t\t\tetag = meta.MD5\n\t\t}\n\t\t\n\t\tcontents, err := newContents(fileInfo, etag)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresult.Contents = append(result.Contents, contents)\n\t\t\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tinternalError(w, r, err)\n\t\treturn\n\t}\n\n\tsetNextMarker(result)\n\twriteXML(w, http.StatusOK, result)\n}\n\nfunc (h bucketHandler) list(w http.ResponseWriter, r *http.Request, result *ListBucketResult, branch string) {\n\tpattern := fmt.Sprintf(\"%s*\", glob.QuoteMeta(result.Prefix))\n\tfileInfos, err := h.pc.GlobFile(result.Name, branch, pattern)\n\tif err != nil {\n\t\tinternalError(w, r, err)\n\t\treturn\n\t}\n\n\tfor _, fileInfo := range fileInfos {\n\t\torigFilePath := fileInfo.File.Path\n\t\tfileInfo = updateFileInfo(branch, result.Marker, result.Prefix, fileInfo)\n\t\tif fileInfo == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif result.isFull() {\n\t\t\tif result.MaxKeys > 0 {\n\t\t\t\tresult.IsTruncated = true\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif fileInfo.FileType == pfs.FileType_FILE {\n\t\t\tmeta, err := getMeta(h.pc, result.Name, branch, origFilePath)\n\t\t\tif err != nil {\n\t\t\t\tinternalError(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tetag := \"\"\n\t\t\tif meta != nil {\n\t\t\t\tetag = meta.MD5\n\t\t\t}\n\n\t\t\tcontents, err := newContents(fileInfo, etag)\n\t\t\tif err != nil {\n\t\t\t\tinternalError(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tresult.Contents = append(result.Contents, contents)\n\t\t} else {\n\t\t\tresult.CommonPrefixes = append(result.CommonPrefixes, newCommonPrefixes(fileInfo.File.Path))\n\t\t}\n\t}\n\n\tsetNextMarker(result)\n\twriteXML(w, http.StatusOK, result)\n}\n\nfunc (h bucketHandler) put(w http.ResponseWriter, r *http.Request) {\n\trepo, branch := bucketArgs(w, r)\n\n\terr := h.pc.CreateRepo(repo)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"as it already exists\") {\n\t\t\t\/\/ Bucket already exists - this is not an error so long as the\n\t\t\t\/\/ branch being created is new. Verify if that is the case now,\n\t\t\t\/\/ since PFS' `CreateBranch` won't error out.\n\t\t\t_, err := h.pc.InspectBranch(repo, branch)\n\t\t\tif err != nil {\n\t\t\t\tif !branchNotFoundMatcher.MatchString(err.Error()) {\n\t\t\t\t\tinternalError(w, r, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tbucketAlreadyExistsError(w, r)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tinternalError(w, r, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\terr = h.pc.CreateBranch(repo, branch, \"\", nil)\n\tif err != nil {\n\t\tinternalError(w, r, err)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc (h bucketHandler) del(w http.ResponseWriter, r *http.Request) {\n\trepo, branch := bucketArgs(w, r)\n\n\t\/\/ `DeleteBranch` does not return an error if a non-existing branch is\n\t\/\/ deleting. So first, we verify that the branch exists so we can\n\t\/\/ otherwise return a 404.\n\t_, err := h.pc.InspectBranch(repo, branch)\n\tif err != nil {\n\t\tnotFoundError(w, r, err)\n\t\treturn\n\t}\n\n\terr = h.pc.DeleteBranch(repo, branch, false)\n\tif err != nil {\n\t\tinternalError(w, r, err)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusNoContent)\n}\n\n\/\/ updateFileInfo takes in a `FileInfo`, and updates it to be used in s3\n\/\/ object listings:\n\/\/ 1) if nil is returned, the `FileInfo` should not be included in the list\n\/\/ 2) the path is updated to remove the leading slash\nfunc updateFileInfo(branch, marker, prefix string, fileInfo *pfs.FileInfo) *pfs.FileInfo {\n\tif fileInfo.FileType == pfs.FileType_DIR {\n\t\tif fileInfo.File.Path == \"\/\" {\n\t\t\t\/\/ skip the root directory\n\t\t\treturn nil\n\t\t}\n\t} else if fileInfo.FileType == pfs.FileType_FILE {\n\t\tif strings.HasSuffix(fileInfo.File.Path, \".s3g.json\") {\n\t\t\t\/\/ skip metadata files\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\t\/\/ skip anything that isn't a file or dir\n\t\treturn nil\n\t}\n\tfileInfo.File.Path = fileInfo.File.Path[1:] \/\/ strip leading slash\n\tif !strings.HasPrefix(fileInfo.File.Path, prefix) {\n\t\treturn nil\n\t}\n\tif fileInfo.File.Path <= marker {\n\t\t\/\/ skip file paths below the marker\n\t\treturn nil\n\t}\n\n\treturn fileInfo\n}\n\nfunc setNextMarker(result *ListBucketResult) {\n\tif result.IsTruncated {\n\t\tif len(result.Contents) > 0 && len(result.CommonPrefixes) == 0 {\n\t\t\tresult.NextMarker = result.Contents[len(result.Contents)-1].Key\n\t\t} else if len(result.Contents) == 0 && len(result.CommonPrefixes) > 0 {\n\t\t\tresult.NextMarker = result.CommonPrefixes[len(result.CommonPrefixes)-1].Prefix\n\t\t} else if len(result.Contents) > 0 && len(result.CommonPrefixes) > 0 {\n\t\t\tlastContents := result.Contents[len(result.Contents)-1].Key\n\t\t\tlastCommonPrefixes := result.CommonPrefixes[len(result.CommonPrefixes)-1].Prefix\n\n\t\t\tif lastContents > lastCommonPrefixes {\n\t\t\t\tresult.NextMarker = lastContents\n\t\t\t} else {\n\t\t\t\tresult.NextMarker = lastCommonPrefixes\n\t\t\t}\n\t\t}\n\t}\n}<commit_msg>Omit empty delimiters<commit_after>package s3\n\nimport (\n\t\"fmt\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\t\"strconv\"\n\n\t\"github.com\/gobwas\/glob\"\n\t\"github.com\/gogo\/protobuf\/types\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/client\/pfs\"\n\t\"github.com\/pachyderm\/pachyderm\/src\/server\/pkg\/errutil\"\n)\n\nconst defaultMaxKeys int = 1000\n\n\/\/ the raw XML returned for a request to get the location of a bucket\nconst locationSource = `<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<LocationConstraint xmlns=\"http:\/\/s3.amazonaws.com\/doc\/2006-03-01\/\">PACHYDERM<\/LocationConstraint>`\n\n\/\/ ListBucketResult is an XML-encodable listing of files\/objects in a\n\/\/ repo\/bucket\ntype ListBucketResult struct {\n\tContents []Contents `xml:\"Contents\"`\n\tCommonPrefixes []CommonPrefixes `xml:\"CommonPrefixes\"`\n\tDelimiter string `xml:\"Delimiter,omitempty\"`\n\tIsTruncated bool `xml:\"IsTruncated\"`\n\tMarker string `xml:\"Marker\"`\n\tMaxKeys int `xml:\"MaxKeys\"`\n\tName string `xml:\"Name\"`\n\tNextMarker string `xml:\"NextMarker,omitempty\"`\n\tPrefix string `xml:\"Prefix\"`\n}\n\nfunc (r *ListBucketResult) isFull() bool {\n\treturn len(r.Contents)+len(r.CommonPrefixes) >= r.MaxKeys\n}\n\n\/\/ Contents is an individual file\/object\ntype Contents struct {\n\tKey string `xml:\"Key\"`\n\tLastModified time.Time `xml:\"LastModified\"`\n\tETag string `xml:\"ETag\"`\n\tSize uint64 `xml:\"Size\"`\n\tStorageClass string `xml:\"StorageClass\"`\n\tOwner User `xml:\"Owner\"`\n}\n\nfunc newContents(fileInfo *pfs.FileInfo, etag string) (Contents, error) {\n\tt, err := types.TimestampFromProto(fileInfo.Committed)\n\tif err != nil {\n\t\treturn Contents{}, err\n\t}\n\n\treturn Contents{\n\t\tKey: fileInfo.File.Path,\n\t\tLastModified: t,\n\t\tETag: etag,\n\t\tSize: fileInfo.SizeBytes,\n\t\tStorageClass: storageClass,\n\t\tOwner: defaultUser,\n\t}, nil\n}\n\n\/\/ CommonPrefixes is an individual PFS directory\ntype CommonPrefixes struct {\n\tPrefix string `xml:\"Prefix\"`\n\tOwner User `xml:\"Owner\"`\n}\n\nfunc newCommonPrefixes(dir string) CommonPrefixes {\n\treturn CommonPrefixes{\n\t\tPrefix: fmt.Sprintf(\"%s\/\", dir),\n\t\tOwner: defaultUser,\n\t}\n}\n\ntype bucketHandler struct {\n\tpc *client.APIClient\n}\n\nfunc newBucketHandler(pc *client.APIClient) bucketHandler {\n\treturn bucketHandler{pc: pc}\n}\n\nfunc (h bucketHandler) location(w http.ResponseWriter, r *http.Request) {\n\trepo, branch := bucketArgs(w, r)\n\t\n\t_, err := h.pc.InspectBranch(repo, branch)\n\tif err != nil {\n\t\tnotFoundError(w, r, err)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/xml\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write([]byte(locationSource))\n}\n\nfunc (h bucketHandler) get(w http.ResponseWriter, r *http.Request) {\n\trepo, branch := bucketArgs(w, r)\n\n\t\/\/ ensure the branch exists and has a head\n\tbranchInfo, err := h.pc.InspectBranch(repo, branch)\n\tif err != nil {\n\t\tnotFoundError(w, r, err)\n\t\treturn\n\t}\n\n\tmaxKeys := defaultMaxKeys\n\tmaxKeysStr := r.FormValue(\"max-keys\")\n\tif maxKeysStr != \"\" {\n\t\ti, err := strconv.Atoi(maxKeysStr)\n\t\tif err != nil || i < 0 || i > defaultMaxKeys {\n\t\t\tinvalidArgument(w, r)\n\t\t\treturn\n\t\t}\n\t\tmaxKeys = i\t\n\t}\n\n\tdelimiter := r.FormValue(\"delimiter\")\n\tif delimiter != \"\" && delimiter != \"\/\" {\n\t\tinvalidDelimiterError(w, r)\n\t\treturn\n\t}\n\n\tresult := &ListBucketResult{\n\t\tName: repo,\n\t\tPrefix: r.FormValue(\"prefix\"),\n\t\tMarker: r.FormValue(\"marker\"),\n\t\tDelimiter: delimiter,\n\t\tMaxKeys: maxKeys,\n\t\tIsTruncated: false,\n\t}\n\n\tif branchInfo.Head == nil {\n\t\t\/\/ if there's no head commit, just print an empty list of files\n\t\twriteXML(w, http.StatusOK, result)\n\t} else if delimiter == \"\" {\n\t\th.listRecursive(w, r, result, branch)\n\t} else {\n\t\th.list(w, r, result, branch)\n\t}\n}\n\nfunc (h bucketHandler) listRecursive(w http.ResponseWriter, r *http.Request, result *ListBucketResult, branch string) {\n\terr := h.pc.Walk(result.Name, branch, filepath.Dir(result.Prefix), func(fileInfo *pfs.FileInfo) error {\n\t\torigFilePath := fileInfo.File.Path\n\t\tfileInfo = updateFileInfo(branch, result.Marker, result.Prefix, fileInfo)\n\t\tif fileInfo == nil || fileInfo.FileType == pfs.FileType_DIR {\n\t\t\treturn nil\n\t\t}\n\t\tif result.isFull() {\n\t\t\tif result.MaxKeys > 0 {\n\t\t\t\tresult.IsTruncated = true\n\t\t\t}\n\t\t\treturn errutil.ErrBreak\n\t\t}\n\n\t\tmeta, err := getMeta(h.pc, result.Name, branch, origFilePath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tetag := \"\"\n\t\tif meta != nil {\n\t\t\tetag = meta.MD5\n\t\t}\n\t\t\n\t\tcontents, err := newContents(fileInfo, etag)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresult.Contents = append(result.Contents, contents)\n\t\t\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tinternalError(w, r, err)\n\t\treturn\n\t}\n\n\tsetNextMarker(result)\n\twriteXML(w, http.StatusOK, result)\n}\n\nfunc (h bucketHandler) list(w http.ResponseWriter, r *http.Request, result *ListBucketResult, branch string) {\n\tpattern := fmt.Sprintf(\"%s*\", glob.QuoteMeta(result.Prefix))\n\tfileInfos, err := h.pc.GlobFile(result.Name, branch, pattern)\n\tif err != nil {\n\t\tinternalError(w, r, err)\n\t\treturn\n\t}\n\n\tfor _, fileInfo := range fileInfos {\n\t\torigFilePath := fileInfo.File.Path\n\t\tfileInfo = updateFileInfo(branch, result.Marker, result.Prefix, fileInfo)\n\t\tif fileInfo == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif result.isFull() {\n\t\t\tif result.MaxKeys > 0 {\n\t\t\t\tresult.IsTruncated = true\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif fileInfo.FileType == pfs.FileType_FILE {\n\t\t\tmeta, err := getMeta(h.pc, result.Name, branch, origFilePath)\n\t\t\tif err != nil {\n\t\t\t\tinternalError(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tetag := \"\"\n\t\t\tif meta != nil {\n\t\t\t\tetag = meta.MD5\n\t\t\t}\n\n\t\t\tcontents, err := newContents(fileInfo, etag)\n\t\t\tif err != nil {\n\t\t\t\tinternalError(w, r, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tresult.Contents = append(result.Contents, contents)\n\t\t} else {\n\t\t\tresult.CommonPrefixes = append(result.CommonPrefixes, newCommonPrefixes(fileInfo.File.Path))\n\t\t}\n\t}\n\n\tsetNextMarker(result)\n\twriteXML(w, http.StatusOK, result)\n}\n\nfunc (h bucketHandler) put(w http.ResponseWriter, r *http.Request) {\n\trepo, branch := bucketArgs(w, r)\n\n\terr := h.pc.CreateRepo(repo)\n\tif err != nil {\n\t\tif strings.Contains(err.Error(), \"as it already exists\") {\n\t\t\t\/\/ Bucket already exists - this is not an error so long as the\n\t\t\t\/\/ branch being created is new. Verify if that is the case now,\n\t\t\t\/\/ since PFS' `CreateBranch` won't error out.\n\t\t\t_, err := h.pc.InspectBranch(repo, branch)\n\t\t\tif err != nil {\n\t\t\t\tif !branchNotFoundMatcher.MatchString(err.Error()) {\n\t\t\t\t\tinternalError(w, r, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tbucketAlreadyExistsError(w, r)\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tinternalError(w, r, err)\n\t\t\treturn\n\t\t}\n\t}\n\n\terr = h.pc.CreateBranch(repo, branch, \"\", nil)\n\tif err != nil {\n\t\tinternalError(w, r, err)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc (h bucketHandler) del(w http.ResponseWriter, r *http.Request) {\n\trepo, branch := bucketArgs(w, r)\n\n\t\/\/ `DeleteBranch` does not return an error if a non-existing branch is\n\t\/\/ deleting. So first, we verify that the branch exists so we can\n\t\/\/ otherwise return a 404.\n\t_, err := h.pc.InspectBranch(repo, branch)\n\tif err != nil {\n\t\tnotFoundError(w, r, err)\n\t\treturn\n\t}\n\n\terr = h.pc.DeleteBranch(repo, branch, false)\n\tif err != nil {\n\t\tinternalError(w, r, err)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusNoContent)\n}\n\n\/\/ updateFileInfo takes in a `FileInfo`, and updates it to be used in s3\n\/\/ object listings:\n\/\/ 1) if nil is returned, the `FileInfo` should not be included in the list\n\/\/ 2) the path is updated to remove the leading slash\nfunc updateFileInfo(branch, marker, prefix string, fileInfo *pfs.FileInfo) *pfs.FileInfo {\n\tif fileInfo.FileType == pfs.FileType_DIR {\n\t\tif fileInfo.File.Path == \"\/\" {\n\t\t\t\/\/ skip the root directory\n\t\t\treturn nil\n\t\t}\n\t} else if fileInfo.FileType == pfs.FileType_FILE {\n\t\tif strings.HasSuffix(fileInfo.File.Path, \".s3g.json\") {\n\t\t\t\/\/ skip metadata files\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\t\/\/ skip anything that isn't a file or dir\n\t\treturn nil\n\t}\n\tfileInfo.File.Path = fileInfo.File.Path[1:] \/\/ strip leading slash\n\tif !strings.HasPrefix(fileInfo.File.Path, prefix) {\n\t\treturn nil\n\t}\n\tif fileInfo.File.Path <= marker {\n\t\t\/\/ skip file paths below the marker\n\t\treturn nil\n\t}\n\n\treturn fileInfo\n}\n\nfunc setNextMarker(result *ListBucketResult) {\n\tif result.IsTruncated {\n\t\tif len(result.Contents) > 0 && len(result.CommonPrefixes) == 0 {\n\t\t\tresult.NextMarker = result.Contents[len(result.Contents)-1].Key\n\t\t} else if len(result.Contents) == 0 && len(result.CommonPrefixes) > 0 {\n\t\t\tresult.NextMarker = result.CommonPrefixes[len(result.CommonPrefixes)-1].Prefix\n\t\t} else if len(result.Contents) > 0 && len(result.CommonPrefixes) > 0 {\n\t\t\tlastContents := result.Contents[len(result.Contents)-1].Key\n\t\t\tlastCommonPrefixes := result.CommonPrefixes[len(result.CommonPrefixes)-1].Prefix\n\n\t\t\tif lastContents > lastCommonPrefixes {\n\t\t\t\tresult.NextMarker = lastContents\n\t\t\t} else {\n\t\t\t\tresult.NextMarker = lastCommonPrefixes\n\t\t\t}\n\t\t}\n\t}\n}<|endoftext|>"} {"text":"<commit_before>package paperless\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strings\"\n\t\"unicode\"\n\n\t\"github.com\/kopoli\/go-util\"\n)\n\n\/\/ Environment is the run environment for each command. It is supplied as part\n\/\/ of Status for a Link when it is Run or Validated.\ntype Environment struct {\n\n\t\/\/ Constants are variables that are defined before a command chain is run\n\tConstants map[string]string\n\n\t\/\/ Tempfiles are constants that house a name of a temporary file. The\n\t\/\/ files are created before the chain is run and they are removed at\n\t\/\/ the end.\n\tTempFiles []string\n\n\t\/\/ The directory where the commands are run. This is a created\n\t\/\/ temporary directory\n\tRootDir string\n\n\t\/\/ AllowedCommands contain the commands that are allowed. If this is\n\t\/\/ nil, all commands are allowed.\n\tAllowedCommands map[string]bool\n\n\tinitialized bool\n}\n\nfunc (e *Environment) initEnv() (err error) {\n\tif e.initialized {\n\t\treturn\n\t}\n\n\tif e.Constants == nil {\n\t\treturn util.E.New(\"field Constants not initialized\")\n\t}\n\n\te.RootDir, err = ioutil.TempDir(\"\", \"chain\")\n\tif err != nil {\n\t\treturn util.E.Annotate(err, \"rootdir creation failed\")\n\t}\n\n\tvar fp *os.File\n\tfor _, name := range e.TempFiles {\n\t\tfp, err = ioutil.TempFile(e.RootDir, \"tmp\")\n\t\tif err != nil {\n\t\t\terr = util.E.Annotate(err, \"tempfile creation failed\")\n\t\t\te.initialized = true\n\t\t\te2 := e.deinitEnv()\n\t\t\tif e2 != nil {\n\t\t\t\terr = util.E.Annotate(err, \"temproot removal failed:\", e2)\n\t\t\t\te.initialized = false\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\te.Constants[name] = fp.Name()\n\t\tfp.Close()\n\t}\n\n\te.initialized = true\n\treturn\n}\n\nfunc (e *Environment) deinitEnv() (err error) {\n\tif !e.initialized {\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(e.RootDir, os.TempDir()) || e.RootDir == os.TempDir() {\n\t\terr = util.E.New(\"Temporary directory path is corrupted: %s\", e.RootDir)\n\t\treturn\n\t}\n\n\terr = os.RemoveAll(e.RootDir)\n\tif err != nil {\n\t\terr = util.E.Annotate(err, \"tempdir removal failed:\")\n\t}\n\n\t\/\/ Clear the temporary file and directory names\n\te.RootDir = \"\"\n\tfor _, n := range e.TempFiles {\n\t\te.Constants[n] = \"\"\n\t}\n\n\te.initialized = false\n\treturn\n}\n\nfunc (e *Environment) validate() (err error) {\n\tif len(e.RootDir) == 0 {\n\t\treturn util.E.New(\"the RootDir must be defined\")\n\t}\n\tinfo, err := os.Stat(e.RootDir)\n\tif err != nil || info.Mode()&os.ModeDir == 0 {\n\t\treturn util.E.Annotate(err, \"file \", e.RootDir, \" is not a proper directory\")\n\t}\n\n\treturn\n}\n\n\/\/ Status is the runtime status of the command chain\ntype Status struct {\n\t\/\/ The log output will be written to this\n\tLog io.Writer\n\n\tEnvironment\n}\n\ntype Link interface {\n\tValidate(*Environment) error\n\tRun(*Status) error\n}\n\ntype CmdChain struct {\n\n\t\/\/TODO remove this (this should come from outside)\n\tEnvironment\n\n\tLinks []Link\n}\n\nfunc (c *CmdChain) Validate(e *Environment) (err error) {\n\tfor _, l := range c.Links {\n\t\terr = l.Validate(e)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc (c *CmdChain) Run(s *Status) (err error) {\n\terr = c.Validate(&s.Environment)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor i := range c.Links {\n\t\terr = c.Links[i].Run(s)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc RunCmdChain(c *CmdChain, s *Status) (err error) {\n\terr = s.Environment.initEnv()\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = c.Run(s)\n\te2 := s.Environment.deinitEnv()\n\tif e2 != nil {\n\t\terr = util.E.Annotate(err, \"cmdchain deinit failed: \", e2)\n\t}\n\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype Cmd struct {\n\tCmd []string\n}\n\n\/\/ NewCmd creates a new Cmd from given command string\nfunc NewCmd(cmdstr string) (c *Cmd, err error) {\n\tcommand := splitWsQuote(cmdstr)\n\n\tif len(command) == 0 {\n\t\treturn nil, util.E.New(\"A command could not be parsed from:\", cmdstr)\n\t}\n\n\tc = &Cmd{command}\n\n\t_, err = exec.LookPath(c.Cmd[0])\n\tif err != nil {\n\t\treturn nil, util.E.Annotate(err, \"Command\", c.Cmd[0], \"could not be found\")\n\n\t}\n\n\treturn\n}\n\n\/\/ Validate makes sure the command is proper and can be run\nfunc (c *Cmd) Validate(e *Environment) (err error) {\n\tif len(c.Cmd) == 0 {\n\t\treturn util.E.New(\"command string must be non-empty\")\n\t}\n\n\tif e.AllowedCommands != nil {\n\t\tif _, ok := e.AllowedCommands[c.Cmd[0]]; ok != true {\n\t\t\treturn util.E.New(\"command is not allowed\")\n\t\t}\n\t}\n\n\t_, err = exec.LookPath(c.Cmd[0])\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = e.validate()\n\n\tfor idx, a := range c.Cmd {\n\t\tconsts := parseConsts(a)\n\t\tif len(consts) > 0 {\n\t\t\tfor _, co := range consts {\n\t\t\t\tif _, ok := e.Constants[co]; !ok {\n\t\t\t\t\treturn util.E.New(\"constant \\\"%s\\\" not defined\", co)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Output redirection to a file\n\t\tif a == \">\" && (idx == len(c.Cmd)-1 || c.Cmd[idx+1] == \"\") {\n\t\t\treturn util.E.New(\"The output redirection requires a string\")\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (c *Cmd) Run(s *Status) (err error) {\n\terr = c.Validate(&s.Environment)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar args []string\n\tfor i := range c.Cmd {\n\t\targs = append(args, expandConsts(c.Cmd[i], s.Constants))\n\t}\n\n\tif s.Log != nil {\n\t\tfmt.Fprintln(s.Log, \"Running command:\", strings.Join(args, \" \"))\n\t}\n\n\tvar output io.Writer = s.Log\n\n\tredirout, pos := getRedirectFile(\">\", args)\n\tif redirout != \"\" {\n\t\tvar fp *os.File\n\t\tredirout = PathAbs(s.RootDir, redirout)\n\t\tfp, err = os.OpenFile(redirout, os.O_WRONLY | os.O_CREATE, 0666)\n\t\tif err != nil {\n\t\t\terr = util.E.Annotate(err, \"Could not open file\",redirout,\"for redirection\")\n\t\t\treturn\n\t\t}\n\t\tdefer fp.Close()\n\t\toutput = fp\n\n\t\t\/\/ Remove the redirection and the file argument from the command\n\t\targs = append(args[:pos], args[pos+2:]...)\n\t}\n\n\tcmd := exec.Command(args[0], args[1:]...)\n\tcmd.Dir = s.RootDir\n\tcmd.Stdout = output\n\tcmd.Stderr = s.Log\n\n\treturn cmd.Run()\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nvar (\n\tconstRe = regexp.MustCompile(`\\$(\\w+)`)\n\ttmpfileConstRe = regexp.MustCompile(`\\$(tmp\\w+)`)\n\tcommentRe = regexp.MustCompile(`#.*$`)\n\tpreWhitespaceRe = regexp.MustCompile(`^\\s+`)\n)\n\n\/\/ parseConsts parses the constants from a string. Returns a list of constant names\nfunc parseConsts(s string) (ret []string) {\n\tret = []string{}\n\n\tmatches := constRe.FindAllStringSubmatch(s, -1)\n\tif matches == nil {\n\t\treturn\n\t}\n\tfor _, m := range matches {\n\t\tret = append(ret, m[1])\n\t}\n\n\treturn\n}\n\nfunc expandConsts(s string, constants map[string]string) string {\n\treturn constRe.ReplaceAllStringFunc(s, func(match string) string {\n\t\tcs := parseConsts(match)\n\t\tif len(cs) != 1 {\n\t\t\tpanic(\"Invalid Regexp parsing\")\n\t\t}\n\n\t\tret, ok := constants[cs[0]]\n\t\tif !ok {\n\t\t\tret = \"\"\n\t\t}\n\n\t\treturn ret\n\t})\n}\n\n\/\/ Gets the string after the given redir string. If not found, returns empty\n\/\/ string.\nfunc getRedirectFile(redir string, args []string) (file string, pos int) {\n\tfor i := range args {\n\t\tif args[i] == redir {\n\t\t\tif i+1 < len(args) {\n\t\t\t\tfile = args[i+1]\n\t\t\t\tpos = i\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ NewCmdChainScript creates a CmdChain from a script where each command is on a separate line. The following syntax elements are supported:\n\/\/\n\/\/ - Empty lines are filtered out.\n\/\/\n\/\/ - Comments start with # and end with EOL.\n\/\/\n\/\/ - Constants are strings that begin with $ and they can be set before running the cmdchain.\n\/\/\n\/\/ - Temporary files are strings that start with $tmp and they are automatically created before running the cmdchain and removed afterwards.\nfunc NewCmdChainScript(script string) (c *CmdChain, err error) {\n\tc = &CmdChain{}\n\tc.Constants = make(map[string]string)\n\n\tfor _, line := range strings.Split(script, \"\\n\") {\n\t\tline = commentRe.ReplaceAllString(line, \"\")\n\t\tline = preWhitespaceRe.ReplaceAllString(line, \"\")\n\n\t\tif len(line) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tconstants := parseConsts(line)\n\t\tfor _, co := range constants {\n\t\t\tc.Constants[co] = \"\"\n\t\t\tif tmpfileConstRe.MatchString(\"$\" + co) {\n\t\t\t\tc.TempFiles = append(c.TempFiles, co)\n\t\t\t}\n\t\t}\n\n\t\tvar cmd *Cmd\n\t\tcmd, err = NewCmd(line)\n\t\tif err != nil {\n\t\t\treturn nil, util.E.Annotate(err, \"improper command\")\n\t\t}\n\n\t\tc.Links = append(c.Links, cmd)\n\t}\n\n\te := c.Environment\n\te.RootDir = \"\/\"\n\n\terr = c.Validate(&e)\n\tif err != nil {\n\t\treturn nil, util.E.Annotate(err, \"invalid command chain\")\n\t}\n\n\treturn\n}\n\n\/\/ splitWsQuote splits a string by whitespace, but takes doublequotes into\n\/\/ account\nfunc splitWsQuote(s string) []string {\n\n\tquote := rune(0)\n\n\treturn strings.FieldsFunc(s, func(r rune) bool {\n\t\tswitch {\n\t\tcase r == quote:\n\t\t\tquote = rune(0)\n\t\t\treturn true\n\t\tcase quote != rune(0):\n\t\t\treturn false\n\t\tcase unicode.In(r, unicode.Quotation_Mark):\n\t\t\tquote = r\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn unicode.IsSpace(r)\n\t\t}\n\t})\n}\n<commit_msg>cmdchain: make more visible what command is run<commit_after>package paperless\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strings\"\n\t\"unicode\"\n\n\t\"github.com\/kopoli\/go-util\"\n)\n\n\/\/ Environment is the run environment for each command. It is supplied as part\n\/\/ of Status for a Link when it is Run or Validated.\ntype Environment struct {\n\n\t\/\/ Constants are variables that are defined before a command chain is run\n\tConstants map[string]string\n\n\t\/\/ Tempfiles are constants that house a name of a temporary file. The\n\t\/\/ files are created before the chain is run and they are removed at\n\t\/\/ the end.\n\tTempFiles []string\n\n\t\/\/ The directory where the commands are run. This is a created\n\t\/\/ temporary directory\n\tRootDir string\n\n\t\/\/ AllowedCommands contain the commands that are allowed. If this is\n\t\/\/ nil, all commands are allowed.\n\tAllowedCommands map[string]bool\n\n\tinitialized bool\n}\n\nfunc (e *Environment) initEnv() (err error) {\n\tif e.initialized {\n\t\treturn\n\t}\n\n\tif e.Constants == nil {\n\t\treturn util.E.New(\"field Constants not initialized\")\n\t}\n\n\te.RootDir, err = ioutil.TempDir(\"\", \"chain\")\n\tif err != nil {\n\t\treturn util.E.Annotate(err, \"rootdir creation failed\")\n\t}\n\n\tvar fp *os.File\n\tfor _, name := range e.TempFiles {\n\t\tfp, err = ioutil.TempFile(e.RootDir, \"tmp\")\n\t\tif err != nil {\n\t\t\terr = util.E.Annotate(err, \"tempfile creation failed\")\n\t\t\te.initialized = true\n\t\t\te2 := e.deinitEnv()\n\t\t\tif e2 != nil {\n\t\t\t\terr = util.E.Annotate(err, \"temproot removal failed:\", e2)\n\t\t\t\te.initialized = false\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\te.Constants[name] = fp.Name()\n\t\tfp.Close()\n\t}\n\n\te.initialized = true\n\treturn\n}\n\nfunc (e *Environment) deinitEnv() (err error) {\n\tif !e.initialized {\n\t\treturn\n\t}\n\n\tif !strings.HasPrefix(e.RootDir, os.TempDir()) || e.RootDir == os.TempDir() {\n\t\terr = util.E.New(\"Temporary directory path is corrupted: %s\", e.RootDir)\n\t\treturn\n\t}\n\n\terr = os.RemoveAll(e.RootDir)\n\tif err != nil {\n\t\terr = util.E.Annotate(err, \"tempdir removal failed:\")\n\t}\n\n\t\/\/ Clear the temporary file and directory names\n\te.RootDir = \"\"\n\tfor _, n := range e.TempFiles {\n\t\te.Constants[n] = \"\"\n\t}\n\n\te.initialized = false\n\treturn\n}\n\nfunc (e *Environment) validate() (err error) {\n\tif len(e.RootDir) == 0 {\n\t\treturn util.E.New(\"the RootDir must be defined\")\n\t}\n\tinfo, err := os.Stat(e.RootDir)\n\tif err != nil || info.Mode()&os.ModeDir == 0 {\n\t\treturn util.E.Annotate(err, \"file \", e.RootDir, \" is not a proper directory\")\n\t}\n\n\treturn\n}\n\n\/\/ Status is the runtime status of the command chain\ntype Status struct {\n\t\/\/ The log output will be written to this\n\tLog io.Writer\n\n\tEnvironment\n}\n\ntype Link interface {\n\tValidate(*Environment) error\n\tRun(*Status) error\n}\n\ntype CmdChain struct {\n\n\t\/\/TODO remove this (this should come from outside)\n\tEnvironment\n\n\tLinks []Link\n}\n\nfunc (c *CmdChain) Validate(e *Environment) (err error) {\n\tfor _, l := range c.Links {\n\t\terr = l.Validate(e)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc (c *CmdChain) Run(s *Status) (err error) {\n\terr = c.Validate(&s.Environment)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor i := range c.Links {\n\t\terr = c.Links[i].Run(s)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc RunCmdChain(c *CmdChain, s *Status) (err error) {\n\terr = s.Environment.initEnv()\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = c.Run(s)\n\te2 := s.Environment.deinitEnv()\n\tif e2 != nil {\n\t\terr = util.E.Annotate(err, \"cmdchain deinit failed: \", e2)\n\t}\n\n\treturn\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\ntype Cmd struct {\n\tCmd []string\n}\n\n\/\/ NewCmd creates a new Cmd from given command string\nfunc NewCmd(cmdstr string) (c *Cmd, err error) {\n\tcommand := splitWsQuote(cmdstr)\n\n\tif len(command) == 0 {\n\t\treturn nil, util.E.New(\"A command could not be parsed from:\", cmdstr)\n\t}\n\n\tc = &Cmd{command}\n\n\t_, err = exec.LookPath(c.Cmd[0])\n\tif err != nil {\n\t\treturn nil, util.E.Annotate(err, \"Command\", c.Cmd[0], \"could not be found\")\n\n\t}\n\n\treturn\n}\n\n\/\/ Validate makes sure the command is proper and can be run\nfunc (c *Cmd) Validate(e *Environment) (err error) {\n\tif len(c.Cmd) == 0 {\n\t\treturn util.E.New(\"command string must be non-empty\")\n\t}\n\n\tif e.AllowedCommands != nil {\n\t\tif _, ok := e.AllowedCommands[c.Cmd[0]]; ok != true {\n\t\t\treturn util.E.New(\"command is not allowed\")\n\t\t}\n\t}\n\n\t_, err = exec.LookPath(c.Cmd[0])\n\tif err != nil {\n\t\treturn\n\t}\n\n\terr = e.validate()\n\n\tfor idx, a := range c.Cmd {\n\t\tconsts := parseConsts(a)\n\t\tif len(consts) > 0 {\n\t\t\tfor _, co := range consts {\n\t\t\t\tif _, ok := e.Constants[co]; !ok {\n\t\t\t\t\treturn util.E.New(\"constant \\\"%s\\\" not defined\", co)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Output redirection to a file\n\t\tif a == \">\" && (idx == len(c.Cmd)-1 || c.Cmd[idx+1] == \"\") {\n\t\t\treturn util.E.New(\"The output redirection requires a string\")\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (c *Cmd) Run(s *Status) (err error) {\n\terr = c.Validate(&s.Environment)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar args []string\n\tfor i := range c.Cmd {\n\t\targs = append(args, expandConsts(c.Cmd[i], s.Constants))\n\t}\n\n\tif s.Log != nil {\n\t\tfmt.Fprintln(s.Log, \"# Running command:\", strings.Join(args, \" \"))\n\t}\n\n\tvar output io.Writer = s.Log\n\n\tredirout, pos := getRedirectFile(\">\", args)\n\tif redirout != \"\" {\n\t\tvar fp *os.File\n\t\tredirout = PathAbs(s.RootDir, redirout)\n\t\tfp, err = os.OpenFile(redirout, os.O_WRONLY | os.O_CREATE, 0666)\n\t\tif err != nil {\n\t\t\terr = util.E.Annotate(err, \"Could not open file\",redirout,\"for redirection\")\n\t\t\treturn\n\t\t}\n\t\tdefer fp.Close()\n\t\toutput = fp\n\n\t\t\/\/ Remove the redirection and the file argument from the command\n\t\targs = append(args[:pos], args[pos+2:]...)\n\t}\n\n\tcmd := exec.Command(args[0], args[1:]...)\n\tcmd.Dir = s.RootDir\n\tcmd.Stdout = output\n\tcmd.Stderr = s.Log\n\n\treturn cmd.Run()\n}\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\nvar (\n\tconstRe = regexp.MustCompile(`\\$(\\w+)`)\n\ttmpfileConstRe = regexp.MustCompile(`\\$(tmp\\w+)`)\n\tcommentRe = regexp.MustCompile(`#.*$`)\n\tpreWhitespaceRe = regexp.MustCompile(`^\\s+`)\n)\n\n\/\/ parseConsts parses the constants from a string. Returns a list of constant names\nfunc parseConsts(s string) (ret []string) {\n\tret = []string{}\n\n\tmatches := constRe.FindAllStringSubmatch(s, -1)\n\tif matches == nil {\n\t\treturn\n\t}\n\tfor _, m := range matches {\n\t\tret = append(ret, m[1])\n\t}\n\n\treturn\n}\n\nfunc expandConsts(s string, constants map[string]string) string {\n\treturn constRe.ReplaceAllStringFunc(s, func(match string) string {\n\t\tcs := parseConsts(match)\n\t\tif len(cs) != 1 {\n\t\t\tpanic(\"Invalid Regexp parsing\")\n\t\t}\n\n\t\tret, ok := constants[cs[0]]\n\t\tif !ok {\n\t\t\tret = \"\"\n\t\t}\n\n\t\treturn ret\n\t})\n}\n\n\/\/ Gets the string after the given redir string. If not found, returns empty\n\/\/ string.\nfunc getRedirectFile(redir string, args []string) (file string, pos int) {\n\tfor i := range args {\n\t\tif args[i] == redir {\n\t\t\tif i+1 < len(args) {\n\t\t\t\tfile = args[i+1]\n\t\t\t\tpos = i\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ NewCmdChainScript creates a CmdChain from a script where each command is on a separate line. The following syntax elements are supported:\n\/\/\n\/\/ - Empty lines are filtered out.\n\/\/\n\/\/ - Comments start with # and end with EOL.\n\/\/\n\/\/ - Constants are strings that begin with $ and they can be set before running the cmdchain.\n\/\/\n\/\/ - Temporary files are strings that start with $tmp and they are automatically created before running the cmdchain and removed afterwards.\nfunc NewCmdChainScript(script string) (c *CmdChain, err error) {\n\tc = &CmdChain{}\n\tc.Constants = make(map[string]string)\n\n\tfor _, line := range strings.Split(script, \"\\n\") {\n\t\tline = commentRe.ReplaceAllString(line, \"\")\n\t\tline = preWhitespaceRe.ReplaceAllString(line, \"\")\n\n\t\tif len(line) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tconstants := parseConsts(line)\n\t\tfor _, co := range constants {\n\t\t\tc.Constants[co] = \"\"\n\t\t\tif tmpfileConstRe.MatchString(\"$\" + co) {\n\t\t\t\tc.TempFiles = append(c.TempFiles, co)\n\t\t\t}\n\t\t}\n\n\t\tvar cmd *Cmd\n\t\tcmd, err = NewCmd(line)\n\t\tif err != nil {\n\t\t\treturn nil, util.E.Annotate(err, \"improper command\")\n\t\t}\n\n\t\tc.Links = append(c.Links, cmd)\n\t}\n\n\te := c.Environment\n\te.RootDir = \"\/\"\n\n\terr = c.Validate(&e)\n\tif err != nil {\n\t\treturn nil, util.E.Annotate(err, \"invalid command chain\")\n\t}\n\n\treturn\n}\n\n\/\/ splitWsQuote splits a string by whitespace, but takes doublequotes into\n\/\/ account\nfunc splitWsQuote(s string) []string {\n\n\tquote := rune(0)\n\n\treturn strings.FieldsFunc(s, func(r rune) bool {\n\t\tswitch {\n\t\tcase r == quote:\n\t\t\tquote = rune(0)\n\t\t\treturn true\n\t\tcase quote != rune(0):\n\t\t\treturn false\n\t\tcase unicode.In(r, unicode.Quotation_Mark):\n\t\t\tquote = r\n\t\t\treturn true\n\t\tdefault:\n\t\t\treturn unicode.IsSpace(r)\n\t\t}\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nPackage runtime contains operations that interact with Go's runtime system,\nsuch as functions to control goroutines. It also includes the low-level type information\nused by the reflect package; see reflect's documentation for the programmable\ninterface to the run-time type system.\n\nEnvironment Variables\n\nThe following environment variables ($name or %name%, depending on the host\noperating system) control the run-time behavior of Go programs. The meanings\nand use may change from release to release.\n\nThe GOGC variable sets the initial garbage collection target percentage.\nA collection is triggered when the ratio of freshly allocated data to live data\nremaining after the previous collection reaches this percentage. The default\nis GOGC=100. Setting GOGC=off disables the garbage collector entirely.\nThe runtime\/debug package's SetGCPercent function allows changing this\npercentage at run time. See https:\/\/golang.org\/pkg\/runtime\/debug\/#SetGCPercent.\n\nThe GODEBUG variable controls debugging variables within the runtime.\nIt is a comma-separated list of name=val pairs setting these named variables:\n\n\tallocfreetrace: setting allocfreetrace=1 causes every allocation to be\n\tprofiled and a stack trace printed on each object's allocation and free.\n\n\tcgocheck: setting cgocheck=0 disables all checks for packages\n\tusing cgo to incorrectly pass Go pointers to non-Go code.\n\tSetting cgocheck=1 (the default) enables relatively cheap\n\tchecks that may miss some errors. Setting cgocheck=2 enables\n\texpensive checks that should not miss any errors, but will\n\tcause your program to run slower.\n\n\tefence: setting efence=1 causes the allocator to run in a mode\n\twhere each object is allocated on a unique page and addresses are\n\tnever recycled.\n\n\tgccheckmark: setting gccheckmark=1 enables verification of the\n\tgarbage collector's concurrent mark phase by performing a\n\tsecond mark pass while the world is stopped. If the second\n\tpass finds a reachable object that was not found by concurrent\n\tmark, the garbage collector will panic.\n\n\tgcpacertrace: setting gcpacertrace=1 causes the garbage collector to\n\tprint information about the internal state of the concurrent pacer.\n\n\tgcshrinkstackoff: setting gcshrinkstackoff=1 disables moving goroutines\n\tonto smaller stacks. In this mode, a goroutine's stack can only grow.\n\n\tgcstackbarrieroff: setting gcstackbarrieroff=1 disables the use of stack barriers\n\tthat allow the garbage collector to avoid repeating a stack scan during the\n\tmark termination phase.\n\n\tgcstackbarrierall: setting gcstackbarrierall=1 installs stack barriers\n\tin every stack frame, rather than in exponentially-spaced frames.\n\n\tgcstoptheworld: setting gcstoptheworld=1 disables concurrent garbage collection,\n\tmaking every garbage collection a stop-the-world event. Setting gcstoptheworld=2\n\talso disables concurrent sweeping after the garbage collection finishes.\n\n\tgctrace: setting gctrace=1 causes the garbage collector to emit a single line to standard\n\terror at each collection, summarizing the amount of memory collected and the\n\tlength of the pause. Setting gctrace=2 emits the same summary but also\n\trepeats each collection. The format of this line is subject to change.\n\tCurrently, it is:\n\t\tgc # @#s #%: #+#+# ms clock, #+#\/#\/#+# ms cpu, #->#-># MB, # MB goal, # P\n\twhere the fields are as follows:\n\t\tgc # the GC number, incremented at each GC\n\t\t@#s time in seconds since program start\n\t\t#% percentage of time spent in GC since program start\n\t\t#+...+# wall-clock\/CPU times for the phases of the GC\n\t\t#->#-># MB heap size at GC start, at GC end, and live heap\n\t\t# MB goal goal heap size\n\t\t# P number of processors used\n\tThe phases are stop-the-world (STW) sweep termination, concurrent\n\tmark and scan, and STW mark termination. The CPU times\n\tfor mark\/scan are broken down in to assist time (GC performed in\n\tline with allocation), background GC time, and idle GC time.\n\tIf the line ends with \"(forced)\", this GC was forced by a\n\truntime.GC() call and all phases are STW.\n\n\tmemprofilerate: setting memprofilerate=X will update the value of runtime.MemProfileRate.\n\tWhen set to 0 memory profiling is disabled. Refer to the description of\n\tMemProfileRate for the default value.\n\n\tinvalidptr: defaults to invalidptr=1, causing the garbage collector and stack\n\tcopier to crash the program if an invalid pointer value (for example, 1)\n\tis found in a pointer-typed location. Setting invalidptr=0 disables this check.\n\tThis should only be used as a temporary workaround to diagnose buggy code.\n\tThe real fix is to not store integers in pointer-typed locations.\n\n\tsbrk: setting sbrk=1 replaces the memory allocator and garbage collector\n\twith a trivial allocator that obtains memory from the operating system and\n\tnever reclaims any memory.\n\n\tscavenge: scavenge=1 enables debugging mode of heap scavenger.\n\n\tscheddetail: setting schedtrace=X and scheddetail=1 causes the scheduler to emit\n\tdetailed multiline info every X milliseconds, describing state of the scheduler,\n\tprocessors, threads and goroutines.\n\n\tschedtrace: setting schedtrace=X causes the scheduler to emit a single line to standard\n\terror every X milliseconds, summarizing the scheduler state.\n\nThe net and net\/http packages also refer to debugging variables in GODEBUG.\nSee the documentation for those packages for details.\n\nThe GOMAXPROCS variable limits the number of operating system threads that\ncan execute user-level Go code simultaneously. There is no limit to the number of threads\nthat can be blocked in system calls on behalf of Go code; those do not count against\nthe GOMAXPROCS limit. This package's GOMAXPROCS function queries and changes\nthe limit.\n\nThe GOTRACEBACK variable controls the amount of output generated when a Go\nprogram fails due to an unrecovered panic or an unexpected runtime condition.\nBy default, a failure prints a stack trace for the current goroutine,\neliding functions internal to the run-time system, and then exits with exit code 2.\nThe failure prints stack traces for all goroutines if there is no current goroutine\nor the failure is internal to the run-time.\nGOTRACEBACK=none omits the goroutine stack traces entirely.\nGOTRACEBACK=single (the default) behaves as described above.\nGOTRACEBACK=all adds stack traces for all user-created goroutines.\nGOTRACEBACK=system is like ``all'' but adds stack frames for run-time functions\nand shows goroutines created internally by the run-time.\nGOTRACEBACK=crash is like ``system'' but crashes in an operating system-specific\nmanner instead of exiting. For example, on Unix systems, the crash raises\nSIGABRT to trigger a core dump.\nFor historical reasons, the GOTRACEBACK settings 0, 1, and 2 are synonyms for\nnone, all, and system, respectively.\nThe runtime\/debug package's SetTraceback function allows increasing the\namount of output at run time, but it cannot reduce the amount below that\nspecified by the environment variable.\nSee https:\/\/golang.org\/pkg\/runtime\/debug\/#SetTraceback.\n\nThe GOARCH, GOOS, GOPATH, and GOROOT environment variables complete\nthe set of Go environment variables. They influence the building of Go programs\n(see https:\/\/golang.org\/cmd\/go and https:\/\/golang.org\/pkg\/go\/build).\nGOARCH, GOOS, and GOROOT are recorded at compile time and made available by\nconstants or functions in this package, but they do not influence the execution\nof the run-time system.\n*\/\npackage runtime\n\nimport \"runtime\/internal\/sys\"\n\n\/\/ Caller reports file and line number information about function invocations on\n\/\/ the calling goroutine's stack. The argument skip is the number of stack frames\n\/\/ to ascend, with 0 identifying the caller of Caller. (For historical reasons the\n\/\/ meaning of skip differs between Caller and Callers.) The return values report the\n\/\/ program counter, file name, and line number within the file of the corresponding\n\/\/ call. The boolean ok is false if it was not possible to recover the information.\nfunc Caller(skip int) (pc uintptr, file string, line int, ok bool) {\n\t\/\/ Ask for two PCs: the one we were asked for\n\t\/\/ and what it called, so that we can see if it\n\t\/\/ \"called\" sigpanic.\n\tvar rpc [2]uintptr\n\tif callers(1+skip-1, rpc[:]) < 2 {\n\t\treturn\n\t}\n\tf := findfunc(rpc[1])\n\tif f == nil {\n\t\t\/\/ TODO(rsc): Probably a bug?\n\t\t\/\/ The C version said \"have retpc at least\"\n\t\t\/\/ but actually returned pc=0.\n\t\tok = true\n\t\treturn\n\t}\n\tpc = rpc[1]\n\txpc := pc\n\tg := findfunc(rpc[0])\n\t\/\/ All architectures turn faults into apparent calls to sigpanic.\n\t\/\/ If we see a call to sigpanic, we do not back up the PC to find\n\t\/\/ the line number of the call instruction, because there is no call.\n\tif xpc > f.entry && (g == nil || g.entry != funcPC(sigpanic)) {\n\t\txpc--\n\t}\n\tfile, line32 := funcline(f, xpc)\n\tline = int(line32)\n\tok = true\n\treturn\n}\n\n\/\/ Callers fills the slice pc with the return program counters of function invocations\n\/\/ on the calling goroutine's stack. The argument skip is the number of stack frames\n\/\/ to skip before recording in pc, with 0 identifying the frame for Callers itself and\n\/\/ 1 identifying the caller of Callers.\n\/\/ It returns the number of entries written to pc.\n\/\/\n\/\/ Note that since each slice entry pc[i] is a return program counter,\n\/\/ looking up the file and line for pc[i] (for example, using (*Func).FileLine)\n\/\/ will normally return the file and line number of the instruction immediately\n\/\/ following the call.\n\/\/ To easily look up file\/line information for the call sequence, use Frames.\nfunc Callers(skip int, pc []uintptr) int {\n\t\/\/ runtime.callers uses pc.array==nil as a signal\n\t\/\/ to print a stack trace. Pick off 0-length pc here\n\t\/\/ so that we don't let a nil pc slice get to it.\n\tif len(pc) == 0 {\n\t\treturn 0\n\t}\n\treturn callers(skip, pc)\n}\n\n\/\/ GOROOT returns the root of the Go tree.\n\/\/ It uses the GOROOT environment variable, if set,\n\/\/ or else the root used during the Go build.\nfunc GOROOT() string {\n\ts := gogetenv(\"GOROOT\")\n\tif s != \"\" {\n\t\treturn s\n\t}\n\treturn sys.DefaultGoroot\n}\n\n\/\/ Version returns the Go tree's version string.\n\/\/ It is either the commit hash and date at the time of the build or,\n\/\/ when possible, a release tag like \"go1.3\".\nfunc Version() string {\n\treturn sys.TheVersion\n}\n\n\/\/ GOOS is the running program's operating system target:\n\/\/ one of darwin, freebsd, linux, and so on.\nconst GOOS string = sys.GOOS\n\n\/\/ GOARCH is the running program's architecture target:\n\/\/ 386, amd64, arm, or s390x.\nconst GOARCH string = sys.GOARCH\n<commit_msg>runtime: document heap scavenger memory summary<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/*\nPackage runtime contains operations that interact with Go's runtime system,\nsuch as functions to control goroutines. It also includes the low-level type information\nused by the reflect package; see reflect's documentation for the programmable\ninterface to the run-time type system.\n\nEnvironment Variables\n\nThe following environment variables ($name or %name%, depending on the host\noperating system) control the run-time behavior of Go programs. The meanings\nand use may change from release to release.\n\nThe GOGC variable sets the initial garbage collection target percentage.\nA collection is triggered when the ratio of freshly allocated data to live data\nremaining after the previous collection reaches this percentage. The default\nis GOGC=100. Setting GOGC=off disables the garbage collector entirely.\nThe runtime\/debug package's SetGCPercent function allows changing this\npercentage at run time. See https:\/\/golang.org\/pkg\/runtime\/debug\/#SetGCPercent.\n\nThe GODEBUG variable controls debugging variables within the runtime.\nIt is a comma-separated list of name=val pairs setting these named variables:\n\n\tallocfreetrace: setting allocfreetrace=1 causes every allocation to be\n\tprofiled and a stack trace printed on each object's allocation and free.\n\n\tcgocheck: setting cgocheck=0 disables all checks for packages\n\tusing cgo to incorrectly pass Go pointers to non-Go code.\n\tSetting cgocheck=1 (the default) enables relatively cheap\n\tchecks that may miss some errors. Setting cgocheck=2 enables\n\texpensive checks that should not miss any errors, but will\n\tcause your program to run slower.\n\n\tefence: setting efence=1 causes the allocator to run in a mode\n\twhere each object is allocated on a unique page and addresses are\n\tnever recycled.\n\n\tgccheckmark: setting gccheckmark=1 enables verification of the\n\tgarbage collector's concurrent mark phase by performing a\n\tsecond mark pass while the world is stopped. If the second\n\tpass finds a reachable object that was not found by concurrent\n\tmark, the garbage collector will panic.\n\n\tgcpacertrace: setting gcpacertrace=1 causes the garbage collector to\n\tprint information about the internal state of the concurrent pacer.\n\n\tgcshrinkstackoff: setting gcshrinkstackoff=1 disables moving goroutines\n\tonto smaller stacks. In this mode, a goroutine's stack can only grow.\n\n\tgcstackbarrieroff: setting gcstackbarrieroff=1 disables the use of stack barriers\n\tthat allow the garbage collector to avoid repeating a stack scan during the\n\tmark termination phase.\n\n\tgcstackbarrierall: setting gcstackbarrierall=1 installs stack barriers\n\tin every stack frame, rather than in exponentially-spaced frames.\n\n\tgcstoptheworld: setting gcstoptheworld=1 disables concurrent garbage collection,\n\tmaking every garbage collection a stop-the-world event. Setting gcstoptheworld=2\n\talso disables concurrent sweeping after the garbage collection finishes.\n\n\tgctrace: setting gctrace=1 causes the garbage collector to emit a single line to standard\n\terror at each collection, summarizing the amount of memory collected and the\n\tlength of the pause. Setting gctrace=2 emits the same summary but also\n\trepeats each collection. The format of this line is subject to change.\n\tCurrently, it is:\n\t\tgc # @#s #%: #+#+# ms clock, #+#\/#\/#+# ms cpu, #->#-># MB, # MB goal, # P\n\twhere the fields are as follows:\n\t\tgc # the GC number, incremented at each GC\n\t\t@#s time in seconds since program start\n\t\t#% percentage of time spent in GC since program start\n\t\t#+...+# wall-clock\/CPU times for the phases of the GC\n\t\t#->#-># MB heap size at GC start, at GC end, and live heap\n\t\t# MB goal goal heap size\n\t\t# P number of processors used\n\tThe phases are stop-the-world (STW) sweep termination, concurrent\n\tmark and scan, and STW mark termination. The CPU times\n\tfor mark\/scan are broken down in to assist time (GC performed in\n\tline with allocation), background GC time, and idle GC time.\n\tIf the line ends with \"(forced)\", this GC was forced by a\n\truntime.GC() call and all phases are STW.\n\n\tSetting gctrace to any value > 0 also causes the garbage collector\n\tto emit a summary when memory is released back to the system.\n\tThis process of returning memory to the system is called scavenging.\n\tThe format of this summary is subject to change.\n\tCurrently it is:\n\t\tscvg#: # MB released printed only if non-zero\n\t\tscvg#: inuse: # idle: # sys: # released: # consumed: # (MB)\n\twhere the fields are as follows:\n\t\tscvg# the scavenge cycle number, incremented at each scavenge\n\t\tinuse: # MB used or partially used spans\n\t\tidle: # MB spans pending scavenging\n\t\tsys: # MB mapped from the system\n\t\treleased: # MB released to the system\n\t\tconsumed: # MB allocated from the system\n\n\tmemprofilerate: setting memprofilerate=X will update the value of runtime.MemProfileRate.\n\tWhen set to 0 memory profiling is disabled. Refer to the description of\n\tMemProfileRate for the default value.\n\n\tinvalidptr: defaults to invalidptr=1, causing the garbage collector and stack\n\tcopier to crash the program if an invalid pointer value (for example, 1)\n\tis found in a pointer-typed location. Setting invalidptr=0 disables this check.\n\tThis should only be used as a temporary workaround to diagnose buggy code.\n\tThe real fix is to not store integers in pointer-typed locations.\n\n\tsbrk: setting sbrk=1 replaces the memory allocator and garbage collector\n\twith a trivial allocator that obtains memory from the operating system and\n\tnever reclaims any memory.\n\n\tscavenge: scavenge=1 enables debugging mode of heap scavenger.\n\n\tscheddetail: setting schedtrace=X and scheddetail=1 causes the scheduler to emit\n\tdetailed multiline info every X milliseconds, describing state of the scheduler,\n\tprocessors, threads and goroutines.\n\n\tschedtrace: setting schedtrace=X causes the scheduler to emit a single line to standard\n\terror every X milliseconds, summarizing the scheduler state.\n\nThe net and net\/http packages also refer to debugging variables in GODEBUG.\nSee the documentation for those packages for details.\n\nThe GOMAXPROCS variable limits the number of operating system threads that\ncan execute user-level Go code simultaneously. There is no limit to the number of threads\nthat can be blocked in system calls on behalf of Go code; those do not count against\nthe GOMAXPROCS limit. This package's GOMAXPROCS function queries and changes\nthe limit.\n\nThe GOTRACEBACK variable controls the amount of output generated when a Go\nprogram fails due to an unrecovered panic or an unexpected runtime condition.\nBy default, a failure prints a stack trace for the current goroutine,\neliding functions internal to the run-time system, and then exits with exit code 2.\nThe failure prints stack traces for all goroutines if there is no current goroutine\nor the failure is internal to the run-time.\nGOTRACEBACK=none omits the goroutine stack traces entirely.\nGOTRACEBACK=single (the default) behaves as described above.\nGOTRACEBACK=all adds stack traces for all user-created goroutines.\nGOTRACEBACK=system is like ``all'' but adds stack frames for run-time functions\nand shows goroutines created internally by the run-time.\nGOTRACEBACK=crash is like ``system'' but crashes in an operating system-specific\nmanner instead of exiting. For example, on Unix systems, the crash raises\nSIGABRT to trigger a core dump.\nFor historical reasons, the GOTRACEBACK settings 0, 1, and 2 are synonyms for\nnone, all, and system, respectively.\nThe runtime\/debug package's SetTraceback function allows increasing the\namount of output at run time, but it cannot reduce the amount below that\nspecified by the environment variable.\nSee https:\/\/golang.org\/pkg\/runtime\/debug\/#SetTraceback.\n\nThe GOARCH, GOOS, GOPATH, and GOROOT environment variables complete\nthe set of Go environment variables. They influence the building of Go programs\n(see https:\/\/golang.org\/cmd\/go and https:\/\/golang.org\/pkg\/go\/build).\nGOARCH, GOOS, and GOROOT are recorded at compile time and made available by\nconstants or functions in this package, but they do not influence the execution\nof the run-time system.\n*\/\npackage runtime\n\nimport \"runtime\/internal\/sys\"\n\n\/\/ Caller reports file and line number information about function invocations on\n\/\/ the calling goroutine's stack. The argument skip is the number of stack frames\n\/\/ to ascend, with 0 identifying the caller of Caller. (For historical reasons the\n\/\/ meaning of skip differs between Caller and Callers.) The return values report the\n\/\/ program counter, file name, and line number within the file of the corresponding\n\/\/ call. The boolean ok is false if it was not possible to recover the information.\nfunc Caller(skip int) (pc uintptr, file string, line int, ok bool) {\n\t\/\/ Ask for two PCs: the one we were asked for\n\t\/\/ and what it called, so that we can see if it\n\t\/\/ \"called\" sigpanic.\n\tvar rpc [2]uintptr\n\tif callers(1+skip-1, rpc[:]) < 2 {\n\t\treturn\n\t}\n\tf := findfunc(rpc[1])\n\tif f == nil {\n\t\t\/\/ TODO(rsc): Probably a bug?\n\t\t\/\/ The C version said \"have retpc at least\"\n\t\t\/\/ but actually returned pc=0.\n\t\tok = true\n\t\treturn\n\t}\n\tpc = rpc[1]\n\txpc := pc\n\tg := findfunc(rpc[0])\n\t\/\/ All architectures turn faults into apparent calls to sigpanic.\n\t\/\/ If we see a call to sigpanic, we do not back up the PC to find\n\t\/\/ the line number of the call instruction, because there is no call.\n\tif xpc > f.entry && (g == nil || g.entry != funcPC(sigpanic)) {\n\t\txpc--\n\t}\n\tfile, line32 := funcline(f, xpc)\n\tline = int(line32)\n\tok = true\n\treturn\n}\n\n\/\/ Callers fills the slice pc with the return program counters of function invocations\n\/\/ on the calling goroutine's stack. The argument skip is the number of stack frames\n\/\/ to skip before recording in pc, with 0 identifying the frame for Callers itself and\n\/\/ 1 identifying the caller of Callers.\n\/\/ It returns the number of entries written to pc.\n\/\/\n\/\/ Note that since each slice entry pc[i] is a return program counter,\n\/\/ looking up the file and line for pc[i] (for example, using (*Func).FileLine)\n\/\/ will normally return the file and line number of the instruction immediately\n\/\/ following the call.\n\/\/ To easily look up file\/line information for the call sequence, use Frames.\nfunc Callers(skip int, pc []uintptr) int {\n\t\/\/ runtime.callers uses pc.array==nil as a signal\n\t\/\/ to print a stack trace. Pick off 0-length pc here\n\t\/\/ so that we don't let a nil pc slice get to it.\n\tif len(pc) == 0 {\n\t\treturn 0\n\t}\n\treturn callers(skip, pc)\n}\n\n\/\/ GOROOT returns the root of the Go tree.\n\/\/ It uses the GOROOT environment variable, if set,\n\/\/ or else the root used during the Go build.\nfunc GOROOT() string {\n\ts := gogetenv(\"GOROOT\")\n\tif s != \"\" {\n\t\treturn s\n\t}\n\treturn sys.DefaultGoroot\n}\n\n\/\/ Version returns the Go tree's version string.\n\/\/ It is either the commit hash and date at the time of the build or,\n\/\/ when possible, a release tag like \"go1.3\".\nfunc Version() string {\n\treturn sys.TheVersion\n}\n\n\/\/ GOOS is the running program's operating system target:\n\/\/ one of darwin, freebsd, linux, and so on.\nconst GOOS string = sys.GOOS\n\n\/\/ GOARCH is the running program's architecture target:\n\/\/ 386, amd64, arm, or s390x.\nconst GOARCH string = sys.GOARCH\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"sort\"\n)\n\nconst (\n\tVERSION = \"0.0.1\"\n)\n\n\/\/ sort interface-typed arrays by first-class functions\ntype ByFn struct{\n\telems []interface{}\n\tcomp func(a, b interface{}) bool\n}\nfunc (c ByFn) Len() int { return len(c.elems) }\nfunc (c ByFn) Less(i, j int) bool { return c.comp(c.elems[i], c.elems[j]) }\nfunc (c ByFn) Swap(i, j int) { c.elems[i], c.elems[j] = c.elems[j], c.elems[i] }\n\n\n\/\/ generic api requests\nfunc apiReq(meth string, url string) interface{} {\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(meth, url, nil)\n\treq.SetBasicAuth(\"x\", os.Getenv(\"HEROKU_API_KEY\"))\n\treq.Header.Add(\"User-Agent\", fmt.Sprintf(\"hk\/%s\", VERSION))\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\tres, err := client.Do(req)\n\t\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif (res.StatusCode == 401) {\n\t\terror(\"Unauthorized\")\n\t}\n\tif (res.StatusCode != 200) {\n\t\terror(\"Unexpected error\")\n\t}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tvar data interface{}\n\terr = json.Unmarshal(body, &data)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn data\n}\n\n\/\/ error formatting\nfunc error(msg string) {\n\tfmt.Fprintf(os.Stderr, \"Error: %s.\\n\", msg)\n\tos.Exit(1)\n}\n\nfunc unrecArg(arg string) {\n\terror(fmt.Sprintf(\"Unrecognized argument '%s'\", arg))\n}\n\nfunc unrecCmd(cmd string) {\n\terror(fmt.Sprintf(\"'%s' is not an hk command. See 'hk help'\", cmd))\n}\n\n\/\/ commands\nfunc envHelp() {\n\tfmt.Printf(\"Usage: hk env -a <app>\\n\\n\")\n\tfmt.Printf(\"Show all config vars.\")\n\tos.Exit(0)\n}\n\nfunc env() {\n\tif (len(os.Args) != 4) || (os.Args[2] != \"-a\") {\n\t\terror(\"Invalid usage. See hk help env\")\n\t}\n\tappName := os.Args[3]\n\tdata := apiReq(\"GET\", fmt.Sprintf(\"https:\/\/api.heroku.com\/apps\/%s\/config_vars\", appName))\n\tconfig := data.(map[string]interface{})\n\tfor k, v := range config {\n\t\tfmt.Printf(\"%s=%v\\n\", k, v)\n\t}\n\tos.Exit(0)\n}\n\nfunc getHelp() {\n\tfmt.Printf(\"Usage: hk get -a <app> <key>\\n\\n\")\n\tfmt.Printf(\"Get the value of a config var.\\n\")\n\tos.Exit(0)\n}\n\nfunc get() {\n\tif (len(os.Args) != 5) || (os.Args[2] != \"-a\") {\n\t\terror(\"Invalid usage. See hk help get\")\n\t}\n\tappName := os.Args[3]\n\tkey := os.Args[4]\n\tdata := apiReq(\"GET\", fmt.Sprintf(\"https:\/\/api.heroku.com\/apps\/%s\/config_vars\", appName))\n\tconfig := data.(map[string]string)\n\tvalue, found := config[key]\n\tif !found {\n\t\terror(fmt.Sprintf(\"No such key as '%s'\", key))\n\t}\n\tfmt.Println(value)\n\tos.Exit(0)\n}\n\nfunc listHelp() {\n\tfmt.Printf(\"Usage: hk list\\n\\n\")\n\tfmt.Printf(\"List accessible apps.\\n\")\n\tos.Exit(0)\n}\n\nfunc list() {\n\tif len(os.Args) != 2 {\n\t\tunrecArg(os.Args[2])\n\t}\n\tdata := apiReq(\"GET\", \"https:\/\/api.heroku.com\/apps\")\n\tapps := data.([]interface{})\n\tfor i := range apps {\n\t\tapp := apps[i].(map[string]interface{})\n\t\tfmt.Printf(\"%s\\n\", app[\"name\"])\n\t}\n\tos.Exit(0)\n}\n\nfunc psHelp() {\n\tfmt.Printf(\"Usage: hk ps -a <app>\\n\\n\")\n\tfmt.Printf(\"List apps processes.\\n\")\n\tos.Exit(0)\n}\n\nfunc ps() {\n\tif (len(os.Args) != 4) || (os.Args[2] != \"-a\") {\n\t\terror(\"Invalid usage. See hk help ps\")\n\t}\n\tappName := os.Args[3]\n\tdata := apiReq(\"GET\", fmt.Sprintf(\"https:\/\/api.heroku.com\/apps\/%s\/ps\", appName))\n\tprocesses := data.([]interface{})\n\tsort.Sort(ByFn{\n\t\tprocesses,\n\t\tfunc(a, b interface{}) bool {\n\t\t\tp1 := a.(map[string]interface{})[\"process\"].(string)\n\t\t\tp2 := b.(map[string]interface{})[\"process\"].(string)\n\t\t return p1 < p2\n\t }})\n\tfor i := range processes {\n\t\tprocess := processes[i].(map[string]interface{})\n\t\tfmt.Printf(\"%v\\n\", process[\"process\"])\n\t}\n\tos.Exit(0)\n}\n\nfunc versionHelp() {\n\tfmt.Printf(\"Usage: hk version\\n\\n\")\n\tfmt.Printf(\"Show hk client version.\\n\")\n\tos.Exit(0)\n}\n\nfunc version() {\n\tif len(os.Args) != 2 {\n\t unrecArg(os.Args[2])\n }\n\tfmt.Printf(\"%s\\n\", VERSION)\n\tos.Exit(0)\n}\n\nfunc help() {\n\tif len(os.Args) <= 2 {\n\t\tusage()\n\t} else {\n\t\tcmd := os.Args[2]\n\t\tswitch cmd {\n\t case \"env\":\n\t\t envHelp()\n\t case \"get\":\n\t\t\tgetHelp()\n\t\tcase \"list\":\n\t\t listHelp()\n\t case \"ps\":\n\t\t psHelp()\n\t\tcase \"version\":\n\t\t\tversionHelp()\n\t\t}\n\t\tunrecCmd(cmd)\n\t}\n}\n\n\/\/ top-level usage\nfunc usage() {\n\tfmt.Printf(\"Usage: hk <command> [-a <app>] [command-specific-options]\\n\\n\")\n\tfmt.Printf(\"Supported hk commands are:\\n\")\n\tfmt.Printf(\" addons List add-ons\\n\")\n\tfmt.Printf(\" addons-add Add an add-on\\n\")\n\tfmt.Printf(\" addons-open Open an add-on page\\n\")\n\tfmt.Printf(\" addons-remove Remove an add-on \\n\")\n\tfmt.Printf(\" create Create an app\\n\")\n\tfmt.Printf(\" destroy Destroy an app\\n\")\n\tfmt.Printf(\" env List config vars\\n\")\n\tfmt.Printf(\" get Get config var\\n\")\n\tfmt.Printf(\" help Show this help\\n\")\n\tfmt.Printf(\" info Show app info\\n\")\n\tfmt.Printf(\" list List apps\\n\")\n\tfmt.Printf(\" login Log in\\n\")\n\tfmt.Printf(\" logout Log out\\n\")\n\tfmt.Printf(\" logs Show logs\\n\")\n\tfmt.Printf(\" open Open app\\n\")\n\tfmt.Printf(\" pg List databases\\n\")\n\tfmt.Printf(\" pg-info Show database info\\n\")\n\tfmt.Printf(\" pg-promote Promote a database\\n\")\n\tfmt.Printf(\" ps-psql Open a psql database shell\\n\")\n\tfmt.Printf(\" pg-wait Await a database\\n\")\n\tfmt.Printf(\" ps List processes\\n\")\n\tfmt.Printf(\" release Show release info\\n\")\n\tfmt.Printf(\" releases List releases\\n\")\n\tfmt.Printf(\" rename Rename an app\\n\")\n\tfmt.Printf(\" restart Restart processes\\n\")\n\tfmt.Printf(\" rollback Rollback to a previous release\\n\")\n\tfmt.Printf(\" run Run a process\\n\")\n\tfmt.Printf(\" set Set config var\\n\")\n\tfmt.Printf(\" scale Scale processes\\n\")\n\tfmt.Printf(\" run Run a process\\n\")\n\tfmt.Printf(\" stop Stop a process\\n\")\n\tfmt.Printf(\" token Show auth token\\n\")\n\tfmt.Printf(\" unset Unset config vars\\n\")\n\tfmt.Printf(\" version Display version\\n\\n\")\n\tfmt.Printf(\"See 'hk help <command>' for more information on a specific command.\\n\")\n\tos.Exit(0)\n}\n\n\/\/ entry point\nfunc main() {\n\tif len(os.Args) <= 1 {\n\t\tusage()\n\t} else {\n\t\tcmd := os.Args[1]\n\t\tswitch cmd {\n\t\tcase \"env\":\n\t\t\tenv()\n\t\tcase \"get\":\n\t\t\tget()\n\t\tcase \"help\":\n\t\t\thelp()\n\t\tcase \"list\":\n\t\t\tlist()\n\t\tcase \"ps\":\n\t\t\tps()\n\t\tcase \"version\":\n\t\t\tversion()\n\t\t}\n\t\tunrecCmd(cmd)\n\t}\n}\n<commit_msg>more usable ps<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"sort\"\n)\n\nconst (\n\tVERSION = \"0.0.1\"\n)\n\n\/\/ sort interface-typed arrays by first-class functions\ntype ByFn struct{\n\telems []interface{}\n\tcomp func(a, b interface{}) bool\n}\nfunc (c ByFn) Len() int { return len(c.elems) }\nfunc (c ByFn) Less(i, j int) bool { return c.comp(c.elems[i], c.elems[j]) }\nfunc (c ByFn) Swap(i, j int) { c.elems[i], c.elems[j] = c.elems[j], c.elems[i] }\n\n\n\/\/ generic api requests\nfunc apiReq(meth string, url string) interface{} {\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(meth, url, nil)\n\treq.SetBasicAuth(\"x\", os.Getenv(\"HEROKU_API_KEY\"))\n\treq.Header.Add(\"User-Agent\", fmt.Sprintf(\"hk\/%s\", VERSION))\n\treq.Header.Add(\"Accept\", \"application\/json\")\n\tres, err := client.Do(req)\n\t\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif (res.StatusCode == 401) {\n\t\terror(\"Unauthorized\")\n\t}\n\tif (res.StatusCode != 200) {\n\t\terror(\"Unexpected error\")\n\t}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tvar data interface{}\n\terr = json.Unmarshal(body, &data)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn data\n}\n\n\/\/ error formatting\nfunc error(msg string) {\n\tfmt.Fprintf(os.Stderr, \"Error: %s.\\n\", msg)\n\tos.Exit(1)\n}\n\nfunc unrecArg(arg string) {\n\terror(fmt.Sprintf(\"Unrecognized argument '%s'\", arg))\n}\n\nfunc unrecCmd(cmd string) {\n\terror(fmt.Sprintf(\"'%s' is not an hk command. See 'hk help'\", cmd))\n}\n\n\/\/ commands\nfunc envHelp() {\n\tfmt.Printf(\"Usage: hk env -a <app>\\n\\n\")\n\tfmt.Printf(\"Show all config vars.\")\n\tos.Exit(0)\n}\n\nfunc env() {\n\tif (len(os.Args) != 4) || (os.Args[2] != \"-a\") {\n\t\terror(\"Invalid usage. See hk help env\")\n\t}\n\tappName := os.Args[3]\n\tdata := apiReq(\"GET\", fmt.Sprintf(\"https:\/\/api.heroku.com\/apps\/%s\/config_vars\", appName))\n\tconfig := data.(map[string]interface{})\n\tfor k, v := range config {\n\t\tfmt.Printf(\"%s=%v\\n\", k, v)\n\t}\n\tos.Exit(0)\n}\n\nfunc getHelp() {\n\tfmt.Printf(\"Usage: hk get -a <app> <key>\\n\\n\")\n\tfmt.Printf(\"Get the value of a config var.\\n\")\n\tos.Exit(0)\n}\n\nfunc get() {\n\tif (len(os.Args) != 5) || (os.Args[2] != \"-a\") {\n\t\terror(\"Invalid usage. See hk help get\")\n\t}\n\tappName := os.Args[3]\n\tkey := os.Args[4]\n\tdata := apiReq(\"GET\", fmt.Sprintf(\"https:\/\/api.heroku.com\/apps\/%s\/config_vars\", appName))\n\tconfig := data.(map[string]string)\n\tvalue, found := config[key]\n\tif !found {\n\t\terror(fmt.Sprintf(\"No such key as '%s'\", key))\n\t}\n\tfmt.Println(value)\n\tos.Exit(0)\n}\n\nfunc listHelp() {\n\tfmt.Printf(\"Usage: hk list\\n\\n\")\n\tfmt.Printf(\"List accessible apps.\\n\")\n\tos.Exit(0)\n}\n\nfunc list() {\n\tif len(os.Args) != 2 {\n\t\tunrecArg(os.Args[2])\n\t}\n\tdata := apiReq(\"GET\", \"https:\/\/api.heroku.com\/apps\")\n\tapps := data.([]interface{})\n\tfor i := range apps {\n\t\tapp := apps[i].(map[string]interface{})\n\t\tfmt.Printf(\"%s\\n\", app[\"name\"])\n\t}\n\tos.Exit(0)\n}\n\nfunc psHelp() {\n\tfmt.Printf(\"Usage: hk ps -a <app>\\n\\n\")\n\tfmt.Printf(\"List apps processes.\\n\")\n\tos.Exit(0)\n}\n\nfunc ps() {\n\tif (len(os.Args) != 4) || (os.Args[2] != \"-a\") {\n\t\terror(\"Invalid usage. See hk help ps\")\n\t}\n\tappName := os.Args[3]\n\tdata := apiReq(\"GET\", fmt.Sprintf(\"https:\/\/api.heroku.com\/apps\/%s\/ps\", appName))\n\tprocesses := data.([]interface{})\n\tsort.Sort(ByFn{\n\t\tprocesses,\n\t\tfunc(a, b interface{}) bool {\n\t\t\tp1 := a.(map[string]interface{})[\"process\"].(string)\n\t\t\tp2 := b.(map[string]interface{})[\"process\"].(string)\n\t\t return p1 < p2\n\t }})\n\tfmt.Printf(\"Process State Command\\n\")\n\tfmt.Printf(\"---------------- ---------- ------------------------\\n\")\n\tfor i := range processes {\n\t\tprocess := processes[i].(map[string]interface{})\n\t\tfmt.Printf(\"%-16s %-10s %s\\n\", process[\"process\"], process[\"state\"], process[\"command\"])\n\t}\n\tos.Exit(0)\n}\n\nfunc versionHelp() {\n\tfmt.Printf(\"Usage: hk version\\n\\n\")\n\tfmt.Printf(\"Show hk client version.\\n\")\n\tos.Exit(0)\n}\n\nfunc version() {\n\tif len(os.Args) != 2 {\n\t unrecArg(os.Args[2])\n }\n\tfmt.Printf(\"%s\\n\", VERSION)\n\tos.Exit(0)\n}\n\nfunc help() {\n\tif len(os.Args) <= 2 {\n\t\tusage()\n\t} else {\n\t\tcmd := os.Args[2]\n\t\tswitch cmd {\n\t case \"env\":\n\t\t envHelp()\n\t case \"get\":\n\t\t\tgetHelp()\n\t\tcase \"list\":\n\t\t listHelp()\n\t case \"ps\":\n\t\t psHelp()\n\t\tcase \"version\":\n\t\t\tversionHelp()\n\t\t}\n\t\tunrecCmd(cmd)\n\t}\n}\n\n\/\/ top-level usage\nfunc usage() {\n\tfmt.Printf(\"Usage: hk <command> [-a <app>] [command-specific-options]\\n\\n\")\n\tfmt.Printf(\"Supported hk commands are:\\n\")\n\tfmt.Printf(\" addons List add-ons\\n\")\n\tfmt.Printf(\" addons-add Add an add-on\\n\")\n\tfmt.Printf(\" addons-open Open an add-on page\\n\")\n\tfmt.Printf(\" addons-remove Remove an add-on \\n\")\n\tfmt.Printf(\" create Create an app\\n\")\n\tfmt.Printf(\" destroy Destroy an app\\n\")\n\tfmt.Printf(\" env List config vars\\n\")\n\tfmt.Printf(\" get Get config var\\n\")\n\tfmt.Printf(\" help Show this help\\n\")\n\tfmt.Printf(\" info Show app info\\n\")\n\tfmt.Printf(\" list List apps\\n\")\n\tfmt.Printf(\" login Log in\\n\")\n\tfmt.Printf(\" logout Log out\\n\")\n\tfmt.Printf(\" logs Show logs\\n\")\n\tfmt.Printf(\" open Open app\\n\")\n\tfmt.Printf(\" pg List databases\\n\")\n\tfmt.Printf(\" pg-info Show database info\\n\")\n\tfmt.Printf(\" pg-promote Promote a database\\n\")\n\tfmt.Printf(\" ps-psql Open a psql database shell\\n\")\n\tfmt.Printf(\" pg-wait Await a database\\n\")\n\tfmt.Printf(\" ps List processes\\n\")\n\tfmt.Printf(\" release Show release info\\n\")\n\tfmt.Printf(\" releases List releases\\n\")\n\tfmt.Printf(\" rename Rename an app\\n\")\n\tfmt.Printf(\" restart Restart processes\\n\")\n\tfmt.Printf(\" rollback Rollback to a previous release\\n\")\n\tfmt.Printf(\" run Run a process\\n\")\n\tfmt.Printf(\" set Set config var\\n\")\n\tfmt.Printf(\" scale Scale processes\\n\")\n\tfmt.Printf(\" run Run a process\\n\")\n\tfmt.Printf(\" stop Stop a process\\n\")\n\tfmt.Printf(\" token Show auth token\\n\")\n\tfmt.Printf(\" unset Unset config vars\\n\")\n\tfmt.Printf(\" version Display version\\n\\n\")\n\tfmt.Printf(\"See 'hk help <command>' for more information on a specific command.\\n\")\n\tos.Exit(0)\n}\n\n\/\/ entry point\nfunc main() {\n\tif len(os.Args) <= 1 {\n\t\tusage()\n\t} else {\n\t\tcmd := os.Args[1]\n\t\tswitch cmd {\n\t\tcase \"env\":\n\t\t\tenv()\n\t\tcase \"get\":\n\t\t\tget()\n\t\tcase \"help\":\n\t\t\thelp()\n\t\tcase \"list\":\n\t\t\tlist()\n\t\tcase \"ps\":\n\t\t\tps()\n\t\tcase \"version\":\n\t\t\tversion()\n\t\t}\n\t\tunrecCmd(cmd)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package file\n\nimport (\n\t\"archive\/tar\"\n\t\"context\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/containerd\/continuity\/fs\"\n\t\"github.com\/docker\/docker\/pkg\/archive\"\n\t\"github.com\/docker\/docker\/pkg\/chrootarchive\"\n\tcopy \"github.com\/tonistiigi\/fsutil\/copy\"\n)\n\nfunc unpack(ctx context.Context, srcRoot string, src string, destRoot string, dest string, ch copy.Chowner, tm *time.Time) (bool, error) {\n\tsrc, err := fs.RootPath(srcRoot, src)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !isArchivePath(src) {\n\t\treturn false, nil\n\t}\n\n\tdest, err = fs.RootPath(destRoot, dest)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif err := copy.MkdirAll(dest, 0755, ch, tm); err != nil {\n\t\treturn false, err\n\t}\n\n\tfile, err := os.Open(src)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer file.Close()\n\n\treturn true, chrootarchive.Untar(file, dest, nil)\n}\n\nfunc isArchivePath(path string) bool {\n\tfi, err := os.Lstat(path)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif fi.Mode()&os.ModeType != 0 {\n\t\treturn false\n\t}\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer file.Close()\n\trdr, err := archive.DecompressStream(file)\n\tif err != nil {\n\t\treturn false\n\t}\n\tr := tar.NewReader(rdr)\n\t_, err = r.Next()\n\treturn err == nil\n}\n<commit_msg>Close readclosers returned by DecompressStream<commit_after>package file\n\nimport (\n\t\"archive\/tar\"\n\t\"context\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/containerd\/continuity\/fs\"\n\t\"github.com\/docker\/docker\/pkg\/archive\"\n\t\"github.com\/docker\/docker\/pkg\/chrootarchive\"\n\tcopy \"github.com\/tonistiigi\/fsutil\/copy\"\n)\n\nfunc unpack(ctx context.Context, srcRoot string, src string, destRoot string, dest string, ch copy.Chowner, tm *time.Time) (bool, error) {\n\tsrc, err := fs.RootPath(srcRoot, src)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif !isArchivePath(src) {\n\t\treturn false, nil\n\t}\n\n\tdest, err = fs.RootPath(destRoot, dest)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif err := copy.MkdirAll(dest, 0755, ch, tm); err != nil {\n\t\treturn false, err\n\t}\n\n\tfile, err := os.Open(src)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer file.Close()\n\n\treturn true, chrootarchive.Untar(file, dest, nil)\n}\n\nfunc isArchivePath(path string) bool {\n\tfi, err := os.Lstat(path)\n\tif err != nil {\n\t\treturn false\n\t}\n\tif fi.Mode()&os.ModeType != 0 {\n\t\treturn false\n\t}\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer file.Close()\n\trdr, err := archive.DecompressStream(file)\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer rdr.Close()\n\tr := tar.NewReader(rdr)\n\t_, err = r.Next()\n\treturn err == nil\n}\n<|endoftext|>"} {"text":"<commit_before>package cc1100\n\nimport (\n\t\"log\"\n)\n\nconst (\n\tverbose = false\n\twriteUsingTransfer = false\n)\n\nfunc (r *Radio) ReadRegister(addr byte) (byte, error) {\n\tbuf := []byte{READ_MODE | addr, 0xFF}\n\terr := r.device.Transfer(buf)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn buf[1], nil\n}\n\nfunc (r *Radio) ReadBurst(addr byte, n int) ([]byte, error) {\n\treg := addr & 0x3F\n\tif 0x30 <= reg && reg <= 0x3D {\n\t\tlog.Panicf(\"burst access for status register %X is not available\\n\", reg)\n\t}\n\tbuf := make([]byte, n+1)\n\tbuf[0] = READ_MODE | BURST_MODE | addr\n\terr := r.device.Transfer(buf)\n\treturn buf[1:], err\n}\n\nfunc (r *Radio) writeData(data []byte) error {\n\tif writeUsingTransfer {\n\t\treturn r.device.Transfer(data)\n\t} else {\n\t\treturn r.device.Write(data)\n\t}\n}\n\nfunc (r *Radio) WriteRegister(addr byte, value byte) error {\n\treturn r.writeData([]byte{addr, value})\n}\n\nfunc (r *Radio) WriteBurst(addr byte, data []byte) error {\n\treturn r.writeData(append([]byte{BURST_MODE | addr}, data...))\n}\n\nfunc (r *Radio) WriteEach(data []byte) error {\n\tn := len(data)\n\tif n%2 != 0 {\n\t\tpanic(\"odd data length\")\n\t}\n\tfor i := 0; i < n; i += 2 {\n\t\terr := r.WriteRegister(data[i], data[i+1])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (r *Radio) Strobe(cmd byte) (byte, error) {\n\tif verbose && cmd != SNOP {\n\t\tlog.Printf(\"issuing %s command\\n\", strobeName(cmd))\n\t}\n\tbuf := []byte{cmd}\n\terr := r.device.Transfer(buf)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn buf[0], nil\n}\n\nfunc (r *Radio) Reset() error {\n\t_, err := r.Strobe(SRES)\n\treturn err\n}\n\nfunc (r *Radio) Version() (uint16, error) {\n\tp, err := r.ReadRegister(PARTNUM)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tv, err := r.ReadRegister(VERSION)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn uint16(p)<<8 | uint16(v), nil\n}\n<commit_msg>Use microsecond-resolution logging in verbose mode<commit_after>package cc1100\n\nimport (\n\t\"log\"\n)\n\nconst (\n\tverbose = false\n\twriteUsingTransfer = false\n)\n\nfunc init() {\n\tif verbose {\n\t\tlog.SetFlags(log.Ltime | log.Lmicroseconds | log.LUTC)\n\t}\n}\n\nfunc (r *Radio) ReadRegister(addr byte) (byte, error) {\n\tbuf := []byte{READ_MODE | addr, 0xFF}\n\terr := r.device.Transfer(buf)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn buf[1], nil\n}\n\nfunc (r *Radio) ReadBurst(addr byte, n int) ([]byte, error) {\n\treg := addr & 0x3F\n\tif 0x30 <= reg && reg <= 0x3D {\n\t\tlog.Panicf(\"burst access for status register %X is not available\\n\", reg)\n\t}\n\tbuf := make([]byte, n+1)\n\tbuf[0] = READ_MODE | BURST_MODE | addr\n\terr := r.device.Transfer(buf)\n\treturn buf[1:], err\n}\n\nfunc (r *Radio) writeData(data []byte) error {\n\tif writeUsingTransfer {\n\t\treturn r.device.Transfer(data)\n\t} else {\n\t\treturn r.device.Write(data)\n\t}\n}\n\nfunc (r *Radio) WriteRegister(addr byte, value byte) error {\n\treturn r.writeData([]byte{addr, value})\n}\n\nfunc (r *Radio) WriteBurst(addr byte, data []byte) error {\n\treturn r.writeData(append([]byte{BURST_MODE | addr}, data...))\n}\n\nfunc (r *Radio) WriteEach(data []byte) error {\n\tn := len(data)\n\tif n%2 != 0 {\n\t\tpanic(\"odd data length\")\n\t}\n\tfor i := 0; i < n; i += 2 {\n\t\terr := r.WriteRegister(data[i], data[i+1])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (r *Radio) Strobe(cmd byte) (byte, error) {\n\tif verbose && cmd != SNOP {\n\t\tlog.Printf(\"issuing %s command\\n\", strobeName(cmd))\n\t}\n\tbuf := []byte{cmd}\n\terr := r.device.Transfer(buf)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn buf[0], nil\n}\n\nfunc (r *Radio) Reset() error {\n\t_, err := r.Strobe(SRES)\n\treturn err\n}\n\nfunc (r *Radio) Version() (uint16, error) {\n\tp, err := r.ReadRegister(PARTNUM)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tv, err := r.ReadRegister(VERSION)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn uint16(p)<<8 | uint16(v), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2017 Tigera, Inc. All rights reserved.\n\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage intdataplane\n\nimport (\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/projectcalico\/felix\/go\/felix\/ipsets\"\n\t\"github.com\/projectcalico\/felix\/go\/felix\/iptables\"\n\t\"github.com\/projectcalico\/felix\/go\/felix\/proto\"\n\t\"github.com\/projectcalico\/felix\/go\/felix\/rules\"\n\t\"github.com\/projectcalico\/felix\/go\/felix\/set\"\n\t\"reflect\"\n)\n\ntype mockTable struct {\n\tcurrentChains map[string]*iptables.Chain\n\texpectedChains map[string]*iptables.Chain\n}\n\nfunc newMockTable() *mockTable {\n\treturn &mockTable{\n\t\tcurrentChains: map[string]*iptables.Chain{},\n\t\texpectedChains: map[string]*iptables.Chain{},\n\t}\n}\n\nfunc logChains(message string, chains []*iptables.Chain) {\n\tif chains == nil {\n\t\tlog.Debug(message, \" with nil chains\")\n\t} else {\n\t\tlog.WithField(\"chains\", chains).Debug(message)\n\t\tfor _, chain := range chains {\n\t\t\tlog.WithField(\"chain\", *chain).Debug(\"\")\n\t\t}\n\t}\n}\n\nfunc (t *mockTable) UpdateChains(chains []*iptables.Chain) {\n\tlogChains(\"UpdateChains\", chains)\n\tfor _, chain := range chains {\n\t\tt.currentChains[chain.Name] = chain\n\t}\n}\n\nfunc (t *mockTable) RemoveChains(chains []*iptables.Chain) {\n\tlogChains(\"RemoveChains\", chains)\n\tfor _, chain := range chains {\n\t\t_, prs := t.currentChains[chain.Name]\n\t\tExpect(prs).To(BeTrue())\n\t\tdelete(t.currentChains, chain.Name)\n\t}\n}\n\nfunc (t *mockTable) checkChains(expected []*iptables.Chain) {\n\tt.expectedChains = map[string]*iptables.Chain{}\n\tfor _, chain := range expected {\n\t\tt.expectedChains[chain.Name] = chain\n\t}\n\tt.checkChainsSameAsBefore()\n}\n\nfunc (t *mockTable) checkChainsSameAsBefore() {\n\tExpect(reflect.DeepEqual(t.currentChains, t.expectedChains)).To(BeTrue())\n}\n\nvar wlDispatchEmpty = []*iptables.Chain{\n\t&iptables.Chain{\n\t\tName: \"cali-to-wl-dispatch\",\n\t\tRules: []iptables.Rule{\n\t\t\t{\n\t\t\t\tMatch: iptables.Match(),\n\t\t\t\tAction: iptables.DropAction{},\n\t\t\t\tComment: \"Unknown interface\",\n\t\t\t},\n\t\t},\n\t},\n\t&iptables.Chain{\n\t\tName: \"cali-from-wl-dispatch\",\n\t\tRules: []iptables.Rule{\n\t\t\t{\n\t\t\t\tMatch: iptables.Match(),\n\t\t\t\tAction: iptables.DropAction{},\n\t\t\t\tComment: \"Unknown interface\",\n\t\t\t},\n\t\t},\n\t},\n}\n\nvar hostDispatchEmpty = []*iptables.Chain{\n\t&iptables.Chain{\n\t\tName: \"cali-to-host-endpoint\",\n\t\tRules: []iptables.Rule{},\n\t},\n\t&iptables.Chain{\n\t\tName: \"cali-from-host-endpoint\",\n\t\tRules: []iptables.Rule{},\n\t},\n}\n\nfunc hostDispatchForIface(ifaceName string) []*iptables.Chain {\n\treturn []*iptables.Chain{\n\t\t&iptables.Chain{\n\t\t\tName: \"calith-\" + ifaceName,\n\t\t\tRules: []iptables.Rule{\n\t\t\t\t{\n\t\t\t\t\tMatch: iptables.Match(),\n\t\t\t\t\tAction: iptables.JumpAction{Target: \"cali-failsafe-out\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tMatch: iptables.Match(),\n\t\t\t\t\tAction: iptables.ClearMarkAction{Mark: 8},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tMatch: iptables.Match(),\n\t\t\t\t\tAction: iptables.DropAction{},\n\t\t\t\t\tComment: \"Drop if no profiles matched\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t&iptables.Chain{\n\t\t\tName: \"califh-\" + ifaceName,\n\t\t\tRules: []iptables.Rule{\n\t\t\t\t{\n\t\t\t\t\tMatch: iptables.Match(),\n\t\t\t\t\tAction: iptables.JumpAction{Target: \"cali-failsafe-in\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tMatch: iptables.Match(),\n\t\t\t\t\tAction: iptables.ClearMarkAction{Mark: 8},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tMatch: iptables.Match(),\n\t\t\t\t\tAction: iptables.DropAction{},\n\t\t\t\t\tComment: \"Drop if no profiles matched\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t&iptables.Chain{\n\t\t\tName: \"cali-to-host-endpoint\",\n\t\t\tRules: []iptables.Rule{\n\t\t\t\t{\n\t\t\t\t\tMatch: iptables.Match().OutInterface(ifaceName),\n\t\t\t\t\tAction: iptables.GotoAction{Target: \"calith-\" + ifaceName},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t&iptables.Chain{\n\t\t\tName: \"cali-from-host-endpoint\",\n\t\t\tRules: []iptables.Rule{\n\t\t\t\t{\n\t\t\t\t\tMatch: iptables.Match().InInterface(ifaceName),\n\t\t\t\t\tAction: iptables.GotoAction{Target: \"califh-\" + ifaceName},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nvar _ = Describe(\"EndpointManager test\", func() {\n\n\tvar epMgr *endpointManager\n\tvar filterTable *mockTable\n\n\trrConfigNormal := rules.Config{\n\t\tIPIPEnabled: true,\n\t\tIPIPTunnelAddress: nil,\n\t\tIPSetConfigV4: ipsets.NewIPVersionConfig(ipsets.IPFamilyV4, \"cali\", nil, nil),\n\t\tIPSetConfigV6: ipsets.NewIPVersionConfig(ipsets.IPFamilyV6, \"cali\", nil, nil),\n\t\tIptablesMarkAccept: 0x8,\n\t\tIptablesMarkNextTier: 0x10,\n\t}\n\n\tfor ip_version := range []uint8{4, 6} {\n\t\tBeforeEach(func() {\n\t\t\trenderer := rules.NewRenderer(rrConfigNormal)\n\t\t\tfilterTable = newMockTable()\n\t\t\tepMgr = newEndpointManager(\n\t\t\t\tfilterTable,\n\t\t\t\trenderer,\n\t\t\t\tnil,\n\t\t\t\tuint8(ip_version),\n\t\t\t\t[]string{\"cali\"},\n\t\t\t\tnil,\n\t\t\t)\n\t\t})\n\n\t\tIt(\"should be constructable\", func() {\n\t\t\tExpect(epMgr).ToNot(BeNil())\n\t\t})\n\n\t\tIt(\"should process host endpoints\", func() {\n\n\t\t\tlog.Info(\"TEST: Define a host endpoint for a named interface\")\n\t\t\tepMgr.OnUpdate(&proto.HostEndpointUpdate{\n\t\t\t\tId: &proto.HostEndpointID{\n\t\t\t\t\tEndpointId: \"endpoint-id-11\",\n\t\t\t\t},\n\t\t\t\tEndpoint: &proto.HostEndpoint{\n\t\t\t\t\tName: \"eth0\",\n\t\t\t\t\tProfileIds: []string{},\n\t\t\t\t\tTiers: []*proto.TierInfo{},\n\t\t\t\t\tExpectedIpv4Addrs: []string{},\n\t\t\t\t\tExpectedIpv6Addrs: []string{},\n\t\t\t\t},\n\t\t\t})\n\t\t\tepMgr.CompleteDeferredWork()\n\t\t\tfilterTable.checkChains(append(wlDispatchEmpty, hostDispatchEmpty...))\n\n\t\t\tlog.Info(\"TEST: Signal that that interface exists\")\n\t\t\tepMgr.OnUpdate(&ifaceUpdate{\n\t\t\t\tName: \"eth0\",\n\t\t\t\tState: \"up\",\n\t\t\t})\n\t\t\taddrs := set.New()\n\t\t\tepMgr.OnUpdate(&ifaceAddrsUpdate{\n\t\t\t\tName: \"eth0\",\n\t\t\t\tAddrs: addrs,\n\t\t\t})\n\t\t\tepMgr.CompleteDeferredWork()\n\t\t\tfilterTable.checkChains(append(wlDispatchEmpty, hostDispatchForIface(\"eth0\")...))\n\n\t\t\tlog.Info(\"TEST: Add an address to the interface\")\n\t\t\taddrs.Add(\"10.0.240.10\")\n\t\t\tepMgr.OnUpdate(&ifaceAddrsUpdate{\n\t\t\t\tName: \"eth0\",\n\t\t\t\tAddrs: addrs,\n\t\t\t})\n\t\t\tepMgr.CompleteDeferredWork()\n\t\t\tfilterTable.checkChainsSameAsBefore()\n\n\t\t\tlog.Info(\"TEST: Change host endpoint to expect that address instead of a named interface\")\n\t\t\tepMgr.OnUpdate(&proto.HostEndpointUpdate{\n\t\t\t\tId: &proto.HostEndpointID{\n\t\t\t\t\tEndpointId: \"endpoint-id-11\",\n\t\t\t\t},\n\t\t\t\tEndpoint: &proto.HostEndpoint{\n\t\t\t\t\tProfileIds: []string{},\n\t\t\t\t\tTiers: []*proto.TierInfo{},\n\t\t\t\t\tExpectedIpv4Addrs: []string{\"10.0.240.10\"},\n\t\t\t\t\tExpectedIpv6Addrs: []string{},\n\t\t\t\t},\n\t\t\t})\n\t\t\tepMgr.CompleteDeferredWork()\n\t\t\tfilterTable.checkChainsSameAsBefore()\n\n\t\t\tlog.Info(\"TEST: Signal another host endpoint that also matches the IP address\")\n\t\t\tepMgr.OnUpdate(&proto.HostEndpointUpdate{\n\t\t\t\tId: &proto.HostEndpointID{\n\t\t\t\t\tEndpointId: \"other-endpoint-id-55\",\n\t\t\t\t},\n\t\t\t\tEndpoint: &proto.HostEndpoint{\n\t\t\t\t\tProfileIds: []string{},\n\t\t\t\t\tTiers: []*proto.TierInfo{},\n\t\t\t\t\tExpectedIpv4Addrs: []string{\"8.8.8.8\", \"10.0.240.10\"},\n\t\t\t\t\tExpectedIpv6Addrs: []string{},\n\t\t\t\t},\n\t\t\t})\n\t\t\tepMgr.CompleteDeferredWork()\n\t\t\tfilterTable.checkChainsSameAsBefore()\n\n\t\t\tlog.Info(\"TEST: Remove that other host endpoint again\")\n\t\t\tepMgr.OnUpdate(&proto.HostEndpointRemove{\n\t\t\t\tId: &proto.HostEndpointID{\n\t\t\t\t\tEndpointId: \"other-endpoint-id-55\",\n\t\t\t\t},\n\t\t\t})\n\t\t\tepMgr.CompleteDeferredWork()\n\t\t\tfilterTable.checkChainsSameAsBefore()\n\n\t\t\tlog.Info(\"TEST: Change host endpoint to expect a different address\")\n\t\t\tepMgr.OnUpdate(&proto.HostEndpointUpdate{\n\t\t\t\tId: &proto.HostEndpointID{\n\t\t\t\t\tEndpointId: \"endpoint-id-11\",\n\t\t\t\t},\n\t\t\t\tEndpoint: &proto.HostEndpoint{\n\t\t\t\t\tProfileIds: []string{},\n\t\t\t\t\tTiers: []*proto.TierInfo{},\n\t\t\t\t\tExpectedIpv4Addrs: []string{\"10.0.240.11\"},\n\t\t\t\t\tExpectedIpv6Addrs: []string{},\n\t\t\t\t},\n\t\t\t})\n\t\t\tepMgr.CompleteDeferredWork()\n\t\t\tfilterTable.checkChains(append(wlDispatchEmpty, hostDispatchEmpty...))\n\n\t\t\tlog.Info(\"TEST: Change host endpoint to be for an interface that doesn't exist yet\")\n\t\t\tepMgr.OnUpdate(&proto.HostEndpointUpdate{\n\t\t\t\tId: &proto.HostEndpointID{\n\t\t\t\t\tEndpointId: \"endpoint-id-11\",\n\t\t\t\t},\n\t\t\t\tEndpoint: &proto.HostEndpoint{\n\t\t\t\t\tName: \"eth1\",\n\t\t\t\t\tProfileIds: []string{},\n\t\t\t\t\tTiers: []*proto.TierInfo{},\n\t\t\t\t\tExpectedIpv4Addrs: []string{},\n\t\t\t\t\tExpectedIpv6Addrs: []string{},\n\t\t\t\t},\n\t\t\t})\n\t\t\tepMgr.CompleteDeferredWork()\n\t\t\tfilterTable.checkChainsSameAsBefore()\n\n\t\t\tlog.Info(\"TEST: Signal that interface\")\n\t\t\tepMgr.OnUpdate(&ifaceUpdate{\n\t\t\t\tName: \"eth1\",\n\t\t\t\tState: \"up\",\n\t\t\t})\n\t\t\taddrs = set.New()\n\t\t\tepMgr.OnUpdate(&ifaceAddrsUpdate{\n\t\t\t\tName: \"eth1\",\n\t\t\t\tAddrs: addrs,\n\t\t\t})\n\t\t\tepMgr.CompleteDeferredWork()\n\t\t\tfilterTable.checkChains(append(wlDispatchEmpty, hostDispatchForIface(\"eth1\")...))\n\t\t})\n\n\t\tIt(\"should process a workload endpoint update\", func() {\n\t\t})\n\t}\n})\n<commit_msg>Rework host endpoint tests into better ginkgo style<commit_after>\/\/ Copyright (c) 2017 Tigera, Inc. All rights reserved.\n\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage intdataplane\n\nimport (\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/projectcalico\/felix\/go\/felix\/ipsets\"\n\t\"github.com\/projectcalico\/felix\/go\/felix\/iptables\"\n\t\"github.com\/projectcalico\/felix\/go\/felix\/proto\"\n\t\"github.com\/projectcalico\/felix\/go\/felix\/rules\"\n\t\"github.com\/projectcalico\/felix\/go\/felix\/set\"\n\t\"reflect\"\n)\n\ntype mockTable struct {\n\tcurrentChains map[string]*iptables.Chain\n\texpectedChains map[string]*iptables.Chain\n}\n\nfunc newMockTable() *mockTable {\n\treturn &mockTable{\n\t\tcurrentChains: map[string]*iptables.Chain{},\n\t\texpectedChains: map[string]*iptables.Chain{},\n\t}\n}\n\nfunc logChains(message string, chains []*iptables.Chain) {\n\tif chains == nil {\n\t\tlog.Debug(message, \" with nil chains\")\n\t} else {\n\t\tlog.WithField(\"chains\", chains).Debug(message)\n\t\tfor _, chain := range chains {\n\t\t\tlog.WithField(\"chain\", *chain).Debug(\"\")\n\t\t}\n\t}\n}\n\nfunc (t *mockTable) UpdateChains(chains []*iptables.Chain) {\n\tlogChains(\"UpdateChains\", chains)\n\tfor _, chain := range chains {\n\t\tt.currentChains[chain.Name] = chain\n\t}\n}\n\nfunc (t *mockTable) RemoveChains(chains []*iptables.Chain) {\n\tlogChains(\"RemoveChains\", chains)\n\tfor _, chain := range chains {\n\t\t_, prs := t.currentChains[chain.Name]\n\t\tExpect(prs).To(BeTrue())\n\t\tdelete(t.currentChains, chain.Name)\n\t}\n}\n\nfunc (t *mockTable) checkChains(expected []*iptables.Chain) {\n\tt.expectedChains = map[string]*iptables.Chain{}\n\tfor _, chain := range expected {\n\t\tt.expectedChains[chain.Name] = chain\n\t}\n\tt.checkChainsSameAsBefore()\n}\n\nfunc (t *mockTable) checkChainsSameAsBefore() {\n\tExpect(reflect.DeepEqual(t.currentChains, t.expectedChains)).To(BeTrue())\n}\n\nvar wlDispatchEmpty = []*iptables.Chain{\n\t&iptables.Chain{\n\t\tName: \"cali-to-wl-dispatch\",\n\t\tRules: []iptables.Rule{\n\t\t\t{\n\t\t\t\tMatch: iptables.Match(),\n\t\t\t\tAction: iptables.DropAction{},\n\t\t\t\tComment: \"Unknown interface\",\n\t\t\t},\n\t\t},\n\t},\n\t&iptables.Chain{\n\t\tName: \"cali-from-wl-dispatch\",\n\t\tRules: []iptables.Rule{\n\t\t\t{\n\t\t\t\tMatch: iptables.Match(),\n\t\t\t\tAction: iptables.DropAction{},\n\t\t\t\tComment: \"Unknown interface\",\n\t\t\t},\n\t\t},\n\t},\n}\n\nvar hostDispatchEmpty = []*iptables.Chain{\n\t&iptables.Chain{\n\t\tName: \"cali-to-host-endpoint\",\n\t\tRules: []iptables.Rule{},\n\t},\n\t&iptables.Chain{\n\t\tName: \"cali-from-host-endpoint\",\n\t\tRules: []iptables.Rule{},\n\t},\n}\n\nfunc hostDispatchForIface(ifaceName string) []*iptables.Chain {\n\treturn []*iptables.Chain{\n\t\t&iptables.Chain{\n\t\t\tName: \"calith-\" + ifaceName,\n\t\t\tRules: []iptables.Rule{\n\t\t\t\t{\n\t\t\t\t\tMatch: iptables.Match(),\n\t\t\t\t\tAction: iptables.JumpAction{Target: \"cali-failsafe-out\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tMatch: iptables.Match(),\n\t\t\t\t\tAction: iptables.ClearMarkAction{Mark: 8},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tMatch: iptables.Match(),\n\t\t\t\t\tAction: iptables.DropAction{},\n\t\t\t\t\tComment: \"Drop if no profiles matched\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t&iptables.Chain{\n\t\t\tName: \"califh-\" + ifaceName,\n\t\t\tRules: []iptables.Rule{\n\t\t\t\t{\n\t\t\t\t\tMatch: iptables.Match(),\n\t\t\t\t\tAction: iptables.JumpAction{Target: \"cali-failsafe-in\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tMatch: iptables.Match(),\n\t\t\t\t\tAction: iptables.ClearMarkAction{Mark: 8},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tMatch: iptables.Match(),\n\t\t\t\t\tAction: iptables.DropAction{},\n\t\t\t\t\tComment: \"Drop if no profiles matched\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t&iptables.Chain{\n\t\t\tName: \"cali-to-host-endpoint\",\n\t\t\tRules: []iptables.Rule{\n\t\t\t\t{\n\t\t\t\t\tMatch: iptables.Match().OutInterface(ifaceName),\n\t\t\t\t\tAction: iptables.GotoAction{Target: \"calith-\" + ifaceName},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t&iptables.Chain{\n\t\t\tName: \"cali-from-host-endpoint\",\n\t\t\tRules: []iptables.Rule{\n\t\t\t\t{\n\t\t\t\t\tMatch: iptables.Match().InInterface(ifaceName),\n\t\t\t\t\tAction: iptables.GotoAction{Target: \"califh-\" + ifaceName},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nvar _ = FDescribe(\"EndpointManager testing\", func() {\n\tconst (\n\t\tipv4 = \"10.0.240.10\"\n\t\tipv6 = \"2001:db8::10.0.240.10\"\n\t)\n\tvar (\n\t\tepMgr *endpointManager\n\t\tfilterTable *mockTable\n\t\trrConfigNormal rules.Config\n\t\tipVersion int\n\t\teth0Addrs set.Set\n\t\tloAddrs set.Set\n\t)\n\n\tBeforeEach(func() {\n\t\trrConfigNormal = rules.Config{\n\t\t\tIPIPEnabled: true,\n\t\t\tIPIPTunnelAddress: nil,\n\t\t\tIPSetConfigV4: ipsets.NewIPVersionConfig(ipsets.IPFamilyV4, \"cali\", nil, nil),\n\t\t\tIPSetConfigV6: ipsets.NewIPVersionConfig(ipsets.IPFamilyV6, \"cali\", nil, nil),\n\t\t\tIptablesMarkAccept: 0x8,\n\t\t\tIptablesMarkNextTier: 0x10,\n\t\t}\n\t\teth0Addrs = set.New()\n\t\teth0Addrs.Add(ipv4)\n\t\teth0Addrs.Add(ipv6)\n\t\tloAddrs = set.New()\n\t\tloAddrs.Add(\"127.0.1.1\")\n\t\tloAddrs.Add(\"::1\")\n\t})\n\n\tJustBeforeEach(func() {\n\t\trenderer := rules.NewRenderer(rrConfigNormal)\n\t\tfilterTable = newMockTable()\n\t\tepMgr = newEndpointManager(\n\t\t\tfilterTable,\n\t\t\trenderer,\n\t\t\tnil,\n\t\t\tuint8(ipVersion),\n\t\t\t[]string{\"cali\"},\n\t\t\tnil,\n\t\t)\n\t})\n\n\tfor ipVersion = range []uint8{4, 6} {\n\t\tIt(\"should be constructable\", func() {\n\t\t\tExpect(epMgr).ToNot(BeNil())\n\t\t})\n\n\t\tconfigureHostEp := func(id string, name string, ipv4Addrs []string, ipv6Addrs []string) func() {\n\t\t\treturn func() {\n\t\t\t\tepMgr.OnUpdate(&proto.HostEndpointUpdate{\n\t\t\t\t\tId: &proto.HostEndpointID{\n\t\t\t\t\t\tEndpointId: id,\n\t\t\t\t\t},\n\t\t\t\t\tEndpoint: &proto.HostEndpoint{\n\t\t\t\t\t\tName: name,\n\t\t\t\t\t\tProfileIds: []string{},\n\t\t\t\t\t\tTiers: []*proto.TierInfo{},\n\t\t\t\t\t\tExpectedIpv4Addrs: ipv4Addrs,\n\t\t\t\t\t\tExpectedIpv6Addrs: ipv6Addrs,\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\tepMgr.CompleteDeferredWork()\n\t\t\t}\n\t\t}\n\n\t\texpectChainsFor := func(name string) func() {\n\t\t\treturn func() {\n\t\t\t\tfilterTable.checkChains(append(wlDispatchEmpty, hostDispatchForIface(name)...))\n\t\t\t}\n\t\t}\n\n\t\texpectEmptyChains := func() func() {\n\t\t\treturn func() {\n\t\t\t\tfilterTable.checkChains(append(wlDispatchEmpty, hostDispatchEmpty...))\n\t\t\t}\n\t\t}\n\n\t\tremoveHostEp := func(id string) func() {\n\t\t\treturn func() {\n\t\t\t\tepMgr.OnUpdate(&proto.HostEndpointRemove{\n\t\t\t\t\tId: &proto.HostEndpointID{\n\t\t\t\t\t\tEndpointId: id,\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\tepMgr.CompleteDeferredWork()\n\t\t\t}\n\t\t}\n\n\t\tContext(\"with host interfaces eth0, lo\", func() {\n\t\t\tJustBeforeEach(func() {\n\t\t\t\tepMgr.OnUpdate(&ifaceUpdate{\n\t\t\t\t\tName: \"eth0\",\n\t\t\t\t\tState: \"up\",\n\t\t\t\t})\n\t\t\t\tepMgr.OnUpdate(&ifaceAddrsUpdate{\n\t\t\t\t\tName: \"eth0\",\n\t\t\t\t\tAddrs: eth0Addrs,\n\t\t\t\t})\n\t\t\t\tepMgr.OnUpdate(&ifaceUpdate{\n\t\t\t\t\tName: \"lo\",\n\t\t\t\t\tState: \"up\",\n\t\t\t\t})\n\t\t\t\tepMgr.OnUpdate(&ifaceAddrsUpdate{\n\t\t\t\t\tName: \"lo\",\n\t\t\t\t\tAddrs: loAddrs,\n\t\t\t\t})\n\t\t\t\tepMgr.CompleteDeferredWork()\n\t\t\t})\n\n\t\t\tIt(\"should have empty dispatch chains\", func() {\n\t\t\t\tfilterTable.checkChains(append(wlDispatchEmpty, hostDispatchEmpty...))\n\t\t\t})\n\n\t\t\tDescribe(\"with host endpoint matching eth0\", func() {\n\t\t\t\tJustBeforeEach(configureHostEp(\"id1\", \"eth0\", []string{}, []string{}))\n\t\t\t\tIt(\"should have expected chains\", expectChainsFor(\"eth0\"))\n\n\t\t\t\tContext(\"with another host ep that matches the IPv4 address\", func() {\n\t\t\t\t\tJustBeforeEach(configureHostEp(\"id2\", \"\", []string{ipv4}, []string{}))\n\t\t\t\t\tIt(\"should have expected chains\", expectChainsFor(\"eth0\"))\n\n\t\t\t\t\tContext(\"with the first host ep removed\", func() {\n\t\t\t\t\t\tJustBeforeEach(removeHostEp(\"id1\"))\n\t\t\t\t\t\tIt(\"should have expected chains\", expectChainsFor(\"eth0\"))\n\n\t\t\t\t\t\tContext(\"with both host eps removed\", func() {\n\t\t\t\t\t\t\tJustBeforeEach(removeHostEp(\"id2\"))\n\t\t\t\t\t\t\tIt(\"should have empty dispatch chains\", expectEmptyChains())\n\t\t\t\t\t\t})\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tDescribe(\"with host endpoint matching non-existent interface\", func() {\n\t\t\t\tJustBeforeEach(configureHostEp(\"id3\", \"eth1\", []string{}, []string{}))\n\t\t\t\tIt(\"should have empty dispatch chains\", expectEmptyChains())\n\t\t\t})\n\n\t\t\tDescribe(\"with host endpoint matching IPv4 address\", func() {\n\t\t\t\tJustBeforeEach(configureHostEp(\"id4\", \"\", []string{ipv4}, []string{}))\n\t\t\t\tIt(\"should have expected chains\", expectChainsFor(\"eth0\"))\n\t\t\t})\n\n\t\t\tDescribe(\"with host endpoint matching IPv6 address\", func() {\n\t\t\t\tJustBeforeEach(configureHostEp(\"id5\", \"\", []string{}, []string{ipv6}))\n\t\t\t\tIt(\"should have expected chains\", expectChainsFor(\"eth0\"))\n\t\t\t})\n\n\t\t\tDescribe(\"with host endpoint matching IPv4 address and correct interface name\", func() {\n\t\t\t\tJustBeforeEach(configureHostEp(\"id3\", \"eth0\", []string{ipv4}, []string{}))\n\t\t\t\tIt(\"should have expected chains\", expectChainsFor(\"eth0\"))\n\t\t\t})\n\n\t\t\tDescribe(\"with host endpoint matching IPv6 address and correct interface name\", func() {\n\t\t\t\tJustBeforeEach(configureHostEp(\"id3\", \"eth0\", []string{}, []string{ipv6}))\n\t\t\t\tIt(\"should have expected chains\", expectChainsFor(\"eth0\"))\n\t\t\t})\n\n\t\t\tDescribe(\"with host endpoint matching IPv4 address and wrong interface name\", func() {\n\t\t\t\tJustBeforeEach(configureHostEp(\"id3\", \"eth1\", []string{ipv4}, []string{}))\n\t\t\t\tIt(\"should have empty dispatch chains\", expectEmptyChains())\n\t\t\t})\n\n\t\t\tDescribe(\"with host endpoint matching IPv6 address and wrong interface name\", func() {\n\t\t\t\tJustBeforeEach(configureHostEp(\"id3\", \"eth1\", []string{}, []string{ipv6}))\n\t\t\t\tIt(\"should have empty dispatch chains\", expectEmptyChains())\n\t\t\t})\n\n\t\t\tDescribe(\"with host endpoint with unmatched IPv4 address\", func() {\n\t\t\t\tJustBeforeEach(configureHostEp(\"id4\", \"\", []string{\"8.8.8.8\"}, []string{}))\n\t\t\t\tIt(\"should have empty dispatch chains\", expectEmptyChains())\n\t\t\t})\n\n\t\t\tDescribe(\"with host endpoint with unmatched IPv6 address\", func() {\n\t\t\t\tJustBeforeEach(configureHostEp(\"id5\", \"\", []string{}, []string{\"fe08::2\"}))\n\t\t\t\tIt(\"should have empty dispatch chains\", expectEmptyChains())\n\t\t\t})\n\n\t\t})\n\n\t\tContext(\"with host endpoint configured before interface signaled\", func() {\n\t\t\tJustBeforeEach(configureHostEp(\"id3\", \"eth0\", []string{}, []string{}))\n\t\t\tIt(\"should have empty dispatch chains\", expectEmptyChains())\n\n\t\t\tContext(\"with interface signaled\", func() {\n\t\t\t\tJustBeforeEach(func() {\n\t\t\t\t\tepMgr.OnUpdate(&ifaceUpdate{\n\t\t\t\t\t\tName: \"eth0\",\n\t\t\t\t\t\tState: \"up\",\n\t\t\t\t\t})\n\t\t\t\t\tepMgr.OnUpdate(&ifaceAddrsUpdate{\n\t\t\t\t\t\tName: \"eth0\",\n\t\t\t\t\t\tAddrs: eth0Addrs,\n\t\t\t\t\t})\n\t\t\t\t\tepMgr.CompleteDeferredWork()\n\t\t\t\t})\n\t\t\t\tIt(\"should have expected chains\", expectChainsFor(\"eth0\"))\n\t\t\t})\n\t\t})\n\n\t\tIt(\"should process a workload endpoint update\", func() {\n\t\t})\n\t}\n})\n<|endoftext|>"} {"text":"<commit_before>\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\npackage awskms\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"os\"\n\t\/\/ ignore-placeholder1\n\t\/\/ ignore-placeholder2\n\t\"testing\"\n\n\t\"flag\"\n\t\/\/ context is used to cancel outstanding requests\n\t\/\/ TEST_SRCDIR to read the roots.pem\n\t\"github.com\/google\/tink\/go\/aead\"\n\t\"github.com\/google\/tink\/go\/core\/registry\"\n\t\"github.com\/google\/tink\/go\/keyset\"\n\t\/\/ ignore-placeholder3\n\t\"github.com\/google\/tink\/go\/subtle\/random\"\n\t\"github.com\/google\/tink\/go\/tink\"\n)\n\nconst (\n\tkeyURI = \"aws-kms:\/\/arn:aws:kms:us-east-2:235739564943:key\/3ee50705-5a82-4f5b-9753-05c4f473922f\"\n\tprofile = \"tink-user1\"\n)\n\nvar (\n\t\/\/ lint placeholder header, please ignore\n\tcredFile = os.Getenv(\"TEST_SRCDIR\") + \"\/\" + os.Getenv(\"TEST_WORKSPACE\") + \"\/\" + \"testdata\/credentials_aws.csv\"\n\t\/\/ lint placeholder footer, please ignore\n)\n\n\/\/ lint placeholder header, please ignore\nfunc init() {\n\tcertPath := os.Getenv(\"TEST_SRCDIR\") + \"\/\" + os.Getenv(\"TEST_WORKSPACE\") + \"\/\" + \"roots.pem\"\n\tflag.Set(\"cacerts\", certPath)\n\tos.Setenv(\"SSL_CERT_FILE\", certPath)\n}\n\n\/\/ lint placeholder footer, please ignore\n\nfunc setupKMS(t *testing.T) {\n\tt.Helper()\n\tg, err := NewAWSClient(keyURI)\n\tif err != nil {\n\t\tt.Fatalf(\"error setting up aws client: %v\", err)\n\t}\n\t_, err = g.LoadCredentials(credFile)\n\tif err != nil {\n\t\tt.Fatalf(\"error loading credentials : %v\", err)\n\t}\n\tregistry.RegisterKMSClient(g)\n}\n\nfunc basicAEADTest(t *testing.T, a tink.AEAD) error {\n\tt.Helper()\n\tfor i := 0; i < 100; i++ {\n\t\tpt := random.GetRandomBytes(20)\n\t\tad := random.GetRandomBytes(20)\n\t\tct, err := a.Encrypt(pt, ad)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdt, err := a.Decrypt(ct, ad)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !bytes.Equal(dt, pt) {\n\t\t\treturn errors.New(\"decrypt not inverse of encrypt\")\n\t\t}\n\t}\n\treturn nil\n}\nfunc TestBasicAead(t *testing.T) {\n\tsetupKMS(t)\n\t\/\/ ignore-placeholder4\n\tdek := aead.AES128CTRHMACSHA256KeyTemplate()\n\tkh, err := keyset.NewHandle(aead.KMSEnvelopeAEADKeyTemplate(keyURI, dek))\n\tif err != nil {\n\t\tt.Fatalf(\"error getting a new keyset handle: %v\", err)\n\t}\n\ta, err := awsaead(kh)\n\tif err != nil {\n\t\tt.Fatalf(\"error getting the primitive: %v\", err)\n\t}\n\tif err := basicAEADTest(t, a); err != nil {\n\t\tt.Errorf(\"error in basic aead tests: %v\", err)\n\t}\n}\n\n\/\/ ignore-placeholder5\n<commit_msg>add test for aws kms with no additional data<commit_after>\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\npackage awskms\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"os\"\n\t\/\/ ignore-placeholder1\n\t\/\/ ignore-placeholder2\n\t\"testing\"\n\n\t\"flag\"\n\t\/\/ context is used to cancel outstanding requests\n\t\/\/ TEST_SRCDIR to read the roots.pem\n\t\"github.com\/google\/tink\/go\/aead\"\n\t\"github.com\/google\/tink\/go\/core\/registry\"\n\t\"github.com\/google\/tink\/go\/keyset\"\n\t\/\/ ignore-placeholder3\n\t\"github.com\/google\/tink\/go\/subtle\/random\"\n\t\"github.com\/google\/tink\/go\/tink\"\n)\n\nconst (\n\tkeyURI = \"aws-kms:\/\/arn:aws:kms:us-east-2:235739564943:key\/3ee50705-5a82-4f5b-9753-05c4f473922f\"\n\tprofile = \"tink-user1\"\n)\n\nvar (\n\t\/\/ lint placeholder header, please ignore\n\tcredFile = os.Getenv(\"TEST_SRCDIR\") + \"\/\" + os.Getenv(\"TEST_WORKSPACE\") + \"\/\" + \"testdata\/credentials_aws.csv\"\n\t\/\/ lint placeholder footer, please ignore\n)\n\n\/\/ lint placeholder header, please ignore\nfunc init() {\n\tcertPath := os.Getenv(\"TEST_SRCDIR\") + \"\/\" + os.Getenv(\"TEST_WORKSPACE\") + \"\/\" + \"roots.pem\"\n\tflag.Set(\"cacerts\", certPath)\n\tos.Setenv(\"SSL_CERT_FILE\", certPath)\n}\n\n\/\/ lint placeholder footer, please ignore\n\nfunc setupKMS(t *testing.T) {\n\tt.Helper()\n\tg, err := NewAWSClient(keyURI)\n\tif err != nil {\n\t\tt.Fatalf(\"error setting up aws client: %v\", err)\n\t}\n\t_, err = g.LoadCredentials(credFile)\n\tif err != nil {\n\t\tt.Fatalf(\"error loading credentials : %v\", err)\n\t}\n\tregistry.RegisterKMSClient(g)\n}\n\nfunc basicAEADTest(t *testing.T, a tink.AEAD) error {\n\tt.Helper()\n\tfor i := 0; i < 100; i++ {\n\t\tpt := random.GetRandomBytes(20)\n\t\tad := random.GetRandomBytes(20)\n\t\tct, err := a.Encrypt(pt, ad)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdt, err := a.Decrypt(ct, ad)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !bytes.Equal(dt, pt) {\n\t\t\treturn errors.New(\"decrypt not inverse of encrypt\")\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc TestBasicAead(t *testing.T) {\n\tsetupKMS(t)\n\t\/\/ ignore-placeholder4\n\tdek := aead.AES128CTRHMACSHA256KeyTemplate()\n\tkh, err := keyset.NewHandle(aead.KMSEnvelopeAEADKeyTemplate(keyURI, dek))\n\tif err != nil {\n\t\tt.Fatalf(\"error getting a new keyset handle: %v\", err)\n\t}\n\ta, err := awsaead(kh)\n\tif err != nil {\n\t\tt.Fatalf(\"error getting the primitive: %v\", err)\n\t}\n\tif err := basicAEADTest(t, a); err != nil {\n\t\tt.Errorf(\"error in basic aead tests: %v\", err)\n\t}\n}\n\nfunc TestBasicAeadWithoutAdditionalData(t *testing.T) {\n\tsetupKMS(t)\n\t\/\/ ignore-placeholder4\n\tdek := aead.AES128CTRHMACSHA256KeyTemplate()\n\tkh, err := keyset.NewHandle(aead.KMSEnvelopeAEADKeyTemplate(keyURI, dek))\n\tif err != nil {\n\t\tt.Fatalf(\"error getting a new keyset handle: %v\", err)\n\t}\n\ta, err := awsaead(kh)\n\tif err != nil {\n\t\tt.Fatalf(\"error getting the primitive: %v\", err)\n\t}\n\tfor i := 0; i < 100; i++ {\n\t\tpt := random.GetRandomBytes(20)\n\t\tct, err := a.Encrypt(pt, nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"error encrypting data: %v\", err)\n\t\t}\n\t\tdt, err := a.Decrypt(ct, nil)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"erorr decrypting data: %v\", err)\n\t\t}\n\t\tif !bytes.Equal(dt, pt) {\n\t\t\tt.Fatalf(\"decrypt not inverse of encrypt\")\n\t\t}\n\t}\n}\n\n\/\/ ignore-placeholder5\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage srvtopo\n\nimport (\n\t\"fmt\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\ttopodatapb \"vitess.io\/vitess\/go\/vt\/proto\/topodata\"\n\tvschemapb \"vitess.io\/vitess\/go\/vt\/proto\/vschema\"\n\t\"vitess.io\/vitess\/go\/vt\/topo\"\n)\n\nvar (\n\t\/\/ ErrNilUnderlyingServer is returned when attempting to create a new keyspace\n\t\/\/ filtering server if a nil underlying server implementation is provided.\n\tErrNilUnderlyingServer = fmt.Errorf(\"Unable to construct filtering server without an underlying server\")\n\n\t\/\/ ErrTopoServerNotAvailable is returned if a caller tries to access the\n\t\/\/ topo.Server supporting this srvtopo.Server.\n\tErrTopoServerNotAvailable = fmt.Errorf(\"Cannot access underlying topology server when keyspace filtering is enabled\")\n)\n\n\/\/ NewKeyspaceFilteringServer constructs a new server based on the provided\n\/\/ implementation that prevents the specified keyspaces from being exposed\n\/\/ to consumers of the new Server.\nfunc NewKeyspaceFilteringServer(underlying Server, selectedKeyspaces []string) (Server, error) {\n\tif underlying == nil {\n\t\treturn nil, ErrNilUnderlyingServer\n\t}\n\n\tkeyspaces := map[string]bool{}\n\tfor _, ks := range selectedKeyspaces {\n\t\tkeyspaces[ks] = true\n\t}\n\n\treturn keyspaceFilteringServer{\n\t\tserver: underlying,\n\t\tselectKeyspaces: keyspaces,\n\t}, nil\n}\n\ntype keyspaceFilteringServer struct {\n\tserver Server\n\tselectKeyspaces map[string]bool\n}\n\nfunc (ksf keyspaceFilteringServer) GetTopoServer() (*topo.Server, error) {\n\treturn nil, ErrTopoServerNotAvailable\n}\n\nfunc (ksf keyspaceFilteringServer) GetSrvKeyspaceNames(\n\tctx context.Context,\n\tcell string,\n) ([]string, error) {\n\tkeyspaces, err := ksf.server.GetSrvKeyspaceNames(ctx, cell)\n\tret := make([]string, 0, len(keyspaces))\n\tfor _, ks := range keyspaces {\n\t\tif ksf.selectKeyspaces[ks] {\n\t\t\tret = append(ret, ks)\n\t\t}\n\t}\n\treturn ret, err\n}\n\nfunc (ksf keyspaceFilteringServer) GetSrvKeyspace(\n\tctx context.Context,\n\tcell,\n\tkeyspace string,\n) (*topodatapb.SrvKeyspace, error) {\n\tif !ksf.selectKeyspaces[keyspace] {\n\t\treturn nil, topo.NewError(topo.NoNode, keyspace)\n\t}\n\n\treturn ksf.server.GetSrvKeyspace(ctx, cell, keyspace)\n}\n\nfunc (ksf keyspaceFilteringServer) WatchSrvVSchema(\n\tctx context.Context,\n\tcell string,\n\tcallback func(*vschemapb.SrvVSchema, error),\n) {\n\tfilteringCallback := func(schema *vschemapb.SrvVSchema, err error) {\n\t\tif schema != nil {\n\t\t\tfor ks := range schema.Keyspaces {\n\t\t\t\tif !ksf.selectKeyspaces[ks] {\n\t\t\t\t\tdelete(schema.Keyspaces, ks)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tcallback(schema, err)\n\t}\n\n\tksf.server.WatchSrvVSchema(ctx, cell, filteringCallback)\n}\n<commit_msg>Document the error behavior<commit_after>\/*\nCopyright 2018 The Vitess Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage srvtopo\n\nimport (\n\t\"fmt\"\n\n\t\"golang.org\/x\/net\/context\"\n\n\ttopodatapb \"vitess.io\/vitess\/go\/vt\/proto\/topodata\"\n\tvschemapb \"vitess.io\/vitess\/go\/vt\/proto\/vschema\"\n\t\"vitess.io\/vitess\/go\/vt\/topo\"\n)\n\nvar (\n\t\/\/ ErrNilUnderlyingServer is returned when attempting to create a new keyspace\n\t\/\/ filtering server if a nil underlying server implementation is provided.\n\tErrNilUnderlyingServer = fmt.Errorf(\"Unable to construct filtering server without an underlying server\")\n\n\t\/\/ ErrTopoServerNotAvailable is returned if a caller tries to access the\n\t\/\/ topo.Server supporting this srvtopo.Server.\n\tErrTopoServerNotAvailable = fmt.Errorf(\"Cannot access underlying topology server when keyspace filtering is enabled\")\n)\n\n\/\/ NewKeyspaceFilteringServer constructs a new server based on the provided\n\/\/ implementation that prevents the specified keyspaces from being exposed\n\/\/ to consumers of the new Server.\n\/\/\n\/\/ A filtering server will not allow access to the topo.Server to prevent\n\/\/ updates that may corrupt the global VSchema keyspace.\nfunc NewKeyspaceFilteringServer(underlying Server, selectedKeyspaces []string) (Server, error) {\n\tif underlying == nil {\n\t\treturn nil, ErrNilUnderlyingServer\n\t}\n\n\tkeyspaces := map[string]bool{}\n\tfor _, ks := range selectedKeyspaces {\n\t\tkeyspaces[ks] = true\n\t}\n\n\treturn keyspaceFilteringServer{\n\t\tserver: underlying,\n\t\tselectKeyspaces: keyspaces,\n\t}, nil\n}\n\ntype keyspaceFilteringServer struct {\n\tserver Server\n\tselectKeyspaces map[string]bool\n}\n\n\/\/ GetTopoServer returns an error; filtering srvtopo.Server consumers may not\n\/\/ access the underlying topo.Server.\nfunc (ksf keyspaceFilteringServer) GetTopoServer() (*topo.Server, error) {\n\treturn nil, ErrTopoServerNotAvailable\n}\n\nfunc (ksf keyspaceFilteringServer) GetSrvKeyspaceNames(\n\tctx context.Context,\n\tcell string,\n) ([]string, error) {\n\tkeyspaces, err := ksf.server.GetSrvKeyspaceNames(ctx, cell)\n\tret := make([]string, 0, len(keyspaces))\n\tfor _, ks := range keyspaces {\n\t\tif ksf.selectKeyspaces[ks] {\n\t\t\tret = append(ret, ks)\n\t\t}\n\t}\n\treturn ret, err\n}\n\nfunc (ksf keyspaceFilteringServer) GetSrvKeyspace(\n\tctx context.Context,\n\tcell,\n\tkeyspace string,\n) (*topodatapb.SrvKeyspace, error) {\n\tif !ksf.selectKeyspaces[keyspace] {\n\t\treturn nil, topo.NewError(topo.NoNode, keyspace)\n\t}\n\n\treturn ksf.server.GetSrvKeyspace(ctx, cell, keyspace)\n}\n\nfunc (ksf keyspaceFilteringServer) WatchSrvVSchema(\n\tctx context.Context,\n\tcell string,\n\tcallback func(*vschemapb.SrvVSchema, error),\n) {\n\tfilteringCallback := func(schema *vschemapb.SrvVSchema, err error) {\n\t\tif schema != nil {\n\t\t\tfor ks := range schema.Keyspaces {\n\t\t\t\tif !ksf.selectKeyspaces[ks] {\n\t\t\t\t\tdelete(schema.Keyspaces, ks)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tcallback(schema, err)\n\t}\n\n\tksf.server.WatchSrvVSchema(ctx, cell, filteringCallback)\n}\n<|endoftext|>"} {"text":"<commit_before>package manifest\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/mitchellh\/packer\/common\"\n\t\"github.com\/mitchellh\/packer\/helper\/config\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"github.com\/mitchellh\/packer\/template\/interpolate\"\n)\n\ntype Config struct {\n\tcommon.PackerConfig `mapstructure:\",squash\"`\n\n\tFilename string `mapstructure:\"filename\"`\n\n\tctx interpolate.Context\n}\n\ntype PostProcessor struct {\n\tconfig Config\n}\n\ntype ManifestFile struct {\n\tBuilds []Artifact `json:\"builds\"`\n\tLastRunUUID string `json:\"last_run_uuid\"`\n}\n\nfunc (p *PostProcessor) Configure(raws ...interface{}) error {\n\terr := config.Decode(&p.config, &config.DecodeOpts{\n\t\tInterpolate: true,\n\t\tInterpolateContext: &p.config.ctx,\n\t\tInterpolateFilter: &interpolate.RenderFilter{\n\t\t\tExclude: []string{},\n\t\t},\n\t}, raws...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif p.config.Filename == \"\" {\n\t\tp.config.Filename = \"packer-manifest.json\"\n\t}\n\n\treturn nil\n}\n\nfunc (p *PostProcessor) PostProcess(ui packer.Ui, source packer.Artifact) (packer.Artifact, bool, error) {\n\tartifact := &Artifact{}\n\n\tvar err error\n\n\t\/\/ Create the current artifact.\n\tartifact.ArtifactFiles = source.Files()\n\tartifact.ArtifactId = source.Id()\n\tartifact.BuilderType = p.config.PackerBuilderType\n\tartifact.BuildName = p.config.PackerBuildName\n\tartifact.BuildTime = time.Now().Unix()\n\t\/\/ Since each post-processor runs in a different process we need a way to\n\t\/\/ coordinate between various post-processors in a single packer run. We do\n\t\/\/ this by setting a UUID per run and tracking this in the manifest file.\n\t\/\/ When we detect that the UUID in the file is the same, we know that we are\n\t\/\/ part of the same run and we simply add our data to the list. If the UUID\n\t\/\/ is different we will check the -force flag and decide whether to truncate\n\t\/\/ the file before we proceed.\n\tartifact.PackerRunUUID = os.Getenv(\"PACKER_RUN_UUID\")\n\n\t\/\/ Create a lock file with exclusive access. If this fails we will retry\n\t\/\/ after a delay.\n\tlockFilename := p.config.Filename + \".lock\"\n\tfor i := 0; i < 3; i++ {\n\t\t\/\/ The file should not be locked for very long so we'll keep this short.\n\t\ttime.Sleep((time.Duration(i) * 200 * time.Millisecond))\n\t\t_, err = os.OpenFile(lockFilename, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tlog.Printf(\"Error locking manifest file for reading and writing. Will sleep and retry. %s\", err)\n\t}\n\tdefer os.Remove(lockFilename)\n\n\t\/\/ TODO fix error on first run:\n\t\/\/ * Post-processor failed: open packer-manifest.json: no such file or directory\n\t\/\/\n\t\/\/ Read the current manifest file from disk\n\tcontents := []byte{}\n\tif contents, err = ioutil.ReadFile(p.config.Filename); err != nil && !os.IsNotExist(err) {\n\t\treturn source, true, fmt.Errorf(\"Unable to open %s for reading: %s\", p.config.Filename, err)\n\t}\n\n\t\/\/ Parse the manifest file JSON, if we have one\n\tmanifestFile := &ManifestFile{}\n\tif len(contents) > 0 {\n\t\tif err = json.Unmarshal(contents, manifestFile); err != nil {\n\t\t\treturn source, true, fmt.Errorf(\"Unable to parse content from %s: %s\", p.config.Filename, err)\n\t\t}\n\t}\n\n\t\/\/ If -force is set and we are not on same run, truncate the file. Otherwise\n\t\/\/ we will continue to add new builds to the existing manifest file.\n\tif p.config.PackerForce && os.Getenv(\"PACKER_RUN_UUID\") != manifestFile.LastRunUUID {\n\t\tmanifestFile = &ManifestFile{}\n\t}\n\n\t\/\/ Add the current artifact to the manifest file\n\tmanifestFile.Builds = append(manifestFile.Builds, *artifact)\n\tmanifestFile.LastRunUUID = os.Getenv(\"PACKER_RUN_UUID\")\n\n\t\/\/ Write JSON to disk\n\tif out, err := json.MarshalIndent(manifestFile, \"\", \" \"); err == nil {\n\t\tif err = ioutil.WriteFile(p.config.Filename, out, 0664); err != nil {\n\t\t\treturn source, true, fmt.Errorf(\"Unable to write %s: %s\", p.config.Filename, err)\n\t\t}\n\t} else {\n\t\treturn source, true, fmt.Errorf(\"Unable to marshal JSON %s\", err)\n\t}\n\n\treturn source, true, err\n}\n<commit_msg>Fix build failure when there is no packer-manifest.json file<commit_after>package manifest\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com\/mitchellh\/packer\/common\"\n\t\"github.com\/mitchellh\/packer\/helper\/config\"\n\t\"github.com\/mitchellh\/packer\/packer\"\n\t\"github.com\/mitchellh\/packer\/template\/interpolate\"\n)\n\ntype Config struct {\n\tcommon.PackerConfig `mapstructure:\",squash\"`\n\n\tFilename string `mapstructure:\"filename\"`\n\n\tctx interpolate.Context\n}\n\ntype PostProcessor struct {\n\tconfig Config\n}\n\ntype ManifestFile struct {\n\tBuilds []Artifact `json:\"builds\"`\n\tLastRunUUID string `json:\"last_run_uuid\"`\n}\n\nfunc (p *PostProcessor) Configure(raws ...interface{}) error {\n\terr := config.Decode(&p.config, &config.DecodeOpts{\n\t\tInterpolate: true,\n\t\tInterpolateContext: &p.config.ctx,\n\t\tInterpolateFilter: &interpolate.RenderFilter{\n\t\t\tExclude: []string{},\n\t\t},\n\t}, raws...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif p.config.Filename == \"\" {\n\t\tp.config.Filename = \"packer-manifest.json\"\n\t}\n\n\treturn nil\n}\n\nfunc (p *PostProcessor) PostProcess(ui packer.Ui, source packer.Artifact) (packer.Artifact, bool, error) {\n\tartifact := &Artifact{}\n\n\tvar err error\n\n\t\/\/ Create the current artifact.\n\tartifact.ArtifactFiles = source.Files()\n\tartifact.ArtifactId = source.Id()\n\tartifact.BuilderType = p.config.PackerBuilderType\n\tartifact.BuildName = p.config.PackerBuildName\n\tartifact.BuildTime = time.Now().Unix()\n\t\/\/ Since each post-processor runs in a different process we need a way to\n\t\/\/ coordinate between various post-processors in a single packer run. We do\n\t\/\/ this by setting a UUID per run and tracking this in the manifest file.\n\t\/\/ When we detect that the UUID in the file is the same, we know that we are\n\t\/\/ part of the same run and we simply add our data to the list. If the UUID\n\t\/\/ is different we will check the -force flag and decide whether to truncate\n\t\/\/ the file before we proceed.\n\tartifact.PackerRunUUID = os.Getenv(\"PACKER_RUN_UUID\")\n\n\t\/\/ Create a lock file with exclusive access. If this fails we will retry\n\t\/\/ after a delay.\n\tlockFilename := p.config.Filename + \".lock\"\n\tfor i := 0; i < 3; i++ {\n\t\t\/\/ The file should not be locked for very long so we'll keep this short.\n\t\ttime.Sleep((time.Duration(i) * 200 * time.Millisecond))\n\t\t_, err = os.OpenFile(lockFilename, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tlog.Printf(\"Error locking manifest file for reading and writing. Will sleep and retry. %s\", err)\n\t}\n\tdefer os.Remove(lockFilename)\n\n\t\/\/ TODO fix error on first run:\n\t\/\/ * Post-processor failed: open packer-manifest.json: no such file or directory\n\t\/\/\n\t\/\/ Read the current manifest file from disk\n\tcontents := []byte{}\n\tif contents, err = ioutil.ReadFile(p.config.Filename); err != nil && !os.IsNotExist(err) {\n\t\treturn source, true, fmt.Errorf(\"Unable to open %s for reading: %s\", p.config.Filename, err)\n\t}\n\n\t\/\/ Parse the manifest file JSON, if we have one\n\tmanifestFile := &ManifestFile{}\n\tif len(contents) > 0 {\n\t\tif err = json.Unmarshal(contents, manifestFile); err != nil {\n\t\t\treturn source, true, fmt.Errorf(\"Unable to parse content from %s: %s\", p.config.Filename, err)\n\t\t}\n\t}\n\n\t\/\/ If -force is set and we are not on same run, truncate the file. Otherwise\n\t\/\/ we will continue to add new builds to the existing manifest file.\n\tif p.config.PackerForce && os.Getenv(\"PACKER_RUN_UUID\") != manifestFile.LastRunUUID {\n\t\tmanifestFile = &ManifestFile{}\n\t}\n\n\t\/\/ Add the current artifact to the manifest file\n\tmanifestFile.Builds = append(manifestFile.Builds, *artifact)\n\tmanifestFile.LastRunUUID = os.Getenv(\"PACKER_RUN_UUID\")\n\n\t\/\/ Write JSON to disk\n\tif out, err := json.MarshalIndent(manifestFile, \"\", \" \"); err == nil {\n\t\tif err = ioutil.WriteFile(p.config.Filename, out, 0664); err != nil {\n\t\t\treturn source, true, fmt.Errorf(\"Unable to write %s: %s\", p.config.Filename, err)\n\t\t}\n\t} else {\n\t\treturn source, true, fmt.Errorf(\"Unable to marshal JSON %s\", err)\n\t}\n\n\treturn source, true, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package lib\n\nimport (\n\t\"bazil.org\/fuse\"\n\t\"bazil.org\/fuse\/fs\"\n\tdrive \"code.google.com\/p\/google-api-go-client\/drive\/v2\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\n\/\/ DriveDir represents a directory in google drive\ntype DriveDir struct {\n\tDir *drive.File\n\tModified time.Time\n\tCreated time.Time\n\tRoot bool\n}\n\n\/\/ Attr returns the file attributes\nfunc (DriveDir) Attr() fuse.Attr {\n\treturn fuse.Attr{\n\t\tMode: os.ModeDir | 0555,\n\t}\n}\n\n\/\/ Create creates an empty file inside of d\nfunc (d *DriveDir) Create(req *fuse.CreateRequest, res *fuse.CreateResponse, intr fs.Intr) (fs.Node, fs.Handle, fuse.Error) {\n\tnewFile := &drive.File{}\n\tnewFile.Title = req.Name\n\tp := &drive.ParentReference{Id: d.Dir.Id}\n\tnewFile.Parents = []*drive.ParentReference{p}\n\t\/\/ create temporary file to serve as the cache until the data is uploaded\n\tpath := \"\/tmp\/drivefs-\" + req.Name\n\ttmpFile, err := os.Create(path)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, nil, err\n\t}\n\tcreatedFile, err1 := service.Files.Insert(newFile).Media(tmpFile).Do()\n\tif err1 != nil {\n\t\tlog.Println(err1)\n\t\treturn nil, nil, err1\n\t}\n\t\/\/ update d's child index\n\n\tf := &DriveFile{File: createdFile, Root: false, TmpFile: tmpFile}\n\t\/\/ add the new file to the cach\/index\n\tnameToFile[f.File.Title] = f\n\tidToFile[f.File.Id] = f\n\tfileIndex[f.File.Id] = f.File\n\n\treturn f, f, nil\n}\n\n\/\/ Lookup scans the current directory for matching files or directories\nfunc (d *DriveDir) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {\n\t\/\/ Lookup dir by name\n\tif dir, ok := nameToDir[name]; ok {\n\t\treturn dir, nil\n\t}\n\n\t\/\/ Lookup file by name\n\tif file, ok := nameToFile[name]; ok {\n\t\treturn file, nil\n\t}\n\t\/\/ This comes up as the node id for first access, so just show the root folder\n\tif name == \".xdg-volume-info\" {\n\t\tif dir, ok := nameToDir[\"root\"]; ok {\n\t\t\treturn dir, nil\n\t\t}\n\t}\n\t\/\/ File not found\n\treturn nil, fuse.ENOENT\n}\n\n\/\/ ReadDir return a slice of directory entries\nfunc (d *DriveDir) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {\n\tdirChan := make(chan *[]fuse.Dirent)\n\tgo func() {\n\t\t\/\/ List of directories to return\n\t\tvar dirs []fuse.Dirent\n\t\t\/\/ get all new list of files\n\t\tf, err := service.Files.List().Do()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\tfileList := f.Items\n\t\t\/\/ Populate idToFile with new ids\n\t\tfor i := range fileList {\n\t\t\tidToFile[fileList[i].Id] = &DriveFile{File: fileList[i], Root: false}\n\t\t}\n\t\t\/\/ get list of children\n\t\t\/\/ If d is at root, fetch the root children, else fetch this file's children\n\t\tvar c *drive.ChildList\n\t\tif d.Root {\n\t\t\tc = childIndex[\"root\"]\n\t\t} else {\n\t\t\tc = childIndex[d.Dir.Id]\n\t\t}\n\n\t\t\/\/ Get children of this folder\n\t\tchildren := c.Items\n\n\t\tdirs = make([]fuse.Dirent, len(children))\n\n\t\t\/\/ populate dirs with children\n\t\tfor i := range children {\n\t\t\t\/\/ pull out a child temporarally\n\t\t\ttmp := idToFile[children[i].Id]\n\t\t\t\/\/ If child is a folder\/directory create a DirveDir else create a DriveFile\n\t\t\tif strings.Contains(tmp.File.MimeType, \"folder\") {\n\t\t\t\tdirs[i] = fuse.Dirent{\n\t\t\t\t\tName: tmp.File.Title,\n\t\t\t\t\tType: fuse.DT_Dir,\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tdirs[i] = fuse.Dirent{\n\t\t\t\t\tName: tmp.File.Title,\n\t\t\t\t\tType: fuse.DT_File,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdirChan <- &dirs\n\t}()\n\t\/\/ Wait for the lookups to be done, or die if interupt happens\n\tselect {\n\tcase tmp := <-dirChan:\n\t\treturn *tmp, nil\n\tcase <-intr:\n\t\treturn nil, fuse.EINTR\n\t}\n\n}\n\n\/\/ Mkdir registers a new directory\nfunc (d *DriveDir) Mkdir(req *fuse.MkdirRequest, intr fs.Intr) (fs.Node, fuse.Error) {\n\tf := &drive.File{Title: req.Name, MimeType: \"application\/vnd.google-apps.folder\"}\n\tnewDir, err := service.Files.Insert(f).Do()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, fuse.Errno(syscall.EROFS)\n\t}\n\treturn DriveDir{Dir: newDir, Root: false}, nil\n}\n\n\/\/ Remove deletes a fild or folder from google drive\nfunc (d *DriveDir) Remove(req *fuse.RemoveRequest, intr fs.Intr) fuse.Error {\n\tif file, ok := nameToFile[req.Name]; ok {\n\t\terr := service.Files.Delete(file.File.Id).Do()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\treturn err\n\t}\n\treturn fuse.ENODATA\n}\n\n\/\/ Rename a file in d\nfunc (d *DriveDir) Rename(req *fuse.RenameRequest, node fs.Node, intr fs.Intr) fuse.Error {\n\t\/\/ copy the file on google drive to the new name\n\t_, err := service.Files.Copy(nameToFile[req.OldName].File.Id, &drive.File{Title: req.NewName}).Do()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\terr = service.Files.Delete(nameToFile[req.OldName].File.Id).Do()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\n\tgo refreshAll()\n\treturn nil\n}\n<commit_msg>added stub Fsync and better error handling<commit_after>package lib\n\nimport (\n\t\"bazil.org\/fuse\"\n\t\"bazil.org\/fuse\/fs\"\n\tdrive \"code.google.com\/p\/google-api-go-client\/drive\/v2\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n)\n\n\/\/ DriveDir represents a directory in google drive\ntype DriveDir struct {\n\tDir *drive.File\n\tModified time.Time\n\tCreated time.Time\n\tRoot bool\n}\n\n\/\/ Attr returns the file attributes\nfunc (DriveDir) Attr() fuse.Attr {\n\treturn fuse.Attr{\n\t\tMode: os.ModeDir | 0555,\n\t}\n}\n\n\/\/ Create creates an empty file inside of d\nfunc (d *DriveDir) Create(req *fuse.CreateRequest, res *fuse.CreateResponse, intr fs.Intr) (fs.Node, fs.Handle, fuse.Error) {\n\tnewFile := &drive.File{}\n\tnewFile.Title = req.Name\n\tp := &drive.ParentReference{Id: d.Dir.Id}\n\tnewFile.Parents = []*drive.ParentReference{p}\n\t\/\/ create temporary file to serve as the cache until the data is uploaded\n\tpath := \"\/tmp\/drivefs-\" + req.Name\n\ttmpFile, err := os.Create(path)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, nil, err\n\t}\n\tcreatedFile, err1 := service.Files.Insert(newFile).Media(tmpFile).Do()\n\tif err1 != nil {\n\t\tlog.Println(err1)\n\t\treturn nil, nil, err1\n\t}\n\t\/\/ update d's child index\n\n\tf := &DriveFile{File: createdFile, Root: false, TmpFile: tmpFile}\n\t\/\/ add the new file to the cach\/index\n\tnameToFile[f.File.Title] = f\n\tidToFile[f.File.Id] = f\n\tfileIndex[f.File.Id] = f.File\n\n\treturn f, f, nil\n}\n\n\/\/ Lookup scans the current directory for matching files or directories\nfunc (d *DriveDir) Lookup(name string, intr fs.Intr) (fs.Node, fuse.Error) {\n\t\/\/ Lookup dir by name\n\tif dir, ok := nameToDir[name]; ok {\n\t\treturn dir, nil\n\t}\n\n\t\/\/ Lookup file by name\n\tif file, ok := nameToFile[name]; ok {\n\t\treturn file, nil\n\t}\n\t\/\/ This comes up as the node id for first access, so just show the root folder\n\tif name == \".xdg-volume-info\" {\n\t\tif dir, ok := nameToDir[\"root\"]; ok {\n\t\t\treturn dir, nil\n\t\t}\n\t}\n\t\/\/ File not found\n\treturn nil, fuse.ENOENT\n}\n\n\/\/ ReadDir return a slice of directory entries\nfunc (d *DriveDir) ReadDir(intr fs.Intr) ([]fuse.Dirent, fuse.Error) {\n\tdirChan := make(chan *[]fuse.Dirent)\n\tgo func() {\n\t\t\/\/ List of directories to return\n\t\tvar dirs []fuse.Dirent\n\t\t\/\/ get all new list of files\n\t\tf, err := service.Files.List().Do()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\treturn nil, err\n\t\t}\n\t\tfileList := f.Items\n\t\t\/\/ Populate idToFile with new ids\n\t\tfor i := range fileList {\n\t\t\tidToFile[fileList[i].Id] = &DriveFile{File: fileList[i], Root: false}\n\t\t}\n\t\t\/\/ get list of children\n\t\t\/\/ If d is at root, fetch the root children, else fetch this file's children\n\t\tvar c *drive.ChildList\n\t\tif d.Root {\n\t\t\tc = childIndex[\"root\"]\n\t\t} else {\n\t\t\tc = childIndex[d.Dir.Id]\n\t\t}\n\n\t\t\/\/ Get children of this folder\n\t\tchildren := c.Items\n\n\t\tdirs = make([]fuse.Dirent, len(children))\n\n\t\t\/\/ populate dirs with children\n\t\tfor i := range children {\n\t\t\t\/\/ pull out a child temporarally\n\t\t\ttmp := idToFile[children[i].Id]\n\t\t\t\/\/ If child is a folder\/directory create a DirveDir else create a DriveFile\n\t\t\tif strings.Contains(tmp.File.MimeType, \"folder\") {\n\t\t\t\tdirs[i] = fuse.Dirent{\n\t\t\t\t\tName: tmp.File.Title,\n\t\t\t\t\tType: fuse.DT_Dir,\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tdirs[i] = fuse.Dirent{\n\t\t\t\t\tName: tmp.File.Title,\n\t\t\t\t\tType: fuse.DT_File,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdirChan <- &dirs\n\t}()\n\t\/\/ Wait for the lookups to be done, or die if interupt happens\n\tselect {\n\tcase tmp := <-dirChan:\n\t\treturn *tmp, nil\n\tcase <-intr:\n\t\treturn nil, fuse.EINTR\n\t}\n\n}\n\n\/\/ Mkdir registers a new directory\nfunc (d *DriveDir) Mkdir(req *fuse.MkdirRequest, intr fs.Intr) (fs.Node, fuse.Error) {\n\tf := &drive.File{Title: req.Name, MimeType: \"application\/vnd.google-apps.folder\"}\n\tnewDir, err := service.Files.Insert(f).Do()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn nil, fuse.Errno(syscall.EROFS)\n\t}\n\treturn DriveDir{Dir: newDir, Root: false}, nil\n}\n\n\/\/ Remove deletes a fild or folder from google drive\nfunc (d *DriveDir) Remove(req *fuse.RemoveRequest, intr fs.Intr) fuse.Error {\n\tif file, ok := nameToFile[req.Name]; ok {\n\t\terr := service.Files.Delete(file.File.Id).Do()\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t\treturn err\n\t}\n\treturn fuse.ENODATA\n}\n\n\/\/ Rename a file in d\nfunc (d *DriveDir) Rename(req *fuse.RenameRequest, node fs.Node, intr fs.Intr) fuse.Error {\n\t\/\/ copy the file on google drive to the new name\n\t_, err := service.Files.Copy(nameToFile[req.OldName].File.Id, &drive.File{Title: req.NewName}).Do()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\terr = service.Files.Delete(nameToFile[req.OldName].File.Id).Do()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\n\tgo refreshAll()\n\treturn nil\n}\n\n\/\/ FSync is a place holder and does nothing but satisfyes the FSyncer interface\nfunc (d *DriveDir) Fsync(req *fuse.FsyncRequest, intr fs.Intr) fuse.Error {\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2020 The Rook Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage integration\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"sort\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n\n\t\"github.com\/rook\/rook\/pkg\/operator\/k8sutil\"\n\t\"github.com\/rook\/rook\/tests\/framework\/installer\"\n\t\"github.com\/rook\/rook\/tests\/framework\/utils\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/suite\"\n)\n\n\/\/ **************************************************\n\/\/ *** Mgr operations covered by TestMgrSmokeSuite ***\n\/\/\n\/\/ Ceph orchestrator device ls\n\/\/ Ceph orchestrator status\n\/\/ Ceph orchestrator host ls\n\/\/ Ceph orchestrator ls\n\/\/ **************************************************\nfunc TestCephMgrSuite(t *testing.T) {\n\tif installer.SkipTestSuite(installer.CephTestSuite) {\n\t\tt.Skip()\n\t}\n\n\ts := new(CephMgrSuite)\n\tdefer func(s *CephMgrSuite) {\n\t\tHandlePanics(recover(), s.TearDownSuite, s.T)\n\t}(s)\n\tsuite.Run(t, s)\n}\n\ntype CephMgrSuite struct {\n\tsuite.Suite\n\tsettings *installer.TestCephSettings\n\tk8sh *utils.K8sHelper\n\tinstaller *installer.CephInstaller\n\tnamespace string\n}\n\ntype host struct {\n\tAddr string\n\tHostname string\n\tLabels []string\n\tStatus string\n}\n\ntype serviceStatus struct {\n\tContainerImageName string `json:\"Container_image_name\"`\n\tLastRefresh string `json:\"Last_refresh\"`\n\tRunning int\n\tSize int\n}\n\ntype service struct {\n\tServiceName string `json:\"Service_name\"`\n\tServiceType string `json:\"Service_type\"`\n\tStatus serviceStatus\n}\n\nfunc (s *CephMgrSuite) SetupSuite() {\n\ts.namespace = \"mgr-ns\"\n\n\ts.settings = &installer.TestCephSettings{\n\t\tClusterName: s.namespace,\n\t\tOperatorNamespace: installer.SystemNamespace(s.namespace),\n\t\tNamespace: s.namespace,\n\t\tStorageClassName: \"\",\n\t\tUseHelm: false,\n\t\tUsePVC: false,\n\t\tMons: 1,\n\t\tUseCSI: true,\n\t\tSkipOSDCreation: true,\n\t\tEnableDiscovery: true,\n\t\tRookVersion: installer.LocalBuildTag,\n\t\tCephVersion: installer.MasterVersion,\n\t}\n\ts.settings.ApplyEnvVars()\n\ts.installer, s.k8sh = StartTestCluster(s.T, s.settings)\n\ts.waitForOrchestrationModule()\n\ts.prepareLocalStorageClass(\"local-storage\")\n}\n\nfunc (s *CephMgrSuite) AfterTest(suiteName, testName string) {\n\ts.installer.CollectOperatorLog(suiteName, testName)\n}\n\nfunc (s *CephMgrSuite) TearDownSuite() {\n\t_ = s.k8sh.DeleteResource(\"sc\", \"local-storage\")\n\ts.installer.UninstallRook()\n}\n\nfunc (s *CephMgrSuite) execute(command []string) (error, string) {\n\torchCommand := append([]string{\"orch\"}, command...)\n\treturn s.installer.Execute(\"ceph\", orchCommand, s.namespace)\n}\n\nfunc (s *CephMgrSuite) prepareLocalStorageClass(storageClassName string) {\n\t\/\/ Rook orchestrator use PVs based in this storage class to create OSDs\n\t\/\/ It is also needed to list \"devices\"\n\tlocalStorageClass := `\nkind: StorageClass\napiVersion: storage.k8s.io\/v1\nmetadata:\n name: ` + storageClassName + `\nprovisioner: kubernetes.io\/no-provisioner\nvolumeBindingMode: WaitForFirstConsumer\n`\n\terr := s.k8sh.ResourceOperation(\"apply\", localStorageClass)\n\tif err == nil {\n\t\terr, _ = s.installer.Execute(\"ceph\", []string{\"config\", \"set\", \"mgr\", \"mgr\/rook\/storage_class\", storageClassName}, s.namespace)\n\t\tif err == nil {\n\t\t\tlogger.Infof(\"Storage class %q set in manager config\", storageClassName)\n\t\t} else {\n\t\t\tassert.Fail(s.T(), fmt.Sprintf(\"Error configuring local storage class in manager config: %q\", err))\n\t\t}\n\t} else {\n\t\tassert.Fail(s.T(), fmt.Sprintf(\"Error creating local storage class: %q \", err))\n\t}\n}\n\nfunc (s *CephMgrSuite) enableOrchestratorModule() {\n\tlogger.Info(\"Enabling Rook orchestrator module: <ceph mgr module enable rook --force>\")\n\terr, output := s.installer.Execute(\"ceph\", []string{\"mgr\", \"module\", \"enable\", \"rook\", \"--force\"}, s.namespace)\n\tlogger.Infof(\"output: %s\", output)\n\tif err != nil {\n\t\tlogger.Infof(\"Failed to enable rook orchestrator module: %q\", err)\n\t\treturn\n\t}\n\n\tlogger.Info(\"Setting orchestrator backend to Rook .... <ceph orch set backend rook>\")\n\terr, output = s.execute([]string{\"set\", \"backend\", \"rook\"})\n\tlogger.Infof(\"output: %s\", output)\n\tif err != nil {\n\t\tlogger.Infof(\"Not possible to set rook as backend orchestrator module: %q\", err)\n\t}\n}\n\nfunc (s *CephMgrSuite) waitForOrchestrationModule() {\n\tvar err error\n\n\t\/\/ Status struct\n\ttype orchStatus struct {\n\t\tAvailable bool `json:\"available\"`\n\t\tBackend string `json:\"backend\"`\n\t}\n\n\tfor timeout := 0; timeout < 30; timeout++ {\n\t\tlogger.Info(\"Waiting for rook orchestrator module enabled and ready ...\")\n\t\terr, output := s.execute([]string{\"status\", \"--format\", \"json\"})\n\t\tlogger.Infof(\"%s\", output)\n\t\tif err == nil {\n\t\t\tlogger.Info(\"Ceph orchestrator ready to execute commands\")\n\n\t\t\t\/\/ Get status information\n\t\t\tbytes := []byte(output)\n\t\t\tlogBytesInfo(bytes)\n\n\t\t\tvar status orchStatus\n\t\t\terr := json.Unmarshal(bytes[:len(output)], &status)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(\"Error getting ceph orch status\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif status.Backend != \"rook\" {\n\t\t\t\tassert.Fail(s.T(), fmt.Sprintf(\"Orchestrator backend is <%q>. Setting it to <Rook>\", status.Backend))\n\t\t\t\ts.enableOrchestratorModule()\n\t\t\t} else {\n\t\t\t\tlogger.Info(\"Orchestrator backend is <Rook>\")\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\texitError, _ := err.(*exec.ExitError)\n\t\t\tif exitError.ExitCode() == 22 { \/\/ The <ceph orch> commands are still not recognized\n\t\t\t\tlogger.Info(\"Ceph manager modules still not ready ... \")\n\t\t\t} else if exitError.ExitCode() == 2 { \/\/ The rook orchestrator is not the orchestrator backend\n\t\t\t\ts.enableOrchestratorModule()\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(5 * time.Second)\n\t}\n\tif err != nil {\n\t\tlogger.Error(\"Giving up waiting for manager module to be ready\")\n\t}\n\trequire.Nil(s.T(), err)\n}\nfunc (s *CephMgrSuite) TestDeviceLs() {\n\tlogger.Info(\"Testing .... <ceph orch device ls>\")\n\terr, device_list := s.execute([]string{\"device\", \"ls\"})\n\tassert.Nil(s.T(), err)\n\tlogger.Infof(\"output = %s\", device_list)\n}\n\nfunc (s *CephMgrSuite) TestStatus() {\n\tlogger.Info(\"Testing .... <ceph orch status>\")\n\terr, status := s.execute([]string{\"status\"})\n\tassert.Nil(s.T(), err)\n\tlogger.Infof(\"output = %s\", status)\n\n\tassert.Equal(s.T(), status, \"Backend: rook\\nAvailable: Yes\")\n}\n\nfunc logBytesInfo(bytesSlice []byte) {\n\tlogger.Infof(\"---- bytes slice info ---\")\n\tlogger.Infof(\"bytes: %v\\n\", bytesSlice)\n\tlogger.Infof(\"length: %d\\n\", len(bytesSlice))\n\tlogger.Infof(\"string: -->%s<--\\n\", string(bytesSlice))\n\tlogger.Infof(\"-------------------------\")\n}\n\nfunc (s *CephMgrSuite) TestHostLs() {\n\tlogger.Info(\"Testing .... <ceph orch host ls>\")\n\n\t\/\/ Get the orchestrator hosts\n\terr, output := s.execute([]string{\"host\", \"ls\", \"json\"})\n\tassert.Nil(s.T(), err)\n\tlogger.Infof(\"output = %s\", output)\n\n\thosts := []byte(output)\n\tlogBytesInfo(hosts)\n\n\tvar hostsList []host\n\terr = json.Unmarshal(hosts[:len(output)], &hostsList)\n\tif err != nil {\n\t\tassert.Nil(s.T(), err)\n\t}\n\n\tvar hostOutput []string\n\tfor _, hostItem := range hostsList {\n\t\thostOutput = append(hostOutput, hostItem.Addr)\n\t}\n\tsort.Strings(hostOutput)\n\n\t\/\/ get the k8s nodes\n\tnodes, err := k8sutil.GetNodeHostNames(s.k8sh.Clientset)\n\tassert.Nil(s.T(), err)\n\n\tk8sNodes := make([]string, 0, len(nodes))\n\tfor k := range nodes {\n\t\tk8sNodes = append(k8sNodes, k)\n\t}\n\tsort.Strings(k8sNodes)\n\n\t\/\/ nodes and hosts must be the same\n\tassert.Equal(s.T(), hostOutput, k8sNodes)\n}\n\nfunc (s *CephMgrSuite) TestServiceLs() {\n\tlogger.Info(\"Testing .... <ceph orch ls --format json>\")\n\terr, output := s.execute([]string{\"ls\", \"--format\", \"json\"})\n\tassert.Nil(s.T(), err)\n\tlogger.Infof(\"output = %s\", output)\n\n\tservices := []byte(output)\n\tlogBytesInfo(services)\n\n\tvar servicesList []service\n\terr = json.Unmarshal(services[:len(output)], &servicesList)\n\tassert.Nil(s.T(), err)\n\n\tlabelFilter := \"\"\n\tfor _, svc := range servicesList {\n\t\tif svc.ServiceName != \"crash\" {\n\t\t\tlabelFilter = fmt.Sprintf(\"app=rook-ceph-%s\", svc.ServiceName)\n\t\t} else {\n\t\t\tlabelFilter = \"app=rook-ceph-crashcollector\"\n\t\t}\n\t\tk8sPods, err := k8sutil.PodsRunningWithLabel(s.k8sh.Clientset, s.namespace, labelFilter)\n\t\tlogger.Infof(\"Service: %+v\", svc)\n\t\tlogger.Infof(\"k8s pods for svc %q using label <%q>: %d\", svc.ServiceName, labelFilter, k8sPods)\n\t\tassert.Nil(s.T(), err)\n\t\tassert.Equal(s.T(), svc.Status.Running, k8sPods, fmt.Sprintf(\"Wrong number of pods for kind of service <%s>\", svc.ServiceType))\n\t}\n}\n<commit_msg>ceph: retry <ceph orch> commands when they fail<commit_after>\/*\nCopyright 2020 The Rook Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n\thttp:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage integration\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"sort\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n\n\t\"github.com\/rook\/rook\/pkg\/operator\/k8sutil\"\n\t\"github.com\/rook\/rook\/tests\/framework\/installer\"\n\t\"github.com\/rook\/rook\/tests\/framework\/utils\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/suite\"\n)\n\nconst (\n\tdefaultTries = 3\n)\n\n\/\/ **************************************************\n\/\/ *** Mgr operations covered by TestMgrSmokeSuite ***\n\/\/\n\/\/ Ceph orchestrator device ls\n\/\/ Ceph orchestrator status\n\/\/ Ceph orchestrator host ls\n\/\/ Ceph orchestrator ls\n\/\/ **************************************************\nfunc TestCephMgrSuite(t *testing.T) {\n\tif installer.SkipTestSuite(installer.CephTestSuite) {\n\t\tt.Skip()\n\t}\n\n\ts := new(CephMgrSuite)\n\tdefer func(s *CephMgrSuite) {\n\t\tHandlePanics(recover(), s.TearDownSuite, s.T)\n\t}(s)\n\tsuite.Run(t, s)\n}\n\ntype CephMgrSuite struct {\n\tsuite.Suite\n\tsettings *installer.TestCephSettings\n\tk8sh *utils.K8sHelper\n\tinstaller *installer.CephInstaller\n\tnamespace string\n}\n\ntype host struct {\n\tAddr string\n\tHostname string\n\tLabels []string\n\tStatus string\n}\n\ntype serviceStatus struct {\n\tContainerImageName string `json:\"Container_image_name\"`\n\tLastRefresh string `json:\"Last_refresh\"`\n\tRunning int\n\tSize int\n}\n\ntype service struct {\n\tServiceName string `json:\"Service_name\"`\n\tServiceType string `json:\"Service_type\"`\n\tStatus serviceStatus\n}\n\nfunc (s *CephMgrSuite) SetupSuite() {\n\ts.namespace = \"mgr-ns\"\n\n\ts.settings = &installer.TestCephSettings{\n\t\tClusterName: s.namespace,\n\t\tOperatorNamespace: installer.SystemNamespace(s.namespace),\n\t\tNamespace: s.namespace,\n\t\tStorageClassName: \"\",\n\t\tUseHelm: false,\n\t\tUsePVC: false,\n\t\tMons: 1,\n\t\tUseCSI: true,\n\t\tSkipOSDCreation: true,\n\t\tEnableDiscovery: false,\n\t\tRookVersion: installer.LocalBuildTag,\n\t\tCephVersion: installer.MasterVersion,\n\t}\n\ts.settings.ApplyEnvVars()\n\ts.installer, s.k8sh = StartTestCluster(s.T, s.settings)\n\ts.waitForOrchestrationModule()\n\ts.prepareLocalStorageClass(\"local-storage\")\n}\n\nfunc (s *CephMgrSuite) AfterTest(suiteName, testName string) {\n\ts.installer.CollectOperatorLog(suiteName, testName)\n}\n\nfunc (s *CephMgrSuite) TearDownSuite() {\n\t_ = s.k8sh.DeleteResource(\"sc\", \"local-storage\")\n\ts.installer.UninstallRook()\n}\n\nfunc (s *CephMgrSuite) executeWithRetry(command []string, maxRetries int) (string, error) {\n\ttries := 0\n\torchestratorCommand := append([]string{\"orch\"}, command...)\n\tfor {\n\t\terr, output := s.installer.Execute(\"ceph\", orchestratorCommand, s.namespace)\n\t\ttries++\n\t\tif err != nil {\n\t\t\tif maxRetries == 1 {\n\t\t\t\treturn output, err\n\t\t\t}\n\t\t\tif tries == maxRetries {\n\t\t\t\treturn \"\", fmt.Errorf(\"max retries(%d) reached, last err: %v\", tries, err)\n\t\t\t}\n\t\t\tlogger.Infof(\"retrying command <<ceph %s>>: last error: %v\", command, err)\n\t\t\tcontinue\n\t\t}\n\t\treturn output, nil\n\t}\n}\n\nfunc (s *CephMgrSuite) execute(command []string) (string, error) {\n\treturn s.executeWithRetry(command, 1)\n}\n\nfunc (s *CephMgrSuite) prepareLocalStorageClass(storageClassName string) {\n\t\/\/ Rook orchestrator use PVs based in this storage class to create OSDs\n\t\/\/ It is also needed to list \"devices\"\n\tlocalStorageClass := `\nkind: StorageClass\napiVersion: storage.k8s.io\/v1\nmetadata:\n name: ` + storageClassName + `\nprovisioner: kubernetes.io\/no-provisioner\nvolumeBindingMode: WaitForFirstConsumer\n`\n\terr := s.k8sh.ResourceOperation(\"apply\", localStorageClass)\n\tif err == nil {\n\t\terr, _ = s.installer.Execute(\"ceph\", []string{\"config\", \"set\", \"mgr\", \"mgr\/rook\/storage_class\", storageClassName}, s.namespace)\n\t\tif err == nil {\n\t\t\tlogger.Infof(\"Storage class %q set in manager config\", storageClassName)\n\t\t} else {\n\t\t\tassert.Fail(s.T(), fmt.Sprintf(\"Error configuring local storage class in manager config: %q\", err))\n\t\t}\n\t} else {\n\t\tassert.Fail(s.T(), fmt.Sprintf(\"Error creating local storage class: %q \", err))\n\t}\n}\n\nfunc (s *CephMgrSuite) enableOrchestratorModule() {\n\tlogger.Info(\"Enabling Rook orchestrator module: <ceph mgr module enable rook --force>\")\n\terr, output := s.installer.Execute(\"ceph\", []string{\"mgr\", \"module\", \"enable\", \"rook\", \"--force\"}, s.namespace)\n\tlogger.Infof(\"output: %s\", output)\n\tif err != nil {\n\t\tlogger.Infof(\"Failed to enable rook orchestrator module: %q\", err)\n\t\treturn\n\t}\n\n\tlogger.Info(\"Setting orchestrator backend to Rook .... <ceph orch set backend rook>\")\n\toutput, err = s.execute([]string{\"set\", \"backend\", \"rook\"})\n\tlogger.Infof(\"output: %s\", output)\n\tif err != nil {\n\t\tlogger.Infof(\"Not possible to set rook as backend orchestrator module: %q\", err)\n\t}\n}\n\nfunc (s *CephMgrSuite) waitForOrchestrationModule() {\n\tvar err error\n\n\t\/\/ Status struct\n\ttype orchStatus struct {\n\t\tAvailable bool `json:\"available\"`\n\t\tBackend string `json:\"backend\"`\n\t}\n\n\tfor timeout := 0; timeout < 30; timeout++ {\n\t\tlogger.Info(\"Waiting for rook orchestrator module enabled and ready ...\")\n\t\toutput, err := s.execute([]string{\"status\", \"--format\", \"json\"})\n\t\tlogger.Infof(\"%s\", output)\n\t\tif err == nil {\n\t\t\tlogger.Info(\"Ceph orchestrator ready to execute commands\")\n\n\t\t\t\/\/ Get status information\n\t\t\tbytes := []byte(output)\n\t\t\tlogBytesInfo(bytes)\n\n\t\t\tvar status orchStatus\n\t\t\terr := json.Unmarshal(bytes[:len(output)], &status)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error(\"Error getting ceph orch status\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif status.Backend != \"rook\" {\n\t\t\t\tassert.Fail(s.T(), fmt.Sprintf(\"Orchestrator backend is <%q>. Setting it to <Rook>\", status.Backend))\n\t\t\t\ts.enableOrchestratorModule()\n\t\t\t} else {\n\t\t\t\tlogger.Info(\"Orchestrator backend is <Rook>\")\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\texitError, _ := err.(*exec.ExitError)\n\t\t\tif exitError.ExitCode() == 22 { \/\/ The <ceph orch> commands are still not recognized\n\t\t\t\tlogger.Info(\"Ceph manager modules still not ready ... \")\n\t\t\t} else if exitError.ExitCode() == 2 { \/\/ The rook orchestrator is not the orchestrator backend\n\t\t\t\ts.enableOrchestratorModule()\n\t\t\t}\n\t\t}\n\t\ttime.Sleep(5 * time.Second)\n\t}\n\tif err != nil {\n\t\tlogger.Error(\"Giving up waiting for manager module to be ready\")\n\t}\n\trequire.Nil(s.T(), err)\n}\nfunc (s *CephMgrSuite) TestDeviceLs() {\n\tlogger.Info(\"Testing .... <ceph orch device ls>\")\n\tdeviceList, err := s.executeWithRetry([]string{\"device\", \"ls\"}, defaultTries)\n\tassert.Nil(s.T(), err)\n\tlogger.Infof(\"output = %s\", deviceList)\n}\n\nfunc (s *CephMgrSuite) TestStatus() {\n\tlogger.Info(\"Testing .... <ceph orch status>\")\n\tstatus, err := s.executeWithRetry([]string{\"status\"}, defaultTries)\n\tassert.Nil(s.T(), err)\n\tlogger.Infof(\"output = %s\", status)\n\n\tassert.Equal(s.T(), status, \"Backend: rook\\nAvailable: Yes\")\n}\n\nfunc logBytesInfo(bytesSlice []byte) {\n\tlogger.Infof(\"---- bytes slice info ---\")\n\tlogger.Infof(\"bytes: %v\\n\", bytesSlice)\n\tlogger.Infof(\"length: %d\\n\", len(bytesSlice))\n\tlogger.Infof(\"string: -->%s<--\\n\", string(bytesSlice))\n\tlogger.Infof(\"-------------------------\")\n}\n\nfunc (s *CephMgrSuite) TestHostLs() {\n\tlogger.Info(\"Testing .... <ceph orch host ls>\")\n\n\t\/\/ Get the orchestrator hosts\n\toutput, err := s.executeWithRetry([]string{\"host\", \"ls\", \"json\"}, defaultTries)\n\tassert.Nil(s.T(), err)\n\tlogger.Infof(\"output = %s\", output)\n\n\thosts := []byte(output)\n\tlogBytesInfo(hosts)\n\n\tvar hostsList []host\n\terr = json.Unmarshal(hosts[:len(output)], &hostsList)\n\tif err != nil {\n\t\tassert.Nil(s.T(), err)\n\t}\n\n\tvar hostOutput []string\n\tfor _, hostItem := range hostsList {\n\t\thostOutput = append(hostOutput, hostItem.Addr)\n\t}\n\tsort.Strings(hostOutput)\n\n\t\/\/ get the k8s nodes\n\tnodes, err := k8sutil.GetNodeHostNames(s.k8sh.Clientset)\n\tassert.Nil(s.T(), err)\n\n\tk8sNodes := make([]string, 0, len(nodes))\n\tfor k := range nodes {\n\t\tk8sNodes = append(k8sNodes, k)\n\t}\n\tsort.Strings(k8sNodes)\n\n\t\/\/ nodes and hosts must be the same\n\tassert.Equal(s.T(), hostOutput, k8sNodes)\n}\n\nfunc (s *CephMgrSuite) TestServiceLs() {\n\tlogger.Info(\"Testing .... <ceph orch ls --format json>\")\n\toutput, err := s.executeWithRetry([]string{\"ls\", \"--format\", \"json\"}, defaultTries)\n\tassert.Nil(s.T(), err)\n\tlogger.Infof(\"output = %s\", output)\n\n\tservices := []byte(output)\n\tlogBytesInfo(services)\n\n\tvar servicesList []service\n\terr = json.Unmarshal(services[:len(output)], &servicesList)\n\tassert.Nil(s.T(), err)\n\n\tlabelFilter := \"\"\n\tfor _, svc := range servicesList {\n\t\tif svc.ServiceName != \"crash\" {\n\t\t\tlabelFilter = fmt.Sprintf(\"app=rook-ceph-%s\", svc.ServiceName)\n\t\t} else {\n\t\t\tlabelFilter = \"app=rook-ceph-crashcollector\"\n\t\t}\n\t\tk8sPods, err := k8sutil.PodsRunningWithLabel(s.k8sh.Clientset, s.namespace, labelFilter)\n\t\tlogger.Infof(\"Service: %+v\", svc)\n\t\tlogger.Infof(\"k8s pods for svc %q using label <%q>: %d\", svc.ServiceName, labelFilter, k8sPods)\n\t\tassert.Nil(s.T(), err)\n\t\tassert.Equal(s.T(), svc.Status.Running, k8sPods, fmt.Sprintf(\"Wrong number of pods for kind of service <%s>\", svc.ServiceType))\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package app\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/murlokswarm\/log\"\n\t\"github.com\/murlokswarm\/markup\"\n\t\"github.com\/murlokswarm\/uid\"\n)\n\nconst (\n\tjsFmt = `\nfunction Mount(id, markup) {\n\tconst sel = '[data-murlok-root=\"' + id + '\"]';\n const elem = document.querySelector(sel);\n elem.innerHTML = markup;\n}\n\nfunction RenderFull(id, markup) {\n\tconst sel = '[data-murlok-id=\"' + id + '\"]';\n const elem = document.querySelector(sel);\n elem.outerHTML = markup;\n}\n\nfunction RenderAttributes(id, attrs) {\n\tconst sel = '[data-murlok-id=\"' + id + '\"]';\n const elem = document.querySelector(sel);\n \n for (var name in attrs) {\n if (attrs.hasOwnProperty(name)) {\n elem.setAttribute(name, attrs[name]);\n }\n }\n}\n\nfunction CallEvent(id, method, self, event) {\n\tvar arg;\n\tconst eventType = event.type;\n\n\tswitch (eventType) {\n\t\tcase \"click\":\n case \"contextmenu\":\n case \"dblclick\":\n case \"mousedown\":\n case \"mouseenter\":\n case \"mouseleave\":\n case \"mousemove\":\n case \"mouseover\":\n case \"mouseout\":\n case \"mouseup\":\n case \"drag\":\n case \"dragend\":\n case \"dragenter\":\n case \"dragleave\":\n case \"dragover\":\n case \"dragstart\":\n case \"drop\":\n arg = MakeMouseArg(event);\n break;\n \n case \"mousewheel\":\n arg = MakeWheelArg(event);\n break;\n \n case \"keydown\":\n case \"keypress\":\n case \"keyup\":\n arg = MakeKeyboardArg(event);\n break;\n\t\t\n\t\tcase \"change\":\n\t\t\targ = MakeChangeArg(self.value);\n\t\t\tbreak;\n\n default:\n\t\t\talert(\"not supported event: \" + eventType);\n return;\n\t}\n\t\n\tCall(id, method, arg);\n}\n\nfunction MakeMouseArg(event) {\n\treturn {\n \"AltKey\": event.altKey,\n \"Button\": event.button,\n \"ClientX\": event.clientX,\n \"ClientY\": event.clientY,\n \"CtrlKey\": event.ctrlKey,\n \"Detail\": event.detail,\n \"MetaKey\": event.metaKey,\n \"PageX\": event.pageX,\n \"PageY\": event.pageY,\n \"ScreenX\": event.screenX,\n \"ScreenY\": event.screenY,\n \"ShiftKey\": event.shiftKey\n };\n}\n\nfunction MakeWheelArg(event) {\n\treturn {\n \"DeltaX\": event.deltaX,\n \"DeltaY\": event.deltaY,\n \"DeltaZ\": event.deltaZ,\n \"DeltaMode\": event.deltaMode\n };\n}\n\nfunction MakeKeyboardArg(event) {\n\treturn {\n \"AltKey\": event.altKey,\n \"CtrlKey\": event.ctrlKey,\n \"CharCode\": event.charCode,\n \"KeyCode\": event.keyCode,\n \"Location\": event.location,\n \"MetaKey\": event.metaKey,\n \"ShiftKey\": event.shiftKey\n };\n}\n\nfunction MakeChangeArg(value) {\n\treturn {\n\t\tValue: value\n\t};\n}\n\nfunction Call(id, method, arg) {\n\tlet msg = {\n\t\tID: id,\n\t\tMethod: method,\n\t\tArg: JSON.stringify(arg)\n\t};\n\t\n\tmsg = JSON.stringify(msg);\n\t%v\n}\n `\n)\n\ntype jsMsg struct {\n\tID uid.ID\n\tMethod string\n\tArg string\n}\n\n\/\/ CallComponentMethod calls component method described by msg.\n\/\/ Should be used only in a driver.\nfunc CallComponentMethod(msg string) {\n\tvar jsMsg jsMsg\n\n\tif err := json.Unmarshal([]byte(msg), &jsMsg); err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\n\tmarkup.Call(jsMsg.ID, jsMsg.Method, jsMsg.Arg)\n}\n\n\/\/ MurlokJS returns the javascript code allowing bidirectional communication\n\/\/ between a context and it's webview.\n\/\/ Should be used only in drivers implementations.\nfunc MurlokJS() string {\n\treturn fmt.Sprintf(jsFmt, driver.JavascriptBridge())\n}\n<commit_msg>js remove element<commit_after>package app\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/murlokswarm\/log\"\n\t\"github.com\/murlokswarm\/markup\"\n\t\"github.com\/murlokswarm\/uid\"\n)\n\nconst (\n\tjsFmt = `\nfunction Mount(id, markup) {\n\tconst sel = '[data-murlok-root=\"' + id + '\"]';\n const elem = document.querySelector(sel);\n elem.innerHTML = markup;\n}\n\nfunction RenderFull(id, markup) {\n\tconst sel = '[data-murlok-id=\"' + id + '\"]';\n const elem = document.querySelector(sel);\n elem.outerHTML = markup;\n}\n\nfunction RenderAttributes(id, attrs) {\n\tconst sel = '[data-murlok-id=\"' + id + '\"]';\n const elem = document.querySelector(sel);\n \n for (var name in attrs) {\n if (attrs.hasOwnProperty(name)) {\n if (attrs[name].length == 0) {\n elem.removeAttribute(name);\n continue;\n }\n elem.setAttribute(name, attrs[name]);\n }\n }\n}\n\nfunction CallEvent(id, method, self, event) {\n\tvar arg;\n\tconst eventType = event.type;\n\n\tswitch (eventType) {\n\t\tcase \"click\":\n case \"contextmenu\":\n case \"dblclick\":\n case \"mousedown\":\n case \"mouseenter\":\n case \"mouseleave\":\n case \"mousemove\":\n case \"mouseover\":\n case \"mouseout\":\n case \"mouseup\":\n case \"drag\":\n case \"dragend\":\n case \"dragenter\":\n case \"dragleave\":\n case \"dragover\":\n case \"dragstart\":\n case \"drop\":\n arg = MakeMouseArg(event);\n break;\n \n case \"mousewheel\":\n arg = MakeWheelArg(event);\n break;\n \n case \"keydown\":\n case \"keypress\":\n case \"keyup\":\n arg = MakeKeyboardArg(event);\n break;\n\t\t\n\t\tcase \"change\":\n\t\t\targ = MakeChangeArg(self.value);\n\t\t\tbreak;\n\n default:\n\t\t\talert(\"not supported event: \" + eventType);\n return;\n\t}\n\t\n\tCall(id, method, arg);\n}\n\nfunction MakeMouseArg(event) {\n\treturn {\n \"AltKey\": event.altKey,\n \"Button\": event.button,\n \"ClientX\": event.clientX,\n \"ClientY\": event.clientY,\n \"CtrlKey\": event.ctrlKey,\n \"Detail\": event.detail,\n \"MetaKey\": event.metaKey,\n \"PageX\": event.pageX,\n \"PageY\": event.pageY,\n \"ScreenX\": event.screenX,\n \"ScreenY\": event.screenY,\n \"ShiftKey\": event.shiftKey\n };\n}\n\nfunction MakeWheelArg(event) {\n\treturn {\n \"DeltaX\": event.deltaX,\n \"DeltaY\": event.deltaY,\n \"DeltaZ\": event.deltaZ,\n \"DeltaMode\": event.deltaMode\n };\n}\n\nfunction MakeKeyboardArg(event) {\n\treturn {\n \"AltKey\": event.altKey,\n \"CtrlKey\": event.ctrlKey,\n \"CharCode\": event.charCode,\n \"KeyCode\": event.keyCode,\n \"Location\": event.location,\n \"MetaKey\": event.metaKey,\n \"ShiftKey\": event.shiftKey\n };\n}\n\nfunction MakeChangeArg(value) {\n\treturn {\n\t\tValue: value\n\t};\n}\n\nfunction Call(id, method, arg) {\n\tlet msg = {\n\t\tID: id,\n\t\tMethod: method,\n\t\tArg: JSON.stringify(arg)\n\t};\n\t\n\tmsg = JSON.stringify(msg);\n\t%v\n}\n `\n)\n\ntype jsMsg struct {\n\tID uid.ID\n\tMethod string\n\tArg string\n}\n\n\/\/ CallComponentMethod calls component method described by msg.\n\/\/ Should be used only in a driver.\nfunc CallComponentMethod(msg string) {\n\tvar jsMsg jsMsg\n\n\tif err := json.Unmarshal([]byte(msg), &jsMsg); err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\n\tmarkup.Call(jsMsg.ID, jsMsg.Method, jsMsg.Arg)\n}\n\n\/\/ MurlokJS returns the javascript code allowing bidirectional communication\n\/\/ between a context and it's webview.\n\/\/ Should be used only in drivers implementations.\nfunc MurlokJS() string {\n\treturn fmt.Sprintf(jsFmt, driver.JavascriptBridge())\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The Kubernetes Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage jwe\n\nimport (\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"log\"\n\t\"sync\"\n\n\tauthApi \"github.com\/kubernetes\/dashboard\/src\/app\/backend\/auth\/api\"\n\tsyncApi \"github.com\/kubernetes\/dashboard\/src\/app\/backend\/sync\/api\"\n\tjose \"gopkg.in\/square\/go-jose.v2\"\n\t\"k8s.io\/api\/core\/v1\"\n\tk8sErrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetaV1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/watch\"\n)\n\n\/\/ Entries held by resource used to synchronize encryption key data.\nconst (\n\tholderMapKeyEntry = \"priv\"\n\tholderMapCertEntry = \"pub\"\n)\n\n\/\/ KeyHolder is responsible for generating, storing and synchronizing encryption key used for token\n\/\/ generation\/decryption.\ntype KeyHolder interface {\n\t\/\/ Returns encrypter instance that can be used to encrypt data.\n\tEncrypter() jose.Encrypter\n\t\/\/ Returns encryption key that can be used to decrypt data.\n\tKey() *rsa.PrivateKey\n\t\/\/ Forces refresh of encryption key synchronized with kubernetes resource (secret).\n\tRefresh()\n}\n\n\/\/ Implements KeyHolder interface\ntype rsaKeyHolder struct {\n\t\/\/ 256-byte random RSA key pair. Synced with a key saved in a secret.\n\tkey *rsa.PrivateKey\n\tsynchronizer syncApi.Synchronizer\n\tmux sync.Mutex\n}\n\n\/\/ Encrypter implements key holder interface. See KeyHolder for more information.\n\/\/ Used encryption algorithms:\n\/\/ - Content encryption: AES-GCM (256)\n\/\/ - Key management: RSA-OAEP-SHA256\nfunc (self *rsaKeyHolder) Encrypter() jose.Encrypter {\n\tpublicKey := &self.Key().PublicKey\n\tencrypter, err := jose.NewEncrypter(jose.A256GCM, jose.Recipient{Algorithm: jose.RSA_OAEP_256, Key: publicKey}, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn encrypter\n}\n\n\/\/ Key implements key holder interface. See KeyHolder for more information.\nfunc (self *rsaKeyHolder) Key() *rsa.PrivateKey {\n\tself.mux.Lock()\n\tdefer self.mux.Unlock()\n\treturn self.key\n}\n\n\/\/ Refresh implements key holder interface. See KeyHolder for more information.\nfunc (self *rsaKeyHolder) Refresh() {\n\tself.synchronizer.Refresh()\n\tself.update(self.synchronizer.Get())\n}\n\n\/\/ Handler function executed by synchronizer used to store encryption key. It is called whenever watched object\n\/\/ is created or updated.\nfunc (self *rsaKeyHolder) update(obj runtime.Object) {\n\tself.mux.Lock()\n\tdefer self.mux.Unlock()\n\tsecret := obj.(*v1.Secret)\n\tpriv, err := ParseRSAKey(string(secret.Data[holderMapKeyEntry]), string(secret.Data[holderMapCertEntry]))\n\tif err != nil {\n\t\t\/\/ Secret was probably tampered with. Update it based on local key.\n\t\terr := self.synchronizer.Update(self.getEncryptionKeyHolder())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\treturn\n\t}\n\n\tself.key = priv\n}\n\n\/\/ Handler function executed by synchronizer used to store encryption key. It is called whenever watched object\n\/\/ is gets deleted. It is then recreated based on local key.\nfunc (self *rsaKeyHolder) recreate(obj runtime.Object) {\n\tsecret := obj.(*v1.Secret)\n\tlog.Printf(\"Synchronized secret %s has been deleted. Recreating.\", secret.Name)\n\tself.synchronizer.Create(self.getEncryptionKeyHolder())\n}\n\nfunc (self *rsaKeyHolder) init() {\n\tself.initEncryptionKey()\n\n\t\/\/ Register event handlers\n\tself.synchronizer.RegisterActionHandler(self.update, watch.Added, watch.Modified)\n\tself.synchronizer.RegisterActionHandler(self.recreate, watch.Deleted)\n\n\t\/\/ Try to init key from synchronized object\n\tif obj := self.synchronizer.Get(); obj != nil {\n\t\tlog.Print(\"Initializing JWE encryption key from synchronized object\")\n\t\tself.update(obj)\n\t\treturn\n\t}\n\n\t\/\/ Try to save generated key in a secret\n\tlog.Printf(\"Storing encryption key in a secret\")\n\terr := self.synchronizer.Create(self.getEncryptionKeyHolder())\n\tif err != nil && !k8sErrors.IsAlreadyExists(err) {\n\t\tpanic(err)\n\t}\n}\n\nfunc (self *rsaKeyHolder) getEncryptionKeyHolder() runtime.Object {\n\tpriv, pub := ExportRSAKeyOrDie(self.Key())\n\treturn &v1.Secret{\n\t\tObjectMeta: metaV1.ObjectMeta{\n\t\t\tNamespace: authApi.EncryptionKeyHolderNamespace,\n\t\t\tName: authApi.EncryptionKeyHolderName,\n\t\t},\n\n\t\tData: map[string][]byte{\n\t\t\tholderMapKeyEntry: []byte(priv),\n\t\t\tholderMapCertEntry: []byte(pub),\n\t\t},\n\t}\n}\n\n\/\/ Generates encryption key used to encrypt token payload.\nfunc (self *rsaKeyHolder) initEncryptionKey() {\n\tlog.Print(\"Generating JWE encryption key\")\n\tself.mux.Lock()\n\tdefer self.mux.Unlock()\n\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tself.key = privateKey\n}\n\n\/\/ NewRSAKeyHolder creates new KeyHolder instance.\nfunc NewRSAKeyHolder(synchronizer syncApi.Synchronizer) KeyHolder {\n\tholder := &rsaKeyHolder{\n\t\tsynchronizer: synchronizer,\n\t}\n\n\tholder.init()\n\treturn holder\n}\n<commit_msg>Fix deadlock during Dashboard start (#2690)<commit_after>\/\/ Copyright 2017 The Kubernetes Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage jwe\n\nimport (\n\t\"crypto\/rand\"\n\t\"crypto\/rsa\"\n\t\"log\"\n\t\"sync\"\n\n\tauthApi \"github.com\/kubernetes\/dashboard\/src\/app\/backend\/auth\/api\"\n\tsyncApi \"github.com\/kubernetes\/dashboard\/src\/app\/backend\/sync\/api\"\n\tjose \"gopkg.in\/square\/go-jose.v2\"\n\t\"k8s.io\/api\/core\/v1\"\n\tk8sErrors \"k8s.io\/apimachinery\/pkg\/api\/errors\"\n\tmetaV1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\"\n\t\"k8s.io\/apimachinery\/pkg\/watch\"\n)\n\n\/\/ Entries held by resource used to synchronize encryption key data.\nconst (\n\tholderMapKeyEntry = \"priv\"\n\tholderMapCertEntry = \"pub\"\n)\n\n\/\/ KeyHolder is responsible for generating, storing and synchronizing encryption key used for token\n\/\/ generation\/decryption.\ntype KeyHolder interface {\n\t\/\/ Returns encrypter instance that can be used to encrypt data.\n\tEncrypter() jose.Encrypter\n\t\/\/ Returns encryption key that can be used to decrypt data.\n\tKey() *rsa.PrivateKey\n\t\/\/ Forces refresh of encryption key synchronized with kubernetes resource (secret).\n\tRefresh()\n}\n\n\/\/ Implements KeyHolder interface\ntype rsaKeyHolder struct {\n\t\/\/ 256-byte random RSA key pair. Synced with a key saved in a secret.\n\tkey *rsa.PrivateKey\n\tsynchronizer syncApi.Synchronizer\n\tmux sync.Mutex\n}\n\n\/\/ Encrypter implements key holder interface. See KeyHolder for more information.\n\/\/ Used encryption algorithms:\n\/\/ - Content encryption: AES-GCM (256)\n\/\/ - Key management: RSA-OAEP-SHA256\nfunc (self *rsaKeyHolder) Encrypter() jose.Encrypter {\n\tpublicKey := &self.Key().PublicKey\n\tencrypter, err := jose.NewEncrypter(jose.A256GCM, jose.Recipient{Algorithm: jose.RSA_OAEP_256, Key: publicKey}, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn encrypter\n}\n\n\/\/ Key implements key holder interface. See KeyHolder for more information.\nfunc (self *rsaKeyHolder) Key() *rsa.PrivateKey {\n\tself.mux.Lock()\n\tdefer self.mux.Unlock()\n\treturn self.key\n}\n\n\/\/ Refresh implements key holder interface. See KeyHolder for more information.\nfunc (self *rsaKeyHolder) Refresh() {\n\tself.synchronizer.Refresh()\n\tself.update(self.synchronizer.Get())\n}\n\n\/\/ Handler function executed by synchronizer used to store encryption key. It is called whenever watched object\n\/\/ is created or updated.\nfunc (self *rsaKeyHolder) update(obj runtime.Object) {\n\tsecret := obj.(*v1.Secret)\n\tpriv, err := ParseRSAKey(string(secret.Data[holderMapKeyEntry]), string(secret.Data[holderMapCertEntry]))\n\tif err != nil {\n\t\t\/\/ Secret was probably tampered with. Update it based on local key.\n\t\terr := self.synchronizer.Update(self.getEncryptionKeyHolder())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\treturn\n\t}\n\n\tself.mux.Lock()\n\tdefer self.mux.Unlock()\n\tself.key = priv\n}\n\n\/\/ Handler function executed by synchronizer used to store encryption key. It is called whenever watched object\n\/\/ is gets deleted. It is then recreated based on local key.\nfunc (self *rsaKeyHolder) recreate(obj runtime.Object) {\n\tsecret := obj.(*v1.Secret)\n\tlog.Printf(\"Synchronized secret %s has been deleted. Recreating.\", secret.Name)\n\tself.synchronizer.Create(self.getEncryptionKeyHolder())\n}\n\nfunc (self *rsaKeyHolder) init() {\n\tself.initEncryptionKey()\n\n\t\/\/ Register event handlers\n\tself.synchronizer.RegisterActionHandler(self.update, watch.Added, watch.Modified)\n\tself.synchronizer.RegisterActionHandler(self.recreate, watch.Deleted)\n\n\t\/\/ Try to init key from synchronized object\n\tif obj := self.synchronizer.Get(); obj != nil {\n\t\tlog.Print(\"Initializing JWE encryption key from synchronized object\")\n\t\tself.update(obj)\n\t\treturn\n\t}\n\n\t\/\/ Try to save generated key in a secret\n\tlog.Printf(\"Storing encryption key in a secret\")\n\terr := self.synchronizer.Create(self.getEncryptionKeyHolder())\n\tif err != nil && !k8sErrors.IsAlreadyExists(err) {\n\t\tpanic(err)\n\t}\n}\n\nfunc (self *rsaKeyHolder) getEncryptionKeyHolder() runtime.Object {\n\tpriv, pub := ExportRSAKeyOrDie(self.Key())\n\treturn &v1.Secret{\n\t\tObjectMeta: metaV1.ObjectMeta{\n\t\t\tNamespace: authApi.EncryptionKeyHolderNamespace,\n\t\t\tName: authApi.EncryptionKeyHolderName,\n\t\t},\n\n\t\tData: map[string][]byte{\n\t\t\tholderMapKeyEntry: []byte(priv),\n\t\t\tholderMapCertEntry: []byte(pub),\n\t\t},\n\t}\n}\n\n\/\/ Generates encryption key used to encrypt token payload.\nfunc (self *rsaKeyHolder) initEncryptionKey() {\n\tlog.Print(\"Generating JWE encryption key\")\n\tself.mux.Lock()\n\tdefer self.mux.Unlock()\n\n\tprivateKey, err := rsa.GenerateKey(rand.Reader, 2048)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tself.key = privateKey\n}\n\n\/\/ NewRSAKeyHolder creates new KeyHolder instance.\nfunc NewRSAKeyHolder(synchronizer syncApi.Synchronizer) KeyHolder {\n\tholder := &rsaKeyHolder{\n\t\tsynchronizer: synchronizer,\n\t}\n\n\tholder.init()\n\treturn holder\n}\n<|endoftext|>"} {"text":"<commit_before>package kubernetes\n\nimport (\n\t\"fmt\"\n\t\"github.com\/andyxning\/eventarbiter\/models\"\n\t\"github.com\/golang\/glog\"\n\tbackend \"k8s.io\/heapster\/events\/sources\/kubernetes\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"net\/url\"\n\t\"time\"\n)\n\nconst (\n\tfetchInterval = 500 * time.Millisecond\n)\n\ntype kubernetes struct {\n\tfetchTicker *time.Ticker\n\tupstream *backend.KubernetesEventSource\n}\n\nfunc MustNewKubernetes(uri *url.URL) models.Source {\n\tupstream, err := backend.NewKubernetesSource(uri)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"%s\", err))\n\t}\n\n\treturn kubernetes{\n\t\tfetchTicker: time.NewTicker(fetchInterval),\n\t\tupstream: upstream,\n\t}\n}\n\nfunc (k8s kubernetes) Start(eventChan chan<- *api.Event) {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-k8s.fetchTicker.C:\n\t\t\t\teventBatch := k8s.upstream.GetNewEvents()\n\t\t\t\tglog.Infof(\"got %d new events at %s\", len(eventBatch.Events), eventBatch.Timestamp)\n\n\t\t\t\tfor _, event := range eventBatch.Events {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase eventChan <- event:\n\t\t\t\t\t\tglog.V(3).Infof(\"%#v\", event)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tglog.Errorf(\"event channel is full. ignoring %v\", event)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (k8s kubernetes) Stop() {\n\t\/\/ Nothing to do now.\n}\n<commit_msg>update log content for event<commit_after>package kubernetes\n\nimport (\n\t\"fmt\"\n\t\"github.com\/andyxning\/eventarbiter\/models\"\n\t\"github.com\/golang\/glog\"\n\tbackend \"k8s.io\/heapster\/events\/sources\/kubernetes\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"net\/url\"\n\t\"time\"\n)\n\nconst (\n\tfetchInterval = 500 * time.Millisecond\n)\n\ntype kubernetes struct {\n\tfetchTicker *time.Ticker\n\tupstream *backend.KubernetesEventSource\n}\n\nfunc MustNewKubernetes(uri *url.URL) models.Source {\n\tupstream, err := backend.NewKubernetesSource(uri)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"%s\", err))\n\t}\n\n\treturn kubernetes{\n\t\tfetchTicker: time.NewTicker(fetchInterval),\n\t\tupstream: upstream,\n\t}\n}\n\nfunc (k8s kubernetes) Start(eventChan chan<- *api.Event) {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-k8s.fetchTicker.C:\n\t\t\t\teventBatch := k8s.upstream.GetNewEvents()\n\t\t\t\tglog.Infof(\"got %d new events at %s\", len(eventBatch.Events), eventBatch.Timestamp)\n\n\t\t\t\tfor _, event := range eventBatch.Events {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase eventChan <- event:\n\t\t\t\t\t\tglog.V(3).Infof(\"%#v\", event)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tglog.Errorf(\"event channel is full. ignoring %#v\", event)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (k8s kubernetes) Stop() {\n\t\/\/ Nothing to do now.\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Neugram Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n\n\t\"neugram.io\/ng\/eval\"\n\t\"neugram.io\/ng\/eval\/environ\"\n\t\"neugram.io\/ng\/eval\/shell\"\n\t\"neugram.io\/ng\/format\"\n\t\"neugram.io\/ng\/gengo\"\n\t\"neugram.io\/ng\/jupyter\"\n\t\"neugram.io\/ng\/parser\"\n\n\t\"github.com\/peterh\/liner\"\n)\n\nvar (\n\torigMode liner.ModeApplier\n\n\tlineNg *liner.State \/\/ ng-mode line reader\n\thistoryNgFile = \"\"\n\thistoryNg = make(chan string, 1)\n\thistoryShFile = \"\"\n\thistorySh = make(chan string, 1)\n\tsigint = make(chan os.Signal)\n\n\tp *parser.Parser\n\tprg *eval.Program\n\tshellState *shell.State\n)\n\nfunc exit(code int) {\n\tif lineNg != nil {\n\t\tlineNg.Close()\n\t}\n\tfmt.Fprintf(os.Stderr, \"\\n\")\n\tos.Exit(code)\n}\n\nfunc exitf(format string, args ...interface{}) {\n\tfmt.Fprintf(os.Stderr, \"ng: \"+format+\"\\n\", args...)\n\texit(1)\n}\n\nfunc mode() liner.ModeApplier {\n\tm, err := liner.TerminalMode()\n\tif err != nil {\n\t\texitf(\"terminal mode: %v\", err)\n\t}\n\treturn m\n}\n\nconst usageLine = \"ng [programfile | -e cmd | -jupyter file] [arguments]\"\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, `ng - neugram scripting language and shell\n\nUsage:\n\t%s\n\nOptions:\n`, usageLine)\n\tflag.PrintDefaults()\n}\n\nfunc main() {\n\tshell.Init()\n\n\tflagJupyter := flag.String(\"jupyter\", \"\", \"path to jupyter kernel connection file\")\n\tflagHelp := flag.Bool(\"h\", false, \"display help message and exit\")\n\tflagE := flag.String(\"e\", \"\", \"program passed as a string\")\n\tflagO := flag.String(\"o\", \"\", \"compile the program to the named file\")\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"usage: %s\\n\", usageLine)\n\t\tos.Exit(1)\n\t}\n\tflag.Parse()\n\n\tif *flagHelp {\n\t\tusage()\n\t\tos.Exit(0)\n\t}\n\tif *flagJupyter != \"\" {\n\t\terr := jupyter.Run(context.Background(), *flagJupyter)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tos.Exit(0)\n\t}\n\tif *flagE != \"\" {\n\t\tinitProgram(filepath.Join(cwd, \"ng-arg\"))\n\t\tres := p.ParseLine([]byte(*flagE))\n\t\thandleResult(res)\n\t\treturn\n\t}\n\tif args := flag.Args(); len(args) > 0 {\n\t\t\/\/ TODO: plumb through the rest of the args\n\t\tpath := args[0]\n\t\tif *flagO != \"\" {\n\t\t\tres, err := gengo.GenGo(path)\n\t\t\tif err != nil {\n\t\t\t\texitf(\"%v\", err)\n\t\t\t}\n\t\t\t_ = res\n\t\t\texitf(\"TODO gengo\")\n\t\t\treturn\n\t\t}\n\t\tinitProgram(path)\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\texitf(\"%v\", err)\n\t\t}\n\t\tstate, err := runFile(f)\n\t\tif err != nil {\n\t\t\texitf(\"%v\", err)\n\t\t}\n\t\tif state == parser.StateCmd {\n\t\t\texitf(\"%s: ends in an unclosed shell statement\", args[0])\n\t\t}\n\t\treturn\n\t}\n\tif *flagO != \"\" {\n\t\texitf(\"-o specified but no program file provided\")\n\t}\n\n\tlineNg = liner.NewLiner()\n\tdefer lineNg.Close()\n\n\tloop()\n}\n\nfunc setWindowSize(env map[interface{}]interface{}) {\n\t\/\/ TODO windowsize\n\t\/\/ TODO\n\t\/\/ TODO\n\t\/\/ TODO\n\t\/*\n\t\trows, cols, err := job.WindowSize(os.Stderr.Fd())\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"ng: could not get window size: %v\\n\", err)\n\t\t} else {\n\t\t\t\/\/ TODO: these are meant to be shell variables, not\n\t\t\t\/\/ environment variables. But then, how do programs\n\t\t\t\/\/ like `ls` read them?\n\t\t\tenv[\"LINES\"] = strconv.Itoa(rows)\n\t\t\tenv[\"COLUMNS\"] = strconv.Itoa(cols)\n\t\t}\n\t*\/\n}\n\nfunc ps1(env *environ.Environ) string {\n\tv := env.Get(\"PS1\")\n\tif v == \"\" {\n\t\treturn \"ng$ \"\n\t}\n\tif strings.IndexByte(v, '\\\\') == -1 {\n\t\treturn v\n\t}\n\tvar buf []byte\n\tfor {\n\t\ti := strings.IndexByte(v, '\\\\')\n\t\tif i == -1 || i == len(v)-1 {\n\t\t\tbreak\n\t\t}\n\t\tbuf = append(buf, v[:i]...)\n\t\tb := v[i+1]\n\t\tv = v[i+2:]\n\t\tswitch b {\n\t\tcase 'h', 'H':\n\t\t\tout, err := exec.Command(\"hostname\").CombinedOutput()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"ng: %v\\n\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif b == 'h' {\n\t\t\t\tif i := bytes.IndexByte(out, '.'); i >= 0 {\n\t\t\t\t\tout = out[:i]\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(out) > 0 && out[len(out)-1] == '\\n' {\n\t\t\t\tout = out[:len(out)-1]\n\t\t\t}\n\t\t\tbuf = append(buf, out...)\n\t\tcase 'n':\n\t\t\tbuf = append(buf, '\\n')\n\t\tcase 'w', 'W':\n\t\t\tcwd := env.Get(\"PWD\")\n\t\t\tif home := env.Get(\"HOME\"); home != \"\" {\n\t\t\t\tcwd = strings.Replace(cwd, home, \"~\", 1)\n\t\t\t}\n\t\t\tif b == 'W' {\n\t\t\t\tcwd = filepath.Base(cwd)\n\t\t\t}\n\t\t\tbuf = append(buf, cwd...)\n\t\t}\n\t\t\/\/ TODO: '!', '#', '$', 'nnn', 's', 'j', and more.\n\t}\n\tbuf = append(buf, v...)\n\treturn string(buf)\n}\n\nvar cwd string\n\nfunc init() {\n\tvar err error\n\tcwd, err = os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc initProgram(path string) {\n\tp = parser.New(path)\n\tshellState = &shell.State{\n\t\tEnv: environ.New(),\n\t\tAlias: environ.New(),\n\t}\n\tprg = eval.New(path, shellState)\n\n\t\/\/ TODO this env setup could be done in neugram code\n\tenv := prg.Environ()\n\tfor _, s := range os.Environ() {\n\t\ti := strings.Index(s, \"=\")\n\t\tenv.Set(s[:i], s[i+1:])\n\t}\n\twd, err := os.Getwd()\n\tif err == nil {\n\t\tenv.Set(\"PWD\", wd)\n\t}\n\t\/\/setWindowSize(env)\n\n\tgo func() {\n\t\tsig := make(chan os.Signal, 1)\n\t\tsignal.Notify(sig, os.Interrupt)\n\t\tfor {\n\t\t\ts := <-sig\n\t\t\tselect {\n\t\t\tcase sigint <- s:\n\t\t\tcase <-time.After(500 * time.Millisecond):\n\t\t\t\t\/\/ The evaluator has not handled the signal\n\t\t\t\t\/\/ promptly. There are several possible\n\t\t\t\t\/\/ reasons for this. The most likely right now\n\t\t\t\t\/\/ is the evaluator is in arbitrary Go code,\n\t\t\t\t\/\/ which does not have a way to be preempted.\n\t\t\t\t\/\/ It is also possible we have run into a\n\t\t\t\t\/\/ bug in the evaluator.\n\t\t\t\t\/\/\n\t\t\t\t\/\/ Either way, instead of being one of those\n\t\t\t\t\/\/ obnoxious programs that refuses to respond\n\t\t\t\t\/\/ to Ctrl-C, be overly agressive and let the\n\t\t\t\t\/\/ entire ng process exit.\n\t\t\t\t\/\/\n\t\t\t\t\/\/ This is bad if you use ng as your primary\n\t\t\t\t\/\/ shell, but good if you invoke ng to handle\n\t\t\t\t\/\/ scripts.\n\t\t\t\tfmt.Fprintf(os.Stderr, \"ng: exiting on interrupt\\n\")\n\t\t\t\texit(1)\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc runFile(f *os.File) (parser.ParserState, error) {\n\tstate := parser.StateStmt\n\tscanner := bufio.NewScanner(f)\n\tfor i := 0; scanner.Scan(); i++ {\n\t\tb := scanner.Bytes()\n\t\tif i == 0 && len(b) > 2 && b[0] == '#' && b[1] == '!' { \/\/ shebang\n\t\t\tcontinue\n\t\t}\n\t\tres := p.ParseLine(b)\n\t\thandleResult(res)\n\t\tstate = res.State\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn state, fmt.Errorf(\"%s: %v\", f.Name(), err)\n\t}\n\tswitch state {\n\tcase parser.StateStmtPartial, parser.StateCmdPartial:\n\t\treturn state, fmt.Errorf(\"%s: ends in a partial statement\", f.Name())\n\tdefault:\n\t\treturn state, nil\n\t}\n}\n\nfunc loop() {\n\tpath := filepath.Join(cwd, \"ng-interactive\")\n\tinitProgram(path)\n\n\tstate := parser.StateStmt\n\tif os.Args[0] == \"ngsh\" || os.Args[0] == \"-ngsh\" {\n\t\tinitFile := filepath.Join(os.Getenv(\"HOME\"), \".ngshinit\")\n\t\tif f, err := os.Open(initFile); err == nil {\n\t\t\tvar err error\n\t\t\tstate, err = runFile(f)\n\t\t\tf.Close()\n\t\t\tif err != nil {\n\t\t\t\texitf(\"%v\", err)\n\t\t\t}\n\t\t}\n\t\tif state == parser.StateStmt {\n\t\t\tres := p.ParseLine([]byte(\"$$\"))\n\t\t\thandleResult(res)\n\t\t\tstate = res.State\n\t\t}\n\t}\n\n\tlineNg.SetTabCompletionStyle(liner.TabPrints)\n\tlineNg.SetWordCompleter(completer)\n\tlineNg.SetCtrlCAborts(true)\n\n\tif f, err := os.Open(historyShFile); err == nil {\n\t\tlineNg.SetMode(\"sh\")\n\t\tlineNg.ReadHistory(f)\n\t\tf.Close()\n\t}\n\tgo historyWriter(historyShFile, historySh)\n\n\tif f, err := os.Open(historyNgFile); err == nil {\n\t\tlineNg.SetMode(\"ng\")\n\t\tlineNg.ReadHistory(f)\n\t\tf.Close()\n\t}\n\tgo historyWriter(historyNgFile, historyNg)\n\n\tfor {\n\t\tvar (\n\t\t\tmode string\n\t\t\tprompt string\n\t\t\thistory chan string\n\t\t)\n\t\tswitch state {\n\t\tcase parser.StateUnknown:\n\t\t\tmode, prompt, history = \"ng\", \"??> \", historyNg\n\t\tcase parser.StateStmt:\n\t\t\tmode, prompt, history = \"ng\", \"ng> \", historyNg\n\t\tcase parser.StateStmtPartial:\n\t\t\tmode, prompt, history = \"ng\", \"..> \", historyNg\n\t\tcase parser.StateCmd:\n\t\t\tmode, prompt, history = \"sh\", ps1(prg.Environ()), historySh\n\t\tcase parser.StateCmdPartial:\n\t\t\tmode, prompt, history = \"sh\", \"..$ \", historySh\n\t\tdefault:\n\t\t\texitf(\"unkown parser state: %v\", state)\n\t\t}\n\t\tlineNg.SetMode(mode)\n\t\tdata, err := lineNg.Prompt(prompt)\n\t\tif err == liner.ErrPromptAborted {\n\t\t\tswitch state {\n\t\t\tcase parser.StateStmtPartial:\n\t\t\t\tfmt.Printf(\"TODO interrupt partial statement\\n\")\n\t\t\tcase parser.StateCmdPartial:\n\t\t\t\tfmt.Printf(\"TODO interrupt partial command\\n\")\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\texit(0)\n\t\t\t}\n\t\t\texitf(\"error reading input: %v\", err)\n\t\t}\n\t\tif data == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tlineNg.AppendHistory(mode, data)\n\t\thistory <- data\n\t\tselect { \/\/ drain sigint\n\t\tcase <-sigint:\n\t\tdefault:\n\t\t}\n\t\tres := p.ParseLine([]byte(data))\n\t\thandleResult(res)\n\t\tstate = res.State\n\t}\n}\n\nfunc handleResult(res parser.Result) {\n\t\/\/ TODO: use ngcore for this\n\tfor _, s := range res.Stmts {\n\t\tv, err := prg.Eval(s, sigint)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"ng: %v\\n\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif len(v) > 1 {\n\t\t\tfmt.Print(\"(\")\n\t\t}\n\t\tfor i, val := range v {\n\t\t\tif i > 0 {\n\t\t\t\tfmt.Print(\", \")\n\t\t\t}\n\t\t\tif val == (reflect.Value{}) {\n\t\t\t\tfmt.Print(\"<nil>\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch v := val.Interface().(type) {\n\t\t\tcase eval.UntypedInt:\n\t\t\t\tfmt.Print(v.String())\n\t\t\tcase eval.UntypedFloat:\n\t\t\t\tfmt.Print(v.String())\n\t\t\tcase eval.UntypedComplex:\n\t\t\t\tfmt.Print(v.String())\n\t\t\tcase eval.UntypedString:\n\t\t\t\tfmt.Print(v.String)\n\t\t\tcase eval.UntypedRune:\n\t\t\t\tfmt.Printf(\"%v\", v.Rune)\n\t\t\tcase eval.UntypedBool:\n\t\t\t\tfmt.Print(v.Bool)\n\t\t\tdefault:\n\t\t\t\tfmt.Print(format.Debug(v))\n\t\t\t}\n\t\t}\n\t\tif len(v) > 1 {\n\t\t\tfmt.Println(\")\")\n\t\t} else if len(v) == 1 {\n\t\t\tfmt.Println(\"\")\n\t\t}\n\t}\n\tfor _, err := range res.Errs {\n\t\tfmt.Println(err.Error())\n\t}\n\tfor _, cmd := range res.Cmds {\n\t\tj := &shell.Job{\n\t\t\tState: shellState,\n\t\t\tCmd: cmd,\n\t\t\tParams: prg,\n\t\t\tStdin: os.Stdin,\n\t\t\tStdout: os.Stdout,\n\t\t\tStderr: os.Stderr,\n\t\t}\n\t\tif err := j.Start(); err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tdone, err := j.Wait()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tif !done {\n\t\t\tbreak \/\/ TODO not right, instead we should just have one cmd, not Cmds here.\n\t\t}\n\t}\n\t\/\/editMode.ApplyMode()\n}\n\nfunc init() {\n\tif home := os.Getenv(\"HOME\"); home != \"\" {\n\t\thistoryNgFile = filepath.Join(home, \".ng_history\")\n\t\thistoryShFile = filepath.Join(home, \".ngsh_history\")\n\t}\n}\n\nfunc historyWriter(dst string, src <-chan string) {\n\tvar batch []string\n\tticker := time.Tick(250 * time.Millisecond)\n\tfor {\n\t\tselect {\n\t\tcase line := <-src:\n\t\t\tbatch = append(batch, line)\n\t\tcase <-ticker:\n\t\t\tif len(batch) > 0 && dst != \"\" {\n\t\t\t\t\/\/ TODO: FcntlFlock\n\t\t\t\tf, err := os.OpenFile(dst, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0664)\n\t\t\t\tif err == nil {\n\t\t\t\t\tfor _, line := range batch {\n\t\t\t\t\t\tfmt.Fprintf(f, \"%s\\n\", line)\n\t\t\t\t\t}\n\t\t\t\t\tf.Close()\n\t\t\t\t}\n\t\t\t}\n\t\t\tbatch = nil\n\t\t}\n\t}\n}\n<commit_msg>ng: add flag to start in shell mode (#109)<commit_after>\/\/ Copyright 2016 The Neugram Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n\n\t\"neugram.io\/ng\/eval\"\n\t\"neugram.io\/ng\/eval\/environ\"\n\t\"neugram.io\/ng\/eval\/shell\"\n\t\"neugram.io\/ng\/format\"\n\t\"neugram.io\/ng\/gengo\"\n\t\"neugram.io\/ng\/jupyter\"\n\t\"neugram.io\/ng\/parser\"\n\n\t\"github.com\/peterh\/liner\"\n)\n\nvar (\n\torigMode liner.ModeApplier\n\n\tlineNg *liner.State \/\/ ng-mode line reader\n\thistoryNgFile = \"\"\n\thistoryNg = make(chan string, 1)\n\thistoryShFile = \"\"\n\thistorySh = make(chan string, 1)\n\tsigint = make(chan os.Signal)\n\n\tp *parser.Parser\n\tprg *eval.Program\n\tshellState *shell.State\n)\n\nfunc exit(code int) {\n\tif lineNg != nil {\n\t\tlineNg.Close()\n\t}\n\tfmt.Fprintf(os.Stderr, \"\\n\")\n\tos.Exit(code)\n}\n\nfunc exitf(format string, args ...interface{}) {\n\tfmt.Fprintf(os.Stderr, \"ng: \"+format+\"\\n\", args...)\n\texit(1)\n}\n\nfunc mode() liner.ModeApplier {\n\tm, err := liner.TerminalMode()\n\tif err != nil {\n\t\texitf(\"terminal mode: %v\", err)\n\t}\n\treturn m\n}\n\nconst usageLine = \"ng [programfile | -e cmd | -jupyter file] [arguments]\"\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, `ng - neugram scripting language and shell\n\nUsage:\n\t%s\n\nOptions:\n`, usageLine)\n\tflag.PrintDefaults()\n}\n\nfunc main() {\n\tshell.Init()\n\n\tflagJupyter := flag.String(\"jupyter\", \"\", \"path to jupyter kernel connection file\")\n\tflagShell := flag.Bool(\"shell\", false, \"start in shell mode\")\n\tflagHelp := flag.Bool(\"h\", false, \"display help message and exit\")\n\tflagE := flag.String(\"e\", \"\", \"program passed as a string\")\n\tflagO := flag.String(\"o\", \"\", \"compile the program to the named file\")\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"usage: %s\\n\", usageLine)\n\t\tos.Exit(1)\n\t}\n\tflag.Parse()\n\n\tif *flagHelp {\n\t\tusage()\n\t\tos.Exit(0)\n\t}\n\tif *flagJupyter != \"\" {\n\t\terr := jupyter.Run(context.Background(), *flagJupyter)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tos.Exit(0)\n\t}\n\tif *flagE != \"\" {\n\t\tinitProgram(filepath.Join(cwd, \"ng-arg\"))\n\t\tres := p.ParseLine([]byte(*flagE))\n\t\thandleResult(res)\n\t\treturn\n\t}\n\tif args := flag.Args(); len(args) > 0 {\n\t\t\/\/ TODO: plumb through the rest of the args\n\t\tpath := args[0]\n\t\tif *flagO != \"\" {\n\t\t\tres, err := gengo.GenGo(path)\n\t\t\tif err != nil {\n\t\t\t\texitf(\"%v\", err)\n\t\t\t}\n\t\t\t_ = res\n\t\t\texitf(\"TODO gengo\")\n\t\t\treturn\n\t\t}\n\t\tinitProgram(path)\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\texitf(\"%v\", err)\n\t\t}\n\t\tstate, err := runFile(f)\n\t\tif err != nil {\n\t\t\texitf(\"%v\", err)\n\t\t}\n\t\tif state == parser.StateCmd {\n\t\t\texitf(\"%s: ends in an unclosed shell statement\", args[0])\n\t\t}\n\t\treturn\n\t}\n\tif *flagO != \"\" {\n\t\texitf(\"-o specified but no program file provided\")\n\t}\n\n\tlineNg = liner.NewLiner()\n\tdefer lineNg.Close()\n\n\tloop(*flagShell)\n}\n\nfunc setWindowSize(env map[interface{}]interface{}) {\n\t\/\/ TODO windowsize\n\t\/\/ TODO\n\t\/\/ TODO\n\t\/\/ TODO\n\t\/*\n\t\trows, cols, err := job.WindowSize(os.Stderr.Fd())\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"ng: could not get window size: %v\\n\", err)\n\t\t} else {\n\t\t\t\/\/ TODO: these are meant to be shell variables, not\n\t\t\t\/\/ environment variables. But then, how do programs\n\t\t\t\/\/ like `ls` read them?\n\t\t\tenv[\"LINES\"] = strconv.Itoa(rows)\n\t\t\tenv[\"COLUMNS\"] = strconv.Itoa(cols)\n\t\t}\n\t*\/\n}\n\nfunc ps1(env *environ.Environ) string {\n\tv := env.Get(\"PS1\")\n\tif v == \"\" {\n\t\treturn \"ng$ \"\n\t}\n\tif strings.IndexByte(v, '\\\\') == -1 {\n\t\treturn v\n\t}\n\tvar buf []byte\n\tfor {\n\t\ti := strings.IndexByte(v, '\\\\')\n\t\tif i == -1 || i == len(v)-1 {\n\t\t\tbreak\n\t\t}\n\t\tbuf = append(buf, v[:i]...)\n\t\tb := v[i+1]\n\t\tv = v[i+2:]\n\t\tswitch b {\n\t\tcase 'h', 'H':\n\t\t\tout, err := exec.Command(\"hostname\").CombinedOutput()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"ng: %v\\n\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif b == 'h' {\n\t\t\t\tif i := bytes.IndexByte(out, '.'); i >= 0 {\n\t\t\t\t\tout = out[:i]\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(out) > 0 && out[len(out)-1] == '\\n' {\n\t\t\t\tout = out[:len(out)-1]\n\t\t\t}\n\t\t\tbuf = append(buf, out...)\n\t\tcase 'n':\n\t\t\tbuf = append(buf, '\\n')\n\t\tcase 'w', 'W':\n\t\t\tcwd := env.Get(\"PWD\")\n\t\t\tif home := env.Get(\"HOME\"); home != \"\" {\n\t\t\t\tcwd = strings.Replace(cwd, home, \"~\", 1)\n\t\t\t}\n\t\t\tif b == 'W' {\n\t\t\t\tcwd = filepath.Base(cwd)\n\t\t\t}\n\t\t\tbuf = append(buf, cwd...)\n\t\t}\n\t\t\/\/ TODO: '!', '#', '$', 'nnn', 's', 'j', and more.\n\t}\n\tbuf = append(buf, v...)\n\treturn string(buf)\n}\n\nvar cwd string\n\nfunc init() {\n\tvar err error\n\tcwd, err = os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n\nfunc initProgram(path string) {\n\tp = parser.New(path)\n\tshellState = &shell.State{\n\t\tEnv: environ.New(),\n\t\tAlias: environ.New(),\n\t}\n\tprg = eval.New(path, shellState)\n\n\t\/\/ TODO this env setup could be done in neugram code\n\tenv := prg.Environ()\n\tfor _, s := range os.Environ() {\n\t\ti := strings.Index(s, \"=\")\n\t\tenv.Set(s[:i], s[i+1:])\n\t}\n\twd, err := os.Getwd()\n\tif err == nil {\n\t\tenv.Set(\"PWD\", wd)\n\t}\n\t\/\/setWindowSize(env)\n\n\tgo func() {\n\t\tsig := make(chan os.Signal, 1)\n\t\tsignal.Notify(sig, os.Interrupt)\n\t\tfor {\n\t\t\ts := <-sig\n\t\t\tselect {\n\t\t\tcase sigint <- s:\n\t\t\tcase <-time.After(500 * time.Millisecond):\n\t\t\t\t\/\/ The evaluator has not handled the signal\n\t\t\t\t\/\/ promptly. There are several possible\n\t\t\t\t\/\/ reasons for this. The most likely right now\n\t\t\t\t\/\/ is the evaluator is in arbitrary Go code,\n\t\t\t\t\/\/ which does not have a way to be preempted.\n\t\t\t\t\/\/ It is also possible we have run into a\n\t\t\t\t\/\/ bug in the evaluator.\n\t\t\t\t\/\/\n\t\t\t\t\/\/ Either way, instead of being one of those\n\t\t\t\t\/\/ obnoxious programs that refuses to respond\n\t\t\t\t\/\/ to Ctrl-C, be overly agressive and let the\n\t\t\t\t\/\/ entire ng process exit.\n\t\t\t\t\/\/\n\t\t\t\t\/\/ This is bad if you use ng as your primary\n\t\t\t\t\/\/ shell, but good if you invoke ng to handle\n\t\t\t\t\/\/ scripts.\n\t\t\t\tfmt.Fprintf(os.Stderr, \"ng: exiting on interrupt\\n\")\n\t\t\t\texit(1)\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc runFile(f *os.File) (parser.ParserState, error) {\n\tstate := parser.StateStmt\n\tscanner := bufio.NewScanner(f)\n\tfor i := 0; scanner.Scan(); i++ {\n\t\tb := scanner.Bytes()\n\t\tif i == 0 && len(b) > 2 && b[0] == '#' && b[1] == '!' { \/\/ shebang\n\t\t\tcontinue\n\t\t}\n\t\tres := p.ParseLine(b)\n\t\thandleResult(res)\n\t\tstate = res.State\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn state, fmt.Errorf(\"%s: %v\", f.Name(), err)\n\t}\n\tswitch state {\n\tcase parser.StateStmtPartial, parser.StateCmdPartial:\n\t\treturn state, fmt.Errorf(\"%s: ends in a partial statement\", f.Name())\n\tdefault:\n\t\treturn state, nil\n\t}\n}\n\nfunc loop(startInShell bool) {\n\tpath := filepath.Join(cwd, \"ng-interactive\")\n\tinitProgram(path)\n\n\tstate := parser.StateStmt\n\tif os.Args[0] == \"ngsh\" || os.Args[0] == \"-ngsh\" || startInShell {\n\t\tinitFile := filepath.Join(os.Getenv(\"HOME\"), \".ngshinit\")\n\t\tif f, err := os.Open(initFile); err == nil {\n\t\t\tvar err error\n\t\t\tstate, err = runFile(f)\n\t\t\tf.Close()\n\t\t\tif err != nil {\n\t\t\t\texitf(\"%v\", err)\n\t\t\t}\n\t\t}\n\t\tif state == parser.StateStmt {\n\t\t\tres := p.ParseLine([]byte(\"$$\"))\n\t\t\thandleResult(res)\n\t\t\tstate = res.State\n\t\t}\n\t}\n\n\tlineNg.SetTabCompletionStyle(liner.TabPrints)\n\tlineNg.SetWordCompleter(completer)\n\tlineNg.SetCtrlCAborts(true)\n\n\tif f, err := os.Open(historyShFile); err == nil {\n\t\tlineNg.SetMode(\"sh\")\n\t\tlineNg.ReadHistory(f)\n\t\tf.Close()\n\t}\n\tgo historyWriter(historyShFile, historySh)\n\n\tif f, err := os.Open(historyNgFile); err == nil {\n\t\tlineNg.SetMode(\"ng\")\n\t\tlineNg.ReadHistory(f)\n\t\tf.Close()\n\t}\n\tgo historyWriter(historyNgFile, historyNg)\n\n\tfor {\n\t\tvar (\n\t\t\tmode string\n\t\t\tprompt string\n\t\t\thistory chan string\n\t\t)\n\t\tswitch state {\n\t\tcase parser.StateUnknown:\n\t\t\tmode, prompt, history = \"ng\", \"??> \", historyNg\n\t\tcase parser.StateStmt:\n\t\t\tmode, prompt, history = \"ng\", \"ng> \", historyNg\n\t\tcase parser.StateStmtPartial:\n\t\t\tmode, prompt, history = \"ng\", \"..> \", historyNg\n\t\tcase parser.StateCmd:\n\t\t\tmode, prompt, history = \"sh\", ps1(prg.Environ()), historySh\n\t\tcase parser.StateCmdPartial:\n\t\t\tmode, prompt, history = \"sh\", \"..$ \", historySh\n\t\tdefault:\n\t\t\texitf(\"unkown parser state: %v\", state)\n\t\t}\n\t\tlineNg.SetMode(mode)\n\t\tdata, err := lineNg.Prompt(prompt)\n\t\tif err == liner.ErrPromptAborted {\n\t\t\tswitch state {\n\t\t\tcase parser.StateStmtPartial:\n\t\t\t\tfmt.Printf(\"TODO interrupt partial statement\\n\")\n\t\t\tcase parser.StateCmdPartial:\n\t\t\t\tfmt.Printf(\"TODO interrupt partial command\\n\")\n\t\t\t}\n\t\t} else if err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\texit(0)\n\t\t\t}\n\t\t\texitf(\"error reading input: %v\", err)\n\t\t}\n\t\tif data == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tlineNg.AppendHistory(mode, data)\n\t\thistory <- data\n\t\tselect { \/\/ drain sigint\n\t\tcase <-sigint:\n\t\tdefault:\n\t\t}\n\t\tres := p.ParseLine([]byte(data))\n\t\thandleResult(res)\n\t\tstate = res.State\n\t}\n}\n\nfunc handleResult(res parser.Result) {\n\t\/\/ TODO: use ngcore for this\n\tfor _, s := range res.Stmts {\n\t\tv, err := prg.Eval(s, sigint)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"ng: %v\\n\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif len(v) > 1 {\n\t\t\tfmt.Print(\"(\")\n\t\t}\n\t\tfor i, val := range v {\n\t\t\tif i > 0 {\n\t\t\t\tfmt.Print(\", \")\n\t\t\t}\n\t\t\tif val == (reflect.Value{}) {\n\t\t\t\tfmt.Print(\"<nil>\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch v := val.Interface().(type) {\n\t\t\tcase eval.UntypedInt:\n\t\t\t\tfmt.Print(v.String())\n\t\t\tcase eval.UntypedFloat:\n\t\t\t\tfmt.Print(v.String())\n\t\t\tcase eval.UntypedComplex:\n\t\t\t\tfmt.Print(v.String())\n\t\t\tcase eval.UntypedString:\n\t\t\t\tfmt.Print(v.String)\n\t\t\tcase eval.UntypedRune:\n\t\t\t\tfmt.Printf(\"%v\", v.Rune)\n\t\t\tcase eval.UntypedBool:\n\t\t\t\tfmt.Print(v.Bool)\n\t\t\tdefault:\n\t\t\t\tfmt.Print(format.Debug(v))\n\t\t\t}\n\t\t}\n\t\tif len(v) > 1 {\n\t\t\tfmt.Println(\")\")\n\t\t} else if len(v) == 1 {\n\t\t\tfmt.Println(\"\")\n\t\t}\n\t}\n\tfor _, err := range res.Errs {\n\t\tfmt.Println(err.Error())\n\t}\n\tfor _, cmd := range res.Cmds {\n\t\tj := &shell.Job{\n\t\t\tState: shellState,\n\t\t\tCmd: cmd,\n\t\t\tParams: prg,\n\t\t\tStdin: os.Stdin,\n\t\t\tStdout: os.Stdout,\n\t\t\tStderr: os.Stderr,\n\t\t}\n\t\tif err := j.Start(); err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tdone, err := j.Wait()\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\tif !done {\n\t\t\tbreak \/\/ TODO not right, instead we should just have one cmd, not Cmds here.\n\t\t}\n\t}\n\t\/\/editMode.ApplyMode()\n}\n\nfunc init() {\n\tif home := os.Getenv(\"HOME\"); home != \"\" {\n\t\thistoryNgFile = filepath.Join(home, \".ng_history\")\n\t\thistoryShFile = filepath.Join(home, \".ngsh_history\")\n\t}\n}\n\nfunc historyWriter(dst string, src <-chan string) {\n\tvar batch []string\n\tticker := time.Tick(250 * time.Millisecond)\n\tfor {\n\t\tselect {\n\t\tcase line := <-src:\n\t\t\tbatch = append(batch, line)\n\t\tcase <-ticker:\n\t\t\tif len(batch) > 0 && dst != \"\" {\n\t\t\t\t\/\/ TODO: FcntlFlock\n\t\t\t\tf, err := os.OpenFile(dst, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0664)\n\t\t\t\tif err == nil {\n\t\t\t\t\tfor _, line := range batch {\n\t\t\t\t\t\tfmt.Fprintf(f, \"%s\\n\", line)\n\t\t\t\t\t}\n\t\t\t\t\tf.Close()\n\t\t\t\t}\n\t\t\t}\n\t\t\tbatch = nil\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package installer\n\n\/\/ Note: .exe is not in that list because we need to\n\/\/ read part of its contents to decide what we're going to\n\/\/ do with it: 1) extract it 2) run it as an installer 3)\n\/\/ just copy it naked\nvar installerForExt = map[string]InstallerType{\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Generic archives\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\".zip\": InstallerTypeArchive,\n\t\".gz\": InstallerTypeArchive,\n\t\".bz2\": InstallerTypeArchive,\n\t\".7z\": InstallerTypeArchive,\n\t\".tar\": InstallerTypeArchive,\n\t\".xz\": InstallerTypeArchive,\n\t\".rar\": InstallerTypeArchive,\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Known non-supported\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\".deb\": InstallerTypeUnsupported,\n\t\".rpm\": InstallerTypeUnsupported,\n\t\".pkg\": InstallerTypeUnsupported,\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Platform-specific packages\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ Apple disk images\n\t\".dmg\": InstallerTypeArchive,\n\n\t\/\/ Microsoft packages\n\t\".msi\": InstallerTypeMSI,\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Known naked that also sniff as other formats\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\".jar\": InstallerTypeNaked,\n\t\".air\": InstallerTypeNaked,\n\t\".love\": InstallerTypeNaked,\n\t\".unitypackage\": InstallerTypeNaked,\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Books!\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\".pdf\": InstallerTypeNaked,\n\t\".ps\": InstallerTypeNaked,\n\t\".djvu\": InstallerTypeNaked,\n\t\".cbr\": InstallerTypeNaked,\n\t\".cbz\": InstallerTypeNaked,\n\t\".cb7\": InstallerTypeNaked,\n\t\".cbt\": InstallerTypeNaked,\n\t\".cba\": InstallerTypeNaked,\n\t\".doc\": InstallerTypeNaked,\n\t\".docx\": InstallerTypeNaked,\n\t\".epub\": InstallerTypeNaked,\n\t\".mobi\": InstallerTypeNaked,\n\t\".pdb\": InstallerTypeNaked,\n\t\".fb2\": InstallerTypeNaked,\n\t\".xeb\": InstallerTypeNaked,\n\t\".ceb\": InstallerTypeNaked,\n\t\".ibooks\": InstallerTypeNaked,\n\t\".txt\": InstallerTypeNaked,\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Media\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\".ogg\": InstallerTypeNaked,\n\t\".mp3\": InstallerTypeNaked,\n\t\".wav\": InstallerTypeNaked,\n\t\".mp4\": InstallerTypeNaked,\n\t\".avi\": InstallerTypeNaked,\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Images\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\".png\": InstallerTypeNaked,\n\t\".jpg\": InstallerTypeNaked,\n\t\".gif\": InstallerTypeNaked,\n\t\".bmp\": InstallerTypeNaked,\n\t\".tga\": InstallerTypeNaked,\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Game Maker assets\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\".gmez\": InstallerTypeNaked,\n\t\".gmz\": InstallerTypeNaked,\n\t\".yyz\": InstallerTypeNaked,\n\t\".yymp\": InstallerTypeNaked,\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ ROMs\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\".gb\": InstallerTypeNaked,\n\t\".gbc\": InstallerTypeNaked,\n\t\".sfc\": InstallerTypeNaked,\n\t\".smc\": InstallerTypeNaked,\n\t\".swc\": InstallerTypeNaked,\n\t\".gen\": InstallerTypeNaked,\n\t\".sg\": InstallerTypeNaked,\n\t\".smd\": InstallerTypeNaked,\n\t\".md\": InstallerTypeNaked,\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Miscellaneous other things\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ Some html games provide a single .html file\n\t\/\/ Now that's dedication.\n\t\".html\": InstallerTypeNaked,\n}\n<commit_msg>Use DMG installer for .dmg files, duh<commit_after>package installer\n\n\/\/ Note: .exe is not in that list because we need to\n\/\/ read part of its contents to decide what we're going to\n\/\/ do with it: 1) extract it 2) run it as an installer 3)\n\/\/ just copy it naked\nvar installerForExt = map[string]InstallerType{\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Generic archives\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\".zip\": InstallerTypeArchive,\n\t\".gz\": InstallerTypeArchive,\n\t\".bz2\": InstallerTypeArchive,\n\t\".7z\": InstallerTypeArchive,\n\t\".tar\": InstallerTypeArchive,\n\t\".xz\": InstallerTypeArchive,\n\t\".rar\": InstallerTypeArchive,\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Known non-supported\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\".deb\": InstallerTypeUnsupported,\n\t\".rpm\": InstallerTypeUnsupported,\n\t\".pkg\": InstallerTypeUnsupported,\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Platform-specific packages\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ Apple disk images\n\t\".dmg\": InstallerTypeDMG,\n\n\t\/\/ Microsoft packages\n\t\".msi\": InstallerTypeMSI,\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Known naked that also sniff as other formats\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\".jar\": InstallerTypeNaked,\n\t\".air\": InstallerTypeNaked,\n\t\".love\": InstallerTypeNaked,\n\t\".unitypackage\": InstallerTypeNaked,\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Books!\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\".pdf\": InstallerTypeNaked,\n\t\".ps\": InstallerTypeNaked,\n\t\".djvu\": InstallerTypeNaked,\n\t\".cbr\": InstallerTypeNaked,\n\t\".cbz\": InstallerTypeNaked,\n\t\".cb7\": InstallerTypeNaked,\n\t\".cbt\": InstallerTypeNaked,\n\t\".cba\": InstallerTypeNaked,\n\t\".doc\": InstallerTypeNaked,\n\t\".docx\": InstallerTypeNaked,\n\t\".epub\": InstallerTypeNaked,\n\t\".mobi\": InstallerTypeNaked,\n\t\".pdb\": InstallerTypeNaked,\n\t\".fb2\": InstallerTypeNaked,\n\t\".xeb\": InstallerTypeNaked,\n\t\".ceb\": InstallerTypeNaked,\n\t\".ibooks\": InstallerTypeNaked,\n\t\".txt\": InstallerTypeNaked,\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Media\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\".ogg\": InstallerTypeNaked,\n\t\".mp3\": InstallerTypeNaked,\n\t\".wav\": InstallerTypeNaked,\n\t\".mp4\": InstallerTypeNaked,\n\t\".avi\": InstallerTypeNaked,\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Images\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\".png\": InstallerTypeNaked,\n\t\".jpg\": InstallerTypeNaked,\n\t\".gif\": InstallerTypeNaked,\n\t\".bmp\": InstallerTypeNaked,\n\t\".tga\": InstallerTypeNaked,\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Game Maker assets\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\".gmez\": InstallerTypeNaked,\n\t\".gmz\": InstallerTypeNaked,\n\t\".yyz\": InstallerTypeNaked,\n\t\".yymp\": InstallerTypeNaked,\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ ROMs\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\".gb\": InstallerTypeNaked,\n\t\".gbc\": InstallerTypeNaked,\n\t\".sfc\": InstallerTypeNaked,\n\t\".smc\": InstallerTypeNaked,\n\t\".swc\": InstallerTypeNaked,\n\t\".gen\": InstallerTypeNaked,\n\t\".sg\": InstallerTypeNaked,\n\t\".smd\": InstallerTypeNaked,\n\t\".md\": InstallerTypeNaked,\n\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\t\/\/ Miscellaneous other things\n\t\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\t\/\/ Some html games provide a single .html file\n\t\/\/ Now that's dedication.\n\t\".html\": InstallerTypeNaked,\n}\n<|endoftext|>"} {"text":"<commit_before>package brats_test\n\nimport (\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/cloudfoundry\/libbuildpack\"\n\t\"github.com\/cloudfoundry\/libbuildpack\/bratshelper\"\n\t\"github.com\/cloudfoundry\/libbuildpack\/cutlass\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"PHP buildpack\", func() {\n\tbratshelper.DeployingAnAppWithAnUpdatedVersionOfTheSameBuildpack(CopyBrats)\n\tbratshelper.StagingWithBuildpackThatSetsEOL(\"php\", CopyBrats)\n\tbratshelper.StagingWithADepThatIsNotTheLatest(\"php\", CopyBrats)\n\tbratshelper.StagingWithCustomBuildpackWithCredentialsInDependencies(`php7\\-[\\d\\.]+\\-linux\\-x64\\-cflinuxfs\\dm?-[\\da-f]+\\.tgz`, CopyBrats)\n\tbratshelper.DeployAppWithExecutableProfileScript(\"php\", CopyBrats)\n\tbratshelper.DeployAnAppWithSensitiveEnvironmentVariables(CopyBrats)\n\n\tcompatible := func(phpVersion, webserverVersion string) bool { return true }\n\tfor _, webserver := range []string{\"nginx\", \"httpd\"} {\n\t\twebserver := webserver\n\t\tcopyFunc := func(phpVersion, webserverVersion string) *cutlass.App {\n\t\t\treturn CopyBratsWithFramework(phpVersion, webserver, webserverVersion)\n\t\t}\n\t\tbratshelper.ForAllSupportedVersions2(\"php\", webserver, compatible, \"with php-%s and web_server: \"+webserver+\"-%s\", copyFunc, func(phpVersion, webserverVersion string, app *cutlass.App) {\n\t\t\tPushApp(app)\n\n\t\t\tvar options struct {\n\t\t\t\tExtensions []string `json:\"PHP_EXTENSIONS\"`\n\t\t\t}\n\t\t\tExpect(libbuildpack.NewJSON().Load(filepath.Join(app.Path, \".bp-config\", \"options.json\"), &options)).To(Succeed())\n\t\t\tExpect(options.Extensions).ToNot(BeEmpty())\n\n\t\t\tBy(\"should have the correct version\", func() {\n\t\t\t\tExpect(app.Stdout.String()).To(ContainSubstring(\"Installing PHP\"))\n\t\t\t\tExpect(app.Stdout.String()).To(ContainSubstring(\"PHP \" + phpVersion))\n\t\t\t})\n\t\t\tBy(\"should load all of the modules specified in options.json\", func() {\n\t\t\t\tbody, err := app.GetBody(\"\/?\" + strings.Join(options.Extensions, \",\"))\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\tfor _, extension := range options.Extensions {\n\t\t\t\t\tExpect(body).To(ContainSubstring(\"SUCCESS: \" + extension + \" loads\"))\n\t\t\t\t}\n\t\t\t})\n\t\t\tBy(\"should not include any warning messages when loading all the extensions\", func() {\n\t\t\t\tExpect(app.Stdout.String()).ToNot(MatchRegexp(`The extension .* is not provided by this buildpack.`))\n\t\t\t})\n\t\t\tBy(\"should not load unknown module\", func() {\n\t\t\t\tExpect(app.GetBody(\"\/?something\")).To(ContainSubstring(\"ERROR: something failed to load.\"))\n\t\t\t})\n\t\t})\n\t}\n})\n<commit_msg>Update bratshelper.StagingWithCustomBuildpackWithCredentialsInDependencies<commit_after>package brats_test\n\nimport (\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/cloudfoundry\/libbuildpack\"\n\t\"github.com\/cloudfoundry\/libbuildpack\/bratshelper\"\n\t\"github.com\/cloudfoundry\/libbuildpack\/cutlass\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"PHP buildpack\", func() {\n\tbratshelper.DeployingAnAppWithAnUpdatedVersionOfTheSameBuildpack(CopyBrats)\n\tbratshelper.StagingWithBuildpackThatSetsEOL(\"php\", CopyBrats)\n\tbratshelper.StagingWithADepThatIsNotTheLatest(\"php\", CopyBrats)\n\tbratshelper.StagingWithCustomBuildpackWithCredentialsInDependencies(CopyBrats)\n\tbratshelper.DeployAppWithExecutableProfileScript(\"php\", CopyBrats)\n\tbratshelper.DeployAnAppWithSensitiveEnvironmentVariables(CopyBrats)\n\n\tcompatible := func(phpVersion, webserverVersion string) bool { return true }\n\tfor _, webserver := range []string{\"nginx\", \"httpd\"} {\n\t\twebserver := webserver\n\t\tcopyFunc := func(phpVersion, webserverVersion string) *cutlass.App {\n\t\t\treturn CopyBratsWithFramework(phpVersion, webserver, webserverVersion)\n\t\t}\n\t\tbratshelper.ForAllSupportedVersions2(\"php\", webserver, compatible, \"with php-%s and web_server: \"+webserver+\"-%s\", copyFunc, func(phpVersion, webserverVersion string, app *cutlass.App) {\n\t\t\tPushApp(app)\n\n\t\t\tvar options struct {\n\t\t\t\tExtensions []string `json:\"PHP_EXTENSIONS\"`\n\t\t\t}\n\t\t\tExpect(libbuildpack.NewJSON().Load(filepath.Join(app.Path, \".bp-config\", \"options.json\"), &options)).To(Succeed())\n\t\t\tExpect(options.Extensions).ToNot(BeEmpty())\n\n\t\t\tBy(\"should have the correct version\", func() {\n\t\t\t\tExpect(app.Stdout.String()).To(ContainSubstring(\"Installing PHP\"))\n\t\t\t\tExpect(app.Stdout.String()).To(ContainSubstring(\"PHP \" + phpVersion))\n\t\t\t})\n\t\t\tBy(\"should load all of the modules specified in options.json\", func() {\n\t\t\t\tbody, err := app.GetBody(\"\/?\" + strings.Join(options.Extensions, \",\"))\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\tfor _, extension := range options.Extensions {\n\t\t\t\t\tExpect(body).To(ContainSubstring(\"SUCCESS: \" + extension + \" loads\"))\n\t\t\t\t}\n\t\t\t})\n\t\t\tBy(\"should not include any warning messages when loading all the extensions\", func() {\n\t\t\t\tExpect(app.Stdout.String()).ToNot(MatchRegexp(`The extension .* is not provided by this buildpack.`))\n\t\t\t})\n\t\t\tBy(\"should not load unknown module\", func() {\n\t\t\t\tExpect(app.GetBody(\"\/?something\")).To(ContainSubstring(\"ERROR: something failed to load.\"))\n\t\t\t})\n\t\t})\n\t}\n})\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 by caixw, All rights reserved.\n\/\/ Use of this source code is governed by a MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage entry\n\nimport (\n\t\"fmt\"\n\tstdregexp \"regexp\"\n\tstdsyntax \"regexp\/syntax\"\n\t\"strings\"\n)\n\ntype regexp struct {\n\t*base\n\texpr *stdregexp.Regexp\n\thasParams bool\n\tsyntaxExpr *stdsyntax.Regexp\n}\n\nfunc newRegexp(pattern string, s *syntax) (*regexp, error) {\n\tb := newBase(pattern)\n\n\t\/\/ 合并正则表达式\n\tstr := strings.Join(s.patterns, \"\")\n\tif b.wildcard {\n\t\tstr = str[:len(str)-1] \/\/ 去掉最后的星号\n\t}\n\n\texpr, err := stdregexp.Compile(str)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsyntaxExpr, err := stdsyntax.Parse(str, stdsyntax.Perl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ®exp{\n\t\tbase: b,\n\t\thasParams: s.hasParams,\n\t\texpr: expr,\n\t\tsyntaxExpr: syntaxExpr,\n\t}, nil\n}\n\nfunc (r *regexp) priority() int {\n\tif r.wildcard {\n\t\treturn typeRegexp + 100\n\t}\n\n\treturn typeRegexp\n}\n\n\/\/ Entry.Match\nfunc (r *regexp) match(url string) (bool, map[string]string) {\n\tloc := r.expr.FindStringIndex(url)\n\n\tif r.wildcard {\n\t\tif loc != nil &&\n\t\t\tloc[0] == 0 &&\n\t\t\tloc[1] < len(url) {\n\t\t\treturn true, r.params(url)\n\t\t}\n\t}\n\n\tif loc != nil &&\n\t\tloc[0] == 0 &&\n\t\tloc[1] == len(url) {\n\t\treturn true, r.params(url)\n\t}\n\n\treturn false, nil\n}\n\n\/\/ Entry.Params\nfunc (r *regexp) params(url string) map[string]string {\n\tif !r.hasParams {\n\t\treturn nil\n\t}\n\n\t\/\/ 正确匹配正则表达式,则获相关的正则表达式命名变量。\n\tsubexps := r.expr.SubexpNames()\n\tmapped := make(map[string]string, len(subexps))\n\targs := r.expr.FindStringSubmatch(url)\n\tfor index, name := range subexps {\n\t\tif len(name) > 0 && index < len(args) {\n\t\t\tmapped[name] = args[index]\n\t\t}\n\t}\n\treturn mapped\n}\n\n\/\/ fun\nfunc (r *regexp) URL(params map[string]string, path string) (string, error) {\n\tif r.syntaxExpr == nil {\n\t\treturn r.patternString, nil\n\t}\n\n\turl := r.syntaxExpr.String()\n\tfor _, sub := range r.syntaxExpr.Sub {\n\t\tif len(sub.Name) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tparam, exists := params[sub.Name]\n\t\tif !exists {\n\t\t\treturn \"\", fmt.Errorf(\"未找到参数 %v 的值\", sub.Name)\n\t\t}\n\t\turl = strings.Replace(url, sub.String(), param, -1)\n\t}\n\n\tif r.wildcard {\n\t\turl += path\n\t}\n\n\treturn url, nil\n}\n<commit_msg>[internal\/entry] 调整 regexp.match 函数<commit_after>\/\/ Copyright 2017 by caixw, All rights reserved.\n\/\/ Use of this source code is governed by a MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage entry\n\nimport (\n\t\"fmt\"\n\tstdregexp \"regexp\"\n\tstdsyntax \"regexp\/syntax\"\n\t\"strings\"\n)\n\ntype regexp struct {\n\t*base\n\texpr *stdregexp.Regexp\n\thasParams bool\n\tsyntaxExpr *stdsyntax.Regexp\n}\n\nfunc newRegexp(pattern string, s *syntax) (*regexp, error) {\n\tb := newBase(pattern)\n\n\t\/\/ 合并正则表达式\n\tstr := strings.Join(s.patterns, \"\")\n\tif b.wildcard {\n\t\tstr = str[:len(str)-1] \/\/ 去掉最后的星号\n\t}\n\n\texpr, err := stdregexp.Compile(str)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsyntaxExpr, err := stdsyntax.Parse(str, stdsyntax.Perl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ®exp{\n\t\tbase: b,\n\t\thasParams: s.hasParams,\n\t\texpr: expr,\n\t\tsyntaxExpr: syntaxExpr,\n\t}, nil\n}\n\nfunc (r *regexp) priority() int {\n\tif r.wildcard {\n\t\treturn typeRegexp + 100\n\t}\n\n\treturn typeRegexp\n}\n\n\/\/ Entry.Match\nfunc (r *regexp) match(url string) (bool, map[string]string) {\n\tloc := r.expr.FindStringIndex(url)\n\n\tif loc == nil || loc[0] != 0 {\n\t\treturn false, nil\n\t}\n\n\tif loc[1] == len(url) {\n\t\treturn true, r.params(url)\n\t}\n\n\t\/\/ 通配符的应该比较少,放最后比较\n\tif r.wildcard {\n\t\tif loc[1] < len(url) {\n\t\t\treturn true, r.params(url)\n\t\t}\n\t}\n\n\treturn false, nil\n}\n\n\/\/ Entry.Params\nfunc (r *regexp) params(url string) map[string]string {\n\tif !r.hasParams {\n\t\treturn nil\n\t}\n\n\t\/\/ 正确匹配正则表达式,则获相关的正则表达式命名变量。\n\tsubexps := r.expr.SubexpNames()\n\tmapped := make(map[string]string, len(subexps))\n\targs := r.expr.FindStringSubmatch(url)\n\tfor index, name := range subexps {\n\t\tif len(name) > 0 && index < len(args) {\n\t\t\tmapped[name] = args[index]\n\t\t}\n\t}\n\treturn mapped\n}\n\n\/\/ fun\nfunc (r *regexp) URL(params map[string]string, path string) (string, error) {\n\tif r.syntaxExpr == nil {\n\t\treturn r.patternString, nil\n\t}\n\n\turl := r.syntaxExpr.String()\n\tfor _, sub := range r.syntaxExpr.Sub {\n\t\tif len(sub.Name) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tparam, exists := params[sub.Name]\n\t\tif !exists {\n\t\t\treturn \"\", fmt.Errorf(\"未找到参数 %v 的值\", sub.Name)\n\t\t}\n\t\turl = strings.Replace(url, sub.String(), param, -1)\n\t}\n\n\tif r.wildcard {\n\t\turl += path\n\t}\n\n\treturn url, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage response provides helpers and utils for working with HTTP response\n*\/\npackage response\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"github.com\/vardius\/go-api-boilerplate\/internal\/errors\"\n\t\"github.com\/vardius\/go-api-boilerplate\/internal\/http\/middleware\/metadata\"\n)\n\n\/\/ RespondJSON returns data as json response\nfunc RespondJSON(ctx context.Context, w http.ResponseWriter, payload interface{}, statusCode int) {\n\n\t\/\/ If there is nothing to marshal then set status code and return.\n\tif payload == nil || statusCode == http.StatusNoContent {\n\t\tw.WriteHeader(statusCode)\n\t\tif metadata, ok := ctx.Value(metadata.KeyMetadataValues).(*metadata.Metadata); ok {\n\t\t\tmetadata.StatusCode = statusCode\n\t\t}\n\t\treturn\n\t}\n\n\tencoder := json.NewEncoder(w)\n\tencoder.SetEscapeHTML(true)\n\tencoder.SetIndent(\"\", \"\")\n\n\tw.WriteHeader(statusCode)\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\n\tif err := encoder.Encode(payload); err != nil {\n\t\tpanic(err)\n\t}\n\n\tif metadata, ok := ctx.Value(metadata.KeyMetadataValues).(*metadata.Metadata); ok {\n\t\tmetadata.StatusCode = statusCode\n\t}\n\n\t\/\/ Check if it is stream response\n\tif f, ok := w.(http.Flusher); ok {\n\t\tf.Flush()\n\t} else {\n\t\t\/\/ Write nil in case of setting http.StatusOK header if header not set\n\t\tw.Write(nil)\n\t}\n}\n\n\/\/ RespondJSONError returns error response\n\/\/ uses WithPayloadAsJSON internally\nfunc RespondJSONError(ctx context.Context, w http.ResponseWriter, err error) {\n\tRespondJSON(ctx, w, err, errors.HTTPStatusCode(err))\n}\n<commit_msg>Fix multiple header writes<commit_after>\/*\nPackage response provides helpers and utils for working with HTTP response\n*\/\npackage response\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"net\/http\"\n\n\t\"github.com\/vardius\/go-api-boilerplate\/internal\/errors\"\n\t\"github.com\/vardius\/go-api-boilerplate\/internal\/http\/middleware\/metadata\"\n)\n\n\/\/ RespondJSON returns data as json response\nfunc RespondJSON(ctx context.Context, w http.ResponseWriter, payload interface{}, statusCode int) {\n\n\t\/\/ If there is nothing to marshal then set status code and return.\n\tif payload == nil || statusCode == http.StatusNoContent {\n\t\tw.WriteHeader(statusCode)\n\t\tif metadata, ok := ctx.Value(metadata.KeyMetadataValues).(*metadata.Metadata); ok {\n\t\t\tmetadata.StatusCode = statusCode\n\t\t}\n\t\treturn\n\t}\n\n\tencoder := json.NewEncoder(w)\n\tencoder.SetEscapeHTML(true)\n\tencoder.SetIndent(\"\", \"\")\n\n\tw.WriteHeader(statusCode)\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\n\tif err := encoder.Encode(payload); err != nil {\n\t\tpanic(err)\n\t}\n\n\tif metadata, ok := ctx.Value(metadata.KeyMetadataValues).(*metadata.Metadata); ok {\n\t\tmetadata.StatusCode = statusCode\n\t}\n\n\t\/\/ Check if it is stream response\n\tif f, ok := w.(http.Flusher); ok {\n\t\tf.Flush()\n\t}\n}\n\n\/\/ RespondJSONError returns error response\n\/\/ uses WithPayloadAsJSON internally\nfunc RespondJSONError(ctx context.Context, w http.ResponseWriter, err error) {\n\tRespondJSON(ctx, w, err, errors.HTTPStatusCode(err))\n}\n<|endoftext|>"} {"text":"<commit_before>package licenseutils\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/docker\/licensing\"\n\t\"github.com\/docker\/licensing\/model\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ HubUser wraps a licensing client and holds key information\n\/\/ for a user to avoid multiple lookups\ntype HubUser struct {\n\tClient licensing.Client\n\ttoken string\n\tUser model.User\n\tOrgs []model.Org\n}\n\n\/\/GetOrgByID finds the org by the ID in the users list of orgs\nfunc (u HubUser) GetOrgByID(orgID string) (model.Org, error) {\n\tfor _, org := range u.Orgs {\n\t\tif org.ID == orgID {\n\t\t\treturn org, nil\n\t\t}\n\t}\n\treturn model.Org{}, fmt.Errorf(\"org %s not found\", orgID)\n}\n\nfunc getClient() (licensing.Client, error) {\n\tbaseURI, err := url.Parse(licensingDefaultBaseURI)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn licensing.New(&licensing.Config{\n\t\tBaseURI: *baseURI,\n\t\tHTTPClient: &http.Client{},\n\t\tPublicKeys: licensingPublicKeys,\n\t})\n}\n\n\/\/ Login to the license server and return a client that can be used to look up and download license files or generate new trial licenses\nfunc Login(ctx context.Context, authConfig *types.AuthConfig) (HubUser, error) {\n\tlclient, err := getClient()\n\tif err != nil {\n\t\treturn HubUser{}, err\n\t}\n\n\t\/\/ For licensing we know they must have a valid login session\n\tif authConfig.Username == \"\" {\n\t\treturn HubUser{}, fmt.Errorf(\"you must be logged in to access licenses. Please use 'docker login' then try again\")\n\t}\n\ttoken, err := lclient.LoginViaAuth(ctx, authConfig.Username, authConfig.Password)\n\tif err != nil {\n\t\treturn HubUser{}, err\n\t}\n\tuser, err := lclient.GetHubUserByName(ctx, authConfig.Username)\n\tif err != nil {\n\t\treturn HubUser{}, err\n\t}\n\torgs, err := lclient.GetHubUserOrgs(ctx, token)\n\tif err != nil {\n\t\treturn HubUser{}, err\n\t}\n\treturn HubUser{\n\t\tClient: lclient,\n\t\ttoken: token,\n\t\tUser: *user,\n\t\tOrgs: orgs,\n\t}, nil\n\n}\n\n\/\/ GetAvailableLicenses finds all available licenses for a given account and their orgs\nfunc (u HubUser) GetAvailableLicenses(ctx context.Context) ([]LicenseDisplay, error) {\n\tsubs, err := u.Client.ListSubscriptions(ctx, u.token, u.User.ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, org := range u.Orgs {\n\t\torgSub, err := u.Client.ListSubscriptions(ctx, u.token, org.ID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsubs = append(subs, orgSub...)\n\t}\n\n\t\/\/ Convert the SubscriptionDetails to a more user-friendly type to render in the CLI\n\n\tres := []LicenseDisplay{}\n\n\t\/\/ Filter out expired licenses\n\ti := 0\n\tfor _, s := range subs {\n\t\tif s.State != \"expired\" && s.Expires != nil {\n\t\t\towner := \"\"\n\t\t\tif s.DockerID == u.User.ID {\n\t\t\t\towner = u.User.Username\n\t\t\t} else {\n\t\t\t\townerOrg, err := u.GetOrgByID(s.DockerID)\n\t\t\t\tif err == nil {\n\t\t\t\t\towner = ownerOrg.Orgname\n\t\t\t\t} else {\n\t\t\t\t\towner = \"unknown\"\n\t\t\t\t\tlogrus.Debugf(\"Unable to lookup org ID %s: %s\", s.DockerID, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tcomps := []string{}\n\t\t\tfor _, pc := range s.PricingComponents {\n\t\t\t\tcomps = append(comps, fmt.Sprintf(\"%s:%d\", pc.Name, pc.Value))\n\t\t\t}\n\t\t\tres = append(res, LicenseDisplay{\n\t\t\t\tSubscription: *s,\n\t\t\t\tNum: i,\n\t\t\t\tOwner: owner,\n\t\t\t\tComponentsString: strings.Join(comps, \",\"),\n\t\t\t})\n\t\t\ti++\n\t\t}\n\t}\n\n\treturn res, nil\n}\n\n\/\/ GenerateTrialLicense will generate a new trial license for the specified user or org\nfunc (u HubUser) GenerateTrialLicense(ctx context.Context, targetID string) (*model.IssuedLicense, error) {\n\tsubID, err := u.Client.GenerateNewTrialSubscription(ctx, u.token, targetID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn u.Client.DownloadLicenseFromHub(ctx, u.token, subID)\n}\n\n\/\/ GetIssuedLicense will download a license by ID\nfunc (u HubUser) GetIssuedLicense(ctx context.Context, ID string) (*model.IssuedLicense, error) {\n\treturn u.Client.DownloadLicenseFromHub(ctx, u.token, ID)\n}\n\n\/\/ LoadLocalIssuedLicense will load a local license file\nfunc LoadLocalIssuedLicense(ctx context.Context, filename string) (*model.IssuedLicense, error) {\n\tlclient, err := getClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn doLoadLocalIssuedLicense(ctx, filename, lclient)\n}\n\n\/\/ GetLicenseSummary summarizes the license for the user\nfunc GetLicenseSummary(ctx context.Context, license model.IssuedLicense) (string, error) {\n\tlclient, err := getClient()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcr, err := lclient.VerifyLicense(ctx, license)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn lclient.SummarizeLicense(cr, license.KeyID).String(), nil\n}\n\nfunc doLoadLocalIssuedLicense(ctx context.Context, filename string, lclient licensing.Client) (*model.IssuedLicense, error) {\n\tvar license model.IssuedLicense\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ The file may contain a leading BOM, which will choke the\n\t\/\/ json deserializer.\n\tdata = bytes.TrimPrefix(data, []byte(\"\\xef\\xbb\\xbf\"))\n\n\terr = json.Unmarshal(data, &license)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"malformed license file\")\n\t}\n\n\t_, err = lclient.VerifyLicense(ctx, license)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &license, nil\n}\n\n\/\/ ApplyLicense will store a license on the local system\nfunc ApplyLicense(ctx context.Context, dclient licensing.WrappedDockerClient, license *model.IssuedLicense) error {\n\tinfo, err := dclient.Info(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn licensing.StoreLicense(ctx, dclient, license, info.DockerRootDir)\n}\n<commit_msg>fix subscription filter<commit_after>package licenseutils\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/docker\/licensing\"\n\t\"github.com\/docker\/licensing\/model\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ HubUser wraps a licensing client and holds key information\n\/\/ for a user to avoid multiple lookups\ntype HubUser struct {\n\tClient licensing.Client\n\ttoken string\n\tUser model.User\n\tOrgs []model.Org\n}\n\n\/\/GetOrgByID finds the org by the ID in the users list of orgs\nfunc (u HubUser) GetOrgByID(orgID string) (model.Org, error) {\n\tfor _, org := range u.Orgs {\n\t\tif org.ID == orgID {\n\t\t\treturn org, nil\n\t\t}\n\t}\n\treturn model.Org{}, fmt.Errorf(\"org %s not found\", orgID)\n}\n\nfunc getClient() (licensing.Client, error) {\n\tbaseURI, err := url.Parse(licensingDefaultBaseURI)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn licensing.New(&licensing.Config{\n\t\tBaseURI: *baseURI,\n\t\tHTTPClient: &http.Client{},\n\t\tPublicKeys: licensingPublicKeys,\n\t})\n}\n\n\/\/ Login to the license server and return a client that can be used to look up and download license files or generate new trial licenses\nfunc Login(ctx context.Context, authConfig *types.AuthConfig) (HubUser, error) {\n\tlclient, err := getClient()\n\tif err != nil {\n\t\treturn HubUser{}, err\n\t}\n\n\t\/\/ For licensing we know they must have a valid login session\n\tif authConfig.Username == \"\" {\n\t\treturn HubUser{}, fmt.Errorf(\"you must be logged in to access licenses. Please use 'docker login' then try again\")\n\t}\n\ttoken, err := lclient.LoginViaAuth(ctx, authConfig.Username, authConfig.Password)\n\tif err != nil {\n\t\treturn HubUser{}, err\n\t}\n\tuser, err := lclient.GetHubUserByName(ctx, authConfig.Username)\n\tif err != nil {\n\t\treturn HubUser{}, err\n\t}\n\torgs, err := lclient.GetHubUserOrgs(ctx, token)\n\tif err != nil {\n\t\treturn HubUser{}, err\n\t}\n\treturn HubUser{\n\t\tClient: lclient,\n\t\ttoken: token,\n\t\tUser: *user,\n\t\tOrgs: orgs,\n\t}, nil\n\n}\n\n\/\/ GetAvailableLicenses finds all available licenses for a given account and their orgs\nfunc (u HubUser) GetAvailableLicenses(ctx context.Context) ([]LicenseDisplay, error) {\n\tsubs, err := u.Client.ListSubscriptions(ctx, u.token, u.User.ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, org := range u.Orgs {\n\t\torgSub, err := u.Client.ListSubscriptions(ctx, u.token, org.ID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsubs = append(subs, orgSub...)\n\t}\n\n\t\/\/ Convert the SubscriptionDetails to a more user-friendly type to render in the CLI\n\n\tres := []LicenseDisplay{}\n\n\t\/\/ Filter out expired licenses\n\ti := 0\n\tfor _, s := range subs {\n\t\tif s.State == \"active\" && s.Expires != nil {\n\t\t\towner := \"\"\n\t\t\tif s.DockerID == u.User.ID {\n\t\t\t\towner = u.User.Username\n\t\t\t} else {\n\t\t\t\townerOrg, err := u.GetOrgByID(s.DockerID)\n\t\t\t\tif err == nil {\n\t\t\t\t\towner = ownerOrg.Orgname\n\t\t\t\t} else {\n\t\t\t\t\towner = \"unknown\"\n\t\t\t\t\tlogrus.Debugf(\"Unable to lookup org ID %s: %s\", s.DockerID, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tcomps := []string{}\n\t\t\tfor _, pc := range s.PricingComponents {\n\t\t\t\tcomps = append(comps, fmt.Sprintf(\"%s:%d\", pc.Name, pc.Value))\n\t\t\t}\n\t\t\tres = append(res, LicenseDisplay{\n\t\t\t\tSubscription: *s,\n\t\t\t\tNum: i,\n\t\t\t\tOwner: owner,\n\t\t\t\tComponentsString: strings.Join(comps, \",\"),\n\t\t\t})\n\t\t\ti++\n\t\t}\n\t}\n\n\treturn res, nil\n}\n\n\/\/ GenerateTrialLicense will generate a new trial license for the specified user or org\nfunc (u HubUser) GenerateTrialLicense(ctx context.Context, targetID string) (*model.IssuedLicense, error) {\n\tsubID, err := u.Client.GenerateNewTrialSubscription(ctx, u.token, targetID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn u.Client.DownloadLicenseFromHub(ctx, u.token, subID)\n}\n\n\/\/ GetIssuedLicense will download a license by ID\nfunc (u HubUser) GetIssuedLicense(ctx context.Context, ID string) (*model.IssuedLicense, error) {\n\treturn u.Client.DownloadLicenseFromHub(ctx, u.token, ID)\n}\n\n\/\/ LoadLocalIssuedLicense will load a local license file\nfunc LoadLocalIssuedLicense(ctx context.Context, filename string) (*model.IssuedLicense, error) {\n\tlclient, err := getClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn doLoadLocalIssuedLicense(ctx, filename, lclient)\n}\n\n\/\/ GetLicenseSummary summarizes the license for the user\nfunc GetLicenseSummary(ctx context.Context, license model.IssuedLicense) (string, error) {\n\tlclient, err := getClient()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcr, err := lclient.VerifyLicense(ctx, license)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn lclient.SummarizeLicense(cr, license.KeyID).String(), nil\n}\n\nfunc doLoadLocalIssuedLicense(ctx context.Context, filename string, lclient licensing.Client) (*model.IssuedLicense, error) {\n\tvar license model.IssuedLicense\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ The file may contain a leading BOM, which will choke the\n\t\/\/ json deserializer.\n\tdata = bytes.TrimPrefix(data, []byte(\"\\xef\\xbb\\xbf\"))\n\n\terr = json.Unmarshal(data, &license)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"malformed license file\")\n\t}\n\n\t_, err = lclient.VerifyLicense(ctx, license)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &license, nil\n}\n\n\/\/ ApplyLicense will store a license on the local system\nfunc ApplyLicense(ctx context.Context, dclient licensing.WrappedDockerClient, license *model.IssuedLicense) error {\n\tinfo, err := dclient.Info(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn licensing.StoreLicense(ctx, dclient, license, info.DockerRootDir)\n}\n<|endoftext|>"} {"text":"<commit_before>package source\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"regexp\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unicode\/utf8\"\n)\n\n\/\/ CommentToMarkdown converts comment text to formatted markdown.\n\/\/ The comment was prepared by DocReader,\n\/\/ so it is known not to have leading, trailing blank lines\n\/\/ nor to have trailing spaces at the end of lines.\n\/\/ The comment markers have already been removed.\n\/\/\n\/\/ Each line is converted into a markdown line and empty lines are just converted to\n\/\/ newlines. Heading are prefixed with `### ` to make it a markdown heading.\n\/\/\n\/\/ A span of indented lines retains a 4 space prefix block, with the common indent\n\/\/ prefix removed unless empty, in which case it will be converted to a newline.\n\/\/\n\/\/ URLs in the comment text are converted into links.\nfunc CommentToMarkdown(text string) string {\n\tbuf := &bytes.Buffer{}\n\tcommentToMarkdown(buf, text)\n\treturn buf.String()\n}\n\nvar (\n\tmdNewline = []byte(\"\\n\")\n\tmdHeader = []byte(\"### \")\n\tmdIndent = []byte(\"    \")\n\tmdLinkStart = []byte(\"[\")\n\tmdLinkDiv = []byte(\"](\")\n\tmdLinkEnd = []byte(\")\")\n)\n\nfunc commentToMarkdown(w io.Writer, text string) {\n\tisFirstLine := true\n\tfor _, b := range blocks(text) {\n\t\tswitch b.op {\n\t\tcase opPara:\n\t\t\tif !isFirstLine {\n\t\t\t\tw.Write(mdNewline)\n\t\t\t}\n\n\t\t\tfor _, line := range b.lines {\n\t\t\t\temphasize(w, line, true)\n\t\t\t}\n\t\tcase opHead:\n\t\t\tif !isFirstLine {\n\t\t\t\tw.Write(mdNewline)\n\t\t\t}\n\t\t\tw.Write(mdNewline)\n\n\t\t\tfor _, line := range b.lines {\n\t\t\t\tw.Write(mdHeader)\n\t\t\t\tcommentEscape(w, line, true)\n\t\t\t\tw.Write(mdNewline)\n\t\t\t}\n\t\tcase opPre:\n\t\t\tif !isFirstLine {\n\t\t\t\tw.Write(mdNewline)\n\t\t\t}\n\t\t\tw.Write(mdNewline)\n\n\t\t\tfor _, line := range b.lines {\n\t\t\t\tif isBlank(line) {\n\t\t\t\t\tw.Write(mdNewline)\n\t\t\t\t} else {\n\t\t\t\t\tw.Write(mdIndent)\n\t\t\t\t\tw.Write([]byte(line))\n\t\t\t\t\tw.Write(mdNewline)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tisFirstLine = false\n\t}\n}\n\nconst (\n\tulquo = \"“\"\n\turquo = \"”\"\n)\n\nvar (\n\tmarkdownEscape = regexp.MustCompile(`([\\\\\\x60*{}[\\]()#+\\-.!_>~|\"$%&'\\\/:;<=?@^])`)\n\n\tunicodeQuoteReplacer = strings.NewReplacer(\"``\", ulquo, \"''\", urquo)\n)\n\n\/\/ commentEscape escapes comment text for markdown. If nice is set,\n\/\/ also turn `` into “; and '' into ”;.\nfunc commentEscape(w io.Writer, text string, nice bool) {\n\tif nice {\n\t\ttext = convertQuotes(text)\n\t}\n\ttext = escapeRegex(text)\n\tw.Write([]byte(text))\n}\n\nfunc convertQuotes(text string) string {\n\treturn unicodeQuoteReplacer.Replace(text)\n}\n\nfunc escapeRegex(text string) string {\n\treturn markdownEscape.ReplaceAllString(text, `\\$1`)\n}\n\nfunc emphasize(w io.Writer, line string, nice bool) {\n\tfor {\n\t\tm := matchRx.FindStringSubmatchIndex(line)\n\t\tif m == nil {\n\t\t\tbreak\n\t\t}\n\t\t\/\/ m >= 6 (two parenthesized sub-regexps in matchRx, 1st one is urlRx)\n\n\t\t\/\/ write text before match\n\t\tcommentEscape(w, line[0:m[0]], nice)\n\n\t\t\/\/ adjust match for URLs\n\t\tmatch := line[m[0]:m[1]]\n\t\tif strings.Contains(match, \":\/\/\") {\n\t\t\tm0, m1 := m[0], m[1]\n\t\t\tfor _, s := range []string{\"()\", \"{}\", \"[]\"} {\n\t\t\t\topen, close := s[:1], s[1:] \/\/ E.g., \"(\" and \")\"\n\t\t\t\t\/\/ require opening parentheses before closing parentheses (#22285)\n\t\t\t\tif i := strings.Index(match, close); i >= 0 && i < strings.Index(match, open) {\n\t\t\t\t\tm1 = m0 + i\n\t\t\t\t\tmatch = line[m0:m1]\n\t\t\t\t}\n\t\t\t\t\/\/ require balanced pairs of parentheses (#5043)\n\t\t\t\tfor i := 0; strings.Count(match, open) != strings.Count(match, close) && i < 10; i++ {\n\t\t\t\t\tm1 = strings.LastIndexAny(line[:m1], s)\n\t\t\t\t\tmatch = line[m0:m1]\n\t\t\t\t}\n\t\t\t}\n\t\t\tif m1 != m[1] {\n\t\t\t\t\/\/ redo matching with shortened line for correct indices\n\t\t\t\tm = matchRx.FindStringSubmatchIndex(line[:m[0]+len(match)])\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Following code has been modified from go\/doc since words is always\n\t\t\/\/ nil. All html formatting has also been transformed into markdown formatting\n\n\t\t\/\/ analyze match\n\t\turl := \"\"\n\t\tif m[2] >= 0 {\n\t\t\turl = match\n\t\t}\n\n\t\t\/\/ write match\n\t\tif len(url) > 0 {\n\t\t\tw.Write(mdLinkStart)\n\t\t}\n\n\t\tcommentEscape(w, match, nice)\n\n\t\tif len(url) > 0 {\n\t\t\tw.Write(mdLinkDiv)\n\t\t\tw.Write([]byte(urlReplacer.Replace(url)))\n\t\t\tw.Write(mdLinkEnd)\n\t\t}\n\n\t\t\/\/ advance\n\t\tline = line[m[1]:]\n\t}\n\tcommentEscape(w, line, nice)\n}\n\n\/\/ Everything from here on is a copy of go\/doc\/comment.go\n\nconst (\n\t\/\/ Regexp for Go identifiers\n\tidentRx = `[\\pL_][\\pL_0-9]*`\n\n\t\/\/ Regexp for URLs\n\t\/\/ Match parens, and check later for balance - see #5043, #22285\n\t\/\/ Match .,:;?! within path, but not at end - see #18139, #16565\n\t\/\/ This excludes some rare yet valid urls ending in common punctuation\n\t\/\/ in order to allow sentences ending in URLs.\n\n\t\/\/ protocol (required) e.g. http\n\tprotoPart = `(https?|ftp|file|gopher|mailto|nntp)`\n\t\/\/ host (required) e.g. www.example.com or [::1]:8080\n\thostPart = `([a-zA-Z0-9_@\\-.\\[\\]:]+)`\n\t\/\/ path+query+fragment (optional) e.g. \/path\/index.html?q=foo#bar\n\tpathPart = `([.,:;?!]*[a-zA-Z0-9$'()*+&#=@~_\/\\-\\[\\]%])*`\n\n\turlRx = protoPart + `:\/\/` + hostPart + pathPart\n)\n\nvar (\n\tmatchRx = regexp.MustCompile(`(` + urlRx + `)|(` + identRx + `)`)\n\turlReplacer = strings.NewReplacer(`(`, `\\(`, `)`, `\\)`)\n)\n\nfunc indentLen(s string) int {\n\ti := 0\n\tfor i < len(s) && (s[i] == ' ' || s[i] == '\\t') {\n\t\ti++\n\t}\n\treturn i\n}\n\nfunc isBlank(s string) bool {\n\treturn len(s) == 0 || (len(s) == 1 && s[0] == '\\n')\n}\n\nfunc commonPrefix(a, b string) string {\n\ti := 0\n\tfor i < len(a) && i < len(b) && a[i] == b[i] {\n\t\ti++\n\t}\n\treturn a[0:i]\n}\n\nfunc unindent(block []string) {\n\tif len(block) == 0 {\n\t\treturn\n\t}\n\n\t\/\/ compute maximum common white prefix\n\tprefix := block[0][0:indentLen(block[0])]\n\tfor _, line := range block {\n\t\tif !isBlank(line) {\n\t\t\tprefix = commonPrefix(prefix, line[0:indentLen(line)])\n\t\t}\n\t}\n\tn := len(prefix)\n\n\t\/\/ remove\n\tfor i, line := range block {\n\t\tif !isBlank(line) {\n\t\t\tblock[i] = line[n:]\n\t\t}\n\t}\n}\n\n\/\/ heading returns the trimmed line if it passes as a section heading;\n\/\/ otherwise it returns the empty string.\nfunc heading(line string) string {\n\tline = strings.TrimSpace(line)\n\tif len(line) == 0 {\n\t\treturn \"\"\n\t}\n\n\t\/\/ a heading must start with an uppercase letter\n\tr, _ := utf8.DecodeRuneInString(line)\n\tif !unicode.IsLetter(r) || !unicode.IsUpper(r) {\n\t\treturn \"\"\n\t}\n\n\t\/\/ it must end in a letter or digit:\n\tr, _ = utf8.DecodeLastRuneInString(line)\n\tif !unicode.IsLetter(r) && !unicode.IsDigit(r) {\n\t\treturn \"\"\n\t}\n\n\t\/\/ exclude lines with illegal characters. we allow \"(),\"\n\tif strings.ContainsAny(line, \";:!?+*\/=[]{}_^°&§~%#@<\\\">\\\\\") {\n\t\treturn \"\"\n\t}\n\n\t\/\/ allow \"'\" for possessive \"'s\" only\n\tfor b := line; ; {\n\t\ti := strings.IndexRune(b, '\\'')\n\t\tif i < 0 {\n\t\t\tbreak\n\t\t}\n\t\tif i+1 >= len(b) || b[i+1] != 's' || (i+2 < len(b) && b[i+2] != ' ') {\n\t\t\treturn \"\" \/\/ not followed by \"s \"\n\t\t}\n\t\tb = b[i+2:]\n\t}\n\n\t\/\/ allow \".\" when followed by non-space\n\tfor b := line; ; {\n\t\ti := strings.IndexRune(b, '.')\n\t\tif i < 0 {\n\t\t\tbreak\n\t\t}\n\t\tif i+1 >= len(b) || b[i+1] == ' ' {\n\t\t\treturn \"\" \/\/ not followed by non-space\n\t\t}\n\t\tb = b[i+1:]\n\t}\n\n\treturn line\n}\n\ntype op int\n\nconst (\n\topPara op = iota\n\topHead\n\topPre\n)\n\ntype block struct {\n\top op\n\tlines []string\n}\n\nfunc blocks(text string) []block {\n\tvar (\n\t\tout []block\n\t\tpara []string\n\n\t\tlastWasBlank = false\n\t\tlastWasHeading = false\n\t)\n\n\tclose := func() {\n\t\tif para != nil {\n\t\t\tout = append(out, block{opPara, para})\n\t\t\tpara = nil\n\t\t}\n\t}\n\n\tlines := strings.SplitAfter(text, \"\\n\")\n\tunindent(lines)\n\tfor i := 0; i < len(lines); {\n\t\tline := lines[i]\n\t\tif isBlank(line) {\n\t\t\t\/\/ close paragraph\n\t\t\tclose()\n\t\t\ti++\n\t\t\tlastWasBlank = true\n\t\t\tcontinue\n\t\t}\n\t\tif indentLen(line) > 0 {\n\t\t\t\/\/ close paragraph\n\t\t\tclose()\n\n\t\t\t\/\/ count indented or blank lines\n\t\t\tj := i + 1\n\t\t\tfor j < len(lines) && (isBlank(lines[j]) || indentLen(lines[j]) > 0) {\n\t\t\t\tj++\n\t\t\t}\n\t\t\t\/\/ but not trailing blank lines\n\t\t\tfor j > i && isBlank(lines[j-1]) {\n\t\t\t\tj--\n\t\t\t}\n\t\t\tpre := lines[i:j]\n\t\t\ti = j\n\n\t\t\tunindent(pre)\n\n\t\t\t\/\/ put those lines in a pre block\n\t\t\tout = append(out, block{opPre, pre})\n\t\t\tlastWasHeading = false\n\t\t\tcontinue\n\t\t}\n\n\t\tif lastWasBlank && !lastWasHeading && i+2 < len(lines) &&\n\t\t\tisBlank(lines[i+1]) && !isBlank(lines[i+2]) && indentLen(lines[i+2]) == 0 {\n\t\t\t\/\/ current line is non-blank, surrounded by blank lines\n\t\t\t\/\/ and the next non-blank line is not indented: this\n\t\t\t\/\/ might be a heading.\n\t\t\tif head := heading(line); head != \"\" {\n\t\t\t\tclose()\n\t\t\t\tout = append(out, block{opHead, []string{head}})\n\t\t\t\ti += 2\n\t\t\t\tlastWasHeading = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t\/\/ open paragraph\n\t\tlastWasBlank = false\n\t\tlastWasHeading = false\n\t\tpara = append(para, lines[i])\n\t\ti++\n\t}\n\tclose()\n\n\treturn out\n}\n<commit_msg>internal\/lsp\/source: use space character in markdown formatting<commit_after>package source\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"regexp\"\n\t\"strings\"\n\t\"unicode\"\n\t\"unicode\/utf8\"\n)\n\n\/\/ CommentToMarkdown converts comment text to formatted markdown.\n\/\/ The comment was prepared by DocReader,\n\/\/ so it is known not to have leading, trailing blank lines\n\/\/ nor to have trailing spaces at the end of lines.\n\/\/ The comment markers have already been removed.\n\/\/\n\/\/ Each line is converted into a markdown line and empty lines are just converted to\n\/\/ newlines. Heading are prefixed with `### ` to make it a markdown heading.\n\/\/\n\/\/ A span of indented lines retains a 4 space prefix block, with the common indent\n\/\/ prefix removed unless empty, in which case it will be converted to a newline.\n\/\/\n\/\/ URLs in the comment text are converted into links.\nfunc CommentToMarkdown(text string) string {\n\tbuf := &bytes.Buffer{}\n\tcommentToMarkdown(buf, text)\n\treturn buf.String()\n}\n\nvar (\n\tmdNewline = []byte(\"\\n\")\n\tmdHeader = []byte(\"### \")\n\tmdIndent = []byte(\" \")\n\tmdLinkStart = []byte(\"[\")\n\tmdLinkDiv = []byte(\"](\")\n\tmdLinkEnd = []byte(\")\")\n)\n\nfunc commentToMarkdown(w io.Writer, text string) {\n\tisFirstLine := true\n\tfor _, b := range blocks(text) {\n\t\tswitch b.op {\n\t\tcase opPara:\n\t\t\tif !isFirstLine {\n\t\t\t\tw.Write(mdNewline)\n\t\t\t}\n\n\t\t\tfor _, line := range b.lines {\n\t\t\t\temphasize(w, line, true)\n\t\t\t}\n\t\tcase opHead:\n\t\t\tif !isFirstLine {\n\t\t\t\tw.Write(mdNewline)\n\t\t\t}\n\t\t\tw.Write(mdNewline)\n\n\t\t\tfor _, line := range b.lines {\n\t\t\t\tw.Write(mdHeader)\n\t\t\t\tcommentEscape(w, line, true)\n\t\t\t\tw.Write(mdNewline)\n\t\t\t}\n\t\tcase opPre:\n\t\t\tif !isFirstLine {\n\t\t\t\tw.Write(mdNewline)\n\t\t\t}\n\t\t\tw.Write(mdNewline)\n\n\t\t\tfor _, line := range b.lines {\n\t\t\t\tif isBlank(line) {\n\t\t\t\t\tw.Write(mdNewline)\n\t\t\t\t} else {\n\t\t\t\t\tw.Write(mdIndent)\n\t\t\t\t\tw.Write([]byte(line))\n\t\t\t\t\tw.Write(mdNewline)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tisFirstLine = false\n\t}\n}\n\nconst (\n\tulquo = \"“\"\n\turquo = \"”\"\n)\n\nvar (\n\tmarkdownEscape = regexp.MustCompile(`([\\\\\\x60*{}[\\]()#+\\-.!_>~|\"$%&'\\\/:;<=?@^])`)\n\n\tunicodeQuoteReplacer = strings.NewReplacer(\"``\", ulquo, \"''\", urquo)\n)\n\n\/\/ commentEscape escapes comment text for markdown. If nice is set,\n\/\/ also turn `` into “; and '' into ”;.\nfunc commentEscape(w io.Writer, text string, nice bool) {\n\tif nice {\n\t\ttext = convertQuotes(text)\n\t}\n\ttext = escapeRegex(text)\n\tw.Write([]byte(text))\n}\n\nfunc convertQuotes(text string) string {\n\treturn unicodeQuoteReplacer.Replace(text)\n}\n\nfunc escapeRegex(text string) string {\n\treturn markdownEscape.ReplaceAllString(text, `\\$1`)\n}\n\nfunc emphasize(w io.Writer, line string, nice bool) {\n\tfor {\n\t\tm := matchRx.FindStringSubmatchIndex(line)\n\t\tif m == nil {\n\t\t\tbreak\n\t\t}\n\t\t\/\/ m >= 6 (two parenthesized sub-regexps in matchRx, 1st one is urlRx)\n\n\t\t\/\/ write text before match\n\t\tcommentEscape(w, line[0:m[0]], nice)\n\n\t\t\/\/ adjust match for URLs\n\t\tmatch := line[m[0]:m[1]]\n\t\tif strings.Contains(match, \":\/\/\") {\n\t\t\tm0, m1 := m[0], m[1]\n\t\t\tfor _, s := range []string{\"()\", \"{}\", \"[]\"} {\n\t\t\t\topen, close := s[:1], s[1:] \/\/ E.g., \"(\" and \")\"\n\t\t\t\t\/\/ require opening parentheses before closing parentheses (#22285)\n\t\t\t\tif i := strings.Index(match, close); i >= 0 && i < strings.Index(match, open) {\n\t\t\t\t\tm1 = m0 + i\n\t\t\t\t\tmatch = line[m0:m1]\n\t\t\t\t}\n\t\t\t\t\/\/ require balanced pairs of parentheses (#5043)\n\t\t\t\tfor i := 0; strings.Count(match, open) != strings.Count(match, close) && i < 10; i++ {\n\t\t\t\t\tm1 = strings.LastIndexAny(line[:m1], s)\n\t\t\t\t\tmatch = line[m0:m1]\n\t\t\t\t}\n\t\t\t}\n\t\t\tif m1 != m[1] {\n\t\t\t\t\/\/ redo matching with shortened line for correct indices\n\t\t\t\tm = matchRx.FindStringSubmatchIndex(line[:m[0]+len(match)])\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Following code has been modified from go\/doc since words is always\n\t\t\/\/ nil. All html formatting has also been transformed into markdown formatting\n\n\t\t\/\/ analyze match\n\t\turl := \"\"\n\t\tif m[2] >= 0 {\n\t\t\turl = match\n\t\t}\n\n\t\t\/\/ write match\n\t\tif len(url) > 0 {\n\t\t\tw.Write(mdLinkStart)\n\t\t}\n\n\t\tcommentEscape(w, match, nice)\n\n\t\tif len(url) > 0 {\n\t\t\tw.Write(mdLinkDiv)\n\t\t\tw.Write([]byte(urlReplacer.Replace(url)))\n\t\t\tw.Write(mdLinkEnd)\n\t\t}\n\n\t\t\/\/ advance\n\t\tline = line[m[1]:]\n\t}\n\tcommentEscape(w, line, nice)\n}\n\n\/\/ Everything from here on is a copy of go\/doc\/comment.go\n\nconst (\n\t\/\/ Regexp for Go identifiers\n\tidentRx = `[\\pL_][\\pL_0-9]*`\n\n\t\/\/ Regexp for URLs\n\t\/\/ Match parens, and check later for balance - see #5043, #22285\n\t\/\/ Match .,:;?! within path, but not at end - see #18139, #16565\n\t\/\/ This excludes some rare yet valid urls ending in common punctuation\n\t\/\/ in order to allow sentences ending in URLs.\n\n\t\/\/ protocol (required) e.g. http\n\tprotoPart = `(https?|ftp|file|gopher|mailto|nntp)`\n\t\/\/ host (required) e.g. www.example.com or [::1]:8080\n\thostPart = `([a-zA-Z0-9_@\\-.\\[\\]:]+)`\n\t\/\/ path+query+fragment (optional) e.g. \/path\/index.html?q=foo#bar\n\tpathPart = `([.,:;?!]*[a-zA-Z0-9$'()*+&#=@~_\/\\-\\[\\]%])*`\n\n\turlRx = protoPart + `:\/\/` + hostPart + pathPart\n)\n\nvar (\n\tmatchRx = regexp.MustCompile(`(` + urlRx + `)|(` + identRx + `)`)\n\turlReplacer = strings.NewReplacer(`(`, `\\(`, `)`, `\\)`)\n)\n\nfunc indentLen(s string) int {\n\ti := 0\n\tfor i < len(s) && (s[i] == ' ' || s[i] == '\\t') {\n\t\ti++\n\t}\n\treturn i\n}\n\nfunc isBlank(s string) bool {\n\treturn len(s) == 0 || (len(s) == 1 && s[0] == '\\n')\n}\n\nfunc commonPrefix(a, b string) string {\n\ti := 0\n\tfor i < len(a) && i < len(b) && a[i] == b[i] {\n\t\ti++\n\t}\n\treturn a[0:i]\n}\n\nfunc unindent(block []string) {\n\tif len(block) == 0 {\n\t\treturn\n\t}\n\n\t\/\/ compute maximum common white prefix\n\tprefix := block[0][0:indentLen(block[0])]\n\tfor _, line := range block {\n\t\tif !isBlank(line) {\n\t\t\tprefix = commonPrefix(prefix, line[0:indentLen(line)])\n\t\t}\n\t}\n\tn := len(prefix)\n\n\t\/\/ remove\n\tfor i, line := range block {\n\t\tif !isBlank(line) {\n\t\t\tblock[i] = line[n:]\n\t\t}\n\t}\n}\n\n\/\/ heading returns the trimmed line if it passes as a section heading;\n\/\/ otherwise it returns the empty string.\nfunc heading(line string) string {\n\tline = strings.TrimSpace(line)\n\tif len(line) == 0 {\n\t\treturn \"\"\n\t}\n\n\t\/\/ a heading must start with an uppercase letter\n\tr, _ := utf8.DecodeRuneInString(line)\n\tif !unicode.IsLetter(r) || !unicode.IsUpper(r) {\n\t\treturn \"\"\n\t}\n\n\t\/\/ it must end in a letter or digit:\n\tr, _ = utf8.DecodeLastRuneInString(line)\n\tif !unicode.IsLetter(r) && !unicode.IsDigit(r) {\n\t\treturn \"\"\n\t}\n\n\t\/\/ exclude lines with illegal characters. we allow \"(),\"\n\tif strings.ContainsAny(line, \";:!?+*\/=[]{}_^°&§~%#@<\\\">\\\\\") {\n\t\treturn \"\"\n\t}\n\n\t\/\/ allow \"'\" for possessive \"'s\" only\n\tfor b := line; ; {\n\t\ti := strings.IndexRune(b, '\\'')\n\t\tif i < 0 {\n\t\t\tbreak\n\t\t}\n\t\tif i+1 >= len(b) || b[i+1] != 's' || (i+2 < len(b) && b[i+2] != ' ') {\n\t\t\treturn \"\" \/\/ not followed by \"s \"\n\t\t}\n\t\tb = b[i+2:]\n\t}\n\n\t\/\/ allow \".\" when followed by non-space\n\tfor b := line; ; {\n\t\ti := strings.IndexRune(b, '.')\n\t\tif i < 0 {\n\t\t\tbreak\n\t\t}\n\t\tif i+1 >= len(b) || b[i+1] == ' ' {\n\t\t\treturn \"\" \/\/ not followed by non-space\n\t\t}\n\t\tb = b[i+1:]\n\t}\n\n\treturn line\n}\n\ntype op int\n\nconst (\n\topPara op = iota\n\topHead\n\topPre\n)\n\ntype block struct {\n\top op\n\tlines []string\n}\n\nfunc blocks(text string) []block {\n\tvar (\n\t\tout []block\n\t\tpara []string\n\n\t\tlastWasBlank = false\n\t\tlastWasHeading = false\n\t)\n\n\tclose := func() {\n\t\tif para != nil {\n\t\t\tout = append(out, block{opPara, para})\n\t\t\tpara = nil\n\t\t}\n\t}\n\n\tlines := strings.SplitAfter(text, \"\\n\")\n\tunindent(lines)\n\tfor i := 0; i < len(lines); {\n\t\tline := lines[i]\n\t\tif isBlank(line) {\n\t\t\t\/\/ close paragraph\n\t\t\tclose()\n\t\t\ti++\n\t\t\tlastWasBlank = true\n\t\t\tcontinue\n\t\t}\n\t\tif indentLen(line) > 0 {\n\t\t\t\/\/ close paragraph\n\t\t\tclose()\n\n\t\t\t\/\/ count indented or blank lines\n\t\t\tj := i + 1\n\t\t\tfor j < len(lines) && (isBlank(lines[j]) || indentLen(lines[j]) > 0) {\n\t\t\t\tj++\n\t\t\t}\n\t\t\t\/\/ but not trailing blank lines\n\t\t\tfor j > i && isBlank(lines[j-1]) {\n\t\t\t\tj--\n\t\t\t}\n\t\t\tpre := lines[i:j]\n\t\t\ti = j\n\n\t\t\tunindent(pre)\n\n\t\t\t\/\/ put those lines in a pre block\n\t\t\tout = append(out, block{opPre, pre})\n\t\t\tlastWasHeading = false\n\t\t\tcontinue\n\t\t}\n\n\t\tif lastWasBlank && !lastWasHeading && i+2 < len(lines) &&\n\t\t\tisBlank(lines[i+1]) && !isBlank(lines[i+2]) && indentLen(lines[i+2]) == 0 {\n\t\t\t\/\/ current line is non-blank, surrounded by blank lines\n\t\t\t\/\/ and the next non-blank line is not indented: this\n\t\t\t\/\/ might be a heading.\n\t\t\tif head := heading(line); head != \"\" {\n\t\t\t\tclose()\n\t\t\t\tout = append(out, block{opHead, []string{head}})\n\t\t\t\ti += 2\n\t\t\t\tlastWasHeading = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t\/\/ open paragraph\n\t\tlastWasBlank = false\n\t\tlastWasHeading = false\n\t\tpara = append(para, lines[i])\n\t\ti++\n\t}\n\tclose()\n\n\treturn out\n}\n<|endoftext|>"} {"text":"<commit_before>package restic\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"sync\"\n\n\t\"github.com\/restic\/restic\/internal\/debug\"\n\t\"github.com\/restic\/restic\/internal\/ui\/progress\"\n\t\"golang.org\/x\/sync\/errgroup\"\n)\n\nconst streamTreeParallelism = 5\n\n\/\/ TreeItem is used to return either an error or the tree for a tree id\ntype TreeItem struct {\n\tID\n\tError error\n\t*Tree\n}\n\ntype trackedTreeItem struct {\n\tTreeItem\n\trootIdx int\n}\n\ntype trackedID struct {\n\tID\n\trootIdx int\n}\n\n\/\/ loadTreeWorker loads trees from repo and sends them to out.\nfunc loadTreeWorker(ctx context.Context, repo TreeLoader,\n\tin <-chan trackedID, out chan<- trackedTreeItem) {\n\n\tfor treeID := range in {\n\t\ttree, err := repo.LoadTree(ctx, treeID.ID)\n\t\tdebug.Log(\"load tree %v (%v) returned err: %v\", tree, treeID, err)\n\t\tjob := trackedTreeItem{TreeItem: TreeItem{ID: treeID.ID, Error: err, Tree: tree}, rootIdx: treeID.rootIdx}\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase out <- job:\n\t\t}\n\t}\n}\n\nfunc filterTrees(ctx context.Context, trees IDs, loaderChan chan<- trackedID,\n\tin <-chan trackedTreeItem, out chan<- TreeItem, skip func(tree ID) bool, p *progress.Counter) {\n\n\tvar (\n\t\tinCh = in\n\t\toutCh chan<- TreeItem\n\t\tloadCh chan<- trackedID\n\t\tjob TreeItem\n\t\tnextTreeID trackedID\n\t\toutstandingLoadTreeJobs = 0\n\t)\n\trootCounter := make([]int, len(trees))\n\tbacklog := make([]trackedID, 0, len(trees))\n\tfor idx, id := range trees {\n\t\tbacklog = append(backlog, trackedID{ID: id, rootIdx: idx})\n\t\trootCounter[idx] = 1\n\t}\n\n\tfor {\n\t\tif loadCh == nil && len(backlog) > 0 {\n\t\t\t\/\/ process last added ids first, that is traverse the tree in depth-first order\n\t\t\tln := len(backlog) - 1\n\t\t\tnextTreeID, backlog = backlog[ln], backlog[:ln]\n\n\t\t\tif skip(nextTreeID.ID) {\n\t\t\t\trootCounter[nextTreeID.rootIdx]--\n\t\t\t\tif p != nil && rootCounter[nextTreeID.rootIdx] == 0 {\n\t\t\t\t\tp.Add(1)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tloadCh = loaderChan\n\t\t}\n\n\t\tif loadCh == nil && outCh == nil && outstandingLoadTreeJobs == 0 {\n\t\t\tdebug.Log(\"backlog is empty, all channels nil, exiting\")\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\n\t\tcase loadCh <- nextTreeID:\n\t\t\toutstandingLoadTreeJobs++\n\t\t\tloadCh = nil\n\n\t\tcase j, ok := <-inCh:\n\t\t\tif !ok {\n\t\t\t\tdebug.Log(\"input channel closed\")\n\t\t\t\tinCh = nil\n\t\t\t\tin = nil\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\toutstandingLoadTreeJobs--\n\t\t\trootCounter[j.rootIdx]--\n\n\t\t\tdebug.Log(\"input job tree %v\", j.ID)\n\n\t\t\tif j.Error != nil {\n\t\t\t\tdebug.Log(\"received job with error: %v (tree %v, ID %v)\", j.Error, j.Tree, j.ID)\n\t\t\t} else if j.Tree == nil {\n\t\t\t\tdebug.Log(\"received job with nil tree pointer: %v (ID %v)\", j.Error, j.ID)\n\t\t\t\t\/\/ send a new job with the new error instead of the old one\n\t\t\t\tj = trackedTreeItem{TreeItem: TreeItem{ID: j.ID, Error: errors.New(\"tree is nil and error is nil\")}, rootIdx: j.rootIdx}\n\t\t\t} else {\n\t\t\t\tsubtrees := j.Tree.Subtrees()\n\t\t\t\tdebug.Log(\"subtrees for tree %v: %v\", j.ID, subtrees)\n\t\t\t\t\/\/ iterate backwards over subtree to compensate backwards traversal order of nextTreeID selection\n\t\t\t\tfor i := len(subtrees) - 1; i >= 0; i-- {\n\t\t\t\t\tid := subtrees[i]\n\t\t\t\t\tif id.IsNull() {\n\t\t\t\t\t\t\/\/ We do not need to raise this error here, it is\n\t\t\t\t\t\t\/\/ checked when the tree is checked. Just make sure\n\t\t\t\t\t\t\/\/ that we do not add any null IDs to the backlog.\n\t\t\t\t\t\tdebug.Log(\"tree %v has nil subtree\", j.ID)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tbacklog = append(backlog, trackedID{ID: id, rootIdx: j.rootIdx})\n\t\t\t\t\trootCounter[j.rootIdx]++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif p != nil && rootCounter[j.rootIdx] == 0 {\n\t\t\t\tp.Add(1)\n\t\t\t}\n\n\t\t\tjob = j.TreeItem\n\t\t\toutCh = out\n\t\t\tinCh = nil\n\n\t\tcase outCh <- job:\n\t\t\tdebug.Log(\"tree sent to process: %v\", job.ID)\n\t\t\toutCh = nil\n\t\t\tinCh = in\n\t\t}\n\t}\n}\n\n\/\/ StreamTrees iteratively loads the given trees and their subtrees. The skip method\n\/\/ is guaranteed to always be called from the same goroutine.\nfunc StreamTrees(ctx context.Context, wg *errgroup.Group, repo TreeLoader, trees IDs, skip func(tree ID) bool, p *progress.Counter) <-chan TreeItem {\n\tloaderChan := make(chan trackedID)\n\tloadedTreeChan := make(chan trackedTreeItem)\n\ttreeStream := make(chan TreeItem)\n\n\tvar loadTreeWg sync.WaitGroup\n\n\tfor i := 0; i < streamTreeParallelism; i++ {\n\t\tloadTreeWg.Add(1)\n\t\twg.Go(func() error {\n\t\t\tdefer loadTreeWg.Done()\n\t\t\tloadTreeWorker(ctx, repo, loaderChan, loadedTreeChan)\n\t\t\treturn nil\n\t\t})\n\t}\n\n\t\/\/ close once all loadTreeWorkers have completed\n\twg.Go(func() error {\n\t\tloadTreeWg.Wait()\n\t\tclose(loadedTreeChan)\n\t\treturn nil\n\t})\n\n\twg.Go(func() error {\n\t\tdefer close(loaderChan)\n\t\tdefer close(treeStream)\n\t\tfilterTrees(ctx, trees, loaderChan, loadedTreeChan, treeStream, skip, p)\n\t\treturn nil\n\t})\n\treturn treeStream\n}\n<commit_msg>restic: add comment about StreamTrees shutdown<commit_after>package restic\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"sync\"\n\n\t\"github.com\/restic\/restic\/internal\/debug\"\n\t\"github.com\/restic\/restic\/internal\/ui\/progress\"\n\t\"golang.org\/x\/sync\/errgroup\"\n)\n\nconst streamTreeParallelism = 5\n\n\/\/ TreeItem is used to return either an error or the tree for a tree id\ntype TreeItem struct {\n\tID\n\tError error\n\t*Tree\n}\n\ntype trackedTreeItem struct {\n\tTreeItem\n\trootIdx int\n}\n\ntype trackedID struct {\n\tID\n\trootIdx int\n}\n\n\/\/ loadTreeWorker loads trees from repo and sends them to out.\nfunc loadTreeWorker(ctx context.Context, repo TreeLoader,\n\tin <-chan trackedID, out chan<- trackedTreeItem) {\n\n\tfor treeID := range in {\n\t\ttree, err := repo.LoadTree(ctx, treeID.ID)\n\t\tdebug.Log(\"load tree %v (%v) returned err: %v\", tree, treeID, err)\n\t\tjob := trackedTreeItem{TreeItem: TreeItem{ID: treeID.ID, Error: err, Tree: tree}, rootIdx: treeID.rootIdx}\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tcase out <- job:\n\t\t}\n\t}\n}\n\nfunc filterTrees(ctx context.Context, trees IDs, loaderChan chan<- trackedID,\n\tin <-chan trackedTreeItem, out chan<- TreeItem, skip func(tree ID) bool, p *progress.Counter) {\n\n\tvar (\n\t\tinCh = in\n\t\toutCh chan<- TreeItem\n\t\tloadCh chan<- trackedID\n\t\tjob TreeItem\n\t\tnextTreeID trackedID\n\t\toutstandingLoadTreeJobs = 0\n\t)\n\trootCounter := make([]int, len(trees))\n\tbacklog := make([]trackedID, 0, len(trees))\n\tfor idx, id := range trees {\n\t\tbacklog = append(backlog, trackedID{ID: id, rootIdx: idx})\n\t\trootCounter[idx] = 1\n\t}\n\n\tfor {\n\t\tif loadCh == nil && len(backlog) > 0 {\n\t\t\t\/\/ process last added ids first, that is traverse the tree in depth-first order\n\t\t\tln := len(backlog) - 1\n\t\t\tnextTreeID, backlog = backlog[ln], backlog[:ln]\n\n\t\t\tif skip(nextTreeID.ID) {\n\t\t\t\trootCounter[nextTreeID.rootIdx]--\n\t\t\t\tif p != nil && rootCounter[nextTreeID.rootIdx] == 0 {\n\t\t\t\t\tp.Add(1)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tloadCh = loaderChan\n\t\t}\n\n\t\tif loadCh == nil && outCh == nil && outstandingLoadTreeJobs == 0 {\n\t\t\tdebug.Log(\"backlog is empty, all channels nil, exiting\")\n\t\t\treturn\n\t\t}\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\n\t\tcase loadCh <- nextTreeID:\n\t\t\toutstandingLoadTreeJobs++\n\t\t\tloadCh = nil\n\n\t\tcase j, ok := <-inCh:\n\t\t\tif !ok {\n\t\t\t\tdebug.Log(\"input channel closed\")\n\t\t\t\tinCh = nil\n\t\t\t\tin = nil\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\toutstandingLoadTreeJobs--\n\t\t\trootCounter[j.rootIdx]--\n\n\t\t\tdebug.Log(\"input job tree %v\", j.ID)\n\n\t\t\tif j.Error != nil {\n\t\t\t\tdebug.Log(\"received job with error: %v (tree %v, ID %v)\", j.Error, j.Tree, j.ID)\n\t\t\t} else if j.Tree == nil {\n\t\t\t\tdebug.Log(\"received job with nil tree pointer: %v (ID %v)\", j.Error, j.ID)\n\t\t\t\t\/\/ send a new job with the new error instead of the old one\n\t\t\t\tj = trackedTreeItem{TreeItem: TreeItem{ID: j.ID, Error: errors.New(\"tree is nil and error is nil\")}, rootIdx: j.rootIdx}\n\t\t\t} else {\n\t\t\t\tsubtrees := j.Tree.Subtrees()\n\t\t\t\tdebug.Log(\"subtrees for tree %v: %v\", j.ID, subtrees)\n\t\t\t\t\/\/ iterate backwards over subtree to compensate backwards traversal order of nextTreeID selection\n\t\t\t\tfor i := len(subtrees) - 1; i >= 0; i-- {\n\t\t\t\t\tid := subtrees[i]\n\t\t\t\t\tif id.IsNull() {\n\t\t\t\t\t\t\/\/ We do not need to raise this error here, it is\n\t\t\t\t\t\t\/\/ checked when the tree is checked. Just make sure\n\t\t\t\t\t\t\/\/ that we do not add any null IDs to the backlog.\n\t\t\t\t\t\tdebug.Log(\"tree %v has nil subtree\", j.ID)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tbacklog = append(backlog, trackedID{ID: id, rootIdx: j.rootIdx})\n\t\t\t\t\trootCounter[j.rootIdx]++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif p != nil && rootCounter[j.rootIdx] == 0 {\n\t\t\t\tp.Add(1)\n\t\t\t}\n\n\t\t\tjob = j.TreeItem\n\t\t\toutCh = out\n\t\t\tinCh = nil\n\n\t\tcase outCh <- job:\n\t\t\tdebug.Log(\"tree sent to process: %v\", job.ID)\n\t\t\toutCh = nil\n\t\t\tinCh = in\n\t\t}\n\t}\n}\n\n\/\/ StreamTrees iteratively loads the given trees and their subtrees. The skip method\n\/\/ is guaranteed to always be called from the same goroutine. To shutdown the started\n\/\/ goroutines, either read all items from the channel or cancel the context. Then `Wait()`\n\/\/ on the errgroup until all goroutines were stopped.\nfunc StreamTrees(ctx context.Context, wg *errgroup.Group, repo TreeLoader, trees IDs, skip func(tree ID) bool, p *progress.Counter) <-chan TreeItem {\n\tloaderChan := make(chan trackedID)\n\tloadedTreeChan := make(chan trackedTreeItem)\n\ttreeStream := make(chan TreeItem)\n\n\tvar loadTreeWg sync.WaitGroup\n\n\tfor i := 0; i < streamTreeParallelism; i++ {\n\t\tloadTreeWg.Add(1)\n\t\twg.Go(func() error {\n\t\t\tdefer loadTreeWg.Done()\n\t\t\tloadTreeWorker(ctx, repo, loaderChan, loadedTreeChan)\n\t\t\treturn nil\n\t\t})\n\t}\n\n\t\/\/ close once all loadTreeWorkers have completed\n\twg.Go(func() error {\n\t\tloadTreeWg.Wait()\n\t\tclose(loadedTreeChan)\n\t\treturn nil\n\t})\n\n\twg.Go(func() error {\n\t\tdefer close(loaderChan)\n\t\tdefer close(treeStream)\n\t\tfilterTrees(ctx, trees, loaderChan, loadedTreeChan, treeStream, skip, p)\n\t\treturn nil\n\t})\n\treturn treeStream\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2017 Circonus, Inc. <support@circonus.com>\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\/\/\n\npackage reverse\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com\/circonus-labs\/circonus-agent\/internal\/config\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/rs\/zerolog\/log\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ startReverse manages the actual reverse connection to the Circonus broker\nfunc (c *Connection) startReverse() error {\n\tfor {\n\t\tconn, cerr := c.connect()\n\t\tif cerr != nil {\n\t\t\tif cerr.fatal {\n\t\t\t\tc.logger.Fatal().Err(cerr.err).Msg(\"unable to establish reverse connection to broker\")\n\t\t\t}\n\t\t\tc.logger.Warn().Err(cerr.err).Msg(\"retrying\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif c.shutdown() {\n\t\t\treturn nil\n\t\t}\n\n\t\tdone := make(chan interface{})\n\t\tcommandReader := c.newCommandReader(done, conn)\n\t\tcommandProcessor := c.newCommandProcessor(done, commandReader)\n\t\tfor result := range commandProcessor {\n\t\t\tif c.shutdown() {\n\t\t\t\tclose(done)\n\t\t\t\tconn.Close()\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif result.ignore {\n\t\t\t\t\/\/ c.logger.Debug().Err(result.err).Int(\"timeouts\", c.commTimeouts).Msg(\"ignored\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif result.err != nil {\n\t\t\t\tif result.reset {\n\t\t\t\t\tc.logger.Warn().Err(result.err).Int(\"timeouts\", c.commTimeouts).Msg(\"resetting connection\")\n\t\t\t\t\tclose(done)\n\t\t\t\t\tbreak\n\t\t\t\t} else if result.fatal {\n\t\t\t\t\tc.logger.Error().Err(result.err).Interface(\"result\", result).Msg(\"fatal error, exiting\")\n\t\t\t\t\tconn.Close()\n\t\t\t\t\tclose(done)\n\t\t\t\t\treturn result.err\n\t\t\t\t} else {\n\t\t\t\t\tc.logger.Error().Err(result.err).Interface(\"result\", result).Msg(\"unhandled error state...\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ send metrics to broker\n\t\t\tif err := c.sendMetricData(conn, result.channelID, result.metrics); err != nil {\n\t\t\t\tc.logger.Warn().Err(err).Msg(\"sending metric data, resetting connection\")\n\t\t\t\tclose(done)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tc.resetConnectionAttempts()\n\t\t}\n\n\t\tconn.Close()\n\t\tif c.shutdown() {\n\t\t\treturn nil\n\t\t}\n\n\t}\n}\n\n\/\/ connect to broker w\/tls and send initial introduction\n\/\/ NOTE: all reverse connections require tls\nfunc (c *Connection) connect() (*tls.Conn, *connError) {\n\tc.Lock()\n\tif c.connAttempts > 0 {\n\t\tif c.maxConnRetry != -1 && c.connAttempts >= c.maxConnRetry {\n\t\t\treturn nil, &connError{fatal: true, err: errors.Errorf(\"max broker connection attempts reached (%d of %d)\", c.connAttempts, c.maxConnRetry)}\n\t\t}\n\n\t\tc.logger.Info().\n\t\t\tStr(\"delay\", c.delay.String()).\n\t\t\tInt(\"attempt\", c.connAttempts).\n\t\t\tMsg(\"connect retry\")\n\n\t\ttime.Sleep(c.delay)\n\t\tc.delay = c.getNextDelay(c.delay)\n\n\t\t\/\/ Under normal circumstances the configuration for reverse is\n\t\t\/\/ non-volatile. There are, however, some situations where the\n\t\t\/\/ configuration must be rebuilt. (e.g. ip of broker changed,\n\t\t\/\/ check changed to use a different broker, broker certificate\n\t\t\/\/ changes, etc.) The majority of configuration based errors are\n\t\t\/\/ fatal, no attempt is made to resolve.\n\t\tif c.connAttempts%c.configRetryLimit == 0 {\n\t\t\tc.logger.Info().Int(\"attempts\", c.connAttempts).Msg(\"reconfig triggered\")\n\t\t\tc.logger.Debug().Str(\"check_bundle\", viper.GetString(config.KeyCheckBundleID)).Msg(\"refreshing check\")\n\t\t\tif err := c.check.RefreshCheckConfig(); err != nil {\n\t\t\t\treturn nil, &connError{fatal: true, err: errors.Wrap(err, \"refreshing check configuration\")}\n\t\t\t}\n\t\t\tc.logger.Debug().Str(\"check_bundle\", viper.GetString(config.KeyCheckBundleID)).Msg(\"setting reverse config\")\n\t\t\trc, err := c.check.GetReverseConfig()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, &connError{fatal: true, err: errors.Wrap(err, \"reconfiguring reverse connection\")}\n\t\t\t}\n\t\t\tif rc == nil {\n\t\t\t\treturn nil, &connError{fatal: true, err: errors.Wrap(err, \"invalid reverse configuration (nil)\")}\n\t\t\t}\n\t\t\tc.revConfig = *rc\n\t\t\tc.logger = log.With().Str(\"pkg\", \"reverse\").Str(\"cid\", viper.GetString(config.KeyCheckBundleID)).Logger()\n\t\t\tc.logger.Info().\n\t\t\t\tStr(\"check_bundle\", viper.GetString(config.KeyCheckBundleID)).\n\t\t\t\tStr(\"rev_host\", c.revConfig.ReverseURL.Hostname()).\n\t\t\t\tStr(\"rev_port\", c.revConfig.ReverseURL.Port()).\n\t\t\t\tStr(\"rev_path\", c.revConfig.ReverseURL.Path).\n\t\t\t\tStr(\"agent\", c.agentAddress).\n\t\t\t\tMsg(\"reverse configuration\")\n\t\t}\n\t}\n\tc.Unlock()\n\n\trevHost := c.revConfig.ReverseURL.Host\n\tc.logger.Debug().Str(\"host\", revHost).Msg(\"connecting\")\n\tc.Lock()\n\tc.connAttempts++\n\tc.Unlock()\n\tdialer := &net.Dialer{Timeout: c.dialerTimeout}\n\tconn, err := tls.DialWithDialer(dialer, \"tcp\", c.revConfig.BrokerAddr.String(), c.revConfig.TLSConfig)\n\tif err != nil {\n\t\treturn nil, &connError{fatal: false, err: errors.Wrapf(err, \"connecting to %s\", revHost)}\n\t}\n\tc.logger.Info().Str(\"host\", revHost).Msg(\"connected\")\n\n\tconn.SetDeadline(time.Now().Add(c.commTimeout))\n\tintroReq := \"REVERSE \" + c.revConfig.ReverseURL.Path\n\tif c.revConfig.ReverseURL.Fragment != \"\" {\n\t\tintroReq += \"#\" + c.revConfig.ReverseURL.Fragment \/\/ reverse secret is placed here when reverse url is parsed\n\t}\n\tc.logger.Debug().Msg(fmt.Sprintf(\"sending intro '%s'\", introReq))\n\tif _, err := fmt.Fprintf(conn, \"%s HTTP\/1.1\\r\\n\\r\\n\", introReq); err != nil {\n\t\tif err != nil {\n\t\t\tc.logger.Error().Err(err).Msg(\"sending intro\")\n\t\t\treturn nil, &connError{fatal: false, err: errors.Wrapf(err, \"unable to write intro to %s\", revHost)}\n\t\t}\n\t}\n\n\tc.Lock()\n\t\/\/ reset timeouts after successful (re)connection\n\tc.commTimeouts = 0\n\tc.Unlock()\n\n\treturn conn, nil\n}\n\n\/\/ getNextDelay for failed connection attempts\nfunc (c *Connection) getNextDelay(currDelay time.Duration) time.Duration {\n\tif currDelay == c.maxDelay {\n\t\treturn currDelay\n\t}\n\n\tdelay := currDelay\n\n\tif delay < c.maxDelay {\n\t\tdrift := rand.Intn(c.maxDelayStep-c.minDelayStep) + c.minDelayStep\n\t\tdelay += time.Duration(drift) * time.Second\n\t}\n\n\tif delay > c.maxDelay {\n\t\tdelay = c.maxDelay\n\t}\n\n\treturn delay\n}\n\n\/\/ resetConnectionAttempts on successful send\/receive\nfunc (c *Connection) resetConnectionAttempts() {\n\tc.Lock()\n\tif c.connAttempts > 0 {\n\t\tc.delay = 1 * time.Second\n\t\tc.connAttempts = 0\n\t}\n\tc.Unlock()\n}\n\n\/\/ Error returns string representation of a connError\nfunc (e *connError) Error() string {\n\treturn e.err.Error()\n}\n<commit_msg>upd: retry reverse check refresh failures for unstable infrastructures<commit_after>\/\/ Copyright © 2017 Circonus, Inc. <support@circonus.com>\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\/\/\n\npackage reverse\n\nimport (\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com\/circonus-labs\/circonus-agent\/internal\/config\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/rs\/zerolog\/log\"\n\t\"github.com\/spf13\/viper\"\n)\n\n\/\/ startReverse manages the actual reverse connection to the Circonus broker\nfunc (c *Connection) startReverse() error {\n\tfor {\n\t\tconn, cerr := c.connect()\n\t\tif cerr != nil {\n\t\t\tif cerr.fatal {\n\t\t\t\tc.logger.Fatal().Err(cerr.err).Msg(\"unable to establish reverse connection to broker\")\n\t\t\t}\n\t\t\tc.logger.Warn().Err(cerr.err).Msg(\"retrying\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif c.shutdown() {\n\t\t\treturn nil\n\t\t}\n\n\t\tdone := make(chan interface{})\n\t\tcommandReader := c.newCommandReader(done, conn)\n\t\tcommandProcessor := c.newCommandProcessor(done, commandReader)\n\t\tfor result := range commandProcessor {\n\t\t\tif c.shutdown() {\n\t\t\t\tclose(done)\n\t\t\t\tconn.Close()\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tif result.ignore {\n\t\t\t\t\/\/ c.logger.Debug().Err(result.err).Int(\"timeouts\", c.commTimeouts).Msg(\"ignored\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif result.err != nil {\n\t\t\t\tif result.reset {\n\t\t\t\t\tc.logger.Warn().Err(result.err).Int(\"timeouts\", c.commTimeouts).Msg(\"resetting connection\")\n\t\t\t\t\tclose(done)\n\t\t\t\t\tbreak\n\t\t\t\t} else if result.fatal {\n\t\t\t\t\tc.logger.Error().Err(result.err).Interface(\"result\", result).Msg(\"fatal error, exiting\")\n\t\t\t\t\tconn.Close()\n\t\t\t\t\tclose(done)\n\t\t\t\t\treturn result.err\n\t\t\t\t} else {\n\t\t\t\t\tc.logger.Error().Err(result.err).Interface(\"result\", result).Msg(\"unhandled error state...\")\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ send metrics to broker\n\t\t\tif err := c.sendMetricData(conn, result.channelID, result.metrics); err != nil {\n\t\t\t\tc.logger.Warn().Err(err).Msg(\"sending metric data, resetting connection\")\n\t\t\t\tclose(done)\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tc.resetConnectionAttempts()\n\t\t}\n\n\t\tconn.Close()\n\t\tif c.shutdown() {\n\t\t\treturn nil\n\t\t}\n\n\t}\n}\n\n\/\/ connect to broker w\/tls and send initial introduction\n\/\/ NOTE: all reverse connections require tls\nfunc (c *Connection) connect() (*tls.Conn, *connError) {\n\tc.Lock()\n\tif c.connAttempts > 0 {\n\t\tif c.maxConnRetry != -1 && c.connAttempts >= c.maxConnRetry {\n\t\t\treturn nil, &connError{fatal: true, err: errors.Errorf(\"max broker connection attempts reached (%d of %d)\", c.connAttempts, c.maxConnRetry)}\n\t\t}\n\n\t\tc.logger.Info().\n\t\t\tStr(\"delay\", c.delay.String()).\n\t\t\tInt(\"attempt\", c.connAttempts).\n\t\t\tMsg(\"connect retry\")\n\n\t\ttime.Sleep(c.delay)\n\t\tc.delay = c.getNextDelay(c.delay)\n\n\t\t\/\/ Under normal circumstances the configuration for reverse is\n\t\t\/\/ non-volatile. There are, however, some situations where the\n\t\t\/\/ configuration must be rebuilt. (e.g. ip of broker changed,\n\t\t\/\/ check changed to use a different broker, broker certificate\n\t\t\/\/ changes, etc.) The majority of configuration based errors are\n\t\t\/\/ fatal, no attempt is made to resolve.\n\t\tif c.connAttempts%c.configRetryLimit == 0 {\n\t\t\tc.logger.Info().Int(\"attempts\", c.connAttempts).Msg(\"reconfig triggered\")\n\t\t\tif err := c.check.RefreshCheckConfig(); err != nil {\n\t\t\t\treturn nil, &connError{fatal: false, err: errors.Wrap(err, \"refreshing check configuration\")}\n\t\t\t}\n\t\t\tc.logger.Debug().Str(\"check_bundle\", viper.GetString(config.KeyCheckBundleID)).Msg(\"setting reverse config\")\n\t\t\trc, err := c.check.GetReverseConfig()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, &connError{fatal: true, err: errors.Wrap(err, \"reconfiguring reverse connection\")}\n\t\t\t}\n\t\t\tif rc == nil {\n\t\t\t\treturn nil, &connError{fatal: true, err: errors.Wrap(err, \"invalid reverse configuration (nil)\")}\n\t\t\t}\n\t\t\tc.revConfig = *rc\n\t\t\tc.logger = log.With().Str(\"pkg\", \"reverse\").Str(\"cid\", viper.GetString(config.KeyCheckBundleID)).Logger()\n\t\t\tc.logger.Info().\n\t\t\t\tStr(\"check_bundle\", viper.GetString(config.KeyCheckBundleID)).\n\t\t\t\tStr(\"rev_host\", c.revConfig.ReverseURL.Hostname()).\n\t\t\t\tStr(\"rev_port\", c.revConfig.ReverseURL.Port()).\n\t\t\t\tStr(\"rev_path\", c.revConfig.ReverseURL.Path).\n\t\t\t\tStr(\"agent\", c.agentAddress).\n\t\t\t\tMsg(\"reverse configuration\")\n\t\t}\n\t}\n\tc.Unlock()\n\n\trevHost := c.revConfig.ReverseURL.Host\n\tc.logger.Debug().Str(\"host\", revHost).Msg(\"connecting\")\n\tc.Lock()\n\tc.connAttempts++\n\tc.Unlock()\n\tdialer := &net.Dialer{Timeout: c.dialerTimeout}\n\tconn, err := tls.DialWithDialer(dialer, \"tcp\", c.revConfig.BrokerAddr.String(), c.revConfig.TLSConfig)\n\tif err != nil {\n\t\treturn nil, &connError{fatal: false, err: errors.Wrapf(err, \"connecting to %s\", revHost)}\n\t}\n\tc.logger.Info().Str(\"host\", revHost).Msg(\"connected\")\n\n\tconn.SetDeadline(time.Now().Add(c.commTimeout))\n\tintroReq := \"REVERSE \" + c.revConfig.ReverseURL.Path\n\tif c.revConfig.ReverseURL.Fragment != \"\" {\n\t\tintroReq += \"#\" + c.revConfig.ReverseURL.Fragment \/\/ reverse secret is placed here when reverse url is parsed\n\t}\n\tc.logger.Debug().Msg(fmt.Sprintf(\"sending intro '%s'\", introReq))\n\tif _, err := fmt.Fprintf(conn, \"%s HTTP\/1.1\\r\\n\\r\\n\", introReq); err != nil {\n\t\tif err != nil {\n\t\t\tc.logger.Error().Err(err).Msg(\"sending intro\")\n\t\t\treturn nil, &connError{fatal: false, err: errors.Wrapf(err, \"unable to write intro to %s\", revHost)}\n\t\t}\n\t}\n\n\tc.Lock()\n\t\/\/ reset timeouts after successful (re)connection\n\tc.commTimeouts = 0\n\tc.Unlock()\n\n\treturn conn, nil\n}\n\n\/\/ getNextDelay for failed connection attempts\nfunc (c *Connection) getNextDelay(currDelay time.Duration) time.Duration {\n\tif currDelay == c.maxDelay {\n\t\treturn currDelay\n\t}\n\n\tdelay := currDelay\n\n\tif delay < c.maxDelay {\n\t\tdrift := rand.Intn(c.maxDelayStep-c.minDelayStep) + c.minDelayStep\n\t\tdelay += time.Duration(drift) * time.Second\n\t}\n\n\tif delay > c.maxDelay {\n\t\tdelay = c.maxDelay\n\t}\n\n\treturn delay\n}\n\n\/\/ resetConnectionAttempts on successful send\/receive\nfunc (c *Connection) resetConnectionAttempts() {\n\tc.Lock()\n\tif c.connAttempts > 0 {\n\t\tc.delay = 1 * time.Second\n\t\tc.connAttempts = 0\n\t}\n\tc.Unlock()\n}\n\n\/\/ Error returns string representation of a connError\nfunc (e *connError) Error() string {\n\treturn e.err.Error()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Generated automatically by make.\npackage build\n\nconst goosList = \"darwin freebsd linux netbsd openbsd plan9 windows \"\nconst goarchList = \"386 amd64 arm \"\n<commit_msg>go\/build: update syslist.go package comment<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage build\n\nconst goosList = \"darwin freebsd linux netbsd openbsd plan9 windows \"\nconst goarchList = \"386 amd64 arm \"\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Utility functions.\n\npackage ioutil\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n)\n\n\/\/ ReadAll reads from r until an error or EOF and returns the data it read.\nfunc ReadAll(r io.Reader) ([]byte, os.Error) {\n\tvar buf bytes.Buffer\n\t_, err := io.Copy(&buf, r)\n\treturn buf.Bytes(), err\n}\n\n\/\/ ReadFile reads the file named by filename and returns the contents.\nfunc ReadFile(filename string) ([]byte, os.Error) {\n\tf, err := os.Open(filename, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\t\/\/ It's a good but not certain bet that Stat will tell us exactly how much to\n\t\/\/ read, so let's try it but be prepared for the answer to be wrong.\n\tdir, err := f.Stat()\n\tvar n uint64\n\tif err != nil && dir.Size < 2e9 { \/\/ Don't preallocate a huge buffer, just in case.\n\t\tn = dir.Size\n\t}\n\t\/\/ Add a little extra in case Size is zero, and to avoid another allocation after\n\t\/\/ Read has filled the buffer.\n\tn += bytes.MinRead\n\t\/\/ Pre-allocate the correct size of buffer, then set its size to zero. The\n\t\/\/ Buffer will read into the allocated space cheaply. If the size was wrong,\n\t\/\/ we'll either waste some space off the end or reallocate as needed, but\n\t\/\/ in the overwhelmingly common case we'll get it just right.\n\tbuf := bytes.NewBuffer(make([]byte, 0, n))\n\t_, err = buf.ReadFrom(f)\n\treturn buf.Bytes(), err\n}\n\n\/\/ WriteFile writes data to a file named by filename.\n\/\/ If the file does not exist, WriteFile creates it with permissions perm;\n\/\/ otherwise WriteFile truncates it before writing.\nfunc WriteFile(filename string, data []byte, perm int) os.Error {\n\tf, err := os.Open(filename, os.O_WRONLY|os.O_CREAT|os.O_TRUNC, perm)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn, err := f.Write(data)\n\tf.Close()\n\tif err == nil && n < len(data) {\n\t\terr = io.ErrShortWrite\n\t}\n\treturn err\n}\n\n\/\/ A dirList implements sort.Interface.\ntype dirList []*os.Dir\n\nfunc (d dirList) Len() int { return len(d) }\nfunc (d dirList) Less(i, j int) bool { return d[i].Name < d[j].Name }\nfunc (d dirList) Swap(i, j int) { d[i], d[j] = d[j], d[i] }\n\n\/\/ ReadDir reads the directory named by dirname and returns\n\/\/ a list of sorted directory entries.\nfunc ReadDir(dirname string) ([]*os.Dir, os.Error) {\n\tf, err := os.Open(dirname, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlist, err := f.Readdir(-1)\n\tf.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdirs := make(dirList, len(list))\n\tfor i := range list {\n\t\tdirs[i] = &list[i]\n\t}\n\tsort.Sort(dirs)\n\treturn dirs, nil\n}\n<commit_msg>io\/ioutil: fix bug in ReadFile when Open succeeds but Stat fails<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Utility functions.\n\npackage ioutil\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n)\n\n\/\/ ReadAll reads from r until an error or EOF and returns the data it read.\nfunc ReadAll(r io.Reader) ([]byte, os.Error) {\n\tvar buf bytes.Buffer\n\t_, err := io.Copy(&buf, r)\n\treturn buf.Bytes(), err\n}\n\n\/\/ ReadFile reads the file named by filename and returns the contents.\nfunc ReadFile(filename string) ([]byte, os.Error) {\n\tf, err := os.Open(filename, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\t\/\/ It's a good but not certain bet that Stat will tell us exactly how much to\n\t\/\/ read, so let's try it but be prepared for the answer to be wrong.\n\tdir, err := f.Stat()\n\tvar n uint64\n\tif err == nil && dir.Size < 2e9 { \/\/ Don't preallocate a huge buffer, just in case.\n\t\tn = dir.Size\n\t}\n\t\/\/ Add a little extra in case Size is zero, and to avoid another allocation after\n\t\/\/ Read has filled the buffer.\n\tn += bytes.MinRead\n\t\/\/ Pre-allocate the correct size of buffer, then set its size to zero. The\n\t\/\/ Buffer will read into the allocated space cheaply. If the size was wrong,\n\t\/\/ we'll either waste some space off the end or reallocate as needed, but\n\t\/\/ in the overwhelmingly common case we'll get it just right.\n\tbuf := bytes.NewBuffer(make([]byte, 0, n))\n\t_, err = buf.ReadFrom(f)\n\treturn buf.Bytes(), err\n}\n\n\/\/ WriteFile writes data to a file named by filename.\n\/\/ If the file does not exist, WriteFile creates it with permissions perm;\n\/\/ otherwise WriteFile truncates it before writing.\nfunc WriteFile(filename string, data []byte, perm int) os.Error {\n\tf, err := os.Open(filename, os.O_WRONLY|os.O_CREAT|os.O_TRUNC, perm)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn, err := f.Write(data)\n\tf.Close()\n\tif err == nil && n < len(data) {\n\t\terr = io.ErrShortWrite\n\t}\n\treturn err\n}\n\n\/\/ A dirList implements sort.Interface.\ntype dirList []*os.Dir\n\nfunc (d dirList) Len() int { return len(d) }\nfunc (d dirList) Less(i, j int) bool { return d[i].Name < d[j].Name }\nfunc (d dirList) Swap(i, j int) { d[i], d[j] = d[j], d[i] }\n\n\/\/ ReadDir reads the directory named by dirname and returns\n\/\/ a list of sorted directory entries.\nfunc ReadDir(dirname string) ([]*os.Dir, os.Error) {\n\tf, err := os.Open(dirname, os.O_RDONLY, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlist, err := f.Readdir(-1)\n\tf.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdirs := make(dirList, len(list))\n\tfor i := range list {\n\t\tdirs[i] = &list[i]\n\t}\n\tsort.Sort(dirs)\n\treturn dirs, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package ioutil implements some I\/O utility functions.\npackage ioutil\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n)\n\n\/\/ readAll reads from r until an error or EOF and returns the data it read\n\/\/ from the internal buffer allocated with a specified capacity.\nfunc readAll(r io.Reader, capacity int64) ([]byte, os.Error) {\n\tbuf := bytes.NewBuffer(make([]byte, 0, capacity))\n\t_, err := buf.ReadFrom(r)\n\treturn buf.Bytes(), err\n}\n\n\/\/ ReadAll reads from r until an error or EOF and returns the data it read.\nfunc ReadAll(r io.Reader) ([]byte, os.Error) {\n\treturn readAll(r, bytes.MinRead)\n}\n\n\/\/ ReadFile reads the file named by filename and returns the contents.\nfunc ReadFile(filename string) ([]byte, os.Error) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\t\/\/ It's a good but not certain bet that FileInfo will tell us exactly how much to\n\t\/\/ read, so let's try it but be prepared for the answer to be wrong.\n\tfi, err := f.Stat()\n\tvar n int64\n\tif err == nil && fi.Size < 2e9 { \/\/ Don't preallocate a huge buffer, just in case.\n\t\tn = fi.Size\n\t}\n\t\/\/ As initial capacity for readAll, use n + a little extra in case Size is zero,\n\t\/\/ and to avoid another allocation after Read has filled the buffer. The readAll\n\t\/\/ call will read into its allocated internal buffer cheaply. If the size was\n\t\/\/ wrong, we'll either waste some space off the end or reallocate as needed, but\n\t\/\/ in the overwhelmingly common case we'll get it just right.\n\treturn readAll(f, n+bytes.MinRead)\n}\n\n\/\/ WriteFile writes data to a file named by filename.\n\/\/ If the file does not exist, WriteFile creates it with permissions perm;\n\/\/ otherwise WriteFile truncates it before writing.\nfunc WriteFile(filename string, data []byte, perm uint32) os.Error {\n\tf, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn, err := f.Write(data)\n\tf.Close()\n\tif err == nil && n < len(data) {\n\t\terr = io.ErrShortWrite\n\t}\n\treturn err\n}\n\n\/\/ A dirList implements sort.Interface.\ntype fileInfoList []*os.FileInfo\n\nfunc (f fileInfoList) Len() int { return len(f) }\nfunc (f fileInfoList) Less(i, j int) bool { return f[i].Name < f[j].Name }\nfunc (f fileInfoList) Swap(i, j int) { f[i], f[j] = f[j], f[i] }\n\n\/\/ ReadDir reads the directory named by dirname and returns\n\/\/ a list of sorted directory entries.\nfunc ReadDir(dirname string) ([]*os.FileInfo, os.Error) {\n\tf, err := os.Open(dirname)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlist, err := f.Readdir(-1)\n\tf.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfi := make(fileInfoList, len(list))\n\tfor i := range list {\n\t\tfi[i] = &list[i]\n\t}\n\tsort.Sort(fi)\n\treturn fi, nil\n}\n\ntype nopCloser struct {\n\tio.Reader\n}\n\nfunc (nopCloser) Close() os.Error { return nil }\n\n\/\/ NopCloser returns a ReadCloser with a no-op Close method wrapping\n\/\/ the provided Reader r.\nfunc NopCloser(r io.Reader) io.ReadCloser {\n\treturn nopCloser{r}\n}\n\ntype devNull int\n\nfunc (devNull) Write(p []byte) (int, os.Error) {\n\treturn len(p), nil\n}\n\n\/\/ Discard is an io.Writer on which all Write calls succeed\n\/\/ without doing anything.\nvar Discard io.Writer = devNull(0)\n<commit_msg>io\/ioutil: fix typo in comment<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package ioutil implements some I\/O utility functions.\npackage ioutil\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n)\n\n\/\/ readAll reads from r until an error or EOF and returns the data it read\n\/\/ from the internal buffer allocated with a specified capacity.\nfunc readAll(r io.Reader, capacity int64) ([]byte, os.Error) {\n\tbuf := bytes.NewBuffer(make([]byte, 0, capacity))\n\t_, err := buf.ReadFrom(r)\n\treturn buf.Bytes(), err\n}\n\n\/\/ ReadAll reads from r until an error or EOF and returns the data it read.\nfunc ReadAll(r io.Reader) ([]byte, os.Error) {\n\treturn readAll(r, bytes.MinRead)\n}\n\n\/\/ ReadFile reads the file named by filename and returns the contents.\nfunc ReadFile(filename string) ([]byte, os.Error) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\t\/\/ It's a good but not certain bet that FileInfo will tell us exactly how much to\n\t\/\/ read, so let's try it but be prepared for the answer to be wrong.\n\tfi, err := f.Stat()\n\tvar n int64\n\tif err == nil && fi.Size < 2e9 { \/\/ Don't preallocate a huge buffer, just in case.\n\t\tn = fi.Size\n\t}\n\t\/\/ As initial capacity for readAll, use n + a little extra in case Size is zero,\n\t\/\/ and to avoid another allocation after Read has filled the buffer. The readAll\n\t\/\/ call will read into its allocated internal buffer cheaply. If the size was\n\t\/\/ wrong, we'll either waste some space off the end or reallocate as needed, but\n\t\/\/ in the overwhelmingly common case we'll get it just right.\n\treturn readAll(f, n+bytes.MinRead)\n}\n\n\/\/ WriteFile writes data to a file named by filename.\n\/\/ If the file does not exist, WriteFile creates it with permissions perm;\n\/\/ otherwise WriteFile truncates it before writing.\nfunc WriteFile(filename string, data []byte, perm uint32) os.Error {\n\tf, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn, err := f.Write(data)\n\tf.Close()\n\tif err == nil && n < len(data) {\n\t\terr = io.ErrShortWrite\n\t}\n\treturn err\n}\n\n\/\/ A fileInfoList implements sort.Interface.\ntype fileInfoList []*os.FileInfo\n\nfunc (f fileInfoList) Len() int { return len(f) }\nfunc (f fileInfoList) Less(i, j int) bool { return f[i].Name < f[j].Name }\nfunc (f fileInfoList) Swap(i, j int) { f[i], f[j] = f[j], f[i] }\n\n\/\/ ReadDir reads the directory named by dirname and returns\n\/\/ a list of sorted directory entries.\nfunc ReadDir(dirname string) ([]*os.FileInfo, os.Error) {\n\tf, err := os.Open(dirname)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlist, err := f.Readdir(-1)\n\tf.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfi := make(fileInfoList, len(list))\n\tfor i := range list {\n\t\tfi[i] = &list[i]\n\t}\n\tsort.Sort(fi)\n\treturn fi, nil\n}\n\ntype nopCloser struct {\n\tio.Reader\n}\n\nfunc (nopCloser) Close() os.Error { return nil }\n\n\/\/ NopCloser returns a ReadCloser with a no-op Close method wrapping\n\/\/ the provided Reader r.\nfunc NopCloser(r io.Reader) io.ReadCloser {\n\treturn nopCloser{r}\n}\n\ntype devNull int\n\nfunc (devNull) Write(p []byte) (int, os.Error) {\n\treturn len(p), nil\n}\n\n\/\/ Discard is an io.Writer on which all Write calls succeed\n\/\/ without doing anything.\nvar Discard io.Writer = devNull(0)\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Sockets for Windows\n\npackage net\n\nimport (\n\t\"syscall\"\n)\n\nfunc setKernelSpecificSockopt(s syscall.Handle, f int) {\n\t\/\/ Allow broadcast.\n\tsyscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)\n\n\tif f == syscall.AF_INET6 {\n\t\t\/\/ using ip, tcp, udp, etc.\n\t\t\/\/ allow both protocols even if the OS default is otherwise.\n\t\tsyscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, 0)\n\t}\n}\n<commit_msg>net: document why we do not use SO_REUSEADDR on windows<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Sockets for Windows\n\npackage net\n\nimport (\n\t\"syscall\"\n)\n\nfunc setKernelSpecificSockopt(s syscall.Handle, f int) {\n\t\/\/ Windows will reuse recently-used addresses by default.\n\t\/\/ SO_REUSEADDR should not be used here, as it allows\n\t\/\/ a socket to forcibly bind to a port in use by another socket.\n\t\/\/ This could lead to a non-deterministic behavior, where\n\t\/\/ connection requests over the port cannot be guaranteed\n\t\/\/ to be handled by the correct socket.\n\n\t\/\/ Allow broadcast.\n\tsyscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)\n\n\tif f == syscall.AF_INET6 {\n\t\t\/\/ using ip, tcp, udp, etc.\n\t\t\/\/ allow both protocols even if the OS default is otherwise.\n\t\tsyscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, 0)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage re combines regular expression matching with fmt.Scan like\nextraction of sub-matches into caller-supplied objects. Pointers to\nvariables can be passed as extra arguments to re.Scan. These\nvariables are filled in with regular expression sub-matches. The\nsub-matches are parsed appropriately based on the type of the\nvariable. E.g., if a *int is passed in, the sub-match is parsed as a\nnumber (and overflow is detected).\n\nFor example, the host and port portions of a URL can be extracted as\nfollows:\n\n\tvar host string\n\tvar port int\n\treg := regexp.MustCompile(`^https?:\/\/([^\/:]+):(\\d+)\/`)\n\tif err := re.Scan(reg, url, &host, &port); err == nil {\n\t\tProcess(host, port)\n\t}\n\nA \"func([]byte) error\" can also be passed in as an extra argument to provide\ncustom parsing.\n*\/\npackage re\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n)\n\n\/\/ Scan returns nil if regular expression re matches somewhere in\n\/\/ input, and for every non-nil entry in output, the corresponding\n\/\/ regular expression sub-match is succesfully parsed and stored into\n\/\/ *output[i].\n\/\/\n\/\/ The following can be passed as output arguments to Scan:\n\/\/\n\/\/ nil: The corresponding sub-match is discarded without being saved.\n\/\/\n\/\/ Pointer to string or []byte: The corresponding sub-match is\n\/\/ stored in the pointed-to object. When storing into a []byte, no\n\/\/ copying is done, and the stored slice is an alias of the input.\n\/\/\n\/\/ Pointer to some built-in numeric types (int, int8, int16, int32,\n\/\/ int64, uint, uintptr, uint8, uint16, uint32, uint64, float32,\n\/\/ float64): The corresponding sub-match will be parsed as a literal\n\/\/ of the numeric type and the result stored into *output[i]. Scan\n\/\/ will return an error if the sub-match cannot be parsed\n\/\/ successfully, or the parse result is out of range for the type.\n\/\/\n\/\/ Pointer to a rune or a byte: rune is an alias of uint32 and byte is\n\/\/ an alias of uint8, so the preceding rule applies; i.e., Scan treats\n\/\/ the input as a string of digits to be parsed into the rune or\n\/\/ byte. Therefore Scan cannot be used to directly extract a single\n\/\/ rune or byte from the input. For that, parse into a string or\n\/\/ []byte and use the first element, or pass in a custom parsing\n\/\/ function (see below).\n\/\/\n\/\/ func([]byte) error: The function is passed the corresponding\n\/\/ sub-match. If the result is a non-nil error, the Scan call fails\n\/\/ with that error. Pass in such a function to provide custom parsing:\n\/\/ e.g., treating a number as decimal even if it starts with \"0\"\n\/\/ (normally Scan would treat such as a number as octal); or parsing\n\/\/ an otherwise unsupported type like time.Duration.\n\/\/\n\/\/ An error is returned if output[i] does not have one of the preceding\n\/\/ types. Caveat: the set of supported types might be extended in the\n\/\/ future.\n\/\/\n\/\/ Extra sub-matches (ones with no corresponding output) are discarded silently.\nfunc Scan(re *regexp.Regexp, input []byte, output ...interface{}) error {\n\tmatches := re.FindSubmatchIndex(input)\n\tif matches == nil {\n\t\treturn fmt.Errorf(`re.Scan: could not find \"%s\" in \"%s\"`,\n\t\t\tre, input)\n\t}\n\tif len(matches) < 2+2*len(output) {\n\t\treturn fmt.Errorf(`re.Scan: only got %d matches from \"%s\"; need at least %d`,\n\t\t\tlen(matches)\/2-1, re, len(output))\n\t}\n\tfor i, r := range output {\n\t\tstart, limit := matches[2+2*i], matches[2+2*i+1]\n\t\tif start < 0 || limit < 0 {\n\t\t\t\/\/ Sub-expression is missing; treat as empty.\n\t\t\tstart = 0\n\t\t\tlimit = 0\n\t\t}\n\t\tif err := assign(r, input[start:limit]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc assign(r interface{}, b []byte) error {\n\tswitch v := r.(type) {\n\tcase nil:\n\t\t\/\/ Discard the match.\n\tcase func([]byte) error:\n\t\tif err := v(b); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *string:\n\t\t*v = string(b)\n\tcase *[]byte:\n\t\t*v = b\n\tcase *int:\n\t\tif i, err := strconv.ParseInt(string(b), 0, 64); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\tif int64(int(i)) != i {\n\t\t\t\treturn parseError(\"out of range for int\", b)\n\t\t\t}\n\t\t\t*v = int(i)\n\t\t}\n\tcase *int8:\n\t\tif i, err := strconv.ParseInt(string(b), 0, 8); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\t*v = int8(i)\n\t\t}\n\tcase *int16:\n\t\tif i, err := strconv.ParseInt(string(b), 0, 16); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\t*v = int16(i)\n\t\t}\n\tcase *int32:\n\t\tif i, err := strconv.ParseInt(string(b), 0, 32); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\t*v = int32(i)\n\t\t}\n\tcase *int64:\n\t\tif i, err := strconv.ParseInt(string(b), 0, 64); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\t*v = i\n\t\t}\n\tcase *uint:\n\t\tif u, err := strconv.ParseUint(string(b), 0, 64); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\tif uint64(uint(u)) != u {\n\t\t\t\treturn parseError(\"out of range for uint\", b)\n\t\t\t}\n\t\t\t*v = uint(u)\n\t\t}\n\tcase *uintptr:\n\t\tif u, err := strconv.ParseUint(string(b), 0, 64); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\tif uint64(uintptr(u)) != u {\n\t\t\t\treturn parseError(\"out of range for uintptr\", b)\n\t\t\t}\n\t\t\t*v = uintptr(u)\n\t\t}\n\tcase *uint8:\n\t\tif u, err := strconv.ParseUint(string(b), 0, 8); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\t*v = uint8(u)\n\t\t}\n\tcase *uint16:\n\t\tif u, err := strconv.ParseUint(string(b), 0, 16); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\t*v = uint16(u)\n\t\t}\n\tcase *uint32:\n\t\tif u, err := strconv.ParseUint(string(b), 0, 32); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\t*v = uint32(u)\n\t\t}\n\tcase *uint64:\n\t\tif u, err := strconv.ParseUint(string(b), 0, 64); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\t*v = u\n\t\t}\n\tcase *float32:\n\t\tif f, err := strconv.ParseFloat(string(b), 32); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\t*v = float32(f)\n\t\t}\n\tcase *float64:\n\t\tif f, err := strconv.ParseFloat(string(b), 64); err != nil {\n\t\t\treturn err\n\t\t} else {\n\t\t\t*v = f\n\t\t}\n\tdefault:\n\t\tt := reflect.ValueOf(r).Type()\n\t\treturn parseError(fmt.Sprintf(\"unsupported type %s\", t), b)\n\t}\n\treturn nil\n}\n\nfunc parseError(explanation string, b []byte) error {\n\treturn fmt.Errorf(`re.Scan: parsing \"%s\": %s`, b, explanation)\n}\n<commit_msg>fixed lint errors<commit_after>\/*\nPackage re combines regular expression matching with fmt.Scan like\nextraction of sub-matches into caller-supplied objects. Pointers to\nvariables can be passed as extra arguments to re.Scan. These\nvariables are filled in with regular expression sub-matches. The\nsub-matches are parsed appropriately based on the type of the\nvariable. E.g., if a *int is passed in, the sub-match is parsed as a\nnumber (and overflow is detected).\n\nFor example, the host and port portions of a URL can be extracted as\nfollows:\n\n\tvar host string\n\tvar port int\n\treg := regexp.MustCompile(`^https?:\/\/([^\/:]+):(\\d+)\/`)\n\tif err := re.Scan(reg, url, &host, &port); err == nil {\n\t\tProcess(host, port)\n\t}\n\nA \"func([]byte) error\" can also be passed in as an extra argument to provide\ncustom parsing.\n*\/\npackage re\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n)\n\n\/\/ Scan returns nil if regular expression re matches somewhere in\n\/\/ input, and for every non-nil entry in output, the corresponding\n\/\/ regular expression sub-match is succesfully parsed and stored into\n\/\/ *output[i].\n\/\/\n\/\/ The following can be passed as output arguments to Scan:\n\/\/\n\/\/ nil: The corresponding sub-match is discarded without being saved.\n\/\/\n\/\/ Pointer to string or []byte: The corresponding sub-match is\n\/\/ stored in the pointed-to object. When storing into a []byte, no\n\/\/ copying is done, and the stored slice is an alias of the input.\n\/\/\n\/\/ Pointer to some built-in numeric types (int, int8, int16, int32,\n\/\/ int64, uint, uintptr, uint8, uint16, uint32, uint64, float32,\n\/\/ float64): The corresponding sub-match will be parsed as a literal\n\/\/ of the numeric type and the result stored into *output[i]. Scan\n\/\/ will return an error if the sub-match cannot be parsed\n\/\/ successfully, or the parse result is out of range for the type.\n\/\/\n\/\/ Pointer to a rune or a byte: rune is an alias of uint32 and byte is\n\/\/ an alias of uint8, so the preceding rule applies; i.e., Scan treats\n\/\/ the input as a string of digits to be parsed into the rune or\n\/\/ byte. Therefore Scan cannot be used to directly extract a single\n\/\/ rune or byte from the input. For that, parse into a string or\n\/\/ []byte and use the first element, or pass in a custom parsing\n\/\/ function (see below).\n\/\/\n\/\/ func([]byte) error: The function is passed the corresponding\n\/\/ sub-match. If the result is a non-nil error, the Scan call fails\n\/\/ with that error. Pass in such a function to provide custom parsing:\n\/\/ e.g., treating a number as decimal even if it starts with \"0\"\n\/\/ (normally Scan would treat such as a number as octal); or parsing\n\/\/ an otherwise unsupported type like time.Duration.\n\/\/\n\/\/ An error is returned if output[i] does not have one of the preceding\n\/\/ types. Caveat: the set of supported types might be extended in the\n\/\/ future.\n\/\/\n\/\/ Extra sub-matches (ones with no corresponding output) are discarded silently.\nfunc Scan(re *regexp.Regexp, input []byte, output ...interface{}) error {\n\tmatches := re.FindSubmatchIndex(input)\n\tif matches == nil {\n\t\treturn fmt.Errorf(`re.Scan: could not find \"%s\" in \"%s\"`,\n\t\t\tre, input)\n\t}\n\tif len(matches) < 2+2*len(output) {\n\t\treturn fmt.Errorf(`re.Scan: only got %d matches from \"%s\"; need at least %d`,\n\t\t\tlen(matches)\/2-1, re, len(output))\n\t}\n\tfor i, r := range output {\n\t\tstart, limit := matches[2+2*i], matches[2+2*i+1]\n\t\tif start < 0 || limit < 0 {\n\t\t\t\/\/ Sub-expression is missing; treat as empty.\n\t\t\tstart = 0\n\t\t\tlimit = 0\n\t\t}\n\t\tif err := assign(r, input[start:limit]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc assign(r interface{}, b []byte) error {\n\tswitch v := r.(type) {\n\tcase nil:\n\t\t\/\/ Discard the match.\n\tcase func([]byte) error:\n\t\tif err := v(b); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *string:\n\t\t*v = string(b)\n\tcase *[]byte:\n\t\t*v = b\n\tcase *int:\n\t\ti, err := strconv.ParseInt(string(b), 0, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif int64(int(i)) != i {\n\t\t\treturn parseError(\"out of range for int\", b)\n\t\t}\n\t\t*v = int(i)\n\tcase *int8:\n\t\ti, err := strconv.ParseInt(string(b), 0, 8)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = int8(i)\n\tcase *int16:\n\t\ti, err := strconv.ParseInt(string(b), 0, 16)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = int16(i)\n\tcase *int32:\n\t\ti, err := strconv.ParseInt(string(b), 0, 32)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = int32(i)\n\tcase *int64:\n\t\ti, err := strconv.ParseInt(string(b), 0, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = i\n\tcase *uint:\n\t\tu, err := strconv.ParseUint(string(b), 0, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif uint64(uint(u)) != u {\n\t\t\treturn parseError(\"out of range for uint\", b)\n\t\t}\n\t\t*v = uint(u)\n\tcase *uintptr:\n\t\tu, err := strconv.ParseUint(string(b), 0, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif uint64(uintptr(u)) != u {\n\t\t\treturn parseError(\"out of range for uintptr\", b)\n\t\t}\n\t\t*v = uintptr(u)\n\tcase *uint8:\n\t\tu, err := strconv.ParseUint(string(b), 0, 8)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = uint8(u)\n\tcase *uint16:\n\t\tu, err := strconv.ParseUint(string(b), 0, 16)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = uint16(u)\n\tcase *uint32:\n\t\tu, err := strconv.ParseUint(string(b), 0, 32)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = uint32(u)\n\tcase *uint64:\n\t\tu, err := strconv.ParseUint(string(b), 0, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = u\n\tcase *float32:\n\t\tf, err := strconv.ParseFloat(string(b), 32)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = float32(f)\n\tcase *float64:\n\t\tf, err := strconv.ParseFloat(string(b), 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*v = f\n\tdefault:\n\t\tt := reflect.ValueOf(r).Type()\n\t\treturn parseError(fmt.Sprintf(\"unsupported type %s\", t), b)\n\t}\n\treturn nil\n}\n\nfunc parseError(explanation string, b []byte) error {\n\treturn fmt.Errorf(`re.Scan: parsing \"%s\": %s`, b, explanation)\n}\n<|endoftext|>"} {"text":"<commit_before>package sh\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/inject\"\n)\n\ntype Return struct {\n\tStdout string\n\tStderr string\n}\n\nfunc (r *Return) String() string {\n\treturn r.Stdout\n}\n\nfunc (r *Return) Trim() string {\n\treturn strings.TrimSpace(r.Stdout)\n}\n\nfunc Capture(a ...interface{}) (ret *Return, err error) {\n\ts := NewSession()\n\treturn s.Capture(a...)\n}\n\ntype Dir string\n\ntype Session struct {\n\tinj inject.Injector\n\talias map[string][]string\n\tcmds []*exec.Cmd\n\tstarted bool\n\tEnv map[string]string\n\tStdout io.Writer\n\tStderr io.Writer\n}\n\nfunc NewSession(a ...interface{}) *Session {\n\tenv := map[string]string{\n\t\t\"PATH\": \"\/bin:\/usr\/bin:\/usr\/local\/bin\",\n\t}\n\ts := &Session{\n\t\tinj: inject.New(),\n\t\talias: make(map[string][]string),\n\t\tStdout: os.Stdout,\n\t\tStderr: os.Stderr,\n\t\tEnv: env,\n\t}\n\tdir := Dir(\"\")\n\targs := []string{}\n\ts.inj.Map(env).Map(dir).Map(args)\n\tfor _, v := range a {\n\t\ts.inj.Map(v)\n\t}\n\treturn s\n}\n\nfunc (s *Session) Alias(alias, cmd string, args ...string) {\n\tv := []string{cmd}\n\tv = append(v, args...)\n\ts.alias[alias] = v\n}\n\nfunc (s *Session) Command(a ...interface{}) *Session {\n\tfor _, v := range a {\n\t\ts.inj.Map(v)\n\t}\n\ts.inj.Invoke(s.appendCmd)\n\treturn s\n}\n\nfunc (s *Session) Call(a ...interface{}) error {\n\treturn s.Command(a...).Run()\n}\n\n\/*\nfunc (s *Session) Exec(cmd string, args ...string) error {\n\treturn s.Call(cmd, args)\n}\n*\/\n\nfunc (s *Session) Capture(a ...interface{}) (ret *Return, err error) {\n\tstdout := new(bytes.Buffer)\n\tstderr := new(bytes.Buffer)\n\toldout, olderr := s.Stdout, s.Stderr\n\ts.Stdout, s.Stderr = stdout, stderr\n\terr = s.Call(a...)\n\ts.Stdout, s.Stderr = oldout, olderr\n\n\tret = new(Return)\n\tret.Stdout = string(stdout.Bytes())\n\tret.Stderr = string(stderr.Bytes())\n\treturn\n}\n\nfunc (s *Session) Set(a ...interface{}) *Session {\n\tfor _, v := range a {\n\t\ts.inj.Map(v)\n\t}\n\treturn s\n}\n\nfunc (s *Session) appendCmd(cmd string, args []string, cwd Dir) {\n\tif s.started {\n\t\ts.started = false\n\t\ts.cmds = make([]*exec.Cmd, 0)\n\t}\n\tenvs := make([]string, 0, len(s.Env))\n\tfor k, v := range s.Env {\n\t\tenvs = append(envs, k+\"=\"+v)\n\t}\n\tv, ok := s.alias[cmd]\n\tif ok {\n\t\tcmd = v[0]\n\t\targs = append(v[1:], args...)\n\t}\n\tc := exec.Command(cmd, args...)\n\tc.Env = envs\n\tc.Dir = string(cwd)\n\ts.cmds = append(s.cmds, c)\n}\n<commit_msg>fix for windows<commit_after>package sh\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/codegangsta\/inject\"\n)\n\ntype Return struct {\n\tStdout string\n\tStderr string\n}\n\nfunc (r *Return) String() string {\n\treturn r.Stdout\n}\n\nfunc (r *Return) Trim() string {\n\treturn strings.TrimSpace(r.Stdout)\n}\n\nfunc Capture(a ...interface{}) (ret *Return, err error) {\n\ts := NewSession()\n\treturn s.Capture(a...)\n}\n\ntype Dir string\n\ntype Session struct {\n\tinj inject.Injector\n\talias map[string][]string\n\tcmds []*exec.Cmd\n\tstarted bool\n\tEnv map[string]string\n\tStdout io.Writer\n\tStderr io.Writer\n}\n\nfunc NewSession(a ...interface{}) *Session {\n\tenv := make(map[string]string)\n\tfor _, key := range []string{\"PATH\"} {\n\t\tenv[key] = os.Getenv(key)\n\t}\n\ts := &Session{\n\t\tinj: inject.New(),\n\t\talias: make(map[string][]string),\n\t\tStdout: os.Stdout,\n\t\tStderr: os.Stderr,\n\t\tEnv: env,\n\t}\n\tdir := Dir(\"\")\n\targs := []string{}\n\ts.inj.Map(env).Map(dir).Map(args)\n\tfor _, v := range a {\n\t\ts.inj.Map(v)\n\t}\n\treturn s\n}\n\nfunc (s *Session) Alias(alias, cmd string, args ...string) {\n\tv := []string{cmd}\n\tv = append(v, args...)\n\ts.alias[alias] = v\n}\n\nfunc (s *Session) Command(a ...interface{}) *Session {\n\tfor _, v := range a {\n\t\ts.inj.Map(v)\n\t}\n\ts.inj.Invoke(s.appendCmd)\n\treturn s\n}\n\nfunc (s *Session) Call(a ...interface{}) error {\n\treturn s.Command(a...).Run()\n}\n\n\/*\nfunc (s *Session) Exec(cmd string, args ...string) error {\n\treturn s.Call(cmd, args)\n}\n*\/\n\nfunc (s *Session) Capture(a ...interface{}) (ret *Return, err error) {\n\tstdout := new(bytes.Buffer)\n\tstderr := new(bytes.Buffer)\n\toldout, olderr := s.Stdout, s.Stderr\n\ts.Stdout, s.Stderr = stdout, stderr\n\terr = s.Call(a...)\n\ts.Stdout, s.Stderr = oldout, olderr\n\n\tret = new(Return)\n\tret.Stdout = string(stdout.Bytes())\n\tret.Stderr = string(stderr.Bytes())\n\treturn\n}\n\nfunc (s *Session) Set(a ...interface{}) *Session {\n\tfor _, v := range a {\n\t\ts.inj.Map(v)\n\t}\n\treturn s\n}\n\nfunc (s *Session) appendCmd(cmd string, args []string, cwd Dir) {\n\tif s.started {\n\t\ts.started = false\n\t\ts.cmds = make([]*exec.Cmd, 0)\n\t}\n\tenvs := make([]string, 0, len(s.Env))\n\tfor k, v := range s.Env {\n\t\tenvs = append(envs, k+\"=\"+v)\n\t}\n\tv, ok := s.alias[cmd]\n\tif ok {\n\t\tcmd = v[0]\n\t\targs = append(v[1:], args...)\n\t}\n\tc := exec.Command(cmd, args...)\n\tc.Env = envs\n\tc.Dir = string(cwd)\n\ts.cmds = append(s.cmds, c)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Cocoon Alarm Ltd.\n\/\/\n\/\/ See LICENSE file for terms and conditions.\n\npackage libflac\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/jbert\/testify\/assert\"\n)\n\nfunc TestDecode(t *testing.T) {\n\ta := assert.New(t)\n\n\td, err := NewDecoder(\"data\/nonexistent.flac\")\n\n\ta.Equal(d, (*Decoder)(nil), \"decoder is nil\")\n\n\td, err = NewDecoder(\"data\/sine24-00.flac\")\n\n\ta.Equal(err, nil, \"err is nil\")\n\ta.Equal(d.Channels, 1, \"channels is 1\")\n\ta.Equal(d.Depth, 24, \"depth is 24\")\n\ta.Equal(d.Rate, 48000, \"depth is 48000\")\n\n\tsamples := 0\n\n\tf, err := d.ReadFrame()\n\n\ta.Equal(err, nil, \"err is nil\")\n\ta.Equal(f.Channels, 1, \"channels is 1\")\n\ta.Equal(f.Depth, 24, \"depth is 24\")\n\ta.Equal(f.Rate, 48000, \"depth is 48000\")\n\n\tsamples = samples + len(f.Buffer)\n\n\tfor {\n\t\tf, err := d.ReadFrame()\n\n\t\tif err == nil || err == io.EOF {\n\t\t\tif f != nil {\n\t\t\t\tsamples = samples + len(f.Buffer)\n\t\t\t}\n\t\t} else {\n\t\t\ta.Equal(err, nil, \"error reported\")\n\t\t\tbreak\n\t\t}\n\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\n\ta.Equal(samples, 200000, \"all samples read\")\n\td.Close()\n}\n\nfunc TestEncode(t *testing.T) {\n\ta := assert.New(t)\n\n\tfileName := \"data\/test.flac\"\n\n\te, err := NewEncoder(fileName, 2, 24, 48000)\n\n\ta.Equal(err, nil, \"err is nil\")\n\n\tf := Frame{Channels: 1, Depth: 24, Rate: 48000}\n\n\terr = e.WriteFrame(f)\n\n\ta.Error(err, \"channels mismatch\")\n\n\tf.Channels = 2\n\tf.Buffer = make([]int32, 2*100)\n\n\terr = e.WriteFrame(f)\n\n\ta.Equal(err, nil, \"frame encoded\")\n\n\te.Close()\n\n\tos.Remove(fileName)\n}\n\nfunc TestRoundTrip(t *testing.T) {\n\ta := assert.New(t)\n\n\tinputFile := \"data\/sine24-00.flac\"\n\toutputFile := \"data\/test.flac\"\n\n\td, err := NewDecoder(inputFile)\n\n\ta.Equal(err, nil, \"err is nil\")\n\n\te, err := NewEncoder(outputFile, d.Channels, d.Depth, d.Rate)\n\n\tsamples := 0\n\n\tfor {\n\t\tf, err := d.ReadFrame()\n\t\tif err == nil || err == io.EOF {\n\t\t\tif f != nil {\n\t\t\t\t_ = e.WriteFrame(*f)\n\t\t\t\tsamples = samples + len(f.Buffer)\n\t\t\t}\n\t\t} else {\n\t\t\ta.Equal(err, nil, \"error reported\")\n\t\t\tbreak\n\t\t}\n\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\n\ta.Equal(samples, 200000, \"all samples read\")\n\td.Close()\n\te.Close()\n\n\tos.Remove(outputFile)\n}\n\nfunc TestRoundTripStereo(t *testing.T) {\n\ta := assert.New(t)\n\n\tinputFile := \"data\/sine16-12.flac\"\n\toutputFile := \"data\/test.flac\"\n\n\td, err := NewDecoder(inputFile)\n\n\ta.Equal(err, nil, \"err is nil\")\n\n\te, err := NewEncoder(outputFile, d.Channels, d.Depth, d.Rate)\n\n\tsamples := 0\n\n\tfor {\n\t\tf, err := d.ReadFrame()\n\t\tif err == nil || err == io.EOF {\n\t\t\tif f != nil {\n\t\t\t\t_ = e.WriteFrame(*f)\n\t\t\t\tsamples = samples + len(f.Buffer)\n\t\t\t}\n\t\t} else {\n\t\t\ta.Equal(err, nil, \"error reported\")\n\t\t\tbreak\n\t\t}\n\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\n\ta.Equal(samples, 400000, \"all samples read\")\n\td.Close()\n\te.Close()\n\n\tos.Remove(outputFile)\n}\n<commit_msg>add test for nil encoder if we can't create the file, check err (not nil) on failed decoder open<commit_after>\/\/ Copyright 2015 Cocoon Alarm Ltd.\n\/\/\n\/\/ See LICENSE file for terms and conditions.\n\npackage libflac\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/jbert\/testify\/assert\"\n)\n\nfunc TestDecode(t *testing.T) {\n\ta := assert.New(t)\n\n\td, err := NewDecoder(\"data\/nonexistent.flac\")\n\ta.Equal(d, (*Decoder)(nil), \"decoder is nil\")\n\ta.NotNil(err, \"err is not nil\")\n\n\td, err = NewDecoder(\"data\/sine24-00.flac\")\n\n\ta.Equal(err, nil, \"err is nil\")\n\ta.Equal(d.Channels, 1, \"channels is 1\")\n\ta.Equal(d.Depth, 24, \"depth is 24\")\n\ta.Equal(d.Rate, 48000, \"depth is 48000\")\n\n\tsamples := 0\n\n\tf, err := d.ReadFrame()\n\n\ta.Equal(err, nil, \"err is nil\")\n\ta.Equal(f.Channels, 1, \"channels is 1\")\n\ta.Equal(f.Depth, 24, \"depth is 24\")\n\ta.Equal(f.Rate, 48000, \"depth is 48000\")\n\n\tsamples = samples + len(f.Buffer)\n\n\tfor {\n\t\tf, err := d.ReadFrame()\n\n\t\tif err == nil || err == io.EOF {\n\t\t\tif f != nil {\n\t\t\t\tsamples = samples + len(f.Buffer)\n\t\t\t}\n\t\t} else {\n\t\t\ta.Equal(err, nil, \"error reported\")\n\t\t\tbreak\n\t\t}\n\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\n\ta.Equal(samples, 200000, \"all samples read\")\n\td.Close()\n}\n\nfunc TestEncode(t *testing.T) {\n\ta := assert.New(t)\n\n\te, err := NewEncoder(\"not-existdir\/foo.flac\", 2, 24, 48000)\n\ta.Equal(e, (*Encoder)(nil), \"encoder is nil\")\n\ta.NotNil(err, \"err is not nil\")\n\n\tfileName := \"data\/test.flac\"\n\n\te, err = NewEncoder(fileName, 2, 24, 48000)\n\n\ta.Equal(err, nil, \"err is nil\")\n\n\tf := Frame{Channels: 1, Depth: 24, Rate: 48000}\n\n\terr = e.WriteFrame(f)\n\n\ta.Error(err, \"channels mismatch\")\n\n\tf.Channels = 2\n\tf.Buffer = make([]int32, 2*100)\n\n\terr = e.WriteFrame(f)\n\n\ta.Equal(err, nil, \"frame encoded\")\n\n\te.Close()\n\n\tos.Remove(fileName)\n}\n\nfunc TestRoundTrip(t *testing.T) {\n\ta := assert.New(t)\n\n\tinputFile := \"data\/sine24-00.flac\"\n\toutputFile := \"data\/test.flac\"\n\n\td, err := NewDecoder(inputFile)\n\n\ta.Equal(err, nil, \"err is nil\")\n\n\te, err := NewEncoder(outputFile, d.Channels, d.Depth, d.Rate)\n\n\tsamples := 0\n\n\tfor {\n\t\tf, err := d.ReadFrame()\n\t\tif err == nil || err == io.EOF {\n\t\t\tif f != nil {\n\t\t\t\t_ = e.WriteFrame(*f)\n\t\t\t\tsamples = samples + len(f.Buffer)\n\t\t\t}\n\t\t} else {\n\t\t\ta.Equal(err, nil, \"error reported\")\n\t\t\tbreak\n\t\t}\n\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\n\ta.Equal(samples, 200000, \"all samples read\")\n\td.Close()\n\te.Close()\n\n\tos.Remove(outputFile)\n}\n\nfunc TestRoundTripStereo(t *testing.T) {\n\ta := assert.New(t)\n\n\tinputFile := \"data\/sine16-12.flac\"\n\toutputFile := \"data\/test.flac\"\n\n\td, err := NewDecoder(inputFile)\n\n\ta.Equal(err, nil, \"err is nil\")\n\n\te, err := NewEncoder(outputFile, d.Channels, d.Depth, d.Rate)\n\n\tsamples := 0\n\n\tfor {\n\t\tf, err := d.ReadFrame()\n\t\tif err == nil || err == io.EOF {\n\t\t\tif f != nil {\n\t\t\t\t_ = e.WriteFrame(*f)\n\t\t\t\tsamples = samples + len(f.Buffer)\n\t\t\t}\n\t\t} else {\n\t\t\ta.Equal(err, nil, \"error reported\")\n\t\t\tbreak\n\t\t}\n\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\n\ta.Equal(samples, 400000, \"all samples read\")\n\td.Close()\n\te.Close()\n\n\tos.Remove(outputFile)\n}\n<|endoftext|>"} {"text":"<commit_before>package keys\n\nfunc MinStep(n int) int {\n\tif n <= 1 {\n\t\treturn 0\n\t}\n\tdp := make([]int, n+1)\n\tfor i := 1; i <= n; i++ {\n\t\tdp[i] = i\n\t\tfor j := 2; j < i; j++ {\n\t\t\tif i%j == 0 {\n\t\t\t\tdp[i] = j + dp[i\/j]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn dp[n]\n}\n\nfunc MinStep2(n int) int {\n\tif n <= 1 {\n\t\treturn 0\n\t}\n\tvar ret int\n\tfor i := 2; i <= n; i++ {\n\t\tfor n%i == 0 {\n\t\t\tret, n = ret+i, n\/i\n\t\t}\n\t}\n\treturn ret\n}\n<commit_msg>with comments<commit_after>package keys\n\nfunc MinStep(n int) int {\n\tif n <= 1 {\n\t\treturn 0\n\t}\n\tdp := make([]int, n+1)\n\t\/\/ dp[k] represents the **min steps** to get k 'A's\n\t\/\/ so to get min steps, the action must be ended with paste\n\t\/\/ suppose the last copy-all pressed when there are j 'A' s\n\t\/\/ to get k 'A's we have to (k-j)\/j steps with paste\n\t\/\/ so the total steps to get k 'A's from j 'A's is 1 (copy-all) + (k-j)\/j (paste)\n\t\/\/ so we have this formular dp[k] = j + dp[k\/j]\n\tfor i := 1; i <= n; i++ {\n\t\tdp[i] = i\n\t\tfor j := 2; j < i; j++ {\n\t\t\tif i%j == 0 {\n\t\t\t\tdp[i] = j + dp[i\/j]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn dp[n]\n}\n\nfunc MinStep2(n int) int {\n\tif n <= 1 {\n\t\treturn 0\n\t}\n\tvar ret int\n\tfor i := 2; i <= n; i++ {\n\t\tfor n%i == 0 {\n\t\t\tret, n = ret+i, n\/i\n\t\t}\n\t}\n\treturn ret\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"log\"\n\t\"testing\"\n\n\t\"fmt\"\n\t\"github.com\/tg123\/sshpiper\/sshpiperd\/auditor\"\n\t\"github.com\/tg123\/sshpiper\/sshpiperd\/challenger\"\n\t\"github.com\/tg123\/sshpiper\/sshpiperd\/registry\"\n\t\"github.com\/tg123\/sshpiper\/sshpiperd\/upstream\"\n\t\"golang.org\/x\/crypto\/ssh\"\n\t\"net\"\n\t\"time\"\n)\n\ntype testplugin struct {\n\tname string\n\tinit func(logger *log.Logger) error\n}\n\nfunc (p *testplugin) GetName() string {\n\treturn p.name\n}\n\nfunc (p *testplugin) GetOpts() interface{} {\n\treturn nil\n}\n\nfunc (p *testplugin) Init(logger *log.Logger) error {\n\tif p.init == nil {\n\t\treturn nil\n\t}\n\treturn p.init(logger)\n}\n\nfunc Test_getAndInstall(t *testing.T) {\n\n\t\/\/ ignore empty\n\tgetAndInstall(\"\", func(n string) registry.Plugin {\n\t\tt.Errorf(\"should not call get\")\n\t\treturn nil\n\t}, func(plugin registry.Plugin) error {\n\t\tt.Errorf(\"should not call install\")\n\t\treturn nil\n\t}, nil)\n\n\t\/\/ fail when not found\n\terr := getAndInstall(\"test\", func(n string) registry.Plugin {\n\t\tif n != \"test\" {\n\t\t\tt.Errorf(\"plugin name changed\")\n\t\t}\n\t\treturn nil\n\t}, func(plugin registry.Plugin) error {\n\t\tt.Errorf(\"should not call install\")\n\t\treturn nil\n\t}, nil)\n\n\tif err == nil {\n\t\tt.Errorf(\"should err when not found\")\n\t}\n\n\t\/\/ init err\n\terr = getAndInstall(\"test\", func(n string) registry.Plugin {\n\t\treturn &testplugin{\n\t\t\tinit: func(logger *log.Logger) error {\n\t\t\t\treturn fmt.Errorf(\"init failed\")\n\t\t\t},\n\t\t}\n\t}, func(plugin registry.Plugin) error {\n\t\treturn fmt.Errorf(\"test\")\n\t}, nil)\n\n\tif err == nil {\n\t\tt.Errorf(\"should err when not found\")\n\t}\n\n\t\/\/ call init\n\tinited := false\n\tinstalled := false\n\terr = getAndInstall(\"test\", func(n string) registry.Plugin {\n\t\tif n != \"test\" {\n\t\t\tt.Errorf(\"plugin name changed\")\n\t\t}\n\t\treturn &testplugin{\n\t\t\tinit: func(logger *log.Logger) error {\n\t\t\t\tinited = true\n\t\t\t\treturn nil\n\t\t\t},\n\t\t}\n\t}, func(plugin registry.Plugin) error {\n\t\tif !inited {\n\t\t\tt.Errorf(\"not inited\")\n\t\t}\n\n\t\tinstalled = true\n\t\treturn nil\n\t}, nil)\n\n\tif !installed {\n\t\tt.Errorf(\"not installed\")\n\t}\n\n\tif err != nil {\n\t\tt.Errorf(\"should err when not found\")\n\t}\n}\n\ntype testupstream struct {\n\ttestplugin\n\n\th upstream.Handler\n}\n\nfunc (t *testupstream) GetHandler() upstream.Handler {\n\treturn t.h\n}\n\ntype testchallenger struct {\n\ttestplugin\n\n\th challenger.Handler\n}\n\nfunc (t *testchallenger) GetHandler() challenger.Handler {\n\treturn t.h\n}\n\ntype testauditorprovider struct {\n\ttestplugin\n\n\ta auditor.Auditor\n}\n\nfunc (t *testauditorprovider) Create(ssh.ConnMetadata) (auditor.Auditor, error) {\n\treturn t.a, nil\n}\n\ntype testauditor struct {\n\tup auditor.Hook\n\tdown auditor.Hook\n}\n\nfunc (t *testauditor) GetUpstreamHook() auditor.Hook {\n\treturn t.up\n}\n\nfunc (t *testauditor) GetDownstreamHook() auditor.Hook {\n\treturn t.down\n}\n\nfunc (t *testauditor) Close() error {\n\treturn nil\n}\n\nfunc Test_installDriver(t *testing.T) {\n\tvar (\n\t\tupstreamName = fmt.Sprintf(\"u_%v\", time.Now().UTC().UnixNano())\n\t\tchallengerName = fmt.Sprintf(\"c_%v\", time.Now().UTC().UnixNano())\n\t\tauditorName = fmt.Sprintf(\"a_%v\", time.Now().UTC().UnixNano())\n\t\tupstreamErrName = fmt.Sprintf(\"ue_%v\", time.Now().UTC().UnixNano())\n\t\tupstreamNilName = fmt.Sprintf(\"un_%v\", time.Now().UTC().UnixNano())\n\t)\n\n\tfindUpstream := func(conn ssh.ConnMetadata) (net.Conn, *ssh.SSHPiperAuthPipe, error) {\n\t\treturn nil, nil, nil\n\t}\n\n\tupstream.Register(upstreamName, &testupstream{\n\t\ttestplugin: testplugin{\n\t\t\tname: upstreamName,\n\t\t},\n\t\th: findUpstream,\n\t})\n\n\tupstream.Register(upstreamErrName, &testupstream{\n\t\ttestplugin: testplugin{\n\t\t\tname: upstreamErrName,\n\t\t\tinit: func(logger *log.Logger) error {\n\t\t\t\treturn fmt.Errorf(\"test err\")\n\t\t\t},\n\t\t},\n\t\th: findUpstream,\n\t})\n\n\tupstream.Register(upstreamNilName, &testupstream{\n\t\ttestplugin: testplugin{\n\t\t\tname: upstreamNilName,\n\t\t},\n\t\th: nil,\n\t})\n\n\tchallenger.Register(challengerName, &testchallenger{\n\t\ttestplugin: testplugin{\n\t\t\tname: upstreamName,\n\t\t},\n\t\th: func(conn ssh.ConnMetadata, client ssh.KeyboardInteractiveChallenge) (bool, error) {\n\t\t\treturn true, nil\n\t\t},\n\t})\n\n\tauditor.Register(auditorName, &testauditorprovider{})\n\n\t\/\/ empty driver name\n\t{\n\t\tpiper := &ssh.SSHPiperConfig{}\n\t\t_, err := installDrivers(piper, &piperdConfig{\n\t\t\tUpstreamDriver: \"\",\n\t\t}, nil)\n\n\t\tif err == nil {\n\t\t\tt.Errorf(\"should fail when empty driver\")\n\t\t}\n\t}\n\n\t\/\/ install upstream\n\t{\n\t\tpiper := &ssh.SSHPiperConfig{}\n\t\t_, err := installDrivers(piper, &piperdConfig{\n\t\t\tUpstreamDriver: upstreamName,\n\t\t}, nil)\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"install failed %v\", err)\n\t\t}\n\n\t\tif _, _, err := piper.FindUpstream(nil); err != nil {\n\t\t\tt.Errorf(\"install wrong func\")\n\t\t}\n\n\t\tif piper.AdditionalChallenge != nil {\n\t\t\tt.Errorf(\"should not install challenger\")\n\t\t}\n\t}\n\n\t\/\/ install upstream with failed init\n\t{\n\t\tpiper := &ssh.SSHPiperConfig{}\n\t\t_, err := installDrivers(piper, &piperdConfig{\n\t\t\tUpstreamDriver: upstreamErrName,\n\t\t}, nil)\n\n\t\tif err == nil {\n\t\t\tt.Errorf(\"install should fail\")\n\t\t}\n\n\t\tif piper.FindUpstream != nil {\n\t\t\tt.Errorf(\"should not install upstream provider\")\n\t\t}\n\n\t\tif piper.AdditionalChallenge != nil {\n\t\t\tt.Errorf(\"should not install challenger\")\n\t\t}\n\t}\n\n\t\/\/ install upstream with nil handler\n\t{\n\t\tpiper := &ssh.SSHPiperConfig{}\n\t\t_, err := installDrivers(piper, &piperdConfig{\n\t\t\tUpstreamDriver: upstreamNilName,\n\t\t}, nil)\n\n\t\tif err == nil {\n\t\t\tt.Errorf(\"install should fail\")\n\t\t}\n\n\t\tif piper.FindUpstream != nil {\n\t\t\tt.Errorf(\"should not install upstream provider\")\n\t\t}\n\n\t\tif piper.AdditionalChallenge != nil {\n\t\t\tt.Errorf(\"should not install challenger\")\n\t\t}\n\t}\n\n\t\/\/ install challenger\n\t{\n\t\tpiper := &ssh.SSHPiperConfig{}\n\t\t_, err := installDrivers(piper, &piperdConfig{\n\t\t\tUpstreamDriver: upstreamName,\n\t\t\tChallengerDriver: challengerName,\n\t\t}, nil)\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"install failed %v\", err)\n\t\t}\n\n\t\tif ok, err := piper.AdditionalChallenge(nil, nil); err != nil || !ok {\n\t\t\tt.Errorf(\"should install challenger\")\n\t\t}\n\t}\n\n\t\/\/ install auditor\n\t{\n\t\tpiper := &ssh.SSHPiperConfig{}\n\t\tap, err := installDrivers(piper, &piperdConfig{\n\t\t\tUpstreamDriver: upstreamName,\n\t\t\tAuditorDriver: auditorName,\n\t\t}, nil)\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"install failed %v\", err)\n\t\t}\n\n\t\tif ap == nil {\n\t\t\tt.Errorf(\"nil auditor provider\")\n\t\t}\n\n\t\tap0 := ap.(*testauditorprovider)\n\n\t\tap0.a = &testauditor{\n\t\t\tup: func(conn ssh.ConnMetadata, msg []byte) ([]byte, error) {\n\t\t\t\tmsg[0] = 42\n\t\t\t\treturn msg, nil\n\t\t\t},\n\t\t\tdown: func(conn ssh.ConnMetadata, msg []byte) ([]byte, error) {\n\t\t\t\tmsg[0] = 100\n\t\t\t\treturn msg, nil\n\t\t\t},\n\t\t}\n\n\t\ta, err := ap.Create(nil)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"install failed %v\", err)\n\t\t}\n\n\t\tm := []byte{0}\n\n\t\ta.GetUpstreamHook()(nil, m)\n\t\tif m[0] != 42 {\n\t\t\tt.Errorf(\"upstream not handled\")\n\t\t}\n\n\t\ta.GetDownstreamHook()(nil, m)\n\t\tif m[0] != 100 {\n\t\t\tt.Errorf(\"downstream not handled\")\n\t\t}\n\t}\n}\n<commit_msg>fix missing test sym<commit_after>package main\n\nimport (\n\t\"log\"\n\t\"testing\"\n\n\t\"fmt\"\n\t\"github.com\/tg123\/sshpiper\/sshpiperd\/auditor\"\n\t\"github.com\/tg123\/sshpiper\/sshpiperd\/challenger\"\n\t\"github.com\/tg123\/sshpiper\/sshpiperd\/registry\"\n\t\"github.com\/tg123\/sshpiper\/sshpiperd\/upstream\"\n\t\"golang.org\/x\/crypto\/ssh\"\n\t\"net\"\n\t\"time\"\n)\n\ntype testplugin struct {\n\tname string\n\tinit func(logger *log.Logger) error\n}\n\nfunc (p *testplugin) GetName() string {\n\treturn p.name\n}\n\nfunc (p *testplugin) GetOpts() interface{} {\n\treturn nil\n}\n\nfunc (p *testplugin) Init(logger *log.Logger) error {\n\tif p.init == nil {\n\t\treturn nil\n\t}\n\treturn p.init(logger)\n}\n\nfunc Test_getAndInstall(t *testing.T) {\n\n\t\/\/ ignore empty\n\tgetAndInstall(\"\", func(n string) registry.Plugin {\n\t\tt.Errorf(\"should not call get\")\n\t\treturn nil\n\t}, func(plugin registry.Plugin) error {\n\t\tt.Errorf(\"should not call install\")\n\t\treturn nil\n\t}, nil)\n\n\t\/\/ fail when not found\n\terr := getAndInstall(\"test\", func(n string) registry.Plugin {\n\t\tif n != \"test\" {\n\t\t\tt.Errorf(\"plugin name changed\")\n\t\t}\n\t\treturn nil\n\t}, func(plugin registry.Plugin) error {\n\t\tt.Errorf(\"should not call install\")\n\t\treturn nil\n\t}, nil)\n\n\tif err == nil {\n\t\tt.Errorf(\"should err when not found\")\n\t}\n\n\t\/\/ init err\n\terr = getAndInstall(\"test\", func(n string) registry.Plugin {\n\t\treturn &testplugin{\n\t\t\tinit: func(logger *log.Logger) error {\n\t\t\t\treturn fmt.Errorf(\"init failed\")\n\t\t\t},\n\t\t}\n\t}, func(plugin registry.Plugin) error {\n\t\treturn fmt.Errorf(\"test\")\n\t}, nil)\n\n\tif err == nil {\n\t\tt.Errorf(\"should err when not found\")\n\t}\n\n\t\/\/ call init\n\tinited := false\n\tinstalled := false\n\terr = getAndInstall(\"test\", func(n string) registry.Plugin {\n\t\tif n != \"test\" {\n\t\t\tt.Errorf(\"plugin name changed\")\n\t\t}\n\t\treturn &testplugin{\n\t\t\tinit: func(logger *log.Logger) error {\n\t\t\t\tinited = true\n\t\t\t\treturn nil\n\t\t\t},\n\t\t}\n\t}, func(plugin registry.Plugin) error {\n\t\tif !inited {\n\t\t\tt.Errorf(\"not inited\")\n\t\t}\n\n\t\tinstalled = true\n\t\treturn nil\n\t}, nil)\n\n\tif !installed {\n\t\tt.Errorf(\"not installed\")\n\t}\n\n\tif err != nil {\n\t\tt.Errorf(\"should err when not found\")\n\t}\n}\n\ntype testupstream struct {\n\ttestplugin\n\n\th upstream.Handler\n}\n\nfunc (t *testupstream) GetHandler() upstream.Handler {\n\treturn t.h\n}\n\ntype testchallenger struct {\n\ttestplugin\n\n\th challenger.Handler\n}\n\nfunc (t *testchallenger) GetHandler() challenger.Handler {\n\treturn t.h\n}\n\ntype testauditorprovider struct {\n\ttestplugin\n\n\ta auditor.Auditor\n}\n\nfunc (t *testauditorprovider) Create(ssh.ConnMetadata) (auditor.Auditor, error) {\n\treturn t.a, nil\n}\n\ntype testauditor struct {\n\tup auditor.Hook\n\tdown auditor.Hook\n}\n\nfunc (t *testauditor) GetUpstreamHook() auditor.Hook {\n\treturn t.up\n}\n\nfunc (t *testauditor) GetDownstreamHook() auditor.Hook {\n\treturn t.down\n}\n\nfunc (t *testauditor) Close() error {\n\treturn nil\n}\n\nfunc Test_installDriver(t *testing.T) {\n\tvar (\n\t\tupstreamName = fmt.Sprintf(\"u_%v\", time.Now().UTC().UnixNano())\n\t\tchallengerName = fmt.Sprintf(\"c_%v\", time.Now().UTC().UnixNano())\n\t\tauditorName = fmt.Sprintf(\"a_%v\", time.Now().UTC().UnixNano())\n\t\tupstreamErrName = fmt.Sprintf(\"ue_%v\", time.Now().UTC().UnixNano())\n\t\tupstreamNilName = fmt.Sprintf(\"un_%v\", time.Now().UTC().UnixNano())\n\t)\n\n\tfindUpstream := func(conn ssh.ConnMetadata) (net.Conn, *ssh.AuthPipe, error) {\n\t\treturn nil, nil, nil\n\t}\n\n\tupstream.Register(upstreamName, &testupstream{\n\t\ttestplugin: testplugin{\n\t\t\tname: upstreamName,\n\t\t},\n\t\th: findUpstream,\n\t})\n\n\tupstream.Register(upstreamErrName, &testupstream{\n\t\ttestplugin: testplugin{\n\t\t\tname: upstreamErrName,\n\t\t\tinit: func(logger *log.Logger) error {\n\t\t\t\treturn fmt.Errorf(\"test err\")\n\t\t\t},\n\t\t},\n\t\th: findUpstream,\n\t})\n\n\tupstream.Register(upstreamNilName, &testupstream{\n\t\ttestplugin: testplugin{\n\t\t\tname: upstreamNilName,\n\t\t},\n\t\th: nil,\n\t})\n\n\tchallenger.Register(challengerName, &testchallenger{\n\t\ttestplugin: testplugin{\n\t\t\tname: upstreamName,\n\t\t},\n\t\th: func(conn ssh.ConnMetadata, client ssh.KeyboardInteractiveChallenge) (bool, error) {\n\t\t\treturn true, nil\n\t\t},\n\t})\n\n\tauditor.Register(auditorName, &testauditorprovider{})\n\n\t\/\/ empty driver name\n\t{\n\t\tpiper := &ssh.PiperConfig{}\n\t\t_, err := installDrivers(piper, &piperdConfig{\n\t\t\tUpstreamDriver: \"\",\n\t\t}, nil)\n\n\t\tif err == nil {\n\t\t\tt.Errorf(\"should fail when empty driver\")\n\t\t}\n\t}\n\n\t\/\/ install upstream\n\t{\n\t\tpiper := &ssh.PiperConfig{}\n\t\t_, err := installDrivers(piper, &piperdConfig{\n\t\t\tUpstreamDriver: upstreamName,\n\t\t}, nil)\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"install failed %v\", err)\n\t\t}\n\n\t\tif _, _, err := piper.FindUpstream(nil); err != nil {\n\t\t\tt.Errorf(\"install wrong func\")\n\t\t}\n\n\t\tif piper.AdditionalChallenge != nil {\n\t\t\tt.Errorf(\"should not install challenger\")\n\t\t}\n\t}\n\n\t\/\/ install upstream with failed init\n\t{\n\t\tpiper := &ssh.PiperConfig{}\n\t\t_, err := installDrivers(piper, &piperdConfig{\n\t\t\tUpstreamDriver: upstreamErrName,\n\t\t}, nil)\n\n\t\tif err == nil {\n\t\t\tt.Errorf(\"install should fail\")\n\t\t}\n\n\t\tif piper.FindUpstream != nil {\n\t\t\tt.Errorf(\"should not install upstream provider\")\n\t\t}\n\n\t\tif piper.AdditionalChallenge != nil {\n\t\t\tt.Errorf(\"should not install challenger\")\n\t\t}\n\t}\n\n\t\/\/ install upstream with nil handler\n\t{\n\t\tpiper := &ssh.PiperConfig{}\n\t\t_, err := installDrivers(piper, &piperdConfig{\n\t\t\tUpstreamDriver: upstreamNilName,\n\t\t}, nil)\n\n\t\tif err == nil {\n\t\t\tt.Errorf(\"install should fail\")\n\t\t}\n\n\t\tif piper.FindUpstream != nil {\n\t\t\tt.Errorf(\"should not install upstream provider\")\n\t\t}\n\n\t\tif piper.AdditionalChallenge != nil {\n\t\t\tt.Errorf(\"should not install challenger\")\n\t\t}\n\t}\n\n\t\/\/ install challenger\n\t{\n\t\tpiper := &ssh.PiperConfig{}\n\t\t_, err := installDrivers(piper, &piperdConfig{\n\t\t\tUpstreamDriver: upstreamName,\n\t\t\tChallengerDriver: challengerName,\n\t\t}, nil)\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"install failed %v\", err)\n\t\t}\n\n\t\tif ok, err := piper.AdditionalChallenge(nil, nil); err != nil || !ok {\n\t\t\tt.Errorf(\"should install challenger\")\n\t\t}\n\t}\n\n\t\/\/ install auditor\n\t{\n\t\tpiper := &ssh.PiperConfig{}\n\t\tap, err := installDrivers(piper, &piperdConfig{\n\t\t\tUpstreamDriver: upstreamName,\n\t\t\tAuditorDriver: auditorName,\n\t\t}, nil)\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"install failed %v\", err)\n\t\t}\n\n\t\tif ap == nil {\n\t\t\tt.Errorf(\"nil auditor provider\")\n\t\t}\n\n\t\tap0 := ap.(*testauditorprovider)\n\n\t\tap0.a = &testauditor{\n\t\t\tup: func(conn ssh.ConnMetadata, msg []byte) ([]byte, error) {\n\t\t\t\tmsg[0] = 42\n\t\t\t\treturn msg, nil\n\t\t\t},\n\t\t\tdown: func(conn ssh.ConnMetadata, msg []byte) ([]byte, error) {\n\t\t\t\tmsg[0] = 100\n\t\t\t\treturn msg, nil\n\t\t\t},\n\t\t}\n\n\t\ta, err := ap.Create(nil)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"install failed %v\", err)\n\t\t}\n\n\t\tm := []byte{0}\n\n\t\ta.GetUpstreamHook()(nil, m)\n\t\tif m[0] != 42 {\n\t\t\tt.Errorf(\"upstream not handled\")\n\t\t}\n\n\t\ta.GetDownstreamHook()(nil, m)\n\t\tif m[0] != 100 {\n\t\t\tt.Errorf(\"downstream not handled\")\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage api_test\n\nimport (\n\t\"io\"\n\t\"net\"\n\n\tgc \"launchpad.net\/gocheck\"\n\n\tjujutesting \"launchpad.net\/juju-core\/juju\/testing\"\n\t\"launchpad.net\/juju-core\/state\/api\"\n\t\"launchpad.net\/juju-core\/utils\/parallel\"\n)\n\ntype apiclientSuite struct {\n\tjujutesting.JujuConnSuite\n}\n\nvar _ = gc.Suite(&apiclientSuite{})\n\nfunc (s *apiclientSuite) TestOpenMultiple(c *gc.C) {\n\t\/\/ Create a socket that proxies to the API server.\n\tinfo := s.APIInfo(c)\n\tserverAddr := info.Addrs[0]\n\tserver, err := net.Dial(\"tcp\", serverAddr)\n\tc.Assert(err, gc.IsNil)\n\tdefer server.Close()\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tc.Assert(err, gc.IsNil)\n\tdefer listener.Close()\n\tgo func() {\n\t\tfor {\n\t\t\tclient, err := listener.Accept()\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgo io.Copy(client, server)\n\t\t\tgo io.Copy(server, client)\n\t\t}\n\t}()\n\n\t\/\/ Check that we can use the proxy to connect.\n\tproxyAddr := listener.Addr().String()\n\tinfo.Addrs = []string{proxyAddr}\n\tst, err := api.Open(info, api.DialOpts{})\n\tc.Assert(err, gc.IsNil)\n\tdefer st.Close()\n\tc.Assert(st.Addr(), gc.Equals, proxyAddr)\n\n\t\/\/ Now break Addrs[0], and ensure that Addrs[1]\n\t\/\/ is successfully connected to.\n\tinfo.Addrs = []string{proxyAddr, serverAddr}\n\tlistener.Close()\n\tst, err = api.Open(info, api.DialOpts{})\n\tc.Assert(err, gc.IsNil)\n\tdefer st.Close()\n\tc.Assert(st.Addr(), gc.Equals, serverAddr)\n}\n\nfunc (s *apiclientSuite) TestOpenMultipleError(c *gc.C) {\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tc.Assert(err, gc.IsNil)\n\tdefer listener.Close()\n\tgo func() {\n\t\tfor {\n\t\t\tclient, err := listener.Accept()\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tclient.Close()\n\t\t}\n\t}()\n\tinfo := s.APIInfo(c)\n\taddr := listener.Addr().String()\n\tc.Logf(\"addr: %q\", addr)\n\tinfo.Addrs = []string{addr, addr, addr}\n\t_, err = api.Open(info, api.DialOpts{})\n\tc.Assert(err, gc.ErrorMatches, `timed out connecting to \"wss:\/\/.*\/\"`)\n}\n\nfunc (s *apiclientSuite) TestDialWebsocketStopped(c *gc.C) {\n\tstopped := make(chan struct{})\n\tf := api.NewWebsocketDialer(nil, api.DialOpts{})\n\tclose(stopped)\n\tresult, err := f(stopped)\n\tc.Assert(err, gc.Equals, parallel.ErrStopped)\n\tc.Assert(result, gc.IsNil)\n}\n<commit_msg>delete log statement<commit_after>\/\/ Copyright 2014 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage api_test\n\nimport (\n\t\"io\"\n\t\"net\"\n\n\tgc \"launchpad.net\/gocheck\"\n\n\tjujutesting \"launchpad.net\/juju-core\/juju\/testing\"\n\t\"launchpad.net\/juju-core\/state\/api\"\n\t\"launchpad.net\/juju-core\/utils\/parallel\"\n)\n\ntype apiclientSuite struct {\n\tjujutesting.JujuConnSuite\n}\n\nvar _ = gc.Suite(&apiclientSuite{})\n\nfunc (s *apiclientSuite) TestOpenMultiple(c *gc.C) {\n\t\/\/ Create a socket that proxies to the API server.\n\tinfo := s.APIInfo(c)\n\tserverAddr := info.Addrs[0]\n\tserver, err := net.Dial(\"tcp\", serverAddr)\n\tc.Assert(err, gc.IsNil)\n\tdefer server.Close()\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tc.Assert(err, gc.IsNil)\n\tdefer listener.Close()\n\tgo func() {\n\t\tfor {\n\t\t\tclient, err := listener.Accept()\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tgo io.Copy(client, server)\n\t\t\tgo io.Copy(server, client)\n\t\t}\n\t}()\n\n\t\/\/ Check that we can use the proxy to connect.\n\tproxyAddr := listener.Addr().String()\n\tinfo.Addrs = []string{proxyAddr}\n\tst, err := api.Open(info, api.DialOpts{})\n\tc.Assert(err, gc.IsNil)\n\tdefer st.Close()\n\tc.Assert(st.Addr(), gc.Equals, proxyAddr)\n\n\t\/\/ Now break Addrs[0], and ensure that Addrs[1]\n\t\/\/ is successfully connected to.\n\tinfo.Addrs = []string{proxyAddr, serverAddr}\n\tlistener.Close()\n\tst, err = api.Open(info, api.DialOpts{})\n\tc.Assert(err, gc.IsNil)\n\tdefer st.Close()\n\tc.Assert(st.Addr(), gc.Equals, serverAddr)\n}\n\nfunc (s *apiclientSuite) TestOpenMultipleError(c *gc.C) {\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tc.Assert(err, gc.IsNil)\n\tdefer listener.Close()\n\tgo func() {\n\t\tfor {\n\t\t\tclient, err := listener.Accept()\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tclient.Close()\n\t\t}\n\t}()\n\tinfo := s.APIInfo(c)\n\taddr := listener.Addr().String()\n\tinfo.Addrs = []string{addr, addr, addr}\n\t_, err = api.Open(info, api.DialOpts{})\n\tc.Assert(err, gc.ErrorMatches, `timed out connecting to \"wss:\/\/.*\/\"`)\n}\n\nfunc (s *apiclientSuite) TestDialWebsocketStopped(c *gc.C) {\n\tstopped := make(chan struct{})\n\tf := api.NewWebsocketDialer(nil, api.DialOpts{})\n\tclose(stopped)\n\tresult, err := f(stopped)\n\tc.Assert(err, gc.Equals, parallel.ErrStopped)\n\tc.Assert(result, gc.IsNil)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\thumanize \"github.com\/dustin\/go-humanize\"\n\t\"github.com\/gizak\/termui\"\n\t\"github.com\/luan\/idope\/fetcher\"\n)\n\ntype UI struct {\n\tselectedIndex int\n\tlistContent []string\n\tlistOffset int\n\n\tlistWidget *termui.List\n}\n\nfunc NewUI() *UI {\n\treturn &UI{}\n}\n\nfunc (ui *UI) Render() {\n\tui.listWidget.Items = ui.selectItem()\n\ttermui.Render(termui.Body)\n}\n\nfunc (ui *UI) Setup() {\n\terr := termui.Init()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlist := termui.NewList()\n\tlist.BorderLabel = \"LRPs\"\n\tlist.ItemFgColor = termui.ColorYellow\n\tui.listWidget = list\n\ttermui.Body.AddRows(\n\t\ttermui.NewRow(\n\t\t\ttermui.NewCol(9, 0, ui.listWidget),\n\t\t),\n\t)\n\tui.listWidget.Height = termui.TermHeight()\n\ttermui.Body.Align()\n\n\tui.Render()\n\tui.bindEvents()\n}\n\nfunc (ui *UI) Close() {\n\ttermui.Close()\n}\n\nfunc (ui *UI) Loop() {\n\ttermui.Loop()\n}\n\nfunc colorizeState(state string) string {\n\tswitch state {\n\tcase \"UNCLAIMED\":\n\t\treturn \"[UNCLAIMED](fg-white)\"\n\tcase \"CLAIMED\":\n\t\treturn \"[CLAIMED](fg-yellow)\"\n\tcase \"RUNNING\":\n\t\treturn \"[RUNNING](fg-green)\"\n\tcase \"CRASHED\":\n\t\treturn \"[CRASHED](fg-red)\"\n\tdefault:\n\t\treturn state\n\t}\n}\n\nfunc fmtBytes(s uint64) string {\n\treturn strings.Replace(humanize.Bytes(s), \" \", \"\", -1)\n}\n\nfunc lrpToStrings(lrp *fetcher.LRP) []string {\n\tret := []string{}\n\tret = append(ret,\n\t\tfmt.Sprintf(\n\t\t\t\"guid: [%s](fg-bold)\\t[instances:](fg-white) [%d](fg-white,fg-bold) \",\n\t\t\tlrp.Desired.ProcessGuid[:8], lrp.Desired.Instances,\n\t\t),\n\t)\n\tfor _, actual := range lrp.ActualLRPsByCPU(true) {\n\t\tstate := colorizeState(actual.ActualLRP.State)\n\t\tret = append(ret,\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"\\t[%2d](fg-white) %s %12s [%5.1f%%](fg-magenta) [%6s](fg-cyan)\/[%-6s](fg-cyan,fg-bold) [%6s](fg-red)\/[%-6s](fg-red,fg-bold)\",\n\t\t\t\tactual.ActualLRP.Index, actual.ActualLRP.CellId, state,\n\t\t\t\tactual.Metrics.CPU*100,\n\t\t\t\tfmtBytes(actual.Metrics.Memory), fmtBytes(uint64(lrp.Desired.MemoryMb*1000*1000)),\n\t\t\t\tfmtBytes(actual.Metrics.Disk), fmtBytes(uint64(lrp.Desired.DiskMb*1000*1000)),\n\t\t\t))\n\t}\n\treturn ret\n}\n\nfunc (ui *UI) SetState(state *fetcher.Data) {\n\tui.listContent = []string{}\n\tlrps := state.LRPs.SortedByProcessGuid()\n\tfor _, lrp := range lrps {\n\t\tcontent := lrpToStrings(lrp)\n\t\tui.listContent = append(ui.listContent, content...)\n\t}\n\tui.Render()\n}\n\nfunc (ui *UI) bindEvents() {\n\ttermui.Handle(\"\/sys\/kbd\/q\", func(termui.Event) {\n\t\ttermui.StopLoop()\n\t})\n\n\ttermui.Handle(\"\/sys\/kbd\/j\", ui.handleDown)\n\ttermui.Handle(\"\/sys\/kbd\/<down>\", ui.handleDown)\n\ttermui.Handle(\"\/sys\/kbd\/k\", ui.handleUp)\n\ttermui.Handle(\"\/sys\/kbd\/<up>\", ui.handleUp)\n\ttermui.Handle(\"\/sys\/kbd\/g\", ui.handleTop)\n\ttermui.Handle(\"\/sys\/kbd\/<home>\", ui.handleTop)\n\ttermui.Handle(\"\/sys\/kbd\/G\", ui.handleBottom)\n\ttermui.Handle(\"\/sys\/kbd\/<end>\", ui.handleBottom)\n\n\ttermui.Handle(\"\/sys\/wnd\/resize\", func(termui.Event) {\n\t\tui.listWidget.Height = termui.TermHeight()\n\t\ttermui.Body.Align()\n\t\tui.Render()\n\t})\n}\n\nfunc (ui *UI) handleBottom(_ termui.Event) {\n\tvisibleHeight := ui.listWidget.InnerHeight()\n\ttotalHeight := len(ui.listContent)\n\tif totalHeight < visibleHeight {\n\t\tvisibleHeight = totalHeight\n\t}\n\tui.selectedIndex = visibleHeight - 1\n\tui.listOffset = totalHeight - visibleHeight\n\tui.Render()\n}\n\nfunc (ui *UI) handleTop(_ termui.Event) {\n\tui.selectedIndex = 0\n\tui.listOffset = 0\n\tui.Render()\n}\n\nfunc (ui *UI) handleDown(_ termui.Event) {\n\tvisibleHeight := ui.listWidget.InnerHeight()\n\ttotalHeight := len(ui.listContent)\n\tif totalHeight < visibleHeight {\n\t\tvisibleHeight = totalHeight\n\t}\n\n\tif ui.selectedIndex < visibleHeight-1 {\n\t\tui.selectedIndex++\n\t} else if ui.listOffset < totalHeight-visibleHeight-1 {\n\t\tui.listOffset++\n\t}\n\tui.Render()\n}\n\nfunc (ui *UI) handleUp(_ termui.Event) {\n\tif ui.selectedIndex > 0 {\n\t\tui.selectedIndex--\n\t} else if ui.listOffset > 0 {\n\t\tui.listOffset--\n\t}\n\tui.Render()\n}\n\nfunc (ui *UI) selectItem() []string {\n\tif ui.listWidget.InnerHeight() == 0 {\n\t\treturn []string{}\n\t}\n\tindex := ui.selectedIndex\n\tvisibleContent := ui.listContent[ui.listOffset:]\n\n\tret := make([]string, len(visibleContent))\n\tfor i, item := range visibleContent {\n\t\tif i == index {\n\t\t\tret[i] = fmt.Sprintf(\" [➤](fg-cyan,fg-bold) %s\", item)\n\t\t} else {\n\t\t\tret[i] = fmt.Sprintf(\" %s\", item)\n\t\t}\n\t}\n\treturn ret\n}\n<commit_msg>add header<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\thumanize \"github.com\/dustin\/go-humanize\"\n\t\"github.com\/gizak\/termui\"\n\t\"github.com\/luan\/idope\/fetcher\"\n)\n\ntype UI struct {\n\tselectedIndex int\n\tlistContent []string\n\tlistOffset int\n\n\tlistWidget *termui.List\n}\n\nfunc NewUI() *UI {\n\treturn &UI{}\n}\n\nfunc (ui *UI) Render() {\n\tui.listWidget.Items = ui.selectItem()\n\ttermui.Render(termui.Body)\n}\n\nfunc (ui *UI) Setup() {\n\terr := termui.Init()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tlist := termui.NewList()\n\tlist.BorderLabel = \"LRPs\"\n\tlist.ItemFgColor = termui.ColorYellow\n\tui.listWidget = list\n\ttermui.Body.AddRows(\n\t\ttermui.NewRow(\n\t\t\ttermui.NewCol(9, 0, ui.listWidget),\n\t\t),\n\t)\n\tui.listWidget.Height = termui.TermHeight()\n\ttermui.Body.Align()\n\n\tui.Render()\n\tui.bindEvents()\n}\n\nfunc (ui *UI) Close() {\n\ttermui.Close()\n}\n\nfunc (ui *UI) Loop() {\n\ttermui.Loop()\n}\n\nfunc colorizeState(state string) string {\n\tswitch state {\n\tcase \"UNCLAIMED\":\n\t\treturn fmt.Sprintf(\"[%-10s](fg-white)\", state)\n\tcase \"CLAIMED\":\n\t\treturn fmt.Sprintf(\"[%-10s](fg-yellow)\", state)\n\tcase \"RUNNING\":\n\t\treturn fmt.Sprintf(\"[%-10s](fg-green)\", state)\n\tcase \"CRASHED\":\n\t\treturn fmt.Sprintf(\"[%-10s](fg-red)\", state)\n\tdefault:\n\t\treturn state\n\t}\n}\n\nfunc fmtBytes(s uint64) string {\n\treturn strings.Replace(humanize.Bytes(s), \" \", \"\", -1)\n}\n\nfunc fmtCell(s string) string {\n\tparts := strings.Split(s, \"_\")\n\tif len(parts) != 2 {\n\t\treturn \"none\"\n\t}\n\tparts = strings.Split(parts[1], \"-\")\n\treturn fmt.Sprintf(\"%s\/%s\", parts[0], parts[1])\n}\n\nfunc lrpToStrings(lrp *fetcher.LRP) []string {\n\tret := []string{}\n\tret = append(ret,\n\t\tfmt.Sprintf(\n\t\t\t\"guid: [%s](fg-bold)\\t[instances:](fg-white) [%d](fg-white,fg-bold) \",\n\t\t\tlrp.Desired.ProcessGuid[:8], lrp.Desired.Instances,\n\t\t),\n\t)\n\tret = append(ret,\n\t\tfmt.Sprintf(\n\t\t\t\" %s %s %s %s %s %s\",\n\t\t\t\"[index](fg-white,bg-reverse)\",\n\t\t\t\"[cell ](fg-yellow,bg-reverse)\",\n\t\t\t\"[state ](fg-white,bg-reverse)\",\n\t\t\t\"[cpu ](fg-magenta,bg-reverse)\",\n\t\t\t\"[memory](fg-cyan,bg-reverse)[\/total ](fg-cyan,bg-reverse)\",\n\t\t\t\"[disk](fg-red,bg-reverse)[\/total ](fg-red,bg-reverse)\",\n\t\t),\n\t)\n\tfor _, actual := range lrp.ActualLRPsByCPU(true) {\n\t\tstate := colorizeState(actual.ActualLRP.State)\n\t\tret = append(ret,\n\t\t\tfmt.Sprintf(\n\t\t\t\t\" [%5d](fg-white) %-6s %s [%5.1f%%](fg-magenta) [%8s](fg-cyan)[\/%-8s](fg-cyan,fg-bold) [%8s](fg-red)[\/%-8s](fg-red,fg-bold)\",\n\t\t\t\tactual.ActualLRP.Index, fmtCell(actual.ActualLRP.CellId), state,\n\t\t\t\tactual.Metrics.CPU*100,\n\t\t\t\tfmtBytes(actual.Metrics.Memory), fmtBytes(uint64(lrp.Desired.MemoryMb*1000*1000)),\n\t\t\t\tfmtBytes(actual.Metrics.Disk), fmtBytes(uint64(lrp.Desired.DiskMb*1000*1000)),\n\t\t\t))\n\t}\n\treturn ret\n}\n\nfunc (ui *UI) SetState(state *fetcher.Data) {\n\tui.listContent = []string{}\n\tlrps := state.LRPs.SortedByProcessGuid()\n\tfor _, lrp := range lrps {\n\t\tcontent := lrpToStrings(lrp)\n\t\tui.listContent = append(ui.listContent, content...)\n\t}\n\tui.Render()\n}\n\nfunc (ui *UI) bindEvents() {\n\ttermui.Handle(\"\/sys\/kbd\/q\", func(termui.Event) {\n\t\ttermui.StopLoop()\n\t})\n\n\ttermui.Handle(\"\/sys\/kbd\/j\", ui.handleDown)\n\ttermui.Handle(\"\/sys\/kbd\/<down>\", ui.handleDown)\n\ttermui.Handle(\"\/sys\/kbd\/k\", ui.handleUp)\n\ttermui.Handle(\"\/sys\/kbd\/<up>\", ui.handleUp)\n\ttermui.Handle(\"\/sys\/kbd\/g\", ui.handleTop)\n\ttermui.Handle(\"\/sys\/kbd\/<home>\", ui.handleTop)\n\ttermui.Handle(\"\/sys\/kbd\/G\", ui.handleBottom)\n\ttermui.Handle(\"\/sys\/kbd\/<end>\", ui.handleBottom)\n\n\ttermui.Handle(\"\/sys\/wnd\/resize\", func(termui.Event) {\n\t\tui.listWidget.Height = termui.TermHeight()\n\t\ttermui.Body.Align()\n\t\tui.Render()\n\t})\n}\n\nfunc (ui *UI) handleBottom(_ termui.Event) {\n\tvisibleHeight := ui.listWidget.InnerHeight()\n\ttotalHeight := len(ui.listContent)\n\tif totalHeight < visibleHeight {\n\t\tvisibleHeight = totalHeight\n\t}\n\tui.selectedIndex = visibleHeight - 1\n\tui.listOffset = totalHeight - visibleHeight\n\tui.Render()\n}\n\nfunc (ui *UI) handleTop(_ termui.Event) {\n\tui.selectedIndex = 0\n\tui.listOffset = 0\n\tui.Render()\n}\n\nfunc (ui *UI) handleDown(_ termui.Event) {\n\tvisibleHeight := ui.listWidget.InnerHeight()\n\ttotalHeight := len(ui.listContent)\n\tif totalHeight < visibleHeight {\n\t\tvisibleHeight = totalHeight\n\t}\n\n\tif ui.selectedIndex < visibleHeight-1 {\n\t\tui.selectedIndex++\n\t} else if ui.listOffset < totalHeight-visibleHeight-1 {\n\t\tui.listOffset++\n\t}\n\tui.Render()\n}\n\nfunc (ui *UI) handleUp(_ termui.Event) {\n\tif ui.selectedIndex > 0 {\n\t\tui.selectedIndex--\n\t} else if ui.listOffset > 0 {\n\t\tui.listOffset--\n\t}\n\tui.Render()\n}\n\nfunc (ui *UI) selectItem() []string {\n\tif ui.listWidget.InnerHeight() == 0 {\n\t\treturn []string{}\n\t}\n\tindex := ui.selectedIndex\n\tvisibleContent := ui.listContent[ui.listOffset:]\n\n\tret := make([]string, len(visibleContent))\n\tfor i, item := range visibleContent {\n\t\tif i == index {\n\t\t\tret[i] = fmt.Sprintf(\" [➤](fg-cyan,fg-bold) %s\", item)\n\t\t} else {\n\t\t\tret[i] = fmt.Sprintf(\" %s\", item)\n\t\t}\n\t}\n\treturn ret\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"os\"\n\tstfl \"github.com\/akrennmair\/go-stfl\"\n)\n\ntype UserInterface struct {\n\tform *stfl.Form\n\tactionchan chan UserInterfaceAction\n\ttweetchan chan []*Tweet\n\tcmdchan chan TwitterCommand\n\tlookupchan chan TweetRequest\n\tin_reply_to_status_id int64\n}\n\ntype ActionId int\n\nconst (\n\tRESET_LAST_LINE ActionId = iota\n\tRAW_INPUT\n\tUPDATE_RATELIMIT\n\tKEY_PRESS\n)\n\ntype UserInterfaceAction struct {\n\tAction ActionId\n\tArgs []string\n}\n\nfunc NewUserInterface(cc chan TwitterCommand, tc chan []*Tweet, lc chan TweetRequest, uac chan UserInterfaceAction) *UserInterface {\n\tstfl.Init()\n\tui := &UserInterface{\n\t\tform: stfl.Create(\"<ui.stfl>\"),\n\t\tactionchan: uac,\n\t\ttweetchan: tc,\n\t\tcmdchan: cc,\n\t\tin_reply_to_status_id: 0,\n\t\tlookupchan: lc,\n\t}\n\tui.form.Set(\"program\", \"gockel 0.0\")\n\treturn ui\n}\n\nfunc (ui *UserInterface) GetActionChannel() chan UserInterfaceAction {\n\treturn ui.actionchan\n}\n\nfunc (ui *UserInterface) Run() {\n\tfor {\n\t\tselect {\n\t\tcase newtweets := <-ui.tweetchan:\n\t\t\tstr := formatTweets(newtweets)\n\t\t\tui.form.Modify(\"tweets\", \"insert_inner\", str)\n\t\t\tui.form.Run(-1)\n\t\tcase action := <-ui.actionchan:\n\t\t\tui.HandleAction(action)\n\t\t}\n\t}\n}\n\nfunc (ui *UserInterface) HandleAction(action UserInterfaceAction) {\n\tswitch action.Action {\n\tcase RESET_LAST_LINE:\n\t\tui.ResetLastLine()\n\tcase RAW_INPUT:\n\t\tinput := action.Args[0]\n\t\tui.HandleRawInput(input)\n\tcase UPDATE_RATELIMIT:\n\t\trem, _ := strconv.Atoui(action.Args[0])\n\t\tlimit, _ := strconv.Atoui(action.Args[1])\n\t\treset, _ := strconv.Atoi64(action.Args[2])\n\t\tnewtext := fmt.Sprintf(\"Next reset: %d min %d\/%d\", reset\/60, rem, limit)\n\t\tui.form.Set(\"rateinfo\", newtext)\n\t\tui.form.Run(-1)\n\tcase KEY_PRESS:\n\t\tui.UpdateInfoLine()\n\t\tui.form.Run(-1)\n\t}\n}\n\nfunc (ui *UserInterface) ResetLastLine() {\n\tui.form.Modify(\"lastline\", \"replace\", \"{hbox[lastline] .expand:0 {label text[msg]:\\\"\\\" .expand:h}}\")\n}\n\nfunc (ui *UserInterface) UpdateInfoLine() {\n\tstatus_id, err := strconv.Atoi64(ui.form.Get(\"status_id\"))\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttweet := ui.LookupTweet(status_id)\n\tif tweet != nil {\n\t\tvar screen_name, real_name, location, posttime string\n\t\tif tweet.User != nil {\n\t\t\tif tweet.User.Screen_name != nil {\n\t\t\t\tscreen_name = *tweet.User.Screen_name\n\t\t\t}\n\t\t\tif tweet.User.Name != nil {\n\t\t\t\treal_name = *tweet.User.Name\n\t\t\t}\n\t\t\tif tweet.User.Location != nil && *tweet.User.Location != \"\" {\n\t\t\t\tlocation = \" - \"+*tweet.User.Location\n\t\t\t}\n\t\t}\n\t\tif tweet.Created_at != nil {\n\t\t\tposttime = *tweet.Created_at\n\t\t}\n\t\tinfoline := fmt.Sprintf(\">> @%s (%s)%s | posted %s | https:\/\/twitter.com\/%s\/statuses\/%d\", screen_name, real_name, location, posttime, screen_name, status_id)\n\t\tui.form.Set(\"infoline\", infoline)\n\t}\n}\n\nfunc (ui *UserInterface) HandleRawInput(input string) {\n\tswitch input {\n\tcase \"ENTER\":\n\t\tui.SetInputField(\"Tweet: \", \"\", \"end-input\")\n\tcase \"r\":\n\t\tvar err os.Error\n\t\tui.in_reply_to_status_id, err = strconv.Atoi64(ui.form.Get(\"status_id\"))\n\t\tif err != nil {\n\t\t\t\/\/ TODO: show error\n\t\t\tbreak\n\t\t}\n\t\ttweet := ui.LookupTweet(ui.in_reply_to_status_id)\n\t\tif tweet != nil {\n\t\t\tui.SetInputField(\"Reply: \", \"@\"+*tweet.User.Screen_name+\" \",\"end-input\")\n\t\t} else {\n\t\t\t\/\/TODO: show error\n\t\t}\n\tcase \"^R\":\n\t\tstatus_id, err := strconv.Atoi64(ui.form.Get(\"status_id\"))\n\t\tif err != nil {\n\t\t\t\/\/ TODO: show error\n\t\t\tbreak\n\t\t}\n\t\tstatus_id_ptr := new(int64)\n\t\t*status_id_ptr = status_id\n\t\tui.cmdchan <- TwitterCommand{Cmd: RETWEET, Data: Tweet{Id: status_id_ptr}}\n\tcase \"end-input\":\n\t\ttweet_text := new(string)\n\t\t*tweet_text = ui.form.Get(\"inputfield\")\n\t\tif len(*tweet_text) > 0 {\n\t\t\tt := Tweet{Text: tweet_text}\n\t\t\tif ui.in_reply_to_status_id != 0 {\n\t\t\t\tt.In_reply_to_status_id = new(int64)\n\t\t\t\t*t.In_reply_to_status_id = ui.in_reply_to_status_id\n\t\t\t\tui.in_reply_to_status_id = int64(0)\n\t\t\t}\n\t\t\tui.cmdchan <- TwitterCommand{Cmd: UPDATE, Data: t}\n\t\t}\n\t\tui.ResetLastLine()\n\tcase \"cancel-input\":\n\t\tui.ResetLastLine()\n\t}\n\tui.form.Run(-1)\n}\n\nfunc (ui *UserInterface) LookupTweet(status_id int64) *Tweet {\n\treply := make(chan *Tweet)\n\treq := TweetRequest{Status_id: status_id, Reply: reply}\n\tui.lookupchan <- req\n\treturn <-reply\n}\n\nfunc (ui *UserInterface) InputLoop() {\n\tevent := \"\"\n\tfor event != \"q\" {\n\t\tevent = ui.form.Run(0)\n\t\tif event != \"\" {\n\t\t\tif event == \"^L\" {\n\t\t\t\tstfl.Reset()\n\t\t\t} else {\n\t\t\t\tui.actionchan <- UserInterfaceAction{RAW_INPUT, []string{event}}\n\t\t\t}\n\t\t} else {\n\t\t\tui.actionchan <- UserInterfaceAction{Action:KEY_PRESS}\n\t\t}\n\t}\n\tstfl.Reset()\n}\n\nfunc (ui *UserInterface) SetInputField(prompt, deftext, endevent string) {\n\tlast_line_text := \"{hbox[lastline] .expand:0 {label .expand:0 text[prompt]:\" + stfl.Quote(prompt) + \"}{input[tweetinput] on_ESC:cancel-input on_ENTER:\" + endevent + \" modal:1 .expand:h text[inputfield]:\" + stfl.Quote(deftext) + \"}}\"\n\n\tui.form.Modify(\"lastline\", \"replace\", last_line_text)\n\tui.form.SetFocus(\"tweetinput\")\n}\n\nfunc formatTweets(tweets []*Tweet) string {\n\tbuf := bytes.NewBufferString(\"{list\")\n\n\tfor _, t := range tweets {\n\t\ttweetline := fmt.Sprintf(\"[%16s] %s\", \"@\"+*t.User.Screen_name, *t.Text)\n\t\tbuf.WriteString(fmt.Sprintf(\"{listitem[%v] text:%v}\", *t.Id, stfl.Quote(tweetline)))\n\t}\n\n\tbuf.WriteString(\"}\")\n\treturn string(buf.Bytes())\n}\n<commit_msg>unescape tweet text.<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"os\"\n\t\"html\"\n\tstfl \"github.com\/akrennmair\/go-stfl\"\n)\n\ntype UserInterface struct {\n\tform *stfl.Form\n\tactionchan chan UserInterfaceAction\n\ttweetchan chan []*Tweet\n\tcmdchan chan TwitterCommand\n\tlookupchan chan TweetRequest\n\tin_reply_to_status_id int64\n}\n\ntype ActionId int\n\nconst (\n\tRESET_LAST_LINE ActionId = iota\n\tRAW_INPUT\n\tUPDATE_RATELIMIT\n\tKEY_PRESS\n)\n\ntype UserInterfaceAction struct {\n\tAction ActionId\n\tArgs []string\n}\n\nfunc NewUserInterface(cc chan TwitterCommand, tc chan []*Tweet, lc chan TweetRequest, uac chan UserInterfaceAction) *UserInterface {\n\tstfl.Init()\n\tui := &UserInterface{\n\t\tform: stfl.Create(\"<ui.stfl>\"),\n\t\tactionchan: uac,\n\t\ttweetchan: tc,\n\t\tcmdchan: cc,\n\t\tin_reply_to_status_id: 0,\n\t\tlookupchan: lc,\n\t}\n\tui.form.Set(\"program\", \"gockel 0.0\")\n\treturn ui\n}\n\nfunc (ui *UserInterface) GetActionChannel() chan UserInterfaceAction {\n\treturn ui.actionchan\n}\n\nfunc (ui *UserInterface) Run() {\n\tfor {\n\t\tselect {\n\t\tcase newtweets := <-ui.tweetchan:\n\t\t\tstr := formatTweets(newtweets)\n\t\t\tui.form.Modify(\"tweets\", \"insert_inner\", str)\n\t\t\tui.form.Run(-1)\n\t\tcase action := <-ui.actionchan:\n\t\t\tui.HandleAction(action)\n\t\t}\n\t}\n}\n\nfunc (ui *UserInterface) HandleAction(action UserInterfaceAction) {\n\tswitch action.Action {\n\tcase RESET_LAST_LINE:\n\t\tui.ResetLastLine()\n\tcase RAW_INPUT:\n\t\tinput := action.Args[0]\n\t\tui.HandleRawInput(input)\n\tcase UPDATE_RATELIMIT:\n\t\trem, _ := strconv.Atoui(action.Args[0])\n\t\tlimit, _ := strconv.Atoui(action.Args[1])\n\t\treset, _ := strconv.Atoi64(action.Args[2])\n\t\tnewtext := fmt.Sprintf(\"Next reset: %d min %d\/%d\", reset\/60, rem, limit)\n\t\tui.form.Set(\"rateinfo\", newtext)\n\t\tui.form.Run(-1)\n\tcase KEY_PRESS:\n\t\tui.UpdateInfoLine()\n\t\tui.form.Run(-1)\n\t}\n}\n\nfunc (ui *UserInterface) ResetLastLine() {\n\tui.form.Modify(\"lastline\", \"replace\", \"{hbox[lastline] .expand:0 {label text[msg]:\\\"\\\" .expand:h}}\")\n}\n\nfunc (ui *UserInterface) UpdateInfoLine() {\n\tstatus_id, err := strconv.Atoi64(ui.form.Get(\"status_id\"))\n\tif err != nil {\n\t\treturn\n\t}\n\n\ttweet := ui.LookupTweet(status_id)\n\tif tweet != nil {\n\t\tvar screen_name, real_name, location, posttime string\n\t\tif tweet.User != nil {\n\t\t\tif tweet.User.Screen_name != nil {\n\t\t\t\tscreen_name = *tweet.User.Screen_name\n\t\t\t}\n\t\t\tif tweet.User.Name != nil {\n\t\t\t\treal_name = *tweet.User.Name\n\t\t\t}\n\t\t\tif tweet.User.Location != nil && *tweet.User.Location != \"\" {\n\t\t\t\tlocation = \" - \"+*tweet.User.Location\n\t\t\t}\n\t\t}\n\t\tif tweet.Created_at != nil {\n\t\t\tposttime = *tweet.Created_at\n\t\t}\n\t\tinfoline := fmt.Sprintf(\">> @%s (%s)%s | posted %s | https:\/\/twitter.com\/%s\/statuses\/%d\", screen_name, real_name, location, posttime, screen_name, status_id)\n\t\tui.form.Set(\"infoline\", infoline)\n\t}\n}\n\nfunc (ui *UserInterface) HandleRawInput(input string) {\n\tswitch input {\n\tcase \"ENTER\":\n\t\tui.SetInputField(\"Tweet: \", \"\", \"end-input\")\n\tcase \"r\":\n\t\tvar err os.Error\n\t\tui.in_reply_to_status_id, err = strconv.Atoi64(ui.form.Get(\"status_id\"))\n\t\tif err != nil {\n\t\t\t\/\/ TODO: show error\n\t\t\tbreak\n\t\t}\n\t\ttweet := ui.LookupTweet(ui.in_reply_to_status_id)\n\t\tif tweet != nil {\n\t\t\tui.SetInputField(\"Reply: \", \"@\"+*tweet.User.Screen_name+\" \",\"end-input\")\n\t\t} else {\n\t\t\t\/\/TODO: show error\n\t\t}\n\tcase \"^R\":\n\t\tstatus_id, err := strconv.Atoi64(ui.form.Get(\"status_id\"))\n\t\tif err != nil {\n\t\t\t\/\/ TODO: show error\n\t\t\tbreak\n\t\t}\n\t\tstatus_id_ptr := new(int64)\n\t\t*status_id_ptr = status_id\n\t\tui.cmdchan <- TwitterCommand{Cmd: RETWEET, Data: Tweet{Id: status_id_ptr}}\n\tcase \"end-input\":\n\t\ttweet_text := new(string)\n\t\t*tweet_text = ui.form.Get(\"inputfield\")\n\t\tif len(*tweet_text) > 0 {\n\t\t\tt := Tweet{Text: tweet_text}\n\t\t\tif ui.in_reply_to_status_id != 0 {\n\t\t\t\tt.In_reply_to_status_id = new(int64)\n\t\t\t\t*t.In_reply_to_status_id = ui.in_reply_to_status_id\n\t\t\t\tui.in_reply_to_status_id = int64(0)\n\t\t\t}\n\t\t\tui.cmdchan <- TwitterCommand{Cmd: UPDATE, Data: t}\n\t\t}\n\t\tui.ResetLastLine()\n\tcase \"cancel-input\":\n\t\tui.ResetLastLine()\n\t}\n\tui.form.Run(-1)\n}\n\nfunc (ui *UserInterface) LookupTweet(status_id int64) *Tweet {\n\treply := make(chan *Tweet)\n\treq := TweetRequest{Status_id: status_id, Reply: reply}\n\tui.lookupchan <- req\n\treturn <-reply\n}\n\nfunc (ui *UserInterface) InputLoop() {\n\tevent := \"\"\n\tfor event != \"q\" {\n\t\tevent = ui.form.Run(0)\n\t\tif event != \"\" {\n\t\t\tif event == \"^L\" {\n\t\t\t\tstfl.Reset()\n\t\t\t} else {\n\t\t\t\tui.actionchan <- UserInterfaceAction{RAW_INPUT, []string{event}}\n\t\t\t}\n\t\t} else {\n\t\t\tui.actionchan <- UserInterfaceAction{Action:KEY_PRESS}\n\t\t}\n\t}\n\tstfl.Reset()\n}\n\nfunc (ui *UserInterface) SetInputField(prompt, deftext, endevent string) {\n\tlast_line_text := \"{hbox[lastline] .expand:0 {label .expand:0 text[prompt]:\" + stfl.Quote(prompt) + \"}{input[tweetinput] on_ESC:cancel-input on_ENTER:\" + endevent + \" modal:1 .expand:h text[inputfield]:\" + stfl.Quote(deftext) + \"}}\"\n\n\tui.form.Modify(\"lastline\", \"replace\", last_line_text)\n\tui.form.SetFocus(\"tweetinput\")\n}\n\nfunc formatTweets(tweets []*Tweet) string {\n\tbuf := bytes.NewBufferString(\"{list\")\n\n\tfor _, t := range tweets {\n\t\ttweetline := fmt.Sprintf(\"[%16s] %s\", \"@\"+*t.User.Screen_name, *t.Text)\n\t\tbuf.WriteString(fmt.Sprintf(\"{listitem[%v] text:%v}\", *t.Id, stfl.Quote(html.UnescapeString(tweetline))))\n\t}\n\n\tbuf.WriteString(\"}\")\n\treturn string(buf.Bytes())\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport \"fmt\"\nimport \"strings\"\nimport \"github.com\/nsf\/termbox-go\"\nimport \"strconv\"\n\nconst MAX_CELL_WIDTH = 20\nconst HILITE_FG = termbox.ColorBlack | termbox.AttrBold\nconst HILITE_BG = termbox.ColorWhite\n\ntype inputMode int\n\nconst (\n\tModeDefault = iota\n\tModeFilter\n\tModeColumnSelect\n\tModeRowSelect\n)\n\n\/\/ It is so dumb that go doesn't have this\nfunc clamp(val, lo, hi int) int {\n\tif val <= lo {\n\t\treturn lo\n\t} else if val >= hi {\n\t\treturn hi\n\t}\n\n\treturn val\n}\n\nvar pinnedBounds = 0\n\nfunc writeString(x, y int, fg, bg termbox.Attribute, msg string) int {\n\tfor _, c := range msg {\n\t\tif x >= pinnedBounds {\n\t\t\ttermbox.SetCell(x, y, c, fg, bg)\n\t\t}\n\t\tx += 1\n\t}\n\treturn x\n}\n\nfunc writeLine(x, y int, fg, bg termbox.Attribute, line string) {\n\twidth, _ := termbox.Size()\n\tfor _, c := range line {\n\t\ttermbox.SetCell(x, y, c, fg, bg)\n\t\tx += 1\n\t}\n\tfor i := x; i < width; i += 1 {\n\t\ttermbox.SetCell(x+i, y, ' ', fg, bg)\n\t}\n}\n\nvar cellFmtString = \"%-\" + strconv.Itoa(MAX_CELL_WIDTH) + \"s\"\n\nfunc (ui *UI) writeCell(cell string, x, y, index int, fg, bg termbox.Attribute) int {\n\tcolOpts := ui.columnOpts[index]\n\tlastCol := index == len(ui.columnOpts)-1\n\n\tif index == ui.colIdx && ui.mode == ModeColumnSelect {\n\t\tfg = HILITE_FG\n\t\tbg = HILITE_BG\n\t}\n\n\tif colOpts.collapsed {\n\t\tx = writeString(x, y, fg, bg, \"…\")\n\t} else if !colOpts.expanded && len(cell) < MAX_CELL_WIDTH {\n\t\tpadded := fmt.Sprintf(cellFmtString, cell)\n\t\tx = writeString(x, y, fg, bg, padded)\n\t} else if !colOpts.expanded && !lastCol {\n\t\twidth := clamp(len(cell)-1, 0, MAX_CELL_WIDTH-1)\n\t\tx = writeString(x, y, fg, bg, cell[:width])\n\t\tx = writeString(x, y, fg, bg, \"…\")\n\t} else {\n\t\twriteString(x, y, fg, bg, cell)\n\t\tx += colOpts.width\n\t}\n\n\t\/\/ Draw separator if this isn't the last element\n\tif index != len(ui.columns)-1 {\n\t\tx = writeString(x, y, termbox.ColorRed, termbox.ColorDefault, \" │ \")\n\t}\n\n\treturn x\n}\n\nfunc (ui *UI) writePinned(y int, fg, bg termbox.Attribute, row []string) int {\n\t\/\/ ignore our view offsets\n\tpinnedBounds = 0\n\n\tfor i, cell := range row {\n\t\tcolOpts := ui.columnOpts[i]\n\n\t\tif colOpts.pinned {\n\t\t\tpinnedBounds = ui.writeCell(cell, pinnedBounds, y, i, fg, bg)\n\t\t}\n\t}\n\n\treturn pinnedBounds\n}\n\nfunc (ui *UI) writeColumns(x, y int) {\n\tvar fg, bg termbox.Attribute\n\n\tx += ui.writePinned(y, termbox.ColorWhite, termbox.ColorDefault, ui.columns)\n\n\tfor i, col := range ui.columns {\n\t\tcolOpts := ui.columnOpts[i]\n\n\t\tfg = termbox.ColorBlack | termbox.AttrBold\n\t\tbg = termbox.ColorWhite\n\n\t\tif !colOpts.pinned {\n\t\t\tx = ui.writeCell(col, x, y, i, fg, bg)\n\t\t}\n\t}\n\n\tendOfLinePosition = x\n}\n\nfunc (ui *UI) writeRow(x, y int, row []string) {\n\tfg := termbox.ColorDefault\n\n\tif ui.zebraStripe && y%2 == 0 {\n\t\tfg = termbox.ColorMagenta\n\t}\n\n\tx += ui.writePinned(y, termbox.ColorCyan, termbox.ColorBlack, row)\n\n\tfor i, _ := range ui.columns {\n\t\tcolOpts := ui.columnOpts[i]\n\n\t\tif !colOpts.pinned {\n\t\t\tx = ui.writeCell(row[i], x, y, i, fg, termbox.ColorDefault)\n\t\t}\n\t}\n}\n\ntype columnOptions struct {\n\texpanded bool\n\tcollapsed bool\n\tpinned bool\n\twidth int\n}\n\ntype UI struct {\n\tmode inputMode\n\trowIdx, colIdx int \/\/ Selection control\n\toffsetX, offsetY int \/\/ Pan control\n\tfilterString string\n\tzebraStripe bool\n\tcolumnOpts []columnOptions\n\tcolumns []string\n\trows [][]string\n\twidth int\n}\n\nfunc NewUi(data TabularData) UI {\n\tcolOpts := make([]columnOptions, len(data.Columns))\n\tcolumns := make([]string, len(data.Columns))\n\n\tfor i, col := range data.Columns {\n\t\tcolumns[i] = col.Name\n\t\tcolOpts[i] = columnOptions{\n\t\t\texpanded: col.Width < MAX_CELL_WIDTH,\n\t\t\tcollapsed: false,\n\t\t\tpinned: false,\n\t\t\twidth: col.Width,\n\t\t}\n\t}\n\n\treturn UI{\n\t\toffsetX: 0,\n\t\toffsetY: 0,\n\t\tmode: ModeDefault,\n\t\tcolIdx: -1,\n\t\tcolumnOpts: colOpts,\n\t\trows: data.Rows,\n\t\tcolumns: columns,\n\t\twidth: data.Width,\n\t\tzebraStripe: false,\n\t}\n}\n\nfunc (ui *UI) Init() error {\n\tif err := termbox.Init(); err != nil {\n\t\treturn err\n\t}\n\n\ttermbox.SetInputMode(termbox.InputEsc | termbox.InputMouse)\n\n\treturn nil\n}\n\nfunc (ui *UI) Loop() {\n\tdefer termbox.Close()\n\n\tui.repaint()\n\neventloop:\n\tfor {\n\t\tswitch ev := termbox.PollEvent(); ev.Type {\n\t\tcase termbox.EventKey:\n\t\t\tif ev.Key == termbox.KeyCtrlC {\n\t\t\t\tbreak eventloop\n\t\t\t}\n\n\t\t\tswitch ui.mode {\n\t\t\tcase ModeFilter:\n\t\t\t\tui.handleKeyFilter(ev)\n\t\t\tcase ModeColumnSelect:\n\t\t\t\tui.handleKeyColumnSelect(ev)\n\t\t\tdefault:\n\t\t\t\tui.handleKeyDefault(ev)\n\t\t\t}\n\t\t}\n\n\t\tui.repaint()\n\t}\n}\n\n\/\/ Return indices of rows to display\nfunc (ui *UI) filterRows(num int) []int {\n\trows := make([]int, 0, num)\n\n\t\/\/ fast pass\n\tif ui.filterString == \"\" {\n\t\tfor i := 0; i < num; i += 1 {\n\t\t\tif i+ui.offsetY >= len(ui.rows) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\trows = append(rows, i+ui.offsetY)\n\t\t}\n\t} else {\n\t\tfor i := 0; i < num; i += 1 {\n\t\t\tif i+ui.offsetY >= len(ui.rows) {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tfor _, col := range ui.rows[i+ui.offsetY] {\n\t\t\t\tif strings.Contains(col, ui.filterString) {\n\t\t\t\t\trows = append(rows, i+ui.offsetY)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn rows\n}\n\nfunc (ui *UI) repaint() {\n\ttermbox.Clear(termbox.ColorDefault, termbox.ColorDefault)\n\ttermbox.HideCursor()\n\t_, height := termbox.Size()\n\n\tconst coldef = termbox.ColorDefault\n\n\tui.writeColumns(ui.offsetX+0, 0)\n\n\trowIdx := ui.filterRows(height - 2)\n\n\tfor i := 0; i < height-2; i += 1 {\n\t\tif i < len(rowIdx) {\n\t\t\tui.writeRow(ui.offsetX+0, i+1, ui.rows[rowIdx[i]])\n\t\t} else {\n\t\t\twriteLine(0, i+1, termbox.ColorWhite|termbox.AttrBold, termbox.ColorBlack, \"~\")\n\t\t}\n\t}\n\n\tswitch ui.mode {\n\tcase ModeFilter:\n\t\text := \"\"\n\t\tif len(rowIdx) == height-2 {\n\t\t\text = \"+\"\n\t\t}\n\t\tline := fmt.Sprintf(\"FILTER [%d%s matches]: %s\", len(rowIdx), ext, ui.filterString)\n\t\twriteLine(0, height-1, termbox.ColorWhite|termbox.AttrBold, termbox.ColorDefault, line)\n\t\ttermbox.SetCursor(len(line), height-1)\n\tcase ModeColumnSelect:\n\t\tline := \"COLUMN SELECT (^g quit) [\" + ui.columns[ui.colIdx] + \"]\"\n\t\twriteLine(0, height-1, termbox.ColorWhite|termbox.AttrBold, termbox.ColorDefault, line)\n\tdefault:\n\t\tfirst := 0\n\t\tlast := 0\n\t\ttotal := len(ui.rows) - 1\n\t\tfilter := \"\"\n\n\t\tif len(rowIdx) >= 2 {\n\t\t\tfirst = rowIdx[0]\n\t\t\tlast = rowIdx[len(rowIdx)-1]\n\t\t}\n\n\t\tif ui.filterString != \"\" {\n\t\t\tfilter = fmt.Sprintf(\"[filter: \\\"%s\\\"] \", ui.filterString)\n\t\t}\n\n\t\tline := fmt.Sprintf(\"%s[rows %d-%d of %d] :\", filter, first, last, total)\n\t\twriteLine(0, height-1, termbox.ColorDefault, termbox.ColorDefault, line)\n\t}\n\n\ttermbox.Flush()\n}\n\nfunc (ui *UI) handleKeyFilter(ev termbox.Event) {\n\t\/\/ Ch == 0 implies this was a special key\n\tif ev.Ch == 0 && ev.Key != termbox.KeySpace {\n\t\tif ev.Key == termbox.KeyEsc || ev.Key == termbox.KeyCtrlG || ev.Key == termbox.KeyEnter {\n\t\t\tui.mode = ModeDefault\n\t\t} else if ev.Key == termbox.KeyDelete || ev.Key == termbox.KeyBackspace ||\n\t\t\tev.Key == termbox.KeyBackspace2 {\n\t\t\tif sz := len(ui.filterString); sz > 0 {\n\t\t\t\tui.filterString = ui.filterString[:sz-1]\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Fallback to default handling for arrows etc\n\t\t\tui.handleKeyDefault(ev)\n\t\t}\n\t\treturn\n\t}\n\n\tif ev.Key == termbox.KeySpace {\n\t\tui.filterString += \" \"\n\t} else {\n\t\tui.filterString += string(ev.Ch)\n\t}\n\n\tui.offsetY = 0\n}\n\nvar globalExpanded = false\nvar endOfLinePosition = 0\n\nfunc (ui *UI) handleKeyColumnSelect(ev termbox.Event) {\n\tswitch {\n\tcase ev.Key == termbox.KeyArrowRight:\n\t\tui.colIdx = clamp(ui.colIdx+1, 0, len(ui.columns)-1)\n\tcase ev.Key == termbox.KeyArrowLeft:\n\t\tui.colIdx = clamp(ui.colIdx-1, 0, len(ui.columns)-1)\n\tcase ev.Ch == 'w':\n\t\tui.columnOpts[ui.colIdx].collapsed = !ui.columnOpts[ui.colIdx].collapsed\n\tcase ev.Ch == 'x':\n\t\tui.columnOpts[ui.colIdx].expanded = !ui.columnOpts[ui.colIdx].expanded\n\t\tif ui.columnOpts[ui.colIdx].expanded {\n\t\t\tui.columnOpts[ui.colIdx].collapsed = false\n\t\t}\n\tcase ev.Ch == '.':\n\t\tui.columnOpts[ui.colIdx].pinned = !ui.columnOpts[ui.colIdx].pinned\n\n\t\tif ui.columnOpts[ui.colIdx].pinned {\n\t\t\tui.columnOpts[ui.colIdx].collapsed = false\n\t\t}\n\n\tcase ev.Key == termbox.KeyCtrlG, ev.Key == termbox.KeyEsc:\n\t\tui.mode = ModeDefault\n\tdefault:\n\t\tui.handleKeyDefault(ev)\n\t}\n\n\t\/\/ find if we've gone off screen and readjust\n\t\/\/ TODO: this bit is buggy\n\tcursorPosition := 0\n\tfor i, _ := range ui.columns {\n\t\tcolOpts := ui.columnOpts[i]\n\n\t\tif i == ui.colIdx {\n\t\t\tbreak\n\t\t}\n\t\t\/\/cursorPosition += 3\n\t\tif !colOpts.collapsed {\n\t\t\tcursorPosition += colOpts.width\n\t\t}\n\t}\n\n\twidth, _ := termbox.Size()\n\tif cursorPosition > width-ui.offsetX || cursorPosition < -ui.offsetX {\n\t\tui.offsetX = -cursorPosition\n\t}\n}\n\nfunc (ui *UI) handleKeyDefault(ev termbox.Event) {\n\tswitch {\n\tcase ev.Key == termbox.KeyCtrlA:\n\t\tui.offsetX = 0\n\tcase ev.Key == termbox.KeyCtrlE:\n\t\t\/\/ FIXME: this is buggy\n\t\tw, _ := termbox.Size()\n\t\tui.offsetX = -endOfLinePosition + w\n\tcase ev.Key == termbox.KeyArrowRight:\n\t\tui.offsetX = clamp(ui.offsetX-5, -endOfLinePosition, 0)\n\tcase ev.Key == termbox.KeyArrowLeft:\n\t\tui.offsetX = clamp(ui.offsetX+5, -endOfLinePosition, 0)\n\tcase ev.Key == termbox.KeyArrowUp:\n\t\tui.offsetY = clamp(ui.offsetY-1, 0, len(ui.rows))\n\tcase ev.Key == termbox.KeyArrowDown:\n\t\tui.offsetY = clamp(ui.offsetY+1, 0, len(ui.rows))\n\tcase ev.Ch == '\/', ev.Key == termbox.KeyCtrlR:\n\t\tui.mode = ModeFilter\n\t\tui.filterString = \"\"\n\t\tui.offsetY = 0\n\tcase ev.Ch == 'C':\n\t\tui.mode = ModeColumnSelect\n\t\tui.offsetX = 0\n\t\tui.colIdx = 0\n\tcase ev.Ch == 'G':\n\t\t_, height := termbox.Size()\n\t\tui.offsetY = len(ui.rows) - (height - 3)\n\tcase ev.Ch == 'g':\n\t\tui.offsetY = 0\n\tcase ev.Ch == 'Z':\n\t\tui.zebraStripe = !ui.zebraStripe\n\tcase ev.Ch == 'X':\n\t\tfor i, _ := range ui.columnOpts {\n\t\t\tui.columnOpts[i].expanded = !globalExpanded\n\t\t\t\/\/ FIXME: Possibly not the best behavior\n\t\t\tui.columnOpts[i].collapsed = false\n\t\t}\n\t\tglobalExpanded = !globalExpanded\n\n\tcase ui.mode == ModeDefault && ev.Ch == 'q':\n\t\tpanic(\"TODO: real exit\")\n\t}\n}\n<commit_msg>Fix pinned column selection behavior<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/nsf\/termbox-go\"\n)\n\nconst MAX_CELL_WIDTH = 20\nconst HILITE_FG = termbox.ColorBlack | termbox.AttrBold\nconst HILITE_BG = termbox.ColorWhite\n\ntype inputMode int\n\nconst (\n\tModeDefault = iota\n\tModeFilter\n\tModeColumnSelect\n\tModeRowSelect\n)\n\n\/\/ It is so dumb that go doesn't have this\nfunc clamp(val, lo, hi int) int {\n\tif val <= lo {\n\t\treturn lo\n\t} else if val >= hi {\n\t\treturn hi\n\t}\n\n\treturn val\n}\n\nvar pinnedBounds = 0\n\nfunc writeString(x, y int, fg, bg termbox.Attribute, msg string) int {\n\tfor _, c := range msg {\n\t\tif x >= pinnedBounds {\n\t\t\ttermbox.SetCell(x, y, c, fg, bg)\n\t\t}\n\t\tx += 1\n\t}\n\treturn x\n}\n\nfunc writeLine(x, y int, fg, bg termbox.Attribute, line string) {\n\twidth, _ := termbox.Size()\n\tfor _, c := range line {\n\t\ttermbox.SetCell(x, y, c, fg, bg)\n\t\tx += 1\n\t}\n\tfor i := x; i < width; i += 1 {\n\t\ttermbox.SetCell(x+i, y, ' ', fg, bg)\n\t}\n}\n\nvar cellFmtString = \"%-\" + strconv.Itoa(MAX_CELL_WIDTH) + \"s\"\n\nfunc (ui *UI) writeCell(cell string, x, y, index int, fg, bg termbox.Attribute) int {\n\tcolOpts := ui.columnOpts[index]\n\tlastCol := index == len(ui.columnOpts)-1\n\n\tif index == ui.colIdx && ui.mode == ModeColumnSelect {\n\t\tfg = HILITE_FG\n\t\tbg = HILITE_BG\n\t}\n\n\tif colOpts.collapsed {\n\t\tx = writeString(x, y, fg, bg, \"…\")\n\t} else if !colOpts.expanded && len(cell) < MAX_CELL_WIDTH {\n\t\tpadded := fmt.Sprintf(cellFmtString, cell)\n\t\tx = writeString(x, y, fg, bg, padded)\n\t} else if !colOpts.expanded && !lastCol {\n\t\twidth := clamp(len(cell)-1, 0, MAX_CELL_WIDTH-1)\n\t\tx = writeString(x, y, fg, bg, cell[:width])\n\t\tx = writeString(x, y, fg, bg, \"…\")\n\t} else {\n\t\twriteString(x, y, fg, bg, cell)\n\t\tx += colOpts.width\n\t}\n\n\t\/\/ Draw separator if this isn't the last element\n\tif index != len(ui.columns)-1 {\n\t\tx = writeString(x, y, termbox.ColorRed, termbox.ColorDefault, \" │ \")\n\t}\n\n\treturn x\n}\n\nfunc (ui *UI) writePinned(y int, fg, bg termbox.Attribute, row []string) int {\n\t\/\/ ignore our view offsets\n\tpinnedBounds = 0\n\n\tfor i, cell := range row {\n\t\tcolOpts := ui.columnOpts[i]\n\n\t\tif colOpts.pinned {\n\t\t\tpinnedBounds = ui.writeCell(cell, pinnedBounds, y, i, fg, bg)\n\t\t}\n\t}\n\n\treturn pinnedBounds\n}\n\nfunc (ui *UI) writeColumns(x, y int) {\n\tvar fg, bg termbox.Attribute\n\n\tx += ui.writePinned(y, termbox.ColorWhite, termbox.ColorDefault, ui.columns)\n\n\tfor i, col := range ui.columns {\n\t\tcolOpts := ui.columnOpts[i]\n\n\t\tfg = termbox.ColorBlack | termbox.AttrBold\n\t\tbg = termbox.ColorWhite\n\n\t\tif !colOpts.pinned {\n\t\t\tx = ui.writeCell(col, x, y, i, fg, bg)\n\t\t}\n\t}\n\n\tendOfLinePosition = x\n}\n\nfunc (ui *UI) writeRow(x, y int, row []string) {\n\tfg := termbox.ColorDefault\n\n\tif ui.zebraStripe && y%2 == 0 {\n\t\tfg = termbox.ColorMagenta\n\t}\n\n\tx += ui.writePinned(y, termbox.ColorCyan, termbox.ColorBlack, row)\n\n\tfor i, _ := range ui.columns {\n\t\tcolOpts := ui.columnOpts[i]\n\n\t\tif !colOpts.pinned {\n\t\t\tx = ui.writeCell(row[i], x, y, i, fg, termbox.ColorDefault)\n\t\t}\n\t}\n}\n\ntype columnOptions struct {\n\texpanded bool\n\tcollapsed bool\n\tpinned bool\n\twidth int\n}\n\ntype UI struct {\n\tmode inputMode\n\trowIdx, colIdx int \/\/ Selection control\n\toffsetX, offsetY int \/\/ Pan control\n\tfilterString string\n\tzebraStripe bool\n\tcolumnOpts []columnOptions\n\tcolumns []string\n\trows [][]string\n\twidth int\n}\n\nfunc NewUi(data TabularData) UI {\n\tcolOpts := make([]columnOptions, len(data.Columns))\n\tcolumns := make([]string, len(data.Columns))\n\n\tfor i, col := range data.Columns {\n\t\tcolumns[i] = col.Name\n\t\tcolOpts[i] = columnOptions{\n\t\t\texpanded: col.Width < MAX_CELL_WIDTH,\n\t\t\tcollapsed: false,\n\t\t\tpinned: false,\n\t\t\twidth: col.Width,\n\t\t}\n\t}\n\n\treturn UI{\n\t\toffsetX: 0,\n\t\toffsetY: 0,\n\t\tmode: ModeDefault,\n\t\tcolIdx: -1,\n\t\tcolumnOpts: colOpts,\n\t\trows: data.Rows,\n\t\tcolumns: columns,\n\t\twidth: data.Width,\n\t\tzebraStripe: false,\n\t}\n}\n\nfunc (ui *UI) Init() error {\n\tif err := termbox.Init(); err != nil {\n\t\treturn err\n\t}\n\n\ttermbox.SetInputMode(termbox.InputEsc | termbox.InputMouse)\n\n\treturn nil\n}\n\nfunc (ui *UI) Loop() {\n\tdefer termbox.Close()\n\n\tui.repaint()\n\neventloop:\n\tfor {\n\t\tswitch ev := termbox.PollEvent(); ev.Type {\n\t\tcase termbox.EventKey:\n\t\t\tif ev.Key == termbox.KeyCtrlC {\n\t\t\t\tbreak eventloop\n\t\t\t}\n\n\t\t\tswitch ui.mode {\n\t\t\tcase ModeFilter:\n\t\t\t\tui.handleKeyFilter(ev)\n\t\t\tcase ModeColumnSelect:\n\t\t\t\tui.handleKeyColumnSelect(ev)\n\t\t\tdefault:\n\t\t\t\tui.handleKeyDefault(ev)\n\t\t\t}\n\t\t}\n\n\t\tui.repaint()\n\t}\n}\n\n\/\/ Return indices of rows to display\nfunc (ui *UI) filterRows(num int) []int {\n\trows := make([]int, 0, num)\n\n\t\/\/ fast pass\n\tif ui.filterString == \"\" {\n\t\tfor i := 0; i < num; i += 1 {\n\t\t\tif i+ui.offsetY >= len(ui.rows) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\trows = append(rows, i+ui.offsetY)\n\t\t}\n\t} else {\n\t\tfor i := 0; i < num; i += 1 {\n\t\t\tif i+ui.offsetY >= len(ui.rows) {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tfor _, col := range ui.rows[i+ui.offsetY] {\n\t\t\t\tif strings.Contains(col, ui.filterString) {\n\t\t\t\t\trows = append(rows, i+ui.offsetY)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn rows\n}\n\nfunc (ui *UI) repaint() {\n\ttermbox.Clear(termbox.ColorDefault, termbox.ColorDefault)\n\ttermbox.HideCursor()\n\t_, height := termbox.Size()\n\n\tconst coldef = termbox.ColorDefault\n\n\tui.writeColumns(ui.offsetX+0, 0)\n\n\trowIdx := ui.filterRows(height - 2)\n\n\tfor i := 0; i < height-2; i += 1 {\n\t\tif i < len(rowIdx) {\n\t\t\tui.writeRow(ui.offsetX+0, i+1, ui.rows[rowIdx[i]])\n\t\t} else {\n\t\t\twriteLine(0, i+1, termbox.ColorWhite|termbox.AttrBold, termbox.ColorBlack, \"~\")\n\t\t}\n\t}\n\n\tswitch ui.mode {\n\tcase ModeFilter:\n\t\text := \"\"\n\t\tif len(rowIdx) == height-2 {\n\t\t\text = \"+\"\n\t\t}\n\t\tline := fmt.Sprintf(\"FILTER [%d%s matches]: %s\", len(rowIdx), ext, ui.filterString)\n\t\twriteLine(0, height-1, termbox.ColorWhite|termbox.AttrBold, termbox.ColorDefault, line)\n\t\ttermbox.SetCursor(len(line), height-1)\n\tcase ModeColumnSelect:\n\t\tline := \"COLUMN SELECT (^g quit) [\" + ui.columns[ui.colIdx] + \"]\"\n\t\twriteLine(0, height-1, termbox.ColorWhite|termbox.AttrBold, termbox.ColorDefault, line)\n\tdefault:\n\t\tfirst := 0\n\t\tlast := 0\n\t\ttotal := len(ui.rows) - 1\n\t\tfilter := \"\"\n\n\t\tif len(rowIdx) >= 2 {\n\t\t\tfirst = rowIdx[0]\n\t\t\tlast = rowIdx[len(rowIdx)-1]\n\t\t}\n\n\t\tif ui.filterString != \"\" {\n\t\t\tfilter = fmt.Sprintf(\"[filter: \\\"%s\\\"] \", ui.filterString)\n\t\t}\n\n\t\tline := fmt.Sprintf(\"%s[rows %d-%d of %d] :\", filter, first, last, total)\n\t\twriteLine(0, height-1, termbox.ColorDefault, termbox.ColorDefault, line)\n\t}\n\n\ttermbox.Flush()\n}\n\nfunc (ui *UI) handleKeyFilter(ev termbox.Event) {\n\t\/\/ Ch == 0 implies this was a special key\n\tif ev.Ch == 0 && ev.Key != termbox.KeySpace {\n\t\tif ev.Key == termbox.KeyEsc || ev.Key == termbox.KeyCtrlG || ev.Key == termbox.KeyEnter {\n\t\t\tui.mode = ModeDefault\n\t\t} else if ev.Key == termbox.KeyDelete || ev.Key == termbox.KeyBackspace ||\n\t\t\tev.Key == termbox.KeyBackspace2 {\n\t\t\tif sz := len(ui.filterString); sz > 0 {\n\t\t\t\tui.filterString = ui.filterString[:sz-1]\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Fallback to default handling for arrows etc\n\t\t\tui.handleKeyDefault(ev)\n\t\t}\n\t\treturn\n\t}\n\n\tif ev.Key == termbox.KeySpace {\n\t\tui.filterString += \" \"\n\t} else {\n\t\tui.filterString += string(ev.Ch)\n\t}\n\n\tui.offsetY = 0\n}\n\nvar globalExpanded = false\nvar endOfLinePosition = 0\n\nfunc (ui *UI) findNextColumn(current, direction int) int {\n\tisPinned := ui.columnOpts[current].pinned\n\n\t\/\/ if pinned, find the next pinned col, or vice versa for unpinned\n\tfor i := current + direction; i >= 0 && i < len(ui.columns); i += direction {\n\t\tif ui.columnOpts[i].pinned == isPinned {\n\t\t\treturn i\n\t\t}\n\t}\n\n\t\/\/ Don't fall off the end\n\tif isPinned && direction < 0 || !isPinned && direction > 0 {\n\t\treturn current\n\t}\n\n\t\/\/ there are no remaining pinned \/ unpinned, just find next col\n\ti := 0\n\tif direction < 0 {\n\t\ti = len(ui.columns) - 1\n\t}\n\tfor ; i >= 0 && i < len(ui.columns); i += direction {\n\t\tif (isPinned && !ui.columnOpts[i].pinned) || (!isPinned && ui.columnOpts[i].pinned) {\n\t\t\treturn i\n\t\t}\n\t}\n\n\treturn current\n}\n\nfunc (ui *UI) handleKeyColumnSelect(ev termbox.Event) {\n\tswitch {\n\tcase ev.Key == termbox.KeyArrowRight:\n\t\tnext := ui.findNextColumn(ui.colIdx, 1)\n\t\tui.colIdx = clamp(next, 0, len(ui.columns)-1)\n\n\tcase ev.Key == termbox.KeyArrowLeft:\n\t\tnext := ui.findNextColumn(ui.colIdx, -1)\n\t\tui.colIdx = clamp(next, 0, len(ui.columns)-1)\n\n\tcase ev.Ch == 'w':\n\t\tui.columnOpts[ui.colIdx].collapsed = !ui.columnOpts[ui.colIdx].collapsed\n\tcase ev.Ch == 'x':\n\t\tui.columnOpts[ui.colIdx].expanded = !ui.columnOpts[ui.colIdx].expanded\n\t\tif ui.columnOpts[ui.colIdx].expanded {\n\t\t\tui.columnOpts[ui.colIdx].collapsed = false\n\t\t}\n\tcase ev.Ch == '.':\n\t\tui.columnOpts[ui.colIdx].pinned = !ui.columnOpts[ui.colIdx].pinned\n\n\t\tif ui.columnOpts[ui.colIdx].pinned {\n\t\t\tui.columnOpts[ui.colIdx].collapsed = false\n\t\t}\n\n\tcase ev.Key == termbox.KeyCtrlG, ev.Key == termbox.KeyEsc:\n\t\tui.mode = ModeDefault\n\tdefault:\n\t\tui.handleKeyDefault(ev)\n\t}\n\n\t\/\/ find if we've gone off screen and readjust\n\t\/\/ TODO: this bit is buggy\n\tcursorPosition := 0\n\tfor i, _ := range ui.columns {\n\t\tcolOpts := ui.columnOpts[i]\n\n\t\tif i == ui.colIdx {\n\t\t\tbreak\n\t\t}\n\t\t\/\/cursorPosition += 3\n\t\tif !colOpts.collapsed {\n\t\t\tcursorPosition += colOpts.width\n\t\t}\n\t}\n\n\twidth, _ := termbox.Size()\n\tif cursorPosition > width-ui.offsetX || cursorPosition < -ui.offsetX {\n\t\tui.offsetX = -cursorPosition\n\t}\n}\n\nfunc (ui *UI) handleKeyDefault(ev termbox.Event) {\n\tswitch {\n\tcase ev.Key == termbox.KeyCtrlA:\n\t\tui.offsetX = 0\n\tcase ev.Key == termbox.KeyCtrlE:\n\t\t\/\/ FIXME: this is buggy\n\t\tw, _ := termbox.Size()\n\t\tui.offsetX = -endOfLinePosition + w\n\tcase ev.Key == termbox.KeyArrowRight:\n\t\tui.offsetX = clamp(ui.offsetX-5, -endOfLinePosition, 0)\n\tcase ev.Key == termbox.KeyArrowLeft:\n\t\tui.offsetX = clamp(ui.offsetX+5, -endOfLinePosition, 0)\n\tcase ev.Key == termbox.KeyArrowUp:\n\t\tui.offsetY = clamp(ui.offsetY-1, 0, len(ui.rows))\n\tcase ev.Key == termbox.KeyArrowDown:\n\t\tui.offsetY = clamp(ui.offsetY+1, 0, len(ui.rows))\n\tcase ev.Ch == '\/', ev.Key == termbox.KeyCtrlR:\n\t\tui.mode = ModeFilter\n\t\tui.filterString = \"\"\n\t\tui.offsetY = 0\n\tcase ev.Ch == 'C':\n\t\tui.mode = ModeColumnSelect\n\t\tui.offsetX = 0\n\t\tui.colIdx = 0\n\tcase ev.Ch == 'G':\n\t\t_, height := termbox.Size()\n\t\tui.offsetY = len(ui.rows) - (height - 3)\n\tcase ev.Ch == 'g':\n\t\tui.offsetY = 0\n\tcase ev.Ch == 'Z':\n\t\tui.zebraStripe = !ui.zebraStripe\n\tcase ev.Ch == 'X':\n\t\tfor i, _ := range ui.columnOpts {\n\t\t\tui.columnOpts[i].expanded = !globalExpanded\n\t\t\t\/\/ FIXME: Possibly not the best behavior\n\t\t\tui.columnOpts[i].collapsed = false\n\t\t}\n\t\tglobalExpanded = !globalExpanded\n\n\tcase ui.mode == ModeDefault && ev.Ch == 'q':\n\t\tpanic(\"TODO: real exit\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>add nats connection error logging<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>[GC] Fix werf-host-cleanup default percentage and margin values<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage azure\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\toperatorMetrics \"github.com\/cilium\/cilium\/operator\/metrics\"\n\toperatorOption \"github.com\/cilium\/cilium\/operator\/option\"\n\tapiMetrics \"github.com\/cilium\/cilium\/pkg\/api\/metrics\"\n\tazureAPI \"github.com\/cilium\/cilium\/pkg\/azure\/api\"\n\tazureIPAM \"github.com\/cilium\/cilium\/pkg\/azure\/ipam\"\n\t\"github.com\/cilium\/cilium\/pkg\/ipam\"\n\t\"github.com\/cilium\/cilium\/pkg\/ipam\/allocator\"\n\tipamMetrics \"github.com\/cilium\/cilium\/pkg\/ipam\/metrics\"\n\t\"github.com\/cilium\/cilium\/pkg\/logging\"\n\t\"github.com\/cilium\/cilium\/pkg\/logging\/logfields\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\nvar log = logging.DefaultLogger.WithField(logfields.LogSubsys, \"ipam-allocator-azure\")\n\n\/\/ AllocatorAzure is an implementation of IPAM allocator interface for Azure\ntype AllocatorAzure struct{}\n\n\/\/ Init in Azure implementation doesn't need to do anything\nfunc (*AllocatorAzure) Init() error { return nil }\n\n\/\/ Start kicks of the Azure IP allocation\nfunc (*AllocatorAzure) Start(getterUpdater ipam.CiliumNodeGetterUpdater) (allocator.NodeEventHandler, error) {\n\n\tvar (\n\t\tazMetrics azureAPI.MetricsAPI\n\t\tiMetrics ipam.MetricsAPI\n\t)\n\n\tlog.Info(\"Starting Azure IP allocator...\")\n\n\tsubscriptionID := operatorOption.Config.AzureSubscriptionID\n\tif subscriptionID == \"\" {\n\t\tlog.Debug(\"SubscriptionID was not specified via CLI, retrieving it via Azure IMS\")\n\t\tsubID, err := azureAPI.GetSubscriptionID(context.TODO())\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"Azure subscription ID was not specified via CLI and retrieving it from the Azure IMS was not possible\")\n\t\t}\n\t\tsubscriptionID = subID\n\t\tlog.WithField(\"subscriptionID\", subscriptionID).Debug(\"Detected subscriptionID via Azure IMS\")\n\t}\n\n\tresourceGroupName := operatorOption.Config.AzureResourceGroup\n\tif resourceGroupName == \"\" {\n\t\tlog.Debug(\"ResourceGroupName was not specified via CLI, retrieving it via Azure IMS\")\n\t\trgName, err := azureAPI.GetResourceGroupName(context.TODO())\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"Azure resource group name was not specified via CLI and retrieving it from the Azure IMS was not possible\")\n\t\t}\n\t\tresourceGroupName = rgName\n\t\tlog.WithField(\"resourceGroupName\", resourceGroupName).Debug(\"Detected resource group name via Azure IMS\")\n\t}\n\n\tif operatorOption.Config.EnableMetrics {\n\t\tazMetrics = apiMetrics.NewPrometheusMetrics(operatorMetrics.Namespace, \"azure\", operatorMetrics.Registry)\n\t\tiMetrics = ipamMetrics.NewPrometheusMetrics(operatorMetrics.Namespace, operatorMetrics.Registry)\n\t} else {\n\t\tazMetrics = &apiMetrics.NoOpMetrics{}\n\t\tiMetrics = &ipamMetrics.NoOpMetrics{}\n\t}\n\n\tazureClient, err := azureAPI.NewClient(subscriptionID, resourceGroupName, azMetrics, operatorOption.Config.IPAMAPIQPSLimit, operatorOption.Config.IPAMAPIBurst)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create Azure client: %w\", err)\n\t}\n\tinstances := azureIPAM.NewInstancesManager(azureClient)\n\tnodeManager, err := ipam.NewNodeManager(instances, getterUpdater, iMetrics, operatorOption.Config.ParallelAllocWorkers, false)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to initialize Azure node manager: %w\", err)\n\t}\n\n\tif err := nodeManager.Start(context.TODO()); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nodeManager, nil\n}\n<commit_msg>ipam: use fmt.Errorf instead of github.com\/pkg\/errors<commit_after>\/\/ Copyright 2020 Authors of Cilium\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage azure\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\toperatorMetrics \"github.com\/cilium\/cilium\/operator\/metrics\"\n\toperatorOption \"github.com\/cilium\/cilium\/operator\/option\"\n\tapiMetrics \"github.com\/cilium\/cilium\/pkg\/api\/metrics\"\n\tazureAPI \"github.com\/cilium\/cilium\/pkg\/azure\/api\"\n\tazureIPAM \"github.com\/cilium\/cilium\/pkg\/azure\/ipam\"\n\t\"github.com\/cilium\/cilium\/pkg\/ipam\"\n\t\"github.com\/cilium\/cilium\/pkg\/ipam\/allocator\"\n\tipamMetrics \"github.com\/cilium\/cilium\/pkg\/ipam\/metrics\"\n\t\"github.com\/cilium\/cilium\/pkg\/logging\"\n\t\"github.com\/cilium\/cilium\/pkg\/logging\/logfields\"\n)\n\nvar log = logging.DefaultLogger.WithField(logfields.LogSubsys, \"ipam-allocator-azure\")\n\n\/\/ AllocatorAzure is an implementation of IPAM allocator interface for Azure\ntype AllocatorAzure struct{}\n\n\/\/ Init in Azure implementation doesn't need to do anything\nfunc (*AllocatorAzure) Init() error { return nil }\n\n\/\/ Start kicks of the Azure IP allocation\nfunc (*AllocatorAzure) Start(getterUpdater ipam.CiliumNodeGetterUpdater) (allocator.NodeEventHandler, error) {\n\n\tvar (\n\t\tazMetrics azureAPI.MetricsAPI\n\t\tiMetrics ipam.MetricsAPI\n\t)\n\n\tlog.Info(\"Starting Azure IP allocator...\")\n\n\tsubscriptionID := operatorOption.Config.AzureSubscriptionID\n\tif subscriptionID == \"\" {\n\t\tlog.Debug(\"SubscriptionID was not specified via CLI, retrieving it via Azure IMS\")\n\t\tsubID, err := azureAPI.GetSubscriptionID(context.TODO())\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Azure subscription ID was not specified via CLI and retrieving it from the Azure IMS was not possible: %w\", err)\n\t\t}\n\t\tsubscriptionID = subID\n\t\tlog.WithField(\"subscriptionID\", subscriptionID).Debug(\"Detected subscriptionID via Azure IMS\")\n\t}\n\n\tresourceGroupName := operatorOption.Config.AzureResourceGroup\n\tif resourceGroupName == \"\" {\n\t\tlog.Debug(\"ResourceGroupName was not specified via CLI, retrieving it via Azure IMS\")\n\t\trgName, err := azureAPI.GetResourceGroupName(context.TODO())\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Azure resource group name was not specified via CLI and retrieving it from the Azure IMS was not possible: %w\", err)\n\t\t}\n\t\tresourceGroupName = rgName\n\t\tlog.WithField(\"resourceGroupName\", resourceGroupName).Debug(\"Detected resource group name via Azure IMS\")\n\t}\n\n\tif operatorOption.Config.EnableMetrics {\n\t\tazMetrics = apiMetrics.NewPrometheusMetrics(operatorMetrics.Namespace, \"azure\", operatorMetrics.Registry)\n\t\tiMetrics = ipamMetrics.NewPrometheusMetrics(operatorMetrics.Namespace, operatorMetrics.Registry)\n\t} else {\n\t\tazMetrics = &apiMetrics.NoOpMetrics{}\n\t\tiMetrics = &ipamMetrics.NoOpMetrics{}\n\t}\n\n\tazureClient, err := azureAPI.NewClient(subscriptionID, resourceGroupName, azMetrics, operatorOption.Config.IPAMAPIQPSLimit, operatorOption.Config.IPAMAPIBurst)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create Azure client: %w\", err)\n\t}\n\tinstances := azureIPAM.NewInstancesManager(azureClient)\n\tnodeManager, err := ipam.NewNodeManager(instances, getterUpdater, iMetrics, operatorOption.Config.ParallelAllocWorkers, false)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to initialize Azure node manager: %w\", err)\n\t}\n\n\tif err := nodeManager.Start(context.TODO()); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn nodeManager, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package yang\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/openconfig\/goyang\/pkg\/yang\"\n\t\"github.com\/oshothebig\/pbast\"\n)\n\nvar builtinMap = map[yang.TypeKind]pbast.Type{\n\tyang.Yint8: pbast.Int32,\n\tyang.Yint16: pbast.Int32,\n\tyang.Yint32: pbast.Int32,\n\tyang.Yint64: pbast.Int64,\n\tyang.Yuint8: pbast.UInt32,\n\tyang.Yuint16: pbast.UInt32,\n\tyang.Yuint32: pbast.UInt32,\n\tyang.Yuint64: pbast.UInt64,\n\tyang.Ystring: pbast.String,\n\tyang.Ybool: pbast.Bool,\n\tyang.Ybinary: pbast.Bytes,\n}\n\nvar builtinTypes = map[string]struct{}{\n\t\"int8\": struct{}{},\n\t\"int16\": struct{}{},\n\t\"int32\": struct{}{},\n\t\"int64\": struct{}{},\n\t\"uint8\": struct{}{},\n\t\"uint16\": struct{}{},\n\t\"unit32\": struct{}{},\n\t\"uint64\": struct{}{},\n\t\"string\": struct{}{},\n\t\"boolean\": struct{}{},\n\t\"enumeration\": struct{}{},\n\t\"bits\": struct{}{},\n\t\"binary\": struct{}{},\n\t\"leafref\": struct{}{},\n\t\"identityref\": struct{}{},\n\t\"empty\": struct{}{},\n\t\"union\": struct{}{},\n\t\"instance-identifier\": struct{}{},\n}\n\ntype transformer struct {\n\ttopScope *scope\n\tdecimal64 *pbast.Message\n}\n\n\/\/ e must be YANG module\nfunc Transform(e *yang.Entry) *pbast.File {\n\tif _, ok := e.Node.(*yang.Module); !ok {\n\t\treturn nil\n\t}\n\n\tt := &transformer{\n\t\ttopScope: newScope(),\n\t}\n\n\treturn t.module(entry{e})\n}\n\nfunc (t *transformer) declare(m *pbast.Message) {\n\tif m == nil {\n\t\treturn\n\t}\n\tif err := t.topScope.addType(m); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t}\n}\n\nfunc (t *transformer) module(e entry) *pbast.File {\n\tnamespace := e.Namespace().Name\n\tf := pbast.NewFile(pbast.NewPackageWithElements(guessElements(namespace)))\n\n\tf.Comment = t.moduleComment(e)\n\n\troot := t.buildMessage(\"Root\", e)\n\t\/\/ Clear Root messgage comment because it overlaps with\n\t\/\/ the file level comment being generated from module description too\n\troot.Comment = nil\n\t\/\/ Child nodes are enclosed with Root message\n\tf.AddMessage(root)\n\n\t\/\/ RPCs\n\ts := t.rpcs(e)\n\tf.AddService(s)\n\n\t\/\/ Notifications\n\tn := t.notifications(e)\n\tf.AddService(n)\n\n\tfor _, m := range t.topScope.allTypes() {\n\t\tf.AddType(m)\n\t}\n\tf.AddMessage(t.decimal64)\n\n\treturn f\n}\n\nfunc (t *transformer) moduleComment(e entry) pbast.Comment {\n\tdescription := t.description(e)\n\tnamespace := t.namespace(e)\n\trevisions := t.revisions(e)\n\treference := t.reference(e)\n\n\tvar comment []string\n\tcomment = append(comment, description...)\n\tcomment = append(comment, namespace...)\n\tcomment = append(comment, revisions...)\n\tcomment = append(comment, reference...)\n\n\treturn comment\n}\n\nfunc (t *transformer) genericComments(e entry) pbast.Comment {\n\tdescription := t.description(e)\n\treference := t.reference(e)\n\n\tcomments := append(description, reference...)\n\treturn comments\n}\n\nfunc (t *transformer) description(e entry) pbast.Comment {\n\tdescription := e.Description\n\tif e.Description == \"\" {\n\t\treturn nil\n\t}\n\n\tlines := strings.Split(strings.TrimRight(description, \"\\n \"), \"\\n\")\n\n\tret := make([]string, 0, len(lines)+1)\n\tret = append(ret, \"Description:\")\n\tret = append(ret, lines...)\n\treturn ret\n}\n\nfunc (t *transformer) revisions(e entry) pbast.Comment {\n\tvar lines []string\n\tif v := e.Extra[\"revision\"]; len(v) > 0 {\n\t\tfor _, rev := range v[0].([]*yang.Revision) {\n\t\t\tlines = append(lines, \"Revision: \"+rev.Name)\n\t\t}\n\t}\n\n\treturn lines\n}\n\nfunc (t *transformer) namespace(e entry) pbast.Comment {\n\tnamespace := e.Namespace().Name\n\tif namespace == \"\" {\n\t\treturn nil\n\t}\n\n\treturn []string{\"Namespace: \" + namespace}\n}\n\nfunc (t *transformer) reference(e entry) pbast.Comment {\n\tv := e.Extra[\"reference\"]\n\tif len(v) == 0 {\n\t\treturn nil\n\t}\n\n\tref := v[0].(*yang.Value)\n\tif ref == nil {\n\t\treturn nil\n\t}\n\tif ref.Name == \"\" {\n\t\treturn nil\n\t}\n\n\tlines := strings.Split(strings.TrimRight(ref.Name, \"\\n \"), \"\\n\")\n\n\tret := make([]string, 0, len(lines)+1)\n\tret = append(ret, \"Reference:\")\n\tret = append(ret, lines...)\n\treturn ret\n}\n\nfunc (t *transformer) rpcs(e entry) *pbast.Service {\n\trpcs := e.rpcs()\n\tif len(rpcs) == 0 {\n\t\treturn nil\n\t}\n\n\ts := pbast.NewService(CamelCase(e.Name))\n\tfor _, rpc := range rpcs {\n\t\tr := t.rpc(rpc)\n\t\ts.AddRPC(r)\n\t}\n\n\treturn s\n}\n\nfunc (t *transformer) rpc(e entry) *pbast.RPC {\n\tmethod := CamelCase(e.Name)\n\tin := method + \"Request\"\n\tout := method + \"Response\"\n\n\trpc := pbast.NewRPC(\n\t\tmethod,\n\t\tpbast.NewReturnType(in),\n\t\tpbast.NewReturnType(out),\n\t)\n\trpc.Comment = t.genericComments(e)\n\n\tt.declare(t.buildMessage(in, entry{e.RPC.Input}))\n\tt.declare(t.buildMessage(out, entry{e.RPC.Output}))\n\n\treturn rpc\n}\n\nfunc (t *transformer) notifications(e entry) *pbast.Service {\n\tnotifications := e.notifications()\n\tif len(notifications) == 0 {\n\t\treturn nil\n\t}\n\n\ts := pbast.NewService(CamelCase(e.Name + \"Notification\"))\n\tfor _, notification := range notifications {\n\t\tn := t.notification(notification)\n\t\tn.Comment = t.genericComments(notification)\n\t\ts.AddRPC(n)\n\t}\n\n\treturn s\n}\n\nfunc (t *transformer) notification(e entry) *pbast.RPC {\n\tmethod := CamelCase(e.Name)\n\tin := method + \"NotificationRequest\"\n\tout := method + \"NotificationResponse\"\n\n\trpc := pbast.NewRPC(method, pbast.NewReturnType(in), pbast.NewReturnType(out))\n\n\t\/\/ notification statement doesn't have an input parameter equivalent,\n\t\/\/ then empty message is used for input as RPC\n\tt.declare(pbast.NewMessage(in))\n\tt.declare(t.buildMessage(out, e))\n\n\treturn rpc\n}\n\nfunc (t *transformer) buildMessage(name string, e entry) *pbast.Message {\n\tif e.Entry == nil {\n\t\treturn nil\n\t}\n\n\tmsg := pbast.NewMessage(name)\n\tscope := newScope()\n\tmsg.Comment = t.genericComments(e)\n\tfor index, child := range e.children() {\n\t\tfieldNum := index + 1\n\t\tvar field *pbast.MessageField\n\t\tvar inner pbast.Type\n\t\tswitch {\n\t\t\/\/ leaf-list case\n\t\tcase child.Type != nil && child.ListAttr != nil:\n\t\t\tfield, inner = t.leaf(child, fieldNum, true)\n\t\t\/\/ leaf case\n\t\tcase child.Type != nil:\n\t\t\tfield, inner = t.leaf(child, fieldNum, false)\n\t\t\/\/ list case\n\t\tcase child.ListAttr != nil:\n\t\t\tinner, field = t.directory(child, fieldNum, true)\n\t\t\/\/ others might be container case\n\t\tdefault:\n\t\t\tinner, field = t.directory(child, fieldNum, false)\n\t\t}\n\t\tif err := scope.addType(inner); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t}\n\t\tmsg.AddField(field)\n\t}\n\n\tfor _, t := range scope.allTypes() {\n\t\tmsg.AddType(t)\n\t}\n\n\treturn msg\n}\n\nfunc (t *transformer) leaf(e entry, index int, repeated bool) (field *pbast.MessageField, nested pbast.Type) {\n\ttyp := builtinMap[e.Type.Kind]\n\t\/\/ no direct builtin type mapping\n\t\/\/ custom message is built\n\tif typ == nil {\n\t\tname := t.typeName(e)\n\t\tswitch e.Type.Kind {\n\t\t\/\/ define at the top level\n\t\tcase yang.Ydecimal64:\n\t\t\tt.decimal64 = decimal64Message\n\t\t\ttyp = decimal64Message\n\t\t\/\/ define as a nested type\n\t\tcase yang.Ybits:\n\t\t\ttyp = t.customBits(name, e.Type.Bit)\n\t\t\/\/ define as a nested type\n\t\tcase yang.Yenum:\n\t\t\ttyp = t.customEnum(name, e.Type.Enum)\n\t\t\/\/ not implemented\n\t\tcase yang.Yunion, yang.Yempty, yang.Yleafref,\n\t\t\tyang.Yidentityref, yang.YinstanceIdentifier:\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\n\tname := underscoreCase(e.Name)\n\tfield = &pbast.MessageField{\n\t\tRepeated: repeated,\n\t\tType: typ.TypeName(),\n\t\tName: name,\n\t\tIndex: index,\n\t\tComment: t.genericComments(e),\n\t}\n\n\tif e.Type.Kind == yang.Ydecimal64 {\n\t\treturn field, nil\n\t}\n\n\treturn field, typ\n}\n\n\/\/ e must be a leaf entry\nfunc (t *transformer) typeName(e entry) string {\n\t\/\/ if the type name matches one of builtin type names,\n\t\/\/ it means typedef is not used\n\tif _, ok := builtinTypes[e.Type.Name]; ok {\n\t\treturn CamelCase(e.Name)\n\t}\n\n\t\/\/ if typedef is used, use the type name instead of the leaf node name\n\treturn CamelCase(e.Type.Name)\n}\n\nfunc (t *transformer) customBits(name string, bits *yang.EnumType) *pbast.Message {\n\tmsg := pbast.NewMessage(name)\n\tfor i, n := range bits.Names() {\n\t\tv := 1 << uint(bits.Values()[i])\n\t\tmsg.AddField(pbast.NewMessageField(pbast.Bool, n, v))\n\t}\n\n\treturn msg\n}\n\nfunc (t *transformer) customEnum(name string, e *yang.EnumType) *pbast.Enum {\n\tenum := pbast.NewEnum(name)\n\tfor i, n := range e.Names() {\n\t\tv := int(e.Values()[i])\n\t\tenum.AddField(pbast.NewEnumField(constantName(n), v))\n\t}\n\n\treturn enum\n}\n\nfunc (t *transformer) directory(e entry, index int, repeated bool) (*pbast.Message, *pbast.MessageField) {\n\tfieldName := underscoreCase(e.Name)\n\ttypeName := CamelCase(e.Name)\n\n\tinner := t.buildMessage(typeName, e)\n\tfield := &pbast.MessageField{\n\t\tRepeated: repeated,\n\t\tType: inner.TypeName(),\n\t\tName: fieldName,\n\t\tIndex: index,\n\t\tComment: t.genericComments(e),\n\t}\n\n\treturn inner, field\n}\n<commit_msg>Remove redundant nil check<commit_after>package yang\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/openconfig\/goyang\/pkg\/yang\"\n\t\"github.com\/oshothebig\/pbast\"\n)\n\nvar builtinMap = map[yang.TypeKind]pbast.Type{\n\tyang.Yint8: pbast.Int32,\n\tyang.Yint16: pbast.Int32,\n\tyang.Yint32: pbast.Int32,\n\tyang.Yint64: pbast.Int64,\n\tyang.Yuint8: pbast.UInt32,\n\tyang.Yuint16: pbast.UInt32,\n\tyang.Yuint32: pbast.UInt32,\n\tyang.Yuint64: pbast.UInt64,\n\tyang.Ystring: pbast.String,\n\tyang.Ybool: pbast.Bool,\n\tyang.Ybinary: pbast.Bytes,\n}\n\nvar builtinTypes = map[string]struct{}{\n\t\"int8\": struct{}{},\n\t\"int16\": struct{}{},\n\t\"int32\": struct{}{},\n\t\"int64\": struct{}{},\n\t\"uint8\": struct{}{},\n\t\"uint16\": struct{}{},\n\t\"unit32\": struct{}{},\n\t\"uint64\": struct{}{},\n\t\"string\": struct{}{},\n\t\"boolean\": struct{}{},\n\t\"enumeration\": struct{}{},\n\t\"bits\": struct{}{},\n\t\"binary\": struct{}{},\n\t\"leafref\": struct{}{},\n\t\"identityref\": struct{}{},\n\t\"empty\": struct{}{},\n\t\"union\": struct{}{},\n\t\"instance-identifier\": struct{}{},\n}\n\ntype transformer struct {\n\ttopScope *scope\n\tdecimal64 *pbast.Message\n}\n\n\/\/ e must be YANG module\nfunc Transform(e *yang.Entry) *pbast.File {\n\tif _, ok := e.Node.(*yang.Module); !ok {\n\t\treturn nil\n\t}\n\n\tt := &transformer{\n\t\ttopScope: newScope(),\n\t}\n\n\treturn t.module(entry{e})\n}\n\nfunc (t *transformer) declare(m *pbast.Message) {\n\tif err := t.topScope.addType(m); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t}\n}\n\nfunc (t *transformer) module(e entry) *pbast.File {\n\tnamespace := e.Namespace().Name\n\tf := pbast.NewFile(pbast.NewPackageWithElements(guessElements(namespace)))\n\n\tf.Comment = t.moduleComment(e)\n\n\troot := t.buildMessage(\"Root\", e)\n\t\/\/ Clear Root messgage comment because it overlaps with\n\t\/\/ the file level comment being generated from module description too\n\troot.Comment = nil\n\t\/\/ Child nodes are enclosed with Root message\n\tf.AddMessage(root)\n\n\t\/\/ RPCs\n\ts := t.rpcs(e)\n\tf.AddService(s)\n\n\t\/\/ Notifications\n\tn := t.notifications(e)\n\tf.AddService(n)\n\n\tfor _, m := range t.topScope.allTypes() {\n\t\tf.AddType(m)\n\t}\n\tf.AddMessage(t.decimal64)\n\n\treturn f\n}\n\nfunc (t *transformer) moduleComment(e entry) pbast.Comment {\n\tdescription := t.description(e)\n\tnamespace := t.namespace(e)\n\trevisions := t.revisions(e)\n\treference := t.reference(e)\n\n\tvar comment []string\n\tcomment = append(comment, description...)\n\tcomment = append(comment, namespace...)\n\tcomment = append(comment, revisions...)\n\tcomment = append(comment, reference...)\n\n\treturn comment\n}\n\nfunc (t *transformer) genericComments(e entry) pbast.Comment {\n\tdescription := t.description(e)\n\treference := t.reference(e)\n\n\tcomments := append(description, reference...)\n\treturn comments\n}\n\nfunc (t *transformer) description(e entry) pbast.Comment {\n\tdescription := e.Description\n\tif e.Description == \"\" {\n\t\treturn nil\n\t}\n\n\tlines := strings.Split(strings.TrimRight(description, \"\\n \"), \"\\n\")\n\n\tret := make([]string, 0, len(lines)+1)\n\tret = append(ret, \"Description:\")\n\tret = append(ret, lines...)\n\treturn ret\n}\n\nfunc (t *transformer) revisions(e entry) pbast.Comment {\n\tvar lines []string\n\tif v := e.Extra[\"revision\"]; len(v) > 0 {\n\t\tfor _, rev := range v[0].([]*yang.Revision) {\n\t\t\tlines = append(lines, \"Revision: \"+rev.Name)\n\t\t}\n\t}\n\n\treturn lines\n}\n\nfunc (t *transformer) namespace(e entry) pbast.Comment {\n\tnamespace := e.Namespace().Name\n\tif namespace == \"\" {\n\t\treturn nil\n\t}\n\n\treturn []string{\"Namespace: \" + namespace}\n}\n\nfunc (t *transformer) reference(e entry) pbast.Comment {\n\tv := e.Extra[\"reference\"]\n\tif len(v) == 0 {\n\t\treturn nil\n\t}\n\n\tref := v[0].(*yang.Value)\n\tif ref == nil {\n\t\treturn nil\n\t}\n\tif ref.Name == \"\" {\n\t\treturn nil\n\t}\n\n\tlines := strings.Split(strings.TrimRight(ref.Name, \"\\n \"), \"\\n\")\n\n\tret := make([]string, 0, len(lines)+1)\n\tret = append(ret, \"Reference:\")\n\tret = append(ret, lines...)\n\treturn ret\n}\n\nfunc (t *transformer) rpcs(e entry) *pbast.Service {\n\trpcs := e.rpcs()\n\tif len(rpcs) == 0 {\n\t\treturn nil\n\t}\n\n\ts := pbast.NewService(CamelCase(e.Name))\n\tfor _, rpc := range rpcs {\n\t\tr := t.rpc(rpc)\n\t\ts.AddRPC(r)\n\t}\n\n\treturn s\n}\n\nfunc (t *transformer) rpc(e entry) *pbast.RPC {\n\tmethod := CamelCase(e.Name)\n\tin := method + \"Request\"\n\tout := method + \"Response\"\n\n\trpc := pbast.NewRPC(\n\t\tmethod,\n\t\tpbast.NewReturnType(in),\n\t\tpbast.NewReturnType(out),\n\t)\n\trpc.Comment = t.genericComments(e)\n\n\tt.declare(t.buildMessage(in, entry{e.RPC.Input}))\n\tt.declare(t.buildMessage(out, entry{e.RPC.Output}))\n\n\treturn rpc\n}\n\nfunc (t *transformer) notifications(e entry) *pbast.Service {\n\tnotifications := e.notifications()\n\tif len(notifications) == 0 {\n\t\treturn nil\n\t}\n\n\ts := pbast.NewService(CamelCase(e.Name + \"Notification\"))\n\tfor _, notification := range notifications {\n\t\tn := t.notification(notification)\n\t\tn.Comment = t.genericComments(notification)\n\t\ts.AddRPC(n)\n\t}\n\n\treturn s\n}\n\nfunc (t *transformer) notification(e entry) *pbast.RPC {\n\tmethod := CamelCase(e.Name)\n\tin := method + \"NotificationRequest\"\n\tout := method + \"NotificationResponse\"\n\n\trpc := pbast.NewRPC(method, pbast.NewReturnType(in), pbast.NewReturnType(out))\n\n\t\/\/ notification statement doesn't have an input parameter equivalent,\n\t\/\/ then empty message is used for input as RPC\n\tt.declare(pbast.NewMessage(in))\n\tt.declare(t.buildMessage(out, e))\n\n\treturn rpc\n}\n\nfunc (t *transformer) buildMessage(name string, e entry) *pbast.Message {\n\tif e.Entry == nil {\n\t\treturn nil\n\t}\n\n\tmsg := pbast.NewMessage(name)\n\tscope := newScope()\n\tmsg.Comment = t.genericComments(e)\n\tfor index, child := range e.children() {\n\t\tfieldNum := index + 1\n\t\tvar field *pbast.MessageField\n\t\tvar inner pbast.Type\n\t\tswitch {\n\t\t\/\/ leaf-list case\n\t\tcase child.Type != nil && child.ListAttr != nil:\n\t\t\tfield, inner = t.leaf(child, fieldNum, true)\n\t\t\/\/ leaf case\n\t\tcase child.Type != nil:\n\t\t\tfield, inner = t.leaf(child, fieldNum, false)\n\t\t\/\/ list case\n\t\tcase child.ListAttr != nil:\n\t\t\tinner, field = t.directory(child, fieldNum, true)\n\t\t\/\/ others might be container case\n\t\tdefault:\n\t\t\tinner, field = t.directory(child, fieldNum, false)\n\t\t}\n\t\tif err := scope.addType(inner); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t}\n\t\tmsg.AddField(field)\n\t}\n\n\tfor _, t := range scope.allTypes() {\n\t\tmsg.AddType(t)\n\t}\n\n\treturn msg\n}\n\nfunc (t *transformer) leaf(e entry, index int, repeated bool) (field *pbast.MessageField, nested pbast.Type) {\n\ttyp := builtinMap[e.Type.Kind]\n\t\/\/ no direct builtin type mapping\n\t\/\/ custom message is built\n\tif typ == nil {\n\t\tname := t.typeName(e)\n\t\tswitch e.Type.Kind {\n\t\t\/\/ define at the top level\n\t\tcase yang.Ydecimal64:\n\t\t\tt.decimal64 = decimal64Message\n\t\t\ttyp = decimal64Message\n\t\t\/\/ define as a nested type\n\t\tcase yang.Ybits:\n\t\t\ttyp = t.customBits(name, e.Type.Bit)\n\t\t\/\/ define as a nested type\n\t\tcase yang.Yenum:\n\t\t\ttyp = t.customEnum(name, e.Type.Enum)\n\t\t\/\/ not implemented\n\t\tcase yang.Yunion, yang.Yempty, yang.Yleafref,\n\t\t\tyang.Yidentityref, yang.YinstanceIdentifier:\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\n\tname := underscoreCase(e.Name)\n\tfield = &pbast.MessageField{\n\t\tRepeated: repeated,\n\t\tType: typ.TypeName(),\n\t\tName: name,\n\t\tIndex: index,\n\t\tComment: t.genericComments(e),\n\t}\n\n\tif e.Type.Kind == yang.Ydecimal64 {\n\t\treturn field, nil\n\t}\n\n\treturn field, typ\n}\n\n\/\/ e must be a leaf entry\nfunc (t *transformer) typeName(e entry) string {\n\t\/\/ if the type name matches one of builtin type names,\n\t\/\/ it means typedef is not used\n\tif _, ok := builtinTypes[e.Type.Name]; ok {\n\t\treturn CamelCase(e.Name)\n\t}\n\n\t\/\/ if typedef is used, use the type name instead of the leaf node name\n\treturn CamelCase(e.Type.Name)\n}\n\nfunc (t *transformer) customBits(name string, bits *yang.EnumType) *pbast.Message {\n\tmsg := pbast.NewMessage(name)\n\tfor i, n := range bits.Names() {\n\t\tv := 1 << uint(bits.Values()[i])\n\t\tmsg.AddField(pbast.NewMessageField(pbast.Bool, n, v))\n\t}\n\n\treturn msg\n}\n\nfunc (t *transformer) customEnum(name string, e *yang.EnumType) *pbast.Enum {\n\tenum := pbast.NewEnum(name)\n\tfor i, n := range e.Names() {\n\t\tv := int(e.Values()[i])\n\t\tenum.AddField(pbast.NewEnumField(constantName(n), v))\n\t}\n\n\treturn enum\n}\n\nfunc (t *transformer) directory(e entry, index int, repeated bool) (*pbast.Message, *pbast.MessageField) {\n\tfieldName := underscoreCase(e.Name)\n\ttypeName := CamelCase(e.Name)\n\n\tinner := t.buildMessage(typeName, e)\n\tfield := &pbast.MessageField{\n\t\tRepeated: repeated,\n\t\tType: inner.TypeName(),\n\t\tName: fieldName,\n\t\tIndex: index,\n\t\tComment: t.genericComments(e),\n\t}\n\n\treturn inner, field\n}\n<|endoftext|>"} {"text":"<commit_before>package storage\n\nimport (\n\t\"code.google.com\/p\/go-uuid\/uuid\"\n\t\"encoding\/json\"\n\t\"github.com\/MerlinDMC\/dsapid\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"sort\"\n)\n\nconst (\n\tdefaultManifestFilename string = \"manifest.json\"\n)\n\ntype ManifestStorage interface {\n\tAdd(string, *dsapid.ManifestResource)\n\tGet(string) *dsapid.ManifestResource\n\tGetOK(string) (*dsapid.ManifestResource, bool)\n\tList() chan *dsapid.ManifestResource\n\tFilter(...ManifestFilter) chan *dsapid.ManifestResource\n\tManifestPath(*dsapid.ManifestResource) string\n\tFilePath(*dsapid.ManifestResource, *dsapid.ManifestFileResource) string\n}\n\ntype filesystemManifestStorage struct {\n\tbasedir string\n\n\tmanifests map[string]*dsapid.ManifestResource\n\tbyDate []*dsapid.ManifestResource\n}\n\ntype ManifestFilter func(*dsapid.ManifestResource) bool\n\ntype ManifestsByPublishedAt []*dsapid.ManifestResource\n\nfunc (t ManifestsByPublishedAt) Len() int { return len(t) }\nfunc (t ManifestsByPublishedAt) Swap(i, j int) { t[i], t[j] = t[j], t[i] }\nfunc (t ManifestsByPublishedAt) Less(i, j int) bool {\n\treturn t[i].PublishedAt.Unix() > t[j].PublishedAt.Unix()\n}\n\nfunc NewManifestStorage(basedir string) ManifestStorage {\n\tstorage := new(filesystemManifestStorage)\n\n\tstorage.basedir = basedir\n\n\tstorage.manifests = make(map[string]*dsapid.ManifestResource)\n\tstorage.byDate = make([]*dsapid.ManifestResource, 0)\n\n\tstorage.load()\n\n\treturn storage\n}\n\nfunc (me *filesystemManifestStorage) Add(id string, manifest *dsapid.ManifestResource) {\n\tos.MkdirAll(path.Join(me.basedir, id), 0770)\n\n\tif data, err := json.MarshalIndent(manifest, \"\", \" \"); err == nil {\n\t\terr = ioutil.WriteFile(path.Join(me.basedir, id, defaultManifestFilename), data, 0666)\n\t}\n\n\tme.add(id, manifest)\n}\n\nfunc (me *filesystemManifestStorage) Get(id string) *dsapid.ManifestResource {\n\treturn me.manifests[id]\n}\n\nfunc (me *filesystemManifestStorage) GetOK(id string) (*dsapid.ManifestResource, bool) {\n\tv, ok := me.manifests[id]\n\n\treturn v, ok\n}\n\nfunc (me *filesystemManifestStorage) List() (c chan *dsapid.ManifestResource) {\n\tc = make(chan *dsapid.ManifestResource)\n\n\tgo func() {\n\t\tfor _, item := range me.byDate {\n\t\t\tc <- item\n\t\t}\n\n\t\tclose(c)\n\t}()\n\n\treturn\n}\n\nfunc (me *filesystemManifestStorage) Filter(flist ...ManifestFilter) (c chan *dsapid.ManifestResource) {\n\tc = make(chan *dsapid.ManifestResource)\n\n\tgo func() {\n\tnextItem:\n\t\tfor item := range me.List() {\n\t\t\tfor _, f := range flist {\n\t\t\t\tif !f(item) {\n\t\t\t\t\tcontinue nextItem\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tc <- item\n\t\t}\n\n\t\tclose(c)\n\t}()\n\n\treturn\n}\n\nfunc (me *filesystemManifestStorage) ManifestPath(manifest *dsapid.ManifestResource) string {\n\treturn path.Join(me.basedir, manifest.Uuid)\n}\n\nfunc (me *filesystemManifestStorage) FilePath(manifest *dsapid.ManifestResource, file *dsapid.ManifestFileResource) string {\n\treturn path.Join(me.basedir, manifest.Uuid, file.Path)\n}\n\nfunc (me *filesystemManifestStorage) add(id string, manifest *dsapid.ManifestResource) {\n\tme.manifests[id] = manifest\n\tme.byDate = append(me.byDate, manifest)\n\n\tsort.Sort(ManifestsByPublishedAt(me.byDate))\n}\n\nfunc (me *filesystemManifestStorage) load() {\n\tif items, err := ioutil.ReadDir(me.basedir); err == nil {\n\t\tfor _, item := range items {\n\t\t\tif item.IsDir() {\n\t\t\t\tmanifestFilename := path.Join(me.basedir, item.Name(), defaultManifestFilename)\n\n\t\t\t\tif _, err := os.Stat(manifestFilename); err == nil {\n\t\t\t\t\tif data, err := ioutil.ReadFile(manifestFilename); err == nil {\n\t\t\t\t\t\tid, manifest := uuid.Parse(item.Name()), dsapid.ManifestResource{}\n\n\t\t\t\t\t\tjson.Unmarshal(data, &manifest)\n\n\t\t\t\t\t\tif id.String() == manifest.Uuid {\n\t\t\t\t\t\t\tme.add(manifest.Uuid, &manifest)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>manifests should be updateable and deletable<commit_after>package storage\n\nimport (\n\t\"code.google.com\/p\/go-uuid\/uuid\"\n\t\"encoding\/json\"\n\t\"github.com\/MerlinDMC\/dsapid\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"sort\"\n)\n\nconst (\n\tdefaultManifestFilename string = \"manifest.json\"\n)\n\ntype ManifestStorage interface {\n\tAdd(string, *dsapid.ManifestResource) error\n\tUpdate(string, *dsapid.ManifestResource) error\n\tDelete(string)\n\tGet(string) *dsapid.ManifestResource\n\tGetOK(string) (*dsapid.ManifestResource, bool)\n\tList() chan *dsapid.ManifestResource\n\tFilter(...ManifestFilter) chan *dsapid.ManifestResource\n\tManifestPath(*dsapid.ManifestResource) string\n\tFilePath(*dsapid.ManifestResource, *dsapid.ManifestFileResource) string\n}\n\ntype filesystemManifestStorage struct {\n\tbasedir string\n\n\tmanifests map[string]*dsapid.ManifestResource\n\tbyDate []*dsapid.ManifestResource\n}\n\ntype ManifestFilter func(*dsapid.ManifestResource) bool\n\ntype ManifestsByPublishedAt []*dsapid.ManifestResource\n\nfunc (t ManifestsByPublishedAt) Len() int { return len(t) }\nfunc (t ManifestsByPublishedAt) Swap(i, j int) { t[i], t[j] = t[j], t[i] }\nfunc (t ManifestsByPublishedAt) Less(i, j int) bool {\n\treturn t[i].PublishedAt.Unix() > t[j].PublishedAt.Unix()\n}\n\nfunc NewManifestStorage(basedir string) ManifestStorage {\n\tstorage := new(filesystemManifestStorage)\n\n\tstorage.basedir = basedir\n\n\tstorage.manifests = make(map[string]*dsapid.ManifestResource)\n\tstorage.byDate = make([]*dsapid.ManifestResource, 0)\n\n\tstorage.load()\n\n\treturn storage\n}\n\nfunc (me *filesystemManifestStorage) Add(id string, manifest *dsapid.ManifestResource) error {\n\tos.MkdirAll(path.Join(me.basedir, id), 0770)\n\n\tif data, err := json.MarshalIndent(manifest, \"\", \" \"); err == nil {\n\t\tif err = ioutil.WriteFile(path.Join(me.basedir, id, defaultManifestFilename), data, 0666); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tme.add(id, manifest)\n\n\treturn nil\n}\n\nfunc (me *filesystemManifestStorage) Update(id string, manifest *dsapid.ManifestResource) error {\n\tif data, err := json.MarshalIndent(manifest, \"\", \" \"); err == nil {\n\t\tif err = ioutil.WriteFile(path.Join(me.basedir, id, defaultManifestFilename), data, 0666); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (me *filesystemManifestStorage) Delete(id string) {\n\tos.RemoveAll(path.Join(me.basedir, id))\n\n\tme.delete(id)\n}\n\nfunc (me *filesystemManifestStorage) Get(id string) *dsapid.ManifestResource {\n\treturn me.manifests[id]\n}\n\nfunc (me *filesystemManifestStorage) GetOK(id string) (*dsapid.ManifestResource, bool) {\n\tv, ok := me.manifests[id]\n\n\treturn v, ok\n}\n\nfunc (me *filesystemManifestStorage) List() (c chan *dsapid.ManifestResource) {\n\tc = make(chan *dsapid.ManifestResource)\n\n\tgo func() {\n\t\tfor _, item := range me.byDate {\n\t\t\tc <- item\n\t\t}\n\n\t\tclose(c)\n\t}()\n\n\treturn\n}\n\nfunc (me *filesystemManifestStorage) Filter(flist ...ManifestFilter) (c chan *dsapid.ManifestResource) {\n\tc = make(chan *dsapid.ManifestResource)\n\n\tgo func() {\n\tnextItem:\n\t\tfor item := range me.List() {\n\t\t\tfor _, f := range flist {\n\t\t\t\tif !f(item) {\n\t\t\t\t\tcontinue nextItem\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tc <- item\n\t\t}\n\n\t\tclose(c)\n\t}()\n\n\treturn\n}\n\nfunc (me *filesystemManifestStorage) ManifestPath(manifest *dsapid.ManifestResource) string {\n\treturn path.Join(me.basedir, manifest.Uuid)\n}\n\nfunc (me *filesystemManifestStorage) FilePath(manifest *dsapid.ManifestResource, file *dsapid.ManifestFileResource) string {\n\treturn path.Join(me.basedir, manifest.Uuid, file.Path)\n}\n\nfunc (me *filesystemManifestStorage) add(id string, manifest *dsapid.ManifestResource) {\n\tme.manifests[id] = manifest\n\tme.byDate = append(me.byDate, manifest)\n\n\tsort.Sort(ManifestsByPublishedAt(me.byDate))\n}\n\nfunc (me *filesystemManifestStorage) delete(id string) {\n\tfor i, m := range me.byDate {\n\t\tif m.Uuid == id {\n\t\t\tme.byDate = append(me.byDate[:i], me.byDate[i+1:]...)\n\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif _, ok := me.manifests[id]; ok {\n\t\tdelete(me.manifests, id)\n\t}\n}\n\nfunc (me *filesystemManifestStorage) load() {\n\tif items, err := ioutil.ReadDir(me.basedir); err == nil {\n\t\tfor _, item := range items {\n\t\t\tif item.IsDir() {\n\t\t\t\tmanifestFilename := path.Join(me.basedir, item.Name(), defaultManifestFilename)\n\n\t\t\t\tif _, err := os.Stat(manifestFilename); err == nil {\n\t\t\t\t\tif data, err := ioutil.ReadFile(manifestFilename); err == nil {\n\t\t\t\t\t\tid, manifest := uuid.Parse(item.Name()), dsapid.ManifestResource{}\n\n\t\t\t\t\t\tjson.Unmarshal(data, &manifest)\n\n\t\t\t\t\t\tif id.String() == manifest.Uuid {\n\t\t\t\t\t\t\tme.add(manifest.Uuid, &manifest)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package google\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\n\t\"gitgud.io\/softashell\/comfy-translator\/translator\"\n)\n\nconst (\n\tuserAgent = \"Mozilla\/5.0 (Windows NT 6.1; WOW64; Trident\/7.0; rv:11.0) like Gecko\"\n\tdelay = time.Second \/ 2\n)\n\nvar (\n\tallStringRegex = regexp.MustCompile(\"\\\"(.+?)\\\",\\\"(.+?)\\\",?\")\n\tgarbageRegex = regexp.MustCompile(`\\s?_{2,3}(\\s\\d)?`)\n)\n\ntype Translate struct {\n\tclient *http.Client\n\tlastRequest time.Time\n\tmutex *sync.Mutex\n}\n\nfunc New() *Translate {\n\tlog.Info(\"Starting google translation engine\")\n\n\treturn &Translate{\n\t\tclient: &http.Client{Timeout: (10 * time.Second)},\n\t\tlastRequest: time.Now(),\n\t\tmutex: &sync.Mutex{},\n\t}\n}\n\nfunc (t Translate) Name() string {\n\treturn \"Google\"\n}\n\nfunc (t Translate) Translate(req *translator.Request) (string, error) {\n\tstart := time.Now()\n\n\tt.mutex.Lock()\n\ttranslator.CheckThrottle(t.lastRequest, delay)\n\tdefer t.mutex.Unlock()\n\n\tvar URL *url.URL\n\tURL, err := url.Parse(\"https:\/\/translate.google.com\/translate_a\/single\")\n\n\tparameters := url.Values{}\n\tparameters.Add(\"client\", \"gtx\")\n\tparameters.Add(\"dt\", \"t\")\n\tparameters.Add(\"sl\", req.From)\n\tparameters.Add(\"tl\", req.To)\n\tparameters.Add(\"ie\", \"UTF-8\")\n\tparameters.Add(\"oe\", \"UTF-8\")\n\tparameters.Add(\"q\", req.Text)\n\n\t\/\/ \/translate_a\/single?client=gtx&dt=t&sl=%hs&tl=%hs&ie=UTF-8&oe=UTF-8&q=%s\n\tURL.RawQuery = parameters.Encode()\n\n\tr, err := http.NewRequest(\"GET\", URL.String(), nil)\n\tif err != nil {\n\t\tlog.Errorln(\"Failed to create request\", err)\n\t\treturn \"\", err\n\t}\n\n\tr.Header.Set(\"User-Agent\", userAgent)\n\n\tt.lastRequest = time.Now()\n\n\tresp, err := t.client.Do(r)\n\tif err != nil {\n\t\tlog.Errorln(\"Failed to do request\", err)\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn \"\", fmt.Errorf(\"%s\", resp.Status)\n\t}\n\n\t\/\/ [[[\"It will be saved\",\"助かるわい\",,,3]],,\"ja\"]\n\tcontents, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Error(\"Failed to read response body\", err)\n\t\treturn \"\", err\n\t}\n\n\tallStrings := allStringRegex.FindAllStringSubmatch(string(contents), -1)\n\n\tif len(allStrings) < 1 {\n\t\treturn \"\", fmt.Errorf(\"Bad response %s\", contents)\n\t}\n\n\tvar out string\n\tfor _, v := range allStrings {\n\t\tif len(v) < 3 {\n\t\t\tcontinue\n\t\t}\n\n\t\tout += v[1]\n\t}\n\n\t\/\/ Delete garbage output which often leaves the output empty, fix your shit google tbh\n\tout2 := garbageRegex.ReplaceAllString(out, \"\")\n\tif len(out) < 1 || (len(out2) < len(out)\/2) {\n\t\treturn \"\", fmt.Errorf(\"Bad response %q\", out)\n\t}\n\n\tout = cleanText(out2)\n\n\tlog.WithFields(log.Fields{\n\t\t\"time\": time.Since(start),\n\t}).Debugf(\"Google: %q\", out)\n\n\treturn out, nil\n}\n\nfunc cleanText(text string) string {\n\ttext = strings.Replace(text, \"\\\\\\\\\", \"\\\\\", -1)\n\n\t\/\/ Replace escaped quotes and newlines\n\ttext = strings.Replace(text, \"\\\\\\\"\", \"\\\"\", -1)\n\ttext = strings.Replace(text, \"\\\\n\", \"\\n\", -1)\n\n\t\/\/ Replace raw characters\n\ttext = strings.Replace(text, \"\\\\u0026\", \"&\", -1)\n\n\treturn text\n}\n<commit_msg>Replace \\u003c and \\u003e with < and ><commit_after>package google\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"regexp\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\n\t\"gitgud.io\/softashell\/comfy-translator\/translator\"\n)\n\nconst (\n\tuserAgent = \"Mozilla\/5.0 (Windows NT 6.1; WOW64; Trident\/7.0; rv:11.0) like Gecko\"\n\tdelay = time.Second \/ 2\n)\n\nvar (\n\tallStringRegex = regexp.MustCompile(\"\\\"(.+?)\\\",\\\"(.+?)\\\",?\")\n\tgarbageRegex = regexp.MustCompile(`\\s?_{2,3}(\\s\\d)?`)\n)\n\ntype Translate struct {\n\tclient *http.Client\n\tlastRequest time.Time\n\tmutex *sync.Mutex\n}\n\nfunc New() *Translate {\n\tlog.Info(\"Starting google translation engine\")\n\n\treturn &Translate{\n\t\tclient: &http.Client{Timeout: (10 * time.Second)},\n\t\tlastRequest: time.Now(),\n\t\tmutex: &sync.Mutex{},\n\t}\n}\n\nfunc (t Translate) Name() string {\n\treturn \"Google\"\n}\n\nfunc (t Translate) Translate(req *translator.Request) (string, error) {\n\tstart := time.Now()\n\n\tt.mutex.Lock()\n\ttranslator.CheckThrottle(t.lastRequest, delay)\n\tdefer t.mutex.Unlock()\n\n\tvar URL *url.URL\n\tURL, err := url.Parse(\"https:\/\/translate.google.com\/translate_a\/single\")\n\n\tparameters := url.Values{}\n\tparameters.Add(\"client\", \"gtx\")\n\tparameters.Add(\"dt\", \"t\")\n\tparameters.Add(\"sl\", req.From)\n\tparameters.Add(\"tl\", req.To)\n\tparameters.Add(\"ie\", \"UTF-8\")\n\tparameters.Add(\"oe\", \"UTF-8\")\n\tparameters.Add(\"q\", req.Text)\n\n\t\/\/ \/translate_a\/single?client=gtx&dt=t&sl=%hs&tl=%hs&ie=UTF-8&oe=UTF-8&q=%s\n\tURL.RawQuery = parameters.Encode()\n\n\tr, err := http.NewRequest(\"GET\", URL.String(), nil)\n\tif err != nil {\n\t\tlog.Errorln(\"Failed to create request\", err)\n\t\treturn \"\", err\n\t}\n\n\tr.Header.Set(\"User-Agent\", userAgent)\n\n\tt.lastRequest = time.Now()\n\n\tresp, err := t.client.Do(r)\n\tif err != nil {\n\t\tlog.Errorln(\"Failed to do request\", err)\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn \"\", fmt.Errorf(\"%s\", resp.Status)\n\t}\n\n\t\/\/ [[[\"It will be saved\",\"助かるわい\",,,3]],,\"ja\"]\n\tcontents, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Error(\"Failed to read response body\", err)\n\t\treturn \"\", err\n\t}\n\n\tallStrings := allStringRegex.FindAllStringSubmatch(string(contents), -1)\n\n\tif len(allStrings) < 1 {\n\t\treturn \"\", fmt.Errorf(\"Bad response %s\", contents)\n\t}\n\n\tvar out string\n\tfor _, v := range allStrings {\n\t\tif len(v) < 3 {\n\t\t\tcontinue\n\t\t}\n\n\t\tout += v[1]\n\t}\n\n\t\/\/ Delete garbage output which often leaves the output empty, fix your shit google tbh\n\tout2 := garbageRegex.ReplaceAllString(out, \"\")\n\tif len(out) < 1 || (len(out2) < len(out)\/2) {\n\t\treturn \"\", fmt.Errorf(\"Bad response %q\", out)\n\t}\n\n\tout = cleanText(out2)\n\n\tlog.WithFields(log.Fields{\n\t\t\"time\": time.Since(start),\n\t}).Debugf(\"Google: %q\", out)\n\n\treturn out, nil\n}\n\nfunc cleanText(text string) string {\n\ttext = strings.Replace(text, \"\\\\\\\\\", \"\\\\\", -1)\n\n\t\/\/ Replace escaped quotes and newlines\n\ttext = strings.Replace(text, \"\\\\\\\"\", \"\\\"\", -1)\n\ttext = strings.Replace(text, \"\\\\n\", \"\\n\", -1)\n\n\t\/\/ Replace raw characters\n\ttext = strings.Replace(text, \"\\\\u0026\", \"&\", -1)\n\ttext = strings.Replace(text, \"\\\\u003c\", \"<\", -1)\n\ttext = strings.Replace(text, \"\\\\u003e\", \">\", -1)\n\n\treturn text\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"log\"\n\n\t\"github.com\/DexterLB\/traytor\"\n\t\"github.com\/DexterLB\/traytor\/gui\"\n)\n\nfunc testSphere(display *gui.Display) {\n\trnd := traytor.NewRandom(42)\n\tfor i := 0; i < 10000; i++ {\n\t\tvec := rnd.Vec3Sphere()\n\t\tvec.Scale(250)\n\t\tvec.Add(traytor.NewVec3(400, 0, 400))\n\t\tdisplay.SetPixel(int(vec.X), int(vec.Z), traytor.NewColour32Bit(0x0fff, 0xffff, 0x4000))\n\t}\n}\n\nfunc testHemi(display *gui.Display) {\n\trnd := traytor.NewRandom(42)\n\tfor i := 0; i < 4000; i++ {\n\t\tvec := rnd.Vec3Hemi(traytor.NewVec3(0, 0, -1))\n\t\tvec.Scale(250)\n\t\tvec.Add(traytor.NewVec3(400, 0, 400))\n\t\tdisplay.SetPixel(int(vec.X), int(vec.Z), traytor.NewColour32Bit(0x0fff, 0xffff, 0x4000))\n\t}\n}\n\nfunc testHemiCos(display *gui.Display) {\n\trnd := traytor.NewRandom(42)\n\tfor i := 0; i < 4000; i++ {\n\t\tvec := rnd.Vec3HemiCos(traytor.NewVec3(0, 0, 1))\n\t\tvec.Scale(250)\n\t\tvec.Add(traytor.NewVec3(400, 0, 400))\n\t\tdisplay.SetPixel(int(vec.X), int(vec.Z), traytor.NewColour32Bit(0xffff, 0x0fff, 0x4000))\n\t}\n}\n\nfunc main() {\n\tdefer gui.Quit()\n\n\tdisplay, err := gui.NewDisplay(800, 800, \"shite\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\tdefer display.Close()\n\n\ttestHemi(display)\n\ttestHemiCos(display)\n\n\tdisplay.Update()\n\n\tdisplay.Loop()\n}\n<commit_msg>make the random test prettier<commit_after>package main\n\nimport (\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/DexterLB\/traytor\"\n\t\"github.com\/DexterLB\/traytor\/gui\"\n)\n\nfunc testSphere(display *gui.Display) {\n\trnd := traytor.NewRandom(42)\n\tfor i := 0; i < 10000; i++ {\n\t\tvec := rnd.Vec3Sphere()\n\t\tvec.Scale(350)\n\t\tvec.Add(traytor.NewVec3(400, 0, 400))\n\t\tdisplay.SetPixel(int(vec.X), int(vec.Z), traytor.NewColour32Bit(0x0fff, 0xffff, 0x4000))\n\t\ttime.Sleep(100 * time.Microsecond)\n\t\tdisplay.Update()\n\t}\n}\n\nfunc testHemi(display *gui.Display) {\n\trnd := traytor.NewRandom(42)\n\tfor i := 0; i < 17000; i++ {\n\t\tvec := rnd.Vec3Hemi(traytor.NewVec3(0, 0, -1))\n\t\tvec.Scale(350)\n\t\tvec.Add(traytor.NewVec3(400, 0, 400))\n\t\tdisplay.SetPixel(int(vec.X), int(vec.Z), traytor.NewColour32Bit(0x0fff, 0xffff, 0x4000))\n\n\t\tvecCos := rnd.Vec3HemiCos(traytor.NewVec3(0, 0, 1))\n\t\tvecCos.Scale(350)\n\t\tvecCos.Add(traytor.NewVec3(400, 0, 400))\n\t\tdisplay.SetPixel(int(vecCos.X), int(vecCos.Z), traytor.NewColour32Bit(0xffff, 0x0fff, 0x4000))\n\n\t\ttime.Sleep(100 * time.Microsecond)\n\t\tdisplay.Update()\n\t}\n}\n\nfunc main() {\n\tdefer gui.Quit()\n\n\tdisplay, err := gui.NewDisplay(800, 800, \"shite\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\treturn\n\t}\n\tdefer display.Close()\n\n\ttestHemi(display)\n\n\tdisplay.Update()\n\n\tdisplay.Loop()\n}\n<|endoftext|>"} {"text":"<commit_before>\/* linked_list.go : Implement linked list with too many memory allocations\nAuthor: James Fairbanks\nDate: 2012-09-02\nLiscence: BSD\n*\/\n\npackage list\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\/\/ \"github.com\/jpfairbanks\/timing\"\n\/\/ \"math\/rand\"\n)\n\n\/\/Dtype: the data type of the arrays\ntype Dtype int64\n\nvar NULL *Node\n\n\/\/NULL := 0\n\n\/\/Node: Struct containing a datum and a pointer to the next Node.\ntype Node struct {\n\tDatum Dtype\n\tNext *Node\n}\n\n\/\/List: Struct for containing the head of the list.\ntype List struct {\n\tHead *Node\n}\n\n\/\/New: Make a new list of Nodes.\nfunc New() List {\n\tvar ell List\n\thead := &Node{Datum: 0, Next: NULL}\n\tell.Head = head\n\treturn ell\n}\n\n\/\/Insert: Insert a Node into the list.\nfunc (ell *List) Insert(pos int, element Dtype) {\n\tvar prev *Node\n\tnode := ell.Head\n\tfor i := 0; i < pos; i++ {\n\t\t\/\/fmt.Println(\"getting the next\")\n\t\tif i == pos-1 {\n\t\t\tprev = node\n\t\t}\n\t\tnode = node.Next\n\t}\n\t\/\/fmt.Printf(\"found pos node: %v\\n\", node)\n\t\/\/fmt.Printf(\"found prev: %v\\n\", prev)\n\tnewNode := &Node{Datum: element, Next: node}\n\t\/\/fmt.Printf(\"made newNode: %v\\n\", newNode)\n\tif pos > 0 {\n\t\tprev.Next = newNode\n\t} else {\n\t\tell.Head = newNode\n\t}\n\t\/\/fmt.Println(\"did previous\")\n\t\/\/fmt.Println(node.Next)\n}\n\n\/\/Remove: Remove a Node into the list.\nfunc (ell *List) Remove(pos int) Dtype {\n\tnode := ell.Head\n\tfor i := 0; i < pos; i++ {\n\t\tnode = node.Next\n\t}\n\tnext := node.Next\n\tnode.Next = node.Next.Next\n\treturn next.Datum\n}\n\n\/\/String: print list by printing each element on a line\nfunc (ell *List) String() string {\n\tvar curr *Node\n\tcurr = ell.Head\n\tvar repr map[int]string\n\trepr = make(map[int]string)\n\tvar sterm string\n\tvar i int\n\tfor curr != nil {\n\t\tsterm = fmt.Sprintf(\"%d %v %v\",\n\t\t\ti, curr.Datum, curr.Next)\n\t\t\/\/fmt.Println(sterm)\n\t\trepr[i] = sterm\n\t\ti++\n\t\tcurr = curr.Next\n\t}\n\tordered := make([]string, len(repr))\n\tfor j := 0; j < i; j++ {\n\t\tordered[j] = repr[j]\n\t}\n\tvar joined string\n\tjoined = strings.Join(ordered, \"\\n\")\n\t\/\/fmt.Println(joined)\n\treturn joined\n}\n\n\/\/Strider: a slice of type dtype with a fixed stride\ntype Strider interface {\n\tWalk() Dtype\n}\n<commit_msg>remove whitespace<commit_after>\/* linked_list.go : Implement linked list with too many memory allocations\nAuthor: James Fairbanks\nDate: 2012-09-02\nLiscence: BSD\n*\/\n\npackage list\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\/\/ \"github.com\/jpfairbanks\/timing\"\n\/\/ \"math\/rand\"\n)\n\n\/\/Dtype: the data type of the arrays\ntype Dtype int64\n\nvar NULL *Node\n\n\/\/NULL := 0\n\n\/\/Node: Struct containing a datum and a pointer to the next Node.\ntype Node struct {\n\tDatum Dtype\n\tNext *Node\n}\n\n\/\/List: Struct for containing the head of the list.\ntype List struct {\n\tHead *Node\n}\n\n\/\/New: Make a new list of Nodes.\nfunc New() List {\n\tvar ell List\n\thead := &Node{Datum: 0, Next: NULL}\n\tell.Head = head\n\treturn ell\n}\n\n\/\/Insert: Insert a Node into the list.\nfunc (ell *List) Insert(pos int, element Dtype) {\n\tvar prev *Node\n\tnode := ell.Head\n\tfor i := 0; i < pos; i++ {\n\t\t\/\/fmt.Println(\"getting the next\")\n\t\tif i == pos-1 {\n\t\t\tprev = node\n\t\t}\n\t\tnode = node.Next\n\t}\n\t\/\/fmt.Printf(\"found pos node: %v\\n\", node)\n\t\/\/fmt.Printf(\"found prev: %v\\n\", prev)\n\tnewNode := &Node{Datum: element, Next: node}\n\t\/\/fmt.Printf(\"made newNode: %v\\n\", newNode)\n\tif pos > 0 {\n\t\tprev.Next = newNode\n\t} else {\n\t\tell.Head = newNode\n\t}\n\t\/\/fmt.Println(\"did previous\")\n\t\/\/fmt.Println(node.Next)\n}\n\n\/\/Remove: Remove a Node into the list.\nfunc (ell *List) Remove(pos int) Dtype {\n\tnode := ell.Head\n\tfor i := 0; i < pos; i++ {\n\t\tnode = node.Next\n\t}\n\tnext := node.Next\n\tnode.Next = node.Next.Next\n\treturn next.Datum\n}\n\n\/\/String: print list by printing each element on a line\nfunc (ell *List) String() string {\n\tvar curr *Node\n\tcurr = ell.Head\n\tvar repr map[int]string\n\trepr = make(map[int]string)\n\tvar sterm string\n\tvar i int\n\tfor curr != nil {\n\t\tsterm = fmt.Sprintf(\"%d %v %v\",\n\t\t\ti, curr.Datum, curr.Next)\n\t\t\/\/fmt.Println(sterm)\n\t\trepr[i] = sterm\n\t\ti++\n\t\tcurr = curr.Next\n\t}\n\tordered := make([]string, len(repr))\n\tfor j := 0; j < i; j++ {\n\t\tordered[j] = repr[j]\n\t}\n\tvar joined string\n\tjoined = strings.Join(ordered, \"\\n\")\n\t\/\/fmt.Println(joined)\n\treturn joined\n}\n\n\/\/Strider: a slice of type dtype with a fixed stride\ntype Strider interface {\n\tWalk() Dtype\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage stackdriver\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\tmonitoring \"google.golang.org\/genproto\/googleapis\/monitoring\/v3\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/balancer\/roundrobin\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"google.golang.org\/grpc\/credentials\"\n\t\"google.golang.org\/grpc\/credentials\/oauth\"\n\t\"google.golang.org\/grpc\/status\"\n\n\t\"github.com\/go-kit\/kit\/log\"\n\t\"github.com\/go-kit\/kit\/log\/level\"\n\t\"github.com\/prometheus\/common\/model\"\n\t\"github.com\/prometheus\/common\/version\"\n\n\tconfig_util \"github.com\/prometheus\/common\/config\"\n)\n\nconst (\n\t\/\/ TODO(jkohen): Use a custom prefix specific to Prometheus.\n\tmetricsPrefix = \"custom.googleapis.com\"\n\tmaxTimeseriesesPerRequest = 200\n\tMonitoringWriteScope = \"https:\/\/www.googleapis.com\/auth\/monitoring.write\"\n)\n\n\/\/ Client allows reading and writing from\/to a remote gRPC endpoint. The\n\/\/ implementation may hit a single backend, so the application should create a\n\/\/ number of these clients.\ntype Client struct {\n\tindex int \/\/ Used to differentiate clients in metrics.\n\tlogger log.Logger\n\tprojectId string\n\turl *config_util.URL\n\ttimeout time.Duration\n\n\tconn *grpc.ClientConn\n}\n\n\/\/ ClientConfig configures a Client.\ntype ClientConfig struct {\n\tLogger log.Logger\n\tProjectId string \/\/ The Stackdriver project id in \"projects\/name-or-number\" format.\n\tURL *config_util.URL\n\tTimeout model.Duration\n}\n\n\/\/ NewClient creates a new Client.\nfunc NewClient(index int, conf *ClientConfig) *Client {\n\tlogger := conf.Logger\n\tif logger == nil {\n\t\tlogger = log.NewNopLogger()\n\t}\n\treturn &Client{\n\t\tindex: index,\n\t\tlogger: logger,\n\t\tprojectId: conf.ProjectId,\n\t\turl: conf.URL,\n\t\ttimeout: time.Duration(conf.Timeout),\n\t}\n}\n\ntype recoverableError struct {\n\terror\n}\n\n\/\/ version.* is populated for 'promu' builds, so this will look broken in unit tests.\nvar userAgent = fmt.Sprintf(\"StackdriverPrometheus\/%s\", version.Version)\n\nfunc (c *Client) getConnection(ctx context.Context) (*grpc.ClientConn, error) {\n\tif c.conn != nil {\n\t\treturn c.conn, nil\n\t}\n\n\tuseAuth, err := strconv.ParseBool(c.url.Query().Get(\"auth\"))\n\tif err != nil {\n\t\tuseAuth = true \/\/ Default to auth enabled.\n\t}\n\tlevel.Debug(c.logger).Log(\n\t\t\"msg\", \"is auth enabled\",\n\t\t\"auth\", useAuth,\n\t\t\"url\", c.url.String())\n\t\/\/ Google APIs currently return a single IP for the whole service. gRPC\n\t\/\/ client-side load-balancing won't spread the load across backends\n\t\/\/ while that's true, but it also doesn't hurt.\n\tdopts := []grpc.DialOption{\n\t\tgrpc.WithBalancerName(roundrobin.Name),\n\t\tgrpc.WithBlock(), \/\/ Wait for the connection to be established before using it.\n\t\tgrpc.WithUserAgent(userAgent),\n\t}\n\tif useAuth {\n\t\trpcCreds, err := oauth.NewApplicationDefault(context.Background(), MonitoringWriteScope)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttlsCreds := credentials.NewTLS(&tls.Config{})\n\t\tdopts = append(dopts,\n\t\t\tgrpc.WithTransportCredentials(tlsCreds),\n\t\t\tgrpc.WithPerRPCCredentials(rpcCreds))\n\t} else {\n\t\tdopts = append(dopts, grpc.WithInsecure())\n\t}\n\taddress := c.url.Hostname()\n\tif len(c.url.Port()) > 0 {\n\t\taddress = fmt.Sprintf(\"%s:%s\", address, c.url.Port())\n\t}\n\tconn, err := grpc.DialContext(ctx, address, dopts...)\n\tc.conn = conn\n\treturn conn, err\n}\n\n\/\/ Store sends a batch of samples to the HTTP endpoint.\nfunc (c *Client) Store(req *monitoring.CreateTimeSeriesRequest) error {\n\ttss := req.TimeSeries\n\tif len(tss) == 0 {\n\t\t\/\/ Nothing to do, return silently.\n\t\treturn nil\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), c.timeout)\n\tdefer cancel()\n\n\tconn, err := c.getConnection(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlevel.Debug(c.logger).Log(\"msg\", \"sending request to Stackdriver\")\n\tservice := monitoring.NewMetricServiceClient(conn)\n\n\terrors := make(chan error, len(tss)\/maxTimeseriesesPerRequest+1)\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < len(tss); i += maxTimeseriesesPerRequest {\n\t\tend := i + maxTimeseriesesPerRequest\n\t\tif end > len(tss) {\n\t\t\tend = len(tss)\n\t\t}\n\t\twg.Add(1)\n\t\tgo func(begin int, end int) {\n\t\t\tdefer wg.Done()\n\t\t\treq_copy := &monitoring.CreateTimeSeriesRequest{\n\t\t\t\tName: c.projectId,\n\t\t\t\tTimeSeries: req.TimeSeries[begin:end],\n\t\t\t}\n\t\t\t_, err := service.CreateTimeSeries(ctx, req_copy)\n\t\t\tif err != nil {\n\t\t\t\tlevel.Debug(c.logger).Log(\n\t\t\t\t\t\"msg\", \"Partial failure calling CreateTimeSeries\",\n\t\t\t\t\t\"err\", err,\n\t\t\t\t\t\"req\", req_copy.String())\n\t\t\t\tstatus, ok := status.FromError(err)\n\t\t\t\tif !ok {\n\t\t\t\t\tlevel.Warn(c.logger).Log(\"msg\", \"Unexpected error message type from Monitoring API\", \"err\", err)\n\t\t\t\t\terrors <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif status.Code() == codes.Unavailable {\n\t\t\t\t\terrors <- recoverableError{err}\n\t\t\t\t} else {\n\t\t\t\t\terrors <- err\n\t\t\t\t}\n\t\t\t}\n\t\t}(i, end)\n\t}\n\twg.Wait()\n\tclose(errors)\n\tif err, ok := <-errors; ok {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Name identifies the client.\nfunc (c Client) Name() string {\n\treturn fmt.Sprintf(\"%d:%s\", c.index, c.url)\n}\n\nfunc (c Client) Close() error {\n\tif c.conn == nil {\n\t\treturn nil\n\t}\n\treturn c.conn.Close()\n}\n<commit_msg>Changed prefix from 'custom.googleapis.com' to 'external.googleapis.com\/prometheus'. This will break this program until the prefix is supported in the backend, coming ASAP.<commit_after>\/\/ Copyright 2016 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage stackdriver\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\tmonitoring \"google.golang.org\/genproto\/googleapis\/monitoring\/v3\"\n\t\"google.golang.org\/grpc\"\n\t\"google.golang.org\/grpc\/balancer\/roundrobin\"\n\t\"google.golang.org\/grpc\/codes\"\n\t\"google.golang.org\/grpc\/credentials\"\n\t\"google.golang.org\/grpc\/credentials\/oauth\"\n\t\"google.golang.org\/grpc\/status\"\n\n\t\"github.com\/go-kit\/kit\/log\"\n\t\"github.com\/go-kit\/kit\/log\/level\"\n\t\"github.com\/prometheus\/common\/model\"\n\t\"github.com\/prometheus\/common\/version\"\n\n\tconfig_util \"github.com\/prometheus\/common\/config\"\n)\n\nconst (\n\tmetricsPrefix = \"external.googleapis.com\/prometheus\"\n\tmaxTimeseriesesPerRequest = 200\n\tMonitoringWriteScope = \"https:\/\/www.googleapis.com\/auth\/monitoring.write\"\n)\n\n\/\/ Client allows reading and writing from\/to a remote gRPC endpoint. The\n\/\/ implementation may hit a single backend, so the application should create a\n\/\/ number of these clients.\ntype Client struct {\n\tindex int \/\/ Used to differentiate clients in metrics.\n\tlogger log.Logger\n\tprojectId string\n\turl *config_util.URL\n\ttimeout time.Duration\n\n\tconn *grpc.ClientConn\n}\n\n\/\/ ClientConfig configures a Client.\ntype ClientConfig struct {\n\tLogger log.Logger\n\tProjectId string \/\/ The Stackdriver project id in \"projects\/name-or-number\" format.\n\tURL *config_util.URL\n\tTimeout model.Duration\n}\n\n\/\/ NewClient creates a new Client.\nfunc NewClient(index int, conf *ClientConfig) *Client {\n\tlogger := conf.Logger\n\tif logger == nil {\n\t\tlogger = log.NewNopLogger()\n\t}\n\treturn &Client{\n\t\tindex: index,\n\t\tlogger: logger,\n\t\tprojectId: conf.ProjectId,\n\t\turl: conf.URL,\n\t\ttimeout: time.Duration(conf.Timeout),\n\t}\n}\n\ntype recoverableError struct {\n\terror\n}\n\n\/\/ version.* is populated for 'promu' builds, so this will look broken in unit tests.\nvar userAgent = fmt.Sprintf(\"StackdriverPrometheus\/%s\", version.Version)\n\nfunc (c *Client) getConnection(ctx context.Context) (*grpc.ClientConn, error) {\n\tif c.conn != nil {\n\t\treturn c.conn, nil\n\t}\n\n\tuseAuth, err := strconv.ParseBool(c.url.Query().Get(\"auth\"))\n\tif err != nil {\n\t\tuseAuth = true \/\/ Default to auth enabled.\n\t}\n\tlevel.Debug(c.logger).Log(\n\t\t\"msg\", \"is auth enabled\",\n\t\t\"auth\", useAuth,\n\t\t\"url\", c.url.String())\n\t\/\/ Google APIs currently return a single IP for the whole service. gRPC\n\t\/\/ client-side load-balancing won't spread the load across backends\n\t\/\/ while that's true, but it also doesn't hurt.\n\tdopts := []grpc.DialOption{\n\t\tgrpc.WithBalancerName(roundrobin.Name),\n\t\tgrpc.WithBlock(), \/\/ Wait for the connection to be established before using it.\n\t\tgrpc.WithUserAgent(userAgent),\n\t}\n\tif useAuth {\n\t\trpcCreds, err := oauth.NewApplicationDefault(context.Background(), MonitoringWriteScope)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttlsCreds := credentials.NewTLS(&tls.Config{})\n\t\tdopts = append(dopts,\n\t\t\tgrpc.WithTransportCredentials(tlsCreds),\n\t\t\tgrpc.WithPerRPCCredentials(rpcCreds))\n\t} else {\n\t\tdopts = append(dopts, grpc.WithInsecure())\n\t}\n\taddress := c.url.Hostname()\n\tif len(c.url.Port()) > 0 {\n\t\taddress = fmt.Sprintf(\"%s:%s\", address, c.url.Port())\n\t}\n\tconn, err := grpc.DialContext(ctx, address, dopts...)\n\tc.conn = conn\n\treturn conn, err\n}\n\n\/\/ Store sends a batch of samples to the HTTP endpoint.\nfunc (c *Client) Store(req *monitoring.CreateTimeSeriesRequest) error {\n\ttss := req.TimeSeries\n\tif len(tss) == 0 {\n\t\t\/\/ Nothing to do, return silently.\n\t\treturn nil\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), c.timeout)\n\tdefer cancel()\n\n\tconn, err := c.getConnection(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlevel.Debug(c.logger).Log(\"msg\", \"sending request to Stackdriver\")\n\tservice := monitoring.NewMetricServiceClient(conn)\n\n\terrors := make(chan error, len(tss)\/maxTimeseriesesPerRequest+1)\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < len(tss); i += maxTimeseriesesPerRequest {\n\t\tend := i + maxTimeseriesesPerRequest\n\t\tif end > len(tss) {\n\t\t\tend = len(tss)\n\t\t}\n\t\twg.Add(1)\n\t\tgo func(begin int, end int) {\n\t\t\tdefer wg.Done()\n\t\t\treq_copy := &monitoring.CreateTimeSeriesRequest{\n\t\t\t\tName: c.projectId,\n\t\t\t\tTimeSeries: req.TimeSeries[begin:end],\n\t\t\t}\n\t\t\t_, err := service.CreateTimeSeries(ctx, req_copy)\n\t\t\tif err != nil {\n\t\t\t\tlevel.Debug(c.logger).Log(\n\t\t\t\t\t\"msg\", \"Partial failure calling CreateTimeSeries\",\n\t\t\t\t\t\"err\", err,\n\t\t\t\t\t\"req\", req_copy.String())\n\t\t\t\tstatus, ok := status.FromError(err)\n\t\t\t\tif !ok {\n\t\t\t\t\tlevel.Warn(c.logger).Log(\"msg\", \"Unexpected error message type from Monitoring API\", \"err\", err)\n\t\t\t\t\terrors <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif status.Code() == codes.Unavailable {\n\t\t\t\t\terrors <- recoverableError{err}\n\t\t\t\t} else {\n\t\t\t\t\terrors <- err\n\t\t\t\t}\n\t\t\t}\n\t\t}(i, end)\n\t}\n\twg.Wait()\n\tclose(errors)\n\tif err, ok := <-errors; ok {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Name identifies the client.\nfunc (c Client) Name() string {\n\treturn fmt.Sprintf(\"%d:%s\", c.index, c.url)\n}\n\nfunc (c Client) Close() error {\n\tif c.conn == nil {\n\t\treturn nil\n\t}\n\treturn c.conn.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage state\n\nimport (\n\t\"errors\"\n\t\"labix.org\/v2\/mgo\/txn\"\n\t\"launchpad.net\/juju-core\/utils\"\n)\n\n\/\/ minUnitsDoc keeps track of relevant changes on the service's MinUnits field\n\/\/ and on the number of alive units for the service.\n\/\/ A new document is created when MinUnits is set to a non zero value.\n\/\/ A document is deleted when either the associated service is destroyed\n\/\/ or MinUnits is restored to zero. The Revno is increased when either MinUnits\n\/\/ for a service is increased or a unit is destroyed.\n\/\/ TODO(frankban): the MinUnitsWatcher reacts to changes by sending events,\n\/\/ each one describing one or more services. A worker reacts to those events\n\/\/ ensuring the number of units for the service is never less than the actual\n\/\/ alive units: new units are added if required.\ntype minUnitsDoc struct {\n\t\/\/ ServiceName is safe to be used here in place of its globalKey, since\n\t\/\/ the referred entity type is always the Service.\n\tServiceName string `bson:\"_id\"`\n\tRevno int\n}\n\n\/\/ SetMinUnits changes the number of minimum units required by the service.\nfunc (s *Service) SetMinUnits(minUnits int) (err error) {\n\tdefer utils.ErrorContextf(&err, \"cannot set minimum units for service %q\", s)\n\tdefer func() {\n\t\tif err == nil {\n\t\t\ts.doc.MinUnits = minUnits\n\t\t}\n\t}()\n\tif minUnits < 0 {\n\t\treturn errors.New(\"cannot set a negative minimum number of units\")\n\t}\n\tservice := &Service{st: s.st, doc: s.doc}\n\t\/\/ Removing the document never fails. Racing clients trying to create the\n\t\/\/ document generate one failure, but the second attempt should succeed.\n\t\/\/ If one client tries to update the document, and a racing client removes\n\t\/\/ it, the former should be able to re-create the document in the second\n\t\/\/ attempt. If the referred-to service advanced its life cycle to a not\n\t\/\/ alive state, an error is returned after the first failing attempt.\n\tfor i := 0; i < 2; i++ {\n\t\tif service.doc.Life != Alive {\n\t\t\treturn errors.New(\"service is no longer alive\")\n\t\t}\n\t\tif minUnits == service.doc.MinUnits {\n\t\t\treturn nil\n\t\t}\n\t\tops := setMinUnitsOps(service, minUnits)\n\t\tif err := s.st.runTransaction(ops); err != txn.ErrAborted {\n\t\t\treturn err\n\t\t}\n\t\tif err := service.Refresh(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn ErrExcessiveContention\n}\n\n\/\/ setMinUnitsOps returns the operations required to set MinUnits on the\n\/\/ service and to create\/update\/remove the minUnits document in MongoDB.\nfunc setMinUnitsOps(service *Service, minUnits int) []txn.Op {\n\tstate := service.st\n\tserviceName := service.Name()\n\tops := []txn.Op{{\n\t\tC: state.services.Name,\n\t\tId: serviceName,\n\t\tAssert: isAliveDoc,\n\t\tUpdate: D{{\"$set\", D{{\"minunits\", minUnits}}}},\n\t}}\n\tif service.doc.MinUnits == 0 {\n\t\treturn append(ops, txn.Op{\n\t\t\tC: state.minUnits.Name,\n\t\t\tId: serviceName,\n\t\t\tAssert: txn.DocMissing,\n\t\t\tInsert: &minUnitsDoc{ServiceName: serviceName},\n\t\t})\n\t}\n\tif minUnits == 0 {\n\t\treturn append(ops, minUnitsRemoveOp(state, serviceName))\n\t}\n\tif minUnits > service.doc.MinUnits {\n\t\top := minUnitsTriggerOp(state, serviceName)\n\t\top.Assert = txn.DocExists\n\t\treturn append(ops, op)\n\t}\n\treturn ops\n}\n\n\/\/ minUnitsTriggerOp returns the operation required to increase the minimum\n\/\/ units revno for the service in MongoDB, ignoring the case of document not\n\/\/ existing. This is included in the operations performed when a unit is\n\/\/ destroyed: if the document exists, then we need to update the Revno.\n\/\/ If the service does not require a minimum number of units, then the\n\/\/ operation is a noop.\nfunc minUnitsTriggerOp(st *State, serviceName string) txn.Op {\n\treturn txn.Op{\n\t\tC: st.minUnits.Name,\n\t\tId: serviceName,\n\t\tUpdate: D{{\"$inc\", D{{\"revno\", 1}}}},\n\t}\n}\n\n\/\/ minUnitsRemoveOp returns the operation required to remove the minimum\n\/\/ units document from MongoDB.\nfunc minUnitsRemoveOp(st *State, serviceName string) txn.Op {\n\treturn txn.Op{\n\t\tC: st.minUnits.Name,\n\t\tId: serviceName,\n\t\tRemove: true,\n\t}\n}\n\n\/\/ MinUnits returns the minimum units count for the service.\nfunc (s *Service) MinUnits() int {\n\treturn s.doc.MinUnits\n}\n\n\/\/ EnsureMinUnits adds new units if the service's MinUnits value is greater\n\/\/ than the number of alive units.\nfunc (s *Service) EnsureMinUnits() (err error) {\n\tdefer utils.ErrorContextf(&err,\n\t\t\"cannot ensure minimum units for service %q\", s)\n\tservice := &Service{st: s.st, doc: s.doc}\n\tdefer func() {\n\t\tif err == nil {\n\t\t\ts.doc.MinUnits = service.doc.MinUnits\n\t\t\ts.doc.UnitCount = service.doc.UnitCount\n\t\t}\n\t}()\nloop:\n\tfor {\n\t\t\/\/ Ensure the service is alive.\n\t\tif service.doc.Life != Alive {\n\t\t\treturn errors.New(\"service is no longer alive\")\n\t\t}\n\t\t\/\/ Exit without errors if the MinUnits for the service is not set.\n\t\tif service.doc.MinUnits == 0 {\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ Retrieve the number of alive units for the service.\n\t\taliveUnits, err := aliveUnitsCount(service)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ Calculate the number of required units to be added.\n\t\tmissing := service.doc.MinUnits - aliveUnits\n\t\tif missing <= 0 {\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ Add missing units.\n\t\tfor i := 0; i < missing; i++ {\n\t\t\tname, ops, err := ensureMinUnitsOps(service)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = service.st.runTransaction(ops)\n\t\t\t\/\/ Refresh the service in two cases: either the transaction was\n\t\t\t\/\/ aborted (in which case we also restart the whole loop) or the\n\t\t\t\/\/ transaction run correctly but we still need to add more units.\n\t\t\tif err == txn.ErrAborted || (err == nil && i != missing-1) {\n\t\t\t\tif err := service.Refresh(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err == txn.ErrAborted {\n\t\t\t\t\tcontinue loop\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tunit, err := service.Unit(name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := service.st.AssignUnit(unit, AssignNew); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\treturn nil\n}\n\n\/\/ aliveUnitsCount returns the number a alive units for the service.\nfunc aliveUnitsCount(service *Service) (int, error) {\n\tquery := D{{\"service\", service.doc.Name}, {\"life\", Alive}}\n\taliveUnits, err := service.st.units.Find(query).Count()\n\tif err != nil {\n\t\treturn 0, errors.New(\"cannot get alive units count\")\n\t}\n\treturn aliveUnits, nil\n}\n\n\/\/ ensureMinUnitsOps returns the operations required to add a unit for the\n\/\/ service in MongoDB. The operation is aborted if the service document changes\n\/\/ when running the transaction.\nfunc ensureMinUnitsOps(service *Service) (string, []txn.Op, error) {\n\tname, ops, err := service.addUnitOps(\"\")\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tops = append(ops, txn.Op{\n\t\tC: service.st.services.Name,\n\t\tId: service.doc.Name,\n\t\tAssert: D{{\"txn-revno\", service.doc.TxnRevno}},\n\t})\n\treturn name, ops, nil\n}\n<commit_msg>Fix documentation comments.<commit_after>\/\/ Copyright 2012, 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage state\n\nimport (\n\t\"errors\"\n\t\"labix.org\/v2\/mgo\/txn\"\n\t\"launchpad.net\/juju-core\/utils\"\n)\n\n\/\/ minUnitsDoc keeps track of relevant changes on the service's MinUnits field\n\/\/ and on the number of alive units for the service.\n\/\/ A new document is created when MinUnits is set to a non zero value.\n\/\/ A document is deleted when either the associated service is destroyed\n\/\/ or MinUnits is restored to zero. The Revno is increased when either MinUnits\n\/\/ for a service is increased or a unit is destroyed.\n\/\/ TODO(frankban): the MinUnitsWatcher reacts to changes by sending events,\n\/\/ each one describing one or more services. A worker reacts to those events\n\/\/ ensuring the number of units for the service is never less than the actual\n\/\/ alive units: new units are added if required.\ntype minUnitsDoc struct {\n\t\/\/ ServiceName is safe to be used here in place of its globalKey, since\n\t\/\/ the referred entity type is always the Service.\n\tServiceName string `bson:\"_id\"`\n\tRevno int\n}\n\n\/\/ SetMinUnits changes the number of minimum units required by the service.\nfunc (s *Service) SetMinUnits(minUnits int) (err error) {\n\tdefer utils.ErrorContextf(&err, \"cannot set minimum units for service %q\", s)\n\tdefer func() {\n\t\tif err == nil {\n\t\t\ts.doc.MinUnits = minUnits\n\t\t}\n\t}()\n\tif minUnits < 0 {\n\t\treturn errors.New(\"cannot set a negative minimum number of units\")\n\t}\n\tservice := &Service{st: s.st, doc: s.doc}\n\t\/\/ Removing the document never fails. Racing clients trying to create the\n\t\/\/ document generate one failure, but the second attempt should succeed.\n\t\/\/ If one client tries to update the document, and a racing client removes\n\t\/\/ it, the former should be able to re-create the document in the second\n\t\/\/ attempt. If the referred-to service advanced its life cycle to a not\n\t\/\/ alive state, an error is returned after the first failing attempt.\n\tfor i := 0; i < 2; i++ {\n\t\tif service.doc.Life != Alive {\n\t\t\treturn errors.New(\"service is no longer alive\")\n\t\t}\n\t\tif minUnits == service.doc.MinUnits {\n\t\t\treturn nil\n\t\t}\n\t\tops := setMinUnitsOps(service, minUnits)\n\t\tif err := s.st.runTransaction(ops); err != txn.ErrAborted {\n\t\t\treturn err\n\t\t}\n\t\tif err := service.Refresh(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn ErrExcessiveContention\n}\n\n\/\/ setMinUnitsOps returns the operations required to set MinUnits on the\n\/\/ service and to create\/update\/remove the minUnits document in MongoDB.\nfunc setMinUnitsOps(service *Service, minUnits int) []txn.Op {\n\tstate := service.st\n\tserviceName := service.Name()\n\tops := []txn.Op{{\n\t\tC: state.services.Name,\n\t\tId: serviceName,\n\t\tAssert: isAliveDoc,\n\t\tUpdate: D{{\"$set\", D{{\"minunits\", minUnits}}}},\n\t}}\n\tif service.doc.MinUnits == 0 {\n\t\treturn append(ops, txn.Op{\n\t\t\tC: state.minUnits.Name,\n\t\t\tId: serviceName,\n\t\t\tAssert: txn.DocMissing,\n\t\t\tInsert: &minUnitsDoc{ServiceName: serviceName},\n\t\t})\n\t}\n\tif minUnits == 0 {\n\t\treturn append(ops, minUnitsRemoveOp(state, serviceName))\n\t}\n\tif minUnits > service.doc.MinUnits {\n\t\top := minUnitsTriggerOp(state, serviceName)\n\t\top.Assert = txn.DocExists\n\t\treturn append(ops, op)\n\t}\n\treturn ops\n}\n\n\/\/ minUnitsTriggerOp returns the operation required to increase the minimum\n\/\/ units revno for the service in MongoDB, ignoring the case of document not\n\/\/ existing. This is included in the operations performed when a unit is\n\/\/ destroyed: if the document exists, then we need to update the Revno.\n\/\/ If the service does not require a minimum number of units, then the\n\/\/ operation is a noop.\nfunc minUnitsTriggerOp(st *State, serviceName string) txn.Op {\n\treturn txn.Op{\n\t\tC: st.minUnits.Name,\n\t\tId: serviceName,\n\t\tUpdate: D{{\"$inc\", D{{\"revno\", 1}}}},\n\t}\n}\n\n\/\/ minUnitsRemoveOp returns the operation required to remove the minimum\n\/\/ units document from MongoDB.\nfunc minUnitsRemoveOp(st *State, serviceName string) txn.Op {\n\treturn txn.Op{\n\t\tC: st.minUnits.Name,\n\t\tId: serviceName,\n\t\tRemove: true,\n\t}\n}\n\n\/\/ MinUnits returns the minimum units count for the service.\nfunc (s *Service) MinUnits() int {\n\treturn s.doc.MinUnits\n}\n\n\/\/ EnsureMinUnits adds new units if the service's MinUnits value is greater\n\/\/ than the number of alive units.\nfunc (s *Service) EnsureMinUnits() (err error) {\n\tdefer utils.ErrorContextf(&err,\n\t\t\"cannot ensure minimum units for service %q\", s)\n\tservice := &Service{st: s.st, doc: s.doc}\n\tdefer func() {\n\t\tif err == nil {\n\t\t\ts.doc.MinUnits = service.doc.MinUnits\n\t\t\ts.doc.UnitCount = service.doc.UnitCount\n\t\t}\n\t}()\nloop:\n\tfor {\n\t\t\/\/ Ensure the service is alive.\n\t\tif service.doc.Life != Alive {\n\t\t\treturn errors.New(\"service is no longer alive\")\n\t\t}\n\t\t\/\/ Exit without errors if the MinUnits for the service is not set.\n\t\tif service.doc.MinUnits == 0 {\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ Retrieve the number of alive units for the service.\n\t\taliveUnits, err := aliveUnitsCount(service)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t\/\/ Calculate the number of required units to be added.\n\t\tmissing := service.doc.MinUnits - aliveUnits\n\t\tif missing <= 0 {\n\t\t\treturn nil\n\t\t}\n\t\t\/\/ Add missing units.\n\t\tfor i := 0; i < missing; i++ {\n\t\t\tname, ops, err := ensureMinUnitsOps(service)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\terr = service.st.runTransaction(ops)\n\t\t\t\/\/ Refresh the service in two cases: either the transaction was\n\t\t\t\/\/ aborted (in which case we also restart the whole loop) or the\n\t\t\t\/\/ transaction run correctly but we still need to add more units.\n\t\t\tif err == txn.ErrAborted || (err == nil && i != missing-1) {\n\t\t\t\tif err := service.Refresh(); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err == txn.ErrAborted {\n\t\t\t\t\tcontinue loop\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tunit, err := service.Unit(name)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := service.st.AssignUnit(unit, AssignNew); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\treturn nil\n}\n\n\/\/ aliveUnitsCount returns the number a alive units for the service.\nfunc aliveUnitsCount(service *Service) (int, error) {\n\tquery := D{{\"service\", service.doc.Name}, {\"life\", Alive}}\n\treturn service.st.units.Find(query).Count()\n}\n\n\/\/ ensureMinUnitsOps returns the operations required to add a unit for the\n\/\/ service in MongoDB and the name for the new unit. The resulting transaction\n\/\/ will be aborted if the service document changes when running the operations.\nfunc ensureMinUnitsOps(service *Service) (string, []txn.Op, error) {\n\tname, ops, err := service.addUnitOps(\"\")\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tops = append(ops, txn.Op{\n\t\tC: service.st.services.Name,\n\t\tId: service.doc.Name,\n\t\tAssert: D{{\"txn-revno\", service.doc.TxnRevno}},\n\t})\n\treturn name, ops, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\telasticsearch \"github.com\/aws\/aws-sdk-go\/service\/elasticsearchservice\"\n\t\"github.com\/hashicorp\/terraform\/helper\/acctest\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\nfunc TestAccAWSElasticSearchDomain_basic(t *testing.T) {\n\tvar domain elasticsearch.ElasticsearchDomainStatus\n\tri := acctest.RandInt()\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckESDomainDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccESDomainConfig(ri),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckESDomainExists(\"aws_elasticsearch_domain.example\", &domain),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"aws_elasticsearch_domain.example\", \"elasticsearch_version\", \"1.5\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccAWSElasticSearchDomain_importBasic(t *testing.T) {\n\tresourceName := \"aws_elasticsearch_domain.example\"\n\tri := acctest.RandInt()\n\tresourceId := fmt.Sprintf(\"tf-test-%d\", ri)\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSRedshiftClusterDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccESDomainConfig(ri),\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceName: resourceName,\n\t\t\t\tImportState: true,\n\t\t\t\tImportStateVerify: true,\n\t\t\t\tImportStateId: resourceId,\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccAWSElasticSearchDomain_v23(t *testing.T) {\n\tvar domain elasticsearch.ElasticsearchDomainStatus\n\tri := acctest.RandInt()\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckESDomainDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccESDomainConfigV23(ri),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckESDomainExists(\"aws_elasticsearch_domain.example\", &domain),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"aws_elasticsearch_domain.example\", \"elasticsearch_version\", \"2.3\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccAWSElasticSearchDomain_complex(t *testing.T) {\n\tvar domain elasticsearch.ElasticsearchDomainStatus\n\tri := acctest.RandInt()\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckESDomainDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccESDomainConfig_complex(ri),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckESDomainExists(\"aws_elasticsearch_domain.example\", &domain),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccAWSElasticSearchDomain_tags(t *testing.T) {\n\tvar domain elasticsearch.ElasticsearchDomainStatus\n\tvar td elasticsearch.ListTagsOutput\n\tri := acctest.RandInt()\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSELBDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccESDomainConfig(ri),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckESDomainExists(\"aws_elasticsearch_domain.example\", &domain),\n\t\t\t\t),\n\t\t\t},\n\n\t\t\t{\n\t\t\t\tConfig: testAccESDomainConfig_TagUpdate(ri),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckESDomainExists(\"aws_elasticsearch_domain.example\", &domain),\n\t\t\t\t\ttestAccLoadESTags(&domain, &td),\n\t\t\t\t\ttestAccCheckElasticsearchServiceTags(&td.TagList, \"foo\", \"bar\"),\n\t\t\t\t\ttestAccCheckElasticsearchServiceTags(&td.TagList, \"new\", \"type\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc testAccLoadESTags(conf *elasticsearch.ElasticsearchDomainStatus, td *elasticsearch.ListTagsOutput) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\tconn := testAccProvider.Meta().(*AWSClient).esconn\n\n\t\tdescribe, err := conn.ListTags(&elasticsearch.ListTagsInput{\n\t\t\tARN: conf.ARN,\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(describe.TagList) > 0 {\n\t\t\t*td = *describe\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckESDomainExists(n string, domain *elasticsearch.ElasticsearchDomainStatus) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No ES Domain ID is set\")\n\t\t}\n\n\t\tconn := testAccProvider.Meta().(*AWSClient).esconn\n\t\topts := &elasticsearch.DescribeElasticsearchDomainInput{\n\t\t\tDomainName: aws.String(rs.Primary.Attributes[\"domain_name\"]),\n\t\t}\n\n\t\tresp, err := conn.DescribeElasticsearchDomain(opts)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error describing domain: %s\", err.Error())\n\t\t}\n\n\t\t*domain = *resp.DomainStatus\n\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckESDomainDestroy(s *terraform.State) error {\n\tfor _, rs := range s.RootModule().Resources {\n\t\tif rs.Type != \"aws_elasticsearch_domain\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tconn := testAccProvider.Meta().(*AWSClient).esconn\n\t\topts := &elasticsearch.DescribeElasticsearchDomainInput{\n\t\t\tDomainName: aws.String(rs.Primary.Attributes[\"domain_name\"]),\n\t\t}\n\n\t\t_, err := conn.DescribeElasticsearchDomain(opts)\n\t\t\/\/ Verify the error is what we want\n\t\tif err != nil {\n\t\t\tif awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == \"ResourceNotFoundException\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc testAccESDomainConfig(randInt int) string {\n\treturn fmt.Sprintf(`\nresource \"aws_elasticsearch_domain\" \"example\" {\n domain_name = \"tf-test-%d\"\n ebs_options {\n ebs_enabled = true\n volume_size = 10\n }\n}\n`, randInt)\n}\n\nfunc testAccESDomainConfig_TagUpdate(randInt int) string {\n\treturn fmt.Sprintf(`\nresource \"aws_elasticsearch_domain\" \"example\" {\n domain_name = \"tf-test-%d\"\n ebs_options {\n ebs_enabled = true\n volume_size = 10\n }\n\n tags {\n foo = \"bar\"\n new = \"type\"\n }\n}\n`, randInt)\n}\n\nfunc testAccESDomainConfig_complex(randInt int) string {\n\treturn fmt.Sprintf(`\nresource \"aws_elasticsearch_domain\" \"example\" {\n domain_name = \"tf-test-%d\"\n\n cluster_config {\n instance_type = \"r3.large.elasticsearch\"\n }\n\n advanced_options {\n \"indices.fielddata.cache.size\" = 80\n }\n\n ebs_options {\n ebs_enabled = false\n }\n\n cluster_config {\n instance_count = 2\n zone_awareness_enabled = true\n }\n\n snapshot_options {\n automated_snapshot_start_hour = 23\n }\n\n tags {\n bar = \"complex\"\n }\n}\n`, randInt)\n}\n\nfunc testAccESDomainConfigV23(randInt int) string {\n\treturn fmt.Sprintf(`\nresource \"aws_elasticsearch_domain\" \"example\" {\n domain_name = \"tf-test-%d\"\n ebs_options {\n ebs_enabled = true\n volume_size = 10\n }\n elasticsearch_version = \"2.3\"\n}\n`, randInt)\n}\n<commit_msg>provider\/aws: Fix wrong config in ES domain acceptance test (#13362)<commit_after>package aws\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/awserr\"\n\telasticsearch \"github.com\/aws\/aws-sdk-go\/service\/elasticsearchservice\"\n\t\"github.com\/hashicorp\/terraform\/helper\/acctest\"\n\t\"github.com\/hashicorp\/terraform\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n)\n\nfunc TestAccAWSElasticSearchDomain_basic(t *testing.T) {\n\tvar domain elasticsearch.ElasticsearchDomainStatus\n\tri := acctest.RandInt()\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckESDomainDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccESDomainConfig(ri),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckESDomainExists(\"aws_elasticsearch_domain.example\", &domain),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"aws_elasticsearch_domain.example\", \"elasticsearch_version\", \"1.5\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccAWSElasticSearchDomain_importBasic(t *testing.T) {\n\tresourceName := \"aws_elasticsearch_domain.example\"\n\tri := acctest.RandInt()\n\tresourceId := fmt.Sprintf(\"tf-test-%d\", ri)\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSRedshiftClusterDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccESDomainConfig(ri),\n\t\t\t},\n\t\t\t{\n\t\t\t\tResourceName: resourceName,\n\t\t\t\tImportState: true,\n\t\t\t\tImportStateVerify: true,\n\t\t\t\tImportStateId: resourceId,\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccAWSElasticSearchDomain_v23(t *testing.T) {\n\tvar domain elasticsearch.ElasticsearchDomainStatus\n\tri := acctest.RandInt()\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckESDomainDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccESDomainConfigV23(ri),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckESDomainExists(\"aws_elasticsearch_domain.example\", &domain),\n\t\t\t\t\tresource.TestCheckResourceAttr(\n\t\t\t\t\t\t\"aws_elasticsearch_domain.example\", \"elasticsearch_version\", \"2.3\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccAWSElasticSearchDomain_complex(t *testing.T) {\n\tvar domain elasticsearch.ElasticsearchDomainStatus\n\tri := acctest.RandInt()\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckESDomainDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccESDomainConfig_complex(ri),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckESDomainExists(\"aws_elasticsearch_domain.example\", &domain),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc TestAccAWSElasticSearchDomain_tags(t *testing.T) {\n\tvar domain elasticsearch.ElasticsearchDomainStatus\n\tvar td elasticsearch.ListTagsOutput\n\tri := acctest.RandInt()\n\n\tresource.Test(t, resource.TestCase{\n\t\tPreCheck: func() { testAccPreCheck(t) },\n\t\tProviders: testAccProviders,\n\t\tCheckDestroy: testAccCheckAWSELBDestroy,\n\t\tSteps: []resource.TestStep{\n\t\t\t{\n\t\t\t\tConfig: testAccESDomainConfig(ri),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckESDomainExists(\"aws_elasticsearch_domain.example\", &domain),\n\t\t\t\t),\n\t\t\t},\n\n\t\t\t{\n\t\t\t\tConfig: testAccESDomainConfig_TagUpdate(ri),\n\t\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\t\ttestAccCheckESDomainExists(\"aws_elasticsearch_domain.example\", &domain),\n\t\t\t\t\ttestAccLoadESTags(&domain, &td),\n\t\t\t\t\ttestAccCheckElasticsearchServiceTags(&td.TagList, \"foo\", \"bar\"),\n\t\t\t\t\ttestAccCheckElasticsearchServiceTags(&td.TagList, \"new\", \"type\"),\n\t\t\t\t),\n\t\t\t},\n\t\t},\n\t})\n}\n\nfunc testAccLoadESTags(conf *elasticsearch.ElasticsearchDomainStatus, td *elasticsearch.ListTagsOutput) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\tconn := testAccProvider.Meta().(*AWSClient).esconn\n\n\t\tdescribe, err := conn.ListTags(&elasticsearch.ListTagsInput{\n\t\t\tARN: conf.ARN,\n\t\t})\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(describe.TagList) > 0 {\n\t\t\t*td = *describe\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckESDomainExists(n string, domain *elasticsearch.ElasticsearchDomainStatus) resource.TestCheckFunc {\n\treturn func(s *terraform.State) error {\n\t\trs, ok := s.RootModule().Resources[n]\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"Not found: %s\", n)\n\t\t}\n\n\t\tif rs.Primary.ID == \"\" {\n\t\t\treturn fmt.Errorf(\"No ES Domain ID is set\")\n\t\t}\n\n\t\tconn := testAccProvider.Meta().(*AWSClient).esconn\n\t\topts := &elasticsearch.DescribeElasticsearchDomainInput{\n\t\t\tDomainName: aws.String(rs.Primary.Attributes[\"domain_name\"]),\n\t\t}\n\n\t\tresp, err := conn.DescribeElasticsearchDomain(opts)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Error describing domain: %s\", err.Error())\n\t\t}\n\n\t\t*domain = *resp.DomainStatus\n\n\t\treturn nil\n\t}\n}\n\nfunc testAccCheckESDomainDestroy(s *terraform.State) error {\n\tfor _, rs := range s.RootModule().Resources {\n\t\tif rs.Type != \"aws_elasticsearch_domain\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tconn := testAccProvider.Meta().(*AWSClient).esconn\n\t\topts := &elasticsearch.DescribeElasticsearchDomainInput{\n\t\t\tDomainName: aws.String(rs.Primary.Attributes[\"domain_name\"]),\n\t\t}\n\n\t\t_, err := conn.DescribeElasticsearchDomain(opts)\n\t\t\/\/ Verify the error is what we want\n\t\tif err != nil {\n\t\t\tif awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == \"ResourceNotFoundException\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc testAccESDomainConfig(randInt int) string {\n\treturn fmt.Sprintf(`\nresource \"aws_elasticsearch_domain\" \"example\" {\n domain_name = \"tf-test-%d\"\n ebs_options {\n ebs_enabled = true\n volume_size = 10\n }\n}\n`, randInt)\n}\n\nfunc testAccESDomainConfig_TagUpdate(randInt int) string {\n\treturn fmt.Sprintf(`\nresource \"aws_elasticsearch_domain\" \"example\" {\n domain_name = \"tf-test-%d\"\n ebs_options {\n ebs_enabled = true\n volume_size = 10\n }\n\n tags {\n foo = \"bar\"\n new = \"type\"\n }\n}\n`, randInt)\n}\n\nfunc testAccESDomainConfig_complex(randInt int) string {\n\treturn fmt.Sprintf(`\nresource \"aws_elasticsearch_domain\" \"example\" {\n domain_name = \"tf-test-%d\"\n\n advanced_options {\n \"indices.fielddata.cache.size\" = 80\n }\n\n ebs_options {\n ebs_enabled = false\n }\n\n cluster_config {\n instance_count = 2\n zone_awareness_enabled = true\n instance_type = \"r3.large.elasticsearch\"\n }\n\n snapshot_options {\n automated_snapshot_start_hour = 23\n }\n\n tags {\n bar = \"complex\"\n }\n}\n`, randInt)\n}\n\nfunc testAccESDomainConfigV23(randInt int) string {\n\treturn fmt.Sprintf(`\nresource \"aws_elasticsearch_domain\" \"example\" {\n domain_name = \"tf-test-%d\"\n ebs_options {\n ebs_enabled = true\n volume_size = 10\n }\n elasticsearch_version = \"2.3\"\n}\n`, randInt)\n}\n<|endoftext|>"} {"text":"<commit_before>package stmtcache_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/jackc\/pgconn\"\n\t\"github.com\/jackc\/pgconn\/stmtcache\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestLRUModePrepare(t *testing.T) {\n\tt.Parallel()\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*5)\n\tdefer cancel()\n\n\tconn, err := pgconn.Connect(ctx, os.Getenv(\"PGX_TEST_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer conn.Close(ctx)\n\n\tcache := stmtcache.NewLRU(conn, stmtcache.ModePrepare, 2)\n\trequire.EqualValues(t, 0, cache.Len())\n\trequire.EqualValues(t, 2, cache.Cap())\n\trequire.EqualValues(t, stmtcache.ModePrepare, cache.Mode())\n\n\tpsd, err := cache.Get(ctx, \"select 1\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, psd)\n\trequire.EqualValues(t, 1, cache.Len())\n\trequire.ElementsMatch(t, []string{\"select 1\"}, fetchServerStatements(t, ctx, conn))\n\n\tpsd, err = cache.Get(ctx, \"select 1\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, psd)\n\trequire.EqualValues(t, 1, cache.Len())\n\trequire.ElementsMatch(t, []string{\"select 1\"}, fetchServerStatements(t, ctx, conn))\n\n\tpsd, err = cache.Get(ctx, \"select 2\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, psd)\n\trequire.EqualValues(t, 2, cache.Len())\n\trequire.ElementsMatch(t, []string{\"select 1\", \"select 2\"}, fetchServerStatements(t, ctx, conn))\n\n\tpsd, err = cache.Get(ctx, \"select 3\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, psd)\n\trequire.EqualValues(t, 2, cache.Len())\n\trequire.ElementsMatch(t, []string{\"select 2\", \"select 3\"}, fetchServerStatements(t, ctx, conn))\n\n\terr = cache.Clear(ctx)\n\trequire.NoError(t, err)\n\trequire.EqualValues(t, 0, cache.Len())\n\trequire.Empty(t, fetchServerStatements(t, ctx, conn))\n}\n\nfunc TestLRUStmtInvalidation(t *testing.T) {\n\tt.Parallel()\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*5)\n\tdefer cancel()\n\n\tconn, err := pgconn.Connect(ctx, os.Getenv(\"PGX_TEST_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer conn.Close(ctx)\n\n\t\/\/ we construct a fake error because its not super straightforward to actually call\n\t\/\/ a prepared statement from the LRU cache without the helper routines which live\n\t\/\/ in pgx proper.\n\tfakeInvalidCachePlanError := &pgconn.PgError{\n\t\tSeverity: \"ERROR\",\n\t\tCode: \"0A000\",\n\t\tMessage: \"cached plan must not change result type\",\n\t}\n\n\tcache := stmtcache.NewLRU(conn, stmtcache.ModePrepare, 2)\n\n\t\/\/\n\t\/\/ outside of a transaction, we eagerly flush the statement\n\t\/\/\n\n\t_, err = cache.Get(ctx, \"select 1\")\n\trequire.NoError(t, err)\n\trequire.EqualValues(t, 1, cache.Len())\n\trequire.ElementsMatch(t, []string{\"select 1\"}, fetchServerStatements(t, ctx, conn))\n\n\terr = cache.StatementErrored(ctx, \"select 1\", fakeInvalidCachePlanError)\n\trequire.NoError(t, err)\n\t_, err = cache.Get(ctx, \"select 2\")\n\trequire.NoError(t, err)\n\trequire.EqualValues(t, 1, cache.Len())\n\trequire.ElementsMatch(t, []string{\"select 2\"}, fetchServerStatements(t, ctx, conn))\n\n\terr = cache.Clear(ctx)\n\trequire.NoError(t, err)\n\n\t\/\/\n\t\/\/ within an errored transaction, we defer the flush to after the first get\n\t\/\/ that happens after the transaction is rolled back\n\t\/\/\n\n\t_, err = cache.Get(ctx, \"select 1\")\n\trequire.NoError(t, err)\n\trequire.EqualValues(t, 1, cache.Len())\n\trequire.ElementsMatch(t, []string{\"select 1\"}, fetchServerStatements(t, ctx, conn))\n\n\tres := conn.Exec(ctx, \"begin\")\n\trequire.NoError(t, res.Close())\n\trequire.Equal(t, byte('T'), conn.TxStatus())\n\n\tres = conn.Exec(ctx, \"selec\")\n\trequire.Error(t, res.Close())\n\trequire.Equal(t, byte('E'), conn.TxStatus())\n\n\terr = cache.StatementErrored(ctx, \"select 1\", fakeInvalidCachePlanError)\n\trequire.EqualValues(t, 1, cache.Len())\n\n\tres = conn.Exec(ctx, \"rollback\")\n\trequire.NoError(t, res.Close())\n\n\t_, err = cache.Get(ctx, \"select 2\")\n\trequire.EqualValues(t, 1, cache.Len())\n\trequire.ElementsMatch(t, []string{\"select 2\"}, fetchServerStatements(t, ctx, conn))\n}\n\nfunc TestLRUModePrepareStress(t *testing.T) {\n\tt.Parallel()\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*10)\n\tdefer cancel()\n\n\tconn, err := pgconn.Connect(ctx, os.Getenv(\"PGX_TEST_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer conn.Close(ctx)\n\n\tcache := stmtcache.NewLRU(conn, stmtcache.ModePrepare, 8)\n\trequire.EqualValues(t, 0, cache.Len())\n\trequire.EqualValues(t, 8, cache.Cap())\n\trequire.EqualValues(t, stmtcache.ModePrepare, cache.Mode())\n\n\tfor i := 0; i < 1000; i++ {\n\t\tpsd, err := cache.Get(ctx, fmt.Sprintf(\"select %d\", rand.Intn(50)))\n\t\trequire.NoError(t, err)\n\t\trequire.NotNil(t, psd)\n\t\tresult := conn.ExecPrepared(ctx, psd.Name, nil, nil, nil).Read()\n\t\trequire.NoError(t, result.Err)\n\t}\n}\n\nfunc TestLRUModeDescribe(t *testing.T) {\n\tt.Parallel()\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*5)\n\tdefer cancel()\n\n\tconn, err := pgconn.Connect(ctx, os.Getenv(\"PGX_TEST_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer conn.Close(ctx)\n\n\tcache := stmtcache.NewLRU(conn, stmtcache.ModeDescribe, 2)\n\trequire.EqualValues(t, 0, cache.Len())\n\trequire.EqualValues(t, 2, cache.Cap())\n\trequire.EqualValues(t, stmtcache.ModeDescribe, cache.Mode())\n\n\tpsd, err := cache.Get(ctx, \"select 1\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, psd)\n\trequire.EqualValues(t, 1, cache.Len())\n\trequire.Empty(t, fetchServerStatements(t, ctx, conn))\n\n\tpsd, err = cache.Get(ctx, \"select 1\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, psd)\n\trequire.EqualValues(t, 1, cache.Len())\n\trequire.Empty(t, fetchServerStatements(t, ctx, conn))\n\n\tpsd, err = cache.Get(ctx, \"select 2\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, psd)\n\trequire.EqualValues(t, 2, cache.Len())\n\trequire.Empty(t, fetchServerStatements(t, ctx, conn))\n\n\tpsd, err = cache.Get(ctx, \"select 3\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, psd)\n\trequire.EqualValues(t, 2, cache.Len())\n\trequire.Empty(t, fetchServerStatements(t, ctx, conn))\n\n\terr = cache.Clear(ctx)\n\trequire.NoError(t, err)\n\trequire.EqualValues(t, 0, cache.Len())\n\trequire.Empty(t, fetchServerStatements(t, ctx, conn))\n}\n\nfunc fetchServerStatements(t testing.TB, ctx context.Context, conn *pgconn.PgConn) []string {\n\tresult := conn.ExecParams(ctx, `select statement from pg_prepared_statements`, nil, nil, nil, nil).Read()\n\trequire.NoError(t, result.Err)\n\tvar statements []string\n\tfor _, r := range result.Rows {\n\t\tstatements = append(statements, string(r[0]))\n\t}\n\treturn statements\n}\n<commit_msg>Add stmtcache.LRU test thjat integrates over the database<commit_after>package stmtcache_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/jackc\/pgconn\"\n\t\"github.com\/jackc\/pgconn\/stmtcache\"\n\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestLRUModePrepare(t *testing.T) {\n\tt.Parallel()\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*5)\n\tdefer cancel()\n\n\tconn, err := pgconn.Connect(ctx, os.Getenv(\"PGX_TEST_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer conn.Close(ctx)\n\n\tcache := stmtcache.NewLRU(conn, stmtcache.ModePrepare, 2)\n\trequire.EqualValues(t, 0, cache.Len())\n\trequire.EqualValues(t, 2, cache.Cap())\n\trequire.EqualValues(t, stmtcache.ModePrepare, cache.Mode())\n\n\tpsd, err := cache.Get(ctx, \"select 1\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, psd)\n\trequire.EqualValues(t, 1, cache.Len())\n\trequire.ElementsMatch(t, []string{\"select 1\"}, fetchServerStatements(t, ctx, conn))\n\n\tpsd, err = cache.Get(ctx, \"select 1\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, psd)\n\trequire.EqualValues(t, 1, cache.Len())\n\trequire.ElementsMatch(t, []string{\"select 1\"}, fetchServerStatements(t, ctx, conn))\n\n\tpsd, err = cache.Get(ctx, \"select 2\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, psd)\n\trequire.EqualValues(t, 2, cache.Len())\n\trequire.ElementsMatch(t, []string{\"select 1\", \"select 2\"}, fetchServerStatements(t, ctx, conn))\n\n\tpsd, err = cache.Get(ctx, \"select 3\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, psd)\n\trequire.EqualValues(t, 2, cache.Len())\n\trequire.ElementsMatch(t, []string{\"select 2\", \"select 3\"}, fetchServerStatements(t, ctx, conn))\n\n\terr = cache.Clear(ctx)\n\trequire.NoError(t, err)\n\trequire.EqualValues(t, 0, cache.Len())\n\trequire.Empty(t, fetchServerStatements(t, ctx, conn))\n}\n\nfunc TestLRUStmtInvalidation(t *testing.T) {\n\tt.Parallel()\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*5)\n\tdefer cancel()\n\n\tconn, err := pgconn.Connect(ctx, os.Getenv(\"PGX_TEST_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer conn.Close(ctx)\n\n\t\/\/ we construct a fake error because its not super straightforward to actually call\n\t\/\/ a prepared statement from the LRU cache without the helper routines which live\n\t\/\/ in pgx proper.\n\tfakeInvalidCachePlanError := &pgconn.PgError{\n\t\tSeverity: \"ERROR\",\n\t\tCode: \"0A000\",\n\t\tMessage: \"cached plan must not change result type\",\n\t}\n\n\tcache := stmtcache.NewLRU(conn, stmtcache.ModePrepare, 2)\n\n\t\/\/\n\t\/\/ outside of a transaction, we eagerly flush the statement\n\t\/\/\n\n\t_, err = cache.Get(ctx, \"select 1\")\n\trequire.NoError(t, err)\n\trequire.EqualValues(t, 1, cache.Len())\n\trequire.ElementsMatch(t, []string{\"select 1\"}, fetchServerStatements(t, ctx, conn))\n\n\terr = cache.StatementErrored(ctx, \"select 1\", fakeInvalidCachePlanError)\n\trequire.NoError(t, err)\n\t_, err = cache.Get(ctx, \"select 2\")\n\trequire.NoError(t, err)\n\trequire.EqualValues(t, 1, cache.Len())\n\trequire.ElementsMatch(t, []string{\"select 2\"}, fetchServerStatements(t, ctx, conn))\n\n\terr = cache.Clear(ctx)\n\trequire.NoError(t, err)\n\n\t\/\/\n\t\/\/ within an errored transaction, we defer the flush to after the first get\n\t\/\/ that happens after the transaction is rolled back\n\t\/\/\n\n\t_, err = cache.Get(ctx, \"select 1\")\n\trequire.NoError(t, err)\n\trequire.EqualValues(t, 1, cache.Len())\n\trequire.ElementsMatch(t, []string{\"select 1\"}, fetchServerStatements(t, ctx, conn))\n\n\tres := conn.Exec(ctx, \"begin\")\n\trequire.NoError(t, res.Close())\n\trequire.Equal(t, byte('T'), conn.TxStatus())\n\n\tres = conn.Exec(ctx, \"selec\")\n\trequire.Error(t, res.Close())\n\trequire.Equal(t, byte('E'), conn.TxStatus())\n\n\terr = cache.StatementErrored(ctx, \"select 1\", fakeInvalidCachePlanError)\n\trequire.EqualValues(t, 1, cache.Len())\n\n\tres = conn.Exec(ctx, \"rollback\")\n\trequire.NoError(t, res.Close())\n\n\t_, err = cache.Get(ctx, \"select 2\")\n\trequire.EqualValues(t, 1, cache.Len())\n\trequire.ElementsMatch(t, []string{\"select 2\"}, fetchServerStatements(t, ctx, conn))\n}\n\nfunc TestLRUStmtInvalidationIntegration(t *testing.T) {\n\tt.Parallel()\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*5)\n\tdefer cancel()\n\n\tconn, err := pgconn.Connect(ctx, os.Getenv(\"PGX_TEST_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer conn.Close(ctx)\n\n\tcache := stmtcache.NewLRU(conn, stmtcache.ModePrepare, 2)\n\n\tresult := conn.ExecParams(ctx, \"create temporary table stmtcache_table (a text)\", nil, nil, nil, nil).Read()\n\trequire.NoError(t, result.Err)\n\n\tsql := \"select * from stmtcache_table\"\n\tsd1, err := cache.Get(ctx, sql)\n\trequire.NoError(t, err)\n\n\tresult = conn.ExecPrepared(ctx, sd1.Name, nil, nil, nil).Read()\n\trequire.NoError(t, result.Err)\n\n\tresult = conn.ExecParams(ctx, \"alter table stmtcache_table add column b text\", nil, nil, nil, nil).Read()\n\trequire.NoError(t, result.Err)\n\n\tresult = conn.ExecPrepared(ctx, sd1.Name, nil, nil, nil).Read()\n\trequire.EqualError(t, result.Err, \"ERROR: cached plan must not change result type (SQLSTATE 0A000)\")\n\n\tcache.StatementErrored(ctx, sql, result.Err)\n\n\tsd2, err := cache.Get(ctx, sql)\n\trequire.NoError(t, err)\n\trequire.NotEqual(t, sd1.Name, sd2.Name)\n\n\tresult = conn.ExecPrepared(ctx, sd2.Name, nil, nil, nil).Read()\n\trequire.NoError(t, result.Err)\n}\n\nfunc TestLRUModePrepareStress(t *testing.T) {\n\tt.Parallel()\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*10)\n\tdefer cancel()\n\n\tconn, err := pgconn.Connect(ctx, os.Getenv(\"PGX_TEST_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer conn.Close(ctx)\n\n\tcache := stmtcache.NewLRU(conn, stmtcache.ModePrepare, 8)\n\trequire.EqualValues(t, 0, cache.Len())\n\trequire.EqualValues(t, 8, cache.Cap())\n\trequire.EqualValues(t, stmtcache.ModePrepare, cache.Mode())\n\n\tfor i := 0; i < 1000; i++ {\n\t\tpsd, err := cache.Get(ctx, fmt.Sprintf(\"select %d\", rand.Intn(50)))\n\t\trequire.NoError(t, err)\n\t\trequire.NotNil(t, psd)\n\t\tresult := conn.ExecPrepared(ctx, psd.Name, nil, nil, nil).Read()\n\t\trequire.NoError(t, result.Err)\n\t}\n}\n\nfunc TestLRUModeDescribe(t *testing.T) {\n\tt.Parallel()\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second*5)\n\tdefer cancel()\n\n\tconn, err := pgconn.Connect(ctx, os.Getenv(\"PGX_TEST_CONN_STRING\"))\n\trequire.NoError(t, err)\n\tdefer conn.Close(ctx)\n\n\tcache := stmtcache.NewLRU(conn, stmtcache.ModeDescribe, 2)\n\trequire.EqualValues(t, 0, cache.Len())\n\trequire.EqualValues(t, 2, cache.Cap())\n\trequire.EqualValues(t, stmtcache.ModeDescribe, cache.Mode())\n\n\tpsd, err := cache.Get(ctx, \"select 1\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, psd)\n\trequire.EqualValues(t, 1, cache.Len())\n\trequire.Empty(t, fetchServerStatements(t, ctx, conn))\n\n\tpsd, err = cache.Get(ctx, \"select 1\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, psd)\n\trequire.EqualValues(t, 1, cache.Len())\n\trequire.Empty(t, fetchServerStatements(t, ctx, conn))\n\n\tpsd, err = cache.Get(ctx, \"select 2\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, psd)\n\trequire.EqualValues(t, 2, cache.Len())\n\trequire.Empty(t, fetchServerStatements(t, ctx, conn))\n\n\tpsd, err = cache.Get(ctx, \"select 3\")\n\trequire.NoError(t, err)\n\trequire.NotNil(t, psd)\n\trequire.EqualValues(t, 2, cache.Len())\n\trequire.Empty(t, fetchServerStatements(t, ctx, conn))\n\n\terr = cache.Clear(ctx)\n\trequire.NoError(t, err)\n\trequire.EqualValues(t, 0, cache.Len())\n\trequire.Empty(t, fetchServerStatements(t, ctx, conn))\n}\n\nfunc fetchServerStatements(t testing.TB, ctx context.Context, conn *pgconn.PgConn) []string {\n\tresult := conn.ExecParams(ctx, `select statement from pg_prepared_statements`, nil, nil, nil, nil).Read()\n\trequire.NoError(t, result.Err)\n\tvar statements []string\n\tfor _, r := range result.Rows {\n\t\tstatements = append(statements, string(r[0]))\n\t}\n\treturn statements\n}\n<|endoftext|>"} {"text":"<commit_before>package vault\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\n\t\"bytes\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\/stscreds\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/ieee0824\/getenv\"\n\t\"github.com\/jobtalk\/pnzr\/lib\"\n\t\"os\"\n\t\"os\/exec\"\n)\n\nfunc getEditor() string {\n\tif e := os.Getenv(\"PNZR_EDITOR\"); e != \"\" {\n\t\treturn e\n\t}\n\n\tif e := os.Getenv(\"EDITOR\"); e != \"\" {\n\t\treturn e\n\t}\n\n\treturn \"nano\"\n}\n\ntype mode struct {\n\tencrypt *bool\n\tdecrypt *bool\n\tedit *bool\n\tview *bool\n}\n\nfunc (m *mode) checkMultiFlagSet() bool {\n\treturn (*m.encrypt && *m.decrypt) ||\n\t\t(*m.encrypt && *m.edit) ||\n\t\t(*m.encrypt && *m.view) ||\n\t\t(*m.decrypt && *m.edit) ||\n\t\t(*m.decrypt && *m.view) ||\n\t\t(*m.edit && *m.view)\n}\n\ntype VaultCommand struct {\n\tsess *session.Session\n\tkmsKeyID *string\n\tvaultMode *mode\n\tfile *string\n\tprofile *string\n\tregion *string\n\tawsAccessKeyID *string\n\tawsSecretKeyID *string\n}\n\nfunc (v *VaultCommand) parseArgs(args []string) {\n\tvar (\n\t\tflagSet = new(flag.FlagSet)\n\t\tf *string\n\t)\n\tv.vaultMode = new(mode)\n\n\tv.kmsKeyID = flagSet.String(\"key_id\", getenv.String(\"KMS_KEY_ID\"), \"Amazon KMS key ID\")\n\tv.vaultMode.encrypt = flagSet.Bool(\"encrypt\", getenv.Bool(\"ENCRYPT\", false), \"encrypt mode\")\n\tv.vaultMode.decrypt = flagSet.Bool(\"decrypt\", getenv.Bool(\"DECRYPT\", false), \"decrypt mode\")\n\tv.vaultMode.view = flagSet.Bool(\"view\", false, \"view mode\")\n\tv.vaultMode.edit = flagSet.Bool(\"edit\", false, \"edit mode\")\n\tv.profile = flagSet.String(\"profile\", getenv.String(\"AWS_PROFILE_NAME\", \"default\"), \"aws credentials profile name\")\n\tv.region = flagSet.String(\"region\", getenv.String(\"AWS_REGION\", \"ap-northeast-1\"), \"aws region\")\n\tv.awsAccessKeyID = flagSet.String(\"aws-access-key-id\", getenv.String(\"AWS_ACCESS_KEY_ID\"), \"aws access key id\")\n\tv.awsSecretKeyID = flagSet.String(\"aws-secret-key-id\", getenv.String(\"AWS_SECRET_KEY_ID\"), \"aws secret key id\")\n\tv.file = flagSet.String(\"file\", \"\", \"target file\")\n\tf = flagSet.String(\"f\", \"\", \"target file\")\n\n\tif err := flagSet.Parse(args); err != nil {\n\t\tpanic(err)\n\t}\n\n\tif *f == \"\" && *v.file == \"\" && len(flagSet.Args()) != 0 {\n\t\ttargetName := flagSet.Args()[0]\n\t\tv.file = &targetName\n\t}\n\n\tif *v.file == \"\" {\n\t\tv.file = f\n\t}\n\n\tvar awsConfig = aws.Config{}\n\n\tif *v.awsAccessKeyID != \"\" && *v.awsSecretKeyID != \"\" && *v.profile == \"\" {\n\t\tawsConfig.Credentials = credentials.NewStaticCredentials(*v.awsAccessKeyID, *v.awsSecretKeyID, \"\")\n\t\tawsConfig.Region = v.region\n\t}\n\n\tv.sess = session.Must(session.NewSessionWithOptions(session.Options{\n\t\tAssumeRoleTokenProvider: stscreds.StdinTokenProvider,\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t\tProfile: *v.profile,\n\t\tConfig: awsConfig,\n\t}))\n}\n\nfunc (v *VaultCommand) encrypt(keyID string, fileName string) error {\n\tbin, err := ioutil.ReadFile(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tkms := lib.NewKMS(v.sess)\n\t_, err = kms.SetKeyID(keyID).Encrypt(bin)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ioutil.WriteFile(fileName, []byte(kms.String()), 0644)\n}\n\nfunc (v *VaultCommand) decrypt(keyID string, fileName string) error {\n\tbin, err := ioutil.ReadFile(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tkms := lib.NewKMSFromBinary(bin, v.sess)\n\tif kms == nil {\n\t\treturn errors.New(fmt.Sprintf(\"%v form is illegal\", fileName))\n\t}\n\tplainText, err := kms.SetKeyID(keyID).Decrypt()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(fileName, plainText, 0644)\n}\n\nfunc (v *VaultCommand) decryptTemporary(keyID string, fileName string) ([]byte, error) {\n\tbin, err := ioutil.ReadFile(fileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkms := lib.NewKMSFromBinary(bin, v.sess)\n\tif kms == nil {\n\t\treturn nil, errors.New(fmt.Sprintf(\"%v form is illegal\", fileName))\n\t}\n\tplainText, err := kms.SetKeyID(keyID).Decrypt()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn plainText, nil\n}\n\nfunc (c *VaultCommand) Help() string {\n\tvar msg string\n\tmsg += \"usage: pnzr vault [options ...]\\n\"\n\tmsg += \"options:\\n\"\n\tmsg += \" -key_id\\n\"\n\tmsg += \" set kms key id\\n\"\n\tmsg += \" -encrypt\\n\"\n\tmsg += \" use encrypt mode\\n\"\n\tmsg += \" -decrypt\\n\"\n\tmsg += \" use decrypt mode\\n\"\n\tmsg += \" -file\\n\"\n\tmsg += \" setting target file\\n\"\n\tmsg += \" -f\"\n\tmsg += \" setting target file\\n\"\n\tmsg += \" -profile\\n\"\n\tmsg += \" aws credential name\\n\"\n\tmsg += \" -region\\n\"\n\tmsg += \" aws region name\\n\"\n\tmsg += \" -aws-access-key-id\\n\"\n\tmsg += \" setting aws access key id\\n\"\n\tmsg += \" -aws-secret-key-id\\n\"\n\tmsg += \" setting aws secret key id\\n\"\n\tmsg += \"===================================================\\n\"\n\treturn msg\n}\n\nfunc (v *VaultCommand) Run(args []string) int {\n\tv.parseArgs(args)\n\n\tif v.vaultMode.checkMultiFlagSet() {\n\t\tpanic(\"Multiple vault options are selected.\")\n\t} else if *v.vaultMode.encrypt {\n\t\tif err := v.encrypt(*v.kmsKeyID, *v.file); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else if *v.vaultMode.decrypt {\n\t\tif err := v.decrypt(*v.kmsKeyID, *v.file); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else if *v.vaultMode.edit {\n\t\tif err := v.decrypt(*v.kmsKeyID, *v.file); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdefer func() {\n\t\t\tif err := v.encrypt(*v.kmsKeyID, *v.file); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}()\n\t\tcmd := exec.Command(getEditor(), *v.file)\n\n\t\tcmd.Stdin = os.Stdin\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\n\t\tif err := cmd.Run(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else if *v.vaultMode.view {\n\t\tplain, err := v.decryptTemporary(*v.kmsKeyID, *v.file)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tcmd := exec.Command(\"less\")\n\t\tcmd.Stdin = bytes.NewReader(plain)\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\n\t\tif err := cmd.Run(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else {\n\t\tpanic(\"Vault mode is not selected.\")\n\t}\n\n\treturn 0\n}\n\nfunc (c *VaultCommand) Synopsis() string {\n\treturn c.Help()\n}\n<commit_msg>vault modeが増えたときにcheckMultiflagSetが動的に処理できるように変更<commit_after>package vault\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\n\t\"bytes\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/credentials\/stscreds\"\n\t\"github.com\/aws\/aws-sdk-go\/aws\/session\"\n\t\"github.com\/ieee0824\/getenv\"\n\t\"github.com\/jobtalk\/pnzr\/lib\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"reflect\"\n\t\"unsafe\"\n)\n\nfunc getEditor() string {\n\tif e := os.Getenv(\"PNZR_EDITOR\"); e != \"\" {\n\t\treturn e\n\t}\n\n\tif e := os.Getenv(\"EDITOR\"); e != \"\" {\n\t\treturn e\n\t}\n\n\treturn \"nano\"\n}\n\ntype mode struct {\n\tencrypt *bool\n\tdecrypt *bool\n\tedit *bool\n\tview *bool\n}\n\nfunc (m *mode) checkMultiFlagSet() bool {\n\tvar cnt int\n\tt := reflect.TypeOf(*m)\n\tv := reflect.ValueOf(*m)\n\n\tfor i := 0; i < t.NumField(); i ++ {\n\t\tfieldName := t.Field(i).Name\n\t\tb := (*bool)(unsafe.Pointer(reflect.Indirect(v).FieldByName(fieldName).Pointer()))\n\t\tif *b {\n\t\t\tcnt ++\n\t\t}\n\t}\n\treturn 1 < cnt\n}\n\ntype VaultCommand struct {\n\tsess *session.Session\n\tkmsKeyID *string\n\tvaultMode *mode\n\tfile *string\n\tprofile *string\n\tregion *string\n\tawsAccessKeyID *string\n\tawsSecretKeyID *string\n}\n\nfunc (v *VaultCommand) parseArgs(args []string) {\n\tvar (\n\t\tflagSet = new(flag.FlagSet)\n\t\tf *string\n\t)\n\tv.vaultMode = new(mode)\n\n\tv.kmsKeyID = flagSet.String(\"key_id\", getenv.String(\"KMS_KEY_ID\"), \"Amazon KMS key ID\")\n\tv.vaultMode.encrypt = flagSet.Bool(\"encrypt\", getenv.Bool(\"ENCRYPT\", false), \"encrypt mode\")\n\tv.vaultMode.decrypt = flagSet.Bool(\"decrypt\", getenv.Bool(\"DECRYPT\", false), \"decrypt mode\")\n\tv.vaultMode.view = flagSet.Bool(\"view\", false, \"view mode\")\n\tv.vaultMode.edit = flagSet.Bool(\"edit\", false, \"edit mode\")\n\tv.profile = flagSet.String(\"profile\", getenv.String(\"AWS_PROFILE_NAME\", \"default\"), \"aws credentials profile name\")\n\tv.region = flagSet.String(\"region\", getenv.String(\"AWS_REGION\", \"ap-northeast-1\"), \"aws region\")\n\tv.awsAccessKeyID = flagSet.String(\"aws-access-key-id\", getenv.String(\"AWS_ACCESS_KEY_ID\"), \"aws access key id\")\n\tv.awsSecretKeyID = flagSet.String(\"aws-secret-key-id\", getenv.String(\"AWS_SECRET_KEY_ID\"), \"aws secret key id\")\n\tv.file = flagSet.String(\"file\", \"\", \"target file\")\n\tf = flagSet.String(\"f\", \"\", \"target file\")\n\n\tif err := flagSet.Parse(args); err != nil {\n\t\tpanic(err)\n\t}\n\n\tif *f == \"\" && *v.file == \"\" && len(flagSet.Args()) != 0 {\n\t\ttargetName := flagSet.Args()[0]\n\t\tv.file = &targetName\n\t}\n\n\tif *v.file == \"\" {\n\t\tv.file = f\n\t}\n\n\tvar awsConfig = aws.Config{}\n\n\tif *v.awsAccessKeyID != \"\" && *v.awsSecretKeyID != \"\" && *v.profile == \"\" {\n\t\tawsConfig.Credentials = credentials.NewStaticCredentials(*v.awsAccessKeyID, *v.awsSecretKeyID, \"\")\n\t\tawsConfig.Region = v.region\n\t}\n\n\tv.sess = session.Must(session.NewSessionWithOptions(session.Options{\n\t\tAssumeRoleTokenProvider: stscreds.StdinTokenProvider,\n\t\tSharedConfigState: session.SharedConfigEnable,\n\t\tProfile: *v.profile,\n\t\tConfig: awsConfig,\n\t}))\n}\n\nfunc (v *VaultCommand) encrypt(keyID string, fileName string) error {\n\tbin, err := ioutil.ReadFile(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tkms := lib.NewKMS(v.sess)\n\t_, err = kms.SetKeyID(keyID).Encrypt(bin)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ioutil.WriteFile(fileName, []byte(kms.String()), 0644)\n}\n\nfunc (v *VaultCommand) decrypt(keyID string, fileName string) error {\n\tbin, err := ioutil.ReadFile(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tkms := lib.NewKMSFromBinary(bin, v.sess)\n\tif kms == nil {\n\t\treturn errors.New(fmt.Sprintf(\"%v form is illegal\", fileName))\n\t}\n\tplainText, err := kms.SetKeyID(keyID).Decrypt()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn ioutil.WriteFile(fileName, plainText, 0644)\n}\n\nfunc (v *VaultCommand) decryptTemporary(keyID string, fileName string) ([]byte, error) {\n\tbin, err := ioutil.ReadFile(fileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkms := lib.NewKMSFromBinary(bin, v.sess)\n\tif kms == nil {\n\t\treturn nil, errors.New(fmt.Sprintf(\"%v form is illegal\", fileName))\n\t}\n\tplainText, err := kms.SetKeyID(keyID).Decrypt()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn plainText, nil\n}\n\nfunc (c *VaultCommand) Help() string {\n\tvar msg string\n\tmsg += \"usage: pnzr vault [options ...]\\n\"\n\tmsg += \"options:\\n\"\n\tmsg += \" -key_id\\n\"\n\tmsg += \" set kms key id\\n\"\n\tmsg += \" -encrypt\\n\"\n\tmsg += \" use encrypt mode\\n\"\n\tmsg += \" -decrypt\\n\"\n\tmsg += \" use decrypt mode\\n\"\n\tmsg += \" -file\\n\"\n\tmsg += \" setting target file\\n\"\n\tmsg += \" -f\"\n\tmsg += \" setting target file\\n\"\n\tmsg += \" -profile\\n\"\n\tmsg += \" aws credential name\\n\"\n\tmsg += \" -region\\n\"\n\tmsg += \" aws region name\\n\"\n\tmsg += \" -aws-access-key-id\\n\"\n\tmsg += \" setting aws access key id\\n\"\n\tmsg += \" -aws-secret-key-id\\n\"\n\tmsg += \" setting aws secret key id\\n\"\n\tmsg += \"===================================================\\n\"\n\treturn msg\n}\n\nfunc (v *VaultCommand) Run(args []string) int {\n\tv.parseArgs(args)\n\n\tif v.vaultMode.checkMultiFlagSet() {\n\t\tpanic(\"Multiple vault options are selected.\")\n\t} else if *v.vaultMode.encrypt {\n\t\tif err := v.encrypt(*v.kmsKeyID, *v.file); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else if *v.vaultMode.decrypt {\n\t\tif err := v.decrypt(*v.kmsKeyID, *v.file); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else if *v.vaultMode.edit {\n\t\tif err := v.decrypt(*v.kmsKeyID, *v.file); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdefer func() {\n\t\t\tif err := v.encrypt(*v.kmsKeyID, *v.file); err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t}()\n\t\tcmd := exec.Command(getEditor(), *v.file)\n\n\t\tcmd.Stdin = os.Stdin\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\n\t\tif err := cmd.Run(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else if *v.vaultMode.view {\n\t\tplain, err := v.decryptTemporary(*v.kmsKeyID, *v.file)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tcmd := exec.Command(\"less\")\n\t\tcmd.Stdin = bytes.NewReader(plain)\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\n\t\tif err := cmd.Run(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t} else {\n\t\tpanic(\"Vault mode is not selected.\")\n\t}\n\n\treturn 0\n}\n\nfunc (c *VaultCommand) Synopsis() string {\n\treturn c.Help()\n}\n<|endoftext|>"} {"text":"<commit_before>package cryptopals\n\nimport (\n\t\"crypto\/aes\"\n\t\"crypto\/cipher\"\n\t\"crypto\/rand\"\n\t\"crypto\/subtle\"\n\n\t\"github.com\/d1str0\/pkcs7\"\n)\n\n\/\/ AesCbcEncrypt encrypts the message using given key and random IV.\n\/\/ IV is prepended to the ciphertext.\nfunc AesCbcEncrypt(message, key []byte) []byte {\n\tpadded, _ := pkcs7.Pad(message, aes.BlockSize)\n\tciphertext := make([]byte, aes.BlockSize+len(padded))\n\n\tiv := ciphertext[0:aes.BlockSize]\n\trand.Read(iv)\n\n\tblock, _ := aes.NewCipher(key)\n\tmode := cipher.NewCBCEncrypter(block, iv)\n\tmode.CryptBlocks(ciphertext[aes.BlockSize:], padded)\n\n\treturn ciphertext\n}\n\n\/\/ AesCbcDecrypt decrypts the ciphertext using given key.\n\/\/ It assumes that IV is prepended to the ciphertext.\nfunc AesCbcDecrypt(ciphertext, key []byte) []byte {\n\tiv := ciphertext[0:aes.BlockSize]\n\tmessage := make([]byte, len(ciphertext)-len(iv))\n\n\tblock, _ := aes.NewCipher(key)\n\tmode := cipher.NewCBCDecrypter(block, iv)\n\tmode.CryptBlocks(message, ciphertext[aes.BlockSize:])\n\tunpadded, _ := pkcs7.Unpad(message)\n\n\treturn unpadded\n}\n\n\/\/ CbcMacSign calculates CBC-MAC for a given message.\nfunc CbcMacSign(message, key []byte, iv []byte) []byte {\n\tpadded, _ := pkcs7.Pad(message, aes.BlockSize)\n\tciphertext := make([]byte, len(padded))\n\n\tblock, _ := aes.NewCipher(key)\n\tmode := cipher.NewCBCEncrypter(block, iv)\n\tmode.CryptBlocks(ciphertext[:], padded)\n\n\treturn ciphertext[len(ciphertext)-aes.BlockSize:]\n}\n\n\/\/ CbcMacVerify verifies CBC-MAC for a given message.\nfunc CbcMacVerify(msg, key []byte) bool {\n\tsize := len(msg) - 2*aes.BlockSize\n\n\tif size < 0 {\n\t\treturn false\n\t}\n\n\tmessage := msg[0:size]\n\tiv := msg[size : size+aes.BlockSize]\n\tmac := msg[size+aes.BlockSize : size+2*aes.BlockSize]\n\n\treturn subtle.ConstantTimeCompare(CbcMacSign(message, key, iv), mac) == 1\n}\n<commit_msg>Update CbcMacSign to concatenate message, IV and MAC<commit_after>package cryptopals\n\nimport (\n\t\"crypto\/aes\"\n\t\"crypto\/cipher\"\n\t\"crypto\/rand\"\n\t\"crypto\/subtle\"\n\n\t\"github.com\/d1str0\/pkcs7\"\n)\n\n\/\/ AesCbcEncrypt encrypts the message using given key and random IV.\n\/\/ IV is prepended to the ciphertext.\nfunc AesCbcEncrypt(message, key []byte) []byte {\n\tpadded, _ := pkcs7.Pad(message, aes.BlockSize)\n\tciphertext := make([]byte, aes.BlockSize+len(padded))\n\n\tiv := ciphertext[0:aes.BlockSize]\n\trand.Read(iv)\n\n\tblock, _ := aes.NewCipher(key)\n\tmode := cipher.NewCBCEncrypter(block, iv)\n\tmode.CryptBlocks(ciphertext[aes.BlockSize:], padded)\n\n\treturn ciphertext\n}\n\n\/\/ AesCbcDecrypt decrypts the ciphertext using given key.\n\/\/ It assumes that IV is prepended to the ciphertext.\nfunc AesCbcDecrypt(ciphertext, key []byte) []byte {\n\tiv := ciphertext[0:aes.BlockSize]\n\tmessage := make([]byte, len(ciphertext)-len(iv))\n\n\tblock, _ := aes.NewCipher(key)\n\tmode := cipher.NewCBCDecrypter(block, iv)\n\tmode.CryptBlocks(message, ciphertext[aes.BlockSize:])\n\tunpadded, _ := pkcs7.Unpad(message)\n\n\treturn unpadded\n}\n\n\/\/ CbcMacSign calculates CBC-MAC for a given message.\n\/\/ IV and MAC are appended to the plaintext.\nfunc CbcMacSign(message, key []byte, iv []byte) []byte {\n\tpadded, _ := pkcs7.Pad(message, aes.BlockSize)\n\tciphertext := make([]byte, len(padded))\n\n\tblock, _ := aes.NewCipher(key)\n\tmode := cipher.NewCBCEncrypter(block, iv)\n\tmode.CryptBlocks(ciphertext[:], padded)\n\n\tsize := len(message)\n\tmsg := make([]byte, size+2*aes.BlockSize)\n\tmac := ciphertext[len(ciphertext)-aes.BlockSize:]\n\n\tcopy(msg[:], message)\n\tcopy(msg[size:], iv)\n\tcopy(msg[size+aes.BlockSize:], mac)\n\n\treturn msg\n}\n\n\/\/ CbcMacVerify verifies CBC-MAC for a given message.\n\/\/ It assumes that IV and mac are appended to the plaintext.\nfunc CbcMacVerify(msg, key []byte) bool {\n\tsize := len(msg) - 2*aes.BlockSize\n\n\tif size < 0 {\n\t\treturn false\n\t}\n\n\tmessage := msg[0:size]\n\tiv := msg[size : size+aes.BlockSize]\n\tmac := msg[size+aes.BlockSize:]\n\n\treturn subtle.ConstantTimeCompare(CbcMacSign(message, key, iv), mac) == 1\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ RegResponse is a struct for registration response JSON\ntype RegResponse struct {\n\tUsername string `json:\"username\"`\n\tPassword string `json:\"password\"`\n\tFulldomain string `json:\"fulldomain\"`\n\tSubdomain string `json:\"subdomain\"`\n\tAllowfrom []string `json:\"allowfrom\"`\n}\n\nfunc webRegisterPost(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tvar regStatus int\n\tvar reg []byte\n\taTXT := ACMETxt{}\n\tif r.Body == nil {\n\t\thttp.Error(w, string(jsonError(\"body_missing\")), http.StatusBadRequest)\n\t\treturn\n\t}\n\tjson.NewDecoder(r.Body).Decode(&aTXT)\n\t\/\/ Create new user\n\tnu, err := DB.Register(aTXT.AllowFrom)\n\tif err != nil {\n\t\terrstr := fmt.Sprintf(\"%v\", err)\n\t\treg = jsonError(errstr)\n\t\tregStatus = http.StatusInternalServerError\n\t\tlog.WithFields(log.Fields{\"error\": err.Error()}).Debug(\"Error in registration\")\n\t} else {\n\t\tlog.WithFields(log.Fields{\"user\": nu.Username.String()}).Debug(\"Created new user\")\n\t\tregStruct := RegResponse{nu.Username.String(), nu.Password, nu.Subdomain + \".\" + Config.General.Domain, nu.Subdomain, nu.AllowFrom.ValidEntries()}\n\t\tregStatus = http.StatusCreated\n\t\treg, err = json.Marshal(regStruct)\n\t\tif err != nil {\n\t\t\tregStatus = http.StatusInternalServerError\n\t\t\treg = jsonError(\"json_error\")\n\t\t\tlog.WithFields(log.Fields{\"error\": \"json\"}).Debug(\"Could not marshal JSON\")\n\t\t}\n\t}\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(regStatus)\n\tw.Write(reg)\n}\n\nfunc webUpdatePost(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tvar updStatus int\n\tvar upd []byte\n\t\/\/ Get user\n\ta, ok := r.Context().Value(ACMETxtKey).(ACMETxt)\n\tif !ok {\n\t\tlog.WithFields(log.Fields{\"error\": \"context\"}).Error(\"Context error\")\n\t}\n\tif validSubdomain(a.Subdomain) && validTXT(a.Value) {\n\t\terr := DB.Update(a)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\"error\": err.Error()}).Debug(\"Error while trying to update record\")\n\t\t\tupdStatus = http.StatusInternalServerError\n\t\t\tupd = jsonError(\"db_error\")\n\t\t} else {\n\t\t\tlog.WithFields(log.Fields{\"subdomain\": a.Subdomain, \"txt\": a.Value}).Debug(\"TXT updated\")\n\t\t\tupdStatus = http.StatusOK\n\t\t\tupd = []byte(\"{\\\"txt\\\": \\\"\" + a.Value + \"\\\"}\")\n\t\t}\n\t} else {\n\t\tlog.WithFields(log.Fields{\"error\": \"subdomain\", \"subdomain\": a.Subdomain, \"txt\": a.Value}).Debug(\"Bad update data\")\n\t\tupdStatus = http.StatusBadRequest\n\t\tupd = jsonError(\"bad_subdomain\")\n\t}\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(updStatus)\n\tw.Write(upd)\n}\n<commit_msg>Removed unnecessary body check (#21)<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n\tlog \"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ RegResponse is a struct for registration response JSON\ntype RegResponse struct {\n\tUsername string `json:\"username\"`\n\tPassword string `json:\"password\"`\n\tFulldomain string `json:\"fulldomain\"`\n\tSubdomain string `json:\"subdomain\"`\n\tAllowfrom []string `json:\"allowfrom\"`\n}\n\nfunc webRegisterPost(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tvar regStatus int\n\tvar reg []byte\n\taTXT := ACMETxt{}\n\tjson.NewDecoder(r.Body).Decode(&aTXT)\n\t\/\/ Create new user\n\tnu, err := DB.Register(aTXT.AllowFrom)\n\tif err != nil {\n\t\terrstr := fmt.Sprintf(\"%v\", err)\n\t\treg = jsonError(errstr)\n\t\tregStatus = http.StatusInternalServerError\n\t\tlog.WithFields(log.Fields{\"error\": err.Error()}).Debug(\"Error in registration\")\n\t} else {\n\t\tlog.WithFields(log.Fields{\"user\": nu.Username.String()}).Debug(\"Created new user\")\n\t\tregStruct := RegResponse{nu.Username.String(), nu.Password, nu.Subdomain + \".\" + Config.General.Domain, nu.Subdomain, nu.AllowFrom.ValidEntries()}\n\t\tregStatus = http.StatusCreated\n\t\treg, err = json.Marshal(regStruct)\n\t\tif err != nil {\n\t\t\tregStatus = http.StatusInternalServerError\n\t\t\treg = jsonError(\"json_error\")\n\t\t\tlog.WithFields(log.Fields{\"error\": \"json\"}).Debug(\"Could not marshal JSON\")\n\t\t}\n\t}\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(regStatus)\n\tw.Write(reg)\n}\n\nfunc webUpdatePost(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tvar updStatus int\n\tvar upd []byte\n\t\/\/ Get user\n\ta, ok := r.Context().Value(ACMETxtKey).(ACMETxt)\n\tif !ok {\n\t\tlog.WithFields(log.Fields{\"error\": \"context\"}).Error(\"Context error\")\n\t}\n\tif validSubdomain(a.Subdomain) && validTXT(a.Value) {\n\t\terr := DB.Update(a)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\"error\": err.Error()}).Debug(\"Error while trying to update record\")\n\t\t\tupdStatus = http.StatusInternalServerError\n\t\t\tupd = jsonError(\"db_error\")\n\t\t} else {\n\t\t\tlog.WithFields(log.Fields{\"subdomain\": a.Subdomain, \"txt\": a.Value}).Debug(\"TXT updated\")\n\t\t\tupdStatus = http.StatusOK\n\t\t\tupd = []byte(\"{\\\"txt\\\": \\\"\" + a.Value + \"\\\"}\")\n\t\t}\n\t} else {\n\t\tlog.WithFields(log.Fields{\"error\": \"subdomain\", \"subdomain\": a.Subdomain, \"txt\": a.Value}).Debug(\"Bad update data\")\n\t\tupdStatus = http.StatusBadRequest\n\t\tupd = jsonError(\"bad_subdomain\")\n\t}\n\tw.Header().Set(\"Content-Type\", \"application\/json\")\n\tw.WriteHeader(updStatus)\n\tw.Write(upd)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"time\"\n\n\tdocker \"github.com\/fsouza\/go-dockerclient\"\n\tgin \"github.com\/gin-gonic\/gin\"\n)\n\nfunc errorResponse(err error, c *gin.Context) {\n\tresult := map[string]string{\"error\": err.Error()}\n\tc.JSON(400, result)\n}\n\nfunc setCorsHeaders(c *gin.Context) {\n\tc.Header(\"Access-Control-Allow-Origin\", \"*\")\n\tc.Header(\"Access-Control-Allow-Methods\", \"GET, POST, OPTIONS\")\n}\n\nfunc HandleRun(c *gin.Context) {\n\t\/\/ We only need CORS for this endpoint\n\tsetCorsHeaders(c)\n\n\treq, err := ParseRequest(c.Request)\n\tif err != nil {\n\t\terrorResponse(err, c)\n\t\treturn\n\t}\n\n\tconfig, exists := c.Get(\"config\")\n\tif !exists {\n\t\terrorResponse(fmt.Errorf(\"Cant get config\"), c)\n\t\treturn\n\t}\n\n\tclient, exists := c.Get(\"client\")\n\tif !exists {\n\t\terrorResponse(fmt.Errorf(\"Cant get client\"), c)\n\t\treturn\n\t}\n\n\trun := NewRun(config.(*Config), client.(*docker.Client), req)\n\tdefer run.Destroy()\n\n\tif err := run.Setup(); err != nil {\n\t\terrorResponse(err, c)\n\t\treturn\n\t}\n\n\t\/\/ TODO: make timeout configurable\n\tresult, err := run.StartWithTimeout(time.Second * 10)\n\tif err != nil {\n\t\terrorResponse(err, c)\n\t\treturn\n\t}\n\n\tc.Header(\"X-Run-ExitCode\", strconv.Itoa(result.ExitCode))\n\tc.Header(\"X-Run-Duration\", result.Duration)\n\n\tc.Data(200, req.Format, result.Output)\n}\n\nfunc RunApi(config *Config, client *docker.Client) {\n\trouter := gin.Default()\n\trouter.Use(func(c *gin.Context) {\n\t\tc.Set(\"config\", config)\n\t\tc.Set(\"client\", client)\n\t})\n\n\trouter.POST(\"\/run\", HandleRun)\n\trouter.Run(\"127.0.0.1:5000\")\n}\n<commit_msg>Add \/config endpoint to return used configuration<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"time\"\n\n\tdocker \"github.com\/fsouza\/go-dockerclient\"\n\tgin \"github.com\/gin-gonic\/gin\"\n)\n\nfunc errorResponse(err error, c *gin.Context) {\n\tresult := map[string]string{\"error\": err.Error()}\n\tc.JSON(400, result)\n}\n\nfunc setCorsHeaders(c *gin.Context) {\n\tc.Header(\"Access-Control-Allow-Origin\", \"*\")\n\tc.Header(\"Access-Control-Allow-Methods\", \"GET, POST, OPTIONS\")\n}\n\nfunc HandleRun(c *gin.Context) {\n\t\/\/ We only need CORS for this endpoint\n\tsetCorsHeaders(c)\n\n\treq, err := ParseRequest(c.Request)\n\tif err != nil {\n\t\terrorResponse(err, c)\n\t\treturn\n\t}\n\n\tconfig, exists := c.Get(\"config\")\n\tif !exists {\n\t\terrorResponse(fmt.Errorf(\"Cant get config\"), c)\n\t\treturn\n\t}\n\n\tclient, exists := c.Get(\"client\")\n\tif !exists {\n\t\terrorResponse(fmt.Errorf(\"Cant get client\"), c)\n\t\treturn\n\t}\n\n\trun := NewRun(config.(*Config), client.(*docker.Client), req)\n\tdefer run.Destroy()\n\n\tif err := run.Setup(); err != nil {\n\t\terrorResponse(err, c)\n\t\treturn\n\t}\n\n\t\/\/ TODO: make timeout configurable\n\tresult, err := run.StartWithTimeout(time.Second * 10)\n\tif err != nil {\n\t\terrorResponse(err, c)\n\t\treturn\n\t}\n\n\tc.Header(\"X-Run-ExitCode\", strconv.Itoa(result.ExitCode))\n\tc.Header(\"X-Run-Duration\", result.Duration)\n\n\tc.Data(200, req.Format, result.Output)\n}\n\nfunc HandleConfig(c *gin.Context) {\n\tc.JSON(200, Extensions)\n}\n\nfunc RunApi(config *Config, client *docker.Client) {\n\trouter := gin.Default()\n\trouter.Use(func(c *gin.Context) {\n\t\tc.Set(\"config\", config)\n\t\tc.Set(\"client\", client)\n\t})\n\n\trouter.GET(\"\/config\", HandleConfig)\n\trouter.POST(\"\/run\", HandleRun)\n\trouter.Run(\"127.0.0.1:5000\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/diginatu\/nagome\/nicolive\"\n)\n\n\/\/ Message is base API struct for plugin\ntype Message struct {\n\tDomain string `json:\"domain\"`\n\tCommand string `json:\"command\"`\n\tContent json.RawMessage `json:\"content,omitempty\"` \/\/ The structure of Content is depend on the Command (and Domain).\n\n\tprgno int\n}\n\nfunc (m *Message) String() string {\n\treturn fmt.Sprintf(\"{%s %s %s plug:%d}\", m.Domain, m.Command, m.Content, m.prgno)\n}\n\n\/\/ NewMessage returns new Message with the given values.\nfunc NewMessage(dom, com string, con interface{}) (*Message, error) {\n\tvar conj json.RawMessage\n\tvar err error\n\n\tif con == nil {\n\t\tconj = nil\n\t} else {\n\t\tconj, err = json.Marshal(con)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tm := &Message{\n\t\tDomain: dom,\n\t\tCommand: com,\n\t\tContent: conj,\n\t\tprgno: -1,\n\t}\n\treturn m, nil\n}\n\n\/\/ NewMessageMust is same as NewMessage but assume no error.\nfunc NewMessageMust(dom, com string, con interface{}) *Message {\n\tm, err := NewMessage(dom, com, con)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn m\n}\n\n\/\/ Dimain names\nconst (\n\tDomainNagome = \"nagome\"\n\tDomainQuery = \"nagome_query\"\n\tDomainComment = \"nagome_comment\"\n\tDomainUI = \"nagome_ui\"\n\tDomainAntenna = \"nagome_antenna\"\n\tDomainDirect = \"nagome_direct\" \/\/ DomainDirect is a special domain (from plugin).\n\tDomainDirectngm = \"nagome_directngm\" \/\/ DomainDirectNgm is a domain for direct message from Nagome.\n\n\t\/\/ Adding DomainSuffixFilter to the end of domain name in \"subscribe\" in your plugin.yml enables filtering messages by the plugin.\n\tDomainSuffixFilter = \"@filter\"\n)\n\n\/\/ Command names\nconst (\n\t\/\/ DomainNagome\n\tCommNagomeBroadOpen = \"Broad.Open\"\n\tCommNagomeBroadClose = \"Broad.Close\"\n\tCommNagomeBroadInfo = \"Broad.Info\"\n\tCommNagomeCommentSend = \"Comment.Send\"\n\tCommNagomeAntennaOpen = \"Antenna.Open\"\n\tCommNagomeAntennaClose = \"Antenna.Close\"\n\n\t\/\/ DomainComment\n\t\/\/ This domain is for only sending comments.\n\tCommCommentGot = \"Got\"\n\n\t\/\/ DomainQuery\n\t\/\/ Query from plugin to Nagome.\n\tCommQueryBroadConnect = \"Broad.Connect\"\n\tCommQueryBroadDisconnect = \"Broad.Disconnect\"\n\tCommQueryBroadSendComment = \"Broad.SendComment\"\n\n\tCommQueryAccountSet = \"Account.Set\" \/\/ Set the given content value as account values.\n\tCommQueryAccountLogin = \"Account.Login\" \/\/ Login and set the user session to account.\n\tCommQueryAccountLoad = \"Account.Load\"\n\tCommQueryAccountSave = \"Account.Save\"\n\n\tCommQueryLogPrint = \"Log.Print\" \/\/ Print string using logger of Nagome\n\n\tCommQuerySettingsSetCurrent = \"Settings.SetCurrent\" \/\/ Set settings to current slot.\n\tCommQuerySettingsSetAll = \"Settings.SetAll\" \/\/ Set all slots of settings.\n\n\tCommQueryPlugEnable = \"Plug.Enable\" \/\/ Enable or disable a plugin.\n\n\t\/\/ DomainUI\n\t\/\/ Event to be processed by UI plugin.\n\tCommUIDialog = \"Dialog\"\n\tCommUIClearComments = \"ClearComments\"\n\tCommUIConfigAccount = \"ConfigAccount\" \/\/ Open the window of account setting or suggest user to configure it.\n\n\t\/\/ DomainAntenna\n\t\/\/ All antenna items (started live).\n\tCommAntennaGot = \"Got\"\n\n\t\/\/ DomainDirect (special domain)\n\t\/\/ The messages is sent between a plugin and Nagome. It is not broadcasted and can not be filtered.\n\n\t\/\/ plugin to Nagome\n\tCommDirectNo = \"No\" \/\/ Tell plugin number to Nagome when the connection started. (TCP at first time only)\n\tCommDirectPlugList = \"Plug.List\" \/\/ Request a list of plugins.\n\n\tCommDirectSettingsCurrent = \"Settings.Current\" \/\/ Request current settings message.\n\tCommDirectSettingsAll = \"Settings.All\" \/\/ Request all slots of settings message.\n\n\t\/\/ Nagome to plugin\n\tCommDirectngmPlugEnabled = \"Plug.Enabled\" \/\/ Sent when the plugin is enabled.\n\tCommDirectngmPlugDisabled = \"Plug.Disabled\" \/\/ Sent when the plugin is disabled.\n\tCommDirectngmPlugList = \"Plug.List\"\n\n\tCommDirectngmSettingsCurrent = \"Settings.Current\"\n\tCommDirectngmSettingsAll = \"Settings.All\"\n)\n\n\/\/ Contents\n\/\/\n\/\/ Contents in the Message API\n\n\/\/ CtNagomeBroadOpen is a content of CommNagomeBroadOpen\ntype CtNagomeBroadOpen struct {\n\tBroadID string `json:\"broad_id\"`\n\tTitle string `json:\"title\"`\n\tDescription string `json:\"description\"`\n\tCommunityID string `json:\"community_id\"`\n\tOwnerID string `json:\"owner_id\"`\n\tOwnerName string `json:\"owner_name\"`\n\tOwnerBroad bool `json:\"owner_broad\"`\n\n\tOpenTime time.Time `json:\"open_time\"`\n\tStartTime time.Time `json:\"start_time\"`\n\tEndTime time.Time `json:\"end_time\"`\n}\n\n\/\/ CtNagomeBroadInfo is a content of CommNagomeBroadInfo\ntype CtNagomeBroadInfo struct {\n\tWatchCount string `json:\"watch_count\"`\n\tCommentCount string `json:\"comment_count\"`\n}\n\n\/\/ CtQueryBroadConnect is a content of CommQueryBroadConnect\ntype CtQueryBroadConnect struct {\n\tBroadID string `json:\"broad_id\"`\n\tRetryN int `json:\"retry_n,omitempty\"`\n}\n\n\/\/ type of CtQueryBroadSendComment\nconst (\n\tCtQueryBroadSendCommentTypeGeneral string = \"General\"\n\tCtQueryBroadSendCommentTypeOwner = \"Owner\" \/\/ ignored if the user is not the owner\n)\n\n\/\/ CtQueryBroadSendComment is a content of CommQueryBroadSendComment\ntype CtQueryBroadSendComment struct {\n\tText string `json:\"text\"`\n\tIyayo bool `json:\"iyayo\"`\n\tType string `json:\"type,omitempty\"` \/\/ if omitted, automatically selected depend on the settings\n}\n\n\/\/ CtQueryAccountSet is a content of CommQueryAccountSet\ntype CtQueryAccountSet nicolive.Account\n\n\/\/ CtQueryLogPrint is a content of CommQueryLogPrint\ntype CtQueryLogPrint struct {\n\tText string `json:\"text\"`\n}\n\n\/\/ CtQuerySettingsSetCurrent is a content of CommQuerySettingsSetCurrent\ntype CtQuerySettingsSetCurrent SettingsSlot\n\n\/\/ CtQuerySettingsSetAll is a content of CommQuerySettingsSetAll\ntype CtQuerySettingsSetAll SettingsSlots\n\n\/\/ CtQueryPlugEnable is a content of CommQueryPlugEnable\ntype CtQueryPlugEnable struct {\n\tNo int `json:\"no\"`\n\tEnable bool `json:\"enable\"`\n}\n\n\/\/ A CtCommentGot is a content of CommCommentGot\ntype CtCommentGot struct {\n\tNo int `json:\"no\"`\n\tDate time.Time `json:\"date\"`\n\tRaw string `json:\"raw\"`\n\tComment string `json:\"comment\"`\n\n\tUserID string `json:\"user_id\"`\n\tUserName string `json:\"user_name\"`\n\tUserThumbnailURL string `json:\"user_thumbnail_url,omitempty\"`\n\tScore int `json:\"score,omitempty\"`\n\tIsPremium bool `json:\"is_premium\"`\n\tIsBroadcaster bool `json:\"is_broadcaster\"`\n\tIsStaff bool `json:\"is_staff\"`\n\tIsAnonymity bool `json:\"is_anonymity\"`\n}\n\n\/\/ CtUIDialog is a content of CommUIDialog\ntype CtUIDialog struct {\n\t\/\/ Select type from below const string\n\tType string `json:\"type\"`\n\tTitle string `json:\"title\"`\n\tDescription string `json:\"description\"`\n}\n\n\/\/ CtAntennaGot is a content of CommAntennaGot\ntype CtAntennaGot struct {\n\tBroadID string `json:\"broad_id\"`\n\tCommunityID string `json:\"community_id\"`\n\tUserID string `json:\"user_id\"`\n}\n\n\/\/ type of CtUIDialog\nconst (\n\tCtUIDialogTypeInfo string = \"Info\"\n\tCtUIDialogTypeWarn = \"Warn\"\n)\n\n\/\/ CtDirectNo is a content for CommDirectNo\ntype CtDirectNo struct {\n\tNo int `json:\"no\"`\n}\n\n\/\/ CtDirectngmPlugList is a content for CommDirectngmPlugList\ntype CtDirectngmPlugList struct {\n\tPlugins *[]*Plugin `json:\"plugins\"`\n}\n\n\/\/ CtDirectngmSettingsCurrent is a content for CommDirectngmSettingsCurrent\ntype CtDirectngmSettingsCurrent SettingsSlot\n\n\/\/ CtDirectngmSettingsAll is a content for CommDirectngmSettingsAll\ntype CtDirectngmSettingsAll SettingsSlots\n<commit_msg>do not display content of a message<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/diginatu\/nagome\/nicolive\"\n)\n\n\/\/ Message is base API struct for plugin\ntype Message struct {\n\tDomain string `json:\"domain\"`\n\tCommand string `json:\"command\"`\n\tContent json.RawMessage `json:\"content,omitempty\"` \/\/ The structure of Content is depend on the Command (and Domain).\n\n\tprgno int\n}\n\nfunc (m *Message) String() string {\n\treturn fmt.Sprintf(\"{%s %s plug:%d}\", m.Domain, m.Command, m.prgno)\n}\n\n\/\/ NewMessage returns new Message with the given values.\nfunc NewMessage(dom, com string, con interface{}) (*Message, error) {\n\tvar conj json.RawMessage\n\tvar err error\n\n\tif con == nil {\n\t\tconj = nil\n\t} else {\n\t\tconj, err = json.Marshal(con)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tm := &Message{\n\t\tDomain: dom,\n\t\tCommand: com,\n\t\tContent: conj,\n\t\tprgno: -1,\n\t}\n\treturn m, nil\n}\n\n\/\/ NewMessageMust is same as NewMessage but assume no error.\nfunc NewMessageMust(dom, com string, con interface{}) *Message {\n\tm, err := NewMessage(dom, com, con)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\treturn m\n}\n\n\/\/ Dimain names\nconst (\n\tDomainNagome = \"nagome\"\n\tDomainQuery = \"nagome_query\"\n\tDomainComment = \"nagome_comment\"\n\tDomainUI = \"nagome_ui\"\n\tDomainAntenna = \"nagome_antenna\"\n\tDomainDirect = \"nagome_direct\" \/\/ DomainDirect is a special domain (from plugin).\n\tDomainDirectngm = \"nagome_directngm\" \/\/ DomainDirectNgm is a domain for direct message from Nagome.\n\n\t\/\/ Adding DomainSuffixFilter to the end of domain name in \"subscribe\" in your plugin.yml enables filtering messages by the plugin.\n\tDomainSuffixFilter = \"@filter\"\n)\n\n\/\/ Command names\nconst (\n\t\/\/ DomainNagome\n\tCommNagomeBroadOpen = \"Broad.Open\"\n\tCommNagomeBroadClose = \"Broad.Close\"\n\tCommNagomeBroadInfo = \"Broad.Info\"\n\tCommNagomeCommentSend = \"Comment.Send\"\n\tCommNagomeAntennaOpen = \"Antenna.Open\"\n\tCommNagomeAntennaClose = \"Antenna.Close\"\n\n\t\/\/ DomainComment\n\t\/\/ This domain is for only sending comments.\n\tCommCommentGot = \"Got\"\n\n\t\/\/ DomainQuery\n\t\/\/ Query from plugin to Nagome.\n\tCommQueryBroadConnect = \"Broad.Connect\"\n\tCommQueryBroadDisconnect = \"Broad.Disconnect\"\n\tCommQueryBroadSendComment = \"Broad.SendComment\"\n\n\tCommQueryAccountSet = \"Account.Set\" \/\/ Set the given content value as account values.\n\tCommQueryAccountLogin = \"Account.Login\" \/\/ Login and set the user session to account.\n\tCommQueryAccountLoad = \"Account.Load\"\n\tCommQueryAccountSave = \"Account.Save\"\n\n\tCommQueryLogPrint = \"Log.Print\" \/\/ Print string using logger of Nagome\n\n\tCommQuerySettingsSetCurrent = \"Settings.SetCurrent\" \/\/ Set settings to current slot.\n\tCommQuerySettingsSetAll = \"Settings.SetAll\" \/\/ Set all slots of settings.\n\n\tCommQueryPlugEnable = \"Plug.Enable\" \/\/ Enable or disable a plugin.\n\n\t\/\/ DomainUI\n\t\/\/ Event to be processed by UI plugin.\n\tCommUIDialog = \"Dialog\"\n\tCommUIClearComments = \"ClearComments\"\n\tCommUIConfigAccount = \"ConfigAccount\" \/\/ Open the window of account setting or suggest user to configure it.\n\n\t\/\/ DomainAntenna\n\t\/\/ All antenna items (started live).\n\tCommAntennaGot = \"Got\"\n\n\t\/\/ DomainDirect (special domain)\n\t\/\/ The messages is sent between a plugin and Nagome. It is not broadcasted and can not be filtered.\n\n\t\/\/ plugin to Nagome\n\tCommDirectNo = \"No\" \/\/ Tell plugin number to Nagome when the connection started. (TCP at first time only)\n\tCommDirectPlugList = \"Plug.List\" \/\/ Request a list of plugins.\n\n\tCommDirectSettingsCurrent = \"Settings.Current\" \/\/ Request current settings message.\n\tCommDirectSettingsAll = \"Settings.All\" \/\/ Request all slots of settings message.\n\n\t\/\/ Nagome to plugin\n\tCommDirectngmPlugEnabled = \"Plug.Enabled\" \/\/ Sent when the plugin is enabled.\n\tCommDirectngmPlugDisabled = \"Plug.Disabled\" \/\/ Sent when the plugin is disabled.\n\tCommDirectngmPlugList = \"Plug.List\"\n\n\tCommDirectngmSettingsCurrent = \"Settings.Current\"\n\tCommDirectngmSettingsAll = \"Settings.All\"\n)\n\n\/\/ Contents\n\/\/\n\/\/ Contents in the Message API\n\n\/\/ CtNagomeBroadOpen is a content of CommNagomeBroadOpen\ntype CtNagomeBroadOpen struct {\n\tBroadID string `json:\"broad_id\"`\n\tTitle string `json:\"title\"`\n\tDescription string `json:\"description\"`\n\tCommunityID string `json:\"community_id\"`\n\tOwnerID string `json:\"owner_id\"`\n\tOwnerName string `json:\"owner_name\"`\n\tOwnerBroad bool `json:\"owner_broad\"`\n\n\tOpenTime time.Time `json:\"open_time\"`\n\tStartTime time.Time `json:\"start_time\"`\n\tEndTime time.Time `json:\"end_time\"`\n}\n\n\/\/ CtNagomeBroadInfo is a content of CommNagomeBroadInfo\ntype CtNagomeBroadInfo struct {\n\tWatchCount string `json:\"watch_count\"`\n\tCommentCount string `json:\"comment_count\"`\n}\n\n\/\/ CtQueryBroadConnect is a content of CommQueryBroadConnect\ntype CtQueryBroadConnect struct {\n\tBroadID string `json:\"broad_id\"`\n\tRetryN int `json:\"retry_n,omitempty\"`\n}\n\n\/\/ type of CtQueryBroadSendComment\nconst (\n\tCtQueryBroadSendCommentTypeGeneral string = \"General\"\n\tCtQueryBroadSendCommentTypeOwner = \"Owner\" \/\/ ignored if the user is not the owner\n)\n\n\/\/ CtQueryBroadSendComment is a content of CommQueryBroadSendComment\ntype CtQueryBroadSendComment struct {\n\tText string `json:\"text\"`\n\tIyayo bool `json:\"iyayo\"`\n\tType string `json:\"type,omitempty\"` \/\/ if omitted, automatically selected depend on the settings\n}\n\n\/\/ CtQueryAccountSet is a content of CommQueryAccountSet\ntype CtQueryAccountSet nicolive.Account\n\n\/\/ CtQueryLogPrint is a content of CommQueryLogPrint\ntype CtQueryLogPrint struct {\n\tText string `json:\"text\"`\n}\n\n\/\/ CtQuerySettingsSetCurrent is a content of CommQuerySettingsSetCurrent\ntype CtQuerySettingsSetCurrent SettingsSlot\n\n\/\/ CtQuerySettingsSetAll is a content of CommQuerySettingsSetAll\ntype CtQuerySettingsSetAll SettingsSlots\n\n\/\/ CtQueryPlugEnable is a content of CommQueryPlugEnable\ntype CtQueryPlugEnable struct {\n\tNo int `json:\"no\"`\n\tEnable bool `json:\"enable\"`\n}\n\n\/\/ A CtCommentGot is a content of CommCommentGot\ntype CtCommentGot struct {\n\tNo int `json:\"no\"`\n\tDate time.Time `json:\"date\"`\n\tRaw string `json:\"raw\"`\n\tComment string `json:\"comment\"`\n\n\tUserID string `json:\"user_id\"`\n\tUserName string `json:\"user_name\"`\n\tUserThumbnailURL string `json:\"user_thumbnail_url,omitempty\"`\n\tScore int `json:\"score,omitempty\"`\n\tIsPremium bool `json:\"is_premium\"`\n\tIsBroadcaster bool `json:\"is_broadcaster\"`\n\tIsStaff bool `json:\"is_staff\"`\n\tIsAnonymity bool `json:\"is_anonymity\"`\n}\n\n\/\/ CtUIDialog is a content of CommUIDialog\ntype CtUIDialog struct {\n\t\/\/ Select type from below const string\n\tType string `json:\"type\"`\n\tTitle string `json:\"title\"`\n\tDescription string `json:\"description\"`\n}\n\n\/\/ CtAntennaGot is a content of CommAntennaGot\ntype CtAntennaGot struct {\n\tBroadID string `json:\"broad_id\"`\n\tCommunityID string `json:\"community_id\"`\n\tUserID string `json:\"user_id\"`\n}\n\n\/\/ type of CtUIDialog\nconst (\n\tCtUIDialogTypeInfo string = \"Info\"\n\tCtUIDialogTypeWarn = \"Warn\"\n)\n\n\/\/ CtDirectNo is a content for CommDirectNo\ntype CtDirectNo struct {\n\tNo int `json:\"no\"`\n}\n\n\/\/ CtDirectngmPlugList is a content for CommDirectngmPlugList\ntype CtDirectngmPlugList struct {\n\tPlugins *[]*Plugin `json:\"plugins\"`\n}\n\n\/\/ CtDirectngmSettingsCurrent is a content for CommDirectngmSettingsCurrent\ntype CtDirectngmSettingsCurrent SettingsSlot\n\n\/\/ CtDirectngmSettingsAll is a content for CommDirectngmSettingsAll\ntype CtDirectngmSettingsAll SettingsSlots\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"net\/http\"\n\t\"time\"\n\t\"fmt\"\n\t\"io\"\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"strconv\"\n)\n\nconst (\n\tHeaderAuthorizationKey = \"Authorization\"\n\tHeaderIfModifiedSinceKey = \"If-Modified-Since\"\n\tHeaderTokenKey = \"Token\"\n\tHeaderTokenAuthFormatString = \"Basic %s\"\n)\n\ntype TokenStorage interface {\n\tSave(token token) error\n\tLoad() (*token, error)\n}\n\ntype Api struct {\n\tdataFeedId string\n\tcredentials *credentials\n\ttokenStorage TokenStorage\n\tError error\n\tStatusCode int\n}\n\nfunc NewApi(dataFeedId string, username string, password string) (*Api) {\n\treturn &Api{\n\t\tdataFeedId: dataFeedId,\n\t\tcredentials: &credentials{\n\t\t\tuserName: username,\n\t\t\tpassword: password,\n\t\t},\n\t}\n}\n\nfunc (api *Api) SetTokenStorage(tokenStorage TokenStorage) {\n\tapi.tokenStorage = tokenStorage\n}\n\n\/\/ GetBranches returns an array of branches\nfunc (api *Api) GetBranches() (*BranchSummaries, error) {\n\tbranches := new(BranchSummaries)\n\tbranchesURLBuilder := new(URLGetBranchesBuilder)\n\tbranchesURLBuilder.SetDataFeedID(api.dataFeedId)\n\tif err := api.doRequest(branchesURLBuilder, branches); err != nil {\n\t\treturn nil, err\n\t}\n\treturn branches, nil\n}\n\nfunc (api *Api) GetBranch(branchSummary *BranchSummary) (branch *Branch, err error) {\n\tbranch = new(Branch)\n\tbranchURLBuilder := new(URLGetBranchBuilder)\n\tbranchURLBuilder.SetDataFeedID(api.dataFeedId)\n\tbranchURLBuilder.SetClientID(branchSummary.GetClientIDString())\n\tif err := api.doRequest(branchURLBuilder, branch); err != nil {\n\t\treturn nil, err\n\t}\n\treturn branch, nil\n}\n\nfunc (api *Api) GetProperties(branchSummary *BranchSummary) (properties *PropertySummaries, err error) {\n\tproperties = new(PropertySummaries)\n\tpropertiesURLBuilder := new(URLGetPropertiesBuilder)\n\tpropertiesURLBuilder.SetDataFeedID(api.dataFeedId)\n\tpropertiesURLBuilder.SetClientID(branchSummary.GetClientIDString())\n\tif err := api.doRequest(propertiesURLBuilder, properties); err != nil {\n\t\treturn nil, err\n\t}\n\treturn properties, nil\n}\n\nfunc (api *Api) GetProperty(branchSummary *BranchSummary, summary PropertySummary) (property *Property, err error) {\n\tproperty = new(Property)\n\tpropertyURLBuilder := new(URLGetPropertyBuilder)\n\tpropertyURLBuilder.SetDataFeedID(api.dataFeedId)\n\tpropertyURLBuilder.SetClientID(branchSummary.GetClientIDString())\n\tpropertyURLBuilder.SetPropertyID(strconv.Itoa(int(summary.PropertyID)))\n\tif err := api.doRequest(propertyURLBuilder, property); err != nil {\n\t\treturn nil, err\n\t}\n\treturn property, nil\n}\n\n\nfunc (api *Api) GetPropertyFromChangedFileSummary(summary ChangedFileSummary) (property *Property, err error) {\n\tproperty = new(Property)\n\tpropertyURLBuilder := new(ChangedPropertyURLBuilder)\n\tpropertyURLBuilder.SetURL(summary.PropUrl)\n\tif err := api.doRequest(propertyURLBuilder, property); err != nil {\n\t\treturn nil, err\n\t}\n\treturn property, nil\n}\n\nfunc (api *Api) GetChangedProperties(since time.Time) (properties *ChangedPropertySummaries, err error) {\n\tproperties = new(ChangedPropertySummaries)\n\tpropertiesURLBuilder := new(URLGetChangedPropertiesBuilder)\n\tpropertiesURLBuilder.SetDataFeedID(api.dataFeedId)\n\tpropertiesURLBuilder.SetSince(since)\n\tif err = api.doRequest(propertiesURLBuilder, properties); err != nil {\n\t\treturn nil, err\n\t}\n\treturn properties, nil\n}\n\nfunc (api *Api) GetChangedProperty(changedProperty *ChangedPropertySummary) (property *Property, err error) {\n\tif changedProperty.LastAction == Deleted {\n\t\treturn nil, fmt.Errorf(\"property [%s] has been deleted\", strconv.Itoa(int(changedProperty.PropertyID)))\n\t}\n\tproperty = new(Property)\n\tpropertiesURLBuilder := new(ChangedPropertyURLBuilder)\n\tpropertiesURLBuilder.SetURL(changedProperty.Url)\n\tif err = api.doRequest(propertiesURLBuilder, property); err != nil {\n\t\treturn nil, err\n\t}\n\treturn property, nil\n}\n\nfunc (api *Api) GetChangedFiles(since time.Time) (changedFiles *ChangedFilesSummaries, err error) {\n\tchangedFiles = new(ChangedFilesSummaries)\n\turlGetChangedFilesBuilder := new(URLGetChangedFilesBuilder)\n\turlGetChangedFilesBuilder.SetDataFeedID(api.dataFeedId)\n\turlGetChangedFilesBuilder.SetSince(since)\n\tif err = api.doRequest(urlGetChangedFilesBuilder, changedFiles); err != nil {\n\t\treturn nil, err\n\t}\n\treturn changedFiles, nil\n}\n\nfunc (api *Api) doRequest(urlBuilder URLBuilder, out interface{}) (err error) {\n\trequestor := buildRequestor(api.dataFeedId, api.credentials)\n\tif api.tokenStorage != nil {\n\t\tif requestor.token, err = api.tokenStorage.Load(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\trequestor.urlBuilder = urlBuilder\n\tfor ; requestor.attempts < 2; requestor.attempts++ {\n\t\trequestor.buildRequest()\n\t\trequestor.doRequest()\n\t\trequestor.saveTokenIfExists(api.tokenStorage)\n\t\trequestor.handleErrors()\n\t\tif requestor.response.StatusCode == http.StatusOK {\n\t\t\treturn requestor.unmarshal(out)\n\t\t}\n\t}\n\tapi.StatusCode = requestor.response.StatusCode\n\tapi.Error = fmt.Errorf(requestor.response.Status)\n\treturn api.Error\n}\n\nfunc (api *Api) doRequestSince(urlBuilder URLBuilder, out interface{}, since *time.Time) (err error) {\n\trequestor := buildRequestor(api.dataFeedId, api.credentials)\n\trequestor.setIfModifiedSince(since)\n\tif api.tokenStorage != nil {\n\t\tif requestor.token, err = api.tokenStorage.Load(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\trequestor.urlBuilder = urlBuilder\n\tfor ; requestor.attempts < 2; requestor.attempts++ {\n\t\trequestor.buildRequest()\n\t\trequestor.doRequest()\n\t\trequestor.saveTokenIfExists(api.tokenStorage)\n\t\trequestor.handleErrors()\n\t\tapi.StatusCode = requestor.response.StatusCode\n\t\tif requestor.response.StatusCode == http.StatusOK {\n\t\t\treturn requestor.unmarshal(out)\n\t\t}\n\t}\n\tapi.Error = fmt.Errorf(requestor.response.Status)\n\treturn api.Error\n}\n\ntype credentials struct {\n\tuserName string\n\tpassword string\n}\n\ntype token struct {\n\ttokenString string\n\tisEmpty bool\n\tisValid bool\n\ttimeSet time.Time\n}\n\nfunc Token(tokenString string) *token {\n\ttoken := new(token)\n\ttoken.tokenString = tokenString\n\ttoken.isValid = true\n\ttoken.timeSet = time.Now()\n\treturn token\n}\n\nfunc (token *token) IsValid() bool {\n\treturn token.isValid\n}\n\nfunc (token *token) Invalidate() {\n\ttoken.isValid = false\n\ttoken.tokenString = \"\"\n}\n\ntype requestor struct {\n\tdataFeedID string\n\tcredentials *credentials\n\ttoken *token\n\turlBuilder URLBuilder\n\theader http.Header\n\tsince *time.Time\n\trequest *http.Request\n\tresponse *http.Response\n\tbody io.Reader\n\terr error\n\tattempts int\n}\n\nfunc buildRequestor(dataFeedId string, credentials *credentials) (*requestor) {\n\treturn &requestor{\n\t\tdataFeedID: dataFeedId,\n\t\tcredentials: credentials,\n\t\ttoken: &token{},\n\t\turlBuilder: nil,\n\t\theader: http.Header{},\n\t}\n}\n\nfunc (requestor *requestor) doRequest() {\n\trequestor.response, requestor.err = (&http.Client{}).Do(requestor.request)\n\tif requestor.err != nil {\n\t\tpanic(requestor.err)\n\t}\n}\n\nfunc (requestor *requestor) saveTokenIfExists(tokenStorage TokenStorage) {\n\tif token := requestor.response.Header.Get(HeaderTokenKey); token != \"\" {\n\t\trequestor.token = Token(token)\n\t\tif tokenStorage != nil {\n\t\t\ttokenStorage.Save(*requestor.token)\n\t\t}\n\t}\n}\nfunc (requestor *requestor) handleErrors() {\n\tswitch requestor.response.StatusCode {\n\tcase http.StatusUnauthorized:\n\t\trequestor.token.Invalidate()\n\tdefault:\n\t}\n}\n\nfunc (requestor *requestor) setIfModifiedSince(since *time.Time) {\n\tif since != nil {\n\t\trequestor.header.Add(HeaderIfModifiedSinceKey, since.Format(time.RFC1123))\n\t}\n}\n\nfunc (requestor *requestor) setHeaderAttribute(key string, value string) {\n\trequestor.header.Add(key, value)\n}\n\nfunc (requestor *requestor) setBasicAuth() {\n\trequestor.request.SetBasicAuth(requestor.credentials.userName, requestor.credentials.password)\n}\n\nfunc (requestor *requestor) setAuthenticationToken() {\n\trequestor.request.Header.Add(HeaderAuthorizationKey, fmt.Sprintf(HeaderTokenAuthFormatString, requestor.token.tokenString))\n}\n\nfunc (requestor *requestor) buildRequest() (err error) {\n\trequestor.request, err = http.NewRequest(http.MethodGet, requestor.urlBuilder.Build(), requestor.body)\n\tfor key, val := range requestor.header {\n\t\trequestor.request.Header[key] = val\n\t}\n\trequestor.setAuthenticationMethod()\n\treturn err\n}\n\nfunc (requestor *requestor) setAuthenticationMethod() {\n\tif requestor.token.IsValid() {\n\t\trequestor.setAuthenticationToken()\n\t\treturn\n\t}\n\trequestor.setBasicAuth()\n}\n\nfunc (requestor *requestor) unmarshal(out interface{}) error {\n\tdefer requestor.response.Body.Close()\n\tbodyBuffer := new(bytes.Buffer)\n\tif _, err := bodyBuffer.ReadFrom(requestor.response.Body); err != nil {\n\t\treturn err\n\t}\n\terr := xml.Unmarshal(bodyBuffer.Bytes(), out)\n\treturn err\n}\n<commit_msg>Added fix so error is not thrown if status 304 is returned from the API<commit_after>package api\n\nimport (\n\t\"net\/http\"\n\t\"time\"\n\t\"fmt\"\n\t\"io\"\n\t\"bytes\"\n\t\"encoding\/xml\"\n\t\"strconv\"\n)\n\nconst (\n\tHeaderAuthorizationKey = \"Authorization\"\n\tHeaderIfModifiedSinceKey = \"If-Modified-Since\"\n\tHeaderTokenKey = \"Token\"\n\tHeaderTokenAuthFormatString = \"Basic %s\"\n)\n\ntype TokenStorage interface {\n\tSave(token token) error\n\tLoad() (*token, error)\n}\n\ntype Api struct {\n\tdataFeedId string\n\tcredentials *credentials\n\ttokenStorage TokenStorage\n\tError error\n\tStatusCode int\n}\n\nfunc NewApi(dataFeedId string, username string, password string) (*Api) {\n\treturn &Api{\n\t\tdataFeedId: dataFeedId,\n\t\tcredentials: &credentials{\n\t\t\tuserName: username,\n\t\t\tpassword: password,\n\t\t},\n\t}\n}\n\nfunc (api *Api) SetTokenStorage(tokenStorage TokenStorage) {\n\tapi.tokenStorage = tokenStorage\n}\n\n\/\/ GetBranches returns an array of branches\nfunc (api *Api) GetBranches() (*BranchSummaries, error) {\n\tbranches := new(BranchSummaries)\n\tbranchesURLBuilder := new(URLGetBranchesBuilder)\n\tbranchesURLBuilder.SetDataFeedID(api.dataFeedId)\n\tif err := api.doRequest(branchesURLBuilder, branches); err != nil {\n\t\treturn nil, err\n\t}\n\treturn branches, nil\n}\n\nfunc (api *Api) GetBranch(branchSummary *BranchSummary) (branch *Branch, err error) {\n\tbranch = new(Branch)\n\tbranchURLBuilder := new(URLGetBranchBuilder)\n\tbranchURLBuilder.SetDataFeedID(api.dataFeedId)\n\tbranchURLBuilder.SetClientID(branchSummary.GetClientIDString())\n\tif err := api.doRequest(branchURLBuilder, branch); err != nil {\n\t\treturn nil, err\n\t}\n\treturn branch, nil\n}\n\nfunc (api *Api) GetProperties(branchSummary *BranchSummary) (properties *PropertySummaries, err error) {\n\tproperties = new(PropertySummaries)\n\tpropertiesURLBuilder := new(URLGetPropertiesBuilder)\n\tpropertiesURLBuilder.SetDataFeedID(api.dataFeedId)\n\tpropertiesURLBuilder.SetClientID(branchSummary.GetClientIDString())\n\tif err := api.doRequest(propertiesURLBuilder, properties); err != nil {\n\t\treturn nil, err\n\t}\n\treturn properties, nil\n}\n\nfunc (api *Api) GetProperty(branchSummary *BranchSummary, summary PropertySummary) (property *Property, err error) {\n\tproperty = new(Property)\n\tpropertyURLBuilder := new(URLGetPropertyBuilder)\n\tpropertyURLBuilder.SetDataFeedID(api.dataFeedId)\n\tpropertyURLBuilder.SetClientID(branchSummary.GetClientIDString())\n\tpropertyURLBuilder.SetPropertyID(strconv.Itoa(int(summary.PropertyID)))\n\tif err := api.doRequest(propertyURLBuilder, property); err != nil {\n\t\treturn nil, err\n\t}\n\treturn property, nil\n}\n\n\nfunc (api *Api) GetPropertyFromChangedFileSummary(summary ChangedFileSummary) (property *Property, err error) {\n\tproperty = new(Property)\n\tpropertyURLBuilder := new(ChangedPropertyURLBuilder)\n\tpropertyURLBuilder.SetURL(summary.PropUrl)\n\tif err := api.doRequest(propertyURLBuilder, property); err != nil {\n\t\treturn nil, err\n\t}\n\treturn property, nil\n}\n\nfunc (api *Api) GetChangedProperties(since time.Time) (properties *ChangedPropertySummaries, err error) {\n\tproperties = new(ChangedPropertySummaries)\n\tpropertiesURLBuilder := new(URLGetChangedPropertiesBuilder)\n\tpropertiesURLBuilder.SetDataFeedID(api.dataFeedId)\n\tpropertiesURLBuilder.SetSince(since)\n\tif err = api.doRequest(propertiesURLBuilder, properties); err != nil {\n\t\treturn nil, err\n\t}\n\treturn properties, nil\n}\n\nfunc (api *Api) GetChangedProperty(changedProperty *ChangedPropertySummary) (property *Property, err error) {\n\tif changedProperty.LastAction == Deleted {\n\t\treturn nil, fmt.Errorf(\"property [%s] has been deleted\", strconv.Itoa(int(changedProperty.PropertyID)))\n\t}\n\tproperty = new(Property)\n\tpropertiesURLBuilder := new(ChangedPropertyURLBuilder)\n\tpropertiesURLBuilder.SetURL(changedProperty.Url)\n\tif err = api.doRequest(propertiesURLBuilder, property); err != nil {\n\t\treturn nil, err\n\t}\n\treturn property, nil\n}\n\nfunc (api *Api) GetChangedFiles(since time.Time) (changedFiles *ChangedFilesSummaries, err error) {\n\tchangedFiles = new(ChangedFilesSummaries)\n\turlGetChangedFilesBuilder := new(URLGetChangedFilesBuilder)\n\turlGetChangedFilesBuilder.SetDataFeedID(api.dataFeedId)\n\turlGetChangedFilesBuilder.SetSince(since)\n\tif err = api.doRequest(urlGetChangedFilesBuilder, changedFiles); err != nil {\n\t\treturn nil, err\n\t}\n\treturn changedFiles, nil\n}\n\nfunc (api *Api) doRequest(urlBuilder URLBuilder, out interface{}) (err error) {\n\trequestor := buildRequestor(api.dataFeedId, api.credentials)\n\tif api.tokenStorage != nil {\n\t\tif requestor.token, err = api.tokenStorage.Load(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\trequestor.urlBuilder = urlBuilder\n\tfor ; requestor.attempts < 2; requestor.attempts++ {\n\t\trequestor.buildRequest()\n\t\trequestor.doRequest()\n\t\trequestor.saveTokenIfExists(api.tokenStorage)\n\t\trequestor.handleErrors()\n\t\tswitch requestor.response.StatusCode {\n\t\tcase http.StatusOK:\n\t\t\treturn requestor.unmarshal(out)\n\t\tcase http.StatusNotModified:\n\t\t\treturn nil\n\t\t}\n\t}\n\tapi.StatusCode = requestor.response.StatusCode\n\tapi.Error = fmt.Errorf(requestor.response.Status)\n\treturn api.Error\n}\n\nfunc (api *Api) doRequestSince(urlBuilder URLBuilder, out interface{}, since *time.Time) (err error) {\n\trequestor := buildRequestor(api.dataFeedId, api.credentials)\n\trequestor.setIfModifiedSince(since)\n\tif api.tokenStorage != nil {\n\t\tif requestor.token, err = api.tokenStorage.Load(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\trequestor.urlBuilder = urlBuilder\n\tfor ; requestor.attempts < 2; requestor.attempts++ {\n\t\trequestor.buildRequest()\n\t\trequestor.doRequest()\n\t\trequestor.saveTokenIfExists(api.tokenStorage)\n\t\trequestor.handleErrors()\n\t\tapi.StatusCode = requestor.response.StatusCode\n\t\tif requestor.response.StatusCode == http.StatusOK {\n\t\t\treturn requestor.unmarshal(out)\n\t\t}\n\t}\n\tapi.Error = fmt.Errorf(requestor.response.Status)\n\treturn api.Error\n}\n\ntype credentials struct {\n\tuserName string\n\tpassword string\n}\n\ntype token struct {\n\ttokenString string\n\tisEmpty bool\n\tisValid bool\n\ttimeSet time.Time\n}\n\nfunc Token(tokenString string) *token {\n\ttoken := new(token)\n\ttoken.tokenString = tokenString\n\ttoken.isValid = true\n\ttoken.timeSet = time.Now()\n\treturn token\n}\n\nfunc (token *token) IsValid() bool {\n\treturn token.isValid\n}\n\nfunc (token *token) Invalidate() {\n\ttoken.isValid = false\n\ttoken.tokenString = \"\"\n}\n\ntype requestor struct {\n\tdataFeedID string\n\tcredentials *credentials\n\ttoken *token\n\turlBuilder URLBuilder\n\theader http.Header\n\tsince *time.Time\n\trequest *http.Request\n\tresponse *http.Response\n\tbody io.Reader\n\terr error\n\tattempts int\n}\n\nfunc buildRequestor(dataFeedId string, credentials *credentials) (*requestor) {\n\treturn &requestor{\n\t\tdataFeedID: dataFeedId,\n\t\tcredentials: credentials,\n\t\ttoken: &token{},\n\t\turlBuilder: nil,\n\t\theader: http.Header{},\n\t}\n}\n\nfunc (requestor *requestor) doRequest() {\n\trequestor.response, requestor.err = (&http.Client{}).Do(requestor.request)\n\tif requestor.err != nil {\n\t\tpanic(requestor.err)\n\t}\n}\n\nfunc (requestor *requestor) saveTokenIfExists(tokenStorage TokenStorage) {\n\tif token := requestor.response.Header.Get(HeaderTokenKey); token != \"\" {\n\t\trequestor.token = Token(token)\n\t\tif tokenStorage != nil {\n\t\t\ttokenStorage.Save(*requestor.token)\n\t\t}\n\t}\n}\nfunc (requestor *requestor) handleErrors() {\n\tswitch requestor.response.StatusCode {\n\tcase http.StatusUnauthorized:\n\t\trequestor.token.Invalidate()\n\tdefault:\n\t}\n}\n\nfunc (requestor *requestor) setIfModifiedSince(since *time.Time) {\n\tif since != nil {\n\t\trequestor.header.Add(HeaderIfModifiedSinceKey, since.Format(time.RFC1123))\n\t}\n}\n\nfunc (requestor *requestor) setHeaderAttribute(key string, value string) {\n\trequestor.header.Add(key, value)\n}\n\nfunc (requestor *requestor) setBasicAuth() {\n\trequestor.request.SetBasicAuth(requestor.credentials.userName, requestor.credentials.password)\n}\n\nfunc (requestor *requestor) setAuthenticationToken() {\n\trequestor.request.Header.Add(HeaderAuthorizationKey, fmt.Sprintf(HeaderTokenAuthFormatString, requestor.token.tokenString))\n}\n\nfunc (requestor *requestor) buildRequest() (err error) {\n\trequestor.request, err = http.NewRequest(http.MethodGet, requestor.urlBuilder.Build(), requestor.body)\n\tfor key, val := range requestor.header {\n\t\trequestor.request.Header[key] = val\n\t}\n\trequestor.setAuthenticationMethod()\n\treturn err\n}\n\nfunc (requestor *requestor) setAuthenticationMethod() {\n\tif requestor.token.IsValid() {\n\t\trequestor.setAuthenticationToken()\n\t\treturn\n\t}\n\trequestor.setBasicAuth()\n}\n\nfunc (requestor *requestor) unmarshal(out interface{}) error {\n\tdefer requestor.response.Body.Close()\n\tbodyBuffer := new(bytes.Buffer)\n\tif _, err := bodyBuffer.ReadFrom(requestor.response.Body); err != nil {\n\t\treturn err\n\t}\n\terr := xml.Unmarshal(bodyBuffer.Bytes(), out)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\".\/helpers\"\n\t\"encoding\/json\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"github.com\/googollee\/go-socket.io\"\n\t\"strings\"\n)\n\ntype Space struct {\n\tSubnet []Player\n}\n\ntype Player struct {\n\tLocalIP string\n\tUserName string\n}\n\nfunc Adduser(msg string) {\n\thelpers.TRACE.Println(\"socket.io: adduser\", msg)\n\n}\n\nfunc Logon(msg string) {\n\n\tvar space Space\n\n\tipNumbers := strings.Split(msg, \" \")\n\thelpers.TRACE.Println(\"socket.io->Logon: IP\", ipNumbers)\n\n\tspaceID := ipNumbers[1]\n\thelpers.TRACE.Println(\"socket.io->Logon: SpaceID\", spaceID)\n\n\tredisDB := RedisPool.Get()\n\tdefer redisDB.Close()\n\n\tjsonSpace, err := redis.Bytes(redisDB.Do(\"GET\", spaceID))\n\tif err != nil {\n\n\t\tTRACE.Println(\"socket.io->Logon: newSpace\", err)\n\t\tspace = Space{\n\t\t\tSubnet: []Player{\n\t\t\t\t{\n\t\t\t\t\tLocalIP: ipNumbers[0],\n\t\t\t\t\tUserName: \"JonDoe\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tjsonSpace, err := json.Marshal(space)\n\t\tif err != nil {\n\t\t\tERROR.Println(\"socket.io->Logon json.Marshal error: \", err)\n\t\t}\n\t\t_, err = redisDB.Do(\"SET\", spaceID, jsonSpace)\n\t\tif err != nil {\n\t\t\tERROR.Println(\"socket.io->Logon RedisDB SET error: \", err)\n\t\t}\n\n\t} else {\n\t\terr = json.Unmarshal(jsonSpace, &space)\n\t\tif err != nil {\n\t\t\tERROR.Println(\"socket.io->Logon json.Unmarshal error: \", err)\n\t\t}\n\t}\n\n\tTRACE.Println(\"socket.io->Logon Result: \", space)\n}\n\nfunc JoinGame(so socketio.Socket, msg string) {\n\n\thelpers.TRACE.Println(\"socket.io: Join\", msg)\n\n\tso.Emit(\"channel\", \"abcde\")\n}\n<commit_msg>Logon test<commit_after>package main\n\nimport (\n\t\".\/helpers\"\n\t\"encoding\/json\"\n\t\"github.com\/garyburd\/redigo\/redis\"\n\t\"github.com\/googollee\/go-socket.io\"\n\t\"strings\"\n)\n\ntype Space struct {\n\tSpaceID string\n\tSpace []Player\n}\n\ntype Player struct {\n\tLocalIP string\n\tUserName string\n}\n\nfunc Adduser(msg string) {\n\thelpers.TRACE.Println(\"socket.io: adduser\", msg)\n\n}\n\nfunc Logon(msg string) {\n\n\tvar space Space\n\n\tipNumbers := strings.Split(msg, \" \")\n\thelpers.TRACE.Println(\"socket.io->Logon: IP\", ipNumbers)\n\n\tspaceID := ipNumbers[1]\n\thelpers.TRACE.Println(\"socket.io->Logon: SpaceID\", spaceID)\n\n\tredisDB := RedisPool.Get()\n\tdefer redisDB.Close()\n\n\tjsonSpace, err := redis.Bytes(redisDB.Do(\"GET\", spaceID))\n\tif err != nil {\n\n\t\tTRACE.Println(\"socket.io->Logon: newSpace\", err)\n\t\tspace = Space{\n\t\t\tSpaceID: spaceID,\n\t\t\tSpace: []Player{\n\t\t\t\t{\n\t\t\t\t\tLocalIP: ipNumbers[0],\n\t\t\t\t\tUserName: \"JonDoe\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tjsonSpace, err := json.Marshal(space)\n\t\tif err != nil {\n\t\t\tERROR.Println(\"socket.io->Logon json.Marshal error: \", err)\n\t\t}\n\t\t_, err = redisDB.Do(\"SET\", spaceID, jsonSpace)\n\t\tif err != nil {\n\t\t\tERROR.Println(\"socket.io->Logon RedisDB SET error: \", err)\n\t\t}\n\n\t} else {\n\t\terr = json.Unmarshal(jsonSpace, &space)\n\t\tif err != nil {\n\t\t\tERROR.Println(\"socket.io->Logon json.Unmarshal error: \", err)\n\t\t}\n\t}\n\n\tfor _, element := range space.Space {\n\t\tTRACE.Println(\"socket.io->Logon known LocalIP\", element.LocalIP, \"in Space\", spaceID)\n\t}\n\n}\n\nfunc JoinGame(so socketio.Socket, msg string) {\n\n\thelpers.TRACE.Println(\"socket.io: Join\", msg)\n\n\tso.Emit(\"channel\", \"abcde\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ vim: ts=8 sw=8 noet ai\n\npackage perigee\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\n\/\/ The UnexpectedResponseCodeError structure represents a mismatch in understanding between server and client in terms of response codes.\n\/\/ Most often, this is due to an actual error condition (e.g., getting a 404 for a resource when you expect a 200).\n\/\/ However, it needn't always be the case (e.g., getting a 204 (No Content) response back when a 200 is expected).\ntype UnexpectedResponseCodeError struct {\n\tUrl string\n\tExpected []int\n\tActual int\n\tBody []byte\n}\n\nfunc (err *UnexpectedResponseCodeError) Error() string {\n\treturn fmt.Sprintf(\"Expected HTTP response code %d when accessing URL(%s); got %d instead with the following body:\\n%s\", err.Expected, err.Url, err.Actual, string(err.Body))\n}\n\n\/\/ Request issues an HTTP request, marshaling parameters, and unmarshaling results, as configured in the provided Options parameter.\n\/\/ The Response structure returned, if any, will include accumulated results recovered from the HTTP server.\n\/\/ See the Response structure for more details.\nfunc Request(method string, url string, opts Options) (*Response, error) {\n\tvar body io.Reader\n\tvar response Response\n\n\tacceptableResponseCodes := opts.OkCodes\n\tif len(acceptableResponseCodes) == 0 {\n\t\tacceptableResponseCodes = []int{200}\n\t}\n\n\tclient := opts.CustomClient\n\tif client == nil {\n\t\tclient = new(http.Client)\n\t}\n\n\tcontentType := opts.ContentType\n\tif contentType == \"\" {\n\t\tcontentType = \"application\/json\"\n\t}\n\n\taccept := opts.Accept\n\tif accept == \"\" {\n\t\taccept = \"application\/json\"\n\t}\n\n\tbody = nil\n\tif opts.ReqBody != nil {\n\t\tif contentType == \"application\/json\" {\n\t\t\tbodyText, err := json.Marshal(opts.ReqBody)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbody = strings.NewReader(string(bodyText))\n\t\t\tif opts.DumpReqJson {\n\t\t\t\tlog.Printf(\"Making request:\\n%#v\\n\", string(bodyText))\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ assume opts.ReqBody implements the correct interface\n\t\t\tbody = opts.ReqBody.(io.Reader)\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(method, url, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"Content-Type\", contentType)\n\treq.Header.Add(\"Accept\", accept)\n\n\tif opts.ContentLength > 0 {\n\t\treq.ContentLength = opts.ContentLength\n\t\treq.Header.Add(\"Content-Length\", string(opts.ContentLength))\n\t}\n\n\tif opts.MoreHeaders != nil {\n\t\tfor k, v := range opts.MoreHeaders {\n\t\t\treq.Header.Add(k, v)\n\t\t}\n\t}\n\n\tif opts.SetHeaders != nil {\n\t\terr = opts.SetHeaders(req)\n\t\tif err != nil {\n\t\t\treturn &response, err\n\t\t}\n\t}\n\n\thttpResponse, err := client.Do(req)\n\tif httpResponse != nil {\n\t\tresponse.HttpResponse = *httpResponse\n\t\tresponse.StatusCode = httpResponse.StatusCode\n\t}\n\n\tif err != nil {\n\t\treturn &response, err\n\t}\n\t\/\/ This if-statement is legacy code, preserved for backward compatibility.\n\tif opts.StatusCode != nil {\n\t\t*opts.StatusCode = httpResponse.StatusCode\n\t}\n\tif not_in(httpResponse.StatusCode, acceptableResponseCodes) {\n\t\tb, _ := ioutil.ReadAll(httpResponse.Body)\n\t\thttpResponse.Body.Close()\n\t\treturn &response, &UnexpectedResponseCodeError{\n\t\t\tUrl: url,\n\t\t\tExpected: acceptableResponseCodes,\n\t\t\tActual: httpResponse.StatusCode,\n\t\t\tBody: b,\n\t\t}\n\t}\n\tif opts.Results != nil {\n\t\tdefer httpResponse.Body.Close()\n\t\tjsonResult, err := ioutil.ReadAll(httpResponse.Body)\n\t\tresponse.JsonResult = jsonResult\n\t\tif err != nil {\n\t\t\treturn &response, err\n\t\t}\n\n\t\terr = json.Unmarshal(jsonResult, opts.Results)\n\t\t\/\/ This if-statement is legacy code, preserved for backward compatibility.\n\t\tif opts.ResponseJson != nil {\n\t\t\t*opts.ResponseJson = jsonResult\n\t\t}\n\t}\n\treturn &response, err\n}\n\n\/\/ not_in returns false if, and only if, the provided needle is _not_\n\/\/ in the given set of integers.\nfunc not_in(needle int, haystack []int) bool {\n\tfor _, straw := range haystack {\n\t\tif needle == straw {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Post makes a POST request against a server using the provided HTTP client.\n\/\/ The url must be a fully-formed URL string.\n\/\/ DEPRECATED. Use Request() instead.\nfunc Post(url string, opts Options) error {\n\tr, err := Request(\"POST\", url, opts)\n\tif opts.Response != nil {\n\t\t*opts.Response = r\n\t}\n\treturn err\n}\n\n\/\/ Get makes a GET request against a server using the provided HTTP client.\n\/\/ The url must be a fully-formed URL string.\n\/\/ DEPRECATED. Use Request() instead.\nfunc Get(url string, opts Options) error {\n\tr, err := Request(\"GET\", url, opts)\n\tif opts.Response != nil {\n\t\t*opts.Response = r\n\t}\n\treturn err\n}\n\n\/\/ Delete makes a DELETE request against a server using the provided HTTP client.\n\/\/ The url must be a fully-formed URL string.\n\/\/ DEPRECATED. Use Request() instead.\nfunc Delete(url string, opts Options) error {\n\tr, err := Request(\"DELETE\", url, opts)\n\tif opts.Response != nil {\n\t\t*opts.Response = r\n\t}\n\treturn err\n}\n\n\/\/ Put makes a PUT request against a server using the provided HTTP client.\n\/\/ The url must be a fully-formed URL string.\n\/\/ DEPRECATED. Use Request() instead.\nfunc Put(url string, opts Options) error {\n\tr, err := Request(\"PUT\", url, opts)\n\tif opts.Response != nil {\n\t\t*opts.Response = r\n\t}\n\treturn err\n}\n\n\/\/ Options describes a set of optional parameters to the various request calls.\n\/\/\n\/\/ The custom client can be used for a variety of purposes beyond selecting encrypted versus unencrypted channels.\n\/\/ Transports can be defined to provide augmented logging, header manipulation, et. al.\n\/\/\n\/\/ If the ReqBody field is provided, it will be embedded as a JSON object.\n\/\/ Otherwise, provide nil.\n\/\/\n\/\/ If JSON output is to be expected from the response,\n\/\/ provide either a pointer to the container structure in Results,\n\/\/ or a pointer to a nil-initialized pointer variable.\n\/\/ The latter method will cause the unmarshaller to allocate the container type for you.\n\/\/ If no response is expected, provide a nil Results value.\n\/\/\n\/\/ The MoreHeaders map, if non-nil or empty, provides a set of headers to add to those\n\/\/ already present in the request. At present, only Accepted and Content-Type are set\n\/\/ by default.\n\/\/\n\/\/ OkCodes provides a set of acceptable, positive responses.\n\/\/\n\/\/ If provided, StatusCode specifies a pointer to an integer, which will receive the\n\/\/ returned HTTP status code, successful or not. DEPRECATED; use the Response.StatusCode field instead for new software.\n\/\/\n\/\/ ResponseJson, if specified, provides a means for returning the raw JSON. This is\n\/\/ most useful for diagnostics. DEPRECATED; use the Response.JsonResult field instead for new software.\n\/\/\n\/\/ DumpReqJson, if set to true, will cause the request to appear to stdout for debugging purposes.\n\/\/ This attribute may be removed at any time in the future; DO NOT use this attribute in production software.\n\/\/\n\/\/ Response, if set, provides a way to communicate the complete set of HTTP response, raw JSON, status code, and\n\/\/ other useful attributes back to the caller. Note that the Request() method returns a Response structure as part\n\/\/ of its public interface; you don't need to set the Response field here to use this structure. The Response field\n\/\/ exists primarily for legacy or deprecated functions.\n\/\/\n\/\/ SetHeaders allows the caller to provide code to set any custom headers programmatically. Typically, this\n\/\/ facility can invoke, e.g., SetBasicAuth() on the request to easily set up authentication.\n\/\/ Any error generated will terminate the request and will propegate back to the caller.\ntype Options struct {\n\tCustomClient *http.Client\n\tReqBody interface{}\n\tResults interface{}\n\tMoreHeaders map[string]string\n\tOkCodes []int\n\tStatusCode *int `DEPRECATED`\n\tDumpReqJson bool `UNSUPPORTED`\n\tResponseJson *[]byte `DEPRECATED`\n\tResponse **Response\n\tContentType string `json:\"Content-Type,omitempty\"`\n\tContentLength int64 `json:\"Content-Length,omitempty\"`\n\tAccept string `json:\"Accept,omitempty\"`\n\tSetHeaders func(r *http.Request) error\n}\n\n\/\/ Response contains return values from the various request calls.\n\/\/\n\/\/ HttpResponse will return the http response from the request call.\n\/\/ Note: HttpResponse.Body is always closed and will not be available from this return value.\n\/\/\n\/\/ StatusCode specifies the returned HTTP status code, successful or not.\n\/\/\n\/\/ If Results is specified in the Options:\n\/\/ - JsonResult will contain the raw return from the request call\n\/\/ This is most useful for diagnostics.\n\/\/ - Result will contain the unmarshalled json either in the Result passed in\n\/\/ or the unmarshaller will allocate the container type for you.\n\ntype Response struct {\n\tHttpResponse http.Response\n\tJsonResult []byte\n\tResults interface{}\n\tStatusCode int\n}\n<commit_msg>allow user to handle http response codes<commit_after>\/\/ vim: ts=8 sw=8 noet ai\n\npackage perigee\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"strings\"\n)\n\n\/\/ The UnexpectedResponseCodeError structure represents a mismatch in understanding between server and client in terms of response codes.\n\/\/ Most often, this is due to an actual error condition (e.g., getting a 404 for a resource when you expect a 200).\n\/\/ However, it needn't always be the case (e.g., getting a 204 (No Content) response back when a 200 is expected).\ntype UnexpectedResponseCodeError struct {\n\tUrl string\n\tExpected []int\n\tActual int\n\tBody []byte\n}\n\nfunc (err *UnexpectedResponseCodeError) Error() string {\n\treturn fmt.Sprintf(\"Expected HTTP response code %d when accessing URL(%s); got %d instead with the following body:\\n%s\", err.Expected, err.Url, err.Actual, string(err.Body))\n}\n\n\/\/ Request issues an HTTP request, marshaling parameters, and unmarshaling results, as configured in the provided Options parameter.\n\/\/ The Response structure returned, if any, will include accumulated results recovered from the HTTP server.\n\/\/ See the Response structure for more details.\nfunc Request(method string, url string, opts Options) (*Response, error) {\n\tvar body io.Reader\n\tvar response Response\n\n\tclient := opts.CustomClient\n\tif client == nil {\n\t\tclient = new(http.Client)\n\t}\n\n\tcontentType := opts.ContentType\n\tif contentType == \"\" {\n\t\tcontentType = \"application\/json\"\n\t}\n\n\taccept := opts.Accept\n\tif accept == \"\" {\n\t\taccept = \"application\/json\"\n\t}\n\n\tbody = nil\n\tif opts.ReqBody != nil {\n\t\tif contentType == \"application\/json\" {\n\t\t\tbodyText, err := json.Marshal(opts.ReqBody)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbody = strings.NewReader(string(bodyText))\n\t\t\tif opts.DumpReqJson {\n\t\t\t\tlog.Printf(\"Making request:\\n%#v\\n\", string(bodyText))\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ assume opts.ReqBody implements the correct interface\n\t\t\tbody = opts.ReqBody.(io.Reader)\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(method, url, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"Content-Type\", contentType)\n\treq.Header.Add(\"Accept\", accept)\n\n\tif opts.ContentLength > 0 {\n\t\treq.ContentLength = opts.ContentLength\n\t\treq.Header.Add(\"Content-Length\", string(opts.ContentLength))\n\t}\n\n\tif opts.MoreHeaders != nil {\n\t\tfor k, v := range opts.MoreHeaders {\n\t\t\treq.Header.Add(k, v)\n\t\t}\n\t}\n\n\tif opts.SetHeaders != nil {\n\t\terr = opts.SetHeaders(req)\n\t\tif err != nil {\n\t\t\treturn &response, err\n\t\t}\n\t}\n\n\thttpResponse, err := client.Do(req)\n\tif httpResponse != nil {\n\t\tresponse.HttpResponse = *httpResponse\n\t\tresponse.StatusCode = httpResponse.StatusCode\n\t}\n\n\tif err != nil {\n\t\treturn &response, err\n\t}\n\t\/\/ This if-statement is legacy code, preserved for backward compatibility.\n\tif opts.StatusCode != nil {\n\t\t*opts.StatusCode = httpResponse.StatusCode\n\t}\n\n\tacceptableResponseCodes := opts.OkCodes\n\tif len(acceptableResponseCodes) != 0 {\n\t\tif not_in(httpResponse.StatusCode, acceptableResponseCodes) {\n\t\t\tb, _ := ioutil.ReadAll(httpResponse.Body)\n\t\t\thttpResponse.Body.Close()\n\t\t\treturn &response, &UnexpectedResponseCodeError{\n\t\t\t\tUrl: url,\n\t\t\t\tExpected: acceptableResponseCodes,\n\t\t\t\tActual: httpResponse.StatusCode,\n\t\t\t\tBody: b,\n\t\t\t}\n\t\t}\n\t}\n\tif opts.Results != nil {\n\t\tdefer httpResponse.Body.Close()\n\t\tjsonResult, err := ioutil.ReadAll(httpResponse.Body)\n\t\tresponse.JsonResult = jsonResult\n\t\tif err != nil {\n\t\t\treturn &response, err\n\t\t}\n\n\t\terr = json.Unmarshal(jsonResult, opts.Results)\n\t\t\/\/ This if-statement is legacy code, preserved for backward compatibility.\n\t\tif opts.ResponseJson != nil {\n\t\t\t*opts.ResponseJson = jsonResult\n\t\t}\n\t}\n\treturn &response, err\n}\n\n\/\/ not_in returns false if, and only if, the provided needle is _not_\n\/\/ in the given set of integers.\nfunc not_in(needle int, haystack []int) bool {\n\tfor _, straw := range haystack {\n\t\tif needle == straw {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Post makes a POST request against a server using the provided HTTP client.\n\/\/ The url must be a fully-formed URL string.\n\/\/ DEPRECATED. Use Request() instead.\nfunc Post(url string, opts Options) error {\n\tr, err := Request(\"POST\", url, opts)\n\tif opts.Response != nil {\n\t\t*opts.Response = r\n\t}\n\treturn err\n}\n\n\/\/ Get makes a GET request against a server using the provided HTTP client.\n\/\/ The url must be a fully-formed URL string.\n\/\/ DEPRECATED. Use Request() instead.\nfunc Get(url string, opts Options) error {\n\tr, err := Request(\"GET\", url, opts)\n\tif opts.Response != nil {\n\t\t*opts.Response = r\n\t}\n\treturn err\n}\n\n\/\/ Delete makes a DELETE request against a server using the provided HTTP client.\n\/\/ The url must be a fully-formed URL string.\n\/\/ DEPRECATED. Use Request() instead.\nfunc Delete(url string, opts Options) error {\n\tr, err := Request(\"DELETE\", url, opts)\n\tif opts.Response != nil {\n\t\t*opts.Response = r\n\t}\n\treturn err\n}\n\n\/\/ Put makes a PUT request against a server using the provided HTTP client.\n\/\/ The url must be a fully-formed URL string.\n\/\/ DEPRECATED. Use Request() instead.\nfunc Put(url string, opts Options) error {\n\tr, err := Request(\"PUT\", url, opts)\n\tif opts.Response != nil {\n\t\t*opts.Response = r\n\t}\n\treturn err\n}\n\n\/\/ Options describes a set of optional parameters to the various request calls.\n\/\/\n\/\/ The custom client can be used for a variety of purposes beyond selecting encrypted versus unencrypted channels.\n\/\/ Transports can be defined to provide augmented logging, header manipulation, et. al.\n\/\/\n\/\/ If the ReqBody field is provided, it will be embedded as a JSON object.\n\/\/ Otherwise, provide nil.\n\/\/\n\/\/ If JSON output is to be expected from the response,\n\/\/ provide either a pointer to the container structure in Results,\n\/\/ or a pointer to a nil-initialized pointer variable.\n\/\/ The latter method will cause the unmarshaller to allocate the container type for you.\n\/\/ If no response is expected, provide a nil Results value.\n\/\/\n\/\/ The MoreHeaders map, if non-nil or empty, provides a set of headers to add to those\n\/\/ already present in the request. At present, only Accepted and Content-Type are set\n\/\/ by default.\n\/\/\n\/\/ OkCodes provides a set of acceptable, positive responses.\n\/\/\n\/\/ If provided, StatusCode specifies a pointer to an integer, which will receive the\n\/\/ returned HTTP status code, successful or not. DEPRECATED; use the Response.StatusCode field instead for new software.\n\/\/\n\/\/ ResponseJson, if specified, provides a means for returning the raw JSON. This is\n\/\/ most useful for diagnostics. DEPRECATED; use the Response.JsonResult field instead for new software.\n\/\/\n\/\/ DumpReqJson, if set to true, will cause the request to appear to stdout for debugging purposes.\n\/\/ This attribute may be removed at any time in the future; DO NOT use this attribute in production software.\n\/\/\n\/\/ Response, if set, provides a way to communicate the complete set of HTTP response, raw JSON, status code, and\n\/\/ other useful attributes back to the caller. Note that the Request() method returns a Response structure as part\n\/\/ of its public interface; you don't need to set the Response field here to use this structure. The Response field\n\/\/ exists primarily for legacy or deprecated functions.\n\/\/\n\/\/ SetHeaders allows the caller to provide code to set any custom headers programmatically. Typically, this\n\/\/ facility can invoke, e.g., SetBasicAuth() on the request to easily set up authentication.\n\/\/ Any error generated will terminate the request and will propegate back to the caller.\ntype Options struct {\n\tCustomClient *http.Client\n\tReqBody interface{}\n\tResults interface{}\n\tMoreHeaders map[string]string\n\tOkCodes []int\n\tStatusCode *int `DEPRECATED`\n\tDumpReqJson bool `UNSUPPORTED`\n\tResponseJson *[]byte `DEPRECATED`\n\tResponse **Response\n\tContentType string `json:\"Content-Type,omitempty\"`\n\tContentLength int64 `json:\"Content-Length,omitempty\"`\n\tAccept string `json:\"Accept,omitempty\"`\n\tSetHeaders func(r *http.Request) error\n}\n\n\/\/ Response contains return values from the various request calls.\n\/\/\n\/\/ HttpResponse will return the http response from the request call.\n\/\/ Note: HttpResponse.Body is always closed and will not be available from this return value.\n\/\/\n\/\/ StatusCode specifies the returned HTTP status code, successful or not.\n\/\/\n\/\/ If Results is specified in the Options:\n\/\/ - JsonResult will contain the raw return from the request call\n\/\/ This is most useful for diagnostics.\n\/\/ - Result will contain the unmarshalled json either in the Result passed in\n\/\/ or the unmarshaller will allocate the container type for you.\n\ntype Response struct {\n\tHttpResponse http.Response\n\tJsonResult []byte\n\tResults interface{}\n\tStatusCode int\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Smartling\/api-sdk-go\"\n\t\"github.com\/codegangsta\/cli\"\n)\n\nfunc PrintList(uriMask string, olderThan time.Duration) {\n\treq := smartling.FilesListRequest{\n\t\tURIMask: uriMask,\n\t}\n\n\tif olderThan > 0 {\n\t\treq.LastUploadedBefore = smartling.UTC{Time: time.Now().Add(-olderThan)}\n\t}\n\n\tfiles, err := client.List(req)\n\tlogAndQuitIfError(err)\n\n\tfor _, f := range files.Items {\n\t\tfmt.Println(f.FileURI)\n\t}\n}\n\nvar LsCommand = cli.Command{\n\tName: \"ls\",\n\tUsage: \"list remote files\",\n\tDescription: \"ls [<uriMask>]\",\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"older-than\",\n\t\t},\n\t},\n\tBefore: cmdBefore,\n\tAction: func(c *cli.Context) {\n\t\tif len(c.Args()) > 1 {\n\t\t\tlog.Println(\"Wrong number of arguments\")\n\t\t\tlog.Fatalln(\"Usage: ls [<uriMask>]\")\n\t\t}\n\t\turiMask := c.Args().Get(0)\n\n\t\tvar d time.Duration\n\t\tif len(c.String(\"older-than\")) > 0 {\n\t\t\tvar err error\n\t\t\td, err = time.ParseDuration(c.String(\"older-than\"))\n\t\t\tlogAndQuitIfError(err)\n\t\t}\n\n\t\tPrintList(uriMask, d)\n\t},\n}\n\nfunc PrintFileStatus(remotepath, locale string) {\n\tf, err := client.Status(remotepath, locale)\n\tlogAndQuitIfError(err)\n\n\tfmt.Println(\"File \", f.FileURI)\n\tfmt.Println(\"String Count \", f.TotalStringCount)\n\tfmt.Println(\"Word Count \", f.TotalWordCount)\n\tfmt.Println(\"Authorized String Count \", f.AuthorizedStringCount)\n\tfmt.Println(\"Completed String Count \", f.CompletedStringCount)\n\tfmt.Println(\"Excluded String Count \", f.ExcludedStringCount)\n\tfmt.Println(\"Last Uploaded \", f.LastUploaded)\n\tfmt.Println(\"File Type \", f.FileType)\n}\n\nvar StatusCommand = cli.Command{\n\tName: \"stat\",\n\tUsage: \"display the translation status of a remote file\",\n\tDescription: \"stat <remote file> <locale>\",\n\tBefore: cmdBefore,\n\tAction: func(c *cli.Context) {\n\t\tif len(c.Args()) != 2 {\n\t\t\tlog.Println(\"Wrong number of arguments\")\n\t\t\tlog.Fatalln(\"Usage: stat <remote file> <locale>\")\n\t\t}\n\n\t\tremotepath := c.Args().Get(0)\n\t\tlocale := c.Args().Get(1)\n\n\t\tPrintFileStatus(remotepath, locale)\n\t},\n}\n\nvar GetCommand = cli.Command{\n\tName: \"get\",\n\tUsage: \"downloads a remote file\",\n\tDescription: \"get <remote file>\",\n\tBefore: cmdBefore,\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"locale\",\n\t\t},\n\t},\n\tAction: func(c *cli.Context) {\n\t\tif len(c.Args()) != 1 {\n\t\t\tlog.Println(\"Wrong number of arguments\")\n\t\t\tlog.Fatalln(\"Usage: get <remote file>\")\n\t\t}\n\n\t\tremotepath := c.Args().Get(0)\n\t\tlocale := c.String(\"locale\")\n\n\t\tvar (\n\t\t\tb []byte\n\t\t\terr error\n\t\t)\n\n\t\tif locale == \"\" {\n\t\t\tb, err = client.Download(remotepath)\n\t\t} else {\n\t\t\tb, err = client.DownloadTranslation(locale, smartling.FileDownloadRequest{\n\t\t\t\tFileURIRequest: smartling.FileURIRequest{FileURI: remotepath},\n\t\t\t})\n\t\t}\n\t\tlogAndQuitIfError(err)\n\n\t\tfmt.Println(string(b))\n\t},\n}\n\nvar PutCommand = cli.Command{\n\tName: \"put\",\n\tUsage: \"uploads a local file\",\n\tDescription: \"put <local file>\",\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"filetype\",\n\t\t}, cli.StringFlag{\n\t\t\tName: \"parserconfig\",\n\t\t}, cli.BoolFlag{\n\t\t\tName: \"approve\",\n\t\t},\n\t},\n\tBefore: cmdBefore,\n\tAction: func(c *cli.Context) {\n\t\tif len(c.Args()) != 1 {\n\t\t\tlog.Println(\"Wrong number of arguments\")\n\t\t\tlog.Fatalln(\"Usage: put <local file>\")\n\t\t}\n\n\t\tlocalpath := c.Args().Get(0)\n\n\t\tft := smartling.FileType(c.String(\"filetype\"))\n\t\tif ft == \"\" {\n\t\t\tft = smartling.GetFileTypeByExtension(filepath.Ext(localpath))\n\t\t}\n\n\t\tparserconfig := map[string]string{}\n\t\tif c.String(\"parserconfig\") != \"\" {\n\t\t\tparts := strings.Split(c.String(\"parserconfig\"), \",\")\n\t\t\tif len(parts)%2 == 1 {\n\t\t\t\tlog.Fatalln(\"parserconfig must be in the format --parserconfig=key1,value1,key2,value2\")\n\t\t\t}\n\t\t\tfor i := 0; i < len(parts); i += 2 {\n\t\t\t\tparserconfig[parts[i]] = parts[i+1]\n\t\t\t}\n\t\t}\n\n\t\tf, err := ioutil.ReadFile(localpath)\n\t\tlogAndQuitIfError(err)\n\n\t\tr, err := client.Upload(&smartling.FileUploadRequest{\n\t\t\tFile: f,\n\t\t\tFileType: ft,\n\t\t\tAuthorize: c.Bool(\"approve\"),\n\t\t\tFileURIRequest: smartling.FileURIRequest{FileURI: localpath},\n\t\t})\n\n\t\tlogAndQuitIfError(err)\n\n\t\tfmt.Println(\"Overwritten: \", r.Overwritten)\n\t\tfmt.Println(\"String Count:\", r.StringCount)\n\t\tfmt.Println(\"Word Count: \", r.WordCount)\n\t},\n}\n\nvar RenameCommand = cli.Command{\n\tName: \"rename\",\n\tUsage: \"renames a remote file\",\n\tDescription: \"rename <remote file name> <new smartling file name>\",\n\tBefore: cmdBefore,\n\tAction: func(c *cli.Context) {\n\t\tif len(c.Args()) != 2 {\n\t\t\tlog.Println(\"Wrong number of arguments\")\n\t\t\tlog.Fatalln(\"Usage: rename <remote file> <new smartling file name>\")\n\t\t}\n\n\t\tremotepath := c.Args().Get(0)\n\t\tnewremotepath := c.Args().Get(1)\n\n\t\terr := client.Rename(remotepath, newremotepath)\n\n\t\tlogAndQuitIfError(err)\n\t},\n}\n\nvar RmCommand = cli.Command{\n\tName: \"rm\",\n\tUsage: \"removes a remote file\",\n\tDescription: \"rm <remote file>...\",\n\tBefore: cmdBefore,\n\tAction: func(c *cli.Context) {\n\t\tif len(c.Args()) < 1 {\n\t\t\tlog.Println(\"Wrong number of arguments\")\n\t\t\tlog.Fatalln(\"Usage: rm <remote file>...\")\n\t\t}\n\n\t\tfor _, remotepath := range c.Args() {\n\t\t\tlogAndQuitIfError(client.Delete(remotepath))\n\t\t}\n\t},\n}\n\nvar LastmodifiedCommand = cli.Command{\n\tName: \"lastmodified\",\n\tUsage: \"shows when a remote file was modified last\",\n\tDescription: \"lastmodified <remote file>\",\n\tBefore: cmdBefore,\n\tAction: func(c *cli.Context) {\n\t\tif len(c.Args()) != 1 {\n\t\t\tlog.Println(\"Wrong number of arguments\")\n\t\t\tlog.Fatalln(\"Usage: lastmodified <remote file>\")\n\t\t}\n\n\t\tremotepath := c.Args().Get(0)\n\n\t\tlocales, err := client.LastModified(smartling.FileLastModifiedRequest{\n\t\t\tFileURIRequest: smartling.FileURIRequest{FileURI: remotepath},\n\t\t})\n\t\tlogAndQuitIfError(err)\n\n\t\tfor _, i := range locales.Items {\n\t\t\tt := time.Time(i.LastModified.Time).Format(\"2 Jan 3:04\")\n\t\t\tfmt.Printf(\"%s %s\\n\", i.LocaleID, t)\n\t\t}\n\t},\n}\n\nvar LocalesCommand = cli.Command{\n\tName: \"locales\",\n\tUsage: \"list the locales for the project\",\n\tBefore: cmdBefore,\n\tAction: func(c *cli.Context) {\n\t\tif len(c.Args()) != 0 {\n\t\t\tlog.Println(\"Wrong number of arguments\")\n\t\t\tlog.Fatalln(\"Usage: locales\")\n\t\t}\n\n\t\ttl, err := client.Locales()\n\t\tlogAndQuitIfError(err)\n\n\t\tfor _, l := range tl {\n\t\t\tif l.Enabled {\n\t\t\t\tfmt.Printf(\"%-5s %s\\n\", l.LocaleID, l.Description)\n\t\t\t}\n\t\t}\n\t},\n}\n<commit_msg>Bring back remotepath option for put command<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/Smartling\/api-sdk-go\"\n\t\"github.com\/codegangsta\/cli\"\n)\n\nfunc PrintList(uriMask string, olderThan time.Duration) {\n\treq := smartling.FilesListRequest{\n\t\tURIMask: uriMask,\n\t}\n\n\tif olderThan > 0 {\n\t\treq.LastUploadedBefore = smartling.UTC{Time: time.Now().Add(-olderThan)}\n\t}\n\n\tfiles, err := client.List(req)\n\tlogAndQuitIfError(err)\n\n\tfor _, f := range files.Items {\n\t\tfmt.Println(f.FileURI)\n\t}\n}\n\nvar LsCommand = cli.Command{\n\tName: \"ls\",\n\tUsage: \"list remote files\",\n\tDescription: \"ls [<uriMask>]\",\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"older-than\",\n\t\t},\n\t},\n\tBefore: cmdBefore,\n\tAction: func(c *cli.Context) {\n\t\tif len(c.Args()) > 1 {\n\t\t\tlog.Println(\"Wrong number of arguments\")\n\t\t\tlog.Fatalln(\"Usage: ls [<uriMask>]\")\n\t\t}\n\t\turiMask := c.Args().Get(0)\n\n\t\tvar d time.Duration\n\t\tif len(c.String(\"older-than\")) > 0 {\n\t\t\tvar err error\n\t\t\td, err = time.ParseDuration(c.String(\"older-than\"))\n\t\t\tlogAndQuitIfError(err)\n\t\t}\n\n\t\tPrintList(uriMask, d)\n\t},\n}\n\nfunc PrintFileStatus(remotepath, locale string) {\n\tf, err := client.Status(remotepath, locale)\n\tlogAndQuitIfError(err)\n\n\tfmt.Println(\"File \", f.FileURI)\n\tfmt.Println(\"String Count \", f.TotalStringCount)\n\tfmt.Println(\"Word Count \", f.TotalWordCount)\n\tfmt.Println(\"Authorized String Count \", f.AuthorizedStringCount)\n\tfmt.Println(\"Completed String Count \", f.CompletedStringCount)\n\tfmt.Println(\"Excluded String Count \", f.ExcludedStringCount)\n\tfmt.Println(\"Last Uploaded \", f.LastUploaded)\n\tfmt.Println(\"File Type \", f.FileType)\n}\n\nvar StatusCommand = cli.Command{\n\tName: \"stat\",\n\tUsage: \"display the translation status of a remote file\",\n\tDescription: \"stat <remote file> <locale>\",\n\tBefore: cmdBefore,\n\tAction: func(c *cli.Context) {\n\t\tif len(c.Args()) != 2 {\n\t\t\tlog.Println(\"Wrong number of arguments\")\n\t\t\tlog.Fatalln(\"Usage: stat <remote file> <locale>\")\n\t\t}\n\n\t\tremotepath := c.Args().Get(0)\n\t\tlocale := c.Args().Get(1)\n\n\t\tPrintFileStatus(remotepath, locale)\n\t},\n}\n\nvar GetCommand = cli.Command{\n\tName: \"get\",\n\tUsage: \"downloads a remote file\",\n\tDescription: \"get <remote file>\",\n\tBefore: cmdBefore,\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"locale\",\n\t\t},\n\t},\n\tAction: func(c *cli.Context) {\n\t\tif len(c.Args()) != 1 {\n\t\t\tlog.Println(\"Wrong number of arguments\")\n\t\t\tlog.Fatalln(\"Usage: get <remote file>\")\n\t\t}\n\n\t\tremotepath := c.Args().Get(0)\n\t\tlocale := c.String(\"locale\")\n\n\t\tvar (\n\t\t\tb []byte\n\t\t\terr error\n\t\t)\n\n\t\tif locale == \"\" {\n\t\t\tb, err = client.Download(remotepath)\n\t\t} else {\n\t\t\tb, err = client.DownloadTranslation(locale, smartling.FileDownloadRequest{\n\t\t\t\tFileURIRequest: smartling.FileURIRequest{FileURI: remotepath},\n\t\t\t})\n\t\t}\n\t\tlogAndQuitIfError(err)\n\n\t\tfmt.Println(string(b))\n\t},\n}\n\nvar PutCommand = cli.Command{\n\tName: \"put\",\n\tUsage: \"uploads a local file\",\n\tDescription: \"put <local file> <remote file>\",\n\tFlags: []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"filetype\",\n\t\t}, cli.StringFlag{\n\t\t\tName: \"parserconfig\",\n\t\t}, cli.BoolFlag{\n\t\t\tName: \"approve\",\n\t\t},\n\t},\n\tBefore: cmdBefore,\n\tAction: func(c *cli.Context) {\n\t\tif len(c.Args()) != 2 {\n\t\t\tlog.Println(\"Wrong number of arguments\")\n\t\t\tlog.Fatalln(\"Usage: put <local file> <remote file>\")\n\t\t}\n\n\t\tlocalpath := c.Args().Get(0)\n\t\tremotepath := c.Args().Get(1)\n\n\t\tft := smartling.FileType(c.String(\"filetype\"))\n\t\tif ft == \"\" {\n\t\t\tft = smartling.GetFileTypeByExtension(filepath.Ext(localpath))\n\t\t}\n\n\t\tparserconfig := map[string]string{}\n\t\tif c.String(\"parserconfig\") != \"\" {\n\t\t\tparts := strings.Split(c.String(\"parserconfig\"), \",\")\n\t\t\tif len(parts)%2 == 1 {\n\t\t\t\tlog.Fatalln(\"parserconfig must be in the format --parserconfig=key1,value1,key2,value2\")\n\t\t\t}\n\t\t\tfor i := 0; i < len(parts); i += 2 {\n\t\t\t\tparserconfig[parts[i]] = parts[i+1]\n\t\t\t}\n\t\t}\n\n\t\tf, err := ioutil.ReadFile(localpath)\n\t\tlogAndQuitIfError(err)\n\n\t\tr, err := client.Upload(&smartling.FileUploadRequest{\n\t\t\tFile: f,\n\t\t\tFileType: ft,\n\t\t\tAuthorize: c.Bool(\"approve\"),\n\t\t\tFileURIRequest: smartling.FileURIRequest{FileURI: remotepath},\n\t\t})\n\n\t\tlogAndQuitIfError(err)\n\n\t\tfmt.Println(\"Overwritten: \", r.Overwritten)\n\t\tfmt.Println(\"String Count:\", r.StringCount)\n\t\tfmt.Println(\"Word Count: \", r.WordCount)\n\t},\n}\n\nvar RenameCommand = cli.Command{\n\tName: \"rename\",\n\tUsage: \"renames a remote file\",\n\tDescription: \"rename <remote file name> <new smartling file name>\",\n\tBefore: cmdBefore,\n\tAction: func(c *cli.Context) {\n\t\tif len(c.Args()) != 2 {\n\t\t\tlog.Println(\"Wrong number of arguments\")\n\t\t\tlog.Fatalln(\"Usage: rename <remote file> <new smartling file name>\")\n\t\t}\n\n\t\tremotepath := c.Args().Get(0)\n\t\tnewremotepath := c.Args().Get(1)\n\n\t\terr := client.Rename(remotepath, newremotepath)\n\n\t\tlogAndQuitIfError(err)\n\t},\n}\n\nvar RmCommand = cli.Command{\n\tName: \"rm\",\n\tUsage: \"removes a remote file\",\n\tDescription: \"rm <remote file>...\",\n\tBefore: cmdBefore,\n\tAction: func(c *cli.Context) {\n\t\tif len(c.Args()) < 1 {\n\t\t\tlog.Println(\"Wrong number of arguments\")\n\t\t\tlog.Fatalln(\"Usage: rm <remote file>...\")\n\t\t}\n\n\t\tfor _, remotepath := range c.Args() {\n\t\t\tlogAndQuitIfError(client.Delete(remotepath))\n\t\t}\n\t},\n}\n\nvar LastmodifiedCommand = cli.Command{\n\tName: \"lastmodified\",\n\tUsage: \"shows when a remote file was modified last\",\n\tDescription: \"lastmodified <remote file>\",\n\tBefore: cmdBefore,\n\tAction: func(c *cli.Context) {\n\t\tif len(c.Args()) != 1 {\n\t\t\tlog.Println(\"Wrong number of arguments\")\n\t\t\tlog.Fatalln(\"Usage: lastmodified <remote file>\")\n\t\t}\n\n\t\tremotepath := c.Args().Get(0)\n\n\t\tlocales, err := client.LastModified(smartling.FileLastModifiedRequest{\n\t\t\tFileURIRequest: smartling.FileURIRequest{FileURI: remotepath},\n\t\t})\n\t\tlogAndQuitIfError(err)\n\n\t\tfor _, i := range locales.Items {\n\t\t\tt := time.Time(i.LastModified.Time).Format(\"2 Jan 3:04\")\n\t\t\tfmt.Printf(\"%s %s\\n\", i.LocaleID, t)\n\t\t}\n\t},\n}\n\nvar LocalesCommand = cli.Command{\n\tName: \"locales\",\n\tUsage: \"list the locales for the project\",\n\tBefore: cmdBefore,\n\tAction: func(c *cli.Context) {\n\t\tif len(c.Args()) != 0 {\n\t\t\tlog.Println(\"Wrong number of arguments\")\n\t\t\tlog.Fatalln(\"Usage: locales\")\n\t\t}\n\n\t\ttl, err := client.Locales()\n\t\tlogAndQuitIfError(err)\n\n\t\tfor _, l := range tl {\n\t\t\tif l.Enabled {\n\t\t\t\tfmt.Printf(\"%-5s %s\\n\", l.LocaleID, l.Description)\n\t\t\t}\n\t\t}\n\t},\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2015-2016, RadiantBlue Technologies, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/*\npzsvc-pdal provides an endpoint for accepting PDAL requests.\n\nExamples\n\n $ curl -v -X POST -H \"Content-Type: application\/json\" \\\n -d '{\"source\":{\"bucket\":\"venicegeo-sample-data\",\"key\":\"pointcloud\/samp11-utm.laz\"},\"function\":\"info\"}' http:\/\/hostIP:8080\/pdal\n\nWe shall see where we land with the input and output message for the job manager, but for now, we are expecting something along these lines.\n\nInput:\n\n\t{\n\t\t\"source\": {\n\t\t\t\"bucket\": \"venicegeo-sample-data\",\n\t\t\t\"key\": \"pointcloud\/samp11-utm.laz\"\n\t\t},\n\t\t\"function\": \"ground\",\n\t\t\"options\": {\n\t\t\t\"slope\": 0.5\n\t\t},\n\t\t\"destination\": {\n\t\t\t\"bucket\": \"venicegeo-sample-data\",\n\t\t\t\"key\" \"temp\/output.laz\"\n\t\t}\n\t}\n\nOutput:\n\n\t{\n\t\t\"input\": <echo the input message>,\n\t\t\"started_at\": \"2015-12-23T18:07:36.987565884Z\",\n\t\t\"finished_at\": \"2015-12-23T18:07:38.111658707Z\",\n\t\t\"code\": 200,\n\t\t\"message\": \"Success!\"\n\t}\n\nThese messages are known to be incomplete at the moment. I'm sure there will be things like job IDs, etc. that have not been included at the moment. This is a good starting point though.\n*\/\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"github.com\/venicegeo\/pzsvc-pdal\/functions\"\n\t\"github.com\/venicegeo\/pzsvc-pdal\/handlers\"\n\t\"github.com\/venicegeo\/pzsvc-sdk-go\/job\"\n)\n\nvar pdalMetadata = job.ResourceMetadata{\n\tName: \"pdal\",\n\tServiceID: \"\",\n\tDescription: \"Process point cloud data using PDAL.\",\n\tURL: \"https:\/\/api.piazzageo.io\/v1\/pdal\",\n\tNetworks: \"TBD\",\n\tQoS: \"Development\",\n\tAvailability: \"UP\",\n\tTags: \"point cloud, pdal, lidar\",\n\tClassType: \"Unclassified\",\n\t\/\/ TermDate: time.Now(),\n\t\/\/ ClientCertRequired: false,\n\t\/\/ CredentialsRequired: false,\n\t\/\/ PreAuthRequired: false,\n\t\/\/ Contracts: \"\",\n\t\/\/ Method: \"\",\n\t\/\/ MimeType: \"\",\n\t\/\/ Params: \"\",\n\t\/\/ Reason: \"\",\n}\n\nfunc main() {\n\t\/\/ For standalone demo purposes, we will start two services: our PDAL service, and a mocked up JobManager.\n\n\trouter := httprouter.New()\n\n\trouter.GET(\"\/\", func(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {\n\t\tfmt.Fprintf(w, \"Hi!\")\n\t})\n\n\ttype ListFuncs struct {\n\t\tFunctions []string `json:\"functions\"`\n\t}\n\tout := ListFuncs{[]string{\"crop\", \"dart\", \"dtm\", \"ground\", \"height\", \"info\", \"radius\", \"statistical\", \"translate\"}}\n\n\trouter.GET(\"\/functions\/:name\", func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=UTF-8\")\n\t\tvar a interface{}\n\t\tswitch ps.ByName(\"name\") {\n\t\tcase \"crop\":\n\t\t\ta = functions.NewCropOptions()\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\tcase \"dart\":\n\t\t\ta = functions.NewDartOptions()\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\tcase \"dtm\":\n\t\t\ta = functions.NewDtmOptions()\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\tcase \"ground\":\n\t\t\ta = functions.NewGroundOptions()\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\tcase \"height\":\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\tcase \"info\":\n\t\t\ta = functions.NewInfoOptions()\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\tcase \"radius\":\n\t\t\ta = functions.NewRadiusOptions()\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\tcase \"statistical\":\n\t\t\ta = functions.NewStatisticalOptions()\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\tcase \"translate\":\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\tdefault:\n\t\t\ttype DefaultMsg struct {\n\t\t\t\tMessage string `json:\"message\"`\n\t\t\t\tListFuncs\n\t\t\t}\n\t\t\tmsg := \"Unrecognized function \" + ps.ByName(\"name\") + \".\"\n\t\t\ta = DefaultMsg{msg, out}\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t}\n\t\tif err := json.NewEncoder(w).Encode(a); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t})\n\n\trouter.GET(\"\/functions\", func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=UTF-8\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tif err := json.NewEncoder(w).Encode(out); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t})\n\n\t\/\/ Setup the PDAL service.\n\trouter.POST(\"\/pdal\", handlers.PdalHandler)\n\n\t\/\/ Setup the mocked up JobManager.\n\t\/\/ router.POST(\"\/manager\", handlers.JobManagerHandler)\n\n\tvar defaultPort = os.Getenv(\"PORT\")\n\tif defaultPort == \"\" {\n\t\tdefaultPort = \"8080\"\n\t}\n\n\tlog.Println(\"Starting on port \", defaultPort)\n\tlog.Println(os.Getenv(\"PATH\"))\n\tlog.Println(os.Getenv(\"LD_LIBRARY_PATH\"))\n\tif err := http.ListenAndServe(\":\"+defaultPort, router); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<commit_msg>Drop the ResourceMetadata for now<commit_after>\/*\nCopyright 2015-2016, RadiantBlue Technologies, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/*\npzsvc-pdal provides an endpoint for accepting PDAL requests.\n\nExamples\n\n $ curl -v -X POST -H \"Content-Type: application\/json\" \\\n -d '{\"source\":{\"bucket\":\"venicegeo-sample-data\",\"key\":\"pointcloud\/samp11-utm.laz\"},\"function\":\"info\"}' http:\/\/hostIP:8080\/pdal\n\nWe shall see where we land with the input and output message for the job manager, but for now, we are expecting something along these lines.\n\nInput:\n\n\t{\n\t\t\"source\": {\n\t\t\t\"bucket\": \"venicegeo-sample-data\",\n\t\t\t\"key\": \"pointcloud\/samp11-utm.laz\"\n\t\t},\n\t\t\"function\": \"ground\",\n\t\t\"options\": {\n\t\t\t\"slope\": 0.5\n\t\t},\n\t\t\"destination\": {\n\t\t\t\"bucket\": \"venicegeo-sample-data\",\n\t\t\t\"key\" \"temp\/output.laz\"\n\t\t}\n\t}\n\nOutput:\n\n\t{\n\t\t\"input\": <echo the input message>,\n\t\t\"started_at\": \"2015-12-23T18:07:36.987565884Z\",\n\t\t\"finished_at\": \"2015-12-23T18:07:38.111658707Z\",\n\t\t\"code\": 200,\n\t\t\"message\": \"Success!\"\n\t}\n\nThese messages are known to be incomplete at the moment. I'm sure there will be things like job IDs, etc. that have not been included at the moment. This is a good starting point though.\n*\/\npackage main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"github.com\/venicegeo\/pzsvc-pdal\/functions\"\n\t\"github.com\/venicegeo\/pzsvc-pdal\/handlers\"\n)\n\n\/\/ var pdalMetadata = job.ResourceMetadata{\n\/\/ \tName: \"pdal\",\n\/\/ \tServiceID: \"\",\n\/\/ \tDescription: \"Process point cloud data using PDAL.\",\n\/\/ \tURL: \"https:\/\/api.piazzageo.io\/v1\/pdal\",\n\/\/ \tNetworks: \"TBD\",\n\/\/ \tQoS: \"Development\",\n\/\/ \tAvailability: \"UP\",\n\/\/ \tTags: \"point cloud, pdal, lidar\",\n\/\/ \tClassType: \"Unclassified\",\n\/\/ \t\/\/ TermDate: time.Now(),\n\/\/ \t\/\/ ClientCertRequired: false,\n\/\/ \t\/\/ CredentialsRequired: false,\n\/\/ \t\/\/ PreAuthRequired: false,\n\/\/ \t\/\/ Contracts: \"\",\n\/\/ \t\/\/ Method: \"\",\n\/\/ \t\/\/ MimeType: \"\",\n\/\/ \t\/\/ Params: \"\",\n\/\/ \t\/\/ Reason: \"\",\n\/\/ }\n\nfunc main() {\n\t\/\/ For standalone demo purposes, we will start two services: our PDAL service, and a mocked up JobManager.\n\n\trouter := httprouter.New()\n\n\trouter.GET(\"\/\", func(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) {\n\t\tfmt.Fprintf(w, \"Hi!\")\n\t})\n\n\ttype ListFuncs struct {\n\t\tFunctions []string `json:\"functions\"`\n\t}\n\tout := ListFuncs{[]string{\"crop\", \"dart\", \"dtm\", \"ground\", \"height\", \"info\", \"radius\", \"statistical\", \"translate\"}}\n\n\trouter.GET(\"\/functions\/:name\", func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=UTF-8\")\n\t\tvar a interface{}\n\t\tswitch ps.ByName(\"name\") {\n\t\tcase \"crop\":\n\t\t\ta = functions.NewCropOptions()\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\tcase \"dart\":\n\t\t\ta = functions.NewDartOptions()\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\tcase \"dtm\":\n\t\t\ta = functions.NewDtmOptions()\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\tcase \"ground\":\n\t\t\ta = functions.NewGroundOptions()\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\tcase \"height\":\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\tcase \"info\":\n\t\t\ta = functions.NewInfoOptions()\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\tcase \"radius\":\n\t\t\ta = functions.NewRadiusOptions()\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\tcase \"statistical\":\n\t\t\ta = functions.NewStatisticalOptions()\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\tcase \"translate\":\n\t\t\tw.WriteHeader(http.StatusOK)\n\n\t\tdefault:\n\t\t\ttype DefaultMsg struct {\n\t\t\t\tMessage string `json:\"message\"`\n\t\t\t\tListFuncs\n\t\t\t}\n\t\t\tmsg := \"Unrecognized function \" + ps.ByName(\"name\") + \".\"\n\t\t\ta = DefaultMsg{msg, out}\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t}\n\t\tif err := json.NewEncoder(w).Encode(a); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t})\n\n\trouter.GET(\"\/functions\", func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\t\tw.Header().Set(\"Content-Type\", \"application\/json; charset=UTF-8\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tif err := json.NewEncoder(w).Encode(out); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t})\n\n\t\/\/ Setup the PDAL service.\n\trouter.POST(\"\/pdal\", handlers.PdalHandler)\n\n\t\/\/ Setup the mocked up JobManager.\n\t\/\/ router.POST(\"\/manager\", handlers.JobManagerHandler)\n\n\tvar defaultPort = os.Getenv(\"PORT\")\n\tif defaultPort == \"\" {\n\t\tdefaultPort = \"8080\"\n\t}\n\n\tlog.Println(\"Starting on port \", defaultPort)\n\tlog.Println(os.Getenv(\"PATH\"))\n\tlog.Println(os.Getenv(\"LD_LIBRARY_PATH\"))\n\tif err := http.ListenAndServe(\":\"+defaultPort, router); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package mdqi\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"os\/exec\"\n\n\t\"github.com\/peterh\/liner\"\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\tdefaultMdqPath = \"mdq\"\n\tdefaultHistoryFilename = \".mdqi_history\"\n)\n\nvar Version string\n\nvar (\n\tErrSlashCommandNotFound = errors.New(\"unknown SlashCommand\")\n\tErrNotASlashCommand = errors.New(\"there are no SlashCommand\")\n\tErrSlashCommandInvalidArgs = errors.New(\"invalid args\")\n\tErrUnknownPrinterName = errors.New(\"unknown printer name\")\n)\n\ntype App struct {\n\t\/\/ Alive turns into false, mdqi will exit.\n\tAlive bool\n\n\t\/\/ mdqPath is path to mdq command.\n\tmdqPath string\n\n\t\/\/ mdqConfigPath is path to configuration file for mdq command.\n\tmdqConfigPath string\n\n\t\/\/ historyPath is path to command history file for liner.\n\thistoryPath string\n\n\t\/\/ slashCommandDefinition holds SlashCommandDefinition.\n\t\/\/ app.slashCommandDefinition[category][name] = SlashCommandDefinition\n\tslashCommandDefinition map[string]map[string]SlashCommandDefinition\n\n\t\/\/ tag stores tag value for --tag option of mdq.\n\ttag string\n\n\tprinter Printer\n}\n\ntype Result struct {\n\tDatabase string\n\tColumns []string\n\tRows []map[string]interface{}\n}\n\nfunc init() {\n\tdefaultOutput = os.Stdout\n}\n\nfunc NewApp(conf Conf) (*App, error) {\n\t\/\/ validate mdq path\n\tmdqPath := defaultMdqPath\n\tif path := conf.Mdq.Bin; path != \"\" {\n\t\tif err := lookMdqPath(conf.Mdq.Bin); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"mdq command not found at %s\", path)\n\t\t}\n\t\tmdqPath = path\n\t\tdebug.Println(\"conf.Mdq.Bin =\", path)\n\t}\n\n\t\/\/ mdq config path\n\tif path := conf.Mdq.Config; path != \"\" {\n\t\tdebug.Println(\"conf.Mdq.Config =\", path)\n\t}\n\n\t\/\/ create history file\n\thistoryPath := conf.Mdqi.History\n\tif path := conf.Mdqi.History; path != \"\" {\n\t\tvar err error\n\t\tif err = createHistoryFile(path); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to create history file at %s\", path)\n\t\t}\n\t\thistoryPath = path\n\t\tdebug.Println(\"conf.Mdqi.History =\", historyPath)\n\t}\n\n\tapp := &App{\n\t\tAlive: true,\n\n\t\tmdqPath: mdqPath,\n\t\tmdqConfigPath: conf.Mdq.Config,\n\t\thistoryPath: historyPath,\n\t\tslashCommandDefinition: map[string]map[string]SlashCommandDefinition{},\n\t\tprinter: HorizontalPrinter{},\n\t}\n\n\t\/\/ set default tag\n\tif tag := conf.Mdqi.DefaultTag; tag != \"\" {\n\t\tapp.SetTag(tag)\n\t\tdebug.Println(\"conf.Mdqi.DefaultTag =\", tag)\n\t}\n\n\t\/\/ set default display\n\tif display := conf.Mdqi.DefaultDisplay; display != \"\" {\n\t\tif err := app.SetPrinterByName(display); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to set default printer\")\n\t\t}\n\n\t\tdebug.Println(\"conf.Mdqi.DefaultDisplay =\", display)\n\t}\n\n\treturn app, nil\n}\n\nfunc createHistoryFile(path string) error {\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\tif _, err := os.Create(path); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to create history file\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc defaultHistoryPath() (string, error) {\n\tusr, err := user.Current()\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to get current user\")\n\t}\n\n\treturn filepath.Join(usr.HomeDir, defaultHistoryFilename), err\n}\n\nfunc lookMdqPath(path string) error {\n\t_, err := exec.LookPath(path)\n\n\treturn err\n}\n\nfunc (app *App) slashCommandCategories() []string {\n\tdefs := app.slashCommandDefinition\n\tkeys := make([]string, 0, len(defs))\n\n\tfor key := range defs {\n\t\tkeys = append(keys, key)\n\t}\n\n\treturn keys\n}\n\nfunc (app *App) slashCommandNames(category string) []string {\n\tdefs := app.slashCommandDefinition[category]\n\tkeys := make([]string, 0, len(defs))\n\n\tfor key := range defs {\n\t\tkeys = append(keys, key)\n\t}\n\n\treturn keys\n}\n\nfunc (app *App) Run() {\n\tapp.runLiner()\n}\n\nfunc (app *App) runLiner() {\n\tline := liner.NewLiner()\n\tdefer line.Close()\n\n\tline.SetCtrlCAborts(true)\n\n\tapp.initHistory(line)\n\n\trgxFinishLine := regexp.MustCompile(\";$\")\n\tlineFinished := true\n\n\tvar input string\n\tvar err error\n\nLOOP:\n\tfor {\n\t\tif !app.Alive {\n\t\t\tfmt.Println(\"bye\")\n\t\t\tbreak LOOP\n\t\t}\n\n\t\tif lineFinished {\n\t\t\tinput, err = line.Prompt(\"mdq> \")\n\t\t} else {\n\t\t\tvar l string\n\t\t\tl, err = line.Prompt(\" | \")\n\t\t\tinput = strings.Join([]string{input, l}, \" \")\n\t\t}\n\n\t\tswitch err {\n\t\tcase nil:\n\t\t\tinput = strings.Trim(input, \" \\n\")\n\n\t\t\tif lineFinished = rgxFinishLine.MatchString(input); lineFinished {\n\t\t\t} else {\n\t\t\t\t\/\/ If line is not finished, read next line as continue.\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif input == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tline.AppendHistory(input)\n\n\t\t\tscmd, _ := ParseSlashCommand(input)\n\t\t\tif scmd != nil {\n\t\t\t\tapp.runSlashCommand(scmd)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tresults, err := app.RunCmd(input, app.buildCmdArgs()...)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Println(err.Error())\n\t\t\t}\n\n\t\t\tPrint(app.printer, results)\n\t\tcase liner.ErrPromptAborted:\n\t\t\tlogger.Println(\"aborted\")\n\t\t\tbreak LOOP\n\t\tcase io.EOF:\n\t\t\tfmt.Println(\"bye\")\n\t\t\tbreak LOOP\n\t\tdefault:\n\t\t\tlogger.Println(\"error on reading line: \", err)\n\t\t\tbreak LOOP\n\t\t}\n\n\t\tapp.saveHistory(line)\n\t}\n}\n\nfunc (app *App) initHistory(line *liner.State) {\n\tif f, err := os.Open(app.historyPath); err == nil {\n\t\tline.ReadHistory(f)\n\t\tf.Close()\n\t} else {\n\t\tlogger.Println(\"failed to read command history: \", err)\n\t}\n}\n\nfunc (app *App) saveHistory(line *liner.State) {\n\tif f, err := os.Create(app.historyPath); err == nil {\n\t\tif _, err := line.WriteHistory(f); err != nil {\n\t\t\tlogger.Println(\"failed to write history: \", err)\n\t\t}\n\n\t\tf.Close()\n\t} else {\n\t\tlogger.Println(\"failed to create history file: \", err)\n\t}\n}\n\nfunc (app *App) buildCmdArgs() []string {\n\targs := []string{}\n\n\t\/\/ config\n\tif path := app.mdqConfigPath; path != \"\" {\n\t\targs = append(args, \"--config=\"+path)\n\t}\n\n\t\/\/ tag\n\tif tag := app.tag; tag != \"\" {\n\t\targs = append(args, \"--tag=\"+tag)\n\t}\n\n\treturn args\n}\n\nfunc (app *App) runSlashCommand(scmd *SlashCommand) {\n\tsdef, err := app.FindSlashCommandDefinition(scmd.Category, scmd.Name)\n\n\tswitch err {\n\tcase nil:\n\t\tif err := sdef.Handle(app, scmd); err != nil {\n\t\t\tlogger.Println(\"failed to handle slash command:\", err)\n\t\t}\n\tcase ErrSlashCommandNotFound:\n\t\tlogger.Println(\"unknown slash command\")\n\t}\n\n\treturn\n}\n\nfunc (app *App) SetPrinterByName(name string) error {\n\n\tswitch name {\n\tcase \"horizontal\":\n\t\tapp.printer = HorizontalPrinter{}\n\tcase \"vertical\":\n\t\tapp.printer = VerticalPrinter{}\n\tdefault:\n\t\treturn ErrUnknownPrinterName\n\t}\n\n\treturn nil\n}\n\nfunc (app *App) GetTag() string {\n\treturn app.tag\n}\n\nfunc (app *App) SetTag(tag string) {\n\tapp.tag = tag\n}\n\nfunc (app *App) ClearTag() {\n\tapp.tag = \"\"\n}\n<commit_msg>fix error on place history file at default path.<commit_after>package mdqi\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"os\/exec\"\n\n\t\"github.com\/peterh\/liner\"\n\t\"github.com\/pkg\/errors\"\n)\n\nconst (\n\tdefaultMdqPath = \"mdq\"\n\tdefaultHistoryFilename = \".mdqi_history\"\n)\n\nvar Version string\n\nvar (\n\tErrSlashCommandNotFound = errors.New(\"unknown SlashCommand\")\n\tErrNotASlashCommand = errors.New(\"there are no SlashCommand\")\n\tErrSlashCommandInvalidArgs = errors.New(\"invalid args\")\n\tErrUnknownPrinterName = errors.New(\"unknown printer name\")\n)\n\ntype App struct {\n\t\/\/ Alive turns into false, mdqi will exit.\n\tAlive bool\n\n\t\/\/ mdqPath is path to mdq command.\n\tmdqPath string\n\n\t\/\/ mdqConfigPath is path to configuration file for mdq command.\n\tmdqConfigPath string\n\n\t\/\/ historyPath is path to command history file for liner.\n\thistoryPath string\n\n\t\/\/ slashCommandDefinition holds SlashCommandDefinition.\n\t\/\/ app.slashCommandDefinition[category][name] = SlashCommandDefinition\n\tslashCommandDefinition map[string]map[string]SlashCommandDefinition\n\n\t\/\/ tag stores tag value for --tag option of mdq.\n\ttag string\n\n\tprinter Printer\n}\n\ntype Result struct {\n\tDatabase string\n\tColumns []string\n\tRows []map[string]interface{}\n}\n\nfunc init() {\n\tdefaultOutput = os.Stdout\n}\n\nfunc NewApp(conf Conf) (*App, error) {\n\t\/\/ validate mdq path\n\tmdqPath := defaultMdqPath\n\tif path := conf.Mdq.Bin; path != \"\" {\n\t\tif err := lookMdqPath(conf.Mdq.Bin); err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"mdq command not found at %s\", path)\n\t\t}\n\t\tmdqPath = path\n\t\tdebug.Println(\"conf.Mdq.Bin =\", path)\n\t}\n\n\t\/\/ mdq config path\n\tif path := conf.Mdq.Config; path != \"\" {\n\t\tdebug.Println(\"conf.Mdq.Config =\", path)\n\t}\n\n\t\/\/ create history file\n\thistoryPath := conf.Mdqi.History\n\tif historyPath == \"\" {\n\t\tvar err error\n\t\tif historyPath, err = defaultHistoryPath(); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to create history file at default path\")\n\t\t}\n\t\tdebug.Println(\"conf.Mdqi.History =\", historyPath)\n\t}\n\tif err := createHistoryFile(historyPath); err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create history file at %s\", historyPath)\n\t}\n\n\tapp := &App{\n\t\tAlive: true,\n\n\t\tmdqPath: mdqPath,\n\t\tmdqConfigPath: conf.Mdq.Config,\n\t\thistoryPath: historyPath,\n\t\tslashCommandDefinition: map[string]map[string]SlashCommandDefinition{},\n\t\tprinter: HorizontalPrinter{},\n\t}\n\n\t\/\/ set default tag\n\tif tag := conf.Mdqi.DefaultTag; tag != \"\" {\n\t\tapp.SetTag(tag)\n\t\tdebug.Println(\"conf.Mdqi.DefaultTag =\", tag)\n\t}\n\n\t\/\/ set default display\n\tif display := conf.Mdqi.DefaultDisplay; display != \"\" {\n\t\tif err := app.SetPrinterByName(display); err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to set default printer\")\n\t\t}\n\n\t\tdebug.Println(\"conf.Mdqi.DefaultDisplay =\", display)\n\t}\n\n\treturn app, nil\n}\n\nfunc createHistoryFile(path string) error {\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\tif _, err := os.Create(path); err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to create history file at %s\", path)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc defaultHistoryPath() (string, error) {\n\tusr, err := user.Current()\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, \"failed to get current user\")\n\t}\n\n\treturn filepath.Join(usr.HomeDir, defaultHistoryFilename), err\n}\n\nfunc lookMdqPath(path string) error {\n\t_, err := exec.LookPath(path)\n\n\treturn err\n}\n\nfunc (app *App) slashCommandCategories() []string {\n\tdefs := app.slashCommandDefinition\n\tkeys := make([]string, 0, len(defs))\n\n\tfor key := range defs {\n\t\tkeys = append(keys, key)\n\t}\n\n\treturn keys\n}\n\nfunc (app *App) slashCommandNames(category string) []string {\n\tdefs := app.slashCommandDefinition[category]\n\tkeys := make([]string, 0, len(defs))\n\n\tfor key := range defs {\n\t\tkeys = append(keys, key)\n\t}\n\n\treturn keys\n}\n\nfunc (app *App) Run() {\n\tapp.runLiner()\n}\n\nfunc (app *App) runLiner() {\n\tline := liner.NewLiner()\n\tdefer line.Close()\n\n\tline.SetCtrlCAborts(true)\n\n\tapp.initHistory(line)\n\n\trgxFinishLine := regexp.MustCompile(\";$\")\n\tlineFinished := true\n\n\tvar input string\n\tvar err error\n\nLOOP:\n\tfor {\n\t\tif !app.Alive {\n\t\t\tfmt.Println(\"bye\")\n\t\t\tbreak LOOP\n\t\t}\n\n\t\tif lineFinished {\n\t\t\tinput, err = line.Prompt(\"mdq> \")\n\t\t} else {\n\t\t\tvar l string\n\t\t\tl, err = line.Prompt(\" | \")\n\t\t\tinput = strings.Join([]string{input, l}, \" \")\n\t\t}\n\n\t\tswitch err {\n\t\tcase nil:\n\t\t\tinput = strings.Trim(input, \" \\n\")\n\n\t\t\tif lineFinished = rgxFinishLine.MatchString(input); lineFinished {\n\t\t\t} else {\n\t\t\t\t\/\/ If line is not finished, read next line as continue.\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif input == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tline.AppendHistory(input)\n\n\t\t\tscmd, _ := ParseSlashCommand(input)\n\t\t\tif scmd != nil {\n\t\t\t\tapp.runSlashCommand(scmd)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tresults, err := app.RunCmd(input, app.buildCmdArgs()...)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Println(err.Error())\n\t\t\t}\n\n\t\t\tPrint(app.printer, results)\n\t\tcase liner.ErrPromptAborted:\n\t\t\tlogger.Println(\"aborted\")\n\t\t\tbreak LOOP\n\t\tcase io.EOF:\n\t\t\tfmt.Println(\"bye\")\n\t\t\tbreak LOOP\n\t\tdefault:\n\t\t\tlogger.Println(\"error on reading line: \", err)\n\t\t\tbreak LOOP\n\t\t}\n\n\t\tapp.saveHistory(line)\n\t}\n}\n\nfunc (app *App) initHistory(line *liner.State) {\n\tif f, err := os.Open(app.historyPath); err == nil {\n\t\tline.ReadHistory(f)\n\t\tf.Close()\n\t} else {\n\t\tlogger.Printf(\"failed to read command history at %s: %s\", app.historyPath, err)\n\t}\n}\n\nfunc (app *App) saveHistory(line *liner.State) {\n\tif f, err := os.Create(app.historyPath); err == nil {\n\t\tif _, err := line.WriteHistory(f); err != nil {\n\t\t\tlogger.Println(\"failed to write history: \", err)\n\t\t}\n\n\t\tf.Close()\n\t} else {\n\t\tlogger.Printf(\"failed to create history file at %s: %s\", app.historyPath, err)\n\t}\n}\n\nfunc (app *App) buildCmdArgs() []string {\n\targs := []string{}\n\n\t\/\/ config\n\tif path := app.mdqConfigPath; path != \"\" {\n\t\targs = append(args, \"--config=\"+path)\n\t}\n\n\t\/\/ tag\n\tif tag := app.tag; tag != \"\" {\n\t\targs = append(args, \"--tag=\"+tag)\n\t}\n\n\treturn args\n}\n\nfunc (app *App) runSlashCommand(scmd *SlashCommand) {\n\tsdef, err := app.FindSlashCommandDefinition(scmd.Category, scmd.Name)\n\n\tswitch err {\n\tcase nil:\n\t\tif err := sdef.Handle(app, scmd); err != nil {\n\t\t\tlogger.Println(\"failed to handle slash command:\", err)\n\t\t}\n\tcase ErrSlashCommandNotFound:\n\t\tlogger.Println(\"unknown slash command\")\n\t}\n\n\treturn\n}\n\nfunc (app *App) SetPrinterByName(name string) error {\n\n\tswitch name {\n\tcase \"horizontal\":\n\t\tapp.printer = HorizontalPrinter{}\n\tcase \"vertical\":\n\t\tapp.printer = VerticalPrinter{}\n\tdefault:\n\t\treturn ErrUnknownPrinterName\n\t}\n\n\treturn nil\n}\n\nfunc (app *App) GetTag() string {\n\treturn app.tag\n}\n\nfunc (app *App) SetTag(tag string) {\n\tapp.tag = tag\n}\n\nfunc (app *App) ClearTag() {\n\tapp.tag = \"\"\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/Financial-Times\/base-ft-rw-app-go\"\n\t\"github.com\/Financial-Times\/go-fthealth\/v1a\"\n\t\"github.com\/Financial-Times\/http-handlers-go\"\n\t\"github.com\/Financial-Times\/public-people-api\/people\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/jawher\/mow.cli\"\n\t\"github.com\/jmcvetta\/neoism\"\n\t\"github.com\/rcrowley\/go-metrics\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n)\n\nfunc main() {\n\tapp := cli.App(\"public-people-api-neo4j\", \"A public RESTful API for accessing People in neo4j\")\n\tneoURL := app.StringOpt(\"neo-url\", \"http:\/\/localhost:7474\/db\/data\", \"neo4j endpoint URL\")\n\tport := app.StringOpt(\"port\", \"8080\", \"Port to listen on\")\n\tlogLevel := app.StringOpt(\"log-level\", \"INFO\", \"Logging level (DEBUG, INFO, WARN, ERROR)\")\n\tgraphiteTCPAddress := app.StringOpt(\"graphiteTCPAddress\", \"\",\n\t\t\"Graphite TCP address, e.g. graphite.ft.com:2003. Leave as default if you do NOT want to output to graphite (e.g. if running locally)\")\n\tgraphitePrefix := app.StringOpt(\"graphitePrefix\", \"\",\n\t\t\"Prefix to use. Should start with content, include the environment, and the host name. e.g. content.test.public.people.api.ftaps59382-law1a-eu-t\")\n\tlogMetrics := app.BoolOpt(\"logMetrics\", false, \"Whether to log metrics. Set to true if running locally and you want metrics output\")\n\n\tapp.Action = func() {\n\t\tsetLogLevel(strings.ToUpper(*logLevel))\n\t\tbaseftrwapp.OutputMetricsIfRequired(*graphiteTCPAddress, *graphitePrefix, *logMetrics)\n\t\tlog.Infof(\"public-people-api will listen on port: %s, connecting to: %s\", *port, *neoURL)\n\t\trunServer(*neoURL, *port)\n\t}\n\tapp.Run(os.Args)\n}\n\nfunc runServer(neoURL string, port string) {\n\tdb, err := neoism.Connect(neoURL)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error connecting to neo4j %s\", err)\n\t}\n\tpeople.PeopleDriver = people.NewCypherDriver(db)\n\tr := mux.NewRouter()\n\n\t\/\/ Healthchecks and standards first\n\tr.HandleFunc(\"\/__health\", v1a.Handler(\"PeopleReadWriteNeo4j Healthchecks\",\n\t\t\"Checks for accessing neo4j\", people.HealthCheck()))\n\tr.HandleFunc(\"\/ping\", people.Ping)\n\tr.HandleFunc(\"\/__ping\", people.Ping)\n\n\t\/\/ Then API specific ones:\n\tr.HandleFunc(\"\/people\/{uuid}\", people.GetPerson).Methods(\"GET\")\n\n\tif err := http.ListenAndServe(\":\"+port,\n\t\thttphandlers.HTTPMetricsHandler(metrics.DefaultRegistry,\n\t\t\thttphandlers.TransactionAwareRequestLoggingHandler(log.StandardLogger(), r))); err != nil {\n\t\tlog.Fatalf(\"Unable to start server: %v\", err)\n\t}\n}\n\nfunc setLogLevel(level string) {\n\tswitch level {\n\tcase \"DEBUG\":\n\t\tlog.SetLevel(log.DebugLevel)\n\tcase \"INFO\":\n\t\tlog.SetLevel(log.InfoLevel)\n\tcase \"WARN\":\n\t\tlog.SetLevel(log.WarnLevel)\n\tcase \"ERROR\":\n\t\tlog.SetLevel(log.ErrorLevel)\n\tdefault:\n\t\tlog.Errorf(\"Requested log level %s is not supported, will default to INFO level\", level)\n\t\tlog.SetLevel(log.InfoLevel)\n\t}\n\tlog.Debugf(\"Logging level set to %s\", level)\n}\n<commit_msg>Added MaxIdleConnsPerHost to http client.<commit_after>package main\n\nimport (\n\t\"github.com\/Financial-Times\/base-ft-rw-app-go\"\n\t\"github.com\/Financial-Times\/go-fthealth\/v1a\"\n\t\"github.com\/Financial-Times\/http-handlers-go\"\n\t\"github.com\/Financial-Times\/public-people-api\/people\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/jawher\/mow.cli\"\n\t\"github.com\/jmcvetta\/neoism\"\n\t\"github.com\/rcrowley\/go-metrics\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n)\n\nfunc main() {\n\tapp := cli.App(\"public-people-api-neo4j\", \"A public RESTful API for accessing People in neo4j\")\n\tneoURL := app.StringOpt(\"neo-url\", \"http:\/\/localhost:7474\/db\/data\", \"neo4j endpoint URL\")\n\tport := app.StringOpt(\"port\", \"8080\", \"Port to listen on\")\n\tlogLevel := app.StringOpt(\"log-level\", \"INFO\", \"Logging level (DEBUG, INFO, WARN, ERROR)\")\n\tgraphiteTCPAddress := app.StringOpt(\"graphiteTCPAddress\", \"\",\n\t\t\"Graphite TCP address, e.g. graphite.ft.com:2003. Leave as default if you do NOT want to output to graphite (e.g. if running locally)\")\n\tgraphitePrefix := app.StringOpt(\"graphitePrefix\", \"\",\n\t\t\"Prefix to use. Should start with content, include the environment, and the host name. e.g. content.test.public.people.api.ftaps59382-law1a-eu-t\")\n\tlogMetrics := app.BoolOpt(\"logMetrics\", false, \"Whether to log metrics. Set to true if running locally and you want metrics output\")\n\n\tapp.Action = func() {\n\t\tsetLogLevel(strings.ToUpper(*logLevel))\n\t\tbaseftrwapp.OutputMetricsIfRequired(*graphiteTCPAddress, *graphitePrefix, *logMetrics)\n\t\tlog.Infof(\"public-people-api will listen on port: %s, connecting to: %s\", *port, *neoURL)\n\t\trunServer(*neoURL, *port)\n\t}\n\tapp.Run(os.Args)\n}\n\nfunc runServer(neoURL string, port string) {\n\tdb, err := neoism.Connect(neoURL)\n\tdb.Session.Client = &http.Client{Transport: &http.Transport{MaxIdleConnsPerHost: 100}}\n\tif err != nil {\n\t\tlog.Fatalf(\"Error connecting to neo4j %s\", err)\n\t}\n\tpeople.PeopleDriver = people.NewCypherDriver(db)\n\tr := mux.NewRouter()\n\n\t\/\/ Healthchecks and standards first\n\tr.HandleFunc(\"\/__health\", v1a.Handler(\"PeopleReadWriteNeo4j Healthchecks\",\n\t\t\"Checks for accessing neo4j\", people.HealthCheck()))\n\tr.HandleFunc(\"\/ping\", people.Ping)\n\tr.HandleFunc(\"\/__ping\", people.Ping)\n\n\t\/\/ Then API specific ones:\n\tr.HandleFunc(\"\/people\/{uuid}\", people.GetPerson).Methods(\"GET\")\n\n\tif err := http.ListenAndServe(\":\"+port,\n\t\thttphandlers.HTTPMetricsHandler(metrics.DefaultRegistry,\n\t\t\thttphandlers.TransactionAwareRequestLoggingHandler(log.StandardLogger(), r))); err != nil {\n\t\tlog.Fatalf(\"Unable to start server: %v\", err)\n\t}\n}\n\nfunc setLogLevel(level string) {\n\tswitch level {\n\tcase \"DEBUG\":\n\t\tlog.SetLevel(log.DebugLevel)\n\tcase \"INFO\":\n\t\tlog.SetLevel(log.InfoLevel)\n\tcase \"WARN\":\n\t\tlog.SetLevel(log.WarnLevel)\n\tcase \"ERROR\":\n\t\tlog.SetLevel(log.ErrorLevel)\n\tdefault:\n\t\tlog.Errorf(\"Requested log level %s is not supported, will default to INFO level\", level)\n\t\tlog.SetLevel(log.InfoLevel)\n\t}\n\tlog.Debugf(\"Logging level set to %s\", level)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/jmoiron\/sqlx\"\n\t\"github.com\/training_project\/config\"\n\t\"github.com\/training_project\/controller\/driver\"\n\t\"github.com\/training_project\/controller\/review\"\n\t\"github.com\/training_project\/database\"\n\n\tdriverModel \"github.com\/training_project\/model\/driver\"\n\treviewModel \"github.com\/training_project\/model\/review\"\n\n\t\"github.com\/training_project\/util\/logger\"\n\n\tmgo \"gopkg.in\/mgo.v2\"\n\tlogging \"gopkg.in\/tokopedia\/logging.v1\"\n\n\tdummy \"github.com\/dummy_data\/driver\"\n)\n\nvar cfg config.Config\n\nfunc init() {\n\t\/\/ get config from database.ini\n\t\/\/ assigne to global variable cfg\n\tok := logging.ReadModuleConfig(&cfg, \"\/etc\/test\", \"test\") || logging.ReadModuleConfig(&cfg, \"config\", \"test\")\n\tif !ok {\n\t\tlog.Fatalln(\"failed to read config\")\n\t}\n\n\tlogger.InitLogger(\"App :: \", \".\/logs\/\", \"App.txt\")\n}\n\nfunc main() {\n\t\/\/getting list of all the connection.\n\tlistConnection := database.SystemConnection()\n\n\t\/\/getting redis connection convert it from interface to *redisClient.\n\t\/\/redisConn := listConnection[\"redis\"].(*redis.Client)\n\n\t\/\/ get postgre connection.\n\tpostgreConn := listConnection[\"postgre\"].(*sqlx.DB)\n\tmongoConn := listConnection[\"mongodb\"].(*mgo.Session)\n\n\t\/\/pass connection to model to model\n\treviewData := &reviewModel.ReviewData{}\n\treviewData.GetConn(postgreConn)\n\n\tdriverData := &driverModel.DriverData{}\n\tdriverData.GetConn(mongoConn)\n\n\t\/\/ inserting dummy driver\n\tinsertDummyDriver(driverData)\n\n\t\/\/ review router\n\thttp.HandleFunc(\"\/\", review.CheckDataExist)\n\n\t\/\/ driver router\n\thttp.HandleFunc(\"\/driver\", driver.InsertDriver)\n\thttp.HandleFunc(\"\/driver\/find\", driver.FindDriver)\n\n\tport := \":8080\"\n\tfmt.Println(\"App Started on port = \", port)\n\terr := http.ListenAndServe(port, nil)\n\tif err != nil {\n\t\tlog.Panic(\"App Started Failed = \", err.Error())\n\t}\n\n}\n\n\/\/ insert database 50.000 rows\n\/\/ passed deriver struct to save the data to database.\nfunc insertDummyDriver(driverData *driverModel.DriverData) {\n\n\tdummyDrivers := dummy.GenereateDriver(50000)\n\tfor _, driver := range dummyDrivers {\n\t\tdriverData.Insert(driver.Name, driver.Lat, driver.Lon, driver.Status)\n\t}\n\n}\n<commit_msg>fix some comment<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/jmoiron\/sqlx\"\n\t\"github.com\/training_project\/config\"\n\t\"github.com\/training_project\/controller\/driver\"\n\t\"github.com\/training_project\/controller\/review\"\n\t\"github.com\/training_project\/database\"\n\n\tdriverModel \"github.com\/training_project\/model\/driver\"\n\treviewModel \"github.com\/training_project\/model\/review\"\n\n\t\"github.com\/training_project\/util\/logger\"\n\n\tmgo \"gopkg.in\/mgo.v2\"\n\tlogging \"gopkg.in\/tokopedia\/logging.v1\"\n\n\tdummy \"github.com\/dummy_data\/driver\"\n)\n\nvar cfg config.Config\n\nfunc init() {\n\t\/\/ get config from database.ini\n\t\/\/ assigne to global variable cfg\n\tok := logging.ReadModuleConfig(&cfg, \"\/etc\/test\", \"test\") || logging.ReadModuleConfig(&cfg, \"config\", \"test\")\n\tif !ok {\n\t\tlog.Fatalln(\"failed to read config\")\n\t}\n\n\tlogger.InitLogger(\"App :: \", \".\/logs\/\", \"App.txt\")\n}\n\nfunc main() {\n\t\/\/getting list of all the connection.\n\tlistConnection := database.SystemConnection()\n\n\t\/\/getting redis connection convert it from interface to *redisClient.\n\t\/\/redisConn := listConnection[\"redis\"].(*redis.Client)\n\n\t\/\/ get postgre connection.\n\tpostgreConn := listConnection[\"postgre\"].(*sqlx.DB)\n\tmongoConn := listConnection[\"mongodb\"].(*mgo.Session)\n\n\t\/\/pass connection to model to model\n\treviewData := &reviewModel.ReviewData{}\n\treviewData.GetConn(postgreConn)\n\n\tdriverData := &driverModel.DriverData{}\n\tdriverData.GetConn(mongoConn)\n\n\t\/\/ inserting dummy driver\n\tinsertDummyDriver(driverData)\n\n\t\/\/ review router\n\thttp.HandleFunc(\"\/\", review.CheckDataExist)\n\n\t\/\/ driver router\n\thttp.HandleFunc(\"\/driver\", driver.InsertDriver)\n\thttp.HandleFunc(\"\/driver\/find\", driver.FindDriver)\n\n\tport := \":8080\"\n\tfmt.Println(\"App Started on port = \", port)\n\terr := http.ListenAndServe(port, nil)\n\tif err != nil {\n\t\tlog.Panic(\"App Started Failed = \", err.Error())\n\t}\n\n}\n\n\/\/ insert database 50.000 rows\n\/\/ passed driver struct to save the data to database.\nfunc insertDummyDriver(driverData *driverModel.DriverData) {\n\n\tdummyDrivers := dummy.GenereateDriver(50000)\n\tfor _, driver := range dummyDrivers {\n\t\tdriverData.Insert(driver.Name, driver.Lat, driver.Lon, driver.Status)\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/gin-gonic\/gin\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"time\"\n\t\"fmt\"\n)\n\nfunc App(c *gin.Context) {\n\tpull := c.Param(\"pull\")\n\tbene := c.Param(\"benefit\")\n\tresumeKey := c.DefaultQuery(\"r\", \"\")\n\n\tvar mode uint8\n\tmode = 0\n\tif pull == \"1\" {\n\t\tmode += 1\n\t}\n\tif bene == \"1\" {\n\t\tmode += 2\n\t}\n\n\ttemplates := []string{\n\t\t\"b-push.html\",\n\t\t\"b-pull.html\",\n\t\t\"b+push.html\",\n\t\t\"b+pull.html\",\n\t}\n\n\tc.HTML(http.StatusOK, templates[mode], gin.H{\n\t\t\"resumeKey\": resumeKey,\n\t})\n}\n\nfunc Redirect(c *gin.Context) {\n\tpopular,_ := strconv.Atoi(c.Param(\"popular\"))\n\tpull,_ := strconv.Atoi(c.Param(\"pull\"))\n\tdisclose,_ := strconv.Atoi(c.Param(\"disclose\"))\n\n\turl := fmt.Sprintf(\"http:\/\/umfragen.ise.tu-darmstadt.de\/sosci\/privacyresearch\/?password=test&pull=%d&popular=%d&disclose=%d\", pull, popular, disclose)\n\tc.Redirect(http.StatusMovedPermanently, url)\n}\n\nfunc Home(c *gin.Context) {\n\trand.Seed(time.Now().UnixNano())\n\tmode := rand.Intn(6)\n\n\turls := []string{\n\t\t\"https:\/\/marvelapp.com\/17475hc\/screen\/17622836\",\n\t\t\"https:\/\/marvelapp.com\/17477hg\/screen\/17622959\",\n\t\t\"https:\/\/marvelapp.com\/4f55e1j\/screen\/17622902\",\n\t\t\"https:\/\/marvelapp.com\/33a2g74\/screen\/17622421\",\n\t\t\"https:\/\/marvelapp.com\/33a3bgh\/screen\/17622786\",\n\t\t\"https:\/\/marvelapp.com\/33a3a8e\/screen\/17622754\",\n\t}\n\n\t\/\/ urls := []string{\n\t\/\/ \t\"\/app\/0\/0\",\n\t\/\/ \t\"\/app\/0\/1\",\n\t\/\/ \t\"\/app\/1\/0\",\n\t\/\/ \t\"\/app\/1\/1\",\n\t\/\/ }\n\n\tc.Redirect(302, urls[mode])\n}\n<commit_msg>fixed an error<commit_after>package main\n\nimport (\n\t\"github.com\/gin-gonic\/gin\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"time\"\n\t\"fmt\"\n\t\"strconv\"\n)\n\nfunc App(c *gin.Context) {\n\tpull := c.Param(\"pull\")\n\tbene := c.Param(\"benefit\")\n\tresumeKey := c.DefaultQuery(\"r\", \"\")\n\n\tvar mode uint8\n\tmode = 0\n\tif pull == \"1\" {\n\t\tmode += 1\n\t}\n\tif bene == \"1\" {\n\t\tmode += 2\n\t}\n\n\ttemplates := []string{\n\t\t\"b-push.html\",\n\t\t\"b-pull.html\",\n\t\t\"b+push.html\",\n\t\t\"b+pull.html\",\n\t}\n\n\tc.HTML(http.StatusOK, templates[mode], gin.H{\n\t\t\"resumeKey\": resumeKey,\n\t})\n}\n\nfunc Redirect(c *gin.Context) {\n\tpopular,_ := strconv.Atoi(c.Param(\"popular\"))\n\tpull,_ := strconv.Atoi(c.Param(\"pull\"))\n\tdisclose,_ := strconv.Atoi(c.Param(\"disclose\"))\n\n\turl := fmt.Sprintf(\"http:\/\/umfragen.ise.tu-darmstadt.de\/sosci\/privacyresearch\/?password=test&pull=%d&popular=%d&disclose=%d\", pull, popular, disclose)\n\tc.Redirect(http.StatusMovedPermanently, url)\n}\n\nfunc Home(c *gin.Context) {\n\trand.Seed(time.Now().UnixNano())\n\tmode := rand.Intn(6)\n\n\turls := []string{\n\t\t\"https:\/\/marvelapp.com\/17475hc\/screen\/17622836\",\n\t\t\"https:\/\/marvelapp.com\/17477hg\/screen\/17622959\",\n\t\t\"https:\/\/marvelapp.com\/4f55e1j\/screen\/17622902\",\n\t\t\"https:\/\/marvelapp.com\/33a2g74\/screen\/17622421\",\n\t\t\"https:\/\/marvelapp.com\/33a3bgh\/screen\/17622786\",\n\t\t\"https:\/\/marvelapp.com\/33a3a8e\/screen\/17622754\",\n\t}\n\n\t\/\/ urls := []string{\n\t\/\/ \t\"\/app\/0\/0\",\n\t\/\/ \t\"\/app\/0\/1\",\n\t\/\/ \t\"\/app\/1\/0\",\n\t\/\/ \t\"\/app\/1\/1\",\n\t\/\/ }\n\n\tc.Redirect(302, urls[mode])\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\tfthealth \"github.com\/Financial-Times\/go-fthealth\/v1a\"\n\toldhttphandlers \"github.com\/Financial-Times\/http-handlers-go\/httphandlers\"\n\t\"github.com\/Financial-Times\/service-status-go\/httphandlers\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/gorilla\/handlers\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/jawher\/mow.cli\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n)\n\nconst serviceDescription = \"A RESTful API for retrieving and transforming content to preview data\"\n\nvar timeout = time.Duration(5 * time.Second)\nvar client = &http.Client{Timeout: timeout}\n\nfunc main() {\n\n\tapp := cli.App(\"content-preview\", serviceDescription)\n\tserviceName := app.StringOpt(\"app-name\", \"content-preview\", \"The name of this service\")\n\tappPort := app.StringOpt(\"app-port\", \"8084\", \"Default port for Content Preview app\")\n\tnativeContentAppAuth := app.StringOpt(\"source-app-auth\", \"default\", \"Basic auth for MAPI\")\n\tnativeContentAppUri := app.StringOpt(\"source-app-uri\", \"http:\/\/methode-api-uk-p.svc.ft.com\/eom-file\/\", \"URI of the Native Content Source Application endpoint\")\n\tnativeContentAppHealthUri := app.StringOpt(\"source-app-health-uri\", \"http:\/\/methode-api-uk-p.svc.ft.com\/build-info\", \"URI of the Native Content Source Application health endpoint\")\n\ttransformAppHostHeader := app.StringOpt(\"transform-app-host-header\", \"methode-article-transformer\", \"Transform Application Host Header\")\n\ttransformAppUri := app.StringOpt(\"transform-app-uri\", \"http:\/\/methode-article-transformer-01-iw-uk-p.svc.ft.com\/content-transform\/\", \"URI of the Transform Application endpoint\")\n\ttransformAppHealthUri := app.StringOpt(\"transform-app-health-uri\", \"http:\/\/methode-article-transformer-01-iw-uk-p.svc.ft.com\/build-info\", \"URI of the Transform Application health endpoint\")\n\tsourceAppName := app.StringOpt(\"source-app-name\", \"Native Content Service\", \"Service name of the source application\")\n\ttransformAppName := app.StringOpt(\"transform-app-name\", \"Native Content Transformer Service\", \"Service name of the content transformer application\")\n\tsourceAppPanicGuide := app.String(cli.StringOpt{\n\t\tName: \"source-app-panic-guide\",\n\t\tValue: \"https:\/\/sites.google.com\/a\/ft.com\/dynamic-publishing-team\/content-preview-panic-guide\",\n\t\tDesc: \"Native Content Source application panic guide url for healthcheck. Default panic guide is for content preview.\",\n\t\tEnvVar: \"SOURCE_APP_PANIC_GUIDE\",\n\t})\n\ttransformAppPanicGuide := app.String(cli.StringOpt{\n\t\tName: \"transform-app-panic-guide\",\n\t\tValue: \"https:\/\/sites.google.com\/a\/ft.com\/dynamic-publishing-team\/content-preview-panic-guide\",\n\t\tDesc: \"Transform application panic guide url for healthcheck. Default panic guide is for content preview.\",\n\t\tEnvVar: \"TRANSFORM_APP_PANIC_GUIDE\",\n\t})\n\n\tgraphiteTCPAddress := app.String(cli.StringOpt{\n\t\tName: \"graphite-tcp-address\",\n\t\tValue: \"\",\n\t\tDesc: \"Graphite TCP address, e.g. graphite.ft.com:2003. Leave as default if you do NOT want to output to graphite (e.g. if running locally)\",\n\t\tEnvVar: \"GRAPHITE_TCP_ADDRESS\",\n\t})\n\tgraphitePrefix := app.String(cli.StringOpt{\n\t\tName: \"graphite-prefix\",\n\t\tValue: \"coco.services.$ENV.content-preview.0\",\n\t\tDesc: \"Prefix to use. Should start with content, include the environment, and the host name. e.g. coco.pre-prod.sections-rw-neo4j.1\",\n\t\tEnvVar: \"GRAPHITE_PREFIX\",\n\t})\n\n\tlogMetrics := app.Bool(cli.BoolOpt{\n\t\tName: \"log-metrics\",\n\t\tValue: false,\n\t\tDesc: \"Whether to log metrics. Set to true if running locally and you want metrics output\",\n\t\tEnvVar: \"LOG_METRICS\",\n\t})\n\n\tapp.Action = func() {\n\t\tsc := ServiceConfig{*serviceName, *appPort, *nativeContentAppAuth,\n\t\t\t*transformAppHostHeader, *nativeContentAppUri, *transformAppUri, *nativeContentAppHealthUri, *transformAppHealthUri, *sourceAppName, *transformAppName, *sourceAppPanicGuide, *transformAppPanicGuide}\n\t\tappLogger := NewAppLogger()\n\t\tmetricsHandler := NewMetrics()\n\t\tcontentHandler := ContentHandler{&sc, appLogger, &metricsHandler}\n\n\t\tr := mux.NewRouter()\n\t\tr.Path(\"\/content-preview\/{uuid}\").Handler(handlers.MethodHandler{\"GET\": oldhttphandlers.HTTPMetricsHandler(metricsHandler.registry,\n\t\t\toldhttphandlers.TransactionAwareRequestLoggingHandler(logrus.StandardLogger(), contentHandler))})\n\n\t\tr.Path(httphandlers.BuildInfoPath).HandlerFunc(httphandlers.BuildInfoHandler)\n\t\tr.Path(httphandlers.PingPath).HandlerFunc(httphandlers.PingHandler)\n\n\t\tr.Path(\"\/__health\").Handler(handlers.MethodHandler{\"GET\": http.HandlerFunc(fthealth.Handler(*serviceName, serviceDescription, sc.nativeContentSourceCheck(), sc.transformerServiceCheck()))})\n\t\tr.Path(\"\/__metrics\").Handler(handlers.MethodHandler{\"GET\": http.HandlerFunc(metricsHttpEndpoint)})\n\n\t\tappLogger.ServiceStartedEvent(*serviceName, sc.asMap())\n\t\tmetricsHandler.OutputMetricsIfRequired(*graphiteTCPAddress, *graphitePrefix, *logMetrics)\n\n\t\terr := http.ListenAndServe(\":\"+*appPort, r)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to start server: %v\", err)\n\t\t}\n\t}\n\tapp.Run(os.Args)\n}\n\ntype ServiceConfig struct {\n\tserviceName string\n\tappPort string\n\tnativeContentAppAuth string\n\ttransformAppHostHeader string\n\tnativeContentAppUri string\n\ttransformAppUri string\n\tnativeContentAppHealthUri string\n\ttransformAppHealthUri string\n\tsourceAppName string\n\ttransformAppName string\n\tsourceAppPanicGuide\t\t string\n\ttransformAppPanicGuide\t string\n}\n\nfunc (sc ServiceConfig) asMap() map[string]interface{} {\n\n\treturn map[string]interface{}{\n\t\t\"service-name\": sc.serviceName,\n\t\t\"service-port\": sc.appPort,\n\t\t\"source-app-name\": sc.sourceAppName,\n\t\t\"source-app-uri\": sc.nativeContentAppUri,\n\t\t\"transform-app-name\": sc.transformAppName,\n\t\t\"transform-app-uri\": sc.transformAppUri,\n\t\t\"source-app-health-uri\": sc.nativeContentAppHealthUri,\n\t\t\"transform-app-health-uri\": sc.transformAppHealthUri,\n\t\t\"source-app-panic-guide\":\tsc.sourceAppPanicGuide,\n\t\t\"transform-app-panic-guide\": sc.transformAppPanicGuide,\n\t}\n}\n<commit_msg>tidied up other properties to make app config consistent<commit_after>package main\n\nimport (\n\tfthealth \"github.com\/Financial-Times\/go-fthealth\/v1a\"\n\toldhttphandlers \"github.com\/Financial-Times\/http-handlers-go\/httphandlers\"\n\t\"github.com\/Financial-Times\/service-status-go\/httphandlers\"\n\t\"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/gorilla\/handlers\"\n\t\"github.com\/gorilla\/mux\"\n\t\"github.com\/jawher\/mow.cli\"\n\t\"net\/http\"\n\t\"os\"\n\t\"time\"\n)\n\nconst serviceDescription = \"A RESTful API for retrieving and transforming content to preview data\"\n\nvar timeout = time.Duration(5 * time.Second)\nvar client = &http.Client{Timeout: timeout}\n\nfunc main() {\n\n\tapp := cli.App(\"content-preview\", serviceDescription)\n\tserviceName := app.StringOpt(\"app-name\", \"content-preview\", \"The name of this service\")\n\tappPort := app.StringOpt(\"app-port\", \"8084\", \"Default port for Content Preview app\")\n\tnativeContentAppAuth := app.StringOpt(\"source-app-auth\", \"default\", \"Basic auth for MAPI\")\n\tnativeContentAppUri := app.StringOpt(\"source-app-uri\", \"http:\/\/methode-api-uk-p.svc.ft.com\/eom-file\/\", \"URI of the Native Content Source Application endpoint\")\n\tnativeContentAppHealthUri := app.StringOpt(\"source-app-health-uri\", \"http:\/\/methode-api-uk-p.svc.ft.com\/build-info\", \"URI of the Native Content Source Application health endpoint\")\n\ttransformAppHostHeader := app.StringOpt(\"transform-app-host-header\", \"methode-article-transformer\", \"Transform Application Host Header\")\n\ttransformAppUri := app.StringOpt(\"transform-app-uri\", \"http:\/\/methode-article-transformer-01-iw-uk-p.svc.ft.com\/content-transform\/\", \"URI of the Transform Application endpoint\")\n\ttransformAppHealthUri := app.StringOpt(\"transform-app-health-uri\", \"http:\/\/methode-article-transformer-01-iw-uk-p.svc.ft.com\/build-info\", \"URI of the Transform Application health endpoint\")\n\tsourceAppName := app.StringOpt(\"source-app-name\", \"Native Content Service\", \"Service name of the source application\")\n\ttransformAppName := app.StringOpt(\"transform-app-name\", \"Native Content Transformer Service\", \"Service name of the content transformer application\")\n\tsourceAppPanicGuide := app.String(cli.StringOpt{\n\t\tName: \"source-app-panic-guide\",\n\t\tValue: \"https:\/\/sites.google.com\/a\/ft.com\/dynamic-publishing-team\/content-preview-panic-guide\",\n\t\tDesc: \"Native Content Source application panic guide url for healthcheck. Default panic guide is for content preview.\",\n\t\tEnvVar: \"SOURCE_APP_PANIC_GUIDE\",\n\t})\n\ttransformAppPanicGuide := app.String(cli.StringOpt{\n\t\tName: \"transform-app-panic-guide\",\n\t\tValue: \"https:\/\/sites.google.com\/a\/ft.com\/dynamic-publishing-team\/content-preview-panic-guide\",\n\t\tDesc: \"Transform application panic guide url for healthcheck. Default panic guide is for content preview.\",\n\t\tEnvVar: \"TRANSFORM_APP_PANIC_GUIDE\",\n\t})\n\n\tgraphiteTCPAddress := app.String(cli.StringOpt{\n\t\tName: \"graphite-tcp-address\",\n\t\tValue: \"\",\n\t\tDesc: \"Graphite TCP address, e.g. graphite.ft.com:2003. Leave as default if you do NOT want to output to graphite (e.g. if running locally)\",\n\t\tEnvVar: \"GRAPHITE_TCP_ADDRESS\",\n\t})\n\tgraphitePrefix := app.String(cli.StringOpt{\n\t\tName: \"graphite-prefix\",\n\t\tValue: \"coco.services.$ENV.content-preview.0\",\n\t\tDesc: \"Prefix to use. Should start with content, include the environment, and the host name. e.g. coco.pre-prod.sections-rw-neo4j.1\",\n\t\tEnvVar: \"GRAPHITE_PREFIX\",\n\t})\n\n\tlogMetrics := app.Bool(cli.BoolOpt{\n\t\tName: \"log-metrics\",\n\t\tValue: false,\n\t\tDesc: \"Whether to log metrics. Set to true if running locally and you want metrics output\",\n\t\tEnvVar: \"LOG_METRICS\",\n\t})\n\n\tapp.Action = func() {\n\t\tsc := ServiceConfig{*serviceName, *appPort, *nativeContentAppAuth, *transformAppHostHeader,\n\t\t\t*nativeContentAppUri, *transformAppUri, *nativeContentAppHealthUri, *transformAppHealthUri,\n\t\t\t*sourceAppName, *transformAppName, *sourceAppPanicGuide, *transformAppPanicGuide,\n\t\t\t*graphiteTCPAddress, *graphitePrefix}\n\t\tappLogger := NewAppLogger()\n\t\tmetricsHandler := NewMetrics()\n\t\tcontentHandler := ContentHandler{&sc, appLogger, &metricsHandler}\n\n\t\tr := mux.NewRouter()\n\t\tr.Path(\"\/content-preview\/{uuid}\").Handler(handlers.MethodHandler{\"GET\": oldhttphandlers.HTTPMetricsHandler(metricsHandler.registry,\n\t\t\toldhttphandlers.TransactionAwareRequestLoggingHandler(logrus.StandardLogger(), contentHandler))})\n\n\t\tr.Path(httphandlers.BuildInfoPath).HandlerFunc(httphandlers.BuildInfoHandler)\n\t\tr.Path(httphandlers.PingPath).HandlerFunc(httphandlers.PingHandler)\n\n\t\tr.Path(\"\/__health\").Handler(handlers.MethodHandler{\"GET\": http.HandlerFunc(fthealth.Handler(*serviceName, serviceDescription, sc.nativeContentSourceCheck(), sc.transformerServiceCheck()))})\n\t\tr.Path(\"\/__metrics\").Handler(handlers.MethodHandler{\"GET\": http.HandlerFunc(metricsHttpEndpoint)})\n\n\t\tappLogger.ServiceStartedEvent(*serviceName, sc.asMap())\n\t\tmetricsHandler.OutputMetricsIfRequired(*graphiteTCPAddress, *graphitePrefix, *logMetrics)\n\n\t\terr := http.ListenAndServe(\":\"+*appPort, r)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Unable to start server: %v\", err)\n\t\t}\n\t}\n\tapp.Run(os.Args)\n}\n\ntype ServiceConfig struct {\n\tserviceName string\n\tappPort string\n\tnativeContentAppAuth string\n\ttransformAppHostHeader string\n\tnativeContentAppUri string\n\ttransformAppUri string\n\tnativeContentAppHealthUri string\n\ttransformAppHealthUri string\n\tsourceAppName string\n\ttransformAppName string\n\tsourceAppPanicGuide\t\t string\n\ttransformAppPanicGuide\t string\n\tgraphiteTCPAddress \t\t string\n\tgraphitePrefix\t\t\t string\n\n}\n\nfunc (sc ServiceConfig) asMap() map[string]interface{} {\n\n\treturn map[string]interface{}{\n\t\t\"service-name\": sc.serviceName,\n\t\t\"service-port\": sc.appPort,\n\t\t\"source-app-name\": sc.sourceAppName,\n\t\t\"source-app-uri\": sc.nativeContentAppUri,\n\t\t\"transform-app-name\": sc.transformAppName,\n\t\t\"transform-app-uri\": sc.transformAppUri,\n\t\t\"source-app-health-uri\": sc.nativeContentAppHealthUri,\n\t\t\"transform-app-health-uri\": sc.transformAppHealthUri,\n\t\t\"source-app-panic-guide\":\tsc.sourceAppPanicGuide,\n\t\t\"transform-app-panic-guide\": sc.transformAppPanicGuide,\n\t\t\"graphite-tcp-address\": \tsc.graphiteTCPAddress,\n\t\t\"graphite-prefix\": \t\t\tsc.graphitePrefix,\n\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package commander is used to manage a set of command-line \"commands\", with\n\/\/ per-command flags and arguments.\n\/\/\n\/\/ Supports command like so:\n\/\/\n\/\/ <command> <required> [<optional> [<optional> ...]]\n\/\/ <command> <remainder...>\n\/\/\n\/\/ eg.\n\/\/\n\/\/ register [--name <name>] <nick>|<id>\n\/\/ post --channel|-a <channel> [--image <image>] [<text>]\n\/\/\n\/\/ var (\n\/\/ chat = commander.New()\n\/\/ debug = chat.Flag(\"debug\", \"enable debug mode\").Default(\"false\").Bool()\n\/\/\n\/\/ register = chat.Command(\"register\", \"Register a new user.\")\n\/\/ registerName = register.Flag(\"name\", \"name of user\").Required().String()\n\/\/ registerNick = register.Arg(\"nick\", \"nickname for user\").Required().String()\n\/\/\n\/\/ post = chat.Command(\"post\", \"Post a message to a channel.\")\n\/\/ postChannel = post.Flag(\"channel\", \"channel to post to\").Short('a').Required().String()\n\/\/ postImage = post.Flag(\"image\", \"image to post\").String()\n\/\/ )\n\/\/\n\npackage kingpin\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n)\n\ntype Dispatch func(*ParseContext) error\n\ntype ApplicationValidator func(*Application) error\n\n\/\/ An Application contains the definitions of flags, arguments and commands\n\/\/ for an application.\ntype Application struct {\n\t*flagGroup\n\t*argGroup\n\t*cmdGroup\n\tinitialized bool\n\tName string\n\tHelp string\n\tvalidator ApplicationValidator\n}\n\n\/\/ New creates a new Kingpin application instance.\nfunc New(name, help string) *Application {\n\ta := &Application{\n\t\tflagGroup: newFlagGroup(),\n\t\targGroup: newArgGroup(),\n\t\tName: name,\n\t\tHelp: help,\n\t}\n\ta.cmdGroup = newCmdGroup(a)\n\ta.Flag(\"help\", \"Show help.\").Action(a.onHelp).Bool()\n\treturn a\n}\n\n\/\/ Validate sets a validation function to run when parsing.\nfunc (a *Application) Validate(validator ApplicationValidator) *Application {\n\ta.validator = validator\n\treturn a\n}\n\n\/\/ Parse parses command-line arguments. It returns the selected command and an\n\/\/ error. The selected command will be a space separated subcommand, if\n\/\/ subcommands have been configured.\nfunc (a *Application) Parse(args []string) (command string, err error) {\n\tif err := a.init(); err != nil {\n\t\treturn \"\", err\n\t}\n\tcontext := tokenize(args)\n\tcommand, err = a.parse(context)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif !context.EOL() {\n\t\treturn \"\", fmt.Errorf(\"unexpected argument '%s'\", context.Peek())\n\t}\n\n\treturn command, err\n}\n\n\/\/ Version adds a --version flag for displaying the application version.\nfunc (a *Application) Version(version string) *Application {\n\ta.Flag(\"version\", \"Show application version.\").Action(func(*ParseContext) error {\n\t\tfmt.Println(version)\n\t\tos.Exit(0)\n\t\treturn nil\n\t}).Bool()\n\treturn a\n}\n\n\/\/ Command adds a new top-level command.\nfunc (a *Application) Command(name, help string) *CmdClause {\n\treturn a.addCommand(name, help)\n}\n\nfunc (a *Application) init() error {\n\tif a.initialized {\n\t\treturn nil\n\t}\n\tif a.cmdGroup.have() && a.argGroup.have() {\n\t\treturn fmt.Errorf(\"can't mix top-level Arg()s with Command()s\")\n\t}\n\n\tif len(a.commands) > 0 {\n\t\tcmd := a.Command(\"help\", \"Show help for a command.\").Action(a.onHelp)\n\t\tcmd.Arg(\"command\", \"Command name.\").String()\n\t\t\/\/ Make \"help\" command first in order. Also, Go's slice operations are woeful.\n\t\tl := len(a.commandOrder) - 1\n\t\ta.commandOrder = append(a.commandOrder[l:], a.commandOrder[:l]...)\n\t}\n\n\tif err := a.flagGroup.init(); err != nil {\n\t\treturn err\n\t}\n\tif err := a.cmdGroup.init(); err != nil {\n\t\treturn err\n\t}\n\tif err := a.argGroup.init(); err != nil {\n\t\treturn err\n\t}\n\tfor _, cmd := range a.commands {\n\t\tif err := cmd.init(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tflagGroups := []*flagGroup{a.flagGroup}\n\tfor _, cmd := range a.commandOrder {\n\t\tif err := checkDuplicateFlags(cmd, flagGroups); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\ta.initialized = true\n\treturn nil\n}\n\n\/\/ Recursively check commands for duplicate flags.\nfunc checkDuplicateFlags(current *CmdClause, flagGroups []*flagGroup) error {\n\t\/\/ Check for duplicates.\n\tfor _, flags := range flagGroups {\n\t\tfor _, flag := range current.flagOrder {\n\t\t\tif flag.shorthand != 0 {\n\t\t\t\tif _, ok := flags.short[string(flag.shorthand)]; ok {\n\t\t\t\t\treturn fmt.Errorf(\"duplicate short flag -%c\", flag.shorthand)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif flag.name != \"help\" {\n\t\t\t\tif _, ok := flags.long[flag.name]; ok {\n\t\t\t\t\treturn fmt.Errorf(\"duplicate long flag --%s\", flag.name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tflagGroups = append(flagGroups, current.flagGroup)\n\t\/\/ Check subcommands.\n\tfor _, subcmd := range current.commandOrder {\n\t\tif err := checkDuplicateFlags(subcmd, flagGroups); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *Application) onHelp(context *ParseContext) error {\n\tcandidates := []string{}\n\tfor {\n\t\ttoken := context.Peek()\n\t\tif token.Type == TokenArg {\n\t\t\tcandidates = append(candidates, token.String())\n\t\t\tcontext.Next()\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tvar cmd *CmdClause\n\tfor i := len(candidates); i > 0; i-- {\n\t\tcommand := strings.Join(candidates[:i], \" \")\n\t\tcmd = a.findCommand(command)\n\t\tif cmd != nil {\n\t\t\ta.CommandUsage(os.Stderr, command)\n\t\t\tbreak\n\t\t}\n\t}\n\tif cmd == nil {\n\t\ta.Usage(os.Stderr)\n\t}\n\tos.Exit(0)\n\treturn nil\n}\n\nfunc (a *Application) parse(context *ParseContext) (string, error) {\n\tcontext.mergeFlags(a.flagGroup)\n\n\tvar err error\n\terr = a.flagGroup.parse(context)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Parse arguments or commands.\n\tif a.argGroup.have() {\n\t\terr = a.argGroup.parse(context)\n\t} else if a.cmdGroup.have() {\n\t\t_, err = a.cmdGroup.parse(context)\n\t}\n\n\treturn a.execute(context)\n}\n\nfunc (a *Application) execute(context *ParseContext) (string, error) {\n\tvar err error\n\tselected := []string{}\n\n\tflagElements := map[string]*parseElement{}\n\tfor _, element := range context.elements {\n\t\tif element.isFlag() {\n\t\t\tflagElements[element.flag.name] = element\n\t\t}\n\t}\n\n\targElements := map[string]*parseElement{}\n\tfor _, element := range context.elements {\n\t\tif element.isArg() {\n\t\t\tflagElements[element.arg.name] = element\n\t\t}\n\t}\n\n\t\/\/ Check required flags and set defaults.\n\tfor _, flag := range context.flags.long {\n\t\tif flagElements[flag.name] == nil {\n\t\t\t\/\/ Check required flags were provided.\n\t\t\tif flag.needsValue() {\n\t\t\t\treturn \"\", fmt.Errorf(\"required flag --%s not provided\", flag.name)\n\t\t\t}\n\t\t\t\/\/ Set defaults, if any.\n\t\t\tif flag.defaultValue != \"\" {\n\t\t\t\tif err = flag.value.Set(flag.defaultValue); err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, arg := range context.arguments.args {\n\t\tif argElements[arg.name] == nil {\n\t\t\tif arg.required {\n\t\t\t\treturn \"\", fmt.Errorf(\"required argument '%s' not provided\", arg.name)\n\t\t\t}\n\t\t\t\/\/ Set defaults, if any.\n\t\t\tif arg.defaultValue != \"\" {\n\t\t\t\tif err = arg.value.Set(arg.defaultValue); err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Finally, apply everything.\n\t\/\/ - Set values for flags and dispatch actions.\n\t\/\/ - Set values for args and dispatch actions.\n\t\/\/ - Run command validators and dispatch actions.\n\tvar lastCmd *CmdClause\n\tfor _, element := range context.elements {\n\t\tswitch {\n\t\tcase element.isFlag():\n\t\t\tif err := element.flag.value.Set(*element.value); err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tif element.flag.dispatch != nil {\n\t\t\t\tif err := element.flag.dispatch(context); err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase element.isArg():\n\t\t\tif err := element.arg.value.Set(*element.value); err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tif element.arg.dispatch != nil {\n\t\t\t\tif err := element.arg.dispatch(context); err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase element.isCmd():\n\t\t\tif element.cmd.validator != nil {\n\t\t\t\tif err = element.cmd.validator(element.cmd); err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif element.cmd.dispatch != nil {\n\t\t\t\tif err := element.cmd.dispatch(context); err != nil {\n\t\t\t\t\treturn \"\", err\n\t\t\t\t}\n\t\t\t}\n\t\t\tselected = append(selected, element.cmd.name)\n\t\t\tlastCmd = element.cmd\n\t\t}\n\t}\n\n\tif lastCmd != nil && len(lastCmd.commands) > 0 {\n\t\treturn \"\", fmt.Errorf(\"must select a subcommand of '%s'\", lastCmd.FullCommand())\n\t}\n\n\tif a.validator != nil {\n\t\terr = a.validator(a)\n\t}\n\treturn strings.Join(selected, \" \"), err\n}\n\n\/\/ Errorf prints an error message to w.\nfunc (a *Application) Errorf(w io.Writer, format string, args ...interface{}) {\n\tfmt.Fprintf(w, a.Name+\": error: \"+format+\"\\n\", args...)\n}\n\nfunc (a *Application) Fatalf(w io.Writer, format string, args ...interface{}) {\n\ta.Errorf(w, format, args...)\n\tos.Exit(1)\n}\n\n\/\/ UsageErrorf prints an error message followed by usage information, then\n\/\/ exits with a non-zero status.\nfunc (a *Application) UsageErrorf(w io.Writer, format string, args ...interface{}) {\n\ta.Errorf(w, format, args...)\n\ta.Usage(w)\n\tos.Exit(1)\n}\n\n\/\/ FatalIfError prints an error and exits if err is not nil. The error is printed\n\/\/ with the given prefix.\nfunc (a *Application) FatalIfError(w io.Writer, err error, prefix string) {\n\tif err != nil {\n\t\tif prefix != \"\" {\n\t\t\tprefix += \": \"\n\t\t}\n\t\ta.Errorf(w, prefix+\"%s\", err)\n\t\tos.Exit(1)\n\t}\n}\n<commit_msg>Split execution into multiple passes.<commit_after>\/\/ Package commander is used to manage a set of command-line \"commands\", with\n\/\/ per-command flags and arguments.\n\/\/\n\/\/ Supports command like so:\n\/\/\n\/\/ <command> <required> [<optional> [<optional> ...]]\n\/\/ <command> <remainder...>\n\/\/\n\/\/ eg.\n\/\/\n\/\/ register [--name <name>] <nick>|<id>\n\/\/ post --channel|-a <channel> [--image <image>] [<text>]\n\/\/\n\/\/ var (\n\/\/ chat = commander.New()\n\/\/ debug = chat.Flag(\"debug\", \"enable debug mode\").Default(\"false\").Bool()\n\/\/\n\/\/ register = chat.Command(\"register\", \"Register a new user.\")\n\/\/ registerName = register.Flag(\"name\", \"name of user\").Required().String()\n\/\/ registerNick = register.Arg(\"nick\", \"nickname for user\").Required().String()\n\/\/\n\/\/ post = chat.Command(\"post\", \"Post a message to a channel.\")\n\/\/ postChannel = post.Flag(\"channel\", \"channel to post to\").Short('a').Required().String()\n\/\/ postImage = post.Flag(\"image\", \"image to post\").String()\n\/\/ )\n\/\/\n\npackage kingpin\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n)\n\ntype Dispatch func(*ParseContext) error\n\ntype ApplicationValidator func(*Application) error\n\n\/\/ An Application contains the definitions of flags, arguments and commands\n\/\/ for an application.\ntype Application struct {\n\t*flagGroup\n\t*argGroup\n\t*cmdGroup\n\tinitialized bool\n\tName string\n\tHelp string\n\tvalidator ApplicationValidator\n}\n\n\/\/ New creates a new Kingpin application instance.\nfunc New(name, help string) *Application {\n\ta := &Application{\n\t\tflagGroup: newFlagGroup(),\n\t\targGroup: newArgGroup(),\n\t\tName: name,\n\t\tHelp: help,\n\t}\n\ta.cmdGroup = newCmdGroup(a)\n\ta.Flag(\"help\", \"Show help.\").Action(a.onHelp).Bool()\n\treturn a\n}\n\n\/\/ Validate sets a validation function to run when parsing.\nfunc (a *Application) Validate(validator ApplicationValidator) *Application {\n\ta.validator = validator\n\treturn a\n}\n\n\/\/ Parse parses command-line arguments. It returns the selected command and an\n\/\/ error. The selected command will be a space separated subcommand, if\n\/\/ subcommands have been configured.\nfunc (a *Application) Parse(args []string) (command string, err error) {\n\tif err := a.init(); err != nil {\n\t\treturn \"\", err\n\t}\n\tcontext := tokenize(args)\n\tcommand, err = a.parse(context)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif !context.EOL() {\n\t\treturn \"\", fmt.Errorf(\"unexpected argument '%s'\", context.Peek())\n\t}\n\n\treturn command, err\n}\n\n\/\/ Version adds a --version flag for displaying the application version.\nfunc (a *Application) Version(version string) *Application {\n\ta.Flag(\"version\", \"Show application version.\").Action(func(*ParseContext) error {\n\t\tfmt.Println(version)\n\t\tos.Exit(0)\n\t\treturn nil\n\t}).Bool()\n\treturn a\n}\n\n\/\/ Command adds a new top-level command.\nfunc (a *Application) Command(name, help string) *CmdClause {\n\treturn a.addCommand(name, help)\n}\n\nfunc (a *Application) init() error {\n\tif a.initialized {\n\t\treturn nil\n\t}\n\tif a.cmdGroup.have() && a.argGroup.have() {\n\t\treturn fmt.Errorf(\"can't mix top-level Arg()s with Command()s\")\n\t}\n\n\tif len(a.commands) > 0 {\n\t\tcmd := a.Command(\"help\", \"Show help for a command.\").Action(a.onHelp)\n\t\tcmd.Arg(\"command\", \"Command name.\").String()\n\t\t\/\/ Make \"help\" command first in order. Also, Go's slice operations are woeful.\n\t\tl := len(a.commandOrder) - 1\n\t\ta.commandOrder = append(a.commandOrder[l:], a.commandOrder[:l]...)\n\t}\n\n\tif err := a.flagGroup.init(); err != nil {\n\t\treturn err\n\t}\n\tif err := a.cmdGroup.init(); err != nil {\n\t\treturn err\n\t}\n\tif err := a.argGroup.init(); err != nil {\n\t\treturn err\n\t}\n\tfor _, cmd := range a.commands {\n\t\tif err := cmd.init(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tflagGroups := []*flagGroup{a.flagGroup}\n\tfor _, cmd := range a.commandOrder {\n\t\tif err := checkDuplicateFlags(cmd, flagGroups); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\ta.initialized = true\n\treturn nil\n}\n\n\/\/ Recursively check commands for duplicate flags.\nfunc checkDuplicateFlags(current *CmdClause, flagGroups []*flagGroup) error {\n\t\/\/ Check for duplicates.\n\tfor _, flags := range flagGroups {\n\t\tfor _, flag := range current.flagOrder {\n\t\t\tif flag.shorthand != 0 {\n\t\t\t\tif _, ok := flags.short[string(flag.shorthand)]; ok {\n\t\t\t\t\treturn fmt.Errorf(\"duplicate short flag -%c\", flag.shorthand)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif flag.name != \"help\" {\n\t\t\t\tif _, ok := flags.long[flag.name]; ok {\n\t\t\t\t\treturn fmt.Errorf(\"duplicate long flag --%s\", flag.name)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tflagGroups = append(flagGroups, current.flagGroup)\n\t\/\/ Check subcommands.\n\tfor _, subcmd := range current.commandOrder {\n\t\tif err := checkDuplicateFlags(subcmd, flagGroups); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *Application) onHelp(context *ParseContext) error {\n\tcandidates := []string{}\n\tfor {\n\t\ttoken := context.Peek()\n\t\tif token.Type == TokenArg {\n\t\t\tcandidates = append(candidates, token.String())\n\t\t\tcontext.Next()\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tvar cmd *CmdClause\n\tfor i := len(candidates); i > 0; i-- {\n\t\tcommand := strings.Join(candidates[:i], \" \")\n\t\tcmd = a.findCommand(command)\n\t\tif cmd != nil {\n\t\t\ta.CommandUsage(os.Stderr, command)\n\t\t\tbreak\n\t\t}\n\t}\n\tif cmd == nil {\n\t\ta.Usage(os.Stderr)\n\t}\n\tos.Exit(0)\n\treturn nil\n}\n\nfunc (a *Application) parse(context *ParseContext) (string, error) {\n\tcontext.mergeFlags(a.flagGroup)\n\n\tvar err error\n\terr = a.flagGroup.parse(context)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t\/\/ Parse arguments or commands.\n\tif a.argGroup.have() {\n\t\terr = a.argGroup.parse(context)\n\t} else if a.cmdGroup.have() {\n\t\t_, err = a.cmdGroup.parse(context)\n\t}\n\n\treturn a.execute(context)\n}\n\nfunc (a *Application) execute(context *ParseContext) (string, error) {\n\tvar err error\n\tselected := []string{}\n\n\tif err = a.setDefaults(context); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tselected, err = a.setValues(context)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err = a.applyValidators(context); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err = a.applyActions(context); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn strings.Join(selected, \" \"), err\n}\n\nfunc (a *Application) setDefaults(context *ParseContext) error {\n\tflagElements := map[string]*parseElement{}\n\tfor _, element := range context.elements {\n\t\tif element.isFlag() {\n\t\t\tflagElements[element.flag.name] = element\n\t\t}\n\t}\n\n\targElements := map[string]*parseElement{}\n\tfor _, element := range context.elements {\n\t\tif element.isArg() {\n\t\t\tflagElements[element.arg.name] = element\n\t\t}\n\t}\n\n\t\/\/ Check required flags and set defaults.\n\tfor _, flag := range context.flags.long {\n\t\tif flagElements[flag.name] == nil {\n\t\t\t\/\/ Check required flags were provided.\n\t\t\tif flag.needsValue() {\n\t\t\t\treturn fmt.Errorf(\"required flag --%s not provided\", flag.name)\n\t\t\t}\n\t\t\t\/\/ Set defaults, if any.\n\t\t\tif flag.defaultValue != \"\" {\n\t\t\t\tif err := flag.value.Set(flag.defaultValue); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, arg := range context.arguments.args {\n\t\tif argElements[arg.name] == nil {\n\t\t\tif arg.required {\n\t\t\t\treturn fmt.Errorf(\"required argument '%s' not provided\", arg.name)\n\t\t\t}\n\t\t\t\/\/ Set defaults, if any.\n\t\t\tif arg.defaultValue != \"\" {\n\t\t\t\tif err := arg.value.Set(arg.defaultValue); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (a *Application) setValues(context *ParseContext) (selected []string, err error) {\n\t\/\/ Set all arg and flag values.\n\tvar lastCmd *CmdClause\n\tfor _, element := range context.elements {\n\t\tswitch {\n\t\tcase element.isFlag():\n\t\t\tif err = element.flag.value.Set(*element.value); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase element.isArg():\n\t\t\tif err = element.arg.value.Set(*element.value); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase element.isCmd():\n\t\t\tif element.cmd.validator != nil {\n\t\t\t\tif err = element.cmd.validator(element.cmd); err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tselected = append(selected, element.cmd.name)\n\t\t\tlastCmd = element.cmd\n\t\t}\n\t}\n\n\tif lastCmd != nil && len(lastCmd.commands) > 0 {\n\t\treturn nil, fmt.Errorf(\"must select a subcommand of '%s'\", lastCmd.FullCommand())\n\t}\n\n\treturn\n}\n\nfunc (a *Application) applyValidators(context *ParseContext) (err error) {\n\t\/\/ Call command validation functions.\n\tfor _, element := range context.elements {\n\t\tswitch {\n\t\tcase element.isCmd():\n\t\t\tif element.cmd.validator != nil {\n\t\t\t\tif err = element.cmd.validator(element.cmd); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif a.validator != nil {\n\t\terr = a.validator(a)\n\t}\n\treturn err\n}\n\nfunc (a *Application) applyActions(context *ParseContext) error {\n\t\/\/ Action to actions.\n\tfor _, element := range context.elements {\n\t\tswitch {\n\t\tcase element.isFlag():\n\t\t\tif element.flag.dispatch != nil {\n\t\t\t\tif err := element.flag.dispatch(context); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase element.isArg():\n\t\t\tif element.arg.dispatch != nil {\n\t\t\t\tif err := element.arg.dispatch(context); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase element.isCmd():\n\t\t\tif element.cmd.dispatch != nil {\n\t\t\t\tif err := element.cmd.dispatch(context); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Errorf prints an error message to w.\nfunc (a *Application) Errorf(w io.Writer, format string, args ...interface{}) {\n\tfmt.Fprintf(w, a.Name+\": error: \"+format+\"\\n\", args...)\n}\n\nfunc (a *Application) Fatalf(w io.Writer, format string, args ...interface{}) {\n\ta.Errorf(w, format, args...)\n\tos.Exit(1)\n}\n\n\/\/ UsageErrorf prints an error message followed by usage information, then\n\/\/ exits with a non-zero status.\nfunc (a *Application) UsageErrorf(w io.Writer, format string, args ...interface{}) {\n\ta.Errorf(w, format, args...)\n\ta.Usage(w)\n\tos.Exit(1)\n}\n\n\/\/ FatalIfError prints an error and exits if err is not nil. The error is printed\n\/\/ with the given prefix.\nfunc (a *Application) FatalIfError(w io.Writer, err error, prefix string) {\n\tif err != nil {\n\t\tif prefix != \"\" {\n\t\t\tprefix += \": \"\n\t\t}\n\t\ta.Errorf(w, prefix+\"%s\", err)\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n\n\t\"github.com\/nsf\/termbox-go\"\n)\n\ntype App struct {\n\tui *UI\n\tnav *Nav\n\tquit chan bool\n}\n\nfunc newApp() *App {\n\tui := newUI()\n\tnav := newNav(ui.wins[0].h)\n\tquit := make(chan bool)\n\n\treturn &App{\n\t\tui: ui,\n\t\tnav: nav,\n\t\tquit: quit,\n\t}\n}\n\nfunc waitKey() error {\n\t\/\/ TODO: this should be done with termbox somehow\n\n\tc := `echo\n\t echo -n 'Press any key to continue'\n\t old=$(stty -g)\n\t stty raw -echo\n\t eval \"ignore=\\$(dd bs=1 count=1 2> \/dev\/null)\"\n\t stty $old\n\t echo`\n\n\tcmd := exec.Command(gOpts.shell, \"-c\", c)\n\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\tif err := cmd.Run(); err != nil {\n\t\treturn fmt.Errorf(\"waiting key: %s\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (app *App) readExpr() chan MultiExpr {\n\tch := make(chan MultiExpr)\n\n\trenew := &CallExpr{\"renew\", nil}\n\tcount := 1\n\n\tvar acc []rune\n\tvar cnt []rune\n\n\tgo func() {\n\t\tfor {\n\t\t\tswitch ev := app.ui.pollEvent(); ev.Type {\n\t\t\tcase termbox.EventKey:\n\t\t\t\tif ev.Ch != 0 {\n\t\t\t\t\tswitch {\n\t\t\t\t\tcase ev.Ch == '<':\n\t\t\t\t\t\tacc = append(acc, '<', 'l', 't', '>')\n\t\t\t\t\tcase ev.Ch == '>':\n\t\t\t\t\t\tacc = append(acc, '<', 'g', 't', '>')\n\t\t\t\t\t\/\/ Interpret digits as command count but only do this for\n\t\t\t\t\t\/\/ digits preceding any non-digit characters\n\t\t\t\t\t\/\/ (e.g. \"42y2k\" as 42 times \"y2k\").\n\t\t\t\t\tcase unicode.IsDigit(ev.Ch) && len(acc) == 0:\n\t\t\t\t\t\tcnt = append(cnt, ev.Ch)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tacc = append(acc, ev.Ch)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tval := gKeyVal[ev.Key]\n\t\t\t\t\tif string(val) == \"<esc>\" {\n\t\t\t\t\t\tch <- MultiExpr{renew, 1}\n\t\t\t\t\t\tacc = nil\n\t\t\t\t\t\tcnt = nil\n\t\t\t\t\t}\n\t\t\t\t\tacc = append(acc, val...)\n\t\t\t\t}\n\n\t\t\t\tbinds, ok := findBinds(gOpts.keys, string(acc))\n\n\t\t\t\tswitch len(binds) {\n\t\t\t\tcase 0:\n\t\t\t\t\tapp.ui.message = fmt.Sprintf(\"unknown mapping: %s\", string(acc))\n\t\t\t\t\tch <- MultiExpr{renew, 1}\n\t\t\t\t\tacc = nil\n\t\t\t\t\tcnt = nil\n\t\t\t\tcase 1:\n\t\t\t\t\tif ok {\n\t\t\t\t\t\tif len(cnt) > 0 {\n\t\t\t\t\t\t\tc, err := strconv.Atoi(string(cnt))\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tlog.Printf(\"converting command count: %s\", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcount = c\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tcount = 1\n\t\t\t\t\t\t}\n\t\t\t\t\t\texpr := gOpts.keys[string(acc)]\n\t\t\t\t\t\tswitch expr.(type) {\n\t\t\t\t\t\tcase *CallExpr:\n\t\t\t\t\t\t\tswitch expr.(*CallExpr).name {\n\t\t\t\t\t\t\tcase \"read\",\n\t\t\t\t\t\t\t\t\"read-shell\",\n\t\t\t\t\t\t\t\t\"read-shell-wait\",\n\t\t\t\t\t\t\t\t\"read-shell-async\",\n\t\t\t\t\t\t\t\t\"push\":\n\t\t\t\t\t\t\t\texpr.eval(app, nil)\n\t\t\t\t\t\t\t\tapp.ui.draw(app.nav)\n\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\tch <- MultiExpr{expr, count}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tch <- MultiExpr{expr, count}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tacc = nil\n\t\t\t\t\t\tcnt = nil\n\t\t\t\t\t}\n\t\t\t\t\tif len(acc) > 0 {\n\t\t\t\t\t\tapp.ui.listBinds(binds)\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\tif ok {\n\t\t\t\t\t\t\/\/ TODO: use a delay\n\t\t\t\t\t\tif len(cnt) > 0 {\n\t\t\t\t\t\t\tc, err := strconv.Atoi(string(cnt))\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tlog.Printf(\"converting command count: %s\", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcount = c\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tcount = 1\n\t\t\t\t\t\t}\n\t\t\t\t\t\texpr := gOpts.keys[string(acc)]\n\t\t\t\t\t\tswitch expr.(type) {\n\t\t\t\t\t\tcase *CallExpr:\n\t\t\t\t\t\t\tswitch expr.(*CallExpr).name {\n\t\t\t\t\t\t\tcase \"read\",\n\t\t\t\t\t\t\t\t\"read-shell\",\n\t\t\t\t\t\t\t\t\"read-shell-wait\",\n\t\t\t\t\t\t\t\t\"read-shell-async\",\n\t\t\t\t\t\t\t\t\"push\":\n\t\t\t\t\t\t\t\texpr.eval(app, nil)\n\t\t\t\t\t\t\t\tapp.ui.draw(app.nav)\n\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\tch <- MultiExpr{expr, count}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tch <- MultiExpr{expr, count}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tacc = nil\n\t\t\t\t\t\tcnt = nil\n\t\t\t\t\t}\n\t\t\t\t\tif len(acc) > 0 {\n\t\t\t\t\t\tapp.ui.listBinds(binds)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase termbox.EventResize:\n\t\t\t\tch <- MultiExpr{renew, 1}\n\t\t\tdefault:\n\t\t\t\t\/\/ TODO: handle other events\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn ch\n}\n\nfunc (app *App) handleInp() {\n\tclientChan := app.readExpr()\n\n\tvar serverChan chan Expr\n\n\tc, err := net.Dial(\"unix\", gSocketPath)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"connecting server: %s\", err)\n\t\tapp.ui.message = msg\n\t\tlog.Printf(msg)\n\t} else {\n\t\tserverChan = readExpr(c)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-app.quit:\n\t\t\tlog.Print(\"bye!\")\n\n\t\t\tif gLastDirPath != \"\" {\n\t\t\t\tf, err := os.Create(gLastDirPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"opening last dir file: %s\", err)\n\t\t\t\t}\n\t\t\t\tdefer f.Close()\n\n\t\t\t\tdir := app.nav.currDir()\n\n\t\t\t\t_, err = f.WriteString(dir.path)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"writing last dir file: %s\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn\n\t\tcase e := <-clientChan:\n\t\t\tfor i := 0; i < e.count; i++ {\n\t\t\t\te.expr.eval(app, nil)\n\t\t\t}\n\t\t\tapp.ui.draw(app.nav)\n\t\tcase e := <-serverChan:\n\t\t\te.eval(app, nil)\n\t\t\tapp.ui.draw(app.nav)\n\t\t}\n\t}\n}\n\nfunc (app *App) exportVars() {\n\tvar envFile string\n\tif f, err := app.nav.currFile(); err == nil {\n\t\tenvFile = f.Path\n\t}\n\n\tmarks := app.nav.currMarks()\n\n\tenvFiles := strings.Join(marks, \":\")\n\n\tos.Setenv(\"f\", envFile)\n\tos.Setenv(\"fs\", envFiles)\n\n\tif len(marks) == 0 {\n\t\tos.Setenv(\"fx\", envFile)\n\t} else {\n\t\tos.Setenv(\"fx\", envFiles)\n\t}\n\n\tos.Setenv(\"id\", strconv.Itoa(gClientId))\n}\n\n\/\/ This function is used to run a command in shell. Following modes are used:\n\/\/\n\/\/ Prefix Wait Async Stdin\/Stdout\/Stderr UI action\n\/\/ $ No No Yes Pause and then resume\n\/\/ ! Yes No Yes Pause and then resume\n\/\/ & No Yes No Do nothing\n\/\/\n\/\/ Waiting async commands are not used for now.\nfunc (app *App) runShell(s string, args []string, wait bool, async bool) {\n\tapp.exportVars()\n\n\tif len(gOpts.ifs) != 0 {\n\t\ts = fmt.Sprintf(\"IFS='%s'; %s\", gOpts.ifs, s)\n\t}\n\n\targs = append([]string{\"-c\", s, \"--\"}, args...)\n\tcmd := exec.Command(gOpts.shell, args...)\n\n\tif !async {\n\t\tcmd.Stdin = os.Stdin\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\n\t\tapp.ui.pause()\n\t\tdefer app.ui.resume()\n\t\tdefer app.nav.renew(app.ui.wins[0].h)\n\t}\n\n\tvar err error\n\tif async {\n\t\terr = cmd.Start()\n\t} else {\n\t\terr = cmd.Run()\n\t}\n\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"running shell: %s\", err)\n\t\tapp.ui.message = msg\n\t\tlog.Print(msg)\n\t}\n\n\tif wait {\n\t\tif err := waitKey(); err != nil {\n\t\t\tmsg := fmt.Sprintf(\"waiting shell: %s\", err)\n\t\t\tapp.ui.message = msg\n\t\t\tlog.Print(msg)\n\t\t}\n\t}\n}\n<commit_msg>comment<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n\n\t\"github.com\/nsf\/termbox-go\"\n)\n\ntype App struct {\n\tui *UI\n\tnav *Nav\n\tquit chan bool\n}\n\nfunc newApp() *App {\n\tui := newUI()\n\tnav := newNav(ui.wins[0].h)\n\tquit := make(chan bool)\n\n\treturn &App{\n\t\tui: ui,\n\t\tnav: nav,\n\t\tquit: quit,\n\t}\n}\n\nfunc waitKey() error {\n\t\/\/ TODO: this should be done with termbox somehow\n\n\tc := `echo\n\t echo -n 'Press any key to continue'\n\t old=$(stty -g)\n\t stty raw -echo\n\t eval \"ignore=\\$(dd bs=1 count=1 2> \/dev\/null)\"\n\t stty $old\n\t echo`\n\n\tcmd := exec.Command(gOpts.shell, \"-c\", c)\n\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\tif err := cmd.Run(); err != nil {\n\t\treturn fmt.Errorf(\"waiting key: %s\", err)\n\t}\n\n\treturn nil\n}\n\n\/\/ This function is used to read expressions on the client side. Prompting\n\/\/ commands (e.g. \"read\") are recognized and evaluated while being read here.\n\/\/ Digits are interpreted as command counts but this is only done for digits\n\/\/ preceding any non-digit characters (e.g. \"42y2k\" as 42 times \"y2k\").\nfunc (app *App) readExpr() chan MultiExpr {\n\tch := make(chan MultiExpr)\n\n\trenew := &CallExpr{\"renew\", nil}\n\tcount := 1\n\n\tvar acc []rune\n\tvar cnt []rune\n\n\tgo func() {\n\t\tfor {\n\t\t\tswitch ev := app.ui.pollEvent(); ev.Type {\n\t\t\tcase termbox.EventKey:\n\t\t\t\tif ev.Ch != 0 {\n\t\t\t\t\tswitch {\n\t\t\t\t\tcase ev.Ch == '<':\n\t\t\t\t\t\tacc = append(acc, '<', 'l', 't', '>')\n\t\t\t\t\tcase ev.Ch == '>':\n\t\t\t\t\t\tacc = append(acc, '<', 'g', 't', '>')\n\t\t\t\t\tcase unicode.IsDigit(ev.Ch) && len(acc) == 0:\n\t\t\t\t\t\tcnt = append(cnt, ev.Ch)\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tacc = append(acc, ev.Ch)\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tval := gKeyVal[ev.Key]\n\t\t\t\t\tif string(val) == \"<esc>\" {\n\t\t\t\t\t\tch <- MultiExpr{renew, 1}\n\t\t\t\t\t\tacc = nil\n\t\t\t\t\t\tcnt = nil\n\t\t\t\t\t}\n\t\t\t\t\tacc = append(acc, val...)\n\t\t\t\t}\n\n\t\t\t\tbinds, ok := findBinds(gOpts.keys, string(acc))\n\n\t\t\t\tswitch len(binds) {\n\t\t\t\tcase 0:\n\t\t\t\t\tapp.ui.message = fmt.Sprintf(\"unknown mapping: %s\", string(acc))\n\t\t\t\t\tch <- MultiExpr{renew, 1}\n\t\t\t\t\tacc = nil\n\t\t\t\t\tcnt = nil\n\t\t\t\tcase 1:\n\t\t\t\t\tif ok {\n\t\t\t\t\t\tif len(cnt) > 0 {\n\t\t\t\t\t\t\tc, err := strconv.Atoi(string(cnt))\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tlog.Printf(\"converting command count: %s\", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcount = c\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tcount = 1\n\t\t\t\t\t\t}\n\t\t\t\t\t\texpr := gOpts.keys[string(acc)]\n\t\t\t\t\t\tswitch expr.(type) {\n\t\t\t\t\t\tcase *CallExpr:\n\t\t\t\t\t\t\tswitch expr.(*CallExpr).name {\n\t\t\t\t\t\t\tcase \"read\",\n\t\t\t\t\t\t\t\t\"read-shell\",\n\t\t\t\t\t\t\t\t\"read-shell-wait\",\n\t\t\t\t\t\t\t\t\"read-shell-async\",\n\t\t\t\t\t\t\t\t\"push\":\n\t\t\t\t\t\t\t\texpr.eval(app, nil)\n\t\t\t\t\t\t\t\tapp.ui.draw(app.nav)\n\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\tch <- MultiExpr{expr, count}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tch <- MultiExpr{expr, count}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tacc = nil\n\t\t\t\t\t\tcnt = nil\n\t\t\t\t\t}\n\t\t\t\t\tif len(acc) > 0 {\n\t\t\t\t\t\tapp.ui.listBinds(binds)\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\tif ok {\n\t\t\t\t\t\t\/\/ TODO: use a delay\n\t\t\t\t\t\tif len(cnt) > 0 {\n\t\t\t\t\t\t\tc, err := strconv.Atoi(string(cnt))\n\t\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\t\tlog.Printf(\"converting command count: %s\", err)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tcount = c\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tcount = 1\n\t\t\t\t\t\t}\n\t\t\t\t\t\texpr := gOpts.keys[string(acc)]\n\t\t\t\t\t\tswitch expr.(type) {\n\t\t\t\t\t\tcase *CallExpr:\n\t\t\t\t\t\t\tswitch expr.(*CallExpr).name {\n\t\t\t\t\t\t\tcase \"read\",\n\t\t\t\t\t\t\t\t\"read-shell\",\n\t\t\t\t\t\t\t\t\"read-shell-wait\",\n\t\t\t\t\t\t\t\t\"read-shell-async\",\n\t\t\t\t\t\t\t\t\"push\":\n\t\t\t\t\t\t\t\texpr.eval(app, nil)\n\t\t\t\t\t\t\t\tapp.ui.draw(app.nav)\n\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\tch <- MultiExpr{expr, count}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tch <- MultiExpr{expr, count}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tacc = nil\n\t\t\t\t\t\tcnt = nil\n\t\t\t\t\t}\n\t\t\t\t\tif len(acc) > 0 {\n\t\t\t\t\t\tapp.ui.listBinds(binds)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase termbox.EventResize:\n\t\t\t\tch <- MultiExpr{renew, 1}\n\t\t\tdefault:\n\t\t\t\t\/\/ TODO: handle other events\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn ch\n}\n\n\/\/ This is the main event loop of the application. There are two channels to\n\/\/ read expressions from client and server. Reading and evaluation are done on\n\/\/ different goroutines except for prompting commands (e.g. \"read\"). Quitting\n\/\/ commands should create separate goroutines to prevent deadlock here.\nfunc (app *App) handleInp() {\n\tclientChan := app.readExpr()\n\n\tvar serverChan chan Expr\n\n\tc, err := net.Dial(\"unix\", gSocketPath)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"connecting server: %s\", err)\n\t\tapp.ui.message = msg\n\t\tlog.Printf(msg)\n\t} else {\n\t\tserverChan = readExpr(c)\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-app.quit:\n\t\t\tlog.Print(\"bye!\")\n\n\t\t\tif gLastDirPath != \"\" {\n\t\t\t\tf, err := os.Create(gLastDirPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"opening last dir file: %s\", err)\n\t\t\t\t}\n\t\t\t\tdefer f.Close()\n\n\t\t\t\tdir := app.nav.currDir()\n\n\t\t\t\t_, err = f.WriteString(dir.path)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"writing last dir file: %s\", err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn\n\t\tcase e := <-clientChan:\n\t\t\tfor i := 0; i < e.count; i++ {\n\t\t\t\te.expr.eval(app, nil)\n\t\t\t}\n\t\t\tapp.ui.draw(app.nav)\n\t\tcase e := <-serverChan:\n\t\t\te.eval(app, nil)\n\t\t\tapp.ui.draw(app.nav)\n\t\t}\n\t}\n}\n\nfunc (app *App) exportVars() {\n\tvar envFile string\n\tif f, err := app.nav.currFile(); err == nil {\n\t\tenvFile = f.Path\n\t}\n\n\tmarks := app.nav.currMarks()\n\n\tenvFiles := strings.Join(marks, \":\")\n\n\tos.Setenv(\"f\", envFile)\n\tos.Setenv(\"fs\", envFiles)\n\n\tif len(marks) == 0 {\n\t\tos.Setenv(\"fx\", envFile)\n\t} else {\n\t\tos.Setenv(\"fx\", envFiles)\n\t}\n\n\tos.Setenv(\"id\", strconv.Itoa(gClientId))\n}\n\n\/\/ This function is used to run a command in shell. Following modes are used:\n\/\/\n\/\/ Prefix Wait Async Stdin\/Stdout\/Stderr UI action\n\/\/ $ No No Yes Pause and then resume\n\/\/ ! Yes No Yes Pause and then resume\n\/\/ & No Yes No Do nothing\n\/\/\n\/\/ Waiting async commands are not used for now.\nfunc (app *App) runShell(s string, args []string, wait bool, async bool) {\n\tapp.exportVars()\n\n\tif len(gOpts.ifs) != 0 {\n\t\ts = fmt.Sprintf(\"IFS='%s'; %s\", gOpts.ifs, s)\n\t}\n\n\targs = append([]string{\"-c\", s, \"--\"}, args...)\n\tcmd := exec.Command(gOpts.shell, args...)\n\n\tif !async {\n\t\tcmd.Stdin = os.Stdin\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\n\t\tapp.ui.pause()\n\t\tdefer app.ui.resume()\n\t\tdefer app.nav.renew(app.ui.wins[0].h)\n\t}\n\n\tvar err error\n\tif async {\n\t\terr = cmd.Start()\n\t} else {\n\t\terr = cmd.Run()\n\t}\n\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"running shell: %s\", err)\n\t\tapp.ui.message = msg\n\t\tlog.Print(msg)\n\t}\n\n\tif wait {\n\t\tif err := waitKey(); err != nil {\n\t\t\tmsg := fmt.Sprintf(\"waiting shell: %s\", err)\n\t\t\tapp.ui.message = msg\n\t\t\tlog.Print(msg)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package cache\n\nimport (\n\t\"context\"\n\t\"sort\"\n\t\"sync\"\n\n\t\"github.com\/grafana\/metrictank\/mdata\/cache\/accnt\"\n\t\"github.com\/grafana\/metrictank\/mdata\/chunk\"\n\topentracing \"github.com\/opentracing\/opentracing-go\"\n\t\"github.com\/raintank\/worldping-api\/pkg\/log\"\n\t\"gopkg.in\/raintank\/schema.v1\"\n)\n\n\/\/ CCacheMetric caches data chunks\ntype CCacheMetric struct {\n\tsync.RWMutex\n\n\t\/\/ cached data chunks by timestamp\n\tchunks map[uint32]*CCacheChunk\n\n\t\/\/ chunk time stamps in ascending order\n\tkeys []uint32\n\n\tMKey schema.MKey\n}\n\n\/\/ NewCCacheMetric creates a CCacheMetric\nfunc NewCCacheMetric(mkey schema.MKey) *CCacheMetric {\n\treturn &CCacheMetric{\n\t\tMKey: mkey,\n\t\tchunks: make(map[uint32]*CCacheChunk),\n\t}\n}\n\n\/\/ Del deletes chunks for the given timestamp\nfunc (mc *CCacheMetric) Del(ts uint32) int {\n\tmc.Lock()\n\tdefer mc.Unlock()\n\n\tif _, ok := mc.chunks[ts]; !ok {\n\t\treturn len(mc.chunks)\n\t}\n\n\tprev := mc.chunks[ts].Prev\n\tnext := mc.chunks[ts].Next\n\n\tif prev != 0 {\n\t\tif _, ok := mc.chunks[prev]; ok {\n\t\t\tmc.chunks[prev].Next = 0\n\t\t}\n\t}\n\n\tif next != 0 {\n\t\tif _, ok := mc.chunks[next]; ok {\n\t\t\tmc.chunks[next].Prev = 0\n\t\t}\n\t}\n\n\tdelete(mc.chunks, ts)\n\n\t\/\/ regenerate the list of sorted keys after deleting a chunk\n\t\/\/ NOTE: we can improve perf by just taking out the ts (partially rewriting\n\t\/\/ the slice in one go), can we also batch deletes?\n\tmc.generateKeys()\n\n\treturn len(mc.chunks)\n}\n\n\/\/ AddRange adds a range (sequence) of chunks.\n\/\/ Note the following requirements:\n\/\/ the sequence should be in ascending timestamp order\n\/\/ the sequence should be complete (no gaps)\nfunc (mc *CCacheMetric) AddRange(prev uint32, itergens []chunk.IterGen) {\n\tif len(itergens) == 0 {\n\t\treturn\n\t}\n\n\tif len(itergens) == 1 {\n\t\tmc.Add(prev, itergens[0])\n\t\treturn\n\t}\n\n\tmc.Lock()\n\tdefer mc.Unlock()\n\n\t\/\/ pre-allocate 1 slice, cheaper than allocating one by one\n\tchunks := make([]CCacheChunk, 0, len(itergens))\n\n\t\/\/ handle the first one\n\titergen := itergens[0]\n\tts := itergen.Ts\n\n\t\/\/ if we add data that is older than chunks already cached,\n\t\/\/ we will have to sort the keys once we're done adding them\n\tsortKeys := len(mc.keys) > 0 && mc.keys[len(mc.keys)-1] > ts\n\n\t\/\/ add chunk if we don't have it yet (most likely)\n\tif _, ok := mc.chunks[ts]; !ok {\n\n\t\t\/\/ if previous chunk has not been passed we try to be smart and figure it out.\n\t\t\/\/ this is common in a scenario where a metric continuously gets queried\n\t\t\/\/ for a range that starts less than one chunkspan before now().\n\t\tif prev == 0 {\n\t\t\tres, ok := mc.seekDesc(ts - 1)\n\t\t\tif ok {\n\t\t\t\tprev = res\n\t\t\t}\n\t\t}\n\n\t\t\/\/ if the previous chunk is cached, link it\n\t\tif _, ok := mc.chunks[prev]; ok {\n\t\t\tmc.chunks[prev].Next = ts\n\t\t} else {\n\t\t\tprev = 0\n\t\t}\n\n\t\tchunks = append(chunks, CCacheChunk{\n\t\t\tTs: ts,\n\t\t\tPrev: prev,\n\t\t\tNext: itergens[1].Ts,\n\t\t\tItgen: itergen,\n\t\t})\n\t\tmc.chunks[ts] = &chunks[len(chunks)-1]\n\t\tmc.keys = append(mc.keys, ts)\n\t} else {\n\t\tmc.chunks[ts].Next = itergens[1].Ts\n\t}\n\n\tprev = ts\n\n\t\/\/ handle the 2nd until the last-but-one\n\tfor i := 1; i < len(itergens)-1; i++ {\n\t\titergen = itergens[i]\n\t\tts = itergen.Ts\n\t\t\/\/ add chunk, potentially overwriting pre-existing chunk (unlikely)\n\t\tchunks = append(chunks, CCacheChunk{\n\t\t\tTs: ts,\n\t\t\tPrev: prev,\n\t\t\tNext: itergens[i+1].Ts,\n\t\t\tItgen: itergen,\n\t\t})\n\t\tmc.chunks[ts] = &chunks[len(chunks)-1]\n\t\tmc.keys = append(mc.keys, ts)\n\n\t\tprev = ts\n\t}\n\n\t\/\/ handle the last one\n\titergen = itergens[len(itergens)-1]\n\tts = itergen.Ts\n\n\t\/\/ add chunk if we don't have it yet (most likely)\n\tif _, ok := mc.chunks[ts]; !ok {\n\n\t\t\/\/ if nextTs() can't figure out the end date it returns ts\n\t\tnext := mc.nextTsCore(itergen, prev, 0)\n\t\tif next == ts {\n\t\t\tnext = 0\n\t\t} else {\n\t\t\t\/\/ if the next chunk is cached, link in both directions\n\t\t\tif _, ok := mc.chunks[next]; ok {\n\t\t\t\tmc.chunks[next].Prev = ts\n\t\t\t} else {\n\t\t\t\tnext = 0\n\t\t\t}\n\t\t}\n\n\t\tchunks = append(chunks, CCacheChunk{\n\t\t\tTs: ts,\n\t\t\tPrev: prev,\n\t\t\tNext: next,\n\t\t\tItgen: itergen,\n\t\t})\n\t\tmc.chunks[ts] = &chunks[len(chunks)-1]\n\t\tmc.keys = append(mc.keys, ts)\n\t}\n\n\tif sortKeys {\n\t\tsort.Sort(accnt.Uint32Asc(mc.keys))\n\t}\n\n\treturn\n}\n\n\/\/ Add adds a chunk to the cache\nfunc (mc *CCacheMetric) Add(prev uint32, itergen chunk.IterGen) {\n\tts := itergen.Ts\n\n\tmc.Lock()\n\tdefer mc.Unlock()\n\n\tif _, ok := mc.chunks[ts]; ok {\n\t\t\/\/ chunk is already present. no need to error on that, just ignore it\n\t\treturn\n\t}\n\n\tmc.chunks[ts] = &CCacheChunk{\n\t\tTs: ts,\n\t\tPrev: 0,\n\t\tNext: 0,\n\t\tItgen: itergen,\n\t}\n\n\tnextTs := mc.nextTs(ts)\n\n\tlog.Debug(\"CCacheMetric Add: caching chunk ts %d, nextTs %d\", ts, nextTs)\n\n\t\/\/ if previous chunk has not been passed we try to be smart and figure it out.\n\t\/\/ this is common in a scenario where a metric continuously gets queried\n\t\/\/ for a range that starts less than one chunkspan before now().\n\tif prev == 0 {\n\t\tres, ok := mc.seekDesc(ts - 1)\n\t\tif ok {\n\t\t\tprev = res\n\t\t}\n\t}\n\n\t\/\/ if the previous chunk is cached, link in both directions\n\tif _, ok := mc.chunks[prev]; ok {\n\t\tmc.chunks[prev].Next = ts\n\t\tmc.chunks[ts].Prev = prev\n\t}\n\n\t\/\/ if nextTs() can't figure out the end date it returns ts\n\tif nextTs > ts {\n\t\t\/\/ if the next chunk is cached, link in both directions\n\t\tif _, ok := mc.chunks[nextTs]; ok {\n\t\t\tmc.chunks[nextTs].Prev = ts\n\t\t\tmc.chunks[ts].Next = nextTs\n\t\t}\n\t}\n\n\t\/\/ assure key is added to mc.keys\n\n\t\/\/ if no keys yet, just add it and it's sorted\n\tif len(mc.keys) == 0 {\n\t\tmc.keys = append(mc.keys, ts)\n\t\treturn\n\t}\n\n\t\/\/ add the ts, and sort if necessary\n\tmc.keys = append(mc.keys, ts)\n\tif mc.keys[len(mc.keys)-1] < mc.keys[len(mc.keys)-2] {\n\t\tsort.Sort(accnt.Uint32Asc(mc.keys))\n\t}\n}\n\n\/\/ generateKeys generates sorted slice of all chunk timestamps\n\/\/ assumes we have at least read lock\nfunc (mc *CCacheMetric) generateKeys() {\n\tkeys := make([]uint32, 0, len(mc.chunks))\n\tfor k := range mc.chunks {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Sort(accnt.Uint32Asc(keys))\n\tmc.keys = keys\n}\n\n\/\/ nextTs takes a chunk's ts and returns the ts of the next chunk. (guessing if necessary)\n\/\/ assumes we already have at least a read lock\nfunc (mc *CCacheMetric) nextTs(ts uint32) uint32 {\n\tchunk := mc.chunks[ts]\n\treturn mc.nextTsCore(chunk.Itgen, chunk.Prev, chunk.Next)\n}\n\n\/\/ nextTsCore returns the ts of the next chunk, given a chunks key properties\n\/\/ (to the extent we know them). It guesses if necessary.\n\/\/ assumes we already have at least a read lock\nfunc (mc *CCacheMetric) nextTsCore(itgen chunk.IterGen, prev, next uint32) uint32 {\n\tspan := itgen.Span\n\tif span > 0 {\n\t\t\/\/ if the chunk is span-aware we don't need anything else\n\t\treturn itgen.Ts + span\n\t}\n\n\t\/\/ if chunk has a next chunk, then that's the ts we need\n\tif next != 0 {\n\t\treturn next\n\t}\n\t\/\/ if chunk has no next chunk, but has a previous one, we assume the length of this one is same as the previous one\n\tif prev != 0 {\n\t\treturn itgen.Ts + (itgen.Ts - prev)\n\t}\n\t\/\/ if a chunk has no next and no previous chunk we have to assume it's length is 0\n\treturn itgen.Ts\n}\n\n\/\/ lastTs returns the last Ts of this metric cache\n\/\/ since ranges are exclusive at the end this is actually the first Ts that is not cached\nfunc (mc *CCacheMetric) lastTs() uint32 {\n\tmc.RLock()\n\tdefer mc.RUnlock()\n\treturn mc.nextTs(mc.keys[len(mc.keys)-1])\n}\n\n\/\/ seekAsc finds the t0 of the chunk that contains ts, by searching from old to recent\n\/\/ if not found or can't be sure returns 0, false\n\/\/ assumes we already have at least a read lock\nfunc (mc *CCacheMetric) seekAsc(ts uint32) (uint32, bool) {\n\tlog.Debug(\"CCacheMetric seekAsc: seeking for %d in the keys %+d\", ts, mc.keys)\n\n\tfor i := 0; i < len(mc.keys) && mc.keys[i] <= ts; i++ {\n\t\tif mc.nextTs(mc.keys[i]) > ts {\n\t\t\tlog.Debug(\"CCacheMetric seekAsc: seek found ts %d is between %d and %d\", ts, mc.keys[i], mc.nextTs(mc.keys[i]))\n\t\t\treturn mc.keys[i], true\n\t\t}\n\t}\n\n\tlog.Debug(\"CCacheMetric seekAsc: seekAsc unsuccessful\")\n\treturn 0, false\n}\n\n\/\/ seekDesc finds the t0 of the chunk that contains ts, by searching from recent to old\n\/\/ if not found or can't be sure returns 0, false\n\/\/ assumes we already have at least a read lock\nfunc (mc *CCacheMetric) seekDesc(ts uint32) (uint32, bool) {\n\tlog.Debug(\"CCacheMetric seekDesc: seeking for %d in the keys %+d\", ts, mc.keys)\n\n\tfor i := len(mc.keys) - 1; i >= 0 && mc.nextTs(mc.keys[i]) > ts; i-- {\n\t\tif mc.keys[i] <= ts {\n\t\t\tlog.Debug(\"CCacheMetric seekDesc: seek found ts %d is between %d and %d\", ts, mc.keys[i], mc.nextTs(mc.keys[i]))\n\t\t\treturn mc.keys[i], true\n\t\t}\n\t}\n\n\tlog.Debug(\"CCacheMetric seekDesc: seekDesc unsuccessful\")\n\treturn 0, false\n}\n\nfunc (mc *CCacheMetric) searchForward(ctx context.Context, metric schema.AMKey, from, until uint32, res *CCSearchResult) {\n\tts, ok := mc.seekAsc(from)\n\tif !ok {\n\t\treturn\n\t}\n\n\t\/\/ add all consecutive chunks to search results, starting at the one containing \"from\"\n\tfor ; ts != 0; ts = mc.chunks[ts].Next {\n\t\tlog.Debug(\"CCacheMetric searchForward: forward search adds chunk ts %d to start\", ts)\n\t\tres.Start = append(res.Start, mc.chunks[ts].Itgen)\n\t\tnextTs := mc.nextTs(ts)\n\t\tres.From = nextTs\n\n\t\tif nextTs >= until {\n\t\t\tres.Complete = true\n\t\t\tbreak\n\t\t}\n\t\tif mc.chunks[ts].Next != 0 && ts >= mc.chunks[ts].Next {\n\t\t\tlog.Warn(\"CCacheMetric: suspected bug suppressed. searchForward(%q, %d, %d, res) ts is %d while Next is %d\", metric, from, until, ts, mc.chunks[ts].Next)\n\t\t\tspan := opentracing.SpanFromContext(ctx)\n\t\t\tspan.SetTag(\"searchForwardBug\", true)\n\t\t\tsearchFwdBug.Inc()\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (mc *CCacheMetric) searchBackward(from, until uint32, res *CCSearchResult) {\n\tts, ok := mc.seekDesc(until - 1)\n\tif !ok {\n\t\treturn\n\t}\n\n\tfor ; ts != 0; ts = mc.chunks[ts].Prev {\n\t\tif ts < from {\n\t\t\tbreak\n\t\t}\n\n\t\tlog.Debug(\"CCacheMetric searchBackward: backward search adds chunk ts %d to end\", ts)\n\t\tres.End = append(res.End, mc.chunks[ts].Itgen)\n\t\tres.Until = ts\n\t}\n}\n\n\/\/ Search searches the CCacheMetric's data and returns a complete-as-possible CCSearchResult\n\/\/\n\/\/ we first look for the chunks where the \"from\" and \"until\" ts are in.\n\/\/ then we seek from the \"from\" towards \"until\"\n\/\/ and add as many cunks as possible to the result, if this did not result\n\/\/ in all chunks necessary to serve the request we do the same in the reverse\n\/\/ order from \"until\" to \"from\"\n\/\/ if the first seek in chronological direction already ends up with all the\n\/\/ chunks we need to serve the request, the second one can be skipped.\n\/\/\n\/\/ EXAMPLE:\n\/\/ from ts: |\n\/\/ until ts: |\n\/\/ cache: |---|---|---| | | | | |---|---|---|---|---|---|\n\/\/ chunks returned: |---| |---|---|---|\nfunc (mc *CCacheMetric) Search(ctx context.Context, metric schema.AMKey, res *CCSearchResult, from, until uint32) {\n\tmc.RLock()\n\tdefer mc.RUnlock()\n\n\tif len(mc.chunks) < 1 {\n\t\treturn\n\t}\n\n\tmc.searchForward(ctx, metric, from, until, res)\n\tif !res.Complete {\n\t\tmc.searchBackward(from, until, res)\n\t}\n\n\tif !res.Complete && res.From > res.Until {\n\t\tlog.Debug(\"CCacheMetric Search: Found from > until (%d\/%d), printing chunks\\n\", res.From, res.Until)\n\t\tmc.debugMetric()\n\t}\n}\n\nfunc (mc *CCacheMetric) debugMetric() {\n\tlog.Debug(\"CCacheMetric debugMetric: --- debugging metric ---\\n\")\n\tfor _, key := range mc.keys {\n\t\tlog.Debug(\"CCacheMetric debugMetric: ts %d; prev %d; next %d\\n\", key, mc.chunks[key].Prev, mc.chunks[key].Next)\n\t}\n\tlog.Debug(\"CCacheMetric debugMetric: ------------------------\\n\")\n}\n<commit_msg>Dump some debug info and don't fail on chunk cache issues<commit_after>package cache\n\nimport (\n\t\"context\"\n\t\"sort\"\n\t\"sync\"\n\n\t\"github.com\/grafana\/metrictank\/mdata\/cache\/accnt\"\n\t\"github.com\/grafana\/metrictank\/mdata\/chunk\"\n\topentracing \"github.com\/opentracing\/opentracing-go\"\n\t\"github.com\/raintank\/worldping-api\/pkg\/log\"\n\t\"gopkg.in\/raintank\/schema.v1\"\n)\n\n\/\/ CCacheMetric caches data chunks\ntype CCacheMetric struct {\n\tsync.RWMutex\n\n\t\/\/ cached data chunks by timestamp\n\tchunks map[uint32]*CCacheChunk\n\n\t\/\/ chunk time stamps in ascending order\n\tkeys []uint32\n\n\tMKey schema.MKey\n}\n\n\/\/ NewCCacheMetric creates a CCacheMetric\nfunc NewCCacheMetric(mkey schema.MKey) *CCacheMetric {\n\treturn &CCacheMetric{\n\t\tMKey: mkey,\n\t\tchunks: make(map[uint32]*CCacheChunk),\n\t}\n}\n\n\/\/ Del deletes chunks for the given timestamp\nfunc (mc *CCacheMetric) Del(ts uint32) int {\n\tmc.Lock()\n\tdefer mc.Unlock()\n\n\tif _, ok := mc.chunks[ts]; !ok {\n\t\treturn len(mc.chunks)\n\t}\n\n\tprev := mc.chunks[ts].Prev\n\tnext := mc.chunks[ts].Next\n\n\tif prev != 0 {\n\t\tif _, ok := mc.chunks[prev]; ok {\n\t\t\tmc.chunks[prev].Next = 0\n\t\t}\n\t}\n\n\tif next != 0 {\n\t\tif _, ok := mc.chunks[next]; ok {\n\t\t\tmc.chunks[next].Prev = 0\n\t\t}\n\t}\n\n\tdelete(mc.chunks, ts)\n\n\t\/\/ regenerate the list of sorted keys after deleting a chunk\n\t\/\/ NOTE: we can improve perf by just taking out the ts (partially rewriting\n\t\/\/ the slice in one go), can we also batch deletes?\n\tmc.generateKeys()\n\n\treturn len(mc.chunks)\n}\n\n\/\/ AddRange adds a range (sequence) of chunks.\n\/\/ Note the following requirements:\n\/\/ the sequence should be in ascending timestamp order\n\/\/ the sequence should be complete (no gaps)\nfunc (mc *CCacheMetric) AddRange(prev uint32, itergens []chunk.IterGen) {\n\tif len(itergens) == 0 {\n\t\treturn\n\t}\n\n\tif len(itergens) == 1 {\n\t\tmc.Add(prev, itergens[0])\n\t\treturn\n\t}\n\n\tmc.Lock()\n\tdefer mc.Unlock()\n\n\t\/\/ pre-allocate 1 slice, cheaper than allocating one by one\n\tchunks := make([]CCacheChunk, 0, len(itergens))\n\n\t\/\/ handle the first one\n\titergen := itergens[0]\n\tts := itergen.Ts\n\n\t\/\/ if we add data that is older than chunks already cached,\n\t\/\/ we will have to sort the keys once we're done adding them\n\tsortKeys := len(mc.keys) > 0 && mc.keys[len(mc.keys)-1] > ts\n\n\t\/\/ add chunk if we don't have it yet (most likely)\n\tif _, ok := mc.chunks[ts]; !ok {\n\n\t\t\/\/ if previous chunk has not been passed we try to be smart and figure it out.\n\t\t\/\/ this is common in a scenario where a metric continuously gets queried\n\t\t\/\/ for a range that starts less than one chunkspan before now().\n\t\tif prev == 0 {\n\t\t\tres, ok := mc.seekDesc(ts - 1)\n\t\t\tif ok {\n\t\t\t\tprev = res\n\t\t\t}\n\t\t}\n\n\t\t\/\/ if the previous chunk is cached, link it\n\t\tif _, ok := mc.chunks[prev]; ok {\n\t\t\tmc.chunks[prev].Next = ts\n\t\t} else {\n\t\t\tprev = 0\n\t\t}\n\n\t\tchunks = append(chunks, CCacheChunk{\n\t\t\tTs: ts,\n\t\t\tPrev: prev,\n\t\t\tNext: itergens[1].Ts,\n\t\t\tItgen: itergen,\n\t\t})\n\t\tmc.chunks[ts] = &chunks[len(chunks)-1]\n\t\tmc.keys = append(mc.keys, ts)\n\t} else {\n\t\tmc.chunks[ts].Next = itergens[1].Ts\n\t}\n\n\tprev = ts\n\n\t\/\/ handle the 2nd until the last-but-one\n\tfor i := 1; i < len(itergens)-1; i++ {\n\t\titergen = itergens[i]\n\t\tts = itergen.Ts\n\t\t\/\/ add chunk, potentially overwriting pre-existing chunk (unlikely)\n\t\tchunks = append(chunks, CCacheChunk{\n\t\t\tTs: ts,\n\t\t\tPrev: prev,\n\t\t\tNext: itergens[i+1].Ts,\n\t\t\tItgen: itergen,\n\t\t})\n\t\tmc.chunks[ts] = &chunks[len(chunks)-1]\n\t\tmc.keys = append(mc.keys, ts)\n\n\t\tprev = ts\n\t}\n\n\t\/\/ handle the last one\n\titergen = itergens[len(itergens)-1]\n\tts = itergen.Ts\n\n\t\/\/ add chunk if we don't have it yet (most likely)\n\tif _, ok := mc.chunks[ts]; !ok {\n\n\t\t\/\/ if nextTs() can't figure out the end date it returns ts\n\t\tnext := mc.nextTsCore(itergen, prev, 0)\n\t\tif next == ts {\n\t\t\tnext = 0\n\t\t} else {\n\t\t\t\/\/ if the next chunk is cached, link in both directions\n\t\t\tif _, ok := mc.chunks[next]; ok {\n\t\t\t\tmc.chunks[next].Prev = ts\n\t\t\t} else {\n\t\t\t\tnext = 0\n\t\t\t}\n\t\t}\n\n\t\tchunks = append(chunks, CCacheChunk{\n\t\t\tTs: ts,\n\t\t\tPrev: prev,\n\t\t\tNext: next,\n\t\t\tItgen: itergen,\n\t\t})\n\t\tmc.chunks[ts] = &chunks[len(chunks)-1]\n\t\tmc.keys = append(mc.keys, ts)\n\t}\n\n\tif sortKeys {\n\t\tsort.Sort(accnt.Uint32Asc(mc.keys))\n\t}\n\n\treturn\n}\n\n\/\/ Add adds a chunk to the cache\nfunc (mc *CCacheMetric) Add(prev uint32, itergen chunk.IterGen) {\n\tts := itergen.Ts\n\n\tmc.Lock()\n\tdefer mc.Unlock()\n\n\tif _, ok := mc.chunks[ts]; ok {\n\t\t\/\/ chunk is already present. no need to error on that, just ignore it\n\t\treturn\n\t}\n\n\tmc.chunks[ts] = &CCacheChunk{\n\t\tTs: ts,\n\t\tPrev: 0,\n\t\tNext: 0,\n\t\tItgen: itergen,\n\t}\n\n\tnextTs := mc.nextTs(ts)\n\n\tlog.Debug(\"CCacheMetric Add: caching chunk ts %d, nextTs %d\", ts, nextTs)\n\n\t\/\/ if previous chunk has not been passed we try to be smart and figure it out.\n\t\/\/ this is common in a scenario where a metric continuously gets queried\n\t\/\/ for a range that starts less than one chunkspan before now().\n\tif prev == 0 {\n\t\tres, ok := mc.seekDesc(ts - 1)\n\t\tif ok {\n\t\t\tprev = res\n\t\t}\n\t}\n\n\t\/\/ if the previous chunk is cached, link in both directions\n\tif _, ok := mc.chunks[prev]; ok {\n\t\tmc.chunks[prev].Next = ts\n\t\tmc.chunks[ts].Prev = prev\n\t}\n\n\t\/\/ if nextTs() can't figure out the end date it returns ts\n\tif nextTs > ts {\n\t\t\/\/ if the next chunk is cached, link in both directions\n\t\tif _, ok := mc.chunks[nextTs]; ok {\n\t\t\tmc.chunks[nextTs].Prev = ts\n\t\t\tmc.chunks[ts].Next = nextTs\n\t\t}\n\t}\n\n\t\/\/ assure key is added to mc.keys\n\n\t\/\/ if no keys yet, just add it and it's sorted\n\tif len(mc.keys) == 0 {\n\t\tmc.keys = append(mc.keys, ts)\n\t\treturn\n\t}\n\n\t\/\/ add the ts, and sort if necessary\n\tmc.keys = append(mc.keys, ts)\n\tif mc.keys[len(mc.keys)-1] < mc.keys[len(mc.keys)-2] {\n\t\tsort.Sort(accnt.Uint32Asc(mc.keys))\n\t}\n}\n\n\/\/ generateKeys generates sorted slice of all chunk timestamps\n\/\/ assumes we have at least read lock\nfunc (mc *CCacheMetric) generateKeys() {\n\tkeys := make([]uint32, 0, len(mc.chunks))\n\tfor k := range mc.chunks {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Sort(accnt.Uint32Asc(keys))\n\tmc.keys = keys\n}\n\n\/\/ nextTs takes a chunk's ts and returns the ts of the next chunk. (guessing if necessary)\n\/\/ assumes we already have at least a read lock\nfunc (mc *CCacheMetric) nextTs(ts uint32) uint32 {\n\tchunk := mc.chunks[ts]\n\treturn mc.nextTsCore(chunk.Itgen, chunk.Prev, chunk.Next)\n}\n\n\/\/ nextTsCore returns the ts of the next chunk, given a chunks key properties\n\/\/ (to the extent we know them). It guesses if necessary.\n\/\/ assumes we already have at least a read lock\nfunc (mc *CCacheMetric) nextTsCore(itgen chunk.IterGen, prev, next uint32) uint32 {\n\tspan := itgen.Span\n\tif span > 0 {\n\t\t\/\/ if the chunk is span-aware we don't need anything else\n\t\treturn itgen.Ts + span\n\t}\n\n\t\/\/ if chunk has a next chunk, then that's the ts we need\n\tif next != 0 {\n\t\treturn next\n\t}\n\t\/\/ if chunk has no next chunk, but has a previous one, we assume the length of this one is same as the previous one\n\tif prev != 0 {\n\t\treturn itgen.Ts + (itgen.Ts - prev)\n\t}\n\t\/\/ if a chunk has no next and no previous chunk we have to assume it's length is 0\n\treturn itgen.Ts\n}\n\n\/\/ lastTs returns the last Ts of this metric cache\n\/\/ since ranges are exclusive at the end this is actually the first Ts that is not cached\nfunc (mc *CCacheMetric) lastTs() uint32 {\n\tmc.RLock()\n\tdefer mc.RUnlock()\n\treturn mc.nextTs(mc.keys[len(mc.keys)-1])\n}\n\n\/\/ seekAsc finds the t0 of the chunk that contains ts, by searching from old to recent\n\/\/ if not found or can't be sure returns 0, false\n\/\/ assumes we already have at least a read lock\nfunc (mc *CCacheMetric) seekAsc(ts uint32) (uint32, bool) {\n\tlog.Debug(\"CCacheMetric seekAsc: seeking for %d in the keys %+d\", ts, mc.keys)\n\n\tfor i := 0; i < len(mc.keys) && mc.keys[i] <= ts; i++ {\n\t\tif mc.nextTs(mc.keys[i]) > ts {\n\t\t\tlog.Debug(\"CCacheMetric seekAsc: seek found ts %d is between %d and %d\", ts, mc.keys[i], mc.nextTs(mc.keys[i]))\n\t\t\treturn mc.keys[i], true\n\t\t}\n\t}\n\n\tlog.Debug(\"CCacheMetric seekAsc: seekAsc unsuccessful\")\n\treturn 0, false\n}\n\n\/\/ seekDesc finds the t0 of the chunk that contains ts, by searching from recent to old\n\/\/ if not found or can't be sure returns 0, false\n\/\/ assumes we already have at least a read lock\nfunc (mc *CCacheMetric) seekDesc(ts uint32) (uint32, bool) {\n\tlog.Debug(\"CCacheMetric seekDesc: seeking for %d in the keys %+d\", ts, mc.keys)\n\n\tfor i := len(mc.keys) - 1; i >= 0 && mc.nextTs(mc.keys[i]) > ts; i-- {\n\t\tif mc.keys[i] <= ts {\n\t\t\tlog.Debug(\"CCacheMetric seekDesc: seek found ts %d is between %d and %d\", ts, mc.keys[i], mc.nextTs(mc.keys[i]))\n\t\t\treturn mc.keys[i], true\n\t\t}\n\t}\n\n\tlog.Debug(\"CCacheMetric seekDesc: seekDesc unsuccessful\")\n\treturn 0, false\n}\n\nfunc (mc *CCacheMetric) searchForward(ctx context.Context, metric schema.AMKey, from, until uint32, res *CCSearchResult) {\n\tts, ok := mc.seekAsc(from)\n\tif !ok {\n\t\treturn\n\t}\n\n\t\/\/ add all consecutive chunks to search results, starting at the one containing \"from\"\n\tfor ; ts != 0; ts = mc.chunks[ts].Next {\n\t\tlog.Debug(\"CCacheMetric searchForward: forward search adds chunk ts %d to start\", ts)\n\t\tres.Start = append(res.Start, mc.chunks[ts].Itgen)\n\t\tnextTs := mc.nextTs(ts)\n\t\tres.From = nextTs\n\n\t\tif nextTs >= until {\n\t\t\tres.Complete = true\n\t\t\tbreak\n\t\t}\n\t\tif mc.chunks[ts].Next != 0 && ts >= mc.chunks[ts].Next {\n\t\t\tlog.Warn(\"CCacheMetric: suspected bug suppressed. searchForward(%q, %d, %d, res) ts is %d while Next is %d\", metric, from, until, ts, mc.chunks[ts].Next)\n\t\t\tspan := opentracing.SpanFromContext(ctx)\n\t\t\tspan.SetTag(\"searchForwardBug\", true)\n\t\t\tsearchFwdBug.Inc()\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (mc *CCacheMetric) searchBackward(from, until uint32, res *CCSearchResult) {\n\tts, ok := mc.seekDesc(until - 1)\n\tif !ok {\n\t\treturn\n\t}\n\n\tfor ; ts != 0; ts = mc.chunks[ts].Prev {\n\t\tif ts < from {\n\t\t\tbreak\n\t\t}\n\n\t\tlog.Debug(\"CCacheMetric searchBackward: backward search adds chunk ts %d to end\", ts)\n\t\tres.End = append(res.End, mc.chunks[ts].Itgen)\n\t\tres.Until = ts\n\t}\n}\n\n\/\/ Search searches the CCacheMetric's data and returns a complete-as-possible CCSearchResult\n\/\/\n\/\/ we first look for the chunks where the \"from\" and \"until\" ts are in.\n\/\/ then we seek from the \"from\" towards \"until\"\n\/\/ and add as many cunks as possible to the result, if this did not result\n\/\/ in all chunks necessary to serve the request we do the same in the reverse\n\/\/ order from \"until\" to \"from\"\n\/\/ if the first seek in chronological direction already ends up with all the\n\/\/ chunks we need to serve the request, the second one can be skipped.\n\/\/\n\/\/ EXAMPLE:\n\/\/ from ts: |\n\/\/ until ts: |\n\/\/ cache: |---|---|---| | | | | |---|---|---|---|---|---|\n\/\/ chunks returned: |---| |---|---|---|\nfunc (mc *CCacheMetric) Search(ctx context.Context, metric schema.AMKey, res *CCSearchResult, from, until uint32) {\n\tmc.RLock()\n\tdefer mc.RUnlock()\n\n\tif len(mc.chunks) < 1 {\n\t\treturn\n\t}\n\n\tmc.searchForward(ctx, metric, from, until, res)\n\tif !res.Complete {\n\t\tmc.searchBackward(from, until, res)\n\t}\n\n\tif !res.Complete && res.From > res.Until {\n\t\tlog.Warn(\"CCacheMetric Search: Found from > until (%d\/%d), printing chunks\\n\", res.From, res.Until)\n\t\tlog.Warn(\"Bad res = %v\", *res)\n\t\tmc.debugMetric(from, until)\n\t\tres.Complete = false\n\t\tres.Start = res.Start[:0]\n\t\tres.End = res.End[:0]\n\t\tres.From = from\n\t\tres.Until = until\n\t}\n}\n\nfunc (mc *CCacheMetric) debugMetric(from, until uint32) {\n\tlog.Warn(\"CCacheMetric debugMetric: --- debugging metric between %d and %d ---\\n\", from, until)\n\tfor _, key := range mc.keys {\n\t\tif key >= from && key <= until {\n\t\t\tlog.Warn(\"CCacheMetric debugMetric: ts %d; prev %d; next %d\\n\", key, mc.chunks[key].Prev, mc.chunks[key].Next)\n\t\t}\n\t}\n\tlog.Warn(\"CCacheMetric debugMetric: ------------------------\\n\")\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"time\"\n)\n\nfunc main() {\n\tstart := time.Now()\n\tints := GetAll()\n\tfmt.Println(ints)\n\tfmt.Println(\"took\", time.Now().Sub(start))\n}\n\nfunc datastoreGet() int {\n\ttime.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond)\n\treturn rand.Int()\n}\n\nfunc GetAll() []int {\n\tints := make([]int, 10)\n\tfor i := 0; i < 10; i++ {\n\t\tints[i] = datastoreGet()\n\t}\n\treturn ints\n}\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n}\n<commit_msg>comment on what datastoreGet does<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"time\"\n)\n\nfunc main() {\n\tstart := time.Now()\n\tints := GetAll()\n\tfmt.Println(ints)\n\tfmt.Println(\"took\", time.Now().Sub(start))\n}\n\nfunc datastoreGet() int {\n\ttime.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond)\n\treturn rand.Int()\n}\n\nfunc GetAll() []int {\n\tints := make([]int, 10)\n\tfor i := 0; i < 10; i++ {\n\t\tints[i] = datastoreGet() \/\/ sleeps for <= 1sec, then returns a random int\n\t}\n\treturn ints\n}\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>updated get to requery query to be parsed later up the chain<commit_after><|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/evolsnow\/robot\/conn\"\n\t\"github.com\/go-telegram-bot-api\/telegram-bot-api\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n)\n\nvar saidGoodBye = make(chan int, 1)\nvar userAction = make(map[string]Action) \/\/map[user]Action\nvar userTask = make(map[string]Task)\n\ntype Robot struct {\n\tbot *tgbotapi.BotAPI\n\tupdates <-chan tgbotapi.Update\n\tshutUp bool\n\t\/\/\tlanguage []string\n\tname string \/\/name from telegram\n\tnickName string \/\/user defined name\n}\n\ntype Action struct {\n\tActionName string\n\tActionStep int\n}\n\ntype Task struct {\n\tChatId int\n\tOwner string\n\tDesc string\n\tDuration time.Duration\n}\n\nfunc newRobot(token, nickName, webHook string) *Robot {\n\tvar rb = new(Robot)\n\tvar err error\n\trb.bot, err = tgbotapi.NewBotAPI(token)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\trb.name = rb.bot.Self.UserName\n\trb.nickName = nickName\n\tlog.Printf(\"%s: Authorized on account %s\", rb.nickName, rb.name)\n\t_, err = rb.bot.SetWebhook(tgbotapi.NewWebhook(webHook + rb.bot.Token))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\trb.updates, _ = rb.bot.ListenForWebhook(\"\/\" + rb.bot.Token)\n\treturn rb\n}\n\nfunc (rb *Robot) run() {\n\tif rb.nickName == \"samaritan\" {\n\t\tchatId := conn.GetMasterId()\n\t\tmsg := tgbotapi.NewMessage(chatId, \"samaritan is coming back!\")\n\t\tif _, err := rb.bot.Send(msg); err != nil {\n\t\t\tlog.Println(\"evolution failed\")\n\t\t}\n\t}\n\tfor update := range rb.updates {\n\t\tgo handlerUpdate(rb, update)\n\t}\n}\n\nfunc handlerUpdate(rb *Robot, update tgbotapi.Update) {\n\tdefer func() {\n\t\tif p := recover(); p != nil {\n\t\t\terr := fmt.Errorf(\"internal error: %v\", p)\n\t\t\tlog.Println(err)\n\t\t}\n\t}()\n\n\tuser := update.Message.Chat.UserName + \":\" + rb.nickName\n\ttext := update.Message.Text\n\tchatId := update.Message.Chat.ID\n\tvar endPoint, rawMsg string\n\tif action, ok := userAction[user]; ok { \/\/detect if user is in interaction mode\n\t\tswitch action.ActionName {\n\t\tcase \"setReminder\":\n\t\t\trawMsg = rb.SetReminder(update, action.ActionStep)\n\t\t}\n\t} else if string(text[0]) == \"\/\" {\n\t\treceived := strings.Split(text, \" \")\n\t\tendPoint = received[0]\n\t\tlog.Println(endPoint)\n\t\tswitch endPoint {\n\t\tcase \"\/start\":\n\t\t\trawMsg = rb.Start(update)\n\t\tcase \"\/help\":\n\t\t\trawMsg = rb.Help(update)\n\t\tcase \"\/trans\":\n\t\t\trawMsg = rb.Translate(update)\n\t\tcase \"\/alarm\":\n\t\t\ttmpAction := userAction[user]\n\t\t\ttmpAction.ActionName = \"setReminder\"\n\t\t\tuserAction[user] = tmpAction\n\t\t\trawMsg = rb.SetReminder(update, 0)\n\t\tcase \"\/evolve\":\n\t\t\trawMsg = \"upgrading...\"\n\t\t\tgo conn.SetMasterId(chatId)\n\t\t\tgo rb.Evolve(update)\n\t\tdefault:\n\t\t\trawMsg = \"unknow command, type \/help?\"\n\t\t}\n\t} else {\n\t\trawMsg = rb.Talk(update)\n\t}\n\tif rawMsg == \"\" {\n\t\treturn\n\t}\n\tmsg := tgbotapi.NewMessage(chatId, rawMsg)\n\tmsg.ParseMode = \"markdown\"\n\tlog.Println(rawMsg)\n\t_, err := rb.bot.Send(msg)\n\tif endPoint == \"\/evolve\" {\n\t\tsaidGoodBye <- 1\n\t}\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}\n\n\/\/\/\/parse \"\/help text msg\" to \"text msg\"\n\/\/func parseText(text string) string {\n\/\/\treturn strings.SplitAfterN(text, \" \", 2)[1]\n\/\/}\n\nfunc (rb *Robot) Start(update tgbotapi.Update) string {\n\tuser := update.Message.Chat.UserName\n\tgo conn.SetUserChatId(user+\":\"+rb.nickName, update.Message.Chat.ID)\n\treturn \"welcome: \" + user\n}\n\nfunc (rb *Robot) Help(update tgbotapi.Update) string {\n\thelpMsg := `\n\/alarm - set a reminder\n\/trans - translate words between english and chinese\n\/evolve\t- self evolution of samaritan\n\/help - show this help message\n`\n\treturn helpMsg\n}\n\nfunc (rb *Robot) Evolve(update tgbotapi.Update) {\n\tif update.Message.Chat.FirstName != \"Evol\" || update.Message.Chat.LastName != \"Gan\" {\n\t\trb.bot.Send(tgbotapi.NewMessage(update.Message.Chat.ID, \"sorry, unauthorized\"))\n\t\treturn\n\t}\n\t<-saidGoodBye\n\tclose(saidGoodBye)\n\tcmd := exec.Command(\"bash\", \"\/root\/evolve\")\n\tif err := cmd.Start(); err != nil {\n\t\tlog.Println(err.Error())\n\t}\n}\n\nfunc (rb *Robot) Translate(update tgbotapi.Update) string {\n\traw := strings.SplitAfterN(update.Message.Text, \" \", 2)\n\tinfo := \"\"\n\tif len(raw) < 2 {\n\t\treturn \"what do you want to translate, try '\/trans cat'?\"\n\t} else {\n\t\tinfo = \"翻译\" + raw[1]\n\t}\n\treturn qinAI(info)\n\n}\nfunc (rb *Robot) Talk(update tgbotapi.Update) string {\n\tinfo := update.Message.Text\n\tchinese := false\n\tif strings.Contains(info, rb.name) {\n\t\tif strings.Contains(info, \"闭嘴\") || strings.Contains(info, \"别说话\") {\n\t\t\trb.shutUp = true\n\t\t} else if rb.shutUp && strings.Contains(info, \"说话\") {\n\t\t\trb.shutUp = false\n\t\t\treturn fmt.Sprintf(\"%s终于可以说话啦\", rb.nickName)\n\t\t}\n\t\tinfo = strings.Replace(info, fmt.Sprintf(\"@%s\", rb.name), \"\", -1)\n\t}\n\n\tif rb.shutUp {\n\t\treturn \"\"\n\t}\n\tlog.Println(info)\n\t\/\/\tvar response string\n\tfor _, r := range info {\n\t\tif unicode.Is(unicode.Scripts[\"Han\"], r) {\n\t\t\tchinese = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif rb.nickName == \"samaritan\" {\n\t\tif chinese {\n\t\t\treturn tlAI(info)\n\t\t} else {\n\t\t\treturn mitAI(info)\n\t\t}\n\t} else { \/\/jarvis use another AI\n\t\treturn qinAI(info)\n\t}\n\t\/\/\treturn response\n}\n\nfunc (rb *Robot) SetReminder(update tgbotapi.Update, step int) string {\n\tuser := update.Message.Chat.UserName + \":\" + rb.nickName\n\tswitch step {\n\tcase 0:\n\t\t\/\/known issue of go, you can not just assign update.Message.Chat.ID to userTask[user].ChatId\n\t\ttmpTask := userTask[user]\n\t\ttmpTask.ChatId = update.Message.Chat.ID\n\t\ttmpTask.Owner = update.Message.Chat.UserName\n\t\tuserTask[user] = tmpTask\n\n\t\ttmpAction := userAction[user]\n\t\ttmpAction.ActionStep++\n\t\tuserAction[user] = tmpAction\n\t\treturn \"Ok, what should I remind you to do?\"\n\tcase 1:\n\t\t\/\/save thing\n\t\ttmpTask := userTask[user]\n\t\ttmpTask.Desc = update.Message.Text\n\t\tuserTask[user] = tmpTask\n\n\t\ttmpAction := userAction[user]\n\t\ttmpAction.ActionStep++\n\t\tuserAction[user] = tmpAction\n\t\treturn \"When or how much time after?\\n\" +\n\t\t\t\"You can type '*2\/14 11:30*' means 11:30 at 2\/14 \\n\" + \/\/first format\n\t\t\t\"Or type '*11:30*' means at 11:30 today\\n\" + \/\/second format\n\t\t\t\"Or type '*5m10s*' means 5 minutes 10 seconds later\" \/\/third format\n\tcase 2:\n\t\t\/\/save time duration\n\t\ttext := update.Message.Text\n\t\tfirstFormat := \"1\/02 15:04\"\n\t\tsecondFormat := \"15:04\"\n\t\tvar scheduledTime time.Time\n\t\tvar nowTime = time.Now()\n\t\tvar du time.Duration\n\t\tvar err error\n\t\tif strings.Contains(text, \":\") {\n\t\t\tscheduledTime, err = time.Parse(firstFormat, text)\n\t\t\tnowTime, _ = time.Parse(firstFormat, nowTime.Format(firstFormat))\n\t\t\tif err != nil { \/\/try to parse with first format\n\t\t\t\tscheduledTime, err = time.Parse(secondFormat, text)\n\t\t\t\tnowTime, _ = time.Parse(secondFormat, nowTime.Format(secondFormat))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"wrong format, try '2\/14 11:30' or '11:30'?\"\n\t\t\t\t}\n\t\t\t}\n\t\t\tdu = scheduledTime.Sub(nowTime)\n\t\t} else {\n\n\t\t\tdu, err = time.ParseDuration(text)\n\t\t\tif err != nil {\n\t\t\t\treturn \"wrong format, try '1h2m3s'?\"\n\t\t\t}\n\t\t}\n\t\ttmpTask := userTask[user]\n\t\ttmpTask.Duration = du\n\t\tuserTask[user] = tmpTask\n\t\tgo func(rb *Robot, ts Task) {\n\t\t\ttimer := time.NewTimer(du)\n\t\t\t<-timer.C\n\t\t\trawMsg := fmt.Sprintf(\"Hi %s, maybe it's time to:\\n*%s*\", ts.Owner, ts.Desc)\n\t\t\tmsg := tgbotapi.NewMessage(ts.ChatId, rawMsg)\n\t\t\tmsg.ParseMode = \"markdown\"\n\t\t\t_, err := rb.bot.Send(msg)\n\t\t\tif err != nil {\n\t\t\t\trb.bot.Send(tgbotapi.NewMessage(conn.GetUserChatId(ts.Owner), rawMsg))\n\t\t\t}\n\t\t\tdelete(userTask, user)\n\t\t}(rb, userTask[user])\n\n\t\tdelete(userAction, user)\n\t\treturn fmt.Sprintf(\"Ok, I will remind you to %s later\", userTask[user].Desc)\n\t}\n\treturn \"\"\n}\n\nfunc tlAI(info string) string {\n\tinfo = strings.Replace(info, \" \", \"\", -1)\n\tkey := \"a5052a22b8232be1e387ff153e823975\"\n\ttuLingURL := fmt.Sprintf(\"http:\/\/www.tuling123.com\/openapi\/api?key=%s&info=%s\", key, info)\n\tresp, err := http.Get(tuLingURL)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t}\n\tdefer resp.Body.Close()\n\treply := new(tlReply)\n\tdecoder := json.NewDecoder(resp.Body)\n\tdecoder.Decode(reply)\n\tlog.Printf(\"reply from tuling machine: %s\", reply.Text+\"\\n\"+reply.Url)\n\treturn reply.Text + \"\\n\" + reply.Url\n}\n\ntype tlReply struct {\n\tcode int `json:\"code\"`\n\tUrl string `json:\"url,omitempty\"`\n\tText string `json:\"text\"`\n}\n\n\/\/func simAI(info, lc string) string {\n\/\/\tinfo = strings.Replace(info, \" \", \"+\", -1)\n\/\/\tsimURL := fmt.Sprintf(\"http:\/\/www.simsimi.com\/requestChat?lc=%s&ft=1.0&req=%s&uid=58642449&did=0\", lc, info)\n\/\/\tresp, err := http.Get(simURL)\n\/\/\tif err != nil {\n\/\/\t\tlog.Println(err.Error())\n\/\/\t}\n\/\/\tdefer resp.Body.Close()\n\/\/\treply := new(simReply)\n\/\/\tdecoder := json.NewDecoder(resp.Body)\n\/\/\tdecoder.Decode(reply)\n\/\/\treturn strings.Replace(reply.Res.Msg, \"<br>\", \"\\n\", -1)\n\/\/}\n\/\/\n\/\/type simReply struct {\n\/\/\tresult int `json:\"code\"`\n\/\/\tRes res\n\/\/}\n\/\/type res struct {\n\/\/\tMsg string `json:\"msg\"`\n\/\/}\n\nfunc qinAI(info string) string {\n\tinfo = strings.Replace(info, \" \", \"+\", -1)\n\tqinURL := fmt.Sprintf(\"http:\/\/api.qingyunke.com\/api.php?key=free&appid=0&msg=%s\", info)\n\tresp, err := http.Get(qinURL)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t}\n\tdefer resp.Body.Close()\n\treply := new(qinReply)\n\tdecoder := json.NewDecoder(resp.Body)\n\tdecoder.Decode(reply)\n\tlog.Printf(\"reply from qingyunke machine: %s\", reply.Content)\n\tret := strings.Replace(reply.Content, \"{br}\", \"\\n\", -1)\n\treturn strings.Replace(ret, \"菲菲\", \"Jarvis\", -1)\n}\n\ntype qinReply struct {\n\tresult int `json:\"resulte\"`\n\tContent string `json:\"content\"`\n}\n\nfunc mitAI(info string) string {\n\tmitURL := \"http:\/\/fiddle.pandorabots.com\/pandora\/talk?botid=9fa364f2fe345a10&skin=demochat\"\n\tresp, err := http.PostForm(mitURL, url.Values{\"message\": {info}, \"botcust2\": {\"d064e07d6e067535\"}})\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tre, _ := regexp.Compile(\"Mitsuku:<\/B>(.*?)<br> <br>\")\n\tall := re.FindAll(body, -1)\n\tif len(all) == 0 {\n\t\treturn \"change another question?\"\n\t}\n\tfound := (string(all[0]))\n\tlog.Printf(\"reply from mitsuku machine: %s\", found)\n\tret := strings.Replace(found, `<P ALIGN=\"CENTER\"><img src=\"http:\/\/`, \"\", -1)\n\tret = strings.Replace(ret, `\"><\/img><\/P>`, \"\", -1)\n\tret = strings.Replace(ret[13:], \"<br>\", \"\\n\", -1)\n\tret = strings.Replace(ret, \"Mitsuku\", \"samaritan\", -1)\n\treturn ret\n}\n<commit_msg>concurrency safe<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/evolsnow\/robot\/conn\"\n\t\"github.com\/go-telegram-bot-api\/telegram-bot-api\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n)\n\nvar saidGoodBye = make(chan int, 1)\nvar userAction = make(map[string]Action) \/\/map[user]Action\nvar userTask = make(map[string]Task)\n\ntype Robot struct {\n\tbot *tgbotapi.BotAPI\n\tupdates <-chan tgbotapi.Update\n\tshutUp bool\n\t\/\/\tlanguage []string\n\tname string \/\/name from telegram\n\tnickName string \/\/user defined name\n}\n\ntype Action struct {\n\tActionName string\n\tActionStep int\n}\n\ntype Task struct {\n\tChatId int\n\tOwner string\n\tDesc string\n\tDuration time.Duration\n}\n\nfunc newRobot(token, nickName, webHook string) *Robot {\n\tvar rb = new(Robot)\n\tvar err error\n\trb.bot, err = tgbotapi.NewBotAPI(token)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\trb.name = rb.bot.Self.UserName\n\trb.nickName = nickName\n\tlog.Printf(\"%s: Authorized on account %s\", rb.nickName, rb.name)\n\t_, err = rb.bot.SetWebhook(tgbotapi.NewWebhook(webHook + rb.bot.Token))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\trb.updates, _ = rb.bot.ListenForWebhook(\"\/\" + rb.bot.Token)\n\treturn rb\n}\n\nfunc (rb *Robot) run() {\n\tif rb.nickName == \"samaritan\" {\n\t\tchatId := conn.GetMasterId()\n\t\tmsg := tgbotapi.NewMessage(chatId, \"samaritan is coming back!\")\n\t\tif _, err := rb.bot.Send(msg); err != nil {\n\t\t\tlog.Println(\"evolution failed\")\n\t\t}\n\t}\n\tfor update := range rb.updates {\n\t\tgo handlerUpdate(rb, update)\n\t}\n}\n\nfunc handlerUpdate(rb *Robot, update tgbotapi.Update) {\n\tdefer func() {\n\t\tif p := recover(); p != nil {\n\t\t\terr := fmt.Errorf(\"internal error: %v\", p)\n\t\t\tlog.Println(err)\n\t\t}\n\t}()\n\n\tuser := update.Message.Chat.UserName + \":\" + rb.nickName\n\ttext := update.Message.Text\n\tchatId := update.Message.Chat.ID\n\tvar endPoint, rawMsg string\n\tif action, ok := userAction[user]; ok { \/\/detect if user is in interaction mode\n\t\tswitch action.ActionName {\n\t\tcase \"setReminder\":\n\t\t\trawMsg = rb.SetReminder(update, action.ActionStep)\n\t\t}\n\t} else if string(text[0]) == \"\/\" {\n\t\treceived := strings.Split(text, \" \")\n\t\tendPoint = received[0]\n\t\tlog.Println(endPoint)\n\t\tswitch endPoint {\n\t\tcase \"\/start\":\n\t\t\trawMsg = rb.Start(update)\n\t\tcase \"\/help\":\n\t\t\trawMsg = rb.Help(update)\n\t\tcase \"\/trans\":\n\t\t\trawMsg = rb.Translate(update)\n\t\tcase \"\/alarm\":\n\t\t\ttmpAction := userAction[user]\n\t\t\ttmpAction.ActionName = \"setReminder\"\n\t\t\tuserAction[user] = tmpAction\n\t\t\trawMsg = rb.SetReminder(update, 0)\n\t\tcase \"\/evolve\":\n\t\t\trawMsg = \"upgrading...\"\n\t\t\tgo conn.SetMasterId(chatId)\n\t\t\tgo rb.Evolve(update)\n\t\tdefault:\n\t\t\trawMsg = \"unknow command, type \/help?\"\n\t\t}\n\t} else {\n\t\trawMsg = rb.Talk(update)\n\t}\n\tif rawMsg == \"\" {\n\t\treturn\n\t}\n\tmsg := tgbotapi.NewMessage(chatId, rawMsg)\n\tmsg.ParseMode = \"markdown\"\n\tlog.Println(rawMsg)\n\t_, err := rb.bot.Send(msg)\n\tif endPoint == \"\/evolve\" {\n\t\tsaidGoodBye <- 1\n\t}\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}\n\n\/\/\/\/parse \"\/help text msg\" to \"text msg\"\n\/\/func parseText(text string) string {\n\/\/\treturn strings.SplitAfterN(text, \" \", 2)[1]\n\/\/}\n\nfunc (rb *Robot) Start(update tgbotapi.Update) string {\n\tuser := update.Message.Chat.UserName\n\tgo conn.SetUserChatId(user+\":\"+rb.nickName, update.Message.Chat.ID)\n\treturn \"welcome: \" + user\n}\n\nfunc (rb *Robot) Help(update tgbotapi.Update) string {\n\thelpMsg := `\n\/alarm - set a reminder\n\/trans - translate words between english and chinese\n\/evolve\t- self evolution of samaritan\n\/help - show this help message\n`\n\treturn helpMsg\n}\n\nfunc (rb *Robot) Evolve(update tgbotapi.Update) {\n\tif update.Message.Chat.FirstName != \"Evol\" || update.Message.Chat.LastName != \"Gan\" {\n\t\trb.bot.Send(tgbotapi.NewMessage(update.Message.Chat.ID, \"sorry, unauthorized\"))\n\t\treturn\n\t}\n\t<-saidGoodBye\n\tclose(saidGoodBye)\n\tcmd := exec.Command(\"bash\", \"\/root\/evolve\")\n\tif err := cmd.Start(); err != nil {\n\t\tlog.Println(err.Error())\n\t}\n}\n\nfunc (rb *Robot) Translate(update tgbotapi.Update) string {\n\traw := strings.SplitAfterN(update.Message.Text, \" \", 2)\n\tinfo := \"\"\n\tif len(raw) < 2 {\n\t\treturn \"what do you want to translate, try '\/trans cat'?\"\n\t} else {\n\t\tinfo = \"翻译\" + raw[1]\n\t}\n\treturn qinAI(info)\n\n}\nfunc (rb *Robot) Talk(update tgbotapi.Update) string {\n\tinfo := update.Message.Text\n\tchinese := false\n\tif strings.Contains(info, rb.name) {\n\t\tif strings.Contains(info, \"闭嘴\") || strings.Contains(info, \"别说话\") {\n\t\t\trb.shutUp = true\n\t\t} else if rb.shutUp && strings.Contains(info, \"说话\") {\n\t\t\trb.shutUp = false\n\t\t\treturn fmt.Sprintf(\"%s终于可以说话啦\", rb.nickName)\n\t\t}\n\t\tinfo = strings.Replace(info, fmt.Sprintf(\"@%s\", rb.name), \"\", -1)\n\t}\n\n\tif rb.shutUp {\n\t\treturn \"\"\n\t}\n\tlog.Println(info)\n\t\/\/\tvar response string\n\tfor _, r := range info {\n\t\tif unicode.Is(unicode.Scripts[\"Han\"], r) {\n\t\t\tchinese = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif rb.nickName == \"samaritan\" {\n\t\tif chinese {\n\t\t\treturn tlAI(info)\n\t\t} else {\n\t\t\treturn mitAI(info)\n\t\t}\n\t} else { \/\/jarvis use another AI\n\t\treturn qinAI(info)\n\t}\n\t\/\/\treturn response\n}\n\nfunc (rb *Robot) SetReminder(update tgbotapi.Update, step int) string {\n\tuser := update.Message.Chat.UserName + \":\" + rb.nickName\n\tswitch step {\n\tcase 0:\n\t\t\/\/known issue of go, you can not just assign update.Message.Chat.ID to userTask[user].ChatId\n\t\ttmpTask := userTask[user]\n\t\ttmpTask.ChatId = update.Message.Chat.ID\n\t\ttmpTask.Owner = update.Message.Chat.UserName\n\t\tuserTask[user] = tmpTask\n\n\t\ttmpAction := userAction[user]\n\t\ttmpAction.ActionStep++\n\t\tuserAction[user] = tmpAction\n\t\treturn \"Ok, what should I remind you to do?\"\n\tcase 1:\n\t\t\/\/save thing\n\t\ttmpTask := userTask[user]\n\t\ttmpTask.Desc = update.Message.Text\n\t\tuserTask[user] = tmpTask\n\n\t\ttmpAction := userAction[user]\n\t\ttmpAction.ActionStep++\n\t\tuserAction[user] = tmpAction\n\t\treturn \"When or how much time after?\\n\" +\n\t\t\t\"You can type '*2\/14 11:30*' means 11:30 at 2\/14 \\n\" + \/\/first format\n\t\t\t\"Or type '*11:30*' means at 11:30 today\\n\" + \/\/second format\n\t\t\t\"Or type '*5m10s*' means 5 minutes 10 seconds later\" \/\/third format\n\tcase 2:\n\t\t\/\/save time duration\n\t\ttext := update.Message.Text\n\t\tfirstFormat := \"1\/02 15:04\"\n\t\tsecondFormat := \"15:04\"\n\t\tvar scheduledTime time.Time\n\t\tvar nowTime = time.Now()\n\t\tvar du time.Duration\n\t\tvar err error\n\t\tif strings.Contains(text, \":\") {\n\t\t\tscheduledTime, err = time.Parse(firstFormat, text)\n\t\t\tnowTime, _ = time.Parse(firstFormat, nowTime.Format(firstFormat))\n\t\t\tif err != nil { \/\/try to parse with first format\n\t\t\t\tscheduledTime, err = time.Parse(secondFormat, text)\n\t\t\t\tnowTime, _ = time.Parse(secondFormat, nowTime.Format(secondFormat))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn \"wrong format, try '2\/14 11:30' or '11:30'?\"\n\t\t\t\t}\n\t\t\t}\n\t\t\tdu = scheduledTime.Sub(nowTime)\n\t\t} else {\n\n\t\t\tdu, err = time.ParseDuration(text)\n\t\t\tif err != nil {\n\t\t\t\treturn \"wrong format, try '1h2m3s'?\"\n\t\t\t}\n\t\t}\n\t\ttmpTask := userTask[user]\n\t\ttmpTask.Duration = du\n\t\tuserTask[user] = tmpTask\n\t\tgo func(rb *Robot, ts Task) {\n\t\t\ttimer := time.NewTimer(du)\n\t\t\towner := ts.Owner\n\t\t\trawMsg := fmt.Sprintf(\"Hi %s, maybe it's time to:\\n*%s*\", owner, ts.Desc)\n\t\t\tmsg := tgbotapi.NewMessage(ts.ChatId, rawMsg)\n\t\t\tmsg.ParseMode = \"markdown\"\n\t\t\t<-timer.C\n\t\t\t_, err := rb.bot.Send(msg)\n\t\t\tif err != nil {\n\t\t\t\trb.bot.Send(tgbotapi.NewMessage(conn.GetUserChatId(owner), rawMsg))\n\t\t\t}\n\t\t\tdelete(userTask, user)\n\t\t}(rb, userTask[user])\n\n\t\tdelete(userAction, user)\n\t\treturn fmt.Sprintf(\"Ok, I will remind you to %s later\", userTask[user].Desc)\n\t}\n\treturn \"\"\n}\n\nfunc tlAI(info string) string {\n\tinfo = strings.Replace(info, \" \", \"\", -1)\n\tkey := \"a5052a22b8232be1e387ff153e823975\"\n\ttuLingURL := fmt.Sprintf(\"http:\/\/www.tuling123.com\/openapi\/api?key=%s&info=%s\", key, info)\n\tresp, err := http.Get(tuLingURL)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t}\n\tdefer resp.Body.Close()\n\treply := new(tlReply)\n\tdecoder := json.NewDecoder(resp.Body)\n\tdecoder.Decode(reply)\n\tlog.Printf(\"reply from tuling machine: %s\", reply.Text+\"\\n\"+reply.Url)\n\treturn reply.Text + \"\\n\" + reply.Url\n}\n\ntype tlReply struct {\n\tcode int `json:\"code\"`\n\tUrl string `json:\"url,omitempty\"`\n\tText string `json:\"text\"`\n}\n\n\/\/func simAI(info, lc string) string {\n\/\/\tinfo = strings.Replace(info, \" \", \"+\", -1)\n\/\/\tsimURL := fmt.Sprintf(\"http:\/\/www.simsimi.com\/requestChat?lc=%s&ft=1.0&req=%s&uid=58642449&did=0\", lc, info)\n\/\/\tresp, err := http.Get(simURL)\n\/\/\tif err != nil {\n\/\/\t\tlog.Println(err.Error())\n\/\/\t}\n\/\/\tdefer resp.Body.Close()\n\/\/\treply := new(simReply)\n\/\/\tdecoder := json.NewDecoder(resp.Body)\n\/\/\tdecoder.Decode(reply)\n\/\/\treturn strings.Replace(reply.Res.Msg, \"<br>\", \"\\n\", -1)\n\/\/}\n\/\/\n\/\/type simReply struct {\n\/\/\tresult int `json:\"code\"`\n\/\/\tRes res\n\/\/}\n\/\/type res struct {\n\/\/\tMsg string `json:\"msg\"`\n\/\/}\n\nfunc qinAI(info string) string {\n\tinfo = strings.Replace(info, \" \", \"+\", -1)\n\tqinURL := fmt.Sprintf(\"http:\/\/api.qingyunke.com\/api.php?key=free&appid=0&msg=%s\", info)\n\tresp, err := http.Get(qinURL)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t}\n\tdefer resp.Body.Close()\n\treply := new(qinReply)\n\tdecoder := json.NewDecoder(resp.Body)\n\tdecoder.Decode(reply)\n\tlog.Printf(\"reply from qingyunke machine: %s\", reply.Content)\n\tret := strings.Replace(reply.Content, \"{br}\", \"\\n\", -1)\n\treturn strings.Replace(ret, \"菲菲\", \"Jarvis\", -1)\n}\n\ntype qinReply struct {\n\tresult int `json:\"resulte\"`\n\tContent string `json:\"content\"`\n}\n\nfunc mitAI(info string) string {\n\tmitURL := \"http:\/\/fiddle.pandorabots.com\/pandora\/talk?botid=9fa364f2fe345a10&skin=demochat\"\n\tresp, err := http.PostForm(mitURL, url.Values{\"message\": {info}, \"botcust2\": {\"d064e07d6e067535\"}})\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tre, _ := regexp.Compile(\"Mitsuku:<\/B>(.*?)<br> <br>\")\n\tall := re.FindAll(body, -1)\n\tif len(all) == 0 {\n\t\treturn \"change another question?\"\n\t}\n\tfound := (string(all[0]))\n\tlog.Printf(\"reply from mitsuku machine: %s\", found)\n\tret := strings.Replace(found, `<P ALIGN=\"CENTER\"><img src=\"http:\/\/`, \"\", -1)\n\tret = strings.Replace(ret, `\"><\/img><\/P>`, \"\", -1)\n\tret = strings.Replace(ret[13:], \"<br>\", \"\\n\", -1)\n\tret = strings.Replace(ret, \"Mitsuku\", \"samaritan\", -1)\n\treturn ret\n}\n<|endoftext|>"} {"text":"<commit_before>package ircbot\n\nimport (\n\t\"bufio\"\n\t\"crypto\/rand\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/textproto\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype IrcBot struct {\n\t\/\/ identity\n\tUser string\n\tNick string\n\n\t\/\/ server info\n\tServer string\n\tPort string\n\tChannel []string\n\n\t\/\/ tcp communication\n\tconn net.Conn\n\treader *textproto.Reader\n\twriter *textproto.Writer\n\n\t\/\/ crypto\n\tEncrypted bool\n\tconfig tls.Config\n\n\t\/\/ data flow\n\tIn chan *IrcMsg\n\tOut chan *IrcMsg\n\tError chan error\n\n\t\/\/ exit flag\n\tExit chan bool\n\n\t\/\/action handlers\n\tHandlers map[string]ActionFunc\n\n\t\/\/are we joined in channel?\n\tjoined bool\n}\n\nfunc NewIrcBot() *IrcBot {\n\treturn &IrcBot{\n\t\tHandlers: make(map[string]ActionFunc),\n\t\tIn: make(chan *IrcMsg),\n\t\tOut: make(chan *IrcMsg),\n\t\tError: make(chan error),\n\t\tExit: make(chan bool),\n\t\tjoined: false,\n\t}\n}\n\nfunc (b *IrcBot) url() string {\n\treturn fmt.Sprintf(\"%s:%s\", b.Server, b.Port)\n}\n\nfunc (b *IrcBot) loadCert() {\n\n}\n\nfunc (b *IrcBot) Connect() {\n\t\/\/launch a go routine that handle errors\n\t\/\/ b.handleError()\n\n\tlog.Println(\"Info> connection to\", b.url())\n\n\tvar tcpCon net.Conn\n\tvar err error\n\tif b.Encrypted {\n\t\tcert, err := tls.LoadX509KeyPair(\"cert.pem\", \"key.pem\")\n\t\tb.errChk(err)\n\n\t\tconfig := tls.Config{Certificates: []tls.Certificate{cert}}\n\t\tconfig.Rand = rand.Reader\n\t\ttcpCon, err = tls.Dial(\"tcp\", b.url(), &config)\n\t\tb.errChk(err)\n\n\t} else {\n\t\ttcpCon, err = net.Dial(\"tcp\", b.url())\n\t\tb.errChk(err)\n\t}\n\n\tb.conn = tcpCon\n\tr := bufio.NewReader(b.conn)\n\tw := bufio.NewWriter(b.conn)\n\tb.reader = textproto.NewReader(r)\n\tb.writer = textproto.NewWriter(w)\n\n\tb.writer.PrintfLine(\"USER %s 8 * :%s\", b.Nick, b.Nick)\n\tb.writer.PrintfLine(\"NICK %s\", b.Nick)\n}\n\nfunc (b *IrcBot) Join() {\n\n\t\/\/prevent to send JOIN command before we are conected\n\tfor {\n\t\tif !b.joined {\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\n\tfor _, v := range b.Channel {\n\t\ts := fmt.Sprintf(\"JOIN %s\", v)\n\t\tfmt.Println(\"irc >> \", s)\n\t\tb.writer.PrintfLine(s)\n\t}\n\tb.joined = true\n}\n\nfunc (b *IrcBot) Listen() {\n\n\tgo func() {\n\n\t\tfor {\n\t\t\t\/\/block read line from socket\n\t\t\tline, err := b.reader.ReadLine()\n\t\t\tif err != nil {\n\t\t\t\tb.Error <- err\n\t\t\t}\n\t\t\t\/\/convert line into IrcMsg\n\t\t\tmsg := Parseline(line)\n\t\t\tb.In <- msg\n\t\t}\n\n\t}()\n}\n\nfunc (b *IrcBot) Say(s string) {\n\tmsg := NewIrcMsg()\n\tmsg.command = \"PRIVMSG\"\n\tmsg.args = append(msg.args, s)\n\n\tb.Out <- msg\n}\n\nfunc (b *IrcBot) HandleActionIn() {\n\tgo func() {\n\t\tfor {\n\t\t\t\/\/receive new message\n\t\t\tmsg := <-b.In\n\t\t\tfmt.Println(\"irc << \", msg.raw)\n\t\t\t\/\/handle action\n\t\t\tif action := b.Handlers[msg.command]; action != nil {\n\t\t\t\taction(b, msg)\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (b *IrcBot) HandleActionOut() {\n\tgo func() {\n\t\tfor {\n\t\t\tmsg := <-b.Out\n\n\t\t\t\/\/we send nothing before we sure we join channel\n\t\t\tif b.joined == false {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ts := fmt.Sprintf(\"%s %s\", msg.command, strings.Join(msg.args, \" \"))\n\t\t\tfmt.Println(\"irc >> \", s)\n\t\t\tb.writer.PrintfLine(s)\n\t\t}\n\t}()\n}\n\nfunc (b *IrcBot) HandleError() {\n\tgo func() {\n\t\tfor {\n\t\t\terr := <-b.Error\n\t\t\tfmt.Printf(\"error > %s\", err)\n\t\t\tif err != nil {\n\t\t\t\tb.Disconnect()\n\t\t\t\tlog.Fatalln(\"Error ocurs :\", err)\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (b *IrcBot) Disconnect() {\n\tb.writer.PrintfLine(\"QUIT\")\n\tb.conn.Close()\n}\n<commit_msg>utils function<commit_after>package ircbot\n\nimport (\n\t\"bufio\"\n\t\"crypto\/rand\"\n\t\"crypto\/tls\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net\/textproto\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype IrcBot struct {\n\t\/\/ identity\n\tUser string\n\tNick string\n\n\t\/\/ server info\n\tServer string\n\tPort string\n\tChannel []string\n\n\t\/\/ tcp communication\n\tconn net.Conn\n\treader *textproto.Reader\n\twriter *textproto.Writer\n\n\t\/\/ crypto\n\tEncrypted bool\n\tconfig tls.Config\n\n\t\/\/ data flow\n\tIn chan *IrcMsg\n\tOut chan *IrcMsg\n\tError chan error\n\n\t\/\/ exit flag\n\tExit chan bool\n\n\t\/\/action handlers\n\tHandlers map[string]ActionFunc\n\n\t\/\/are we joined in channel?\n\tjoined bool\n}\n\nfunc NewIrcBot() *IrcBot {\n\treturn &IrcBot{\n\t\tHandlers: make(map[string]ActionFunc),\n\t\tIn: make(chan *IrcMsg),\n\t\tOut: make(chan *IrcMsg),\n\t\tError: make(chan error),\n\t\tExit: make(chan bool),\n\t\tjoined: false,\n\t}\n}\n\nfunc (b *IrcBot) url() string {\n\treturn fmt.Sprintf(\"%s:%s\", b.Server, b.Port)\n}\n\nfunc (b *IrcBot) loadCert() {\n\n}\n\nfunc (b *IrcBot) Connect() {\n\t\/\/launch a go routine that handle errors\n\t\/\/ b.handleError()\n\n\tlog.Println(\"Info> connection to\", b.url())\n\n\tvar tcpCon net.Conn\n\tvar err error\n\tif b.Encrypted {\n\t\tcert, err := tls.LoadX509KeyPair(\"cert.pem\", \"key.pem\")\n\t\tb.errChk(err)\n\n\t\tconfig := tls.Config{Certificates: []tls.Certificate{cert}}\n\t\tconfig.Rand = rand.Reader\n\t\ttcpCon, err = tls.Dial(\"tcp\", b.url(), &config)\n\t\tb.errChk(err)\n\n\t} else {\n\t\ttcpCon, err = net.Dial(\"tcp\", b.url())\n\t\tb.errChk(err)\n\t}\n\n\tb.conn = tcpCon\n\tr := bufio.NewReader(b.conn)\n\tw := bufio.NewWriter(b.conn)\n\tb.reader = textproto.NewReader(r)\n\tb.writer = textproto.NewWriter(w)\n\n\tb.writer.PrintfLine(\"USER %s 8 * :%s\", b.Nick, b.Nick)\n\tb.writer.PrintfLine(\"NICK %s\", b.Nick)\n}\n\nfunc (b *IrcBot) Join() {\n\n\t\/\/prevent to send JOIN command before we are conected\n\tfor {\n\t\tif !b.joined {\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\n\tfor _, v := range b.Channel {\n\t\ts := fmt.Sprintf(\"JOIN %s\", v)\n\t\tfmt.Println(\"irc >> \", s)\n\t\tb.writer.PrintfLine(s)\n\t}\n\tb.joined = true\n}\n\nfunc (b *IrcBot) Listen() {\n\n\tgo func() {\n\n\t\tfor {\n\t\t\t\/\/block read line from socket\n\t\t\tline, err := b.reader.ReadLine()\n\t\t\tif err != nil {\n\t\t\t\tb.Error <- err\n\t\t\t}\n\t\t\t\/\/convert line into IrcMsg\n\t\t\tmsg := Parseline(line)\n\t\t\tb.In <- msg\n\t\t}\n\n\t}()\n}\n\nfunc (b *IrcBot) Say(s string) {\n\tmsg := NewIrcMsg()\n\tmsg.command = \"PRIVMSG\"\n\tmsg.args = append(msg.args, s)\n\n\tb.Out <- msg\n}\n\nfunc (b *IrcBot) HandleActionIn() {\n\tgo func() {\n\t\tfor {\n\t\t\t\/\/receive new message\n\t\t\tmsg := <-b.In\n\t\t\tfmt.Println(\"irc << \", msg.raw)\n\t\t\t\/\/handle action\n\t\t\tif action := b.Handlers[msg.command]; action != nil {\n\t\t\t\taction(b, msg)\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (b *IrcBot) HandleActionOut() {\n\tgo func() {\n\t\tfor {\n\t\t\tmsg := <-b.Out\n\n\t\t\t\/\/we send nothing before we sure we join channel\n\t\t\tif b.joined == false {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\ts := fmt.Sprintf(\"%s %s\", msg.command, strings.Join(msg.args, \" \"))\n\t\t\tfmt.Println(\"irc >> \", s)\n\t\t\tb.writer.PrintfLine(s)\n\t\t}\n\t}()\n}\n\nfunc (b *IrcBot) HandleError() {\n\tgo func() {\n\t\tfor {\n\t\t\terr := <-b.Error\n\t\t\tfmt.Printf(\"error > %s\", err)\n\t\t\tif err != nil {\n\t\t\t\tb.Disconnect()\n\t\t\t\tlog.Fatalln(\"Error ocurs :\", err)\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (b *IrcBot) errChk(err error) {\n\tif err != nil {\n\t\tlog.Println(\"Error> \", err)\n\t\tb.Error <- err\n\t}\n}\n\nfunc (b *IrcBot) Disconnect() {\n\tb.writer.PrintfLine(\"QUIT\")\n\tb.conn.Close()\n}\n\nfunc (b *IrcBot) String() string {\n\ts := fmt.Sprintf(\"server: %s\\n\", b.Server)\n\ts += fmt.Sprintf(\"port: %s\\n\", b.Port)\n\ts += fmt.Sprintf(\"ssl: %t\\n\", b.Encrypted)\n\n\tif len(b.Channel) > 0 {\n\t\ts += \"channels: \"\n\t\tfor _, v := range b.Channel {\n\t\t\ts += fmt.Sprintf(\"%s \", v)\n\t\t}\n\t}\n\n\treturn s\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Brief: Bot logic\n\/\/ Primary responsibility: Top level logic layer for bot\n\npackage main\n\nimport (\n\t\"log\"\n\n\tbmul \"github.com\/julwrites\/BotMultiplexer\"\n)\n\nfunc HelpMessage(env *bmul.SessionData) {\n\tenv.Res.Message = \"Hello %s! Give me a Bible reference and I'll give you the passage!\" \/\/\\nHere are some other things I can do:\\n\/tms - Get a card from the Navigators' Topical Memory System\\n\/version - Choose your preferred Bible version\\n\/dailydevo - Get reading material right now\\n\/subscribe - Subscribe to \/ Unsubscribe from daily reading material\\n\/search - Search for a passage, lexicon entry, word or phrase\\n\"\n}\n\nfunc RunCommands(env *bmul.SessionData) {\n\tswitch env.Msg.Command {\n\tdefault:\n\t\tGetBiblePassage(env)\n\t}\n}\n\nfunc HandleBotLogic(env *bmul.SessionData) {\n\tRunCommands(env)\n\n\tlog.Printf(\"Commands run, resulting message: %s\", env.Res.Message)\n\tif len(env.Res.Message) == 0 {\n\t\tlog.Printf(\"This message was not handled by bot\")\n\t\tHelpMessage(env)\n\t}\n}\n<commit_msg>Formatting help message for personalization<commit_after>\/\/ Brief: Bot logic\n\/\/ Primary responsibility: Top level logic layer for bot\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\tbmul \"github.com\/julwrites\/BotMultiplexer\"\n)\n\nfunc HelpMessage(env *bmul.SessionData) {\n\tenv.Res.Message = fmt.Sprintf(\"Hello %s! Give me a Bible reference and I'll give you the passage!\", env.User.Firstname)\n\t\/\/\\nHere are some other things I can do:\\n\/tms - Get a card from the Navigators' Topical Memory System\\n\/version - Choose your preferred Bible version\\n\/dailydevo - Get reading material right now\\n\/subscribe - Subscribe to \/ Unsubscribe from daily reading material\\n\/search - Search for a passage, lexicon entry, word or phrase\\n\"\n}\n\nfunc RunCommands(env *bmul.SessionData) {\n\tswitch env.Msg.Command {\n\tdefault:\n\t\tGetBiblePassage(env)\n\t}\n}\n\nfunc HandleBotLogic(env *bmul.SessionData) {\n\tRunCommands(env)\n\n\tlog.Printf(\"Commands run, resulting message: %s\", env.Res.Message)\n\tif len(env.Res.Message) == 0 {\n\t\tlog.Printf(\"This message was not handled by bot\")\n\t\tHelpMessage(env)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package mentionbot\n\nimport (\n\t\"github.com\/kurrik\/oauth1a\"\n\t\"github.com\/kurrik\/twittergo\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n)\n\n\/\/ Bot type\ntype Bot struct {\n\tclient *twittergo.Client\n}\n\n\/\/ NewBot returns new bot\nfunc NewBot(consumerKey string, consumerSecret string) *Bot {\n\tclientConfig := &oauth1a.ClientConfig{\n\t\tConsumerKey: consumerKey,\n\t\tConsumerSecret: consumerSecret,\n\t}\n\tclient := twittergo.NewClient(clientConfig, nil)\n\treturn &Bot{\n\t\tclient: client,\n\t}\n}\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n}\n\n\/\/ FollowersIDs returns follower's IDs\nfunc (bot *Bot) FollowersIDs(userID string) ([]string, error) {\n\tvar (\n\t\tids []string\n\t\tcursor string\n\t)\n\tfor {\n\t\tquery := url.Values{}\n\t\tquery.Set(\"user_id\", userID)\n\t\tquery.Set(\"stringify_ids\", \"true\")\n\t\tquery.Set(\"count\", \"5000\")\n\t\tif cursor != \"\" {\n\t\t\tquery.Set(\"cursor\", cursor)\n\t\t}\n\t\treq, err := http.NewRequest(\"GET\", \"\/1.1\/followers\/ids.json?\"+query.Encode(), nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tres, err := bot.client.SendRequest(req)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults := &CursoredIDs{}\n\t\tif err := res.Parse(results); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tids = append(ids, results.IDs()...)\n\n\t\tif results.NextCursorStr() == \"0\" {\n\t\t\tbreak\n\t\t} else {\n\t\t\tcursor = results.NextCursorStr()\n\t\t}\n\t}\n\n\tfor i := len(ids) - 1; i >= 0; i-- {\n\t\tj := rand.Intn(i + 1)\n\t\tids[i], ids[j] = ids[j], ids[i]\n\t}\n\treturn ids, nil\n}\n<commit_msg>add UsersLookup method<commit_after>package mentionbot\n\nimport (\n\t\"github.com\/kurrik\/oauth1a\"\n\t\"github.com\/kurrik\/twittergo\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ Bot type\ntype Bot struct {\n\tclient *twittergo.Client\n}\n\n\/\/ NewBot returns new bot\nfunc NewBot(consumerKey string, consumerSecret string) *Bot {\n\tclientConfig := &oauth1a.ClientConfig{\n\t\tConsumerKey: consumerKey,\n\t\tConsumerSecret: consumerSecret,\n\t}\n\tclient := twittergo.NewClient(clientConfig, nil)\n\treturn &Bot{\n\t\tclient: client,\n\t}\n}\n\nfunc init() {\n\trand.Seed(time.Now().UnixNano())\n}\n\n\/\/ UsersLookup returns list of users info\nfunc (bot *Bot) UsersLookup(ids []string) error {\n\tquery := url.Values{}\n\tquery.Set(\"user_id\", strings.Join(ids, \",\"))\n\treq, err := http.NewRequest(\"GET\", \"\/1.1\/users\/lookup.json?\"+query.Encode(), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tres, err := bot.client.SendRequest(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlog.Println(res.ReadBody())\n\treturn nil\n}\n\n\/\/ FollowersIDs returns follower's IDs\nfunc (bot *Bot) FollowersIDs(userID string) ([]string, error) {\n\tvar (\n\t\tids []string\n\t\tcursor string\n\t)\n\tfor {\n\t\tquery := url.Values{}\n\t\tquery.Set(\"user_id\", userID)\n\t\tquery.Set(\"stringify_ids\", \"true\")\n\t\tquery.Set(\"count\", \"5000\")\n\t\tif cursor != \"\" {\n\t\t\tquery.Set(\"cursor\", cursor)\n\t\t}\n\t\treq, err := http.NewRequest(\"GET\", \"\/1.1\/followers\/ids.json?\"+query.Encode(), nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tres, err := bot.client.SendRequest(req)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresults := &CursoredIDs{}\n\t\tif err := res.Parse(results); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tids = append(ids, results.IDs()...)\n\n\t\tif results.NextCursorStr() == \"0\" {\n\t\t\tbreak\n\t\t} else {\n\t\t\tcursor = results.NextCursorStr()\n\t\t}\n\t}\n\n\tfor i := len(ids) - 1; i >= 0; i-- {\n\t\tj := rand.Intn(i + 1)\n\t\tids[i], ids[j] = ids[j], ids[i]\n\t}\n\treturn ids, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/hacsoc\/slacksoc\/api\"\n)\n\nconst (\n\tTOKEN_VAR = \"SLACKSOC_TOKEN\"\n\tNO_TOKEN_ERROR = \"You must have the SLACKSOC_TOKEN variable to run the\" +\n\t\t\t\t\t \" slacksoc bot\"\n\tVERSION = \"0.0.5\"\n)\n\ntype Bot struct {\n\tToken string\n\tChannels map[string]string\n\tWebSocketURL string\n}\n\nfunc NewBot(token string) *Bot {\n\treturn &Bot{Token: token}\n}\n\nfunc (bot *Bot) Call(method string, data url.Values) (*http.Response, error) {\n\tdata.Set(\"token\", bot.Token)\n\treturn api.Call(method, data)\n}\n\nfunc (bot *Bot) Start() error {\n\tpayload, err := httpToJSON(bot.Call(\"rtm.start\", url.Values{}))\n\tif err != nil {\n\t\treturn err\n\t}\n\tok, present := payload[\"ok\"].(bool)\n\tif !present || ok != true {\n\t\treturn &SlacksocError{\"could not connect to RTM API\"}\n\t}\n\tbot.GetChannelInfo()\n\twebsocketURL, _ := payload[\"url\"].(string)\n\tbot.WebSocketURL = websocketURL\n\treturn nil\n}\n\nfunc (bot *Bot) Loop() error {\n\tdialer := websocket.Dialer{}\n\tconn, _, err := dialer.Dial(bot.WebSocketURL, http.Header{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor {\n\t\tmessageType, bytes, err := conn.ReadMessage()\n\t\tif err != nil {\n\t\t\t\/\/ NextReader returns an error if the connection is closed\n\t\t\tconn.Close()\n\t\t\treturn nil\n\t\t}\n\t\tif messageType == websocket.BinaryMessage {\n\t\t\tcontinue \/\/ ignore binary messages\n\t\t}\n\t\tvar message map[string]interface{}\n\t\tif err = json.Unmarshal(bytes, &message); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Println(message)\n\t\tif _, ok := message[\"type\"]; !ok {\n\t\t\tcontinue\n\t\t}\n\t\tswitch message[\"type\"].(string) {\n\t\tcase \"message\":\n\t\t\tbot.ReceiveMessage(conn, message)\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc (bot *Bot) ReceiveMessage(conn *websocket.Conn, message map[string]interface{}) {\n\tsubtype, _ := message[\"subtype\"]\n\thiddenSubtype, ok := message[\"hidden\"]\n\thidden := ok && hiddenSubtype.(bool)\n\treply := bot.ConstructReply(message, subtype, hidden)\n\tif reply != nil {\n\t\tconn.WriteJSON(reply)\n\t}\n}\n\nfunc (bot *Bot) ConstructReply(message map[string]interface{}, subtype interface{}, hidden bool) interface{} {\n\tif subtype != nil {\n\t\tswitch subtype.(string) {\n\t\tcase \"bot_message\":\n\t\t\t\/\/ don't reply to other bots\n\t\t\treturn nil\n\t\tcase \"channel_join\":\n\t\t\treturn bot.SetRealNameFields(message)\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\ttext := message[\"text\"].(string)\n\t\tif strings.Contains(text, \"hi slacksoc\") {\n\t\t\treturn Mention(message[\"user\"].(string), message[\"channel\"].(string), \"hi \", \"\")\n\t\t} else if text == \"slacksoc: pm me\" {\n\t\t\treturn bot.DirectMessage(message[\"user\"].(string), \"hi\")\n\t\t} else if text == \"have you tried installing Gentoo?\" {\n\t\t\tgo bot.React(message, \"funroll-loops\")\n\t\t\treturn nil\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc (bot *Bot) React(message map[string]interface{}, reaction string) {\n\tchannel := message[\"channel\"].(string)\n\ttimestamp := message[\"ts\"].(string)\n\tparameters := url.Values{}\n\tparameters.Set(\"channel\", channel)\n\tparameters.Set(\"timestamp\", timestamp)\n\tparameters.Set(\"name\", reaction)\n\tbot.Call(\"reactions.add\", parameters)\n}\n\nfunc (bot *Bot) SetRealNameFields(message map[string]interface{}) interface{} {\n\tchannel := message[\"channel\"].(string)\n\tif channel != bot.Channels[\"general\"] {\n\t\treturn nil\n\t}\n\tuserID := message[\"user\"].(string)\n\tdmChan := make(chan string)\n\tuserChan := make(chan interface{})\n\tgo func() {\n\t\tdm, _ := bot.OpenDirectMessage(userID)\n\t\tdmChan <- dm\n\t}()\n\tgo func() {\n\t\tresp, err := bot.Call(\"users.info\", url.Values{\"user\": []string{userID}})\n\t\tpayload, err := httpToJSON(resp, err)\n\t\tuserChan <- payload\n\t}()\n\tpayload := (<- userChan).(map[string]interface{})\n\tsuccess := payload[\"ok\"].(bool)\n\tif !success {\n\t\tfmt.Println(payload)\n\t\treturn nil\n\t}\n\tuser := payload[\"user\"].(map[string]interface{})\n\tnick := user[\"name\"].(string)\n\ttext := \"Please set your real name fields. https:\/\/hacsoc.slack.com\/team\/%s.\"\n\ttext += \" Then click \\\"Edit\\\".\"\n\ttext = fmt.Sprintf(text, nick)\n\tdm := <- dmChan\n\treturn NewMessage(text, dm).ToMap()\n}\n\nfunc main() {\n\ttoken := os.Getenv(TOKEN_VAR)\n\tif token == \"\" {\n\t\tfmt.Println(NO_TOKEN_ERROR)\n\t\tos.Exit(1)\n\t}\n\n\tbot := NewBot(token)\n\tfmt.Println(\"Starting bot\")\n\tif err := bot.Start(); err != nil {\n\t\tfmt.Println(err)\n\t}\n\tfmt.Println(\"Looping\")\n\tif err := bot.Loop(); err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n<commit_msg>bot just listens for gentoo now, and bump patch number<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/hacsoc\/slacksoc\/api\"\n)\n\nconst (\n\tTOKEN_VAR = \"SLACKSOC_TOKEN\"\n\tNO_TOKEN_ERROR = \"You must have the SLACKSOC_TOKEN variable to run the\" +\n\t\t\t\t\t \" slacksoc bot\"\n\tVERSION = \"0.0.6\"\n)\n\ntype Bot struct {\n\tToken string\n\tChannels map[string]string\n\tWebSocketURL string\n}\n\nfunc NewBot(token string) *Bot {\n\treturn &Bot{Token: token}\n}\n\nfunc (bot *Bot) Call(method string, data url.Values) (*http.Response, error) {\n\tdata.Set(\"token\", bot.Token)\n\treturn api.Call(method, data)\n}\n\nfunc (bot *Bot) Start() error {\n\tpayload, err := httpToJSON(bot.Call(\"rtm.start\", url.Values{}))\n\tif err != nil {\n\t\treturn err\n\t}\n\tok, present := payload[\"ok\"].(bool)\n\tif !present || ok != true {\n\t\treturn &SlacksocError{\"could not connect to RTM API\"}\n\t}\n\tbot.GetChannelInfo()\n\twebsocketURL, _ := payload[\"url\"].(string)\n\tbot.WebSocketURL = websocketURL\n\treturn nil\n}\n\nfunc (bot *Bot) Loop() error {\n\tdialer := websocket.Dialer{}\n\tconn, _, err := dialer.Dial(bot.WebSocketURL, http.Header{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor {\n\t\tmessageType, bytes, err := conn.ReadMessage()\n\t\tif err != nil {\n\t\t\t\/\/ NextReader returns an error if the connection is closed\n\t\t\tconn.Close()\n\t\t\treturn nil\n\t\t}\n\t\tif messageType == websocket.BinaryMessage {\n\t\t\tcontinue \/\/ ignore binary messages\n\t\t}\n\t\tvar message map[string]interface{}\n\t\tif err = json.Unmarshal(bytes, &message); err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Println(message)\n\t\tif _, ok := message[\"type\"]; !ok {\n\t\t\tcontinue\n\t\t}\n\t\tswitch message[\"type\"].(string) {\n\t\tcase \"message\":\n\t\t\tbot.ReceiveMessage(conn, message)\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc (bot *Bot) ReceiveMessage(conn *websocket.Conn, message map[string]interface{}) {\n\tsubtype, _ := message[\"subtype\"]\n\thiddenSubtype, ok := message[\"hidden\"]\n\thidden := ok && hiddenSubtype.(bool)\n\treply := bot.ConstructReply(message, subtype, hidden)\n\tif reply != nil {\n\t\tconn.WriteJSON(reply)\n\t}\n}\n\nfunc (bot *Bot) ConstructReply(message map[string]interface{}, subtype interface{}, hidden bool) interface{} {\n\tif subtype != nil {\n\t\tswitch subtype.(string) {\n\t\tcase \"bot_message\":\n\t\t\t\/\/ don't reply to other bots\n\t\t\treturn nil\n\t\tcase \"channel_join\":\n\t\t\treturn bot.SetRealNameFields(message)\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\ttext := message[\"text\"].(string)\n\t\tif strings.Contains(text, \"hi slacksoc\") {\n\t\t\treturn Mention(message[\"user\"].(string), message[\"channel\"].(string), \"hi \", \"\")\n\t\t} else if text == \"slacksoc: pm me\" {\n\t\t\treturn bot.DirectMessage(message[\"user\"].(string), \"hi\")\n\t\t} else if strings.Contains(text, \"gentoo\") || strings.Contains(text, \"Gentoo\") {\n\t\t\tgo bot.React(message, \"funroll-loops\")\n\t\t\treturn nil\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc (bot *Bot) React(message map[string]interface{}, reaction string) {\n\tchannel := message[\"channel\"].(string)\n\ttimestamp := message[\"ts\"].(string)\n\tparameters := url.Values{}\n\tparameters.Set(\"channel\", channel)\n\tparameters.Set(\"timestamp\", timestamp)\n\tparameters.Set(\"name\", reaction)\n\tbot.Call(\"reactions.add\", parameters)\n}\n\nfunc (bot *Bot) SetRealNameFields(message map[string]interface{}) interface{} {\n\tchannel := message[\"channel\"].(string)\n\tif channel != bot.Channels[\"general\"] {\n\t\treturn nil\n\t}\n\tuserID := message[\"user\"].(string)\n\tdmChan := make(chan string)\n\tuserChan := make(chan interface{})\n\tgo func() {\n\t\tdm, _ := bot.OpenDirectMessage(userID)\n\t\tdmChan <- dm\n\t}()\n\tgo func() {\n\t\tresp, err := bot.Call(\"users.info\", url.Values{\"user\": []string{userID}})\n\t\tpayload, err := httpToJSON(resp, err)\n\t\tuserChan <- payload\n\t}()\n\tpayload := (<- userChan).(map[string]interface{})\n\tsuccess := payload[\"ok\"].(bool)\n\tif !success {\n\t\tfmt.Println(payload)\n\t\treturn nil\n\t}\n\tuser := payload[\"user\"].(map[string]interface{})\n\tnick := user[\"name\"].(string)\n\ttext := \"Please set your real name fields. https:\/\/hacsoc.slack.com\/team\/%s.\"\n\ttext += \" Then click \\\"Edit\\\".\"\n\ttext = fmt.Sprintf(text, nick)\n\tdm := <- dmChan\n\treturn NewMessage(text, dm).ToMap()\n}\n\nfunc main() {\n\ttoken := os.Getenv(TOKEN_VAR)\n\tif token == \"\" {\n\t\tfmt.Println(NO_TOKEN_ERROR)\n\t\tos.Exit(1)\n\t}\n\n\tbot := NewBot(token)\n\tfmt.Println(\"Starting bot\")\n\tif err := bot.Start(); err != nil {\n\t\tfmt.Println(err)\n\t}\n\tfmt.Println(\"Looping\")\n\tif err := bot.Loop(); err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package ccg\n\n\/\/go:generate myccg -uses AstDecls,AstDecls.Filter -package ccg -output utils.go slice ast.Decl AstDecls\n\/\/go:generate myccg -uses AstSpecs,AstSpecs.Filter -package ccg -output utils.go slice ast.Spec AstSpecs\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/format\"\n\t\"go\/token\"\n\t\"io\"\n\n\t\"golang.org\/x\/tools\/go\/loader\"\n\t\"golang.org\/x\/tools\/go\/types\"\n\t\"golang.org\/x\/tools\/imports\"\n)\n\nvar (\n\tpt = fmt.Printf\n)\n\ntype Config struct {\n\tFrom string\n\tParams map[string]string\n\tRenames map[string]string\n\tWriter io.Writer\n\tPackage string\n\tDecls []ast.Decl\n\tNameFilters []func(string) bool\n\tFileSet *token.FileSet\n}\n\nfunc Copy(config Config) error {\n\t\/\/ load package\n\tloadConf := loader.Config{\n\t\tFset: config.FileSet,\n\t}\n\tloadConf.Import(config.From)\n\tprogram, err := loadConf.Load()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"ccg: load package %v\", err)\n\t}\n\tinfo := program.Imported[config.From]\n\n\t\/\/ remove param declarations\n\tfor _, f := range info.Files {\n\t\tf.Decls = AstDecls(f.Decls).Filter(func(decl ast.Decl) (ret bool) {\n\t\t\tif decl, ok := decl.(*ast.GenDecl); !ok {\n\t\t\t\treturn true\n\t\t\t} else {\n\t\t\t\tif decl.Tok == token.TYPE {\n\t\t\t\t\tdecl.Specs = AstSpecs(decl.Specs).Filter(func(spec ast.Spec) bool {\n\t\t\t\t\t\tname := spec.(*ast.TypeSpec).Name.Name\n\t\t\t\t\t\t_, exists := config.Params[name]\n\t\t\t\t\t\treturn !exists\n\t\t\t\t\t})\n\t\t\t\t\tret = len(decl.Specs) > 0\n\t\t\t\t} else if decl.Tok == token.VAR {\n\t\t\t\t\tdecl.Specs = AstSpecs(decl.Specs).Filter(func(sp ast.Spec) bool {\n\t\t\t\t\t\tspec := sp.(*ast.ValueSpec)\n\t\t\t\t\t\tnames := []*ast.Ident{}\n\t\t\t\t\t\tvalues := []ast.Expr{}\n\t\t\t\t\t\tfor i, name := range spec.Names {\n\t\t\t\t\t\t\tif _, exists := config.Params[name.Name]; !exists {\n\t\t\t\t\t\t\t\tnames = append(names, name)\n\t\t\t\t\t\t\t\tif i < len(spec.Values) {\n\t\t\t\t\t\t\t\t\tvalues = append(values, spec.Values[i])\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tspec.Names = names\n\t\t\t\t\t\tif len(values) == 0 {\n\t\t\t\t\t\t\tspec.Values = nil\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tspec.Values = values\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn len(spec.Names) > 0\n\t\t\t\t\t})\n\t\t\t\t\tret = len(decl.Specs) > 0\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\t})\n\t}\n\n\t\/\/ collect objects to rename\n\tobjects := make(map[types.Object]string)\n\tcollectObjects := func(mapping map[string]string) error {\n\t\tfor from, to := range mapping {\n\t\t\tobj := info.Pkg.Scope().Lookup(from)\n\t\t\tif obj == nil {\n\t\t\t\treturn fmt.Errorf(\"ccg: name not found %s\", from)\n\t\t\t}\n\t\t\tobjects[obj] = to\n\t\t}\n\t\treturn nil\n\t}\n\tif err := collectObjects(config.Params); err != nil {\n\t\treturn err\n\t}\n\tif err := collectObjects(config.Renames); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ rename\n\trename := func(defs map[*ast.Ident]types.Object) {\n\t\tfor id, obj := range defs {\n\t\t\tif to, ok := objects[obj]; ok {\n\t\t\t\tid.Name = to\n\t\t\t}\n\t\t}\n\t}\n\trename(info.Defs)\n\trename(info.Uses)\n\n\t\/\/ collect existing decls\n\texistingVars := make(map[string]func(expr ast.Expr))\n\texistingTypes := make(map[string]func(expr ast.Expr))\n\texistingFuncs := make(map[string]func(fn *ast.FuncDecl))\n\tfor i, decl := range config.Decls {\n\t\tswitch decl := decl.(type) {\n\t\tcase *ast.GenDecl:\n\t\t\tswitch decl.Tok {\n\t\t\tcase token.VAR:\n\t\t\t\tfor _, spec := range decl.Specs {\n\t\t\t\t\tspec := spec.(*ast.ValueSpec)\n\t\t\t\t\tfor i, name := range spec.Names {\n\t\t\t\t\t\ti := i\n\t\t\t\t\t\tspec := spec\n\t\t\t\t\t\texistingVars[name.Name] = func(expr ast.Expr) {\n\t\t\t\t\t\t\tspec.Values[i] = expr\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase token.TYPE:\n\t\t\t\tfor i, spec := range decl.Specs {\n\t\t\t\t\tspec := spec.(*ast.TypeSpec)\n\t\t\t\t\ti := i\n\t\t\t\t\tdecl := decl\n\t\t\t\t\texistingTypes[spec.Name.Name] = func(expr ast.Expr) {\n\t\t\t\t\t\tdecl.Specs[i].(*ast.TypeSpec).Type = expr\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase *ast.FuncDecl:\n\t\t\tname := decl.Name.Name\n\t\t\tif decl.Recv != nil {\n\t\t\t\tname = decl.Recv.List[0].Type.(*ast.Ident).Name + \".\" + name\n\t\t\t}\n\t\t\ti := i\n\t\t\texistingFuncs[name] = func(fndecl *ast.FuncDecl) {\n\t\t\t\tconfig.Decls[i] = fndecl\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ collect output declarations\n\tdecls := []ast.Decl{}\n\tfor _, f := range info.Files {\n\tloopSpec:\n\t\tfor _, decl := range f.Decls {\n\t\t\tswitch decl := decl.(type) {\n\t\t\tcase *ast.GenDecl:\n\t\t\t\tswitch decl.Tok {\n\t\t\t\t\/\/ var\n\t\t\t\tcase token.VAR:\n\t\t\t\t\tnewDecl := &ast.GenDecl{\n\t\t\t\t\t\tTok: token.VAR,\n\t\t\t\t\t}\n\t\t\t\t\tfor _, spec := range decl.Specs {\n\t\t\t\t\t\tspec := spec.(*ast.ValueSpec)\n\t\t\t\t\tloopVarName:\n\t\t\t\t\t\tfor i, name := range spec.Names {\n\t\t\t\t\t\t\tfor _, filter := range config.NameFilters {\n\t\t\t\t\t\t\t\tif !filter(name.Name) {\n\t\t\t\t\t\t\t\t\tcontinue loopVarName\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif mutator, ok := existingVars[name.Name]; ok {\n\t\t\t\t\t\t\t\tmutator(spec.Values[i])\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tnewDecl.Specs = append(newDecl.Specs, spec)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif len(newDecl.Specs) > 0 {\n\t\t\t\t\t\tdecls = append(decls, newDecl)\n\t\t\t\t\t}\n\t\t\t\t\/\/ type\n\t\t\t\tcase token.TYPE:\n\t\t\t\t\tnewDecl := &ast.GenDecl{\n\t\t\t\t\t\tTok: token.TYPE,\n\t\t\t\t\t}\n\t\t\t\tloopTypeSpec:\n\t\t\t\t\tfor _, spec := range decl.Specs {\n\t\t\t\t\t\tspec := spec.(*ast.TypeSpec)\n\t\t\t\t\t\tname := spec.Name.Name\n\t\t\t\t\t\tfor _, filter := range config.NameFilters {\n\t\t\t\t\t\t\tif !filter(name) {\n\t\t\t\t\t\t\t\tcontinue loopTypeSpec\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif mutator, ok := existingTypes[name]; ok {\n\t\t\t\t\t\t\tmutator(spec.Type)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tnewDecl.Specs = append(newDecl.Specs, spec)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif len(newDecl.Specs) > 0 {\n\t\t\t\t\t\tdecls = append(decls, newDecl)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\/\/ func\n\t\t\tcase *ast.FuncDecl:\n\t\t\t\tname := decl.Name.Name\n\t\t\t\tfor _, filter := range config.NameFilters {\n\t\t\t\t\tif !filter(name) {\n\t\t\t\t\t\tcontinue loopSpec\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif decl.Recv != nil {\n\t\t\t\t\tname = decl.Recv.List[0].Type.(*ast.Ident).Name + \".\" + name\n\t\t\t\t}\n\t\t\t\tif mutator, ok := existingFuncs[name]; ok {\n\t\t\t\t\tmutator(decl)\n\t\t\t\t} else {\n\t\t\t\t\tconfig.Decls = append(config.Decls, decl)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tdecls = append(decls, config.Decls...)\n\n\tvar importDecls, newDecls []ast.Decl\n\tfor _, decl := range decls {\n\t\t\/\/ ensure linebreak between decls\n\t\tswitch decl := decl.(type) {\n\t\tcase *ast.FuncDecl:\n\t\t\tif decl.Doc == nil {\n\t\t\t\tdecl.Doc = new(ast.CommentGroup)\n\t\t\t}\n\t\tcase *ast.GenDecl:\n\t\t\tif decl.Doc == nil {\n\t\t\t\tdecl.Doc = new(ast.CommentGroup)\n\t\t\t}\n\t\t}\n\t\t\/\/ move import decls to beginning\n\t\tif decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT {\n\t\t\timportDecls = append(importDecls, decl)\n\t\t\tcontinue\n\t\t}\n\t\tnewDecls = append(newDecls, decl)\n\t}\n\tdecls = append(importDecls, newDecls...)\n\n\t\/\/ output\n\tif config.Writer != nil {\n\t\tif config.Package != \"\" { \/\/ output complete file\n\t\t\tfile := &ast.File{\n\t\t\t\tName: ast.NewIdent(config.Package),\n\t\t\t\tDecls: decls,\n\t\t\t}\n\t\t\tbuf := new(bytes.Buffer)\n\t\t\terr = format.Node(buf, program.Fset, file)\n\t\t\tif err != nil { \/\/NOCOVER\n\t\t\t\treturn fmt.Errorf(\"ccg: format output %v\", err)\n\t\t\t}\n\t\t\tbs, err := imports.Process(\"\", buf.Bytes(), nil)\n\t\t\tif err != nil { \/\/NOCOVER\n\t\t\t\treturn fmt.Errorf(\"ccg: format output %v\", err)\n\t\t\t}\n\t\t\tconfig.Writer.Write(bs)\n\t\t} else { \/\/ output decls only\n\t\t\terr = format.Node(config.Writer, program.Fset, decls)\n\t\t\tif err != nil { \/\/NOCOVER\n\t\t\t\treturn fmt.Errorf(\"ccg: format output %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>deal with import and const decls correctly<commit_after>package ccg\n\n\/\/go:generate myccg -uses AstDecls,AstDecls.Filter -package ccg -output utils.go slice ast.Decl AstDecls\n\/\/go:generate myccg -uses AstSpecs,AstSpecs.Filter -package ccg -output utils.go slice ast.Spec AstSpecs\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/format\"\n\t\"go\/token\"\n\t\"io\"\n\n\t\"golang.org\/x\/tools\/go\/loader\"\n\t\"golang.org\/x\/tools\/go\/types\"\n\t\"golang.org\/x\/tools\/imports\"\n)\n\nvar (\n\tpt = fmt.Printf\n)\n\ntype Config struct {\n\tFrom string\n\tParams map[string]string\n\tRenames map[string]string\n\tWriter io.Writer\n\tPackage string\n\tDecls []ast.Decl\n\tNameFilters []func(string) bool\n\tFileSet *token.FileSet\n}\n\nfunc Copy(config Config) error {\n\t\/\/ load package\n\tloadConf := loader.Config{\n\t\tFset: config.FileSet,\n\t}\n\tloadConf.Import(config.From)\n\tprogram, err := loadConf.Load()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"ccg: load package %v\", err)\n\t}\n\tinfo := program.Imported[config.From]\n\n\t\/\/ remove param declarations\n\tfor _, f := range info.Files {\n\t\tf.Decls = AstDecls(f.Decls).Filter(func(decl ast.Decl) (ret bool) {\n\t\t\tswitch decl := decl.(type) {\n\t\t\tcase *ast.GenDecl:\n\t\t\t\tswitch decl.Tok {\n\t\t\t\tcase token.TYPE:\n\t\t\t\t\tdecl.Specs = AstSpecs(decl.Specs).Filter(func(spec ast.Spec) bool {\n\t\t\t\t\t\tname := spec.(*ast.TypeSpec).Name.Name\n\t\t\t\t\t\t_, exists := config.Params[name]\n\t\t\t\t\t\treturn !exists\n\t\t\t\t\t})\n\t\t\t\t\tret = len(decl.Specs) > 0\n\t\t\t\tcase token.VAR:\n\t\t\t\t\tdecl.Specs = AstSpecs(decl.Specs).Filter(func(sp ast.Spec) bool {\n\t\t\t\t\t\tspec := sp.(*ast.ValueSpec)\n\t\t\t\t\t\tnames := []*ast.Ident{}\n\t\t\t\t\t\tvalues := []ast.Expr{}\n\t\t\t\t\t\tfor i, name := range spec.Names {\n\t\t\t\t\t\t\tif _, exists := config.Params[name.Name]; !exists {\n\t\t\t\t\t\t\t\tnames = append(names, name)\n\t\t\t\t\t\t\t\tif i < len(spec.Values) {\n\t\t\t\t\t\t\t\t\tvalues = append(values, spec.Values[i])\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tspec.Names = names\n\t\t\t\t\t\tif len(values) == 0 {\n\t\t\t\t\t\t\tspec.Values = nil\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tspec.Values = values\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn len(spec.Names) > 0\n\t\t\t\t\t})\n\t\t\t\t\tret = len(decl.Specs) > 0\n\t\t\t\tdefault:\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn\n\t\t})\n\t}\n\n\t\/\/ collect objects to rename\n\tobjects := make(map[types.Object]string)\n\tcollectObjects := func(mapping map[string]string) error {\n\t\tfor from, to := range mapping {\n\t\t\tobj := info.Pkg.Scope().Lookup(from)\n\t\t\tif obj == nil {\n\t\t\t\treturn fmt.Errorf(\"ccg: name not found %s\", from)\n\t\t\t}\n\t\t\tobjects[obj] = to\n\t\t}\n\t\treturn nil\n\t}\n\tif err := collectObjects(config.Params); err != nil {\n\t\treturn err\n\t}\n\tif err := collectObjects(config.Renames); err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ rename\n\trename := func(defs map[*ast.Ident]types.Object) {\n\t\tfor id, obj := range defs {\n\t\t\tif to, ok := objects[obj]; ok {\n\t\t\t\tid.Name = to\n\t\t\t}\n\t\t}\n\t}\n\trename(info.Defs)\n\trename(info.Uses)\n\n\t\/\/ collect existing decls\n\texistingVars := make(map[string]func(expr ast.Expr))\n\texistingTypes := make(map[string]func(expr ast.Expr))\n\texistingFuncs := make(map[string]func(fn *ast.FuncDecl))\n\tfor i, decl := range config.Decls {\n\t\tswitch decl := decl.(type) {\n\t\tcase *ast.GenDecl:\n\t\t\tswitch decl.Tok {\n\t\t\tcase token.VAR:\n\t\t\t\tfor _, spec := range decl.Specs {\n\t\t\t\t\tspec := spec.(*ast.ValueSpec)\n\t\t\t\t\tfor i, name := range spec.Names {\n\t\t\t\t\t\ti := i\n\t\t\t\t\t\tspec := spec\n\t\t\t\t\t\texistingVars[name.Name] = func(expr ast.Expr) {\n\t\t\t\t\t\t\tspec.Values[i] = expr\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase token.TYPE:\n\t\t\t\tfor i, spec := range decl.Specs {\n\t\t\t\t\tspec := spec.(*ast.TypeSpec)\n\t\t\t\t\ti := i\n\t\t\t\t\tdecl := decl\n\t\t\t\t\texistingTypes[spec.Name.Name] = func(expr ast.Expr) {\n\t\t\t\t\t\tdecl.Specs[i].(*ast.TypeSpec).Type = expr\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\tcase *ast.FuncDecl:\n\t\t\tname := decl.Name.Name\n\t\t\tif decl.Recv != nil {\n\t\t\t\tname = decl.Recv.List[0].Type.(*ast.Ident).Name + \".\" + name\n\t\t\t}\n\t\t\ti := i\n\t\t\texistingFuncs[name] = func(fndecl *ast.FuncDecl) {\n\t\t\t\tconfig.Decls[i] = fndecl\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ collect output declarations\n\tdecls := []ast.Decl{}\n\tfor _, f := range info.Files {\n\tloopSpec:\n\t\tfor _, decl := range f.Decls {\n\t\t\tswitch decl := decl.(type) {\n\t\t\tcase *ast.GenDecl:\n\t\t\t\tswitch decl.Tok {\n\t\t\t\t\/\/ var\n\t\t\t\tcase token.VAR:\n\t\t\t\t\tnewDecl := &ast.GenDecl{\n\t\t\t\t\t\tTok: token.VAR,\n\t\t\t\t\t}\n\t\t\t\t\tfor _, spec := range decl.Specs {\n\t\t\t\t\t\tspec := spec.(*ast.ValueSpec)\n\t\t\t\t\tloopVarName:\n\t\t\t\t\t\tfor i, name := range spec.Names {\n\t\t\t\t\t\t\tfor _, filter := range config.NameFilters {\n\t\t\t\t\t\t\t\tif !filter(name.Name) {\n\t\t\t\t\t\t\t\t\tcontinue loopVarName\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif mutator, ok := existingVars[name.Name]; ok {\n\t\t\t\t\t\t\t\tmutator(spec.Values[i])\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tnewDecl.Specs = append(newDecl.Specs, spec)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif len(newDecl.Specs) > 0 {\n\t\t\t\t\t\tdecls = append(decls, newDecl)\n\t\t\t\t\t}\n\t\t\t\t\/\/ type\n\t\t\t\tcase token.TYPE:\n\t\t\t\t\tnewDecl := &ast.GenDecl{\n\t\t\t\t\t\tTok: token.TYPE,\n\t\t\t\t\t}\n\t\t\t\tloopTypeSpec:\n\t\t\t\t\tfor _, spec := range decl.Specs {\n\t\t\t\t\t\tspec := spec.(*ast.TypeSpec)\n\t\t\t\t\t\tname := spec.Name.Name\n\t\t\t\t\t\tfor _, filter := range config.NameFilters {\n\t\t\t\t\t\t\tif !filter(name) {\n\t\t\t\t\t\t\t\tcontinue loopTypeSpec\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif mutator, ok := existingTypes[name]; ok {\n\t\t\t\t\t\t\tmutator(spec.Type)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tnewDecl.Specs = append(newDecl.Specs, spec)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif len(newDecl.Specs) > 0 {\n\t\t\t\t\t\tdecls = append(decls, newDecl)\n\t\t\t\t\t}\n\t\t\t\t\/\/ import or const\n\t\t\t\tdefault:\n\t\t\t\t\tdecls = append(decls, decl)\n\t\t\t\t}\n\t\t\t\/\/ func\n\t\t\tcase *ast.FuncDecl:\n\t\t\t\tname := decl.Name.Name\n\t\t\t\tfor _, filter := range config.NameFilters {\n\t\t\t\t\tif !filter(name) {\n\t\t\t\t\t\tcontinue loopSpec\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif decl.Recv != nil {\n\t\t\t\t\tname = decl.Recv.List[0].Type.(*ast.Ident).Name + \".\" + name\n\t\t\t\t}\n\t\t\t\tif mutator, ok := existingFuncs[name]; ok {\n\t\t\t\t\tmutator(decl)\n\t\t\t\t} else {\n\t\t\t\t\tconfig.Decls = append(config.Decls, decl)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tdecls = append(decls, config.Decls...)\n\n\tvar importDecls, newDecls []ast.Decl\n\tfor _, decl := range decls {\n\t\t\/\/ ensure linebreak between decls\n\t\tswitch decl := decl.(type) {\n\t\tcase *ast.FuncDecl:\n\t\t\tif decl.Doc == nil {\n\t\t\t\tdecl.Doc = new(ast.CommentGroup)\n\t\t\t}\n\t\tcase *ast.GenDecl:\n\t\t\tif decl.Doc == nil {\n\t\t\t\tdecl.Doc = new(ast.CommentGroup)\n\t\t\t}\n\t\t}\n\t\t\/\/ move import decls to beginning\n\t\tif decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT {\n\t\t\timportDecls = append(importDecls, decl)\n\t\t\tcontinue\n\t\t}\n\t\tnewDecls = append(newDecls, decl)\n\t}\n\tdecls = append(importDecls, newDecls...)\n\n\t\/\/ output\n\tif config.Writer != nil {\n\t\tif config.Package != \"\" { \/\/ output complete file\n\t\t\tfile := &ast.File{\n\t\t\t\tName: ast.NewIdent(config.Package),\n\t\t\t\tDecls: decls,\n\t\t\t}\n\t\t\tbuf := new(bytes.Buffer)\n\t\t\terr = format.Node(buf, program.Fset, file)\n\t\t\tif err != nil { \/\/NOCOVER\n\t\t\t\treturn fmt.Errorf(\"ccg: format output %v\", err)\n\t\t\t}\n\t\t\tbs, err := imports.Process(\"\", buf.Bytes(), nil)\n\t\t\tif err != nil { \/\/NOCOVER\n\t\t\t\treturn fmt.Errorf(\"ccg: format output %v\", err)\n\t\t\t}\n\t\t\tconfig.Writer.Write(bs)\n\t\t} else { \/\/ output decls only\n\t\t\terr = format.Node(config.Writer, program.Fset, decls)\n\t\t\tif err != nil { \/\/NOCOVER\n\t\t\t\treturn fmt.Errorf(\"ccg: format output %v\", err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\t\"github.com\/naoty\/table\/table\"\n\t\"github.com\/naoty\/table\/writers\"\n)\n\n\/\/ Exit codes represent exit codes for particular situations.\nconst (\n\tExitCodeOK = 0\n\n\tExitCodeError = 10 + iota\n)\n\n\/\/ Format option values\nconst (\n\tFormatOptionASCII = \"ascii\"\n\tFormatOptionMarkdown = \"markdown\"\n\tFormatOptionConfluence = \"confluence\"\n)\n\n\/\/ CLI represents the CLI for this application.\ntype CLI struct {\n\tinStream io.Reader\n\toutStream, errStream io.Writer\n}\n\n\/\/ Run runs commands with given args.\nfunc (cli *CLI) Run(args []string) int {\n\tvar writer writers.Writer = writers.ASCIIWriter{}\n\tshouldShowHeader := false\n\n\tfor i, arg := range args {\n\t\tswitch arg {\n\t\tcase \"--format\", \"-f\":\n\t\t\tif i < len(args)-1 {\n\t\t\t\tswitch args[i+1] {\n\t\t\t\tcase FormatOptionASCII:\n\t\t\t\t\twriter = writers.ASCIIWriter{}\n\t\t\t\tcase FormatOptionMarkdown:\n\t\t\t\t\twriter = writers.MarkdownWriter{}\n\t\t\t\tcase FormatOptionConfluence:\n\t\t\t\t\twriter = writers.ConfluenceWriter{}\n\t\t\t\tdefault:\n\t\t\t\t\twriter = writers.ASCIIWriter{}\n\t\t\t\t}\n\t\t\t}\n\t\tcase \"--header\", \"-H\":\n\t\t\tshouldShowHeader = true\n\t\tcase \"--help\", \"-h\":\n\t\t\tfmt.Fprintln(cli.outStream, cli.Help())\n\t\t\treturn ExitCodeOK\n\t\tcase \"--version\", \"-v\":\n\t\t\tfmt.Fprintln(cli.outStream, Version)\n\t\t\treturn ExitCodeOK\n\t\t}\n\t}\n\n\ttable := table.NewTable()\n\tscanner := bufio.NewScanner(cli.inStream)\n\tfor i := 0; scanner.Scan(); i++ {\n\t\tif shouldShowHeader && i == 0 {\n\t\t\ttable.AppendHeader(scanner.Text())\n\t\t} else {\n\t\t\ttable.AppendRow(scanner.Text())\n\t\t}\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\tfmt.Fprintln(cli.errStream, err)\n\t\treturn ExitCodeError\n\t}\n\n\tfmt.Fprintf(cli.outStream, \"%v\", writer.Write(table))\n\treturn ExitCodeOK\n}\n\n\/\/ Help returns help messages.\nfunc (cli *CLI) Help() string {\n\tindent := strings.Repeat(\" \", 2)\n\n\tlines := []string{}\n\tlines = append(lines, \"Usage:\")\n\tlines = append(lines, fmt.Sprintf(\"%stable [--header | -H] [--format | -f (ascii|markdown|confluence)]\", indent))\n\tlines = append(lines, fmt.Sprintf(\"%stable --help | -h\", indent))\n\tlines = append(lines, fmt.Sprintf(\"%stable --version | -v\", indent))\n\tlines = append(lines, \"\")\n\tlines = append(lines, \"Options:\")\n\tlines = append(lines, fmt.Sprintf(\"%s--header, -H: Show header\", indent))\n\tlines = append(lines, fmt.Sprintf(\"%s--format, -f [ascii|markdown|confluence]: Set format (default: ascii)\", indent))\n\tlines = append(lines, fmt.Sprintf(\"%s--help, -h: Show version number\", indent))\n\tlines = append(lines, fmt.Sprintf(\"%s--version, -v: Show version number\", indent))\n\n\treturn strings.Join(lines, \"\\n\")\n}\n<commit_msg>Parse format option<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\t\"github.com\/naoty\/table\/table\"\n\t\"github.com\/naoty\/table\/writers\"\n)\n\n\/\/ Exit codes represent exit codes for particular situations.\nconst (\n\tExitCodeOK = 0\n\n\tExitCodeError = 10 + iota\n)\n\n\/\/ Format option values\nconst (\n\tFormatOptionASCII = \"ascii\"\n\tFormatOptionMarkdown = \"markdown\"\n\tFormatOptionConfluence = \"confluence\"\n)\n\n\/\/ CLI represents the CLI for this application.\ntype CLI struct {\n\tinStream io.Reader\n\toutStream, errStream io.Writer\n}\n\n\/\/ Run runs commands with given args.\nfunc (cli *CLI) Run(args []string) int {\n\tvar writer writers.Writer = writers.ASCIIWriter{}\n\tshouldShowHeader := false\n\n\tfor i, arg := range args {\n\t\tswitch arg {\n\t\tcase \"--format\", \"-f\":\n\t\t\tif i == len(args)-1 {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t_, writerName := parseFormat(arg)\n\n\t\t\tswitch writerName {\n\t\t\tcase FormatOptionASCII:\n\t\t\t\twriter = writers.ASCIIWriter{}\n\t\t\tcase FormatOptionMarkdown:\n\t\t\t\twriter = writers.MarkdownWriter{}\n\t\t\tcase FormatOptionConfluence:\n\t\t\t\twriter = writers.ConfluenceWriter{}\n\t\t\tdefault:\n\t\t\t\twriter = writers.ASCIIWriter{}\n\t\t\t}\n\t\tcase \"--header\", \"-H\":\n\t\t\tshouldShowHeader = true\n\t\tcase \"--help\", \"-h\":\n\t\t\tfmt.Fprintln(cli.outStream, cli.Help())\n\t\t\treturn ExitCodeOK\n\t\tcase \"--version\", \"-v\":\n\t\t\tfmt.Fprintln(cli.outStream, Version)\n\t\t\treturn ExitCodeOK\n\t\t}\n\t}\n\n\ttable := table.NewTable()\n\tscanner := bufio.NewScanner(cli.inStream)\n\tfor i := 0; scanner.Scan(); i++ {\n\t\tif shouldShowHeader && i == 0 {\n\t\t\ttable.AppendHeader(scanner.Text())\n\t\t} else {\n\t\t\ttable.AppendRow(scanner.Text())\n\t\t}\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\tfmt.Fprintln(cli.errStream, err)\n\t\treturn ExitCodeError\n\t}\n\n\tfmt.Fprintf(cli.outStream, \"%v\", writer.Write(table))\n\treturn ExitCodeOK\n}\n\n\/\/ Help returns help messages.\nfunc (cli *CLI) Help() string {\n\tindent := strings.Repeat(\" \", 2)\n\n\tlines := []string{}\n\tlines = append(lines, \"Usage:\")\n\tlines = append(lines, fmt.Sprintf(\"%stable [--header | -H] [--format | -f (ascii|markdown|confluence)]\", indent))\n\tlines = append(lines, fmt.Sprintf(\"%stable --help | -h\", indent))\n\tlines = append(lines, fmt.Sprintf(\"%stable --version | -v\", indent))\n\tlines = append(lines, \"\")\n\tlines = append(lines, \"Options:\")\n\tlines = append(lines, fmt.Sprintf(\"%s--header, -H: Show header\", indent))\n\tlines = append(lines, fmt.Sprintf(\"%s--format, -f [ascii|markdown|confluence]: Set format (default: ascii)\", indent))\n\tlines = append(lines, fmt.Sprintf(\"%s--help, -h: Show version number\", indent))\n\tlines = append(lines, fmt.Sprintf(\"%s--version, -v: Show version number\", indent))\n\n\treturn strings.Join(lines, \"\\n\")\n}\n\nfunc parseFormat(format string) (string, string) {\n\titems := strings.Split(format, \":\")\n\tif len(items) == 0 {\n\t\treturn \"\", \"\"\n\t} else if len(items) == 1 {\n\t\treturn \"\", items[0]\n\t} else {\n\t\treturn items[0], items[1]\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2015 Victor Antonovich <victor@antonovich.me>\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/pflag\"\n\n\t\"k8s.io\/kubernetes\/pkg\/util\"\n)\n\nconst (\n\tFLAG_V = \"v\"\n\tFLAG_STDERR_THRESH = \"stderrthreshold\"\n\tFLAG_RUN_ONCE = \"once\"\n\tFLAG_DRY_RUN = \"dry-run\"\n\tFLAG_SERVER = \"server\"\n\tFLAG_CONFIG = \"config\"\n\tFLAG_POLL_TIME = \"poll-time\"\n\tFLAG_TEMPLATE = \"template\"\n)\n\nfunc newCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"kube-template\",\n\t\tShort: \"kube-template\",\n\t\tLong: \"Watches Kubernetes for updates, writing output of a series of templates to files.\",\n\t\tRun: runCmd,\n\t}\n\tinitCmd(cmd)\n\treturn cmd\n}\n\nfunc initCmd(cmd *cobra.Command) {\n\t\/\/ Command-related flags set\n\tf := cmd.Flags()\n\tf.Bool(FLAG_DRY_RUN, false, \"don't write template output, dump result to stdout\")\n\tf.Bool(FLAG_RUN_ONCE, false, \"run template processing once and exit\")\n\tf.StringP(FLAG_SERVER, \"s\", \"\", \"the address and port of the Kubernetes API server\")\n\tf.StringVarP(&cfgFile, FLAG_CONFIG, \"c\", \"\", fmt.Sprintf(\"config file (default is .\/%s.(yaml|json))\", CFG_FILE))\n\tf.StringSliceP(FLAG_TEMPLATE, \"t\", nil, `adds a new template to watch on disk in the format\n\t\t'templatePath:outputPath[:command]'. This option is additive\n\t\tand may be specified multiple times for multiple templates.`)\n\t\/\/ Merge glog-related flags\n\t\/\/ FIXME probably we shouldn't use k8s utils there\n\tpflag.CommandLine.AddFlagSet(f)\n\tutil.InitFlags()\n\tutil.InitLogs()\n\tdefer util.FlushLogs()\n}\n\nfunc runCmd(cmd *cobra.Command, args []string) {\n\tconfig, err := newConfig(cmd)\n\n\tif err != nil {\n\t\tglog.Fatalf(\"configuration error: %v\", err)\n\t}\n\tif len(config.TemplateDescriptors) == 0 {\n\t\tglog.Fatalf(\"no templates to process (use --help to get configuration options), exiting...\")\n\t}\n\n\t\/\/ Start application\n\tapp, err := newApp(config)\n\tif err != nil {\n\t\tglog.Fatalf(\"can't create application: %v\", err)\n\t}\n\n\tgo app.Start()\n\n\t\/\/ Listen for signals\n\tsignalCh := make(chan os.Signal, 1)\n\tsignal.Notify(signalCh,\n\t\tsyscall.SIGHUP,\n\t\tsyscall.SIGINT,\n\t\tsyscall.SIGTERM,\n\t\tsyscall.SIGQUIT,\n\t)\n\n\t\/\/ Event loop\nEventLoop:\n\tfor {\n\t\tselect {\n\t\tcase signal := <-signalCh:\n\t\t\tglog.V(2).Infof(\"received %v signal, stopping\", signal)\n\t\t\tapp.Stop()\n\t\tcase <-app.doneCh:\n\t\t\tbreak EventLoop\n\t\t}\n\t}\n}\n<commit_msg>Returned back accidentally removed flag<commit_after>\/\/ Copyright © 2015 Victor Antonovich <victor@antonovich.me>\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/pflag\"\n\n\t\"k8s.io\/kubernetes\/pkg\/util\"\n)\n\nconst (\n\tFLAG_V = \"v\"\n\tFLAG_STDERR_THRESH = \"stderrthreshold\"\n\tFLAG_RUN_ONCE = \"once\"\n\tFLAG_DRY_RUN = \"dry-run\"\n\tFLAG_SERVER = \"server\"\n\tFLAG_CONFIG = \"config\"\n\tFLAG_POLL_TIME = \"poll-time\"\n\tFLAG_TEMPLATE = \"template\"\n)\n\nfunc newCmd() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"kube-template\",\n\t\tShort: \"kube-template\",\n\t\tLong: \"Watches Kubernetes for updates, writing output of a series of templates to files.\",\n\t\tRun: runCmd,\n\t}\n\tinitCmd(cmd)\n\treturn cmd\n}\n\nfunc initCmd(cmd *cobra.Command) {\n\t\/\/ Command-related flags set\n\tf := cmd.Flags()\n\tf.Bool(FLAG_DRY_RUN, false, \"don't write template output, dump result to stdout\")\n\tf.Bool(FLAG_RUN_ONCE, false, \"run template processing once and exit\")\n\tf.StringP(FLAG_SERVER, \"s\", \"\", \"the address and port of the Kubernetes API server\")\n\tf.DurationP(FLAG_POLL_TIME, \"p\", 15*time.Second, \"Kubernetes API server poll time\")\n\tf.StringVarP(&cfgFile, FLAG_CONFIG, \"c\", \"\", fmt.Sprintf(\"config file (default is .\/%s.(yaml|json))\", CFG_FILE))\n\tf.StringSliceP(FLAG_TEMPLATE, \"t\", nil, `adds a new template to watch on disk in the format\n\t\t'templatePath:outputPath[:command]'. This option is additive\n\t\tand may be specified multiple times for multiple templates.`)\n\t\/\/ Merge glog-related flags\n\t\/\/ FIXME probably we shouldn't use k8s utils there\n\tpflag.CommandLine.AddFlagSet(f)\n\tutil.InitFlags()\n\tutil.InitLogs()\n\tdefer util.FlushLogs()\n}\n\nfunc runCmd(cmd *cobra.Command, args []string) {\n\tconfig, err := newConfig(cmd)\n\n\tif err != nil {\n\t\tglog.Fatalf(\"configuration error: %v\", err)\n\t}\n\tif len(config.TemplateDescriptors) == 0 {\n\t\tglog.Fatalf(\"no templates to process (use --help to get configuration options), exiting...\")\n\t}\n\n\t\/\/ Start application\n\tapp, err := newApp(config)\n\tif err != nil {\n\t\tglog.Fatalf(\"can't create application: %v\", err)\n\t}\n\n\tgo app.Start()\n\n\t\/\/ Listen for signals\n\tsignalCh := make(chan os.Signal, 1)\n\tsignal.Notify(signalCh,\n\t\tsyscall.SIGHUP,\n\t\tsyscall.SIGINT,\n\t\tsyscall.SIGTERM,\n\t\tsyscall.SIGQUIT,\n\t)\n\n\t\/\/ Event loop\nEventLoop:\n\tfor {\n\t\tselect {\n\t\tcase signal := <-signalCh:\n\t\t\tglog.V(2).Infof(\"received %v signal, stopping\", signal)\n\t\t\tapp.Stop()\n\t\tcase <-app.doneCh:\n\t\t\tbreak EventLoop\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ the edgeworth system CPU - a simple 64-bit RISC cpu with a large compliment of registers, most of them purpose built\npackage edgeworth\n\nimport (\n\t\"fmt\"\n)\n\nconst (\n\tRegisterCount = 256\n)\n\ntype InvalidRegisterTypeError struct {\n\tType string\n}\n\nfunc (e InvalidRegisterTypeError) Error() string {\n\treturn fmt.Sprintf(\"Error: invalid register type: %s provided for integer register\", e.Type)\n}\n\ntype Register interface {\n\tGetValue() interface{}\n\tSetValue(interface{}) error\n}\n\ntype IntegerRegister struct {\n\tvalue uint64\n}\n\ntype FloatRegister struct {\n\tvalue float64\n}\n\nfunc (r IntegerRegister) GetValue() interface{} {\n\treturn r.value\n}\n\nfunc (r IntegerRegister) SetValue(v interface{}) error {\n\n\tswitch t := v.(type) {\n\tdefault:\n\t\tvar q InvalidRegisterTypeError\n\t\tq.Type = fmt.Sprintf(\"%T\", t)\n\t\treturn &q\n\tcase uint8:\n\t\tr.value = v.(uint64)\n\t\treturn nil\n\tcase int8:\n\t\tr.value = v.(uint64)\n\t\treturn nil\n\tcase uint16:\n\t\tr.value = v.(uint64)\n\t\treturn nil\n\tcase int16:\n\t\tr.value = v.(uint64)\n\t\treturn nil\n\tcase int32:\n\t\tr.value = v.(uint64)\n\t\treturn nil\n\tcase uint32:\n\t\tr.value = v.(uint64)\n\t\treturn nil\n\tcase int64:\n\t\tr.value = v.(uint64)\n\t\treturn nil\n\tcase uint64:\n\t\tr.value = v.(uint64)\n\t\treturn nil\n\t}\n}\n\nfunc (r FloatRegister) GetValue() interface{} {\n\treturn r.value\n}\n\nfunc (r FloatRegister) SetValue(v interface{}) error {\n\n\tswitch t := v.(type) {\n\tdefault:\n\t\tvar q InvalidRegisterTypeError\n\t\tq.Type = fmt.Sprintf(\"%T\", t)\n\t\treturn &q\n\tcase float32:\n\t\tr.value = v.(float64)\n\t\treturn nil\n\tcase float64:\n\t\tr.value = v.(float64)\n\t\treturn nil\n\t}\n}\n<commit_msg>Registers are now of a common type with a 64-bit raw bits backend<commit_after>\/\/ the edgeworth system CPU - a simple 64-bit RISC cpu with a large compliment of registers, most of them purpose built\npackage edgeworth\n\nimport (\n\t\"fmt\"\n\t\"math\"\n)\n\nconst (\n\tRegisterCount = 256\n)\n\nconst (\n\tRegisterTypeInteger = iota\n\tRegisterTypeFloat\n\tRegisterTypePackedFloat32\n\tRegisterTypePackedInt32\n)\n\ntype PackedFloat32 struct {\n\tLower float32\n\tUpper float32\n}\n\ntype PackedInt32 struct {\n\tLower uint32\n\tUpper uint32\n}\n\ntype InvalidRegisterTypeError struct {\n\tType string\n}\n\nfunc (e InvalidRegisterTypeError) Error() string {\n\treturn fmt.Sprintf(\"Error: invalid register type: %s provided for integer register\", e.Type)\n}\n\ntype Register struct {\n\tBits uint64 \/\/ raw bits\n\tTag byte \/\/ tag bits\n}\n\nfunc (r *Register) SetValue(tag byte, bits uint64) {\n\tr.Tag = tag\n\tr.Bits = bits\n}\nfunc (r *Register) SetIntegerValue(value uint64) {\n\tr.SetValue(RegisterTypeInteger, value)\n}\n\nfunc (r *Register) SetFloatValue(value float64) {\n\tr.SetValue(RegisterTypeFloat, math.Float64bits(value))\n}\n\nfunc (r *Register) GetFloatValue() float64 {\n\treturn math.Float64frombits(r.Bits)\n}\n\nfunc (r *Register) GetValue() interface{} {\n\tswitch r.Tag {\n\tcase RegisterTypeFloat:\n\t\treturn r.GetFloatValue()\n\tcase RegisterTypePackedFloat32:\n\t\treturn r.GetFloat32Values()\n\tcase RegisterTypePackedInt32:\n\t\treturn r.GetInt32Values()\n\tcase RegisterTypeInteger:\n\t\tfallthrough\n\tdefault:\n\t\treturn r.Bits\n\t}\n}\n\nfunc (r *Register) GetFloat32Values() *PackedFloat32 {\n\t\/\/get the raw bits for the upper and lower half\n\tlower := uint32(r.Bits)\n\tupper := uint32((r.Bits & 0xFFFFFFFF00000000) >> 32)\n\treturn &PackedFloat32{Lower: math.Float32frombits(lower), Upper: math.Float32frombits(upper)}\n}\n\nfunc (r *Register) SetPackedFloat32Value(p *PackedFloat32) {\n\tblower := uint64(math.Float32bits(p.Lower))\n\tbupper := uint64(math.Float32bits(p.Upper))\n\tvalue := ((bupper << 32) | (blower & 0x00000000FFFFFFFF))\n\tr.SetValue(RegisterTypePackedFloat32, value)\n}\nfunc (r *Register) SetFromFloat32Values(lower, upper float32) {\n\tvar p PackedFloat32\n\tp.Lower = lower\n\tp.Upper = upper\n\tr.SetPackedFloat32Value(&p)\n}\nfunc (r *Register) GetInt32Values() *PackedInt32 {\n\t\/\/get the raw bits for the upper and lower half\n\tlower := uint32(r.Bits)\n\tupper := uint32(r.Bits >> 32)\n\treturn &PackedInt32{Lower: lower, Upper: upper}\n}\n\nfunc (r *Register) SetPackedInt32Value(p *PackedInt32) {\n\tblower := uint64(p.Lower)\n\tbupper := uint64(p.Upper)\n\tvalue := ((bupper << 32) | (blower & 0x00000000FFFFFFFF))\n\tr.SetValue(RegisterTypePackedInt32, value)\n}\nfunc (r *Register) SetFromInt32Values(lower, upper uint32) {\n\tvar p PackedInt32\n\tp.Lower = lower\n\tp.Upper = upper\n\tr.SetPackedInt32Value(&p)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ A helper that allows using gcsfuse with mount(8).\n\/\/\n\/\/ Can be invoked using a command-line of the form expected for mount helpers.\n\/\/ Calls the gcsfuse binary, which it finds from one of a list of expected\n\/\/ locations, and waits for it to complete. The device and mount point are\n\/\/ passed on as positional arguments, and other known options are converted to\n\/\/ appropriate flags.\n\/\/\n\/\/ This binary returns with exit code zero only after gcsfuse has reported that\n\/\/ it has successfuly mounted the file system. Further output from gcsfuse is\n\/\/ suppressed.\npackage main\n\n\/\/ Example invocation on OS X:\n\/\/\n\/\/ mount -t porp -o foo=bar\\ baz -o ro,blah bucket ~\/tmp\/mp\n\/\/\n\/\/ becomes the following arguments:\n\/\/\n\/\/ Arg 0: \"\/sbin\/mount_gcsfuse \"\n\/\/ Arg 1: \"-o\"\n\/\/ Arg 2: \"foo=bar baz\"\n\/\/ Arg 3: \"-o\"\n\/\/ Arg 4: \"ro\"\n\/\/ Arg 5: \"-o\"\n\/\/ Arg 6: \"blah\"\n\/\/ Arg 7: \"bucket\"\n\/\/ Arg 8: \"\/path\/to\/mp\"\n\/\/\n\/\/ On Linux, the fstab entry\n\/\/\n\/\/ bucket \/path\/to\/mp porp user,foo=bar\\040baz\n\/\/\n\/\/ becomes\n\/\/\n\/\/ Arg 0: \"\/sbin\/mount.gcsfuse\"\n\/\/ Arg 1: \"bucket\"\n\/\/ Arg 2: \"\/path\/to\/mp\"\n\/\/ Arg 3: \"-o\"\n\/\/ Arg 4: \"rw,noexec,nosuid,nodev,user,foo=bar baz\"\n\/\/\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n\n\t\"github.com\/googlecloudplatform\/gcsfuse\/internal\/mount\"\n)\n\n\/\/ Turn mount-style options into gcsfuse arguments. Skip known detritus that\n\/\/ the mount command gives us.\n\/\/\n\/\/ The result of this function should be appended to exec.Command.Args.\nfunc makeGcsfuseArgs(\n\tdevice string,\n\tmountPoint string,\n\topts map[string]string) (args []string, err error) {\n\t\/\/ Deal with options.\n\tfor name, value := range opts {\n\t\tswitch name {\n\t\t\/\/ Don't pass through options that are relevant to mount(8) but not to\n\t\t\/\/ gcsfuse, and that fusermount chokes on with \"Invalid argument\" on Linux.\n\t\tcase \"user\", \"nouser\", \"auto\", \"noauto\", \"_netdev\", \"no_netdev\":\n\n\t\t\/\/ Special case: support mount-like formatting for gcsfuse bool flags.\n\t\tcase \"implicit_dirs\":\n\t\t\targs = append(\n\t\t\t\targs,\n\t\t\t\t\"--\"+strings.Replace(name, \"_\", \"-\", -1),\n\t\t\t)\n\n\t\t\t\/\/ Special case: support mount-like formatting for gcsfuse string flags.\n\t\tcase \"dir_mode\", \"file_mode\", \"key_file\", \"temp_dir\", \"gid\", \"uid\", \"only_dir\", \"limit_ops_per_sec\", \"limit_bytes_per_sec\":\n\t\t\targs = append(\n\t\t\t\targs,\n\t\t\t\t\"--\"+strings.Replace(name, \"_\", \"-\", -1),\n\t\t\t\tvalue,\n\t\t\t)\n\n\t\t\/\/ Pass through everything else.\n\t\tdefault:\n\t\t\tvar formatted string\n\t\t\tif value == \"\" {\n\t\t\t\tformatted = name\n\t\t\t} else {\n\t\t\t\tformatted = fmt.Sprintf(\"%s=%s\", name, value)\n\t\t\t}\n\n\t\t\targs = append(args, \"-o\", formatted)\n\t\t}\n\t}\n\n\t\/\/ Set the bucket and mount point.\n\targs = append(args, device, mountPoint)\n\n\treturn\n}\n\n\/\/ Parse the supplied command-line arguments from a mount(8) invocation on OS X\n\/\/ or Linux.\nfunc parseArgs(\n\targs []string) (\n\tdevice string,\n\tmountPoint string,\n\topts map[string]string,\n\terr error) {\n\topts = make(map[string]string)\n\n\t\/\/ Process each argument in turn.\n\tpositionalCount := 0\n\tfor i, s := range args {\n\t\tswitch {\n\t\t\/\/ Skip the program name.\n\t\tcase i == 0:\n\t\t\tcontinue\n\n\t\t\/\/ \"-o\" is illegal only when at the end. We handle its argument in the case\n\t\t\/\/ below.\n\t\tcase s == \"-o\":\n\t\t\tif i == len(args)-1 {\n\t\t\t\terr = fmt.Errorf(\"Unexpected -o at end of args.\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\/\/ systemd passes -n (alias --no-mtab) to the mount helper. This seems to\n\t\t\/\/ be a result of the new setup on many Linux systems with \/etc\/mtab as a\n\t\t\/\/ symlink pointing to \/proc\/self\/mounts. \/proc\/self\/mounts is read-only,\n\t\t\/\/ so any helper that would normally write to \/etc\/mtab should be\n\t\t\/\/ configured not to do so. Because systemd does not provide a way to\n\t\t\/\/ disable this behavior for mount helpers that do not write to \/etc\/mtab,\n\t\t\/\/ we ignore the flag.\n\t\tcase s == \"-n\":\n\t\t\tcontinue\n\n\t\t\/\/ Is this an options string following a \"-o\"?\n\t\tcase i > 0 && args[i-1] == \"-o\":\n\t\t\tmount.ParseOptions(opts, s)\n\n\t\t\/\/ Is this the device?\n\t\tcase positionalCount == 0:\n\t\t\tdevice = s\n\t\t\tpositionalCount++\n\n\t\t\/\/ Is this the mount point?\n\t\tcase positionalCount == 1:\n\t\t\tmountPoint = s\n\t\t\tpositionalCount++\n\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"Unexpected arg %d: %q\", i, s)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif positionalCount != 2 {\n\t\terr = fmt.Errorf(\"Expected two positional arguments; got %d.\", positionalCount)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc run(args []string) (err error) {\n\t\/\/ If invoked with a single \"--help\" argument, print a usage message and exit\n\t\/\/ successfully.\n\tif len(args) == 2 && args[1] == \"--help\" {\n\t\tfmt.Fprintf(\n\t\t\tos.Stderr,\n\t\t\t\"Usage: %s [-o options] bucket_name mount_point\\n\",\n\t\t\targs[0])\n\n\t\treturn\n\t}\n\n\t\/\/ Find the path to gcsfuse.\n\tgcsfusePath, err := findGcsfuse()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"findGcsfuse: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Find the path to fusermount.\n\tfusermountPath, err := findFusermount()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"findFusermount: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Attempt to parse arguments.\n\tdevice, mountPoint, opts, err := parseArgs(args)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"parseArgs: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Choose gcsfuse args.\n\tgcsfuseArgs, err := makeGcsfuseArgs(device, mountPoint, opts)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"makeGcsfuseArgs: %v\", err)\n\t\treturn\n\t}\n\n\tfmt.Fprintf(\n\t\tos.Stderr,\n\t\t\"Calling gcsfuse with arguments: %s\\n\",\n\t\tstrings.Join(gcsfuseArgs, \" \"))\n\n\t\/\/ Run gcsfuse.\n\tcmd := exec.Command(gcsfusePath, gcsfuseArgs...)\n\tcmd.Env = append(cmd.Env, fmt.Sprintf(\"PATH=%s\", path.Dir(fusermountPath)))\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\terr = cmd.Run()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"running gcsfuse: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc main() {\n\terr := run(os.Args)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n<commit_msg>Add stat-cache-ttl and type-cache-ttl<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ A helper that allows using gcsfuse with mount(8).\n\/\/\n\/\/ Can be invoked using a command-line of the form expected for mount helpers.\n\/\/ Calls the gcsfuse binary, which it finds from one of a list of expected\n\/\/ locations, and waits for it to complete. The device and mount point are\n\/\/ passed on as positional arguments, and other known options are converted to\n\/\/ appropriate flags.\n\/\/\n\/\/ This binary returns with exit code zero only after gcsfuse has reported that\n\/\/ it has successfuly mounted the file system. Further output from gcsfuse is\n\/\/ suppressed.\npackage main\n\n\/\/ Example invocation on OS X:\n\/\/\n\/\/ mount -t porp -o foo=bar\\ baz -o ro,blah bucket ~\/tmp\/mp\n\/\/\n\/\/ becomes the following arguments:\n\/\/\n\/\/ Arg 0: \"\/sbin\/mount_gcsfuse \"\n\/\/ Arg 1: \"-o\"\n\/\/ Arg 2: \"foo=bar baz\"\n\/\/ Arg 3: \"-o\"\n\/\/ Arg 4: \"ro\"\n\/\/ Arg 5: \"-o\"\n\/\/ Arg 6: \"blah\"\n\/\/ Arg 7: \"bucket\"\n\/\/ Arg 8: \"\/path\/to\/mp\"\n\/\/\n\/\/ On Linux, the fstab entry\n\/\/\n\/\/ bucket \/path\/to\/mp porp user,foo=bar\\040baz\n\/\/\n\/\/ becomes\n\/\/\n\/\/ Arg 0: \"\/sbin\/mount.gcsfuse\"\n\/\/ Arg 1: \"bucket\"\n\/\/ Arg 2: \"\/path\/to\/mp\"\n\/\/ Arg 3: \"-o\"\n\/\/ Arg 4: \"rw,noexec,nosuid,nodev,user,foo=bar baz\"\n\/\/\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\"\n\t\"strings\"\n\n\t\"github.com\/googlecloudplatform\/gcsfuse\/internal\/mount\"\n)\n\n\/\/ Turn mount-style options into gcsfuse arguments. Skip known detritus that\n\/\/ the mount command gives us.\n\/\/\n\/\/ The result of this function should be appended to exec.Command.Args.\nfunc makeGcsfuseArgs(\n\tdevice string,\n\tmountPoint string,\n\topts map[string]string) (args []string, err error) {\n\t\/\/ Deal with options.\n\tfor name, value := range opts {\n\t\tswitch name {\n\t\t\/\/ Don't pass through options that are relevant to mount(8) but not to\n\t\t\/\/ gcsfuse, and that fusermount chokes on with \"Invalid argument\" on Linux.\n\t\tcase \"user\", \"nouser\", \"auto\", \"noauto\", \"_netdev\", \"no_netdev\":\n\n\t\t\/\/ Special case: support mount-like formatting for gcsfuse bool flags.\n\t\tcase \"implicit_dirs\":\n\t\t\targs = append(\n\t\t\t\targs,\n\t\t\t\t\"--\"+strings.Replace(name, \"_\", \"-\", -1),\n\t\t\t)\n\n\t\t\t\/\/ Special case: support mount-like formatting for gcsfuse string flags.\n\t\tcase \"dir_mode\", \"file_mode\", \"key_file\", \"temp_dir\", \"gid\", \"uid\", \"only_dir\", \"limit_ops_per_sec\", \"limit_bytes_per_sec\", \"stat_cache_ttl\", \"type_cache_ttl\":\n\t\t\targs = append(\n\t\t\t\targs,\n\t\t\t\t\"--\"+strings.Replace(name, \"_\", \"-\", -1),\n\t\t\t\tvalue,\n\t\t\t)\n\n\t\t\/\/ Pass through everything else.\n\t\tdefault:\n\t\t\tvar formatted string\n\t\t\tif value == \"\" {\n\t\t\t\tformatted = name\n\t\t\t} else {\n\t\t\t\tformatted = fmt.Sprintf(\"%s=%s\", name, value)\n\t\t\t}\n\n\t\t\targs = append(args, \"-o\", formatted)\n\t\t}\n\t}\n\n\t\/\/ Set the bucket and mount point.\n\targs = append(args, device, mountPoint)\n\n\treturn\n}\n\n\/\/ Parse the supplied command-line arguments from a mount(8) invocation on OS X\n\/\/ or Linux.\nfunc parseArgs(\n\targs []string) (\n\tdevice string,\n\tmountPoint string,\n\topts map[string]string,\n\terr error) {\n\topts = make(map[string]string)\n\n\t\/\/ Process each argument in turn.\n\tpositionalCount := 0\n\tfor i, s := range args {\n\t\tswitch {\n\t\t\/\/ Skip the program name.\n\t\tcase i == 0:\n\t\t\tcontinue\n\n\t\t\/\/ \"-o\" is illegal only when at the end. We handle its argument in the case\n\t\t\/\/ below.\n\t\tcase s == \"-o\":\n\t\t\tif i == len(args)-1 {\n\t\t\t\terr = fmt.Errorf(\"Unexpected -o at end of args.\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\/\/ systemd passes -n (alias --no-mtab) to the mount helper. This seems to\n\t\t\/\/ be a result of the new setup on many Linux systems with \/etc\/mtab as a\n\t\t\/\/ symlink pointing to \/proc\/self\/mounts. \/proc\/self\/mounts is read-only,\n\t\t\/\/ so any helper that would normally write to \/etc\/mtab should be\n\t\t\/\/ configured not to do so. Because systemd does not provide a way to\n\t\t\/\/ disable this behavior for mount helpers that do not write to \/etc\/mtab,\n\t\t\/\/ we ignore the flag.\n\t\tcase s == \"-n\":\n\t\t\tcontinue\n\n\t\t\/\/ Is this an options string following a \"-o\"?\n\t\tcase i > 0 && args[i-1] == \"-o\":\n\t\t\tmount.ParseOptions(opts, s)\n\n\t\t\/\/ Is this the device?\n\t\tcase positionalCount == 0:\n\t\t\tdevice = s\n\t\t\tpositionalCount++\n\n\t\t\/\/ Is this the mount point?\n\t\tcase positionalCount == 1:\n\t\t\tmountPoint = s\n\t\t\tpositionalCount++\n\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"Unexpected arg %d: %q\", i, s)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif positionalCount != 2 {\n\t\terr = fmt.Errorf(\"Expected two positional arguments; got %d.\", positionalCount)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc run(args []string) (err error) {\n\t\/\/ If invoked with a single \"--help\" argument, print a usage message and exit\n\t\/\/ successfully.\n\tif len(args) == 2 && args[1] == \"--help\" {\n\t\tfmt.Fprintf(\n\t\t\tos.Stderr,\n\t\t\t\"Usage: %s [-o options] bucket_name mount_point\\n\",\n\t\t\targs[0])\n\n\t\treturn\n\t}\n\n\t\/\/ Find the path to gcsfuse.\n\tgcsfusePath, err := findGcsfuse()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"findGcsfuse: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Find the path to fusermount.\n\tfusermountPath, err := findFusermount()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"findFusermount: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Attempt to parse arguments.\n\tdevice, mountPoint, opts, err := parseArgs(args)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"parseArgs: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Choose gcsfuse args.\n\tgcsfuseArgs, err := makeGcsfuseArgs(device, mountPoint, opts)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"makeGcsfuseArgs: %v\", err)\n\t\treturn\n\t}\n\n\tfmt.Fprintf(\n\t\tos.Stderr,\n\t\t\"Calling gcsfuse with arguments: %s\\n\",\n\t\tstrings.Join(gcsfuseArgs, \" \"))\n\n\t\/\/ Run gcsfuse.\n\tcmd := exec.Command(gcsfusePath, gcsfuseArgs...)\n\tcmd.Env = append(cmd.Env, fmt.Sprintf(\"PATH=%s\", path.Dir(fusermountPath)))\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\terr = cmd.Run()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"running gcsfuse: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc main() {\n\terr := run(os.Args)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ A helper that allows using gcsfuse with mount(8).\n\/\/\n\/\/ Can be invoked using a command-line of the form expected for mount helpers.\n\/\/ Calls the gcsfuse binary, which must be in $PATH, and waits for it to\n\/\/ complete. The device and mount point are passed on as positional arguments,\n\/\/ and other known options are converted to appropriate flags.\n\/\/\n\/\/ This binary returns with exit code zero only after gcsfuse has reported that\n\/\/ it has successfuly mounted the file system. Further output from gcsfuse is\n\/\/ suppressed.\npackage main\n\n\/\/ Example invocation on OS X:\n\/\/\n\/\/ mount -t porp -o foo=bar\\ baz -o ro,blah bucket ~\/tmp\/mp\n\/\/\n\/\/ becomes the following arguments:\n\/\/\n\/\/ Arg 0: \"\/sbin\/mount_gcsfuse \"\n\/\/ Arg 1: \"-o\"\n\/\/ Arg 2: \"foo=bar baz\"\n\/\/ Arg 3: \"-o\"\n\/\/ Arg 4: \"ro\"\n\/\/ Arg 5: \"-o\"\n\/\/ Arg 6: \"blah\"\n\/\/ Arg 7: \"bucket\"\n\/\/ Arg 8: \"\/path\/to\/mp\"\n\/\/\n\/\/ On Linux, the fstab entry\n\/\/\n\/\/ bucket \/path\/to\/mp porp user,foo=bar\\040baz\n\/\/\n\/\/ becomes\n\/\/\n\/\/ Arg 0: \"\/sbin\/mount.gcsfuse\"\n\/\/ Arg 1: \"bucket\"\n\/\/ Arg 2: \"\/path\/to\/mp\"\n\/\/ Arg 3: \"-o\"\n\/\/ Arg 4: \"rw,noexec,nosuid,nodev,user,foo=bar baz\"\n\/\/\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/googlecloudplatform\/gcsfuse\/internal\/daemon\"\n\t\"github.com\/googlecloudplatform\/gcsfuse\/internal\/mount\"\n)\n\n\/\/ Turn mount-style options into gcsfuse arguments. Skip known detritus that\n\/\/ the mount command gives us.\n\/\/\n\/\/ The result of this function should be appended to exec.Command.Args.\nfunc makeGcsfuseArgs(\n\tdevice string,\n\tmountPoint string,\n\topts map[string]string) (args []string, err error) {\n\t\/\/ Deal with options.\n\tfor name, value := range opts {\n\t\tswitch name {\n\t\tcase \"fuse_debug\":\n\t\t\targs = append(args, \"--fuse.debug\")\n\n\t\tcase \"gcs_debug\":\n\t\t\targs = append(args, \"--gcs.debug\")\n\n\t\tcase \"uid\":\n\t\t\targs = append(args, \"--uid=\"+value)\n\n\t\tcase \"gid\":\n\t\t\targs = append(args, \"--gid=\"+value)\n\n\t\tcase \"file_mode\":\n\t\t\targs = append(args, \"--file-mode=\"+value)\n\n\t\tcase \"dir_mode\":\n\t\t\targs = append(args, \"--dir-mode=\"+value)\n\n\t\t\/\/ Don't pass through options that are relevant to mount(8) but not to\n\t\t\/\/ gcsfuse, and that fusermount chokes on with \"Invalid argument\" on Linux.\n\t\tcase \"user\", \"nouser\", \"auto\", \"noauto\":\n\n\t\t\/\/ Pass through everything else.\n\t\tdefault:\n\t\t\tvar formatted string\n\t\t\tif value == \"\" {\n\t\t\t\tformatted = name\n\t\t\t} else {\n\t\t\t\tformatted = fmt.Sprintf(\"%s=%s\", name, value)\n\t\t\t}\n\n\t\t\targs = append(args, \"-o\", formatted)\n\t\t}\n\t}\n\n\t\/\/ Set the bucket and mount point.\n\targs = append(args, device, mountPoint)\n\n\treturn\n}\n\n\/\/ Parse the supplied command-line arguments from a mount(8) invocation on OS X\n\/\/ or Linux.\nfunc parseArgs(\n\targs []string) (\n\tdevice string,\n\tmountPoint string,\n\topts map[string]string,\n\terr error) {\n\topts = make(map[string]string)\n\n\t\/\/ Process each argument in turn.\n\tpositionalCount := 0\n\tfor i, s := range args {\n\t\tswitch {\n\t\t\/\/ Skip the program name.\n\t\tcase i == 0:\n\t\t\tcontinue\n\n\t\t\/\/ \"-o\" is illegal only when at the end. We handle its argument in the case\n\t\t\/\/ below.\n\t\tcase s == \"-o\":\n\t\t\tif i == len(args)-1 {\n\t\t\t\terr = fmt.Errorf(\"Unexpected -o at end of args.\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\/\/ Is this an options string following a \"-o\"?\n\t\tcase i > 0 && args[i-1] == \"-o\":\n\t\t\tmount.ParseOptions(opts, s)\n\n\t\t\/\/ Is this the device?\n\t\tcase positionalCount == 0:\n\t\t\tdevice = s\n\t\t\tpositionalCount++\n\n\t\t\/\/ Is this the mount point?\n\t\tcase positionalCount == 1:\n\t\t\tmountPoint = s\n\t\t\tpositionalCount++\n\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"Unexpected arg %d: %q\", i, s)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif positionalCount != 2 {\n\t\terr = fmt.Errorf(\"Expected two positional arguments; got %d.\", positionalCount)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc run(args []string) (err error) {\n\t\/\/ If invoked with a single \"--help\" argument, print a usage message and exit\n\t\/\/ successfully.\n\tif len(args) == 2 && args[1] == \"--help\" {\n\t\tfmt.Fprintf(\n\t\t\tos.Stderr,\n\t\t\t\"Usage: %s [-o options] bucket_name mount_point\\n\",\n\t\t\targs[0])\n\n\t\treturn\n\t}\n\n\t\/\/ Attempt to parse arguments.\n\tdevice, mountPoint, opts, err := parseArgs(args)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"parseArgs: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Choose gcsfuse args.\n\tgcsfuseArgs, err := makeGcsfuseArgs(device, mountPoint, opts)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"makeGcsfuseArgs: %v\", err)\n\t\treturn\n\t}\n\n\tfmt.Fprintf(\n\t\tos.Stderr,\n\t\t\"Calling gcsfuse with arguments: %s\\n\",\n\t\tstrings.Join(gcsfuseArgs, \" \"))\n\n\t\/\/ Call gcsfuse and wait for it to successfully mount.\n\terr = daemon.Mount(\n\t\t\"gcsfuse\",\n\t\t\"fusermount\",\n\t\tgcsfuseArgs,\n\t\tos.Stderr)\n\n\tif err != nil {\n\t\terr = fmt.Errorf(\"daemon.Mount: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc main() {\n\terr := run(os.Args)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n<commit_msg>Removed support for undocumented mount helper options.<commit_after>\/\/ Copyright 2015 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ A helper that allows using gcsfuse with mount(8).\n\/\/\n\/\/ Can be invoked using a command-line of the form expected for mount helpers.\n\/\/ Calls the gcsfuse binary, which must be in $PATH, and waits for it to\n\/\/ complete. The device and mount point are passed on as positional arguments,\n\/\/ and other known options are converted to appropriate flags.\n\/\/\n\/\/ This binary returns with exit code zero only after gcsfuse has reported that\n\/\/ it has successfuly mounted the file system. Further output from gcsfuse is\n\/\/ suppressed.\npackage main\n\n\/\/ Example invocation on OS X:\n\/\/\n\/\/ mount -t porp -o foo=bar\\ baz -o ro,blah bucket ~\/tmp\/mp\n\/\/\n\/\/ becomes the following arguments:\n\/\/\n\/\/ Arg 0: \"\/sbin\/mount_gcsfuse \"\n\/\/ Arg 1: \"-o\"\n\/\/ Arg 2: \"foo=bar baz\"\n\/\/ Arg 3: \"-o\"\n\/\/ Arg 4: \"ro\"\n\/\/ Arg 5: \"-o\"\n\/\/ Arg 6: \"blah\"\n\/\/ Arg 7: \"bucket\"\n\/\/ Arg 8: \"\/path\/to\/mp\"\n\/\/\n\/\/ On Linux, the fstab entry\n\/\/\n\/\/ bucket \/path\/to\/mp porp user,foo=bar\\040baz\n\/\/\n\/\/ becomes\n\/\/\n\/\/ Arg 0: \"\/sbin\/mount.gcsfuse\"\n\/\/ Arg 1: \"bucket\"\n\/\/ Arg 2: \"\/path\/to\/mp\"\n\/\/ Arg 3: \"-o\"\n\/\/ Arg 4: \"rw,noexec,nosuid,nodev,user,foo=bar baz\"\n\/\/\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/googlecloudplatform\/gcsfuse\/internal\/daemon\"\n\t\"github.com\/googlecloudplatform\/gcsfuse\/internal\/mount\"\n)\n\n\/\/ Turn mount-style options into gcsfuse arguments. Skip known detritus that\n\/\/ the mount command gives us.\n\/\/\n\/\/ The result of this function should be appended to exec.Command.Args.\nfunc makeGcsfuseArgs(\n\tdevice string,\n\tmountPoint string,\n\topts map[string]string) (args []string, err error) {\n\t\/\/ Deal with options.\n\tfor name, value := range opts {\n\t\tswitch name {\n\t\t\/\/ Don't pass through options that are relevant to mount(8) but not to\n\t\t\/\/ gcsfuse, and that fusermount chokes on with \"Invalid argument\" on Linux.\n\t\tcase \"user\", \"nouser\", \"auto\", \"noauto\":\n\n\t\t\/\/ Pass through everything else.\n\t\tdefault:\n\t\t\tvar formatted string\n\t\t\tif value == \"\" {\n\t\t\t\tformatted = name\n\t\t\t} else {\n\t\t\t\tformatted = fmt.Sprintf(\"%s=%s\", name, value)\n\t\t\t}\n\n\t\t\targs = append(args, \"-o\", formatted)\n\t\t}\n\t}\n\n\t\/\/ Set the bucket and mount point.\n\targs = append(args, device, mountPoint)\n\n\treturn\n}\n\n\/\/ Parse the supplied command-line arguments from a mount(8) invocation on OS X\n\/\/ or Linux.\nfunc parseArgs(\n\targs []string) (\n\tdevice string,\n\tmountPoint string,\n\topts map[string]string,\n\terr error) {\n\topts = make(map[string]string)\n\n\t\/\/ Process each argument in turn.\n\tpositionalCount := 0\n\tfor i, s := range args {\n\t\tswitch {\n\t\t\/\/ Skip the program name.\n\t\tcase i == 0:\n\t\t\tcontinue\n\n\t\t\/\/ \"-o\" is illegal only when at the end. We handle its argument in the case\n\t\t\/\/ below.\n\t\tcase s == \"-o\":\n\t\t\tif i == len(args)-1 {\n\t\t\t\terr = fmt.Errorf(\"Unexpected -o at end of args.\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\/\/ Is this an options string following a \"-o\"?\n\t\tcase i > 0 && args[i-1] == \"-o\":\n\t\t\tmount.ParseOptions(opts, s)\n\n\t\t\/\/ Is this the device?\n\t\tcase positionalCount == 0:\n\t\t\tdevice = s\n\t\t\tpositionalCount++\n\n\t\t\/\/ Is this the mount point?\n\t\tcase positionalCount == 1:\n\t\t\tmountPoint = s\n\t\t\tpositionalCount++\n\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"Unexpected arg %d: %q\", i, s)\n\t\t\treturn\n\t\t}\n\t}\n\n\tif positionalCount != 2 {\n\t\terr = fmt.Errorf(\"Expected two positional arguments; got %d.\", positionalCount)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc run(args []string) (err error) {\n\t\/\/ If invoked with a single \"--help\" argument, print a usage message and exit\n\t\/\/ successfully.\n\tif len(args) == 2 && args[1] == \"--help\" {\n\t\tfmt.Fprintf(\n\t\t\tos.Stderr,\n\t\t\t\"Usage: %s [-o options] bucket_name mount_point\\n\",\n\t\t\targs[0])\n\n\t\treturn\n\t}\n\n\t\/\/ Attempt to parse arguments.\n\tdevice, mountPoint, opts, err := parseArgs(args)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"parseArgs: %v\", err)\n\t\treturn\n\t}\n\n\t\/\/ Choose gcsfuse args.\n\tgcsfuseArgs, err := makeGcsfuseArgs(device, mountPoint, opts)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"makeGcsfuseArgs: %v\", err)\n\t\treturn\n\t}\n\n\tfmt.Fprintf(\n\t\tos.Stderr,\n\t\t\"Calling gcsfuse with arguments: %s\\n\",\n\t\tstrings.Join(gcsfuseArgs, \" \"))\n\n\t\/\/ Call gcsfuse and wait for it to successfully mount.\n\terr = daemon.Mount(\n\t\t\"gcsfuse\",\n\t\t\"fusermount\",\n\t\tgcsfuseArgs,\n\t\tos.Stderr)\n\n\tif err != nil {\n\t\terr = fmt.Errorf(\"daemon.Mount: %v\", err)\n\t\treturn\n\t}\n\n\treturn\n}\n\nfunc main() {\n\terr := run(os.Args)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2014, All rights reserved\n\/\/ Joel Scoble, https:\/\/github.com\/mohae\/tomd\n\/\/\n\/\/ This is licensed under The MIT License. Please refer to the included\n\/\/ LICENSE file for more information. If the LICENSE file has not been\n\/\/ included, please refer to the url above.\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License\n\/\/\n\/\/ tomd: to markdown, takes input and converts it to markdown\n\/\/\n\/\/ Notes: \n\/\/\t* This is not a general markdown processor. It is a package to provide\n\/\/ functions that allow things to be converted to their representation\n\/\/ in markdown.\n\/\/ Currently that means taking a .csv file and converting it to a table.\n\/\/\t* Uses seelog for 'library logging', to enable logging see:\n\/\/ http:\/\/github.com\/cihub\/seelog\/wiki\/Writing-libraries-with-Seelog\npackage tomd\n\nimport (\n\t\"encoding\/csv\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t_ \"strconv\"\n\t\"strings\"\n)\n\n\/\/ CSV is a struct for representing and working with csv data.\ntype CSV struct {\n\t\/\/ Source is the source of the CSV data. It is currently assumed to be\n\t\/\/ a path location\n\tsource string\n\n\t\/\/ destination is where the generated markdown should be put, if it is\n\t\/\/ to be put anywhere. When used, this setting is used in conjunction \n\t\/\/ with destinationType. Not all destinationTypes need to specify a\n\t\/\/ destinatin, bytes, for example. \n\tdestination string\n\n\t\/\/ destinationType is the type of destination for the md, e.g. file.\n\t\/\/ If the destinationType requires specification of the destination,\n\t\/\/ the Destination variable should be set to that value.\n\t\/\/ Supported:\n\t\/\/\t[]byte\tno destination needed\n\t\/\/\tfile\tdestination optional, if not set the output will be\n\t\/\/\t\t`sourceFilename.md` instead of `sourceFilename.csv`.\n\tdestinationType string\n\n\t\/\/ hasHeaderRows: whether the csv data includes a header row as its\n\t\/\/ first row. If the csv data does not include header data, the header\n\t\/\/ data must be provided via template, e.g. false implies \n\t\/\/ 'useFormat' == true. True does not have any implications on using\n\t\/\/ the format file.\n\thasHeaderRow bool\n\n\t\/\/ headerRow contains the header row information. This is when a format\n\t\/\/ has been supplied, the header row information is set.\n\theaderRow []string\n\t\n\t\/\/ columnAlignment contains the alignment information for each column\n\t\/\/ in the table. This is supplied by the format\n\tcolumnAlignment []string\n\n\t\/\/ columnEmphasis contains the emphasis information, if any. for each\n\t\/\/ column. This is supplied by the format.\n\tcolumnEmphasis []string\n\n\t\/\/ formatSource: the location and name of the source file to use. It\n\t\/\/ can either be explicitely set, or TOMD will look for it as\n\t\/\/ `source.fmt`, for `source.csv`.\n\tformatSource string\n\n\t\/\/ whether for formatSource was autoset or not.\n\tformatSourceAutoset bool\n\n\t\/\/ useFormat: whether there's a format to use with the CSV or not. For\n\t\/\/ files, this is usually a file, with the same name and path as the\n\t\/\/ source, using the 'fmt' extension. This can also be set explicitely.\n\t\/\/ 'useFormat' == false implies 'hasHeaderRow' == true.\n\tuseFormat bool\n\n\t\/\/ formatType:\tthe type of format to use. By default, this is in sync\n\t\/\/\t\twith the source type, but it can be set independently.\n\t\/\/ Supported:\n\t\/\/\tfile\tThe format information is in a format file. By default,\n\t\/\/\t\tthis is the source filename with the `.fmt` file\n\t\/\/\t\textension, instead of the original extension. This can\n\t\/\/\t\tbe set independently too.\n\t\/\/\tdefault Any setting other than another supported type will be\n\t\/\/\t\tinterpreted as using the default, which is to manually\n\t\/\/\t\tset the different format information you wish to use\n\t\/\/\t\tin the marshal using their Setters.\n\tformatType string\n\n\t\/\/ table is the parsed csv data\n\ttable [][]string\n\n\t\/\/ md holds the md representation of the csv data\n\tmd []byte\n}\n\n\/\/ NewCSV returns an initialize CSV object. It still needs to be configured\n\/\/ for use.\nfunc NewCSV() *CSV {\n\tC := &CSV{\n\t\thasHeaderRow: true,\n\t\tdestinationType: \"bytes\",\n\t\ttable: [][]string{},\n\t}\n\treturn C\n}\n\n\/\/ NewSourceCSV creates a new *CSV with its source set and initialized.\nfunc NewSourcesCSV(s, t string, b bool) *CSV {\n\tc := NewCSV()\n\tc.useFormat = b\n\n\t\/\/ currently anything that's not file uses the default \"\", which\n\t\/\/ means set it yourself to use it.\n\tswitch t {\n\tcase \"bytes\":\n\tcase \"file\":\n\t\tc.formatType = \"file\"\n\t}\n\n\tc.SetSource(s)\n\treturn c\n}\n\n\/\/ ToMDTable takes a reader for csv and converts the read csv to a markdown\n\/\/ table. To get the md, call CSV.md()\nfunc (c *CSV) ToMDTable(r io.Reader) error {\n\tvar err error\n\tc.table, err = ReadCSV(r)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn err\n\t}\n\n\t\/\/Now convert the data to md\n\tc.toMD()\n\treturn nil\n}\n\n\/\/ FileToMDTable takes a file and marshals it to a md table.\nfunc (c *CSV) FileToMDTable(source string) error{\n\tlogger.Debugf(\"FileToMDTable enter with: %s\", source)\n\tvar err error\n\t\/\/ Try to read the source\n\tc.table, err = ReadCSVFile(source)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn err\n\t}\n\t\t\n\tvar formatName string\n\t\/\/ otherwise see if HasFormat\n\tif c.useFormat {\n\/\/\t\tc.setFormatFile()\n\t\tif c.formatType == \"file\" {\n\t\t\t\/\/derive the format filename\n\t\t\tfilename := filepath.Base(source)\n\t\t\tif filename == \".\" {\n\t\t\t\terr = fmt.Errorf(\"unable to determine format filename\")\n\t\t\t\tlogger.Error(err)\n\t\t\t\treturn err\n\t\t\t}\n\t\n\t\t\tdir := filepath.Dir(source)\n\t\t\tparts := strings.Split(filename, \".\")\n\t\t\tformatName = parts[0] + \".fmt\"\n\t\t\tif dir != \".\" {\n\t\t\t\tformatName = dir + formatName\n\t\t\t}\n\t\t}\n\t}\n\t\n\tif c.useFormat {\n\t\terr := c.formatFromFile()\n\t\tif err != nil {\n\t\t\tlogger.Error(err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Now convert the data to md\n\tc.toMD()\n\n\tlogger.Debug(\"FileToMDTable exit with error: nil\")\n\treturn nil\n}\n\n\/\/ MD() returns the markdown as []byte\nfunc (c *CSV) MD() []byte {\n\treturn c.md\n}\n\n\/\/ ReadCSV takes a reader, and reads the data connected with it as CSV data.\n\/\/ A slice of slice of type string, or an error, are returned. This reads the\n\/\/ entire file, so if the file is very large and you don't have sufficent RAM\n\/\/ you will not like the results. There may be a row or chunk oriented\n\/\/ implementation in the future.\nfunc ReadCSV(r io.Reader ) ([][]string, error) {\n\tcr := csv.NewReader(r)\n\trows, err := cr.ReadAll()\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn nil, err\n\t}\n\n\treturn rows, nil\n}\n\n\/\/ ReadCSVFile takes a path, reads the contents of the file and returns int.\nfunc ReadCSVFile(f string) ([][]string, error) {\n\tfile, err := os.Open(f)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn nil, err\n\t}\n\t\n\t\/\/ because we don't want to forget or worry about hanldling close prior\n\t\/\/ to every return.\n\tdefer file.Close()\n\t\n\t\/\/\n\tdata, err := ReadCSV(file)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn nil, err\n\t}\n\n\treturn data, nil\n}\n\n\/\/ tomd does table header processing then converts its table data to md,\nfunc (c *CSV) toMD() () {\n\t\/\/ Process the header first\n\tc.addHeader()\n\n\t\/\/ for each row of table data, process it.\n\tfor _, row := range c.table {\n\t\tc.rowToMD(row)\n\t}\n\t\n\treturn\n}\n\n\/\/ rowTomd takes a csv table row and returns the md version of it consistent\n\/\/ with its configuration.\nfunc (c *CSV) rowToMD(cols []string) {\n\tc.appendColumnSeparator()\n\n\tfor _, col := range cols {\n\t\t\/\/ TODO this is where column data decoration would occur\n\t\t\/\/ with templates\n\t\tbcol := []byte(col)\n\t\tc.md = append(c.md, bcol...)\n\t\tc.appendColumnSeparator()\n\t}\n\n}\n\n\/\/ setFormatSource sets the formatSource if it is not already set or if the \n\/\/ previously set value was set by setFormatSource. The latter allows auto-\n\/\/ generated default source name to be updated when the source is while \n\/\/ preserving overrides.\nfunc (c *CSV) autosetFormatFile() error {\n\t\/\/ if the source isn't set, nothing to do.\n\tif c.source == \"\" {\n\t\tlogger.Trace(\"setFormatSource exit: source not set\")\n\t\treturn nil\n\t}\n\n\t\/\/ if formatSource isn't empty and wasn't set by setFormatSource,\n\t\/\/ nothing to do\n\tif c.formatSource != \"\" && !c.formatSourceAutoset {\n\t\tlogger.Infof(\"setFormatSource exit: formatSource was already set to %s\", c.formatSource)\n\t\treturn nil\n\t}\n\n\tif c.formatType != \"file\" {\n\t\tlogger.Trace(\"setFormatSource exit: not using format file, format type is %s\", c.formatType)\n\t\treturn nil\n\t}\n\n\t\/\/ Figure out the filename\n\tdir, file := filepath.Split(c.source)\n\t\n\t\/\/ break up the filename into its part, the last is extension.\n\tvar fname string\n\tfParts := strings.Split(file, \".\")\n\n\tif len(fParts) <= 2 {\n\t\tfname = fParts[0]\n\t} else {\n\t\t\/\/ Join all but the last part together for the name\n\t\t\/\/ This handles names with multiple `.`\n\t\tfname = strings.Join(fParts[0:len(fParts) - 2], \".\")\n\t}\n\n\tfname += \".md\"\n\tc.formatSource = dir + fname\t\n\tc.formatSourceAutoset = true\n\treturn nil\n}\n\n\/\/ addHeader adds the table header row and the separator row that goes between\n\/\/ the header row and the data.\nfunc (c *CSV) addHeader() () {\n\tif c.hasHeaderRow {\n\t\tc.rowToMD(c.table[0])\n\t\t\/\/remove the first row\n\t\tc.table = append(c.table[1:])\n\t} else {\n\t\tif c.useFormat {\n\t\t\tc.rowToMD(c.headerRow)\n\t\t}\n\t}\n\n\tc.appendHeaderSeparatorRow(len(c.table[0]))\n\treturn\n}\n\n\/\/ appendHeaderSeparator adds the configured column separator\nfunc (c *CSV) appendHeaderSeparatorRow(cols int) {\n\tc.appendColumnSeparator()\n\n\tfor i := 0; i < cols; i++ {\n\t\tvar separator []byte\t\n\n\t\tif c.useFormat {\n\t\t\tswitch c.columnAlignment[i] {\n\t\t\tcase \"left\", \"l\":\n\t\t\t\tseparator = mdLeftJustify\n\t\t\tcase \"center\", \"c\":\n\t\t\t\tseparator = mdCentered\n\t\t\tcase \"right\", \"r\":\n\t\t\t\tseparator = mdRightJustify\n\t\t\tdefault:\n\t\t\t\tseparator = mdDontJustify\n\t\t\t}\n\t\t} else {\n\t\t\tseparator = mdDontJustify\n\t\t}\n\n\t\tseparator = append(separator, mdPipe...)\n\t\n\t\tc.md = append(c.md, separator...)\n\t}\n\n\treturn\n\t\t\t\n}\n\n\/\/ appendColumnSeparator appends a pip to the md array\nfunc (c *CSV) appendColumnSeparator() {\n\tc.md = append(c.md, mdPipe...)\n}\n\n\/\/ FormatFromFile loads the format file specified. \nfunc (c *CSV) formatFromFile() error {\n\t\/\/ not really considering this an error that stops things, just one\n\t\/\/ that requirs error level logging. Is this right?\n\tif c.formatType != \"file\" {\n\t\tlogger.Error(\"formatFromFile: nothing to do, formatType was %s, expected file\", c.formatType)\n\t\treturn nil\n\t}\n\n\t\/\/ if formatSource isn't set, nothing todo\n\tif c.formatSource == \"\" {\n\t\tlogger.Error(\"formatFromFile: nothing to do, formatSource was not set\", c.formatType)\n\t\treturn nil\n\t}\n\n\t\/\/ Read from the format file\n\ttable, err := ReadCSVFile(c.formatSource)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn err\n\t}\n\t\n\t\/\/Row 0 is the header information\n\tc.headerRow = table[0]\n\tc.columnAlignment = table[1]\n\tc.columnEmphasis = table[2]\n\n\treturn nil\n}\n\n\/\/ Source returns the source of the CSV\nfunc (c *CSV) Source() string {\n\treturn c.source\n}\n\n\/\/ SetSource sets the source and has the formatFile updated, if applicable.\nfunc (c *CSV) SetSource(s string) {\n\tc.source = s\n\tc.autosetFormatFile() \n}\n\n\/\/ Destination is the of destination for the output, if applicable.\nfunc (c *CSV) Destination() string {\n\treturn c.destination\n}\n\n\/\/ SetDestination sets the destination of the output, if applicable.\nfunc (c *CSV) SetDestination(s string) {\n\tc.destination = s\n}\n\n\/\/ DestinationType is the type of destination for the output.\nfunc (c *CSV) DestinationType() string {\n\treturn c.destinationType\n}\n\n\/\/ SetDestinationType sets the destinationType.\nfunc (c *CSV) SetDestinationType(s string) {\n\tc.destinationType = s\n}\n\n\/\/ HasHeaderRow returns whether, or not, this csv file has a format file to\n\/\/ use.\nfunc (c *CSV) HasHeaderRow() bool {\n\treturn c.hasHeaderRow\n}\n\n\/\/ SetHasHeaderRow sets whether, or not, the source has a header row.\nfunc (c *CSV) SetHasHeaderRow(b bool) {\n\tc.hasHeaderRow = b\n}\n\n\/\/ HeaderRow returns the column headers; i.e., the header row.\nfunc (c *CSV) HeaderRow() []string {\n\treturn c.headerRow\n}\n\n\/\/ SetHeaderRow sets the headerRow information.\nfunc (c *CSV) SetHeaderRow(s []string) {\n\tc.headerRow = s\n}\n\n\/\/ ColumnAlignment returns the columnAlignment information. This can be set\n\/\/ either explicitely or using a format file.\nfunc (c *CSV) ColumnAlignment() []string {\n\treturn c.columnAlignment\n}\n\n\/\/ SetColumnAlignment sets the columnAlignment informatin.\nfunc (c *CSV) SetColumnAlignment(s []string) {\n\tc.columnAlignment = s\n}\n\n\/\/ ColumnEmphasis returns the columnEmphasis information. This can be set\n\/\/ either explicitly or with a format file.\nfunc (c *CSV) ColumnEmphasis() []string {\n\treturn c.columnEmphasis\n}\n\n\/\/ SetColumnEmphasis sets columnEmphasis information.\nfunc (c *CSV) SetColumnEmphasis(s []string) {\n\tc.columnEmphasis = s\n}\n\n\/\/ FormatSource returns the formatSource information.\nfunc (c *CSV) FormatSource() string {\n\treturn c.formatSource\n}\n\n\/\/ SetFormatSource sets formatSource information. A side-affect of this is that\n\/\/ setting the format file will automatically set `useFormat` and\n\/\/ `useFormatFile`.\nfunc (c *CSV) SetFormatSource(s string) {\n\tc.formatSource = s\n}\n\n\/\/ UseFormat returns whether this csv file has a format file to use.\nfunc (c *CSV) UseFormat() bool {\n\treturn c.useFormat\n}\n\n\/\/ SetUseFormat sets whether a format should be used. This triggers a setting\n\/\/ of the FormatFilename, if applicable.\nfunc (c *CSV) SetUseFormat(b bool) {\n\tc.useFormat = b\n\tc.autosetFormatFile()\n}\n<commit_msg>changed parm ordering and naming of marshal method on csv<commit_after>\/\/ Copyright © 2014, All rights reserved\n\/\/ Joel Scoble, https:\/\/github.com\/mohae\/tomd\n\/\/\n\/\/ This is licensed under The MIT License. Please refer to the included\n\/\/ LICENSE file for more information. If the LICENSE file has not been\n\/\/ included, please refer to the url above.\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License\n\/\/\n\/\/ tomd: to markdown, takes input and converts it to markdown\n\/\/\n\/\/ Notes: \n\/\/\t* This is not a general markdown processor. It is a package to provide\n\/\/ functions that allow things to be converted to their representation\n\/\/ in markdown.\n\/\/ Currently that means taking a .csv file and converting it to a table.\n\/\/\t* Uses seelog for 'library logging', to enable logging see:\n\/\/ http:\/\/github.com\/cihub\/seelog\/wiki\/Writing-libraries-with-Seelog\npackage tomd\n\nimport (\n\t\"encoding\/csv\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t_ \"strconv\"\n\t\"strings\"\n)\n\n\/\/ CSV is a struct for representing and working with csv data.\ntype CSV struct {\n\t\/\/ Source is the source of the CSV data. It is currently assumed to be\n\t\/\/ a path location\n\tsource string\n\n\t\/\/ destination is where the generated markdown should be put, if it is\n\t\/\/ to be put anywhere. When used, this setting is used in conjunction \n\t\/\/ with destinationType. Not all destinationTypes need to specify a\n\t\/\/ destinatin, bytes, for example. \n\tdestination string\n\n\t\/\/ destinationType is the type of destination for the md, e.g. file.\n\t\/\/ If the destinationType requires specification of the destination,\n\t\/\/ the Destination variable should be set to that value.\n\t\/\/ Supported:\n\t\/\/\t[]byte\tno destination needed\n\t\/\/\tfile\tdestination optional, if not set the output will be\n\t\/\/\t\t`sourceFilename.md` instead of `sourceFilename.csv`.\n\tdestinationType string\n\n\t\/\/ hasHeaderRows: whether the csv data includes a header row as its\n\t\/\/ first row. If the csv data does not include header data, the header\n\t\/\/ data must be provided via template, e.g. false implies \n\t\/\/ 'useFormat' == true. True does not have any implications on using\n\t\/\/ the format file.\n\thasHeaderRow bool\n\n\t\/\/ headerRow contains the header row information. This is when a format\n\t\/\/ has been supplied, the header row information is set.\n\theaderRow []string\n\t\n\t\/\/ columnAlignment contains the alignment information for each column\n\t\/\/ in the table. This is supplied by the format\n\tcolumnAlignment []string\n\n\t\/\/ columnEmphasis contains the emphasis information, if any. for each\n\t\/\/ column. This is supplied by the format.\n\tcolumnEmphasis []string\n\n\t\/\/ formatSource: the location and name of the source file to use. It\n\t\/\/ can either be explicitely set, or TOMD will look for it as\n\t\/\/ `source.fmt`, for `source.csv`.\n\tformatSource string\n\n\t\/\/ whether for formatSource was autoset or not.\n\tformatSourceAutoset bool\n\n\t\/\/ useFormat: whether there's a format to use with the CSV or not. For\n\t\/\/ files, this is usually a file, with the same name and path as the\n\t\/\/ source, using the 'fmt' extension. This can also be set explicitely.\n\t\/\/ 'useFormat' == false implies 'hasHeaderRow' == true.\n\tuseFormat bool\n\n\t\/\/ formatType:\tthe type of format to use. By default, this is in sync\n\t\/\/\t\twith the source type, but it can be set independently.\n\t\/\/ Supported:\n\t\/\/\tfile\tThe format information is in a format file. By default,\n\t\/\/\t\tthis is the source filename with the `.fmt` file\n\t\/\/\t\textension, instead of the original extension. This can\n\t\/\/\t\tbe set independently too.\n\t\/\/\tdefault Any setting other than another supported type will be\n\t\/\/\t\tinterpreted as using the default, which is to manually\n\t\/\/\t\tset the different format information you wish to use\n\t\/\/\t\tin the marshal using their Setters.\n\tformatType string\n\n\t\/\/ table is the parsed csv data\n\ttable [][]string\n\n\t\/\/ md holds the md representation of the csv data\n\tmd []byte\n}\n\n\/\/ NewCSV returns an initialize CSV object. It still needs to be configured\n\/\/ for use.\nfunc NewCSV() *CSV {\n\tC := &CSV{\n\t\thasHeaderRow: true,\n\t\tdestinationType: \"bytes\",\n\t\ttable: [][]string{},\n\t}\n\treturn C\n}\n\n\/\/ NewSourceCSV creates a new *CSV with its source set and initialized.\nfunc NewSourcesCSV(t, s string, b bool) *CSV {\n\tc := NewCSV()\n\tc.useFormat = b\n\n\t\/\/ currently anything that's not file uses the default \"\", which\n\t\/\/ means set it yourself to use it.\n\tswitch t {\n\tcase \"bytes\":\n\tcase \"file\":\n\t\tc.formatType = \"file\"\n\t}\n\n\tc.SetSource(s)\n\treturn c\n}\n\n\/\/ MDTable takes a reader for csv and converts the read csv to a markdown\n\/\/ table. To get the md, call CSV.md()\nfunc (c *CSV) ToMDTable(r io.Reader) error {\n\tvar err error\n\tc.table, err = ReadCSV(r)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn err\n\t}\n\n\t\/\/Now convert the data to md\n\tc.toMD()\n\treturn nil\n}\n\n\/\/ MarshalTable marshals the CSV info to a markdown table.\nfunc (c *CSV) MarshalTable() error{\n\tlogger.Debugf(\"MarshalTable enter, source: %s\", c.source)\n\tvar err error\n\t\/\/ Try to read the source\n\tc.table, err = ReadCSVFile(c.source)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn err\n\t}\n\t\t\n\tvar formatName string\n\t\/\/ otherwise see if HasFormat\n\tif c.useFormat {\n\/\/\t\tc.setFormatFile()\n\t\tif c.formatType == \"file\" {\n\t\t\t\/\/derive the format filename\n\t\t\tfilename := filepath.Base(c.source)\n\t\t\tif filename == \".\" {\n\t\t\t\terr = fmt.Errorf(\"unable to determine format filename\")\n\t\t\t\tlogger.Error(err)\n\t\t\t\treturn err\n\t\t\t}\n\t\n\t\t\tdir := filepath.Dir(c.source)\n\t\t\tparts := strings.Split(filename, \".\")\n\t\t\tformatName = parts[0] + \".fmt\"\n\t\t\tif dir != \".\" {\n\t\t\t\tformatName = dir + formatName\n\t\t\t}\n\t\t}\n\t}\n\t\n\tif c.useFormat {\n\t\terr := c.formatFromFile()\n\t\tif err != nil {\n\t\t\tlogger.Error(err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Now convert the data to md\n\tc.toMD()\n\n\tlogger.Debug(\"FileToMDTable exit with error: nil\")\n\treturn nil\n}\n\n\/\/ MD() returns the markdown as []byte\nfunc (c *CSV) MD() []byte {\n\treturn c.md\n}\n\n\/\/ ReadCSV takes a reader, and reads the data connected with it as CSV data.\n\/\/ A slice of slice of type string, or an error, are returned. This reads the\n\/\/ entire file, so if the file is very large and you don't have sufficent RAM\n\/\/ you will not like the results. There may be a row or chunk oriented\n\/\/ implementation in the future.\nfunc ReadCSV(r io.Reader ) ([][]string, error) {\n\tcr := csv.NewReader(r)\n\trows, err := cr.ReadAll()\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn nil, err\n\t}\n\n\treturn rows, nil\n}\n\n\/\/ ReadCSVFile takes a path, reads the contents of the file and returns int.\nfunc ReadCSVFile(f string) ([][]string, error) {\n\tfile, err := os.Open(f)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn nil, err\n\t}\n\t\n\t\/\/ because we don't want to forget or worry about hanldling close prior\n\t\/\/ to every return.\n\tdefer file.Close()\n\t\n\t\/\/\n\tdata, err := ReadCSV(file)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn nil, err\n\t}\n\n\treturn data, nil\n}\n\n\/\/ tomd does table header processing then converts its table data to md,\nfunc (c *CSV) toMD() () {\n\t\/\/ Process the header first\n\tc.addHeader()\n\n\t\/\/ for each row of table data, process it.\n\tfor _, row := range c.table {\n\t\tc.rowToMD(row)\n\t}\n\t\n\treturn\n}\n\n\/\/ rowTomd takes a csv table row and returns the md version of it consistent\n\/\/ with its configuration.\nfunc (c *CSV) rowToMD(cols []string) {\n\tc.appendColumnSeparator()\n\n\tfor _, col := range cols {\n\t\t\/\/ TODO this is where column data decoration would occur\n\t\t\/\/ with templates\n\t\tbcol := []byte(col)\n\t\tc.md = append(c.md, bcol...)\n\t\tc.appendColumnSeparator()\n\t}\n\n}\n\n\/\/ setFormatSource sets the formatSource if it is not already set or if the \n\/\/ previously set value was set by setFormatSource. The latter allows auto-\n\/\/ generated default source name to be updated when the source is while \n\/\/ preserving overrides.\nfunc (c *CSV) autosetFormatFile() error {\n\t\/\/ if the source isn't set, nothing to do.\n\tif c.source == \"\" {\n\t\tlogger.Trace(\"setFormatSource exit: source not set\")\n\t\treturn nil\n\t}\n\n\t\/\/ if formatSource isn't empty and wasn't set by setFormatSource,\n\t\/\/ nothing to do\n\tif c.formatSource != \"\" && !c.formatSourceAutoset {\n\t\tlogger.Infof(\"setFormatSource exit: formatSource was already set to %s\", c.formatSource)\n\t\treturn nil\n\t}\n\n\tif c.formatType != \"file\" {\n\t\tlogger.Trace(\"setFormatSource exit: not using format file, format type is %s\", c.formatType)\n\t\treturn nil\n\t}\n\n\t\/\/ Figure out the filename\n\tdir, file := filepath.Split(c.source)\n\t\n\t\/\/ break up the filename into its part, the last is extension.\n\tvar fname string\n\tfParts := strings.Split(file, \".\")\n\n\tif len(fParts) <= 2 {\n\t\tfname = fParts[0]\n\t} else {\n\t\t\/\/ Join all but the last part together for the name\n\t\t\/\/ This handles names with multiple `.`\n\t\tfname = strings.Join(fParts[0:len(fParts) - 2], \".\")\n\t}\n\n\tfname += \".md\"\n\tc.formatSource = dir + fname\t\n\tc.formatSourceAutoset = true\n\treturn nil\n}\n\n\/\/ addHeader adds the table header row and the separator row that goes between\n\/\/ the header row and the data.\nfunc (c *CSV) addHeader() () {\n\tif c.hasHeaderRow {\n\t\tc.rowToMD(c.table[0])\n\t\t\/\/remove the first row\n\t\tc.table = append(c.table[1:])\n\t} else {\n\t\tif c.useFormat {\n\t\t\tc.rowToMD(c.headerRow)\n\t\t}\n\t}\n\n\tc.appendHeaderSeparatorRow(len(c.table[0]))\n\treturn\n}\n\n\/\/ appendHeaderSeparator adds the configured column separator\nfunc (c *CSV) appendHeaderSeparatorRow(cols int) {\n\tc.appendColumnSeparator()\n\n\tfor i := 0; i < cols; i++ {\n\t\tvar separator []byte\t\n\n\t\tif c.useFormat {\n\t\t\tswitch c.columnAlignment[i] {\n\t\t\tcase \"left\", \"l\":\n\t\t\t\tseparator = mdLeftJustify\n\t\t\tcase \"center\", \"c\":\n\t\t\t\tseparator = mdCentered\n\t\t\tcase \"right\", \"r\":\n\t\t\t\tseparator = mdRightJustify\n\t\t\tdefault:\n\t\t\t\tseparator = mdDontJustify\n\t\t\t}\n\t\t} else {\n\t\t\tseparator = mdDontJustify\n\t\t}\n\n\t\tseparator = append(separator, mdPipe...)\n\t\n\t\tc.md = append(c.md, separator...)\n\t}\n\n\treturn\n\t\t\t\n}\n\n\/\/ appendColumnSeparator appends a pip to the md array\nfunc (c *CSV) appendColumnSeparator() {\n\tc.md = append(c.md, mdPipe...)\n}\n\n\/\/ FormatFromFile loads the format file specified. \nfunc (c *CSV) formatFromFile() error {\n\t\/\/ not really considering this an error that stops things, just one\n\t\/\/ that requirs error level logging. Is this right?\n\tif c.formatType != \"file\" {\n\t\tlogger.Error(\"formatFromFile: nothing to do, formatType was %s, expected file\", c.formatType)\n\t\treturn nil\n\t}\n\n\t\/\/ if formatSource isn't set, nothing todo\n\tif c.formatSource == \"\" {\n\t\tlogger.Error(\"formatFromFile: nothing to do, formatSource was not set\", c.formatType)\n\t\treturn nil\n\t}\n\n\t\/\/ Read from the format file\n\ttable, err := ReadCSVFile(c.formatSource)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn err\n\t}\n\t\n\t\/\/Row 0 is the header information\n\tc.headerRow = table[0]\n\tc.columnAlignment = table[1]\n\tc.columnEmphasis = table[2]\n\n\treturn nil\n}\n\n\/\/ Source returns the source of the CSV\nfunc (c *CSV) Source() string {\n\treturn c.source\n}\n\n\/\/ SetSource sets the source and has the formatFile updated, if applicable.\nfunc (c *CSV) SetSource(s string) {\n\tc.source = s\n\tc.autosetFormatFile() \n}\n\n\/\/ Destination is the of destination for the output, if applicable.\nfunc (c *CSV) Destination() string {\n\treturn c.destination\n}\n\n\/\/ SetDestination sets the destination of the output, if applicable.\nfunc (c *CSV) SetDestination(s string) {\n\tc.destination = s\n}\n\n\/\/ DestinationType is the type of destination for the output.\nfunc (c *CSV) DestinationType() string {\n\treturn c.destinationType\n}\n\n\/\/ SetDestinationType sets the destinationType.\nfunc (c *CSV) SetDestinationType(s string) {\n\tc.destinationType = s\n}\n\n\/\/ HasHeaderRow returns whether, or not, this csv file has a format file to\n\/\/ use.\nfunc (c *CSV) HasHeaderRow() bool {\n\treturn c.hasHeaderRow\n}\n\n\/\/ SetHasHeaderRow sets whether, or not, the source has a header row.\nfunc (c *CSV) SetHasHeaderRow(b bool) {\n\tc.hasHeaderRow = b\n}\n\n\/\/ HeaderRow returns the column headers; i.e., the header row.\nfunc (c *CSV) HeaderRow() []string {\n\treturn c.headerRow\n}\n\n\/\/ SetHeaderRow sets the headerRow information.\nfunc (c *CSV) SetHeaderRow(s []string) {\n\tc.headerRow = s\n}\n\n\/\/ ColumnAlignment returns the columnAlignment information. This can be set\n\/\/ either explicitely or using a format file.\nfunc (c *CSV) ColumnAlignment() []string {\n\treturn c.columnAlignment\n}\n\n\/\/ SetColumnAlignment sets the columnAlignment informatin.\nfunc (c *CSV) SetColumnAlignment(s []string) {\n\tc.columnAlignment = s\n}\n\n\/\/ ColumnEmphasis returns the columnEmphasis information. This can be set\n\/\/ either explicitly or with a format file.\nfunc (c *CSV) ColumnEmphasis() []string {\n\treturn c.columnEmphasis\n}\n\n\/\/ SetColumnEmphasis sets columnEmphasis information.\nfunc (c *CSV) SetColumnEmphasis(s []string) {\n\tc.columnEmphasis = s\n}\n\n\/\/ FormatSource returns the formatSource information.\nfunc (c *CSV) FormatSource() string {\n\treturn c.formatSource\n}\n\n\/\/ SetFormatSource sets formatSource information. A side-affect of this is that\n\/\/ setting the format file will automatically set `useFormat` and\n\/\/ `useFormatFile`.\nfunc (c *CSV) SetFormatSource(s string) {\n\tc.formatSource = s\n}\n\n\/\/ UseFormat returns whether this csv file has a format file to use.\nfunc (c *CSV) UseFormat() bool {\n\treturn c.useFormat\n}\n\n\/\/ SetUseFormat sets whether a format should be used. This triggers a setting\n\/\/ of the FormatFilename, if applicable.\nfunc (c *CSV) SetUseFormat(b bool) {\n\tc.useFormat = b\n\tc.autosetFormatFile()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Jens Rantil. All rights reserved. Use of this source code is\n\/\/ governed by a BSD-style license that can be found in the LICENSE file.\n\n\/\/ A CSV implementation inspired by Python's CSV module. Supports custom CSV\n\/\/ formats. Currently only writing CSV files is supported.\npackage csv\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"strings\"\n)\n\n\/\/ A helper interface for a general CSV writer. Adheres to encoding\/csv Writer\n\/\/ in the standard go library as well as the Writer implemented by this\n\/\/ package.\ntype CsvWriter interface {\n\t\/\/ Currently no errors are possible.\n\tError() error\n\n\t\/\/ Flush writes any buffered data to the underlying io.Writer.\n\t\/\/ To check if an error occurred during the Flush, call Error.\n\tFlush()\n\n\t\/\/ Writer writes a single CSV record to w along with any necessary quoting.\n\t\/\/ A record is a slice of strings with each string being one field.\n\tWrite(record []string) error\n\n\t\/\/ WriteAll writes multiple CSV records to w using Write and then calls Flush.\n\tWriteAll(records [][]string) error\n}\n\n\/\/ Values Dialect.Quoting can take.\nconst (\n\tQuoteDefault = iota \/\/ See DefaultQuoting.\n\tQuoteAll = iota\n\tQuoteMinimal = iota\n\tQuoteNonNumeric = iota\n\tQuoteNone = iota\n)\n\n\/\/ Values Dialect.DoubleQuote can take.\nconst (\n\tDoubleQuoteDefault = iota \/\/ See DefaultDoubleQuote.\n\tDoDoubleQuote = iota\n\tNoDoubleQuote = iota\n)\n\n\/\/ Default dialect.\nconst (\n\tDefaultDelimiter = \" \"\n\tDefaultQuoting = QuoteMinimal\n\tDefaultDoubleQuote = DoDoubleQuote\n\tDefaultEscapeChar = '\\\\'\n\tDefaultQuoteChar = '\"'\n\tDefaultLineTerminator = \"\\n\"\n)\n\ntype Dialect struct {\n\tDelimiter string\n\tQuoting int\n\tDoubleQuote int\n\tEscapeChar rune\n\tQuoteChar rune\n\tLineTerminator string\n}\n\nfunc (wo *Dialect) setDefaults() {\n\tif wo.Delimiter == \"\" {\n\t\two.Delimiter = DefaultDelimiter\n\t}\n\tif wo.Quoting == QuoteDefault {\n\t\two.Quoting = DefaultQuoting\n\t}\n\tif wo.LineTerminator == \"\" {\n\t\two.LineTerminator = DefaultLineTerminator\n\t}\n\tif wo.DoubleQuote == DoubleQuoteDefault {\n\t\two.DoubleQuote = DefaultDoubleQuote\n\t}\n\tif wo.QuoteChar == 0 {\n\t\two.QuoteChar = DefaultQuoteChar\n\t}\n\tif wo.EscapeChar == 0 {\n\t\two.EscapeChar = DefaultEscapeChar\n\t}\n}\n\ntype Writer struct {\n\topts Dialect\n\tw *bufio.Writer\n}\n\n\/\/ Create a writer that adheres to the Golang CSV writer.\nfunc NewWriter(w io.Writer) Writer {\n\topts := Dialect{}\n\topts.setDefaults()\n\treturn Writer{\n\t\topts: opts,\n\t\tw: bufio.NewWriter(w),\n\t}\n}\n\n\/\/ Create a custom CSV writer.\nfunc NewDialectWriter(w io.Writer, opts Dialect) Writer {\n\topts.setDefaults()\n\treturn Writer{\n\t\topts: opts,\n\t\tw: bufio.NewWriter(w),\n\t}\n}\n\n\/\/ Error reports any error that has occurred during a previous Write or Flush.\nfunc (w Writer) Error() error {\n\t_, err := w.w.Write(nil)\n\treturn err\n}\n\n\/\/ Flush writes any buffered data to the underlying io.Writer.\n\/\/ To check if an error occurred during the Flush, call Error.\nfunc (w Writer) Flush() {\n\tw.w.Flush()\n}\n\n\/\/ Helper function that ditches the first return value of w.w.WriteString().\n\/\/ Simplifies code.\nfunc (w Writer) writeString(s string) error {\n\t_, err := w.w.WriteString(s)\n\treturn err\n}\n\nfunc (w Writer) writeDelimiter() error {\n\treturn w.writeString(w.opts.Delimiter)\n}\n\nfunc isDigit(s rune) bool {\n\treturn s >= '0' && s <= '9'\n}\n\nfunc isNumeric(s string) bool {\n\tif len(s) == 0 {\n\t\treturn false\n\t}\n\tfor _, r := range s {\n\t\tif !isDigit(r) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (w Writer) fieldNeedsQuote(field string) bool {\n\tswitch w.opts.Quoting {\n\tcase QuoteNone:\n\t\treturn false\n\tcase QuoteAll:\n\t\treturn true\n\tcase QuoteNonNumeric:\n\t\treturn !isNumeric(field)\n\tcase QuoteMinimal:\n\t\t\/\/ TODO: Can be improved by making a single search with trie.\n\t\t\/\/ See https:\/\/docs.python.org\/2\/library\/csv.html#csv.QUOTE_MINIMAL for info on this.\n\t\treturn strings.Contains(field, w.opts.LineTerminator) || strings.Contains(field, w.opts.Delimiter) || strings.ContainsRune(field, w.opts.QuoteChar)\n\tdefault:\n\t\tpanic(\"Unexpected quoting.\")\n\t}\n}\n\nfunc (w Writer) writeRune(r rune) error {\n\t_, err := w.w.WriteRune(r)\n\treturn err\n}\n\nfunc (w Writer) writeEscapeChar(r rune) error {\n\tswitch w.opts.DoubleQuote {\n\tcase DoDoubleQuote:\n\t\treturn w.writeRune(r)\n\tcase NoDoubleQuote:\n\t\treturn w.writeRune(w.opts.EscapeChar)\n\tdefault:\n\t\tpanic(\"Unrecognized double quote type.\")\n\t}\n}\n\nfunc (w Writer) writeQuotedRune(r rune) error {\n\tswitch r {\n\tcase w.opts.EscapeChar:\n\t\tif err := w.writeEscapeChar(r); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase w.opts.QuoteChar:\n\t\tif err := w.writeEscapeChar(r); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn w.writeRune(r)\n}\n\nfunc (w Writer) writeQuoted(field string) error {\n\tif err := w.writeRune(w.opts.QuoteChar); err != nil {\n\t\treturn err\n\t}\n\tfor _, r := range field {\n\t\tif err := w.writeQuotedRune(r); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn w.writeRune(w.opts.QuoteChar)\n}\n\nfunc (w Writer) writeField(field string) error {\n\tif w.fieldNeedsQuote(field) {\n\t\treturn w.writeQuoted(field)\n\t} else {\n\t\treturn w.writeString(field)\n\t}\n}\n\nfunc (w Writer) writeNewline() error {\n\treturn w.writeString(w.opts.LineTerminator)\n}\n\n\/\/ Writer writes a single CSV record to w along with any necessary quoting.\n\/\/ A record is a slice of strings with each string being one field.\nfunc (w Writer) Write(record []string) (err error) {\n\tfor n, field := range record {\n\t\tif n > 0 {\n\t\t\tif err = w.writeDelimiter(); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err = w.writeField(field); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\terr = w.writeNewline()\n\treturn\n}\n\n\/\/ WriteAll writes multiple CSV records to w using Write and then calls Flush.\nfunc (w Writer) WriteAll(records [][]string) (err error) {\n\tfor _, record := range records {\n\t\tif err := w.Write(record); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn w.w.Flush()\n}\n<commit_msg>chore: reuse IsDigit logic from unicode<commit_after>\/\/ Copyright 2014 Jens Rantil. All rights reserved. Use of this source code is\n\/\/ governed by a BSD-style license that can be found in the LICENSE file.\n\n\/\/ A CSV implementation inspired by Python's CSV module. Supports custom CSV\n\/\/ formats. Currently only writing CSV files is supported.\npackage csv\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"strings\"\n\t\"unicode\"\n)\n\n\/\/ A helper interface for a general CSV writer. Adheres to encoding\/csv Writer\n\/\/ in the standard go library as well as the Writer implemented by this\n\/\/ package.\ntype CsvWriter interface {\n\t\/\/ Currently no errors are possible.\n\tError() error\n\n\t\/\/ Flush writes any buffered data to the underlying io.Writer.\n\t\/\/ To check if an error occurred during the Flush, call Error.\n\tFlush()\n\n\t\/\/ Writer writes a single CSV record to w along with any necessary quoting.\n\t\/\/ A record is a slice of strings with each string being one field.\n\tWrite(record []string) error\n\n\t\/\/ WriteAll writes multiple CSV records to w using Write and then calls Flush.\n\tWriteAll(records [][]string) error\n}\n\n\/\/ Values Dialect.Quoting can take.\nconst (\n\tQuoteDefault = iota \/\/ See DefaultQuoting.\n\tQuoteAll = iota\n\tQuoteMinimal = iota\n\tQuoteNonNumeric = iota\n\tQuoteNone = iota\n)\n\n\/\/ Values Dialect.DoubleQuote can take.\nconst (\n\tDoubleQuoteDefault = iota \/\/ See DefaultDoubleQuote.\n\tDoDoubleQuote = iota\n\tNoDoubleQuote = iota\n)\n\n\/\/ Default dialect.\nconst (\n\tDefaultDelimiter = \" \"\n\tDefaultQuoting = QuoteMinimal\n\tDefaultDoubleQuote = DoDoubleQuote\n\tDefaultEscapeChar = '\\\\'\n\tDefaultQuoteChar = '\"'\n\tDefaultLineTerminator = \"\\n\"\n)\n\ntype Dialect struct {\n\tDelimiter string\n\tQuoting int\n\tDoubleQuote int\n\tEscapeChar rune\n\tQuoteChar rune\n\tLineTerminator string\n}\n\nfunc (wo *Dialect) setDefaults() {\n\tif wo.Delimiter == \"\" {\n\t\two.Delimiter = DefaultDelimiter\n\t}\n\tif wo.Quoting == QuoteDefault {\n\t\two.Quoting = DefaultQuoting\n\t}\n\tif wo.LineTerminator == \"\" {\n\t\two.LineTerminator = DefaultLineTerminator\n\t}\n\tif wo.DoubleQuote == DoubleQuoteDefault {\n\t\two.DoubleQuote = DefaultDoubleQuote\n\t}\n\tif wo.QuoteChar == 0 {\n\t\two.QuoteChar = DefaultQuoteChar\n\t}\n\tif wo.EscapeChar == 0 {\n\t\two.EscapeChar = DefaultEscapeChar\n\t}\n}\n\ntype Writer struct {\n\topts Dialect\n\tw *bufio.Writer\n}\n\n\/\/ Create a writer that adheres to the Golang CSV writer.\nfunc NewWriter(w io.Writer) Writer {\n\topts := Dialect{}\n\topts.setDefaults()\n\treturn Writer{\n\t\topts: opts,\n\t\tw: bufio.NewWriter(w),\n\t}\n}\n\n\/\/ Create a custom CSV writer.\nfunc NewDialectWriter(w io.Writer, opts Dialect) Writer {\n\topts.setDefaults()\n\treturn Writer{\n\t\topts: opts,\n\t\tw: bufio.NewWriter(w),\n\t}\n}\n\n\/\/ Error reports any error that has occurred during a previous Write or Flush.\nfunc (w Writer) Error() error {\n\t_, err := w.w.Write(nil)\n\treturn err\n}\n\n\/\/ Flush writes any buffered data to the underlying io.Writer.\n\/\/ To check if an error occurred during the Flush, call Error.\nfunc (w Writer) Flush() {\n\tw.w.Flush()\n}\n\n\/\/ Helper function that ditches the first return value of w.w.WriteString().\n\/\/ Simplifies code.\nfunc (w Writer) writeString(s string) error {\n\t_, err := w.w.WriteString(s)\n\treturn err\n}\n\nfunc (w Writer) writeDelimiter() error {\n\treturn w.writeString(w.opts.Delimiter)\n}\n\nfunc isNumeric(s string) bool {\n\tif len(s) == 0 {\n\t\treturn false\n\t}\n\tfor _, r := range s {\n\t\tif !unicode.IsDigit(r) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (w Writer) fieldNeedsQuote(field string) bool {\n\tswitch w.opts.Quoting {\n\tcase QuoteNone:\n\t\treturn false\n\tcase QuoteAll:\n\t\treturn true\n\tcase QuoteNonNumeric:\n\t\treturn !isNumeric(field)\n\tcase QuoteMinimal:\n\t\t\/\/ TODO: Can be improved by making a single search with trie.\n\t\t\/\/ See https:\/\/docs.python.org\/2\/library\/csv.html#csv.QUOTE_MINIMAL for info on this.\n\t\treturn strings.Contains(field, w.opts.LineTerminator) || strings.Contains(field, w.opts.Delimiter) || strings.ContainsRune(field, w.opts.QuoteChar)\n\tdefault:\n\t\tpanic(\"Unexpected quoting.\")\n\t}\n}\n\nfunc (w Writer) writeRune(r rune) error {\n\t_, err := w.w.WriteRune(r)\n\treturn err\n}\n\nfunc (w Writer) writeEscapeChar(r rune) error {\n\tswitch w.opts.DoubleQuote {\n\tcase DoDoubleQuote:\n\t\treturn w.writeRune(r)\n\tcase NoDoubleQuote:\n\t\treturn w.writeRune(w.opts.EscapeChar)\n\tdefault:\n\t\tpanic(\"Unrecognized double quote type.\")\n\t}\n}\n\nfunc (w Writer) writeQuotedRune(r rune) error {\n\tswitch r {\n\tcase w.opts.EscapeChar:\n\t\tif err := w.writeEscapeChar(r); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase w.opts.QuoteChar:\n\t\tif err := w.writeEscapeChar(r); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn w.writeRune(r)\n}\n\nfunc (w Writer) writeQuoted(field string) error {\n\tif err := w.writeRune(w.opts.QuoteChar); err != nil {\n\t\treturn err\n\t}\n\tfor _, r := range field {\n\t\tif err := w.writeQuotedRune(r); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn w.writeRune(w.opts.QuoteChar)\n}\n\nfunc (w Writer) writeField(field string) error {\n\tif w.fieldNeedsQuote(field) {\n\t\treturn w.writeQuoted(field)\n\t} else {\n\t\treturn w.writeString(field)\n\t}\n}\n\nfunc (w Writer) writeNewline() error {\n\treturn w.writeString(w.opts.LineTerminator)\n}\n\n\/\/ Writer writes a single CSV record to w along with any necessary quoting.\n\/\/ A record is a slice of strings with each string being one field.\nfunc (w Writer) Write(record []string) (err error) {\n\tfor n, field := range record {\n\t\tif n > 0 {\n\t\t\tif err = w.writeDelimiter(); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif err = w.writeField(field); err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\terr = w.writeNewline()\n\treturn\n}\n\n\/\/ WriteAll writes multiple CSV records to w using Write and then calls Flush.\nfunc (w Writer) WriteAll(records [][]string) (err error) {\n\tfor _, record := range records {\n\t\tif err := w.Write(record); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn w.w.Flush()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage tensorflow\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"image\"\n\t_ \"image\/jpeg\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n)\n\nfunc Example() {\n\t\/\/ An example for using the TensorFlow Go API for image recognition\n\t\/\/ using a pre-trained inception model (http:\/\/arxiv.org\/abs\/1512.00567).\n\t\/\/\n\t\/\/ The pre-trained model takes input in the form of a 4-dimensional\n\t\/\/ tensor with shape [ BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, 3 ],\n\t\/\/ where:\n\t\/\/ - BATCH_SIZE allows for inference of multiple images in one pass through the graph\n\t\/\/ - IMAGE_HEIGHT is the height of the images on which the model was trained\n\t\/\/ - IMAGE_WIDTH is the width of the images on which the model was trained\n\t\/\/ - 3 is the (R, G, B) values of the pixel colors represented as a float.\n\t\/\/\n\t\/\/ And produces as output a vector with shape [ NUM_LABELS ].\n\t\/\/ output[i] is the probability that the input image was recognized as\n\t\/\/ having the i-th label.\n\t\/\/\n\t\/\/ A separate file contains a list of string labels corresponding to the\n\t\/\/ integer indices of the output.\n\t\/\/\n\t\/\/ This example:\n\t\/\/ - Loads the serialized representation of the pre-trained model into a Graph\n\t\/\/ - Creates a Session to execute operations on the Graph\n\t\/\/ - Converts an image file to a Tensor to provide as input for Graph execution\n\t\/\/ - Exectues the graph and prints out the label with the highest probability\n\tconst (\n\t\t\/\/ Path to a pre-trained inception model.\n\t\t\/\/ The two files are extracted from a zip archive as so:\n\t\t\/*\n\t\t curl -L https:\/\/storage.googleapis.com\/download.tensorflow.org\/models\/inception5h.zip -o \/tmp\/inception5h.zip\n\t\t unzip \/tmp\/inception5h.zip -d \/tmp\n\t\t*\/\n\t\tmodelFile = \"\/tmp\/tensorflow_inception_graph.pb\"\n\t\tlabelsFile = \"\/tmp\/imagenet_comp_graph_label_strings.txt\"\n\n\t\t\/\/ Image file to \"recognize\".\n\t\ttestImageFilename = \"\/tmp\/test.jpg\"\n\t)\n\n\t\/\/ Load the serialized GraphDef from a file.\n\tmodel, err := ioutil.ReadFile(modelFile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Construct an in-memory graph from the serialized form.\n\tgraph := NewGraph()\n\tif err := graph.Import(model, \"\"); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Create a session for inference over graph.\n\tsession, err := NewSession(graph, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer session.Close()\n\n\t\/\/ Run inference on testImageFilename.\n\t\/\/ For multiple images, session.Run() can be called in a loop (and\n\t\/\/ concurrently). Furthermore, images can be batched together since the\n\t\/\/ model accepts batches of image data as input.\n\ttensor, err := makeTensorFromImageForInception(testImageFilename)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\toutput, err := session.Run(\n\t\tmap[Output]*Tensor{\n\t\t\tgraph.Operation(\"input\").Output(0): tensor,\n\t\t},\n\t\t[]Output{\n\t\t\tgraph.Operation(\"output\").Output(0),\n\t\t},\n\t\tnil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t\/\/ output[0].Value() is a vector containing probabilities of\n\t\/\/ labels for each image in the \"batch\". The batch size was 1.\n\t\/\/ Find the most probably label index.\n\tprobabilities := output[0].Value().([][]float32)[0]\n\tprintBestLabel(probabilities, labelsFile)\n}\n\nfunc printBestLabel(probabilities []float32, labelsFile string) {\n\tbestIdx := 0\n\tfor i, p := range probabilities {\n\t\tif p > probabilities[bestIdx] {\n\t\t\tbestIdx = i\n\t\t}\n\t}\n\t\/\/ Found a best match, now read the string from the labelsFile where\n\t\/\/ there is one line per label.\n\tfile, err := os.Open(labelsFile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer file.Close()\n\tscanner := bufio.NewScanner(file)\n\tvar labels []string\n\tfor scanner.Scan() {\n\t\tlabels = append(labels, scanner.Text())\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Printf(\"ERROR: failed to read %s: %v\", labelsFile, err)\n\t}\n\tfmt.Printf(\"BEST MATCH: (%2.0f%% likely) %s\\n\", probabilities[bestIdx]*100.0, labels[bestIdx])\n}\n\n\/\/ Given an image stored in filename, returns a Tensor which is suitable for\n\/\/ providing the image data to the pre-defined model.\nfunc makeTensorFromImageForInception(filename string) (*Tensor, error) {\n\tconst (\n\t\t\/\/ Some constants specific to the pre-trained model at:\n\t\t\/\/ https:\/\/storage.googleapis.com\/download.tensorflow.org\/models\/inception5h.zip\n\t\t\/\/\n\t\t\/\/ - The model was trained after with images scaled to 224x224 pixels.\n\t\t\/\/ - The colors, represented as R, G, B in 1-byte each were converted to\n\t\t\/\/ float using (value - Mean)\/Std.\n\t\t\/\/\n\t\t\/\/ If using a different pre-trained model, the values will have to be adjusted.\n\t\tH, W = 224, 224\n\t\tMean = 117\n\t\tStd = float32(1)\n\t)\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\timg, _, err := image.Decode(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsz := img.Bounds().Size()\n\tif sz.X != W || sz.Y != H {\n\t\treturn nil, fmt.Errorf(\"input image is required to be %dx%d pixels, was %dx%d\", W, H, sz.X, sz.Y)\n\t}\n\t\/\/ 4-dimensional input:\n\t\/\/ - 1st dimension: Batch size (the model takes a batch of images as\n\t\/\/ input, here the \"batch size\" is 1)\n\t\/\/ - 2nd dimension: Rows of the image\n\t\/\/ - 3rd dimension: Columns of the row\n\t\/\/ - 4th dimension: Colors of the pixel as (B, G, R)\n\t\/\/ Thus, the shape is [1, 224, 224, 3]\n\tvar ret [1][H][W][3]float32\n\tfor y := 0; y < H; y++ {\n\t\tfor x := 0; x < W; x++ {\n\t\t\tpx := x + img.Bounds().Min.X\n\t\t\tpy := y + img.Bounds().Min.Y\n\t\t\tr, g, b, _ := img.At(px, py).RGBA()\n\t\t\tret[0][y][x][0] = float32((int(b>>8) - Mean)) \/ Std\n\t\t\tret[0][y][x][1] = float32((int(g>>8) - Mean)) \/ Std\n\t\t\tret[0][y][x][2] = float32((int(r>>8) - Mean)) \/ Std\n\t\t}\n\t}\n\treturn NewTensor(ret)\n}\n<commit_msg>go: Make example easier to try out.<commit_after>\/\/ Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage tensorflow_test\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"image\"\n\t_ \"image\/jpeg\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"os\"\n\n\ttf \"github.com\/tensorflow\/tensorflow\/tensorflow\/go\"\n)\n\nfunc Example() {\n\t\/\/ An example for using the TensorFlow Go API for image recognition\n\t\/\/ using a pre-trained inception model (http:\/\/arxiv.org\/abs\/1512.00567).\n\t\/\/\n\t\/\/ The pre-trained model takes input in the form of a 4-dimensional\n\t\/\/ tensor with shape [ BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, 3 ],\n\t\/\/ where:\n\t\/\/ - BATCH_SIZE allows for inference of multiple images in one pass through the graph\n\t\/\/ - IMAGE_HEIGHT is the height of the images on which the model was trained\n\t\/\/ - IMAGE_WIDTH is the width of the images on which the model was trained\n\t\/\/ - 3 is the (R, G, B) values of the pixel colors represented as a float.\n\t\/\/\n\t\/\/ And produces as output a vector with shape [ NUM_LABELS ].\n\t\/\/ output[i] is the probability that the input image was recognized as\n\t\/\/ having the i-th label.\n\t\/\/\n\t\/\/ A separate file contains a list of string labels corresponding to the\n\t\/\/ integer indices of the output.\n\t\/\/\n\t\/\/ This example:\n\t\/\/ - Loads the serialized representation of the pre-trained model into a Graph\n\t\/\/ - Creates a Session to execute operations on the Graph\n\t\/\/ - Converts an image file to a Tensor to provide as input for Graph execution\n\t\/\/ - Exectues the graph and prints out the label with the highest probability\n\tconst (\n\t\t\/\/ Path to a pre-trained inception model.\n\t\t\/\/ The two files are extracted from a zip archive as so:\n\t\t\/*\n\t\t curl -L https:\/\/storage.googleapis.com\/download.tensorflow.org\/models\/inception5h.zip -o \/tmp\/inception5h.zip\n\t\t unzip \/tmp\/inception5h.zip -d \/tmp\n\t\t*\/\n\t\tmodelFile = \"\/tmp\/tensorflow_inception_graph.pb\"\n\t\tlabelsFile = \"\/tmp\/imagenet_comp_graph_label_strings.txt\"\n\n\t\t\/\/ Image file to \"recognize\".\n\t\ttestImageFilename = \"\/tmp\/test.jpg\"\n\t)\n\n\t\/\/ Load the serialized GraphDef from a file.\n\tmodel, err := ioutil.ReadFile(modelFile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Construct an in-memory graph from the serialized form.\n\tgraph := tf.NewGraph()\n\tif err := graph.Import(model, \"\"); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Create a session for inference over graph.\n\tsession, err := tf.NewSession(graph, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer session.Close()\n\n\t\/\/ Run inference on testImageFilename.\n\t\/\/ For multiple images, session.Run() can be called in a loop (and\n\t\/\/ concurrently). Furthermore, images can be batched together since the\n\t\/\/ model accepts batches of image data as input.\n\ttensor, err := makeTensorFromImageForInception(testImageFilename)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\toutput, err := session.Run(\n\t\tmap[tf.Output]*tf.Tensor{\n\t\t\tgraph.Operation(\"input\").Output(0): tensor,\n\t\t},\n\t\t[]tf.Output{\n\t\t\tgraph.Operation(\"output\").Output(0),\n\t\t},\n\t\tnil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t\/\/ output[0].Value() is a vector containing probabilities of\n\t\/\/ labels for each image in the \"batch\". The batch size was 1.\n\t\/\/ Find the most probably label index.\n\tprobabilities := output[0].Value().([][]float32)[0]\n\tprintBestLabel(probabilities, labelsFile)\n}\n\nfunc printBestLabel(probabilities []float32, labelsFile string) {\n\tbestIdx := 0\n\tfor i, p := range probabilities {\n\t\tif p > probabilities[bestIdx] {\n\t\t\tbestIdx = i\n\t\t}\n\t}\n\t\/\/ Found a best match, now read the string from the labelsFile where\n\t\/\/ there is one line per label.\n\tfile, err := os.Open(labelsFile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer file.Close()\n\tscanner := bufio.NewScanner(file)\n\tvar labels []string\n\tfor scanner.Scan() {\n\t\tlabels = append(labels, scanner.Text())\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Printf(\"ERROR: failed to read %s: %v\", labelsFile, err)\n\t}\n\tfmt.Printf(\"BEST MATCH: (%2.0f%% likely) %s\\n\", probabilities[bestIdx]*100.0, labels[bestIdx])\n}\n\n\/\/ Given an image stored in filename, returns a Tensor which is suitable for\n\/\/ providing the image data to the pre-defined model.\nfunc makeTensorFromImageForInception(filename string) (*tf.Tensor, error) {\n\tconst (\n\t\t\/\/ Some constants specific to the pre-trained model at:\n\t\t\/\/ https:\/\/storage.googleapis.com\/download.tensorflow.org\/models\/inception5h.zip\n\t\t\/\/\n\t\t\/\/ - The model was trained after with images scaled to 224x224 pixels.\n\t\t\/\/ - The colors, represented as R, G, B in 1-byte each were converted to\n\t\t\/\/ float using (value - Mean)\/Std.\n\t\t\/\/\n\t\t\/\/ If using a different pre-trained model, the values will have to be adjusted.\n\t\tH, W = 224, 224\n\t\tMean = 117\n\t\tStd = float32(1)\n\t)\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\timg, _, err := image.Decode(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsz := img.Bounds().Size()\n\tif sz.X != W || sz.Y != H {\n\t\treturn nil, fmt.Errorf(\"input image is required to be %dx%d pixels, was %dx%d\", W, H, sz.X, sz.Y)\n\t}\n\t\/\/ 4-dimensional input:\n\t\/\/ - 1st dimension: Batch size (the model takes a batch of images as\n\t\/\/ input, here the \"batch size\" is 1)\n\t\/\/ - 2nd dimension: Rows of the image\n\t\/\/ - 3rd dimension: Columns of the row\n\t\/\/ - 4th dimension: Colors of the pixel as (B, G, R)\n\t\/\/ Thus, the shape is [1, 224, 224, 3]\n\tvar ret [1][H][W][3]float32\n\tfor y := 0; y < H; y++ {\n\t\tfor x := 0; x < W; x++ {\n\t\t\tpx := x + img.Bounds().Min.X\n\t\t\tpy := y + img.Bounds().Min.Y\n\t\t\tr, g, b, _ := img.At(px, py).RGBA()\n\t\t\tret[0][y][x][0] = float32((int(b>>8) - Mean)) \/ Std\n\t\t\tret[0][y][x][1] = float32((int(g>>8) - Mean)) \/ Std\n\t\t\tret[0][y][x][2] = float32((int(r>>8) - Mean)) \/ Std\n\t\t}\n\t}\n\treturn tf.NewTensor(ret)\n}\n<|endoftext|>"} {"text":"<commit_before>package sqlbuilder\n\nimport (\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestLiteralConvert(t *testing.T) {\n\ta := assert.New(t)\n\ttype testcase struct {\n\t\tin literal\n\t\tout interface{}\n\t\terr bool\n\t}\n\tvar cases = []testcase{\n\t\t{toLiteral(int(10)), int64(10), false},\n\t\t{toLiteral(int64(10)), int64(10), false},\n\t\t{toLiteral(uint(10)), uint64(10), false},\n\t\t{toLiteral(uint64(10)), uint64(10), false},\n\t\t{toLiteral(float32(10)), float64(10), false},\n\t\t{toLiteral(float64(10)), float64(10), false},\n\t\t{toLiteral(bool(true)), bool(true), false},\n\t\t{toLiteral([]byte{0x11}), []byte{0x11}, false},\n\t\t{toLiteral(string(\"makise-kurisu\")), string(\"makise-kurisu\"), false},\n\t\t{toLiteral(time.Unix(0, 0)), time.Unix(0, 0), false},\n\t\t{toLiteral(nil), nil, false},\n\t\t{toLiteral(complex(0, 0)), nil, true},\n\t}\n\n\tfor _, c := range cases {\n\t\tval, err := c.in.(*literalImpl).converted()\n\t\ta.Equal(c.out, val)\n\t\tif c.err {\n\t\t\ta.Error(err)\n\t\t} else {\n\t\t\ta.NoError(err)\n\t\t}\n\t}\n}\n\nfunc TestLiteralString(t *testing.T) {\n\ta := assert.New(t)\n\ttype testcase struct {\n\t\tin literal\n\t\tout string\n\t\terr bool\n\t}\n\tvar cases = []testcase{\n\t\t{toLiteral(int(10)), \"10\", false},\n\t\t{toLiteral(int64(10)), \"10\", false},\n\t\t{toLiteral(uint(10)), \"10\", false},\n\t\t{toLiteral(uint64(10)), \"10\", false},\n\t\t{toLiteral(float32(10)), \"10.0000000000\", false},\n\t\t{toLiteral(float64(10)), \"10.0000000000\", false},\n\t\t{toLiteral(bool(true)), \"true\", false},\n\t\t{toLiteral([]byte{0x11}), string([]byte{0x11}), false},\n\t\t{toLiteral(string(\"shibuya-rin\")), \"shibuya-rin\", false},\n\t\t{toLiteral(time.Unix(0, 0).UTC()), \"1970-01-01 00:00:00\", false},\n\t\t{toLiteral(nil), \"NULL\", false},\n\t\t{toLiteral(complex(0, 0)), \"\", true},\n\t}\n\n\tfor _, c := range cases {\n\t\tval := c.in.(*literalImpl).string()\n\t\ta.Equal(c.out, val)\n\t}\n}\n\nfunc TestLiteralIsNil(t *testing.T) {\n\ta := assert.New(t)\n\ttype testcase struct {\n\t\tin literal\n\t\tout bool\n\t}\n\tvar cases = []testcase{\n\t\t{toLiteral(int(10)), false},\n\t\t{toLiteral([]byte{}), false},\n\t\t{toLiteral(nil), true},\n\t\t{toLiteral([]byte(nil)), true},\n\t}\n\n\tfor _, c := range cases {\n\t\tisnil := c.in.IsNil()\n\t\ta.Equal(c.out, isnil)\n\t}\n}\n<commit_msg>Fixed test case miss.<commit_after>package sqlbuilder\n\nimport (\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestLiteralConvert(t *testing.T) {\n\ta := assert.New(t)\n\ttype testcase struct {\n\t\tin literal\n\t\tout interface{}\n\t\terr bool\n\t}\n\tvar cases = []testcase{\n\t\t{toLiteral(int(10)), int64(10), false},\n\t\t{toLiteral(int64(10)), int64(10), false},\n\t\t{toLiteral(uint(10)), int64(10), false},\n\t\t{toLiteral(uint64(10)), int64(10), false},\n\t\t{toLiteral(float32(10)), float64(10), false},\n\t\t{toLiteral(float64(10)), float64(10), false},\n\t\t{toLiteral(bool(true)), bool(true), false},\n\t\t{toLiteral([]byte{0x11}), []byte{0x11}, false},\n\t\t{toLiteral(string(\"makise-kurisu\")), string(\"makise-kurisu\"), false},\n\t\t{toLiteral(time.Unix(0, 0)), time.Unix(0, 0), false},\n\t\t{toLiteral(nil), nil, false},\n\t\t{toLiteral(complex(0, 0)), nil, true},\n\t}\n\n\tfor _, c := range cases {\n\t\tval, err := c.in.(*literalImpl).converted()\n\t\ta.Equal(c.out, val)\n\t\tif c.err {\n\t\t\ta.Error(err)\n\t\t} else {\n\t\t\ta.NoError(err)\n\t\t}\n\t}\n}\n\nfunc TestLiteralString(t *testing.T) {\n\ta := assert.New(t)\n\ttype testcase struct {\n\t\tin literal\n\t\tout string\n\t\terr bool\n\t}\n\tvar cases = []testcase{\n\t\t{toLiteral(int(10)), \"10\", false},\n\t\t{toLiteral(int64(10)), \"10\", false},\n\t\t{toLiteral(uint(10)), \"10\", false},\n\t\t{toLiteral(uint64(10)), \"10\", false},\n\t\t{toLiteral(float32(10)), \"10.0000000000\", false},\n\t\t{toLiteral(float64(10)), \"10.0000000000\", false},\n\t\t{toLiteral(bool(true)), \"true\", false},\n\t\t{toLiteral([]byte{0x11}), string([]byte{0x11}), false},\n\t\t{toLiteral(string(\"shibuya-rin\")), \"shibuya-rin\", false},\n\t\t{toLiteral(time.Unix(0, 0).UTC()), \"1970-01-01 00:00:00\", false},\n\t\t{toLiteral(nil), \"NULL\", false},\n\t\t{toLiteral(complex(0, 0)), \"\", true},\n\t}\n\n\tfor _, c := range cases {\n\t\tval := c.in.(*literalImpl).string()\n\t\ta.Equal(c.out, val)\n\t}\n}\n\nfunc TestLiteralIsNil(t *testing.T) {\n\ta := assert.New(t)\n\ttype testcase struct {\n\t\tin literal\n\t\tout bool\n\t}\n\tvar cases = []testcase{\n\t\t{toLiteral(int(10)), false},\n\t\t{toLiteral([]byte{}), false},\n\t\t{toLiteral(nil), true},\n\t\t{toLiteral([]byte(nil)), true},\n\t}\n\n\tfor _, c := range cases {\n\t\tisnil := c.in.IsNil()\n\t\ta.Equal(c.out, isnil)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\/\/ Extended and bugfixes by Miek Gieben\n\n\/\/ Package dns implements a full featured interface to the DNS.\n\/\/ The package allows complete control over what is send out to the DNS. \n\/\/\n\/\/ Resource records are native types. They are not stored in wire format.\n\/\/ Basic usage pattern for creating a new resource record:\n\/\/\n\/\/ r := new(RR_TXT)\n\/\/ r.Hdr = RR_Header{Name: \"a.miek.nl\", Rrtype: TypeTXT, Class: ClassINET, Ttl: 3600}\n\/\/ r.TXT = \"This is the content of the TXT record\"\n\/\/ \n\/\/ The package dns supports querying, incoming\/outgoing Axfr\/Ixfr, TSIG, EDNS0,\n\/\/ dynamic updates, notifies and DNSSEC validation\/signing.\n\/\/\n\/\/ Querying the DNS is done by using a Resolver structure. Basic use pattern for creating \n\/\/ a resolver:\n\/\/\n\/\/ res := new(Resolver)\n\/\/ res.Servers = []string{\"127.0.0.1\"} \n\/\/ m := new(Msg)\n\/\/ m.MsgHdr.Recursion_desired = true\n\/\/ m.Question = make([]Question, 1)\n\/\/ m.Question[0] = Question{\"miek.nl\", TypeSOA, ClassINET}\n\/\/ in, err := res.Query(m)\n\/\/\n\/\/ Server side programming is also supported.\n\/\/ Basic use pattern for creating an UDP DNS server:\n\/\/\n\/\/ func handle(d *dns.Conn, i *dns.Msg) { \/* handle request *\/ }\n\/\/\n\/\/ func listen(addr string, e chan os.Error) {\n\/\/ err := dns.ListenAndServeUDP(addr, handle)\n\/\/ e <- err\n\/\/ }\n\/\/ err := make(chan os.Error)\n\/\/ go listen(\"127.0.0.1:8053\", err)\n\/\/\npackage dns\n\nimport (\n \"io\"\n\t\"os\"\n\t\"net\"\n\t\"strconv\"\n)\n\nconst (\n\tYear68 = 2 << (32 - 1) \/\/ For RFC1982 (Serial Arithmetic) calculations in 32 bits.\n\tDefaultMsgSize = 4096 \/\/ A standard default for larger than 512 packets.\n\tMaxMsgSize = 65536 \/\/ Largest possible DNS packet.\n\tDefaultTTL = 3600 \/\/ Default Ttl.\n)\n\n\/\/ Error represents a DNS error\ntype Error struct {\n\tError string\n\tName string\n\tServer net.Addr\n\tTimeout bool\n}\n\nfunc (e *Error) String() string {\n\tif e == nil {\n\t\treturn \"<nil>\"\n\t}\n\treturn e.Error\n}\n\n\/\/ A Conn is the lowest primative in the dns package.\n\/\/ A Conn holds both the UDP and TCP connection, but only one\n\/\/ can be active any given time. \ntype Conn struct {\n\t\/\/ The current UDP connection.\n\tUDP *net.UDPConn\n\n\t\/\/ The current TCP connection.\n\tTCP *net.TCPConn\n\n\t\/\/ The remote side of the connection.\n\tAddr net.Addr\n\n\t\/\/ The remote port number of the connection.\n\tPort int\n\n\t\/\/ If TSIG is used, this holds all the information.\n\tTsig *Tsig\n\n\t\/\/ Timeout in sec before giving up on a connection.\n\tTimeout int\n\n\t\/\/ Number of attempts to try to Read\/Write from\/to a\n\t\/\/ connection.\n\tAttempts int\n}\n\n\/\/ Dial connects to the remote address raddr on the network net.\n\/\/ If the string laddr is not empty, it is used as the local address\n\/\/ for the connection. Any errors are return in err otherwise err is nil.\nfunc Dial(n, laddr, raddr string) (*Conn, os.Error) {\n d := new(Conn)\n c, err := net.Dial(n, laddr, raddr)\n if err != nil {\n return nil, err\n }\n switch n {\n case \"tcp\":\n d.TCP = c.(*net.TCPConn)\n d.Addr = d.TCP.RemoteAddr()\n d.Port = d.TCP.RemoteAddr().(*net.TCPAddr).Port\n case \"udp\":\n d.UDP = c.(*net.UDPConn)\n d.Addr = d.UDP.RemoteAddr()\n d.Port = d.UDP.RemoteAddr().(*net.UDPAddr).Port\n }\n return d, nil\n}\n\n\/\/ Fill in a Conn from a TCPConn\nfunc (d *Conn) SetTCPConn(l *net.TCPConn, a net.Addr) {\n d.TCP = l\n d.UDP = nil\n if a == nil {\n d.Addr = l.RemoteAddr()\n }\n d.Port = d.Addr.(*net.TCPAddr).Port\n}\n\n\/\/ Fill in a Conn from a UDPConn\nfunc (d *Conn) SetUDPConn(l *net.UDPConn, a net.Addr) {\n d.TCP = nil\n d.UDP = l\n if a == nil {\n d.Addr = l.RemoteAddr()\n }\n d.Port = d.Addr.(*net.UDPAddr).Port\n}\n\n\/\/ Create a new buffer of the appropiate size. With\n\/\/ TCP the buffer is 64K, with UDP the returned buffer\n\/\/ has a length of 4K bytes.\nfunc (d *Conn) NewBuffer() []byte {\n\tif d.TCP != nil {\n\t\tb := make([]byte, MaxMsgSize)\n\t\treturn b\n\t}\n\tif d.UDP != nil {\n\t\tb := make([]byte, DefaultMsgSize)\n\t\treturn b\n\t}\n\treturn nil\n}\n\n\/\/ ReadMsg reads a dns message m from d.\n\/\/ Any errors of the underlaying Read call are returned.\nfunc (d *Conn) ReadMsg(m *Msg) os.Error {\n\tin := d.NewBuffer()\n\tn, err := d.Read(in)\n\tif err != nil {\n\t\treturn err\n\t}\n\tin = in[:n]\n\tok := m.Unpack(in)\n\tif !ok {\n return ErrUnpack\n\t}\n\treturn nil\n}\n\n\/\/ WriteMsg writes dns message m to d.\n\/\/ Any errors of the underlaying Write call are returned.\nfunc (d *Conn) WriteMsg(m *Msg) os.Error {\n\tout, ok := m.Pack()\n\tif !ok {\n\t\treturn ErrPack\n\t}\n\t_, err := d.Write(out)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Read implements the standard Read interface:\n\/\/ it reads from d. If there was an error\n\/\/ reading that error is returned; otherwise err is nil.\nfunc (d *Conn) Read(p []byte) (n int, err os.Error) {\n\tif d.UDP != nil && d.TCP != nil {\n\t\treturn 0, ErrConn\n\t}\n\tswitch {\n\tcase d.UDP != nil:\n\t\tvar addr net.Addr\n\t\tn, addr, err = d.UDP.ReadFromUDP(p)\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\td.Addr = addr\n\t\td.Port = addr.(*net.UDPAddr).Port\n\tcase d.TCP != nil:\n\t\tif len(p) < 1 {\n\t\t\treturn 0, io.ErrShortBuffer\n\t\t}\n\t\tn, err = d.TCP.Read(p[0:2])\n\t\tif err != nil || n != 2 {\n\t\t\treturn n, err\n\t\t}\n\t\td.Addr = d.TCP.RemoteAddr()\n\t\td.Port = d.TCP.RemoteAddr().(*net.TCPAddr).Port\n\t\tl, _ := unpackUint16(p[0:2], 0)\n\t\tif l == 0 {\n\t\t\treturn 0, ErrShortRead\n\t\t}\n\t\tif int(l) > len(p) {\n\t\t\treturn int(l), io.ErrShortBuffer\n\t\t}\n\t\tn, err = d.TCP.Read(p[:l])\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\ti := n\n\t\tfor i < int(l) {\n\t\t\tj, err := d.TCP.Read(p[i:int(l)])\n\t\t\tif err != nil {\n\t\t\t\treturn i, err\n\t\t\t}\n\t\t\ti += j\n\t\t}\n\t\tn = i\n\t}\n\tif d.Tsig != nil {\n\t\t\/\/ Check the TSIG that we should be read\n\t\t_, err = d.Tsig.Verify(p)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ Write implements the standard Write interface:\n\/\/ It write data to d. If there was an error writing\n\/\/ that error is returned; otherwise err is nil\nfunc (d *Conn) Write(p []byte) (n int, err os.Error) {\n\tif d.UDP != nil && d.TCP != nil {\n\t\treturn 0, ErrConn\n\t}\n\n\tvar attempts int\n\tvar q []byte\n\tif d.Attempts == 0 {\n\t\tattempts = 1\n\t} else {\n\t\tattempts = d.Attempts\n\t}\n\td.SetTimeout()\n\tif d.Tsig != nil {\n\t\t\/\/ Create a new buffer with the TSIG added.\n\t\tq, err = d.Tsig.Generate(p)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t} else {\n\t\tq = p\n\t}\n\n\tswitch {\n\tcase d.UDP != nil:\n\t\tfor a := 0; a < attempts; a++ {\n\t\t\tn, err = d.UDP.WriteTo(q, d.Addr)\n\t\t\tif err != nil {\n\t\t\t\tif e, ok := err.(net.Error); ok && e.Timeout() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\tcase d.TCP != nil:\n\t\tfor a := 0; a < attempts; a++ {\n\t\t\tl := make([]byte, 2)\n\t\t\tl[0], l[1] = packUint16(uint16(len(q)))\n\t\t\tn, err = d.TCP.Write(l)\n\t\t\tif err != nil {\n\t\t\t\tif e, ok := err.(net.Error); ok && e.Timeout() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treturn n, err\n\t\t\t}\n\t\t\tif n != 2 {\n\t\t\t\treturn n, io.ErrShortWrite\n\t\t\t}\n\t\t\tn, err = d.TCP.Write(q)\n\t\t\tif err != nil {\n\t\t\t\tif e, ok := err.(net.Error); ok && e.Timeout() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treturn n, err\n\t\t\t}\n\t\t\ti := n\n\t\t\tif i < len(q) {\n\t\t\t\tj, err := d.TCP.Write(q[i:len(q)])\n\t\t\t\tif err != nil {\n\t\t\t\t\tif e, ok := err.(net.Error); ok && e.Timeout() {\n\t\t\t\t\t\t\/\/ We are half way in our write...\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\treturn i, err\n\t\t\t\t}\n\t\t\t\ti += j\n\t\t\t}\n\t\t\tn = i\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ Close closes the connection in d. Possible\n\/\/ errors are returned in err; otherwise it is nil.\nfunc (d *Conn) Close() (err os.Error) {\n\tif d.UDP != nil && d.TCP != nil {\n\t\treturn ErrConn\n\t}\n\tswitch {\n\tcase d.UDP != nil:\n\t\terr = d.UDP.Close()\n\tcase d.TCP != nil:\n\t\terr = d.TCP.Close()\n\t}\n\treturn\n}\n\n\/\/ SetTimeout sets the timeout of the socket\n\/\/ that is contained in d.\nfunc (d *Conn) SetTimeout() (err os.Error) {\n\tvar sec int64\n\tif d.UDP != nil && d.TCP != nil {\n\t\treturn ErrConn\n\t}\n\tsec = int64(d.Timeout)\n\tif sec == 0 {\n\t\tsec = 1\n\t}\n\tif d.UDP != nil {\n\t\terr = d.TCP.SetTimeout(sec * 1e9)\n\t}\n\tif d.TCP != nil {\n\t\terr = d.TCP.SetTimeout(sec * 1e9)\n\t}\n\treturn\n}\n\n\/\/ Exchange combines a Write and a Read.\n\/\/ First the request is written to d and then it waits\n\/\/ for a reply with Read. \n\/\/ If nosend is true, the write is skipped.\nfunc (d *Conn) Exchange(request []byte, nosend bool) (reply []byte, err os.Error) {\n\tvar n int\n\tif !nosend {\n\t\tn, err = d.Write(request)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\t\/\/ Layer violation to save memory. Its okay then...\n\treply = d.NewBuffer()\n\tn, err = d.Read(reply)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treply = reply[:n]\n\treturn\n}\n\ntype RR interface {\n\tHeader() *RR_Header\n\tString() string\n}\n\n\/\/ An RRset is a slice of RRs.\ntype RRset []RR\n\nfunc (r RRset) Len() int { return len(r) }\nfunc (r RRset) Less(i, j int) bool { return r[i].Header().Name < r[j].Header().Name }\nfunc (r RRset) Swap(i, j int) { r[i], r[j] = r[j], r[i] }\n\n\/\/ Check if the RRset is RFC 2181 compliant\nfunc (r RRset) Ok() bool {\n\tttl := r[0].Header().Ttl\n\tname := r[0].Header().Name\n\tclass := r[0].Header().Class\n\tfor _, rr := range r[1:] {\n\t\tif rr.Header().Ttl != ttl {\n\t\t\treturn false\n\t\t}\n\t\tif rr.Header().Name != name {\n\t\t\treturn false\n\t\t}\n\t\tif rr.Header().Class != class {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ DNS resource records.\n\/\/ There are many types of messages,\n\/\/ but they all share the same header.\ntype RR_Header struct {\n\tName string \"domain-name\"\n\tRrtype uint16\n\tClass uint16\n\tTtl uint32\n\tRdlength uint16 \/\/ length of data after header\n}\n\nfunc (h *RR_Header) Header() *RR_Header {\n\treturn h\n}\n\nfunc (h *RR_Header) String() string {\n\tvar s string\n\n\tif h.Rrtype == TypeOPT {\n\t\ts = \";\"\n\t\t\/\/ and maybe other things\n\t}\n\n\tif len(h.Name) == 0 {\n\t\ts += \".\\t\"\n\t} else {\n\t\ts += h.Name + \"\\t\"\n\t}\n\ts = s + strconv.Itoa(int(h.Ttl)) + \"\\t\"\n\n\tif _, ok := Class_str[h.Class]; ok {\n\t\ts += Class_str[h.Class] + \"\\t\"\n\t} else {\n\t\ts += \"CLASS\" + strconv.Itoa(int(h.Class)) + \"\\t\"\n\t}\n\n\tif _, ok := Rr_str[h.Rrtype]; ok {\n\t\ts += Rr_str[h.Rrtype] + \"\\t\"\n\t} else {\n\t\ts += \"TYPE\" + strconv.Itoa(int(h.Rrtype)) + \"\\t\"\n\t}\n\treturn s\n}\n\n\/\/ Return the number of labels in a domain name.\nfunc LabelCount(a string) (c uint8) {\n\t\/\/ walk the string and count the dots\n\t\/\/ except when it is escaped\n\tesc := false\n\tfor _, v := range a {\n\t\tswitch v {\n\t\tcase '.':\n\t\t\tif esc {\n\t\t\t\tesc = !esc\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc++\n\t\tcase '\\\\':\n\t\t\tesc = true\n\t\t}\n\t}\n\treturn\n}\n<commit_msg>doc updates<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\/\/ Extended and bugfixes by Miek Gieben\n\n\/\/ Package dns implements a full featured interface to the DNS.\n\/\/ The package allows complete control over what is send out to the DNS. \n\/\/\n\/\/ Resource records are native types. They are not stored in wire format.\n\/\/ Basic usage pattern for creating a new resource record:\n\/\/\n\/\/ r := new(RR_TXT)\n\/\/ r.Hdr = RR_Header{Name: \"a.miek.nl\", Rrtype: TypeTXT, Class: ClassINET, Ttl: 3600}\n\/\/ r.TXT = \"This is the content of the TXT record\"\n\/\/ \n\/\/ The package dns supports querying, incoming\/outgoing Axfr\/Ixfr, TSIG, EDNS0,\n\/\/ dynamic updates, notifies and DNSSEC validation\/signing.\n\/\/\n\/\/ Querying the DNS is done by using a Resolver structure. Basic use pattern for creating \n\/\/ a resolver:\n\/\/\n\/\/ res := new(Resolver)\n\/\/ res.Servers = []string{\"127.0.0.1\"} \n\/\/ m := new(Msg)\n\/\/ m.MsgHdr.Recursion_desired = true\n\/\/ m.Question = make([]Question, 1)\n\/\/ m.Question[0] = Question{\"miek.nl\", TypeSOA, ClassINET}\n\/\/ in, err := res.Query(m)\n\/\/\n\/\/ Server side programming is also supported.\n\/\/ Basic use pattern for creating an UDP DNS server:\n\/\/\n\/\/ func handle(d *dns.Conn, i *dns.Msg) { \/* handle request *\/ }\n\/\/\n\/\/ func listen(addr string, e chan os.Error) {\n\/\/ err := dns.ListenAndServeUDP(addr, handle)\n\/\/ e <- err\n\/\/ }\n\/\/ err := make(chan os.Error)\n\/\/ go listen(\"127.0.0.1:8053\", err)\n\/\/\npackage dns\n\nimport (\n \"io\"\n\t\"os\"\n\t\"net\"\n\t\"strconv\"\n)\n\nconst (\n\tYear68 = 2 << (32 - 1) \/\/ For RFC1982 (Serial Arithmetic) calculations in 32 bits.\n\tDefaultMsgSize = 4096 \/\/ A standard default for larger than 512 packets.\n\tMaxMsgSize = 65536 \/\/ Largest possible DNS packet.\n\tDefaultTTL = 3600 \/\/ Default Ttl.\n)\n\n\/\/ Error represents a DNS error\ntype Error struct {\n\tError string\n\tName string\n\tServer net.Addr\n\tTimeout bool\n}\n\nfunc (e *Error) String() string {\n\tif e == nil {\n\t\treturn \"<nil>\"\n\t}\n\treturn e.Error\n}\n\n\/\/ A Conn is the lowest primative in the dns package.\n\/\/ A Conn holds both the UDP and TCP connection, but only one\n\/\/ can be active any given time. \ntype Conn struct {\n\t\/\/ The current UDP connection.\n\tUDP *net.UDPConn\n\n\t\/\/ The current TCP connection.\n\tTCP *net.TCPConn\n\n\t\/\/ The remote side of the connection.\n\tAddr net.Addr\n\n\t\/\/ The remote port number of the connection.\n\tPort int\n\n\t\/\/ If TSIG is used, this holds all the information.\n\tTsig *Tsig\n\n\t\/\/ Timeout in sec before giving up on a connection.\n\tTimeout int\n\n\t\/\/ Number of attempts to try to Read\/Write from\/to a\n\t\/\/ connection.\n\tAttempts int\n}\n\n\/\/ Dial connects to the remote address raddr on the network net.\n\/\/ If the string laddr is not empty, it is used as the local address\n\/\/ for the connection. Any errors are return in err otherwise err is nil.\nfunc Dial(n, laddr, raddr string) (*Conn, os.Error) {\n d := new(Conn)\n c, err := net.Dial(n, laddr, raddr)\n if err != nil {\n return nil, err\n }\n switch n {\n case \"tcp\":\n d.TCP = c.(*net.TCPConn)\n d.Addr = d.TCP.RemoteAddr()\n d.Port = d.TCP.RemoteAddr().(*net.TCPAddr).Port\n case \"udp\":\n d.UDP = c.(*net.UDPConn)\n d.Addr = d.UDP.RemoteAddr()\n d.Port = d.UDP.RemoteAddr().(*net.UDPAddr).Port\n }\n return d, nil\n}\n\n\/\/ Fill in a Conn from a TCPConn. If a is nil, the \n\/\/ remote address in the connection is used.\nfunc (d *Conn) SetTCPConn(l *net.TCPConn, a net.Addr) {\n d.TCP = l\n d.UDP = nil\n if a == nil {\n d.Addr = l.RemoteAddr()\n }\n d.Port = d.Addr.(*net.TCPAddr).Port\n}\n\n\/\/ Fill in a Conn from a TCPConn. If a is nil, the \n\/\/ remote address in the connection is used.\nfunc (d *Conn) SetUDPConn(l *net.UDPConn, a net.Addr) {\n d.TCP = nil\n d.UDP = l\n if a == nil {\n d.Addr = l.RemoteAddr()\n }\n d.Port = d.Addr.(*net.UDPAddr).Port\n}\n\n\/\/ Create a new buffer of the appropiate size. With\n\/\/ TCP the buffer is 64K, with UDP the returned buffer\n\/\/ has a length of 4K bytes.\nfunc (d *Conn) NewBuffer() []byte {\n\tif d.TCP != nil {\n\t\tb := make([]byte, MaxMsgSize)\n\t\treturn b\n\t}\n\tif d.UDP != nil {\n\t\tb := make([]byte, DefaultMsgSize)\n\t\treturn b\n\t}\n\treturn nil\n}\n\n\/\/ ReadMsg reads a dns message m from d.\n\/\/ Any errors of the underlaying Read call are returned.\nfunc (d *Conn) ReadMsg(m *Msg) os.Error {\n\tin := d.NewBuffer()\n\tn, err := d.Read(in)\n\tif err != nil {\n\t\treturn err\n\t}\n\tin = in[:n]\n\tok := m.Unpack(in)\n\tif !ok {\n return ErrUnpack\n\t}\n\treturn nil\n}\n\n\/\/ WriteMsg writes dns message m to d.\n\/\/ Any errors of the underlaying Write call are returned.\nfunc (d *Conn) WriteMsg(m *Msg) os.Error {\n\tout, ok := m.Pack()\n\tif !ok {\n\t\treturn ErrPack\n\t}\n\t_, err := d.Write(out)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Read implements the standard Read interface:\n\/\/ it reads from d. If there was an error\n\/\/ reading that error is returned; otherwise err is nil.\nfunc (d *Conn) Read(p []byte) (n int, err os.Error) {\n\tif d.UDP != nil && d.TCP != nil {\n\t\treturn 0, ErrConn\n\t}\n\tswitch {\n\tcase d.UDP != nil:\n\t\tvar addr net.Addr\n\t\tn, addr, err = d.UDP.ReadFromUDP(p)\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\td.Addr = addr\n\t\td.Port = addr.(*net.UDPAddr).Port\n\tcase d.TCP != nil:\n\t\tif len(p) < 1 {\n\t\t\treturn 0, io.ErrShortBuffer\n\t\t}\n\t\tn, err = d.TCP.Read(p[0:2])\n\t\tif err != nil || n != 2 {\n\t\t\treturn n, err\n\t\t}\n\t\td.Addr = d.TCP.RemoteAddr()\n\t\td.Port = d.TCP.RemoteAddr().(*net.TCPAddr).Port\n\t\tl, _ := unpackUint16(p[0:2], 0)\n\t\tif l == 0 {\n\t\t\treturn 0, ErrShortRead\n\t\t}\n\t\tif int(l) > len(p) {\n\t\t\treturn int(l), io.ErrShortBuffer\n\t\t}\n\t\tn, err = d.TCP.Read(p[:l])\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\ti := n\n\t\tfor i < int(l) {\n\t\t\tj, err := d.TCP.Read(p[i:int(l)])\n\t\t\tif err != nil {\n\t\t\t\treturn i, err\n\t\t\t}\n\t\t\ti += j\n\t\t}\n\t\tn = i\n\t}\n\tif d.Tsig != nil {\n\t\t\/\/ Check the TSIG that we should be read\n\t\t_, err = d.Tsig.Verify(p)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ Write implements the standard Write interface:\n\/\/ It write data to d. If there was an error writing\n\/\/ that error is returned; otherwise err is nil\nfunc (d *Conn) Write(p []byte) (n int, err os.Error) {\n\tif d.UDP != nil && d.TCP != nil {\n\t\treturn 0, ErrConn\n\t}\n\n\tvar attempts int\n\tvar q []byte\n\tif d.Attempts == 0 {\n\t\tattempts = 1\n\t} else {\n\t\tattempts = d.Attempts\n\t}\n\td.SetTimeout()\n\tif d.Tsig != nil {\n\t\t\/\/ Create a new buffer with the TSIG added.\n\t\tq, err = d.Tsig.Generate(p)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t} else {\n\t\tq = p\n\t}\n\n\tswitch {\n\tcase d.UDP != nil:\n\t\tfor a := 0; a < attempts; a++ {\n\t\t\tn, err = d.UDP.WriteTo(q, d.Addr)\n\t\t\tif err != nil {\n\t\t\t\tif e, ok := err.(net.Error); ok && e.Timeout() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\tcase d.TCP != nil:\n\t\tfor a := 0; a < attempts; a++ {\n\t\t\tl := make([]byte, 2)\n\t\t\tl[0], l[1] = packUint16(uint16(len(q)))\n\t\t\tn, err = d.TCP.Write(l)\n\t\t\tif err != nil {\n\t\t\t\tif e, ok := err.(net.Error); ok && e.Timeout() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treturn n, err\n\t\t\t}\n\t\t\tif n != 2 {\n\t\t\t\treturn n, io.ErrShortWrite\n\t\t\t}\n\t\t\tn, err = d.TCP.Write(q)\n\t\t\tif err != nil {\n\t\t\t\tif e, ok := err.(net.Error); ok && e.Timeout() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\treturn n, err\n\t\t\t}\n\t\t\ti := n\n\t\t\tif i < len(q) {\n\t\t\t\tj, err := d.TCP.Write(q[i:len(q)])\n\t\t\t\tif err != nil {\n\t\t\t\t\tif e, ok := err.(net.Error); ok && e.Timeout() {\n\t\t\t\t\t\t\/\/ We are half way in our write...\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\treturn i, err\n\t\t\t\t}\n\t\t\t\ti += j\n\t\t\t}\n\t\t\tn = i\n\t\t}\n\t}\n\treturn\n}\n\n\/\/ Close closes the connection in d. Possible\n\/\/ errors are returned in err; otherwise it is nil.\nfunc (d *Conn) Close() (err os.Error) {\n\tif d.UDP != nil && d.TCP != nil {\n\t\treturn ErrConn\n\t}\n\tswitch {\n\tcase d.UDP != nil:\n\t\terr = d.UDP.Close()\n\tcase d.TCP != nil:\n\t\terr = d.TCP.Close()\n\t}\n\treturn\n}\n\n\/\/ SetTimeout sets the timeout of the socket\n\/\/ that is contained in d.\nfunc (d *Conn) SetTimeout() (err os.Error) {\n\tvar sec int64\n\tif d.UDP != nil && d.TCP != nil {\n\t\treturn ErrConn\n\t}\n\tsec = int64(d.Timeout)\n\tif sec == 0 {\n\t\tsec = 1\n\t}\n\tif d.UDP != nil {\n\t\terr = d.TCP.SetTimeout(sec * 1e9)\n\t}\n\tif d.TCP != nil {\n\t\terr = d.TCP.SetTimeout(sec * 1e9)\n\t}\n\treturn\n}\n\n\/\/ Exchange combines a Write and a Read.\n\/\/ First the request is written to d and then it waits\n\/\/ for a reply with Read. \n\/\/ If nosend is true, the write is skipped.\nfunc (d *Conn) Exchange(request []byte, nosend bool) (reply []byte, err os.Error) {\n\tvar n int\n\tif !nosend {\n\t\tn, err = d.Write(request)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\t\/\/ Layer violation to save memory. Its okay then...\n\treply = d.NewBuffer()\n\tn, err = d.Read(reply)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treply = reply[:n]\n\treturn\n}\n\ntype RR interface {\n\tHeader() *RR_Header\n\tString() string\n}\n\n\/\/ An RRset is a slice of RRs.\ntype RRset []RR\n\nfunc (r RRset) Len() int { return len(r) }\nfunc (r RRset) Less(i, j int) bool { return r[i].Header().Name < r[j].Header().Name }\nfunc (r RRset) Swap(i, j int) { r[i], r[j] = r[j], r[i] }\n\n\/\/ Check if the RRset is RFC 2181 compliant\nfunc (r RRset) Ok() bool {\n\tttl := r[0].Header().Ttl\n\tname := r[0].Header().Name\n\tclass := r[0].Header().Class\n\tfor _, rr := range r[1:] {\n\t\tif rr.Header().Ttl != ttl {\n\t\t\treturn false\n\t\t}\n\t\tif rr.Header().Name != name {\n\t\t\treturn false\n\t\t}\n\t\tif rr.Header().Class != class {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ DNS resource records.\n\/\/ There are many types of messages,\n\/\/ but they all share the same header.\ntype RR_Header struct {\n\tName string \"domain-name\"\n\tRrtype uint16\n\tClass uint16\n\tTtl uint32\n\tRdlength uint16 \/\/ length of data after header\n}\n\nfunc (h *RR_Header) Header() *RR_Header {\n\treturn h\n}\n\nfunc (h *RR_Header) String() string {\n\tvar s string\n\n\tif h.Rrtype == TypeOPT {\n\t\ts = \";\"\n\t\t\/\/ and maybe other things\n\t}\n\n\tif len(h.Name) == 0 {\n\t\ts += \".\\t\"\n\t} else {\n\t\ts += h.Name + \"\\t\"\n\t}\n\ts = s + strconv.Itoa(int(h.Ttl)) + \"\\t\"\n\n\tif _, ok := Class_str[h.Class]; ok {\n\t\ts += Class_str[h.Class] + \"\\t\"\n\t} else {\n\t\ts += \"CLASS\" + strconv.Itoa(int(h.Class)) + \"\\t\"\n\t}\n\n\tif _, ok := Rr_str[h.Rrtype]; ok {\n\t\ts += Rr_str[h.Rrtype] + \"\\t\"\n\t} else {\n\t\ts += \"TYPE\" + strconv.Itoa(int(h.Rrtype)) + \"\\t\"\n\t}\n\treturn s\n}\n\n\/\/ Return the number of labels in a domain name.\nfunc LabelCount(a string) (c uint8) {\n\t\/\/ walk the string and count the dots\n\t\/\/ except when it is escaped\n\tesc := false\n\tfor _, v := range a {\n\t\tswitch v {\n\t\tcase '.':\n\t\t\tif esc {\n\t\t\t\tesc = !esc\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc++\n\t\tcase '\\\\':\n\t\t\tesc = true\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\/\/ Extensions of the original work are copyright (c) 2011 Miek Gieben\n\n\/\/ Package dns implements a full featured interface to the Domain Name System.\n\/\/ Server- and client-side programming is supported.\n\/\/ The package allows complete control over what is send out to the DNS. The package\n\/\/ API follows the less-is-more principle, by presenting a small, clean interface.\n\/\/\n\/\/ The package dns supports (asynchronous) querying\/replying, incoming\/outgoing AXFR\/IXFR,\n\/\/ TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation\/signing.\n\/\/ Note that domain names MUST be fully qualified, before sending them, unqualified\n\/\/ names in a message will result in a packing failure.\n\/\/\n\/\/ Resource records are native types. They are not stored in wire format.\n\/\/ Basic usage pattern for creating a new resource record:\n\/\/\n\/\/ r := new(dns.MX)\n\/\/ r.Hdr = dns.RR_Header{Name: \"miek.nl.\", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}\n\/\/ r.Pref = 10\n\/\/ r.Mx = \"mx.miek.nl.\"\n\/\/\n\/\/ Or directly from a string:\n\/\/\n\/\/ mx, err := dns.NewRR(\"miek.nl. 3600 IN MX 10 mx.miek.nl.\")\n\/\/\n\/\/ Or when the default TTL (3600) and class (IN) suit you:\n\/\/\n\/\/ mx, err := dns.NewRR(\"miek.nl. MX 10 mx.miek.nl.\")\n\/\/\n\/\/ Or even:\n\/\/\n\/\/ mx, err := dns.NewRR(\"$ORIGIN nl.\\nmiek 1H IN MX 10 mx.miek\")\n\/\/\n\/\/ In the DNS messages are exchanged, these messages contain resource\n\/\/ records (sets). Use pattern for creating a message:\n\/\/\n\/\/ m := dns.new(Msg)\n\/\/ m.SetQuestion(\"miek.nl.\", dns.TypeMX)\n\/\/\n\/\/ Or when not certain if the domain name is fully qualified:\n\/\/\n\/\/\tm.SetQuestion(dns.Fqdn(\"miek.nl\"), dns.TypeMX)\n\/\/\n\/\/ The message m is now a message with the question section set to ask\n\/\/ the MX records for the miek.nl. zone.\n\/\/\n\/\/ The following is slightly more verbose, but more flexible:\n\/\/\n\/\/ m1 := new(dns.Msg)\n\/\/ m1.Id = Id()\n\/\/ m1.RecursionDesired = true\n\/\/ m1.Question = make([]Question, 1)\n\/\/ m1.Question[0] = dns.Question{\"miek.nl.\", dns.TypeMX, dns.ClassINET}\n\/\/\n\/\/ After creating a message it can be send.\n\/\/ Basic use pattern for synchronous querying the DNS at a\n\/\/ server configured on 127.0.0.1 and port 53:\n\/\/\n\/\/ c := new(Client)\n\/\/ in, rtt, err := c.Exchange(m1, \"127.0.0.1:53\")\n\/\/\n\/\/ For asynchronous queries it is easy to wrap Exchange() in a goroutine.\n\/\/\n\/\/ A dns message consists out of four sections.\n\/\/ The question section: in.Question, the answer section: in.Answer,\n\/\/ the authority section: in.Ns and the additional section: in.Extra.\n\/\/\n\/\/ Each of these sections (except the Question section) contain a []RR. Basic\n\/\/ use pattern for accessing the rdata of a TXT RR as the first RR in\n\/\/ the Answer section:\n\/\/\n\/\/\tif t, ok := in.Answer[0].(*dns.TXT); ok {\n\/\/\t\t\/\/ do something with t.Txt\n\/\/\t}\npackage dns\n\nimport (\n\t\"net\"\n\t\"strconv\"\n)\n\nconst (\n\tyear68 = 1 << 31 \/\/ For RFC1982 (Serial Arithmetic) calculations in 32 bits.\n\tDefaultMsgSize = 4096 \/\/ Standard default for larger than 512 packets.\n\tudpMsgSize = 512 \/\/ Default buffer size for servers receiving UDP packets.\n\tMaxMsgSize = 65536 \/\/ Largest possible DNS packet.\n\tdefaultTtl = 3600 \/\/ Default TTL.\n)\n\n\/\/ Error represents a DNS error\ntype Error struct {\n\tErr string\n\tName string\n\tServer net.Addr\n\tTimeout bool\n}\n\nfunc (e *Error) Error() string {\n\tif e == nil {\n\t\treturn \"dns: <nil>\"\n\t}\n\tif e.Name == \"\" {\n\t\treturn \"dns: \" + e.Err\n\t}\n\treturn \"dns: \" + e.Name + \": \" + e.Err\n\n}\n\n\/\/ An RR represents a resource record.\ntype RR interface {\n\t\/\/ Header returns the header of an resource record. The header contains\n\t\/\/ everything up to the rdata.\n\tHeader() *RR_Header\n\t\/\/ String returns the text representation of the resource record.\n\tString() string\n\t\/\/ copy returns a copy of the RR\n\tcopy() RR\n\t\/\/ len returns the length (in octects) of the uncompressed RR in wire format.\n\tlen() int\n}\n\n\/\/ DNS resource records.\n\/\/ There are many types of RRs,\n\/\/ but they all share the same header.\ntype RR_Header struct {\n\tName string `dns:\"cdomain-name\"`\n\tRrtype uint16\n\tClass uint16\n\tTtl uint32\n\tRdlength uint16 \/\/ length of data after header\n}\n\nfunc (h *RR_Header) Header() *RR_Header { return h }\n\n\/\/ Just to imlement the RR interface\nfunc (h *RR_Header) copy() RR { return nil }\n\nfunc (h *RR_Header) copyHeader() *RR_Header {\n\tr := new(RR_Header)\n\tr.Name = h.Name\n\tr.Rrtype = h.Rrtype\n\tr.Class = h.Class\n\tr.Ttl = h.Ttl\n\tr.Rdlength = h.Rdlength\n\treturn r\n}\n\nfunc (h *RR_Header) String() string {\n\tvar s string\n\n\tif h.Rrtype == TypeOPT {\n\t\ts = \";\"\n\t\t\/\/ and maybe other things\n\t}\n\n\tif len(h.Name) == 0 {\n\t\ts += \".\\t\"\n\t} else {\n\t\ts += h.Name + \"\\t\"\n\t}\n\ts = s + strconv.FormatInt(int64(h.Ttl), 10) + \"\\t\"\n\n\tif _, ok := ClassToString[h.Class]; ok {\n\t\ts += ClassToString[h.Class] + \"\\t\"\n\t} else {\n\t\ts += \"CLASS\" + strconv.Itoa(int(h.Class)) + \"\\t\"\n\t}\n\n\tif _, ok := TypeToString[h.Rrtype]; ok {\n\t\ts += TypeToString[h.Rrtype] + \"\\t\"\n\t} else {\n\t\ts += \"TYPE\" + strconv.Itoa(int(h.Rrtype)) + \"\\t\"\n\t}\n\treturn s\n}\n\nfunc (h *RR_Header) len() int {\n\tl := len(h.Name) + 1\n\tl += 10 \/\/ rrtype(2) + class(2) + ttl(4) + rdlength(2)\n\treturn l\n}\n\n\/\/ find best matching pattern for zone\nfunc zoneMatch(pattern, zone string) (ok bool) {\n\tif len(pattern) == 0 {\n\t\treturn\n\t}\n\tif len(zone) == 0 {\n\t\tzone = \".\"\n\t}\n\t\/\/ pattern = Fqdn(pattern) \/\/ should already be a fqdn\n\tzone = Fqdn(zone)\n\ti := 0\n\tfor {\n\t\tok = pattern[len(pattern)-1-i] == zone[len(zone)-1-i]\n\t\ti++\n\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tif len(pattern)-1-i < 0 || len(zone)-1-i < 0 {\n\t\t\tbreak\n\t\t}\n\n\t}\n\treturn\n}\n\n\/\/ ToRFC3597 converts a known RR to the unknown RR representation\n\/\/ from RFC 3597.\nfunc (rr *RFC3597) ToRFC3597(r RR) error {\n\tbuf := make([]byte, r.len()*2)\n\toff, err := PackStruct(r, buf, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuf = buf[:off]\n\trawSetRdlength(buf, 0, off)\n\t_, err = UnpackStruct(rr, buf, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<commit_msg>Use new stuff here too<commit_after>\/\/ Copyright 2009 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\/\/ Extensions of the original work are copyright (c) 2011 Miek Gieben\n\n\/\/ Package dns implements a full featured interface to the Domain Name System.\n\/\/ Server- and client-side programming is supported.\n\/\/ The package allows complete control over what is send out to the DNS. The package\n\/\/ API follows the less-is-more principle, by presenting a small, clean interface.\n\/\/\n\/\/ The package dns supports (asynchronous) querying\/replying, incoming\/outgoing AXFR\/IXFR,\n\/\/ TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation\/signing.\n\/\/ Note that domain names MUST be fully qualified, before sending them, unqualified\n\/\/ names in a message will result in a packing failure.\n\/\/\n\/\/ Resource records are native types. They are not stored in wire format.\n\/\/ Basic usage pattern for creating a new resource record:\n\/\/\n\/\/ r := new(dns.MX)\n\/\/ r.Hdr = dns.RR_Header{Name: \"miek.nl.\", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600}\n\/\/ r.Pref = 10\n\/\/ r.Mx = \"mx.miek.nl.\"\n\/\/\n\/\/ Or directly from a string:\n\/\/\n\/\/ mx, err := dns.NewRR(\"miek.nl. 3600 IN MX 10 mx.miek.nl.\")\n\/\/\n\/\/ Or when the default TTL (3600) and class (IN) suit you:\n\/\/\n\/\/ mx, err := dns.NewRR(\"miek.nl. MX 10 mx.miek.nl.\")\n\/\/\n\/\/ Or even:\n\/\/\n\/\/ mx, err := dns.NewRR(\"$ORIGIN nl.\\nmiek 1H IN MX 10 mx.miek\")\n\/\/\n\/\/ In the DNS messages are exchanged, these messages contain resource\n\/\/ records (sets). Use pattern for creating a message:\n\/\/\n\/\/ m := dns.new(Msg)\n\/\/ m.SetQuestion(\"miek.nl.\", dns.TypeMX)\n\/\/\n\/\/ Or when not certain if the domain name is fully qualified:\n\/\/\n\/\/\tm.SetQuestion(dns.Fqdn(\"miek.nl\"), dns.TypeMX)\n\/\/\n\/\/ The message m is now a message with the question section set to ask\n\/\/ the MX records for the miek.nl. zone.\n\/\/\n\/\/ The following is slightly more verbose, but more flexible:\n\/\/\n\/\/ m1 := new(dns.Msg)\n\/\/ m1.Id = Id()\n\/\/ m1.RecursionDesired = true\n\/\/ m1.Question = make([]Question, 1)\n\/\/ m1.Question[0] = dns.Question{\"miek.nl.\", dns.TypeMX, dns.ClassINET}\n\/\/\n\/\/ After creating a message it can be send.\n\/\/ Basic use pattern for synchronous querying the DNS at a\n\/\/ server configured on 127.0.0.1 and port 53:\n\/\/\n\/\/ c := new(Client)\n\/\/ in, rtt, err := c.Exchange(m1, \"127.0.0.1:53\")\n\/\/\n\/\/ For asynchronous queries it is easy to wrap Exchange() in a goroutine.\n\/\/\n\/\/ A dns message consists out of four sections.\n\/\/ The question section: in.Question, the answer section: in.Answer,\n\/\/ the authority section: in.Ns and the additional section: in.Extra.\n\/\/\n\/\/ Each of these sections (except the Question section) contain a []RR. Basic\n\/\/ use pattern for accessing the rdata of a TXT RR as the first RR in\n\/\/ the Answer section:\n\/\/\n\/\/\tif t, ok := in.Answer[0].(*dns.TXT); ok {\n\/\/\t\t\/\/ do something with t.Txt\n\/\/\t}\npackage dns\n\nimport (\n\t\"net\"\n\t\"strconv\"\n)\n\nconst (\n\tyear68 = 1 << 31 \/\/ For RFC1982 (Serial Arithmetic) calculations in 32 bits.\n\tDefaultMsgSize = 4096 \/\/ Standard default for larger than 512 packets.\n\tudpMsgSize = 512 \/\/ Default buffer size for servers receiving UDP packets.\n\tMaxMsgSize = 65536 \/\/ Largest possible DNS packet.\n\tdefaultTtl = 3600 \/\/ Default TTL.\n)\n\n\/\/ Error represents a DNS error\ntype Error struct {\n\tErr string\n\tName string\n\tServer net.Addr\n\tTimeout bool\n}\n\nfunc (e *Error) Error() string {\n\tif e == nil {\n\t\treturn \"dns: <nil>\"\n\t}\n\tif e.Name == \"\" {\n\t\treturn \"dns: \" + e.Err\n\t}\n\treturn \"dns: \" + e.Name + \": \" + e.Err\n\n}\n\n\/\/ An RR represents a resource record.\ntype RR interface {\n\t\/\/ Header returns the header of an resource record. The header contains\n\t\/\/ everything up to the rdata.\n\tHeader() *RR_Header\n\t\/\/ String returns the text representation of the resource record.\n\tString() string\n\t\/\/ copy returns a copy of the RR\n\tcopy() RR\n\t\/\/ len returns the length (in octects) of the uncompressed RR in wire format.\n\tlen() int\n}\n\n\/\/ DNS resource records.\n\/\/ There are many types of RRs,\n\/\/ but they all share the same header.\ntype RR_Header struct {\n\tName string `dns:\"cdomain-name\"`\n\tRrtype uint16\n\tClass uint16\n\tTtl uint32\n\tRdlength uint16 \/\/ length of data after header\n}\n\nfunc (h *RR_Header) Header() *RR_Header { return h }\n\n\/\/ Just to imlement the RR interface\nfunc (h *RR_Header) copy() RR { return nil }\n\nfunc (h *RR_Header) copyHeader() *RR_Header {\n\tr := new(RR_Header)\n\tr.Name = h.Name\n\tr.Rrtype = h.Rrtype\n\tr.Class = h.Class\n\tr.Ttl = h.Ttl\n\tr.Rdlength = h.Rdlength\n\treturn r\n}\n\nfunc (h *RR_Header) String() string {\n\tvar s string\n\n\tif h.Rrtype == TypeOPT {\n\t\ts = \";\"\n\t\t\/\/ and maybe other things\n\t}\n\n\tif len(h.Name) == 0 {\n\t\ts += \".\\t\"\n\t} else {\n\t\ts += h.Name + \"\\t\"\n\t}\n\ts += strconv.FormatInt(int64(h.Ttl), 10) + \"\\t\"\n\ts += Class(h.Class).String() + \"\\t\"\n\ts += Type(h.Rrtype).String() + \"\\t\"\n\treturn s\n}\n\nfunc (h *RR_Header) len() int {\n\tl := len(h.Name) + 1\n\tl += 10 \/\/ rrtype(2) + class(2) + ttl(4) + rdlength(2)\n\treturn l\n}\n\n\/\/ find best matching pattern for zone\nfunc zoneMatch(pattern, zone string) (ok bool) {\n\tif len(pattern) == 0 {\n\t\treturn\n\t}\n\tif len(zone) == 0 {\n\t\tzone = \".\"\n\t}\n\t\/\/ pattern = Fqdn(pattern) \/\/ should already be a fqdn\n\tzone = Fqdn(zone)\n\ti := 0\n\tfor {\n\t\tok = pattern[len(pattern)-1-i] == zone[len(zone)-1-i]\n\t\ti++\n\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tif len(pattern)-1-i < 0 || len(zone)-1-i < 0 {\n\t\t\tbreak\n\t\t}\n\n\t}\n\treturn\n}\n\n\/\/ ToRFC3597 converts a known RR to the unknown RR representation\n\/\/ from RFC 3597.\nfunc (rr *RFC3597) ToRFC3597(r RR) error {\n\tbuf := make([]byte, r.len()*2)\n\toff, err := PackStruct(r, buf, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuf = buf[:off]\n\trawSetRdlength(buf, 0, off)\n\t_, err = UnpackStruct(rr, buf, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/**********************************************************\\\n| |\n| hprose |\n| |\n| Official WebSite: http:\/\/www.hprose.com\/ |\n| http:\/\/www.hprose.org\/ |\n| |\n\\**********************************************************\/\n\/**********************************************************\\\n * *\n * hprose\/doc.go *\n * *\n * hprose doc for Go. *\n * *\n * LastModified: May 26, 2015 *\n * Author: Ma Bingyao <andot@hprose.com> *\n * *\n\\**********************************************************\/\n\n\/*\nPackage hprose is a High Performance Remote Object Service Engine.\n\nIt is a modern, lightweight, cross-language, cross-platform, object-oriented, high performance, remote dynamic communication middleware. It is not only easy to use, but powerful. You just need a little time to learn, then you can use it to easily construct cross language cross platform distributed application system.\n\nHprose supports many programming languages, for example:\n\n* AAuto Quicker\n* ActionScript\n* ASP\n* C++\n* Dart\n* Delphi\/Free Pascal\n* dotNET(C#, Visual Basic...)\n* Golang\n* Java\n* JavaScript\n* Node.js\n* Objective-C\n* Perl\n* PHP\n* Python\n* Ruby\n* ...\n\nThrough Hprose, You can conveniently and efficiently intercommunicate between those programming languages.\n\nThis project is the implementation of Hprose for Golang.\n*\/\npackage hprose\n<commit_msg>Update doc<commit_after>\/**********************************************************\\\n| |\n| hprose |\n| |\n| Official WebSite: http:\/\/www.hprose.com\/ |\n| http:\/\/www.hprose.org\/ |\n| |\n\\**********************************************************\/\n\/**********************************************************\\\n * *\n * hprose\/doc.go *\n * *\n * hprose doc for Go. *\n * *\n * LastModified: May 26, 2015 *\n * Author: Ma Bingyao <andot@hprose.com> *\n * *\n\\**********************************************************\/\n\n\/*\nPackage hprose is a High Performance Remote Object Service Engine.\n\nIt is a modern, lightweight, cross-language, cross-platform, object-oriented, high performance, remote dynamic communication middleware. It is not only easy to use, but powerful. You just need a little time to learn, then you can use it to easily construct cross language cross platform distributed application system.\n\nHprose supports many programming languages, for example:\n\n * AAuto Quicker\n * ActionScript\n * ASP\n * C++\n * Dart\n * Delphi\/Free Pascal\n * dotNET(C#, Visual Basic...)\n * Golang\n * Java\n * JavaScript\n * Node.js\n * Objective-C\n * Perl\n * PHP\n * Python\n * Ruby\n * ...\n\nThrough Hprose, You can conveniently and efficiently intercommunicate between those programming languages.\n\nThis project is the implementation of Hprose for Golang.\n*\/\npackage hprose\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package websocket implements the WebSocket protocol defined in RFC 6455.\n\/\/\n\/\/ Overview\n\/\/\n\/\/ The Conn type represents a WebSocket connection. A server application calls\n\/\/ the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:\n\/\/\n\/\/ var upgrader = websocket.Upgrader{\n\/\/ ReadBufferSize: 1024,\n\/\/ WriteBufferSize: 1024,\n\/\/ }\n\/\/\n\/\/ func handler(w http.ResponseWriter, r *http.Request) {\n\/\/ conn, err := upgrader.Upgrade(w, r, nil)\n\/\/ if err != nil {\n\/\/ log.Println(err)\n\/\/ return\n\/\/ }\n\/\/ ... Use conn to send and receive messages.\n\/\/ }\n\/\/\n\/\/ Call the connection's WriteMessage and ReadMessage methods to send and\n\/\/ receive messages as a slice of bytes. This snippet of code shows how to echo\n\/\/ messages using these methods:\n\/\/\n\/\/ for {\n\/\/ messageType, p, err := conn.ReadMessage()\n\/\/ if err != nil {\n\/\/ log.Println(err)\n\/\/ return\n\/\/ }\n\/\/ if err := conn.WriteMessage(messageType, p); err != nil {\n\/\/ log.Println(err)\n\/\/ return\n\/\/ }\n\/\/ }\n\/\/\n\/\/ In above snippet of code, p is a []byte and messageType is an int with value\n\/\/ websocket.BinaryMessage or websocket.TextMessage.\n\/\/\n\/\/ An application can also send and receive messages using the io.WriteCloser\n\/\/ and io.Reader interfaces. To send a message, call the connection NextWriter\n\/\/ method to get an io.WriteCloser, write the message to the writer and close\n\/\/ the writer when done. To receive a message, call the connection NextReader\n\/\/ method to get an io.Reader and read until io.EOF is returned. This snippet\n\/\/ shows how to echo messages using the NextWriter and NextReader methods:\n\/\/\n\/\/ for {\n\/\/ messageType, r, err := conn.NextReader()\n\/\/ if err != nil {\n\/\/ return\n\/\/ }\n\/\/ w, err := conn.NextWriter(messageType)\n\/\/ if err != nil {\n\/\/ return err\n\/\/ }\n\/\/ if _, err := io.Copy(w, r); err != nil {\n\/\/ return err\n\/\/ }\n\/\/ if err := w.Close(); err != nil {\n\/\/ return err\n\/\/ }\n\/\/ }\n\/\/\n\/\/ Data Messages\n\/\/\n\/\/ The WebSocket protocol distinguishes between text and binary data messages.\n\/\/ Text messages are interpreted as UTF-8 encoded text. The interpretation of\n\/\/ binary messages is left to the application.\n\/\/\n\/\/ This package uses the TextMessage and BinaryMessage integer constants to\n\/\/ identify the two data message types. The ReadMessage and NextReader methods\n\/\/ return the type of the received message. The messageType argument to the\n\/\/ WriteMessage and NextWriter methods specifies the type of a sent message.\n\/\/\n\/\/ It is the application's responsibility to ensure that text messages are\n\/\/ valid UTF-8 encoded text.\n\/\/\n\/\/ Control Messages\n\/\/\n\/\/ The WebSocket protocol defines three types of control messages: close, ping\n\/\/ and pong. Call the connection WriteControl, WriteMessage or NextWriter\n\/\/ methods to send a control message to the peer.\n\/\/\n\/\/ Connections handle received close messages by calling the handler function\n\/\/ set with the SetCloseHandler method and by returning a *CloseError from the\n\/\/ NextReader, ReadMessage or the message Read method. The default close\n\/\/ handler sends a close message to the peer.\n\/\/\n\/\/ Connections handle received ping messages by calling the handler function\n\/\/ set with the SetPingHandler method. The default ping handler sends a pong\n\/\/ message to the peer.\n\/\/\n\/\/ Connections handle received pong messages by calling the handler function\n\/\/ set with the SetPongHandler method. The default pong handler does nothing.\n\/\/ If an application sends ping messages, then the application should set a\n\/\/ pong handler to receive the corresponding pong.\n\/\/\n\/\/ The control message handler functions are called from the NextReader,\n\/\/ ReadMessage and message reader Read methods. The default close and ping\n\/\/ handlers can block these methods for a short time when the handler writes to\n\/\/ the connection.\n\/\/\n\/\/ The application must read the connection to process close, ping and pong\n\/\/ messages sent from the peer. If the application is not otherwise interested\n\/\/ in messages from the peer, then the application should start a goroutine to\n\/\/ read and discard messages from the peer. A simple example is:\n\/\/\n\/\/ func readLoop(c *websocket.Conn) {\n\/\/ for {\n\/\/ if _, _, err := c.NextReader(); err != nil {\n\/\/ c.Close()\n\/\/ break\n\/\/ }\n\/\/ }\n\/\/ }\n\/\/\n\/\/ Concurrency\n\/\/\n\/\/ Connections support one concurrent reader and one concurrent writer.\n\/\/\n\/\/ Applications are responsible for ensuring that no more than one goroutine\n\/\/ calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,\n\/\/ WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and\n\/\/ that no more than one goroutine calls the read methods (NextReader,\n\/\/ SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)\n\/\/ concurrently.\n\/\/\n\/\/ The Close and WriteControl methods can be called concurrently with all other\n\/\/ methods.\n\/\/\n\/\/ Origin Considerations\n\/\/\n\/\/ Web browsers allow Javascript applications to open a WebSocket connection to\n\/\/ any host. It's up to the server to enforce an origin policy using the Origin\n\/\/ request header sent by the browser.\n\/\/\n\/\/ The Upgrader calls the function specified in the CheckOrigin field to check\n\/\/ the origin. If the CheckOrigin function returns false, then the Upgrade\n\/\/ method fails the WebSocket handshake with HTTP status 403.\n\/\/\n\/\/ If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail\n\/\/ the handshake if the Origin request header is present and the Origin host is\n\/\/ not equal to the Host request header.\n\/\/\n\/\/ The deprecated package-level Upgrade function does not perform origin\n\/\/ checking. The application is responsible for checking the Origin header\n\/\/ before calling the Upgrade function.\n\/\/\n\/\/ Compression EXPERIMENTAL\n\/\/\n\/\/ Per message compression extensions (RFC 7692) are experimentally supported\n\/\/ by this package in a limited capacity. Setting the EnableCompression option\n\/\/ to true in Dialer or Upgrader will attempt to negotiate per message deflate\n\/\/ support.\n\/\/\n\/\/ var upgrader = websocket.Upgrader{\n\/\/ EnableCompression: true,\n\/\/ }\n\/\/\n\/\/ If compression was successfully negotiated with the connection's peer, any\n\/\/ message received in compressed form will be automatically decompressed.\n\/\/ All Read methods will return uncompressed bytes.\n\/\/\n\/\/ Per message compression of messages written to a connection can be enabled\n\/\/ or disabled by calling the corresponding Conn method:\n\/\/\n\/\/ conn.EnableWriteCompression(false)\n\/\/\n\/\/ Currently this package does not support compression with \"context takeover\".\n\/\/ This means that messages must be compressed and decompressed in isolation,\n\/\/ without retaining sliding window or dictionary state across messages. For\n\/\/ more details refer to RFC 7692.\n\/\/\n\/\/ Use of compression is experimental and may result in decreased performance.\npackage websocket\n<commit_msg>Add buffer commentary<commit_after>\/\/ Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package websocket implements the WebSocket protocol defined in RFC 6455.\n\/\/\n\/\/ Overview\n\/\/\n\/\/ The Conn type represents a WebSocket connection. A server application calls\n\/\/ the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:\n\/\/\n\/\/ var upgrader = websocket.Upgrader{\n\/\/ ReadBufferSize: 1024,\n\/\/ WriteBufferSize: 1024,\n\/\/ }\n\/\/\n\/\/ func handler(w http.ResponseWriter, r *http.Request) {\n\/\/ conn, err := upgrader.Upgrade(w, r, nil)\n\/\/ if err != nil {\n\/\/ log.Println(err)\n\/\/ return\n\/\/ }\n\/\/ ... Use conn to send and receive messages.\n\/\/ }\n\/\/\n\/\/ Call the connection's WriteMessage and ReadMessage methods to send and\n\/\/ receive messages as a slice of bytes. This snippet of code shows how to echo\n\/\/ messages using these methods:\n\/\/\n\/\/ for {\n\/\/ messageType, p, err := conn.ReadMessage()\n\/\/ if err != nil {\n\/\/ log.Println(err)\n\/\/ return\n\/\/ }\n\/\/ if err := conn.WriteMessage(messageType, p); err != nil {\n\/\/ log.Println(err)\n\/\/ return\n\/\/ }\n\/\/ }\n\/\/\n\/\/ In above snippet of code, p is a []byte and messageType is an int with value\n\/\/ websocket.BinaryMessage or websocket.TextMessage.\n\/\/\n\/\/ An application can also send and receive messages using the io.WriteCloser\n\/\/ and io.Reader interfaces. To send a message, call the connection NextWriter\n\/\/ method to get an io.WriteCloser, write the message to the writer and close\n\/\/ the writer when done. To receive a message, call the connection NextReader\n\/\/ method to get an io.Reader and read until io.EOF is returned. This snippet\n\/\/ shows how to echo messages using the NextWriter and NextReader methods:\n\/\/\n\/\/ for {\n\/\/ messageType, r, err := conn.NextReader()\n\/\/ if err != nil {\n\/\/ return\n\/\/ }\n\/\/ w, err := conn.NextWriter(messageType)\n\/\/ if err != nil {\n\/\/ return err\n\/\/ }\n\/\/ if _, err := io.Copy(w, r); err != nil {\n\/\/ return err\n\/\/ }\n\/\/ if err := w.Close(); err != nil {\n\/\/ return err\n\/\/ }\n\/\/ }\n\/\/\n\/\/ Data Messages\n\/\/\n\/\/ The WebSocket protocol distinguishes between text and binary data messages.\n\/\/ Text messages are interpreted as UTF-8 encoded text. The interpretation of\n\/\/ binary messages is left to the application.\n\/\/\n\/\/ This package uses the TextMessage and BinaryMessage integer constants to\n\/\/ identify the two data message types. The ReadMessage and NextReader methods\n\/\/ return the type of the received message. The messageType argument to the\n\/\/ WriteMessage and NextWriter methods specifies the type of a sent message.\n\/\/\n\/\/ It is the application's responsibility to ensure that text messages are\n\/\/ valid UTF-8 encoded text.\n\/\/\n\/\/ Control Messages\n\/\/\n\/\/ The WebSocket protocol defines three types of control messages: close, ping\n\/\/ and pong. Call the connection WriteControl, WriteMessage or NextWriter\n\/\/ methods to send a control message to the peer.\n\/\/\n\/\/ Connections handle received close messages by calling the handler function\n\/\/ set with the SetCloseHandler method and by returning a *CloseError from the\n\/\/ NextReader, ReadMessage or the message Read method. The default close\n\/\/ handler sends a close message to the peer.\n\/\/\n\/\/ Connections handle received ping messages by calling the handler function\n\/\/ set with the SetPingHandler method. The default ping handler sends a pong\n\/\/ message to the peer.\n\/\/\n\/\/ Connections handle received pong messages by calling the handler function\n\/\/ set with the SetPongHandler method. The default pong handler does nothing.\n\/\/ If an application sends ping messages, then the application should set a\n\/\/ pong handler to receive the corresponding pong.\n\/\/\n\/\/ The control message handler functions are called from the NextReader,\n\/\/ ReadMessage and message reader Read methods. The default close and ping\n\/\/ handlers can block these methods for a short time when the handler writes to\n\/\/ the connection.\n\/\/\n\/\/ The application must read the connection to process close, ping and pong\n\/\/ messages sent from the peer. If the application is not otherwise interested\n\/\/ in messages from the peer, then the application should start a goroutine to\n\/\/ read and discard messages from the peer. A simple example is:\n\/\/\n\/\/ func readLoop(c *websocket.Conn) {\n\/\/ for {\n\/\/ if _, _, err := c.NextReader(); err != nil {\n\/\/ c.Close()\n\/\/ break\n\/\/ }\n\/\/ }\n\/\/ }\n\/\/\n\/\/ Concurrency\n\/\/\n\/\/ Connections support one concurrent reader and one concurrent writer.\n\/\/\n\/\/ Applications are responsible for ensuring that no more than one goroutine\n\/\/ calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,\n\/\/ WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and\n\/\/ that no more than one goroutine calls the read methods (NextReader,\n\/\/ SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)\n\/\/ concurrently.\n\/\/\n\/\/ The Close and WriteControl methods can be called concurrently with all other\n\/\/ methods.\n\/\/\n\/\/ Origin Considerations\n\/\/\n\/\/ Web browsers allow Javascript applications to open a WebSocket connection to\n\/\/ any host. It's up to the server to enforce an origin policy using the Origin\n\/\/ request header sent by the browser.\n\/\/\n\/\/ The Upgrader calls the function specified in the CheckOrigin field to check\n\/\/ the origin. If the CheckOrigin function returns false, then the Upgrade\n\/\/ method fails the WebSocket handshake with HTTP status 403.\n\/\/\n\/\/ If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail\n\/\/ the handshake if the Origin request header is present and the Origin host is\n\/\/ not equal to the Host request header.\n\/\/\n\/\/ The deprecated package-level Upgrade function does not perform origin\n\/\/ checking. The application is responsible for checking the Origin header\n\/\/ before calling the Upgrade function.\n\/\/\n\/\/ Buffers\n\/\/\n\/\/ Connections buffer network input and output to reduce the number\n\/\/ of system calls when reading or writing messages.\n\/\/\n\/\/ Write buffers are also used for constructing WebSocket frames. See RFC 6455,\n\/\/ Section 5 for a discussion of message framing. A WebSocket frame header is\n\/\/ written to the network each time a write buffer is flushed to the network.\n\/\/ Decreasing the size of the write buffer can increase the amount of framing\n\/\/ overhead on the connection.\n\/\/\n\/\/ The buffer sizes in bytes are specified by the ReadBufferSize and\n\/\/ WriteBufferSize fields in the Dialer and Upgrader. The Dialer uses a default\n\/\/ size of 4096 when a buffer size field is set to zero. The Upgrader reuses\n\/\/ buffers created by the HTTP server when a buffer size field is set to zero.\n\/\/ The HTTP server buffers have a size of 4096 at the time of this writing.\n\/\/\n\/\/ The buffer sizes do not limit the size of a message that can be read or\n\/\/ written by a connection.\n\/\/\n\/\/ Buffers are held for the lifetime of the connection by default. If the\n\/\/ Dialer or Upgrader WriteBufferPool field is set, then a connection holds the\n\/\/ write buffer only when writing a message.\n\/\/\n\/\/ Applications should tune the buffer sizes to balance memory use and\n\/\/ performance. Increasing the buffer size uses more memory, but can reduce the\n\/\/ number of system calls to read or write the network. In the case of writing,\n\/\/ increasing the buffer size can reduce the number of frame headers written to\n\/\/ the network.\n\/\/\n\/\/ Some guidelines for setting buffer parameters are:\n\/\/\n\/\/ Limit the buffer sizes to the maximum expected message size. Buffers larger\n\/\/ than the largest message do not provide any benefit.\n\/\/\n\/\/ Depending on the distribution of message sizes, setting the buffer size to\n\/\/ to a value less than the maximum expected message size can greatly reduce\n\/\/ memory use with a small impact on performance. Here's an example: If 99% of\n\/\/ the messages are smaller than 256 bytes and the maximum message size is 512\n\/\/ bytes, then a buffer size of 256 bytes will result in 1.01 more system calls\n\/\/ than a buffer size of 512 bytes. The memory savings is 50%.\n\/\/\n\/\/ A write buffer pool is useful when the application has a modest number\n\/\/ writes over a large number of connections. when buffers are pooled, a larger\n\/\/ buffer size has a reduced impact on total memory use and has the benefit of\n\/\/ reducing system calls and frame overhead.\n\/\/\n\/\/ Compression EXPERIMENTAL\n\/\/\n\/\/ Per message compression extensions (RFC 7692) are experimentally supported\n\/\/ by this package in a limited capacity. Setting the EnableCompression option\n\/\/ to true in Dialer or Upgrader will attempt to negotiate per message deflate\n\/\/ support.\n\/\/\n\/\/ var upgrader = websocket.Upgrader{\n\/\/ EnableCompression: true,\n\/\/ }\n\/\/\n\/\/ If compression was successfully negotiated with the connection's peer, any\n\/\/ message received in compressed form will be automatically decompressed.\n\/\/ All Read methods will return uncompressed bytes.\n\/\/\n\/\/ Per message compression of messages written to a connection can be enabled\n\/\/ or disabled by calling the corresponding Conn method:\n\/\/\n\/\/ conn.EnableWriteCompression(false)\n\/\/\n\/\/ Currently this package does not support compression with \"context takeover\".\n\/\/ This means that messages must be compressed and decompressed in isolation,\n\/\/ without retaining sliding window or dictionary state across messages. For\n\/\/ more details refer to RFC 7692.\n\/\/\n\/\/ Use of compression is experimental and may result in decreased performance.\npackage websocket\n<|endoftext|>"} {"text":"<commit_before>\/*\nPackage junos provides automation for Junos (Juniper Networks) devices.\n\nEstablishing A Session\n\nTo connect to a Junos device, the process is fairly straightforward.\n\n jnpr := junos.NewSession(host, user, password)\n defer jnpr.Close()\n\nCompare Rollback Configurations\n\nIf you want to view the difference between the current configuration and a rollback\none, then you can use the ConfigDiff() function.\n\n diff, err := jnpr.ConfigDiff(3)\n if err != nil {\n fmt.Println(err)\n }\n fmt.Println(diff)\n\nThis will output exactly how it does on the CLI when you \"| compare.\"\n\nRolling Back to a Previous State\n\nYou can also rollback to a previous state, or the \"rescue\" configuration by using\nthe RollbackConfig() function:\n\n err := jnpr.RollbackConfig(3)\n if err != nil {\n fmt.Println(err)\n }\n\n \/\/ Create a rescue config from the active configuration.\n jnpr.Rescue(\"save\")\n\n \/\/ You can also delete a rescue config.\n jnpr.Rescue(\"delete\")\n\n \/\/ Rollback to the \"rescue\" configuration.\n err := jnpr.RollbackConfig(\"rescue\")\n if err != nil {\n fmt.Println(err)\n }\n\nDevice Configuration\n\nWhen configuring a device, it is good practice to lock the configuration database,\nload the config, commit the configuration, and then unlock the configuration database.\n\nYou can do this with the following functions:\n\n Lock(), Commit(), Unlock()\n\nThere are multiple ways to commit a configuration as well:\n\n \/\/ Commit the configuration as normal\n Commit()\n\n \/\/ Check the configuration for any syntax errors (NOTE: you must still issue a Commit())\n CommitCheck()\n\n \/\/ Commit at a later time, i.e. 4:30 PM\n CommitAt(\"16:30:00\")\n\n \/\/ Rollback configuration if a Commit() is not issued within the given <minutes>.\n CommitConfirm(15)\n\nYou can configure the Junos device by uploading a local file, or pulling from an\nFTP\/HTTP server. The LoadConfig() function takes three arguments:\n\n filename or URL, format, and commit-on-load\n\nIf you specify a URL, it must be in the following format:\n\n ftp:\/\/user:password@server\/path\/to\/file\n http:\/\/user:password@server\/path\/to\/file\n\nThe format of the commands within the file must be one of the following types:\n\n set\n \/\/ system name-server 1.1.1.1\n\n text\n \/\/ system {\n \/\/ name-server 1.1.1.1;\n \/\/ }\n\n xml\n \/\/ <system>\n \/\/ <name-server>\n \/\/ <name>1.1.1.1<\/name>\n \/\/ <\/name-server>\n \/\/ <\/system>\n\nIf the third option is \"true\" then after the configuration is loaded, a commit\nwill be issued. If set to \"false,\" you will have to commit the configuration\nusing the Commit() function.\n\n jnpr.Lock()\n err := jnpr.LoadConfig(\"path-to-file.txt\", \"set\", true)\n if err != nil {\n fmt.Println(err)\n }\n jnpr.Unlock()\n\nYou don't have to use Lock() and Unlock() if you wish, but if by chance someone\nelse tries to edit the device configuration at the same time, there can be conflics\nand most likely an error will be returned.\n*\/\npackage junos\n<commit_msg>Updated documentation<commit_after>\/*\nPackage junos provides automation for Junos (Juniper Networks) devices.\n\nEstablishing A Session\n\nTo connect to a Junos device, the process is fairly straightforward.\n\n jnpr := junos.NewSession(host, user, password)\n defer jnpr.Close()\n\nCompare Rollback Configurations\n\nIf you want to view the difference between the current configuration and a rollback\none, then you can use the ConfigDiff() function.\n\n diff, err := jnpr.ConfigDiff(3)\n if err != nil {\n fmt.Println(err)\n }\n fmt.Println(diff)\n\nThis will output exactly how it does on the CLI when you \"| compare.\"\n\nRolling Back to a Previous State\n\nYou can also rollback to a previous state, or the \"rescue\" configuration by using\nthe RollbackConfig() function:\n\n err := jnpr.RollbackConfig(3)\n if err != nil {\n fmt.Println(err)\n }\n\n \/\/ Create a rescue config from the active configuration.\n jnpr.Rescue(\"save\")\n\n \/\/ You can also delete a rescue config.\n jnpr.Rescue(\"delete\")\n\n \/\/ Rollback to the \"rescue\" configuration.\n err := jnpr.RollbackConfig(\"rescue\")\n if err != nil {\n fmt.Println(err)\n }\n\nDevice Configuration\n\nWhen configuring a device, it is good practice to lock the configuration database,\nload the config, commit the configuration, and then unlock the configuration database.\n\nYou can do this with the following functions:\n\n Lock(), Commit(), Unlock()\n\nThere are multiple ways to commit a configuration as well:\n\n \/\/ Commit the configuration as normal\n Commit()\n\n \/\/ Check the configuration for any syntax errors (NOTE: you must still issue a Commit())\n CommitCheck()\n\n \/\/ Commit at a later time, i.e. 4:30 PM\n CommitAt(\"16:30:00\")\n\n \/\/ Rollback configuration if a Commit() is not issued within the given <minutes>.\n CommitConfirm(15)\n\nYou can configure the Junos device by uploading a local file, or pulling from an\nFTP\/HTTP server. The LoadConfig() function takes three arguments:\n\n filename or URL, format, and commit-on-load\n\nIf you specify a URL, it must be in the following format:\n\n ftp:\/\/<username>:<password>@hostname\/pathname\/file-name\n http:\/\/<username>:<password>@hostname\/pathname\/file-name\n\nThe format of the commands within the file must be one of the following types:\n\n set\n \/\/ system name-server 1.1.1.1\n\n text\n \/\/ system {\n \/\/ name-server 1.1.1.1;\n \/\/ }\n\n xml\n \/\/ <system>\n \/\/ <name-server>\n \/\/ <name>1.1.1.1<\/name>\n \/\/ <\/name-server>\n \/\/ <\/system>\n\nIf the third option is \"true\" then after the configuration is loaded, a commit\nwill be issued. If set to \"false,\" you will have to commit the configuration\nusing the Commit() function.\n\n jnpr.Lock()\n err := jnpr.LoadConfig(\"path-to-file.txt\", \"set\", true)\n if err != nil {\n fmt.Println(err)\n }\n jnpr.Unlock()\n\nYou don't have to use Lock() and Unlock() if you wish, but if by chance someone\nelse tries to edit the device configuration at the same time, there can be conflics\nand most likely an error will be returned.\n*\/\npackage junos\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package sample\n\/\/\n\/\/ Go package sample implements sampling algorithms for 1 in n sampling for a random value (probe):\n\/\/\n\/\/ * Modulo, using modulo-operation\n\/\/ * PowerOf2, using bitwise AND-operation, only usable if the sampling rate is a power of 2\n\/\/ * LowerThan, checking if the probe is lower than a pre calculated boundary (maximum value for probe divided by sampling rate)\n\/\/ * Reciprocal, using a multiplication by the reciprocal value of the sampling rate (Details: https:\/\/breml.github.io\/blog\/2015\/10\/22\/dividable-without-remainder\/)\n\/\/ * Decrement, implementation variant, where the random value is only calculated after a successful sampling\npackage sample\n<commit_msg>Changed package comment to satisfy golint<commit_after>\/\/ Package sample implements sampling algorithms for 1 in n sampling for a random value (probe):\n\/\/\n\/\/ * Modulo, using modulo-operation\n\/\/ * PowerOf2, using bitwise AND-operation, only usable if the sampling rate is a power of 2\n\/\/ * LowerThan, checking if the probe is lower than a pre calculated boundary (maximum value for probe divided by sampling rate)\n\/\/ * Reciprocal, using a multiplication by the reciprocal value of the sampling rate (Details: https:\/\/breml.github.io\/blog\/2015\/10\/22\/dividable-without-remainder\/)\n\/\/ * Decrement, implementation variant, where the random value is only calculated after a successful sampling\npackage sample\n<|endoftext|>"} {"text":"<commit_before>\/\/ All interaction with the library takes place through an instance of Service,\n\/\/ which is created in the following manner:\n\/\/\n\/\/ s := New(ServiceConfig{\n\/\/ PollInterval: 1*time.Minute,\n\/\/ PingInterval: 2*time.Second,\n\/\/ PeerTimeout: 8*time.Second,\n\/\/ Port: 1234,\n\/\/ ID: \"machine01\",\n\/\/ UserData: []byte(\"data\"),\n\/\/ })\n\/\/\n\/\/ At this point, the service will begin sending broadcast and multicast\n\/\/ packets on all appropriate network interfaces and listening for packets from\n\/\/ other peers. The service provides two channels that provide notifications\n\/\/ when peers are added or removed:\n\/\/\n\/\/ for {\n\/\/ select {\n\/\/ case id := <- s.PeerAdded:\n\/\/ fmt.Printf(\"Peer %s added!\\n\", id)\n\/\/ case id := <- s.PeerRemoved:\n\/\/ fmt.Printf(\"Peer %s removed!\\n\", id)\n\/\/ }\n\/\/ }\n\/\/\n\/\/ Once you have a peer ID, you can use it to retrieve the custom user data for\n\/\/ that specific peer:\n\/\/\n\/\/ data, _ := s.PeerUserData(id)\n\/\/ fmt.Printf(\"UserData: %s\\n\", data)\n\/\/\n\/\/ If you need to connect to the peer, it is possible to obtain a slice of IP\n\/\/ addresses for the peer. As packets are received from the peer, the IP\n\/\/ address and timestamp are recored. This allows the service to determine\n\/\/ the best IP address for contacting the peer.\n\/\/\n\/\/ addrs, _ := s.PeerAddrs(id)\n\/\/ for _, a := range addrs {\n\/\/ fmt.Printf(\"- %s\", a)\n\/\/ }\n\/\/\n\/\/ Note that you may want to filter the addresses since the slice may contain\n\/\/ both IPv4 and IPv6 addresses.\n\/\/\n\/\/ The service can be shutdown by invoking the Stop() method:\n\/\/\n\/\/ s.Stop()\n\/\/\npackage sdiscovery\n<commit_msg>Corrected minor issue with documentation.<commit_after>\/\/ All interaction with the library takes place through an instance of Service,\n\/\/ which is created in the following manner:\n\/\/\n\/\/ s := sdiscovery.New(ServiceConfig{\n\/\/ PollInterval: 1*time.Minute,\n\/\/ PingInterval: 2*time.Second,\n\/\/ PeerTimeout: 8*time.Second,\n\/\/ Port: 1234,\n\/\/ ID: \"machine01\",\n\/\/ UserData: []byte(\"data\"),\n\/\/ })\n\/\/\n\/\/ At this point, the service will begin sending broadcast and multicast\n\/\/ packets on all appropriate network interfaces and listening for packets from\n\/\/ other peers. The service provides two channels that provide notifications\n\/\/ when peers are added or removed:\n\/\/\n\/\/ for {\n\/\/ select {\n\/\/ case id := <- s.PeerAdded:\n\/\/ fmt.Printf(\"Peer %s added!\\n\", id)\n\/\/ case id := <- s.PeerRemoved:\n\/\/ fmt.Printf(\"Peer %s removed!\\n\", id)\n\/\/ }\n\/\/ }\n\/\/\n\/\/ Once you have a peer ID, you can use it to retrieve the custom user data for\n\/\/ that specific peer:\n\/\/\n\/\/ data, _ := s.PeerUserData(id)\n\/\/ fmt.Printf(\"UserData: %s\\n\", data)\n\/\/\n\/\/ If you need to connect to the peer, it is possible to obtain a slice of IP\n\/\/ addresses for the peer. As packets are received from the peer, the IP\n\/\/ address and timestamp are recored. This allows the service to determine\n\/\/ the best IP address for contacting the peer.\n\/\/\n\/\/ addrs, _ := s.PeerAddrs(id)\n\/\/ for _, a := range addrs {\n\/\/ fmt.Printf(\"- %s\", a)\n\/\/ }\n\/\/\n\/\/ Note that you may want to filter the addresses since the slice may contain\n\/\/ both IPv4 and IPv6 addresses.\n\/\/\n\/\/ The service can be shutdown by invoking the Stop() method:\n\/\/\n\/\/ s.Stop()\n\/\/\npackage sdiscovery\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package vsock provides access to Linux VM sockets (AF_VSOCK) for\n\/\/ communication between a hypervisor and its virtual machines.\n\/\/\n\/\/ The types in this package implement interfaces provided by package net and\n\/\/ may be used in applications that expect a net.Listener or net.Conn.\n\/\/\n\/\/ - *Addr implements net.Addr\n\/\/ - *Conn implements net.Conn\n\/\/ - *Listener implements net.Listener\n\/\/\n\/\/ Go version support\n\/\/\n\/\/ This package supports varying levels of functionality depending on the version\n\/\/ of Go used during compilation. The Listener and Conn types produced by this\n\/\/ package are backed by non-blocking I\/O, in order to integrate with Go's\n\/\/ runtime network poller in Go 1.11+. Additional functionality is available\n\/\/ starting in Go 1.12+.\n\/\/\n\/\/ Go 1.12+ (recommended):\n\/\/ - *Listener:\n\/\/ - Accept blocks until a connection is received\n\/\/ - Close can interrupt Accept and make it return a permanent error\n\/\/ - SetDeadline can set timeouts which can interrupt Accept and make it return a\n\/\/ temporary error\n\/\/ - *Conn:\n\/\/ - SetDeadline family of methods are fully supported\n\/\/ - CloseRead and CloseWrite can close the reading or writing sides of a\n\/\/ Conn, respectively\n\/\/\n\/\/ Go 1.11 (not recommended):\n\/\/ - *Listener:\n\/\/ - Accept is non-blocking and should be called in a loop, checking for\n\/\/ net.Error.Temporary() == true and sleeping for a short period to avoid wasteful\n\/\/ CPU cycle consumption\n\/\/ - Close makes Accept return a permanent error on the next loop iteration\n\/\/ - SetDeadline is not supported and will always return an error\n\/\/ - *Conn:\n\/\/ - SetDeadline family of methods are fully supported\n\/\/ - CloseRead and CloseWrite are not supported and will always return an error\n\/\/\n\/\/ Go 1.10 and below are not supported. The runtime network poller integration\n\/\/ required by this package is not available in Go versions prior to Go 1.11.\n\/\/\n\/\/ Stability\n\/\/\n\/\/ At this time, package vsock is in a pre-v1.0.0 state. Changes are being made\n\/\/ which may impact the exported API of this package and others in its ecosystem.\n\/\/\n\/\/ If you depend on this package in your application, please use Go modules when\n\/\/ building your application.\npackage vsock\n<commit_msg>vsock: Go version support documentation for Conn.SyscallConn<commit_after>\/\/ Package vsock provides access to Linux VM sockets (AF_VSOCK) for\n\/\/ communication between a hypervisor and its virtual machines.\n\/\/\n\/\/ The types in this package implement interfaces provided by package net and\n\/\/ may be used in applications that expect a net.Listener or net.Conn.\n\/\/\n\/\/ - *Addr implements net.Addr\n\/\/ - *Conn implements net.Conn\n\/\/ - *Listener implements net.Listener\n\/\/\n\/\/ Go version support\n\/\/\n\/\/ This package supports varying levels of functionality depending on the version\n\/\/ of Go used during compilation. The Listener and Conn types produced by this\n\/\/ package are backed by non-blocking I\/O, in order to integrate with Go's\n\/\/ runtime network poller in Go 1.11+. Additional functionality is available\n\/\/ starting in Go 1.12+.\n\/\/\n\/\/ Go 1.12+ (recommended):\n\/\/ - *Listener:\n\/\/ - Accept blocks until a connection is received\n\/\/ - Close can interrupt Accept and make it return a permanent error\n\/\/ - SetDeadline can set timeouts which can interrupt Accept and make it return a\n\/\/ temporary error\n\/\/ - *Conn:\n\/\/ - SetDeadline family of methods are fully supported\n\/\/ - CloseRead and CloseWrite can close the reading or writing sides of a\n\/\/ Conn, respectively\n\/\/ - SyscallConn provides access to raw network control\/read\/write functionality\n\/\/\n\/\/ Go 1.11 (not recommended):\n\/\/ - *Listener:\n\/\/ - Accept is non-blocking and should be called in a loop, checking for\n\/\/ net.Error.Temporary() == true and sleeping for a short period to avoid wasteful\n\/\/ CPU cycle consumption\n\/\/ - Close makes Accept return a permanent error on the next loop iteration\n\/\/ - SetDeadline is not supported and will always return an error\n\/\/ - *Conn:\n\/\/ - SetDeadline family of methods are fully supported\n\/\/ - CloseRead and CloseWrite are not supported and will always return an error\n\/\/ - SyscallConn is not supported and will always return an error\n\/\/\n\/\/ Go 1.10 and below are not supported. The runtime network poller integration\n\/\/ required by this package is not available in Go versions prior to Go 1.11.\n\/\/\n\/\/ Stability\n\/\/\n\/\/ At this time, package vsock is in a pre-v1.0.0 state. Changes are being made\n\/\/ which may impact the exported API of this package and others in its ecosystem.\n\/\/\n\/\/ If you depend on this package in your application, please use Go modules when\n\/\/ building your application.\npackage vsock\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"net\/http\"\n\t\"log\"\n\t\".\/src\"\n)\n\nfunc main() {\n\tvar (\n\t\tmaxWorkers = flag.Int(\"max_workers\", 5, \"The number of workers to start\")\n\t\tmaxQueueSize = flag.Int(\"max_queue_size\", 100, \"The size of job queue\")\n\t\tport = flag.String(\"port\", \"8081\", \"The server port\")\n\t\tsource = flag.String(\"source\", \"https:\/\/repo.gin.g-node.org\", \"The default URI\")\n\t\tbaseTarget = flag.String(\"target\", \".\/\", \"The default base path for storgae\")\n\t)\n\tflag.Parse()\n\tds := ginDoi.GinDataSource{GinURL: *source}\n\tstorage := ginDoi.LocalStorage{Path:*baseTarget, Source:ds}\n\n\t\/\/ Create the job queue.\n\tjobQueue := make(chan ginDoi.Job, *maxQueueSize)\n\t\/\/ Start the dispatcher.\n\tdispatcher := ginDoi.NewDispatcher(jobQueue, *maxWorkers)\n\tdispatcher.Run(ginDoi.NewWorker)\n\n\t\/\/ Start the HTTP handler.\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tginDoi.InitDoiJob(w, r, &ds)\n\t})\n\thttp.HandleFunc(\"\/do\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tginDoi.DoDoiJob(w,r,jobQueue, storage)\n\t})\n\thttp.Handle(\"\/assets\/\",\n\t\thttp.StripPrefix(\"\/assets\/\", http.FileServer(http.Dir(\"\/assets\"))))\n\n\tlog.Fatal(http.ListenAndServe(\":\"+*port, nil))\n}\n\n<commit_msg>Updated server default config<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"net\/http\"\n\t\"log\"\n\t\".\/src\"\n)\n\nfunc main() {\n\tvar (\n\t\tmaxWorkers = flag.Int(\"max_workers\", 5, \"The number of workers to start\")\n\t\tmaxQueueSize = flag.Int(\"max_queue_size\", 100, \"The size of job queue\")\n\t\tport = flag.String(\"port\", \"8083\", \"The server port\")\n\t\tsource = flag.String(\"source\", \"https:\/\/repo.gin.g-node.org\", \"The default URI\")\n\t\tbaseTarget = flag.String(\"target\", \".\/\", \"The default base path for storgae\")\n\t)\n\tflag.Parse()\n\tds := ginDoi.GinDataSource{GinURL: *source}\n\tstorage := ginDoi.LocalStorage{Path:*baseTarget, Source:ds}\n\n\t\/\/ Create the job queue.\n\tjobQueue := make(chan ginDoi.Job, *maxQueueSize)\n\t\/\/ Start the dispatcher.\n\tdispatcher := ginDoi.NewDispatcher(jobQueue, *maxWorkers)\n\tdispatcher.Run(ginDoi.NewWorker)\n\n\t\/\/ Start the HTTP handler.\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tginDoi.InitDoiJob(w, r, &ds)\n\t})\n\thttp.HandleFunc(\"\/do\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tginDoi.DoDoiJob(w,r,jobQueue, storage)\n\t})\n\thttp.Handle(\"\/assets\/\",\n\t\thttp.StripPrefix(\"\/assets\/\", http.FileServer(http.Dir(\"\/assets\"))))\n\n\tlog.Fatal(http.ListenAndServe(\":\"+*port, nil))\n}\n\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/go:build !gogit\n\npackage git\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"strings\"\n\n\t\"code.gitea.io\/gitea\/modules\/log\"\n)\n\n\/\/ GetNote retrieves the git-notes data for a given commit.\n\/\/ FIXME: Add LastCommitCache support\nfunc GetNote(ctx context.Context, repo *Repository, commitID string, note *Note) error {\n\tlog.Trace(\"Searching for git note corresponding to the commit %q in the repository %q\", commitID, repo.Path)\n\tnotes, err := repo.GetCommit(NotesRef)\n\tif err != nil {\n\t\tif IsErrNotExist(err) {\n\t\t\treturn err\n\t\t}\n\t\tlog.Error(\"Unable to get commit from ref %q. Error: %v\", NotesRef, err)\n\t\treturn err\n\t}\n\n\tpath := \"\"\n\n\ttree := ¬es.Tree\n\tlog.Trace(\"Found tree with ID %q while searching for git note corresponding to the commit %q\", tree.ID, commitID)\n\n\tvar entry *TreeEntry\n\toriginalCommitID := commitID\n\tfor len(commitID) > 2 {\n\t\tentry, err = tree.GetTreeEntryByPath(commitID)\n\t\tif err == nil {\n\t\t\tpath += commitID\n\t\t\tbreak\n\t\t}\n\t\tif IsErrNotExist(err) {\n\t\t\ttree, err = tree.SubTree(commitID[0:2])\n\t\t\tpath += commitID[0:2] + \"\/\"\n\t\t\tcommitID = commitID[2:]\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Error(\"Unable to find git note corresponding to the commit %q. Error: %v\", originalCommitID, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tblob := entry.Blob()\n\tdataRc, err := blob.DataAsync()\n\tif err != nil {\n\t\tlog.Error(\"Unable to read blob with ID %q. Error: %v\", blob.ID, err)\n\t\treturn err\n\t}\n\tclosed := false\n\tdefer func() {\n\t\tif !closed {\n\t\t\t_ = dataRc.Close()\n\t\t}\n\t}()\n\td, err := io.ReadAll(dataRc)\n\tif err != nil {\n\t\tlog.Error(\"Unable to read blob with ID %q. Error: %v\", blob.ID, err)\n\t\treturn err\n\t}\n\t_ = dataRc.Close()\n\tclosed = true\n\tnote.Message = d\n\n\ttreePath := \"\"\n\tif idx := strings.LastIndex(path, \"\/\"); idx > -1 {\n\t\ttreePath = path[:idx]\n\t\tpath = path[idx+1:]\n\t}\n\n\tlastCommits, err := GetLastCommitForPaths(ctx, nil, notes, treePath, []string{path})\n\tif err != nil {\n\t\tlog.Error(\"Unable to get the commit for the path %q. Error: %v\", treePath, err)\n\t\treturn err\n\t}\n\tnote.Commit = lastCommits[path]\n\n\treturn nil\n}\n<commit_msg>Only log non ErrNotExist errors in git.GetNote (#19884)<commit_after>\/\/ Copyright 2019 The Gitea Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/go:build !gogit\n\npackage git\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"strings\"\n\n\t\"code.gitea.io\/gitea\/modules\/log\"\n)\n\n\/\/ GetNote retrieves the git-notes data for a given commit.\n\/\/ FIXME: Add LastCommitCache support\nfunc GetNote(ctx context.Context, repo *Repository, commitID string, note *Note) error {\n\tlog.Trace(\"Searching for git note corresponding to the commit %q in the repository %q\", commitID, repo.Path)\n\tnotes, err := repo.GetCommit(NotesRef)\n\tif err != nil {\n\t\tif IsErrNotExist(err) {\n\t\t\treturn err\n\t\t}\n\t\tlog.Error(\"Unable to get commit from ref %q. Error: %v\", NotesRef, err)\n\t\treturn err\n\t}\n\n\tpath := \"\"\n\n\ttree := ¬es.Tree\n\tlog.Trace(\"Found tree with ID %q while searching for git note corresponding to the commit %q\", tree.ID, commitID)\n\n\tvar entry *TreeEntry\n\toriginalCommitID := commitID\n\tfor len(commitID) > 2 {\n\t\tentry, err = tree.GetTreeEntryByPath(commitID)\n\t\tif err == nil {\n\t\t\tpath += commitID\n\t\t\tbreak\n\t\t}\n\t\tif IsErrNotExist(err) {\n\t\t\ttree, err = tree.SubTree(commitID[0:2])\n\t\t\tpath += commitID[0:2] + \"\/\"\n\t\t\tcommitID = commitID[2:]\n\t\t}\n\t\tif err != nil {\n\t\t\t\/\/ Err may have been updated by the SubTree we need to recheck if it's again an ErrNotExist\n\t\t\tif !IsErrNotExist(err) {\n\t\t\t\tlog.Error(\"Unable to find git note corresponding to the commit %q. Error: %v\", originalCommitID, err)\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\n\tblob := entry.Blob()\n\tdataRc, err := blob.DataAsync()\n\tif err != nil {\n\t\tlog.Error(\"Unable to read blob with ID %q. Error: %v\", blob.ID, err)\n\t\treturn err\n\t}\n\tclosed := false\n\tdefer func() {\n\t\tif !closed {\n\t\t\t_ = dataRc.Close()\n\t\t}\n\t}()\n\td, err := io.ReadAll(dataRc)\n\tif err != nil {\n\t\tlog.Error(\"Unable to read blob with ID %q. Error: %v\", blob.ID, err)\n\t\treturn err\n\t}\n\t_ = dataRc.Close()\n\tclosed = true\n\tnote.Message = d\n\n\ttreePath := \"\"\n\tif idx := strings.LastIndex(path, \"\/\"); idx > -1 {\n\t\ttreePath = path[:idx]\n\t\tpath = path[idx+1:]\n\t}\n\n\tlastCommits, err := GetLastCommitForPaths(ctx, nil, notes, treePath, []string{path})\n\tif err != nil {\n\t\tlog.Error(\"Unable to get the commit for the path %q. Error: %v\", treePath, err)\n\t\treturn err\n\t}\n\tnote.Commit = lastCommits[path]\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package triggers\n\ntype Donators struct{}\n\nfunc (d *Donators) Triggers() []string {\n\treturn []string{\n\t\t\"donators\",\n\t\t\"donations\",\n\t\t\"donate\",\n\t\t\"supporters\",\n\t\t\"support\",\n\t\t\"patreon\",\n\t\t\"patreons\",\n\t\t\"credits\",\n\t}\n}\n\nfunc (d *Donators) Response(trigger string, content string) string {\n\treturn \"<:robyulblush:327206930437373952> **These awesome people support me:**\\nKakkela 💕\\nSunny 💓\\nsomicidal minaiac 💞\\nOokami 🖤\\nKeldra 💗\\nTN 💝\\nseulguille 💘\\nSlenn 💜\\nFugu ❣️\\nWoori 💞\\nhikari 💙\\nAshton 💖\\nKay 💝\\njamie 💓\\nHomeboywill 💘\\nRimbol 💕\\nGenisphere 💖\\nekgus 💗\\nCPark 💞\\njungoo 💕\\nShawn 💗\\nSaltiestPeach 💘\\nBae Nja Min 💖\\nThank you so much!\\n_You want to be in this list? <https:\/\/www.patreon.com\/sekl>!_\"\n}\n<commit_msg>[donators] adds haerts!<commit_after>package triggers\n\ntype Donators struct{}\n\nfunc (d *Donators) Triggers() []string {\n\treturn []string{\n\t\t\"donators\",\n\t\t\"donations\",\n\t\t\"donate\",\n\t\t\"supporters\",\n\t\t\"support\",\n\t\t\"patreon\",\n\t\t\"patreons\",\n\t\t\"credits\",\n\t}\n}\n\nfunc (d *Donators) Response(trigger string, content string) string {\n\treturn \"<:robyulblush:327206930437373952> **These awesome people support me:**\\nKakkela 💕\\nSunny 💓\\nsomicidal minaiac 💞\\nOokami 🖤\\nKeldra 💗\\nTN 💝\\nseulguille 💘\\nSlenn 💜\\nFugu ❣️\\nWoori 💞\\nhikari 💙\\nAshton 💖\\nKay 💝\\njamie 💓\\nHomeboywill 💘\\nRimbol 💕\\nGenisphere 💖\\nekgus 💗\\nCPark 💞\\njungoo 💕\\nShawn 💗\\nSaltiestPeach 💘\\nBae Nja Min 💖\\nhaerts 💓\\nThank you so much!\\n_You want to be in this list? <https:\/\/www.patreon.com\/sekl>!_\"\n}\n<|endoftext|>"} {"text":"<commit_before>package msgpack\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"sync\"\n\n\t\"gopkg.in\/vmihailenco\/msgpack.v2\/codes\"\n)\n\nvar (\n\textTypes [128]reflect.Type\n)\n\nvar bufferPool = &sync.Pool{\n\tNew: func() interface{} {\n\t\treturn &bytes.Buffer{}\n\t},\n}\n\nfunc RegisterExt(id int8, value interface{}) {\n\tif extTypes[id] != nil {\n\t\tpanic(fmt.Errorf(\"ext with id %d is already registered\", id))\n\t}\n\textTypes[id] = reflect.TypeOf(value)\n}\n\nfunc extTypeId(typ reflect.Type) int8 {\n\tfor id, t := range extTypes {\n\t\tif t == typ {\n\t\t\treturn int8(id)\n\t\t}\n\t}\n\treturn -1\n}\n\nfunc makeExtEncoder(id int8, enc encoderFunc) encoderFunc {\n\treturn func(e *Encoder, v reflect.Value) error {\n\t\tbuf := bufferPool.Get().(*bytes.Buffer)\n\t\tdefer bufferPool.Put(buf)\n\n\t\toldw := e.w\n\t\te.w = buf\n\t\terr := enc(e, v)\n\t\te.w = oldw\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := e.encodeExtLen(buf.Len()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := e.w.WriteByte(byte(id)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn e.write(buf.Bytes())\n\t}\n}\n\nfunc (e *Encoder) encodeExtLen(l int) error {\n\tswitch {\n\tcase l == 1:\n\t\treturn e.w.WriteByte(codes.FixExt1)\n\tcase l == 2:\n\t\treturn e.w.WriteByte(codes.FixExt2)\n\tcase l == 4:\n\t\treturn e.w.WriteByte(codes.FixExt4)\n\tcase l == 8:\n\t\treturn e.w.WriteByte(codes.FixExt8)\n\tcase l == 16:\n\t\treturn e.w.WriteByte(codes.FixExt16)\n\tcase l < 256:\n\t\treturn e.write1(codes.Ext8, uint64(l))\n\tcase l < 65536:\n\t\treturn e.write2(codes.Ext16, uint64(l))\n\tdefault:\n\t\treturn e.write4(codes.Ext32, uint64(l))\n\t}\n}\n\nfunc (d *Decoder) decodeExtLen() (int, error) {\n\tc, err := d.r.ReadByte()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tswitch c {\n\tcase codes.FixExt1:\n\t\treturn 1, nil\n\tcase codes.FixExt2:\n\t\treturn 2, nil\n\tcase codes.FixExt4:\n\t\treturn 4, nil\n\tcase codes.FixExt8:\n\t\treturn 8, nil\n\tcase codes.FixExt16:\n\t\treturn 16, nil\n\tcase codes.Ext8:\n\t\tn, err := d.uint8()\n\t\treturn int(n), err\n\tcase codes.Ext16:\n\t\tn, err := d.uint16()\n\t\treturn int(n), err\n\tcase codes.Ext32:\n\t\tn, err := d.uint32()\n\t\treturn int(n), err\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"msgpack: invalid code %x decoding ext length\", c)\n\t}\n}\n\nfunc (d *Decoder) decodeExt() (interface{}, error) {\n\t\/\/ TODO: use decoded length.\n\t_, err := d.decodeExtLen()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\textId, err := d.r.ReadByte()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttyp := extTypes[extId]\n\tif typ == nil {\n\t\treturn nil, fmt.Errorf(\"msgpack: unregistered ext id %d\", extId)\n\t}\n\tv := reflect.New(typ).Elem()\n\tif err := d.DecodeValue(v); err != nil {\n\t\treturn nil, err\n\t}\n\treturn v.Interface(), nil\n}\n<commit_msg>Reset buffer before reusing.<commit_after>package msgpack\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"sync\"\n\n\t\"gopkg.in\/vmihailenco\/msgpack.v2\/codes\"\n)\n\nvar (\n\textTypes [128]reflect.Type\n)\n\nvar bufferPool = &sync.Pool{\n\tNew: func() interface{} {\n\t\treturn &bytes.Buffer{}\n\t},\n}\n\nfunc RegisterExt(id int8, value interface{}) {\n\tif extTypes[id] != nil {\n\t\tpanic(fmt.Errorf(\"ext with id %d is already registered\", id))\n\t}\n\textTypes[id] = reflect.TypeOf(value)\n}\n\nfunc extTypeId(typ reflect.Type) int8 {\n\tfor id, t := range extTypes {\n\t\tif t == typ {\n\t\t\treturn int8(id)\n\t\t}\n\t}\n\treturn -1\n}\n\nfunc makeExtEncoder(id int8, enc encoderFunc) encoderFunc {\n\treturn func(e *Encoder, v reflect.Value) error {\n\t\tbuf := bufferPool.Get().(*bytes.Buffer)\n\t\tdefer bufferPool.Put(buf)\n\t\tbuf.Reset()\n\n\t\toldw := e.w\n\t\te.w = buf\n\t\terr := enc(e, v)\n\t\te.w = oldw\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := e.encodeExtLen(buf.Len()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := e.w.WriteByte(byte(id)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn e.write(buf.Bytes())\n\t}\n}\n\nfunc (e *Encoder) encodeExtLen(l int) error {\n\tswitch {\n\tcase l == 1:\n\t\treturn e.w.WriteByte(codes.FixExt1)\n\tcase l == 2:\n\t\treturn e.w.WriteByte(codes.FixExt2)\n\tcase l == 4:\n\t\treturn e.w.WriteByte(codes.FixExt4)\n\tcase l == 8:\n\t\treturn e.w.WriteByte(codes.FixExt8)\n\tcase l == 16:\n\t\treturn e.w.WriteByte(codes.FixExt16)\n\tcase l < 256:\n\t\treturn e.write1(codes.Ext8, uint64(l))\n\tcase l < 65536:\n\t\treturn e.write2(codes.Ext16, uint64(l))\n\tdefault:\n\t\treturn e.write4(codes.Ext32, uint64(l))\n\t}\n}\n\nfunc (d *Decoder) decodeExtLen() (int, error) {\n\tc, err := d.r.ReadByte()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tswitch c {\n\tcase codes.FixExt1:\n\t\treturn 1, nil\n\tcase codes.FixExt2:\n\t\treturn 2, nil\n\tcase codes.FixExt4:\n\t\treturn 4, nil\n\tcase codes.FixExt8:\n\t\treturn 8, nil\n\tcase codes.FixExt16:\n\t\treturn 16, nil\n\tcase codes.Ext8:\n\t\tn, err := d.uint8()\n\t\treturn int(n), err\n\tcase codes.Ext16:\n\t\tn, err := d.uint16()\n\t\treturn int(n), err\n\tcase codes.Ext32:\n\t\tn, err := d.uint32()\n\t\treturn int(n), err\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"msgpack: invalid code %x decoding ext length\", c)\n\t}\n}\n\nfunc (d *Decoder) decodeExt() (interface{}, error) {\n\t\/\/ TODO: use decoded length.\n\t_, err := d.decodeExtLen()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\textId, err := d.r.ReadByte()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttyp := extTypes[extId]\n\tif typ == nil {\n\t\treturn nil, fmt.Errorf(\"msgpack: unregistered ext id %d\", extId)\n\t}\n\tv := reflect.New(typ).Elem()\n\tif err := d.DecodeValue(v); err != nil {\n\t\treturn nil, err\n\t}\n\treturn v.Interface(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package httputilmore\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/grokify\/gotilla\/strconv\/strconvutil\"\n)\n\nconst (\n\tHeaderAuthorization = \"Authorization\"\n\tHeaderContentDisposition = \"Content-Disposition\"\n\tHeaderContentLength = \"Content-Length\"\n\tHeaderContentTransferEncoding = \"Content-Transfer-Encoding\"\n\tHeaderContentType = \"Content-Type\"\n\tHeaderLocation = \"Location\"\n\tHeaderUserAgent = \"User-Agent\"\n\tHeaderXContentTypeOptions = \"X-Content-Type-Options\"\n\tContentTypeAppJsonUtf8 = \"application\/json; charset=utf-8\"\n\tContentTypeAppFormUrlEncoded = \"application\/x-www-form-urlencoded\"\n\tContentTypeTextCalendarUtf8Request = `text\/calendar; charset=\"utf-8\"; method=REQUEST`\n\tContentTypeTextHtmlUtf8 = \"text\/html; charset=utf-8\"\n\tContentTypeTextPlainUsAscii = \"text\/plain; charset=us-ascii\"\n\tContentTypeTextPlainUtf8 = \"text\/plain; charset=utf-8\"\n\tSchemeHTTPS = \"https\"\n)\n\n\/\/ GetWriteFile performs a HTTP GET request and saves the response body\n\/\/ to the file path specified\nfunc GetWriteFile(url string, filename string, perm os.FileMode) ([]byte, error) {\n\t_, bytes, err := GetResponseAndBytes(url)\n\tif err != nil {\n\t\treturn bytes, err\n\t}\n\terr = ioutil.WriteFile(filename, bytes, perm)\n\treturn bytes, err\n}\n\n\/\/ ResponseBody returns the body as a byte array\nfunc ResponseBody(res *http.Response) ([]byte, error) {\n\tdefer res.Body.Close()\n\tcontents, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\treturn contents, nil\n}\n\n\/\/ ResponseBodyJSONMapIndent returns the body as a generic JSON dictionary\nfunc ResponseBodyJSONMapIndent(res *http.Response, prefix string, indent string) ([]byte, error) {\n\tbody, err := ResponseBody(res)\n\tif err != nil {\n\t\treturn body, err\n\t}\n\tany := map[string]interface{}{}\n\tjson.Unmarshal(body, &any)\n\treturn json.MarshalIndent(any, prefix, indent)\n}\n\n\/\/ GetResponseAndBytes retreives a URL and returns the response body\n\/\/ as a byte array in addition to the *http.Response.\nfunc GetResponseAndBytes(url string) (*http.Response, []byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn resp, []byte{}, err\n\t}\n\tbytes, err := ResponseBody(resp)\n\treturn resp, bytes, err\n}\n\n\/\/ UnmarshalResponseJSON unmarshal a `*http.Response` JSON body into\n\/\/ a data pointer.\nfunc UnmarshalResponseJSON(resp *http.Response, data interface{}) error {\n\t\/\/bytes, err := ResponseBody(resp)\n\tbytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.Unmarshal(bytes, data)\n}\n\n\/\/ PrintRequestOut prints a http.Request using `httputil.DumpRequestOut`.\nfunc PrintRequestOut(req *http.Request, includeBody bool) error {\n\treqBytes, err := httputil.DumpRequestOut(req, includeBody)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(string(reqBytes))\n\treturn nil\n}\n\n\/\/ PrintResponse prints a http.Response using `httputil.DumpResponse`.\nfunc PrintResponse(resp *http.Response, includeBody bool) error {\n\trespBytes, err := httputil.DumpResponse(resp, includeBody)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(string(respBytes))\n\treturn nil\n}\n\n\/\/ MergeHeader merges two http.Header adding the values of the second\n\/\/ to the first.\nfunc MergeHeader(base, extra http.Header, overwrite bool) http.Header {\n\tfor k, vals := range extra {\n\t\tif overwrite {\n\t\t\tbase.Del(k)\n\t\t}\n\n\t\tfor _, v := range vals {\n\t\t\tv = strings.TrimSpace(v)\n\t\t\tif len(v) > 0 {\n\t\t\t\tbase.Add(k, v)\n\t\t\t}\n\t\t}\n\t}\n\treturn base\n}\n\nfunc ParseMultipartFormDataBoundaryFromHeader(contentType string) string {\n\trx := regexp.MustCompile(`^multipart\/form-data.+boundary=\"?([^;\"]+)`)\n\tm := rx.FindStringSubmatch(contentType)\n\tif len(m) > 0 {\n\t\treturn m[1]\n\t}\n\treturn \"\"\n}\n\n\/\/ RateLimitInfo is a structure for holding parsed rate limit info.\n\/\/ It uses headers from the GitHub, RingCentral and Twitter APIs.\ntype RateLimitInfo struct {\n\tStatusCode int\n\tRetryAfter int\n\tXRateLimitLimit int\n\tXRateLimitRemaining int\n\tXRateLimitReset int\n\tXRateLimitWindow int\n}\n\n\/\/ NewResponseRateLimitInfo returns a RateLimitInfo from a http.Response.\nfunc NewResponseRateLimitInfo(resp *http.Response, useXrlHyphen bool) RateLimitInfo {\n\trlstat := RateLimitInfo{\n\t\tStatusCode: resp.StatusCode,\n\t\tRetryAfter: strconvutil.AtoiWithDefault(resp.Header.Get(\"Retry-After\"), 0)}\n\n\tif useXrlHyphen {\n\t\trlstat.XRateLimitLimit = strconvutil.AtoiWithDefault(resp.Header.Get(\"X-Rate-Limit-Limit\"), 0)\n\t\trlstat.XRateLimitRemaining = strconvutil.AtoiWithDefault(resp.Header.Get(\"X-Rate-Limit-Remaining\"), 0)\n\t\trlstat.XRateLimitReset = strconvutil.AtoiWithDefault(resp.Header.Get(\"X-Rate-Limit-Reset\"), 0)\n\t\trlstat.XRateLimitWindow = strconvutil.AtoiWithDefault(resp.Header.Get(\"X-Rate-Limit-Window\"), 0)\n\t} else {\n\t\trlstat.XRateLimitLimit = strconvutil.AtoiWithDefault(resp.Header.Get(\"X-RateLimit-Limit\"), 0)\n\t\trlstat.XRateLimitRemaining = strconvutil.AtoiWithDefault(resp.Header.Get(\"X-RateLimit-Remaining\"), 0)\n\t\trlstat.XRateLimitReset = strconvutil.AtoiWithDefault(resp.Header.Get(\"X-RateLimit-Reset\"), 0)\n\t\trlstat.XRateLimitWindow = strconvutil.AtoiWithDefault(resp.Header.Get(\"X-RateLimit-Window\"), 0)\n\t}\n\treturn rlstat\n}\n\ntype FnLogRateLimitInfo func(RateLimitInfo)\n<commit_msg>add httputilmore.PostJsonSimple<commit_after>package httputilmore\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com\/grokify\/gotilla\/strconv\/strconvutil\"\n)\n\nconst (\n\tHeaderAuthorization = \"Authorization\"\n\tHeaderContentDisposition = \"Content-Disposition\"\n\tHeaderContentLength = \"Content-Length\"\n\tHeaderContentTransferEncoding = \"Content-Transfer-Encoding\"\n\tHeaderContentType = \"Content-Type\"\n\tHeaderLocation = \"Location\"\n\tHeaderUserAgent = \"User-Agent\"\n\tHeaderXContentTypeOptions = \"X-Content-Type-Options\"\n\tContentTypeAppJsonUtf8 = \"application\/json; charset=utf-8\"\n\tContentTypeAppFormUrlEncoded = \"application\/x-www-form-urlencoded\"\n\tContentTypeTextCalendarUtf8Request = `text\/calendar; charset=\"utf-8\"; method=REQUEST`\n\tContentTypeTextHtmlUtf8 = \"text\/html; charset=utf-8\"\n\tContentTypeTextPlainUsAscii = \"text\/plain; charset=us-ascii\"\n\tContentTypeTextPlainUtf8 = \"text\/plain; charset=utf-8\"\n\tSchemeHTTPS = \"https\"\n)\n\n\/\/ GetWriteFile performs a HTTP GET request and saves the response body\n\/\/ to the file path specified\nfunc GetWriteFile(url string, filename string, perm os.FileMode) ([]byte, error) {\n\t_, bytes, err := GetResponseAndBytes(url)\n\tif err != nil {\n\t\treturn bytes, err\n\t}\n\terr = ioutil.WriteFile(filename, bytes, perm)\n\treturn bytes, err\n}\n\nfunc PostJsonSimple(requrl string, body interface{}) (*http.Response, error) {\n\tbodyBytes, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn &http.Response{}, err\n\t}\n\n\treq, err := http.NewRequest(http.MethodPost, requrl, bytes.NewBuffer(bodyBytes))\n\tif err != nil {\n\t\treturn &http.Response{}, err\n\t}\n\treq.Header.Set(HeaderContentType, ContentTypeAppJsonUtf8)\n\n\tclient := &http.Client{}\n\treturn client.Do(req)\n}\n\n\/\/ ResponseBody returns the body as a byte array\nfunc ResponseBody(res *http.Response) ([]byte, error) {\n\tdefer res.Body.Close()\n\tcontents, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\treturn contents, nil\n}\n\n\/\/ ResponseBodyJSONMapIndent returns the body as a generic JSON dictionary\nfunc ResponseBodyJSONMapIndent(res *http.Response, prefix string, indent string) ([]byte, error) {\n\tbody, err := ResponseBody(res)\n\tif err != nil {\n\t\treturn body, err\n\t}\n\tany := map[string]interface{}{}\n\tjson.Unmarshal(body, &any)\n\treturn json.MarshalIndent(any, prefix, indent)\n}\n\n\/\/ GetResponseAndBytes retreives a URL and returns the response body\n\/\/ as a byte array in addition to the *http.Response.\nfunc GetResponseAndBytes(url string) (*http.Response, []byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn resp, []byte{}, err\n\t}\n\tbytes, err := ResponseBody(resp)\n\treturn resp, bytes, err\n}\n\n\/\/ UnmarshalResponseJSON unmarshal a `*http.Response` JSON body into\n\/\/ a data pointer.\nfunc UnmarshalResponseJSON(resp *http.Response, data interface{}) error {\n\t\/\/bytes, err := ResponseBody(resp)\n\tbytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.Unmarshal(bytes, data)\n}\n\n\/\/ PrintRequestOut prints a http.Request using `httputil.DumpRequestOut`.\nfunc PrintRequestOut(req *http.Request, includeBody bool) error {\n\treqBytes, err := httputil.DumpRequestOut(req, includeBody)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(string(reqBytes))\n\treturn nil\n}\n\n\/\/ PrintResponse prints a http.Response using `httputil.DumpResponse`.\nfunc PrintResponse(resp *http.Response, includeBody bool) error {\n\trespBytes, err := httputil.DumpResponse(resp, includeBody)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(string(respBytes))\n\treturn nil\n}\n\n\/\/ MergeHeader merges two http.Header adding the values of the second\n\/\/ to the first.\nfunc MergeHeader(base, extra http.Header, overwrite bool) http.Header {\n\tfor k, vals := range extra {\n\t\tif overwrite {\n\t\t\tbase.Del(k)\n\t\t}\n\n\t\tfor _, v := range vals {\n\t\t\tv = strings.TrimSpace(v)\n\t\t\tif len(v) > 0 {\n\t\t\t\tbase.Add(k, v)\n\t\t\t}\n\t\t}\n\t}\n\treturn base\n}\n\nfunc ParseMultipartFormDataBoundaryFromHeader(contentType string) string {\n\trx := regexp.MustCompile(`^multipart\/form-data.+boundary=\"?([^;\"]+)`)\n\tm := rx.FindStringSubmatch(contentType)\n\tif len(m) > 0 {\n\t\treturn m[1]\n\t}\n\treturn \"\"\n}\n\n\/\/ RateLimitInfo is a structure for holding parsed rate limit info.\n\/\/ It uses headers from the GitHub, RingCentral and Twitter APIs.\ntype RateLimitInfo struct {\n\tStatusCode int\n\tRetryAfter int\n\tXRateLimitLimit int\n\tXRateLimitRemaining int\n\tXRateLimitReset int\n\tXRateLimitWindow int\n}\n\n\/\/ NewResponseRateLimitInfo returns a RateLimitInfo from a http.Response.\nfunc NewResponseRateLimitInfo(resp *http.Response, useXrlHyphen bool) RateLimitInfo {\n\trlstat := RateLimitInfo{\n\t\tStatusCode: resp.StatusCode,\n\t\tRetryAfter: strconvutil.AtoiWithDefault(resp.Header.Get(\"Retry-After\"), 0)}\n\n\tif useXrlHyphen {\n\t\trlstat.XRateLimitLimit = strconvutil.AtoiWithDefault(resp.Header.Get(\"X-Rate-Limit-Limit\"), 0)\n\t\trlstat.XRateLimitRemaining = strconvutil.AtoiWithDefault(resp.Header.Get(\"X-Rate-Limit-Remaining\"), 0)\n\t\trlstat.XRateLimitReset = strconvutil.AtoiWithDefault(resp.Header.Get(\"X-Rate-Limit-Reset\"), 0)\n\t\trlstat.XRateLimitWindow = strconvutil.AtoiWithDefault(resp.Header.Get(\"X-Rate-Limit-Window\"), 0)\n\t} else {\n\t\trlstat.XRateLimitLimit = strconvutil.AtoiWithDefault(resp.Header.Get(\"X-RateLimit-Limit\"), 0)\n\t\trlstat.XRateLimitRemaining = strconvutil.AtoiWithDefault(resp.Header.Get(\"X-RateLimit-Remaining\"), 0)\n\t\trlstat.XRateLimitReset = strconvutil.AtoiWithDefault(resp.Header.Get(\"X-RateLimit-Reset\"), 0)\n\t\trlstat.XRateLimitWindow = strconvutil.AtoiWithDefault(resp.Header.Get(\"X-RateLimit-Window\"), 0)\n\t}\n\treturn rlstat\n}\n\ntype FnLogRateLimitInfo func(RateLimitInfo)\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/ninjasphere\/go-hue\"\n\t\"github.com\/ninjasphere\/go-ninja\/channels\"\n\t\"github.com\/ninjasphere\/go-ninja\/config\"\n\t\"github.com\/ninjasphere\/go-ninja\/logger\"\n)\n\nvar log = logger.GetLogger(\"hue\")\n\nfunc getBridge() *hue.Bridge {\n\tnobridge := true\n\tvar allbridges []*hue.Bridge\n\tvar err error\n\tfor nobridge {\n\t\tallbridges, err = hue.FindBridgesUsingCloud()\n\t\tif err != nil {\n\t\t\t\/\/log.Infof(\"Warning: Failed finding bridges using cloud (%s). Falling back to ssdp.\", err)\n\t\t\tallbridges, _ = hue.FindBridges()\n\t\t}\n\t\tif len(allbridges) == 0 {\n\t\t\ttime.Sleep(time.Second * 5) \/\/this sucks\n\t\t} else {\n\t\t\tnobridge = false\n\t\t\tlog.Infof(\"Found %d bridges: %s\", len(allbridges), allbridges)\n\t\t}\n\t}\n\treturn allbridges[0]\n}\n\nvar pushButtonNotification = channels.Notification{\n\tTitle: \"Please press the pairing button on your Philips Hue base-station\",\n\tSubtitle: \"New Hue base-station found\",\n\tPriority: channels.NotificationPriorityDefault,\n\tCategory: channels.NotificationCategorySuggestion,\n}\n\nfunc getUser(driver *HueDriver, bridge *hue.Bridge) *hue.User {\n\tvar user *hue.User\n\tvar err error\n\tnoUser := true\n\tretries := 0\n\tserial := config.Serial()\n\tusername := serial + serial \/\/username must be long 10-40 characters\n\tisvaliduser, err := bridge.IsValidUser(username)\n\tif err != nil {\n\t\tlog.Warningf(\"Problem determining if hue user is valid\")\n\t}\n\n\tvar notificationTime time.Time\n\n\tif isvaliduser {\n\t\tuser = hue.NewUserWithBridge(username, bridge)\n\t} else {\n\t\tfor noUser {\n\t\t\tuser, err = bridge.CreateUser(\"ninjadevice\", username)\n\t\t\tif err != nil {\n\t\t\t\tif strings.Contains(err.Error(), \"101\") { \/\/ there's probably a nicer way to check this\n\t\t\t\t\tretries++\n\t\t\t\t\tlog.Infof(\"Couldn't make user, push link button. Retry: %d\", retries)\n\n\t\t\t\t\tif time.Since(notificationTime) > time.Minute*5 {\n\t\t\t\t\t\tnotificationTime = time.Now()\n\t\t\t\t\t\tdriver.sendEvent(\"notification\", pushButtonNotification)\n\t\t\t\t\t}\n\n\t\t\t\t\ttime.Sleep(time.Second * 2) \/\/this sucks\n\n\t\t\t\t} else {\n\t\t\t\t\tlog.Debugf(\"Error creating user %s\", err)\n\t\t\t\t\ttime.Sleep(time.Second * 20)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif user != nil {\n\t\t\t\tnoUser = false\n\t\t\t}\n\t\t}\n\t}\n\treturn user\n}\n<commit_msg>quiet, you<commit_after>package main\n\nimport (\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/ninjasphere\/go-hue\"\n\t\"github.com\/ninjasphere\/go-ninja\/channels\"\n\t\"github.com\/ninjasphere\/go-ninja\/config\"\n\t\"github.com\/ninjasphere\/go-ninja\/logger\"\n)\n\nvar log = logger.GetLogger(\"hue\")\n\nfunc getBridge() *hue.Bridge {\n\tnobridge := true\n\tvar allbridges []*hue.Bridge\n\tvar err error\n\tfor nobridge {\n\t\tallbridges, err = hue.FindBridgesUsingCloud()\n\t\tif err != nil {\n\t\t\t\/\/log.Infof(\"Warning: Failed finding bridges using cloud (%s). Falling back to ssdp.\", err)\n\t\t\tallbridges, _ = hue.FindBridges()\n\t\t}\n\t\tif len(allbridges) == 0 {\n\t\t\ttime.Sleep(time.Second * 5) \/\/this sucks\n\t\t} else {\n\t\t\tnobridge = false\n\t\t\tlog.Infof(\"Found %d bridges: %s\", len(allbridges), allbridges)\n\t\t}\n\t}\n\treturn allbridges[0]\n}\n\nvar pushButtonNotification = channels.Notification{\n\tTitle: \"Please press the pairing button on your Philips Hue base-station\",\n\tSubtitle: \"New Hue base-station found\",\n\tPriority: channels.NotificationPriorityDefault,\n\tCategory: channels.NotificationCategorySuggestion,\n}\n\nfunc getUser(driver *HueDriver, bridge *hue.Bridge) *hue.User {\n\tvar user *hue.User\n\tvar err error\n\tnoUser := true\n\tretries := 0\n\tserial := config.Serial()\n\tusername := serial + serial \/\/username must be long 10-40 characters\n\tisvaliduser, err := bridge.IsValidUser(username)\n\tif err != nil {\n\t\tlog.Warningf(\"Problem determining if hue user is valid\")\n\t}\n\n\tvar notificationTime time.Time\n\n\tif isvaliduser {\n\t\tuser = hue.NewUserWithBridge(username, bridge)\n\t} else {\n\t\tfor noUser {\n\t\t\tuser, err = bridge.CreateUser(\"ninjadevice\", username)\n\t\t\tif err != nil {\n\t\t\t\tif strings.Contains(err.Error(), \"101\") { \/\/ there's probably a nicer way to check this\n\t\t\t\t\tretries++\n\t\t\t\t\tlog.Debugf(\"Couldn't make user, push link button. Retry: %d\", retries)\n\n\t\t\t\t\tif time.Since(notificationTime) > time.Minute*5 {\n\t\t\t\t\t\tnotificationTime = time.Now()\n\t\t\t\t\t\tdriver.sendEvent(\"notification\", pushButtonNotification)\n\t\t\t\t\t}\n\n\t\t\t\t\ttime.Sleep(time.Second * 2) \/\/this sucks\n\n\t\t\t\t} else {\n\t\t\t\t\tlog.Debugf(\"Error creating user %s\", err)\n\t\t\t\t\ttime.Sleep(time.Second * 20)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif user != nil {\n\t\t\t\tnoUser = false\n\t\t\t}\n\t\t}\n\t}\n\treturn user\n}\n<|endoftext|>"} {"text":"<commit_before>package bsw\n\nimport \"strings\"\n\n\/\/ MX returns the A record for an MX record for a domain.\nfunc MX(domain, serverAddr string) (string, Results, error) {\n\ttask := \"mx\"\n\tresults := Results{}\n\tservers, err := LookupMX(domain, serverAddr)\n\tif err != nil {\n\t\treturn task, results, err\n\t}\n\tfor _, s := range servers {\n\t\tip, err := LookupName(s, serverAddr)\n\t\tif err != nil || ip == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tresults = append(results, Result{\n\t\t\tSource: task,\n\t\t\tIP: ip,\n\t\t\tHostname: strings.TrimRight(s, \".\"),\n\t\t})\n\t}\n\treturn task, results, nil\n}\n<commit_msg>Minor cleanup<commit_after>package bsw\n\nimport (\n\t\"strings\"\n)\n\n\/\/ MX returns the A record for any MX records for a domain.\nfunc MX(domain, serverAddr string) (string, Results, error) {\n\ttask := \"mx\"\n\tresults := Results{}\n\tservers, err := LookupMX(domain, serverAddr)\n\tif err != nil {\n\t\treturn task, results, err\n\t}\n\tfor _, s := range servers {\n\t\tip, err := LookupName(s, serverAddr)\n\t\tif err != nil || ip == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tresults = append(results, Result{\n\t\t\tSource: task,\n\t\t\tIP: ip,\n\t\t\tHostname: strings.TrimRight(s, \".\"),\n\t\t})\n\t}\n\treturn task, results, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package beep\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\n\/\/ Format is the format of a Buffer or another audio source.\ntype Format struct {\n\t\/\/ SampleRate is the number of samples per second.\n\tSampleRate int\n\n\t\/\/ NumChannels is the number of channels. The value of 1 is mono, the value of 2 is stereo.\n\t\/\/ The samples should always be interleaved.\n\tNumChannels int\n\n\t\/\/ Precision is the number of bytes used to encode a single sample.\n\tPrecision int\n}\n\n\/\/ Width returns the number of bytes per one sample (all channels).\n\/\/\n\/\/ This is equal to f.NumChannels * f.Precision.\nfunc (f Format) Width() int {\n\treturn f.NumChannels * f.Precision\n}\n\n\/\/ Duration returns the duration of n samples in this format.\nfunc (f Format) Duration(n int) time.Duration {\n\treturn time.Second * time.Duration(n) \/ time.Duration(f.SampleRate)\n}\n\n\/\/ NumSamples returns the number of samples in this format which last for d duration.\nfunc (f Format) NumSamples(d time.Duration) int {\n\treturn int(d * time.Duration(f.SampleRate) \/ time.Second)\n}\n\n\/\/ EncodeSigned encodes a single sample in f.Width() bytes to p in signed format.\nfunc (f Format) EncodeSigned(p []byte, sample [2]float64) (n int) {\n\treturn f.encode(true, p, sample)\n}\n\n\/\/ EncodeUnsigned encodes a single sample in f.Width() bytes to p in unsigned format.\nfunc (f Format) EncodeUnsigned(p []byte, sample [2]float64) (n int) {\n\treturn f.encode(false, p, sample)\n}\n\n\/\/ DecodeSigned decodes a single sample encoded in f.Width() bytes from p in signed format.\nfunc (f Format) DecodeSigned(p []byte) (sample [2]float64, n int) {\n\treturn f.decode(true, p)\n}\n\n\/\/ DecodeUnsigned decodes a single sample encoded in f.Width() bytes from p in unsigned format.\nfunc (f Format) DecodeUnsigned(p []byte) (sample [2]float64, n int) {\n\treturn f.decode(false, p)\n}\n\nfunc (f Format) encode(signed bool, p []byte, sample [2]float64) (n int) {\n\tswitch {\n\tcase f.NumChannels == 1:\n\t\tx := norm((sample[0] + sample[1]) \/ 2)\n\t\tp = p[encodeFloat(signed, p, f.Precision, x):]\n\tcase f.NumChannels >= 2:\n\t\tfor c := range sample {\n\t\t\tx := norm(sample[c])\n\t\t\tp = p[encodeFloat(signed, p, f.Precision, x):]\n\t\t}\n\t\tfor c := len(sample); c < f.NumChannels; c++ {\n\t\t\tp = p[encodeFloat(signed, p, f.Precision, 0):]\n\t\t}\n\tdefault:\n\t\tpanic(fmt.Errorf(\"format: encode: invalid number of channels: %d\", f.NumChannels))\n\t}\n\treturn f.Width()\n}\n\nfunc (f Format) decode(signed bool, p []byte) (sample [2]float64, n int) {\n\tswitch {\n\tcase f.NumChannels == 1:\n\t\tx, _ := decodeFloat(signed, p, f.Precision)\n\t\treturn [2]float64{x, x}, f.Width()\n\tcase f.NumChannels >= 2:\n\t\tfor c := range sample {\n\t\t\tx, n := decodeFloat(signed, p, f.Precision)\n\t\t\tsample[c] = x\n\t\t\tp = p[n:]\n\t\t}\n\t\tfor c := len(sample); c < f.NumChannels; c++ {\n\t\t\t_, n := decodeFloat(signed, p, f.Precision)\n\t\t\tp = p[n:]\n\t\t}\n\t\treturn sample, f.Width()\n\tdefault:\n\t\tpanic(fmt.Errorf(\"format: decode: invalid number of channels: %d\", f.NumChannels))\n\t}\n}\n\nfunc encodeFloat(signed bool, p []byte, precision int, x float64) (n int) {\n\tvar xUint64 uint64\n\tif signed {\n\t\txUint64 = floatToSigned(precision, x)\n\t} else {\n\t\txUint64 = floatToUnsigned(precision, x)\n\t}\n\tfor i := 0; i < precision; i++ {\n\t\tp[i] = byte(xUint64)\n\t\txUint64 >>= 8\n\t}\n\treturn precision\n}\n\nfunc decodeFloat(signed bool, p []byte, precision int) (x float64, n int) {\n\tvar xUint64 uint64\n\tfor i := precision - 1; i >= 0; i-- {\n\t\txUint64 <<= 8\n\t\txUint64 += uint64(p[i])\n\t}\n\tif signed {\n\t\treturn signedToFloat(precision, xUint64), precision\n\t}\n\treturn unsignedToFloat(precision, xUint64), precision\n}\n\nfunc floatToSigned(precision int, x float64) uint64 {\n\tif x < 0 {\n\t\tcompl := uint64(-x * float64(uint64(1)<<uint(precision*8-1)-1))\n\t\treturn uint64(1<<uint(precision*8)) - compl\n\t}\n\treturn uint64(x * float64(uint64(1)<<uint(precision*8-1)-1))\n}\n\nfunc floatToUnsigned(precision int, x float64) uint64 {\n\treturn uint64((x + 1) \/ 2 * float64(uint64(1)<<uint(precision*8)-1))\n}\n\nfunc signedToFloat(precision int, xUint64 uint64) float64 {\n\tif xUint64 >= 1<<uint(precision*8-1) {\n\t\tcompl := 1<<uint(precision*8) - xUint64\n\t\treturn -float64(int64(compl)) \/ float64(uint64(1)<<uint(precision*8-1)-1)\n\t}\n\treturn float64(int64(xUint64)) \/ float64(uint64(1)<<uint(precision*8-1)-1)\n}\n\nfunc unsignedToFloat(precision int, xUint64 uint64) float64 {\n\treturn float64(xUint64)\/float64(uint(1)<<uint(precision*8)-1)*2 - 1\n}\n\nfunc norm(x float64) float64 {\n\tif x < -1 {\n\t\treturn -1\n\t}\n\tif x > +1 {\n\t\treturn +1\n\t}\n\treturn x\n}\n\ntype Buffer struct {\n\tf Format\n\tdata []byte\n}\n\nfunc NewBuffer(f Format) *Buffer {\n\treturn &Buffer{f: f}\n}\n\nfunc (b *Buffer) Format() Format {\n\treturn b.f\n}\n\nfunc (b *Buffer) Len() int {\n\treturn len(b.data)\n}\n\nfunc (b *Buffer) Duration() time.Duration {\n\treturn b.f.Duration(len(b.data) \/ b.f.Width())\n}\n\nfunc (b *Buffer) Append(s Streamer) {\n\tvar (\n\t\tsamples [512][2]float64\n\t\tp = make([]byte, b.f.Width())\n\t)\n\tfor {\n\t\tn, ok := s.Stream(samples[:])\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tfor _, sample := range samples[:n] {\n\t\t\tb.f.EncodeSigned(p, sample)\n\t\t\tb.data = append(b.data, p...)\n\t\t}\n\t}\n}\n\nfunc (b *Buffer) Streamer(from, to time.Duration) StreamSeeker {\n\tfromByte := b.f.NumSamples(from) * b.f.Width()\n\ttoByte := b.f.NumSamples(to) * b.f.Width()\n\treturn &bufferStreamer{\n\t\tf: b.f,\n\t\tdata: b.data[fromByte:toByte],\n\t\tpos: 0,\n\t}\n}\n\ntype bufferStreamer struct {\n\tf Format\n\tdata []byte\n\tpos int\n}\n\nfunc (bs *bufferStreamer) Stream(samples [][2]float64) (n int, ok bool) {\n\tif bs.pos >= len(bs.data) {\n\t\treturn 0, false\n\t}\n\tfor i := range samples {\n\t\tif bs.pos >= len(bs.data) {\n\t\t\tbreak\n\t\t}\n\t\tsample, advance := bs.f.DecodeSigned(bs.data[bs.pos:])\n\t\tsamples[i] = sample\n\t\tbs.pos += advance\n\t\tn++\n\t}\n\treturn n, true\n}\n\nfunc (bs *bufferStreamer) Err() error {\n\treturn nil\n}\n\nfunc (bs *bufferStreamer) Duration() time.Duration {\n\treturn bs.f.Duration(len(bs.data) \/ bs.f.Width())\n}\n\nfunc (bs *bufferStreamer) Position() time.Duration {\n\treturn bs.f.Duration(bs.pos \/ bs.f.Width())\n}\n\nfunc (bs *bufferStreamer) Seek(d time.Duration) error {\n\tif d < 0 || bs.Duration() < d {\n\t\treturn fmt.Errorf(\"buffer: seek duration %v out of range [%v, %v]\", d, 0, bs.Duration())\n\t}\n\tbs.pos = bs.f.NumSamples(d) * bs.f.Width()\n\treturn nil\n}\n<commit_msg>fix format encode\/decode<commit_after>package beep\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"time\"\n)\n\n\/\/ Format is the format of a Buffer or another audio source.\ntype Format struct {\n\t\/\/ SampleRate is the number of samples per second.\n\tSampleRate int\n\n\t\/\/ NumChannels is the number of channels. The value of 1 is mono, the value of 2 is stereo.\n\t\/\/ The samples should always be interleaved.\n\tNumChannels int\n\n\t\/\/ Precision is the number of bytes used to encode a single sample.\n\tPrecision int\n}\n\n\/\/ Width returns the number of bytes per one sample (all channels).\n\/\/\n\/\/ This is equal to f.NumChannels * f.Precision.\nfunc (f Format) Width() int {\n\treturn f.NumChannels * f.Precision\n}\n\n\/\/ Duration returns the duration of n samples in this format.\nfunc (f Format) Duration(n int) time.Duration {\n\treturn time.Second * time.Duration(n) \/ time.Duration(f.SampleRate)\n}\n\n\/\/ NumSamples returns the number of samples in this format which last for d duration.\nfunc (f Format) NumSamples(d time.Duration) int {\n\treturn int(d * time.Duration(f.SampleRate) \/ time.Second)\n}\n\n\/\/ EncodeSigned encodes a single sample in f.Width() bytes to p in signed format.\nfunc (f Format) EncodeSigned(p []byte, sample [2]float64) (n int) {\n\treturn f.encode(true, p, sample)\n}\n\n\/\/ EncodeUnsigned encodes a single sample in f.Width() bytes to p in unsigned format.\nfunc (f Format) EncodeUnsigned(p []byte, sample [2]float64) (n int) {\n\treturn f.encode(false, p, sample)\n}\n\n\/\/ DecodeSigned decodes a single sample encoded in f.Width() bytes from p in signed format.\nfunc (f Format) DecodeSigned(p []byte) (sample [2]float64, n int) {\n\treturn f.decode(true, p)\n}\n\n\/\/ DecodeUnsigned decodes a single sample encoded in f.Width() bytes from p in unsigned format.\nfunc (f Format) DecodeUnsigned(p []byte) (sample [2]float64, n int) {\n\treturn f.decode(false, p)\n}\n\nfunc (f Format) encode(signed bool, p []byte, sample [2]float64) (n int) {\n\tswitch {\n\tcase f.NumChannels == 1:\n\t\tx := norm((sample[0] + sample[1]) \/ 2)\n\t\tp = p[encodeFloat(signed, f.Precision, p, x):]\n\tcase f.NumChannels >= 2:\n\t\tfor c := range sample {\n\t\t\tx := norm(sample[c])\n\t\t\tp = p[encodeFloat(signed, f.Precision, p, x):]\n\t\t}\n\t\tfor c := len(sample); c < f.NumChannels; c++ {\n\t\t\tp = p[encodeFloat(signed, f.Precision, p, 0):]\n\t\t}\n\tdefault:\n\t\tpanic(fmt.Errorf(\"format: encode: invalid number of channels: %d\", f.NumChannels))\n\t}\n\treturn f.Width()\n}\n\nfunc (f Format) decode(signed bool, p []byte) (sample [2]float64, n int) {\n\tswitch {\n\tcase f.NumChannels == 1:\n\t\tx, _ := decodeFloat(signed, f.Precision, p)\n\t\treturn [2]float64{x, x}, f.Width()\n\tcase f.NumChannels >= 2:\n\t\tfor c := range sample {\n\t\t\tx, n := decodeFloat(signed, f.Precision, p)\n\t\t\tsample[c] = x\n\t\t\tp = p[n:]\n\t\t}\n\t\tfor c := len(sample); c < f.NumChannels; c++ {\n\t\t\t_, n := decodeFloat(signed, f.Precision, p)\n\t\t\tp = p[n:]\n\t\t}\n\t\treturn sample, f.Width()\n\tdefault:\n\t\tpanic(fmt.Errorf(\"format: decode: invalid number of channels: %d\", f.NumChannels))\n\t}\n}\n\nfunc encodeFloat(signed bool, precision int, p []byte, x float64) (n int) {\n\tvar xUint64 uint64\n\tif signed {\n\t\txUint64 = floatToSigned(precision, x)\n\t} else {\n\t\txUint64 = floatToUnsigned(precision, x)\n\t}\n\tfor i := 0; i < precision; i++ {\n\t\tp[i] = byte(xUint64)\n\t\txUint64 >>= 8\n\t}\n\treturn precision\n}\n\nfunc decodeFloat(signed bool, precision int, p []byte) (x float64, n int) {\n\tvar xUint64 uint64\n\tfor i := precision - 1; i >= 0; i-- {\n\t\txUint64 <<= 8\n\t\txUint64 += uint64(p[i])\n\t}\n\tif signed {\n\t\treturn signedToFloat(precision, xUint64), precision\n\t}\n\treturn unsignedToFloat(precision, xUint64), precision\n}\n\nfunc floatToSigned(precision int, x float64) uint64 {\n\tif x < 0 {\n\t\tcompl := uint64(-x * (math.Exp2(float64(precision)*8-1) - 1))\n\t\treturn uint64(1<<uint(precision*8)) - compl\n\t}\n\treturn uint64(x * (math.Exp2(float64(precision)*8-1) - 1))\n}\n\nfunc floatToUnsigned(precision int, x float64) uint64 {\n\treturn uint64((x + 1) \/ 2 * (math.Exp2(float64(precision)*8) - 1))\n}\n\nfunc signedToFloat(precision int, xUint64 uint64) float64 {\n\tif xUint64 >= 1<<uint(precision*8-1) {\n\t\tcompl := 1<<uint(precision*8) - xUint64\n\t\treturn -float64(int64(compl)) \/ (math.Exp2(float64(precision)*8-1) - 1)\n\t}\n\treturn float64(int64(xUint64)) \/ (math.Exp2(float64(precision)*8-1) - 1)\n}\n\nfunc unsignedToFloat(precision int, xUint64 uint64) float64 {\n\treturn float64(xUint64)\/(math.Exp2(float64(precision)*8)-1)*2 - 1\n}\n\nfunc norm(x float64) float64 {\n\tif x < -1 {\n\t\treturn -1\n\t}\n\tif x > +1 {\n\t\treturn +1\n\t}\n\treturn x\n}\n\ntype Buffer struct {\n\tf Format\n\tdata []byte\n\ttmp []byte\n}\n\nfunc NewBuffer(f Format) *Buffer {\n\treturn &Buffer{f: f, tmp: make([]byte, f.Width())}\n}\n\nfunc (b *Buffer) Format() Format {\n\treturn b.f\n}\n\nfunc (b *Buffer) Duration() time.Duration {\n\treturn b.f.Duration(len(b.data) \/ b.f.Width())\n}\n\nfunc (b *Buffer) Pop(d time.Duration) {\n\tif d > b.Duration() {\n\t\td = b.Duration()\n\t}\n\tn := b.f.NumSamples(d)\n\tb.data = b.data[n:]\n}\n\nfunc (b *Buffer) Append(s Streamer) {\n\tvar samples [512][2]float64\n\tfor {\n\t\tn, ok := s.Stream(samples[:])\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tfor _, sample := range samples[:n] {\n\t\t\tb.f.EncodeSigned(b.tmp, sample)\n\t\t\tb.data = append(b.data, b.tmp...)\n\t\t}\n\t}\n}\n\nfunc (b *Buffer) Streamer(from, to time.Duration) StreamSeeker {\n\tfromByte := b.f.NumSamples(from) * b.f.Width()\n\ttoByte := b.f.NumSamples(to) * b.f.Width()\n\treturn &bufferStreamer{\n\t\tf: b.f,\n\t\tdata: b.data[fromByte:toByte],\n\t\tpos: 0,\n\t}\n}\n\ntype bufferStreamer struct {\n\tf Format\n\tdata []byte\n\tpos int\n}\n\nfunc (bs *bufferStreamer) Stream(samples [][2]float64) (n int, ok bool) {\n\tif bs.pos >= len(bs.data) {\n\t\treturn 0, false\n\t}\n\tfor i := range samples {\n\t\tif bs.pos >= len(bs.data) {\n\t\t\tbreak\n\t\t}\n\t\tsample, advance := bs.f.DecodeSigned(bs.data[bs.pos:])\n\t\tsamples[i] = sample\n\t\tbs.pos += advance\n\t\tn++\n\t}\n\treturn n, true\n}\n\nfunc (bs *bufferStreamer) Err() error {\n\treturn nil\n}\n\nfunc (bs *bufferStreamer) Duration() time.Duration {\n\treturn bs.f.Duration(len(bs.data) \/ bs.f.Width())\n}\n\nfunc (bs *bufferStreamer) Position() time.Duration {\n\treturn bs.f.Duration(bs.pos \/ bs.f.Width())\n}\n\nfunc (bs *bufferStreamer) Seek(d time.Duration) error {\n\tif d < 0 || bs.Duration() < d {\n\t\treturn fmt.Errorf(\"buffer: seek duration %v out of range [%v, %v]\", d, 0, bs.Duration())\n\t}\n\tbs.pos = bs.f.NumSamples(d) * bs.f.Width()\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ package multibuf implements buffer optimized for streaming large chunks of data,\n\/\/ multiple reads and optional partial buffering to disk.\npackage multibuf\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n)\n\n\/\/ MultiBuf provides Read, Close, Seek and Size methods. In addition to that it supports WriterTo interface\n\/\/ to provide efficient writing schemes, as functions like io.Copy use WriterTo when it's available.\ntype MultiBuf interface {\n\tio.Reader\n\tio.Seeker\n\tio.Closer\n\tio.WriterTo\n\n\t\/\/ Size calculates and returns the total size of the reader and not the length remaining.\n\tSize() (int64, error)\n}\n\n\/\/ MaxBytes, ignored if set to value >=, if request exceeds the specified limit, the reader will return error,\n\/\/ by default buffer is not limited\nfunc MaxBytes(m int64) optionSetter {\n\treturn func(o *options) error {\n\t\tif m <= 0 {\n\t\t\treturn fmt.Errorf(\"MaxSizeBytes should be > 0\")\n\t\t}\n\t\to.maxSizeBytes = m\n\t\treturn nil\n\t}\n}\n\n\/\/ MemBytes specifies the largest buffer to hold in RAM before writing to disk, default is 1MB\nfunc MemBytes(m int64) optionSetter {\n\treturn func(o *options) error {\n\t\tif m < 0 {\n\t\t\treturn fmt.Errorf(\"MemBytes should be >= 0\")\n\t\t}\n\t\to.maxSizeBytes = m\n\t\treturn nil\n\t}\n}\n\n\/\/ New returns MultiBuf that can limit the size of the buffer and persist large buffers to disk.\n\/\/ By default New returns unbound buffer that will read up to 1MB in RAM and will start buffering to disk\n\/\/ It supports multiple functional optional arguments:\n\/\/\n\/\/ \/\/ Buffer up to 1MB in RAM and limit max buffer size to 20MB\n\/\/ multibuf.New(r, multibuf.MemBytes(1024 * 1024), multibuf.MaxBytes(1024 * 1024 * 20))\n\/\/\n\/\/\nfunc New(input io.Reader, setters ...optionSetter) (MultiBuf, error) {\n\to := options{\n\t\tmemBytes: DefaultMemBytes,\n\t\tmaxSizeBytes: DefaultMaxSizeBytes,\n\t}\n\n\tfor _, s := range setters {\n\t\tif err := s(&o); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tmemReader := &io.LimitedReader{\n\t\tR: input, \/\/ Read from this reader\n\t\tN: o.memBytes, \/\/ Maximum amount of data to read\n\t}\n\treaders := make([]io.ReadSeeker, 0, 2)\n\n\tbuffer, err := ioutil.ReadAll(memReader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treaders = append(readers, bytes.NewReader(buffer))\n\n\tvar file *os.File\n\t\/\/ This means that we have exceeded all the memory capacity and we will start buffering the body to disk.\n\ttotalBytes := int64(len(buffer))\n\tif memReader.N <= 0 {\n\t\tfile, err = ioutil.TempFile(\"\", \"multibuf-\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tos.Remove(file.Name())\n\n\t\treadSrc := input\n\t\tif o.maxSizeBytes > 0 {\n\t\t\treadSrc = &maxReader{R: input, Max: o.maxSizeBytes - o.memBytes}\n\t\t}\n\n\t\twrittenBytes, err := io.Copy(file, readSrc)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttotalBytes += writtenBytes\n\t\tfile.Seek(0, 0)\n\t\treaders = append(readers, file)\n\t}\n\n\tvar cleanupFn cleanupFunc\n\tif file != nil {\n\t\tcleanupFn = func() error {\n\t\t\tfile.Close()\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn newBuf(totalBytes, cleanupFn, readers...), nil\n}\n\n\/\/ MaxSizeReachedError is returned when the maximum allowed buffer size is reached when reading\ntype MaxSizeReachedError struct {\n\tMaxSize int64\n}\n\nfunc (e *MaxSizeReachedError) Error() string {\n\treturn fmt.Sprintf(\"Maximum size %d was reached\", e)\n}\n\nconst (\n\tDefaultMemBytes = 1048576\n\tDefaultMaxSizeBytes = -1\n\t\/\/ Equivalent of bytes.MinRead used in ioutil.ReadAll\n\tDefaultBufferBytes = 512\n)\n\n\/\/ Constraints:\n\/\/ - Implements io.Reader\n\/\/ - Implements Seek(0, 0)\n\/\/\t- Designed for Write once, Read many times.\ntype multiReaderSeek struct {\n\tlength int64\n\treaders []io.ReadSeeker\n\tmr io.Reader\n\tcleanup cleanupFunc\n}\n\ntype cleanupFunc func() error\n\nfunc newBuf(length int64, cleanup cleanupFunc, readers ...io.ReadSeeker) *multiReaderSeek {\n\tconverted := make([]io.Reader, len(readers))\n\tfor i, r := range readers {\n\t\t\/\/ This conversion is safe as ReadSeeker includes Reader\n\t\tconverted[i] = r.(io.Reader)\n\t}\n\n\treturn &multiReaderSeek{\n\t\tlength: length,\n\t\treaders: readers,\n\t\tmr: io.MultiReader(converted...),\n\t\tcleanup: cleanup,\n\t}\n}\n\nfunc (mr *multiReaderSeek) Close() (err error) {\n\tif mr.cleanup != nil {\n\t\treturn mr.cleanup()\n\t}\n\treturn nil\n}\n\nfunc (mr *multiReaderSeek) WriteTo(w io.Writer) (int64, error) {\n\tb := make([]byte, DefaultBufferBytes)\n\tvar total int64\n\tfor {\n\t\tn, err := mr.mr.Read(b)\n\t\t\/\/ Recommended way is to always handle non 0 reads despite the errors\n\t\tif n > 0 {\n\t\t\tnw, errw := w.Write(b[:n])\n\t\t\ttotal += int64(nw)\n\t\t\t\/\/ Write must return a non-nil error if it returns nw < n\n\t\t\tif nw != n || errw != nil {\n\t\t\t\treturn total, errw\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\treturn total, nil\n\t\t\t}\n\t\t\treturn total, err\n\t\t}\n\t}\n}\n\nfunc (mr *multiReaderSeek) Read(p []byte) (n int, err error) {\n\treturn mr.mr.Read(p)\n}\n\nfunc (mr *multiReaderSeek) Size() (int64, error) {\n\treturn mr.length, nil\n}\n\nfunc (mr *multiReaderSeek) Seek(offset int64, whence int) (int64, error) {\n\t\/\/ TODO: implement other whence\n\t\/\/ TODO: implement real offsets\n\n\tif whence != 0 {\n\t\treturn 0, fmt.Errorf(\"multiReaderSeek: unsupported whence\")\n\t}\n\n\tif offset != 0 {\n\t\treturn 0, fmt.Errorf(\"multiReaderSeek: unsupported offset\")\n\t}\n\n\tfor _, seeker := range mr.readers {\n\t\tseeker.Seek(0, 0)\n\t}\n\n\tior := make([]io.Reader, len(mr.readers))\n\tfor i, arg := range mr.readers {\n\t\tior[i] = arg.(io.Reader)\n\t}\n\tmr.mr = io.MultiReader(ior...)\n\n\treturn 0, nil\n}\n\ntype options struct {\n\t\/\/ MemBufferBytes sets up the size of the memory buffer for this request.\n\t\/\/ If the data size exceeds the limit, the remaining request part will be saved on the file system.\n\tmemBytes int64\n\n\tmaxSizeBytes int64\n}\n\ntype optionSetter func(o *options) error\n\n\/\/ MaxReader does not allow to read more than Max bytes and returns error if this limit has been exceeded.\ntype maxReader struct {\n\tR io.Reader \/\/ underlying reader\n\tN int64 \/\/ bytes read\n\tMax int64 \/\/ max bytes to read\n}\n\nfunc (r *maxReader) Read(p []byte) (int, error) {\n\treadBytes, err := r.R.Read(p)\n\tif err != nil && err != io.EOF {\n\t\treturn readBytes, err\n\t}\n\n\tr.N += int64(readBytes)\n\tif r.N > r.Max {\n\t\treturn readBytes, &MaxSizeReachedError{MaxSize: r.Max}\n\t}\n\treturn readBytes, err\n}\n<commit_msg>Fix behavior<commit_after>\/\/ package multibuf implements buffer optimized for streaming large chunks of data,\n\/\/ multiple reads and optional partial buffering to disk.\npackage multibuf\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n)\n\n\/\/ MultiBuf provides Read, Close, Seek and Size methods. In addition to that it supports WriterTo interface\n\/\/ to provide efficient writing schemes, as functions like io.Copy use WriterTo when it's available.\ntype MultiBuf interface {\n\tio.Reader\n\tio.Seeker\n\tio.Closer\n\tio.WriterTo\n\n\t\/\/ Size calculates and returns the total size of the reader and not the length remaining.\n\tSize() (int64, error)\n}\n\n\/\/ MaxBytes, ignored if set to value >=, if request exceeds the specified limit, the reader will return error,\n\/\/ by default buffer is not limited, negative values mean no limit\nfunc MaxBytes(m int64) optionSetter {\n\treturn func(o *options) error {\n\t\to.maxSizeBytes = m\n\t\treturn nil\n\t}\n}\n\n\/\/ MemBytes specifies the largest buffer to hold in RAM before writing to disk, default is 1MB\nfunc MemBytes(m int64) optionSetter {\n\treturn func(o *options) error {\n\t\tif m < 0 {\n\t\t\treturn fmt.Errorf(\"MemBytes should be >= 0\")\n\t\t}\n\t\to.maxSizeBytes = m\n\t\treturn nil\n\t}\n}\n\n\/\/ New returns MultiBuf that can limit the size of the buffer and persist large buffers to disk.\n\/\/ By default New returns unbound buffer that will read up to 1MB in RAM and will start buffering to disk\n\/\/ It supports multiple functional optional arguments:\n\/\/\n\/\/ \/\/ Buffer up to 1MB in RAM and limit max buffer size to 20MB\n\/\/ multibuf.New(r, multibuf.MemBytes(1024 * 1024), multibuf.MaxBytes(1024 * 1024 * 20))\n\/\/\n\/\/\nfunc New(input io.Reader, setters ...optionSetter) (MultiBuf, error) {\n\to := options{\n\t\tmemBytes: DefaultMemBytes,\n\t\tmaxSizeBytes: DefaultMaxSizeBytes,\n\t}\n\n\tfor _, s := range setters {\n\t\tif err := s(&o); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tmemReader := &io.LimitedReader{\n\t\tR: input, \/\/ Read from this reader\n\t\tN: o.memBytes, \/\/ Maximum amount of data to read\n\t}\n\treaders := make([]io.ReadSeeker, 0, 2)\n\n\tbuffer, err := ioutil.ReadAll(memReader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treaders = append(readers, bytes.NewReader(buffer))\n\n\tvar file *os.File\n\t\/\/ This means that we have exceeded all the memory capacity and we will start buffering the body to disk.\n\ttotalBytes := int64(len(buffer))\n\tif memReader.N <= 0 {\n\t\tfile, err = ioutil.TempFile(\"\", \"multibuf-\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tos.Remove(file.Name())\n\n\t\treadSrc := input\n\t\tif o.maxSizeBytes > 0 {\n\t\t\treadSrc = &maxReader{R: input, Max: o.maxSizeBytes - o.memBytes}\n\t\t}\n\n\t\twrittenBytes, err := io.Copy(file, readSrc)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\ttotalBytes += writtenBytes\n\t\tfile.Seek(0, 0)\n\t\treaders = append(readers, file)\n\t}\n\n\tvar cleanupFn cleanupFunc\n\tif file != nil {\n\t\tcleanupFn = func() error {\n\t\t\tfile.Close()\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn newBuf(totalBytes, cleanupFn, readers...), nil\n}\n\n\/\/ MaxSizeReachedError is returned when the maximum allowed buffer size is reached when reading\ntype MaxSizeReachedError struct {\n\tMaxSize int64\n}\n\nfunc (e *MaxSizeReachedError) Error() string {\n\treturn fmt.Sprintf(\"Maximum size %d was reached\", e)\n}\n\nconst (\n\tDefaultMemBytes = 1048576\n\tDefaultMaxSizeBytes = -1\n\t\/\/ Equivalent of bytes.MinRead used in ioutil.ReadAll\n\tDefaultBufferBytes = 512\n)\n\n\/\/ Constraints:\n\/\/ - Implements io.Reader\n\/\/ - Implements Seek(0, 0)\n\/\/\t- Designed for Write once, Read many times.\ntype multiReaderSeek struct {\n\tlength int64\n\treaders []io.ReadSeeker\n\tmr io.Reader\n\tcleanup cleanupFunc\n}\n\ntype cleanupFunc func() error\n\nfunc newBuf(length int64, cleanup cleanupFunc, readers ...io.ReadSeeker) *multiReaderSeek {\n\tconverted := make([]io.Reader, len(readers))\n\tfor i, r := range readers {\n\t\t\/\/ This conversion is safe as ReadSeeker includes Reader\n\t\tconverted[i] = r.(io.Reader)\n\t}\n\n\treturn &multiReaderSeek{\n\t\tlength: length,\n\t\treaders: readers,\n\t\tmr: io.MultiReader(converted...),\n\t\tcleanup: cleanup,\n\t}\n}\n\nfunc (mr *multiReaderSeek) Close() (err error) {\n\tif mr.cleanup != nil {\n\t\treturn mr.cleanup()\n\t}\n\treturn nil\n}\n\nfunc (mr *multiReaderSeek) WriteTo(w io.Writer) (int64, error) {\n\tb := make([]byte, DefaultBufferBytes)\n\tvar total int64\n\tfor {\n\t\tn, err := mr.mr.Read(b)\n\t\t\/\/ Recommended way is to always handle non 0 reads despite the errors\n\t\tif n > 0 {\n\t\t\tnw, errw := w.Write(b[:n])\n\t\t\ttotal += int64(nw)\n\t\t\t\/\/ Write must return a non-nil error if it returns nw < n\n\t\t\tif nw != n || errw != nil {\n\t\t\t\treturn total, errw\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\treturn total, nil\n\t\t\t}\n\t\t\treturn total, err\n\t\t}\n\t}\n}\n\nfunc (mr *multiReaderSeek) Read(p []byte) (n int, err error) {\n\treturn mr.mr.Read(p)\n}\n\nfunc (mr *multiReaderSeek) Size() (int64, error) {\n\treturn mr.length, nil\n}\n\nfunc (mr *multiReaderSeek) Seek(offset int64, whence int) (int64, error) {\n\t\/\/ TODO: implement other whence\n\t\/\/ TODO: implement real offsets\n\n\tif whence != 0 {\n\t\treturn 0, fmt.Errorf(\"multiReaderSeek: unsupported whence\")\n\t}\n\n\tif offset != 0 {\n\t\treturn 0, fmt.Errorf(\"multiReaderSeek: unsupported offset\")\n\t}\n\n\tfor _, seeker := range mr.readers {\n\t\tseeker.Seek(0, 0)\n\t}\n\n\tior := make([]io.Reader, len(mr.readers))\n\tfor i, arg := range mr.readers {\n\t\tior[i] = arg.(io.Reader)\n\t}\n\tmr.mr = io.MultiReader(ior...)\n\n\treturn 0, nil\n}\n\ntype options struct {\n\t\/\/ MemBufferBytes sets up the size of the memory buffer for this request.\n\t\/\/ If the data size exceeds the limit, the remaining request part will be saved on the file system.\n\tmemBytes int64\n\n\tmaxSizeBytes int64\n}\n\ntype optionSetter func(o *options) error\n\n\/\/ MaxReader does not allow to read more than Max bytes and returns error if this limit has been exceeded.\ntype maxReader struct {\n\tR io.Reader \/\/ underlying reader\n\tN int64 \/\/ bytes read\n\tMax int64 \/\/ max bytes to read\n}\n\nfunc (r *maxReader) Read(p []byte) (int, error) {\n\treadBytes, err := r.R.Read(p)\n\tif err != nil && err != io.EOF {\n\t\treturn readBytes, err\n\t}\n\n\tr.N += int64(readBytes)\n\tif r.N > r.Max {\n\t\treturn readBytes, &MaxSizeReachedError{MaxSize: r.Max}\n\t}\n\treturn readBytes, err\n}\n<|endoftext|>"} {"text":"<commit_before>package buffer\n\nimport (\n\t\"io\"\n)\n\nconst (\n\tmaxUint = ^uint32(0)\n\tmaxInt = int32(maxUint >> 1)\n\tmaxUint64 = ^uint64(0)\n\tmaxInt64 = int64(maxUint64 >> 1)\n\tmaxAlloc = int64(1024 * 32)\n)\n\ntype Buffer interface {\n\tLen() int64\n\tCap() int64\n\tio.Reader\n\tio.Writer\n}\n\ntype buffer struct {\n\thead []byte\n\tBuffer\n}\n\nfunc MaxCap() int64 {\n\treturn maxInt64\n}\n\nfunc LimitAlloc(n int64) int64 {\n\tif n > maxAlloc {\n\t\treturn maxAlloc\n\t}\n\treturn n\n}\n\nfunc Gap(buf Buffer) int64 {\n\treturn buf.Cap() - buf.Len()\n}\n\nfunc Full(buf Buffer) bool {\n\treturn Gap(buf) == 0\n}\n\ntype Lener interface {\n\tLen() int64\n}\n\nfunc Empty(l Lener) bool {\n\treturn l.Len() == 0\n}\n\nfunc RoomFor(buf Buffer, p []byte) bool {\n\treturn Gap(buf) >= int64(len(p))\n}\n\nfunc ShrinkToRead(buf Buffer, p []byte) []byte {\n\tif buf.Len() < int64(len(p)) {\n\t\treturn p[:buf.Len()]\n\t}\n\treturn p\n}\n\nfunc ShrinkToFit(buf Buffer, p []byte) []byte {\n\tif !RoomFor(buf, p) {\n\t\tp = p[:Gap(buf)]\n\t}\n\treturn p\n}\n\nfunc TotalLen(buffers []Buffer) (n int64) {\n\tfor _, buffer := range buffers {\n\t\tif n > MaxCap()-buffer.Len() {\n\t\t\treturn MaxCap()\n\t\t} else {\n\t\t\tn += buffer.Len()\n\t\t}\n\t}\n\treturn n\n}\n\nfunc TotalCap(buffers []Buffer) (n int64) {\n\tfor _, buffer := range buffers {\n\t\tif n > MaxCap()-buffer.Cap() {\n\t\t\treturn MaxCap()\n\t\t} else {\n\t\t\tn += buffer.Cap()\n\t\t}\n\t}\n\treturn n\n}\n\nfunc NewUnboundedBuffer(mem, chunk int64) Buffer {\n\treturn NewMulti(New(mem), NewPartition(chunk, NewFile))\n}\n<commit_msg>Made buffer functions more generic<commit_after>package buffer\n\nimport (\n\t\"io\"\n)\n\nconst (\n\tmaxUint = ^uint32(0)\n\tmaxInt = int32(maxUint >> 1)\n\tmaxUint64 = ^uint64(0)\n\tmaxInt64 = int64(maxUint64 >> 1)\n\tmaxAlloc = int64(1024 * 32)\n)\n\ntype Lener interface {\n\tLen() int64\n}\n\ntype Caper interface {\n\tCap() int64\n}\n\ntype LenCaper interface {\n\tLener\n\tCaper\n}\n\ntype Buffer interface {\n\tLener\n\tCaper\n\tio.Reader\n\tio.Writer\n}\n\ntype buffer struct {\n\thead []byte\n\tBuffer\n}\n\nfunc MaxCap() int64 {\n\treturn maxInt64\n}\n\nfunc LimitAlloc(n int64) int64 {\n\tif n > maxAlloc {\n\t\treturn maxAlloc\n\t}\n\treturn n\n}\n\nfunc Gap(buf LenCaper) int64 {\n\treturn buf.Cap() - buf.Len()\n}\n\nfunc Full(buf LenCaper) bool {\n\treturn Gap(buf) == 0\n}\n\nfunc Empty(l Lener) bool {\n\treturn l.Len() == 0\n}\n\nfunc RoomFor(buf LenCaper, p []byte) bool {\n\treturn Gap(buf) >= int64(len(p))\n}\n\nfunc ShrinkToRead(buf Lener, p []byte) []byte {\n\tif buf.Len() < int64(len(p)) {\n\t\treturn p[:buf.Len()]\n\t}\n\treturn p\n}\n\nfunc ShrinkToFit(buf LenCaper, p []byte) []byte {\n\tif !RoomFor(buf, p) {\n\t\tp = p[:Gap(buf)]\n\t}\n\treturn p\n}\n\nfunc TotalLen(buffers []Buffer) (n int64) {\n\tfor _, buffer := range buffers {\n\t\tif n > MaxCap()-buffer.Len() {\n\t\t\treturn MaxCap()\n\t\t} else {\n\t\t\tn += buffer.Len()\n\t\t}\n\t}\n\treturn n\n}\n\nfunc TotalCap(buffers []Buffer) (n int64) {\n\tfor _, buffer := range buffers {\n\t\tif n > MaxCap()-buffer.Cap() {\n\t\t\treturn MaxCap()\n\t\t} else {\n\t\t\tn += buffer.Cap()\n\t\t}\n\t}\n\treturn n\n}\n\nfunc NewUnboundedBuffer(mem, chunk int64) Buffer {\n\treturn NewMulti(New(mem), NewPartition(chunk, NewFile))\n}\n<|endoftext|>"} {"text":"<commit_before>package kinesis\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"text\/template\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/kinesis\"\n\t\"github.com\/gliderlabs\/logspout\/router\"\n)\n\n\/\/ PutRecordsLimit is the maximum number of records allowed for a PutRecords request.\nvar PutRecordsLimit = 500\n\n\/\/ RecordSizeLimit is the maximum allowed size per record.\nvar RecordSizeLimit int = 1 * 1024 * 1024 \/\/ 1MB\n\n\/\/ PutRecordsSizeLimit is the maximum allowed size per PutRecords request.\nvar PutRecordsSizeLimit int = 5 * 1024 * 1024 \/\/ 5MB\n\ntype recordBuffer struct {\n\tclient *kinesis.Kinesis\n\tpKeyTmpl *template.Template\n\tinput *kinesis.PutRecordsInput\n\tcount int\n\tbyteSize int\n}\n\nfunc newRecordBuffer(client *kinesis.Kinesis, streamName string) (*recordBuffer, error) {\n\tpKeyTmpl, err := pKeyTmpl()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinput := &kinesis.PutRecordsInput{\n\t\tStreamName: aws.String(streamName),\n\t\tRecords: make([]*kinesis.PutRecordsRequestEntry, 0),\n\t}\n\n\treturn &recordBuffer{\n\t\tclient: client,\n\t\tpKeyTmpl: pKeyTmpl,\n\t\tinput: input,\n\t}, nil\n}\n\nfunc pKeyTmpl() (*template.Template, error) {\n\tpKeyTmplString := os.Getenv(\"KINESIS_PARTITION_KEY_TEMPLATE\")\n\tif pKeyTmplString == \"\" {\n\t\treturn nil, errors.New(\"The partition key template is missing. Please set the KINESIS_PARTITION_KEY_TEMPLATE env variable\")\n\t}\n\n\tpKeyTmpl, err := template.New(\"kinesisPartitionKey\").Parse(pKeyTmplString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn pKeyTmpl, nil\n}\n\nfunc (r *recordBuffer) Add(m *router.Message) error {\n\tdata := m.Data\n\tdataLen := len(data)\n\n\t\/\/ This record is too large, we can't submit it to kinesis.\n\tif dataLen > RecordSizeLimit {\n\t\treturn errors.New(fmt.Sprintf(\"recordBuffer.Add: log data byte size (%d) is over the limit.\", dataLen))\n\t}\n\n\t\/\/ Adding this event would make our request have too many records. Flush first.\n\tif r.count+1 > PutRecordsLimit {\n\t\terr := r.Flush()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Adding this event would make our request too large. Flush first.\n\tif r.byteSize+dataLen > PutRecordsSizeLimit {\n\t\terr := r.Flush()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Partition key\n\tpKey, err := pKey(r.pKeyTmpl, m)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Add to count\n\tr.count += 1\n\n\t\/\/ Add data and partition key size to byteSize\n\tr.byteSize += dataLen + len(pKey)\n\n\t\/\/ Add record\n\tr.input.Records = append(r.input.Records, &kinesis.PutRecordsRequestEntry{\n\t\tData: []byte(data),\n\t\tPartitionKey: aws.String(pKey),\n\t})\n\n\treturn nil\n}\n\nfunc (r *recordBuffer) Flush() error {\n\tif r.count == 0 {\n\t\treturn nil\n\t}\n\n\tdefer r.reset()\n\n\t_, err := r.client.PutRecords(r.input)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (r *recordBuffer) reset() {\n\tr.count = 0\n\tr.byteSize = 0\n\tr.input.Records = make([]*kinesis.PutRecordsRequestEntry, 0)\n}\n\nfunc pKey(tmpl *template.Template, m *router.Message) (string, error) {\n\tvar pKey bytes.Buffer\n\terr := tmpl.Execute(&pKey, m)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn pKey.String(), nil\n}\n<commit_msg>Add a mutex to prevent race condition bt Flush() and reset().<commit_after>package kinesis\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"text\/template\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/kinesis\"\n\t\"github.com\/gliderlabs\/logspout\/router\"\n)\n\n\/\/ PutRecordsLimit is the maximum number of records allowed for a PutRecords request.\nvar PutRecordsLimit = 500\n\n\/\/ RecordSizeLimit is the maximum allowed size per record.\nvar RecordSizeLimit int = 1 * 1024 * 1024 \/\/ 1MB\n\n\/\/ PutRecordsSizeLimit is the maximum allowed size per PutRecords request.\nvar PutRecordsSizeLimit int = 5 * 1024 * 1024 \/\/ 5MB\n\n\/\/ Prevent Flush() and reset() race condition.\nvar mutex sync.Mutex\n\ntype recordBuffer struct {\n\tclient *kinesis.Kinesis\n\tpKeyTmpl *template.Template\n\tinput *kinesis.PutRecordsInput\n\tcount int\n\tbyteSize int\n}\n\nfunc newRecordBuffer(client *kinesis.Kinesis, streamName string) (*recordBuffer, error) {\n\tpKeyTmpl, err := pKeyTmpl()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinput := &kinesis.PutRecordsInput{\n\t\tStreamName: aws.String(streamName),\n\t\tRecords: make([]*kinesis.PutRecordsRequestEntry, 0),\n\t}\n\n\treturn &recordBuffer{\n\t\tclient: client,\n\t\tpKeyTmpl: pKeyTmpl,\n\t\tinput: input,\n\t}, nil\n}\n\nfunc pKeyTmpl() (*template.Template, error) {\n\tpKeyTmplString := os.Getenv(\"KINESIS_PARTITION_KEY_TEMPLATE\")\n\tif pKeyTmplString == \"\" {\n\t\treturn nil, errors.New(\"The partition key template is missing. Please set the KINESIS_PARTITION_KEY_TEMPLATE env variable\")\n\t}\n\n\tpKeyTmpl, err := template.New(\"kinesisPartitionKey\").Parse(pKeyTmplString)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn pKeyTmpl, nil\n}\n\nfunc (r *recordBuffer) Add(m *router.Message) error {\n\tdata := m.Data\n\tdataLen := len(data)\n\n\t\/\/ This record is too large, we can't submit it to kinesis.\n\tif dataLen > RecordSizeLimit {\n\t\treturn errors.New(fmt.Sprintf(\"recordBuffer.Add: log data byte size (%d) is over the limit.\", dataLen))\n\t}\n\n\t\/\/ Adding this event would make our request have too many records. Flush first.\n\tif r.count+1 > PutRecordsLimit {\n\t\terr := r.Flush()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Adding this event would make our request too large. Flush first.\n\tif r.byteSize+dataLen > PutRecordsSizeLimit {\n\t\terr := r.Flush()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ Partition key\n\tpKey, err := pKey(r.pKeyTmpl, m)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Add to count\n\tr.count += 1\n\n\t\/\/ Add data and partition key size to byteSize\n\tr.byteSize += dataLen + len(pKey)\n\n\t\/\/ Add record\n\tr.input.Records = append(r.input.Records, &kinesis.PutRecordsRequestEntry{\n\t\tData: []byte(data),\n\t\tPartitionKey: aws.String(pKey),\n\t})\n\n\treturn nil\n}\n\nfunc (r *recordBuffer) Flush() error {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\n\tif r.count == 0 {\n\t\treturn nil\n\t}\n\n\tdefer r.reset()\n\n\t_, err := r.client.PutRecords(r.input)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (r *recordBuffer) reset() {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\n\tr.count = 0\n\tr.byteSize = 0\n\tr.input.Records = make([]*kinesis.PutRecordsRequestEntry, 0)\n}\n\nfunc pKey(tmpl *template.Template, m *router.Message) (string, error) {\n\tvar pKey bytes.Buffer\n\terr := tmpl.Execute(&pKey, m)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn pKey.String(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package std\n\nimport \"github.com\/DeedleFake\/wdte\"\n\nfunc save(f wdte.Func, saved ...wdte.Func) wdte.Func {\n\treturn wdte.GoFunc(func(frame []wdte.Func, args ...wdte.Func) wdte.Func {\n\t\treturn f(frame, append(saved, args...))\n\t})\n}\n\n\/\/ Add returns the sum of its arguments. If called with only 1\n\/\/ argument, it returns a function which adds arguments given to that\n\/\/ one argument.\nfunc Add(frame []wdte.Func, args ...wdte.Func) wdte.Func {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn Add\n\n\tcase 1:\n\t\treturn save(Add, args[0])\n\t}\n\n\tvar sum wdte.Number\n\tfor _, arg := range args {\n\t\tsum += arg.Call(frame).(wdte.Number)\n\t}\n\treturn sum\n}\n\n\/\/ Sub returns args[0] - args[1]. If called with only 1 argument, it\n\/\/ returns a function which returns that argument minus the argument\n\/\/ given.\nfunc Sub(frame []wdte.Func, args ...wdte.Func) wdte.Func {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn Sub\n\n\tcase 1:\n\t\treturn save(Sub, args[0])\n\t}\n\n\ta1 := args[0].Call(frame).(wdte.Number)\n\ta2 := args[1].Call(frame).(wdte.Number)\n\treturn a1 - a2\n}\n\n\/\/ Mult returns the product of its arguments. If called with only 1\n\/\/ argument, it returns a function that multiplies that argument by\n\/\/ its own arguments.\nfunc Mult(frame []wdte.Func, args ...wdte.Func) wdte.Func {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn Mult\n\n\tcase 1:\n\t\treturn save(Mult, args[0])\n\t}\n\n\tp := wdte.Number(1)\n\tfor _, arg := range args {\n\t\tp *= arg.Call(frame).(wdte.Number)\n\t}\n\treturn p\n}\n\n\/\/ Div returns args[0] \/ args[1]. If called with only 1 argument, it\n\/\/ returns a function which divides its own argument by the original\n\/\/ argument.\nfunc Div(frame []wdte.Func, args ...wdte.Func) wdte.Func {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn Div\n\n\tcase 1:\n\t\treturn save(Div, args[0])\n\t}\n\n\ta1 := args[0].Call(frame).(wdte.Number)\n\ta2 := args[1].Call(frame).(wdte.Number)\n\treturn a1 \/ a2\n}\n\n\/\/ Insert adds the functions in this package to m. It maps them to the\n\/\/ cooresponding mathematical operators. For example, Add() becomes\n\/\/ `+`, Sub() becomes `-`, and so on.\nfunc Insert(m *wdte.Module) {\n\tm.Funcs[\"+\"] = wdte.GoFunc(Add)\n\tm.Funcs[\"-\"] = wdte.GoFunc(Sub)\n\tm.Funcs[\"*\"] = wdte.GoFunc(Mult)\n\tm.Funcs[\"\/\"] = wdte.GoFunc(Div)\n}\n<commit_msg>std: Woops. Fix compilation errors.<commit_after>package std\n\nimport (\n\t\"github.com\/DeedleFake\/wdte\"\n)\n\nfunc save(f wdte.Func, saved ...wdte.Func) wdte.Func {\n\treturn wdte.GoFunc(func(frame []wdte.Func, args ...wdte.Func) wdte.Func {\n\t\treturn f.Call(frame, append(saved, args...)...)\n\t})\n}\n\n\/\/ Add returns the sum of its arguments. If called with only 1\n\/\/ argument, it returns a function which adds arguments given to that\n\/\/ one argument.\nfunc Add(frame []wdte.Func, args ...wdte.Func) wdte.Func {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn wdte.GoFunc(Add)\n\n\tcase 1:\n\t\treturn save(wdte.GoFunc(Add), args[0])\n\t}\n\n\tvar sum wdte.Number\n\tfor _, arg := range args {\n\t\tsum += arg.Call(frame).(wdte.Number)\n\t}\n\treturn sum\n}\n\n\/\/ Sub returns args[0] - args[1]. If called with only 1 argument, it\n\/\/ returns a function which returns that argument minus the argument\n\/\/ given.\nfunc Sub(frame []wdte.Func, args ...wdte.Func) wdte.Func {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn wdte.GoFunc(Sub)\n\n\tcase 1:\n\t\treturn save(wdte.GoFunc(Sub), args[0])\n\t}\n\n\ta1 := args[0].Call(frame).(wdte.Number)\n\ta2 := args[1].Call(frame).(wdte.Number)\n\treturn a1 - a2\n}\n\n\/\/ Mult returns the product of its arguments. If called with only 1\n\/\/ argument, it returns a function that multiplies that argument by\n\/\/ its own arguments.\nfunc Mult(frame []wdte.Func, args ...wdte.Func) wdte.Func {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn wdte.GoFunc(Mult)\n\n\tcase 1:\n\t\treturn save(wdte.GoFunc(Mult), args[0])\n\t}\n\n\tp := wdte.Number(1)\n\tfor _, arg := range args {\n\t\tp *= arg.Call(frame).(wdte.Number)\n\t}\n\treturn p\n}\n\n\/\/ Div returns args[0] \/ args[1]. If called with only 1 argument, it\n\/\/ returns a function which divides its own argument by the original\n\/\/ argument.\nfunc Div(frame []wdte.Func, args ...wdte.Func) wdte.Func {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn wdte.GoFunc(Div)\n\n\tcase 1:\n\t\treturn save(wdte.GoFunc(Div), args[0])\n\t}\n\n\ta1 := args[0].Call(frame).(wdte.Number)\n\ta2 := args[1].Call(frame).(wdte.Number)\n\treturn a1 \/ a2\n}\n\n\/\/ Insert adds the functions in this package to m. It maps them to the\n\/\/ cooresponding mathematical operators. For example, Add() becomes\n\/\/ `+`, Sub() becomes `-`, and so on.\nfunc Insert(m *wdte.Module) {\n\tm.Funcs[\"+\"] = wdte.GoFunc(Add)\n\tm.Funcs[\"-\"] = wdte.GoFunc(Sub)\n\tm.Funcs[\"*\"] = wdte.GoFunc(Mult)\n\tm.Funcs[\"\/\"] = wdte.GoFunc(Div)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/codegangsta\/cli\"\n\t\"os\"\n\t\"log\"\n)\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"structr\"\n\tapp.Flags = []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName: \"config\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"configuration for structr\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName: \"outDir\",\n\t\t\tValue: \"\",\n\t\t\tUsage: \"output directory for generated structure\",\n\t\t},\n\t}\n\tapp.Action = func(c *cli.Context) {\n\t\targs := c.Args()\n\t\tif len(args) < 1 {\n\t\t\tcli.ShowAppHelp(c)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tcontext, err := NewContext(c.String(\"config\"), c.String(\"outDir\"), args)\n\t\tif err != nil {\n\t\t\tlog.Fatalln(\"initialize error: \", err.Error())\n\t\t}\n\t\tbundler := NewJsonSchemaBundler(NewJsonSchemaLoader())\n\t\tif err := bundler.AddJsonSchema(context.Inputs...); err != nil {\n\t\t\tlog.Fatalln(\"cannot add load json schema: \", err.Error())\n\t\t}\n\t\tcreator := NewJsonSchemaNodeCreator(context, bundler)\n\t\texporter := NewExporter(context)\n\t\tfor _, b := range bundler.GetBundles() {\n\t\t\tstructure, err := creator.CreateStructureNode(b)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalln(\"cannot create structure node: \", err.Error())\n\t\t\t}\n\t\t\tif err := exporter.Export(structure); err != nil {\n\t\t\t\tlog.Fatalln(\"cannot export structure node: \", err.Error())\n\t\t\t}\n\t\t}\n\t}\n\tapp.Run(os.Args)\n}\n<commit_msg>remove main action<commit_after>package main\n\nimport (\n\t\"github.com\/codegangsta\/cli\"\n\t\"os\"\n)\n\nvar Version string\n\nfunc main() {\n\tapp := cli.NewApp()\n\tapp.Name = \"structr\"\n\tapp.Usage = \"Generate structure definition(s) from JSON Schema\"\n\tapp.UsageText = \"structr [command] [command options] [filepath...]\"\n\tapp.Author = \"dameleon\"\n\tapp.Email = \"dameleon@gmail.com\"\n\tapp.Version = Version\n\tapp.Commands = Commands\n\tapp.Run(os.Args)\n}\n<|endoftext|>"} {"text":"<commit_before>package couchdb\n\nimport (\n\t\"fmt\"\n)\n\n\/\/ http:\/\/docs.couchdb.org\/en\/latest\/intro\/api.html#server\ntype Server struct {\n\tCouchdb string\n\tUuid string\n\tVendor struct {\n\t\tVersion string\n\t\tName string\n\t}\n\tVersion string\n}\n\n\/\/ http:\/\/docs.couchdb.org\/en\/latest\/api\/database\/common.html#get--db\ntype DatabaseInfo struct {\n\tDbName string `json:\"db_name\"`\n\tDocCount int `json:\"doc_count\"`\n\tDocDelCount int `json:\"doc_del_count\"`\n\tUpdateSeq int `json:\"update_seq\"`\n\tPurgeSeq int `json:\"purge_seq\"`\n\tCompactRunning bool `json:\"compact_running\"`\n\tDiskSize int `json:\"disk_size\"`\n\tDataSize int `json:\"data_size\"`\n\tInstanceStartTime string `json:\"instance_start_time\"`\n\tDiskFormatVersion int `json:\"disk_format_version\"`\n\tCommittedUpdateSeq int `json:\"committed_update_seq\"`\n}\n\ntype DatabaseResponse struct {\n\tOk bool\n}\n\ntype Error struct {\n\tMethod string\n\tUrl string\n\tStatusCode int\n\tType string `json:\"error\"`\n\tReason string\n}\n\nfunc (e *Error) Error() string {\n\treturn fmt.Sprintf(\"CouchDB - %s %s, Status Code: %d, Error: %s, Reason: %s\", e.Method, e.Url, e.StatusCode, e.Type, e.Reason)\n}\n\ntype Document struct {\n\tId string `json:\"_id,omitempty\"`\n\tRev string `json:\"_rev,omitempty\"`\n\tAttachments map[string]Attachment `json:\"_attachments,omitempty\"`\n}\n\n\/\/ http:\/\/docs.couchdb.org\/en\/latest\/api\/document\/common.html#creating-multiple-attachments\ntype Attachment struct {\n\tFollows bool `json:\"follows\"`\n\tContentType string `json:\"content_type\"`\n\tLength int64 `json:\"length\"`\n}\n\ntype CouchDoc interface {\n\tGetDocument() *Document\n}\n\ntype DocumentResponse struct {\n\tOk bool\n\tId string\n\tRev string\n}\n\n\/\/ http:\/\/docs.couchdb.org\/en\/latest\/api\/server\/common.html#active-tasks\ntype Task struct {\n\tChangesDone int `json:\"changes_done\"`\n\tDatabase string\n\tPid string\n\tProgress int\n\tStartedOn int `json:\"started_on\"`\n\tStatus string\n\tTask string\n\tTotalChanges int `json:\"total_changes\"`\n\tType string\n\tUpdatedOn string `json:\"updated_on\"`\n}\n\ntype QueryParameters struct {\n\tConflicts bool `url:\"conflicts,omitempty\"`\n\tDescending bool `url:\"descending,omitempty\"`\n\tEndKey []interface{} `url:\"endkey,comma,omitempty\"`\n\tEndKeyDocId string `url:\"end_key_doc_id,omitempty\"`\n\tGroup bool `url:\"group,omitempty\"`\n\tGroupLevel int `url:\"group_level,omitempty\"`\n\tIncludeDocs bool `url:\"include_docs,omitempty\"`\n\tAttachments bool `url:\"attachments,omitempty\"`\n\tAttEncodingInfo bool `url:\"att_encoding_info,omitempty\"`\n\tInclusiveEnd bool `url:\"inclusive_end,omitempty\"`\n\tKey string `url:\"key,omitempty\"`\n\tLimit int `url:\"limit,omitempty\"`\n\tReduce bool `url:\"reduce,omitempty\"`\n\tSkip int `url:\"skip,omitempty\"`\n\tStale string `url:\"stale,omitempty\"`\n\tStartKey []interface{} `url:\"startkey,comma,omitempty\"`\n\tStartKeyDocId string `url:\"startkey_docid,omitempty\"`\n\tUpdateSeq bool `url:\"update_seq,omitempty\"`\n}\n\ntype ViewResponse struct {\n\tOffset int `json:\"offset,omitempty\"`\n\tRows []Row `json:\"rows,omitempty\"`\n\tTotalRows int `json:\"total_rows,omitempty\"`\n\tUpdateSeq int `json:\"update_seq,omitempty\"`\n}\n\ntype Row struct {\n\tId string `json:\"id\"`\n\tKey interface{} `json:\"key\"`\n\tValue interface{} `json:\"value,omitempty\"`\n\tDoc map[string]interface{} `json:\"doc,omitempty\"`\n}\n\n\/\/ http:\/\/docs.couchdb.org\/en\/latest\/api\/database\/bulk-api.html#post--db-_bulk_docs\ntype BulkDoc struct {\n\tAllOrNothing bool `json:\"all_or_nothing,omitempty\"`\n\tDocs []interface{} `json:\"docs\"`\n\tNewEdits bool `json:\"new_edits,omitempty\"`\n}\n\n\/\/ http:\/\/docs.couchdb.org\/en\/latest\/api\/server\/authn.html#cookie-authentication\ntype Credentials struct {\n\tName string `json:\"name\"`\n\tPassword string `json:\"password\"`\n}\n\ntype PostSessionResponse struct {\n\tOk bool\n\tName string\n\tRoles []string\n}\n\ntype User struct {\n\tDocument\n\tDerivedKey string `json:\"derived_key,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tRoles []string `json:\"roles\"`\n\tPassword string `json:\"password,omitempty\"` \/\/ plain text password when creating the user\n\tPasswordSha string `json:\"password_sha,omitempty\"` \/\/ hashed password when requesting user information\n\tPasswordScheme string `json:\"password_scheme,omitempty\"`\n\tSalt string `json:\"salt,omitempty\"`\n\tType string `json:\"type,omitempty\"`\n\tIterations int `json:\"iterations,omitempty\"`\n}\n\nfunc (user User) GetDocument() *Document {\n\treturn &user.Document\n}\n\nfunc NewUser(name, password string, roles []string) User {\n\tuser := User{\n\t\tDocument: Document{\n\t\t\tId: \"org.couchdb.user:\" + name,\n\t\t},\n\t\tDerivedKey: \"\",\n\t\tName: name,\n\t\tRoles: roles,\n\t\tPassword: password,\n\t\tPasswordSha: \"\",\n\t\tPasswordScheme: \"\",\n\t\tSalt: \"\",\n\t\tType: \"user\",\n\t}\n\treturn user\n}\n\ntype GetSessionResponse struct {\n\tInfo struct {\n\t\tAuthenticated string `json:\"authenticated\"`\n\t\tAuthenticationDb string `json:\"authentication_db\"`\n\t\tAuthenticationHandlers []string `json:\"authentication_handlers\"`\n\t} `json:\"info\"`\n\tOk bool `json:\"ok\"`\n\tUserContext struct {\n\t\tDb string `json:\"db\"`\n\t\tName string `json:\"name\"`\n\t\tRoles []string `json:\"roles\"`\n\t} `json:\"userCtx\"`\n}\n<commit_msg>make startkey and endkey string type<commit_after>package couchdb\n\nimport (\n\t\"fmt\"\n)\n\n\/\/ http:\/\/docs.couchdb.org\/en\/latest\/intro\/api.html#server\ntype Server struct {\n\tCouchdb string\n\tUuid string\n\tVendor struct {\n\t\tVersion string\n\t\tName string\n\t}\n\tVersion string\n}\n\n\/\/ http:\/\/docs.couchdb.org\/en\/latest\/api\/database\/common.html#get--db\ntype DatabaseInfo struct {\n\tDbName string `json:\"db_name\"`\n\tDocCount int `json:\"doc_count\"`\n\tDocDelCount int `json:\"doc_del_count\"`\n\tUpdateSeq int `json:\"update_seq\"`\n\tPurgeSeq int `json:\"purge_seq\"`\n\tCompactRunning bool `json:\"compact_running\"`\n\tDiskSize int `json:\"disk_size\"`\n\tDataSize int `json:\"data_size\"`\n\tInstanceStartTime string `json:\"instance_start_time\"`\n\tDiskFormatVersion int `json:\"disk_format_version\"`\n\tCommittedUpdateSeq int `json:\"committed_update_seq\"`\n}\n\ntype DatabaseResponse struct {\n\tOk bool\n}\n\ntype Error struct {\n\tMethod string\n\tUrl string\n\tStatusCode int\n\tType string `json:\"error\"`\n\tReason string\n}\n\nfunc (e *Error) Error() string {\n\treturn fmt.Sprintf(\"CouchDB - %s %s, Status Code: %d, Error: %s, Reason: %s\", e.Method, e.Url, e.StatusCode, e.Type, e.Reason)\n}\n\ntype Document struct {\n\tId string `json:\"_id,omitempty\"`\n\tRev string `json:\"_rev,omitempty\"`\n\tAttachments map[string]Attachment `json:\"_attachments,omitempty\"`\n}\n\n\/\/ http:\/\/docs.couchdb.org\/en\/latest\/api\/document\/common.html#creating-multiple-attachments\ntype Attachment struct {\n\tFollows bool `json:\"follows\"`\n\tContentType string `json:\"content_type\"`\n\tLength int64 `json:\"length\"`\n}\n\ntype CouchDoc interface {\n\tGetDocument() *Document\n}\n\ntype DocumentResponse struct {\n\tOk bool\n\tId string\n\tRev string\n}\n\n\/\/ http:\/\/docs.couchdb.org\/en\/latest\/api\/server\/common.html#active-tasks\ntype Task struct {\n\tChangesDone int `json:\"changes_done\"`\n\tDatabase string\n\tPid string\n\tProgress int\n\tStartedOn int `json:\"started_on\"`\n\tStatus string\n\tTask string\n\tTotalChanges int `json:\"total_changes\"`\n\tType string\n\tUpdatedOn string `json:\"updated_on\"`\n}\n\ntype QueryParameters struct {\n\tConflicts bool `url:\"conflicts,omitempty\"`\n\tDescending bool `url:\"descending,omitempty\"`\n\tEndKey string `url:\"endkey,comma,omitempty\"`\n\tEndKeyDocId string `url:\"end_key_doc_id,omitempty\"`\n\tGroup bool `url:\"group,omitempty\"`\n\tGroupLevel int `url:\"group_level,omitempty\"`\n\tIncludeDocs bool `url:\"include_docs,omitempty\"`\n\tAttachments bool `url:\"attachments,omitempty\"`\n\tAttEncodingInfo bool `url:\"att_encoding_info,omitempty\"`\n\tInclusiveEnd bool `url:\"inclusive_end,omitempty\"`\n\tKey string `url:\"key,omitempty\"`\n\tLimit int `url:\"limit,omitempty\"`\n\tReduce bool `url:\"reduce,omitempty\"`\n\tSkip int `url:\"skip,omitempty\"`\n\tStale string `url:\"stale,omitempty\"`\n\tStartKey string `url:\"startkey,comma,omitempty\"`\n\tStartKeyDocId string `url:\"startkey_docid,omitempty\"`\n\tUpdateSeq bool `url:\"update_seq,omitempty\"`\n}\n\ntype ViewResponse struct {\n\tOffset int `json:\"offset,omitempty\"`\n\tRows []Row `json:\"rows,omitempty\"`\n\tTotalRows int `json:\"total_rows,omitempty\"`\n\tUpdateSeq int `json:\"update_seq,omitempty\"`\n}\n\ntype Row struct {\n\tId string `json:\"id\"`\n\tKey interface{} `json:\"key\"`\n\tValue interface{} `json:\"value,omitempty\"`\n\tDoc map[string]interface{} `json:\"doc,omitempty\"`\n}\n\n\/\/ http:\/\/docs.couchdb.org\/en\/latest\/api\/database\/bulk-api.html#post--db-_bulk_docs\ntype BulkDoc struct {\n\tAllOrNothing bool `json:\"all_or_nothing,omitempty\"`\n\tDocs []interface{} `json:\"docs\"`\n\tNewEdits bool `json:\"new_edits,omitempty\"`\n}\n\n\/\/ http:\/\/docs.couchdb.org\/en\/latest\/api\/server\/authn.html#cookie-authentication\ntype Credentials struct {\n\tName string `json:\"name\"`\n\tPassword string `json:\"password\"`\n}\n\ntype PostSessionResponse struct {\n\tOk bool\n\tName string\n\tRoles []string\n}\n\ntype User struct {\n\tDocument\n\tDerivedKey string `json:\"derived_key,omitempty\"`\n\tName string `json:\"name,omitempty\"`\n\tRoles []string `json:\"roles\"`\n\tPassword string `json:\"password,omitempty\"` \/\/ plain text password when creating the user\n\tPasswordSha string `json:\"password_sha,omitempty\"` \/\/ hashed password when requesting user information\n\tPasswordScheme string `json:\"password_scheme,omitempty\"`\n\tSalt string `json:\"salt,omitempty\"`\n\tType string `json:\"type,omitempty\"`\n\tIterations int `json:\"iterations,omitempty\"`\n}\n\nfunc (user User) GetDocument() *Document {\n\treturn &user.Document\n}\n\nfunc NewUser(name, password string, roles []string) User {\n\tuser := User{\n\t\tDocument: Document{\n\t\t\tId: \"org.couchdb.user:\" + name,\n\t\t},\n\t\tDerivedKey: \"\",\n\t\tName: name,\n\t\tRoles: roles,\n\t\tPassword: password,\n\t\tPasswordSha: \"\",\n\t\tPasswordScheme: \"\",\n\t\tSalt: \"\",\n\t\tType: \"user\",\n\t}\n\treturn user\n}\n\ntype GetSessionResponse struct {\n\tInfo struct {\n\t\tAuthenticated string `json:\"authenticated\"`\n\t\tAuthenticationDb string `json:\"authentication_db\"`\n\t\tAuthenticationHandlers []string `json:\"authentication_handlers\"`\n\t} `json:\"info\"`\n\tOk bool `json:\"ok\"`\n\tUserContext struct {\n\t\tDb string `json:\"db\"`\n\t\tName string `json:\"name\"`\n\t\tRoles []string `json:\"roles\"`\n\t} `json:\"userCtx\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\/\/\"net\/http\"\n\t\/\/\"net\/http\/httptest\"\n\t\/\/\"github.com\/Charlesworth\/phoneBook\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"testing\"\n)\n\n\/\/func main() {}\n\nfunc TestNewRouter(t *testing.T) {\n\t\/\/Get NewRouter to return *httpRouter\n\trouter := NewRouter()\n\n\t\/\/Did it return anything?\n\tif router == nil {\n\t\tt.Error(\"Router() did not return anything *httprouter.Router\")\n\t}\n\n\t\/\/did it return a *httpRouter?\n\tif reflect.TypeOf(router) != reflect.TypeOf(httprouter.New()) {\n\t\tt.Error(\"Router() did not return type *httprouter\")\n\t}\n\n}\n\nfunc TestSetProc(t *testing.T) {\n\tSetProc()\n\n\tif runtime.GOMAXPROCS(0) != 2 {\n\t\tt.Error(\"Application not using 2 processors as set by setProc()\")\n\t}\n}\n\nfunc Testmain(t *testing.T) {\n\n}\n\nfunc TestNewBoltClient(t *testing.T) {\n\tNewBoltClient()\n\n}\n<commit_msg>updated NewBoltClient tests<commit_after>package main\n\nimport (\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\/\/\"net\/http\"\n\t\/\/\"net\/http\/httptest\"\n\t\/\/\"github.com\/Charlesworth\/phoneBook\"\n\t\/\/\"fmt\"\n\t\"github.com\/boltdb\/bolt\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"testing\"\n)\n\n\/\/func main() {}\n\nfunc TestNewRouter(t *testing.T) {\n\t\/\/Get NewRouter to return *httpRouter\n\trouter := NewRouter()\n\n\t\/\/Did it return anything?\n\tif router == nil {\n\t\tt.Error(\"Router() did not return anything *httprouter.Router\")\n\t}\n\n\t\/\/did it return a *httpRouter?\n\tif reflect.TypeOf(router) != reflect.TypeOf(httprouter.New()) {\n\t\tt.Error(\"Router() did not return type *httprouter\")\n\t}\n\n}\n\nfunc TestSetProc(t *testing.T) {\n\tSetProc()\n\n\tif runtime.GOMAXPROCS(0) != 2 {\n\t\tt.Error(\"Application not using 2 processors as set by setProc()\")\n\t}\n}\n\nfunc Testmain(t *testing.T) {\n\n}\n\nfunc TestNewBoltClient(t *testing.T) {\n\tboltTest := NewBoltClient()\n\n\tboltTest.Mutex.Lock()\n\n\t\/\/Test creating bucket\n\terr := boltTest.DB.Update(func(tx *bolt.Tx) error {\n\t\t_, err := tx.CreateBucketIfNotExists([]byte(\"test\"))\n\n\t\treturn err\n\t})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\t\/\/Write to bucket\n\terr = boltTest.DB.Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"test\"))\n\t\terr = b.Put([]byte(\"hello\"), []byte(\"world\"))\n\t\treturn err\n\t})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tboltTest.Mutex.Unlock()\n\tboltTest.Mutex.RLock()\n\n\t\/\/Get from bucket\n\tvar v []byte\n\tboltTest.DB.View(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"test\"))\n\t\tv = b.Get([]byte(\"hello\"))\n\t\treturn nil\n\t})\n\tif v == nil {\n\t\tt.Error(\"Cannot retrive values from BOLTDB\")\n\t}\n\tif string(v) != \"world\" {\n\t\tt.Error(\"BoltDB not storing values correctly\")\n\t}\n\n\tboltTest.Mutex.RUnlock()\n\tboltTest.Mutex.Lock()\n\n\t\/\/Delete from bucket\n\terr = boltTest.DB.Update(func(tx *bolt.Tx) error {\n\t\tb := tx.Bucket([]byte(\"test\"))\n\t\terr = b.Delete([]byte(\"hello\"))\n\t\treturn err\n\t})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\t\/\/Delete test bucket\n\terr = boltTest.DB.Update(func(tx *bolt.Tx) error {\n\t\terr := tx.DeleteBucket([]byte(\"test\"))\n\t\treturn err\n\t})\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tboltTest.Mutex.Unlock()\n\n\tboltTest.DB.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>package svg \/\/ import \"github.com\/tdewolff\/minify\/svg\"\n\nimport (\n\t\"io\"\n\n\t\"github.com\/tdewolff\/buffer\"\n\t\"github.com\/tdewolff\/minify\"\n\txmlMinify \"github.com\/tdewolff\/minify\/xml\"\n\t\"github.com\/tdewolff\/parse\"\n\tcssParse \"github.com\/tdewolff\/parse\/css\"\n\t\"github.com\/tdewolff\/parse\/svg\"\n\t\"github.com\/tdewolff\/parse\/xml\"\n)\n\nvar (\n\tltBytes = []byte(\"<\")\n\tgtBytes = []byte(\">\")\n\tvoidBytes = []byte(\"\/>\")\n\tisBytes = []byte(\"=\")\n\tspaceBytes = []byte(\" \")\n\temptyBytes = []byte(\"\\\"\\\"\")\n\tendBytes = []byte(\"<\/\")\n\tCDATAStartBytes = []byte(\"<![CDATA[\")\n\tCDATAEndBytes = []byte(\"]]>\")\n)\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ Minify minifies XML files, it reads from r and writes to w.\n\/\/ Removes unnecessary whitespace, tags, attributes, quotes and comments and typically saves 10% in size.\nfunc Minify(m minify.Minifier, _ string, w io.Writer, r io.Reader) error {\n\tvar tag svg.Hash\n\n\tattrMinifyBuffer := buffer.NewWriter(make([]byte, 0, 64))\n\tattrByteBuffer := make([]byte, 0, 64)\n\n\tz := xml.NewTokenizer(r)\n\ttb := xmlMinify.NewTokenBuffer(z)\n\tfor {\n\t\tt := *tb.Shift()\n\t\tif t.TokenType == xml.CDATAToken {\n\t\t\tvar useCDATA bool\n\t\t\tif t.Data, useCDATA = xmlMinify.EscapeCDATAVal(&attrByteBuffer, t.Data); !useCDATA {\n\t\t\t\tt.TokenType = xml.TextToken\n\t\t\t}\n\t\t}\n\t\tswitch t.TokenType {\n\t\tcase xml.ErrorToken:\n\t\t\tif z.Err() == io.EOF {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn z.Err()\n\t\tcase xml.TextToken:\n\t\t\tt.Data = parse.ReplaceMultiple(parse.Trim(t.Data, parse.IsWhitespace), parse.IsWhitespace, ' ')\n\t\t\tif tag == svg.Style && len(t.Data) > 0 {\n\t\t\t\tif err := m.Minify(\"text\/css\", w, buffer.NewReader(t.Data)); err != nil {\n\t\t\t\t\tif err == minify.ErrNotExist { \/\/ no minifier, write the original\n\t\t\t\t\t\tif _, err := w.Write(t.Data); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if _, err := w.Write(t.Data); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase xml.CDATAToken:\n\t\t\tif _, err := w.Write(CDATAStartBytes); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.Data = parse.ReplaceMultiple(parse.Trim(t.Data, parse.IsWhitespace), parse.IsWhitespace, ' ')\n\t\t\tif tag == svg.Style && len(t.Data) > 0 {\n\t\t\t\tif err := m.Minify(\"text\/css\", w, buffer.NewReader(t.Data)); err != nil {\n\t\t\t\t\tif err == minify.ErrNotExist { \/\/ no minifier, write the original\n\t\t\t\t\t\tif _, err := w.Write(t.Data); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if _, err := w.Write(t.Data); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := w.Write(CDATAEndBytes); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase xml.StartTagToken:\n\t\t\tif _, err := w.Write(ltBytes); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := w.Write(t.Data); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttag = svg.ToHash(t.Data)\n\t\tcase xml.StartTagPIToken:\n\t\t\tfor {\n\t\t\t\tif t := *tb.Shift(); t.TokenType != xml.StartTagClosePIToken {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase xml.AttributeToken:\n\t\t\tif len(t.AttrVal) < 2 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tattr := svg.ToHash(t.Data)\n\t\t\tval := parse.ReplaceMultiple(parse.Trim(t.AttrVal[1:len(t.AttrVal)-1], parse.IsWhitespace), parse.IsWhitespace, ' ')\n\t\t\tif tag == svg.Svg && attr == svg.Version {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif _, err := w.Write(spaceBytes); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := w.Write(t.Data); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := w.Write(isBytes); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif attr == svg.Style {\n\t\t\t\tattrMinifyBuffer.Reset()\n\t\t\t\tif m.Minify(\"text\/css;inline=1\", attrMinifyBuffer, buffer.NewReader(val)) == nil {\n\t\t\t\t\tval = attrMinifyBuffer.Bytes()\n\t\t\t\t}\n\t\t\t} else if tag == svg.Path && attr == svg.D {\n\t\t\t\tval = shortenPathData(val)\n\t\t\t} else if num, dim, ok := cssParse.SplitNumberDimension(val); ok {\n\t\t\t\tnum = minify.Number(num)\n\t\t\t\tif len(num) == 1 && num[0] == '0' {\n\t\t\t\t\tval = num\n\t\t\t\t} else {\n\t\t\t\t\tif len(dim) > 1 { \/\/ only percentage is length 1\n\t\t\t\t\t\tparse.ToLower(dim)\n\t\t\t\t\t}\n\t\t\t\t\tval = append(num, dim...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ prefer single or double quotes depending on what occurs more often in value\n\t\t\tval = xmlMinify.EscapeAttrVal(&attrByteBuffer, val)\n\t\t\tif _, err := w.Write(val); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase xml.StartTagCloseToken:\n\t\t\tnext := tb.Peek(0)\n\t\t\tskipExtra := false\n\t\t\tif next.TokenType == xml.TextToken && parse.IsAllWhitespace(next.Data) {\n\t\t\t\tnext = tb.Peek(1)\n\t\t\t\tskipExtra = true\n\t\t\t}\n\t\t\tif next.TokenType == xml.EndTagToken {\n\t\t\t\t\/\/ collapse empty tags to single void tag\n\t\t\t\ttb.Shift()\n\t\t\t\tif skipExtra {\n\t\t\t\t\ttb.Shift()\n\t\t\t\t}\n\t\t\t\tif _, err := w.Write(voidBytes); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif _, err := w.Write(gtBytes); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\tcase xml.StartTagCloseVoidToken:\n\t\t\tif _, err := w.Write(voidBytes); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase xml.EndTagToken:\n\t\t\tif _, err := w.Write(endBytes); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := w.Write(t.Data); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := w.Write(gtBytes); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc shortenPathData(b []byte) []byte {\n\tcmd := byte(0)\n\tprevDigit := false\n\tprevDigitRequiresSpace := true\n\tj := 0\n\tstart := 0\n\tfor i := 0; i < len(b); i++ {\n\t\tc := b[i]\n\t\tif c == ' ' || c == ',' || c == '\\t' || c == '\\n' || c == '\\r' {\n\t\t\tif start != 0 {\n\t\t\t\tj += copy(b[j:], b[start:i])\n\t\t\t} else {\n\t\t\t\tj += i\n\t\t\t}\n\t\t\tstart = i + 1\n\t\t} else if n, ok := parse.Number(b[i:]); ok {\n\t\t\tif start != 0 {\n\t\t\t\tj += copy(b[j:], b[start:i])\n\t\t\t} else {\n\t\t\t\tj += i\n\t\t\t}\n\t\t\tnum := minify.Number(b[i : i+n])\n\t\t\tif prevDigit && (num[0] >= '0' && num[0] <= '9' || num[0] == '.' && prevDigitRequiresSpace) {\n\t\t\t\tb[j] = ' '\n\t\t\t\tj++\n\t\t\t}\n\t\t\tprevDigit = true\n\t\t\tprevDigitRequiresSpace = true\n\t\t\tfor _, c := range num {\n\t\t\t\tif c == '.' || c == 'e' || c == 'E' {\n\t\t\t\t\tprevDigitRequiresSpace = false\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tj += copy(b[j:], num)\n\t\t\tstart = i + n\n\t\t\ti += n - 1\n\t\t} else {\n\t\t\tif cmd == c {\n\t\t\t\tif start != 0 {\n\t\t\t\t\tj += copy(b[j:], b[start:i])\n\t\t\t\t} else {\n\t\t\t\t\tj += i\n\t\t\t\t}\n\t\t\t\tstart = i + 1\n\t\t\t} else {\n\t\t\t\tcmd = c\n\t\t\t\tprevDigit = false\n\t\t\t}\n\t\t}\n\t}\n\tif start != 0 {\n\t\tj += copy(b[j:], b[start:])\n\t\treturn b[:j]\n\t}\n\treturn b\n}\n<commit_msg>parse.Number returns only the length now, not ok<commit_after>package svg \/\/ import \"github.com\/tdewolff\/minify\/svg\"\n\nimport (\n\t\"io\"\n\n\t\"github.com\/tdewolff\/buffer\"\n\t\"github.com\/tdewolff\/minify\"\n\txmlMinify \"github.com\/tdewolff\/minify\/xml\"\n\t\"github.com\/tdewolff\/parse\"\n\tcssParse \"github.com\/tdewolff\/parse\/css\"\n\t\"github.com\/tdewolff\/parse\/svg\"\n\t\"github.com\/tdewolff\/parse\/xml\"\n)\n\nvar (\n\tltBytes = []byte(\"<\")\n\tgtBytes = []byte(\">\")\n\tvoidBytes = []byte(\"\/>\")\n\tisBytes = []byte(\"=\")\n\tspaceBytes = []byte(\" \")\n\temptyBytes = []byte(\"\\\"\\\"\")\n\tendBytes = []byte(\"<\/\")\n\tCDATAStartBytes = []byte(\"<![CDATA[\")\n\tCDATAEndBytes = []byte(\"]]>\")\n)\n\n\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\n\n\/\/ Minify minifies XML files, it reads from r and writes to w.\n\/\/ Removes unnecessary whitespace, tags, attributes, quotes and comments and typically saves 10% in size.\nfunc Minify(m minify.Minifier, _ string, w io.Writer, r io.Reader) error {\n\tvar tag svg.Hash\n\n\tattrMinifyBuffer := buffer.NewWriter(make([]byte, 0, 64))\n\tattrByteBuffer := make([]byte, 0, 64)\n\n\tz := xml.NewTokenizer(r)\n\ttb := xmlMinify.NewTokenBuffer(z)\n\tfor {\n\t\tt := *tb.Shift()\n\t\tif t.TokenType == xml.CDATAToken {\n\t\t\tvar useCDATA bool\n\t\t\tif t.Data, useCDATA = xmlMinify.EscapeCDATAVal(&attrByteBuffer, t.Data); !useCDATA {\n\t\t\t\tt.TokenType = xml.TextToken\n\t\t\t}\n\t\t}\n\t\tswitch t.TokenType {\n\t\tcase xml.ErrorToken:\n\t\t\tif z.Err() == io.EOF {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn z.Err()\n\t\tcase xml.TextToken:\n\t\t\tt.Data = parse.ReplaceMultiple(parse.Trim(t.Data, parse.IsWhitespace), parse.IsWhitespace, ' ')\n\t\t\tif tag == svg.Style && len(t.Data) > 0 {\n\t\t\t\tif err := m.Minify(\"text\/css\", w, buffer.NewReader(t.Data)); err != nil {\n\t\t\t\t\tif err == minify.ErrNotExist { \/\/ no minifier, write the original\n\t\t\t\t\t\tif _, err := w.Write(t.Data); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if _, err := w.Write(t.Data); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase xml.CDATAToken:\n\t\t\tif _, err := w.Write(CDATAStartBytes); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tt.Data = parse.ReplaceMultiple(parse.Trim(t.Data, parse.IsWhitespace), parse.IsWhitespace, ' ')\n\t\t\tif tag == svg.Style && len(t.Data) > 0 {\n\t\t\t\tif err := m.Minify(\"text\/css\", w, buffer.NewReader(t.Data)); err != nil {\n\t\t\t\t\tif err == minify.ErrNotExist { \/\/ no minifier, write the original\n\t\t\t\t\t\tif _, err := w.Write(t.Data); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if _, err := w.Write(t.Data); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := w.Write(CDATAEndBytes); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase xml.StartTagToken:\n\t\t\tif _, err := w.Write(ltBytes); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := w.Write(t.Data); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttag = svg.ToHash(t.Data)\n\t\tcase xml.StartTagPIToken:\n\t\t\tfor {\n\t\t\t\tif t := *tb.Shift(); t.TokenType != xml.StartTagClosePIToken {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\tcase xml.AttributeToken:\n\t\t\tif len(t.AttrVal) < 2 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tattr := svg.ToHash(t.Data)\n\t\t\tval := parse.ReplaceMultiple(parse.Trim(t.AttrVal[1:len(t.AttrVal)-1], parse.IsWhitespace), parse.IsWhitespace, ' ')\n\t\t\tif tag == svg.Svg && attr == svg.Version {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif _, err := w.Write(spaceBytes); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := w.Write(t.Data); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := w.Write(isBytes); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif attr == svg.Style {\n\t\t\t\tattrMinifyBuffer.Reset()\n\t\t\t\tif m.Minify(\"text\/css;inline=1\", attrMinifyBuffer, buffer.NewReader(val)) == nil {\n\t\t\t\t\tval = attrMinifyBuffer.Bytes()\n\t\t\t\t}\n\t\t\t} else if tag == svg.Path && attr == svg.D {\n\t\t\t\tval = shortenPathData(val)\n\t\t\t} else if num, dim, ok := cssParse.SplitNumberDimension(val); ok {\n\t\t\t\tnum = minify.Number(num)\n\t\t\t\tif len(num) == 1 && num[0] == '0' {\n\t\t\t\t\tval = num\n\t\t\t\t} else {\n\t\t\t\t\tif len(dim) > 1 { \/\/ only percentage is length 1\n\t\t\t\t\t\tparse.ToLower(dim)\n\t\t\t\t\t}\n\t\t\t\t\tval = append(num, dim...)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ prefer single or double quotes depending on what occurs more often in value\n\t\t\tval = xmlMinify.EscapeAttrVal(&attrByteBuffer, val)\n\t\t\tif _, err := w.Write(val); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase xml.StartTagCloseToken:\n\t\t\tnext := tb.Peek(0)\n\t\t\tskipExtra := false\n\t\t\tif next.TokenType == xml.TextToken && parse.IsAllWhitespace(next.Data) {\n\t\t\t\tnext = tb.Peek(1)\n\t\t\t\tskipExtra = true\n\t\t\t}\n\t\t\tif next.TokenType == xml.EndTagToken {\n\t\t\t\t\/\/ collapse empty tags to single void tag\n\t\t\t\ttb.Shift()\n\t\t\t\tif skipExtra {\n\t\t\t\t\ttb.Shift()\n\t\t\t\t}\n\t\t\t\tif _, err := w.Write(voidBytes); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif _, err := w.Write(gtBytes); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\tcase xml.StartTagCloseVoidToken:\n\t\t\tif _, err := w.Write(voidBytes); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase xml.EndTagToken:\n\t\t\tif _, err := w.Write(endBytes); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := w.Write(t.Data); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := w.Write(gtBytes); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc shortenPathData(b []byte) []byte {\n\tcmd := byte(0)\n\tprevDigit := false\n\tprevDigitRequiresSpace := true\n\tj := 0\n\tstart := 0\n\tfor i := 0; i < len(b); i++ {\n\t\tc := b[i]\n\t\tif c == ' ' || c == ',' || c == '\\t' || c == '\\n' || c == '\\r' {\n\t\t\tif start != 0 {\n\t\t\t\tj += copy(b[j:], b[start:i])\n\t\t\t} else {\n\t\t\t\tj += i\n\t\t\t}\n\t\t\tstart = i + 1\n\t\t} else if n := parse.Number(b[i:]); n > 0 {\n\t\t\tif start != 0 {\n\t\t\t\tj += copy(b[j:], b[start:i])\n\t\t\t} else {\n\t\t\t\tj += i\n\t\t\t}\n\t\t\tnum := minify.Number(b[i : i+n])\n\t\t\tif prevDigit && (num[0] >= '0' && num[0] <= '9' || num[0] == '.' && prevDigitRequiresSpace) {\n\t\t\t\tb[j] = ' '\n\t\t\t\tj++\n\t\t\t}\n\t\t\tprevDigit = true\n\t\t\tprevDigitRequiresSpace = true\n\t\t\tfor _, c := range num {\n\t\t\t\tif c == '.' || c == 'e' || c == 'E' {\n\t\t\t\t\tprevDigitRequiresSpace = false\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tj += copy(b[j:], num)\n\t\t\tstart = i + n\n\t\t\ti += n - 1\n\t\t} else {\n\t\t\tif cmd == c {\n\t\t\t\tif start != 0 {\n\t\t\t\t\tj += copy(b[j:], b[start:i])\n\t\t\t\t} else {\n\t\t\t\t\tj += i\n\t\t\t\t}\n\t\t\t\tstart = i + 1\n\t\t\t} else {\n\t\t\t\tcmd = c\n\t\t\t\tprevDigit = false\n\t\t\t}\n\t\t}\n\t}\n\tif start != 0 {\n\t\tj += copy(b[j:], b[start:])\n\t\treturn b[:j]\n\t}\n\treturn b\n}\n<|endoftext|>"} {"text":"<commit_before>package utils\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"testing\"\n)\n\nfunc hexDump(data []byte) string {\n\tif data == nil {\n\t\treturn \"nil\"\n\t}\n\n\tif len(data) > 8 {\n\t\treturn hex.EncodeToString(data[:8]) + \"...\"\n\t}\n\n\treturn hex.EncodeToString(data)\n}\n\nvar intSerializationData = []struct {\n\tvalue uint64\n\tserialized []byte\n}{\n\t{0x00, []byte{0x00}},\n\t{0x01, []byte{0x01}},\n\t{0x02, []byte{0x02}},\n\t{0x7F, []byte{0x7F}},\n\t{0x80, []byte{0x80, 0x01}},\n\t{0x81, []byte{0x81, 0x01}},\n\t{0x100, []byte{0x80, 0x02}},\n\t{0xFFFFFFFFFFFFFFFF, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01}},\n}\n\nfunc TestIntSerialization(t *testing.T) {\n\n\tfor _, tst := range intSerializationData {\n\n\t\tvar b bytes.Buffer\n\n\t\terr := SerializeInt(tst.value, &b)\n\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error while serializing int: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\ts := b.Bytes()\n\t\tif !bytes.Equal(s, tst.serialized) {\n\t\t\tt.Fatalf(\n\t\t\t\t\"Serialization of value %v failed, data not equal (%v vs %v)\",\n\t\t\t\ttst.value,\n\t\t\t\thexDump(s),\n\t\t\t\thexDump(tst.serialized))\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Try to serialize back\n\t\td, err := DeserializeInt(&b)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error while deserializing int back: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif d != tst.value {\n\t\t\tt.Fatalf(\"Incorrectly deserialized the value back, was: %v, got %v\", tst.value, d)\n\t\t\tcontinue\n\t\t}\n\n\t}\n\n}\n\nvar intDeserializationData = []struct {\n\tserialized []byte\n\tvalue uint64\n\tbogus bool\n}{\n\t{[]byte{0x00}, 0x00, false},\n\t{[]byte{0x01}, 0x01, false},\n\t{[]byte{0x02}, 0x02, false},\n\t{[]byte{0x7F}, 0x7F, false},\n\t{[]byte{0x80, 0x01}, 0x80, false},\n\t{[]byte{0x81, 0x01}, 0x81, false},\n\t{[]byte{0x80, 0x02}, 0x100, false},\n\t{[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01}, 0xFFFFFFFFFFFFFFFF, false},\n\n\t{[]byte{0x80, 0x00}, 0x00, true},\n\t{[]byte{0x81, 0x00}, 0x01, true},\n\t{[]byte{0x80, 0x82, 0x00}, 0x100, true},\n\t{[]byte{0x80}, 0x00, true},\n\t{[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00}, 0, true},\n\t{[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x02}, 0, true},\n\t{[]byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x02}, 0, true},\n}\n\nfunc TestIntDeserialization(t *testing.T) {\n\n\tfor _, tst := range intDeserializationData {\n\n\t\tv, err := DeserializeInt(bytes.NewBuffer(tst.serialized))\n\n\t\tif tst.bogus {\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"Dserialization of bogus data did not produce an error\")\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Could not deserialize data, expected value: %v, got error: %v\", tst.value, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif v != tst.value {\n\t\t\tt.Errorf(\"Mismatch of deserialized value: %v, should be: %v\", v, tst.value)\n\t\t}\n\t}\n}\n\nvar bufferSerializationData = []struct {\n\tvalue []byte\n\tserialized []byte\n\terr error\n\tmaxLength uint64\n}{\n\t{[]byte{}, []byte{0}, nil, 0},\n\t{[]byte{0}, []byte{1, 0}, nil, 1},\n\t{[]byte{0}, nil, ErrBufferToLarge, 0},\n\t{[]byte{7, 13}, []byte{2, 7, 13}, nil, 2},\n\t{[]byte{7, 13}, nil, ErrBufferToLarge, 1},\n\t{[]byte{7, 13}, nil, ErrBufferToLarge, 0},\n}\n\nfunc TestBufferSerialization(t *testing.T) {\n\n\tfor _, tst := range bufferSerializationData {\n\n\t\tvar b bytes.Buffer\n\n\t\terr := SerializeBuffer(tst.value, &b, tst.maxLength)\n\n\t\tif tst.err != nil {\n\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"Was able to serialize buffer but expected error\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Could not serialize buffer: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif !bytes.Equal(b.Bytes(), tst.serialized) {\n\t\t\tt.Errorf(\"Serialized buffer value is incorrect, expected: %v, is: %v\", hexDump(tst.serialized), hexDump(b.Bytes()))\n\t\t\tcontinue\n\t\t}\n\n\t\tv2, err := DeserializeBuffer(&b, tst.maxLength)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unexpected deserialization error: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif !bytes.Equal(v2, tst.value) {\n\t\t\tt.Errorf(\"Buffer value after deserialization is invalid, expected: %v, is: %v\", hexDump(tst.value), v2)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nvar bufferDeserializationData = []struct {\n\tserialized []byte\n\tvalue []byte\n\terr error\n\tmaxLength uint64\n}{\n\t{[]byte{0}, []byte{}, nil, 0},\n\t{[]byte{1, 0}, []byte{0}, nil, 1},\n\t{[]byte{1, 0}, nil, ErrBufferToLarge, 0},\n\t{[]byte{2, 7, 13}, []byte{7, 13}, nil, 2},\n\t{[]byte{2, 7, 13}, nil, ErrBufferToLarge, 1},\n\t{[]byte{2, 7, 13}, nil, ErrBufferToLarge, 0},\n}\n\nfunc TestBufferDeserialization(t *testing.T) {\n\n\tfor _, tst := range bufferDeserializationData {\n\n\t\tv, err := DeserializeBuffer(bytes.NewBuffer(tst.serialized), tst.maxLength)\n\n\t\tif tst.err != nil {\n\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"Was able to serialize buffer but expected error\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Could not deserialize buffer: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif !bytes.Equal(v, tst.value) {\n\t\t\tt.Errorf(\"Buffer value after deserialization is invalid, expected: %v, is: %v\", hexDump(tst.value), v)\n\t\t\tcontinue\n\t\t}\n\n\t}\n\n}\n<commit_msg>Add string serialization tests<commit_after>package utils\n\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"testing\"\n)\n\nfunc hexDump(data []byte) string {\n\tif data == nil {\n\t\treturn \"nil\"\n\t}\n\n\tif len(data) > 8 {\n\t\treturn hex.EncodeToString(data[:8]) + \"...\"\n\t}\n\n\treturn hex.EncodeToString(data)\n}\n\nvar intSerializationData = []struct {\n\tvalue uint64\n\tserialized []byte\n}{\n\t{0x00, []byte{0x00}},\n\t{0x01, []byte{0x01}},\n\t{0x02, []byte{0x02}},\n\t{0x7F, []byte{0x7F}},\n\t{0x80, []byte{0x80, 0x01}},\n\t{0x81, []byte{0x81, 0x01}},\n\t{0x100, []byte{0x80, 0x02}},\n\t{0xFFFFFFFFFFFFFFFF, []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01}},\n}\n\nfunc TestIntSerialization(t *testing.T) {\n\n\tfor _, tst := range intSerializationData {\n\n\t\tvar b bytes.Buffer\n\n\t\terr := SerializeInt(tst.value, &b)\n\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error while serializing int: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\ts := b.Bytes()\n\t\tif !bytes.Equal(s, tst.serialized) {\n\t\t\tt.Fatalf(\n\t\t\t\t\"Serialization of value %v failed, data not equal (%v vs %v)\",\n\t\t\t\ttst.value,\n\t\t\t\thexDump(s),\n\t\t\t\thexDump(tst.serialized))\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Try to serialize back\n\t\td, err := DeserializeInt(&b)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Error while deserializing int back: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif d != tst.value {\n\t\t\tt.Fatalf(\"Incorrectly deserialized the value back, was: %v, got %v\", tst.value, d)\n\t\t\tcontinue\n\t\t}\n\n\t}\n\n}\n\nvar intDeserializationData = []struct {\n\tserialized []byte\n\tvalue uint64\n\tbogus bool\n}{\n\t{[]byte{0x00}, 0x00, false},\n\t{[]byte{0x01}, 0x01, false},\n\t{[]byte{0x02}, 0x02, false},\n\t{[]byte{0x7F}, 0x7F, false},\n\t{[]byte{0x80, 0x01}, 0x80, false},\n\t{[]byte{0x81, 0x01}, 0x81, false},\n\t{[]byte{0x80, 0x02}, 0x100, false},\n\t{[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01}, 0xFFFFFFFFFFFFFFFF, false},\n\n\t{[]byte{0x80, 0x00}, 0x00, true},\n\t{[]byte{0x81, 0x00}, 0x01, true},\n\t{[]byte{0x80, 0x82, 0x00}, 0x100, true},\n\t{[]byte{0x80}, 0x00, true},\n\t{[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00}, 0, true},\n\t{[]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x02}, 0, true},\n\t{[]byte{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x02}, 0, true},\n}\n\nfunc TestIntDeserialization(t *testing.T) {\n\n\tfor _, tst := range intDeserializationData {\n\n\t\tv, err := DeserializeInt(bytes.NewBuffer(tst.serialized))\n\n\t\tif tst.bogus {\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"Dserialization of bogus data did not produce an error\")\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Could not deserialize data, expected value: %v, got error: %v\", tst.value, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif v != tst.value {\n\t\t\tt.Errorf(\"Mismatch of deserialized value: %v, should be: %v\", v, tst.value)\n\t\t}\n\t}\n}\n\nvar bufferSerializationData = []struct {\n\tvalue []byte\n\tserialized []byte\n\terr error\n\tmaxLength uint64\n}{\n\t{[]byte{}, []byte{0}, nil, 0},\n\t{[]byte{0}, []byte{1, 0}, nil, 1},\n\t{[]byte{0}, nil, ErrBufferToLarge, 0},\n\t{[]byte{7, 13}, []byte{2, 7, 13}, nil, 2},\n\t{[]byte{7, 13}, nil, ErrBufferToLarge, 1},\n\t{[]byte{7, 13}, nil, ErrBufferToLarge, 0},\n}\n\nfunc TestBufferSerialization(t *testing.T) {\n\n\tfor _, tst := range bufferSerializationData {\n\n\t\tvar b bytes.Buffer\n\n\t\terr := SerializeBuffer(tst.value, &b, tst.maxLength)\n\n\t\tif tst.err != nil {\n\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"Was able to serialize buffer but expected error\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Could not serialize buffer: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif !bytes.Equal(b.Bytes(), tst.serialized) {\n\t\t\tt.Errorf(\"Serialized buffer value is incorrect, expected: %v, is: %v\", hexDump(tst.serialized), hexDump(b.Bytes()))\n\t\t\tcontinue\n\t\t}\n\n\t\tv2, err := DeserializeBuffer(&b, tst.maxLength)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unexpected deserialization error: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif !bytes.Equal(v2, tst.value) {\n\t\t\tt.Errorf(\"Buffer value after deserialization is invalid, expected: %v, is: %v\", hexDump(tst.value), v2)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nvar bufferDeserializationData = []struct {\n\tserialized []byte\n\tvalue []byte\n\terr error\n\tmaxLength uint64\n}{\n\t{[]byte{0}, []byte{}, nil, 0},\n\t{[]byte{1, 0}, []byte{0}, nil, 1},\n\t{[]byte{1, 0}, nil, ErrBufferToLarge, 0},\n\t{[]byte{2, 7, 13}, []byte{7, 13}, nil, 2},\n\t{[]byte{2, 7, 13}, nil, ErrBufferToLarge, 1},\n\t{[]byte{2, 7, 13}, nil, ErrBufferToLarge, 0},\n}\n\nfunc TestBufferDeserialization(t *testing.T) {\n\n\tfor _, tst := range bufferDeserializationData {\n\n\t\tv, err := DeserializeBuffer(bytes.NewBuffer(tst.serialized), tst.maxLength)\n\n\t\tif tst.err != nil {\n\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"Was able to serialize buffer but expected error\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Could not deserialize buffer: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif !bytes.Equal(v, tst.value) {\n\t\t\tt.Errorf(\"Buffer value after deserialization is invalid, expected: %v, is: %v\", hexDump(tst.value), hexDump(v))\n\t\t\tcontinue\n\t\t}\n\n\t}\n\n}\n\nvar stringSerializationData = []struct {\n\tvalue string\n\tserialized []byte\n\terr error\n\tmaxLength uint64\n}{\n\t{\"\", []byte{0}, nil, 0},\n\t{\"a\", []byte{1, 'a'}, nil, 1},\n\t{\"a\", nil, ErrBufferToLarge, 0},\n\t{\"abc\", []byte{3, 'a', 'b', 'c'}, nil, 3},\n\t{\"abc\", nil, ErrBufferToLarge, 2},\n\t{\"abc\", nil, ErrBufferToLarge, 1},\n\t{\"abc\", nil, ErrBufferToLarge, 0},\n\t{\"\\u2318\", []byte{3, 0xe2, 0x8c, 0x98}, nil, 3},\n}\n\nfunc TestStringSerialization(t *testing.T) {\n\n\tfor _, tst := range stringSerializationData {\n\n\t\tvar b bytes.Buffer\n\n\t\terr := SerializeString(tst.value, &b, tst.maxLength)\n\n\t\tif tst.err != nil {\n\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"Was able to serialize string but expected error\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Could not serialize string: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif !bytes.Equal(b.Bytes(), tst.serialized) {\n\t\t\tt.Errorf(\"Serialized string value is incorrect, expected: %v, is: %v\", hexDump(tst.serialized), hexDump(b.Bytes()))\n\t\t\tcontinue\n\t\t}\n\n\t\tv2, err := DeserializeString(&b, tst.maxLength)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unexpected deserialization error: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif v2 != tst.value {\n\t\t\tt.Errorf(\"String value after deserialization is invalid, expected: %v, is: %v\", tst.value, v2)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nvar stringDeserializationData = []struct {\n\tserialized []byte\n\tvalue string\n\terr error\n\tmaxLength uint64\n}{\n\t{[]byte{0}, \"\", nil, 0},\n\t{[]byte{1, 'a'}, \"a\", nil, 1},\n\t{[]byte{1, 'a'}, \"\", ErrBufferToLarge, 0},\n\t{[]byte{2, 'a', 'b'}, \"ab\", nil, 2},\n\t{[]byte{2, 'a', 'b'}, \"\", ErrBufferToLarge, 1},\n\t{[]byte{2, 'a', 'b'}, \"\", ErrBufferToLarge, 0},\n\t{[]byte{2, 0x80, 'a'}, \"\", ErrStringNotUTF8, 2},\n}\n\nfunc TestStrigDeserialization(t *testing.T) {\n\n\tfor _, tst := range stringDeserializationData {\n\n\t\tv, err := DeserializeString(bytes.NewBuffer(tst.serialized), tst.maxLength)\n\n\t\tif tst.err != nil {\n\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"Was able to serialize string but expected error\")\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Could not deserialize string: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif v != tst.value {\n\t\t\tt.Errorf(\"String value after deserialization is invalid, expected: %v, is: %v\", tst.value, v)\n\t\t\tcontinue\n\t\t}\n\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package physical\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/mgutz\/logxi\/v1\"\n\n\t\"github.com\/armon\/go-metrics\"\n\tmysql \"github.com\/go-sql-driver\/mysql\"\n)\n\n\/\/ Unreserved tls key\n\/\/ Reserved values are \"true\", \"false\", \"skip-verify\"\nconst mysqlTLSKey = \"default\"\n\n\/\/ MySQLBackend is a physical backend that stores data\n\/\/ within MySQL database.\ntype MySQLBackend struct {\n\tdbTable string\n\tclient *sql.DB\n\tstatements map[string]*sql.Stmt\n\tlogger log.Logger\n}\n\n\/\/ newMySQLBackend constructs a MySQL backend using the given API client and\n\/\/ server address and credential for accessing mysql database.\nfunc newMySQLBackend(conf map[string]string, logger log.Logger) (Backend, error) {\n\t\/\/ Get the MySQL credentials to perform read\/write operations.\n\tusername, ok := conf[\"username\"]\n\tif !ok || username == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing username\")\n\t}\n\tpassword, ok := conf[\"password\"]\n\tif !ok || username == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing password\")\n\t}\n\n\t\/\/ Get or set MySQL server address. Defaults to localhost and default port(3306)\n\taddress, ok := conf[\"address\"]\n\tif !ok {\n\t\taddress = \"127.0.0.1:3306\"\n\t}\n\n\t\/\/ Get the MySQL database and table details.\n\tdatabase, ok := conf[\"database\"]\n\tif !ok {\n\t\tdatabase = \"vault\"\n\t}\n\ttable, ok := conf[\"table\"]\n\tif !ok {\n\t\ttable = \"vault\"\n\t}\n\tdbTable := database + \".\" + table\n\n\tdsnParams := url.Values{}\n\ttlsCaFile, ok := conf[\"tls_ca_file\"]\n\tif ok {\n\t\tif err := setupMySQLTLSConfig(tlsCaFile); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed register TLS config: %v\", err)\n\t\t}\n\n\t\tdsnParams.Add(\"tls\", mysqlTLSKey)\n\t}\n\n\t\/\/ Create MySQL handle for the database.\n\tdsn := username + \":\" + password + \"@tcp(\" + address + \")\/?\" + dsnParams.Encode()\n\tdb, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to connect to mysql: %v\", err)\n\t}\n\n\t\/\/ Create the required database if it doesn't exists.\n\tif _, err := db.Exec(\"CREATE DATABASE IF NOT EXISTS \" + database); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create mysql database: %v\", err)\n\t}\n\n\t\/\/ Create the required table if it doesn't exists.\n\tcreate_query := \"CREATE TABLE IF NOT EXISTS \" + dbTable +\n\t\t\" (vault_key varbinary(512), vault_value mediumblob, PRIMARY KEY (vault_key))\"\n\tif _, err := db.Exec(create_query); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create mysql table: %v\", err)\n\t}\n\n\t\/\/ Setup the backend.\n\tm := &MySQLBackend{\n\t\tdbTable: dbTable,\n\t\tclient: db,\n\t\tstatements: make(map[string]*sql.Stmt),\n\t\tlogger: logger,\n\t}\n\n\t\/\/ Prepare all the statements required\n\tstatements := map[string]string{\n\t\t\"put\": \"INSERT INTO \" + dbTable +\n\t\t\t\" VALUES( ?, ? ) ON DUPLICATE KEY UPDATE vault_value=VALUES(vault_value)\",\n\t\t\"get\": \"SELECT vault_value FROM \" + dbTable + \" WHERE vault_key = ?\",\n\t\t\"delete\": \"DELETE FROM \" + dbTable + \" WHERE vault_key = ?\",\n\t\t\"list\": \"SELECT vault_key FROM \" + dbTable + \" WHERE vault_key LIKE ?\",\n\t}\n\tfor name, query := range statements {\n\t\tif err := m.prepare(name, query); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn m, nil\n}\n\n\/\/ prepare is a helper to prepare a query for future execution\nfunc (m *MySQLBackend) prepare(name, query string) error {\n\tstmt, err := m.client.Prepare(query)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to prepare '%s': %v\", name, err)\n\t}\n\tm.statements[name] = stmt\n\treturn nil\n}\n\n\/\/ Put is used to insert or update an entry.\nfunc (m *MySQLBackend) Put(entry *Entry) error {\n\tdefer metrics.MeasureSince([]string{\"mysql\", \"put\"}, time.Now())\n\n\t_, err := m.statements[\"put\"].Exec(entry.Key, entry.Value)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Get is used to fetch and entry.\nfunc (m *MySQLBackend) Get(key string) (*Entry, error) {\n\tdefer metrics.MeasureSince([]string{\"mysql\", \"get\"}, time.Now())\n\n\tvar result []byte\n\terr := m.statements[\"get\"].QueryRow(key).Scan(&result)\n\tif err == sql.ErrNoRows {\n\t\treturn nil, nil\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tent := &Entry{\n\t\tKey: key,\n\t\tValue: result,\n\t}\n\treturn ent, nil\n}\n\n\/\/ Delete is used to permanently delete an entry\nfunc (m *MySQLBackend) Delete(key string) error {\n\tdefer metrics.MeasureSince([]string{\"mysql\", \"delete\"}, time.Now())\n\n\t_, err := m.statements[\"delete\"].Exec(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ List is used to list all the keys under a given\n\/\/ prefix, up to the next prefix.\nfunc (m *MySQLBackend) List(prefix string) ([]string, error) {\n\tdefer metrics.MeasureSince([]string{\"mysql\", \"list\"}, time.Now())\n\n\t\/\/ Add the % wildcard to the prefix to do the prefix search\n\tlikePrefix := prefix + \"%\"\n\trows, err := m.statements[\"list\"].Query(likePrefix)\n\n\tvar keys []string\n\tfor rows.Next() {\n\t\tvar key string\n\t\terr = rows.Scan(&key)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to scan rows: %v\", err)\n\t\t}\n\n\t\tkey = strings.TrimPrefix(key, prefix)\n\t\tif i := strings.Index(key, \"\/\"); i == -1 {\n\t\t\t\/\/ Add objects only from the current 'folder'\n\t\t\tkeys = append(keys, key)\n\t\t} else if i != -1 {\n\t\t\t\/\/ Add truncated 'folder' paths\n\t\t\tkeys = appendIfMissing(keys, string(key[:i+1]))\n\t\t}\n\t}\n\n\tsort.Strings(keys)\n\treturn keys, nil\n}\n\n\/\/ Establish a TLS connection with a given CA certificate\n\/\/ Register a tsl.Config associted with the same key as the dns param from sql.Open\n\/\/ foo:bar@tcp(127.0.0.1:3306)\/dbname?tls=default\nfunc setupMySQLTLSConfig(tlsCaFile string) error {\n\trootCertPool := x509.NewCertPool()\n\n\tpem, err := ioutil.ReadFile(tlsCaFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ok := rootCertPool.AppendCertsFromPEM(pem); !ok {\n\t\treturn err\n\t}\n\n\terr = mysql.RegisterTLSConfig(mysqlTLSKey, &tls.Config{\n\t\tRootCAs: rootCertPool,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>check for failure on that mysql query (#2105)<commit_after>package physical\n\nimport (\n\t\"crypto\/tls\"\n\t\"crypto\/x509\"\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\tlog \"github.com\/mgutz\/logxi\/v1\"\n\n\t\"github.com\/armon\/go-metrics\"\n\tmysql \"github.com\/go-sql-driver\/mysql\"\n)\n\n\/\/ Unreserved tls key\n\/\/ Reserved values are \"true\", \"false\", \"skip-verify\"\nconst mysqlTLSKey = \"default\"\n\n\/\/ MySQLBackend is a physical backend that stores data\n\/\/ within MySQL database.\ntype MySQLBackend struct {\n\tdbTable string\n\tclient *sql.DB\n\tstatements map[string]*sql.Stmt\n\tlogger log.Logger\n}\n\n\/\/ newMySQLBackend constructs a MySQL backend using the given API client and\n\/\/ server address and credential for accessing mysql database.\nfunc newMySQLBackend(conf map[string]string, logger log.Logger) (Backend, error) {\n\t\/\/ Get the MySQL credentials to perform read\/write operations.\n\tusername, ok := conf[\"username\"]\n\tif !ok || username == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing username\")\n\t}\n\tpassword, ok := conf[\"password\"]\n\tif !ok || username == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing password\")\n\t}\n\n\t\/\/ Get or set MySQL server address. Defaults to localhost and default port(3306)\n\taddress, ok := conf[\"address\"]\n\tif !ok {\n\t\taddress = \"127.0.0.1:3306\"\n\t}\n\n\t\/\/ Get the MySQL database and table details.\n\tdatabase, ok := conf[\"database\"]\n\tif !ok {\n\t\tdatabase = \"vault\"\n\t}\n\ttable, ok := conf[\"table\"]\n\tif !ok {\n\t\ttable = \"vault\"\n\t}\n\tdbTable := database + \".\" + table\n\n\tdsnParams := url.Values{}\n\ttlsCaFile, ok := conf[\"tls_ca_file\"]\n\tif ok {\n\t\tif err := setupMySQLTLSConfig(tlsCaFile); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed register TLS config: %v\", err)\n\t\t}\n\n\t\tdsnParams.Add(\"tls\", mysqlTLSKey)\n\t}\n\n\t\/\/ Create MySQL handle for the database.\n\tdsn := username + \":\" + password + \"@tcp(\" + address + \")\/?\" + dsnParams.Encode()\n\tdb, err := sql.Open(\"mysql\", dsn)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to connect to mysql: %v\", err)\n\t}\n\n\t\/\/ Create the required database if it doesn't exists.\n\tif _, err := db.Exec(\"CREATE DATABASE IF NOT EXISTS \" + database); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create mysql database: %v\", err)\n\t}\n\n\t\/\/ Create the required table if it doesn't exists.\n\tcreate_query := \"CREATE TABLE IF NOT EXISTS \" + dbTable +\n\t\t\" (vault_key varbinary(512), vault_value mediumblob, PRIMARY KEY (vault_key))\"\n\tif _, err := db.Exec(create_query); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create mysql table: %v\", err)\n\t}\n\n\t\/\/ Setup the backend.\n\tm := &MySQLBackend{\n\t\tdbTable: dbTable,\n\t\tclient: db,\n\t\tstatements: make(map[string]*sql.Stmt),\n\t\tlogger: logger,\n\t}\n\n\t\/\/ Prepare all the statements required\n\tstatements := map[string]string{\n\t\t\"put\": \"INSERT INTO \" + dbTable +\n\t\t\t\" VALUES( ?, ? ) ON DUPLICATE KEY UPDATE vault_value=VALUES(vault_value)\",\n\t\t\"get\": \"SELECT vault_value FROM \" + dbTable + \" WHERE vault_key = ?\",\n\t\t\"delete\": \"DELETE FROM \" + dbTable + \" WHERE vault_key = ?\",\n\t\t\"list\": \"SELECT vault_key FROM \" + dbTable + \" WHERE vault_key LIKE ?\",\n\t}\n\tfor name, query := range statements {\n\t\tif err := m.prepare(name, query); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn m, nil\n}\n\n\/\/ prepare is a helper to prepare a query for future execution\nfunc (m *MySQLBackend) prepare(name, query string) error {\n\tstmt, err := m.client.Prepare(query)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to prepare '%s': %v\", name, err)\n\t}\n\tm.statements[name] = stmt\n\treturn nil\n}\n\n\/\/ Put is used to insert or update an entry.\nfunc (m *MySQLBackend) Put(entry *Entry) error {\n\tdefer metrics.MeasureSince([]string{\"mysql\", \"put\"}, time.Now())\n\n\t_, err := m.statements[\"put\"].Exec(entry.Key, entry.Value)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ Get is used to fetch and entry.\nfunc (m *MySQLBackend) Get(key string) (*Entry, error) {\n\tdefer metrics.MeasureSince([]string{\"mysql\", \"get\"}, time.Now())\n\n\tvar result []byte\n\terr := m.statements[\"get\"].QueryRow(key).Scan(&result)\n\tif err == sql.ErrNoRows {\n\t\treturn nil, nil\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tent := &Entry{\n\t\tKey: key,\n\t\tValue: result,\n\t}\n\treturn ent, nil\n}\n\n\/\/ Delete is used to permanently delete an entry\nfunc (m *MySQLBackend) Delete(key string) error {\n\tdefer metrics.MeasureSince([]string{\"mysql\", \"delete\"}, time.Now())\n\n\t_, err := m.statements[\"delete\"].Exec(key)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ List is used to list all the keys under a given\n\/\/ prefix, up to the next prefix.\nfunc (m *MySQLBackend) List(prefix string) ([]string, error) {\n\tdefer metrics.MeasureSince([]string{\"mysql\", \"list\"}, time.Now())\n\n\t\/\/ Add the % wildcard to the prefix to do the prefix search\n\tlikePrefix := prefix + \"%\"\n\trows, err := m.statements[\"list\"].Query(likePrefix)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to execute statement: %v\", err)\n\t}\n\n\tvar keys []string\n\tfor rows.Next() {\n\t\tvar key string\n\t\terr = rows.Scan(&key)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to scan rows: %v\", err)\n\t\t}\n\n\t\tkey = strings.TrimPrefix(key, prefix)\n\t\tif i := strings.Index(key, \"\/\"); i == -1 {\n\t\t\t\/\/ Add objects only from the current 'folder'\n\t\t\tkeys = append(keys, key)\n\t\t} else if i != -1 {\n\t\t\t\/\/ Add truncated 'folder' paths\n\t\t\tkeys = appendIfMissing(keys, string(key[:i+1]))\n\t\t}\n\t}\n\n\tsort.Strings(keys)\n\treturn keys, nil\n}\n\n\/\/ Establish a TLS connection with a given CA certificate\n\/\/ Register a tsl.Config associted with the same key as the dns param from sql.Open\n\/\/ foo:bar@tcp(127.0.0.1:3306)\/dbname?tls=default\nfunc setupMySQLTLSConfig(tlsCaFile string) error {\n\trootCertPool := x509.NewCertPool()\n\n\tpem, err := ioutil.ReadFile(tlsCaFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif ok := rootCertPool.AppendCertsFromPEM(pem); !ok {\n\t\treturn err\n\t}\n\n\terr = mysql.RegisterTLSConfig(mysqlTLSKey, &tls.Config{\n\t\tRootCAs: rootCertPool,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Copyright 2021 The CloudEvents Authors\n SPDX-License-Identifier: Apache-2.0\n*\/\n\npackage http\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\tnethttp \"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ Option is the function signature required to be considered an http.Option.\ntype Option func(*Protocol) error\n\n\/\/ WithTarget sets the outbound recipient of cloudevents when using an HTTP\n\/\/ request.\nfunc WithTarget(targetUrl string) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http target option can not set nil protocol\")\n\t\t}\n\t\ttargetUrl = strings.TrimSpace(targetUrl)\n\t\tif targetUrl != \"\" {\n\t\t\tvar err error\n\t\t\tvar target *url.URL\n\t\t\ttarget, err = url.Parse(targetUrl)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"http target option failed to parse target url: %s\", err.Error())\n\t\t\t}\n\n\t\t\tp.Target = target\n\n\t\t\tif p.RequestTemplate == nil {\n\t\t\t\tp.RequestTemplate = &nethttp.Request{\n\t\t\t\t\tMethod: nethttp.MethodPost,\n\t\t\t\t}\n\t\t\t}\n\t\t\tp.RequestTemplate.URL = target\n\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"http target option was empty string\")\n\t}\n}\n\n\/\/ WithHeader sets an additional default outbound header for all cloudevents\n\/\/ when using an HTTP request.\nfunc WithHeader(key, value string) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http header option can not set nil protocol\")\n\t\t}\n\t\tkey = strings.TrimSpace(key)\n\t\tif key != \"\" {\n\t\t\tif p.RequestTemplate == nil {\n\t\t\t\tp.RequestTemplate = &nethttp.Request{\n\t\t\t\t\tMethod: nethttp.MethodPost,\n\t\t\t\t}\n\t\t\t}\n\t\t\tif p.RequestTemplate.Header == nil {\n\t\t\t\tp.RequestTemplate.Header = nethttp.Header{}\n\t\t\t}\n\t\t\tp.RequestTemplate.Header.Add(key, value)\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"http header option was empty string\")\n\t}\n}\n\n\/\/ WithShutdownTimeout sets the shutdown timeout when the http server is being shutdown.\nfunc WithShutdownTimeout(timeout time.Duration) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http shutdown timeout option can not set nil protocol\")\n\t\t}\n\t\tp.ShutdownTimeout = timeout\n\t\treturn nil\n\t}\n}\n\nfunc checkListen(p *Protocol, prefix string) error {\n\tswitch {\n\tcase p.listener.Load() != nil:\n\t\treturn fmt.Errorf(\"error setting %v: listener already set\", prefix)\n\t}\n\treturn nil\n}\n\n\/\/ WithPort sets the listening port for StartReceiver.\n\/\/ Only one of WithListener or WithPort is allowed.\nfunc WithPort(port int) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http port option can not set nil protocol\")\n\t\t}\n\t\tif port < 0 || port > 65535 {\n\t\t\treturn fmt.Errorf(\"http port option was given an invalid port: %d\", port)\n\t\t}\n\t\tif err := checkListen(p, \"http port option\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.Port = port\n\t\treturn nil\n\t}\n}\n\n\/\/ WithListener sets the listener for StartReceiver.\n\/\/ Only one of WithListener or WithPort is allowed.\nfunc WithListener(l net.Listener) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http listener option can not set nil protocol\")\n\t\t}\n\t\tif err := checkListen(p, \"http listener\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.listener.Store(l)\n\t\treturn nil\n\t}\n}\n\n\/\/ WithPath sets the path to receive cloudevents on for HTTP transports.\nfunc WithPath(path string) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http path option can not set nil protocol\")\n\t\t}\n\t\tpath = strings.TrimSpace(path)\n\t\tif len(path) == 0 {\n\t\t\treturn fmt.Errorf(\"http path option was given an invalid path: %q\", path)\n\t\t}\n\t\tp.Path = path\n\t\treturn nil\n\t}\n}\n\n\/\/ WithMethod sets the HTTP verb (GET, POST, PUT, etc.) to use\n\/\/ when using an HTTP request.\nfunc WithMethod(method string) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http method option can not set nil protocol\")\n\t\t}\n\t\tmethod = strings.TrimSpace(method)\n\t\tif method != \"\" {\n\t\t\tif p.RequestTemplate == nil {\n\t\t\t\tp.RequestTemplate = &nethttp.Request{}\n\t\t\t}\n\t\t\tp.RequestTemplate.Method = method\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"http method option was empty string\")\n\t}\n}\n\n\/\/\n\/\/ Middleware is a function that takes an existing http.Handler and wraps it in middleware,\n\/\/ returning the wrapped http.Handler.\ntype Middleware func(next nethttp.Handler) nethttp.Handler\n\n\/\/ WithMiddleware adds an HTTP middleware to the transport. It may be specified multiple times.\n\/\/ Middleware is applied to everything before it. For example\n\/\/ `NewClient(WithMiddleware(foo), WithMiddleware(bar))` would result in `bar(foo(original))`.\nfunc WithMiddleware(middleware Middleware) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http middleware option can not set nil protocol\")\n\t\t}\n\t\tp.middleware = append(p.middleware, middleware)\n\t\treturn nil\n\t}\n}\n\n\/\/ WithRoundTripper sets the HTTP RoundTripper.\nfunc WithRoundTripper(roundTripper nethttp.RoundTripper) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http round tripper option can not set nil protocol\")\n\t\t}\n\t\tp.roundTripper = roundTripper\n\t\treturn nil\n\t}\n}\n\n\/\/ WithRoundTripperDecorator decorates the default HTTP RoundTripper chosen.\nfunc WithRoundTripperDecorator(decorator func(roundTripper nethttp.RoundTripper) nethttp.RoundTripper) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http round tripper option can not set nil protocol\")\n\t\t}\n\t\tif p.roundTripper == nil {\n\t\t\tif p.Client == nil {\n\t\t\t\tp.roundTripper = nethttp.DefaultTransport\n\t\t\t} else {\n\t\t\t\tp.roundTripper = p.Client.Transport\n\t\t\t}\n\t\t}\n\t\tp.roundTripper = decorator(p.roundTripper)\n\t\treturn nil\n\t}\n}\n\n\/\/ WithClient sets the protocol client\nfunc WithClient(client nethttp.Client) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"client option can not set nil protocol\")\n\t\t}\n\t\tp.Client = &client\n\t\treturn nil\n\t}\n}\n\n\/\/ WithGetHandlerFunc sets the http GET handler func\nfunc WithGetHandlerFunc(fn nethttp.HandlerFunc) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http GET handler func can not set nil protocol\")\n\t\t}\n\t\tp.GetHandlerFn = fn\n\t\treturn nil\n\t}\n}\n\n\/\/ WithOptionsHandlerFunc sets the http OPTIONS handler func\nfunc WithOptionsHandlerFunc(fn nethttp.HandlerFunc) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http OPTIONS handler func can not set nil protocol\")\n\t\t}\n\t\tp.OptionsHandlerFn = fn\n\t\treturn nil\n\t}\n}\n\n\/\/ WithDefaultOptionsHandlerFunc sets the options handler to be the built in handler and configures the options.\n\/\/ methods: the supported methods reported to OPTIONS caller.\n\/\/ rate: the rate limit reported to OPTIONS caller.\n\/\/ origins: the prefix of the accepted origins, or \"*\".\n\/\/ callback: preform the callback to ACK the OPTIONS request.\nfunc WithDefaultOptionsHandlerFunc(methods []string, rate int, origins []string, callback bool) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http OPTIONS handler func can not set nil protocol\")\n\t\t}\n\t\tp.OptionsHandlerFn = p.DeleteHandlerFn\n\t\tp.WebhookConfig = &WebhookConfig{\n\t\t\tAllowedMethods: methods,\n\t\t\tAllowedRate: &rate,\n\t\t\tAllowedOrigins: origins,\n\t\t\tAutoACKCallback: callback,\n\t\t}\n\t\treturn nil\n\t}\n}\n\n\/\/ IsRetriable is a custom function that can be used to override the\n\/\/ default retriable status codes.\ntype IsRetriable func(statusCode int) bool\n\n\/\/ WithIsRetriableFunc sets the function that gets called to determine if an\n\/\/ error should be retried. If not set, the defaultIsRetriableFunc is used.\nfunc WithIsRetriableFunc(isRetriable IsRetriable) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"isRetriable handler func can not set nil protocol\")\n\t\t}\n\t\tif isRetriable == nil {\n\t\t\treturn fmt.Errorf(\"isRetriable handler can not be nil\")\n\t\t}\n\t\tp.isRetriableFunc = isRetriable\n\t\treturn nil\n\t}\n}\n\nfunc WithRateLimiter(rl RateLimiter) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http OPTIONS handler func can not set nil protocol\")\n\t\t}\n\t\tp.limiter = rl\n\t\treturn nil\n\t}\n}\n<commit_msg>Adding proper abuse protection handler (#743)<commit_after>\/*\n Copyright 2021 The CloudEvents Authors\n SPDX-License-Identifier: Apache-2.0\n*\/\n\npackage http\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\tnethttp \"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/\/ Option is the function signature required to be considered an http.Option.\ntype Option func(*Protocol) error\n\n\/\/ WithTarget sets the outbound recipient of cloudevents when using an HTTP\n\/\/ request.\nfunc WithTarget(targetUrl string) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http target option can not set nil protocol\")\n\t\t}\n\t\ttargetUrl = strings.TrimSpace(targetUrl)\n\t\tif targetUrl != \"\" {\n\t\t\tvar err error\n\t\t\tvar target *url.URL\n\t\t\ttarget, err = url.Parse(targetUrl)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"http target option failed to parse target url: %s\", err.Error())\n\t\t\t}\n\n\t\t\tp.Target = target\n\n\t\t\tif p.RequestTemplate == nil {\n\t\t\t\tp.RequestTemplate = &nethttp.Request{\n\t\t\t\t\tMethod: nethttp.MethodPost,\n\t\t\t\t}\n\t\t\t}\n\t\t\tp.RequestTemplate.URL = target\n\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"http target option was empty string\")\n\t}\n}\n\n\/\/ WithHeader sets an additional default outbound header for all cloudevents\n\/\/ when using an HTTP request.\nfunc WithHeader(key, value string) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http header option can not set nil protocol\")\n\t\t}\n\t\tkey = strings.TrimSpace(key)\n\t\tif key != \"\" {\n\t\t\tif p.RequestTemplate == nil {\n\t\t\t\tp.RequestTemplate = &nethttp.Request{\n\t\t\t\t\tMethod: nethttp.MethodPost,\n\t\t\t\t}\n\t\t\t}\n\t\t\tif p.RequestTemplate.Header == nil {\n\t\t\t\tp.RequestTemplate.Header = nethttp.Header{}\n\t\t\t}\n\t\t\tp.RequestTemplate.Header.Add(key, value)\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"http header option was empty string\")\n\t}\n}\n\n\/\/ WithShutdownTimeout sets the shutdown timeout when the http server is being shutdown.\nfunc WithShutdownTimeout(timeout time.Duration) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http shutdown timeout option can not set nil protocol\")\n\t\t}\n\t\tp.ShutdownTimeout = timeout\n\t\treturn nil\n\t}\n}\n\nfunc checkListen(p *Protocol, prefix string) error {\n\tswitch {\n\tcase p.listener.Load() != nil:\n\t\treturn fmt.Errorf(\"error setting %v: listener already set\", prefix)\n\t}\n\treturn nil\n}\n\n\/\/ WithPort sets the listening port for StartReceiver.\n\/\/ Only one of WithListener or WithPort is allowed.\nfunc WithPort(port int) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http port option can not set nil protocol\")\n\t\t}\n\t\tif port < 0 || port > 65535 {\n\t\t\treturn fmt.Errorf(\"http port option was given an invalid port: %d\", port)\n\t\t}\n\t\tif err := checkListen(p, \"http port option\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.Port = port\n\t\treturn nil\n\t}\n}\n\n\/\/ WithListener sets the listener for StartReceiver.\n\/\/ Only one of WithListener or WithPort is allowed.\nfunc WithListener(l net.Listener) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http listener option can not set nil protocol\")\n\t\t}\n\t\tif err := checkListen(p, \"http listener\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.listener.Store(l)\n\t\treturn nil\n\t}\n}\n\n\/\/ WithPath sets the path to receive cloudevents on for HTTP transports.\nfunc WithPath(path string) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http path option can not set nil protocol\")\n\t\t}\n\t\tpath = strings.TrimSpace(path)\n\t\tif len(path) == 0 {\n\t\t\treturn fmt.Errorf(\"http path option was given an invalid path: %q\", path)\n\t\t}\n\t\tp.Path = path\n\t\treturn nil\n\t}\n}\n\n\/\/ WithMethod sets the HTTP verb (GET, POST, PUT, etc.) to use\n\/\/ when using an HTTP request.\nfunc WithMethod(method string) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http method option can not set nil protocol\")\n\t\t}\n\t\tmethod = strings.TrimSpace(method)\n\t\tif method != \"\" {\n\t\t\tif p.RequestTemplate == nil {\n\t\t\t\tp.RequestTemplate = &nethttp.Request{}\n\t\t\t}\n\t\t\tp.RequestTemplate.Method = method\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"http method option was empty string\")\n\t}\n}\n\n\/\/\n\/\/ Middleware is a function that takes an existing http.Handler and wraps it in middleware,\n\/\/ returning the wrapped http.Handler.\ntype Middleware func(next nethttp.Handler) nethttp.Handler\n\n\/\/ WithMiddleware adds an HTTP middleware to the transport. It may be specified multiple times.\n\/\/ Middleware is applied to everything before it. For example\n\/\/ `NewClient(WithMiddleware(foo), WithMiddleware(bar))` would result in `bar(foo(original))`.\nfunc WithMiddleware(middleware Middleware) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http middleware option can not set nil protocol\")\n\t\t}\n\t\tp.middleware = append(p.middleware, middleware)\n\t\treturn nil\n\t}\n}\n\n\/\/ WithRoundTripper sets the HTTP RoundTripper.\nfunc WithRoundTripper(roundTripper nethttp.RoundTripper) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http round tripper option can not set nil protocol\")\n\t\t}\n\t\tp.roundTripper = roundTripper\n\t\treturn nil\n\t}\n}\n\n\/\/ WithRoundTripperDecorator decorates the default HTTP RoundTripper chosen.\nfunc WithRoundTripperDecorator(decorator func(roundTripper nethttp.RoundTripper) nethttp.RoundTripper) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http round tripper option can not set nil protocol\")\n\t\t}\n\t\tif p.roundTripper == nil {\n\t\t\tif p.Client == nil {\n\t\t\t\tp.roundTripper = nethttp.DefaultTransport\n\t\t\t} else {\n\t\t\t\tp.roundTripper = p.Client.Transport\n\t\t\t}\n\t\t}\n\t\tp.roundTripper = decorator(p.roundTripper)\n\t\treturn nil\n\t}\n}\n\n\/\/ WithClient sets the protocol client\nfunc WithClient(client nethttp.Client) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"client option can not set nil protocol\")\n\t\t}\n\t\tp.Client = &client\n\t\treturn nil\n\t}\n}\n\n\/\/ WithGetHandlerFunc sets the http GET handler func\nfunc WithGetHandlerFunc(fn nethttp.HandlerFunc) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http GET handler func can not set nil protocol\")\n\t\t}\n\t\tp.GetHandlerFn = fn\n\t\treturn nil\n\t}\n}\n\n\/\/ WithOptionsHandlerFunc sets the http OPTIONS handler func\nfunc WithOptionsHandlerFunc(fn nethttp.HandlerFunc) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http OPTIONS handler func can not set nil protocol\")\n\t\t}\n\t\tp.OptionsHandlerFn = fn\n\t\treturn nil\n\t}\n}\n\n\/\/ WithDefaultOptionsHandlerFunc sets the options handler to be the built in handler and configures the options.\n\/\/ methods: the supported methods reported to OPTIONS caller.\n\/\/ rate: the rate limit reported to OPTIONS caller.\n\/\/ origins: the prefix of the accepted origins, or \"*\".\n\/\/ callback: preform the callback to ACK the OPTIONS request.\nfunc WithDefaultOptionsHandlerFunc(methods []string, rate int, origins []string, callback bool) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http OPTIONS handler func can not set nil protocol\")\n\t\t}\n\t\tp.OptionsHandlerFn = p.OptionsHandler\n\t\tp.WebhookConfig = &WebhookConfig{\n\t\t\tAllowedMethods: methods,\n\t\t\tAllowedRate: &rate,\n\t\t\tAllowedOrigins: origins,\n\t\t\tAutoACKCallback: callback,\n\t\t}\n\t\treturn nil\n\t}\n}\n\n\/\/ IsRetriable is a custom function that can be used to override the\n\/\/ default retriable status codes.\ntype IsRetriable func(statusCode int) bool\n\n\/\/ WithIsRetriableFunc sets the function that gets called to determine if an\n\/\/ error should be retried. If not set, the defaultIsRetriableFunc is used.\nfunc WithIsRetriableFunc(isRetriable IsRetriable) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"isRetriable handler func can not set nil protocol\")\n\t\t}\n\t\tif isRetriable == nil {\n\t\t\treturn fmt.Errorf(\"isRetriable handler can not be nil\")\n\t\t}\n\t\tp.isRetriableFunc = isRetriable\n\t\treturn nil\n\t}\n}\n\nfunc WithRateLimiter(rl RateLimiter) Option {\n\treturn func(p *Protocol) error {\n\t\tif p == nil {\n\t\t\treturn fmt.Errorf(\"http OPTIONS handler func can not set nil protocol\")\n\t\t}\n\t\tp.limiter = rl\n\t\treturn nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/build\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"code.google.com\/p\/go.tools\/astutil\"\n)\n\nfunc fixImports(f *ast.File) error {\n\t\/\/ refs are a set of possible package references currently unsatisified by imports.\n\t\/\/ first key: either base package (e.g. \"fmt\") or renamed package\n\t\/\/ second key: referenced package symbol (e.g. \"Println\")\n\trefs := make(map[string]map[string]bool)\n\n\t\/\/ decls are the current package imports. key is base package or renamed package.\n\tdecls := make(map[string]*ast.ImportSpec)\n\n\t\/\/ collect potential uses of packages.\n\tvar visitor visitFn\n\tvisitor = visitFn(func(node ast.Node) ast.Visitor {\n\t\tif node == nil {\n\t\t\treturn visitor\n\t\t}\n\t\tswitch v := node.(type) {\n\t\tcase *ast.ImportSpec:\n\t\t\tif v.Name != nil {\n\t\t\t\tdecls[v.Name.Name] = v\n\t\t\t} else {\n\t\t\t\tlocal := path.Base(strings.Trim(v.Path.Value, `\\\"`))\n\t\t\t\tdecls[local] = v\n\t\t\t}\n\t\tcase *ast.SelectorExpr:\n\t\t\txident, ok := v.X.(*ast.Ident)\n\t\t\tif !ok {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif xident.Obj != nil {\n\t\t\t\t\/\/ if the parser can resolve it, it's not a package ref\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tpkgName := xident.Name\n\t\t\tif refs[pkgName] == nil {\n\t\t\t\trefs[pkgName] = make(map[string]bool)\n\t\t\t}\n\t\t\tif decls[pkgName] == nil {\n\t\t\t\trefs[pkgName][v.Sel.Name] = true\n\t\t\t}\n\t\t}\n\t\treturn visitor\n\t})\n\tast.Walk(visitor, f)\n\n\t\/\/ Search for imports matching potential package references.\n\tsearches := 0\n\ttype result struct {\n\t\tipath string\n\t\terr error\n\t}\n\tresults := make(chan result)\n\tfor pkgName, symbols := range refs {\n\t\tif len(symbols) == 0 {\n\t\t\tcontinue \/\/ skip over packages already imported\n\t\t}\n\t\tgo func(pkgName string, symbols map[string]bool) {\n\t\t\tipath, err := findImport(pkgName, symbols)\n\t\t\tresults <- result{ ipath, err }\n\t\t}(pkgName, symbols)\n\t\tsearches++\n\t}\n\tfor i := 0; i < searches; i++ {\n\t\tresult := <-results\n\t\tif result.err != nil {\n\t\t\treturn result.err\n\t\t}\n\t\tif result.ipath != \"\" {\n\t\t\tastutil.AddImport(f, result.ipath)\n\t\t}\n\t}\n\n\t\/\/ Nil out any unused ImportSpecs, to be removed in following passes\n\tunusedImport := map[string]bool{}\n\tfor pkg, is := range decls {\n\t\tif refs[pkg] == nil && pkg != \"_\" && pkg != \".\" {\n\t\t\tunusedImport[strings.Trim(is.Path.Value, `\"`)] = true\n\t\t}\n\t}\n\tfor ipath := range unusedImport {\n\t\tastutil.DeleteImport(f, ipath)\n\t}\n\n\treturn nil\n}\n\ntype pkg struct {\n\timportpath string \/\/ full pkg import path, e.g. \"net\/http\"\n\tdir string \/\/ absolute file path to pkg directory e.g. \"\/usr\/lib\/go\/src\/fmt\"\n}\n\nvar pkgIndexOnce sync.Once\n\nvar pkgIndex struct {\n\tsync.Mutex\n\tm map[string][]pkg \/\/ shortname => []pkg, e.g \"http\" => \"net\/http\"\n}\n\nfunc loadPkgIndex() {\n\tpkgIndex.Lock()\n\tpkgIndex.m = make(map[string][]pkg)\n\tpkgIndex.Unlock()\n\n\tvar wg sync.WaitGroup\n\tfor _, path := range build.Default.SrcDirs() {\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\tfmt.Fprint(os.Stderr, err)\n\t\t\tcontinue\n\t\t}\n\t\tchildren, err := f.Readdir(-1)\n\t\tf.Close()\n\t\tif err != nil {\n\t\t\tfmt.Fprint(os.Stderr, err)\n\t\t\tcontinue\n\t\t}\n\t\tfor _, child := range children {\n\t\t\tif child.IsDir() {\n\t\t\t\twg.Add(1)\n\t\t\t\tgo func(path, name string) {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\tloadPkg(&wg, path, name)\n\t\t\t\t}(path, child.Name())\n\t\t\t}\n\t\t}\n\t}\n\twg.Wait()\n}\n\nvar fset = token.NewFileSet()\n\nfunc loadPkg(wg *sync.WaitGroup, root, importpath string) {\n\tshortName := path.Base(importpath)\n\n\tdir := filepath.Join(root, importpath)\n\tpkgIndex.Lock()\n\tpkgIndex.m[shortName] = append(pkgIndex.m[shortName], pkg{\n\t\timportpath: importpath,\n\t\tdir: dir,\n\t})\n\tpkgIndex.Unlock()\n\n\tpkgDir, err := os.Open(dir)\n\tif err != nil {\n\t\treturn\n\t}\n\tchildren, err := pkgDir.Readdir(-1)\n\tpkgDir.Close()\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, child := range children {\n\t\tif child.IsDir() {\n\t\t\twg.Add(1)\n\t\t\tgo func(root, name string) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tloadPkg(wg, root, name)\n\t\t\t}(root, filepath.Join(importpath, child.Name()))\n\t\t}\n\t}\n}\n\n\/\/ loadExports returns a list exports for a package.\nfunc loadExports(dir string) map[string]bool {\n\texports := make(map[string]bool)\n\tbuildPkg, err := build.ImportDir(dir, 0)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"could not import %q: %v\", dir, err)\n\t\treturn nil\n\t}\n\tfor _, file := range buildPkg.GoFiles {\n\t\tf, err := parser.ParseFile(fset, filepath.Join(dir, file), nil, 0)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"could not parse %q: %v\", file, err)\n\t\t\tcontinue\n\t\t}\n\t\tfor name := range f.Scope.Objects {\n\t\t\tif ast.IsExported(name) {\n\t\t\t\texports[name] = true\n\t\t\t}\n\t\t}\n\t}\n\treturn exports\n}\n\n\/\/ findImport searches for a package with the given symbols.\n\/\/ If no package is found, findImport returns \"\".\n\/\/ Declared as a variable rather than a function so goimports can be easily\n\/\/ extended by adding a file with an init function.\nvar findImport = func(pkgName string, symbols map[string]bool) (string, error) {\n\tpkgIndexOnce.Do(loadPkgIndex)\n\n\t\/\/ Collect exports for packages with matching names.\n\tvar wg sync.WaitGroup\n\tvar pkgsMu sync.Mutex \/\/ guards pkgs\n\t\/\/ full importpath => exported symbol => True\n\t\/\/ e.g. \"net\/http\" => \"Client\" => True\n\tpkgs := make(map[string]map[string]bool)\n\tpkgIndex.Lock()\n\tfor _, pkg := range pkgIndex.m[pkgName] {\n\t\twg.Add(1)\n\t\tgo func(importpath, dir string) {\n\t\t\tdefer wg.Done()\n\t\t\texports := loadExports(dir)\n\t\t\tif exports != nil {\n\t\t\t\tpkgsMu.Lock()\n\t\t\t\tpkgs[importpath] = exports\n\t\t\t\tpkgsMu.Unlock()\n\t\t\t}\n\t\t}(pkg.importpath, pkg.dir)\n\t}\n\tpkgIndex.Unlock()\n\twg.Wait()\n\n\t\/\/ Filter out packages missing required exported symbols.\n\tfor symbol := range symbols {\n\t\tfor importpath, exports := range pkgs {\n\t\t\tif !exports[symbol] {\n\t\t\t\tdelete(pkgs, importpath)\n\t\t\t}\n\t\t}\n\t}\n\tfor importpath := range pkgs {\n\t\treturn importpath, nil\n\t}\n\treturn \"\", nil\n}\n\ntype visitFn func(node ast.Node) ast.Visitor\n\nfunc (fn visitFn) Visit(node ast.Node) ast.Visitor {\n\treturn fn(node)\n}\n<commit_msg>gofmt<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/build\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"code.google.com\/p\/go.tools\/astutil\"\n)\n\nfunc fixImports(f *ast.File) error {\n\t\/\/ refs are a set of possible package references currently unsatisified by imports.\n\t\/\/ first key: either base package (e.g. \"fmt\") or renamed package\n\t\/\/ second key: referenced package symbol (e.g. \"Println\")\n\trefs := make(map[string]map[string]bool)\n\n\t\/\/ decls are the current package imports. key is base package or renamed package.\n\tdecls := make(map[string]*ast.ImportSpec)\n\n\t\/\/ collect potential uses of packages.\n\tvar visitor visitFn\n\tvisitor = visitFn(func(node ast.Node) ast.Visitor {\n\t\tif node == nil {\n\t\t\treturn visitor\n\t\t}\n\t\tswitch v := node.(type) {\n\t\tcase *ast.ImportSpec:\n\t\t\tif v.Name != nil {\n\t\t\t\tdecls[v.Name.Name] = v\n\t\t\t} else {\n\t\t\t\tlocal := path.Base(strings.Trim(v.Path.Value, `\\\"`))\n\t\t\t\tdecls[local] = v\n\t\t\t}\n\t\tcase *ast.SelectorExpr:\n\t\t\txident, ok := v.X.(*ast.Ident)\n\t\t\tif !ok {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif xident.Obj != nil {\n\t\t\t\t\/\/ if the parser can resolve it, it's not a package ref\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tpkgName := xident.Name\n\t\t\tif refs[pkgName] == nil {\n\t\t\t\trefs[pkgName] = make(map[string]bool)\n\t\t\t}\n\t\t\tif decls[pkgName] == nil {\n\t\t\t\trefs[pkgName][v.Sel.Name] = true\n\t\t\t}\n\t\t}\n\t\treturn visitor\n\t})\n\tast.Walk(visitor, f)\n\n\t\/\/ Search for imports matching potential package references.\n\tsearches := 0\n\ttype result struct {\n\t\tipath string\n\t\terr error\n\t}\n\tresults := make(chan result)\n\tfor pkgName, symbols := range refs {\n\t\tif len(symbols) == 0 {\n\t\t\tcontinue \/\/ skip over packages already imported\n\t\t}\n\t\tgo func(pkgName string, symbols map[string]bool) {\n\t\t\tipath, err := findImport(pkgName, symbols)\n\t\t\tresults <- result{ipath, err}\n\t\t}(pkgName, symbols)\n\t\tsearches++\n\t}\n\tfor i := 0; i < searches; i++ {\n\t\tresult := <-results\n\t\tif result.err != nil {\n\t\t\treturn result.err\n\t\t}\n\t\tif result.ipath != \"\" {\n\t\t\tastutil.AddImport(f, result.ipath)\n\t\t}\n\t}\n\n\t\/\/ Nil out any unused ImportSpecs, to be removed in following passes\n\tunusedImport := map[string]bool{}\n\tfor pkg, is := range decls {\n\t\tif refs[pkg] == nil && pkg != \"_\" && pkg != \".\" {\n\t\t\tunusedImport[strings.Trim(is.Path.Value, `\"`)] = true\n\t\t}\n\t}\n\tfor ipath := range unusedImport {\n\t\tastutil.DeleteImport(f, ipath)\n\t}\n\n\treturn nil\n}\n\ntype pkg struct {\n\timportpath string \/\/ full pkg import path, e.g. \"net\/http\"\n\tdir string \/\/ absolute file path to pkg directory e.g. \"\/usr\/lib\/go\/src\/fmt\"\n}\n\nvar pkgIndexOnce sync.Once\n\nvar pkgIndex struct {\n\tsync.Mutex\n\tm map[string][]pkg \/\/ shortname => []pkg, e.g \"http\" => \"net\/http\"\n}\n\nfunc loadPkgIndex() {\n\tpkgIndex.Lock()\n\tpkgIndex.m = make(map[string][]pkg)\n\tpkgIndex.Unlock()\n\n\tvar wg sync.WaitGroup\n\tfor _, path := range build.Default.SrcDirs() {\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\tfmt.Fprint(os.Stderr, err)\n\t\t\tcontinue\n\t\t}\n\t\tchildren, err := f.Readdir(-1)\n\t\tf.Close()\n\t\tif err != nil {\n\t\t\tfmt.Fprint(os.Stderr, err)\n\t\t\tcontinue\n\t\t}\n\t\tfor _, child := range children {\n\t\t\tif child.IsDir() {\n\t\t\t\twg.Add(1)\n\t\t\t\tgo func(path, name string) {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\tloadPkg(&wg, path, name)\n\t\t\t\t}(path, child.Name())\n\t\t\t}\n\t\t}\n\t}\n\twg.Wait()\n}\n\nvar fset = token.NewFileSet()\n\nfunc loadPkg(wg *sync.WaitGroup, root, importpath string) {\n\tshortName := path.Base(importpath)\n\n\tdir := filepath.Join(root, importpath)\n\tpkgIndex.Lock()\n\tpkgIndex.m[shortName] = append(pkgIndex.m[shortName], pkg{\n\t\timportpath: importpath,\n\t\tdir: dir,\n\t})\n\tpkgIndex.Unlock()\n\n\tpkgDir, err := os.Open(dir)\n\tif err != nil {\n\t\treturn\n\t}\n\tchildren, err := pkgDir.Readdir(-1)\n\tpkgDir.Close()\n\tif err != nil {\n\t\treturn\n\t}\n\tfor _, child := range children {\n\t\tif child.IsDir() {\n\t\t\twg.Add(1)\n\t\t\tgo func(root, name string) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tloadPkg(wg, root, name)\n\t\t\t}(root, filepath.Join(importpath, child.Name()))\n\t\t}\n\t}\n}\n\n\/\/ loadExports returns a list exports for a package.\nfunc loadExports(dir string) map[string]bool {\n\texports := make(map[string]bool)\n\tbuildPkg, err := build.ImportDir(dir, 0)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"could not import %q: %v\", dir, err)\n\t\treturn nil\n\t}\n\tfor _, file := range buildPkg.GoFiles {\n\t\tf, err := parser.ParseFile(fset, filepath.Join(dir, file), nil, 0)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"could not parse %q: %v\", file, err)\n\t\t\tcontinue\n\t\t}\n\t\tfor name := range f.Scope.Objects {\n\t\t\tif ast.IsExported(name) {\n\t\t\t\texports[name] = true\n\t\t\t}\n\t\t}\n\t}\n\treturn exports\n}\n\n\/\/ findImport searches for a package with the given symbols.\n\/\/ If no package is found, findImport returns \"\".\n\/\/ Declared as a variable rather than a function so goimports can be easily\n\/\/ extended by adding a file with an init function.\nvar findImport = func(pkgName string, symbols map[string]bool) (string, error) {\n\tpkgIndexOnce.Do(loadPkgIndex)\n\n\t\/\/ Collect exports for packages with matching names.\n\tvar wg sync.WaitGroup\n\tvar pkgsMu sync.Mutex \/\/ guards pkgs\n\t\/\/ full importpath => exported symbol => True\n\t\/\/ e.g. \"net\/http\" => \"Client\" => True\n\tpkgs := make(map[string]map[string]bool)\n\tpkgIndex.Lock()\n\tfor _, pkg := range pkgIndex.m[pkgName] {\n\t\twg.Add(1)\n\t\tgo func(importpath, dir string) {\n\t\t\tdefer wg.Done()\n\t\t\texports := loadExports(dir)\n\t\t\tif exports != nil {\n\t\t\t\tpkgsMu.Lock()\n\t\t\t\tpkgs[importpath] = exports\n\t\t\t\tpkgsMu.Unlock()\n\t\t\t}\n\t\t}(pkg.importpath, pkg.dir)\n\t}\n\tpkgIndex.Unlock()\n\twg.Wait()\n\n\t\/\/ Filter out packages missing required exported symbols.\n\tfor symbol := range symbols {\n\t\tfor importpath, exports := range pkgs {\n\t\t\tif !exports[symbol] {\n\t\t\t\tdelete(pkgs, importpath)\n\t\t\t}\n\t\t}\n\t}\n\tfor importpath := range pkgs {\n\t\treturn importpath, nil\n\t}\n\treturn \"\", nil\n}\n\ntype visitFn func(node ast.Node) ast.Visitor\n\nfunc (fn visitFn) Visit(node ast.Node) ast.Visitor {\n\treturn fn(node)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Package gofsm is a library for building finite-state machines (automata).\n\/\/\n\/\/ Why yet another FSM library for go?\n\/\/\n\/\/ gofsm is aimed squarely at home automation - human visible, configured and\n\/\/ friendly. The configuration format is yaml, and easy to read\/write by hand.\n\/\/\n\/\/ gofsm is used in the gohome automation project:\n\/\/ http:\/\/github.com\/barnybug\/gohome\npackage gofsm\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gopkg.in\/yaml.v2\"\n)\n\ntype Automata struct {\n\tAutomaton map[string]*Automaton\n\tActions chan Action\n\tChanges chan Change\n}\n\ntype State struct {\n\tName string\n\tSteps []Step\n\tEntering Actions\n\tLeaving Actions\n}\n\ntype Step struct {\n\tWhen string\n\tActions Actions\n\tNext string\n}\n\ntype Actions []string\n\ntype Transition struct {\n\tWhen string\n\tActions Actions\n}\n\ntype Automaton struct {\n\tStart string\n\tStates map[string]struct {\n\t\tEntering Actions\n\t\tLeaving Actions\n\t}\n\tTransitions map[string][]Transition\n\tName string\n\tState *State\n\tSince time.Time\n\tactions chan Action\n\tchanges chan Change\n\tsm map[string]*State\n}\n\ntype Action struct {\n\tName string\n\tTrigger interface{}\n\tChange Change\n}\n\ntype Change struct {\n\tAutomaton string\n\tOld string\n\tNew string\n\tSince time.Time\n\tDuration time.Duration\n\tTrigger interface{}\n}\n\ntype Event interface {\n\tMatch(s string) bool\n}\n\nfunc (self Action) String() string {\n\treturn self.Name\n}\n\nfunc (self *Automata) Process(event Event) {\n\tfor _, aut := range self.Automaton {\n\t\taut.Process(event)\n\t}\n}\n\nfunc (self *Automata) String() string {\n\tvar out string\n\tnow := time.Now()\n\tfor k, aut := range self.Automaton {\n\t\tif out != \"\" {\n\t\t\tout += \"\\n\"\n\t\t}\n\t\tdu := now.Sub(aut.Since)\n\t\tout += fmt.Sprintf(\"%s: %s for %s\", k, aut.State.Name, du)\n\t}\n\treturn out\n}\n\nfunc (self *Automaton) ChangeState(next string, event Event) {\n\tself.changeState(next, event, Actions{})\n}\n\nfunc (self *Automaton) changeState(next string, event Event, actions Actions) {\n\tnow := time.Now()\n\tvar change Change\n\t\/\/ is a state change happening\n\tstateChanged := (self.State.Name != next)\n\n\tif stateChanged {\n\t\tduration := now.Sub(self.Since)\n\t\tchange = Change{Automaton: self.Name, Old: self.State.Name, New: next, Duration: duration, Since: self.Since, Trigger: event}\n\n\t\t\/\/ emit leaving actions\n\t\tfor _, action := range self.State.Leaving {\n\t\t\tself.actions <- Action{action, event, change}\n\t\t}\n\t}\n\n\t\/\/ emit transition actions\n\tfor _, action := range actions {\n\t\tself.actions <- Action{action, event, change}\n\t}\n\n\t\/\/ change state\n\tif stateChanged {\n\t\tself.State = self.sm[next]\n\t\tself.Since = now\n\t\tself.changes <- change\n\n\t\t\/\/ emit entering actions\n\t\tfor _, action := range self.State.Entering {\n\t\t\tself.actions <- Action{action, event, change}\n\t\t}\n\t}\n}\n\nfunc (self *Automaton) Process(event Event) {\n\tfor _, t := range self.State.Steps {\n\t\tif event.Match(t.When) {\n\t\t\tself.changeState(t.Next, event, t.Actions)\n\t\t}\n\t}\n}\n\nfunc (self *Automaton) load() error {\n\tif self.Start == \"\" {\n\t\treturn errors.New(\"missing Start entry\")\n\t}\n\tif len(self.States) == 0 {\n\t\treturn errors.New(\"missing States entries\")\n\t}\n\tif len(self.Transitions) == 0 {\n\t\treturn errors.New(\"missing Transitions entries\")\n\t}\n\n\tsm := map[string]*State{}\n\n\tambigMap := map[string]map[string]bool{}\n\n\tvar allStates []string\n\tfor name, val := range self.States {\n\t\tstate := State{Name: name}\n\t\tstate.Entering = val.Entering\n\t\tstate.Leaving = val.Leaving\n\t\tsm[name] = &state\n\n\t\tallStates = append(allStates, name)\n\t\tambigMap[name] = map[string]bool{}\n\t}\n\tself.sm = sm\n\n\tvar ok bool\n\tif self.State, ok = sm[self.Start]; !ok {\n\t\treturn errors.New(\"starting State invalid\")\n\t}\n\tself.Since = time.Now()\n\n\ttype StringPair struct {\n\t\t_1 string\n\t\t_2 string\n\t}\n\n\tfor name, trans := range self.Transitions {\n\t\tvar pairs []StringPair\n\t\tlr := strings.SplitN(name, \"->\", 2)\n\t\tif len(lr) == 2 {\n\t\t\t\/\/ from->to\n\t\t\tvar froms, tos []string\n\t\t\tif lr[0] == \"*\" {\n\t\t\t\tfroms = allStates\n\t\t\t} else {\n\t\t\t\tfroms = strings.Split(lr[0], \",\")\n\t\t\t}\n\t\t\tif lr[1] == \"*\" {\n\t\t\t\ttos = allStates\n\t\t\t} else {\n\t\t\t\ttos = strings.Split(lr[1], \",\")\n\t\t\t}\n\t\t\tfor _, f := range froms {\n\t\t\t\tfor _, t := range tos {\n\t\t\t\t\tpairs = append(pairs, StringPair{f, t})\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ from1,from2 = from1->from1, from2->from2\n\t\t\tvar froms []string\n\t\t\tif lr[0] == \"*\" {\n\t\t\t\tfroms = allStates\n\t\t\t} else {\n\t\t\t\tfroms = strings.Split(lr[0], \",\")\n\t\t\t}\n\t\t\tfor _, f := range froms {\n\t\t\t\tpairs = append(pairs, StringPair{f, f})\n\t\t\t}\n\t\t}\n\n\t\tfor _, pair := range pairs {\n\t\t\tfrom, to := pair._1, pair._2\n\t\t\tvar sfrom *State\n\t\t\tif sfrom, ok = self.sm[from]; !ok {\n\t\t\t\treturn errors.New(fmt.Sprintf(\"State: %s not found\", from))\n\t\t\t}\n\t\t\tif _, ok := self.sm[to]; !ok {\n\t\t\t\treturn errors.New(fmt.Sprintf(\"State: %s not found\", from))\n\t\t\t}\n\n\t\t\tfor _, v := range trans {\n\t\t\t\t\/\/ check this state hasn't this exact condition already\n\t\t\t\tif ambigMap[from][v.When] {\n\t\t\t\t\treturn errors.New(fmt.Sprintf(\"State: %s condition: %s is ambiguous\", from, v.When))\n\t\t\t\t}\n\t\t\t\tambigMap[from][v.When] = true\n\t\t\t\tt := Step{v.When, v.Actions, to}\n\t\t\t\tsfrom.Steps = append(sfrom.Steps, t)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\ntype AutomataState map[string]AutomatonState\n\ntype AutomatonState struct {\n\tState string\n\tSince time.Time\n}\n\nfunc (self *Automata) Persist() AutomataState {\n\tret := AutomataState{}\n\tfor k, aut := range self.Automaton {\n\t\tret[k] = AutomatonState{aut.State.Name, aut.Since}\n\t}\n\treturn ret\n}\n\nfunc (self *Automata) Restore(s AutomataState) {\n\tfor k, as := range s {\n\t\tif aut, ok := self.Automaton[k]; ok {\n\t\t\tif state, ok := aut.sm[as.State]; ok {\n\t\t\t\taut.State = state\n\t\t\t\taut.Since = as.Since\n\t\t\t} else {\n\t\t\t\tlog.Println(\"Invalid restored state:\", as.State)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc LoadFile(filename string) (*Automata, error) {\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn Load(data)\n}\n\nfunc Load(str []byte) (*Automata, error) {\n\tvar aut Automata = Automata{Actions: make(chan Action, 32), Changes: make(chan Change, 32)}\n\terr := yaml.Unmarshal(str, &aut.Automaton)\n\tfor k, a := range aut.Automaton {\n\t\terr := a.load()\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(fmt.Sprintf(\"%s: %s\", k, err.Error()))\n\t\t}\n\t\ta.Name = k\n\t\ta.actions = aut.Actions\n\t\ta.changes = aut.Changes\n\t}\n\n\treturn &aut, err\n}\n<commit_msg>Only perform one transition per call to Process<commit_after>\/\/ Package gofsm is a library for building finite-state machines (automata).\n\/\/\n\/\/ Why yet another FSM library for go?\n\/\/\n\/\/ gofsm is aimed squarely at home automation - human visible, configured and\n\/\/ friendly. The configuration format is yaml, and easy to read\/write by hand.\n\/\/\n\/\/ gofsm is used in the gohome automation project:\n\/\/ http:\/\/github.com\/barnybug\/gohome\npackage gofsm\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gopkg.in\/yaml.v2\"\n)\n\ntype Automata struct {\n\tAutomaton map[string]*Automaton\n\tActions chan Action\n\tChanges chan Change\n}\n\ntype State struct {\n\tName string\n\tSteps []Step\n\tEntering Actions\n\tLeaving Actions\n}\n\ntype Step struct {\n\tWhen string\n\tActions Actions\n\tNext string\n}\n\ntype Actions []string\n\ntype Transition struct {\n\tWhen string\n\tActions Actions\n}\n\ntype Automaton struct {\n\tStart string\n\tStates map[string]struct {\n\t\tEntering Actions\n\t\tLeaving Actions\n\t}\n\tTransitions map[string][]Transition\n\tName string\n\tState *State\n\tSince time.Time\n\tactions chan Action\n\tchanges chan Change\n\tsm map[string]*State\n}\n\ntype Action struct {\n\tName string\n\tTrigger interface{}\n\tChange Change\n}\n\ntype Change struct {\n\tAutomaton string\n\tOld string\n\tNew string\n\tSince time.Time\n\tDuration time.Duration\n\tTrigger interface{}\n}\n\ntype Event interface {\n\tMatch(s string) bool\n}\n\nfunc (self Action) String() string {\n\treturn self.Name\n}\n\nfunc (self *Automata) Process(event Event) {\n\tfor _, aut := range self.Automaton {\n\t\taut.Process(event)\n\t}\n}\n\nfunc (self *Automata) String() string {\n\tvar out string\n\tnow := time.Now()\n\tfor k, aut := range self.Automaton {\n\t\tif out != \"\" {\n\t\t\tout += \"\\n\"\n\t\t}\n\t\tdu := now.Sub(aut.Since)\n\t\tout += fmt.Sprintf(\"%s: %s for %s\", k, aut.State.Name, du)\n\t}\n\treturn out\n}\n\nfunc (self *Automaton) ChangeState(next string, event Event) {\n\tself.changeState(next, event, Actions{})\n}\n\nfunc (self *Automaton) changeState(next string, event Event, actions Actions) {\n\tnow := time.Now()\n\tvar change Change\n\t\/\/ is a state change happening\n\tstateChanged := (self.State.Name != next)\n\n\tif stateChanged {\n\t\tduration := now.Sub(self.Since)\n\t\tchange = Change{Automaton: self.Name, Old: self.State.Name, New: next, Duration: duration, Since: self.Since, Trigger: event}\n\n\t\t\/\/ emit leaving actions\n\t\tfor _, action := range self.State.Leaving {\n\t\t\tself.actions <- Action{action, event, change}\n\t\t}\n\t}\n\n\t\/\/ emit transition actions\n\tfor _, action := range actions {\n\t\tself.actions <- Action{action, event, change}\n\t}\n\n\t\/\/ change state\n\tif stateChanged {\n\t\tself.State = self.sm[next]\n\t\tself.Since = now\n\t\tself.changes <- change\n\n\t\t\/\/ emit entering actions\n\t\tfor _, action := range self.State.Entering {\n\t\t\tself.actions <- Action{action, event, change}\n\t\t}\n\t}\n}\n\nfunc (self *Automaton) Process(event Event) {\n\tfor _, t := range self.State.Steps {\n\t\tif event.Match(t.When) {\n\t\t\tself.changeState(t.Next, event, t.Actions)\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (self *Automaton) load() error {\n\tif self.Start == \"\" {\n\t\treturn errors.New(\"missing Start entry\")\n\t}\n\tif len(self.States) == 0 {\n\t\treturn errors.New(\"missing States entries\")\n\t}\n\tif len(self.Transitions) == 0 {\n\t\treturn errors.New(\"missing Transitions entries\")\n\t}\n\n\tsm := map[string]*State{}\n\n\tambigMap := map[string]map[string]bool{}\n\n\tvar allStates []string\n\tfor name, val := range self.States {\n\t\tstate := State{Name: name}\n\t\tstate.Entering = val.Entering\n\t\tstate.Leaving = val.Leaving\n\t\tsm[name] = &state\n\n\t\tallStates = append(allStates, name)\n\t\tambigMap[name] = map[string]bool{}\n\t}\n\tself.sm = sm\n\n\tvar ok bool\n\tif self.State, ok = sm[self.Start]; !ok {\n\t\treturn errors.New(\"starting State invalid\")\n\t}\n\tself.Since = time.Now()\n\n\ttype StringPair struct {\n\t\t_1 string\n\t\t_2 string\n\t}\n\n\tfor name, trans := range self.Transitions {\n\t\tvar pairs []StringPair\n\t\tlr := strings.SplitN(name, \"->\", 2)\n\t\tif len(lr) == 2 {\n\t\t\t\/\/ from->to\n\t\t\tvar froms, tos []string\n\t\t\tif lr[0] == \"*\" {\n\t\t\t\tfroms = allStates\n\t\t\t} else {\n\t\t\t\tfroms = strings.Split(lr[0], \",\")\n\t\t\t}\n\t\t\tif lr[1] == \"*\" {\n\t\t\t\ttos = allStates\n\t\t\t} else {\n\t\t\t\ttos = strings.Split(lr[1], \",\")\n\t\t\t}\n\t\t\tfor _, f := range froms {\n\t\t\t\tfor _, t := range tos {\n\t\t\t\t\tpairs = append(pairs, StringPair{f, t})\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ from1,from2 = from1->from1, from2->from2\n\t\t\tvar froms []string\n\t\t\tif lr[0] == \"*\" {\n\t\t\t\tfroms = allStates\n\t\t\t} else {\n\t\t\t\tfroms = strings.Split(lr[0], \",\")\n\t\t\t}\n\t\t\tfor _, f := range froms {\n\t\t\t\tpairs = append(pairs, StringPair{f, f})\n\t\t\t}\n\t\t}\n\n\t\tfor _, pair := range pairs {\n\t\t\tfrom, to := pair._1, pair._2\n\t\t\tvar sfrom *State\n\t\t\tif sfrom, ok = self.sm[from]; !ok {\n\t\t\t\treturn errors.New(fmt.Sprintf(\"State: %s not found\", from))\n\t\t\t}\n\t\t\tif _, ok := self.sm[to]; !ok {\n\t\t\t\treturn errors.New(fmt.Sprintf(\"State: %s not found\", from))\n\t\t\t}\n\n\t\t\tfor _, v := range trans {\n\t\t\t\t\/\/ check this state hasn't this exact condition already\n\t\t\t\tif ambigMap[from][v.When] {\n\t\t\t\t\treturn errors.New(fmt.Sprintf(\"State: %s condition: %s is ambiguous\", from, v.When))\n\t\t\t\t}\n\t\t\t\tambigMap[from][v.When] = true\n\t\t\t\tt := Step{v.When, v.Actions, to}\n\t\t\t\tsfrom.Steps = append(sfrom.Steps, t)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\ntype AutomataState map[string]AutomatonState\n\ntype AutomatonState struct {\n\tState string\n\tSince time.Time\n}\n\nfunc (self *Automata) Persist() AutomataState {\n\tret := AutomataState{}\n\tfor k, aut := range self.Automaton {\n\t\tret[k] = AutomatonState{aut.State.Name, aut.Since}\n\t}\n\treturn ret\n}\n\nfunc (self *Automata) Restore(s AutomataState) {\n\tfor k, as := range s {\n\t\tif aut, ok := self.Automaton[k]; ok {\n\t\t\tif state, ok := aut.sm[as.State]; ok {\n\t\t\t\taut.State = state\n\t\t\t\taut.Since = as.Since\n\t\t\t} else {\n\t\t\t\tlog.Println(\"Invalid restored state:\", as.State)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc LoadFile(filename string) (*Automata, error) {\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn Load(data)\n}\n\nfunc Load(str []byte) (*Automata, error) {\n\tvar aut Automata = Automata{Actions: make(chan Action, 32), Changes: make(chan Change, 32)}\n\terr := yaml.Unmarshal(str, &aut.Automaton)\n\tfor k, a := range aut.Automaton {\n\t\terr := a.load()\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(fmt.Sprintf(\"%s: %s\", k, err.Error()))\n\t\t}\n\t\ta.Name = k\n\t\ta.actions = aut.Actions\n\t\ta.changes = aut.Changes\n\t}\n\n\treturn &aut, err\n}\n<|endoftext|>"} {"text":"<commit_before>package tag\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"image\"\n\t\"image\/color\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\/\/\t\"time\"\n\n\t\"github.com\/as\/edit\"\n\t\"github.com\/as\/event\"\n\t\"github.com\/as\/frame\"\n\t\"github.com\/as\/frame\/font\"\n\t\"github.com\/as\/frame\/win\"\n\t\"github.com\/as\/text\"\n\t\"github.com\/as\/text\/action\"\n\t\"github.com\/as\/text\/find\"\n\t\"github.com\/as\/text\/kbd\"\n\tmus \"github.com\/as\/text\/mouse\"\n\t\/\/\"github.com\/as\/worm\"\n\t\"golang.org\/x\/exp\/shiny\/screen\"\n\t\"golang.org\/x\/mobile\/event\/key\"\n\t\"golang.org\/x\/mobile\/event\/mouse\"\n)\n\nvar db = win.Db\nvar un = win.Un\nvar trace = win.Trace\n\nfunc p(e mouse.Event) image.Point {\n\treturn image.Pt(int(e.X), int(e.Y))\n\n}\n\ntype doter interface {\n\tDot() (int64, int64)\n}\n\nfunc whatsdot(d doter) string {\n\tq0, q1 := d.Dot()\n\treturn fmt.Sprintf(\"Dot: [%d:%d]\", q0, q1)\n}\n\n\/\/ Put\nvar (\n\tButtonsdown = 0\n\tnoselect bool\n\tlastclickpt image.Point\n)\n\ntype Tag struct {\n\tsp image.Point\n\t*win.Win\n\tBody *win.Win\n\tScrolling bool\n\tscrolldy int\n\tdirty bool\n\tr0, r1 int64\n\tred, green frame.Color\n\tescR image.Rectangle\n\/\/\tLog worm.Logger\t\/\/ TODO \n\toffset int64\n}\n\nfunc (t *Tag) Dirty() bool {\n\treturn t.dirty || t.Win.Dirty() || (t.Body != nil && t.Body.Dirty())\n}\n\nfunc (t *Tag) Mark() {\n\tt.dirty = true\n}\n\nfunc (t *Tag) Loc() image.Rectangle {\n\tr := t.Win.Loc()\n\tif t.Body != nil {\n\t\tr.Max.Y += t.Body.Loc().Dy()\n\t}\n\treturn r\n}\n\n\/\/ TagSize returns the size of a tag given the font\nfunc TagSize(ft *font.Font) int {\n\treturn ft.Dy() + ft.Dy()\/2\n}\n\n\/\/ TagPad returns the padding for the tag given the window's padding\n\/\/ always returns an x-aligned point\nfunc TagPad(wpad image.Point) image.Point {\n\treturn image.Pt(wpad.X, 3)\n}\n\n\/\/ Put\nfunc NewTag(src screen.Screen, wind screen.Window, ft *font.Font, sp, size, pad image.Point, cols frame.Color) *Tag {\n\n\t\/\/ Make the main tag\n\ttagY := TagSize(ft)\n\n\t\/\/ Make tag\n\twtag := win.New(src, ft, wind,\n\t\tsp,\n\t\timage.Pt(size.X, tagY),\n\t\tTagPad(pad), cols,\n\t)\n\n\tsp = sp.Add(image.Pt(0, tagY))\n\tsize = size.Sub(image.Pt(0, tagY))\n\tif size.Y < tagY {\n\t\treturn &Tag{sp: sp, Win: wtag, Body: nil}\n\t}\n\t\/\/ Make window\n\tcols.Back = Yellow\n\tw := win.New(src, ft, wind,\n\t\tsp,\n\t\tsize,\n\t\tpad, frame.A,\n\t)\n\/\/\tlg := worm.NewCoalescer(worm.NewLogger(), time.Second*3)\n\/\/\tw.Editor = text.NewHistory(w.Editor, lg)\n\tacol := frame.A\n\tGreen := image.NewUniform(color.RGBA{0x99, 0xDD, 0x99, 192})\n\tacol.Hi.Back = Green\n\tgreen := acol\n\n\tRed := image.NewUniform(color.RGBA{0xDD, 0x99, 0x99, 192})\n\tacol.Hi.Back = Red\n\tred := acol\n\n\treturn &Tag{sp: sp, Win: wtag, Body: w,\n\t\t \/\/ Log: lg, \n\t\tred: red, green: green}\n}\n\nfunc (t *Tag) Move(pt image.Point) {\n\tt.Win.Move(pt)\n\tif t.Body == nil {\n\t\treturn\n\t}\n\tpt.Y += t.Win.Loc().Dy()\n\tt.Body.Move(pt)\n}\n\nfunc (t *Tag) Resize(pt image.Point) {\n\tdy := TagSize(t.Win.Font)\n\tif pt.X < dy || pt.Y < dy {\n\t\tprintln(\"ignore daft size request:\", pt.String())\n\t\treturn\n\t}\n\tt.Win.Resize(image.Pt(pt.X, dy))\n\tpt.Y -= dy\n\tif t.Body != nil {\n\t\tt.Body.Resize(pt)\n\t}\n}\n\nfunc mustCompile(prog string) *edit.Command {\n\tp, err := edit.Compile(prog)\n\tif err != nil {\n\t\tlog.Printf(\"tag.go:\/mustCompile\/: failed to compile %q\\n\", prog)\n\t\treturn nil\n\t}\n\treturn p\n}\n\nfunc (t *Tag) Get(name string) {\n\tname, addr := action.SplitPath(name)\n\tw := t.Body\n\twtag := t.Win\n\twtag.Delete(0, wtag.Len())\n\twtag.InsertString(name+\"\\tPut Del [Edit , ]\", 0)\n\twtag.Refresh()\n\tif w == nil {\n\t\treturn\n\t}\n\ts := readfile(name)\n\tfmt.Printf(\"files size is %d\\n\", len(s))\n\tw.Insert(s, 0)\n\tif addr != \"\" {\n\t\tw.Send(mustCompile(\"#0\"))\n\t\tw.Send(mustCompile(addr))\n\t}\n}\n\ntype GetEvent struct {\n\tPath string\n\tAddr string\n\tIsDir bool\n}\n\nfunc (t *Tag) FileName() string {\n\tif t == nil || t.Win == nil {\n\t\treturn \"\"\n\t}\n\tname, err := bufio.NewReader(bytes.NewReader(t.Win.Bytes())).ReadString('\\t')\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn strings.TrimSpace(name)\n}\n\nfunc (t *Tag) Put() (err error) {\n\tname := t.FileName()\n\tif name == \"\" {\n\t\treturn fmt.Errorf(\"no file\")\n\t}\n\twritefile(name, t.Body.Bytes())\n\treturn nil\n}\nfunc pt(e mouse.Event) image.Point{\n\treturn image.Pt(int(e.X), int(e.Y))\n}\nfunc (t *Tag) Mouse(act text.Editor, e interface{}) {\n\twin := act.(*win.Win)\n\tif act := win; true {\n\t\torg := act.Origin()\n\t\tswitch e := e.(type) {\n\t\tcase mus.SnarfEvent:\n\t\t\tsnarf(act)\n\t\tcase mus.InsertEvent:\n\t\t\tpaste(act)\n\t\tcase mus.MarkEvent:\n\t\t\tif e.Button != 1 {\n\t\t\t\tt.r0, t.r1 = act.Dot()\n\t\t\t}\n\t\t\tq0 := org + act.IndexOf(p(e.Event))\n\t\t\tq1 := q0\n\t\t\tact.Sq = q0\n\t\t\tif e.Button == 1 && e.Double {\n\t\t\t\tq0, q1 = find.FreeExpand(act, q0)\n\t\t\t\tt.escR = image.Rect(-3,-3,3,3).Add(pt(e.Event))\n\t\t\t\tprintln(q0,q1)\n\t\t\t\tprintln(\"double click\")\n\t\t\t}\n\t\t\tact.Select(q0, q1)\n\t\tcase mus.SweepEvent:\n\t\t\tif t.escR != image.ZR {\n\t\t\t\tif pt(e.Event).In(t.escR){\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tt.escR = image.ZR\n\t\t\t\tact.Select(act.Sq, act.Sq)\n\t\t\t}\n\t\t\tq0, q1 := act.Dot()\n\t\t\t\/\/r0 := org+act.IndexOf(p(e.Event))\n\t\t\tsweeper := text.Sweeper(act)\n\t\t\tif act == t.Win {\n\t\t\t\tsweeper = mus.NewNopScroller(act)\n\t\t\t}\n\t\t\tact.Sq, q0, q1 = mus.Sweep(sweeper, e, 15, act.Sq, q0, q1, act)\n\t\t\tif e.Button == 1 {\n\t\t\t\tact.Select(q0, q1)\n\t\t\t} else {\n\t\t\t\tact.Select(q0, q1)\n\t\t\t}\n\t\tcase mus.SelectEvent:\n\t\t\tq0, q1 := act.Dot()\n\t\t\tprintln(q0,q1)\n\t\t\tif e.Button == 1 {\n\t\t\t\tact.Select(q0, q1)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif e.Button == 2 || e.Button == 3 {\n\t\t\t\tq0, q1 := act.Dot()\n\t\t\t\tif q0 == q1 && text.Region3(q0, t.r0-1, t.r1) == 0 {\n\t\t\t\t\t\/\/ just use the existing selection and look\n\t\t\t\t\tq0, q1 = t.r0, t.r1\n\t\t\t\t\tact.Select(q0, q1)\n\t\t\t\t}\n\t\t\t\tif q0 == q1 {\n\t\t\t\t\tq0, q1 = find.ExpandFile(act.Bytes(), q0)\n\t\t\t\t}\n\n\t\t\t\tfrom := text.Editor(act)\n\t\t\t\tif from == t.Win {\n\t\t\t\t\tfrom = t\n\t\t\t\t}\n\t\t\t\tif e.Button == 3 {\n\t\t\t\t\tact.Select(q0, q1)\n\t\t\t\t\tact.SendFirst(event.Look{\n\t\t\t\t\t\tRec: event.Rec{\n\t\t\t\t\t\t\tQ0: q0,\n\t\t\t\t\t\t\tQ1: q1,\n\t\t\t\t\t\t\tP: act.Bytes()[q0:q1],\n\t\t\t\t\t\t},\n\t\t\t\t\t\tFrom: from,\n\t\t\t\t\t\tTo: []event.Editor{t.Body},\n\t\t\t\t\t\tFromFile: t.FileName(),\n\t\t\t\t\t})\n\t\t\t\t} else {\n\t\t\t\t\tact.SendFirst(event.Cmd{\n\t\t\t\t\t\tRec: event.Rec{\n\t\t\t\t\t\t\tQ0: q0, Q1: q1,\n\t\t\t\t\t\t\tP: act.Bytes()[q0:q1],\n\t\t\t\t\t\t},\n\t\t\t\t\t\tFrom: from,\n\t\t\t\t\t\tTo: []event.Editor{t.Body},\n\t\t\t\t\t\tFromFile: t.FileName(),\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Put\nfunc (t *Tag) Handle(act text.Editor, e interface{}) {\n\tswitch e := e.(type) {\n\tcase mus.MarkEvent, mus.SweepEvent, mus.SelectEvent, mus.SnarfEvent, mus.InsertEvent:\n\t\tt.Mouse(act, e)\n\tcase string:\n\t\tif e == \"Redo\" {\n\t\t\t\/\/\t\t\tact.Redo()\n\t\t} else if e == \"Undo\" {\n\t\t\/*\n\t\t\tev, err := t.Log.ReadAt(t.Log.Len()-1-t.offset)\n\t\t\tt.offset++\n\t\t\tif err != nil{\n\t\t\t\tt.SendFirst(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tev2 := event.Invert(ev)\n\t\t\tswitch ev2 := ev2.(type){\n\t\t\tcase *event.Insert:\n\t\t\tt.Send(fmt.Errorf(\"INsert %#v\\n\", ev))\n\t\t\t\tact.Insert(ev2.P, ev2.Q0)\n\t\t\tcase *event.Delete:\n\t\t\t\tq0,q1 := ev2.Q0, ev2.Q1\n\t\t\t\tif q0 > q1{\n\t\t\t\t\tq0,q1=q1,q0\n\t\t\t\t}\n\t\t\t\tif q0 != q1{\n\t\t\t\t\tq1--\n\t\t\t\t}\n\t\t\tt.Send(fmt.Errorf(\"Delete %#v\\n\", ev))\n\t\t\t\tact.Delete(q0,q1)\n\t\t\t}\n\t\t\tt.Send(fmt.Errorf(\"%#v\\n\", ev))\n\t\t*\/\n\t\t\t\/\/\t\t\tact.Undo()\n\t\t} else if e == \"Put\" {\n\t\t\tt.Put()\n\t\t} else if e == \"Get\" {\n\t\t\tt.Get(t.FileName())\n\t\t}\n\t\tt.Mark()\n\tcase *edit.Command:\n\t\tfmt.Printf(\"command %#v\\n\", e)\n\t\tif e == nil {\n\t\t\tbreak\n\t\t}\n\t\tfn := e.Func()\n\t\tif fn != nil {\n\t\t\tfn(t.Body) \/\/ Always execute on body for now\n\t\t}\n\t\tt.Mark()\n\tcase key.Event:\n\t\tif e.Direction == 2 {\n\t\t\tbreak\n\t\t}\n\t\tntab := int64(-1)\n\t\tif e.Rune == '\\n' || e.Rune == '\\r' && act == t.Body {\n\t\t\tq0, q1 := act.Dot()\n\t\t\tif q0 == q1 {\n\t\t\t\tp := act.Bytes()\n\t\t\t\tl0, _ := find.Findlinerev(p, q0, 0)\n\t\t\t\tntab = find.Accept(p, l0, []byte{'\\t'})\n\t\t\t\tntab -= l0 + 1\n\t\t\t}\n\t\t}\n\t\tkbd.SendClient(act, e)\n\t\te.Rune = '\\t'\n\t\tfor ntab >= 0 {\n\t\t\tkbd.SendClient(act, e)\n\t\t\tntab--\n\t\t}\n\t}\n\tt.dirty = true\n}\n\nfunc (t *Tag) Upload(wind screen.Window) {\n\tif t.Body != nil && t.Body.Dirty() {\n\t\tt.Body.Upload()\n\t}\n\tif t.Win.Dirty() {\n\t\tt.Win.Upload()\n\t}\n}\n\nfunc (t *Tag) Refresh() {\n\tif t.Body != nil {\n\t\tt.Body.Refresh()\n\t}\n\tif t.Win.Dirty() {\n\t\tt.Win.Refresh()\n\t}\n}\n\nfunc isdir(path string) bool {\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\tif err == os.ErrNotExist {\n\t\t\treturn false\n\t\t}\n\t\tfmt.Println(err)\n\t\treturn false\n\t}\n\treturn fi.IsDir()\n}\nfunc isfile(path string) bool {\n\t_, err := os.Stat(path)\n\treturn err == nil\n}\n<commit_msg>fixes as\/a#8<commit_after>package tag\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"image\"\n\t\"image\/color\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\/\/\t\"time\"\n\n\t\"github.com\/as\/edit\"\n\t\"github.com\/as\/event\"\n\t\"github.com\/as\/frame\"\n\t\"github.com\/as\/frame\/font\"\n\t\"github.com\/as\/frame\/win\"\n\t\"github.com\/as\/text\"\n\t\"github.com\/as\/text\/action\"\n\t\"github.com\/as\/text\/find\"\n\t\"github.com\/as\/text\/kbd\"\n\tmus \"github.com\/as\/text\/mouse\"\n\t\/\/\"github.com\/as\/worm\"\n\t\"golang.org\/x\/exp\/shiny\/screen\"\n\t\"golang.org\/x\/mobile\/event\/key\"\n\t\"golang.org\/x\/mobile\/event\/mouse\"\n)\n\nvar db = win.Db\nvar un = win.Un\nvar trace = win.Trace\n\nfunc p(e mouse.Event) image.Point {\n\treturn image.Pt(int(e.X), int(e.Y))\n\n}\n\ntype doter interface {\n\tDot() (int64, int64)\n}\n\nfunc whatsdot(d doter) string {\n\tq0, q1 := d.Dot()\n\treturn fmt.Sprintf(\"Dot: [%d:%d]\", q0, q1)\n}\n\n\/\/ Put\nvar (\n\tButtonsdown = 0\n\tnoselect bool\n\tlastclickpt image.Point\n)\n\ntype Tag struct {\n\tsp image.Point\n\t*win.Win\n\tBody *win.Win\n\tScrolling bool\n\tscrolldy int\n\tdirty bool\n\tr0, r1 int64\n\tred, green frame.Color\n\tescR image.Rectangle\n\/\/\tLog worm.Logger\t\/\/ TODO \n\toffset int64\n}\n\nfunc (t *Tag) Dirty() bool {\n\treturn t.dirty || t.Win.Dirty() || (t.Body != nil && t.Body.Dirty())\n}\n\nfunc (t *Tag) Mark() {\n\tt.dirty = true\n}\n\nfunc (t *Tag) Loc() image.Rectangle {\n\tr := t.Win.Loc()\n\tif t.Body != nil {\n\t\tr.Max.Y += t.Body.Loc().Dy()\n\t}\n\treturn r\n}\n\n\/\/ TagSize returns the size of a tag given the font\nfunc TagSize(ft *font.Font) int {\n\treturn ft.Dy() + ft.Dy()\/2\n}\n\n\/\/ TagPad returns the padding for the tag given the window's padding\n\/\/ always returns an x-aligned point\nfunc TagPad(wpad image.Point) image.Point {\n\treturn image.Pt(wpad.X, 3)\n}\n\n\/\/ Put\nfunc NewTag(src screen.Screen, wind screen.Window, ft *font.Font, sp, size, pad image.Point, cols frame.Color) *Tag {\n\n\t\/\/ Make the main tag\n\ttagY := TagSize(ft)\n\n\t\/\/ Make tag\n\twtag := win.New(src, ft, wind,\n\t\tsp,\n\t\timage.Pt(size.X, tagY),\n\t\tTagPad(pad), cols,\n\t)\n\n\tsp = sp.Add(image.Pt(0, tagY))\n\tsize = size.Sub(image.Pt(0, tagY))\n\tif size.Y < tagY {\n\t\treturn &Tag{sp: sp, Win: wtag, Body: nil}\n\t}\n\t\/\/ Make window\n\tcols.Back = Yellow\n\tw := win.New(src, ft, wind,\n\t\tsp,\n\t\tsize,\n\t\tpad, frame.A,\n\t)\n\/\/\tlg := worm.NewCoalescer(worm.NewLogger(), time.Second*3)\n\/\/\tw.Editor = text.NewHistory(w.Editor, lg)\n\tacol := frame.A\n\tGreen := image.NewUniform(color.RGBA{0x99, 0xDD, 0x99, 192})\n\tacol.Hi.Back = Green\n\tgreen := acol\n\n\tRed := image.NewUniform(color.RGBA{0xDD, 0x99, 0x99, 192})\n\tacol.Hi.Back = Red\n\tred := acol\n\n\treturn &Tag{sp: sp, Win: wtag, Body: w,\n\t\t \/\/ Log: lg, \n\t\tred: red, green: green}\n}\n\nfunc (t *Tag) Move(pt image.Point) {\n\tt.Win.Move(pt)\n\tif t.Body == nil {\n\t\treturn\n\t}\n\tpt.Y += t.Win.Loc().Dy()\n\tt.Body.Move(pt)\n}\n\nfunc (t *Tag) Resize(pt image.Point) {\n\tdy := TagSize(t.Win.Font)\n\tif pt.X < dy || pt.Y < dy {\n\t\tprintln(\"ignore daft size request:\", pt.String())\n\t\treturn\n\t}\n\tt.Win.Resize(image.Pt(pt.X, dy))\n\tpt.Y -= dy\n\tif t.Body != nil {\n\t\tt.Body.Resize(pt)\n\t}\n}\n\nfunc mustCompile(prog string) *edit.Command {\n\tp, err := edit.Compile(prog)\n\tif err != nil {\n\t\tlog.Printf(\"tag.go:\/mustCompile\/: failed to compile %q\\n\", prog)\n\t\treturn nil\n\t}\n\treturn p\n}\n\nfunc (t *Tag) Get(name string) {\n\tw := t.Body\n\tif w == nil{\n\t\tw.SendFirst(fmt.Errorf(\"tag: no body to get %q\\n\", name))\n\t\treturn\n\t}\n\tname, addr := action.SplitPath(name)\n\twtag := t.Win\n\tp := wtag.Bytes()\n\tmaint := find.Find(p, 0, []byte{'|'})\n\tif maint == -1 {\n\t\tmaint = int64(len(p))\n\t}\n\twtag.Delete(0, maint+1)\n\twtag.InsertString(name+\"\\tPut Del |\", 0)\n\twtag.Refresh()\n\ts := readfile(name)\n\tfmt.Printf(\"files size is %d\\n\", len(s))\n\tw.Delete(0, w.Len())\n\tw.Insert(s, 0)\n\tw.Select(0,0)\n\tif addr != \"\" {\n\t\tw.SendFirst(mustCompile(\"#0\"))\n\t\tw.SendFirst(mustCompile(addr))\n\t}\n}\n\ntype GetEvent struct {\n\tPath string\n\tAddr string\n\tIsDir bool\n}\n\nfunc (t *Tag) FileName() string {\n\tif t == nil || t.Win == nil {\n\t\treturn \"\"\n\t}\n\tname, err := bufio.NewReader(bytes.NewReader(t.Win.Bytes())).ReadString('\\t')\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn strings.TrimSpace(name)\n}\n\nfunc (t *Tag) Put() (err error) {\n\tname := t.FileName()\n\tif name == \"\" {\n\t\treturn fmt.Errorf(\"no file\")\n\t}\n\twritefile(name, t.Body.Bytes())\n\treturn nil\n}\nfunc pt(e mouse.Event) image.Point{\n\treturn image.Pt(int(e.X), int(e.Y))\n}\nfunc (t *Tag) Mouse(act text.Editor, e interface{}) {\n\twin := act.(*win.Win)\n\tif act := win; true {\n\t\torg := act.Origin()\n\t\tswitch e := e.(type) {\n\t\tcase mus.SnarfEvent:\n\t\t\tsnarf(act)\n\t\tcase mus.InsertEvent:\n\t\t\tpaste(act)\n\t\tcase mus.MarkEvent:\n\t\t\tif e.Button != 1 {\n\t\t\t\tt.r0, t.r1 = act.Dot()\n\t\t\t}\n\t\t\tq0 := org + act.IndexOf(p(e.Event))\n\t\t\tq1 := q0\n\t\t\tact.Sq = q0\n\t\t\tif e.Button == 1 && e.Double {\n\t\t\t\tq0, q1 = find.FreeExpand(act, q0)\n\t\t\t\tt.escR = image.Rect(-3,-3,3,3).Add(pt(e.Event))\n\t\t\t\tprintln(q0,q1)\n\t\t\t\tprintln(\"double click\")\n\t\t\t}\n\t\t\tact.Select(q0, q1)\n\t\tcase mus.SweepEvent:\n\t\t\tif t.escR != image.ZR {\n\t\t\t\tif pt(e.Event).In(t.escR){\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tt.escR = image.ZR\n\t\t\t\tact.Select(act.Sq, act.Sq)\n\t\t\t}\n\t\t\tq0, q1 := act.Dot()\n\t\t\t\/\/r0 := org+act.IndexOf(p(e.Event))\n\t\t\tsweeper := text.Sweeper(act)\n\t\t\tif act == t.Win {\n\t\t\t\tsweeper = mus.NewNopScroller(act)\n\t\t\t}\n\t\t\tact.Sq, q0, q1 = mus.Sweep(sweeper, e, 15, act.Sq, q0, q1, act)\n\t\t\tif e.Button == 1 {\n\t\t\t\tact.Select(q0, q1)\n\t\t\t} else {\n\t\t\t\tact.Select(q0, q1)\n\t\t\t}\n\t\tcase mus.SelectEvent:\n\t\t\tq0, q1 := act.Dot()\n\t\t\tprintln(q0,q1)\n\t\t\tif e.Button == 1 {\n\t\t\t\tact.Select(q0, q1)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif e.Button == 2 || e.Button == 3 {\n\t\t\t\tq0, q1 := act.Dot()\n\t\t\t\tif q0 == q1 && text.Region3(q0, t.r0-1, t.r1) == 0 {\n\t\t\t\t\t\/\/ just use the existing selection and look\n\t\t\t\t\tq0, q1 = t.r0, t.r1\n\t\t\t\t\tact.Select(q0, q1)\n\t\t\t\t}\n\t\t\t\tif q0 == q1 {\n\t\t\t\t\tq0, q1 = find.ExpandFile(act.Bytes(), q0)\n\t\t\t\t}\n\n\t\t\t\tfrom := text.Editor(act)\n\t\t\t\tif from == t.Win {\n\t\t\t\t\tfrom = t\n\t\t\t\t}\n\t\t\t\tif e.Button == 3 {\n\t\t\t\t\tact.Select(q0, q1)\n\t\t\t\t\tact.SendFirst(event.Look{\n\t\t\t\t\t\tRec: event.Rec{\n\t\t\t\t\t\t\tQ0: q0,\n\t\t\t\t\t\t\tQ1: q1,\n\t\t\t\t\t\t\tP: act.Bytes()[q0:q1],\n\t\t\t\t\t\t},\n\t\t\t\t\t\tFrom: from,\n\t\t\t\t\t\tTo: []event.Editor{t.Body},\n\t\t\t\t\t\tFromFile: t.FileName(),\n\t\t\t\t\t})\n\t\t\t\t} else {\n\t\t\t\t\tact.SendFirst(event.Cmd{\n\t\t\t\t\t\tRec: event.Rec{\n\t\t\t\t\t\t\tQ0: q0, Q1: q1,\n\t\t\t\t\t\t\tP: act.Bytes()[q0:q1],\n\t\t\t\t\t\t},\n\t\t\t\t\t\tFrom: from,\n\t\t\t\t\t\tTo: []event.Editor{t.Body},\n\t\t\t\t\t\tFromFile: t.FileName(),\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Put\nfunc (t *Tag) Handle(act text.Editor, e interface{}) {\n\tswitch e := e.(type) {\n\tcase mus.MarkEvent, mus.SweepEvent, mus.SelectEvent, mus.SnarfEvent, mus.InsertEvent:\n\t\tt.Mouse(act, e)\n\tcase string:\n\t\tif e == \"Redo\" {\n\t\t\t\/\/\t\t\tact.Redo()\n\t\t} else if e == \"Undo\" {\n\t\t\/*\n\t\t\tev, err := t.Log.ReadAt(t.Log.Len()-1-t.offset)\n\t\t\tt.offset++\n\t\t\tif err != nil{\n\t\t\t\tt.SendFirst(err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tev2 := event.Invert(ev)\n\t\t\tswitch ev2 := ev2.(type){\n\t\t\tcase *event.Insert:\n\t\t\tt.Send(fmt.Errorf(\"INsert %#v\\n\", ev))\n\t\t\t\tact.Insert(ev2.P, ev2.Q0)\n\t\t\tcase *event.Delete:\n\t\t\t\tq0,q1 := ev2.Q0, ev2.Q1\n\t\t\t\tif q0 > q1{\n\t\t\t\t\tq0,q1=q1,q0\n\t\t\t\t}\n\t\t\t\tif q0 != q1{\n\t\t\t\t\tq1--\n\t\t\t\t}\n\t\t\tt.Send(fmt.Errorf(\"Delete %#v\\n\", ev))\n\t\t\t\tact.Delete(q0,q1)\n\t\t\t}\n\t\t\tt.Send(fmt.Errorf(\"%#v\\n\", ev))\n\t\t*\/\n\t\t\t\/\/\t\t\tact.Undo()\n\t\t} else if e == \"Put\" {\n\t\t\tt.Put()\n\t\t} else if e == \"Get\" {\n\t\t\tt.Get(t.FileName())\n\t\t}\n\t\tt.Mark()\n\tcase *edit.Command:\n\t\tfmt.Printf(\"command %#v\\n\", e)\n\t\tif e == nil {\n\t\t\tbreak\n\t\t}\n\t\tfn := e.Func()\n\t\tif fn != nil {\n\t\t\tfn(t.Body) \/\/ Always execute on body for now\n\t\t}\n\t\tt.Mark()\n\tcase key.Event:\n\t\tif e.Direction == 2 {\n\t\t\tbreak\n\t\t}\n\t\tntab := int64(-1)\n\t\tif (e.Rune == '\\n' || e.Rune == '\\r') && act == t.Body {\n\t\t\tq0, q1 := act.Dot()\n\t\t\tif q0 == q1 {\n\t\t\t\tp := act.Bytes()\n\t\t\t\tl0, _ := find.Findlinerev(p, q0, 0)\n\t\t\t\tntab = find.Accept(p, l0, []byte{'\\t'})\n\t\t\t\tntab -= l0 + 1\n\t\t\t}\n\t\t}\n\t\tkbd.SendClient(act, e)\n\t\tfor ntab >= 0 {\n\t\t\te.Rune = '\\t'\n\t\t\tkbd.SendClient(act, e)\n\t\t\tntab--\n\t\t}\n\t}\n\tt.dirty = true\n}\n\nfunc (t *Tag) Upload(wind screen.Window) {\n\tif t.Body != nil && t.Body.Dirty() {\n\t\tt.Body.Upload()\n\t}\n\tif t.Win.Dirty() {\n\t\tt.Win.Upload()\n\t}\n}\n\nfunc (t *Tag) Refresh() {\n\tif t.Body != nil {\n\t\tt.Body.Refresh()\n\t}\n\tif t.Win.Dirty() {\n\t\tt.Win.Refresh()\n\t}\n}\n\nfunc isdir(path string) bool {\n\tfi, err := os.Stat(path)\n\tif err != nil {\n\t\tif err == os.ErrNotExist {\n\t\t\treturn false\n\t\t}\n\t\tfmt.Println(err)\n\t\treturn false\n\t}\n\treturn fi.IsDir()\n}\nfunc isfile(path string) bool {\n\t_, err := os.Stat(path)\n\treturn err == nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n)\n\nconst (\n\tVERSION = \"0.0.1\"\n\tdefaultPlaceholder = \"{{}}\"\n\tkeyBackspace = 8\n\tkeyDelete = 127\n\tkeyEndOfTransmission = 4\n\tkeyLineFeed = 10\n\tkeyCarriageReturn = 13\n\tkeyEndOfTransmissionBlock = 23\n\tkeyEscape\t\t = 27\n)\n\nvar placeholder string\n\nvar usage = `fzz allows you to run a command interactively.\n\nUsage:\n\n\tfzz command\n\nThe command MUST include the placeholder '{{}}'.\n\nArguments:\n\n\t-v\t\tPrint version and exit\n`\n\nfunc printUsage() {\n\tfmt.Printf(usage)\n}\n\nfunc isPipe(f *os.File) bool {\n\ts, err := f.Stat()\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn s.Mode()&os.ModeNamedPipe != 0\n}\n\nfunc containsPlaceholder(s []string, ph string) bool {\n\tfor _, v := range s {\n\t\tif strings.Contains(v, ph) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc validPlaceholder(p string) bool {\n\treturn len(p)%2 == 0\n}\n\nfunc removeLastWord(s []byte) []byte {\n\tfields := bytes.Fields(s)\n\tif len(fields) > 0 {\n\t\tr := bytes.Join(fields[:len(fields)-1], []byte{' '})\n\t\tif len(r) > 1 {\n\t\t\tr = append(r, ' ')\n\t\t}\n\t\treturn r\n\t}\n\treturn []byte{}\n}\n\nfunc main() {\n\tflVersion := flag.Bool(\"v\", false, \"Print fzz version and quit\")\n\tflag.Usage = printUsage\n\tflag.Parse()\n\n\tif *flVersion {\n\t\tfmt.Printf(\"fzz %s\\n\", VERSION)\n\t\tos.Exit(0)\n\t}\n\n\tif len(flag.Args()) < 2 {\n\t\tfmt.Fprintf(os.Stderr, usage)\n\t\tos.Exit(1)\n\t}\n\n\tif placeholder = os.Getenv(\"FZZ_PLACEHOLDER\"); placeholder == \"\" {\n\t\tplaceholder = defaultPlaceholder\n\t}\n\n\tif !validPlaceholder(placeholder) {\n\t\tfmt.Fprintln(os.Stderr, \"Placeholder is not valid, needs even number of characters\")\n\t\tos.Exit(1)\n\t}\n\n\tif !containsPlaceholder(flag.Args(), placeholder) {\n\t\tfmt.Fprintln(os.Stderr, \"No placeholder in arguments\")\n\t\tos.Exit(1)\n\t}\n\n\ttty, err := NewTTY()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer tty.resetState()\n\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\tgo func() {\n\t\t<-c\n\t\ttty.resetState()\n\t\tos.Exit(1)\n\t}()\n\ttty.setSttyState(\"cbreak\", \"-echo\")\n\n\tprinter := NewPrinter(tty, tty.cols, tty.rows-1) \/\/ prompt is one row\n\trunner := &Runner{\n\t\tprinter: printer,\n\t\ttemplate: flag.Args(),\n\t\tplaceholder: placeholder,\n\t}\n\n\tif isPipe(os.Stdin) {\n\t\trunner.stdinbuf = new(bytes.Buffer)\n\t\tio.Copy(runner.stdinbuf, os.Stdin)\n\t}\n\n\tinput := make([]byte, 0)\n\tb := make([]byte, 1)\n\n\tfor {\n\t\ttty.resetScreen()\n\t\ttty.printPrompt(input[:len(input)])\n\n\t\tif len(input) > 0 {\n\t\t\trunner.killCurrent()\n\n\t\t\tcmdInput := make([]byte, len(input))\n\t\t\tcopy(cmdInput, input)\n\n\t\t\tgo func() {\n\t\t\t\trunner.runWithInput(cmdInput)\n\t\t\t\ttty.cursorAfterPrompt(utf8.RuneCount(input))\n\t\t\t}()\n\t\t}\n\n\t\ttty.Read(b)\n\t\tswitch b[0] {\n\t\tcase keyBackspace, keyDelete:\n\t\t\tif len(input) > 1 {\n\t\t\t\tr, rsize := utf8.DecodeLastRune(input)\n\t\t\t\tif r == utf8.RuneError {\n\t\t\t\t\tinput = input[:len(input)-1]\n\t\t\t\t} else {\n\t\t\t\t\tinput = input[:len(input)-rsize]\n\t\t\t\t}\n\t\t\t} else if len(input) == 1 {\n\t\t\t\tinput = nil\n\t\t\t}\n\t\tcase keyEndOfTransmission, keyLineFeed, keyCarriageReturn:\n\t\t\ttty.resetScreen()\n\t\t\trunner.writeCmdStdout(os.Stdout)\n\t\t\treturn\n\t\tcase keyEscape:\n\t\t\ttty.resetScreen()\n\t\t\treturn\n\t\tcase keyEndOfTransmissionBlock:\n\t\t\tinput = removeLastWord(input)\n\t\tdefault:\n\t\t\t\/\/ TODO: Default is wrong here. Only append printable characters to\n\t\t\t\/\/ input\n\t\t\tinput = append(input, b...)\n\t\t}\n\t}\n}\n<commit_msg>Avoid reading incomplete utf8 sequences<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n)\n\nconst (\n\tVERSION = \"0.0.1\"\n\tdefaultPlaceholder = \"{{}}\"\n\tkeyBackspace = 8\n\tkeyDelete = 127\n\tkeyEndOfTransmission = 4\n\tkeyLineFeed = 10\n\tkeyCarriageReturn = 13\n\tkeyEndOfTransmissionBlock = 23\n\tkeyEscape\t\t = 27\n)\n\nvar placeholder string\n\nvar usage = `fzz allows you to run a command interactively.\n\nUsage:\n\n\tfzz command\n\nThe command MUST include the placeholder '{{}}'.\n\nArguments:\n\n\t-v\t\tPrint version and exit\n`\n\nfunc printUsage() {\n\tfmt.Printf(usage)\n}\n\nfunc isPipe(f *os.File) bool {\n\ts, err := f.Stat()\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn s.Mode()&os.ModeNamedPipe != 0\n}\n\nfunc containsPlaceholder(s []string, ph string) bool {\n\tfor _, v := range s {\n\t\tif strings.Contains(v, ph) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc validPlaceholder(p string) bool {\n\treturn len(p)%2 == 0\n}\n\nfunc removeLastWord(s []byte) []byte {\n\tfields := bytes.Fields(s)\n\tif len(fields) > 0 {\n\t\tr := bytes.Join(fields[:len(fields)-1], []byte{' '})\n\t\tif len(r) > 1 {\n\t\t\tr = append(r, ' ')\n\t\t}\n\t\treturn r\n\t}\n\treturn []byte{}\n}\n\nfunc main() {\n\tflVersion := flag.Bool(\"v\", false, \"Print fzz version and quit\")\n\tflag.Usage = printUsage\n\tflag.Parse()\n\n\tif *flVersion {\n\t\tfmt.Printf(\"fzz %s\\n\", VERSION)\n\t\tos.Exit(0)\n\t}\n\n\tif len(flag.Args()) < 2 {\n\t\tfmt.Fprintf(os.Stderr, usage)\n\t\tos.Exit(1)\n\t}\n\n\tif placeholder = os.Getenv(\"FZZ_PLACEHOLDER\"); placeholder == \"\" {\n\t\tplaceholder = defaultPlaceholder\n\t}\n\n\tif !validPlaceholder(placeholder) {\n\t\tfmt.Fprintln(os.Stderr, \"Placeholder is not valid, needs even number of characters\")\n\t\tos.Exit(1)\n\t}\n\n\tif !containsPlaceholder(flag.Args(), placeholder) {\n\t\tfmt.Fprintln(os.Stderr, \"No placeholder in arguments\")\n\t\tos.Exit(1)\n\t}\n\n\ttty, err := NewTTY()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer tty.resetState()\n\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\tgo func() {\n\t\t<-c\n\t\ttty.resetState()\n\t\tos.Exit(1)\n\t}()\n\ttty.setSttyState(\"cbreak\", \"-echo\")\n\n\tprinter := NewPrinter(tty, tty.cols, tty.rows-1) \/\/ prompt is one row\n\trunner := &Runner{\n\t\tprinter: printer,\n\t\ttemplate: flag.Args(),\n\t\tplaceholder: placeholder,\n\t}\n\n\tif isPipe(os.Stdin) {\n\t\trunner.stdinbuf = new(bytes.Buffer)\n\t\tio.Copy(runner.stdinbuf, os.Stdin)\n\t}\n\n\tinput := make([]byte, 0)\n\trs := bufio.NewScanner(tty)\n\trs.Split(bufio.ScanRunes)\n\n\tfor {\n\t\ttty.resetScreen()\n\t\ttty.printPrompt(input[:len(input)])\n\n\t\tif len(input) > 0 {\n\t\t\trunner.killCurrent()\n\n\t\t\tcmdInput := make([]byte, len(input))\n\t\t\tcopy(cmdInput, input)\n\n\t\t\tgo func() {\n\t\t\t\trunner.runWithInput(cmdInput)\n\t\t\t\ttty.cursorAfterPrompt(utf8.RuneCount(input))\n\t\t\t}()\n\t\t}\n\n\t\tif !rs.Scan() {\n\t\t\ttty.resetScreen()\n\t\t\tlog.Fatal(rs.Err())\n\t\t}\n\t\tb := rs.Bytes()\n\n\t\tswitch b[0] {\n\t\tcase keyBackspace, keyDelete:\n\t\t\tif len(input) > 1 {\n\t\t\t\tr, rsize := utf8.DecodeLastRune(input)\n\t\t\t\tif r == utf8.RuneError {\n\t\t\t\t\tinput = input[:len(input)-1]\n\t\t\t\t} else {\n\t\t\t\t\tinput = input[:len(input)-rsize]\n\t\t\t\t}\n\t\t\t} else if len(input) == 1 {\n\t\t\t\tinput = nil\n\t\t\t}\n\t\tcase keyEndOfTransmission, keyLineFeed, keyCarriageReturn:\n\t\t\ttty.resetScreen()\n\t\t\trunner.writeCmdStdout(os.Stdout)\n\t\t\treturn\n\t\tcase keyEscape:\n\t\t\ttty.resetScreen()\n\t\t\treturn\n\t\tcase keyEndOfTransmissionBlock:\n\t\t\tinput = removeLastWord(input)\n\t\tdefault:\n\t\t\t\/\/ TODO: Default is wrong here. Only append printable characters to\n\t\t\t\/\/ input\n\t\t\tinput = append(input, b...)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package g\n\nimport (\n\t\"log\"\n\t\"runtime\"\n)\n\n\/\/ changelog:\n\/\/ 0.0.1: init project\n\/\/ 0.0.4: bugfix: set replicas before add node\n\/\/ 0.0.8: change receiver, mv proc cron to proc pkg, add readme, add gitversion, add config reload, add trace tools\n\/\/ 0.0.9: fix bugs of conn pool(use transfer's private conn pool, named & minimum)\n\/\/ 0.0.10: use more efficient proc & sema, rm conn_pool status log\n\/\/ 0.0.11: fix bug: all graphs' traffic delined when one graph broken down, modify retry interval\n\/\/ 0.0.14: support sending multi copies to graph node, align ts for judge, add filter\n\/\/ 0.1.4: 添加influxdb存储支持;用于流量采集系统,修改程序名称以区分;修改程序启功方式支持supervisor。\n\/\/ 0.1.5: 修改项目名称\n\/\/ 0.1.7:删除 judge 和 graph 部分\n\/\/ 0.1.9: 添加 mallard pfc统计\n\/\/ 0.2.0: 添加优雅重启支持\n\nconst (\n\tVERSION = \"0.2.0\"\n\tGAUGE = \"GAUGE\"\n\tCOUNTER = \"COUNTER\"\n\tDERIVE = \"DERIVE\"\n\tDEFAULT_STEP = 60\n\tMIN_STEP = 30\n)\n\nfunc init() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tlog.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)\n}\n<commit_msg>更新版本号<commit_after>package g\n\nimport (\n\t\"log\"\n\t\"runtime\"\n)\n\n\/\/ changelog:\n\/\/ 0.0.1: init project\n\/\/ 0.0.4: bugfix: set replicas before add node\n\/\/ 0.0.8: change receiver, mv proc cron to proc pkg, add readme, add gitversion, add config reload, add trace tools\n\/\/ 0.0.9: fix bugs of conn pool(use transfer's private conn pool, named & minimum)\n\/\/ 0.0.10: use more efficient proc & sema, rm conn_pool status log\n\/\/ 0.0.11: fix bug: all graphs' traffic delined when one graph broken down, modify retry interval\n\/\/ 0.0.14: support sending multi copies to graph node, align ts for judge, add filter\n\/\/ 0.1.4: 添加influxdb存储支持;用于流量采集系统,修改程序名称以区分;修改程序启功方式支持supervisor。\n\/\/ 0.1.5: 修改项目名称\n\/\/ 0.1.7:删除 judge 和 graph 部分\n\/\/ 0.1.9: 添加 mallard pfc统计\n\/\/ 0.2.0: 添加优雅重启支持\n\/\/ 1.0.0: 修改打包方式\n\nconst (\n\tVERSION = \"1.0.0\"\n\tGAUGE = \"GAUGE\"\n\tCOUNTER = \"COUNTER\"\n\tDERIVE = \"DERIVE\"\n\tDEFAULT_STEP = 60\n\tMIN_STEP = 30\n)\n\nfunc init() {\n\truntime.GOMAXPROCS(runtime.NumCPU())\n\tlog.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright ©2016 The Gonum Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage gonum\n\nimport (\n\t\"math\"\n\n\t\"gonum.org\/v1\/gonum\/blas\/blas64\"\n)\n\n\/\/ Dlasy2 solves the Sylvester matrix equation where the matrices are of order 1\n\/\/ or 2. It computes the unknown n1×n2 matrix X so that\n\/\/ TL*X + sgn*X*TR = scale*B if tranl == false and tranr == false,\n\/\/ TLᵀ*X + sgn*X*TR = scale*B if tranl == true and tranr == false,\n\/\/ TL*X + sgn*X*TRᵀ = scale*B if tranl == false and tranr == true,\n\/\/ TLᵀ*X + sgn*X*TRᵀ = scale*B if tranl == true and tranr == true,\n\/\/ where TL is n1×n1, TR is n2×n2, B is n1×n2, and 1 <= n1,n2 <= 2.\n\/\/\n\/\/ isgn must be 1 or -1, and n1 and n2 must be 0, 1, or 2, but these conditions\n\/\/ are not checked.\n\/\/\n\/\/ Dlasy2 returns three values, a scale factor that is chosen less than or equal\n\/\/ to 1 to prevent the solution overflowing, the infinity norm of the solution,\n\/\/ and an indicator of success. If ok is false, TL and TR have eigenvalues that\n\/\/ are too close, so TL or TR is perturbed to get a non-singular equation.\n\/\/\n\/\/ Dlasy2 is an internal routine. It is exported for testing purposes.\nfunc (impl Implementation) Dlasy2(tranl, tranr bool, isgn, n1, n2 int, tl []float64, ldtl int, tr []float64, ldtr int, b []float64, ldb int, x []float64, ldx int) (scale, xnorm float64, ok bool) {\n\t\/\/ TODO(vladimir-ch): Add input validation checks conditionally skipped\n\t\/\/ using the build tag mechanism.\n\n\tok = true\n\t\/\/ Quick return if possible.\n\tif n1 == 0 || n2 == 0 {\n\t\treturn scale, xnorm, ok\n\t}\n\n\t\/\/ Set constants to control overflow.\n\teps := dlamchP\n\tsmlnum := dlamchS \/ eps\n\tsgn := float64(isgn)\n\n\tif n1 == 1 && n2 == 1 {\n\t\t\/\/ 1×1 case: TL11*X + sgn*X*TR11 = B11.\n\t\ttau1 := tl[0] + sgn*tr[0]\n\t\tbet := math.Abs(tau1)\n\t\tif bet <= smlnum {\n\t\t\ttau1 = smlnum\n\t\t\tbet = smlnum\n\t\t\tok = false\n\t\t}\n\t\tscale = 1\n\t\tgam := math.Abs(b[0])\n\t\tif smlnum*gam > bet {\n\t\t\tscale = 1 \/ gam\n\t\t}\n\t\tx[0] = b[0] * scale \/ tau1\n\t\txnorm = math.Abs(x[0])\n\t\treturn scale, xnorm, ok\n\t}\n\n\tif n1+n2 == 3 {\n\t\t\/\/ 1×2 or 2×1 case.\n\t\tvar (\n\t\t\tsmin float64\n\t\t\ttmp [4]float64 \/\/ tmp is used as a 2×2 row-major matrix.\n\t\t\tbtmp [2]float64\n\t\t)\n\t\tif n1 == 1 && n2 == 2 {\n\t\t\t\/\/ 1×2 case: TL11*[X11 X12] + sgn*[X11 X12]*op[TR11 TR12] = [B11 B12].\n\t\t\t\/\/ [TR21 TR22]\n\t\t\tsmin = math.Abs(tl[0])\n\t\t\tsmin = math.Max(smin, math.Max(math.Abs(tr[0]), math.Abs(tr[1])))\n\t\t\tsmin = math.Max(smin, math.Max(math.Abs(tr[ldtr]), math.Abs(tr[ldtr+1])))\n\t\t\tsmin = math.Max(eps*smin, smlnum)\n\t\t\ttmp[0] = tl[0] + sgn*tr[0]\n\t\t\ttmp[3] = tl[0] + sgn*tr[ldtr+1]\n\t\t\tif tranr {\n\t\t\t\ttmp[1] = sgn * tr[1]\n\t\t\t\ttmp[2] = sgn * tr[ldtr]\n\t\t\t} else {\n\t\t\t\ttmp[1] = sgn * tr[ldtr]\n\t\t\t\ttmp[2] = sgn * tr[1]\n\t\t\t}\n\t\t\tbtmp[0] = b[0]\n\t\t\tbtmp[1] = b[1]\n\t\t} else {\n\t\t\t\/\/ 2×1 case: op[TL11 TL12]*[X11] + sgn*[X11]*TR11 = [B11].\n\t\t\t\/\/ [TL21 TL22]*[X21] [X21] [B21]\n\t\t\tsmin = math.Abs(tr[0])\n\t\t\tsmin = math.Max(smin, math.Max(math.Abs(tl[0]), math.Abs(tl[1])))\n\t\t\tsmin = math.Max(smin, math.Max(math.Abs(tl[ldtl]), math.Abs(tl[ldtl+1])))\n\t\t\tsmin = math.Max(eps*smin, smlnum)\n\t\t\ttmp[0] = tl[0] + sgn*tr[0]\n\t\t\ttmp[3] = tl[ldtl+1] + sgn*tr[0]\n\t\t\tif tranl {\n\t\t\t\ttmp[1] = tl[ldtl]\n\t\t\t\ttmp[2] = tl[1]\n\t\t\t} else {\n\t\t\t\ttmp[1] = tl[1]\n\t\t\t\ttmp[2] = tl[ldtl]\n\t\t\t}\n\t\t\tbtmp[0] = b[0]\n\t\t\tbtmp[1] = b[ldb]\n\t\t}\n\n\t\t\/\/ Solve 2×2 system using complete pivoting.\n\t\t\/\/ Set pivots less than smin to smin.\n\n\t\tbi := blas64.Implementation()\n\t\tipiv := bi.Idamax(len(tmp), tmp[:], 1)\n\t\t\/\/ Compute the upper triangular matrix [u11 u12].\n\t\t\/\/ [ 0 u22]\n\t\tu11 := tmp[ipiv]\n\t\tif math.Abs(u11) <= smin {\n\t\t\tok = false\n\t\t\tu11 = smin\n\t\t}\n\t\tlocu12 := [4]int{1, 0, 3, 2} \/\/ Index in tmp of the element on the same row as the pivot.\n\t\tu12 := tmp[locu12[ipiv]]\n\t\tlocl21 := [4]int{2, 3, 0, 1} \/\/ Index in tmp of the element on the same column as the pivot.\n\t\tl21 := tmp[locl21[ipiv]] \/ u11\n\t\tlocu22 := [4]int{3, 2, 1, 0} \/\/ Index in tmp of the remaining element.\n\t\tu22 := tmp[locu22[ipiv]] - l21*u12\n\t\tif math.Abs(u22) <= smin {\n\t\t\tok = false\n\t\t\tu22 = smin\n\t\t}\n\t\tif ipiv&0x2 != 0 { \/\/ true for ipiv equal to 2 and 3.\n\t\t\t\/\/ The pivot was in the second row, swap the elements of\n\t\t\t\/\/ the right-hand side.\n\t\t\tbtmp[0], btmp[1] = btmp[1], btmp[0]-l21*btmp[1]\n\t\t} else {\n\t\t\tbtmp[1] -= l21 * btmp[0]\n\t\t}\n\t\tscale = 1\n\t\tif 2*smlnum*math.Abs(btmp[1]) > math.Abs(u22) || 2*smlnum*math.Abs(btmp[0]) > math.Abs(u11) {\n\t\t\tscale = 0.5 \/ math.Max(math.Abs(btmp[0]), math.Abs(btmp[1]))\n\t\t\tbtmp[0] *= scale\n\t\t\tbtmp[1] *= scale\n\t\t}\n\t\t\/\/ Solve the system [u11 u12] [x21] = [ btmp[0] ].\n\t\t\/\/ [ 0 u22] [x22] [ btmp[1] ]\n\t\tx22 := btmp[1] \/ u22\n\t\tx21 := btmp[0]\/u11 - (u12\/u11)*x22\n\t\tif ipiv&0x1 != 0 { \/\/ true for ipiv equal to 1 and 3.\n\t\t\t\/\/ The pivot was in the second column, swap the elements\n\t\t\t\/\/ of the solution.\n\t\t\tx21, x22 = x22, x21\n\t\t}\n\t\tx[0] = x21\n\t\tif n1 == 1 {\n\t\t\tx[1] = x22\n\t\t\txnorm = math.Abs(x[0]) + math.Abs(x[1])\n\t\t} else {\n\t\t\tx[ldx] = x22\n\t\t\txnorm = math.Max(math.Abs(x[0]), math.Abs(x[ldx]))\n\t\t}\n\t\treturn scale, xnorm, ok\n\t}\n\n\t\/\/ 2×2 case: op[TL11 TL12]*[X11 X12] + SGN*[X11 X12]*op[TR11 TR12] = [B11 B12].\n\t\/\/ [TL21 TL22] [X21 X22] [X21 X22] [TR21 TR22] [B21 B22]\n\t\/\/\n\t\/\/ Solve equivalent 4×4 system using complete pivoting.\n\t\/\/ Set pivots less than smin to smin.\n\n\tsmin := math.Max(math.Abs(tr[0]), math.Abs(tr[1]))\n\tsmin = math.Max(smin, math.Max(math.Abs(tr[ldtr]), math.Abs(tr[ldtr+1])))\n\tsmin = math.Max(smin, math.Max(math.Abs(tl[0]), math.Abs(tl[1])))\n\tsmin = math.Max(smin, math.Max(math.Abs(tl[ldtl]), math.Abs(tl[ldtl+1])))\n\tsmin = math.Max(eps*smin, smlnum)\n\n\tvar t [4][4]float64\n\tt[0][0] = tl[0] + sgn*tr[0]\n\tt[1][1] = tl[0] + sgn*tr[ldtr+1]\n\tt[2][2] = tl[ldtl+1] + sgn*tr[0]\n\tt[3][3] = tl[ldtl+1] + sgn*tr[ldtr+1]\n\tif tranl {\n\t\tt[0][2] = tl[ldtl]\n\t\tt[1][3] = tl[ldtl]\n\t\tt[2][0] = tl[1]\n\t\tt[3][1] = tl[1]\n\t} else {\n\t\tt[0][2] = tl[1]\n\t\tt[1][3] = tl[1]\n\t\tt[2][0] = tl[ldtl]\n\t\tt[3][1] = tl[ldtl]\n\t}\n\tif tranr {\n\t\tt[0][1] = sgn * tr[1]\n\t\tt[1][0] = sgn * tr[ldtr]\n\t\tt[2][3] = sgn * tr[1]\n\t\tt[3][2] = sgn * tr[ldtr]\n\t} else {\n\t\tt[0][1] = sgn * tr[ldtr]\n\t\tt[1][0] = sgn * tr[1]\n\t\tt[2][3] = sgn * tr[ldtr]\n\t\tt[3][2] = sgn * tr[1]\n\t}\n\n\tvar btmp [4]float64\n\tbtmp[0] = b[0]\n\tbtmp[1] = b[1]\n\tbtmp[2] = b[ldb]\n\tbtmp[3] = b[ldb+1]\n\n\t\/\/ Perform elimination.\n\tvar jpiv [4]int \/\/ jpiv records any column swaps for pivoting.\n\tfor i := 0; i < 3; i++ {\n\t\tvar (\n\t\t\txmax float64\n\t\t\tipsv, jpsv int\n\t\t)\n\t\tfor ip := i; ip < 4; ip++ {\n\t\t\tfor jp := i; jp < 4; jp++ {\n\t\t\t\tif math.Abs(t[ip][jp]) >= xmax {\n\t\t\t\t\txmax = math.Abs(t[ip][jp])\n\t\t\t\t\tipsv = ip\n\t\t\t\t\tjpsv = jp\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ipsv != i {\n\t\t\t\/\/ The pivot is not in the top row of the unprocessed\n\t\t\t\/\/ block, swap rows ipsv and i of t and btmp.\n\t\t\tt[ipsv], t[i] = t[i], t[ipsv]\n\t\t\tbtmp[ipsv], btmp[i] = btmp[i], btmp[ipsv]\n\t\t}\n\t\tif jpsv != i {\n\t\t\t\/\/ The pivot is not in the left column of the\n\t\t\t\/\/ unprocessed block, swap columns jpsv and i of t.\n\t\t\tfor k := 0; k < 4; k++ {\n\t\t\t\tt[k][jpsv], t[k][i] = t[k][i], t[k][jpsv]\n\t\t\t}\n\t\t}\n\t\tjpiv[i] = jpsv\n\t\tif math.Abs(t[i][i]) < smin {\n\t\t\tok = false\n\t\t\tt[i][i] = smin\n\t\t}\n\t\tfor k := i + 1; k < 4; k++ {\n\t\t\tt[k][i] \/= t[i][i]\n\t\t\tbtmp[k] -= t[k][i] * btmp[i]\n\t\t\tfor j := i + 1; j < 4; j++ {\n\t\t\t\tt[k][j] -= t[k][i] * t[i][j]\n\t\t\t}\n\t\t}\n\t}\n\tif math.Abs(t[3][3]) < smin {\n\t\tok = false\n\t\tt[3][3] = smin\n\t}\n\tscale = 1\n\tif 8*smlnum*math.Abs(btmp[0]) > math.Abs(t[0][0]) ||\n\t\t8*smlnum*math.Abs(btmp[1]) > math.Abs(t[1][1]) ||\n\t\t8*smlnum*math.Abs(btmp[2]) > math.Abs(t[2][2]) ||\n\t\t8*smlnum*math.Abs(btmp[3]) > math.Abs(t[3][3]) {\n\n\t\tmaxbtmp := math.Max(math.Abs(btmp[0]), math.Abs(btmp[1]))\n\t\tmaxbtmp = math.Max(maxbtmp, math.Max(math.Abs(btmp[2]), math.Abs(btmp[3])))\n\t\tscale = 1 \/ 8 \/ maxbtmp\n\t\tbtmp[0] *= scale\n\t\tbtmp[1] *= scale\n\t\tbtmp[2] *= scale\n\t\tbtmp[3] *= scale\n\t}\n\t\/\/ Compute the solution of the upper triangular system t * tmp = btmp.\n\tvar tmp [4]float64\n\tfor i := 3; i >= 0; i-- {\n\t\ttemp := 1 \/ t[i][i]\n\t\ttmp[i] = btmp[i] * temp\n\t\tfor j := i + 1; j < 4; j++ {\n\t\t\ttmp[i] -= temp * t[i][j] * tmp[j]\n\t\t}\n\t}\n\tfor i := 2; i >= 0; i-- {\n\t\tif jpiv[i] != i {\n\t\t\ttmp[i], tmp[jpiv[i]] = tmp[jpiv[i]], tmp[i]\n\t\t}\n\t}\n\tx[0] = tmp[0]\n\tx[1] = tmp[1]\n\tx[ldx] = tmp[2]\n\tx[ldx+1] = tmp[3]\n\txnorm = math.Max(math.Abs(tmp[0])+math.Abs(tmp[1]), math.Abs(tmp[2])+math.Abs(tmp[3]))\n\treturn scale, xnorm, ok\n}\n<commit_msg>lapack\/gonum: prevent division of untyped ints always yielding zero in Dlasy2 (#1653)<commit_after>\/\/ Copyright ©2016 The Gonum Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage gonum\n\nimport (\n\t\"math\"\n\n\t\"gonum.org\/v1\/gonum\/blas\/blas64\"\n)\n\n\/\/ Dlasy2 solves the Sylvester matrix equation where the matrices are of order 1\n\/\/ or 2. It computes the unknown n1×n2 matrix X so that\n\/\/ TL*X + sgn*X*TR = scale*B if tranl == false and tranr == false,\n\/\/ TLᵀ*X + sgn*X*TR = scale*B if tranl == true and tranr == false,\n\/\/ TL*X + sgn*X*TRᵀ = scale*B if tranl == false and tranr == true,\n\/\/ TLᵀ*X + sgn*X*TRᵀ = scale*B if tranl == true and tranr == true,\n\/\/ where TL is n1×n1, TR is n2×n2, B is n1×n2, and 1 <= n1,n2 <= 2.\n\/\/\n\/\/ isgn must be 1 or -1, and n1 and n2 must be 0, 1, or 2, but these conditions\n\/\/ are not checked.\n\/\/\n\/\/ Dlasy2 returns three values, a scale factor that is chosen less than or equal\n\/\/ to 1 to prevent the solution overflowing, the infinity norm of the solution,\n\/\/ and an indicator of success. If ok is false, TL and TR have eigenvalues that\n\/\/ are too close, so TL or TR is perturbed to get a non-singular equation.\n\/\/\n\/\/ Dlasy2 is an internal routine. It is exported for testing purposes.\nfunc (impl Implementation) Dlasy2(tranl, tranr bool, isgn, n1, n2 int, tl []float64, ldtl int, tr []float64, ldtr int, b []float64, ldb int, x []float64, ldx int) (scale, xnorm float64, ok bool) {\n\t\/\/ TODO(vladimir-ch): Add input validation checks conditionally skipped\n\t\/\/ using the build tag mechanism.\n\n\tok = true\n\t\/\/ Quick return if possible.\n\tif n1 == 0 || n2 == 0 {\n\t\treturn scale, xnorm, ok\n\t}\n\n\t\/\/ Set constants to control overflow.\n\teps := dlamchP\n\tsmlnum := dlamchS \/ eps\n\tsgn := float64(isgn)\n\n\tif n1 == 1 && n2 == 1 {\n\t\t\/\/ 1×1 case: TL11*X + sgn*X*TR11 = B11.\n\t\ttau1 := tl[0] + sgn*tr[0]\n\t\tbet := math.Abs(tau1)\n\t\tif bet <= smlnum {\n\t\t\ttau1 = smlnum\n\t\t\tbet = smlnum\n\t\t\tok = false\n\t\t}\n\t\tscale = 1\n\t\tgam := math.Abs(b[0])\n\t\tif smlnum*gam > bet {\n\t\t\tscale = 1 \/ gam\n\t\t}\n\t\tx[0] = b[0] * scale \/ tau1\n\t\txnorm = math.Abs(x[0])\n\t\treturn scale, xnorm, ok\n\t}\n\n\tif n1+n2 == 3 {\n\t\t\/\/ 1×2 or 2×1 case.\n\t\tvar (\n\t\t\tsmin float64\n\t\t\ttmp [4]float64 \/\/ tmp is used as a 2×2 row-major matrix.\n\t\t\tbtmp [2]float64\n\t\t)\n\t\tif n1 == 1 && n2 == 2 {\n\t\t\t\/\/ 1×2 case: TL11*[X11 X12] + sgn*[X11 X12]*op[TR11 TR12] = [B11 B12].\n\t\t\t\/\/ [TR21 TR22]\n\t\t\tsmin = math.Abs(tl[0])\n\t\t\tsmin = math.Max(smin, math.Max(math.Abs(tr[0]), math.Abs(tr[1])))\n\t\t\tsmin = math.Max(smin, math.Max(math.Abs(tr[ldtr]), math.Abs(tr[ldtr+1])))\n\t\t\tsmin = math.Max(eps*smin, smlnum)\n\t\t\ttmp[0] = tl[0] + sgn*tr[0]\n\t\t\ttmp[3] = tl[0] + sgn*tr[ldtr+1]\n\t\t\tif tranr {\n\t\t\t\ttmp[1] = sgn * tr[1]\n\t\t\t\ttmp[2] = sgn * tr[ldtr]\n\t\t\t} else {\n\t\t\t\ttmp[1] = sgn * tr[ldtr]\n\t\t\t\ttmp[2] = sgn * tr[1]\n\t\t\t}\n\t\t\tbtmp[0] = b[0]\n\t\t\tbtmp[1] = b[1]\n\t\t} else {\n\t\t\t\/\/ 2×1 case: op[TL11 TL12]*[X11] + sgn*[X11]*TR11 = [B11].\n\t\t\t\/\/ [TL21 TL22]*[X21] [X21] [B21]\n\t\t\tsmin = math.Abs(tr[0])\n\t\t\tsmin = math.Max(smin, math.Max(math.Abs(tl[0]), math.Abs(tl[1])))\n\t\t\tsmin = math.Max(smin, math.Max(math.Abs(tl[ldtl]), math.Abs(tl[ldtl+1])))\n\t\t\tsmin = math.Max(eps*smin, smlnum)\n\t\t\ttmp[0] = tl[0] + sgn*tr[0]\n\t\t\ttmp[3] = tl[ldtl+1] + sgn*tr[0]\n\t\t\tif tranl {\n\t\t\t\ttmp[1] = tl[ldtl]\n\t\t\t\ttmp[2] = tl[1]\n\t\t\t} else {\n\t\t\t\ttmp[1] = tl[1]\n\t\t\t\ttmp[2] = tl[ldtl]\n\t\t\t}\n\t\t\tbtmp[0] = b[0]\n\t\t\tbtmp[1] = b[ldb]\n\t\t}\n\n\t\t\/\/ Solve 2×2 system using complete pivoting.\n\t\t\/\/ Set pivots less than smin to smin.\n\n\t\tbi := blas64.Implementation()\n\t\tipiv := bi.Idamax(len(tmp), tmp[:], 1)\n\t\t\/\/ Compute the upper triangular matrix [u11 u12].\n\t\t\/\/ [ 0 u22]\n\t\tu11 := tmp[ipiv]\n\t\tif math.Abs(u11) <= smin {\n\t\t\tok = false\n\t\t\tu11 = smin\n\t\t}\n\t\tlocu12 := [4]int{1, 0, 3, 2} \/\/ Index in tmp of the element on the same row as the pivot.\n\t\tu12 := tmp[locu12[ipiv]]\n\t\tlocl21 := [4]int{2, 3, 0, 1} \/\/ Index in tmp of the element on the same column as the pivot.\n\t\tl21 := tmp[locl21[ipiv]] \/ u11\n\t\tlocu22 := [4]int{3, 2, 1, 0} \/\/ Index in tmp of the remaining element.\n\t\tu22 := tmp[locu22[ipiv]] - l21*u12\n\t\tif math.Abs(u22) <= smin {\n\t\t\tok = false\n\t\t\tu22 = smin\n\t\t}\n\t\tif ipiv&0x2 != 0 { \/\/ true for ipiv equal to 2 and 3.\n\t\t\t\/\/ The pivot was in the second row, swap the elements of\n\t\t\t\/\/ the right-hand side.\n\t\t\tbtmp[0], btmp[1] = btmp[1], btmp[0]-l21*btmp[1]\n\t\t} else {\n\t\t\tbtmp[1] -= l21 * btmp[0]\n\t\t}\n\t\tscale = 1\n\t\tif 2*smlnum*math.Abs(btmp[1]) > math.Abs(u22) || 2*smlnum*math.Abs(btmp[0]) > math.Abs(u11) {\n\t\t\tscale = 0.5 \/ math.Max(math.Abs(btmp[0]), math.Abs(btmp[1]))\n\t\t\tbtmp[0] *= scale\n\t\t\tbtmp[1] *= scale\n\t\t}\n\t\t\/\/ Solve the system [u11 u12] [x21] = [ btmp[0] ].\n\t\t\/\/ [ 0 u22] [x22] [ btmp[1] ]\n\t\tx22 := btmp[1] \/ u22\n\t\tx21 := btmp[0]\/u11 - (u12\/u11)*x22\n\t\tif ipiv&0x1 != 0 { \/\/ true for ipiv equal to 1 and 3.\n\t\t\t\/\/ The pivot was in the second column, swap the elements\n\t\t\t\/\/ of the solution.\n\t\t\tx21, x22 = x22, x21\n\t\t}\n\t\tx[0] = x21\n\t\tif n1 == 1 {\n\t\t\tx[1] = x22\n\t\t\txnorm = math.Abs(x[0]) + math.Abs(x[1])\n\t\t} else {\n\t\t\tx[ldx] = x22\n\t\t\txnorm = math.Max(math.Abs(x[0]), math.Abs(x[ldx]))\n\t\t}\n\t\treturn scale, xnorm, ok\n\t}\n\n\t\/\/ 2×2 case: op[TL11 TL12]*[X11 X12] + SGN*[X11 X12]*op[TR11 TR12] = [B11 B12].\n\t\/\/ [TL21 TL22] [X21 X22] [X21 X22] [TR21 TR22] [B21 B22]\n\t\/\/\n\t\/\/ Solve equivalent 4×4 system using complete pivoting.\n\t\/\/ Set pivots less than smin to smin.\n\n\tsmin := math.Max(math.Abs(tr[0]), math.Abs(tr[1]))\n\tsmin = math.Max(smin, math.Max(math.Abs(tr[ldtr]), math.Abs(tr[ldtr+1])))\n\tsmin = math.Max(smin, math.Max(math.Abs(tl[0]), math.Abs(tl[1])))\n\tsmin = math.Max(smin, math.Max(math.Abs(tl[ldtl]), math.Abs(tl[ldtl+1])))\n\tsmin = math.Max(eps*smin, smlnum)\n\n\tvar t [4][4]float64\n\tt[0][0] = tl[0] + sgn*tr[0]\n\tt[1][1] = tl[0] + sgn*tr[ldtr+1]\n\tt[2][2] = tl[ldtl+1] + sgn*tr[0]\n\tt[3][3] = tl[ldtl+1] + sgn*tr[ldtr+1]\n\tif tranl {\n\t\tt[0][2] = tl[ldtl]\n\t\tt[1][3] = tl[ldtl]\n\t\tt[2][0] = tl[1]\n\t\tt[3][1] = tl[1]\n\t} else {\n\t\tt[0][2] = tl[1]\n\t\tt[1][3] = tl[1]\n\t\tt[2][0] = tl[ldtl]\n\t\tt[3][1] = tl[ldtl]\n\t}\n\tif tranr {\n\t\tt[0][1] = sgn * tr[1]\n\t\tt[1][0] = sgn * tr[ldtr]\n\t\tt[2][3] = sgn * tr[1]\n\t\tt[3][2] = sgn * tr[ldtr]\n\t} else {\n\t\tt[0][1] = sgn * tr[ldtr]\n\t\tt[1][0] = sgn * tr[1]\n\t\tt[2][3] = sgn * tr[ldtr]\n\t\tt[3][2] = sgn * tr[1]\n\t}\n\n\tvar btmp [4]float64\n\tbtmp[0] = b[0]\n\tbtmp[1] = b[1]\n\tbtmp[2] = b[ldb]\n\tbtmp[3] = b[ldb+1]\n\n\t\/\/ Perform elimination.\n\tvar jpiv [4]int \/\/ jpiv records any column swaps for pivoting.\n\tfor i := 0; i < 3; i++ {\n\t\tvar (\n\t\t\txmax float64\n\t\t\tipsv, jpsv int\n\t\t)\n\t\tfor ip := i; ip < 4; ip++ {\n\t\t\tfor jp := i; jp < 4; jp++ {\n\t\t\t\tif math.Abs(t[ip][jp]) >= xmax {\n\t\t\t\t\txmax = math.Abs(t[ip][jp])\n\t\t\t\t\tipsv = ip\n\t\t\t\t\tjpsv = jp\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ipsv != i {\n\t\t\t\/\/ The pivot is not in the top row of the unprocessed\n\t\t\t\/\/ block, swap rows ipsv and i of t and btmp.\n\t\t\tt[ipsv], t[i] = t[i], t[ipsv]\n\t\t\tbtmp[ipsv], btmp[i] = btmp[i], btmp[ipsv]\n\t\t}\n\t\tif jpsv != i {\n\t\t\t\/\/ The pivot is not in the left column of the\n\t\t\t\/\/ unprocessed block, swap columns jpsv and i of t.\n\t\t\tfor k := 0; k < 4; k++ {\n\t\t\t\tt[k][jpsv], t[k][i] = t[k][i], t[k][jpsv]\n\t\t\t}\n\t\t}\n\t\tjpiv[i] = jpsv\n\t\tif math.Abs(t[i][i]) < smin {\n\t\t\tok = false\n\t\t\tt[i][i] = smin\n\t\t}\n\t\tfor k := i + 1; k < 4; k++ {\n\t\t\tt[k][i] \/= t[i][i]\n\t\t\tbtmp[k] -= t[k][i] * btmp[i]\n\t\t\tfor j := i + 1; j < 4; j++ {\n\t\t\t\tt[k][j] -= t[k][i] * t[i][j]\n\t\t\t}\n\t\t}\n\t}\n\tif math.Abs(t[3][3]) < smin {\n\t\tok = false\n\t\tt[3][3] = smin\n\t}\n\tscale = 1\n\tif 8*smlnum*math.Abs(btmp[0]) > math.Abs(t[0][0]) ||\n\t\t8*smlnum*math.Abs(btmp[1]) > math.Abs(t[1][1]) ||\n\t\t8*smlnum*math.Abs(btmp[2]) > math.Abs(t[2][2]) ||\n\t\t8*smlnum*math.Abs(btmp[3]) > math.Abs(t[3][3]) {\n\n\t\tmaxbtmp := math.Max(math.Abs(btmp[0]), math.Abs(btmp[1]))\n\t\tmaxbtmp = math.Max(maxbtmp, math.Max(math.Abs(btmp[2]), math.Abs(btmp[3])))\n\t\tscale = (1.0 \/ 8.0) \/ maxbtmp\n\t\tbtmp[0] *= scale\n\t\tbtmp[1] *= scale\n\t\tbtmp[2] *= scale\n\t\tbtmp[3] *= scale\n\t}\n\t\/\/ Compute the solution of the upper triangular system t * tmp = btmp.\n\tvar tmp [4]float64\n\tfor i := 3; i >= 0; i-- {\n\t\ttemp := 1 \/ t[i][i]\n\t\ttmp[i] = btmp[i] * temp\n\t\tfor j := i + 1; j < 4; j++ {\n\t\t\ttmp[i] -= temp * t[i][j] * tmp[j]\n\t\t}\n\t}\n\tfor i := 2; i >= 0; i-- {\n\t\tif jpiv[i] != i {\n\t\t\ttmp[i], tmp[jpiv[i]] = tmp[jpiv[i]], tmp[i]\n\t\t}\n\t}\n\tx[0] = tmp[0]\n\tx[1] = tmp[1]\n\tx[ldx] = tmp[2]\n\tx[ldx+1] = tmp[3]\n\txnorm = math.Max(math.Abs(tmp[0])+math.Abs(tmp[1]), math.Abs(tmp[2])+math.Abs(tmp[3]))\n\treturn scale, xnorm, ok\n}\n<|endoftext|>"} {"text":"<commit_before>package dotmatrix\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"image\"\n\t\"image\/color\"\n\t\"image\/gif\"\n\t\"io\"\n\t\"time\"\n)\n\n\/*\nPlayGIF is an experimental function that will draw each frame of a gif to\nthe given writer (usually os.Stdout). Terminal codes are used to reposition\nthe cursor at the beginning of each frame. Delays and disposal methods are\nrespected.\n*\/\nfunc PlayGIF(w io.Writer, giff *gif.GIF) error {\n\tvar screen *image.Paletted\n\n\tfor c := 0; giff.LoopCount == 0 || c < giff.LoopCount; c++ {\n\t\tfor i := 0; i < len(giff.Image); i++ {\n\t\t\tdelay := time.After(time.Duration(giff.Delay[i]) * time.Second \/ 100)\n\t\t\tframe := convert(giff.Image[i])\n\n\t\t\t\/\/ Always draw the first frame from scratch\n\t\t\tif i == 0 {\n\t\t\t\tscreen = frame\n\t\t\t}\n\n\t\t\tswitch giff.Disposal[i] {\n\t\t\t\/\/ Dispose previous essentially means draw then undo\n\t\t\tcase gif.DisposalPrevious:\n\t\t\t\tprevious := image.NewPaletted(screen.Bounds(), screen.Palette)\n\t\t\t\tcopy(previous.Pix, screen.Pix)\n\t\t\t\tdrawFrame(screen, frame)\n\t\t\t\tif err := flush(w, screen); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tscreen = previous\n\t\t\t\t<-delay\n\t\t\t\/\/ Dispose background replaces everything just drawn with the background canvas\n\t\t\tcase gif.DisposalBackground:\n\t\t\t\tbackground := image.NewPaletted(frame.Bounds(), frame.Palette)\n\t\t\t\tfor i := 0; i < len(background.Pix); i++ {\n\t\t\t\t\tbackground.Pix[i] = 2\n\t\t\t\t}\n\t\t\t\tdrawFrame(screen, frame)\n\t\t\t\tif err := flush(w, screen); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t<-delay\n\t\t\t\tdrawFrame(screen, background)\n\t\t\t\tif err := flush(w, screen); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\/\/ Dispose none or undefined means we just draw what we got over top\n\t\t\tdefault:\n\t\t\t\tdrawFrame(screen, frame)\n\t\t\t\tif err := flush(w, screen); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t<-delay\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc drawFrame(target *image.Paletted, source image.Image) {\n\tbounds := source.Bounds()\n\tfor y := bounds.Min.Y; y < bounds.Max.Y; y++ {\n\t\tfor x := bounds.Min.X; x < bounds.Max.X; x++ {\n\t\t\tc := source.At(x, y)\n\t\t\tif c == color.Transparent {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttarget.Set(x, y, c)\n\t\t}\n\t}\n}\n\nfunc flush(w io.Writer, img image.Image) error {\n\tvar buf bytes.Buffer\n\tif err := Encode(&buf, img); err != nil {\n\t\treturn err\n\t}\n\tvar height int\n\tfor {\n\t\tc, err := buf.ReadByte()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif c == '\\n' {\n\t\t\theight++\n\t\t}\n\t\tw.Write([]byte{c})\n\t}\n\tw.Write([]byte(\"\\033[999D\")) \/\/ Move the cursor to the beginning of the line\n\tw.Write([]byte(fmt.Sprintf(\"\\033[%dA\", height))) \/\/ Move the cursor to the top of the image\n\treturn nil\n}\n<commit_msg>fixes background gif disponsal<commit_after>package dotmatrix\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"image\"\n\t\"image\/color\"\n\t\"image\/gif\"\n\t\"io\"\n\t\"time\"\n)\n\n\/*\nPlayGIF is an experimental function that will draw each frame of a gif to\nthe given writer (usually os.Stdout). Terminal codes are used to reposition\nthe cursor at the beginning of each frame. Delays and disposal methods are\nrespected.\n*\/\nfunc PlayGIF(w io.Writer, giff *gif.GIF) error {\n\tif len(giff.Image) < 1 {\n\t\treturn nil\n\t}\n\n\t\/\/ The screen is what we flush to the writer on each iteration\n\tvar screen *image.Paletted\n\t\/\/ Only used if we see background disposal methods\n\tbgPallette := color.Palette{color.Transparent}\n\tif p, ok := giff.Config.ColorModel.(color.Palette); ok {\n\t\tbgPallette = p\n\t}\n\n\tfor c := 0; giff.LoopCount == 0 || c < giff.LoopCount; c++ {\n\t\tfor i := 0; i < len(giff.Image); i++ {\n\t\t\tdelay := time.After(time.Duration(giff.Delay[i]) * time.Second \/ 100)\n\t\t\tframe := convert(giff.Image[i])\n\n\t\t\t\/\/ Always draw the first frame from scratch\n\t\t\tif i == 0 {\n\t\t\t\tscreen = convert(image.NewPaletted(frame.Bounds(), bgPallette))\n\t\t\t}\n\n\t\t\tswitch giff.Disposal[i] {\n\n\t\t\t\/\/ Dispose previous essentially means draw then undo\n\t\t\tcase gif.DisposalPrevious:\n\t\t\t\ttemp := image.NewPaletted(screen.Bounds(), screen.Palette)\n\t\t\t\tcopy(temp.Pix, screen.Pix)\n\n\t\t\t\tdrawOver(screen, frame)\n\t\t\t\tif err := flush(w, screen); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t<-delay\n\n\t\t\t\tscreen = temp\n\n\t\t\t\/\/ Dispose background replaces everything just drawn with the background canvas\n\t\t\tcase gif.DisposalBackground:\n\t\t\t\tbackground := convert(image.NewPaletted(frame.Bounds(), bgPallette))\n\t\t\t\tdrawExact(screen, background)\n\t\t\t\ttemp := image.NewPaletted(screen.Bounds(), screen.Palette)\n\t\t\t\tcopy(temp.Pix, screen.Pix)\n\n\t\t\t\tdrawOver(screen, frame)\n\t\t\t\tif err := flush(w, screen); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t<-delay\n\n\t\t\t\tscreen = temp\n\n\t\t\t\/\/ Dispose none or undefined means we just draw what we got over top\n\t\t\tdefault:\n\t\t\t\tdrawOver(screen, frame)\n\t\t\t\tif err := flush(w, screen); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t<-delay\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Draws any non-transparent pixels into target\nfunc drawOver(target *image.Paletted, source image.Image) {\n\tbounds := source.Bounds()\n\tfor y := bounds.Min.Y; y < bounds.Max.Y; y++ {\n\t\tfor x := bounds.Min.X; x < bounds.Max.X; x++ {\n\t\t\tc := source.At(x, y)\n\t\t\tif c == color.Transparent {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttarget.Set(x, y, c)\n\t\t}\n\t}\n}\n\n\/\/ Draws pixels into target, including transparent ones.\nfunc drawExact(target *image.Paletted, source image.Image) {\n\tbounds := source.Bounds()\n\tfor y := bounds.Min.Y; y < bounds.Max.Y; y++ {\n\t\tfor x := bounds.Min.X; x < bounds.Max.X; x++ {\n\t\t\ttarget.Set(x, y, source.At(x, y))\n\t\t}\n\t}\n}\n\nfunc flush(w io.Writer, img image.Image) error {\n\tvar buf bytes.Buffer\n\tif err := Encode(&buf, img); err != nil {\n\t\treturn err\n\t}\n\tvar height int\n\tfor {\n\t\tc, err := buf.ReadByte()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif c == '\\n' {\n\t\t\theight++\n\t\t}\n\t\tw.Write([]byte{c})\n\t}\n\tw.Write([]byte(\"\\033[999D\")) \/\/ Move the cursor to the beginning of the line\n\tw.Write([]byte(fmt.Sprintf(\"\\033[%dA\", height))) \/\/ Move the cursor to the top of the image\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/xorpaul\/uiprogress\"\n)\n\nfunc resolveGitRepositories(uniqueGitModules map[string]GitModule) {\n\tdefer timeTrack(time.Now(), funcName())\n\tif len(uniqueGitModules) <= 0 {\n\t\tDebugf(\"uniqueGitModules[] is empty, skipping...\")\n\t\treturn\n\t}\n\tbar := uiprogress.AddBar(len(uniqueGitModules)).AppendCompleted().PrependElapsed()\n\tbar.PrependFunc(func(b *uiprogress.Bar) string {\n\t\treturn fmt.Sprintf(\"Resolving Git modules (%d\/%d)\", b.Current(), len(uniqueGitModules))\n\t})\n\t\/\/ Dummy channel to coordinate the number of concurrent goroutines.\n\t\/\/ This channel should be buffered otherwise we will be immediately blocked\n\t\/\/ when trying to fill it.\n\n\tDebugf(\"Resolving \" + strconv.Itoa(len(uniqueGitModules)) + \" Git modules with \" + strconv.Itoa(config.Maxworker) + \" workers\")\n\tconcurrentGoroutines := make(chan struct{}, config.Maxworker)\n\t\/\/ Fill the dummy channel with config.Maxworker empty struct.\n\tfor i := 0; i < config.Maxworker; i++ {\n\t\tconcurrentGoroutines <- struct{}{}\n\t}\n\n\t\/\/ The done channel indicates when a single goroutine has finished its job.\n\tdone := make(chan bool)\n\t\/\/ The waitForAllJobs channel allows the main program\n\t\/\/ to wait until we have indeed done all the jobs.\n\twaitForAllJobs := make(chan bool)\n\t\/\/ Collect all the jobs, and since the job is finished, we can\n\t\/\/ release another spot for a goroutine.\n\tgo func() {\n\t\tfor _, gm := range uniqueGitModules {\n\t\t\tgo func(gm GitModule) {\n\t\t\t\t<-done\n\t\t\t\t\/\/ Say that another goroutine can now start.\n\t\t\t\tconcurrentGoroutines <- struct{}{}\n\t\t\t}(gm)\n\t\t}\n\t\t\/\/ We have collected all the jobs, the program can now terminate\n\t\twaitForAllJobs <- true\n\t}()\n\twg := sync.WaitGroup{}\n\twg.Add(len(uniqueGitModules))\n\n\tfor url, gm := range uniqueGitModules {\n\t\tDebugf(\"git repo url \" + url)\n\t\tprivateKey := gm.privateKey\n\t\tgo func(url string, gm GitModule, bar *uiprogress.Bar) {\n\t\t\t\/\/ Try to receive from the concurrentGoroutines channel. When we have something,\n\t\t\t\/\/ it means we can start a new goroutine because another one finished.\n\t\t\t\/\/ Otherwise, it will block the execution until an execution\n\t\t\t\/\/ spot is available.\n\t\t\t<-concurrentGoroutines\n\t\t\tdefer bar.Incr()\n\t\t\tdefer wg.Done()\n\n\t\t\tif len(gm.privateKey) > 0 {\n\t\t\t\tDebugf(\"git repo url \" + url + \" with ssh key \" + privateKey)\n\t\t\t} else {\n\t\t\t\tDebugf(\"git repo url \" + url + \" without ssh key\")\n\t\t\t}\n\n\t\t\t\/\/log.Println(config)\n\t\t\t\/\/ create save directory name from Git repo name\n\t\t\trepoDir := strings.Replace(strings.Replace(url, \"\/\", \"_\", -1), \":\", \"-\", -1)\n\t\t\tworkDir := filepath.Join(config.ModulesCacheDir, repoDir)\n\n\t\t\tsuccess := doMirrorOrUpdate(gm, workDir, 0)\n\t\t\tif !success && config.UseCacheFallback == false {\n\t\t\t\tFatalf(\"Fatal: Could not reach git repository \" + url)\n\t\t\t}\n\t\t\t\/\/\tdoCloneOrPull(source, workDir, targetDir, sa.Remote, branch, sa.PrivateKey)\n\t\t\tdone <- true\n\t\t}(url, gm, bar)\n\t}\n\n\t\/\/ Wait for all jobs to finish\n\t<-waitForAllJobs\n\twg.Wait()\n}\n\nfunc doMirrorOrUpdate(gitModule GitModule, workDir string, retryCount int) bool {\n\tisControlRepo := strings.HasPrefix(workDir, config.EnvCacheDir)\n\tisInModulesCacheDir := strings.HasPrefix(workDir, config.ModulesCacheDir)\n\n\tneedSSHKey := true\n\tif len(gitModule.privateKey) == 0 || strings.Contains(gitModule.git, \"github.com\") {\n\t\tif isControlRepo {\n\t\t\tneedSSHKey = true\n\t\t} else {\n\t\t\tneedSSHKey = false\n\t\t}\n\t}\n\ter := ExecResult{}\n\tgitCmd := \"git clone --mirror \" + gitModule.git + \" \" + workDir\n\tif config.CloneGitModules && !isControlRepo && !isInModulesCacheDir {\n\t\t\/\/fmt.Printf(\"%+v\\n\", gitModule)\n\t\tgitCmd = \"git clone --single-branch --branch \" + gitModule.tree + \" \" + gitModule.git + \" \" + workDir\n\t}\n\tif isDir(workDir) {\n\t\tgitCmd = \"git --git-dir \" + workDir + \" remote update --prune\"\n\t}\n\n\tif needSSHKey {\n\t\ter = executeCommand(\"ssh-agent bash -c 'ssh-add \"+gitModule.privateKey+\"; \"+gitCmd+\"'\", config.Timeout, gitModule.ignoreUnreachable)\n\t} else {\n\t\ter = executeCommand(gitCmd, config.Timeout, gitModule.ignoreUnreachable)\n\t}\n\n\tif er.returnCode != 0 {\n\t\tif config.UseCacheFallback {\n\t\t\tWarnf(\"WARN: git repository \" + gitModule.git + \" does not exist or is unreachable at this moment!\")\n\t\t\tWarnf(\"WARN: Trying to use cache for \" + gitModule.git + \" git repository\")\n\t\t\treturn false\n\t\t} else if config.RetryGitCommands && retryCount > -1 {\n\t\t\tWarnf(\"WARN: git command failed: \" + gitCmd + \" deleting local cached repository and retrying...\")\n\t\t\tpurgeDir(workDir, \"doMirrorOrUpdate, because git command failed, retrying\")\n\t\t\treturn doMirrorOrUpdate(gitModule, workDir, retryCount-1)\n\t\t}\n\t\tWarnf(\"WARN: git repository \" + gitModule.git + \" does not exist or is unreachable at this moment!\")\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc syncToModuleDir(gitModule GitModule, srcDir string, targetDir string, correspondingPuppetEnvironment string) bool {\n\tstartedAt := time.Now()\n\tmutex.Lock()\n\tsyncGitCount++\n\tmutex.Unlock()\n\tif !isDir(srcDir) {\n\t\tif config.UseCacheFallback {\n\t\t\tFatalf(\"Could not find cached git module \" + srcDir)\n\t\t}\n\t}\n\tlogCmd := \"git --git-dir \" + srcDir + \" rev-parse --verify '\" + gitModule.tree\n\tif config.GitObjectSyntaxNotSupported != true {\n\t\tlogCmd = logCmd + \"^{object}'\"\n\t} else {\n\t\tlogCmd = logCmd + \"'\"\n\t}\n\n\tisControlRepo := strings.HasPrefix(srcDir, config.EnvCacheDir)\n\n\ter := executeCommand(logCmd, config.Timeout, gitModule.ignoreUnreachable)\n\thashFile := filepath.Join(targetDir, \".latest_commit\")\n\tdeployFile := filepath.Join(targetDir, \".g10k-deploy.json\")\n\tneedToSync := true\n\tif er.returnCode != 0 {\n\t\tif gitModule.ignoreUnreachable {\n\t\t\tDebugf(\"Failed to populate module \" + targetDir + \" but ignore-unreachable is set. Continuing...\")\n\t\t\tpurgeDir(targetDir, \"syncToModuleDir, because ignore-unreachable is set for this module\")\n\t\t}\n\t\treturn false\n\t}\n\n\tif len(er.output) > 0 {\n\t\tif strings.HasPrefix(srcDir, config.EnvCacheDir) {\n\t\t\tmutex.Lock()\n\t\t\tdesiredContent = append(desiredContent, deployFile)\n\t\t\tmutex.Unlock()\n\t\t\tif fileExists(deployFile) {\n\t\t\t\tdr := readDeployResultFile(deployFile)\n\t\t\t\tif dr.Signature == strings.TrimSuffix(er.output, \"\\n\") {\n\t\t\t\t\tneedToSync = false\n\t\t\t\t\t\/\/ need to get the content of the git repository to detect and purge unmanaged files\n\t\t\t\t\taddDesiredContent(srcDir, gitModule.tree, targetDir)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tDebugf(\"adding path to managed content: \" + targetDir)\n\t\t\tmutex.Lock()\n\t\t\tdesiredContent = append(desiredContent, hashFile)\n\t\t\tdesiredContent = append(desiredContent, targetDir)\n\t\t\tmutex.Unlock()\n\t\t\ttargetHashByte, _ := ioutil.ReadFile(hashFile)\n\t\t\ttargetHash := string(targetHashByte)\n\t\t\tif targetHash == strings.TrimSuffix(er.output, \"\\n\") {\n\t\t\t\tneedToSync = false\n\t\t\t\tmutex.Lock()\n\t\t\t\tunchangedModuleDirs = append(unchangedModuleDirs, targetDir)\n\t\t\t\tmutex.Unlock()\n\t\t\t\t\/\/Debugf(\"Skipping, because no diff found between \" + srcDir + \"(\" + er.output + \") and \" + targetDir + \"(\" + string(targetHash) + \")\")\n\t\t\t}\n\t\t}\n\n\t}\n\tif needToSync && er.returnCode == 0 {\n\t\tInfof(\"Need to sync \" + targetDir)\n\t\tmutex.Lock()\n\t\tneedSyncDirs = append(needSyncDirs, targetDir)\n\t\tif _, ok := needSyncEnvs[correspondingPuppetEnvironment]; !ok {\n\t\t\tneedSyncEnvs[correspondingPuppetEnvironment] = empty\n\t\t}\n\t\tneedSyncGitCount++\n\t\tmutex.Unlock()\n\n\t\tif !dryRun && !config.CloneGitModules || isControlRepo {\n\t\t\tif pfMode {\n\t\t\t\tpurgeDir(targetDir, \"git dir with changes in -puppetfile mode\")\n\t\t\t}\n\t\t\tcheckDirAndCreate(targetDir, \"git dir\")\n\t\t\tgitArchiveArgs := []string{\"--git-dir\", srcDir, \"archive\", gitModule.tree}\n\t\t\tcmd := exec.Command(\"git\", gitArchiveArgs...)\n\t\t\tDebugf(\"Executing git --git-dir \" + srcDir + \" archive \" + gitModule.tree)\n\t\t\tcmdOut, err := cmd.StdoutPipe()\n\t\t\tif err != nil {\n\t\t\t\tif !gitModule.ignoreUnreachable {\n\t\t\t\t\tInfof(\"Failed to populate module \" + targetDir + \" but ignore-unreachable is set. Continuing...\")\n\t\t\t\t} else {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tFatalf(\"syncToModuleDir(): Failed to execute command: git --git-dir \" + srcDir + \" archive \" + gitModule.tree + \" Error: \" + err.Error())\n\t\t\t}\n\t\t\tcmd.Start()\n\n\t\t\tbefore := time.Now()\n\t\t\tunTar(cmdOut, targetDir)\n\t\t\tduration := time.Since(before).Seconds()\n\t\t\tmutex.Lock()\n\t\t\tioGitTime += duration\n\t\t\tmutex.Unlock()\n\n\t\t\terr = cmd.Wait()\n\t\t\tif err != nil {\n\t\t\t\tFatalf(\"syncToModuleDir(): Failed to execute command: git --git-dir \" + srcDir + \" archive \" + gitModule.tree + \" Error: \" + err.Error())\n\t\t\t\t\/\/\"\\nIf you are using GitLab please ensure that you've added your deploy key to your repository.\" +\n\t\t\t\t\/\/\"\\nThe Puppet environment which is using this unresolveable repository is \" + correspondingPuppetEnvironment)\n\t\t\t}\n\n\t\t\tVerbosef(\"syncToModuleDir(): Executing git --git-dir \" + srcDir + \" archive \" + gitModule.tree + \" took \" + strconv.FormatFloat(duration, 'f', 5, 64) + \"s\")\n\n\t\t\ter = executeCommand(logCmd, config.Timeout, false)\n\t\t\tif er.returnCode != 0 {\n\t\t\t\tFatalf(\"executeCommand(): git command failed: \" + logCmd + \" \" + err.Error() + \"\\nOutput: \" + er.output)\n\t\t\t}\n\t\t\tif len(er.output) > 0 {\n\t\t\t\tcommitHash := strings.TrimSuffix(er.output, \"\\n\")\n\t\t\t\tif isControlRepo {\n\t\t\t\t\tDebugf(\"Writing to deploy file \" + deployFile)\n\t\t\t\t\tdr := DeployResult{\n\t\t\t\t\t\tName: gitModule.tree,\n\t\t\t\t\t\tSignature: commitHash,\n\t\t\t\t\t\tStartedAt: startedAt,\n\t\t\t\t\t}\n\t\t\t\t\twriteStructJSONFile(deployFile, dr)\n\t\t\t\t} else {\n\t\t\t\t\tDebugf(\"Writing hash \" + commitHash + \" from command \" + logCmd + \" to \" + hashFile)\n\t\t\t\t\tf, _ := os.Create(hashFile)\n\t\t\t\t\tdefer f.Close()\n\t\t\t\t\tf.WriteString(commitHash)\n\t\t\t\t\tf.Sync()\n\t\t\t\t}\n\n\t\t\t}\n\t\t} else if config.CloneGitModules {\n\t\t\treturn doMirrorOrUpdate(gitModule, targetDir, 0)\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ addDesiredContent takes the given git repository directory and the\n\/\/ relevant reference (branch, commit hash, tag) and adds its content to\n\/\/ the global desiredContent slice so that it doesn't get purged by g10k\nfunc addDesiredContent(gitDir string, tree string, targetDir string) {\n\ttreeCmd := \"git --git-dir \" + gitDir + \" ls-tree --full-tree -r -t --name-only \" + tree\n\ter := executeCommand(treeCmd, config.Timeout, false)\n\tfoundGitFiles := strings.Split(er.output, \"\\n\")\n\tmutex.Lock()\n\tfor _, desiredFile := range foundGitFiles[:len(foundGitFiles)-1] {\n\t\tdesiredContent = append(desiredContent, filepath.Join(targetDir, desiredFile))\n\n\t\t\/\/ because we're using -r which prints git managed files in subfolders like this: foo\/test3\n\t\t\/\/ we have to split up the given string and add the possible parent directories (foo in this case)\n\t\tparentDirs := strings.Split(desiredFile, \"\/\")\n\t\tif len(parentDirs) > 1 {\n\t\t\tfor _, dir := range parentDirs[:len(parentDirs)-1] {\n\t\t\t\tdesiredContent = append(desiredContent, filepath.Join(targetDir, dir))\n\t\t\t}\n\t\t}\n\t}\n\tmutex.Unlock()\n\n}\n<commit_msg>add error output<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/xorpaul\/uiprogress\"\n)\n\nfunc resolveGitRepositories(uniqueGitModules map[string]GitModule) {\n\tdefer timeTrack(time.Now(), funcName())\n\tif len(uniqueGitModules) <= 0 {\n\t\tDebugf(\"uniqueGitModules[] is empty, skipping...\")\n\t\treturn\n\t}\n\tbar := uiprogress.AddBar(len(uniqueGitModules)).AppendCompleted().PrependElapsed()\n\tbar.PrependFunc(func(b *uiprogress.Bar) string {\n\t\treturn fmt.Sprintf(\"Resolving Git modules (%d\/%d)\", b.Current(), len(uniqueGitModules))\n\t})\n\t\/\/ Dummy channel to coordinate the number of concurrent goroutines.\n\t\/\/ This channel should be buffered otherwise we will be immediately blocked\n\t\/\/ when trying to fill it.\n\n\tDebugf(\"Resolving \" + strconv.Itoa(len(uniqueGitModules)) + \" Git modules with \" + strconv.Itoa(config.Maxworker) + \" workers\")\n\tconcurrentGoroutines := make(chan struct{}, config.Maxworker)\n\t\/\/ Fill the dummy channel with config.Maxworker empty struct.\n\tfor i := 0; i < config.Maxworker; i++ {\n\t\tconcurrentGoroutines <- struct{}{}\n\t}\n\n\t\/\/ The done channel indicates when a single goroutine has finished its job.\n\tdone := make(chan bool)\n\t\/\/ The waitForAllJobs channel allows the main program\n\t\/\/ to wait until we have indeed done all the jobs.\n\twaitForAllJobs := make(chan bool)\n\t\/\/ Collect all the jobs, and since the job is finished, we can\n\t\/\/ release another spot for a goroutine.\n\tgo func() {\n\t\tfor _, gm := range uniqueGitModules {\n\t\t\tgo func(gm GitModule) {\n\t\t\t\t<-done\n\t\t\t\t\/\/ Say that another goroutine can now start.\n\t\t\t\tconcurrentGoroutines <- struct{}{}\n\t\t\t}(gm)\n\t\t}\n\t\t\/\/ We have collected all the jobs, the program can now terminate\n\t\twaitForAllJobs <- true\n\t}()\n\twg := sync.WaitGroup{}\n\twg.Add(len(uniqueGitModules))\n\n\tfor url, gm := range uniqueGitModules {\n\t\tDebugf(\"git repo url \" + url)\n\t\tprivateKey := gm.privateKey\n\t\tgo func(url string, gm GitModule, bar *uiprogress.Bar) {\n\t\t\t\/\/ Try to receive from the concurrentGoroutines channel. When we have something,\n\t\t\t\/\/ it means we can start a new goroutine because another one finished.\n\t\t\t\/\/ Otherwise, it will block the execution until an execution\n\t\t\t\/\/ spot is available.\n\t\t\t<-concurrentGoroutines\n\t\t\tdefer bar.Incr()\n\t\t\tdefer wg.Done()\n\n\t\t\tif len(gm.privateKey) > 0 {\n\t\t\t\tDebugf(\"git repo url \" + url + \" with ssh key \" + privateKey)\n\t\t\t} else {\n\t\t\t\tDebugf(\"git repo url \" + url + \" without ssh key\")\n\t\t\t}\n\n\t\t\t\/\/log.Println(config)\n\t\t\t\/\/ create save directory name from Git repo name\n\t\t\trepoDir := strings.Replace(strings.Replace(url, \"\/\", \"_\", -1), \":\", \"-\", -1)\n\t\t\tworkDir := filepath.Join(config.ModulesCacheDir, repoDir)\n\n\t\t\tsuccess := doMirrorOrUpdate(gm, workDir, 0)\n\t\t\tif !success && config.UseCacheFallback == false {\n\t\t\t\tFatalf(\"Fatal: Could not reach git repository \" + url)\n\t\t\t}\n\t\t\t\/\/\tdoCloneOrPull(source, workDir, targetDir, sa.Remote, branch, sa.PrivateKey)\n\t\t\tdone <- true\n\t\t}(url, gm, bar)\n\t}\n\n\t\/\/ Wait for all jobs to finish\n\t<-waitForAllJobs\n\twg.Wait()\n}\n\nfunc doMirrorOrUpdate(gitModule GitModule, workDir string, retryCount int) bool {\n\tisControlRepo := strings.HasPrefix(workDir, config.EnvCacheDir)\n\tisInModulesCacheDir := strings.HasPrefix(workDir, config.ModulesCacheDir)\n\n\tneedSSHKey := true\n\tif len(gitModule.privateKey) == 0 || strings.Contains(gitModule.git, \"github.com\") {\n\t\tif isControlRepo {\n\t\t\tneedSSHKey = true\n\t\t} else {\n\t\t\tneedSSHKey = false\n\t\t}\n\t}\n\ter := ExecResult{}\n\tgitCmd := \"git clone --mirror \" + gitModule.git + \" \" + workDir\n\tif config.CloneGitModules && !isControlRepo && !isInModulesCacheDir {\n\t\t\/\/fmt.Printf(\"%+v\\n\", gitModule)\n\t\tgitCmd = \"git clone --single-branch --branch \" + gitModule.tree + \" \" + gitModule.git + \" \" + workDir\n\t}\n\tif isDir(workDir) {\n\t\tgitCmd = \"git --git-dir \" + workDir + \" remote update --prune\"\n\t}\n\n\tif needSSHKey {\n\t\ter = executeCommand(\"ssh-agent bash -c 'ssh-add \"+gitModule.privateKey+\"; \"+gitCmd+\"'\", config.Timeout, gitModule.ignoreUnreachable)\n\t} else {\n\t\ter = executeCommand(gitCmd, config.Timeout, gitModule.ignoreUnreachable)\n\t}\n\n\tif er.returnCode != 0 {\n\t\tif config.UseCacheFallback {\n\t\t\tWarnf(\"WARN: git repository \" + gitModule.git + \" does not exist or is unreachable at this moment!\")\n\t\t\tWarnf(\"WARN: Trying to use cache for \" + gitModule.git + \" git repository\")\n\t\t\treturn false\n\t\t} else if config.RetryGitCommands && retryCount > -1 {\n\t\t\tWarnf(\"WARN: git command failed: \" + gitCmd + \" deleting local cached repository and retrying...\")\n\t\t\tpurgeDir(workDir, \"doMirrorOrUpdate, because git command failed, retrying\")\n\t\t\treturn doMirrorOrUpdate(gitModule, workDir, retryCount-1)\n\t\t}\n\t\tWarnf(\"WARN: git repository \" + gitModule.git + \" does not exist or is unreachable at this moment! Error: \" + er.output)\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc syncToModuleDir(gitModule GitModule, srcDir string, targetDir string, correspondingPuppetEnvironment string) bool {\n\tstartedAt := time.Now()\n\tmutex.Lock()\n\tsyncGitCount++\n\tmutex.Unlock()\n\tif !isDir(srcDir) {\n\t\tif config.UseCacheFallback {\n\t\t\tFatalf(\"Could not find cached git module \" + srcDir)\n\t\t}\n\t}\n\tlogCmd := \"git --git-dir \" + srcDir + \" rev-parse --verify '\" + gitModule.tree\n\tif config.GitObjectSyntaxNotSupported != true {\n\t\tlogCmd = logCmd + \"^{object}'\"\n\t} else {\n\t\tlogCmd = logCmd + \"'\"\n\t}\n\n\tisControlRepo := strings.HasPrefix(srcDir, config.EnvCacheDir)\n\n\ter := executeCommand(logCmd, config.Timeout, gitModule.ignoreUnreachable)\n\thashFile := filepath.Join(targetDir, \".latest_commit\")\n\tdeployFile := filepath.Join(targetDir, \".g10k-deploy.json\")\n\tneedToSync := true\n\tif er.returnCode != 0 {\n\t\tif gitModule.ignoreUnreachable {\n\t\t\tDebugf(\"Failed to populate module \" + targetDir + \" but ignore-unreachable is set. Continuing...\")\n\t\t\tpurgeDir(targetDir, \"syncToModuleDir, because ignore-unreachable is set for this module\")\n\t\t}\n\t\treturn false\n\t}\n\n\tif len(er.output) > 0 {\n\t\tif strings.HasPrefix(srcDir, config.EnvCacheDir) {\n\t\t\tmutex.Lock()\n\t\t\tdesiredContent = append(desiredContent, deployFile)\n\t\t\tmutex.Unlock()\n\t\t\tif fileExists(deployFile) {\n\t\t\t\tdr := readDeployResultFile(deployFile)\n\t\t\t\tif dr.Signature == strings.TrimSuffix(er.output, \"\\n\") {\n\t\t\t\t\tneedToSync = false\n\t\t\t\t\t\/\/ need to get the content of the git repository to detect and purge unmanaged files\n\t\t\t\t\taddDesiredContent(srcDir, gitModule.tree, targetDir)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tDebugf(\"adding path to managed content: \" + targetDir)\n\t\t\tmutex.Lock()\n\t\t\tdesiredContent = append(desiredContent, hashFile)\n\t\t\tdesiredContent = append(desiredContent, targetDir)\n\t\t\tmutex.Unlock()\n\t\t\ttargetHashByte, _ := ioutil.ReadFile(hashFile)\n\t\t\ttargetHash := string(targetHashByte)\n\t\t\tif targetHash == strings.TrimSuffix(er.output, \"\\n\") {\n\t\t\t\tneedToSync = false\n\t\t\t\tmutex.Lock()\n\t\t\t\tunchangedModuleDirs = append(unchangedModuleDirs, targetDir)\n\t\t\t\tmutex.Unlock()\n\t\t\t\t\/\/Debugf(\"Skipping, because no diff found between \" + srcDir + \"(\" + er.output + \") and \" + targetDir + \"(\" + string(targetHash) + \")\")\n\t\t\t}\n\t\t}\n\n\t}\n\tif needToSync && er.returnCode == 0 {\n\t\tInfof(\"Need to sync \" + targetDir)\n\t\tmutex.Lock()\n\t\tneedSyncDirs = append(needSyncDirs, targetDir)\n\t\tif _, ok := needSyncEnvs[correspondingPuppetEnvironment]; !ok {\n\t\t\tneedSyncEnvs[correspondingPuppetEnvironment] = empty\n\t\t}\n\t\tneedSyncGitCount++\n\t\tmutex.Unlock()\n\n\t\tif !dryRun && !config.CloneGitModules || isControlRepo {\n\t\t\tif pfMode {\n\t\t\t\tpurgeDir(targetDir, \"git dir with changes in -puppetfile mode\")\n\t\t\t}\n\t\t\tcheckDirAndCreate(targetDir, \"git dir\")\n\t\t\tgitArchiveArgs := []string{\"--git-dir\", srcDir, \"archive\", gitModule.tree}\n\t\t\tcmd := exec.Command(\"git\", gitArchiveArgs...)\n\t\t\tDebugf(\"Executing git --git-dir \" + srcDir + \" archive \" + gitModule.tree)\n\t\t\tcmdOut, err := cmd.StdoutPipe()\n\t\t\tif err != nil {\n\t\t\t\tif !gitModule.ignoreUnreachable {\n\t\t\t\t\tInfof(\"Failed to populate module \" + targetDir + \" but ignore-unreachable is set. Continuing...\")\n\t\t\t\t} else {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tFatalf(\"syncToModuleDir(): Failed to execute command: git --git-dir \" + srcDir + \" archive \" + gitModule.tree + \" Error: \" + err.Error())\n\t\t\t}\n\t\t\tcmd.Start()\n\n\t\t\tbefore := time.Now()\n\t\t\tunTar(cmdOut, targetDir)\n\t\t\tduration := time.Since(before).Seconds()\n\t\t\tmutex.Lock()\n\t\t\tioGitTime += duration\n\t\t\tmutex.Unlock()\n\n\t\t\terr = cmd.Wait()\n\t\t\tif err != nil {\n\t\t\t\tFatalf(\"syncToModuleDir(): Failed to execute command: git --git-dir \" + srcDir + \" archive \" + gitModule.tree + \" Error: \" + err.Error())\n\t\t\t\t\/\/\"\\nIf you are using GitLab please ensure that you've added your deploy key to your repository.\" +\n\t\t\t\t\/\/\"\\nThe Puppet environment which is using this unresolveable repository is \" + correspondingPuppetEnvironment)\n\t\t\t}\n\n\t\t\tVerbosef(\"syncToModuleDir(): Executing git --git-dir \" + srcDir + \" archive \" + gitModule.tree + \" took \" + strconv.FormatFloat(duration, 'f', 5, 64) + \"s\")\n\n\t\t\ter = executeCommand(logCmd, config.Timeout, false)\n\t\t\tif er.returnCode != 0 {\n\t\t\t\tFatalf(\"executeCommand(): git command failed: \" + logCmd + \" \" + err.Error() + \"\\nOutput: \" + er.output)\n\t\t\t}\n\t\t\tif len(er.output) > 0 {\n\t\t\t\tcommitHash := strings.TrimSuffix(er.output, \"\\n\")\n\t\t\t\tif isControlRepo {\n\t\t\t\t\tDebugf(\"Writing to deploy file \" + deployFile)\n\t\t\t\t\tdr := DeployResult{\n\t\t\t\t\t\tName: gitModule.tree,\n\t\t\t\t\t\tSignature: commitHash,\n\t\t\t\t\t\tStartedAt: startedAt,\n\t\t\t\t\t}\n\t\t\t\t\twriteStructJSONFile(deployFile, dr)\n\t\t\t\t} else {\n\t\t\t\t\tDebugf(\"Writing hash \" + commitHash + \" from command \" + logCmd + \" to \" + hashFile)\n\t\t\t\t\tf, _ := os.Create(hashFile)\n\t\t\t\t\tdefer f.Close()\n\t\t\t\t\tf.WriteString(commitHash)\n\t\t\t\t\tf.Sync()\n\t\t\t\t}\n\n\t\t\t}\n\t\t} else if config.CloneGitModules {\n\t\t\treturn doMirrorOrUpdate(gitModule, targetDir, 0)\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ addDesiredContent takes the given git repository directory and the\n\/\/ relevant reference (branch, commit hash, tag) and adds its content to\n\/\/ the global desiredContent slice so that it doesn't get purged by g10k\nfunc addDesiredContent(gitDir string, tree string, targetDir string) {\n\ttreeCmd := \"git --git-dir \" + gitDir + \" ls-tree --full-tree -r -t --name-only \" + tree\n\ter := executeCommand(treeCmd, config.Timeout, false)\n\tfoundGitFiles := strings.Split(er.output, \"\\n\")\n\tmutex.Lock()\n\tfor _, desiredFile := range foundGitFiles[:len(foundGitFiles)-1] {\n\t\tdesiredContent = append(desiredContent, filepath.Join(targetDir, desiredFile))\n\n\t\t\/\/ because we're using -r which prints git managed files in subfolders like this: foo\/test3\n\t\t\/\/ we have to split up the given string and add the possible parent directories (foo in this case)\n\t\tparentDirs := strings.Split(desiredFile, \"\/\")\n\t\tif len(parentDirs) > 1 {\n\t\t\tfor _, dir := range parentDirs[:len(parentDirs)-1] {\n\t\t\t\tdesiredContent = append(desiredContent, filepath.Join(targetDir, dir))\n\t\t\t}\n\t\t}\n\t}\n\tmutex.Unlock()\n\n}\n<|endoftext|>"} {"text":"<commit_before>package git\n\n\/*\n#include <git2.h>\n#include <git2\/sys\/openssl.h>\n*\/\nimport \"C\"\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"runtime\"\n\t\"strings\"\n\t\"unsafe\"\n)\n\ntype ErrorClass int\n\nconst (\n\tErrClassNone ErrorClass = C.GITERR_NONE\n\tErrClassNoMemory ErrorClass = C.GITERR_NOMEMORY\n\tErrClassOs ErrorClass = C.GITERR_OS\n\tErrClassInvalid ErrorClass = C.GITERR_INVALID\n\tErrClassReference ErrorClass = C.GITERR_REFERENCE\n\tErrClassZlib ErrorClass = C.GITERR_ZLIB\n\tErrClassRepository ErrorClass = C.GITERR_REPOSITORY\n\tErrClassConfig ErrorClass = C.GITERR_CONFIG\n\tErrClassRegex ErrorClass = C.GITERR_REGEX\n\tErrClassOdb ErrorClass = C.GITERR_ODB\n\tErrClassIndex ErrorClass = C.GITERR_INDEX\n\tErrClassObject ErrorClass = C.GITERR_OBJECT\n\tErrClassNet ErrorClass = C.GITERR_NET\n\tErrClassTag ErrorClass = C.GITERR_TAG\n\tErrClassTree ErrorClass = C.GITERR_TREE\n\tErrClassIndexer ErrorClass = C.GITERR_INDEXER\n\tErrClassSSL ErrorClass = C.GITERR_SSL\n\tErrClassSubmodule ErrorClass = C.GITERR_SUBMODULE\n\tErrClassThread ErrorClass = C.GITERR_THREAD\n\tErrClassStash ErrorClass = C.GITERR_STASH\n\tErrClassCheckout ErrorClass = C.GITERR_CHECKOUT\n\tErrClassFetchHead ErrorClass = C.GITERR_FETCHHEAD\n\tErrClassMerge ErrorClass = C.GITERR_MERGE\n\tErrClassSsh ErrorClass = C.GITERR_SSH\n\tErrClassFilter ErrorClass = C.GITERR_FILTER\n\tErrClassRevert ErrorClass = C.GITERR_REVERT\n\tErrClassCallback ErrorClass = C.GITERR_CALLBACK\n\tErrClassRebase ErrorClass = C.GITERR_REBASE\n)\n\ntype ErrorCode int\n\nconst (\n\n\t\/\/ No error\n\tErrOk ErrorCode = C.GIT_OK\n\n\t\/\/ Generic error\n\tErrGeneric ErrorCode = C.GIT_ERROR\n\t\/\/ Requested object could not be found\n\tErrNotFound ErrorCode = C.GIT_ENOTFOUND\n\t\/\/ Object exists preventing operation\n\tErrExists ErrorCode = C.GIT_EEXISTS\n\t\/\/ More than one object matches\n\tErrAmbiguous ErrorCode = C.GIT_EAMBIGUOUS\n\t\/\/ (backwards compatibility misspelling)\n\tErrAmbigious ErrorCode = C.GIT_EAMBIGUOUS\n\t\/\/ Output buffer too short to hold data\n\tErrBuffs ErrorCode = C.GIT_EBUFS\n\n\t\/\/ GIT_EUSER is a special error that is never generated by libgit2\n\t\/\/ code. You can return it from a callback (e.g to stop an iteration)\n\t\/\/ to know that it was generated by the callback and not by libgit2.\n\tErrUser ErrorCode = C.GIT_EUSER\n\n\t\/\/ Operation not allowed on bare repository\n\tErrBareRepo ErrorCode = C.GIT_EBAREREPO\n\t\/\/ HEAD refers to branch with no commits\n\tErrUnbornBranch ErrorCode = C.GIT_EUNBORNBRANCH\n\t\/\/ Merge in progress prevented operation\n\tErrUnmerged ErrorCode = C.GIT_EUNMERGED\n\t\/\/ Reference was not fast-forwardable\n\tErrNonFastForward ErrorCode = C.GIT_ENONFASTFORWARD\n\t\/\/ Name\/ref spec was not in a valid format\n\tErrInvalidSpec ErrorCode = C.GIT_EINVALIDSPEC\n\t\/\/ Checkout conflicts prevented operation\n\tErrConflict ErrorCode = C.GIT_ECONFLICT\n\t\/\/ Lock file prevented operation\n\tErrLocked ErrorCode = C.GIT_ELOCKED\n\t\/\/ Reference value does not match expected\n\tErrModified ErrorCode = C.GIT_EMODIFIED\n\t\/\/ Authentication failed\n\tErrAuth ErrorCode = C.GIT_EAUTH\n\t\/\/ Server certificate is invalid\n\tErrCertificate ErrorCode = C.GIT_ECERTIFICATE\n\t\/\/ Patch\/merge has already been applied\n\tErrApplied ErrorCode = C.GIT_EAPPLIED\n\t\/\/ The requested peel operation is not possible\n\tErrPeel ErrorCode = C.GIT_EPEEL\n\t\/\/ Unexpected EOF\n\tErrEOF ErrorCode = C.GIT_EEOF\n\t\/\/ Uncommitted changes in index prevented operation\n\tErrUncommitted ErrorCode = C.GIT_EUNCOMMITTED\n\t\/\/ The operation is not valid for a directory\n\tErrDirectory ErrorCode = C.GIT_EDIRECTORY\n\t\/\/ A merge conflict exists and cannot continue\n\tErrMergeConflict ErrorCode = C.GIT_EMERGECONFLICT\n\n\t\/\/ Internal only\n\tErrPassthrough ErrorCode = C.GIT_PASSTHROUGH\n\t\/\/ Signals end of iteration with iterator\n\tErrIterOver ErrorCode = C.GIT_ITEROVER\n)\n\nvar (\n\tErrInvalid = errors.New(\"Invalid state for operation\")\n)\n\nvar pointerHandles *HandleList\n\nfunc init() {\n\tpointerHandles = NewHandleList()\n\n\tC.git_libgit2_init()\n\n\t\/\/ Due to the multithreaded nature of Go and its interaction with\n\t\/\/ calling C functions, we cannot work with a library that was not built\n\t\/\/ with multi-threading support. The most likely outcome is a segfault\n\t\/\/ or panic at an incomprehensible time, so let's make it easy by\n\t\/\/ panicking right here.\n\tif Features()&FeatureThreads == 0 {\n\t\tpanic(\"libgit2 was not built with threading support\")\n\t}\n\n\t\/\/ This is not something we should be doing, as we may be\n\t\/\/ stomping all over someone else's setup. The user should do\n\t\/\/ this themselves or use some binding\/wrapper which does it\n\t\/\/ in such a way that they can be sure they're the only ones\n\t\/\/ setting it up.\n\tC.git_openssl_set_locking()\n}\n\n\/\/ Shutdown frees all the resources acquired by libgit2. Make sure no\n\/\/ references to any git2go objects are live before calling this.\n\/\/ After this is called, invoking any function from this library will result in\n\/\/ undefined behavior, so make sure this is called carefully.\nfunc Shutdown() {\n\tpointerHandles.Clear()\n\n\tC.git_libgit2_shutdown()\n}\n\n\/\/ Oid represents the id for a Git object.\ntype Oid [20]byte\n\nfunc newOidFromC(coid *C.git_oid) *Oid {\n\tif coid == nil {\n\t\treturn nil\n\t}\n\n\toid := new(Oid)\n\tcopy(oid[0:20], C.GoBytes(unsafe.Pointer(coid), 20))\n\treturn oid\n}\n\nfunc NewOidFromBytes(b []byte) *Oid {\n\toid := new(Oid)\n\tcopy(oid[0:20], b[0:20])\n\treturn oid\n}\n\nfunc (oid *Oid) toC() *C.git_oid {\n\treturn (*C.git_oid)(unsafe.Pointer(oid))\n}\n\nfunc NewOid(s string) (*Oid, error) {\n\tif len(s) > C.GIT_OID_HEXSZ {\n\t\treturn nil, errors.New(\"string is too long for oid\")\n\t}\n\n\to := new(Oid)\n\n\tslice, error := hex.DecodeString(s)\n\tif error != nil {\n\t\treturn nil, error\n\t}\n\n\tif len(slice) != 20 {\n\t\treturn nil, &GitError{\"Invalid Oid\", ErrClassNone, ErrGeneric}\n\t}\n\n\tcopy(o[:], slice[:20])\n\treturn o, nil\n}\n\nfunc (oid *Oid) String() string {\n\treturn hex.EncodeToString(oid[:])\n}\n\nfunc (oid *Oid) Cmp(oid2 *Oid) int {\n\treturn bytes.Compare(oid[:], oid2[:])\n}\n\nfunc (oid *Oid) Copy() *Oid {\n\tret := *oid\n\treturn &ret\n}\n\nfunc (oid *Oid) Equal(oid2 *Oid) bool {\n\treturn *oid == *oid2\n}\n\nfunc (oid *Oid) IsZero() bool {\n\treturn *oid == Oid{}\n}\n\nfunc (oid *Oid) NCmp(oid2 *Oid, n uint) int {\n\treturn bytes.Compare(oid[:n], oid2[:n])\n}\n\nfunc ShortenOids(ids []*Oid, minlen int) (int, error) {\n\tshorten := C.git_oid_shorten_new(C.size_t(minlen))\n\tif shorten == nil {\n\t\tpanic(\"Out of memory\")\n\t}\n\tdefer C.git_oid_shorten_free(shorten)\n\n\tvar ret C.int\n\n\truntime.LockOSThread()\n\tdefer runtime.UnlockOSThread()\n\n\tfor _, id := range ids {\n\t\tbuf := make([]byte, 41)\n\t\tC.git_oid_fmt((*C.char)(unsafe.Pointer(&buf[0])), id.toC())\n\t\tbuf[40] = 0\n\t\tret = C.git_oid_shorten_add(shorten, (*C.char)(unsafe.Pointer(&buf[0])))\n\t\tif ret < 0 {\n\t\t\treturn int(ret), MakeGitError(ret)\n\t\t}\n\t}\n\truntime.KeepAlive(ids)\n\treturn int(ret), nil\n}\n\ntype GitError struct {\n\tMessage string\n\tClass ErrorClass\n\tCode ErrorCode\n}\n\nfunc (e GitError) Error() string {\n\treturn e.Message\n}\n\nfunc IsErrorClass(err error, c ErrorClass) bool {\n\n\tif err == nil {\n\t\treturn false\n\t}\n\tif gitError, ok := err.(*GitError); ok {\n\t\treturn gitError.Class == c\n\t}\n\treturn false\n}\n\nfunc IsErrorCode(err error, c ErrorCode) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\tif gitError, ok := err.(*GitError); ok {\n\t\treturn gitError.Code == c\n\t}\n\treturn false\n}\n\nfunc MakeGitError(errorCode C.int) error {\n\n\tvar errMessage string\n\tvar errClass ErrorClass\n\tif errorCode != C.GIT_ITEROVER {\n\t\terr := C.giterr_last()\n\t\tif err != nil {\n\t\t\terrMessage = C.GoString(err.message)\n\t\t\terrClass = ErrorClass(err.klass)\n\t\t} else {\n\t\t\terrClass = ErrClassInvalid\n\t\t}\n\t}\n\treturn &GitError{errMessage, errClass, ErrorCode(errorCode)}\n}\n\nfunc MakeGitError2(err int) error {\n\treturn MakeGitError(C.int(err))\n}\n\nfunc cbool(b bool) C.int {\n\tif b {\n\t\treturn C.int(1)\n\t}\n\treturn C.int(0)\n}\n\nfunc ucbool(b bool) C.uint {\n\tif b {\n\t\treturn C.uint(1)\n\t}\n\treturn C.uint(0)\n}\n\nfunc Discover(start string, across_fs bool, ceiling_dirs []string) (string, error) {\n\tceildirs := C.CString(strings.Join(ceiling_dirs, string(C.GIT_PATH_LIST_SEPARATOR)))\n\tdefer C.free(unsafe.Pointer(ceildirs))\n\n\tcstart := C.CString(start)\n\tdefer C.free(unsafe.Pointer(cstart))\n\n\tvar buf C.git_buf\n\tdefer C.git_buf_dispose(&buf)\n\n\truntime.LockOSThread()\n\tdefer runtime.UnlockOSThread()\n\n\tret := C.git_repository_discover(&buf, cstart, cbool(across_fs), ceildirs)\n\tif ret < 0 {\n\t\treturn \"\", MakeGitError(ret)\n\t}\n\n\treturn C.GoString(buf.ptr), nil\n}\n<commit_msg>Add a ReInit function (#647)<commit_after>package git\n\n\/*\n#include <git2.h>\n#include <git2\/sys\/openssl.h>\n*\/\nimport \"C\"\nimport (\n\t\"bytes\"\n\t\"encoding\/hex\"\n\t\"errors\"\n\t\"runtime\"\n\t\"strings\"\n\t\"unsafe\"\n)\n\ntype ErrorClass int\n\nconst (\n\tErrClassNone ErrorClass = C.GITERR_NONE\n\tErrClassNoMemory ErrorClass = C.GITERR_NOMEMORY\n\tErrClassOs ErrorClass = C.GITERR_OS\n\tErrClassInvalid ErrorClass = C.GITERR_INVALID\n\tErrClassReference ErrorClass = C.GITERR_REFERENCE\n\tErrClassZlib ErrorClass = C.GITERR_ZLIB\n\tErrClassRepository ErrorClass = C.GITERR_REPOSITORY\n\tErrClassConfig ErrorClass = C.GITERR_CONFIG\n\tErrClassRegex ErrorClass = C.GITERR_REGEX\n\tErrClassOdb ErrorClass = C.GITERR_ODB\n\tErrClassIndex ErrorClass = C.GITERR_INDEX\n\tErrClassObject ErrorClass = C.GITERR_OBJECT\n\tErrClassNet ErrorClass = C.GITERR_NET\n\tErrClassTag ErrorClass = C.GITERR_TAG\n\tErrClassTree ErrorClass = C.GITERR_TREE\n\tErrClassIndexer ErrorClass = C.GITERR_INDEXER\n\tErrClassSSL ErrorClass = C.GITERR_SSL\n\tErrClassSubmodule ErrorClass = C.GITERR_SUBMODULE\n\tErrClassThread ErrorClass = C.GITERR_THREAD\n\tErrClassStash ErrorClass = C.GITERR_STASH\n\tErrClassCheckout ErrorClass = C.GITERR_CHECKOUT\n\tErrClassFetchHead ErrorClass = C.GITERR_FETCHHEAD\n\tErrClassMerge ErrorClass = C.GITERR_MERGE\n\tErrClassSsh ErrorClass = C.GITERR_SSH\n\tErrClassFilter ErrorClass = C.GITERR_FILTER\n\tErrClassRevert ErrorClass = C.GITERR_REVERT\n\tErrClassCallback ErrorClass = C.GITERR_CALLBACK\n\tErrClassRebase ErrorClass = C.GITERR_REBASE\n)\n\ntype ErrorCode int\n\nconst (\n\n\t\/\/ No error\n\tErrOk ErrorCode = C.GIT_OK\n\n\t\/\/ Generic error\n\tErrGeneric ErrorCode = C.GIT_ERROR\n\t\/\/ Requested object could not be found\n\tErrNotFound ErrorCode = C.GIT_ENOTFOUND\n\t\/\/ Object exists preventing operation\n\tErrExists ErrorCode = C.GIT_EEXISTS\n\t\/\/ More than one object matches\n\tErrAmbiguous ErrorCode = C.GIT_EAMBIGUOUS\n\t\/\/ (backwards compatibility misspelling)\n\tErrAmbigious ErrorCode = C.GIT_EAMBIGUOUS\n\t\/\/ Output buffer too short to hold data\n\tErrBuffs ErrorCode = C.GIT_EBUFS\n\n\t\/\/ GIT_EUSER is a special error that is never generated by libgit2\n\t\/\/ code. You can return it from a callback (e.g to stop an iteration)\n\t\/\/ to know that it was generated by the callback and not by libgit2.\n\tErrUser ErrorCode = C.GIT_EUSER\n\n\t\/\/ Operation not allowed on bare repository\n\tErrBareRepo ErrorCode = C.GIT_EBAREREPO\n\t\/\/ HEAD refers to branch with no commits\n\tErrUnbornBranch ErrorCode = C.GIT_EUNBORNBRANCH\n\t\/\/ Merge in progress prevented operation\n\tErrUnmerged ErrorCode = C.GIT_EUNMERGED\n\t\/\/ Reference was not fast-forwardable\n\tErrNonFastForward ErrorCode = C.GIT_ENONFASTFORWARD\n\t\/\/ Name\/ref spec was not in a valid format\n\tErrInvalidSpec ErrorCode = C.GIT_EINVALIDSPEC\n\t\/\/ Checkout conflicts prevented operation\n\tErrConflict ErrorCode = C.GIT_ECONFLICT\n\t\/\/ Lock file prevented operation\n\tErrLocked ErrorCode = C.GIT_ELOCKED\n\t\/\/ Reference value does not match expected\n\tErrModified ErrorCode = C.GIT_EMODIFIED\n\t\/\/ Authentication failed\n\tErrAuth ErrorCode = C.GIT_EAUTH\n\t\/\/ Server certificate is invalid\n\tErrCertificate ErrorCode = C.GIT_ECERTIFICATE\n\t\/\/ Patch\/merge has already been applied\n\tErrApplied ErrorCode = C.GIT_EAPPLIED\n\t\/\/ The requested peel operation is not possible\n\tErrPeel ErrorCode = C.GIT_EPEEL\n\t\/\/ Unexpected EOF\n\tErrEOF ErrorCode = C.GIT_EEOF\n\t\/\/ Uncommitted changes in index prevented operation\n\tErrUncommitted ErrorCode = C.GIT_EUNCOMMITTED\n\t\/\/ The operation is not valid for a directory\n\tErrDirectory ErrorCode = C.GIT_EDIRECTORY\n\t\/\/ A merge conflict exists and cannot continue\n\tErrMergeConflict ErrorCode = C.GIT_EMERGECONFLICT\n\n\t\/\/ Internal only\n\tErrPassthrough ErrorCode = C.GIT_PASSTHROUGH\n\t\/\/ Signals end of iteration with iterator\n\tErrIterOver ErrorCode = C.GIT_ITEROVER\n)\n\nvar (\n\tErrInvalid = errors.New(\"Invalid state for operation\")\n)\n\nvar pointerHandles *HandleList\n\nfunc init() {\n\tinitLibGit2()\n}\n\nfunc initLibGit2() {\n\tpointerHandles = NewHandleList()\n\n\tC.git_libgit2_init()\n\n\t\/\/ Due to the multithreaded nature of Go and its interaction with\n\t\/\/ calling C functions, we cannot work with a library that was not built\n\t\/\/ with multi-threading support. The most likely outcome is a segfault\n\t\/\/ or panic at an incomprehensible time, so let's make it easy by\n\t\/\/ panicking right here.\n\tif Features()&FeatureThreads == 0 {\n\t\tpanic(\"libgit2 was not built with threading support\")\n\t}\n\n\t\/\/ This is not something we should be doing, as we may be\n\t\/\/ stomping all over someone else's setup. The user should do\n\t\/\/ this themselves or use some binding\/wrapper which does it\n\t\/\/ in such a way that they can be sure they're the only ones\n\t\/\/ setting it up.\n\tC.git_openssl_set_locking()\n}\n\n\/\/ Shutdown frees all the resources acquired by libgit2. Make sure no\n\/\/ references to any git2go objects are live before calling this.\n\/\/ After this is called, invoking any function from this library will result in\n\/\/ undefined behavior, so make sure this is called carefully.\nfunc Shutdown() {\n\tpointerHandles.Clear()\n\n\tC.git_libgit2_shutdown()\n}\n\n\/\/ ReInit reinitializes the global state, this is useful if the effective user\n\/\/ id has changed and you want to update the stored search paths for gitconfig\n\/\/ files. This function frees any references to objects, so it should be called\n\/\/ before any other functions are called.\nfunc ReInit() {\n\tShutdown()\n\tinitLibGit2()\n}\n\n\/\/ Oid represents the id for a Git object.\ntype Oid [20]byte\n\nfunc newOidFromC(coid *C.git_oid) *Oid {\n\tif coid == nil {\n\t\treturn nil\n\t}\n\n\toid := new(Oid)\n\tcopy(oid[0:20], C.GoBytes(unsafe.Pointer(coid), 20))\n\treturn oid\n}\n\nfunc NewOidFromBytes(b []byte) *Oid {\n\toid := new(Oid)\n\tcopy(oid[0:20], b[0:20])\n\treturn oid\n}\n\nfunc (oid *Oid) toC() *C.git_oid {\n\treturn (*C.git_oid)(unsafe.Pointer(oid))\n}\n\nfunc NewOid(s string) (*Oid, error) {\n\tif len(s) > C.GIT_OID_HEXSZ {\n\t\treturn nil, errors.New(\"string is too long for oid\")\n\t}\n\n\to := new(Oid)\n\n\tslice, error := hex.DecodeString(s)\n\tif error != nil {\n\t\treturn nil, error\n\t}\n\n\tif len(slice) != 20 {\n\t\treturn nil, &GitError{\"Invalid Oid\", ErrClassNone, ErrGeneric}\n\t}\n\n\tcopy(o[:], slice[:20])\n\treturn o, nil\n}\n\nfunc (oid *Oid) String() string {\n\treturn hex.EncodeToString(oid[:])\n}\n\nfunc (oid *Oid) Cmp(oid2 *Oid) int {\n\treturn bytes.Compare(oid[:], oid2[:])\n}\n\nfunc (oid *Oid) Copy() *Oid {\n\tret := *oid\n\treturn &ret\n}\n\nfunc (oid *Oid) Equal(oid2 *Oid) bool {\n\treturn *oid == *oid2\n}\n\nfunc (oid *Oid) IsZero() bool {\n\treturn *oid == Oid{}\n}\n\nfunc (oid *Oid) NCmp(oid2 *Oid, n uint) int {\n\treturn bytes.Compare(oid[:n], oid2[:n])\n}\n\nfunc ShortenOids(ids []*Oid, minlen int) (int, error) {\n\tshorten := C.git_oid_shorten_new(C.size_t(minlen))\n\tif shorten == nil {\n\t\tpanic(\"Out of memory\")\n\t}\n\tdefer C.git_oid_shorten_free(shorten)\n\n\tvar ret C.int\n\n\truntime.LockOSThread()\n\tdefer runtime.UnlockOSThread()\n\n\tfor _, id := range ids {\n\t\tbuf := make([]byte, 41)\n\t\tC.git_oid_fmt((*C.char)(unsafe.Pointer(&buf[0])), id.toC())\n\t\tbuf[40] = 0\n\t\tret = C.git_oid_shorten_add(shorten, (*C.char)(unsafe.Pointer(&buf[0])))\n\t\tif ret < 0 {\n\t\t\treturn int(ret), MakeGitError(ret)\n\t\t}\n\t}\n\truntime.KeepAlive(ids)\n\treturn int(ret), nil\n}\n\ntype GitError struct {\n\tMessage string\n\tClass ErrorClass\n\tCode ErrorCode\n}\n\nfunc (e GitError) Error() string {\n\treturn e.Message\n}\n\nfunc IsErrorClass(err error, c ErrorClass) bool {\n\n\tif err == nil {\n\t\treturn false\n\t}\n\tif gitError, ok := err.(*GitError); ok {\n\t\treturn gitError.Class == c\n\t}\n\treturn false\n}\n\nfunc IsErrorCode(err error, c ErrorCode) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\tif gitError, ok := err.(*GitError); ok {\n\t\treturn gitError.Code == c\n\t}\n\treturn false\n}\n\nfunc MakeGitError(errorCode C.int) error {\n\n\tvar errMessage string\n\tvar errClass ErrorClass\n\tif errorCode != C.GIT_ITEROVER {\n\t\terr := C.giterr_last()\n\t\tif err != nil {\n\t\t\terrMessage = C.GoString(err.message)\n\t\t\terrClass = ErrorClass(err.klass)\n\t\t} else {\n\t\t\terrClass = ErrClassInvalid\n\t\t}\n\t}\n\treturn &GitError{errMessage, errClass, ErrorCode(errorCode)}\n}\n\nfunc MakeGitError2(err int) error {\n\treturn MakeGitError(C.int(err))\n}\n\nfunc cbool(b bool) C.int {\n\tif b {\n\t\treturn C.int(1)\n\t}\n\treturn C.int(0)\n}\n\nfunc ucbool(b bool) C.uint {\n\tif b {\n\t\treturn C.uint(1)\n\t}\n\treturn C.uint(0)\n}\n\nfunc Discover(start string, across_fs bool, ceiling_dirs []string) (string, error) {\n\tceildirs := C.CString(strings.Join(ceiling_dirs, string(C.GIT_PATH_LIST_SEPARATOR)))\n\tdefer C.free(unsafe.Pointer(ceildirs))\n\n\tcstart := C.CString(start)\n\tdefer C.free(unsafe.Pointer(cstart))\n\n\tvar buf C.git_buf\n\tdefer C.git_buf_dispose(&buf)\n\n\truntime.LockOSThread()\n\tdefer runtime.UnlockOSThread()\n\n\tret := C.git_repository_discover(&buf, cstart, cbool(across_fs), ceildirs)\n\tif ret < 0 {\n\t\treturn \"\", MakeGitError(ret)\n\t}\n\n\treturn C.GoString(buf.ptr), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Gogs Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage git\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst _VERSION = \"0.6.6\"\n\nfunc Version() string {\n\treturn _VERSION\n}\n\nvar (\n\t\/\/ Debug enables verbose logging on everything.\n\t\/\/ This should be false in case Gogs starts in SSH mode.\n\tDebug = false\n\tPrefix = \"[git-module] \"\n)\n\nfunc log(format string, args ...interface{}) {\n\tif !Debug {\n\t\treturn\n\t}\n\n\tfmt.Print(Prefix)\n\tif len(args) == 0 {\n\t\tfmt.Println(format)\n\t} else {\n\t\tfmt.Printf(format+\"\\n\", args...)\n\t}\n}\n\nvar gitVersion string\n\n\/\/ Version returns current Git version from shell.\nfunc BinVersion() (string, error) {\n\tif len(gitVersion) > 0 {\n\t\treturn gitVersion, nil\n\t}\n\n\tstdout, err := NewCommand(\"version\").Run()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfields := strings.Fields(stdout)\n\tif len(fields) < 3 {\n\t\treturn \"\", fmt.Errorf(\"not enough output: %s\", stdout)\n\t}\n\n\t\/\/ Handle special case on Windows.\n\ti := strings.Index(fields[2], \"windows\")\n\tif i >= 1 {\n\t\tgitVersion = fields[2][:i-1]\n\t\treturn gitVersion, nil\n\t}\n\n\tgitVersion = fields[2]\n\treturn gitVersion, nil\n}\n\nfunc init() {\n\tBinVersion()\n}\n\n\/\/ Fsck verifies the connectivity and validity of the objects in the database\nfunc Fsck(repoPath string, timeout time.Duration, args ...string) error {\n\t\/\/ Make sure timeout makes sense.\n\tif timeout <= 0 {\n\t\ttimeout = -1\n\t}\n\t_, err := NewCommand(\"fsck\").AddArguments(args...).RunInDirTimeout(timeout, repoPath)\n\treturn err\n}\n<commit_msg>Bump version<commit_after>\/\/ Copyright 2015 The Gogs Authors. All rights reserved.\n\/\/ Use of this source code is governed by a MIT-style\n\/\/ license that can be found in the LICENSE file.\n\npackage git\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst _VERSION = \"0.6.7\"\n\nfunc Version() string {\n\treturn _VERSION\n}\n\nvar (\n\t\/\/ Debug enables verbose logging on everything.\n\t\/\/ This should be false in case Gogs starts in SSH mode.\n\tDebug = false\n\tPrefix = \"[git-module] \"\n)\n\nfunc log(format string, args ...interface{}) {\n\tif !Debug {\n\t\treturn\n\t}\n\n\tfmt.Print(Prefix)\n\tif len(args) == 0 {\n\t\tfmt.Println(format)\n\t} else {\n\t\tfmt.Printf(format+\"\\n\", args...)\n\t}\n}\n\nvar gitVersion string\n\n\/\/ Version returns current Git version from shell.\nfunc BinVersion() (string, error) {\n\tif len(gitVersion) > 0 {\n\t\treturn gitVersion, nil\n\t}\n\n\tstdout, err := NewCommand(\"version\").Run()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfields := strings.Fields(stdout)\n\tif len(fields) < 3 {\n\t\treturn \"\", fmt.Errorf(\"not enough output: %s\", stdout)\n\t}\n\n\t\/\/ Handle special case on Windows.\n\ti := strings.Index(fields[2], \"windows\")\n\tif i >= 1 {\n\t\tgitVersion = fields[2][:i-1]\n\t\treturn gitVersion, nil\n\t}\n\n\tgitVersion = fields[2]\n\treturn gitVersion, nil\n}\n\nfunc init() {\n\tBinVersion()\n}\n\n\/\/ Fsck verifies the connectivity and validity of the objects in the database\nfunc Fsck(repoPath string, timeout time.Duration, args ...string) error {\n\t\/\/ Make sure timeout makes sense.\n\tif timeout <= 0 {\n\t\ttimeout = -1\n\t}\n\t_, err := NewCommand(\"fsck\").AddArguments(args...).RunInDirTimeout(timeout, repoPath)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package issues\n\nimport (\n\t\/\/ Stdlib\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"regexp\"\n)\n\n\/\/ ReviewIssueCommonBody represents the issue body part that is shared by all issue types,\n\/\/ i.e. by both story review issues and commit review issues.\ntype ReviewIssueCommonBody struct {\n\t*CommitList\n\t*ReviewBlockerList\n\n\tUserContent string\n}\n\nfunc newReviewIssueCommonBody() *ReviewIssueCommonBody {\n\treturn &ReviewIssueCommonBody{\n\t\tCommitList: &CommitList{},\n\t\tReviewBlockerList: &ReviewBlockerList{},\n\t}\n}\n\n\/\/ Formatting ------------------------------------------------------------------\n\nconst userContentSeparator = \"----------\"\n\nvar reviewIssueCommonBodyTemplate = fmt.Sprintf(`The commits to be reviewed are following:\n{{range .CommitList.CommitItems}}- {{if .Reviewed}}[x]{{else}}[ ]{{end}} {{.CommitSHA}}: {{.CommitTitle}}\n{{end}}\n{{with .ReviewBlockerList.ReviewBlockerItems}}The following review blockers were opened by the reviewer:{{range .}}\n- {{if .Fixed}}[x]{{else}}[ ]{{end}} [blocker {{.BlockerNumber}}]({{.CommentURL}}) (commit {{.CommitSHA}}): {{.BlockerSummary}}{{end}}\n{{end}}\n%v\n{{if .UserContent}}{{.UserContent}}{{else}}\nThe content above was generated by SalsaFlow.\nYou can insert custom description here, but not above the separator.\n{{end}}`, userContentSeparator)\n\nfunc (body *ReviewIssueCommonBody) execTemplate(w io.Writer) {\n\texecTemplate(w, \"review issue common body\", reviewIssueCommonBodyTemplate, body)\n}\n\n\/\/ Parsing ---------------------------------------------------------------------\n\nvar (\n\tcommonBodyCommitItemRegexp = regexp.MustCompile(`^- \\[([ xX])\\] ([0-9a-f]+): (.+)$`)\n\tcommonBodyBlockerItemRegexp = regexp.MustCompile(`^- \\[([ xX])\\] \\[blocker ([0-9]+)\\]\\(([^)]+)\\) \\(commit ([0-9a-f]+)\\): (.+)$`)\n)\n\nfunc parseRemainingIssueBody(err *error, scanner *bodyScanner) *ReviewIssueCommonBody {\n\t\/\/ For optimization.\n\tif *err != nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Parse the review issue common body part.\n\t\/\/ We pass a pointer to error to the parsing functions,\n\t\/\/ so in case there is an error, it just falls through\n\t\/\/ so that the error value can be checked at the very end.\n\t\/\/\n\t\/\/ Also in case the user content separator is encountered,\n\t\/\/ it falls through and the user content is collected at the end.\n\n\t\/\/ Parse the commit list.\n\tcommitList := parseCommonBodyCommitList(err, scanner)\n\n\t\/\/ An empty line follows. It is already set as the current line\n\t\/\/ in the scanner since that is what marks the end of the commit list.\n\n\t\/\/ Parse the review blocker list.\n\treviewBlockerList := parseCommonBodyBlockerList(err, scanner)\n\n\t\/\/ And empty line follows. It is already set as the current line\n\t\/\/ in the scanner since that is what marks the end of the blocker list.\n\n\t\/\/ Parse the user content separator.\n\treadCommonBodySeparator(err, scanner)\n\n\t\/\/ Parse user content.\n\tuserContent := parseCommonBodyUserContent(err, scanner)\n\n\t\/\/ Check for errors.\n\tif *err != nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Return the common body object on success.\n\treturn &ReviewIssueCommonBody{\n\t\tCommitList: commitList,\n\t\tReviewBlockerList: reviewBlockerList,\n\t\tUserContent: userContent,\n\t}\n}\n\nfunc parseCommonBodyCommitList(err *error, scanner *bodyScanner) *CommitList {\n\tif *err != nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Read the list heading.\n\t_, _, ex := scanner.ReadLine()\n\tif ex != nil {\n\t\t*err = ex\n\t\treturn nil\n\t}\n\n\t\/\/ Read the list items.\n\tcommitList := &CommitList{}\n\tfor {\n\t\t\/\/ Read the next line.\n\t\tline, _, ex := scanner.ReadLine()\n\t\tif ex != nil {\n\t\t\t*err = ex\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ In case this is an empty line, we are done.\n\t\tif line == \"\" || line == userContentSeparator {\n\t\t\treturn commitList\n\t\t}\n\n\t\t\/\/ Parse the line as a commit item.\n\t\tmatch := commonBodyCommitItemRegexp.FindStringSubmatch(line)\n\t\tif len(match) == 0 {\n\t\t\t*err = scanner.CurrentLineInvalid()\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ Add the commit to the commit list.\n\t\treviewed := match[1] != \" \"\n\t\tcommitSHA, commitTitle := match[2], match[3]\n\t\tcommitList.AddCommit(reviewed, commitSHA, commitTitle)\n\t}\n}\n\nfunc parseCommonBodyBlockerList(err *error, scanner *bodyScanner) *ReviewBlockerList {\n\tif *err != nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Check the current line for the user content separator.\n\tline, _, _ := scanner.CurrentLine()\n\tif line == userContentSeparator {\n\t\t\/\/ Return empty blocker list.\n\t\treturn &ReviewBlockerList{}\n\t}\n\n\t\/\/ Drop the list heading line.\n\t\/\/ We can be reading the user content separator here as well.\n\tline, _, ex := scanner.ReadLine()\n\tif ex != nil {\n\t\t*err = ex\n\t\treturn nil\n\t}\n\tif line == userContentSeparator {\n\t\t\/\/ Return empty blocker list.\n\t\treturn &ReviewBlockerList{}\n\t}\n\n\t\/\/ Read the list items.\n\treviewBlockerList := &ReviewBlockerList{}\n\tfor {\n\t\t\/\/ Read the next line.\n\t\tline, _, ex := scanner.ReadLine()\n\t\tif ex != nil {\n\t\t\t*err = ex\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ In case this is an empty line, we are done.\n\t\tif line == \"\" || line == userContentSeparator {\n\t\t\treturn reviewBlockerList\n\t\t}\n\n\t\t\/\/ Parse the line as a review blocker item.\n\t\tmatch := commonBodyBlockerItemRegexp.FindStringSubmatch(line)\n\t\tif len(match) == 0 {\n\t\t\t*err = scanner.CurrentLineInvalid()\n\t\t\treturn nil\n\t\t}\n\n\t\t\/\/ Add the blocker to the blocker list.\n\t\tfixed := match[1] != \" \"\n\t\tcommentURL, commitSHA, blockerSummary := match[3], match[4], match[5]\n\t\treviewBlockerList.AddReviewBlocker(fixed, commentURL, commitSHA, blockerSummary)\n\t}\n}\n\nfunc readCommonBodySeparator(err *error, scanner *bodyScanner) {\n\tif *err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Check the current line for the user content separator.\n\tif line, _, _ := scanner.CurrentLine(); line == userContentSeparator {\n\t\treturn\n\t}\n\n\t\/\/ Read the input until the separator is encountered.\n\t\/\/ We can read the whole input here, but that is ok.\n\t\/\/ ReadLine returns io.EOF in that case and we return.\n\tfor {\n\t\tline, _, ex := scanner.ReadLine()\n\t\tif ex != nil {\n\t\t\t*err = ex\n\t\t\treturn\n\t\t}\n\n\t\tif line == userContentSeparator {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc parseCommonBodyUserContent(err *error, scanner *bodyScanner) string {\n\tif *err != nil {\n\t\treturn \"\"\n\t}\n\n\t\/\/ Prepare a buffer for the user content.\n\tbuffer := bytes.NewBuffer(nil)\n\n\twriteLine := func(line string) {\n\t\tbuffer.WriteString(\"\\n\")\n\t\tbuffer.WriteString(line)\n\t}\n\n\t\/\/ Read the user content.\n\t\/\/ Make sure there is a single leading empty line.\n\tdroppingEmptyLines := true\n\tfor {\n\t\tline, _, ex := scanner.ReadLine()\n\t\tswitch ex {\n\t\tcase nil:\n\t\t\t\/\/ Drop leading empty lines.\n\t\t\tif droppingEmptyLines {\n\t\t\t\tif line == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tdroppingEmptyLines = false\n\t\t\t}\n\n\t\t\t\/\/ Write the user content.\n\t\t\twriteLine(line)\n\n\t\tcase io.EOF:\n\t\t\t\/\/ In case the error is io.EOF, we are done.\n\t\t\treturn buffer.String()\n\n\t\tdefault:\n\t\t\t\/\/ Otherwise return the error.\n\t\t\t*err = ex\n\t\t\treturn \"\"\n\t\t}\n\t}\n}\n<commit_msg>github\/issues: Fix review issue parser<commit_after>package issues\n\nimport (\n\t\/\/ Stdlib\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"regexp\"\n)\n\n\/\/ ReviewIssueCommonBody represents the issue body part that is shared by all issue types,\n\/\/ i.e. by both story review issues and commit review issues.\ntype ReviewIssueCommonBody struct {\n\t*CommitList\n\t*ReviewBlockerList\n\n\tUserContent string\n}\n\nfunc newReviewIssueCommonBody() *ReviewIssueCommonBody {\n\treturn &ReviewIssueCommonBody{\n\t\tCommitList: &CommitList{},\n\t\tReviewBlockerList: &ReviewBlockerList{},\n\t}\n}\n\n\/\/ Formatting ------------------------------------------------------------------\n\nconst userContentSeparator = \"----------\"\n\nvar reviewIssueCommonBodyTemplate = fmt.Sprintf(`The commits to be reviewed are following:\n{{range .CommitList.CommitItems}}- {{if .Reviewed}}[x]{{else}}[ ]{{end}} {{.CommitSHA}}: {{.CommitTitle}}\n{{end}}\n{{with .ReviewBlockerList.ReviewBlockerItems}}The following review blockers were opened by the reviewer:{{range .}}\n- {{if .Fixed}}[x]{{else}}[ ]{{end}} [blocker {{.BlockerNumber}}]({{.CommentURL}}) (commit {{.CommitSHA}}): {{.BlockerSummary}}{{end}}\n{{end}}\n%v\n{{if .UserContent}}{{.UserContent}}{{else}}\nThe content above was generated by SalsaFlow.\nYou can insert custom description here, but not above the separator.\n{{end}}`, userContentSeparator)\n\nfunc (body *ReviewIssueCommonBody) execTemplate(w io.Writer) {\n\texecTemplate(w, \"review issue common body\", reviewIssueCommonBodyTemplate, body)\n}\n\n\/\/ Parsing ---------------------------------------------------------------------\n\nvar (\n\tcommonBodyCommitItemRegexp = regexp.MustCompile(`^- \\[([ xX])\\] ([0-9a-f]+): (.+)$`)\n\tcommonBodyBlockerItemRegexp = regexp.MustCompile(`^- \\[([ xX])\\] \\[blocker ([0-9]+)\\]\\(([^)]+)\\) \\(commit ([0-9a-f]+)\\): (.+)$`)\n)\n\nfunc parseRemainingIssueBody(err *error, scanner *bodyScanner) *ReviewIssueCommonBody {\n\t\/\/ For optimization.\n\tif *err != nil {\n\t\treturn nil\n\t}\n\n\t\/\/ Parse the review issue common body part.\n\t\/\/ We pass a pointer to error to the parsing functions,\n\t\/\/ so in case there is an error, it just falls through\n\t\/\/ so that the error value can be checked at the very end.\n\t\/\/\n\t\/\/ Also in case the user content separator is encountered,\n\t\/\/ it falls through and the user content is collected at the end.\n\n\t\/\/ Parse the commit list.\n\tcommitList := parseCommonBodyCommitList(err, scanner)\n\n\t\/\/ An empty line follows. It is already set as the current line\n\t\/\/ in the scanner since that is what marks the end of the commit list.\n\n\t\/\/ Parse the review blocker list.\n\treviewBlockerList := parseCommonBodyBlockerList(err, scanner)\n\n\t\/\/ And empty line follows. It is already set as the current line\n\t\/\/ in the scanner since that is what marks the end of the blocker list.\n\n\t\/\/ Parse the user content separator.\n\treadCommonBodySeparator(err, scanner)\n\n\t\/\/ Parse user content.\n\tuserContent := parseCommonBodyUserContent(err, scanner)\n\n\t\/\/ Check for errors, but ignore io.EOF since that is ok.\n\t\/\/ The parsing functions return some reasonable empty values\n\t\/\/ even when there is an error.\n\tif ex := *err; ex != nil {\n\t\tif ex != io.EOF {\n\t\t\treturn nil\n\t\t}\n\t\t*err = nil\n\t}\n\n\t\/\/ Return the common body object on success.\n\treturn &ReviewIssueCommonBody{\n\t\tCommitList: commitList,\n\t\tReviewBlockerList: reviewBlockerList,\n\t\tUserContent: userContent,\n\t}\n}\n\nfunc parseCommonBodyCommitList(err *error, scanner *bodyScanner) *CommitList {\n\temptyList := &CommitList{}\n\n\tif *err != nil {\n\t\treturn emptyList\n\t}\n\n\t\/\/ Read the list heading.\n\t_, _, ex := scanner.ReadLine()\n\tif ex != nil {\n\t\t*err = ex\n\t\treturn emptyList\n\t}\n\n\t\/\/ Read the list items.\n\tcommitList := &CommitList{}\n\tfor {\n\t\t\/\/ Read the next line.\n\t\tline, _, ex := scanner.ReadLine()\n\t\tif ex != nil {\n\t\t\t*err = ex\n\t\t\treturn commitList\n\t\t}\n\n\t\t\/\/ In case this is an empty line, we are done.\n\t\tif line == \"\" || line == userContentSeparator {\n\t\t\treturn commitList\n\t\t}\n\n\t\t\/\/ Parse the line as a commit item.\n\t\tmatch := commonBodyCommitItemRegexp.FindStringSubmatch(line)\n\t\tif len(match) == 0 {\n\t\t\t*err = scanner.CurrentLineInvalid()\n\t\t\treturn commitList\n\t\t}\n\n\t\t\/\/ Add the commit to the commit list.\n\t\treviewed := match[1] != \" \"\n\t\tcommitSHA, commitTitle := match[2], match[3]\n\t\tcommitList.AddCommit(reviewed, commitSHA, commitTitle)\n\t}\n}\n\nfunc parseCommonBodyBlockerList(err *error, scanner *bodyScanner) *ReviewBlockerList {\n\temptyList := &ReviewBlockerList{}\n\n\tif *err != nil {\n\t\treturn emptyList\n\t}\n\n\t\/\/ Check the current line for the user content separator.\n\tline, _, _ := scanner.CurrentLine()\n\tif line == userContentSeparator {\n\t\treturn emptyList\n\t}\n\n\t\/\/ Drop the list heading line.\n\t\/\/ We can be reading the user content separator here as well.\n\tline, _, ex := scanner.ReadLine()\n\tif ex != nil {\n\t\t*err = ex\n\t\treturn emptyList\n\t}\n\tif line == userContentSeparator {\n\t\treturn emptyList\n\t}\n\n\t\/\/ Read the list items.\n\treviewBlockerList := &ReviewBlockerList{}\n\tfor {\n\t\t\/\/ Read the next line.\n\t\tline, _, ex := scanner.ReadLine()\n\t\tif ex != nil {\n\t\t\t*err = ex\n\t\t\treturn reviewBlockerList\n\t\t}\n\n\t\t\/\/ In case this is an empty line, we are done.\n\t\tif line == \"\" || line == userContentSeparator {\n\t\t\treturn reviewBlockerList\n\t\t}\n\n\t\t\/\/ Parse the line as a review blocker item.\n\t\tmatch := commonBodyBlockerItemRegexp.FindStringSubmatch(line)\n\t\tif len(match) == 0 {\n\t\t\t*err = scanner.CurrentLineInvalid()\n\t\t\treturn reviewBlockerList\n\t\t}\n\n\t\t\/\/ Add the blocker to the blocker list.\n\t\tfixed := match[1] != \" \"\n\t\tcommentURL, commitSHA, blockerSummary := match[3], match[4], match[5]\n\t\treviewBlockerList.AddReviewBlocker(fixed, commentURL, commitSHA, blockerSummary)\n\t}\n}\n\nfunc readCommonBodySeparator(err *error, scanner *bodyScanner) {\n\tif *err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Check the current line for the user content separator.\n\tif line, _, _ := scanner.CurrentLine(); line == userContentSeparator {\n\t\treturn\n\t}\n\n\t\/\/ Read the input until the separator is encountered.\n\t\/\/ We can read the whole input here, but that is ok.\n\t\/\/ ReadLine returns io.EOF in that case and we return.\n\tfor {\n\t\tline, _, ex := scanner.ReadLine()\n\t\tif ex != nil {\n\t\t\t*err = ex\n\t\t\treturn\n\t\t}\n\n\t\tif line == userContentSeparator {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc parseCommonBodyUserContent(err *error, scanner *bodyScanner) string {\n\tif *err != nil {\n\t\treturn \"\"\n\t}\n\n\t\/\/ Prepare a buffer for the user content.\n\tbuffer := bytes.NewBuffer(nil)\n\n\twriteLine := func(line string) {\n\t\tbuffer.WriteString(\"\\n\")\n\t\tbuffer.WriteString(line)\n\t}\n\n\t\/\/ Read the user content.\n\t\/\/ Make sure there is a single leading empty line.\n\tdroppingEmptyLines := true\n\tfor {\n\t\tline, _, ex := scanner.ReadLine()\n\t\tswitch ex {\n\t\tcase nil:\n\t\t\t\/\/ Drop leading empty lines.\n\t\t\tif droppingEmptyLines {\n\t\t\t\tif line == \"\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tdroppingEmptyLines = false\n\t\t\t}\n\n\t\t\t\/\/ Write the user content.\n\t\t\twriteLine(line)\n\n\t\tcase io.EOF:\n\t\t\t\/\/ In case the error is io.EOF, we are done.\n\t\t\treturn buffer.String()\n\n\t\tdefault:\n\t\t\t\/\/ Otherwise return the error.\n\t\t\t*err = ex\n\t\t\treturn \"\"\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package gocui\n\nimport (\n\t\"errors\"\n\n\t\"github.com\/jroimartin\/termbox-go\"\n)\n\nvar ErrorQuit = errors.New(\"quit\")\n\ntype Gui struct {\n\tevents chan termbox.Event\n\tviews []*View\n\tcurrentView *View\n\tBgColor, FgColor termbox.Attribute\n}\n\nfunc NewGui() (g *Gui) {\n\treturn &Gui{}\n}\n\nfunc (g *Gui) Init() (err error) {\n\tg.events = make(chan termbox.Event, 20)\n\tg.BgColor = termbox.ColorWhite\n\tg.FgColor = termbox.ColorBlack\n\treturn termbox.Init()\n}\n\nfunc (g *Gui) Close() {\n\ttermbox.Close()\n}\n\nfunc (g *Gui) Size() (x, y int) {\n\treturn termbox.Size()\n}\n\nfunc (g *Gui) AddView(name string, x0, y0, x1, y1 int) (v *View, err error) {\n\tmaxX, maxY := termbox.Size()\n\n\tif x0 < -1 || y0 < -1 || x1 < -1 || y1 < -1 ||\n\t\tx0 > maxX || y0 > maxY || x1 > maxX || y1 > maxY ||\n\t\tx0 >= x1 || y0 >= y1 {\n\t\treturn nil, errors.New(\"invalid points\")\n\t}\n\n\tfor _, v := range g.views {\n\t\tif name == v.Name {\n\t\t\treturn nil, errors.New(\"invalid name\")\n\t\t}\n\t}\n\n\tv = NewView(name, x0, y0, x1, y1)\n\tg.views = append(g.views, v)\n\treturn v, nil\n}\n\nfunc (g *Gui) MainLoop() (err error) {\n\tgo func() {\n\t\tfor {\n\t\t\tg.events <- termbox.PollEvent()\n\t\t}\n\t}()\n\n\tif err := g.resize(); err != nil {\n\t\treturn err\n\t}\n\tif err := g.draw(); err != nil {\n\t\treturn err\n\t}\n\t\/\/ TODO: Set initial cursor position\n\t\/\/termbox.SetCursor(10, 10)\n\ttermbox.Flush()\n\n\tfor {\n\t\tselect {\n\t\tcase ev := <-g.events:\n\t\t\tif err := g.handleEvent(&ev); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := g.consumeevents(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := g.draw(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttermbox.Flush()\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (g *Gui) SetCell(x, y int, ch rune) (err error) {\n\tmaxX, maxY := termbox.Size()\n\tif x < 0 || y < 0 || x >= maxX || y >= maxY {\n\t\treturn errors.New(\"invalid point\")\n\t}\n\ttermbox.SetCell(x, y, ch, g.FgColor, g.BgColor)\n\treturn nil\n}\n\nfunc (g *Gui) GetCell(x, y int) (ch rune, err error) {\n\tmaxX, maxY := termbox.Size()\n\tif x < 0 || y < 0 || x >= maxX || y >= maxY {\n\t\treturn 0, errors.New(\"invalid point\")\n\t}\n\tc := termbox.CellBuffer()[y*maxX+x]\n\treturn c.Ch, nil\n}\n\nfunc (g *Gui) consumeevents() (err error) {\n\tfor {\n\t\tselect {\n\t\tcase ev := <-g.events:\n\t\t\tif err := g.handleEvent(&ev); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (g *Gui) handleEvent(ev *termbox.Event) (err error) {\n\tswitch ev.Type {\n\tcase termbox.EventKey:\n\t\treturn g.onKey(ev)\n\tcase termbox.EventResize:\n\t\treturn g.resize()\n\tcase termbox.EventError:\n\t\treturn ev.Err\n\tdefault:\n\t\treturn nil\n\t}\n}\n\nfunc (g *Gui) draw() (err error) {\n\tfor _, v := range g.views {\n\t\tif err := g.drawView(v); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (g *Gui) drawView(v *View) (err error) {\n\treturn nil\n}\n\nfunc (g *Gui) resize() (err error) {\n\ttermbox.Clear(termbox.ColorDefault, termbox.ColorDefault)\n\tif err := g.resizeViews(); err != nil {\n\t\treturn err\n\t}\n\tif err := g.drawFrames(); err != nil {\n\t\treturn err\n\t}\n\tif err := g.drawIntersections(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}\n\nfunc (g *Gui) drawFrames() (err error) {\n\tmaxX, maxY := termbox.Size()\n\tfor _, v := range g.views {\n\t\tfor x := v.X0 + 1; x < v.X1; x++ {\n\t\t\tif v.Y0 != -1 {\n\t\t\t\tg.SetCell(x, v.Y0, '─')\n\t\t\t}\n\t\t\tif v.Y1 != maxY {\n\t\t\t\tg.SetCell(x, v.Y1, '─')\n\t\t\t}\n\t\t}\n\t\tfor y := v.Y0 + 1; y < v.Y1; y++ {\n\t\t\tif v.X0 != -1 {\n\t\t\t\tg.SetCell(v.X0, y, '│')\n\t\t\t}\n\t\t\tif v.X1 != maxX {\n\t\t\t\tg.SetCell(v.X1, y, '│')\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (g *Gui) drawIntersections() (err error) {\n\tfor _, v := range g.views {\n\t\tif ch, ok := g.getIntersectionRune(v.X0, v.Y0); ok {\n\t\t\tg.SetCell(v.X0, v.Y0, ch)\n\t\t}\n\t\tif ch, ok := g.getIntersectionRune(v.X0, v.Y1); ok {\n\t\t\tg.SetCell(v.X0, v.Y1, ch)\n\t\t}\n\t\tif ch, ok := g.getIntersectionRune(v.X1, v.Y0); ok {\n\t\t\tg.SetCell(v.X1, v.Y0, ch)\n\t\t}\n\t\tif ch, ok := g.getIntersectionRune(v.X1, v.Y1); ok {\n\t\t\tg.SetCell(v.X1, v.Y1, ch)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (g *Gui) getIntersectionRune(x, y int) (ch rune, ok bool) {\n\tmaxX, maxY := termbox.Size()\n\tif x < 0 || y < 0 || x >= maxX || y >= maxY {\n\t\treturn 0, false\n\t}\n\n\t_chTop, _ := g.GetCell(x, y-1)\n\tchTop := verticalRune(_chTop)\n\t_chBottom, _ := g.GetCell(x, y+1)\n\tchBottom := verticalRune(_chBottom)\n\t_chLeft, _ := g.GetCell(x-1, y)\n\tchLeft := horizontalRune(_chLeft)\n\t_chRight, _ := g.GetCell(x+1, y)\n\tchRight := horizontalRune(_chRight)\n\n\tswitch {\n\tcase !chTop && chBottom && !chLeft && chRight:\n\t\tch = '┌'\n\tcase !chTop && chBottom && chLeft && !chRight:\n\t\tch = '┐'\n\tcase chTop && !chBottom && !chLeft && chRight:\n\t\tch = '└'\n\tcase chTop && !chBottom && chLeft && !chRight:\n\t\tch = '┘'\n\tcase chTop && chBottom && chLeft && chRight:\n\t\tch = '┼'\n\tcase chTop && chBottom && !chLeft && chRight:\n\t\tch = '├'\n\tcase chTop && chBottom && chLeft && !chRight:\n\t\tch = '┤'\n\tcase !chTop && chBottom && chLeft && chRight:\n\t\tch = '┬'\n\tcase chTop && !chBottom && chLeft && chRight:\n\t\tch = '┴'\n\tdefault:\n\t\treturn 0, false\n\t}\n\treturn ch, true\n}\n\nfunc verticalRune(ch rune) bool {\n\tif ch == '│' || ch == '┼' || ch == '├' || ch == '┤' {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc horizontalRune(ch rune) bool {\n\tif ch == '─' || ch == '┼' || ch == '┬' || ch == '┴' {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (g *Gui) resizeViews() (err error) {\n\treturn nil\n}\n\nfunc (g *Gui) onKey(ev *termbox.Event) (err error) {\n\tswitch ev.Key {\n\tcase termbox.KeyCtrlC:\n\t\treturn ErrorQuit\n\tdefault:\n\t\treturn nil\n\t}\n}\n<commit_msg>More simplifications<commit_after>package gocui\n\nimport (\n\t\"errors\"\n\n\t\"github.com\/jroimartin\/termbox-go\"\n)\n\nvar ErrorQuit = errors.New(\"quit\")\n\ntype Gui struct {\n\tevents chan termbox.Event\n\tviews []*View\n\tcurrentView *View\n\tBgColor, FgColor termbox.Attribute\n}\n\nfunc NewGui() (g *Gui) {\n\treturn &Gui{}\n}\n\nfunc (g *Gui) Init() (err error) {\n\tg.events = make(chan termbox.Event, 20)\n\tg.BgColor = termbox.ColorWhite\n\tg.FgColor = termbox.ColorBlack\n\treturn termbox.Init()\n}\n\nfunc (g *Gui) Close() {\n\ttermbox.Close()\n}\n\nfunc (g *Gui) Size() (x, y int) {\n\treturn termbox.Size()\n}\n\nfunc (g *Gui) AddView(name string, x0, y0, x1, y1 int) (v *View, err error) {\n\tmaxX, maxY := termbox.Size()\n\n\tif x0 < -1 || y0 < -1 || x1 < -1 || y1 < -1 ||\n\t\tx0 > maxX || y0 > maxY || x1 > maxX || y1 > maxY ||\n\t\tx0 >= x1 || y0 >= y1 {\n\t\treturn nil, errors.New(\"invalid points\")\n\t}\n\n\tfor _, v := range g.views {\n\t\tif name == v.Name {\n\t\t\treturn nil, errors.New(\"invalid name\")\n\t\t}\n\t}\n\n\tv = NewView(name, x0, y0, x1, y1)\n\tg.views = append(g.views, v)\n\treturn v, nil\n}\n\nfunc (g *Gui) MainLoop() (err error) {\n\tgo func() {\n\t\tfor {\n\t\t\tg.events <- termbox.PollEvent()\n\t\t}\n\t}()\n\n\tif err := g.resize(); err != nil {\n\t\treturn err\n\t}\n\tif err := g.draw(); err != nil {\n\t\treturn err\n\t}\n\t\/\/ TODO: Set initial cursor position\n\t\/\/termbox.SetCursor(10, 10)\n\ttermbox.Flush()\n\n\tfor {\n\t\tselect {\n\t\tcase ev := <-g.events:\n\t\t\tif err := g.handleEvent(&ev); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := g.consumeevents(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := g.draw(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\ttermbox.Flush()\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (g *Gui) SetCell(x, y int, ch rune) (err error) {\n\tmaxX, maxY := termbox.Size()\n\tif x < 0 || y < 0 || x >= maxX || y >= maxY {\n\t\treturn errors.New(\"invalid point\")\n\t}\n\ttermbox.SetCell(x, y, ch, g.FgColor, g.BgColor)\n\treturn nil\n}\n\nfunc (g *Gui) GetCell(x, y int) (ch rune, err error) {\n\tmaxX, maxY := termbox.Size()\n\tif x < 0 || y < 0 || x >= maxX || y >= maxY {\n\t\treturn 0, errors.New(\"invalid point\")\n\t}\n\tc := termbox.CellBuffer()[y*maxX+x]\n\treturn c.Ch, nil\n}\n\nfunc (g *Gui) consumeevents() (err error) {\n\tfor {\n\t\tselect {\n\t\tcase ev := <-g.events:\n\t\t\tif err := g.handleEvent(&ev); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\nfunc (g *Gui) handleEvent(ev *termbox.Event) (err error) {\n\tswitch ev.Type {\n\tcase termbox.EventKey:\n\t\treturn g.onKey(ev)\n\tcase termbox.EventResize:\n\t\treturn g.resize()\n\tcase termbox.EventError:\n\t\treturn ev.Err\n\tdefault:\n\t\treturn nil\n\t}\n}\n\nfunc (g *Gui) draw() (err error) {\n\tfor _, v := range g.views {\n\t\tif err := g.drawView(v); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (g *Gui) drawView(v *View) (err error) {\n\treturn nil\n}\n\nfunc (g *Gui) resize() (err error) {\n\ttermbox.Clear(termbox.ColorDefault, termbox.ColorDefault)\n\tif err := g.resizeViews(); err != nil {\n\t\treturn err\n\t}\n\tif err := g.drawFrames(); err != nil {\n\t\treturn err\n\t}\n\tif err := g.drawIntersections(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}\n\nfunc (g *Gui) drawFrames() (err error) {\n\tmaxX, maxY := termbox.Size()\n\tfor _, v := range g.views {\n\t\tfor x := v.X0 + 1; x < v.X1; x++ {\n\t\t\tif v.Y0 != -1 {\n\t\t\t\tg.SetCell(x, v.Y0, '─')\n\t\t\t}\n\t\t\tif v.Y1 != maxY {\n\t\t\t\tg.SetCell(x, v.Y1, '─')\n\t\t\t}\n\t\t}\n\t\tfor y := v.Y0 + 1; y < v.Y1; y++ {\n\t\t\tif v.X0 != -1 {\n\t\t\t\tg.SetCell(v.X0, y, '│')\n\t\t\t}\n\t\t\tif v.X1 != maxX {\n\t\t\t\tg.SetCell(v.X1, y, '│')\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (g *Gui) drawIntersections() (err error) {\n\tfor _, v := range g.views {\n\t\tif ch, ok := g.getIntersectionRune(v.X0, v.Y0); ok {\n\t\t\tg.SetCell(v.X0, v.Y0, ch)\n\t\t}\n\t\tif ch, ok := g.getIntersectionRune(v.X0, v.Y1); ok {\n\t\t\tg.SetCell(v.X0, v.Y1, ch)\n\t\t}\n\t\tif ch, ok := g.getIntersectionRune(v.X1, v.Y0); ok {\n\t\t\tg.SetCell(v.X1, v.Y0, ch)\n\t\t}\n\t\tif ch, ok := g.getIntersectionRune(v.X1, v.Y1); ok {\n\t\t\tg.SetCell(v.X1, v.Y1, ch)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (g *Gui) getIntersectionRune(x, y int) (ch rune, ok bool) {\n\tmaxX, maxY := termbox.Size()\n\tif x < 0 || y < 0 || x >= maxX || y >= maxY {\n\t\treturn 0, false\n\t}\n\n\tchTop, _ := g.GetCell(x, y-1)\n\ttop := verticalRune(chTop)\n\tchBottom, _ := g.GetCell(x, y+1)\n\tbottom := verticalRune(chBottom)\n\tchLeft, _ := g.GetCell(x-1, y)\n\tleft := horizontalRune(chLeft)\n\tchRight, _ := g.GetCell(x+1, y)\n\tright := horizontalRune(chRight)\n\n\tswitch {\n\tcase !top && bottom && !left && right:\n\t\tch = '┌'\n\tcase !top && bottom && left && !right:\n\t\tch = '┐'\n\tcase top && !bottom && !left && right:\n\t\tch = '└'\n\tcase top && !bottom && left && !right:\n\t\tch = '┘'\n\tcase top && bottom && left && right:\n\t\tch = '┼'\n\tcase top && bottom && !left && right:\n\t\tch = '├'\n\tcase top && bottom && left && !right:\n\t\tch = '┤'\n\tcase !top && bottom && left && right:\n\t\tch = '┬'\n\tcase top && !bottom && left && right:\n\t\tch = '┴'\n\tdefault:\n\t\treturn 0, false\n\t}\n\treturn ch, true\n}\n\nfunc verticalRune(ch rune) bool {\n\tif ch == '│' || ch == '┼' || ch == '├' || ch == '┤' {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc horizontalRune(ch rune) bool {\n\tif ch == '─' || ch == '┼' || ch == '┬' || ch == '┴' {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (g *Gui) resizeViews() (err error) {\n\treturn nil\n}\n\nfunc (g *Gui) onKey(ev *termbox.Event) (err error) {\n\tswitch ev.Key {\n\tcase termbox.KeyCtrlC:\n\t\treturn ErrorQuit\n\tdefault:\n\t\treturn nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/Masterminds\/glide\/cfg\"\n\tgpath \"github.com\/Masterminds\/glide\/path\"\n\t\"github.com\/bmatcuk\/doublestar\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar cmd = &cobra.Command{\n\tUse: \"glide-vc\",\n\tShort: \"glide vendor cleaner\",\n\tRun: glidevc,\n}\n\ntype options struct {\n\tdryrun bool\n\tonlyCode bool\n\tnoTests bool\n\tnoLegalFiles bool\n\tkeepPatterns []string\n}\n\nvar (\n\topts options\n\tcodeSuffixes = []string{\".go\", \".c\", \".s\", \".S\", \".cc\", \".cpp\", \".cxx\", \".h\", \".hh\", \".hpp\", \".hxx\"}\n)\n\nconst (\n\tgoTestSuffix = \"_test.go\"\n)\n\nfunc init() {\n\tcmd.PersistentFlags().BoolVar(&opts.dryrun, \"dryrun\", false, \"just output what will be removed\")\n\tcmd.PersistentFlags().BoolVar(&opts.onlyCode, \"only-code\", false, \"keep only go files (including go test files)\")\n\tcmd.PersistentFlags().BoolVar(&opts.noTests, \"no-tests\", false, \"remove also go test files (requires --only-code)\")\n\tcmd.PersistentFlags().BoolVar(&opts.noLegalFiles, \"no-legal-files\", false, \"remove also licenses and legal files\")\n\tcmd.PersistentFlags().StringSliceVar(&opts.keepPatterns, \"keep\", []string{}, \"A pattern to keep additional files inside needed packages. The pattern match will be relative to the deeper vendor dir. Supports double star (**) patterns. (see https:\/\/golang.org\/pkg\/path\/filepath\/#Match and https:\/\/github.com\/bmatcuk\/doublestar). Can be specified multiple times. For example to keep all the files with json extension use the '**\/*.json' pattern.\")\n}\n\nfunc main() {\n\tcmd.Execute()\n}\n\nfunc glidevc(cmd *cobra.Command, args []string) {\n\tif opts.noTests && !opts.onlyCode {\n\t\tfmt.Printf(\"--no-tests requires --only-code\")\n\t\tos.Exit(1)\n\t}\n\n\tif err := cleanup(\".\", opts); err != nil {\n\t\tfmt.Print(err)\n\t\tos.Exit(1)\n\t}\n\treturn\n}\n\nfunc cleanup(path string, opts options) error {\n\tlock, err := LoadGlideLockfile(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not load lockfile: %v\", err)\n\t}\n\n\t\/\/ The package list already have the path converted to the os specific\n\t\/\/ path separator, needed for future comparisons.\n\tpkgList := []string{}\n\trepoList := []string{}\n\t\/\/ TODO(sgotti) Should we also consider devImports?\n\tfor _, imp := range lock.Imports {\n\t\t\/\/ This converts pkg separator \"\/\" to os specific separator\n\t\trepoList = append(repoList, filepath.FromSlash(imp.Name))\n\n\t\tif len(imp.Subpackages) > 0 {\n\t\t\tfor _, sp := range imp.Subpackages {\n\t\t\t\t\/\/ This converts pkg separator \"\/\" to os specific separator\n\t\t\t\tpkgList = append(pkgList, filepath.Join(imp.Name, sp))\n\t\t\t}\n\t\t}\n\t\t\/\/ TODO(sgotti) we cannot skip the base import if it has subpackages\n\t\t\/\/ because glide doesn't write \".\" as a subpackage, otherwise if some\n\t\t\/\/ files in the base import are needed they will be removed.\n\n\t\t\/\/ This converts pkg separator \"\/\" to os specific separator\n\t\tpkgList = append(pkgList, filepath.FromSlash(imp.Name))\n\t}\n\n\tvpath, err := gpath.Vendor()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif vpath == \"\" {\n\t\treturn fmt.Errorf(\"cannot fine vendor dir\")\n\t}\n\n\ttype pathData struct {\n\t\tpath string\n\t\tisDir bool\n\t}\n\tvar searchPath string\n\tmarkForKeep := map[string]pathData{}\n\tmarkForDelete := []pathData{}\n\n\t\/\/ Walk vendor directory\n\tsearchPath = vpath + string(os.PathSeparator)\n\terr = filepath.Walk(searchPath, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif path == searchPath || path == vpath {\n\t\t\treturn nil\n\t\t}\n\n\t\tlocalPath := strings.TrimPrefix(path, searchPath)\n\n\t\tlastVendorPath, err := getLastVendorPath(localPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif lastVendorPath == \"\" {\n\t\t\tlastVendorPath = localPath\n\t\t}\n\n\t\tkeep := false\n\t\tfor _, name := range pkgList {\n\t\t\t\/\/ If the file's parent directory is a needed package, keep it.\n\t\t\tif !info.IsDir() && filepath.Dir(lastVendorPath) == name {\n\t\t\t\tif opts.onlyCode {\n\t\t\t\t\tif opts.noTests && strings.HasSuffix(path, \"_test.go\") {\n\t\t\t\t\t\tkeep = false\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tfor _, suffix := range codeSuffixes {\n\t\t\t\t\t\tif strings.HasSuffix(path, suffix) {\n\t\t\t\t\t\t\tkeep = true\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ Match keep patterns\n\t\t\t\t\tfor _, keepPattern := range opts.keepPatterns {\n\t\t\t\t\t\tok, err := doublestar.Match(keepPattern, lastVendorPath)\n\t\t\t\t\t\t\/\/ TODO(sgotti) if a bad pattern is encountered stop here. Actually there's no function to verify a pattern before using it, perhaps just a fake match at the start will work.\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"bad pattern: %q\", keepPattern)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif ok {\n\t\t\t\t\t\t\tkeep = true\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tkeep = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Keep all the legal files inside top repo dir and required package dirs\n\t\tfor _, name := range append(repoList, pkgList...) {\n\t\t\tif !info.IsDir() && filepath.Dir(lastVendorPath) == name {\n\t\t\t\tif !opts.noLegalFiles {\n\t\t\t\t\tif IsLegalFile(path) {\n\t\t\t\t\t\tkeep = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ If a directory is a needed package then keep it\n\t\tif keep == false && info.IsDir() {\n\t\t\tfor _, name := range pkgList {\n\t\t\t\tif name == lastVendorPath {\n\t\t\t\t\tkeep = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif keep {\n\t\t\t\/\/ Keep also all parents of current path\n\t\t\tcurpath := localPath\n\t\t\tfor {\n\t\t\t\tcurpath = filepath.Dir(curpath)\n\t\t\t\tif curpath == \".\" {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif _, ok := markForKeep[curpath]; ok {\n\t\t\t\t\t\/\/ Already marked for keep\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tmarkForKeep[curpath] = pathData{curpath, true}\n\t\t\t}\n\n\t\t\t\/\/ Mark for keep\n\t\t\tmarkForKeep[localPath] = pathData{localPath, info.IsDir()}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Generate deletion list\n\terr = filepath.Walk(searchPath, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\t\/\/ Ignore not existant files due to previous removal of the parent directory\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tlocalPath := strings.TrimPrefix(path, searchPath)\n\t\tif localPath == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tif _, ok := markForKeep[localPath]; !ok {\n\t\t\tmarkForDelete = append(markForDelete, pathData{path, info.IsDir()})\n\t\t\tif info.IsDir() {\n\t\t\t\t\/\/ skip directory contents since it has been marked for removal\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Perform the actual delete.\n\tfor _, marked := range markForDelete {\n\t\tlocalPath := strings.TrimPrefix(marked.path, searchPath)\n\t\tif marked.isDir {\n\t\t\tfmt.Printf(\"Removing unused dir: %s\\n\", localPath)\n\t\t} else {\n\t\t\tfmt.Printf(\"Removing unused file: %s\\n\", localPath)\n\t\t}\n\t\tif !opts.dryrun {\n\t\t\trerr := os.RemoveAll(marked.path)\n\t\t\tif rerr != nil {\n\t\t\t\treturn rerr\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc getLastVendorPath(path string) (string, error) {\n\tcurpath := path\n\tfor {\n\t\tif curpath == \".\" {\n\t\t\treturn \"\", nil\n\t\t}\n\t\tif filepath.Base(curpath) == \"vendor\" {\n\t\t\treturn filepath.Rel(curpath, path)\n\t\t}\n\t\tcurpath = filepath.Dir(curpath)\n\t}\n}\n\n\/\/ LoadLockfile loads the contents of a glide.lock file.\nfunc LoadGlideLockfile(base string) (*cfg.Lockfile, error) {\n\tyml, err := ioutil.ReadFile(filepath.Join(base, gpath.LockFile))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlock, err := cfg.LockfileFromYaml(yml)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn lock, nil\n}\n\n\/\/ File lists and code took from https:\/\/github.com\/client9\/gosupplychain\/blob\/master\/license.go\n\n\/\/ LicenseFilePrefix is a list of filename prefixes that indicate it\n\/\/ might contain a software license\nvar LicenseFilePrefix = []string{\n\t\"licence\", \/\/ UK spelling\n\t\"license\", \/\/ US spelling\n\t\"copying\",\n\t\"unlicense\",\n\t\"copyright\",\n\t\"copyleft\",\n}\n\n\/\/ LegalFileSubstring are substrings that indicate the file is likely\n\/\/ to contain some type of legal declaration. \"legal\" is often used\n\/\/ that it might moved to LicenseFilePrefix\nvar LegalFileSubstring = []string{\n\t\"legal\",\n\t\"notice\",\n\t\"disclaimer\",\n\t\"patent\",\n\t\"third-party\",\n\t\"thirdparty\",\n}\n\n\/\/ IsLegalFile returns true if the file is likely to contain some type\n\/\/ of of legal declaration or licensing information\nfunc IsLegalFile(path string) bool {\n\tlowerfile := strings.ToLower(filepath.Base(path))\n\tfor _, prefix := range LicenseFilePrefix {\n\t\tif strings.HasPrefix(lowerfile, prefix) {\n\t\t\treturn true\n\t\t}\n\t}\n\tfor _, substring := range LegalFileSubstring {\n\t\tif strings.Index(lowerfile, substring) != -1 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<commit_msg>gvc: do not keep test files if they contain legal words<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/Masterminds\/glide\/cfg\"\n\tgpath \"github.com\/Masterminds\/glide\/path\"\n\t\"github.com\/bmatcuk\/doublestar\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nvar cmd = &cobra.Command{\n\tUse: \"glide-vc\",\n\tShort: \"glide vendor cleaner\",\n\tRun: glidevc,\n}\n\ntype options struct {\n\tdryrun bool\n\tonlyCode bool\n\tnoTests bool\n\tnoLegalFiles bool\n\tkeepPatterns []string\n}\n\nvar (\n\topts options\n\tcodeSuffixes = []string{\".go\", \".c\", \".s\", \".S\", \".cc\", \".cpp\", \".cxx\", \".h\", \".hh\", \".hpp\", \".hxx\"}\n)\n\nconst (\n\tgoTestSuffix = \"_test.go\"\n)\n\nfunc init() {\n\tcmd.PersistentFlags().BoolVar(&opts.dryrun, \"dryrun\", false, \"just output what will be removed\")\n\tcmd.PersistentFlags().BoolVar(&opts.onlyCode, \"only-code\", false, \"keep only go files (including go test files)\")\n\tcmd.PersistentFlags().BoolVar(&opts.noTests, \"no-tests\", false, \"remove also go test files (requires --only-code)\")\n\tcmd.PersistentFlags().BoolVar(&opts.noLegalFiles, \"no-legal-files\", false, \"remove also licenses and legal files\")\n\tcmd.PersistentFlags().StringSliceVar(&opts.keepPatterns, \"keep\", []string{}, \"A pattern to keep additional files inside needed packages. The pattern match will be relative to the deeper vendor dir. Supports double star (**) patterns. (see https:\/\/golang.org\/pkg\/path\/filepath\/#Match and https:\/\/github.com\/bmatcuk\/doublestar). Can be specified multiple times. For example to keep all the files with json extension use the '**\/*.json' pattern.\")\n}\n\nfunc main() {\n\tcmd.Execute()\n}\n\nfunc glidevc(cmd *cobra.Command, args []string) {\n\tif opts.noTests && !opts.onlyCode {\n\t\tfmt.Printf(\"--no-tests requires --only-code\")\n\t\tos.Exit(1)\n\t}\n\n\tif err := cleanup(\".\", opts); err != nil {\n\t\tfmt.Print(err)\n\t\tos.Exit(1)\n\t}\n\treturn\n}\n\nfunc cleanup(path string, opts options) error {\n\tlock, err := LoadGlideLockfile(path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not load lockfile: %v\", err)\n\t}\n\n\t\/\/ The package list already have the path converted to the os specific\n\t\/\/ path separator, needed for future comparisons.\n\tpkgList := []string{}\n\trepoList := []string{}\n\t\/\/ TODO(sgotti) Should we also consider devImports?\n\tfor _, imp := range lock.Imports {\n\t\t\/\/ This converts pkg separator \"\/\" to os specific separator\n\t\trepoList = append(repoList, filepath.FromSlash(imp.Name))\n\n\t\tif len(imp.Subpackages) > 0 {\n\t\t\tfor _, sp := range imp.Subpackages {\n\t\t\t\t\/\/ This converts pkg separator \"\/\" to os specific separator\n\t\t\t\tpkgList = append(pkgList, filepath.Join(imp.Name, sp))\n\t\t\t}\n\t\t}\n\t\t\/\/ TODO(sgotti) we cannot skip the base import if it has subpackages\n\t\t\/\/ because glide doesn't write \".\" as a subpackage, otherwise if some\n\t\t\/\/ files in the base import are needed they will be removed.\n\n\t\t\/\/ This converts pkg separator \"\/\" to os specific separator\n\t\tpkgList = append(pkgList, filepath.FromSlash(imp.Name))\n\t}\n\n\tvpath, err := gpath.Vendor()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif vpath == \"\" {\n\t\treturn fmt.Errorf(\"cannot fine vendor dir\")\n\t}\n\n\ttype pathData struct {\n\t\tpath string\n\t\tisDir bool\n\t}\n\tvar searchPath string\n\tmarkForKeep := map[string]pathData{}\n\tmarkForDelete := []pathData{}\n\n\t\/\/ Walk vendor directory\n\tsearchPath = vpath + string(os.PathSeparator)\n\terr = filepath.Walk(searchPath, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif path == searchPath || path == vpath {\n\t\t\treturn nil\n\t\t}\n\n\t\tlocalPath := strings.TrimPrefix(path, searchPath)\n\n\t\tlastVendorPath, err := getLastVendorPath(localPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif lastVendorPath == \"\" {\n\t\t\tlastVendorPath = localPath\n\t\t}\n\n\t\tkeep := false\n\t\tfor _, name := range pkgList {\n\t\t\t\/\/ If the file's parent directory is a needed package, keep it.\n\t\t\tif !info.IsDir() && filepath.Dir(lastVendorPath) == name {\n\t\t\t\tif opts.onlyCode {\n\t\t\t\t\tif opts.noTests && strings.HasSuffix(path, \"_test.go\") {\n\t\t\t\t\t\tkeep = false\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tfor _, suffix := range codeSuffixes {\n\t\t\t\t\t\tif strings.HasSuffix(path, suffix) {\n\t\t\t\t\t\t\tkeep = true\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\t\/\/ Match keep patterns\n\t\t\t\t\tfor _, keepPattern := range opts.keepPatterns {\n\t\t\t\t\t\tok, err := doublestar.Match(keepPattern, lastVendorPath)\n\t\t\t\t\t\t\/\/ TODO(sgotti) if a bad pattern is encountered stop here. Actually there's no function to verify a pattern before using it, perhaps just a fake match at the start will work.\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"bad pattern: %q\", keepPattern)\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif ok {\n\t\t\t\t\t\t\tkeep = true\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tkeep = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ Keep all the legal files inside top repo dir and required package dirs\n\t\tfor _, name := range append(repoList, pkgList...) {\n\t\t\tif !info.IsDir() && filepath.Dir(lastVendorPath) == name {\n\t\t\t\tif !opts.noLegalFiles {\n\t\t\t\t\tif IsLegalFile(path) {\n\t\t\t\t\t\tkeep = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t\/\/ If a directory is a needed package then keep it\n\t\tif keep == false && info.IsDir() {\n\t\t\tfor _, name := range pkgList {\n\t\t\t\tif name == lastVendorPath {\n\t\t\t\t\tkeep = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif keep {\n\t\t\t\/\/ Keep also all parents of current path\n\t\t\tcurpath := localPath\n\t\t\tfor {\n\t\t\t\tcurpath = filepath.Dir(curpath)\n\t\t\t\tif curpath == \".\" {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif _, ok := markForKeep[curpath]; ok {\n\t\t\t\t\t\/\/ Already marked for keep\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tmarkForKeep[curpath] = pathData{curpath, true}\n\t\t\t}\n\n\t\t\t\/\/ Mark for keep\n\t\t\tmarkForKeep[localPath] = pathData{localPath, info.IsDir()}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Generate deletion list\n\terr = filepath.Walk(searchPath, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\t\/\/ Ignore not existant files due to previous removal of the parent directory\n\t\t\tif !os.IsNotExist(err) {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tlocalPath := strings.TrimPrefix(path, searchPath)\n\t\tif localPath == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tif _, ok := markForKeep[localPath]; !ok {\n\t\t\tmarkForDelete = append(markForDelete, pathData{path, info.IsDir()})\n\t\t\tif info.IsDir() {\n\t\t\t\t\/\/ skip directory contents since it has been marked for removal\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Perform the actual delete.\n\tfor _, marked := range markForDelete {\n\t\tlocalPath := strings.TrimPrefix(marked.path, searchPath)\n\t\tif marked.isDir {\n\t\t\tfmt.Printf(\"Removing unused dir: %s\\n\", localPath)\n\t\t} else {\n\t\t\tfmt.Printf(\"Removing unused file: %s\\n\", localPath)\n\t\t}\n\t\tif !opts.dryrun {\n\t\t\trerr := os.RemoveAll(marked.path)\n\t\t\tif rerr != nil {\n\t\t\t\treturn rerr\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc getLastVendorPath(path string) (string, error) {\n\tcurpath := path\n\tfor {\n\t\tif curpath == \".\" {\n\t\t\treturn \"\", nil\n\t\t}\n\t\tif filepath.Base(curpath) == \"vendor\" {\n\t\t\treturn filepath.Rel(curpath, path)\n\t\t}\n\t\tcurpath = filepath.Dir(curpath)\n\t}\n}\n\n\/\/ LoadLockfile loads the contents of a glide.lock file.\nfunc LoadGlideLockfile(base string) (*cfg.Lockfile, error) {\n\tyml, err := ioutil.ReadFile(filepath.Join(base, gpath.LockFile))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlock, err := cfg.LockfileFromYaml(yml)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn lock, nil\n}\n\n\/\/ File lists and code took from https:\/\/github.com\/client9\/gosupplychain\/blob\/master\/license.go\n\n\/\/ LicenseFilePrefix is a list of filename prefixes that indicate it\n\/\/ might contain a software license\nvar LicenseFilePrefix = []string{\n\t\"licence\", \/\/ UK spelling\n\t\"license\", \/\/ US spelling\n\t\"copying\",\n\t\"unlicense\",\n\t\"copyright\",\n\t\"copyleft\",\n}\n\n\/\/ LegalFileSubstring are substrings that indicate the file is likely\n\/\/ to contain some type of legal declaration. \"legal\" is often used\n\/\/ that it might be moved to LicenseFilePrefix\nvar LegalFileSubstring = []string{\n\t\"legal\",\n\t\"notice\",\n\t\"disclaimer\",\n\t\"patent\",\n\t\"third-party\",\n\t\"thirdparty\",\n}\n\n\/\/ IsLegalFile returns true if the file is likely to contain some type\n\/\/ of of legal declaration or licensing information\nfunc IsLegalFile(path string) bool {\n\tlowerfile := strings.ToLower(filepath.Base(path))\n\tfor _, prefix := range LicenseFilePrefix {\n\t\tif strings.HasPrefix(lowerfile, prefix) && !strings.HasSuffix(lowerfile, goTestSuffix) {\n\t\t\treturn true\n\t\t}\n\t}\n\tfor _, substring := range LegalFileSubstring {\n\t\tif strings.Index(lowerfile, substring) != -1 && !strings.HasSuffix(lowerfile, goTestSuffix) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/gorilla\/context\"\n\t\"github.com\/gorilla\/sessions\"\n\t\"github.com\/maiah\/han.go\/components\/auth\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n)\n\nvar (\n\tstore = sessions.NewCookieStore([]byte(\"smallelephantandbigfly\"))\n)\n\nfunc main() {\n\t\/\/ Setup route handlers\n\thttp.HandleFunc(\"\/\", index)\n\thttp.HandleFunc(\"\/login\", login)\n\thttp.HandleFunc(\"\/logout\", logout)\n\thttp.HandleFunc(\"\/home\", home)\n\thttp.HandleFunc(\"\/settings\", settings)\n\n\t\/\/ Start the server\n\tlog.Println(\"Listening on port 5000\")\n\tlog.Fatal(http.ListenAndServe(\":5000\",\n\t\tcontext.ClearHandler(http.DefaultServeMux)))\n}\n\n\/\/ Route Handlers\n\nfunc index(w http.ResponseWriter, r *http.Request) {\n\tpath := r.URL.Path\n\n\tif len(path) >= 7 {\n\t\tpath = r.URL.Path[0:7]\n\t}\n\n\tif path == \"\/public\" {\n\t\tpublic(w, r) \/\/ invoke static file handler\n\t} else {\n\t\tfmt.Fprintf(w, \"welcome my han.go\")\n\t}\n}\n\n\/\/ Custom static file handling\n\nfunc public(w http.ResponseWriter, r *http.Request) {\n\tfile := r.URL.Path[1:]\n\thttp.ServeFile(w, r, file)\n}\n\ntype page struct {\n\tMessage string\n}\n\nfunc login(w http.ResponseWriter, r *http.Request) {\n\tif auth.IsAuthorized(store, r) {\n\t\thttp.Redirect(w, r, \"\/home\", http.StatusFound)\n\n\t} else {\n\t\tloginPage := \"pages\/login.html\"\n\n\t\tif r.Method == \"GET\" {\n\t\t\tt, _ := template.ParseFiles(loginPage)\n\t\t\tt.Execute(w, nil)\n\n\t\t} else if r.Method == \"POST\" {\n\t\t\tif auth.IsAuthenticated(store, w, r) {\n\t\t\t\thttp.Redirect(w, r, \"\/home\", http.StatusFound)\n\n\t\t\t} else {\n\t\t\t\tt, err := template.ParseFiles(loginPage)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tif t.Execute(w,\n\t\t\t\t\tpage{Message: \"Invalid username\/password\"}) != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc logout(w http.ResponseWriter, r *http.Request) {\n\tsession, _ := store.Get(r, \"user-session\")\n\tsession.Options = &sessions.Options{MaxAge: -1}\n\tsession.Values = nil\n\tsession.Save(r, w)\n\n\thttp.Redirect(w, r, \"\/login\", http.StatusFound)\n}\n\nfunc home(w http.ResponseWriter, r *http.Request) {\n\tif auth.IsAuthorized(store, r) {\n\t\thomePage := \"pages\/home.html\"\n\t\tt, err := template.ParseFiles(homePage)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tsession, _ := store.Get(r, \"user-session\")\n\n\t\tt.Execute(w, page{Message: session.Values[\"username\"].(string)})\n\n\t} else {\n\t\thttp.Redirect(w, r, \"\/login\", http.StatusFound)\n\t}\n}\n\nfunc settings(w http.ResponseWriter, r *http.Request) {\n\tif auth.IsAuthorized(store, r, \"ADMIN\") {\n\t\tfile := \"pages\/settings.html\"\n\t\thttp.ServeFile(w, r, file)\n\n\t} else {\n\t\thttp.Redirect(w, r, \"\/login\", http.StatusFound)\n\t}\n}\n<commit_msg>Use port from env variable<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"github.com\/gorilla\/context\"\n\t\"github.com\/gorilla\/sessions\"\n\t\"github.com\/maiah\/han.go\/components\/auth\"\n\t\"html\/template\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n)\n\nvar (\n\tstore = sessions.NewCookieStore([]byte(\"smallelephantandbigfly\"))\n)\n\nfunc main() {\n\t\/\/ Setup route handlers\n\thttp.HandleFunc(\"\/\", index)\n\thttp.HandleFunc(\"\/login\", login)\n\thttp.HandleFunc(\"\/logout\", logout)\n\thttp.HandleFunc(\"\/home\", home)\n\thttp.HandleFunc(\"\/settings\", settings)\n\n\tport := os.Getenv(\"PORT\")\n\n\t\/\/ Start the server\n\tlog.Println(\"Listening on port \" + port)\n\tlog.Fatal(http.ListenAndServe(\":\"+port,\n\t\tcontext.ClearHandler(http.DefaultServeMux)))\n}\n\n\/\/ Route Handlers\n\nfunc index(w http.ResponseWriter, r *http.Request) {\n\tpath := r.URL.Path\n\n\tif len(path) >= 7 {\n\t\tpath = r.URL.Path[0:7]\n\t}\n\n\tif path == \"\/public\" {\n\t\tpublic(w, r) \/\/ invoke static file handler\n\t} else {\n\t\tfmt.Fprintf(w, \"welcome my han.go\")\n\t}\n}\n\n\/\/ Custom static file handling\n\nfunc public(w http.ResponseWriter, r *http.Request) {\n\tfile := r.URL.Path[1:]\n\thttp.ServeFile(w, r, file)\n}\n\ntype page struct {\n\tMessage string\n}\n\nfunc login(w http.ResponseWriter, r *http.Request) {\n\tif auth.IsAuthorized(store, r) {\n\t\thttp.Redirect(w, r, \"\/home\", http.StatusFound)\n\n\t} else {\n\t\tloginPage := \"pages\/login.html\"\n\n\t\tif r.Method == \"GET\" {\n\t\t\tt, _ := template.ParseFiles(loginPage)\n\t\t\tt.Execute(w, nil)\n\n\t\t} else if r.Method == \"POST\" {\n\t\t\tif auth.IsAuthenticated(store, w, r) {\n\t\t\t\thttp.Redirect(w, r, \"\/home\", http.StatusFound)\n\n\t\t\t} else {\n\t\t\t\tt, err := template.ParseFiles(loginPage)\n\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tif t.Execute(w,\n\t\t\t\t\tpage{Message: \"Invalid username\/password\"}) != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc logout(w http.ResponseWriter, r *http.Request) {\n\tsession, _ := store.Get(r, \"user-session\")\n\tsession.Options = &sessions.Options{MaxAge: -1}\n\tsession.Values = nil\n\tsession.Save(r, w)\n\n\thttp.Redirect(w, r, \"\/login\", http.StatusFound)\n}\n\nfunc home(w http.ResponseWriter, r *http.Request) {\n\tif auth.IsAuthorized(store, r) {\n\t\thomePage := \"pages\/home.html\"\n\t\tt, err := template.ParseFiles(homePage)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tsession, _ := store.Get(r, \"user-session\")\n\n\t\tt.Execute(w, page{Message: session.Values[\"username\"].(string)})\n\n\t} else {\n\t\thttp.Redirect(w, r, \"\/login\", http.StatusFound)\n\t}\n}\n\nfunc settings(w http.ResponseWriter, r *http.Request) {\n\tif auth.IsAuthorized(store, r, \"ADMIN\") {\n\t\tfile := \"pages\/settings.html\"\n\t\thttp.ServeFile(w, r, file)\n\n\t} else {\n\t\thttp.Redirect(w, r, \"\/login\", http.StatusFound)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"code.google.com\/p\/go.net\/html\"\n\t\"code.google.com\/p\/go.net\/html\/atom\"\n\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/user\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar barMap = map[string]string{}\n\ntype beerInfo struct {\n\tbrewery string\n\tbrew string\n}\n\nfunc findBeer(node *html.Node, beers *[]beerInfo) {\n\tif node.DataAtom == atom.Div {\n\t\tfor _, attr := range node.Attr {\n\t\t\tif attr.Key == \"id\" && strings.HasPrefix(attr.Val, \"beer-\") {\n\t\t\t\tbrewery, brew := \"\", \"\"\n\t\t\t\tfindBrewery(node, &brewery)\n\t\t\t\tfindBrew(node, &brew)\n\t\t\t\t*beers = append(*beers, beerInfo{brewery, brew})\n\t\t\t}\n\t\t}\n\t}\n\tfor kid := node.FirstChild; kid != nil; kid = kid.NextSibling {\n\t\tfindBeer(kid, beers)\n\t}\n}\n\nfunc findBrewery(node *html.Node, brewery *string) bool {\n\tif node.DataAtom == atom.H4 {\n\t\tif content := node.FirstChild; content != nil {\n\t\t\t*brewery = content.Data\n\t\t\treturn true\n\t\t}\n\t}\n\tfor kid := node.FirstChild; kid != nil; kid = kid.NextSibling {\n\t\tif findBrewery(kid, brewery) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc findBrew(node *html.Node, brew *string) bool {\n\tfor _, attr := range node.Attr {\n\t\tif attr.Key == \"class\" && attr.Val == \"beer-name\" {\n\t\t\tif content := node.FirstChild; content != nil {\n\t\t\t\t*brew = content.Data\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\tfor kid := node.FirstChild; kid != nil; kid = kid.NextSibling {\n\t\tif findBrew(kid, brew) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc checkId(id string) bool {\n\tok, err := regexp.MatchString(\"^[[:xdigit:]]{24}$\", id)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn ok\n}\n\nfunc readRc() {\n\tusr, err := user.Current()\n\tif err != nil {\n\t\treturn\n\t}\n\tdata, err := ioutil.ReadFile(usr.HomeDir + \"\/.taplistrc\")\n\tif err != nil {\n\t\treturn\n\t}\n\tlines := strings.Split(string(data), \"\\n\")\n\tfor _, line := range lines {\n\t\tline = strings.TrimSpace(line)\n\t\tidx := strings.IndexAny(line, \" \\t\")\n\t\tif idx < len(line) - 1 {\n\t\t\tid, name := line[:idx], strings.TrimSpace(line[idx:])\n\t\t\tbarMap[id] = name\n\t\t}\n\t}\n}\n\nfunc findBar(arg string) (string, string) {\n\tfor id, name := range barMap {\n\t\tif strings.Contains(strings.ToLower(name), strings.ToLower(arg)) {\n\t\t\treturn id, name\n\t\t}\n\t}\n\treturn \"\", \"\"\n}\n\nfunc main() {\n\tlog.SetFlags(0)\n\tlog.SetPrefix(\"taplist: \")\n\n\tif len(os.Args) != 2 {\n\t\tlog.Fatalln(\"usage: taplist <id>\")\n\t}\n\treadRc()\n\targ := strings.ToLower(os.Args[1])\n\tid, name := \"\", \"\"\n\tif checkId(arg) {\n\t\tid, name = arg, arg\n\t} else {\n\t\tid, name = findBar(arg)\n\t}\n\tif id == \"\" {\n\t\tlog.Fatalln(arg + \" doesn't look like a valid name or taplister bar id\")\n\t}\n\n\tresp, err := http.Get(\"http:\/\/www.taplister.com\/bars\/\" + id)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\tdoc, err := html.Parse(resp.Body)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tbeers := []beerInfo{}\n\tfindBeer(doc, &beers)\n\tfmt.Println(\"On tap at \" + name + \":\\n\")\n\tfor _, beer := range beers {\n\t\tfmt.Printf(\"%-38.38s %s\\n\", beer.brewery, beer.brew)\n\t}\n}\n<commit_msg>Updated usage for new name option.<commit_after>package main\n\nimport (\n\t\"code.google.com\/p\/go.net\/html\"\n\t\"code.google.com\/p\/go.net\/html\/atom\"\n\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/user\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar barMap = map[string]string{}\n\ntype beerInfo struct {\n\tbrewery string\n\tbrew string\n}\n\nfunc findBeer(node *html.Node, beers *[]beerInfo) {\n\tif node.DataAtom == atom.Div {\n\t\tfor _, attr := range node.Attr {\n\t\t\tif attr.Key == \"id\" && strings.HasPrefix(attr.Val, \"beer-\") {\n\t\t\t\tbrewery, brew := \"\", \"\"\n\t\t\t\tfindBrewery(node, &brewery)\n\t\t\t\tfindBrew(node, &brew)\n\t\t\t\t*beers = append(*beers, beerInfo{brewery, brew})\n\t\t\t}\n\t\t}\n\t}\n\tfor kid := node.FirstChild; kid != nil; kid = kid.NextSibling {\n\t\tfindBeer(kid, beers)\n\t}\n}\n\nfunc findBrewery(node *html.Node, brewery *string) bool {\n\tif node.DataAtom == atom.H4 {\n\t\tif content := node.FirstChild; content != nil {\n\t\t\t*brewery = content.Data\n\t\t\treturn true\n\t\t}\n\t}\n\tfor kid := node.FirstChild; kid != nil; kid = kid.NextSibling {\n\t\tif findBrewery(kid, brewery) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc findBrew(node *html.Node, brew *string) bool {\n\tfor _, attr := range node.Attr {\n\t\tif attr.Key == \"class\" && attr.Val == \"beer-name\" {\n\t\t\tif content := node.FirstChild; content != nil {\n\t\t\t\t*brew = content.Data\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\tfor kid := node.FirstChild; kid != nil; kid = kid.NextSibling {\n\t\tif findBrew(kid, brew) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc checkId(id string) bool {\n\tok, err := regexp.MatchString(\"^[[:xdigit:]]{24}$\", id)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn ok\n}\n\nfunc readRc() {\n\tusr, err := user.Current()\n\tif err != nil {\n\t\treturn\n\t}\n\tdata, err := ioutil.ReadFile(usr.HomeDir + \"\/.taplistrc\")\n\tif err != nil {\n\t\treturn\n\t}\n\tlines := strings.Split(string(data), \"\\n\")\n\tfor _, line := range lines {\n\t\tline = strings.TrimSpace(line)\n\t\tidx := strings.IndexAny(line, \" \\t\")\n\t\tif idx < len(line) - 1 {\n\t\t\tid, name := line[:idx], strings.TrimSpace(line[idx:])\n\t\t\tbarMap[id] = name\n\t\t}\n\t}\n}\n\nfunc findBar(arg string) (string, string) {\n\tfor id, name := range barMap {\n\t\tif strings.Contains(strings.ToLower(name), strings.ToLower(arg)) {\n\t\t\treturn id, name\n\t\t}\n\t}\n\treturn \"\", \"\"\n}\n\nfunc main() {\n\tlog.SetFlags(0)\n\tlog.SetPrefix(\"taplist: \")\n\n\tif len(os.Args) != 2 {\n\t\tlog.Fatalln(\"usage: taplist <id> | <name>\")\n\t}\n\treadRc()\n\targ := strings.ToLower(os.Args[1])\n\tid, name := \"\", \"\"\n\tif checkId(arg) {\n\t\tid, name = arg, arg\n\t} else {\n\t\tid, name = findBar(arg)\n\t}\n\tif id == \"\" {\n\t\tlog.Fatalln(arg + \" doesn't look like a valid name or taplister bar id\")\n\t}\n\n\tresp, err := http.Get(\"http:\/\/www.taplister.com\/bars\/\" + id)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\tdoc, err := html.Parse(resp.Body)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tbeers := []beerInfo{}\n\tfindBeer(doc, &beers)\n\tfmt.Println(\"On tap at \" + name + \":\\n\")\n\tfor _, beer := range beers {\n\t\tfmt.Printf(\"%-38.38s %s\\n\", beer.brewery, beer.brew)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package gsd\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto\/rand\"\n\t\"crypto\/tls\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"time\"\n)\n\ntype TCPTLSService struct {\n\tname string\n}\n\nfunc NewTCPTLSService() *TCPTLSService {\n\treturn &TCPTLSService{name: \"Generic TCP TLS\"}\n}\n\nfunc (s *TCPTLSService) Name() string {\n\treturn s.name\n}\n\nfunc (s *TCPTLSService) GetBanner(ip string, port string) Banner {\n\tbanner := Banner{\n\t\tIp: ip,\n\t\tPort: port,\n\t\tService: s.Name(),\n\t}\n\n\t\/\/ Connect\n\tdialer := &GsdDialer{\n\t\tDialer: &net.Dialer{\n\t\t\tTimeout: connTimeout,\n\t\t\tKeepAlive: 0,\n\t\t},\n\t}\n\tconfig := &tls.Config{InsecureSkipVerify: true}\n\tdconn, err := dialer.Dial(\"tcp\", ip+\":\"+port)\n\tif err != nil {\n\t\tbanner.Error = err.Error()\n\t\treturn banner\n\t}\n\tdefer dconn.Close()\n\n\tconn := tls.Client(dconn, config)\n\tdefer conn.Close()\n\n\t\/\/ Check if the connection is encrypted, get the certificate and base64 it\n\tstate := conn.ConnectionState()\n\tif state.PeerCertificates == nil {\n\t\tbanner.Error = \"Non encrypted HTTP connection\"\n\t\treturn banner\n\t}\n\trawCert := state.PeerCertificates[0].Raw\n\tb64Cert := base64.StdEncoding.EncodeToString(rawCert)\n\tbanner.Content = \"-----BEGIN CERTIFICATE-----\\n\" + b64Cert +\n\t\t\"-----END CERTIFICATE-----\\n\"\n\n\t\/\/ Wait to receive content\n\tnow := time.Now()\n\tconn.SetReadDeadline(now.Add(readTimeout))\n\n\tbuff := bytes.NewBuffer(nil)\n\tr := bufio.NewReader(conn)\n\n\t_, err = io.Copy(buff, r)\n\tif err == nil || buff.Len() > 0 {\n\t\t\/\/ Got something\n\t\tbanner.Content += buff.String()\n\t\treturn banner\n\t}\n\n\t\/\/ Timeout! Lets fuzz with something random\n\tfuzz := make([]byte, 128)\n\t_, err = rand.Read(fuzz)\n\tif err != nil {\n\t\tbanner.Error = err.Error()\n\t\treturn banner\n\t}\n\t_, err = fmt.Fprintf(conn, \"%s\\n\\n\", fuzz)\n\tif err != nil {\n\t\tbanner.Error = err.Error()\n\t\treturn banner\n\t}\n\n\t\/\/ Wait to receive content again\n\tnow = time.Now()\n\tconn.SetReadDeadline(now.Add(readTimeout))\n\t_, err = io.Copy(buff, r)\n\tif err != nil {\n\t\tbanner.Error = err.Error()\n\t\treturn banner\n\t}\n\n\tbanner.Content += buff.String()\n\treturn banner\n}\n<commit_msg>Fixed Generic TCP TLS Service<commit_after>package gsd\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto\/rand\"\n\t\"crypto\/tls\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"time\"\n)\n\ntype TCPTLSService struct {\n\tname string\n}\n\nfunc NewTCPTLSService() *TCPTLSService {\n\treturn &TCPTLSService{name: \"Generic TCP TLS\"}\n}\n\nfunc (s *TCPTLSService) Name() string {\n\treturn s.name\n}\n\nfunc (s *TCPTLSService) GetBanner(ip string, port string) Banner {\n\tbanner := Banner{\n\t\tIp: ip,\n\t\tPort: port,\n\t\tService: s.Name(),\n\t}\n\n\t\/\/ Connect\n\tdialer := &GsdDialer{\n\t\tDialer: &net.Dialer{\n\t\t\tTimeout: connTimeout,\n\t\t\tKeepAlive: 0,\n\t\t},\n\t}\n\tconfig := &tls.Config{InsecureSkipVerify: true}\n\tconn, err := tls.DialWithDialer(dialer.Dialer, \"tcp\", ip+\":\"+port, config)\n\tif err != nil {\n\t\tbanner.Error = err.Error()\n\t\treturn banner\n\t}\n\tdefer conn.Close()\n\n\t\/\/ Check if the connection is encrypted, get the certificate and base64 it\n\tstate := conn.ConnectionState()\n\tif state.PeerCertificates == nil {\n\t\tbanner.Error = \"Non encrypted HTTP connection\"\n\t\treturn banner\n\t}\n\trawCert := state.PeerCertificates[0].Raw\n\tb64Cert := base64.StdEncoding.EncodeToString(rawCert)\n\tbanner.Content = \"-----BEGIN CERTIFICATE-----\\n\" + b64Cert +\n\t\t\"-----END CERTIFICATE-----\\n\"\n\n\t\/\/ Wait to receive content\n\tnow := time.Now()\n\tconn.SetReadDeadline(now.Add(readTimeout))\n\n\tbuff := bytes.NewBuffer(nil)\n\tr := bufio.NewReader(conn)\n\n\t_, err = io.Copy(buff, r)\n\tif err == nil || buff.Len() > 0 {\n\t\t\/\/ Got something\n\t\tbanner.Content += buff.String()\n\t\treturn banner\n\t}\n\n\t\/\/ Timeout! Lets fuzz with something random\n\tfuzz := make([]byte, 128)\n\t_, err = rand.Read(fuzz)\n\tif err != nil {\n\t\tbanner.Error = err.Error()\n\t\treturn banner\n\t}\n\t_, err = fmt.Fprintf(conn, \"%s\\n\\n\", fuzz)\n\tif err != nil {\n\t\tbanner.Error = err.Error()\n\t\treturn banner\n\t}\n\n\t\/\/ Wait to receive content again\n\tnow = time.Now()\n\tconn.SetReadDeadline(now.Add(readTimeout))\n\t_, err = io.Copy(buff, r)\n\tif err != nil {\n\t\tbanner.Error = err.Error()\n\t\treturn banner\n\t}\n\n\tbanner.Content += buff.String()\n\treturn banner\n}\n<|endoftext|>"} {"text":"<commit_before>package google\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/schema\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/validation\"\n\tcomputeBeta \"google.golang.org\/api\/compute\/v0.beta\"\n\t\"google.golang.org\/api\/googleapi\"\n)\n\nfunc instanceSchedulingNodeAffinitiesElemSchema() *schema.Resource {\n\treturn &schema.Resource{\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"key\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"operator\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validation.StringInSlice([]string{\"IN\", \"NOT_IN\"}, false),\n\t\t\t},\n\t\t\t\"values\": {\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\tSet: schema.HashString,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc expandAliasIpRanges(ranges []interface{}) []*computeBeta.AliasIpRange {\n\tipRanges := make([]*computeBeta.AliasIpRange, 0, len(ranges))\n\tfor _, raw := range ranges {\n\t\tdata := raw.(map[string]interface{})\n\t\tipRanges = append(ipRanges, &computeBeta.AliasIpRange{\n\t\t\tIpCidrRange: data[\"ip_cidr_range\"].(string),\n\t\t\tSubnetworkRangeName: data[\"subnetwork_range_name\"].(string),\n\t\t})\n\t}\n\treturn ipRanges\n}\n\nfunc flattenAliasIpRange(ranges []*computeBeta.AliasIpRange) []map[string]interface{} {\n\trangesSchema := make([]map[string]interface{}, 0, len(ranges))\n\tfor _, ipRange := range ranges {\n\t\trangesSchema = append(rangesSchema, map[string]interface{}{\n\t\t\t\"ip_cidr_range\": ipRange.IpCidrRange,\n\t\t\t\"subnetwork_range_name\": ipRange.SubnetworkRangeName,\n\t\t})\n\t}\n\treturn rangesSchema\n}\n\nfunc expandScheduling(v interface{}) (*computeBeta.Scheduling, error) {\n\tif v == nil {\n\t\t\/\/ We can't set default values for lists.\n\t\treturn &computeBeta.Scheduling{\n\t\t\tAutomaticRestart: googleapi.Bool(true),\n\t\t}, nil\n\t}\n\n\tls := v.([]interface{})\n\tif len(ls) == 0 {\n\t\t\/\/ We can't set default values for lists\n\t\treturn &computeBeta.Scheduling{\n\t\t\tAutomaticRestart: googleapi.Bool(true),\n\t\t}, nil\n\t}\n\n\tif len(ls) > 1 || ls[0] == nil {\n\t\treturn nil, fmt.Errorf(\"expected exactly one scheduling block\")\n\t}\n\n\toriginal := ls[0].(map[string]interface{})\n\tscheduling := &computeBeta.Scheduling{\n\t\tForceSendFields: make([]string, 0, 4),\n\t}\n\n\tif v, ok := original[\"automatic_restart\"]; ok {\n\t\tscheduling.AutomaticRestart = googleapi.Bool(v.(bool))\n\t\tscheduling.ForceSendFields = append(scheduling.ForceSendFields, \"AutomaticRestart\")\n\t}\n\n\tif v, ok := original[\"preemptible\"]; ok {\n\t\tscheduling.Preemptible = v.(bool)\n\t\tscheduling.ForceSendFields = append(scheduling.ForceSendFields, \"Preemptible\")\n\n\t}\n\n\tif v, ok := original[\"on_host_maintenance\"]; ok {\n\t\tscheduling.OnHostMaintenance = v.(string)\n\t\tscheduling.ForceSendFields = append(scheduling.ForceSendFields, \"OnHostMaintenance\")\n\t}\n\n\tif v, ok := original[\"node_affinities\"]; ok && v != nil {\n\t\tnaSet := v.(*schema.Set).List()\n\t\tscheduling.NodeAffinities = make([]*computeBeta.SchedulingNodeAffinity, len(ls))\n\t\tscheduling.ForceSendFields = append(scheduling.ForceSendFields, \"NodeAffinities\")\n\t\tfor _, nodeAffRaw := range naSet {\n\t\t\tif nodeAffRaw == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnodeAff := nodeAffRaw.(map[string]interface{})\n\t\t\ttransformed := &computeBeta.SchedulingNodeAffinity{\n\t\t\t\tKey: nodeAff[\"key\"].(string),\n\t\t\t\tOperator: nodeAff[\"operator\"].(string),\n\t\t\t\tValues: convertStringArr(nodeAff[\"values\"].(*schema.Set).List()),\n\t\t\t}\n\t\t\tscheduling.NodeAffinities = append(scheduling.NodeAffinities, transformed)\n\t\t}\n\t}\n\n\treturn scheduling, nil\n}\n\nfunc flattenScheduling(resp *computeBeta.Scheduling) []map[string]interface{} {\n\tschedulingMap := map[string]interface{}{\n\t\t\"on_host_maintenance\": resp.OnHostMaintenance,\n\t\t\"preemptible\": resp.Preemptible,\n\t}\n\n\tif resp.AutomaticRestart != nil {\n\t\tschedulingMap[\"automatic_restart\"] = *resp.AutomaticRestart\n\t}\n\n\tnodeAffinities := schema.NewSet(schema.HashResource(instanceSchedulingNodeAffinitiesElemSchema()), nil)\n\tfor _, na := range resp.NodeAffinities {\n\t\tnodeAffinities.Add(map[string]interface{}{\n\t\t\t\"key\": na.Key,\n\t\t\t\"operator\": na.Operator,\n\t\t\t\"values\": schema.NewSet(schema.HashString, convertStringArrToInterface(na.Values)),\n\t\t})\n\t}\n\tschedulingMap[\"node_affinities\"] = nodeAffinities\n\n\treturn []map[string]interface{}{schedulingMap}\n}\n\nfunc flattenAccessConfigs(accessConfigs []*computeBeta.AccessConfig) ([]map[string]interface{}, string) {\n\tflattened := make([]map[string]interface{}, len(accessConfigs))\n\tnatIP := \"\"\n\tfor i, ac := range accessConfigs {\n\t\tflattened[i] = map[string]interface{}{\n\t\t\t\"nat_ip\": ac.NatIP,\n\t\t\t\"network_tier\": ac.NetworkTier,\n\t\t}\n\t\tif ac.SetPublicPtr {\n\t\t\tflattened[i][\"public_ptr_domain_name\"] = ac.PublicPtrDomainName\n\t\t}\n\t\tif natIP == \"\" {\n\t\t\tnatIP = ac.NatIP\n\t\t}\n\t}\n\treturn flattened, natIP\n}\n\nfunc flattenNetworkInterfaces(d *schema.ResourceData, config *Config, networkInterfaces []*computeBeta.NetworkInterface) ([]map[string]interface{}, string, string, string, error) {\n\tflattened := make([]map[string]interface{}, len(networkInterfaces))\n\tvar region, internalIP, externalIP string\n\n\tfor i, iface := range networkInterfaces {\n\t\tvar ac []map[string]interface{}\n\t\tac, externalIP = flattenAccessConfigs(iface.AccessConfigs)\n\n\t\tsubnet, err := ParseSubnetworkFieldValue(iface.Subnetwork, d, config)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", \"\", \"\", err\n\t\t}\n\t\tregion = subnet.Region\n\n\t\tflattened[i] = map[string]interface{}{\n\t\t\t\"network_ip\": iface.NetworkIP,\n\t\t\t\"network\": ConvertSelfLinkToV1(iface.Network),\n\t\t\t\"subnetwork\": ConvertSelfLinkToV1(iface.Subnetwork),\n\t\t\t\"subnetwork_project\": subnet.Project,\n\t\t\t\"access_config\": ac,\n\t\t\t\"alias_ip_range\": flattenAliasIpRange(iface.AliasIpRanges),\n\t\t}\n\t\t\/\/ Instance template interfaces never have names, so they're absent\n\t\t\/\/ in the instance template network_interface schema. We want to use the\n\t\t\/\/ same flattening code for both resource types, so we avoid trying to\n\t\t\/\/ set the name field when it's not set at the GCE end.\n\t\tif iface.Name != \"\" {\n\t\t\tflattened[i][\"name\"] = iface.Name\n\t\t}\n\t\tif internalIP == \"\" {\n\t\t\tinternalIP = iface.NetworkIP\n\t\t}\n\t}\n\treturn flattened, region, internalIP, externalIP, nil\n}\n\nfunc expandAccessConfigs(configs []interface{}) []*computeBeta.AccessConfig {\n\tacs := make([]*computeBeta.AccessConfig, len(configs))\n\tfor i, raw := range configs {\n\t\tdata := raw.(map[string]interface{})\n\t\tacs[i] = &computeBeta.AccessConfig{\n\t\t\tType: \"ONE_TO_ONE_NAT\",\n\t\t\tNatIP: data[\"nat_ip\"].(string),\n\t\t\tNetworkTier: data[\"network_tier\"].(string),\n\t\t}\n\t\tif ptr, ok := data[\"public_ptr_domain_name\"]; ok && ptr != \"\" {\n\t\t\tacs[i].SetPublicPtr = true\n\t\t\tacs[i].PublicPtrDomainName = ptr.(string)\n\t\t}\n\t}\n\treturn acs\n}\n\nfunc expandNetworkInterfaces(d TerraformResourceData, config *Config) ([]*computeBeta.NetworkInterface, error) {\n\tconfigs := d.Get(\"network_interface\").([]interface{})\n\tifaces := make([]*computeBeta.NetworkInterface, len(configs))\n\tfor i, raw := range configs {\n\t\tdata := raw.(map[string]interface{})\n\n\t\tnetwork := data[\"network\"].(string)\n\t\tsubnetwork := data[\"subnetwork\"].(string)\n\t\tif network == \"\" && subnetwork == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"exactly one of network or subnetwork must be provided\")\n\t\t}\n\n\t\tnf, err := ParseNetworkFieldValue(network, d, config)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"cannot determine self_link for network %q: %s\", network, err)\n\t\t}\n\n\t\tsubnetProjectField := fmt.Sprintf(\"network_interface.%d.subnetwork_project\", i)\n\t\tsf, err := ParseSubnetworkFieldValueWithProjectField(subnetwork, subnetProjectField, d, config)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"cannot determine self_link for subnetwork %q: %s\", subnetwork, err)\n\t\t}\n\n\t\tifaces[i] = &computeBeta.NetworkInterface{\n\t\t\tNetworkIP: data[\"network_ip\"].(string),\n\t\t\tNetwork: nf.RelativeLink(),\n\t\t\tSubnetwork: sf.RelativeLink(),\n\t\t\tAccessConfigs: expandAccessConfigs(data[\"access_config\"].([]interface{})),\n\t\t\tAliasIpRanges: expandAliasIpRanges(data[\"alias_ip_range\"].([]interface{})),\n\t\t}\n\n\t}\n\treturn ifaces, nil\n}\n\nfunc flattenServiceAccounts(serviceAccounts []*computeBeta.ServiceAccount) []map[string]interface{} {\n\tresult := make([]map[string]interface{}, len(serviceAccounts))\n\tfor i, serviceAccount := range serviceAccounts {\n\t\tresult[i] = map[string]interface{}{\n\t\t\t\"email\": serviceAccount.Email,\n\t\t\t\"scopes\": schema.NewSet(stringScopeHashcode, convertStringArrToInterface(serviceAccount.Scopes)),\n\t\t}\n\t}\n\treturn result\n}\n\nfunc expandServiceAccounts(configs []interface{}) []*computeBeta.ServiceAccount {\n\taccounts := make([]*computeBeta.ServiceAccount, len(configs))\n\tfor i, raw := range configs {\n\t\tdata := raw.(map[string]interface{})\n\n\t\taccounts[i] = &computeBeta.ServiceAccount{\n\t\t\tEmail: data[\"email\"].(string),\n\t\t\tScopes: canonicalizeServiceScopes(convertStringSet(data[\"scopes\"].(*schema.Set))),\n\t\t}\n\n\t\tif accounts[i].Email == \"\" {\n\t\t\taccounts[i].Email = \"default\"\n\t\t}\n\t}\n\treturn accounts\n}\n\nfunc flattenGuestAccelerators(accelerators []*computeBeta.AcceleratorConfig) []map[string]interface{} {\n\tacceleratorsSchema := make([]map[string]interface{}, len(accelerators))\n\tfor i, accelerator := range accelerators {\n\t\tacceleratorsSchema[i] = map[string]interface{}{\n\t\t\t\"count\": accelerator.AcceleratorCount,\n\t\t\t\"type\": accelerator.AcceleratorType,\n\t\t}\n\t}\n\treturn acceleratorsSchema\n}\n\nfunc resourceInstanceTags(d TerraformResourceData) *computeBeta.Tags {\n\t\/\/ Calculate the tags\n\tvar tags *computeBeta.Tags\n\tif v := d.Get(\"tags\"); v != nil {\n\t\tvs := v.(*schema.Set)\n\t\ttags = new(computeBeta.Tags)\n\t\ttags.Items = make([]string, vs.Len())\n\t\tfor i, v := range vs.List() {\n\t\t\ttags.Items[i] = v.(string)\n\t\t}\n\n\t\ttags.Fingerprint = d.Get(\"tags_fingerprint\").(string)\n\t}\n\n\treturn tags\n}\n\nfunc expandShieldedVmConfigs(d TerraformResourceData) *computeBeta.ShieldedVmConfig {\n\tif _, ok := d.GetOk(\"shielded_instance_config\"); !ok {\n\t\treturn nil\n\t}\n\n\tprefix := \"shielded_instance_config.0\"\n\treturn &computeBeta.ShieldedVmConfig{\n\t\tEnableSecureBoot: d.Get(prefix + \".enable_secure_boot\").(bool),\n\t\tEnableVtpm: d.Get(prefix + \".enable_vtpm\").(bool),\n\t\tEnableIntegrityMonitoring: d.Get(prefix + \".enable_integrity_monitoring\").(bool),\n\t\tForceSendFields: []string{\"EnableSecureBoot\", \"EnableVtpm\", \"EnableIntegrityMonitoring\"},\n\t}\n}\n\nfunc flattenShieldedVmConfig(shieldedVmConfig *computeBeta.ShieldedVmConfig) []map[string]bool {\n\tif shieldedVmConfig == nil {\n\t\treturn nil\n\t}\n\n\treturn []map[string]bool{{\n\t\t\"enable_secure_boot\": shieldedVmConfig.EnableSecureBoot,\n\t\t\"enable_vtpm\": shieldedVmConfig.EnableVtpm,\n\t\t\"enable_integrity_monitoring\": shieldedVmConfig.EnableIntegrityMonitoring,\n\t}}\n}\n\nfunc expandDisplayDevice(d TerraformResourceData) *computeBeta.DisplayDevice {\n\tif _, ok := d.GetOk(\"enable_display\"); !ok {\n\t\treturn nil\n\t}\n\treturn &computeBeta.DisplayDevice{\n\t\tEnableDisplay: d.Get(\"enable_display\").(bool),\n\t\tForceSendFields: []string{\"EnableDisplay\"},\n\t}\n}\n\nfunc flattenEnableDisplay(displayDevice *computeBeta.DisplayDevice) interface{} {\n\tif displayDevice == nil {\n\t\treturn nil\n\t}\n\n\treturn displayDevice.EnableDisplay\n}\n\n\/\/ Terraform doesn't correctly calculate changes on schema.Set, so we do it manually\n\/\/ https:\/\/github.com\/hashicorp\/terraform-plugin-sdk\/issues\/98\nfunc schedulingHasChange(d *schema.ResourceData) bool {\n\tif !d.HasChange(\"scheduling\") {\n\t\t\/\/ This doesn't work correctly, which is why this method exists\n\t\t\/\/ But it is here for posterity\n\t\treturn false\n\t}\n\to, n := d.GetChange(\"scheduling\")\n\toScheduling := o.([]interface{})[0].(map[string]interface{})\n\tnewScheduling := n.([]interface{})[0].(map[string]interface{})\n\toriginalNa := oScheduling[\"node_affinities\"].(*schema.Set)\n\tnewNa := newScheduling[\"node_affinities\"].(*schema.Set)\n\tif oScheduling[\"automatic_restart\"] != newScheduling[\"automatic_restart\"] {\n\t\treturn true\n\t}\n\n\tif oScheduling[\"preemptible\"] != newScheduling[\"preemptible\"] {\n\t\treturn true\n\t}\n\n\tif oScheduling[\"on_host_maintenance\"] != newScheduling[\"on_host_maintenance\"] {\n\t\treturn true\n\t}\n\n\treturn reflect.DeepEqual(newNa, originalNa)\n}\n<commit_msg>add min_node_cpus to compute_instance.scheduling (#3634) (#466)<commit_after>\/\/ <% autogen_exception -%>\n\npackage google\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/schema\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/helper\/validation\"\n\tcomputeBeta \"google.golang.org\/api\/compute\/v0.beta\"\n\t\"google.golang.org\/api\/googleapi\"\n)\n\nfunc instanceSchedulingNodeAffinitiesElemSchema() *schema.Resource {\n\treturn &schema.Resource{\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"key\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t},\n\t\t\t\"operator\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validation.StringInSlice([]string{\"IN\", \"NOT_IN\"}, false),\n\t\t\t},\n\t\t\t\"values\": {\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tElem: &schema.Schema{Type: schema.TypeString},\n\t\t\t\tSet: schema.HashString,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc expandAliasIpRanges(ranges []interface{}) []*computeBeta.AliasIpRange {\n\tipRanges := make([]*computeBeta.AliasIpRange, 0, len(ranges))\n\tfor _, raw := range ranges {\n\t\tdata := raw.(map[string]interface{})\n\t\tipRanges = append(ipRanges, &computeBeta.AliasIpRange{\n\t\t\tIpCidrRange: data[\"ip_cidr_range\"].(string),\n\t\t\tSubnetworkRangeName: data[\"subnetwork_range_name\"].(string),\n\t\t})\n\t}\n\treturn ipRanges\n}\n\nfunc flattenAliasIpRange(ranges []*computeBeta.AliasIpRange) []map[string]interface{} {\n\trangesSchema := make([]map[string]interface{}, 0, len(ranges))\n\tfor _, ipRange := range ranges {\n\t\trangesSchema = append(rangesSchema, map[string]interface{}{\n\t\t\t\"ip_cidr_range\": ipRange.IpCidrRange,\n\t\t\t\"subnetwork_range_name\": ipRange.SubnetworkRangeName,\n\t\t})\n\t}\n\treturn rangesSchema\n}\n\nfunc expandScheduling(v interface{}) (*computeBeta.Scheduling, error) {\n\tif v == nil {\n\t\t\/\/ We can't set default values for lists.\n\t\treturn &computeBeta.Scheduling{\n\t\t\tAutomaticRestart: googleapi.Bool(true),\n\t\t}, nil\n\t}\n\n\tls := v.([]interface{})\n\tif len(ls) == 0 {\n\t\t\/\/ We can't set default values for lists\n\t\treturn &computeBeta.Scheduling{\n\t\t\tAutomaticRestart: googleapi.Bool(true),\n\t\t}, nil\n\t}\n\n\tif len(ls) > 1 || ls[0] == nil {\n\t\treturn nil, fmt.Errorf(\"expected exactly one scheduling block\")\n\t}\n\n\toriginal := ls[0].(map[string]interface{})\n\tscheduling := &computeBeta.Scheduling{\n\t\tForceSendFields: make([]string, 0, 4),\n\t}\n\n\tif v, ok := original[\"automatic_restart\"]; ok {\n\t\tscheduling.AutomaticRestart = googleapi.Bool(v.(bool))\n\t\tscheduling.ForceSendFields = append(scheduling.ForceSendFields, \"AutomaticRestart\")\n\t}\n\n\tif v, ok := original[\"preemptible\"]; ok {\n\t\tscheduling.Preemptible = v.(bool)\n\t\tscheduling.ForceSendFields = append(scheduling.ForceSendFields, \"Preemptible\")\n\t}\n\n\tif v, ok := original[\"on_host_maintenance\"]; ok {\n\t\tscheduling.OnHostMaintenance = v.(string)\n\t\tscheduling.ForceSendFields = append(scheduling.ForceSendFields, \"OnHostMaintenance\")\n\t}\n\n\tif v, ok := original[\"node_affinities\"]; ok && v != nil {\n\t\tnaSet := v.(*schema.Set).List()\n\t\tscheduling.NodeAffinities = make([]*computeBeta.SchedulingNodeAffinity, len(ls))\n\t\tscheduling.ForceSendFields = append(scheduling.ForceSendFields, \"NodeAffinities\")\n\t\tfor _, nodeAffRaw := range naSet {\n\t\t\tif nodeAffRaw == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnodeAff := nodeAffRaw.(map[string]interface{})\n\t\t\ttransformed := &computeBeta.SchedulingNodeAffinity{\n\t\t\t\tKey: nodeAff[\"key\"].(string),\n\t\t\t\tOperator: nodeAff[\"operator\"].(string),\n\t\t\t\tValues: convertStringArr(nodeAff[\"values\"].(*schema.Set).List()),\n\t\t\t}\n\t\t\tscheduling.NodeAffinities = append(scheduling.NodeAffinities, transformed)\n\t\t}\n\t}\n\n<% unless version == 'ga' -%>\n\tif v, ok := original[\"min_node_cpus\"]; ok {\n\t\tscheduling.MinNodeCpus = int64(v.(int))\n\t}\n<% end -%>\n\n\treturn scheduling, nil\n}\n\nfunc flattenScheduling(resp *computeBeta.Scheduling) []map[string]interface{} {\n\tschedulingMap := map[string]interface{}{\n\t\t\"on_host_maintenance\": resp.OnHostMaintenance,\n\t\t\"preemptible\": resp.Preemptible,\n<% unless version == 'ga' -%>\n\t\t\"min_node_cpus\": resp.MinNodeCpus,\n<% end -%>\n\t}\n\n\tif resp.AutomaticRestart != nil {\n\t\tschedulingMap[\"automatic_restart\"] = *resp.AutomaticRestart\n\t}\n\n\tnodeAffinities := schema.NewSet(schema.HashResource(instanceSchedulingNodeAffinitiesElemSchema()), nil)\n\tfor _, na := range resp.NodeAffinities {\n\t\tnodeAffinities.Add(map[string]interface{}{\n\t\t\t\"key\": na.Key,\n\t\t\t\"operator\": na.Operator,\n\t\t\t\"values\": schema.NewSet(schema.HashString, convertStringArrToInterface(na.Values)),\n\t\t})\n\t}\n\tschedulingMap[\"node_affinities\"] = nodeAffinities\n\n\treturn []map[string]interface{}{schedulingMap}\n}\n\nfunc flattenAccessConfigs(accessConfigs []*computeBeta.AccessConfig) ([]map[string]interface{}, string) {\n\tflattened := make([]map[string]interface{}, len(accessConfigs))\n\tnatIP := \"\"\n\tfor i, ac := range accessConfigs {\n\t\tflattened[i] = map[string]interface{}{\n\t\t\t\"nat_ip\": ac.NatIP,\n\t\t\t\"network_tier\": ac.NetworkTier,\n\t\t}\n\t\tif ac.SetPublicPtr {\n\t\t\tflattened[i][\"public_ptr_domain_name\"] = ac.PublicPtrDomainName\n\t\t}\n\t\tif natIP == \"\" {\n\t\t\tnatIP = ac.NatIP\n\t\t}\n\t}\n\treturn flattened, natIP\n}\n\nfunc flattenNetworkInterfaces(d *schema.ResourceData, config *Config, networkInterfaces []*computeBeta.NetworkInterface) ([]map[string]interface{}, string, string, string, error) {\n\tflattened := make([]map[string]interface{}, len(networkInterfaces))\n\tvar region, internalIP, externalIP string\n\n\tfor i, iface := range networkInterfaces {\n\t\tvar ac []map[string]interface{}\n\t\tac, externalIP = flattenAccessConfigs(iface.AccessConfigs)\n\n\t\tsubnet, err := ParseSubnetworkFieldValue(iface.Subnetwork, d, config)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", \"\", \"\", err\n\t\t}\n\t\tregion = subnet.Region\n\n\t\tflattened[i] = map[string]interface{}{\n\t\t\t\"network_ip\": iface.NetworkIP,\n\t\t\t\"network\": ConvertSelfLinkToV1(iface.Network),\n\t\t\t\"subnetwork\": ConvertSelfLinkToV1(iface.Subnetwork),\n\t\t\t\"subnetwork_project\": subnet.Project,\n\t\t\t\"access_config\": ac,\n\t\t\t\"alias_ip_range\": flattenAliasIpRange(iface.AliasIpRanges),\n\t\t}\n\t\t\/\/ Instance template interfaces never have names, so they're absent\n\t\t\/\/ in the instance template network_interface schema. We want to use the\n\t\t\/\/ same flattening code for both resource types, so we avoid trying to\n\t\t\/\/ set the name field when it's not set at the GCE end.\n\t\tif iface.Name != \"\" {\n\t\t\tflattened[i][\"name\"] = iface.Name\n\t\t}\n\t\tif internalIP == \"\" {\n\t\t\tinternalIP = iface.NetworkIP\n\t\t}\n\t}\n\treturn flattened, region, internalIP, externalIP, nil\n}\n\nfunc expandAccessConfigs(configs []interface{}) []*computeBeta.AccessConfig {\n\tacs := make([]*computeBeta.AccessConfig, len(configs))\n\tfor i, raw := range configs {\n\t\tdata := raw.(map[string]interface{})\n\t\tacs[i] = &computeBeta.AccessConfig{\n\t\t\tType: \"ONE_TO_ONE_NAT\",\n\t\t\tNatIP: data[\"nat_ip\"].(string),\n\t\t\tNetworkTier: data[\"network_tier\"].(string),\n\t\t}\n\t\tif ptr, ok := data[\"public_ptr_domain_name\"]; ok && ptr != \"\" {\n\t\t\tacs[i].SetPublicPtr = true\n\t\t\tacs[i].PublicPtrDomainName = ptr.(string)\n\t\t}\n\t}\n\treturn acs\n}\n\nfunc expandNetworkInterfaces(d TerraformResourceData, config *Config) ([]*computeBeta.NetworkInterface, error) {\n\tconfigs := d.Get(\"network_interface\").([]interface{})\n\tifaces := make([]*computeBeta.NetworkInterface, len(configs))\n\tfor i, raw := range configs {\n\t\tdata := raw.(map[string]interface{})\n\n\t\tnetwork := data[\"network\"].(string)\n\t\tsubnetwork := data[\"subnetwork\"].(string)\n\t\tif network == \"\" && subnetwork == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"exactly one of network or subnetwork must be provided\")\n\t\t}\n\n\t\tnf, err := ParseNetworkFieldValue(network, d, config)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"cannot determine self_link for network %q: %s\", network, err)\n\t\t}\n\n\t\tsubnetProjectField := fmt.Sprintf(\"network_interface.%d.subnetwork_project\", i)\n\t\tsf, err := ParseSubnetworkFieldValueWithProjectField(subnetwork, subnetProjectField, d, config)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"cannot determine self_link for subnetwork %q: %s\", subnetwork, err)\n\t\t}\n\n\t\tifaces[i] = &computeBeta.NetworkInterface{\n\t\t\tNetworkIP: data[\"network_ip\"].(string),\n\t\t\tNetwork: nf.RelativeLink(),\n\t\t\tSubnetwork: sf.RelativeLink(),\n\t\t\tAccessConfigs: expandAccessConfigs(data[\"access_config\"].([]interface{})),\n\t\t\tAliasIpRanges: expandAliasIpRanges(data[\"alias_ip_range\"].([]interface{})),\n\t\t}\n\n\t}\n\treturn ifaces, nil\n}\n\nfunc flattenServiceAccounts(serviceAccounts []*computeBeta.ServiceAccount) []map[string]interface{} {\n\tresult := make([]map[string]interface{}, len(serviceAccounts))\n\tfor i, serviceAccount := range serviceAccounts {\n\t\tresult[i] = map[string]interface{}{\n\t\t\t\"email\": serviceAccount.Email,\n\t\t\t\"scopes\": schema.NewSet(stringScopeHashcode, convertStringArrToInterface(serviceAccount.Scopes)),\n\t\t}\n\t}\n\treturn result\n}\n\nfunc expandServiceAccounts(configs []interface{}) []*computeBeta.ServiceAccount {\n\taccounts := make([]*computeBeta.ServiceAccount, len(configs))\n\tfor i, raw := range configs {\n\t\tdata := raw.(map[string]interface{})\n\n\t\taccounts[i] = &computeBeta.ServiceAccount{\n\t\t\tEmail: data[\"email\"].(string),\n\t\t\tScopes: canonicalizeServiceScopes(convertStringSet(data[\"scopes\"].(*schema.Set))),\n\t\t}\n\n\t\tif accounts[i].Email == \"\" {\n\t\t\taccounts[i].Email = \"default\"\n\t\t}\n\t}\n\treturn accounts\n}\n\nfunc flattenGuestAccelerators(accelerators []*computeBeta.AcceleratorConfig) []map[string]interface{} {\n\tacceleratorsSchema := make([]map[string]interface{}, len(accelerators))\n\tfor i, accelerator := range accelerators {\n\t\tacceleratorsSchema[i] = map[string]interface{}{\n\t\t\t\"count\": accelerator.AcceleratorCount,\n\t\t\t\"type\": accelerator.AcceleratorType,\n\t\t}\n\t}\n\treturn acceleratorsSchema\n}\n\nfunc resourceInstanceTags(d TerraformResourceData) *computeBeta.Tags {\n\t\/\/ Calculate the tags\n\tvar tags *computeBeta.Tags\n\tif v := d.Get(\"tags\"); v != nil {\n\t\tvs := v.(*schema.Set)\n\t\ttags = new(computeBeta.Tags)\n\t\ttags.Items = make([]string, vs.Len())\n\t\tfor i, v := range vs.List() {\n\t\t\ttags.Items[i] = v.(string)\n\t\t}\n\n\t\ttags.Fingerprint = d.Get(\"tags_fingerprint\").(string)\n\t}\n\n\treturn tags\n}\n\nfunc expandShieldedVmConfigs(d TerraformResourceData) *computeBeta.ShieldedVmConfig {\n\tif _, ok := d.GetOk(\"shielded_instance_config\"); !ok {\n\t\treturn nil\n\t}\n\n\tprefix := \"shielded_instance_config.0\"\n\treturn &computeBeta.ShieldedVmConfig{\n\t\tEnableSecureBoot: d.Get(prefix + \".enable_secure_boot\").(bool),\n\t\tEnableVtpm: d.Get(prefix + \".enable_vtpm\").(bool),\n\t\tEnableIntegrityMonitoring: d.Get(prefix + \".enable_integrity_monitoring\").(bool),\n\t\tForceSendFields: []string{\"EnableSecureBoot\", \"EnableVtpm\", \"EnableIntegrityMonitoring\"},\n\t}\n}\n\nfunc flattenShieldedVmConfig(shieldedVmConfig *computeBeta.ShieldedVmConfig) []map[string]bool {\n\tif shieldedVmConfig == nil {\n\t\treturn nil\n\t}\n\n\treturn []map[string]bool{{\n\t\t\"enable_secure_boot\": shieldedVmConfig.EnableSecureBoot,\n\t\t\"enable_vtpm\": shieldedVmConfig.EnableVtpm,\n\t\t\"enable_integrity_monitoring\": shieldedVmConfig.EnableIntegrityMonitoring,\n\t}}\n}\n\nfunc expandDisplayDevice(d TerraformResourceData) *computeBeta.DisplayDevice {\n\tif _, ok := d.GetOk(\"enable_display\"); !ok {\n\t\treturn nil\n\t}\n\treturn &computeBeta.DisplayDevice{\n\t\tEnableDisplay: d.Get(\"enable_display\").(bool),\n\t\tForceSendFields: []string{\"EnableDisplay\"},\n\t}\n}\n\nfunc flattenEnableDisplay(displayDevice *computeBeta.DisplayDevice) interface{} {\n\tif displayDevice == nil {\n\t\treturn nil\n\t}\n\n\treturn displayDevice.EnableDisplay\n}\n\n\/\/ Terraform doesn't correctly calculate changes on schema.Set, so we do it manually\n\/\/ https:\/\/github.com\/hashicorp\/terraform-plugin-sdk\/issues\/98\nfunc schedulingHasChange(d *schema.ResourceData) bool {\n\tif !d.HasChange(\"scheduling\") {\n\t\t\/\/ This doesn't work correctly, which is why this method exists\n\t\t\/\/ But it is here for posterity\n\t\treturn false\n\t}\n\to, n := d.GetChange(\"scheduling\")\n\toScheduling := o.([]interface{})[0].(map[string]interface{})\n\tnewScheduling := n.([]interface{})[0].(map[string]interface{})\n\toriginalNa := oScheduling[\"node_affinities\"].(*schema.Set)\n\tnewNa := newScheduling[\"node_affinities\"].(*schema.Set)\n\tif oScheduling[\"automatic_restart\"] != newScheduling[\"automatic_restart\"] {\n\t\treturn true\n\t}\n\n\tif oScheduling[\"preemptible\"] != newScheduling[\"preemptible\"] {\n\t\treturn true\n\t}\n\n\tif oScheduling[\"on_host_maintenance\"] != newScheduling[\"on_host_maintenance\"] {\n\t\treturn true\n\t}\n\n<% unless version == 'ga' -%>\n\tif oScheduling[\"min_node_cpus\"] != newScheduling[\"min_node_cpus\"] {\n\t\treturn true\n\t}\n<% end -%>\n\n\treturn reflect.DeepEqual(newNa, originalNa)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strconv\"\n\t\"syscall\"\n)\n\nfunc main() {\n\n\tinitFlags()\n\n\t\/\/ check permissions\n\tif os.Getuid() != 0 && os.Geteuid() != 0 {\n\t\tfmt.Printf(\"npserver-daemon should be run as root, have uid=%d and euid=%d\\n\", os.Getuid(), os.Geteuid())\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ check start || sto\n\tif flags.Start == flags.Stop {\n\t\tfmt.Println(\"need --start or --stop flag\")\n\t}\n\n\tif flags.Start {\n\t\tstartDaemon()\n\t}\n\tif flags.Stop {\n\t\tstopDaemon()\n\t}\n\n\t\/\/ all good :)\n}\n\nfunc startDaemon() {\n\t\/\/ setup args for daemon call\n\targs := []string{\n\t\t\"--name=npserver\",\n\t\t\"--noconfig\",\n\t\t\"--errlog=\/var\/log\/npserver-daemon.log\",\n\t\t\"--output=\/var\/log\/npserver.log\",\n\t\tfmt.Sprintf(\"--pidfile=%s\", flags.PIDFile),\n\t\t\"--unsafe\",\n\t\t\"--\",\n\t\t\"\/usr\/local\/bin\/npserver\",\n\t}\n\n\t\/\/ append extra args to args\n\targs = append(args, extraArgs...)\n\n\t\/\/ start process\n\tproc, err := os.StartProcess(\"daemon\", args, &os.ProcAttr{\n\t\tFiles: []*os.File{os.Stdin, os.Stdout, os.Stderr},\n\t\tSys: &syscall.SysProcAttr{\n\t\t\tCredential: &syscall.Credential{\n\t\t\t\tUid: uint32(os.Geteuid()),\n\t\t\t\tGid: uint32(os.Getegid()),\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tfmt.Printf(\"os\/exec returned an error: '%s'\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ wait for daemon to be ready\n\t_, err = proc.Wait()\n\tif err != nil {\n\t\tfmt.Printf(\"proc.Wait() failed. %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc stopDaemon() {\n\tpidFile, err := os.Open(flags.PIDFile)\n\tif err != nil {\n\t\tif err == os.ErrNotExist {\n\t\t\tfmt.Printf(\"it looks like npserver is not running\")\n\t\t\tos.Exit(0)\n\t\t}\n\t\tfmt.Printf(\"error on opening pidfile: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tpidFileContents, err := ioutil.ReadAll(pidFile)\n\tpidFile.Close()\n\tif err != nil {\n\t\tfmt.Printf(\"error reading pidfile contents: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ convert pid string to pid int\n\tpid, err := strconv.Atoi(string(pidFileContents))\n\tif err != nil {\n\t\tfmt.Printf(\"error parsing pidfile contents: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ lookup process\n\tproc, err := os.FindProcess(pid)\n\tif err != nil {\n\t\tfmt.Printf(\"error finding process with pid %d: %s\\n\", pid, err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ signal process to stop\n\terr = proc.Signal(os.Interrupt)\n\tif err != nil {\n\t\tfmt.Printf(\"error sending interrupt signal to npserver: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ wait until process is done\n\tstate, err := proc.Wait()\n\tif err != nil {\n\t\tfmt.Printf(\"error waiting for process to stop: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tif !state.Exited() || !state.Success() {\n\t\tfmt.Printf(\"npserver process exited badly\")\n\t\tos.Exit(1)\n\t}\n\t\n\t\/\/ remove pid file\n\terr := os.Remove(flags.PIDFile)\n\tif err != nil {\n\t\tfmt.Printf(\"error removing pid file: %s\\n\", err)\n\t}\n}\n<commit_msg>bugfix<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strconv\"\n\t\"syscall\"\n)\n\nfunc main() {\n\n\tinitFlags()\n\n\t\/\/ check permissions\n\tif os.Getuid() != 0 && os.Geteuid() != 0 {\n\t\tfmt.Printf(\"npserver-daemon should be run as root, have uid=%d and euid=%d\\n\", os.Getuid(), os.Geteuid())\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ check start || sto\n\tif flags.Start == flags.Stop {\n\t\tfmt.Println(\"need --start or --stop flag\")\n\t}\n\n\tif flags.Start {\n\t\tstartDaemon()\n\t}\n\tif flags.Stop {\n\t\tstopDaemon()\n\t}\n\n\t\/\/ all good :)\n}\n\nfunc startDaemon() {\n\t\/\/ setup args for daemon call\n\targs := []string{\n\t\t\"--name=npserver\",\n\t\t\"--noconfig\",\n\t\t\"--errlog=\/var\/log\/npserver-daemon.log\",\n\t\t\"--output=\/var\/log\/npserver.log\",\n\t\tfmt.Sprintf(\"--pidfile=%s\", flags.PIDFile),\n\t\t\"--unsafe\",\n\t\t\"--\",\n\t\t\"\/usr\/local\/bin\/npserver\",\n\t}\n\n\t\/\/ append extra args to args\n\targs = append(args, extraArgs...)\n\n\t\/\/ start process\n\tproc, err := os.StartProcess(\"daemon\", args, &os.ProcAttr{\n\t\tFiles: []*os.File{os.Stdin, os.Stdout, os.Stderr},\n\t\tSys: &syscall.SysProcAttr{\n\t\t\tCredential: &syscall.Credential{\n\t\t\t\tUid: uint32(os.Geteuid()),\n\t\t\t\tGid: uint32(os.Getegid()),\n\t\t\t},\n\t\t},\n\t})\n\tif err != nil {\n\t\tfmt.Printf(\"os\/exec returned an error: '%s'\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ wait for daemon to be ready\n\t_, err = proc.Wait()\n\tif err != nil {\n\t\tfmt.Printf(\"proc.Wait() failed. %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc stopDaemon() {\n\tpidFile, err := os.Open(flags.PIDFile)\n\tif err != nil {\n\t\tif err == os.ErrNotExist {\n\t\t\tfmt.Printf(\"it looks like npserver is not running\")\n\t\t\tos.Exit(0)\n\t\t}\n\t\tfmt.Printf(\"error on opening pidfile: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tpidFileContents, err := ioutil.ReadAll(pidFile)\n\tpidFile.Close()\n\tif err != nil {\n\t\tfmt.Printf(\"error reading pidfile contents: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ convert pid string to pid int\n\tpid, err := strconv.Atoi(string(pidFileContents))\n\tif err != nil {\n\t\tfmt.Printf(\"error parsing pidfile contents: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ lookup process\n\tproc, err := os.FindProcess(pid)\n\tif err != nil {\n\t\tfmt.Printf(\"error finding process with pid %d: %s\\n\", pid, err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ signal process to stop\n\terr = proc.Signal(os.Interrupt)\n\tif err != nil {\n\t\tfmt.Printf(\"error sending interrupt signal to npserver: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ wait until process is done\n\tstate, err := proc.Wait()\n\tif err != nil {\n\t\tfmt.Printf(\"error waiting for process to stop: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tif !state.Exited() || !state.Success() {\n\t\tfmt.Printf(\"npserver process exited badly\")\n\t\tos.Exit(1)\n\t}\n\n\t\/\/ remove pid file\n\terr = os.Remove(flags.PIDFile)\n\tif err != nil {\n\t\tfmt.Printf(\"error removing pid file: %s\\n\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/user\"\n\t\"strings\"\n)\n\n\/\/ Size of internal buffer for reading. 4kB should be enough to cover even the\n\/\/ poorest implementation's idea of a longest permissible line.\nconst maxlinesize = 4096\n\n\/\/ Maximum length of a line we output - anything else will be wrapped\nconst lineLength = 80\n\n\/\/ Size of the ServerMessage buffer\nconst serverMsgBufSize = 10\n\n\/\/ A message sent by the server\ntype ServerMessage struct {\n\tFrom string\n\tCode string\n\tTo string\n\n\t\/\/ The raw content of the message, excluding From, Code, and To\n\tRaw string\n\n\t\/\/ The full faw content of the message\n\tFull string\n\n\t\/\/ All other fields.\n\tFields []string\n}\n\ntype Client struct {\n\tconnection net.Conn\n\tserverName string\n\tnick string\n\tserver chan ServerMessage\n\tout io.Writer\n}\n\nfunc nextField(line string) (string, string, bool) {\n\tfs := strings.SplitN(line, \" \", 2)\n\tif len(fs) > 1 {\n\t\treturn fs[0], fs[1], true\n\t}\n\treturn fs[0], \"\", false\n}\n\nfunc parseServerMessage(line string) (m ServerMessage) {\n\tm.Full = line\n\tm.From, line, _ = nextField(line)\n\tm.From = m.From[1:]\n\tm.Code, line, _ = nextField(line)\n\tm.To, line, _ = nextField(line)\n\tm.Raw = line\n\tm.Fields = make([]string, 0)\n\n\tswitch m.Code {\n\tcase \"NOTICE\":\n\tcase \"PRIVMSG\":\n\t\tm.Fields = append(m.Fields, line[1:])\n\tcase RPL_BOUNCE:\n\tdefault:\n\t\t\/\/ fill in variable fields\n\t\tvar f string\n\t\tf, line, b := nextField(line)\n\t\tfor b {\n\t\t\tif f[0] == ':' {\n\t\t\t\tif f[1] == '-' {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\t\/\/ read until f[len(f)-1] is a semicolon\n\t\t\t\tfor b && len(f) > 0 && f[len(f)-1] != ':' {\n\t\t\t\t\tf, line, b = nextField(line)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tm.Fields = append(m.Fields, f)\n\t\t\t}\n\t\t\tf, line, b = nextField(line)\n\t\t}\n\t}\n\treturn\n}\n\nfunc getUname() string {\n\tuid := os.Getuid()\n\tuname := \"go\"\n\tu, e := user.LookupId(uid)\n\tif e != nil {\n\t\tlog.Print(\"WARNING: user.Lookupid: \", e)\n\t} else {\n\t\tuname = u.Username\n\t}\n\treturn uname\n}\n\nfunc getHostname() string {\n\tn, e := os.Hostname()\n\tif e != nil {\n\t\tlog.Print(\"WARNING: os.Hostname: \", e)\n\t\tn = \"*\"\n\t}\n\treturn n\n}\n\nfunc getServername() string {\n\treturn \"*\"\n}\n\nfunc Connect(serverName, nick, realName string) (Client, error) {\n\tconn, err := Dial(serverName)\n\tif err != nil {\n\t\treturn conn, err\n\t}\n\n\tconn.Pass(\"notmeaningfull\")\n\tconn.User(getUname(), getHostname(), getServername(), realName)\n\tconn.Nick(nick)\n\treturn conn, nil\n}\n\n\/\/ Low-level method to connect to server - normal clients should not need this.\n\/\/ Use Connect() instead.\nfunc Dial(server string) (conn Client, err error) {\n\tnconn, err := net.Dial(\"tcp\", server)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tconn.connection = nconn\n\tconn.serverName = server\n\tconn.server = make(chan ServerMessage, serverMsgBufSize)\n\tconn.out = conn.connection\n\n\t\/\/ spawn the connection reader\n\tgo func() {\n\t\tr, err := bufio.NewReaderSize(conn.connection, maxlinesize)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tline, beFalse, err := r.ReadLine()\n\t\tfor err == nil && !beFalse {\n\t\t\tconn.server <- parseServerMessage(string(line))\n\t\t\tline, beFalse, err = r.ReadLine()\n\t\t}\n\t\tif beFalse {\n\t\t\tlog.Fatal(\"Line too long\")\n\t\t} else {\n\t\t\tlog.Fatal(err) \/\/ TODO handle me better\n\t\t}\n\t}()\n\treturn\n}\n\nfunc (c Client) Pass(pass string) {\n\tfmt.Fprintf(c.out, \"PASS %s\\n\", pass)\n}\n\n\/\/ Low-level method to send USER command - normal clients should not need this\nfunc (c Client) User(user, host, server, name string) {\n\tfmt.Fprintf(c.out, \"USER %s %s %s :%s\\n\", user, host, server, name)\n}\n\n\/\/ Low-level method to send NICK command - normal clients should not need this.\nfunc (c Client) Nick(nick string) {\n\tfmt.Fprintf(c.out, \"NICK %s\\n\", nick)\n\tc.nick = nick \/\/ TODO fix possible race condition\n}\n\n\/\/ Listen for messages coming in and return them on the returned channel. Also\n\/\/ handles low-level information from the server correctly, making information\n\/\/ available in the Client object as appropriate.\nfunc (c Client) Listen() (<-chan Message) {\n\tch := make(chan Message)\n\thandleMessage := func(sm ServerMessage) {\n\t\tswitch sm.Code {\n\t\tcase \"NOTICE\":\n\t\t\tch <- Message{\n\t\t\t\tKind: MSG_NOTICE,\n\t\t\t\tFrom: sm.From,\n\t\t\t\tTo: sm.To,\n\t\t\t\tText: sm.Raw[1:],\n\t\t\t}\n\t\tcase \"PRIVMSG\":\n\t\t\tch <- Message{\n\t\t\t\tKind: MSG_PRIVMSG,\n\t\t\t\tFrom: sm.From,\n\t\t\t\tTo: sm.To,\n\t\t\t\tText: sm.Raw[1:],\n\t\t\t}\n\t\tcase RPL_MOTD:\n\t\tcase RPL_MOTDSTART:\n\t\tcase RPL_ENDOFMOTD:\n\t\tcase RPL_WELCOME:\n\t\tcase RPL_YOURHOST:\n\t\tcase RPL_CREATED:\n\t\tcase RPL_MYINFO:\n\t\tcase RPL_BOUNCE:\n\t\tcase RPL_LUSERCLIENT:\n\t\tcase RPL_LUSEROP:\n\t\tcase RPL_LUSERUNKNOWN:\n\t\tcase RPL_LUSERCHANNELS:\n\t\tcase RPL_LUSERME:\n\t\tcase \"MODE\":\n\t\tdefault:\n\t\t\tlog.Printf(\"Unhandled message %s\\t\\t%s\", sm.Code, sm.Full)\n\t\t}\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tvar m ServerMessage\n\t\t\tselect {\n\t\t\t\/\/ TODO allow input on stdin\n\t\t\tcase m = <-c.server:\n\t\t\t\thandleMessage(m)\n\t\t\t}\n\t\t}\n\t}()\n\treturn ch\n}\n\n\/\/ Join the specified channel\nfunc (c Client) Join(ch string) {\n\t\/\/ TODO don't join until 001 is received\n\tlog.Print(\"JOIN \", ch)\n\tfmt.Fprintf(c.out, \"JOIN %s\\n\", ch)\n}\n\n\/\/ Join the specified channels\nfunc (c Client) JoinChannels(chs []string) {\n\tfor _, ch := range chs {\n\t\tc.Join(ch)\n\t}\n}\n\n\/\/ Leave the specified channel\nfunc (c Client) Part(ch string) {\n\tlog.Print(\"PART \", ch)\n\tfmt.Fprintf(c.out, \"PART %s\\n\", ch)\n}\n\nfunc (c Client) Quit(msg string) {\n\tlog.Print(\"QUIT :\", msg)\n\tfmt.Fprintf(c.out, \"QUIT :%s\\n\", msg)\n}\n\nfunc (c Client) PrivMsg(to, msg string) {\n\tfmt.Fprintf(c.out, \"PRIVMSG %s :%s\\n\", to, msg)\n}\n<commit_msg>Handling PINGs appropriately<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\t\"os\/user\"\n\t\"strings\"\n)\n\n\/\/ Size of internal buffer for reading. 4kB should be enough to cover even the\n\/\/ poorest implementation's idea of a longest permissible line.\nconst maxlinesize = 4096\n\n\/\/ Maximum length of a line we output - anything else will be wrapped\nconst lineLength = 80\n\n\/\/ Size of the ServerMessage buffer\nconst serverMsgBufSize = 10\n\n\/\/ A message sent by the server\ntype ServerMessage struct {\n\tFrom string\n\tCode string\n\tTo string\n\n\t\/\/ The raw content of the message, excluding From, Code, and To\n\tRaw string\n\n\t\/\/ The full faw content of the message\n\tFull string\n\n\t\/\/ All other fields.\n\tFields []string\n}\n\ntype Client struct {\n\tconnection net.Conn\n\tserverName string\n\tnick string\n\tserver chan ServerMessage\n\tout io.Writer\n}\n\nfunc nextField(line string) (string, string, bool) {\n\tfs := strings.SplitN(line, \" \", 2)\n\tif len(fs) > 1 {\n\t\treturn fs[0], fs[1], true\n\t}\n\treturn fs[0], \"\", false\n}\n\nfunc parseServerMessage(line string) (m ServerMessage) {\n\tm.Full = line\n\tm.From, line, _ = nextField(line)\n\tif m.From[0] != ':' {\n\t\tm.Code = m.From\n\t\tm.From = \"\"\n\t} else {\n\t\tm.From = m.From[1:]\n\t\tm.Code, line, _ = nextField(line)\n\t}\n\tm.To, line, _ = nextField(line)\n\tm.Raw = line\n\tm.Fields = make([]string, 0)\n\n\tswitch m.Code {\n\tcase \"NOTICE\":\n\tcase \"PRIVMSG\":\n\t\tm.Fields = append(m.Fields, line[1:])\n\tcase RPL_BOUNCE:\n\tdefault:\n\t\t\/\/ fill in variable fields\n\t\tvar f string\n\t\tf, line, b := nextField(line)\n\t\tfor b {\n\t\t\tif f[0] == ':' {\n\t\t\t\tif f[1] == '-' {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\t\/\/ read until f[len(f)-1] is a semicolon\n\t\t\t\tfor b && len(f) > 0 && f[len(f)-1] != ':' {\n\t\t\t\t\tf, line, b = nextField(line)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tm.Fields = append(m.Fields, f)\n\t\t\t}\n\t\t\tf, line, b = nextField(line)\n\t\t}\n\t}\n\treturn\n}\n\nfunc getUname() string {\n\tuid := os.Getuid()\n\tuname := \"go\"\n\tu, e := user.LookupId(uid)\n\tif e != nil {\n\t\tlog.Print(\"WARNING: user.Lookupid: \", e)\n\t} else {\n\t\tuname = u.Username\n\t}\n\treturn uname\n}\n\nfunc getHostname() string {\n\tn, e := os.Hostname()\n\tif e != nil {\n\t\tlog.Print(\"WARNING: os.Hostname: \", e)\n\t\tn = \"*\"\n\t}\n\treturn n\n}\n\nfunc getServername() string {\n\treturn \"*\"\n}\n\nfunc Connect(serverName, nick, realName string) (Client, error) {\n\tconn, err := Dial(serverName)\n\tif err != nil {\n\t\treturn conn, err\n\t}\n\n\tconn.Pass(\"notmeaningfull\")\n\tconn.User(getUname(), getHostname(), getServername(), realName)\n\tconn.Nick(nick)\n\treturn conn, nil\n}\n\n\/\/ Low-level method to connect to server - normal clients should not need this.\n\/\/ Use Connect() instead.\nfunc Dial(server string) (conn Client, err error) {\n\tnconn, err := net.Dial(\"tcp\", server)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tconn.connection = nconn\n\tconn.serverName = server\n\tconn.server = make(chan ServerMessage, serverMsgBufSize)\n\tconn.out = conn.connection\n\n\t\/\/ spawn the connection reader\n\tgo func() {\n\t\tr, err := bufio.NewReaderSize(conn.connection, maxlinesize)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tline, beFalse, err := r.ReadLine()\n\t\tfor err == nil && !beFalse {\n\t\t\tconn.server <- parseServerMessage(string(line))\n\t\t\tline, beFalse, err = r.ReadLine()\n\t\t}\n\t\tif beFalse {\n\t\t\tlog.Fatal(\"Line too long\")\n\t\t} else {\n\t\t\tlog.Fatal(err) \/\/ TODO handle me better\n\t\t}\n\t}()\n\treturn\n}\n\nfunc (c Client) Pass(pass string) {\n\tfmt.Fprintf(c.out, \"PASS %s\\n\", pass)\n}\n\n\/\/ Low-level method to send USER command - normal clients should not need this\nfunc (c Client) User(user, host, server, name string) {\n\tfmt.Fprintf(c.out, \"USER %s %s %s :%s\\n\", user, host, server, name)\n}\n\n\/\/ Low-level method to send NICK command - normal clients should not need this.\nfunc (c Client) Nick(nick string) {\n\tfmt.Fprintf(c.out, \"NICK %s\\n\", nick)\n\tc.nick = nick \/\/ TODO fix possible race condition\n}\n\n\/\/ Listen for messages coming in and return them on the returned channel. Also\n\/\/ handles low-level information from the server correctly, making information\n\/\/ available in the Client object as appropriate.\nfunc (c Client) Listen() (<-chan Message) {\n\tch := make(chan Message)\n\thandleMessage := func(sm ServerMessage) {\n\t\tswitch sm.Code {\n\t\tcase \"NOTICE\":\n\t\t\tch <- Message{\n\t\t\t\tKind: MSG_NOTICE,\n\t\t\t\tFrom: sm.From,\n\t\t\t\tTo: sm.To,\n\t\t\t\tText: sm.Raw[1:],\n\t\t\t}\n\t\tcase \"PRIVMSG\":\n\t\t\tch <- Message{\n\t\t\t\tKind: MSG_PRIVMSG,\n\t\t\t\tFrom: sm.From,\n\t\t\t\tTo: sm.To,\n\t\t\t\tText: sm.Raw[1:],\n\t\t\t}\n\t\tcase \"PING\":\n\t\t\tfmt.Fprintf(c.out, \"PONG :%s\\n\", sm.To)\n\t\tcase RPL_MOTD:\n\t\tcase RPL_MOTDSTART:\n\t\tcase RPL_ENDOFMOTD:\n\t\tcase RPL_WELCOME:\n\t\tcase RPL_YOURHOST:\n\t\tcase RPL_CREATED:\n\t\tcase RPL_MYINFO:\n\t\tcase RPL_BOUNCE:\n\t\tcase RPL_LUSERCLIENT:\n\t\tcase RPL_LUSEROP:\n\t\tcase RPL_LUSERUNKNOWN:\n\t\tcase RPL_LUSERCHANNELS:\n\t\tcase RPL_LUSERME:\n\t\tcase \"MODE\":\n\t\tdefault:\n\t\t\tlog.Printf(\"Unhandled message %s\\t\\t%s\", sm.Code, sm.Full)\n\t\t}\n\t}\n\tgo func() {\n\t\tfor {\n\t\t\tvar m ServerMessage\n\t\t\tselect {\n\t\t\t\/\/ TODO allow input on stdin\n\t\t\tcase m = <-c.server:\n\t\t\t\thandleMessage(m)\n\t\t\t}\n\t\t}\n\t}()\n\treturn ch\n}\n\n\/\/ Join the specified channel\nfunc (c Client) Join(ch string) {\n\t\/\/ TODO don't join until 001 is received\n\tlog.Print(\"JOIN \", ch)\n\tfmt.Fprintf(c.out, \"JOIN %s\\n\", ch)\n}\n\n\/\/ Join the specified channels\nfunc (c Client) JoinChannels(chs []string) {\n\tfor _, ch := range chs {\n\t\tc.Join(ch)\n\t}\n}\n\n\/\/ Leave the specified channel\nfunc (c Client) Part(ch string) {\n\tlog.Print(\"PART \", ch)\n\tfmt.Fprintf(c.out, \"PART %s\\n\", ch)\n}\n\nfunc (c Client) Quit(msg string) {\n\tlog.Print(\"QUIT :\", msg)\n\tfmt.Fprintf(c.out, \"QUIT :%s\\n\", msg)\n}\n\nfunc (c Client) PrivMsg(to, msg string) {\n\tfmt.Fprintf(c.out, \"PRIVMSG %s :%s\\n\", to, msg)\n}\n<|endoftext|>"} {"text":"<commit_before>package twitch\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n)\n\ntype ircMessage struct {\n\tRaw string\n\tTags map[string]string\n\tSource ircMessageSource\n\tCommand string\n\tParams []string\n}\n\ntype ircMessageSource struct {\n\tNickname string\n\tUsername string\n\tHost string\n}\n\nfunc parseIRCMessage(line string) (*ircMessage, error) {\n\tmessage := ircMessage{\n\t\tRaw: line,\n\t\tTags: make(map[string]string),\n\t\tParams: []string{},\n\t}\n\n\tsplit := strings.Split(line, \" \")\n\tindex := 0\n\n\tif strings.HasPrefix(split[index], \"@\") {\n\t\tmessage.Tags = parseIRCTags(split[index])\n\t\tindex++\n\t}\n\n\tif index >= len(split) {\n\t\treturn &message, fmt.Errorf(\"parseIRCMessage: partial message\")\n\t}\n\n\tif strings.HasPrefix(split[index], \":\") {\n\t\tmessage.Source = *parseIRCMessageSource(split[index])\n\t\tindex++\n\t}\n\n\tif index >= len(split) {\n\t\treturn &message, fmt.Errorf(\"parseIRCMessage: no command\")\n\t}\n\n\tmessage.Command = split[index]\n\tindex++\n\n\tif index >= len(split) {\n\t\treturn &message, nil\n\t}\n\n\tvar params []string\n\tfor i, v := range split[index:] {\n\t\tif strings.HasPrefix(v, \":\") {\n\t\t\tv = strings.Join(split[index+i:], \" \")\n\t\t\tv = strings.TrimPrefix(v, \":\")\n\t\t\tparams = append(params, v)\n\t\t\tbreak\n\t\t}\n\n\t\tparams = append(params, v)\n\t}\n\n\tmessage.Params = params\n\n\treturn &message, nil\n}\n\nfunc parseIRCTags(rawTags string) map[string]string {\n\ttags := make(map[string]string)\n\n\trawTags = strings.TrimPrefix(rawTags, \"@\")\n\n\tfor _, tag := range strings.Split(rawTags, \";\") {\n\t\tif !strings.Contains(tag, \"=\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tpair := strings.SplitN(tag, \"=\", 2)\n\t\tkey := pair[0]\n\n\t\tvar value string\n\t\tif len(pair) == 2 {\n\t\t\tvalue = parseIRCTagValue(pair[1])\n\t\t}\n\n\t\ttags[key] = value\n\t}\n\n\treturn tags\n}\n\nvar escapeCharacters = map[string]string{\n\t\"\\\\s\": \" \",\n\t\"\\\\n\": \"\",\n\t\"\\\\:\": \";\",\n\t\"\\\\\\\\\": \"\\\\\",\n}\n\nfunc parseIRCTagValue(rawValue string) string {\n\tfor char, value := range escapeCharacters {\n\t\trawValue = strings.Replace(rawValue, char, value, -1)\n\t}\n\n\trawValue = strings.TrimSuffix(rawValue, \"\\\\\")\n\trawValue = strings.TrimSpace(rawValue)\n\n\treturn rawValue\n}\n\nfunc parseIRCMessageSource(rawSource string) *ircMessageSource {\n\tvar source ircMessageSource\n\n\trawSource = strings.TrimPrefix(rawSource, \":\")\n\n\tregex := regexp.MustCompile(`!|@`)\n\tsplit := regex.Split(rawSource, -1)\n\n\tif len(split) == 1 {\n\t\tsource.Host = split[0]\n\t} else {\n\t\tsource.Nickname = split[0]\n\t\tsource.Username = split[1]\n\t\tsource.Host = split[2]\n\t}\n\n\treturn &source\n}\n<commit_msg>Minor IRC parsing fixes<commit_after>package twitch\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n)\n\ntype ircMessage struct {\n\tRaw string\n\tTags map[string]string\n\tSource ircMessageSource\n\tCommand string\n\tParams []string\n}\n\ntype ircMessageSource struct {\n\tNickname string\n\tUsername string\n\tHost string\n}\n\nfunc parseIRCMessage(line string) (*ircMessage, error) {\n\tmessage := ircMessage{\n\t\tRaw: line,\n\t\tTags: make(map[string]string),\n\t\tParams: []string{},\n\t}\n\n\tsplit := strings.Split(line, \" \")\n\tindex := 0\n\n\tif strings.HasPrefix(split[index], \"@\") {\n\t\tmessage.Tags = parseIRCTags(split[index])\n\t\tindex++\n\t}\n\n\tif index >= len(split) {\n\t\treturn &message, fmt.Errorf(\"parseIRCMessage: partial message\")\n\t}\n\n\tif strings.HasPrefix(split[index], \":\") {\n\t\tmessage.Source = *parseIRCMessageSource(split[index])\n\t\tindex++\n\t}\n\n\tif index >= len(split) {\n\t\treturn &message, fmt.Errorf(\"parseIRCMessage: no command\")\n\t}\n\n\tmessage.Command = split[index]\n\tindex++\n\n\tif index >= len(split) {\n\t\treturn &message, nil\n\t}\n\n\tvar params []string\n\tfor i, v := range split[index:] {\n\t\tif strings.HasPrefix(v, \":\") {\n\t\t\tv = strings.Join(split[index+i:], \" \")\n\t\t\tv = strings.TrimPrefix(v, \":\")\n\t\t\tparams = append(params, v)\n\t\t\tbreak\n\t\t}\n\n\t\tparams = append(params, v)\n\t}\n\n\tmessage.Params = params\n\n\treturn &message, nil\n}\n\nfunc parseIRCTags(rawTags string) map[string]string {\n\ttags := make(map[string]string)\n\n\trawTags = strings.TrimPrefix(rawTags, \"@\")\n\n\tfor _, tag := range strings.Split(rawTags, \";\") {\n\t\tpair := strings.SplitN(tag, \"=\", 2)\n\t\tkey := pair[0]\n\n\t\tvar value string\n\t\tif len(pair) == 2 {\n\t\t\tvalue = parseIRCTagValue(pair[1])\n\t\t}\n\n\t\ttags[key] = value\n\t}\n\n\treturn tags\n}\n\nvar escapeCharacters = map[string]string{\n\t\"\\\\s\": \" \",\n\t\"\\\\n\": \"\",\n\t\"\\\\r\": \"\",\n\t\"\\\\:\": \";\",\n\t\"\\\\\\\\\": \"\\\\\",\n}\n\nfunc parseIRCTagValue(rawValue string) string {\n\tfor char, value := range escapeCharacters {\n\t\trawValue = strings.Replace(rawValue, char, value, -1)\n\t}\n\n\trawValue = strings.TrimSuffix(rawValue, \"\\\\\")\n\n\t\/\/ Some Twitch values can end with a trailing \\s\n\t\/\/ Example: \"system-msg=An\\sanonymous\\suser\\sgifted\\sa\\sTier\\s1\\ssub\\sto\\sTenureCalculator!\\s\"\n\trawValue = strings.TrimSpace(rawValue)\n\n\treturn rawValue\n}\n\nfunc parseIRCMessageSource(rawSource string) *ircMessageSource {\n\tvar source ircMessageSource\n\n\trawSource = strings.TrimPrefix(rawSource, \":\")\n\n\tregex := regexp.MustCompile(`!|@`)\n\tsplit := regex.Split(rawSource, -1)\n\n\tif len(split) == 1 {\n\t\tsource.Host = split[0]\n\t} else {\n\t\tsource.Nickname = split[0]\n\t\tsource.Username = split[1]\n\t\tsource.Host = split[2]\n\t}\n\n\treturn &source\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/golang\/glog\"\n\t_ \"github.com\/golang\/glog\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/pflag\"\n\n\t\"github.com\/open-policy-agent\/frameworks\/constraint\/pkg\/compiler\/regorewriter\"\n)\n\nvar rootCmd = &cobra.Command{\n\tUse: \"rewrite-compatibility\",\n\tShort: \"rewrite-compatibility is the constraint framework package path rewriter compatibility tool\",\n\tLong: `\nThe rewrite-compatibility exists to dump out the rego source as it would exist after a rewrite in\nthe constraint framework. The idea is to allow user visibility into what's going on as well as\nfacilitate developer debugging when making changes to the rewriter.\n\nExample usage for transforming the forseti-security\/policy-library constraints:\ngit clone git@github.com:forseti-security\/policy-library.git\ngo run .\/cmd\/rewrite-compatibility\/main.go \\\n --ct policy-library\/validator \\\n --lib policy-library\/lib \\\n --input .\/policy-library \\\n --output .\/policy-library-rewrite \\\n --pkgPrefix x.y.z \\\n --alsologtostderr\nopa test -v rewrite\/lib\/ rewrite\/validator\/\nmeld policy-library\/lib\/ rewrite\/lib\/\nmeld policy-library\/validator\/ rewrite\/validator\/\n`,\n\tRunE: rootCmdFn,\n}\n\nvar (\n\tcts []string\n\tlibs []string\n\tpkgPrefix string\n\toldRoot string\n\tnewRoot string\n)\n\nfunc init() {\n\trootCmd.Flags().StringSliceVar(\n\t\t&cts, \"ct\", nil, \"The rego for the constraint template body\")\n\trootCmd.Flags().StringSliceVar(\n\t\t&libs, \"lib\", nil, \"Libs associated with the rego, can be file or directory\")\n\trootCmd.Flags().StringVar(\n\t\t&pkgPrefix, \"pkgPrefix\", \"\", \"The new prefix to insert into the package path\")\n\trootCmd.Flags().StringVarP(\n\t\t&oldRoot, \"input\", \"i\", \"\", \"The input 'root' directory, outputs will be\")\n\trootCmd.Flags().StringVarP(\n\t\t&newRoot, \"output\", \"o\", \"\", \"The output 'root' directory, outputs will be\")\n\tpflag.CommandLine.AddGoFlagSet(flag.CommandLine)\n}\n\nfunc compileSrcs(\n\tcts []string,\n\tlibs []string,\n\tnewPkgPrefix string,\n\toldRoot string,\n\tnewRoot string) error {\n\tif len(cts) == 0 && len(libs) == 0 {\n\t\treturn errors.Errorf(\"must specify --ct or --lib or both\")\n\t}\n\tif (oldRoot == \"\") != (newRoot == \"\") {\n\t\treturn errors.Errorf(\"--input and --output must be empty or non empty together\")\n\t}\n\n\tregoRewriter, err := regorewriter.New(\n\t\tregorewriter.NewPackagePrefixer(newPkgPrefix),\n\t\t[]string{\n\t\t\t\"data.lib\",\n\t\t},\n\t\t[]string{\n\t\t\t\"data.inventory\",\n\t\t},\n\t)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, ct := range cts {\n\t\tif err := regoRewriter.AddBaseFromFs(ct); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor _, libPath := range libs {\n\t\tif err := regoRewriter.AddLibFromFs(libPath); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tsrcs, err := regoRewriter.Rewrite()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif oldRoot != \"\" && newRoot != \"\" {\n\t\tif err := srcs.Reparent(oldRoot, newRoot); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := srcs.Write(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tglog.Info(\"SUCCESS!\")\n\treturn nil\n}\n\nfunc rootCmdFn(cmd *cobra.Command, args []string) error {\n\treturn compileSrcs(cts, libs, pkgPrefix, oldRoot, newRoot)\n}\n\nfunc main() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Printf(\"%+v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n<commit_msg>Fix import path.<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com\/golang\/glog\"\n\t_ \"github.com\/golang\/glog\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/pflag\"\n\n\t\"github.com\/open-policy-agent\/frameworks\/constraint\/pkg\/regorewriter\"\n)\n\nvar rootCmd = &cobra.Command{\n\tUse: \"rewrite-compatibility\",\n\tShort: \"rewrite-compatibility is the constraint framework package path rewriter compatibility tool\",\n\tLong: `\nThe rewrite-compatibility exists to dump out the rego source as it would exist after a rewrite in\nthe constraint framework. The idea is to allow user visibility into what's going on as well as\nfacilitate developer debugging when making changes to the rewriter.\n\nExample usage for transforming the forseti-security\/policy-library constraints:\ngit clone git@github.com:forseti-security\/policy-library.git\ngo run .\/cmd\/rewrite-compatibility\/main.go \\\n --ct policy-library\/validator \\\n --lib policy-library\/lib \\\n --input .\/policy-library \\\n --output .\/policy-library-rewrite \\\n --pkgPrefix x.y.z \\\n --alsologtostderr\nopa test -v rewrite\/lib\/ rewrite\/validator\/\nmeld policy-library\/lib\/ rewrite\/lib\/\nmeld policy-library\/validator\/ rewrite\/validator\/\n`,\n\tRunE: rootCmdFn,\n}\n\nvar (\n\tcts []string\n\tlibs []string\n\tpkgPrefix string\n\toldRoot string\n\tnewRoot string\n)\n\nfunc init() {\n\trootCmd.Flags().StringSliceVar(\n\t\t&cts, \"ct\", nil, \"The rego for the constraint template body\")\n\trootCmd.Flags().StringSliceVar(\n\t\t&libs, \"lib\", nil, \"Libs associated with the rego, can be file or directory\")\n\trootCmd.Flags().StringVar(\n\t\t&pkgPrefix, \"pkgPrefix\", \"\", \"The new prefix to insert into the package path\")\n\trootCmd.Flags().StringVarP(\n\t\t&oldRoot, \"input\", \"i\", \"\", \"The input 'root' directory, outputs will be\")\n\trootCmd.Flags().StringVarP(\n\t\t&newRoot, \"output\", \"o\", \"\", \"The output 'root' directory, outputs will be\")\n\tpflag.CommandLine.AddGoFlagSet(flag.CommandLine)\n}\n\nfunc compileSrcs(\n\tcts []string,\n\tlibs []string,\n\tnewPkgPrefix string,\n\toldRoot string,\n\tnewRoot string) error {\n\tif len(cts) == 0 && len(libs) == 0 {\n\t\treturn errors.Errorf(\"must specify --ct or --lib or both\")\n\t}\n\tif (oldRoot == \"\") != (newRoot == \"\") {\n\t\treturn errors.Errorf(\"--input and --output must be empty or non empty together\")\n\t}\n\n\tregoRewriter, err := regorewriter.New(\n\t\tregorewriter.NewPackagePrefixer(newPkgPrefix),\n\t\t[]string{\n\t\t\t\"data.lib\",\n\t\t},\n\t\t[]string{\n\t\t\t\"data.inventory\",\n\t\t},\n\t)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, ct := range cts {\n\t\tif err := regoRewriter.AddBaseFromFs(ct); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfor _, libPath := range libs {\n\t\tif err := regoRewriter.AddLibFromFs(libPath); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tsrcs, err := regoRewriter.Rewrite()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif oldRoot != \"\" && newRoot != \"\" {\n\t\tif err := srcs.Reparent(oldRoot, newRoot); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := srcs.Write(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tglog.Info(\"SUCCESS!\")\n\treturn nil\n}\n\nfunc rootCmdFn(cmd *cobra.Command, args []string) error {\n\treturn compileSrcs(cts, libs, pkgPrefix, oldRoot, newRoot)\n}\n\nfunc main() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Printf(\"%+v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 Gary Burd\n\/\/ Copyright 2013 Unknown\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\npackage doc\n\nimport (\n\t\"errors\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/Unknwon\/gowalker\/utils\"\n)\n\nvar (\n\tgoogleRepoRe = regexp.MustCompile(`id=\"checkoutcmd\">(hg|git|svn)`)\n\tgoogleRevisionRe = regexp.MustCompile(`<h2>(?:[^ ]+ - )?Revision *([^:]+):`)\n\tgoogleEtagRe = regexp.MustCompile(`^(hg|git|svn)-`)\n\tgoogleFileRe = regexp.MustCompile(`<li><a href=\"([^\"\/]+)\"`)\n\tgoogleDirRe = regexp.MustCompile(`<li><a href=\"([^\".]+)\"`)\n\tgooglePattern = regexp.MustCompile(`^code\\.google\\.com\/p\/(?P<repo>[a-z0-9\\-]+)(:?\\.(?P<subrepo>[a-z0-9\\-]+))?(?P<dir>\/[a-z0-9A-Z_.\\-\/]+)?$`)\n)\n\nfunc getStandardDoc(client *http.Client, importPath string, savedEtag string) (pdoc *Package, err error) {\n\tp, err := httpGetBytes(client, \"http:\/\/go.googlecode.com\/hg-history\/release\/src\/pkg\/\"+importPath+\"\/\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Check revision tag.\n\tvar etag string\n\tif m := googleRevisionRe.FindSubmatch(p); m == nil {\n\t\treturn nil, errors.New(\"doc.getStandardDoc(): Could not find revision for \" + importPath)\n\t} else {\n\t\tetag = string(m[1])\n\t\tif etag == savedEtag {\n\t\t\treturn nil, errNotModified\n\t\t}\n\t}\n\n\t\/\/ Get source file data.\n\tfiles := make([]*source, 0, 5)\n\tfor _, m := range googleFileRe.FindAllSubmatch(p, -1) {\n\t\tfname := strings.Split(string(m[1]), \"?\")[0]\n\t\tif utils.IsDocFile(fname) {\n\t\t\tfiles = append(files, &source{\n\t\t\t\tname: fname,\n\t\t\t\tbrowseURL: \"http:\/\/code.google.com\/p\/go\/source\/browse\/src\/pkg\/\" + importPath + \"\/\" + fname + \"?name=release\",\n\t\t\t\trawURL: \"http:\/\/go.googlecode.com\/hg-history\/release\/src\/pkg\/\" + importPath + \"\/\" + fname,\n\t\t\t})\n\t\t}\n\t}\n\n\tdirs := make([]string, 0, 5)\n\t\/\/ Get subdirectories.\n\tfor _, m := range googleDirRe.FindAllSubmatch(p, -1) {\n\t\tdirName := strings.Split(string(m[1]), \"?\")[0]\n\t\t\/\/ Make sure we get directories.\n\t\tif strings.HasSuffix(dirName, \"\/\") {\n\t\t\tdirs = append(dirs, strings.Replace(dirName, \"\/\", \"\", -1))\n\t\t}\n\t}\n\n\tif len(files) == 0 && len(dirs) == 0 {\n\t\treturn nil, NotFoundError{\"Directory tree does not contain Go files and subdirs.\"}\n\t}\n\n\t\/\/ Fetch file from VCS.\n\tif err := fetchFiles(client, files, nil); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Start generating data.\n\tw := &walker{\n\t\tlineFmt: \"#%d\",\n\t\tpdoc: &Package{\n\t\t\tImportPath: importPath,\n\t\t\tProjectName: \"Go\",\n\t\t\tEtag: etag,\n\t\t\tDirs: dirs,\n\t\t},\n\t}\n\treturn w.build(files)\n}\n\nfunc getGoogleDoc(client *http.Client, match map[string]string, savedEtag string) (*Package, error) {\n\tsetupGoogleMatch(match)\n\tif m := googleEtagRe.FindStringSubmatch(savedEtag); m != nil {\n\t\tmatch[\"vcs\"] = m[1]\n\t} else if err := getGoogleVCS(client, match); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Scrape the repo browser to find the project revision and individual Go files.\n\tp, err := httpGetBytes(client, expand(\"http:\/\/{subrepo}{dot}{repo}.googlecode.com\/{vcs}{dir}\/\", match), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Check revision tag.\n\tvar etag string\n\tif m := googleRevisionRe.FindSubmatch(p); m == nil {\n\t\treturn nil, errors.New(\"doc.getGoogleDoc(): Could not find revision for \" + match[\"importPath\"])\n\t} else {\n\t\tetag = expand(\"{vcs}-{0}\", match, string(m[1]))\n\t\tif etag == savedEtag {\n\t\t\treturn nil, errNotModified\n\t\t}\n\t}\n\n\t\/\/ Get source file data.\n\tfiles := make([]*source, 0, 5)\n\tfor _, m := range googleFileRe.FindAllSubmatch(p, -1) {\n\t\tfname := string(m[1])\n\t\tif utils.IsDocFile(fname) {\n\t\t\tfiles = append(files, &source{\n\t\t\t\tname: fname,\n\t\t\t\tbrowseURL: expand(\"http:\/\/code.google.com\/p\/{repo}\/source\/browse{dir}\/{0}{query}\", match, fname),\n\t\t\t\trawURL: expand(\"http:\/\/{subrepo}{dot}{repo}.googlecode.com\/{vcs}{dir}\/{0}\", match, fname),\n\t\t\t})\n\t\t}\n\t}\n\n\tif len(files) == 0 {\n\t\treturn nil, NotFoundError{\"Directory tree does not contain Go files.\"}\n\t}\n\n\tdirs := make([]string, 0, 3)\n\t\/\/ Get subdirectories.\n\tfor _, m := range googleDirRe.FindAllSubmatch(p, -1) {\n\t\tdirName := strings.Split(string(m[1]), \"?\")[0]\n\t\tdirs = append(dirs, strings.Replace(dirName, \"\/\", \"\", -1))\n\t}\n\n\t\/\/ Fetch file from VCS.\n\tif err := fetchFiles(client, files, nil); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Start generating data.\n\tw := &walker{\n\t\tlineFmt: \"#%d\",\n\t\tpdoc: &Package{\n\t\t\tImportPath: match[\"importPath\"],\n\t\t\tProjectName: expand(\"{repo}{dot}{subrepo}\", match),\n\t\t\tEtag: etag,\n\t\t\tDirs: dirs,\n\t\t},\n\t}\n\treturn w.build(files)\n}\n\nfunc setupGoogleMatch(match map[string]string) {\n\tif s := match[\"subrepo\"]; s != \"\" {\n\t\tmatch[\"dot\"] = \".\"\n\t\tmatch[\"query\"] = \"?repo=\" + s\n\t} else {\n\t\tmatch[\"dot\"] = \"\"\n\t\tmatch[\"query\"] = \"\"\n\t}\n}\n\nfunc getGoogleVCS(client *http.Client, match map[string]string) error {\n\t\/\/ Scrape the HTML project page to find the VCS.\n\tp, err := httpGetBytes(client, expand(\"http:\/\/code.google.com\/p\/{repo}\/source\/checkout\", match), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm := googleRepoRe.FindSubmatch(p)\n\tif m == nil {\n\t\treturn NotFoundError{\"Could not VCS on Google Code project page.\"}\n\t}\n\tmatch[\"vcs\"] = string(m[1])\n\treturn nil\n}\n\n\/\/ expand replaces {k} in template with match[k] or subs[atoi(k)] if k is not in match.\nfunc expand(template string, match map[string]string, subs ...string) string {\n\tvar p []byte\n\tvar i int\n\tfor {\n\t\ti = strings.Index(template, \"{\")\n\t\tif i < 0 {\n\t\t\tbreak\n\t\t}\n\t\tp = append(p, template[:i]...)\n\t\ttemplate = template[i+1:]\n\t\ti = strings.Index(template, \"}\")\n\t\tif s, ok := match[template[:i]]; ok {\n\t\t\tp = append(p, s...)\n\t\t} else {\n\t\t\tj, _ := strconv.Atoi(template[:i])\n\t\t\tp = append(p, subs[j]...)\n\t\t}\n\t\ttemplate = template[i+1:]\n\t}\n\tp = append(p, template...)\n\treturn string(p)\n}\n<commit_msg>bug fix<commit_after>\/\/ Copyright 2011 Gary Burd\n\/\/ Copyright 2013 Unknown\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n\/\/ not use this file except in compliance with the License. You may obtain\n\/\/ a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n\/\/ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n\/\/ License for the specific language governing permissions and limitations\n\/\/ under the License.\n\npackage doc\n\nimport (\n\t\"errors\"\n\t\"net\/http\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/Unknwon\/gowalker\/utils\"\n)\n\nvar (\n\tgoogleRepoRe = regexp.MustCompile(`id=\"checkoutcmd\">(hg|git|svn)`)\n\tgoogleRevisionRe = regexp.MustCompile(`<h2>(?:[^ ]+ - )?Revision *([^:]+):`)\n\tgoogleEtagRe = regexp.MustCompile(`^(hg|git|svn)-`)\n\tgoogleFileRe = regexp.MustCompile(`<li><a href=\"([^\"\/]+)\"`)\n\tgoogleDirRe = regexp.MustCompile(`<li><a href=\"([^\".]+)\"`)\n\tgooglePattern = regexp.MustCompile(`^code\\.google\\.com\/p\/(?P<repo>[a-z0-9\\-]+)(:?\\.(?P<subrepo>[a-z0-9\\-]+))?(?P<dir>\/[a-z0-9A-Z_.\\-\/]+)?$`)\n)\n\nfunc getStandardDoc(client *http.Client, importPath string, savedEtag string) (pdoc *Package, err error) {\n\tp, err := httpGetBytes(client, \"http:\/\/go.googlecode.com\/hg-history\/release\/src\/pkg\/\"+importPath+\"\/\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Check revision tag.\n\tvar etag string\n\tif m := googleRevisionRe.FindSubmatch(p); m == nil {\n\t\treturn nil, errors.New(\"doc.getStandardDoc(): Could not find revision for \" + importPath)\n\t} else {\n\t\tetag = string(m[1])\n\t\tif etag == savedEtag {\n\t\t\treturn nil, errNotModified\n\t\t}\n\t}\n\n\t\/\/ Get source file data.\n\tfiles := make([]*source, 0, 5)\n\tfor _, m := range googleFileRe.FindAllSubmatch(p, -1) {\n\t\tfname := strings.Split(string(m[1]), \"?\")[0]\n\t\tif utils.IsDocFile(fname) {\n\t\t\tfiles = append(files, &source{\n\t\t\t\tname: fname,\n\t\t\t\tbrowseURL: \"http:\/\/code.google.com\/p\/go\/source\/browse\/src\/pkg\/\" + importPath + \"\/\" + fname + \"?name=release\",\n\t\t\t\trawURL: \"http:\/\/go.googlecode.com\/hg-history\/release\/src\/pkg\/\" + importPath + \"\/\" + fname,\n\t\t\t})\n\t\t}\n\t}\n\n\tdirs := make([]string, 0, 5)\n\t\/\/ Get subdirectories.\n\tfor _, m := range googleDirRe.FindAllSubmatch(p, -1) {\n\t\tdirName := strings.Split(string(m[1]), \"?\")[0]\n\t\t\/\/ Make sure we get directories.\n\t\tif strings.HasSuffix(dirName, \"\/\") {\n\t\t\tdirs = append(dirs, strings.Replace(dirName, \"\/\", \"\", -1))\n\t\t}\n\t}\n\n\tif len(files) == 0 && len(dirs) == 0 {\n\t\treturn nil, NotFoundError{\"Directory tree does not contain Go files and subdirs.\"}\n\t}\n\n\t\/\/ Fetch file from VCS.\n\tif err := fetchFiles(client, files, nil); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Start generating data.\n\tw := &walker{\n\t\tlineFmt: \"#%d\",\n\t\tpdoc: &Package{\n\t\t\tImportPath: importPath,\n\t\t\tProjectName: \"Go\",\n\t\t\tEtag: etag,\n\t\t\tDirs: dirs,\n\t\t},\n\t}\n\treturn w.build(files)\n}\n\nfunc getGoogleDoc(client *http.Client, match map[string]string, savedEtag string) (*Package, error) {\n\tsetupGoogleMatch(match)\n\tif m := googleEtagRe.FindStringSubmatch(savedEtag); m != nil {\n\t\tmatch[\"vcs\"] = m[1]\n\t} else if err := getGoogleVCS(client, match); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Scrape the repo browser to find the project revision and individual Go files.\n\tp, err := httpGetBytes(client, expand(\"http:\/\/{subrepo}{dot}{repo}.googlecode.com\/{vcs}{dir}\/\", match), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Check revision tag.\n\tvar etag string\n\tif m := googleRevisionRe.FindSubmatch(p); m == nil {\n\t\treturn nil, errors.New(\"doc.getGoogleDoc(): Could not find revision for \" + match[\"importPath\"])\n\t} else {\n\t\tetag = expand(\"{vcs}-{0}\", match, string(m[1]))\n\t\tif etag == savedEtag {\n\t\t\treturn nil, errNotModified\n\t\t}\n\t}\n\n\t\/\/ Get source file data.\n\tfiles := make([]*source, 0, 5)\n\tfor _, m := range googleFileRe.FindAllSubmatch(p, -1) {\n\t\tfname := string(m[1])\n\t\tif utils.IsDocFile(fname) {\n\t\t\tfiles = append(files, &source{\n\t\t\t\tname: fname,\n\t\t\t\tbrowseURL: expand(\"http:\/\/code.google.com\/p\/{repo}\/source\/browse{dir}\/{0}{query}\", match, fname),\n\t\t\t\trawURL: expand(\"http:\/\/{subrepo}{dot}{repo}.googlecode.com\/{vcs}{dir}\/{0}\", match, fname),\n\t\t\t})\n\t\t}\n\t}\n\n\tif len(files) == 0 {\n\t\treturn nil, NotFoundError{\"Directory tree does not contain Go files.\"}\n\t}\n\n\tdirs := make([]string, 0, 3)\n\t\/\/ Get subdirectories.\n\tfor _, m := range googleDirRe.FindAllSubmatch(p, -1) {\n\t\tdirName := strings.Split(string(m[1]), \"?\")[0]\n\t\tif strings.HasSuffix(dirName, \"\/\") {\n\t\t\tdirs = append(dirs, strings.Replace(dirName, \"\/\", \"\", -1))\n\t\t}\n\t}\n\n\t\/\/ Fetch file from VCS.\n\tif err := fetchFiles(client, files, nil); err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Start generating data.\n\tw := &walker{\n\t\tlineFmt: \"#%d\",\n\t\tpdoc: &Package{\n\t\t\tImportPath: match[\"importPath\"],\n\t\t\tProjectName: expand(\"{repo}{dot}{subrepo}\", match),\n\t\t\tEtag: etag,\n\t\t\tDirs: dirs,\n\t\t},\n\t}\n\treturn w.build(files)\n}\n\nfunc setupGoogleMatch(match map[string]string) {\n\tif s := match[\"subrepo\"]; s != \"\" {\n\t\tmatch[\"dot\"] = \".\"\n\t\tmatch[\"query\"] = \"?repo=\" + s\n\t} else {\n\t\tmatch[\"dot\"] = \"\"\n\t\tmatch[\"query\"] = \"\"\n\t}\n}\n\nfunc getGoogleVCS(client *http.Client, match map[string]string) error {\n\t\/\/ Scrape the HTML project page to find the VCS.\n\tp, err := httpGetBytes(client, expand(\"http:\/\/code.google.com\/p\/{repo}\/source\/checkout\", match), nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm := googleRepoRe.FindSubmatch(p)\n\tif m == nil {\n\t\treturn NotFoundError{\"Could not VCS on Google Code project page.\"}\n\t}\n\tmatch[\"vcs\"] = string(m[1])\n\treturn nil\n}\n\n\/\/ expand replaces {k} in template with match[k] or subs[atoi(k)] if k is not in match.\nfunc expand(template string, match map[string]string, subs ...string) string {\n\tvar p []byte\n\tvar i int\n\tfor {\n\t\ti = strings.Index(template, \"{\")\n\t\tif i < 0 {\n\t\t\tbreak\n\t\t}\n\t\tp = append(p, template[:i]...)\n\t\ttemplate = template[i+1:]\n\t\ti = strings.Index(template, \"}\")\n\t\tif s, ok := match[template[:i]]; ok {\n\t\t\tp = append(p, s...)\n\t\t} else {\n\t\t\tj, _ := strconv.Atoi(template[:i])\n\t\t\tp = append(p, subs[j]...)\n\t\t}\n\t\ttemplate = template[i+1:]\n\t}\n\tp = append(p, template...)\n\treturn string(p)\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2020 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage magnum\n\nimport (\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"gopkg.in\/gcfg.v1\"\n\tnetutil \"k8s.io\/apimachinery\/pkg\/util\/net\"\n\t\"k8s.io\/autoscaler\/cluster-autoscaler\/cloudprovider\/magnum\/gophercloud\"\n\t\"k8s.io\/autoscaler\/cluster-autoscaler\/cloudprovider\/magnum\/gophercloud\/openstack\"\n\t\"k8s.io\/autoscaler\/cluster-autoscaler\/cloudprovider\/magnum\/gophercloud\/openstack\/containerinfra\/v1\/clusters\"\n\t\"k8s.io\/autoscaler\/cluster-autoscaler\/cloudprovider\/magnum\/gophercloud\/openstack\/identity\/v3\/extensions\/trusts\"\n\t\"k8s.io\/autoscaler\/cluster-autoscaler\/config\"\n\t\"k8s.io\/autoscaler\/cluster-autoscaler\/version\"\n\tcertutil \"k8s.io\/client-go\/util\/cert\"\n\tklog \"k8s.io\/klog\/v2\"\n)\n\n\/\/ These Opts types are for parsing an OpenStack cloud-config file.\n\/\/ The definitions are taken from cloud-provider-openstack.\n\n\/\/ MyDuration is the encoding.TextUnmarshaler interface for time.Duration\ntype MyDuration struct {\n\ttime.Duration\n}\n\n\/\/ UnmarshalText is used to convert from text to Duration\nfunc (d *MyDuration) UnmarshalText(text []byte) error {\n\tres, err := time.ParseDuration(string(text))\n\tif err != nil {\n\t\treturn err\n\t}\n\td.Duration = res\n\treturn nil\n}\n\n\/\/ LoadBalancerOpts have the options to talk to Neutron LBaaSV2 or Octavia\ntype LoadBalancerOpts struct {\n\tLBVersion string `gcfg:\"lb-version\"` \/\/ overrides autodetection. Only support v2.\n\tUseOctavia bool `gcfg:\"use-octavia\"` \/\/ uses Octavia V2 service catalog endpoint\n\tSubnetID string `gcfg:\"subnet-id\"` \/\/ overrides autodetection.\n\tFloatingNetworkID string `gcfg:\"floating-network-id\"` \/\/ If specified, will create floating ip for loadbalancer, or do not create floating ip.\n\tLBMethod string `gcfg:\"lb-method\"` \/\/ default to ROUND_ROBIN.\n\tLBProvider string `gcfg:\"lb-provider\"`\n\tCreateMonitor bool `gcfg:\"create-monitor\"`\n\tMonitorDelay MyDuration `gcfg:\"monitor-delay\"`\n\tMonitorTimeout MyDuration `gcfg:\"monitor-timeout\"`\n\tMonitorMaxRetries uint `gcfg:\"monitor-max-retries\"`\n\tManageSecurityGroups bool `gcfg:\"manage-security-groups\"`\n\tNodeSecurityGroupIDs []string \/\/ Do not specify, get it automatically when enable manage-security-groups. TODO(FengyunPan): move it into cache\n}\n\n\/\/ BlockStorageOpts is used to talk to Cinder service\ntype BlockStorageOpts struct {\n\tBSVersion string `gcfg:\"bs-version\"` \/\/ overrides autodetection. v1 or v2. Defaults to auto\n\tTrustDevicePath bool `gcfg:\"trust-device-path\"` \/\/ See Issue #33128\n\tIgnoreVolumeAZ bool `gcfg:\"ignore-volume-az\"`\n\tNodeVolumeAttachLimit int `gcfg:\"node-volume-attach-limit\"` \/\/ override volume attach limit for Cinder. Default is : 256\n}\n\n\/\/ RouterOpts is used for Neutron routes\ntype RouterOpts struct {\n\tRouterID string `gcfg:\"router-id\"` \/\/ required\n}\n\n\/\/ MetadataOpts is used for configuring how to talk to metadata service or config drive\ntype MetadataOpts struct {\n\tSearchOrder string `gcfg:\"search-order\"`\n\tRequestTimeout MyDuration `gcfg:\"request-timeout\"`\n}\n\n\/\/ Config is used to read and store information from the cloud configuration file\n\/\/\n\/\/ Taken from kubernetes\/pkg\/cloudprovider\/providers\/openstack\/openstack.go\n\/\/ LoadBalancer, BlockStorage, Route, Metadata are not needed for the autoscaler,\n\/\/ but are kept so that if a cloud-config file with those sections is provided\n\/\/ then the parsing will not fail.\ntype Config struct {\n\tGlobal struct {\n\t\tAuthURL string `gcfg:\"auth-url\"`\n\t\tUsername string\n\t\tUserID string `gcfg:\"user-id\"`\n\t\tPassword string\n\t\tTenantID string `gcfg:\"tenant-id\"`\n\t\tTenantName string `gcfg:\"tenant-name\"`\n\t\tTrustID string `gcfg:\"trust-id\"`\n\t\tDomainID string `gcfg:\"domain-id\"`\n\t\tDomainName string `gcfg:\"domain-name\"`\n\t\tRegion string\n\t\tCAFile string `gcfg:\"ca-file\"`\n\t\tSecretName string `gcfg:\"secret-name\"`\n\t\tSecretNamespace string `gcfg:\"secret-namespace\"`\n\t}\n\tLoadBalancer LoadBalancerOpts\n\tBlockStorage BlockStorageOpts\n\tRoute RouterOpts\n\tMetadata MetadataOpts\n}\n\nfunc toAuthOptsExt(cfg Config) trusts.AuthOptsExt {\n\topts := gophercloud.AuthOptions{\n\t\tIdentityEndpoint: cfg.Global.AuthURL,\n\t\tUsername: cfg.Global.Username,\n\t\tUserID: cfg.Global.UserID,\n\t\tPassword: cfg.Global.Password,\n\t\tTenantID: cfg.Global.TenantID,\n\t\tTenantName: cfg.Global.TenantName,\n\t\tDomainID: cfg.Global.DomainID,\n\t\tDomainName: cfg.Global.DomainName,\n\n\t\t\/\/ Persistent service, so we need to be able to renew tokens.\n\t\tAllowReauth: true,\n\t}\n\n\treturn trusts.AuthOptsExt{\n\t\tTrustID: cfg.Global.TrustID,\n\t\tAuthOptionsBuilder: &opts,\n\t}\n}\n\n\/\/ readConfig parses an OpenStack cloud-config file from an io.Reader.\nfunc readConfig(configReader io.Reader) (*Config, error) {\n\tvar cfg Config\n\tif configReader != nil {\n\t\tif err := gcfg.ReadInto(&cfg, configReader); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"couldn't read cloud config: %v\", err)\n\t\t}\n\t}\n\treturn &cfg, nil\n}\n\n\/\/ createProviderClient creates and authenticates a gophercloud provider client.\nfunc createProviderClient(cfg *Config, opts config.AutoscalingOptions) (*gophercloud.ProviderClient, error) {\n\tif opts.ClusterName == \"\" {\n\t\treturn nil, errors.New(\"the cluster-name parameter must be set\")\n\t}\n\n\tauthOpts := toAuthOptsExt(*cfg)\n\n\tprovider, err := openstack.NewClient(cfg.Global.AuthURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not create openstack client: %v\", err)\n\t}\n\n\tuserAgent := gophercloud.UserAgent{}\n\tuserAgent.Prepend(fmt.Sprintf(\"cluster-autoscaler\/%s\", version.ClusterAutoscalerVersion))\n\tuserAgent.Prepend(fmt.Sprintf(\"cluster\/%s\", opts.ClusterName))\n\tprovider.UserAgent = userAgent\n\n\tklog.V(5).Infof(\"Using user-agent %q\", userAgent.Join())\n\n\tif cfg.Global.CAFile != \"\" {\n\t\troots, err := certutil.NewPool(cfg.Global.CAFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconfig := &tls.Config{}\n\t\tconfig.RootCAs = roots\n\t\tprovider.HTTPClient.Transport = netutil.SetOldTransportDefaults(&http.Transport{TLSClientConfig: config})\n\t}\n\n\terr = openstack.AuthenticateV3(provider, authOpts, gophercloud.EndpointOpts{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not authenticate client: %v\", err)\n\t}\n\n\treturn provider, nil\n}\n\n\/\/ createClusterClient creates a gophercloud service client for communicating with Magnum.\nfunc createClusterClient(cfg *Config, provider *gophercloud.ProviderClient, opts config.AutoscalingOptions) (*gophercloud.ServiceClient, error) {\n\tclusterClient, err := openstack.NewContainerInfraV1(provider, gophercloud.EndpointOpts{Type: \"container-infra\", Name: \"magnum\", Region: cfg.Global.Region})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not create container-infra client: %v\", err)\n\t}\n\treturn clusterClient, nil\n}\n\n\/\/ checkClusterUUID replaces a cluster name with the UUID, if the name was given in the parameters\n\/\/ to the cluster autoscaler.\nfunc checkClusterUUID(provider *gophercloud.ProviderClient, clusterClient *gophercloud.ServiceClient, opts config.AutoscalingOptions) error {\n\tcluster, err := clusters.Get(clusterClient, opts.ClusterName).Extract()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to access cluster %q: %v\", opts.ClusterName, err)\n\t}\n\n\t\/\/ Prefer to use the cluster UUID if the cluster name was given in the parameters\n\tif opts.ClusterName != cluster.UUID {\n\t\tklog.V(2).Infof(\"Using cluster UUID %q instead of name %q\", cluster.UUID, opts.ClusterName)\n\t\topts.ClusterName = cluster.UUID\n\n\t\t\/\/ Need to remake user-agent with UUID instead of name\n\t\tuserAgent := gophercloud.UserAgent{}\n\t\tuserAgent.Prepend(fmt.Sprintf(\"cluster-autoscaler\/%s\", version.ClusterAutoscalerVersion))\n\t\tuserAgent.Prepend(fmt.Sprintf(\"cluster\/%s\", opts.ClusterName))\n\t\tprovider.UserAgent = userAgent\n\t\tklog.V(5).Infof(\"Using updated user-agent %q\", userAgent.Join())\n\t}\n\n\treturn nil\n}\n\n\/\/ createHeatClient creates a gophercloud service client for communicating with Heat.\nfunc createHeatClient(cfg *Config, provider *gophercloud.ProviderClient, opts config.AutoscalingOptions) (*gophercloud.ServiceClient, error) {\n\theatClient, err := openstack.NewOrchestrationV1(provider, gophercloud.EndpointOpts{Type: \"orchestration\", Name: \"heat\", Region: cfg.Global.Region})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not create orchestration client: %v\", err)\n\t}\n\n\treturn heatClient, nil\n}\n<commit_msg>magnum: add an option to create insecure TLS connections<commit_after>\/*\nCopyright 2020 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage magnum\n\nimport (\n\t\"crypto\/tls\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"gopkg.in\/gcfg.v1\"\n\tnetutil \"k8s.io\/apimachinery\/pkg\/util\/net\"\n\t\"k8s.io\/autoscaler\/cluster-autoscaler\/cloudprovider\/magnum\/gophercloud\"\n\t\"k8s.io\/autoscaler\/cluster-autoscaler\/cloudprovider\/magnum\/gophercloud\/openstack\"\n\t\"k8s.io\/autoscaler\/cluster-autoscaler\/cloudprovider\/magnum\/gophercloud\/openstack\/containerinfra\/v1\/clusters\"\n\t\"k8s.io\/autoscaler\/cluster-autoscaler\/cloudprovider\/magnum\/gophercloud\/openstack\/identity\/v3\/extensions\/trusts\"\n\t\"k8s.io\/autoscaler\/cluster-autoscaler\/config\"\n\t\"k8s.io\/autoscaler\/cluster-autoscaler\/version\"\n\tcertutil \"k8s.io\/client-go\/util\/cert\"\n\tklog \"k8s.io\/klog\/v2\"\n)\n\n\/\/ These Opts types are for parsing an OpenStack cloud-config file.\n\/\/ The definitions are taken from cloud-provider-openstack.\n\n\/\/ MyDuration is the encoding.TextUnmarshaler interface for time.Duration\ntype MyDuration struct {\n\ttime.Duration\n}\n\n\/\/ UnmarshalText is used to convert from text to Duration\nfunc (d *MyDuration) UnmarshalText(text []byte) error {\n\tres, err := time.ParseDuration(string(text))\n\tif err != nil {\n\t\treturn err\n\t}\n\td.Duration = res\n\treturn nil\n}\n\n\/\/ LoadBalancerOpts have the options to talk to Neutron LBaaSV2 or Octavia\ntype LoadBalancerOpts struct {\n\tLBVersion string `gcfg:\"lb-version\"` \/\/ overrides autodetection. Only support v2.\n\tUseOctavia bool `gcfg:\"use-octavia\"` \/\/ uses Octavia V2 service catalog endpoint\n\tSubnetID string `gcfg:\"subnet-id\"` \/\/ overrides autodetection.\n\tFloatingNetworkID string `gcfg:\"floating-network-id\"` \/\/ If specified, will create floating ip for loadbalancer, or do not create floating ip.\n\tLBMethod string `gcfg:\"lb-method\"` \/\/ default to ROUND_ROBIN.\n\tLBProvider string `gcfg:\"lb-provider\"`\n\tCreateMonitor bool `gcfg:\"create-monitor\"`\n\tMonitorDelay MyDuration `gcfg:\"monitor-delay\"`\n\tMonitorTimeout MyDuration `gcfg:\"monitor-timeout\"`\n\tMonitorMaxRetries uint `gcfg:\"monitor-max-retries\"`\n\tManageSecurityGroups bool `gcfg:\"manage-security-groups\"`\n\tNodeSecurityGroupIDs []string \/\/ Do not specify, get it automatically when enable manage-security-groups. TODO(FengyunPan): move it into cache\n}\n\n\/\/ BlockStorageOpts is used to talk to Cinder service\ntype BlockStorageOpts struct {\n\tBSVersion string `gcfg:\"bs-version\"` \/\/ overrides autodetection. v1 or v2. Defaults to auto\n\tTrustDevicePath bool `gcfg:\"trust-device-path\"` \/\/ See Issue #33128\n\tIgnoreVolumeAZ bool `gcfg:\"ignore-volume-az\"`\n\tNodeVolumeAttachLimit int `gcfg:\"node-volume-attach-limit\"` \/\/ override volume attach limit for Cinder. Default is : 256\n}\n\n\/\/ RouterOpts is used for Neutron routes\ntype RouterOpts struct {\n\tRouterID string `gcfg:\"router-id\"` \/\/ required\n}\n\n\/\/ MetadataOpts is used for configuring how to talk to metadata service or config drive\ntype MetadataOpts struct {\n\tSearchOrder string `gcfg:\"search-order\"`\n\tRequestTimeout MyDuration `gcfg:\"request-timeout\"`\n}\n\n\/\/ Config is used to read and store information from the cloud configuration file\n\/\/\n\/\/ Taken from kubernetes\/pkg\/cloudprovider\/providers\/openstack\/openstack.go\n\/\/ LoadBalancer, BlockStorage, Route, Metadata are not needed for the autoscaler,\n\/\/ but are kept so that if a cloud-config file with those sections is provided\n\/\/ then the parsing will not fail.\ntype Config struct {\n\tGlobal struct {\n\t\tAuthURL string `gcfg:\"auth-url\"`\n\t\tUsername string\n\t\tUserID string `gcfg:\"user-id\"`\n\t\tPassword string\n\t\tTenantID string `gcfg:\"tenant-id\"`\n\t\tTenantName string `gcfg:\"tenant-name\"`\n\t\tTrustID string `gcfg:\"trust-id\"`\n\t\tDomainID string `gcfg:\"domain-id\"`\n\t\tDomainName string `gcfg:\"domain-name\"`\n\t\tRegion string\n\t\tCAFile string `gcfg:\"ca-file\"`\n\t\tTLSInsecure string `gcfg:\"tls-insecure\"`\n\t\tSecretName string `gcfg:\"secret-name\"`\n\t\tSecretNamespace string `gcfg:\"secret-namespace\"`\n\t}\n\tLoadBalancer LoadBalancerOpts\n\tBlockStorage BlockStorageOpts\n\tRoute RouterOpts\n\tMetadata MetadataOpts\n}\n\nfunc toAuthOptsExt(cfg Config) trusts.AuthOptsExt {\n\topts := gophercloud.AuthOptions{\n\t\tIdentityEndpoint: cfg.Global.AuthURL,\n\t\tUsername: cfg.Global.Username,\n\t\tUserID: cfg.Global.UserID,\n\t\tPassword: cfg.Global.Password,\n\t\tTenantID: cfg.Global.TenantID,\n\t\tTenantName: cfg.Global.TenantName,\n\t\tDomainID: cfg.Global.DomainID,\n\t\tDomainName: cfg.Global.DomainName,\n\n\t\t\/\/ Persistent service, so we need to be able to renew tokens.\n\t\tAllowReauth: true,\n\t}\n\n\treturn trusts.AuthOptsExt{\n\t\tTrustID: cfg.Global.TrustID,\n\t\tAuthOptionsBuilder: &opts,\n\t}\n}\n\n\/\/ readConfig parses an OpenStack cloud-config file from an io.Reader.\nfunc readConfig(configReader io.Reader) (*Config, error) {\n\tvar cfg Config\n\tif configReader != nil {\n\t\tif err := gcfg.ReadInto(&cfg, configReader); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"couldn't read cloud config: %v\", err)\n\t\t}\n\t}\n\treturn &cfg, nil\n}\n\n\/\/ createProviderClient creates and authenticates a gophercloud provider client.\nfunc createProviderClient(cfg *Config, opts config.AutoscalingOptions) (*gophercloud.ProviderClient, error) {\n\tif opts.ClusterName == \"\" {\n\t\treturn nil, errors.New(\"the cluster-name parameter must be set\")\n\t}\n\n\tauthOpts := toAuthOptsExt(*cfg)\n\n\tprovider, err := openstack.NewClient(cfg.Global.AuthURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not create openstack client: %v\", err)\n\t}\n\n\tuserAgent := gophercloud.UserAgent{}\n\tuserAgent.Prepend(fmt.Sprintf(\"cluster-autoscaler\/%s\", version.ClusterAutoscalerVersion))\n\tuserAgent.Prepend(fmt.Sprintf(\"cluster\/%s\", opts.ClusterName))\n\tprovider.UserAgent = userAgent\n\n\tklog.V(5).Infof(\"Using user-agent %q\", userAgent.Join())\n\n\tconfig := &tls.Config{}\n\tconfig.InsecureSkipVerify = cfg.Global.TLSInsecure == \"true\"\n\tif cfg.Global.CAFile != \"\" {\n\t\troots, err := certutil.NewPool(cfg.Global.CAFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconfig.RootCAs = roots\n\t}\n\tprovider.HTTPClient.Transport = netutil.SetOldTransportDefaults(&http.Transport{TLSClientConfig: config})\n\n\terr = openstack.AuthenticateV3(provider, authOpts, gophercloud.EndpointOpts{})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not authenticate client: %v\", err)\n\t}\n\n\treturn provider, nil\n}\n\n\/\/ createClusterClient creates a gophercloud service client for communicating with Magnum.\nfunc createClusterClient(cfg *Config, provider *gophercloud.ProviderClient, opts config.AutoscalingOptions) (*gophercloud.ServiceClient, error) {\n\tclusterClient, err := openstack.NewContainerInfraV1(provider, gophercloud.EndpointOpts{Type: \"container-infra\", Name: \"magnum\", Region: cfg.Global.Region})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not create container-infra client: %v\", err)\n\t}\n\treturn clusterClient, nil\n}\n\n\/\/ checkClusterUUID replaces a cluster name with the UUID, if the name was given in the parameters\n\/\/ to the cluster autoscaler.\nfunc checkClusterUUID(provider *gophercloud.ProviderClient, clusterClient *gophercloud.ServiceClient, opts config.AutoscalingOptions) error {\n\tcluster, err := clusters.Get(clusterClient, opts.ClusterName).Extract()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to access cluster %q: %v\", opts.ClusterName, err)\n\t}\n\n\t\/\/ Prefer to use the cluster UUID if the cluster name was given in the parameters\n\tif opts.ClusterName != cluster.UUID {\n\t\tklog.V(2).Infof(\"Using cluster UUID %q instead of name %q\", cluster.UUID, opts.ClusterName)\n\t\topts.ClusterName = cluster.UUID\n\n\t\t\/\/ Need to remake user-agent with UUID instead of name\n\t\tuserAgent := gophercloud.UserAgent{}\n\t\tuserAgent.Prepend(fmt.Sprintf(\"cluster-autoscaler\/%s\", version.ClusterAutoscalerVersion))\n\t\tuserAgent.Prepend(fmt.Sprintf(\"cluster\/%s\", opts.ClusterName))\n\t\tprovider.UserAgent = userAgent\n\t\tklog.V(5).Infof(\"Using updated user-agent %q\", userAgent.Join())\n\t}\n\n\treturn nil\n}\n\n\/\/ createHeatClient creates a gophercloud service client for communicating with Heat.\nfunc createHeatClient(cfg *Config, provider *gophercloud.ProviderClient, opts config.AutoscalingOptions) (*gophercloud.ServiceClient, error) {\n\theatClient, err := openstack.NewOrchestrationV1(provider, gophercloud.EndpointOpts{Type: \"orchestration\", Name: \"heat\", Region: cfg.Global.Region})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not create orchestration client: %v\", err)\n\t}\n\n\treturn heatClient, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The prometheus-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage e2e\n\nimport (\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\n\t\"github.com\/coreos\/prometheus-operator\/pkg\/k8sutil\"\n\toperatorFramework \"github.com\/coreos\/prometheus-operator\/test\/framework\"\n)\n\nvar framework *operatorFramework.Framework\n\n\/\/ Basic set of e2e tests for the operator:\n\/\/ - config reload (with and without external url)\n\nfunc TestMain(m *testing.M) {\n\tkubeconfig := flag.String(\"kubeconfig\", \"\", \"kube config path, e.g. $HOME\/.kube\/config\")\n\topImage := flag.String(\"operator-image\", \"\", \"operator image, e.g. quay.io\/coreos\/prometheus-operator\")\n\tns := flag.String(\"namespace\", \"prometheus-operator-e2e-tests\", \"e2e test namespace\")\n\tflag.Parse()\n\n\tvar (\n\t\terr error\n\t\tcode int = 0\n\t)\n\n\tif framework, err = operatorFramework.New(*ns, *kubeconfig, *opImage); err != nil {\n\t\tlog.Printf(\"failed to setup framework: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\terr = k8sutil.WaitForCRDReady(framework.MonClientV1.Prometheuses(v1.NamespaceAll).List)\n\tif err != nil {\n\t\tlog.Printf(\"Prometheus CRD not ready: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\terr = k8sutil.WaitForCRDReady(framework.MonClientV1.ServiceMonitors(v1.NamespaceAll).List)\n\tif err != nil {\n\t\tlog.Printf(\"ServiceMonitor CRD not ready: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\terr = k8sutil.WaitForCRDReady(framework.MonClientV1.Alertmanagers(v1.NamespaceAll).List)\n\tif err != nil {\n\t\tlog.Printf(\"Alertmanagers CRD not ready: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tdefer func() {\n\t\tif err := framework.Teardown(); err != nil {\n\t\t\tlog.Printf(\"failed to teardown framework: %v\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tos.Exit(code)\n\t}()\n\n\tcode = m.Run()\n}\n<commit_msg>e2e: Print Kubernetes events on test failures<commit_after>\/\/ Copyright 2016 The prometheus-operator Authors\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage e2e\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n\n\t\"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\n\t\"github.com\/coreos\/prometheus-operator\/pkg\/k8sutil\"\n\toperatorFramework \"github.com\/coreos\/prometheus-operator\/test\/framework\"\n)\n\nvar framework *operatorFramework.Framework\n\n\/\/ Basic set of e2e tests for the operator:\n\/\/ - config reload (with and without external url)\n\nfunc TestMain(m *testing.M) {\n\tkubeconfig := flag.String(\"kubeconfig\", \"\", \"kube config path, e.g. $HOME\/.kube\/config\")\n\topImage := flag.String(\"operator-image\", \"\", \"operator image, e.g. quay.io\/coreos\/prometheus-operator\")\n\tns := flag.String(\"namespace\", \"prometheus-operator-e2e-tests\", \"e2e test namespace\")\n\tflag.Parse()\n\n\tvar (\n\t\terr error\n\t\tcode int = 0\n\t)\n\n\tif framework, err = operatorFramework.New(*ns, *kubeconfig, *opImage); err != nil {\n\t\tlog.Printf(\"failed to setup framework: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\terr = k8sutil.WaitForCRDReady(framework.MonClientV1.Prometheuses(v1.NamespaceAll).List)\n\tif err != nil {\n\t\tlog.Printf(\"Prometheus CRD not ready: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\terr = k8sutil.WaitForCRDReady(framework.MonClientV1.ServiceMonitors(v1.NamespaceAll).List)\n\tif err != nil {\n\t\tlog.Printf(\"ServiceMonitor CRD not ready: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\terr = k8sutil.WaitForCRDReady(framework.MonClientV1.Alertmanagers(v1.NamespaceAll).List)\n\tif err != nil {\n\t\tlog.Printf(\"Alertmanagers CRD not ready: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tdefer func() {\n\t\tif err := framework.Teardown(); err != nil {\n\t\t\tlog.Printf(\"failed to teardown framework: %v\\n\", err)\n\t\t\tcode = 1\n\t\t}\n\n\t\tif code != 0 {\n\t\t\tif err := printKubernetesEvents(); err != nil {\n\t\t\t\tlog.Printf(\"failed to print events: %v\", err)\n\t\t\t}\n\t\t}\n\n\t\tos.Exit(code)\n\t}()\n\n\tcode = m.Run()\n}\n\nfunc printKubernetesEvents() error {\n\tfmt.Println(\"Printing Kubernetes events for debugging:\")\n\tevents, err := framework.KubeClient.CoreV1().Events(\"\").List(metav1.ListOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tif events != nil {\n\t\tfor _, e := range events.Items {\n\t\t\tfmt.Printf(\"FirstTimestamp: '%v', Reason: '%v', Message: '%v'\\n\", e.FirstTimestamp, e.Reason, e.Message)\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2015 Serge Gebhardt. All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by the ISC\n\/\/ license that can be found in the LICENSE file.\n\npackage acd\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestAccount_getInfo(t *testing.T) {\n\tr := *NewMockResponseOkString(`{ \"termsOfUse\": \"1.0.0\", \"status\": \"ACTIVE\" }`)\n\tc := NewMockClient(r)\n\n\tinfo, _, err := c.Account.GetInfo()\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"ACTIVE\", *info.Status)\n\tassert.Equal(t, \"1.0.0\", *info.TermsOfUse)\n}\n<commit_msg>Test account.GetAccount<commit_after>\/\/ Copyright (c) 2015 Serge Gebhardt. All rights reserved.\n\/\/\n\/\/ Use of this source code is governed by the ISC\n\/\/ license that can be found in the LICENSE file.\n\npackage acd\n\nimport (\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestAccount_getInfo(t *testing.T) {\n\tr := *NewMockResponseOkString(`{ \"termsOfUse\": \"1.0.0\", \"status\": \"ACTIVE\" }`)\n\tc := NewMockClient(r)\n\n\tinfo, _, err := c.Account.GetInfo()\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"ACTIVE\", *info.Status)\n\tassert.Equal(t, \"1.0.0\", *info.TermsOfUse)\n}\n\nfunc TestAccount_getQuota(t *testing.T) {\n\tr := *NewMockResponseOkString(`\n{\n\"quota\": 5368709120,\n\"lastCalculated\": \"2014-08-13T23:01:47.479Z\",\n\"available\": 4069088896\n}\n\t`)\n\tc := NewMockClient(r)\n\n\tquota, _, err := c.Account.GetQuota()\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"2014-08-13 23:01:47.479 +0000 UTC\", quota.LastCalculated.String())\n\tassert.Equal(t, uint64(5368709120), *quota.Quota)\n\tassert.Equal(t, uint64(4069088896), *quota.Available)\n}\n<|endoftext|>"} {"text":"<commit_before>package action\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\n\t\"github.com\/fatih\/color\"\n\t\"github.com\/justwatchcom\/gopass\/store\"\n\t\"github.com\/justwatchcom\/gopass\/tree\/simple\"\n\t\"github.com\/urfave\/cli\"\n)\n\n\/\/ MountRemove removes an existing mount\nfunc (s *Action) MountRemove(c *cli.Context) error {\n\tif len(c.Args()) != 1 {\n\t\treturn fmt.Errorf(\"usage: gopass mount remove [alias]\")\n\t}\n\tif err := s.Store.RemoveMount(c.Args()[0]); err != nil {\n\t\tcolor.Yellow(\"Failed to remove mount: %s\", err)\n\t}\n\tif err := s.Store.Config().Save(); err != nil {\n\t\treturn err\n\t}\n\n\tcolor.Green(\"Password Store %s umounted\", c.Args()[0])\n\treturn nil\n}\n\n\/\/ MountsPrint prints all existing mounts\nfunc (s *Action) MountsPrint(c *cli.Context) error {\n\tif len(s.Store.Mounts()) < 1 {\n\t\tfmt.Println(\"No mounts\")\n\t\treturn nil\n\t}\n\troot := simple.New(color.GreenString(fmt.Sprintf(\"gopass (%s)\", s.Store.Path())))\n\tmounts := s.Store.Mounts()\n\tmps := s.Store.MountPoints()\n\tsort.Sort(store.ByPathLen(mps))\n\tfor _, alias := range mps {\n\t\tpath := mounts[alias]\n\t\tif err := root.AddMount(alias, path); err != nil {\n\t\t\tfmt.Printf(\"Failed to add mount: %s\\n\", err)\n\t\t}\n\t}\n\tfmt.Fprintln(color.Output, root.Format(0))\n\treturn nil\n}\n\n\/\/ MountsComplete will print a list of existings mount points for bash\n\/\/ completion\nfunc (s *Action) MountsComplete(*cli.Context) {\n\tfor alias := range s.Store.Mounts() {\n\t\tfmt.Println(alias)\n\t}\n}\n\n\/\/ MountAdd adds a new mount\nfunc (s *Action) MountAdd(c *cli.Context) error {\n\tif len(c.Args()) != 2 {\n\t\treturn fmt.Errorf(\"usage: gopass mount add [alias] [local path]\")\n\t}\n\tkeys := make([]string, 0, 1)\n\tif k := c.String(\"init\"); k != \"\" {\n\t\tkeys = append(keys, k)\n\t}\n\tif err := s.Store.AddMount(c.Args()[0], c.Args()[1], keys...); err != nil {\n\t\treturn err\n\t}\n\tif err := s.Store.Config().Save(); err != nil {\n\t\treturn err\n\t}\n\n\tcolor.Green(\"Mounted %s as %s\", c.Args()[0], c.Args()[1])\n\treturn nil\n}\n<commit_msg>Check for already existing entries, when add mount (#180)<commit_after>package action\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\n\t\"github.com\/fatih\/color\"\n\t\"github.com\/justwatchcom\/gopass\/store\"\n\t\"github.com\/justwatchcom\/gopass\/tree\/simple\"\n\t\"github.com\/urfave\/cli\"\n)\n\n\/\/ MountRemove removes an existing mount\nfunc (s *Action) MountRemove(c *cli.Context) error {\n\tif len(c.Args()) != 1 {\n\t\treturn fmt.Errorf(\"usage: gopass mount remove [alias]\")\n\t}\n\tif err := s.Store.RemoveMount(c.Args()[0]); err != nil {\n\t\tcolor.Yellow(\"Failed to remove mount: %s\", err)\n\t}\n\tif err := s.Store.Config().Save(); err != nil {\n\t\treturn err\n\t}\n\n\tcolor.Green(\"Password Store %s umounted\", c.Args()[0])\n\treturn nil\n}\n\n\/\/ MountsPrint prints all existing mounts\nfunc (s *Action) MountsPrint(c *cli.Context) error {\n\tif len(s.Store.Mounts()) < 1 {\n\t\tfmt.Println(\"No mounts\")\n\t\treturn nil\n\t}\n\troot := simple.New(color.GreenString(fmt.Sprintf(\"gopass (%s)\", s.Store.Path())))\n\tmounts := s.Store.Mounts()\n\tmps := s.Store.MountPoints()\n\tsort.Sort(store.ByPathLen(mps))\n\tfor _, alias := range mps {\n\t\tpath := mounts[alias]\n\t\tif err := root.AddMount(alias, path); err != nil {\n\t\t\tfmt.Printf(\"Failed to add mount: %s\\n\", err)\n\t\t}\n\t}\n\tfmt.Fprintln(color.Output, root.Format(0))\n\treturn nil\n}\n\n\/\/ MountsComplete will print a list of existings mount points for bash\n\/\/ completion\nfunc (s *Action) MountsComplete(*cli.Context) {\n\tfor alias := range s.Store.Mounts() {\n\t\tfmt.Println(alias)\n\t}\n}\n\n\/\/ MountAdd adds a new mount\nfunc (s *Action) MountAdd(c *cli.Context) error {\n\tif len(c.Args()) != 2 {\n\t\treturn fmt.Errorf(\"usage: gopass mount add [alias] [local path]\")\n\t}\n\tkeys := make([]string, 0, 1)\n\tif k := c.String(\"init\"); k != \"\" {\n\t\tkeys = append(keys, k)\n\t}\n\talias, localPath := c.Args()[0], c.Args()[1]\n\tif s.Store.Exists(alias) {\n\t\tfmt.Printf(color.YellowString(\"WARNING: shadowing %s entry\\n\"), alias)\n\t}\n\tif err := s.Store.AddMount(alias, localPath, keys...); err != nil {\n\t\treturn err\n\t}\n\tif err := s.Store.Config().Save(); err != nil {\n\t\treturn err\n\t}\n\n\tcolor.Green(\"Mounted %s as %s\", c.Args()[0], c.Args()[1])\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package actions\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com\/agupta666\/elf\/utils\"\n)\n\n\/\/ FileAction action represents actions which responds with the contents of a file\ntype FileAction struct {\n\tPatternHolder\n\tPath string\n}\n\nfunc (fa *FileAction) String() string {\n\treturn fmt.Sprintf(\"@[Path=%s]\", fa.Path)\n}\n\n\/\/ SetPattern sets the matched pattern in the action\nfunc (fa *FileAction) SetPattern(p string) {}\n\n\/\/ NewFileAction creates a new FileAction\nfunc NewFileAction(p string) (*FileAction, error) {\n\treturn &FileAction{Path: p}, nil\n}\n\n\/\/ Exec executes a file action\nfunc (fa *FileAction) Exec(w http.ResponseWriter, r *http.Request) error {\n\treader, err := os.Open(fa.Path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\temitMimeType(fa.Path, w)\n\t_, err = io.Copy(w, reader)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc emitMimeType(fname string, w http.ResponseWriter) {\n\text := path.Ext(fname)\n\tmimeType := utils.TypeByExtension(ext)\n\tw.Header().Set(\"Content-Type\", mimeType)\n}\n<commit_msg>error handling in file action<commit_after>package actions\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com\/agupta666\/elf\/utils\"\n)\n\n\/\/ FileAction action represents actions which responds with the contents of a file\ntype FileAction struct {\n\tPatternHolder\n\tPath string\n}\n\nfunc (fa *FileAction) String() string {\n\treturn fmt.Sprintf(\"@[Path=%s]\", fa.Path)\n}\n\n\/\/ SetPattern sets the matched pattern in the action\nfunc (fa *FileAction) SetPattern(p string) {}\n\n\/\/ NewFileAction creates a new FileAction\nfunc NewFileAction(p string) (*FileAction, error) {\n\tfi, err := os.Lstat(p)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !fi.Mode().IsRegular() {\n\t\treturn nil, errors.New(\"not a regular file\")\n\t}\n\n\treturn &FileAction{Path: p}, nil\n}\n\n\/\/ Exec executes a file action\nfunc (fa *FileAction) Exec(w http.ResponseWriter, r *http.Request) error {\n\treader, err := os.Open(fa.Path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\temitMimeType(fa.Path, w)\n\t_, err = io.Copy(w, reader)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc emitMimeType(fname string, w http.ResponseWriter) {\n\text := path.Ext(fname)\n\tmimeType := utils.TypeByExtension(ext)\n\tw.Header().Set(\"Content-Type\", mimeType)\n}\n<|endoftext|>"} {"text":"<commit_before>package operation\n\ntype Manifest struct {\n\tApplications []AppManifest `yaml:\"applications\"`\n}\n\ntype AppManifest struct {\n\tName string `yaml:\"name\"`\n\tBuildpacks []string `yaml:\"buildpacks,omitempty\"`\n\tCommand string `yaml:\"command,omitempty\"`\n\tDiskQuota string `yaml:\"disk_quota,omitempty\"`\n\tDocker struct {\n\t\tImage string `yaml:\"image,omitempty\"`\n\t\tUsername string `yaml:\"username,omitempty\"`\n\t} `yaml:\"docker,omitempty\"`\n\tEnv map[string]string `yaml:\"env,omitempty\"`\n\tHealthCheckType string `yaml:\"health-check-type,omitempty\"`\n\tHealthCheckHTTPEndpoint string `yaml:\"health-check-http-endpoint,omitempty\"`\n\tInstances int `yaml:\"instances,omitempty\"`\n\tLogRateLimit string `yaml:\"log-rate-limit,omitempty\"`\n\tMemory string `yaml:\"memory,omitempty\"`\n\tNoRoute bool `yaml:\"no-route,omitempty\"`\n\tRoutes []AppManifestRoutes `yaml:\"routes,omitempty\"`\n\tServices []string `yaml:\"services,omitempty\"`\n\tStack string `yaml:\"stack,omitempty\"`\n\tTimeout int `yaml:\"timeout,omitempty\"`\n}\n\ntype AppManifestRoutes struct {\n\tRoute string `yaml:\"route,omitempty\"`\n}\n<commit_msg>Add struct for Docker manifest<commit_after>package operation\n\ntype Manifest struct {\n\tApplications []AppManifest `yaml:\"applications\"`\n}\n\ntype AppManifest struct {\n\tName string `yaml:\"name\"`\n\tBuildpacks []string `yaml:\"buildpacks,omitempty\"`\n\tCommand string `yaml:\"command,omitempty\"`\n\tDiskQuota string `yaml:\"disk_quota,omitempty\"`\n\tDocker *AppManifestDocker `yaml:\"docker,omitempty\"`\n\tEnv map[string]string `yaml:\"env,omitempty\"`\n\tHealthCheckType string `yaml:\"health-check-type,omitempty\"`\n\tHealthCheckHTTPEndpoint string `yaml:\"health-check-http-endpoint,omitempty\"`\n\tInstances int `yaml:\"instances,omitempty\"`\n\tLogRateLimit string `yaml:\"log-rate-limit,omitempty\"`\n\tMemory string `yaml:\"memory,omitempty\"`\n\tNoRoute bool `yaml:\"no-route,omitempty\"`\n\tRoutes []AppManifestRoutes `yaml:\"routes,omitempty\"`\n\tServices []string `yaml:\"services,omitempty\"`\n\tStack string `yaml:\"stack,omitempty\"`\n\tTimeout int `yaml:\"timeout,omitempty\"`\n}\n\ntype AppManifestDocker struct {\n\tImage string `yaml:\"image,omitempty\"`\n\tUsername string `yaml:\"username,omitempty\"`\n}\n\ntype AppManifestRoutes struct {\n\tRoute string `yaml:\"route,omitempty\"`\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage oracle_test\n\n\/\/ This file defines a test framework for oracle queries.\n\/\/\n\/\/ The files beneath testdata\/src\/main contain Go programs containing\n\/\/ query annotations of the form:\n\/\/\n\/\/ @verb id \"select\"\n\/\/\n\/\/ where verb is the query mode (e.g. \"callers\"), id is a unique name\n\/\/ for this query, and \"select\" is a regular expression matching the\n\/\/ substring of the current line that is the query's input selection.\n\/\/\n\/\/ The expected output for each query is provided in the accompanying\n\/\/ .golden file.\n\/\/\n\/\/ (Location information is not included because it's too fragile to\n\/\/ display as text. TODO(adonovan): think about how we can test its\n\/\/ correctness, since it is critical information.)\n\/\/\n\/\/ Run this test with:\n\/\/ \t% go test code.google.com\/p\/go.tools\/oracle -update\n\/\/ to update the golden files.\n\n\/\/ TODO(adonovan): improve coverage:\n\/\/ - output of @callgraph is nondeterministic.\n\/\/ - as are lists of labels.\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"go\/build\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"code.google.com\/p\/go.tools\/oracle\"\n)\n\nvar updateFlag = flag.Bool(\"update\", false, \"Update the golden files.\")\n\ntype query struct {\n\tid string \/\/ unique id\n\tverb string \/\/ query mode, e.g. \"callees\"\n\tposn token.Position \/\/ position of of query\n\tfilename string\n\tstart, end int \/\/ selection of file to pass to oracle\n}\n\nfunc parseRegexp(text string) (*regexp.Regexp, error) {\n\tpattern, err := strconv.Unquote(text)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't unquote %s\", text)\n\t}\n\treturn regexp.Compile(pattern)\n}\n\n\/\/ parseQueries parses and returns the queries in the named file.\nfunc parseQueries(t *testing.T, filename string) []*query {\n\tfiledata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Parse the file once to discover the test queries.\n\tvar fset token.FileSet\n\tf, err := parser.ParseFile(&fset, filename, filedata,\n\t\tparser.DeclarationErrors|parser.ParseComments)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tlines := bytes.Split(filedata, []byte(\"\\n\"))\n\n\tvar queries []*query\n\tqueriesById := make(map[string]*query)\n\n\t\/\/ Find all annotations of these forms:\n\texpectRe := regexp.MustCompile(`@([a-z]+)\\s+(\\S+)\\s+(\\\".*)$`) \/\/ @verb id \"regexp\"\n\tfor _, c := range f.Comments {\n\t\ttext := strings.TrimSpace(c.Text())\n\t\tif text == \"\" || text[0] != '@' {\n\t\t\tcontinue\n\t\t}\n\t\tposn := fset.Position(c.Pos())\n\n\t\t\/\/ @verb id \"regexp\"\n\t\tmatch := expectRe.FindStringSubmatch(text)\n\t\tif match == nil {\n\t\t\tt.Errorf(\"%s: ill-formed query: %s\", posn, text)\n\t\t\tcontinue\n\t\t}\n\n\t\tid := match[2]\n\t\tif prev, ok := queriesById[id]; ok {\n\t\t\tt.Errorf(\"%s: duplicate id %s\", posn, id)\n\t\t\tt.Errorf(\"%s: previously used here\", prev.posn)\n\t\t\tcontinue\n\t\t}\n\n\t\tselectRe, err := parseRegexp(match[3])\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: %s\", posn, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Find text of the current line, sans query.\n\t\t\/\/ (Queries must be \/\/ not \/**\/ comments.)\n\t\tline := lines[posn.Line-1][:posn.Column-1]\n\n\t\t\/\/ Apply regexp to current line to find input selection.\n\t\tloc := selectRe.FindIndex(line)\n\t\tif loc == nil {\n\t\t\tt.Errorf(\"%s: selection pattern %s doesn't match line %q\",\n\t\t\t\tposn, match[3], string(line))\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Assumes ASCII. TODO(adonovan): test on UTF-8.\n\t\tlinestart := posn.Offset - (posn.Column - 1)\n\n\t\t\/\/ Compute the file offsets\n\t\tq := &query{\n\t\t\tid: id,\n\t\t\tverb: match[1],\n\t\t\tposn: posn,\n\t\t\tfilename: filename,\n\t\t\tstart: linestart + loc[0],\n\t\t\tend: linestart + loc[1],\n\t\t}\n\t\tqueries = append(queries, q)\n\t\tqueriesById[id] = q\n\t}\n\n\t\/\/ Return the slice, not map, for deterministic iteration.\n\treturn queries\n}\n\n\/\/ stripLocation removes a \"file:line: \" prefix.\nfunc stripLocation(line string) string {\n\tif i := strings.Index(line, \": \"); i >= 0 {\n\t\tline = line[i+2:]\n\t}\n\treturn line\n}\n\n\/\/ doQuery poses query q to the oracle and writes its response and\n\/\/ error (if any) to out.\nfunc doQuery(out io.Writer, q *query) {\n\tfmt.Fprintf(out, \"-------- @%s %s --------\\n\", q.verb, q.id)\n\n\tcapture := new(bytes.Buffer) \/\/ capture standard output\n\tvar buildContext = build.Default\n\tbuildContext.GOPATH = \"testdata\"\n\terr := oracle.Main([]string{q.filename},\n\t\tq.verb,\n\t\tfmt.Sprintf(\"%s %d-%d\", q.filename, q.start, q.end),\n\t\t\/*PTA-log=*\/ nil, capture, &buildContext)\n\n\tfor _, line := range strings.Split(capture.String(), \"\\n\") {\n\t\tfmt.Fprintf(out, \"%s\\n\", stripLocation(line))\n\t}\n\n\tif err != nil {\n\t\tfmt.Fprintf(out, \"Error: %s\\n\", stripLocation(err.Error()))\n\t}\n}\n\nfunc TestOracle(t *testing.T) {\n\tfor _, filename := range []string{\n\t\t\"testdata\/src\/main\/calls.go\",\n\t\t\"testdata\/src\/main\/describe.go\",\n\t\t\"testdata\/src\/main\/freevars.go\",\n\t\t\"testdata\/src\/main\/implements.go\",\n\t\t\"testdata\/src\/main\/imports.go\",\n\t\t\"testdata\/src\/main\/peers.go\",\n\t} {\n\t\tqueries := parseQueries(t, filename)\n\t\tgolden := filename + \"lden\"\n\t\tgot := filename + \"t\"\n\t\tgotfh, err := os.Create(got)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Create(%s) failed: %s\", got, err)\n\t\t\tcontinue\n\t\t}\n\t\tdefer gotfh.Close()\n\n\t\t\/\/ Run the oracle on each query, redirecting its output\n\t\t\/\/ and error (if any) to the foo.got file.\n\t\tfor _, q := range queries {\n\t\t\tdoQuery(gotfh, q)\n\t\t}\n\n\t\t\/\/ Compare foo.got with foo.golden.\n\t\tcmd := exec.Command(\"\/usr\/bin\/diff\", \"-u3\", golden, got) \/\/ assumes POSIX\n\t\tbuf := new(bytes.Buffer)\n\t\tcmd.Stdout = buf\n\t\tif err := cmd.Run(); err != nil {\n\t\t\tt.Errorf(\"Oracle tests for %s failed: %s.\\n%s\\n\",\n\t\t\t\tfilename, err, buf)\n\n\t\t\tif *updateFlag {\n\t\t\t\tt.Logf(\"Updating %s...\", golden)\n\t\t\t\tif err := exec.Command(\"\/bin\/cp\", got, golden).Run(); err != nil {\n\t\t\t\t\tt.Errorf(\"Update failed: %s\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n<commit_msg>go.tools\/oracle: disable TestOracle test on windows (fixes windows build)<commit_after>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage oracle_test\n\n\/\/ This file defines a test framework for oracle queries.\n\/\/\n\/\/ The files beneath testdata\/src\/main contain Go programs containing\n\/\/ query annotations of the form:\n\/\/\n\/\/ @verb id \"select\"\n\/\/\n\/\/ where verb is the query mode (e.g. \"callers\"), id is a unique name\n\/\/ for this query, and \"select\" is a regular expression matching the\n\/\/ substring of the current line that is the query's input selection.\n\/\/\n\/\/ The expected output for each query is provided in the accompanying\n\/\/ .golden file.\n\/\/\n\/\/ (Location information is not included because it's too fragile to\n\/\/ display as text. TODO(adonovan): think about how we can test its\n\/\/ correctness, since it is critical information.)\n\/\/\n\/\/ Run this test with:\n\/\/ \t% go test code.google.com\/p\/go.tools\/oracle -update\n\/\/ to update the golden files.\n\n\/\/ TODO(adonovan): improve coverage:\n\/\/ - output of @callgraph is nondeterministic.\n\/\/ - as are lists of labels.\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"go\/build\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"code.google.com\/p\/go.tools\/oracle\"\n)\n\nvar updateFlag = flag.Bool(\"update\", false, \"Update the golden files.\")\n\ntype query struct {\n\tid string \/\/ unique id\n\tverb string \/\/ query mode, e.g. \"callees\"\n\tposn token.Position \/\/ position of of query\n\tfilename string\n\tstart, end int \/\/ selection of file to pass to oracle\n}\n\nfunc parseRegexp(text string) (*regexp.Regexp, error) {\n\tpattern, err := strconv.Unquote(text)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"can't unquote %s\", text)\n\t}\n\treturn regexp.Compile(pattern)\n}\n\n\/\/ parseQueries parses and returns the queries in the named file.\nfunc parseQueries(t *testing.T, filename string) []*query {\n\tfiledata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ Parse the file once to discover the test queries.\n\tvar fset token.FileSet\n\tf, err := parser.ParseFile(&fset, filename, filedata,\n\t\tparser.DeclarationErrors|parser.ParseComments)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tlines := bytes.Split(filedata, []byte(\"\\n\"))\n\n\tvar queries []*query\n\tqueriesById := make(map[string]*query)\n\n\t\/\/ Find all annotations of these forms:\n\texpectRe := regexp.MustCompile(`@([a-z]+)\\s+(\\S+)\\s+(\\\".*)$`) \/\/ @verb id \"regexp\"\n\tfor _, c := range f.Comments {\n\t\ttext := strings.TrimSpace(c.Text())\n\t\tif text == \"\" || text[0] != '@' {\n\t\t\tcontinue\n\t\t}\n\t\tposn := fset.Position(c.Pos())\n\n\t\t\/\/ @verb id \"regexp\"\n\t\tmatch := expectRe.FindStringSubmatch(text)\n\t\tif match == nil {\n\t\t\tt.Errorf(\"%s: ill-formed query: %s\", posn, text)\n\t\t\tcontinue\n\t\t}\n\n\t\tid := match[2]\n\t\tif prev, ok := queriesById[id]; ok {\n\t\t\tt.Errorf(\"%s: duplicate id %s\", posn, id)\n\t\t\tt.Errorf(\"%s: previously used here\", prev.posn)\n\t\t\tcontinue\n\t\t}\n\n\t\tselectRe, err := parseRegexp(match[3])\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: %s\", posn, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Find text of the current line, sans query.\n\t\t\/\/ (Queries must be \/\/ not \/**\/ comments.)\n\t\tline := lines[posn.Line-1][:posn.Column-1]\n\n\t\t\/\/ Apply regexp to current line to find input selection.\n\t\tloc := selectRe.FindIndex(line)\n\t\tif loc == nil {\n\t\t\tt.Errorf(\"%s: selection pattern %s doesn't match line %q\",\n\t\t\t\tposn, match[3], string(line))\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Assumes ASCII. TODO(adonovan): test on UTF-8.\n\t\tlinestart := posn.Offset - (posn.Column - 1)\n\n\t\t\/\/ Compute the file offsets\n\t\tq := &query{\n\t\t\tid: id,\n\t\t\tverb: match[1],\n\t\t\tposn: posn,\n\t\t\tfilename: filename,\n\t\t\tstart: linestart + loc[0],\n\t\t\tend: linestart + loc[1],\n\t\t}\n\t\tqueries = append(queries, q)\n\t\tqueriesById[id] = q\n\t}\n\n\t\/\/ Return the slice, not map, for deterministic iteration.\n\treturn queries\n}\n\n\/\/ stripLocation removes a \"file:line: \" prefix.\nfunc stripLocation(line string) string {\n\tif i := strings.Index(line, \": \"); i >= 0 {\n\t\tline = line[i+2:]\n\t}\n\treturn line\n}\n\n\/\/ doQuery poses query q to the oracle and writes its response and\n\/\/ error (if any) to out.\nfunc doQuery(out io.Writer, q *query) {\n\tfmt.Fprintf(out, \"-------- @%s %s --------\\n\", q.verb, q.id)\n\n\tcapture := new(bytes.Buffer) \/\/ capture standard output\n\tvar buildContext = build.Default\n\tbuildContext.GOPATH = \"testdata\"\n\terr := oracle.Main([]string{q.filename},\n\t\tq.verb,\n\t\tfmt.Sprintf(\"%s %d-%d\", q.filename, q.start, q.end),\n\t\t\/*PTA-log=*\/ nil, capture, &buildContext)\n\n\tfor _, line := range strings.Split(capture.String(), \"\\n\") {\n\t\tfmt.Fprintf(out, \"%s\\n\", stripLocation(line))\n\t}\n\n\tif err != nil {\n\t\tfmt.Fprintf(out, \"Error: %s\\n\", stripLocation(err.Error()))\n\t}\n}\n\nfunc TestOracle(t *testing.T) {\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\tt.Skipf(\"skipping test on %q (no \/usr\/bin\/diff)\", runtime.GOOS)\n\t}\n\n\tfor _, filename := range []string{\n\t\t\"testdata\/src\/main\/calls.go\",\n\t\t\"testdata\/src\/main\/describe.go\",\n\t\t\"testdata\/src\/main\/freevars.go\",\n\t\t\"testdata\/src\/main\/implements.go\",\n\t\t\"testdata\/src\/main\/imports.go\",\n\t\t\"testdata\/src\/main\/peers.go\",\n\t} {\n\t\tqueries := parseQueries(t, filename)\n\t\tgolden := filename + \"lden\"\n\t\tgot := filename + \"t\"\n\t\tgotfh, err := os.Create(got)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Create(%s) failed: %s\", got, err)\n\t\t\tcontinue\n\t\t}\n\t\tdefer gotfh.Close()\n\n\t\t\/\/ Run the oracle on each query, redirecting its output\n\t\t\/\/ and error (if any) to the foo.got file.\n\t\tfor _, q := range queries {\n\t\t\tdoQuery(gotfh, q)\n\t\t}\n\n\t\t\/\/ Compare foo.got with foo.golden.\n\t\tcmd := exec.Command(\"\/usr\/bin\/diff\", \"-u3\", golden, got) \/\/ assumes POSIX\n\t\tbuf := new(bytes.Buffer)\n\t\tcmd.Stdout = buf\n\t\tif err := cmd.Run(); err != nil {\n\t\t\tt.Errorf(\"Oracle tests for %s failed: %s.\\n%s\\n\",\n\t\t\t\tfilename, err, buf)\n\n\t\t\tif *updateFlag {\n\t\t\t\tt.Logf(\"Updating %s...\", golden)\n\t\t\t\tif err := exec.Command(\"\/bin\/cp\", got, golden).Run(); err != nil {\n\t\t\t\t\tt.Errorf(\"Update failed: %s\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package osutil\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestExistsFile(t *testing.T) {\n\tassert.False(t, ExistsFile(\"\/paht\/to\/notexist\"))\n\n\ttmpDir := os.TempDir()\n\ttmp, _ := ioutil.TempFile(tmpDir, \"droot_test\")\n\tdefer func() {\n\t\ttmp.Close()\n\t\tos.Remove(tmp.Name())\n\t}()\n\n\tassert.True(t, ExistsFile(tmp.Name()))\n\tassert.False(t, ExistsFile(tmpDir))\n}\n\nfunc TestIsSymlink(t *testing.T) {\n\ttmpDir := os.TempDir()\n\ttmp, _ := ioutil.TempFile(tmpDir, \"droot_test\")\n\tdefer func() {\n\t\ttmp.Close()\n\t\tos.Remove(tmp.Name())\n\t}()\n\n\tassert.False(t, IsSymlink(tmp.Name()))\n\n\tos.Symlink(tmp.Name(), tmpDir+\"\/symlink\")\n\n\tassert.True(t, IsSymlink(tmpDir+\"\/symlink\"))\n}\n\nfunc TestExistsDir(t *testing.T) {\n\tassert.False(t, ExistsDir(\"\/paht\/to\/notexist\"))\n\n\ttmpDir := os.TempDir()\n\ttmp, _ := ioutil.TempFile(tmpDir, \"droot_test\")\n\tdefer func() {\n\t\ttmp.Close()\n\t\tos.Remove(tmp.Name())\n\t}()\n\n\tassert.True(t, ExistsDir(tmpDir))\n\tassert.False(t, ExistsDir(tmp.Name()))\n}\n\nfunc TestIsDirEmpty(t *testing.T) {\n\tok, err := IsDirEmpty(\"\/paht\/to\/notexist\")\n\tassert.False(t, ok)\n\tassert.Error(t, err)\n\n\ttmpDir := os.TempDir()\n\tos.Mkdir(tmpDir+\"\/empty\", 0755)\n\tos.Mkdir(tmpDir+\"\/noempty\", 0755)\n\tos.Create(tmpDir+\"\/noempty\/test\")\n\tdefer func() {\n\t\tos.Remove(tmpDir+\"\/empty\")\n\t\tos.RemoveAll(tmpDir+\"\/noempty\")\n\t}()\n\n\tok, err = IsDirEmpty(tmpDir+\"\/empty\")\n\tassert.True(t, ok)\n\tassert.NoError(t, err)\n\n\tok, err = IsDirEmpty(tmpDir+\"\/noempty\")\n\tassert.False(t, ok)\n\tassert.NoError(t, err)\n}\n\n<commit_msg>Add TestRunCmd<commit_after>package osutil\n\nimport (\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestExistsFile(t *testing.T) {\n\tassert.False(t, ExistsFile(\"\/paht\/to\/notexist\"))\n\n\ttmpDir := os.TempDir()\n\ttmp, _ := ioutil.TempFile(tmpDir, \"droot_test\")\n\tdefer func() {\n\t\ttmp.Close()\n\t\tos.Remove(tmp.Name())\n\t}()\n\n\tassert.True(t, ExistsFile(tmp.Name()))\n\tassert.False(t, ExistsFile(tmpDir))\n}\n\nfunc TestIsSymlink(t *testing.T) {\n\ttmpDir := os.TempDir()\n\ttmp, _ := ioutil.TempFile(tmpDir, \"droot_test\")\n\tdefer func() {\n\t\ttmp.Close()\n\t\tos.Remove(tmp.Name())\n\t}()\n\n\tassert.False(t, IsSymlink(tmp.Name()))\n\n\tos.Symlink(tmp.Name(), tmpDir+\"\/symlink\")\n\n\tassert.True(t, IsSymlink(tmpDir+\"\/symlink\"))\n}\n\nfunc TestExistsDir(t *testing.T) {\n\tassert.False(t, ExistsDir(\"\/paht\/to\/notexist\"))\n\n\ttmpDir := os.TempDir()\n\ttmp, _ := ioutil.TempFile(tmpDir, \"droot_test\")\n\tdefer func() {\n\t\ttmp.Close()\n\t\tos.Remove(tmp.Name())\n\t}()\n\n\tassert.True(t, ExistsDir(tmpDir))\n\tassert.False(t, ExistsDir(tmp.Name()))\n}\n\nfunc TestIsDirEmpty(t *testing.T) {\n\tok, err := IsDirEmpty(\"\/paht\/to\/notexist\")\n\tassert.False(t, ok)\n\tassert.Error(t, err)\n\n\ttmpDir := os.TempDir()\n\tos.Mkdir(tmpDir+\"\/empty\", 0755)\n\tos.Mkdir(tmpDir+\"\/noempty\", 0755)\n\tos.Create(tmpDir+\"\/noempty\/test\")\n\tdefer func() {\n\t\tos.Remove(tmpDir+\"\/empty\")\n\t\tos.RemoveAll(tmpDir+\"\/noempty\")\n\t}()\n\n\tok, err = IsDirEmpty(tmpDir+\"\/empty\")\n\tassert.True(t, ok)\n\tassert.NoError(t, err)\n\n\tok, err = IsDirEmpty(tmpDir+\"\/noempty\")\n\tassert.False(t, ok)\n\tassert.NoError(t, err)\n}\n\nfunc TestRunCmd(t *testing.T) {\n\tassert.NoError(t, RunCmd(\"\/bin\/ls\"))\n\tassert.Error(t, RunCmd(\"\/bin\/hoge\"))\n}\n<|endoftext|>"} {"text":"<commit_before>package schema\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"github.com\/mitchellh\/mapstructure\"\n)\n\n\/\/ DiffFieldReader reads fields out of a diff structures.\n\/\/\n\/\/ It also requires access to a Reader that reads fields from the structure\n\/\/ that the diff was derived from. This is usually the state. This is required\n\/\/ because a diff on its own doesn't have complete data about full objects\n\/\/ such as maps.\n\/\/\n\/\/ The Source MUST be the data that the diff was derived from. If it isn't,\n\/\/ the behavior of this struct is undefined.\n\/\/\n\/\/ Reading fields from a DiffFieldReader is identical to reading from\n\/\/ Source except the diff will be applied to the end result.\n\/\/\n\/\/ The \"Exists\" field on the result will be set to true if the complete\n\/\/ field exists whether its from the source, diff, or a combination of both.\n\/\/ It cannot be determined whether a retrieved value is composed of\n\/\/ diff elements.\ntype DiffFieldReader struct {\n\tDiff *terraform.InstanceDiff\n\tSource FieldReader\n\tSchema map[string]*Schema\n}\n\nfunc (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) {\n\tschemaList := addrToSchema(address, r.Schema)\n\tif len(schemaList) == 0 {\n\t\treturn FieldReadResult{}, nil\n\t}\n\n\tschema := schemaList[len(schemaList)-1]\n\tswitch schema.Type {\n\tcase TypeBool, TypeInt, TypeFloat, TypeString:\n\t\treturn r.readPrimitive(address, schema)\n\tcase TypeList:\n\t\treturn readListField(r, address, schema)\n\tcase TypeMap:\n\t\treturn r.readMap(address, schema)\n\tcase TypeSet:\n\t\treturn r.readSet(address, schema)\n\tcase typeObject:\n\t\treturn readObjectField(r, address, schema.Elem.(map[string]*Schema))\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Unknown type: %#v\", schema.Type))\n\t}\n}\n\nfunc (r *DiffFieldReader) readMap(\n\taddress []string, schema *Schema) (FieldReadResult, error) {\n\tresult := make(map[string]interface{})\n\tresultSet := false\n\n\t\/\/ First read the map from the underlying source\n\tsource, err := r.Source.ReadField(address)\n\tif err != nil {\n\t\treturn FieldReadResult{}, err\n\t}\n\tif source.Exists {\n\t\tresult = source.Value.(map[string]interface{})\n\t\tresultSet = true\n\t}\n\n\t\/\/ Next, read all the elements we have in our diff, and apply\n\t\/\/ the diff to our result.\n\tprefix := strings.Join(address, \".\") + \".\"\n\tfor k, v := range r.Diff.Attributes {\n\t\tif !strings.HasPrefix(k, prefix) {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(k, prefix+\"%\") {\n\t\t\t\/\/ Ignore the count field\n\t\t\tcontinue\n\t\t}\n\n\t\tresultSet = true\n\n\t\tk = k[len(prefix):]\n\t\tif v.NewRemoved {\n\t\t\tdelete(result, k)\n\t\t\tcontinue\n\t\t}\n\n\t\tresult[k] = v.New\n\t}\n\n\terr = mapValuesToPrimitive(result, schema)\n\tif err != nil {\n\t\treturn FieldReadResult{}, nil\n\t}\n\n\tvar resultVal interface{}\n\tif resultSet {\n\t\tresultVal = result\n\t}\n\n\treturn FieldReadResult{\n\t\tValue: resultVal,\n\t\tExists: resultSet,\n\t}, nil\n}\n\nfunc (r *DiffFieldReader) readPrimitive(\n\taddress []string, schema *Schema) (FieldReadResult, error) {\n\tresult, err := r.Source.ReadField(address)\n\tif err != nil {\n\t\treturn FieldReadResult{}, err\n\t}\n\n\tattrD, ok := r.Diff.Attributes[strings.Join(address, \".\")]\n\tif !ok {\n\t\treturn result, nil\n\t}\n\n\tvar resultVal string\n\tif !attrD.NewComputed {\n\t\tresultVal = attrD.New\n\t\tif attrD.NewExtra != nil {\n\t\t\tresult.ValueProcessed = resultVal\n\t\t\tif err := mapstructure.WeakDecode(attrD.NewExtra, &resultVal); err != nil {\n\t\t\t\treturn FieldReadResult{}, err\n\t\t\t}\n\t\t}\n\t}\n\n\tresult.Computed = attrD.NewComputed\n\tresult.Exists = true\n\tresult.Value, err = stringToPrimitive(resultVal, false, schema)\n\tif err != nil {\n\t\treturn FieldReadResult{}, err\n\t}\n\n\treturn result, nil\n}\n\nfunc (r *DiffFieldReader) readSet(\n\taddress []string, schema *Schema) (FieldReadResult, error) {\n\tprefix := strings.Join(address, \".\") + \".\"\n\n\t\/\/ Create the set that will be our result\n\tset := schema.ZeroValue().(*Set)\n\n\t\/\/ Go through the map and find all the set items\n\tfor k, d := range r.Diff.Attributes {\n\t\tif d.NewRemoved {\n\t\t\t\/\/ If the field is removed, we always ignore it\n\t\t\tcontinue\n\t\t}\n\t\tif !strings.HasPrefix(k, prefix) {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasSuffix(k, \"#\") {\n\t\t\t\/\/ Ignore any count field\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Split the key, since it might be a sub-object like \"idx.field\"\n\t\tparts := strings.Split(k[len(prefix):], \".\")\n\t\tidx := parts[0]\n\n\t\traw, err := r.ReadField(append(address, idx))\n\t\tif err != nil {\n\t\t\treturn FieldReadResult{}, err\n\t\t}\n\t\tif !raw.Exists {\n\t\t\t\/\/ This shouldn't happen because we just verified it does exist\n\t\t\tpanic(\"missing field in set: \" + k + \".\" + idx)\n\t\t}\n\n\t\tset.Add(raw.Value)\n\t}\n\n\t\/\/ Determine if the set \"exists\". It exists if there are items or if\n\t\/\/ the diff explicitly wanted it empty.\n\texists := set.Len() > 0\n\tif !exists {\n\t\t\/\/ We could check if the diff value is \"0\" here but I think the\n\t\t\/\/ existence of \"#\" on its own is enough to show it existed. This\n\t\t\/\/ protects us in the future from the zero value changing from\n\t\t\/\/ \"0\" to \"\" breaking us (if that were to happen).\n\t\tif _, ok := r.Diff.Attributes[prefix+\"#\"]; ok {\n\t\t\texists = true\n\t\t}\n\t}\n\n\tif !exists {\n\t\tresult, err := r.Source.ReadField(address)\n\t\tif err != nil {\n\t\t\treturn FieldReadResult{}, err\n\t\t}\n\t\tif result.Exists {\n\t\t\treturn result, nil\n\t\t}\n\t}\n\n\treturn FieldReadResult{\n\t\tValue: set,\n\t\tExists: exists,\n\t}, nil\n}\n<commit_msg>memoize DiffFieldReader.ReadField<commit_after>package schema\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"github.com\/mitchellh\/mapstructure\"\n)\n\n\/\/ DiffFieldReader reads fields out of a diff structures.\n\/\/\n\/\/ It also requires access to a Reader that reads fields from the structure\n\/\/ that the diff was derived from. This is usually the state. This is required\n\/\/ because a diff on its own doesn't have complete data about full objects\n\/\/ such as maps.\n\/\/\n\/\/ The Source MUST be the data that the diff was derived from. If it isn't,\n\/\/ the behavior of this struct is undefined.\n\/\/\n\/\/ Reading fields from a DiffFieldReader is identical to reading from\n\/\/ Source except the diff will be applied to the end result.\n\/\/\n\/\/ The \"Exists\" field on the result will be set to true if the complete\n\/\/ field exists whether its from the source, diff, or a combination of both.\n\/\/ It cannot be determined whether a retrieved value is composed of\n\/\/ diff elements.\ntype DiffFieldReader struct {\n\tDiff *terraform.InstanceDiff\n\tSource FieldReader\n\tSchema map[string]*Schema\n\n\t\/\/ cache for memoizing ReadField calls.\n\tcache map[string]cachedFieldReadResult\n}\n\ntype cachedFieldReadResult struct {\n\tval FieldReadResult\n\terr error\n}\n\nfunc (r *DiffFieldReader) ReadField(address []string) (FieldReadResult, error) {\n\tif r.cache == nil {\n\t\tr.cache = make(map[string]cachedFieldReadResult)\n\t}\n\n\t\/\/ Create the cache key by joining around a value that isn't a valid part\n\t\/\/ of an address. This assumes that the Source and Schema are not changed\n\t\/\/ for the life of this DiffFieldReader.\n\tcacheKey := strings.Join(address, \"|\")\n\tif cached, ok := r.cache[cacheKey]; ok {\n\t\treturn cached.val, cached.err\n\t}\n\n\tschemaList := addrToSchema(address, r.Schema)\n\tif len(schemaList) == 0 {\n\t\tr.cache[cacheKey] = cachedFieldReadResult{}\n\t\treturn FieldReadResult{}, nil\n\t}\n\n\tvar res FieldReadResult\n\tvar err error\n\n\tschema := schemaList[len(schemaList)-1]\n\tswitch schema.Type {\n\tcase TypeBool, TypeInt, TypeFloat, TypeString:\n\t\tres, err = r.readPrimitive(address, schema)\n\tcase TypeList:\n\t\tres, err = readListField(r, address, schema)\n\tcase TypeMap:\n\t\tres, err = r.readMap(address, schema)\n\tcase TypeSet:\n\t\tres, err = r.readSet(address, schema)\n\tcase typeObject:\n\t\tres, err = readObjectField(r, address, schema.Elem.(map[string]*Schema))\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"Unknown type: %#v\", schema.Type))\n\t}\n\n\tr.cache[cacheKey] = cachedFieldReadResult{\n\t\tval: res,\n\t\terr: err,\n\t}\n\treturn res, err\n}\n\nfunc (r *DiffFieldReader) readMap(\n\taddress []string, schema *Schema) (FieldReadResult, error) {\n\tresult := make(map[string]interface{})\n\tresultSet := false\n\n\t\/\/ First read the map from the underlying source\n\tsource, err := r.Source.ReadField(address)\n\tif err != nil {\n\t\treturn FieldReadResult{}, err\n\t}\n\tif source.Exists {\n\t\tresult = source.Value.(map[string]interface{})\n\t\tresultSet = true\n\t}\n\n\t\/\/ Next, read all the elements we have in our diff, and apply\n\t\/\/ the diff to our result.\n\tprefix := strings.Join(address, \".\") + \".\"\n\tfor k, v := range r.Diff.Attributes {\n\t\tif !strings.HasPrefix(k, prefix) {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(k, prefix+\"%\") {\n\t\t\t\/\/ Ignore the count field\n\t\t\tcontinue\n\t\t}\n\n\t\tresultSet = true\n\n\t\tk = k[len(prefix):]\n\t\tif v.NewRemoved {\n\t\t\tdelete(result, k)\n\t\t\tcontinue\n\t\t}\n\n\t\tresult[k] = v.New\n\t}\n\n\terr = mapValuesToPrimitive(result, schema)\n\tif err != nil {\n\t\treturn FieldReadResult{}, nil\n\t}\n\n\tvar resultVal interface{}\n\tif resultSet {\n\t\tresultVal = result\n\t}\n\n\treturn FieldReadResult{\n\t\tValue: resultVal,\n\t\tExists: resultSet,\n\t}, nil\n}\n\nfunc (r *DiffFieldReader) readPrimitive(\n\taddress []string, schema *Schema) (FieldReadResult, error) {\n\tresult, err := r.Source.ReadField(address)\n\tif err != nil {\n\t\treturn FieldReadResult{}, err\n\t}\n\n\tattrD, ok := r.Diff.Attributes[strings.Join(address, \".\")]\n\tif !ok {\n\t\treturn result, nil\n\t}\n\n\tvar resultVal string\n\tif !attrD.NewComputed {\n\t\tresultVal = attrD.New\n\t\tif attrD.NewExtra != nil {\n\t\t\tresult.ValueProcessed = resultVal\n\t\t\tif err := mapstructure.WeakDecode(attrD.NewExtra, &resultVal); err != nil {\n\t\t\t\treturn FieldReadResult{}, err\n\t\t\t}\n\t\t}\n\t}\n\n\tresult.Computed = attrD.NewComputed\n\tresult.Exists = true\n\tresult.Value, err = stringToPrimitive(resultVal, false, schema)\n\tif err != nil {\n\t\treturn FieldReadResult{}, err\n\t}\n\n\treturn result, nil\n}\n\nfunc (r *DiffFieldReader) readSet(\n\taddress []string, schema *Schema) (FieldReadResult, error) {\n\tprefix := strings.Join(address, \".\") + \".\"\n\n\t\/\/ Create the set that will be our result\n\tset := schema.ZeroValue().(*Set)\n\n\t\/\/ Go through the map and find all the set items\n\tfor k, d := range r.Diff.Attributes {\n\t\tif d.NewRemoved {\n\t\t\t\/\/ If the field is removed, we always ignore it\n\t\t\tcontinue\n\t\t}\n\t\tif !strings.HasPrefix(k, prefix) {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasSuffix(k, \"#\") {\n\t\t\t\/\/ Ignore any count field\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ Split the key, since it might be a sub-object like \"idx.field\"\n\t\tparts := strings.Split(k[len(prefix):], \".\")\n\t\tidx := parts[0]\n\n\t\traw, err := r.ReadField(append(address, idx))\n\t\tif err != nil {\n\t\t\treturn FieldReadResult{}, err\n\t\t}\n\t\tif !raw.Exists {\n\t\t\t\/\/ This shouldn't happen because we just verified it does exist\n\t\t\tpanic(\"missing field in set: \" + k + \".\" + idx)\n\t\t}\n\n\t\tset.Add(raw.Value)\n\t}\n\n\t\/\/ Determine if the set \"exists\". It exists if there are items or if\n\t\/\/ the diff explicitly wanted it empty.\n\texists := set.Len() > 0\n\tif !exists {\n\t\t\/\/ We could check if the diff value is \"0\" here but I think the\n\t\t\/\/ existence of \"#\" on its own is enough to show it existed. This\n\t\t\/\/ protects us in the future from the zero value changing from\n\t\t\/\/ \"0\" to \"\" breaking us (if that were to happen).\n\t\tif _, ok := r.Diff.Attributes[prefix+\"#\"]; ok {\n\t\t\texists = true\n\t\t}\n\t}\n\n\tif !exists {\n\t\tresult, err := r.Source.ReadField(address)\n\t\tif err != nil {\n\t\t\treturn FieldReadResult{}, err\n\t\t}\n\t\tif result.Exists {\n\t\t\treturn result, nil\n\t\t}\n\t}\n\n\treturn FieldReadResult{\n\t\tValue: set,\n\t\tExists: exists,\n\t}, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\/\/ Delete_file implements the probe operation for deleting a file in a\n\/\/ storage system.\n\n\/\/ Package delete implements the file deletion operation with a storage system.\npackage delete\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"time\"\n\n\t\"cloud.google.com\/go\/storage\"\n\t\"github.com\/google\/cloudprober\/logger\"\n\t\"github.com\/googleapis\/google-cloud-go-testing\/storage\/stiface\"\n\t\"google.golang.org\/api\/iterator\"\n\n\tprobe \"github.com\/googleinterns\/step224-2020\/hermes\/probe\"\n\tm \"github.com\/googleinterns\/step224-2020\/hermes\/probe\/metrics\"\n\tpb \"github.com\/googleinterns\/step224-2020\/hermes\/proto\"\n)\n\n\/\/ DeleteRandomFile deletes a random file in the target storage system bucket.\n\/\/ It then checks that the file has been deleted by trying to get the object.\n\/\/ Arguments:\n\/\/\t- ctx: pass the context so this probe can be cancelled if needed.\n\/\/\t- target: pass the target run information.\n\/\/\t- client: pass an initialised storage client for this target system.\n\/\/\t- logger: pass the logger associated with the probe calling this function.\n\/\/\t\t-> logger can log to Google Cloud if configured to do so.\n\/\/ Returns:\n\/\/\t- fileID: returns the ID of the file delete OR a missing file to be created if one is found.\n\/\/\t- err:\n\/\/\t\tStatus:\n\/\/\t\t- StateJournalInconsistent: the file to be deleted does not exist in Hermes' StateJournal.\n\/\/\t\t- FileMissing: the file to be deleted could not be found in the target bucket.\n\/\/\t\t- BucketMissing: the target bucket on this target system was not found.\n\/\/\t\t- ProbeFailed: there was an error during one of the API calls and the probe failed.\nfunc DeleteRandomFile(ctx context.Context, target *probe.Target, client stiface.Client, logger *logger.Logger) (int32, error) {\n\tfileID := pickFileToDelete()\n\treturn DeleteFile(ctx, fileID, target, client, logger)\n}\n\n\/\/ DeleteFile deletes the file, corresponding to the ID passed, in the target storage system bucket.\n\/\/ It then checks that the file has been deleted by trying to get the object.\n\/\/ Arguments:\n\/\/\t- ctx: pass the context so this probe can be cancelled if needed.\n\/\/\t- config: pass the HermesProbeDef config for the probe calling this function.\n\/\/\t- target: pass the target run information.\n\/\/\t- client: pass an initialised storage client for this target system.\n\/\/\t- logger: pass the logger associated with the probe calling this function.\n\/\/ Returns:\n\/\/\t- fileID: returns the ID of the file delete OR a missing file to be created if one is found.\n\/\/\t- err:\n\/\/\t\tStatus:\n\/\/\t\t- StateJournalInconsistent: the file to be deleted does not exist in Hermes' StateJournal.\n\/\/\t\t- FileMissing: the file to be deleted could not be found in the target bucket.\n\/\/\t\t- BucketMissing: the target bucket on this target system was not found.\n\/\/\t\t- ProbeFailed: there was an error during one of the API calls and the probe failed.\nfunc DeleteFile(ctx context.Context, fileID int32, target *probe.Target, client stiface.Client, logger *logger.Logger) (int32, error) {\n\tbucket := target.Target.GetBucketName()\n\n\t\/\/ TODO(evanSpendlove): Add custom error object to return value and modify all returns.\n\tfilename, ok := target.Journal.Filenames[fileID]\n\tif !ok {\n\t\treturn fileID, fmt.Errorf(\"delete(%q, %q) failed; status StateJournalInconsistent: expected fileID %d to exist\", bucket, filename, fileID)\n\t}\n\n\ttarget.Journal.Intent = &pb.Intent{\n\t\tFileOperation: pb.Intent_CREATE,\n\t\tFilename: filename,\n\t}\n\n\tfile := client.Bucket(bucket).Object(filename)\n\n\tstart := time.Now()\n\tif err := file.Delete(ctx); err != nil {\n\t\tvar status m.ExitStatus\n\t\tswitch err {\n\t\tcase storage.ErrObjectNotExist:\n\t\t\tstatus = m.FileMissing\n\t\tcase storage.ErrBucketNotExist:\n\t\t\tstatus = m.BucketMissing\n\t\tdefault:\n\t\t\tstatus = m.ProbeFailed\n\t\t}\n\n\t\ttarget.LatencyMetrics.APICallLatency[m.APIDeleteFile][status].Metric(\"hermes_api_latency_s\").AddFloat64(time.Now().Sub(start).Seconds())\n\t\treturn fileID, fmt.Errorf(\"delete(%q, %q) failed; status %v: %w\", bucket, filename, status, err)\n\t}\n\ttarget.LatencyMetrics.APICallLatency[m.APIDeleteFile][m.Success].Metric(\"hermes_api_latency_s\").AddFloat64(time.Now().Sub(start).Seconds())\n\n\tquery := &storage.Query{Prefix: filename}\n\tstart = time.Now()\n\tobjects := client.Bucket(bucket).Objects(ctx, query)\n\ttarget.LatencyMetrics.APICallLatency[m.APIListFiles][m.Success].Metric(\"hermes_api_latency_s\").AddFloat64(time.Now().Sub(start).Seconds())\n\tfor {\n\t\tobj, err := objects.Next()\n\t\tif err == iterator.Done {\n\t\t\tbreak\n\t\t}\n\t\tif obj.Name == filename {\n\t\t\tstatus := m.ProbeFailed\n\t\t\treturn fileID, fmt.Errorf(\"delete(%q, %q) failed; status %v: object %v still listed after delete\", bucket, filename, status, obj.Name)\n\t\t}\n\t\tif err != nil {\n\t\t\tstatus := m.BucketMissing\n\t\t\treturn fileID, fmt.Errorf(\"delete(%q, %q) failed; status %v: %w\", bucket, filename, status, err)\n\t\t}\n\t}\n\n\t\/\/ Update in-memory NIL file after delete operation.\n\tdelete(target.Journal.Filenames, int32(fileID))\n\n\tlogger.Infof(\"Object %v deleted in bucket %s.\", file, bucket)\n\treturn fileID, nil\n}\n\n\/\/ pickFileToDelete picks which file to delete and returns the integer ID of this file.\n\/\/ Returns:\n\/\/\t- ID: returns the ID of the file to be deleted.\nfunc pickFileToDelete() int32 {\n\tconst (\n\t\tbeg = 10 \/\/ we can delete files staring from the file Hermes_10\n\t\tnumberOfDeletableFiles = 41 \/\/ there are 41 files to delete from [Hermes_10,Hermes_50]\n\t)\n\trand.Seed(time.Now().UnixNano())\n\treturn int32(rand.Intn(numberOfDeletableFiles) + beg)\n}\n<commit_msg>Refactored constants in pickFileToDelete().<commit_after>\/\/ Copyright 2020 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\/\/\n\/\/ Delete_file implements the probe operation for deleting a file in a\n\/\/ storage system.\n\n\/\/ Package delete implements the file deletion operation with a storage system.\npackage delete\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"time\"\n\n\t\"cloud.google.com\/go\/storage\"\n\t\"github.com\/google\/cloudprober\/logger\"\n\t\"github.com\/googleapis\/google-cloud-go-testing\/storage\/stiface\"\n\t\"google.golang.org\/api\/iterator\"\n\n\tprobe \"github.com\/googleinterns\/step224-2020\/hermes\/probe\"\n\tm \"github.com\/googleinterns\/step224-2020\/hermes\/probe\/metrics\"\n\tpb \"github.com\/googleinterns\/step224-2020\/hermes\/proto\"\n)\n\n\/\/ DeleteRandomFile deletes a random file in the target storage system bucket.\n\/\/ It then checks that the file has been deleted by trying to get the object.\n\/\/ Arguments:\n\/\/\t- ctx: pass the context so this probe can be cancelled if needed.\n\/\/\t- target: pass the target run information.\n\/\/\t- client: pass an initialised storage client for this target system.\n\/\/\t- logger: pass the logger associated with the probe calling this function.\n\/\/\t\t-> logger can log to Google Cloud if configured to do so.\n\/\/ Returns:\n\/\/\t- fileID: returns the ID of the file delete OR a missing file to be created if one is found.\n\/\/\t- err:\n\/\/\t\tStatus:\n\/\/\t\t- StateJournalInconsistent: the file to be deleted does not exist in Hermes' StateJournal.\n\/\/\t\t- FileMissing: the file to be deleted could not be found in the target bucket.\n\/\/\t\t- BucketMissing: the target bucket on this target system was not found.\n\/\/\t\t- ProbeFailed: there was an error during one of the API calls and the probe failed.\nfunc DeleteRandomFile(ctx context.Context, target *probe.Target, client stiface.Client, logger *logger.Logger) (int32, error) {\n\tfileID := pickFileToDelete()\n\treturn DeleteFile(ctx, fileID, target, client, logger)\n}\n\n\/\/ DeleteFile deletes the file, corresponding to the ID passed, in the target storage system bucket.\n\/\/ It then checks that the file has been deleted by trying to get the object.\n\/\/ Arguments:\n\/\/\t- ctx: pass the context so this probe can be cancelled if needed.\n\/\/\t- config: pass the HermesProbeDef config for the probe calling this function.\n\/\/\t- target: pass the target run information.\n\/\/\t- client: pass an initialised storage client for this target system.\n\/\/\t- logger: pass the logger associated with the probe calling this function.\n\/\/ Returns:\n\/\/\t- fileID: returns the ID of the file delete OR a missing file to be created if one is found.\n\/\/\t- err:\n\/\/\t\tStatus:\n\/\/\t\t- StateJournalInconsistent: the file to be deleted does not exist in Hermes' StateJournal.\n\/\/\t\t- FileMissing: the file to be deleted could not be found in the target bucket.\n\/\/\t\t- BucketMissing: the target bucket on this target system was not found.\n\/\/\t\t- ProbeFailed: there was an error during one of the API calls and the probe failed.\nfunc DeleteFile(ctx context.Context, fileID int32, target *probe.Target, client stiface.Client, logger *logger.Logger) (int32, error) {\n\tbucket := target.Target.GetBucketName()\n\n\t\/\/ TODO(evanSpendlove): Add custom error object to return value and modify all returns.\n\tfilename, ok := target.Journal.Filenames[fileID]\n\tif !ok {\n\t\treturn fileID, fmt.Errorf(\"delete(%q, %q) failed; status StateJournalInconsistent: expected fileID %d to exist\", bucket, filename, fileID)\n\t}\n\n\ttarget.Journal.Intent = &pb.Intent{\n\t\tFileOperation: pb.Intent_CREATE,\n\t\tFilename: filename,\n\t}\n\n\tfile := client.Bucket(bucket).Object(filename)\n\n\tstart := time.Now()\n\tif err := file.Delete(ctx); err != nil {\n\t\tvar status m.ExitStatus\n\t\tswitch err {\n\t\tcase storage.ErrObjectNotExist:\n\t\t\tstatus = m.FileMissing\n\t\tcase storage.ErrBucketNotExist:\n\t\t\tstatus = m.BucketMissing\n\t\tdefault:\n\t\t\tstatus = m.ProbeFailed\n\t\t}\n\n\t\ttarget.LatencyMetrics.APICallLatency[m.APIDeleteFile][status].Metric(\"hermes_api_latency_s\").AddFloat64(time.Now().Sub(start).Seconds())\n\t\treturn fileID, fmt.Errorf(\"delete(%q, %q) failed; status %v: %w\", bucket, filename, status, err)\n\t}\n\ttarget.LatencyMetrics.APICallLatency[m.APIDeleteFile][m.Success].Metric(\"hermes_api_latency_s\").AddFloat64(time.Now().Sub(start).Seconds())\n\n\tquery := &storage.Query{Prefix: filename}\n\tstart = time.Now()\n\tobjects := client.Bucket(bucket).Objects(ctx, query)\n\ttarget.LatencyMetrics.APICallLatency[m.APIListFiles][m.Success].Metric(\"hermes_api_latency_s\").AddFloat64(time.Now().Sub(start).Seconds())\n\tfor {\n\t\tobj, err := objects.Next()\n\t\tif err == iterator.Done {\n\t\t\tbreak\n\t\t}\n\t\tif obj.Name == filename {\n\t\t\tstatus := m.ProbeFailed\n\t\t\treturn fileID, fmt.Errorf(\"delete(%q, %q) failed; status %v: object %v still listed after delete\", bucket, filename, status, obj.Name)\n\t\t}\n\t\tif err != nil {\n\t\t\tstatus := m.BucketMissing\n\t\t\treturn fileID, fmt.Errorf(\"delete(%q, %q) failed; status %v: %w\", bucket, filename, status, err)\n\t\t}\n\t}\n\n\t\/\/ Update in-memory NIL file after delete operation.\n\tdelete(target.Journal.Filenames, int32(fileID))\n\n\tlogger.Infof(\"Object %v deleted in bucket %s.\", file, bucket)\n\treturn fileID, nil\n}\n\n\/\/ pickFileToDelete picks which file to delete and returns the integer ID of this file.\n\/\/ Returns:\n\/\/\t- ID: returns the ID of the file to be deleted.\nfunc pickFileToDelete() int32 {\n\tconst (\n\t\tbegin = 11 \/\/ we can delete files staring from the file Hermes_11\n\t\tnumberOfDeletableFiles = 40 \/\/ there are 40 files to delete from [Hermes_11,Hermes_50]\n\t)\n\trand.Seed(time.Now().UnixNano())\n\treturn int32(rand.Intn(numberOfDeletableFiles) + being)\n}\n<|endoftext|>"} {"text":"<commit_before>package ams\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"html\"\n\t\"net\/http\"\n\n\t\"github.com\/pkg\/errors\"\n\t\"time\"\n)\n\ntype MetaData struct {\n\tURI string `json:\"uri\"`\n}\n\ntype MediaAsset struct {\n\tMetaData MetaData `json:\"__metadata\"`\n}\n\nconst (\n\ttaskBodyTemplate = \"<?xml version=\\\"1.0\\\" encoding=\\\"utf-8\\\"?><taskBody><inputAsset>JobInputAsset(0)<\/inputAsset><outputAsset assetName=\\\"%s\\\">JobOutputAsset(0)<\/outputAsset><\/taskBody>\"\n)\n\nconst (\n\tJobQueued = iota\n\tJobScheduled\n\tJobProcessing\n\tJobFinished\n\tJobError\n\tJobCanceled\n\tJobCanceling\n)\n\nfunc NewMediaAsset(uri string) MediaAsset {\n\treturn MediaAsset{\n\t\tMetaData: MetaData{\n\t\t\tURI: uri,\n\t\t},\n\t}\n}\n\ntype Task struct {\n\tName string `json:\"Name\"`\n\tConfiguration string `json:\"Configuration\"`\n\tMediaProcessorID string `json:\"MediaProcessorId\"`\n\tTaskBody string `json:\"TaskBody\"`\n}\n\ntype Job struct {\n\tID string `json:\"Id\"`\n\tName string `json:\"Name\"`\n\tStartTime string `json:\"StartTime\"`\n\tEndTime string `json:\"EndTime\"`\n\tLastModified string `json:\"LastModified\"`\n\tPriority int `json:\"Priority\"`\n\tRunningDuration float64 `json:\"RunningDuration\"`\n\tState int `json:\"State\"`\n}\n\nfunc (j *Job) toResource() string {\n\treturn fmt.Sprintf(\"Jobs('%s')\", j.ID)\n}\n\nfunc (c *Client) EncodeAssetWithContext(ctx context.Context, mediaProcessorID, configuration string, asset *Asset) (*Job, error) {\n\toutAssetName := fmt.Sprintf(\"[ENCODED]%s\", asset.Name)\n\n\tparams := map[string]interface{}{\n\t\t\"Name\": fmt.Sprintf(\"EncodeJob - %s\", asset.ID),\n\t\t\"InputMediaAssets\": []MediaAsset{\n\t\t\tNewMediaAsset(c.buildAssetURI(asset)),\n\t\t},\n\t\t\"Tasks\": []Task{\n\t\t\tTask{\n\t\t\t\tName: fmt.Sprintf(\"task-%s\", outAssetName),\n\t\t\t\tConfiguration: configuration,\n\t\t\t\tMediaProcessorID: mediaProcessorID,\n\t\t\t\tTaskBody: buildTaskBody(outAssetName),\n\t\t\t},\n\t\t},\n\t}\n\tbody, err := encodeParams(params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq, err := c.newRequest(ctx, http.MethodPost, \"Jobs\", body)\n\treq.Header.Set(\"Content-Type\", \"application\/json;odata=verbose\")\n\treq.Header.Set(\"Accept\", \"application\/json;odata=verbose\")\n\n\tvar out struct {\n\t\tData Job `json:\"d\"`\n\t}\n\tif err := c.do(req, http.StatusCreated, &out); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &out.Data, nil\n}\n\nfunc (c *Client) GetOutputMediaAssetsWithContext(ctx context.Context, job *Job) ([]Asset, error) {\n\tendpoint := job.toResource() + \"\/OutputMediaAssets\"\n\treq, err := c.newRequest(ctx, \"GET\", endpoint, nil)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get output media assets request build failed\")\n\t}\n\tvar out struct {\n\t\tAssets []Asset `json:\"value\"`\n\t}\n\tif err := c.do(req, http.StatusOK, &out); err != nil {\n\t\treturn nil, errors.Wrap(err, \"get output media assets request failed\")\n\t}\n\treturn out.Assets, nil\n}\n\nfunc (c *Client) GetJobWithContext(ctx context.Context, jobID string) (*Job, error) {\n\treq, err := c.newRequest(ctx, http.MethodGet, fmt.Sprintf(\"Jobs('%s')\", jobID), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar out Job\n\tif err := c.do(req, http.StatusOK, &out); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &out, nil\n}\n\nfunc (c *Client) WaitJobWithContext(ctx context.Context, job *Job) error {\n\tfor {\n\t\tcurrent, err := c.GetJobWithContext(ctx, job.ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif current.State == JobError {\n\t\t\treturn errors.New(\"job failed\")\n\t\t}\n\t\tif current.State == JobCanceled {\n\t\t\treturn errors.New(\"job canceled\")\n\t\t}\n\t\tif current.State == JobFinished {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(8 * time.Second)\n\t}\n}\n\nfunc buildTaskBody(assetName string) string {\n\treturn fmt.Sprintf(taskBodyTemplate, html.EscapeString(assetName))\n}\n<commit_msg>style: following goimports<commit_after>package ams\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"html\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n)\n\ntype MetaData struct {\n\tURI string `json:\"uri\"`\n}\n\ntype MediaAsset struct {\n\tMetaData MetaData `json:\"__metadata\"`\n}\n\nconst (\n\ttaskBodyTemplate = \"<?xml version=\\\"1.0\\\" encoding=\\\"utf-8\\\"?><taskBody><inputAsset>JobInputAsset(0)<\/inputAsset><outputAsset assetName=\\\"%s\\\">JobOutputAsset(0)<\/outputAsset><\/taskBody>\"\n)\n\nconst (\n\tJobQueued = iota\n\tJobScheduled\n\tJobProcessing\n\tJobFinished\n\tJobError\n\tJobCanceled\n\tJobCanceling\n)\n\nfunc NewMediaAsset(uri string) MediaAsset {\n\treturn MediaAsset{\n\t\tMetaData: MetaData{\n\t\t\tURI: uri,\n\t\t},\n\t}\n}\n\ntype Task struct {\n\tName string `json:\"Name\"`\n\tConfiguration string `json:\"Configuration\"`\n\tMediaProcessorID string `json:\"MediaProcessorId\"`\n\tTaskBody string `json:\"TaskBody\"`\n}\n\ntype Job struct {\n\tID string `json:\"Id\"`\n\tName string `json:\"Name\"`\n\tStartTime string `json:\"StartTime\"`\n\tEndTime string `json:\"EndTime\"`\n\tLastModified string `json:\"LastModified\"`\n\tPriority int `json:\"Priority\"`\n\tRunningDuration float64 `json:\"RunningDuration\"`\n\tState int `json:\"State\"`\n}\n\nfunc (j *Job) toResource() string {\n\treturn fmt.Sprintf(\"Jobs('%s')\", j.ID)\n}\n\nfunc (c *Client) EncodeAssetWithContext(ctx context.Context, mediaProcessorID, configuration string, asset *Asset) (*Job, error) {\n\toutAssetName := fmt.Sprintf(\"[ENCODED]%s\", asset.Name)\n\n\tparams := map[string]interface{}{\n\t\t\"Name\": fmt.Sprintf(\"EncodeJob - %s\", asset.ID),\n\t\t\"InputMediaAssets\": []MediaAsset{\n\t\t\tNewMediaAsset(c.buildAssetURI(asset)),\n\t\t},\n\t\t\"Tasks\": []Task{\n\t\t\tTask{\n\t\t\t\tName: fmt.Sprintf(\"task-%s\", outAssetName),\n\t\t\t\tConfiguration: configuration,\n\t\t\t\tMediaProcessorID: mediaProcessorID,\n\t\t\t\tTaskBody: buildTaskBody(outAssetName),\n\t\t\t},\n\t\t},\n\t}\n\tbody, err := encodeParams(params)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq, err := c.newRequest(ctx, http.MethodPost, \"Jobs\", body)\n\treq.Header.Set(\"Content-Type\", \"application\/json;odata=verbose\")\n\treq.Header.Set(\"Accept\", \"application\/json;odata=verbose\")\n\n\tvar out struct {\n\t\tData Job `json:\"d\"`\n\t}\n\tif err := c.do(req, http.StatusCreated, &out); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &out.Data, nil\n}\n\nfunc (c *Client) GetOutputMediaAssetsWithContext(ctx context.Context, job *Job) ([]Asset, error) {\n\tendpoint := job.toResource() + \"\/OutputMediaAssets\"\n\treq, err := c.newRequest(ctx, \"GET\", endpoint, nil)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"get output media assets request build failed\")\n\t}\n\tvar out struct {\n\t\tAssets []Asset `json:\"value\"`\n\t}\n\tif err := c.do(req, http.StatusOK, &out); err != nil {\n\t\treturn nil, errors.Wrap(err, \"get output media assets request failed\")\n\t}\n\treturn out.Assets, nil\n}\n\nfunc (c *Client) GetJobWithContext(ctx context.Context, jobID string) (*Job, error) {\n\treq, err := c.newRequest(ctx, http.MethodGet, fmt.Sprintf(\"Jobs('%s')\", jobID), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar out Job\n\tif err := c.do(req, http.StatusOK, &out); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &out, nil\n}\n\nfunc (c *Client) WaitJobWithContext(ctx context.Context, job *Job) error {\n\tfor {\n\t\tcurrent, err := c.GetJobWithContext(ctx, job.ID)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif current.State == JobError {\n\t\t\treturn errors.New(\"job failed\")\n\t\t}\n\t\tif current.State == JobCanceled {\n\t\t\treturn errors.New(\"job canceled\")\n\t\t}\n\t\tif current.State == JobFinished {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(8 * time.Second)\n\t}\n}\n\nfunc buildTaskBody(assetName string) string {\n\treturn fmt.Sprintf(taskBodyTemplate, html.EscapeString(assetName))\n}\n<|endoftext|>"} {"text":"<commit_before>package apnsapi\n\nimport (\n\t\"bytes\"\n\t\"crypto\"\n\t\"crypto\/ecdsa\"\n\t\"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"time\"\n)\n\ntype jwtHeader struct {\n\tAlg string `json:\"alg\"`\n\tKid string `json:\"kid\"`\n}\n\ntype jwtClaim struct {\n\tIss string `json:\"iss\"`\n\tIat int64 `json:\"iat\"`\n}\n\nfunc CreateToken(key *ecdsa.PrivateKey, kid string, teamID string) (string, error) {\n\theader := jwtHeader{\n\t\tAlg: \"ES256\",\n\t\tKid: kid,\n\t}\n\tclaim := jwtClaim{\n\t\tIss: teamID,\n\t\tIat: time.Now().Unix(),\n\t}\n\tvar b bytes.Buffer\n\n\theaderJson, err := json.Marshal(&header)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif err := base64Encode(&b, headerJson); err != nil {\n\t\treturn \"\", err\n\t}\n\tb.WriteString(\".\")\n\n\tclaimJson, err := json.Marshal(&claim)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif err := base64Encode(&b, claimJson); err != nil {\n\t\treturn \"\", err\n\t}\n\n\th := crypto.SHA256.New()\n\th.Write(b.Bytes())\n\tmsg := h.Sum(nil)\n\tsig, err := key.Sign(rand.Reader, msg, crypto.SHA256)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tb.WriteString(\".\")\n\tif err := base64Encode(&b, sig); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn b.String(), nil\n}\n\nfunc base64Encode(w io.Writer, byt []byte) error {\n\tenc := base64.NewEncoder(base64.RawURLEncoding, w)\n\tenc.Write(byt)\n\treturn enc.Close()\n}\n<commit_msg>use ecdsa.Sign directly<commit_after>package apnsapi\n\nimport (\n\t\"bytes\"\n\t\"crypto\"\n\t\"crypto\/ecdsa\"\n\t\"crypto\/rand\"\n\t\"encoding\/asn1\"\n\t\"encoding\/base64\"\n\t\"encoding\/json\"\n\t\"io\"\n\t\"math\/big\"\n\t\"time\"\n)\n\ntype jwtHeader struct {\n\tAlg string `json:\"alg\"`\n\tKid string `json:\"kid\"`\n}\n\ntype jwtClaim struct {\n\tIss string `json:\"iss\"`\n\tIat int64 `json:\"iat\"`\n}\n\ntype ecdsaSignature struct {\n\tR, S *big.Int\n}\n\nfunc CreateToken(key *ecdsa.PrivateKey, kid string, teamID string) (string, error) {\n\theader := jwtHeader{\n\t\tAlg: \"ES256\",\n\t\tKid: kid,\n\t}\n\tclaim := jwtClaim{\n\t\tIss: teamID,\n\t\tIat: time.Now().Unix(),\n\t}\n\tvar b bytes.Buffer\n\n\theaderJson, err := json.Marshal(&header)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif err := base64Encode(&b, headerJson); err != nil {\n\t\treturn \"\", err\n\t}\n\tb.WriteString(\".\")\n\n\tclaimJson, err := json.Marshal(&claim)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif err := base64Encode(&b, claimJson); err != nil {\n\t\treturn \"\", err\n\t}\n\n\th := crypto.SHA256.New()\n\th.Write(b.Bytes())\n\tmsg := h.Sum(nil)\n\n\tr, s, err := ecdsa.Sign(rand.Reader, key, msg)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tsig, err := asn1.Marshal(ecdsaSignature{r, s})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tb.WriteString(\".\")\n\tif err := base64Encode(&b, sig); err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn b.String(), nil\n}\n\nfunc base64Encode(w io.Writer, byt []byte) error {\n\tenc := base64.NewEncoder(base64.RawURLEncoding, w)\n\tenc.Write(byt)\n\treturn enc.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright ©2015 The corridor Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.package main\n\npackage corridor\n\nimport (\n\t\"math\"\n\n\t\"github.com\/gonum\/matrix\/mat64\"\n)\n\n\/\/ compute euclidean distance for a pair of subscript indices\nfunc Distance(aSubs, bSubs []int) (dist float64) {\n\n\t\/\/ initialize power variable\n\tvar pow float64 = 2.0\n\n\t\/\/ initialize output variable\n\tvar output float64\n\n\t\/\/ compute distance\n\toutput = math.Sqrt(math.Pow(float64(aSubs[1]-aSubs[0]), pow) + math.Pow(float64(bSubs[0]-bSubs[1]), pow))\n\n\t\/\/ return final output\n\n\treturn output\n}\n\n\/\/ THIS CODE IS UNFINISHED AND NOT WORKING PROPERLY...NEEDS WORK...\nfunc Bresenham(aSubs, bSubs []int, searchDomain *mat64.Dense) (lineSubs [][]int) {\n\n\tvar x0 = aSubs[0]\n\tvar x1 = bSubs[0]\n\tvar y0 = aSubs[1]\n\tvar y1 = bSubs[1]\n\n\tdx := x1 - x0\n\n\tif dx < 0 {\n\t\tdx = -dx\n\t}\n\n\tdy := y1 - y0\n\n\tif dy < 0 {\n\t\tdy = -dy\n\t}\n\n\tvar sx, sy int\n\n\tif x0 < x1 {\n\t\tsx = 1\n\t} else {\n\t\tsx = -1\n\t}\n\n\tif y0 < y1 {\n\t\tsy = 1\n\t} else {\n\t\tsy = -1\n\t}\n\n\terr := dx - dy\n\trows, cols := searchDomain.Dims()\n\tmaxLen := rows * cols\n\toutput := make([][]int, 1, maxLen)\n\toutput[0] = make([]int, 2)\n\tval := make([]int, 2)\n\n\tfor {\n\t\tif x0 == x1 && y0 == y1 {\n\t\t\tbreak\n\t\t}\n\t\te2 := 2 * err\n\t\tif e2 > -dy {\n\t\t\terr -= dy\n\t\t\tx0 += sx\n\t\t}\n\t\tif e2 < dx {\n\t\t\terr += dx\n\t\t\ty0 += sy\n\t\t}\n\t\tval[0] = x0\n\t\tval[1] = y0\n\t\toutput = append(output, val)\n\t}\n\n\treturn output\n}\n\n\/\/ fitness function to generate the total fitness and individual\n\/\/ fitness values for a given input set of subscripts\n\/\/ corresponding to a single individual\nfunc Fitness(subs [][]int, obj *mat64.Dense) (fitnessValues []float64, totalFitness float64) {\n\n\t\/\/ get individual length\n\tindSize := len(subs)\n\n\t\/\/ initialize fitness values and total fitness\n\tfitVal := make([]float64, indSize)\n\tvar totFit float64 = 0.0\n\n\t\/\/ evaluate individual fitness according to input objective\n\tfor i := 0; i < indSize; i++ {\n\t\tcurFit := obj.At(subs[i][0], subs[i][1])\n\t\tfitVal[i] = curFit\n\t\ttotFit = totFit + curFit\n\t}\n\n\t\/\/ return outputs\n\treturn fitVal, totFit\n\n}\n<commit_msg>Bresenham's algorithm now working. Implemented single distance and minimum distance functions for use in establishing the covariance values for sigma dynamically during the directed walk procedure.<commit_after>\/\/ Copyright ©2015 The corridor Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.package main\n\npackage corridor\n\nimport (\n\t\"math\"\n\t\"sort\"\n\n\t\"github.com\/gonum\/matrix\/mat64\"\n)\n\n\/\/ compute euclidean distance for a pair of subscript indices\nfunc Distance(aSubs, bSubs []int) (dist float64) {\n\n\t\/\/ initialize variables\n\tvar x0 float64 = float64(aSubs[0])\n\tvar x1 float64 = float64(bSubs[0])\n\tvar y0 float64 = float64(aSubs[1])\n\tvar y1 float64 = float64(bSubs[1])\n\tvar pow float64 = 2.0\n\tvar dx float64 = x1 - x0\n\tvar dy float64 = y1 - y0\n\n\t\/\/ compute distance\n\tvar output float64 = math.Sqrt(math.Pow(dx, pow) + math.Pow(dy, pow))\n\n\t\/\/ return final output\n\treturn output\n}\n\nfunc MinDistance(aSubs []int, lineSubs [][]int) (minDist float64) {\n\n\t\/\/ initialize variables\n\tmaxLen := len(lineSubs)\n\tdistVec := make([]float64, maxLen)\n\n\t\/\/ loop through and compute distances\n\tfor i := 0; i < maxLen; i++ {\n\t\tdistVec[i] = Distance(aSubs, lineSubs[i])\n\t}\n\n\t\/\/ sort distances\n\tsort.Float64s(distVec)\n\n\t\/\/ get final output\n\toutput := distVec[0]\n\n\t\/\/ return final output\n\treturn output\n}\n\n\/\/ bresenham generates the list of subscript indices corresponding to the\n\/\/ euclidean shortest paths connecting two subscript pairs in discrete space\nfunc Bresenham(aSubs, bSubs []int) (lineSubs [][]int) {\n\n\t\/\/ initialize variables\n\tvar x0 int = aSubs[0]\n\tvar x1 int = bSubs[0]\n\tvar y0 int = aSubs[1]\n\tvar y1 int = bSubs[1]\n\n\t\/\/ check row differential\n\tdx := x1 - x0\n\tif dx < 0 {\n\t\tdx = -dx\n\t}\n\n\t\/\/ check column differential\n\tdy := y1 - y0\n\n\tif dy < 0 {\n\t\tdy = -dy\n\t}\n\n\t\/\/ initialize stride variables\n\tvar sx, sy int\n\n\t\/\/ set row stride direction\n\tif x0 < x1 {\n\t\tsx = 1\n\t} else {\n\t\tsx = -1\n\t}\n\n\t\/\/ set column stride direction\n\tif y0 < y1 {\n\t\tsy = 1\n\t} else {\n\t\tsy = -1\n\t}\n\n\t\/\/ calculate error component\n\terr := dx - dy\n\n\t\/\/ initialize output 2D slice vector\n\tdist := math.Ceil(Distance(aSubs, bSubs))\n\tmaxLen := int(dist)\n\toutput := make([][]int, 0, maxLen)\n\n\t\/\/ loop through and generate subscripts\n\tfor {\n\t\tvar val = []int{x0, y0}\n\t\toutput = append(output, val)\n\t\tif x0 == x1 && y0 == y1 {\n\t\t\tbreak\n\t\t}\n\t\te2 := 2 * err\n\t\tif e2 > -dy {\n\t\t\terr -= dy\n\t\t\tx0 += sx\n\t\t}\n\t\tif e2 < dx {\n\t\t\terr += dx\n\t\t\ty0 += sy\n\t\t}\n\t}\n\n\t\/\/ return final output\n\treturn output\n}\n\n\/\/ fitness function to generate the total fitness and individual\n\/\/ fitness values for a given input set of subscripts\n\/\/ corresponding to a single individual\nfunc Fitness(subs [][]int, obj *mat64.Dense) (fitnessValues []float64, totalFitness float64) {\n\n\t\/\/ get individual length\n\tindSize := len(subs)\n\n\t\/\/ initialize fitness values and total fitness\n\tfitVal := make([]float64, indSize)\n\tvar totFit float64 = 0.0\n\n\t\/\/ evaluate individual fitness according to input objective\n\tfor i := 0; i < indSize; i++ {\n\t\tcurFit := obj.At(subs[i][0], subs[i][1])\n\t\tfitVal[i] = curFit\n\t\ttotFit = totFit + curFit\n\t}\n\n\t\/\/ return outputs\n\treturn fitVal, totFit\n\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/buger\/jsonparser\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/joho\/godotenv\"\n\t\"github.com\/syhlion\/requestwork.v2\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar (\n\tname string\n\tversion string\n\tcmdStart = cli.Command{\n\t\tName: \"start\",\n\t\tUsage: \"connect ws cli\",\n\t\tAction: start,\n\t\tFlags: []cli.Flag{\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"env-file\",\n\t\t\t},\n\t\t},\n\t}\n\twg sync.WaitGroup\n\tlisten_wg sync.WaitGroup\n)\n\nfunc start(c *cli.Context) {\n\tif c.String(\"env-file\") != \"\" {\n\t\tenvfile := c.String(\"env-file\")\n\t\t\/\/flag.Parse()\n\t\terr := godotenv.Load(envfile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\tws_api := os.Getenv(\"GUSHER-CONN-TEST_WS_API\")\n\tif ws_api == \"\" {\n\t\tlog.Fatal(\"empty env GUSHER-CONN-TEST_WS_API\")\n\t}\n\tpush_api := os.Getenv(\"GUSHER-CONN-TEST_PUSH_API\")\n\tif push_api == \"\" {\n\t\tlog.Fatal(\"empty env GUSHER-CONN-TEST_PUSH_API\")\n\t}\n\tlogin_msg := os.Getenv(\"GUSHER-CONN-TEST_LOGIN_MESSAGE\")\n\tif login_msg == \"\" {\n\t\tlog.Fatal(\"empty env GUSHER-CONN-TEST_LOGIN_MESSAGE\")\n\t}\n\tsub_msg := os.Getenv(\"GUSHER-CONN-TEST_SUBSCRIBE_MESSAGE\")\n\tif sub_msg == \"\" {\n\t\tlog.Fatal(\"empty env GUSHER-CONN-TEST_SUBSCRIBE_MESSAGE\")\n\t}\n\tsub_resp := os.Getenv(\"GUSHER-CONN-TEST_SUBSCRIBE_RESPONSE\")\n\tif sub_resp == \"\" {\n\t\tlog.Fatal(\"empty env GUSHER-CONN-TEST_SUBSCRIBE_RESPONSE\")\n\t}\n\tpush_msg := os.Getenv(\"GUSHER-CONN-TEST_PUSH_MESSAGE\")\n\tif push_msg == \"\" {\n\t\tlog.Fatal(\"empty env GUSHER-CONN-TEST_PUSH_MESSAGE\")\n\t}\n\tconnections := os.Getenv(\"GUSHER-CONN-TEST_CONNECTIONS\")\n\tif connections == \"\" {\n\t\tlog.Fatal(\"empty env GUSHER-CONN-TEST_CONNECTIONS\")\n\t}\n\tconn_total, err := strconv.Atoi(connections)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\twsurl, err := url.Parse(ws_api)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tpushurl, err := url.Parse(push_api)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\twsHeaders := http.Header{\n\t\t\"Origin\": {wsurl.String()},\n\t\t\"Sec-WebSocket-Extensions\": {\"permessage-deflate; client_max_window_bits, x-webkit-deflate-frame\"},\n\t}\n\tconns := make([]*websocket.Conn, 0)\n\tfor i := 0; i < conn_total; i++ {\n\t\twg.Add(1)\n\t\trawConn, err := net.Dial(\"tcp\", wsurl.Host)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\twg.Done()\n\t\t\tcontinue\n\t\t}\n\n\t\tconn, _, err := websocket.NewClient(rawConn, wsurl, wsHeaders, 1024, 1024)\n\t\tif err != nil {\n\t\t\trawConn.Close()\n\t\t\twg.Done()\n\t\t\tlog.Fatal(err)\n\t\t\tcontinue\n\t\t}\n\t\terr = conn.WriteMessage(websocket.TextMessage, []byte(login_msg))\n\t\tif err != nil {\n\t\t\trawConn.Close()\n\t\t\tconn.Close()\n\t\t\twg.Done()\n\t\t\tlog.Warn(err)\n\t\t\tcontinue\n\t\t}\n\t\tconns = append(conns, conn)\n\t}\n\tfor i, conn := range conns {\n\t\tlisten_wg.Add(1)\n\t\tgo func(i int, conn *websocket.Conn) {\n\t\t\tfor {\n\t\t\t\t_, d, err := conn.ReadMessage()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t\tlisten_wg.Done()\n\t\t\t\t\twg.Done()\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif string(d) == sub_resp {\n\t\t\t\t\tlisten_wg.Done()\n\t\t\t\t}\n\t\t\t\tlog.Println(i, \" slave repsonse message\", string(d))\n\t\t\t\tdata, _ := jsonparser.GetString(d, \"data\")\n\t\t\t\tif data == push_msg {\n\t\t\t\t\twg.Done()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(i, conn)\n\t\terr = conn.WriteMessage(websocket.TextMessage, []byte(sub_msg))\n\t\tif err != nil {\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tlisten_wg.Wait()\n\t\/\/push start\n\twork := requestwork.New(5)\n\tv := url.Values{}\n\n\tv.Add(\"data\", push_msg)\n\treq, err := http.NewRequest(\"POST\", pushurl.String(), bytes.NewBufferString(v.Encode()))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treq.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\treq.Header.Add(\"Content-Length\", strconv.Itoa(len(v.Encode())))\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar firstTime time.Time\n\tctx, _ := context.WithTimeout(context.Background(), 30*time.Second)\n\terr = work.Execute(ctx, req, func(resp *http.Response, e error) (err error) {\n\t\tif e != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Println(\"master response\", string(b))\n\t\tfirstTime = time.Now()\n\t\treturn\n\t})\n\tlog.Println(\"Waiting...\")\n\twg.Wait()\n\tlog.Println(\"Sucess\")\n\tt := time.Now().Sub(firstTime)\n\tlog.Printf(\"Total Use time:%s\", t)\n\n\treturn\n}\n\nfunc main() {\n\tgusher := cli.NewApp()\n\tgusher.Name = name\n\tgusher.Version = version\n\tgusher.Commands = []cli.Command{\n\t\tcmdStart,\n\t}\n\tgusher.Compiled = time.Now()\n\tgusher.Run(os.Args)\n}\n<commit_msg>add connect time<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"sync\"\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/buger\/jsonparser\"\n\t\"github.com\/gorilla\/websocket\"\n\t\"github.com\/joho\/godotenv\"\n\t\"github.com\/syhlion\/requestwork.v2\"\n\t\"github.com\/urfave\/cli\"\n)\n\nvar (\n\tname string\n\tversion string\n\tcmdStart = cli.Command{\n\t\tName: \"start\",\n\t\tUsage: \"connect ws cli\",\n\t\tAction: start,\n\t\tFlags: []cli.Flag{\n\t\t\tcli.StringFlag{\n\t\t\t\tName: \"env-file\",\n\t\t\t},\n\t\t},\n\t}\n\twg sync.WaitGroup\n\tlisten_wg sync.WaitGroup\n)\n\nfunc start(c *cli.Context) {\n\tif c.String(\"env-file\") != \"\" {\n\t\tenvfile := c.String(\"env-file\")\n\t\t\/\/flag.Parse()\n\t\terr := godotenv.Load(envfile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\tws_api := os.Getenv(\"GUSHER-CONN-TEST_WS_API\")\n\tif ws_api == \"\" {\n\t\tlog.Fatal(\"empty env GUSHER-CONN-TEST_WS_API\")\n\t}\n\tpush_api := os.Getenv(\"GUSHER-CONN-TEST_PUSH_API\")\n\tif push_api == \"\" {\n\t\tlog.Fatal(\"empty env GUSHER-CONN-TEST_PUSH_API\")\n\t}\n\tlogin_msg := os.Getenv(\"GUSHER-CONN-TEST_LOGIN_MESSAGE\")\n\tif login_msg == \"\" {\n\t\tlog.Fatal(\"empty env GUSHER-CONN-TEST_LOGIN_MESSAGE\")\n\t}\n\tsub_msg := os.Getenv(\"GUSHER-CONN-TEST_SUBSCRIBE_MESSAGE\")\n\tif sub_msg == \"\" {\n\t\tlog.Fatal(\"empty env GUSHER-CONN-TEST_SUBSCRIBE_MESSAGE\")\n\t}\n\tsub_resp := os.Getenv(\"GUSHER-CONN-TEST_SUBSCRIBE_RESPONSE\")\n\tif sub_resp == \"\" {\n\t\tlog.Fatal(\"empty env GUSHER-CONN-TEST_SUBSCRIBE_RESPONSE\")\n\t}\n\tpush_msg := os.Getenv(\"GUSHER-CONN-TEST_PUSH_MESSAGE\")\n\tif push_msg == \"\" {\n\t\tlog.Fatal(\"empty env GUSHER-CONN-TEST_PUSH_MESSAGE\")\n\t}\n\tconnections := os.Getenv(\"GUSHER-CONN-TEST_CONNECTIONS\")\n\tif connections == \"\" {\n\t\tlog.Fatal(\"empty env GUSHER-CONN-TEST_CONNECTIONS\")\n\t}\n\tconn_total, err := strconv.Atoi(connections)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\twsurl, err := url.Parse(ws_api)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tpushurl, err := url.Parse(push_api)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\twsHeaders := http.Header{\n\t\t\"Origin\": {wsurl.String()},\n\t\t\"Sec-WebSocket-Extensions\": {\"permessage-deflate; client_max_window_bits, x-webkit-deflate-frame\"},\n\t}\n\tconns := make([]*websocket.Conn, 0)\n\tlistenStart := time.Now()\n\tfor i := 0; i < conn_total; i++ {\n\t\twg.Add(1)\n\t\trawConn, err := net.Dial(\"tcp\", wsurl.Host)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\twg.Done()\n\t\t\tcontinue\n\t\t}\n\n\t\tconn, _, err := websocket.NewClient(rawConn, wsurl, wsHeaders, 1024, 1024)\n\t\tif err != nil {\n\t\t\trawConn.Close()\n\t\t\twg.Done()\n\t\t\tlog.Fatal(err)\n\t\t\tcontinue\n\t\t}\n\t\terr = conn.WriteMessage(websocket.TextMessage, []byte(login_msg))\n\t\tif err != nil {\n\t\t\trawConn.Close()\n\t\t\tconn.Close()\n\t\t\twg.Done()\n\t\t\tlog.Warn(err)\n\t\t\tcontinue\n\t\t}\n\t\tconns = append(conns, conn)\n\t}\n\tfor i, conn := range conns {\n\t\tlisten_wg.Add(1)\n\t\tgo func(i int, conn *websocket.Conn) {\n\t\t\tfor {\n\t\t\t\t_, d, err := conn.ReadMessage()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t\tlisten_wg.Done()\n\t\t\t\t\twg.Done()\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif string(d) == sub_resp {\n\t\t\t\t\tlisten_wg.Done()\n\t\t\t\t}\n\t\t\t\tlog.Println(i, \" slave repsonse message\", string(d))\n\t\t\t\tdata, _ := jsonparser.GetString(d, \"data\")\n\t\t\t\tif data == push_msg {\n\t\t\t\t\twg.Done()\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}(i, conn)\n\t\terr = conn.WriteMessage(websocket.TextMessage, []byte(sub_msg))\n\t\tif err != nil {\n\t\t\tconn.Close()\n\t\t\tcontinue\n\t\t}\n\t}\n\n\tlisten_wg.Wait()\n\tlistenTime := time.Now().Sub(listenStart)\n\t\/\/push start\n\twork := requestwork.New(5)\n\tv := url.Values{}\n\n\tv.Add(\"data\", push_msg)\n\treq, err := http.NewRequest(\"POST\", pushurl.String(), bytes.NewBufferString(v.Encode()))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treq.Header.Add(\"Content-Type\", \"application\/x-www-form-urlencoded\")\n\treq.Header.Add(\"Content-Length\", strconv.Itoa(len(v.Encode())))\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar pushStart time.Time\n\tctx, _ := context.WithTimeout(context.Background(), 30*time.Second)\n\terr = work.Execute(ctx, req, func(resp *http.Response, e error) (err error) {\n\t\tif e != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tb, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Println(\"master response\", string(b))\n\t\tpushStart = time.Now()\n\t\treturn\n\t})\n\tlog.Println(\"Waiting...\")\n\twg.Wait()\n\tlog.Println(\"Sucess\")\n\tt := time.Now().Sub(pushStart)\n\tlog.Printf(\"All Client receive msg time:%s, All Client connect time:%s\", t, listenTime)\n\n\treturn\n}\n\nfunc main() {\n\tgusher := cli.NewApp()\n\tgusher.Name = name\n\tgusher.Version = version\n\tgusher.Commands = []cli.Command{\n\t\tcmdStart,\n\t}\n\tgusher.Compiled = time.Now()\n\tgusher.Run(os.Args)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport\t(\n\t\"os\"\n\t\"fmt\"\n\t\"crypto\/sha512\"\n\t\"encoding\/hex\"\n\t\"hash\"\n\t\"path\/filepath\" \n\t\"flag\"\n\t\"net\/http\" \n\t\"net\/url\"\n\t\"io\/ioutil\" \t\n\t\"log\"\n\t\"strconv\"\n)\n\ntype LMVFile struct {\n\thash string\n\tsize int64\n\tname string\n}\n\nfunc CalculateSHA512(str string) string {\n\n\tvar hasher hash.Hash = sha512.New()\n\n\thasher.Reset()\n\thasher.Write([]byte(str))\n\treturn hex.EncodeToString(hasher.Sum(nil))\n\n}\n\nfunc encode(fp string, token bool) {\n\n\tlmv_file := new(LMVFile)\n\n\tfile, err := os.Open(fp)\n\n if err != nil {\n \tlog.Fatal(err)\n }\n\n defer file.Close()\t\n\n stat, err := file.Stat()\n\n if err != nil {\n \tlog.Fatal(err)\n }\n\n bs := make([]byte, stat.Size())\n _, err = file.Read(bs)\n\n if err != nil {\n log.Fatal(err)\n }\n\n lmv_file.hash = CalculateSHA512(string(bs))\n lmv_file.size = stat.Size()\n lmv_file.name = filepath.Base(fp)\n\n\tif token {\n\n \tupload_address := \"http:\/\/127.0.0.1:8081\/upload\"\n\t\t\n\t\tfields := make(url.Values)\n fields.Set(\"name\", lmv_file.name)\n fields.Set(\"hash\", lmv_file.hash)\n fields.Set(\"size\", strconv.FormatInt(lmv_file.size, 10))\n \t\n \tresp, err := http.PostForm(upload_address, fields)\n\n \tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\t\n\t\tdefer resp.Body.Close()\n\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfmt.Println(\"'\" + lmv_file.name + \"'\" + \" --> \" + \"'\" + string(body) + \"'\")\n\t\n\t} else {\n\n\t\tos.Create(lmv_file.name + \".lmv\")\n\n\t\tfull := lmv_file.hash + \"\\n\" + lmv_file.name + \"\\n\" + strconv.FormatInt(lmv_file.size, 10)\n\n\t\terr = ioutil.WriteFile(lmv_file.name + \".lmv\", []byte(full), 0644)\n \t\n\t\tif err != nil { \n\t\t\tlog.Fatal(err)\n\t\t}\n\n }\n\n}\n\nfunc main() {\n\n\ttoken := flag.Bool(\"token\", false, \"Use tokens in place of .lmv files\")\n\n\tflag.Parse()\n\n\tif len(os.Args) < 2 {\n\n\t\tfmt.Println(\"Use lmv -h for usage\")\n\n\t} else {\n\n\t\tfor i := 0; i < len(os.Args[1:]); i++ {\n\t \n\t\t\tif _, err := os.Stat(os.Args[i+1]); err == nil {\n\n\t \t\tencode(os.Args[i+1], *token)\n\n\t\t\t}\n\n\t }\n\n\t}\n}<commit_msg>Pass []bytes to hashing function, don't convert to string<commit_after>package main\n\nimport\t(\n\t\"os\"\n\t\"fmt\"\n\t\"crypto\/sha512\"\n\t\"encoding\/hex\"\n\t\"hash\"\n\t\"path\/filepath\" \n\t\"flag\"\n\t\"net\/http\" \n\t\"net\/url\"\n\t\"io\/ioutil\" \t\n\t\"log\"\n\t\"strconv\"\n)\n\ntype LMVFile struct {\n\thash string\n\tsize int64\n\tname string\n}\n\nfunc CalculateSHA512(data []byte) string {\n\n\tvar hasher hash.Hash = sha512.New()\n\n\thasher.Reset()\n\thasher.Write(data)\n\treturn hex.EncodeToString(hasher.Sum(nil))\n\n}\n\nfunc encode(fp string, token bool) {\n\n\tlmv_file := new(LMVFile)\n\n\tfile, err := os.Open(fp)\n\n if err != nil {\n \tlog.Fatal(err)\n }\n\n defer file.Close()\t\n\n stat, err := file.Stat()\n\n if err != nil {\n \tlog.Fatal(err)\n }\n\n bs := make([]byte, stat.Size())\n _, err = file.Read(bs)\n\n if err != nil {\n log.Fatal(err)\n }\n\n lmv_file.hash = CalculateSHA512(bs)\n lmv_file.size = stat.Size()\n lmv_file.name = filepath.Base(fp)\n\n\tif token {\n\n \tupload_address := \"http:\/\/127.0.0.1:8081\/upload\"\n\t\t\n\t\tfields := make(url.Values)\n fields.Set(\"name\", lmv_file.name)\n fields.Set(\"hash\", lmv_file.hash)\n fields.Set(\"size\", strconv.FormatInt(lmv_file.size, 10))\n \t\n \tresp, err := http.PostForm(upload_address, fields)\n\n \tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\t\n\t\tdefer resp.Body.Close()\n\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tfmt.Println(\"'\" + lmv_file.name + \"'\" + \" --> \" + \"'\" + string(body) + \"'\")\n\t\n\t} else {\n\n\t\tos.Create(lmv_file.name + \".lmv\")\n\n\t\tfull := lmv_file.hash + \"\\n\" + lmv_file.name + \"\\n\" + strconv.FormatInt(lmv_file.size, 10)\n\n\t\terr = ioutil.WriteFile(lmv_file.name + \".lmv\", []byte(full), 0644)\n \t\n\t\tif err != nil { \n\t\t\tlog.Fatal(err)\n\t\t}\n\n }\n\n}\n\nfunc main() {\n\n\ttoken := flag.Bool(\"token\", false, \"Use tokens in place of .lmv files\")\n\n\tflag.Parse()\n\n\tif len(os.Args) < 2 {\n\n\t\tfmt.Println(\"Use lmv -h for usage\")\n\n\t} else {\n\n\t\tfor i := 0; i < len(os.Args[1:]); i++ {\n\t \n\t\t\tif _, err := os.Stat(os.Args[i+1]); err == nil {\n\n\t \t\tencode(os.Args[i+1], *token)\n\n\t\t\t}\n\n\t }\n\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Fixing err chan<commit_after><|endoftext|>"} {"text":"<commit_before>package weblog\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/gorilla\/websocket\"\n)\n\nvar upgrader = &websocket.Upgrader{ReadBufferSize: 1024, WriteBufferSize: 1024}\n\nfunc NewWebLogger() *WebLogger {\n\tww := NewWebWriter()\n\tl := log.New(ww, \"\", log.Lshortfile)\n\tgo ww.Run()\n\n\treturn &WebLogger{l, ww}\n}\n\ntype WebLogger struct {\n\t*log.Logger\n\n\twriter *WebWriter\n}\n\nfunc (wl *WebLogger) Handle(w http.ResponseWriter, r *http.Request) {\n\twl.writer.Handle(w, r)\n}\n\nfunc NewWebWriter() *WebWriter {\n\tww := &WebWriter{register: make(chan *connection),\n\t\tunregister: make(chan *connection),\n\t\tconnections: make(map[*connection]bool)}\n\treturn ww\n}\n\ntype WebWriter struct {\n\tregister chan *connection\n\tunregister chan *connection\n\tconnections map[*connection]bool\n}\n\nfunc (ww *WebWriter) Run() {\n\tfor {\n\t\tselect {\n\t\tcase c := <-ww.unregister:\n\t\t\tww.connections[c] = false\n\t\t\tdelete(ww.connections, c)\n\t\t\tc.ws.Close()\n\t\tcase c := <-ww.register:\n\t\t\tww.connections[c] = true\n\t\t\tfmt.Println(\"Just registered a client\")\n\t\t}\n\t}\n}\n\nfunc (ww *WebWriter) Write(p []byte) (int, error) {\n\tfor conn := range ww.connections {\n\t\tconn.send <- p\n\t}\n\n\treturn len(p), nil\n}\n\nfunc (ww *WebWriter) Handle(w http.ResponseWriter, r *http.Request) {\n\tws, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\tc := &connection{send: make(chan []byte, 256), ws: ws}\n\tww.register <- c\n\tdefer func() { ww.unregister <- c }()\n\n\tc.send <- []byte(\"Welcome to WebWriter!\")\n\tc.writer()\n}\n\ntype connection struct {\n\tsend chan []byte\n\tws *websocket.Conn\n}\n\nfunc (c connection) writer() {\n\tfor message := range c.send {\n\t\terr := c.ws.WriteMessage(websocket.TextMessage, message)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n}\n<commit_msg>log: WebWriter -> webWriter, minimising public API<commit_after>package weblog\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\n\t\"github.com\/gorilla\/websocket\"\n)\n\nvar upgrader = &websocket.Upgrader{ReadBufferSize: 1024, WriteBufferSize: 1024}\n\nfunc NewWebLogger() *WebLogger {\n\tww := &webWriter{register: make(chan *connection),\n\t\tunregister: make(chan *connection),\n\t\tconnections: make(map[*connection]bool)}\n\n\tl := log.New(ww, \"\", log.Lshortfile)\n\tgo ww.Run()\n\n\treturn &WebLogger{l, ww}\n}\n\ntype WebLogger struct {\n\t*log.Logger\n\n\twriter *webWriter\n}\n\nfunc (wl *WebLogger) Handle(w http.ResponseWriter, r *http.Request) {\n\twl.writer.Handle(w, r)\n}\n\ntype webWriter struct {\n\tregister chan *connection\n\tunregister chan *connection\n\tconnections map[*connection]bool\n}\n\nfunc (ww *webWriter) Run() {\n\tfor {\n\t\tselect {\n\t\tcase c := <-ww.unregister:\n\t\t\tww.connections[c] = false\n\t\t\tdelete(ww.connections, c)\n\t\t\tc.ws.Close()\n\t\tcase c := <-ww.register:\n\t\t\tww.connections[c] = true\n\t\t\tfmt.Println(\"Just registered a client\")\n\t\t}\n\t}\n}\n\nfunc (ww *webWriter) Write(p []byte) (int, error) {\n\tfor conn := range ww.connections {\n\t\tconn.send <- p\n\t}\n\n\treturn len(p), nil\n}\n\nfunc (ww *webWriter) Handle(w http.ResponseWriter, r *http.Request) {\n\tws, err := upgrader.Upgrade(w, r, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\tc := &connection{send: make(chan []byte, 256), ws: ws}\n\tww.register <- c\n\tdefer func() { ww.unregister <- c }()\n\n\tc.send <- []byte(\"Welcome to the weblog!\")\n\tc.writer()\n}\n\ntype connection struct {\n\tsend chan []byte\n\tws *websocket.Conn\n}\n\nfunc (c connection) writer() {\n\tfor message := range c.send {\n\t\terr := c.ws.WriteMessage(websocket.TextMessage, message)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package forestdb\n\nimport \"C\"\nimport \"unsafe\"\n\n\/\/export LogCallbackInternal\nfunc LogCallbackInternal(errCode C.int, msg *C.char, ctx *C.char) {\n\tcontext := (*logContext)(unsafe.Pointer(ctx))\n\t(*context.callback)(context.name, int(errCode), C.GoString(msg), context.userCtx)\n}\n\n\/\/export FatalErrorCallbackInternal\nfunc FatalErrorCallbackInternal() {\n\tfatalErrorCallback()\n}\n\n\/\/ Logger interface\ntype Logger interface {\n\t\/\/ Warnings, logged by default.\n\tWarnf(format string, v ...interface{})\n\t\/\/ Errors, logged by default.\n\tErrorf(format string, v ...interface{})\n\t\/\/ Fatal errors. Will not terminate execution.\n\tFatalf(format string, v ...interface{})\n\t\/\/ Informational messages.\n\tInfof(format string, v ...interface{})\n\t\/\/ Timing utility\n\tDebugf(format string, v ...interface{})\n\t\/\/ Program execution tracing. Not logged by default\n\tTracef(format string, v ...interface{})\n}\n\ntype Dummy struct {\n}\n\nfunc (*Dummy) Fatalf(_ string, _ ...interface{}) {\n}\n\nfunc (*Dummy) Errorf(_ string, _ ...interface{}) {\n}\n\nfunc (*Dummy) Warnf(_ string, _ ...interface{}) {\n}\n\nfunc (*Dummy) Infof(_ string, _ ...interface{}) {\n}\n\nfunc (*Dummy) Debugf(_ string, _ ...interface{}) {\n}\n\nfunc (*Dummy) Tracef(_ string, _ ...interface{}) {\n}\n\n\/\/ Logger to use\nvar Log Logger = &Dummy{}\n<commit_msg>add implementation of leveled logger<commit_after>package forestdb\n\nimport \"C\"\nimport (\n\t\"log\"\n\t\"unsafe\"\n)\n\n\/\/export LogCallbackInternal\nfunc LogCallbackInternal(errCode C.int, msg *C.char, ctx *C.char) {\n\tcontext := (*logContext)(unsafe.Pointer(ctx))\n\t(*context.callback)(context.name, int(errCode), C.GoString(msg), context.userCtx)\n}\n\n\/\/export FatalErrorCallbackInternal\nfunc FatalErrorCallbackInternal() {\n\tfatalErrorCallback()\n}\n\n\/\/ Logger interface\ntype Logger interface {\n\t\/\/ Warnings, logged by default.\n\tWarnf(format string, v ...interface{})\n\t\/\/ Errors, logged by default.\n\tErrorf(format string, v ...interface{})\n\t\/\/ Fatal errors. Will not terminate execution.\n\tFatalf(format string, v ...interface{})\n\t\/\/ Informational messages.\n\tInfof(format string, v ...interface{})\n\t\/\/ Timing utility\n\tDebugf(format string, v ...interface{})\n\t\/\/ Program execution tracing. Not logged by default\n\tTracef(format string, v ...interface{})\n}\n\ntype Dummy struct {\n}\n\nfunc (*Dummy) Fatalf(_ string, _ ...interface{}) {\n}\n\nfunc (*Dummy) Errorf(_ string, _ ...interface{}) {\n}\n\nfunc (*Dummy) Warnf(_ string, _ ...interface{}) {\n}\n\nfunc (*Dummy) Infof(_ string, _ ...interface{}) {\n}\n\nfunc (*Dummy) Debugf(_ string, _ ...interface{}) {\n}\n\nfunc (*Dummy) Tracef(_ string, _ ...interface{}) {\n}\n\ntype LogLevel int\n\nconst (\n\tLogFatal LogLevel = iota\n\tLogError\n\tLogWarn\n\tLogInfo\n\tLogDebug\n\tLogTrace\n)\n\ntype LeveledLog struct {\n\tlevel LogLevel\n}\n\nfunc NewLeveledLog(level LogLevel) *LeveledLog {\n\treturn &LeveledLog{level: level}\n}\n\nfunc (l *LeveledLog) Fatalf(format string, a ...interface{}) {\n\tif l.level >= LogFatal {\n\t\tlog.Fatalf(format, a...)\n\t}\n}\n\nfunc (l *LeveledLog) Errorf(format string, a ...interface{}) {\n\tif l.level >= LogError {\n\t\tlog.Printf(format, a...)\n\t}\n}\n\nfunc (l *LeveledLog) Warnf(format string, a ...interface{}) {\n\tif l.level >= LogWarn {\n\t\tlog.Printf(format, a...)\n\t}\n}\n\nfunc (l *LeveledLog) Infof(format string, a ...interface{}) {\n\tif l.level >= LogInfo {\n\t\tlog.Printf(format, a...)\n\t}\n}\n\nfunc (l *LeveledLog) Debugf(format string, a ...interface{}) {\n\tif l.level >= LogDebug {\n\t\tlog.Printf(format, a...)\n\t}\n}\n\nfunc (l *LeveledLog) Tracef(format string, a ...interface{}) {\n\tif l.level >= LogTrace {\n\t\tlog.Printf(format, a...)\n\t}\n}\n\n\/\/ Logger to use\nvar Log Logger = &Dummy{}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012-2018, Rolf Veen and contributors.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage ogdl\n\nimport \"os\"\n\n\/\/ Log is a log store for binary OGDL objects.\n\/\/\n\/\/ All objects are appended to a file, and a position is returned.\n\/\/\ntype Log struct {\n\tf *os.File\n\tautoSync bool\n}\n\n\/\/ OpenLog opens a log file. If the file doesn't exist, it is created.\nfunc OpenLog(file string) (*Log, error) {\n\n\tf, err := os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0666)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog := Log{f, true}\n\n\treturn &log, nil\n}\n\n\/\/ Close closes a log file\nfunc (log *Log) Close() {\n\tlog.f.Close()\n}\n\n\/\/ Sync commits the changes to disk (the exact behavior is OS dependent).\nfunc (log *Log) Sync() {\n\tlog.f.Sync()\n}\n\n\/\/ Add adds an OGDL object to the log. The starting position into the log\n\/\/ is returned.\nfunc (log *Log) Add(g *Graph) int64 {\n\n\tb := g.Binary()\n\n\tif b == nil {\n\t\treturn 0\n\t}\n\n\ti, _ := log.f.Seek(0, 2)\n\n\tlog.f.Write(b)\n\n\tif log.autoSync {\n\t\tlog.f.Sync()\n\t}\n\n\treturn i\n}\n\n\/\/ AddBinary adds an OGDL binary object to the log. The starting position into\n\/\/ the log is returned.\nfunc (log *Log) AddBinary(b []byte) int64 {\n\n\ti, _ := log.f.Seek(0, 2)\n\tlog.f.Write(b)\n\n\tif log.autoSync {\n\t\tlog.f.Sync()\n\t}\n\n\treturn i\n}\n\n\/\/ Get returns the OGDL object at the position given and the position of the\n\/\/ next object, or an error.\nfunc (log *Log) Get(i int64) (*Graph, int64, error) {\n\n\t\/* Position in file *\/\n\t_, err := log.f.Seek(i, 0)\n\tif err != nil {\n\t\treturn nil, -1, err\n\t}\n\n\tp := newBinParser(log.f)\n\tg := p.parse()\n\n\tif p.n == 0 {\n\t\treturn g, -1, nil\n\t}\n\n\treturn g, i + int64(p.n), err\n}\n\n\/\/ GetBinary returns the OGDL object at the position given and the position of the\n\/\/ next object, or an error. The object returned is in binary form, exactly\n\/\/ as it is stored in the log.\nfunc (log *Log) GetBinary(i int64) ([]byte, int64, error) {\n\n\t\/\/ Position in file\n\t_, err := log.f.Seek(i, 0)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\t\/* Read until EOS of binary OGDL.\n\n\t There should be a Header first.\n\t*\/\n\tp := newBinParser(log.f)\n\n\tif !p.header() {\n\t\treturn nil, 0, err\n\t}\n\tfor {\n\t\tlev, _, _ \/* typ, b*\/ := p.line(false)\n\t\tif lev == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tn := p.n\n\n\t\/\/ Read bytes\n\tb := make([]byte, n)\n\t_, err = log.f.ReadAt(b, i)\n\n\treturn b, int64(n), err\n}\n<commit_msg>add SetSync<commit_after>\/\/ Copyright 2012-2018, Rolf Veen and contributors.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage ogdl\n\nimport \"os\"\n\n\/\/ Log is a log store for binary OGDL objects.\n\/\/\n\/\/ All objects are appended to a file, and a position is returned.\n\/\/\ntype Log struct {\n\tf *os.File\n\tautoSync bool\n}\n\n\/\/ OpenLog opens a log file. If the file doesn't exist, it is created.\nfunc OpenLog(file string) (*Log, error) {\n\n\tf, err := os.OpenFile(file, os.O_RDWR|os.O_CREATE, 0666)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlog := Log{f, true}\n\n\treturn &log, nil\n}\n\n\/\/ Close closes a log file\nfunc (log *Log) Close() {\n\tlog.f.Close()\n}\n\n\/\/ Sync commits the changes to disk (the exact behavior is OS dependent).\nfunc (log *Log) Sync() {\n\tlog.f.Sync()\n}\n\n\/\/ Sync commits the changes to disk (the exact behavior is OS dependent).\nfunc (log *Log) SetSync(sync bool) {\n\tlog.autoSync = sync\n}\n\n\/\/ Add adds an OGDL object to the log. The starting position into the log\n\/\/ is returned.\nfunc (log *Log) Add(g *Graph) int64 {\n\n\tb := g.Binary()\n\n\tif b == nil {\n\t\treturn 0\n\t}\n\n\ti, _ := log.f.Seek(0, 2)\n\n\tlog.f.Write(b)\n\n\tif log.autoSync {\n\t\tlog.f.Sync()\n\t}\n\n\treturn i\n}\n\n\/\/ AddBinary adds an OGDL binary object to the log. The starting position into\n\/\/ the log is returned.\nfunc (log *Log) AddBinary(b []byte) int64 {\n\n\ti, _ := log.f.Seek(0, 2)\n\tlog.f.Write(b)\n\n\tif log.autoSync {\n\t\tlog.f.Sync()\n\t}\n\n\treturn i\n}\n\n\/\/ Get returns the OGDL object at the position given and the position of the\n\/\/ next object, or an error.\nfunc (log *Log) Get(i int64) (*Graph, int64, error) {\n\n\t\/* Position in file *\/\n\t_, err := log.f.Seek(i, 0)\n\tif err != nil {\n\t\treturn nil, -1, err\n\t}\n\n\tp := newBinParser(log.f)\n\tg := p.parse()\n\n\tif p.n == 0 {\n\t\treturn g, -1, nil\n\t}\n\n\treturn g, i + int64(p.n), err\n}\n\n\/\/ GetBinary returns the OGDL object at the position given and the position of the\n\/\/ next object, or an error. The object returned is in binary form, exactly\n\/\/ as it is stored in the log.\nfunc (log *Log) GetBinary(i int64) ([]byte, int64, error) {\n\n\t\/\/ Position in file\n\t_, err := log.f.Seek(i, 0)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\t\/* Read until EOS of binary OGDL.\n\n\t There should be a Header first.\n\t*\/\n\tp := newBinParser(log.f)\n\n\tif !p.header() {\n\t\treturn nil, 0, err\n\t}\n\tfor {\n\t\tlev, _, _ \/* typ, b*\/ := p.line(false)\n\t\tif lev == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tn := p.n\n\n\t\/\/ Read bytes\n\tb := make([]byte, n)\n\t_, err = log.f.ReadAt(b, i)\n\n\treturn b, int64(n), err\n}\n<|endoftext|>"} {"text":"<commit_before>package raft\n\n\/\/ A state machine command.\n\/\/ The Raft consensus module does not care about an actual type of a Command.\n\/\/ It is up to you to make sure that the other components that the consensus\n\/\/ module talks to agree on the actual type.\ntype Command interface{}\n\n\/\/ An entry in the Raft Log\ntype LogEntry struct {\n\tTermNo\n\tCommand\n}\n\n\/\/ Log entry index. First index is 1.\ntype LogIndex uint64\n\n\/\/ The Raft Log\n\/\/ An ordered array of `LogEntry`s with first index 1.\ntype Log interface {\n\t\/\/ An index of 0 indicates no entries present.\n\tgetIndexOfLastEntry() LogIndex\n\n\t\/\/ TODO: return values for invalid params or log errors\n\tgetLogEntryAtIndex(LogIndex) LogEntry\n\n\t\/\/ Get the term of the entry at the given index.\n\t\/\/ Equivalent to getLogEntryAtIndex(...).TermNo but this call allows\n\t\/\/ the Log implementation to not fetch the Command if that's a useful\n\t\/\/ optimization.\n\t\/\/ TODO: return values for invalid params or log errors\n\t\/\/ TODO: 0 for 0 and simplify callers and tests\n\tgetTermAtIndex(LogIndex) TermNo\n\n\t\/\/ Set the entries after the given index.\n\t\/\/\n\t\/\/ Theoretically, the Log can just delete all existing entries\n\t\/\/ following the given index and then append the given new\n\t\/\/ entries after that index.\n\t\/\/\n\t\/\/ However, Raft properties means that the Log can use this logic:\n\t\/\/ - (AppendEntries receiver step 3.) If an existing entry conflicts with\n\t\/\/ a new one (same index but different terms), delete the existing entry\n\t\/\/ and all that follow it (#5.3)\n\t\/\/ - (AppendEntries receiver step 4.) Append any new entries not already\n\t\/\/ in the log\n\t\/\/\n\t\/\/ I.e. the Log can choose to set only the entries starting from\n\t\/\/ the first index where the terms of the existing entry and the new\n\t\/\/ entry don't match.\n\t\/\/\n\t\/\/ Note that an index of 0 is valid and implies deleting all entries.\n\t\/\/ A zero length slice and nil both indicate no new entries to be added\n\t\/\/ after deleting.\n\t\/\/\n\t\/\/ This method is expected to always succeed. All errors should be indicated\n\t\/\/ by panicking, and this will shutdown the consensus module. This includes\n\t\/\/ both invalid parameters from the caller and internal errors in the Log.\n\tsetEntriesAfterIndex(LogIndex, []LogEntry)\n}\n\n\/\/ Helper method\nfunc getIndexAndTermOfLastEntry(log Log) (LogIndex, TermNo) {\n\tlastLogIndex := log.getIndexOfLastEntry()\n\tvar lastLogTerm TermNo = 0\n\tif lastLogIndex > 0 {\n\t\tlastLogTerm = log.getTermAtIndex(lastLogIndex)\n\t}\n\treturn lastLogIndex, lastLogTerm\n}\n<commit_msg>Edit Log comments on error handling and invalid values<commit_after>package raft\n\n\/\/ A state machine command.\n\/\/ The Raft consensus module does not care about an actual type of a Command.\n\/\/ It is up to you to make sure that the other components that the consensus\n\/\/ module talks to agree on the actual type.\ntype Command interface{}\n\n\/\/ An entry in the Raft Log\ntype LogEntry struct {\n\tTermNo\n\tCommand\n}\n\n\/\/ Log entry index. First index is 1.\ntype LogIndex uint64\n\n\/\/ The Raft Log\n\/\/ An ordered array of `LogEntry`s with first index 1.\n\/\/\n\/\/ All errors should be indicated using panic(). This includes both invalid\n\/\/ parameters sent by the consensus module and internal errors in the Log.\n\/\/ Note that such a panic will shutdown the consensus module.\n\/\/\ntype Log interface {\n\t\/\/ An index of 0 indicates no entries present.\n\tgetIndexOfLastEntry() LogIndex\n\n\t\/\/ An index of 0 is invalid for this call.\n\tgetLogEntryAtIndex(LogIndex) LogEntry\n\n\t\/\/ Get the term of the entry at the given index.\n\t\/\/ Equivalent to getLogEntryAtIndex(...).TermNo but this call allows\n\t\/\/ the Log implementation to not fetch the Command if that's a useful\n\t\/\/ optimization.\n\t\/\/ An index of 0 is invalid for this call.\n\tgetTermAtIndex(LogIndex) TermNo\n\n\t\/\/ Set the entries after the given index.\n\t\/\/\n\t\/\/ Theoretically, the Log can just delete all existing entries\n\t\/\/ following the given index and then append the given new\n\t\/\/ entries after that index.\n\t\/\/\n\t\/\/ However, Raft properties means that the Log can use this logic:\n\t\/\/ - (AppendEntries receiver step 3.) If an existing entry conflicts with\n\t\/\/ a new one (same index but different terms), delete the existing entry\n\t\/\/ and all that follow it (#5.3)\n\t\/\/ - (AppendEntries receiver step 4.) Append any new entries not already\n\t\/\/ in the log\n\t\/\/\n\t\/\/ I.e. the Log can choose to set only the entries starting from\n\t\/\/ the first index where the terms of the existing entry and the new\n\t\/\/ entry don't match.\n\t\/\/\n\t\/\/ An index of 0 is valid and implies deleting all entries.\n\t\/\/\n\t\/\/ A zero length slice and nil both indicate no new entries to be added\n\t\/\/ after deleting.\n\tsetEntriesAfterIndex(LogIndex, []LogEntry)\n}\n\n\/\/ Helper method\nfunc getIndexAndTermOfLastEntry(log Log) (LogIndex, TermNo) {\n\tlastLogIndex := log.getIndexOfLastEntry()\n\tvar lastLogTerm TermNo = 0\n\tif lastLogIndex > 0 {\n\t\tlastLogTerm = log.getTermAtIndex(lastLogIndex)\n\t}\n\treturn lastLogIndex, lastLogTerm\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/csv\"\n\t\"fmt\"\n\t\"log\"\n\t\"mime\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ recording pages filtered to access log\n\nvar (\n\taccessLog CSVLog\n\ttlsLog CSVLog\n)\n\ntype CSVLog struct {\n\tlock sync.Mutex\n\tfile *os.File\n\tcsv *csv.Writer\n}\n\nfunc (l *CSVLog) Open(filename string) {\n\tl.lock.Lock()\n\tdefer l.lock.Unlock()\n\tif l.file != nil && l.file != os.Stdout {\n\t\tl.file.Close()\n\t\tl.file = nil\n\t}\n\n\tif filename != \"\" {\n\t\tlogfile, err := os.OpenFile(filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Could not open log file (%s): %s\\n Sending access log messages to standard output instead.\", filename, err)\n\t\t} else {\n\t\t\tl.file = logfile\n\t\t}\n\t}\n\tif l.file == nil {\n\t\tl.file = os.Stdout\n\t}\n\n\tl.csv = csv.NewWriter(l.file)\n}\n\nfunc (l *CSVLog) Log(data []string) {\n\tl.lock.Lock()\n\tdefer l.lock.Unlock()\n\tl.csv.Write(data)\n\tl.csv.Flush()\n}\n\nfunc logAccess(req *http.Request, resp *http.Response, contentLength int, pruned bool, user string, tally map[rule]int, scores map[string]int, rule ACLActionRule, title string, ignored []string) []string {\n\tconf := getConfig()\n\n\tmodified := \"\"\n\tif pruned {\n\t\tmodified = \"pruned\"\n\t}\n\n\tstatus := 0\n\tif resp != nil {\n\t\tstatus = resp.StatusCode\n\t}\n\n\tif rule.Action == \"\" {\n\t\trule.Action = \"allow\"\n\t}\n\n\tvar contentType string\n\tif resp != nil {\n\t\tcontentType = resp.Header.Get(\"Content-Type\")\n\t}\n\tif ct2, _, err := mime.ParseMediaType(contentType); err == nil {\n\t\tcontentType = ct2\n\t}\n\n\tvar userAgent string\n\tif conf.LogUserAgent {\n\t\tuserAgent = req.Header.Get(\"User-Agent\")\n\t}\n\n\tlogLine := toStrings(time.Now().Format(\"2006-01-02 15:04:05.000\"), user, rule.Action, req.URL, req.Method, status, contentType, contentLength, modified, listTally(stringTally(tally)), listTally(scores), rule.Conditions(), title, strings.Join(ignored, \",\"), userAgent, req.Proto, req.Referer(), platform(req.Header.Get(\"User-Agent\")))\n\n\taccessLog.Log(logLine)\n\treturn logLine\n}\n\nfunc logTLS(user, serverAddr, serverName string, err error, cachedCert bool) {\n\terrStr := \"\"\n\tif err != nil {\n\t\terrStr = err.Error()\n\t}\n\n\tcached := \"\"\n\tif cachedCert {\n\t\tcached = \"cached certificate\"\n\t}\n\n\ttlsLog.Log(toStrings(time.Now().Format(\"2006-01-02 15:04:05.000\"), user, serverName, serverAddr, errStr, cached))\n}\n\n\/\/ toStrings converts its arguments into a slice of strings.\nfunc toStrings(a ...interface{}) []string {\n\tresult := make([]string, len(a))\n\tfor i, x := range a {\n\t\tresult[i] = fmt.Sprint(x)\n\t}\n\treturn result\n}\n\n\/\/ stringTally returns a copy of tally with strings instead of rules as keys.\nfunc stringTally(tally map[rule]int) map[string]int {\n\tst := make(map[string]int)\n\tfor r, n := range tally {\n\t\tst[r.String()] = n\n\t}\n\treturn st\n}\n\n\/\/ listTally sorts the tally and formats it as a comma-separated string.\nfunc listTally(tally map[string]int) string {\n\tb := new(bytes.Buffer)\n\tfor i, rule := range sortedKeys(tally) {\n\t\tif i > 0 {\n\t\t\tb.WriteString(\", \")\n\t\t}\n\t\tfmt.Fprint(b, rule, \" \", tally[rule])\n\t}\n\treturn b.String()\n}\n<commit_msg>Switch to microseconds instead<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"encoding\/csv\"\n\t\"fmt\"\n\t\"log\"\n\t\"mime\"\n\t\"net\/http\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ recording pages filtered to access log\n\nvar (\n\taccessLog CSVLog\n\ttlsLog CSVLog\n)\n\ntype CSVLog struct {\n\tlock sync.Mutex\n\tfile *os.File\n\tcsv *csv.Writer\n}\n\nfunc (l *CSVLog) Open(filename string) {\n\tl.lock.Lock()\n\tdefer l.lock.Unlock()\n\tif l.file != nil && l.file != os.Stdout {\n\t\tl.file.Close()\n\t\tl.file = nil\n\t}\n\n\tif filename != \"\" {\n\t\tlogfile, err := os.OpenFile(filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Could not open log file (%s): %s\\n Sending access log messages to standard output instead.\", filename, err)\n\t\t} else {\n\t\t\tl.file = logfile\n\t\t}\n\t}\n\tif l.file == nil {\n\t\tl.file = os.Stdout\n\t}\n\n\tl.csv = csv.NewWriter(l.file)\n}\n\nfunc (l *CSVLog) Log(data []string) {\n\tl.lock.Lock()\n\tdefer l.lock.Unlock()\n\tl.csv.Write(data)\n\tl.csv.Flush()\n}\n\nfunc logAccess(req *http.Request, resp *http.Response, contentLength int, pruned bool, user string, tally map[rule]int, scores map[string]int, rule ACLActionRule, title string, ignored []string) []string {\n\tconf := getConfig()\n\n\tmodified := \"\"\n\tif pruned {\n\t\tmodified = \"pruned\"\n\t}\n\n\tstatus := 0\n\tif resp != nil {\n\t\tstatus = resp.StatusCode\n\t}\n\n\tif rule.Action == \"\" {\n\t\trule.Action = \"allow\"\n\t}\n\n\tvar contentType string\n\tif resp != nil {\n\t\tcontentType = resp.Header.Get(\"Content-Type\")\n\t}\n\tif ct2, _, err := mime.ParseMediaType(contentType); err == nil {\n\t\tcontentType = ct2\n\t}\n\n\tvar userAgent string\n\tif conf.LogUserAgent {\n\t\tuserAgent = req.Header.Get(\"User-Agent\")\n\t}\n\n\tlogLine := toStrings(time.Now().Format(\"2006-01-02 15:04:05.000000\"), user, rule.Action, req.URL, req.Method, status, contentType, contentLength, modified, listTally(stringTally(tally)), listTally(scores), rule.Conditions(), title, strings.Join(ignored, \",\"), userAgent, req.Proto, req.Referer(), platform(req.Header.Get(\"User-Agent\")))\n\n\taccessLog.Log(logLine)\n\treturn logLine\n}\n\nfunc logTLS(user, serverAddr, serverName string, err error, cachedCert bool) {\n\terrStr := \"\"\n\tif err != nil {\n\t\terrStr = err.Error()\n\t}\n\n\tcached := \"\"\n\tif cachedCert {\n\t\tcached = \"cached certificate\"\n\t}\n\n\ttlsLog.Log(toStrings(time.Now().Format(\"2006-01-02 15:04:05.000000\"), user, serverName, serverAddr, errStr, cached))\n}\n\n\/\/ toStrings converts its arguments into a slice of strings.\nfunc toStrings(a ...interface{}) []string {\n\tresult := make([]string, len(a))\n\tfor i, x := range a {\n\t\tresult[i] = fmt.Sprint(x)\n\t}\n\treturn result\n}\n\n\/\/ stringTally returns a copy of tally with strings instead of rules as keys.\nfunc stringTally(tally map[rule]int) map[string]int {\n\tst := make(map[string]int)\n\tfor r, n := range tally {\n\t\tst[r.String()] = n\n\t}\n\treturn st\n}\n\n\/\/ listTally sorts the tally and formats it as a comma-separated string.\nfunc listTally(tally map[string]int) string {\n\tb := new(bytes.Buffer)\n\tfor i, rule := range sortedKeys(tally) {\n\t\tif i > 0 {\n\t\t\tb.WriteString(\", \")\n\t\t}\n\t\tfmt.Fprint(b, rule, \" \", tally[rule])\n\t}\n\treturn b.String()\n}\n<|endoftext|>"} {"text":"<commit_before>package lnd\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/btcsuite\/btcd\/connmgr\"\n\t\"github.com\/btcsuite\/btclog\"\n\t\"github.com\/jrick\/logrotate\/rotator\"\n\t\"github.com\/lightninglabs\/neutrino\"\n\tsphinx \"github.com\/lightningnetwork\/lightning-onion\"\n\t\"github.com\/lightningnetwork\/lnd\/autopilot\"\n\t\"github.com\/lightningnetwork\/lnd\/build\"\n\t\"github.com\/lightningnetwork\/lnd\/chainntnfs\"\n\t\"github.com\/lightningnetwork\/lnd\/chanbackup\"\n\t\"github.com\/lightningnetwork\/lnd\/channeldb\"\n\t\"github.com\/lightningnetwork\/lnd\/channelnotifier\"\n\t\"github.com\/lightningnetwork\/lnd\/contractcourt\"\n\t\"github.com\/lightningnetwork\/lnd\/discovery\"\n\t\"github.com\/lightningnetwork\/lnd\/htlcswitch\"\n\t\"github.com\/lightningnetwork\/lnd\/invoices\"\n\t\"github.com\/lightningnetwork\/lnd\/lnrpc\/autopilotrpc\"\n\t\"github.com\/lightningnetwork\/lnd\/lnrpc\/chainrpc\"\n\t\"github.com\/lightningnetwork\/lnd\/lnrpc\/invoicesrpc\"\n\t\"github.com\/lightningnetwork\/lnd\/lnrpc\/routerrpc\"\n\t\"github.com\/lightningnetwork\/lnd\/lnrpc\/signrpc\"\n\t\"github.com\/lightningnetwork\/lnd\/lnrpc\/walletrpc\"\n\t\"github.com\/lightningnetwork\/lnd\/lnrpc\/wtclientrpc\"\n\t\"github.com\/lightningnetwork\/lnd\/lnwallet\"\n\t\"github.com\/lightningnetwork\/lnd\/monitoring\"\n\t\"github.com\/lightningnetwork\/lnd\/netann\"\n\t\"github.com\/lightningnetwork\/lnd\/routing\"\n\t\"github.com\/lightningnetwork\/lnd\/signal\"\n\t\"github.com\/lightningnetwork\/lnd\/sweep\"\n\t\"github.com\/lightningnetwork\/lnd\/watchtower\"\n\t\"github.com\/lightningnetwork\/lnd\/watchtower\/wtclient\"\n)\n\n\/\/ Loggers per subsystem. A single backend logger is created and all subsystem\n\/\/ loggers created from it will write to the backend. When adding new\n\/\/ subsystems, add the subsystem logger variable here and to the\n\/\/ subsystemLoggers map.\n\/\/\n\/\/ Loggers can not be used before the log rotator has been initialized with a\n\/\/ log file. This must be performed early during application startup by\n\/\/ calling initLogRotator.\nvar (\n\tlogWriter = &build.LogWriter{}\n\n\t\/\/ backendLog is the logging backend used to create all subsystem\n\t\/\/ loggers. The backend must not be used before the log rotator has\n\t\/\/ been initialized, or data races and\/or nil pointer dereferences will\n\t\/\/ occur.\n\tbackendLog = btclog.NewBackend(logWriter)\n\n\t\/\/ logRotator is one of the logging outputs. It should be closed on\n\t\/\/ application shutdown.\n\tlogRotator *rotator.Rotator\n\n\tltndLog = build.NewSubLogger(\"LTND\", backendLog.Logger)\n\tlnwlLog = build.NewSubLogger(\"LNWL\", backendLog.Logger)\n\tpeerLog = build.NewSubLogger(\"PEER\", backendLog.Logger)\n\tdiscLog = build.NewSubLogger(\"DISC\", backendLog.Logger)\n\trpcsLog = build.NewSubLogger(\"RPCS\", backendLog.Logger)\n\tsrvrLog = build.NewSubLogger(\"SRVR\", backendLog.Logger)\n\tntfnLog = build.NewSubLogger(\"NTFN\", backendLog.Logger)\n\tchdbLog = build.NewSubLogger(\"CHDB\", backendLog.Logger)\n\tfndgLog = build.NewSubLogger(\"FNDG\", backendLog.Logger)\n\thswcLog = build.NewSubLogger(\"HSWC\", backendLog.Logger)\n\tutxnLog = build.NewSubLogger(\"UTXN\", backendLog.Logger)\n\tbrarLog = build.NewSubLogger(\"BRAR\", backendLog.Logger)\n\tcmgrLog = build.NewSubLogger(\"CMGR\", backendLog.Logger)\n\tcrtrLog = build.NewSubLogger(\"CRTR\", backendLog.Logger)\n\tbtcnLog = build.NewSubLogger(\"BTCN\", backendLog.Logger)\n\tatplLog = build.NewSubLogger(\"ATPL\", backendLog.Logger)\n\tcnctLog = build.NewSubLogger(\"CNCT\", backendLog.Logger)\n\tsphxLog = build.NewSubLogger(\"SPHX\", backendLog.Logger)\n\tswprLog = build.NewSubLogger(\"SWPR\", backendLog.Logger)\n\tsgnrLog = build.NewSubLogger(\"SGNR\", backendLog.Logger)\n\twlktLog = build.NewSubLogger(\"WLKT\", backendLog.Logger)\n\tarpcLog = build.NewSubLogger(\"ARPC\", backendLog.Logger)\n\tinvcLog = build.NewSubLogger(\"INVC\", backendLog.Logger)\n\tnannLog = build.NewSubLogger(\"NANN\", backendLog.Logger)\n\twtwrLog = build.NewSubLogger(\"WTWR\", backendLog.Logger)\n\tntfrLog = build.NewSubLogger(\"NTFR\", backendLog.Logger)\n\tirpcLog = build.NewSubLogger(\"IRPC\", backendLog.Logger)\n\tchnfLog = build.NewSubLogger(\"CHNF\", backendLog.Logger)\n\tchbuLog = build.NewSubLogger(\"CHBU\", backendLog.Logger)\n\tpromLog = build.NewSubLogger(\"PROM\", backendLog.Logger)\n\twtclLog = build.NewSubLogger(\"WTCL\", backendLog.Logger)\n)\n\n\/\/ Initialize package-global logger variables.\nfunc init() {\n\tlnwallet.UseLogger(lnwlLog)\n\tdiscovery.UseLogger(discLog)\n\tchainntnfs.UseLogger(ntfnLog)\n\tchanneldb.UseLogger(chdbLog)\n\thtlcswitch.UseLogger(hswcLog)\n\tconnmgr.UseLogger(cmgrLog)\n\trouting.UseLogger(crtrLog)\n\tneutrino.UseLogger(btcnLog)\n\tautopilot.UseLogger(atplLog)\n\tcontractcourt.UseLogger(cnctLog)\n\tsphinx.UseLogger(sphxLog)\n\tsignal.UseLogger(ltndLog)\n\tsweep.UseLogger(swprLog)\n\tsignrpc.UseLogger(sgnrLog)\n\twalletrpc.UseLogger(wlktLog)\n\tautopilotrpc.UseLogger(arpcLog)\n\tinvoices.UseLogger(invcLog)\n\tnetann.UseLogger(nannLog)\n\twatchtower.UseLogger(wtwrLog)\n\tchainrpc.UseLogger(ntfrLog)\n\tinvoicesrpc.UseLogger(irpcLog)\n\tchannelnotifier.UseLogger(chnfLog)\n\tchanbackup.UseLogger(chbuLog)\n\tmonitoring.UseLogger(promLog)\n\twtclient.UseLogger(wtclLog)\n\n\taddSubLogger(routerrpc.Subsystem, routerrpc.UseLogger)\n\taddSubLogger(wtclientrpc.Subsystem, wtclientrpc.UseLogger)\n}\n\n\/\/ addSubLogger is a helper method to conveniently register the logger of a sub\n\/\/ system.\nfunc addSubLogger(subsystem string, useLogger func(btclog.Logger)) {\n\tlogger := build.NewSubLogger(subsystem, backendLog.Logger)\n\tuseLogger(logger)\n\tsubsystemLoggers[subsystem] = logger\n}\n\n\/\/ subsystemLoggers maps each subsystem identifier to its associated logger.\nvar subsystemLoggers = map[string]btclog.Logger{\n\t\"LTND\": ltndLog,\n\t\"LNWL\": lnwlLog,\n\t\"PEER\": peerLog,\n\t\"DISC\": discLog,\n\t\"RPCS\": rpcsLog,\n\t\"SRVR\": srvrLog,\n\t\"NTFN\": ntfnLog,\n\t\"CHDB\": chdbLog,\n\t\"FNDG\": fndgLog,\n\t\"HSWC\": hswcLog,\n\t\"UTXN\": utxnLog,\n\t\"BRAR\": brarLog,\n\t\"CMGR\": cmgrLog,\n\t\"CRTR\": crtrLog,\n\t\"BTCN\": btcnLog,\n\t\"ATPL\": atplLog,\n\t\"CNCT\": cnctLog,\n\t\"SPHX\": sphxLog,\n\t\"SWPR\": swprLog,\n\t\"SGNR\": sgnrLog,\n\t\"WLKT\": wlktLog,\n\t\"ARPC\": arpcLog,\n\t\"INVC\": invcLog,\n\t\"NANN\": nannLog,\n\t\"WTWR\": wtwrLog,\n\t\"NTFR\": ntfnLog,\n\t\"IRPC\": irpcLog,\n\t\"CHNF\": chnfLog,\n\t\"CHBU\": chbuLog,\n\t\"PROM\": promLog,\n\t\"WTCL\": wtclLog,\n}\n\n\/\/ initLogRotator initializes the logging rotator to write logs to logFile and\n\/\/ create roll files in the same directory. It must be called before the\n\/\/ package-global log rotator variables are used.\nfunc initLogRotator(logFile string, MaxLogFileSize int, MaxLogFiles int) {\n\tlogDir, _ := filepath.Split(logFile)\n\terr := os.MkdirAll(logDir, 0700)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to create log directory: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tr, err := rotator.New(logFile, int64(MaxLogFileSize*1024), false, MaxLogFiles)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to create file rotator: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tpr, pw := io.Pipe()\n\tgo r.Run(pr)\n\n\tlogWriter.RotatorPipe = pw\n\tlogRotator = r\n}\n\n\/\/ setLogLevel sets the logging level for provided subsystem. Invalid\n\/\/ subsystems are ignored. Uninitialized subsystems are dynamically created as\n\/\/ needed.\nfunc setLogLevel(subsystemID string, logLevel string) {\n\t\/\/ Ignore invalid subsystems.\n\tlogger, ok := subsystemLoggers[subsystemID]\n\tif !ok {\n\t\treturn\n\t}\n\n\t\/\/ Defaults to info if the log level is invalid.\n\tlevel, _ := btclog.LevelFromString(logLevel)\n\tlogger.SetLevel(level)\n}\n\n\/\/ setLogLevels sets the log level for all subsystem loggers to the passed\n\/\/ level. It also dynamically creates the subsystem loggers as needed, so it\n\/\/ can be used to initialize the logging system.\nfunc setLogLevels(logLevel string) {\n\t\/\/ Configure all sub-systems with the new logging level. Dynamically\n\t\/\/ create loggers as needed.\n\tfor subsystemID := range subsystemLoggers {\n\t\tsetLogLevel(subsystemID, logLevel)\n\t}\n}\n\n\/\/ logClosure is used to provide a closure over expensive logging operations so\n\/\/ don't have to be performed when the logging level doesn't warrant it.\ntype logClosure func() string\n\n\/\/ String invokes the underlying function and returns the result.\nfunc (c logClosure) String() string {\n\treturn c()\n}\n\n\/\/ newLogClosure returns a new closure over a function that returns a string\n\/\/ which itself provides a Stringer interface so that it can be used with the\n\/\/ logging system.\nfunc newLogClosure(c func() string) logClosure {\n\treturn logClosure(c)\n}\n<commit_msg>log: correct NTFR -> ntfrLog mapping<commit_after>package lnd\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/btcsuite\/btcd\/connmgr\"\n\t\"github.com\/btcsuite\/btclog\"\n\t\"github.com\/jrick\/logrotate\/rotator\"\n\t\"github.com\/lightninglabs\/neutrino\"\n\tsphinx \"github.com\/lightningnetwork\/lightning-onion\"\n\t\"github.com\/lightningnetwork\/lnd\/autopilot\"\n\t\"github.com\/lightningnetwork\/lnd\/build\"\n\t\"github.com\/lightningnetwork\/lnd\/chainntnfs\"\n\t\"github.com\/lightningnetwork\/lnd\/chanbackup\"\n\t\"github.com\/lightningnetwork\/lnd\/channeldb\"\n\t\"github.com\/lightningnetwork\/lnd\/channelnotifier\"\n\t\"github.com\/lightningnetwork\/lnd\/contractcourt\"\n\t\"github.com\/lightningnetwork\/lnd\/discovery\"\n\t\"github.com\/lightningnetwork\/lnd\/htlcswitch\"\n\t\"github.com\/lightningnetwork\/lnd\/invoices\"\n\t\"github.com\/lightningnetwork\/lnd\/lnrpc\/autopilotrpc\"\n\t\"github.com\/lightningnetwork\/lnd\/lnrpc\/chainrpc\"\n\t\"github.com\/lightningnetwork\/lnd\/lnrpc\/invoicesrpc\"\n\t\"github.com\/lightningnetwork\/lnd\/lnrpc\/routerrpc\"\n\t\"github.com\/lightningnetwork\/lnd\/lnrpc\/signrpc\"\n\t\"github.com\/lightningnetwork\/lnd\/lnrpc\/walletrpc\"\n\t\"github.com\/lightningnetwork\/lnd\/lnrpc\/wtclientrpc\"\n\t\"github.com\/lightningnetwork\/lnd\/lnwallet\"\n\t\"github.com\/lightningnetwork\/lnd\/monitoring\"\n\t\"github.com\/lightningnetwork\/lnd\/netann\"\n\t\"github.com\/lightningnetwork\/lnd\/routing\"\n\t\"github.com\/lightningnetwork\/lnd\/signal\"\n\t\"github.com\/lightningnetwork\/lnd\/sweep\"\n\t\"github.com\/lightningnetwork\/lnd\/watchtower\"\n\t\"github.com\/lightningnetwork\/lnd\/watchtower\/wtclient\"\n)\n\n\/\/ Loggers per subsystem. A single backend logger is created and all subsystem\n\/\/ loggers created from it will write to the backend. When adding new\n\/\/ subsystems, add the subsystem logger variable here and to the\n\/\/ subsystemLoggers map.\n\/\/\n\/\/ Loggers can not be used before the log rotator has been initialized with a\n\/\/ log file. This must be performed early during application startup by\n\/\/ calling initLogRotator.\nvar (\n\tlogWriter = &build.LogWriter{}\n\n\t\/\/ backendLog is the logging backend used to create all subsystem\n\t\/\/ loggers. The backend must not be used before the log rotator has\n\t\/\/ been initialized, or data races and\/or nil pointer dereferences will\n\t\/\/ occur.\n\tbackendLog = btclog.NewBackend(logWriter)\n\n\t\/\/ logRotator is one of the logging outputs. It should be closed on\n\t\/\/ application shutdown.\n\tlogRotator *rotator.Rotator\n\n\tltndLog = build.NewSubLogger(\"LTND\", backendLog.Logger)\n\tlnwlLog = build.NewSubLogger(\"LNWL\", backendLog.Logger)\n\tpeerLog = build.NewSubLogger(\"PEER\", backendLog.Logger)\n\tdiscLog = build.NewSubLogger(\"DISC\", backendLog.Logger)\n\trpcsLog = build.NewSubLogger(\"RPCS\", backendLog.Logger)\n\tsrvrLog = build.NewSubLogger(\"SRVR\", backendLog.Logger)\n\tntfnLog = build.NewSubLogger(\"NTFN\", backendLog.Logger)\n\tchdbLog = build.NewSubLogger(\"CHDB\", backendLog.Logger)\n\tfndgLog = build.NewSubLogger(\"FNDG\", backendLog.Logger)\n\thswcLog = build.NewSubLogger(\"HSWC\", backendLog.Logger)\n\tutxnLog = build.NewSubLogger(\"UTXN\", backendLog.Logger)\n\tbrarLog = build.NewSubLogger(\"BRAR\", backendLog.Logger)\n\tcmgrLog = build.NewSubLogger(\"CMGR\", backendLog.Logger)\n\tcrtrLog = build.NewSubLogger(\"CRTR\", backendLog.Logger)\n\tbtcnLog = build.NewSubLogger(\"BTCN\", backendLog.Logger)\n\tatplLog = build.NewSubLogger(\"ATPL\", backendLog.Logger)\n\tcnctLog = build.NewSubLogger(\"CNCT\", backendLog.Logger)\n\tsphxLog = build.NewSubLogger(\"SPHX\", backendLog.Logger)\n\tswprLog = build.NewSubLogger(\"SWPR\", backendLog.Logger)\n\tsgnrLog = build.NewSubLogger(\"SGNR\", backendLog.Logger)\n\twlktLog = build.NewSubLogger(\"WLKT\", backendLog.Logger)\n\tarpcLog = build.NewSubLogger(\"ARPC\", backendLog.Logger)\n\tinvcLog = build.NewSubLogger(\"INVC\", backendLog.Logger)\n\tnannLog = build.NewSubLogger(\"NANN\", backendLog.Logger)\n\twtwrLog = build.NewSubLogger(\"WTWR\", backendLog.Logger)\n\tntfrLog = build.NewSubLogger(\"NTFR\", backendLog.Logger)\n\tirpcLog = build.NewSubLogger(\"IRPC\", backendLog.Logger)\n\tchnfLog = build.NewSubLogger(\"CHNF\", backendLog.Logger)\n\tchbuLog = build.NewSubLogger(\"CHBU\", backendLog.Logger)\n\tpromLog = build.NewSubLogger(\"PROM\", backendLog.Logger)\n\twtclLog = build.NewSubLogger(\"WTCL\", backendLog.Logger)\n)\n\n\/\/ Initialize package-global logger variables.\nfunc init() {\n\tlnwallet.UseLogger(lnwlLog)\n\tdiscovery.UseLogger(discLog)\n\tchainntnfs.UseLogger(ntfnLog)\n\tchanneldb.UseLogger(chdbLog)\n\thtlcswitch.UseLogger(hswcLog)\n\tconnmgr.UseLogger(cmgrLog)\n\trouting.UseLogger(crtrLog)\n\tneutrino.UseLogger(btcnLog)\n\tautopilot.UseLogger(atplLog)\n\tcontractcourt.UseLogger(cnctLog)\n\tsphinx.UseLogger(sphxLog)\n\tsignal.UseLogger(ltndLog)\n\tsweep.UseLogger(swprLog)\n\tsignrpc.UseLogger(sgnrLog)\n\twalletrpc.UseLogger(wlktLog)\n\tautopilotrpc.UseLogger(arpcLog)\n\tinvoices.UseLogger(invcLog)\n\tnetann.UseLogger(nannLog)\n\twatchtower.UseLogger(wtwrLog)\n\tchainrpc.UseLogger(ntfrLog)\n\tinvoicesrpc.UseLogger(irpcLog)\n\tchannelnotifier.UseLogger(chnfLog)\n\tchanbackup.UseLogger(chbuLog)\n\tmonitoring.UseLogger(promLog)\n\twtclient.UseLogger(wtclLog)\n\n\taddSubLogger(routerrpc.Subsystem, routerrpc.UseLogger)\n\taddSubLogger(wtclientrpc.Subsystem, wtclientrpc.UseLogger)\n}\n\n\/\/ addSubLogger is a helper method to conveniently register the logger of a sub\n\/\/ system.\nfunc addSubLogger(subsystem string, useLogger func(btclog.Logger)) {\n\tlogger := build.NewSubLogger(subsystem, backendLog.Logger)\n\tuseLogger(logger)\n\tsubsystemLoggers[subsystem] = logger\n}\n\n\/\/ subsystemLoggers maps each subsystem identifier to its associated logger.\nvar subsystemLoggers = map[string]btclog.Logger{\n\t\"LTND\": ltndLog,\n\t\"LNWL\": lnwlLog,\n\t\"PEER\": peerLog,\n\t\"DISC\": discLog,\n\t\"RPCS\": rpcsLog,\n\t\"SRVR\": srvrLog,\n\t\"NTFN\": ntfnLog,\n\t\"CHDB\": chdbLog,\n\t\"FNDG\": fndgLog,\n\t\"HSWC\": hswcLog,\n\t\"UTXN\": utxnLog,\n\t\"BRAR\": brarLog,\n\t\"CMGR\": cmgrLog,\n\t\"CRTR\": crtrLog,\n\t\"BTCN\": btcnLog,\n\t\"ATPL\": atplLog,\n\t\"CNCT\": cnctLog,\n\t\"SPHX\": sphxLog,\n\t\"SWPR\": swprLog,\n\t\"SGNR\": sgnrLog,\n\t\"WLKT\": wlktLog,\n\t\"ARPC\": arpcLog,\n\t\"INVC\": invcLog,\n\t\"NANN\": nannLog,\n\t\"WTWR\": wtwrLog,\n\t\"NTFR\": ntfrLog,\n\t\"IRPC\": irpcLog,\n\t\"CHNF\": chnfLog,\n\t\"CHBU\": chbuLog,\n\t\"PROM\": promLog,\n\t\"WTCL\": wtclLog,\n}\n\n\/\/ initLogRotator initializes the logging rotator to write logs to logFile and\n\/\/ create roll files in the same directory. It must be called before the\n\/\/ package-global log rotator variables are used.\nfunc initLogRotator(logFile string, MaxLogFileSize int, MaxLogFiles int) {\n\tlogDir, _ := filepath.Split(logFile)\n\terr := os.MkdirAll(logDir, 0700)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to create log directory: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\tr, err := rotator.New(logFile, int64(MaxLogFileSize*1024), false, MaxLogFiles)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"failed to create file rotator: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tpr, pw := io.Pipe()\n\tgo r.Run(pr)\n\n\tlogWriter.RotatorPipe = pw\n\tlogRotator = r\n}\n\n\/\/ setLogLevel sets the logging level for provided subsystem. Invalid\n\/\/ subsystems are ignored. Uninitialized subsystems are dynamically created as\n\/\/ needed.\nfunc setLogLevel(subsystemID string, logLevel string) {\n\t\/\/ Ignore invalid subsystems.\n\tlogger, ok := subsystemLoggers[subsystemID]\n\tif !ok {\n\t\treturn\n\t}\n\n\t\/\/ Defaults to info if the log level is invalid.\n\tlevel, _ := btclog.LevelFromString(logLevel)\n\tlogger.SetLevel(level)\n}\n\n\/\/ setLogLevels sets the log level for all subsystem loggers to the passed\n\/\/ level. It also dynamically creates the subsystem loggers as needed, so it\n\/\/ can be used to initialize the logging system.\nfunc setLogLevels(logLevel string) {\n\t\/\/ Configure all sub-systems with the new logging level. Dynamically\n\t\/\/ create loggers as needed.\n\tfor subsystemID := range subsystemLoggers {\n\t\tsetLogLevel(subsystemID, logLevel)\n\t}\n}\n\n\/\/ logClosure is used to provide a closure over expensive logging operations so\n\/\/ don't have to be performed when the logging level doesn't warrant it.\ntype logClosure func() string\n\n\/\/ String invokes the underlying function and returns the result.\nfunc (c logClosure) String() string {\n\treturn c()\n}\n\n\/\/ newLogClosure returns a new closure over a function that returns a string\n\/\/ which itself provides a Stringer interface so that it can be used with the\n\/\/ logging system.\nfunc newLogClosure(c func() string) logClosure {\n\treturn logClosure(c)\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\ntype raynode struct {\n\tCost int\n}\n\ntype rayMap map[position]raynode\n\nfunc (g *game) bestParent(rm rayMap, from, pos position) (position, int) {\n\tp := pos.Parents(from)\n\tb := p[0]\n\tif len(p) > 1 && rm[p[1]].Cost+g.losCost(p[1]) < rm[b].Cost+g.losCost(b) {\n\t\tb = p[1]\n\t}\n\treturn b, rm[b].Cost + g.losCost(b)\n}\n\nfunc (g *game) losCost(pos position) int {\n\tif g.Player.Pos == pos {\n\t\treturn 0\n\t}\n\tc := g.Dungeon.Cell(pos)\n\tif c.T == WallCell {\n\t\treturn g.LosRange()\n\t}\n\tif _, ok := g.Clouds[pos]; ok {\n\t\treturn g.LosRange()\n\t}\n\tif _, ok := g.Doors[pos]; ok {\n\t\tif pos != g.Player.Pos {\n\t\t\tmons := g.MonsterAt(pos)\n\t\t\tif !mons.Exists() {\n\t\t\t\treturn g.LosRange()\n\t\t\t}\n\t\t}\n\t}\n\tif _, ok := g.Fungus[pos]; ok {\n\t\treturn g.LosRange() - 1\n\t}\n\treturn 1\n}\n\nfunc (g *game) buildRayMap(from position, distance int) rayMap {\n\trm := rayMap{}\n\trm[from] = raynode{Cost: 0}\n\tfor d := 1; d <= distance; d++ {\n\t\tfor x := -d + from.X; x <= d+from.X; x++ {\n\t\t\tfor _, pos := range []position{{x, from.Y + d}, {x, from.Y - d}} {\n\t\t\t\tif !pos.valid() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t_, c := g.bestParent(rm, from, pos)\n\t\t\t\trm[pos] = raynode{Cost: c}\n\t\t\t}\n\t\t}\n\t\tfor y := -d + 1 + from.Y; y <= d-1+from.Y; y++ {\n\t\t\tfor _, pos := range []position{{from.X + d, y}, {from.X - d, y}} {\n\t\t\t\tif !pos.valid() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t_, c := g.bestParent(rm, from, pos)\n\t\t\t\trm[pos] = raynode{Cost: c}\n\t\t\t}\n\t\t}\n\t}\n\treturn rm\n}\n\nfunc (g *game) LosRange() int {\n\tlosRange := 6\n\tif g.Player.Armour == ShinyPlates {\n\t\tlosRange++\n\t}\n\tif g.Player.Aptitudes[AptStealthyLOS] {\n\t\tlosRange -= 2\n\t}\n\tif g.Player.Armour == HarmonistRobe {\n\t\tlosRange -= 1\n\t}\n\tif g.Player.Weapon == Frundis {\n\t\tlosRange -= 1\n\t}\n\tif g.Player.HasStatus(StatusShadows) {\n\t\tlosRange = 1\n\t}\n\tif losRange < 1 {\n\t\tlosRange = 1\n\t}\n\treturn losRange\n}\n\nfunc (g *game) StopAuto() {\n\tif g.Autoexploring && !g.AutoHalt {\n\t\tg.Print(\"You stop exploring.\")\n\t} else if g.AutoDir != NoDir {\n\t\tg.Print(\"You stop.\")\n\t} else if g.AutoTarget != InvalidPos {\n\t\tg.Print(\"You stop.\")\n\t}\n\tg.AutoHalt = true\n\tg.AutoDir = NoDir\n\tg.AutoTarget = InvalidPos\n\tif g.Resting {\n\t\tg.Stats.RestInterrupt++\n\t\tg.Resting = false\n\t\tg.Print(\"You could not sleep.\")\n\t}\n}\n\nfunc (g *game) ComputeLOS() {\n\tm := map[position]bool{}\n\tlosRange := g.LosRange()\n\tg.Player.Rays = g.buildRayMap(g.Player.Pos, losRange)\n\tfor pos, n := range g.Player.Rays {\n\t\tif n.Cost < g.LosRange() {\n\t\t\tm[pos] = true\n\t\t\tg.SeePosition(pos)\n\t\t}\n\t}\n\tg.Player.LOS = m\n\tfor _, mons := range g.Monsters {\n\t\tif mons.Exists() && g.Player.LOS[mons.Pos] {\n\t\t\tif mons.Seen {\n\t\t\t\tg.StopAuto()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmons.Seen = true\n\t\t\tg.Printf(\"You see %s (%v).\", mons.Kind.Indefinite(false), mons.State)\n\t\t\tif mons.Kind.Dangerousness() > 10 {\n\t\t\t\tg.StoryPrint(mons.Kind.SeenStoryText())\n\t\t\t}\n\t\t\tg.StopAuto()\n\t\t}\n\t}\n}\n\nfunc (g *game) SeePosition(pos position) {\n\tif !g.Dungeon.Cell(pos).Explored {\n\t\tsee := \"see\"\n\t\tif c, ok := g.Collectables[pos]; ok {\n\t\t\tif c.Quantity > 1 {\n\t\t\t\tg.Printf(\"You %s %d %s.\", see, c.Quantity, c.Consumable.Plural())\n\t\t\t} else {\n\t\t\t\tg.Printf(\"You %s %s.\", see, Indefinite(c.Consumable.String(), false))\n\t\t\t}\n\t\t\tg.StopAuto()\n\t\t} else if _, ok := g.Stairs[pos]; ok {\n\t\t\tg.Printf(\"You %s stairs.\", see)\n\t\t\tg.StopAuto()\n\t\t} else if eq, ok := g.Equipables[pos]; ok {\n\t\t\tg.Printf(\"You %s %s.\", see, Indefinite(eq.String(), false))\n\t\t\tg.StopAuto()\n\t\t} else if rd, ok := g.Rods[pos]; ok {\n\t\t\tg.Printf(\"You %s %s.\", see, Indefinite(rd.String(), false))\n\t\t\tg.StopAuto()\n\t\t} else if stn, ok := g.MagicalStones[pos]; ok {\n\t\t\tg.Printf(\"You %s %s.\", see, Indefinite(stn.String(), false))\n\t\t\tg.StopAuto()\n\t\t}\n\t\tg.FunAction()\n\t\tg.Dungeon.SetExplored(pos)\n\t\tg.DijkstraMapRebuild = true\n\t} else {\n\t\tif g.WrongWall[pos] {\n\t\t\tg.Printf(\"There is no longer a wall there.\")\n\t\t\tg.StopAuto()\n\t\t\tg.DijkstraMapRebuild = true\n\t\t}\n\t\tif cld, ok := g.Clouds[pos]; ok && cld == CloudFire && (g.WrongDoor[pos] || g.WrongFoliage[pos]) {\n\t\t\tg.Printf(\"There are flames there.\")\n\t\t\tg.StopAuto()\n\t\t\tg.DijkstraMapRebuild = true\n\t\t}\n\t}\n\tif g.WrongWall[pos] {\n\t\tdelete(g.WrongWall, pos)\n\t\tif g.Dungeon.Cell(pos).T == FreeCell {\n\t\t\tdelete(g.TemporalWalls, pos)\n\t\t}\n\t}\n\tif _, ok := g.WrongDoor[pos]; ok {\n\t\tdelete(g.WrongDoor, pos)\n\t}\n\tif _, ok := g.WrongFoliage[pos]; ok {\n\t\tdelete(g.WrongFoliage, pos)\n\t}\n\tif _, ok := g.DreamingMonster[pos]; ok {\n\t\tdelete(g.DreamingMonster, pos)\n\t}\n}\n\nfunc (g *game) ComputeExclusion(pos position, toggle bool) {\n\texclusionRange := g.LosRange()\n\tg.ExclusionsMap[pos] = toggle\n\tfor d := 1; d <= exclusionRange; d++ {\n\t\tfor x := -d + pos.X; x <= d+pos.X; x++ {\n\t\t\tfor _, pos := range []position{{x, pos.Y + d}, {x, pos.Y - d}} {\n\t\t\t\tif !pos.valid() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tg.ExclusionsMap[pos] = toggle\n\t\t\t}\n\t\t}\n\t\tfor y := -d + 1 + pos.Y; y <= d-1+pos.Y; y++ {\n\t\t\tfor _, pos := range []position{{pos.X + d, y}, {pos.X - d, y}} {\n\t\t\t\tif !pos.valid() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tg.ExclusionsMap[pos] = toggle\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (g *game) Ray(pos position) []position {\n\tif !g.Player.LOS[pos] {\n\t\treturn nil\n\t}\n\tray := []position{}\n\tfor pos != g.Player.Pos {\n\t\tray = append(ray, pos)\n\t\tpos, _ = g.bestParent(g.Player.Rays, g.Player.Pos, pos)\n\t}\n\treturn ray\n}\n\nfunc (g *game) ComputeRayHighlight(pos position) {\n\tg.Highlight = map[position]bool{}\n\tray := g.Ray(pos)\n\tfor _, p := range ray {\n\t\tg.Highlight[p] = true\n\t}\n}\n\nfunc (g *game) ComputeNoise() {\n\tdij := &noisePath{game: g}\n\trg := g.LosRange() + 2\n\tif rg <= 5 {\n\t\trg++\n\t}\n\tif g.Player.Aptitudes[AptHear] {\n\t\trg++\n\t}\n\tnm := Dijkstra(dij, []position{g.Player.Pos}, rg)\n\tcount := 0\n\tnoise := map[position]bool{}\n\trmax := 3\n\tif g.Player.Aptitudes[AptHear] {\n\t\trmax--\n\t}\n\tfor pos := range nm {\n\t\tif g.Player.LOS[pos] {\n\t\t\tcontinue\n\t\t}\n\t\tmons := g.MonsterAt(pos)\n\t\tif mons.Exists() && mons.State != Resting && RandInt(rmax) == 0 {\n\t\t\tswitch mons.Kind {\n\t\t\tcase MonsMirrorSpecter, MonsGiantBee, MonsSatowalgaPlant:\n\t\t\t\t\/\/ no footsteps\n\t\t\tdefault:\n\t\t\t\tnoise[pos] = true\n\t\t\t\tg.Print(\"You hear footsteps.\")\n\t\t\t\tcount++\n\t\t\t}\n\t\t}\n\t}\n\tif count > 0 {\n\t\tg.StopAuto()\n\t}\n\tg.Noise = noise\n}\n<commit_msg>now winged monsters do not produce footstep noise but wing flapping<commit_after>package main\n\ntype raynode struct {\n\tCost int\n}\n\ntype rayMap map[position]raynode\n\nfunc (g *game) bestParent(rm rayMap, from, pos position) (position, int) {\n\tp := pos.Parents(from)\n\tb := p[0]\n\tif len(p) > 1 && rm[p[1]].Cost+g.losCost(p[1]) < rm[b].Cost+g.losCost(b) {\n\t\tb = p[1]\n\t}\n\treturn b, rm[b].Cost + g.losCost(b)\n}\n\nfunc (g *game) losCost(pos position) int {\n\tif g.Player.Pos == pos {\n\t\treturn 0\n\t}\n\tc := g.Dungeon.Cell(pos)\n\tif c.T == WallCell {\n\t\treturn g.LosRange()\n\t}\n\tif _, ok := g.Clouds[pos]; ok {\n\t\treturn g.LosRange()\n\t}\n\tif _, ok := g.Doors[pos]; ok {\n\t\tif pos != g.Player.Pos {\n\t\t\tmons := g.MonsterAt(pos)\n\t\t\tif !mons.Exists() {\n\t\t\t\treturn g.LosRange()\n\t\t\t}\n\t\t}\n\t}\n\tif _, ok := g.Fungus[pos]; ok {\n\t\treturn g.LosRange() - 1\n\t}\n\treturn 1\n}\n\nfunc (g *game) buildRayMap(from position, distance int) rayMap {\n\trm := rayMap{}\n\trm[from] = raynode{Cost: 0}\n\tfor d := 1; d <= distance; d++ {\n\t\tfor x := -d + from.X; x <= d+from.X; x++ {\n\t\t\tfor _, pos := range []position{{x, from.Y + d}, {x, from.Y - d}} {\n\t\t\t\tif !pos.valid() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t_, c := g.bestParent(rm, from, pos)\n\t\t\t\trm[pos] = raynode{Cost: c}\n\t\t\t}\n\t\t}\n\t\tfor y := -d + 1 + from.Y; y <= d-1+from.Y; y++ {\n\t\t\tfor _, pos := range []position{{from.X + d, y}, {from.X - d, y}} {\n\t\t\t\tif !pos.valid() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t_, c := g.bestParent(rm, from, pos)\n\t\t\t\trm[pos] = raynode{Cost: c}\n\t\t\t}\n\t\t}\n\t}\n\treturn rm\n}\n\nfunc (g *game) LosRange() int {\n\tlosRange := 6\n\tif g.Player.Armour == ShinyPlates {\n\t\tlosRange++\n\t}\n\tif g.Player.Aptitudes[AptStealthyLOS] {\n\t\tlosRange -= 2\n\t}\n\tif g.Player.Armour == HarmonistRobe {\n\t\tlosRange -= 1\n\t}\n\tif g.Player.Weapon == Frundis {\n\t\tlosRange -= 1\n\t}\n\tif g.Player.HasStatus(StatusShadows) {\n\t\tlosRange = 1\n\t}\n\tif losRange < 1 {\n\t\tlosRange = 1\n\t}\n\treturn losRange\n}\n\nfunc (g *game) StopAuto() {\n\tif g.Autoexploring && !g.AutoHalt {\n\t\tg.Print(\"You stop exploring.\")\n\t} else if g.AutoDir != NoDir {\n\t\tg.Print(\"You stop.\")\n\t} else if g.AutoTarget != InvalidPos {\n\t\tg.Print(\"You stop.\")\n\t}\n\tg.AutoHalt = true\n\tg.AutoDir = NoDir\n\tg.AutoTarget = InvalidPos\n\tif g.Resting {\n\t\tg.Stats.RestInterrupt++\n\t\tg.Resting = false\n\t\tg.Print(\"You could not sleep.\")\n\t}\n}\n\nfunc (g *game) ComputeLOS() {\n\tm := map[position]bool{}\n\tlosRange := g.LosRange()\n\tg.Player.Rays = g.buildRayMap(g.Player.Pos, losRange)\n\tfor pos, n := range g.Player.Rays {\n\t\tif n.Cost < g.LosRange() {\n\t\t\tm[pos] = true\n\t\t\tg.SeePosition(pos)\n\t\t}\n\t}\n\tg.Player.LOS = m\n\tfor _, mons := range g.Monsters {\n\t\tif mons.Exists() && g.Player.LOS[mons.Pos] {\n\t\t\tif mons.Seen {\n\t\t\t\tg.StopAuto()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmons.Seen = true\n\t\t\tg.Printf(\"You see %s (%v).\", mons.Kind.Indefinite(false), mons.State)\n\t\t\tif mons.Kind.Dangerousness() > 10 {\n\t\t\t\tg.StoryPrint(mons.Kind.SeenStoryText())\n\t\t\t}\n\t\t\tg.StopAuto()\n\t\t}\n\t}\n}\n\nfunc (g *game) SeePosition(pos position) {\n\tif !g.Dungeon.Cell(pos).Explored {\n\t\tsee := \"see\"\n\t\tif c, ok := g.Collectables[pos]; ok {\n\t\t\tif c.Quantity > 1 {\n\t\t\t\tg.Printf(\"You %s %d %s.\", see, c.Quantity, c.Consumable.Plural())\n\t\t\t} else {\n\t\t\t\tg.Printf(\"You %s %s.\", see, Indefinite(c.Consumable.String(), false))\n\t\t\t}\n\t\t\tg.StopAuto()\n\t\t} else if _, ok := g.Stairs[pos]; ok {\n\t\t\tg.Printf(\"You %s stairs.\", see)\n\t\t\tg.StopAuto()\n\t\t} else if eq, ok := g.Equipables[pos]; ok {\n\t\t\tg.Printf(\"You %s %s.\", see, Indefinite(eq.String(), false))\n\t\t\tg.StopAuto()\n\t\t} else if rd, ok := g.Rods[pos]; ok {\n\t\t\tg.Printf(\"You %s %s.\", see, Indefinite(rd.String(), false))\n\t\t\tg.StopAuto()\n\t\t} else if stn, ok := g.MagicalStones[pos]; ok {\n\t\t\tg.Printf(\"You %s %s.\", see, Indefinite(stn.String(), false))\n\t\t\tg.StopAuto()\n\t\t}\n\t\tg.FunAction()\n\t\tg.Dungeon.SetExplored(pos)\n\t\tg.DijkstraMapRebuild = true\n\t} else {\n\t\tif g.WrongWall[pos] {\n\t\t\tg.Printf(\"There is no longer a wall there.\")\n\t\t\tg.StopAuto()\n\t\t\tg.DijkstraMapRebuild = true\n\t\t}\n\t\tif cld, ok := g.Clouds[pos]; ok && cld == CloudFire && (g.WrongDoor[pos] || g.WrongFoliage[pos]) {\n\t\t\tg.Printf(\"There are flames there.\")\n\t\t\tg.StopAuto()\n\t\t\tg.DijkstraMapRebuild = true\n\t\t}\n\t}\n\tif g.WrongWall[pos] {\n\t\tdelete(g.WrongWall, pos)\n\t\tif g.Dungeon.Cell(pos).T == FreeCell {\n\t\t\tdelete(g.TemporalWalls, pos)\n\t\t}\n\t}\n\tif _, ok := g.WrongDoor[pos]; ok {\n\t\tdelete(g.WrongDoor, pos)\n\t}\n\tif _, ok := g.WrongFoliage[pos]; ok {\n\t\tdelete(g.WrongFoliage, pos)\n\t}\n\tif _, ok := g.DreamingMonster[pos]; ok {\n\t\tdelete(g.DreamingMonster, pos)\n\t}\n}\n\nfunc (g *game) ComputeExclusion(pos position, toggle bool) {\n\texclusionRange := g.LosRange()\n\tg.ExclusionsMap[pos] = toggle\n\tfor d := 1; d <= exclusionRange; d++ {\n\t\tfor x := -d + pos.X; x <= d+pos.X; x++ {\n\t\t\tfor _, pos := range []position{{x, pos.Y + d}, {x, pos.Y - d}} {\n\t\t\t\tif !pos.valid() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tg.ExclusionsMap[pos] = toggle\n\t\t\t}\n\t\t}\n\t\tfor y := -d + 1 + pos.Y; y <= d-1+pos.Y; y++ {\n\t\t\tfor _, pos := range []position{{pos.X + d, y}, {pos.X - d, y}} {\n\t\t\t\tif !pos.valid() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tg.ExclusionsMap[pos] = toggle\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (g *game) Ray(pos position) []position {\n\tif !g.Player.LOS[pos] {\n\t\treturn nil\n\t}\n\tray := []position{}\n\tfor pos != g.Player.Pos {\n\t\tray = append(ray, pos)\n\t\tpos, _ = g.bestParent(g.Player.Rays, g.Player.Pos, pos)\n\t}\n\treturn ray\n}\n\nfunc (g *game) ComputeRayHighlight(pos position) {\n\tg.Highlight = map[position]bool{}\n\tray := g.Ray(pos)\n\tfor _, p := range ray {\n\t\tg.Highlight[p] = true\n\t}\n}\n\nfunc (g *game) ComputeNoise() {\n\tdij := &noisePath{game: g}\n\trg := g.LosRange() + 2\n\tif rg <= 5 {\n\t\trg++\n\t}\n\tif g.Player.Aptitudes[AptHear] {\n\t\trg++\n\t}\n\tnm := Dijkstra(dij, []position{g.Player.Pos}, rg)\n\tcount := 0\n\tnoise := map[position]bool{}\n\trmax := 3\n\tif g.Player.Aptitudes[AptHear] {\n\t\trmax--\n\t}\n\tfor pos := range nm {\n\t\tif g.Player.LOS[pos] {\n\t\t\tcontinue\n\t\t}\n\t\tmons := g.MonsterAt(pos)\n\t\tif mons.Exists() && mons.State != Resting && RandInt(rmax) == 0 {\n\t\t\tswitch mons.Kind {\n\t\t\tcase MonsMirrorSpecter, MonsSatowalgaPlant:\n\t\t\t\t\/\/ no footsteps\n\t\t\tcase MonsTinyHarpy, MonsWingedMilfid, MonsGiantBee:\n\t\t\t\tnoise[pos] = true\n\t\t\t\tg.Print(\"You hear the flapping of wings.\")\n\t\t\t\tcount++\n\t\t\tdefault:\n\t\t\t\tnoise[pos] = true\n\t\t\t\tg.Print(\"You hear footsteps.\")\n\t\t\t\tcount++\n\t\t\t}\n\t\t}\n\t}\n\tif count > 0 {\n\t\tg.StopAuto()\n\t}\n\tg.Noise = noise\n}\n<|endoftext|>"} {"text":"<commit_before>package gorivets\n\nimport (\n\t\"container\/list\"\n\t\"strconv\"\n)\n\ntype (\n\tLru struct {\n\t\tlist *list.List\n\t\telements map[interface{}]*list.Element\n\t\tsize int64\n\t\tmaxSize int64\n\t\tcallback Callback\n\t}\n\n\telement struct {\n\t\tkey interface{}\n\t\tval interface{}\n\t\tsize int64\n\t}\n\n\tCallback func(k, v interface{})\n)\n\nfunc NewLRU(maxSize int64, callback Callback) *Lru {\n\tif maxSize < 1 {\n\t\tpanic(\"LRU size=\" + strconv.FormatInt(maxSize, 10) + \" should be positive.\")\n\t}\n\tl := new(Lru)\n\tl.list = list.New()\n\tl.elements = make(map[interface{}]*list.Element)\n\tl.size = 0\n\tl.maxSize = maxSize\n\tl.callback = callback\n\treturn l\n}\n\nfunc (lru *Lru) Add(k, v interface{}, size int64) {\n\tlru.Delete(k)\n\te := &element{key: k, val: v, size: size}\n\tel := lru.list.PushBack(e)\n\tlru.elements[k] = el\n\tlru.size += size\n\tfor lru.size > lru.maxSize && lru.deleteLast() {\n\n\t}\n}\n\nfunc (lru *Lru) Get(k interface{}) (interface{}, bool) {\n\tif e, ok := lru.elements[k]; ok {\n\t\tlru.list.MoveToBack(e)\n\t\treturn e.Value.(*element).val, true\n\t}\n\treturn nil, false\n}\n\nfunc (lru *Lru) Delete(k interface{}) interface{} {\n\treturn lru.DeleteWithCallback(k, true)\n}\n\nfunc (lru *Lru) DeleteWithCallback(k interface{}, callback bool) interface{} {\n\tel, ok := lru.elements[k]\n\tif !ok {\n\t\treturn nil\n\t}\n\tdelete(lru.elements, k)\n\te := lru.list.Remove(el).(*element)\n\tlru.size -= e.size\n\tif callback && lru.callback != nil {\n\t\tlru.callback(e.key, e.val)\n\t}\n\treturn e.val\n}\n\n\/\/ Clear the cache. This method will not invoke callbacks for the deleted\n\/\/ elements\nfunc (lru *Lru) Clear() {\n\tlru.list.Init()\n\tlru.elements = make(map[interface{}]*list.Element)\n\tlru.size = 0\n}\n\nfunc (lru *Lru) Len() int {\n\treturn len(lru.elements)\n}\n\nfunc (lru *Lru) Size() int64 {\n\treturn lru.size\n}\n\nfunc (lru *Lru) deleteLast() bool {\n\tel := lru.list.Front()\n\tif el == nil {\n\t\treturn false\n\t}\n\te := el.Value.(*element)\n\tlru.Delete(e.key)\n\treturn true\n}\n<commit_msg>rename callback type<commit_after>package gorivets\n\nimport (\n\t\"container\/list\"\n\t\"strconv\"\n)\n\ntype (\n\tLru struct {\n\t\tlist *list.List\n\t\telements map[interface{}]*list.Element\n\t\tsize int64\n\t\tmaxSize int64\n\t\tcallback LruCallback\n\t}\n\n\telement struct {\n\t\tkey interface{}\n\t\tval interface{}\n\t\tsize int64\n\t}\n\n\tLruCallback func(k, v interface{})\n)\n\nfunc NewLRU(maxSize int64, callback LruCallback) *Lru {\n\tif maxSize < 1 {\n\t\tpanic(\"LRU size=\" + strconv.FormatInt(maxSize, 10) + \" should be positive.\")\n\t}\n\tl := new(Lru)\n\tl.list = list.New()\n\tl.elements = make(map[interface{}]*list.Element)\n\tl.size = 0\n\tl.maxSize = maxSize\n\tl.callback = callback\n\treturn l\n}\n\nfunc (lru *Lru) Add(k, v interface{}, size int64) {\n\tlru.Delete(k)\n\te := &element{key: k, val: v, size: size}\n\tel := lru.list.PushBack(e)\n\tlru.elements[k] = el\n\tlru.size += size\n\tfor lru.size > lru.maxSize && lru.deleteLast() {\n\n\t}\n}\n\nfunc (lru *Lru) Get(k interface{}) (interface{}, bool) {\n\tif e, ok := lru.elements[k]; ok {\n\t\tlru.list.MoveToBack(e)\n\t\treturn e.Value.(*element).val, true\n\t}\n\treturn nil, false\n}\n\nfunc (lru *Lru) Delete(k interface{}) interface{} {\n\treturn lru.DeleteWithCallback(k, true)\n}\n\nfunc (lru *Lru) DeleteWithCallback(k interface{}, callback bool) interface{} {\n\tel, ok := lru.elements[k]\n\tif !ok {\n\t\treturn nil\n\t}\n\tdelete(lru.elements, k)\n\te := lru.list.Remove(el).(*element)\n\tlru.size -= e.size\n\tif callback && lru.callback != nil {\n\t\tlru.callback(e.key, e.val)\n\t}\n\treturn e.val\n}\n\n\/\/ Clear the cache. This method will not invoke callbacks for the deleted\n\/\/ elements\nfunc (lru *Lru) Clear() {\n\tlru.list.Init()\n\tlru.elements = make(map[interface{}]*list.Element)\n\tlru.size = 0\n}\n\nfunc (lru *Lru) Len() int {\n\treturn len(lru.elements)\n}\n\nfunc (lru *Lru) Size() int64 {\n\treturn lru.size\n}\n\nfunc (lru *Lru) deleteLast() bool {\n\tel := lru.list.Front()\n\tif el == nil {\n\t\treturn false\n\t}\n\te := el.Value.(*element)\n\tlru.Delete(e.key)\n\treturn true\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n * lxc.go: Go bindings for lxc\n *\n * Copyright © 2013, S.Çağlar Onur\n *\n * Authors:\n * S.Çağlar Onur <caglar@10ur.org>\n *\n * This library is free software; you can redistribute it and\/or modify\n * it under the terms of the GNU General Public License version 2, as\n * published by the Free Software Foundation.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License along\n * with this program; if not, write to the Free Software Foundation, Inc.,\n * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n *\/\n\n\/\/Go (golang) Bindings for LXC (Linux Containers)\n\/\/\n\/\/This package implements Go bindings for the LXC C API.\npackage lxc\n\n\/\/ #cgo linux LDFLAGS: -llxc -lutil\n\/\/ #include <lxc\/lxc.h>\n\/\/ #include <lxc\/lxccontainer.h>\n\/\/ #include \"lxc.h\"\nimport \"C\"\n\nimport (\n\t\"strings\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nconst (\n\t\/\/ Timeout\n\tWAIT_FOREVER int = iota\n\tDONT_WAIT\n\n\tLXC_NETWORK_KEY = \"lxc.network\"\n)\n\nfunc makeArgs(args []string) []*C.char {\n\tret := make([]*C.char, len(args))\n\tfor i, s := range args {\n\t\tret[i] = C.CString(s)\n\t}\n\treturn ret\n}\n\nfunc freeArgs(cArgs []*C.char) {\n\tfor _, s := range cArgs {\n\t\tC.free(unsafe.Pointer(s))\n\t}\n}\n\ntype Container struct {\n\tcontainer *C.struct_lxc_container\n}\n\nfunc (lxc *Container) Error() string {\n\treturn C.GoString(lxc.container.error_string)\n}\n\nfunc (lxc *Container) GetError() error {\n\treturn syscall.Errno(int(lxc.container.error_num))\n}\n\nfunc NewContainer(name string) Container {\n\tcname := C.CString(name)\n\tdefer C.free(unsafe.Pointer(cname))\n\treturn Container{C.lxc_container_new(cname, nil)}\n}\n\n\/\/ Returns LXC version\nfunc GetVersion() string {\n\treturn C.GoString(C.lxc_get_version())\n}\n\nfunc GetDefaultConfigPath() string {\n\treturn C.GoString(C.lxc_get_default_config_path())\n}\n\n\/\/ Returns container's name\nfunc (lxc *Container) GetName() string {\n\treturn C.GoString(lxc.container.name)\n}\n\n\/\/ Returns whether the container is already defined or not\nfunc (lxc *Container) Defined() bool {\n\treturn bool(C.lxc_container_defined(lxc.container))\n}\n\n\/\/ Returns whether the container is already running or not\nfunc (lxc *Container) Running() bool {\n\treturn bool(C.lxc_container_running(lxc.container))\n}\n\n\/\/ Returns the container's state\nfunc (lxc *Container) GetState() State {\n\treturn stateMap[C.GoString(C.lxc_container_state(lxc.container))]\n}\n\n\/\/ Returns the container's PID\nfunc (lxc *Container) GetInitPID() int {\n\treturn int(C.lxc_container_init_pid(lxc.container))\n}\n\n\/\/ Returns whether the daemonize flag is set\nfunc (lxc *Container) GetDaemonize() bool {\n\treturn bool(lxc.container.daemonize != 0)\n}\n\n\/\/ Sets the daemonize flag\nfunc (lxc *Container) SetDaemonize() {\n\tC.lxc_container_want_daemonize(lxc.container)\n}\n\n\/\/ Freezes the running container\nfunc (lxc *Container) Freeze() bool {\n\treturn bool(C.lxc_container_freeze(lxc.container))\n}\n\n\/\/ Unfreezes the frozen container\nfunc (lxc *Container) Unfreeze() bool {\n\treturn bool(C.lxc_container_unfreeze(lxc.container))\n}\n\n\/\/ Creates the container using given template and arguments\nfunc (lxc *Container) Create(template string, args []string) bool {\n\tctemplate := C.CString(template)\n\tdefer C.free(unsafe.Pointer(ctemplate))\n\tif args != nil {\n\t\tcargs := makeArgs(args)\n\t\tdefer freeArgs(cargs)\n\t\treturn bool(C.lxc_container_create(lxc.container, ctemplate, &cargs[0]))\n\t}\n\treturn bool(C.lxc_container_create(lxc.container, ctemplate, nil))\n}\n\n\/\/ Starts the container\nfunc (lxc *Container) Start(useinit bool, args []string) bool {\n\tcuseinit := 0\n\tif useinit {\n\t\tcuseinit = 1\n\t}\n\tif args != nil {\n\t\tcargs := makeArgs(args)\n\t\tdefer freeArgs(cargs)\n\t\treturn bool(C.lxc_container_start(lxc.container, C.int(cuseinit), &cargs[0]))\n\t}\n\treturn bool(C.lxc_container_start(lxc.container, C.int(cuseinit), nil))\n}\n\n\/\/ Stops the container\nfunc (lxc *Container) Stop() bool {\n\treturn bool(C.lxc_container_stop(lxc.container))\n}\n\n\/\/ Shutdowns the container\nfunc (lxc *Container) Shutdown(timeout int) bool {\n\treturn bool(C.lxc_container_shutdown(lxc.container, C.int(timeout)))\n}\n\n\/\/ Destroys the container\nfunc (lxc *Container) Destroy() bool {\n\treturn bool(C.lxc_container_destroy(lxc.container))\n}\n\n\/\/ Waits till the container changes its state or timeouts\nfunc (lxc *Container) Wait(state State, timeout int) bool {\n\tcstate := C.CString(state.String())\n\tdefer C.free(unsafe.Pointer(cstate))\n\treturn bool(C.lxc_container_wait(lxc.container, cstate, C.int(timeout)))\n}\n\n\/\/ Returns the container's configuration file's name\nfunc (lxc *Container) GetConfigFileName() string {\n\treturn C.GoString(C.lxc_container_config_file_name(lxc.container))\n}\n\n\/\/ Returns the value of the given key\nfunc (lxc *Container) GetConfigItem(key string) []string {\n\tckey := C.CString(key)\n\tdefer C.free(unsafe.Pointer(ckey))\n\tret := strings.TrimSpace(C.GoString(C.lxc_container_get_config_item(lxc.container, ckey)))\n\treturn strings.Split(ret, \"\\n\")\n}\n\n\/\/ Sets the value of given key\nfunc (lxc *Container) SetConfigItem(key string, value string) bool {\n\tckey := C.CString(key)\n\tdefer C.free(unsafe.Pointer(ckey))\n\tcvalue := C.CString(value)\n\tdefer C.free(unsafe.Pointer(cvalue))\n\treturn bool(C.lxc_container_set_config_item(lxc.container, ckey, cvalue))\n}\n\n\/\/ Returns the value of the given key\nfunc (lxc *Container) GetCgroupItem(key string) []string {\n\tckey := C.CString(key)\n\tdefer C.free(unsafe.Pointer(ckey))\n\tret := strings.TrimSpace(C.GoString(C.lxc_container_get_cgroup_item(lxc.container, ckey)))\n\treturn strings.Split(ret, \"\\n\")\n}\n\n\/\/ Sets the value of given key\nfunc (lxc *Container) SetCgroupItem(key string, value string) bool {\n\tckey := C.CString(key)\n\tdefer C.free(unsafe.Pointer(ckey))\n\tcvalue := C.CString(value)\n\tdefer C.free(unsafe.Pointer(cvalue))\n\treturn bool(C.lxc_container_set_cgroup_item(lxc.container, ckey, cvalue))\n}\n\n\/\/ Clears the value of given key\nfunc (lxc *Container) ClearConfigItem(key string) bool {\n\tckey := C.CString(key)\n\tdefer C.free(unsafe.Pointer(ckey))\n\treturn bool(C.lxc_container_clear_config_item(lxc.container, ckey))\n}\n\n\/\/ Returns the keys\nfunc (lxc *Container) GetKeys(key string) []string {\n\tckey := C.CString(key)\n\tdefer C.free(unsafe.Pointer(ckey))\n\tret := strings.TrimSpace(C.GoString(C.lxc_container_get_keys(lxc.container, ckey)))\n\treturn strings.Split(ret, \"\\n\")\n}\n\n\/\/ Loads the configuration file from given path\nfunc (lxc *Container) LoadConfigFile(path string) bool {\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\treturn bool(C.lxc_container_load_config(lxc.container, cpath))\n}\n\n\/\/ Saves the configuration file to given path\nfunc (lxc *Container) SaveConfigFile(path string) bool {\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\treturn bool(C.lxc_container_save_config(lxc.container, cpath))\n}\n\nfunc (lxc *Container) GetConfigPath() string {\n\treturn C.GoString(C.lxc_container_get_config_path(lxc.container))\n}\n\nfunc (lxc *Container) SetConfigPath(path string) bool {\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\treturn bool(C.lxc_container_set_config_path(lxc.container, cpath))\n}\n\nfunc (lxc *Container) GetNumberOfNetworkInterfaces() int {\n\treturn len(lxc.GetConfigItem(LXC_NETWORK_KEY))\n}\n<commit_msg>WAIT_FOREVER is -1 not 0<commit_after>\/*\n * lxc.go: Go bindings for lxc\n *\n * Copyright © 2013, S.Çağlar Onur\n *\n * Authors:\n * S.Çağlar Onur <caglar@10ur.org>\n *\n * This library is free software; you can redistribute it and\/or modify\n * it under the terms of the GNU General Public License version 2, as\n * published by the Free Software Foundation.\n *\n * This program is distributed in the hope that it will be useful,\n * but WITHOUT ANY WARRANTY; without even the implied warranty of\n * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n * GNU General Public License for more details.\n *\n * You should have received a copy of the GNU General Public License along\n * with this program; if not, write to the Free Software Foundation, Inc.,\n * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n *\/\n\n\/\/Go (golang) Bindings for LXC (Linux Containers)\n\/\/\n\/\/This package implements Go bindings for the LXC C API.\npackage lxc\n\n\/\/ #cgo linux LDFLAGS: -llxc -lutil\n\/\/ #include <lxc\/lxc.h>\n\/\/ #include <lxc\/lxccontainer.h>\n\/\/ #include \"lxc.h\"\nimport \"C\"\n\nimport (\n\t\"strings\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nconst (\n\t\/\/ Timeout\n\tWAIT_FOREVER int = iota - 1\n\tDONT_WAIT\n\n\tLXC_NETWORK_KEY = \"lxc.network\"\n)\n\nfunc makeArgs(args []string) []*C.char {\n\tret := make([]*C.char, len(args))\n\tfor i, s := range args {\n\t\tret[i] = C.CString(s)\n\t}\n\treturn ret\n}\n\nfunc freeArgs(cArgs []*C.char) {\n\tfor _, s := range cArgs {\n\t\tC.free(unsafe.Pointer(s))\n\t}\n}\n\ntype Container struct {\n\tcontainer *C.struct_lxc_container\n}\n\nfunc (lxc *Container) Error() string {\n\treturn C.GoString(lxc.container.error_string)\n}\n\nfunc (lxc *Container) GetError() error {\n\treturn syscall.Errno(int(lxc.container.error_num))\n}\n\nfunc NewContainer(name string) Container {\n\tcname := C.CString(name)\n\tdefer C.free(unsafe.Pointer(cname))\n\treturn Container{C.lxc_container_new(cname, nil)}\n}\n\n\/\/ Returns LXC version\nfunc GetVersion() string {\n\treturn C.GoString(C.lxc_get_version())\n}\n\nfunc GetDefaultConfigPath() string {\n\treturn C.GoString(C.lxc_get_default_config_path())\n}\n\n\/\/ Returns container's name\nfunc (lxc *Container) GetName() string {\n\treturn C.GoString(lxc.container.name)\n}\n\n\/\/ Returns whether the container is already defined or not\nfunc (lxc *Container) Defined() bool {\n\treturn bool(C.lxc_container_defined(lxc.container))\n}\n\n\/\/ Returns whether the container is already running or not\nfunc (lxc *Container) Running() bool {\n\treturn bool(C.lxc_container_running(lxc.container))\n}\n\n\/\/ Returns the container's state\nfunc (lxc *Container) GetState() State {\n\treturn stateMap[C.GoString(C.lxc_container_state(lxc.container))]\n}\n\n\/\/ Returns the container's PID\nfunc (lxc *Container) GetInitPID() int {\n\treturn int(C.lxc_container_init_pid(lxc.container))\n}\n\n\/\/ Returns whether the daemonize flag is set\nfunc (lxc *Container) GetDaemonize() bool {\n\treturn bool(lxc.container.daemonize != 0)\n}\n\n\/\/ Sets the daemonize flag\nfunc (lxc *Container) SetDaemonize() {\n\tC.lxc_container_want_daemonize(lxc.container)\n}\n\n\/\/ Freezes the running container\nfunc (lxc *Container) Freeze() bool {\n\treturn bool(C.lxc_container_freeze(lxc.container))\n}\n\n\/\/ Unfreezes the frozen container\nfunc (lxc *Container) Unfreeze() bool {\n\treturn bool(C.lxc_container_unfreeze(lxc.container))\n}\n\n\/\/ Creates the container using given template and arguments\nfunc (lxc *Container) Create(template string, args []string) bool {\n\tctemplate := C.CString(template)\n\tdefer C.free(unsafe.Pointer(ctemplate))\n\tif args != nil {\n\t\tcargs := makeArgs(args)\n\t\tdefer freeArgs(cargs)\n\t\treturn bool(C.lxc_container_create(lxc.container, ctemplate, &cargs[0]))\n\t}\n\treturn bool(C.lxc_container_create(lxc.container, ctemplate, nil))\n}\n\n\/\/ Starts the container\nfunc (lxc *Container) Start(useinit bool, args []string) bool {\n\tcuseinit := 0\n\tif useinit {\n\t\tcuseinit = 1\n\t}\n\tif args != nil {\n\t\tcargs := makeArgs(args)\n\t\tdefer freeArgs(cargs)\n\t\treturn bool(C.lxc_container_start(lxc.container, C.int(cuseinit), &cargs[0]))\n\t}\n\treturn bool(C.lxc_container_start(lxc.container, C.int(cuseinit), nil))\n}\n\n\/\/ Stops the container\nfunc (lxc *Container) Stop() bool {\n\treturn bool(C.lxc_container_stop(lxc.container))\n}\n\n\/\/ Shutdowns the container\nfunc (lxc *Container) Shutdown(timeout int) bool {\n\treturn bool(C.lxc_container_shutdown(lxc.container, C.int(timeout)))\n}\n\n\/\/ Destroys the container\nfunc (lxc *Container) Destroy() bool {\n\treturn bool(C.lxc_container_destroy(lxc.container))\n}\n\n\/\/ Waits till the container changes its state or timeouts\nfunc (lxc *Container) Wait(state State, timeout int) bool {\n\tcstate := C.CString(state.String())\n\tdefer C.free(unsafe.Pointer(cstate))\n\treturn bool(C.lxc_container_wait(lxc.container, cstate, C.int(timeout)))\n}\n\n\/\/ Returns the container's configuration file's name\nfunc (lxc *Container) GetConfigFileName() string {\n\treturn C.GoString(C.lxc_container_config_file_name(lxc.container))\n}\n\n\/\/ Returns the value of the given key\nfunc (lxc *Container) GetConfigItem(key string) []string {\n\tckey := C.CString(key)\n\tdefer C.free(unsafe.Pointer(ckey))\n\tret := strings.TrimSpace(C.GoString(C.lxc_container_get_config_item(lxc.container, ckey)))\n\treturn strings.Split(ret, \"\\n\")\n}\n\n\/\/ Sets the value of given key\nfunc (lxc *Container) SetConfigItem(key string, value string) bool {\n\tckey := C.CString(key)\n\tdefer C.free(unsafe.Pointer(ckey))\n\tcvalue := C.CString(value)\n\tdefer C.free(unsafe.Pointer(cvalue))\n\treturn bool(C.lxc_container_set_config_item(lxc.container, ckey, cvalue))\n}\n\n\/\/ Returns the value of the given key\nfunc (lxc *Container) GetCgroupItem(key string) []string {\n\tckey := C.CString(key)\n\tdefer C.free(unsafe.Pointer(ckey))\n\tret := strings.TrimSpace(C.GoString(C.lxc_container_get_cgroup_item(lxc.container, ckey)))\n\treturn strings.Split(ret, \"\\n\")\n}\n\n\/\/ Sets the value of given key\nfunc (lxc *Container) SetCgroupItem(key string, value string) bool {\n\tckey := C.CString(key)\n\tdefer C.free(unsafe.Pointer(ckey))\n\tcvalue := C.CString(value)\n\tdefer C.free(unsafe.Pointer(cvalue))\n\treturn bool(C.lxc_container_set_cgroup_item(lxc.container, ckey, cvalue))\n}\n\n\/\/ Clears the value of given key\nfunc (lxc *Container) ClearConfigItem(key string) bool {\n\tckey := C.CString(key)\n\tdefer C.free(unsafe.Pointer(ckey))\n\treturn bool(C.lxc_container_clear_config_item(lxc.container, ckey))\n}\n\n\/\/ Returns the keys\nfunc (lxc *Container) GetKeys(key string) []string {\n\tckey := C.CString(key)\n\tdefer C.free(unsafe.Pointer(ckey))\n\tret := strings.TrimSpace(C.GoString(C.lxc_container_get_keys(lxc.container, ckey)))\n\treturn strings.Split(ret, \"\\n\")\n}\n\n\/\/ Loads the configuration file from given path\nfunc (lxc *Container) LoadConfigFile(path string) bool {\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\treturn bool(C.lxc_container_load_config(lxc.container, cpath))\n}\n\n\/\/ Saves the configuration file to given path\nfunc (lxc *Container) SaveConfigFile(path string) bool {\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\treturn bool(C.lxc_container_save_config(lxc.container, cpath))\n}\n\nfunc (lxc *Container) GetConfigPath() string {\n\treturn C.GoString(C.lxc_container_get_config_path(lxc.container))\n}\n\nfunc (lxc *Container) SetConfigPath(path string) bool {\n\tcpath := C.CString(path)\n\tdefer C.free(unsafe.Pointer(cpath))\n\treturn bool(C.lxc_container_set_config_path(lxc.container, cpath))\n}\n\nfunc (lxc *Container) GetNumberOfNetworkInterfaces() int {\n\treturn len(lxc.GetConfigItem(LXC_NETWORK_KEY))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 The Wuffs Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ ----------------\n\n\/\/ Package cgozlib wraps the C \"zlib\" library.\n\/\/\n\/\/ Unlike some other wrappers, this one supports dictionaries.\npackage cgozlib\n\n\/*\n#cgo pkg-config: zlib\n#include \"zlib.h\"\n\ntypedef struct {\n\tuInt ndst;\n\tuInt nsrc;\n} advances;\n\nint cgozlib_inflateInit(z_stream* z) {\n\treturn inflateInit(z);\n}\n\nint cgozlib_inflateSetDictionary(z_stream* z,\n\t\tBytef* dict_ptr,\n\t\tuInt dict_len) {\n\treturn inflateSetDictionary(z, dict_ptr, dict_len);\n}\n\nint cgozlib_inflate(z_stream* z,\n\t\tadvances *a,\n\t\tBytef* next_out,\n\t\tuInt avail_out,\n\t\tBytef* next_in,\n\t\tuInt avail_in) {\n\tz->next_out = next_out;\n\tz->avail_out = avail_out;\n\tz->next_in = next_in;\n\tz->avail_in = avail_in;\n\n\tint ret = inflate(z, Z_NO_FLUSH);\n\n\ta->ndst = avail_out - z->avail_out;\n\ta->nsrc = avail_in - z->avail_in;\n\n\tz->next_out = NULL;\n\tz->avail_out = 0;\n\tz->next_in = NULL;\n\tz->avail_in = 0;\n\n\treturn ret;\n}\n\nint cgozlib_inflateEnd(z_stream* z) {\n\treturn inflateEnd(z);\n}\n*\/\nimport \"C\"\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"unsafe\"\n)\n\nconst cgoEnabled = true\n\nvar (\n\terrMissingResetCall = errors.New(\"cgozlib: missing Reset call\")\n\terrNilIOReader = errors.New(\"cgozlib: nil io.Reader\")\n\terrNilReceiver = errors.New(\"cgozlib: nil receiver\")\n)\n\nconst (\n\terrCodeStreamEnd = 1\n\terrCodeNeedDict = 2\n)\n\ntype errCode int32\n\nfunc (e errCode) Error() string {\n\tswitch e {\n\tcase +1:\n\t\treturn \"cgozlib: Z_STREAM_END\"\n\tcase +2:\n\t\treturn \"cgozlib: Z_NEED_DICT\"\n\tcase -1:\n\t\treturn \"cgozlib: Z_ERRNO\"\n\tcase -2:\n\t\treturn \"cgozlib: Z_STREAM_ERROR\"\n\tcase -3:\n\t\treturn \"cgozlib: Z_DATA_ERROR\"\n\tcase -4:\n\t\treturn \"cgozlib: Z_MEM_ERROR\"\n\tcase -5:\n\t\treturn \"cgozlib: Z_BUF_ERROR\"\n\tcase -6:\n\t\treturn \"cgozlib: Z_VERSION_ERROR\"\n\t}\n\treturn \"cgozlib: unknown zlib error\"\n}\n\n\/\/ Reader is both a zlib.Resetter and an io.ReadCloser. Call Reset before\n\/\/ calling Read.\n\/\/\n\/\/ It is analogous to the value returned by zlib.NewReader in the Go standard\n\/\/ library.\ntype Reader struct {\n\tbuf [4096]byte\n\ti, j uint32\n\tr io.Reader\n\tdict []byte\n\n\treadErr error\n\tzlibErr error\n\n\tz C.z_stream\n\ta C.advances\n}\n\n\/\/ Reset implements compression.Reader.\nfunc (r *Reader) Reset(reader io.Reader, dictionary []byte) error {\n\tif r == nil {\n\t\treturn errNilReceiver\n\t}\n\tif reader == nil {\n\t\treturn errNilIOReader\n\t}\n\tif err := r.Close(); err != nil {\n\t\treturn err\n\t}\n\n\tif e := C.cgozlib_inflateInit(&r.z); e != 0 {\n\t\treturn errCode(e)\n\t}\n\n\tr.r = reader\n\tr.dict = dictionary\n\tif n := len(r.dict); n > 32768 {\n\t\tr.dict = r.dict[n-32768:]\n\t}\n\treturn nil\n}\n\n\/\/ Close implements compression.Reader.\nfunc (r *Reader) Close() error {\n\tif r == nil {\n\t\treturn errNilReceiver\n\t}\n\tif r.r == nil {\n\t\treturn nil\n\t}\n\tr.i = 0\n\tr.j = 0\n\tr.r = nil\n\tr.dict = nil\n\tr.readErr = nil\n\tr.zlibErr = nil\n\tif e := C.cgozlib_inflateEnd(&r.z); e != 0 {\n\t\treturn errCode(e)\n\t}\n\treturn nil\n}\n\n\/\/ Read implements compression.Reader.\nfunc (r *Reader) Read(p []byte) (int, error) {\n\tif r == nil {\n\t\treturn 0, errNilReceiver\n\t}\n\tif r.r == nil {\n\t\treturn 0, errMissingResetCall\n\t}\n\n\tconst maxLen = 1 << 30\n\tif len(p) > maxLen {\n\t\tp = p[:maxLen]\n\t}\n\n\tfor numRead := 0; ; {\n\t\tif r.zlibErr != nil {\n\t\t\treturn numRead, r.zlibErr\n\t\t}\n\t\tif len(p) == 0 {\n\t\t\treturn numRead, nil\n\t\t}\n\n\t\tif r.i >= r.j {\n\t\t\tif r.readErr != nil {\n\t\t\t\treturn numRead, r.readErr\n\t\t\t}\n\n\t\t\tn, err := r.r.Read(r.buf[:])\n\t\t\tif err == io.EOF {\n\t\t\t\terr = io.ErrUnexpectedEOF\n\t\t\t}\n\t\t\tr.i, r.j, r.readErr = 0, uint32(n), err\n\t\t\tcontinue\n\t\t}\n\n\t\te := C.cgozlib_inflate(&r.z, &r.a,\n\t\t\t(*C.Bytef)(unsafe.Pointer(&p[0])),\n\t\t\t(C.uInt)(len(p)),\n\t\t\t(*C.Bytef)(unsafe.Pointer(&r.buf[r.i])),\n\t\t\t(C.uInt)(r.j-r.i),\n\t\t)\n\n\t\tnumRead += int(r.a.ndst)\n\t\tp = p[int(r.a.ndst):]\n\n\t\tr.i += uint32(r.a.nsrc)\n\n\t\tif e == 0 {\n\t\t\tcontinue\n\t\t} else if e == errCodeStreamEnd {\n\t\t\tr.zlibErr = io.EOF\n\t\t} else if (e == errCodeNeedDict) && (len(r.dict) > 0) {\n\t\t\te = C.cgozlib_inflateSetDictionary(&r.z,\n\t\t\t\t(*C.Bytef)(unsafe.Pointer(&r.dict[0])),\n\t\t\t\t(C.uInt)(len(r.dict)),\n\t\t\t)\n\t\t\tif e == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tr.zlibErr = errCode(e)\n\t\t} else {\n\t\t\tr.zlibErr = errCode(e)\n\t\t}\n\t\treturn numRead, r.zlibErr\n\t}\n}\n<commit_msg>Tweak cgozlib to be consistent with cgozstd<commit_after>\/\/ Copyright 2019 The Wuffs Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ https:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ ----------------\n\n\/\/ Package cgozlib wraps the C \"zlib\" library.\n\/\/\n\/\/ Unlike some other wrappers, this one supports dictionaries.\npackage cgozlib\n\n\/*\n#cgo pkg-config: zlib\n#include \"zlib.h\"\n\ntypedef struct {\n\tuInt ndst;\n\tuInt nsrc;\n} advances;\n\nint cgozlib_inflateInit(z_stream* z) {\n\treturn inflateInit(z);\n}\n\nint cgozlib_inflateSetDictionary(z_stream* z,\n\t\tBytef* dict_ptr,\n\t\tuInt dict_len) {\n\treturn inflateSetDictionary(z, dict_ptr, dict_len);\n}\n\nint cgozlib_inflate(z_stream* z,\n\t\tadvances* a,\n\t\tBytef* next_out,\n\t\tuInt avail_out,\n\t\tBytef* next_in,\n\t\tuInt avail_in) {\n\tz->next_out = next_out;\n\tz->avail_out = avail_out;\n\tz->next_in = next_in;\n\tz->avail_in = avail_in;\n\n\tint ret = inflate(z, Z_NO_FLUSH);\n\n\ta->ndst = avail_out - z->avail_out;\n\ta->nsrc = avail_in - z->avail_in;\n\n\tz->next_out = NULL;\n\tz->avail_out = 0;\n\tz->next_in = NULL;\n\tz->avail_in = 0;\n\n\treturn ret;\n}\n\nint cgozlib_inflateEnd(z_stream* z) {\n\treturn inflateEnd(z);\n}\n*\/\nimport \"C\"\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"unsafe\"\n)\n\nconst cgoEnabled = true\n\nvar (\n\terrMissingResetCall = errors.New(\"cgozlib: missing Reset call\")\n\terrNilIOReader = errors.New(\"cgozlib: nil io.Reader\")\n\terrNilReceiver = errors.New(\"cgozlib: nil receiver\")\n)\n\nconst (\n\terrCodeStreamEnd = 1\n\terrCodeNeedDict = 2\n)\n\ntype errCode int32\n\nfunc (e errCode) Error() string {\n\tswitch e {\n\tcase +1:\n\t\treturn \"cgozlib: Z_STREAM_END\"\n\tcase +2:\n\t\treturn \"cgozlib: Z_NEED_DICT\"\n\tcase -1:\n\t\treturn \"cgozlib: Z_ERRNO\"\n\tcase -2:\n\t\treturn \"cgozlib: Z_STREAM_ERROR\"\n\tcase -3:\n\t\treturn \"cgozlib: Z_DATA_ERROR\"\n\tcase -4:\n\t\treturn \"cgozlib: Z_MEM_ERROR\"\n\tcase -5:\n\t\treturn \"cgozlib: Z_BUF_ERROR\"\n\tcase -6:\n\t\treturn \"cgozlib: Z_VERSION_ERROR\"\n\t}\n\treturn \"cgozlib: unknown zlib error\"\n}\n\n\/\/ Reader is both a zlib.Resetter and an io.ReadCloser. Call Reset before\n\/\/ calling Read.\n\/\/\n\/\/ It is analogous to the value returned by zlib.NewReader in the Go standard\n\/\/ library.\ntype Reader struct {\n\tbuf [4096]byte\n\ti, j uint32\n\tr io.Reader\n\tdict []byte\n\n\treadErr error\n\tzlibErr error\n\n\tz C.z_stream\n\ta C.advances\n}\n\n\/\/ Reset implements compression.Reader.\nfunc (r *Reader) Reset(reader io.Reader, dictionary []byte) error {\n\tif r == nil {\n\t\treturn errNilReceiver\n\t}\n\tif err := r.Close(); err != nil {\n\t\treturn err\n\t}\n\tif reader == nil {\n\t\treturn errNilIOReader\n\t}\n\n\tif e := C.cgozlib_inflateInit(&r.z); e != 0 {\n\t\treturn errCode(e)\n\t}\n\n\tr.r = reader\n\tr.dict = dictionary\n\tif n := len(r.dict); n > 32768 {\n\t\tr.dict = r.dict[n-32768:]\n\t}\n\treturn nil\n}\n\n\/\/ Close implements compression.Reader.\nfunc (r *Reader) Close() error {\n\tif r == nil {\n\t\treturn errNilReceiver\n\t}\n\tif r.r == nil {\n\t\treturn nil\n\t}\n\tr.i = 0\n\tr.j = 0\n\tr.r = nil\n\tr.dict = nil\n\tr.readErr = nil\n\tr.zlibErr = nil\n\tif e := C.cgozlib_inflateEnd(&r.z); e != 0 {\n\t\treturn errCode(e)\n\t}\n\treturn nil\n}\n\n\/\/ Read implements compression.Reader.\nfunc (r *Reader) Read(p []byte) (int, error) {\n\tif r == nil {\n\t\treturn 0, errNilReceiver\n\t}\n\tif r.r == nil {\n\t\treturn 0, errMissingResetCall\n\t}\n\n\tconst maxLen = 1 << 30\n\tif len(p) > maxLen {\n\t\tp = p[:maxLen]\n\t}\n\n\tfor numRead := 0; ; {\n\t\tif r.zlibErr != nil {\n\t\t\treturn numRead, r.zlibErr\n\t\t}\n\t\tif len(p) == 0 {\n\t\t\treturn numRead, nil\n\t\t}\n\n\t\tif r.i >= r.j {\n\t\t\tif r.readErr != nil {\n\t\t\t\treturn numRead, r.readErr\n\t\t\t}\n\n\t\t\tn, err := r.r.Read(r.buf[:])\n\t\t\tif err == io.EOF {\n\t\t\t\terr = io.ErrUnexpectedEOF\n\t\t\t}\n\t\t\tr.i, r.j, r.readErr = 0, uint32(n), err\n\t\t\tcontinue\n\t\t}\n\n\t\te := C.cgozlib_inflate(&r.z, &r.a,\n\t\t\t(*C.Bytef)(unsafe.Pointer(&p[0])),\n\t\t\t(C.uInt)(len(p)),\n\t\t\t(*C.Bytef)(unsafe.Pointer(&r.buf[r.i])),\n\t\t\t(C.uInt)(r.j-r.i),\n\t\t)\n\n\t\tnumRead += int(r.a.ndst)\n\t\tp = p[int(r.a.ndst):]\n\n\t\tr.i += uint32(r.a.nsrc)\n\n\t\tif e == 0 {\n\t\t\tcontinue\n\t\t} else if e == errCodeStreamEnd {\n\t\t\tr.zlibErr = io.EOF\n\t\t} else if (e == errCodeNeedDict) && (len(r.dict) > 0) {\n\t\t\te = C.cgozlib_inflateSetDictionary(&r.z,\n\t\t\t\t(*C.Bytef)(unsafe.Pointer(&r.dict[0])),\n\t\t\t\t(C.uInt)(len(r.dict)),\n\t\t\t)\n\t\t\tif e == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tr.zlibErr = errCode(e)\n\t\t} else {\n\t\t\tr.zlibErr = errCode(e)\n\t\t}\n\t\treturn numRead, r.zlibErr\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package discovery\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\tlogging \"github.com\/ipfs\/go-log\"\n\t\"github.com\/libp2p\/go-libp2p-host\"\n\t\"github.com\/libp2p\/go-libp2p-peer\"\n\tpstore \"github.com\/libp2p\/go-libp2p-peerstore\"\n\tma \"github.com\/multiformats\/go-multiaddr\"\n\tmanet \"github.com\/multiformats\/go-multiaddr-net\"\n\t\"github.com\/whyrusleeping\/mdns\"\n)\n\nvar log = logging.Logger(\"mdns\")\n\nconst ServiceTag = \"_ipfs-discovery._udp\"\n\ntype Service interface {\n\tio.Closer\n\tRegisterNotifee(Notifee)\n\tUnregisterNotifee(Notifee)\n}\n\ntype Notifee interface {\n\tHandlePeerFound(pstore.PeerInfo)\n}\n\ntype mdnsService struct {\n\tserver *mdns.Server\n\tservice *mdns.MDNSService\n\thost host.Host\n\ttag string\n\n\tlk sync.Mutex\n\tnotifees []Notifee\n\tinterval time.Duration\n}\n\nfunc getDialableListenAddrs(ph host.Host) ([]*net.TCPAddr, error) {\n\tvar out []*net.TCPAddr\n\tfor _, addr := range ph.Addrs() {\n\t\tna, err := manet.ToNetAddr(addr)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\ttcp, ok := na.(*net.TCPAddr)\n\t\tif ok {\n\t\t\tout = append(out, tcp)\n\t\t}\n\t}\n\tif len(out) == 0 {\n\t\treturn nil, errors.New(\"failed to find good external addr from peerhost\")\n\t}\n\treturn out, nil\n}\n\nfunc NewMdnsService(ctx context.Context, peerhost host.Host, interval time.Duration, serviceTag string) (Service, error) {\n\n\t\/\/ don't let mdns use logging...\n\tmdns.DisableLogging = true\n\n\tvar ipaddrs []net.IP\n\tport := 4001\n\n\taddrs, err := getDialableListenAddrs(peerhost)\n\tif err != nil {\n\t\tlog.Warning(err)\n\t} else {\n\t\tport = addrs[0].Port\n\t\tfor _, a := range addrs {\n\t\t\tipaddrs = append(ipaddrs, a.IP)\n\t\t}\n\t}\n\n\tmyid := peerhost.ID().Pretty()\n\n\tinfo := []string{myid}\n\tif serviceTag == \"\" {\n\t\tserviceTag = ServiceTag\n\t}\n\tservice, err := mdns.NewMDNSService(myid, serviceTag, \"\", \"\", port, ipaddrs, info)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Create the mDNS server, defer shutdown\n\tserver, err := mdns.NewServer(&mdns.Config{Zone: service})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := &mdnsService{\n\t\tserver: server,\n\t\tservice: service,\n\t\thost: peerhost,\n\t\tinterval: interval,\n\t\ttag: serviceTag,\n\t}\n\n\tgo s.pollForEntries(ctx)\n\n\treturn s, nil\n}\n\nfunc (m *mdnsService) Close() error {\n\treturn m.server.Shutdown()\n}\n\nfunc (m *mdnsService) pollForEntries(ctx context.Context) {\n\n\tticker := time.NewTicker(m.interval)\n\tfor {\n\t\t\/\/execute mdns query right away at method call and then with every tick\n\t\tentriesCh := make(chan *mdns.ServiceEntry, 16)\n\t\tgo func() {\n\t\t\tfor entry := range entriesCh {\n\t\t\t\tm.handleEntry(entry)\n\t\t\t}\n\t\t}()\n\n\t\tlog.Debug(\"starting mdns query\")\n\t\tqp := &mdns.QueryParam{\n\t\t\tDomain: \"local\",\n\t\t\tEntries: entriesCh,\n\t\t\tService: m.tag,\n\t\t\tTimeout: time.Second * 5,\n\t\t}\n\n\t\terr := mdns.Query(qp)\n\t\tif err != nil {\n\t\t\tlog.Error(\"mdns lookup error: \", err)\n\t\t}\n\t\tclose(entriesCh)\n\t\tlog.Debug(\"mdns query complete\")\n\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tcontinue\n\t\tcase <-ctx.Done():\n\t\t\tlog.Debug(\"mdns service halting\")\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (m *mdnsService) handleEntry(e *mdns.ServiceEntry) {\n\tlog.Debugf(\"Handling MDNS entry: %s:%d %s\", e.AddrV4, e.Port, e.Info)\n\tmpeer, err := peer.IDB58Decode(e.Info)\n\tif err != nil {\n\t\tlog.Warning(\"Error parsing peer ID from mdns entry: \", err)\n\t\treturn\n\t}\n\n\tif mpeer == m.host.ID() {\n\t\tlog.Debug(\"got our own mdns entry, skipping\")\n\t\treturn\n\t}\n\n\tmaddr, err := manet.FromNetAddr(&net.TCPAddr{\n\t\tIP: e.AddrV4,\n\t\tPort: e.Port,\n\t})\n\tif err != nil {\n\t\tlog.Warning(\"Error parsing multiaddr from mdns entry: \", err)\n\t\treturn\n\t}\n\n\tpi := pstore.PeerInfo{\n\t\tID: mpeer,\n\t\tAddrs: []ma.Multiaddr{maddr},\n\t}\n\n\tm.lk.Lock()\n\tfor _, n := range m.notifees {\n\t\tgo n.HandlePeerFound(pi)\n\t}\n\tm.lk.Unlock()\n}\n\nfunc (m *mdnsService) RegisterNotifee(n Notifee) {\n\tm.lk.Lock()\n\tm.notifees = append(m.notifees, n)\n\tm.lk.Unlock()\n}\n\nfunc (m *mdnsService) UnregisterNotifee(n Notifee) {\n\tm.lk.Lock()\n\tfound := -1\n\tfor i, notif := range m.notifees {\n\t\tif notif == n {\n\t\t\tfound = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif found != -1 {\n\t\tm.notifees = append(m.notifees[:found], m.notifees[found+1:]...)\n\t}\n\tm.lk.Unlock()\n}\n<commit_msg>disable mdns logging from init<commit_after>package discovery\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\tlogging \"github.com\/ipfs\/go-log\"\n\t\"github.com\/libp2p\/go-libp2p-host\"\n\t\"github.com\/libp2p\/go-libp2p-peer\"\n\tpstore \"github.com\/libp2p\/go-libp2p-peerstore\"\n\tma \"github.com\/multiformats\/go-multiaddr\"\n\tmanet \"github.com\/multiformats\/go-multiaddr-net\"\n\t\"github.com\/whyrusleeping\/mdns\"\n)\n\nfunc init() {\n\t\/\/ don't let mdns use logging...\n\tmdns.DisableLogging = true\n}\n\nvar log = logging.Logger(\"mdns\")\n\nconst ServiceTag = \"_ipfs-discovery._udp\"\n\ntype Service interface {\n\tio.Closer\n\tRegisterNotifee(Notifee)\n\tUnregisterNotifee(Notifee)\n}\n\ntype Notifee interface {\n\tHandlePeerFound(pstore.PeerInfo)\n}\n\ntype mdnsService struct {\n\tserver *mdns.Server\n\tservice *mdns.MDNSService\n\thost host.Host\n\ttag string\n\n\tlk sync.Mutex\n\tnotifees []Notifee\n\tinterval time.Duration\n}\n\nfunc getDialableListenAddrs(ph host.Host) ([]*net.TCPAddr, error) {\n\tvar out []*net.TCPAddr\n\tfor _, addr := range ph.Addrs() {\n\t\tna, err := manet.ToNetAddr(addr)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\ttcp, ok := na.(*net.TCPAddr)\n\t\tif ok {\n\t\t\tout = append(out, tcp)\n\t\t}\n\t}\n\tif len(out) == 0 {\n\t\treturn nil, errors.New(\"failed to find good external addr from peerhost\")\n\t}\n\treturn out, nil\n}\n\nfunc NewMdnsService(ctx context.Context, peerhost host.Host, interval time.Duration, serviceTag string) (Service, error) {\n\n\tvar ipaddrs []net.IP\n\tport := 4001\n\n\taddrs, err := getDialableListenAddrs(peerhost)\n\tif err != nil {\n\t\tlog.Warning(err)\n\t} else {\n\t\tport = addrs[0].Port\n\t\tfor _, a := range addrs {\n\t\t\tipaddrs = append(ipaddrs, a.IP)\n\t\t}\n\t}\n\n\tmyid := peerhost.ID().Pretty()\n\n\tinfo := []string{myid}\n\tif serviceTag == \"\" {\n\t\tserviceTag = ServiceTag\n\t}\n\tservice, err := mdns.NewMDNSService(myid, serviceTag, \"\", \"\", port, ipaddrs, info)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Create the mDNS server, defer shutdown\n\tserver, err := mdns.NewServer(&mdns.Config{Zone: service})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts := &mdnsService{\n\t\tserver: server,\n\t\tservice: service,\n\t\thost: peerhost,\n\t\tinterval: interval,\n\t\ttag: serviceTag,\n\t}\n\n\tgo s.pollForEntries(ctx)\n\n\treturn s, nil\n}\n\nfunc (m *mdnsService) Close() error {\n\treturn m.server.Shutdown()\n}\n\nfunc (m *mdnsService) pollForEntries(ctx context.Context) {\n\n\tticker := time.NewTicker(m.interval)\n\tfor {\n\t\t\/\/execute mdns query right away at method call and then with every tick\n\t\tentriesCh := make(chan *mdns.ServiceEntry, 16)\n\t\tgo func() {\n\t\t\tfor entry := range entriesCh {\n\t\t\t\tm.handleEntry(entry)\n\t\t\t}\n\t\t}()\n\n\t\tlog.Debug(\"starting mdns query\")\n\t\tqp := &mdns.QueryParam{\n\t\t\tDomain: \"local\",\n\t\t\tEntries: entriesCh,\n\t\t\tService: m.tag,\n\t\t\tTimeout: time.Second * 5,\n\t\t}\n\n\t\terr := mdns.Query(qp)\n\t\tif err != nil {\n\t\t\tlog.Error(\"mdns lookup error: \", err)\n\t\t}\n\t\tclose(entriesCh)\n\t\tlog.Debug(\"mdns query complete\")\n\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tcontinue\n\t\tcase <-ctx.Done():\n\t\t\tlog.Debug(\"mdns service halting\")\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (m *mdnsService) handleEntry(e *mdns.ServiceEntry) {\n\tlog.Debugf(\"Handling MDNS entry: %s:%d %s\", e.AddrV4, e.Port, e.Info)\n\tmpeer, err := peer.IDB58Decode(e.Info)\n\tif err != nil {\n\t\tlog.Warning(\"Error parsing peer ID from mdns entry: \", err)\n\t\treturn\n\t}\n\n\tif mpeer == m.host.ID() {\n\t\tlog.Debug(\"got our own mdns entry, skipping\")\n\t\treturn\n\t}\n\n\tmaddr, err := manet.FromNetAddr(&net.TCPAddr{\n\t\tIP: e.AddrV4,\n\t\tPort: e.Port,\n\t})\n\tif err != nil {\n\t\tlog.Warning(\"Error parsing multiaddr from mdns entry: \", err)\n\t\treturn\n\t}\n\n\tpi := pstore.PeerInfo{\n\t\tID: mpeer,\n\t\tAddrs: []ma.Multiaddr{maddr},\n\t}\n\n\tm.lk.Lock()\n\tfor _, n := range m.notifees {\n\t\tgo n.HandlePeerFound(pi)\n\t}\n\tm.lk.Unlock()\n}\n\nfunc (m *mdnsService) RegisterNotifee(n Notifee) {\n\tm.lk.Lock()\n\tm.notifees = append(m.notifees, n)\n\tm.lk.Unlock()\n}\n\nfunc (m *mdnsService) UnregisterNotifee(n Notifee) {\n\tm.lk.Lock()\n\tfound := -1\n\tfor i, notif := range m.notifees {\n\t\tif notif == n {\n\t\t\tfound = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif found != -1 {\n\t\tm.notifees = append(m.notifees[:found], m.notifees[found+1:]...)\n\t}\n\tm.lk.Unlock()\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>fixed type case issue for SRID. #161<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>attribute: add Read\/Write Uint64 method<commit_after><|endoftext|>"} {"text":"<commit_before>package cuckoo\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"hash\/fnv\"\n\t\"sort\"\n)\n\nconst ASSOCIATIVITY int = 8\n\nfunc (m *cmap) bin(n int, key keyt) int {\n\th := fnv.New64a()\n\th.Write(key)\n\n\tvar bs [8]byte\n\tbinary.PutVarint(bs[:], int64(n))\n\th.Write(bs[:])\n\n\treturn int(h.Sum64() % uint64(len(m.bins)))\n}\n\nfunc (m *cmap) kbins(key keyt) []int {\n\tbins := make([]int, 0, m.hashes)\n\th := fnv.New64a()\n\tfor i := 0; i < int(m.hashes); i++ {\n\t\th.Reset()\n\t\th.Write(key)\n\n\t\tvar bs [8]byte\n\t\tbinary.PutVarint(bs[:], int64(i))\n\t\th.Write(bs[:])\n\n\t\tbins = append(bins, int(h.Sum64()%uint64(len(m.bins))))\n\t}\n\treturn bins\n}\n\ntype cval struct {\n\tbno int\n\texpires int\n\tkey keyt\n\tval interface{}\n}\n\nfunc (v *cval) present() bool {\n\treturn !(v.expires == 0 || v.val == nil)\n}\n\ntype cbin struct {\n\tvals [ASSOCIATIVITY]cval\n\tmx SpinLock\n}\n\nfunc (b *cbin) subin(v cval) {\n\tfor j, jv := range b.vals {\n\t\tif !jv.present() {\n\t\t\tb.vals[j] = v\n\t\t}\n\t}\n}\n\nfunc (b *cbin) kill(i int) {\n\tb.vals[i].expires = 0\n}\n\nfunc (b *cbin) available() bool {\n\tpresent := 0\n\tfor i := 0; i < ASSOCIATIVITY; i++ {\n\t\tif b.vals[i].present() {\n\t\t\tpresent++\n\t\t}\n\t}\n\treturn present < ASSOCIATIVITY\n}\n\ntype cmap struct {\n\tbins []cbin\n\thashes uint32\n}\n\nfunc (m *cmap) iterate() <-chan cval {\n\tch := make(chan cval)\n\tgo func() {\n\t\tfor i, bin := range m.bins {\n\t\t\tvals := make([]cval, 0, ASSOCIATIVITY)\n\t\t\tm.bins[i].mx.Lock()\n\t\t\tfor _, v := range bin.vals {\n\t\t\t\tif v.present() {\n\t\t\t\t\tvals = append(vals, v)\n\t\t\t\t}\n\t\t\t}\n\t\t\tm.bins[i].mx.Unlock()\n\t\t\tfor _, cv := range vals {\n\t\t\t\tch <- cv\n\t\t\t}\n\t\t}\n\t\tclose(ch)\n\t}()\n\treturn ch\n}\n\nfunc (m *cmap) add(bini int, bin int, key keyt, val interface{}) bool {\n\tm.bins[bin].mx.Lock()\n\tdefer m.bins[bin].mx.Unlock()\n\tif m.bins[bin].available() {\n\t\tm.bins[bin].subin(cval{bini, 1, key, val})\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (m *cmap) lock_in_order(bins ...int) {\n\tlocks := make([]int, len(bins))\n\tfor i := range bins {\n\t\tlocks[i] = bins[i]\n\t}\n\n\tsort.Ints(locks)\n\tlast := -1\n\tfor _, bin := range locks {\n\t\tif bin != last {\n\t\t\tm.bins[bin].mx.Lock()\n\t\t\tlast = bin\n\t\t}\n\t}\n}\n\nfunc (m *cmap) unlock(bins ...int) {\n\tlocks := make([]int, len(bins))\n\tfor i := range bins {\n\t\tlocks[i] = bins[i]\n\t}\n\n\tsort.Ints(locks)\n\tlast := -1\n\tfor _, bin := range locks {\n\t\tif bin != last {\n\t\t\tm.bins[bin].mx.Unlock()\n\t\t\tlast = bin\n\t\t}\n\t}\n}\n\nfunc (m *cmap) validate_execute(path []mv) bool {\n\tfor i := len(path) - 1; i >= 0; i-- {\n\t\tk := path[i]\n\n\t\tm.lock_in_order(k.from, k.to)\n\t\tif !m.bins[k.to].available() {\n\t\t\tm.unlock(k.from, k.to)\n\t\t\tfmt.Println(\"path to occupancy no longer valid, target bucket now full\")\n\t\t\treturn false\n\t\t}\n\n\t\tki := -1\n\t\tfor j, jk := range m.bins[k.from].vals {\n\t\t\tif jk.present() && bytes.Equal(jk.key, k.key) {\n\t\t\t\tki = j\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif ki == -1 {\n\t\t\tm.unlock(k.from, k.to)\n\t\t\tfmt.Println(\"path to occupancy no longer valid, key already swapped\")\n\t\t\treturn false\n\t\t}\n\n\t\tv := m.bins[k.from].vals[ki]\n\t\tv.bno = k.tobn\n\n\t\tm.bins[k.to].subin(v)\n\t\tm.bins[k.from].kill(ki)\n\n\t\tm.unlock(k.from, k.to)\n\t}\n\n\treturn true\n}\n\nfunc (m *cmap) has(bin int, key keyt) int {\n\tfor i := 0; i < ASSOCIATIVITY; i++ {\n\t\tif m.bins[bin].vals[i].present() && bytes.Equal(m.bins[bin].vals[i].key, key) {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n\n\/\/ del removes the entry with the given key, and returns its value (if any)\nfunc (m *cmap) del(key keyt) (v interface{}) {\n\tbins := m.kbins(key)\n\n\tm.lock_in_order(bins...)\n\tdefer m.unlock(bins...)\n\n\tfor _, bin := range bins {\n\t\tki := m.has(bin, key)\n\t\tif ki != -1 {\n\t\t\tv = m.bins[bin].vals[ki]\n\t\t\tm.bins[bin].kill(ki)\n\t\t\treturn\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (m *cmap) insert(key keyt, val interface{}) int {\n\tbins := m.kbins(key)\n\n\tm.lock_in_order(bins...)\n\tfor _, bin := range bins {\n\t\tki := m.has(bin, key)\n\t\tif ki != -1 {\n\t\t\tm.bins[bin].vals[ki].val = val\n\t\t\tm.unlock(bins...)\n\t\t\treturn 0\n\t\t}\n\t}\n\tm.unlock(bins...)\n\n\tfor i, b := range bins {\n\t\tif m.bins[b].available() {\n\t\t\tif m.add(i, b, key, val) {\n\t\t\t\treturn 0\n\t\t\t}\n\t\t}\n\t}\n\n\tfor {\n\t\tpath := m.search(bins...)\n\t\tif path == nil {\n\t\t\treturn -1\n\t\t}\n\n\t\tfreeing := path[0].from\n\n\t\t\/\/ recompute bins because #hashes might have changed\n\t\tbins = m.kbins(key)\n\n\t\t\/\/ sanity check that this path will make room\n\t\ttobin := -1\n\t\tfor i, bin := range bins {\n\t\t\tif freeing == bin {\n\t\t\t\ttobin = i\n\t\t\t}\n\t\t}\n\t\tif tobin == -1 {\n\t\t\tpanic(fmt.Sprintf(\"path %v leads to occupancy in bin %v, but is unhelpful for key %s with bins: %v\", path, freeing, key, bins))\n\t\t}\n\n\t\tif m.validate_execute(path) {\n\t\t\tif m.add(tobin, freeing, key, val) {\n\t\t\t\treturn len(path)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (m *cmap) get(key keyt) (interface{}, bool) {\n\tbins := m.kbins(key)\n\n\tfor _, bin := range bins {\n\t\tb := m.bins[bin]\n\t\tfor _, s := range b.vals {\n\t\t\tif s.present() && bytes.Equal(s.key, key) {\n\t\t\t\treturn s.val, true\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, false\n}\n<commit_msg>Faster present() and available()<commit_after>package cuckoo\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"hash\/fnv\"\n\t\"sort\"\n)\n\nconst ASSOCIATIVITY int = 8\n\nfunc (m *cmap) bin(n int, key keyt) int {\n\th := fnv.New64a()\n\th.Write(key)\n\n\tvar bs [8]byte\n\tbinary.PutVarint(bs[:], int64(n))\n\th.Write(bs[:])\n\n\treturn int(h.Sum64() % uint64(len(m.bins)))\n}\n\nfunc (m *cmap) kbins(key keyt) []int {\n\tbins := make([]int, 0, m.hashes)\n\th := fnv.New64a()\n\tfor i := 0; i < int(m.hashes); i++ {\n\t\th.Reset()\n\t\th.Write(key)\n\n\t\tvar bs [8]byte\n\t\tbinary.PutVarint(bs[:], int64(i))\n\t\th.Write(bs[:])\n\n\t\tbins = append(bins, int(h.Sum64()%uint64(len(m.bins))))\n\t}\n\treturn bins\n}\n\ntype cval struct {\n\tbno int\n\texpires int\n\tkey keyt\n\tval interface{}\n}\n\nfunc (v *cval) present() bool {\n\treturn v.val != nil \/\/ TODO: && not expired\n}\n\ntype cbin struct {\n\tvals [ASSOCIATIVITY]cval\n\tmx SpinLock\n}\n\nfunc (b *cbin) subin(v cval) {\n\tfor j, jv := range b.vals {\n\t\tif !jv.present() {\n\t\t\tb.vals[j] = v\n\t\t}\n\t}\n}\n\nfunc (b *cbin) kill(i int) {\n\tb.vals[i].val = nil\n\t\/\/b.vals[i].expires = 0\n}\n\nfunc (b *cbin) available() bool {\n\tfor i := 0; i < ASSOCIATIVITY; i++ {\n\t\tif !b.vals[i].present() {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\ntype cmap struct {\n\tbins []cbin\n\thashes uint32\n}\n\nfunc (m *cmap) iterate() <-chan cval {\n\tch := make(chan cval)\n\tgo func() {\n\t\tfor i, bin := range m.bins {\n\t\t\tvals := make([]cval, 0, ASSOCIATIVITY)\n\t\t\tm.bins[i].mx.Lock()\n\t\t\tfor _, v := range bin.vals {\n\t\t\t\tif v.present() {\n\t\t\t\t\tvals = append(vals, v)\n\t\t\t\t}\n\t\t\t}\n\t\t\tm.bins[i].mx.Unlock()\n\t\t\tfor _, cv := range vals {\n\t\t\t\tch <- cv\n\t\t\t}\n\t\t}\n\t\tclose(ch)\n\t}()\n\treturn ch\n}\n\nfunc (m *cmap) add(bini int, bin int, key keyt, val interface{}) bool {\n\tm.bins[bin].mx.Lock()\n\tdefer m.bins[bin].mx.Unlock()\n\tif m.bins[bin].available() {\n\t\tm.bins[bin].subin(cval{bini, 1, key, val})\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (m *cmap) lock_in_order(bins ...int) {\n\tlocks := make([]int, len(bins))\n\tfor i := range bins {\n\t\tlocks[i] = bins[i]\n\t}\n\n\tsort.Ints(locks)\n\tlast := -1\n\tfor _, bin := range locks {\n\t\tif bin != last {\n\t\t\tm.bins[bin].mx.Lock()\n\t\t\tlast = bin\n\t\t}\n\t}\n}\n\nfunc (m *cmap) unlock(bins ...int) {\n\tlocks := make([]int, len(bins))\n\tfor i := range bins {\n\t\tlocks[i] = bins[i]\n\t}\n\n\tsort.Ints(locks)\n\tlast := -1\n\tfor _, bin := range locks {\n\t\tif bin != last {\n\t\t\tm.bins[bin].mx.Unlock()\n\t\t\tlast = bin\n\t\t}\n\t}\n}\n\nfunc (m *cmap) validate_execute(path []mv) bool {\n\tfor i := len(path) - 1; i >= 0; i-- {\n\t\tk := path[i]\n\n\t\tm.lock_in_order(k.from, k.to)\n\t\tif !m.bins[k.to].available() {\n\t\t\tm.unlock(k.from, k.to)\n\t\t\tfmt.Println(\"path to occupancy no longer valid, target bucket now full\")\n\t\t\treturn false\n\t\t}\n\n\t\tki := -1\n\t\tfor j, jk := range m.bins[k.from].vals {\n\t\t\tif jk.present() && bytes.Equal(jk.key, k.key) {\n\t\t\t\tki = j\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif ki == -1 {\n\t\t\tm.unlock(k.from, k.to)\n\t\t\tfmt.Println(\"path to occupancy no longer valid, key already swapped\")\n\t\t\treturn false\n\t\t}\n\n\t\tv := m.bins[k.from].vals[ki]\n\t\tv.bno = k.tobn\n\n\t\tm.bins[k.to].subin(v)\n\t\tm.bins[k.from].kill(ki)\n\n\t\tm.unlock(k.from, k.to)\n\t}\n\n\treturn true\n}\n\nfunc (m *cmap) has(bin int, key keyt) int {\n\tfor i := 0; i < ASSOCIATIVITY; i++ {\n\t\tif m.bins[bin].vals[i].present() && bytes.Equal(m.bins[bin].vals[i].key, key) {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n\n\/\/ del removes the entry with the given key, and returns its value (if any)\nfunc (m *cmap) del(key keyt) (v interface{}) {\n\tbins := m.kbins(key)\n\n\tm.lock_in_order(bins...)\n\tdefer m.unlock(bins...)\n\n\tfor _, bin := range bins {\n\t\tki := m.has(bin, key)\n\t\tif ki != -1 {\n\t\t\tv = m.bins[bin].vals[ki]\n\t\t\tm.bins[bin].kill(ki)\n\t\t\treturn\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (m *cmap) insert(key keyt, val interface{}) int {\n\tbins := m.kbins(key)\n\n\tm.lock_in_order(bins...)\n\tfor _, bin := range bins {\n\t\tki := m.has(bin, key)\n\t\tif ki != -1 {\n\t\t\tm.bins[bin].vals[ki].val = val\n\t\t\tm.unlock(bins...)\n\t\t\treturn 0\n\t\t}\n\t}\n\tm.unlock(bins...)\n\n\tfor i, b := range bins {\n\t\tif m.bins[b].available() {\n\t\t\tif m.add(i, b, key, val) {\n\t\t\t\treturn 0\n\t\t\t}\n\t\t}\n\t}\n\n\tfor {\n\t\tpath := m.search(bins...)\n\t\tif path == nil {\n\t\t\treturn -1\n\t\t}\n\n\t\tfreeing := path[0].from\n\n\t\t\/\/ recompute bins because #hashes might have changed\n\t\tbins = m.kbins(key)\n\n\t\t\/\/ sanity check that this path will make room\n\t\ttobin := -1\n\t\tfor i, bin := range bins {\n\t\t\tif freeing == bin {\n\t\t\t\ttobin = i\n\t\t\t}\n\t\t}\n\t\tif tobin == -1 {\n\t\t\tpanic(fmt.Sprintf(\"path %v leads to occupancy in bin %v, but is unhelpful for key %s with bins: %v\", path, freeing, key, bins))\n\t\t}\n\n\t\tif m.validate_execute(path) {\n\t\t\tif m.add(tobin, freeing, key, val) {\n\t\t\t\treturn len(path)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (m *cmap) get(key keyt) (interface{}, bool) {\n\tbins := m.kbins(key)\n\n\tfor _, bin := range bins {\n\t\tb := m.bins[bin]\n\t\tfor _, s := range b.vals {\n\t\t\tif s.present() && bytes.Equal(s.key, key) {\n\t\t\t\treturn s.val, true\n\t\t\t}\n\t\t}\n\t}\n\treturn nil, false\n}\n<|endoftext|>"} {"text":"<commit_before>package login\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/tarent\/loginsrv\/logging\"\n\t\"github.com\/tarent\/loginsrv\/oauth2\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar jwtDefaultSecret string\n\nfunc init() {\n\trand.Seed(time.Now().UTC().UnixNano())\n\tjwtDefaultSecret = randStringBytes(32)\n}\n\n\/\/ DefaultConfig for the loginsrv handler\nfunc DefaultConfig() *Config {\n\treturn &Config{\n\t\tHost: \"localhost\",\n\t\tPort: \"6789\",\n\t\tLogLevel: \"info\",\n\t\tJwtSecret: jwtDefaultSecret,\n\t\tJwtExpiry: 24 * time.Hour,\n\t\tJwtRefreshes: 0,\n\t\tSuccessURL: \"\/\",\n\t\tLogoutURL: \"\",\n\t\tLoginPath: \"\/login\",\n\t\tCookieName: \"jwt_token\",\n\t\tCookieHTTPOnly: true,\n\t\tBackends: Options{},\n\t\tOauth: Options{},\n\t\tGracePeriod: 5 * time.Second,\n\t}\n}\n\nconst envPrefix = \"LOGINSRV_\"\n\n\/\/ Config for the loginsrv handler\ntype Config struct {\n\tHost string\n\tPort string\n\tLogLevel string\n\tTextLogging bool\n\tJwtSecret string\n\tJwtExpiry time.Duration\n\tJwtRefreshes int\n\tSuccessURL string\n\tLogoutURL string\n\tTemplate string\n\tLoginPath string\n\tCookieName string\n\tCookieExpiry time.Duration\n\tCookieDomain string\n\tCookieHTTPOnly bool\n\tBackends Options\n\tOauth Options\n\tGracePeriod time.Duration\n}\n\n\/\/ Options is the configuration structure for oauth and backend provider\n\/\/ key is the providername, value is a options map.\ntype Options map[string]map[string]string\n\n\/\/ addOauthOpts adds the options for a provider in the form of key=value,key=value,..\nfunc (c *Config) addOauthOpts(providerName, optsKvList string) error {\n\topts, err := parseOptions(optsKvList)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.Oauth[providerName] = opts\n\treturn nil\n}\n\n\/\/ addBackendOpts adds the options for a provider in the form of key=value,key=value,..\nfunc (c *Config) addBackendOpts(providerName, optsKvList string) error {\n\topts, err := parseOptions(optsKvList)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.Backends[providerName] = opts\n\treturn nil\n}\n\n\/\/ ConfigureFlagSet adds all flags to the supplied flag set\nfunc (c *Config) ConfigureFlagSet(f *flag.FlagSet) {\n\tf.StringVar(&c.Host, \"host\", c.Host, \"The host to listen on\")\n\tf.StringVar(&c.Port, \"port\", c.Port, \"The port to listen on\")\n\tf.StringVar(&c.LogLevel, \"log-level\", c.LogLevel, \"The log level\")\n\tf.BoolVar(&c.TextLogging, \"text-logging\", c.TextLogging, \"Log in text format instead of json\")\n\tf.StringVar(&c.JwtSecret, \"jwt-secret\", \"random key\", \"The secret to sign the jwt token\")\n\tf.DurationVar(&c.JwtExpiry, \"jwt-expiry\", c.JwtExpiry, \"The expiry duration for the jwt token, e.g. 2h or 3h30m\")\n\tf.IntVar(&c.JwtRefreshes, \"jwt-refreshes\", c.JwtRefreshes, \"The maximum amount of jwt refreshes. 0 by Default\")\n\tf.StringVar(&c.CookieName, \"cookie-name\", c.CookieName, \"The name of the jwt cookie\")\n\tf.BoolVar(&c.CookieHTTPOnly, \"cookie-http-only\", c.CookieHTTPOnly, \"Set the cookie with the http only flag\")\n\tf.DurationVar(&c.CookieExpiry, \"cookie-expiry\", c.CookieExpiry, \"The expiry duration for the cookie, e.g. 2h or 3h30m. Default is browser session\")\n\tf.StringVar(&c.CookieDomain, \"cookie-domain\", c.CookieDomain, \"The optional domain parameter for the cookie\")\n\tf.StringVar(&c.SuccessURL, \"success-url\", c.SuccessURL, \"The url to redirect after login\")\n\tf.StringVar(&c.LogoutURL, \"logout-url\", c.LogoutURL, \"The url or path to redirect after logout\")\n\tf.StringVar(&c.Template, \"template\", c.Template, \"An alternative template for the login form\")\n\tf.StringVar(&c.LoginPath, \"login-path\", c.LoginPath, \"The path of the login resource\")\n\tf.DurationVar(&c.GracePeriod, \"grace-period\", c.GracePeriod, \"Graceful shutdown grace period\")\n\n\t\/\/ the -backends is deprecated, but we support it for backwards compatibility\n\tdeprecatedBackends := setFunc(func(optsKvList string) error {\n\t\tlogging.Logger.Warn(\"DEPRECATED: '-backend' is no longer supported. Please set the backends by explicit parameters\")\n\t\topts, err := parseOptions(optsKvList)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpName, ok := opts[\"provider\"]\n\t\tif !ok {\n\t\t\treturn errors.New(\"missing provider name provider=...\")\n\t\t}\n\t\tdelete(opts, \"provider\")\n\t\tc.Backends[pName] = opts\n\t\treturn nil\n\t})\n\tf.Var(deprecatedBackends, \"backend\", \"Deprecated, please use the explicit flags\")\n\n\t\/\/ One option for each oauth provider\n\tfor _, pName := range oauth2.ProviderList() {\n\t\tfunc(pName string) {\n\t\t\tsetter := setFunc(func(optsKvList string) error {\n\t\t\t\treturn c.addOauthOpts(pName, optsKvList)\n\t\t\t})\n\t\t\tf.Var(setter, pName, \"Oauth config in the form: client_id=..,client_secret=..[,scope=..,][redirect_uri=..]\")\n\t\t}(pName)\n\t}\n\n\t\/\/ One option for each backend provider\n\tfor _, pName := range ProviderList() {\n\t\tfunc(pName string) {\n\t\t\tsetter := setFunc(func(optsKvList string) error {\n\t\t\t\treturn c.addBackendOpts(pName, optsKvList)\n\t\t\t})\n\t\t\tdesc, _ := GetProviderDescription(pName)\n\t\t\tf.Var(setter, pName, desc.HelpText)\n\t\t}(pName)\n\t}\n}\n\n\/\/ ReadConfig from the commandline args\nfunc ReadConfig() *Config {\n\tc, err := readConfig(flag.CommandLine, os.Args[1:])\n\tif err != nil {\n\t\t\/\/ should never happen, because of flag default policy ExitOnError\n\t\tpanic(err)\n\t}\n\treturn c\n}\n\nfunc readConfig(f *flag.FlagSet, args []string) (*Config, error) {\n\tconfig := DefaultConfig()\n\tconfig.ConfigureFlagSet(f)\n\n\t\/\/ prefer environment settings\n\tf.VisitAll(func(f *flag.Flag) {\n\t\tif val, isPresent := os.LookupEnv(envName(f.Name)); isPresent {\n\t\t\tf.Value.Set(val)\n\t\t}\n\t})\n\n\terr := f.Parse(args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif config.JwtSecret == \"random key\" {\n\t\tif s, set := os.LookupEnv(\"LOGINSRV_JWT_SECRET\"); set {\n\t\t\tconfig.JwtSecret = s\n\t\t} else {\n\t\t\tconfig.JwtSecret = jwtDefaultSecret\n\t\t}\n\t}\n\n\treturn config, err\n}\n\nconst letterBytes = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\nfunc randStringBytes(n int) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = letterBytes[rand.Intn(len(letterBytes))]\n\t}\n\treturn string(b)\n}\n\nfunc envName(flagName string) string {\n\treturn envPrefix + strings.Replace(strings.ToUpper(flagName), \"-\", \"_\", -1)\n}\n\nfunc parseOptions(b string) (map[string]string, error) {\n\topts := map[string]string{}\n\tpairs := strings.Split(b, \",\")\n\tfor _, p := range pairs {\n\t\tpair := strings.SplitN(p, \"=\", 2)\n\t\tif len(pair) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"provider configuration has to be in form 'key1=value1,key2=..', but was %v\", p)\n\t\t}\n\t\topts[pair[0]] = pair[1]\n\t}\n\treturn opts, nil\n}\n\n\/\/ Helper type to wrap a function closure with the Value interface\ntype setFunc func(optsKvList string) error\n\nfunc (f setFunc) Set(value string) error {\n\treturn f(value)\n}\n\nfunc (f setFunc) String() string {\n\treturn \"setFunc\"\n}\n<commit_msg>set the default jwt secret to a random string<commit_after>package login\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"math\/rand\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/tarent\/loginsrv\/logging\"\n\t\"github.com\/tarent\/loginsrv\/oauth2\"\n)\n\nvar jwtDefaultSecret string\n\nfunc init() {\n\trand.Seed(time.Now().UTC().UnixNano())\n\tjwtDefaultSecret = randStringBytes(32)\n}\n\n\/\/ DefaultConfig for the loginsrv handler\nfunc DefaultConfig() *Config {\n\treturn &Config{\n\t\tHost: \"localhost\",\n\t\tPort: \"6789\",\n\t\tLogLevel: \"info\",\n\t\tJwtSecret: jwtDefaultSecret,\n\t\tJwtExpiry: 24 * time.Hour,\n\t\tJwtRefreshes: 0,\n\t\tSuccessURL: \"\/\",\n\t\tLogoutURL: \"\",\n\t\tLoginPath: \"\/login\",\n\t\tCookieName: \"jwt_token\",\n\t\tCookieHTTPOnly: true,\n\t\tBackends: Options{},\n\t\tOauth: Options{},\n\t\tGracePeriod: 5 * time.Second,\n\t}\n}\n\nconst envPrefix = \"LOGINSRV_\"\n\n\/\/ Config for the loginsrv handler\ntype Config struct {\n\tHost string\n\tPort string\n\tLogLevel string\n\tTextLogging bool\n\tJwtSecret string\n\tJwtExpiry time.Duration\n\tJwtRefreshes int\n\tSuccessURL string\n\tLogoutURL string\n\tTemplate string\n\tLoginPath string\n\tCookieName string\n\tCookieExpiry time.Duration\n\tCookieDomain string\n\tCookieHTTPOnly bool\n\tBackends Options\n\tOauth Options\n\tGracePeriod time.Duration\n}\n\n\/\/ Options is the configuration structure for oauth and backend provider\n\/\/ key is the providername, value is a options map.\ntype Options map[string]map[string]string\n\n\/\/ addOauthOpts adds the options for a provider in the form of key=value,key=value,..\nfunc (c *Config) addOauthOpts(providerName, optsKvList string) error {\n\topts, err := parseOptions(optsKvList)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.Oauth[providerName] = opts\n\treturn nil\n}\n\n\/\/ addBackendOpts adds the options for a provider in the form of key=value,key=value,..\nfunc (c *Config) addBackendOpts(providerName, optsKvList string) error {\n\topts, err := parseOptions(optsKvList)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.Backends[providerName] = opts\n\treturn nil\n}\n\n\/\/ ConfigureFlagSet adds all flags to the supplied flag set\nfunc (c *Config) ConfigureFlagSet(f *flag.FlagSet) {\n\tf.StringVar(&c.Host, \"host\", c.Host, \"The host to listen on\")\n\tf.StringVar(&c.Port, \"port\", c.Port, \"The port to listen on\")\n\tf.StringVar(&c.LogLevel, \"log-level\", c.LogLevel, \"The log level\")\n\tf.BoolVar(&c.TextLogging, \"text-logging\", c.TextLogging, \"Log in text format instead of json\")\n\tf.StringVar(&c.JwtSecret, \"jwt-secret\", c.JwtSecret, \"The secret to sign the jwt token\")\n\tf.DurationVar(&c.JwtExpiry, \"jwt-expiry\", c.JwtExpiry, \"The expiry duration for the jwt token, e.g. 2h or 3h30m\")\n\tf.IntVar(&c.JwtRefreshes, \"jwt-refreshes\", c.JwtRefreshes, \"The maximum amount of jwt refreshes. 0 by Default\")\n\tf.StringVar(&c.CookieName, \"cookie-name\", c.CookieName, \"The name of the jwt cookie\")\n\tf.BoolVar(&c.CookieHTTPOnly, \"cookie-http-only\", c.CookieHTTPOnly, \"Set the cookie with the http only flag\")\n\tf.DurationVar(&c.CookieExpiry, \"cookie-expiry\", c.CookieExpiry, \"The expiry duration for the cookie, e.g. 2h or 3h30m. Default is browser session\")\n\tf.StringVar(&c.CookieDomain, \"cookie-domain\", c.CookieDomain, \"The optional domain parameter for the cookie\")\n\tf.StringVar(&c.SuccessURL, \"success-url\", c.SuccessURL, \"The url to redirect after login\")\n\tf.StringVar(&c.LogoutURL, \"logout-url\", c.LogoutURL, \"The url or path to redirect after logout\")\n\tf.StringVar(&c.Template, \"template\", c.Template, \"An alternative template for the login form\")\n\tf.StringVar(&c.LoginPath, \"login-path\", c.LoginPath, \"The path of the login resource\")\n\tf.DurationVar(&c.GracePeriod, \"grace-period\", c.GracePeriod, \"Graceful shutdown grace period\")\n\n\t\/\/ the -backends is deprecated, but we support it for backwards compatibility\n\tdeprecatedBackends := setFunc(func(optsKvList string) error {\n\t\tlogging.Logger.Warn(\"DEPRECATED: '-backend' is no longer supported. Please set the backends by explicit parameters\")\n\t\topts, err := parseOptions(optsKvList)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpName, ok := opts[\"provider\"]\n\t\tif !ok {\n\t\t\treturn errors.New(\"missing provider name provider=...\")\n\t\t}\n\t\tdelete(opts, \"provider\")\n\t\tc.Backends[pName] = opts\n\t\treturn nil\n\t})\n\tf.Var(deprecatedBackends, \"backend\", \"Deprecated, please use the explicit flags\")\n\n\t\/\/ One option for each oauth provider\n\tfor _, pName := range oauth2.ProviderList() {\n\t\tfunc(pName string) {\n\t\t\tsetter := setFunc(func(optsKvList string) error {\n\t\t\t\treturn c.addOauthOpts(pName, optsKvList)\n\t\t\t})\n\t\t\tf.Var(setter, pName, \"Oauth config in the form: client_id=..,client_secret=..[,scope=..,][redirect_uri=..]\")\n\t\t}(pName)\n\t}\n\n\t\/\/ One option for each backend provider\n\tfor _, pName := range ProviderList() {\n\t\tfunc(pName string) {\n\t\t\tsetter := setFunc(func(optsKvList string) error {\n\t\t\t\treturn c.addBackendOpts(pName, optsKvList)\n\t\t\t})\n\t\t\tdesc, _ := GetProviderDescription(pName)\n\t\t\tf.Var(setter, pName, desc.HelpText)\n\t\t}(pName)\n\t}\n}\n\n\/\/ ReadConfig from the commandline args\nfunc ReadConfig() *Config {\n\tc, err := readConfig(flag.CommandLine, os.Args[1:])\n\tif err != nil {\n\t\t\/\/ should never happen, because of flag default policy ExitOnError\n\t\tpanic(err)\n\t}\n\treturn c\n}\n\nfunc readConfig(f *flag.FlagSet, args []string) (*Config, error) {\n\tconfig := DefaultConfig()\n\tconfig.ConfigureFlagSet(f)\n\n\t\/\/ prefer environment settings\n\tf.VisitAll(func(f *flag.Flag) {\n\t\tif val, isPresent := os.LookupEnv(envName(f.Name)); isPresent {\n\t\t\tf.Value.Set(val)\n\t\t}\n\t})\n\n\terr := f.Parse(args)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif config.JwtSecret == \"random key\" {\n\t\tif s, set := os.LookupEnv(\"LOGINSRV_JWT_SECRET\"); set {\n\t\t\tconfig.JwtSecret = s\n\t\t} else {\n\t\t\tconfig.JwtSecret = jwtDefaultSecret\n\t\t}\n\t}\n\n\treturn config, err\n}\n\nconst letterBytes = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\nfunc randStringBytes(n int) string {\n\tb := make([]byte, n)\n\tfor i := range b {\n\t\tb[i] = letterBytes[rand.Intn(len(letterBytes))]\n\t}\n\treturn string(b)\n}\n\nfunc envName(flagName string) string {\n\treturn envPrefix + strings.Replace(strings.ToUpper(flagName), \"-\", \"_\", -1)\n}\n\nfunc parseOptions(b string) (map[string]string, error) {\n\topts := map[string]string{}\n\tpairs := strings.Split(b, \",\")\n\tfor _, p := range pairs {\n\t\tpair := strings.SplitN(p, \"=\", 2)\n\t\tif len(pair) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"provider configuration has to be in form 'key1=value1,key2=..', but was %v\", p)\n\t\t}\n\t\topts[pair[0]] = pair[1]\n\t}\n\treturn opts, nil\n}\n\n\/\/ Helper type to wrap a function closure with the Value interface\ntype setFunc func(optsKvList string) error\n\nfunc (f setFunc) Set(value string) error {\n\treturn f(value)\n}\n\nfunc (f setFunc) String() string {\n\treturn \"setFunc\"\n}\n<|endoftext|>"} {"text":"<commit_before>package catlady\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/patrickmn\/go-cache\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\tuserAgent = \"Catbot\/1 by cattebot\"\n)\n\ntype Image struct {\n\turl string\n\ttitle string\n\tdomain string\n}\n\ntype AuthToken struct {\n\tAccessToken string `json:\"access_token\"`\n\tExpiresIn int `json:\"expires_in\"`\n\tScope string `json:\"scope\"`\n\tTokenType string `json:\"token_type\"`\n}\n\ntype CatLady struct {\n\ttoken AuthToken\n\tlastTokenTime time.Time\n\tcatCache *cache.Cache\n\tredditUsername string\n\tredditPassword string\n\tredditAppId string\n\tredditAppSecret string\n\tsubreddits map[string]string\n}\n\n\/\/ Constructs a new Catlady object including creating a new cache object.\nfunc NewCatLady(username string, password string, appid string, appsecret string, subreddits map[string]string, logLevel log.Level) *CatLady {\n\tlog.SetLevel(logLevel)\n\tc := &CatLady{\n\t\tcatCache: cache.New(5*time.Minute, 30*time.Second),\n\t\tredditUsername: username,\n\t\tredditPassword: password,\n\t\tredditAppId: appid,\n\t\tredditAppSecret: appsecret,\n\t\tsubreddits: subreddits,\n\t}\n\treturn c\n}\n\nfunc (c *CatLady) getToken() {\n\tclient := &http.Client{}\n\tdata := url.Values{}\n\tdata.Set(\"grant_type\", \"password\")\n\tdata.Add(\"username\", c.redditUsername)\n\tdata.Add(\"password\", c.redditPassword)\n\treq, err := http.NewRequest(\"POST\", \"https:\/\/www.reddit.com\/api\/v1\/access_token\", strings.NewReader(data.Encode()))\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Failed to build Authentication POST\")\n\t}\n\treq.Header.Add(\"User-Agent\", userAgent)\n\treq.SetBasicAuth(c.redditAppId, c.redditAppSecret)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Failed to login to Reddit\")\n\t}\n\tcontents, err := ioutil.ReadAll(resp.Body)\n\tjson.Unmarshal(contents, &c.token)\n\tc.lastTokenTime = time.Now()\n\tlog.WithFields(log.Fields{\n\t\t\"AccessToken\": c.token.AccessToken,\n\t\t\"ExpiresIn\": c.token.ExpiresIn,\n\t\t\"Scope\": c.token.Scope,\n\t\t\"tokenType\": c.token.TokenType,\n\t\t\"lastTokenTime\": c.lastTokenTime,\n\t}).Debug(\"Got token!\")\n}\n\nfunc randInt(min int, max int) int {\n\tif max <= min {\n\t\treturn max\n\t}\n\trand.Seed(time.Now().UTC().UnixNano())\n\treturn min + rand.Intn(max-min)\n}\n\nfunc (c *CatLady) getReddit(sub string) RedditResponse {\n\tlog.WithField(\"subreddit\", sub).Debug(\"Getting Reddit\")\n\tlog.WithField(\"token\", c.token).Debug(\"token value\")\n\tif c.token.ExpiresIn == 0 || time.Since(c.lastTokenTime).Seconds() >= float64(c.token.ExpiresIn) {\n\t\tc.getToken()\n\t}\n\tclient := &http.Client{}\n\treqUrl := fmt.Sprintf(\"https:\/\/oauth.reddit.com\/r\/%s.json\", sub)\n\tlog.WithField(\"url\", reqUrl).Debug(\"Request URL\")\n\treq, err := http.NewRequest(\"GET\", reqUrl, nil)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Failed request to reddit\")\n\t}\n\treq.Header.Add(\"User-Agent\", userAgent)\n\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"%s %s\", c.token.TokenType, c.token.AccessToken))\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Getting subreddit failed\")\n\t}\n\tcontents, err := ioutil.ReadAll(resp.Body)\n\tlog.WithField(\"rawjson\", string(contents)).Debug(\"Raw response from reddit\")\n\tvar subReddit RedditResponse\n\tjson.Unmarshal(contents, &subReddit)\n\treturn subReddit\n}\n\nfunc checkForImage(url string) bool {\n\twhitelist := [...]string{\"imgur.com\", \"imgur\", \"giphy\", \"flickr\", \"photobucket\", \"youtube\", \"youtu.be\", \"gif\", \"gifv\", \"png\", \"jpg\", \"tiff\", \"webem\", \"bmp\", \"flv\", \"mpg\", \"mpeg\", \"avi\"}\n\tfor _, thing := range whitelist {\n\t\tif strings.Contains(url, thing) {\n\t\t\tlog.WithField(\"url\", url).Debug(\"Found Image\")\n\t\t\treturn true\n\t\t}\n\t}\n\tlog.WithField(\"url\", url).Debug(\"Didn't Find Image\")\n\treturn false\n}\n\nfunc cleanURL(url string) string {\n\tif strings.Contains(url, \"imgur\") {\n\t\tlog.WithField(\"url\", url).Debug(\"Found imgur url\")\n\t\textention := url[len(url)-3]\n\t\tif extention != \"gif\" && extention != \"jpg\" && extention != \"ifv\" && extention != \"png\" {\n\t\t\turl = url + \".jpg\"\n\t\t} else if extention == \"gif\" {\n\t\t\turl = url + \"v\"\n\t\t\tlog.WithField(\"url\", url).Debug(\"Converting to gifv\")\n\t\t}\n\t}\n\treturn url\n}\n\n\/\/ Returns a url for a given reddit\nfunc (c *CatLady) GetImage(sub string) string {\n\tvar submissions RedditResponse\n\tif subs, found := c.catCache.Get(sub); !found {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"cache\": false,\n\t\t\t\"subreddit\": sub,\n\t\t}).Info(\"Subreddit not found in cache.\")\n\t\tsubmissions = c.getReddit(sub)\n\t\tc.catCache.Set(sub, submissions, cache.DefaultExpiration)\n\t} else {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"cache\": true,\n\t\t\t\"subreddit\": sub,\n\t\t}).Info(\"Subreddit Found in Cache.\")\n\n\t\tsubmissions = subs.(RedditResponse)\n\n\t}\n\tsize := len(submissions.Data.Children)\n\tif size == 0 {\n\t\treturn \"\"\n\t}\n\tcount := 0\n\tnoImage := true\n\tfor noImage {\n\t\tcount += 1\n\t\trandom := randInt(0, size-1)\n\t\ts := submissions.Data.Children[random].Data\n\t\tif !s.Over18 {\n\t\t\tif checkForImage(s.URL) {\n\t\t\t\tnoImage = false\n\t\t\t\treturn cleanURL(s.URL)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.WithField(\"nsfw\", \"true\").Info(\"NSFW Link Found.\")\n\t\t}\n\t\tif count >= size {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"subreddit\": sub,\n\t\t\t\t\"size\": size,\n\t\t\t}).Info(\"I ran out of links\")\n\t\t\treturn \"\"\n\t\t}\n\t}\n\treturn \"\"\n}\n<commit_msg>This has a bug where it wil .jpg jpgs twice, but it works for now.<commit_after>package catlady\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/patrickmn\/go-cache\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar (\n\tuserAgent = \"Catbot\/1 by cattebot\"\n)\n\ntype Image struct {\n\turl string\n\ttitle string\n\tdomain string\n}\n\ntype AuthToken struct {\n\tAccessToken string `json:\"access_token\"`\n\tExpiresIn int `json:\"expires_in\"`\n\tScope string `json:\"scope\"`\n\tTokenType string `json:\"token_type\"`\n}\n\ntype CatLady struct {\n\ttoken AuthToken\n\tlastTokenTime time.Time\n\tcatCache *cache.Cache\n\tredditUsername string\n\tredditPassword string\n\tredditAppId string\n\tredditAppSecret string\n\tsubreddits map[string]string\n}\n\n\/\/ Constructs a new Catlady object including creating a new cache object.\nfunc NewCatLady(username string, password string, appid string, appsecret string, subreddits map[string]string, logLevel log.Level) *CatLady {\n\tlog.SetLevel(logLevel)\n\tc := &CatLady{\n\t\tcatCache: cache.New(5*time.Minute, 30*time.Second),\n\t\tredditUsername: username,\n\t\tredditPassword: password,\n\t\tredditAppId: appid,\n\t\tredditAppSecret: appsecret,\n\t\tsubreddits: subreddits,\n\t}\n\treturn c\n}\n\nfunc (c *CatLady) getToken() {\n\tclient := &http.Client{}\n\tdata := url.Values{}\n\tdata.Set(\"grant_type\", \"password\")\n\tdata.Add(\"username\", c.redditUsername)\n\tdata.Add(\"password\", c.redditPassword)\n\treq, err := http.NewRequest(\"POST\", \"https:\/\/www.reddit.com\/api\/v1\/access_token\", strings.NewReader(data.Encode()))\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Failed to build Authentication POST\")\n\t}\n\treq.Header.Add(\"User-Agent\", userAgent)\n\treq.SetBasicAuth(c.redditAppId, c.redditAppSecret)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Failed to login to Reddit\")\n\t}\n\tcontents, err := ioutil.ReadAll(resp.Body)\n\tjson.Unmarshal(contents, &c.token)\n\tc.lastTokenTime = time.Now()\n\tlog.WithFields(log.Fields{\n\t\t\"AccessToken\": c.token.AccessToken,\n\t\t\"ExpiresIn\": c.token.ExpiresIn,\n\t\t\"Scope\": c.token.Scope,\n\t\t\"tokenType\": c.token.TokenType,\n\t\t\"lastTokenTime\": c.lastTokenTime,\n\t}).Debug(\"Got token!\")\n}\n\nfunc randInt(min int, max int) int {\n\tif max <= min {\n\t\treturn max\n\t}\n\trand.Seed(time.Now().UTC().UnixNano())\n\treturn min + rand.Intn(max-min)\n}\n\nfunc (c *CatLady) getReddit(sub string) RedditResponse {\n\tlog.WithField(\"subreddit\", sub).Debug(\"Getting Reddit\")\n\tlog.WithField(\"token\", c.token).Debug(\"token value\")\n\tif c.token.ExpiresIn == 0 || time.Since(c.lastTokenTime).Seconds() >= float64(c.token.ExpiresIn) {\n\t\tc.getToken()\n\t}\n\tclient := &http.Client{}\n\treqUrl := fmt.Sprintf(\"https:\/\/oauth.reddit.com\/r\/%s.json\", sub)\n\tlog.WithField(\"url\", reqUrl).Debug(\"Request URL\")\n\treq, err := http.NewRequest(\"GET\", reqUrl, nil)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Failed request to reddit\")\n\t}\n\treq.Header.Add(\"User-Agent\", userAgent)\n\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"%s %s\", c.token.TokenType, c.token.AccessToken))\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Getting subreddit failed\")\n\t}\n\tcontents, err := ioutil.ReadAll(resp.Body)\n\tlog.WithField(\"rawjson\", string(contents)).Debug(\"Raw response from reddit\")\n\tvar subReddit RedditResponse\n\tjson.Unmarshal(contents, &subReddit)\n\treturn subReddit\n}\n\nfunc checkForImage(url string) bool {\n\twhitelist := [...]string{\"imgur.com\", \"imgur\", \"giphy\", \"flickr\", \"photobucket\", \"youtube\", \"youtu.be\", \"gif\", \"gifv\", \"png\", \"jpg\", \"tiff\", \"webem\", \"bmp\", \"flv\", \"mpg\", \"mpeg\", \"avi\"}\n\tfor _, thing := range whitelist {\n\t\tif strings.Contains(url, thing) {\n\t\t\tlog.WithField(\"url\", url).Debug(\"Found Image\")\n\t\t\treturn true\n\t\t}\n\t}\n\tlog.WithField(\"url\", url).Debug(\"Didn't Find Image\")\n\treturn false\n}\n\nfunc cleanURL(url string) string {\n\tif strings.Contains(url, \"imgur\") {\n\t\tlog.WithField(\"url\", url).Debug(\"Found imgur url\")\n\t\textention := string(url[len(url)-3])\n\t\tif !strings.Contains(url, \"\/a\/\") && extention != \"gif\" && extention != \"jpg\" && extention != \"ifv\" && extention != \"png\" {\n\t\t\turl = url + \".jpg\"\n\t\t} else if extention == \"gif\" {\n\t\t\turl = url + \"v\"\n\t\t\tlog.WithField(\"url\", url).Debug(\"Converting to gifv\")\n\t\t}\n\t}\n\treturn url\n}\n\n\/\/ Returns a url for a given reddit\nfunc (c *CatLady) GetImage(sub string) string {\n\tvar submissions RedditResponse\n\tif subs, found := c.catCache.Get(sub); !found {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"cache\": false,\n\t\t\t\"subreddit\": sub,\n\t\t}).Info(\"Subreddit not found in cache.\")\n\t\tsubmissions = c.getReddit(sub)\n\t\tc.catCache.Set(sub, submissions, cache.DefaultExpiration)\n\t} else {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"cache\": true,\n\t\t\t\"subreddit\": sub,\n\t\t}).Info(\"Subreddit Found in Cache.\")\n\n\t\tsubmissions = subs.(RedditResponse)\n\n\t}\n\tsize := len(submissions.Data.Children)\n\tif size == 0 {\n\t\treturn \"\"\n\t}\n\tcount := 0\n\tnoImage := true\n\tfor noImage {\n\t\tcount += 1\n\t\trandom := randInt(0, size-1)\n\t\ts := submissions.Data.Children[random].Data\n\t\tif !s.Over18 {\n\t\t\tif checkForImage(s.URL) {\n\t\t\t\tnoImage = false\n\t\t\t\treturn cleanURL(s.URL)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.WithField(\"nsfw\", \"true\").Info(\"NSFW Link Found.\")\n\t\t}\n\t\tif count >= size {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"subreddit\": sub,\n\t\t\t\t\"size\": size,\n\t\t\t}).Info(\"I ran out of links\")\n\t\t\treturn \"\"\n\t\t}\n\t}\n\treturn \"\"\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"net\"\n\t\"log\"\n\t\"os\"\n)\n\nconst (\n\tprotocolVersion = 2\n\n\t\/\/ Packet type IDs\n\tpacketIDLogin = 0x1\n\tpacketIDHandshake = 0x2\n)\n\nfunc ReadByte(conn net.Conn) (b byte, err os.Error) {\n\tbs := make([]byte, 1)\n\t_, err = conn.Read(bs)\n\treturn bs[0], err\n}\n\nfunc ReadShort(conn net.Conn) (i int, err os.Error) {\n\tbs := make([]byte, 2)\n\t_, err = conn.Read(bs)\n\treturn int(uint16(bs[0]) << 8 | uint16(bs[1])), err\n}\n\nfunc WriteShort(conn net.Conn, i int) (err os.Error) {\n\tbs := []byte{byte(i >> 8), byte(i)}\n\t_, err = conn.Write(bs)\n\treturn err\n}\n\nfunc ReadInt(conn net.Conn) (i int, err os.Error) {\n\tbs := make([]byte, 4)\n\t_, err = conn.Read(bs)\n\treturn int(uint32(bs[0]) << 24 | uint32(bs[1]) << 16 | uint32(bs[2]) << 8 | uint32(bs[3])), err\n}\n\nfunc ReadString(conn net.Conn) (s string, err os.Error) {\n\tn, e := ReadShort(conn)\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\n\tbs := make([]byte, n)\n\t_, err = conn.Read(bs)\n\treturn string(bs), err\n}\n\nfunc WriteString(conn net.Conn, s string) (err os.Error) {\n\tbs := []byte(s)\n\n\terr = WriteShort(conn, len(bs))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = conn.Write(bs)\n\treturn err\n}\n\nfunc ReadHandshake(conn net.Conn) (username string, err os.Error) {\n\tpacketID, e := ReadByte(conn)\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\tif packetID != packetIDHandshake {\n\t\tlog.Exitf(\"ReadHandshake: invalid packet ID %#x\", packetID)\n\t}\n\n\treturn ReadString(conn)\n}\n\nfunc WriteHandshake(conn net.Conn, reply string) (err os.Error) {\n\t_, err = conn.Write([]byte{packetIDHandshake})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn WriteString(conn, reply)\n}\n\nfunc ReadLogin(conn net.Conn) (username, password string, err os.Error) {\n\tpacketID, e := ReadByte(conn)\n\tif e != nil {\n\t\treturn \"\", \"\", e\n\t}\n\tif packetID != packetIDLogin {\n\t\tlog.Exitf(\"ReadLogin: invalid packet ID %#x\", packetID)\n\t}\n\n\tversion, e2 := ReadInt(conn)\n\tif e2 != nil {\n\t\treturn \"\", \"\", e2\n\t}\n\tif version != protocolVersion {\n\t\tlog.Exit(\"ReadLogin: unsupported protocol version %#x\", version)\n\t}\n\n\tusername, e3 := ReadString(conn)\n\tif e3 != nil {\n\t\treturn \"\", \"\", e3\n\t}\n\n\tpassword, e4 := ReadString(conn)\n\tif e4 != nil {\n\t\treturn \"\", \"\", e4\n\t}\n\n\treturn username, password, nil\n}\n\nfunc WriteLogin(conn net.Conn) (err os.Error) {\n\t_, err = conn.Write([]byte{packetIDLogin, 0, 0, 0, 0, 0, 0, 0, 0})\n\treturn err\n}\n\nfunc main() {\n\tlistener, e := net.Listen(\"tcp\", \":35124\")\n\tif e != nil {\n\t\tlog.Exit(\"Listen: \", e.String())\n\t}\n\n\tconn, e2 := listener.Accept()\n\tif e2 != nil {\n\t\tlog.Exit(\"Accept: \", e2.String())\n\t}\n\n\tusername, e3 := ReadHandshake(conn)\n\tif e3 != nil {\n\t\tlog.Exit(\"ReadHandshake: \", e3.String())\n\t}\n\tlog.Stderr(\"username: \", username)\n\tWriteHandshake(conn, \"-\")\n\n\tusername2, password, e4 := ReadLogin(conn)\n\tif e4 != nil {\n\t\tlog.Exit(\"ReadLogin: \", e4.String())\n\t}\n\tlog.Stderr(\"username: \", username2)\n\tlog.Stderr(\"password: \", password)\n\tWriteLogin(conn)\n}\n<commit_msg>Serve client session in a goroutine<commit_after>package main\n\nimport (\n\t\"net\"\n\t\"log\"\n\t\"os\"\n\t\"fmt\"\n)\n\nconst (\n\tprotocolVersion = 2\n\n\t\/\/ Packet type IDs\n\tpacketIDLogin = 0x1\n\tpacketIDHandshake = 0x2\n)\n\nfunc ReadByte(conn net.Conn) (b byte, err os.Error) {\n\tbs := make([]byte, 1)\n\t_, err = conn.Read(bs)\n\treturn bs[0], err\n}\n\nfunc ReadShort(conn net.Conn) (i int, err os.Error) {\n\tbs := make([]byte, 2)\n\t_, err = conn.Read(bs)\n\treturn int(uint16(bs[0]) << 8 | uint16(bs[1])), err\n}\n\nfunc WriteShort(conn net.Conn, i int) (err os.Error) {\n\tbs := []byte{byte(i >> 8), byte(i)}\n\t_, err = conn.Write(bs)\n\treturn err\n}\n\nfunc ReadInt(conn net.Conn) (i int, err os.Error) {\n\tbs := make([]byte, 4)\n\t_, err = conn.Read(bs)\n\treturn int(uint32(bs[0]) << 24 | uint32(bs[1]) << 16 | uint32(bs[2]) << 8 | uint32(bs[3])), err\n}\n\nfunc ReadString(conn net.Conn) (s string, err os.Error) {\n\tn, e := ReadShort(conn)\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\n\tbs := make([]byte, n)\n\t_, err = conn.Read(bs)\n\treturn string(bs), err\n}\n\nfunc WriteString(conn net.Conn, s string) (err os.Error) {\n\tbs := []byte(s)\n\n\terr = WriteShort(conn, len(bs))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = conn.Write(bs)\n\treturn err\n}\n\nfunc ReadHandshake(conn net.Conn) (username string, err os.Error) {\n\tpacketID, e := ReadByte(conn)\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\tif packetID != packetIDHandshake {\n\t\tlog.Exitf(\"ReadHandshake: invalid packet ID %#x\", packetID)\n\t}\n\n\treturn ReadString(conn)\n}\n\nfunc WriteHandshake(conn net.Conn, reply string) (err os.Error) {\n\t_, err = conn.Write([]byte{packetIDHandshake})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn WriteString(conn, reply)\n}\n\nfunc ReadLogin(conn net.Conn) (username, password string, err os.Error) {\n\tpacketID, e := ReadByte(conn)\n\tif e != nil {\n\t\treturn \"\", \"\", e\n\t}\n\tif packetID != packetIDLogin {\n\t\tlog.Exitf(\"ReadLogin: invalid packet ID %#x\", packetID)\n\t}\n\n\tversion, e2 := ReadInt(conn)\n\tif e2 != nil {\n\t\treturn \"\", \"\", e2\n\t}\n\tif version != protocolVersion {\n\t\tlog.Exit(\"ReadLogin: unsupported protocol version %#x\", version)\n\t}\n\n\tusername, e3 := ReadString(conn)\n\tif e3 != nil {\n\t\treturn \"\", \"\", e3\n\t}\n\n\tpassword, e4 := ReadString(conn)\n\tif e4 != nil {\n\t\treturn \"\", \"\", e4\n\t}\n\n\treturn username, password, nil\n}\n\nfunc WriteLogin(conn net.Conn) (err os.Error) {\n\t_, err = conn.Write([]byte{packetIDLogin, 0, 0, 0, 0, 0, 0, 0, 0})\n\treturn err\n}\n\nfunc StartSession(conn net.Conn) {\n\tlog.Stderr(\"Client connected from \", conn.RemoteAddr())\n\n\tusername, e := ReadHandshake(conn)\n\tif e != nil {\n\t\tpanic(fmt.Sprint(\"ReadHandshake: \", e.String()))\n\t}\n\tlog.Stderr(\"username: \", username)\n\tWriteHandshake(conn, \"-\")\n\n\t_, _, e2 := ReadLogin(conn)\n\tif e2 != nil {\n\t\tpanic(fmt.Sprint(\"ReadLogin: \", e2.String()))\n\t}\n\tWriteLogin(conn)\n}\n\nfunc ServeSession(conn net.Conn) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tlog.Stderr(err)\n\t\t}\n\t\tconn.Close()\n\t}()\n\n\tStartSession(conn)\n}\n\nfunc Serve(addr string) {\n\tlistener, e := net.Listen(\"tcp\", addr)\n\tif e != nil {\n\t\tlog.Exit(\"Listen: \", e.String())\n\t}\n\tlog.Stderr(\"Listening on \", addr)\n\n\tfor {\n\t\tconn, e2 := listener.Accept()\n\t\tif e2 != nil {\n\t\t\tlog.Stderr(\"Accept: \", e2.String())\n\t\t\tcontinue\n\t\t}\n\n\t\tgo ServeSession(conn)\n\t}\n}\n\nfunc main() {\n\tServe(\":25565\")\n}\n<|endoftext|>"} {"text":"<commit_before>package feature\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"unicode\"\n\n\t\"github.com\/ikawaha\/kagome\/tokenizer\"\n\t\"github.com\/jdkato\/prose\/tag\"\n\t\"github.com\/jdkato\/prose\/tokenize\"\n)\n\ntype FeatureVector []string\n\nfunc (fv *FeatureVector) MarshalBinary() ([]byte, error) {\n\tjson, err := json.Marshal(fv)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []byte(json), nil\n}\n\nfunc (fv *FeatureVector) UnmarshalBinary(data []byte) error {\n\terr := json.Unmarshal(data, fv)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nvar (\n\tjapaneseTokenizer tokenizer.Tokenizer\n\tjapaneseTokenizerOnce sync.Once\n\tenglishTokenizer *tokenize.TreebankWordTokenizer\n\tenglishTokenizerOnce sync.Once\n\tenglishTagger *tag.PerceptronTagger\n\tenglishTaggerOnce sync.Once\n)\n\nfunc GetJapaneseTokenizer() tokenizer.Tokenizer {\n\tjapaneseTokenizerOnce.Do(func() {\n\t\tjapaneseTokenizer = tokenizer.New()\n\t})\n\n\treturn japaneseTokenizer\n}\n\nfunc GetEnglishTokenizer() *tokenize.TreebankWordTokenizer {\n\tenglishTokenizerOnce.Do(func() {\n\t\tenglishTokenizer = tokenize.NewTreebankWordTokenizer()\n\t})\n\treturn englishTokenizer\n}\n\nfunc GetEnglishTagger() *tag.PerceptronTagger {\n\tenglishTaggerOnce.Do(func() {\n\t\tenglishTagger = tag.NewPerceptronTagger()\n\t})\n\treturn englishTagger\n}\n\nfunc isJapanese(str string) bool {\n\tflag := false\n\tfor _, r := range str {\n\t\tif unicode.In(r, unicode.Hiragana) || unicode.In(r, unicode.Katakana) {\n\t\t\tflag = true\n\t\t}\n\t}\n\n\tif strings.ContainsAny(str, \"。、\") {\n\t\tflag = true\n\t}\n\n\treturn flag\n}\n\nfunc extractEngNounFeaturesWithoutPrefix(s string) FeatureVector {\n\tvar fv FeatureVector\n\tif s == \"\" {\n\t\treturn fv\n\t}\n\n\twords := GetEnglishTokenizer().Tokenize(s)\n\ttagger := GetEnglishTagger()\n\tfor _, tok := range tagger.Tag(words) {\n\t\tswitch tok.Tag {\n\t\t\/\/ https:\/\/www.ling.upenn.edu\/courses\/Fall_2003\/ling001\/penn_treebank_pos.html\n\t\tcase \"NN\", \"NNS\", \"NNP\", \"NNPS\", \"PRP\", \"PRP$\":\n\t\t\tfv = append(fv, strings.ToLower(tok.Text))\n\t\t}\n\t}\n\n\treturn fv\n}\n\nfunc extractEngNounFeatures(s string, prefix string) FeatureVector {\n\tvar fv FeatureVector\n\tfor _, surface := range extractEngNounFeaturesWithoutPrefix(s) {\n\t\tfv = append(fv, prefix+\":\"+surface)\n\t}\n\treturn fv\n}\n\nfunc ExtractJpnNounFeaturesWithoutPrefix(s string) FeatureVector {\n\tvar fv FeatureVector\n\tif s == \"\" {\n\t\treturn fv\n\t}\n\tt := GetJapaneseTokenizer()\n\ttokens := t.Tokenize(strings.ToLower(s))\n\tfor _, token := range tokens {\n\t\tif token.Pos() == \"名詞\" {\n\t\t\tsurface := token.Surface\n\t\t\tif len(token.Features()) >= 2 && token.Features()[1] == \"数\" {\n\t\t\t\tsurface = \"NUM\"\n\t\t\t}\n\t\t\tfv = append(fv, surface)\n\t\t}\n\t}\n\treturn fv\n}\n\nfunc ExtractJpnNounFeatures(s string, prefix string) FeatureVector {\n\tvar fv FeatureVector\n\tfor _, surface := range ExtractJpnNounFeaturesWithoutPrefix(s) {\n\t\tfv = append(fv, prefix+\":\"+surface)\n\t}\n\treturn fv\n}\n\nfunc ExtractNounFeatures(s string, prefix string) FeatureVector {\n\tif isJapanese(s) {\n\t\treturn ExtractJpnNounFeatures(s, prefix)\n\t} else {\n\t\treturn extractEngNounFeatures(s, prefix)\n\t}\n}\n\nfunc ExtractNounFeaturesWithoutPrefix(s string) FeatureVector {\n\tif isJapanese(s) {\n\t\treturn ExtractJpnNounFeaturesWithoutPrefix(s)\n\t} else {\n\t\treturn extractEngNounFeaturesWithoutPrefix(s)\n\t}\n}\n\nfunc ExtractHostFeature(urlString string) string {\n\tprefix := \"HOST\"\n\tu, err := url.Parse(urlString)\n\tif err != nil {\n\t\treturn prefix + \":INVALID_HOST\"\n\t}\n\treturn prefix + \":\" + u.Host\n}\n\nfunc ExtractPath(urlString string) string {\n\tpath := \"\"\n\tu, err := url.Parse(urlString)\n\tif err != nil {\n\t\treturn path\n\t}\n\treturn u.Path\n}\n<commit_msg>統一してポインタを持つようにしておく<commit_after>package feature\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"sync\"\n\t\"unicode\"\n\n\t\"github.com\/ikawaha\/kagome\/tokenizer\"\n\t\"github.com\/jdkato\/prose\/tag\"\n\t\"github.com\/jdkato\/prose\/tokenize\"\n)\n\ntype FeatureVector []string\n\nfunc (fv *FeatureVector) MarshalBinary() ([]byte, error) {\n\tjson, err := json.Marshal(fv)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []byte(json), nil\n}\n\nfunc (fv *FeatureVector) UnmarshalBinary(data []byte) error {\n\terr := json.Unmarshal(data, fv)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nvar (\n\tjapaneseTokenizer *tokenizer.Tokenizer\n\tjapaneseTokenizerOnce sync.Once\n\tenglishTokenizer *tokenize.TreebankWordTokenizer\n\tenglishTokenizerOnce sync.Once\n\tenglishTagger *tag.PerceptronTagger\n\tenglishTaggerOnce sync.Once\n)\n\nfunc GetJapaneseTokenizer() *tokenizer.Tokenizer {\n\tjapaneseTokenizerOnce.Do(func() {\n\t\tt := tokenizer.New()\n\t\tjapaneseTokenizer = &t\n\t})\n\n\treturn japaneseTokenizer\n}\n\nfunc GetEnglishTokenizer() *tokenize.TreebankWordTokenizer {\n\tenglishTokenizerOnce.Do(func() {\n\t\tenglishTokenizer = tokenize.NewTreebankWordTokenizer()\n\t})\n\treturn englishTokenizer\n}\n\nfunc GetEnglishTagger() *tag.PerceptronTagger {\n\tenglishTaggerOnce.Do(func() {\n\t\tenglishTagger = tag.NewPerceptronTagger()\n\t})\n\treturn englishTagger\n}\n\nfunc isJapanese(str string) bool {\n\tflag := false\n\tfor _, r := range str {\n\t\tif unicode.In(r, unicode.Hiragana) || unicode.In(r, unicode.Katakana) {\n\t\t\tflag = true\n\t\t}\n\t}\n\n\tif strings.ContainsAny(str, \"。、\") {\n\t\tflag = true\n\t}\n\n\treturn flag\n}\n\nfunc extractEngNounFeaturesWithoutPrefix(s string) FeatureVector {\n\tvar fv FeatureVector\n\tif s == \"\" {\n\t\treturn fv\n\t}\n\n\twords := GetEnglishTokenizer().Tokenize(s)\n\ttagger := GetEnglishTagger()\n\tfor _, tok := range tagger.Tag(words) {\n\t\tswitch tok.Tag {\n\t\t\/\/ https:\/\/www.ling.upenn.edu\/courses\/Fall_2003\/ling001\/penn_treebank_pos.html\n\t\tcase \"NN\", \"NNS\", \"NNP\", \"NNPS\", \"PRP\", \"PRP$\":\n\t\t\tfv = append(fv, strings.ToLower(tok.Text))\n\t\t}\n\t}\n\n\treturn fv\n}\n\nfunc extractEngNounFeatures(s string, prefix string) FeatureVector {\n\tvar fv FeatureVector\n\tfor _, surface := range extractEngNounFeaturesWithoutPrefix(s) {\n\t\tfv = append(fv, prefix+\":\"+surface)\n\t}\n\treturn fv\n}\n\nfunc ExtractJpnNounFeaturesWithoutPrefix(s string) FeatureVector {\n\tvar fv FeatureVector\n\tif s == \"\" {\n\t\treturn fv\n\t}\n\tt := GetJapaneseTokenizer()\n\ttokens := t.Tokenize(strings.ToLower(s))\n\tfor _, token := range tokens {\n\t\tif token.Pos() == \"名詞\" {\n\t\t\tsurface := token.Surface\n\t\t\tif len(token.Features()) >= 2 && token.Features()[1] == \"数\" {\n\t\t\t\tsurface = \"NUM\"\n\t\t\t}\n\t\t\tfv = append(fv, surface)\n\t\t}\n\t}\n\treturn fv\n}\n\nfunc ExtractJpnNounFeatures(s string, prefix string) FeatureVector {\n\tvar fv FeatureVector\n\tfor _, surface := range ExtractJpnNounFeaturesWithoutPrefix(s) {\n\t\tfv = append(fv, prefix+\":\"+surface)\n\t}\n\treturn fv\n}\n\nfunc ExtractNounFeatures(s string, prefix string) FeatureVector {\n\tif isJapanese(s) {\n\t\treturn ExtractJpnNounFeatures(s, prefix)\n\t} else {\n\t\treturn extractEngNounFeatures(s, prefix)\n\t}\n}\n\nfunc ExtractNounFeaturesWithoutPrefix(s string) FeatureVector {\n\tif isJapanese(s) {\n\t\treturn ExtractJpnNounFeaturesWithoutPrefix(s)\n\t} else {\n\t\treturn extractEngNounFeaturesWithoutPrefix(s)\n\t}\n}\n\nfunc ExtractHostFeature(urlString string) string {\n\tprefix := \"HOST\"\n\tu, err := url.Parse(urlString)\n\tif err != nil {\n\t\treturn prefix + \":INVALID_HOST\"\n\t}\n\treturn prefix + \":\" + u.Host\n}\n\nfunc ExtractPath(urlString string) string {\n\tpath := \"\"\n\tu, err := url.Parse(urlString)\n\tif err != nil {\n\t\treturn path\n\t}\n\treturn u.Path\n}\n<|endoftext|>"} {"text":"<commit_before>package rpc\n\nimport (\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ MuxConn is a connection that can be used bi-directionally for RPC. Normally,\n\/\/ Go RPC only allows client-to-server connections. This allows the client\n\/\/ to actually act as a server as well.\n\/\/\n\/\/ MuxConn works using a fairly dumb multiplexing technique of simply\n\/\/ framing every piece of data sent into a prefix + data format. Streams\n\/\/ are established using a subset of the TCP protocol. Only a subset is\n\/\/ necessary since we assume ordering on the underlying RWC.\ntype MuxConn struct {\n\tcurId uint32\n\trwc io.ReadWriteCloser\n\tstreams map[uint32]*Stream\n\tmu sync.Mutex\n\twlock sync.Mutex\n}\n\ntype muxPacketType byte\n\nconst (\n\tmuxPacketSyn muxPacketType = iota\n\tmuxPacketAck\n\tmuxPacketFin\n\tmuxPacketData\n)\n\nfunc NewMuxConn(rwc io.ReadWriteCloser) *MuxConn {\n\tm := &MuxConn{\n\t\trwc: rwc,\n\t\tstreams: make(map[uint32]*Stream),\n\t}\n\n\tgo m.loop()\n\n\treturn m\n}\n\n\/\/ Close closes the underlying io.ReadWriteCloser. This will also close\n\/\/ all streams that are open.\nfunc (m *MuxConn) Close() error {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\t\/\/ Close all the streams\n\tfor _, w := range m.streams {\n\t\tw.Close()\n\t}\n\tm.streams = make(map[uint32]*Stream)\n\n\treturn m.rwc.Close()\n}\n\n\/\/ Accept accepts a multiplexed connection with the given ID. This\n\/\/ will block until a request is made to connect.\nfunc (m *MuxConn) Accept(id uint32) (io.ReadWriteCloser, error) {\n\tstream, err := m.openStream(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If the stream isn't closed, then it is already open somehow\n\tstream.mu.Lock()\n\tif stream.state != streamStateSynRecv && stream.state != streamStateClosed {\n\t\tstream.mu.Unlock()\n\t\treturn nil, fmt.Errorf(\"Stream %d already open in bad state: %d\", id, stream.state)\n\t}\n\n\tif stream.state == streamStateSynRecv {\n\t\t\/\/ Fast track establishing since we already got the syn\n\t\tstream.setState(streamStateEstablished)\n\t\tstream.mu.Unlock()\n\t}\n\n\tif stream.state != streamStateEstablished {\n\t\t\/\/ Go into the listening state\n\t\tstream.setState(streamStateListen)\n\t\tstream.mu.Unlock()\n\n\t\t\/\/ Wait for the connection to establish\n\tACCEPT_ESTABLISH_LOOP:\n\t\tfor {\n\t\t\ttime.Sleep(50 * time.Millisecond)\n\t\t\tstream.mu.Lock()\n\t\t\tswitch stream.state {\n\t\t\tcase streamStateListen:\n\t\t\t\tstream.mu.Unlock()\n\t\t\tcase streamStateEstablished:\n\t\t\t\tstream.mu.Unlock()\n\t\t\t\tbreak ACCEPT_ESTABLISH_LOOP\n\t\t\tdefault:\n\t\t\t\tdefer stream.mu.Unlock()\n\t\t\t\treturn nil, fmt.Errorf(\"Stream went to bad state: %d\", stream.state)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Send the ack down\n\tif _, err := m.write(stream.id, muxPacketAck, nil); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn stream, nil\n}\n\n\/\/ Dial opens a connection to the remote end using the given stream ID.\n\/\/ An Accept on the remote end will only work with if the IDs match.\nfunc (m *MuxConn) Dial(id uint32) (io.ReadWriteCloser, error) {\n\tstream, err := m.openStream(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If the stream isn't closed, then it is already open somehow\n\tstream.mu.Lock()\n\tif stream.state != streamStateClosed {\n\t\tstream.mu.Unlock()\n\t\treturn nil, fmt.Errorf(\"Stream %d already open in bad state: %d\", id, stream.state)\n\t}\n\n\t\/\/ Open a connection\n\tif _, err := m.write(stream.id, muxPacketSyn, nil); err != nil {\n\t\treturn nil, err\n\t}\n\tstream.setState(streamStateSynSent)\n\tstream.mu.Unlock()\n\n\tfor {\n\t\ttime.Sleep(50 * time.Millisecond)\n\t\tstream.mu.Lock()\n\t\tswitch stream.state {\n\t\tcase streamStateSynSent:\n\t\t\tstream.mu.Unlock()\n\t\tcase streamStateEstablished:\n\t\t\tstream.mu.Unlock()\n\t\t\treturn stream, nil\n\t\tdefault:\n\t\t\tdefer stream.mu.Unlock()\n\t\t\treturn nil, fmt.Errorf(\"Stream went to bad state: %d\", stream.state)\n\t\t}\n\t}\n}\n\n\/\/ NextId returns the next available stream ID that isn't currently\n\/\/ taken.\nfunc (m *MuxConn) NextId() uint32 {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tfor {\n\t\tresult := m.curId\n\t\tm.curId++\n\t\tif _, ok := m.streams[result]; !ok {\n\t\t\treturn result\n\t\t}\n\t}\n}\n\nfunc (m *MuxConn) openStream(id uint32) (*Stream, error) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tif stream, ok := m.streams[id]; ok {\n\t\treturn stream, nil\n\t}\n\n\t\/\/ Create the stream object and channel where data will be sent to\n\tdataR, dataW := io.Pipe()\n\twriteCh := make(chan []byte, 10)\n\n\t\/\/ Set the data channel so we can write to it.\n\tstream := &Stream{\n\t\tid: id,\n\t\tmux: m,\n\t\treader: dataR,\n\t\twriter: dataW,\n\t\twriteCh: writeCh,\n\t}\n\tstream.setState(streamStateClosed)\n\n\t\/\/ Start the goroutine that will read from the queue and write\n\t\/\/ data out.\n\tgo func() {\n\t\tfor {\n\t\t\tdata := <-writeCh\n\t\t\tif _, err := dataW.Write(data); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tm.streams[id] = stream\n\treturn m.streams[id], nil\n}\n\nfunc (m *MuxConn) loop() {\n\tdefer m.Close()\n\n\tvar id uint32\n\tvar packetType muxPacketType\n\tvar length int32\n\tfor {\n\t\tif err := binary.Read(m.rwc, binary.BigEndian, &id); err != nil {\n\t\t\tlog.Printf(\"[ERR] Error reading stream ID: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tif err := binary.Read(m.rwc, binary.BigEndian, &packetType); err != nil {\n\t\t\tlog.Printf(\"[ERR] Error reading packet type: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tif err := binary.Read(m.rwc, binary.BigEndian, &length); err != nil {\n\t\t\tlog.Printf(\"[ERR] Error reading length: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ TODO(mitchellh): probably would be better to re-use a buffer...\n\t\tdata := make([]byte, length)\n\t\tif length > 0 {\n\t\t\tif _, err := m.rwc.Read(data); err != nil {\n\t\t\t\tlog.Printf(\"[ERR] Error reading data: %s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tstream, err := m.openStream(id)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[ERR] Error opening stream %d: %s\", id, err)\n\t\t\treturn\n\t\t}\n\n\t\tswitch packetType {\n\t\tcase muxPacketAck:\n\t\t\tstream.mu.Lock()\n\t\t\tif stream.state == streamStateSynSent {\n\t\t\t\tstream.setState(streamStateEstablished)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"[ERR] Ack received for stream in state: %d\", stream.state)\n\t\t\t}\n\t\t\tstream.mu.Unlock()\n\t\tcase muxPacketSyn:\n\t\t\tstream.mu.Lock()\n\t\t\tswitch stream.state {\n\t\t\tcase streamStateClosed:\n\t\t\t\tstream.setState(streamStateSynRecv)\n\t\t\tcase streamStateListen:\n\t\t\t\tstream.setState(streamStateEstablished)\n\t\t\tdefault:\n\t\t\t\tlog.Printf(\"[ERR] Syn received for stream in state: %d\", stream.state)\n\t\t\t}\n\t\t\tstream.mu.Unlock()\n\t\tcase muxPacketFin:\n\t\t\tstream.mu.Lock()\n\t\t\tstream.setState(streamStateClosed)\n\t\t\tstream.writer.Close()\n\t\t\tstream.mu.Unlock()\n\n\t\t\tm.mu.Lock()\n\t\t\tdelete(m.streams, stream.id)\n\t\t\tm.mu.Unlock()\n\t\tcase muxPacketData:\n\t\t\tstream.mu.Lock()\n\t\t\tif stream.state == streamStateEstablished {\n\t\t\t\tselect {\n\t\t\t\tcase stream.writeCh <- data:\n\t\t\t\tdefault:\n\t\t\t\t\tlog.Printf(\"[ERR] Failed to write data, buffer full: %d\", id)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"[ERR] Data received for stream in state: %d\", stream.state)\n\t\t\t}\n\t\t\tstream.mu.Unlock()\n\t\t}\n\t}\n}\n\nfunc (m *MuxConn) write(id uint32, dataType muxPacketType, p []byte) (int, error) {\n\tm.wlock.Lock()\n\tdefer m.wlock.Unlock()\n\n\tif err := binary.Write(m.rwc, binary.BigEndian, id); err != nil {\n\t\treturn 0, err\n\t}\n\tif err := binary.Write(m.rwc, binary.BigEndian, byte(dataType)); err != nil {\n\t\treturn 0, err\n\t}\n\tif err := binary.Write(m.rwc, binary.BigEndian, int32(len(p))); err != nil {\n\t\treturn 0, err\n\t}\n\tif len(p) == 0 {\n\t\treturn 0, nil\n\t}\n\treturn m.rwc.Write(p)\n}\n\n\/\/ Stream is a single stream of data and implements io.ReadWriteCloser\ntype Stream struct {\n\tid uint32\n\tmux *MuxConn\n\treader io.Reader\n\twriter io.WriteCloser\n\tstate streamState\n\tstateUpdated time.Time\n\tmu sync.Mutex\n\twriteCh chan<- []byte\n}\n\ntype streamState byte\n\nconst (\n\tstreamStateClosed streamState = iota\n\tstreamStateListen\n\tstreamStateSynRecv\n\tstreamStateSynSent\n\tstreamStateEstablished\n\tstreamStateFinWait\n)\n\nfunc (s *Stream) Close() error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif s.state != streamStateEstablished {\n\t\treturn fmt.Errorf(\"Stream in bad state: %d\", s.state)\n\t}\n\n\tif _, err := s.mux.write(s.id, muxPacketFin, nil); err != nil {\n\t\treturn err\n\t}\n\n\ts.setState(streamStateClosed)\n\ts.writer.Close()\n\treturn nil\n}\n\nfunc (s *Stream) Read(p []byte) (int, error) {\n\treturn s.reader.Read(p)\n}\n\nfunc (s *Stream) Write(p []byte) (int, error) {\n\treturn s.mux.write(s.id, muxPacketData, p)\n}\n\nfunc (s *Stream) setState(state streamState) {\n\ts.state = state\n\ts.stateUpdated = time.Now().UTC()\n}\n<commit_msg>packer\/rpc: better close states<commit_after>package rpc\n\nimport (\n\t\"encoding\/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n)\n\n\/\/ MuxConn is a connection that can be used bi-directionally for RPC. Normally,\n\/\/ Go RPC only allows client-to-server connections. This allows the client\n\/\/ to actually act as a server as well.\n\/\/\n\/\/ MuxConn works using a fairly dumb multiplexing technique of simply\n\/\/ framing every piece of data sent into a prefix + data format. Streams\n\/\/ are established using a subset of the TCP protocol. Only a subset is\n\/\/ necessary since we assume ordering on the underlying RWC.\ntype MuxConn struct {\n\tcurId uint32\n\trwc io.ReadWriteCloser\n\tstreams map[uint32]*Stream\n\tmu sync.RWMutex\n\twlock sync.Mutex\n}\n\ntype muxPacketType byte\n\nconst (\n\tmuxPacketSyn muxPacketType = iota\n\tmuxPacketAck\n\tmuxPacketFin\n\tmuxPacketData\n)\n\nfunc NewMuxConn(rwc io.ReadWriteCloser) *MuxConn {\n\tm := &MuxConn{\n\t\trwc: rwc,\n\t\tstreams: make(map[uint32]*Stream),\n\t}\n\n\tgo m.loop()\n\n\treturn m\n}\n\n\/\/ Close closes the underlying io.ReadWriteCloser. This will also close\n\/\/ all streams that are open.\nfunc (m *MuxConn) Close() error {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\n\t\/\/ Close all the streams\n\tfor _, w := range m.streams {\n\t\tw.Close()\n\t}\n\tm.streams = make(map[uint32]*Stream)\n\n\treturn m.rwc.Close()\n}\n\n\/\/ Accept accepts a multiplexed connection with the given ID. This\n\/\/ will block until a request is made to connect.\nfunc (m *MuxConn) Accept(id uint32) (io.ReadWriteCloser, error) {\n\tstream, err := m.openStream(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If the stream isn't closed, then it is already open somehow\n\tstream.mu.Lock()\n\tif stream.state != streamStateSynRecv && stream.state != streamStateClosed {\n\t\tstream.mu.Unlock()\n\t\treturn nil, fmt.Errorf(\"Stream %d already open in bad state: %d\", id, stream.state)\n\t}\n\n\tif stream.state == streamStateSynRecv {\n\t\t\/\/ Fast track establishing since we already got the syn\n\t\tstream.setState(streamStateEstablished)\n\t\tstream.mu.Unlock()\n\t}\n\n\tif stream.state != streamStateEstablished {\n\t\t\/\/ Go into the listening state\n\t\tstream.setState(streamStateListen)\n\t\tstream.mu.Unlock()\n\n\t\t\/\/ Wait for the connection to establish\n\tACCEPT_ESTABLISH_LOOP:\n\t\tfor {\n\t\t\ttime.Sleep(50 * time.Millisecond)\n\t\t\tstream.mu.Lock()\n\t\t\tswitch stream.state {\n\t\t\tcase streamStateListen:\n\t\t\t\tstream.mu.Unlock()\n\t\t\tcase streamStateClosed:\n\t\t\t\t\/\/ This can happen if it becomes established, some data is sent,\n\t\t\t\t\/\/ and it closed all within the time period we wait above.\n\t\t\t\t\/\/ This case will be fixed when we have edge-triggered checks.\n\t\t\t\tfallthrough\n\t\t\tcase streamStateEstablished:\n\t\t\t\tstream.mu.Unlock()\n\t\t\t\tbreak ACCEPT_ESTABLISH_LOOP\n\t\t\tdefault:\n\t\t\t\tdefer stream.mu.Unlock()\n\t\t\t\treturn nil, fmt.Errorf(\"Stream %d went to bad state: %d\", id, stream.state)\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Send the ack down\n\tif _, err := m.write(stream.id, muxPacketAck, nil); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn stream, nil\n}\n\n\/\/ Dial opens a connection to the remote end using the given stream ID.\n\/\/ An Accept on the remote end will only work with if the IDs match.\nfunc (m *MuxConn) Dial(id uint32) (io.ReadWriteCloser, error) {\n\tstream, err := m.openStream(id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If the stream isn't closed, then it is already open somehow\n\tstream.mu.Lock()\n\tif stream.state != streamStateClosed {\n\t\tstream.mu.Unlock()\n\t\treturn nil, fmt.Errorf(\"Stream %d already open in bad state: %d\", id, stream.state)\n\t}\n\n\t\/\/ Open a connection\n\tif _, err := m.write(stream.id, muxPacketSyn, nil); err != nil {\n\t\treturn nil, err\n\t}\n\tstream.setState(streamStateSynSent)\n\tstream.mu.Unlock()\n\n\tfor {\n\t\ttime.Sleep(50 * time.Millisecond)\n\t\tstream.mu.Lock()\n\t\tswitch stream.state {\n\t\tcase streamStateSynSent:\n\t\t\tstream.mu.Unlock()\n\t\tcase streamStateClosed:\n\t\t\t\/\/ This can happen if it becomes established, some data is sent,\n\t\t\t\/\/ and it closed all within the time period we wait above.\n\t\t\t\/\/ This case will be fixed when we have edge-triggered checks.\n\t\t\tfallthrough\n\t\tcase streamStateEstablished:\n\t\t\tstream.mu.Unlock()\n\t\t\treturn stream, nil\n\t\tdefault:\n\t\t\tdefer stream.mu.Unlock()\n\t\t\treturn nil, fmt.Errorf(\"Stream %d went to bad state: %d\", id, stream.state)\n\t\t}\n\t}\n}\n\n\/\/ NextId returns the next available stream ID that isn't currently\n\/\/ taken.\nfunc (m *MuxConn) NextId() uint32 {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tfor {\n\t\tresult := m.curId\n\t\tm.curId++\n\t\tif _, ok := m.streams[result]; !ok {\n\t\t\treturn result\n\t\t}\n\t}\n}\n\nfunc (m *MuxConn) openStream(id uint32) (*Stream, error) {\n\t\/\/ First grab a read-lock if we have the stream already we can\n\t\/\/ cheaply return it.\n\tm.mu.RLock()\n\tif stream, ok := m.streams[id]; ok {\n\t\tm.mu.RUnlock()\n\t\treturn stream, nil\n\t}\n\n\t\/\/ Now acquire a full blown write lock so we can create the stream\n\tm.mu.RUnlock()\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\t\/\/ We have to check this again because there is a time period\n\t\/\/ above where we couldn't lost this lock.\n\tif stream, ok := m.streams[id]; ok {\n\t\treturn stream, nil\n\t}\n\n\t\/\/ Create the stream object and channel where data will be sent to\n\tdataR, dataW := io.Pipe()\n\twriteCh := make(chan []byte, 10)\n\n\t\/\/ Set the data channel so we can write to it.\n\tstream := &Stream{\n\t\tid: id,\n\t\tmux: m,\n\t\treader: dataR,\n\t\twriteCh: writeCh,\n\t}\n\tstream.setState(streamStateClosed)\n\n\t\/\/ Start the goroutine that will read from the queue and write\n\t\/\/ data out.\n\tgo func() {\n\t\tdefer dataW.Close()\n\n\t\tfor {\n\t\t\tdata := <-writeCh\n\t\t\tif data == nil {\n\t\t\t\t\/\/ A nil is a tombstone letting us know we're done\n\t\t\t\t\/\/ accepting data.\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif _, err := dataW.Write(data); err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tm.streams[id] = stream\n\treturn m.streams[id], nil\n}\n\nfunc (m *MuxConn) loop() {\n\tdefer m.Close()\n\n\tvar id uint32\n\tvar packetType muxPacketType\n\tvar length int32\n\tfor {\n\t\tif err := binary.Read(m.rwc, binary.BigEndian, &id); err != nil {\n\t\t\tlog.Printf(\"[ERR] Error reading stream ID: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tif err := binary.Read(m.rwc, binary.BigEndian, &packetType); err != nil {\n\t\t\tlog.Printf(\"[ERR] Error reading packet type: %s\", err)\n\t\t\treturn\n\t\t}\n\t\tif err := binary.Read(m.rwc, binary.BigEndian, &length); err != nil {\n\t\t\tlog.Printf(\"[ERR] Error reading length: %s\", err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ TODO(mitchellh): probably would be better to re-use a buffer...\n\t\tdata := make([]byte, length)\n\t\tif length > 0 {\n\t\t\tif _, err := m.rwc.Read(data); err != nil {\n\t\t\t\tlog.Printf(\"[ERR] Error reading data: %s\", err)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tstream, err := m.openStream(id)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"[ERR] Error opening stream %d: %s\", id, err)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Printf(\"[DEBUG] Stream %d received packet %d\", id, packetType)\n\t\tswitch packetType {\n\t\tcase muxPacketAck:\n\t\t\tstream.mu.Lock()\n\t\t\tswitch stream.state {\n\t\t\tcase streamStateSynSent:\n\t\t\t\tstream.setState(streamStateEstablished)\n\t\t\tcase streamStateFinWait1:\n\t\t\t\tstream.remoteClose()\n\t\t\tdefault:\n\t\t\t\tlog.Printf(\"[ERR] Ack received for stream in state: %d\", stream.state)\n\t\t\t}\n\t\t\tstream.mu.Unlock()\n\t\tcase muxPacketSyn:\n\t\t\tstream.mu.Lock()\n\t\t\tswitch stream.state {\n\t\t\tcase streamStateClosed:\n\t\t\t\tstream.setState(streamStateSynRecv)\n\t\t\tcase streamStateListen:\n\t\t\t\tstream.setState(streamStateEstablished)\n\t\t\tdefault:\n\t\t\t\tlog.Printf(\"[ERR] Syn received for stream in state: %d\", stream.state)\n\t\t\t}\n\t\t\tstream.mu.Unlock()\n\t\tcase muxPacketFin:\n\t\t\tstream.mu.Lock()\n\t\t\tswitch stream.state {\n\t\t\tcase streamStateEstablished:\n\t\t\t\tm.write(id, muxPacketAck, nil)\n\t\t\t\tfallthrough\n\t\t\tcase streamStateFinWait1:\n\t\t\t\tstream.remoteClose()\n\n\t\t\t\t\/\/ Remove this stream from being active so that it\n\t\t\t\t\/\/ can be re-used\n\t\t\t\tm.mu.Lock()\n\t\t\t\tdelete(m.streams, stream.id)\n\t\t\t\tm.mu.Unlock()\n\t\t\tdefault:\n\t\t\t\tlog.Printf(\"[ERR] Fin received for stream %d in state: %d\", id, stream.state)\n\t\t\t}\n\t\t\tstream.mu.Unlock()\n\n\t\tcase muxPacketData:\n\t\t\tstream.mu.Lock()\n\t\t\tif stream.state == streamStateEstablished {\n\t\t\t\tselect {\n\t\t\t\tcase stream.writeCh <- data:\n\t\t\t\tdefault:\n\t\t\t\t\tlog.Printf(\"[ERR] Failed to write data, buffer full: %d\", id)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"[ERR] Data received for stream in state: %d\", stream.state)\n\t\t\t}\n\t\t\tstream.mu.Unlock()\n\t\t}\n\t}\n}\n\nfunc (m *MuxConn) write(id uint32, dataType muxPacketType, p []byte) (int, error) {\n\tm.wlock.Lock()\n\tdefer m.wlock.Unlock()\n\n\tif err := binary.Write(m.rwc, binary.BigEndian, id); err != nil {\n\t\treturn 0, err\n\t}\n\tif err := binary.Write(m.rwc, binary.BigEndian, byte(dataType)); err != nil {\n\t\treturn 0, err\n\t}\n\tif err := binary.Write(m.rwc, binary.BigEndian, int32(len(p))); err != nil {\n\t\treturn 0, err\n\t}\n\tif len(p) == 0 {\n\t\treturn 0, nil\n\t}\n\treturn m.rwc.Write(p)\n}\n\n\/\/ Stream is a single stream of data and implements io.ReadWriteCloser\ntype Stream struct {\n\tid uint32\n\tmux *MuxConn\n\treader io.Reader\n\tstate streamState\n\tstateUpdated time.Time\n\tmu sync.Mutex\n\twriteCh chan<- []byte\n}\n\ntype streamState byte\n\nconst (\n\tstreamStateClosed streamState = iota\n\tstreamStateListen\n\tstreamStateSynRecv\n\tstreamStateSynSent\n\tstreamStateEstablished\n\tstreamStateFinWait1\n)\n\nfunc (s *Stream) Close() error {\n\ts.mu.Lock()\n\tif s.state != streamStateEstablished {\n\t\ts.mu.Unlock()\n\t\treturn fmt.Errorf(\"Stream in bad state: %d\", s.state)\n\t}\n\n\tif _, err := s.mux.write(s.id, muxPacketFin, nil); err != nil {\n\t\treturn err\n\t}\n\ts.setState(streamStateFinWait1)\n\ts.mu.Unlock()\n\n\tfor {\n\t\ttime.Sleep(50 * time.Millisecond)\n\t\ts.mu.Lock()\n\t\tswitch s.state {\n\t\tcase streamStateFinWait1:\n\t\t\ts.mu.Unlock()\n\t\tcase streamStateClosed:\n\t\t\ts.mu.Unlock()\n\t\t\treturn nil\n\t\tdefault:\n\t\t\tdefer s.mu.Unlock()\n\t\t\treturn fmt.Errorf(\"Stream %d went to bad state: %d\", s.id, s.state)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (s *Stream) Read(p []byte) (int, error) {\n\treturn s.reader.Read(p)\n}\n\nfunc (s *Stream) Write(p []byte) (int, error) {\n\ts.mu.Lock()\n\tstate := s.state\n\ts.mu.Unlock()\n\n\tif state != streamStateEstablished {\n\t\treturn 0, fmt.Errorf(\"Stream in bad state to send: %d\", state)\n\t}\n\n\treturn s.mux.write(s.id, muxPacketData, p)\n}\n\nfunc (s *Stream) remoteClose() {\n\ts.setState(streamStateClosed)\n\ts.writeCh <- nil\n}\n\nfunc (s *Stream) setState(state streamState) {\n\ts.state = state\n\ts.stateUpdated = time.Now().UTC()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2016 Prometheus Team\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"go\/build\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\tshell \"github.com\/progrium\/go-shell\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/viper\"\n)\n\nvar (\n\tsh = shell.Run\n\tq = shell.Quote\n\tdocker = shell.Cmd(\"docker\").ErrFn()\n\tbuildContext = build.Default\n\tgoos = buildContext.GOOS\n\tgoarch = buildContext.GOARCH\n\tinfo = NewProjectInfo()\n\n\tcfgFile string\n\tuseViper bool\n\tverbose bool\n)\n\n\/\/ This represents the base command when called without any subcommands\nvar Promu = &cobra.Command{\n\tUse: \"promu\",\n\tShort: \"promu is the utility tool for Prometheus projects\",\n\tLong: `promu is the utility tool for Prometheus projects`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcmd.Help()\n\t},\n}\n\n\/\/ Execute adds all child commands to the root command sets flags appropriately.\n\/\/ This is called by main.main(). It only needs to happen once to the rootCmd.\nfunc Execute() {\n\tif err := Promu.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(-1)\n\t}\n}\n\n\/\/ init prepares cobra flags\nfunc init() {\n\tcobra.OnInitialize(initConfig)\n\n\tPromu.PersistentFlags().StringVar(&cfgFile, \"config\", \"\", \"Config file (default is .\/.promu.yml)\")\n\tPromu.PersistentFlags().BoolVarP(&verbose, \"verbose\", \"v\", false, \"Verbose output\")\n\tPromu.PersistentFlags().BoolVar(&useViper, \"viper\", true, \"Use Viper for configuration\")\n\n\tviper.BindPFlag(\"useViper\", Promu.PersistentFlags().Lookup(\"viper\"))\n\tviper.BindPFlag(\"verbose\", Promu.PersistentFlags().Lookup(\"verbose\"))\n}\n\n\/\/ initConfig reads in config file and ENV variables if set.\nfunc initConfig() {\n\tif useViper != true {\n\t\treturn\n\t}\n\tif cfgFile != \"\" { \/\/ enable ability to specify config file via flag\n\t\tviper.SetConfigFile(cfgFile)\n\t}\n\n\tviper.SetConfigName(\".promu\") \/\/ name of config file (without extension)\n\tviper.AddConfigPath(\".\") \/\/ look for config in the working directory\n\tviper.SetEnvPrefix(\"promu\")\n\tviper.AutomaticEnv() \/\/ read in environment variables that match\n\n\t\/\/ If a config file is found, read it in.\n\tif err := viper.ReadInConfig(); err != nil {\n\t\tfatalMsg(\"Error in config file\", err)\n\t}\n\tif verbose {\n\t\tfmt.Println(\"Using config file:\", viper.ConfigFileUsed())\n\t}\n\n\tsetDefaultConfigValues()\n}\n\nfunc setDefaultConfigValues() {\n\tif !viper.IsSet(\"build.binaries\") {\n\t\tbinaries := []map[string]string{{\"name\": info.Name, \"path\": \".\"}}\n\t\tviper.Set(\"build.binaries\", binaries)\n\t}\n\tif !viper.IsSet(\"build.prefix\") {\n\t\tviper.Set(\"build.prefix\", \".\")\n\t}\n\tif !viper.IsSet(\"crossbuild.platforms\") {\n\t\tplatforms := defaultMainPlatforms\n\t\tplatforms = append(platforms, defaultARMPlatforms...)\n\t\tplatforms = append(platforms, defaultPowerPCPlatforms...)\n\t\tplatforms = append(platforms, defaultMIPSPlatforms...)\n\t\tviper.Set(\"crossbuild.platforms\", platforms)\n\t}\n\tif !viper.IsSet(\"tarball.prefix\") {\n\t\tviper.Set(\"tarball.prefix\", \".\")\n\t}\n}\n\n\/\/ warn prints a non-fatal err\nfunc warn(err error) {\n\tfmt.Println(`\/!\\`, err)\n}\n\n\/\/ fatal prints a error and exit\nfunc fatal(err error) {\n\tfmt.Println(\"!!\", err)\n\tos.Exit(1)\n}\n\n\/\/ fatalMsg prints a message and exit\nfunc fatalMsg(msg string, err error) {\n\tfmt.Printf(\"!! %s: %s\\n\", msg, err)\n\tos.Exit(1)\n}\n\n\/\/ shellOutput executes a shell command and return the trimmed output\nfunc shellOutput(cmd string) string {\n\targs := strings.Split(cmd, \" \")\n\tout, _ := exec.Command(args[0], args[1:]...).Output()\n\treturn strings.Trim(string(out), \" \\n\")\n}\n\n\/\/ fileExists checks if a file exists\nfunc fileExists(path ...string) bool {\n\tfinfo, err := os.Stat(filepath.Join(path...))\n\tif err == nil && !finfo.IsDir() {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) || finfo.IsDir() {\n\t\treturn false\n\t}\n\tif err != nil {\n\t\tfatal(err)\n\t}\n\treturn true\n}\n\n\/\/ readFile reads a file and return the trimmed output\nfunc readFile(path string) string {\n\tdata, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn strings.Trim(string(data), \"\\n \")\n}\n\nfunc optArg(args []string, i int, default_ string) string {\n\tif i+1 > len(args) {\n\t\treturn default_\n\t}\n\treturn args[i]\n}\n\nfunc envOr(name, def string) string {\n\ts := os.Getenv(name)\n\tif s == \"\" {\n\t\treturn def\n\t}\n\treturn s\n}\n\nfunc stringInSlice(needle string, haystack []string) bool {\n\tfor _, hay := range haystack {\n\t\tif hay == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc hasRequiredConfigurations(configVars ...string) error {\n\tfor _, configVar := range configVars {\n\t\tif !viper.IsSet(configVar) {\n\t\t\treturn fmt.Errorf(\"missing required '%s' configuration\", configVar)\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>strings.Fields handles redundant spaces<commit_after>\/\/ Copyright © 2016 Prometheus Team\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"go\/build\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\tshell \"github.com\/progrium\/go-shell\"\n\t\"github.com\/spf13\/cobra\"\n\t\"github.com\/spf13\/viper\"\n)\n\nvar (\n\tsh = shell.Run\n\tq = shell.Quote\n\tdocker = shell.Cmd(\"docker\").ErrFn()\n\tbuildContext = build.Default\n\tgoos = buildContext.GOOS\n\tgoarch = buildContext.GOARCH\n\tinfo = NewProjectInfo()\n\n\tcfgFile string\n\tuseViper bool\n\tverbose bool\n)\n\n\/\/ This represents the base command when called without any subcommands\nvar Promu = &cobra.Command{\n\tUse: \"promu\",\n\tShort: \"promu is the utility tool for Prometheus projects\",\n\tLong: `promu is the utility tool for Prometheus projects`,\n\tRun: func(cmd *cobra.Command, args []string) {\n\t\tcmd.Help()\n\t},\n}\n\n\/\/ Execute adds all child commands to the root command sets flags appropriately.\n\/\/ This is called by main.main(). It only needs to happen once to the rootCmd.\nfunc Execute() {\n\tif err := Promu.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(-1)\n\t}\n}\n\n\/\/ init prepares cobra flags\nfunc init() {\n\tcobra.OnInitialize(initConfig)\n\n\tPromu.PersistentFlags().StringVar(&cfgFile, \"config\", \"\", \"Config file (default is .\/.promu.yml)\")\n\tPromu.PersistentFlags().BoolVarP(&verbose, \"verbose\", \"v\", false, \"Verbose output\")\n\tPromu.PersistentFlags().BoolVar(&useViper, \"viper\", true, \"Use Viper for configuration\")\n\n\tviper.BindPFlag(\"useViper\", Promu.PersistentFlags().Lookup(\"viper\"))\n\tviper.BindPFlag(\"verbose\", Promu.PersistentFlags().Lookup(\"verbose\"))\n}\n\n\/\/ initConfig reads in config file and ENV variables if set.\nfunc initConfig() {\n\tif useViper != true {\n\t\treturn\n\t}\n\tif cfgFile != \"\" { \/\/ enable ability to specify config file via flag\n\t\tviper.SetConfigFile(cfgFile)\n\t}\n\n\tviper.SetConfigName(\".promu\") \/\/ name of config file (without extension)\n\tviper.AddConfigPath(\".\") \/\/ look for config in the working directory\n\tviper.SetEnvPrefix(\"promu\")\n\tviper.AutomaticEnv() \/\/ read in environment variables that match\n\n\t\/\/ If a config file is found, read it in.\n\tif err := viper.ReadInConfig(); err != nil {\n\t\tfatalMsg(\"Error in config file\", err)\n\t}\n\tif verbose {\n\t\tfmt.Println(\"Using config file:\", viper.ConfigFileUsed())\n\t}\n\n\tsetDefaultConfigValues()\n}\n\nfunc setDefaultConfigValues() {\n\tif !viper.IsSet(\"build.binaries\") {\n\t\tbinaries := []map[string]string{{\"name\": info.Name, \"path\": \".\"}}\n\t\tviper.Set(\"build.binaries\", binaries)\n\t}\n\tif !viper.IsSet(\"build.prefix\") {\n\t\tviper.Set(\"build.prefix\", \".\")\n\t}\n\tif !viper.IsSet(\"crossbuild.platforms\") {\n\t\tplatforms := defaultMainPlatforms\n\t\tplatforms = append(platforms, defaultARMPlatforms...)\n\t\tplatforms = append(platforms, defaultPowerPCPlatforms...)\n\t\tplatforms = append(platforms, defaultMIPSPlatforms...)\n\t\tviper.Set(\"crossbuild.platforms\", platforms)\n\t}\n\tif !viper.IsSet(\"tarball.prefix\") {\n\t\tviper.Set(\"tarball.prefix\", \".\")\n\t}\n}\n\n\/\/ warn prints a non-fatal err\nfunc warn(err error) {\n\tfmt.Println(`\/!\\`, err)\n}\n\n\/\/ fatal prints a error and exit\nfunc fatal(err error) {\n\tfmt.Println(\"!!\", err)\n\tos.Exit(1)\n}\n\n\/\/ fatalMsg prints a message and exit\nfunc fatalMsg(msg string, err error) {\n\tfmt.Printf(\"!! %s: %s\\n\", msg, err)\n\tos.Exit(1)\n}\n\n\/\/ shellOutput executes a shell command and return the trimmed output\nfunc shellOutput(cmd string) string {\n\targs := strings.Fields(cmd)\n\tout, _ := exec.Command(args[0], args[1:]...).Output()\n\treturn strings.Trim(string(out), \" \\n\")\n}\n\n\/\/ fileExists checks if a file exists\nfunc fileExists(path ...string) bool {\n\tfinfo, err := os.Stat(filepath.Join(path...))\n\tif err == nil && !finfo.IsDir() {\n\t\treturn true\n\t}\n\tif os.IsNotExist(err) || finfo.IsDir() {\n\t\treturn false\n\t}\n\tif err != nil {\n\t\tfatal(err)\n\t}\n\treturn true\n}\n\n\/\/ readFile reads a file and return the trimmed output\nfunc readFile(path string) string {\n\tdata, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn strings.Trim(string(data), \"\\n \")\n}\n\nfunc optArg(args []string, i int, default_ string) string {\n\tif i+1 > len(args) {\n\t\treturn default_\n\t}\n\treturn args[i]\n}\n\nfunc envOr(name, def string) string {\n\ts := os.Getenv(name)\n\tif s == \"\" {\n\t\treturn def\n\t}\n\treturn s\n}\n\nfunc stringInSlice(needle string, haystack []string) bool {\n\tfor _, hay := range haystack {\n\t\tif hay == needle {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc hasRequiredConfigurations(configVars ...string) error {\n\tfor _, configVar := range configVars {\n\t\tif !viper.IsSet(configVar) {\n\t\t\treturn fmt.Errorf(\"missing required '%s' configuration\", configVar)\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2016 Adam Kramer <akramer@gmail.com>\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"os\"\n\t\"syscall\"\n\n\t\"github.com\/akramer\/lateral\/client\"\n\t\"github.com\/akramer\/lateral\/platform\"\n\t\"github.com\/akramer\/lateral\/server\"\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nconst MAGICENV = \"LAT_MAGIC\"\n\nfunc realStart(cmd *cobra.Command, args []string) error {\n\terr := syscall.Setpgid(0, 0)\n\tif err != nil {\n\t\tglog.Errorln(\"Error setting process group ID\")\n\t\treturn err\n\t}\n\tos.Remove(Viper.GetString(\"socket\"))\n\tl, err := server.NewUnixListener(Viper)\n\tdefer l.Close()\n\tif err != nil {\n\t\tglog.Errorln(\"Error opening listening socket:\", err)\n\t\treturn err\n\t}\n\tserver.Run(Viper, l)\n\tos.Remove(Viper.GetString(\"socket\"))\n\treturn nil\n}\n\nfunc forkMyself() error {\n\texe, err := platform.Getexe()\n\tos.Setenv(MAGICENV, Viper.GetString(\"socket\"))\n\tattr := &syscall.ProcAttr{\n\t\tDir: \"\/\",\n\t\tEnv: os.Environ(),\n\t\tFiles: []uintptr{0, 1, 2}}\n\t_, err = syscall.ForkExec(exe, os.Args, attr)\n\treturn err\n}\n\nfunc isRunning() bool {\n\tc, err := client.NewUnixConn(Viper)\n\tif c != nil {\n\t\tc.Close()\n\t}\n\tif err == nil {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc runStart(cmd *cobra.Command, args []string) {\n\t\/\/ If MAGICENV is set to the socket path, we can be (relatively) sure we're the child process.\n\tif Viper.GetBool(\"start.foreground\") || os.Getenv(MAGICENV) == Viper.GetString(\"socket\") {\n\t\tglog.Infoln(\"Not forking a child server\")\n\t\terr := realStart(cmd, args)\n\t\tif err != nil {\n\t\t\tExitCode = 1\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif isRunning() {\n\t\t\tif Viper.GetBool(\"start.new_server\") {\n\t\t\t\tglog.Errorln(\"Server already running and new_server specified.\")\n\t\t\t\tExitCode = 1\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\terr := forkMyself()\n\t\tif err != nil {\n\t\t\tglog.Errorln(\"Error forking subprocess: \", err)\n\t\t\tExitCode = 1\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ startCmd represents the start command\nvar startCmd = &cobra.Command{\n\tUse: \"start\",\n\tShort: \"Start the lateral background server\",\n\tLong: `Start the lateral background server. By default, this creates a new server\nfor every session. This essentially means each login shell will have its own\nserver.`,\n\tRun: runStart,\n}\n\nfunc init() {\n\tRootCmd.AddCommand(startCmd)\n\n\tstartCmd.Flags().BoolP(\"new_server\", \"n\", false, \"Print an error and return a non-zero status if the server is already running\")\n\tViper.BindPFlag(\"start.new_server\", startCmd.Flags().Lookup(\"new_server\"))\n\tstartCmd.Flags().BoolP(\"foreground\", \"f\", false, \"Do not fork off a background server: run in the foreground.\")\n\tViper.BindPFlag(\"start.foreground\", startCmd.Flags().Lookup(\"foreground\"))\n\tstartCmd.Flags().IntP(\"parallel\", \"p\", 10, \"Number of concurrent tasks to run\")\n\tViper.BindPFlag(\"start.parallel\", startCmd.Flags().Lookup(\"parallel\"))\n}\n<commit_msg>create the socket directory (.lateral dir) if it doesn't exist<commit_after>\/\/ Copyright © 2016 Adam Kramer <akramer@gmail.com>\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage cmd\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"syscall\"\n\n\t\"github.com\/akramer\/lateral\/client\"\n\t\"github.com\/akramer\/lateral\/platform\"\n\t\"github.com\/akramer\/lateral\/server\"\n\t\"github.com\/golang\/glog\"\n\t\"github.com\/spf13\/cobra\"\n)\n\nconst MAGICENV = \"LAT_MAGIC\"\n\n\/\/ Make the socket directory, if it does not yet exist.\nfunc makeSocketDir(socket string) error {\n\tdir := path.Dir(socket)\n\tinfo, err := os.Stat(dir)\n\tif err == nil {\n\t\tif !info.Mode().IsDir() {\n\t\t\treturn fmt.Errorf(\"%q is not a directory\", dir)\n\t\t}\n\t\treturn nil \/\/ directory exists\n\t}\n\terr = os.Mkdir(dir, 0700)\n\treturn err\n}\n\nfunc realStart(cmd *cobra.Command, args []string) error {\n\terr := syscall.Setpgid(0, 0)\n\tif err != nil {\n\t\tglog.Errorln(\"Error setting process group ID\")\n\t\treturn err\n\t}\n\tsocket := Viper.GetString(\"socket\")\n\tos.Remove(socket)\n\terr = makeSocketDir(socket)\n\tif err != nil {\n\t\tglog.Errorln(\"Error creating directory for socket %q\", socket)\n\t}\n\tl, err := server.NewUnixListener(Viper)\n\tdefer l.Close()\n\tif err != nil {\n\t\tglog.Errorln(\"Error opening listening socket:\", err)\n\t\treturn err\n\t}\n\tserver.Run(Viper, l)\n\tos.Remove(Viper.GetString(\"socket\"))\n\treturn nil\n}\n\nfunc forkMyself() error {\n\texe, err := platform.Getexe()\n\tos.Setenv(MAGICENV, Viper.GetString(\"socket\"))\n\tattr := &syscall.ProcAttr{\n\t\tDir: \"\/\",\n\t\tEnv: os.Environ(),\n\t\tFiles: []uintptr{0, 1, 2}}\n\t_, err = syscall.ForkExec(exe, os.Args, attr)\n\treturn err\n}\n\nfunc isRunning() bool {\n\tc, err := client.NewUnixConn(Viper)\n\tif c != nil {\n\t\tc.Close()\n\t}\n\tif err == nil {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc runStart(cmd *cobra.Command, args []string) {\n\t\/\/ If MAGICENV is set to the socket path, we can be (relatively) sure we're the child process.\n\tif Viper.GetBool(\"start.foreground\") || os.Getenv(MAGICENV) == Viper.GetString(\"socket\") {\n\t\tglog.Infoln(\"Not forking a child server\")\n\t\terr := realStart(cmd, args)\n\t\tif err != nil {\n\t\t\tExitCode = 1\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tif isRunning() {\n\t\t\tif Viper.GetBool(\"start.new_server\") {\n\t\t\t\tglog.Errorln(\"Server already running and new_server specified.\")\n\t\t\t\tExitCode = 1\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\terr := forkMyself()\n\t\tif err != nil {\n\t\t\tglog.Errorln(\"Error forking subprocess: \", err)\n\t\t\tExitCode = 1\n\t\t\treturn\n\t\t}\n\t}\n}\n\n\/\/ startCmd represents the start command\nvar startCmd = &cobra.Command{\n\tUse: \"start\",\n\tShort: \"Start the lateral background server\",\n\tLong: `Start the lateral background server. By default, this creates a new server\nfor every session. This essentially means each login shell will have its own\nserver.`,\n\tRun: runStart,\n}\n\nfunc init() {\n\tRootCmd.AddCommand(startCmd)\n\n\tstartCmd.Flags().BoolP(\"new_server\", \"n\", false, \"Print an error and return a non-zero status if the server is already running\")\n\tViper.BindPFlag(\"start.new_server\", startCmd.Flags().Lookup(\"new_server\"))\n\tstartCmd.Flags().BoolP(\"foreground\", \"f\", false, \"Do not fork off a background server: run in the foreground.\")\n\tViper.BindPFlag(\"start.foreground\", startCmd.Flags().Lookup(\"foreground\"))\n\tstartCmd.Flags().IntP(\"parallel\", \"p\", 10, \"Number of concurrent tasks to run\")\n\tViper.BindPFlag(\"start.parallel\", startCmd.Flags().Lookup(\"parallel\"))\n}\n<|endoftext|>"} {"text":"<commit_before>package module\n\nimport (\n\t\"fmt\"\n\t\"github.com\/davidscholberg\/irkbot\/lib\/configure\"\n\t\"github.com\/davidscholberg\/irkbot\/lib\/message\"\n\t\"github.com\/thoj\/go-ircevent\"\n\t\"strings\"\n)\n\ntype CommandModule struct {\n\tConfigure func(*configure.Config)\n\tGetHelp func() []string\n\tRun func(*configure.Config, *message.InboundMsg, *Actions)\n}\n\ntype ParserModule struct {\n\tConfigure func(*configure.Config)\n\tGetHelp func() []string\n\tRun func(*configure.Config, *message.InboundMsg, *Actions) bool\n}\n\ntype Actions struct {\n\tQuit func()\n\tSay func(string)\n\tSayTo func(string, string)\n}\n\nfunc RegisterModules(conn *irc.Connection, cfg *configure.Config, outChan chan message.OutboundMsg) error {\n\t\/\/ register modules\n\tparserModules := []*ParserModule{}\n\tcmdMap := make(map[string]*CommandModule)\n\tfor moduleName, _ := range cfg.Modules {\n\t\tswitch moduleName {\n\t\tcase \"echo_name\":\n\t\t\tparserModules = append(parserModules, &ParserModule{nil, nil, EchoName})\n\t\tcase \"help\":\n\t\t\tcmdMap[\"help\"] = &CommandModule{nil, nil, Help}\n\t\tcase \"slam\":\n\t\t\tcmdMap[\"slam\"] = &CommandModule{ConfigSlam, HelpSlam, Slam}\n\t\tcase \"compliment\":\n\t\t\tcmdMap[\"compliment\"] = &CommandModule{ConfigCompliment, HelpCompliment, Compliment}\n\t\tcase \"quit\":\n\t\t\tcmdMap[\"quit\"] = &CommandModule{nil, HelpQuit, Quit}\n\t\tcase \"quote\":\n\t\t\tparserModules = append(parserModules, &ParserModule{ConfigQuote, nil, UpdateQuoteBuffer})\n\t\t\tcmdMap[\"grab\"] = &CommandModule{nil, HelpGrabQuote, GrabQuote}\n\t\t\tcmdMap[\"quote\"] = &CommandModule{nil, HelpGetQuote, GetQuote}\n\t\tcase \"say\":\n\t\t\tcmdMap[\"say\"] = &CommandModule{nil, HelpSay, Say}\n\t\tcase \"urban\":\n\t\t\tcmdMap[\"urban\"] = &CommandModule{nil, HelpUrban, Urban}\n\t\tcase \"urban_wotd\":\n\t\t\tcmdMap[\"urban_wotd\"] = &CommandModule{nil, HelpUrbanWotd, UrbanWotd}\n\t\tcase \"urban_trending\":\n\t\t\tcmdMap[\"urban_trending\"] = &CommandModule{nil, HelpUrbanTrending, UrbanTrending}\n\t\tcase \"url\":\n\t\t\tparserModules = append(parserModules, &ParserModule{nil, nil, Url})\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"invalid name '%s' in module config\", moduleName)\n\t\t}\n\t}\n\n\tfor _, m := range cmdMap {\n\t\tif m.GetHelp != nil {\n\t\t\tRegisterHelp(m.GetHelp())\n\t\t}\n\t\tif m.Configure != nil {\n\t\t\tm.Configure(cfg)\n\t\t}\n\t}\n\n\tfor _, m := range parserModules {\n\t\tif m.GetHelp != nil {\n\t\t\tRegisterHelp(m.GetHelp())\n\t\t}\n\t\tif m.Configure != nil {\n\t\t\tm.Configure(cfg)\n\t\t}\n\t}\n\n\tconn.AddCallback(\"PRIVMSG\", func(e *irc.Event) {\n\t\tinboundMsg := message.InboundMsg{}\n\t\tinboundMsg.Msg = e.Message()\n\t\tinboundMsg.MsgArgs = strings.Fields(inboundMsg.Msg)\n\t\tinboundMsg.Src = e.Arguments[0]\n\t\tif !strings.HasPrefix(inboundMsg.Src, \"#\") {\n\t\t\tinboundMsg.Src = e.Nick\n\t\t}\n\t\tinboundMsg.Event = e\n\n\t\toutboundMsg := message.OutboundMsg{}\n\t\toutboundMsg.Dest = inboundMsg.Src\n\t\toutboundMsg.Conn = conn\n\t\t\/\/p.SayChan = sayChan\n\n\t\tquitFunc := func() {\n\t\t\tconn.Quit()\n\t\t}\n\t\tsayFunc := func(msg string) {\n\t\t\toutboundMsg.Msg = msg\n\t\t\toutChan <- outboundMsg\n\t\t}\n\t\tsayToFunc := func(dest string, msg string) {\n\t\t\toutboundMsg.Dest = dest\n\t\t\toutboundMsg.Msg = msg\n\t\t\toutChan <- outboundMsg\n\t\t}\n\t\tactions := Actions{\n\t\t\tQuit: quitFunc,\n\t\t\tSay: sayFunc,\n\t\t\tSayTo: sayToFunc,\n\t\t}\n\n\t\t\/\/ run parser modules\n\t\tfor _, m := range parserModules {\n\t\t\tif m.Run(cfg, &inboundMsg, &actions) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ check commands\n\t\tcmdPrefix := cfg.Channel.CmdPrefix\n\t\tif cmdPrefix == \"\" {\n\t\t\tcmdPrefix = \".\"\n\t\t}\n\t\tif strings.HasPrefix(inboundMsg.Msg, cmdPrefix) {\n\t\t\tif m, ok := cmdMap[strings.TrimPrefix(inboundMsg.MsgArgs[0], cmdPrefix)]; ok {\n\t\t\t\tm.Run(cfg, &inboundMsg, &actions)\n\t\t\t}\n\t\t}\n\n\t})\n\n\treturn nil\n}\n<commit_msg>removed unnecessary GetHelp function from ParserModule<commit_after>package module\n\nimport (\n\t\"fmt\"\n\t\"github.com\/davidscholberg\/irkbot\/lib\/configure\"\n\t\"github.com\/davidscholberg\/irkbot\/lib\/message\"\n\t\"github.com\/thoj\/go-ircevent\"\n\t\"strings\"\n)\n\ntype CommandModule struct {\n\tConfigure func(*configure.Config)\n\tGetHelp func() []string\n\tRun func(*configure.Config, *message.InboundMsg, *Actions)\n}\n\ntype ParserModule struct {\n\tConfigure func(*configure.Config)\n\tRun func(*configure.Config, *message.InboundMsg, *Actions) bool\n}\n\ntype Actions struct {\n\tQuit func()\n\tSay func(string)\n\tSayTo func(string, string)\n}\n\nfunc RegisterModules(conn *irc.Connection, cfg *configure.Config, outChan chan message.OutboundMsg) error {\n\t\/\/ register modules\n\tparserModules := []*ParserModule{}\n\tcmdMap := make(map[string]*CommandModule)\n\tfor moduleName, _ := range cfg.Modules {\n\t\tswitch moduleName {\n\t\tcase \"echo_name\":\n\t\t\tparserModules = append(parserModules, &ParserModule{nil, EchoName})\n\t\tcase \"help\":\n\t\t\tcmdMap[\"help\"] = &CommandModule{nil, nil, Help}\n\t\tcase \"slam\":\n\t\t\tcmdMap[\"slam\"] = &CommandModule{ConfigSlam, HelpSlam, Slam}\n\t\tcase \"compliment\":\n\t\t\tcmdMap[\"compliment\"] = &CommandModule{ConfigCompliment, HelpCompliment, Compliment}\n\t\tcase \"quit\":\n\t\t\tcmdMap[\"quit\"] = &CommandModule{nil, HelpQuit, Quit}\n\t\tcase \"quote\":\n\t\t\tparserModules = append(parserModules, &ParserModule{ConfigQuote, UpdateQuoteBuffer})\n\t\t\tcmdMap[\"grab\"] = &CommandModule{nil, HelpGrabQuote, GrabQuote}\n\t\t\tcmdMap[\"quote\"] = &CommandModule{nil, HelpGetQuote, GetQuote}\n\t\tcase \"say\":\n\t\t\tcmdMap[\"say\"] = &CommandModule{nil, HelpSay, Say}\n\t\tcase \"urban\":\n\t\t\tcmdMap[\"urban\"] = &CommandModule{nil, HelpUrban, Urban}\n\t\tcase \"urban_wotd\":\n\t\t\tcmdMap[\"urban_wotd\"] = &CommandModule{nil, HelpUrbanWotd, UrbanWotd}\n\t\tcase \"urban_trending\":\n\t\t\tcmdMap[\"urban_trending\"] = &CommandModule{nil, HelpUrbanTrending, UrbanTrending}\n\t\tcase \"url\":\n\t\t\tparserModules = append(parserModules, &ParserModule{nil, Url})\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"invalid name '%s' in module config\", moduleName)\n\t\t}\n\t}\n\n\tfor _, m := range cmdMap {\n\t\tif m.GetHelp != nil {\n\t\t\tRegisterHelp(m.GetHelp())\n\t\t}\n\t\tif m.Configure != nil {\n\t\t\tm.Configure(cfg)\n\t\t}\n\t}\n\n\tfor _, m := range parserModules {\n\t\tif m.Configure != nil {\n\t\t\tm.Configure(cfg)\n\t\t}\n\t}\n\n\tconn.AddCallback(\"PRIVMSG\", func(e *irc.Event) {\n\t\tinboundMsg := message.InboundMsg{}\n\t\tinboundMsg.Msg = e.Message()\n\t\tinboundMsg.MsgArgs = strings.Fields(inboundMsg.Msg)\n\t\tinboundMsg.Src = e.Arguments[0]\n\t\tif !strings.HasPrefix(inboundMsg.Src, \"#\") {\n\t\t\tinboundMsg.Src = e.Nick\n\t\t}\n\t\tinboundMsg.Event = e\n\n\t\toutboundMsg := message.OutboundMsg{}\n\t\toutboundMsg.Dest = inboundMsg.Src\n\t\toutboundMsg.Conn = conn\n\t\t\/\/p.SayChan = sayChan\n\n\t\tquitFunc := func() {\n\t\t\tconn.Quit()\n\t\t}\n\t\tsayFunc := func(msg string) {\n\t\t\toutboundMsg.Msg = msg\n\t\t\toutChan <- outboundMsg\n\t\t}\n\t\tsayToFunc := func(dest string, msg string) {\n\t\t\toutboundMsg.Dest = dest\n\t\t\toutboundMsg.Msg = msg\n\t\t\toutChan <- outboundMsg\n\t\t}\n\t\tactions := Actions{\n\t\t\tQuit: quitFunc,\n\t\t\tSay: sayFunc,\n\t\t\tSayTo: sayToFunc,\n\t\t}\n\n\t\t\/\/ run parser modules\n\t\tfor _, m := range parserModules {\n\t\t\tif m.Run(cfg, &inboundMsg, &actions) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t\/\/ check commands\n\t\tcmdPrefix := cfg.Channel.CmdPrefix\n\t\tif cmdPrefix == \"\" {\n\t\t\tcmdPrefix = \".\"\n\t\t}\n\t\tif strings.HasPrefix(inboundMsg.Msg, cmdPrefix) {\n\t\t\tif m, ok := cmdMap[strings.TrimPrefix(inboundMsg.MsgArgs[0], cmdPrefix)]; ok {\n\t\t\t\tm.Run(cfg, &inboundMsg, &actions)\n\t\t\t}\n\t\t}\n\n\t})\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package turn\n\nimport (\n\t\"testing\"\n)\n\nfunc TestLtCredMech(t *testing.T) {\n\tusername := \"1599491771\"\n\tsharedSecret := \"foobar\"\n\n\texpectedPassword := \"Tpz\/nKkyvX\/vMSLKvL4sbtBt8Vs=\"\n\tactualPassword, _ := longTermCredentials(username, sharedSecret)\n\tif expectedPassword != actualPassword {\n\t\tt.Errorf(\"Expected %q, got %q\", expectedPassword, actualPassword)\n\t}\n}\n\n\/\/ export SECRET=foobar\n\/\/ secret=$SECRET && \\\n\/\/ time=$(date +%s) && \\\n\/\/ expiry=8400 && \\\n\/\/ username=$(( $time + $expiry )) &&\\\n\/\/ echo username:$username && \\\n\/\/ echo password : $(echo -n $username | openssl dgst -binary -sha1 -hmac $secret | openssl base64)\n\/\/ username : 1599491771\n\/\/ password : M+WLqSVjDc7kfj2U8ZUmk+hTQl8=\n<commit_msg>Add TestLtCredMech<commit_after>\/\/ +build !js\n\npackage turn\n\nimport (\n\t\"net\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/pion\/logging\"\n\t\"github.com\/stretchr\/testify\/assert\"\n)\n\nfunc TestLtCredMech(t *testing.T) {\n\tusername := \"1599491771\"\n\tsharedSecret := \"foobar\"\n\n\texpectedPassword := \"Tpz\/nKkyvX\/vMSLKvL4sbtBt8Vs=\"\n\tactualPassword, _ := longTermCredentials(username, sharedSecret)\n\tif expectedPassword != actualPassword {\n\t\tt.Errorf(\"Expected %q, got %q\", expectedPassword, actualPassword)\n\t}\n}\n\nfunc TestNewLongTermAuthHandler(t *testing.T) {\n\tconst sharedSecret = \"HELLO_WORLD\"\n\n\tserverListener, err := net.ListenPacket(\"udp4\", \"0.0.0.0:3478\")\n\tassert.NoError(t, err)\n\n\tserver, err := NewServer(ServerConfig{\n\t\tAuthHandler: NewLongTermAuthHandler(sharedSecret, nil),\n\t\tPacketConnConfigs: []PacketConnConfig{\n\t\t\t{\n\t\t\t\tPacketConn: serverListener,\n\t\t\t\tRelayAddressGenerator: &RelayAddressGeneratorStatic{\n\t\t\t\t\tRelayAddress: net.ParseIP(\"127.0.0.1\"),\n\t\t\t\t\tAddress: \"0.0.0.0\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tRealm: \"pion.ly\",\n\t\tLoggerFactory: logging.NewDefaultLoggerFactory(),\n\t})\n\tassert.NoError(t, err)\n\n\tconn, err := net.ListenPacket(\"udp4\", \"0.0.0.0:0\")\n\tassert.NoError(t, err)\n\n\tusername, password, err := GenerateLongTermCredentials(sharedSecret, time.Minute)\n\tassert.NoError(t, err)\n\n\tclient, err := NewClient(&ClientConfig{\n\t\tSTUNServerAddr: \"0.0.0.0:3478\",\n\t\tTURNServerAddr: \"0.0.0.0:3478\",\n\t\tConn: conn,\n\t\tUsername: username,\n\t\tPassword: password,\n\t\tLoggerFactory: logging.NewDefaultLoggerFactory(),\n\t})\n\tassert.NoError(t, err)\n\tassert.NoError(t, client.Listen())\n\n\trelayConn, err := client.Allocate()\n\tassert.NoError(t, err)\n\n\tclient.Close()\n\tassert.NoError(t, relayConn.Close())\n\tassert.NoError(t, conn.Close())\n\tassert.NoError(t, server.Close())\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/lxc\/lxd\/client\"\n\t\"github.com\/lxc\/lxd\/lxd\/revert\"\n\t\"github.com\/lxc\/lxd\/lxd\/util\"\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n)\n\nfunc lxdIsConfigured(client lxd.InstanceServer) (bool, error) {\n\t\/\/ Look for networks.\n\tnetworks, err := client.GetNetworkNames()\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"Failed to list networks: %w\", err)\n\t}\n\tif !shared.StringInSlice(\"lxdbr0\", networks) {\n\t\t\/\/ Couldn't find lxdbr0.\n\t\treturn false, nil\n\t}\n\n\t\/\/ Look for storage pools.\n\tpools, err := client.GetStoragePoolNames()\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"Failed to list storage pools: %w\", err)\n\t}\n\n\tif !shared.StringInSlice(\"default\", pools) {\n\t\t\/\/ No storage pool found.\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}\n\nfunc lxdInitialConfiguration(client lxd.InstanceServer) error {\n\t\/\/ Load current server config.\n\tinfo, _, err := client.GetServer()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to get server info: %w\", err)\n\t}\n\tavailableBackends := util.AvailableStorageDrivers(info.Environment.StorageSupportedDrivers, util.PoolTypeLocal)\n\n\t\/\/ Load the default profile.\n\tprofile, profileEtag, err := client.GetProfile(\"default\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to load default profile: %w\", err)\n\t}\n\n\t\/\/ Look for storage pools.\n\tpools, err := client.GetStoragePools()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to list storage pools: %w\", err)\n\t}\n\n\tif len(pools) == 0 {\n\t\tpool := api.StoragePoolsPost{}\n\t\tpool.Config = map[string]string{}\n\t\tpool.Name = \"default\"\n\n\t\t\/\/ Check if ZFS supported.\n\t\tif shared.StringInSlice(\"zfs\", availableBackends) {\n\t\t\tpool.Driver = \"zfs\"\n\n\t\t\t\/\/ Check if zsys.\n\t\t\tpoolName, _ := shared.RunCommand(\"zpool\", \"get\", \"-H\", \"-o\", \"value\", \"name\", \"rpool\")\n\t\t\tif strings.TrimSpace(poolName) == \"rpool\" {\n\t\t\t\tpool.Config[\"source\"] = \"rpool\/lxd\"\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Fallback to dir backend.\n\t\t\tpool.Driver = \"dir\"\n\t\t}\n\n\t\t\/\/ Create the storage pool.\n\t\terr := client.CreateStoragePool(pool)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to create storage pool: %w\", err)\n\t\t}\n\n\t\t\/\/ Add to default profile in default project.\n\t\tprofile.Devices[\"root\"] = map[string]string{\n\t\t\t\"type\": \"disk\",\n\t\t\t\"pool\": \"default\",\n\t\t\t\"path\": \"\/\",\n\t\t}\n\t}\n\n\t\/\/ Look for networks.\n\tnetworks, err := client.GetNetworks()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to list networks: %w\", err)\n\t}\n\n\tfound := false\n\tfor _, network := range networks {\n\t\tif network.Managed {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !found {\n\t\t\/\/ Create lxdbr0.\n\t\tnetwork := api.NetworksPost{}\n\t\tnetwork.Config = map[string]string{}\n\t\tnetwork.Type = \"bridge\"\n\t\tnetwork.Name = \"lxdbr0\"\n\n\t\terr := client.CreateNetwork(network)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to create network: %w\", err)\n\t\t}\n\n\t\t\/\/ Add to default profile in default project.\n\t\tprofile.Devices[\"eth0\"] = map[string]string{\n\t\t\t\"type\": \"nic\",\n\t\t\t\"network\": \"default\",\n\t\t\t\"name\": \"eth0\",\n\t\t}\n\t}\n\n\t\/\/ Update the default profile.\n\terr = client.UpdateProfile(\"default\", profile.Writable(), profileEtag)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to update default profile: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc lxdSetupUser(uid uint32) error {\n\tprojectName := fmt.Sprintf(\"user-%d\", uid)\n\tuserPath := filepath.Join(\"users\", fmt.Sprintf(\"%d\", uid))\n\n\t\/\/ User account.\n\tpw, err := user.LookupId(fmt.Sprintf(\"%d\", uid))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to retrieve user information: %w\", err)\n\t}\n\n\t\/\/ Setup reverter.\n\trevert := revert.New()\n\tdefer revert.Fail()\n\n\t\/\/ Create certificate directory.\n\terr = os.MkdirAll(userPath, 0700)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to create user directory: %w\", err)\n\t}\n\trevert.Add(func() { os.RemoveAll(userPath) })\n\n\t\/\/ Generate certificate.\n\terr = shared.FindOrGenCert(filepath.Join(userPath, \"client.crt\"), filepath.Join(userPath, \"client.key\"), true, false)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to generate user certificate: %w\", err)\n\t}\n\n\t\/\/ Connect to LXD.\n\tclient, err := lxd.ConnectLXDUnix(\"\", nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to connect to LXD: %w\", err)\n\t}\n\n\tclient.GetServer()\n\n\t\/\/ Setup the project (with restrictions).\n\tprojects, err := client.GetProjectNames()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to retrieve project list: %w\", err)\n\t}\n\n\tif !shared.StringInSlice(projectName, projects) {\n\t\t\/\/ Create the project.\n\t\terr := client.CreateProject(api.ProjectsPost{\n\t\t\tName: projectName,\n\t\t\tProjectPut: api.ProjectPut{\n\t\t\t\tDescription: fmt.Sprintf(\"User restricted project for %q (%s)\", pw.Username, pw.Uid),\n\t\t\t\tConfig: map[string]string{\n\t\t\t\t\t\"features.images\": \"true\",\n\t\t\t\t\t\"features.networks\": \"false\",\n\t\t\t\t\t\"features.profiles\": \"true\",\n\t\t\t\t\t\"features.storage.volumes\": \"true\",\n\t\t\t\t\t\"restricted\": \"true\",\n\t\t\t\t\t\"restricted.containers.nesting\": \"allow\",\n\t\t\t\t\t\"restricted.devices.disk\": \"allow\",\n\t\t\t\t\t\"restricted.devices.disk.paths\": pw.HomeDir,\n\t\t\t\t\t\"restricted.devices.gpu\": \"allow\",\n\t\t\t\t\t\"restricted.idmap.uid\": pw.Uid,\n\t\t\t\t\t\"restricted.idmap.gid\": pw.Gid,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to create project: %w\", err)\n\t\t}\n\n\t\trevert.Add(func() { client.DeleteProject(projectName) })\n\t}\n\n\t\/\/ Parse the certificate.\n\tx509Cert, err := shared.ReadCert(filepath.Join(userPath, \"client.crt\"))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to read user certificate: %w\", err)\n\t}\n\n\t\/\/ Add the certificate to the trust store.\n\terr = client.CreateCertificate(api.CertificatesPost{\n\t\tCertificatePut: api.CertificatePut{\n\t\t\tName: fmt.Sprintf(\"lxd-user-%d\", uid),\n\t\t\tType: \"client\",\n\t\t\tRestricted: true,\n\t\t\tProjects: []string{projectName},\n\t\t\tCertificate: base64.StdEncoding.EncodeToString(x509Cert.Raw),\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to add user certificate: %w\", err)\n\t}\n\n\trevert.Add(func() { client.DeleteCertificate(shared.CertFingerprint(x509Cert)) })\n\n\t\/\/ Setup default profile.\n\terr = client.UseProject(projectName).UpdateProfile(\"default\", api.ProfilePut{\n\t\tDescription: \"Default LXD profile\",\n\t\tConfig: map[string]string{\n\t\t\t\"raw.idmap\": fmt.Sprintf(\"uid %s %s\\ngid %s %s\", pw.Uid, pw.Uid, pw.Gid, pw.Gid),\n\t\t},\n\t\tDevices: map[string]map[string]string{\n\t\t\t\"root\": {\n\t\t\t\t\"type\": \"disk\",\n\t\t\t\t\"path\": \"\/\",\n\t\t\t\t\"pool\": \"default\",\n\t\t\t},\n\t\t\t\"eth0\": {\n\t\t\t\t\"type\": \"nic\",\n\t\t\t\t\"name\": \"eth0\",\n\t\t\t\t\"network\": \"lxdbr0\",\n\t\t\t},\n\t\t},\n\t}, \"\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to update the default profile: %w\", err)\n\t}\n\n\trevert.Success()\n\treturn nil\n}\n<commit_msg>lxd-user: Fix default network name<commit_after>package main\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"os\"\n\t\"os\/user\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/lxc\/lxd\/client\"\n\t\"github.com\/lxc\/lxd\/lxd\/revert\"\n\t\"github.com\/lxc\/lxd\/lxd\/util\"\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n)\n\nfunc lxdIsConfigured(client lxd.InstanceServer) (bool, error) {\n\t\/\/ Look for networks.\n\tnetworks, err := client.GetNetworkNames()\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"Failed to list networks: %w\", err)\n\t}\n\tif !shared.StringInSlice(\"lxdbr0\", networks) {\n\t\t\/\/ Couldn't find lxdbr0.\n\t\treturn false, nil\n\t}\n\n\t\/\/ Look for storage pools.\n\tpools, err := client.GetStoragePoolNames()\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"Failed to list storage pools: %w\", err)\n\t}\n\n\tif !shared.StringInSlice(\"default\", pools) {\n\t\t\/\/ No storage pool found.\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}\n\nfunc lxdInitialConfiguration(client lxd.InstanceServer) error {\n\t\/\/ Load current server config.\n\tinfo, _, err := client.GetServer()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to get server info: %w\", err)\n\t}\n\tavailableBackends := util.AvailableStorageDrivers(info.Environment.StorageSupportedDrivers, util.PoolTypeLocal)\n\n\t\/\/ Load the default profile.\n\tprofile, profileEtag, err := client.GetProfile(\"default\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to load default profile: %w\", err)\n\t}\n\n\t\/\/ Look for storage pools.\n\tpools, err := client.GetStoragePools()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to list storage pools: %w\", err)\n\t}\n\n\tif len(pools) == 0 {\n\t\tpool := api.StoragePoolsPost{}\n\t\tpool.Config = map[string]string{}\n\t\tpool.Name = \"default\"\n\n\t\t\/\/ Check if ZFS supported.\n\t\tif shared.StringInSlice(\"zfs\", availableBackends) {\n\t\t\tpool.Driver = \"zfs\"\n\n\t\t\t\/\/ Check if zsys.\n\t\t\tpoolName, _ := shared.RunCommand(\"zpool\", \"get\", \"-H\", \"-o\", \"value\", \"name\", \"rpool\")\n\t\t\tif strings.TrimSpace(poolName) == \"rpool\" {\n\t\t\t\tpool.Config[\"source\"] = \"rpool\/lxd\"\n\t\t\t}\n\t\t} else {\n\t\t\t\/\/ Fallback to dir backend.\n\t\t\tpool.Driver = \"dir\"\n\t\t}\n\n\t\t\/\/ Create the storage pool.\n\t\terr := client.CreateStoragePool(pool)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to create storage pool: %w\", err)\n\t\t}\n\n\t\t\/\/ Add to default profile in default project.\n\t\tprofile.Devices[\"root\"] = map[string]string{\n\t\t\t\"type\": \"disk\",\n\t\t\t\"pool\": \"default\",\n\t\t\t\"path\": \"\/\",\n\t\t}\n\t}\n\n\t\/\/ Look for networks.\n\tnetworks, err := client.GetNetworks()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to list networks: %w\", err)\n\t}\n\n\tfound := false\n\tfor _, network := range networks {\n\t\tif network.Managed {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !found {\n\t\t\/\/ Create lxdbr0.\n\t\tnetwork := api.NetworksPost{}\n\t\tnetwork.Config = map[string]string{}\n\t\tnetwork.Type = \"bridge\"\n\t\tnetwork.Name = \"lxdbr0\"\n\n\t\terr := client.CreateNetwork(network)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to create network: %w\", err)\n\t\t}\n\n\t\t\/\/ Add to default profile in default project.\n\t\tprofile.Devices[\"eth0\"] = map[string]string{\n\t\t\t\"type\": \"nic\",\n\t\t\t\"network\": \"lxdbr0\",\n\t\t\t\"name\": \"eth0\",\n\t\t}\n\t}\n\n\t\/\/ Update the default profile.\n\terr = client.UpdateProfile(\"default\", profile.Writable(), profileEtag)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to update default profile: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc lxdSetupUser(uid uint32) error {\n\tprojectName := fmt.Sprintf(\"user-%d\", uid)\n\tuserPath := filepath.Join(\"users\", fmt.Sprintf(\"%d\", uid))\n\n\t\/\/ User account.\n\tpw, err := user.LookupId(fmt.Sprintf(\"%d\", uid))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to retrieve user information: %w\", err)\n\t}\n\n\t\/\/ Setup reverter.\n\trevert := revert.New()\n\tdefer revert.Fail()\n\n\t\/\/ Create certificate directory.\n\terr = os.MkdirAll(userPath, 0700)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to create user directory: %w\", err)\n\t}\n\trevert.Add(func() { os.RemoveAll(userPath) })\n\n\t\/\/ Generate certificate.\n\terr = shared.FindOrGenCert(filepath.Join(userPath, \"client.crt\"), filepath.Join(userPath, \"client.key\"), true, false)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to generate user certificate: %w\", err)\n\t}\n\n\t\/\/ Connect to LXD.\n\tclient, err := lxd.ConnectLXDUnix(\"\", nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to connect to LXD: %w\", err)\n\t}\n\n\tclient.GetServer()\n\n\t\/\/ Setup the project (with restrictions).\n\tprojects, err := client.GetProjectNames()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to retrieve project list: %w\", err)\n\t}\n\n\tif !shared.StringInSlice(projectName, projects) {\n\t\t\/\/ Create the project.\n\t\terr := client.CreateProject(api.ProjectsPost{\n\t\t\tName: projectName,\n\t\t\tProjectPut: api.ProjectPut{\n\t\t\t\tDescription: fmt.Sprintf(\"User restricted project for %q (%s)\", pw.Username, pw.Uid),\n\t\t\t\tConfig: map[string]string{\n\t\t\t\t\t\"features.images\": \"true\",\n\t\t\t\t\t\"features.networks\": \"false\",\n\t\t\t\t\t\"features.profiles\": \"true\",\n\t\t\t\t\t\"features.storage.volumes\": \"true\",\n\t\t\t\t\t\"restricted\": \"true\",\n\t\t\t\t\t\"restricted.containers.nesting\": \"allow\",\n\t\t\t\t\t\"restricted.devices.disk\": \"allow\",\n\t\t\t\t\t\"restricted.devices.disk.paths\": pw.HomeDir,\n\t\t\t\t\t\"restricted.devices.gpu\": \"allow\",\n\t\t\t\t\t\"restricted.idmap.uid\": pw.Uid,\n\t\t\t\t\t\"restricted.idmap.gid\": pw.Gid,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to create project: %w\", err)\n\t\t}\n\n\t\trevert.Add(func() { client.DeleteProject(projectName) })\n\t}\n\n\t\/\/ Parse the certificate.\n\tx509Cert, err := shared.ReadCert(filepath.Join(userPath, \"client.crt\"))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to read user certificate: %w\", err)\n\t}\n\n\t\/\/ Add the certificate to the trust store.\n\terr = client.CreateCertificate(api.CertificatesPost{\n\t\tCertificatePut: api.CertificatePut{\n\t\t\tName: fmt.Sprintf(\"lxd-user-%d\", uid),\n\t\t\tType: \"client\",\n\t\t\tRestricted: true,\n\t\t\tProjects: []string{projectName},\n\t\t\tCertificate: base64.StdEncoding.EncodeToString(x509Cert.Raw),\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to add user certificate: %w\", err)\n\t}\n\n\trevert.Add(func() { client.DeleteCertificate(shared.CertFingerprint(x509Cert)) })\n\n\t\/\/ Setup default profile.\n\terr = client.UseProject(projectName).UpdateProfile(\"default\", api.ProfilePut{\n\t\tDescription: \"Default LXD profile\",\n\t\tConfig: map[string]string{\n\t\t\t\"raw.idmap\": fmt.Sprintf(\"uid %s %s\\ngid %s %s\", pw.Uid, pw.Uid, pw.Gid, pw.Gid),\n\t\t},\n\t\tDevices: map[string]map[string]string{\n\t\t\t\"root\": {\n\t\t\t\t\"type\": \"disk\",\n\t\t\t\t\"path\": \"\/\",\n\t\t\t\t\"pool\": \"default\",\n\t\t\t},\n\t\t\t\"eth0\": {\n\t\t\t\t\"type\": \"nic\",\n\t\t\t\t\"name\": \"eth0\",\n\t\t\t\t\"network\": \"lxdbr0\",\n\t\t\t},\n\t\t},\n\t}, \"\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to update the default profile: %w\", err)\n\t}\n\n\trevert.Success()\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package core\n\nimport (\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\n\tds \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/datastore.go\"\n\tb58 \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-base58\"\n\tma \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-multiaddr\"\n\t\"github.com\/jbenet\/go-ipfs\/bitswap\"\n\tbserv \"github.com\/jbenet\/go-ipfs\/blockservice\"\n\tconfig \"github.com\/jbenet\/go-ipfs\/config\"\n\tci \"github.com\/jbenet\/go-ipfs\/crypto\"\n\tmerkledag \"github.com\/jbenet\/go-ipfs\/merkledag\"\n\tpath \"github.com\/jbenet\/go-ipfs\/path\"\n\tpeer \"github.com\/jbenet\/go-ipfs\/peer\"\n\trouting \"github.com\/jbenet\/go-ipfs\/routing\"\n\tdht \"github.com\/jbenet\/go-ipfs\/routing\/dht\"\n\tswarm \"github.com\/jbenet\/go-ipfs\/swarm\"\n\tu \"github.com\/jbenet\/go-ipfs\/util\"\n)\n\n\/\/ IpfsNode is IPFS Core module. It represents an IPFS instance.\ntype IpfsNode struct {\n\n\t\/\/ the node's configuration\n\tConfig *config.Config\n\n\t\/\/ the local node's identity\n\tIdentity *peer.Peer\n\n\t\/\/ the map of other nodes (Peer instances)\n\tPeerMap *peer.Map\n\n\t\/\/ the local datastore\n\tDatastore ds.Datastore\n\n\t\/\/ the network message stream\n\tSwarm *swarm.Swarm\n\n\t\/\/ the routing system. recommend ipfs-dht\n\tRouting routing.IpfsRouting\n\n\t\/\/ the block exchange + strategy (bitswap)\n\tBitSwap *bitswap.BitSwap\n\n\t\/\/ the block service, get\/add blocks.\n\tBlocks *bserv.BlockService\n\n\t\/\/ the merkle dag service, get\/add objects.\n\tDAG *merkledag.DAGService\n\n\t\/\/ the path resolution system\n\tResolver *path.Resolver\n\n\t\/\/ the name system, resolves paths to hashes\n\t\/\/ Namesys *namesys.Namesys\n}\n\n\/\/ NewIpfsNode constructs a new IpfsNode based on the given config.\nfunc NewIpfsNode(cfg *config.Config, online bool) (*IpfsNode, error) {\n\tif cfg == nil {\n\t\treturn nil, fmt.Errorf(\"configuration required\")\n\t}\n\n\td, err := makeDatastore(cfg.Datastore)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocal, err := initIdentity(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\tnet *swarm.Swarm\n\t\t\/\/ TODO: refactor so we can use IpfsRouting interface instead of being DHT-specific\n\t\troute* dht.IpfsDHT\n\t\tswap *bitswap.BitSwap\n\t)\n\n\tif online {\n\t\tnet = swarm.NewSwarm(local)\n\t\terr = net.Listen()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\troute = dht.NewDHT(local, net, d)\n\t\troute.Start()\n\n\t\tswap = bitswap.NewBitSwap(local, net, d, route)\n\t\tswap.SetStrategy(bitswap.YesManStrategy)\n\n\t\tgo initConnections(cfg, route)\n\t}\n\n\tbs, err := bserv.NewBlockService(d, swap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdag := &merkledag.DAGService{Blocks: bs}\n\n\treturn &IpfsNode{\n\t\tConfig: cfg,\n\t\tPeerMap: &peer.Map{},\n\t\tDatastore: d,\n\t\tBlocks: bs,\n\t\tDAG: dag,\n\t\tResolver: &path.Resolver{DAG: dag},\n\t\tBitSwap: swap,\n\t\tIdentity: local,\n\t\tRouting: route,\n\t}, nil\n}\n\nfunc initIdentity(cfg *config.Config) (*peer.Peer, error) {\n\tif len(cfg.Identity.PeerID) == 0 {\n\t\treturn nil, errors.New(\"No peer ID in config! (was ipfs init run?)\")\n\t}\n\n\t\/\/ address is optional\n\tvar addresses []*ma.Multiaddr\n\tif len(cfg.Identity.Address) > 0 {\n\t\tmaddr, err := ma.NewMultiaddr(cfg.Identity.Address)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\taddresses = []*ma.Multiaddr{ maddr }\n\t}\n\n\tskb, err := base64.StdEncoding.DecodeString(cfg.Identity.PrivKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsk, err := ci.UnmarshalPrivateKey(skb)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &peer.Peer{\n\t\tID: peer.ID(b58.Decode(cfg.Identity.PeerID)),\n\t\tAddresses: addresses,\n\t\tPrivKey: sk,\n\t\tPubKey: sk.GetPublic(),\n\t}, nil\n}\n\nfunc initConnections(cfg *config.Config, route *dht.IpfsDHT) {\n\tfor _, p := range cfg.Peers {\n\t\tmaddr, err := ma.NewMultiaddr(p.Address)\n\t\tif err != nil {\n\t\t\tu.PErr(\"error: %v\\n\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err = route.Connect(maddr)\n\t\tif err != nil {\n\t\t\tu.PErr(\"Bootstrapping error: %v\\n\", err)\n\t\t}\n\t}\n}\n\nfunc (n *IpfsNode) PinDagNode(nd *merkledag.Node) error {\n\tu.POut(\"Pinning node. Currently No-Op\\n\")\n\treturn nil\n}\n<commit_msg>core: Show error when config identity is not set<commit_after>package core\n\nimport (\n\t\"encoding\/base64\"\n\t\"errors\"\n\t\"fmt\"\n\n\tds \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/datastore.go\"\n\tb58 \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-base58\"\n\tma \"github.com\/jbenet\/go-ipfs\/Godeps\/_workspace\/src\/github.com\/jbenet\/go-multiaddr\"\n\t\"github.com\/jbenet\/go-ipfs\/bitswap\"\n\tbserv \"github.com\/jbenet\/go-ipfs\/blockservice\"\n\tconfig \"github.com\/jbenet\/go-ipfs\/config\"\n\tci \"github.com\/jbenet\/go-ipfs\/crypto\"\n\tmerkledag \"github.com\/jbenet\/go-ipfs\/merkledag\"\n\tpath \"github.com\/jbenet\/go-ipfs\/path\"\n\tpeer \"github.com\/jbenet\/go-ipfs\/peer\"\n\trouting \"github.com\/jbenet\/go-ipfs\/routing\"\n\tdht \"github.com\/jbenet\/go-ipfs\/routing\/dht\"\n\tswarm \"github.com\/jbenet\/go-ipfs\/swarm\"\n\tu \"github.com\/jbenet\/go-ipfs\/util\"\n)\n\n\/\/ IpfsNode is IPFS Core module. It represents an IPFS instance.\ntype IpfsNode struct {\n\n\t\/\/ the node's configuration\n\tConfig *config.Config\n\n\t\/\/ the local node's identity\n\tIdentity *peer.Peer\n\n\t\/\/ the map of other nodes (Peer instances)\n\tPeerMap *peer.Map\n\n\t\/\/ the local datastore\n\tDatastore ds.Datastore\n\n\t\/\/ the network message stream\n\tSwarm *swarm.Swarm\n\n\t\/\/ the routing system. recommend ipfs-dht\n\tRouting routing.IpfsRouting\n\n\t\/\/ the block exchange + strategy (bitswap)\n\tBitSwap *bitswap.BitSwap\n\n\t\/\/ the block service, get\/add blocks.\n\tBlocks *bserv.BlockService\n\n\t\/\/ the merkle dag service, get\/add objects.\n\tDAG *merkledag.DAGService\n\n\t\/\/ the path resolution system\n\tResolver *path.Resolver\n\n\t\/\/ the name system, resolves paths to hashes\n\t\/\/ Namesys *namesys.Namesys\n}\n\n\/\/ NewIpfsNode constructs a new IpfsNode based on the given config.\nfunc NewIpfsNode(cfg *config.Config, online bool) (*IpfsNode, error) {\n\tif cfg == nil {\n\t\treturn nil, fmt.Errorf(\"configuration required\")\n\t}\n\n\td, err := makeDatastore(cfg.Datastore)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlocal, err := initIdentity(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\tnet *swarm.Swarm\n\t\t\/\/ TODO: refactor so we can use IpfsRouting interface instead of being DHT-specific\n\t\troute* dht.IpfsDHT\n\t\tswap *bitswap.BitSwap\n\t)\n\n\tif online {\n\t\tnet = swarm.NewSwarm(local)\n\t\terr = net.Listen()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\troute = dht.NewDHT(local, net, d)\n\t\troute.Start()\n\n\t\tswap = bitswap.NewBitSwap(local, net, d, route)\n\t\tswap.SetStrategy(bitswap.YesManStrategy)\n\n\t\tgo initConnections(cfg, route)\n\t}\n\n\tbs, err := bserv.NewBlockService(d, swap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdag := &merkledag.DAGService{Blocks: bs}\n\n\treturn &IpfsNode{\n\t\tConfig: cfg,\n\t\tPeerMap: &peer.Map{},\n\t\tDatastore: d,\n\t\tBlocks: bs,\n\t\tDAG: dag,\n\t\tResolver: &path.Resolver{DAG: dag},\n\t\tBitSwap: swap,\n\t\tIdentity: local,\n\t\tRouting: route,\n\t}, nil\n}\n\nfunc initIdentity(cfg *config.Config) (*peer.Peer, error) {\n\tif cfg.Identity == nil {\n\t\treturn nil, errors.New(\"Identity was not set in config (was ipfs init run?)\")\n\t}\n\n\tif len(cfg.Identity.PeerID) == 0 {\n\t\treturn nil, errors.New(\"No peer ID in config! (was ipfs init run?)\")\n\t}\n\n\t\/\/ address is optional\n\tvar addresses []*ma.Multiaddr\n\tif len(cfg.Identity.Address) > 0 {\n\t\tmaddr, err := ma.NewMultiaddr(cfg.Identity.Address)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\taddresses = []*ma.Multiaddr{ maddr }\n\t}\n\n\tskb, err := base64.StdEncoding.DecodeString(cfg.Identity.PrivKey)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsk, err := ci.UnmarshalPrivateKey(skb)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &peer.Peer{\n\t\tID: peer.ID(b58.Decode(cfg.Identity.PeerID)),\n\t\tAddresses: addresses,\n\t\tPrivKey: sk,\n\t\tPubKey: sk.GetPublic(),\n\t}, nil\n}\n\nfunc initConnections(cfg *config.Config, route *dht.IpfsDHT) {\n\tfor _, p := range cfg.Peers {\n\t\tmaddr, err := ma.NewMultiaddr(p.Address)\n\t\tif err != nil {\n\t\t\tu.PErr(\"error: %v\\n\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err = route.Connect(maddr)\n\t\tif err != nil {\n\t\t\tu.PErr(\"Bootstrapping error: %v\\n\", err)\n\t\t}\n\t}\n}\n\nfunc (n *IpfsNode) PinDagNode(nd *merkledag.Node) error {\n\tu.POut(\"Pinning node. Currently No-Op\\n\")\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package mtr\n\nimport (\n\t\"net\"\n\t\"os\"\n\t\"time\"\n)\n\nfunc Mtr(dest [4]byte, options *TracerouteOptions, c ...chan TracerouteHop) (result TracerouteResult, err error) {\n\tresult.Hops = []TracerouteHop{}\n\tresult.DestAddress = dest\n\tdestAddr := AddressString(dest)\n\tlocalAddr := \"0.0.0.0\"\n\tipAddr := net.IPAddr{IP: net.ParseIP(destAddr)}\n\tpid := os.Getpid() & 0xffff\n\ttimeout := time.Duration(options.TimeoutMs()) * time.Millisecond\n\n\tmtrResults := make([]MtrResult, options.MaxHops()+1)\n\n\tfor snt := 0; snt < options.SntSize(); snt++ {\n\t\tretry := 0\n\t\tfor ttl := 1; ttl < options.MaxHops(); ttl++ {\n\t\t\thop := TracerouteHop{TTL: ttl, Snt: snt}\n\t\t\tif mtrResults[ttl].TTL == 0 {\n\t\t\t\tmtrResults[ttl] = MtrResult{TTL: ttl, Host: \"???\", SuccSum: 0, Success: false, LastTime: time.Duration(0), AllTime: time.Duration(0), BestTime: time.Duration(0), WrstTime: time.Duration(0), AvgTime: time.Duration(0)}\n\t\t\t}\n\t\t\thopReturn, err := Icmp(localAddr, &ipAddr, ttl, pid, timeout)\n\t\t\tif err != nil || !hopReturn.Success {\n\t\t\t\t\/\/mtrResults[ttl].Success = false\n\t\t\t\thop.Loss = (float32)(snt+1-mtrResults[ttl].SuccSum) \/ (float32)(snt+1) * 100\n\t\t\t\thop.Address = mtrResults[ttl].Host\n\t\t\t\thop.AvgTime = mtrResults[ttl].AvgTime\n\t\t\t\thop.BestTime = mtrResults[ttl].BestTime\n\t\t\t\thop.Host = mtrResults[ttl].Host\n\t\t\t\thop.LastTime = mtrResults[ttl].LastTime\n\t\t\t\thop.Success = false\n\t\t\t\thop.WrstTime = mtrResults[ttl].WrstTime\n\t\t\t\tnotifyMtr(hop, c)\n\t\t\t\tretry++\n\t\t\t\tif retry >= options.Retries() {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tretry = 0\n\t\t\tmtrResults[ttl].SuccSum = mtrResults[ttl].SuccSum + 1\n\t\t\tmtrResults[ttl].Host = hopReturn.Addr\n\t\t\tmtrResults[ttl].LastTime = hopReturn.Elapsed\n\t\t\tif mtrResults[ttl].WrstTime == time.Duration(0) || hopReturn.Elapsed > mtrResults[ttl].WrstTime {\n\t\t\t\tmtrResults[ttl].WrstTime = hopReturn.Elapsed\n\t\t\t}\n\t\t\tif mtrResults[ttl].BestTime == time.Duration(0) || hopReturn.Elapsed < mtrResults[ttl].BestTime {\n\t\t\t\tmtrResults[ttl].BestTime = hopReturn.Elapsed\n\t\t\t}\n\t\t\tmtrResults[ttl].AllTime += hopReturn.Elapsed\n\t\t\tmtrResults[ttl].AvgTime = time.Duration((int64)(mtrResults[ttl].AllTime\/time.Microsecond)\/(int64)(mtrResults[ttl].SuccSum)) * time.Microsecond\n\n\t\t\thop.Loss = (float32)(snt+1-mtrResults[ttl].SuccSum) \/ (float32)(snt+1) * 100\n\t\t\thop.Address = mtrResults[ttl].Host\n\t\t\thop.AvgTime = mtrResults[ttl].AvgTime\n\t\t\thop.BestTime = mtrResults[ttl].BestTime\n\t\t\thop.Host = mtrResults[ttl].Host\n\t\t\thop.LastTime = mtrResults[ttl].LastTime\n\t\t\thop.Success = true\n\t\t\thop.WrstTime = mtrResults[ttl].WrstTime\n\t\t\tnotifyMtr(hop, c)\n\n\t\t\tif hop.Address == destAddr {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tretry := 0\n\tfor _, mtrResult := range mtrResults {\n\t\tif mtrResult.TTL == 0 {\n\t\t\tretry++\n\t\t\tif retry >= options.Retries() {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tretry = 0\n\t\thop := TracerouteHop{TTL: mtrResult.TTL}\n\t\thop.Address = mtrResult.Host\n\t\thop.Host = mtrResult.Host\n\t\thop.AvgTime = mtrResult.AvgTime\n\t\thop.BestTime = mtrResult.BestTime\n\t\thop.LastTime = mtrResult.LastTime\n\t\tfailSum := options.SntSize() - mtrResult.SuccSum\n\t\tloss := (float32)(failSum) \/ (float32)(options.SntSize()) * 100\n\t\thop.Loss = float32(loss)\n\t\thop.WrstTime = mtrResult.WrstTime\n\t\thop.Success = true\n\n\t\tresult.Hops = append(result.Hops, hop)\n\n\t\tif hop.Host == destAddr {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tcloseNotify(c)\n\n\treturn result, nil\n}\n\nfunc notifyMtr(hop TracerouteHop, channels []chan TracerouteHop) {\n\tfor _, c := range channels {\n\t\tc <- hop\n\t}\n}\n\nfunc closeNotifyMtr(channels []chan TracerouteHop) {\n\tfor _, c := range channels {\n\t\tclose(c)\n\t}\n}\n<commit_msg>0 -> 1<commit_after>package mtr\n\nimport (\n\t\"net\"\n\t\"os\"\n\t\"time\"\n)\n\nfunc Mtr(dest [4]byte, options *TracerouteOptions, c ...chan TracerouteHop) (result TracerouteResult, err error) {\n\tresult.Hops = []TracerouteHop{}\n\tresult.DestAddress = dest\n\tdestAddr := AddressString(dest)\n\tlocalAddr := \"0.0.0.0\"\n\tipAddr := net.IPAddr{IP: net.ParseIP(destAddr)}\n\tpid := os.Getpid() & 0xffff\n\ttimeout := time.Duration(options.TimeoutMs()) * time.Millisecond\n\n\tmtrResults := make([]MtrResult, options.MaxHops()+1)\n\n\tfor snt := 0; snt < options.SntSize(); snt++ {\n\t\tretry := 0\n\t\tfor ttl := 1; ttl < options.MaxHops(); ttl++ {\n\t\t\tsnt_plus := snt + 1\n\t\t\thop := TracerouteHop{TTL: ttl, Snt: snt_plus}\n\t\t\tif mtrResults[ttl].TTL == 0 {\n\t\t\t\tmtrResults[ttl] = MtrResult{TTL: ttl, Host: \"???\", SuccSum: 0, Success: false, LastTime: time.Duration(0), AllTime: time.Duration(0), BestTime: time.Duration(0), WrstTime: time.Duration(0), AvgTime: time.Duration(0)}\n\t\t\t}\n\t\t\thopReturn, err := Icmp(localAddr, &ipAddr, ttl, pid, timeout)\n\t\t\tif err != nil || !hopReturn.Success {\n\t\t\t\t\/\/mtrResults[ttl].Success = false\n\t\t\t\thop.Loss = (float32)(snt+1-mtrResults[ttl].SuccSum) \/ (float32)(snt+1) * 100\n\t\t\t\thop.Address = mtrResults[ttl].Host\n\t\t\t\thop.AvgTime = mtrResults[ttl].AvgTime\n\t\t\t\thop.BestTime = mtrResults[ttl].BestTime\n\t\t\t\thop.Host = mtrResults[ttl].Host\n\t\t\t\thop.LastTime = mtrResults[ttl].LastTime\n\t\t\t\thop.Success = false\n\t\t\t\thop.WrstTime = mtrResults[ttl].WrstTime\n\t\t\t\tnotifyMtr(hop, c)\n\t\t\t\tretry++\n\t\t\t\tif retry >= options.Retries() {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tretry = 0\n\t\t\tmtrResults[ttl].SuccSum = mtrResults[ttl].SuccSum + 1\n\t\t\tmtrResults[ttl].Host = hopReturn.Addr\n\t\t\tmtrResults[ttl].LastTime = hopReturn.Elapsed\n\t\t\tif mtrResults[ttl].WrstTime == time.Duration(0) || hopReturn.Elapsed > mtrResults[ttl].WrstTime {\n\t\t\t\tmtrResults[ttl].WrstTime = hopReturn.Elapsed\n\t\t\t}\n\t\t\tif mtrResults[ttl].BestTime == time.Duration(0) || hopReturn.Elapsed < mtrResults[ttl].BestTime {\n\t\t\t\tmtrResults[ttl].BestTime = hopReturn.Elapsed\n\t\t\t}\n\t\t\tmtrResults[ttl].AllTime += hopReturn.Elapsed\n\t\t\tmtrResults[ttl].AvgTime = time.Duration((int64)(mtrResults[ttl].AllTime\/time.Microsecond)\/(int64)(mtrResults[ttl].SuccSum)) * time.Microsecond\n\n\t\t\thop.Loss = (float32)(snt+1-mtrResults[ttl].SuccSum) \/ (float32)(snt+1) * 100\n\t\t\thop.Address = mtrResults[ttl].Host\n\t\t\thop.AvgTime = mtrResults[ttl].AvgTime\n\t\t\thop.BestTime = mtrResults[ttl].BestTime\n\t\t\thop.Host = mtrResults[ttl].Host\n\t\t\thop.LastTime = mtrResults[ttl].LastTime\n\t\t\thop.Success = true\n\t\t\thop.WrstTime = mtrResults[ttl].WrstTime\n\t\t\tnotifyMtr(hop, c)\n\n\t\t\tif hop.Address == destAddr {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\tretry := 0\n\tfor _, mtrResult := range mtrResults {\n\t\tif mtrResult.TTL == 0 {\n\t\t\tretry++\n\t\t\tif retry >= options.Retries() {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tretry = 0\n\t\thop := TracerouteHop{TTL: mtrResult.TTL}\n\t\thop.Address = mtrResult.Host\n\t\thop.Host = mtrResult.Host\n\t\thop.AvgTime = mtrResult.AvgTime\n\t\thop.BestTime = mtrResult.BestTime\n\t\thop.LastTime = mtrResult.LastTime\n\t\tfailSum := options.SntSize() - mtrResult.SuccSum\n\t\tloss := (float32)(failSum) \/ (float32)(options.SntSize()) * 100\n\t\thop.Loss = float32(loss)\n\t\thop.WrstTime = mtrResult.WrstTime\n\t\thop.Success = true\n\n\t\tresult.Hops = append(result.Hops, hop)\n\n\t\tif hop.Host == destAddr {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tcloseNotify(c)\n\n\treturn result, nil\n}\n\nfunc notifyMtr(hop TracerouteHop, channels []chan TracerouteHop) {\n\tfor _, c := range channels {\n\t\tc <- hop\n\t}\n}\n\nfunc closeNotifyMtr(channels []chan TracerouteHop) {\n\tfor _, c := range channels {\n\t\tclose(c)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (C) 2014 Space Monkey, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage openssl\n\nimport (\n\t\"errors\"\n\t\"net\"\n)\n\ntype listener struct {\n\tnet.Listener\n\tctx *Ctx\n}\n\nfunc (l *listener) Accept() (c net.Conn, err error) {\n\tc, err = l.Listener.Accept()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tssl_c, err := Server(c, l.ctx)\n\tif err != nil {\n\t\tc.Close()\n\t\treturn nil, err\n\t}\n\treturn ssl_c, nil\n}\n\n\/\/ NewListener wraps an existing net.Listener such that all accepted\n\/\/ connections are wrapped as OpenSSL server connections using the provided\n\/\/ context ctx.\nfunc NewListener(inner net.Listener, ctx *Ctx) net.Listener {\n\treturn &listener{\n\t\tListener: inner,\n\t\tctx: ctx}\n}\n\n\/\/ Listen is a wrapper around net.Listen that wraps incoming connections with\n\/\/ an OpenSSL server connection using the provided context ctx.\nfunc Listen(network, laddr string, ctx *Ctx) (net.Listener, error) {\n\tif ctx == nil {\n\t\treturn nil, errors.New(\"no ssl context provided\")\n\t}\n\tl, err := net.Listen(network, laddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewListener(l, ctx), nil\n}\n\ntype DialFlags int\n\nconst (\n\tInsecureSkipHostVerification DialFlags = 1 << iota\n\tDisableSNI\n)\n\n\/\/ Dial will connect to network\/address and then wrap the corresponding\n\/\/ underlying connection with an OpenSSL client connection using context ctx.\n\/\/ If flags includes InsecureSkipHostVerification, the server certificate's\n\/\/ hostname will not be checked to match the hostname in addr. Otherwise, flags\n\/\/ should be 0.\n\/\/\n\/\/ Dial probably won't work for you unless you set a verify location or add\n\/\/ some certs to the certificate store of the client context you're using.\n\/\/ This library is not nice enough to use the system certificate store by\n\/\/ default for you yet.\nfunc Dial(network, addr string, ctx *Ctx, flags DialFlags) (*Conn, error) {\n\thost, _, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ctx == nil {\n\t\tvar err error\n\t\tctx, err = NewCtx()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t\/\/ TODO: use operating system default certificate chain?\n\t}\n\tc, err := net.Dial(network, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconn, err := Client(c, ctx)\n\tif err != nil {\n\t\tc.Close()\n\t\treturn nil, err\n\t}\n\tif flags&DisableSNI == 0 {\n\t\terr = conn.SetTlsExtHostName(host)\n\t\tif err != nil {\n\t\t\tconn.Close()\n\t\t\treturn nil, err\n\t\t}\n\t}\n\terr = conn.Handshake()\n\tif err != nil {\n\t\tconn.Close()\n\t\treturn nil, err\n\t}\n\tif flags&InsecureSkipHostVerification == 0 {\n\t}\n\treturn conn, nil\n}\n<commit_msg>adding wrapper function which allows customized dialer object<commit_after>\/\/ Copyright (C) 2014 Space Monkey, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage openssl\n\nimport (\n\t\"errors\"\n\t\"net\"\n)\n\ntype listener struct {\n\tnet.Listener\n\tctx *Ctx\n}\n\nfunc (l *listener) Accept() (c net.Conn, err error) {\n\tc, err = l.Listener.Accept()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tssl_c, err := Server(c, l.ctx)\n\tif err != nil {\n\t\tc.Close()\n\t\treturn nil, err\n\t}\n\treturn ssl_c, nil\n}\n\n\/\/ NewListener wraps an existing net.Listener such that all accepted\n\/\/ connections are wrapped as OpenSSL server connections using the provided\n\/\/ context ctx.\nfunc NewListener(inner net.Listener, ctx *Ctx) net.Listener {\n\treturn &listener{\n\t\tListener: inner,\n\t\tctx: ctx}\n}\n\n\/\/ Listen is a wrapper around net.Listen that wraps incoming connections with\n\/\/ an OpenSSL server connection using the provided context ctx.\nfunc Listen(network, laddr string, ctx *Ctx) (net.Listener, error) {\n\tif ctx == nil {\n\t\treturn nil, errors.New(\"no ssl context provided\")\n\t}\n\tl, err := net.Listen(network, laddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn NewListener(l, ctx), nil\n}\n\ntype DialFlags int\n\nconst (\n\tInsecureSkipHostVerification DialFlags = 1 << iota\n\tDisableSNI\n)\n\n\/\/ Dial will connect to network\/address and then wrap the corresponding\n\/\/ underlying connection with an OpenSSL client connection using context ctx.\n\/\/ If flags includes InsecureSkipHostVerification, the server certificate's\n\/\/ hostname will not be checked to match the hostname in addr. Otherwise, flags\n\/\/ should be 0.\n\/\/\n\/\/ Dial probably won't work for you unless you set a verify location or add\n\/\/ some certs to the certificate store of the client context you're using.\n\/\/ This library is not nice enough to use the system certificate store by\n\/\/ default for you yet.\nfunc Dial(network, addr string, ctx *Ctx, flags DialFlags) (*Conn, error) {\n\treturn DialWithDialer(network, addr, ctx, flags, &net.Dialer{})\n}\n\n\/\/ DialWithDialer is a wrapper function which allows an additional parameter for a customized dialer objects\nfunc DialWithDialer(network, addr string, ctx *Ctx, flags DialFlags, dialer *net.Dialer) (*Conn, error) {\n\tif dialer == nil {\n\t\tdialer = &net.Dialer{}\n\t}\n\thost, _, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif ctx == nil {\n\t\tvar err error\n\t\tctx, err = NewCtx()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t\/\/ TODO: use operating system default certificate chain?\n\t}\n\tc, err := dialer.Dial(network, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tconn, err := Client(c, ctx)\n\tif err != nil {\n\t\tc.Close()\n\t\treturn nil, err\n\t}\n\tif flags&DisableSNI == 0 {\n\t\terr = conn.SetTlsExtHostName(host)\n\t\tif err != nil {\n\t\t\tconn.Close()\n\t\t\treturn nil, err\n\t\t}\n\t}\n\terr = conn.Handshake()\n\tif err != nil {\n\t\tconn.Close()\n\t\treturn nil, err\n\t}\n\tif flags&InsecureSkipHostVerification == 0 {\n\t}\n\treturn conn, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package nsf\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"github.com\/mjibson\/mog\/codec\/nsf\/cpu6502\"\n)\n\nconst (\n\t\/\/ 1.79 MHz\n\tcpuClock = 236250000 \/ 11 \/ 12\n)\n\nvar (\n\t\/\/ DefaultSampleRate is the default sample rate of a track after calling\n\t\/\/ Init().\n\tDefaultSampleRate = 44100\n\tErrUnrecognized = errors.New(\"nsf: unrecognized format\")\n)\n\nconst (\n\tNSF_HEADER_LEN = 0x80\n\tNSF_VERSION = 0x5\n\tNSF_SONGS = 0x6\n\tNSF_START = 0x7\n\tNSF_LOAD = 0x8\n\tNSF_INIT = 0xa\n\tNSF_PLAY = 0xc\n\tNSF_SONG = 0xe\n\tNSF_ARTIST = 0x2e\n\tNSF_COPYRIGHT = 0x4e\n\tNSF_SPEED_NTSC = 0x6e\n\tNSF_BANKSWITCH = 0x70\n\tNSF_SPEED_PAL = 0x78\n\tNSF_PAL_NTSC = 0x7a\n\tNSF_EXTRA = 0x7b\n\tNSF_ZERO = 0x7c\n)\n\nfunc ReadNSF(r io.Reader) (n *NSF, err error) {\n\tn = &NSF{}\n\tn.b, err = ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(n.b) < NSF_HEADER_LEN ||\n\t\tstring(n.b[0:NSF_VERSION]) != \"NESM\\u001a\" {\n\t\treturn nil, ErrUnrecognized\n\t}\n\tn.Version = n.b[NSF_VERSION]\n\tn.Songs = n.b[NSF_SONGS]\n\tn.Start = n.b[NSF_START]\n\tn.LoadAddr = bLEtoUint16(n.b[NSF_LOAD:])\n\tn.InitAddr = bLEtoUint16(n.b[NSF_INIT:])\n\tn.PlayAddr = bLEtoUint16(n.b[NSF_PLAY:])\n\tn.Song = bToString(n.b[NSF_SONG:])\n\tn.Artist = bToString(n.b[NSF_ARTIST:])\n\tn.Copyright = bToString(n.b[NSF_COPYRIGHT:])\n\tn.SpeedNTSC = bLEtoUint16(n.b[NSF_SPEED_NTSC:])\n\tcopy(n.Bankswitch[:], n.b[NSF_BANKSWITCH:NSF_SPEED_PAL])\n\tn.SpeedPAL = bLEtoUint16(n.b[NSF_SPEED_PAL:])\n\tn.PALNTSC = n.b[NSF_PAL_NTSC]\n\tn.Extra = n.b[NSF_EXTRA]\n\tn.Data = n.b[NSF_HEADER_LEN:]\n\treturn\n}\n\ntype NSF struct {\n\t*Ram\n\t*cpu6502.Cpu\n\n\tb []byte \/\/ raw NSF data\n\n\tVersion byte\n\tSongs byte\n\tStart byte\n\n\tLoadAddr uint16\n\tInitAddr uint16\n\tPlayAddr uint16\n\n\tSong string\n\tArtist string\n\tCopyright string\n\n\tSpeedNTSC uint16\n\tBankswitch [8]byte\n\tSpeedPAL uint16\n\tPALNTSC byte\n\tExtra byte\n\tData []byte\n\n\t\/\/ SampleRate is the sample rate at which samples will be generated. If not\n\t\/\/ set before Init(), it is set to DefaultSampleRate.\n\tSampleRate int64\n\ttotalTicks int64\n\tframeTicks int64\n\tsampleTicks int64\n\tplayTicks int64\n\tsamples []float32\n\tprevs [4]float32\n\tpi int \/\/ prevs index\n}\n\nfunc New() *NSF {\n\tn := NSF{\n\t\tRam: new(Ram),\n\t}\n\tn.Cpu = cpu6502.New(n.Ram)\n\tn.Cpu.DisableDecimal = true\n\tn.Cpu.P = 0x24\n\tn.Cpu.S = 0xfd\n\treturn &n\n}\n\nfunc (n *NSF) Tick() {\n\tn.Ram.A.Step()\n\tn.totalTicks++\n\tn.frameTicks++\n\tif n.frameTicks == cpuClock\/240 {\n\t\tn.frameTicks = 0\n\t\tn.Ram.A.FrameStep()\n\t}\n\tn.sampleTicks++\n\tif n.sampleTicks >= cpuClock\/n.SampleRate {\n\t\tn.sampleTicks = 0\n\t\tn.append(n.Ram.A.Volume())\n\t}\n\tn.playTicks++\n}\n\nfunc (n *NSF) append(v float32) {\n\tn.prevs[n.pi] = v\n\tn.pi++\n\tif n.pi >= len(n.prevs) {\n\t\tn.pi = 0\n\t}\n\tvar sum float32\n\tfor _, s := range n.prevs {\n\t\tsum += s\n\t}\n\tsum \/= float32(len(n.prevs))\n\tn.samples = append(n.samples, sum)\n}\n\nfunc (n *NSF) Init(song byte) {\n\tif n.SampleRate == 0 {\n\t\tn.SampleRate = int64(DefaultSampleRate)\n\t}\n\tcopy(n.Ram.M[n.LoadAddr:], n.Data)\n\tn.Ram.A.Init()\n\tn.Cpu.A = song - 1\n\tn.Cpu.PC = n.InitAddr\n\tn.Cpu.T = nil\n\tn.Cpu.Run()\n\tn.Cpu.T = n\n}\n\nfunc (n *NSF) Play(samples int) []float32 {\n\tplayDur := time.Duration(n.SpeedNTSC) * time.Nanosecond * 1000\n\tticksPerPlay := int64(playDur \/ (time.Second \/ cpuClock))\n\tn.samples = make([]float32, 0, samples)\n\tfor len(n.samples) < samples {\n\t\tn.playTicks = 0\n\t\tn.Cpu.PC = n.PlayAddr\n\t\tn.Cpu.Halt = false\n\t\tfor !n.Cpu.Halt && len(n.samples) < samples {\n\t\t\tn.Cpu.Step()\n\t\t\tif !n.Cpu.I() {\n\t\t\t\tpanic(\"INTERRUPT\")\n\t\t\t}\n\t\t}\n\t\tfor i := ticksPerPlay - n.playTicks; i > 0 && len(n.samples) < samples; i-- {\n\t\t\tn.Tick()\n\t\t}\n\t}\n\treturn n.samples\n}\n\n\/\/ little-endian [2]byte to uint16 conversion\nfunc bLEtoUint16(b []byte) uint16 {\n\treturn uint16(b[1])<<8 + uint16(b[0])\n}\n\n\/\/ null-terminated bytes to string\nfunc bToString(b []byte) string {\n\ti := 0\n\tfor i = range b {\n\t\tif b[i] == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn string(b[:i])\n}\n\ntype Ram struct {\n\tM [0xffff + 1]byte\n\tA Apu\n}\n\nfunc (r *Ram) Read(v uint16) byte {\n\tswitch v {\n\tcase 0x4015:\n\t\treturn r.A.Read(v)\n\tdefault:\n\t\treturn r.M[v]\n\t}\n}\n\nfunc (r *Ram) Write(v uint16, b byte) {\n\tr.M[v] = b\n\tif v&0xf000 == 0x4000 {\n\t\tr.A.Write(v, b)\n\t}\n}\n<commit_msg>Use a standard Step function, useful for tests<commit_after>package nsf\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"github.com\/mjibson\/mog\/codec\/nsf\/cpu6502\"\n)\n\nconst (\n\t\/\/ 1.79 MHz\n\tcpuClock = 236250000 \/ 11 \/ 12\n)\n\nvar (\n\t\/\/ DefaultSampleRate is the default sample rate of a track after calling\n\t\/\/ Init().\n\tDefaultSampleRate = 44100\n\tErrUnrecognized = errors.New(\"nsf: unrecognized format\")\n)\n\nconst (\n\tNSF_HEADER_LEN = 0x80\n\tNSF_VERSION = 0x5\n\tNSF_SONGS = 0x6\n\tNSF_START = 0x7\n\tNSF_LOAD = 0x8\n\tNSF_INIT = 0xa\n\tNSF_PLAY = 0xc\n\tNSF_SONG = 0xe\n\tNSF_ARTIST = 0x2e\n\tNSF_COPYRIGHT = 0x4e\n\tNSF_SPEED_NTSC = 0x6e\n\tNSF_BANKSWITCH = 0x70\n\tNSF_SPEED_PAL = 0x78\n\tNSF_PAL_NTSC = 0x7a\n\tNSF_EXTRA = 0x7b\n\tNSF_ZERO = 0x7c\n)\n\nfunc ReadNSF(r io.Reader) (n *NSF, err error) {\n\tn = &NSF{}\n\tn.b, err = ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(n.b) < NSF_HEADER_LEN ||\n\t\tstring(n.b[0:NSF_VERSION]) != \"NESM\\u001a\" {\n\t\treturn nil, ErrUnrecognized\n\t}\n\tn.Version = n.b[NSF_VERSION]\n\tn.Songs = n.b[NSF_SONGS]\n\tn.Start = n.b[NSF_START]\n\tn.LoadAddr = bLEtoUint16(n.b[NSF_LOAD:])\n\tn.InitAddr = bLEtoUint16(n.b[NSF_INIT:])\n\tn.PlayAddr = bLEtoUint16(n.b[NSF_PLAY:])\n\tn.Song = bToString(n.b[NSF_SONG:])\n\tn.Artist = bToString(n.b[NSF_ARTIST:])\n\tn.Copyright = bToString(n.b[NSF_COPYRIGHT:])\n\tn.SpeedNTSC = bLEtoUint16(n.b[NSF_SPEED_NTSC:])\n\tcopy(n.Bankswitch[:], n.b[NSF_BANKSWITCH:NSF_SPEED_PAL])\n\tn.SpeedPAL = bLEtoUint16(n.b[NSF_SPEED_PAL:])\n\tn.PALNTSC = n.b[NSF_PAL_NTSC]\n\tn.Extra = n.b[NSF_EXTRA]\n\tn.Data = n.b[NSF_HEADER_LEN:]\n\treturn\n}\n\ntype NSF struct {\n\t*Ram\n\t*cpu6502.Cpu\n\n\tb []byte \/\/ raw NSF data\n\n\tVersion byte\n\tSongs byte\n\tStart byte\n\n\tLoadAddr uint16\n\tInitAddr uint16\n\tPlayAddr uint16\n\n\tSong string\n\tArtist string\n\tCopyright string\n\n\tSpeedNTSC uint16\n\tBankswitch [8]byte\n\tSpeedPAL uint16\n\tPALNTSC byte\n\tExtra byte\n\tData []byte\n\n\t\/\/ SampleRate is the sample rate at which samples will be generated. If not\n\t\/\/ set before Init(), it is set to DefaultSampleRate.\n\tSampleRate int64\n\ttotalTicks int64\n\tframeTicks int64\n\tsampleTicks int64\n\tplayTicks int64\n\tsamples []float32\n\tprevs [4]float32\n\tpi int \/\/ prevs index\n}\n\nfunc New() *NSF {\n\tn := NSF{\n\t\tRam: new(Ram),\n\t}\n\tn.Cpu = cpu6502.New(n.Ram)\n\tn.Cpu.DisableDecimal = true\n\tn.Cpu.P = 0x24\n\tn.Cpu.S = 0xfd\n\treturn &n\n}\n\nfunc (n *NSF) Tick() {\n\tn.Ram.A.Step()\n\tn.totalTicks++\n\tn.frameTicks++\n\tif n.frameTicks == cpuClock\/240 {\n\t\tn.frameTicks = 0\n\t\tn.Ram.A.FrameStep()\n\t}\n\tn.sampleTicks++\n\tif n.sampleTicks >= cpuClock\/n.SampleRate {\n\t\tn.sampleTicks = 0\n\t\tn.append(n.Ram.A.Volume())\n\t}\n\tn.playTicks++\n}\n\nfunc (n *NSF) append(v float32) {\n\tn.prevs[n.pi] = v\n\tn.pi++\n\tif n.pi >= len(n.prevs) {\n\t\tn.pi = 0\n\t}\n\tvar sum float32\n\tfor _, s := range n.prevs {\n\t\tsum += s\n\t}\n\tsum \/= float32(len(n.prevs))\n\tn.samples = append(n.samples, sum)\n}\n\nfunc (n *NSF) Init(song byte) {\n\tif n.SampleRate == 0 {\n\t\tn.SampleRate = int64(DefaultSampleRate)\n\t}\n\tcopy(n.Ram.M[n.LoadAddr:], n.Data)\n\tn.Ram.A.Init()\n\tn.Cpu.A = song - 1\n\tn.Cpu.PC = n.InitAddr\n\tn.Cpu.T = nil\n\tn.Cpu.Run()\n\tn.Cpu.T = n\n}\n\nfunc (n *NSF) Step() {\n\tn.Cpu.Step()\n\tif !n.Cpu.I() && n.Ram.A.Interrupt {\n\t\tprintln(\"INTERRUPT\")\n\t\tn.Cpu.Interrupt()\n\t}\n}\n\nfunc (n *NSF) Play(samples int) []float32 {\n\tplayDur := time.Duration(n.SpeedNTSC) * time.Nanosecond * 1000\n\tticksPerPlay := int64(playDur \/ (time.Second \/ cpuClock))\n\tn.samples = make([]float32, 0, samples)\n\tfor len(n.samples) < samples {\n\t\tn.playTicks = 0\n\t\tn.Cpu.PC = n.PlayAddr\n\t\tn.Cpu.Halt = false\n\t\tfor !n.Cpu.Halt && len(n.samples) < samples {\n\t\t\tn.Step()\n\t\t}\n\t\tfor i := ticksPerPlay - n.playTicks; i > 0 && len(n.samples) < samples; i-- {\n\t\t\tn.Tick()\n\t\t}\n\t}\n\treturn n.samples\n}\n\n\/\/ little-endian [2]byte to uint16 conversion\nfunc bLEtoUint16(b []byte) uint16 {\n\treturn uint16(b[1])<<8 + uint16(b[0])\n}\n\n\/\/ null-terminated bytes to string\nfunc bToString(b []byte) string {\n\ti := 0\n\tfor i = range b {\n\t\tif b[i] == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn string(b[:i])\n}\n\ntype Ram struct {\n\tM [0xffff + 1]byte\n\tA Apu\n}\n\nfunc (r *Ram) Read(v uint16) byte {\n\tswitch v {\n\tcase 0x4015:\n\t\treturn r.A.Read(v)\n\tdefault:\n\t\treturn r.M[v]\n\t}\n}\n\nfunc (r *Ram) Write(v uint16, b byte) {\n\tr.M[v] = b\n\tif v&0xf000 == 0x4000 {\n\t\tr.A.Write(v, b)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package nsf\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"github.com\/mjibson\/mog\/codec\/nsf\/cpu6502\"\n)\n\nconst (\n\t\/\/ 1.79 MHz\n\tcpuClock = 236250000 \/ 11 \/ 12\n\tDefaultSampleRate = 44100\n)\n\nvar (\n\tErrUnrecognized = errors.New(\"nsf: unrecognized format\")\n)\n\nconst (\n\tNSF_HEADER_LEN = 0x80\n\tNSF_VERSION = 0x5\n\tNSF_SONGS = 0x6\n\tNSF_START = 0x7\n\tNSF_LOAD = 0x8\n\tNSF_INIT = 0xa\n\tNSF_PLAY = 0xc\n\tNSF_SONG = 0xe\n\tNSF_ARTIST = 0x2e\n\tNSF_COPYRIGHT = 0x4e\n\tNSF_SPEED_NTSC = 0x6e\n\tNSF_BANKSWITCH = 0x70\n\tNSF_SPEED_PAL = 0x78\n\tNSF_PAL_NTSC = 0x7a\n\tNSF_EXTRA = 0x7b\n\tNSF_ZERO = 0x7c\n)\n\nfunc ReadNSF(r io.Reader) (n *NSF, err error) {\n\tn = &NSF{}\n\tn.b, err = ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(n.b) < NSF_HEADER_LEN ||\n\t\tstring(n.b[0:NSF_VERSION]) != \"NESM\\u001a\" {\n\t\treturn nil, ErrUnrecognized\n\t}\n\tn.Version = n.b[NSF_VERSION]\n\tn.Songs = n.b[NSF_SONGS]\n\tn.Start = n.b[NSF_START]\n\tn.LoadAddr = bLEtoUint16(n.b[NSF_LOAD:])\n\tn.InitAddr = bLEtoUint16(n.b[NSF_INIT:])\n\tn.PlayAddr = bLEtoUint16(n.b[NSF_PLAY:])\n\tn.Song = bToString(n.b[NSF_SONG:])\n\tn.Artist = bToString(n.b[NSF_ARTIST:])\n\tn.Copyright = bToString(n.b[NSF_COPYRIGHT:])\n\tn.SpeedNTSC = bLEtoUint16(n.b[NSF_SPEED_NTSC:])\n\tcopy(n.Bankswitch[:], n.b[NSF_BANKSWITCH:NSF_SPEED_PAL])\n\tn.SpeedPAL = bLEtoUint16(n.b[NSF_SPEED_PAL:])\n\tn.PALNTSC = n.b[NSF_PAL_NTSC]\n\tn.Extra = n.b[NSF_EXTRA]\n\tn.Data = n.b[NSF_HEADER_LEN:]\n\treturn\n}\n\ntype NSF struct {\n\t*Ram\n\t*cpu6502.Cpu\n\n\tb []byte \/\/ raw NSF data\n\n\tVersion byte\n\tSongs byte\n\tStart byte\n\n\tLoadAddr uint16\n\tInitAddr uint16\n\tPlayAddr uint16\n\n\tSong string\n\tArtist string\n\tCopyright string\n\n\tSpeedNTSC uint16\n\tBankswitch [8]byte\n\tSpeedPAL uint16\n\tPALNTSC byte\n\tExtra byte\n\tData []byte\n\n\tSampleRate int64\n\ttotalTicks int64\n\tframeTicks int64\n\tsampleTicks int64\n\tplayTicks int64\n\tsamples []float32\n\tprevs [4]float32\n\tpi int \/\/ prevs index\n}\n\nfunc (n *NSF) Tick() {\n\tn.Ram.A.Step()\n\tn.totalTicks++\n\tn.frameTicks++\n\tif n.frameTicks == cpuClock\/240 {\n\t\tn.frameTicks = 0\n\t\tn.Ram.A.FrameStep()\n\t}\n\tn.sampleTicks++\n\tif n.sampleTicks >= cpuClock\/n.SampleRate {\n\t\tn.sampleTicks = 0\n\t\tn.append(n.Ram.A.Volume())\n\t}\n\tn.playTicks++\n}\n\nfunc (n *NSF) append(v float32) {\n\tn.prevs[n.pi] = v\n\tn.pi++\n\tif n.pi >= len(n.prevs) {\n\t\tn.pi = 0\n\t}\n\tvar sum float32\n\tfor _, s := range n.prevs {\n\t\tsum += s\n\t}\n\tsum \/= float32(len(n.prevs))\n\tn.samples = append(n.samples, sum)\n}\n\nfunc (n *NSF) Init(song byte) {\n\tif n.SampleRate == 0 {\n\t\tn.SampleRate = DefaultSampleRate\n\t}\n\tn.Ram = new(Ram)\n\tn.Cpu = cpu6502.New(n.Ram)\n\tcopy(n.Ram.M[n.LoadAddr:], n.Data)\n\tn.Ram.A.Init()\n\tn.Cpu.A = song - 1\n\tn.Cpu.PC = n.InitAddr\n\tn.Cpu.T = nil\n\tn.Cpu.Run()\n\tn.Cpu.T = n\n}\n\nfunc (n *NSF) Play(samples int) []float32 {\n\tplayDur := time.Duration(n.SpeedNTSC) * time.Nanosecond * 1000\n\tticksPerPlay := int64(playDur \/ (time.Second \/ cpuClock))\n\tn.samples = make([]float32, 0, samples)\n\tfor len(n.samples) < samples {\n\t\tn.playTicks = 0\n\t\tn.Cpu.PC = n.PlayAddr\n\t\tn.Cpu.Halt = false\n\t\tfor !n.Cpu.Halt && len(n.samples) < samples {\n\t\t\tn.Cpu.Step()\n\t\t\tif !n.Cpu.I() {\n\t\t\t\tpanic(\"INTERRUPT\")\n\t\t\t}\n\t\t}\n\t\tfor i := ticksPerPlay - n.playTicks; i > 0 && len(n.samples) < samples; i-- {\n\t\t\tn.Tick()\n\t\t}\n\t}\n\treturn n.samples\n}\n\n\/\/ little-endian [2]byte to uint16 conversion\nfunc bLEtoUint16(b []byte) uint16 {\n\treturn uint16(b[1])<<8 + uint16(b[0])\n}\n\n\/\/ null-terminated bytes to string\nfunc bToString(b []byte) string {\n\ti := 0\n\tfor i = range b {\n\t\tif b[i] == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn string(b[:i])\n}\n\ntype Ram struct {\n\tM [0xffff + 1]byte\n\tA Apu\n}\n\nfunc (r *Ram) Read(v uint16) byte {\n\tif v == 0x4015 {\n\t\treturn r.A.Read(v)\n\t} else {\n\t\treturn r.M[v]\n\t}\n}\n\nfunc (r *Ram) Write(v uint16, b byte) {\n\tr.M[v] = b\n\tif v&0xf000 == 0x4000 {\n\t\tr.A.Write(v, b)\n\t}\n}\n<commit_msg>Document sample rates<commit_after>package nsf\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"time\"\n\n\t\"github.com\/mjibson\/mog\/codec\/nsf\/cpu6502\"\n)\n\nconst (\n\t\/\/ 1.79 MHz\n\tcpuClock = 236250000 \/ 11 \/ 12\n)\n\nvar (\n\t\/\/ DefaultSampleRate is the default sample rate of a track after calling\n\t\/\/ Init().\n\tDefaultSampleRate = 44100\n\tErrUnrecognized = errors.New(\"nsf: unrecognized format\")\n)\n\nconst (\n\tNSF_HEADER_LEN = 0x80\n\tNSF_VERSION = 0x5\n\tNSF_SONGS = 0x6\n\tNSF_START = 0x7\n\tNSF_LOAD = 0x8\n\tNSF_INIT = 0xa\n\tNSF_PLAY = 0xc\n\tNSF_SONG = 0xe\n\tNSF_ARTIST = 0x2e\n\tNSF_COPYRIGHT = 0x4e\n\tNSF_SPEED_NTSC = 0x6e\n\tNSF_BANKSWITCH = 0x70\n\tNSF_SPEED_PAL = 0x78\n\tNSF_PAL_NTSC = 0x7a\n\tNSF_EXTRA = 0x7b\n\tNSF_ZERO = 0x7c\n)\n\nfunc ReadNSF(r io.Reader) (n *NSF, err error) {\n\tn = &NSF{}\n\tn.b, err = ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn\n\t}\n\tif len(n.b) < NSF_HEADER_LEN ||\n\t\tstring(n.b[0:NSF_VERSION]) != \"NESM\\u001a\" {\n\t\treturn nil, ErrUnrecognized\n\t}\n\tn.Version = n.b[NSF_VERSION]\n\tn.Songs = n.b[NSF_SONGS]\n\tn.Start = n.b[NSF_START]\n\tn.LoadAddr = bLEtoUint16(n.b[NSF_LOAD:])\n\tn.InitAddr = bLEtoUint16(n.b[NSF_INIT:])\n\tn.PlayAddr = bLEtoUint16(n.b[NSF_PLAY:])\n\tn.Song = bToString(n.b[NSF_SONG:])\n\tn.Artist = bToString(n.b[NSF_ARTIST:])\n\tn.Copyright = bToString(n.b[NSF_COPYRIGHT:])\n\tn.SpeedNTSC = bLEtoUint16(n.b[NSF_SPEED_NTSC:])\n\tcopy(n.Bankswitch[:], n.b[NSF_BANKSWITCH:NSF_SPEED_PAL])\n\tn.SpeedPAL = bLEtoUint16(n.b[NSF_SPEED_PAL:])\n\tn.PALNTSC = n.b[NSF_PAL_NTSC]\n\tn.Extra = n.b[NSF_EXTRA]\n\tn.Data = n.b[NSF_HEADER_LEN:]\n\treturn\n}\n\ntype NSF struct {\n\t*Ram\n\t*cpu6502.Cpu\n\n\tb []byte \/\/ raw NSF data\n\n\tVersion byte\n\tSongs byte\n\tStart byte\n\n\tLoadAddr uint16\n\tInitAddr uint16\n\tPlayAddr uint16\n\n\tSong string\n\tArtist string\n\tCopyright string\n\n\tSpeedNTSC uint16\n\tBankswitch [8]byte\n\tSpeedPAL uint16\n\tPALNTSC byte\n\tExtra byte\n\tData []byte\n\n\t\/\/ SampleRate is the sample rate at which samples will be generated. If not\n\t\/\/ set before Init(), it is set to DefaultSampleRate.\n\tSampleRate int64\n\ttotalTicks int64\n\tframeTicks int64\n\tsampleTicks int64\n\tplayTicks int64\n\tsamples []float32\n\tprevs [4]float32\n\tpi int \/\/ prevs index\n}\n\nfunc (n *NSF) Tick() {\n\tn.Ram.A.Step()\n\tn.totalTicks++\n\tn.frameTicks++\n\tif n.frameTicks == cpuClock\/240 {\n\t\tn.frameTicks = 0\n\t\tn.Ram.A.FrameStep()\n\t}\n\tn.sampleTicks++\n\tif n.sampleTicks >= cpuClock\/n.SampleRate {\n\t\tn.sampleTicks = 0\n\t\tn.append(n.Ram.A.Volume())\n\t}\n\tn.playTicks++\n}\n\nfunc (n *NSF) append(v float32) {\n\tn.prevs[n.pi] = v\n\tn.pi++\n\tif n.pi >= len(n.prevs) {\n\t\tn.pi = 0\n\t}\n\tvar sum float32\n\tfor _, s := range n.prevs {\n\t\tsum += s\n\t}\n\tsum \/= float32(len(n.prevs))\n\tn.samples = append(n.samples, sum)\n}\n\nfunc (n *NSF) Init(song byte) {\n\tif n.SampleRate == 0 {\n\t\tn.SampleRate = int64(DefaultSampleRate)\n\t}\n\tn.Ram = new(Ram)\n\tn.Cpu = cpu6502.New(n.Ram)\n\tcopy(n.Ram.M[n.LoadAddr:], n.Data)\n\tn.Ram.A.Init()\n\tn.Cpu.A = song - 1\n\tn.Cpu.PC = n.InitAddr\n\tn.Cpu.T = nil\n\tn.Cpu.Run()\n\tn.Cpu.T = n\n}\n\nfunc (n *NSF) Play(samples int) []float32 {\n\tplayDur := time.Duration(n.SpeedNTSC) * time.Nanosecond * 1000\n\tticksPerPlay := int64(playDur \/ (time.Second \/ cpuClock))\n\tn.samples = make([]float32, 0, samples)\n\tfor len(n.samples) < samples {\n\t\tn.playTicks = 0\n\t\tn.Cpu.PC = n.PlayAddr\n\t\tn.Cpu.Halt = false\n\t\tfor !n.Cpu.Halt && len(n.samples) < samples {\n\t\t\tn.Cpu.Step()\n\t\t\tif !n.Cpu.I() {\n\t\t\t\tpanic(\"INTERRUPT\")\n\t\t\t}\n\t\t}\n\t\tfor i := ticksPerPlay - n.playTicks; i > 0 && len(n.samples) < samples; i-- {\n\t\t\tn.Tick()\n\t\t}\n\t}\n\treturn n.samples\n}\n\n\/\/ little-endian [2]byte to uint16 conversion\nfunc bLEtoUint16(b []byte) uint16 {\n\treturn uint16(b[1])<<8 + uint16(b[0])\n}\n\n\/\/ null-terminated bytes to string\nfunc bToString(b []byte) string {\n\ti := 0\n\tfor i = range b {\n\t\tif b[i] == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn string(b[:i])\n}\n\ntype Ram struct {\n\tM [0xffff + 1]byte\n\tA Apu\n}\n\nfunc (r *Ram) Read(v uint16) byte {\n\tif v == 0x4015 {\n\t\treturn r.A.Read(v)\n\t} else {\n\t\treturn r.M[v]\n\t}\n}\n\nfunc (r *Ram) Write(v uint16, b byte) {\n\tr.M[v] = b\n\tif v&0xf000 == 0x4000 {\n\t\tr.A.Write(v, b)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2012-2018 Eli Janssen\n\/\/ Use of this source code is governed by an MIT-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package camo provides an HTTP proxy server with content type\n\/\/ restrictions as well as regex host allow list support.\npackage camo\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cactus\/go-camo\/pkg\/camo\/encoding\"\n\t\"github.com\/cactus\/go-camo\/pkg\/htrie\"\n\n\t\"github.com\/cactus\/mlog\"\n)\n\n\/\/ Config holds configuration data used when creating a Proxy with New.\ntype Config struct {\n\t\/\/ HMACKey is a byte slice to be used as the hmac key\n\tHMACKey []byte\n\t\/\/ Server name used in Headers and Via checks\n\tServerName string\n\t\/\/ MaxSize is the maximum valid image size response (in bytes).\n\tMaxSize int64\n\t\/\/ MaxRedirects is the maximum number of redirects to follow.\n\tMaxRedirects int\n\t\/\/ Request timeout is a timeout for fetching upstream data.\n\tRequestTimeout time.Duration\n\t\/\/ Keepalive enable\/disable\n\tDisableKeepAlivesFE bool\n\tDisableKeepAlivesBE bool\n\t\/\/ x-forwarded-for enable\/disable\n\tEnableXFwdFor bool\n\t\/\/ additional content types to allow\n\tAllowContentVideo bool\n\t\/\/ allow URLs to contain user\/pass credentials\n\tAllowCredetialURLs bool\n\t\/\/ no ip filtering (test mode)\n\tnoIPFiltering bool\n}\n\n\/\/ The FilterFunc type is a function that validates a *url.URL\n\/\/ A true value approves the url. A false value rejects the url.\ntype FilterFunc func(*url.URL) bool\n\n\/\/ A Proxy is a Camo like HTTP proxy, that provides content type\n\/\/ restrictions as well as regex host allow list support.\ntype Proxy struct {\n\tclient *http.Client\n\tconfig *Config\n\tacceptTypesFilter *htrie.GlobPathChecker\n\tacceptTypesString string\n\tfilters []FilterFunc\n\tfiltersLen int\n}\n\n\/\/ ServerHTTP handles the client request, validates the request is validly\n\/\/ HMAC signed, filters based on the Allow list, and then proxies\n\/\/ valid requests to the desired endpoint. Responses are filtered for\n\/\/ proper image content types.\nfunc (p *Proxy) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tif p.config.DisableKeepAlivesFE {\n\t\tw.Header().Set(\"Connection\", \"close\")\n\t}\n\n\tif req.Header.Get(\"Via\") == p.config.ServerName {\n\t\thttp.Error(w, \"Request loop failure\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\t\/\/ split path and get components\n\tcomponents := strings.Split(req.URL.Path, \"\/\")\n\tif len(components) < 3 {\n\t\thttp.Error(w, \"Malformed request path\", http.StatusNotFound)\n\t\treturn\n\t}\n\tsigHash, encodedURL := components[1], components[2]\n\n\tmlog.Debugm(\"client request\", mlog.Map{\"req\": req})\n\n\tsURL, ok := encoding.DecodeURL(p.config.HMACKey, sigHash, encodedURL)\n\tif !ok {\n\t\thttp.Error(w, \"Bad Signature\", http.StatusForbidden)\n\t\treturn\n\t}\n\n\tmlog.Debugm(\"signed client url\", mlog.Map{\"url\": sURL})\n\n\tu, err := url.Parse(sURL)\n\tif err != nil {\n\t\tmlog.Debugm(\"url parse error\", mlog.Map{\"err\": err})\n\t\thttp.Error(w, \"Bad url\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr = p.checkURL(u)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tnreq, err := http.NewRequest(req.Method, sURL, nil)\n\tif err != nil {\n\t\tmlog.Debugm(\"could not create NewRequest\", mlog.Map{\"err\": err})\n\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusBadGateway)\n\t\treturn\n\t}\n\n\t\/\/ filter headers\n\tp.copyHeaders(&nreq.Header, &req.Header, &ValidReqHeaders)\n\n\t\/\/ x-forwarded-for (if appropriate)\n\tif p.config.EnableXFwdFor {\n\t\txfwd4 := req.Header.Get(\"X-Forwarded-For\")\n\t\tif xfwd4 == \"\" {\n\t\t\thostIP, _, err := net.SplitHostPort(req.RemoteAddr)\n\t\t\tif err == nil {\n\t\t\t\t\/\/ add forwarded for header, as long as it isn't a private\n\t\t\t\t\/\/ ip address (use isRejectedIP to get private filtering for free)\n\t\t\t\tif ip := net.ParseIP(hostIP); ip != nil {\n\t\t\t\t\tif !isRejectedIP(ip) {\n\t\t\t\t\t\tnreq.Header.Add(\"X-Forwarded-For\", hostIP)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tnreq.Header.Add(\"X-Forwarded-For\", xfwd4)\n\t\t}\n\t}\n\n\t\/\/ add\/squash an accept header if the client didn't send one\n\tnreq.Header.Set(\"Accept\", p.acceptTypesString)\n\n\tnreq.Header.Add(\"User-Agent\", p.config.ServerName)\n\tnreq.Header.Add(\"Via\", p.config.ServerName)\n\n\tmlog.Debugm(\"built outgoing request\", mlog.Map{\"req\": nreq})\n\n\tresp, err := p.client.Do(nreq)\n\n\tif resp != nil {\n\t\tdefer resp.Body.Close()\n\t}\n\n\tif err != nil {\n\t\tmlog.Debugm(\"could not connect to endpoint\", mlog.Map{\"err\": err})\n\t\t\/\/ this is a bit janky, but better than peeling off the\n\t\t\/\/ 3 layers of wrapped errors and trying to get to net.OpErr and\n\t\t\/\/ still having to rely on string comparison to find out if it is\n\t\t\/\/ a net.errClosing or not.\n\t\terrString := err.Error()\n\t\t\/\/ go 1.5 changes this to http.httpError\n\t\t\/\/ go 1.4 has this as net.OpError\n\t\t\/\/ and the error strings are different depending on which version too.\n\t\tif strings.Contains(errString, \"timeout\") || strings.Contains(errString, \"Client.Timeout\") {\n\t\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusGatewayTimeout)\n\t\t} else if strings.Contains(errString, \"use of closed\") {\n\t\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusBadGateway)\n\t\t} else if strings.Contains(errString, \"BadRedirect:\") {\n\t\t\t\/\/ Got a bad redirect\n\t\t\tmlog.Debugm(\"response from upstream\", mlog.Map{\"err\": err})\n\t\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusNotFound)\n\t\t} else {\n\t\t\t\/\/ some other error. call it a not found (camo compliant)\n\t\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusNotFound)\n\t\t}\n\t\treturn\n\t}\n\n\tmlog.Debugm(\"response from upstream\", mlog.Map{\"resp\": resp})\n\n\t\/\/ check for too large a response\n\tif resp.ContentLength > p.config.MaxSize {\n\t\tmlog.Debugm(\"content length exceeded\", mlog.Map{\"url\": sURL})\n\t\thttp.Error(w, \"Content length exceeded\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tswitch resp.StatusCode {\n\tcase 200, 206:\n\t\tcontentType := resp.Header.Get(\"Content-Type\")\n\n\t\tif contentType == \"\" {\n\t\t\tmlog.Debug(\"Empty content-type returned\")\n\t\t\thttp.Error(w, \"Empty content-type returned\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tif !p.acceptTypesFilter.CheckPath(contentType) {\n\t\t\tmlog.Debugm(\"Unsupported content-type returned\", mlog.Map{\"type\": u})\n\t\t\thttp.Error(w, \"Unsupported content-type returned\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\tcase 300:\n\t\thttp.Error(w, \"Multiple choices not supported\", http.StatusNotFound)\n\t\treturn\n\tcase 301, 302, 303, 307:\n\t\t\/\/ if we get a redirect here, we either disabled following,\n\t\t\/\/ or followed until max depth and still got one (redirect loop)\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\treturn\n\tcase 304:\n\t\th := w.Header()\n\t\tp.copyHeaders(&h, &resp.Header, &ValidRespHeaders)\n\t\tw.WriteHeader(304)\n\t\treturn\n\tcase 404:\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\treturn\n\tcase 500, 502, 503, 504:\n\t\t\/\/ upstream errors should probably just 502. client can try later.\n\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusBadGateway)\n\t\treturn\n\tdefault:\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\th := w.Header()\n\tp.copyHeaders(&h, &resp.Header, &ValidRespHeaders)\n\tw.WriteHeader(resp.StatusCode)\n\n\t\/\/ get a []byte from bufpool, and put it back on defer\n\tbuf := *bufPool.Get().(*[]byte)\n\tdefer bufPool.Put(&buf)\n\n\t\/\/ since this uses io.Copy\/CopyBuffer from the respBody, it is streaming\n\t\/\/ from the request to the response. This means it will nearly\n\t\/\/ always end up with a chunked response.\n\t_, err = io.CopyBuffer(w, resp.Body, buf)\n\tif err != nil {\n\t\t\/\/ only log broken pipe errors at debug level\n\t\tif isBrokenPipe(err) {\n\t\t\tmlog.Debugm(\"error writing response\", mlog.Map{\"err\": err})\n\t\t} else {\n\t\t\t\/\/ unknown error and not a broken pipe\n\t\t\tmlog.Printm(\"error writing response\", mlog.Map{\"err\": err})\n\t\t}\n\n\t\treturn\n\t}\n\n\tmlog.Debugm(\"response to client\", mlog.Map{\"resp\": w})\n}\n\nfunc (p *Proxy) checkURL(reqURL *url.URL) error {\n\t\/\/ reject localhost urls\n\t\/\/ lower case for matching is done by CheckHostname, so no need to\n\t\/\/ ToLower here also\n\tuHostname := reqURL.Hostname()\n\tif uHostname == \"\" || localsFilter.CheckHostname(uHostname) {\n\t\treturn errors.New(\"Bad url host\")\n\t}\n\n\t\/\/ if not allowed, reject credentialed\/userinfo urls\n\tif !p.config.AllowCredetialURLs && reqURL.User != nil {\n\t\treturn errors.New(\"Userinfo URL rejected\")\n\t}\n\n\t\/\/ ip\/whitelist\/blacklist filtering\n\tif !p.config.noIPFiltering {\n\t\t\/\/ filter out rejected networks\n\t\tif ip := net.ParseIP(uHostname); ip != nil {\n\t\t\tif isRejectedIP(ip) {\n\t\t\t\treturn errors.New(\"Denylist host failure\")\n\t\t\t}\n\t\t} else {\n\t\t\tif ips, err := net.LookupIP(uHostname); err == nil {\n\t\t\t\tfor _, ip := range ips {\n\t\t\t\t\tif isRejectedIP(ip) {\n\t\t\t\t\t\treturn errors.New(\"Denylist host failure\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ evaluate filters. first false value \"fails\"\n\tfor i := 0; i < p.filtersLen; i++ {\n\t\tif !p.filters[i](reqURL) {\n\t\t\treturn errors.New(\"Rejected due to filter-ruleset\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ copy headers from src into dst\n\/\/ empty filter map will result in no filtering being done\nfunc (p *Proxy) copyHeaders(dst, src *http.Header, filter *map[string]bool) {\n\tf := *filter\n\tfiltering := false\n\tif len(f) > 0 {\n\t\tfiltering = true\n\t}\n\n\tfor k, vv := range *src {\n\t\tif x, ok := f[k]; filtering && (!ok || !x) {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, v := range vv {\n\t\t\tdst.Add(k, v)\n\t\t}\n\t}\n}\n\n\/\/ NewWithFilters returns a new Proxy that utilises the passed in proxy filters.\n\/\/ filters are evaluated in order, and the first false response from a filter\n\/\/ function halts further evaluation and fails the request.\nfunc NewWithFilters(pc Config, filters []FilterFunc) (*Proxy, error) {\n\tproxy, err := New(pc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfilterFuncs := make([]FilterFunc, 0)\n\t\/\/ check for nil entries, and copy the slice in case the original\n\t\/\/ is mutated.\n\tfor _, filter := range filters {\n\t\tif filter != nil {\n\t\t\tfilterFuncs = append(filterFuncs, filter)\n\t\t}\n\t}\n\tproxy.filters = filterFuncs\n\tproxy.filtersLen = len(filterFuncs)\n\treturn proxy, nil\n}\n\n\/\/ New returns a new Proxy. Returns an error if Proxy could not be constructed.\nfunc New(pc Config) (*Proxy, error) {\n\ttr := &http.Transport{\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout: 3 * time.Second,\n\t\t\tKeepAlive: 30 * time.Second,\n\t\t}).DialContext,\n\t\tTLSHandshakeTimeout: 3 * time.Second,\n\n\t\t\/\/ max idle conns. Go DetaultTransport uses 100, which seems like a\n\t\t\/\/ fairly reasonable number. Very busy servers may wish to raise\n\t\t\/\/ or lower this value.\n\t\tMaxIdleConns: 100,\n\t\tMaxIdleConnsPerHost: 8,\n\n\t\t\/\/ more defaults from DefaultTransport, with a few tweaks\n\t\tIdleConnTimeout: 30 * time.Second,\n\t\tExpectContinueTimeout: 1 * time.Second,\n\n\t\tDisableKeepAlives: pc.DisableKeepAlivesBE,\n\t\t\/\/ no need for compression with images\n\t\t\/\/ some xml\/svg can be compressed, but apparently some clients can\n\t\t\/\/ exhibit weird behavior when those are compressed\n\t\tDisableCompression: true,\n\t}\n\n\tclient := &http.Client{\n\t\tTransport: tr,\n\t\t\/\/ timeout\n\t\tTimeout: pc.RequestTimeout,\n\t}\n\n\tacceptTypes := []string{\"image\/*\"}\n\t\/\/ add additional accept types, if appropriate\n\tif pc.AllowContentVideo {\n\t\tacceptTypes = append(acceptTypes, \"video\/*\")\n\t}\n\n\t\/\/ re-use the htrie glob path checker for accept types validation\n\tacceptTypesFilter := htrie.NewGlobPathChecker()\n\tfor _, v := range acceptTypes {\n\t\terr := acceptTypesFilter.AddRule(\"|i|\" + v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tp := &Proxy{\n\t\tclient: client,\n\t\tconfig: &pc,\n\t\tacceptTypesString: strings.Join(acceptTypes, \", \"),\n\t\tacceptTypesFilter: acceptTypesFilter,\n\t}\n\n\tclient.CheckRedirect = func(req *http.Request, via []*http.Request) error {\n\t\tif len(via) >= pc.MaxRedirects {\n\t\t\tmlog.Debug(\"Got bad redirect: Too many redirects\")\n\t\t\treturn errors.New(\"BadRedirect: Too many redirects\")\n\t\t}\n\t\terr := p.checkURL(req.URL)\n\t\tif err != nil {\n\t\t\tmlog.Debugm(\"Got bad redirect\", mlog.Map{\"url\": req})\n\t\t\treturn fmt.Errorf(\"BadRedirect: %s\", err)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn p, nil\n\n}\n<commit_msg>fix space<commit_after>\/\/ Copyright (c) 2012-2018 Eli Janssen\n\/\/ Use of this source code is governed by an MIT-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Package camo provides an HTTP proxy server with content type\n\/\/ restrictions as well as regex host allow list support.\npackage camo\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/cactus\/go-camo\/pkg\/camo\/encoding\"\n\t\"github.com\/cactus\/go-camo\/pkg\/htrie\"\n\n\t\"github.com\/cactus\/mlog\"\n)\n\n\/\/ Config holds configuration data used when creating a Proxy with New.\ntype Config struct {\n\t\/\/ HMACKey is a byte slice to be used as the hmac key\n\tHMACKey []byte\n\t\/\/ Server name used in Headers and Via checks\n\tServerName string\n\t\/\/ MaxSize is the maximum valid image size response (in bytes).\n\tMaxSize int64\n\t\/\/ MaxRedirects is the maximum number of redirects to follow.\n\tMaxRedirects int\n\t\/\/ Request timeout is a timeout for fetching upstream data.\n\tRequestTimeout time.Duration\n\t\/\/ Keepalive enable\/disable\n\tDisableKeepAlivesFE bool\n\tDisableKeepAlivesBE bool\n\t\/\/ x-forwarded-for enable\/disable\n\tEnableXFwdFor bool\n\t\/\/ additional content types to allow\n\tAllowContentVideo bool\n\t\/\/ allow URLs to contain user\/pass credentials\n\tAllowCredetialURLs bool\n\t\/\/ no ip filtering (test mode)\n\tnoIPFiltering bool\n}\n\n\/\/ The FilterFunc type is a function that validates a *url.URL\n\/\/ A true value approves the url. A false value rejects the url.\ntype FilterFunc func(*url.URL) bool\n\n\/\/ A Proxy is a Camo like HTTP proxy, that provides content type\n\/\/ restrictions as well as regex host allow list support.\ntype Proxy struct {\n\tclient *http.Client\n\tconfig *Config\n\tacceptTypesFilter *htrie.GlobPathChecker\n\tacceptTypesString string\n\tfilters []FilterFunc\n\tfiltersLen int\n}\n\n\/\/ ServerHTTP handles the client request, validates the request is validly\n\/\/ HMAC signed, filters based on the Allow list, and then proxies\n\/\/ valid requests to the desired endpoint. Responses are filtered for\n\/\/ proper image content types.\nfunc (p *Proxy) ServeHTTP(w http.ResponseWriter, req *http.Request) {\n\tif p.config.DisableKeepAlivesFE {\n\t\tw.Header().Set(\"Connection\", \"close\")\n\t}\n\n\tif req.Header.Get(\"Via\") == p.config.ServerName {\n\t\thttp.Error(w, \"Request loop failure\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\t\/\/ split path and get components\n\tcomponents := strings.Split(req.URL.Path, \"\/\")\n\tif len(components) < 3 {\n\t\thttp.Error(w, \"Malformed request path\", http.StatusNotFound)\n\t\treturn\n\t}\n\tsigHash, encodedURL := components[1], components[2]\n\n\tmlog.Debugm(\"client request\", mlog.Map{\"req\": req})\n\n\tsURL, ok := encoding.DecodeURL(p.config.HMACKey, sigHash, encodedURL)\n\tif !ok {\n\t\thttp.Error(w, \"Bad Signature\", http.StatusForbidden)\n\t\treturn\n\t}\n\n\tmlog.Debugm(\"signed client url\", mlog.Map{\"url\": sURL})\n\n\tu, err := url.Parse(sURL)\n\tif err != nil {\n\t\tmlog.Debugm(\"url parse error\", mlog.Map{\"err\": err})\n\t\thttp.Error(w, \"Bad url\", http.StatusBadRequest)\n\t\treturn\n\t}\n\n\terr = p.checkURL(u)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tnreq, err := http.NewRequest(req.Method, sURL, nil)\n\tif err != nil {\n\t\tmlog.Debugm(\"could not create NewRequest\", mlog.Map{\"err\": err})\n\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusBadGateway)\n\t\treturn\n\t}\n\n\t\/\/ filter headers\n\tp.copyHeaders(&nreq.Header, &req.Header, &ValidReqHeaders)\n\n\t\/\/ x-forwarded-for (if appropriate)\n\tif p.config.EnableXFwdFor {\n\t\txfwd4 := req.Header.Get(\"X-Forwarded-For\")\n\t\tif xfwd4 == \"\" {\n\t\t\thostIP, _, err := net.SplitHostPort(req.RemoteAddr)\n\t\t\tif err == nil {\n\t\t\t\t\/\/ add forwarded for header, as long as it isn't a private\n\t\t\t\t\/\/ ip address (use isRejectedIP to get private filtering for free)\n\t\t\t\tif ip := net.ParseIP(hostIP); ip != nil {\n\t\t\t\t\tif !isRejectedIP(ip) {\n\t\t\t\t\t\tnreq.Header.Add(\"X-Forwarded-For\", hostIP)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tnreq.Header.Add(\"X-Forwarded-For\", xfwd4)\n\t\t}\n\t}\n\n\t\/\/ add\/squash an accept header if the client didn't send one\n\tnreq.Header.Set(\"Accept\", p.acceptTypesString)\n\n\tnreq.Header.Add(\"User-Agent\", p.config.ServerName)\n\tnreq.Header.Add(\"Via\", p.config.ServerName)\n\n\tmlog.Debugm(\"built outgoing request\", mlog.Map{\"req\": nreq})\n\n\tresp, err := p.client.Do(nreq)\n\n\tif resp != nil {\n\t\tdefer resp.Body.Close()\n\t}\n\n\tif err != nil {\n\t\tmlog.Debugm(\"could not connect to endpoint\", mlog.Map{\"err\": err})\n\t\t\/\/ this is a bit janky, but better than peeling off the\n\t\t\/\/ 3 layers of wrapped errors and trying to get to net.OpErr and\n\t\t\/\/ still having to rely on string comparison to find out if it is\n\t\t\/\/ a net.errClosing or not.\n\t\terrString := err.Error()\n\t\t\/\/ go 1.5 changes this to http.httpError\n\t\t\/\/ go 1.4 has this as net.OpError\n\t\t\/\/ and the error strings are different depending on which version too.\n\t\tif strings.Contains(errString, \"timeout\") || strings.Contains(errString, \"Client.Timeout\") {\n\t\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusGatewayTimeout)\n\t\t} else if strings.Contains(errString, \"use of closed\") {\n\t\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusBadGateway)\n\t\t} else if strings.Contains(errString, \"BadRedirect:\") {\n\t\t\t\/\/ Got a bad redirect\n\t\t\tmlog.Debugm(\"response from upstream\", mlog.Map{\"err\": err})\n\t\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusNotFound)\n\t\t} else {\n\t\t\t\/\/ some other error. call it a not found (camo compliant)\n\t\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusNotFound)\n\t\t}\n\t\treturn\n\t}\n\n\tmlog.Debugm(\"response from upstream\", mlog.Map{\"resp\": resp})\n\n\t\/\/ check for too large a response\n\tif resp.ContentLength > p.config.MaxSize {\n\t\tmlog.Debugm(\"content length exceeded\", mlog.Map{\"url\": sURL})\n\t\thttp.Error(w, \"Content length exceeded\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\tswitch resp.StatusCode {\n\tcase 200, 206:\n\t\tcontentType := resp.Header.Get(\"Content-Type\")\n\n\t\tif contentType == \"\" {\n\t\t\tmlog.Debug(\"Empty content-type returned\")\n\t\t\thttp.Error(w, \"Empty content-type returned\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tif !p.acceptTypesFilter.CheckPath(contentType) {\n\t\t\tmlog.Debugm(\"Unsupported content-type returned\", mlog.Map{\"type\": u})\n\t\t\thttp.Error(w, \"Unsupported content-type returned\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\tcase 300:\n\t\thttp.Error(w, \"Multiple choices not supported\", http.StatusNotFound)\n\t\treturn\n\tcase 301, 302, 303, 307:\n\t\t\/\/ if we get a redirect here, we either disabled following,\n\t\t\/\/ or followed until max depth and still got one (redirect loop)\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\treturn\n\tcase 304:\n\t\th := w.Header()\n\t\tp.copyHeaders(&h, &resp.Header, &ValidRespHeaders)\n\t\tw.WriteHeader(304)\n\t\treturn\n\tcase 404:\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\treturn\n\tcase 500, 502, 503, 504:\n\t\t\/\/ upstream errors should probably just 502. client can try later.\n\t\thttp.Error(w, \"Error Fetching Resource\", http.StatusBadGateway)\n\t\treturn\n\tdefault:\n\t\thttp.Error(w, \"Not Found\", http.StatusNotFound)\n\t\treturn\n\t}\n\n\th := w.Header()\n\tp.copyHeaders(&h, &resp.Header, &ValidRespHeaders)\n\tw.WriteHeader(resp.StatusCode)\n\n\t\/\/ get a []byte from bufpool, and put it back on defer\n\tbuf := *bufPool.Get().(*[]byte)\n\tdefer bufPool.Put(&buf)\n\n\t\/\/ since this uses io.Copy\/CopyBuffer from the respBody, it is streaming\n\t\/\/ from the request to the response. This means it will nearly\n\t\/\/ always end up with a chunked response.\n\t_, err = io.CopyBuffer(w, resp.Body, buf)\n\tif err != nil {\n\t\t\/\/ only log broken pipe errors at debug level\n\t\tif isBrokenPipe(err) {\n\t\t\tmlog.Debugm(\"error writing response\", mlog.Map{\"err\": err})\n\t\t} else {\n\t\t\t\/\/ unknown error and not a broken pipe\n\t\t\tmlog.Printm(\"error writing response\", mlog.Map{\"err\": err})\n\t\t}\n\n\t\treturn\n\t}\n\n\tmlog.Debugm(\"response to client\", mlog.Map{\"resp\": w})\n}\n\nfunc (p *Proxy) checkURL(reqURL *url.URL) error {\n\t\/\/ reject localhost urls\n\t\/\/ lower case for matching is done by CheckHostname, so no need to\n\t\/\/ ToLower here also\n\tuHostname := reqURL.Hostname()\n\tif uHostname == \"\" || localsFilter.CheckHostname(uHostname) {\n\t\treturn errors.New(\"Bad url host\")\n\t}\n\n\t\/\/ if not allowed, reject credentialed\/userinfo urls\n\tif !p.config.AllowCredetialURLs && reqURL.User != nil {\n\t\treturn errors.New(\"Userinfo URL rejected\")\n\t}\n\n\t\/\/ ip\/whitelist\/blacklist filtering\n\tif !p.config.noIPFiltering {\n\t\t\/\/ filter out rejected networks\n\t\tif ip := net.ParseIP(uHostname); ip != nil {\n\t\t\tif isRejectedIP(ip) {\n\t\t\t\treturn errors.New(\"Denylist host failure\")\n\t\t\t}\n\t\t} else {\n\t\t\tif ips, err := net.LookupIP(uHostname); err == nil {\n\t\t\t\tfor _, ip := range ips {\n\t\t\t\t\tif isRejectedIP(ip) {\n\t\t\t\t\t\treturn errors.New(\"Denylist host failure\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ evaluate filters. first false value \"fails\"\n\tfor i := 0; i < p.filtersLen; i++ {\n\t\tif !p.filters[i](reqURL) {\n\t\t\treturn errors.New(\"Rejected due to filter-ruleset\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ copy headers from src into dst\n\/\/ empty filter map will result in no filtering being done\nfunc (p *Proxy) copyHeaders(dst, src *http.Header, filter *map[string]bool) {\n\tf := *filter\n\tfiltering := false\n\tif len(f) > 0 {\n\t\tfiltering = true\n\t}\n\n\tfor k, vv := range *src {\n\t\tif x, ok := f[k]; filtering && (!ok || !x) {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, v := range vv {\n\t\t\tdst.Add(k, v)\n\t\t}\n\t}\n}\n\n\/\/ NewWithFilters returns a new Proxy that utilises the passed in proxy filters.\n\/\/ filters are evaluated in order, and the first false response from a filter\n\/\/ function halts further evaluation and fails the request.\nfunc NewWithFilters(pc Config, filters []FilterFunc) (*Proxy, error) {\n\tproxy, err := New(pc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfilterFuncs := make([]FilterFunc, 0)\n\t\/\/ check for nil entries, and copy the slice in case the original\n\t\/\/ is mutated.\n\tfor _, filter := range filters {\n\t\tif filter != nil {\n\t\t\tfilterFuncs = append(filterFuncs, filter)\n\t\t}\n\t}\n\tproxy.filters = filterFuncs\n\tproxy.filtersLen = len(filterFuncs)\n\treturn proxy, nil\n}\n\n\/\/ New returns a new Proxy. Returns an error if Proxy could not be constructed.\nfunc New(pc Config) (*Proxy, error) {\n\ttr := &http.Transport{\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout: 3 * time.Second,\n\t\t\tKeepAlive: 30 * time.Second,\n\t\t}).DialContext,\n\t\tTLSHandshakeTimeout: 3 * time.Second,\n\n\t\t\/\/ max idle conns. Go DetaultTransport uses 100, which seems like a\n\t\t\/\/ fairly reasonable number. Very busy servers may wish to raise\n\t\t\/\/ or lower this value.\n\t\tMaxIdleConns: 100,\n\t\tMaxIdleConnsPerHost: 8,\n\n\t\t\/\/ more defaults from DefaultTransport, with a few tweaks\n\t\tIdleConnTimeout: 30 * time.Second,\n\t\tExpectContinueTimeout: 1 * time.Second,\n\n\t\tDisableKeepAlives: pc.DisableKeepAlivesBE,\n\t\t\/\/ no need for compression with images\n\t\t\/\/ some xml\/svg can be compressed, but apparently some clients can\n\t\t\/\/ exhibit weird behavior when those are compressed\n\t\tDisableCompression: true,\n\t}\n\n\tclient := &http.Client{\n\t\tTransport: tr,\n\t\t\/\/ timeout\n\t\tTimeout: pc.RequestTimeout,\n\t}\n\n\tacceptTypes := []string{\"image\/*\"}\n\t\/\/ add additional accept types, if appropriate\n\tif pc.AllowContentVideo {\n\t\tacceptTypes = append(acceptTypes, \"video\/*\")\n\t}\n\n\t\/\/ re-use the htrie glob path checker for accept types validation\n\tacceptTypesFilter := htrie.NewGlobPathChecker()\n\tfor _, v := range acceptTypes {\n\t\terr := acceptTypesFilter.AddRule(\"|i|\" + v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tp := &Proxy{\n\t\tclient: client,\n\t\tconfig: &pc,\n\t\tacceptTypesString: strings.Join(acceptTypes, \", \"),\n\t\tacceptTypesFilter: acceptTypesFilter,\n\t}\n\n\tclient.CheckRedirect = func(req *http.Request, via []*http.Request) error {\n\t\tif len(via) >= pc.MaxRedirects {\n\t\t\tmlog.Debug(\"Got bad redirect: Too many redirects\")\n\t\t\treturn errors.New(\"BadRedirect: Too many redirects\")\n\t\t}\n\t\terr := p.checkURL(req.URL)\n\t\tif err != nil {\n\t\t\tmlog.Debugm(\"Got bad redirect\", mlog.Map{\"url\": req})\n\t\t\treturn fmt.Errorf(\"BadRedirect: %s\", err)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\treturn p, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package cli\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"strconv\"\n\n\t\"github.com\/apprenda\/kismatic\/pkg\/install\"\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ NewCmdVolume returns the storage command\nfunc NewCmdVolume(out io.Writer) *cobra.Command {\n\tvar planFile string\n\tcmd := &cobra.Command{\n\t\tUse: \"volume\",\n\t\tShort: \"manage storage volumes on your Kubernetes cluster\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn cmd.Usage()\n\t\t},\n\t}\n\taddPlanFileFlag(cmd.PersistentFlags(), &planFile)\n\tcmd.AddCommand(NewCmdVolumeAdd(out, &planFile))\n\treturn cmd\n}\n\ntype volumeAddOptions struct {\n\treplicaCount int\n\tdistributionCount int\n\tallowAddress []string\n\tverbose bool\n\toutputFormat string\n\tgeneratedAssetsDir string\n}\n\n\/\/ NewCmdVolumeAdd returns the command for adding storage volumes\nfunc NewCmdVolumeAdd(out io.Writer, planFile *string) *cobra.Command {\n\topts := volumeAddOptions{}\n\tcmd := &cobra.Command{\n\t\tUse: \"add size_in_gigabytes [volume name]\",\n\t\tShort: \"add storage volumes to the Kubernetes cluster\",\n\t\tLong: `Add storage volumes to the Kubernetes cluster.\n\nThis function requires a target cluster that has storage nodes.`,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn doVolumeAdd(out, opts, *planFile, args)\n\t\t},\n\t\tExample: ` Create a distributed, replicated volume,\n named \"storage01\" with a 10 GB quota,\n grant access to any IP address starting with 10.10.\n kismatic volume add 10 storage01 -r 2 -d 2 -a 10.10.*.*\n\t\t`,\n\t}\n\tcmd.Flags().IntVarP(&opts.replicaCount, \"replica-count\", \"r\", 2, \"The number of times each file will be written.\")\n\tcmd.Flags().IntVarP(&opts.distributionCount, \"distribution-count\", \"d\", 1, \"This is the degree to which data will be distributed across the cluster. By default, it won't be -- each replica will receive 100% of the data. Distribution makes listing or backing up the cluster more complicated by spreading data around the cluster but makes reads and writes more performant.\")\n\tcmd.Flags().StringSliceVarP(&opts.allowAddress, \"allow-address\", \"a\", nil, \"Comma delimited list of address wildcards permitted access to the volume in addition to Kubernetes nodes.\")\n\tcmd.Flags().BoolVar(&opts.verbose, \"verbose\", false, \"enable verbose logging\")\n\tcmd.Flags().StringVarP(&opts.outputFormat, \"output\", \"o\", \"simple\", `output format (options \"simple\"|\"raw\")`)\n\tcmd.Flags().StringVar(&opts.generatedAssetsDir, \"generated-assets-dir\", \"generated\", \"path to the directory where assets generated during the installation process will be stored\")\n\treturn cmd\n}\n\nfunc doVolumeAdd(out io.Writer, opts volumeAddOptions, planFile string, args []string) error {\n\tvar volumeName string\n\tvar volumeSizeStrGB string\n\tswitch len(args) {\n\tcase 0:\n\t\treturn errors.New(\"the volume size (in gigabytes) must be provided as the first argument to add\")\n\tcase 1:\n\t\tvolumeSizeStrGB = args[0]\n\t\tvolumeName = \"kismatic-\" + generateRandomString(5)\n\tcase 2:\n\t\tvolumeSizeStrGB = args[0]\n\t\tvolumeName = args[1]\n\tdefault:\n\t\treturn fmt.Errorf(\"%d arguments were provided, but add does not support more than two arguments\", len(args))\n\t}\n\tvolumeSizeGB, err := strconv.Atoi(volumeSizeStrGB)\n\tif err != nil {\n\t\treturn errors.New(\"the volume size provided is not valid\")\n\t}\n\t\/\/ Setup ansible\n\tplanner := &install.FilePlanner{File: planFile}\n\tif !planner.PlanExists() {\n\t\treturn fmt.Errorf(\"Plan file not found at %q\", planFile)\n\t}\n\texecOpts := install.ExecutorOptions{\n\t\tOutputFormat: opts.outputFormat,\n\t\tVerbose: opts.verbose,\n\t\t\/\/ Need to refactor executor code... this will do for now as we don't need the generated assets dir in this command\n\t\tGeneratedAssetsDirectory: opts.generatedAssetsDir,\n\t}\n\texec, err := install.NewExecutor(out, out, execOpts)\n\tif err != nil {\n\t\treturn err\n\t}\n\tplan, err := planner.Read()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Run validation\n\tvopts := &validateOpts{\n<<<<<<< HEAD\n\t\toutputFormat: opts.outputFormat,\n\t\tverbose: opts.verbose,\n\t\tplanFile: planFile,\n\t\tskipPreFlight: true,\n\t\tgeneratedAssetsDir: opts.generatedAssetsDir,\n=======\n\t\toutputFormat: opts.outputFormat,\n\t\tverbose: opts.verbose,\n\t\tplanFile: planFile,\n\t\tskipPreFlight: true,\n>>>>>>> abff976bcc8e149fc2c7302a96b3ba3cd6dcf0e9\n\t}\n\tif err := doValidate(out, planner, vopts); err != nil {\n\t\treturn err\n\t}\n\n\tv := install.StorageVolume{\n\t\tName: volumeName,\n\t\tSizeGB: volumeSizeGB,\n\t\tReplicateCount: opts.replicaCount,\n\t\tDistributionCount: opts.distributionCount,\n\t}\n\tif opts.allowAddress != nil {\n\t\tv.AllowAddresses = opts.allowAddress\n\t}\n\tif ok, errs := install.ValidateStorageVolume(v); !ok {\n\t\tfmt.Println(\"The storage volume configuration is not valid:\")\n\t\tfor _, e := range errs {\n\t\t\tfmt.Printf(\"- %s\\n\", e)\n\t\t}\n\t\treturn errors.New(\"storage volume validation failed\")\n\t}\n\tif err := exec.AddVolume(plan, v); err != nil {\n\t\treturn fmt.Errorf(\"error adding new volume: %v\", err)\n\t}\n\n\tfmt.Fprintln(out)\n\tfmt.Fprintln(out, \"Successfully added the persistent volume to the kubernetes cluster.\")\n\tfmt.Fprintln(out)\n\tfmt.Fprintf(out, \"Use \\\"kubectl describe pv %s\\\" to view volume details.\\n\", v.Name)\n\treturn nil\n}\n\nfunc generateRandomString(n int) string {\n\t\/\/ removed 1, l, o, 0 to prevent confusion\n\tchars := []rune(\"abcdefghijkmnpqrstuvwxyz23456789\")\n\tres := make([]rune, n)\n\tfor i := range res {\n\t\tres[i] = chars[rand.Intn(len(chars))]\n\t}\n\treturn string(res)\n}\n<commit_msg>Remove merge conflict text<commit_after>package cli\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\/rand\"\n\t\"strconv\"\n\n\t\"github.com\/apprenda\/kismatic\/pkg\/install\"\n\t\"github.com\/spf13\/cobra\"\n)\n\n\/\/ NewCmdVolume returns the storage command\nfunc NewCmdVolume(out io.Writer) *cobra.Command {\n\tvar planFile string\n\tcmd := &cobra.Command{\n\t\tUse: \"volume\",\n\t\tShort: \"manage storage volumes on your Kubernetes cluster\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn cmd.Usage()\n\t\t},\n\t}\n\taddPlanFileFlag(cmd.PersistentFlags(), &planFile)\n\tcmd.AddCommand(NewCmdVolumeAdd(out, &planFile))\n\treturn cmd\n}\n\ntype volumeAddOptions struct {\n\treplicaCount int\n\tdistributionCount int\n\tallowAddress []string\n\tverbose bool\n\toutputFormat string\n\tgeneratedAssetsDir string\n}\n\n\/\/ NewCmdVolumeAdd returns the command for adding storage volumes\nfunc NewCmdVolumeAdd(out io.Writer, planFile *string) *cobra.Command {\n\topts := volumeAddOptions{}\n\tcmd := &cobra.Command{\n\t\tUse: \"add size_in_gigabytes [volume name]\",\n\t\tShort: \"add storage volumes to the Kubernetes cluster\",\n\t\tLong: `Add storage volumes to the Kubernetes cluster.\n\nThis function requires a target cluster that has storage nodes.`,\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\treturn doVolumeAdd(out, opts, *planFile, args)\n\t\t},\n\t\tExample: ` Create a distributed, replicated volume,\n named \"storage01\" with a 10 GB quota,\n grant access to any IP address starting with 10.10.\n kismatic volume add 10 storage01 -r 2 -d 2 -a 10.10.*.*\n\t\t`,\n\t}\n\tcmd.Flags().IntVarP(&opts.replicaCount, \"replica-count\", \"r\", 2, \"The number of times each file will be written.\")\n\tcmd.Flags().IntVarP(&opts.distributionCount, \"distribution-count\", \"d\", 1, \"This is the degree to which data will be distributed across the cluster. By default, it won't be -- each replica will receive 100% of the data. Distribution makes listing or backing up the cluster more complicated by spreading data around the cluster but makes reads and writes more performant.\")\n\tcmd.Flags().StringSliceVarP(&opts.allowAddress, \"allow-address\", \"a\", nil, \"Comma delimited list of address wildcards permitted access to the volume in addition to Kubernetes nodes.\")\n\tcmd.Flags().BoolVar(&opts.verbose, \"verbose\", false, \"enable verbose logging\")\n\tcmd.Flags().StringVarP(&opts.outputFormat, \"output\", \"o\", \"simple\", `output format (options \"simple\"|\"raw\")`)\n\tcmd.Flags().StringVar(&opts.generatedAssetsDir, \"generated-assets-dir\", \"generated\", \"path to the directory where assets generated during the installation process will be stored\")\n\treturn cmd\n}\n\nfunc doVolumeAdd(out io.Writer, opts volumeAddOptions, planFile string, args []string) error {\n\tvar volumeName string\n\tvar volumeSizeStrGB string\n\tswitch len(args) {\n\tcase 0:\n\t\treturn errors.New(\"the volume size (in gigabytes) must be provided as the first argument to add\")\n\tcase 1:\n\t\tvolumeSizeStrGB = args[0]\n\t\tvolumeName = \"kismatic-\" + generateRandomString(5)\n\tcase 2:\n\t\tvolumeSizeStrGB = args[0]\n\t\tvolumeName = args[1]\n\tdefault:\n\t\treturn fmt.Errorf(\"%d arguments were provided, but add does not support more than two arguments\", len(args))\n\t}\n\tvolumeSizeGB, err := strconv.Atoi(volumeSizeStrGB)\n\tif err != nil {\n\t\treturn errors.New(\"the volume size provided is not valid\")\n\t}\n\t\/\/ Setup ansible\n\tplanner := &install.FilePlanner{File: planFile}\n\tif !planner.PlanExists() {\n\t\treturn fmt.Errorf(\"Plan file not found at %q\", planFile)\n\t}\n\texecOpts := install.ExecutorOptions{\n\t\tOutputFormat: opts.outputFormat,\n\t\tVerbose: opts.verbose,\n\t\t\/\/ Need to refactor executor code... this will do for now as we don't need the generated assets dir in this command\n\t\tGeneratedAssetsDirectory: opts.generatedAssetsDir,\n\t}\n\texec, err := install.NewExecutor(out, out, execOpts)\n\tif err != nil {\n\t\treturn err\n\t}\n\tplan, err := planner.Read()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Run validation\n\tvopts := &validateOpts{\n\t\toutputFormat: opts.outputFormat,\n\t\tverbose: opts.verbose,\n\t\tplanFile: planFile,\n\t\tskipPreFlight: true,\n\t\tgeneratedAssetsDir: opts.generatedAssetsDir,\n\t}\n\tif err := doValidate(out, planner, vopts); err != nil {\n\t\treturn err\n\t}\n\n\tv := install.StorageVolume{\n\t\tName: volumeName,\n\t\tSizeGB: volumeSizeGB,\n\t\tReplicateCount: opts.replicaCount,\n\t\tDistributionCount: opts.distributionCount,\n\t}\n\tif opts.allowAddress != nil {\n\t\tv.AllowAddresses = opts.allowAddress\n\t}\n\tif ok, errs := install.ValidateStorageVolume(v); !ok {\n\t\tfmt.Println(\"The storage volume configuration is not valid:\")\n\t\tfor _, e := range errs {\n\t\t\tfmt.Printf(\"- %s\\n\", e)\n\t\t}\n\t\treturn errors.New(\"storage volume validation failed\")\n\t}\n\tif err := exec.AddVolume(plan, v); err != nil {\n\t\treturn fmt.Errorf(\"error adding new volume: %v\", err)\n\t}\n\n\tfmt.Fprintln(out)\n\tfmt.Fprintln(out, \"Successfully added the persistent volume to the kubernetes cluster.\")\n\tfmt.Fprintln(out)\n\tfmt.Fprintf(out, \"Use \\\"kubectl describe pv %s\\\" to view volume details.\\n\", v.Name)\n\treturn nil\n}\n\nfunc generateRandomString(n int) string {\n\t\/\/ removed 1, l, o, 0 to prevent confusion\n\tchars := []rune(\"abcdefghijkmnpqrstuvwxyz23456789\")\n\tres := make([]rune, n)\n\tfor i := range res {\n\t\tres[i] = chars[rand.Intn(len(chars))]\n\t}\n\treturn string(res)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2020 The SQLFlow Authors. All rights reserved.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage log\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\t\"gopkg.in\/natefinch\/lumberjack.v2\"\n)\n\n\/\/ Formatter is for user to specific the log formatter\ntype Formatter int\n\nconst (\n\t\/\/ TextFormatter means unordered fields\n\tTextFormatter Formatter = iota\n\t\/\/ OrderedTextFormatter writes the fields(only but not level&msg) orderly\n\tOrderedTextFormatter\n\t\/\/ JSON, easy to support but we don't need it right now\n)\n\n\/\/ InitLogger set the output and formatter\nfunc InitLogger(filename string, f Formatter) {\n\tsetOutput(filename)\n\tif f == OrderedTextFormatter {\n\t\tfm := &orderedFieldsTextFormatter{}\n\t\tlogrus.SetFormatter(fm)\n\t}\n}\n\n\/\/ setOutput sets log output to filename globally.\n\/\/ filename=\"\/var\/log\/sqlflow.log\": write the log to file\n\/\/ filename=\"\": write the file to stdout or stderr\n\/\/ filename=\"\/dev\/null\": ignore log message\nfunc setOutput(filename string) {\n\tfilename = strings.Trim(filename, \" \")\n\tif filename == \"\/dev\/null\" {\n\t\tlogrus.SetOutput(ioutil.Discard)\n\t} else if len(filename) > 0 {\n\t\tlogrus.SetOutput(&lumberjack.Logger{\n\t\t\tFilename: filename,\n\t\t\tMaxSize: 32, \/\/ megabytes\n\t\t\tMaxBackups: 64,\n\t\t\tMaxAge: 15, \/\/ days\n\t\t\tCompress: true,\n\t\t})\n\t}\n}\n\n\/\/ orderedFieldsTextFormatter writes the fields(only but not level or msg) orderly\ntype orderedFieldsTextFormatter struct {\n}\n\nfunc (f *orderedFieldsTextFormatter) Format(logger *logrus.Entry) ([]byte, error) {\n\tvar b *bytes.Buffer\n\tif logger.Buffer != nil {\n\t\tb = logger.Buffer\n\t} else {\n\t\tb = &bytes.Buffer{}\n\t}\n\tfmt.Fprintf(b, \"%s %s msg=\\\"%s\\\"\", logger.Time.Format(\"2006-01-02 15:04:05\"), logger.Level.String(), logger.Message)\n\n\tkeys := make([]string, 0, len(logger.Data))\n\tfor k := range logger.Data {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tfor _, k := range keys {\n\t\tv := logger.Data[k]\n\t\t_, ok := v.(string)\n\t\tif ok {\n\t\t\tfmt.Fprintf(b, \" %s=\\\"%s\\\"\", k, v)\n\t\t} else {\n\t\t\tfmt.Fprintf(b, \" %s=%v\", k, v)\n\t\t}\n\t}\n\tb.WriteByte('\\n')\n\treturn b.Bytes(), nil\n}\n<commit_msg>Enable log in sqlflowserver (#2550)<commit_after>\/\/ Copyright 2020 The SQLFlow Authors. All rights reserved.\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage log\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com\/sirupsen\/logrus\"\n\t\"gopkg.in\/natefinch\/lumberjack.v2\"\n)\n\n\/\/ Formatter is for user to specific the log formatter\ntype Formatter int\n\nconst (\n\t\/\/ TextFormatter means unordered fields\n\tTextFormatter Formatter = iota\n\t\/\/ OrderedTextFormatter writes the fields(only but not level&msg) orderly\n\tOrderedTextFormatter\n\t\/\/ JSON, easy to support but we don't need it right now\n)\n\n\/\/ InitLogger set the output and formatter\nfunc InitLogger(filename string, f Formatter) {\n\tsetOutput(filename)\n\tif f == OrderedTextFormatter {\n\t\tfm := &orderedFieldsTextFormatter{}\n\t\tlogrus.SetFormatter(fm)\n\t}\n}\n\n\/\/ setOutput sets log output to filename globally.\n\/\/ filename=\"\/var\/log\/sqlflow.log\": write the log to file\n\/\/ filename=\"\": write the file to stdout or stderr\n\/\/ filename=\"\/dev\/null\": ignore log message\nfunc setOutput(filename string) {\n\tfilename = strings.Trim(filename, \" \")\n\tif filename == \"\/dev\/null\" {\n\t\tlogrus.SetOutput(ioutil.Discard)\n\t} else if filename == \"\" {\n\t\tlogrus.SetOutput(os.Stdout)\n\t} else if len(filename) > 0 {\n\t\tlogrus.SetOutput(&lumberjack.Logger{\n\t\t\tFilename: filename,\n\t\t\tMaxSize: 32, \/\/ megabytes\n\t\t\tMaxBackups: 64,\n\t\t\tMaxAge: 15, \/\/ days\n\t\t\tCompress: true,\n\t\t})\n\t}\n}\n\n\/\/ orderedFieldsTextFormatter writes the fields(only but not level or msg) orderly\ntype orderedFieldsTextFormatter struct {\n}\n\nfunc (f *orderedFieldsTextFormatter) Format(logger *logrus.Entry) ([]byte, error) {\n\tvar b *bytes.Buffer\n\tif logger.Buffer != nil {\n\t\tb = logger.Buffer\n\t} else {\n\t\tb = &bytes.Buffer{}\n\t}\n\tfmt.Fprintf(b, \"%s %s msg=\\\"%s\\\"\", logger.Time.Format(\"2006-01-02 15:04:05\"), logger.Level.String(), logger.Message)\n\n\tkeys := make([]string, 0, len(logger.Data))\n\tfor k := range logger.Data {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tfor _, k := range keys {\n\t\tv := logger.Data[k]\n\t\t_, ok := v.(string)\n\t\tif ok {\n\t\t\tfmt.Fprintf(b, \" %s=\\\"%s\\\"\", k, v)\n\t\t} else {\n\t\t\tfmt.Fprintf(b, \" %s=%v\", k, v)\n\t\t}\n\t}\n\tb.WriteByte('\\n')\n\treturn b.Bytes(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package md_test\n\nimport (\n\t_ \"embed\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/google\/go-cmp\/cmp\"\n\t. \"src.elv.sh\/pkg\/md\"\n\t\"src.elv.sh\/pkg\/must\"\n)\n\ntype testCase struct {\n\tMarkdown string `json:\"markdown\"`\n\tHTML string `json:\"html\"`\n\tExample int `json:\"example\"`\n\tSection string `json:\"section\"`\n\tName string\n}\n\n\/\/go:embed spec.json\nvar specJSON []byte\n\nvar testCases []testCase\n\nvar additionalCases = []testCase{\n\t{\n\t\tMarkdown: `> a\n>> b\n`,\n\t\tHTML: `<blockquote>\n<p>a<\/p>\n<blockquote>\n<p>b<\/p>\n<\/blockquote>\n<\/blockquote>\n`,\n\t\tName: \"Blockquote supplemental\/Increasing level\",\n\t},\n\t{\n\t\tMarkdown: `>> a\n>\n> b\n`,\n\t\tHTML: `<blockquote>\n<blockquote>\n<p>a<\/p>\n<\/blockquote>\n<p>b<\/p>\n<\/blockquote>\n`,\n\t\tName: \"Blockquote supplemental\/Reducing level\",\n\t},\n}\n\nfunc init() {\n\tmust.OK(json.Unmarshal(specJSON, &testCases))\n\ttestCases = append(testCases, additionalCases...)\n}\n\nvar (\n\tescapeHTML = strings.NewReplacer(\n\t\t\"&\", \"&\", `\"`, \""\", \"<\", \"<\", \">\", \">\").Replace\n\tescapeDest = strings.NewReplacer(\n\t\t`\"`, \"%22\", `\\`, \"%5C\", \" \", \"%20\", \"`\", \"%60\",\n\t\t\"[\", \"%5B\", \"]\", \"%5D\",\n\t\t\"ö\", \"%C3%B6\",\n\t\t\"ä\", \"%C3%A4\", \" \", \"%C2%A0\").Replace\n)\n\nvar htmlSyntax = OutputSyntax{\n\tThematicBreak: func(_ string) string { return \"<hr \/>\" },\n\tHeading: func(level int) TagPair {\n\t\ttag := \"h\" + strconv.Itoa(level)\n\t\treturn TagPair{Start: \"<\" + tag + \">\", End: \"<\/\" + tag + \">\"}\n\t},\n\tParagraph: TagPair{Start: \"<p>\", End: \"<\/p>\"},\n\tBlockquote: TagPair{Start: \"<blockquote>\", End: \"<\/blockquote>\"},\n\tBulletList: TagPair{Start: \"<ul>\", End: \"<\/ul>\"},\n\tBulletItem: TagPair{Start: \"<li>\", End: \"<\/li>\"},\n\tOrderedList: func(start int) TagPair {\n\t\tif start == 1 {\n\t\t\treturn TagPair{Start: \"<ol>\", End: \"<\/ol>\"}\n\t\t}\n\t\treturn TagPair{\n\t\t\tStart: `<ol start=\"` + strconv.Itoa(start) + `\">`,\n\t\t\tEnd: \"<\/ol>\"}\n\t},\n\tOrderedItem: TagPair{Start: \"<li>\", End: \"<\/li>\"},\n\tCodeSpan: TagPair{Start: \"<code>\", End: \"<\/code>\"},\n\tEmphasis: TagPair{Start: \"<em>\", End: \"<\/em>\"},\n\tStrongEmphasis: TagPair{Start: \"<strong>\", End: \"<\/strong>\"},\n\tLink: func(dest, title string) TagPair {\n\t\tstart := \"\"\n\t\tif title == \"\" {\n\t\t\tstart = fmt.Sprintf(`<a href=\"%s\">`, escapeDest(dest))\n\t\t} else {\n\t\t\tstart = fmt.Sprintf(`<a href=\"%s\" title=\"%s\">`, escapeDest(dest), escapeHTML(title))\n\t\t}\n\t\treturn TagPair{Start: start, End: \"<\/a>\"}\n\t},\n\tImage: func(dest, alt, title string) string {\n\t\tif title == \"\" {\n\t\t\treturn fmt.Sprintf(`<img src=\"%s\" alt=\"%s\" \/>`, escapeDest(dest), escapeHTML(alt))\n\t\t}\n\t\treturn fmt.Sprintf(`<img src=\"%s\" alt=\"%s\" title=\"%s\" \/>`, escapeDest(dest), escapeHTML(alt), escapeHTML(title))\n\t},\n\tEscape: escapeHTML,\n}\n\nvar (\n\tlinkRef = regexp.MustCompile(`(^|\\n) {0,3}\\[([^\\\\\\[\\]]|\\\\[\\\\\\[\\]])+\\]:`)\n\tcodeBlock = regexp.MustCompile(\"(^|\\n)[ >]*(```|~~~| )\")\n\temptyListItem = regexp.MustCompile(`(^|\\n)([-+*]|[0-9]{1,9}[.)])(\\n|$)`)\n\thtmlBlock = regexp.MustCompile(`(^|\\n)(<a |<!--)`)\n)\n\nfunc TestRender(t *testing.T) {\n\tfor _, tc := range testCases {\n\t\tname := tc.Name\n\t\tif name == \"\" {\n\t\t\tname = fmt.Sprintf(\"%s\/%d\", tc.Section, tc.Example)\n\t\t}\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tif unsupportedSection(tc.Section) {\n\t\t\t\tt.Skipf(\"Section %q not supported\", tc.Section)\n\t\t\t}\n\t\t\tif reason := unsupportedExample(tc.Example); reason != \"\" {\n\t\t\t\tt.Skipf(\"Example %d not supported: %s\", tc.Example, reason)\n\t\t\t}\n\t\t\tif codeBlock.MatchString(tc.Markdown) {\n\t\t\t\tt.Skipf(\"Code block not supported\")\n\t\t\t}\n\t\t\tif linkRef.MatchString(tc.Markdown) {\n\t\t\t\tt.Skipf(\"Link reference not supported\")\n\t\t\t}\n\t\t\tif emptyListItem.MatchString(tc.Markdown) {\n\t\t\t\tt.Skipf(\"Empty list item not supported\")\n\t\t\t}\n\t\t\tif htmlBlock.MatchString(tc.Markdown) {\n\t\t\t\tt.Skipf(\"HTML block not supported\")\n\t\t\t}\n\n\t\t\tgot := Render(tc.Markdown, htmlSyntax)\n\t\t\twant := loosifyLists(tc.HTML)\n\t\t\tif diff := cmp.Diff(want, got); diff != \"\" {\n\t\t\t\tt.Errorf(\"input:\\n%sdiff (-want +got):\\n%s\", tc.Markdown, diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc unsupportedSection(section string) bool {\n\tswitch section {\n\tcase \"Tabs\",\n\t\t\"Setext headings\",\n\t\t\"Indented code blocks\",\n\t\t\"Fenced code blocks\",\n\t\t\"HTML blocks\",\n\t\t\"Link reference definitions\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc unsupportedExample(example int) string {\n\tswitch example {\n\tcase 59, 300:\n\t\treturn \"has setext heading\"\n\tcase 320, 321, 323:\n\t\treturn \"tight list not implemented\"\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\nvar looseListItem = regexp.MustCompile(`<li>([^<]+)<\/li>`)\n\nfunc loosifyLists(html string) string {\n\treturn strings.ReplaceAll(\n\t\tlooseListItem.ReplaceAllString(html, \"<li>\\n<p>$1<\/p>\\n<\/li>\"),\n\t\t\"<li><\/li>\", \"<li>\\n<\/li>\")\n}\n<commit_msg>pkg\/md: Add helpers for building HTML tags in tests.<commit_after>package md_test\n\nimport (\n\t_ \"embed\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com\/google\/go-cmp\/cmp\"\n\t. \"src.elv.sh\/pkg\/md\"\n\t\"src.elv.sh\/pkg\/must\"\n)\n\ntype testCase struct {\n\tMarkdown string `json:\"markdown\"`\n\tHTML string `json:\"html\"`\n\tExample int `json:\"example\"`\n\tSection string `json:\"section\"`\n\tName string\n}\n\n\/\/go:embed spec.json\nvar specJSON []byte\n\nvar testCases []testCase\n\nvar additionalCases = []testCase{\n\t{\n\t\tMarkdown: `> a\n>> b\n`,\n\t\tHTML: `<blockquote>\n<p>a<\/p>\n<blockquote>\n<p>b<\/p>\n<\/blockquote>\n<\/blockquote>\n`,\n\t\tName: \"Blockquote supplemental\/Increasing level\",\n\t},\n\t{\n\t\tMarkdown: `>> a\n>\n> b\n`,\n\t\tHTML: `<blockquote>\n<blockquote>\n<p>a<\/p>\n<\/blockquote>\n<p>b<\/p>\n<\/blockquote>\n`,\n\t\tName: \"Blockquote supplemental\/Reducing level\",\n\t},\n}\n\nfunc init() {\n\tmust.OK(json.Unmarshal(specJSON, &testCases))\n\ttestCases = append(testCases, additionalCases...)\n}\n\nvar (\n\tescapeHTML = strings.NewReplacer(\n\t\t\"&\", \"&\", `\"`, \""\", \"<\", \"<\", \">\", \">\").Replace\n\tescapeDest = strings.NewReplacer(\n\t\t`\"`, \"%22\", `\\`, \"%5C\", \" \", \"%20\", \"`\", \"%60\",\n\t\t\"[\", \"%5B\", \"]\", \"%5D\",\n\t\t\"ö\", \"%C3%B6\",\n\t\t\"ä\", \"%C3%A4\", \" \", \"%C2%A0\").Replace\n)\n\nvar htmlSyntax = OutputSyntax{\n\tThematicBreak: func(_ string) string { return htmlSelfCloseTag(\"hr\") },\n\tHeading: func(level int) TagPair {\n\t\treturn htmlTagPair(\"h\" + strconv.Itoa(level))\n\t},\n\tParagraph: htmlTagPair(\"p\"),\n\tBlockquote: htmlTagPair(\"blockquote\"),\n\tBulletList: htmlTagPair(\"ul\"),\n\tBulletItem: htmlTagPair(\"li\"),\n\tOrderedList: func(start int) TagPair {\n\t\tvar attrs []string\n\t\tif start != 1 {\n\t\t\tattrs = []string{\"start\", strconv.Itoa(start)}\n\t\t}\n\t\treturn htmlTagPair(\"ol\", attrs...)\n\t},\n\tOrderedItem: htmlTagPair(\"li\"),\n\tCodeSpan: htmlTagPair(\"code\"),\n\tEmphasis: htmlTagPair(\"em\"),\n\tStrongEmphasis: htmlTagPair(\"strong\"),\n\tLink: func(dest, title string) TagPair {\n\t\tattrs := []string{\"href\", escapeDest(dest)}\n\t\tif title != \"\" {\n\t\t\tattrs = append(attrs, \"title\", escapeHTML(title))\n\t\t}\n\t\treturn htmlTagPair(\"a\", attrs...)\n\t},\n\tImage: func(dest, alt, title string) string {\n\t\tattrs := []string{\"src\", escapeDest(dest), \"alt\", escapeHTML(alt)}\n\t\tif title != \"\" {\n\t\t\tattrs = append(attrs, \"title\", escapeHTML(title))\n\t\t}\n\t\treturn htmlSelfCloseTag(\"img\", attrs...)\n\t},\n\tEscape: escapeHTML,\n}\n\nfunc htmlSelfCloseTag(name string, attrPairs ...string) string {\n\treturn \"<\" + name + concatAttrPairs(attrPairs) + \" \/>\"\n}\n\nfunc htmlTagPair(name string, attrPairs ...string) TagPair {\n\treturn TagPair{\n\t\tStart: \"<\" + name + concatAttrPairs(attrPairs) + \">\",\n\t\tEnd: \"<\/\" + name + \">\"}\n}\n\nfunc concatAttrPairs(attrPairs []string) string {\n\tvar sb strings.Builder\n\tfor i := 0; i+1 < len(attrPairs); i += 2 {\n\t\tfmt.Fprintf(&sb, ` %s=\"%s\"`, attrPairs[i], attrPairs[i+1])\n\t}\n\treturn sb.String()\n}\n\nfunc combineHTMLTagPairs(p TagPair, more ...TagPair) TagPair {\n\tfor _, q := range more {\n\t\tp.Start += q.Start\n\t\tp.End = q.End + p.End\n\t}\n\treturn p\n}\n\nvar (\n\tlinkRef = regexp.MustCompile(`(^|\\n) {0,3}\\[([^\\\\\\[\\]]|\\\\[\\\\\\[\\]])+\\]:`)\n\tcodeBlock = regexp.MustCompile(\"(^|\\n)[ >]*(```|~~~| )\")\n\temptyListItem = regexp.MustCompile(`(^|\\n)([-+*]|[0-9]{1,9}[.)])(\\n|$)`)\n\thtmlBlock = regexp.MustCompile(`(^|\\n)(<a |<!--)`)\n)\n\nfunc TestRender(t *testing.T) {\n\tfor _, tc := range testCases {\n\t\tname := tc.Name\n\t\tif name == \"\" {\n\t\t\tname = fmt.Sprintf(\"%s\/%d\", tc.Section, tc.Example)\n\t\t}\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tif unsupportedSection(tc.Section) {\n\t\t\t\tt.Skipf(\"Section %q not supported\", tc.Section)\n\t\t\t}\n\t\t\tif reason := unsupportedExample(tc.Example); reason != \"\" {\n\t\t\t\tt.Skipf(\"Example %d not supported: %s\", tc.Example, reason)\n\t\t\t}\n\t\t\tif codeBlock.MatchString(tc.Markdown) {\n\t\t\t\tt.Skipf(\"Code block not supported\")\n\t\t\t}\n\t\t\tif linkRef.MatchString(tc.Markdown) {\n\t\t\t\tt.Skipf(\"Link reference not supported\")\n\t\t\t}\n\t\t\tif emptyListItem.MatchString(tc.Markdown) {\n\t\t\t\tt.Skipf(\"Empty list item not supported\")\n\t\t\t}\n\t\t\tif htmlBlock.MatchString(tc.Markdown) {\n\t\t\t\tt.Skipf(\"HTML block not supported\")\n\t\t\t}\n\n\t\t\tgot := Render(tc.Markdown, htmlSyntax)\n\t\t\twant := loosifyLists(tc.HTML)\n\t\t\tif diff := cmp.Diff(want, got); diff != \"\" {\n\t\t\t\tt.Errorf(\"input:\\n%sdiff (-want +got):\\n%s\", tc.Markdown, diff)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc unsupportedSection(section string) bool {\n\tswitch section {\n\tcase \"Tabs\",\n\t\t\"Setext headings\",\n\t\t\"Indented code blocks\",\n\t\t\"Fenced code blocks\",\n\t\t\"HTML blocks\",\n\t\t\"Link reference definitions\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc unsupportedExample(example int) string {\n\tswitch example {\n\tcase 59, 300:\n\t\treturn \"has setext heading\"\n\tcase 320, 321, 323:\n\t\treturn \"tight list not implemented\"\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\nvar looseListItem = regexp.MustCompile(`<li>([^<]+)<\/li>`)\n\nfunc loosifyLists(html string) string {\n\treturn strings.ReplaceAll(\n\t\tlooseListItem.ReplaceAllString(html, \"<li>\\n<p>$1<\/p>\\n<\/li>\"),\n\t\t\"<li><\/li>\", \"<li>\\n<\/li>\")\n}\n<|endoftext|>"} {"text":"<commit_before>package otp\n\nimport (\n\t\"fmt\"\n)\n\ntype Pad struct {\n\tpages [][]byte\n\tcurrentPage int\n}\n\n\/\/ NewPad creates a new \"one-time pad\"\nfunc NewPad(material []byte, pageSize int, startPage int) (*Pad, error) {\n\tif len(material)%pageSize != 0 {\n\t\treturn nil, fmt.Errorf(\"pad size must be divisible by page size\")\n\t}\n\n\t\/\/ Do the page-splitting work up front\n\tvar pages [][]byte\n\tfor i := 0; i < len(material); i += pageSize {\n\t\tpages = append(pages, material[i:i+pageSize])\n\t}\n\n\tif startPage < 1 || startPage > len(pages) {\n\t\treturn nil, fmt.Errorf(\"start page (%d) out of bounds\", startPage)\n\t}\n\n\tp := Pad{\n\t\tpages: pages,\n\t\tcurrentPage: startPage - 1,\n\t}\n\n\treturn &p, nil\n}\n\n\/\/ TotalPages returns the number of pages in the pad\nfunc (p *Pad) TotalPages() int {\n\treturn len(p.pages)\n}\n\n\/\/ UnusedPages returns the number of unused pages in the pad\nfunc (p *Pad) RemainingPages() int {\n\treturn len(p.pages) - (p.currentPage + 1)\n}\n\n\/\/ UsedPages returns the number of pages that have been used\nfunc (p *Pad) UsedPages() int {\n\treturn p.currentPage + 1\n}\n\n\/\/ PreviousPage returns the payload of the last used page\nfunc (p *Pad) PreviousPage() ([]byte, error) {\n\tif p.currentPage == 0 {\n\t\treturn nil, fmt.Errorf(\"no previous pages\")\n\t}\n\treturn p.pages[p.currentPage-1], nil\n}\n\n\/\/ CurrentPage returns the payload of the current page\nfunc (p *Pad) CurrentPage() []byte {\n\treturn p.pages[p.currentPage]\n}\n\n\/\/ NextPage will advance the page pointer, and return the payload of the\n\/\/ new current key.\nfunc (p *Pad) NextPage() ([]byte, error) {\n\tif p.RemainingPages() == 0 {\n\t\treturn nil, fmt.Errorf(\"pad depleted\")\n\t}\n\tp.currentPage++\n\treturn p.CurrentPage(), nil\n}\n<commit_msg>Added Encode\/Decode functions<commit_after>package otp\n\nimport (\n\t\"fmt\"\n)\n\ntype Pad struct {\n\tpages [][]byte\n\tcurrentPage int\n}\n\n\/\/ NewPad creates a new \"one-time pad\"\nfunc NewPad(material []byte, pageSize int, startPage int) (*Pad, error) {\n\tif len(material)%pageSize != 0 {\n\t\treturn nil, fmt.Errorf(\"pad size must be divisible by page size\")\n\t}\n\n\t\/\/ Do the page-splitting work up front\n\tvar pages [][]byte\n\tfor i := 0; i < len(material); i += pageSize {\n\t\tpages = append(pages, material[i:i+pageSize])\n\t}\n\n\tif startPage < 1 || startPage > len(pages) {\n\t\treturn nil, fmt.Errorf(\"start page (%d) out of bounds\", startPage)\n\t}\n\n\tp := Pad{\n\t\tpages: pages,\n\t\tcurrentPage: startPage - 1,\n\t}\n\n\treturn &p, nil\n}\n\n\/\/ TotalPages returns the number of pages in the pad\nfunc (p *Pad) TotalPages() int {\n\treturn len(p.pages)\n}\n\n\/\/ UnusedPages returns the number of unused pages in the pad\nfunc (p *Pad) RemainingPages() int {\n\treturn len(p.pages) - (p.currentPage + 1)\n}\n\n\/\/ UsedPages returns the number of pages that have been used\nfunc (p *Pad) UsedPages() int {\n\treturn p.currentPage + 1\n}\n\n\/\/ PreviousPage returns the payload of the last used page\nfunc (p *Pad) PreviousPage() ([]byte, error) {\n\tif p.currentPage == 0 {\n\t\treturn nil, fmt.Errorf(\"no previous pages\")\n\t}\n\treturn p.pages[p.currentPage-1], nil\n}\n\n\/\/ CurrentPage returns the payload of the current page\nfunc (p *Pad) CurrentPage() []byte {\n\treturn p.pages[p.currentPage]\n}\n\n\/\/ NextPage will advance the page pointer, and return the payload of the\n\/\/ new current key.\nfunc (p *Pad) NextPage() ([]byte, error) {\n\tif p.RemainingPages() == 0 {\n\t\treturn nil, fmt.Errorf(\"pad depleted\")\n\t}\n\tp.currentPage++\n\treturn p.CurrentPage(), nil\n}\n\nfunc (p *Pad) Encode(in []byte) ([]byte, error) {\n\tvar result []byte\n\tkey := p.CurrentPage()\n\n\t\/\/ Key must be at least as long as plain text\n\tif len(key) < len(in) {\n\t\treturn nil, fmt.Errorf(\"insufficient key size\")\n\t}\n\n\tfor i := range in {\n\t\tbdec := int64(in[i])\n\t\tkdec := int64(key[i])\n\t\tencoded := uint64(bdec+kdec) % (1 << 63)\n\t\tresult = append(result, byte(encoded))\n\t}\n\treturn result, nil\n}\n\nfunc (p *Pad) Decode(in []byte) ([]byte, error) {\n\tvar result []byte\n\tkey := p.CurrentPage()\n\n\t\/\/ Key must be at least as long as plain text\n\tif len(key) < len(in) {\n\t\treturn nil, fmt.Errorf(\"insufficient key size\")\n\t}\n\n\tfor i := range in {\n\t\tbdec := int64(in[i])\n\t\tkdec := int64(key[i])\n\t\tdecoded := uint64(bdec-kdec) % (1 << 63)\n\t\tif decoded < 0 {\n\t\t\tdecoded += 26\n\t\t}\n\t\tresult = append(result, byte(decoded))\n\t}\n\treturn result, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"text\/template\"\n\t\"time\"\n)\n\nvar pac struct {\n\ttemplate *template.Template\n\ttopLevelDomain string\n\tdirectList *string \/\/ use pointer to guarantee atomic update\n\tupdated time.Time\n\tlock sync.Mutex\n}\n\nfunc init() {\n\tconst pacRawTmpl = `var direct = 'DIRECT';\nvar httpProxy = 'PROXY {{.ProxyAddr}}; DIRECT';\n\nvar directList = [\n\"\",\n\"{{.DirectDomains}}\"\n];\n\nvar directAcc = {};\nfor (var i = 0; i < directList.length; i += 1) {\n\tdirectAcc[directList[i]] = true;\n}\n\nvar topLevel = {\n{{.TopLevel}}\n};\n\n\/\/ only handles IPv4 address now\nfunction hostIsIP(host) {\n\tvar parts = host.split('.');\n\tif (parts.length != 4) {\n\t\treturn false;\n\t}\n\tfor (var i = 3; i >= 0; i--) {\n\t\tif (parts[i].length === 0 || parts[i].length > 3) {\n\t\t\treturn false;\n\t\t}\n\t\tvar n = Number(parts[i]);\n\t\tif (isNaN(n) || n < 0 || n > 255) {\n\t\t\treturn false;\n\t\t}\n\t}\n\treturn true;\n}\n\nfunction host2domain(host) {\n\tvar lastDot = host.lastIndexOf('.');\n\tif (lastDot === -1) {\n\t\treturn \"\"; \/\/ simple host name has no domain\n\t}\n\t\/\/ Find the second last dot\n\tdot2ndLast = host.lastIndexOf(\".\", lastDot-1);\n\tif (dot2ndLast === -1)\n\t\treturn host;\n\n\tvar part = host.substring(dot2ndLast+1, lastDot);\n\tif (topLevel[part]) {\n\t\tvar dot3rdLast = host.lastIndexOf(\".\", dot2ndLast-1);\n\t\tif (dot3rdLast === -1) {\n\t\t\treturn host;\n\t\t}\n\t\treturn host.substring(dot3rdLast+1);\n\t}\n\treturn host.substring(dot2ndLast+1);\n}\n\nfunction FindProxyForURL(url, host) {\n\treturn (hostIsIP(host) || directAcc[host] || directAcc[host2domain(host)]) ? direct : httpProxy;\n}\n`\n\tvar err error\n\tpac.template, err = template.New(\"pac\").Parse(pacRawTmpl)\n\tif err != nil {\n\t\tfmt.Println(\"Internal error on generating pac file template:\", err)\n\t\tos.Exit(1)\n\t}\n\n\tvar buf bytes.Buffer\n\tfor k, _ := range topLevelDomain {\n\t\tbuf.WriteString(fmt.Sprintf(\"\\t\\\"%s\\\": true,\\n\", k))\n\t}\n\tpac.topLevelDomain = buf.String()[:buf.Len()-2] \/\/ remove the final comma\n}\n\n\/\/ No need for content-length as we are closing connection\nvar pacHeader = []byte(\"HTTP\/1.1 200 OK\\r\\nServer: cow-proxy\\r\\n\" +\n\t\"Content-Type: application\/x-ns-proxy-autoconfig\\r\\nConnection: close\\r\\n\\r\\n\")\n\nfunc genPAC(c *clientConn) []byte {\n\tbuf := new(bytes.Buffer)\n\n\tproxyAddr := c.proxy.addrInPAC\n\tif proxyAddr == \"\" {\n\t\thost, _ := splitHostPort(c.LocalAddr().String())\n\t\tproxyAddr = net.JoinHostPort(host, c.proxy.port)\n\t}\n\n\tif *pac.directList == \"\" {\n\t\t\/\/ Empty direct domain list\n\t\tbuf.Write(pacHeader)\n\t\tpacproxy := fmt.Sprintf(\"function FindProxyForURL(url, host) { return 'PROXY %s; DIRECT'; };\",\n\t\t\tproxyAddr)\n\t\tbuf.Write([]byte(pacproxy))\n\t\treturn buf.Bytes()\n\t}\n\n\tdata := struct {\n\t\tProxyAddr string\n\t\tDirectDomains string\n\t\tTopLevel string\n\t}{\n\t\tproxyAddr,\n\t\t*pac.directList,\n\t\tpac.topLevelDomain,\n\t}\n\n\tbuf.Write(pacHeader)\n\tif err := pac.template.Execute(buf, data); err != nil {\n\t\terrl.Println(\"Error generating pac file:\", err)\n\t\tpanic(\"Error generating pac file\")\n\t}\n\treturn buf.Bytes()\n}\n\nfunc initPAC() {\n\ts := strings.Join(siteStat.GetDirectList(), \"\\\",\\n\\\"\")\n\tpac.directList = &s\n\tgo func() {\n\t\tfor {\n\t\t\ttime.Sleep(10 * time.Minute)\n\t\t\ts = strings.Join(siteStat.GetDirectList(), \"\\\",\\n\\\"\")\n\t\t\tpac.directList = &s\n\t\t}\n\t}()\n}\n\nfunc sendPAC(c *clientConn) {\n\tif _, err := c.Write(genPAC(c)); err != nil {\n\t\tdebug.Println(\"Error sending PAC file\")\n\t\treturn\n\t}\n}\n<commit_msg>A little clean up for PAC.<commit_after>package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/template\"\n\t\"time\"\n)\n\nvar pac struct {\n\ttemplate *template.Template\n\ttopLevelDomain string\n\tdirectList string\n}\n\nfunc init() {\n\tconst pacRawTmpl = `var direct = 'DIRECT';\nvar httpProxy = 'PROXY {{.ProxyAddr}}; DIRECT';\n\nvar directList = [\n\"\",\n\"{{.DirectDomains}}\"\n];\n\nvar directAcc = {};\nfor (var i = 0; i < directList.length; i += 1) {\n\tdirectAcc[directList[i]] = true;\n}\n\nvar topLevel = {\n{{.TopLevel}}\n};\n\n\/\/ only handles IPv4 address now\nfunction hostIsIP(host) {\n\tvar parts = host.split('.');\n\tif (parts.length != 4) {\n\t\treturn false;\n\t}\n\tfor (var i = 3; i >= 0; i--) {\n\t\tif (parts[i].length === 0 || parts[i].length > 3) {\n\t\t\treturn false;\n\t\t}\n\t\tvar n = Number(parts[i]);\n\t\tif (isNaN(n) || n < 0 || n > 255) {\n\t\t\treturn false;\n\t\t}\n\t}\n\treturn true;\n}\n\nfunction host2domain(host) {\n\tvar lastDot = host.lastIndexOf('.');\n\tif (lastDot === -1) {\n\t\treturn \"\"; \/\/ simple host name has no domain\n\t}\n\t\/\/ Find the second last dot\n\tdot2ndLast = host.lastIndexOf(\".\", lastDot-1);\n\tif (dot2ndLast === -1)\n\t\treturn host;\n\n\tvar part = host.substring(dot2ndLast+1, lastDot);\n\tif (topLevel[part]) {\n\t\tvar dot3rdLast = host.lastIndexOf(\".\", dot2ndLast-1);\n\t\tif (dot3rdLast === -1) {\n\t\t\treturn host;\n\t\t}\n\t\treturn host.substring(dot3rdLast+1);\n\t}\n\treturn host.substring(dot2ndLast+1);\n}\n\nfunction FindProxyForURL(url, host) {\n\treturn (hostIsIP(host) || directAcc[host] || directAcc[host2domain(host)]) ? direct : httpProxy;\n}\n`\n\tvar err error\n\tpac.template, err = template.New(\"pac\").Parse(pacRawTmpl)\n\tif err != nil {\n\t\tfmt.Println(\"Internal error on generating pac file template:\", err)\n\t\tos.Exit(1)\n\t}\n\n\tvar buf bytes.Buffer\n\tfor k, _ := range topLevelDomain {\n\t\tbuf.WriteString(fmt.Sprintf(\"\\t\\\"%s\\\": true,\\n\", k))\n\t}\n\tpac.topLevelDomain = buf.String()[:buf.Len()-2] \/\/ remove the final comma\n}\n\n\/\/ No need for content-length as we are closing connection\nvar pacHeader = []byte(\"HTTP\/1.1 200 OK\\r\\nServer: cow-proxy\\r\\n\" +\n\t\"Content-Type: application\/x-ns-proxy-autoconfig\\r\\nConnection: close\\r\\n\\r\\n\")\n\n\/\/ Different client will have different proxy URL, so generate it upon each request.\nfunc genPAC(c *clientConn) []byte {\n\tbuf := new(bytes.Buffer)\n\n\tproxyAddr := c.proxy.addrInPAC\n\tif proxyAddr == \"\" {\n\t\thost, _ := splitHostPort(c.LocalAddr().String())\n\t\tproxyAddr = net.JoinHostPort(host, c.proxy.port)\n\t}\n\n\tif pac.directList == \"\" {\n\t\t\/\/ Empty direct domain list\n\t\tbuf.Write(pacHeader)\n\t\tpacproxy := fmt.Sprintf(\"function FindProxyForURL(url, host) { return 'PROXY %s; DIRECT'; };\",\n\t\t\tproxyAddr)\n\t\tbuf.Write([]byte(pacproxy))\n\t\treturn buf.Bytes()\n\t}\n\n\tdata := struct {\n\t\tProxyAddr string\n\t\tDirectDomains string\n\t\tTopLevel string\n\t}{\n\t\tproxyAddr,\n\t\tpac.directList,\n\t\tpac.topLevelDomain,\n\t}\n\n\tbuf.Write(pacHeader)\n\tif err := pac.template.Execute(buf, data); err != nil {\n\t\terrl.Println(\"Error generating pac file:\", err)\n\t\tpanic(\"Error generating pac file\")\n\t}\n\treturn buf.Bytes()\n}\n\nfunc initPAC() {\n\tpac.directList = strings.Join(siteStat.GetDirectList(), \"\\\",\\n\\\"\")\n\tgo func() {\n\t\tfor {\n\t\t\ttime.Sleep(10 * time.Minute)\n\t\t\tpac.directList = strings.Join(siteStat.GetDirectList(), \"\\\",\\n\\\"\")\n\t\t}\n\t}()\n}\n\nfunc sendPAC(c *clientConn) {\n\tif _, err := c.Write(genPAC(c)); err != nil {\n\t\tdebug.Println(\"Error sending PAC file\")\n\t\treturn\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\nvar pacRawTmpl = `var direct = 'DIRECT';\nvar httpProxy = 'PROXY {{.ProxyAddr}}; DIRECT';\n\nvar directList = [\n\"{{.DirectDomains}}\"\n];\n\nvar directAcc = {};\nfor (var i = 0; i < directList.length; i += 1) {\n\tdirectAcc[directList[i]] = true;\n}\n\nvar topLevel = {\n\t\"co\": true,\n\t\"org\": true,\n\t\"com\": true,\n\t\"net\": true,\n\t\"edu\": true\n};\n\nfunction host2domain(host) {\n\tvar lastDot = host.lastIndexOf(\".\");\n\tif (lastDot === -1)\n\t\treturn host;\n\t\/\/ Find the second last dot\n\tdot2ndLast = host.lastIndexOf(\".\", lastDot-1);\n\tif (dot2ndLast === -1)\n\treturn host;\n\n\tvar part = host.substring(dot2ndLast+1, lastDot)\n\tif (topLevel[part]) {\n\t\tvar dot3rdLast = host.lastIndexOf(\".\", dot2ndLast-1)\n\t\tif (dot3rdLast === -1) {\n\t\t\treturn host\n\t\t}\n\t\treturn host.substring(dot3rdLast+1)\n\t}\n\treturn host.substring(dot2ndLast+1);\n};\n\nfunction FindProxyForURL(url, host) {\n\treturn directAcc[host2domain(host)] ? direct : httpProxy;\n};\n`\n\nvar pacTmpl *template.Template\n\nfunc init() {\n\tvar err error\n\tpacTmpl, err = template.New(\"pac\").Parse(pacRawTmpl)\n\tif err != nil {\n\t\tfmt.Println(\"Internal error on generating pac file template\")\n\t\tos.Exit(1)\n\t}\n}\n\nfunc genPAC() string {\n\t\/\/ domains in PAC file needs double quote\n\tds1 := strings.Join(alwaysDirectDs.toArray(), \"\\\",\\n\\\"\")\n\tds2 := strings.Join(directDs.toArray(), \"\\\",\\n\\\"\")\n\tds := \"\"\n\tif ds1 != \"\" && ds2 != \"\" {\n\t\tds = ds1 + \"\\\",\\n\\\"\" + ds2\n\t}\n\tdata := struct {\n\t\tProxyAddr string\n\t\tDirectDomains string\n\t}{\n\t\tconfig.listenAddr,\n\t\tds,\n\t}\n\n\t\/\/ debug.Println(\"direct:\", data.DirectDomains)\n\n\tbuf := new(bytes.Buffer)\n\tif err := pacTmpl.Execute(buf, data); err != nil {\n\t\terrl.Println(\"Error generating pac file:\", err)\n\t\tos.Exit(1)\n\t}\n\tpac := buf.String()\n\tpacHeader := \"HTTP\/1.1 200 Okay\\r\\nServer: cow-proxy\\r\\nContent-Type: text\/html\\r\\nConnection: close\\r\\n\" +\n\t\tfmt.Sprintf(\"Content-Length: %d\\r\\n\\r\\n\", len(pac))\n\tpac = pacHeader + pac\n\treturn pac\n}\n\nfunc sendPAC(w *bufio.Writer) {\n\tw.WriteString(genPAC())\n\tw.Flush()\n}\n<commit_msg>Fix bug in generating PAC file.<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/template\"\n)\n\nvar pacRawTmpl = `var direct = 'DIRECT';\nvar httpProxy = 'PROXY {{.ProxyAddr}}; DIRECT';\n\nvar directList = [\n\"{{.DirectDomains}}\"\n];\n\nvar directAcc = {};\nfor (var i = 0; i < directList.length; i += 1) {\n\tdirectAcc[directList[i]] = true;\n}\n\nvar topLevel = {\n\t\"co\": true,\n\t\"org\": true,\n\t\"com\": true,\n\t\"net\": true,\n\t\"edu\": true\n};\n\nfunction host2domain(host) {\n\tvar lastDot = host.lastIndexOf(\".\");\n\tif (lastDot === -1)\n\t\treturn host;\n\t\/\/ Find the second last dot\n\tdot2ndLast = host.lastIndexOf(\".\", lastDot-1);\n\tif (dot2ndLast === -1)\n\treturn host;\n\n\tvar part = host.substring(dot2ndLast+1, lastDot)\n\tif (topLevel[part]) {\n\t\tvar dot3rdLast = host.lastIndexOf(\".\", dot2ndLast-1)\n\t\tif (dot3rdLast === -1) {\n\t\t\treturn host\n\t\t}\n\t\treturn host.substring(dot3rdLast+1)\n\t}\n\treturn host.substring(dot2ndLast+1);\n};\n\nfunction FindProxyForURL(url, host) {\n\treturn directAcc[host2domain(host)] ? direct : httpProxy;\n};\n`\n\nvar pacTmpl *template.Template\n\nfunc init() {\n\tvar err error\n\tpacTmpl, err = template.New(\"pac\").Parse(pacRawTmpl)\n\tif err != nil {\n\t\tfmt.Println(\"Internal error on generating pac file template\")\n\t\tos.Exit(1)\n\t}\n}\n\nfunc genPAC() string {\n\t\/\/ domains in PAC file needs double quote\n\tds1 := strings.Join(alwaysDirectDs.toArray(), \"\\\",\\n\\\"\")\n\tds2 := strings.Join(directDs.toArray(), \"\\\",\\n\\\"\")\n\tvar ds string\n\tif ds1 == \"\" {\n\t\tds = ds2\n\t} else if ds2 == \"\" {\n\t\tds = ds1\n\t} else {\n\t\tds = ds1 + \"\\\",\\n\\\"\" + ds2\n\t}\n\tdata := struct {\n\t\tProxyAddr string\n\t\tDirectDomains string\n\t}{\n\t\tconfig.listenAddr,\n\t\tds,\n\t}\n\n\t\/\/ debug.Println(\"direct:\", data.DirectDomains)\n\n\tbuf := new(bytes.Buffer)\n\tif err := pacTmpl.Execute(buf, data); err != nil {\n\t\terrl.Println(\"Error generating pac file:\", err)\n\t\tos.Exit(1)\n\t}\n\tpac := buf.String()\n\tpacHeader := \"HTTP\/1.1 200 Okay\\r\\nServer: cow-proxy\\r\\nContent-Type: text\/html\\r\\nConnection: close\\r\\n\" +\n\t\tfmt.Sprintf(\"Content-Length: %d\\r\\n\\r\\n\", len(pac))\n\tpac = pacHeader + pac\n\treturn pac\n}\n\nfunc sendPAC(w *bufio.Writer) {\n\tw.WriteString(genPAC())\n\tw.Flush()\n}\n<|endoftext|>"} {"text":"<commit_before>package service\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/syou6162\/go-active-learning\/lib\/model\"\n)\n\nfunc (app *goActiveLearningApp) InsertOrUpdateExample(e *model.Example) error {\n\treturn app.repo.InsertOrUpdateExample(e)\n}\n\nfunc (app *goActiveLearningApp) InsertExampleFromScanner(scanner *bufio.Scanner) (*model.Example, error) {\n\treturn app.repo.InsertExampleFromScanner(scanner)\n}\n\nfunc (app *goActiveLearningApp) InsertExamplesFromReader(reader io.Reader) error {\n\treturn app.repo.InsertExamplesFromReader(reader)\n}\n\nfunc (app *goActiveLearningApp) ReadExamples() (model.Examples, error) {\n\treturn app.repo.ReadExamples()\n}\n\nfunc (app *goActiveLearningApp) ReadRecentExamples(from time.Time) (model.Examples, error) {\n\treturn app.repo.ReadRecentExamples(from)\n}\n\nfunc (app *goActiveLearningApp) ReadExamplesByLabel(label model.LabelType, limit int) (model.Examples, error) {\n\treturn app.repo.ReadExamplesByLabel(label, limit)\n}\n\nfunc (app *goActiveLearningApp) ReadLabeledExamples(limit int) (model.Examples, error) {\n\treturn app.repo.ReadLabeledExamples(limit)\n}\n\nfunc (app *goActiveLearningApp) ReadPositiveExamples(limit int) (model.Examples, error) {\n\treturn app.repo.ReadPositiveExamples(limit)\n}\n\nfunc (app *goActiveLearningApp) ReadNegativeExamples(limit int) (model.Examples, error) {\n\treturn app.repo.ReadNegativeExamples(limit)\n}\n\nfunc (app *goActiveLearningApp) ReadUnlabeledExamples(limit int) (model.Examples, error) {\n\treturn app.repo.ReadUnlabeledExamples(limit)\n}\n\nfunc (app *goActiveLearningApp) FindExampleByUlr(url string) (*model.Example, error) {\n\treturn app.repo.FindExampleByUlr(url)\n}\n\nfunc (app *goActiveLearningApp) SearchExamplesByUlrs(urls []string) (model.Examples, error) {\n\treturn app.repo.SearchExamplesByUlrs(urls)\n}\n\nfunc (app *goActiveLearningApp) DeleteAllExamples() error {\n\treturn app.repo.DeleteAllExamples()\n}\n\nfunc (app *goActiveLearningApp) UpdateExampleMetadata(e model.Example) error {\n\tif err := app.cache.UpdateExampleMetadata(e); err != nil {\n\t\treturn err\n\t}\n\tif err := app.repo.InsertOrUpdateExample(&e); err != nil {\n\t\treturn err\n\t}\n\tif err := app.repo.UpdateFeatureVector(&e); err != nil {\n\t\treturn err\n\t}\n\tif err := app.repo.UpdateHatenaBookmark(&e); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (app *goActiveLearningApp) UpdateExamplesMetadata(examples model.Examples) error {\n\tif err := app.cache.UpdateExamplesMetadata(examples); err != nil {\n\t\treturn err\n\t}\n\tfor _, e := range examples {\n\t\tif err := app.repo.InsertOrUpdateExample(e); err != nil {\n\t\t\tlog.Println(fmt.Sprintf(\"Error occured proccessing %s %s\", e.Url, err.Error()))\n\t\t}\n\t\tif err := app.repo.UpdateFeatureVector(e); err != nil {\n\t\t\tlog.Println(fmt.Sprintf(\"Error occured updating feature vector %s %s\", e.Url, err.Error()))\n\t\t}\n\t\tif err := app.repo.UpdateHatenaBookmark(e); err != nil {\n\t\t\tlog.Println(fmt.Sprintf(\"Error occured updating feature vector %s %s\", e.Url, err.Error()))\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (app *goActiveLearningApp) UpdateExampleExpire(e model.Example, duration time.Duration) error {\n\treturn app.cache.UpdateExampleExpire(e, duration)\n}\n\nfunc (app *goActiveLearningApp) AttachMetadata(examples model.Examples) error {\n\tfvList, err := app.repo.SearchFeatureVector(examples)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor idx, e := range examples {\n\t\te.Fv = fvList[idx]\n\t}\n\n\thatenaBookmarks, err := app.repo.SearchHatenaBookmarks(examples)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor idx, e := range examples {\n\t\te.HatenaBookmark = hatenaBookmarks[idx]\n\t}\n\treturn nil\n}\n\nfunc (app *goActiveLearningApp) AttachLightMetadata(examples model.Examples) error {\n\thatenaBookmarks, err := app.repo.SearchHatenaBookmarks(examples)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor idx, e := range examples {\n\t\te.HatenaBookmark = hatenaBookmarks[idx]\n\t}\n\treturn nil\n}\n\nfunc (app *goActiveLearningApp) Fetch(examples model.Examples) {\n\tapp.cache.Fetch(examples)\n}\n\nfunc (app *goActiveLearningApp) AddExamplesToList(listName string, examples model.Examples) error {\n\treturn app.cache.AddExamplesToList(listName, examples)\n}\n\nfunc (app *goActiveLearningApp) GetUrlsFromList(listName string, from int64, to int64) ([]string, error) {\n\treturn app.cache.GetUrlsFromList(listName, from, to)\n}\n<commit_msg>情報ミスってた<commit_after>package service\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/syou6162\/go-active-learning\/lib\/model\"\n)\n\nfunc (app *goActiveLearningApp) InsertOrUpdateExample(e *model.Example) error {\n\treturn app.repo.InsertOrUpdateExample(e)\n}\n\nfunc (app *goActiveLearningApp) InsertExampleFromScanner(scanner *bufio.Scanner) (*model.Example, error) {\n\treturn app.repo.InsertExampleFromScanner(scanner)\n}\n\nfunc (app *goActiveLearningApp) InsertExamplesFromReader(reader io.Reader) error {\n\treturn app.repo.InsertExamplesFromReader(reader)\n}\n\nfunc (app *goActiveLearningApp) ReadExamples() (model.Examples, error) {\n\treturn app.repo.ReadExamples()\n}\n\nfunc (app *goActiveLearningApp) ReadRecentExamples(from time.Time) (model.Examples, error) {\n\treturn app.repo.ReadRecentExamples(from)\n}\n\nfunc (app *goActiveLearningApp) ReadExamplesByLabel(label model.LabelType, limit int) (model.Examples, error) {\n\treturn app.repo.ReadExamplesByLabel(label, limit)\n}\n\nfunc (app *goActiveLearningApp) ReadLabeledExamples(limit int) (model.Examples, error) {\n\treturn app.repo.ReadLabeledExamples(limit)\n}\n\nfunc (app *goActiveLearningApp) ReadPositiveExamples(limit int) (model.Examples, error) {\n\treturn app.repo.ReadPositiveExamples(limit)\n}\n\nfunc (app *goActiveLearningApp) ReadNegativeExamples(limit int) (model.Examples, error) {\n\treturn app.repo.ReadNegativeExamples(limit)\n}\n\nfunc (app *goActiveLearningApp) ReadUnlabeledExamples(limit int) (model.Examples, error) {\n\treturn app.repo.ReadUnlabeledExamples(limit)\n}\n\nfunc (app *goActiveLearningApp) FindExampleByUlr(url string) (*model.Example, error) {\n\treturn app.repo.FindExampleByUlr(url)\n}\n\nfunc (app *goActiveLearningApp) SearchExamplesByUlrs(urls []string) (model.Examples, error) {\n\treturn app.repo.SearchExamplesByUlrs(urls)\n}\n\nfunc (app *goActiveLearningApp) DeleteAllExamples() error {\n\treturn app.repo.DeleteAllExamples()\n}\n\nfunc (app *goActiveLearningApp) UpdateExampleMetadata(e model.Example) error {\n\tif err := app.cache.UpdateExampleMetadata(e); err != nil {\n\t\treturn err\n\t}\n\tif err := app.repo.InsertOrUpdateExample(&e); err != nil {\n\t\treturn err\n\t}\n\tif err := app.repo.UpdateFeatureVector(&e); err != nil {\n\t\treturn err\n\t}\n\tif err := app.repo.UpdateHatenaBookmark(&e); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (app *goActiveLearningApp) UpdateExamplesMetadata(examples model.Examples) error {\n\tif err := app.cache.UpdateExamplesMetadata(examples); err != nil {\n\t\treturn err\n\t}\n\tfor _, e := range examples {\n\t\tif err := app.repo.InsertOrUpdateExample(e); err != nil {\n\t\t\tlog.Println(fmt.Sprintf(\"Error occured proccessing %s %s\", e.Url, err.Error()))\n\t\t}\n\t\tif err := app.repo.UpdateFeatureVector(e); err != nil {\n\t\t\tlog.Println(fmt.Sprintf(\"Error occured updating feature vector %s %s\", e.Url, err.Error()))\n\t\t}\n\t\tif err := app.repo.UpdateHatenaBookmark(e); err != nil {\n\t\t\tlog.Println(fmt.Sprintf(\"Error occured updating bookmark info %s %s\", e.Url, err.Error()))\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (app *goActiveLearningApp) UpdateExampleExpire(e model.Example, duration time.Duration) error {\n\treturn app.cache.UpdateExampleExpire(e, duration)\n}\n\nfunc (app *goActiveLearningApp) AttachMetadata(examples model.Examples) error {\n\tfvList, err := app.repo.SearchFeatureVector(examples)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor idx, e := range examples {\n\t\te.Fv = fvList[idx]\n\t}\n\n\thatenaBookmarks, err := app.repo.SearchHatenaBookmarks(examples)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor idx, e := range examples {\n\t\te.HatenaBookmark = hatenaBookmarks[idx]\n\t}\n\treturn nil\n}\n\nfunc (app *goActiveLearningApp) AttachLightMetadata(examples model.Examples) error {\n\thatenaBookmarks, err := app.repo.SearchHatenaBookmarks(examples)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor idx, e := range examples {\n\t\te.HatenaBookmark = hatenaBookmarks[idx]\n\t}\n\treturn nil\n}\n\nfunc (app *goActiveLearningApp) Fetch(examples model.Examples) {\n\tapp.cache.Fetch(examples)\n}\n\nfunc (app *goActiveLearningApp) AddExamplesToList(listName string, examples model.Examples) error {\n\treturn app.cache.AddExamplesToList(listName, examples)\n}\n\nfunc (app *goActiveLearningApp) GetUrlsFromList(listName string, from int64, to int64) ([]string, error) {\n\treturn app.cache.GetUrlsFromList(listName, from, to)\n}\n<|endoftext|>"} {"text":"<commit_before>package deployment\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/v1\"\n\t\"k8s.io\/kubernetes\/pkg\/runtime\"\n)\n\n\/\/ ManifestStep implements a deployment step\ntype ManifestStep struct {\n\tobject runtime.Object\n}\n\nvar _ Step = &ManifestStep{}\n\n\/\/ NewManifestStep creates a default step\nfunc NewManifestStep(object runtime.Object) Step {\n\treturn &ManifestStep{\n\t\tobject: object,\n\t}\n}\n\nfunc check(name string, r bool) bool {\n\tif r == false {\n\t\tglog.Info(\"Found difference: \" + name)\n\t}\n\treturn r\n}\n\nfunc compareI(name string, a, b interface{}) bool {\n\treturn check(name, reflect.DeepEqual(a, b))\n}\n\nfunc compareS(name, a, b string) bool {\n\treturn check(name, a == b)\n}\n\nfunc compareContainers(a, b v1.Container) bool {\n\treturn a.Name == b.Name && a.Image == b.Image &&\n\t\tcompareI(\"container command\", a.Command, b.Command) &&\n\t\tcompareI(\"container args\", a.Args, b.Args) &&\n\t\tcompareS(\"container workdir\", a.WorkingDir, b.WorkingDir) &&\n\t\tcompareI(\"container ports\", a.Ports, b.Ports) &&\n\t\tcompareI(\"container env\", a.Env, b.Env) &&\n\t\tcompareI(\"container resources\", a.Resources, b.Resources) &&\n\t\tcheck(\"container pull policy\", a.ImagePullPolicy == b.ImagePullPolicy)\n}\n\nfunc compareContainerLists(a, b []v1.Container) bool {\n\taMap := map[string]v1.Container{}\n\tbMap := map[string]v1.Container{}\n\n\tfor _, c := range a {\n\t\taMap[c.Name] = c\n\t}\n\tfor _, c := range b {\n\t\tbMap[c.Name] = c\n\t}\n\n\tif !check(\"conainer list len\", len(aMap) == len(bMap)) {\n\t\treturn false\n\t}\n\n\tfor name, ac := range aMap {\n\t\tbc, ok := bMap[name]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\tif !compareContainers(ac, bc) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc comparePodSpecs(a, b v1.PodSpec) bool {\n\tif !check(\"pod spec containers len\", len(a.Containers) == len(b.Containers)) {\n\t\treturn false\n\t}\n\tif !compareContainerLists(a.Containers, b.Containers) {\n\t\treturn false\n\t}\n\treturn compareI(\"pod spec image pull secrets\", a.ImagePullSecrets, b.ImagePullSecrets)\n}\n\nfunc comparePods(a, b *v1.Pod) bool {\n\treturn compareS(\"pod object meta name\", a.ObjectMeta.Name, b.ObjectMeta.Name) &&\n\t\tcompareI(\"pod object meta labels\", a.ObjectMeta.Labels, b.ObjectMeta.Labels) &&\n\t\tcomparePodSpecs(a.Spec, b.Spec)\n}\n\nfunc compareRCs(a, b *v1.ReplicationController) bool {\n\tif a.ObjectMeta.Name == \"\" {\n\t\treturn false\n\t}\n\treturn compareS(\"rc object meta name\", a.ObjectMeta.Name, b.ObjectMeta.Name) &&\n\t\tcompareI(\"rc object meta labels\", a.ObjectMeta.Labels, b.ObjectMeta.Labels) &&\n\t\tcompareI(\"rc spec replicas\", a.Spec.Replicas, b.Spec.Replicas) &&\n\t\tcompareI(\"rc sepc selector\", a.Spec.Selector, b.Spec.Selector) &&\n\t\tcompareI(\"rc spec template object meta labels\", a.Spec.Template.ObjectMeta.Labels, b.Spec.Template.ObjectMeta.Labels) &&\n\t\tcompareS(\"rc spec template object meta name\", a.Spec.Template.ObjectMeta.Name, b.Spec.Template.ObjectMeta.Name) &&\n\t\tcomparePodSpecs(a.Spec.Template.Spec, b.Spec.Template.Spec)\n}\n\n\/\/ Deploy executes the deployment of a step\nfunc (s *ManifestStep) Deploy() error {\n\toGVK := s.object.GetObjectKind().GroupVersionKind()\n\tswitch oGVK.Kind {\n\tcase \"ReplicationController\":\n\t\tvar o *v1.ReplicationController\n\t\tswitch s.object.(type) {\n\t\tcase *v1.ReplicationController:\n\t\t\to = s.object.(*v1.ReplicationController)\n\t\tcase *api.ReplicationController:\n\t\t\trr := s.object.(*api.ReplicationController)\n\t\t\tif err := scheme.Convert(rr, o); err != nil {\n\t\t\t\tglog.Error(\"API object conversion failed.\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\trc, err := client.ReplicationControllers(namespace).Get(o.ObjectMeta.Name)\n\t\tif err == nil && rc != nil {\n\t\t\tif compareRCs(rc, o) {\n\t\t\t\tglog.Info(\"Existing RC is identical, skipping deployment\")\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tvar i int32\n\t\t\trc.Spec.Replicas = &i\n\t\t\tclient.ReplicationControllers(namespace).Update(rc)\n\t\t\ttime.Sleep(1 * time.Second) \/\/ Wait for Kubernetes to delete pods\n\n\t\t\tglog.Info(\"Deleting old replication controller: \", o.ObjectMeta.Name)\n\t\t\terr = client.ReplicationControllers(namespace).Delete(o.ObjectMeta.Name, nil)\n\n\t\t\tfor k := 1; err == nil && k < 20; k++ {\n\t\t\t\ttime.Sleep(200 * time.Millisecond) \/\/ Wait for Kubernetes to delete the resource\n\t\t\t\t_, err = client.ReplicationControllers(namespace).Get(o.ObjectMeta.Name)\n\t\t\t}\n\t\t}\n\n\t\tglog.Info(\"Creating new replication controller: \", o.ObjectMeta.Name)\n\t\t_, err = client.ReplicationControllers(namespace).Create(o)\n\n\t\tif err != nil {\n\t\t\tglog.Error(\"Create or Update failed: \", err)\n\t\t\treturn err\n\t\t}\n\tcase \"Pod\":\n\t\to := s.object.(*v1.Pod)\n\t\tpod, err := client.Pods(namespace).Get(o.ObjectMeta.Name)\n\n\t\tif err == nil && pod != nil {\n\t\t\tif comparePods(pod, o) {\n\t\t\t\tglog.Info(\"Existing Pod is identical, skipping deployment\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tglog.Info(\"Deleting old pod\", o.ObjectMeta.Name)\n\t\t\terr = client.Pods(namespace).Delete(o.ObjectMeta.Name, nil)\n\n\t\t\tfor k := 1; err == nil && k < 20; k++ {\n\t\t\t\ttime.Sleep(200 * time.Millisecond) \/\/ Wait for Kubernetes to delete the resource\n\t\t\t\t_, err = client.Pods(namespace).Get(o.ObjectMeta.Name)\n\t\t\t}\n\t\t}\n\n\t\tglog.Info(\"Creating new pod: \", o.ObjectMeta.Name)\n\t\t_, err = client.Pods(namespace).Create(o)\n\t\tif err != nil {\n\t\t\tglog.Info(\"Create or Update failed: \", err)\n\t\t\treturn err\n\t\t}\n\tcase \"Service\":\n\t\to := s.object.(*v1.Service)\n\t\tservice, err := client.Services(namespace).Get(o.ObjectMeta.Name)\n\n\t\tif err != nil {\n\t\t\tglog.Info(\"Creating new service: \", o.ObjectMeta.Name)\n\t\t\t_, err = client.Services(namespace).Create(o)\n\t\t} else {\n\t\t\tglog.Info(\"Updating service\", o.ObjectMeta.Name)\n\t\t\to.ObjectMeta.ResourceVersion = service.ObjectMeta.ResourceVersion\n\t\t\to.Spec.ClusterIP = service.Spec.ClusterIP\n\t\t\t_, err = client.Services(namespace).Update(o)\n\t\t}\n\t\tif err != nil {\n\t\t\tglog.Info(\"Create or Update failed: \", err)\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\treturn errors.New(\"Kubernetes resource is not recognized: \" + oGVK.Kind)\n\t}\n\treturn nil\n}\n\n\/\/ Destroy deletes kubernetes resource\nfunc (s *ManifestStep) Destroy() error {\n\tvar err error\n\toGVK := s.object.GetObjectKind().GroupVersionKind()\n\tmeta, err := api.ObjectMetaFor(s.object)\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch oGVK.Kind {\n\tcase \"ReplicationController\":\n\t\terr = client.ReplicationControllers(namespace).Delete(meta.Name, nil)\n\tcase \"Service\":\n\t\terr = client.Services(namespace).Delete(meta.Name, nil)\n\tcase \"Pod\":\n\t\terr = client.Pods(namespace).Delete(meta.Name, nil)\n\t}\n\treturn err\n}\n<commit_msg>compare selectors only if they specify selectors (#97)<commit_after>package deployment\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com\/golang\/glog\"\n\t\"k8s.io\/kubernetes\/pkg\/api\"\n\t\"k8s.io\/kubernetes\/pkg\/api\/v1\"\n\t\"k8s.io\/kubernetes\/pkg\/runtime\"\n)\n\n\/\/ ManifestStep implements a deployment step\ntype ManifestStep struct {\n\tobject runtime.Object\n}\n\nvar _ Step = &ManifestStep{}\n\n\/\/ NewManifestStep creates a default step\nfunc NewManifestStep(object runtime.Object) Step {\n\treturn &ManifestStep{\n\t\tobject: object,\n\t}\n}\n\nfunc check(name string, r bool) bool {\n\tif r == false {\n\t\tglog.Info(\"Found difference: \" + name)\n\t}\n\treturn r\n}\n\nfunc compareI(name string, a, b interface{}) bool {\n\treturn check(name, reflect.DeepEqual(a, b))\n}\n\nfunc compareS(name, a, b string) bool {\n\treturn check(name, a == b)\n}\n\nfunc compareContainers(a, b v1.Container) bool {\n\treturn a.Name == b.Name && a.Image == b.Image &&\n\t\tcompareI(\"container command\", a.Command, b.Command) &&\n\t\tcompareI(\"container args\", a.Args, b.Args) &&\n\t\tcompareS(\"container workdir\", a.WorkingDir, b.WorkingDir) &&\n\t\tcompareI(\"container ports\", a.Ports, b.Ports) &&\n\t\tcompareI(\"container env\", a.Env, b.Env) &&\n\t\tcompareI(\"container resources\", a.Resources, b.Resources) &&\n\t\tcheck(\"container pull policy\", a.ImagePullPolicy == b.ImagePullPolicy)\n}\n\nfunc compareContainerLists(a, b []v1.Container) bool {\n\taMap := map[string]v1.Container{}\n\tbMap := map[string]v1.Container{}\n\n\tfor _, c := range a {\n\t\taMap[c.Name] = c\n\t}\n\tfor _, c := range b {\n\t\tbMap[c.Name] = c\n\t}\n\n\tif !check(\"conainer list len\", len(aMap) == len(bMap)) {\n\t\treturn false\n\t}\n\n\tfor name, ac := range aMap {\n\t\tbc, ok := bMap[name]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\tif !compareContainers(ac, bc) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc comparePodSpecs(a, b v1.PodSpec) bool {\n\tif !check(\"pod spec containers len\", len(a.Containers) == len(b.Containers)) {\n\t\treturn false\n\t}\n\tif !compareContainerLists(a.Containers, b.Containers) {\n\t\treturn false\n\t}\n\treturn compareI(\"pod spec image pull secrets\", a.ImagePullSecrets, b.ImagePullSecrets)\n}\n\nfunc comparePods(a, b *v1.Pod) bool {\n\treturn compareS(\"pod object meta name\", a.ObjectMeta.Name, b.ObjectMeta.Name) &&\n\t\tcompareI(\"pod object meta labels\", a.ObjectMeta.Labels, b.ObjectMeta.Labels) &&\n\t\tcomparePodSpecs(a.Spec, b.Spec)\n}\n\nfunc compareRCs(a, b *v1.ReplicationController) bool {\n\tif a.ObjectMeta.Name == \"\" {\n\t\treturn false\n\t}\n\treturn compareS(\"rc object meta name\", a.ObjectMeta.Name, b.ObjectMeta.Name) &&\n\t\tcompareI(\"rc object meta labels\", a.ObjectMeta.Labels, b.ObjectMeta.Labels) &&\n\t\tcompareI(\"rc spec replicas\", a.Spec.Replicas, b.Spec.Replicas) &&\n\t\t(len(a.Spec.Selector) == 0 || len(b.Spec.Selector) == 0 || compareI(\"rc spec selector\", a.Spec.Selector, b.Spec.Selector)) &&\n\t\tcompareI(\"rc spec template object meta labels\", a.Spec.Template.ObjectMeta.Labels, b.Spec.Template.ObjectMeta.Labels) &&\n\t\tcompareS(\"rc spec template object meta name\", a.Spec.Template.ObjectMeta.Name, b.Spec.Template.ObjectMeta.Name) &&\n\t\tcomparePodSpecs(a.Spec.Template.Spec, b.Spec.Template.Spec)\n}\n\n\/\/ Deploy executes the deployment of a step\nfunc (s *ManifestStep) Deploy() error {\n\toGVK := s.object.GetObjectKind().GroupVersionKind()\n\tswitch oGVK.Kind {\n\tcase \"ReplicationController\":\n\t\tvar o *v1.ReplicationController\n\t\tswitch s.object.(type) {\n\t\tcase *v1.ReplicationController:\n\t\t\to = s.object.(*v1.ReplicationController)\n\t\tcase *api.ReplicationController:\n\t\t\trr := s.object.(*api.ReplicationController)\n\t\t\tif err := scheme.Convert(rr, o); err != nil {\n\t\t\t\tglog.Error(\"API object conversion failed.\")\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\trc, err := client.ReplicationControllers(namespace).Get(o.ObjectMeta.Name)\n\t\tif err == nil && rc != nil {\n\t\t\tif compareRCs(rc, o) {\n\t\t\t\tglog.Info(\"Existing RC is identical, skipping deployment\")\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tvar i int32\n\t\t\trc.Spec.Replicas = &i\n\t\t\tclient.ReplicationControllers(namespace).Update(rc)\n\t\t\ttime.Sleep(1 * time.Second) \/\/ Wait for Kubernetes to delete pods\n\n\t\t\tglog.Info(\"Deleting old replication controller: \", o.ObjectMeta.Name)\n\t\t\terr = client.ReplicationControllers(namespace).Delete(o.ObjectMeta.Name, nil)\n\n\t\t\tfor k := 1; err == nil && k < 20; k++ {\n\t\t\t\ttime.Sleep(200 * time.Millisecond) \/\/ Wait for Kubernetes to delete the resource\n\t\t\t\t_, err = client.ReplicationControllers(namespace).Get(o.ObjectMeta.Name)\n\t\t\t}\n\t\t}\n\n\t\tglog.Info(\"Creating new replication controller: \", o.ObjectMeta.Name)\n\t\t_, err = client.ReplicationControllers(namespace).Create(o)\n\n\t\tif err != nil {\n\t\t\tglog.Error(\"Create or Update failed: \", err)\n\t\t\treturn err\n\t\t}\n\tcase \"Pod\":\n\t\to := s.object.(*v1.Pod)\n\t\tpod, err := client.Pods(namespace).Get(o.ObjectMeta.Name)\n\n\t\tif err == nil && pod != nil {\n\t\t\tif comparePods(pod, o) {\n\t\t\t\tglog.Info(\"Existing Pod is identical, skipping deployment\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tglog.Info(\"Deleting old pod\", o.ObjectMeta.Name)\n\t\t\terr = client.Pods(namespace).Delete(o.ObjectMeta.Name, nil)\n\n\t\t\tfor k := 1; err == nil && k < 20; k++ {\n\t\t\t\ttime.Sleep(200 * time.Millisecond) \/\/ Wait for Kubernetes to delete the resource\n\t\t\t\t_, err = client.Pods(namespace).Get(o.ObjectMeta.Name)\n\t\t\t}\n\t\t}\n\n\t\tglog.Info(\"Creating new pod: \", o.ObjectMeta.Name)\n\t\t_, err = client.Pods(namespace).Create(o)\n\t\tif err != nil {\n\t\t\tglog.Info(\"Create or Update failed: \", err)\n\t\t\treturn err\n\t\t}\n\tcase \"Service\":\n\t\to := s.object.(*v1.Service)\n\t\tservice, err := client.Services(namespace).Get(o.ObjectMeta.Name)\n\n\t\tif err != nil {\n\t\t\tglog.Info(\"Creating new service: \", o.ObjectMeta.Name)\n\t\t\t_, err = client.Services(namespace).Create(o)\n\t\t} else {\n\t\t\tglog.Info(\"Updating service\", o.ObjectMeta.Name)\n\t\t\to.ObjectMeta.ResourceVersion = service.ObjectMeta.ResourceVersion\n\t\t\to.Spec.ClusterIP = service.Spec.ClusterIP\n\t\t\t_, err = client.Services(namespace).Update(o)\n\t\t}\n\t\tif err != nil {\n\t\t\tglog.Info(\"Create or Update failed: \", err)\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\treturn errors.New(\"Kubernetes resource is not recognized: \" + oGVK.Kind)\n\t}\n\treturn nil\n}\n\n\/\/ Destroy deletes kubernetes resource\nfunc (s *ManifestStep) Destroy() error {\n\tvar err error\n\toGVK := s.object.GetObjectKind().GroupVersionKind()\n\tmeta, err := api.ObjectMetaFor(s.object)\n\tif err != nil {\n\t\treturn err\n\t}\n\tswitch oGVK.Kind {\n\tcase \"ReplicationController\":\n\t\terr = client.ReplicationControllers(namespace).Delete(meta.Name, nil)\n\tcase \"Service\":\n\t\terr = client.Services(namespace).Delete(meta.Name, nil)\n\tcase \"Pod\":\n\t\terr = client.Pods(namespace).Delete(meta.Name, nil)\n\t}\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package gocbcore\n\nimport (\n\t\"encoding\/binary\"\n)\n\n\/\/ SnapshotState represents the state of a particular cluster snapshot.\ntype SnapshotState uint32\n\n\/\/ HasInMemory returns whether this snapshot is available in memory.\nfunc (s SnapshotState) HasInMemory() bool {\n\treturn uint32(s)&1 != 0\n}\n\n\/\/ HasOnDisk returns whether this snapshot is available on disk.\nfunc (s SnapshotState) HasOnDisk() bool {\n\treturn uint32(s)&2 != 0\n}\n\n\/\/ FailoverEntry represents a single entry in the server fail-over log.\ntype FailoverEntry struct {\n\tVbUuid VbUuid\n\tSeqNo SeqNo\n}\n\n\/\/ StreamObserver provides an interface to receive events from a running DCP stream.\ntype StreamObserver interface {\n\tSnapshotMarker(startSeqNo, endSeqNo uint64, vbId uint16, snapshotType SnapshotState)\n\tMutation(seqNo, revNo uint64, flags, expiry, lockTime uint32, cas uint64, datatype uint8, vbId uint16, key, value []byte)\n\tDeletion(seqNo, revNo, cas uint64, datatype uint8, vbId uint16, key, value []byte)\n\tExpiration(seqNo, revNo, cas uint64, vbId uint16, key []byte)\n\tEnd(vbId uint16, err error)\n}\n\n\/\/ OpenStreamCallback is invoked with the results of `OpenStream` operations.\ntype OpenStreamCallback func([]FailoverEntry, error)\n\n\/\/ CloseStreamCallback is invoked with the results of `CloseStream` operations.\ntype CloseStreamCallback func(error)\n\n\/\/ GetFailoverLogCallback is invoked with the results of `GetFailoverLog` operations.\ntype GetFailoverLogCallback func([]FailoverEntry, error)\n\n\/\/ VbSeqNoEntry represents a single GetVbucketSeqnos sequence number entry.\ntype VbSeqNoEntry struct {\n\tVbId uint16\n\tSeqNo SeqNo\n}\n\n\/\/ GetVBucketSeqnosCallback is invoked with the results of `GetVBucketSeqnos` operations.\ntype GetVBucketSeqnosCallback func([]VbSeqNoEntry, error)\n\n\/\/ OpenStream opens a DCP stream for a particular VBucket.\nfunc (agent *Agent) OpenStream(vbId uint16, flags DcpStreamAddFlag, vbUuid VbUuid, startSeqNo, endSeqNo, snapStartSeqNo, snapEndSeqNo SeqNo, evtHandler StreamObserver, cb OpenStreamCallback) (PendingOp, error) {\n\tvar req *memdQRequest\n\thandler := func(resp *memdQResponse, _ *memdQRequest, err error) {\n\t\tif resp != nil && resp.Magic == resMagic {\n\t\t\t\/\/ This is the response to the open stream request.\n\t\t\tif err != nil {\n\t\t\t\treq.Cancel()\n\n\t\t\t\t\/\/ All client errors are handled by the StreamObserver\n\t\t\t\tcb(nil, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnumEntries := len(resp.Value) \/ 16\n\t\t\tentries := make([]FailoverEntry, numEntries)\n\t\t\tfor i := 0; i < numEntries; i++ {\n\t\t\t\tentries[i] = FailoverEntry{\n\t\t\t\t\tVbUuid: VbUuid(binary.BigEndian.Uint64(resp.Value[i*16+0:])),\n\t\t\t\t\tSeqNo: SeqNo(binary.BigEndian.Uint64(resp.Value[i*16+8:])),\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tcb(entries, nil)\n\t\t\treturn\n\t\t}\n\n\t\tif err != nil {\n\t\t\treq.Cancel()\n\t\t\tevtHandler.End(vbId, err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ This is one of the stream events\n\t\tswitch resp.Opcode {\n\t\tcase cmdDcpSnapshotMarker:\n\t\t\tvbId := uint16(resp.Vbucket)\n\t\t\tnewStartSeqNo := binary.BigEndian.Uint64(resp.Extras[0:])\n\t\t\tnewEndSeqNo := binary.BigEndian.Uint64(resp.Extras[8:])\n\t\t\tsnapshotType := binary.BigEndian.Uint32(resp.Extras[16:])\n\t\t\tevtHandler.SnapshotMarker(newStartSeqNo, newEndSeqNo, vbId, SnapshotState(snapshotType))\n\t\tcase cmdDcpMutation:\n\t\t\tvbId := uint16(resp.Vbucket)\n\t\t\tseqNo := binary.BigEndian.Uint64(resp.Extras[0:])\n\t\t\trevNo := binary.BigEndian.Uint64(resp.Extras[8:])\n\t\t\tflags := binary.BigEndian.Uint32(resp.Extras[16:])\n\t\t\texpiry := binary.BigEndian.Uint32(resp.Extras[20:])\n\t\t\tlockTime := binary.BigEndian.Uint32(resp.Extras[24:])\n\t\t\tevtHandler.Mutation(seqNo, revNo, flags, expiry, lockTime, resp.Cas, resp.Datatype, vbId, resp.Key, resp.Value)\n\t\tcase cmdDcpDeletion:\n\t\t\tvbId := uint16(resp.Vbucket)\n\t\t\tseqNo := binary.BigEndian.Uint64(resp.Extras[0:])\n\t\t\trevNo := binary.BigEndian.Uint64(resp.Extras[8:])\n\t\t\tevtHandler.Deletion(seqNo, revNo, resp.Cas, resp.Datatype, vbId, resp.Key, resp.Value)\n\t\tcase cmdDcpExpiration:\n\t\t\tvbId := uint16(resp.Vbucket)\n\t\t\tseqNo := binary.BigEndian.Uint64(resp.Extras[0:])\n\t\t\trevNo := binary.BigEndian.Uint64(resp.Extras[8:])\n\t\t\tevtHandler.Expiration(seqNo, revNo, resp.Cas, vbId, resp.Key)\n\t\tcase cmdDcpStreamEnd:\n\t\t\tvbId := uint16(resp.Vbucket)\n\t\t\tcode := streamEndStatus(binary.BigEndian.Uint32(resp.Extras[0:]))\n\t\t\tevtHandler.End(vbId, getStreamEndError(code))\n\t\t\treq.Cancel()\n\t\t}\n\t}\n\n\textraBuf := make([]byte, 48)\n\tbinary.BigEndian.PutUint32(extraBuf[0:], 0)\n\tbinary.BigEndian.PutUint32(extraBuf[4:], 0)\n\tbinary.BigEndian.PutUint64(extraBuf[8:], uint64(startSeqNo))\n\tbinary.BigEndian.PutUint64(extraBuf[16:], uint64(endSeqNo))\n\tbinary.BigEndian.PutUint64(extraBuf[24:], uint64(vbUuid))\n\tbinary.BigEndian.PutUint64(extraBuf[32:], uint64(snapStartSeqNo))\n\tbinary.BigEndian.PutUint64(extraBuf[40:], uint64(snapEndSeqNo))\n\n\treq = &memdQRequest{\n\t\tmemdPacket: memdPacket{\n\t\t\tMagic: reqMagic,\n\t\t\tOpcode: cmdDcpStreamReq,\n\t\t\tDatatype: 0,\n\t\t\tCas: 0,\n\t\t\tExtras: extraBuf,\n\t\t\tKey: nil,\n\t\t\tValue: nil,\n\t\t\tVbucket: vbId,\n\t\t},\n\t\tCallback: handler,\n\t\tReplicaIdx: 0,\n\t\tPersistent: true,\n\t}\n\treturn agent.dispatchOp(req)\n}\n\n\/\/ CloseStream shuts down an open stream for the specified VBucket.\nfunc (agent *Agent) CloseStream(vbId uint16, cb CloseStreamCallback) (PendingOp, error) {\n\thandler := func(_ *memdQResponse, _ *memdQRequest, err error) {\n\t\tcb(err)\n\t}\n\n\treq := &memdQRequest{\n\t\tmemdPacket: memdPacket{\n\t\t\tMagic: reqMagic,\n\t\t\tOpcode: cmdDcpCloseStream,\n\t\t\tDatatype: 0,\n\t\t\tCas: 0,\n\t\t\tExtras: nil,\n\t\t\tKey: nil,\n\t\t\tValue: nil,\n\t\t\tVbucket: vbId,\n\t\t},\n\t\tCallback: handler,\n\t\tReplicaIdx: 0,\n\t\tPersistent: false,\n\t}\n\treturn agent.dispatchOp(req)\n}\n\n\/\/ GetFailoverLog retrieves the fail-over log for a particular VBucket. This is used\n\/\/ to resume an interrupted stream after a node fail-over has occurred.\nfunc (agent *Agent) GetFailoverLog(vbId uint16, cb GetFailoverLogCallback) (PendingOp, error) {\n\thandler := func(resp *memdQResponse, _ *memdQRequest, err error) {\n\t\tif err != nil {\n\t\t\tcb(nil, err)\n\t\t\treturn\n\t\t}\n\n\t\tnumEntries := len(resp.Value) \/ 16\n\t\tentries := make([]FailoverEntry, numEntries)\n\t\tfor i := 0; i < numEntries; i++ {\n\t\t\tentries[i] = FailoverEntry{\n\t\t\t\tVbUuid: VbUuid(binary.BigEndian.Uint64(resp.Value[i*16+0:])),\n\t\t\t\tSeqNo: SeqNo(binary.BigEndian.Uint64(resp.Value[i*16+8:])),\n\t\t\t}\n\t\t}\n\t\tcb(entries, nil)\n\t}\n\n\treq := &memdQRequest{\n\t\tmemdPacket: memdPacket{\n\t\t\tMagic: reqMagic,\n\t\t\tOpcode: cmdDcpGetFailoverLog,\n\t\t\tDatatype: 0,\n\t\t\tCas: 0,\n\t\t\tExtras: nil,\n\t\t\tKey: nil,\n\t\t\tValue: nil,\n\t\t\tVbucket: vbId,\n\t\t},\n\t\tCallback: handler,\n\t\tReplicaIdx: 0,\n\t\tPersistent: false,\n\t}\n\treturn agent.dispatchOp(req)\n}\n\n\/\/ GetVbucketSeqnos returns the last checkpoint for a particular VBucket. This is useful\n\/\/ for starting a DCP stream from wherever the server currently is.\nfunc (agent *Agent) GetVbucketSeqnos(serverIdx int, state VbucketState, cb GetVBucketSeqnosCallback) (PendingOp, error) {\n\thandler := func(resp *memdQResponse, _ *memdQRequest, err error) {\n\t\tif err != nil {\n\t\t\tcb(nil, err)\n\t\t\treturn\n\t\t}\n\n\t\tvar vbs []VbSeqNoEntry\n\n\t\tnumVbs := len(resp.Value) \/ 10\n\t\tfor i := 0; i < numVbs; i++ {\n\t\t\tvbs = append(vbs, VbSeqNoEntry{\n\t\t\t\tVbId: binary.BigEndian.Uint16(resp.Value[i*10:]),\n\t\t\t\tSeqNo: SeqNo(binary.BigEndian.Uint64(resp.Value[i*10+2:])),\n\t\t\t})\n\t\t}\n\n\t\tcb(vbs, nil)\n\t}\n\n\textraBuf := make([]byte, 4)\n\tbinary.BigEndian.PutUint32(extraBuf[0:], uint32(state))\n\n\treq := &memdQRequest{\n\t\tmemdPacket: memdPacket{\n\t\t\tMagic: reqMagic,\n\t\t\tOpcode: cmdGetAllVBSeqnos,\n\t\t\tDatatype: 0,\n\t\t\tCas: 0,\n\t\t\tExtras: extraBuf,\n\t\t\tKey: nil,\n\t\t\tValue: nil,\n\t\t\tVbucket: 0,\n\t\t},\n\t\tCallback: handler,\n\t\tReplicaIdx: -serverIdx,\n\t\tPersistent: false,\n\t}\n\n\treturn agent.dispatchOp(req)\n}\n<commit_msg>Properly encode flags into the DCP open stream packet<commit_after>package gocbcore\n\nimport (\n\t\"encoding\/binary\"\n)\n\n\/\/ SnapshotState represents the state of a particular cluster snapshot.\ntype SnapshotState uint32\n\n\/\/ HasInMemory returns whether this snapshot is available in memory.\nfunc (s SnapshotState) HasInMemory() bool {\n\treturn uint32(s)&1 != 0\n}\n\n\/\/ HasOnDisk returns whether this snapshot is available on disk.\nfunc (s SnapshotState) HasOnDisk() bool {\n\treturn uint32(s)&2 != 0\n}\n\n\/\/ FailoverEntry represents a single entry in the server fail-over log.\ntype FailoverEntry struct {\n\tVbUuid VbUuid\n\tSeqNo SeqNo\n}\n\n\/\/ StreamObserver provides an interface to receive events from a running DCP stream.\ntype StreamObserver interface {\n\tSnapshotMarker(startSeqNo, endSeqNo uint64, vbId uint16, snapshotType SnapshotState)\n\tMutation(seqNo, revNo uint64, flags, expiry, lockTime uint32, cas uint64, datatype uint8, vbId uint16, key, value []byte)\n\tDeletion(seqNo, revNo, cas uint64, datatype uint8, vbId uint16, key, value []byte)\n\tExpiration(seqNo, revNo, cas uint64, vbId uint16, key []byte)\n\tEnd(vbId uint16, err error)\n}\n\n\/\/ OpenStreamCallback is invoked with the results of `OpenStream` operations.\ntype OpenStreamCallback func([]FailoverEntry, error)\n\n\/\/ CloseStreamCallback is invoked with the results of `CloseStream` operations.\ntype CloseStreamCallback func(error)\n\n\/\/ GetFailoverLogCallback is invoked with the results of `GetFailoverLog` operations.\ntype GetFailoverLogCallback func([]FailoverEntry, error)\n\n\/\/ VbSeqNoEntry represents a single GetVbucketSeqnos sequence number entry.\ntype VbSeqNoEntry struct {\n\tVbId uint16\n\tSeqNo SeqNo\n}\n\n\/\/ GetVBucketSeqnosCallback is invoked with the results of `GetVBucketSeqnos` operations.\ntype GetVBucketSeqnosCallback func([]VbSeqNoEntry, error)\n\n\/\/ OpenStream opens a DCP stream for a particular VBucket.\nfunc (agent *Agent) OpenStream(vbId uint16, flags DcpStreamAddFlag, vbUuid VbUuid, startSeqNo, endSeqNo, snapStartSeqNo, snapEndSeqNo SeqNo, evtHandler StreamObserver, cb OpenStreamCallback) (PendingOp, error) {\n\tvar req *memdQRequest\n\thandler := func(resp *memdQResponse, _ *memdQRequest, err error) {\n\t\tif resp != nil && resp.Magic == resMagic {\n\t\t\t\/\/ This is the response to the open stream request.\n\t\t\tif err != nil {\n\t\t\t\treq.Cancel()\n\n\t\t\t\t\/\/ All client errors are handled by the StreamObserver\n\t\t\t\tcb(nil, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tnumEntries := len(resp.Value) \/ 16\n\t\t\tentries := make([]FailoverEntry, numEntries)\n\t\t\tfor i := 0; i < numEntries; i++ {\n\t\t\t\tentries[i] = FailoverEntry{\n\t\t\t\t\tVbUuid: VbUuid(binary.BigEndian.Uint64(resp.Value[i*16+0:])),\n\t\t\t\t\tSeqNo: SeqNo(binary.BigEndian.Uint64(resp.Value[i*16+8:])),\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tcb(entries, nil)\n\t\t\treturn\n\t\t}\n\n\t\tif err != nil {\n\t\t\treq.Cancel()\n\t\t\tevtHandler.End(vbId, err)\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ This is one of the stream events\n\t\tswitch resp.Opcode {\n\t\tcase cmdDcpSnapshotMarker:\n\t\t\tvbId := uint16(resp.Vbucket)\n\t\t\tnewStartSeqNo := binary.BigEndian.Uint64(resp.Extras[0:])\n\t\t\tnewEndSeqNo := binary.BigEndian.Uint64(resp.Extras[8:])\n\t\t\tsnapshotType := binary.BigEndian.Uint32(resp.Extras[16:])\n\t\t\tevtHandler.SnapshotMarker(newStartSeqNo, newEndSeqNo, vbId, SnapshotState(snapshotType))\n\t\tcase cmdDcpMutation:\n\t\t\tvbId := uint16(resp.Vbucket)\n\t\t\tseqNo := binary.BigEndian.Uint64(resp.Extras[0:])\n\t\t\trevNo := binary.BigEndian.Uint64(resp.Extras[8:])\n\t\t\tflags := binary.BigEndian.Uint32(resp.Extras[16:])\n\t\t\texpiry := binary.BigEndian.Uint32(resp.Extras[20:])\n\t\t\tlockTime := binary.BigEndian.Uint32(resp.Extras[24:])\n\t\t\tevtHandler.Mutation(seqNo, revNo, flags, expiry, lockTime, resp.Cas, resp.Datatype, vbId, resp.Key, resp.Value)\n\t\tcase cmdDcpDeletion:\n\t\t\tvbId := uint16(resp.Vbucket)\n\t\t\tseqNo := binary.BigEndian.Uint64(resp.Extras[0:])\n\t\t\trevNo := binary.BigEndian.Uint64(resp.Extras[8:])\n\t\t\tevtHandler.Deletion(seqNo, revNo, resp.Cas, resp.Datatype, vbId, resp.Key, resp.Value)\n\t\tcase cmdDcpExpiration:\n\t\t\tvbId := uint16(resp.Vbucket)\n\t\t\tseqNo := binary.BigEndian.Uint64(resp.Extras[0:])\n\t\t\trevNo := binary.BigEndian.Uint64(resp.Extras[8:])\n\t\t\tevtHandler.Expiration(seqNo, revNo, resp.Cas, vbId, resp.Key)\n\t\tcase cmdDcpStreamEnd:\n\t\t\tvbId := uint16(resp.Vbucket)\n\t\t\tcode := streamEndStatus(binary.BigEndian.Uint32(resp.Extras[0:]))\n\t\t\tevtHandler.End(vbId, getStreamEndError(code))\n\t\t\treq.Cancel()\n\t\t}\n\t}\n\n\textraBuf := make([]byte, 48)\n\tbinary.BigEndian.PutUint32(extraBuf[0:], uint32(flags))\n\tbinary.BigEndian.PutUint32(extraBuf[4:], 0)\n\tbinary.BigEndian.PutUint64(extraBuf[8:], uint64(startSeqNo))\n\tbinary.BigEndian.PutUint64(extraBuf[16:], uint64(endSeqNo))\n\tbinary.BigEndian.PutUint64(extraBuf[24:], uint64(vbUuid))\n\tbinary.BigEndian.PutUint64(extraBuf[32:], uint64(snapStartSeqNo))\n\tbinary.BigEndian.PutUint64(extraBuf[40:], uint64(snapEndSeqNo))\n\n\treq = &memdQRequest{\n\t\tmemdPacket: memdPacket{\n\t\t\tMagic: reqMagic,\n\t\t\tOpcode: cmdDcpStreamReq,\n\t\t\tDatatype: 0,\n\t\t\tCas: 0,\n\t\t\tExtras: extraBuf,\n\t\t\tKey: nil,\n\t\t\tValue: nil,\n\t\t\tVbucket: vbId,\n\t\t},\n\t\tCallback: handler,\n\t\tReplicaIdx: 0,\n\t\tPersistent: true,\n\t}\n\treturn agent.dispatchOp(req)\n}\n\n\/\/ CloseStream shuts down an open stream for the specified VBucket.\nfunc (agent *Agent) CloseStream(vbId uint16, cb CloseStreamCallback) (PendingOp, error) {\n\thandler := func(_ *memdQResponse, _ *memdQRequest, err error) {\n\t\tcb(err)\n\t}\n\n\treq := &memdQRequest{\n\t\tmemdPacket: memdPacket{\n\t\t\tMagic: reqMagic,\n\t\t\tOpcode: cmdDcpCloseStream,\n\t\t\tDatatype: 0,\n\t\t\tCas: 0,\n\t\t\tExtras: nil,\n\t\t\tKey: nil,\n\t\t\tValue: nil,\n\t\t\tVbucket: vbId,\n\t\t},\n\t\tCallback: handler,\n\t\tReplicaIdx: 0,\n\t\tPersistent: false,\n\t}\n\treturn agent.dispatchOp(req)\n}\n\n\/\/ GetFailoverLog retrieves the fail-over log for a particular VBucket. This is used\n\/\/ to resume an interrupted stream after a node fail-over has occurred.\nfunc (agent *Agent) GetFailoverLog(vbId uint16, cb GetFailoverLogCallback) (PendingOp, error) {\n\thandler := func(resp *memdQResponse, _ *memdQRequest, err error) {\n\t\tif err != nil {\n\t\t\tcb(nil, err)\n\t\t\treturn\n\t\t}\n\n\t\tnumEntries := len(resp.Value) \/ 16\n\t\tentries := make([]FailoverEntry, numEntries)\n\t\tfor i := 0; i < numEntries; i++ {\n\t\t\tentries[i] = FailoverEntry{\n\t\t\t\tVbUuid: VbUuid(binary.BigEndian.Uint64(resp.Value[i*16+0:])),\n\t\t\t\tSeqNo: SeqNo(binary.BigEndian.Uint64(resp.Value[i*16+8:])),\n\t\t\t}\n\t\t}\n\t\tcb(entries, nil)\n\t}\n\n\treq := &memdQRequest{\n\t\tmemdPacket: memdPacket{\n\t\t\tMagic: reqMagic,\n\t\t\tOpcode: cmdDcpGetFailoverLog,\n\t\t\tDatatype: 0,\n\t\t\tCas: 0,\n\t\t\tExtras: nil,\n\t\t\tKey: nil,\n\t\t\tValue: nil,\n\t\t\tVbucket: vbId,\n\t\t},\n\t\tCallback: handler,\n\t\tReplicaIdx: 0,\n\t\tPersistent: false,\n\t}\n\treturn agent.dispatchOp(req)\n}\n\n\/\/ GetVbucketSeqnos returns the last checkpoint for a particular VBucket. This is useful\n\/\/ for starting a DCP stream from wherever the server currently is.\nfunc (agent *Agent) GetVbucketSeqnos(serverIdx int, state VbucketState, cb GetVBucketSeqnosCallback) (PendingOp, error) {\n\thandler := func(resp *memdQResponse, _ *memdQRequest, err error) {\n\t\tif err != nil {\n\t\t\tcb(nil, err)\n\t\t\treturn\n\t\t}\n\n\t\tvar vbs []VbSeqNoEntry\n\n\t\tnumVbs := len(resp.Value) \/ 10\n\t\tfor i := 0; i < numVbs; i++ {\n\t\t\tvbs = append(vbs, VbSeqNoEntry{\n\t\t\t\tVbId: binary.BigEndian.Uint16(resp.Value[i*10:]),\n\t\t\t\tSeqNo: SeqNo(binary.BigEndian.Uint64(resp.Value[i*10+2:])),\n\t\t\t})\n\t\t}\n\n\t\tcb(vbs, nil)\n\t}\n\n\textraBuf := make([]byte, 4)\n\tbinary.BigEndian.PutUint32(extraBuf[0:], uint32(state))\n\n\treq := &memdQRequest{\n\t\tmemdPacket: memdPacket{\n\t\t\tMagic: reqMagic,\n\t\t\tOpcode: cmdGetAllVBSeqnos,\n\t\t\tDatatype: 0,\n\t\t\tCas: 0,\n\t\t\tExtras: extraBuf,\n\t\t\tKey: nil,\n\t\t\tValue: nil,\n\t\t\tVbucket: 0,\n\t\t},\n\t\tCallback: handler,\n\t\tReplicaIdx: -serverIdx,\n\t\tPersistent: false,\n\t}\n\n\treturn agent.dispatchOp(req)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage admin\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/ajg\/form\"\n\t\"github.com\/tsuru\/tsuru\/cmd\"\n\t\"github.com\/tsuru\/tsuru\/cmd\/cmdtest\"\n\t\"github.com\/tsuru\/tsuru\/provision\/cluster\"\n\t\"gopkg.in\/check.v1\"\n)\n\nfunc (s *S) TestClusterAddRun(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\tcontext := cmd.Context{\n\t\tStdout: &stdout,\n\t\tStderr: &stderr,\n\t\tArgs: []string{\"c1\", \"myprov\"},\n\t}\n\ttrans := &cmdtest.ConditionalTransport{\n\t\tTransport: cmdtest.Transport{Status: http.StatusOK},\n\t\tCondFunc: func(req *http.Request) bool {\n\t\t\terr := req.ParseForm()\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tdec := form.NewDecoder(nil)\n\t\t\tdec.IgnoreCase(true)\n\t\t\tdec.IgnoreUnknownKeys(true)\n\t\t\tvar clus cluster.Cluster\n\t\t\terr = dec.DecodeValues(&clus, req.Form)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Assert(clus, check.DeepEquals, cluster.Cluster{\n\t\t\t\tName: \"c1\",\n\t\t\t\tCaCert: []byte(\"cadata\"),\n\t\t\t\tClientCert: []byte(\"certdata\"),\n\t\t\t\tClientKey: []byte(\"keydata\"),\n\t\t\t\tCustomData: map[string]string{\"a\": \"b\", \"c\": \"d\"},\n\t\t\t\tAddresses: []string{\"addr1\", \"addr2\"},\n\t\t\t\tPools: []string{\"p1\", \"p2\"},\n\t\t\t\tDefault: true,\n\t\t\t\tProvisioner: \"myprov\",\n\t\t\t\tCreateData: map[string]string{\"iaas\": \"dockermachine\"},\n\t\t\t})\n\t\t\treturn req.URL.Path == \"\/1.3\/provisioner\/clusters\" && req.Method == \"POST\"\n\t\t},\n\t}\n\tmanager := cmd.NewManager(\"admin\", \"0.1\", \"admin-ver\", &stdout, &stderr, nil, nil)\n\tclient := cmd.NewClient(&http.Client{Transport: trans}, nil, manager)\n\tmyCmd := ClusterAdd{}\n\tdir, err := ioutil.TempDir(\"\", \"tsuru\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.RemoveAll(dir)\n\terr = ioutil.WriteFile(filepath.Join(dir, \"ca\"), []byte(\"cadata\"), 0600)\n\tc.Assert(err, check.IsNil)\n\terr = ioutil.WriteFile(filepath.Join(dir, \"cert\"), []byte(\"certdata\"), 0600)\n\tc.Assert(err, check.IsNil)\n\terr = ioutil.WriteFile(filepath.Join(dir, \"key\"), []byte(\"keydata\"), 0600)\n\tc.Assert(err, check.IsNil)\n\terr = myCmd.Flags().Parse(true, []string{\n\t\t\"--cacert\", filepath.Join(dir, \"ca\"),\n\t\t\"--clientcert\", filepath.Join(dir, \"cert\"),\n\t\t\"--clientkey\", filepath.Join(dir, \"key\"),\n\t\t\"--addr\", \"addr1\",\n\t\t\"--addr\", \"addr2\",\n\t\t\"--pool\", \"p1\",\n\t\t\"--pool\", \"p2\",\n\t\t\"--custom\", \"a=b\",\n\t\t\"--custom\", \"c=d\",\n\t\t\"--create-data\", \"iaas=dockermachine\",\n\t\t\"--default\",\n\t})\n\tc.Assert(err, check.IsNil)\n\terr = myCmd.Run(&context, client)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(stdout.String(), check.Equals, \"Cluster successfully added.\\n\")\n}\n\nfunc (s *S) TestClusterUpdateRun(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\tcontext := cmd.Context{\n\t\tStdout: &stdout,\n\t\tStderr: &stderr,\n\t\tArgs: []string{\"c1\", \"myprov\"},\n\t}\n\ttrans := &cmdtest.ConditionalTransport{\n\t\tTransport: cmdtest.Transport{Status: http.StatusOK},\n\t\tCondFunc: func(req *http.Request) bool {\n\t\t\terr := req.ParseForm()\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tdec := form.NewDecoder(nil)\n\t\t\tdec.IgnoreCase(true)\n\t\t\tdec.IgnoreUnknownKeys(true)\n\t\t\tvar clus cluster.Cluster\n\t\t\terr = dec.DecodeValues(&clus, req.Form)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Assert(clus, check.DeepEquals, cluster.Cluster{\n\t\t\t\tName: \"c1\",\n\t\t\t\tCaCert: []byte(\"cadata\"),\n\t\t\t\tClientCert: []byte(\"certdata\"),\n\t\t\t\tClientKey: []byte(\"keydata\"),\n\t\t\t\tCustomData: map[string]string{\"a\": \"b\", \"c\": \"d\"},\n\t\t\t\tAddresses: []string{\"addr1\", \"addr2\"},\n\t\t\t\tPools: []string{\"p1\", \"p2\"},\n\t\t\t\tDefault: true,\n\t\t\t\tProvisioner: \"myprov\",\n\t\t\t})\n\t\t\treturn req.URL.Path == \"\/1.4\/provisioner\/clusters\/c1\" && req.Method == \"POST\"\n\t\t},\n\t}\n\tmanager := cmd.NewManager(\"admin\", \"0.1\", \"admin-ver\", &stdout, &stderr, nil, nil)\n\tclient := cmd.NewClient(&http.Client{Transport: trans}, nil, manager)\n\tmyCmd := ClusterUpdate{}\n\tdir, err := ioutil.TempDir(\"\", \"tsuru\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.RemoveAll(dir)\n\terr = ioutil.WriteFile(filepath.Join(dir, \"ca\"), []byte(\"cadata\"), 0600)\n\tc.Assert(err, check.IsNil)\n\terr = ioutil.WriteFile(filepath.Join(dir, \"cert\"), []byte(\"certdata\"), 0600)\n\tc.Assert(err, check.IsNil)\n\terr = ioutil.WriteFile(filepath.Join(dir, \"key\"), []byte(\"keydata\"), 0600)\n\tc.Assert(err, check.IsNil)\n\terr = myCmd.Flags().Parse(true, []string{\n\t\t\"--cacert\", filepath.Join(dir, \"ca\"),\n\t\t\"--clientcert\", filepath.Join(dir, \"cert\"),\n\t\t\"--clientkey\", filepath.Join(dir, \"key\"),\n\t\t\"--addr\", \"addr1\",\n\t\t\"--addr\", \"addr2\",\n\t\t\"--pool\", \"p1\",\n\t\t\"--pool\", \"p2\",\n\t\t\"--custom\", \"a=b\",\n\t\t\"--custom\", \"c=d\",\n\t\t\"--default\",\n\t})\n\tc.Assert(err, check.IsNil)\n\terr = myCmd.Run(&context, client)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(stdout.String(), check.Equals, \"Cluster successfully updated.\\n\")\n}\n\nfunc (s *S) TestClusterListRun(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\tcontext := cmd.Context{\n\t\tStdout: &stdout,\n\t\tStderr: &stderr,\n\t}\n\tclusters := []cluster.Cluster{{\n\t\tName: \"c1\",\n\t\tAddresses: []string{\"addr1\", \"addr2\"},\n\t\tCaCert: []byte(\"cacert\"),\n\t\tClientCert: []byte(\"clientcert\"),\n\t\tClientKey: []byte(\"clientkey\"),\n\t\tCustomData: map[string]string{\"namespace\": \"ns1\"},\n\t\tDefault: true,\n\t\tProvisioner: \"prov1\",\n\t}, {\n\t\tName: \"c2\",\n\t\tAddresses: []string{\"addr3\"},\n\t\tDefault: false,\n\t\tPools: []string{\"p1\", \"p2\"},\n\t\tProvisioner: \"prov2\",\n\t}}\n\tdata, err := json.Marshal(clusters)\n\tc.Assert(err, check.IsNil)\n\ttrans := &cmdtest.ConditionalTransport{\n\t\tTransport: cmdtest.Transport{Message: string(data), Status: http.StatusOK},\n\t\tCondFunc: func(req *http.Request) bool {\n\t\t\treturn req.URL.Path == \"\/1.3\/provisioner\/clusters\" && req.Method == \"GET\"\n\t\t},\n\t}\n\tmanager := cmd.NewManager(\"admin\", \"0.1\", \"admin-ver\", &stdout, &stderr, nil, nil)\n\tclient := cmd.NewClient(&http.Client{Transport: trans}, nil, manager)\n\tmyCmd := ClusterList{}\n\terr = myCmd.Run(&context, client)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(stdout.String(), check.Equals, `+------+-------------+-----------+---------------+---------+-------+\n| Name | Provisioner | Addresses | Custom Data | Default | Pools |\n+------+-------------+-----------+---------------+---------+-------+\n| c1 | prov1 | addr1 | namespace=ns1 | true | |\n| | | addr2 | | | |\n+------+-------------+-----------+---------------+---------+-------+\n| c2 | prov2 | addr3 | | false | p1 |\n| | | | | | p2 |\n+------+-------------+-----------+---------------+---------+-------+\n`)\n}\n\nfunc (s *S) TestClusterRemoveRun(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\tcontext := cmd.Context{\n\t\tStdout: &stdout,\n\t\tStderr: &stderr,\n\t\tArgs: []string{\"c1\"},\n\t}\n\ttrans := &cmdtest.ConditionalTransport{\n\t\tTransport: cmdtest.Transport{Status: http.StatusNoContent},\n\t\tCondFunc: func(req *http.Request) bool {\n\t\t\treturn req.URL.Path == \"\/1.3\/provisioner\/clusters\/c1\" && req.Method == \"DELETE\"\n\t\t},\n\t}\n\tmanager := cmd.NewManager(\"admin\", \"0.1\", \"admin-ver\", &stdout, &stderr, nil, nil)\n\tclient := cmd.NewClient(&http.Client{Transport: trans}, nil, manager)\n\tmyCmd := ClusterRemove{}\n\terr := myCmd.Run(&context, client)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(stdout.String(), check.Equals, \"Cluster successfully removed.\\n\")\n}\n<commit_msg>client: fixes cluster-remove test<commit_after>\/\/ Copyright 2017 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage admin\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/ajg\/form\"\n\t\"github.com\/tsuru\/tsuru\/cmd\"\n\t\"github.com\/tsuru\/tsuru\/cmd\/cmdtest\"\n\t\"github.com\/tsuru\/tsuru\/provision\/cluster\"\n\t\"gopkg.in\/check.v1\"\n)\n\nfunc (s *S) TestClusterAddRun(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\tcontext := cmd.Context{\n\t\tStdout: &stdout,\n\t\tStderr: &stderr,\n\t\tArgs: []string{\"c1\", \"myprov\"},\n\t}\n\ttrans := &cmdtest.ConditionalTransport{\n\t\tTransport: cmdtest.Transport{Status: http.StatusOK},\n\t\tCondFunc: func(req *http.Request) bool {\n\t\t\terr := req.ParseForm()\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tdec := form.NewDecoder(nil)\n\t\t\tdec.IgnoreCase(true)\n\t\t\tdec.IgnoreUnknownKeys(true)\n\t\t\tvar clus cluster.Cluster\n\t\t\terr = dec.DecodeValues(&clus, req.Form)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Assert(clus, check.DeepEquals, cluster.Cluster{\n\t\t\t\tName: \"c1\",\n\t\t\t\tCaCert: []byte(\"cadata\"),\n\t\t\t\tClientCert: []byte(\"certdata\"),\n\t\t\t\tClientKey: []byte(\"keydata\"),\n\t\t\t\tCustomData: map[string]string{\"a\": \"b\", \"c\": \"d\"},\n\t\t\t\tAddresses: []string{\"addr1\", \"addr2\"},\n\t\t\t\tPools: []string{\"p1\", \"p2\"},\n\t\t\t\tDefault: true,\n\t\t\t\tProvisioner: \"myprov\",\n\t\t\t\tCreateData: map[string]string{\"iaas\": \"dockermachine\"},\n\t\t\t})\n\t\t\treturn req.URL.Path == \"\/1.3\/provisioner\/clusters\" && req.Method == \"POST\"\n\t\t},\n\t}\n\tmanager := cmd.NewManager(\"admin\", \"0.1\", \"admin-ver\", &stdout, &stderr, nil, nil)\n\tclient := cmd.NewClient(&http.Client{Transport: trans}, nil, manager)\n\tmyCmd := ClusterAdd{}\n\tdir, err := ioutil.TempDir(\"\", \"tsuru\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.RemoveAll(dir)\n\terr = ioutil.WriteFile(filepath.Join(dir, \"ca\"), []byte(\"cadata\"), 0600)\n\tc.Assert(err, check.IsNil)\n\terr = ioutil.WriteFile(filepath.Join(dir, \"cert\"), []byte(\"certdata\"), 0600)\n\tc.Assert(err, check.IsNil)\n\terr = ioutil.WriteFile(filepath.Join(dir, \"key\"), []byte(\"keydata\"), 0600)\n\tc.Assert(err, check.IsNil)\n\terr = myCmd.Flags().Parse(true, []string{\n\t\t\"--cacert\", filepath.Join(dir, \"ca\"),\n\t\t\"--clientcert\", filepath.Join(dir, \"cert\"),\n\t\t\"--clientkey\", filepath.Join(dir, \"key\"),\n\t\t\"--addr\", \"addr1\",\n\t\t\"--addr\", \"addr2\",\n\t\t\"--pool\", \"p1\",\n\t\t\"--pool\", \"p2\",\n\t\t\"--custom\", \"a=b\",\n\t\t\"--custom\", \"c=d\",\n\t\t\"--create-data\", \"iaas=dockermachine\",\n\t\t\"--default\",\n\t})\n\tc.Assert(err, check.IsNil)\n\terr = myCmd.Run(&context, client)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(stdout.String(), check.Equals, \"Cluster successfully added.\\n\")\n}\n\nfunc (s *S) TestClusterUpdateRun(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\tcontext := cmd.Context{\n\t\tStdout: &stdout,\n\t\tStderr: &stderr,\n\t\tArgs: []string{\"c1\", \"myprov\"},\n\t}\n\ttrans := &cmdtest.ConditionalTransport{\n\t\tTransport: cmdtest.Transport{Status: http.StatusOK},\n\t\tCondFunc: func(req *http.Request) bool {\n\t\t\terr := req.ParseForm()\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tdec := form.NewDecoder(nil)\n\t\t\tdec.IgnoreCase(true)\n\t\t\tdec.IgnoreUnknownKeys(true)\n\t\t\tvar clus cluster.Cluster\n\t\t\terr = dec.DecodeValues(&clus, req.Form)\n\t\t\tc.Assert(err, check.IsNil)\n\t\t\tc.Assert(clus, check.DeepEquals, cluster.Cluster{\n\t\t\t\tName: \"c1\",\n\t\t\t\tCaCert: []byte(\"cadata\"),\n\t\t\t\tClientCert: []byte(\"certdata\"),\n\t\t\t\tClientKey: []byte(\"keydata\"),\n\t\t\t\tCustomData: map[string]string{\"a\": \"b\", \"c\": \"d\"},\n\t\t\t\tAddresses: []string{\"addr1\", \"addr2\"},\n\t\t\t\tPools: []string{\"p1\", \"p2\"},\n\t\t\t\tDefault: true,\n\t\t\t\tProvisioner: \"myprov\",\n\t\t\t})\n\t\t\treturn req.URL.Path == \"\/1.4\/provisioner\/clusters\/c1\" && req.Method == \"POST\"\n\t\t},\n\t}\n\tmanager := cmd.NewManager(\"admin\", \"0.1\", \"admin-ver\", &stdout, &stderr, nil, nil)\n\tclient := cmd.NewClient(&http.Client{Transport: trans}, nil, manager)\n\tmyCmd := ClusterUpdate{}\n\tdir, err := ioutil.TempDir(\"\", \"tsuru\")\n\tc.Assert(err, check.IsNil)\n\tdefer os.RemoveAll(dir)\n\terr = ioutil.WriteFile(filepath.Join(dir, \"ca\"), []byte(\"cadata\"), 0600)\n\tc.Assert(err, check.IsNil)\n\terr = ioutil.WriteFile(filepath.Join(dir, \"cert\"), []byte(\"certdata\"), 0600)\n\tc.Assert(err, check.IsNil)\n\terr = ioutil.WriteFile(filepath.Join(dir, \"key\"), []byte(\"keydata\"), 0600)\n\tc.Assert(err, check.IsNil)\n\terr = myCmd.Flags().Parse(true, []string{\n\t\t\"--cacert\", filepath.Join(dir, \"ca\"),\n\t\t\"--clientcert\", filepath.Join(dir, \"cert\"),\n\t\t\"--clientkey\", filepath.Join(dir, \"key\"),\n\t\t\"--addr\", \"addr1\",\n\t\t\"--addr\", \"addr2\",\n\t\t\"--pool\", \"p1\",\n\t\t\"--pool\", \"p2\",\n\t\t\"--custom\", \"a=b\",\n\t\t\"--custom\", \"c=d\",\n\t\t\"--default\",\n\t})\n\tc.Assert(err, check.IsNil)\n\terr = myCmd.Run(&context, client)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(stdout.String(), check.Equals, \"Cluster successfully updated.\\n\")\n}\n\nfunc (s *S) TestClusterListRun(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\tcontext := cmd.Context{\n\t\tStdout: &stdout,\n\t\tStderr: &stderr,\n\t}\n\tclusters := []cluster.Cluster{{\n\t\tName: \"c1\",\n\t\tAddresses: []string{\"addr1\", \"addr2\"},\n\t\tCaCert: []byte(\"cacert\"),\n\t\tClientCert: []byte(\"clientcert\"),\n\t\tClientKey: []byte(\"clientkey\"),\n\t\tCustomData: map[string]string{\"namespace\": \"ns1\"},\n\t\tDefault: true,\n\t\tProvisioner: \"prov1\",\n\t}, {\n\t\tName: \"c2\",\n\t\tAddresses: []string{\"addr3\"},\n\t\tDefault: false,\n\t\tPools: []string{\"p1\", \"p2\"},\n\t\tProvisioner: \"prov2\",\n\t}}\n\tdata, err := json.Marshal(clusters)\n\tc.Assert(err, check.IsNil)\n\ttrans := &cmdtest.ConditionalTransport{\n\t\tTransport: cmdtest.Transport{Message: string(data), Status: http.StatusOK},\n\t\tCondFunc: func(req *http.Request) bool {\n\t\t\treturn req.URL.Path == \"\/1.3\/provisioner\/clusters\" && req.Method == \"GET\"\n\t\t},\n\t}\n\tmanager := cmd.NewManager(\"admin\", \"0.1\", \"admin-ver\", &stdout, &stderr, nil, nil)\n\tclient := cmd.NewClient(&http.Client{Transport: trans}, nil, manager)\n\tmyCmd := ClusterList{}\n\terr = myCmd.Run(&context, client)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(stdout.String(), check.Equals, `+------+-------------+-----------+---------------+---------+-------+\n| Name | Provisioner | Addresses | Custom Data | Default | Pools |\n+------+-------------+-----------+---------------+---------+-------+\n| c1 | prov1 | addr1 | namespace=ns1 | true | |\n| | | addr2 | | | |\n+------+-------------+-----------+---------------+---------+-------+\n| c2 | prov2 | addr3 | | false | p1 |\n| | | | | | p2 |\n+------+-------------+-----------+---------------+---------+-------+\n`)\n}\n\nfunc (s *S) TestClusterRemoveRun(c *check.C) {\n\tvar stdout, stderr bytes.Buffer\n\tcontext := cmd.Context{\n\t\tStdout: &stdout,\n\t\tStderr: &stderr,\n\t\tArgs: []string{\"c1\"},\n\t}\n\ttrans := &cmdtest.ConditionalTransport{\n\t\tTransport: cmdtest.Transport{Status: http.StatusNoContent},\n\t\tCondFunc: func(req *http.Request) bool {\n\t\t\treturn req.URL.Path == \"\/1.3\/provisioner\/clusters\/c1\" && req.Method == \"DELETE\"\n\t\t},\n\t}\n\tmanager := cmd.NewManager(\"admin\", \"0.1\", \"admin-ver\", &stdout, &stderr, nil, nil)\n\tclient := cmd.NewClient(&http.Client{Transport: trans}, nil, manager)\n\tmyCmd := ClusterRemove{}\n\tmyCmd.Flags().Parse(true, []string{\"-y\"})\n\terr := myCmd.Run(&context, client)\n\tc.Assert(err, check.IsNil)\n\tc.Assert(stdout.String(), check.Equals, \"Cluster successfully removed.\\n\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/+build integration\n\npackage integration\n\nimport (\n\t\"crypto\/sha256\"\n\t\"encoding\/pem\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"path\/filepath\"\n\t\"testing\"\n\t\"time\"\n\n\tct \"github.com\/google\/certificate-transparency\/go\"\n\t\"github.com\/google\/certificate-transparency\/go\/client\"\n\t\"github.com\/google\/certificate-transparency\/go\/jsonclient\"\n\t\"github.com\/google\/certificate-transparency\/go\/merkletree\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar httpServerFlag = flag.String(\"ct_http_server\", \"localhost:8092\", \"Server address:port\")\nvar pubKey = flag.String(\"public_key_file\", \"\", \"Name of file containing log's public key\")\nvar testdata = flag.String(\"testdata\", \"testdata\", \"Name of directory with test data\")\nvar seed = flag.Int64(\"seed\", -1, \"Seed for random number generation\")\n\nfunc TestCTIntegration(t *testing.T) {\n\tflag.Parse()\n\tlogURI := \"http:\/\/\" + (*httpServerFlag)\n\tif *seed == -1 {\n\t\t*seed = time.Now().UTC().UnixNano() & 0xFFFFFFFF\n\t}\n\tfmt.Printf(\"Today's test has been brought to you by the letters C and T and the number %#x\\n\", *seed)\n\trand.Seed(*seed)\n\n\topts := jsonclient.Options{}\n\tif *pubKey != \"\" {\n\t\tpubkey, err := ioutil.ReadFile(*pubKey)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to get public key contents: %v\", err)\n\t\t}\n\t\topts.PublicKey = string(pubkey)\n\t}\n\tlogClient, err := client.New(logURI, nil, opts)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create LogClient instance: %v\", err)\n\t}\n\tctx := context.Background()\n\n\t\/\/ Stage 0: get accepted roots, which should just be the fake CA.\n\troots, err := logClient.GetAcceptedRoots(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get roots: %v\", err)\n\t}\n\tif len(roots) != 1 {\n\t\tt.Errorf(\"len(GetAcceptableRoots())=%d; want 1\", len(roots))\n\t}\n\n\t\/\/ Stage 1: get the STH, which should be empty.\n\tsth0, err := logClient.GetSTH(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get STH: %v\", err)\n\t}\n\tif sth0.Version != 0 {\n\t\tt.Errorf(\"sth.Version=%v; want V1(0)\", sth0.Version)\n\t}\n\tif sth0.TreeSize != 0 {\n\t\tt.Errorf(\"sth.TreeSize=%d; want 0\", sth0.TreeSize)\n\t}\n\tfmt.Printf(\"%v: Got STH(size=%d): roothash=%x\\n\", ctTime(sth0.Timestamp), sth0.TreeSize, sth0.SHA256RootHash)\n\n\t\/\/ Stage 2: add a single cert (the intermediate CA), get an SCT.\n\tchain0, err := getChain(\"int-ca.cert\")\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to load certificate: %v\", err)\n\t}\n\tsct, err := logClient.AddChain(ctx, chain0)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to AddChain(): %v\", err)\n\t}\n\t\/\/ Display the SCT\n\tfmt.Printf(\"%v: Uploaded int-ca.cert to %v log, got SCT\\n\", ctTime(sct.Timestamp), sct.SCTVersion)\n\n\t\/\/ Keep getting the STH until tree size becomes 1.\n\tsth1, err := awaitTreeSize(ctx, logClient, 1, true)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get STH for size=1: %v\", err)\n\t}\n\tfmt.Printf(\"%v: Got STH(size=%d): roothash=%x\\n\", ctTime(sth1.Timestamp), sth1.TreeSize, sth1.SHA256RootHash)\n\n\t\/\/ Stage 3: add a second cert, wait for tree size = 2\n\tchain1, err := getChain(\"leaf01.chain\")\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to load certificate: %v\", err)\n\t}\n\tsct, err = logClient.AddChain(ctx, chain1)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to AddChain(): %v\", err)\n\t}\n\tfmt.Printf(\"%v: Uploaded cert01.chain to %v log, got SCT\\n\", ctTime(sct.Timestamp), sct.SCTVersion)\n\tsth2, err := awaitTreeSize(ctx, logClient, 2, true)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get STH for size=1: %v\", err)\n\t}\n\tfmt.Printf(\"%v: Got STH(size=%d): roothash=%x\\n\", ctTime(sth2.Timestamp), sth2.TreeSize, sth2.SHA256RootHash)\n\n\t\/\/ Stage 4: get a consistency proof from size 1-> size 2.\n\tproof12, err := logClient.GetSTHConsistency(ctx, 1, 2)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to GetSTHConsistency(1, 2): %v\", err)\n\t}\n\t\/\/ sth2\n\t\/\/ \/ \\\n\t\/\/ sth1 => a b\n\t\/\/ | | |\n\t\/\/ d0 d0 d1\n\t\/\/ So consistency proof is [b] and we should have:\n\t\/\/ sth2 == SHA256(0x01 | sth1 | b)\n\tif len(proof12) != 1 {\n\t\tt.Fatalf(\"len(proof12)=%d; want 1\", len(proof12))\n\t}\n\tif err := checkCTConsistencyProof(sth1, sth2, proof12); err != nil {\n\t\tt.Fatalf(\"consistency proof verification failed: %v\", err)\n\t}\n\n\t\/\/ Stage 5: add certificates 2, 3, 4, 5,...N, for some random N in [4,20]\n\tatLeast := 4\n\tcount := atLeast + rand.Intn(20-atLeast)\n\tfor i := 2; i <= count; i++ {\n\t\tfilename := fmt.Sprintf(\"leaf%02d.chain\", i)\n\t\tchain, err := getChain(filename)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to load certificate: %v\", err)\n\t\t}\n\t\tsct, err = logClient.AddChain(ctx, chain)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to AddChain(): %v\", err)\n\t\t}\n\t\tfmt.Printf(\"%v: Uploaded %s to %v log, got SCT\\n\", ctTime(sct.Timestamp), filename, sct.SCTVersion)\n\t}\n\n\t\/\/ Stage 6: keep getting the STH until tree size becomes 1 + N (allows for int-ca.cert).\n\tcount++\n\tsthN, err := awaitTreeSize(ctx, logClient, uint64(count), true)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get STH for size=%d: %v\", count, err)\n\t}\n\tfmt.Printf(\"%v: Got STH(size=%d): roothash=%x\\n\", ctTime(sthN.Timestamp), sthN.TreeSize, sthN.SHA256RootHash)\n\n\t\/\/ Stage 7: get a consistency proof from 2->N.\n\tproof2N, err := logClient.GetSTHConsistency(ctx, 2, uint64(count))\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to GetSTHConsistency(2, %d): %v\", count, err)\n\t}\n\tfmt.Printf(\"Proof 2->%d: %x\\n\", count, proof2N)\n\tif err := checkCTConsistencyProof(sth2, sthN, proof2N); err != nil {\n\t\tt.Fatalf(\"consistency proof verification failed: %v\", err)\n\t}\n\n\t\/\/ Stage 8: get entries [1, N]\n\t\/\/ TODO(drysdale)\n\n\t\/\/ Stage 9: get an audit proof for cert M, randomly chosen in [1,N]\n\t\/\/ TODO(drysdale)\n\n\t\/\/ Stage 10: attempt to upload a corrupt certificate.\n\tcorruptAt := len(chain1[0].Data) - 3\n\tchain1[0].Data[corruptAt] = (chain1[0].Data[corruptAt] + 1)\n\tsct, err = logClient.AddChain(ctx, chain1)\n\tif err == nil {\n\t\tt.Fatalf(\"AddChain(corrupt-cert)=%+v,nil; want error\", sct)\n\t}\n\tfmt.Printf(\"AddChain(corrupt-cert)=nil,%v\\n\", err)\n}\n\nfunc ctTime(ts uint64) time.Time {\n\tsecs := int64(ts \/ 1000)\n\tmsecs := int64(ts % 1000)\n\treturn time.Unix(secs, msecs*1000000)\n}\n\nfunc signatureToString(signed *ct.DigitallySigned) string {\n\treturn fmt.Sprintf(\"Signature: Hash=%v Sign=%v Value=%x\", signed.Algorithm.Hash, signed.Algorithm.Signature, signed.Signature)\n}\n\nfunc certsFromPEM(data []byte) []ct.ASN1Cert {\n\tvar chain []ct.ASN1Cert\n\tfor {\n\t\tvar block *pem.Block\n\t\tblock, data = pem.Decode(data)\n\t\tif block == nil {\n\t\t\tbreak\n\t\t}\n\t\tif block.Type == \"CERTIFICATE\" {\n\t\t\tchain = append(chain, ct.ASN1Cert{Data: block.Bytes})\n\t\t}\n\t}\n\treturn chain\n}\n\nfunc getChain(path string) ([]ct.ASN1Cert, error) {\n\tcertdata, err := ioutil.ReadFile(filepath.Join(*testdata, path))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to load certificate: %v\", err)\n\t}\n\treturn certsFromPEM(certdata), nil\n}\n\nfunc awaitTreeSize(ctx context.Context, logClient *client.LogClient, size uint64, exact bool) (*ct.SignedTreeHead, error) {\n\tvar sth *ct.SignedTreeHead\n\tfor sth == nil || sth.TreeSize < size {\n\t\ttime.Sleep(200 * time.Millisecond)\n\t\tvar err error\n\t\tsth, err = logClient.GetSTH(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to get STH: %v\", err)\n\t\t}\n\t}\n\tif exact && sth.TreeSize != size {\n\t\treturn nil, fmt.Errorf(\"sth.TreeSize=%d; want 1\", sth.TreeSize)\n\t}\n\treturn sth, nil\n}\n\nfunc checkCTConsistencyProof(sth1, sth2 *ct.SignedTreeHead, proof [][]byte) error {\n\tverifier := merkletree.NewMerkleVerifier(func(data []byte) []byte {\n\t\thash := sha256.Sum256(data)\n\t\treturn hash[:]\n\t})\n\treturn verifier.VerifyConsistencyProof(int64(sth1.TreeSize), int64(sth2.TreeSize),\n\t\tsth1.SHA256RootHash[:], sth2.SHA256RootHash[:], proof)\n}\n<commit_msg>integration\/ct: do get-entries<commit_after>\/\/+build integration\n\npackage integration\n\nimport (\n\t\"bytes\"\n\t\"crypto\/sha256\"\n\t\"encoding\/hex\"\n\t\"encoding\/pem\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"math\/rand\"\n\t\"path\/filepath\"\n\t\"testing\"\n\t\"time\"\n\n\tct \"github.com\/google\/certificate-transparency\/go\"\n\t\"github.com\/google\/certificate-transparency\/go\/client\"\n\t\"github.com\/google\/certificate-transparency\/go\/jsonclient\"\n\t\"github.com\/google\/certificate-transparency\/go\/merkletree\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nvar httpServerFlag = flag.String(\"ct_http_server\", \"localhost:8092\", \"Server address:port\")\nvar pubKey = flag.String(\"public_key_file\", \"\", \"Name of file containing log's public key\")\nvar testdata = flag.String(\"testdata\", \"testdata\", \"Name of directory with test data\")\nvar seed = flag.Int64(\"seed\", -1, \"Seed for random number generation\")\n\nfunc TestCTIntegration(t *testing.T) {\n\tflag.Parse()\n\tlogURI := \"http:\/\/\" + (*httpServerFlag)\n\tif *seed == -1 {\n\t\t*seed = time.Now().UTC().UnixNano() & 0xFFFFFFFF\n\t}\n\tfmt.Printf(\"Today's test has been brought to you by the letters C and T and the number %#x\\n\", *seed)\n\trand.Seed(*seed)\n\n\topts := jsonclient.Options{}\n\tif *pubKey != \"\" {\n\t\tpubkey, err := ioutil.ReadFile(*pubKey)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to get public key contents: %v\", err)\n\t\t}\n\t\topts.PublicKey = string(pubkey)\n\t}\n\tlogClient, err := client.New(logURI, nil, opts)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to create LogClient instance: %v\", err)\n\t}\n\tctx := context.Background()\n\n\t\/\/ Stage 0: get accepted roots, which should just be the fake CA.\n\troots, err := logClient.GetAcceptedRoots(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get roots: %v\", err)\n\t}\n\tif len(roots) != 1 {\n\t\tt.Errorf(\"len(GetAcceptableRoots())=%d; want 1\", len(roots))\n\t}\n\n\t\/\/ Stage 1: get the STH, which should be empty.\n\tsth0, err := logClient.GetSTH(ctx)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get STH: %v\", err)\n\t}\n\tif sth0.Version != 0 {\n\t\tt.Errorf(\"sth.Version=%v; want V1(0)\", sth0.Version)\n\t}\n\tif sth0.TreeSize != 0 {\n\t\tt.Fatalf(\"sth.TreeSize=%d; want 0\", sth0.TreeSize)\n\t}\n\tfmt.Printf(\"%v: Got STH(size=%d): roothash=%x\\n\", ctTime(sth0.Timestamp), sth0.TreeSize, sth0.SHA256RootHash)\n\n\t\/\/ Stage 2: add a single cert (the intermediate CA), get an SCT.\n\tvar scts [21]*ct.SignedCertificateTimestamp \/\/ 0=int-ca, 1-20=leaves\n\tvar chain [21][]ct.ASN1Cert\n\tchain[0], err = getChain(\"int-ca.cert\")\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to load certificate: %v\", err)\n\t}\n\tscts[0], err = logClient.AddChain(ctx, chain[0])\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to AddChain(): %v\", err)\n\t}\n\t\/\/ Display the SCT\n\tfmt.Printf(\"%v: Uploaded int-ca.cert to %v log, got SCT\\n\", ctTime(scts[0].Timestamp), scts[0].SCTVersion)\n\n\t\/\/ Keep getting the STH until tree size becomes 1.\n\tsth1, err := awaitTreeSize(ctx, logClient, 1, true)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get STH for size=1: %v\", err)\n\t}\n\tfmt.Printf(\"%v: Got STH(size=%d): roothash=%x\\n\", ctTime(sth1.Timestamp), sth1.TreeSize, sth1.SHA256RootHash)\n\n\t\/\/ Stage 3: add a second cert, wait for tree size = 2\n\tchain[1], err = getChain(\"leaf01.chain\")\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to load certificate: %v\", err)\n\t}\n\tscts[1], err = logClient.AddChain(ctx, chain[1])\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to AddChain(): %v\", err)\n\t}\n\tfmt.Printf(\"%v: Uploaded cert01.chain to %v log, got SCT\\n\", ctTime(scts[1].Timestamp), scts[1].SCTVersion)\n\tsth2, err := awaitTreeSize(ctx, logClient, 2, true)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get STH for size=1: %v\", err)\n\t}\n\tfmt.Printf(\"%v: Got STH(size=%d): roothash=%x\\n\", ctTime(sth2.Timestamp), sth2.TreeSize, sth2.SHA256RootHash)\n\n\t\/\/ Stage 4: get a consistency proof from size 1-> size 2.\n\tproof12, err := logClient.GetSTHConsistency(ctx, 1, 2)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to GetSTHConsistency(1, 2): %v\", err)\n\t}\n\t\/\/ sth2\n\t\/\/ \/ \\\n\t\/\/ sth1 => a b\n\t\/\/ | | |\n\t\/\/ d0 d0 d1\n\t\/\/ So consistency proof is [b] and we should have:\n\t\/\/ sth2 == SHA256(0x01 | sth1 | b)\n\tif len(proof12) != 1 {\n\t\tt.Fatalf(\"len(proof12)=%d; want 1\", len(proof12))\n\t}\n\tif err := checkCTConsistencyProof(sth1, sth2, proof12); err != nil {\n\t\tt.Fatalf(\"consistency proof verification failed: %v\", err)\n\t}\n\n\t\/\/ Stage 5: add certificates 2, 3, 4, 5,...N, for some random N in [4,20]\n\tatLeast := 4\n\tcount := atLeast + rand.Intn(20-atLeast)\n\tfor i := 2; i <= count; i++ {\n\t\tfilename := fmt.Sprintf(\"leaf%02d.chain\", i)\n\t\tchain[i], err = getChain(filename)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Failed to load certificate: %v\", err)\n\t\t}\n\t\tscts[i], err = logClient.AddChain(ctx, chain[i])\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Failed to AddChain(): %v\", err)\n\t\t}\n\t\tfmt.Printf(\"%v: Uploaded %s to %v log, got SCT[%d]\\n\", ctTime(scts[i].Timestamp), filename, scts[i].SCTVersion, i)\n\t}\n\n\t\/\/ Stage 6: keep getting the STH until tree size becomes 1 + N (allows for int-ca.cert).\n\ttreeSize := 1 + count\n\tsthN, err := awaitTreeSize(ctx, logClient, uint64(treeSize), true)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to get STH for size=%d: %v\", treeSize, err)\n\t}\n\tfmt.Printf(\"%v: Got STH(size=%d): roothash=%x\\n\", ctTime(sthN.Timestamp), sthN.TreeSize, sthN.SHA256RootHash)\n\n\t\/\/ Stage 7: get a consistency proof from 2->(1+N).\n\tproof2N, err := logClient.GetSTHConsistency(ctx, 2, uint64(treeSize))\n\tif err != nil {\n\t\tt.Errorf(\"Failed to GetSTHConsistency(2, %d): %v\", treeSize, err)\n\t} else {\n\t\tfmt.Printf(\"Proof size 2->%d: %x\\n\", treeSize, proof2N)\n\t\tif err := checkCTConsistencyProof(sth2, sthN, proof2N); err != nil {\n\t\t\tt.Errorf(\"consistency proof verification failed: %v\", err)\n\t\t}\n\t}\n\n\t\/\/ Stage 8: get entries [1, N] (start at 1 to skip int-ca.cert)\n\tentries, err := logClient.GetEntries(ctx, 1, int64(count))\n\tif err != nil {\n\t\tt.Errorf(\"Failed to GetEntries(1, %d): %v\", count, err)\n\t} else {\n\t\tif len(entries) < count {\n\t\t\tt.Errorf(\"Fewer entries (%d) retrieved than expected (%d)\", len(entries), count)\n\t\t}\n\t\tfor i, entry := range entries {\n\t\t\tleaf := entry.Leaf\n\t\t\tts := leaf.TimestampedEntry\n\t\t\tfmt.Printf(\"Entry[%d] = {Index:%d Leaf:{Version:%v TS:{EntryType:%v Timestamp:%v}}}\\n\", 1+i, entry.Index, leaf.Version, ts.EntryType, ctTime(ts.Timestamp))\n\t\t\tif leaf.Version != 0 {\n\t\t\t\tt.Errorf(\"leaf[%d].Version=%v; want V1(0)\", i, leaf.Version)\n\t\t\t}\n\t\t\tif leaf.LeafType != ct.TimestampedEntryLeafType {\n\t\t\t\tt.Errorf(\"leaf[%d].Version=%v; want TimestampedEntryLeafType\", i, leaf.LeafType)\n\t\t\t}\n\n\t\t\tif ts.EntryType != ct.X509LogEntryType {\n\t\t\t\tt.Errorf(\"leaf[%d].ts.EntryType=%v; want X509LogEntryType\", i, ts.EntryType)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t\/\/ This assumes that the added entries are sequenced in order.\n\t\t\tif !bytes.Equal(ts.X509Entry.Data, chain[i+1][0].Data) {\n\t\t\t\tt.Errorf(\"leaf[%d].ts.X509Entry differs from originally uploaded cert\", i)\n\t\t\t\tt.Errorf(\"\\tuploaded: %s\", hex.EncodeToString(chain[i+1][0].Data))\n\t\t\t\tt.Errorf(\"\\tretrieved: %s\", hex.EncodeToString(ts.X509Entry.Data))\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ Stage 9: get an audit proof for each certificate we have an SCT for.\n\tfor i := 1; i <= count; i++ {\n\t\tsct := scts[i]\n\t\tfmt.Printf(\"Inclusion proof leaf %d @ %d -> root %d = \", i, sct.Timestamp, sthN.TreeSize)\n\t\t\/\/ Calculate leaf hash = SHA256(0x00 | d[0])\n\t\thash := []byte{0x00}\n\t\t\/\/ TODO(drysdale): build leaf hash\n\t\trsp, err := logClient.GetProofByHash(ctx, hash, sthN.TreeSize)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"<fail: %v>\\n\", err)\n\t\t\tt.Errorf(\"GetProofByHash(sct[%d], size=%d)=nil,%v\", i, sthN.TreeSize, err)\n\t\t\tcontinue\n\t\t}\n\t\tif rsp.LeafIndex != int64(i) {\n\t\t\tfmt.Printf(\"<fail: wrong index>\\n\", err)\n\t\t\tt.Errorf(\"GetProofByHash(sct[%d], size=%d) has LeafIndex %d\", i, sthN.TreeSize, rsp.LeafIndex)\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Printf(\"%x\\n\", rsp.AuditPath)\n\t\t\/\/ TODO(drysdale): check inclusion proof\n\t\t\/*\n\t\t\tif err := checkCTInclusionProof(i, sthN.TreeSize, rsp.AuditPath, sthN.SHA256RootHash, @@leaf); err != nil {\n\t\t\t\tt.Errorf(\"inclusion proof verification failed: %v\", err)\n\t\t\t}\n\t\t*\/\n\t}\n\n\t\/\/ Stage 10: attempt to upload a corrupt certificate.\n\tcorruptChain := make([]ct.ASN1Cert, len(chain[1]))\n\tcopy(corruptChain, chain[1])\n\tcorruptAt := len(corruptChain[0].Data) - 3\n\tcorruptChain[0].Data[corruptAt] = (corruptChain[0].Data[corruptAt] + 1)\n\tif sct, err := logClient.AddChain(ctx, corruptChain); err == nil {\n\t\tt.Fatalf(\"AddChain(corrupt-cert)=%+v,nil; want error\", sct)\n\t} else {\n\t\tfmt.Printf(\"AddChain(corrupt-cert)=nil,%v\\n\", err)\n\t}\n}\n\nfunc ctTime(ts uint64) time.Time {\n\tsecs := int64(ts \/ 1000)\n\tmsecs := int64(ts % 1000)\n\treturn time.Unix(secs, msecs*1000000)\n}\n\nfunc signatureToString(signed *ct.DigitallySigned) string {\n\treturn fmt.Sprintf(\"Signature: Hash=%v Sign=%v Value=%x\", signed.Algorithm.Hash, signed.Algorithm.Signature, signed.Signature)\n}\n\nfunc certsFromPEM(data []byte) []ct.ASN1Cert {\n\tvar chain []ct.ASN1Cert\n\tfor {\n\t\tvar block *pem.Block\n\t\tblock, data = pem.Decode(data)\n\t\tif block == nil {\n\t\t\tbreak\n\t\t}\n\t\tif block.Type == \"CERTIFICATE\" {\n\t\t\tchain = append(chain, ct.ASN1Cert{Data: block.Bytes})\n\t\t}\n\t}\n\treturn chain\n}\n\nfunc getChain(path string) ([]ct.ASN1Cert, error) {\n\tcertdata, err := ioutil.ReadFile(filepath.Join(*testdata, path))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to load certificate: %v\", err)\n\t}\n\treturn certsFromPEM(certdata), nil\n}\n\nfunc awaitTreeSize(ctx context.Context, logClient *client.LogClient, size uint64, exact bool) (*ct.SignedTreeHead, error) {\n\tvar sth *ct.SignedTreeHead\n\tfor sth == nil || sth.TreeSize < size {\n\t\ttime.Sleep(200 * time.Millisecond)\n\t\tvar err error\n\t\tsth, err = logClient.GetSTH(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to get STH: %v\", err)\n\t\t}\n\t}\n\tif exact && sth.TreeSize != size {\n\t\treturn nil, fmt.Errorf(\"sth.TreeSize=%d; want 1\", sth.TreeSize)\n\t}\n\treturn sth, nil\n}\n\nfunc checkCTInclusionProof(leafIndex, treeSize int64, proof [][]byte, root []byte, leaf []byte) error {\n\tverifier := merkletree.NewMerkleVerifier(func(data []byte) []byte {\n\t\thash := sha256.Sum256(data)\n\t\treturn hash[:]\n\t})\n\treturn verifier.VerifyInclusionProof(leafIndex, treeSize, proof, root, leaf)\n}\n\nfunc checkCTConsistencyProof(sth1, sth2 *ct.SignedTreeHead, proof [][]byte) error {\n\tverifier := merkletree.NewMerkleVerifier(func(data []byte) []byte {\n\t\thash := sha256.Sum256(data)\n\t\treturn hash[:]\n\t})\n\treturn verifier.VerifyConsistencyProof(int64(sth1.TreeSize), int64(sth2.TreeSize),\n\t\tsth1.SHA256RootHash[:], sth2.SHA256RootHash[:], proof)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage framework\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/google\/cadvisor\/client\"\n\t\"github.com\/google\/cadvisor\/client\/v2\"\n\t\"github.com\/google\/cadvisor\/integration\/common\"\n)\n\nvar host = flag.String(\"host\", \"localhost\", \"Address of the host being tested\")\nvar port = flag.Int(\"port\", 8080, \"Port of the application on the host being tested\")\n\n\/\/ Integration test framework.\ntype Framework interface {\n\t\/\/ Clean the framework state.\n\tCleanup()\n\n\t\/\/ The testing.T used by the framework and the current test.\n\tT() *testing.T\n\n\t\/\/ Returns the hostname being tested.\n\tHostname() HostnameInfo\n\n\t\/\/ Returns the Docker actions for the test framework.\n\tDocker() DockerActions\n\n\t\/\/ Returns the shell actions for the test framework.\n\tShell() ShellActions\n\n\t\/\/ Returns the cAdvisor actions for the test framework.\n\tCadvisor() CadvisorActions\n}\n\n\/\/ Instantiates a Framework. Cleanup *must* be called. Class is thread-compatible.\n\/\/ All framework actions report fatal errors on the t specified at creation time.\n\/\/\n\/\/ Typical use:\n\/\/\n\/\/ func TestFoo(t *testing.T) {\n\/\/ \tfm := framework.New(t)\n\/\/ \tdefer fm.Cleanup()\n\/\/ ... actual test ...\n\/\/ }\nfunc New(t *testing.T) Framework {\n\t\/\/ All integration tests are large.\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping framework test in short mode\")\n\t}\n\n\t\/\/ Try to see if non-localhost hosts are GCE instances.\n\tvar gceInstanceName string\n\thostname := *host\n\tif hostname != \"localhost\" {\n\t\tgceInstanceName = hostname\n\t\tgceIp, err := common.GetGceIp(hostname)\n\t\tif err == nil {\n\t\t\thostname = gceIp\n\t\t}\n\t}\n\n\tfm := &realFramework{\n\t\thostname: HostnameInfo{\n\t\t\tHost: hostname,\n\t\t\tPort: *port,\n\t\t\tGceInstanceName: gceInstanceName,\n\t\t},\n\t\tt: t,\n\t\tcleanups: make([]func(), 0),\n\t}\n\tfm.shellActions = shellActions{\n\t\tfm: fm,\n\t}\n\tfm.dockerActions = dockerActions{\n\t\tfm: fm,\n\t}\n\n\treturn fm\n}\n\nconst (\n\tAufs string = \"aufs\"\n\tOverlay string = \"overlay\"\n\tDeviceMapper string = \"devicemapper\"\n\tUnknown string = \"\"\n)\n\ntype DockerActions interface {\n\t\/\/ Run the no-op pause Docker container and return its ID.\n\tRunPause() string\n\n\t\/\/ Run the specified command in a Docker busybox container and return its ID.\n\tRunBusybox(cmd ...string) string\n\n\t\/\/ Runs a Docker container in the background. Uses the specified DockerRunArgs and command.\n\t\/\/ Returns the ID of the new container.\n\t\/\/\n\t\/\/ e.g.:\n\t\/\/ Run(DockerRunArgs{Image: \"busybox\"}, \"ping\", \"www.google.com\")\n\t\/\/ -> docker run busybox ping www.google.com\n\tRun(args DockerRunArgs, cmd ...string) string\n\tRunStress(args DockerRunArgs, cmd ...string) string\n\n\tVersion() []string\n\tStorageDriver() string\n}\n\ntype ShellActions interface {\n\t\/\/ Runs a specified command and arguments. Returns the stdout and stderr.\n\tRun(cmd string, args ...string) (string, string)\n\tRunStress(cmd string, args ...string) (string, string)\n}\n\ntype CadvisorActions interface {\n\t\/\/ Returns a cAdvisor client to the machine being tested.\n\tClient() *client.Client\n\tClientV2() *v2.Client\n}\n\ntype realFramework struct {\n\thostname HostnameInfo\n\tt *testing.T\n\tcadvisorClient *client.Client\n\tcadvisorClientV2 *v2.Client\n\n\tshellActions shellActions\n\tdockerActions dockerActions\n\n\t\/\/ Cleanup functions to call on Cleanup()\n\tcleanups []func()\n}\n\ntype shellActions struct {\n\tfm *realFramework\n}\n\ntype dockerActions struct {\n\tfm *realFramework\n}\n\ntype HostnameInfo struct {\n\tHost string\n\tPort int\n\tGceInstanceName string\n}\n\n\/\/ Returns: http:\/\/<host>:<port>\/\nfunc (self HostnameInfo) FullHostname() string {\n\treturn fmt.Sprintf(\"http:\/\/%s:%d\/\", self.Host, self.Port)\n}\n\nfunc (self *realFramework) T() *testing.T {\n\treturn self.t\n}\n\nfunc (self *realFramework) Hostname() HostnameInfo {\n\treturn self.hostname\n}\n\nfunc (self *realFramework) Shell() ShellActions {\n\treturn self.shellActions\n}\n\nfunc (self *realFramework) Docker() DockerActions {\n\treturn self.dockerActions\n}\n\nfunc (self *realFramework) Cadvisor() CadvisorActions {\n\treturn self\n}\n\n\/\/ Call all cleanup functions.\nfunc (self *realFramework) Cleanup() {\n\tfor _, cleanupFunc := range self.cleanups {\n\t\tcleanupFunc()\n\t}\n}\n\n\/\/ Gets a client to the cAdvisor being tested.\nfunc (self *realFramework) Client() *client.Client {\n\tif self.cadvisorClient == nil {\n\t\tcadvisorClient, err := client.NewClient(self.Hostname().FullHostname())\n\t\tif err != nil {\n\t\t\tself.t.Fatalf(\"Failed to instantiate the cAdvisor client: %v\", err)\n\t\t}\n\t\tself.cadvisorClient = cadvisorClient\n\t}\n\treturn self.cadvisorClient\n}\n\n\/\/ Gets a v2 client to the cAdvisor being tested.\nfunc (self *realFramework) ClientV2() *v2.Client {\n\tif self.cadvisorClientV2 == nil {\n\t\tcadvisorClientV2, err := v2.NewClient(self.Hostname().FullHostname())\n\t\tif err != nil {\n\t\t\tself.t.Fatalf(\"Failed to instantiate the cAdvisor client: %v\", err)\n\t\t}\n\t\tself.cadvisorClientV2 = cadvisorClientV2\n\t}\n\treturn self.cadvisorClientV2\n}\n\nfunc (self dockerActions) RunPause() string {\n\treturn self.Run(DockerRunArgs{\n\t\tImage: \"kubernetes\/pause\",\n\t})\n}\n\n\/\/ Run the specified command in a Docker busybox container.\nfunc (self dockerActions) RunBusybox(cmd ...string) string {\n\treturn self.Run(DockerRunArgs{\n\t\tImage: \"busybox\",\n\t}, cmd...)\n}\n\ntype DockerRunArgs struct {\n\t\/\/ Image to use.\n\tImage string\n\n\t\/\/ Arguments to the Docker CLI.\n\tArgs []string\n\n\tInnerArgs []string\n}\n\n\/\/ TODO(vmarmol): Use the Docker remote API.\n\/\/ TODO(vmarmol): Refactor a set of \"RunCommand\" actions.\n\/\/ Runs a Docker container in the background. Uses the specified DockerRunArgs and command.\n\/\/\n\/\/ e.g.:\n\/\/ RunDockerContainer(DockerRunArgs{Image: \"busybox\"}, \"ping\", \"www.google.com\")\n\/\/ -> docker run busybox ping www.google.com\nfunc (self dockerActions) Run(args DockerRunArgs, cmd ...string) string {\n\tdockerCommand := append(append(append([]string{\"docker\", \"run\", \"-d\"}, args.Args...), args.Image), cmd...)\n\n\toutput, _ := self.fm.Shell().Run(\"sudo\", dockerCommand...)\n\n\t\/\/ The last line is the container ID.\n\telements := strings.Fields(output)\n\tcontainerId := elements[len(elements)-1]\n\n\tself.fm.cleanups = append(self.fm.cleanups, func() {\n\t\tself.fm.Shell().Run(\"sudo\", \"docker\", \"rm\", \"-f\", containerId)\n\t})\n\treturn containerId\n}\n\nfunc (self dockerActions) Version() []string {\n\tdockerCommand := []string{\"docker\", \"version\", \"-f\", \"'{{.Server.Version}}'\"}\n\toutput, _ := self.fm.Shell().Run(\"sudo\", dockerCommand...)\n\tif len(output) != 1 {\n\t\tself.fm.T().Fatalf(\"need 1 arguments in output %v to get the version but have %v\", output, len(output))\n\t}\n\treturn strings.Split(output, \".\")\n}\n\nfunc (self dockerActions) StorageDriver() string {\n\tdockerCommand := []string{\"docker\", \"info\"}\n\toutput, _ := self.fm.Shell().Run(\"sudo\", dockerCommand...)\n\tif len(output) < 1 {\n\t\tself.fm.T().Fatalf(\"failed to find docker storage driver - %v\", output)\n\t}\n\tfor _, line := range strings.Split(output, \"\\n\") {\n\t\tline = strings.TrimSpace(line)\n\t\tif strings.HasPrefix(line, \"Storage Driver: \") {\n\t\t\tidx := strings.LastIndex(line, \": \") + 2\n\t\t\tdriver := line[idx:]\n\t\t\tswitch driver {\n\t\t\tcase Aufs:\n\t\t\t\treturn Aufs\n\t\t\tcase Overlay:\n\t\t\t\treturn Overlay\n\t\t\tcase DeviceMapper:\n\t\t\t\treturn DeviceMapper\n\t\t\tdefault:\n\t\t\t\treturn Unknown\n\t\t\t}\n\t\t}\n\t}\n\tself.fm.T().Fatalf(\"failed to find docker storage driver from info - %v\", output)\n\treturn Unknown\n}\n\nfunc (self dockerActions) RunStress(args DockerRunArgs, cmd ...string) string {\n\tdockerCommand := append(append(append(append([]string{\"docker\", \"run\", \"-m=4M\", \"-d\", \"-t\", \"-i\"}, args.Args...), args.Image), args.InnerArgs...), cmd...)\n\n\toutput, _ := self.fm.Shell().RunStress(\"sudo\", dockerCommand...)\n\n\t\/\/ The last line is the container ID.\n\tif len(output) < 1 {\n\t\tself.fm.T().Fatalf(\"need 1 arguments in output %v to get the name but have %v\", output, len(output))\n\t}\n\telements := strings.Fields(output)\n\tcontainerId := elements[len(elements)-1]\n\n\tself.fm.cleanups = append(self.fm.cleanups, func() {\n\t\tself.fm.Shell().Run(\"sudo\", \"docker\", \"rm\", \"-f\", containerId)\n\t})\n\treturn containerId\n}\n\nfunc (self shellActions) Run(command string, args ...string) (string, string) {\n\tvar cmd *exec.Cmd\n\tif self.fm.Hostname().Host == \"localhost\" {\n\t\t\/\/ Just run locally.\n\t\tcmd = exec.Command(command, args...)\n\t} else {\n\t\t\/\/ We must SSH to the remote machine and run the command.\n\t\targs = append(common.GetGCComputeArgs(\"ssh\", self.fm.Hostname().GceInstanceName, \"--\", command), args...)\n\t\tcmd = exec.Command(\"gcloud\", args...)\n\t}\n\tvar stdout bytes.Buffer\n\tvar stderr bytes.Buffer\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tself.fm.T().Fatalf(\"Failed to run %q %v in %q with error: %q. Stdout: %q, Stderr: %s\", command, args, self.fm.Hostname().Host, err, stdout.String(), stderr.String())\n\t\treturn \"\", \"\"\n\t}\n\treturn stdout.String(), stderr.String()\n}\n\nfunc (self shellActions) RunStress(command string, args ...string) (string, string) {\n\tvar cmd *exec.Cmd\n\tif self.fm.Hostname().Host == \"localhost\" {\n\t\t\/\/ Just run locally.\n\t\tcmd = exec.Command(command, args...)\n\t} else {\n\t\t\/\/ We must SSH to the remote machine and run the command.\n\t\targs = append(common.GetGCComputeArgs(\"ssh\", self.fm.Hostname().GceInstanceName, \"--\", command), args...)\n\t\tcmd = exec.Command(\"gcloud\", args...)\n\t}\n\tvar stdout bytes.Buffer\n\tvar stderr bytes.Buffer\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tself.fm.T().Logf(\"Ran %q %v in %q and received error: %q. Stdout: %q, Stderr: %s\", command, args, self.fm.Hostname().Host, err, stdout.String(), stderr.String())\n\t\treturn stdout.String(), stderr.String()\n\t}\n\treturn stdout.String(), stderr.String()\n}\n\n\/\/ Runs retryFunc until no error is returned. After dur time the last error is returned.\n\/\/ Note that the function does not timeout the execution of retryFunc when the limit is reached.\nfunc RetryForDuration(retryFunc func() error, dur time.Duration) error {\n\twaitUntil := time.Now().Add(dur)\n\tvar err error\n\tfor time.Now().Before(waitUntil) {\n\t\terr = retryFunc()\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn err\n}\n<commit_msg>fix version handling in e2e tests<commit_after>\/\/ Copyright 2014 Google Inc. All Rights Reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage framework\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\/exec\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com\/google\/cadvisor\/client\"\n\t\"github.com\/google\/cadvisor\/client\/v2\"\n\t\"github.com\/google\/cadvisor\/integration\/common\"\n)\n\nvar host = flag.String(\"host\", \"localhost\", \"Address of the host being tested\")\nvar port = flag.Int(\"port\", 8080, \"Port of the application on the host being tested\")\n\n\/\/ Integration test framework.\ntype Framework interface {\n\t\/\/ Clean the framework state.\n\tCleanup()\n\n\t\/\/ The testing.T used by the framework and the current test.\n\tT() *testing.T\n\n\t\/\/ Returns the hostname being tested.\n\tHostname() HostnameInfo\n\n\t\/\/ Returns the Docker actions for the test framework.\n\tDocker() DockerActions\n\n\t\/\/ Returns the shell actions for the test framework.\n\tShell() ShellActions\n\n\t\/\/ Returns the cAdvisor actions for the test framework.\n\tCadvisor() CadvisorActions\n}\n\n\/\/ Instantiates a Framework. Cleanup *must* be called. Class is thread-compatible.\n\/\/ All framework actions report fatal errors on the t specified at creation time.\n\/\/\n\/\/ Typical use:\n\/\/\n\/\/ func TestFoo(t *testing.T) {\n\/\/ \tfm := framework.New(t)\n\/\/ \tdefer fm.Cleanup()\n\/\/ ... actual test ...\n\/\/ }\nfunc New(t *testing.T) Framework {\n\t\/\/ All integration tests are large.\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping framework test in short mode\")\n\t}\n\n\t\/\/ Try to see if non-localhost hosts are GCE instances.\n\tvar gceInstanceName string\n\thostname := *host\n\tif hostname != \"localhost\" {\n\t\tgceInstanceName = hostname\n\t\tgceIp, err := common.GetGceIp(hostname)\n\t\tif err == nil {\n\t\t\thostname = gceIp\n\t\t}\n\t}\n\n\tfm := &realFramework{\n\t\thostname: HostnameInfo{\n\t\t\tHost: hostname,\n\t\t\tPort: *port,\n\t\t\tGceInstanceName: gceInstanceName,\n\t\t},\n\t\tt: t,\n\t\tcleanups: make([]func(), 0),\n\t}\n\tfm.shellActions = shellActions{\n\t\tfm: fm,\n\t}\n\tfm.dockerActions = dockerActions{\n\t\tfm: fm,\n\t}\n\n\treturn fm\n}\n\nconst (\n\tAufs string = \"aufs\"\n\tOverlay string = \"overlay\"\n\tDeviceMapper string = \"devicemapper\"\n\tUnknown string = \"\"\n)\n\ntype DockerActions interface {\n\t\/\/ Run the no-op pause Docker container and return its ID.\n\tRunPause() string\n\n\t\/\/ Run the specified command in a Docker busybox container and return its ID.\n\tRunBusybox(cmd ...string) string\n\n\t\/\/ Runs a Docker container in the background. Uses the specified DockerRunArgs and command.\n\t\/\/ Returns the ID of the new container.\n\t\/\/\n\t\/\/ e.g.:\n\t\/\/ Run(DockerRunArgs{Image: \"busybox\"}, \"ping\", \"www.google.com\")\n\t\/\/ -> docker run busybox ping www.google.com\n\tRun(args DockerRunArgs, cmd ...string) string\n\tRunStress(args DockerRunArgs, cmd ...string) string\n\n\tVersion() []string\n\tStorageDriver() string\n}\n\ntype ShellActions interface {\n\t\/\/ Runs a specified command and arguments. Returns the stdout and stderr.\n\tRun(cmd string, args ...string) (string, string)\n\tRunStress(cmd string, args ...string) (string, string)\n}\n\ntype CadvisorActions interface {\n\t\/\/ Returns a cAdvisor client to the machine being tested.\n\tClient() *client.Client\n\tClientV2() *v2.Client\n}\n\ntype realFramework struct {\n\thostname HostnameInfo\n\tt *testing.T\n\tcadvisorClient *client.Client\n\tcadvisorClientV2 *v2.Client\n\n\tshellActions shellActions\n\tdockerActions dockerActions\n\n\t\/\/ Cleanup functions to call on Cleanup()\n\tcleanups []func()\n}\n\ntype shellActions struct {\n\tfm *realFramework\n}\n\ntype dockerActions struct {\n\tfm *realFramework\n}\n\ntype HostnameInfo struct {\n\tHost string\n\tPort int\n\tGceInstanceName string\n}\n\n\/\/ Returns: http:\/\/<host>:<port>\/\nfunc (self HostnameInfo) FullHostname() string {\n\treturn fmt.Sprintf(\"http:\/\/%s:%d\/\", self.Host, self.Port)\n}\n\nfunc (self *realFramework) T() *testing.T {\n\treturn self.t\n}\n\nfunc (self *realFramework) Hostname() HostnameInfo {\n\treturn self.hostname\n}\n\nfunc (self *realFramework) Shell() ShellActions {\n\treturn self.shellActions\n}\n\nfunc (self *realFramework) Docker() DockerActions {\n\treturn self.dockerActions\n}\n\nfunc (self *realFramework) Cadvisor() CadvisorActions {\n\treturn self\n}\n\n\/\/ Call all cleanup functions.\nfunc (self *realFramework) Cleanup() {\n\tfor _, cleanupFunc := range self.cleanups {\n\t\tcleanupFunc()\n\t}\n}\n\n\/\/ Gets a client to the cAdvisor being tested.\nfunc (self *realFramework) Client() *client.Client {\n\tif self.cadvisorClient == nil {\n\t\tcadvisorClient, err := client.NewClient(self.Hostname().FullHostname())\n\t\tif err != nil {\n\t\t\tself.t.Fatalf(\"Failed to instantiate the cAdvisor client: %v\", err)\n\t\t}\n\t\tself.cadvisorClient = cadvisorClient\n\t}\n\treturn self.cadvisorClient\n}\n\n\/\/ Gets a v2 client to the cAdvisor being tested.\nfunc (self *realFramework) ClientV2() *v2.Client {\n\tif self.cadvisorClientV2 == nil {\n\t\tcadvisorClientV2, err := v2.NewClient(self.Hostname().FullHostname())\n\t\tif err != nil {\n\t\t\tself.t.Fatalf(\"Failed to instantiate the cAdvisor client: %v\", err)\n\t\t}\n\t\tself.cadvisorClientV2 = cadvisorClientV2\n\t}\n\treturn self.cadvisorClientV2\n}\n\nfunc (self dockerActions) RunPause() string {\n\treturn self.Run(DockerRunArgs{\n\t\tImage: \"kubernetes\/pause\",\n\t})\n}\n\n\/\/ Run the specified command in a Docker busybox container.\nfunc (self dockerActions) RunBusybox(cmd ...string) string {\n\treturn self.Run(DockerRunArgs{\n\t\tImage: \"busybox\",\n\t}, cmd...)\n}\n\ntype DockerRunArgs struct {\n\t\/\/ Image to use.\n\tImage string\n\n\t\/\/ Arguments to the Docker CLI.\n\tArgs []string\n\n\tInnerArgs []string\n}\n\n\/\/ TODO(vmarmol): Use the Docker remote API.\n\/\/ TODO(vmarmol): Refactor a set of \"RunCommand\" actions.\n\/\/ Runs a Docker container in the background. Uses the specified DockerRunArgs and command.\n\/\/\n\/\/ e.g.:\n\/\/ RunDockerContainer(DockerRunArgs{Image: \"busybox\"}, \"ping\", \"www.google.com\")\n\/\/ -> docker run busybox ping www.google.com\nfunc (self dockerActions) Run(args DockerRunArgs, cmd ...string) string {\n\tdockerCommand := append(append(append([]string{\"docker\", \"run\", \"-d\"}, args.Args...), args.Image), cmd...)\n\n\toutput, _ := self.fm.Shell().Run(\"sudo\", dockerCommand...)\n\n\t\/\/ The last line is the container ID.\n\telements := strings.Fields(output)\n\tcontainerId := elements[len(elements)-1]\n\n\tself.fm.cleanups = append(self.fm.cleanups, func() {\n\t\tself.fm.Shell().Run(\"sudo\", \"docker\", \"rm\", \"-f\", containerId)\n\t})\n\treturn containerId\n}\n\nfunc (self dockerActions) Version() []string {\n\tdockerCommand := []string{\"docker\", \"version\", \"-f\", \"'{{.Server.Version}}'\"}\n\toutput, _ := self.fm.Shell().Run(\"sudo\", dockerCommand...)\n\toutput = strings.TrimSpace(output)\n\tret := strings.Split(output, \".\")\n\tif len(ret) != 3 {\n\t\tself.fm.T().Fatalf(\"invalid version %v\", output)\n\t}\n\treturn ret\n}\n\nfunc (self dockerActions) StorageDriver() string {\n\tdockerCommand := []string{\"docker\", \"info\"}\n\toutput, _ := self.fm.Shell().Run(\"sudo\", dockerCommand...)\n\tif len(output) < 1 {\n\t\tself.fm.T().Fatalf(\"failed to find docker storage driver - %v\", output)\n\t}\n\tfor _, line := range strings.Split(output, \"\\n\") {\n\t\tline = strings.TrimSpace(line)\n\t\tif strings.HasPrefix(line, \"Storage Driver: \") {\n\t\t\tidx := strings.LastIndex(line, \": \") + 2\n\t\t\tdriver := line[idx:]\n\t\t\tswitch driver {\n\t\t\tcase Aufs:\n\t\t\t\treturn Aufs\n\t\t\tcase Overlay:\n\t\t\t\treturn Overlay\n\t\t\tcase DeviceMapper:\n\t\t\t\treturn DeviceMapper\n\t\t\tdefault:\n\t\t\t\treturn Unknown\n\t\t\t}\n\t\t}\n\t}\n\tself.fm.T().Fatalf(\"failed to find docker storage driver from info - %v\", output)\n\treturn Unknown\n}\n\nfunc (self dockerActions) RunStress(args DockerRunArgs, cmd ...string) string {\n\tdockerCommand := append(append(append(append([]string{\"docker\", \"run\", \"-m=4M\", \"-d\", \"-t\", \"-i\"}, args.Args...), args.Image), args.InnerArgs...), cmd...)\n\n\toutput, _ := self.fm.Shell().RunStress(\"sudo\", dockerCommand...)\n\n\t\/\/ The last line is the container ID.\n\tif len(output) < 1 {\n\t\tself.fm.T().Fatalf(\"need 1 arguments in output %v to get the name but have %v\", output, len(output))\n\t}\n\telements := strings.Fields(output)\n\tcontainerId := elements[len(elements)-1]\n\n\tself.fm.cleanups = append(self.fm.cleanups, func() {\n\t\tself.fm.Shell().Run(\"sudo\", \"docker\", \"rm\", \"-f\", containerId)\n\t})\n\treturn containerId\n}\n\nfunc (self shellActions) Run(command string, args ...string) (string, string) {\n\tvar cmd *exec.Cmd\n\tif self.fm.Hostname().Host == \"localhost\" {\n\t\t\/\/ Just run locally.\n\t\tcmd = exec.Command(command, args...)\n\t} else {\n\t\t\/\/ We must SSH to the remote machine and run the command.\n\t\targs = append(common.GetGCComputeArgs(\"ssh\", self.fm.Hostname().GceInstanceName, \"--\", command), args...)\n\t\tcmd = exec.Command(\"gcloud\", args...)\n\t}\n\tvar stdout bytes.Buffer\n\tvar stderr bytes.Buffer\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tself.fm.T().Fatalf(\"Failed to run %q %v in %q with error: %q. Stdout: %q, Stderr: %s\", command, args, self.fm.Hostname().Host, err, stdout.String(), stderr.String())\n\t\treturn \"\", \"\"\n\t}\n\treturn stdout.String(), stderr.String()\n}\n\nfunc (self shellActions) RunStress(command string, args ...string) (string, string) {\n\tvar cmd *exec.Cmd\n\tif self.fm.Hostname().Host == \"localhost\" {\n\t\t\/\/ Just run locally.\n\t\tcmd = exec.Command(command, args...)\n\t} else {\n\t\t\/\/ We must SSH to the remote machine and run the command.\n\t\targs = append(common.GetGCComputeArgs(\"ssh\", self.fm.Hostname().GceInstanceName, \"--\", command), args...)\n\t\tcmd = exec.Command(\"gcloud\", args...)\n\t}\n\tvar stdout bytes.Buffer\n\tvar stderr bytes.Buffer\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tself.fm.T().Logf(\"Ran %q %v in %q and received error: %q. Stdout: %q, Stderr: %s\", command, args, self.fm.Hostname().Host, err, stdout.String(), stderr.String())\n\t\treturn stdout.String(), stderr.String()\n\t}\n\treturn stdout.String(), stderr.String()\n}\n\n\/\/ Runs retryFunc until no error is returned. After dur time the last error is returned.\n\/\/ Note that the function does not timeout the execution of retryFunc when the limit is reached.\nfunc RetryForDuration(retryFunc func() error, dur time.Duration) error {\n\twaitUntil := time.Now().Add(dur)\n\tvar err error\n\tfor time.Now().Before(waitUntil) {\n\t\terr = retryFunc()\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ The shenzhen-go binary serves a visual Go environment.\npackage main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com\/improbable-eng\/grpc-web\/go\/grpcweb\"\n\t\"github.com\/zserge\/webview\"\n\t\"google.golang.org\/grpc\"\n\n\tpb \"github.com\/google\/shenzhen-go\/dev\/proto\/go\"\n\t\"github.com\/google\/shenzhen-go\/dev\/server\"\n\t\"github.com\/google\/shenzhen-go\/dev\/server\/view\"\n)\n\nconst pingMsg = \"Pong!\"\n\nvar (\n\tuiAddr = flag.String(\"ui_addr\", \"localhost:0\", \"`address` to bind UI server to\")\n\tuseDefaultBrowser = flag.Bool(\"use_browser\", true, \"Load in the system's default web browser instead of the inbuilt webview\")\n)\n\nfunc systemOpen(url string) error {\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\t\treturn exec.Command(\"open\", url).Run()\n\tcase \"linux\":\n\t\t\/\/ Seems to work on Linux Mint. YMMV.\n\t\treturn exec.Command(\"xdg-open\", url).Run()\n\tcase \"windows\":\n\t\treturn exec.Command(\"cmd.exe\", \"\/C\", \"start\", url).Run()\n\tdefault:\n\t\tfmt.Printf(\"Ready to open %s\\n\", url)\n\t\treturn nil\n\t}\n}\n\nfunc webviewOpen(url string) error {\n\treturn webview.Open(\"Shenzhen Go\", url, 1152, 720, true)\n}\n\nfunc isUp(url string) bool {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer resp.Body.Close()\n\tmsg, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn string(msg) == pingMsg\n}\n\nfunc waitForUp(addr net.Addr) error {\n\turl := fmt.Sprintf(`http:\/\/%s\/ping`, addr)\n\ttry := time.NewTicker(100 * time.Millisecond)\n\tdefer try.Stop()\n\ttimeout := time.NewTimer(5 * time.Second)\n\tdefer timeout.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-try.C:\n\t\t\tif isUp(url) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase <-timeout.C:\n\t\t\treturn errors.New(\"timed out waiting 5s for server\")\n\t\t}\n\t}\n}\n\nfunc open(addr net.Addr, path string, useBrowser bool) {\n\turl := fmt.Sprintf(`http:\/\/%s\/%s`, addr, path)\n\topener := systemOpen\n\tif !useBrowser {\n\t\topener = webviewOpen\n\t}\n\tif err := opener(url); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Couldn't automatically open: %v\\n\", err)\n\t\tfmt.Printf(\"Ready to open %s\\n\", url)\n\t}\n}\n\nfunc serve(addr chan<- net.Addr) error {\n\tln, err := net.Listen(\"tcp\", *uiAddr)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"net.Listen: %v\", err)\n\t}\n\tdefer ln.Close()\n\n\tif addr != nil {\n\t\taddr <- ln.Addr()\n\t}\n\n\thttp.HandleFunc(\"\/ping\", func(w http.ResponseWriter, _ *http.Request) { w.Write([]byte(pingMsg)) })\n\thttp.Handle(\"\/favicon.ico\", view.Favicon)\n\thttp.Handle(\"\/.static\/\", http.StripPrefix(\"\/.static\/\", view.Static))\n\n\tgs := grpc.NewServer()\n\tpb.RegisterShenzhenGoServer(gs, server.S)\n\tws := grpcweb.WrapServer(gs)\n\thttp.Handle(\"\/.api\/\", http.StripPrefix(\"\/.api\/\", ws))\n\n\t\/\/ Finally, all unknown paths are assumed to be files.\n\thttp.Handle(\"\/\", server.S)\n\tif err := http.Serve(ln, nil); err != nil {\n\t\treturn fmt.Errorf(\"http.Serve: %v\", err)\n\t}\n\treturn nil\n}\n\nfunc usage() {\n\tfmt.Fprintf(flag.CommandLine.Output(), `Shenzhen Go is a tool for managing Shenzhen Go source code.\n\t\nUsage:\n\n %s [command] [files]\n \nThe (optional) commands are:\n \n build generate and build Go packages\n edit launch a Shenzhen Go server and open the editor interface\n generate generate Go packages\n install generate and install Go packages\n run generate Go package and run binaries\n serve launch a Shenzhen Go server\n \n\"edit\" is the default command.\n\nFlags:`, os.Args[0])\n\t\/\/ TODO: Add per-command help. `shenzhen-go help [command]`\n\n\tflag.PrintDefaults()\n}\n\nfunc main() {\n\tflag.Usage = usage\n\tflag.Parse()\n\n\topenUI := true\n\targs := flag.Args()\n\tif len(args) > 0 {\n\t\tswitch args[0] {\n\t\tcase \"build\":\n\t\t\tlog.Fatalf(\"TODO: build is not yet implemented\")\n\t\tcase \"edit\":\n\t\t\targs = args[1:]\n\t\tcase \"generate\":\n\t\t\tlog.Fatalf(\"TODO: generate is not yet implemented\")\n\t\tcase \"help\":\n\t\t\tusage()\n\t\t\treturn\n\t\tcase \"install\":\n\t\t\tlog.Fatalf(\"TODO: install is not yet implemented\")\n\t\tcase \"run\":\n\t\t\tlog.Fatalf(\"TODO: run is not yet implemented\")\n\t\tcase \"serve\":\n\t\t\tif len(args) > 1 {\n\t\t\t\tlog.Print(`Note: extra arguments to \"serve\" command are ignored`)\n\t\t\t}\n\t\t\topenUI = false\n\t\tdefault:\n\t\t\t\/\/ Edit, but every arg is a file.\n\t\t}\n\t} else {\n\t\t\/\/ Opens the browser at the root.\n\t\targs = []string{\"\"}\n\t}\n\n\tadch := make(chan net.Addr)\n\twait := make(chan struct{})\n\tgo func() {\n\t\tserve(adch)\n\t\tclose(wait)\n\t}()\n\n\t\/\/ Wait until properly serving.\n\taddr := <-adch\n\tif err := waitForUp(addr); err != nil {\n\t\tlog.Fatalf(\"Couldn't reach server: %v\", err)\n\t}\n\tlog.Printf(\"Serving HTTP on %v\", addr)\n\n\tif openUI {\n\t\tfor _, a := range args {\n\t\t\t\/\/ Launch \"open\" which should launch a browser,\n\t\t\t\/\/ or ask the user to do so.\n\t\t\t\/\/ This must be called from the main thread to avoid\n\t\t\t\/\/ https:\/\/github.com\/zserge\/webview\/issues\/29.\n\t\t\topen(addr, a, *useDefaultBrowser)\n\t\t}\n\t}\n\n\t\/\/ Job done.\n\t<-wait\n}\n<commit_msg>Fix build on pre 1.10 - flagset didn't have Output<commit_after>\/\/ Copyright 2016 Google Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ The shenzhen-go binary serves a visual Go environment.\npackage main\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com\/improbable-eng\/grpc-web\/go\/grpcweb\"\n\t\"github.com\/zserge\/webview\"\n\t\"google.golang.org\/grpc\"\n\n\tpb \"github.com\/google\/shenzhen-go\/dev\/proto\/go\"\n\t\"github.com\/google\/shenzhen-go\/dev\/server\"\n\t\"github.com\/google\/shenzhen-go\/dev\/server\/view\"\n)\n\nconst pingMsg = \"Pong!\"\n\nvar (\n\tuiAddr = flag.String(\"ui_addr\", \"localhost:0\", \"`address` to bind UI server to\")\n\tuseDefaultBrowser = flag.Bool(\"use_browser\", true, \"Load in the system's default web browser instead of the inbuilt webview\")\n)\n\nfunc systemOpen(url string) error {\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\t\treturn exec.Command(\"open\", url).Run()\n\tcase \"linux\":\n\t\t\/\/ Seems to work on Linux Mint. YMMV.\n\t\treturn exec.Command(\"xdg-open\", url).Run()\n\tcase \"windows\":\n\t\treturn exec.Command(\"cmd.exe\", \"\/C\", \"start\", url).Run()\n\tdefault:\n\t\tfmt.Printf(\"Ready to open %s\\n\", url)\n\t\treturn nil\n\t}\n}\n\nfunc webviewOpen(url string) error {\n\treturn webview.Open(\"Shenzhen Go\", url, 1152, 720, true)\n}\n\nfunc isUp(url string) bool {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer resp.Body.Close()\n\tmsg, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn string(msg) == pingMsg\n}\n\nfunc waitForUp(addr net.Addr) error {\n\turl := fmt.Sprintf(`http:\/\/%s\/ping`, addr)\n\ttry := time.NewTicker(100 * time.Millisecond)\n\tdefer try.Stop()\n\ttimeout := time.NewTimer(5 * time.Second)\n\tdefer timeout.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-try.C:\n\t\t\tif isUp(url) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase <-timeout.C:\n\t\t\treturn errors.New(\"timed out waiting 5s for server\")\n\t\t}\n\t}\n}\n\nfunc open(addr net.Addr, path string, useBrowser bool) {\n\turl := fmt.Sprintf(`http:\/\/%s\/%s`, addr, path)\n\topener := systemOpen\n\tif !useBrowser {\n\t\topener = webviewOpen\n\t}\n\tif err := opener(url); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Couldn't automatically open: %v\\n\", err)\n\t\tfmt.Printf(\"Ready to open %s\\n\", url)\n\t}\n}\n\nfunc serve(addr chan<- net.Addr) error {\n\tln, err := net.Listen(\"tcp\", *uiAddr)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"net.Listen: %v\", err)\n\t}\n\tdefer ln.Close()\n\n\tif addr != nil {\n\t\taddr <- ln.Addr()\n\t}\n\n\thttp.HandleFunc(\"\/ping\", func(w http.ResponseWriter, _ *http.Request) { w.Write([]byte(pingMsg)) })\n\thttp.Handle(\"\/favicon.ico\", view.Favicon)\n\thttp.Handle(\"\/.static\/\", http.StripPrefix(\"\/.static\/\", view.Static))\n\n\tgs := grpc.NewServer()\n\tpb.RegisterShenzhenGoServer(gs, server.S)\n\tws := grpcweb.WrapServer(gs)\n\thttp.Handle(\"\/.api\/\", http.StripPrefix(\"\/.api\/\", ws))\n\n\t\/\/ Finally, all unknown paths are assumed to be files.\n\thttp.Handle(\"\/\", server.S)\n\tif err := http.Serve(ln, nil); err != nil {\n\t\treturn fmt.Errorf(\"http.Serve: %v\", err)\n\t}\n\treturn nil\n}\n\nfunc usage() {\n\tfmt.Fprintf(os.Stderr, `Shenzhen Go is a tool for managing Shenzhen Go source code.\n\t\nUsage:\n\n %s [command] [files]\n \nThe (optional) commands are:\n \n build generate and build Go packages\n edit launch a Shenzhen Go server and open the editor interface\n generate generate Go packages\n install generate and install Go packages\n run generate Go package and run binaries\n serve launch a Shenzhen Go server\n \n\"edit\" is the default command.\n\nFlags:`, os.Args[0])\n\t\/\/ TODO: Add per-command help. `shenzhen-go help [command]`\n\n\tflag.PrintDefaults()\n}\n\nfunc main() {\n\tflag.Usage = usage\n\tflag.Parse()\n\n\topenUI := true\n\targs := flag.Args()\n\tif len(args) > 0 {\n\t\tswitch args[0] {\n\t\tcase \"build\":\n\t\t\tlog.Fatalf(\"TODO: build is not yet implemented\")\n\t\tcase \"edit\":\n\t\t\targs = args[1:]\n\t\tcase \"generate\":\n\t\t\tlog.Fatalf(\"TODO: generate is not yet implemented\")\n\t\tcase \"help\":\n\t\t\tusage()\n\t\t\treturn\n\t\tcase \"install\":\n\t\t\tlog.Fatalf(\"TODO: install is not yet implemented\")\n\t\tcase \"run\":\n\t\t\tlog.Fatalf(\"TODO: run is not yet implemented\")\n\t\tcase \"serve\":\n\t\t\tif len(args) > 1 {\n\t\t\t\tlog.Print(`Note: extra arguments to \"serve\" command are ignored`)\n\t\t\t}\n\t\t\topenUI = false\n\t\tdefault:\n\t\t\t\/\/ Edit, but every arg is a file.\n\t\t}\n\t} else {\n\t\t\/\/ Opens the browser at the root.\n\t\targs = []string{\"\"}\n\t}\n\n\tadch := make(chan net.Addr)\n\twait := make(chan struct{})\n\tgo func() {\n\t\tserve(adch)\n\t\tclose(wait)\n\t}()\n\n\t\/\/ Wait until properly serving.\n\taddr := <-adch\n\tif err := waitForUp(addr); err != nil {\n\t\tlog.Fatalf(\"Couldn't reach server: %v\", err)\n\t}\n\tlog.Printf(\"Serving HTTP on %v\", addr)\n\n\tif openUI {\n\t\tfor _, a := range args {\n\t\t\t\/\/ Launch \"open\" which should launch a browser,\n\t\t\t\/\/ or ask the user to do so.\n\t\t\t\/\/ This must be called from the main thread to avoid\n\t\t\t\/\/ https:\/\/github.com\/zserge\/webview\/issues\/29.\n\t\t\topen(addr, a, *useDefaultBrowser)\n\t\t}\n\t}\n\n\t\/\/ Job done.\n\t<-wait\n}\n<|endoftext|>"} {"text":"<commit_before>package smpp34\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"io\"\n\t\"reflect\"\n)\n\ntype PduReadErr string\n\ntype Pdu interface {\n\tFields() map[string]Field\n\tMandatoryFieldsList() []string\n\tGetField(string) Field\n\tGetHeader() *Header\n\tTLVFields() map[uint16]*TLVField\n\tWriter() []byte\n\tSetField(f string, v interface{}) error\n\tSetTLVField(t, l int, v []byte) error\n\tSetSeqNum(uint32)\n\tOk() bool\n}\n\nfunc (p PduReadErr) Error() string {\n\treturn string(p)\n}\n\nfunc ParsePdu(data []byte) (Pdu, error) {\n\tif len(data) < 16 {\n\t\treturn nil, PduReadErr(\"Invalid PDU. Length under 16 bytes\")\n\t}\n\n\theader := ParsePduHeader(data[:16])\n\n\tswitch header.Id {\n\tcase SUBMIT_SM:\n\t\tn, err := NewSubmitSm(header, data[16:])\n\t\treturn Pdu(n), err\n\tcase SUBMIT_SM_RESP:\n\t\tn, err := NewSubmitSmResp(header, data[16:])\n\t\treturn Pdu(n), err\n\tcase DELIVER_SM:\n\t\tn, err := NewDeliverSm(header, data[16:])\n\t\treturn Pdu(n), err\n\tcase DELIVER_SM_RESP:\n\t\tn, err := NewDeliverSmResp(header, data[16:])\n\t\treturn Pdu(n), err\n\tcase BIND_TRANSCEIVER, BIND_RECEIVER, BIND_TRANSMITTER:\n\t\tn, err := NewBind(header, data[16:])\n\t\treturn Pdu(n), err\n\tcase BIND_TRANSCEIVER_RESP, BIND_RECEIVER_RESP, BIND_TRANSMITTER_RESP:\n\t\tn, err := NewBindResp(header, data[16:])\n\t\treturn Pdu(n), err\n\tcase ENQUIRE_LINK:\n\t\tn, err := NewEnquireLink(header)\n\t\treturn Pdu(n), err\n\tcase ENQUIRE_LINK_RESP:\n\t\tn, err := NewEnquireLinkResp(header)\n\t\treturn Pdu(n), err\n\tcase UNBIND:\n\t\tn, err := NewUnbind(header)\n\t\treturn Pdu(n), err\n\tcase UNBIND_RESP:\n\t\tn, err := NewUnbindResp(header)\n\t\treturn Pdu(n), err\n\tdefault:\n\t\treturn nil, PduReadErr(header.Id.Error())\n\t}\n}\n\nfunc ParsePduHeader(data []byte) *Header {\n\treturn NewPduHeader(\n\t\tunpackUi32(data[:4]),\n\t\tCMDId(unpackUi32(data[4:8])),\n\t\tCMDStatus(unpackUi32(data[8:12])),\n\t\tunpackUi32(data[12:16]),\n\t)\n}\n\nfunc create_pdu_fields(fieldNames []string, r *bytes.Buffer) (map[string]Field, map[uint16]*TLVField, error) {\n\n\tfields := make(map[string]Field)\n\teof := false\n\tfor _, k := range fieldNames {\n\t\tswitch k {\n\t\tcase SERVICE_TYPE, SOURCE_ADDR, DESTINATION_ADDR, SCHEDULE_DELIVERY_TIME, VALIDITY_PERIOD, SYSTEM_ID, PASSWORD, SYSTEM_TYPE, ADDRESS_RANGE, MESSAGE_ID:\n\t\t\tt, err := r.ReadBytes(0x00)\n\n\t\t\tif err == io.EOF {\n\t\t\t\teof = true\n\t\t\t} else if err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\tfields[k] = NewVariableField(t)\n\t\tcase SOURCE_ADDR_TON, SOURCE_ADDR_NPI, DEST_ADDR_TON, DEST_ADDR_NPI, ESM_CLASS, PROTOCOL_ID, PRIORITY_FLAG, REGISTERED_DELIVERY, REPLACE_IF_PRESENT_FLAG, DATA_CODING, SM_DEFAULT_MSG_ID, INTERFACE_VERSION, ADDR_TON, ADDR_NPI:\n\t\t\tt, err := r.ReadByte()\n\n\t\t\tif err == io.EOF {\n\t\t\t\teof = true\n\t\t\t} else if err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\tfields[k] = NewFixedField(t)\n\t\tcase SM_LENGTH:\n\t\t\t\/\/ Short Message Length\n\t\t\tt, err := r.ReadByte()\n\n\t\t\tif err == io.EOF {\n\t\t\t\teof = true\n\t\t\t} else if err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\tfields[k] = NewFixedField(t)\n\n\t\t\t\/\/ Short Message\n\t\t\tp := make([]byte, t)\n\n\t\t\t_, err = r.Read(p)\n\t\t\tif err == io.EOF {\n\t\t\t\teof = true\n\t\t\t} else if err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\tfields[SHORT_MESSAGE] = NewVariableField(p)\n\t\tcase SHORT_MESSAGE:\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t\/\/ Optional Fields\n\ttlvs := map[uint16]*TLVField{}\n\tvar err error\n\n\tif !eof {\n\t\ttlvs, err = parse_tlv_fields(r)\n\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\treturn fields, tlvs, nil\n}\n\nfunc parse_tlv_fields(r *bytes.Buffer) (map[uint16]*TLVField, error) {\n\ttlvs := map[uint16]*TLVField{}\n\n\tfor {\n\t\tp := make([]byte, 4)\n\t\t_, err := r.Read(p)\n\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ length\n\t\tl := unpackUi16(p[2:4])\n\n\t\t\/\/ Get Value\n\t\tv := make([]byte, l)\n\n\t\t_, err = r.Read(v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttlvs[unpackUi16(p[0:2])] = &TLVField{\n\t\t\tunpackUi16(p[0:2]),\n\t\t\tunpackUi16(p[2:4]),\n\t\t\tv,\n\t\t}\n\t}\n\n\treturn tlvs, nil\n}\n\nfunc validate_pdu_field(f string, v interface{}) bool {\n\tswitch f {\n\tcase SOURCE_ADDR_TON, SOURCE_ADDR_NPI, DEST_ADDR_TON, DEST_ADDR_NPI, ESM_CLASS, PROTOCOL_ID, PRIORITY_FLAG, REGISTERED_DELIVERY, REPLACE_IF_PRESENT_FLAG, DATA_CODING, SM_DEFAULT_MSG_ID, INTERFACE_VERSION, ADDR_TON, ADDR_NPI, SM_LENGTH:\n\t\tif validate_pdu_field_type(0x00, v) {\n\t\t\treturn true\n\t\t}\n\tcase SERVICE_TYPE, SOURCE_ADDR, DESTINATION_ADDR, SCHEDULE_DELIVERY_TIME, VALIDITY_PERIOD, SYSTEM_ID, PASSWORD, SYSTEM_TYPE, ADDRESS_RANGE, MESSAGE_ID, SHORT_MESSAGE:\n\t\tif validate_pdu_field_type(\"string\", v) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc validate_pdu_field_type(t interface{}, v interface{}) bool {\n\tif reflect.TypeOf(t) == reflect.TypeOf(v) {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc included_check(a []string, v string) bool {\n\tfor _, k := range a {\n\t\tif k == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc unpackUi32(b []byte) (n uint32) {\n\tn = binary.BigEndian.Uint32(b)\n\treturn\n}\n\nfunc packUi32(n uint32) (b []byte) {\n\tb = make([]byte, 4)\n\tbinary.BigEndian.PutUint32(b, n)\n\treturn\n}\n\nfunc unpackUi16(b []byte) (n uint16) {\n\tn = binary.BigEndian.Uint16(b)\n\treturn\n}\n\nfunc packUi16(n uint16) (b []byte) {\n\tb = make([]byte, 2)\n\tbinary.BigEndian.PutUint16(b, n)\n\treturn\n}\n\nfunc packUi8(n uint8) (b []byte) {\n\tb = make([]byte, 2)\n\tbinary.BigEndian.PutUint16(b, uint16(n))\n\treturn b[1:]\n}\n<commit_msg>Updated PDU errors<commit_after>package smpp34\n\nimport (\n\t\"bytes\"\n\t\"encoding\/binary\"\n\t\"io\"\n\t\"reflect\"\n)\n\ntype PduReadErr string\ntype PduCmdIdErr string\n\ntype Pdu interface {\n\tFields() map[string]Field\n\tMandatoryFieldsList() []string\n\tGetField(string) Field\n\tGetHeader() *Header\n\tTLVFields() map[uint16]*TLVField\n\tWriter() []byte\n\tSetField(f string, v interface{}) error\n\tSetTLVField(t, l int, v []byte) error\n\tSetSeqNum(uint32)\n\tOk() bool\n}\n\nfunc (p PduReadErr) Error() string {\n\treturn string(p)\n}\n\nfunc (p PduCmdIdErr) Error() string {\n\treturn string(p)\n}\n\nfunc ParsePdu(data []byte) (Pdu, error) {\n\tif len(data) < 16 {\n\t\treturn nil, PduReadErr(\"Invalid PDU. Length under 16 bytes\")\n\t}\n\n\theader := ParsePduHeader(data[:16])\n\n\tswitch header.Id {\n\tcase SUBMIT_SM:\n\t\tn, err := NewSubmitSm(header, data[16:])\n\t\treturn Pdu(n), err\n\tcase SUBMIT_SM_RESP:\n\t\tn, err := NewSubmitSmResp(header, data[16:])\n\t\treturn Pdu(n), err\n\tcase DELIVER_SM:\n\t\tn, err := NewDeliverSm(header, data[16:])\n\t\treturn Pdu(n), err\n\tcase DELIVER_SM_RESP:\n\t\tn, err := NewDeliverSmResp(header, data[16:])\n\t\treturn Pdu(n), err\n\tcase BIND_TRANSCEIVER, BIND_RECEIVER, BIND_TRANSMITTER:\n\t\tn, err := NewBind(header, data[16:])\n\t\treturn Pdu(n), err\n\tcase BIND_TRANSCEIVER_RESP, BIND_RECEIVER_RESP, BIND_TRANSMITTER_RESP:\n\t\tn, err := NewBindResp(header, data[16:])\n\t\treturn Pdu(n), err\n\tcase ENQUIRE_LINK:\n\t\tn, err := NewEnquireLink(header)\n\t\treturn Pdu(n), err\n\tcase ENQUIRE_LINK_RESP:\n\t\tn, err := NewEnquireLinkResp(header)\n\t\treturn Pdu(n), err\n\tcase UNBIND:\n\t\tn, err := NewUnbind(header)\n\t\treturn Pdu(n), err\n\tcase UNBIND_RESP:\n\t\tn, err := NewUnbindResp(header)\n\t\treturn Pdu(n), err\n\tdefault:\n\t\treturn nil, PduCmdIdErr(header.Id.Error())\n\t}\n}\n\nfunc ParsePduHeader(data []byte) *Header {\n\treturn NewPduHeader(\n\t\tunpackUi32(data[:4]),\n\t\tCMDId(unpackUi32(data[4:8])),\n\t\tCMDStatus(unpackUi32(data[8:12])),\n\t\tunpackUi32(data[12:16]),\n\t)\n}\n\nfunc create_pdu_fields(fieldNames []string, r *bytes.Buffer) (map[string]Field, map[uint16]*TLVField, error) {\n\n\tfields := make(map[string]Field)\n\teof := false\n\tfor _, k := range fieldNames {\n\t\tswitch k {\n\t\tcase SERVICE_TYPE, SOURCE_ADDR, DESTINATION_ADDR, SCHEDULE_DELIVERY_TIME, VALIDITY_PERIOD, SYSTEM_ID, PASSWORD, SYSTEM_TYPE, ADDRESS_RANGE, MESSAGE_ID:\n\t\t\tt, err := r.ReadBytes(0x00)\n\n\t\t\tif err == io.EOF {\n\t\t\t\teof = true\n\t\t\t} else if err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\tfields[k] = NewVariableField(t)\n\t\tcase SOURCE_ADDR_TON, SOURCE_ADDR_NPI, DEST_ADDR_TON, DEST_ADDR_NPI, ESM_CLASS, PROTOCOL_ID, PRIORITY_FLAG, REGISTERED_DELIVERY, REPLACE_IF_PRESENT_FLAG, DATA_CODING, SM_DEFAULT_MSG_ID, INTERFACE_VERSION, ADDR_TON, ADDR_NPI:\n\t\t\tt, err := r.ReadByte()\n\n\t\t\tif err == io.EOF {\n\t\t\t\teof = true\n\t\t\t} else if err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\tfields[k] = NewFixedField(t)\n\t\tcase SM_LENGTH:\n\t\t\t\/\/ Short Message Length\n\t\t\tt, err := r.ReadByte()\n\n\t\t\tif err == io.EOF {\n\t\t\t\teof = true\n\t\t\t} else if err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\tfields[k] = NewFixedField(t)\n\n\t\t\t\/\/ Short Message\n\t\t\tp := make([]byte, t)\n\n\t\t\t_, err = r.Read(p)\n\t\t\tif err == io.EOF {\n\t\t\t\teof = true\n\t\t\t} else if err != nil {\n\t\t\t\treturn nil, nil, err\n\t\t\t}\n\n\t\t\tfields[SHORT_MESSAGE] = NewVariableField(p)\n\t\tcase SHORT_MESSAGE:\n\t\t\tcontinue\n\t\t}\n\t}\n\n\t\/\/ Optional Fields\n\ttlvs := map[uint16]*TLVField{}\n\tvar err error\n\n\tif !eof {\n\t\ttlvs, err = parse_tlv_fields(r)\n\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t}\n\n\treturn fields, tlvs, nil\n}\n\nfunc parse_tlv_fields(r *bytes.Buffer) (map[uint16]*TLVField, error) {\n\ttlvs := map[uint16]*TLVField{}\n\n\tfor {\n\t\tp := make([]byte, 4)\n\t\t_, err := r.Read(p)\n\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t\/\/ length\n\t\tl := unpackUi16(p[2:4])\n\n\t\t\/\/ Get Value\n\t\tv := make([]byte, l)\n\n\t\t_, err = r.Read(v)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttlvs[unpackUi16(p[0:2])] = &TLVField{\n\t\t\tunpackUi16(p[0:2]),\n\t\t\tunpackUi16(p[2:4]),\n\t\t\tv,\n\t\t}\n\t}\n\n\treturn tlvs, nil\n}\n\nfunc validate_pdu_field(f string, v interface{}) bool {\n\tswitch f {\n\tcase SOURCE_ADDR_TON, SOURCE_ADDR_NPI, DEST_ADDR_TON, DEST_ADDR_NPI, ESM_CLASS, PROTOCOL_ID, PRIORITY_FLAG, REGISTERED_DELIVERY, REPLACE_IF_PRESENT_FLAG, DATA_CODING, SM_DEFAULT_MSG_ID, INTERFACE_VERSION, ADDR_TON, ADDR_NPI, SM_LENGTH:\n\t\tif validate_pdu_field_type(0x00, v) {\n\t\t\treturn true\n\t\t}\n\tcase SERVICE_TYPE, SOURCE_ADDR, DESTINATION_ADDR, SCHEDULE_DELIVERY_TIME, VALIDITY_PERIOD, SYSTEM_ID, PASSWORD, SYSTEM_TYPE, ADDRESS_RANGE, MESSAGE_ID, SHORT_MESSAGE:\n\t\tif validate_pdu_field_type(\"string\", v) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc validate_pdu_field_type(t interface{}, v interface{}) bool {\n\tif reflect.TypeOf(t) == reflect.TypeOf(v) {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc included_check(a []string, v string) bool {\n\tfor _, k := range a {\n\t\tif k == v {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc unpackUi32(b []byte) (n uint32) {\n\tn = binary.BigEndian.Uint32(b)\n\treturn\n}\n\nfunc packUi32(n uint32) (b []byte) {\n\tb = make([]byte, 4)\n\tbinary.BigEndian.PutUint32(b, n)\n\treturn\n}\n\nfunc unpackUi16(b []byte) (n uint16) {\n\tn = binary.BigEndian.Uint16(b)\n\treturn\n}\n\nfunc packUi16(n uint16) (b []byte) {\n\tb = make([]byte, 2)\n\tbinary.BigEndian.PutUint16(b, n)\n\treturn\n}\n\nfunc packUi8(n uint8) (b []byte) {\n\tb = make([]byte, 2)\n\tbinary.BigEndian.PutUint16(b, uint16(n))\n\treturn b[1:]\n}\n<|endoftext|>"} {"text":"<commit_before>package market\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/nzai\/stockrecorder\/analyse\"\n\t\"github.com\/nzai\/stockrecorder\/config\"\n\t\"github.com\/nzai\/stockrecorder\/db\"\n\t\"github.com\/nzai\/stockrecorder\/io\"\n)\n\ntype YahooJson struct {\n\tChart YahooChart `json:\"chart\"`\n}\n\ntype YahooChart struct {\n\tResult []YahooResult `json:\"result\"`\n\tErr *YahooError `json:\"error\"`\n}\n\ntype YahooError struct {\n\tCode string `json:\"code\"`\n\tDescription string `json:\"description\"`\n}\n\ntype YahooResult struct {\n\tMeta YahooMeta `json:\"meta\"`\n\tTimestamp []int64 `json:\"timestamp\"`\n\tIndicators YahooIndicators `json:\"indicators\"`\n}\n\ntype YahooMeta struct {\n\tCurrency string `json:\"currency\"`\n\tSymbol string `json:\"symbol\"`\n\tExchangeName string `json:\"exchangeName\"`\n\tInstrumentType string `json:\"instrumentType\"`\n\tFirstTradeDate int64 `json:\"firstTradeDate\"`\n\tGMTOffset int `json:\"gmtoffset\"`\n\tTimezone string `json:\"timezone\"`\n\tPreviousClose float32 `json:\"previousClose\"`\n\tScale int `json:\"scale\"`\n\tCurrentTradingPeriod YahooTradingPeroid `json:\"currentTradingPeriod\"`\n\tTradingPeriods YahooTradingPeroids `json:\"tradingPeriods\"`\n\tDataGranularity string `json:\"dataGranularity\"`\n\tValidRanges []string `json:\"validRanges\"`\n}\n\ntype YahooTradingPeroid struct {\n\tPre YahooTradingPeroidSection `json:\"pre\"`\n\tRegular YahooTradingPeroidSection `json:\"regular\"`\n\tPost YahooTradingPeroidSection `json:\"post\"`\n}\n\ntype YahooTradingPeroids struct {\n\tPres [][]YahooTradingPeroidSection `json:\"pre\"`\n\tRegulars [][]YahooTradingPeroidSection `json:\"regular\"`\n\tPosts [][]YahooTradingPeroidSection `json:\"post\"`\n}\n\ntype YahooTradingPeroidSection struct {\n\tTimezone string `json:\"timezone\"`\n\tStart int64 `json:\"start\"`\n\tEnd int64 `json:\"end\"`\n\tGMTOffset int `json:\"gmtoffset\"`\n}\n\ntype YahooIndicators struct {\n\tQuotes []YahooQuote `json:\"quote\"`\n}\n\ntype YahooQuote struct {\n\tOpen []float32 `json:\"open\"`\n\tClose []float32 `json:\"close\"`\n\tHigh []float32 `json:\"high\"`\n\tLow []float32 `json:\"low\"`\n\tVolume []int64 `json:\"volume\"`\n}\n\n\/\/\t从雅虎财经获取上市公司分时数据\nfunc DownloadCompanyDaily(marketName, companyCode, queryCode string, day time.Time) error {\n\t\/\/\t文件保存路径\n\tfileName := fmt.Sprintf(\"%s_raw.txt\", day.Format(\"20060102\"))\n\tfilePath := filepath.Join(config.Get().DataDir, marketName, companyCode, fileName)\n\n\t\/\/\t如果文件已存在就忽略\n\tvar content []byte\n\t_, err := os.Stat(filePath)\n\tif os.IsNotExist(err) {\n\t\t\/\/\t如果不存在就抓取并保存\n\t\tstart := time.Date(day.Year(), day.Month(), day.Day(), 0, 0, 0, 0, day.Location())\n\t\tend := start.Add(time.Hour * 24)\n\n\t\tpattern := \"https:\/\/finance-yql.media.yahoo.com\/v7\/finance\/chart\/%s?period2=%d&period1=%d&interval=1m&indicators=quote&includeTimestamps=true&includePrePost=true&events=div%7Csplit%7Cearn&corsDomain=finance.yahoo.com\"\n\t\turl := fmt.Sprintf(pattern, queryCode, end.Unix(), start.Unix())\n\n\t\thtml, err := io.DownloadStringRetry(url, retryTimes, retryIntervalSeconds)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcontent = []byte(html)\n\n\t\t\/\/\t写入文件\n\t\treturn io.WriteString(filePath, html)\n\t}\n\n\t\/\/\t如果不解析\n\tif !config.Get().EnableAnalyse {\n\t\treturn nil\n\t}\n\n\t\/\/\t检查数据库是否解析过,解析过就忽略\n\tfound, err := db.DailyExists(marketName, companyCode, day)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !found {\n\t\tif content == nil {\n\t\t\t\/\/\t读取文件\n\t\t\tbuffer, err := io.ReadAllBytes(filePath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tcontent = buffer\n\t\t}\n\n\t\t\/\/\t解析\n\t\tdar, err := parseDailyYahooJson(marketName, companyCode, day, content)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/\t保存\n\t\terr = db.DailySave(dar)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/\t解析雅虎Json\nfunc parseDailyYahooJson(marketName, companyCode string, date time.Time, buffer []byte) (*analyse.DailyAnalyzeResult, error) {\n\n\tyj := &YahooJson{}\n\terr := json.Unmarshal(buffer, &yj)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"解析雅虎Json发生错误: %s\", err)\n\t}\n\t\/\/\tlog.Print(\"AAAAA:\", yj)\n\n\tresult := &analyse.DailyAnalyzeResult{\n\t\tDailyResult: analyse.DailyResult{\n\t\t\tCode: companyCode,\n\t\t\tMarket: marketName,\n\t\t\tDate: date,\n\t\t\tError: false,\n\t\t\tMessage: \"\"},\n\t\tPre: make([]analyse.Peroid60, 0),\n\t\tRegular: make([]analyse.Peroid60, 0),\n\t\tPost: make([]analyse.Peroid60, 0)}\n\n\t\/\/\t检查数据\n\terr = validateDailyYahooJson(yj)\n\tif err != nil {\n\t\tresult.DailyResult.Error = true\n\t\tresult.DailyResult.Message = err.Error()\n\n\t\treturn result, nil\n\t}\n\n\tperiods, quote := yj.Chart.Result[0].Meta.TradingPeriods, yj.Chart.Result[0].Indicators.Quotes[0]\n\tfor index, ts := range yj.Chart.Result[0].Timestamp {\n\n\t\tp := analyse.Peroid60{\n\t\t\tCode: companyCode,\n\t\t\tMarket: marketName,\n\t\t\tStart: time.Unix(ts, 0),\n\t\t\tEnd: time.Unix(ts+60, 0),\n\t\t\tOpen: quote.Open[index],\n\t\t\tClose: quote.Close[index],\n\t\t\tHigh: quote.High[index],\n\t\t\tLow: quote.Low[index],\n\t\t\tVolume: quote.Volume[index]}\n\n\t\t\/\/\tPre, Regular, Post\n\t\tif ts >= periods.Pres[0][0].Start && ts < periods.Pres[0][0].End {\n\t\t\tresult.Pre = append(result.Pre, p)\n\t\t} else if ts >= periods.Regulars[0][0].Start && ts < periods.Regulars[0][0].End {\n\t\t\tresult.Regular = append(result.Regular, p)\n\t\t} else if ts >= periods.Posts[0][0].Start && ts < periods.Posts[0][0].End {\n\t\t\tresult.Post = append(result.Regular, p)\n\t\t}\n\t}\n\n\treturn result, nil\n}\n\nfunc validateDailyYahooJson(yj *YahooJson) error {\n\n\tif yj.Chart.Err != nil {\n\t\treturn fmt.Errorf(\"[%s]%s\", yj.Chart.Err.Code, yj.Chart.Err.Description)\n\t}\n\n\tif yj.Chart.Result == nil || len(yj.Chart.Result) == 0 {\n\t\treturn fmt.Errorf(\"Result为空\")\n\t}\n\n\tif yj.Chart.Result[0].Indicators.Quotes == nil || len(yj.Chart.Result[0].Indicators.Quotes) == 0 {\n\t\treturn fmt.Errorf(\"Quotes为空\")\n\t}\n\n\tresult, quote := yj.Chart.Result[0], yj.Chart.Result[0].Indicators.Quotes[0]\n\tif len(result.Timestamp) != len(quote.Open) ||\n\t\tlen(result.Timestamp) != len(quote.Close) ||\n\t\tlen(result.Timestamp) != len(quote.High) ||\n\t\tlen(result.Timestamp) != len(quote.Low) ||\n\t\tlen(result.Timestamp) != len(quote.Volume) {\n\t\treturn fmt.Errorf(\"Quotes数量不正确\")\n\t}\n\n\tif len(result.Meta.TradingPeriods.Pres) == 0 ||\n\t\tlen(result.Meta.TradingPeriods.Pres[0]) == 0 ||\n\t\tlen(result.Meta.TradingPeriods.Posts) == 0 ||\n\t\tlen(result.Meta.TradingPeriods.Posts[0]) == 0 ||\n\t\tlen(result.Meta.TradingPeriods.Regulars) == 0 ||\n\t\tlen(result.Meta.TradingPeriods.Regulars[0]) == 0 {\n\t\treturn fmt.Errorf(\"TradingPeriods数量不正确\")\n\t}\n\treturn nil\n}\n<commit_msg>调整雅虎财经数据的解析及保存方法,有问题的数据才以文件形式存盘,解析过的文件一律删除,将数据从文件系统全部搬移到数据库中<commit_after>package market\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"time\"\n\n\t\"github.com\/nzai\/stockrecorder\/config\"\n\t\"github.com\/nzai\/stockrecorder\/db\"\n\t\"github.com\/nzai\/stockrecorder\/io\"\n)\n\ntype YahooJson struct {\n\tChart YahooChart `json:\"chart\"`\n}\n\ntype YahooChart struct {\n\tResult []YahooResult `json:\"result\"`\n\tErr *YahooError `json:\"error\"`\n}\n\ntype YahooError struct {\n\tCode string `json:\"code\"`\n\tDescription string `json:\"description\"`\n}\n\ntype YahooResult struct {\n\tMeta YahooMeta `json:\"meta\"`\n\tTimestamp []int64 `json:\"timestamp\"`\n\tIndicators YahooIndicators `json:\"indicators\"`\n}\n\ntype YahooMeta struct {\n\tCurrency string `json:\"currency\"`\n\tSymbol string `json:\"symbol\"`\n\tExchangeName string `json:\"exchangeName\"`\n\tInstrumentType string `json:\"instrumentType\"`\n\tFirstTradeDate int64 `json:\"firstTradeDate\"`\n\tGMTOffset int `json:\"gmtoffset\"`\n\tTimezone string `json:\"timezone\"`\n\tPreviousClose float32 `json:\"previousClose\"`\n\tScale int `json:\"scale\"`\n\tCurrentTradingPeriod YahooTradingPeroid `json:\"currentTradingPeriod\"`\n\tTradingPeriods YahooTradingPeroids `json:\"tradingPeriods\"`\n\tDataGranularity string `json:\"dataGranularity\"`\n\tValidRanges []string `json:\"validRanges\"`\n}\n\ntype YahooTradingPeroid struct {\n\tPre YahooTradingPeroidSection `json:\"pre\"`\n\tRegular YahooTradingPeroidSection `json:\"regular\"`\n\tPost YahooTradingPeroidSection `json:\"post\"`\n}\n\ntype YahooTradingPeroids struct {\n\tPres [][]YahooTradingPeroidSection `json:\"pre\"`\n\tRegulars [][]YahooTradingPeroidSection `json:\"regular\"`\n\tPosts [][]YahooTradingPeroidSection `json:\"post\"`\n}\n\ntype YahooTradingPeroidSection struct {\n\tTimezone string `json:\"timezone\"`\n\tStart int64 `json:\"start\"`\n\tEnd int64 `json:\"end\"`\n\tGMTOffset int `json:\"gmtoffset\"`\n}\n\ntype YahooIndicators struct {\n\tQuotes []YahooQuote `json:\"quote\"`\n}\n\ntype YahooQuote struct {\n\tOpen []float32 `json:\"open\"`\n\tClose []float32 `json:\"close\"`\n\tHigh []float32 `json:\"high\"`\n\tLow []float32 `json:\"low\"`\n\tVolume []int64 `json:\"volume\"`\n}\n\n\/\/\t从雅虎财经获取上市公司分时数据\nfunc DownloadCompanyDaily(marketName, companyCode, queryCode string, day time.Time) error {\n\n\t\/\/\t检查数据库是否解析过\n\tfound, err := db.DailyExists(marketName, companyCode, day)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/\t解析过的不再重复解析\n\tif found {\n\t\treturn nil\n\t}\n\n\t\/\/\t文件保存路径\n\tfileName := fmt.Sprintf(\"%s_raw.txt\", day.Format(\"20060102\"))\n\tfilePath := filepath.Join(config.Get().DataDir, marketName, companyCode, fileName)\n\n\tvar buffer []byte\n\tfileExists := false\n\n\t\/\/\t检查磁盘上数据文件是否已经存在\n\t_, err = os.Stat(filePath)\n\tif os.IsNotExist(err) {\n\n\t\t\/\/\t如果不存在就抓取\n\t\tstart := time.Date(day.Year(), day.Month(), day.Day(), 0, 0, 0, 0, day.Location())\n\t\tend := start.Add(time.Hour * 24)\n\n\t\tpattern := \"https:\/\/finance-yql.media.yahoo.com\/v7\/finance\/chart\/%s?period2=%d&period1=%d&interval=1m&indicators=quote&includeTimestamps=true&includePrePost=true&events=div%7Csplit%7Cearn&corsDomain=finance.yahoo.com\"\n\t\turl := fmt.Sprintf(pattern, queryCode, end.Unix(), start.Unix())\n\n\t\t\/\/\t查询Yahoo财经接口,返回股票分时数据\n\t\tcontent, err := io.DownloadStringRetry(url, retryTimes, retryIntervalSeconds)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tbuffer = []byte(content)\n\t} else {\n\t\tfileExists = true\n\t\t\n\t\t\/\/\t如果已经存在就读取文件\n\t\tbuffer, err = io.ReadAllBytes(filePath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/\t解析\n\tdar, err := ParseDailyYahooJson(marketName, companyCode, day, buffer)\n\tif err != nil {\n\n\t\t\/\/\t解析错误的先保存为文件\n\t\terr1 := saveDaily(marketName, companyCode, day, buffer)\n\t\tif err1 != nil {\n\t\t\treturn err1\n\t\t}\n\n\t\treturn err\n\t}\n\n\t\/\/\t保存\n\terr = db.DailySave(dar)\n\tif err != nil {\n\n\t\t\/\/\t保存错误的先保存为文件\n\t\terr1 := saveDaily(marketName, companyCode, day, buffer)\n\t\tif err1 != nil {\n\t\t\treturn err1\n\t\t}\n\n\t\treturn err\n\t}\n\n\tif fileExists {\n\t\t\/\/\t解析成功就删除文件\n\t\treturn os.Remove(filePath)\n\t}\n\n\treturn nil\n}\n\n\/\/\t解析雅虎Json\nfunc ParseDailyYahooJson(marketName, companyCode string, date time.Time, buffer []byte) (*db.DailyAnalyzeResult, error) {\n\n\tyj := &YahooJson{}\n\terr := json.Unmarshal(buffer, &yj)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"解析雅虎Json发生错误: %s\", err)\n\t}\n\n\tresult := &db.DailyAnalyzeResult{\n\t\tDailyResult: db.DailyResult{\n\t\t\tCode: companyCode,\n\t\t\tMarket: marketName,\n\t\t\tDate: date,\n\t\t\tError: false,\n\t\t\tMessage: \"\"},\n\t\tPre: make([]db.Peroid60, 0),\n\t\tRegular: make([]db.Peroid60, 0),\n\t\tPost: make([]db.Peroid60, 0)}\n\n\t\/\/\t检查数据\n\terr = validateDailyYahooJson(yj)\n\tif err != nil {\n\t\tresult.DailyResult.Error = true\n\t\tresult.DailyResult.Message = err.Error()\n\n\t\treturn result, nil\n\t}\n\n\tperiods, quote := yj.Chart.Result[0].Meta.TradingPeriods, yj.Chart.Result[0].Indicators.Quotes[0]\n\tfor index, ts := range yj.Chart.Result[0].Timestamp {\n\n\t\tp := db.Peroid60{\n\t\t\tCode: companyCode,\n\t\t\tMarket: marketName,\n\t\t\tStart: time.Unix(ts, 0),\n\t\t\tEnd: time.Unix(ts+60, 0),\n\t\t\tOpen: quote.Open[index],\n\t\t\tClose: quote.Close[index],\n\t\t\tHigh: quote.High[index],\n\t\t\tLow: quote.Low[index],\n\t\t\tVolume: quote.Volume[index]}\n\n\t\t\/\/\tPre, Regular, Post\n\t\tif ts >= periods.Pres[0][0].Start && ts < periods.Pres[0][0].End {\n\t\t\tresult.Pre = append(result.Pre, p)\n\t\t} else if ts >= periods.Regulars[0][0].Start && ts < periods.Regulars[0][0].End {\n\t\t\tresult.Regular = append(result.Regular, p)\n\t\t} else if ts >= periods.Posts[0][0].Start && ts < periods.Posts[0][0].End {\n\t\t\tresult.Post = append(result.Regular, p)\n\t\t}\n\t}\n\n\treturn result, nil\n}\n\n\/\/\t验证雅虎Json\nfunc validateDailyYahooJson(yj *YahooJson) error {\n\n\tif yj.Chart.Err != nil {\n\t\treturn fmt.Errorf(\"[%s]%s\", yj.Chart.Err.Code, yj.Chart.Err.Description)\n\t}\n\n\tif yj.Chart.Result == nil || len(yj.Chart.Result) == 0 {\n\t\treturn fmt.Errorf(\"Result为空\")\n\t}\n\n\tif yj.Chart.Result[0].Indicators.Quotes == nil || len(yj.Chart.Result[0].Indicators.Quotes) == 0 {\n\t\treturn fmt.Errorf(\"Quotes为空\")\n\t}\n\n\tresult, quote := yj.Chart.Result[0], yj.Chart.Result[0].Indicators.Quotes[0]\n\tif len(result.Timestamp) != len(quote.Open) ||\n\t\tlen(result.Timestamp) != len(quote.Close) ||\n\t\tlen(result.Timestamp) != len(quote.High) ||\n\t\tlen(result.Timestamp) != len(quote.Low) ||\n\t\tlen(result.Timestamp) != len(quote.Volume) {\n\t\treturn fmt.Errorf(\"Quotes数量不正确\")\n\t}\n\n\tif len(result.Meta.TradingPeriods.Pres) == 0 ||\n\t\tlen(result.Meta.TradingPeriods.Pres[0]) == 0 ||\n\t\tlen(result.Meta.TradingPeriods.Posts) == 0 ||\n\t\tlen(result.Meta.TradingPeriods.Posts[0]) == 0 ||\n\t\tlen(result.Meta.TradingPeriods.Regulars) == 0 ||\n\t\tlen(result.Meta.TradingPeriods.Regulars[0]) == 0 {\n\t\treturn fmt.Errorf(\"TradingPeriods数量不正确\")\n\t}\n\treturn nil\n}\n\n\/\/\t保存到文件\nfunc saveDaily(marketName, companyCode string, day time.Time, buffer []byte) error {\n\n\t\/\/\t文件保存路径\n\tfileName := fmt.Sprintf(\"%s_raw.txt\", day.Format(\"20060102\"))\n\tfilePath := filepath.Join(config.Get().DataDir, marketName, companyCode, fileName)\n\n\t\/\/\t不覆盖原文件\n\t_, err := os.Stat(filePath)\n\tif os.IsNotExist(err) {\n\t\treturn io.WriteBytes(filePath, buffer)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package session\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/breml\/logstash-config\/ast\"\n\t\"github.com\/breml\/logstash-config\/ast\/astutil\"\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/magnusbaeck\/logstash-filter-verifier\/v2\/internal\/daemon\/idgen\"\n\t\"github.com\/magnusbaeck\/logstash-filter-verifier\/v2\/internal\/daemon\/logstashconfig\"\n\t\"github.com\/magnusbaeck\/logstash-filter-verifier\/v2\/internal\/daemon\/pipeline\"\n\t\"github.com\/magnusbaeck\/logstash-filter-verifier\/v2\/internal\/daemon\/pool\"\n\t\"github.com\/magnusbaeck\/logstash-filter-verifier\/v2\/internal\/daemon\/template\"\n\t\"github.com\/magnusbaeck\/logstash-filter-verifier\/v2\/internal\/logging\"\n\t\"github.com\/magnusbaeck\/logstash-filter-verifier\/v2\/internal\/testcase\"\n)\n\ntype Session struct {\n\tid string\n\n\tlogstashController pool.LogstashController\n\tisOrderedPipelineSupported bool\n\n\tbaseDir string\n\tsessionDir string\n\n\tpipelines pipeline.Pipelines\n\tinputPluginCodecs map[string]string\n\ttestexec int\n\n\tnoCleanup bool\n\n\tlog logging.Logger\n}\n\nfunc newSession(baseDir string, logstashController pool.LogstashController, noCleanup bool, isOrderedPipelineSupported bool, log logging.Logger) *Session {\n\tsessionID := idgen.New()\n\tsessionDir := fmt.Sprintf(\"%s\/session\/%s\", baseDir, sessionID)\n\treturn &Session{\n\t\tid: sessionID,\n\t\tbaseDir: baseDir,\n\t\tsessionDir: sessionDir,\n\t\tlogstashController: logstashController,\n\t\tisOrderedPipelineSupported: isOrderedPipelineSupported,\n\t\tnoCleanup: noCleanup,\n\t\tinputPluginCodecs: map[string]string{},\n\t\tlog: log,\n\t}\n}\n\n\/\/ ID returns the id of the session.\nfunc (s Session) ID() string {\n\treturn s.id\n}\n\n\/\/ setupTest prepares the Logstash configuration for a new test run.\nfunc (s *Session) setupTest(pipelines pipeline.Pipelines, configFiles []logstashconfig.File) error {\n\terr := os.MkdirAll(s.sessionDir, 0700)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsutConfigDir := filepath.Join(s.sessionDir, \"sut\")\n\n\t\/\/ adjust pipeline names and config directories to session\n\tfor i := range pipelines {\n\t\tpipelineName := fmt.Sprintf(\"lfv_%s_%s\", s.id, pipelines[i].ID)\n\n\t\tpipelines[i].ID = pipelineName\n\t\tpipelines[i].Config = filepath.Join(sutConfigDir, pipelines[i].Config)\n\t\tif s.isOrderedPipelineSupported {\n\t\t\tpipelines[i].Ordered = \"true\"\n\t\t}\n\t\tpipelines[i].Workers = 1\n\t}\n\n\t\/\/ Preprocess and Save Config Files\n\tfor _, configFile := range configFiles {\n\t\tinputCodecs, err := configFile.ReplaceInputs(s.id)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.inputPluginCodecs = inputCodecs\n\n\t\toutputs, err := configFile.ReplaceOutputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = configFile.Save(sutConfigDir)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\toutputPipelines, err := s.createOutputPipelines(outputs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpipelines = append(pipelines, outputPipelines...)\n\t}\n\n\t\/\/ Reload Logstash Config\n\ts.pipelines = pipelines\n\t\/\/ err = s.logstash.ReloadPipelines(pipelines)\n\terr = s.logstashController.SetupTest(pipelines)\n\tif err != nil {\n\t\ts.log.Errorf(\"failed to reload Logstash config: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (s *Session) createOutputPipelines(outputs []string) ([]pipeline.Pipeline, error) {\n\tlfvOutputsDir := filepath.Join(s.sessionDir, \"lfv_outputs\")\n\terr := os.MkdirAll(lfvOutputsDir, 0700)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpipelines := make([]pipeline.Pipeline, 0)\n\tfor _, output := range outputs {\n\t\tpipelineName := fmt.Sprintf(\"lfv_output_%s\", output)\n\n\t\ttemplateData := struct {\n\t\t\tPipelineName string\n\t\t\tPipelineOrigName string\n\t\t}{\n\t\t\tPipelineName: pipelineName,\n\t\t\tPipelineOrigName: output,\n\t\t}\n\n\t\terr = template.ToFile(filepath.Join(lfvOutputsDir, output+\".conf\"), outputPipeline, templateData, 0644)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpipeline := pipeline.Pipeline{\n\t\t\tID: pipelineName,\n\t\t\tConfig: filepath.Join(lfvOutputsDir, output+\".conf\"),\n\t\t\tWorkers: 1,\n\t\t}\n\t\tif s.isOrderedPipelineSupported {\n\t\t\tpipeline.Ordered = \"true\"\n\t\t}\n\t\tpipelines = append(pipelines, pipeline)\n\t}\n\n\treturn pipelines, nil\n}\n\n\/\/ ExecuteTest runs a test case set against the Logstash configuration, that has\n\/\/ been loaded previously with SetupTest.\nfunc (s *Session) ExecuteTest(inputPlugin string, inputLines []string, inEvents []map[string]interface{}, expectedEvents int) error {\n\ts.testexec++\n\tpipelineName := fmt.Sprintf(\"lfv_input_%d\", s.testexec)\n\tinputDir := filepath.Join(s.sessionDir, \"lfv_inputs\", strconv.Itoa(s.testexec))\n\tinputPluginName := fmt.Sprintf(\"%s_%s_%s\", \"__lfv_input\", s.id, inputPlugin)\n\tinputCodec, ok := s.inputPluginCodecs[inputPlugin]\n\tif !ok {\n\t\tinputCodec = \"codec => plain\"\n\t}\n\n\t\/\/ Prepare input directory\n\terr := os.MkdirAll(inputDir, 0700)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfieldsFilename := filepath.Join(inputDir, \"fields.json\")\n\terr = prepareFields(fieldsFilename, inEvents)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpipelineFilename := filepath.Join(inputDir, \"input.conf\")\n\terr = createInput(pipelineFilename, fieldsFilename, inputPluginName, inputLines, inputCodec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpipeline := pipeline.Pipeline{\n\t\tID: pipelineName,\n\t\tConfig: pipelineFilename,\n\t\tWorkers: 1,\n\t}\n\tif s.isOrderedPipelineSupported {\n\t\tpipeline.Ordered = \"true\"\n\t}\n\tpipelines := append(s.pipelines, pipeline)\n\terr = s.logstashController.ExecuteTest(pipelines, expectedEvents)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc prepareFields(fieldsFilename string, inEvents []map[string]interface{}) error {\n\tfields := make(map[string]map[string]interface{})\n\n\tfor i, event := range inEvents {\n\t\tid := fmt.Sprintf(\"%d\", i)\n\t\tfields[id] = event\n\t}\n\n\tbfields, err := json.Marshal(fields)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(fieldsFilename, bfields, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc createInput(pipelineFilename string, fieldsFilename string, inputPluginName string, inputLines []string, inputCodec string) error {\n\tfor i := range inputLines {\n\t\tvar err error\n\t\tinputLine, err := astutil.Quote(inputLines[i], ast.DoubleQuoted)\n\t\tif err != nil {\n\t\t\tinputLine = astutil.QuoteWithEscape(inputLines[i], ast.SingleQuoted)\n\t\t}\n\t\tinputLines[i] = inputLine\n\t}\n\n\ttemplateData := struct {\n\t\tInputPluginName string\n\t\tInputLines string\n\t\tInputCodec string\n\t\tFieldsFilename string\n\t\tDummyEventInputIndicator string\n\t}{\n\t\tInputPluginName: inputPluginName,\n\t\tInputLines: strings.Join(inputLines, \", \"),\n\t\tInputCodec: inputCodec,\n\t\tFieldsFilename: fieldsFilename,\n\t\tDummyEventInputIndicator: testcase.DummyEventInputIndicator,\n\t}\n\terr := template.ToFile(pipelineFilename, inputGenerator, templateData, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ GetResults returns the returned events from Logstash.\nfunc (s *Session) GetResults() ([]string, error) {\n\tresults, err := s.logstashController.GetResults()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn results, nil\n}\n\n\/\/ GetStats returns the statistics for a test suite.\nfunc (s *Session) GetStats() {\n\tpanic(\"not implemented\")\n}\n\nfunc (s *Session) teardown() error {\n\t\/\/ TODO: Perform a reset of the Logstash instance including Stdin Buffer, etc.\n\terr1 := s.logstashController.Teardown()\n\tvar err2 error\n\tif !s.noCleanup {\n\t\terr2 = os.RemoveAll(s.sessionDir)\n\t}\n\tif err1 != nil || err2 != nil {\n\t\treturn errors.Errorf(\"session teardown failed: %v, %v\", err1, err2)\n\t}\n\n\treturn nil\n}\n<commit_msg>Fix handling of input codecs<commit_after>package session\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com\/breml\/logstash-config\/ast\"\n\t\"github.com\/breml\/logstash-config\/ast\/astutil\"\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/magnusbaeck\/logstash-filter-verifier\/v2\/internal\/daemon\/idgen\"\n\t\"github.com\/magnusbaeck\/logstash-filter-verifier\/v2\/internal\/daemon\/logstashconfig\"\n\t\"github.com\/magnusbaeck\/logstash-filter-verifier\/v2\/internal\/daemon\/pipeline\"\n\t\"github.com\/magnusbaeck\/logstash-filter-verifier\/v2\/internal\/daemon\/pool\"\n\t\"github.com\/magnusbaeck\/logstash-filter-verifier\/v2\/internal\/daemon\/template\"\n\t\"github.com\/magnusbaeck\/logstash-filter-verifier\/v2\/internal\/logging\"\n\t\"github.com\/magnusbaeck\/logstash-filter-verifier\/v2\/internal\/testcase\"\n)\n\ntype Session struct {\n\tid string\n\n\tlogstashController pool.LogstashController\n\tisOrderedPipelineSupported bool\n\n\tbaseDir string\n\tsessionDir string\n\n\tpipelines pipeline.Pipelines\n\tinputPluginCodecs map[string]string\n\ttestexec int\n\n\tnoCleanup bool\n\n\tlog logging.Logger\n}\n\nfunc newSession(baseDir string, logstashController pool.LogstashController, noCleanup bool, isOrderedPipelineSupported bool, log logging.Logger) *Session {\n\tsessionID := idgen.New()\n\tsessionDir := fmt.Sprintf(\"%s\/session\/%s\", baseDir, sessionID)\n\treturn &Session{\n\t\tid: sessionID,\n\t\tbaseDir: baseDir,\n\t\tsessionDir: sessionDir,\n\t\tlogstashController: logstashController,\n\t\tisOrderedPipelineSupported: isOrderedPipelineSupported,\n\t\tnoCleanup: noCleanup,\n\t\tinputPluginCodecs: map[string]string{},\n\t\tlog: log,\n\t}\n}\n\n\/\/ ID returns the id of the session.\nfunc (s Session) ID() string {\n\treturn s.id\n}\n\n\/\/ setupTest prepares the Logstash configuration for a new test run.\nfunc (s *Session) setupTest(pipelines pipeline.Pipelines, configFiles []logstashconfig.File) error {\n\terr := os.MkdirAll(s.sessionDir, 0700)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsutConfigDir := filepath.Join(s.sessionDir, \"sut\")\n\n\t\/\/ adjust pipeline names and config directories to session\n\tfor i := range pipelines {\n\t\tpipelineName := fmt.Sprintf(\"lfv_%s_%s\", s.id, pipelines[i].ID)\n\n\t\tpipelines[i].ID = pipelineName\n\t\tpipelines[i].Config = filepath.Join(sutConfigDir, pipelines[i].Config)\n\t\tif s.isOrderedPipelineSupported {\n\t\t\tpipelines[i].Ordered = \"true\"\n\t\t}\n\t\tpipelines[i].Workers = 1\n\t}\n\n\t\/\/ Preprocess and Save Config Files\n\tfor _, configFile := range configFiles {\n\t\tinputCodecs, err := configFile.ReplaceInputs(s.id)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor id, codec := range inputCodecs {\n\t\t\ts.inputPluginCodecs[id] = codec\n\t\t}\n\n\t\toutputs, err := configFile.ReplaceOutputs()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = configFile.Save(sutConfigDir)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\toutputPipelines, err := s.createOutputPipelines(outputs)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tpipelines = append(pipelines, outputPipelines...)\n\t}\n\n\t\/\/ Reload Logstash Config\n\ts.pipelines = pipelines\n\t\/\/ err = s.logstash.ReloadPipelines(pipelines)\n\terr = s.logstashController.SetupTest(pipelines)\n\tif err != nil {\n\t\ts.log.Errorf(\"failed to reload Logstash config: %v\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (s *Session) createOutputPipelines(outputs []string) ([]pipeline.Pipeline, error) {\n\tlfvOutputsDir := filepath.Join(s.sessionDir, \"lfv_outputs\")\n\terr := os.MkdirAll(lfvOutputsDir, 0700)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpipelines := make([]pipeline.Pipeline, 0)\n\tfor _, output := range outputs {\n\t\tpipelineName := fmt.Sprintf(\"lfv_output_%s\", output)\n\n\t\ttemplateData := struct {\n\t\t\tPipelineName string\n\t\t\tPipelineOrigName string\n\t\t}{\n\t\t\tPipelineName: pipelineName,\n\t\t\tPipelineOrigName: output,\n\t\t}\n\n\t\terr = template.ToFile(filepath.Join(lfvOutputsDir, output+\".conf\"), outputPipeline, templateData, 0644)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpipeline := pipeline.Pipeline{\n\t\t\tID: pipelineName,\n\t\t\tConfig: filepath.Join(lfvOutputsDir, output+\".conf\"),\n\t\t\tWorkers: 1,\n\t\t}\n\t\tif s.isOrderedPipelineSupported {\n\t\t\tpipeline.Ordered = \"true\"\n\t\t}\n\t\tpipelines = append(pipelines, pipeline)\n\t}\n\n\treturn pipelines, nil\n}\n\n\/\/ ExecuteTest runs a test case set against the Logstash configuration, that has\n\/\/ been loaded previously with SetupTest.\nfunc (s *Session) ExecuteTest(inputPlugin string, inputLines []string, inEvents []map[string]interface{}, expectedEvents int) error {\n\ts.testexec++\n\tpipelineName := fmt.Sprintf(\"lfv_input_%d\", s.testexec)\n\tinputDir := filepath.Join(s.sessionDir, \"lfv_inputs\", strconv.Itoa(s.testexec))\n\tinputPluginName := fmt.Sprintf(\"%s_%s_%s\", \"__lfv_input\", s.id, inputPlugin)\n\tinputCodec, ok := s.inputPluginCodecs[inputPlugin]\n\tif !ok {\n\t\tinputCodec = \"codec => plain\"\n\t}\n\n\t\/\/ Prepare input directory\n\terr := os.MkdirAll(inputDir, 0700)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfieldsFilename := filepath.Join(inputDir, \"fields.json\")\n\terr = prepareFields(fieldsFilename, inEvents)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpipelineFilename := filepath.Join(inputDir, \"input.conf\")\n\terr = createInput(pipelineFilename, fieldsFilename, inputPluginName, inputLines, inputCodec)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpipeline := pipeline.Pipeline{\n\t\tID: pipelineName,\n\t\tConfig: pipelineFilename,\n\t\tWorkers: 1,\n\t}\n\tif s.isOrderedPipelineSupported {\n\t\tpipeline.Ordered = \"true\"\n\t}\n\tpipelines := append(s.pipelines, pipeline)\n\terr = s.logstashController.ExecuteTest(pipelines, expectedEvents)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc prepareFields(fieldsFilename string, inEvents []map[string]interface{}) error {\n\tfields := make(map[string]map[string]interface{})\n\n\tfor i, event := range inEvents {\n\t\tid := fmt.Sprintf(\"%d\", i)\n\t\tfields[id] = event\n\t}\n\n\tbfields, err := json.Marshal(fields)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ioutil.WriteFile(fieldsFilename, bfields, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc createInput(pipelineFilename string, fieldsFilename string, inputPluginName string, inputLines []string, inputCodec string) error {\n\tfor i := range inputLines {\n\t\tvar err error\n\t\tinputLine, err := astutil.Quote(inputLines[i], ast.DoubleQuoted)\n\t\tif err != nil {\n\t\t\tinputLine = astutil.QuoteWithEscape(inputLines[i], ast.SingleQuoted)\n\t\t}\n\t\tinputLines[i] = inputLine\n\t}\n\n\ttemplateData := struct {\n\t\tInputPluginName string\n\t\tInputLines string\n\t\tInputCodec string\n\t\tFieldsFilename string\n\t\tDummyEventInputIndicator string\n\t}{\n\t\tInputPluginName: inputPluginName,\n\t\tInputLines: strings.Join(inputLines, \", \"),\n\t\tInputCodec: inputCodec,\n\t\tFieldsFilename: fieldsFilename,\n\t\tDummyEventInputIndicator: testcase.DummyEventInputIndicator,\n\t}\n\terr := template.ToFile(pipelineFilename, inputGenerator, templateData, 0600)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ GetResults returns the returned events from Logstash.\nfunc (s *Session) GetResults() ([]string, error) {\n\tresults, err := s.logstashController.GetResults()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn results, nil\n}\n\n\/\/ GetStats returns the statistics for a test suite.\nfunc (s *Session) GetStats() {\n\tpanic(\"not implemented\")\n}\n\nfunc (s *Session) teardown() error {\n\t\/\/ TODO: Perform a reset of the Logstash instance including Stdin Buffer, etc.\n\terr1 := s.logstashController.Teardown()\n\tvar err2 error\n\tif !s.noCleanup {\n\t\terr2 = os.RemoveAll(s.sessionDir)\n\t}\n\tif err1 != nil || err2 != nil {\n\t\treturn errors.Errorf(\"session teardown failed: %v, %v\", err1, err2)\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"time\"\n)\n\nconst (\n\tWIN = 100\n)\n\nfunc main() {\n\n\trand.Seed(time.Now().Unix())\n\n\tl, err := net.Listen(\"tcp\", \"localhost:4000\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor {\n\t\tconn, err := l.Accept()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tenc := json.NewEncoder(conn)\n\t\tdec := json.NewDecoder(conn)\n\t\tvar player user\n\t\tconn.Write([]byte(\"input your name plase: {\\\"Name\\\":\\\"gwy\\\"}\\n\"))\n\t\tif err := dec.Decode(&player); err == io.EOF {\n\t\t\tenc.Encode(fmt.Sprintf(\"err: %v\", err))\n\t\t} else if err != nil {\n\t\t\tenc.Encode(fmt.Sprintf(\"err: %v\", err))\n\t\t} else {\n\t\t\tplayer.Score = 0\n\t\t\tplayer.Rwc = conn\n\t\t}\n\t\tgo match(&player)\n\t}\n}\n\nvar players = make(chan Player)\n\nfunc match(p Player) {\n\tp.Write([]byte(\"Waiting for a player...\"))\n\t\/\/fmt.Fprint(p \"Waiting for a player...\")\n\n\tselect {\n\tcase players <- p:\n\t\t\/\/ now handled by the other goroutine\n\tcase p2 := <-players:\n\t\tPlay(p, p2)\n\tcase <-time.After(5 * time.Second):\n\t\t\/\/r, w := io.Pipe()\n\t\tw := ioutil.Discard\n\t\tc := &computer{w, \"computer1\", 0, 0, 0, 27}\n\t\tPlay(p, c)\n\t}\n}\n\nfunc Play(a, b Player) {\n\tdefer a.Close()\n\tdefer b.Close()\n\nagain:\n\ta.Reset()\n\tb.Reset()\n\ta.Write([]byte(\"Game Begin\\n\"))\n\ta.Write([]byte(\"----------------------------\\n\"))\n\tb.Write([]byte(\"Game Begin\\n\"))\n\tb.Write([]byte(\"----------------------------\\n\"))\n\tvar players = make(chan Player, 2)\n\tplayers <- a\n\tplayers <- b\n\tfor {\n\t\tplayer := <-players\n\t\ta.Write([]byte(fmt.Sprintf(\"----------Now %s turn----------\\n\", player.GetName())))\n\t\tb.Write([]byte(fmt.Sprintf(\"----------Now %s turn----------\\n\", player.GetName())))\n\t\tscore := player.Play()\n\t\tif score > WIN {\n\t\t\ta.Write([]byte(fmt.Sprintf(\"GAME OVER.\\n %s win.\\n\", player.GetName())))\n\t\t\ta.Write([]byte(fmt.Sprintf(\"Play again? y\/n\\n\")))\n\t\t\tb.Write([]byte(fmt.Sprintf(\"GAME OVER.\\n %s win.\\n\", player.GetName())))\n\t\t\tb.Write([]byte(fmt.Sprintf(\"Play again? y\/n\\n\")))\n\n\t\t\tvar buffer []byte\n\t\t\tbuffer = make([]byte, 2)\n\t\t\t_, err := a.Read(buffer)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif string(buffer) == \"n\\n\" {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tplayers <- player\n\t\t\t\tgoto again \/\/need modfiy here\n\t\t\t}\n\t\t}\n\t\ta.Write([]byte(fmt.Sprintf(\"Player %s get %d in this turn, now total score is %d\\n\",\n\t\t\tplayer.GetName(), player.GetThisTurn(), player.GetScore())))\n\t\tb.Write([]byte(fmt.Sprintf(\"Player %s get %d in this turn, now total score is %d\\n\",\n\t\t\tplayer.GetName(), player.GetThisTurn(), player.GetScore())))\n\t\tplayers <- player\n\t}\n\n}\n\ntype Player interface {\n\tio.ReadWriteCloser\n\tPlay() int\n\tGetName() string\n\tGetThisTurn() int\n\tGetScore() int\n\tReset()\n}\n\ntype computer struct {\n\t\/\/Rc io.ReadCloser\n\tWt io.Writer\n\tName string\n\tScore int\n\tThisTurn int\n\tRollScore int\n\tmax int\n}\n\nfunc (player *computer) Read(p []byte) (n int, err error) {\n\treturn 0, nil \/\/player.Rc.Read(p)\n}\nfunc (player *computer) Write(p []byte) (n int, err error) {\n\treturn player.Wt.Write(p)\n}\nfunc (player *computer) Close() error {\n\treturn nil \/\/player.Rc.Close()\n}\nfunc (player *computer) GetName() string {\n\treturn player.Name\n}\nfunc (player *computer) GetThisTurn() int {\n\treturn player.ThisTurn\n}\nfunc (player *computer) GetScore() int {\n\treturn player.Score\n}\nfunc (player *computer) Reset() {\n\tplayer.Score = 0\n\tplayer.ThisTurn = 0\n\tplayer.RollScore = 0\n\treturn\n}\nfunc (player *computer) Play() int {\n\tplayer.ThisTurn = 0\n\tfor player.ThisTurn < player.max {\n\t\tplayer.RollScore = rand.Intn(6) + 1\n\t\tplayer.Wt.Write([]byte(fmt.Sprintf(\"This turn %d, rolling... get %d.\\n\", player.ThisTurn, player.RollScore)))\n\t\tif player.RollScore == 1 {\n\t\t\tplayer.ThisTurn = 0\n\t\t\tbreak\n\t\t} else {\n\t\t\tplayer.ThisTurn += player.RollScore\n\t\t}\n\t}\n\tplayer.Score += player.ThisTurn\n\tplayer.Wt.Write([]byte(fmt.Sprintf(\"Finally, total score %d, this turn %d.\\n\", player.Score, player.ThisTurn)))\n\treturn player.Score\n}\n\ntype CmdMessage struct {\n\tCMD uint32 \/\/ROLL or STAY\n}\n\nconst (\n\tROLL = iota\n\tSTAY\n)\n\ntype user struct {\n\tRwc io.ReadWriteCloser\n\tName string\n\tScore int\n\tThisTurn int\n\tRollScore int\n}\n\nfunc (player *user) Read(p []byte) (n int, err error) {\n\treturn player.Rwc.Read(p)\n}\nfunc (player *user) Write(p []byte) (n int, err error) {\n\treturn player.Rwc.Write(p)\n}\nfunc (player *user) Close() error {\n\treturn player.Rwc.Close()\n}\n\nfunc (player *user) GetName() string {\n\treturn player.Name\n}\nfunc (player *user) GetThisTurn() int {\n\treturn player.ThisTurn\n}\nfunc (player *user) GetScore() int {\n\treturn player.Score\n}\nfunc (player *user) Reset() {\n\tplayer.Score = 0\n\tplayer.ThisTurn = 0\n\tplayer.RollScore = 0\n\treturn\n}\n\nfunc (player *user) Roll() (end bool) {\n\tend = false\n\tplayer.RollScore = rand.Intn(6) + 1\n\tif player.RollScore == 1 {\n\t\tplayer.ThisTurn = 0\n\t\tend = true\n\t} else {\n\t\tplayer.ThisTurn += player.RollScore\n\t}\n\treturn\n}\nfunc (player *user) Play() int {\n\tdec := json.NewDecoder(player.Rwc)\n\tenc := json.NewEncoder(player.Rwc)\n\tend := false\n\tplayer.ThisTurn = 0\n\tplayer.RollScore = 0\n\tfor !end {\n\t\tenc.Encode(player)\n\t\tplayer.Rwc.Write([]byte(fmt.Sprintf(\"roll: {\\\"CMD\\\":0} or stay: {\\\"CMD\\\":1}?\\n\")))\n\t\tvar msg CmdMessage\n\t\tif err := dec.Decode(&msg); err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tplayer.Rwc.Write([]byte(fmt.Sprintf(\"%v\", err)))\n\t\t\tlog.Fatal(\"Error when decode: \", err)\n\t\t} else {\n\t\t\tswitch msg.CMD {\n\t\t\tcase ROLL:\n\t\t\t\tplayer.Rwc.Write([]byte(fmt.Sprintf(\"Rolling... \\n\")))\n\t\t\t\tend = player.Roll()\n\t\t\tcase STAY:\n\t\t\t\tend = true\n\t\t\tdefault:\n\t\t\t\tplayer.Rwc.Write([]byte(fmt.Sprintf(\"Invalid input, roll: {\\\"CMD\\\":0} or stay: {\\\"CMD\\\":1}?\")))\n\t\t\t}\n\t\t}\n\t}\n\tplayer.Score += player.ThisTurn\n\tenc.Encode(player)\n\treturn player.Score\n}\n<commit_msg>version 0.9.3 output notice to opponnet when playing.<commit_after>package main\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\"\n\t\/\/\"io\/ioutil\"\n\t\"log\"\n\t\"math\/rand\"\n\t\"net\"\n\t\"time\"\n)\n\nconst (\n\tWIN = 100\n)\n\nfunc main() {\n\n\trand.Seed(time.Now().Unix())\n\n\tl, err := net.Listen(\"tcp\", \"localhost:4000\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor {\n\t\tconn, err := l.Accept()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tenc := json.NewEncoder(conn)\n\t\tdec := json.NewDecoder(conn)\n\t\tvar player user\n\t\tconn.Write([]byte(\"input your name plase: {\\\"Name\\\":\\\"gwy\\\"}\\n\"))\n\t\tif err := dec.Decode(&player); err == io.EOF {\n\t\t\tenc.Encode(fmt.Sprintf(\"err: %v\", err))\n\t\t} else if err != nil {\n\t\t\tenc.Encode(fmt.Sprintf(\"err: %v\", err))\n\t\t} else {\n\t\t\tplayer.Score = 0\n\t\t\tplayer.Rwc = conn\n\t\t}\n\t\tgo match(&player)\n\t}\n}\n\nvar players = make(chan Player)\n\nfunc match(p Player) {\n\tp.Write([]byte(\"Waiting for a player...\\n\"))\n\n\tselect {\n\tcase players <- p:\n\t\t\/\/ now handled by the other goroutine\n\tcase p2 := <-players:\n\t\tPlay(p, p2)\n\tcase <-time.After(5 * time.Second):\n\t\t\/\/w := ioutil.Discard\n\t\tc := &computer{Name: \"computer1\", max: 27}\n\t\tPlay(p, c)\n\t}\n}\n\nfunc Play(a, b Player) {\n\tdefer a.Close()\n\tdefer b.Close()\n\nagain:\n\ta.Reset()\n\tb.Reset()\n\ta.Write([]byte(fmt.Sprintf(\"%s VS %s, Game Begin\\n\", a.GetName(), b.GetName())))\n\ta.Write([]byte(\"----------------------------\\n\"))\n\tb.Write([]byte(fmt.Sprintf(\"%s VS %s, Game Begin\\n\", a.GetName(), b.GetName())))\n\tb.Write([]byte(\"----------------------------\\n\"))\n\tcurrent, opponent := a, b\n\tfor {\n\t\tscore := current.Play(opponent)\n\t\tif score > WIN {\n\t\t\ta.Write([]byte(fmt.Sprintf(\"GAME OVER.\\n %s win.\\n\", current.GetName())))\n\t\t\ta.Write([]byte(fmt.Sprintf(\"Play again? y\/n\\n\")))\n\t\t\tb.Write([]byte(fmt.Sprintf(\"GAME OVER.\\n %s win.\\n\", current.GetName())))\n\t\t\tb.Write([]byte(fmt.Sprintf(\"Play again? y\/n\\n\")))\n\n\t\t\tvar buffer []byte\n\t\t\tbuffer = make([]byte, 2)\n\t\t\t_, err := a.Read(buffer)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif string(buffer) == \"n\\n\" {\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\tgoto again \/\/need modfiy here\n\t\t\t}\n\t\t}\n\t\tcurrent, opponent = opponent, current\n\t}\n\n}\n\ntype Player interface {\n\tio.ReadWriteCloser\n\tPlay(opponent Player) int\n\tGetName() string\n\tGetThisTurn() int\n\tGetScore() int\n\tReset()\n}\n\ntype computer struct {\n\t\/\/Rc io.ReadCloser\n\t\/\/Wt io.Writer\n\tName string\n\tScore int\n\tThisTurn int\n\tRollScore int\n\tmax int\n}\n\nfunc (player *computer) Read(p []byte) (n int, err error) {\n\treturn 0, nil \/\/player.Rc.Read(p)\n}\nfunc (player *computer) Write(p []byte) (n int, err error) {\n\treturn 9, nil \/\/player.Wt.Write(p)\n}\nfunc (player *computer) Close() error {\n\treturn nil \/\/player.Rc.Close()\n}\nfunc (player *computer) GetName() string {\n\treturn player.Name\n}\nfunc (player *computer) GetThisTurn() int {\n\treturn player.ThisTurn\n}\nfunc (player *computer) GetScore() int {\n\treturn player.Score\n}\nfunc (player *computer) Reset() {\n\tplayer.Score = 0\n\tplayer.ThisTurn = 0\n\tplayer.RollScore = 0\n\treturn\n}\nfunc (player *computer) Play(opponent Player) int {\n\topponent.Write([]byte(fmt.Sprintf(\"----------Now %s turn----------\\n\", player.GetName())))\n\tplayer.ThisTurn = 0\n\tend := false\n\tfor player.ThisTurn < player.max && !end {\n\t\tplayer.RollScore = rand.Intn(6) + 1\n\t\tif player.RollScore == 1 {\n\t\t\tplayer.ThisTurn = 0\n\t\t\ttime.Sleep(100 * time.Millisecond)\n\t\t\tend = true\n\t\t} else {\n\t\t\tplayer.ThisTurn += player.RollScore\n\t\t}\n\t\topponent.Write([]byte(fmt.Sprintf(\"Rolling... get %d, this turn %d.\\n\",\n\t\t\tplayer.RollScore, player.ThisTurn)))\n\t}\n\tplayer.Score += player.ThisTurn\n\topponent.Write([]byte(fmt.Sprintf(\"Finally, this turn %d, total score (%d vs %d).\\n\",\n\t\tplayer.ThisTurn, player.Score, opponent.GetScore())))\n\treturn player.Score\n}\n\ntype CmdMessage struct {\n\tCMD uint32 \/\/ROLL or STAY\n}\n\nconst (\n\tROLL = iota\n\tSTAY\n)\n\ntype user struct {\n\tRwc io.ReadWriteCloser\n\tName string\n\tScore int\n\tThisTurn int\n\tRollScore int\n}\n\nfunc (player *user) Read(p []byte) (n int, err error) {\n\treturn player.Rwc.Read(p)\n}\nfunc (player *user) Write(p []byte) (n int, err error) {\n\treturn player.Rwc.Write(p)\n}\nfunc (player *user) Close() error {\n\treturn player.Rwc.Close()\n}\n\nfunc (player *user) GetName() string {\n\treturn player.Name\n}\nfunc (player *user) GetThisTurn() int {\n\treturn player.ThisTurn\n}\nfunc (player *user) GetScore() int {\n\treturn player.Score\n}\nfunc (player *user) Reset() {\n\tplayer.Score = 0\n\tplayer.ThisTurn = 0\n\tplayer.RollScore = 0\n\treturn\n}\n\nfunc (player *user) Roll() (end bool) {\n\tend = false\n\tplayer.RollScore = rand.Intn(6) + 1\n\tif player.RollScore == 1 {\n\t\tplayer.ThisTurn = 0\n\t\tend = true\n\t} else {\n\t\tplayer.ThisTurn += player.RollScore\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\treturn\n}\nfunc (player *user) Play(opponent Player) int {\n\tplayer.Write([]byte(fmt.Sprintf(\"----------Now %s turn----------\\n\", player.GetName())))\n\topponent.Write([]byte(fmt.Sprintf(\"----------Now %s turn----------\\n\", player.GetName())))\n\tdec := json.NewDecoder(player)\n\t\/\/enc := json.NewEncoder(player)\n\tplayer.ThisTurn = 0\n\tplayer.RollScore = 0\n\tend := false\n\tfor !end {\n\t\t\/\/enc.Encode(player)\n\t\tplayer.Write([]byte(fmt.Sprintf(\"roll: {\\\"CMD\\\":0} or stay: {\\\"CMD\\\":1}?\\n\")))\n\t\tvar msg CmdMessage\n\t\tif err := dec.Decode(&msg); err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tplayer.Write([]byte(fmt.Sprintf(\"%v\", err)))\n\t\t\tlog.Fatal(\"Error when decode: \", err)\n\t\t} else {\n\t\t\tswitch msg.CMD {\n\t\t\tcase ROLL:\n\t\t\t\tend = player.Roll()\n\t\t\t\tplayer.Write([]byte(fmt.Sprintf(\"Rolling... get %d, this turn %d.\\n\",\n\t\t\t\t\tplayer.RollScore, player.ThisTurn)))\n\t\t\t\topponent.Write([]byte(fmt.Sprintf(\"Rolling... get %d, this turn %d.\\n\",\n\t\t\t\t\tplayer.RollScore, player.ThisTurn)))\n\t\t\tcase STAY:\n\t\t\t\tend = true\n\t\t\tdefault:\n\t\t\t\tplayer.Write([]byte(fmt.Sprintf(\"Invalid input, roll: {\\\"CMD\\\":0} or stay: {\\\"CMD\\\":1}?\")))\n\t\t\t}\n\t\t}\n\t}\n\tplayer.Score += player.ThisTurn\n\tplayer.Write([]byte(fmt.Sprintf(\"Finally, this turn %d, total score (%d vs %d).\\n\",\n\t\tplayer.ThisTurn, player.Score, opponent.GetScore())))\n\topponent.Write([]byte(fmt.Sprintf(\"Finally, this turn %d, total score (%d vs %d).\\n\",\n\t\tplayer.ThisTurn, player.Score, opponent.GetScore())))\n\treturn player.Score\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"context\"\n\t\"errors\"\n\tggio \"github.com\/gogo\/protobuf\/io\"\n\tp2p_peer \"github.com\/ipfs\/go-libp2p-peer\"\n\tp2p_pstore \"github.com\/ipfs\/go-libp2p-peerstore\"\n\tmultiaddr \"github.com\/jbenet\/go-multiaddr\"\n\tp2p_net \"github.com\/libp2p\/go-libp2p\/p2p\/net\"\n\tmc \"github.com\/mediachain\/concat\/mc\"\n\tpb \"github.com\/mediachain\/concat\/proto\"\n\t\"log\"\n\t\"time\"\n)\n\nvar NodeOffline = errors.New(\"Node is offline\")\nvar NoDirectory = errors.New(\"No directory server\")\nvar UnknownPeer = errors.New(\"Unknown peer\")\nvar IllegalState = errors.New(\"Illegal node state\")\n\n\/\/ goOffline stops the network\nfunc (node *Node) goOffline() error {\n\tnode.mx.Lock()\n\tdefer node.mx.Unlock()\n\n\tswitch node.status {\n\tcase StatusPublic:\n\t\tnode.dirCancel()\n\t\tfallthrough\n\tcase StatusOnline:\n\t\terr := node.host.Close()\n\t\tnode.status = StatusOffline\n\t\tlog.Println(\"Node is offline\")\n\t\treturn err\n\n\tdefault:\n\t\treturn nil\n\t}\n}\n\n\/\/ goOnline starts the network; if the node is already public, it stays public\nfunc (node *Node) goOnline() error {\n\tnode.mx.Lock()\n\tdefer node.mx.Unlock()\n\n\tswitch node.status {\n\tcase StatusOffline:\n\t\terr := node._goOnline()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tnode.status = StatusOnline\n\t\tlog.Println(\"Node is online\")\n\t\treturn nil\n\n\tdefault:\n\t\treturn nil\n\t}\n}\n\nfunc (node *Node) _goOnline() error {\n\thost, err := mc.NewHost(node.Identity, node.laddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thost.SetStreamHandler(\"\/mediachain\/node\/ping\", node.pingHandler)\n\tnode.host = host\n\n\treturn nil\n}\n\n\/\/ goPublic starts the network if it's not already up and registers with the\n\/\/ directory; fails with NoDirectory if that hasn't been configured.\nfunc (node *Node) goPublic() error {\n\tif node.dir == nil {\n\t\treturn NoDirectory\n\t}\n\n\tnode.mx.Lock()\n\tdefer node.mx.Unlock()\n\n\tswitch node.status {\n\tcase StatusOffline:\n\t\terr := node._goOnline()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfallthrough\n\n\tcase StatusOnline:\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tgo node.registerPeer(ctx, node.laddr)\n\n\t\tnode.dirCancel = cancel\n\t\tnode.status = StatusPublic\n\n\t\tlog.Println(\"Node is public\")\n\t\treturn nil\n\n\tdefault:\n\t\treturn nil\n\t}\n}\n\nfunc (node *Node) pingHandler(s p2p_net.Stream) {\n\tdefer s.Close()\n\n\tpid := s.Conn().RemotePeer()\n\tlog.Printf(\"node\/ping: new stream from %s\", pid.Pretty())\n\n\tvar ping pb.Ping\n\tvar pong pb.Pong\n\tr := ggio.NewDelimitedReader(s, mc.MaxMessageSize)\n\tw := ggio.NewDelimitedWriter(s)\n\n\tfor {\n\t\terr := r.ReadMsg(&ping)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tlog.Printf(\"node\/ping: ping from %s; ponging\", pid.Pretty())\n\n\t\terr = w.WriteMsg(&pong)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (node *Node) registerPeer(ctx context.Context, addrs ...multiaddr.Multiaddr) {\n\tfor {\n\t\terr := node.registerPeerImpl(ctx, addrs...)\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ sleep and retry\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\n\t\tcase <-time.After(5 * time.Minute):\n\t\t\tlog.Println(\"Retrying to register with directory\")\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc (node *Node) registerPeerImpl(ctx context.Context, addrs ...multiaddr.Multiaddr) error {\n\terr := node.host.Connect(ctx, *node.dir)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to connect to directory: %s\", err.Error())\n\t\treturn err\n\t}\n\n\ts, err := node.host.NewStream(ctx, node.dir.ID, \"\/mediachain\/dir\/register\")\n\tif err != nil {\n\t\tlog.Printf(\"Failed to open directory stream: %s\", err.Error())\n\t\treturn err\n\t}\n\tdefer s.Close()\n\n\tpinfo := p2p_pstore.PeerInfo{node.ID, addrs}\n\tvar pbpi pb.PeerInfo\n\tmc.PBFromPeerInfo(&pbpi, pinfo)\n\tmsg := pb.RegisterPeer{&pbpi}\n\n\tw := ggio.NewDelimitedWriter(s)\n\tfor {\n\t\tlog.Printf(\"Registering with directory\")\n\t\terr = w.WriteMsg(&msg)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to register with directory: %s\", err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil\n\n\t\tcase <-time.After(5 * time.Minute):\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc (node *Node) doPing(ctx context.Context, pid p2p_peer.ID) error {\n\tif node.status == StatusOffline {\n\t\treturn NodeOffline\n\t}\n\n\tpinfo, err := node.doLookup(ctx, pid)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = node.host.Connect(ctx, pinfo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts, err := node.host.NewStream(ctx, pinfo.ID, \"\/mediachain\/node\/ping\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer s.Close()\n\n\tvar ping pb.Ping\n\tw := ggio.NewDelimitedWriter(s)\n\terr = w.WriteMsg(&ping)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar pong pb.Pong\n\tr := ggio.NewDelimitedReader(s, mc.MaxMessageSize)\n\terr = r.ReadMsg(&pong)\n\n\treturn err\n}\n\nfunc (node *Node) doLookup(ctx context.Context, pid p2p_peer.ID) (empty p2p_pstore.PeerInfo, err error) {\n\tif node.status == StatusOffline {\n\t\treturn empty, NodeOffline\n\t}\n\n\tif node.dir == nil {\n\t\treturn empty, NoDirectory\n\t}\n\n\tnode.host.Connect(ctx, *node.dir)\n\tif err != nil {\n\t\treturn empty, err\n\t}\n\n\ts, err := node.host.NewStream(ctx, node.dir.ID, \"\/mediachain\/dir\/lookup\")\n\tif err != nil {\n\t\treturn empty, err\n\t}\n\tdefer s.Close()\n\n\treq := pb.LookupPeerRequest{pid.Pretty()}\n\tw := ggio.NewDelimitedWriter(s)\n\terr = w.WriteMsg(&req)\n\tif err != nil {\n\t\treturn empty, err\n\t}\n\n\tvar resp pb.LookupPeerResponse\n\tr := ggio.NewDelimitedReader(s, mc.MaxMessageSize)\n\terr = r.ReadMsg(&resp)\n\tif err != nil {\n\t\treturn empty, err\n\t}\n\n\tif resp.Peer == nil {\n\t\treturn empty, UnknownPeer\n\t}\n\n\tpinfo, err := mc.PBToPeerInfo(resp.Peer)\n\tif err != nil {\n\t\treturn empty, err\n\t}\n\n\treturn pinfo, nil\n}\n<commit_msg>mcnode\/proto: variable cosmetics<commit_after>package main\n\nimport (\n\t\"context\"\n\t\"errors\"\n\tggio \"github.com\/gogo\/protobuf\/io\"\n\tp2p_peer \"github.com\/ipfs\/go-libp2p-peer\"\n\tp2p_pstore \"github.com\/ipfs\/go-libp2p-peerstore\"\n\tmultiaddr \"github.com\/jbenet\/go-multiaddr\"\n\tp2p_net \"github.com\/libp2p\/go-libp2p\/p2p\/net\"\n\tmc \"github.com\/mediachain\/concat\/mc\"\n\tpb \"github.com\/mediachain\/concat\/proto\"\n\t\"log\"\n\t\"time\"\n)\n\nvar (\n\tNodeOffline = errors.New(\"Node is offline\")\n\tNoDirectory = errors.New(\"No directory server\")\n\tUnknownPeer = errors.New(\"Unknown peer\")\n\tIllegalState = errors.New(\"Illegal node state\")\n)\n\n\/\/ goOffline stops the network\nfunc (node *Node) goOffline() error {\n\tnode.mx.Lock()\n\tdefer node.mx.Unlock()\n\n\tswitch node.status {\n\tcase StatusPublic:\n\t\tnode.dirCancel()\n\t\tfallthrough\n\tcase StatusOnline:\n\t\terr := node.host.Close()\n\t\tnode.status = StatusOffline\n\t\tlog.Println(\"Node is offline\")\n\t\treturn err\n\n\tdefault:\n\t\treturn nil\n\t}\n}\n\n\/\/ goOnline starts the network; if the node is already public, it stays public\nfunc (node *Node) goOnline() error {\n\tnode.mx.Lock()\n\tdefer node.mx.Unlock()\n\n\tswitch node.status {\n\tcase StatusOffline:\n\t\terr := node._goOnline()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tnode.status = StatusOnline\n\t\tlog.Println(\"Node is online\")\n\t\treturn nil\n\n\tdefault:\n\t\treturn nil\n\t}\n}\n\nfunc (node *Node) _goOnline() error {\n\thost, err := mc.NewHost(node.Identity, node.laddr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\thost.SetStreamHandler(\"\/mediachain\/node\/ping\", node.pingHandler)\n\tnode.host = host\n\n\treturn nil\n}\n\n\/\/ goPublic starts the network if it's not already up and registers with the\n\/\/ directory; fails with NoDirectory if that hasn't been configured.\nfunc (node *Node) goPublic() error {\n\tif node.dir == nil {\n\t\treturn NoDirectory\n\t}\n\n\tnode.mx.Lock()\n\tdefer node.mx.Unlock()\n\n\tswitch node.status {\n\tcase StatusOffline:\n\t\terr := node._goOnline()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfallthrough\n\n\tcase StatusOnline:\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tgo node.registerPeer(ctx, node.laddr)\n\n\t\tnode.dirCancel = cancel\n\t\tnode.status = StatusPublic\n\n\t\tlog.Println(\"Node is public\")\n\t\treturn nil\n\n\tdefault:\n\t\treturn nil\n\t}\n}\n\nfunc (node *Node) pingHandler(s p2p_net.Stream) {\n\tdefer s.Close()\n\n\tpid := s.Conn().RemotePeer()\n\tlog.Printf(\"node\/ping: new stream from %s\", pid.Pretty())\n\n\tvar ping pb.Ping\n\tvar pong pb.Pong\n\tr := ggio.NewDelimitedReader(s, mc.MaxMessageSize)\n\tw := ggio.NewDelimitedWriter(s)\n\n\tfor {\n\t\terr := r.ReadMsg(&ping)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tlog.Printf(\"node\/ping: ping from %s; ponging\", pid.Pretty())\n\n\t\terr = w.WriteMsg(&pong)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (node *Node) registerPeer(ctx context.Context, addrs ...multiaddr.Multiaddr) {\n\tfor {\n\t\terr := node.registerPeerImpl(ctx, addrs...)\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\n\t\t\/\/ sleep and retry\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\n\t\tcase <-time.After(5 * time.Minute):\n\t\t\tlog.Println(\"Retrying to register with directory\")\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc (node *Node) registerPeerImpl(ctx context.Context, addrs ...multiaddr.Multiaddr) error {\n\terr := node.host.Connect(ctx, *node.dir)\n\tif err != nil {\n\t\tlog.Printf(\"Failed to connect to directory: %s\", err.Error())\n\t\treturn err\n\t}\n\n\ts, err := node.host.NewStream(ctx, node.dir.ID, \"\/mediachain\/dir\/register\")\n\tif err != nil {\n\t\tlog.Printf(\"Failed to open directory stream: %s\", err.Error())\n\t\treturn err\n\t}\n\tdefer s.Close()\n\n\tpinfo := p2p_pstore.PeerInfo{node.ID, addrs}\n\tvar pbpi pb.PeerInfo\n\tmc.PBFromPeerInfo(&pbpi, pinfo)\n\tmsg := pb.RegisterPeer{&pbpi}\n\n\tw := ggio.NewDelimitedWriter(s)\n\tfor {\n\t\tlog.Printf(\"Registering with directory\")\n\t\terr = w.WriteMsg(&msg)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to register with directory: %s\", err.Error())\n\t\t\treturn err\n\t\t}\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil\n\n\t\tcase <-time.After(5 * time.Minute):\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc (node *Node) doPing(ctx context.Context, pid p2p_peer.ID) error {\n\tif node.status == StatusOffline {\n\t\treturn NodeOffline\n\t}\n\n\tpinfo, err := node.doLookup(ctx, pid)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = node.host.Connect(ctx, pinfo)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts, err := node.host.NewStream(ctx, pinfo.ID, \"\/mediachain\/node\/ping\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer s.Close()\n\n\tvar ping pb.Ping\n\tw := ggio.NewDelimitedWriter(s)\n\terr = w.WriteMsg(&ping)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar pong pb.Pong\n\tr := ggio.NewDelimitedReader(s, mc.MaxMessageSize)\n\terr = r.ReadMsg(&pong)\n\n\treturn err\n}\n\nfunc (node *Node) doLookup(ctx context.Context, pid p2p_peer.ID) (empty p2p_pstore.PeerInfo, err error) {\n\tif node.status == StatusOffline {\n\t\treturn empty, NodeOffline\n\t}\n\n\tif node.dir == nil {\n\t\treturn empty, NoDirectory\n\t}\n\n\tnode.host.Connect(ctx, *node.dir)\n\tif err != nil {\n\t\treturn empty, err\n\t}\n\n\ts, err := node.host.NewStream(ctx, node.dir.ID, \"\/mediachain\/dir\/lookup\")\n\tif err != nil {\n\t\treturn empty, err\n\t}\n\tdefer s.Close()\n\n\treq := pb.LookupPeerRequest{pid.Pretty()}\n\tw := ggio.NewDelimitedWriter(s)\n\terr = w.WriteMsg(&req)\n\tif err != nil {\n\t\treturn empty, err\n\t}\n\n\tvar resp pb.LookupPeerResponse\n\tr := ggio.NewDelimitedReader(s, mc.MaxMessageSize)\n\terr = r.ReadMsg(&resp)\n\tif err != nil {\n\t\treturn empty, err\n\t}\n\n\tif resp.Peer == nil {\n\t\treturn empty, UnknownPeer\n\t}\n\n\tpinfo, err := mc.PBToPeerInfo(resp.Peer)\n\tif err != nil {\n\t\treturn empty, err\n\t}\n\n\treturn pinfo, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\n\/\/ The Command interface must be implemented by the different command types. It is used to run the command in different\n\/\/ contexts, i.e. either shell, docker, or logging. The last one comes in handy if a command's underlying actions are\n\/\/ rather lengthy or cryptic, but the intent is described easily (like writing assets or files for example).\ntype Command interface {\n\tShell() string \/\/ Used for executing the action in a shell (locally or via ssh).\n\tLogging() string \/\/ Get string used for logging.\n}\n\ntype DockerCommand interface {\n\tDocker() string \/\/ Used for executing the action in a docker context.\n}\n\ntype Renderer interface {\n\tRender(i interface{})\n}\n\ntype Validator interface {\n\tValidate() error\n}\n<commit_msg>removed Command interface from urknall binary assets<commit_after><|endoftext|>"} {"text":"<commit_before>package tree\n\/* \n#cgo LDFLAGS: -lxml2\n#cgo CFLAGS: -I\/usr\/include\/libxml2\n#include <libxml\/tree.h>\n*\/\nimport \"C\"\nimport \"unsafe\"\n\ntype Element struct {\n\t*XmlNode\n}\n\nfunc (node *Element) ElementType() int {\n\telem := (*C.xmlElement)(unsafe.Pointer(node.ptr()))\n\treturn int(elem.etype)\n}\n\nfunc (node *Element) new(ptr *C.xmlNode) *Element {\n\tif ptr == nil {\n\t\treturn nil\n\t}\n\treturn NewNode(unsafe.Pointer(ptr), node.Doc()).(*Element)\n}\n\nfunc (node *Element) NextElement() *Element {\n\treturn node.new(C.xmlNextElementSibling(node.NodePtr))\n}\n\nfunc (node *Element) PrevElement() *Element {\n\treturn node.new(C.xmlPreviousElementSibling(node.NodePtr))\n}\n\nfunc (node *Element) FirstElement() *Element {\n\treturn node.new(C.xmlFirstElementChild(node.NodePtr))\n}\n\nfunc (node *Element) LastElement() *Element {\n\treturn node.new(C.xmlLastElementChild(node.NodePtr))\n}\n\nfunc (node *Element) Clear() {\n\t\/\/ Remember, as we delete them, the last one moves to the front\n\tchild := node.First()\n\tfor child != nil {\n\t\tchild.Remove()\n\t\tchild.Free()\n\t\tchild = node.First()\n\t}\n}\n\nfunc (node *Element) Content() string {\n\tchild := node.First()\n\toutput := \"\"\n\tfor child != nil {\n\t\toutput = output + child.DumpHTML()\n\t\tchild = child.Next()\n\t}\n\treturn output\n}\n\nfunc (node *Element) SetContent(content string) {\n\tnode.Clear()\n\tnode.AppendContent(content)\n}\n\nfunc (node *Element) AppendContent(content string) {\n\tchild := node.Doc().ParseFragment(content)\n\tfor child != nil {\n\t\tnode.AppendChildNode(child)\n\t\tchild = child.Next()\n\t}\n}\n\nfunc (node *Element) PrependContent(content string) {\n\tchild := node.Doc().ParseFragment(content).Parent().Last()\n\tfor child != nil {\n\t\tnode.PrependChildNode(child)\n\t\tchild = child.Prev()\n\t}\n}\n<commit_msg>adding in before\/after content methods<commit_after>package tree\n\/* \n#cgo LDFLAGS: -lxml2\n#cgo CFLAGS: -I\/usr\/include\/libxml2\n#include <libxml\/tree.h>\n*\/\nimport \"C\"\nimport \"unsafe\"\n\ntype Element struct {\n\t*XmlNode\n}\n\nfunc (node *Element) ElementType() int {\n\telem := (*C.xmlElement)(unsafe.Pointer(node.ptr()))\n\treturn int(elem.etype)\n}\n\nfunc (node *Element) new(ptr *C.xmlNode) *Element {\n\tif ptr == nil {\n\t\treturn nil\n\t}\n\treturn NewNode(unsafe.Pointer(ptr), node.Doc()).(*Element)\n}\n\nfunc (node *Element) NextElement() *Element {\n\treturn node.new(C.xmlNextElementSibling(node.NodePtr))\n}\n\nfunc (node *Element) PrevElement() *Element {\n\treturn node.new(C.xmlPreviousElementSibling(node.NodePtr))\n}\n\nfunc (node *Element) FirstElement() *Element {\n\treturn node.new(C.xmlFirstElementChild(node.NodePtr))\n}\n\nfunc (node *Element) LastElement() *Element {\n\treturn node.new(C.xmlLastElementChild(node.NodePtr))\n}\n\nfunc (node *Element) Clear() {\n\t\/\/ Remember, as we delete them, the last one moves to the front\n\tchild := node.First()\n\tfor child != nil {\n\t\tchild.Remove()\n\t\tchild.Free()\n\t\tchild = node.First()\n\t}\n}\n\nfunc (node *Element) Content() string {\n\tchild := node.First()\n\toutput := \"\"\n\tfor child != nil {\n\t\toutput = output + child.DumpHTML()\n\t\tchild = child.Next()\n\t}\n\treturn output\n}\n\nfunc (node *Element) SetContent(content string) {\n\tnode.Clear()\n\tnode.AppendContent(content)\n}\n\nfunc (node *Element) AppendContent(content string) {\n\tchild := node.Doc().ParseFragment(content)\n\tfor child != nil {\n\t\tnode.AppendChildNode(child)\n\t\tchild = child.Next()\n\t}\n}\n\nfunc (node *Element) PrependContent(content string) {\n\tchild := node.Doc().ParseFragment(content).Parent().Last()\n\tfor child != nil {\n\t\tnode.PrependChildNode(child)\n\t\tchild = child.Prev()\n\t}\n}\n\nfunc (node *XmlNode) AddContentAfter(content string) {\n\tchild := node.Doc().ParseFragment(content).Parent().Last()\n\tfor child != nil {\n\t\tnode.AddNodeAfter(child)\n\t\tchild = child.Prev()\n\t}\n}\nfunc (node *XmlNode) AddContentBefore(content string) {\n\tchild := node.Doc().ParseFragment(content).Parent().First()\n\tfor child != nil {\n\t\tnode.AddNodeBefore(child)\n\t\tchild = child.Next()\n\t}\n}\n\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/lib\/pq\"\n)\n\nfunc connectToDatabase() {\n\tvar err error\n\tvar rows *sql.Rows\n\tvar schema string\n\tvar schemaVer int64\n\n\tdriver := \"postgres\"\n\n\tconnect := fmt.Sprintf(\"dbname='%s' user='%s' password='%s' host='%s' port='%s' sslmode='%s' connect_timeout='%s'\",\n\t\tSomaCfg.Database.Name,\n\t\tSomaCfg.Database.User,\n\t\tSomaCfg.Database.Pass,\n\t\tSomaCfg.Database.Host,\n\t\tSomaCfg.Database.Port,\n\t\tSomaCfg.Database.TlsMode,\n\t\tSomaCfg.Database.Timeout,\n\t)\n\n\t\/\/ enable handling of infinity timestamps\n\tpq.EnableInfinityTs(NegTimeInf, PosTimeInf)\n\n\tconn, err = sql.Open(driver, connect)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif err = conn.Ping(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Print(\"Connected to database\")\n\tif _, err = conn.Exec(`SET TIME ZONE 'UTC';`); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ required schema versions\n\trequired := map[string]int64{\n\t\t\"inventory\": 201605060001,\n\t\t\"root\": 201605150001,\n\t\t\"auth\": 201605150002,\n\t\t\"soma\": 201605310001,\n\t}\n\n\tif rows, err = conn.Query(`\nSELECT schema,\n MAX(version) AS version\nFROM public.schema_versions\nGROUP BY schema;`); err != nil {\n\t\tlog.Fatal(\"Query db schema versions: \", err)\n\t}\n\n\tfor rows.Next() {\n\t\tif err = rows.Scan(\n\t\t\t&schema,\n\t\t\t&schemaVer,\n\t\t); err != nil {\n\t\t\tlog.Fatal(\"Schema check: \", err)\n\t\t}\n\t\tif rsv, ok := required[schema]; ok {\n\t\t\tif rsv != required[schema] {\n\t\t\t\tlog.Fatal(\"Incompatible schema: \", schema, schemaVer)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"DB Schema %s, version: %d\", schema, schemaVer)\n\t\t\t\tdelete(required, schema)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Fatal(\"Unknown schema: \", schema)\n\t\t}\n\t}\n\tif err = rows.Err(); err != nil {\n\t\tlog.Fatal(\"Schema check: \", err)\n\t}\n\tif len(required) != 0 {\n\t\tfor s, _ := range required {\n\t\t\tlog.Printf(\"Missing schema: %s\", s)\n\t\t}\n\t\tlog.Fatal(\"FATAL - database incomplete\")\n\t}\n}\n\nfunc pingDatabase() {\n\tticker := time.NewTicker(time.Second).C\n\n\tfor {\n\t\t<-ticker\n\t\terr := conn.Ping()\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t}\n\t}\n}\n\n\/\/ vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix\n<commit_msg>Require db schema soma\/201606150001<commit_after>package main\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/lib\/pq\"\n)\n\nfunc connectToDatabase() {\n\tvar err error\n\tvar rows *sql.Rows\n\tvar schema string\n\tvar schemaVer int64\n\n\tdriver := \"postgres\"\n\n\tconnect := fmt.Sprintf(\"dbname='%s' user='%s' password='%s' host='%s' port='%s' sslmode='%s' connect_timeout='%s'\",\n\t\tSomaCfg.Database.Name,\n\t\tSomaCfg.Database.User,\n\t\tSomaCfg.Database.Pass,\n\t\tSomaCfg.Database.Host,\n\t\tSomaCfg.Database.Port,\n\t\tSomaCfg.Database.TlsMode,\n\t\tSomaCfg.Database.Timeout,\n\t)\n\n\t\/\/ enable handling of infinity timestamps\n\tpq.EnableInfinityTs(NegTimeInf, PosTimeInf)\n\n\tconn, err = sql.Open(driver, connect)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif err = conn.Ping(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Print(\"Connected to database\")\n\tif _, err = conn.Exec(`SET TIME ZONE 'UTC';`); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ required schema versions\n\trequired := map[string]int64{\n\t\t\"inventory\": 201605060001,\n\t\t\"root\": 201605150001,\n\t\t\"auth\": 201605150002,\n\t\t\"soma\": 201606150001,\n\t}\n\n\tif rows, err = conn.Query(`\nSELECT schema,\n MAX(version) AS version\nFROM public.schema_versions\nGROUP BY schema;`); err != nil {\n\t\tlog.Fatal(\"Query db schema versions: \", err)\n\t}\n\n\tfor rows.Next() {\n\t\tif err = rows.Scan(\n\t\t\t&schema,\n\t\t\t&schemaVer,\n\t\t); err != nil {\n\t\t\tlog.Fatal(\"Schema check: \", err)\n\t\t}\n\t\tif rsv, ok := required[schema]; ok {\n\t\t\tif rsv != required[schema] {\n\t\t\t\tlog.Fatal(\"Incompatible schema: \", schema, schemaVer)\n\t\t\t} else {\n\t\t\t\tlog.Printf(\"DB Schema %s, version: %d\", schema, schemaVer)\n\t\t\t\tdelete(required, schema)\n\t\t\t}\n\t\t} else {\n\t\t\tlog.Fatal(\"Unknown schema: \", schema)\n\t\t}\n\t}\n\tif err = rows.Err(); err != nil {\n\t\tlog.Fatal(\"Schema check: \", err)\n\t}\n\tif len(required) != 0 {\n\t\tfor s, _ := range required {\n\t\t\tlog.Printf(\"Missing schema: %s\", s)\n\t\t}\n\t\tlog.Fatal(\"FATAL - database incomplete\")\n\t}\n}\n\nfunc pingDatabase() {\n\tticker := time.NewTicker(time.Second).C\n\n\tfor {\n\t\t<-ticker\n\t\terr := conn.Ping()\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t}\n\t}\n}\n\n\/\/ vim: ts=4 sw=4 sts=4 noet fenc=utf-8 ffs=unix\n<|endoftext|>"} {"text":"<commit_before>package plugin\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\"\n\n\t\"github.com\/appc\/cni\/pkg\/ipam\"\n\t\"github.com\/appc\/cni\/pkg\/skel\"\n\t\"github.com\/appc\/cni\/pkg\/types\"\n\t\"github.com\/vishvananda\/netlink\"\n\t\"github.com\/vishvananda\/netns\"\n\tweaveapi \"github.com\/weaveworks\/weave\/api\"\n\t\"github.com\/weaveworks\/weave\/common\"\n\tweavenet \"github.com\/weaveworks\/weave\/net\"\n\tipamplugin \"github.com\/weaveworks\/weave\/plugin\/ipam\"\n)\n\nvar (\n\tzeroNetwork = net.IPNet{IP: net.IPv4zero, Mask: net.IPv4Mask(0, 0, 0, 0)}\n\tmask32 = net.IPv4Mask(0xff, 0xff, 0xff, 0xff)\n)\n\ntype CNIPlugin struct {\n\tweave *weaveapi.Client\n}\n\nfunc NewCNIPlugin(weave *weaveapi.Client) *CNIPlugin {\n\treturn &CNIPlugin{weave: weave}\n}\n\nfunc loadNetConf(bytes []byte) (*NetConf, error) {\n\tn := &NetConf{\n\t\tBrName: weavenet.WeaveBridgeName,\n\t}\n\tif err := json.Unmarshal(bytes, n); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to load netconf: %v\", err)\n\t}\n\treturn n, nil\n}\n\nfunc (c *CNIPlugin) getIP(ipamType string, args *skel.CmdArgs) (result *types.Result, err error) {\n\t\/\/ Default IPAM is Weave's own\n\tif ipamType == \"\" {\n\t\tresult, err = ipamplugin.NewIpam(c.weave).Allocate(args)\n\t} else {\n\t\tresult, err = ipam.ExecAdd(ipamType, args.StdinData)\n\t}\n\tif err == nil && result.IP4 == nil {\n\t\treturn nil, fmt.Errorf(\"IPAM plugin failed to allocate IP address\")\n\t}\n\treturn result, err\n}\n\nfunc (c *CNIPlugin) CmdAdd(args *skel.CmdArgs) error {\n\tconf, err := loadNetConf(args.StdinData)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif conf.IsGW {\n\t\treturn fmt.Errorf(\"Gateway functionality not supported\")\n\t}\n\tif conf.IPMasq {\n\t\treturn fmt.Errorf(\"IP Masquerading functionality not supported\")\n\t}\n\n\tresult, err := c.getIP(conf.IPAM.Type, args)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to allocate IP address: %s\", err)\n\t}\n\n\t\/\/ If config says nothing about routes or gateway, default one will be via the bridge\n\tif result.IP4.Routes == nil && result.IP4.Gateway == nil {\n\t\tbridgeIP, err := findBridgeIP(conf.BrName, result.IP4.IP)\n\t\tif err == errBridgeNoIP {\n\t\t\tbridgeArgs := *args\n\t\t\tbridgeArgs.ContainerID = \"weave:expose\"\n\t\t\tbridgeIPResult, err := c.getIP(conf.IPAM.Type, &bridgeArgs)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to allocate IP address for bridge: %s\", err)\n\t\t\t}\n\t\t\tif err := assignBridgeIP(conf.BrName, bridgeIPResult.IP4.IP); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to assign IP address to bridge: %s\", err)\n\t\t\t}\n\t\t\tbridgeIP = bridgeIPResult.IP4.IP.IP\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresult.IP4.Gateway = bridgeIP\n\t}\n\n\tns, err := netns.GetFromPath(args.Netns)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer ns.Close()\n\n\tid := args.ContainerID\n\tif len(id) < 5 {\n\t\tdata := make([]byte, 5)\n\t\t_, err := rand.Reader.Read(data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tid = fmt.Sprintf(\"%x\", data)\n\t}\n\n\tif err := weavenet.AttachContainer(ns, id, args.IfName, conf.BrName, conf.MTU, false, []*net.IPNet{&result.IP4.IP}, false); err != nil {\n\t\treturn err\n\t}\n\tif err := weavenet.WithNetNSLinkUnsafe(ns, args.IfName, func(link netlink.Link) error {\n\t\treturn setupRoutes(link, args.IfName, result.IP4.IP, result.IP4.Gateway, result.IP4.Routes)\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"error setting up routes: %s\", err)\n\t}\n\n\tresult.DNS = conf.DNS\n\treturn result.Print()\n}\n\nfunc setupRoutes(link netlink.Link, name string, ipnet net.IPNet, gw net.IP, routes []types.Route) error {\n\tvar err error\n\tif routes == nil { \/\/ If config says nothing about routes, add a default one\n\t\tif !ipnet.Contains(gw) {\n\t\t\t\/\/ The bridge IP is not on the same subnet; add a specific route to it\n\t\t\tgw32 := &net.IPNet{IP: gw, Mask: mask32}\n\t\t\tif err = weavenet.AddRoute(link, netlink.SCOPE_LINK, gw32, nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\troutes = []types.Route{{Dst: zeroNetwork}}\n\t}\n\tfor _, r := range routes {\n\t\tif r.GW != nil {\n\t\t\terr = weavenet.AddRoute(link, netlink.SCOPE_UNIVERSE, &r.Dst, r.GW)\n\t\t} else {\n\t\t\terr = weavenet.AddRoute(link, netlink.SCOPE_UNIVERSE, &r.Dst, gw)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to add route '%v via %v dev %v': %v\", r.Dst, gw, name, err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc assignBridgeIP(bridgeName string, ipnet net.IPNet) error {\n\tlink, err := netlink.LinkByName(bridgeName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := netlink.AddrAdd(link, &netlink.Addr{IPNet: &ipnet}); err != nil {\n\t\treturn fmt.Errorf(\"failed to add IP address to %q: %v\", bridgeName, err)\n\t}\n\treturn nil\n}\n\nvar errBridgeNoIP = fmt.Errorf(\"Bridge has no IP address\")\n\nfunc findBridgeIP(bridgeName string, subnet net.IPNet) (net.IP, error) {\n\tnetdev, err := common.GetBridgeNetDev(bridgeName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to get netdev for %q bridge: %s\", bridgeName, err)\n\t}\n\tif len(netdev.CIDRs) == 0 {\n\t\treturn nil, errBridgeNoIP\n\t}\n\tfor _, cidr := range netdev.CIDRs {\n\t\tif subnet.Contains(cidr.IP) {\n\t\t\treturn cidr.IP, nil\n\t\t}\n\t}\n\t\/\/ None in the required subnet; just return the first one\n\treturn netdev.CIDRs[0].IP, nil\n}\n\nfunc (c *CNIPlugin) CmdDel(args *skel.CmdArgs) error {\n\tconf, err := loadNetConf(args.StdinData)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tns, err := netns.GetFromPath(args.Netns)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer ns.Close()\n\terr = weavenet.WithNetNSUnsafe(ns, func() error {\n\t\tlink, err := netlink.LinkByName(args.IfName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn netlink.LinkDel(link)\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error removing interface: %s\", err)\n\t}\n\n\t\/\/ Default IPAM is Weave's own\n\tif conf.IPAM.Type == \"\" {\n\t\terr = ipamplugin.NewIpam(c.weave).Release(args)\n\t} else {\n\t\terr = ipam.ExecDel(conf.IPAM.Type, args.StdinData)\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to release IP address: %s\", err)\n\t}\n\treturn nil\n}\n\ntype NetConf struct {\n\ttypes.NetConf\n\tBrName string `json:\"bridge\"`\n\tIsGW bool `json:\"isGateway\"`\n\tIPMasq bool `json:\"ipMasq\"`\n\tMTU int `json:\"mtu\"`\n}\n<commit_msg>Include more context in error messages from CNI operations<commit_after>package plugin\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\"\n\n\t\"github.com\/appc\/cni\/pkg\/ipam\"\n\t\"github.com\/appc\/cni\/pkg\/skel\"\n\t\"github.com\/appc\/cni\/pkg\/types\"\n\t\"github.com\/vishvananda\/netlink\"\n\t\"github.com\/vishvananda\/netns\"\n\tweaveapi \"github.com\/weaveworks\/weave\/api\"\n\t\"github.com\/weaveworks\/weave\/common\"\n\tweavenet \"github.com\/weaveworks\/weave\/net\"\n\tipamplugin \"github.com\/weaveworks\/weave\/plugin\/ipam\"\n)\n\nvar (\n\tzeroNetwork = net.IPNet{IP: net.IPv4zero, Mask: net.IPv4Mask(0, 0, 0, 0)}\n\tmask32 = net.IPv4Mask(0xff, 0xff, 0xff, 0xff)\n)\n\ntype CNIPlugin struct {\n\tweave *weaveapi.Client\n}\n\nfunc NewCNIPlugin(weave *weaveapi.Client) *CNIPlugin {\n\treturn &CNIPlugin{weave: weave}\n}\n\nfunc loadNetConf(bytes []byte) (*NetConf, error) {\n\tn := &NetConf{\n\t\tBrName: weavenet.WeaveBridgeName,\n\t}\n\tif err := json.Unmarshal(bytes, n); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to load netconf: %v\", err)\n\t}\n\treturn n, nil\n}\n\nfunc (c *CNIPlugin) getIP(ipamType string, args *skel.CmdArgs) (result *types.Result, err error) {\n\t\/\/ Default IPAM is Weave's own\n\tif ipamType == \"\" {\n\t\tresult, err = ipamplugin.NewIpam(c.weave).Allocate(args)\n\t} else {\n\t\tresult, err = ipam.ExecAdd(ipamType, args.StdinData)\n\t}\n\tif err == nil && result.IP4 == nil {\n\t\treturn nil, fmt.Errorf(\"IPAM plugin failed to allocate IP address\")\n\t}\n\treturn result, err\n}\n\nfunc (c *CNIPlugin) CmdAdd(args *skel.CmdArgs) error {\n\tconf, err := loadNetConf(args.StdinData)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif conf.IsGW {\n\t\treturn fmt.Errorf(\"Gateway functionality not supported\")\n\t}\n\tif conf.IPMasq {\n\t\treturn fmt.Errorf(\"IP Masquerading functionality not supported\")\n\t}\n\n\tresult, err := c.getIP(conf.IPAM.Type, args)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to allocate IP address: %s\", err)\n\t}\n\n\t\/\/ If config says nothing about routes or gateway, default one will be via the bridge\n\tif result.IP4.Routes == nil && result.IP4.Gateway == nil {\n\t\tbridgeIP, err := findBridgeIP(conf.BrName, result.IP4.IP)\n\t\tif err == errBridgeNoIP {\n\t\t\tbridgeArgs := *args\n\t\t\tbridgeArgs.ContainerID = \"weave:expose\"\n\t\t\tbridgeIPResult, err := c.getIP(conf.IPAM.Type, &bridgeArgs)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to allocate IP address for bridge: %s\", err)\n\t\t\t}\n\t\t\tif err := assignBridgeIP(conf.BrName, bridgeIPResult.IP4.IP); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to assign IP address to bridge: %s\", err)\n\t\t\t}\n\t\t\tbridgeIP = bridgeIPResult.IP4.IP.IP\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\t\tresult.IP4.Gateway = bridgeIP\n\t}\n\n\tns, err := netns.GetFromPath(args.Netns)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error accessing namespace %q: %s\", args.Netns, err)\n\t}\n\tdefer ns.Close()\n\n\tid := args.ContainerID\n\tif len(id) < 5 {\n\t\tdata := make([]byte, 5)\n\t\t_, err := rand.Reader.Read(data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tid = fmt.Sprintf(\"%x\", data)\n\t}\n\n\tif err := weavenet.AttachContainer(ns, id, args.IfName, conf.BrName, conf.MTU, false, []*net.IPNet{&result.IP4.IP}, false); err != nil {\n\t\treturn err\n\t}\n\tif err := weavenet.WithNetNSLinkUnsafe(ns, args.IfName, func(link netlink.Link) error {\n\t\treturn setupRoutes(link, args.IfName, result.IP4.IP, result.IP4.Gateway, result.IP4.Routes)\n\t}); err != nil {\n\t\treturn fmt.Errorf(\"error setting up routes: %s\", err)\n\t}\n\n\tresult.DNS = conf.DNS\n\treturn result.Print()\n}\n\nfunc setupRoutes(link netlink.Link, name string, ipnet net.IPNet, gw net.IP, routes []types.Route) error {\n\tvar err error\n\tif routes == nil { \/\/ If config says nothing about routes, add a default one\n\t\tif !ipnet.Contains(gw) {\n\t\t\t\/\/ The bridge IP is not on the same subnet; add a specific route to it\n\t\t\tgw32 := &net.IPNet{IP: gw, Mask: mask32}\n\t\t\tif err = weavenet.AddRoute(link, netlink.SCOPE_LINK, gw32, nil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\troutes = []types.Route{{Dst: zeroNetwork}}\n\t}\n\tfor _, r := range routes {\n\t\tif r.GW != nil {\n\t\t\terr = weavenet.AddRoute(link, netlink.SCOPE_UNIVERSE, &r.Dst, r.GW)\n\t\t} else {\n\t\t\terr = weavenet.AddRoute(link, netlink.SCOPE_UNIVERSE, &r.Dst, gw)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to add route '%v via %v dev %v': %v\", r.Dst, gw, name, err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc assignBridgeIP(bridgeName string, ipnet net.IPNet) error {\n\tlink, err := netlink.LinkByName(bridgeName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := netlink.AddrAdd(link, &netlink.Addr{IPNet: &ipnet}); err != nil {\n\t\treturn fmt.Errorf(\"failed to add IP address to %q: %v\", bridgeName, err)\n\t}\n\treturn nil\n}\n\nvar errBridgeNoIP = fmt.Errorf(\"Bridge has no IP address\")\n\nfunc findBridgeIP(bridgeName string, subnet net.IPNet) (net.IP, error) {\n\tnetdev, err := common.GetBridgeNetDev(bridgeName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to get netdev for %q bridge: %s\", bridgeName, err)\n\t}\n\tif len(netdev.CIDRs) == 0 {\n\t\treturn nil, errBridgeNoIP\n\t}\n\tfor _, cidr := range netdev.CIDRs {\n\t\tif subnet.Contains(cidr.IP) {\n\t\t\treturn cidr.IP, nil\n\t\t}\n\t}\n\t\/\/ None in the required subnet; just return the first one\n\treturn netdev.CIDRs[0].IP, nil\n}\n\nfunc (c *CNIPlugin) CmdDel(args *skel.CmdArgs) error {\n\tconf, err := loadNetConf(args.StdinData)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tns, err := netns.GetFromPath(args.Netns)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error accessing namespace %q: %s\", args.Netns, err)\n\t}\n\tdefer ns.Close()\n\terr = weavenet.WithNetNSUnsafe(ns, func() error {\n\t\tlink, err := netlink.LinkByName(args.IfName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn netlink.LinkDel(link)\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error removing interface %q: %s\", args.IfName, err)\n\t}\n\n\t\/\/ Default IPAM is Weave's own\n\tif conf.IPAM.Type == \"\" {\n\t\terr = ipamplugin.NewIpam(c.weave).Release(args)\n\t} else {\n\t\terr = ipam.ExecDel(conf.IPAM.Type, args.StdinData)\n\t}\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to release IP address: %s\", err)\n\t}\n\treturn nil\n}\n\ntype NetConf struct {\n\ttypes.NetConf\n\tBrName string `json:\"bridge\"`\n\tIsGW bool `json:\"isGateway\"`\n\tIPMasq bool `json:\"ipMasq\"`\n\tMTU int `json:\"mtu\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package messenger\n\nimport (\n\t\"errors\"\n\t\"github.com\/maciekmm\/messenger-platform-go-sdk\/template\"\n)\n\ntype SendMessage struct {\n\tText string `json:\"text,omiempty\"`\n\tAttachment *Attachment `json:\"attachment,omitempty\"`\n}\n\n\/\/ Recipient describes the person who will receive the message\n\/\/ Either ID or PhoneNumber has to be set\ntype Recipient struct {\n\tID string `json:\"id,omitempty\"`\n\tPhoneNumber string `json:\"phone_number,omitempty\"`\n}\n\n\/\/ NotificationType describes the behavior phone will execute after receiving the message\ntype NotificationType string\n\nconst (\n\t\/\/ NotificationTypeRegular will emit a sound\/vibration and a phone notification\n\tNotificationTypeRegular NotificationType = \"REGULAR\"\n\t\/\/ NotificationTypeSilentPush will just emit a phone notification\n\tNotificationTypeSilentPush NotificationType = \"SILENT_PUSH\"\n\t\/\/ NotificationTypeNoPush will not emit sound\/vibration nor a phone notification\n\tNotificationTypeNoPush NotificationType = \"NO_PUSH\"\n)\n\ntype MessageQuery struct {\n\tRecipient Recipient `json:\"recipient\"`\n\tMessage SendMessage `json:\"message\"`\n\tNotificationType NotificationType `json:\"notification_type,omitempty\"`\n}\n\nfunc (mq *MessageQuery) RecipientID(recipientID string) error {\n\tif mq.Recipient.PhoneNumber != \"\" {\n\t\treturn errors.New(\"Only one user identification (phone or id) can be specified.\")\n\t}\n\tmq.Recipient.ID = recipientID\n\treturn nil\n}\n\nfunc (mq *MessageQuery) RecipientPhoneNumber(phoneNumber string, button template.Button) error {\n\tif mq.Recipient.ID != \"\" {\n\t\treturn errors.New(\"Only one user identification (phone or id) can be specified.\")\n\t}\n\tmq.Recipient.PhoneNumber = phoneNumber\n\treturn nil\n}\n\nfunc (mq *MessageQuery) Notification(notification NotificationType) *MessageQuery {\n\tmq.NotificationType = notification\n\treturn mq\n}\n\nfunc (mq *MessageQuery) Text(text string) error {\n\tif mq.Message.Attachment != nil && mq.Message.Attachment.Type == AttachmentTypeTemplate {\n\t\treturn errors.New(\"Can't set both text and template.\")\n\t}\n\tmq.Message.Text = text;\n\treturn nil\n}\n\nfunc (mq *MessageQuery) resource(typ AttachmentType, url string) error {\n\tif mq.Message.Attachment.Payload != nil {\n\t\treturn errors.New(\"Attachment already specified.\")\n\t}\n\tmq.Message.Attachment.Type = typ\n\tmq.Message.Attachment.Payload = &Resource{URL:url}\n\treturn nil\n}\n\nfunc (mq *MessageQuery) Audio(url string) error {\n\treturn mq.resource(AttachmentTypeAudio, url)\n}\n\nfunc (mq *MessageQuery) Video(url string) error {\n\treturn mq.resource(AttachmentTypeVideo, url)\n}\n\nfunc (mq *MessageQuery) Image(url string) error {\n\treturn mq.resource(AttachmentTypeImage, url)\n}\n\nfunc (mq *MessageQuery) Template(tpl template.Template) error {\n\tif mq.Message.Attachment == nil {\n\t\tmq.Message.Attachment = &Attachment{}\n\t}\n\tif mq.Message.Attachment.Type != AttachmentTypeTemplate && mq.Message.Attachment.Payload != nil {\n\t\treturn errors.New(\"Non-template attachment already specified.\")\n\t}\n\n\tif mq.Message.Attachment.Payload == nil {\n\t\tmq.Message.Attachment.Type = AttachmentTypeTemplate\n\t\tmq.Message.Attachment.Payload = &template.Payload{}\n\t}\n\n\tpayload := mq.Message.Attachment.Payload.(*template.Payload)\n\n\tfor _,v := range payload.Elements {\n\t\tif v.Type() != tpl.Type() {\n\t\t\treturn errors.New(\"All templates have to have thesame type.\")\n\t\t}\n\t}\n\n\tpayload.Elements = append(payload.Elements, tpl)\n\treturn nil\n}\n\n<commit_msg>Added some nil checks<commit_after>package messenger\n\nimport (\n\t\"errors\"\n\t\"github.com\/maciekmm\/messenger-platform-go-sdk\/template\"\n)\n\ntype SendMessage struct {\n\tText string `json:\"text,omiempty\"`\n\tAttachment *Attachment `json:\"attachment,omitempty\"`\n}\n\n\/\/ Recipient describes the person who will receive the message\n\/\/ Either ID or PhoneNumber has to be set\ntype Recipient struct {\n\tID string `json:\"id,omitempty\"`\n\tPhoneNumber string `json:\"phone_number,omitempty\"`\n}\n\n\/\/ NotificationType describes the behavior phone will execute after receiving the message\ntype NotificationType string\n\nconst (\n\t\/\/ NotificationTypeRegular will emit a sound\/vibration and a phone notification\n\tNotificationTypeRegular NotificationType = \"REGULAR\"\n\t\/\/ NotificationTypeSilentPush will just emit a phone notification\n\tNotificationTypeSilentPush NotificationType = \"SILENT_PUSH\"\n\t\/\/ NotificationTypeNoPush will not emit sound\/vibration nor a phone notification\n\tNotificationTypeNoPush NotificationType = \"NO_PUSH\"\n)\n\ntype MessageQuery struct {\n\tRecipient Recipient `json:\"recipient\"`\n\tMessage SendMessage `json:\"message\"`\n\tNotificationType NotificationType `json:\"notification_type,omitempty\"`\n}\n\nfunc (mq *MessageQuery) RecipientID(recipientID string) error {\n\tif mq.Recipient.PhoneNumber != \"\" {\n\t\treturn errors.New(\"Only one user identification (phone or id) can be specified.\")\n\t}\n\tmq.Recipient.ID = recipientID\n\treturn nil\n}\n\nfunc (mq *MessageQuery) RecipientPhoneNumber(phoneNumber string, button template.Button) error {\n\tif mq.Recipient.ID != \"\" {\n\t\treturn errors.New(\"Only one user identification (phone or id) can be specified.\")\n\t}\n\tmq.Recipient.PhoneNumber = phoneNumber\n\treturn nil\n}\n\nfunc (mq *MessageQuery) Notification(notification NotificationType) *MessageQuery {\n\tmq.NotificationType = notification\n\treturn mq\n}\n\nfunc (mq *MessageQuery) Text(text string) error {\n\tif mq.Message.Attachment == nil {\n\t\tmq.Message.Attachment = &Attachment{}\n\t}\n\tif mq.Message.Attachment != nil && mq.Message.Attachment.Type == AttachmentTypeTemplate {\n\t\treturn errors.New(\"Can't set both text and template.\")\n\t}\n\tmq.Message.Text = text;\n\treturn nil\n}\n\nfunc (mq *MessageQuery) resource(typ AttachmentType, url string) error {\n\tif mq.Message.Attachment == nil {\n\t\tmq.Message.Attachment = &Attachment{}\n\t}\n\tif mq.Message.Attachment.Payload != nil {\n\t\treturn errors.New(\"Attachment already specified.\")\n\t}\n\tmq.Message.Attachment.Type = typ\n\tmq.Message.Attachment.Payload = &Resource{URL:url}\n\treturn nil\n}\n\nfunc (mq *MessageQuery) Audio(url string) error {\n\treturn mq.resource(AttachmentTypeAudio, url)\n}\n\nfunc (mq *MessageQuery) Video(url string) error {\n\treturn mq.resource(AttachmentTypeVideo, url)\n}\n\nfunc (mq *MessageQuery) Image(url string) error {\n\treturn mq.resource(AttachmentTypeImage, url)\n}\n\nfunc (mq *MessageQuery) Template(tpl template.Template) error {\n\tif mq.Message.Attachment == nil {\n\t\tmq.Message.Attachment = &Attachment{}\n\t}\n\tif mq.Message.Attachment.Type != AttachmentTypeTemplate && mq.Message.Attachment.Payload != nil {\n\t\treturn errors.New(\"Non-template attachment already specified.\")\n\t}\n\n\tif mq.Message.Attachment.Payload == nil {\n\t\tmq.Message.Attachment.Type = AttachmentTypeTemplate\n\t\tmq.Message.Attachment.Payload = &template.Payload{}\n\t}\n\n\tpayload := mq.Message.Attachment.Payload.(*template.Payload)\n\n\tfor _,v := range payload.Elements {\n\t\tif v.Type() != tpl.Type() {\n\t\t\treturn errors.New(\"All templates have to have thesame type.\")\n\t\t}\n\t}\n\n\tpayload.Elements = append(payload.Elements, tpl)\n\treturn nil\n}\n\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package testfiles provides a wrapper around various optional ways\n\/\/ of retrieving additional files needed during a test run:\n\/\/ - builtin bindata\n\/\/ - filesystem access\n\/\/\n\/\/ Because it is a is self-contained package, it can be used by\n\/\/ test\/e2e\/framework and test\/e2e\/manifest without creating\n\/\/ a circular dependency.\npackage testfiles\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n)\n\nvar filesources []FileSource\n\n\/\/ AddFileSource registers another provider for files that may be\n\/\/ needed at runtime. Should be called during initialization of a test\n\/\/ binary.\nfunc AddFileSource(filesource FileSource) {\n\tfilesources = append(filesources, filesource)\n}\n\n\/\/ FileSource implements one way of retrieving test file content. For\n\/\/ example, one file source could read from the original source code\n\/\/ file tree, another from bindata compiled into a test executable.\ntype FileSource interface {\n\t\/\/ ReadTestFile retrieves the content of a file that gets maintained\n\t\/\/ alongside a test's source code. Files are identified by the\n\t\/\/ relative path inside the repository containing the tests, for\n\t\/\/ example \"cluster\/gce\/upgrade.sh\" inside kubernetes\/kubernetes.\n\t\/\/\n\t\/\/ When the file is not found, a nil slice is returned. An error is\n\t\/\/ returned for all fatal errors.\n\tReadTestFile(filePath string) ([]byte, error)\n\n\t\/\/ DescribeFiles returns a multi-line description of which\n\t\/\/ files are available via this source. It is meant to be\n\t\/\/ used as part of the error message when a file cannot be\n\t\/\/ found.\n\tDescribeFiles() string\n}\n\n\/\/ Fail is an error handler function with the same prototype and\n\/\/ semantic as ginkgo.Fail. Typically ginkgo.Fail is what callers\n\/\/ of ReadOrDie and Exists will pass. This way this package\n\/\/ avoids depending on Ginkgo.\ntype Fail func(failure string, callerSkip ...int)\n\n\/\/ ReadOrDie tries to retrieve the desired file content from\n\/\/ one of the registered file sources. In contrast to FileSource, it\n\/\/ will either return a valid slice or abort the test by calling the fatal function,\n\/\/ i.e. the caller doesn't have to implement error checking.\nfunc ReadOrDie(filePath string, fail Fail) []byte {\n\tdata, err := Read(filePath)\n\tif err != nil {\n\t\tfail(err.Error(), 1)\n\t}\n\treturn data\n}\n\n\/\/ Read tries to retrieve the desired file content from\n\/\/ one of the registered file sources.\nfunc Read(filePath string) ([]byte, error) {\n\tif len(filesources) == 0 {\n\t\treturn nil, fmt.Errorf(\"no file sources registered (yet?), cannot retrieve test file %s\", filePath)\n\t}\n\tfor _, filesource := range filesources {\n\t\tdata, err := filesource.ReadTestFile(filePath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"fatal error retrieving test file %s: %s\", filePath, err)\n\t\t}\n\t\tif data != nil {\n\t\t\treturn data, nil\n\t\t}\n\t}\n\t\/\/ Here we try to generate an error that points test authors\n\t\/\/ or users in the right direction for resolving the problem.\n\terror := fmt.Sprintf(\"Test file %q was not found.\\n\", filePath)\n\tfor _, filesource := range filesources {\n\t\terror += filesource.DescribeFiles()\n\t\terror += \"\\n\"\n\t}\n\treturn nil, errors.New(error)\n}\n\n\/\/ Exists checks whether a file could be read. Unexpected errors\n\/\/ are handled by calling the fail function, which then should\n\/\/ abort the current test.\nfunc Exists(filePath string, fail Fail) bool {\n\tfor _, filesource := range filesources {\n\t\tdata, err := filesource.ReadTestFile(filePath)\n\t\tif err != nil {\n\t\t\tfail(fmt.Sprintf(\"fatal error looking for test file %s: %s\", filePath, err), 1)\n\t\t}\n\t\tif data != nil {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ RootFileSource looks for files relative to a root directory.\ntype RootFileSource struct {\n\tRoot string\n}\n\n\/\/ ReadTestFile looks for the file relative to the configured\n\/\/ root directory.\nfunc (r RootFileSource) ReadTestFile(filePath string) ([]byte, error) {\n\tfullPath := filepath.Join(r.Root, filePath)\n\tdata, err := ioutil.ReadFile(fullPath)\n\tif os.IsNotExist(err) {\n\t\t\/\/ Not an error (yet), some other provider may have the file.\n\t\treturn nil, nil\n\t}\n\treturn data, err\n}\n\n\/\/ DescribeFiles explains that it looks for files inside a certain\n\/\/ root directory.\nfunc (r RootFileSource) DescribeFiles() string {\n\tdescription := fmt.Sprintf(\"Test files are expected in %q\", r.Root)\n\tif !path.IsAbs(r.Root) {\n\t\t\/\/ The default in test_context.go is the relative path\n\t\t\/\/ ..\/..\/, which doesn't really help locating the\n\t\t\/\/ actual location. Therefore we add also the absolute\n\t\t\/\/ path if necessary.\n\t\tabs, err := filepath.Abs(r.Root)\n\t\tif err == nil {\n\t\t\tdescription += fmt.Sprintf(\" = %q\", abs)\n\t\t}\n\t}\n\tdescription += \".\"\n\treturn description\n}\n\n\/\/ BindataFileSource handles files stored in a package generated with bindata.\ntype BindataFileSource struct {\n\tAsset func(string) ([]byte, error)\n\tAssetNames func() []string\n}\n\n\/\/ ReadTestFile looks for an asset with the given path.\nfunc (b BindataFileSource) ReadTestFile(filePath string) ([]byte, error) {\n\tfileBytes, err := b.Asset(filePath)\n\tif err != nil {\n\t\t\/\/ It would be nice to have a better way to detect\n\t\t\/\/ \"not found\" errors :-\/\n\t\tif strings.HasSuffix(err.Error(), \"not found\") {\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\treturn fileBytes, nil\n}\n\n\/\/ DescribeFiles explains about gobindata and then lists all available files.\nfunc (b BindataFileSource) DescribeFiles() string {\n\tvar lines []string\n\tlines = append(lines, \"The following files are built into the test executable via gobindata. For questions on maintaining gobindata, contact the sig-testing group.\")\n\tassets := b.AssetNames()\n\tsort.Strings(assets)\n\tlines = append(lines, assets...)\n\tdescription := strings.Join(lines, \"\\n \")\n\treturn description\n}\n<commit_msg>e2e\/framework: absolute test file paths<commit_after>\/*\nCopyright 2018 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ Package testfiles provides a wrapper around various optional ways\n\/\/ of retrieving additional files needed during a test run:\n\/\/ - builtin bindata\n\/\/ - filesystem access\n\/\/\n\/\/ Because it is a is self-contained package, it can be used by\n\/\/ test\/e2e\/framework and test\/e2e\/manifest without creating\n\/\/ a circular dependency.\npackage testfiles\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strings\"\n)\n\nvar filesources []FileSource\n\n\/\/ AddFileSource registers another provider for files that may be\n\/\/ needed at runtime. Should be called during initialization of a test\n\/\/ binary.\nfunc AddFileSource(filesource FileSource) {\n\tfilesources = append(filesources, filesource)\n}\n\n\/\/ FileSource implements one way of retrieving test file content. For\n\/\/ example, one file source could read from the original source code\n\/\/ file tree, another from bindata compiled into a test executable.\ntype FileSource interface {\n\t\/\/ ReadTestFile retrieves the content of a file that gets maintained\n\t\/\/ alongside a test's source code. Files are identified by the\n\t\/\/ relative path inside the repository containing the tests, for\n\t\/\/ example \"cluster\/gce\/upgrade.sh\" inside kubernetes\/kubernetes.\n\t\/\/\n\t\/\/ When the file is not found, a nil slice is returned. An error is\n\t\/\/ returned for all fatal errors.\n\tReadTestFile(filePath string) ([]byte, error)\n\n\t\/\/ DescribeFiles returns a multi-line description of which\n\t\/\/ files are available via this source. It is meant to be\n\t\/\/ used as part of the error message when a file cannot be\n\t\/\/ found.\n\tDescribeFiles() string\n}\n\n\/\/ Fail is an error handler function with the same prototype and\n\/\/ semantic as ginkgo.Fail. Typically ginkgo.Fail is what callers\n\/\/ of ReadOrDie and Exists will pass. This way this package\n\/\/ avoids depending on Ginkgo.\ntype Fail func(failure string, callerSkip ...int)\n\n\/\/ ReadOrDie tries to retrieve the desired file content from\n\/\/ one of the registered file sources. In contrast to FileSource, it\n\/\/ will either return a valid slice or abort the test by calling the fatal function,\n\/\/ i.e. the caller doesn't have to implement error checking.\nfunc ReadOrDie(filePath string, fail Fail) []byte {\n\tdata, err := Read(filePath)\n\tif err != nil {\n\t\tfail(err.Error(), 1)\n\t}\n\treturn data\n}\n\n\/\/ Read tries to retrieve the desired file content from\n\/\/ one of the registered file sources.\nfunc Read(filePath string) ([]byte, error) {\n\tif len(filesources) == 0 {\n\t\treturn nil, fmt.Errorf(\"no file sources registered (yet?), cannot retrieve test file %s\", filePath)\n\t}\n\tfor _, filesource := range filesources {\n\t\tdata, err := filesource.ReadTestFile(filePath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"fatal error retrieving test file %s: %s\", filePath, err)\n\t\t}\n\t\tif data != nil {\n\t\t\treturn data, nil\n\t\t}\n\t}\n\t\/\/ Here we try to generate an error that points test authors\n\t\/\/ or users in the right direction for resolving the problem.\n\terror := fmt.Sprintf(\"Test file %q was not found.\\n\", filePath)\n\tfor _, filesource := range filesources {\n\t\terror += filesource.DescribeFiles()\n\t\terror += \"\\n\"\n\t}\n\treturn nil, errors.New(error)\n}\n\n\/\/ Exists checks whether a file could be read. Unexpected errors\n\/\/ are handled by calling the fail function, which then should\n\/\/ abort the current test.\nfunc Exists(filePath string, fail Fail) bool {\n\tfor _, filesource := range filesources {\n\t\tdata, err := filesource.ReadTestFile(filePath)\n\t\tif err != nil {\n\t\t\tfail(fmt.Sprintf(\"fatal error looking for test file %s: %s\", filePath, err), 1)\n\t\t}\n\t\tif data != nil {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ RootFileSource looks for files relative to a root directory.\ntype RootFileSource struct {\n\tRoot string\n}\n\n\/\/ ReadTestFile looks for the file relative to the configured\n\/\/ root directory. If the path is already absolute, for example\n\/\/ in a test that has its own method of determining where\n\/\/ files are, then the path will be used directly.\nfunc (r RootFileSource) ReadTestFile(filePath string) ([]byte, error) {\n\tvar fullPath string\n\tif path.IsAbs(filePath) {\n\t\tfullPath = filePath\n\t} else {\n\t\tfullPath = filepath.Join(r.Root, filePath)\n\t}\n\tdata, err := ioutil.ReadFile(fullPath)\n\tif os.IsNotExist(err) {\n\t\t\/\/ Not an error (yet), some other provider may have the file.\n\t\treturn nil, nil\n\t}\n\treturn data, err\n}\n\n\/\/ DescribeFiles explains that it looks for files inside a certain\n\/\/ root directory.\nfunc (r RootFileSource) DescribeFiles() string {\n\tdescription := fmt.Sprintf(\"Test files are expected in %q\", r.Root)\n\tif !path.IsAbs(r.Root) {\n\t\t\/\/ The default in test_context.go is the relative path\n\t\t\/\/ ..\/..\/, which doesn't really help locating the\n\t\t\/\/ actual location. Therefore we add also the absolute\n\t\t\/\/ path if necessary.\n\t\tabs, err := filepath.Abs(r.Root)\n\t\tif err == nil {\n\t\t\tdescription += fmt.Sprintf(\" = %q\", abs)\n\t\t}\n\t}\n\tdescription += \".\"\n\treturn description\n}\n\n\/\/ BindataFileSource handles files stored in a package generated with bindata.\ntype BindataFileSource struct {\n\tAsset func(string) ([]byte, error)\n\tAssetNames func() []string\n}\n\n\/\/ ReadTestFile looks for an asset with the given path.\nfunc (b BindataFileSource) ReadTestFile(filePath string) ([]byte, error) {\n\tfileBytes, err := b.Asset(filePath)\n\tif err != nil {\n\t\t\/\/ It would be nice to have a better way to detect\n\t\t\/\/ \"not found\" errors :-\/\n\t\tif strings.HasSuffix(err.Error(), \"not found\") {\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\treturn fileBytes, nil\n}\n\n\/\/ DescribeFiles explains about gobindata and then lists all available files.\nfunc (b BindataFileSource) DescribeFiles() string {\n\tvar lines []string\n\tlines = append(lines, \"The following files are built into the test executable via gobindata. For questions on maintaining gobindata, contact the sig-testing group.\")\n\tassets := b.AssetNames()\n\tsort.Strings(assets)\n\tlines = append(lines, assets...)\n\tdescription := strings.Join(lines, \"\\n \")\n\treturn description\n}\n<|endoftext|>"} {"text":"<commit_before>package adhandler\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"time\"\n\t\"math\/rand\"\n\t\"adserver\"\n\t\"strings\"\n\t\"fmt\"\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"github.com\/satori\/go.uuid\"\n)\n\nfunc SearchHandler(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\treq := new(adserver.Request)\n\t\/\/ slot_id\n\tif len(r.Form[\"slot_id\"]) > 0 {\n\t\tslotId, _ := strconv.ParseUint(r.Form[\"slot_id\"][0], 10, 32)\n\t\treq.SlotId = uint32(slotId)\n\t}\n\t\/\/ ad_num\n\tif len(r.Form[\"ad_num\"]) > 0 {\n\t\tadNum, _ := strconv.ParseUint(r.Form[\"ad_num\"][0], 10, 32)\n\t\treq.AdNum = uint32(adNum)\n\t}\n\t\/\/ ip\n\tif len(r.Form[\"ip\"]) > 0 {\n\t\treq.Ip = r.Form[\"ip\"][0]\n\t}\n\t\/\/ device_id\n\tif len(r.Form[\"device_id\"]) > 0 {\n\t\treq.DeviceId = r.Form[\"device_id\"][0]\n\t}\n\t\/\/ os\n\tif len(r.Form[\"os\"]) > 0 {\n\t\tos, _ := strconv.ParseUint(r.Form[\"os\"][0], 10, 32)\n\t\treq.Os = uint32(os)\n\t}\n\t\/\/ os_version\n\tif len(r.Form[\"os_version\"]) > 0 {\n\t\treq.OsVersion = r.Form[\"os_version\"][0]\n\t}\n\n\t\/\/ searchId\n\treq.SearchId = uuid.NewV4().String()\n\n\tadData := adserver.AdDictObject.GetCurrentAdData()\n\t\/\/ search by request ip\n\tvar unitIdList1 []uint32\n\tvar exist1 bool\n\tlocationInfo := adserver.SearchLocationByIp(req.Ip)\n\tif locationInfo != nil {\n\t\tcountry := locationInfo.Country\n\t\tcity := locationInfo.City\n\t\tadserver.AdServerLog.Debug(fmt.Sprintf(\n\t\t\t\"ip=%s country=%s city=%s\\n\", req.Ip, country, city))\n\t\tkey := strings.ToLower(country) + \"_\" + strings.ToLower(city)\n\t\tunitIdList1, exist1 = adData.LocationUnitMap[key]\n\t}\n\t\/\/ search by CN_ALL\n\tkey := \"cn_all\"\n\tunitIdList2, exist2 := adData.LocationUnitMap[key]\n\t\/\/ merge two unit id list\n\tunitNum := 0\n\tif exist1 {\n\t\tunitNum += len(unitIdList1)\n\t}\n\tif exist2 {\n\t\tunitNum += len(unitIdList2)\n\t}\n\tunitIdList := make([]uint32, unitNum)\n\tif exist1 && unitIdList1 != nil {\n\t\tcopy(unitIdList, unitIdList1)\n\t}\n\tif exist2 && unitIdList2 != nil {\n\t\tcopy(unitIdList, unitIdList2)\n\t}\n\n\t\/\/ select one from unit id list\n\tvar res = &adserver.Response{}\n\tif unitIdList != nil {\n\t\tunitNum = len(unitIdList)\n\t\trandom := rand.New(rand.NewSource(time.Now().UnixNano()))\n\t\trandIndex := random.Intn(unitNum)\n\t\tunitId := unitIdList[randIndex]\n\t\tunitInfo := adData.AdUnitMap[unitId]\n\t\tadCreative := adData.AdCreativeMap[unitInfo.CreativeId]\n\n\t\tadInfo := adserver.AdInfo{\n\t\t\tUnitId: unitInfo.UnitId,\n\t\t\tCreativeId: adCreative.CreativeId,\n\t\t\tTitle: adCreative.Title,\n\t\t\tDescription: adCreative.Description,\n\t\t\tAppPackageName: adCreative.AppPackageName,\n\t\t\tIconImageUrl: adCreative.IconImageUrl,\n\t\t\tMainImageUrl: adCreative.MainImageUrl,\n\t\t\tClickUrl: adCreative.ClickUrl,\n\t\t}\n\t\tadInfo.ImpressionTrackUrl = buildImpressionTrackUrl(req, adInfo)\n\t\tadInfo.ClickTrackUrl = buildClickTrackUrl(req, adInfo)\n\t\tadInfo.ConversionTrackUrl = buildConversionTrackUrl(req, adInfo)\n\t\tadList := make([]adserver.AdInfo, 0, 1)\n\t\tadList = append(adList, adInfo)\n\t\tres.ResCode = 0\n\t\tres.AdList = adList\n\t\tadserver.SearchLog.Info(fmt.Sprintf(\n\t\t\t\"searchId=%s slotId=%d adNum=%d iP=%s deviceId=%s oS=%d osVersion=%s \" +\n\t\t\t\"unitId=%d creativeId=%d\\n\",\n\t\t\treq.SearchId, req.SlotId, req.AdNum, req.Ip, req.DeviceId, req.Os, req.OsVersion,\n\t\t\tadInfo.UnitId, adInfo.CreativeId))\n\t} else {\n\t\tres.ResCode = 0\n\t\tres.AdList = make([]adserver.AdInfo, 0, 1)\n\t\tadserver.SearchLog.Info(fmt.Sprintf(\n\t\t\t\"searchId=%s slotId=%d adNum=%d iP=%s deviceId=%s oS=%d osVersion=%s resNum=0\\n\",\n\t\t\treq.SearchId, req.SlotId, req.AdNum, req.Ip, req.DeviceId, req.Os, req.OsVersion))\n\t}\n\n\tresBytes, _ := json.Marshal(res)\n\tw.Write(resBytes)\n}\n\nfunc buildImpressionTrackUrl(req *adserver.Request, adInfo adserver.AdInfo) string {\n\tvar paramBuf bytes.Buffer\n\tparamBuf.WriteString(fmt.Sprintf(\"search_id=%s\", req.SearchId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&slot_id=%d\", req.SlotId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&ip=%s\", req.Ip))\n\tparamBuf.WriteString(fmt.Sprintf(\"&device_id=%s\", req.DeviceId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&os=%d\", req.Os))\n\tparamBuf.WriteString(fmt.Sprintf(\"&os_version=%s\", req.OsVersion))\n\tparamBuf.WriteString(fmt.Sprintf(\"&unit_id=%d\", adInfo.UnitId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&creative_id=%d\", adInfo.CreativeId))\n\tparamEncoded := base64.StdEncoding.EncodeToString(paramBuf.Bytes())\n\timpressionTrackUrl := fmt.Sprintf(\"%s?i=%s\",\n\t\tadserver.GlobalConfObject.ImpressionTrackUrlPrefix, paramEncoded)\n\treturn impressionTrackUrl\n}\n\nfunc buildClickTrackUrl(req *adserver.Request, adInfo adserver.AdInfo) string {\n\tvar paramBuf bytes.Buffer\n\tparamBuf.WriteString(fmt.Sprintf(\"search_id=%s\", req.SearchId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&slot_id=%d\", req.SlotId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&ip=%s\", req.Ip))\n\tparamBuf.WriteString(fmt.Sprintf(\"&device_id=%s\", req.DeviceId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&os=%d\", req.Os))\n\tparamBuf.WriteString(fmt.Sprintf(\"&os_version=%s\", req.OsVersion))\n\tparamBuf.WriteString(fmt.Sprintf(\"&unit_id=%d\", adInfo.UnitId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&creative_id=%d\", adInfo.CreativeId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&click_url=%s\", adInfo.ClickUrl))\n\tparamEncoded := base64.StdEncoding.EncodeToString(paramBuf.Bytes())\n\timpressionTrackUrl := fmt.Sprintf(\"%s?i=%s\",\n\t\tadserver.GlobalConfObject.ClickTrackUrlPrefix, paramEncoded)\n\treturn impressionTrackUrl\n}\n\nfunc buildConversionTrackUrl(req *adserver.Request, adInfo adserver.AdInfo) string {\n\tvar paramBuf bytes.Buffer\n\tparamBuf.WriteString(fmt.Sprintf(\"search_id=%s\", req.SearchId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&slot_id=%d\", req.SlotId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&ip=%s\", req.Ip))\n\tparamBuf.WriteString(fmt.Sprintf(\"&device_id=%s\", req.DeviceId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&os=%d\", req.Os))\n\tparamBuf.WriteString(fmt.Sprintf(\"&os_version=%s\", req.OsVersion))\n\tparamBuf.WriteString(fmt.Sprintf(\"&unit_id=%d\", adInfo.UnitId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&creative_id=%d\", adInfo.CreativeId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&click_url=%s\", adInfo.ClickUrl))\n\tparamEncoded := base64.StdEncoding.EncodeToString(paramBuf.Bytes())\n\tconversionTrackUrl := fmt.Sprintf(\"%s?i=%s\",\n\t\tadserver.GlobalConfObject.ConversionTrackUrlPrefix, paramEncoded)\n\treturn conversionTrackUrl\n}\n\n<commit_msg>add the function of returning multiple adinfo<commit_after>package adhandler\n\nimport (\n\t\"encoding\/json\"\n\t\"net\/http\"\n\t\"strconv\"\n\t\"time\"\n\t\"math\/rand\"\n\t\"adserver\"\n\t\"strings\"\n\t\"fmt\"\n\t\"bytes\"\n\t\"encoding\/base64\"\n\t\"github.com\/satori\/go.uuid\"\n)\n\nfunc SearchHandler(w http.ResponseWriter, r *http.Request) {\n\tr.ParseForm()\n\treq := new(adserver.Request)\n\t\/\/ slot_id\n\tif len(r.Form[\"slot_id\"]) > 0 {\n\t\tslotId, _ := strconv.ParseUint(r.Form[\"slot_id\"][0], 10, 32)\n\t\treq.SlotId = uint32(slotId)\n\t}\n\t\/\/ ad_num\n\tvar reqAdNum int\n\tif len(r.Form[\"ad_num\"]) > 0 {\n\t\tadNum, _ := strconv.ParseUint(r.Form[\"ad_num\"][0], 10, 32)\n\t\treqAdNum, _ = strconv.Atoi(r.Form[\"ad_num\"][0])\n\t\treq.AdNum = uint32(adNum)\n\t}\n\t\/\/ ip\n\tif len(r.Form[\"ip\"]) > 0 {\n\t\treq.Ip = r.Form[\"ip\"][0]\n\t}\n\t\/\/ device_id\n\tif len(r.Form[\"device_id\"]) > 0 {\n\t\treq.DeviceId = r.Form[\"device_id\"][0]\n\t}\n\t\/\/ os\n\tif len(r.Form[\"os\"]) > 0 {\n\t\tos, _ := strconv.ParseUint(r.Form[\"os\"][0], 10, 32)\n\t\treq.Os = uint32(os)\n\t}\n\t\/\/ os_version\n\tif len(r.Form[\"os_version\"]) > 0 {\n\t\treq.OsVersion = r.Form[\"os_version\"][0]\n\t}\n\n\t\/\/ searchId\n\treq.SearchId = uuid.NewV4().String()\n\n\tadData := adserver.AdDictObject.GetCurrentAdData()\n\t\/\/ search by request ip\n\tvar unitIdList1 []uint32\n\tvar exist1 bool\n\tlocationInfo := adserver.SearchLocationByIp(req.Ip)\n\tif locationInfo != nil {\n\t\tcountry := locationInfo.Country\n\t\tcity := locationInfo.City\n\t\tadserver.AdServerLog.Debug(fmt.Sprintf(\n\t\t\t\"ip=%s country=%s city=%s\\n\", req.Ip, country, city))\n\t\tkey := strings.ToLower(country) + \"_\" + strings.ToLower(city)\n\t\tunitIdList1, exist1 = adData.LocationUnitMap[key]\n\t}\n\t\/\/ search by CN_ALL\n\tkey := \"cn_all\"\n\tunitIdList2, exist2 := adData.LocationUnitMap[key]\n\t\/\/ merge two unit id list\n\tunitNum := 0\n\tif exist1 {\n\t\tunitNum += len(unitIdList1)\n\t}\n\tif exist2 {\n\t\tunitNum += len(unitIdList2)\n\t}\n\tunitIdList := make([]uint32, unitNum)\n\tif exist1 && unitIdList1 != nil {\n\t\tcopy(unitIdList, unitIdList1)\n\t}\n\tif exist2 && unitIdList2 != nil {\n\t\tcopy(unitIdList, unitIdList2)\n\t}\n \n\t\/\/ select one from unit id list\n\tvar res = &adserver.Response{}\n\tadList := make([]adserver.AdInfo, 0, 1)\n\tunitIdMap := make(map[int] bool)\n\tvar unitIdsStr, creativeIdsStr string \n\tif unitIdList != nil && reqAdNum >= 1 {\n\t\tunitNum = len(unitIdList)\n\t\trandom := rand.New(rand.NewSource(time.Now().UnixNano()))\n\t\tfor i := 0; i < unitNum && i < reqAdNum; i++ {\n\t\t\trandIndex := random.Intn(unitNum)\n\t\t\tif unitIdMap[randIndex] {\n\t\t\t\ti--\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tunitIdMap[randIndex] = true\n\t\t\tunitId := unitIdList[randIndex]\n\t\t\tunitInfo := adData.AdUnitMap[unitId]\n\t\t\tadCreative := adData.AdCreativeMap[unitInfo.CreativeId]\n\t\t\tadInfo := adserver.AdInfo{\n\t\t\t\tUnitId: unitInfo.UnitId,\n\t\t\t\tCreativeId: adCreative.CreativeId,\n\t\t\t\tTitle: adCreative.Title,\n\t\t\t\tDescription: adCreative.Description,\n\t\t\t\tAppPackageName: adCreative.AppPackageName,\n\t\t\t\tIconImageUrl: adCreative.IconImageUrl,\n\t\t\t\tMainImageUrl: adCreative.MainImageUrl,\n\t\t\t\tClickUrl: adCreative.ClickUrl,\n\t\t\t}\n\t\t\tadInfo.ImpressionTrackUrl = buildImpressionTrackUrl(req, adInfo)\n\t\t\tadInfo.ClickTrackUrl = buildClickTrackUrl(req, adInfo)\n\t\t\tadInfo.ConversionTrackUrl = buildConversionTrackUrl(req, adInfo)\n\t\t\tadList = append(adList, adInfo)\n\t\t\tif i == unitNum - 1 || i == reqAdNum - 1 {\n\t\t\t\tunitIdsStr += fmt.Sprint(adInfo.UnitId)\n\t\t\t\tcreativeIdsStr += fmt.Sprint(adInfo.CreativeId)\n\t\t\t} else {\n\t\t\t\tunitIdsStr += fmt.Sprint(adInfo.UnitId) + \",\"\n\t\t\t\tcreativeIdsStr += fmt.Sprint(adInfo.CreativeId) + \",\"\n\t\t\t}\n\t\t}\n\t\tres.ResCode = 0\n\t\tres.AdList = adList\n\t} else {\n\t\tres.ResCode = 0\n\t\tres.AdList = make([]adserver.AdInfo, 0, 1)\n\t}\n adserver.SearchLog.Info(fmt.Sprintf(\n\t\t\t\"searchId=%s slotId=%d adNum=%d iP=%s deviceId=%s oS=%d osVersion=%s \" +\n\t\t\t\"unitId=%s creativeId=%s\\n\",\n\t\t\treq.SearchId, req.SlotId, req.AdNum, req.Ip, req.DeviceId, req.Os, req.OsVersion,\n\t\t\tunitIdsStr, creativeIdsStr))\n\n\tresBytes, _ := json.Marshal(res)\n\tw.Write(resBytes)\n}\n\nfunc buildImpressionTrackUrl(req *adserver.Request, adInfo adserver.AdInfo) string {\n\tvar paramBuf bytes.Buffer\n\tparamBuf.WriteString(fmt.Sprintf(\"search_id=%s\", req.SearchId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&slot_id=%d\", req.SlotId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&ip=%s\", req.Ip))\n\tparamBuf.WriteString(fmt.Sprintf(\"&device_id=%s\", req.DeviceId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&os=%d\", req.Os))\n\tparamBuf.WriteString(fmt.Sprintf(\"&os_version=%s\", req.OsVersion))\n\tparamBuf.WriteString(fmt.Sprintf(\"&unit_id=%d\", adInfo.UnitId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&creative_id=%d\", adInfo.CreativeId))\n\tparamEncoded := base64.StdEncoding.EncodeToString(paramBuf.Bytes())\n\timpressionTrackUrl := fmt.Sprintf(\"%s?i=%s\",\n\t\tadserver.GlobalConfObject.ImpressionTrackUrlPrefix, paramEncoded)\n\treturn impressionTrackUrl\n}\n\nfunc buildClickTrackUrl(req *adserver.Request, adInfo adserver.AdInfo) string {\n\tvar paramBuf bytes.Buffer\n\tparamBuf.WriteString(fmt.Sprintf(\"search_id=%s\", req.SearchId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&slot_id=%d\", req.SlotId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&ip=%s\", req.Ip))\n\tparamBuf.WriteString(fmt.Sprintf(\"&device_id=%s\", req.DeviceId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&os=%d\", req.Os))\n\tparamBuf.WriteString(fmt.Sprintf(\"&os_version=%s\", req.OsVersion))\n\tparamBuf.WriteString(fmt.Sprintf(\"&unit_id=%d\", adInfo.UnitId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&creative_id=%d\", adInfo.CreativeId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&click_url=%s\", adInfo.ClickUrl))\n\tparamEncoded := base64.StdEncoding.EncodeToString(paramBuf.Bytes())\n\timpressionTrackUrl := fmt.Sprintf(\"%s?i=%s\",\n\t\tadserver.GlobalConfObject.ClickTrackUrlPrefix, paramEncoded)\n\treturn impressionTrackUrl\n}\n\nfunc buildConversionTrackUrl(req *adserver.Request, adInfo adserver.AdInfo) string {\n\tvar paramBuf bytes.Buffer\n\tparamBuf.WriteString(fmt.Sprintf(\"search_id=%s\", req.SearchId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&slot_id=%d\", req.SlotId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&ip=%s\", req.Ip))\n\tparamBuf.WriteString(fmt.Sprintf(\"&device_id=%s\", req.DeviceId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&os=%d\", req.Os))\n\tparamBuf.WriteString(fmt.Sprintf(\"&os_version=%s\", req.OsVersion))\n\tparamBuf.WriteString(fmt.Sprintf(\"&unit_id=%d\", adInfo.UnitId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&creative_id=%d\", adInfo.CreativeId))\n\tparamBuf.WriteString(fmt.Sprintf(\"&click_url=%s\", adInfo.ClickUrl))\n\tparamEncoded := base64.StdEncoding.EncodeToString(paramBuf.Bytes())\n\tconversionTrackUrl := fmt.Sprintf(\"%s?i=%s\",\n\t\tadserver.GlobalConfObject.ConversionTrackUrlPrefix, paramEncoded)\n\treturn conversionTrackUrl\n}\n\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"context\"\n\t\"sort\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/bytom\/account\"\n\t\"github.com\/bytom\/common\"\n\t\"github.com\/bytom\/consensus\"\n\t\"github.com\/bytom\/crypto\/ed25519\/chainkd\"\n\t\"github.com\/bytom\/protocol\/vm\/vmutil\"\n)\n\n\/\/ POST \/create-account\nfunc (a *API) createAccount(ctx context.Context, ins struct {\n\tRootXPubs []chainkd.XPub `json:\"root_xpubs\"`\n\tQuorum int `json:\"quorum\"`\n\tAlias string `json:\"alias\"`\n}) Response {\n\tacc, err := a.wallet.AccountMgr.Create(ins.RootXPubs, ins.Quorum, ins.Alias)\n\tif err != nil {\n\t\treturn NewErrorResponse(err)\n\t}\n\n\tannotatedAccount := account.Annotated(acc)\n\tlog.WithField(\"account ID\", annotatedAccount.ID).Info(\"Created account\")\n\n\treturn NewSuccessResponse(annotatedAccount)\n}\n\n\/\/ AccountInfo is request struct for deleteAccount\ntype AccountInfo struct {\n\tInfo string `json:\"account_info\"`\n}\n\n\/\/ POST \/delete-account\nfunc (a *API) deleteAccount(ctx context.Context, in AccountInfo) Response {\n\tif err := a.wallet.AccountMgr.DeleteAccount(in.Info); err != nil {\n\t\treturn NewErrorResponse(err)\n\t}\n\treturn NewSuccessResponse(nil)\n}\n\ntype validateAddressResp struct {\n\tValid bool `json:\"valid\"`\n\tIsLocal bool `json:\"is_local\"`\n}\n\n\/\/ POST \/validate-address\nfunc (a *API) validateAddress(ctx context.Context, ins struct {\n\tAddress string `json:\"address\"`\n}) Response {\n\tresp := &validateAddressResp{\n\t\tValid: false,\n\t\tIsLocal: false,\n\t}\n\taddress, err := common.DecodeAddress(ins.Address, &consensus.ActiveNetParams)\n\tif err != nil {\n\t\treturn NewSuccessResponse(resp)\n\t}\n\n\tredeemContract := address.ScriptAddress()\n\tprogram := []byte{}\n\tswitch address.(type) {\n\tcase *common.AddressWitnessPubKeyHash:\n\t\tprogram, err = vmutil.P2WPKHProgram(redeemContract)\n\tcase *common.AddressWitnessScriptHash:\n\t\tprogram, err = vmutil.P2WSHProgram(redeemContract)\n\tdefault:\n\t\treturn NewSuccessResponse(resp)\n\t}\n\tif err != nil {\n\t\treturn NewSuccessResponse(resp)\n\t}\n\n\tresp.Valid = true\n\tresp.IsLocal = a.wallet.AccountMgr.IsLocalControlProgram(program)\n\treturn NewSuccessResponse(resp)\n}\n\ntype addressResp struct {\n\tAccountAlias string `json:\"account_alias\"`\n\tAccountID string `json:\"account_id\"`\n\tAddress string `json:\"address\"`\n\tChange bool `json:\"change\"`\n\tKeyIndex uint64 `json:\"-\"`\n}\n\n\/\/ SortByIndex implements sort.Interface for addressResp slices\ntype SortByIndex []addressResp\n\nfunc (a SortByIndex) Len() int { return len(a) }\nfunc (a SortByIndex) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a SortByIndex) Less(i, j int) bool { return a[i].KeyIndex < a[j].KeyIndex }\n\nfunc (a *API) listAddresses(ctx context.Context, ins struct {\n\tAccountID string `json:\"account_id\"`\n\tAccountAlias string `json:\"account_alias\"`\n\tFrom uint `json:\"from\"`\n\tCount uint `json:\"count\"`\n}) Response {\n\taccountID := ins.AccountID\n\tvar target *account.Account\n\tif ins.AccountAlias != \"\" {\n\t\tacc, err := a.wallet.AccountMgr.FindByAlias(ins.AccountAlias)\n\t\tif err != nil {\n\t\t\treturn NewErrorResponse(err)\n\t\t}\n\t\ttarget = acc\n\t} else {\n\t\tacc, err := a.wallet.AccountMgr.FindByID(accountID)\n\t\tif err != nil {\n\t\t\treturn NewErrorResponse(err)\n\t\t}\n\t\ttarget = acc\n\t}\n\n\tcps, err := a.wallet.AccountMgr.ListControlProgram()\n\tif err != nil {\n\t\treturn NewErrorResponse(err)\n\t}\n\n\taddresses := []addressResp{}\n\tfor _, cp := range cps {\n\t\tif cp.Address == \"\" || cp.AccountID != target.ID {\n\t\t\tcontinue\n\t\t}\n\t\taddresses = append(addresses, addressResp{\n\t\t\tAccountAlias: target.Alias,\n\t\t\tAccountID: cp.AccountID,\n\t\t\tAddress: cp.Address,\n\t\t\tChange: cp.Change,\n\t\t\tKeyIndex: cp.KeyIndex,\n\t\t})\n\t}\n\n\t\/\/ sort AddressResp by KeyIndex\n\tsort.Sort(SortByIndex(addresses))\n\tstart, end := getPageRange(len(addresses), ins.From, ins.Count)\n\treturn NewSuccessResponse(addresses[start:end])\n}\n\ntype minigAddressResp struct {\n\tMiningAddress string `json:\"mining_address\"`\n}\n\nfunc (a *API) getMiningAddress(ctx context.Context) Response {\n\tminingAddress, err := a.wallet.AccountMgr.GetMiningAddress()\n\tif err != nil {\n\t\treturn NewErrorResponse(err)\n\t}\n\treturn NewSuccessResponse(minigAddressResp{\n\t\tMiningAddress: miningAddress,\n\t})\n}\n\n\/\/ POST \/set-mining-address\nfunc (a *API) setMiningAddress(ctx context.Context, in struct {\n\tMiningAddress string `json:\"mining_address\"`\n}) Response {\n\tminingAddress, err := a.wallet.AccountMgr.SetMiningAddress(in.MiningAddress)\n\tif err != nil {\n\t\treturn NewErrorResponse(err)\n\t}\n\treturn NewSuccessResponse(minigAddressResp{\n\t\tMiningAddress: miningAddress,\n\t})\n}\n<commit_msg>add control-program for list-addresses (#1235)<commit_after>package api\n\nimport (\n\t\"context\"\n\t\"encoding\/hex\"\n\t\"sort\"\n\n\tlog \"github.com\/sirupsen\/logrus\"\n\n\t\"github.com\/bytom\/account\"\n\t\"github.com\/bytom\/common\"\n\t\"github.com\/bytom\/consensus\"\n\t\"github.com\/bytom\/crypto\/ed25519\/chainkd\"\n\t\"github.com\/bytom\/protocol\/vm\/vmutil\"\n)\n\n\/\/ POST \/create-account\nfunc (a *API) createAccount(ctx context.Context, ins struct {\n\tRootXPubs []chainkd.XPub `json:\"root_xpubs\"`\n\tQuorum int `json:\"quorum\"`\n\tAlias string `json:\"alias\"`\n}) Response {\n\tacc, err := a.wallet.AccountMgr.Create(ins.RootXPubs, ins.Quorum, ins.Alias)\n\tif err != nil {\n\t\treturn NewErrorResponse(err)\n\t}\n\n\tannotatedAccount := account.Annotated(acc)\n\tlog.WithField(\"account ID\", annotatedAccount.ID).Info(\"Created account\")\n\n\treturn NewSuccessResponse(annotatedAccount)\n}\n\n\/\/ AccountInfo is request struct for deleteAccount\ntype AccountInfo struct {\n\tInfo string `json:\"account_info\"`\n}\n\n\/\/ POST \/delete-account\nfunc (a *API) deleteAccount(ctx context.Context, in AccountInfo) Response {\n\tif err := a.wallet.AccountMgr.DeleteAccount(in.Info); err != nil {\n\t\treturn NewErrorResponse(err)\n\t}\n\treturn NewSuccessResponse(nil)\n}\n\ntype validateAddressResp struct {\n\tValid bool `json:\"valid\"`\n\tIsLocal bool `json:\"is_local\"`\n}\n\n\/\/ POST \/validate-address\nfunc (a *API) validateAddress(ctx context.Context, ins struct {\n\tAddress string `json:\"address\"`\n}) Response {\n\tresp := &validateAddressResp{\n\t\tValid: false,\n\t\tIsLocal: false,\n\t}\n\taddress, err := common.DecodeAddress(ins.Address, &consensus.ActiveNetParams)\n\tif err != nil {\n\t\treturn NewSuccessResponse(resp)\n\t}\n\n\tredeemContract := address.ScriptAddress()\n\tprogram := []byte{}\n\tswitch address.(type) {\n\tcase *common.AddressWitnessPubKeyHash:\n\t\tprogram, err = vmutil.P2WPKHProgram(redeemContract)\n\tcase *common.AddressWitnessScriptHash:\n\t\tprogram, err = vmutil.P2WSHProgram(redeemContract)\n\tdefault:\n\t\treturn NewSuccessResponse(resp)\n\t}\n\tif err != nil {\n\t\treturn NewSuccessResponse(resp)\n\t}\n\n\tresp.Valid = true\n\tresp.IsLocal = a.wallet.AccountMgr.IsLocalControlProgram(program)\n\treturn NewSuccessResponse(resp)\n}\n\ntype addressResp struct {\n\tAccountAlias string `json:\"account_alias\"`\n\tAccountID string `json:\"account_id\"`\n\tAddress string `json:\"address\"`\n\tControlProgram string `json:\"control_program\"`\n\tChange bool `json:\"change\"`\n\tKeyIndex uint64 `json:\"-\"`\n}\n\n\/\/ SortByIndex implements sort.Interface for addressResp slices\ntype SortByIndex []addressResp\n\nfunc (a SortByIndex) Len() int { return len(a) }\nfunc (a SortByIndex) Swap(i, j int) { a[i], a[j] = a[j], a[i] }\nfunc (a SortByIndex) Less(i, j int) bool { return a[i].KeyIndex < a[j].KeyIndex }\n\nfunc (a *API) listAddresses(ctx context.Context, ins struct {\n\tAccountID string `json:\"account_id\"`\n\tAccountAlias string `json:\"account_alias\"`\n\tFrom uint `json:\"from\"`\n\tCount uint `json:\"count\"`\n}) Response {\n\taccountID := ins.AccountID\n\tvar target *account.Account\n\tif ins.AccountAlias != \"\" {\n\t\tacc, err := a.wallet.AccountMgr.FindByAlias(ins.AccountAlias)\n\t\tif err != nil {\n\t\t\treturn NewErrorResponse(err)\n\t\t}\n\t\ttarget = acc\n\t} else {\n\t\tacc, err := a.wallet.AccountMgr.FindByID(accountID)\n\t\tif err != nil {\n\t\t\treturn NewErrorResponse(err)\n\t\t}\n\t\ttarget = acc\n\t}\n\n\tcps, err := a.wallet.AccountMgr.ListControlProgram()\n\tif err != nil {\n\t\treturn NewErrorResponse(err)\n\t}\n\n\taddresses := []addressResp{}\n\tfor _, cp := range cps {\n\t\tif cp.Address == \"\" || cp.AccountID != target.ID {\n\t\t\tcontinue\n\t\t}\n\t\taddresses = append(addresses, addressResp{\n\t\t\tAccountAlias: target.Alias,\n\t\t\tAccountID: cp.AccountID,\n\t\t\tAddress: cp.Address,\n\t\t\tControlProgram: hex.EncodeToString(cp.ControlProgram),\n\t\t\tChange: cp.Change,\n\t\t\tKeyIndex: cp.KeyIndex,\n\t\t})\n\t}\n\n\t\/\/ sort AddressResp by KeyIndex\n\tsort.Sort(SortByIndex(addresses))\n\tstart, end := getPageRange(len(addresses), ins.From, ins.Count)\n\treturn NewSuccessResponse(addresses[start:end])\n}\n\ntype minigAddressResp struct {\n\tMiningAddress string `json:\"mining_address\"`\n}\n\nfunc (a *API) getMiningAddress(ctx context.Context) Response {\n\tminingAddress, err := a.wallet.AccountMgr.GetMiningAddress()\n\tif err != nil {\n\t\treturn NewErrorResponse(err)\n\t}\n\treturn NewSuccessResponse(minigAddressResp{\n\t\tMiningAddress: miningAddress,\n\t})\n}\n\n\/\/ POST \/set-mining-address\nfunc (a *API) setMiningAddress(ctx context.Context, in struct {\n\tMiningAddress string `json:\"mining_address\"`\n}) Response {\n\tminingAddress, err := a.wallet.AccountMgr.SetMiningAddress(in.MiningAddress)\n\tif err != nil {\n\t\treturn NewErrorResponse(err)\n\t}\n\treturn NewSuccessResponse(minigAddressResp{\n\t\tMiningAddress: miningAddress,\n\t})\n}\n<|endoftext|>"} {"text":"<commit_before>package driver\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/mail\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/blang\/semver\"\n\t\"github.com\/concourse\/semver-resource\/version\"\n)\n\nvar gitRepoDir string\nvar privateKeyPath string\nvar netRcPath string\n\nvar ErrEncryptedKey = errors.New(\"private keys with passphrases are not supported\")\n\nfunc init() {\n\tgitRepoDir = filepath.Join(os.TempDir(), \"semver-git-repo\")\n\tprivateKeyPath = filepath.Join(os.TempDir(), \"private-key\")\n\tnetRcPath = filepath.Join(os.Getenv(\"HOME\"), \".netrc\")\n}\n\ntype GitDriver struct {\n\tInitialVersion semver.Version\n\n\tURI string\n\tBranch string\n\tPrivateKey string\n\tUsername string\n\tPassword string\n\tFile string\n\tGitUser string\n}\n\nfunc (driver *GitDriver) Bump(bump version.Bump) (semver.Version, error) {\n\terr := driver.setUpAuth()\n\tif err != nil {\n\t\treturn semver.Version{}, err\n\t}\n\n\terr = driver.setUserInfo()\n\tif err != nil {\n\t\treturn semver.Version{}, err\n\t}\n\n\tvar newVersion semver.Version\n\n\tfor {\n\t\terr = driver.setUpRepo()\n\t\tif err != nil {\n\t\t\treturn semver.Version{}, err\n\t\t}\n\n\t\tcurrentVersion, exists, err := driver.readVersion()\n\t\tif err != nil {\n\t\t\treturn semver.Version{}, err\n\t\t}\n\n\t\tif !exists {\n\t\t\tcurrentVersion = driver.InitialVersion\n\t\t}\n\n\t\tnewVersion = bump.Apply(currentVersion)\n\n\t\twrote, err := driver.writeVersion(newVersion)\n\t\tif wrote {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn newVersion, nil\n}\n\nfunc (driver *GitDriver) Set(newVersion semver.Version) error {\n\terr := driver.setUpAuth()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = driver.setUserInfo()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor {\n\t\terr = driver.setUpRepo()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\twrote, err := driver.writeVersion(newVersion)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif wrote {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (driver *GitDriver) Check(cursor *semver.Version) ([]semver.Version, error) {\n\terr := driver.setUpAuth()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = driver.setUpRepo()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcurrentVersion, exists, err := driver.readVersion()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !exists {\n\t\treturn []semver.Version{driver.InitialVersion}, nil\n\t}\n\n\tif cursor == nil || currentVersion.GTE(*cursor) {\n\t\treturn []semver.Version{currentVersion}, nil\n\t}\n\n\treturn []semver.Version{}, nil\n}\n\nfunc (driver *GitDriver) setUpRepo() error {\n\t_, err := os.Stat(gitRepoDir)\n\tif err != nil {\n\t\tgitClone := exec.Command(\"git\", \"clone\", driver.URI, \"--branch\", driver.Branch, gitRepoDir)\n\t\tgitClone.Stdout = os.Stderr\n\t\tgitClone.Stderr = os.Stderr\n\t\tif err := gitClone.Run(); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tgitFetch := exec.Command(\"git\", \"fetch\", \"origin\", driver.Branch)\n\t\tgitFetch.Dir = gitRepoDir\n\t\tgitFetch.Stdout = os.Stderr\n\t\tgitFetch.Stderr = os.Stderr\n\t\tif err := gitFetch.Run(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tgitCheckout := exec.Command(\"git\", \"reset\", \"--hard\", \"origin\/\"+driver.Branch)\n\tgitCheckout.Dir = gitRepoDir\n\tgitCheckout.Stdout = os.Stderr\n\tgitCheckout.Stderr = os.Stderr\n\tif err := gitCheckout.Run(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (driver *GitDriver) setUpAuth() error {\n\t_, err := os.Stat(netRcPath)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\terr := os.Remove(netRcPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(driver.PrivateKey) > 0 {\n\t\terr := driver.setUpKey()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(driver.Username) > 0 && len(driver.Password) > 0 {\n\t\terr := driver.setUpUsernamePassword()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (driver *GitDriver) setUpKey() error {\n\tif strings.Contains(driver.PrivateKey, \"ENCRYPTED\") {\n\t\treturn ErrEncryptedKey\n\t}\n\n\t_, err := os.Stat(privateKeyPath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\terr := ioutil.WriteFile(privateKeyPath, []byte(driver.PrivateKey), 0600)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn os.Setenv(\"GIT_SSH_COMMAND\", \"ssh -o StrictHostKeyChecking=no -i \"+privateKeyPath)\n}\n\nfunc (driver *GitDriver) setUpUsernamePassword() error {\n\t_, err := os.Stat(netRcPath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tcontent := fmt.Sprintf(\"default login %s password %s\", driver.Username, driver.Password)\n\t\t\terr := ioutil.WriteFile(netRcPath, []byte(content), 0600)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (driver *GitDriver) setUserInfo() error {\n\tif len(driver.GitUser) == 0 {\n\t\treturn nil\n\t}\n\n\te, err := mail.ParseAddress(driver.GitUser)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(e.Name) > 0 {\n\t\tgitName := exec.Command(\"git\", \"config\", \"--global\", \"user.name\", e.Name)\n\t\tgitName.Stdout = os.Stderr\n\t\tgitName.Stderr = os.Stderr\n\t\tif err := gitName.Run(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tgitEmail := exec.Command(\"git\", \"config\", \"--global\", \"user.email\", e.Address)\n\tgitEmail.Stdout = os.Stderr\n\tgitEmail.Stderr = os.Stderr\n\tif err := gitEmail.Run(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (driver *GitDriver) readVersion() (semver.Version, bool, error) {\n\tvar currentVersionStr string\n\tversionFile, err := os.Open(filepath.Join(gitRepoDir, driver.File))\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn semver.Version{}, false, nil\n\t\t}\n\n\t\treturn semver.Version{}, false, err\n\t}\n\n\tdefer versionFile.Close()\n\n\t_, err = fmt.Fscanf(versionFile, \"%s\", ¤tVersionStr)\n\tif err != nil {\n\t\treturn semver.Version{}, false, err\n\t}\n\n\tcurrentVersion, err := semver.Parse(currentVersionStr)\n\tif err != nil {\n\t\treturn semver.Version{}, false, err\n\t}\n\n\treturn currentVersion, true, nil\n}\n\nconst nothingToCommitString = \"nothing to commit\"\nconst falsePushString = \"Everything up-to-date\"\nconst pushRejectedString = \"[rejected]\"\nconst pushRemoteRejectedString = \"[remote rejected]\"\n\nfunc (driver *GitDriver) writeVersion(newVersion semver.Version) (bool, error) {\n\terr := ioutil.WriteFile(filepath.Join(gitRepoDir, driver.File), []byte(newVersion.String()+\"\\n\"), 0644)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tgitAdd := exec.Command(\"git\", \"add\", driver.File)\n\tgitAdd.Dir = gitRepoDir\n\tgitAdd.Stdout = os.Stderr\n\tgitAdd.Stderr = os.Stderr\n\tif err := gitAdd.Run(); err != nil {\n\t\treturn false, err\n\t}\n\n\tgitCommit := exec.Command(\"git\", \"commit\", \"-m\", \"bump to \"+newVersion.String())\n\tgitCommit.Dir = gitRepoDir\n\n\tcommitOutput, err := gitCommit.CombinedOutput()\n\n\tif strings.Contains(string(commitOutput), nothingToCommitString) {\n\t\treturn true, nil\n\t}\n\n\tif err != nil {\n\t\tos.Stderr.Write(commitOutput)\n\t\treturn false, err\n\t}\n\n\tgitPush := exec.Command(\"git\", \"push\", \"origin\", \"HEAD:\"+driver.Branch)\n\tgitPush.Dir = gitRepoDir\n\n\tpushOutput, err := gitPush.CombinedOutput()\n\n\tif strings.Contains(string(pushOutput), falsePushString) {\n\t\treturn false, nil\n\t}\n\n\tif strings.Contains(string(pushOutput), pushRejectedString) {\n\t\treturn false, nil\n\t}\n\n\tif strings.Contains(string(pushOutput), pushRemoteRejectedString) {\n\t\treturn false, nil\n\t}\n\n\tif err != nil {\n\t\tos.Stderr.Write(pushOutput)\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n<commit_msg>Introduce --depth option for git driver<commit_after>package driver\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/mail\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\n\t\"github.com\/blang\/semver\"\n\t\"github.com\/concourse\/semver-resource\/version\"\n)\n\nvar gitRepoDir string\nvar privateKeyPath string\nvar netRcPath string\n\nvar ErrEncryptedKey = errors.New(\"private keys with passphrases are not supported\")\n\nfunc init() {\n\tgitRepoDir = filepath.Join(os.TempDir(), \"semver-git-repo\")\n\tprivateKeyPath = filepath.Join(os.TempDir(), \"private-key\")\n\tnetRcPath = filepath.Join(os.Getenv(\"HOME\"), \".netrc\")\n}\n\ntype GitDriver struct {\n\tInitialVersion semver.Version\n\n\tURI string\n\tBranch string\n\tPrivateKey string\n\tUsername string\n\tPassword string\n\tFile string\n\tGitUser string\n\tDepth string\n}\n\nfunc (driver *GitDriver) Bump(bump version.Bump) (semver.Version, error) {\n\terr := driver.setUpAuth()\n\tif err != nil {\n\t\treturn semver.Version{}, err\n\t}\n\n\terr = driver.setUserInfo()\n\tif err != nil {\n\t\treturn semver.Version{}, err\n\t}\n\n\tvar newVersion semver.Version\n\n\tfor {\n\t\terr = driver.setUpRepo()\n\t\tif err != nil {\n\t\t\treturn semver.Version{}, err\n\t\t}\n\n\t\tcurrentVersion, exists, err := driver.readVersion()\n\t\tif err != nil {\n\t\t\treturn semver.Version{}, err\n\t\t}\n\n\t\tif !exists {\n\t\t\tcurrentVersion = driver.InitialVersion\n\t\t}\n\n\t\tnewVersion = bump.Apply(currentVersion)\n\n\t\twrote, err := driver.writeVersion(newVersion)\n\t\tif wrote {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn newVersion, nil\n}\n\nfunc (driver *GitDriver) Set(newVersion semver.Version) error {\n\terr := driver.setUpAuth()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = driver.setUserInfo()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor {\n\t\terr = driver.setUpRepo()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\twrote, err := driver.writeVersion(newVersion)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif wrote {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (driver *GitDriver) Check(cursor *semver.Version) ([]semver.Version, error) {\n\terr := driver.setUpAuth()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = driver.setUpRepo()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcurrentVersion, exists, err := driver.readVersion()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !exists {\n\t\treturn []semver.Version{driver.InitialVersion}, nil\n\t}\n\n\tif cursor == nil || currentVersion.GTE(*cursor) {\n\t\treturn []semver.Version{currentVersion}, nil\n\t}\n\n\treturn []semver.Version{}, nil\n}\n\nfunc (driver *GitDriver) setUpRepo() error {\n\t_, err := os.Stat(gitRepoDir)\n\tif err != nil {\n\t\tgitClone := exec.Command(\"git\", \"clone\", driver.URI, \"--branch\", driver.Branch)\n\t\tif len(driver.Depth) > 0 {\n\t\t\tgitClone.Args = append(gitClone.Args, \"--depth\", driver.Depth)\n\t\t}\n\t\tgitClone.Args = append(gitClone.Args, gitRepoDir)\n\t\tgitClone.Stdout = os.Stderr\n\t\tgitClone.Stderr = os.Stderr\n\t\tif err := gitClone.Run(); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tgitFetch := exec.Command(\"git\", \"fetch\", \"origin\", driver.Branch)\n\t\tgitFetch.Dir = gitRepoDir\n\t\tgitFetch.Stdout = os.Stderr\n\t\tgitFetch.Stderr = os.Stderr\n\t\tif err := gitFetch.Run(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tgitCheckout := exec.Command(\"git\", \"reset\", \"--hard\", \"origin\/\"+driver.Branch)\n\tgitCheckout.Dir = gitRepoDir\n\tgitCheckout.Stdout = os.Stderr\n\tgitCheckout.Stderr = os.Stderr\n\tif err := gitCheckout.Run(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (driver *GitDriver) setUpAuth() error {\n\t_, err := os.Stat(netRcPath)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\terr := os.Remove(netRcPath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(driver.PrivateKey) > 0 {\n\t\terr := driver.setUpKey()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(driver.Username) > 0 && len(driver.Password) > 0 {\n\t\terr := driver.setUpUsernamePassword()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (driver *GitDriver) setUpKey() error {\n\tif strings.Contains(driver.PrivateKey, \"ENCRYPTED\") {\n\t\treturn ErrEncryptedKey\n\t}\n\n\t_, err := os.Stat(privateKeyPath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\terr := ioutil.WriteFile(privateKeyPath, []byte(driver.PrivateKey), 0600)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn os.Setenv(\"GIT_SSH_COMMAND\", \"ssh -o StrictHostKeyChecking=no -i \"+privateKeyPath)\n}\n\nfunc (driver *GitDriver) setUpUsernamePassword() error {\n\t_, err := os.Stat(netRcPath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tcontent := fmt.Sprintf(\"default login %s password %s\", driver.Username, driver.Password)\n\t\t\terr := ioutil.WriteFile(netRcPath, []byte(content), 0600)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (driver *GitDriver) setUserInfo() error {\n\tif len(driver.GitUser) == 0 {\n\t\treturn nil\n\t}\n\n\te, err := mail.ParseAddress(driver.GitUser)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(e.Name) > 0 {\n\t\tgitName := exec.Command(\"git\", \"config\", \"--global\", \"user.name\", e.Name)\n\t\tgitName.Stdout = os.Stderr\n\t\tgitName.Stderr = os.Stderr\n\t\tif err := gitName.Run(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tgitEmail := exec.Command(\"git\", \"config\", \"--global\", \"user.email\", e.Address)\n\tgitEmail.Stdout = os.Stderr\n\tgitEmail.Stderr = os.Stderr\n\tif err := gitEmail.Run(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (driver *GitDriver) readVersion() (semver.Version, bool, error) {\n\tvar currentVersionStr string\n\tversionFile, err := os.Open(filepath.Join(gitRepoDir, driver.File))\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn semver.Version{}, false, nil\n\t\t}\n\n\t\treturn semver.Version{}, false, err\n\t}\n\n\tdefer versionFile.Close()\n\n\t_, err = fmt.Fscanf(versionFile, \"%s\", ¤tVersionStr)\n\tif err != nil {\n\t\treturn semver.Version{}, false, err\n\t}\n\n\tcurrentVersion, err := semver.Parse(currentVersionStr)\n\tif err != nil {\n\t\treturn semver.Version{}, false, err\n\t}\n\n\treturn currentVersion, true, nil\n}\n\nconst nothingToCommitString = \"nothing to commit\"\nconst falsePushString = \"Everything up-to-date\"\nconst pushRejectedString = \"[rejected]\"\nconst pushRemoteRejectedString = \"[remote rejected]\"\n\nfunc (driver *GitDriver) writeVersion(newVersion semver.Version) (bool, error) {\n\terr := ioutil.WriteFile(filepath.Join(gitRepoDir, driver.File), []byte(newVersion.String()+\"\\n\"), 0644)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tgitAdd := exec.Command(\"git\", \"add\", driver.File)\n\tgitAdd.Dir = gitRepoDir\n\tgitAdd.Stdout = os.Stderr\n\tgitAdd.Stderr = os.Stderr\n\tif err := gitAdd.Run(); err != nil {\n\t\treturn false, err\n\t}\n\n\tgitCommit := exec.Command(\"git\", \"commit\", \"-m\", \"bump to \"+newVersion.String())\n\tgitCommit.Dir = gitRepoDir\n\n\tcommitOutput, err := gitCommit.CombinedOutput()\n\n\tif strings.Contains(string(commitOutput), nothingToCommitString) {\n\t\treturn true, nil\n\t}\n\n\tif err != nil {\n\t\tos.Stderr.Write(commitOutput)\n\t\treturn false, err\n\t}\n\n\tgitPush := exec.Command(\"git\", \"push\", \"origin\", \"HEAD:\"+driver.Branch)\n\tgitPush.Dir = gitRepoDir\n\n\tpushOutput, err := gitPush.CombinedOutput()\n\n\tif strings.Contains(string(pushOutput), falsePushString) {\n\t\treturn false, nil\n\t}\n\n\tif strings.Contains(string(pushOutput), pushRejectedString) {\n\t\treturn false, nil\n\t}\n\n\tif strings.Contains(string(pushOutput), pushRemoteRejectedString) {\n\t\treturn false, nil\n\t}\n\n\tif err != nil {\n\t\tos.Stderr.Write(pushOutput)\n\t\treturn false, err\n\t}\n\n\treturn true, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux\n\n\/\/ Forked from https:\/\/github.com\/containerd\/containerd\/blob\/9ade247b38b5a685244e1391c86ff41ab109556e\/snapshots\/overlay\/check.go\n\/*\n Copyright The containerd Authors.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage overlayutils\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/containerd\/containerd\/mount\"\n\t\"github.com\/containerd\/containerd\/sys\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ NeedsUserXAttr returns whether overlayfs should be mounted with the \"userxattr\" mount option.\n\/\/\n\/\/ The \"userxattr\" option is needed for mounting overlayfs inside a user namespace with kernel >= 5.11.\n\/\/\n\/\/ The \"userxattr\" option is NOT needed for the initial user namespace (aka \"the host\").\n\/\/\n\/\/ Also, Ubuntu (since circa 2015) and Debian (since 10) with kernel < 5.11 can mount\n\/\/ the overlayfs in a user namespace without the \"userxattr\" option.\n\/\/\n\/\/ The corresponding kernel commit: https:\/\/github.com\/torvalds\/linux\/commit\/2d2f2d7322ff43e0fe92bf8cccdc0b09449bf2e1\n\/\/ > ovl: user xattr\n\/\/ >\n\/\/ > Optionally allow using \"user.overlay.\" namespace instead of \"trusted.overlay.\"\n\/\/ > ...\n\/\/ > Disable redirect_dir and metacopy options, because these would allow privilege escalation through direct manipulation of the\n\/\/ > \"user.overlay.redirect\" or \"user.overlay.metacopy\" xattrs.\n\/\/ > ...\n\/\/\n\/\/ The \"userxattr\" support is not exposed in \"\/sys\/module\/overlay\/parameters\".\nfunc NeedsUserXAttr(d string) (bool, error) {\n\tif !sys.RunningInUserNS() {\n\t\t\/\/ we are the real root (i.e., the root in the initial user NS),\n\t\t\/\/ so we do never need \"userxattr\" opt.\n\t\treturn false, nil\n\t}\n\n\t\/\/ TODO: add fast path for kernel >= 5.11 .\n\t\/\/\n\t\/\/ Keep in mind that distro vendors might be going to backport the patch to older kernels.\n\t\/\/ So we can't completely remove the check.\n\n\ttdRoot := filepath.Join(d, \"userxattr-check\")\n\tif err := os.RemoveAll(tdRoot); err != nil {\n\t\tlogrus.WithError(err).Warnf(\"Failed to remove check directory %v\", tdRoot)\n\t}\n\n\tif err := os.MkdirAll(tdRoot, 0700); err != nil {\n\t\treturn false, err\n\t}\n\n\tdefer func() {\n\t\tif err := os.RemoveAll(tdRoot); err != nil {\n\t\t\tlogrus.WithError(err).Warnf(\"Failed to remove check directory %v\", tdRoot)\n\t\t}\n\t}()\n\n\ttd, err := ioutil.TempDir(tdRoot, \"\")\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, dir := range []string{\"lower1\", \"lower2\", \"upper\", \"work\", \"merged\"} {\n\t\tif err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\topts := []string{\n\t\tfmt.Sprintf(\"lowerdir=%s:%s,upperdir=%s,workdir=%s\", filepath.Join(td, \"lower2\"), filepath.Join(td, \"lower1\"), filepath.Join(td, \"upper\"), filepath.Join(td, \"work\")),\n\t\t\"userxattr\",\n\t}\n\n\tm := mount.Mount{\n\t\tType: \"overlay\",\n\t\tSource: \"overlay\",\n\t\tOptions: opts,\n\t}\n\n\tdest := filepath.Join(td, \"merged\")\n\tif err := m.Mount(dest); err != nil {\n\t\t\/\/ Probably the host is running Ubuntu\/Debian kernel (< 5.11) with the userns patch but without the userxattr patch.\n\t\t\/\/ Return false without error.\n\t\tlogrus.WithError(err).Debugf(\"cannot mount overlay with \\\"userxattr\\\", probably the kernel does not support userxattr\")\n\t\treturn false, nil\n\t}\n\tif err := mount.UnmountAll(dest, 0); err != nil {\n\t\tlogrus.WithError(err).Warnf(\"Failed to unmount check directory %v\", dest)\n\t}\n\treturn true, nil\n}\n<commit_msg>overlayutils\/userxattr.go: add \"fast path\" for kernel >= 5.11.0<commit_after>\/\/ +build linux\n\n\/\/ Forked from https:\/\/github.com\/containerd\/containerd\/blob\/9ade247b38b5a685244e1391c86ff41ab109556e\/snapshots\/overlay\/check.go\n\/*\n Copyright The containerd Authors.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n*\/\n\npackage overlayutils\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\n\t\"github.com\/containerd\/containerd\/mount\"\n\t\"github.com\/containerd\/containerd\/sys\"\n\t\"github.com\/docker\/docker\/pkg\/parsers\/kernel\"\n\t\"github.com\/sirupsen\/logrus\"\n)\n\n\/\/ NeedsUserXAttr returns whether overlayfs should be mounted with the \"userxattr\" mount option.\n\/\/\n\/\/ The \"userxattr\" option is needed for mounting overlayfs inside a user namespace with kernel >= 5.11.\n\/\/\n\/\/ The \"userxattr\" option is NOT needed for the initial user namespace (aka \"the host\").\n\/\/\n\/\/ Also, Ubuntu (since circa 2015) and Debian (since 10) with kernel < 5.11 can mount\n\/\/ the overlayfs in a user namespace without the \"userxattr\" option.\n\/\/\n\/\/ The corresponding kernel commit: https:\/\/github.com\/torvalds\/linux\/commit\/2d2f2d7322ff43e0fe92bf8cccdc0b09449bf2e1\n\/\/ > ovl: user xattr\n\/\/ >\n\/\/ > Optionally allow using \"user.overlay.\" namespace instead of \"trusted.overlay.\"\n\/\/ > ...\n\/\/ > Disable redirect_dir and metacopy options, because these would allow privilege escalation through direct manipulation of the\n\/\/ > \"user.overlay.redirect\" or \"user.overlay.metacopy\" xattrs.\n\/\/ > ...\n\/\/\n\/\/ The \"userxattr\" support is not exposed in \"\/sys\/module\/overlay\/parameters\".\nfunc NeedsUserXAttr(d string) (bool, error) {\n\tif !sys.RunningInUserNS() {\n\t\t\/\/ we are the real root (i.e., the root in the initial user NS),\n\t\t\/\/ so we do never need \"userxattr\" opt.\n\t\treturn false, nil\n\t}\n\n\t\/\/ Fast path for kernel >= 5.11 .\n\t\/\/\n\t\/\/ Keep in mind that distro vendors might be going to backport the patch to older kernels.\n\t\/\/ So we can't completely remove the \"slow path\".\n\tif kernel.CheckKernelVersion(5, 11, 0) {\n\t\treturn true, nil\n\t}\n\n\ttdRoot := filepath.Join(d, \"userxattr-check\")\n\tif err := os.RemoveAll(tdRoot); err != nil {\n\t\tlogrus.WithError(err).Warnf(\"Failed to remove check directory %v\", tdRoot)\n\t}\n\n\tif err := os.MkdirAll(tdRoot, 0700); err != nil {\n\t\treturn false, err\n\t}\n\n\tdefer func() {\n\t\tif err := os.RemoveAll(tdRoot); err != nil {\n\t\t\tlogrus.WithError(err).Warnf(\"Failed to remove check directory %v\", tdRoot)\n\t\t}\n\t}()\n\n\ttd, err := ioutil.TempDir(tdRoot, \"\")\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tfor _, dir := range []string{\"lower1\", \"lower2\", \"upper\", \"work\", \"merged\"} {\n\t\tif err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\topts := []string{\n\t\tfmt.Sprintf(\"lowerdir=%s:%s,upperdir=%s,workdir=%s\", filepath.Join(td, \"lower2\"), filepath.Join(td, \"lower1\"), filepath.Join(td, \"upper\"), filepath.Join(td, \"work\")),\n\t\t\"userxattr\",\n\t}\n\n\tm := mount.Mount{\n\t\tType: \"overlay\",\n\t\tSource: \"overlay\",\n\t\tOptions: opts,\n\t}\n\n\tdest := filepath.Join(td, \"merged\")\n\tif err := m.Mount(dest); err != nil {\n\t\t\/\/ Probably the host is running Ubuntu\/Debian kernel (< 5.11) with the userns patch but without the userxattr patch.\n\t\t\/\/ Return false without error.\n\t\tlogrus.WithError(err).Debugf(\"cannot mount overlay with \\\"userxattr\\\", probably the kernel does not support userxattr\")\n\t\treturn false, nil\n\t}\n\tif err := mount.UnmountAll(dest, 0); err != nil {\n\t\tlogrus.WithError(err).Warnf(\"Failed to unmount check directory %v\", dest)\n\t}\n\treturn true, nil\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>fix(redhat): failed to parse date extra text: Z (#31)<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage action\n\nimport (\n\t\"errors\"\n\t\"github.com\/globocom\/tsuru\/log\"\n)\n\n\/\/ Result is the value returned by Forward. It is used in the call of the next\n\/\/ action, and also when rolling back the actions.\ntype Result interface{}\n\n\/\/ Forward is the function called by the pipeline executor in the forward\n\/\/ phase. It receives a FWContext instance, that contains the list of\n\/\/ parameters given to the pipeline executor and the result of the previous\n\/\/ action in the pipeline (which will be nil for the first action in the\n\/\/ pipeline).\ntype Forward func(context FWContext) (Result, error)\n\n\/\/ Backward is the function called by the pipeline executor when in the\n\/\/ backward phase. It receives the context instance, that contains the list of\n\/\/ parameters given to the pipeline executor and the result of the forward\n\/\/ phase.\ntype Backward func(context BWContext)\n\n\/\/ FWContext is the context used in calls to Forward functions (forward phase).\ntype FWContext struct {\n\t\/\/ Result of the previous action.\n\tPrevious Result\n\n\t\/\/ List of parameters given to the executor.\n\tParams []interface{}\n}\n\n\/\/ BWContext is the context used in calls to Backward functions (backward\n\/\/ phase).\ntype BWContext struct {\n\t\/\/ Result of the forward phase (for the current action).\n\tFWResult Result\n\n\t\/\/ List of parameters given to the executor.\n\tParams []interface{}\n}\n\n\/\/ Action defines actions that should be . It is composed of two functions:\n\/\/ Forward and Backward.\n\/\/\n\/\/ Each action should do only one thing, and do it well. All information that\n\/\/ is needed to undo the action should be returned by the Forward function.\ntype Action struct {\n\t\/\/ Name is the action name. Used by the log.\n\tName string\n\n\t\/\/ Function that will be invoked in the forward phase. This value\n\t\/\/ cannot be nil.\n\tForward Forward\n\n\t\/\/ Function that will be invoked in the backward phase. For actions\n\t\/\/ that are not undoable, this attribute should be nil.\n\tBackward Backward\n\n\t\/\/ Minimum number of parameters that this action requires to run.\n\tMinParams int\n\n\t\/\/ Result of the action. Stored for use in the backward phase.\n\tresult Result\n}\n\n\/\/ Pipeline is a list of actions. Each pipeline is atomic: either all actions\n\/\/ are successfully executed, or none of them are. For that, it's fundamental\n\/\/ that all actions are really small and atomic.\ntype Pipeline struct {\n\tactions []*Action\n}\n\n\/\/ NewPipeline creates a new pipeline instance with the given list of actions.\nfunc NewPipeline(actions ...*Action) *Pipeline {\n\treturn &Pipeline{actions: actions}\n}\n\n\/\/ Execute executes the pipeline.\n\/\/\n\/\/ The execution starts in the forward phase, calling the Forward function of\n\/\/ all actions. If none of the Forward calls return error, the pipeline\n\/\/ execution ends in the forward phase and is \"committed\".\n\/\/\n\/\/ If any of the Forward call fail, the executor switches to the backward phase\n\/\/ (roll back) and call the Backward function for each action completed. It\n\/\/ does not call the Backward function of the action that has failed.\n\/\/\n\/\/ After rolling back all completed actions, it returns the original error\n\/\/ returned by the action that failed.\nfunc (p *Pipeline) Execute(params ...interface{}) error {\n\tvar (\n\t\tr Result\n\t\terr error\n\t)\n\tif len(p.actions) == 0 {\n\t\treturn errors.New(\"No actions to execute.\")\n\t}\n\tfwCtx := FWContext{Params: params}\n\tfor i, a := range p.actions {\n\t\tlog.Printf(\"[pipeline] running the Forward for the %s action\", a.Name)\n\t\tif a.Forward == nil {\n\t\t\terr = errors.New(\"All actions must define the forward function.\")\n\t\t} else if len(fwCtx.Params) < a.MinParams {\n\t\t\terr = errors.New(\"Not enough parameters to call Action.Forward.\")\n\t\t} else {\n\t\t\tr, err = a.Forward(fwCtx)\n\t\t\ta.result = r\n\t\t\tfwCtx.Previous = r\n\t\t}\n\t\tif err != nil {\n\t\t\tp.rollback(i-1, params)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (p *Pipeline) rollback(index int, params []interface{}) {\n\tbwCtx := BWContext{Params: params}\n\tfor i := index; i >= 0; i-- {\n\t\tlog.Printf(\"[pipeline] running the Backward for the %s action\", p.actions[i].Name)\n\t\tif p.actions[i].Backward != nil {\n\t\t\tbwCtx.FWResult = p.actions[i].result\n\t\t\tp.actions[i].Backward(bwCtx)\n\t\t}\n\t}\n}\n<commit_msg>actions: changed log message.<commit_after>\/\/ Copyright 2013 tsuru authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage action\n\nimport (\n\t\"errors\"\n\t\"github.com\/globocom\/tsuru\/log\"\n)\n\n\/\/ Result is the value returned by Forward. It is used in the call of the next\n\/\/ action, and also when rolling back the actions.\ntype Result interface{}\n\n\/\/ Forward is the function called by the pipeline executor in the forward\n\/\/ phase. It receives a FWContext instance, that contains the list of\n\/\/ parameters given to the pipeline executor and the result of the previous\n\/\/ action in the pipeline (which will be nil for the first action in the\n\/\/ pipeline).\ntype Forward func(context FWContext) (Result, error)\n\n\/\/ Backward is the function called by the pipeline executor when in the\n\/\/ backward phase. It receives the context instance, that contains the list of\n\/\/ parameters given to the pipeline executor and the result of the forward\n\/\/ phase.\ntype Backward func(context BWContext)\n\n\/\/ FWContext is the context used in calls to Forward functions (forward phase).\ntype FWContext struct {\n\t\/\/ Result of the previous action.\n\tPrevious Result\n\n\t\/\/ List of parameters given to the executor.\n\tParams []interface{}\n}\n\n\/\/ BWContext is the context used in calls to Backward functions (backward\n\/\/ phase).\ntype BWContext struct {\n\t\/\/ Result of the forward phase (for the current action).\n\tFWResult Result\n\n\t\/\/ List of parameters given to the executor.\n\tParams []interface{}\n}\n\n\/\/ Action defines actions that should be . It is composed of two functions:\n\/\/ Forward and Backward.\n\/\/\n\/\/ Each action should do only one thing, and do it well. All information that\n\/\/ is needed to undo the action should be returned by the Forward function.\ntype Action struct {\n\t\/\/ Name is the action name. Used by the log.\n\tName string\n\n\t\/\/ Function that will be invoked in the forward phase. This value\n\t\/\/ cannot be nil.\n\tForward Forward\n\n\t\/\/ Function that will be invoked in the backward phase. For actions\n\t\/\/ that are not undoable, this attribute should be nil.\n\tBackward Backward\n\n\t\/\/ Minimum number of parameters that this action requires to run.\n\tMinParams int\n\n\t\/\/ Result of the action. Stored for use in the backward phase.\n\tresult Result\n}\n\n\/\/ Pipeline is a list of actions. Each pipeline is atomic: either all actions\n\/\/ are successfully executed, or none of them are. For that, it's fundamental\n\/\/ that all actions are really small and atomic.\ntype Pipeline struct {\n\tactions []*Action\n}\n\n\/\/ NewPipeline creates a new pipeline instance with the given list of actions.\nfunc NewPipeline(actions ...*Action) *Pipeline {\n\treturn &Pipeline{actions: actions}\n}\n\n\/\/ Execute executes the pipeline.\n\/\/\n\/\/ The execution starts in the forward phase, calling the Forward function of\n\/\/ all actions. If none of the Forward calls return error, the pipeline\n\/\/ execution ends in the forward phase and is \"committed\".\n\/\/\n\/\/ If any of the Forward call fail, the executor switches to the backward phase\n\/\/ (roll back) and call the Backward function for each action completed. It\n\/\/ does not call the Backward function of the action that has failed.\n\/\/\n\/\/ After rolling back all completed actions, it returns the original error\n\/\/ returned by the action that failed.\nfunc (p *Pipeline) Execute(params ...interface{}) error {\n\tvar (\n\t\tr Result\n\t\terr error\n\t)\n\tif len(p.actions) == 0 {\n\t\treturn errors.New(\"No actions to execute.\")\n\t}\n\tfwCtx := FWContext{Params: params}\n\tfor i, a := range p.actions {\n\t\tlog.Printf(\"[pipeline] running the Forward for the %s action\", a.Name)\n\t\tif a.Forward == nil {\n\t\t\terr = errors.New(\"All actions must define the forward function.\")\n\t\t} else if len(fwCtx.Params) < a.MinParams {\n\t\t\terr = errors.New(\"Not enough parameters to call Action.Forward.\")\n\t\t} else {\n\t\t\tr, err = a.Forward(fwCtx)\n\t\t\ta.result = r\n\t\t\tfwCtx.Previous = r\n\t\t}\n\t\tif err != nil {\n\t\t\tp.rollback(i-1, params)\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (p *Pipeline) rollback(index int, params []interface{}) {\n\tbwCtx := BWContext{Params: params}\n\tfor i := index; i >= 0; i-- {\n\t\tlog.Printf(\"[pipeline] running Backward for %s action\", p.actions[i].Name)\n\t\tif p.actions[i].Backward != nil {\n\t\t\tbwCtx.FWResult = p.actions[i].result\n\t\t\tp.actions[i].Backward(bwCtx)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>pkg\/api: move Semantic equality to k8s.io\/apimachinery\/pkg\/api\/equality<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\n\/\/ PerUserKeyUpkeep rolls the user's per-user-key if the last PUK\n\/\/ was added by a now-revoked device.\n\/\/ Does not add a first per-user-key. Use PerUserKeyUpgrade for that.\n\/\/ This engine makes up for the fact that after a self-deprovision\n\/\/ the latest PUK for a user was generated on the very machine they\n\/\/ wanted to deprovision.\n\/\/ This will not notice if a device revoked another device but neglected\n\/\/ to roll the PUK. No clients should do that.\npackage engine\n\nimport (\n\t\"errors\"\n\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\t\"github.com\/keybase\/client\/go\/protocol\/keybase1\"\n)\n\n\/\/ PerUserKeyUpkeep is an engine.\ntype PerUserKeyUpkeep struct {\n\tlibkb.Contextified\n\targs *PerUserKeyUpkeepArgs\n\tDidRollKey bool\n}\n\ntype PerUserKeyUpkeepArgs struct {\n\tLoginContext libkb.LoginContext \/\/ optional\n}\n\n\/\/ NewPerUserKeyUpkeep creates a PerUserKeyUpkeep engine.\nfunc NewPerUserKeyUpkeep(g *libkb.GlobalContext, args *PerUserKeyUpkeepArgs) *PerUserKeyUpkeep {\n\treturn &PerUserKeyUpkeep{\n\t\targs: args,\n\t\tContextified: libkb.NewContextified(g),\n\t}\n}\n\n\/\/ Name is the unique engine name.\nfunc (e *PerUserKeyUpkeep) Name() string {\n\treturn \"PerUserKeyUpkeep\"\n}\n\n\/\/ GetPrereqs returns the engine prereqs.\nfunc (e *PerUserKeyUpkeep) Prereqs() Prereqs {\n\treturn Prereqs{\n\t\tSession: true,\n\t}\n}\n\n\/\/ RequiredUIs returns the required UIs.\nfunc (e *PerUserKeyUpkeep) RequiredUIs() []libkb.UIKind {\n\treturn []libkb.UIKind{}\n}\n\n\/\/ SubConsumers returns the other UI consumers for this engine.\nfunc (e *PerUserKeyUpkeep) SubConsumers() []libkb.UIConsumer {\n\treturn []libkb.UIConsumer{}\n}\n\n\/\/ Run starts the engine.\nfunc (e *PerUserKeyUpkeep) Run(ctx *Context) (err error) {\n\tdefer e.G().CTrace(ctx.GetNetContext(), \"PerUserKeyUpkeep\", func() error { return err })()\n\treturn e.inner(ctx)\n}\n\nfunc (e *PerUserKeyUpkeep) inner(ctx *Context) error {\n\te.G().Log.CDebugf(ctx.GetNetContext(), \"PerUserKeyUpkeep load self\")\n\n\tuid := e.G().GetMyUID()\n\tif uid.IsNil() {\n\t\treturn libkb.NoUIDError{}\n\t}\n\n\tloadArg := libkb.NewLoadUserArgBase(e.G()).\n\t\tWithNetContext(ctx.GetNetContext()).\n\t\tWithUID(uid).\n\t\tWithSelf(true).\n\t\tWithPublicKeyOptional()\n\tloadArg.LoginContext = e.args.LoginContext\n\tupak, me, err := e.G().GetUPAKLoader().LoadV2(*loadArg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ `me` could be nil.\n\n\tvar shouldRollKey bool\n\tshouldRollKey, err = e.shouldRollKey(ctx, uid, &upak.Current)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !shouldRollKey {\n\t\te.G().Log.CDebugf(ctx.GetNetContext(), \"PerUserKeyUpkeep skipping\")\n\t\treturn nil\n\t}\n\n\t\/\/ Roll the key\n\te.G().Log.CDebugf(ctx.GetNetContext(), \"PerUserKeyUpkeep rolling key\")\n\targ := &PerUserKeyRollArgs{\n\t\tLoginContext: e.args.LoginContext,\n\t\tMe: me,\n\t}\n\teng := NewPerUserKeyRoll(e.G(), arg)\n\terr = RunEngine(eng, ctx)\n\te.DidRollKey = eng.DidNewKey\n\treturn err\n}\n\n\/\/ Whether we should roll the per-user-key.\nfunc (e *PerUserKeyUpkeep) shouldRollKey(ctx *Context, uid keybase1.UID, upak *keybase1.UserPlusKeysV2) (bool, error) {\n\n\tif len(upak.PerUserKeys) == 0 {\n\t\te.G().Log.CDebugf(ctx.GetNetContext(), \"PerUserKeyUpkeep has no per-user-key\")\n\t\treturn false, nil\n\t}\n\te.G().Log.CDebugf(ctx.GetNetContext(), \"PerUserKeyUpkeep has %v per-user-keys\", len(upak.PerUserKeys))\n\n\tlastPuk := upak.PerUserKeys[len(upak.PerUserKeys)-1]\n\tif !lastPuk.SignedByKID.IsValid() {\n\t\treturn false, errors.New(\"latest per-user-key had invalid signed-by KID\")\n\t}\n\te.G().Log.CDebugf(ctx.GetNetContext(), \"PerUserKeyUpkeep last key signed by KID: %v\", lastPuk.SignedByKID.String())\n\treturn !e.keyIsActiveSibkey(ctx, lastPuk.SignedByKID, upak), nil\n}\n\nfunc (e *PerUserKeyUpkeep) keyIsActiveSibkey(ctx *Context, kid keybase1.KID, upak *keybase1.UserPlusKeysV2) bool {\n\tfor _, dkey := range upak.DeviceKeys {\n\t\tactive := dkey.Base.Revocation == nil\n\t\tif active && dkey.Base.IsSibkey && dkey.Base.Kid.Equal(kid) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<commit_msg>feedback: remove logincontexts<commit_after>\/\/ Copyright 2017 Keybase, Inc. All rights reserved. Use of\n\/\/ this source code is governed by the included BSD license.\n\n\/\/ PerUserKeyUpkeep rolls the user's per-user-key if the last PUK\n\/\/ was added by a now-revoked device.\n\/\/ Does not add a first per-user-key. Use PerUserKeyUpgrade for that.\n\/\/ This engine makes up for the fact that after a self-deprovision\n\/\/ the latest PUK for a user was generated on the very machine they\n\/\/ wanted to deprovision.\n\/\/ This will not notice if a device revoked another device but neglected\n\/\/ to roll the PUK. No clients should do that.\npackage engine\n\nimport (\n\t\"errors\"\n\n\t\"github.com\/keybase\/client\/go\/libkb\"\n\t\"github.com\/keybase\/client\/go\/protocol\/keybase1\"\n)\n\n\/\/ PerUserKeyUpkeep is an engine.\ntype PerUserKeyUpkeep struct {\n\tlibkb.Contextified\n\targs *PerUserKeyUpkeepArgs\n\tDidRollKey bool\n}\n\ntype PerUserKeyUpkeepArgs struct{}\n\n\/\/ NewPerUserKeyUpkeep creates a PerUserKeyUpkeep engine.\nfunc NewPerUserKeyUpkeep(g *libkb.GlobalContext, args *PerUserKeyUpkeepArgs) *PerUserKeyUpkeep {\n\treturn &PerUserKeyUpkeep{\n\t\targs: args,\n\t\tContextified: libkb.NewContextified(g),\n\t}\n}\n\n\/\/ Name is the unique engine name.\nfunc (e *PerUserKeyUpkeep) Name() string {\n\treturn \"PerUserKeyUpkeep\"\n}\n\n\/\/ GetPrereqs returns the engine prereqs.\nfunc (e *PerUserKeyUpkeep) Prereqs() Prereqs {\n\treturn Prereqs{\n\t\tSession: true,\n\t}\n}\n\n\/\/ RequiredUIs returns the required UIs.\nfunc (e *PerUserKeyUpkeep) RequiredUIs() []libkb.UIKind {\n\treturn []libkb.UIKind{}\n}\n\n\/\/ SubConsumers returns the other UI consumers for this engine.\nfunc (e *PerUserKeyUpkeep) SubConsumers() []libkb.UIConsumer {\n\treturn []libkb.UIConsumer{}\n}\n\n\/\/ Run starts the engine.\nfunc (e *PerUserKeyUpkeep) Run(ctx *Context) (err error) {\n\tdefer e.G().CTrace(ctx.GetNetContext(), \"PerUserKeyUpkeep\", func() error { return err })()\n\treturn e.inner(ctx)\n}\n\nfunc (e *PerUserKeyUpkeep) inner(ctx *Context) error {\n\te.G().Log.CDebugf(ctx.GetNetContext(), \"PerUserKeyUpkeep load self\")\n\n\tuid := e.G().GetMyUID()\n\tif uid.IsNil() {\n\t\treturn libkb.NoUIDError{}\n\t}\n\n\tloadArg := libkb.NewLoadUserArgBase(e.G()).\n\t\tWithNetContext(ctx.GetNetContext()).\n\t\tWithUID(uid).\n\t\tWithSelf(true).\n\t\tWithPublicKeyOptional()\n\tupak, me, err := e.G().GetUPAKLoader().LoadV2(*loadArg)\n\tif err != nil {\n\t\treturn err\n\t}\n\t\/\/ `me` could be nil.\n\n\tvar shouldRollKey bool\n\tshouldRollKey, err = e.shouldRollKey(ctx, uid, &upak.Current)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !shouldRollKey {\n\t\te.G().Log.CDebugf(ctx.GetNetContext(), \"PerUserKeyUpkeep skipping\")\n\t\treturn nil\n\t}\n\n\t\/\/ Roll the key\n\te.G().Log.CDebugf(ctx.GetNetContext(), \"PerUserKeyUpkeep rolling key\")\n\targ := &PerUserKeyRollArgs{\n\t\tMe: me,\n\t}\n\teng := NewPerUserKeyRoll(e.G(), arg)\n\terr = RunEngine(eng, ctx)\n\te.DidRollKey = eng.DidNewKey\n\treturn err\n}\n\n\/\/ Whether we should roll the per-user-key.\nfunc (e *PerUserKeyUpkeep) shouldRollKey(ctx *Context, uid keybase1.UID, upak *keybase1.UserPlusKeysV2) (bool, error) {\n\n\tif len(upak.PerUserKeys) == 0 {\n\t\te.G().Log.CDebugf(ctx.GetNetContext(), \"PerUserKeyUpkeep has no per-user-key\")\n\t\treturn false, nil\n\t}\n\te.G().Log.CDebugf(ctx.GetNetContext(), \"PerUserKeyUpkeep has %v per-user-keys\", len(upak.PerUserKeys))\n\n\tlastPuk := upak.PerUserKeys[len(upak.PerUserKeys)-1]\n\tif !lastPuk.SignedByKID.IsValid() {\n\t\treturn false, errors.New(\"latest per-user-key had invalid signed-by KID\")\n\t}\n\te.G().Log.CDebugf(ctx.GetNetContext(), \"PerUserKeyUpkeep last key signed by KID: %v\", lastPuk.SignedByKID.String())\n\treturn !e.keyIsActiveSibkey(ctx, lastPuk.SignedByKID, upak), nil\n}\n\nfunc (e *PerUserKeyUpkeep) keyIsActiveSibkey(ctx *Context, kid keybase1.KID, upak *keybase1.UserPlusKeysV2) bool {\n\tfor _, dkey := range upak.DeviceKeys {\n\t\tactive := dkey.Base.Revocation == nil\n\t\tif active && dkey.Base.IsSibkey && dkey.Base.Kid.Equal(kid) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package container \/\/ import \"github.com\/docker\/docker\/integration\/container\"\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/docker\/docker\/api\/types\/container\"\n\t\"github.com\/docker\/docker\/api\/types\/mount\"\n\t\"github.com\/docker\/docker\/api\/types\/network\"\n\t\"github.com\/docker\/docker\/client\"\n\t\"github.com\/docker\/docker\/integration-cli\/daemon\"\n\t\"github.com\/docker\/docker\/integration\/util\/request\"\n\t\"github.com\/docker\/docker\/pkg\/stdcopy\"\n\t\"github.com\/docker\/docker\/pkg\/system\"\n\t\"github.com\/gotestyourself\/gotestyourself\/fs\"\n\t\"github.com\/gotestyourself\/gotestyourself\/skip\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestContainerShmNoLeak(t *testing.T) {\n\tt.Parallel()\n\td := daemon.New(t, \"docker\", \"dockerd\", daemon.Config{})\n\tclient, err := d.NewClient()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\td.StartWithBusybox(t)\n\tdefer d.Stop(t)\n\n\tctx := context.Background()\n\tcfg := container.Config{\n\t\tImage: \"busybox\",\n\t\tCmd: []string{\"top\"},\n\t}\n\n\tctr, err := client.ContainerCreate(ctx, &cfg, nil, nil, \"\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer client.ContainerRemove(ctx, ctr.ID, types.ContainerRemoveOptions{Force: true})\n\n\tif err := client.ContainerStart(ctx, ctr.ID, types.ContainerStartOptions{}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ this should recursively bind mount everything in the test daemons root\n\t\/\/ except of course we are hoping that the previous containers \/dev\/shm mount did not leak into this new container\n\thc := container.HostConfig{\n\t\tMounts: []mount.Mount{\n\t\t\t{\n\t\t\t\tType: mount.TypeBind,\n\t\t\t\tSource: d.Root,\n\t\t\t\tTarget: \"\/testdaemonroot\",\n\t\t\t},\n\t\t},\n\t}\n\tcfg.Cmd = []string{\"\/bin\/sh\", \"-c\", fmt.Sprintf(\"mount | grep testdaemonroot | grep containers | grep %s\", ctr.ID)}\n\tcfg.AttachStdout = true\n\tcfg.AttachStderr = true\n\tctrLeak, err := client.ContainerCreate(ctx, &cfg, &hc, nil, \"\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tattach, err := client.ContainerAttach(ctx, ctrLeak.ID, types.ContainerAttachOptions{\n\t\tStream: true,\n\t\tStdout: true,\n\t\tStderr: true,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := client.ContainerStart(ctx, ctrLeak.ID, types.ContainerStartOptions{}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tbuf := bytes.NewBuffer(nil)\n\n\tif _, err := stdcopy.StdCopy(buf, buf, attach.Reader); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tout := bytes.TrimSpace(buf.Bytes())\n\tif !bytes.Equal(out, []byte{}) {\n\t\tt.Fatalf(\"mount leaked: %s\", string(out))\n\t}\n}\n\nfunc TestContainerNetworkMountsNoChown(t *testing.T) {\n\t\/\/ chown only applies to Linux bind mounted volumes; must be same host to verify\n\tskip.If(t, testEnv.DaemonInfo.OSType != \"linux\" || !testEnv.IsLocalDaemon())\n\n\tdefer setupTest(t)()\n\n\tctx := context.Background()\n\n\ttmpDir := fs.NewDir(t, \"network-file-mounts\", fs.WithMode(0755), fs.WithFile(\"nwfile\", \"network file bind mount\", fs.WithMode(0644)))\n\tdefer tmpDir.Remove()\n\n\ttmpNWFileMount := tmpDir.Join(\"nwfile\")\n\n\tconfig := container.Config{\n\t\tImage: \"busybox\",\n\t}\n\thostConfig := container.HostConfig{\n\t\tMounts: []mount.Mount{\n\t\t\t{\n\t\t\t\tType: \"bind\",\n\t\t\t\tSource: tmpNWFileMount,\n\t\t\t\tTarget: \"\/etc\/resolv.conf\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: \"bind\",\n\t\t\t\tSource: tmpNWFileMount,\n\t\t\t\tTarget: \"\/etc\/hostname\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: \"bind\",\n\t\t\t\tSource: tmpNWFileMount,\n\t\t\t\tTarget: \"\/etc\/hosts\",\n\t\t\t},\n\t\t},\n\t}\n\n\tcli, err := client.NewEnvClient()\n\trequire.NoError(t, err)\n\tdefer cli.Close()\n\n\tctrCreate, err := cli.ContainerCreate(ctx, &config, &hostConfig, &network.NetworkingConfig{}, \"\")\n\trequire.NoError(t, err)\n\t\/\/ container will exit immediately because of no tty, but we only need the start sequence to test the condition\n\terr = cli.ContainerStart(ctx, ctrCreate.ID, types.ContainerStartOptions{})\n\trequire.NoError(t, err)\n\n\t\/\/ Check that host-located bind mount network file did not change ownership when the container was started\n\t\/\/ Note: If the user specifies a mountpath from the host, we should not be\n\t\/\/ attempting to chown files outside the daemon's metadata directory\n\t\/\/ (represented by `daemon.repository` at init time).\n\t\/\/ This forces users who want to use user namespaces to handle the\n\t\/\/ ownership needs of any external files mounted as network files\n\t\/\/ (\/etc\/resolv.conf, \/etc\/hosts, \/etc\/hostname) separately from the\n\t\/\/ daemon. In all other volume\/bind mount situations we have taken this\n\t\/\/ same line--we don't chown host file content.\n\t\/\/ See GitHub PR 34224 for details.\n\tstatT, err := system.Stat(tmpNWFileMount)\n\trequire.NoError(t, err)\n\tassert.Equal(t, uint32(0), statT.UID(), \"bind mounted network file should not change ownership from root\")\n}\n\nfunc TestMountDaemonRoot(t *testing.T) {\n\tt.Parallel()\n\n\tclient := request.NewAPIClient(t)\n\tctx := context.Background()\n\tinfo, err := client.Info(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, test := range []struct {\n\t\tdesc string\n\t\tpropagation mount.Propagation\n\t\texpected mount.Propagation\n\t}{\n\t\t{\n\t\t\tdesc: \"default\",\n\t\t\tpropagation: \"\",\n\t\t\texpected: mount.PropagationRSlave,\n\t\t},\n\t\t{\n\t\t\tdesc: \"private\",\n\t\t\tpropagation: mount.PropagationPrivate,\n\t\t},\n\t\t{\n\t\t\tdesc: \"rprivate\",\n\t\t\tpropagation: mount.PropagationRPrivate,\n\t\t},\n\t\t{\n\t\t\tdesc: \"slave\",\n\t\t\tpropagation: mount.PropagationSlave,\n\t\t},\n\t\t{\n\t\t\tdesc: \"rslave\",\n\t\t\tpropagation: mount.PropagationRSlave,\n\t\t\texpected: mount.PropagationRSlave,\n\t\t},\n\t\t{\n\t\t\tdesc: \"shared\",\n\t\t\tpropagation: mount.PropagationShared,\n\t\t},\n\t\t{\n\t\t\tdesc: \"rshared\",\n\t\t\tpropagation: mount.PropagationRShared,\n\t\t\texpected: mount.PropagationRShared,\n\t\t},\n\t} {\n\t\tt.Run(test.desc, func(t *testing.T) {\n\t\t\ttest := test\n\t\t\tt.Parallel()\n\n\t\t\tpropagationSpec := fmt.Sprintf(\":%s\", test.propagation)\n\t\t\tif test.propagation == \"\" {\n\t\t\t\tpropagationSpec = \"\"\n\t\t\t}\n\t\t\tbindSpecRoot := info.DockerRootDir + \":\" + \"\/foo\" + propagationSpec\n\t\t\tbindSpecSub := filepath.Join(info.DockerRootDir, \"containers\") + \":\/foo\" + propagationSpec\n\n\t\t\tfor name, hc := range map[string]*container.HostConfig{\n\t\t\t\t\"bind root\": {Binds: []string{bindSpecRoot}},\n\t\t\t\t\"bind subpath\": {Binds: []string{bindSpecSub}},\n\t\t\t\t\"mount root\": {\n\t\t\t\t\tMounts: []mount.Mount{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tType: mount.TypeBind,\n\t\t\t\t\t\t\tSource: info.DockerRootDir,\n\t\t\t\t\t\t\tTarget: \"\/foo\",\n\t\t\t\t\t\t\tBindOptions: &mount.BindOptions{Propagation: test.propagation},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"mount subpath\": {\n\t\t\t\t\tMounts: []mount.Mount{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tType: mount.TypeBind,\n\t\t\t\t\t\t\tSource: filepath.Join(info.DockerRootDir, \"containers\"),\n\t\t\t\t\t\t\tTarget: \"\/foo\",\n\t\t\t\t\t\t\tBindOptions: &mount.BindOptions{Propagation: test.propagation},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t} {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\thc := hc\n\t\t\t\t\tt.Parallel()\n\n\t\t\t\t\tc, err := client.ContainerCreate(ctx, &container.Config{\n\t\t\t\t\t\tImage: \"busybox\",\n\t\t\t\t\t\tCmd: []string{\"true\"},\n\t\t\t\t\t}, hc, nil, \"\")\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tif test.expected != \"\" {\n\t\t\t\t\t\t\tt.Fatal(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\/\/ expected an error, so this is ok and should not continue\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif test.expected == \"\" {\n\t\t\t\t\t\tt.Fatal(\"expected create to fail\")\n\t\t\t\t\t}\n\n\t\t\t\t\tdefer func() {\n\t\t\t\t\t\tif err := client.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{Force: true}); err != nil {\n\t\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\n\t\t\t\t\tinspect, err := client.ContainerInspect(ctx, c.ID)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t\tif len(inspect.Mounts) != 1 {\n\t\t\t\t\t\tt.Fatalf(\"unexpected number of mounts: %+v\", inspect.Mounts)\n\t\t\t\t\t}\n\n\t\t\t\t\tm := inspect.Mounts[0]\n\t\t\t\t\tif m.Propagation != test.expected {\n\t\t\t\t\t\tt.Fatalf(\"got unexpected propagation mode, expected %q, got: %v\", test.expected, m.Propagation)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n<commit_msg>Fix import path<commit_after>package container \/\/ import \"github.com\/docker\/docker\/integration\/container\"\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"path\/filepath\"\n\t\"testing\"\n\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/docker\/docker\/api\/types\/container\"\n\t\"github.com\/docker\/docker\/api\/types\/mount\"\n\t\"github.com\/docker\/docker\/api\/types\/network\"\n\t\"github.com\/docker\/docker\/client\"\n\t\"github.com\/docker\/docker\/integration-cli\/daemon\"\n\t\"github.com\/docker\/docker\/integration\/internal\/request\"\n\t\"github.com\/docker\/docker\/pkg\/stdcopy\"\n\t\"github.com\/docker\/docker\/pkg\/system\"\n\t\"github.com\/gotestyourself\/gotestyourself\/fs\"\n\t\"github.com\/gotestyourself\/gotestyourself\/skip\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc TestContainerShmNoLeak(t *testing.T) {\n\tt.Parallel()\n\td := daemon.New(t, \"docker\", \"dockerd\", daemon.Config{})\n\tclient, err := d.NewClient()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\td.StartWithBusybox(t)\n\tdefer d.Stop(t)\n\n\tctx := context.Background()\n\tcfg := container.Config{\n\t\tImage: \"busybox\",\n\t\tCmd: []string{\"top\"},\n\t}\n\n\tctr, err := client.ContainerCreate(ctx, &cfg, nil, nil, \"\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer client.ContainerRemove(ctx, ctr.ID, types.ContainerRemoveOptions{Force: true})\n\n\tif err := client.ContainerStart(ctx, ctr.ID, types.ContainerStartOptions{}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t\/\/ this should recursively bind mount everything in the test daemons root\n\t\/\/ except of course we are hoping that the previous containers \/dev\/shm mount did not leak into this new container\n\thc := container.HostConfig{\n\t\tMounts: []mount.Mount{\n\t\t\t{\n\t\t\t\tType: mount.TypeBind,\n\t\t\t\tSource: d.Root,\n\t\t\t\tTarget: \"\/testdaemonroot\",\n\t\t\t},\n\t\t},\n\t}\n\tcfg.Cmd = []string{\"\/bin\/sh\", \"-c\", fmt.Sprintf(\"mount | grep testdaemonroot | grep containers | grep %s\", ctr.ID)}\n\tcfg.AttachStdout = true\n\tcfg.AttachStderr = true\n\tctrLeak, err := client.ContainerCreate(ctx, &cfg, &hc, nil, \"\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tattach, err := client.ContainerAttach(ctx, ctrLeak.ID, types.ContainerAttachOptions{\n\t\tStream: true,\n\t\tStdout: true,\n\t\tStderr: true,\n\t})\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif err := client.ContainerStart(ctx, ctrLeak.ID, types.ContainerStartOptions{}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tbuf := bytes.NewBuffer(nil)\n\n\tif _, err := stdcopy.StdCopy(buf, buf, attach.Reader); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tout := bytes.TrimSpace(buf.Bytes())\n\tif !bytes.Equal(out, []byte{}) {\n\t\tt.Fatalf(\"mount leaked: %s\", string(out))\n\t}\n}\n\nfunc TestContainerNetworkMountsNoChown(t *testing.T) {\n\t\/\/ chown only applies to Linux bind mounted volumes; must be same host to verify\n\tskip.If(t, testEnv.DaemonInfo.OSType != \"linux\" || !testEnv.IsLocalDaemon())\n\n\tdefer setupTest(t)()\n\n\tctx := context.Background()\n\n\ttmpDir := fs.NewDir(t, \"network-file-mounts\", fs.WithMode(0755), fs.WithFile(\"nwfile\", \"network file bind mount\", fs.WithMode(0644)))\n\tdefer tmpDir.Remove()\n\n\ttmpNWFileMount := tmpDir.Join(\"nwfile\")\n\n\tconfig := container.Config{\n\t\tImage: \"busybox\",\n\t}\n\thostConfig := container.HostConfig{\n\t\tMounts: []mount.Mount{\n\t\t\t{\n\t\t\t\tType: \"bind\",\n\t\t\t\tSource: tmpNWFileMount,\n\t\t\t\tTarget: \"\/etc\/resolv.conf\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: \"bind\",\n\t\t\t\tSource: tmpNWFileMount,\n\t\t\t\tTarget: \"\/etc\/hostname\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tType: \"bind\",\n\t\t\t\tSource: tmpNWFileMount,\n\t\t\t\tTarget: \"\/etc\/hosts\",\n\t\t\t},\n\t\t},\n\t}\n\n\tcli, err := client.NewEnvClient()\n\trequire.NoError(t, err)\n\tdefer cli.Close()\n\n\tctrCreate, err := cli.ContainerCreate(ctx, &config, &hostConfig, &network.NetworkingConfig{}, \"\")\n\trequire.NoError(t, err)\n\t\/\/ container will exit immediately because of no tty, but we only need the start sequence to test the condition\n\terr = cli.ContainerStart(ctx, ctrCreate.ID, types.ContainerStartOptions{})\n\trequire.NoError(t, err)\n\n\t\/\/ Check that host-located bind mount network file did not change ownership when the container was started\n\t\/\/ Note: If the user specifies a mountpath from the host, we should not be\n\t\/\/ attempting to chown files outside the daemon's metadata directory\n\t\/\/ (represented by `daemon.repository` at init time).\n\t\/\/ This forces users who want to use user namespaces to handle the\n\t\/\/ ownership needs of any external files mounted as network files\n\t\/\/ (\/etc\/resolv.conf, \/etc\/hosts, \/etc\/hostname) separately from the\n\t\/\/ daemon. In all other volume\/bind mount situations we have taken this\n\t\/\/ same line--we don't chown host file content.\n\t\/\/ See GitHub PR 34224 for details.\n\tstatT, err := system.Stat(tmpNWFileMount)\n\trequire.NoError(t, err)\n\tassert.Equal(t, uint32(0), statT.UID(), \"bind mounted network file should not change ownership from root\")\n}\n\nfunc TestMountDaemonRoot(t *testing.T) {\n\tt.Parallel()\n\n\tclient := request.NewAPIClient(t)\n\tctx := context.Background()\n\tinfo, err := client.Info(ctx)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, test := range []struct {\n\t\tdesc string\n\t\tpropagation mount.Propagation\n\t\texpected mount.Propagation\n\t}{\n\t\t{\n\t\t\tdesc: \"default\",\n\t\t\tpropagation: \"\",\n\t\t\texpected: mount.PropagationRSlave,\n\t\t},\n\t\t{\n\t\t\tdesc: \"private\",\n\t\t\tpropagation: mount.PropagationPrivate,\n\t\t},\n\t\t{\n\t\t\tdesc: \"rprivate\",\n\t\t\tpropagation: mount.PropagationRPrivate,\n\t\t},\n\t\t{\n\t\t\tdesc: \"slave\",\n\t\t\tpropagation: mount.PropagationSlave,\n\t\t},\n\t\t{\n\t\t\tdesc: \"rslave\",\n\t\t\tpropagation: mount.PropagationRSlave,\n\t\t\texpected: mount.PropagationRSlave,\n\t\t},\n\t\t{\n\t\t\tdesc: \"shared\",\n\t\t\tpropagation: mount.PropagationShared,\n\t\t},\n\t\t{\n\t\t\tdesc: \"rshared\",\n\t\t\tpropagation: mount.PropagationRShared,\n\t\t\texpected: mount.PropagationRShared,\n\t\t},\n\t} {\n\t\tt.Run(test.desc, func(t *testing.T) {\n\t\t\ttest := test\n\t\t\tt.Parallel()\n\n\t\t\tpropagationSpec := fmt.Sprintf(\":%s\", test.propagation)\n\t\t\tif test.propagation == \"\" {\n\t\t\t\tpropagationSpec = \"\"\n\t\t\t}\n\t\t\tbindSpecRoot := info.DockerRootDir + \":\" + \"\/foo\" + propagationSpec\n\t\t\tbindSpecSub := filepath.Join(info.DockerRootDir, \"containers\") + \":\/foo\" + propagationSpec\n\n\t\t\tfor name, hc := range map[string]*container.HostConfig{\n\t\t\t\t\"bind root\": {Binds: []string{bindSpecRoot}},\n\t\t\t\t\"bind subpath\": {Binds: []string{bindSpecSub}},\n\t\t\t\t\"mount root\": {\n\t\t\t\t\tMounts: []mount.Mount{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tType: mount.TypeBind,\n\t\t\t\t\t\t\tSource: info.DockerRootDir,\n\t\t\t\t\t\t\tTarget: \"\/foo\",\n\t\t\t\t\t\t\tBindOptions: &mount.BindOptions{Propagation: test.propagation},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"mount subpath\": {\n\t\t\t\t\tMounts: []mount.Mount{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tType: mount.TypeBind,\n\t\t\t\t\t\t\tSource: filepath.Join(info.DockerRootDir, \"containers\"),\n\t\t\t\t\t\t\tTarget: \"\/foo\",\n\t\t\t\t\t\t\tBindOptions: &mount.BindOptions{Propagation: test.propagation},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t} {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\thc := hc\n\t\t\t\t\tt.Parallel()\n\n\t\t\t\t\tc, err := client.ContainerCreate(ctx, &container.Config{\n\t\t\t\t\t\tImage: \"busybox\",\n\t\t\t\t\t\tCmd: []string{\"true\"},\n\t\t\t\t\t}, hc, nil, \"\")\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tif test.expected != \"\" {\n\t\t\t\t\t\t\tt.Fatal(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t\t\/\/ expected an error, so this is ok and should not continue\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tif test.expected == \"\" {\n\t\t\t\t\t\tt.Fatal(\"expected create to fail\")\n\t\t\t\t\t}\n\n\t\t\t\t\tdefer func() {\n\t\t\t\t\t\tif err := client.ContainerRemove(ctx, c.ID, types.ContainerRemoveOptions{Force: true}); err != nil {\n\t\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\n\t\t\t\t\tinspect, err := client.ContainerInspect(ctx, c.ID)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t\tif len(inspect.Mounts) != 1 {\n\t\t\t\t\t\tt.Fatalf(\"unexpected number of mounts: %+v\", inspect.Mounts)\n\t\t\t\t\t}\n\n\t\t\t\t\tm := inspect.Mounts[0]\n\t\t\t\t\tif m.Propagation != test.expected {\n\t\t\t\t\t\tt.Fatalf(\"got unexpected propagation mode, expected %q, got: %v\", test.expected, m.Propagation)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Markus Lindenberg, Stig Bakken\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"regexp\"\n\t\"syscall\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/common\/log\"\n)\n\nconst (\n\tnamespace = \"varnish_request\"\n)\n\ntype path_mappings struct {\n\tPattern *regexp.Regexp\n\tReplacement string\n}\n\nfunc main() {\n\tvar (\n\t\tlistenAddress = flag.String(\"http.port\", \":9151\", \"Host\/port for HTTP server\")\n\t\tmetricsPath = flag.String(\"http.metricsurl\", \"\/metrics\", \"Prometheus metrics path\")\n\t\thttpHost = flag.String(\"varnish.host\", \"\", \"Virtual host to look for in Varnish logs (defaults to all hosts)\")\n\t\tmappingsFile = flag.String(\"varnish.path-mappings\", \"\", \"Name of file with path mappings\")\n\t\tinstance = flag.String(\"varnish.instance\", \"\", \"Name of Varnish instance\")\n\t\tbefirstbyte = flag.Bool(\"varnish.firstbyte\", false, \"Also export metrics for backend time to first byte\")\n\t\tuser_query = flag.String(\"varnish.query\", \"\", \"VSL query override (defaults to one that is generated\")\n\t\tsizes = flag.Bool(\"varnish.sizes\", false, \"Also export metrics for response size\")\n\t)\n\tflag.Parse()\n\n\t\/\/ Listen to signals\n\tsigchan := make(chan os.Signal, 1)\n\tsignal.Notify(sigchan, syscall.SIGTERM, syscall.SIGINT)\n\n\t\/\/ Set up 'varnishncsa' pipe\n\tcmdName := \"varnishncsa\"\n\tvslQuery := buildVslQuery(*httpHost, *user_query)\n\tvarnishFormat := buildVarnishncsaFormat(*befirstbyte, *sizes, *user_query)\n\tcmdArgs := buildVarnishncsaArgs(vslQuery, *instance, varnishFormat)\n\tlog.Infof(\"Running command: %v %v\\n\", cmdName, cmdArgs)\n\tcmd := exec.Command(cmdName, cmdArgs...)\n\tcmdReader, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tscanner := bufio.NewScanner(cmdReader)\n\n\tpath_mappings, err := parseMappings(*mappingsFile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Setup metrics\n\tvarnishMessages := prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: namespace,\n\t\tName: \"exporter_log_messages\",\n\t\tHelp: \"Current total log messages received.\",\n\t})\n\terr = prometheus.Register(varnishMessages)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvarnishParseFailures := prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: namespace,\n\t\tName: \"exporter_log_parse_failure\",\n\t\tHelp: \"Number of errors while parsing log messages.\",\n\t})\n\terr = prometheus.Register(varnishParseFailures)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar msgs int64\n\n\tgo func() {\n\t\tfor scanner.Scan() {\n\t\t\tvarnishMessages.Inc()\n\t\t\tcontent := scanner.Text()\n\t\t\tmsgs++\n\t\t\tmetrics, labels, err := parseMessage(content, path_mappings)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, metric := range metrics {\n\t\t\t\tvar collector prometheus.Collector\n\t\t\t\tcollector, err = prometheus.RegisterOrGet(prometheus.NewHistogramVec(prometheus.HistogramOpts{\n\t\t\t\t\tNamespace: namespace,\n\t\t\t\t\tName: metric.Name,\n\t\t\t\t\tHelp: fmt.Sprintf(\"Varnish request log value for %s\", metric.Name),\n\t\t\t\t}, labels.Names))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcollector.(*prometheus.HistogramVec).WithLabelValues(labels.Values...).Observe(metric.Value)\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ Setup HTTP server\n\thttp.Handle(*metricsPath, prometheus.Handler())\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Write([]byte(`<html>\n <head><title>Varnish Request Exporter<\/title><\/head>\n <body>\n <h1>Varnish Request Exporter<\/h1>\n <p><a href='` + *metricsPath + `'>Metrics<\/a><\/p>\n <\/body>\n <\/html>`))\n\t})\n\n\tgo func() {\n\t\tlog.Infof(\"Starting Server: %s\", *listenAddress)\n\t\tlog.Fatal(http.ListenAndServe(*listenAddress, nil))\n\t}()\n\n\tgo func() {\n\t\terr = cmd.Start()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\terr = cmd.Wait()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlog.Infof(\"varnishncsa command exited\")\n\t\tlog.Infof(\"Messages received: %d\", msgs)\n\t\tos.Exit(0)\n\t}()\n\n\ts := <-sigchan\n\tlog.Infof(\"Received %v, terminating\", s)\n\tlog.Infof(\"Messages received: %d\", msgs)\n\n\tos.Exit(0)\n}\n\nfunc parseMappings(mappings_file string) (mappings []path_mappings, err error) {\n\tmappings = make([]path_mappings, 0)\n\tif mappings_file == \"\" {\n\t\treturn\n\t}\n\tin_file, err := os.Open(mappings_file)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tscanner := bufio.NewScanner(in_file)\n\tscanner.Split(bufio.ScanLines)\n\tcomment_re := regexp.MustCompile(\"(#.*|^\\\\s+|\\\\s+$)\")\n\tsplit_re := regexp.MustCompile(\"\\\\s+\")\n\tline_no := 0\n\tfor scanner.Scan() {\n\t\tline_no++\n\t\tline := comment_re.ReplaceAllString(scanner.Text(), \"\")\n\t\tif line == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tparts := split_re.Split(line, 2)\n\t\tswitch len(parts) {\n\t\tcase 1:\n\t\t\tlog.Debugf(\"mapping strip: %s\", parts[0])\n\t\t\tmappings = append(mappings, path_mappings{regexp.MustCompile(parts[0]), \"\"})\n\t\tcase 2:\n\t\t\tlog.Debugf(\"mapping replace: %s => %s\", parts[0], parts[1])\n\t\t\tmappings = append(mappings, path_mappings{regexp.MustCompile(parts[0]), parts[1]})\n\t\t}\n\t}\n\tin_file.Close()\n\treturn\n}\n\nfunc buildVslQuery(httpHost string, user_query string) (query string) {\n\tquery = user_query\n\tif httpHost != \"\" {\n\t\tif query != \"\" {\n\t\t\tquery += \" and \"\n\t\t}\n\t\tquery += \"ReqHeader:host eq \\\"\" + httpHost + \"\\\"\"\n\t}\n\treturn\n}\n\nfunc buildVarnishncsaFormat(befirstbyte bool, sizes bool, user_format string) (format string) {\n\tif user_format != \"\" {\n\t\tformat = user_format + \" \"\n\t}\n\tformat += \"method=\\\"%m\\\" status=%s path=\\\"%U\\\" cache=\\\"%{Varnish:hitmiss}x\\\" host=\\\"%{host}i\\\" time:%D\"\n\tif befirstbyte {\n\t\tformat += \" time_firstbyte:%{Varnish:time_firstbyte}x\"\n\t}\n\tif sizes {\n\t\tformat += \" respsize:%b\"\n\t}\n\treturn\n}\n\nfunc buildVarnishncsaArgs(vsl_query string, instance string, format string) (args []string) {\n\targs = make([]string, 0, 6)\n\targs = append(args, \"-F\")\n\targs = append(args, format)\n\tif vsl_query != \"\" {\n\t\targs = append(args, \"-q\")\n\t\targs = append(args, vsl_query)\n\t}\n\tif instance != \"\" {\n\t\targs = append(args, \"-n\", instance)\n\t}\n\treturn\n}\n<commit_msg>Fix bug with `--varnish.query`<commit_after>\/\/ Copyright 2016 Markus Lindenberg, Stig Bakken\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"regexp\"\n\t\"syscall\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/common\/log\"\n)\n\nconst (\n\tnamespace = \"varnish_request\"\n)\n\ntype path_mappings struct {\n\tPattern *regexp.Regexp\n\tReplacement string\n}\n\nfunc main() {\n\tvar (\n\t\tlistenAddress = flag.String(\"http.port\", \":9151\", \"Host\/port for HTTP server\")\n\t\tmetricsPath = flag.String(\"http.metricsurl\", \"\/metrics\", \"Prometheus metrics path\")\n\t\thttpHost = flag.String(\"varnish.host\", \"\", \"Virtual host to look for in Varnish logs (defaults to all hosts)\")\n\t\tmappingsFile = flag.String(\"varnish.path-mappings\", \"\", \"Name of file with path mappings\")\n\t\tinstance = flag.String(\"varnish.instance\", \"\", \"Name of Varnish instance\")\n\t\tbefirstbyte = flag.Bool(\"varnish.firstbyte\", false, \"Also export metrics for backend time to first byte\")\n\t\tuser_query = flag.String(\"varnish.query\", \"\", \"VSL query override (defaults to one that is generated\")\n\t\tsizes = flag.Bool(\"varnish.sizes\", false, \"Also export metrics for response size\")\n\t)\n\tflag.Parse()\n\n\t\/\/ Listen to signals\n\tsigchan := make(chan os.Signal, 1)\n\tsignal.Notify(sigchan, syscall.SIGTERM, syscall.SIGINT)\n\n\t\/\/ Set up 'varnishncsa' pipe\n\tcmdName := \"varnishncsa\"\n\tvslQuery := buildVslQuery(*httpHost, *user_query)\n\tvarnishFormat := buildVarnishncsaFormat(*befirstbyte, *sizes)\n\tcmdArgs := buildVarnishncsaArgs(vslQuery, *instance, varnishFormat)\n\tlog.Infof(\"Running command: %v %v\\n\", cmdName, cmdArgs)\n\tcmd := exec.Command(cmdName, cmdArgs...)\n\tcmdReader, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tscanner := bufio.NewScanner(cmdReader)\n\n\tpath_mappings, err := parseMappings(*mappingsFile)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Setup metrics\n\tvarnishMessages := prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: namespace,\n\t\tName: \"exporter_log_messages\",\n\t\tHelp: \"Current total log messages received.\",\n\t})\n\terr = prometheus.Register(varnishMessages)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvarnishParseFailures := prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: namespace,\n\t\tName: \"exporter_log_parse_failure\",\n\t\tHelp: \"Number of errors while parsing log messages.\",\n\t})\n\terr = prometheus.Register(varnishParseFailures)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar msgs int64\n\n\tgo func() {\n\t\tfor scanner.Scan() {\n\t\t\tvarnishMessages.Inc()\n\t\t\tcontent := scanner.Text()\n\t\t\tmsgs++\n\t\t\tmetrics, labels, err := parseMessage(content, path_mappings)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, metric := range metrics {\n\t\t\t\tvar collector prometheus.Collector\n\t\t\t\tcollector, err = prometheus.RegisterOrGet(prometheus.NewHistogramVec(prometheus.HistogramOpts{\n\t\t\t\t\tNamespace: namespace,\n\t\t\t\t\tName: metric.Name,\n\t\t\t\t\tHelp: fmt.Sprintf(\"Varnish request log value for %s\", metric.Name),\n\t\t\t\t}, labels.Names))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcollector.(*prometheus.HistogramVec).WithLabelValues(labels.Values...).Observe(metric.Value)\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ Setup HTTP server\n\thttp.Handle(*metricsPath, prometheus.Handler())\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Write([]byte(`<html>\n <head><title>Varnish Request Exporter<\/title><\/head>\n <body>\n <h1>Varnish Request Exporter<\/h1>\n <p><a href='` + *metricsPath + `'>Metrics<\/a><\/p>\n <\/body>\n <\/html>`))\n\t})\n\n\tgo func() {\n\t\tlog.Infof(\"Starting Server: %s\", *listenAddress)\n\t\tlog.Fatal(http.ListenAndServe(*listenAddress, nil))\n\t}()\n\n\tgo func() {\n\t\terr = cmd.Start()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\terr = cmd.Wait()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlog.Infof(\"varnishncsa command exited\")\n\t\tlog.Infof(\"Messages received: %d\", msgs)\n\t\tos.Exit(0)\n\t}()\n\n\ts := <-sigchan\n\tlog.Infof(\"Received %v, terminating\", s)\n\tlog.Infof(\"Messages received: %d\", msgs)\n\n\tos.Exit(0)\n}\n\nfunc parseMappings(mappings_file string) (mappings []path_mappings, err error) {\n\tmappings = make([]path_mappings, 0)\n\tif mappings_file == \"\" {\n\t\treturn\n\t}\n\tin_file, err := os.Open(mappings_file)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tscanner := bufio.NewScanner(in_file)\n\tscanner.Split(bufio.ScanLines)\n\tcomment_re := regexp.MustCompile(\"(#.*|^\\\\s+|\\\\s+$)\")\n\tsplit_re := regexp.MustCompile(\"\\\\s+\")\n\tline_no := 0\n\tfor scanner.Scan() {\n\t\tline_no++\n\t\tline := comment_re.ReplaceAllString(scanner.Text(), \"\")\n\t\tif line == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tparts := split_re.Split(line, 2)\n\t\tswitch len(parts) {\n\t\tcase 1:\n\t\t\tlog.Debugf(\"mapping strip: %s\", parts[0])\n\t\t\tmappings = append(mappings, path_mappings{regexp.MustCompile(parts[0]), \"\"})\n\t\tcase 2:\n\t\t\tlog.Debugf(\"mapping replace: %s => %s\", parts[0], parts[1])\n\t\t\tmappings = append(mappings, path_mappings{regexp.MustCompile(parts[0]), parts[1]})\n\t\t}\n\t}\n\tin_file.Close()\n\treturn\n}\n\nfunc buildVslQuery(httpHost string, user_query string) (query string) {\n\tquery = user_query\n\tif httpHost != \"\" {\n\t\tif query != \"\" {\n\t\t\tquery += \" and \"\n\t\t}\n\t\tquery += \"ReqHeader:host eq \\\"\" + httpHost + \"\\\"\"\n\t}\n\treturn\n}\n\nfunc buildVarnishncsaFormat(befirstbyte bool, sizes bool) (format string) {\n\tformat += \"method=\\\"%m\\\" status=%s path=\\\"%U\\\" cache=\\\"%{Varnish:hitmiss}x\\\" host=\\\"%{host}i\\\" time:%D\"\n\tif befirstbyte {\n\t\tformat += \" time_firstbyte:%{Varnish:time_firstbyte}x\"\n\t}\n\tif sizes {\n\t\tformat += \" respsize:%b\"\n\t}\n\treturn\n}\n\nfunc buildVarnishncsaArgs(vsl_query string, instance string, format string) (args []string) {\n\targs = make([]string, 0, 6)\n\targs = append(args, \"-F\")\n\targs = append(args, format)\n\tif vsl_query != \"\" {\n\t\targs = append(args, \"-q\")\n\t\targs = append(args, vsl_query)\n\t}\n\tif instance != \"\" {\n\t\targs = append(args, \"-n\", instance)\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Signed-off-by: Alborz A. Amir-Khalili <alborza@bisicl.ad.ece.ubc.ca><commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ +build integration\n\n\/*\nCopyright 2018 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage containeranalysis\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\tkritisv1beta1 \"github.com\/grafeas\/kritis\/pkg\/kritis\/apis\/kritis\/v1beta1\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/metadata\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/secrets\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/testutil\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/util\"\n\t\"google.golang.org\/genproto\/googleapis\/devtools\/containeranalysis\/v1beta1\/grafeas\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\nvar (\n\tIntTestNoteName = \"test-aa-note\"\n\tIntProject = \"kritis-int-test\"\n)\n\nfunc GetAA() *kritisv1beta1.AttestationAuthority {\n\taa := &kritisv1beta1.AttestationAuthority{\n\t\tSpec: kritisv1beta1.AttestationAuthoritySpec{\n\t\t\tNoteReference: fmt.Sprintf(\"projects\/%s\/notes\/%s\", IntProject, IntTestNoteName),\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: IntTestNoteName,\n\t\t},\n\t}\n\treturn aa\n}\n\nfunc TestGetVulnerabilities(t *testing.T) {\n\tclient, err := New()\n\tif err != nil {\n\t\tt.Fatalf(\"Could not initialize the client %s\", err)\n\t}\n\tvuln, err := client.Vulnerabilities(\"gcr.io\/kritis-int-test\/java-with-vulnz@sha256:358687cfd3ec8e1dfeb2bf51b5110e4e16f6df71f64fba01986f720b2fcba68a\")\n\tif err != nil {\n\t\tt.Fatalf(\"Found err %s\", err)\n\t}\n\tif vuln == nil {\n\t\tt.Fatalf(\"Expected some vulnerabilities. Nil found\")\n\t}\n}\n\nfunc TestCreateAttestationNoteAndOccurrence(t *testing.T) {\n\tclient, err := New()\n\tif err != nil {\n\t\tt.Fatalf(\"Could not initialize the client %s\", err)\n\t}\n\taa := GetAA()\n\t_, err = client.CreateAttestationNote(aa)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error while creating Note %v\", err)\n\t}\n\tdefer client.DeleteAttestationNote(aa)\n\n\tnote, err := client.AttestationNote(aa)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected no error while getting attestation note %v\", err)\n\t}\n\n\texpectedNoteName := fmt.Sprintf(\"projects\/%s\/notes\/%s\", IntProject, IntTestNoteName)\n\tif note.Name != expectedNoteName {\n\t\tt.Fatalf(\"Expected %s.\\n Got %s\", expectedNoteName, note.Name)\n\t}\n\n\tactualHint := note.GetAttestationAuthority().Hint.GetHumanReadableName()\n\tif actualHint != IntTestNoteName {\n\t\tt.Fatalf(\"Expected %s.\\n Got %s\", expectedNoteName, actualHint)\n\t}\n\n\t\/\/ Test Create Attestation Occurence\n\tpub, priv := testutil.CreateKeyPair(t, \"test\")\n\tpgpKey, err := secrets.NewPgpKey(priv, \"\", pub)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error while creating PGP key %v\", err)\n\t}\n\tsecret := &secrets.PGPSigningSecret{\n\t\tPgpKey: pgpKey,\n\t\tSecretName: \"test\",\n\t}\n\n\tproj, _, err := metadata.ParseNoteReference(aa.Spec.NoteReference)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to extract project ID %v\", err)\n\t}\n\tocc, err := client.CreateAttestationOccurrence(note, testutil.IntTestImage, secret, proj)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error while creating Occurence %v\", err)\n\t}\n\texpectedPgpKeyID := pgpKey.Fingerprint()\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error while extracting PGP key id %v\", err)\n\t}\n\tpgpKeyID := occ.GetAttestation().GetAttestation().GetPgpSignedAttestation().GetPgpKeyId()\n\tif pgpKeyID != expectedPgpKeyID {\n\t\tt.Errorf(\"Expected PGP key id: %q, got %q\", expectedPgpKeyID, pgpKeyID)\n\t}\n\tdefer client.DeleteOccurrence(occ.GetName())\n\n\t\/\/ Keep trying to list attestation occurrences until we time out.\n\t\/\/ Because the staleness bound is on the order of seconds, no need to try faster than once a second.\n\ttimeout := time.After(20 * time.Second)\n\ttick := time.Tick(1 * time.Second)\n\tfor {\n\t\tselect {\n\t\t\/\/ Got a timeout! fail with a timeout error\n\t\tcase <-timeout:\n\t\t\tt.Fatal(\"Should have created at least 1 occurrence\")\n\n\t\t\t\/\/ Got a tick, we should check note occurrences\n\t\tcase <-tick:\n\t\t\tif occurrences, err := client.Attestations(testutil.IntTestImage, aa); err != nil {\n\t\t\t\tt.Fatalf(\"Failed to retrieve attestations: %v\", err)\n\t\t\t} else if len(occurrences) > 0 {\n\t\t\t\t\/\/ Successfully retrieved attestations, exit the loop and the test.\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestGetMultiplePagesVulnerabilities(t *testing.T) {\n\tclient, err := New()\n\tif err != nil {\n\t\tt.Fatalf(\"Could not initialize the client %s\", err)\n\t}\n\n\t\/\/ Set PageSize to 300\n\tcreateListOccurrencesRequest = createListOccurrencesRequestTest\n\n\tvuln, err := client.Vulnerabilities(\"gcr.io\/kritis-int-test\/java-with-vulnz@sha256:358687cfd3ec8e1dfeb2bf51b5110e4e16f6df71f64fba01986f720b2fcba68a\")\n\tif err != nil {\n\t\tt.Fatalf(\"Found err %s\", err)\n\t}\n\n\tif len(vuln) <= 900 {\n\t\tt.Fatalf(\"Pagination error: expected at least 900 results on image 'gcr.io\/kritis-int-test\/java-with-vulnz'. Received %d.\", len(vuln))\n\t}\n}\n\nfunc createListOccurrencesRequestTest(containerImage, kind string) *grafeas.ListOccurrencesRequest {\n\treturn &grafeas.ListOccurrencesRequest{\n\t\tFilter: fmt.Sprintf(\"resourceUrl=%q AND kind=%q\", util.GetResourceURL(containerImage), kind),\n\t\tParent: fmt.Sprintf(\"projects\/%s\", getProjectFromContainerImage(containerImage)),\n\t\tPageSize: int32(100),\n\t}\n}\n<commit_msg>Correct pagesize comment<commit_after>\/\/ +build integration\n\n\/*\nCopyright 2018 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage containeranalysis\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\tkritisv1beta1 \"github.com\/grafeas\/kritis\/pkg\/kritis\/apis\/kritis\/v1beta1\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/metadata\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/secrets\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/testutil\"\n\t\"github.com\/grafeas\/kritis\/pkg\/kritis\/util\"\n\t\"google.golang.org\/genproto\/googleapis\/devtools\/containeranalysis\/v1beta1\/grafeas\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\nvar (\n\tIntTestNoteName = \"test-aa-note\"\n\tIntProject = \"kritis-int-test\"\n)\n\nfunc GetAA() *kritisv1beta1.AttestationAuthority {\n\taa := &kritisv1beta1.AttestationAuthority{\n\t\tSpec: kritisv1beta1.AttestationAuthoritySpec{\n\t\t\tNoteReference: fmt.Sprintf(\"projects\/%s\/notes\/%s\", IntProject, IntTestNoteName),\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: IntTestNoteName,\n\t\t},\n\t}\n\treturn aa\n}\n\nfunc TestGetVulnerabilities(t *testing.T) {\n\tclient, err := New()\n\tif err != nil {\n\t\tt.Fatalf(\"Could not initialize the client %s\", err)\n\t}\n\tvuln, err := client.Vulnerabilities(\"gcr.io\/kritis-int-test\/java-with-vulnz@sha256:358687cfd3ec8e1dfeb2bf51b5110e4e16f6df71f64fba01986f720b2fcba68a\")\n\tif err != nil {\n\t\tt.Fatalf(\"Found err %s\", err)\n\t}\n\tif vuln == nil {\n\t\tt.Fatalf(\"Expected some vulnerabilities. Nil found\")\n\t}\n}\n\nfunc TestCreateAttestationNoteAndOccurrence(t *testing.T) {\n\tclient, err := New()\n\tif err != nil {\n\t\tt.Fatalf(\"Could not initialize the client %s\", err)\n\t}\n\taa := GetAA()\n\t_, err = client.CreateAttestationNote(aa)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error while creating Note %v\", err)\n\t}\n\tdefer client.DeleteAttestationNote(aa)\n\n\tnote, err := client.AttestationNote(aa)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected no error while getting attestation note %v\", err)\n\t}\n\n\texpectedNoteName := fmt.Sprintf(\"projects\/%s\/notes\/%s\", IntProject, IntTestNoteName)\n\tif note.Name != expectedNoteName {\n\t\tt.Fatalf(\"Expected %s.\\n Got %s\", expectedNoteName, note.Name)\n\t}\n\n\tactualHint := note.GetAttestationAuthority().Hint.GetHumanReadableName()\n\tif actualHint != IntTestNoteName {\n\t\tt.Fatalf(\"Expected %s.\\n Got %s\", expectedNoteName, actualHint)\n\t}\n\n\t\/\/ Test Create Attestation Occurence\n\tpub, priv := testutil.CreateKeyPair(t, \"test\")\n\tpgpKey, err := secrets.NewPgpKey(priv, \"\", pub)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error while creating PGP key %v\", err)\n\t}\n\tsecret := &secrets.PGPSigningSecret{\n\t\tPgpKey: pgpKey,\n\t\tSecretName: \"test\",\n\t}\n\n\tproj, _, err := metadata.ParseNoteReference(aa.Spec.NoteReference)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to extract project ID %v\", err)\n\t}\n\tocc, err := client.CreateAttestationOccurrence(note, testutil.IntTestImage, secret, proj)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error while creating Occurence %v\", err)\n\t}\n\texpectedPgpKeyID := pgpKey.Fingerprint()\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected error while extracting PGP key id %v\", err)\n\t}\n\tpgpKeyID := occ.GetAttestation().GetAttestation().GetPgpSignedAttestation().GetPgpKeyId()\n\tif pgpKeyID != expectedPgpKeyID {\n\t\tt.Errorf(\"Expected PGP key id: %q, got %q\", expectedPgpKeyID, pgpKeyID)\n\t}\n\tdefer client.DeleteOccurrence(occ.GetName())\n\n\t\/\/ Keep trying to list attestation occurrences until we time out.\n\t\/\/ Because the staleness bound is on the order of seconds, no need to try faster than once a second.\n\ttimeout := time.After(20 * time.Second)\n\ttick := time.Tick(1 * time.Second)\n\tfor {\n\t\tselect {\n\t\t\/\/ Got a timeout! fail with a timeout error\n\t\tcase <-timeout:\n\t\t\tt.Fatal(\"Should have created at least 1 occurrence\")\n\n\t\t\t\/\/ Got a tick, we should check note occurrences\n\t\tcase <-tick:\n\t\t\tif occurrences, err := client.Attestations(testutil.IntTestImage, aa); err != nil {\n\t\t\t\tt.Fatalf(\"Failed to retrieve attestations: %v\", err)\n\t\t\t} else if len(occurrences) > 0 {\n\t\t\t\t\/\/ Successfully retrieved attestations, exit the loop and the test.\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestGetMultiplePagesVulnerabilities(t *testing.T) {\n\tclient, err := New()\n\tif err != nil {\n\t\tt.Fatalf(\"Could not initialize the client %s\", err)\n\t}\n\n\t\/\/ Set PageSize to 100\n\tcreateListOccurrencesRequest = createListOccurrencesRequestTest\n\n\tvuln, err := client.Vulnerabilities(\"gcr.io\/kritis-int-test\/java-with-vulnz@sha256:358687cfd3ec8e1dfeb2bf51b5110e4e16f6df71f64fba01986f720b2fcba68a\")\n\tif err != nil {\n\t\tt.Fatalf(\"Found err %s\", err)\n\t}\n\n\tif len(vuln) <= 900 {\n\t\tt.Fatalf(\"Pagination error: expected at least 900 results on image 'gcr.io\/kritis-int-test\/java-with-vulnz'. Received %d.\", len(vuln))\n\t}\n}\n\nfunc createListOccurrencesRequestTest(containerImage, kind string) *grafeas.ListOccurrencesRequest {\n\treturn &grafeas.ListOccurrencesRequest{\n\t\tFilter: fmt.Sprintf(\"resourceUrl=%q AND kind=%q\", util.GetResourceURL(containerImage), kind),\n\t\tParent: fmt.Sprintf(\"projects\/%s\", getProjectFromContainerImage(containerImage)),\n\t\tPageSize: int32(100),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 Documize Inc. <legal@documize.com>. All rights reserved.\n\/\/\n\/\/ This software (Documize Community Edition) is licensed under\n\/\/ GNU AGPL v3 http:\/\/www.gnu.org\/licenses\/agpl-3.0.en.html\n\/\/\n\/\/ You can operate outside the AGPL restrictions by purchasing\n\/\/ Documize Enterprise Edition and obtaining a commercial license\n\/\/ by contacting <sales@documize.com>.\n\/\/\n\/\/ https:\/\/documize.com\n\npackage mysql\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/documize\/community\/core\/env\"\n\t\"github.com\/documize\/community\/domain\"\n\t\"github.com\/documize\/community\/domain\/store\/mysql\"\n\t\"github.com\/documize\/community\/model\/group\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ Scope provides data access to MySQL.\ntype Scope struct {\n\tRuntime *env.Runtime\n}\n\n\/\/ Add inserts new user group into store.\nfunc (s Scope) Add(ctx domain.RequestContext, g group.Group) (err error) {\n\tg.Created = time.Now().UTC()\n\tg.Revised = time.Now().UTC()\n\n\t_, err = ctx.Transaction.Exec(\"INSERT INTO role (refid, orgid, role, purpose, created, revised) VALUES (?, ?, ?, ?, ?, ?)\",\n\t\tg.RefID, g.OrgID, g.Name, g.Purpose, g.Created, g.Revised)\n\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"insert group\")\n\t}\n\n\treturn\n}\n\n\/\/ Get returns requested group.\nfunc (s Scope) Get(ctx domain.RequestContext, refID string) (g group.Group, err error) {\n\terr = s.Runtime.Db.Get(&g,\n\t\t`SELECT id, refid, orgid, role as name, purpose, created, revised FROM role WHERE orgid=? AND refid=?`,\n\t\tctx.OrgID, refID)\n\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"select group\")\n\t}\n\n\treturn\n}\n\n\/\/ GetAll returns all user groups for current orgID.\nfunc (s Scope) GetAll(ctx domain.RequestContext) (groups []group.Group, err error) {\n\terr = s.Runtime.Db.Select(&groups,\n\t\t`SELECT a.id, a.refid, a.orgid, a.role as name, a.purpose, a.created, a.revised, COUNT(b.roleid) AS members\n\t\tFROM role a\n\t\tLEFT JOIN rolemember b ON a.refid=b.roleid\n\t\tWHERE a.orgid=?\n\t\tGROUP BY a.id, a.refid, a.orgid, a.role, a.purpose, a.created, a.revised\n\t\tORDER BY a.role`,\n\t\tctx.OrgID)\n\n\tif err == sql.ErrNoRows || len(groups) == 0 {\n\t\terr = nil\n\t\tgroups = []group.Group{}\n\t}\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"select groups\")\n\t}\n\n\treturn\n}\n\n\/\/ Update group name and description.\nfunc (s Scope) Update(ctx domain.RequestContext, g group.Group) (err error) {\n\tg.Revised = time.Now().UTC()\n\n\t_, err = ctx.Transaction.Exec(\"UPDATE role SET role=?, purpose=?, revised=? WHERE orgid=? AND refid=?\",\n\t\tg.Name, g.Purpose, g.Revised, ctx.OrgID, g.RefID)\n\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"update group\")\n\t}\n\n\treturn\n}\n\n\/\/ Delete removes group from store.\nfunc (s Scope) Delete(ctx domain.RequestContext, refID string) (rows int64, err error) {\n\tb := mysql.BaseQuery{}\n\tb.DeleteConstrained(ctx.Transaction, \"role\", ctx.OrgID, refID)\n\treturn b.DeleteWhere(ctx.Transaction, fmt.Sprintf(\"DELETE FROM rolemember WHERE orgid=\\\"%s\\\" AND roleid=\\\"%s\\\"\", ctx.OrgID, refID))\n}\n\n\/\/ GetGroupMembers returns all user associated with given group.\nfunc (s Scope) GetGroupMembers(ctx domain.RequestContext, groupID string) (members []group.Member, err error) {\n\terr = s.Runtime.Db.Select(&members,\n\t\t`SELECT a.id, a.orgid, a.roleid, a.userid, \n\t\tIFNULL(b.firstname, '') as firstname, IFNULL(b.lastname, '') as lastname\n\t\tFROM rolemember a\n\t\tLEFT JOIN user b ON b.refid=a.userid\n\t\tWHERE a.orgid=? AND a.roleid=?\n\t\tORDER BY b.firstname, b.lastname`,\n\t\tctx.OrgID, groupID)\n\n\tif err == sql.ErrNoRows || len(members) == 0 {\n\t\terr = nil\n\t\tmembers = []group.Member{}\n\t}\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"select members\")\n\t}\n\n\treturn\n}\n\n\/\/ JoinGroup adds user to group.\nfunc (s Scope) JoinGroup(ctx domain.RequestContext, groupID, userID string) (err error) {\n\t_, err = ctx.Transaction.Exec(\"INSERT INTO rolemember (orgid, roleid, userid) VALUES (?, ?, ?)\", ctx.OrgID, groupID, userID)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"insert group member\")\n\t}\n\n\treturn\n}\n\n\/\/ LeaveGroup removes user from group.\nfunc (s Scope) LeaveGroup(ctx domain.RequestContext, groupID, userID string) (err error) {\n\tb := mysql.BaseQuery{}\n\t_, err = b.DeleteWhere(ctx.Transaction, fmt.Sprintf(\"DELETE FROM rolemember WHERE orgid=\\\"%s\\\" AND roleid=\\\"%s\\\" AND userid=\\\"%s\\\"\", ctx.OrgID, groupID, userID))\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"clear group member\")\n\t}\n\n\treturn\n}\n\n\/\/ GetMembers returns members for every group.\n\/\/ Useful when you need to bulk fetch membership records\n\/\/ for subsequent processing.\nfunc (s Scope) GetMembers(ctx domain.RequestContext) (r []group.Record, err error) {\n\terr = s.Runtime.Db.Select(&r,\n\t\t`SELECT a.id, a.orgid, a.roleid, a.userid, b.role as name, b.purpose\n\t\tFROM rolemember a, role b\n\t\tWHERE a.orgid=? AND a.roleid=b.refid \n\t\tORDER BY a.userid`,\n\t\tctx.OrgID)\n\n\tif err == sql.ErrNoRows || len(r) == 0 {\n\t\terr = nil\n\t\tr = []group.Record{}\n\t}\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"select group members\")\n\t}\n\n\treturn\n}\n<commit_msg>Fix silent error drop<commit_after>\/\/ Copyright 2018 Documize Inc. <legal@documize.com>. All rights reserved.\n\/\/\n\/\/ This software (Documize Community Edition) is licensed under\n\/\/ GNU AGPL v3 http:\/\/www.gnu.org\/licenses\/agpl-3.0.en.html\n\/\/\n\/\/ You can operate outside the AGPL restrictions by purchasing\n\/\/ Documize Enterprise Edition and obtaining a commercial license\n\/\/ by contacting <sales@documize.com>.\n\/\/\n\/\/ https:\/\/documize.com\n\npackage mysql\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/documize\/community\/core\/env\"\n\t\"github.com\/documize\/community\/domain\"\n\t\"github.com\/documize\/community\/domain\/store\/mysql\"\n\t\"github.com\/documize\/community\/model\/group\"\n\t\"github.com\/pkg\/errors\"\n)\n\n\/\/ Scope provides data access to MySQL.\ntype Scope struct {\n\tRuntime *env.Runtime\n}\n\n\/\/ Add inserts new user group into store.\nfunc (s Scope) Add(ctx domain.RequestContext, g group.Group) (err error) {\n\tg.Created = time.Now().UTC()\n\tg.Revised = time.Now().UTC()\n\n\t_, err = ctx.Transaction.Exec(\"INSERT INTO role (refid, orgid, role, purpose, created, revised) VALUES (?, ?, ?, ?, ?, ?)\",\n\t\tg.RefID, g.OrgID, g.Name, g.Purpose, g.Created, g.Revised)\n\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"insert group\")\n\t}\n\n\treturn\n}\n\n\/\/ Get returns requested group.\nfunc (s Scope) Get(ctx domain.RequestContext, refID string) (g group.Group, err error) {\n\terr = s.Runtime.Db.Get(&g,\n\t\t`SELECT id, refid, orgid, role as name, purpose, created, revised FROM role WHERE orgid=? AND refid=?`,\n\t\tctx.OrgID, refID)\n\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"select group\")\n\t}\n\n\treturn\n}\n\n\/\/ GetAll returns all user groups for current orgID.\nfunc (s Scope) GetAll(ctx domain.RequestContext) (groups []group.Group, err error) {\n\tgroups = []group.Group{}\n\n\terr = s.Runtime.Db.Select(&groups,\n\t\t`SELECT a.id, a.refid, a.orgid, a.role as name, a.purpose, a.created, a.revised, COUNT(b.roleid) AS members\n\t\tFROM role a\n\t\tLEFT JOIN rolemember b ON a.refid=b.roleid\n\t\tWHERE a.orgid=?\n\t\tGROUP BY a.id, a.refid, a.orgid, a.role, a.purpose, a.created, a.revised\n\t\tORDER BY a.role`,\n\t\tctx.OrgID)\n\n\tif err == sql.ErrNoRows {\n\t\terr = nil\n\t}\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"select groups\")\n\t}\n\n\treturn\n}\n\n\/\/ Update group name and description.\nfunc (s Scope) Update(ctx domain.RequestContext, g group.Group) (err error) {\n\tg.Revised = time.Now().UTC()\n\n\t_, err = ctx.Transaction.Exec(\"UPDATE role SET role=?, purpose=?, revised=? WHERE orgid=? AND refid=?\",\n\t\tg.Name, g.Purpose, g.Revised, ctx.OrgID, g.RefID)\n\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"update group\")\n\t}\n\n\treturn\n}\n\n\/\/ Delete removes group from store.\nfunc (s Scope) Delete(ctx domain.RequestContext, refID string) (rows int64, err error) {\n\tb := mysql.BaseQuery{}\n\tb.DeleteConstrained(ctx.Transaction, \"role\", ctx.OrgID, refID)\n\treturn b.DeleteWhere(ctx.Transaction, fmt.Sprintf(\"DELETE FROM rolemember WHERE orgid=\\\"%s\\\" AND roleid=\\\"%s\\\"\", ctx.OrgID, refID))\n}\n\n\/\/ GetGroupMembers returns all user associated with given group.\nfunc (s Scope) GetGroupMembers(ctx domain.RequestContext, groupID string) (members []group.Member, err error) {\n\tmembers = []group.Member{}\n\n\terr = s.Runtime.Db.Select(&members,\n\t\t`SELECT a.id, a.orgid, a.roleid, a.userid,\n\t\tIFNULL(b.firstname, '') as firstname, IFNULL(b.lastname, '') as lastname\n\t\tFROM rolemember a\n\t\tLEFT JOIN user b ON b.refid=a.userid\n\t\tWHERE a.orgid=? AND a.roleid=?\n\t\tORDER BY b.firstname, b.lastname`,\n\t\tctx.OrgID, groupID)\n\n\tif err == sql.ErrNoRows {\n\t\terr = nil\n\t}\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"select members\")\n\t}\n\n\treturn\n}\n\n\/\/ JoinGroup adds user to group.\nfunc (s Scope) JoinGroup(ctx domain.RequestContext, groupID, userID string) (err error) {\n\t_, err = ctx.Transaction.Exec(\"INSERT INTO rolemember (orgid, roleid, userid) VALUES (?, ?, ?)\", ctx.OrgID, groupID, userID)\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"insert group member\")\n\t}\n\n\treturn\n}\n\n\/\/ LeaveGroup removes user from group.\nfunc (s Scope) LeaveGroup(ctx domain.RequestContext, groupID, userID string) (err error) {\n\tb := mysql.BaseQuery{}\n\t_, err = b.DeleteWhere(ctx.Transaction, fmt.Sprintf(\"DELETE FROM rolemember WHERE orgid=\\\"%s\\\" AND roleid=\\\"%s\\\" AND userid=\\\"%s\\\"\", ctx.OrgID, groupID, userID))\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"clear group member\")\n\t}\n\n\treturn\n}\n\n\/\/ GetMembers returns members for every group.\n\/\/ Useful when you need to bulk fetch membership records\n\/\/ for subsequent processing.\nfunc (s Scope) GetMembers(ctx domain.RequestContext) (r []group.Record, err error) {\n\tr = []group.Record{}\n\n\terr = s.Runtime.Db.Select(&r,\n\t\t`SELECT a.id, a.orgid, a.roleid, a.userid, b.role as name, b.purpose\n\t\tFROM rolemember a, role b\n\t\tWHERE a.orgid=? AND a.roleid=b.refid\n\t\tORDER BY a.userid`,\n\t\tctx.OrgID)\n\n\tif err == sql.ErrNoRows {\n\t\terr = nil\n\t}\n\tif err != nil {\n\t\terr = errors.Wrap(err, \"select group members\")\n\t}\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build java android\n\npackage syncbase\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"unsafe\"\n\n\t\"v.io\/v23\"\n\t\"v.io\/x\/ref\/services\/syncbase\/server\"\n\n\tjrpc \"v.io\/x\/jni\/impl\/google\/rpc\"\n\tjutil \"v.io\/x\/jni\/util\"\n\tjcontext \"v.io\/x\/jni\/v23\/context\"\n\tjaccess \"v.io\/x\/jni\/v23\/security\/access\"\n)\n\n\/\/ #include \"jni.h\"\nimport \"C\"\n\nvar (\n\tpermissionsSign = jutil.ClassSign(\"io.v.v23.security.access.Permissions\")\n\tcontextSign = jutil.ClassSign(\"io.v.v23.context.VContext\")\n\tstorageEngineSign = jutil.ClassSign(\"io.v.impl.google.services.syncbase.SyncbaseServer$StorageEngine\")\n\tserverSign = jutil.ClassSign(\"io.v.v23.rpc.Server\")\n\n\tjVRuntimeImplClass jutil.Class\n)\n\n\/\/ Init initializes the JNI code with the given Java environment. This method\n\/\/ must be invoked before any other method in this package and must be called\n\/\/ from the main Java thread (e.g., On_Load()).\nfunc Init(env jutil.Env) error {\n\tvar err error\n\tjVRuntimeImplClass, err = jutil.JFindClass(env, \"io\/v\/impl\/google\/rt\/VRuntimeImpl\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/export Java_io_v_impl_google_services_syncbase_SyncbaseServer_nativeWithNewServer\nfunc Java_io_v_impl_google_services_syncbase_SyncbaseServer_nativeWithNewServer(jenv *C.JNIEnv, jSyncbaseServerClass C.jclass, jContext C.jobject, jSyncbaseServerParams C.jobject) C.jobject {\n\tenv := jutil.Env(uintptr(unsafe.Pointer(jenv)))\n\tjCtx := jutil.Object(uintptr(unsafe.Pointer(jContext)))\n\tjParams := jutil.Object(uintptr(unsafe.Pointer(jSyncbaseServerParams)))\n\n\t\/\/ Read and translate all of the server params.\n\tjPerms, err := jutil.CallObjectMethod(env, jParams, \"getPermissions\", nil, permissionsSign)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\tperms, err := jaccess.GoPermissions(env, jPerms)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\tname, err := jutil.CallStringMethod(env, jParams, \"getName\", nil)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\trootDir, err := jutil.CallStringMethod(env, jParams, \"getStorageRootDir\", nil)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\tif rootDir == \"\" {\n\t\trootDir = filepath.Join(os.TempDir(), \"syncbaseserver\")\n\t\tif err := os.Mkdir(rootDir, 0755); err != nil && !os.IsExist(err) {\n\t\t\tjutil.JThrowV(env, err)\n\t\t\treturn nil\n\t\t}\n\t}\n\tjEngine, err := jutil.CallObjectMethod(env, jParams, \"getStorageEngine\", nil, storageEngineSign)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\tengine, err := GoStorageEngine(env, jEngine)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\tctx, err := jcontext.GoContext(env, jCtx)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\n\t\/\/ Start the server.\n\tservice, err := server.NewService(ctx, nil, server.ServiceOptions{\n\t\tPerms: perms,\n\t\tRootDir: rootDir,\n\t\tEngine: engine,\n\t})\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\td := server.NewDispatcher(service)\n\tnewCtx, s, err := v23.WithNewDispatchingServer(ctx, name, d)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\tif err := service.AddNames(ctx, s); err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\tjNewCtx, err := jcontext.JavaContext(env, newCtx)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\tjServer, err := jrpc.JavaServer(env, s)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\t\/\/ Attach a server to the new context.\n\tjServerAttCtx, err := jutil.CallStaticObjectMethod(env, jVRuntimeImplClass, \"withServer\", []jutil.Sign{contextSign, serverSign}, contextSign, jNewCtx, jServer)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\treturn C.jobject(unsafe.Pointer(jServerAttCtx))\n}\n<commit_msg>jni\/impl\/google\/services\/syncbase: remove 'call' arg from NewService<commit_after>\/\/ Copyright 2015 The Vanadium Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ +build java android\n\npackage syncbase\n\nimport (\n\t\"os\"\n\t\"path\/filepath\"\n\t\"unsafe\"\n\n\t\"v.io\/v23\"\n\t\"v.io\/x\/ref\/services\/syncbase\/server\"\n\n\tjrpc \"v.io\/x\/jni\/impl\/google\/rpc\"\n\tjutil \"v.io\/x\/jni\/util\"\n\tjcontext \"v.io\/x\/jni\/v23\/context\"\n\tjaccess \"v.io\/x\/jni\/v23\/security\/access\"\n)\n\n\/\/ #include \"jni.h\"\nimport \"C\"\n\nvar (\n\tpermissionsSign = jutil.ClassSign(\"io.v.v23.security.access.Permissions\")\n\tcontextSign = jutil.ClassSign(\"io.v.v23.context.VContext\")\n\tstorageEngineSign = jutil.ClassSign(\"io.v.impl.google.services.syncbase.SyncbaseServer$StorageEngine\")\n\tserverSign = jutil.ClassSign(\"io.v.v23.rpc.Server\")\n\n\tjVRuntimeImplClass jutil.Class\n)\n\n\/\/ Init initializes the JNI code with the given Java environment. This method\n\/\/ must be invoked before any other method in this package and must be called\n\/\/ from the main Java thread (e.g., On_Load()).\nfunc Init(env jutil.Env) error {\n\tvar err error\n\tjVRuntimeImplClass, err = jutil.JFindClass(env, \"io\/v\/impl\/google\/rt\/VRuntimeImpl\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/export Java_io_v_impl_google_services_syncbase_SyncbaseServer_nativeWithNewServer\nfunc Java_io_v_impl_google_services_syncbase_SyncbaseServer_nativeWithNewServer(jenv *C.JNIEnv, jSyncbaseServerClass C.jclass, jContext C.jobject, jSyncbaseServerParams C.jobject) C.jobject {\n\tenv := jutil.Env(uintptr(unsafe.Pointer(jenv)))\n\tjCtx := jutil.Object(uintptr(unsafe.Pointer(jContext)))\n\tjParams := jutil.Object(uintptr(unsafe.Pointer(jSyncbaseServerParams)))\n\n\t\/\/ Read and translate all of the server params.\n\tjPerms, err := jutil.CallObjectMethod(env, jParams, \"getPermissions\", nil, permissionsSign)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\tperms, err := jaccess.GoPermissions(env, jPerms)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\tname, err := jutil.CallStringMethod(env, jParams, \"getName\", nil)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\trootDir, err := jutil.CallStringMethod(env, jParams, \"getStorageRootDir\", nil)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\tif rootDir == \"\" {\n\t\trootDir = filepath.Join(os.TempDir(), \"syncbaseserver\")\n\t\tif err := os.Mkdir(rootDir, 0755); err != nil && !os.IsExist(err) {\n\t\t\tjutil.JThrowV(env, err)\n\t\t\treturn nil\n\t\t}\n\t}\n\tjEngine, err := jutil.CallObjectMethod(env, jParams, \"getStorageEngine\", nil, storageEngineSign)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\tengine, err := GoStorageEngine(env, jEngine)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\tctx, err := jcontext.GoContext(env, jCtx)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\n\t\/\/ Start the server.\n\tservice, err := server.NewService(ctx, server.ServiceOptions{\n\t\tPerms: perms,\n\t\tRootDir: rootDir,\n\t\tEngine: engine,\n\t})\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\td := server.NewDispatcher(service)\n\tnewCtx, s, err := v23.WithNewDispatchingServer(ctx, name, d)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\tif err := service.AddNames(ctx, s); err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\tjNewCtx, err := jcontext.JavaContext(env, newCtx)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\tjServer, err := jrpc.JavaServer(env, s)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\t\/\/ Attach a server to the new context.\n\tjServerAttCtx, err := jutil.CallStaticObjectMethod(env, jVRuntimeImplClass, \"withServer\", []jutil.Sign{contextSign, serverSign}, contextSign, jNewCtx, jServer)\n\tif err != nil {\n\t\tjutil.JThrowV(env, err)\n\t\treturn nil\n\t}\n\treturn C.jobject(unsafe.Pointer(jServerAttCtx))\n}\n<|endoftext|>"} {"text":"<commit_before>package arn\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/animenotifier\/anilist\"\n)\n\n\/\/ AniListAnimeFinder holds an internal map of ID to anime mappings\n\/\/ and is therefore very efficient to use when trying to find\n\/\/ anime by a given service and ID.\ntype AniListAnimeFinder struct {\n\tidToAnime map[string]*Anime\n\tmalIDToAnime map[string]*Anime\n}\n\n\/\/ NewAniListAnimeFinder creates a new finder for Anilist anime.\nfunc NewAniListAnimeFinder() *AniListAnimeFinder {\n\tfinder := &AniListAnimeFinder{\n\t\tidToAnime: map[string]*Anime{},\n\t\tmalIDToAnime: map[string]*Anime{},\n\t}\n\n\tfor anime := range StreamAnime() {\n\t\tid := anime.GetMapping(\"anilist\/anime\")\n\n\t\tif id != \"\" {\n\t\t\tfinder.idToAnime[id] = anime\n\t\t}\n\n\t\tmalID := anime.GetMapping(\"myanimelist\/anime\")\n\n\t\tif malID != \"\" {\n\t\t\tfinder.malIDToAnime[malID] = anime\n\t\t}\n\t}\n\n\treturn finder\n}\n\n\/\/ GetAnime tries to find an AniList anime in our anime database.\nfunc (finder *AniListAnimeFinder) GetAnime(id string, malID string) *Anime {\n\tanimeByID, existsByID := finder.idToAnime[id]\n\tanimeByMALID, existsByMALID := finder.malIDToAnime[malID]\n\n\t\/\/ If both MAL ID and AniList ID are matched, but the matched anime are different,\n\t\/\/ then we're trusting the MAL ID matching more and deleting the incorrect mapping.\n\tif existsByID && existsByMALID && animeByID.ID != animeByMALID.ID {\n\t\tanimeByID.RemoveMapping(\"anilist\/anime\")\n\t\tanimeByID.Save()\n\n\t\tdelete(finder.idToAnime, id)\n\n\t\tfmt.Println(\"MAL \/ Anilist mismatch:\")\n\t\tfmt.Println(animeByID.ID, animeByID)\n\t\tfmt.Println(animeByMALID.ID, animeByMALID)\n\t}\n\n\t\/\/ Add anilist mapping to the MAL mapped anime if it's missing\n\tif existsByMALID && animeByMALID.GetMapping(\"anilist\/anime\") != id {\n\t\tanimeByMALID.SetMapping(\"anilist\/anime\", id)\n\t\tanimeByMALID.Save()\n\n\t\tfinder.idToAnime[id] = animeByMALID\n\t}\n\n\tif existsByID {\n\t\treturn animeByID\n\t}\n\n\tif existsByMALID {\n\t\treturn animeByMALID\n\t}\n\n\treturn nil\n}\n\n\/\/ AniListAnimeListStatus returns the ARN version of the anime status.\nfunc AniListAnimeListStatus(item *anilist.AnimeListItem) string {\n\tswitch item.Status {\n\tcase \"CURRENT\", \"REPEATING\":\n\t\treturn AnimeListStatusWatching\n\tcase \"COMPLETED\":\n\t\treturn AnimeListStatusCompleted\n\tcase \"PLANNING\":\n\t\treturn AnimeListStatusPlanned\n\tcase \"PAUSED\":\n\t\treturn AnimeListStatusHold\n\tcase \"DROPPED\":\n\t\treturn AnimeListStatusDropped\n\tdefault:\n\t\treturn AnimeListStatusPlanned\n\t}\n}\n<commit_msg>Improved anilist importer<commit_after>package arn\n\nimport (\n\t\"fmt\"\n\n\t\"github.com\/animenotifier\/anilist\"\n)\n\n\/\/ AniListAnimeFinder holds an internal map of ID to anime mappings\n\/\/ and is therefore very efficient to use when trying to find\n\/\/ anime by a given service and ID.\ntype AniListAnimeFinder struct {\n\tidToAnime map[string]*Anime\n\tmalIDToAnime map[string]*Anime\n}\n\n\/\/ NewAniListAnimeFinder creates a new finder for Anilist anime.\nfunc NewAniListAnimeFinder() *AniListAnimeFinder {\n\tfinder := &AniListAnimeFinder{\n\t\tidToAnime: map[string]*Anime{},\n\t\tmalIDToAnime: map[string]*Anime{},\n\t}\n\n\tfor anime := range StreamAnime() {\n\t\tid := anime.GetMapping(\"anilist\/anime\")\n\n\t\tif id != \"\" {\n\t\t\tfinder.idToAnime[id] = anime\n\t\t}\n\n\t\tmalID := anime.GetMapping(\"myanimelist\/anime\")\n\n\t\tif malID != \"\" {\n\t\t\tfinder.malIDToAnime[malID] = anime\n\t\t}\n\t}\n\n\treturn finder\n}\n\n\/\/ GetAnime tries to find an AniList anime in our anime database.\nfunc (finder *AniListAnimeFinder) GetAnime(id string, malID string) *Anime {\n\tanimeByID, existsByID := finder.idToAnime[id]\n\tanimeByMALID, existsByMALID := finder.malIDToAnime[malID]\n\n\t\/\/ Add anilist mapping to the MAL mapped anime if it's missing\n\tif existsByMALID && animeByMALID.GetMapping(\"anilist\/anime\") != id {\n\t\tanimeByMALID.SetMapping(\"anilist\/anime\", id)\n\t\tanimeByMALID.Save()\n\n\t\tfinder.idToAnime[id] = animeByMALID\n\t}\n\n\t\/\/ If both MAL ID and AniList ID are matched, but the matched anime are different,\n\t\/\/ while the MAL IDs are different as well,\n\t\/\/ then we're trusting the MAL ID matching more and deleting the incorrect mapping.\n\tif existsByID && existsByMALID && animeByID.ID != animeByMALID.ID && animeByID.GetMapping(\"myanimelist\/anime\") != animeByMALID.GetMapping(\"myanimelist\/anime\") {\n\t\tanimeByID.RemoveMapping(\"anilist\/anime\")\n\t\tanimeByID.Save()\n\n\t\tdelete(finder.idToAnime, id)\n\n\t\tfmt.Println(\"MAL \/ Anilist mismatch:\")\n\t\tfmt.Println(animeByID.ID, animeByID)\n\t\tfmt.Println(animeByMALID.ID, animeByMALID)\n\t}\n\n\tif existsByID {\n\t\treturn animeByID\n\t}\n\n\tif existsByMALID {\n\t\treturn animeByMALID\n\t}\n\n\treturn nil\n}\n\n\/\/ AniListAnimeListStatus returns the ARN version of the anime status.\nfunc AniListAnimeListStatus(item *anilist.AnimeListItem) string {\n\tswitch item.Status {\n\tcase \"CURRENT\", \"REPEATING\":\n\t\treturn AnimeListStatusWatching\n\tcase \"COMPLETED\":\n\t\treturn AnimeListStatusCompleted\n\tcase \"PLANNING\":\n\t\treturn AnimeListStatusPlanned\n\tcase \"PAUSED\":\n\t\treturn AnimeListStatusHold\n\tcase \"DROPPED\":\n\t\treturn AnimeListStatusDropped\n\tdefault:\n\t\treturn AnimeListStatusPlanned\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package decoders\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"regexp\"\n\t\"strconv\"\n\n\t\"github.com\/seadsystem\/Backend\/DB\/landingzone\/constants\"\n)\n\ntype SeadPacket struct {\n\tType byte\n\tLocation byte\n\tTimestamp float64\n\tPeriod float64\n\tCount int\n\tData float64\n\tSerial int\n}\n\nvar headerRegex *regexp.Regexp\nvar InvalidHeader = errors.New(\"Invalid header.\")\nvar InvalidPacket = errors.New(\"Invalid packet.\")\nvar InvalidTime = errors.New(\"Invalid time.\")\n\n\/\/ init sets up stuff we need with proper error handling. If it isn't complicated or doesn't need error handling, it can probably just be assigned directly.\nfunc init() {\n\tvar err error\n\theaderRegex, err = regexp.Compile(constants.HEADER_REGEX)\n\tif err != nil {\n\t\tlog.Panic(\"Regex compile error:\", err)\n\t}\n}\n\n\/\/ DecodeHeader verifies that the header is in the correct format and extracts the serial number\nfunc DecodeHeader(packet []byte) (serial int, err error) {\n\tserialStrings := headerRegex.FindSubmatch(packet)\n\n\tif serialStrings == nil || len(serialStrings) != 2 {\n\t\terr = InvalidHeader\n\t\treturn\n\t}\n\n\tlog.Printf(\"Header serial string: %s\\n\", string(serialStrings[1]))\n\n\tserial, err = strconv.Atoi(string(serialStrings[1]))\n\treturn\n}\n\n\/\/ DecodePacket extracts the data sent from sensor\nfunc DecodePacket(buffer []byte) (packet SeadPacket, err error) {\n\tfor i := 0; i < len(buffer); {\n\t\tdatatype := buffer[i]\n\t\ti++\n\n\t\t\/\/ Switch on the type of data sent in the packet\n\t\tswitch {\n\t\tcase datatype == 'T':\n\t\t\t\/\/ Type\n\t\t\tpacket.Type = buffer[i]\n\t\t\ti++\n\t\tcase datatype == 'l':\n\t\t\t\/\/ Location\n\t\t\tpacket.Location = buffer[i]\n\t\t\ti++\n\t\tcase datatype == 't':\n\t\t\t\/\/ Timestamp\n\t\t\tpacket.Timestamp, err = asciiTimeToDouble(buffer[i : i+14])\n\t\t\ti += 14\n\t\tcase datatype == 'P':\n\t\t\t\/\/ Period separator\n\t\t\tpacket.Period, err = asciiTimeToDouble(buffer[i : i+14])\n\t\t\ti += 14\n\t\tcase datatype == 'C':\n\t\t\t\/\/ Count\n\t\t\tpacket.Count, err = Binary2int(buffer[i : i+2])\n\t\t\ti += 2\n\t\tcase datatype == 'D':\n\t\t\t\/\/ Data\n\t\t\t\/\/ if count isn't set, return error\n\t\t\t\/\/ TODO finish parsing data\n\t\tcase datatype == 'S':\n\t\t\t\/\/ Serial\n\t\t\tpacket.Serial, err = strconv.Atoi(string(buffer[i : i+6]))\n\t\t\ti += 6\n\t\tcase datatype == 'X':\n\t\t\treturn\n\t\tdefault:\n\t\t\terr = InvalidPacket\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\terr = InvalidPacket\n\treturn\n}\n\nfunc doubleToAsciiTime(double_time float64) string {\n\t\/\/ TODO: Check if this logic is correct or if we need to use http:\/\/golang.org\/pkg\/math\/#Mod\n\tint_time := int(double_time)\n\tvar days = math.Floor(double_time \/ (60 * 60 * 24))\n\tvar hours = (int_time % (60 * 60 * 24)) \/ (60 * 60)\n\tvar minutes = (int_time % (60 * 60)) \/ 60\n\tvar seconds = (int_time % (60)) \/ 1\n\tvar milliseconds = (int_time * 1000) % 1000\n\tvar clock_time = (int_time * 12000) % 12\n\n\treturn fmt.Sprintf(\"%03d%02d%02d%02d%03d%02d\", days, hours, minutes, seconds, milliseconds, clock_time)\n}\n\nfunc asciiTimeToDouble(ascii_time []byte) (time float64, err error) {\n\t\/\/ Check time string format\n\tif len(ascii_time) != 16 {\n\t\terr = InvalidTime\n\t}\n\t_, err = strconv.Atoi(string(ascii_time))\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Do the conversion now that we know it should work\n\tvar ptr int = 0\n\tdays, err := strconv.Atoi(string(ascii_time[ptr : ptr+3]))\n\tif err != nil {\n\t\treturn\n\t}\n\tptr += 3\n\ttime += float64(60 * 60 * 24 * days)\n\thours, err := strconv.Atoi(string(ascii_time[ptr : ptr+2]))\n\tif err != nil {\n\t\treturn\n\t}\n\tptr += 2\n\ttime += float64(60 * 60 * hours)\n\tminutes, err := strconv.Atoi(string(ascii_time[ptr : ptr+2]))\n\tif err != nil {\n\t\treturn\n\t}\n\tptr += 2\n\ttime += float64(60 * minutes)\n\tseconds, err := strconv.Atoi(string(ascii_time[ptr : ptr+2]))\n\tif err != nil {\n\t\treturn\n\t}\n\tptr += 2\n\ttime += float64(seconds)\n\tmilliseconds, err := strconv.Atoi(string(ascii_time[ptr : ptr+3]))\n\tif err != nil {\n\t\treturn\n\t}\n\tptr += 3\n\ttime += float64(milliseconds) \/ 1000.0\n\tclock, err := strconv.Atoi(string(ascii_time[ptr : ptr+2]))\n\tif err != nil {\n\t\treturn\n\t}\n\tptr += 2\n\ttime += float64(clock) \/ 12000.0\n\treturn\n}\n\n\/\/ Every checks if every byte in a slice meets some criteria\nfunc Every(data []byte, check func(byte) bool) bool {\n\tfor _, element := range data {\n\t\tif !check(element) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Binary2int converts a byte array containing binary data into an int\nfunc Binary2int(data []byte) (total int) {\n\tfor index, element := range data {\n\t\ttotal += int(element)<<(index * 8)\n\t}\n\treturn true\n}\n<commit_msg>Removed left over return value from new function.<commit_after>package decoders\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"regexp\"\n\t\"strconv\"\n\n\t\"github.com\/seadsystem\/Backend\/DB\/landingzone\/constants\"\n)\n\ntype SeadPacket struct {\n\tType byte\n\tLocation byte\n\tTimestamp float64\n\tPeriod float64\n\tCount int\n\tData float64\n\tSerial int\n}\n\nvar headerRegex *regexp.Regexp\nvar InvalidHeader = errors.New(\"Invalid header.\")\nvar InvalidPacket = errors.New(\"Invalid packet.\")\nvar InvalidTime = errors.New(\"Invalid time.\")\n\n\/\/ init sets up stuff we need with proper error handling. If it isn't complicated or doesn't need error handling, it can probably just be assigned directly.\nfunc init() {\n\tvar err error\n\theaderRegex, err = regexp.Compile(constants.HEADER_REGEX)\n\tif err != nil {\n\t\tlog.Panic(\"Regex compile error:\", err)\n\t}\n}\n\n\/\/ DecodeHeader verifies that the header is in the correct format and extracts the serial number\nfunc DecodeHeader(packet []byte) (serial int, err error) {\n\tserialStrings := headerRegex.FindSubmatch(packet)\n\n\tif serialStrings == nil || len(serialStrings) != 2 {\n\t\terr = InvalidHeader\n\t\treturn\n\t}\n\n\tlog.Printf(\"Header serial string: %s\\n\", string(serialStrings[1]))\n\n\tserial, err = strconv.Atoi(string(serialStrings[1]))\n\treturn\n}\n\n\/\/ DecodePacket extracts the data sent from sensor\nfunc DecodePacket(buffer []byte) (packet SeadPacket, err error) {\n\tfor i := 0; i < len(buffer); {\n\t\tdatatype := buffer[i]\n\t\ti++\n\n\t\t\/\/ Switch on the type of data sent in the packet\n\t\tswitch {\n\t\tcase datatype == 'T':\n\t\t\t\/\/ Type\n\t\t\tpacket.Type = buffer[i]\n\t\t\ti++\n\t\tcase datatype == 'l':\n\t\t\t\/\/ Location\n\t\t\tpacket.Location = buffer[i]\n\t\t\ti++\n\t\tcase datatype == 't':\n\t\t\t\/\/ Timestamp\n\t\t\tpacket.Timestamp, err = asciiTimeToDouble(buffer[i : i+14])\n\t\t\ti += 14\n\t\tcase datatype == 'P':\n\t\t\t\/\/ Period separator\n\t\t\tpacket.Period, err = asciiTimeToDouble(buffer[i : i+14])\n\t\t\ti += 14\n\t\tcase datatype == 'C':\n\t\t\t\/\/ Count\n\t\t\tpacket.Count, err = Binary2int(buffer[i : i+2])\n\t\t\ti += 2\n\t\tcase datatype == 'D':\n\t\t\t\/\/ Data\n\t\t\t\/\/ if count isn't set, return error\n\t\t\t\/\/ TODO finish parsing data\n\t\tcase datatype == 'S':\n\t\t\t\/\/ Serial\n\t\t\tpacket.Serial, err = strconv.Atoi(string(buffer[i : i+6]))\n\t\t\ti += 6\n\t\tcase datatype == 'X':\n\t\t\treturn\n\t\tdefault:\n\t\t\terr = InvalidPacket\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\terr = InvalidPacket\n\treturn\n}\n\nfunc doubleToAsciiTime(double_time float64) string {\n\t\/\/ TODO: Check if this logic is correct or if we need to use http:\/\/golang.org\/pkg\/math\/#Mod\n\tint_time := int(double_time)\n\tvar days = math.Floor(double_time \/ (60 * 60 * 24))\n\tvar hours = (int_time % (60 * 60 * 24)) \/ (60 * 60)\n\tvar minutes = (int_time % (60 * 60)) \/ 60\n\tvar seconds = (int_time % (60)) \/ 1\n\tvar milliseconds = (int_time * 1000) % 1000\n\tvar clock_time = (int_time * 12000) % 12\n\n\treturn fmt.Sprintf(\"%03d%02d%02d%02d%03d%02d\", days, hours, minutes, seconds, milliseconds, clock_time)\n}\n\nfunc asciiTimeToDouble(ascii_time []byte) (time float64, err error) {\n\t\/\/ Check time string format\n\tif len(ascii_time) != 16 {\n\t\terr = InvalidTime\n\t}\n\t_, err = strconv.Atoi(string(ascii_time))\n\tif err != nil {\n\t\treturn\n\t}\n\n\t\/\/ Do the conversion now that we know it should work\n\tvar ptr int = 0\n\tdays, err := strconv.Atoi(string(ascii_time[ptr : ptr+3]))\n\tif err != nil {\n\t\treturn\n\t}\n\tptr += 3\n\ttime += float64(60 * 60 * 24 * days)\n\thours, err := strconv.Atoi(string(ascii_time[ptr : ptr+2]))\n\tif err != nil {\n\t\treturn\n\t}\n\tptr += 2\n\ttime += float64(60 * 60 * hours)\n\tminutes, err := strconv.Atoi(string(ascii_time[ptr : ptr+2]))\n\tif err != nil {\n\t\treturn\n\t}\n\tptr += 2\n\ttime += float64(60 * minutes)\n\tseconds, err := strconv.Atoi(string(ascii_time[ptr : ptr+2]))\n\tif err != nil {\n\t\treturn\n\t}\n\tptr += 2\n\ttime += float64(seconds)\n\tmilliseconds, err := strconv.Atoi(string(ascii_time[ptr : ptr+3]))\n\tif err != nil {\n\t\treturn\n\t}\n\tptr += 3\n\ttime += float64(milliseconds) \/ 1000.0\n\tclock, err := strconv.Atoi(string(ascii_time[ptr : ptr+2]))\n\tif err != nil {\n\t\treturn\n\t}\n\tptr += 2\n\ttime += float64(clock) \/ 12000.0\n\treturn\n}\n\n\/\/ Every checks if every byte in a slice meets some criteria\nfunc Every(data []byte, check func(byte) bool) bool {\n\tfor _, element := range data {\n\t\tif !check(element) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ Binary2int converts a byte array containing binary data into an int\nfunc Binary2int(data []byte) (total int) {\n\tfor index, element := range data {\n\t\ttotal += int(element)<<(index * 8)\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package emulator\n\nimport (\n \"github.com\/kierdavis\/avr\"\n \"testing\"\n)\n\ntype decodeTest struct {\n word uint16\n inst avr.Instruction\n}\n\nvar decodeTests = []decodeTest{\n decodeTest{0x1ef9, avr.ADC},\n decodeTest{0x1c23, avr.ADC},\n decodeTest{0x0efc, avr.ADD},\n decodeTest{0x0f6d, avr.ADD},\n decodeTest{0x96c7, avr.ADIW},\n decodeTest{0x96b4, avr.ADIW},\n decodeTest{0x2013, avr.AND},\n decodeTest{0x23ed, avr.AND},\n decodeTest{0x7dfc, avr.ANDI},\n decodeTest{0x70f6, avr.ANDI},\n decodeTest{0x9415, avr.ASR},\n decodeTest{0x95b5, avr.ASR},\n decodeTest{0x9498, avr.BCLR},\n decodeTest{0x94a8, avr.BCLR},\n decodeTest{0xf802, avr.BLD},\n decodeTest{0xf8f6, avr.BLD},\n decodeTest{0xf456, avr.BRBC},\n decodeTest{0xf5e3, avr.BRBC},\n decodeTest{0xf254, avr.BRBS},\n decodeTest{0xf179, avr.BRBS},\n decodeTest{0x9598, avr.BREAK},\n decodeTest{0x9418, avr.BSET},\n decodeTest{0x9468, avr.BSET},\n decodeTest{0xfb14, avr.BST},\n decodeTest{0xfbe7, avr.BST},\n decodeTest{0x94de, avr.CALL},\n decodeTest{0x95ef, avr.CALL},\n decodeTest{0x981c, avr.CBI},\n decodeTest{0x98b4, avr.CBI},\n decodeTest{0x9400, avr.COM},\n decodeTest{0x9530, avr.COM},\n decodeTest{0x170a, avr.CP},\n decodeTest{0x146f, avr.CP},\n decodeTest{0x060d, avr.CPC},\n decodeTest{0x0796, avr.CPC},\n decodeTest{0x3e57, avr.CPI},\n decodeTest{0x3ead, avr.CPI},\n decodeTest{0x1310, avr.CPSE},\n decodeTest{0x121e, avr.CPSE},\n decodeTest{0x944a, avr.DEC},\n decodeTest{0x94fa, avr.DEC},\n decodeTest{0x941b, avr.DES},\n decodeTest{0x94ab, avr.DES},\n decodeTest{0x9519, avr.EICALL},\n decodeTest{0x9419, avr.EIJMP},\n decodeTest{0x95D8, avr.ELPM_R0},\n decodeTest{0x9076, avr.ELPM},\n decodeTest{0x91a6, avr.ELPM},\n decodeTest{0x91f7, avr.ELPM_INC},\n decodeTest{0x9147, avr.ELPM_INC},\n decodeTest{0x242b, avr.EOR},\n decodeTest{0x24c1, avr.EOR},\n decodeTest{0x035d, avr.FMUL},\n decodeTest{0x0339, avr.FMUL},\n decodeTest{0x03f2, avr.FMULS},\n decodeTest{0x0380, avr.FMULS},\n decodeTest{0x03ce, avr.FMULSU},\n decodeTest{0x03ea, avr.FMULSU},\n decodeTest{0x9509, avr.ICALL},\n decodeTest{0x9409, avr.IJMP},\n decodeTest{0xb41d, avr.IN},\n decodeTest{0xb5c9, avr.IN},\n decodeTest{0x95f3, avr.INC},\n decodeTest{0x95d3, avr.INC},\n decodeTest{0x951d, avr.JMP},\n decodeTest{0x94ed, avr.JMP},\n decodeTest{0x9226, avr.LAC},\n decodeTest{0x93c6, avr.LAC},\n decodeTest{0x92d5, avr.LAS},\n decodeTest{0x9325, avr.LAS},\n decodeTest{0x9227, avr.LAT},\n decodeTest{0x93e7, avr.LAT},\n decodeTest{0x90cc, avr.LD_X},\n decodeTest{0x908c, avr.LD_X},\n decodeTest{0x904d, avr.LD_X_INC},\n decodeTest{0x91ad, avr.LD_X_INC},\n decodeTest{0x905e, avr.LD_X_DEC},\n decodeTest{0x90fe, avr.LD_X_DEC},\n decodeTest{0x80f8, avr.LDD_Y}, \n decodeTest{0x8128, avr.LDD_Y},\n decodeTest{0x9119, avr.LD_Y_INC},\n decodeTest{0x9089, avr.LD_Y_INC},\n decodeTest{0x91da, avr.LD_Y_DEC},\n decodeTest{0x916a, avr.LD_Y_DEC},\n decodeTest{0xa938, avr.LDD_Y},\n decodeTest{0x80d9, avr.LDD_Y},\n decodeTest{0x8060, avr.LDD_Z},\n decodeTest{0x8130, avr.LDD_Z},\n decodeTest{0x9011, avr.LD_Z_INC},\n decodeTest{0x9031, avr.LD_Z_INC},\n decodeTest{0x90a2, avr.LD_Z_DEC},\n decodeTest{0x9102, avr.LD_Z_DEC},\n decodeTest{0xa9a7, avr.LDD_Z},\n decodeTest{0x8964, avr.LDD_Z},\n decodeTest{0xeb93, avr.LDI},\n decodeTest{0xeadd, avr.LDI},\n}\n\nfunc TestDecode(t *testing.T) {\n for _, test := range decodeTests {\n inst := Decode(test.word)\n if inst != test.inst {\n t.Errorf(\"Decode(0x%04x): expected '%s', got '%s'\", test.word, test.inst, inst)\n }\n }\n}\n\nfunc BenchmarkDecode(b *testing.B) {\n \/\/ implement an xorshift RNG for speed\n x := uint16(0xabcd)\n for i := 0; i < b.N; i++ {\n x ^= x << 13\n x ^= x >> 9\n x ^= x << 7\n Decode(x)\n }\n}\n<commit_msg>Split Decode benchmark into random and non-random versions<commit_after>package emulator\n\nimport (\n \"github.com\/kierdavis\/avr\"\n \"testing\"\n)\n\ntype decodeTest struct {\n word uint16\n inst avr.Instruction\n}\n\nvar decodeTests = []decodeTest{\n decodeTest{0x1ef9, avr.ADC},\n decodeTest{0x1c23, avr.ADC},\n decodeTest{0x0efc, avr.ADD},\n decodeTest{0x0f6d, avr.ADD},\n decodeTest{0x96c7, avr.ADIW},\n decodeTest{0x96b4, avr.ADIW},\n decodeTest{0x2013, avr.AND},\n decodeTest{0x23ed, avr.AND},\n decodeTest{0x7dfc, avr.ANDI},\n decodeTest{0x70f6, avr.ANDI},\n decodeTest{0x9415, avr.ASR},\n decodeTest{0x95b5, avr.ASR},\n decodeTest{0x9498, avr.BCLR},\n decodeTest{0x94a8, avr.BCLR},\n decodeTest{0xf802, avr.BLD},\n decodeTest{0xf8f6, avr.BLD},\n decodeTest{0xf456, avr.BRBC},\n decodeTest{0xf5e3, avr.BRBC},\n decodeTest{0xf254, avr.BRBS},\n decodeTest{0xf179, avr.BRBS},\n decodeTest{0x9598, avr.BREAK},\n decodeTest{0x9418, avr.BSET},\n decodeTest{0x9468, avr.BSET},\n decodeTest{0xfb14, avr.BST},\n decodeTest{0xfbe7, avr.BST},\n decodeTest{0x94de, avr.CALL},\n decodeTest{0x95ef, avr.CALL},\n decodeTest{0x981c, avr.CBI},\n decodeTest{0x98b4, avr.CBI},\n decodeTest{0x9400, avr.COM},\n decodeTest{0x9530, avr.COM},\n decodeTest{0x170a, avr.CP},\n decodeTest{0x146f, avr.CP},\n decodeTest{0x060d, avr.CPC},\n decodeTest{0x0796, avr.CPC},\n decodeTest{0x3e57, avr.CPI},\n decodeTest{0x3ead, avr.CPI},\n decodeTest{0x1310, avr.CPSE},\n decodeTest{0x121e, avr.CPSE},\n decodeTest{0x944a, avr.DEC},\n decodeTest{0x94fa, avr.DEC},\n decodeTest{0x941b, avr.DES},\n decodeTest{0x94ab, avr.DES},\n decodeTest{0x9519, avr.EICALL},\n decodeTest{0x9419, avr.EIJMP},\n decodeTest{0x95D8, avr.ELPM_R0},\n decodeTest{0x9076, avr.ELPM},\n decodeTest{0x91a6, avr.ELPM},\n decodeTest{0x91f7, avr.ELPM_INC},\n decodeTest{0x9147, avr.ELPM_INC},\n decodeTest{0x242b, avr.EOR},\n decodeTest{0x24c1, avr.EOR},\n decodeTest{0x035d, avr.FMUL},\n decodeTest{0x0339, avr.FMUL},\n decodeTest{0x03f2, avr.FMULS},\n decodeTest{0x0380, avr.FMULS},\n decodeTest{0x03ce, avr.FMULSU},\n decodeTest{0x03ea, avr.FMULSU},\n decodeTest{0x9509, avr.ICALL},\n decodeTest{0x9409, avr.IJMP},\n decodeTest{0xb41d, avr.IN},\n decodeTest{0xb5c9, avr.IN},\n decodeTest{0x95f3, avr.INC},\n decodeTest{0x95d3, avr.INC},\n decodeTest{0x951d, avr.JMP},\n decodeTest{0x94ed, avr.JMP},\n decodeTest{0x9226, avr.LAC},\n decodeTest{0x93c6, avr.LAC},\n decodeTest{0x92d5, avr.LAS},\n decodeTest{0x9325, avr.LAS},\n decodeTest{0x9227, avr.LAT},\n decodeTest{0x93e7, avr.LAT},\n decodeTest{0x90cc, avr.LD_X},\n decodeTest{0x908c, avr.LD_X},\n decodeTest{0x904d, avr.LD_X_INC},\n decodeTest{0x91ad, avr.LD_X_INC},\n decodeTest{0x905e, avr.LD_X_DEC},\n decodeTest{0x90fe, avr.LD_X_DEC},\n decodeTest{0x80f8, avr.LDD_Y}, \n decodeTest{0x8128, avr.LDD_Y},\n decodeTest{0x9119, avr.LD_Y_INC},\n decodeTest{0x9089, avr.LD_Y_INC},\n decodeTest{0x91da, avr.LD_Y_DEC},\n decodeTest{0x916a, avr.LD_Y_DEC},\n decodeTest{0xa938, avr.LDD_Y},\n decodeTest{0x80d9, avr.LDD_Y},\n decodeTest{0x8060, avr.LDD_Z},\n decodeTest{0x8130, avr.LDD_Z},\n decodeTest{0x9011, avr.LD_Z_INC},\n decodeTest{0x9031, avr.LD_Z_INC},\n decodeTest{0x90a2, avr.LD_Z_DEC},\n decodeTest{0x9102, avr.LD_Z_DEC},\n decodeTest{0xa9a7, avr.LDD_Z},\n decodeTest{0x8964, avr.LDD_Z},\n decodeTest{0xeb93, avr.LDI},\n decodeTest{0xeadd, avr.LDI},\n}\n\nfunc TestDecode(t *testing.T) {\n for _, test := range decodeTests {\n inst := Decode(test.word)\n if inst != test.inst {\n t.Errorf(\"Decode(0x%04x): expected '%s', got '%s'\", test.word, test.inst, inst)\n }\n }\n}\n\nfunc BenchmarkDecode951d(b *testing.B) {\n for i := 0; i < b.N; i++ {\n Decode(0x951d)\n }\n}\n\nfunc BenchmarkDecodeRandom(b *testing.B) {\n \/\/ implement an xorshift RNG for speed\n x := uint16(0xabcd)\n for i := 0; i < b.N; i++ {\n x ^= x << 13\n x ^= x >> 9\n x ^= x << 7\n Decode(x)\n }\n}\n<|endoftext|>"} {"text":"<commit_before>package warserver\n\nimport (\n \"encoding\/binary\"\n \"github.com\/gorilla\/websocket\"\n \"net\"\n)\n\ntype connection interface {\n Read() ([]byte, error)\n Write(msg []byte) error\n Close()\n}\n\ntype websocketConn struct {\n ws *websocket.Conn\n}\n\nfunc (c *websocketConn) Read() ([]byte, error) {\n _, msg, err := c.ws.ReadMessage()\n return msg, err\n}\n\nfunc (c *websocketConn) Write(msg []byte) error {\n return c.ws.WriteMessage(websocket.TextMessage, msg)\n}\n\nfunc (c *websocketConn) Close() {\n c.ws.Close()\n}\n\ntype socketConn struct {\n sock net.Conn\n}\n\nfunc (c *socketConn) Read() ([]byte, error) {\n \/\/ the 32 bit initial message dictates the size of the message\n sizeBuf := make([]byte, 4)\n _, err := c.sock.Read(sizeBuf)\n if err != nil {\n return nil, err\n }\n size, _ := binary.Varint(sizeBuf)\n msgBuf := make([]byte, size)\n _, err = c.sock.Read(msgBuf)\n return msgBuf, err\n}\n\nfunc (c *socketConn) Write(msg []byte) error {\n n, err := c.sock.Write(msg)\n for num_sent := len(msg) - n; num_sent > 0; n, err = c.sock.Write(msg) {\n num_sent -= n\n if err != nil {\n return err\n }\n }\n return err\n}\n\nfunc (c *socketConn) Close() {\n c.sock.Close()\n}\n<commit_msg>Untested Write functions, yay!<commit_after>package warserver\n\nimport (\n \"encoding\/binary\"\n \"github.com\/gorilla\/websocket\"\n \"net\"\n)\n\ntype connection interface {\n Read() ([]byte, error)\n Write(msg []byte) error\n Close()\n}\n\ntype websocketConn struct {\n ws *websocket.Conn\n}\n\nfunc (c *websocketConn) Read() ([]byte, error) {\n _, msg, err := c.ws.ReadMessage()\n return msg, err\n}\n\nfunc (c *websocketConn) Write(msg []byte) error {\n return c.ws.WriteMessage(websocket.TextMessage, msg)\n}\n\nfunc (c *websocketConn) Close() {\n c.ws.Close()\n}\n\ntype socketConn struct {\n sock net.Conn\n}\n\nfunc (c *socketConn) Read() ([]byte, error) {\n \/\/ the 32 bit initial message dictates the size of the message\n sizeBuf := make([]byte, 4)\n _, err := c.sock.Read(sizeBuf)\n if err != nil {\n return nil, err\n }\n size, _ := binary.Varint(sizeBuf)\n msgBuf := make([]byte, size)\n _, err = c.sock.Read(msgBuf)\n return msgBuf, err\n}\n\nfunc (c *socketConn) Write(msg []byte) error {\n lenBuf := make([]byte, 4)\n msgLen := len(msg)\n binary.PutUvarint(lenBuf, uint64(msgLen))\n err := c.fullWrite(lenBuf)\n if err != nil {\n return err\n }\n return c.fullWrite(msg)\n}\n\nfunc (c *socketConn) fullWrite(msg []byte) error {\n msgLen := len(msg)\n n, err := c.sock.Write(msg)\n for num_sent := 0; num_sent < msgLen; n, err = c.sock.Write(msg) {\n num_sent += n\n msg = msg[n:]\n if err != nil {\n return err\n }\n }\n return err\n}\n\nfunc (c *socketConn) Close() {\n c.sock.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The nvim-go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage autocmd\n\nimport (\n\t\"time\"\n\n\t\"github.com\/zchee\/nvim-go\/src\/config\"\n\t\"github.com\/zchee\/nvim-go\/src\/nvimutil\"\n)\n\n\/\/ BufReadPre gets user config variables and assign to global variable when autocmd BufReadPre.\nfunc (a *Autocmd) BufReadPre(cfg *config.Config) {\n\tdefer nvimutil.Profile(a.ctx, time.Now(), \"BufReadPre\")\n}\n<commit_msg>autocmd\/bufreadpre: change args to empty struct pointer<commit_after>\/\/ Copyright 2016 The nvim-go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage autocmd\n\nimport (\n\t\"time\"\n\n\t\"github.com\/zchee\/nvim-go\/src\/nvimutil\"\n)\n\ntype bufReadPreEval struct {}\n\n\/\/ BufReadPre gets user config variables and assign to global variable when autocmd BufReadPre.\nfunc (a *Autocmd) BufReadPre(eval *bufReadPreEval) {\n\tdefer nvimutil.Profile(a.ctx, time.Now(), \"BufReadPre\")\n}\n<|endoftext|>"} {"text":"<commit_before>package api_test\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com\/cloudfoundry\/gosteno\"\n\t\"github.com\/codegangsta\/martini\"\n\t\"github.com\/drewolson\/testflight\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"github.com\/pivotal-cf\/go-service-broker\/api\"\n)\n\nfunc configureBrokerTestSinkLogger(sink *gosteno.TestingSink) *gosteno.Logger {\n\tlogFlags := gosteno.EXCLUDE_DATA | gosteno.EXCLUDE_FILE | gosteno.EXCLUDE_LINE | gosteno.EXCLUDE_METHOD\n\tgostenoConfig := &gosteno.Config{\n\t\tSinks: []gosteno.Sink{sink},\n\t\tLevel: gosteno.LOG_INFO,\n\t\tCodec: gosteno.NewJsonPrettifier(logFlags),\n\t\tEnableLOC: true,\n\t}\n\tgosteno.Init(gostenoConfig)\n\treturn gosteno.NewLogger(\"brokerLogger\")\n}\n\nfunc sinkContains(sink *gosteno.TestingSink, loggingMessage string) bool {\n\tfoundMessage := false\n\tfor _, record := range sink.Records {\n\t\tif record.Message == loggingMessage {\n\t\t\tfoundMessage = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !foundMessage {\n\t\tfmt.Printf(\"Didn't find [%s]\\n\", loggingMessage)\n\n\t\tfor index, record := range sink.Records {\n\t\t\tfmt.Printf(\"Index %d: [%s] \\n\", index, record.Message)\n\t\t}\n\t}\n\n\treturn foundMessage\n}\n\nvar _ = Describe(\"Service Broker API\", func() {\n\tvar fakeServiceBroker *api.FakeServiceBroker\n\tvar brokerAPI *martini.ClassicMartini\n\tvar sink *gosteno.TestingSink\n\n\tmakeInstanceProvisioningRequest := func(instanceID string) *testflight.Response {\n\t\tresponse := &testflight.Response{}\n\t\ttestflight.WithServer(brokerAPI, func(r *testflight.Requester) {\n\t\t\tpath := fmt.Sprintf(\"\/v2\/service_instances\/%s\", instanceID)\n\t\t\tresponse = r.Put(path, \"application\/json\", \"\")\n\t\t})\n\t\treturn response\n\t}\n\n\tBeforeEach(func() {\n\t\tfakeServiceBroker = &api.FakeServiceBroker{\n\t\t\tInstanceLimit: 3,\n\t\t}\n\t\tsink = gosteno.NewTestingSink()\n\t\tbrokerLogger := configureBrokerTestSinkLogger(sink)\n\n\t\tbrokerAPI = api.New(fakeServiceBroker, nullLogger(), brokerLogger)\n\t})\n\n\tDescribe(\"catalog endpoint\", func() {\n\t\tmakeCatalogRequest := func() *testflight.Response {\n\t\t\tresponse := &testflight.Response{}\n\t\t\ttestflight.WithServer(brokerAPI, func(r *testflight.Requester) {\n\t\t\t\tresponse = r.Get(\"\/v2\/catalog\")\n\t\t\t})\n\t\t\treturn response\n\t\t}\n\n\t\tIt(\"returns a 200\", func() {\n\t\t\tresponse := makeCatalogRequest()\n\t\t\tExpect(response.StatusCode).To(Equal(200))\n\t\t})\n\n\t\tIt(\"returns valid catalog json\", func() {\n\t\t\tresponse := makeCatalogRequest()\n\t\t\tExpect(response.Body).To(MatchJSON(fixture(\"catalog.json\")))\n\t\t})\n\t})\n\n\tDescribe(\"instance lifecycle endpoint\", func() {\n\t\tmakeInstanceDeprovisioningRequest := func(instanceID string) *testflight.Response {\n\t\t\tresponse := &testflight.Response{}\n\t\t\ttestflight.WithServer(brokerAPI, func(r *testflight.Requester) {\n\t\t\t\tpath := fmt.Sprintf(\"\/v2\/service_instances\/%s\", instanceID)\n\t\t\t\tresponse = r.Delete(path, \"application\/json\", \"\")\n\t\t\t})\n\t\t\treturn response\n\t\t}\n\n\t\tDescribe(\"provisioning\", func() {\n\t\t\tIt(\"calls Provision on the service broker with the instance id\", func() {\n\t\t\t\tinstanceID := uniqueInstanceID()\n\t\t\t\tmakeInstanceProvisioningRequest(instanceID)\n\t\t\t\tExpect(fakeServiceBroker.ProvisionedInstanceIDs).To(ContainElement(instanceID))\n\t\t\t})\n\n\t\t\tContext(\"when the instance does not exist\", func() {\n\t\t\t\tIt(\"returns a 201\", func() {\n\t\t\t\t\tresponse := makeInstanceProvisioningRequest(uniqueInstanceID())\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(201))\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns json with a dashboard_url field\", func() {\n\t\t\t\t\tresponse := makeInstanceProvisioningRequest(uniqueInstanceID())\n\t\t\t\t\tExpect(response.Body).To(MatchJSON(fixture(\"provisioning.json\")))\n\t\t\t\t})\n\n\t\t\t\tContext(\"when the instance limit has been reached\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tfor i := 0; i < fakeServiceBroker.InstanceLimit; i++ {\n\t\t\t\t\t\t\tmakeInstanceProvisioningRequest(uniqueInstanceID())\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns a 500\", func() {\n\t\t\t\t\t\tresponse := makeInstanceProvisioningRequest(uniqueInstanceID())\n\t\t\t\t\t\tExpect(response.StatusCode).To(Equal(500))\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns json with a description field and a useful error message\", func() {\n\t\t\t\t\t\tresponse := makeInstanceProvisioningRequest(uniqueInstanceID())\n\t\t\t\t\t\tExpect(response.Body).To(MatchJSON(fixture(\"instance_limit_error.json\")))\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"logs an appropriate error\", func() {\n\t\t\t\t\t\tmakeInstanceProvisioningRequest(uniqueInstanceID())\n\t\t\t\t\t\tExpect(sinkContains(sink, \"Provisioning error: instance limit for this service has been reached\")).To(BeTrue())\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when an unexpected error occurs\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tfakeServiceBroker.ProvisionError = errors.New(\"broker failed\")\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns a 500\", func() {\n\t\t\t\t\t\tresponse := makeInstanceProvisioningRequest(uniqueInstanceID())\n\t\t\t\t\t\tExpect(response.StatusCode).To(Equal(500))\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns json with a description field and a useful error message\", func() {\n\t\t\t\t\t\tresponse := makeInstanceProvisioningRequest(uniqueInstanceID())\n\t\t\t\t\t\tExpect(response.Body).To(MatchJSON(fixture(\"unexpected_error.json\")))\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"logs an appropriate error\", func() {\n\t\t\t\t\t\tmakeInstanceProvisioningRequest(uniqueInstanceID())\n\t\t\t\t\t\tExpect(sinkContains(sink, \"Provisioning error: broker failed\")).To(BeTrue())\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t})\n\n\t\t\tContext(\"when the instance already exists\", func() {\n\t\t\t\tvar instanceID string\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tinstanceID = uniqueInstanceID()\n\t\t\t\t\tmakeInstanceProvisioningRequest(instanceID)\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns a 409\", func() {\n\t\t\t\t\tresponse := makeInstanceProvisioningRequest(instanceID)\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(409))\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an empty JSON object\", func() {\n\t\t\t\t\tresponse := makeInstanceProvisioningRequest(instanceID)\n\t\t\t\t\tExpect(response.Body).To(Equal(`{}`))\n\t\t\t\t})\n\n\t\t\t\tIt(\"logs an appropriate error\", func() {\n\t\t\t\t\tmakeInstanceProvisioningRequest(instanceID)\n\t\t\t\t\terrorLog := fmt.Sprintf(\"Provisioning error: instance %s already exists\", instanceID)\n\t\t\t\t\tExpect(sinkContains(sink, errorLog)).To(BeTrue())\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tDescribe(\"deprovisioning\", func() {\n\t\t\tIt(\"calls Deprovision on the service broker with the instance id\", func() {\n\t\t\t\tinstanceID := uniqueInstanceID()\n\t\t\t\tmakeInstanceDeprovisioningRequest(instanceID)\n\t\t\t\tExpect(fakeServiceBroker.DeprovisionedInstanceIDs).To(ContainElement(instanceID))\n\t\t\t})\n\n\t\t\tContext(\"when the instance exists\", func() {\n\t\t\t\tvar instanceID string\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tinstanceID = uniqueInstanceID()\n\t\t\t\t\tmakeInstanceProvisioningRequest(instanceID)\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns a 200\", func() {\n\t\t\t\t\tresponse := makeInstanceDeprovisioningRequest(instanceID)\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(200))\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an empty JSON object\", func() {\n\t\t\t\t\tresponse := makeInstanceDeprovisioningRequest(instanceID)\n\t\t\t\t\tExpect(response.Body).To(Equal(`{}`))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when the instance does not exist\", func() {\n\t\t\t\tvar instanceID string\n\n\t\t\t\tIt(\"returns a 410\", func() {\n\t\t\t\t\tresponse := makeInstanceDeprovisioningRequest(uniqueInstanceID())\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(410))\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an empty JSON object\", func() {\n\t\t\t\t\tresponse := makeInstanceDeprovisioningRequest(uniqueInstanceID())\n\t\t\t\t\tExpect(response.Body).To(Equal(`{}`))\n\t\t\t\t})\n\n\t\t\t\tIt(\"logs an appropriate error\", func() {\n\t\t\t\t\tinstanceID = uniqueInstanceID()\n\t\t\t\t\tmakeInstanceDeprovisioningRequest(instanceID)\n\t\t\t\t\terrorLog := fmt.Sprintf(\"Deprovisioning error: instance %s does not exist\", instanceID)\n\t\t\t\t\tExpect(sinkContains(sink, errorLog)).To(BeTrue())\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n\n\tDescribe(\"binding lifecycle endpoint\", func() {\n\t\tmakeBindingRequest := func(instanceID string, bindingID string) *testflight.Response {\n\t\t\tresponse := &testflight.Response{}\n\t\t\ttestflight.WithServer(brokerAPI, func(r *testflight.Requester) {\n\t\t\t\tpath := fmt.Sprintf(\"\/v2\/service_instances\/%s\/service_bindings\/%s\",\n\t\t\t\t\tinstanceID, bindingID)\n\t\t\t\tresponse = r.Put(path, \"application\/json\", \"\")\n\t\t\t})\n\t\t\treturn response\n\t\t}\n\n\t\tDescribe(\"binding\", func() {\n\n\t\t\tContext(\"when the associated instance exists\", func() {\n\t\t\t\tIt(\"calls Bind on the service broker with the instance and binding ids\", func() {\n\t\t\t\t\tinstanceID := uniqueInstanceID()\n\t\t\t\t\tbindingID := uniqueBindingID()\n\t\t\t\t\tmakeBindingRequest(instanceID, bindingID)\n\t\t\t\t\tExpect(fakeServiceBroker.BoundInstanceIDs).To(ContainElement(instanceID))\n\t\t\t\t\tExpect(fakeServiceBroker.BoundBindingIDs).To(ContainElement(bindingID))\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns the credentials returned by Bind\", func() {\n\t\t\t\t\tresponse := makeBindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(response.Body).To(MatchJSON(fixture(\"binding.json\")))\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns a 201\", func() {\n\t\t\t\t\tresponse := makeBindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(201))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when the associated instance does not exist\", func() {\n\t\t\t\tvar instanceID string\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tfakeServiceBroker.BindError = api.ErrInstanceDoesNotExist\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns a 404\", func() {\n\t\t\t\t\tresponse := makeBindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(404))\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an error JSON object\", func() {\n\t\t\t\t\tresponse := makeBindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(response.Body).To(MatchJSON(`{\"description\":\"instance does not exist\"}`))\n\t\t\t\t})\n\n\t\t\t\tIt(\"logs an appropriate error\", func() {\n\t\t\t\t\tinstanceID = uniqueInstanceID()\n\t\t\t\t\tmakeBindingRequest(instanceID, uniqueBindingID())\n\t\t\t\t\terrorLog := fmt.Sprintf(\"Binding error: instance %s does not exist\", instanceID)\n\t\t\t\t\tExpect(sinkContains(sink, errorLog)).To(BeTrue())\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when the requested binding already exists\", func() {\n\t\t\t\tvar instanceID string\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tfakeServiceBroker.BindError = api.ErrBindingAlreadyExists\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns a 409\", func() {\n\t\t\t\t\tresponse := makeBindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(409))\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an error JSON object\", func() {\n\t\t\t\t\tresponse := makeBindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(response.Body).To(MatchJSON(`{\"description\":\"binding already exists\"}`))\n\t\t\t\t})\n\n\t\t\t\tIt(\"logs an appropriate error\", func() {\n\t\t\t\t\tinstanceID = uniqueInstanceID()\n\t\t\t\t\tmakeBindingRequest(instanceID, uniqueBindingID())\n\t\t\t\t\tmakeBindingRequest(instanceID, uniqueBindingID())\n\t\t\t\t\terrorLog := fmt.Sprintf(\"Binding error: binding already exists\")\n\t\t\t\t\tExpect(sinkContains(sink, errorLog)).To(BeTrue())\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when the binding returns an error\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tfakeServiceBroker.BindError = errors.New(\"random error\")\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns a generic 500 error response\", func() {\n\t\t\t\t\tresponse := makeBindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(500))\n\t\t\t\t\tExpect(response.Body).To(MatchJSON(`{\"description\":\"Internal service error: please contact support\"}`))\n\t\t\t\t})\n\n\t\t\t\tIt(\"logs a detailed error message\", func() {\n\t\t\t\t\tmakeBindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(sinkContains(sink, \"random error\")).To(BeTrue())\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tDescribe(\"unbinding\", func() {\n\t\t\tmakeUnbindingRequest := func(instanceID string, bindingID string) *testflight.Response {\n\t\t\t\tresponse := &testflight.Response{}\n\t\t\t\ttestflight.WithServer(brokerAPI, func(r *testflight.Requester) {\n\t\t\t\t\tpath := fmt.Sprintf(\"\/v2\/service_instances\/%s\/service_bindings\/%s\",\n\t\t\t\t\t\tinstanceID, bindingID)\n\t\t\t\t\tresponse = r.Delete(path, \"application\/json\", \"\")\n\t\t\t\t})\n\t\t\t\treturn response\n\t\t\t}\n\n\t\t\tContext(\"when the associated instance exists\", func() {\n\t\t\t\tvar instanceID string\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tinstanceID = uniqueInstanceID()\n\t\t\t\t\tmakeInstanceProvisioningRequest(instanceID)\n\t\t\t\t})\n\n\t\t\t\tContext(\"and the binding exists\", func() {\n\t\t\t\t\tvar bindingID string\n\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tbindingID = uniqueBindingID()\n\t\t\t\t\t\tmakeBindingRequest(instanceID, bindingID)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns a 200\", func() {\n\t\t\t\t\t\tresponse := makeUnbindingRequest(instanceID, bindingID)\n\t\t\t\t\t\tExpect(response.StatusCode).To(Equal(200))\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns an empty JSON object\", func() {\n\t\t\t\t\t\tresponse := makeUnbindingRequest(instanceID, bindingID)\n\t\t\t\t\t\tExpect(response.Body).To(Equal(`{}`))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"but the binding does not exist\", func() {\n\t\t\t\t\tIt(\"returns a 410\", func() {\n\t\t\t\t\t\tresponse := makeUnbindingRequest(instanceID, \"does-not-exist\")\n\t\t\t\t\t\tExpect(response.StatusCode).To(Equal(410))\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"logs an appropriate error message\", func() {\n\t\t\t\t\t\tmakeUnbindingRequest(instanceID, \"does-not-exist\")\n\t\t\t\t\t\terrorLog := fmt.Sprintf(\"Unbinding error: binding %s does not exist\", \"does-not-exist\")\n\t\t\t\t\t\tExpect(sinkContains(sink, errorLog)).To(BeTrue())\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when the associated instance does not exist\", func() {\n\t\t\t\tvar instanceID string\n\n\t\t\t\tIt(\"returns a 404\", func() {\n\t\t\t\t\tresponse := makeUnbindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(404))\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an empty JSON object\", func() {\n\t\t\t\t\tresponse := makeUnbindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(response.Body).To(Equal(`{}`))\n\t\t\t\t})\n\n\t\t\t\tIt(\"logs an appropriate error\", func() {\n\t\t\t\t\tinstanceID = uniqueInstanceID()\n\t\t\t\t\tmakeUnbindingRequest(instanceID, uniqueBindingID())\n\t\t\t\t\terrorLog := fmt.Sprintf(\"Unbinding error: instance %s does not exist\", instanceID)\n\t\t\t\t\tExpect(sinkContains(sink, errorLog)).To(BeTrue())\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<commit_msg>rename sink -> logSink in api_test<commit_after>package api_test\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com\/cloudfoundry\/gosteno\"\n\t\"github.com\/codegangsta\/martini\"\n\t\"github.com\/drewolson\/testflight\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\n\t\"github.com\/pivotal-cf\/go-service-broker\/api\"\n)\n\nfunc configureBrokerTestSinkLogger(sink *gosteno.TestingSink) *gosteno.Logger {\n\tlogFlags := gosteno.EXCLUDE_DATA | gosteno.EXCLUDE_FILE | gosteno.EXCLUDE_LINE | gosteno.EXCLUDE_METHOD\n\tgostenoConfig := &gosteno.Config{\n\t\tSinks: []gosteno.Sink{sink},\n\t\tLevel: gosteno.LOG_INFO,\n\t\tCodec: gosteno.NewJsonPrettifier(logFlags),\n\t\tEnableLOC: true,\n\t}\n\tgosteno.Init(gostenoConfig)\n\treturn gosteno.NewLogger(\"brokerLogger\")\n}\n\nfunc sinkContains(sink *gosteno.TestingSink, loggingMessage string) bool {\n\tfoundMessage := false\n\tfor _, record := range sink.Records {\n\t\tif record.Message == loggingMessage {\n\t\t\tfoundMessage = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !foundMessage {\n\t\tfmt.Printf(\"Didn't find [%s]\\n\", loggingMessage)\n\n\t\tfor index, record := range sink.Records {\n\t\t\tfmt.Printf(\"Index %d: [%s] \\n\", index, record.Message)\n\t\t}\n\t}\n\n\treturn foundMessage\n}\n\nvar _ = Describe(\"Service Broker API\", func() {\n\tvar fakeServiceBroker *api.FakeServiceBroker\n\tvar brokerAPI *martini.ClassicMartini\n\tvar logSink *gosteno.TestingSink\n\n\tmakeInstanceProvisioningRequest := func(instanceID string) *testflight.Response {\n\t\tresponse := &testflight.Response{}\n\t\ttestflight.WithServer(brokerAPI, func(r *testflight.Requester) {\n\t\t\tpath := fmt.Sprintf(\"\/v2\/service_instances\/%s\", instanceID)\n\t\t\tresponse = r.Put(path, \"application\/json\", \"\")\n\t\t})\n\t\treturn response\n\t}\n\n\tBeforeEach(func() {\n\t\tfakeServiceBroker = &api.FakeServiceBroker{\n\t\t\tInstanceLimit: 3,\n\t\t}\n\t\tlogSink = gosteno.NewTestingSink()\n\t\tbrokerLogger := configureBrokerTestSinkLogger(logSink)\n\n\t\tbrokerAPI = api.New(fakeServiceBroker, nullLogger(), brokerLogger)\n\t})\n\n\tDescribe(\"catalog endpoint\", func() {\n\t\tmakeCatalogRequest := func() *testflight.Response {\n\t\t\tresponse := &testflight.Response{}\n\t\t\ttestflight.WithServer(brokerAPI, func(r *testflight.Requester) {\n\t\t\t\tresponse = r.Get(\"\/v2\/catalog\")\n\t\t\t})\n\t\t\treturn response\n\t\t}\n\n\t\tIt(\"returns a 200\", func() {\n\t\t\tresponse := makeCatalogRequest()\n\t\t\tExpect(response.StatusCode).To(Equal(200))\n\t\t})\n\n\t\tIt(\"returns valid catalog json\", func() {\n\t\t\tresponse := makeCatalogRequest()\n\t\t\tExpect(response.Body).To(MatchJSON(fixture(\"catalog.json\")))\n\t\t})\n\t})\n\n\tDescribe(\"instance lifecycle endpoint\", func() {\n\t\tmakeInstanceDeprovisioningRequest := func(instanceID string) *testflight.Response {\n\t\t\tresponse := &testflight.Response{}\n\t\t\ttestflight.WithServer(brokerAPI, func(r *testflight.Requester) {\n\t\t\t\tpath := fmt.Sprintf(\"\/v2\/service_instances\/%s\", instanceID)\n\t\t\t\tresponse = r.Delete(path, \"application\/json\", \"\")\n\t\t\t})\n\t\t\treturn response\n\t\t}\n\n\t\tDescribe(\"provisioning\", func() {\n\t\t\tIt(\"calls Provision on the service broker with the instance id\", func() {\n\t\t\t\tinstanceID := uniqueInstanceID()\n\t\t\t\tmakeInstanceProvisioningRequest(instanceID)\n\t\t\t\tExpect(fakeServiceBroker.ProvisionedInstanceIDs).To(ContainElement(instanceID))\n\t\t\t})\n\n\t\t\tContext(\"when the instance does not exist\", func() {\n\t\t\t\tIt(\"returns a 201\", func() {\n\t\t\t\t\tresponse := makeInstanceProvisioningRequest(uniqueInstanceID())\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(201))\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns json with a dashboard_url field\", func() {\n\t\t\t\t\tresponse := makeInstanceProvisioningRequest(uniqueInstanceID())\n\t\t\t\t\tExpect(response.Body).To(MatchJSON(fixture(\"provisioning.json\")))\n\t\t\t\t})\n\n\t\t\t\tContext(\"when the instance limit has been reached\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tfor i := 0; i < fakeServiceBroker.InstanceLimit; i++ {\n\t\t\t\t\t\t\tmakeInstanceProvisioningRequest(uniqueInstanceID())\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns a 500\", func() {\n\t\t\t\t\t\tresponse := makeInstanceProvisioningRequest(uniqueInstanceID())\n\t\t\t\t\t\tExpect(response.StatusCode).To(Equal(500))\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns json with a description field and a useful error message\", func() {\n\t\t\t\t\t\tresponse := makeInstanceProvisioningRequest(uniqueInstanceID())\n\t\t\t\t\t\tExpect(response.Body).To(MatchJSON(fixture(\"instance_limit_error.json\")))\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"logs an appropriate error\", func() {\n\t\t\t\t\t\tmakeInstanceProvisioningRequest(uniqueInstanceID())\n\t\t\t\t\t\tExpect(sinkContains(logSink, \"Provisioning error: instance limit for this service has been reached\")).To(BeTrue())\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when an unexpected error occurs\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tfakeServiceBroker.ProvisionError = errors.New(\"broker failed\")\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns a 500\", func() {\n\t\t\t\t\t\tresponse := makeInstanceProvisioningRequest(uniqueInstanceID())\n\t\t\t\t\t\tExpect(response.StatusCode).To(Equal(500))\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns json with a description field and a useful error message\", func() {\n\t\t\t\t\t\tresponse := makeInstanceProvisioningRequest(uniqueInstanceID())\n\t\t\t\t\t\tExpect(response.Body).To(MatchJSON(fixture(\"unexpected_error.json\")))\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"logs an appropriate error\", func() {\n\t\t\t\t\t\tmakeInstanceProvisioningRequest(uniqueInstanceID())\n\t\t\t\t\t\tExpect(sinkContains(logSink, \"Provisioning error: broker failed\")).To(BeTrue())\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t})\n\n\t\t\tContext(\"when the instance already exists\", func() {\n\t\t\t\tvar instanceID string\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tinstanceID = uniqueInstanceID()\n\t\t\t\t\tmakeInstanceProvisioningRequest(instanceID)\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns a 409\", func() {\n\t\t\t\t\tresponse := makeInstanceProvisioningRequest(instanceID)\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(409))\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an empty JSON object\", func() {\n\t\t\t\t\tresponse := makeInstanceProvisioningRequest(instanceID)\n\t\t\t\t\tExpect(response.Body).To(Equal(`{}`))\n\t\t\t\t})\n\n\t\t\t\tIt(\"logs an appropriate error\", func() {\n\t\t\t\t\tmakeInstanceProvisioningRequest(instanceID)\n\t\t\t\t\terrorLog := fmt.Sprintf(\"Provisioning error: instance %s already exists\", instanceID)\n\t\t\t\t\tExpect(sinkContains(logSink, errorLog)).To(BeTrue())\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tDescribe(\"deprovisioning\", func() {\n\t\t\tIt(\"calls Deprovision on the service broker with the instance id\", func() {\n\t\t\t\tinstanceID := uniqueInstanceID()\n\t\t\t\tmakeInstanceDeprovisioningRequest(instanceID)\n\t\t\t\tExpect(fakeServiceBroker.DeprovisionedInstanceIDs).To(ContainElement(instanceID))\n\t\t\t})\n\n\t\t\tContext(\"when the instance exists\", func() {\n\t\t\t\tvar instanceID string\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tinstanceID = uniqueInstanceID()\n\t\t\t\t\tmakeInstanceProvisioningRequest(instanceID)\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns a 200\", func() {\n\t\t\t\t\tresponse := makeInstanceDeprovisioningRequest(instanceID)\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(200))\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an empty JSON object\", func() {\n\t\t\t\t\tresponse := makeInstanceDeprovisioningRequest(instanceID)\n\t\t\t\t\tExpect(response.Body).To(Equal(`{}`))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when the instance does not exist\", func() {\n\t\t\t\tvar instanceID string\n\n\t\t\t\tIt(\"returns a 410\", func() {\n\t\t\t\t\tresponse := makeInstanceDeprovisioningRequest(uniqueInstanceID())\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(410))\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an empty JSON object\", func() {\n\t\t\t\t\tresponse := makeInstanceDeprovisioningRequest(uniqueInstanceID())\n\t\t\t\t\tExpect(response.Body).To(Equal(`{}`))\n\t\t\t\t})\n\n\t\t\t\tIt(\"logs an appropriate error\", func() {\n\t\t\t\t\tinstanceID = uniqueInstanceID()\n\t\t\t\t\tmakeInstanceDeprovisioningRequest(instanceID)\n\t\t\t\t\terrorLog := fmt.Sprintf(\"Deprovisioning error: instance %s does not exist\", instanceID)\n\t\t\t\t\tExpect(sinkContains(logSink, errorLog)).To(BeTrue())\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n\n\tDescribe(\"binding lifecycle endpoint\", func() {\n\t\tmakeBindingRequest := func(instanceID string, bindingID string) *testflight.Response {\n\t\t\tresponse := &testflight.Response{}\n\t\t\ttestflight.WithServer(brokerAPI, func(r *testflight.Requester) {\n\t\t\t\tpath := fmt.Sprintf(\"\/v2\/service_instances\/%s\/service_bindings\/%s\",\n\t\t\t\t\tinstanceID, bindingID)\n\t\t\t\tresponse = r.Put(path, \"application\/json\", \"\")\n\t\t\t})\n\t\t\treturn response\n\t\t}\n\n\t\tDescribe(\"binding\", func() {\n\n\t\t\tContext(\"when the associated instance exists\", func() {\n\t\t\t\tIt(\"calls Bind on the service broker with the instance and binding ids\", func() {\n\t\t\t\t\tinstanceID := uniqueInstanceID()\n\t\t\t\t\tbindingID := uniqueBindingID()\n\t\t\t\t\tmakeBindingRequest(instanceID, bindingID)\n\t\t\t\t\tExpect(fakeServiceBroker.BoundInstanceIDs).To(ContainElement(instanceID))\n\t\t\t\t\tExpect(fakeServiceBroker.BoundBindingIDs).To(ContainElement(bindingID))\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns the credentials returned by Bind\", func() {\n\t\t\t\t\tresponse := makeBindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(response.Body).To(MatchJSON(fixture(\"binding.json\")))\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns a 201\", func() {\n\t\t\t\t\tresponse := makeBindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(201))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when the associated instance does not exist\", func() {\n\t\t\t\tvar instanceID string\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tfakeServiceBroker.BindError = api.ErrInstanceDoesNotExist\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns a 404\", func() {\n\t\t\t\t\tresponse := makeBindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(404))\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an error JSON object\", func() {\n\t\t\t\t\tresponse := makeBindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(response.Body).To(MatchJSON(`{\"description\":\"instance does not exist\"}`))\n\t\t\t\t})\n\n\t\t\t\tIt(\"logs an appropriate error\", func() {\n\t\t\t\t\tinstanceID = uniqueInstanceID()\n\t\t\t\t\tmakeBindingRequest(instanceID, uniqueBindingID())\n\t\t\t\t\terrorLog := fmt.Sprintf(\"Binding error: instance %s does not exist\", instanceID)\n\t\t\t\t\tExpect(sinkContains(logSink, errorLog)).To(BeTrue())\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when the requested binding already exists\", func() {\n\t\t\t\tvar instanceID string\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tfakeServiceBroker.BindError = api.ErrBindingAlreadyExists\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns a 409\", func() {\n\t\t\t\t\tresponse := makeBindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(409))\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an error JSON object\", func() {\n\t\t\t\t\tresponse := makeBindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(response.Body).To(MatchJSON(`{\"description\":\"binding already exists\"}`))\n\t\t\t\t})\n\n\t\t\t\tIt(\"logs an appropriate error\", func() {\n\t\t\t\t\tinstanceID = uniqueInstanceID()\n\t\t\t\t\tmakeBindingRequest(instanceID, uniqueBindingID())\n\t\t\t\t\tmakeBindingRequest(instanceID, uniqueBindingID())\n\t\t\t\t\tExpect(sinkContains(logSink, \"Binding error: binding already exists\")).To(BeTrue())\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when the binding returns an error\", func() {\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tfakeServiceBroker.BindError = errors.New(\"random error\")\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns a generic 500 error response\", func() {\n\t\t\t\t\tresponse := makeBindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(500))\n\t\t\t\t\tExpect(response.Body).To(MatchJSON(`{\"description\":\"Internal service error: please contact support\"}`))\n\t\t\t\t})\n\n\t\t\t\tIt(\"logs a detailed error message\", func() {\n\t\t\t\t\tmakeBindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(sinkContains(logSink, \"random error\")).To(BeTrue())\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\n\t\tDescribe(\"unbinding\", func() {\n\t\t\tmakeUnbindingRequest := func(instanceID string, bindingID string) *testflight.Response {\n\t\t\t\tresponse := &testflight.Response{}\n\t\t\t\ttestflight.WithServer(brokerAPI, func(r *testflight.Requester) {\n\t\t\t\t\tpath := fmt.Sprintf(\"\/v2\/service_instances\/%s\/service_bindings\/%s\",\n\t\t\t\t\t\tinstanceID, bindingID)\n\t\t\t\t\tresponse = r.Delete(path, \"application\/json\", \"\")\n\t\t\t\t})\n\t\t\t\treturn response\n\t\t\t}\n\n\t\t\tContext(\"when the associated instance exists\", func() {\n\t\t\t\tvar instanceID string\n\n\t\t\t\tBeforeEach(func() {\n\t\t\t\t\tinstanceID = uniqueInstanceID()\n\t\t\t\t\tmakeInstanceProvisioningRequest(instanceID)\n\t\t\t\t})\n\n\t\t\t\tContext(\"and the binding exists\", func() {\n\t\t\t\t\tvar bindingID string\n\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\tbindingID = uniqueBindingID()\n\t\t\t\t\t\tmakeBindingRequest(instanceID, bindingID)\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns a 200\", func() {\n\t\t\t\t\t\tresponse := makeUnbindingRequest(instanceID, bindingID)\n\t\t\t\t\t\tExpect(response.StatusCode).To(Equal(200))\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"returns an empty JSON object\", func() {\n\t\t\t\t\t\tresponse := makeUnbindingRequest(instanceID, bindingID)\n\t\t\t\t\t\tExpect(response.Body).To(Equal(`{}`))\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"but the binding does not exist\", func() {\n\t\t\t\t\tIt(\"returns a 410\", func() {\n\t\t\t\t\t\tresponse := makeUnbindingRequest(instanceID, \"does-not-exist\")\n\t\t\t\t\t\tExpect(response.StatusCode).To(Equal(410))\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"logs an appropriate error message\", func() {\n\t\t\t\t\t\tmakeUnbindingRequest(instanceID, \"does-not-exist\")\n\t\t\t\t\t\terrorLog := fmt.Sprintf(\"Unbinding error: binding %s does not exist\", \"does-not-exist\")\n\t\t\t\t\t\tExpect(sinkContains(logSink, errorLog)).To(BeTrue())\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"when the associated instance does not exist\", func() {\n\t\t\t\tvar instanceID string\n\n\t\t\t\tIt(\"returns a 404\", func() {\n\t\t\t\t\tresponse := makeUnbindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(response.StatusCode).To(Equal(404))\n\t\t\t\t})\n\n\t\t\t\tIt(\"returns an empty JSON object\", func() {\n\t\t\t\t\tresponse := makeUnbindingRequest(uniqueInstanceID(), uniqueBindingID())\n\t\t\t\t\tExpect(response.Body).To(Equal(`{}`))\n\t\t\t\t})\n\n\t\t\t\tIt(\"logs an appropriate error\", func() {\n\t\t\t\t\tinstanceID = uniqueInstanceID()\n\t\t\t\t\tmakeUnbindingRequest(instanceID, uniqueBindingID())\n\t\t\t\t\terrorLog := fmt.Sprintf(\"Unbinding error: instance %s does not exist\", instanceID)\n\t\t\t\t\tExpect(sinkContains(logSink, errorLog)).To(BeTrue())\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package validator\n\nimport (\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/viant\/endly\"\n\t\"github.com\/viant\/endly\/util\"\n\t\"github.com\/viant\/endly\/workflow\"\n\t\"github.com\/viant\/toolbox\"\n)\n\n\/\/NewAssertRequestFromContext creates a new assert rquest from context for current activity\nfunc NewAssertRequestFromContext(context *endly.Context, source, expected, actual interface{}, name, description string) (*AssertRequest, error) {\n\tprocess := workflow.Last(context)\n\tif process == nil {\n\t\treturn nil, errors.New(\"process was empty\")\n\t}\n\tactivity := process.Last()\n\tif process == nil {\n\t\treturn nil, errors.New(\"activity was empty\")\n\t}\n\n\tif description == \"\" {\n\t\tdescription = activity.Description\n\t}\n\n\tif expected != nil && toolbox.IsSlice(expected) {\n\t\tif normalized, err := util.NormalizeMap(expected, true); err == nil {\n\t\t\texpected = normalized\n\t\t}\n\t}\n\treturn NewAssertRequest(activity.TagID, name, description, source, expected, actual), nil\n\n}\n\n\/\/Assert compares provided expected amd actual\nfunc Assert(context *endly.Context, source, expected, actual interface{}, name, description string) (*AssertResponse, error) {\n\tvar request, err = NewAssertRequestFromContext(context, source, expected, actual, name, description)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar response = &AssertResponse{}\n\terr = endly.Run(context, request, response)\n\treturn response, err\n}\n<commit_msg>updated assert request<commit_after>package validator\n\nimport (\n\t\"github.com\/viant\/endly\"\n\t\"github.com\/viant\/endly\/util\"\n\t\"github.com\/viant\/endly\/workflow\"\n\t\"github.com\/viant\/toolbox\"\n)\n\n\/\/NewAssertRequestFromContext creates a new assert rquest from context for current activity\nfunc NewAssertRequestFromContext(context *endly.Context, source, expected, actual interface{}, name, description string) (*AssertRequest, error) {\n\ttagID := \"\"\n\tif process := workflow.Last(context); process != nil {\n\t\tif activity := process.Last(); activity != nil {\n\t\t\tif description == \"\" {\n\t\t\t\tdescription = activity.Description\n\t\t\t}\n\t\t\ttagID = activity.TagID\n\t\t}\n\t}\n\tif expected != nil && toolbox.IsSlice(expected) {\n\t\tif normalized, err := util.NormalizeMap(expected, true); err == nil {\n\t\t\texpected = normalized\n\t\t}\n\t}\n\treturn NewAssertRequest(tagID, name, description, source, expected, actual), nil\n\n}\n\n\/\/Assert compares provided expected amd actual\nfunc Assert(context *endly.Context, source, expected, actual interface{}, name, description string) (*AssertResponse, error) {\n\tvar request, err = NewAssertRequestFromContext(context, source, expected, actual, name, description)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar response = &AssertResponse{}\n\terr = endly.Run(context, request, response)\n\treturn response, err\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 Sascha Peilicke. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage tribool\n\n\/\/ Tribool represents a ternary (three-valued) logic type.\ntype Tribool int\n\n\/\/ Those constants for all possible values in a ternary logic.\nconst (\n True = Tribool(2)\n Indeterminate = Tribool(1)\n False = Tribool(0)\n)\n\nfunc (a Tribool) True() bool { return a == True }\nfunc (a Tribool) Indeterminate() bool { return a == Indeterminate }\nfunc (a Tribool) False() bool { return a == False }\nfunc (a Tribool) Boolean() bool { return a != Indeterminate }\n\nfunc (a Tribool) Not() Tribool {\n switch a {\n\tcase True: return False\n\tcase False: return True\n }\n return Indeterminate\n}\n\nfunc (a Tribool) Equal(b Tribool) bool { return a == b }\n\nfunc (a Tribool) And(b Tribool) Tribool {\n if int(a) < int(b) {\n return a\n }\n return b\n}\n\nfunc (a Tribool) Or(b Tribool) Tribool {\n if int(a) > int(b) {\n return a\n }\n return b\n}\n\nfunc FromBool(value bool) Tribool {\n ret := Tribool(False)\n if value == true {\n ret = True\n }\n return ret\n}\n\nfunc FromString(value string) Tribool {\n ret := Tribool(Indeterminate)\n switch value {\n case \"true\": ret = True\n case \"false\": ret = False\n }\n return ret\n}\n\nfunc (a Tribool) String() string {\n ret := \"indeterminate\"\n switch a {\n case True: ret = \"true\"\n case False: ret = \"false\"\n }\n return ret\n}\n<commit_msg>Fixed typo in constants description<commit_after>\/\/ Copyright 2011 Sascha Peilicke. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage tribool\n\n\/\/ Tribool represents a ternary (three-valued) logic type.\ntype Tribool int\n\n\/\/ Constants for all possible values in a ternary logic.\nconst (\n True = Tribool(2)\n Indeterminate = Tribool(1)\n False = Tribool(0)\n)\n\nfunc (a Tribool) True() bool { return a == True }\nfunc (a Tribool) Indeterminate() bool { return a == Indeterminate }\nfunc (a Tribool) False() bool { return a == False }\nfunc (a Tribool) Boolean() bool { return a != Indeterminate }\n\nfunc (a Tribool) Not() Tribool {\n switch a {\n\tcase True: return False\n\tcase False: return True\n }\n return Indeterminate\n}\n\nfunc (a Tribool) Equal(b Tribool) bool { return a == b }\n\nfunc (a Tribool) And(b Tribool) Tribool {\n if int(a) < int(b) {\n return a\n }\n return b\n}\n\nfunc (a Tribool) Or(b Tribool) Tribool {\n if int(a) > int(b) {\n return a\n }\n return b\n}\n\nfunc FromBool(value bool) Tribool {\n ret := Tribool(False)\n if value == true {\n ret = True\n }\n return ret\n}\n\nfunc FromString(value string) Tribool {\n ret := Tribool(Indeterminate)\n switch value {\n case \"true\": ret = True\n case \"false\": ret = False\n }\n return ret\n}\n\nfunc (a Tribool) String() string {\n ret := \"indeterminate\"\n switch a {\n case True: ret = \"true\"\n case False: ret = \"false\"\n }\n return ret\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ cli.go\n\/\/\n\/\/ Copyright (c) 2016, Ayke van Laethem\n\/\/ All rights reserved.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions are\n\/\/ met:\n\/\/\n\/\/ 1. Redistributions of source code must retain the above copyright\n\/\/ notice, this list of conditions and the following disclaimer.\n\/\/\n\/\/ 2. Redistributions in binary form must reproduce the above copyright\n\/\/ notice, this list of conditions and the following disclaimer in the\n\/\/ documentation and\/or other materials provided with the distribution.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n\/\/ IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\n\/\/ TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n\/\/ PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n\/\/ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n\/\/ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n\/\/ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n\/\/ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n\/\/ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n\/\/ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\/\/ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\/pprof\"\n\t\"strings\"\n\n\t\"github.com\/aykevl\/dtsync\/sync\"\n\t\"github.com\/aykevl\/dtsync\/tree\/file\"\n)\n\nvar cpuprofile = flag.String(\"cpuprofile\", \"\", \"write CPU profile to file\")\n\nfunc main() {\n\tflag.Parse()\n\n\tif *cpuprofile != \"\" {\n\t\tf, err := os.Create(*cpuprofile)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"Could not open CPU profile file:\", err)\n\t\t}\n\t\tdefer f.Close()\n\t\tpprof.StartCPUProfile(f)\n\t\tdefer pprof.StopCPUProfile()\n\t}\n\n\tif flag.NArg() != 2 {\n\t\tfmt.Fprintf(os.Stderr, \"Provide exactly two directories on the command line (got %d).\\n\", flag.NArg())\n\t\treturn\n\t}\n\n\tfs1, err := file.NewRoot(flag.Arg(0))\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Could not open first root:\", err)\n\t}\n\tfs2, err := file.NewRoot(flag.Arg(1))\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Could not open second root:\", err)\n\t}\n\n\tresult, err := sync.Scan(fs1, fs2)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Could not scan roots:\", err)\n\t}\n\n\tif len(result.Jobs()) == 0 {\n\t\t\/\/ Nice! We don't have to do anything.\n\t\terr := result.SaveStatus()\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"No changes, but could not save status:\", err)\n\t\t} else {\n\t\t\tfmt.Println(\"No changes.\")\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(\"Scan results:\")\n\tfor _, job := range result.Jobs() {\n\t\tvar direction string\n\t\tswitch job.Direction() {\n\t\tcase -1:\n\t\t\tdirection = \"<--\"\n\t\tcase 0:\n\t\t\tdirection = \" ? \"\n\t\tcase 1:\n\t\t\tdirection = \"-->\"\n\t\tdefault:\n\t\t\t\/\/ We might as wel just panic.\n\t\t\tdirection = \"!!!\"\n\t\t}\n\t\tfmt.Printf(\"%-8s %s %8s %s\\n\", job.StatusLeft(), direction, job.StatusRight(), job.RelativePath())\n\t}\n\n\tscanner := bufio.NewScanner(os.Stdin)\n\taction := 0\n\tfor action == 0 {\n\t\tfmt.Printf(\"Apply these changes? \")\n\t\tif !scanner.Scan() {\n\t\t\treturn\n\t\t}\n\t\tinput := strings.ToLower(scanner.Text())\n\t\tswitch input {\n\t\tcase \"y\", \"yes\":\n\t\t\t\/\/ apply changes\n\t\t\taction = 1\n\t\tcase \"n\", \"no\":\n\t\t\t\/\/ exit\n\t\t\taction = -1\n\t\tcase \"q\", \"quit\":\n\t\t\t\/\/ exit\n\t\t\taction = -2\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n\tif action < 0 {\n\t\terr = result.SaveStatus()\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"Could not save status:\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tstats, err := result.SyncAll()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Could not apply all changes:\", err)\n\t}\n\tfmt.Printf(\"Applied %d changes (%d errors)\\n\", stats.CountTotal, stats.CountError)\n\terr = result.SaveStatus()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Could not save status:\", err)\n\t}\n}\n<commit_msg>cli: return on errors<commit_after>\/\/ cli.go\n\/\/\n\/\/ Copyright (c) 2016, Ayke van Laethem\n\/\/ All rights reserved.\n\/\/\n\/\/ Redistribution and use in source and binary forms, with or without\n\/\/ modification, are permitted provided that the following conditions are\n\/\/ met:\n\/\/\n\/\/ 1. Redistributions of source code must retain the above copyright\n\/\/ notice, this list of conditions and the following disclaimer.\n\/\/\n\/\/ 2. Redistributions in binary form must reproduce the above copyright\n\/\/ notice, this list of conditions and the following disclaimer in the\n\/\/ documentation and\/or other materials provided with the distribution.\n\/\/\n\/\/ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n\/\/ IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\n\/\/ TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n\/\/ PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n\/\/ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n\/\/ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n\/\/ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n\/\/ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n\/\/ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n\/\/ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\/\/ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\/pprof\"\n\t\"strings\"\n\n\t\"github.com\/aykevl\/dtsync\/sync\"\n\t\"github.com\/aykevl\/dtsync\/tree\/file\"\n)\n\nvar cpuprofile = flag.String(\"cpuprofile\", \"\", \"write CPU profile to file\")\n\nfunc main() {\n\tflag.Parse()\n\n\tif *cpuprofile != \"\" {\n\t\tf, err := os.Create(*cpuprofile)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"Could not open CPU profile file:\", err)\n\t\t}\n\t\tdefer f.Close()\n\t\tpprof.StartCPUProfile(f)\n\t\tdefer pprof.StopCPUProfile()\n\t}\n\n\tif flag.NArg() != 2 {\n\t\tfmt.Fprintf(os.Stderr, \"Provide exactly two directories on the command line (got %d).\\n\", flag.NArg())\n\t\treturn\n\t}\n\n\tfs1, err := file.NewRoot(flag.Arg(0))\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Could not open first root:\", err)\n\t\treturn\n\t}\n\tfs2, err := file.NewRoot(flag.Arg(1))\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Could not open second root:\", err)\n\t\treturn\n\t}\n\n\tresult, err := sync.Scan(fs1, fs2)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Could not scan roots:\", err)\n\t\treturn\n\t}\n\n\tif len(result.Jobs()) == 0 {\n\t\t\/\/ Nice! We don't have to do anything.\n\t\terr := result.SaveStatus()\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"No changes, but could not save status:\", err)\n\t\t} else {\n\t\t\tfmt.Println(\"No changes.\")\n\t\t}\n\t\treturn\n\t}\n\n\tfmt.Println(\"Scan results:\")\n\tfor _, job := range result.Jobs() {\n\t\tvar direction string\n\t\tswitch job.Direction() {\n\t\tcase -1:\n\t\t\tdirection = \"<--\"\n\t\tcase 0:\n\t\t\tdirection = \" ? \"\n\t\tcase 1:\n\t\t\tdirection = \"-->\"\n\t\tdefault:\n\t\t\t\/\/ We might as wel just panic.\n\t\t\tdirection = \"!!!\"\n\t\t}\n\t\tfmt.Printf(\"%-8s %s %8s %s\\n\", job.StatusLeft(), direction, job.StatusRight(), job.RelativePath())\n\t}\n\n\tscanner := bufio.NewScanner(os.Stdin)\n\taction := 0\n\tfor action == 0 {\n\t\tfmt.Printf(\"Apply these changes? \")\n\t\tif !scanner.Scan() {\n\t\t\treturn\n\t\t}\n\t\tinput := strings.ToLower(scanner.Text())\n\t\tswitch input {\n\t\tcase \"y\", \"yes\":\n\t\t\t\/\/ apply changes\n\t\t\taction = 1\n\t\tcase \"n\", \"no\":\n\t\t\t\/\/ exit\n\t\t\taction = -1\n\t\tcase \"q\", \"quit\":\n\t\t\t\/\/ exit\n\t\t\taction = -2\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t}\n\tif action < 0 {\n\t\terr = result.SaveStatus()\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"Could not save status:\", err)\n\t\t}\n\t\treturn\n\t}\n\n\tstats, err := result.SyncAll()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Could not apply all changes:\", err)\n\t}\n\tfmt.Printf(\"Applied %d changes (%d errors)\\n\", stats.CountTotal, stats.CountError)\n\terr = result.SaveStatus()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"Could not save status:\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>Restructured the program to support multiple argument files and data from sdtin - pipe<commit_after><|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2019 Shivaram Lingamneni <slingamn@cs.stanford.edu>\n\/\/ released under the MIT license\n\npackage irc\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype zncCommandHandler func(client *Client, command string, params []string, rb *ResponseBuffer)\n\nvar zncHandlers = map[string]zncCommandHandler{\n\t\"*playback\": zncPlaybackHandler,\n}\n\nfunc zncPrivmsgHandler(client *Client, command string, privmsg string, rb *ResponseBuffer) {\n\tzncModuleHandler(client, command, strings.Fields(privmsg), rb)\n}\n\nfunc zncModuleHandler(client *Client, command string, params []string, rb *ResponseBuffer) {\n\tcommand = strings.ToLower(command)\n\tif subHandler, ok := zncHandlers[command]; ok {\n\t\tsubHandler(client, command, params, rb)\n\t} else {\n\t\trb.Add(nil, \"*status!znc@znc.in\", \"NOTICE\", rb.target.Nick(), fmt.Sprintf(client.t(\"No such module [%s]\"), command))\n\t}\n}\n\n\/\/ \"number of seconds (floating point for millisecond precision) elapsed since January 1, 1970\"\nfunc zncWireTimeToTime(str string) (result time.Time) {\n\tvar secondsPortion, fracPortion string\n\tdot := strings.IndexByte(str, '.')\n\tif dot == -1 {\n\t\tsecondsPortion = str\n\t} else {\n\t\tsecondsPortion = str[:dot]\n\t\tfracPortion = str[dot:]\n\t}\n\tseconds, _ := strconv.ParseInt(secondsPortion, 10, 64)\n\tfraction, _ := strconv.ParseFloat(fracPortion, 64)\n\treturn time.Unix(seconds, int64(fraction*1000000000))\n}\n\ntype zncPlaybackTimes struct {\n\tafter time.Time\n\tbefore time.Time\n\ttargets map[string]bool \/\/ nil for \"*\" (everything), otherwise the channel names\n}\n\n\/\/ https:\/\/wiki.znc.in\/Playback\n\/\/ PRIVMSG *playback :play <target> [lower_bound] [upper_bound]\n\/\/ e.g., PRIVMSG *playback :play * 1558374442\nfunc zncPlaybackHandler(client *Client, command string, params []string, rb *ResponseBuffer) {\n\tif len(params) < 2 {\n\t\treturn\n\t} else if strings.ToLower(params[0]) != \"play\" {\n\t\treturn\n\t}\n\ttargetString := params[1]\n\n\tvar after, before time.Time\n\tif 2 < len(params) {\n\t\tafter = zncWireTimeToTime(params[2])\n\t}\n\tif 3 < len(params) {\n\t\tbefore = zncWireTimeToTime(params[3])\n\t}\n\n\tvar targets map[string]bool\n\n\t\/\/ OK: the user's PMs get played back immediately on receiving this,\n\t\/\/ then we save the timestamps in the session to handle replay on future channel joins\n\tconfig := client.server.Config()\n\tif params[1] == \"*\" {\n\t\titems, _ := client.history.Between(after, before, false, config.History.ChathistoryMax)\n\t\tclient.replayPrivmsgHistory(rb, items, true)\n\t} else {\n\t\tfor _, targetName := range strings.Split(targetString, \",\") {\n\t\t\tif cfTarget, err := CasefoldChannel(targetName); err == nil {\n\t\t\t\tif targets == nil {\n\t\t\t\t\ttargets = make(map[string]bool)\n\t\t\t\t}\n\t\t\t\ttargets[cfTarget] = true\n\t\t\t}\n\t\t}\n\t}\n\n\trb.session.zncPlaybackTimes = &zncPlaybackTimes{\n\t\tafter: after,\n\t\tbefore: before,\n\t\ttargets: targets,\n\t}\n}\n<commit_msg>review fix<commit_after>\/\/ Copyright (c) 2019 Shivaram Lingamneni <slingamn@cs.stanford.edu>\n\/\/ released under the MIT license\n\npackage irc\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype zncCommandHandler func(client *Client, command string, params []string, rb *ResponseBuffer)\n\nvar zncHandlers = map[string]zncCommandHandler{\n\t\"*playback\": zncPlaybackHandler,\n}\n\nfunc zncPrivmsgHandler(client *Client, command string, privmsg string, rb *ResponseBuffer) {\n\tzncModuleHandler(client, command, strings.Fields(privmsg), rb)\n}\n\nfunc zncModuleHandler(client *Client, command string, params []string, rb *ResponseBuffer) {\n\tcommand = strings.ToLower(command)\n\tif subHandler, ok := zncHandlers[command]; ok {\n\t\tsubHandler(client, command, params, rb)\n\t} else {\n\t\tnick := rb.target.Nick()\n\t\trb.Add(nil, client.server.name, \"NOTICE\", nick, fmt.Sprintf(client.t(\"Oragono does not emulate the ZNC module %s\"), command))\n\t\trb.Add(nil, \"*status!znc@znc.in\", \"NOTICE\", nick, fmt.Sprintf(client.t(\"No such module [%s]\"), command))\n\t}\n}\n\n\/\/ \"number of seconds (floating point for millisecond precision) elapsed since January 1, 1970\"\nfunc zncWireTimeToTime(str string) (result time.Time) {\n\tvar secondsPortion, fracPortion string\n\tdot := strings.IndexByte(str, '.')\n\tif dot == -1 {\n\t\tsecondsPortion = str\n\t} else {\n\t\tsecondsPortion = str[:dot]\n\t\tfracPortion = str[dot:]\n\t}\n\tseconds, _ := strconv.ParseInt(secondsPortion, 10, 64)\n\tfraction, _ := strconv.ParseFloat(fracPortion, 64)\n\treturn time.Unix(seconds, int64(fraction*1000000000))\n}\n\ntype zncPlaybackTimes struct {\n\tafter time.Time\n\tbefore time.Time\n\ttargets map[string]bool \/\/ nil for \"*\" (everything), otherwise the channel names\n}\n\n\/\/ https:\/\/wiki.znc.in\/Playback\n\/\/ PRIVMSG *playback :play <target> [lower_bound] [upper_bound]\n\/\/ e.g., PRIVMSG *playback :play * 1558374442\nfunc zncPlaybackHandler(client *Client, command string, params []string, rb *ResponseBuffer) {\n\tif len(params) < 2 {\n\t\treturn\n\t} else if strings.ToLower(params[0]) != \"play\" {\n\t\treturn\n\t}\n\ttargetString := params[1]\n\n\tvar after, before time.Time\n\tif 2 < len(params) {\n\t\tafter = zncWireTimeToTime(params[2])\n\t}\n\tif 3 < len(params) {\n\t\tbefore = zncWireTimeToTime(params[3])\n\t}\n\n\tvar targets map[string]bool\n\n\t\/\/ OK: the user's PMs get played back immediately on receiving this,\n\t\/\/ then we save the timestamps in the session to handle replay on future channel joins\n\tconfig := client.server.Config()\n\tif params[1] == \"*\" {\n\t\titems, _ := client.history.Between(after, before, false, config.History.ChathistoryMax)\n\t\tclient.replayPrivmsgHistory(rb, items, true)\n\t} else {\n\t\tfor _, targetName := range strings.Split(targetString, \",\") {\n\t\t\tif cfTarget, err := CasefoldChannel(targetName); err == nil {\n\t\t\t\tif targets == nil {\n\t\t\t\t\ttargets = make(map[string]bool)\n\t\t\t\t}\n\t\t\t\ttargets[cfTarget] = true\n\t\t\t}\n\t\t}\n\t}\n\n\trb.session.zncPlaybackTimes = &zncPlaybackTimes{\n\t\tafter: after,\n\t\tbefore: before,\n\t\ttargets: targets,\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package network\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/openshift\/source-to-image\/pkg\/tar\"\n\ts2ifs \"github.com\/openshift\/source-to-image\/pkg\/util\/fs\"\n\n\tkerrs \"k8s.io\/apimachinery\/pkg\/util\/errors\"\n\tkapi \"k8s.io\/kubernetes\/pkg\/apis\/core\"\n\t\"k8s.io\/kubernetes\/pkg\/kubectl\/polymorphichelpers\"\n\n\t\"github.com\/openshift\/origin\/pkg\/oc\/cli\/admin\/diagnostics\/diagnostics\/cluster\/network\/in_pod\/util\"\n)\n\nfunc (d *NetworkDiagnostic) CollectNetworkPodLogs() error {\n\tpodList, err := d.getPodList(d.nsName1, util.NetworkDiagPodNamePrefix)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terrList := []error{}\n\tfor _, pod := range podList.Items {\n\t\tif err := d.getNetworkPodLogs(&pod); err != nil {\n\t\t\terrList = append(errList, err)\n\t\t}\n\t}\n\treturn kerrs.NewAggregate(errList)\n}\n\nfunc (d *NetworkDiagnostic) CollectNetworkInfo() error {\n\t\/\/ Collect useful info from master\n\tl := util.LogInterface{\n\t\tResult: d.res,\n\t\tLogdir: filepath.Join(d.LogDir, util.NetworkDiagMasterLogDirPrefix),\n\t}\n\tl.LogMaster()\n\n\tpodList, err := d.getPodList(d.nsName1, util.NetworkDiagPodNamePrefix)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terrList := []error{}\n\tfor _, pod := range podList.Items {\n\t\tif pod.Status.Phase != kapi.PodRunning {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := d.copyNetworkPodInfo(&pod); err != nil {\n\t\t\terrList = append(errList, err)\n\t\t}\n\t}\n\treturn kerrs.NewAggregate(errList)\n}\n\nfunc (d *NetworkDiagnostic) copyNetworkPodInfo(pod *kapi.Pod) error {\n\ttmp, err := ioutil.TempFile(\"\", \"network-diags\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Can not create local temporary file for tar: %v\", err)\n\t}\n\tdefer os.Remove(tmp.Name())\n\n\t\/\/ Tar logdir on the remote node and copy to a local temporary file\n\terrBuf := &bytes.Buffer{}\n\tnodeLogDir := filepath.Join(util.NetworkDiagDefaultLogDir, util.NetworkDiagNodeLogDirPrefix, pod.Spec.NodeName)\n\tcmd := []string{\"chroot\", util.NetworkDiagContainerMountPath, \"tar\", \"-C\", nodeLogDir, \"-c\", \".\"}\n\tif err = util.Execute(d.Factory, cmd, pod, nil, tmp, errBuf); err != nil {\n\t\treturn fmt.Errorf(\"Creating remote tar locally failed: %v, %s\", err, errBuf.String())\n\t}\n\tif err := tmp.Close(); err != nil {\n\t\treturn fmt.Errorf(\"Closing temporary tar file %s failed: %v\", tmp.Name(), err)\n\t}\n\n\t\/\/ Extract copied temporary file locally\n\ttmp, err = os.Open(tmp.Name())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Can not open temporary tar file %s: %v\", tmp.Name(), err)\n\t}\n\tdefer tmp.Close()\n\n\ttarHelper := tar.New(s2ifs.NewFileSystem())\n\ttarHelper.SetExclusionPattern(nil)\n\tlogdir := filepath.Join(d.LogDir, util.NetworkDiagNodeLogDirPrefix, pod.Spec.NodeName)\n\terr = tarHelper.ExtractTarStream(logdir, tmp)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Untar local directory failed: %v, %s\", err, errBuf.String())\n\t}\n\treturn nil\n}\n\nfunc (d *NetworkDiagnostic) getNetworkPodLogs(pod *kapi.Pod) error {\n\tbytelim := int64(1024000)\n\topts := &kapi.PodLogOptions{\n\t\tTypeMeta: pod.TypeMeta,\n\t\tContainer: pod.Name,\n\t\tFollow: true,\n\t\tLimitBytes: &bytelim,\n\t}\n\n\trequests, err := polymorphichelpers.LogsForObjectFn(d.Factory, pod, opts, 1*time.Minute, false)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Request for network diagnostic pod on node %q failed unexpectedly: %v\", pod.Spec.NodeName, err)\n\t}\n\tfor _, req := range requests {\n\t\treadCloser, err := req.Stream()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Logs for network diagnostic pod on node %q failed: %v\", pod.Spec.NodeName, err)\n\t\t}\n\t\tdefer readCloser.Close()\n\n\t\tscanner := bufio.NewScanner(readCloser)\n\t\tpodLogs, nwarnings, nerrors := \"\", 0, 0\n\t\terrorRegex := regexp.MustCompile(`^\\[Note\\]\\s+Errors\\s+seen:\\s+(\\d+)`)\n\t\twarnRegex := regexp.MustCompile(`^\\[Note\\]\\s+Warnings\\s+seen:\\s+(\\d+)`)\n\n\t\tfor scanner.Scan() {\n\t\t\tline := scanner.Text()\n\t\t\tpodLogs += line + \"\\n\"\n\t\t\tif matches := errorRegex.FindStringSubmatch(line); matches != nil {\n\t\t\t\tnerrors, _ = strconv.Atoi(matches[1])\n\t\t\t} else if matches := warnRegex.FindStringSubmatch(line); matches != nil {\n\t\t\t\tnwarnings, _ = strconv.Atoi(matches[1])\n\t\t\t}\n\t\t}\n\n\t\tif err := scanner.Err(); err != nil { \/\/ Scan terminated abnormally\n\t\t\treturn fmt.Errorf(\"Unexpected error reading network diagnostic pod on node %q: (%T) %[1]v\\nLogs are:\\n%[3]s\", pod.Spec.NodeName, err, podLogs)\n\t\t} else {\n\t\t\tif nerrors > 0 {\n\t\t\t\treturn fmt.Errorf(\"See the errors below in the output from the network diagnostic pod on node %q:\\n%s\", pod.Spec.NodeName, podLogs)\n\t\t\t} else if nwarnings > 0 {\n\t\t\t\td.res.Warn(\"DNet4002\", nil, fmt.Sprintf(\"See the warnings below in the output from the network diagnostic pod on node %q:\\n%s\", pod.Spec.NodeName, podLogs))\n\t\t\t} else {\n\t\t\t\td.res.Info(\"DNet4003\", fmt.Sprintf(\"Output from the network diagnostic pod on node %q:\\n%s\", pod.Spec.NodeName, podLogs))\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Fix provided options object is not a PodLogOptions error in network diags<commit_after>package network\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com\/openshift\/source-to-image\/pkg\/tar\"\n\ts2ifs \"github.com\/openshift\/source-to-image\/pkg\/util\/fs\"\n\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tkerrs \"k8s.io\/apimachinery\/pkg\/util\/errors\"\n\tkapi \"k8s.io\/kubernetes\/pkg\/apis\/core\"\n\t\"k8s.io\/kubernetes\/pkg\/kubectl\/polymorphichelpers\"\n\n\t\"github.com\/openshift\/origin\/pkg\/oc\/cli\/admin\/diagnostics\/diagnostics\/cluster\/network\/in_pod\/util\"\n)\n\nfunc (d *NetworkDiagnostic) CollectNetworkPodLogs() error {\n\tpodList, err := d.getPodList(d.nsName1, util.NetworkDiagPodNamePrefix)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terrList := []error{}\n\tfor _, pod := range podList.Items {\n\t\tif err := d.getNetworkPodLogs(&pod); err != nil {\n\t\t\terrList = append(errList, err)\n\t\t}\n\t}\n\treturn kerrs.NewAggregate(errList)\n}\n\nfunc (d *NetworkDiagnostic) CollectNetworkInfo() error {\n\t\/\/ Collect useful info from master\n\tl := util.LogInterface{\n\t\tResult: d.res,\n\t\tLogdir: filepath.Join(d.LogDir, util.NetworkDiagMasterLogDirPrefix),\n\t}\n\tl.LogMaster()\n\n\tpodList, err := d.getPodList(d.nsName1, util.NetworkDiagPodNamePrefix)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terrList := []error{}\n\tfor _, pod := range podList.Items {\n\t\tif pod.Status.Phase != kapi.PodRunning {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := d.copyNetworkPodInfo(&pod); err != nil {\n\t\t\terrList = append(errList, err)\n\t\t}\n\t}\n\treturn kerrs.NewAggregate(errList)\n}\n\nfunc (d *NetworkDiagnostic) copyNetworkPodInfo(pod *kapi.Pod) error {\n\ttmp, err := ioutil.TempFile(\"\", \"network-diags\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Can not create local temporary file for tar: %v\", err)\n\t}\n\tdefer os.Remove(tmp.Name())\n\n\t\/\/ Tar logdir on the remote node and copy to a local temporary file\n\terrBuf := &bytes.Buffer{}\n\tnodeLogDir := filepath.Join(util.NetworkDiagDefaultLogDir, util.NetworkDiagNodeLogDirPrefix, pod.Spec.NodeName)\n\tcmd := []string{\"chroot\", util.NetworkDiagContainerMountPath, \"tar\", \"-C\", nodeLogDir, \"-c\", \".\"}\n\tif err = util.Execute(d.Factory, cmd, pod, nil, tmp, errBuf); err != nil {\n\t\treturn fmt.Errorf(\"Creating remote tar locally failed: %v, %s\", err, errBuf.String())\n\t}\n\tif err := tmp.Close(); err != nil {\n\t\treturn fmt.Errorf(\"Closing temporary tar file %s failed: %v\", tmp.Name(), err)\n\t}\n\n\t\/\/ Extract copied temporary file locally\n\ttmp, err = os.Open(tmp.Name())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Can not open temporary tar file %s: %v\", tmp.Name(), err)\n\t}\n\tdefer tmp.Close()\n\n\ttarHelper := tar.New(s2ifs.NewFileSystem())\n\ttarHelper.SetExclusionPattern(nil)\n\tlogdir := filepath.Join(d.LogDir, util.NetworkDiagNodeLogDirPrefix, pod.Spec.NodeName)\n\terr = tarHelper.ExtractTarStream(logdir, tmp)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Untar local directory failed: %v, %s\", err, errBuf.String())\n\t}\n\treturn nil\n}\n\nfunc (d *NetworkDiagnostic) getNetworkPodLogs(pod *kapi.Pod) error {\n\tbytelim := int64(1024000)\n\topts := &corev1.PodLogOptions{\n\t\tTypeMeta: pod.TypeMeta,\n\t\tContainer: pod.Name,\n\t\tFollow: true,\n\t\tLimitBytes: &bytelim,\n\t}\n\n\trequests, err := polymorphichelpers.LogsForObjectFn(d.Factory, pod, opts, 1*time.Minute, false)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Request for network diagnostic pod on node %q failed unexpectedly: %v\", pod.Spec.NodeName, err)\n\t}\n\tfor _, req := range requests {\n\t\treadCloser, err := req.Stream()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Logs for network diagnostic pod on node %q failed: %v\", pod.Spec.NodeName, err)\n\t\t}\n\t\tdefer readCloser.Close()\n\n\t\tscanner := bufio.NewScanner(readCloser)\n\t\tpodLogs, nwarnings, nerrors := \"\", 0, 0\n\t\terrorRegex := regexp.MustCompile(`^\\[Note\\]\\s+Errors\\s+seen:\\s+(\\d+)`)\n\t\twarnRegex := regexp.MustCompile(`^\\[Note\\]\\s+Warnings\\s+seen:\\s+(\\d+)`)\n\n\t\tfor scanner.Scan() {\n\t\t\tline := scanner.Text()\n\t\t\tpodLogs += line + \"\\n\"\n\t\t\tif matches := errorRegex.FindStringSubmatch(line); matches != nil {\n\t\t\t\tnerrors, _ = strconv.Atoi(matches[1])\n\t\t\t} else if matches := warnRegex.FindStringSubmatch(line); matches != nil {\n\t\t\t\tnwarnings, _ = strconv.Atoi(matches[1])\n\t\t\t}\n\t\t}\n\n\t\tif err := scanner.Err(); err != nil { \/\/ Scan terminated abnormally\n\t\t\treturn fmt.Errorf(\"Unexpected error reading network diagnostic pod on node %q: (%T) %[1]v\\nLogs are:\\n%[3]s\", pod.Spec.NodeName, err, podLogs)\n\t\t} else {\n\t\t\tif nerrors > 0 {\n\t\t\t\treturn fmt.Errorf(\"See the errors below in the output from the network diagnostic pod on node %q:\\n%s\", pod.Spec.NodeName, podLogs)\n\t\t\t} else if nwarnings > 0 {\n\t\t\t\td.res.Warn(\"DNet4002\", nil, fmt.Sprintf(\"See the warnings below in the output from the network diagnostic pod on node %q:\\n%s\", pod.Spec.NodeName, podLogs))\n\t\t\t} else {\n\t\t\t\td.res.Info(\"DNet4003\", fmt.Sprintf(\"Output from the network diagnostic pod on node %q:\\n%s\", pod.Spec.NodeName, podLogs))\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package route_test\n\nimport (\n\t\"cf\"\n\t. \"cf\/commands\/route\"\n\t\"cf\/configuration\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\ttestapi \"testhelpers\/api\"\n\ttestcmd \"testhelpers\/commands\"\n\ttestconfig \"testhelpers\/configuration\"\n\ttestreq \"testhelpers\/requirements\"\n\ttestterm \"testhelpers\/terminal\"\n\t\"testing\"\n)\n\nfunc TestListingRoutes(t *testing.T) {\n\tdomain := cf.DomainFields{}\n\tdomain.Name = \"example.com\"\n\tdomain2 := cf.DomainFields{}\n\tdomain2.Name = \"cfapps.com\"\n\tdomain3 := cf.DomainFields{}\n\tdomain3.Name = \"another-example.com\"\n\n\tapp1 := cf.ApplicationFields{}\n\tapp1.Name = \"dora\"\n\tapp2 := cf.ApplicationFields{}\n\tapp2.Name = \"dora2\"\n\n\tapp3 := cf.ApplicationFields{}\n\tapp3.Name = \"my-app\"\n\tapp4 := cf.ApplicationFields{}\n\tapp4.Name = \"my-app2\"\n\n\tapp5 := cf.ApplicationFields{}\n\tapp5.Name = \"july\"\n\n\troute := cf.Route{}\n\troute.Host = \"hostname-1\"\n\troute.Domain = domain\n\troute.Apps = []cf.ApplicationFields{app1, app2}\n\troute2 := cf.Route{}\n\troute2.Host = \"hostname-2\"\n\troute2.Domain = domain2\n\troute2.Apps = []cf.ApplicationFields{app3, app4}\n\troute3 := cf.Route{}\n\troute3.Host = \"hostname-3\"\n\troute3.Domain = domain3\n\troute3.Apps = []cf.ApplicationFields{app5}\n\troutes := []cf.Route{route, route2, route3}\n\n\trouteRepo := &testapi.FakeRouteRepository{Routes: routes}\n\n\tui := callListRoutes(t, []string{}, &testreq.FakeReqFactory{}, routeRepo)\n\n\tassert.Contains(t, ui.Outputs[0], \"Getting routes\")\n\tassert.Contains(t, ui.Outputs[0], \"my-user\")\n\n\tassert.Contains(t, ui.Outputs[1], \"host\")\n\tassert.Contains(t, ui.Outputs[1], \"domain\")\n\tassert.Contains(t, ui.Outputs[1], \"apps\")\n\n\tassert.Contains(t, ui.Outputs[2], \"hostname-1\")\n\tassert.Contains(t, ui.Outputs[2], \"example.com\")\n\tassert.Contains(t, ui.Outputs[2], \"dora, dora2\")\n\n\tassert.Contains(t, ui.Outputs[3], \"hostname-2\")\n\tassert.Contains(t, ui.Outputs[3], \"cfapps.com\")\n\tassert.Contains(t, ui.Outputs[3], \"my-app, my-app2\")\n\n\tassert.Contains(t, ui.Outputs[4], \"hostname-3\")\n\tassert.Contains(t, ui.Outputs[4], \"another-example.com\")\n\tassert.Contains(t, ui.Outputs[4], \"july\")\n}\n\nfunc TestListingRoutesWhenNoneExist(t *testing.T) {\n\troutes := []cf.Route{}\n\trouteRepo := &testapi.FakeRouteRepository{Routes: routes}\n\n\tui := callListRoutes(t, []string{}, &testreq.FakeReqFactory{}, routeRepo)\n\n\tassert.Contains(t, ui.Outputs[0], \"Getting routes\")\n\tassert.Contains(t, ui.Outputs[0], \"my-user\")\n\tassert.Contains(t, ui.Outputs[1], \"No routes found\")\n}\n\nfunc TestListingRoutesWhenFindFails(t *testing.T) {\n\trouteRepo := &testapi.FakeRouteRepository{ListErr: true}\n\n\tui := callListRoutes(t, []string{}, &testreq.FakeReqFactory{}, routeRepo)\n\n\tassert.Contains(t, ui.Outputs[0], \"Getting routes\")\n\tassert.Contains(t, ui.Outputs[1], \"FAILED\")\n}\n\nfunc callListRoutes(t *testing.T, args []string, reqFactory *testreq.FakeReqFactory, routeRepo *testapi.FakeRouteRepository) (ui *testterm.FakeUI) {\n\n\tui = &testterm.FakeUI{}\n\n\tctxt := testcmd.NewContext(\"list-routes\", args)\n\n\ttoken, err := testconfig.CreateAccessTokenWithTokenInfo(configuration.TokenInfo{\n\t\tUsername: \"my-user\",\n\t})\n\tassert.NoError(t, err)\n\tspace := cf.SpaceFields{}\n\tspace.Name = \"my-space\"\n\torg := cf.OrganizationFields{}\n\torg.Name = \"my-org\"\n\tconfig := &configuration.Configuration{\n\t\tSpaceFields: space,\n\t\tOrganizationFields: org,\n\t\tAccessToken: token,\n\t}\n\n\tcmd := NewListRoutes(ui, config, routeRepo)\n\ttestcmd.RunCommand(cmd, ctxt, reqFactory)\n\n\treturn\n}\n<commit_msg>correct test context for routes command in test helper<commit_after>package route_test\n\nimport (\n\t\"cf\"\n\t. \"cf\/commands\/route\"\n\t\"cf\/configuration\"\n\t\"github.com\/stretchr\/testify\/assert\"\n\ttestapi \"testhelpers\/api\"\n\ttestcmd \"testhelpers\/commands\"\n\ttestconfig \"testhelpers\/configuration\"\n\ttestreq \"testhelpers\/requirements\"\n\ttestterm \"testhelpers\/terminal\"\n\t\"testing\"\n)\n\nfunc TestListingRoutes(t *testing.T) {\n\tdomain := cf.DomainFields{}\n\tdomain.Name = \"example.com\"\n\tdomain2 := cf.DomainFields{}\n\tdomain2.Name = \"cfapps.com\"\n\tdomain3 := cf.DomainFields{}\n\tdomain3.Name = \"another-example.com\"\n\n\tapp1 := cf.ApplicationFields{}\n\tapp1.Name = \"dora\"\n\tapp2 := cf.ApplicationFields{}\n\tapp2.Name = \"dora2\"\n\n\tapp3 := cf.ApplicationFields{}\n\tapp3.Name = \"my-app\"\n\tapp4 := cf.ApplicationFields{}\n\tapp4.Name = \"my-app2\"\n\n\tapp5 := cf.ApplicationFields{}\n\tapp5.Name = \"july\"\n\n\troute := cf.Route{}\n\troute.Host = \"hostname-1\"\n\troute.Domain = domain\n\troute.Apps = []cf.ApplicationFields{app1, app2}\n\troute2 := cf.Route{}\n\troute2.Host = \"hostname-2\"\n\troute2.Domain = domain2\n\troute2.Apps = []cf.ApplicationFields{app3, app4}\n\troute3 := cf.Route{}\n\troute3.Host = \"hostname-3\"\n\troute3.Domain = domain3\n\troute3.Apps = []cf.ApplicationFields{app5}\n\troutes := []cf.Route{route, route2, route3}\n\n\trouteRepo := &testapi.FakeRouteRepository{Routes: routes}\n\n\tui := callListRoutes(t, []string{}, &testreq.FakeReqFactory{}, routeRepo)\n\n\tassert.Contains(t, ui.Outputs[0], \"Getting routes\")\n\tassert.Contains(t, ui.Outputs[0], \"my-user\")\n\n\tassert.Contains(t, ui.Outputs[1], \"host\")\n\tassert.Contains(t, ui.Outputs[1], \"domain\")\n\tassert.Contains(t, ui.Outputs[1], \"apps\")\n\n\tassert.Contains(t, ui.Outputs[2], \"hostname-1\")\n\tassert.Contains(t, ui.Outputs[2], \"example.com\")\n\tassert.Contains(t, ui.Outputs[2], \"dora, dora2\")\n\n\tassert.Contains(t, ui.Outputs[3], \"hostname-2\")\n\tassert.Contains(t, ui.Outputs[3], \"cfapps.com\")\n\tassert.Contains(t, ui.Outputs[3], \"my-app, my-app2\")\n\n\tassert.Contains(t, ui.Outputs[4], \"hostname-3\")\n\tassert.Contains(t, ui.Outputs[4], \"another-example.com\")\n\tassert.Contains(t, ui.Outputs[4], \"july\")\n}\n\nfunc TestListingRoutesWhenNoneExist(t *testing.T) {\n\troutes := []cf.Route{}\n\trouteRepo := &testapi.FakeRouteRepository{Routes: routes}\n\n\tui := callListRoutes(t, []string{}, &testreq.FakeReqFactory{}, routeRepo)\n\n\tassert.Contains(t, ui.Outputs[0], \"Getting routes\")\n\tassert.Contains(t, ui.Outputs[0], \"my-user\")\n\tassert.Contains(t, ui.Outputs[1], \"No routes found\")\n}\n\nfunc TestListingRoutesWhenFindFails(t *testing.T) {\n\trouteRepo := &testapi.FakeRouteRepository{ListErr: true}\n\n\tui := callListRoutes(t, []string{}, &testreq.FakeReqFactory{}, routeRepo)\n\n\tassert.Contains(t, ui.Outputs[0], \"Getting routes\")\n\tassert.Contains(t, ui.Outputs[1], \"FAILED\")\n}\n\nfunc callListRoutes(t *testing.T, args []string, reqFactory *testreq.FakeReqFactory, routeRepo *testapi.FakeRouteRepository) (ui *testterm.FakeUI) {\n\n\tui = &testterm.FakeUI{}\n\n\tctxt := testcmd.NewContext(\"routes\", args)\n\n\ttoken, err := testconfig.CreateAccessTokenWithTokenInfo(configuration.TokenInfo{\n\t\tUsername: \"my-user\",\n\t})\n\tassert.NoError(t, err)\n\tspace := cf.SpaceFields{}\n\tspace.Name = \"my-space\"\n\torg := cf.OrganizationFields{}\n\torg.Name = \"my-org\"\n\tconfig := &configuration.Configuration{\n\t\tSpaceFields: space,\n\t\tOrganizationFields: org,\n\t\tAccessToken: token,\n\t}\n\n\tcmd := NewListRoutes(ui, config, routeRepo)\n\ttestcmd.RunCommand(cmd, ctxt, reqFactory)\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"math\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"testing\"\n\n\tdbm \"github.com\/tendermint\/tmlibs\/db\"\n\n\t\"github.com\/bytom\/accesstoken\"\n\t\"github.com\/bytom\/blockchain\/rpc\"\n\t\"github.com\/bytom\/blockchain\/txbuilder\"\n\t\"github.com\/bytom\/testutil\"\n)\n\nfunc TestAPIHandler(t *testing.T) {\n\ta := &API{}\n\tresponse := &Response{}\n\n\t\/\/ init httptest server\n\ta.buildHandler()\n\tserver := httptest.NewServer(a.handler)\n\tdefer server.Close()\n\n\t\/\/ create accessTokens\n\ttestDB := dbm.NewDB(\"testdb\", \"leveldb\", \"temp\")\n\tdefer os.RemoveAll(\"temp\")\n\ta.accessTokens = accesstoken.NewStore(testDB)\n\n\tclient := &rpc.Client{\n\t\tBaseURL: server.URL,\n\t\tAccessToken: \"test-user:test-secret\",\n\t}\n\n\tcases := []struct {\n\t\tpath string\n\t\trequest interface{}\n\t\trespWant *Response\n\t}{\n\t\t{\n\t\t\tpath: \"\/create-key\",\n\t\t\trequest: struct {\n\t\t\t\tAlias string `json:\"alias\"`\n\t\t\t\tPassword string `json:\"password\"`\n\t\t\t}{Alias: \"alice\", Password: \"123456\"},\n\t\t\trespWant: &Response{\n\t\t\t\tStatus: \"fail\",\n\t\t\t\tMsg: \"wallet not found, please check that the wallet is open\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tpath: \"\/error\",\n\t\t\trequest: nil,\n\t\t\trespWant: &Response{\n\t\t\t\tStatus: \"fail\",\n\t\t\t\tMsg: \"wallet not found, please check that the wallet is open\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tpath: \"\/\",\n\t\t\trequest: nil,\n\t\t\trespWant: &Response{\n\t\t\t\tStatus: \"\",\n\t\t\t\tMsg: \"\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tpath: \"\/create-access-token\",\n\t\t\trequest: struct {\n\t\t\t\tID string `json:\"id\"`\n\t\t\t\tType string `json:\"type\"`\n\t\t\t}{ID: \"test-access-id\", Type: \"test-access-type\"},\n\t\t\trespWant: &Response{\n\t\t\t\tStatus: \"success\",\n\t\t\t\tMsg: \"\",\n\t\t\t\tData: map[string]interface{}{\"id\": \"test-access-id\", \"type\": \"test-access-type\", \"token\": \"test-access-id:440d87ae0d625a7fcf076275b18372e09a0899e37ec86398879388de90cb0c67\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tpath: \"\/gas-rate\",\n\t\t\trequest: nil,\n\t\t\trespWant: &Response{\n\t\t\t\tStatus: \"success\",\n\t\t\t\tMsg: \"\",\n\t\t\t\tData: map[string]interface{}{\"gasRate\": 1000},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tresponse = &Response{}\n\t\tclient.Call(context.Background(), c.path, c.request, &response)\n\n\t\tif !testutil.DeepEqual(response.Status, c.respWant.Status) {\n\t\t\tt.Errorf(`got=%#v; want=%#v`, response.Status, c.respWant.Status)\n\t\t}\n\t}\n}\n\nfunc TestEstimateTxGas(t *testing.T) {\n\ttmplStr := `{\"allow_additional_actions\":false,\"raw_transaction\":\"070100010161015ffe8a1209937a6a8b22e8c01f056fd5f1730734ba8964d6b79de4a639032cecddffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8099c4d59901000116001485eb6eee8023332da85df60157dc9b16cc553fb2010002013dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80afa08b4f011600142b4fd033bc76b4ddf5cb00f625362c4bc7b10efa00013dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8090dfc04a011600146eea1ce6cfa5b718ae8094376be9bc1a87c9c82700\",\"signing_instructions\":[{\"position\":0,\"witness_components\":[{\"keys\":[{\"derivation_path\":[\"010100000000000000\",\"0100000000000000\"],\"xpub\":\"cb4e5932d808ee060df9552963d87f60edac42360b11d4ad89558ef2acea4d4aaf4818f2ebf5a599382b8dfce0a0c798c7e44ec2667b3a1d34c61ba57609de55\"}],\"quorum\":1,\"signatures\":null,\"type\":\"raw_tx_signature\"},{\"type\":\"data\",\"value\":\"1c9b5c1db7f4afe31fd1b7e0495a8bb042a271d8d7924d4fc1ff7cf1bff15813\"}]}]}`\n\ttemplate := txbuilder.Template{}\n\terr := json.Unmarshal([]byte(tmplStr), &template)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\testimateResult, err := EstimateTxGas(template)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttotalNeu := float64(estimateResult.StorageNeu+estimateResult.VMNeu) \/ float64(100000)\n\troundingNeu := math.Ceil(totalNeu)\n\testimateNeu := int64(roundingNeu) * int64(100000)\n\n\tif estimateResult.TotalNeu != estimateNeu {\n\t\tt.Errorf(`got=%#v; want=%#v`, estimateResult.TotalNeu, estimateNeu)\n\t}\n}\n<commit_msg>optmise test<commit_after>package api\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"math\"\n\t\"net\/http\/httptest\"\n\t\"os\"\n\t\"testing\"\n\n\tdbm \"github.com\/tendermint\/tmlibs\/db\"\n\n\t\"github.com\/bytom\/accesstoken\"\n\t\"github.com\/bytom\/blockchain\/rpc\"\n\t\"github.com\/bytom\/blockchain\/txbuilder\"\n\t\"github.com\/bytom\/testutil\"\n)\n\nfunc TestAPIHandler(t *testing.T) {\n\ta := &API{}\n\tresponse := &Response{}\n\n\t\/\/ init httptest server\n\ta.buildHandler()\n\tserver := httptest.NewServer(a.handler)\n\tdefer server.Close()\n\n\t\/\/ create accessTokens\n\ttestDB := dbm.NewDB(\"testdb\", \"leveldb\", \"temp\")\n\tdefer os.RemoveAll(\"temp\")\n\ta.accessTokens = accesstoken.NewStore(testDB)\n\n\tclient := &rpc.Client{\n\t\tBaseURL: server.URL,\n\t\tAccessToken: \"test-user:test-secret\",\n\t}\n\n\tcases := []struct {\n\t\tpath string\n\t\trequest interface{}\n\t\trespWant *Response\n\t}{\n\t\t{\n\t\t\tpath: \"\/create-key\",\n\t\t\trequest: struct {\n\t\t\t\tAlias string `json:\"alias\"`\n\t\t\t\tPassword string `json:\"password\"`\n\t\t\t}{Alias: \"alice\", Password: \"123456\"},\n\t\t\trespWant: &Response{\n\t\t\t\tStatus: \"fail\",\n\t\t\t\tMsg: \"wallet not found, please check that the wallet is open\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tpath: \"\/error\",\n\t\t\trequest: nil,\n\t\t\trespWant: &Response{\n\t\t\t\tStatus: \"fail\",\n\t\t\t\tMsg: \"wallet not found, please check that the wallet is open\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tpath: \"\/\",\n\t\t\trequest: nil,\n\t\t\trespWant: &Response{\n\t\t\t\tStatus: \"\",\n\t\t\t\tMsg: \"\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tpath: \"\/create-access-token\",\n\t\t\trequest: struct {\n\t\t\t\tID string `json:\"id\"`\n\t\t\t\tType string `json:\"type\"`\n\t\t\t}{ID: \"test-access-id\", Type: \"test-access-type\"},\n\t\t\trespWant: &Response{\n\t\t\t\tStatus: \"success\",\n\t\t\t\tMsg: \"\",\n\t\t\t\tData: map[string]interface{}{\"id\": \"test-access-id\", \"type\": \"test-access-type\", \"token\": \"test-access-id:440d87ae0d625a7fcf076275b18372e09a0899e37ec86398879388de90cb0c67\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tpath: \"\/gas-rate\",\n\t\t\trequest: nil,\n\t\t\trespWant: &Response{\n\t\t\t\tStatus: \"success\",\n\t\t\t\tMsg: \"\",\n\t\t\t\tData: map[string]interface{}{\"gasRate\": 1000},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tresponse = &Response{}\n\t\tclient.Call(context.Background(), c.path, c.request, &response)\n\n\t\tif !testutil.DeepEqual(response.Status, c.respWant.Status) {\n\t\t\tt.Errorf(`got=%#v; want=%#v`, response.Status, c.respWant.Status)\n\t\t}\n\t}\n}\n\nfunc TestEstimateTxGas(t *testing.T) {\n\ttmplStr := `{\"allow_additional_actions\":false,\"raw_transaction\":\"070100010161015ffe8a1209937a6a8b22e8c01f056fd5f1730734ba8964d6b79de4a639032cecddffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8099c4d59901000116001485eb6eee8023332da85df60157dc9b16cc553fb2010002013dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80afa08b4f011600142b4fd033bc76b4ddf5cb00f625362c4bc7b10efa00013dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8090dfc04a011600146eea1ce6cfa5b718ae8094376be9bc1a87c9c82700\",\"signing_instructions\":[{\"position\":0,\"witness_components\":[{\"keys\":[{\"derivation_path\":[\"010100000000000000\",\"0100000000000000\"],\"xpub\":\"cb4e5932d808ee060df9552963d87f60edac42360b11d4ad89558ef2acea4d4aaf4818f2ebf5a599382b8dfce0a0c798c7e44ec2667b3a1d34c61ba57609de55\"}],\"quorum\":1,\"signatures\":null,\"type\":\"raw_tx_signature\"},{\"type\":\"data\",\"value\":\"1c9b5c1db7f4afe31fd1b7e0495a8bb042a271d8d7924d4fc1ff7cf1bff15813\"}]}]}`\n\ttemplate := txbuilder.Template{}\n\terr := json.Unmarshal([]byte(tmplStr), &template)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\testimateResult, err := EstimateTxGas(template)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tbaseRate := float64(100000)\n\ttotalNeu := float64(estimateResult.StorageNeu+estimateResult.VMNeu) \/ baseRate\n\troundingNeu := math.Ceil(totalNeu)\n\testimateNeu := int64(roundingNeu) * int64(baseRate)\n\n\tif estimateResult.TotalNeu != estimateNeu {\n\t\tt.Errorf(`got=%#v; want=%#v`, estimateResult.TotalNeu, estimateNeu)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package rocserv\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gitlab.pri.ibanyu.com\/middleware\/seaweed\/xcontext\"\n\txprom \"gitlab.pri.ibanyu.com\/middleware\/seaweed\/xstat\/xmetric\/xprometheus\"\n\t\"gitlab.pri.ibanyu.com\/middleware\/seaweed\/xtrace\"\n\t\"gitlab.pri.ibanyu.com\/middleware\/util\/idl\/gen-go\/util\/thriftutil\"\n\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/gin-gonic\/gin\/binding\"\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"github.com\/uber\/jaeger-client-go\"\n)\n\nconst (\n\tHttpHeaderKeyTraceID = \"ipalfish-trace-id\"\n\n\tWildCharacter = \":\"\n\n\tRoutePath = \"req-simple-path\"\n\n\tHttpHeaderKeyGroup = \"ipalfish-group\"\n\tHttpHeaderKeyHead = \"ipalfish-head\"\n\n\tCookieNameGroup = \"ipalfish_group\"\n)\n\n\/\/ HttpServer is the http server, Create an instance of GinServer, by using NewGinServer()\ntype HttpServer struct {\n\t*gin.Engine\n}\n\n\/\/ Context warp gin Context\ntype Context struct {\n\t*gin.Context\n}\n\n\/\/ HandlerFunc ...\ntype HandlerFunc func(*Context)\n\n\/\/ NewHttpServer create http server with gin\nfunc NewHttpServer() *HttpServer {\n\t\/\/ 实例化gin Server\n\trouter := gin.New()\n\trouter.Use(Recovery(), InjectFromRequest(), Metric(), Trace())\n\n\treturn &HttpServer{router}\n}\n\n\/\/ Use attachs a global middleware to the router\nfunc (s *HttpServer) Use(middleware ...HandlerFunc) {\n\ts.Engine.Use(mutilWrap(middleware...)...)\n}\n\n\/\/ GET is a shortcut for router.Handle(\"GET\", path, handle).\nfunc (s *HttpServer) GET(relativePath string, handlers ...HandlerFunc) {\n\tws := append([]gin.HandlerFunc{pathHook(relativePath)}, mutilWrap(handlers...)...)\n\ts.Engine.GET(relativePath, ws...)\n}\n\n\/\/ WGET is wrap httprouter handle to GET\nfunc (s *HttpServer) WGET(relativePath string, handlers ...httprouter.Handle) {\n\ts.GET(relativePath, MutilWrapHttpRouter(handlers...)...)\n}\n\n\/\/ POST is a shortcut for router.Handle(\"POST\", path, handle).\nfunc (s *HttpServer) POST(relativePath string, handlers ...HandlerFunc) {\n\tws := append([]gin.HandlerFunc{pathHook(relativePath)}, mutilWrap(handlers...)...)\n\ts.Engine.POST(relativePath, ws...)\n}\n\n\/\/ WPOST is wrap httprouter handle to POST\nfunc (s *HttpServer) WPOST(relativePath string, handlers ...httprouter.Handle) {\n\ts.POST(relativePath, MutilWrapHttpRouter(handlers...)...)\n}\n\n\/\/ PUT is a shortcut for router.Handle(\"PUT\", path, handle).\nfunc (s *HttpServer) PUT(relativePath string, handlers ...HandlerFunc) {\n\tws := append([]gin.HandlerFunc{pathHook(relativePath)}, mutilWrap(handlers...)...)\n\ts.Engine.PUT(relativePath, ws...)\n}\n\n\/\/ WPUT is wrap httprouter handle to PUT\nfunc (s *HttpServer) WPUT(relativePath string, handlers ...httprouter.Handle) {\n\ts.PUT(relativePath, MutilWrapHttpRouter(handlers...)...)\n}\n\n\/\/ Any registers a route that matches all the HTTP methods.\nfunc (s *HttpServer) Any(relativePath string, handlers ...HandlerFunc) {\n\tws := append([]gin.HandlerFunc{pathHook(relativePath)}, mutilWrap(handlers...)...)\n\ts.Engine.Any(relativePath, ws...)\n}\n\n\/\/ WAny is wrap httprouter handle to ANY\nfunc (s *HttpServer) WAny(relativePath string, handlers ...httprouter.Handle) {\n\ts.Any(relativePath, MutilWrapHttpRouter(handlers...)...)\n}\n\n\/\/ DELETE is a shortcut for router.Handle(\"DELETE\", path, handle).\nfunc (s *HttpServer) DELETE(relativePath string, handlers ...HandlerFunc) {\n\tws := append([]gin.HandlerFunc{pathHook(relativePath)}, mutilWrap(handlers...)...)\n\ts.Engine.DELETE(relativePath, ws...)\n}\n\n\/\/ WDELETE is wrap httprouter handle to DELETE\nfunc (s *HttpServer) WDELETE(relativePath string, handlers ...httprouter.Handle) {\n\ts.DELETE(relativePath, MutilWrapHttpRouter(handlers...)...)\n}\n\n\/\/ PATCH is a shortcut for router.Handle(\"PATCH\", path, handle).\nfunc (s *HttpServer) PATCH(relativePath string, handlers ...HandlerFunc) {\n\tws := append([]gin.HandlerFunc{pathHook(relativePath)}, mutilWrap(handlers...)...)\n\ts.Engine.PATCH(relativePath, ws...)\n}\n\n\/\/ WPATCH is wrap httprouter handle to PATCH\nfunc (s *HttpServer) WPATCH(relativePath string, handlers ...httprouter.Handle) {\n\ts.PATCH(relativePath, MutilWrapHttpRouter(handlers...)...)\n}\n\n\/\/ OPTIONS is a shortcut for router.Handle(\"OPTIONS\", path, handle).\nfunc (s *HttpServer) OPTIONS(relativePath string, handlers ...HandlerFunc) {\n\tws := append([]gin.HandlerFunc{pathHook(relativePath)}, mutilWrap(handlers...)...)\n\ts.Engine.OPTIONS(relativePath, ws...)\n}\n\n\/\/ WOPTIONS is wrap httprouter handle to OPTIONS\nfunc (s *HttpServer) WOPTIONS(relativePath string, handlers ...httprouter.Handle) {\n\ts.OPTIONS(relativePath, MutilWrapHttpRouter(handlers...)...)\n}\n\n\/\/ HEAD is a shortcut for router.Handle(\"HEAD\", path, handle).\nfunc (s *HttpServer) HEAD(relativePath string, handlers ...HandlerFunc) {\n\tws := append([]gin.HandlerFunc{pathHook(relativePath)}, mutilWrap(handlers...)...)\n\ts.Engine.HEAD(relativePath, ws...)\n}\n\n\/\/ WHEAD is wrap httprouter handle to HEAD\nfunc (s *HttpServer) WHEAD(relativePath string, handlers ...httprouter.Handle) {\n\ts.HEAD(relativePath, MutilWrapHttpRouter(handlers...)...)\n}\n\n\/\/ Bind checks the Content-Type to select a binding engine automatically\nfunc (c *Context) Bind(obj interface{}) error {\n\tb := binding.Default(c.Request.Method, c.ContentType())\n\treturn c.MustBindWith(obj, b)\n}\n\nfunc InjectFromRequest() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tctx := c.Request.Context()\n\t\tctx = extractThriftUtilContextControlFromRequest(ctx, c.Request)\n\t\tctx = extractThriftUtilContextHeadFromRequest(ctx, c.Request)\n\t\tc.Request = c.Request.WithContext(ctx)\n\t}\n}\n\n\/\/ Metric returns a metric middleware\nfunc Metric() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tctx := c.Request.Context()\n\t\tctx = contextWithErrCode(ctx,1)\n\t\tc.Request = c.Request.WithContext(ctx)\n\n\t\tnow := time.Now()\n\t\tc.Next()\n\t\tdt := time.Since(now)\n\n\t\terrCode := getErrCodeFromContext(c.Request.Context())\n\t\tif path, exist := c.Get(RoutePath); exist {\n\t\t\tif fun, ok := path.(string); ok {\n\t\t\t\tgroup, serviceName := GetGroupAndService()\n\t\t\t\t_metricAPIRequestCount.With(xprom.LabelGroupName, group, xprom.LabelServiceName, serviceName, xprom.LabelAPI, fun, xprom.LabelErrCode, strconv.Itoa(errCode)).Inc()\n\t\t\t\t_metricAPIRequestTime.With(xprom.LabelGroupName, group, xprom.LabelServiceName, serviceName, xprom.LabelAPI, fun, xprom.LabelErrCode, strconv.Itoa(errCode)).Observe(float64(dt \/ time.Millisecond))\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Trace returns a trace middleware\nfunc Trace() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tspan := xtrace.SpanFromContext(c.Request.Context())\n\t\tif span == nil {\n\t\t\tnewSpan, ctx := xtrace.StartSpanFromContext(c.Request.Context(), c.Request.RequestURI)\n\t\t\tc.Request.WithContext(ctx)\n\t\t\tspan = newSpan\n\t\t}\n\t\tdefer span.Finish()\n\n\t\tif sc, ok := span.Context().(jaeger.SpanContext); ok {\n\t\t\tc.Writer.Header()[HttpHeaderKeyTraceID] = []string{fmt.Sprint(sc.TraceID())}\n\t\t}\n\n\t\tc.Next()\n\t}\n}\n\n\/\/ Recovery returns a middleware that recovers from any panics and writes a 500 if there was one.\nfunc Recovery() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tdefer func() {\n\t\t\tvar rawReq []byte\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tconst size = 64 << 10\n\t\t\t\tbuf := make([]byte, size)\n\t\t\t\tbuf = buf[:runtime.Stack(buf, false)]\n\t\t\t\tif c.Request != nil {\n\t\t\t\t\trawReq, _ = httputil.DumpRequest(c.Request, false)\n\t\t\t\t}\n\t\t\t\tpl := fmt.Sprintf(\"http call panic: %s\\n%v\\n%s\\n\", string(rawReq), err, buf)\n\t\t\t\tfmt.Println(pl)\n\t\t\t\tc.AbortWithStatus(500)\n\t\t\t}\n\t\t}()\n\t\tc.Next()\n\t}\n}\n\nfunc pathHook(relativePath string) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tvalues := strings.Split(relativePath, WildCharacter)\n\t\tc.Set(RoutePath, values[0])\n\t}\n}\n\nfunc mutilWrap(handlers ...HandlerFunc) []gin.HandlerFunc {\n\tvar h = make([]gin.HandlerFunc, len(handlers))\n\tfor k, v := range handlers {\n\t\th[k] = wrap(v)\n\t}\n\treturn h\n}\n\nfunc wrap(h HandlerFunc) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\th(&Context{c})\n\t}\n}\n\n\/\/ MutilWrapHttpRouter wrap many httprouter handle to roc httpserver handle\nfunc MutilWrapHttpRouter(handlers ...httprouter.Handle) []HandlerFunc {\n\tvar h = make([]HandlerFunc, len(handlers))\n\tfor k, v := range handlers {\n\t\th[k] = WrapHttpRouter(v)\n\t}\n\treturn h\n}\n\n\/\/ WrapHttpRouter wrap httprouter handle to roc httpserver handle\nfunc WrapHttpRouter(handle httprouter.Handle) HandlerFunc {\n\treturn func(c *Context) {\n\t\tparams := make([]httprouter.Param, 0)\n\t\tfor param := range c.Params {\n\t\t\tparams = append(params, httprouter.Param{\n\t\t\t\tKey: c.Params[param].Key,\n\t\t\t\tValue: c.Params[param].Value,\n\t\t\t})\n\t\t}\n\t\thandle(c.Writer, c.Request, params)\n\t}\n}\n\nfunc extractThriftUtilContextControlFromRequest(ctx context.Context, req *http.Request) context.Context {\n\tvar group string\n\tif group = extractRouteGroupFromHost(req); group != \"\" {\n\t\treturn injectRouteGroupToContext(ctx, group)\n\t}\n\n\tif group = extractRouteGroupFromHeader(req); group != \"\" {\n\t\treturn injectRouteGroupToContext(ctx, group)\n\t}\n\n\tif group = extractRouteGroupFromCookie(req); group != \"\" {\n\t\treturn injectRouteGroupToContext(ctx, group)\n\t}\n\n\treturn injectRouteGroupToContext(ctx, xcontext.DefaultGroup)\n}\n\nfunc extractThriftUtilContextHeadFromRequest(ctx context.Context, req *http.Request) context.Context {\n\t\/\/ NOTE: 如果已经有了就先不覆盖\n\tval := ctx.Value(xcontext.ContextKeyHead)\n\tif val != nil {\n\t\treturn ctx\n\t}\n\n\theadJsonString := req.Header.Get(HttpHeaderKeyHead)\n\tvar head thriftutil.Head\n\t_ = json.Unmarshal([]byte(headJsonString), &head)\n\tctx = context.WithValue(ctx, xcontext.ContextKeyHead, &head)\n\treturn ctx\n}\n\nvar domainRouteRegexp = regexp.MustCompile(`(?P<group>.+)\\.group\\..+`)\n\nfunc extractRouteGroupFromHost(r *http.Request) (group string) {\n\tmatches := domainRouteRegexp.FindStringSubmatch(r.Host)\n\tnames := domainRouteRegexp.SubexpNames()\n\tfor i, _ := range matches {\n\t\tif names[i] == \"group\" {\n\t\t\tgroup = matches[i]\n\t\t}\n\t}\n\treturn\n}\n\nfunc injectRouteGroupToContext(ctx context.Context, group string) context.Context {\n\tcontrol := thriftutil.NewDefaultControl()\n\tcontrol.Route.Group = group\n\n\treturn context.WithValue(ctx, xcontext.ContextKeyControl, control)\n}\n\nfunc extractRouteGroupFromHeader(r *http.Request) (group string) {\n\treturn r.Header.Get(HttpHeaderKeyGroup)\n}\n\nfunc extractRouteGroupFromCookie(r *http.Request) (group string) {\n\tck, err := r.Cookie(CookieNameGroup)\n\tif err == nil {\n\t\tgroup = ck.Value\n\t}\n\treturn\n}\n\ntype ErrCode struct {\n\tint\n}\n\nconst ErrCodeKey = \"ErrCode\"\n\nfunc ContextSetErrCode(ctx context.Context, errCode int) {\n\terrCodeContext, ok := ctx.Value(ErrCodeKey).(*ErrCode)\n\tif !ok {\n\t\treturn\n\t}\n\terrCodeContext.int = errCode\n\tif span := xtrace.SpanFromContext(ctx); span != nil {\n\t\tspan.SetTag(\"errcode\", errCode)\n\t}\n}\n\nfunc contextWithErrCode(ctx context.Context, errCode int) context.Context {\n\tif span := xtrace.SpanFromContext(ctx); span != nil {\n\t\tspan.SetTag(\"errcode\", errCode)\n\t}\n\treturn context.WithValue(ctx, ErrCodeKey, &ErrCode{errCode})\n}\n\nfunc getErrCodeFromContext(ctx context.Context) int {\n\terrCode, ok := ctx.Value(ErrCodeKey).(*ErrCode)\n\tif !ok {\n\t\treturn 1\n\t}\n\treturn errCode.int\n}<commit_msg>add Blank line<commit_after>package rocserv\n\nimport (\n\t\"context\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"net\/http\/httputil\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gitlab.pri.ibanyu.com\/middleware\/seaweed\/xcontext\"\n\txprom \"gitlab.pri.ibanyu.com\/middleware\/seaweed\/xstat\/xmetric\/xprometheus\"\n\t\"gitlab.pri.ibanyu.com\/middleware\/seaweed\/xtrace\"\n\t\"gitlab.pri.ibanyu.com\/middleware\/util\/idl\/gen-go\/util\/thriftutil\"\n\n\t\"github.com\/gin-gonic\/gin\"\n\t\"github.com\/gin-gonic\/gin\/binding\"\n\t\"github.com\/julienschmidt\/httprouter\"\n\t\"github.com\/uber\/jaeger-client-go\"\n)\n\nconst (\n\tHttpHeaderKeyTraceID = \"ipalfish-trace-id\"\n\n\tWildCharacter = \":\"\n\n\tRoutePath = \"req-simple-path\"\n\n\tHttpHeaderKeyGroup = \"ipalfish-group\"\n\tHttpHeaderKeyHead = \"ipalfish-head\"\n\n\tCookieNameGroup = \"ipalfish_group\"\n)\n\n\/\/ HttpServer is the http server, Create an instance of GinServer, by using NewGinServer()\ntype HttpServer struct {\n\t*gin.Engine\n}\n\n\/\/ Context warp gin Context\ntype Context struct {\n\t*gin.Context\n}\n\n\/\/ HandlerFunc ...\ntype HandlerFunc func(*Context)\n\n\/\/ NewHttpServer create http server with gin\nfunc NewHttpServer() *HttpServer {\n\t\/\/ 实例化gin Server\n\trouter := gin.New()\n\trouter.Use(Recovery(), InjectFromRequest(), Metric(), Trace())\n\n\treturn &HttpServer{router}\n}\n\n\/\/ Use attachs a global middleware to the router\nfunc (s *HttpServer) Use(middleware ...HandlerFunc) {\n\ts.Engine.Use(mutilWrap(middleware...)...)\n}\n\n\/\/ GET is a shortcut for router.Handle(\"GET\", path, handle).\nfunc (s *HttpServer) GET(relativePath string, handlers ...HandlerFunc) {\n\tws := append([]gin.HandlerFunc{pathHook(relativePath)}, mutilWrap(handlers...)...)\n\ts.Engine.GET(relativePath, ws...)\n}\n\n\/\/ WGET is wrap httprouter handle to GET\nfunc (s *HttpServer) WGET(relativePath string, handlers ...httprouter.Handle) {\n\ts.GET(relativePath, MutilWrapHttpRouter(handlers...)...)\n}\n\n\/\/ POST is a shortcut for router.Handle(\"POST\", path, handle).\nfunc (s *HttpServer) POST(relativePath string, handlers ...HandlerFunc) {\n\tws := append([]gin.HandlerFunc{pathHook(relativePath)}, mutilWrap(handlers...)...)\n\ts.Engine.POST(relativePath, ws...)\n}\n\n\/\/ WPOST is wrap httprouter handle to POST\nfunc (s *HttpServer) WPOST(relativePath string, handlers ...httprouter.Handle) {\n\ts.POST(relativePath, MutilWrapHttpRouter(handlers...)...)\n}\n\n\/\/ PUT is a shortcut for router.Handle(\"PUT\", path, handle).\nfunc (s *HttpServer) PUT(relativePath string, handlers ...HandlerFunc) {\n\tws := append([]gin.HandlerFunc{pathHook(relativePath)}, mutilWrap(handlers...)...)\n\ts.Engine.PUT(relativePath, ws...)\n}\n\n\/\/ WPUT is wrap httprouter handle to PUT\nfunc (s *HttpServer) WPUT(relativePath string, handlers ...httprouter.Handle) {\n\ts.PUT(relativePath, MutilWrapHttpRouter(handlers...)...)\n}\n\n\/\/ Any registers a route that matches all the HTTP methods.\nfunc (s *HttpServer) Any(relativePath string, handlers ...HandlerFunc) {\n\tws := append([]gin.HandlerFunc{pathHook(relativePath)}, mutilWrap(handlers...)...)\n\ts.Engine.Any(relativePath, ws...)\n}\n\n\/\/ WAny is wrap httprouter handle to ANY\nfunc (s *HttpServer) WAny(relativePath string, handlers ...httprouter.Handle) {\n\ts.Any(relativePath, MutilWrapHttpRouter(handlers...)...)\n}\n\n\/\/ DELETE is a shortcut for router.Handle(\"DELETE\", path, handle).\nfunc (s *HttpServer) DELETE(relativePath string, handlers ...HandlerFunc) {\n\tws := append([]gin.HandlerFunc{pathHook(relativePath)}, mutilWrap(handlers...)...)\n\ts.Engine.DELETE(relativePath, ws...)\n}\n\n\/\/ WDELETE is wrap httprouter handle to DELETE\nfunc (s *HttpServer) WDELETE(relativePath string, handlers ...httprouter.Handle) {\n\ts.DELETE(relativePath, MutilWrapHttpRouter(handlers...)...)\n}\n\n\/\/ PATCH is a shortcut for router.Handle(\"PATCH\", path, handle).\nfunc (s *HttpServer) PATCH(relativePath string, handlers ...HandlerFunc) {\n\tws := append([]gin.HandlerFunc{pathHook(relativePath)}, mutilWrap(handlers...)...)\n\ts.Engine.PATCH(relativePath, ws...)\n}\n\n\/\/ WPATCH is wrap httprouter handle to PATCH\nfunc (s *HttpServer) WPATCH(relativePath string, handlers ...httprouter.Handle) {\n\ts.PATCH(relativePath, MutilWrapHttpRouter(handlers...)...)\n}\n\n\/\/ OPTIONS is a shortcut for router.Handle(\"OPTIONS\", path, handle).\nfunc (s *HttpServer) OPTIONS(relativePath string, handlers ...HandlerFunc) {\n\tws := append([]gin.HandlerFunc{pathHook(relativePath)}, mutilWrap(handlers...)...)\n\ts.Engine.OPTIONS(relativePath, ws...)\n}\n\n\/\/ WOPTIONS is wrap httprouter handle to OPTIONS\nfunc (s *HttpServer) WOPTIONS(relativePath string, handlers ...httprouter.Handle) {\n\ts.OPTIONS(relativePath, MutilWrapHttpRouter(handlers...)...)\n}\n\n\/\/ HEAD is a shortcut for router.Handle(\"HEAD\", path, handle).\nfunc (s *HttpServer) HEAD(relativePath string, handlers ...HandlerFunc) {\n\tws := append([]gin.HandlerFunc{pathHook(relativePath)}, mutilWrap(handlers...)...)\n\ts.Engine.HEAD(relativePath, ws...)\n}\n\n\/\/ WHEAD is wrap httprouter handle to HEAD\nfunc (s *HttpServer) WHEAD(relativePath string, handlers ...httprouter.Handle) {\n\ts.HEAD(relativePath, MutilWrapHttpRouter(handlers...)...)\n}\n\n\/\/ Bind checks the Content-Type to select a binding engine automatically\nfunc (c *Context) Bind(obj interface{}) error {\n\tb := binding.Default(c.Request.Method, c.ContentType())\n\treturn c.MustBindWith(obj, b)\n}\n\nfunc InjectFromRequest() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tctx := c.Request.Context()\n\t\tctx = extractThriftUtilContextControlFromRequest(ctx, c.Request)\n\t\tctx = extractThriftUtilContextHeadFromRequest(ctx, c.Request)\n\t\tc.Request = c.Request.WithContext(ctx)\n\t}\n}\n\n\/\/ Metric returns a metric middleware\nfunc Metric() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tctx := c.Request.Context()\n\t\tctx = contextWithErrCode(ctx,1)\n\t\tc.Request = c.Request.WithContext(ctx)\n\n\t\tnow := time.Now()\n\t\tc.Next()\n\t\tdt := time.Since(now)\n\n\t\terrCode := getErrCodeFromContext(c.Request.Context())\n\t\tif path, exist := c.Get(RoutePath); exist {\n\t\t\tif fun, ok := path.(string); ok {\n\t\t\t\tgroup, serviceName := GetGroupAndService()\n\t\t\t\t_metricAPIRequestCount.With(xprom.LabelGroupName, group, xprom.LabelServiceName, serviceName, xprom.LabelAPI, fun, xprom.LabelErrCode, strconv.Itoa(errCode)).Inc()\n\t\t\t\t_metricAPIRequestTime.With(xprom.LabelGroupName, group, xprom.LabelServiceName, serviceName, xprom.LabelAPI, fun, xprom.LabelErrCode, strconv.Itoa(errCode)).Observe(float64(dt \/ time.Millisecond))\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ Trace returns a trace middleware\nfunc Trace() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tspan := xtrace.SpanFromContext(c.Request.Context())\n\t\tif span == nil {\n\t\t\tnewSpan, ctx := xtrace.StartSpanFromContext(c.Request.Context(), c.Request.RequestURI)\n\t\t\tc.Request.WithContext(ctx)\n\t\t\tspan = newSpan\n\t\t}\n\t\tdefer span.Finish()\n\n\t\tif sc, ok := span.Context().(jaeger.SpanContext); ok {\n\t\t\tc.Writer.Header()[HttpHeaderKeyTraceID] = []string{fmt.Sprint(sc.TraceID())}\n\t\t}\n\n\t\tc.Next()\n\t}\n}\n\n\/\/ Recovery returns a middleware that recovers from any panics and writes a 500 if there was one.\nfunc Recovery() gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tdefer func() {\n\t\t\tvar rawReq []byte\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tconst size = 64 << 10\n\t\t\t\tbuf := make([]byte, size)\n\t\t\t\tbuf = buf[:runtime.Stack(buf, false)]\n\t\t\t\tif c.Request != nil {\n\t\t\t\t\trawReq, _ = httputil.DumpRequest(c.Request, false)\n\t\t\t\t}\n\t\t\t\tpl := fmt.Sprintf(\"http call panic: %s\\n%v\\n%s\\n\", string(rawReq), err, buf)\n\t\t\t\tfmt.Println(pl)\n\t\t\t\tc.AbortWithStatus(500)\n\t\t\t}\n\t\t}()\n\t\tc.Next()\n\t}\n}\n\nfunc pathHook(relativePath string) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\tvalues := strings.Split(relativePath, WildCharacter)\n\t\tc.Set(RoutePath, values[0])\n\t}\n}\n\nfunc mutilWrap(handlers ...HandlerFunc) []gin.HandlerFunc {\n\tvar h = make([]gin.HandlerFunc, len(handlers))\n\tfor k, v := range handlers {\n\t\th[k] = wrap(v)\n\t}\n\treturn h\n}\n\nfunc wrap(h HandlerFunc) gin.HandlerFunc {\n\treturn func(c *gin.Context) {\n\t\th(&Context{c})\n\t}\n}\n\n\/\/ MutilWrapHttpRouter wrap many httprouter handle to roc httpserver handle\nfunc MutilWrapHttpRouter(handlers ...httprouter.Handle) []HandlerFunc {\n\tvar h = make([]HandlerFunc, len(handlers))\n\tfor k, v := range handlers {\n\t\th[k] = WrapHttpRouter(v)\n\t}\n\treturn h\n}\n\n\/\/ WrapHttpRouter wrap httprouter handle to roc httpserver handle\nfunc WrapHttpRouter(handle httprouter.Handle) HandlerFunc {\n\treturn func(c *Context) {\n\t\tparams := make([]httprouter.Param, 0)\n\t\tfor param := range c.Params {\n\t\t\tparams = append(params, httprouter.Param{\n\t\t\t\tKey: c.Params[param].Key,\n\t\t\t\tValue: c.Params[param].Value,\n\t\t\t})\n\t\t}\n\t\thandle(c.Writer, c.Request, params)\n\t}\n}\n\nfunc extractThriftUtilContextControlFromRequest(ctx context.Context, req *http.Request) context.Context {\n\tvar group string\n\tif group = extractRouteGroupFromHost(req); group != \"\" {\n\t\treturn injectRouteGroupToContext(ctx, group)\n\t}\n\n\tif group = extractRouteGroupFromHeader(req); group != \"\" {\n\t\treturn injectRouteGroupToContext(ctx, group)\n\t}\n\n\tif group = extractRouteGroupFromCookie(req); group != \"\" {\n\t\treturn injectRouteGroupToContext(ctx, group)\n\t}\n\n\treturn injectRouteGroupToContext(ctx, xcontext.DefaultGroup)\n}\n\nfunc extractThriftUtilContextHeadFromRequest(ctx context.Context, req *http.Request) context.Context {\n\t\/\/ NOTE: 如果已经有了就先不覆盖\n\tval := ctx.Value(xcontext.ContextKeyHead)\n\tif val != nil {\n\t\treturn ctx\n\t}\n\n\theadJsonString := req.Header.Get(HttpHeaderKeyHead)\n\tvar head thriftutil.Head\n\t_ = json.Unmarshal([]byte(headJsonString), &head)\n\tctx = context.WithValue(ctx, xcontext.ContextKeyHead, &head)\n\treturn ctx\n}\n\nvar domainRouteRegexp = regexp.MustCompile(`(?P<group>.+)\\.group\\..+`)\n\nfunc extractRouteGroupFromHost(r *http.Request) (group string) {\n\tmatches := domainRouteRegexp.FindStringSubmatch(r.Host)\n\tnames := domainRouteRegexp.SubexpNames()\n\tfor i, _ := range matches {\n\t\tif names[i] == \"group\" {\n\t\t\tgroup = matches[i]\n\t\t}\n\t}\n\treturn\n}\n\nfunc injectRouteGroupToContext(ctx context.Context, group string) context.Context {\n\tcontrol := thriftutil.NewDefaultControl()\n\tcontrol.Route.Group = group\n\n\treturn context.WithValue(ctx, xcontext.ContextKeyControl, control)\n}\n\nfunc extractRouteGroupFromHeader(r *http.Request) (group string) {\n\treturn r.Header.Get(HttpHeaderKeyGroup)\n}\n\nfunc extractRouteGroupFromCookie(r *http.Request) (group string) {\n\tck, err := r.Cookie(CookieNameGroup)\n\tif err == nil {\n\t\tgroup = ck.Value\n\t}\n\treturn\n}\n\ntype ErrCode struct {\n\tint\n}\n\nconst ErrCodeKey = \"ErrCode\"\n\nfunc ContextSetErrCode(ctx context.Context, errCode int) {\n\terrCodeContext, ok := ctx.Value(ErrCodeKey).(*ErrCode)\n\tif !ok {\n\t\treturn\n\t}\n\terrCodeContext.int = errCode\n\tif span := xtrace.SpanFromContext(ctx); span != nil {\n\t\tspan.SetTag(\"errcode\", errCode)\n\t}\n}\n\nfunc contextWithErrCode(ctx context.Context, errCode int) context.Context {\n\tif span := xtrace.SpanFromContext(ctx); span != nil {\n\t\tspan.SetTag(\"errcode\", errCode)\n\t}\n\treturn context.WithValue(ctx, ErrCodeKey, &ErrCode{errCode})\n}\n\nfunc getErrCodeFromContext(ctx context.Context) int {\n\terrCode, ok := ctx.Value(ErrCodeKey).(*ErrCode)\n\tif !ok {\n\t\treturn 1\n\t}\n\treturn errCode.int\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/the42\/ogdat\/ckan\"\n\t\"github.com\/the42\/ogdat\/ogdatv21\"\n\t\"github.com\/the42\/ogdat\/schedule\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst AppID = \"a6545f8f-e0c9-4917-83c7-3e47bd1e0247\"\n\nvar logger *log.Logger\nvar db *DBConn\nvar portal *ckan.Portal\n\nvar resettdb = flag.Bool(\"resetdb\", false, \"Delete the tracking database. You will be prompted before actual deletion. Process will terminate afterwards.\")\nvar servetdb = flag.Bool(\"serve\", false, \"Start in watchdog mode. Process will continue to run until it receives a (clean shutdown) or gets killed\")\n\nfunc gotyesonprompt() bool {\n\tvar prompt string\n\tfmt.Scanf(\"%s\", &prompt)\n\tprompt = strings.ToLower(strings.TrimSpace(prompt))\n\tif len(prompt) > 0 {\n\t\treturn prompt[0] == 'y'\n\t}\n\treturn false\n}\n\nfunc getheartbeatinterval() int {\n\n\tif i, err := strconv.Atoi(os.Getenv(\"HEARTBEAT_INTERVAL\")); err == nil {\n\t\treturn i\n\t}\n\treturn 10 \/\/ Minutes\n}\n\nfunc getnumworkers() int {\n\tif i, err := strconv.Atoi(os.Getenv(\"PARALLEL_FETCHNO\")); err == nil {\n\t\treturn i\n\t}\n\treturn 4 \/\/ process four IDs in parallel\n}\n\nfunc getckanurl() (url string) {\n\n\tconst CKAN_URL = \"http:\/\/www.data.gv.at\/katalog\/api\/\"\n\n\turl = os.Getenv(\"CKAN_URL\")\n\tif url == \"\" {\n\t\turl = CKAN_URL\n\t}\n\treturn\n}\n\nfunc resetdb() {\n\tlogger.Println(\"Warning: Requesting database reset\")\n\tfmt.Print(\"\\n\\nALL RECORDED DATA IN DATABASE WILL BE DELETED.\\nDO YOU REALLY WANT TO PROCEED? [N,y]\\n\")\n\tif !gotyesonprompt() {\n\t\tfmt.Print(\"\\nABORTING\\n\\n\")\n\t\tlogger.Println(\"Info: Database reset canceled\")\n\t} else {\n\t\tif err := db.ResetDatabase(); err != nil {\n\t\t\ts := fmt.Sprintf(\"Database reset failed: %s\", err)\n\t\t\tfmt.Println(s)\n\t\t\tlogger.Panic(s)\n\t\t}\n\t}\n}\n\nfunc stringslicetoiface(ss []string) []interface{} {\n\tslice := make([]interface{}, len(ss))\n\tfor i, v := range ss {\n\t\tslice[i] = v\n\t}\n\treturn slice\n}\n\nfunc ifaceslicetostring(ifs []interface{}) []string {\n\tslice := make([]string, len(ifs))\n\tfor i, v := range ifs {\n\t\ts, ok := v.(string)\n\t\tif !ok {\n\t\t\tpanic(\"Interface value not of string type\")\n\t\t}\n\t\tslice[i] = s\n\t}\n\treturn slice\n}\n\nfunc processmetadataids(conn *DBConn, processids []string) error {\n\n\tnums := len(processids)\n\tfor idx, id := range processids {\n\n\t\tlogger.Printf(\"%4d \/ %4d : processing %v\\n\", idx+1, nums, id)\n\n\t\tmdjson, err := portal.GetDatasetStreamforID(id, true)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Cannot fetch JSON for ID %v: %s\", id, err)\n\t\t}\n\n\t\tmd, err := ogdatv21.MetadatafromJSONStream(mdjson)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Cannot access metadata for ID %v: %s\", id, err)\n\t\t}\n\n\t\tdbdatasetid, isnew, err := conn.InsertOrUpdateMetadataInfo(id, md)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"InsertOrUpdateMetadataInfo: database error at id %v: %s\", id, err)\n\t\t}\n\n\t\tmessages, err := md.Check(true)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Metadata check error for id %v: %s\", id, err)\n\t\t}\n\n\t\tif err = conn.ProtocollCheck(dbdatasetid, isnew, messages); err != nil {\n\t\t\treturn fmt.Errorf(\"ProtocollCheck: database error at id %v: %s\", id, err)\n\t\t}\n\t}\n\tlogger.Printf(\"Worker finished processing %d entries\", nums)\n\treturn nil\n}\n\nfunc heartbeat(interval int) {\n\tfor {\n\t\tdbconn := GetDatabaseConnection()\n\t\tdb := &DBConn{dbconn, AppID}\n\t\tif err := db.HeartBeat(); err != nil {\n\t\t\tlogger.Panicln(err)\n\t\t}\n\t\tdbconn.Close()\n\t\tlogger.Println(\"Watchdog alive\")\n\t\ttime.Sleep(time.Duration(interval) * time.Minute)\n\t}\n}\n\nfunc mymain() int {\n\n\tif flag.NFlag() == 0 {\n\t\tfmt.Println(\"No command line flags given. Usage:\")\n\t\tflag.PrintDefaults()\n\t\tlogger.Panicln(\"Fatal: No command line flags given\")\n\t}\n\n\tlockfile := NewLockfile(lockfilename)\n\tdefer lockfile.Delete()\n\tlockfile.WriteInfo()\n\n\t\/\/ When the process gets killed, try to delete the lock file\n\tinterrupt := make(chan os.Signal)\n\tsignal.Notify(interrupt, os.Interrupt)\n\tgo func() {\n\t\t<-interrupt\n\t\tlogger.Println(\"Terminate requested\")\n\t\tlockfile.Delete()\n\t\tos.Exit(10)\n\t}()\n\n\tdbconnection := GetDatabaseConnection()\n\tdb = &DBConn{dbconnection, AppID}\n\tdefer dbconnection.Close()\n\n\tif *resettdb {\n\t\tresetdb()\n\t\tlogger.Println(\"Info: Earyl exit due to maintainance switches\")\n\t\treturn 2\n\t}\n\n\tif *servetdb {\n\n\t\tportal = ckan.NewDataPortalAPIEndpoint(getckanurl(), \"2\/\")\n\t\theartbeatinterval := getheartbeatinterval()\n\t\tnumworkers := getnumworkers()\n\n\t\tlogger.Printf(\"Doing %d jobs in parallel\\n\", numworkers)\n\t\tgo heartbeat(heartbeatinterval)\n\n\t\tfor {\n\t\t\thit, err := db.GetLastHit()\n\t\t\tif err != nil {\n\t\t\t\ts := fmt.Sprintf(\"Cannot read last DBHit: %s\", err)\n\t\t\t\tfmt.Println(s)\n\t\t\t\tlogger.Panic(s)\n\t\t\t}\n\n\t\t\tvar processids []string\n\t\t\tif hit == nil {\n\t\t\t\tlogger.Println(\"No checkpoint in database found, getting all datasets\")\n\t\t\t\tprocessids, err = portal.GetAllMetaDataIDs()\n\t\t\t} else {\n\t\t\t\tlogger.Printf(\"Getting changed datasets since %s\\n\", hit)\n\t\t\t\tprocessids, err = portal.GetChangedPackageIDsSince(*hit, numworkers)\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tlogger.Panic(err)\n\n\t\t\t}\n\n\t\t\tif anzids := len(processids); anzids > 0 {\n\n\t\t\t\ttx, err := dbconnection.Begin()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Panicln(\"Cannot create database transaction\")\n\t\t\t\t}\n\t\t\t\tscheduler := schedule.New(numworkers)\n\t\t\t\tconn := &DBConn{DBer: tx, appid: AppID}\n\t\t\t\tf := func(slice []interface{}) error {\n\t\t\t\t\tif err := processmetadataids(conn, ifaceslicetostring(slice)); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tdb.LogMessage(fmt.Sprintf(\"%d Medadaten werden verarbeitet\", anzids), StateOk, true)\n\t\t\t\tworkchannel := scheduler.Schedule(f, stringslicetoiface(processids))\n\t\t\t\tselect {\n\t\t\t\tcase workreply := <-workchannel:\n\t\t\t\t\tif err := workreply.Err; err != nil {\n\t\t\t\t\t\tlogger.Panicln(\"Scheduler didn't return success:\", err)\n\t\t\t\t\t} else if workreply.Code == schedule.StateFinish {\n\t\t\t\t\t\ttx.Commit()\n\t\t\t\t\t\tdb.LogMessage(\"Idle\", StateOk, true)\n\t\t\t\t\t\tlogger.Printf(\"Finished processing %d datasets\\n\", anzids)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\t\/\/ When there was nothing to do, wait for heartbeatinterval time\n\t\t\t\tlogger.Printf(\"Nothing to do, sleeping for %d minutes\\n\", heartbeatinterval)\n\t\t\t\ttime.Sleep(time.Duration(heartbeatinterval) * time.Minute)\n\t\t\t}\n\t\t}\n\t}\n\treturn 0\n}\n\nfunc main() {\n\tos.Exit(mymain())\n}\n\nfunc init() {\n\tlogger = log.New(os.Stderr, filepath.Base(os.Args[0])+\": \", log.LstdFlags)\n\tflag.Parse()\n}\n<commit_msg>log to self: what to do about links<commit_after>package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/the42\/ogdat\/ckan\"\n\t\"github.com\/the42\/ogdat\/ogdatv21\"\n\t\"github.com\/the42\/ogdat\/schedule\"\n\t\"log\"\n\t\"os\"\n\t\"os\/signal\"\n\t\"path\/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst AppID = \"a6545f8f-e0c9-4917-83c7-3e47bd1e0247\"\n\nvar logger *log.Logger\nvar db *DBConn\nvar portal *ckan.Portal\n\nvar resettdb = flag.Bool(\"resetdb\", false, \"Delete the tracking database. You will be prompted before actual deletion. Process will terminate afterwards.\")\nvar servetdb = flag.Bool(\"serve\", false, \"Start in watchdog mode. Process will continue to run until it receives a (clean shutdown) or gets killed\")\n\nfunc gotyesonprompt() bool {\n\tvar prompt string\n\tfmt.Scanf(\"%s\", &prompt)\n\tprompt = strings.ToLower(strings.TrimSpace(prompt))\n\tif len(prompt) > 0 {\n\t\treturn prompt[0] == 'y'\n\t}\n\treturn false\n}\n\nfunc getheartbeatinterval() int {\n\n\tif i, err := strconv.Atoi(os.Getenv(\"HEARTBEAT_INTERVAL\")); err == nil {\n\t\treturn i\n\t}\n\treturn 60 \/\/ Minutes\n}\n\nfunc getnumworkers() int {\n\tif i, err := strconv.Atoi(os.Getenv(\"PARALLEL_FETCHNO\")); err == nil {\n\t\treturn i\n\t}\n\treturn 4 \/\/ process four IDs in parallel\n}\n\nfunc getckanurl() (url string) {\n\n\tconst CKAN_URL = \"http:\/\/www.data.gv.at\/katalog\/api\/\"\n\n\turl = os.Getenv(\"CKAN_URL\")\n\tif url == \"\" {\n\t\turl = CKAN_URL\n\t}\n\treturn\n}\n\nfunc resetdb() {\n\tlogger.Println(\"Warning: Requesting database reset\")\n\tfmt.Print(\"\\n\\nALL RECORDED DATA IN DATABASE WILL BE DELETED.\\nDO YOU REALLY WANT TO PROCEED? [N,y]\\n\")\n\tif !gotyesonprompt() {\n\t\tfmt.Print(\"\\nABORTING\\n\\n\")\n\t\tlogger.Println(\"Info: Database reset canceled\")\n\t} else {\n\t\tif err := db.ResetDatabase(); err != nil {\n\t\t\ts := fmt.Sprintf(\"Database reset failed: %s\", err)\n\t\t\tfmt.Println(s)\n\t\t\tlogger.Panic(s)\n\t\t}\n\t}\n}\n\nfunc stringslicetoiface(ss []string) []interface{} {\n\tslice := make([]interface{}, len(ss))\n\tfor i, v := range ss {\n\t\tslice[i] = v\n\t}\n\treturn slice\n}\n\nfunc ifaceslicetostring(ifs []interface{}) []string {\n\tslice := make([]string, len(ifs))\n\tfor i, v := range ifs {\n\t\ts, ok := v.(string)\n\t\tif !ok {\n\t\t\tpanic(\"Interface value not of string type\")\n\t\t}\n\t\tslice[i] = s\n\t}\n\treturn slice\n}\n\nfunc processmetadataids(conn *DBConn, processids []string) error {\n\n\tnums := len(processids)\n\tfor idx, id := range processids {\n\n\t\tlogger.Printf(\"%4d \/ %4d : processing %v\\n\", idx+1, nums, id)\n\n\t\tmdjson, err := portal.GetDatasetStreamforID(id, true)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Cannot fetch JSON for ID %v: %s\", id, err)\n\t\t}\n\n\t\tmd, err := ogdatv21.MetadatafromJSONStream(mdjson)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Cannot access metadata for ID %v: %s\", id, err)\n\t\t}\n\n\t\tdbdatasetid, isnew, err := conn.InsertOrUpdateMetadataInfo(id, md)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"InsertOrUpdateMetadataInfo: database error at id %v: %s\", id, err)\n\t\t}\n\n\t\tmessages, err := md.Check(true)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Metadata check error for id %v: %s\", id, err)\n\t\t}\n\n\t\tif err = conn.ProtocollCheck(dbdatasetid, isnew, messages); err != nil {\n\t\t\treturn fmt.Errorf(\"ProtocollCheck: database error at id %v: %s\", id, err)\n\t\t}\n\t}\n\tlogger.Printf(\"Worker finished processing %d entries\", nums)\n\treturn nil\n}\n\nfunc heartbeat(interval int) {\n\tfor {\n\t\tdbconn := GetDatabaseConnection()\n\t\tdb := &DBConn{dbconn, AppID}\n\t\tif err := db.HeartBeat(); err != nil {\n\t\t\tlogger.Panicln(err)\n\t\t}\n\t\tdbconn.Close()\n\t\tlogger.Println(\"Watchdog alive\")\n\t\ttime.Sleep(time.Duration(interval) * time.Minute)\n\t}\n}\n\nfunc mymain() int {\n\n\tif flag.NFlag() == 0 {\n\t\tfmt.Println(\"No command line flags given. Usage:\")\n\t\tflag.PrintDefaults()\n\t\tlogger.Panicln(\"Fatal: No command line flags given\")\n\t}\n\n\tlockfile := NewLockfile(lockfilename)\n\tdefer lockfile.Delete()\n\tlockfile.WriteInfo()\n\n\t\/\/ When the process gets killed, try to delete the lock file\n\tinterrupt := make(chan os.Signal)\n\tsignal.Notify(interrupt, os.Interrupt)\n\tgo func() {\n\t\t<-interrupt\n\t\tlogger.Println(\"Terminate requested\")\n\t\tlockfile.Delete()\n\t\tos.Exit(10)\n\t}()\n\n\tdbconnection := GetDatabaseConnection()\n\tdb = &DBConn{dbconnection, AppID}\n\tdefer dbconnection.Close()\n\n\tif *resettdb {\n\t\tresetdb()\n\t\tlogger.Println(\"Info: Earyl exit due to maintainance switches\")\n\t\treturn 2\n\t}\n\n\tif *servetdb {\n\n\t\tportal = ckan.NewDataPortalAPIEndpoint(getckanurl(), \"2\/\")\n\t\theartbeatinterval := getheartbeatinterval()\n\t\tnumworkers := getnumworkers()\n\n\t\tlogger.Printf(\"Doing %d jobs in parallel\\n\", numworkers)\n\t\tgo heartbeat(heartbeatinterval)\n\t\t\n\t\turlcheckpointchan := time.Tick(1 * time.Hour * 24)\n\n\t\tfor {\n\t\t\thit, err := db.GetLastHit()\n\t\t\tif err != nil {\n\t\t\t\ts := fmt.Sprintf(\"Cannot read last DBHit: %s\", err)\n\t\t\t\tfmt.Println(s)\n\t\t\t\tlogger.Panic(s)\n\t\t\t}\n\n\t\t\tvar processids []string\n\t\t\tif hit == nil {\n\t\t\t\tlogger.Println(\"No checkpoint in database found, getting all datasets\")\n\t\t\t\tprocessids, err = portal.GetAllMetaDataIDs()\n\t\t\t} else {\n\t\t\t\tlogger.Printf(\"Getting changed datasets since %s\\n\", hit)\n\t\t\t\tprocessids, err = portal.GetChangedPackageIDsSince(*hit, numworkers)\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t\tlogger.Panic(err)\n\n\t\t\t}\n\n\t\t\tif anzids := len(processids); anzids > 0 {\n\n\t\t\t\ttx, err := dbconnection.Begin()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogger.Panicln(\"Cannot create database transaction\")\n\t\t\t\t}\n\t\t\t\tscheduler := schedule.New(numworkers)\n\t\t\t\tconn := &DBConn{DBer: tx, appid: AppID}\n\t\t\t\tf := func(slice []interface{}) error {\n\t\t\t\t\tif err := processmetadataids(conn, ifaceslicetostring(slice)); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tdb.LogMessage(fmt.Sprintf(\"%d Medadaten werden verarbeitet\", anzids), StateOk, true)\n\t\t\t\tworkchannel := scheduler.Schedule(f, stringslicetoiface(processids))\n\t\t\t\tselect {\n\t\t\t\tcase workreply := <-workchannel:\n\t\t\t\t\tif err := workreply.Err; err != nil {\n\t\t\t\t\t\tlogger.Panicln(\"Scheduler didn't return success:\", err)\n\t\t\t\t\t} else if workreply.Code == schedule.StateFinish {\n\t\t\t\t\t\ttx.Commit()\n\t\t\t\t\t\tdb.LogMessage(\"Idle\", StateOk, true)\n\t\t\t\t\t\tlogger.Printf(\"Finished processing %d datasets\\n\", anzids)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\t\/\/ When there was nothing to do, wait for heartbeatinterval time\n\t\t\t\tlogger.Printf(\"Nothing to do, sleeping for %d minutes\\n\", heartbeatinterval)\n\t\t\t\ttime.Sleep(time.Duration(heartbeatinterval) * time.Minute)\n\t\t\t}\n\t\t\t\n\t\t\t\/\/ Check urls once a day, and when there is an error, report\n\t\t\tfor _ = range urlcheckpointchan {\n\t\t\t\t\/\/ get all urls to check\n\t\t\t\t\/\/ = jene, die als fetchable eingestuft sind nicht mit einem strukturellen Problem belegt sind\n\t\t\t\t\/\/ = und die im bezug auf den datensatz am aktuellesten im statusrecord sind (group by time)\n\t\t\t\t\n\t\t\t\t\/\/ schedule check:\n\t\t\t\t\/\/ getable: OK <-- what to do, when last check was 'not getable'?\n\t\t\t\t\/\/ report result\n\t\t\t}\n\t\t}\n\t}\n\treturn 0\n}\n\nfunc main() {\n\tos.Exit(mymain())\n}\n\nfunc init() {\n\tlogger = log.New(os.Stderr, filepath.Base(os.Args[0])+\": \", log.LstdFlags)\n\tflag.Parse()\n}\n<|endoftext|>"} {"text":"<commit_before>package cloudcontroller_test\n\nimport (\n\t\"time\"\n\n\t. \"code.cloudfoundry.org\/cli\/api\/cloudcontroller\"\n\t\"code.cloudfoundry.org\/cli\/api\/cloudcontroller\/ccerror\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Minimum Version Check\", func() {\n\tDescribe(\"MinimumAPIVersionCheck\", func() {\n\t\tminimumVersion := \"1.0.0\"\n\t\tContext(\"current version is greater than min\", func() {\n\t\t\tIt(\"does not return an error\", func() {\n\t\t\t\tcurrentVersion := \"1.0.1\"\n\t\t\t\terr := MinimumAPIVersionCheck(currentVersion, minimumVersion)\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t})\n\t\t})\n\n\t\tContext(\"current version is less than min\", func() {\n\t\t\tIt(\"does return an error\", func() {\n\t\t\t\tcurrentVersion := \"1.0.0-alpha.5\"\n\t\t\t\terr := MinimumAPIVersionCheck(currentVersion, minimumVersion)\n\t\t\t\tExpect(err).To(MatchError(ccerror.MinimumAPIVersionNotMetError{\n\t\t\t\t\tCurrentVersion: currentVersion,\n\t\t\t\t\tMinimumVersion: minimumVersion,\n\t\t\t\t}))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"minimum version is empty\", func() {\n\t\t\tIt(\"does not return an error\", func() {\n\t\t\t\terr := MinimumAPIVersionCheck(\"2.0.0\", \"\")\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t})\n\t\t})\n\t})\n\n\tDescribe(\"Minimum version numbers\", func() {\n\t\tIt(\"are up to date\", func() {\n\t\t\texpirationDate, err := time.Parse(time.RFC3339, \"2020-01-01T00:00:00Z\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(time.Now().Before(expirationDate)).To(BeTrue(), \"Check https:\/\/github.com\/cloudfoundry\/cli\/wiki\/Versioning-Policy#cf-cli-minimum-supported-version and update versions if necessary\")\n\t\t})\n\t})\n})\n<commit_msg>Disable \"time bomb\" test<commit_after>package cloudcontroller_test\n\nimport (\n\t\"time\"\n\n\t. \"code.cloudfoundry.org\/cli\/api\/cloudcontroller\"\n\t\"code.cloudfoundry.org\/cli\/api\/cloudcontroller\/ccerror\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n)\n\nvar _ = Describe(\"Minimum Version Check\", func() {\n\tDescribe(\"MinimumAPIVersionCheck\", func() {\n\t\tminimumVersion := \"1.0.0\"\n\t\tContext(\"current version is greater than min\", func() {\n\t\t\tIt(\"does not return an error\", func() {\n\t\t\t\tcurrentVersion := \"1.0.1\"\n\t\t\t\terr := MinimumAPIVersionCheck(currentVersion, minimumVersion)\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t})\n\t\t})\n\n\t\tContext(\"current version is less than min\", func() {\n\t\t\tIt(\"does return an error\", func() {\n\t\t\t\tcurrentVersion := \"1.0.0-alpha.5\"\n\t\t\t\terr := MinimumAPIVersionCheck(currentVersion, minimumVersion)\n\t\t\t\tExpect(err).To(MatchError(ccerror.MinimumAPIVersionNotMetError{\n\t\t\t\t\tCurrentVersion: currentVersion,\n\t\t\t\t\tMinimumVersion: minimumVersion,\n\t\t\t\t}))\n\t\t\t})\n\t\t})\n\n\t\tContext(\"minimum version is empty\", func() {\n\t\t\tIt(\"does not return an error\", func() {\n\t\t\t\terr := MinimumAPIVersionCheck(\"2.0.0\", \"\")\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t})\n\t\t})\n\t})\n\n\tXDescribe(\"Minimum version numbers\", func() {\n\t\tIt(\"are up to date\", func() {\n\t\t\texpirationDate, err := time.Parse(time.RFC3339, \"2020-01-01T00:00:00Z\")\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\tExpect(time.Now().Before(expirationDate)).To(BeTrue(), \"Check https:\/\/github.com\/cloudfoundry\/cli\/wiki\/Versioning-Policy#cf-cli-minimum-supported-version and update versions if necessary\")\n\t\t})\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package deepcopier\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/guregu\/null\"\n\t\"github.com\/lib\/pq\"\n\t\"github.com\/oleiade\/reflections\"\n)\n\nconst (\n\t\/\/ TagName is struct field tag name.\n\tTagName = \"deepcopier\"\n\n\t\/\/ FieldOptionName is the from field option name for struct tag.\n\tFieldOptionName = \"field\"\n\n\t\/\/ ContextOptionName is the context option name for struct tag.\n\tContextOptionName = \"context\"\n\n\t\/\/ SkipOptionName is the skip option name for struct tag.\n\tSkipOptionName = \"skip\"\n)\n\n\/\/ DeepCopier deep copies a struct to\/from a struct.\ntype DeepCopier struct {\n\tSource interface{}\n\tDestination interface{}\n\tTagged interface{}\n\tContext map[string]interface{}\n\tReversed bool\n}\n\n\/\/ FieldOptions contains options passed to SetField method.\ntype FieldOptions struct {\n\tSourceField string\n\tDestinationField string\n\tWithContext bool\n\tSkip bool\n}\n\n\/\/ NewFieldOptions returns a FieldOptions instance for the given instance's field.\nfunc NewFieldOptions(instance interface{}, field string, reversed bool) *FieldOptions {\n\tfieldOptions := &FieldOptions{\n\t\tSourceField: field,\n\t\tDestinationField: field,\n\t\tWithContext: false,\n\t\tSkip: false,\n\t}\n\n\ttagOptions, _ := reflections.GetFieldTag(instance, field, TagName)\n\n\tif tagOptions == \"\" {\n\t\treturn fieldOptions\n\t}\n\n\topts := GetTagOptions(tagOptions)\n\n\tif _, ok := opts[FieldOptionName]; ok {\n\t\tfieldName := opts[FieldOptionName]\n\n\t\tif !reversed {\n\t\t\tfieldOptions.SourceField = fieldName\n\t\t} else {\n\t\t\tfieldOptions.DestinationField = fieldName\n\t\t}\n\t}\n\n\tif _, ok := opts[ContextOptionName]; ok {\n\t\tfieldOptions.WithContext = true\n\t}\n\n\tif _, ok := opts[SkipOptionName]; ok {\n\t\tfieldOptions.Skip = true\n\t}\n\n\treturn fieldOptions\n}\n\n\/\/ Copy sets the source.\nfunc Copy(source interface{}) *DeepCopier {\n\treturn &DeepCopier{\n\t\tSource: source,\n\t\tReversed: false,\n\t}\n}\n\n\/\/ To sets the given tagged struct as destination struct.\n\/\/ Source -> Destination\nfunc (dc *DeepCopier) To(tagged interface{}) error {\n\tdc.Destination = tagged\n\tdc.Tagged = tagged\n\n\treturn dc.ProcessCopy()\n}\n\n\/\/ From sets the given tagged struct as source and the current source as destination.\n\/\/ Source <- Destination\nfunc (dc *DeepCopier) From(tagged interface{}) error {\n\tdc.Destination = dc.Source\n\tdc.Source = tagged\n\tdc.Tagged = tagged\n\tdc.Reversed = true\n\n\treturn dc.ProcessCopy()\n}\n\n\/\/ ProcessCopy processes copy.\nfunc (dc *DeepCopier) ProcessCopy() error {\n\tvar (\n\t\tfields = []string{}\n\t\ttaggedValue = reflect.ValueOf(dc.Tagged).Elem()\n\t\ttaggedType = taggedValue.Type()\n\t)\n\n\tfor i := 0; i < taggedValue.NumField(); i++ {\n\t\tvar (\n\t\t\tfv = taggedValue.Field(i)\n\t\t\tft = taggedType.Field(i)\n\t\t)\n\n\t\t\/\/ Embedded struct\n\t\tif ft.Anonymous {\n\t\t\tf, _ := reflections.Fields(fv.Interface())\n\t\t\tfields = append(fields, f...)\n\t\t} else {\n\t\t\tfields = append(fields, ft.Name)\n\t\t}\n\t}\n\n\tfor _, field := range fields {\n\t\tif err := dc.SetField(NewFieldOptions(dc.Tagged, field, dc.Reversed)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ -----------------------------------------------------------------------------\n\/\/ Options\n\/\/ -----------------------------------------------------------------------------\n\n\/\/ WithContext injects the given context into the builder instance.\nfunc (dc *DeepCopier) WithContext(context map[string]interface{}) *DeepCopier {\n\tdc.Context = context\n\treturn dc\n}\n\n\/\/ -----------------------------------------------------------------------------\n\/\/ Struct tags\n\/\/ -----------------------------------------------------------------------------\n\n\/\/ GetTagOptions parses deepcopier tag field and returns options.\nfunc GetTagOptions(value string) map[string]string {\n\toptions := map[string]string{}\n\n\tfor _, opt := range strings.Split(value, \";\") {\n\t\to := strings.Split(opt, \":\")\n\n\t\t\/\/ deepcopier:\"keyword; without; value;\"\n\t\tif len(o) == 1 {\n\t\t\tk := o[0]\n\t\t\toptions[k] = \"\"\n\t\t}\n\n\t\t\/\/ deepcopier:\"key:value; anotherkey:anothervalue\"\n\t\tif len(o) == 2 {\n\t\t\tk, v := o[0], o[1]\n\t\t\tk = strings.TrimSpace(k)\n\t\t\tv = strings.TrimSpace(v)\n\t\t\toptions[k] = v\n\t\t}\n\t}\n\treturn options\n}\n\n\/\/ -----------------------------------------------------------------------------\n\/\/ Field Setters\n\/\/ -----------------------------------------------------------------------------\n\n\/\/ SetField sets the value of the given field.\nfunc (dc *DeepCopier) SetField(options *FieldOptions) error {\n\tif options.Skip {\n\t\treturn nil\n\t}\n\n\tif dc.Reversed {\n\t\thas, _ := reflections.HasField(dc.Destination, options.DestinationField)\n\t\tif !has {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\thas, _ := reflections.HasField(dc.Source, options.SourceField)\n\tif !has {\n\t\terr := dc.HandleMethod(options)\n\t\tif err != nil {\n\t\t\thas, _ = reflections.HasField(dc.Destination, options.DestinationField)\n\t\t\tif has {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tkind, _ := reflections.GetFieldKind(dc.Source, options.SourceField)\n\tif kind == reflect.Struct {\n\t\tif err := dc.HandleStructField(options); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\tif err := dc.HandleField(options); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ SetFieldValue Sets the given value to the given field.\nfunc (dc *DeepCopier) SetFieldValue(entity interface{}, name string, value reflect.Value) error {\n\tkind := value.Kind()\n\n\tif kind == reflect.Ptr {\n\t\tif value.IsNil() {\n\t\t\treturn nil\n\t\t}\n\t\tvalue = value.Elem()\n\t\tkind = value.Kind()\n\t}\n\n\t\/\/ Maps\n\tif kind == reflect.Map {\n\t\tswitch v := value.Interface().(type) {\n\t\tcase map[string]interface{}, map[string]string, map[string]map[string]string, map[string]map[string]map[string]string:\n\t\t\tif err := reflections.SetField(entity, name, v); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ Structs\n\tif kind == reflect.Struct {\n\t\tswitch v := value.Interface().(type) {\n\t\tcase time.Time, pq.NullTime, null.String:\n\t\t\tif err := reflections.SetField(entity, name, v); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ Slices\n\tif kind == reflect.Slice {\n\t\tswitch v := value.Interface().(type) {\n\t\tcase []int8, []int16, []int32, []int64, []int, []uint8, []uint16, []uint32, []uint64, []uint, []float32, []float64, []string, []bool:\n\t\t\tif err := reflections.SetField(entity, name, v); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ Reflect\n\tswitch kind {\n\tcase reflect.Int8:\n\t\tif err := reflections.SetField(entity, name, int8(value.Int())); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Int16:\n\t\tif err := reflections.SetField(entity, name, int16(value.Int())); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Int32:\n\t\tif err := reflections.SetField(entity, name, int32(value.Int())); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Int64:\n\t\tif err := reflections.SetField(entity, name, value.Int()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Int:\n\t\tif err := reflections.SetField(entity, name, int(value.Int())); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Uint8:\n\t\tif err := reflections.SetField(entity, name, uint8(value.Uint())); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Uint16:\n\t\tif err := reflections.SetField(entity, name, uint16(value.Uint())); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Uint32:\n\t\tif err := reflections.SetField(entity, name, uint32(value.Uint())); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Uint64:\n\t\tif err := reflections.SetField(entity, name, value.Uint()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Uint:\n\t\tif err := reflections.SetField(entity, name, uint(value.Uint())); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Float32:\n\t\tif err := reflections.SetField(entity, name, float32(value.Float())); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Float64:\n\t\tif err := reflections.SetField(entity, name, value.Float()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.String:\n\t\tif err := reflections.SetField(entity, name, value.String()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Bool:\n\t\tif err := reflections.SetField(entity, name, value.Bool()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn nil\n}\n\n\/\/ -----------------------------------------------------------------------------\n\/\/ Field Type Handlers\n\/\/ -----------------------------------------------------------------------------\n\n\/\/ HandleStructField sets the value for the given supported struct field.\nfunc (dc *DeepCopier) HandleStructField(options *FieldOptions) error {\n\tf, err := reflections.GetField(dc.Source, options.SourceField)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch v := f.(type) {\n\tcase pq.NullTime:\n\t\tif v.Valid {\n\t\t\tif err := reflections.SetField(dc.Destination, options.DestinationField, &v.Time); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\tcase time.Time:\n\t\tif err := reflections.SetField(dc.Destination, options.DestinationField, v); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ HandleField sets value for the given field.\nfunc (dc *DeepCopier) HandleField(options *FieldOptions) error {\n\tv, err := reflections.GetField(dc.Source, options.SourceField)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvalue := reflect.ValueOf(v)\n\tif err := dc.SetFieldValue(dc.Destination, options.DestinationField, value); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ HandleMethod tries to call method on model and sets result in resource field.\nfunc (dc *DeepCopier) HandleMethod(options *FieldOptions) error {\n\tif dc.Reversed {\n\t\treturn nil\n\t}\n\n\tmethod := reflect.ValueOf(dc.Source).MethodByName(options.SourceField)\n\tif !method.IsValid() {\n\t\treturn fmt.Errorf(\"Method %s does not exist\", options.SourceField)\n\t}\n\n\tvar results []reflect.Value\n\tif options.WithContext {\n\t\tresults = method.Call([]reflect.Value{reflect.ValueOf(dc.Context)})\n\t} else {\n\t\tresults = method.Call([]reflect.Value{})\n\t}\n\n\tif err := dc.SetFieldValue(dc.Destination, options.DestinationField, results[0]); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ -----------------------------------------------------------------------------\n\/\/ Refacto\n\/\/ -----------------------------------------------------------------------------\n\n\/\/ Options are copier options.\ntype Options struct {\n\t\/\/ Context given to WithContext() method.\n\tContext map[string]interface{}\n}\n\nfunc getMethods(t reflect.Type) []string {\n\tvar methods []string\n\tfor i := 0; i < t.NumMethod(); i++ {\n\t\tmethods = append(methods, t.Method(i).Name)\n\t}\n\treturn methods\n}\n\n\/\/ InStringSlice checks if the given string is in the given slice of string\nfunc InStringSlice(haystack []string, needle string) bool {\n\tfor _, str := range haystack {\n\t\tif needle == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Copier is the brand new way to process copy.\nfunc Copier(dst interface{}, src interface{}, args ...Options) error {\n\tvar (\n\t\toptions = Options{}\n\t\tsrcValue = reflect.Indirect(reflect.ValueOf(src))\n\t\tsrcType = srcValue.Type()\n\t\tsrcMethods = getMethods(srcType)\n\t\tdstValue = reflect.Indirect(reflect.ValueOf(dst))\n\t)\n\n\t\/\/ Pointer only for receiver\n\tif !dstValue.CanAddr() {\n\t\treturn errors.New(\"dst value is unaddressable\")\n\t}\n\n\tif len(args) > 0 {\n\t\toptions = args[0]\n\t}\n\n\tfor i := 0; i < srcValue.NumField(); i++ {\n\t\tvar (\n\t\t\tsrcFieldValue = srcValue.Field(i)\n\t\t\tsrcFieldType = srcValue.Type().Field(i)\n\t\t\tsrcFieldName = srcFieldType.Name\n\t\t)\n\n\t\tif !srcFieldValue.IsValid() {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor ii := 0; ii < dstValue.NumField(); ii++ {\n\t\t\tvar (\n\t\t\t\tdstFieldValue = dstValue.Field(ii)\n\t\t\t\tdstFieldType = dstValue.Type().Field(ii)\n\t\t\t\tdstFieldName = dstFieldType.Name\n\t\t\t\tdstFieldTag = dstFieldType.Tag.Get(TagName)\n\t\t\t\t\/\/ Options\n\t\t\t\tfieldName = dstFieldName\n\t\t\t\twithContext = false\n\t\t\t)\n\n\t\t\ttagOptions := GetTagOptions(dstFieldTag)\n\n\t\t\t\/\/ If skip option is set, bypass copy.\n\t\t\tif v, ok := tagOptions[SkipOptionName]; ok && v != \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Get real source field \/ method name src struct tag.\n\t\t\tif v, ok := tagOptions[FieldOptionName]; ok && v != \"\" {\n\t\t\t\tfieldName = v\n\t\t\t}\n\n\t\t\t\/\/ Give context as function argument?\n\t\t\tif v, ok := tagOptions[ContextOptionName]; ok && v != \"\" {\n\t\t\t\twithContext = true\n\t\t\t}\n\n\t\t\t\/\/ Method() -> field -- TODO: handle WithContext\n\t\t\tif InStringSlice(srcMethods, fieldName) {\n\t\t\t\tmethod := reflect.ValueOf(src).MethodByName(fieldName)\n\n\t\t\t\tif !method.IsValid() {\n\t\t\t\t\treturn fmt.Errorf(\"method %v in source is not valid\", fieldName)\n\t\t\t\t}\n\n\t\t\t\tvar results []reflect.Value\n\n\t\t\t\tif withContext {\n\t\t\t\t\tresults = method.Call([]reflect.Value{reflect.ValueOf(options.Context)})\n\t\t\t\t} else {\n\t\t\t\t\tresults = method.Call([]reflect.Value{})\n\t\t\t\t}\n\n\t\t\t\tdstFieldValue.Set(results[0])\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif fieldName != srcFieldName {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Ptr -> Value\n\t\t\tif srcFieldType.Type.Kind() == reflect.Ptr && !srcFieldValue.IsNil() && dstFieldType.Type.Kind() != reflect.Ptr {\n\t\t\t\tdstFieldValue.Set(reflect.Indirect(srcFieldValue))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif srcFieldType.Type.AssignableTo(dstFieldType.Type) {\n\t\t\t\tdstFieldValue.Set(srcFieldValue)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>Refacto: remove obsolete comment.<commit_after>package deepcopier\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/guregu\/null\"\n\t\"github.com\/lib\/pq\"\n\t\"github.com\/oleiade\/reflections\"\n)\n\nconst (\n\t\/\/ TagName is struct field tag name.\n\tTagName = \"deepcopier\"\n\n\t\/\/ FieldOptionName is the from field option name for struct tag.\n\tFieldOptionName = \"field\"\n\n\t\/\/ ContextOptionName is the context option name for struct tag.\n\tContextOptionName = \"context\"\n\n\t\/\/ SkipOptionName is the skip option name for struct tag.\n\tSkipOptionName = \"skip\"\n)\n\n\/\/ DeepCopier deep copies a struct to\/from a struct.\ntype DeepCopier struct {\n\tSource interface{}\n\tDestination interface{}\n\tTagged interface{}\n\tContext map[string]interface{}\n\tReversed bool\n}\n\n\/\/ FieldOptions contains options passed to SetField method.\ntype FieldOptions struct {\n\tSourceField string\n\tDestinationField string\n\tWithContext bool\n\tSkip bool\n}\n\n\/\/ NewFieldOptions returns a FieldOptions instance for the given instance's field.\nfunc NewFieldOptions(instance interface{}, field string, reversed bool) *FieldOptions {\n\tfieldOptions := &FieldOptions{\n\t\tSourceField: field,\n\t\tDestinationField: field,\n\t\tWithContext: false,\n\t\tSkip: false,\n\t}\n\n\ttagOptions, _ := reflections.GetFieldTag(instance, field, TagName)\n\n\tif tagOptions == \"\" {\n\t\treturn fieldOptions\n\t}\n\n\topts := GetTagOptions(tagOptions)\n\n\tif _, ok := opts[FieldOptionName]; ok {\n\t\tfieldName := opts[FieldOptionName]\n\n\t\tif !reversed {\n\t\t\tfieldOptions.SourceField = fieldName\n\t\t} else {\n\t\t\tfieldOptions.DestinationField = fieldName\n\t\t}\n\t}\n\n\tif _, ok := opts[ContextOptionName]; ok {\n\t\tfieldOptions.WithContext = true\n\t}\n\n\tif _, ok := opts[SkipOptionName]; ok {\n\t\tfieldOptions.Skip = true\n\t}\n\n\treturn fieldOptions\n}\n\n\/\/ Copy sets the source.\nfunc Copy(source interface{}) *DeepCopier {\n\treturn &DeepCopier{\n\t\tSource: source,\n\t\tReversed: false,\n\t}\n}\n\n\/\/ To sets the given tagged struct as destination struct.\n\/\/ Source -> Destination\nfunc (dc *DeepCopier) To(tagged interface{}) error {\n\tdc.Destination = tagged\n\tdc.Tagged = tagged\n\n\treturn dc.ProcessCopy()\n}\n\n\/\/ From sets the given tagged struct as source and the current source as destination.\n\/\/ Source <- Destination\nfunc (dc *DeepCopier) From(tagged interface{}) error {\n\tdc.Destination = dc.Source\n\tdc.Source = tagged\n\tdc.Tagged = tagged\n\tdc.Reversed = true\n\n\treturn dc.ProcessCopy()\n}\n\n\/\/ ProcessCopy processes copy.\nfunc (dc *DeepCopier) ProcessCopy() error {\n\tvar (\n\t\tfields = []string{}\n\t\ttaggedValue = reflect.ValueOf(dc.Tagged).Elem()\n\t\ttaggedType = taggedValue.Type()\n\t)\n\n\tfor i := 0; i < taggedValue.NumField(); i++ {\n\t\tvar (\n\t\t\tfv = taggedValue.Field(i)\n\t\t\tft = taggedType.Field(i)\n\t\t)\n\n\t\t\/\/ Embedded struct\n\t\tif ft.Anonymous {\n\t\t\tf, _ := reflections.Fields(fv.Interface())\n\t\t\tfields = append(fields, f...)\n\t\t} else {\n\t\t\tfields = append(fields, ft.Name)\n\t\t}\n\t}\n\n\tfor _, field := range fields {\n\t\tif err := dc.SetField(NewFieldOptions(dc.Tagged, field, dc.Reversed)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ -----------------------------------------------------------------------------\n\/\/ Options\n\/\/ -----------------------------------------------------------------------------\n\n\/\/ WithContext injects the given context into the builder instance.\nfunc (dc *DeepCopier) WithContext(context map[string]interface{}) *DeepCopier {\n\tdc.Context = context\n\treturn dc\n}\n\n\/\/ -----------------------------------------------------------------------------\n\/\/ Struct tags\n\/\/ -----------------------------------------------------------------------------\n\n\/\/ GetTagOptions parses deepcopier tag field and returns options.\nfunc GetTagOptions(value string) map[string]string {\n\toptions := map[string]string{}\n\n\tfor _, opt := range strings.Split(value, \";\") {\n\t\to := strings.Split(opt, \":\")\n\n\t\t\/\/ deepcopier:\"keyword; without; value;\"\n\t\tif len(o) == 1 {\n\t\t\tk := o[0]\n\t\t\toptions[k] = \"\"\n\t\t}\n\n\t\t\/\/ deepcopier:\"key:value; anotherkey:anothervalue\"\n\t\tif len(o) == 2 {\n\t\t\tk, v := o[0], o[1]\n\t\t\tk = strings.TrimSpace(k)\n\t\t\tv = strings.TrimSpace(v)\n\t\t\toptions[k] = v\n\t\t}\n\t}\n\treturn options\n}\n\n\/\/ -----------------------------------------------------------------------------\n\/\/ Field Setters\n\/\/ -----------------------------------------------------------------------------\n\n\/\/ SetField sets the value of the given field.\nfunc (dc *DeepCopier) SetField(options *FieldOptions) error {\n\tif options.Skip {\n\t\treturn nil\n\t}\n\n\tif dc.Reversed {\n\t\thas, _ := reflections.HasField(dc.Destination, options.DestinationField)\n\t\tif !has {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\thas, _ := reflections.HasField(dc.Source, options.SourceField)\n\tif !has {\n\t\terr := dc.HandleMethod(options)\n\t\tif err != nil {\n\t\t\thas, _ = reflections.HasField(dc.Destination, options.DestinationField)\n\t\t\tif has {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t}\n\n\tkind, _ := reflections.GetFieldKind(dc.Source, options.SourceField)\n\tif kind == reflect.Struct {\n\t\tif err := dc.HandleStructField(options); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\tif err := dc.HandleField(options); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ SetFieldValue Sets the given value to the given field.\nfunc (dc *DeepCopier) SetFieldValue(entity interface{}, name string, value reflect.Value) error {\n\tkind := value.Kind()\n\n\tif kind == reflect.Ptr {\n\t\tif value.IsNil() {\n\t\t\treturn nil\n\t\t}\n\t\tvalue = value.Elem()\n\t\tkind = value.Kind()\n\t}\n\n\t\/\/ Maps\n\tif kind == reflect.Map {\n\t\tswitch v := value.Interface().(type) {\n\t\tcase map[string]interface{}, map[string]string, map[string]map[string]string, map[string]map[string]map[string]string:\n\t\t\tif err := reflections.SetField(entity, name, v); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ Structs\n\tif kind == reflect.Struct {\n\t\tswitch v := value.Interface().(type) {\n\t\tcase time.Time, pq.NullTime, null.String:\n\t\t\tif err := reflections.SetField(entity, name, v); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ Slices\n\tif kind == reflect.Slice {\n\t\tswitch v := value.Interface().(type) {\n\t\tcase []int8, []int16, []int32, []int64, []int, []uint8, []uint16, []uint32, []uint64, []uint, []float32, []float64, []string, []bool:\n\t\t\tif err := reflections.SetField(entity, name, v); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\t\/\/ Reflect\n\tswitch kind {\n\tcase reflect.Int8:\n\t\tif err := reflections.SetField(entity, name, int8(value.Int())); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Int16:\n\t\tif err := reflections.SetField(entity, name, int16(value.Int())); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Int32:\n\t\tif err := reflections.SetField(entity, name, int32(value.Int())); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Int64:\n\t\tif err := reflections.SetField(entity, name, value.Int()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Int:\n\t\tif err := reflections.SetField(entity, name, int(value.Int())); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Uint8:\n\t\tif err := reflections.SetField(entity, name, uint8(value.Uint())); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Uint16:\n\t\tif err := reflections.SetField(entity, name, uint16(value.Uint())); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Uint32:\n\t\tif err := reflections.SetField(entity, name, uint32(value.Uint())); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Uint64:\n\t\tif err := reflections.SetField(entity, name, value.Uint()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Uint:\n\t\tif err := reflections.SetField(entity, name, uint(value.Uint())); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Float32:\n\t\tif err := reflections.SetField(entity, name, float32(value.Float())); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Float64:\n\t\tif err := reflections.SetField(entity, name, value.Float()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.String:\n\t\tif err := reflections.SetField(entity, name, value.String()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\tcase reflect.Bool:\n\t\tif err := reflections.SetField(entity, name, value.Bool()); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\treturn nil\n}\n\n\/\/ -----------------------------------------------------------------------------\n\/\/ Field Type Handlers\n\/\/ -----------------------------------------------------------------------------\n\n\/\/ HandleStructField sets the value for the given supported struct field.\nfunc (dc *DeepCopier) HandleStructField(options *FieldOptions) error {\n\tf, err := reflections.GetField(dc.Source, options.SourceField)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch v := f.(type) {\n\tcase pq.NullTime:\n\t\tif v.Valid {\n\t\t\tif err := reflections.SetField(dc.Destination, options.DestinationField, &v.Time); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\tcase time.Time:\n\t\tif err := reflections.SetField(dc.Destination, options.DestinationField, v); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ HandleField sets value for the given field.\nfunc (dc *DeepCopier) HandleField(options *FieldOptions) error {\n\tv, err := reflections.GetField(dc.Source, options.SourceField)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvalue := reflect.ValueOf(v)\n\tif err := dc.SetFieldValue(dc.Destination, options.DestinationField, value); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ HandleMethod tries to call method on model and sets result in resource field.\nfunc (dc *DeepCopier) HandleMethod(options *FieldOptions) error {\n\tif dc.Reversed {\n\t\treturn nil\n\t}\n\n\tmethod := reflect.ValueOf(dc.Source).MethodByName(options.SourceField)\n\tif !method.IsValid() {\n\t\treturn fmt.Errorf(\"Method %s does not exist\", options.SourceField)\n\t}\n\n\tvar results []reflect.Value\n\tif options.WithContext {\n\t\tresults = method.Call([]reflect.Value{reflect.ValueOf(dc.Context)})\n\t} else {\n\t\tresults = method.Call([]reflect.Value{})\n\t}\n\n\tif err := dc.SetFieldValue(dc.Destination, options.DestinationField, results[0]); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n\/\/ -----------------------------------------------------------------------------\n\/\/ Refacto\n\/\/ -----------------------------------------------------------------------------\n\n\/\/ Options are copier options.\ntype Options struct {\n\t\/\/ Context given to WithContext() method.\n\tContext map[string]interface{}\n}\n\nfunc getMethods(t reflect.Type) []string {\n\tvar methods []string\n\tfor i := 0; i < t.NumMethod(); i++ {\n\t\tmethods = append(methods, t.Method(i).Name)\n\t}\n\treturn methods\n}\n\n\/\/ InStringSlice checks if the given string is in the given slice of string\nfunc InStringSlice(haystack []string, needle string) bool {\n\tfor _, str := range haystack {\n\t\tif needle == str {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n\/\/ Copier is the brand new way to process copy.\nfunc Copier(dst interface{}, src interface{}, args ...Options) error {\n\tvar (\n\t\toptions = Options{}\n\t\tsrcValue = reflect.Indirect(reflect.ValueOf(src))\n\t\tsrcType = srcValue.Type()\n\t\tsrcMethods = getMethods(srcType)\n\t\tdstValue = reflect.Indirect(reflect.ValueOf(dst))\n\t)\n\n\t\/\/ Pointer only for receiver\n\tif !dstValue.CanAddr() {\n\t\treturn errors.New(\"dst value is unaddressable\")\n\t}\n\n\tif len(args) > 0 {\n\t\toptions = args[0]\n\t}\n\n\tfor i := 0; i < srcValue.NumField(); i++ {\n\t\tvar (\n\t\t\tsrcFieldValue = srcValue.Field(i)\n\t\t\tsrcFieldType = srcValue.Type().Field(i)\n\t\t\tsrcFieldName = srcFieldType.Name\n\t\t)\n\n\t\tif !srcFieldValue.IsValid() {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor ii := 0; ii < dstValue.NumField(); ii++ {\n\t\t\tvar (\n\t\t\t\tdstFieldValue = dstValue.Field(ii)\n\t\t\t\tdstFieldType = dstValue.Type().Field(ii)\n\t\t\t\tdstFieldName = dstFieldType.Name\n\t\t\t\tdstFieldTag = dstFieldType.Tag.Get(TagName)\n\t\t\t\t\/\/ Options\n\t\t\t\tfieldName = dstFieldName\n\t\t\t\twithContext = false\n\t\t\t)\n\n\t\t\ttagOptions := GetTagOptions(dstFieldTag)\n\n\t\t\t\/\/ If skip option is set, bypass copy.\n\t\t\tif v, ok := tagOptions[SkipOptionName]; ok && v != \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Get real source field \/ method name src struct tag.\n\t\t\tif v, ok := tagOptions[FieldOptionName]; ok && v != \"\" {\n\t\t\t\tfieldName = v\n\t\t\t}\n\n\t\t\t\/\/ Give context as function argument?\n\t\t\tif v, ok := tagOptions[ContextOptionName]; ok && v != \"\" {\n\t\t\t\twithContext = true\n\t\t\t}\n\n\t\t\tif InStringSlice(srcMethods, fieldName) {\n\t\t\t\tmethod := reflect.ValueOf(src).MethodByName(fieldName)\n\n\t\t\t\tif !method.IsValid() {\n\t\t\t\t\treturn fmt.Errorf(\"method %v in source is not valid\", fieldName)\n\t\t\t\t}\n\n\t\t\t\tvar results []reflect.Value\n\n\t\t\t\tif withContext {\n\t\t\t\t\tresults = method.Call([]reflect.Value{reflect.ValueOf(options.Context)})\n\t\t\t\t} else {\n\t\t\t\t\tresults = method.Call([]reflect.Value{})\n\t\t\t\t}\n\n\t\t\t\tdstFieldValue.Set(results[0])\n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif fieldName != srcFieldName {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t\/\/ Ptr -> Value\n\t\t\tif srcFieldType.Type.Kind() == reflect.Ptr && !srcFieldValue.IsNil() && dstFieldType.Type.Kind() != reflect.Ptr {\n\t\t\t\tdstFieldValue.Set(reflect.Indirect(srcFieldValue))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif srcFieldType.Type.AssignableTo(dstFieldType.Type) {\n\t\t\t\tdstFieldValue.Set(srcFieldValue)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"src\/todo\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/*\nREVISION HISTORY\n-------- -------\n11 Jan 22 -- Started copying out of \"Powerful Command-Line Applications in Go\" by Ricardo Gerardi\n15 Jan 22 -- Modifying the output for the -h flag using the book code. I don't need -v flag anymore.\n Then added the String method, but that had to be a value receiver to work as like in the book.\n Then added use of TODO_FILENAME environment variable.\n16 Jan 22 -- Added stdin as a source. And changed name of string task flag to a boolean add flag.\n17 Jan 22 -- Added default actions if no switch is provided. If there are arguments then add as a task, if not list tasks.\n 8 Feb 22 -- Will show a timestamp of adding a task, and will use a network file if available.\n*\/\n\nconst lastModified = \"9 Feb 2022\"\nconst linuxNetworkPrefix = \"\/mnt\/docs\/\"\nconst windowsNetworkPrefix = \"z:\/\"\n\nvar todoFilename = \"todo.json\" \/\/ now a var instead of a const so can use environment variable if set.\nvar todoFileBin = \"todo.gob\" \/\/ now a var instead of a const so can use environment variable if set.\nvar fileExists bool\n\nvar verboseFlag = flag.Bool(\"v\", false, \"Set verbose mode.\")\n\n\/\/ var task = flag.String(\"task\", \"\", \"Task to be added to the ToDo list.\")\nvar add = flag.Bool(\"add\", false, \"Add task to the ToDo list.\")\nvar complete = flag.Int(\"complete\", 0, \"Item to be completed.\") \/\/ here, 0 means NTD. That's why we have to start at 1 for item numbers.\nvar listFlag = flag.Bool(\"list\", false, \"List all tasks to the display.\")\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(flag.CommandLine.Output(), \" %s last modified %s. \\n\", os.Args[0], lastModified)\n\t\tfmt.Fprintf(flag.CommandLine.Output(), \" Usage information:\\n\")\n\t\tflag.PrintDefaults()\n\t}\n\tflag.Parse()\n\tif *verboseFlag {\n\t\tfmt.Printf(\" todo last modified %s. It will display and manage a todo list.\\n\", lastModified)\n\t\tfmt.Printf(\" Default file root is todo for todo.json and todo.gob. TODO_FILENAME environment variable is read.\\n\")\n\n\t}\n\n\thomeDir, err := os.UserHomeDir()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \" Error from os.UserHomeDir is %v.\\n\", err)\n\t}\n\n\tif runtime.GOOS == \"linux\" {\n\n\t} else { \/\/ must be on Windows.\n\n\t}\n\n\tenvValue, ok := os.LookupEnv(\"TODO_FILENAME\")\n\tif ok {\n\t\ttodoFilename = envValue + \".json\"\n\t\t\/\/todoFileBin = filepath.Base(envValue) + \".gob\"\n\t\ttodoFileBin = envValue + \".gob\"\n\t}\n\tif *verboseFlag {\n\t\tfmt.Printf(\" todoFilename = %s, todoFileBin = %s\\n\", todoFilename, todoFileBin)\n\t}\n\n\tvar fullFilenameJson, fullFilenameBin string\n\tif ok {\n\t\tfullFilenameJson, fullFilenameBin = todoFilename, todoFileBin\n\t} else {\n\t\tfullFilenameJson = filepath.Join(homeDir, todoFilename)\n\t\tfullFilenameBin = filepath.Join(homeDir, todoFileBin)\n\t}\n\tif *verboseFlag {\n\t\tfmt.Printf(\" fullFilenameJson = %s, fullFilenameBin = %s\\n\", fullFilenameJson, fullFilenameBin)\n\t}\n\n\t_, err = os.Stat(fullFilenameJson)\n\tif err != nil {\n\t\t\/\/fmt.Fprintf(os.Stderr, \" %s got error from os.Stat of %v.\\n\", fullFilenameJson, err)\n\t\tfileExists = false\n\t} else {\n\t\tfileExists = true\n\t}\n\t_, err = os.Stat(fullFilenameBin)\n\tif err != nil {\n\t\t\/\/fmt.Fprintf(os.Stderr, \" %s got error from os.Stat of %v.\\n\", fullFilenameBin, err)\n\t}\n\n\tl := todo.ListType{}\n\terr = l.LoadJSON(fullFilenameJson) \/\/ if file doesn't exist, this doesn't return an error.\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \" Error returned while reading %s is %v\\n\", fullFilenameJson, err)\n\t\ter := l.LoadBinary(fullFilenameBin)\n\t\tif er != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \" Error returned while reading %s is %v\\n\", fullFilenameBin, er)\n\t\t\tfmt.Print(\" Should I exit? \")\n\t\t\tvar ans string\n\t\t\tfmt.Scanln(&ans)\n\t\t\tif strings.HasPrefix(strings.ToLower(ans), \"y\") {\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tfmt.Println()\n\t\t\tfileExists = false\n\t\t}\n\t}\n\n\tif *verboseFlag {\n\t\tfor i, t := range l {\n\t\t\tfmt.Printf(\" %d: %s, %t, %s, %s\\n\", i+1, t.Task, t.Done, t.CreatedAt.Format(time.RFC822), t.CompletedAt.Format(time.RFC822))\n\t\t}\n\t\tfmt.Println()\n\t}\n\tswitch {\n\tcase *listFlag:\n\t\t\/* Replaced by the stringer interface\n\t\tfor _, item := range l {\n\t\t\tif !item.Done {\n\t\t\t\tfmt.Printf(\" Not done: %s\\n\", item.Task)\n\t\t\t}\n\t\t}\n\t\tfmt.Println()\n\t\tfor _, item := range l {\n\t\t\tif item.Done {\n\t\t\t\tfmt.Printf(\" Done: %s was completed on %s\\n\", item.Task, item.CompletedAt.Format(\"Jan-02-2006 15:04:05\"))\n\t\t\t}\n\t\t}\n\t\t*\/\n\n\t\t\/\/ This should invoke the stringer interface from the fmt package. IE, call the String method I defined in todo. But it's not working.\n\t\t\/\/ I kept playing w\/ it and I read the docs at golang.org. I concluded that the stringer interface required a value receiver. I had\n\t\t\/\/ followed the book that defined it as a pointer receiver. So I defined it in todo.go as a value receiver, and it started to work.\n\t\tif fileExists {\n\t\t\tfmt.Println(l)\n\t\t} else {\n\t\t\tfmt.Fprintf(os.Stderr, \" Cannot list todo files (%s or %s) as they cannot be found.\\n\", fullFilenameJson, fullFilenameBin)\n\t\t}\n\n\t\t\/\/fmt.Printf(\"%s\", l) \/\/ this does not work.\n\t\t\/\/fmt.Print(l.String()) \/\/ this works. But I figured out why it didn't work at first like the book said it should. See the above comment.\n\tcase *complete > 0:\n\t\tif !fileExists {\n\t\t\tfmt.Fprintf(os.Stderr, \" Cannot complete todo entries because files (%s or %s) cannot be found.\\n\", fullFilenameJson, fullFilenameBin)\n\t\t\tos.Exit(1)\n\t\t}\n\t\terr = l.Complete(*complete)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \" Item number %d cannot be completed because %v\\n\", *complete, err)\n\t\t}\n\n\t\terr = l.SaveJSON(fullFilenameJson)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \" List could not be saved in json because %v\\n\", err)\n\t\t}\n\t\terr = l.SaveBinary(fullFilenameBin)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \" List could not be saved in binary format because %v\\n\", err)\n\t\t}\n\tcase *add:\n\t\ttask, err := getTask(os.Stdin, flag.Args()...)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tl.Add(task)\n\t\terr = l.SaveJSON(fullFilenameJson)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \" List could not be saved in JSON because %v \\n\", err)\n\t\t}\n\t\terr = l.SaveBinary(fullFilenameBin)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \" List could not be saved in binary format because %v \\n\", err)\n\t\t}\n\tdefault: \/\/ task add\n\t\tif flag.NArg() > 0 {\n\t\t\ttsk := strings.Join(flag.Args(), \" \")\n\t\t\tl.Add(tsk)\n\t\t\terr = l.SaveJSON(fullFilenameJson)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \" List could not be saved in JSON because %v \\n\", err)\n\t\t\t}\n\t\t\terr = l.SaveBinary(fullFilenameBin)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \" List could not be saved in binary format because %v \\n\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tif fileExists {\n\t\t\t\tfmt.Println(l)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(os.Stderr, \" Cannot list todo files (%s or %s) as they cannot be found.\\n\", fullFilenameJson, fullFilenameBin)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc getTask(r io.Reader, args ...string) (string, error) { \/\/ decides where to get task string, from args or stdin.\n\tif len(args) > 0 {\n\t\treturn strings.Join(args, \" \"), nil\n\t}\n\n\tscnr := bufio.NewScanner(r)\n\tscnr.Scan()\n\tif err := scnr.Err(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(scnr.Text()) == 0 {\n\t\treturn \"\", fmt.Errorf(\"Task to add cannot be blank.\")\n\t}\n\n\treturn scnr.Text(), nil\n}\n<commit_msg>02\/09\/2022 07:31:05 AM todo\/cmd\/todo\/main.go<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"src\/todo\"\n\t\"strings\"\n\t\"time\"\n)\n\n\/*\nREVISION HISTORY\n-------- -------\n11 Jan 22 -- Started copying out of \"Powerful Command-Line Applications in Go\" by Ricardo Gerardi\n15 Jan 22 -- Modifying the output for the -h flag using the book code. I don't need -v flag anymore.\n Then added the String method, but that had to be a value receiver to work as like in the book.\n Then added use of TODO_FILENAME environment variable.\n16 Jan 22 -- Added stdin as a source. And changed name of string task flag to a boolean add flag.\n17 Jan 22 -- Added default actions if no switch is provided. If there are arguments then add as a task, if not list tasks.\n 8 Feb 22 -- Will show a timestamp of adding a task, done by updating the stringer method in todo.go. I changed how the\n filename is constructed. I am considering adding another environment variable, called TODO_PREFIX to more easily cover the networking prefix.\n*\/\n\nconst lastModified = \"9 Feb 2022\"\n\nvar todoFilename = \"todo.json\" \/\/ now a var instead of a const so can use environment variable if set.\nvar todoFileBin = \"todo.gob\" \/\/ now a var instead of a const so can use environment variable if set.\nvar prefix string\nvar fileExists bool\n\nvar verboseFlag = flag.Bool(\"v\", false, \"Set verbose mode.\")\n\n\/\/ var task = flag.String(\"task\", \"\", \"Task to be added to the ToDo list.\")\nvar add = flag.Bool(\"add\", false, \"Add task to the ToDo list.\")\nvar complete = flag.Int(\"complete\", 0, \"Item to be completed.\") \/\/ here, 0 means NTD. That's why we have to start at 1 for item numbers.\nvar listFlag = flag.Bool(\"list\", false, \"List all tasks to the display.\")\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(flag.CommandLine.Output(), \" %s last modified %s. \\n\", os.Args[0], lastModified)\n\t\tfmt.Fprintf(flag.CommandLine.Output(), \"TODO_PREFIX and TODO_FILENAME are the environment variables used. Do not use an extension for TODO_FILENAME\")\n\t\tfmt.Fprintf(flag.CommandLine.Output(), \" Usage information:\\n\")\n\t\tflag.PrintDefaults()\n\t}\n\tflag.Parse()\n\tif *verboseFlag {\n\t\tfmt.Printf(\" todo last modified %s. It will display and manage a todo list.\\n\", lastModified)\n\t\tfmt.Printf(\" Default filename root is todo for todo.json and todo.gob. TODO_FILENAME environment variable is read, and should not have an extension.\\n\")\n\t}\n\n\thomeDir, err := os.UserHomeDir()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \" Error from os.UserHomeDir is %v.\\n\", err)\n\t}\n\n\tvar fullFilenameJson, fullFilenameBin string\n\tenvFN, ok := os.LookupEnv(\"TODO_FILENAME\")\n\tif ok {\n\t\ttodoFilename = envFN + \".json\"\n\t\ttodoFileBin = filepath.Base(envFN) + \".gob\"\n\t}\n\tif *verboseFlag {\n\t\tfmt.Printf(\" todoFilename = %s, todoFileBin = %s\\n\", todoFilename, todoFileBin)\n\t}\n\n\tprefix, ok = os.LookupEnv(\"TODO_PREFIX\")\n\tif ok {\n\t\tfullFilenameJson = filepath.Join(prefix, todoFilename)\n\t\tfullFilenameBin = filepath.Join(prefix, todoFileBin)\n\t} else {\n\t\tfullFilenameJson = filepath.Join(homeDir, todoFilename)\n\t\tfullFilenameBin = filepath.Join(homeDir, todoFileBin)\n\t}\n\n\tif *verboseFlag {\n\t\tfmt.Printf(\" fullFilenameJson = %s, fullFilenameBin = %s\\n\", fullFilenameJson, fullFilenameBin)\n\t}\n\n\t_, err = os.Stat(fullFilenameJson)\n\tif err != nil {\n\t\t\/\/fmt.Fprintf(os.Stderr, \" %s got error from os.Stat of %v.\\n\", fullFilenameJson, err)\n\t\tfileExists = false\n\t} else {\n\t\tfileExists = true\n\t}\n\t_, err = os.Stat(fullFilenameBin)\n\tif err != nil {\n\t\t\/\/fmt.Fprintf(os.Stderr, \" %s got error from os.Stat of %v.\\n\", fullFilenameBin, err)\n\t}\n\n\tl := todo.ListType{}\n\terr = l.LoadJSON(fullFilenameJson) \/\/ if file doesn't exist, this doesn't return an error.\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \" Error returned while reading %s is %v\\n\", fullFilenameJson, err)\n\t\ter := l.LoadBinary(fullFilenameBin)\n\t\tif er != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \" Error returned while reading %s is %v\\n\", fullFilenameBin, er)\n\t\t\tfmt.Print(\" Should I exit? \")\n\t\t\tvar ans string\n\t\t\tfmt.Scanln(&ans)\n\t\t\tif strings.HasPrefix(strings.ToLower(ans), \"y\") {\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tfmt.Println()\n\t\t\tfileExists = false\n\t\t}\n\t}\n\n\tif *verboseFlag {\n\t\tfor i, t := range l {\n\t\t\tfmt.Printf(\" %d: %s, %t, %s, %s\\n\", i+1, t.Task, t.Done, t.CreatedAt.Format(time.RFC822), t.CompletedAt.Format(time.RFC822))\n\t\t}\n\t\tfmt.Println()\n\t}\n\tswitch {\n\tcase *listFlag:\n\t\t\/* Replaced by the stringer interface\n\t\tfor _, item := range l {\n\t\t\tif !item.Done {\n\t\t\t\tfmt.Printf(\" Not done: %s\\n\", item.Task)\n\t\t\t}\n\t\t}\n\t\tfmt.Println()\n\t\tfor _, item := range l {\n\t\t\tif item.Done {\n\t\t\t\tfmt.Printf(\" Done: %s was completed on %s\\n\", item.Task, item.CompletedAt.Format(\"Jan-02-2006 15:04:05\"))\n\t\t\t}\n\t\t}\n\t\t*\/\n\n\t\t\/\/ This should invoke the stringer interface from the fmt package. IE, call the String method I defined in todo. But it's not working.\n\t\t\/\/ I kept playing w\/ it and I read the docs at golang.org. I concluded that the stringer interface required a value receiver. I had\n\t\t\/\/ followed the book that defined it as a pointer receiver. So I defined it in todo.go as a value receiver, and it started to work.\n\t\tif fileExists {\n\t\t\tfmt.Println(l)\n\t\t} else {\n\t\t\tfmt.Fprintf(os.Stderr, \" Cannot list todo files (%s or %s) as they cannot be found.\\n\", fullFilenameJson, fullFilenameBin)\n\t\t}\n\n\t\t\/\/fmt.Printf(\"%s\", l) \/\/ this does not work.\n\t\t\/\/fmt.Print(l.String()) \/\/ this works. But I figured out why it didn't work at first like the book said it should. See the above comment.\n\tcase *complete > 0:\n\t\tif !fileExists {\n\t\t\tfmt.Fprintf(os.Stderr, \" Cannot complete todo entries because files (%s or %s) cannot be found.\\n\", fullFilenameJson, fullFilenameBin)\n\t\t\tos.Exit(1)\n\t\t}\n\t\terr = l.Complete(*complete)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \" Item number %d cannot be completed because %v\\n\", *complete, err)\n\t\t}\n\n\t\terr = l.SaveJSON(fullFilenameJson)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \" List could not be saved in json because %v\\n\", err)\n\t\t}\n\t\terr = l.SaveBinary(fullFilenameBin)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \" List could not be saved in binary format because %v\\n\", err)\n\t\t}\n\tcase *add:\n\t\ttask, err := getTask(os.Stdin, flag.Args()...)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tl.Add(task)\n\t\terr = l.SaveJSON(fullFilenameJson)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \" List could not be saved in JSON because %v \\n\", err)\n\t\t}\n\t\terr = l.SaveBinary(fullFilenameBin)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \" List could not be saved in binary format because %v \\n\", err)\n\t\t}\n\tdefault: \/\/ task add\n\t\tif flag.NArg() > 0 {\n\t\t\ttsk := strings.Join(flag.Args(), \" \")\n\t\t\tl.Add(tsk)\n\t\t\terr = l.SaveJSON(fullFilenameJson)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \" List could not be saved in JSON because %v \\n\", err)\n\t\t\t}\n\t\t\terr = l.SaveBinary(fullFilenameBin)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \" List could not be saved in binary format because %v \\n\", err)\n\t\t\t}\n\t\t} else {\n\t\t\tif fileExists {\n\t\t\t\tfmt.Println(l)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(os.Stderr, \" Cannot list todo files (%s or %s) as they cannot be found.\\n\", fullFilenameJson, fullFilenameBin)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc getTask(r io.Reader, args ...string) (string, error) { \/\/ decides where to get task string, from args or stdin.\n\tif len(args) > 0 {\n\t\treturn strings.Join(args, \" \"), nil\n\t}\n\n\tscnr := bufio.NewScanner(r)\n\tscnr.Scan()\n\tif err := scnr.Err(); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif len(scnr.Text()) == 0 {\n\t\treturn \"\", fmt.Errorf(\"Task to add cannot be blank.\")\n\t}\n\n\treturn scnr.Text(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"cf\"\n\t\"cf\/configuration\"\n\t\"cf\/net\"\n\t\"fmt\"\n\t\"strings\"\n)\n\ntype ApplicationSummaries struct {\n\tApps []ApplicationFromSummary\n}\n\nfunc (resource ApplicationSummaries) ToModels() (apps []cf.ApplicationFields) {\n\tfor _, appSummary := range resource.Apps {\n\t\tapps = append(apps, appSummary.ToFields())\n\t}\n\treturn\n}\n\ntype ApplicationFromSummary struct {\n\tGuid string\n\tName string\n\tRoutes []RouteSummary\n\tRunningInstances int `json:\"running_instances\"`\n\tMemory uint64\n\tInstances int\n\tDiskQuota uint64 `json:\"disk_quota\"`\n\tUrls []string\n\tState string\n}\n\nfunc (resource ApplicationFromSummary) ToFields() (app cf.ApplicationFields) {\n\tapp = cf.ApplicationFields{}\n\tapp.Guid = resource.Guid\n\tapp.Name = resource.Name\n\tapp.State = strings.ToLower(resource.State)\n\tapp.InstanceCount = resource.Instances\n\tapp.DiskQuota = resource.DiskQuota\n\tapp.RunningInstances = resource.RunningInstances\n\tapp.Memory = resource.Memory\n\n\treturn\n}\n\nfunc (resource ApplicationFromSummary) ToModel() (app cf.AppSummary) {\n\tapp.ApplicationFields = resource.ToFields()\n\troutes := []cf.RouteSummary{}\n\tfor _, route := range resource.Routes {\n\t\troutes = append(routes, route.ToModel())\n\t}\n\tapp.RouteSummaries = routes\n\n\treturn\n}\n\ntype RouteSummary struct {\n\tGuid string\n\tHost string\n\tDomain DomainSummary\n}\n\nfunc (resource RouteSummary) ToModel() (route cf.RouteSummary) {\n\tdomain := cf.DomainFields{}\n\tdomain.Guid = resource.Domain.Guid\n\tdomain.Name = resource.Domain.Name\n\tdomain.Shared = resource.Domain.OwningOrganizationGuid != \"\"\n\n\troute.Guid = resource.Guid\n\troute.Host = resource.Host\n\troute.Domain = domain\n\treturn\n}\n\ntype DomainSummary struct {\n\tGuid string\n\tName string\n\tOwningOrganizationGuid string\n}\n\ntype AppSummaryRepository interface {\n\tGetSummariesInCurrentSpace() (apps []cf.AppSummary, apiResponse net.ApiResponse)\n\tGetSummary(appGuid string) (summary cf.AppSummary, apiResponse net.ApiResponse)\n}\n\ntype CloudControllerAppSummaryRepository struct {\n\tconfig *configuration.Configuration\n\tgateway net.Gateway\n}\n\nfunc NewCloudControllerAppSummaryRepository(config *configuration.Configuration, gateway net.Gateway) (repo CloudControllerAppSummaryRepository) {\n\trepo.config = config\n\trepo.gateway = gateway\n\treturn\n}\n\nfunc (repo CloudControllerAppSummaryRepository) GetSummariesInCurrentSpace() (apps []cf.AppSummary, apiResponse net.ApiResponse) {\n\tresources := new(ApplicationSummaries)\n\n\tpath := fmt.Sprintf(\"%s\/v2\/spaces\/%s\/summary\", repo.config.Target, repo.config.SpaceFields.Guid)\n\tapiResponse = repo.gateway.GetResource(path, repo.config.AccessToken, resources)\n\tif apiResponse.IsNotSuccessful() {\n\t\treturn\n\t}\n\n\tfor _, resource := range resources.Apps {\n\t\tvar app cf.AppSummary\n\t\tapp, apiResponse = repo.createSummary(&resource)\n\t\tif apiResponse.IsNotSuccessful() {\n\t\t\treturn\n\t\t}\n\t\tapps = append(apps, app)\n\t}\n\treturn\n}\n\nfunc (repo CloudControllerAppSummaryRepository) GetSummary(appGuid string) (summary cf.AppSummary, apiResponse net.ApiResponse) {\n\tpath := fmt.Sprintf(\"%s\/v2\/apps\/%s\/summary\", repo.config.Target, appGuid)\n\tsummaryResponse := new(ApplicationFromSummary)\n\tapiResponse = repo.gateway.GetResource(path, repo.config.AccessToken, summaryResponse)\n\tif apiResponse.IsNotSuccessful() {\n\t\treturn\n\t}\n\n\treturn repo.createSummary(summaryResponse)\n}\n\nfunc (repo CloudControllerAppSummaryRepository) createSummary(resource *ApplicationFromSummary) (summary cf.AppSummary, apiResponse net.ApiResponse) {\n\tsummary = resource.ToModel()\n\treturn\n}\n<commit_msg>cleaning up app summary, removing unused apiResponse [finishes #61009376]<commit_after>package api\n\nimport (\n\t\"cf\"\n\t\"cf\/configuration\"\n\t\"cf\/net\"\n\t\"fmt\"\n\t\"strings\"\n)\n\ntype ApplicationSummaries struct {\n\tApps []ApplicationFromSummary\n}\n\nfunc (resource ApplicationSummaries) ToModels() (apps []cf.ApplicationFields) {\n\tfor _, appSummary := range resource.Apps {\n\t\tapps = append(apps, appSummary.ToFields())\n\t}\n\treturn\n}\n\ntype ApplicationFromSummary struct {\n\tGuid string\n\tName string\n\tRoutes []RouteSummary\n\tRunningInstances int `json:\"running_instances\"`\n\tMemory uint64\n\tInstances int\n\tDiskQuota uint64 `json:\"disk_quota\"`\n\tUrls []string\n\tState string\n}\n\nfunc (resource ApplicationFromSummary) ToFields() (app cf.ApplicationFields) {\n\tapp = cf.ApplicationFields{}\n\tapp.Guid = resource.Guid\n\tapp.Name = resource.Name\n\tapp.State = strings.ToLower(resource.State)\n\tapp.InstanceCount = resource.Instances\n\tapp.DiskQuota = resource.DiskQuota\n\tapp.RunningInstances = resource.RunningInstances\n\tapp.Memory = resource.Memory\n\n\treturn\n}\n\nfunc (resource ApplicationFromSummary) ToModel() (app cf.AppSummary) {\n\tapp.ApplicationFields = resource.ToFields()\n\troutes := []cf.RouteSummary{}\n\tfor _, route := range resource.Routes {\n\t\troutes = append(routes, route.ToModel())\n\t}\n\tapp.RouteSummaries = routes\n\n\treturn\n}\n\ntype RouteSummary struct {\n\tGuid string\n\tHost string\n\tDomain DomainSummary\n}\n\nfunc (resource RouteSummary) ToModel() (route cf.RouteSummary) {\n\tdomain := cf.DomainFields{}\n\tdomain.Guid = resource.Domain.Guid\n\tdomain.Name = resource.Domain.Name\n\tdomain.Shared = resource.Domain.OwningOrganizationGuid != \"\"\n\n\troute.Guid = resource.Guid\n\troute.Host = resource.Host\n\troute.Domain = domain\n\treturn\n}\n\ntype DomainSummary struct {\n\tGuid string\n\tName string\n\tOwningOrganizationGuid string\n}\n\ntype AppSummaryRepository interface {\n\tGetSummariesInCurrentSpace() (apps []cf.AppSummary, apiResponse net.ApiResponse)\n\tGetSummary(appGuid string) (summary cf.AppSummary, apiResponse net.ApiResponse)\n}\n\ntype CloudControllerAppSummaryRepository struct {\n\tconfig *configuration.Configuration\n\tgateway net.Gateway\n}\n\nfunc NewCloudControllerAppSummaryRepository(config *configuration.Configuration, gateway net.Gateway) (repo CloudControllerAppSummaryRepository) {\n\trepo.config = config\n\trepo.gateway = gateway\n\treturn\n}\n\nfunc (repo CloudControllerAppSummaryRepository) GetSummariesInCurrentSpace() (apps []cf.AppSummary, apiResponse net.ApiResponse) {\n\tresources := new(ApplicationSummaries)\n\n\tpath := fmt.Sprintf(\"%s\/v2\/spaces\/%s\/summary\", repo.config.Target, repo.config.SpaceFields.Guid)\n\tapiResponse = repo.gateway.GetResource(path, repo.config.AccessToken, resources)\n\tif apiResponse.IsNotSuccessful() {\n\t\treturn\n\t}\n\n\tfor _, resource := range resources.Apps {\n\t\tapps = append(apps, resource.ToModel())\n\t}\n\treturn\n}\n\nfunc (repo CloudControllerAppSummaryRepository) GetSummary(appGuid string) (summary cf.AppSummary, apiResponse net.ApiResponse) {\n\tpath := fmt.Sprintf(\"%s\/v2\/apps\/%s\/summary\", repo.config.Target, appGuid)\n\tsummaryResponse := new(ApplicationFromSummary)\n\tapiResponse = repo.gateway.GetResource(path, repo.config.AccessToken, summaryResponse)\n\tif apiResponse.IsNotSuccessful() {\n\t\treturn\n\t}\n\n\tsummary = summaryResponse.ToModel()\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2013-2014 by Michael Dvorkin. All Rights Reserved.\n\/\/ Use of this source code is governed by a MIT-style license that can\n\/\/ be found in the LICENSE file.\n\npackage donna\n\nimport (\n\t`regexp`\n)\n\nfunc (p *Position) NewMove(from, to int) Move {\n\tpiece, capture := p.pieces[from], p.pieces[to]\n\n\tif p.enpassant != 0 && to == p.enpassant {\n\t\tcapture = pawn(piece.color() ^ 1)\n\t}\n\n\treturn Move(from | (to << 8) | (int(piece) << 16) | (int(capture) << 20))\n}\n\nfunc (p *Position) NewCastle(from, to int) Move {\n\treturn Move(from | (to << 8) | (int(p.pieces[from]) << 16) | isCastle)\n}\n\nfunc (p *Position) NewEnpassant(from, to int) Move {\n\treturn Move(from | (to << 8) | (int(p.pieces[from]) << 16) | isEnpassant)\n}\n\n\/\/ Returns true if *non-evasion* move is valid, i.e. it is possible to make\n\/\/ the move in current position without violating chess rules. If the king is\n\/\/ in check the generator is expected to generate valid evasions where extra\n\/\/ validation is not needed.\nfunc (p *Position) isValid(move Move, pins Bitmask) bool {\n\tcolor := move.color() \/\/ TODO: make color part of move split.\n\tfrom, to, piece, capture := move.split()\n\n\t\/\/ For rare en-passant pawn captures we validate the move by actually\n\t\/\/ making it, and then taking it back.\n\tif p.enpassant != 0 && to == p.enpassant && capture.isPawn() {\n\t\tif position := p.MakeMove(move); position != nil {\n\t\t\tposition.UndoLastMove()\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\t\/\/ King's move is valid when a) the move is a castle or b) the destination\n\t\/\/ square is not being attacked by the opponent.\n\tif piece.isKing() {\n\t\treturn (move & isCastle != 0) || !p.isAttacked(to, color^1)\n\t}\n\n\t\/\/ For all other peices the move is valid when it doesn't cause a\n\t\/\/ check. For pinned sliders this includes moves along the pinning\n\t\/\/ file, rank, or diagonal.\n\treturn pins == 0 || pins.isClear(from) || IsBetween(from, to, p.king[color])\n}\n\n\/\/ Returns a bitmask of all pinned pieces preventing a check for the king on\n\/\/ given square. The color of the pieces match the color of the king.\nfunc (p *Position) pinnedMask(square int) (mask Bitmask) {\n\tcolor := p.pieces[square].color()\n\tenemy := color ^ 1\n\tattackers := (p.outposts[bishop(enemy)] | p.outposts[queen(enemy)]) & bishopMagicMoves[square][0]\n\tattackers |= (p.outposts[rook(enemy)] | p.outposts[queen(enemy)]) & rookMagicMoves[square][0]\n\n\tfor attackers != 0 {\n\t\tattackSquare := attackers.pop()\n\t\tblockers := maskBlock[square][attackSquare] & ^bit[attackSquare] & p.board\n\n\t\tif blockers.count() == 1 {\n\t\t\tmask |= blockers & p.outposts[color] \/\/ Only friendly pieces are pinned.\n\t\t}\n\t}\n\treturn\n}\n\nfunc (p *Position) pawnMove(square, target int) Move {\n\tif RelRow(square, p.color) == 1 && RelRow(target, p.color) == 3 {\n\t\tif p.causesEnpassant(target) {\n\t\t\treturn p.NewEnpassant(square, target)\n\t\t} else {\n\t\t\treturn p.NewMove(square, target)\n\t\t}\n\t}\n\n\treturn p.NewMove(square, target)\n}\n\nfunc (p *Position) pawnPromotion(square, target int) (Move, Move, Move, Move) {\n\treturn p.NewMove(square, target).promote(Queen),\n\t\tp.NewMove(square, target).promote(Rook),\n\t\tp.NewMove(square, target).promote(Bishop),\n\t\tp.NewMove(square, target).promote(Knight)\n}\n\nfunc (p *Position) causesEnpassant(target int) bool {\n\tpawns := p.outposts[pawn(p.color^1)] \/\/ Opposite color pawns.\n\tswitch col := Col(target); col {\n\tcase 0:\n\t\treturn pawns.isSet(target + 1)\n\tcase 7:\n\t\treturn pawns.isSet(target - 1)\n\tdefault:\n\t\treturn pawns.isSet(target+1) || pawns.isSet(target-1)\n\t}\n\treturn false\n}\n\nfunc (p *Position) NewMoveFromString(e2e4 string) (move Move) {\n\tre := regexp.MustCompile(`([KkQqRrBbNn]?)([a-h])([1-8])-?([a-h])([1-8])([QqRrBbNn]?)`)\n\tarr := re.FindStringSubmatch(e2e4)\n\n\tif len(arr) > 0 {\n\t\tname := arr[1]\n\t\tfrom := Square(int(arr[3][0]-'1'), int(arr[2][0]-'a'))\n\t\tto := Square(int(arr[5][0]-'1'), int(arr[4][0]-'a'))\n\t\tpromo := arr[6]\n\n\t\tvar piece Piece\n\t\tswitch name {\n\t\tcase `K`, `k`:\n\t\t\tpiece = king(p.color)\n\t\tcase `Q`, `q`:\n\t\t\tpiece = queen(p.color)\n\t\tcase `R`, `r`:\n\t\t\tpiece = rook(p.color)\n\t\tcase `B`, `b`:\n\t\t\tpiece = bishop(p.color)\n\t\tcase `N`, `n`:\n\t\t\tpiece = knight(p.color)\n\t\tdefault:\n\t\t\tpiece = p.pieces[from] \/\/ <-- Makes piece character optional.\n\t\t}\n\t\tif (p.pieces[from] != piece) || (p.targets(from)&bit[to] == 0) {\n\t\t\tmove = 0 \/\/ Invalid move.\n\t\t} else {\n\t\t\tmove = p.NewMove(from, to)\n\t\t\tif len(promo) > 0 {\n\t\t\t\tswitch promo {\n\t\t\t\tcase `Q`, `q`:\n\t\t\t\t\tmove = move.promote(Queen)\n\t\t\t\tcase `R`, `r`:\n\t\t\t\t\tmove = move.promote(Rook)\n\t\t\t\tcase `B`, `b`:\n\t\t\t\t\tmove = move.promote(Bishop)\n\t\t\t\tcase `N`, `n`:\n\t\t\t\t\tmove = move.promote(Knight)\n\t\t\t\tdefault:\n\t\t\t\t\tmove = 0\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else if e2e4 == `0-0` || e2e4 == `0-0-0` {\n\t\tfrom := p.king[p.color]\n\t\tto := G1\n\t\tif e2e4 == `0-0-0` {\n\t\t\tto = C1\n\t\t}\n\t\tif p.color == Black {\n\t\t\tto += 56\n\t\t}\n\t\tmove = p.NewCastle(from, to)\n\t\tif !move.isCastle() {\n\t\t\tmove = 0\n\t\t}\n\t}\n\treturn\n}\n<commit_msg>Faster pawn move and en-passant detection<commit_after>\/\/ Copyright (c) 2013-2014 by Michael Dvorkin. All Rights Reserved.\n\/\/ Use of this source code is governed by a MIT-style license that can\n\/\/ be found in the LICENSE file.\n\npackage donna\n\nimport (\n\t`regexp`\n)\n\nfunc (p *Position) NewMove(from, to int) Move {\n\tpiece, capture := p.pieces[from], p.pieces[to]\n\n\tif p.enpassant != 0 && to == p.enpassant {\n\t\tcapture = pawn(piece.color() ^ 1)\n\t}\n\n\treturn Move(from | (to << 8) | (int(piece) << 16) | (int(capture) << 20))\n}\n\nfunc (p *Position) NewCastle(from, to int) Move {\n\treturn Move(from | (to << 8) | (int(p.pieces[from]) << 16) | isCastle)\n}\n\nfunc (p *Position) NewEnpassant(from, to int) Move {\n\treturn Move(from | (to << 8) | (int(p.pieces[from]) << 16) | isEnpassant)\n}\n\n\/\/ Returns true if *non-evasion* move is valid, i.e. it is possible to make\n\/\/ the move in current position without violating chess rules. If the king is\n\/\/ in check the generator is expected to generate valid evasions where extra\n\/\/ validation is not needed.\nfunc (p *Position) isValid(move Move, pins Bitmask) bool {\n\tcolor := move.color() \/\/ TODO: make color part of move split.\n\tfrom, to, piece, capture := move.split()\n\n\t\/\/ For rare en-passant pawn captures we validate the move by actually\n\t\/\/ making it, and then taking it back.\n\tif p.enpassant != 0 && to == p.enpassant && capture.isPawn() {\n\t\tif position := p.MakeMove(move); position != nil {\n\t\t\tposition.UndoLastMove()\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\t\/\/ King's move is valid when a) the move is a castle or b) the destination\n\t\/\/ square is not being attacked by the opponent.\n\tif piece.isKing() {\n\t\treturn (move & isCastle != 0) || !p.isAttacked(to, color^1)\n\t}\n\n\t\/\/ For all other peices the move is valid when it doesn't cause a\n\t\/\/ check. For pinned sliders this includes moves along the pinning\n\t\/\/ file, rank, or diagonal.\n\treturn pins == 0 || pins.isClear(from) || IsBetween(from, to, p.king[color])\n}\n\n\/\/ Returns a bitmask of all pinned pieces preventing a check for the king on\n\/\/ given square. The color of the pieces match the color of the king.\nfunc (p *Position) pinnedMask(square int) (mask Bitmask) {\n\tcolor := p.pieces[square].color()\n\tenemy := color ^ 1\n\tattackers := (p.outposts[bishop(enemy)] | p.outposts[queen(enemy)]) & bishopMagicMoves[square][0]\n\tattackers |= (p.outposts[rook(enemy)] | p.outposts[queen(enemy)]) & rookMagicMoves[square][0]\n\n\tfor attackers != 0 {\n\t\tattackSquare := attackers.pop()\n\t\tblockers := maskBlock[square][attackSquare] & ^bit[attackSquare] & p.board\n\n\t\tif blockers.count() == 1 {\n\t\t\tmask |= blockers & p.outposts[color] \/\/ Only friendly pieces are pinned.\n\t\t}\n\t}\n\treturn\n}\n\nfunc (p *Position) pawnMove(square, target int) Move {\n\tif Abs(square - target) == 16 && p.causesEnpassant(target) {\n\t\treturn p.NewEnpassant(square, target)\n\t}\n\n\treturn p.NewMove(square, target)\n}\n\nfunc (p *Position) pawnPromotion(square, target int) (Move, Move, Move, Move) {\n\treturn p.NewMove(square, target).promote(Queen),\n\t\tp.NewMove(square, target).promote(Rook),\n\t\tp.NewMove(square, target).promote(Bishop),\n\t\tp.NewMove(square, target).promote(Knight)\n}\n\n\/\/ Returns true if a pawn jump causes en-passant. This is done by checking whether\n\/\/ the enemy pawns occupy squares ajacent to the target square.\nfunc (p *Position) causesEnpassant(target int) bool {\n\tpawns := p.outposts[pawn(p.color^1)] \/\/ Opposite color pawns.\n\n\treturn maskIsolated[Col(target)] & maskRank[Row(target)] & pawns != 0\n}\n\nfunc (p *Position) NewMoveFromString(e2e4 string) (move Move) {\n\tre := regexp.MustCompile(`([KkQqRrBbNn]?)([a-h])([1-8])-?([a-h])([1-8])([QqRrBbNn]?)`)\n\tarr := re.FindStringSubmatch(e2e4)\n\n\tif len(arr) > 0 {\n\t\tname := arr[1]\n\t\tfrom := Square(int(arr[3][0]-'1'), int(arr[2][0]-'a'))\n\t\tto := Square(int(arr[5][0]-'1'), int(arr[4][0]-'a'))\n\t\tpromo := arr[6]\n\n\t\tvar piece Piece\n\t\tswitch name {\n\t\tcase `K`, `k`:\n\t\t\tpiece = king(p.color)\n\t\tcase `Q`, `q`:\n\t\t\tpiece = queen(p.color)\n\t\tcase `R`, `r`:\n\t\t\tpiece = rook(p.color)\n\t\tcase `B`, `b`:\n\t\t\tpiece = bishop(p.color)\n\t\tcase `N`, `n`:\n\t\t\tpiece = knight(p.color)\n\t\tdefault:\n\t\t\tpiece = p.pieces[from] \/\/ <-- Makes piece character optional.\n\t\t}\n\t\tif (p.pieces[from] != piece) || (p.targets(from)&bit[to] == 0) {\n\t\t\tmove = 0 \/\/ Invalid move.\n\t\t} else {\n\t\t\tmove = p.NewMove(from, to)\n\t\t\tif len(promo) > 0 {\n\t\t\t\tswitch promo {\n\t\t\t\tcase `Q`, `q`:\n\t\t\t\t\tmove = move.promote(Queen)\n\t\t\t\tcase `R`, `r`:\n\t\t\t\t\tmove = move.promote(Rook)\n\t\t\t\tcase `B`, `b`:\n\t\t\t\t\tmove = move.promote(Bishop)\n\t\t\t\tcase `N`, `n`:\n\t\t\t\t\tmove = move.promote(Knight)\n\t\t\t\tdefault:\n\t\t\t\t\tmove = 0\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else if e2e4 == `0-0` || e2e4 == `0-0-0` {\n\t\tfrom := p.king[p.color]\n\t\tto := G1\n\t\tif e2e4 == `0-0-0` {\n\t\t\tto = C1\n\t\t}\n\t\tif p.color == Black {\n\t\t\tto += 56\n\t\t}\n\t\tmove = p.NewCastle(from, to)\n\t\tif !move.isCastle() {\n\t\t\tmove = 0\n\t\t}\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Parsing of Mach-O executables (OS X).\n\npackage objfile\n\nimport (\n\t\"debug\/macho\"\n\t\"fmt\"\n\t\"os\"\n\t\"sort\"\n)\n\ntype machoFile struct {\n\tmacho *macho.File\n}\n\nfunc openMacho(r *os.File) (rawFile, error) {\n\tf, err := macho.NewFile(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &machoFile{f}, nil\n}\n\nfunc (f *machoFile) symbols() ([]Sym, error) {\n\tif f.macho.Symtab == nil {\n\t\treturn nil, fmt.Errorf(\"missing symbol table\")\n\t}\n\n\t\/\/ Build sorted list of addresses of all symbols.\n\t\/\/ We infer the size of a symbol by looking at where the next symbol begins.\n\tvar addrs []uint64\n\tfor _, s := range f.macho.Symtab.Syms {\n\t\taddrs = append(addrs, s.Value)\n\t}\n\tsort.Sort(uint64s(addrs))\n\n\tvar syms []Sym\n\tfor _, s := range f.macho.Symtab.Syms {\n\t\tsym := Sym{Name: s.Name, Addr: s.Value, Code: '?'}\n\t\ti := sort.Search(len(addrs), func(x int) bool { return addrs[x] > s.Value })\n\t\tif i < len(addrs) {\n\t\t\tsym.Size = int64(addrs[i] - s.Value)\n\t\t}\n\t\tif s.Sect == 0 {\n\t\t\tsym.Code = 'U'\n\t\t} else if int(s.Sect) <= len(f.macho.Sections) {\n\t\t\tsect := f.macho.Sections[s.Sect-1]\n\t\t\tswitch sect.Seg {\n\t\t\tcase \"__TEXT\":\n\t\t\t\tsym.Code = 'R'\n\t\t\tcase \"__DATA\":\n\t\t\t\tsym.Code = 'D'\n\t\t\t}\n\t\t\tswitch sect.Seg + \" \" + sect.Name {\n\t\t\tcase \"__TEXT __text\":\n\t\t\t\tsym.Code = 'T'\n\t\t\tcase \"__DATA __bss\", \"__DATA __noptrbss\":\n\t\t\t\tsym.Code = 'B'\n\t\t\t}\n\t\t}\n\t\tsyms = append(syms, sym)\n\t}\n\n\treturn syms, nil\n}\n\nfunc (f *machoFile) pcln() (textStart uint64, symtab, pclntab []byte, err error) {\n\tif sect := f.macho.Section(\"__text\"); sect != nil {\n\t\ttextStart = sect.Addr\n\t}\n\tif sect := f.macho.Section(\"__gosymtab\"); sect != nil {\n\t\tif symtab, err = sect.Data(); err != nil {\n\t\t\treturn 0, nil, nil, err\n\t\t}\n\t}\n\tif sect := f.macho.Section(\"__gopclntab\"); sect != nil {\n\t\tif pclntab, err = sect.Data(); err != nil {\n\t\t\treturn 0, nil, nil, err\n\t\t}\n\t}\n\treturn textStart, symtab, pclntab, nil\n}\n\nfunc (f *machoFile) text() (textStart uint64, text []byte, err error) {\n\tsect := f.macho.Section(\"__text\")\n\tif sect == nil {\n\t\treturn 0, nil, fmt.Errorf(\"text section not found\")\n\t}\n\ttextStart = sect.Addr\n\ttext, err = sect.Data()\n\treturn\n}\n\nfunc (f *machoFile) goarch() string {\n\tswitch f.macho.Cpu {\n\tcase macho.Cpu386:\n\t\treturn \"386\"\n\tcase macho.CpuAmd64:\n\t\treturn \"amd64\"\n\tcase macho.CpuArm:\n\t\treturn \"arm\"\n\tcase macho.CpuPpc64:\n\t\treturn \"ppc64\"\n\t}\n\treturn \"\"\n}\n\ntype uint64s []uint64\n\nfunc (x uint64s) Len() int { return len(x) }\nfunc (x uint64s) Swap(i, j int) { x[i], x[j] = x[j], x[i] }\nfunc (x uint64s) Less(i, j int) bool { return x[i] < x[j] }\n<commit_msg>cmd\/internal\/objfile: Skip mach-o debug symbols.<commit_after>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ Parsing of Mach-O executables (OS X).\n\npackage objfile\n\nimport (\n\t\"debug\/macho\"\n\t\"fmt\"\n\t\"os\"\n\t\"sort\"\n)\n\nconst stabTypeMask = 0xe0\n\ntype machoFile struct {\n\tmacho *macho.File\n}\n\nfunc openMacho(r *os.File) (rawFile, error) {\n\tf, err := macho.NewFile(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &machoFile{f}, nil\n}\n\nfunc (f *machoFile) symbols() ([]Sym, error) {\n\tif f.macho.Symtab == nil {\n\t\treturn nil, fmt.Errorf(\"missing symbol table\")\n\t}\n\n\t\/\/ Build sorted list of addresses of all symbols.\n\t\/\/ We infer the size of a symbol by looking at where the next symbol begins.\n\tvar addrs []uint64\n\tfor _, s := range f.macho.Symtab.Syms {\n\t\t\/\/ Skip stab debug info.\n\t\tif s.Type&stabTypeMask == 0 {\n\t\t\taddrs = append(addrs, s.Value)\n\t\t}\n\t}\n\tsort.Sort(uint64s(addrs))\n\n\tvar syms []Sym\n\tfor _, s := range f.macho.Symtab.Syms {\n\t\tif s.Type&stabTypeMask != 0 {\n\t\t\t\/\/ Skip stab debug info.\n\t\t\tcontinue\n\t\t}\n\t\tsym := Sym{Name: s.Name, Addr: s.Value, Code: '?'}\n\t\ti := sort.Search(len(addrs), func(x int) bool { return addrs[x] > s.Value })\n\t\tif i < len(addrs) {\n\t\t\tsym.Size = int64(addrs[i] - s.Value)\n\t\t}\n\t\tif s.Sect == 0 {\n\t\t\tsym.Code = 'U'\n\t\t} else if int(s.Sect) <= len(f.macho.Sections) {\n\t\t\tsect := f.macho.Sections[s.Sect-1]\n\t\t\tswitch sect.Seg {\n\t\t\tcase \"__TEXT\":\n\t\t\t\tsym.Code = 'R'\n\t\t\tcase \"__DATA\":\n\t\t\t\tsym.Code = 'D'\n\t\t\t}\n\t\t\tswitch sect.Seg + \" \" + sect.Name {\n\t\t\tcase \"__TEXT __text\":\n\t\t\t\tsym.Code = 'T'\n\t\t\tcase \"__DATA __bss\", \"__DATA __noptrbss\":\n\t\t\t\tsym.Code = 'B'\n\t\t\t}\n\t\t}\n\t\tsyms = append(syms, sym)\n\t}\n\n\treturn syms, nil\n}\n\nfunc (f *machoFile) pcln() (textStart uint64, symtab, pclntab []byte, err error) {\n\tif sect := f.macho.Section(\"__text\"); sect != nil {\n\t\ttextStart = sect.Addr\n\t}\n\tif sect := f.macho.Section(\"__gosymtab\"); sect != nil {\n\t\tif symtab, err = sect.Data(); err != nil {\n\t\t\treturn 0, nil, nil, err\n\t\t}\n\t}\n\tif sect := f.macho.Section(\"__gopclntab\"); sect != nil {\n\t\tif pclntab, err = sect.Data(); err != nil {\n\t\t\treturn 0, nil, nil, err\n\t\t}\n\t}\n\treturn textStart, symtab, pclntab, nil\n}\n\nfunc (f *machoFile) text() (textStart uint64, text []byte, err error) {\n\tsect := f.macho.Section(\"__text\")\n\tif sect == nil {\n\t\treturn 0, nil, fmt.Errorf(\"text section not found\")\n\t}\n\ttextStart = sect.Addr\n\ttext, err = sect.Data()\n\treturn\n}\n\nfunc (f *machoFile) goarch() string {\n\tswitch f.macho.Cpu {\n\tcase macho.Cpu386:\n\t\treturn \"386\"\n\tcase macho.CpuAmd64:\n\t\treturn \"amd64\"\n\tcase macho.CpuArm:\n\t\treturn \"arm\"\n\tcase macho.CpuPpc64:\n\t\treturn \"ppc64\"\n\t}\n\treturn \"\"\n}\n\ntype uint64s []uint64\n\nfunc (x uint64s) Len() int { return len(x) }\nfunc (x uint64s) Swap(i, j int) { x[i], x[j] = x[j], x[i] }\nfunc (x uint64s) Less(i, j int) bool { return x[i] < x[j] }\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2018 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nconst adapterTemplate = `\/\/ Code generated by adapter-generator. DO NOT EDIT.\n\npackage adapter\n\nimport (\n\t\"fmt\"\n\t\"github.com\/gogo\/protobuf\/proto\"\n\t\"github.com\/ligato\/cn-infra\/datasync\"\n\t. \"github.com\/ligato\/cn-infra\/kvscheduler\/api\"\n\n {{- range $i, $path := .Imports }}\n\t\"{{ $path }}\"\n\t{{- end }}\n)\n\n\/\/\/\/\/\/\/\/\/\/ type-safe key-value pair with metadata \/\/\/\/\/\/\/\/\/\/\n\ntype {{ .DescriptorName }}KVWithMetadata struct {\n\tKey string\n\tValue {{ .ValueT }}\n\tMetadata {{ .MetadataT }}\n\tOrigin ValueOrigin\n}\n\n\/\/\/\/\/\/\/\/\/\/ type-safe Descriptor structure \/\/\/\/\/\/\/\/\/\/\n\ntype {{ .DescriptorName }}Descriptor struct {\n\tName string\n\tKeySelector KeySelector\n\tValueTypeName string\n\tKeyLabel func(key string) string\n\tValueComparator func(key string, v1, v2 {{ .ValueT }}) bool\n\tNBKeyPrefix string\n\tWithMetadata bool\n\tMetadataMapFactory MetadataMapFactory\n\tAdd func(key string, value {{ .ValueT }}) (metadata {{ .MetadataT }}, err error)\n\tDelete func(key string, value {{ .ValueT }}, metadata {{ .MetadataT }}) error\n\tModify func(key string, oldValue, newValue {{ .ValueT }}, oldMetadata {{ .MetadataT }}) (newMetadata {{ .MetadataT }}, err error)\n\tModifyWithRecreate func(key string, oldValue, newValue {{ .ValueT }}, metadata {{ .MetadataT }}) bool\n\tUpdate func(key string, value {{ .ValueT }}, metadata {{ .MetadataT }}) error\n\tIsRetriableFailure func(err error) bool\n\tDependencies func(key string, value {{ .ValueT }}) []Dependency\n\tDerivedValues func(key string, value {{ .ValueT }}) []KeyValuePair\n\tDump func(correlate []{{ .DescriptorName }}KVWithMetadata) ([]{{ .DescriptorName }}KVWithMetadata, error)\n\tDumpDependencies []string \/* descriptor name *\/\n}\n\n\/\/\/\/\/\/\/\/\/\/ Descriptor adapter \/\/\/\/\/\/\/\/\/\/\n\ntype {{ .DescriptorName }}DescriptorAdapter struct {\n\tdescriptor *{{ .DescriptorName }}Descriptor\n}\n\nfunc New{{ .DescriptorName }}Descriptor(typedDescriptor *{{ .DescriptorName }}Descriptor) *KVDescriptor {\n\tadapter := &{{ .DescriptorName }}DescriptorAdapter{descriptor: typedDescriptor}\n\tdescriptor := &KVDescriptor{\n\t\tName: typedDescriptor.Name,\n KeySelector: typedDescriptor.KeySelector,\n ValueTypeName: typedDescriptor.ValueTypeName,\n\t\tKeyLabel: typedDescriptor.KeyLabel,\n\t\tNBKeyPrefix: typedDescriptor.NBKeyPrefix,\n\t\tWithMetadata: typedDescriptor.WithMetadata,\n MetadataMapFactory: typedDescriptor.MetadataMapFactory,\n\t\tIsRetriableFailure: typedDescriptor.IsRetriableFailure,\n\t\tDumpDependencies: typedDescriptor.DumpDependencies,\n\t}\n\tif typedDescriptor.ValueComparator != nil {\n\t\tdescriptor.ValueComparator = adapter.ValueComparator\n\t}\n\tif typedDescriptor.Add != nil {\n\t\tdescriptor.Add = adapter.Add\n\t}\n\tif typedDescriptor.Delete != nil {\n\t\tdescriptor.Delete = adapter.Delete\n\t}\n\tif typedDescriptor.Modify != nil {\n\t\tdescriptor.Modify = adapter.Modify\n\t}\n\tif typedDescriptor.ModifyWithRecreate != nil {\n\t\tdescriptor.ModifyWithRecreate = adapter.ModifyWithRecreate\n\t}\n\tif typedDescriptor.Update != nil {\n\t\tdescriptor.Update = adapter.Update\n\t}\n\tif typedDescriptor.Dependencies != nil {\n\t\tdescriptor.Dependencies = adapter.Dependencies\n\t}\n\tif typedDescriptor.DerivedValues != nil {\n\t\tdescriptor.DerivedValues = adapter.DerivedValues\n\t}\n\tif typedDescriptor.Dump != nil {\n\t\tdescriptor.Dump = adapter.Dump\n\t}\n\treturn descriptor\n}\n\nfunc (da *{{ .DescriptorName }}DescriptorAdapter) ValueComparator(key string, v1, v2 proto.Message) bool {\n\ttypedV1, err1 := cast{{ .DescriptorName }}Value(key, v1)\n\ttypedV2, err1 := cast{{ .DescriptorName }}Value(key, v2)\n\tif err1 != nil || err2 != nil {\n\t\treturn false\n\t}\n\treturn da.descriptor.ValueComparator(key, typedV1, typedV2)\n}\n\nfunc (da *{{ .DescriptorName }}DescriptorAdapter) Add(key string, value proto.Message) (metadata Metadata, err error) {\n\ttypedValue, err := cast{{ .DescriptorName }}Value(key, value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn da.descriptor.Add(key, typedValue)\n}\n\nfunc (da *{{ .DescriptorName }}DescriptorAdapter) Modify(key string, oldValue, newValue proto.Message, oldMetadata Metadata) (newMetadata Metadata, err error) {\n\toldTypedValue, err := cast{{ .DescriptorName }}Value(key, oldValue)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnewTypedValue, err := cast{{ .DescriptorName }}Value(key, newValue)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttypedOldMetadata, err := cast{{ .DescriptorName }}Metadata(key, oldMetadata)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn da.descriptor.Modify(key, oldTypedValue, newTypedValue, typedOldMetadata)\n}\n\nfunc (da *{{ .DescriptorName }}DescriptorAdapter) Delete(key string, value proto.Message, metadata Metadata) error {\n\ttypedValue, err := cast{{ .DescriptorName }}Value(key, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttypedMetadata, err := cast{{ .DescriptorName }}Metadata(key, metadata)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn da.descriptor.Delete(key, typedValue, typedMetadata)\n}\n\nfunc (da *{{ .DescriptorName }}DescriptorAdapter) ModifyWithRecreate(key string, oldValue, newValue proto.Message, metadata Metadata) bool {\n\toldTypedValue, err := cast{{ .DescriptorName }}Value(key, oldValue)\n\tif err != nil {\n\t\treturn true\n\t}\n\tnewTypedValue, err := cast{{ .DescriptorName }}Value(key, newValue)\n\tif err != nil {\n\t\treturn true\n\t}\n\ttypedMetadata, err := cast{{ .DescriptorName }}Metadata(key, metadata)\n\tif err != nil {\n\t\treturn true\n\t}\n\treturn da.descriptor.ModifyWithRecreate(key, oldTypedValue, newTypedValue, typedMetadata)\n}\n\nfunc (da *{{ .DescriptorName }}DescriptorAdapter) Update(key string, value proto.Message, metadata Metadata) error {\n\ttypedValue, err := cast{{ .DescriptorName }}Value(key, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttypedMetadata, err := cast{{ .DescriptorName }}Metadata(key, metadata)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn da.descriptor.Update(key, typedValue, typedMetadata)\n}\n\nfunc (da *{{ .DescriptorName }}DescriptorAdapter) Dependencies(key string, value proto.Message) []Dependency {\n\ttypedValue, err := cast{{ .DescriptorName }}Value(key, value)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn da.descriptor.Dependencies(key, typedValue)\n}\n\nfunc (da *{{ .DescriptorName }}DescriptorAdapter) DerivedValues(key string, value proto.Message) []KeyValuePair {\n\ttypedValue, err := cast{{ .DescriptorName }}Value(key, value)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn da.descriptor.DerivedValues(key, typedValue)\n}\n\nfunc (da *{{ .DescriptorName }}DescriptorAdapter) Dump(correlate []KVWithMetadata) ([]KVWithMetadata, error) {\n\tvar correlateWithType []{{ .DescriptorName }}KVWithMetadata\n\tfor _, kvpair := range correlate {\n\t\ttypedValue, err := cast{{ .DescriptorName }}Value(kvpair.Key, kvpair.Value)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\ttypedMetadata, err := cast{{ .DescriptorName }}Metadata(kvpair.Key, kvpair.Metadata)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tcorrelateWithType = append(correlateWithType,\n\t\t\t{{ .DescriptorName }}KVWithMetadata{\n\t\t\t\tKey: kvpair.Key,\n\t\t\t\tValue: typedValue,\n\t\t\t\tMetadata: typedMetadata,\n\t\t\t\tOrigin: kvpair.Origin,\n\t\t\t})\n\t}\n\n\ttypedDump, err := da.descriptor.Dump(correlateWithType)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar dump []KVWithMetadata\n\tfor _, typedKVWithMetadata := range typedDump {\n\t\tkvWithMetadata := KVWithMetadata{\n\t\t\tKey: typedKVWithMetadata.Key,\n\t\t\tMetadata: typedKVWithMetadata.Metadata,\n\t\t\tOrigin: typedKVWithMetadata.Origin,\n\t\t\t}\n\t\tkvWithMetadata.Value = typedKVWithMetadata.Value\n\t\tdump = append(dump, kvWithMetadata)\n\t}\n\treturn dump, err\n}\n\n\/\/\/\/\/\/\/\/\/\/ Helper methods \/\/\/\/\/\/\/\/\/\/\n\nfunc cast{{ .DescriptorName }}Value(key string, value proto.Message) ({{ .ValueT }}, error) {\n\ttypedValue, ok := value.({{ .ValueT }})\n\tif !ok {\n\t\treturn nil, ErrInvalidValueType(key, value)\n\t}\n\treturn typedValue, nil\n}\n\nfunc cast{{ .DescriptorName }}Metadata(key string, metadata Metadata) ({{ .MetadataT }}, error) {\n\tif metadata == nil {\n\t\treturn nil, nil\n\t}\n\ttypedMetadata, ok := metadata.({{ .MetadataT }})\n\tif !ok {\n\t\treturn nil, ErrInvalidMetadataType(key)\n\t}\n\treturn typedMetadata, nil\n}\n`\n<commit_msg>One more descriptor adapter template fix.<commit_after>\/\/ Copyright (c) 2018 Cisco and\/or its affiliates.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at:\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nconst adapterTemplate = `\/\/ Code generated by adapter-generator. DO NOT EDIT.\n\npackage adapter\n\nimport (\n\t\"github.com\/gogo\/protobuf\/proto\"\n\t. \"github.com\/ligato\/cn-infra\/kvscheduler\/api\"\n\n {{- range $i, $path := .Imports }}\n\t\"{{ $path }}\"\n\t{{- end }}\n)\n\n\/\/\/\/\/\/\/\/\/\/ type-safe key-value pair with metadata \/\/\/\/\/\/\/\/\/\/\n\ntype {{ .DescriptorName }}KVWithMetadata struct {\n\tKey string\n\tValue {{ .ValueT }}\n\tMetadata {{ .MetadataT }}\n\tOrigin ValueOrigin\n}\n\n\/\/\/\/\/\/\/\/\/\/ type-safe Descriptor structure \/\/\/\/\/\/\/\/\/\/\n\ntype {{ .DescriptorName }}Descriptor struct {\n\tName string\n\tKeySelector KeySelector\n\tValueTypeName string\n\tKeyLabel func(key string) string\n\tValueComparator func(key string, v1, v2 {{ .ValueT }}) bool\n\tNBKeyPrefix string\n\tWithMetadata bool\n\tMetadataMapFactory MetadataMapFactory\n\tAdd func(key string, value {{ .ValueT }}) (metadata {{ .MetadataT }}, err error)\n\tDelete func(key string, value {{ .ValueT }}, metadata {{ .MetadataT }}) error\n\tModify func(key string, oldValue, newValue {{ .ValueT }}, oldMetadata {{ .MetadataT }}) (newMetadata {{ .MetadataT }}, err error)\n\tModifyWithRecreate func(key string, oldValue, newValue {{ .ValueT }}, metadata {{ .MetadataT }}) bool\n\tUpdate func(key string, value {{ .ValueT }}, metadata {{ .MetadataT }}) error\n\tIsRetriableFailure func(err error) bool\n\tDependencies func(key string, value {{ .ValueT }}) []Dependency\n\tDerivedValues func(key string, value {{ .ValueT }}) []KeyValuePair\n\tDump func(correlate []{{ .DescriptorName }}KVWithMetadata) ([]{{ .DescriptorName }}KVWithMetadata, error)\n\tDumpDependencies []string \/* descriptor name *\/\n}\n\n\/\/\/\/\/\/\/\/\/\/ Descriptor adapter \/\/\/\/\/\/\/\/\/\/\n\ntype {{ .DescriptorName }}DescriptorAdapter struct {\n\tdescriptor *{{ .DescriptorName }}Descriptor\n}\n\nfunc New{{ .DescriptorName }}Descriptor(typedDescriptor *{{ .DescriptorName }}Descriptor) *KVDescriptor {\n\tadapter := &{{ .DescriptorName }}DescriptorAdapter{descriptor: typedDescriptor}\n\tdescriptor := &KVDescriptor{\n\t\tName: typedDescriptor.Name,\n KeySelector: typedDescriptor.KeySelector,\n ValueTypeName: typedDescriptor.ValueTypeName,\n\t\tKeyLabel: typedDescriptor.KeyLabel,\n\t\tNBKeyPrefix: typedDescriptor.NBKeyPrefix,\n\t\tWithMetadata: typedDescriptor.WithMetadata,\n MetadataMapFactory: typedDescriptor.MetadataMapFactory,\n\t\tIsRetriableFailure: typedDescriptor.IsRetriableFailure,\n\t\tDumpDependencies: typedDescriptor.DumpDependencies,\n\t}\n\tif typedDescriptor.ValueComparator != nil {\n\t\tdescriptor.ValueComparator = adapter.ValueComparator\n\t}\n\tif typedDescriptor.Add != nil {\n\t\tdescriptor.Add = adapter.Add\n\t}\n\tif typedDescriptor.Delete != nil {\n\t\tdescriptor.Delete = adapter.Delete\n\t}\n\tif typedDescriptor.Modify != nil {\n\t\tdescriptor.Modify = adapter.Modify\n\t}\n\tif typedDescriptor.ModifyWithRecreate != nil {\n\t\tdescriptor.ModifyWithRecreate = adapter.ModifyWithRecreate\n\t}\n\tif typedDescriptor.Update != nil {\n\t\tdescriptor.Update = adapter.Update\n\t}\n\tif typedDescriptor.Dependencies != nil {\n\t\tdescriptor.Dependencies = adapter.Dependencies\n\t}\n\tif typedDescriptor.DerivedValues != nil {\n\t\tdescriptor.DerivedValues = adapter.DerivedValues\n\t}\n\tif typedDescriptor.Dump != nil {\n\t\tdescriptor.Dump = adapter.Dump\n\t}\n\treturn descriptor\n}\n\nfunc (da *{{ .DescriptorName }}DescriptorAdapter) ValueComparator(key string, v1, v2 proto.Message) bool {\n\ttypedV1, err1 := cast{{ .DescriptorName }}Value(key, v1)\n\ttypedV2, err2 := cast{{ .DescriptorName }}Value(key, v2)\n\tif err1 != nil || err2 != nil {\n\t\treturn false\n\t}\n\treturn da.descriptor.ValueComparator(key, typedV1, typedV2)\n}\n\nfunc (da *{{ .DescriptorName }}DescriptorAdapter) Add(key string, value proto.Message) (metadata Metadata, err error) {\n\ttypedValue, err := cast{{ .DescriptorName }}Value(key, value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn da.descriptor.Add(key, typedValue)\n}\n\nfunc (da *{{ .DescriptorName }}DescriptorAdapter) Modify(key string, oldValue, newValue proto.Message, oldMetadata Metadata) (newMetadata Metadata, err error) {\n\toldTypedValue, err := cast{{ .DescriptorName }}Value(key, oldValue)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnewTypedValue, err := cast{{ .DescriptorName }}Value(key, newValue)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttypedOldMetadata, err := cast{{ .DescriptorName }}Metadata(key, oldMetadata)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn da.descriptor.Modify(key, oldTypedValue, newTypedValue, typedOldMetadata)\n}\n\nfunc (da *{{ .DescriptorName }}DescriptorAdapter) Delete(key string, value proto.Message, metadata Metadata) error {\n\ttypedValue, err := cast{{ .DescriptorName }}Value(key, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttypedMetadata, err := cast{{ .DescriptorName }}Metadata(key, metadata)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn da.descriptor.Delete(key, typedValue, typedMetadata)\n}\n\nfunc (da *{{ .DescriptorName }}DescriptorAdapter) ModifyWithRecreate(key string, oldValue, newValue proto.Message, metadata Metadata) bool {\n\toldTypedValue, err := cast{{ .DescriptorName }}Value(key, oldValue)\n\tif err != nil {\n\t\treturn true\n\t}\n\tnewTypedValue, err := cast{{ .DescriptorName }}Value(key, newValue)\n\tif err != nil {\n\t\treturn true\n\t}\n\ttypedMetadata, err := cast{{ .DescriptorName }}Metadata(key, metadata)\n\tif err != nil {\n\t\treturn true\n\t}\n\treturn da.descriptor.ModifyWithRecreate(key, oldTypedValue, newTypedValue, typedMetadata)\n}\n\nfunc (da *{{ .DescriptorName }}DescriptorAdapter) Update(key string, value proto.Message, metadata Metadata) error {\n\ttypedValue, err := cast{{ .DescriptorName }}Value(key, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\ttypedMetadata, err := cast{{ .DescriptorName }}Metadata(key, metadata)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn da.descriptor.Update(key, typedValue, typedMetadata)\n}\n\nfunc (da *{{ .DescriptorName }}DescriptorAdapter) Dependencies(key string, value proto.Message) []Dependency {\n\ttypedValue, err := cast{{ .DescriptorName }}Value(key, value)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn da.descriptor.Dependencies(key, typedValue)\n}\n\nfunc (da *{{ .DescriptorName }}DescriptorAdapter) DerivedValues(key string, value proto.Message) []KeyValuePair {\n\ttypedValue, err := cast{{ .DescriptorName }}Value(key, value)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn da.descriptor.DerivedValues(key, typedValue)\n}\n\nfunc (da *{{ .DescriptorName }}DescriptorAdapter) Dump(correlate []KVWithMetadata) ([]KVWithMetadata, error) {\n\tvar correlateWithType []{{ .DescriptorName }}KVWithMetadata\n\tfor _, kvpair := range correlate {\n\t\ttypedValue, err := cast{{ .DescriptorName }}Value(kvpair.Key, kvpair.Value)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\ttypedMetadata, err := cast{{ .DescriptorName }}Metadata(kvpair.Key, kvpair.Metadata)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tcorrelateWithType = append(correlateWithType,\n\t\t\t{{ .DescriptorName }}KVWithMetadata{\n\t\t\t\tKey: kvpair.Key,\n\t\t\t\tValue: typedValue,\n\t\t\t\tMetadata: typedMetadata,\n\t\t\t\tOrigin: kvpair.Origin,\n\t\t\t})\n\t}\n\n\ttypedDump, err := da.descriptor.Dump(correlateWithType)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar dump []KVWithMetadata\n\tfor _, typedKVWithMetadata := range typedDump {\n\t\tkvWithMetadata := KVWithMetadata{\n\t\t\tKey: typedKVWithMetadata.Key,\n\t\t\tMetadata: typedKVWithMetadata.Metadata,\n\t\t\tOrigin: typedKVWithMetadata.Origin,\n\t\t\t}\n\t\tkvWithMetadata.Value = typedKVWithMetadata.Value\n\t\tdump = append(dump, kvWithMetadata)\n\t}\n\treturn dump, err\n}\n\n\/\/\/\/\/\/\/\/\/\/ Helper methods \/\/\/\/\/\/\/\/\/\/\n\nfunc cast{{ .DescriptorName }}Value(key string, value proto.Message) ({{ .ValueT }}, error) {\n\ttypedValue, ok := value.({{ .ValueT }})\n\tif !ok {\n\t\treturn nil, ErrInvalidValueType(key, value)\n\t}\n\treturn typedValue, nil\n}\n\nfunc cast{{ .DescriptorName }}Metadata(key string, metadata Metadata) ({{ .MetadataT }}, error) {\n\tif metadata == nil {\n\t\treturn nil, nil\n\t}\n\ttypedMetadata, ok := metadata.({{ .MetadataT }})\n\tif !ok {\n\t\treturn nil, ErrInvalidMetadataType(key)\n\t}\n\treturn typedMetadata, nil\n}\n`\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage model\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n)\n\nconst (\n\t\/\/ AlertNameLabel is the name of the label containing the an alert's name.\n\tAlertNameLabel = \"alertname\"\n\n\t\/\/ ExportedLabelPrefix is the prefix to prepend to the label names present in\n\t\/\/ exported metrics if a label of the same name is added by the server.\n\tExportedLabelPrefix = \"exported_\"\n\n\t\/\/ MetricNameLabel is the label name indicating the metric name of a\n\t\/\/ timeseries.\n\tMetricNameLabel = \"__name__\"\n\n\t\/\/ SchemeLabel is the name of the label that holds the scheme on which to\n\t\/\/ scrape a target.\n\tSchemeLabel = \"__scheme__\"\n\n\t\/\/ AddressLabel is the name of the label that holds the address of\n\t\/\/ a scrape target.\n\tAddressLabel = \"__address__\"\n\n\t\/\/ MetricsPathLabel is the name of the label that holds the path on which to\n\t\/\/ scrape a target.\n\tMetricsPathLabel = \"__metrics_path__\"\n\n\t\/\/ ReservedLabelPrefix is a prefix which is not legal in user-supplied\n\t\/\/ label names.\n\tReservedLabelPrefix = \"__\"\n\n\t\/\/ MetaLabelPrefix is a prefix for labels that provide meta information.\n\t\/\/ Labels with this prefix are used for intermediate label processing and\n\t\/\/ will not be attached to time series.\n\tMetaLabelPrefix = \"__meta_\"\n\n\t\/\/ TmpLabelPrefix is a prefix for temporary labels as part of relabelling.\n\t\/\/ Labels with this prefix are used for intermediate label processing and\n\t\/\/ will not be attached to time series. This is reserved for use in\n\t\/\/ Prometheus configuration files by users.\n\tTmpLabelPrefix = \"__tmp_\"\n\n\t\/\/ ParamLabelPrefix is a prefix for labels that provide URL parameters\n\t\/\/ used to scrape a target.\n\tParamLabelPrefix = \"__param_\"\n\n\t\/\/ JobLabel is the label name indicating the job from which a timeseries\n\t\/\/ was scraped.\n\tJobLabel = \"job\"\n\n\t\/\/ InstanceLabel is the label name used for the instance label.\n\tInstanceLabel = \"instance\"\n\n\t\/\/ BucketLabel is used for the label that defines the upper bound of a\n\t\/\/ bucket of a histogram (\"le\" -> \"less or equal\").\n\tBucketLabel = \"le\"\n\n\t\/\/ QuantileLabel is used for the label that defines the quantile in a\n\t\/\/ summary.\n\tQuantileLabel = \"quantile\"\n)\n\n\/\/ LabelNameRE is a regular expression matching valid label names. Note that the\n\/\/ IsValid method of LabelName performs the same check but faster than a match\n\/\/ with this regular expression.\nvar LabelNameRE = regexp.MustCompile(\"^[a-zA-Z_][a-zA-Z0-9_]*$\")\n\n\/\/ A LabelName is a key for a LabelSet or Metric. It has a value associated\n\/\/ therewith.\ntype LabelName string\n\n\/\/ IsValid is true iff the label name matches the pattern of LabelNameRE. This\n\/\/ method, however, does not use LabelNameRE for the check but a much faster\n\/\/ hardcoded implementation.\nfunc (ln LabelName) IsValid() bool {\n\tif len(ln) == 0 {\n\t\treturn false\n\t}\n\tfor i, b := range ln {\n\t\tif !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ UnmarshalYAML implements the yaml.Unmarshaler interface.\nfunc (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {\n\tvar s string\n\tif err := unmarshal(&s); err != nil {\n\t\treturn err\n\t}\n\tif !LabelName(s).IsValid() {\n\t\treturn fmt.Errorf(\"%q is not a valid label name\", s)\n\t}\n\t*ln = LabelName(s)\n\treturn nil\n}\n\n\/\/ UnmarshalJSON implements the json.Unmarshaler interface.\nfunc (ln *LabelName) UnmarshalJSON(b []byte) error {\n\tvar s string\n\tif err := json.Unmarshal(b, &s); err != nil {\n\t\treturn err\n\t}\n\tif !LabelName(s).IsValid() {\n\t\treturn fmt.Errorf(\"%q is not a valid label name\", s)\n\t}\n\t*ln = LabelName(s)\n\treturn nil\n}\n\n\/\/ LabelNames is a sortable LabelName slice. In implements sort.Interface.\ntype LabelNames []LabelName\n\nfunc (l LabelNames) Len() int {\n\treturn len(l)\n}\n\nfunc (l LabelNames) Less(i, j int) bool {\n\treturn l[i] < l[j]\n}\n\nfunc (l LabelNames) Swap(i, j int) {\n\tl[i], l[j] = l[j], l[i]\n}\n\nfunc (l LabelNames) String() string {\n\tlabelStrings := make([]string, 0, len(l))\n\tfor _, label := range l {\n\t\tlabelStrings = append(labelStrings, string(label))\n\t}\n\treturn strings.Join(labelStrings, \", \")\n}\n\n\/\/ A LabelValue is an associated value for a LabelName.\ntype LabelValue string\n\n\/\/ IsValid returns true iff the string is a valid UTF8.\nfunc (lv LabelValue) IsValid() bool {\n\treturn utf8.ValidString(string(lv))\n}\n\n\/\/ LabelValues is a sortable LabelValue slice. It implements sort.Interface.\ntype LabelValues []LabelValue\n\nfunc (l LabelValues) Len() int {\n\treturn len(l)\n}\n\nfunc (l LabelValues) Less(i, j int) bool {\n\treturn string(l[i]) < string(l[j])\n}\n\nfunc (l LabelValues) Swap(i, j int) {\n\tl[i], l[j] = l[j], l[i]\n}\n\n\/\/ LabelPair pairs a name with a value.\ntype LabelPair struct {\n\tName LabelName\n\tValue LabelValue\n}\n\n\/\/ LabelPairs is a sortable slice of LabelPair pointers. It implements\n\/\/ sort.Interface.\ntype LabelPairs []*LabelPair\n\nfunc (l LabelPairs) Len() int {\n\treturn len(l)\n}\n\nfunc (l LabelPairs) Less(i, j int) bool {\n\tswitch {\n\tcase l[i].Name > l[j].Name:\n\t\treturn false\n\tcase l[i].Name < l[j].Name:\n\t\treturn true\n\tcase l[i].Value > l[j].Value:\n\t\treturn false\n\tcase l[i].Value < l[j].Value:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc (l LabelPairs) Swap(i, j int) {\n\tl[i], l[j] = l[j], l[i]\n}\n<commit_msg>Add scrape interval meta label<commit_after>\/\/ Copyright 2013 The Prometheus Authors\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage model\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\t\"unicode\/utf8\"\n)\n\nconst (\n\t\/\/ AlertNameLabel is the name of the label containing the an alert's name.\n\tAlertNameLabel = \"alertname\"\n\n\t\/\/ ExportedLabelPrefix is the prefix to prepend to the label names present in\n\t\/\/ exported metrics if a label of the same name is added by the server.\n\tExportedLabelPrefix = \"exported_\"\n\n\t\/\/ MetricNameLabel is the label name indicating the metric name of a\n\t\/\/ timeseries.\n\tMetricNameLabel = \"__name__\"\n\n\t\/\/ SchemeLabel is the name of the label that holds the scheme on which to\n\t\/\/ scrape a target.\n\tSchemeLabel = \"__scheme__\"\n\n\t\/\/ AddressLabel is the name of the label that holds the address of\n\t\/\/ a scrape target.\n\tAddressLabel = \"__address__\"\n\n\t\/\/ MetricsPathLabel is the name of the label that holds the path on which to\n\t\/\/ scrape a target.\n\tMetricsPathLabel = \"__metrics_path__\"\n\n\t\/\/ ScrapeIntervalLabel is the name of the label that holds the scrape interval\n\t\/\/ used to scrape a target.\n\tScrapeIntervalLabel = \"__scrape_interval__\"\n\n\t\/\/ ScrapeTimeoutLabel is the name of the label that holds the scrape\n\t\/\/ timeout used to scrape a target.\n\tScrapeTimeoutLabel = \"__scrape_timeout__\"\n\n\t\/\/ ReservedLabelPrefix is a prefix which is not legal in user-supplied\n\t\/\/ label names.\n\tReservedLabelPrefix = \"__\"\n\n\t\/\/ MetaLabelPrefix is a prefix for labels that provide meta information.\n\t\/\/ Labels with this prefix are used for intermediate label processing and\n\t\/\/ will not be attached to time series.\n\tMetaLabelPrefix = \"__meta_\"\n\n\t\/\/ TmpLabelPrefix is a prefix for temporary labels as part of relabelling.\n\t\/\/ Labels with this prefix are used for intermediate label processing and\n\t\/\/ will not be attached to time series. This is reserved for use in\n\t\/\/ Prometheus configuration files by users.\n\tTmpLabelPrefix = \"__tmp_\"\n\n\t\/\/ ParamLabelPrefix is a prefix for labels that provide URL parameters\n\t\/\/ used to scrape a target.\n\tParamLabelPrefix = \"__param_\"\n\n\t\/\/ JobLabel is the label name indicating the job from which a timeseries\n\t\/\/ was scraped.\n\tJobLabel = \"job\"\n\n\t\/\/ InstanceLabel is the label name used for the instance label.\n\tInstanceLabel = \"instance\"\n\n\t\/\/ BucketLabel is used for the label that defines the upper bound of a\n\t\/\/ bucket of a histogram (\"le\" -> \"less or equal\").\n\tBucketLabel = \"le\"\n\n\t\/\/ QuantileLabel is used for the label that defines the quantile in a\n\t\/\/ summary.\n\tQuantileLabel = \"quantile\"\n)\n\n\/\/ LabelNameRE is a regular expression matching valid label names. Note that the\n\/\/ IsValid method of LabelName performs the same check but faster than a match\n\/\/ with this regular expression.\nvar LabelNameRE = regexp.MustCompile(\"^[a-zA-Z_][a-zA-Z0-9_]*$\")\n\n\/\/ A LabelName is a key for a LabelSet or Metric. It has a value associated\n\/\/ therewith.\ntype LabelName string\n\n\/\/ IsValid is true iff the label name matches the pattern of LabelNameRE. This\n\/\/ method, however, does not use LabelNameRE for the check but a much faster\n\/\/ hardcoded implementation.\nfunc (ln LabelName) IsValid() bool {\n\tif len(ln) == 0 {\n\t\treturn false\n\t}\n\tfor i, b := range ln {\n\t\tif !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/ UnmarshalYAML implements the yaml.Unmarshaler interface.\nfunc (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {\n\tvar s string\n\tif err := unmarshal(&s); err != nil {\n\t\treturn err\n\t}\n\tif !LabelName(s).IsValid() {\n\t\treturn fmt.Errorf(\"%q is not a valid label name\", s)\n\t}\n\t*ln = LabelName(s)\n\treturn nil\n}\n\n\/\/ UnmarshalJSON implements the json.Unmarshaler interface.\nfunc (ln *LabelName) UnmarshalJSON(b []byte) error {\n\tvar s string\n\tif err := json.Unmarshal(b, &s); err != nil {\n\t\treturn err\n\t}\n\tif !LabelName(s).IsValid() {\n\t\treturn fmt.Errorf(\"%q is not a valid label name\", s)\n\t}\n\t*ln = LabelName(s)\n\treturn nil\n}\n\n\/\/ LabelNames is a sortable LabelName slice. In implements sort.Interface.\ntype LabelNames []LabelName\n\nfunc (l LabelNames) Len() int {\n\treturn len(l)\n}\n\nfunc (l LabelNames) Less(i, j int) bool {\n\treturn l[i] < l[j]\n}\n\nfunc (l LabelNames) Swap(i, j int) {\n\tl[i], l[j] = l[j], l[i]\n}\n\nfunc (l LabelNames) String() string {\n\tlabelStrings := make([]string, 0, len(l))\n\tfor _, label := range l {\n\t\tlabelStrings = append(labelStrings, string(label))\n\t}\n\treturn strings.Join(labelStrings, \", \")\n}\n\n\/\/ A LabelValue is an associated value for a LabelName.\ntype LabelValue string\n\n\/\/ IsValid returns true iff the string is a valid UTF8.\nfunc (lv LabelValue) IsValid() bool {\n\treturn utf8.ValidString(string(lv))\n}\n\n\/\/ LabelValues is a sortable LabelValue slice. It implements sort.Interface.\ntype LabelValues []LabelValue\n\nfunc (l LabelValues) Len() int {\n\treturn len(l)\n}\n\nfunc (l LabelValues) Less(i, j int) bool {\n\treturn string(l[i]) < string(l[j])\n}\n\nfunc (l LabelValues) Swap(i, j int) {\n\tl[i], l[j] = l[j], l[i]\n}\n\n\/\/ LabelPair pairs a name with a value.\ntype LabelPair struct {\n\tName LabelName\n\tValue LabelValue\n}\n\n\/\/ LabelPairs is a sortable slice of LabelPair pointers. It implements\n\/\/ sort.Interface.\ntype LabelPairs []*LabelPair\n\nfunc (l LabelPairs) Len() int {\n\treturn len(l)\n}\n\nfunc (l LabelPairs) Less(i, j int) bool {\n\tswitch {\n\tcase l[i].Name > l[j].Name:\n\t\treturn false\n\tcase l[i].Name < l[j].Name:\n\t\treturn true\n\tcase l[i].Value > l[j].Value:\n\t\treturn false\n\tcase l[i].Value < l[j].Value:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc (l LabelPairs) Swap(i, j int) {\n\tl[i], l[j] = l[j], l[i]\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage agent\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"launchpad.net\/goyaml\"\n\t\"launchpad.net\/juju-core\/state\"\n\t\"launchpad.net\/juju-core\/state\/api\"\n\t\"launchpad.net\/juju-core\/utils\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n)\n\n\/\/ Conf holds information for a given agent.\ntype Conf struct {\n\t\/\/ DataDir specifies the path of the data directory used by all\n\t\/\/ agents\n\tDataDir string\n\n\t\/\/ StateServerCert and StateServerKey hold the state server\n\t\/\/ certificate and private key in PEM format.\n\tStateServerCert []byte `yaml:\",omitempty\"`\n\tStateServerKey []byte `yaml:\",omitempty\"`\n\n\tMongoPort int `yaml:\",omitempty\"`\n\tAPIPort int `yaml:\",omitempty\"`\n\n\t\/\/ OldPassword specifies a password that should be\n\t\/\/ used to connect to the state if StateInfo.Password\n\t\/\/ is blank or invalid.\n\tOldPassword string\n\n\t\/\/ MachineNonce is set at provisioning\/bootstrap time and used to\n\t\/\/ ensure the agent is running on the correct instance.\n\tMachineNonce string\n\n\t\/\/ StateInfo specifies how the agent should connect to the\n\t\/\/ state. The password may be empty if an old password is\n\t\/\/ specified, or when bootstrapping.\n\tStateInfo *state.Info `yaml:\",omitempty\"`\n\n\t\/\/ OldAPIPassword specifies a password that should\n\t\/\/ be used to connect to the API if APIInfo.Password\n\t\/\/ is blank or invalid.\n\tOldAPIPassword string\n\n\t\/\/ APIInfo specifies how the agent should connect to the\n\t\/\/ state through the API.\n\tAPIInfo *api.Info `yaml:\",omitempty\"`\n}\n\n\/\/ ReadConf reads configuration data for the given\n\/\/ entity from the given data directory.\nfunc ReadConf(dataDir, tag string) (*Conf, error) {\n\tdir := Dir(dataDir, tag)\n\tdata, err := ioutil.ReadFile(path.Join(dir, \"agent.conf\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar c Conf\n\tif err := goyaml.Unmarshal(data, &c); err != nil {\n\t\treturn nil, err\n\t}\n\tc.DataDir = dataDir\n\tif err := c.Check(); err != nil {\n\t\treturn nil, err\n\t}\n\tif c.StateInfo != nil {\n\t\tc.StateInfo.Tag = tag\n\t}\n\tif c.APIInfo != nil {\n\t\tc.APIInfo.Tag = tag\n\t}\n\treturn &c, nil\n}\n\nfunc requiredError(what string) error {\n\treturn fmt.Errorf(\"%s not found in configuration\", what)\n}\n\n\/\/ File returns the path of the given file in the agent's directory.\nfunc (c *Conf) File(name string) string {\n\treturn path.Join(c.Dir(), name)\n}\n\nfunc (c *Conf) confFile() string {\n\treturn c.File(\"agent.conf\")\n}\n\n\/\/ Tag returns the tag of the entity on whose behalf the state connection will\n\/\/ be made.\nfunc (c *Conf) Tag() string {\n\tif c.StateInfo != nil {\n\t\treturn c.StateInfo.Tag\n\t}\n\treturn c.APIInfo.Tag\n}\n\n\/\/ Dir returns the agent's directory.\nfunc (c *Conf) Dir() string {\n\treturn Dir(c.DataDir, c.Tag())\n}\n\n\/\/ Check checks that the configuration has all the required elements.\nfunc (c *Conf) Check() error {\n\tif c.DataDir == \"\" {\n\t\treturn requiredError(\"data directory\")\n\t}\n\tif c.StateInfo == nil && c.APIInfo == nil {\n\t\treturn requiredError(\"state info or API info\")\n\t}\n\tif c.StateInfo != nil {\n\t\tif c.StateInfo.Tag == \"\" {\n\t\t\treturn requiredError(\"state entity tag\")\n\t\t}\n\t\tif err := checkAddrs(c.StateInfo.Addrs, \"state server address\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(c.StateInfo.CACert) == 0 {\n\t\t\treturn requiredError(\"state CA certificate\")\n\t\t}\n\t}\n\t\/\/ TODO(rog) make APIInfo mandatory\n\tif c.APIInfo != nil {\n\t\tif c.APIInfo.Tag == \"\" {\n\t\t\treturn requiredError(\"API entity tag\")\n\t\t}\n\t\tif err := checkAddrs(c.APIInfo.Addrs, \"API server address\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(c.APIInfo.CACert) == 0 {\n\t\t\treturn requiredError(\"API CA certficate\")\n\t\t}\n\t}\n\tif c.StateInfo != nil && c.APIInfo != nil && c.StateInfo.Tag != c.APIInfo.Tag {\n\t\treturn fmt.Errorf(\"mismatched entity tags\")\n\t}\n\treturn nil\n}\n\nvar validAddr = regexp.MustCompile(\"^.+:[0-9]+$\")\n\nfunc checkAddrs(addrs []string, what string) error {\n\tif len(addrs) == 0 {\n\t\treturn requiredError(what)\n\t}\n\tfor _, a := range addrs {\n\t\tif !validAddr.MatchString(a) {\n\t\t\treturn fmt.Errorf(\"invalid %s %q\", what, a)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Write writes the agent configuration.\nfunc (c *Conf) Write() error {\n\tif err := c.Check(); err != nil {\n\t\treturn err\n\t}\n\tdata, err := goyaml.Marshal(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := os.MkdirAll(c.Dir(), 0755); err != nil {\n\t\treturn err\n\t}\n\tf := c.File(\"agent.conf-new\")\n\tif err := ioutil.WriteFile(f, data, 0600); err != nil {\n\t\treturn err\n\t}\n\tif err := os.Rename(f, c.confFile()); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ WriteCommands returns shell commands to write the agent\n\/\/ configuration. It returns an error if the configuration does not\n\/\/ have all the right elements.\nfunc (c *Conf) WriteCommands() ([]string, error) {\n\tif err := c.Check(); err != nil {\n\t\treturn nil, err\n\t}\n\tdata, err := goyaml.Marshal(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar cmds []string\n\taddCmd := func(f string, a ...interface{}) {\n\t\tcmds = append(cmds, fmt.Sprintf(f, a...))\n\t}\n\tf := utils.ShQuote(c.confFile())\n\taddCmd(\"mkdir -p %s\", utils.ShQuote(c.Dir()))\n\taddCmd(\"echo %s > %s\", utils.ShQuote(string(data)), f)\n\taddCmd(\"chmod %o %s\", 0600, f)\n\treturn cmds, nil\n}\n\n\/\/ OpenAPIState tries to open the state using the given Conf. If it\n\/\/ returns a non-empty newPassword, the password used to connect\n\/\/ to the state should be changed accordingly - the caller should write the\n\/\/ configuration with StateInfo.Password set to newPassword, then\n\/\/ set the entity's password accordingly.\nfunc (c *Conf) OpenAPIState() (st *api.State, newPassword string, err error) {\n\tinfo := *c.APIInfo\n\tif info.Password != \"\" {\n\t\tst, err := api.Open(&info)\n\t\tif err == nil {\n\t\t\treturn st, \"\", nil\n\t\t}\n\t\tif !state.IsUnauthorizedError(err) {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\t\/\/ Access isn't authorized even though we have a password\n\t\t\/\/ This can happen if we crash after saving the\n\t\t\/\/ password but before changing it, so we'll try again\n\t\t\/\/ with the old password.\n\t}\n\tinfo.Password = c.OldPassword\n\tst, err = api.Open(&info)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\t\/\/ We've succeeded in connecting with the old password, so\n\t\/\/ we can now change it to something more private.\n\tpassword, err := utils.RandomPassword()\n\tif err != nil {\n\t\tst.Close()\n\t\treturn nil, \"\", err\n\t}\n\treturn st, password, nil\n}\n\n\/\/ OpenState tries to open the state using the given Conf.\nfunc (c *Conf) OpenState() (st *state.State, err error) {\n\treturn state.Open(c.StateInfo, state.DefaultDialOpts())\n}\n<commit_msg>environs\/agent: revert spurious changes<commit_after>\/\/ Copyright 2013 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage agent\n\nimport (\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"launchpad.net\/goyaml\"\n\t\"launchpad.net\/juju-core\/state\"\n\t\"launchpad.net\/juju-core\/state\/api\"\n\t\"launchpad.net\/juju-core\/utils\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n)\n\n\/\/ Conf holds information for a given agent.\ntype Conf struct {\n\t\/\/ DataDir specifies the path of the data directory used by all\n\t\/\/ agents\n\tDataDir string\n\n\t\/\/ StateServerCert and StateServerKey hold the state server\n\t\/\/ certificate and private key in PEM format.\n\tStateServerCert []byte `yaml:\",omitempty\"`\n\tStateServerKey []byte `yaml:\",omitempty\"`\n\n\tMongoPort int `yaml:\",omitempty\"`\n\tAPIPort int `yaml:\",omitempty\"`\n\n\t\/\/ OldPassword specifies a password that should be\n\t\/\/ used to connect to the state if StateInfo.Password\n\t\/\/ is blank or invalid.\n\tOldPassword string\n\n\t\/\/ MachineNonce is set at provisioning\/bootstrap time and used to\n\t\/\/ ensure the agent is running on the correct instance.\n\tMachineNonce string\n\n\t\/\/ StateInfo specifies how the agent should connect to the\n\t\/\/ state. The password may be empty if an old password is\n\t\/\/ specified, or when bootstrapping.\n\tStateInfo *state.Info `yaml:\",omitempty\"`\n\n\t\/\/ OldAPIPassword specifies a password that should\n\t\/\/ be used to connect to the API if APIInfo.Password\n\t\/\/ is blank or invalid.\n\tOldAPIPassword string\n\n\t\/\/ APIInfo specifies how the agent should connect to the\n\t\/\/ state through the API.\n\tAPIInfo *api.Info `yaml:\",omitempty\"`\n}\n\n\/\/ ReadConf reads configuration data for the given\n\/\/ entity from the given data directory.\nfunc ReadConf(dataDir, tag string) (*Conf, error) {\n\tdir := Dir(dataDir, tag)\n\tdata, err := ioutil.ReadFile(path.Join(dir, \"agent.conf\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar c Conf\n\tif err := goyaml.Unmarshal(data, &c); err != nil {\n\t\treturn nil, err\n\t}\n\tc.DataDir = dataDir\n\tif err := c.Check(); err != nil {\n\t\treturn nil, err\n\t}\n\tif c.StateInfo != nil {\n\t\tc.StateInfo.Tag = tag\n\t}\n\tif c.APIInfo != nil {\n\t\tc.APIInfo.Tag = tag\n\t}\n\treturn &c, nil\n}\n\nfunc requiredError(what string) error {\n\treturn fmt.Errorf(\"%s not found in configuration\", what)\n}\n\n\/\/ File returns the path of the given file in the agent's directory.\nfunc (c *Conf) File(name string) string {\n\treturn path.Join(c.Dir(), name)\n}\n\nfunc (c *Conf) confFile() string {\n\treturn c.File(\"agent.conf\")\n}\n\n\/\/ Tag returns the tag of the entity on whose behalf the state connection will\n\/\/ be made.\nfunc (c *Conf) Tag() string {\n\tif c.StateInfo != nil {\n\t\treturn c.StateInfo.Tag\n\t}\n\treturn c.APIInfo.Tag\n}\n\n\/\/ Dir returns the agent's directory.\nfunc (c *Conf) Dir() string {\n\treturn Dir(c.DataDir, c.Tag())\n}\n\n\/\/ Check checks that the configuration has all the required elements.\nfunc (c *Conf) Check() error {\n\tif c.DataDir == \"\" {\n\t\treturn requiredError(\"data directory\")\n\t}\n\tif c.StateInfo == nil && c.APIInfo == nil {\n\t\treturn requiredError(\"state info or API info\")\n\t}\n\tif c.StateInfo != nil {\n\t\tif c.StateInfo.Tag == \"\" {\n\t\t\treturn requiredError(\"state entity tag\")\n\t\t}\n\t\tif err := checkAddrs(c.StateInfo.Addrs, \"state server address\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(c.StateInfo.CACert) == 0 {\n\t\t\treturn requiredError(\"state CA certificate\")\n\t\t}\n\t}\n\t\/\/ TODO(rog) make APIInfo mandatory\n\tif c.APIInfo != nil {\n\t\tif c.APIInfo.Tag == \"\" {\n\t\t\treturn requiredError(\"API entity tag\")\n\t\t}\n\t\tif err := checkAddrs(c.APIInfo.Addrs, \"API server address\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(c.APIInfo.CACert) == 0 {\n\t\t\treturn requiredError(\"API CA certficate\")\n\t\t}\n\t}\n\tif c.StateInfo != nil && c.APIInfo != nil && c.StateInfo.Tag != c.APIInfo.Tag {\n\t\treturn fmt.Errorf(\"mismatched entity tags\")\n\t}\n\treturn nil\n}\n\nvar validAddr = regexp.MustCompile(\"^.+:[0-9]+$\")\n\nfunc checkAddrs(addrs []string, what string) error {\n\tif len(addrs) == 0 {\n\t\treturn requiredError(what)\n\t}\n\tfor _, a := range addrs {\n\t\tif !validAddr.MatchString(a) {\n\t\t\treturn fmt.Errorf(\"invalid %s %q\", what, a)\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ Write writes the agent configuration.\nfunc (c *Conf) Write() error {\n\tif err := c.Check(); err != nil {\n\t\treturn err\n\t}\n\tdata, err := goyaml.Marshal(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := os.MkdirAll(c.Dir(), 0755); err != nil {\n\t\treturn err\n\t}\n\tf := c.File(\"agent.conf-new\")\n\tif err := ioutil.WriteFile(f, data, 0600); err != nil {\n\t\treturn err\n\t}\n\tif err := os.Rename(f, c.confFile()); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n\/\/ WriteCommands returns shell commands to write the agent\n\/\/ configuration. It returns an error if the configuration does not\n\/\/ have all the right elements.\nfunc (c *Conf) WriteCommands() ([]string, error) {\n\tif err := c.Check(); err != nil {\n\t\treturn nil, err\n\t}\n\tdata, err := goyaml.Marshal(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar cmds []string\n\taddCmd := func(f string, a ...interface{}) {\n\t\tcmds = append(cmds, fmt.Sprintf(f, a...))\n\t}\n\tf := utils.ShQuote(c.confFile())\n\taddCmd(\"mkdir -p %s\", utils.ShQuote(c.Dir()))\n\taddCmd(\"echo %s > %s\", utils.ShQuote(string(data)), f)\n\taddCmd(\"chmod %o %s\", 0600, f)\n\treturn cmds, nil\n}\n\n\/\/ OpenState tries to open the state using the given Conf. If it\n\/\/ returns a non-empty newPassword, the password used to connect\n\/\/ to the state should be changed accordingly - the caller should write the\n\/\/ configuration with StateInfo.Password set to newPassword, then\n\/\/ set the entity's password accordingly.\nfunc (c *Conf) OpenState() (st *state.State, newPassword string, err error) {\n\tinfo := *c.StateInfo\n\topts := state.DefaultDialOpts()\n\tif info.Password != \"\" {\n\t\tst, err := state.Open(&info, opts)\n\t\tif err == nil {\n\t\t\treturn st, \"\", nil\n\t\t}\n\t\tif !state.IsUnauthorizedError(err) {\n\t\t\treturn nil, \"\", err\n\t\t}\n\t\t\/\/ Access isn't authorized even though we have a password\n\t\t\/\/ This can happen if we crash after saving the\n\t\t\/\/ password but before changing it, so we'll try again\n\t\t\/\/ with the old password.\n\t}\n\tinfo.Password = c.OldPassword\n\tst, err = state.Open(&info, opts)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\t\/\/ We've succeeded in connecting with the old password, so\n\t\/\/ we can now change it to something more private.\n\tpassword, err := utils.RandomPassword()\n\tif err != nil {\n\t\tst.Close()\n\t\treturn nil, \"\", err\n\t}\n\treturn st, password, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ App Engine godoc Playground functionality.\n\n\/\/ +build appengine\n\npackage main\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n\n\t\"appengine\"\n\t\"appengine\/urlfetch\"\n)\n\nfunc bounceToPlayground(w http.ResponseWriter, req *http.Request) {\n\tc := appengine.NewContext(req)\n\tclient := urlfetch.Client(c)\n\turl := playgroundBaseURL + req.URL.Path\n\tdefer req.Body.Close()\n\tresp, err := client.Post(url, req.Header.Get(\"Content-type\"), req.Body)\n\tif err != nil {\n\t\thttp.Error(w, \"Internal Server Error\", 500)\n\t\tc.Errorf(\"making POST request:\", err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tif _, err := io.Copy(w, resp.Body); err != nil {\n\t\thttp.Error(w, \"Internal Server Error\", 500)\n\t\tc.Errorf(\"making POST request:\", err)\n\t}\n}\n<commit_msg>cmd\/godoc: fix format strings.<commit_after>\/\/ Copyright 2012 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ App Engine godoc Playground functionality.\n\n\/\/ +build appengine\n\npackage main\n\nimport (\n\t\"io\"\n\t\"net\/http\"\n\n\t\"appengine\"\n\t\"appengine\/urlfetch\"\n)\n\nfunc bounceToPlayground(w http.ResponseWriter, req *http.Request) {\n\tc := appengine.NewContext(req)\n\tclient := urlfetch.Client(c)\n\turl := playgroundBaseURL + req.URL.Path\n\tdefer req.Body.Close()\n\tresp, err := client.Post(url, req.Header.Get(\"Content-type\"), req.Body)\n\tif err != nil {\n\t\thttp.Error(w, \"Internal Server Error\", 500)\n\t\tc.Errorf(\"making POST request: %v\", err)\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tif _, err := io.Copy(w, resp.Body); err != nil {\n\t\thttp.Error(w, \"Internal Server Error\", 500)\n\t\tc.Errorf(\"making POST request: %v\", err)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package lifecycle_test\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/cloudfoundry-incubator\/garden\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/types\"\n)\n\nvar dockerRegistryRootFSPath = os.Getenv(\"GARDEN_DOCKER_REGISTRY_TEST_ROOTFS\")\n\nvar _ = Describe(\"Rootfs container create parameter\", func() {\n\tvar container garden.Container\n\tvar args []string\n\n\tBeforeEach(func() {\n\t\tcontainer = nil\n\t\targs = []string{}\n\t})\n\n\tJustBeforeEach(func() {\n\t\tclient = startGarden(args...)\n\t})\n\n\tAfterEach(func() {\n\t\tif container != nil {\n\t\t\tExpect(client.Destroy(container.Handle())).To(Succeed())\n\t\t}\n\t})\n\n\tContext(\"without a default rootfs\", func() {\n\t\tBeforeEach(func() {\n\t\t\targs = []string{\"--rootfs\", \"\"}\n\t\t})\n\n\t\tIt(\"without a rootfs in container spec, the container creation fails\", func() {\n\t\t\tvar err error\n\n\t\t\tcontainer, err = client.Create(garden.ContainerSpec{RootFSPath: \"\"})\n\t\t\tΩ(err).Should(HaveOccurred())\n\t\t\tΩ(err).Should(MatchError(ContainSubstring(\n\t\t\t\t\"RootFSPath: is a required parameter, since no default rootfs was provided to the server. To provide a default rootfs, use the --rootfs flag on startup.\",\n\t\t\t)))\n\t\t})\n\n\t\tIt(\"with a rootfs in container spec, the container is created successfully\", func() {\n\t\t\tvar err error\n\n\t\t\tcontainer, err = client.Create(garden.ContainerSpec{RootFSPath: os.Getenv(\"GARDEN_TEST_ROOTFS\")})\n\t\t\tΩ(err).ShouldNot(HaveOccurred())\n\t\t})\n\t})\n\n\tContext(\"with a default rootfs\", func() {\n\t\tIt(\"the container is created successfully\", func() {\n\t\t\tvar err error\n\n\t\t\tcontainer, err = client.Create(garden.ContainerSpec{RootFSPath: \"\"})\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t})\n\t})\n\n\tContext(\"with a docker rootfs URI\", func() {\n\t\tContext(\"not containing a host\", func() {\n\t\t\tIt(\"the container is created successfully\", func() {\n\t\t\t\tvar err error\n\n\t\t\t\tcontainer, err = client.Create(garden.ContainerSpec{RootFSPath: \"docker:\/\/\/busybox\"})\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t})\n\t\t})\n\n\t\tContext(\"containing a host\", func() {\n\t\t\tContext(\"which is valid\", func() {\n\t\t\t\tIt(\"the container is created successfully\", func() {\n\t\t\t\t\tvar err error\n\n\t\t\t\t\tcontainer, err = client.Create(garden.ContainerSpec{RootFSPath: \"docker:\/\/index.docker.io\/busybox\"})\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"which is invalid\", func() {\n\t\t\t\tIt(\"the container is not created successfully\", func() {\n\t\t\t\t\tvar err error\n\n\t\t\t\t\tcontainer, err = client.Create(garden.ContainerSpec{RootFSPath: \"docker:\/\/xindex.docker.io\/busybox\"})\n\t\t\t\t\tExpect(err.Error()).To(MatchRegexp(\"could not resolve\"))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"which is insecure\", func() {\n\t\t\t\tvar dockerRegistry garden.Container\n\n\t\t\t\tdockerRegistryIP := \"10.0.0.1\"\n\t\t\t\tdockerRegistryPort := \"5001\"\n\n\t\t\t\tif dockerRegistryRootFSPath == \"\" {\n\t\t\t\t\tlog.Println(\"GARDEN_DOCKER_REGISTRY_TEST_ROOTFS undefined; skipping\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tJustBeforeEach(func() {\n\t\t\t\t\tdockerRegistry = startDockerRegistry(dockerRegistryIP, dockerRegistryPort)\n\t\t\t\t})\n\n\t\t\t\tAfterEach(func() {\n\t\t\t\t\tif dockerRegistry != nil {\n\t\t\t\t\t\tExpect(client.Destroy(dockerRegistry.Handle())).To(Succeed())\n\t\t\t\t\t}\n\t\t\t\t})\n\n\t\t\t\tContext(\"when the host is listed in -insecureDockerRegistryList\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\targs = []string{\n\t\t\t\t\t\t\t\"-insecureDockerRegistryList\", dockerRegistryIP + \":\" + dockerRegistryPort,\n\t\t\t\t\t\t\t\"-allowHostAccess=true\",\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"creates the container successfully \", func() {\n\t\t\t\t\t\t_, err := client.Create(garden.ContainerSpec{\n\t\t\t\t\t\t\tRootFSPath: fmt.Sprintf(\"docker:\/\/%s:%s\/busybox\", dockerRegistryIP, dockerRegistryPort),\n\t\t\t\t\t\t})\n\t\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when the host is NOT listed in -insecureDockerRegistryList\", func() {\n\t\t\t\t\tIt(\"fails, and suggests the -insecureDockerRegistryList flag\", func() {\n\t\t\t\t\t\t_, err := client.Create(garden.ContainerSpec{\n\t\t\t\t\t\t\tRootFSPath: fmt.Sprintf(\"docker:\/\/%s:%s\/busybox\", dockerRegistryIP, dockerRegistryPort),\n\t\t\t\t\t\t})\n\n\t\t\t\t\t\tExpect(err).To(MatchError(ContainSubstring(\n\t\t\t\t\t\t\t\"Registry %s:%s is missing from -insecureDockerRegistryList ([])\", dockerRegistryIP, dockerRegistryPort,\n\t\t\t\t\t\t)))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n\nfunc startDockerRegistry(dockerRegistryIP string, dockerRegistryPort string) garden.Container {\n\tdockerRegistry, err := client.Create(\n\t\tgarden.ContainerSpec{\n\t\t\tRootFSPath: dockerRegistryRootFSPath,\n\t\t\tNetwork: dockerRegistryIP,\n\t\t},\n\t)\n\tExpect(err).ToNot(HaveOccurred())\n\n\t_, err = dockerRegistry.Run(garden.ProcessSpec{\n\t\tUser: \"vcap\",\n\t\tEnv: []string{\n\t\t\t\"DOCKER_REGISTRY_CONFIG=\/docker-registry\/config\/config_sample.yml\",\n\t\t\tfmt.Sprintf(\"REGISTRY_PORT=%s\", dockerRegistryPort),\n\t\t\t\"STANDALONE=true\",\n\t\t\t\"MIRROR_SOURCE=https:\/\/registry-1.docker.io\",\n\t\t\t\"MIRROR_SOURCE_INDEX=https:\/\/index.docker.io\",\n\t\t},\n\t\tPath: \"docker-registry\",\n\t}, garden.ProcessIO{Stdout: GinkgoWriter, Stderr: GinkgoWriter})\n\tExpect(err).ToNot(HaveOccurred())\n\n\tEventually(\n\t\tfmt.Sprintf(\"http:\/\/%s:%s\/_ping\", dockerRegistryIP, dockerRegistryPort),\n\t\t\"10s\",\n\t).Should(RespondToGETWith(200))\n\n\treturn dockerRegistry\n}\n\ntype statusMatcher struct {\n\texpectedStatus int\n\n\thttpError error\n\tactualStatus int\n}\n\nfunc RespondToGETWith(expected int) types.GomegaMatcher {\n\treturn &statusMatcher{expected, nil, 200}\n}\n\nfunc (m *statusMatcher) Match(actual interface{}) (success bool, err error) {\n\tresponse, err := http.Get(fmt.Sprintf(\"%s\", actual))\n\tif err != nil {\n\t\tm.httpError = err\n\t\treturn false, nil\n\t}\n\n\tm.httpError = nil\n\tm.actualStatus = response.StatusCode\n\treturn response.StatusCode == m.expectedStatus, nil\n}\n\nfunc (m *statusMatcher) FailureMessage(actual interface{}) string {\n\tif m.httpError != nil {\n\t\treturn fmt.Sprintf(\"Expected http request to have status %d but got error: %s\", m.expectedStatus, m.httpError.Error())\n\t}\n\n\treturn fmt.Sprintf(\"Expected http status code to be %d but was %d\", m.expectedStatus, m.actualStatus)\n}\n\nfunc (m *statusMatcher) NegatedFailureMessage(actual interface{}) string {\n\tif m.httpError != nil {\n\t\treturn fmt.Sprintf(\"Expected http request to have status %d, but got error: %s\", m.expectedStatus, m.httpError.Error())\n\t}\n\n\treturn fmt.Sprintf(\"Expected http status code not to be %d\", m.expectedStatus)\n}\n<commit_msg>Increase timeout to avoid flakes<commit_after>package lifecycle_test\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net\/http\"\n\t\"os\"\n\n\t\"github.com\/cloudfoundry-incubator\/garden\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\t\"github.com\/onsi\/gomega\/types\"\n)\n\nvar dockerRegistryRootFSPath = os.Getenv(\"GARDEN_DOCKER_REGISTRY_TEST_ROOTFS\")\n\nvar _ = Describe(\"Rootfs container create parameter\", func() {\n\tvar container garden.Container\n\tvar args []string\n\n\tBeforeEach(func() {\n\t\tcontainer = nil\n\t\targs = []string{}\n\t})\n\n\tJustBeforeEach(func() {\n\t\tclient = startGarden(args...)\n\t})\n\n\tAfterEach(func() {\n\t\tif container != nil {\n\t\t\tExpect(client.Destroy(container.Handle())).To(Succeed())\n\t\t}\n\t})\n\n\tContext(\"without a default rootfs\", func() {\n\t\tBeforeEach(func() {\n\t\t\targs = []string{\"--rootfs\", \"\"}\n\t\t})\n\n\t\tIt(\"without a rootfs in container spec, the container creation fails\", func() {\n\t\t\tvar err error\n\n\t\t\tcontainer, err = client.Create(garden.ContainerSpec{RootFSPath: \"\"})\n\t\t\tΩ(err).Should(HaveOccurred())\n\t\t\tΩ(err).Should(MatchError(ContainSubstring(\n\t\t\t\t\"RootFSPath: is a required parameter, since no default rootfs was provided to the server. To provide a default rootfs, use the --rootfs flag on startup.\",\n\t\t\t)))\n\t\t})\n\n\t\tIt(\"with a rootfs in container spec, the container is created successfully\", func() {\n\t\t\tvar err error\n\n\t\t\tcontainer, err = client.Create(garden.ContainerSpec{RootFSPath: os.Getenv(\"GARDEN_TEST_ROOTFS\")})\n\t\t\tΩ(err).ShouldNot(HaveOccurred())\n\t\t})\n\t})\n\n\tContext(\"with a default rootfs\", func() {\n\t\tIt(\"the container is created successfully\", func() {\n\t\t\tvar err error\n\n\t\t\tcontainer, err = client.Create(garden.ContainerSpec{RootFSPath: \"\"})\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t})\n\t})\n\n\tContext(\"with a docker rootfs URI\", func() {\n\t\tContext(\"not containing a host\", func() {\n\t\t\tIt(\"the container is created successfully\", func() {\n\t\t\t\tvar err error\n\n\t\t\t\tcontainer, err = client.Create(garden.ContainerSpec{RootFSPath: \"docker:\/\/\/busybox\"})\n\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t})\n\t\t})\n\n\t\tContext(\"containing a host\", func() {\n\t\t\tContext(\"which is valid\", func() {\n\t\t\t\tIt(\"the container is created successfully\", func() {\n\t\t\t\t\tvar err error\n\n\t\t\t\t\tcontainer, err = client.Create(garden.ContainerSpec{RootFSPath: \"docker:\/\/index.docker.io\/busybox\"})\n\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"which is invalid\", func() {\n\t\t\t\tIt(\"the container is not created successfully\", func() {\n\t\t\t\t\tvar err error\n\n\t\t\t\t\tcontainer, err = client.Create(garden.ContainerSpec{RootFSPath: \"docker:\/\/xindex.docker.io\/busybox\"})\n\t\t\t\t\tExpect(err.Error()).To(MatchRegexp(\"could not resolve\"))\n\t\t\t\t})\n\t\t\t})\n\n\t\t\tContext(\"which is insecure\", func() {\n\t\t\t\tvar dockerRegistry garden.Container\n\n\t\t\t\tdockerRegistryIP := \"10.0.0.1\"\n\t\t\t\tdockerRegistryPort := \"5001\"\n\n\t\t\t\tif dockerRegistryRootFSPath == \"\" {\n\t\t\t\t\tlog.Println(\"GARDEN_DOCKER_REGISTRY_TEST_ROOTFS undefined; skipping\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tJustBeforeEach(func() {\n\t\t\t\t\tdockerRegistry = startDockerRegistry(dockerRegistryIP, dockerRegistryPort)\n\t\t\t\t})\n\n\t\t\t\tAfterEach(func() {\n\t\t\t\t\tif dockerRegistry != nil {\n\t\t\t\t\t\tExpect(client.Destroy(dockerRegistry.Handle())).To(Succeed())\n\t\t\t\t\t}\n\t\t\t\t})\n\n\t\t\t\tContext(\"when the host is listed in -insecureDockerRegistryList\", func() {\n\t\t\t\t\tBeforeEach(func() {\n\t\t\t\t\t\targs = []string{\n\t\t\t\t\t\t\t\"-insecureDockerRegistryList\", dockerRegistryIP + \":\" + dockerRegistryPort,\n\t\t\t\t\t\t\t\"-allowHostAccess=true\",\n\t\t\t\t\t\t}\n\t\t\t\t\t})\n\n\t\t\t\t\tIt(\"creates the container successfully \", func() {\n\t\t\t\t\t\t_, err := client.Create(garden.ContainerSpec{\n\t\t\t\t\t\t\tRootFSPath: fmt.Sprintf(\"docker:\/\/%s:%s\/busybox\", dockerRegistryIP, dockerRegistryPort),\n\t\t\t\t\t\t})\n\t\t\t\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t\t\t\t})\n\t\t\t\t})\n\n\t\t\t\tContext(\"when the host is NOT listed in -insecureDockerRegistryList\", func() {\n\t\t\t\t\tIt(\"fails, and suggests the -insecureDockerRegistryList flag\", func() {\n\t\t\t\t\t\t_, err := client.Create(garden.ContainerSpec{\n\t\t\t\t\t\t\tRootFSPath: fmt.Sprintf(\"docker:\/\/%s:%s\/busybox\", dockerRegistryIP, dockerRegistryPort),\n\t\t\t\t\t\t})\n\n\t\t\t\t\t\tExpect(err).To(MatchError(ContainSubstring(\n\t\t\t\t\t\t\t\"Registry %s:%s is missing from -insecureDockerRegistryList ([])\", dockerRegistryIP, dockerRegistryPort,\n\t\t\t\t\t\t)))\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t})\n\t\t})\n\t})\n})\n\nfunc startDockerRegistry(dockerRegistryIP string, dockerRegistryPort string) garden.Container {\n\tdockerRegistry, err := client.Create(\n\t\tgarden.ContainerSpec{\n\t\t\tRootFSPath: dockerRegistryRootFSPath,\n\t\t\tNetwork: dockerRegistryIP,\n\t\t},\n\t)\n\tExpect(err).ToNot(HaveOccurred())\n\n\t_, err = dockerRegistry.Run(garden.ProcessSpec{\n\t\tUser: \"vcap\",\n\t\tEnv: []string{\n\t\t\t\"DOCKER_REGISTRY_CONFIG=\/docker-registry\/config\/config_sample.yml\",\n\t\t\tfmt.Sprintf(\"REGISTRY_PORT=%s\", dockerRegistryPort),\n\t\t\t\"STANDALONE=true\",\n\t\t\t\"MIRROR_SOURCE=https:\/\/registry-1.docker.io\",\n\t\t\t\"MIRROR_SOURCE_INDEX=https:\/\/index.docker.io\",\n\t\t},\n\t\tPath: \"docker-registry\",\n\t}, garden.ProcessIO{Stdout: GinkgoWriter, Stderr: GinkgoWriter})\n\tExpect(err).ToNot(HaveOccurred())\n\n\tEventually(\n\t\tfmt.Sprintf(\"http:\/\/%s:%s\/_ping\", dockerRegistryIP, dockerRegistryPort),\n\t\t\"60s\",\n\t).Should(RespondToGETWith(200))\n\n\treturn dockerRegistry\n}\n\ntype statusMatcher struct {\n\texpectedStatus int\n\n\thttpError error\n\tactualStatus int\n}\n\nfunc RespondToGETWith(expected int) types.GomegaMatcher {\n\treturn &statusMatcher{expected, nil, 200}\n}\n\nfunc (m *statusMatcher) Match(actual interface{}) (success bool, err error) {\n\tresponse, err := http.Get(fmt.Sprintf(\"%s\", actual))\n\tif err != nil {\n\t\tm.httpError = err\n\t\treturn false, nil\n\t}\n\n\tm.httpError = nil\n\tm.actualStatus = response.StatusCode\n\treturn response.StatusCode == m.expectedStatus, nil\n}\n\nfunc (m *statusMatcher) FailureMessage(actual interface{}) string {\n\tif m.httpError != nil {\n\t\treturn fmt.Sprintf(\"Expected http request to have status %d but got error: %s\", m.expectedStatus, m.httpError.Error())\n\t}\n\n\treturn fmt.Sprintf(\"Expected http status code to be %d but was %d\", m.expectedStatus, m.actualStatus)\n}\n\nfunc (m *statusMatcher) NegatedFailureMessage(actual interface{}) string {\n\tif m.httpError != nil {\n\t\treturn fmt.Sprintf(\"Expected http request to have status %d, but got error: %s\", m.expectedStatus, m.httpError.Error())\n\t}\n\n\treturn fmt.Sprintf(\"Expected http status code not to be %d\", m.expectedStatus)\n}\n<|endoftext|>"} {"text":"<commit_before>package dess\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\n\t\"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/urfave\/cli\"\n)\n\n\/\/ Start is the alternative 'main' function of dess.\nfunc Start(c *cli.Context) error {\n\t\/\/ First we set the global configuration\n\tsetConfig(c)\n\t\/\/ This is a channel on which we can receive events\n\tevents, err := events()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor {\n\t\tselect {\n\t\tcase msg := <-events:\n\t\t\tmsgJSON, err := json.Marshal(msg)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tmsgJSONString := string(msgJSON)\n\t\t\tlog.Printf(\"Received event: %v\", msgJSONString)\n\t\t\tlogSyslog(msgJSONString)\n\t\t}\n\t}\n}\n\n\/\/ events sregisters and starts an Event listener, and returns a channel\n\/\/ that can be used to read the events from.\nfunc events() (chan *docker.APIEvents, error) {\n\tvar listener chan *docker.APIEvents\n\tvar err error\n\n\tclient, err := DockerClient()\n\tif err != nil {\n\t\treturn listener, err\n\t}\n\n\tlistener = make(chan *docker.APIEvents, 50)\n\terr = client.AddEventListener(listener)\n\treturn listener, err\n}\n<commit_msg>replace for { select }} by range<commit_after>package dess\n\nimport (\n\t\"encoding\/json\"\n\t\"log\"\n\n\t\"github.com\/fsouza\/go-dockerclient\"\n\t\"github.com\/urfave\/cli\"\n)\n\n\/\/ Start is the alternative 'main' function of dess.\nfunc Start(c *cli.Context) error {\n\t\/\/ First we set the global configuration\n\tsetConfig(c)\n\t\/\/ This is a channel on which we can receive events\n\tevents, err := events()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor msg := range events {\n\t\tmsgJSON, err := json.Marshal(msg)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmsgJSONString := string(msgJSON)\n\t\tlog.Printf(\"Received event: %v\", msgJSONString)\n\t\tlogSyslog(msgJSONString)\n\t}\n\treturn nil\n}\n\n\/\/ events sregisters and starts an Event listener, and returns a channel\n\/\/ that can be used to read the events from.\nfunc events() (chan *docker.APIEvents, error) {\n\tvar listener chan *docker.APIEvents\n\tvar err error\n\n\tclient, err := DockerClient()\n\tif err != nil {\n\t\treturn listener, err\n\t}\n\n\tlistener = make(chan *docker.APIEvents, 50)\n\terr = client.AddEventListener(listener)\n\treturn listener, err\n}\n<|endoftext|>"} {"text":"<commit_before>package integration\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"github.com\/bitrise-io\/go-utils\/command\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc Test_VersionOutput(t *testing.T) {\n\tt.Log(\"Version\")\n\t{\n\t\tout, err := command.RunCommandAndReturnCombinedStdoutAndStderr(binPath(), \"version\")\n\t\trequire.NoError(t, err, out)\n\t\trequire.Equal(t, \"0.9.32\", out)\n\t}\n\n\tt.Log(\"Version --full\")\n\t{\n\t\tout, err := command.RunCommandAndReturnCombinedStdoutAndStderr(binPath(), \"version\", \"--full\")\n\t\trequire.NoError(t, err, out)\n\n\t\texpectedOSVersion := fmt.Sprintf(\"%s (%s)\", runtime.GOOS, runtime.GOARCH)\n\t\texpectedVersionOut := fmt.Sprintf(`version: 0.9.32\nos: %s\ngo: %s\nbuild_number: \ncommit:`, expectedOSVersion, runtime.Version())\n\n\t\trequire.Equal(t, expectedVersionOut, out)\n\t}\n}\n<commit_msg>version test fix<commit_after>package integration\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"github.com\/bitrise-io\/go-utils\/command\"\n\t\"github.com\/stretchr\/testify\/require\"\n)\n\nfunc Test_VersionOutput(t *testing.T) {\n\tt.Log(\"Version\")\n\t{\n\t\tout, err := command.RunCommandAndReturnCombinedStdoutAndStderr(binPath(), \"version\")\n\t\trequire.NoError(t, err, out)\n\t\trequire.Equal(t, \"0.9.33\", out)\n\t}\n\n\tt.Log(\"Version --full\")\n\t{\n\t\tout, err := command.RunCommandAndReturnCombinedStdoutAndStderr(binPath(), \"version\", \"--full\")\n\t\trequire.NoError(t, err, out)\n\n\t\texpectedOSVersion := fmt.Sprintf(\"%s (%s)\", runtime.GOOS, runtime.GOARCH)\n\t\texpectedVersionOut := fmt.Sprintf(`version: 0.9.33\nos: %s\ngo: %s\nbuild_number: \ncommit:`, expectedOSVersion, runtime.Version())\n\n\t\trequire.Equal(t, expectedVersionOut, out)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package logfanout\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n\t\"sync\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/gorilla\/websocket\"\n)\n\ntype LogDB interface {\n\tBuildLog(build int) ([]byte, error)\n\tAppendBuildLog(build int, log []byte) error\n}\n\ntype LogFanout struct {\n\tbuild int\n\n\tdb LogDB\n\n\tlock *sync.Mutex\n\n\tsinks []*websocket.Conn\n\n\tclosed bool\n\twaitForClosed chan struct{}\n}\n\nfunc NewLogFanout(build int, db LogDB) *LogFanout {\n\treturn &LogFanout{\n\t\tbuild: build,\n\t\tdb: db,\n\n\t\tlock: new(sync.Mutex),\n\t\twaitForClosed: make(chan struct{}),\n\t}\n}\n\nfunc (fanout *LogFanout) WriteMessage(msg *json.RawMessage) error {\n\tfanout.lock.Lock()\n\tdefer fanout.lock.Unlock()\n\n\terr := fanout.db.AppendBuildLog(fanout.build, []byte(*msg))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewSinks := []*websocket.Conn{}\n\tfor _, sink := range fanout.sinks {\n\t\terr := sink.WriteJSON(msg)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tnewSinks = append(newSinks, sink)\n\t}\n\n\tfanout.sinks = newSinks\n\n\treturn nil\n}\n\nfunc (fanout *LogFanout) Attach(sink *websocket.Conn) error {\n\tfanout.lock.Lock()\n\n\tbuildLog, err := fanout.db.BuildLog(fanout.build)\n\tif err == nil {\n\t\tdecoder := json.NewDecoder(bytes.NewBuffer(buildLog))\n\n\t\tfor {\n\t\t\tvar msg *json.RawMessage\n\t\t\terr := decoder.Decode(&msg)\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tfanout.emitBackwardsCompatible(sink, buildLog)\n\t\t\t\t}\n\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\terr = sink.WriteJSON(msg)\n\t\t\tif err != nil {\n\t\t\t\tfanout.lock.Unlock()\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif fanout.closed {\n\t\tsink.Close()\n\t} else {\n\t\tfanout.sinks = append(fanout.sinks, sink)\n\t}\n\n\tfanout.lock.Unlock()\n\n\treturn nil\n}\n\nfunc (fanout *LogFanout) Close() error {\n\tfanout.lock.Lock()\n\tdefer fanout.lock.Unlock()\n\n\tif fanout.closed {\n\t\treturn errors.New(\"close twice\")\n\t}\n\n\tfor _, sink := range fanout.sinks {\n\t\tsink.Close()\n\t}\n\n\tfanout.closed = true\n\tfanout.sinks = nil\n\n\tclose(fanout.waitForClosed)\n\n\treturn nil\n}\n\nfunc (fanout *LogFanout) emitBackwardsCompatible(sink *websocket.Conn, log []byte) {\n\terr := sink.WriteMessage(websocket.TextMessage, []byte(`{\"version\":\"0.0\"}`))\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar dangling []byte\n\tfor i := 0; i < len(log); i += 1024 {\n\t\tend := i + 1024\n\t\tif end > len(log) {\n\t\t\tend = len(log)\n\t\t}\n\n\t\ttext := append(dangling, log[i:end]...)\n\n\t\tcheckEncoding, _ := utf8.DecodeLastRune(text)\n\t\tif checkEncoding == utf8.RuneError {\n\t\t\tdangling = text\n\t\t\tcontinue\n\t\t}\n\n\t\terr := sink.WriteMessage(websocket.TextMessage, text)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tdangling = nil\n\t}\n}\n<commit_msg>remove unused channel<commit_after>package logfanout\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"errors\"\n\t\"io\"\n\t\"sync\"\n\t\"unicode\/utf8\"\n\n\t\"github.com\/gorilla\/websocket\"\n)\n\ntype LogDB interface {\n\tBuildLog(build int) ([]byte, error)\n\tAppendBuildLog(build int, log []byte) error\n}\n\ntype LogFanout struct {\n\tbuild int\n\n\tdb LogDB\n\n\tlock *sync.Mutex\n\n\tsinks []*websocket.Conn\n\n\tclosed bool\n}\n\nfunc NewLogFanout(build int, db LogDB) *LogFanout {\n\treturn &LogFanout{\n\t\tbuild: build,\n\t\tdb: db,\n\n\t\tlock: new(sync.Mutex),\n\t}\n}\n\nfunc (fanout *LogFanout) WriteMessage(msg *json.RawMessage) error {\n\tfanout.lock.Lock()\n\tdefer fanout.lock.Unlock()\n\n\terr := fanout.db.AppendBuildLog(fanout.build, []byte(*msg))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewSinks := []*websocket.Conn{}\n\tfor _, sink := range fanout.sinks {\n\t\terr := sink.WriteJSON(msg)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tnewSinks = append(newSinks, sink)\n\t}\n\n\tfanout.sinks = newSinks\n\n\treturn nil\n}\n\nfunc (fanout *LogFanout) Attach(sink *websocket.Conn) error {\n\tfanout.lock.Lock()\n\n\tbuildLog, err := fanout.db.BuildLog(fanout.build)\n\tif err == nil {\n\t\tdecoder := json.NewDecoder(bytes.NewBuffer(buildLog))\n\n\t\tfor {\n\t\t\tvar msg *json.RawMessage\n\t\t\terr := decoder.Decode(&msg)\n\t\t\tif err != nil {\n\t\t\t\tif err != io.EOF {\n\t\t\t\t\tfanout.emitBackwardsCompatible(sink, buildLog)\n\t\t\t\t}\n\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\terr = sink.WriteJSON(msg)\n\t\t\tif err != nil {\n\t\t\t\tfanout.lock.Unlock()\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif fanout.closed {\n\t\tsink.Close()\n\t} else {\n\t\tfanout.sinks = append(fanout.sinks, sink)\n\t}\n\n\tfanout.lock.Unlock()\n\n\treturn nil\n}\n\nfunc (fanout *LogFanout) Close() error {\n\tfanout.lock.Lock()\n\tdefer fanout.lock.Unlock()\n\n\tif fanout.closed {\n\t\treturn errors.New(\"close twice\")\n\t}\n\n\tfor _, sink := range fanout.sinks {\n\t\tsink.Close()\n\t}\n\n\tfanout.closed = true\n\tfanout.sinks = nil\n\n\treturn nil\n}\n\nfunc (fanout *LogFanout) emitBackwardsCompatible(sink *websocket.Conn, log []byte) {\n\terr := sink.WriteMessage(websocket.TextMessage, []byte(`{\"version\":\"0.0\"}`))\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar dangling []byte\n\tfor i := 0; i < len(log); i += 1024 {\n\t\tend := i + 1024\n\t\tif end > len(log) {\n\t\t\tend = len(log)\n\t\t}\n\n\t\ttext := append(dangling, log[i:end]...)\n\n\t\tcheckEncoding, _ := utf8.DecodeLastRune(text)\n\t\tif checkEncoding == utf8.RuneError {\n\t\t\tdangling = text\n\t\t\tcontinue\n\t\t}\n\n\t\terr := sink.WriteMessage(websocket.TextMessage, text)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tdangling = nil\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\nvar input = \"((((()(()(((((((()))(((()((((()())(())()(((()((((((()((()(()(((()(()((())))()((()()())))))))))()((((((())((()))(((((()(((((((((()()))((()(())()((())((()(()))((()))()))()(((((()(((()()))()())((()((((())()())()((((())()(()(()(((()(())(()(())(((((((())()()(((())(()(()(()(())))(()((((())((()))(((()(()()(((((()()(()(((()(((((())()))()((()(()))()((()((((())((((())(()(((())()()(()()()()()(())((((())((())(()()))()((((())))((((()())()((((())((()())((())(())(((((()((((()(((()((((())(()(((()()))()))((((((()((())()())))(((()(()))(()()(()(((()(()))((()()()())((()()()(((())())()())())())((()))(()(()))(((((()(()(())((()(())(())()((((()())()))((((())(())((())())((((()(((())(())((()()((((()((((((()(())()()(()(()()((((()))(())()())()))(())))(())))())()()(())(()))()((()(()(())()()))(()())))))(()))(()()))(())(((((()(()(()()((())()())))))((())())((())(()(())((()))(())(((()((((((((()()()(()))()()(((()))()((()()(())(())())()(()(())))(((((()(())(())(()))))())()))(()))()(()(((((((()((((())))())())())())()((((((((((((((()()((((((()()()())())()())())())(())(())))())((()())((()(()))))))()))))))))))))))))())((())((())()()))))))(((()((()(()()))((())(()()))()()())))(())))))))(()(((())))())()())))()()(())()))()(()))())((()()))))(()))))()))(()()(())))))))()(((()))))()(()))(())())))))()))((()))((()))())(())))))))))((((())()))()))()))())(())()()(())))())))(()())()))((()()(())))(())((((((()(())((()(((()(()()(())))()))))))()))()(()((()))()(()))(()(((())((((())())(())(()))))))))())))))))())())))))())))))()()(((())()(()))))))))())))))(())()()()))()))()))(()(())()()())())))))))())()(()(()))))()()()))))())(()))))()()))))()())))))(((())()()))(()))))))))))()()))))()()()))))(()())())()()())()(()))))()(()))(())))))))(((((())(())())()()))()()))(())))))()(()))))(())(()()))()())()))()))()))()))))())()()))())())))(()))(()))))))())()(((())()))))))))()))()())))())))())))()))))))))))()()))(()()))))))(())()(()))))())(()))))(()))))(()())))))())())()()))))())()))))))))(()))))()))))))()(()())))))))()))())))())))())))())))))))())(()()))))))(()())())))()())()))))))))))))))())))()(())))()))())()()(())(()()))(())))())()())(()(()(()))))())))))))))))())(()))()))()))))(())()())()())))))))))))()()))))))))))))())())))))(()())))))))))))())(())))()))))))))())())(()))()))(())))()))()()(())()))))))()((((())()))())())))))()))()))))((()())()))))())))(())))))))))))))))))()))))()()())()))()()))))())()))((()())))())))(()))(()())))))))()))()))))(())))))))(())))))())()()(()))())()))()()))))())()()))))())()))())))))))(()))))()())()))))))))(()))())))(()))()))))(())()))())())(())())())))))))((((())))))()))()))()())()(())))()))()))()())(()())()()(()())()))))())())))))(()))()))))())(()()(())))))(())()()((())())))))(())(())))))))())))))))))()(())))))))()())())())()(()))))))))(()))))))))())()()))()(()))))))()))))))())))))))(())))()()(())()())))))(((())))()((())()))())))(()()))())(())())))()(((()())))))()(()()())))()()(()()(()()))())()(()()()))())()()))()())(()))))())))))())))(())()()))))(()))))(())(()))(())))))()()))()))))())()))()()(())())))((()))())()))))))()()))))((()(()))))()()))))))())))))())(()((()())))))))))))()())())))()))(()))))))(()))(())()())))(()))))))))())()()()()))))(()())))))))((())))()))(()))(())(())()())()))))))))(())))())))(()))()()))(()()))(()))())))()(())))())((()((()(())))((())))()))))((((())())()())))(())))()))))))())(()()((())))())()(()())))))(()())()))())))))))((())())))))))(()(()))())()()(()()(((()(((()())))))()))))))()(())(()()((()()(())()()))())()())()))()())())())))))))(((())))))))()()))))))(((())()))(()()))(()()))))(()(()()((((())()())((()()))))(()(())))))()((()()()())()()((()((()()))(()))(((()()()))(((())))()(((())()))))))((()(())())))(()())(((((()(()))(()((()))(()())()))))(()(()))()(()))(())(((())(()()))))()()))(((()))))(()()()()))())))((()()()(())()))()))))()()))()))))))((((((()()()))))())((()()(((()))))(()(())(()()())())())))()(((()()))(())((())))(()))(()()()())((())())())(()))))()))()((()(())()(()()(())(()))(())()))(())(()))))(())(())())(()()(()((()()((())))((()))()((())))(((()()()()((((()))(()()))()()()(((())((())())(()()(()()()))()((())(())()))())(((()()(())))()((()()())()())(()(())())(((())(())())((())(())()(((()()))(())))((())(()())())(())((()()()((((((())))((()(((((())()))()))(())(()()))()))(())()()))(())((()()())()()(()))())()((())))()((()()())((((()())((())())())((()((()))()))((())((()()(()((()()(((())(()()))))((()((())()(((())(()((())())((())(()((((((())())()(()())()(())(((())((((((()(())(()((()()()((()()(()()()())))()()(((((()()))()((((((()))()(()(()(()(((()())((()))())()((()))(())))()))()()))())()()))())((((())(()(()))(((((((())(((()(((((()(((()()((((())(((())())))(()()()(()(()))()))((((((()))((()(((()(())((()((((()((((((())(((((())))(((()(()))))(((()(((())()((())(()((()))(((()()(((())((((()(()(((((()))(((()(((((((()(()()()(()(()(()()())(())(((((()(())())()())(()(()(()))()(()()()())(()()(()((()))()((())())()(()))((())(()))()(()))()(((()(()(()((((((()()()()())()(((((()()(((()()()((()(((((()))((((((((()()()(((((()))))))(()()()(())(()))(()()))))(())()))(((((()(((((()()(()(()())(((()))((((()((()(()(()((()(()((())))()(((()((()))((()))(((((((((()((()((()(())))()((((()((()()))((())(((()(((((()()(()(()()((()(()()()(((((((())())()())))))((((()()(()))()))(()((())()(()(((((((((()()(((()(()())(()((()())((())())((((()(((()(((()((((()((()((((()(()((((((())((((((((((((()()(()()((((((((((((((()((()()))()((((((((((((())((((()(()())((()(()(()))()(((((()()(((()()))()())(())((()(((((()((())(((((()((()(((((()))()()((((())()((((())(((((((((()(())(()(())))())(()((())(((())(())(())())(()(()(())()()((()((())()(((()(((((()(())))()(((()((())))((()()()(((()(((()((()(()(())(()((()())(()(()(((()(((((((((())(()((((()()))(()((((()()()()(((()((((((((()(()()((((((()(()()(()((()((((((((((()()(((((((()())(())))(((()()))(((((()((()()())(()()((((())((()((((()))))(())((()(()()(((()(()(((()((((()(((((()))())())(()((())()))(((()())((())((())((((()((()((((((())(()((((()()))((((((())()(()))((()(((())((((((((((()()(((((()(((((()((()()()((((())))(()))()((()(())()()((()((((((((((()((())(())(((((()(()(()()))((((()((((()()((()(((()(((((((((()(()((()((()))((((((()(((())()()((()(((((((()())))()()(()((()((()()(((()(()()()()((((()((())((((()(((((((((()(((()()(((()(()(((()(((()((())()(()((()(()(()(()))()(((()))(()((((()((())((((())((((((())(()))(()((((())((()(()((((((((()()((((((()(()(()()()(())((()((()()(((()(((((((()()((()(((((((()))(((((()(((()(()()()(()(((()((()()((())(()(((((((((()(()((()((((((()()((())()))(((((()((())()())()(((((((((((()))((((()()()()())(()()(()(()()))()))(()))(()(((()()))())(()(()))()()((())(()())()())()(()))()))(()()(()((((((())((()(((((((((((()(())()((()(()((()((()(()((()((((((((((()()())((())()(())))((())()())()(((((()(()())((((()((()(())(()))(((())()((()))(((((())(()))()()(()))(((())((((()((((()(())))(((((((()))))())()())(())((())()(()()((()(()))()(()()(()()((()())((())((()()))((((()))()()))(()()(())()()(((((()(())((()((((()))()))(()())())(((()()(()()))(())))))(()))((())(((((()((((()))()((((()))()((())(((())))(((()())))((()(()()((\"\n\nfunc finalFloor(i string) int {\n\tu := strings.Count(i, \"(\")\n\td := strings.Count(i, \")\")\n\treturn u - d\n}\n\nfunc firstBasementPos(i string) int {\n\tpos, floor := 0, 0\n\tfor i, c := range input {\n\t\tif string(c) == \"(\" {\n\t\t\tfloor++\n\t\t} else if string(c) == \")\" {\n\t\t\tfloor--\n\t\t}\n\n\t\tif floor == -1 && pos == 0 {\n\t\t\tpos = i + 1\n\t\t}\n\t}\n\treturn pos\n}\n\nfunc main() {\n\tfmt.Println(\"Part One:\", finalFloor(input))\n\tfmt.Println(\"Part Two:\", firstBasementPos(input))\n}\n<commit_msg>Return early for performance boost and catch error<commit_after>package main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n)\n\nvar input = \"((((()(()(((((((()))(((()((((()())(())()(((()((((((()((()(()(((()(()((())))()((()()())))))))))()((((((())((()))(((((()(((((((((()()))((()(())()((())((()(()))((()))()))()(((((()(((()()))()())((()((((())()())()((((())()(()(()(((()(())(()(())(((((((())()()(((())(()(()(()(())))(()((((())((()))(((()(()()(((((()()(()(((()(((((())()))()((()(()))()((()((((())((((())(()(((())()()(()()()()()(())((((())((())(()()))()((((())))((((()())()((((())((()())((())(())(((((()((((()(((()((((())(()(((()()))()))((((((()((())()())))(((()(()))(()()(()(((()(()))((()()()())((()()()(((())())()())())())((()))(()(()))(((((()(()(())((()(())(())()((((()())()))((((())(())((())())((((()(((())(())((()()((((()((((((()(())()()(()(()()((((()))(())()())()))(())))(())))())()()(())(()))()((()(()(())()()))(()())))))(()))(()()))(())(((((()(()(()()((())()())))))((())())((())(()(())((()))(())(((()((((((((()()()(()))()()(((()))()((()()(())(())())()(()(())))(((((()(())(())(()))))())()))(()))()(()(((((((()((((())))())())())())()((((((((((((((()()((((((()()()())())()())())())(())(())))())((()())((()(()))))))()))))))))))))))))())((())((())()()))))))(((()((()(()()))((())(()()))()()())))(())))))))(()(((())))())()())))()()(())()))()(()))())((()()))))(()))))()))(()()(())))))))()(((()))))()(()))(())())))))()))((()))((()))())(())))))))))((((())()))()))()))())(())()()(())))())))(()())()))((()()(())))(())((((((()(())((()(((()(()()(())))()))))))()))()(()((()))()(()))(()(((())((((())())(())(()))))))))())))))))())())))))())))))()()(((())()(()))))))))())))))(())()()()))()))()))(()(())()()())())))))))())()(()(()))))()()()))))())(()))))()()))))()())))))(((())()()))(()))))))))))()()))))()()()))))(()())())()()())()(()))))()(()))(())))))))(((((())(())())()()))()()))(())))))()(()))))(())(()()))()())()))()))()))()))))())()()))())())))(()))(()))))))())()(((())()))))))))()))()())))())))())))()))))))))))()()))(()()))))))(())()(()))))())(()))))(()))))(()())))))())())()()))))())()))))))))(()))))()))))))()(()())))))))()))())))())))())))())))))))())(()()))))))(()())())))()())()))))))))))))))())))()(())))()))())()()(())(()()))(())))())()())(()(()(()))))())))))))))))())(()))()))()))))(())()())()())))))))))))()()))))))))))))())())))))(()())))))))))))())(())))()))))))))())())(()))()))(())))()))()()(())()))))))()((((())()))())())))))()))()))))((()())()))))())))(())))))))))))))))))()))))()()())()))()()))))())()))((()())))())))(()))(()())))))))()))()))))(())))))))(())))))())()()(()))())()))()()))))())()()))))())()))())))))))(()))))()())()))))))))(()))())))(()))()))))(())()))())())(())())())))))))((((())))))()))()))()())()(())))()))()))()())(()())()()(()())()))))())())))))(()))()))))())(()()(())))))(())()()((())())))))(())(())))))))())))))))))()(())))))))()())())())()(()))))))))(()))))))))())()()))()(()))))))()))))))())))))))(())))()()(())()())))))(((())))()((())()))())))(()()))())(())())))()(((()())))))()(()()())))()()(()()(()()))())()(()()()))())()()))()())(()))))())))))())))(())()()))))(()))))(())(()))(())))))()()))()))))())()))()()(())())))((()))())()))))))()()))))((()(()))))()()))))))())))))())(()((()())))))))))))()())())))()))(()))))))(()))(())()())))(()))))))))())()()()()))))(()())))))))((())))()))(()))(())(())()())()))))))))(())))())))(()))()()))(()()))(()))())))()(())))())((()((()(())))((())))()))))((((())())()())))(())))()))))))())(()()((())))())()(()())))))(()())()))())))))))((())())))))))(()(()))())()()(()()(((()(((()())))))()))))))()(())(()()((()()(())()()))())()())()))()())())())))))))(((())))))))()()))))))(((())()))(()()))(()()))))(()(()()((((())()())((()()))))(()(())))))()((()()()())()()((()((()()))(()))(((()()()))(((())))()(((())()))))))((()(())())))(()())(((((()(()))(()((()))(()())()))))(()(()))()(()))(())(((())(()()))))()()))(((()))))(()()()()))())))((()()()(())()))()))))()()))()))))))((((((()()()))))())((()()(((()))))(()(())(()()())())())))()(((()()))(())((())))(()))(()()()())((())())())(()))))()))()((()(())()(()()(())(()))(())()))(())(()))))(())(())())(()()(()((()()((())))((()))()((())))(((()()()()((((()))(()()))()()()(((())((())())(()()(()()()))()((())(())()))())(((()()(())))()((()()())()())(()(())())(((())(())())((())(())()(((()()))(())))((())(()())())(())((()()()((((((())))((()(((((())()))()))(())(()()))()))(())()()))(())((()()())()()(()))())()((())))()((()()())((((()())((())())())((()((()))()))((())((()()(()((()()(((())(()()))))((()((())()(((())(()((())())((())(()((((((())())()(()())()(())(((())((((((()(())(()((()()()((()()(()()()())))()()(((((()()))()((((((()))()(()(()(()(((()())((()))())()((()))(())))()))()()))())()()))())((((())(()(()))(((((((())(((()(((((()(((()()((((())(((())())))(()()()(()(()))()))((((((()))((()(((()(())((()((((()((((((())(((((())))(((()(()))))(((()(((())()((())(()((()))(((()()(((())((((()(()(((((()))(((()(((((((()(()()()(()(()(()()())(())(((((()(())())()())(()(()(()))()(()()()())(()()(()((()))()((())())()(()))((())(()))()(()))()(((()(()(()((((((()()()()())()(((((()()(((()()()((()(((((()))((((((((()()()(((((()))))))(()()()(())(()))(()()))))(())()))(((((()(((((()()(()(()())(((()))((((()((()(()(()((()(()((())))()(((()((()))((()))(((((((((()((()((()(())))()((((()((()()))((())(((()(((((()()(()(()()((()(()()()(((((((())())()())))))((((()()(()))()))(()((())()(()(((((((((()()(((()(()())(()((()())((())())((((()(((()(((()((((()((()((((()(()((((((())((((((((((((()()(()()((((((((((((((()((()()))()((((((((((((())((((()(()())((()(()(()))()(((((()()(((()()))()())(())((()(((((()((())(((((()((()(((((()))()()((((())()((((())(((((((((()(())(()(())))())(()((())(((())(())(())())(()(()(())()()((()((())()(((()(((((()(())))()(((()((())))((()()()(((()(((()((()(()(())(()((()())(()(()(((()(((((((((())(()((((()()))(()((((()()()()(((()((((((((()(()()((((((()(()()(()((()((((((((((()()(((((((()())(())))(((()()))(((((()((()()())(()()((((())((()((((()))))(())((()(()()(((()(()(((()((((()(((((()))())())(()((())()))(((()())((())((())((((()((()((((((())(()((((()()))((((((())()(()))((()(((())((((((((((()()(((((()(((((()((()()()((((())))(()))()((()(())()()((()((((((((((()((())(())(((((()(()(()()))((((()((((()()((()(((()(((((((((()(()((()((()))((((((()(((())()()((()(((((((()())))()()(()((()((()()(((()(()()()()((((()((())((((()(((((((((()(((()()(((()(()(((()(((()((())()(()((()(()(()(()))()(((()))(()((((()((())((((())((((((())(()))(()((((())((()(()((((((((()()((((((()(()(()()()(())((()((()()(((()(((((((()()((()(((((((()))(((((()(((()(()()()(()(((()((()()((())(()(((((((((()(()((()((((((()()((())()))(((((()((())()())()(((((((((((()))((((()()()()())(()()(()(()()))()))(()))(()(((()()))())(()(()))()()((())(()())()())()(()))()))(()()(()((((((())((()(((((((((((()(())()((()(()((()((()(()((()((((((((((()()())((())()(())))((())()())()(((((()(()())((((()((()(())(()))(((())()((()))(((((())(()))()()(()))(((())((((()((((()(())))(((((((()))))())()())(())((())()(()()((()(()))()(()()(()()((()())((())((()()))((((()))()()))(()()(())()()(((((()(())((()((((()))()))(()())())(((()()(()()))(())))))(()))((())(((((()((((()))()((((()))()((())(((())))(((()())))((()(()()((\"\n\nfunc finalFloor(i string) int {\n\tu := strings.Count(i, \"(\")\n\td := strings.Count(i, \")\")\n\treturn u - d\n}\n\nfunc firstBasementPos(i string) (int, error) {\n\tfloor := 0\n\tfor i, c := range input {\n\t\tif string(c) == \"(\" {\n\t\t\tfloor++\n\t\t} else if string(c) == \")\" {\n\t\t\tfloor--\n\t\t}\n\n\t\tif floor == -1 {\n\t\t\treturn i + 1, nil\n\t\t}\n\t}\n\treturn 0, errors.New(\"Basement never reached!\")\n}\n\nfunc main() {\n\tfmt.Println(\"Part One:\", finalFloor(input))\n\n\tpos, err := firstBasementPos(input)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t} else {\n\t\tfmt.Println(\"Part Two:\", pos)\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright (c) 2016 Tigera, Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage etcd\n\nimport (\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/coreos\/etcd\/client\"\n\tetcd \"github.com\/coreos\/etcd\/client\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/backend\/api\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/backend\/model\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/hwm\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc newSyncer(keysAPI etcd.KeysAPI, callbacks api.SyncerCallbacks) *etcdSyncer {\n\treturn &etcdSyncer{\n\t\tkeysAPI: keysAPI,\n\t\tcallbacks: callbacks,\n\t}\n}\n\ntype etcdSyncer struct {\n\tcallbacks api.SyncerCallbacks\n\tkeysAPI etcd.KeysAPI\n\tOneShot bool\n}\n\nfunc (syn *etcdSyncer) Start() {\n\t\/\/ Start a background thread to read events from etcd. It will\n\t\/\/ queue events onto the etcdEvents channel. If it drops out of sync,\n\t\/\/ it will signal on the resyncIndex channel.\n\tlog.Info(\"Starting etcd Syncer\")\n\tetcdEvents := make(chan event, 20000)\n\ttriggerResync := make(chan uint64, 5)\n\tinitialSnapshotIndex := make(chan uint64)\n\tif !syn.OneShot {\n\t\tlog.Info(\"Syncer not in one-shot mode, starting watcher thread\")\n\t\tgo syn.watchEtcd(etcdEvents, triggerResync, initialSnapshotIndex)\n\t}\n\n\t\/\/ Start a background thread to read snapshots from etcd. It will\n\t\/\/ read a start-of-day snapshot and then wait to be signalled on the\n\t\/\/ resyncIndex channel.\n\tsnapshotUpdates := make(chan event)\n\tgo syn.readSnapshotsFromEtcd(snapshotUpdates, triggerResync, initialSnapshotIndex)\n\tgo syn.mergeUpdates(snapshotUpdates, etcdEvents)\n}\n\nconst (\n\tactionSet uint8 = iota\n\tactionDel\n\tactionSnapFinished\n)\n\n\/\/ TODO Split this into different types of struct and use a type-switch to unpack.\ntype event struct {\n\taction uint8\n\tmodifiedIndex uint64\n\tsnapshotIndex uint64\n\tkey string\n\tvalue string\n\tsnapshotStarting bool\n\tsnapshotFinished bool\n}\n\nfunc (syn *etcdSyncer) readSnapshotsFromEtcd(snapshotUpdates chan<- event, triggerResync <-chan uint64, initialSnapshotIndex chan<- uint64) {\n\tlog.Info(\"Syncer snapshot-reading thread started\")\n\tgetOpts := client.GetOptions{\n\t\tRecursive: true,\n\t\tSort: false,\n\t\tQuorum: false,\n\t}\n\tvar highestSnapshotIndex uint64\n\tvar minIndex uint64\n\n\tfor {\n\t\tif highestSnapshotIndex > 0 {\n\t\t\t\/\/ Wait for the watcher thread to tell us what index\n\t\t\t\/\/ it starts from. We need to load a snapshot with\n\t\t\t\/\/ an equal or later index, otherwise we could miss\n\t\t\t\/\/ some updates. (Since we may connect to a follower\n\t\t\t\/\/ server, it's possible, if unlikely, for us to read\n\t\t\t\/\/ a stale snapshot.)\n\t\t\tminIndex = <-triggerResync\n\t\t\tlog.Infof(\"Asked for snapshot > %v; last snapshot was %v\",\n\t\t\t\tminIndex, highestSnapshotIndex)\n\t\t\tif highestSnapshotIndex >= minIndex {\n\t\t\t\t\/\/ We've already read a newer snapshot, no\n\t\t\t\t\/\/ need to re-read.\n\t\t\t\tlog.Info(\"Snapshot already new enough\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\treadRetryLoop:\n\t\tfor {\n\t\t\tresp, err := syn.keysAPI.Get(context.Background(),\n\t\t\t\t\"\/calico\/v1\", &getOpts)\n\t\t\tif err != nil {\n\t\t\t\tif syn.OneShot {\n\t\t\t\t\t\/\/ One-shot mode is used to grab a snapshot and then\n\t\t\t\t\t\/\/ stop. We don't want to go into a retry loop.\n\t\t\t\t\tlog.Fatal(\"Failed to read snapshot from etcd: \", err)\n\t\t\t\t}\n\t\t\t\tlog.Warning(\"Error getting snapshot, retrying...\", err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\tcontinue readRetryLoop\n\t\t\t}\n\n\t\t\tif resp.Index < minIndex {\n\t\t\t\tlog.Info(\"Retrieved stale snapshot, rereading...\")\n\t\t\t\tcontinue readRetryLoop\n\t\t\t}\n\n\t\t\t\/\/ If we get here, we should have a good\n\t\t\t\/\/ snapshot. Send it to the merge thread.\n\t\t\tsendNode(resp.Node, snapshotUpdates, resp)\n\t\t\tsnapshotUpdates <- event{\n\t\t\t\taction: actionSnapFinished,\n\t\t\t\tsnapshotIndex: resp.Index,\n\t\t\t}\n\t\t\tif resp.Index > highestSnapshotIndex {\n\t\t\t\tif highestSnapshotIndex == 0 {\n\t\t\t\t\tinitialSnapshotIndex <- resp.Index\n\t\t\t\t\tclose(initialSnapshotIndex)\n\t\t\t\t}\n\t\t\t\thighestSnapshotIndex = resp.Index\n\t\t\t}\n\t\t\tbreak readRetryLoop\n\t\t}\n\t}\n}\n\nfunc sendNode(node *client.Node, snapshotUpdates chan<- event, resp *client.Response) {\n\tif !node.Dir {\n\t\tsnapshotUpdates <- event{\n\t\t\tkey: node.Key,\n\t\t\tmodifiedIndex: node.ModifiedIndex,\n\t\t\tsnapshotIndex: resp.Index,\n\t\t\tvalue: node.Value,\n\t\t\taction: actionSet,\n\t\t}\n\t} else {\n\t\tfor _, child := range node.Nodes {\n\t\t\tsendNode(child, snapshotUpdates, resp)\n\t\t}\n\t}\n}\n\nfunc (syn *etcdSyncer) watchEtcd(etcdEvents chan<- event, triggerResync chan<- uint64, initialSnapshotIndex <-chan uint64) {\n\tstart_index := <-initialSnapshotIndex\n\n\twatcherOpts := client.WatcherOptions{\n\t\tAfterIndex: start_index + 1,\n\t\tRecursive: true,\n\t}\n\twatcher := syn.keysAPI.Watcher(\"\/calico\/v1\", &watcherOpts)\n\tinSync := true\n\tfor {\n\t\tresp, err := watcher.Next(context.Background())\n\t\tif err != nil {\n\t\t\tswitch err := err.(type) {\n\t\t\tcase client.Error:\n\t\t\t\terrCode := err.Code\n\t\t\t\tif errCode == client.ErrorCodeWatcherCleared ||\n\t\t\t\t\terrCode == client.ErrorCodeEventIndexCleared {\n\t\t\t\t\tlog.Warning(\"Lost sync with etcd, restarting watcher\")\n\t\t\t\t\twatcherOpts.AfterIndex = 0\n\t\t\t\t\twatcher = syn.keysAPI.Watcher(\"\/calico\/v1\",\n\t\t\t\t\t\t&watcherOpts)\n\t\t\t\t\tinSync = false\n\t\t\t\t\t\/\/ FIXME, we'll only trigger a resync after the next event\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\tlog.Error(\"Error from etcd\", err)\n\t\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\t}\n\t\t\tcase *client.ClusterError:\n\t\t\t\tlog.Error(\"Cluster error from etcd\", err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tdefault:\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t} else {\n\t\t\tvar actionType uint8\n\t\t\tswitch resp.Action {\n\t\t\tcase \"set\", \"compareAndSwap\", \"update\", \"create\":\n\t\t\t\tactionType = actionSet\n\t\t\tcase \"delete\", \"compareAndDelete\", \"expire\":\n\t\t\t\tactionType = actionDel\n\t\t\tdefault:\n\t\t\t\tpanic(\"Unknown action type\")\n\t\t\t}\n\n\t\t\tnode := resp.Node\n\t\t\tif node.Dir && actionType == actionSet {\n\t\t\t\t\/\/ Creation of a directory, we don't care.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !inSync {\n\t\t\t\t\/\/ Tell the snapshot thread that we need a\n\t\t\t\t\/\/ new snapshot. The snapshot needs to be\n\t\t\t\t\/\/ from our index or one lower.\n\t\t\t\tsnapIdx := node.ModifiedIndex - 1\n\t\t\t\tlog.Infof(\"Asking for snapshot @ %v\",\n\t\t\t\t\tsnapIdx)\n\t\t\t\ttriggerResync <- snapIdx\n\t\t\t\tinSync = true\n\t\t\t}\n\t\t\tetcdEvents <- event{\n\t\t\t\taction: actionType,\n\t\t\t\tmodifiedIndex: node.ModifiedIndex,\n\t\t\t\tkey: resp.Node.Key,\n\t\t\t\tvalue: node.Value,\n\t\t\t\tsnapshotStarting: !inSync,\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (syn *etcdSyncer) mergeUpdates(snapshotUpdates <-chan event, watcherUpdates <-chan event) {\n\tvar e event\n\tvar minSnapshotIndex uint64\n\thwms := hwm.NewHighWatermarkTracker()\n\n\tsyn.callbacks.OnStatusUpdated(api.WaitForDatastore)\n\tfor {\n\t\tselect {\n\t\tcase e = <-snapshotUpdates:\n\t\t\tlog.Debugf(\"Snapshot update %v @ %v\", e.key, e.modifiedIndex)\n\t\tcase e = <-watcherUpdates:\n\t\t\tlog.Debugf(\"Watcher update %v @ %v\", e.key, e.modifiedIndex)\n\t\t}\n\t\tif e.snapshotStarting {\n\t\t\t\/\/ Watcher lost sync, need to track deletions until\n\t\t\t\/\/ we get a snapshot from after this index.\n\t\t\tlog.Infof(\"Watcher out-of-sync, starting to track deletions\")\n\t\t\tminSnapshotIndex = e.modifiedIndex\n\t\t\tsyn.callbacks.OnStatusUpdated(api.ResyncInProgress)\n\t\t}\n\t\tswitch e.action {\n\t\tcase actionSet:\n\t\t\tvar indexToStore uint64\n\t\t\tif e.snapshotIndex != 0 {\n\t\t\t\t\/\/ Store the snapshot index in the trie so that\n\t\t\t\t\/\/ we can scan the trie later looking for\n\t\t\t\t\/\/ prefixes that are older than the snapshot\n\t\t\t\t\/\/ (and hence must have been deleted while\n\t\t\t\t\/\/ we were out-of-sync).\n\t\t\t\tindexToStore = e.snapshotIndex\n\t\t\t} else {\n\t\t\t\tindexToStore = e.modifiedIndex\n\t\t\t}\n\t\t\toldIdx := hwms.StoreUpdate(e.key, indexToStore)\n\t\t\t\/\/log.Infof(\"%v update %v -> %v\",\n\t\t\t\/\/\te.key, oldIdx, e.modifiedIndex)\n\t\t\tif oldIdx < e.modifiedIndex {\n\t\t\t\t\/\/ Event is newer than value for that key.\n\t\t\t\t\/\/ Send the update to Felix.\n\t\t\t\tsyn.sendUpdate(e.key, &e.value, e.modifiedIndex)\n\t\t\t}\n\t\tcase actionDel:\n\t\t\tdeletedKeys := hwms.StoreDeletion(e.key,\n\t\t\t\te.modifiedIndex)\n\t\t\tlog.Debugf(\"Prefix %v deleted; %v keys\",\n\t\t\t\te.key, len(deletedKeys))\n\t\t\tsyn.sendDeletions(deletedKeys, e.modifiedIndex)\n\t\tcase actionSnapFinished:\n\t\t\tif e.snapshotIndex >= minSnapshotIndex {\n\t\t\t\t\/\/ Now in sync.\n\t\t\t\thwms.StopTrackingDeletions()\n\t\t\t\tdeletedKeys := hwms.DeleteOldKeys(e.snapshotIndex)\n\t\t\t\tlog.Infof(\"Snapshot finished at index %v; \"+\n\t\t\t\t\t\"%v keys deleted in cleanup.\",\n\t\t\t\t\te.snapshotIndex, len(deletedKeys))\n\t\t\t\tsyn.sendDeletions(deletedKeys, e.snapshotIndex)\n\t\t\t}\n\t\t\tsyn.callbacks.OnStatusUpdated(api.InSync)\n\t\t}\n\t}\n}\n\nfunc (syn *etcdSyncer) sendUpdate(key string, value *string, revision uint64) {\n\tlog.Debugf(\"Parsing etcd key %#v\", key)\n\tparsedKey := model.KeyFromDefaultPath(key)\n\tif parsedKey == nil {\n\t\tlog.Debugf(\"Failed to parse key %v\", key)\n\t\tif cb, ok := syn.callbacks.(api.SyncerParseFailCallbacks); ok {\n\t\t\tcb.ParseFailed(key, value)\n\t\t}\n\t\treturn\n\t}\n\tlog.Debugf(\"Parsed etcd key: %v\", parsedKey)\n\n\tvar parsedValue interface{}\n\tvar err error\n\tif value != nil {\n\t\tparsedValue, err = model.ParseValue(parsedKey, []byte(*value))\n\t\tif err != nil {\n\t\t\tlog.Warningf(\"Failed to parse value for %v: %#v\", key, *value)\n\t\t}\n\t\tlog.Debugf(\"Parsed value: %#v\", parsedValue)\n\t}\n\tupdates := []model.KVPair{\n\t\t{Key: parsedKey, Value: parsedValue, Revision: revision},\n\t}\n\tsyn.callbacks.OnUpdates(updates)\n}\n\nfunc (syn *etcdSyncer) sendDeletions(deletedKeys []string, revision uint64) {\n\tupdates := make([]model.KVPair, 0, len(deletedKeys))\n\tfor _, key := range deletedKeys {\n\t\tparsedKey := model.KeyFromDefaultPath(key)\n\t\tif parsedKey == nil {\n\t\t\tlog.Debugf(\"Failed to parse key %v\", key)\n\t\t\tif cb, ok := syn.callbacks.(api.SyncerParseFailCallbacks); ok {\n\t\t\t\tcb.ParseFailed(key, nil)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tupdates = append(updates, model.KVPair{\n\t\t\tKey: parsedKey,\n\t\t\tValue: nil,\n\t\t\tRevision: revision,\n\t\t})\n\t}\n\tsyn.callbacks.OnUpdates(updates)\n}\n<commit_msg>Fix that Syncer could miss the first update.<commit_after>\/\/ Copyright (c) 2016 Tigera, Inc. All rights reserved.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage etcd\n\nimport (\n\t\"time\"\n\n\tlog \"github.com\/Sirupsen\/logrus\"\n\t\"github.com\/coreos\/etcd\/client\"\n\tetcd \"github.com\/coreos\/etcd\/client\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/backend\/api\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/backend\/model\"\n\t\"github.com\/projectcalico\/libcalico-go\/lib\/hwm\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc newSyncer(keysAPI etcd.KeysAPI, callbacks api.SyncerCallbacks) *etcdSyncer {\n\treturn &etcdSyncer{\n\t\tkeysAPI: keysAPI,\n\t\tcallbacks: callbacks,\n\t}\n}\n\ntype etcdSyncer struct {\n\tcallbacks api.SyncerCallbacks\n\tkeysAPI etcd.KeysAPI\n\tOneShot bool\n}\n\nfunc (syn *etcdSyncer) Start() {\n\t\/\/ Start a background thread to read events from etcd. It will\n\t\/\/ queue events onto the etcdEvents channel. If it drops out of sync,\n\t\/\/ it will signal on the resyncIndex channel.\n\tlog.Info(\"Starting etcd Syncer\")\n\tetcdEvents := make(chan event, 20000)\n\ttriggerResync := make(chan uint64, 5)\n\tinitialSnapshotIndex := make(chan uint64)\n\tif !syn.OneShot {\n\t\tlog.Info(\"Syncer not in one-shot mode, starting watcher thread\")\n\t\tgo syn.watchEtcd(etcdEvents, triggerResync, initialSnapshotIndex)\n\t}\n\n\t\/\/ Start a background thread to read snapshots from etcd. It will\n\t\/\/ read a start-of-day snapshot and then wait to be signalled on the\n\t\/\/ resyncIndex channel.\n\tsnapshotUpdates := make(chan event)\n\tgo syn.readSnapshotsFromEtcd(snapshotUpdates, triggerResync, initialSnapshotIndex)\n\tgo syn.mergeUpdates(snapshotUpdates, etcdEvents)\n}\n\nconst (\n\tactionSet uint8 = iota\n\tactionDel\n\tactionSnapFinished\n)\n\n\/\/ TODO Split this into different types of struct and use a type-switch to unpack.\ntype event struct {\n\taction uint8\n\tmodifiedIndex uint64\n\tsnapshotIndex uint64\n\tkey string\n\tvalue string\n\tsnapshotStarting bool\n\tsnapshotFinished bool\n}\n\nfunc (syn *etcdSyncer) readSnapshotsFromEtcd(snapshotUpdates chan<- event, triggerResync <-chan uint64, initialSnapshotIndex chan<- uint64) {\n\tlog.Info(\"Syncer snapshot-reading thread started\")\n\tgetOpts := client.GetOptions{\n\t\tRecursive: true,\n\t\tSort: false,\n\t\tQuorum: false,\n\t}\n\tvar highestSnapshotIndex uint64\n\tvar minIndex uint64\n\n\tfor {\n\t\tif highestSnapshotIndex > 0 {\n\t\t\t\/\/ Wait for the watcher thread to tell us what index\n\t\t\t\/\/ it starts from. We need to load a snapshot with\n\t\t\t\/\/ an equal or later index, otherwise we could miss\n\t\t\t\/\/ some updates. (Since we may connect to a follower\n\t\t\t\/\/ server, it's possible, if unlikely, for us to read\n\t\t\t\/\/ a stale snapshot.)\n\t\t\tminIndex = <-triggerResync\n\t\t\tlog.Infof(\"Asked for snapshot > %v; last snapshot was %v\",\n\t\t\t\tminIndex, highestSnapshotIndex)\n\t\t\tif highestSnapshotIndex >= minIndex {\n\t\t\t\t\/\/ We've already read a newer snapshot, no\n\t\t\t\t\/\/ need to re-read.\n\t\t\t\tlog.Info(\"Snapshot already new enough\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\treadRetryLoop:\n\t\tfor {\n\t\t\tresp, err := syn.keysAPI.Get(context.Background(),\n\t\t\t\t\"\/calico\/v1\", &getOpts)\n\t\t\tif err != nil {\n\t\t\t\tif syn.OneShot {\n\t\t\t\t\t\/\/ One-shot mode is used to grab a snapshot and then\n\t\t\t\t\t\/\/ stop. We don't want to go into a retry loop.\n\t\t\t\t\tlog.Fatal(\"Failed to read snapshot from etcd: \", err)\n\t\t\t\t}\n\t\t\t\tlog.Warning(\"Error getting snapshot, retrying...\", err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\tcontinue readRetryLoop\n\t\t\t}\n\n\t\t\tif resp.Index < minIndex {\n\t\t\t\tlog.Info(\"Retrieved stale snapshot, rereading...\")\n\t\t\t\tcontinue readRetryLoop\n\t\t\t}\n\n\t\t\t\/\/ If we get here, we should have a good\n\t\t\t\/\/ snapshot. Send it to the merge thread.\n\t\t\tsendNode(resp.Node, snapshotUpdates, resp)\n\t\t\tsnapshotUpdates <- event{\n\t\t\t\taction: actionSnapFinished,\n\t\t\t\tsnapshotIndex: resp.Index,\n\t\t\t}\n\t\t\tif resp.Index > highestSnapshotIndex {\n\t\t\t\tif highestSnapshotIndex == 0 {\n\t\t\t\t\tinitialSnapshotIndex <- resp.Index\n\t\t\t\t\tclose(initialSnapshotIndex)\n\t\t\t\t}\n\t\t\t\thighestSnapshotIndex = resp.Index\n\t\t\t}\n\t\t\tbreak readRetryLoop\n\t\t}\n\t}\n}\n\nfunc sendNode(node *client.Node, snapshotUpdates chan<- event, resp *client.Response) {\n\tif !node.Dir {\n\t\tsnapshotUpdates <- event{\n\t\t\tkey: node.Key,\n\t\t\tmodifiedIndex: node.ModifiedIndex,\n\t\t\tsnapshotIndex: resp.Index,\n\t\t\tvalue: node.Value,\n\t\t\taction: actionSet,\n\t\t}\n\t} else {\n\t\tfor _, child := range node.Nodes {\n\t\t\tsendNode(child, snapshotUpdates, resp)\n\t\t}\n\t}\n}\n\nfunc (syn *etcdSyncer) watchEtcd(etcdEvents chan<- event, triggerResync chan<- uint64, initialSnapshotIndex <-chan uint64) {\n\tlog.Info(\"Watcher started, waiting for initial snapshot index...\")\n\tstartIndex := <-initialSnapshotIndex\n\tlog.WithField(\"index\", startIndex).Info(\"Received initial snapshot index\")\n\n\twatcherOpts := client.WatcherOptions{\n\t\tAfterIndex: startIndex,\n\t\tRecursive: true,\n\t}\n\twatcher := syn.keysAPI.Watcher(\"\/calico\/v1\", &watcherOpts)\n\tinSync := true\n\tfor {\n\t\tresp, err := watcher.Next(context.Background())\n\t\tif err != nil {\n\t\t\tswitch err := err.(type) {\n\t\t\tcase client.Error:\n\t\t\t\terrCode := err.Code\n\t\t\t\tif errCode == client.ErrorCodeWatcherCleared ||\n\t\t\t\t\terrCode == client.ErrorCodeEventIndexCleared {\n\t\t\t\t\tlog.Warning(\"Lost sync with etcd, restarting watcher\")\n\t\t\t\t\twatcherOpts.AfterIndex = 0\n\t\t\t\t\twatcher = syn.keysAPI.Watcher(\"\/calico\/v1\",\n\t\t\t\t\t\t&watcherOpts)\n\t\t\t\t\tinSync = false\n\t\t\t\t\t\/\/ FIXME, we'll only trigger a resync after the next event\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\tlog.Error(\"Error from etcd\", err)\n\t\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\t\t}\n\t\t\tcase *client.ClusterError:\n\t\t\t\tlog.Error(\"Cluster error from etcd\", err)\n\t\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tdefault:\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t} else {\n\t\t\tvar actionType uint8\n\t\t\tswitch resp.Action {\n\t\t\tcase \"set\", \"compareAndSwap\", \"update\", \"create\":\n\t\t\t\tactionType = actionSet\n\t\t\tcase \"delete\", \"compareAndDelete\", \"expire\":\n\t\t\t\tactionType = actionDel\n\t\t\tdefault:\n\t\t\t\tpanic(\"Unknown action type\")\n\t\t\t}\n\n\t\t\tnode := resp.Node\n\t\t\tif node.Dir && actionType == actionSet {\n\t\t\t\t\/\/ Creation of a directory, we don't care.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !inSync {\n\t\t\t\t\/\/ Tell the snapshot thread that we need a\n\t\t\t\t\/\/ new snapshot. The snapshot needs to be\n\t\t\t\t\/\/ from our index or one lower.\n\t\t\t\tsnapIdx := node.ModifiedIndex - 1\n\t\t\t\tlog.Infof(\"Asking for snapshot @ %v\",\n\t\t\t\t\tsnapIdx)\n\t\t\t\ttriggerResync <- snapIdx\n\t\t\t\tinSync = true\n\t\t\t}\n\t\t\tetcdEvents <- event{\n\t\t\t\taction: actionType,\n\t\t\t\tmodifiedIndex: node.ModifiedIndex,\n\t\t\t\tkey: resp.Node.Key,\n\t\t\t\tvalue: node.Value,\n\t\t\t\tsnapshotStarting: !inSync,\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (syn *etcdSyncer) mergeUpdates(snapshotUpdates <-chan event, watcherUpdates <-chan event) {\n\tvar e event\n\tvar minSnapshotIndex uint64\n\thwms := hwm.NewHighWatermarkTracker()\n\n\tsyn.callbacks.OnStatusUpdated(api.WaitForDatastore)\n\tfor {\n\t\tselect {\n\t\tcase e = <-snapshotUpdates:\n\t\t\tlog.Debugf(\"Snapshot update %v @ %v (snapshot @ %v)\", e.key, e.modifiedIndex, e.snapshotIndex)\n\t\tcase e = <-watcherUpdates:\n\t\t\tlog.Debugf(\"Watcher update %v @ %v\", e.key, e.modifiedIndex)\n\t\t}\n\t\tif e.snapshotStarting {\n\t\t\t\/\/ Watcher lost sync, need to track deletions until\n\t\t\t\/\/ we get a snapshot from after this index.\n\t\t\tlog.Infof(\"Watcher out-of-sync, starting to track deletions\")\n\t\t\tminSnapshotIndex = e.modifiedIndex\n\t\t\tsyn.callbacks.OnStatusUpdated(api.ResyncInProgress)\n\t\t}\n\t\tswitch e.action {\n\t\tcase actionSet:\n\t\t\tvar indexToStore uint64\n\t\t\tif e.snapshotIndex != 0 {\n\t\t\t\t\/\/ Store the snapshot index in the trie so that\n\t\t\t\t\/\/ we can scan the trie later looking for\n\t\t\t\t\/\/ prefixes that are older than the snapshot\n\t\t\t\t\/\/ (and hence must have been deleted while\n\t\t\t\t\/\/ we were out-of-sync).\n\t\t\t\tindexToStore = e.snapshotIndex\n\t\t\t} else {\n\t\t\t\tindexToStore = e.modifiedIndex\n\t\t\t}\n\t\t\toldIdx := hwms.StoreUpdate(e.key, indexToStore)\n\t\t\t\/\/log.Infof(\"%v update %v -> %v\",\n\t\t\t\/\/\te.key, oldIdx, e.modifiedIndex)\n\t\t\tif oldIdx < e.modifiedIndex {\n\t\t\t\t\/\/ Event is newer than value for that key.\n\t\t\t\t\/\/ Send the update to Felix.\n\t\t\t\tsyn.sendUpdate(e.key, &e.value, e.modifiedIndex)\n\t\t\t}\n\t\tcase actionDel:\n\t\t\tdeletedKeys := hwms.StoreDeletion(e.key,\n\t\t\t\te.modifiedIndex)\n\t\t\tlog.Debugf(\"Prefix %v deleted; %v keys\",\n\t\t\t\te.key, len(deletedKeys))\n\t\t\tsyn.sendDeletions(deletedKeys, e.modifiedIndex)\n\t\tcase actionSnapFinished:\n\t\t\tif e.snapshotIndex >= minSnapshotIndex {\n\t\t\t\t\/\/ Now in sync.\n\t\t\t\thwms.StopTrackingDeletions()\n\t\t\t\tdeletedKeys := hwms.DeleteOldKeys(e.snapshotIndex)\n\t\t\t\tlog.Infof(\"Snapshot finished at index %v; \"+\n\t\t\t\t\t\"%v keys deleted in cleanup.\",\n\t\t\t\t\te.snapshotIndex, len(deletedKeys))\n\t\t\t\tsyn.sendDeletions(deletedKeys, e.snapshotIndex)\n\t\t\t}\n\t\t\tsyn.callbacks.OnStatusUpdated(api.InSync)\n\t\t}\n\t}\n}\n\nfunc (syn *etcdSyncer) sendUpdate(key string, value *string, revision uint64) {\n\tlog.Debugf(\"Parsing etcd key %#v\", key)\n\tparsedKey := model.KeyFromDefaultPath(key)\n\tif parsedKey == nil {\n\t\tlog.Debugf(\"Failed to parse key %v\", key)\n\t\tif cb, ok := syn.callbacks.(api.SyncerParseFailCallbacks); ok {\n\t\t\tcb.ParseFailed(key, value)\n\t\t}\n\t\treturn\n\t}\n\tlog.Debugf(\"Parsed etcd key: %v\", parsedKey)\n\n\tvar parsedValue interface{}\n\tvar err error\n\tif value != nil {\n\t\tparsedValue, err = model.ParseValue(parsedKey, []byte(*value))\n\t\tif err != nil {\n\t\t\tlog.Warningf(\"Failed to parse value for %v: %#v\", key, *value)\n\t\t}\n\t\tlog.Debugf(\"Parsed value: %#v\", parsedValue)\n\t}\n\tupdates := []model.KVPair{\n\t\t{Key: parsedKey, Value: parsedValue, Revision: revision},\n\t}\n\tsyn.callbacks.OnUpdates(updates)\n}\n\nfunc (syn *etcdSyncer) sendDeletions(deletedKeys []string, revision uint64) {\n\tupdates := make([]model.KVPair, 0, len(deletedKeys))\n\tfor _, key := range deletedKeys {\n\t\tparsedKey := model.KeyFromDefaultPath(key)\n\t\tif parsedKey == nil {\n\t\t\tlog.Debugf(\"Failed to parse key %v\", key)\n\t\t\tif cb, ok := syn.callbacks.(api.SyncerParseFailCallbacks); ok {\n\t\t\t\tcb.ParseFailed(key, nil)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tupdates = append(updates, model.KVPair{\n\t\t\tKey: parsedKey,\n\t\t\tValue: nil,\n\t\t\tRevision: revision,\n\t\t})\n\t}\n\tsyn.callbacks.OnUpdates(updates)\n}\n<|endoftext|>"} {"text":"<commit_before>package templatehelper\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\t\"text\/template\"\n)\n\nfunc executeTemplate(text string, data interface{}) (string, error) {\n\tvar res bytes.Buffer\n\ttmpl, err := template.New(\"_test\").Funcs(FuncMap).Parse(text)\n\tif err == nil {\n\t\terr = tmpl.Execute(&res, data)\n\t}\n\treturn res.String(), err\n}\n\nfunc Test_FuncMap_Positive(t *testing.T) {\n\tt.Parallel()\n\tcases := []struct {\n\t\ttext string\n\t\texpected string\n\t}{\n\t\t\/\/ sanity checks\n\n\t\t{`{{if true}}ok{{end}}`, \"ok\"},\n\t\t{`{{if false}}ok{{end}}`, \"\"},\n\n\t\t\/\/ boolean filters\n\n\t\t{`{{if Matches \"123\" \"\\\\d+\"}}ok{{end}}`, \"ok\"},\n\t\t{`{{if Matches \"hello\" \"\\\\d+\"}}ok{{end}}`, \"\"},\n\n\t\t{`{{if Contains \"123\" \"2\"}}ok{{end}}`, \"ok\"},\n\t\t{`{{if Contains \"123\" \"4\"}}ok{{end}}`, \"\"},\n\n\t\t{`{{if ContainsAny \"123\" \"24\"}}ok{{end}}`, \"ok\"},\n\t\t{`{{if ContainsAny \"123\" \"45\"}}ok{{end}}`, \"\"},\n\n\t\t{`{{if EqualFold \"HellO\" \"hello\"}}ok{{end}}`, \"ok\"},\n\t\t{`{{if EqualFold \"ПривеТ\" \"привет\"}}ok{{end}}`, \"ok\"},\n\t\t{`{{if EqualFold \"good\" \"goed\"}}ok{{end}}`, \"\"},\n\n\t\t{`{{if HasPrefix \"hello\" \"he\"}}ok{{end}}`, \"ok\"},\n\t\t{`{{if HasPrefix \"hello\" \"lo\"}}ok{{end}}`, \"\"},\n\n\t\t{`{{if HasSuffix \"hello\" \"lo\"}}ok{{end}}`, \"ok\"},\n\t\t{`{{if HasSuffix \"hello\" \"he\"}}ok{{end}}`, \"\"},\n\t}\n\n\tfor _, tcase := range cases {\n\t\ttcase := tcase \/\/ important, needed for running in parallel\n\t\tt.Run(tcase.text, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tresult, err := executeTemplate(tcase.text, nil)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"error executing template: %s\", err.Error())\n\t\t\t\tt.FailNow()\n\t\t\t}\n\t\t\tif result != tcase.expected {\n\t\t\t\tt.Errorf(\"expected `%s` but actually `%s`\", tcase.expected, result)\n\t\t\t\tt.FailNow()\n\t\t\t}\n\t\t})\n\t}\n}\n<commit_msg>test string filters<commit_after>package templatehelper\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\t\"text\/template\"\n)\n\nfunc executeTemplate(text string, data interface{}) (string, error) {\n\tvar res bytes.Buffer\n\ttmpl, err := template.New(\"_test\").Funcs(FuncMap).Parse(text)\n\tif err == nil {\n\t\terr = tmpl.Execute(&res, data)\n\t}\n\treturn res.String(), err\n}\n\nfunc Test_FuncMap_Positive(t *testing.T) {\n\tt.Parallel()\n\tcases := []struct {\n\t\ttext string\n\t\texpected string\n\t}{\n\t\t\/\/ sanity checks\n\n\t\t{`{{if true}}ok{{end}}`, \"ok\"},\n\t\t{`{{if false}}ok{{end}}`, \"\"},\n\n\t\t\/\/ boolean filters\n\n\t\t{`{{if Matches \"123\" \"\\\\d+\"}}ok{{end}}`, \"ok\"},\n\t\t{`{{if Matches \"hello\" \"\\\\d+\"}}ok{{end}}`, \"\"},\n\n\t\t{`{{if Contains \"123\" \"2\"}}ok{{end}}`, \"ok\"},\n\t\t{`{{if Contains \"123\" \"4\"}}ok{{end}}`, \"\"},\n\n\t\t{`{{if ContainsAny \"123\" \"24\"}}ok{{end}}`, \"ok\"},\n\t\t{`{{if ContainsAny \"123\" \"45\"}}ok{{end}}`, \"\"},\n\n\t\t{`{{if EqualFold \"HellO\" \"hello\"}}ok{{end}}`, \"ok\"},\n\t\t{`{{if EqualFold \"ПривеТ\" \"привет\"}}ok{{end}}`, \"ok\"},\n\t\t{`{{if EqualFold \"good\" \"goed\"}}ok{{end}}`, \"\"},\n\n\t\t{`{{if HasPrefix \"hello\" \"he\"}}ok{{end}}`, \"ok\"},\n\t\t{`{{if HasPrefix \"hello\" \"lo\"}}ok{{end}}`, \"\"},\n\n\t\t{`{{if HasSuffix \"hello\" \"lo\"}}ok{{end}}`, \"ok\"},\n\t\t{`{{if HasSuffix \"hello\" \"he\"}}ok{{end}}`, \"\"},\n\n\t\t\/\/ filters returning a string\n\n\t\t{`{{JSON 123}}`, \"[123]\"},\n\n\t\t{`{{Left \"123456\" 2}}`, \"12\"},\n\t\t{`{{Left \"123456\" 10}}`, \"123456\"},\n\n\t\t{`{{Right \"123456\" 2}}`, \"56\"},\n\t\t{`{{Right \"123456\" 10}}`, \"123456\"},\n\n\t\t{`{{Last (Split \"12,34,56\" \",\")}}`, \"56\"},\n\n\t\t{`{{Mid \"123456\" 2}}`, \"3456\"},\n\t\t{`{{Mid \"123456\" 10}}`, \"\"},\n\t\t{`{{Mid \"123456\" 2 4}}`, \"34\"},\n\t\t{`{{Mid \"123456\" 2 10}}`, \"3456\"},\n\n\t\t{`{{Join (Split \"12,34,56\" \",\") \"|\"}}`, \"12|34|56\"},\n\t\t{`{{Join (Split \"12\" \",\") \"|\"}}`, \"12\"},\n\n\t\t{`{{Repeat \"12\" 3}}`, \"121212\"},\n\t\t{`{{Repeat \"12\" 0}}`, \"\"},\n\t\t{`{{Repeat \"\" 10}}`, \"\"},\n\n\t\t{`{{Replace \"1234\" \"23\" \"56\" -1}}`, \"1564\"},\n\t\t{`{{Replace \"12223\" \"2\" \"5\" 1}}`, \"15223\"},\n\t\t\/\/ ...\n\t}\n\n\tfor _, tcase := range cases {\n\t\ttcase := tcase \/\/ important, needed for running in parallel\n\t\tt.Run(tcase.text, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tresult, err := executeTemplate(tcase.text, nil)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"error executing template: %s\", err.Error())\n\t\t\t\tt.FailNow()\n\t\t\t}\n\t\t\tif result != tcase.expected {\n\t\t\t\tt.Errorf(\"expected `%s` but actually `%s`\", tcase.expected, result)\n\t\t\t\tt.FailNow()\n\t\t\t}\n\t\t})\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build windows\n\npackage goridge\n\nimport (\n\t\"fmt\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nfunc getAllocSize() uint {\n\tvar mod = syscall.NewLazyDLL(\"kernel32.dll\")\n\tvar proc = mod.NewProc(\"GetPhysicallyInstalledSystemMemory\")\n\tvar mem uint64\n\n\tret, _, err := proc.Call(uintptr(unsafe.Pointer(&mem)))\n\tfmt.Printf(\"Ret: %d, err: %v, Physical memory: %d\\n\", ret, err, mem)\n}<commit_msg>fix bug in alloc_windows<commit_after>\/\/ +build windows\n\npackage goridge\n\nimport (\n\t\"fmt\"\n\t\"syscall\"\n\t\"unsafe\"\n)\n\nfunc getAllocSize() uint {\n\tvar mod = syscall.NewLazyDLL(\"kernel32.dll\")\n\tvar proc = mod.NewProc(\"GetPhysicallyInstalledSystemMemory\")\n\tvar mem uint64\n\n\tret, _, err := proc.Call(uintptr(unsafe.Pointer(&mem)))\n\tfmt.Printf(\"Ret: %d, err: %v, Physical memory: %d\\n\", ret, err, mem)\n\treturn 0\n}<|endoftext|>"} {"text":"<commit_before>package v1alpha1\n\nimport (\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\n\/\/ +genclient\n\/\/ +genclient:noStatus\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\ntype EnvironmentMapping struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ObjectMeta `json:\"metadata\"`\n}\n\nfunc (rem *EnvironmentMapping) GetObjectKind() schema.ObjectKind {\n\treturn &EnvironmentMapping{}\n}\n\n\/\/ +genclient\n\/\/ +genclient:nonNamespaced\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\ntype RemoteEnvironment struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ObjectMeta `json:\"metadata\"`\n\tSpec RemoteEnvironmentSpec `json:\"spec\"`\n\tStatus REStatus `json:\"status,omitempty\"`\n}\n\ntype REStatus struct {\n\t\/\/ Represents the status of Remote Environment release installation\n\tInstallationStatus InstallationStatus `json:\"installationStatus\"`\n}\n\ntype InstallationStatus struct {\n\tStatus string `json:\"status\"`\n\tDescription string `json:\"description\"`\n}\n\nfunc (pw *RemoteEnvironment) GetObjectKind() schema.ObjectKind {\n\treturn &RemoteEnvironment{}\n}\n\n\/\/ RemoteEnvironmentSpec defines spec section of the RemoteEnvironment custom resource\ntype RemoteEnvironmentSpec struct {\n\tDescription string `json:\"description\"`\n\tServices []Service `json:\"services\"`\n\t\/\/ AccessLabel is not required, 'omitempty' is needed because of regexp validation\n\tAccessLabel string `json:\"accessLabel,omitempty\"`\n\tLabels map[string]string `json:\"labels\"`\n}\n\n\/\/ Entry defines, what is enabled by activating the service.\ntype Entry struct {\n\tType string `json:\"type\"`\n\tGatewayUrl string `json:\"gatewayUrl\"`\n\t\/\/ AccessLabel is not required for Events, 'omitempty' is needed because of regexp validation\n\tAccessLabel string `json:\"accessLabel,omitempty\"`\n\tTargetUrl string `json:\"targetUrl\"`\n\tSpecificationUrl string `json:\"specificationUrl,omitempty\"`\n\tApiType string `json:\"apiType,omitempty\"`\n\tCredentials Credentials `json:\"credentials,omitempty\"`\n}\n\n\/\/ Credentials defines type of authentication and where the credentials are stored\ntype Credentials struct {\n\tType string `json:\"type\"`\n\tSecretName string `json:\"secretName\"`\n\tAuthenticationUrl string `json:\"authenticationUrl,omitempty\"`\n}\n\n\/\/ Service represents part of the remote environment, which is mapped 1 to 1 to service class in the service-catalog\ntype Service struct {\n\tID string `json:\"id\"`\n\tIdentifier string `json:\"identifier\"`\n\tName string `json:\"name\"`\n\tDisplayName string `json:\"displayName\"`\n\tDescription string `json:\"description\"`\n\tLabels map[string]string `json:\"labels,omitempty\"`\n\tLongDescription string `json:\"longDescription,omitempty\"`\n\tProviderDisplayName string `json:\"providerDisplayName\"`\n\tTags []string `json:\"tags,omitempty\"`\n\tEntries []Entry `json:\"entries\"`\n}\n\n\/\/ +genclient\n\/\/ +genclient:noStatus\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\ntype EventActivation struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ObjectMeta `json:\"metadata\"`\n\n\tSpec EventActivationSpec `json:\"spec\"`\n}\n\ntype EventActivationSpec struct {\n\tDisplayName string `json:\"displayName\"`\n\tSourceID string `json:\"sourceId\"`\n}\n\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\ntype RemoteEnvironmentList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\n\tItems []RemoteEnvironment `json:\"items\"`\n}\n\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\ntype EnvironmentMappingList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\n\tItems []EnvironmentMapping `json:\"items\"`\n}\n\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\ntype EventActivationList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\n\tItems []EventActivation `json:\"items\"`\n}\n<commit_msg>Add SkipInstallation flag to Remote Environment (#1797)<commit_after>package v1alpha1\n\nimport (\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n)\n\n\/\/ +genclient\n\/\/ +genclient:noStatus\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\ntype EnvironmentMapping struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ObjectMeta `json:\"metadata\"`\n}\n\nfunc (rem *EnvironmentMapping) GetObjectKind() schema.ObjectKind {\n\treturn &EnvironmentMapping{}\n}\n\n\/\/ +genclient\n\/\/ +genclient:nonNamespaced\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\ntype RemoteEnvironment struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ObjectMeta `json:\"metadata\"`\n\tSpec RemoteEnvironmentSpec `json:\"spec\"`\n\tStatus REStatus `json:\"status,omitempty\"`\n}\n\ntype REStatus struct {\n\t\/\/ Represents the status of Remote Environment release installation\n\tInstallationStatus InstallationStatus `json:\"installationStatus\"`\n}\n\ntype InstallationStatus struct {\n\tStatus string `json:\"status\"`\n\tDescription string `json:\"description\"`\n}\n\nfunc (pw *RemoteEnvironment) GetObjectKind() schema.ObjectKind {\n\treturn &RemoteEnvironment{}\n}\n\n\/\/ RemoteEnvironmentSpec defines spec section of the RemoteEnvironment custom resource\ntype RemoteEnvironmentSpec struct {\n\tDescription string `json:\"description\"`\n\tSkipInstallation bool `json:\"skipInstallation,omitempty\"`\n\tServices []Service `json:\"services\"`\n\t\/\/ AccessLabel is not required, 'omitempty' is needed because of regexp validation\n\tAccessLabel string `json:\"accessLabel,omitempty\"`\n\tLabels map[string]string `json:\"labels\"`\n}\n\n\/\/ Entry defines, what is enabled by activating the service.\ntype Entry struct {\n\tType string `json:\"type\"`\n\tGatewayUrl string `json:\"gatewayUrl\"`\n\t\/\/ AccessLabel is not required for Events, 'omitempty' is needed because of regexp validation\n\tAccessLabel string `json:\"accessLabel,omitempty\"`\n\tTargetUrl string `json:\"targetUrl\"`\n\tSpecificationUrl string `json:\"specificationUrl,omitempty\"`\n\tApiType string `json:\"apiType,omitempty\"`\n\tCredentials Credentials `json:\"credentials,omitempty\"`\n}\n\n\/\/ Credentials defines type of authentication and where the credentials are stored\ntype Credentials struct {\n\tType string `json:\"type\"`\n\tSecretName string `json:\"secretName\"`\n\tAuthenticationUrl string `json:\"authenticationUrl,omitempty\"`\n}\n\n\/\/ Service represents part of the remote environment, which is mapped 1 to 1 to service class in the service-catalog\ntype Service struct {\n\tID string `json:\"id\"`\n\tIdentifier string `json:\"identifier\"`\n\tName string `json:\"name\"`\n\tDisplayName string `json:\"displayName\"`\n\tDescription string `json:\"description\"`\n\tLabels map[string]string `json:\"labels,omitempty\"`\n\tLongDescription string `json:\"longDescription,omitempty\"`\n\tProviderDisplayName string `json:\"providerDisplayName\"`\n\tTags []string `json:\"tags,omitempty\"`\n\tEntries []Entry `json:\"entries\"`\n}\n\n\/\/ +genclient\n\/\/ +genclient:noStatus\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\ntype EventActivation struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ObjectMeta `json:\"metadata\"`\n\n\tSpec EventActivationSpec `json:\"spec\"`\n}\n\ntype EventActivationSpec struct {\n\tDisplayName string `json:\"displayName\"`\n\tSourceID string `json:\"sourceId\"`\n}\n\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\ntype RemoteEnvironmentList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\n\tItems []RemoteEnvironment `json:\"items\"`\n}\n\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\ntype EnvironmentMappingList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\n\tItems []EnvironmentMapping `json:\"items\"`\n}\n\n\/\/ +k8s:deepcopy-gen:interfaces=k8s.io\/apimachinery\/pkg\/runtime.Object\n\ntype EventActivationList struct {\n\tmetav1.TypeMeta `json:\",inline\"`\n\tmetav1.ListMeta `json:\"metadata\"`\n\n\tItems []EventActivation `json:\"items\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package workflows\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/stelligent\/mu\/common\"\n)\n\ntype serviceWorkflow struct {\n\tenvStack *common.Stack\n\tlbStack *common.Stack\n\tartifactProvider common.ArtifactProvider\n\tserviceName string\n\tserviceTag string\n\tserviceImage string\n\tregistryAuth string\n\tregistryAuthConfig map[string]types.AuthConfig\n\tpriority int\n\tcodeRevision string\n\trepoName string\n\tappName string\n\tappRevisionBucket string\n\tappRevisionKey string\n\tdatabaseName string\n\tcloudFormationRoleArn string\n\tmicroserviceTaskDefinitionArn string\n\tecsEventsRoleArn string\n\tkubernetesResourceManager common.KubernetesResourceManager\n}\n\n\/\/ Find a service in config, by name and set the reference\nfunc (workflow *serviceWorkflow) serviceLoader(ctx *common.Context, tag string, provider string) Executor {\n\treturn func() error {\n\t\terr := workflow.serviceInput(ctx, \"\")()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Tag\n\t\tif tag != \"\" {\n\t\t\tworkflow.serviceTag = tag\n\t\t} else if ctx.Config.Repo.Revision != \"\" {\n\t\t\tworkflow.serviceTag = ctx.Config.Repo.Revision\n\t\t} else {\n\t\t\tworkflow.serviceTag = \"latest\"\n\t\t}\n\t\tworkflow.appRevisionKey = fmt.Sprintf(\"%s\/%s.zip\", workflow.serviceName, workflow.serviceTag)\n\n\t\tworkflow.databaseName = ctx.Config.Service.Database.Name\n\t\tworkflow.codeRevision = ctx.Config.Repo.Revision\n\t\tworkflow.repoName = ctx.Config.Repo.Slug\n\t\tworkflow.priority = ctx.Config.Service.Priority\n\n\t\tif provider == \"\" {\n\t\t\tdockerfile := ctx.Config.Service.Dockerfile\n\t\t\tif dockerfile == \"\" {\n\t\t\t\tdockerfile = \"Dockerfile\"\n\t\t\t}\n\n\t\t\tdockerfilePath := fmt.Sprintf(\"%s\/%s\", ctx.Config.Basedir, dockerfile)\n\t\t\tlog.Debugf(\"Determining repo provider by checking for existence of '%s'\", dockerfilePath)\n\n\t\t\tif _, err := os.Stat(dockerfilePath); !os.IsNotExist(err) {\n\t\t\t\tlog.Infof(\"Dockerfile found, assuming ECR pipeline\")\n\t\t\t\tworkflow.artifactProvider = common.ArtifactProviderEcr\n\t\t\t} else {\n\t\t\t\tlog.Infof(\"No Dockerfile found, assuming CodeDeploy pipeline\")\n\t\t\t\tworkflow.artifactProvider = common.ArtifactProviderS3\n\t\t\t}\n\t\t} else {\n\t\t\tworkflow.artifactProvider = common.ArtifactProvider(provider)\n\t\t}\n\n\t\tlog.Debugf(\"Working with service:'%s' tag:'%s'\", workflow.serviceName, workflow.serviceTag)\n\t\treturn nil\n\t}\n}\n\nfunc (workflow *serviceWorkflow) isEcrProvider() Conditional {\n\treturn func() bool {\n\t\treturn strings.EqualFold(string(workflow.artifactProvider), string(common.ArtifactProviderEcr))\n\t}\n}\n\nfunc (workflow *serviceWorkflow) isS3Provider() Conditional {\n\treturn func() bool {\n\t\treturn strings.EqualFold(string(workflow.artifactProvider), string(common.ArtifactProviderS3))\n\t}\n}\n\nfunc (workflow *serviceWorkflow) isEcsProvider() Conditional {\n\treturn func() bool {\n\t\treturn strings.EqualFold(string(workflow.envStack.Tags[\"provider\"]), string(common.EnvProviderEcs)) ||\n\t\t\tstrings.EqualFold(string(workflow.envStack.Tags[\"provider\"]), string(common.EnvProviderEcsFargate))\n\t}\n}\nfunc (workflow *serviceWorkflow) isEksProvider() Conditional {\n\treturn func() bool {\n\t\treturn strings.EqualFold(string(workflow.envStack.Tags[\"provider\"]), string(common.EnvProviderEks)) ||\n\t\t\tstrings.EqualFold(string(workflow.envStack.Tags[\"provider\"]), string(common.EnvProviderEksFargate))\n\t}\n}\nfunc (workflow *serviceWorkflow) isFargateProvider() Conditional {\n\treturn func() bool {\n\t\treturn strings.EqualFold(string(workflow.envStack.Tags[\"provider\"]), string(common.EnvProviderEcsFargate))\n\t}\n}\n\nfunc (workflow *serviceWorkflow) isEc2Provider() Conditional {\n\treturn func() bool {\n\t\treturn strings.EqualFold(string(workflow.envStack.Tags[\"provider\"]), string(common.EnvProviderEc2))\n\t}\n}\n\nfunc (workflow *serviceWorkflow) serviceInput(ctx *common.Context, serviceName string) Executor {\n\treturn func() error {\n\t\t\/\/ Repo Name\n\t\tif serviceName != \"\" {\n\t\t\tworkflow.serviceName = serviceName\n\t\t} else if ctx.Config.Service.Name != \"\" {\n\t\t\tworkflow.serviceName = ctx.Config.Service.Name\n\t\t} else if ctx.Config.Repo.Name != \"\" {\n\t\t\tworkflow.serviceName = ctx.Config.Repo.Name\n\t\t} else {\n\t\t\treturn errors.New(\"Service name must be provided\")\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc (workflow *serviceWorkflow) serviceRepoUpserter(namespace string, service *common.Service, stackUpserter common.StackUpserter, stackWaiter common.StackWaiter) Executor {\n\treturn func() error {\n\t\tif service.ImageRepository != \"\" {\n\t\t\tlog.Noticef(\"Using repo '%s' for service '%s'\", service.ImageRepository, workflow.serviceName)\n\t\t\tworkflow.serviceImage = service.ImageRepository\n\t\t\treturn nil\n\t\t}\n\n\t\tlog.Noticef(\"Upsert repo for service '%s'\", workflow.serviceName)\n\n\t\tecrStackName := common.CreateStackName(namespace, common.StackTypeRepo, workflow.serviceName)\n\n\t\tstackParams := make(map[string]string)\n\t\tstackParams[\"RepoName\"] = fmt.Sprintf(\"%s-%s\", namespace, workflow.serviceName)\n\n\t\ttags := createTagMap(&ServiceTags{\n\t\t\tService: workflow.serviceName,\n\t\t\tType: string(common.StackTypeRepo),\n\t\t\tProvider: \"\",\n\t\t\tRevision: workflow.codeRevision,\n\t\t\tRepo: workflow.repoName,\n\t\t})\n\n\t\terr := stackUpserter.UpsertStack(ecrStackName, common.TemplateRepo, nil, stackParams, tags, \"\", \"\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.Debugf(\"Waiting for stack '%s' to complete\", ecrStackName)\n\t\tstack := stackWaiter.AwaitFinalStatus(ecrStackName)\n\t\tif stack == nil {\n\t\t\treturn fmt.Errorf(\"Unable to create stack %s\", ecrStackName)\n\t\t}\n\t\tif strings.HasSuffix(stack.Status, \"ROLLBACK_COMPLETE\") || !strings.HasSuffix(stack.Status, \"_COMPLETE\") {\n\t\t\treturn fmt.Errorf(\"Ended in failed status %s %s\", stack.Status, stack.StatusReason)\n\t\t}\n\t\tworkflow.serviceImage = fmt.Sprintf(\"%s:%s\", stack.Outputs[\"RepoUrl\"], workflow.serviceTag)\n\t\treturn nil\n\t}\n}\nfunc (workflow *serviceWorkflow) serviceAppUpserter(namespace string, service *common.Service, stackUpserter common.StackUpserter, stackWaiter common.StackWaiter) Executor {\n\treturn func() error {\n\t\tlog.Noticef(\"Upsert app for service '%s'\", workflow.serviceName)\n\n\t\tappStackName := common.CreateStackName(namespace, common.StackTypeApp, workflow.serviceName)\n\n\t\tstackParams := make(map[string]string)\n\n\t\ttags := createTagMap(&EnvironmentTags{\n\t\t\tEnvironment: workflow.serviceName,\n\t\t\tType: string(common.StackTypeApp),\n\t\t\tProvider: \"\",\n\t\t\tRevision: workflow.codeRevision,\n\t\t\tRepo: workflow.repoName,\n\t\t})\n\n\t\terr := stackUpserter.UpsertStack(appStackName, common.TemplateApp, nil, stackParams, tags, \"\", workflow.cloudFormationRoleArn)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.Debugf(\"Waiting for stack '%s' to complete\", appStackName)\n\t\tstack := stackWaiter.AwaitFinalStatus(appStackName)\n\t\tif stack == nil {\n\t\t\treturn fmt.Errorf(\"Unable to create stack %s\", appStackName)\n\t\t}\n\t\tif strings.HasSuffix(stack.Status, \"ROLLBACK_COMPLETE\") || !strings.HasSuffix(stack.Status, \"_COMPLETE\") {\n\t\t\treturn fmt.Errorf(\"Ended in failed status %s %s\", stack.Status, stack.StatusReason)\n\t\t}\n\t\tworkflow.appName = stack.Outputs[\"ApplicationName\"]\n\t\treturn nil\n\t}\n}\n\nfunc (workflow *serviceWorkflow) serviceBucketUpserter(namespace string, service *common.Service, stackUpserter common.StackUpserter, stackWaiter common.StackWaiter) Executor {\n\treturn func() error {\n\n\t\tif service.Pipeline.Build.Bucket != \"\" {\n\t\t\tworkflow.appRevisionBucket = service.Pipeline.Build.Bucket\n\t\t} else {\n\t\t\tbucketStackName := common.CreateStackName(namespace, common.StackTypeBucket, \"codedeploy\")\n\t\t\tlog.Noticef(\"Upserting Bucket for CodeDeploy\")\n\t\t\tbucketParams := make(map[string]string)\n\t\t\tbucketParams[\"Namespace\"] = namespace\n\t\t\tbucketParams[\"BucketPrefix\"] = \"codedeploy\"\n\n\t\t\ttags := createTagMap(&PipelineTags{\n\t\t\t\tType: common.StackTypeBucket,\n\t\t\t})\n\n\t\t\terr := stackUpserter.UpsertStack(bucketStackName, common.TemplateBucket, nil, bucketParams, tags, \"\", workflow.cloudFormationRoleArn)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tlog.Debugf(\"Waiting for stack '%s' to complete\", bucketStackName)\n\t\t\tstack := stackWaiter.AwaitFinalStatus(bucketStackName)\n\t\t\tif stack == nil {\n\t\t\t\treturn fmt.Errorf(\"Unable to create stack %s\", bucketStackName)\n\t\t\t}\n\t\t\tif strings.HasSuffix(stack.Status, \"ROLLBACK_COMPLETE\") || !strings.HasSuffix(stack.Status, \"_COMPLETE\") {\n\t\t\t\treturn fmt.Errorf(\"Ended in failed status %s %s\", stack.Status, stack.StatusReason)\n\t\t\t}\n\n\t\t\tworkflow.appRevisionBucket = stack.Outputs[\"Bucket\"]\n\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\nfunc (workflow *serviceWorkflow) serviceRegistryAuthenticator(authenticator common.RepositoryAuthenticator) Executor {\n\treturn func() error {\n\t\tlog.Debugf(\"Authenticating to registry '%s'\", workflow.serviceImage)\n\t\tregistryAuth, err := authenticator.AuthenticateRepository(workflow.serviceImage)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdata, err := base64.StdEncoding.DecodeString(registryAuth)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tauthParts := strings.Split(string(data), \":\")\n\n\t\tworkflow.registryAuth = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(\"{\\\"username\\\":\\\"%s\\\", \\\"password\\\":\\\"%s\\\"}\", authParts[0], authParts[1])))\n\n\t\t\/\/ ImageBuild pull auth\n\t\tvar authConfigs2 map[string]types.AuthConfig = make(map[string]types.AuthConfig)\n\t\tvar serviceImagePart = strings.Split(workflow.serviceImage, \":\")[0]\n\n\t\tauthConfigs2[serviceImagePart] = types.AuthConfig{\n\t\t\tUsername: authParts[0],\n\t\t\tPassword: authParts[1],\n\t\t\tServerAddress: fmt.Sprintf(\"https:\/\/%s\", serviceImagePart),\n\t\t}\n\n\t\tworkflow.registryAuthConfig = authConfigs2\n\n\t\treturn nil\n\t}\n}\n\nfunc (workflow *serviceWorkflow) connectKubernetes(provider common.KubernetesResourceManagerProvider) Executor {\n\treturn func() error {\n\t\tclusterName := workflow.envStack.Name\n\t\tkubernetesResourceManager, err := provider.GetResourceManager(clusterName)\n\t\tworkflow.kubernetesResourceManager = kubernetesResourceManager\n\t\treturn err\n\t}\n}\n<commit_msg>cleanup<commit_after>package workflows\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com\/docker\/docker\/api\/types\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/stelligent\/mu\/common\"\n)\n\ntype serviceWorkflow struct {\n\tenvStack *common.Stack\n\tlbStack *common.Stack\n\tartifactProvider common.ArtifactProvider\n\tserviceName string\n\tserviceTag string\n\tserviceImage string\n\tregistryAuth string\n\tregistryAuthConfig map[string]types.AuthConfig\n\tpriority int\n\tcodeRevision string\n\trepoName string\n\tappName string\n\tappRevisionBucket string\n\tappRevisionKey string\n\tdatabaseName string\n\tcloudFormationRoleArn string\n\tmicroserviceTaskDefinitionArn string\n\tecsEventsRoleArn string\n\tkubernetesResourceManager common.KubernetesResourceManager\n}\n\n\/\/ Find a service in config, by name and set the reference\nfunc (workflow *serviceWorkflow) serviceLoader(ctx *common.Context, tag string, provider string) Executor {\n\treturn func() error {\n\t\terr := workflow.serviceInput(ctx, \"\")()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t\/\/ Tag\n\t\tif tag != \"\" {\n\t\t\tworkflow.serviceTag = tag\n\t\t} else if ctx.Config.Repo.Revision != \"\" {\n\t\t\tworkflow.serviceTag = ctx.Config.Repo.Revision\n\t\t} else {\n\t\t\tworkflow.serviceTag = \"latest\"\n\t\t}\n\t\tworkflow.appRevisionKey = fmt.Sprintf(\"%s\/%s.zip\", workflow.serviceName, workflow.serviceTag)\n\n\t\tworkflow.databaseName = ctx.Config.Service.Database.Name\n\t\tworkflow.codeRevision = ctx.Config.Repo.Revision\n\t\tworkflow.repoName = ctx.Config.Repo.Slug\n\t\tworkflow.priority = ctx.Config.Service.Priority\n\n\t\tif provider == \"\" {\n\t\t\tdockerfile := ctx.Config.Service.Dockerfile\n\t\t\tif dockerfile == \"\" {\n\t\t\t\tdockerfile = \"Dockerfile\"\n\t\t\t}\n\n\t\t\tdockerfilePath := fmt.Sprintf(\"%s\/%s\", ctx.Config.Basedir, dockerfile)\n\t\t\tlog.Debugf(\"Determining repo provider by checking for existence of '%s'\", dockerfilePath)\n\n\t\t\tif _, err := os.Stat(dockerfilePath); !os.IsNotExist(err) {\n\t\t\t\tlog.Infof(\"Dockerfile found, assuming ECR pipeline\")\n\t\t\t\tworkflow.artifactProvider = common.ArtifactProviderEcr\n\t\t\t} else {\n\t\t\t\tlog.Infof(\"No Dockerfile found, assuming CodeDeploy pipeline\")\n\t\t\t\tworkflow.artifactProvider = common.ArtifactProviderS3\n\t\t\t}\n\t\t} else {\n\t\t\tworkflow.artifactProvider = common.ArtifactProvider(provider)\n\t\t}\n\n\t\tlog.Debugf(\"Working with service:'%s' tag:'%s'\", workflow.serviceName, workflow.serviceTag)\n\t\treturn nil\n\t}\n}\n\nfunc (workflow *serviceWorkflow) isEcrProvider() Conditional {\n\treturn func() bool {\n\t\treturn strings.EqualFold(string(workflow.artifactProvider), string(common.ArtifactProviderEcr))\n\t}\n}\n\nfunc (workflow *serviceWorkflow) isS3Provider() Conditional {\n\treturn func() bool {\n\t\treturn strings.EqualFold(string(workflow.artifactProvider), string(common.ArtifactProviderS3))\n\t}\n}\n\nfunc (workflow *serviceWorkflow) isEcsProvider() Conditional {\n\treturn func() bool {\n\t\treturn strings.EqualFold(string(workflow.envStack.Tags[\"provider\"]), string(common.EnvProviderEcs)) ||\n\t\t\tstrings.EqualFold(string(workflow.envStack.Tags[\"provider\"]), string(common.EnvProviderEcsFargate))\n\t}\n}\nfunc (workflow *serviceWorkflow) isEksProvider() Conditional {\n\treturn func() bool {\n\t\treturn strings.EqualFold(string(workflow.envStack.Tags[\"provider\"]), string(common.EnvProviderEks)) ||\n\t\t\tstrings.EqualFold(string(workflow.envStack.Tags[\"provider\"]), string(common.EnvProviderEksFargate))\n\t}\n}\nfunc (workflow *serviceWorkflow) isFargateProvider() Conditional {\n\treturn func() bool {\n\t\treturn strings.EqualFold(string(workflow.envStack.Tags[\"provider\"]), string(common.EnvProviderEcsFargate))\n\t}\n}\n\nfunc (workflow *serviceWorkflow) isEc2Provider() Conditional {\n\treturn func() bool {\n\t\treturn strings.EqualFold(string(workflow.envStack.Tags[\"provider\"]), string(common.EnvProviderEc2))\n\t}\n}\n\nfunc (workflow *serviceWorkflow) serviceInput(ctx *common.Context, serviceName string) Executor {\n\treturn func() error {\n\t\t\/\/ Repo Name\n\t\tif serviceName != \"\" {\n\t\t\tworkflow.serviceName = serviceName\n\t\t} else if ctx.Config.Service.Name != \"\" {\n\t\t\tworkflow.serviceName = ctx.Config.Service.Name\n\t\t} else if ctx.Config.Repo.Name != \"\" {\n\t\t\tworkflow.serviceName = ctx.Config.Repo.Name\n\t\t} else {\n\t\t\treturn errors.New(\"Service name must be provided\")\n\t\t}\n\t\treturn nil\n\t}\n}\n\nfunc (workflow *serviceWorkflow) serviceRepoUpserter(namespace string, service *common.Service, stackUpserter common.StackUpserter, stackWaiter common.StackWaiter) Executor {\n\treturn func() error {\n\t\tif service.ImageRepository != \"\" {\n\t\t\tlog.Noticef(\"Using repo '%s' for service '%s'\", service.ImageRepository, workflow.serviceName)\n\t\t\tworkflow.serviceImage = service.ImageRepository\n\t\t\treturn nil\n\t\t}\n\n\t\tlog.Noticef(\"Upsert repo for service '%s'\", workflow.serviceName)\n\n\t\tecrStackName := common.CreateStackName(namespace, common.StackTypeRepo, workflow.serviceName)\n\n\t\tstackParams := make(map[string]string)\n\t\tstackParams[\"RepoName\"] = fmt.Sprintf(\"%s-%s\", namespace, workflow.serviceName)\n\n\t\ttags := createTagMap(&ServiceTags{\n\t\t\tService: workflow.serviceName,\n\t\t\tType: string(common.StackTypeRepo),\n\t\t\tProvider: \"\",\n\t\t\tRevision: workflow.codeRevision,\n\t\t\tRepo: workflow.repoName,\n\t\t})\n\n\t\terr := stackUpserter.UpsertStack(ecrStackName, common.TemplateRepo, nil, stackParams, tags, \"\", \"\")\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.Debugf(\"Waiting for stack '%s' to complete\", ecrStackName)\n\t\tstack := stackWaiter.AwaitFinalStatus(ecrStackName)\n\t\tif stack == nil {\n\t\t\treturn fmt.Errorf(\"Unable to create stack %s\", ecrStackName)\n\t\t}\n\t\tif strings.HasSuffix(stack.Status, \"ROLLBACK_COMPLETE\") || !strings.HasSuffix(stack.Status, \"_COMPLETE\") {\n\t\t\treturn fmt.Errorf(\"Ended in failed status %s %s\", stack.Status, stack.StatusReason)\n\t\t}\n\t\tworkflow.serviceImage = fmt.Sprintf(\"%s:%s\", stack.Outputs[\"RepoUrl\"], workflow.serviceTag)\n\t\treturn nil\n\t}\n}\nfunc (workflow *serviceWorkflow) serviceAppUpserter(namespace string, service *common.Service, stackUpserter common.StackUpserter, stackWaiter common.StackWaiter) Executor {\n\treturn func() error {\n\t\tlog.Noticef(\"Upsert app for service '%s'\", workflow.serviceName)\n\n\t\tappStackName := common.CreateStackName(namespace, common.StackTypeApp, workflow.serviceName)\n\n\t\tstackParams := make(map[string]string)\n\n\t\ttags := createTagMap(&EnvironmentTags{\n\t\t\tEnvironment: workflow.serviceName,\n\t\t\tType: string(common.StackTypeApp),\n\t\t\tProvider: \"\",\n\t\t\tRevision: workflow.codeRevision,\n\t\t\tRepo: workflow.repoName,\n\t\t})\n\n\t\terr := stackUpserter.UpsertStack(appStackName, common.TemplateApp, nil, stackParams, tags, \"\", workflow.cloudFormationRoleArn)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tlog.Debugf(\"Waiting for stack '%s' to complete\", appStackName)\n\t\tstack := stackWaiter.AwaitFinalStatus(appStackName)\n\t\tif stack == nil {\n\t\t\treturn fmt.Errorf(\"Unable to create stack %s\", appStackName)\n\t\t}\n\t\tif strings.HasSuffix(stack.Status, \"ROLLBACK_COMPLETE\") || !strings.HasSuffix(stack.Status, \"_COMPLETE\") {\n\t\t\treturn fmt.Errorf(\"Ended in failed status %s %s\", stack.Status, stack.StatusReason)\n\t\t}\n\t\tworkflow.appName = stack.Outputs[\"ApplicationName\"]\n\t\treturn nil\n\t}\n}\n\nfunc (workflow *serviceWorkflow) serviceBucketUpserter(namespace string, service *common.Service, stackUpserter common.StackUpserter, stackWaiter common.StackWaiter) Executor {\n\treturn func() error {\n\n\t\tif service.Pipeline.Build.Bucket != \"\" {\n\t\t\tworkflow.appRevisionBucket = service.Pipeline.Build.Bucket\n\t\t} else {\n\t\t\tbucketStackName := common.CreateStackName(namespace, common.StackTypeBucket, \"codedeploy\")\n\t\t\tlog.Noticef(\"Upserting Bucket for CodeDeploy\")\n\t\t\tbucketParams := make(map[string]string)\n\t\t\tbucketParams[\"Namespace\"] = namespace\n\t\t\tbucketParams[\"BucketPrefix\"] = \"codedeploy\"\n\n\t\t\ttags := createTagMap(&PipelineTags{\n\t\t\t\tType: common.StackTypeBucket,\n\t\t\t})\n\n\t\t\terr := stackUpserter.UpsertStack(bucketStackName, common.TemplateBucket, nil, bucketParams, tags, \"\", workflow.cloudFormationRoleArn)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tlog.Debugf(\"Waiting for stack '%s' to complete\", bucketStackName)\n\t\t\tstack := stackWaiter.AwaitFinalStatus(bucketStackName)\n\t\t\tif stack == nil {\n\t\t\t\treturn fmt.Errorf(\"Unable to create stack %s\", bucketStackName)\n\t\t\t}\n\t\t\tif strings.HasSuffix(stack.Status, \"ROLLBACK_COMPLETE\") || !strings.HasSuffix(stack.Status, \"_COMPLETE\") {\n\t\t\t\treturn fmt.Errorf(\"Ended in failed status %s %s\", stack.Status, stack.StatusReason)\n\t\t\t}\n\n\t\t\tworkflow.appRevisionBucket = stack.Outputs[\"Bucket\"]\n\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\nfunc (workflow *serviceWorkflow) serviceRegistryAuthenticator(authenticator common.RepositoryAuthenticator) Executor {\n\treturn func() error {\n\t\tlog.Debugf(\"Authenticating to registry '%s'\", workflow.serviceImage)\n\t\tregistryAuth, err := authenticator.AuthenticateRepository(workflow.serviceImage)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdata, err := base64.StdEncoding.DecodeString(registryAuth)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tauthParts := strings.Split(string(data), \":\")\n\n\t\tworkflow.registryAuth = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(\"{\\\"username\\\":\\\"%s\\\", \\\"password\\\":\\\"%s\\\"}\", authParts[0], authParts[1])))\n\n\t\t\/\/ ImageBuild pull auth\n\t\tvar authConfigs2 = make(map[string]types.AuthConfig)\n\t\tvar serviceImagePart = strings.Split(workflow.serviceImage, \":\")[0]\n\n\t\tauthConfigs2[serviceImagePart] = types.AuthConfig{\n\t\t\tUsername: authParts[0],\n\t\t\tPassword: authParts[1],\n\t\t\tServerAddress: fmt.Sprintf(\"https:\/\/%s\", serviceImagePart),\n\t\t}\n\n\t\tworkflow.registryAuthConfig = authConfigs2\n\n\t\treturn nil\n\t}\n}\n\nfunc (workflow *serviceWorkflow) connectKubernetes(provider common.KubernetesResourceManagerProvider) Executor {\n\treturn func() error {\n\t\tclusterName := workflow.envStack.Name\n\t\tkubernetesResourceManager, err := provider.GetResourceManager(clusterName)\n\t\tworkflow.kubernetesResourceManager = kubernetesResourceManager\n\t\treturn err\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build !appengine\n\npackage fasthttp\n\nimport (\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/admpub\/fasthttp\"\n\t\"github.com\/admpub\/log\"\n\t\"github.com\/webx-top\/echo\/engine\"\n\t\"github.com\/webx-top\/echo\/logger\"\n)\n\ntype (\n\tResponse struct {\n\t\tcontext *fasthttp.RequestCtx\n\t\theader engine.Header\n\t\tstatus int\n\t\tsize int64\n\t\tcommitted bool\n\t\twriter io.Writer\n\t\tlogger logger.Logger\n\t}\n)\n\nfunc NewResponse(c *fasthttp.RequestCtx) *Response {\n\treturn &Response{\n\t\tcontext: c,\n\t\theader: &ResponseHeader{&c.Response.Header},\n\t\twriter: c,\n\t\tlogger: log.New(\"echo\"),\n\t}\n}\n\nfunc (r *Response) Object() interface{} {\n\treturn r.context\n}\n\nfunc (r *Response) Header() engine.Header {\n\treturn r.header\n}\n\nfunc (r *Response) WriteHeader(code int) {\n\tif r.committed {\n\t\tr.logger.Warn(\"response already committed\")\n\t\treturn\n\t}\n\tr.status = code\n\tr.context.SetStatusCode(code)\n\tr.committed = true\n}\n\nfunc (r *Response) Write(b []byte) (n int, err error) {\n\tn, err = r.writer.Write(b)\n\tr.size += int64(n)\n\treturn\n}\n\nfunc (r *Response) Status() int {\n\treturn r.status\n}\n\nfunc (r *Response) Size() int64 {\n\treturn r.size\n}\n\nfunc (r *Response) Committed() bool {\n\treturn r.committed\n}\n\nfunc (r *Response) SetWriter(w io.Writer) {\n\tr.writer = w\n}\n\nfunc (r *Response) Writer() io.Writer {\n\treturn r.writer\n}\n\nfunc (r *Response) Hijack(fn func(net.Conn)) {\n\tr.context.Hijack(fasthttp.HijackHandler(fn))\n}\n\nfunc (r *Response) Body() []byte {\n\tswitch strings.ToLower(r.header.Get(`Content-Encoding`)) {\n\tcase `gzip`:\n\t\tbody, err := r.context.Response.BodyGunzip()\n\t\tif err != nil {\n\t\t\tr.logger.Error(err)\n\t\t}\n\t\treturn body\n\tcase `deflate`:\n\t\tbody, err := r.context.Response.BodyInflate()\n\t\tif err != nil {\n\t\t\tr.logger.Error(err)\n\t\t}\n\t\treturn body\n\tdefault:\n\t\treturn r.context.Response.Body()\n\t}\n}\n\nfunc (r *Response) Redirect(url string, code int) {\n\tr.context.Redirect(url, code)\n}\n\nfunc (r *Response) NotFound() {\n\tr.context.NotFound()\n}\n\nfunc (r *Response) SetCookie(cookie *http.Cookie) {\n\tr.header.Set(\"Set-Cookie\", cookie.String())\n}\n\nfunc (r *Response) ServeFile(file string) {\n\tfasthttp.ServeFile(r.context, file)\n}\n\nfunc (r *Response) reset(c *fasthttp.RequestCtx, h engine.Header) {\n\tr.context = c\n\tr.header = h\n\tr.status = http.StatusOK\n\tr.size = 0\n\tr.committed = false\n\tr.writer = c\n}\n<commit_msg>update<commit_after>\/\/ +build !appengine\n\npackage fasthttp\n\nimport (\n\t\"io\"\n\t\"net\"\n\t\"net\/http\"\n\t\"strings\"\n\n\t\"github.com\/admpub\/fasthttp\"\n\t\"github.com\/admpub\/log\"\n\t\"github.com\/webx-top\/echo\/engine\"\n\t\"github.com\/webx-top\/echo\/logger\"\n)\n\ntype (\n\tResponse struct {\n\t\tcontext *fasthttp.RequestCtx\n\t\theader engine.Header\n\t\tstatus int\n\t\tsize int64\n\t\tcommitted bool\n\t\twriter io.Writer\n\t\tlogger logger.Logger\n\t}\n)\n\nfunc NewResponse(c *fasthttp.RequestCtx) *Response {\n\treturn &Response{\n\t\tcontext: c,\n\t\theader: &ResponseHeader{&c.Response.Header},\n\t\twriter: c,\n\t\tlogger: log.New(\"echo\"),\n\t}\n}\n\nfunc (r *Response) Object() interface{} {\n\treturn r.context\n}\n\nfunc (r *Response) Header() engine.Header {\n\treturn r.header\n}\n\nfunc (r *Response) WriteHeader(code int) {\n\tif r.committed {\n\t\tr.logger.Warn(\"response already committed\")\n\t\treturn\n\t}\n\tr.status = code\n\tr.context.SetStatusCode(code)\n\tr.committed = true\n}\n\nfunc (r *Response) Write(b []byte) (n int, err error) {\n\tn, err = r.writer.Write(b)\n\tr.size += int64(n)\n\treturn\n}\n\nfunc (r *Response) Status() int {\n\treturn r.status\n}\n\nfunc (r *Response) Size() int64 {\n\treturn r.size\n}\n\nfunc (r *Response) Committed() bool {\n\treturn r.committed\n}\n\nfunc (r *Response) SetWriter(w io.Writer) {\n\tr.writer = w\n}\n\nfunc (r *Response) Writer() io.Writer {\n\treturn r.writer\n}\n\nfunc (r *Response) Hijack(fn func(net.Conn)) {\n\tr.context.Hijack(fasthttp.HijackHandler(fn))\n}\n\nfunc (r *Response) Body() []byte {\n\tswitch strings.ToLower(r.header.Get(`Content-Encoding`)) {\n\tcase `gzip`:\n\t\tbody, err := r.context.Response.BodyGunzip()\n\t\tif err != nil {\n\t\t\tr.logger.Error(err)\n\t\t}\n\t\treturn body\n\tcase `deflate`:\n\t\tbody, err := r.context.Response.BodyInflate()\n\t\tif err != nil {\n\t\t\tr.logger.Error(err)\n\t\t}\n\t\treturn body\n\tdefault:\n\t\treturn r.context.Response.Body()\n\t}\n}\n\nfunc (r *Response) Redirect(url string, code int) {\n\t\/\/r.context.Redirect(url, code) bug: missing port number\n\tr.header.Set(`Location`, url)\n\tr.WriteHeader(code)\n}\n\nfunc (r *Response) NotFound() {\n\tr.context.NotFound()\n}\n\nfunc (r *Response) SetCookie(cookie *http.Cookie) {\n\tr.header.Set(\"Set-Cookie\", cookie.String())\n}\n\nfunc (r *Response) ServeFile(file string) {\n\tfasthttp.ServeFile(r.context, file)\n}\n\nfunc (r *Response) reset(c *fasthttp.RequestCtx, h engine.Header) {\n\tr.context = c\n\tr.header = h\n\tr.status = http.StatusOK\n\tr.size = 0\n\tr.committed = false\n\tr.writer = c\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\n Nging is a toolbox for webmasters\n Copyright (C) 2018-present Wenhui Shen <swh@admpub.com>\n\n This program is free software: you can redistribute it and\/or modify\n it under the terms of the GNU Affero General Public License as published\n by the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU Affero General Public License for more details.\n\n You should have received a copy of the GNU Affero General Public License\n along with this program. If not, see <https:\/\/www.gnu.org\/licenses\/>.\n*\/\n\npackage license\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"net\/url\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/admpub\/license_gen\/lib\"\n\t\"github.com\/admpub\/once\"\n\t\"github.com\/shirou\/gopsutil\/v3\/cpu\"\n\t\"github.com\/webx-top\/com\"\n\t\"github.com\/webx-top\/echo\"\n\t\"golang.org\/x\/net\/publicsuffix\"\n\n\t\"github.com\/admpub\/nging\/v3\/application\/library\/config\"\n)\n\ntype Mode int\n\nconst (\n\tModeMachineID Mode = iota\n\tModeDomain\n)\n\nvar (\n\ttrackerURL = `https:\/\/www.webx.top\/product\/script\/nging\/tracker.js`\n\tproductURL = `https:\/\/www.webx.top\/product\/detail\/nging`\n\tlicenseURL = `https:\/\/www.webx.top\/product\/license\/nging`\n\tversionURL = `https:\/\/www.webx.top\/product\/version\/nging`\n\tlicenseMode = ModeMachineID\n\tlicenseData *lib.LicenseData \/\/ 拥有的授权数据\n\tlicenseFileName = `license.key`\n\tlicenseFile = filepath.Join(echo.Wd(), licenseFileName)\n\tlicenseError = lib.UnlicensedVersion\n\tlicenseModTime time.Time\n\temptyLicense = lib.LicenseData{}\n\tdownloadOnce once.Once\n\tdownloadError error\n\tdownloadTime time.Time\n\tlock4err sync.RWMutex\n\tlock4data sync.RWMutex\n\t\/\/ ErrLicenseNotFound 授权证书不存在\n\tErrLicenseNotFound = errors.New(`License does not exist`)\n\t\/\/ SkipLicenseCheck 跳过授权检测\n\tSkipLicenseCheck = true\n\n\t\/\/ - 需要验证的数据\n\n\tversion string \/\/1.2.3-beta\n\tpackageName string \/\/free\n\tmachineID string\n\tdomain string\n)\n\ntype ServerURL struct {\n\tTracker string \/\/用于统计分析的js地址\n\tProduct string \/\/该产品的详情介绍页面网址\n\tLicense string \/\/许可证验证和许可证下载API网址\n\tVersion string \/\/该产品最新版本信息API网址\n\tLicenseFileName string \/\/许可证文件名称\n}\n\nfunc (s *ServerURL) Apply() {\n\tif len(s.Tracker) > 0 {\n\t\ttrackerURL = s.Tracker\n\t}\n\tif len(s.Product) > 0 {\n\t\tproductURL = s.Product\n\t}\n\tif len(s.License) > 0 {\n\t\tlicenseURL = s.License\n\t}\n\tif len(s.Version) > 0 {\n\t\tversionURL = s.Version\n\t}\n\tif len(s.LicenseFileName) > 0 {\n\t\tlicenseFileName = s.LicenseFileName\n\t\tlicenseFile = filepath.Join(echo.Wd(), licenseFileName)\n\t}\n}\n\nfunc SetServerURL(s *ServerURL) {\n\tif s != nil {\n\t\ts.Apply()\n\t}\n}\n\nfunc SetProductName(name string, domains ...string) {\n\tdomain := `www.webx.top`\n\tif len(domains) > 0 && len(domains[0]) > 0 {\n\t\tdomain = domains[0]\n\t}\n\ttrackerURL = `https:\/\/` + domain + `\/product\/script\/` + name + `\/tracker.js`\n\tproductURL = `https:\/\/` + domain + `\/product\/detail\/` + name\n\tlicenseURL = `https:\/\/` + domain + `\/product\/license\/` + name\n\tversionURL = `https:\/\/` + domain + `\/product\/version\/` + name\n}\n\nfunc SetProductDomain(domain string) {\n\ttrackerURL = `https:\/\/` + domain + `\/script\/tracker.js`\n\tproductURL = `https:\/\/` + domain + `\/`\n\tlicenseURL = `https:\/\/` + domain + `\/license`\n\tversionURL = `https:\/\/` + domain + `\/version`\n}\n\nfunc SetVersion(ver string) {\n\tversion = ver\n}\n\nfunc SetPackage(pkg string) {\n\tpackageName = pkg\n}\n\nfunc Version() string {\n\treturn version\n}\n\nfunc Package() string {\n\treturn packageName\n}\n\nfunc ProductURL() string {\n\treturn productURL\n}\n\nfunc Domain() string {\n\treturn domain\n}\n\nfunc SetDomain(_domain string) {\n\tif licenseMode != ModeDomain {\n\t\tlicenseMode = ModeDomain\n\t}\n\tdomain = _domain\n}\n\nfunc FullDomain() string {\n\trootDomain := License().Info.Domain\n\tif len(rootDomain) == 0 {\n\t\treturn rootDomain\n\t}\n\trootDomain = strings.Trim(rootDomain, `.`)\n\trealDomain, _ := publicsuffix.EffectiveTLDPlusOne(rootDomain)\n\tif rootDomain == realDomain {\n\t\treturn `www.` + realDomain\n\t}\n\treturn rootDomain\n}\n\nfunc EqDomain(fullDomain string, rootDomain string) bool {\n\treturn lib.CheckDomain(fullDomain, rootDomain)\n}\n\nfunc LicenseMode() Mode {\n\treturn licenseMode\n}\n\nfunc DownloadTime() time.Time {\n\treturn downloadTime\n}\n\nfunc ProductDetailURL() (url string) {\n\turl = ProductURL() + `?version=` + Version()\n\tswitch licenseMode {\n\tcase ModeMachineID:\n\t\tmid, err := MachineID()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\turl += `&machineID=` + mid\n\tcase ModeDomain:\n\t\tif len(Domain()) > 0 {\n\t\t\turl += `&domain=` + Domain()\n\t\t}\n\tdefault:\n\t\tpanic(fmt.Sprintf(`unsupported license mode: %d`, licenseMode))\n\t}\n\treturn\n}\n\nfunc TrackerURL() string {\n\tif trackerURL == `#` {\n\t\treturn ``\n\t}\n\treturn trackerURL + `?version=` + Version() + `&package=` + Package() + `&os=` + config.Version.BuildOS + `&arch=` + config.Version.BuildArch\n}\n\nfunc TrackerHTML() template.HTML {\n\t_trackerURL := TrackerURL()\n\tif len(_trackerURL) == 0 {\n\t\treturn template.HTML(``)\n\t}\n\treturn template.HTML(`<script type=\"text\/javascript\" async src=\"` + _trackerURL + `\"><\/script>`)\n}\n\nfunc FilePath() string {\n\treturn licenseFile\n}\n\nfunc FileName() string {\n\treturn licenseFileName\n}\n\nfunc Error() error {\n\tlock4err.RLock()\n\tdefer lock4err.RUnlock()\n\treturn licenseError\n}\n\nfunc SetError(err error) {\n\tlock4err.Lock()\n\tlicenseError = err\n\tlock4err.Unlock()\n}\n\nfunc License() lib.LicenseData {\n\tlock4data.RLock()\n\tdefer lock4data.RUnlock()\n\tif licenseData == nil {\n\t\treturn emptyLicense\n\t}\n\treturn *licenseData\n}\n\nfunc SetLicense(data *lib.LicenseData) {\n\tlock4data.Lock()\n\tlicenseData = data\n\tlock4data.Unlock()\n\tswitch licenseMode {\n\tcase ModeDomain:\n\t\tif len(Domain()) == 0 {\n\t\t\tSetDomain(data.Info.Domain)\n\t\t}\n\tcase ModeMachineID:\n\t}\n}\n\nvar (\n\tMachineIDEncode = func(v string) string {\n\t\treturn com.MakePassword(v, `coscms`, 3, 8, 19)\n\t}\n\tLicenseDecode = func(b []byte) ([]byte, string) {\n\t\treturn b, GetOrLoadPublicKey()\n\t}\n)\n\n\/\/ MachineID 生成当前机器的机器码\nfunc MachineID() (string, error) {\n\tif len(machineID) > 0 {\n\t\treturn machineID, nil\n\t}\n\taddrs, err := lib.MACAddresses(false)\n\tif err != nil {\n\t\treturn ``, err\n\t}\n\tif len(addrs) < 1 {\n\t\treturn ``, lib.ErrorMachineID\n\t}\n\tcpuInfo, err := cpu.Info()\n\tif err != nil {\n\t\treturn ``, err\n\t}\n\tvar cpuID string\n\tif len(cpuInfo) > 0 {\n\t\tcpuID = cpuInfo[0].PhysicalID\n\t\tif len(cpuID) == 0 {\n\t\t\tcpuID = com.Md5(com.Dump(cpuInfo, false))\n\t\t}\n\t}\n\tmachineID = MachineIDEncode(lib.Hash(addrs[0]) + `#` + cpuID)\n\treturn machineID, err\n}\n\n\/\/ FullLicenseURL 包含完整参数的授权网址\nfunc FullLicenseURL(ctx echo.Context) string {\n\treturn licenseURL + `?` + URLValues(ctx).Encode()\n}\n\n\/\/ URLValues 组装网址参数\nfunc URLValues(ctx echo.Context) url.Values {\n\tv := url.Values{}\n\tv.Set(`os`, config.Version.BuildOS)\n\tv.Set(`arch`, config.Version.BuildArch)\n\tv.Set(`sn`, License().Info.LicenseID)\n\tv.Set(`version`, Version())\n\tv.Set(`package`, Package())\n\tif ctx != nil {\n\t\tv.Set(`source`, ctx.RequestURI())\n\t}\n\tswitch licenseMode {\n\tcase ModeMachineID:\n\t\tif len(machineID) == 0 {\n\t\t\tvar err error\n\t\t\tmachineID, err = MachineID()\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Errorf(`failed to get machineID: %v`, err))\n\t\t\t}\n\t\t}\n\t\tv.Set(`machineID`, machineID)\n\tcase ModeDomain:\n\t\tif len(Domain()) == 0 {\n\t\t\tpanic(`license domain is required`)\n\t\t}\n\t\tv.Set(`domain`, Domain())\n\tdefault:\n\t\tpanic(fmt.Sprintf(`unsupported license mode: %d`, licenseMode))\n\t}\n\tv.Set(`time`, time.Now().Format(`20060102-150405`))\n\treturn v\n}\n<commit_msg>update<commit_after>\/*\n Nging is a toolbox for webmasters\n Copyright (C) 2018-present Wenhui Shen <swh@admpub.com>\n\n This program is free software: you can redistribute it and\/or modify\n it under the terms of the GNU Affero General Public License as published\n by the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU Affero General Public License for more details.\n\n You should have received a copy of the GNU Affero General Public License\n along with this program. If not, see <https:\/\/www.gnu.org\/licenses\/>.\n*\/\n\npackage license\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"net\/url\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com\/admpub\/license_gen\/lib\"\n\t\"github.com\/admpub\/once\"\n\t\"github.com\/shirou\/gopsutil\/v3\/cpu\"\n\t\"github.com\/webx-top\/com\"\n\t\"github.com\/webx-top\/echo\"\n\t\"golang.org\/x\/net\/publicsuffix\"\n\n\t\"github.com\/admpub\/nging\/v3\/application\/library\/config\"\n)\n\ntype Mode int\n\nconst (\n\tModeMachineID Mode = iota\n\tModeDomain\n)\n\nvar (\n\ttrackerURL = `https:\/\/www.webx.top\/product\/script\/nging\/tracker.js`\n\tproductURL = `https:\/\/www.webx.top\/product\/detail\/nging`\n\tlicenseURL = `https:\/\/www.webx.top\/product\/license\/nging`\n\tversionURL = `https:\/\/www.webx.top\/product\/version\/nging`\n\tlicenseMode = ModeMachineID\n\tlicenseData *lib.LicenseData \/\/ 拥有的授权数据\n\tlicenseFileName = `license.key`\n\tlicenseFile = filepath.Join(echo.Wd(), licenseFileName)\n\tlicenseError = lib.UnlicensedVersion\n\tlicenseModTime time.Time\n\temptyLicense = lib.LicenseData{}\n\tdownloadOnce once.Once\n\tdownloadError error\n\tdownloadTime time.Time\n\tlock4err sync.RWMutex\n\tlock4data sync.RWMutex\n\tonSetLicenseHooks []func(*lib.LicenseData)\n\t\/\/ ErrLicenseNotFound 授权证书不存在\n\tErrLicenseNotFound = errors.New(`License does not exist`)\n\t\/\/ SkipLicenseCheck 跳过授权检测\n\tSkipLicenseCheck = true\n\n\t\/\/ - 需要验证的数据\n\n\tversion string \/\/1.2.3-beta\n\tpackageName string \/\/free\n\tmachineID string\n\tdomain string\n)\n\nfunc OnSetLicense(fn func(*lib.LicenseData)) {\n\tif fn == nil {\n\t\treturn\n\t}\n\tonSetLicenseHooks = append(onSetLicenseHooks, fn)\n}\n\nfunc FireSetLicense(data *lib.LicenseData) {\n\tfor _, fn := range onSetLicenseHooks {\n\t\tfn(data)\n\t}\n}\n\ntype ServerURL struct {\n\tTracker string \/\/用于统计分析的js地址\n\tProduct string \/\/该产品的详情介绍页面网址\n\tLicense string \/\/许可证验证和许可证下载API网址\n\tVersion string \/\/该产品最新版本信息API网址\n\tLicenseFileName string \/\/许可证文件名称\n}\n\nfunc (s *ServerURL) Apply() {\n\tif len(s.Tracker) > 0 {\n\t\ttrackerURL = s.Tracker\n\t}\n\tif len(s.Product) > 0 {\n\t\tproductURL = s.Product\n\t}\n\tif len(s.License) > 0 {\n\t\tlicenseURL = s.License\n\t}\n\tif len(s.Version) > 0 {\n\t\tversionURL = s.Version\n\t}\n\tif len(s.LicenseFileName) > 0 {\n\t\tlicenseFileName = s.LicenseFileName\n\t\tlicenseFile = filepath.Join(echo.Wd(), licenseFileName)\n\t}\n}\n\nfunc SetServerURL(s *ServerURL) {\n\tif s != nil {\n\t\ts.Apply()\n\t}\n}\n\nfunc SetProductName(name string, domains ...string) {\n\tdomain := `www.webx.top`\n\tif len(domains) > 0 && len(domains[0]) > 0 {\n\t\tdomain = domains[0]\n\t}\n\ttrackerURL = `https:\/\/` + domain + `\/product\/script\/` + name + `\/tracker.js`\n\tproductURL = `https:\/\/` + domain + `\/product\/detail\/` + name\n\tlicenseURL = `https:\/\/` + domain + `\/product\/license\/` + name\n\tversionURL = `https:\/\/` + domain + `\/product\/version\/` + name\n}\n\nfunc SetProductDomain(domain string) {\n\ttrackerURL = `https:\/\/` + domain + `\/script\/tracker.js`\n\tproductURL = `https:\/\/` + domain + `\/`\n\tlicenseURL = `https:\/\/` + domain + `\/license`\n\tversionURL = `https:\/\/` + domain + `\/version`\n}\n\nfunc SetVersion(ver string) {\n\tversion = ver\n}\n\nfunc SetPackage(pkg string) {\n\tpackageName = pkg\n}\n\nfunc Version() string {\n\treturn version\n}\n\nfunc Package() string {\n\treturn packageName\n}\n\nfunc ProductURL() string {\n\treturn productURL\n}\n\nfunc Domain() string {\n\treturn domain\n}\n\nfunc SetDomain(_domain string) {\n\tif licenseMode != ModeDomain {\n\t\tlicenseMode = ModeDomain\n\t}\n\tdomain = _domain\n}\n\nfunc FullDomain() string {\n\trootDomain := License().Info.Domain\n\tif len(rootDomain) == 0 {\n\t\treturn rootDomain\n\t}\n\trootDomain = strings.Trim(rootDomain, `.`)\n\trealDomain, _ := publicsuffix.EffectiveTLDPlusOne(rootDomain)\n\tif rootDomain == realDomain {\n\t\treturn `www.` + realDomain\n\t}\n\treturn rootDomain\n}\n\nfunc EqDomain(fullDomain string, rootDomain string) bool {\n\treturn lib.CheckDomain(fullDomain, rootDomain)\n}\n\nfunc LicenseMode() Mode {\n\treturn licenseMode\n}\n\nfunc DownloadTime() time.Time {\n\treturn downloadTime\n}\n\nfunc ProductDetailURL() (url string) {\n\turl = ProductURL() + `?version=` + Version()\n\tswitch licenseMode {\n\tcase ModeMachineID:\n\t\tmid, err := MachineID()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\turl += `&machineID=` + mid\n\tcase ModeDomain:\n\t\tif len(Domain()) > 0 {\n\t\t\turl += `&domain=` + Domain()\n\t\t}\n\tdefault:\n\t\tpanic(fmt.Sprintf(`unsupported license mode: %d`, licenseMode))\n\t}\n\treturn\n}\n\nfunc TrackerURL() string {\n\tif trackerURL == `#` {\n\t\treturn ``\n\t}\n\treturn trackerURL + `?version=` + Version() + `&package=` + Package() + `&os=` + config.Version.BuildOS + `&arch=` + config.Version.BuildArch\n}\n\nfunc TrackerHTML() template.HTML {\n\t_trackerURL := TrackerURL()\n\tif len(_trackerURL) == 0 {\n\t\treturn template.HTML(``)\n\t}\n\treturn template.HTML(`<script type=\"text\/javascript\" async src=\"` + _trackerURL + `\"><\/script>`)\n}\n\nfunc FilePath() string {\n\treturn licenseFile\n}\n\nfunc FileName() string {\n\treturn licenseFileName\n}\n\nfunc Error() error {\n\tlock4err.RLock()\n\tdefer lock4err.RUnlock()\n\treturn licenseError\n}\n\nfunc SetError(err error) {\n\tlock4err.Lock()\n\tlicenseError = err\n\tlock4err.Unlock()\n}\n\nfunc License() lib.LicenseData {\n\tlock4data.RLock()\n\tdefer lock4data.RUnlock()\n\tif licenseData == nil {\n\t\treturn emptyLicense\n\t}\n\treturn *licenseData\n}\n\nfunc SetLicense(data *lib.LicenseData) {\n\tFireSetLicense(data)\n\tlock4data.Lock()\n\tlicenseData = data\n\tlock4data.Unlock()\n\tswitch licenseMode {\n\tcase ModeDomain:\n\t\tif len(Domain()) == 0 {\n\t\t\tSetDomain(data.Info.Domain)\n\t\t}\n\tcase ModeMachineID:\n\t}\n}\n\nvar (\n\tMachineIDEncode = func(v string) string {\n\t\treturn com.MakePassword(v, `coscms`, 3, 8, 19)\n\t}\n\tLicenseDecode = func(b []byte) ([]byte, string) {\n\t\treturn b, GetOrLoadPublicKey()\n\t}\n)\n\n\/\/ MachineID 生成当前机器的机器码\nfunc MachineID() (string, error) {\n\tif len(machineID) > 0 {\n\t\treturn machineID, nil\n\t}\n\taddrs, err := lib.MACAddresses(false)\n\tif err != nil {\n\t\treturn ``, err\n\t}\n\tif len(addrs) < 1 {\n\t\treturn ``, lib.ErrorMachineID\n\t}\n\tcpuInfo, err := cpu.Info()\n\tif err != nil {\n\t\treturn ``, err\n\t}\n\tvar cpuID string\n\tif len(cpuInfo) > 0 {\n\t\tcpuID = cpuInfo[0].PhysicalID\n\t\tif len(cpuID) == 0 {\n\t\t\tcpuID = com.Md5(com.Dump(cpuInfo, false))\n\t\t}\n\t}\n\tmachineID = MachineIDEncode(lib.Hash(addrs[0]) + `#` + cpuID)\n\treturn machineID, err\n}\n\n\/\/ FullLicenseURL 包含完整参数的授权网址\nfunc FullLicenseURL(ctx echo.Context) string {\n\treturn licenseURL + `?` + URLValues(ctx).Encode()\n}\n\n\/\/ URLValues 组装网址参数\nfunc URLValues(ctx echo.Context) url.Values {\n\tv := url.Values{}\n\tv.Set(`os`, config.Version.BuildOS)\n\tv.Set(`arch`, config.Version.BuildArch)\n\tv.Set(`sn`, License().Info.LicenseID)\n\tv.Set(`version`, Version())\n\tv.Set(`package`, Package())\n\tif ctx != nil {\n\t\tv.Set(`source`, ctx.RequestURI())\n\t}\n\tswitch licenseMode {\n\tcase ModeMachineID:\n\t\tif len(machineID) == 0 {\n\t\t\tvar err error\n\t\t\tmachineID, err = MachineID()\n\t\t\tif err != nil {\n\t\t\t\tpanic(fmt.Errorf(`failed to get machineID: %v`, err))\n\t\t\t}\n\t\t}\n\t\tv.Set(`machineID`, machineID)\n\tcase ModeDomain:\n\t\tif len(Domain()) == 0 {\n\t\t\tpanic(`license domain is required`)\n\t\t}\n\t\tv.Set(`domain`, Domain())\n\tdefault:\n\t\tpanic(fmt.Sprintf(`unsupported license mode: %d`, licenseMode))\n\t}\n\tv.Set(`time`, time.Now().Format(`20060102-150405`))\n\treturn v\n}\n<|endoftext|>"} {"text":"<commit_before>package terraformer\n\nimport (\n\t\"bytes\"\n\t\"koding\/kites\/terraformer\/kodingcontext\"\n\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"github.com\/koding\/kite\"\n\t\"github.com\/koding\/logging\"\n\t\"github.com\/koding\/metrics\"\n)\n\nvar (\n\tName = \"terraformer\"\n\tVersion = \"0.0.1\"\n)\n\ntype Terraformer struct {\n\t\/\/ Log is a specialized log system for terraform\n\tLog logging.Logger\n\n\t\/\/ Metrics holds the metric aggregator\n\tMetrics *metrics.DogStatsD\n\n\t\/\/ Enable debug mode\n\tDebug bool\n\n\t\/\/ Context holds the initial context, all usages should clone it\n\tContext *kodingcontext.Context\n}\n\ntype TerraformRequest struct {\n\tContent string\n\tVariables map[string]string\n}\n\nfunc New() *Terraformer {\n\treturn &Terraformer{}\n}\n\nfunc (t *Terraformer) Apply(r *kite.Request) (interface{}, error) {\n\tc := t.Context.Clone()\n\tdefer c.Close()\n\n\tctx, err := t.context(c, r, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstate, err := ctx.Apply()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn state, nil\n}\n\nfunc (t *Terraformer) Destroy(r *kite.Request) (interface{}, error) {\n\tc := t.Context.Clone()\n\tdefer c.Close()\n\n\t\/\/\n\t\/\/ plan first with destroy option\n\t\/\/\n\tplan, err := t.plan(c, r, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/\n\t\/\/ create terraform context options from plan\n\t\/\/\n\tcopts := c.TerraformContextOptsWithPlan(plan)\n\n\tcopts.Destroy = true \/\/ this is the key point\n\n\t\/\/ create terraform context with its options\n\tctx := terraform.NewContext(copts)\n\n\t\/\/\n\t\/\/ apply the change\n\t\/\/\n\tstate, err := ctx.Apply()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn state, nil\n}\n\nfunc (t *Terraformer) Plan(r *kite.Request) (interface{}, error) {\n\tc := t.Context.Clone()\n\tdefer c.Close()\n\n\tplan, err := t.plan(c, r, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn plan, nil\n}\n\nfunc (t *Terraformer) context(\n\tc *kodingcontext.Context,\n\tr *kite.Request,\n\tdestroy bool,\n) (*terraform.Context, error) {\n\t\/\/ get the plan\n\tplan, err := t.plan(c, r, destroy)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ create terraform context options from plan\n\tcopts := c.TerraformContextOptsWithPlan(plan)\n\n\t\/\/ create terraform context with its options\n\tctx := terraform.NewContext(copts)\n\n\treturn ctx, nil\n}\n\nfunc (t *Terraformer) plan(\n\tc *kodingcontext.Context,\n\tr *kite.Request,\n\tdestroy bool,\n) (*terraform.Plan, error) {\n\targs := TerraformRequest{}\n\tif err := r.Args.One().Unmarshal(&args); err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.Variables = args.Variables\n\n\tplan, err := c.Plan(\n\t\tbytes.NewBufferString(args.Content),\n\t\tdestroy,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn plan, nil\n}\n<commit_msg>Terrafromer: clean up code<commit_after>package terraformer\n\nimport (\n\t\"koding\/kites\/terraformer\/kodingcontext\"\n\t\"runtime\/debug\"\n\t\"strings\"\n\n\t\"github.com\/hashicorp\/terraform\/terraform\"\n\t\"github.com\/koding\/kite\"\n\t\"github.com\/koding\/logging\"\n\t\"github.com\/koding\/metrics\"\n)\n\nvar (\n\tName = \"terraformer\"\n\tVersion = \"0.0.1\"\n)\n\ntype Terraformer struct {\n\t\/\/ Log is a specialized log system for terraform\n\tLog logging.Logger\n\n\t\/\/ Metrics holds the metric aggregator\n\tMetrics *metrics.DogStatsD\n\n\t\/\/ Enable debug mode\n\tDebug bool\n\n\t\/\/ Context holds the initial context, all usages should clone it\n\tContext *kodingcontext.Context\n}\n\ntype TerraformRequest struct {\n\tContent string\n\tVariables map[string]string\n\tLocation string\n}\n\nfunc New() *Terraformer { return &Terraformer{} }\n\nfunc (t *Terraformer) Plan(r *kite.Request) (plan interface{}, err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tdebug.PrintStack()\n\t\t}\n\t}()\n\n\tc := t.Context.Clone()\n\tdefer c.Close()\n\n\tplan, err = t.plan(c, r, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn plan, nil\n}\n\nfunc (t *Terraformer) Apply(r *kite.Request) (interface{}, error) {\n\tc := t.Context.Clone()\n\tdefer c.Close()\n\n\tctx, err := t.context(c, r, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ctx.Apply()\n}\n\nfunc (t *Terraformer) Destroy(r *kite.Request) (interface{}, error) {\n\tc := t.Context.Clone()\n\tdefer c.Close()\n\n\t\/\/ plan first with destroy option\n\tplan, err := t.plan(c, r, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ create terraform context options from plan\n\tcopts := c.TerraformContextOptsWithPlan(plan)\n\n\tcopts.Destroy = true \/\/ this is the key point\n\n\t\/\/ create terraform context with its options\n\tctx := terraform.NewContext(copts)\n\n\t\/\/\n\t\/\/ apply the change\n\t\/\/\n\treturn ctx.Apply()\n}\n\nfunc (t *Terraformer) context(\n\tc *kodingcontext.Context,\n\tr *kite.Request,\n\tdestroy bool,\n) (*terraform.Context, error) {\n\t\/\/ get the plan\n\tplan, err := t.plan(c, r, destroy)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ create terraform context options from plan\n\tcopts := c.TerraformContextOptsWithPlan(plan)\n\n\t\/\/ create terraform context with its options\n\treturn terraform.NewContext(copts), nil\n}\n\nfunc (t *Terraformer) plan(\n\tc *kodingcontext.Context,\n\tr *kite.Request,\n\tdestroy bool,\n) (*terraform.Plan, error) {\n\targs := TerraformRequest{}\n\tif err := r.Args.One().Unmarshal(&args); err != nil {\n\t\treturn nil, err\n\t}\n\n\tc.Variables = args.Variables\n\tc.Location = args.Location\n\n\tcontent := strings.NewReader(args.Content)\n\treturn c.Plan(content, destroy)\n}\n<|endoftext|>"} {"text":"<commit_before>package sparse\n\nimport (\n\t\"github.com\/gonum\/matrix\"\n\t\"github.com\/gonum\/matrix\/mat64\"\n)\n\ntype compressedSparse struct {\n\ti, j int\n\tindptr []int\n\tind []int\n\tdata []float64\n}\n\nfunc (c *compressedSparse) NNZ() int {\n\treturn len(c.data)\n}\n\nfunc (c *compressedSparse) at(i, j int) float64 {\n\tif uint(i) < 0 || uint(i) >= uint(c.i) {\n\t\tpanic(matrix.ErrRowAccess)\n\t}\n\tif uint(j) < 0 || uint(j) >= uint(c.j) {\n\t\tpanic(matrix.ErrColAccess)\n\t}\n\n\t\/\/ todo: consider a binary search if we can assume the data is ordered.\n\tfor k := c.indptr[i]; k < c.indptr[i+1]; k++ {\n\t\tif c.ind[k] == j {\n\t\t\treturn c.data[k]\n\t\t}\n\t}\n\n\treturn 0\n}\n\ntype CSR struct {\n\tcompressedSparse\n}\n\nfunc NewCSR(r int, c int, ia []int, ja []int, data []float64) *CSR {\n\tif uint(r) < 0 {\n\t\tpanic(matrix.ErrRowAccess)\n\t}\n\tif uint(c) < 0 {\n\t\tpanic(matrix.ErrColAccess)\n\t}\n\n\treturn &CSR{\n\t\tcompressedSparse: compressedSparse{\n\t\t\ti: r, j: c,\n\t\t\tindptr: ia,\n\t\t\tind: ja,\n\t\t\tdata: data,\n\t\t},\n\t}\n}\n\nfunc (c *CSR) Dims() (int, int) {\n\treturn c.i, c.j\n}\n\nfunc (c *CSR) At(m, n int) float64 {\n\treturn c.at(m, n)\n}\n\nfunc (c *CSR) T() mat64.Matrix {\n\treturn NewCSC(c.j, c.i, c.indptr, c.ind, c.data)\n}\n\nfunc (c *CSR) ToDense() *mat64.Dense {\n\tmat := mat64.NewDense(c.i, c.j, nil)\n\n\tfor i := 0; i < len(c.indptr)-1; i++ {\n\t\tfor j := c.indptr[i]; j < c.indptr[i+1]; j++ {\n\t\t\tmat.Set(i, c.ind[j], c.data[j])\n\t\t}\n\t}\n\n\treturn mat\n}\n\nfunc (c *CSR) ToDOK() *DOK {\n\tdok := NewDOK(c.i, c.j)\n\tfor i := 0; i < len(c.indptr)-1; i++ {\n\t\tfor j := c.indptr[i]; j < c.indptr[i+1]; j++ {\n\t\t\tdok.Set(i, c.ind[j], c.data[j])\n\t\t}\n\t}\n\n\treturn dok\n}\n\nfunc (c *CSR) ToCOO() *COO {\n\trows := make([]int, c.NNZ())\n\n\tfor i := 0; i < len(c.indptr)-1; i++ {\n\t\tfor j := c.indptr[i]; j < c.indptr[i+1]; j++ {\n\t\t\trows[j] = i\n\t\t}\n\t}\n\n\tcoo := NewCOO(c.i, c.j, rows, c.ind, c.data)\n\n\treturn coo\n}\n\nfunc (c *CSR) ToCSR() *CSR {\n\treturn c\n}\n\nfunc (c *CSR) ToCSC() *CSC {\n\treturn c.ToCOO().ToCSC()\n}\n\nfunc (c *CSR) ToType(matType MatrixType) mat64.Matrix {\n\treturn matType.Convert(c)\n}\n\nfunc (c *CSR) Mul(a, b mat64.Matrix) {\n\tif dia, ok := a.(*DIA); ok {\n\t\tc.mulDIA(dia, b, false)\n\t\treturn\n\t}\n\tif dia, ok := b.(*DIA); ok {\n\t\tc.mulDIA(dia, a, true)\n\t\treturn\n\t}\n\n\tar, ac := a.Dims()\n\tbr, bc := b.Dims()\n\n\tc.indptr = make([]int, ar+1)\n\n\tif ac != br {\n\t\tpanic(matrix.ErrShape)\n\t}\n\n\tc.i, c.j = ar, bc\n\tt := 0\n\n\tlhs, isCsr := a.(*CSR)\n\n\tif isCsr {\n\t\tfor i := 0; i < ar; i++ {\n\t\t\tc.indptr[i] = t\n\t\t\tfor j := 0; j < bc; j++ {\n\t\t\t\tvar v float64\n\t\t\t\t\/\/ TODO Consider converting all Sparsers to CSR\n\t\t\t\tfor k := lhs.indptr[i]; k < lhs.indptr[i+1]; k++ {\n\t\t\t\t\tv += lhs.data[k] * b.At(lhs.ind[k], j)\n\t\t\t\t}\n\t\t\t\tif v != 0 {\n\t\t\t\t\tt++\n\t\t\t\t\tc.ind = append(c.ind, j)\n\t\t\t\t\tc.data = append(c.data, v)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\trow := make([]float64, ac)\n\t\tfor i := 0; i < ar; i++ {\n\t\t\tc.indptr[i] = t\n\t\t\tfor ci := range row {\n\t\t\t\trow[ci] = a.At(i, ci)\n\t\t\t}\n\t\t\tfor j := 0; j < bc; j++ {\n\t\t\t\tvar v float64\n\t\t\t\tfor ci, e := range row {\n\t\t\t\t\tv += e * b.At(ci, j)\n\t\t\t\t}\n\t\t\t\tif v != 0 {\n\t\t\t\t\tt++\n\t\t\t\t\tc.ind = append(c.ind, j)\n\t\t\t\t\tc.data = append(c.data, v)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tc.indptr[c.i] = t\n}\n\nfunc (c *CSR) mulDIA(dia *DIA, other mat64.Matrix, trans bool) {\n\tvar csMat compressedSparse\n\tisCS := false\n\n\tif csr, ok := other.(*CSR); ok {\n\t\t\/\/ TODO consider converting all sparsers to CSR (for RHS operand)\n\t\tcsMat = csr.compressedSparse\n\t\tisCS = true\n\t\tc.ind = make([]int, len(csMat.ind))\n\t\tc.data = make([]float64, len(csMat.data))\n\t}\n\n\tc.i, c.j = other.Dims()\n\tc.indptr = make([]int, c.i+1)\n\tt := 0\n\traw := dia.Diagonal()\n\n\tfor i := 0; i < c.i; i++ {\n\t\tc.indptr[i] = t\n\t\tvar v float64\n\n\t\tif isCS {\n\t\t\tfor k := csMat.indptr[i]; k < csMat.indptr[i+1]; k++ {\n\t\t\t\tvar rawval float64\n\t\t\t\tif trans {\n\t\t\t\t\trawval = raw[csMat.ind[k]]\n\t\t\t\t} else {\n\t\t\t\t\trawval = raw[i]\n\t\t\t\t}\n\t\t\t\tv = csMat.data[k] * rawval\n\t\t\t\tif v != 0 {\n\t\t\t\t\tc.ind[t] = csMat.ind[k]\n\t\t\t\t\tc.data[t] = v\n\t\t\t\t\tt++\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfor k := 0; k < c.j; k++ {\n\t\t\t\tvar rawval float64\n\t\t\t\tif trans {\n\t\t\t\t\trawval = raw[k]\n\t\t\t\t} else {\n\t\t\t\t\trawval = raw[i]\n\t\t\t\t}\n\t\t\t\tv = other.At(i, k) * rawval\n\t\t\t\tif v != 0 {\n\t\t\t\t\tc.ind = append(c.ind, k)\n\t\t\t\t\tc.data = append(c.data, v)\n\t\t\t\t\tt++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tc.indptr[c.i] = t\n\n\treturn\n}\n\nfunc (c *CSR) Mul2(a, b mat64.Matrix) {\n\tar, ac := a.Dims()\n\tbr, bc := b.Dims()\n\n\tc.indptr = make([]int, ar+1)\n\n\tif rhs, ok := b.(*DIA); ok {\n\t\tc.i, c.j = ar, ac\n\t\tvar size int\n\t\tif lhs, ok := a.(*CSR); ok {\n\t\t\tsize = lhs.NNZ()\n\t\t} else {\n\t\t\tsize = ar * bc\n\t\t}\n\t\tc.ind = make([]int, size)\n\t\tc.data = make([]float64, size)\n\t\tt := 0\n\t\traw := rhs.Diagonal()\n\n\t\tfor i := 0; i < ar; i++ {\n\t\t\tc.indptr[i] = t\n\t\t\tvar v float64\n\t\t\tif lhs, ok := a.(*CSR); ok {\n\t\t\t\tfor k := lhs.indptr[i]; k < lhs.indptr[i+1]; k++ {\n\t\t\t\t\tv = lhs.data[k] * raw[i]\n\t\t\t\t\tif v != 0 {\n\t\t\t\t\t\tc.ind[t] = k\n\t\t\t\t\t\tc.data[t] = v\n\t\t\t\t\t\tt++\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tfor k := 0; k < ac; k++ {\n\t\t\t\t\tv = a.At(i, k) * raw[i]\n\t\t\t\t\tif v != 0 {\n\t\t\t\t\t\tc.ind[t] = k\n\t\t\t\t\t\tc.data[t] = v\n\t\t\t\t\t\tt++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tc.indptr[c.i] = t\n\t\tc.ind = c.ind[:t]\n\t\tc.data = c.data[:t]\n\t\treturn\n\t}\n\n\tif ac != br {\n\t\tpanic(matrix.ErrShape)\n\t}\n\n\tc.i, c.j = ar, bc\n\tc.ind = make([]int, ar*bc)\n\tc.data = make([]float64, ar*bc)\n\n\tt := 0\n\n\tfor i := 0; i < ar; i++ {\n\t\tc.indptr[i] = t\n\t\tfor j := 0; j < bc; j++ {\n\t\t\tvar v float64\n\t\t\tif lhs, ok := a.(*CSR); ok {\n\t\t\t\tfor k := lhs.indptr[i]; k < lhs.indptr[i+1]; k++ {\n\t\t\t\t\tv += lhs.data[k] * b.At(lhs.ind[k], j)\n\t\t\t\t}\n\t\t\t\tif v != 0 {\n\t\t\t\t\tc.ind[t] = j\n\t\t\t\t\tc.data[t] = v\n\t\t\t\t\tt++\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor k := 0; k < ac; k++ {\n\t\t\t\t\tv += a.At(i, k) * b.At(k, j)\n\t\t\t\t}\n\t\t\t\tif v != 0 {\n\t\t\t\t\tc.ind[t] = j\n\t\t\t\t\tc.data[t] = v\n\t\t\t\t\tt++\n\t\t\t\t}\n\t\t\t\t\/\/c.set(i, j, v)\n\t\t\t}\n\t\t}\n\t}\n\tc.indptr[c.i] = t\n\tc.ind = c.ind[:t]\n\tc.data = c.data[:t]\n}\n\nfunc (c *CSR) RowNNZ(i int) int {\n\tif uint(i) < 0 || uint(i) >= uint(c.i) {\n\t\tpanic(matrix.ErrRowAccess)\n\t}\n\treturn c.indptr[i+1] - c.indptr[i]\n}\n\ntype CSC struct {\n\tcompressedSparse\n}\n\nfunc NewCSC(r int, c int, indptr []int, ind []int, data []float64) *CSC {\n\tif uint(r) < 0 {\n\t\tpanic(matrix.ErrRowAccess)\n\t}\n\tif uint(c) < 0 {\n\t\tpanic(matrix.ErrColAccess)\n\t}\n\n\treturn &CSC{\n\t\tcompressedSparse: compressedSparse{\n\t\t\ti: c, j: r,\n\t\t\tindptr: indptr,\n\t\t\tind: ind,\n\t\t\tdata: data,\n\t\t},\n\t}\n}\n\nfunc (c *CSC) Dims() (int, int) {\n\treturn c.j, c.i\n}\n\nfunc (c *CSC) At(m, n int) float64 {\n\treturn c.at(n, m)\n}\n\nfunc (c *CSC) T() mat64.Matrix {\n\treturn NewCSR(c.i, c.j, c.indptr, c.ind, c.data)\n}\n\nfunc (c *CSC) ToDense() *mat64.Dense {\n\tmat := mat64.NewDense(c.j, c.i, nil)\n\n\tfor i := 0; i < len(c.indptr)-1; i++ {\n\t\tfor j := c.indptr[i]; j < c.indptr[i+1]; j++ {\n\t\t\tmat.Set(c.ind[j], i, c.data[j])\n\t\t}\n\t}\n\n\treturn mat\n}\n\nfunc (c *CSC) ToDOK() *DOK {\n\tdok := NewDOK(c.j, c.i)\n\tfor i := 0; i < len(c.indptr)-1; i++ {\n\t\tfor j := c.indptr[i]; j < c.indptr[i+1]; j++ {\n\t\t\tdok.Set(c.ind[j], i, c.data[j])\n\t\t}\n\t}\n\n\treturn dok\n}\n\nfunc (c *CSC) ToCOO() *COO {\n\tcols := make([]int, c.NNZ())\n\n\tfor i := 0; i < len(c.indptr)-1; i++ {\n\t\tfor j := c.indptr[i]; j < c.indptr[i+1]; j++ {\n\t\t\tcols[j] = i\n\t\t}\n\t}\n\n\tcoo := NewCOO(c.j, c.i, c.ind, cols, c.data)\n\n\treturn coo\n}\n\nfunc (c *CSC) ToCSR() *CSR {\n\treturn c.ToCOO().ToCSR()\n}\n\nfunc (c *CSC) ToCSC() *CSC {\n\treturn c\n}\n\nfunc (c *CSC) ToType(matType MatrixType) mat64.Matrix {\n\treturn matType.Convert(c)\n}\n<commit_msg>added dimension mismatch check for multiplication with a DIA matrix)<commit_after>package sparse\n\nimport (\n\t\"github.com\/gonum\/matrix\"\n\t\"github.com\/gonum\/matrix\/mat64\"\n)\n\ntype compressedSparse struct {\n\ti, j int\n\tindptr []int\n\tind []int\n\tdata []float64\n}\n\nfunc (c *compressedSparse) NNZ() int {\n\treturn len(c.data)\n}\n\nfunc (c *compressedSparse) at(i, j int) float64 {\n\tif uint(i) < 0 || uint(i) >= uint(c.i) {\n\t\tpanic(matrix.ErrRowAccess)\n\t}\n\tif uint(j) < 0 || uint(j) >= uint(c.j) {\n\t\tpanic(matrix.ErrColAccess)\n\t}\n\n\t\/\/ todo: consider a binary search if we can assume the data is ordered.\n\tfor k := c.indptr[i]; k < c.indptr[i+1]; k++ {\n\t\tif c.ind[k] == j {\n\t\t\treturn c.data[k]\n\t\t}\n\t}\n\n\treturn 0\n}\n\ntype CSR struct {\n\tcompressedSparse\n}\n\nfunc NewCSR(r int, c int, ia []int, ja []int, data []float64) *CSR {\n\tif uint(r) < 0 {\n\t\tpanic(matrix.ErrRowAccess)\n\t}\n\tif uint(c) < 0 {\n\t\tpanic(matrix.ErrColAccess)\n\t}\n\n\treturn &CSR{\n\t\tcompressedSparse: compressedSparse{\n\t\t\ti: r, j: c,\n\t\t\tindptr: ia,\n\t\t\tind: ja,\n\t\t\tdata: data,\n\t\t},\n\t}\n}\n\nfunc (c *CSR) Dims() (int, int) {\n\treturn c.i, c.j\n}\n\nfunc (c *CSR) At(m, n int) float64 {\n\treturn c.at(m, n)\n}\n\nfunc (c *CSR) T() mat64.Matrix {\n\treturn NewCSC(c.j, c.i, c.indptr, c.ind, c.data)\n}\n\nfunc (c *CSR) ToDense() *mat64.Dense {\n\tmat := mat64.NewDense(c.i, c.j, nil)\n\n\tfor i := 0; i < len(c.indptr)-1; i++ {\n\t\tfor j := c.indptr[i]; j < c.indptr[i+1]; j++ {\n\t\t\tmat.Set(i, c.ind[j], c.data[j])\n\t\t}\n\t}\n\n\treturn mat\n}\n\nfunc (c *CSR) ToDOK() *DOK {\n\tdok := NewDOK(c.i, c.j)\n\tfor i := 0; i < len(c.indptr)-1; i++ {\n\t\tfor j := c.indptr[i]; j < c.indptr[i+1]; j++ {\n\t\t\tdok.Set(i, c.ind[j], c.data[j])\n\t\t}\n\t}\n\n\treturn dok\n}\n\nfunc (c *CSR) ToCOO() *COO {\n\trows := make([]int, c.NNZ())\n\n\tfor i := 0; i < len(c.indptr)-1; i++ {\n\t\tfor j := c.indptr[i]; j < c.indptr[i+1]; j++ {\n\t\t\trows[j] = i\n\t\t}\n\t}\n\n\tcoo := NewCOO(c.i, c.j, rows, c.ind, c.data)\n\n\treturn coo\n}\n\nfunc (c *CSR) ToCSR() *CSR {\n\treturn c\n}\n\nfunc (c *CSR) ToCSC() *CSC {\n\treturn c.ToCOO().ToCSC()\n}\n\nfunc (c *CSR) ToType(matType MatrixType) mat64.Matrix {\n\treturn matType.Convert(c)\n}\n\nfunc (c *CSR) Mul(a, b mat64.Matrix) {\n\tar, ac := a.Dims()\n\tbr, bc := b.Dims()\n\n\tif dia, ok := a.(*DIA); ok {\n\t\tif ac != br {\n\t\t\tpanic(matrix.ErrShape)\n\t\t}\n\t\tc.mulDIA(dia, b, false)\n\t\treturn\n\t}\n\tif dia, ok := b.(*DIA); ok {\n\t\tif bc != ar {\n\t\t\tpanic(matrix.ErrShape)\n\t\t}\n\t\tc.mulDIA(dia, a, true)\n\t\treturn\n\t}\n\n\tif ac != br {\n\t\tpanic(matrix.ErrShape)\n\t}\n\n\tc.indptr = make([]int, ar+1)\n\n\tc.i, c.j = ar, bc\n\tt := 0\n\n\tlhs, isCsr := a.(*CSR)\n\n\tif isCsr {\n\t\tfor i := 0; i < ar; i++ {\n\t\t\tc.indptr[i] = t\n\t\t\tfor j := 0; j < bc; j++ {\n\t\t\t\tvar v float64\n\t\t\t\t\/\/ TODO Consider converting all Sparsers to CSR\n\t\t\t\tfor k := lhs.indptr[i]; k < lhs.indptr[i+1]; k++ {\n\t\t\t\t\tv += lhs.data[k] * b.At(lhs.ind[k], j)\n\t\t\t\t}\n\t\t\t\tif v != 0 {\n\t\t\t\t\tt++\n\t\t\t\t\tc.ind = append(c.ind, j)\n\t\t\t\t\tc.data = append(c.data, v)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\trow := make([]float64, ac)\n\t\tfor i := 0; i < ar; i++ {\n\t\t\tc.indptr[i] = t\n\t\t\tfor ci := range row {\n\t\t\t\trow[ci] = a.At(i, ci)\n\t\t\t}\n\t\t\tfor j := 0; j < bc; j++ {\n\t\t\t\tvar v float64\n\t\t\t\tfor ci, e := range row {\n\t\t\t\t\tv += e * b.At(ci, j)\n\t\t\t\t}\n\t\t\t\tif v != 0 {\n\t\t\t\t\tt++\n\t\t\t\t\tc.ind = append(c.ind, j)\n\t\t\t\t\tc.data = append(c.data, v)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tc.indptr[c.i] = t\n}\n\nfunc (c *CSR) mulDIA(dia *DIA, other mat64.Matrix, trans bool) {\n\tvar csMat compressedSparse\n\tisCS := false\n\n\tif csr, ok := other.(*CSR); ok {\n\t\t\/\/ TODO consider converting all sparsers to CSR (for RHS operand)\n\t\tcsMat = csr.compressedSparse\n\t\tisCS = true\n\t\tc.ind = make([]int, len(csMat.ind))\n\t\tc.data = make([]float64, len(csMat.data))\n\t}\n\n\tc.i, c.j = other.Dims()\n\tc.indptr = make([]int, c.i+1)\n\tt := 0\n\traw := dia.Diagonal()\n\n\tfor i := 0; i < c.i; i++ {\n\t\tc.indptr[i] = t\n\t\tvar v float64\n\n\t\tif isCS {\n\t\t\tfor k := csMat.indptr[i]; k < csMat.indptr[i+1]; k++ {\n\t\t\t\tvar rawval float64\n\t\t\t\tif trans {\n\t\t\t\t\trawval = raw[csMat.ind[k]]\n\t\t\t\t} else {\n\t\t\t\t\trawval = raw[i]\n\t\t\t\t}\n\t\t\t\tv = csMat.data[k] * rawval\n\t\t\t\tif v != 0 {\n\t\t\t\t\tc.ind[t] = csMat.ind[k]\n\t\t\t\t\tc.data[t] = v\n\t\t\t\t\tt++\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfor k := 0; k < c.j; k++ {\n\t\t\t\tvar rawval float64\n\t\t\t\tif trans {\n\t\t\t\t\trawval = raw[k]\n\t\t\t\t} else {\n\t\t\t\t\trawval = raw[i]\n\t\t\t\t}\n\t\t\t\tv = other.At(i, k) * rawval\n\t\t\t\tif v != 0 {\n\t\t\t\t\tc.ind = append(c.ind, k)\n\t\t\t\t\tc.data = append(c.data, v)\n\t\t\t\t\tt++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tc.indptr[c.i] = t\n\n\treturn\n}\n\nfunc (c *CSR) Mul2(a, b mat64.Matrix) {\n\tar, ac := a.Dims()\n\tbr, bc := b.Dims()\n\n\tc.indptr = make([]int, ar+1)\n\n\tif rhs, ok := b.(*DIA); ok {\n\t\tc.i, c.j = ar, ac\n\t\tvar size int\n\t\tif lhs, ok := a.(*CSR); ok {\n\t\t\tsize = lhs.NNZ()\n\t\t} else {\n\t\t\tsize = ar * bc\n\t\t}\n\t\tc.ind = make([]int, size)\n\t\tc.data = make([]float64, size)\n\t\tt := 0\n\t\traw := rhs.Diagonal()\n\n\t\tfor i := 0; i < ar; i++ {\n\t\t\tc.indptr[i] = t\n\t\t\tvar v float64\n\t\t\tif lhs, ok := a.(*CSR); ok {\n\t\t\t\tfor k := lhs.indptr[i]; k < lhs.indptr[i+1]; k++ {\n\t\t\t\t\tv = lhs.data[k] * raw[i]\n\t\t\t\t\tif v != 0 {\n\t\t\t\t\t\tc.ind[t] = k\n\t\t\t\t\t\tc.data[t] = v\n\t\t\t\t\t\tt++\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tfor k := 0; k < ac; k++ {\n\t\t\t\t\tv = a.At(i, k) * raw[i]\n\t\t\t\t\tif v != 0 {\n\t\t\t\t\t\tc.ind[t] = k\n\t\t\t\t\t\tc.data[t] = v\n\t\t\t\t\t\tt++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tc.indptr[c.i] = t\n\t\tc.ind = c.ind[:t]\n\t\tc.data = c.data[:t]\n\t\treturn\n\t}\n\n\tif ac != br {\n\t\tpanic(matrix.ErrShape)\n\t}\n\n\tc.i, c.j = ar, bc\n\tc.ind = make([]int, ar*bc)\n\tc.data = make([]float64, ar*bc)\n\n\tt := 0\n\n\tfor i := 0; i < ar; i++ {\n\t\tc.indptr[i] = t\n\t\tfor j := 0; j < bc; j++ {\n\t\t\tvar v float64\n\t\t\tif lhs, ok := a.(*CSR); ok {\n\t\t\t\tfor k := lhs.indptr[i]; k < lhs.indptr[i+1]; k++ {\n\t\t\t\t\tv += lhs.data[k] * b.At(lhs.ind[k], j)\n\t\t\t\t}\n\t\t\t\tif v != 0 {\n\t\t\t\t\tc.ind[t] = j\n\t\t\t\t\tc.data[t] = v\n\t\t\t\t\tt++\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor k := 0; k < ac; k++ {\n\t\t\t\t\tv += a.At(i, k) * b.At(k, j)\n\t\t\t\t}\n\t\t\t\tif v != 0 {\n\t\t\t\t\tc.ind[t] = j\n\t\t\t\t\tc.data[t] = v\n\t\t\t\t\tt++\n\t\t\t\t}\n\t\t\t\t\/\/c.set(i, j, v)\n\t\t\t}\n\t\t}\n\t}\n\tc.indptr[c.i] = t\n\tc.ind = c.ind[:t]\n\tc.data = c.data[:t]\n}\n\nfunc (c *CSR) RowNNZ(i int) int {\n\tif uint(i) < 0 || uint(i) >= uint(c.i) {\n\t\tpanic(matrix.ErrRowAccess)\n\t}\n\treturn c.indptr[i+1] - c.indptr[i]\n}\n\ntype CSC struct {\n\tcompressedSparse\n}\n\nfunc NewCSC(r int, c int, indptr []int, ind []int, data []float64) *CSC {\n\tif uint(r) < 0 {\n\t\tpanic(matrix.ErrRowAccess)\n\t}\n\tif uint(c) < 0 {\n\t\tpanic(matrix.ErrColAccess)\n\t}\n\n\treturn &CSC{\n\t\tcompressedSparse: compressedSparse{\n\t\t\ti: c, j: r,\n\t\t\tindptr: indptr,\n\t\t\tind: ind,\n\t\t\tdata: data,\n\t\t},\n\t}\n}\n\nfunc (c *CSC) Dims() (int, int) {\n\treturn c.j, c.i\n}\n\nfunc (c *CSC) At(m, n int) float64 {\n\treturn c.at(n, m)\n}\n\nfunc (c *CSC) T() mat64.Matrix {\n\treturn NewCSR(c.i, c.j, c.indptr, c.ind, c.data)\n}\n\nfunc (c *CSC) ToDense() *mat64.Dense {\n\tmat := mat64.NewDense(c.j, c.i, nil)\n\n\tfor i := 0; i < len(c.indptr)-1; i++ {\n\t\tfor j := c.indptr[i]; j < c.indptr[i+1]; j++ {\n\t\t\tmat.Set(c.ind[j], i, c.data[j])\n\t\t}\n\t}\n\n\treturn mat\n}\n\nfunc (c *CSC) ToDOK() *DOK {\n\tdok := NewDOK(c.j, c.i)\n\tfor i := 0; i < len(c.indptr)-1; i++ {\n\t\tfor j := c.indptr[i]; j < c.indptr[i+1]; j++ {\n\t\t\tdok.Set(c.ind[j], i, c.data[j])\n\t\t}\n\t}\n\n\treturn dok\n}\n\nfunc (c *CSC) ToCOO() *COO {\n\tcols := make([]int, c.NNZ())\n\n\tfor i := 0; i < len(c.indptr)-1; i++ {\n\t\tfor j := c.indptr[i]; j < c.indptr[i+1]; j++ {\n\t\t\tcols[j] = i\n\t\t}\n\t}\n\n\tcoo := NewCOO(c.j, c.i, c.ind, cols, c.data)\n\n\treturn coo\n}\n\nfunc (c *CSC) ToCSR() *CSR {\n\treturn c.ToCOO().ToCSR()\n}\n\nfunc (c *CSC) ToCSC() *CSC {\n\treturn c\n}\n\nfunc (c *CSC) ToType(matType MatrixType) mat64.Matrix {\n\treturn matType.Convert(c)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage html\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"json\"\n\t\"strings\"\n\t\"utf8\"\n)\n\n\/\/ nextJSCtx returns the context that determines whether a slash after the\n\/\/ given run of tokens tokens starts a regular expression instead of a division\n\/\/ operator: \/ or \/=.\n\/\/\n\/\/ This assumes that the token run does not include any string tokens, comment\n\/\/ tokens, regular expression literal tokens, or division operators.\n\/\/\n\/\/ This fails on some valid but nonsensical JavaScript programs like\n\/\/ \"x = ++\/foo\/i\" which is quite different than \"x++\/foo\/i\", but is not known to\n\/\/ fail on any known useful programs. It is based on the draft\n\/\/ JavaScript 2.0 lexical grammar and requires one token of lookbehind:\n\/\/ http:\/\/www.mozilla.org\/js\/language\/js20-2000-07\/rationale\/syntax.html\nfunc nextJSCtx(s []byte, preceding jsCtx) jsCtx {\n\ts = bytes.TrimRight(s, \"\\t\\n\\f\\r \\u2028\\u2029\")\n\tif len(s) == 0 {\n\t\treturn preceding\n\t}\n\n\t\/\/ All cases below are in the single-byte UTF-8 group.\n\tswitch c, n := s[len(s)-1], len(s); c {\n\tcase '+', '-':\n\t\t\/\/ ++ and -- are not regexp preceders, but + and - are whether\n\t\t\/\/ they are used as infix or prefix operators.\n\t\tstart := n - 1\n\t\t\/\/ Count the number of adjacent dashes or pluses.\n\t\tfor start > 0 && s[start-1] == c {\n\t\t\tstart--\n\t\t}\n\t\tif (n-start)&1 == 1 {\n\t\t\t\/\/ Reached for trailing minus signs since \"---\" is the\n\t\t\t\/\/ same as \"-- -\".\n\t\t\treturn jsCtxRegexp\n\t\t}\n\t\treturn jsCtxDivOp\n\tcase '.':\n\t\t\/\/ Handle \"42.\"\n\t\tif n != 1 && '0' <= s[n-2] && s[n-2] <= '9' {\n\t\t\treturn jsCtxDivOp\n\t\t}\n\t\treturn jsCtxRegexp\n\t\/\/ Suffixes for all punctuators from section 7.7 of the language spec\n\t\/\/ that only end binary operators not handled above.\n\tcase ',', '<', '>', '=', '*', '%', '&', '|', '^', '?':\n\t\treturn jsCtxRegexp\n\t\/\/ Suffixes for all punctuators from section 7.7 of the language spec\n\t\/\/ that are prefix operators not handled above.\n\tcase '!', '~':\n\t\treturn jsCtxRegexp\n\t\/\/ Matches all the punctuators from section 7.7 of the language spec\n\t\/\/ that are open brackets not handled above.\n\tcase '(', '[':\n\t\treturn jsCtxRegexp\n\t\/\/ Matches all the punctuators from section 7.7 of the language spec\n\t\/\/ that precede expression starts.\n\tcase ':', ';', '{':\n\t\treturn jsCtxRegexp\n\t\/\/ CAVEAT: the close punctuators ('}', ']', ')') precede div ops and\n\t\/\/ are handled in the default except for '}' which can precede a\n\t\/\/ division op as in\n\t\/\/ ({ valueOf: function () { return 42 } } \/ 2\n\t\/\/ which is valid, but, in practice, developers don't divide object\n\t\/\/ literals, so our heuristic works well for code like\n\t\/\/ function () { ... } \/foo\/.test(x) && sideEffect();\n\t\/\/ The ')' punctuator can precede a regular expression as in\n\t\/\/ if (b) \/foo\/.test(x) && ...\n\t\/\/ but this is much less likely than\n\t\/\/ (a + b) \/ c\n\tcase '}':\n\t\treturn jsCtxRegexp\n\tdefault:\n\t\t\/\/ Look for an IdentifierName and see if it is a keyword that\n\t\t\/\/ can precede a regular expression.\n\t\tj := n\n\t\tfor j > 0 && isJSIdentPart(int(s[j-1])) {\n\t\t\tj--\n\t\t}\n\t\tif regexpPrecederKeywords[string(s[j:])] {\n\t\t\treturn jsCtxRegexp\n\t\t}\n\t}\n\t\/\/ Otherwise is a punctuator not listed above, or\n\t\/\/ a string which precedes a div op, or an identifier\n\t\/\/ which precedes a div op.\n\treturn jsCtxDivOp\n}\n\n\/\/ regexPrecederKeywords is a set of reserved JS keywords that can precede a\n\/\/ regular expression in JS source.\nvar regexpPrecederKeywords = map[string]bool{\n\t\"break\": true,\n\t\"case\": true,\n\t\"continue\": true,\n\t\"delete\": true,\n\t\"do\": true,\n\t\"else\": true,\n\t\"finally\": true,\n\t\"in\": true,\n\t\"instanceof\": true,\n\t\"return\": true,\n\t\"throw\": true,\n\t\"try\": true,\n\t\"typeof\": true,\n\t\"void\": true,\n}\n\n\/\/ jsValEscaper escapes its inputs to a JS Expression (section 11.14) that has\n\/\/ nether side-effects nor free variables outside (NaN, Infinity).\nfunc jsValEscaper(args ...interface{}) string {\n\tvar a interface{}\n\tif len(args) == 1 {\n\t\ta = args[0]\n\t} else {\n\t\ta = fmt.Sprint(args...)\n\t}\n\t\/\/ TODO: detect cycles before calling Marshal which loops infinitely on\n\t\/\/ cyclic data. This may be an unnacceptable DoS risk.\n\n\t\/\/ TODO: make sure that json.Marshal escapes codepoints U+2028 & U+2029\n\t\/\/ so it falls within the subset of JSON which is valid JS and maybe\n\t\/\/ post-process to prevent it from containing\n\t\/\/ \"<!--\", \"-->\", \"<![CDATA[\", \"]]>\", or \"<\/script\"\n\t\/\/ in case custom marshallers produce output containing those.\n\n\t\/\/ TODO: Maybe abbreviate \\u00ab to \\xab to produce more compact output.\n\n\t\/\/ TODO: JSON allows arbitrary unicode codepoints, but EcmaScript\n\t\/\/ defines a SourceCharacter as either a UTF-16 or UCS-2 code-unit.\n\t\/\/ Determine whether supplemental codepoints in UTF-8 encoded JS inside\n\t\/\/ string literals are properly interpreted by major interpreters.\n\n\tb, err := json.Marshal(a)\n\tif err != nil {\n\t\t\/\/ Put a space before comment so that if it is flush against\n\t\t\/\/ a division operator it is not turned into a line comment:\n\t\t\/\/ x\/{{y}}\n\t\t\/\/ turning into\n\t\t\/\/ x\/\/* error marshalling y:\n\t\t\/\/ second line of error message *\/null\n\t\treturn fmt.Sprintf(\" \/* %s *\/null \", strings.Replace(err.String(), \"*\/\", \"* \/\", -1))\n\t}\n\tif len(b) != 0 {\n\t\tfirst, _ := utf8.DecodeRune(b)\n\t\tlast, _ := utf8.DecodeLastRune(b)\n\t\tif isJSIdentPart(first) || isJSIdentPart(last) {\n\t\t\treturn \" \" + string(b) + \" \"\n\t\t}\n\t}\n\treturn string(b)\n}\n\n\/\/ jsStrEscaper produces a string that can be included between quotes in\n\/\/ JavaScript source, in JavaScript embedded in an HTML5 <script> element,\n\/\/ or in an HTML5 event handler attribute such as onclick.\nfunc jsStrEscaper(args ...interface{}) string {\n\tok := false\n\tvar s string\n\tif len(args) == 1 {\n\t\ts, ok = args[0].(string)\n\t}\n\tif !ok {\n\t\ts = fmt.Sprint(args...)\n\t}\n\tvar b bytes.Buffer\n\twritten := 0\n\tfor i, r := range s {\n\t\tvar repl string\n\t\tswitch r {\n\t\tcase 0:\n\t\t\trepl = `\\0`\n\t\tcase '\\t':\n\t\t\trepl = `\\t`\n\t\tcase '\\n':\n\t\t\trepl = `\\n`\n\t\tcase '\\v':\n\t\t\t\/\/ \"\\v\" == \"v\" on IE 6.\n\t\t\trepl = `\\x0b`\n\t\tcase '\\f':\n\t\t\trepl = `\\f`\n\t\tcase '\\r':\n\t\t\trepl = `\\r`\n\t\t\/\/ Encode HTML specials as hex so the output can be embedded\n\t\t\/\/ in HTML attributes without further encoding.\n\t\tcase '\"':\n\t\t\trepl = `\\x22`\n\t\tcase '&':\n\t\t\trepl = `\\x26`\n\t\tcase '\\'':\n\t\t\trepl = `\\x27`\n\t\tcase '+':\n\t\t\trepl = `\\x2b`\n\t\tcase '\/':\n\t\t\trepl = `\\\/`\n\t\tcase '<':\n\t\t\trepl = `\\x3c`\n\t\tcase '>':\n\t\t\trepl = `\\x3e`\n\t\tcase '\\\\':\n\t\t\trepl = `\\\\`\n\t\tcase '\\u2028':\n\t\t\trepl = `\\u2028`\n\t\tcase '\\u2029':\n\t\t\trepl = `\\u2029`\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t\tb.WriteString(s[written:i])\n\t\tb.WriteString(repl)\n\t\twritten = i + utf8.RuneLen(r)\n\t}\n\tif written == 0 {\n\t\treturn s\n\t}\n\tb.WriteString(s[written:])\n\treturn b.String()\n}\n\n\/\/ jsRegexpEscaper behaves like jsStrEscaper but escapes regular expression\n\/\/ specials so the result is treated literally when included in a regular\n\/\/ expression literal. \/foo{{.X}}bar\/ matches the string \"foo\" followed by\n\/\/ the literal text of {{.X}} followed by the string \"bar\".\nfunc jsRegexpEscaper(args ...interface{}) string {\n\tok := false\n\tvar s string\n\tif len(args) == 1 {\n\t\ts, ok = args[0].(string)\n\t}\n\tif !ok {\n\t\ts = fmt.Sprint(args...)\n\t}\n\tvar b bytes.Buffer\n\twritten := 0\n\tfor i, r := range s {\n\t\tvar repl string\n\t\tswitch r {\n\t\tcase 0:\n\t\t\trepl = `\\0`\n\t\tcase '\\t':\n\t\t\trepl = `\\t`\n\t\tcase '\\n':\n\t\t\trepl = `\\n`\n\t\tcase '\\v':\n\t\t\t\/\/ \"\\v\" == \"v\" on IE 6.\n\t\t\trepl = `\\x0b`\n\t\tcase '\\f':\n\t\t\trepl = `\\f`\n\t\tcase '\\r':\n\t\t\trepl = `\\r`\n\t\t\/\/ Encode HTML specials as hex so the output can be embedded\n\t\t\/\/ in HTML attributes without further encoding.\n\t\tcase '\"':\n\t\t\trepl = `\\x22`\n\t\tcase '$':\n\t\t\trepl = `\\$`\n\t\tcase '&':\n\t\t\trepl = `\\x26`\n\t\tcase '\\'':\n\t\t\trepl = `\\x27`\n\t\tcase '(':\n\t\t\trepl = `\\(`\n\t\tcase ')':\n\t\t\trepl = `\\)`\n\t\tcase '*':\n\t\t\trepl = `\\*`\n\t\tcase '+':\n\t\t\trepl = `\\x2b`\n\t\tcase '-':\n\t\t\trepl = `\\-`\n\t\tcase '.':\n\t\t\trepl = `\\.`\n\t\tcase '\/':\n\t\t\trepl = `\\\/`\n\t\tcase '<':\n\t\t\trepl = `\\x3c`\n\t\tcase '>':\n\t\t\trepl = `\\x3e`\n\t\tcase '?':\n\t\t\trepl = `\\?`\n\t\tcase '[':\n\t\t\trepl = `\\[`\n\t\tcase '\\\\':\n\t\t\trepl = `\\\\`\n\t\tcase ']':\n\t\t\trepl = `\\]`\n\t\tcase '^':\n\t\t\trepl = `\\^`\n\t\tcase '{':\n\t\t\trepl = `\\{`\n\t\tcase '|':\n\t\t\trepl = `\\|`\n\t\tcase '}':\n\t\t\trepl = `\\}`\n\t\tcase '\\u2028':\n\t\t\trepl = `\\u2028`\n\t\tcase '\\u2029':\n\t\t\trepl = `\\u2029`\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t\tb.WriteString(s[written:i])\n\t\tb.WriteString(repl)\n\t\twritten = i + utf8.RuneLen(r)\n\t}\n\tif written == 0 {\n\t\treturn s\n\t}\n\tb.WriteString(s[written:])\n\treturn b.String()\n}\n\n\/\/ isJSIdentPart is true if the given rune is a JS identifier part.\n\/\/ It does not handle all the non-Latin letters, joiners, and combining marks,\n\/\/ but it does handle every codepoint that can occur in a numeric literal or\n\/\/ a keyword.\nfunc isJSIdentPart(rune int) bool {\n\tswitch {\n\tcase '$' == rune:\n\t\treturn true\n\tcase '0' <= rune && rune <= '9':\n\t\treturn true\n\tcase 'A' <= rune && rune <= 'Z':\n\t\treturn true\n\tcase '_' == rune:\n\t\treturn true\n\tcase 'a' <= rune && rune <= 'z':\n\t\treturn true\n\t}\n\treturn false\n}\n<commit_msg>exp\/template\/html: string replacement refactoring.<commit_after>\/\/ Copyright 2011 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage html\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"json\"\n\t\"strings\"\n\t\"utf8\"\n)\n\n\/\/ nextJSCtx returns the context that determines whether a slash after the\n\/\/ given run of tokens tokens starts a regular expression instead of a division\n\/\/ operator: \/ or \/=.\n\/\/\n\/\/ This assumes that the token run does not include any string tokens, comment\n\/\/ tokens, regular expression literal tokens, or division operators.\n\/\/\n\/\/ This fails on some valid but nonsensical JavaScript programs like\n\/\/ \"x = ++\/foo\/i\" which is quite different than \"x++\/foo\/i\", but is not known to\n\/\/ fail on any known useful programs. It is based on the draft\n\/\/ JavaScript 2.0 lexical grammar and requires one token of lookbehind:\n\/\/ http:\/\/www.mozilla.org\/js\/language\/js20-2000-07\/rationale\/syntax.html\nfunc nextJSCtx(s []byte, preceding jsCtx) jsCtx {\n\ts = bytes.TrimRight(s, \"\\t\\n\\f\\r \\u2028\\u2029\")\n\tif len(s) == 0 {\n\t\treturn preceding\n\t}\n\n\t\/\/ All cases below are in the single-byte UTF-8 group.\n\tswitch c, n := s[len(s)-1], len(s); c {\n\tcase '+', '-':\n\t\t\/\/ ++ and -- are not regexp preceders, but + and - are whether\n\t\t\/\/ they are used as infix or prefix operators.\n\t\tstart := n - 1\n\t\t\/\/ Count the number of adjacent dashes or pluses.\n\t\tfor start > 0 && s[start-1] == c {\n\t\t\tstart--\n\t\t}\n\t\tif (n-start)&1 == 1 {\n\t\t\t\/\/ Reached for trailing minus signs since \"---\" is the\n\t\t\t\/\/ same as \"-- -\".\n\t\t\treturn jsCtxRegexp\n\t\t}\n\t\treturn jsCtxDivOp\n\tcase '.':\n\t\t\/\/ Handle \"42.\"\n\t\tif n != 1 && '0' <= s[n-2] && s[n-2] <= '9' {\n\t\t\treturn jsCtxDivOp\n\t\t}\n\t\treturn jsCtxRegexp\n\t\/\/ Suffixes for all punctuators from section 7.7 of the language spec\n\t\/\/ that only end binary operators not handled above.\n\tcase ',', '<', '>', '=', '*', '%', '&', '|', '^', '?':\n\t\treturn jsCtxRegexp\n\t\/\/ Suffixes for all punctuators from section 7.7 of the language spec\n\t\/\/ that are prefix operators not handled above.\n\tcase '!', '~':\n\t\treturn jsCtxRegexp\n\t\/\/ Matches all the punctuators from section 7.7 of the language spec\n\t\/\/ that are open brackets not handled above.\n\tcase '(', '[':\n\t\treturn jsCtxRegexp\n\t\/\/ Matches all the punctuators from section 7.7 of the language spec\n\t\/\/ that precede expression starts.\n\tcase ':', ';', '{':\n\t\treturn jsCtxRegexp\n\t\/\/ CAVEAT: the close punctuators ('}', ']', ')') precede div ops and\n\t\/\/ are handled in the default except for '}' which can precede a\n\t\/\/ division op as in\n\t\/\/ ({ valueOf: function () { return 42 } } \/ 2\n\t\/\/ which is valid, but, in practice, developers don't divide object\n\t\/\/ literals, so our heuristic works well for code like\n\t\/\/ function () { ... } \/foo\/.test(x) && sideEffect();\n\t\/\/ The ')' punctuator can precede a regular expression as in\n\t\/\/ if (b) \/foo\/.test(x) && ...\n\t\/\/ but this is much less likely than\n\t\/\/ (a + b) \/ c\n\tcase '}':\n\t\treturn jsCtxRegexp\n\tdefault:\n\t\t\/\/ Look for an IdentifierName and see if it is a keyword that\n\t\t\/\/ can precede a regular expression.\n\t\tj := n\n\t\tfor j > 0 && isJSIdentPart(int(s[j-1])) {\n\t\t\tj--\n\t\t}\n\t\tif regexpPrecederKeywords[string(s[j:])] {\n\t\t\treturn jsCtxRegexp\n\t\t}\n\t}\n\t\/\/ Otherwise is a punctuator not listed above, or\n\t\/\/ a string which precedes a div op, or an identifier\n\t\/\/ which precedes a div op.\n\treturn jsCtxDivOp\n}\n\n\/\/ regexPrecederKeywords is a set of reserved JS keywords that can precede a\n\/\/ regular expression in JS source.\nvar regexpPrecederKeywords = map[string]bool{\n\t\"break\": true,\n\t\"case\": true,\n\t\"continue\": true,\n\t\"delete\": true,\n\t\"do\": true,\n\t\"else\": true,\n\t\"finally\": true,\n\t\"in\": true,\n\t\"instanceof\": true,\n\t\"return\": true,\n\t\"throw\": true,\n\t\"try\": true,\n\t\"typeof\": true,\n\t\"void\": true,\n}\n\n\/\/ jsValEscaper escapes its inputs to a JS Expression (section 11.14) that has\n\/\/ nether side-effects nor free variables outside (NaN, Infinity).\nfunc jsValEscaper(args ...interface{}) string {\n\tvar a interface{}\n\tif len(args) == 1 {\n\t\ta = args[0]\n\t} else {\n\t\ta = fmt.Sprint(args...)\n\t}\n\t\/\/ TODO: detect cycles before calling Marshal which loops infinitely on\n\t\/\/ cyclic data. This may be an unnacceptable DoS risk.\n\n\t\/\/ TODO: make sure that json.Marshal escapes codepoints U+2028 & U+2029\n\t\/\/ so it falls within the subset of JSON which is valid JS and maybe\n\t\/\/ post-process to prevent it from containing\n\t\/\/ \"<!--\", \"-->\", \"<![CDATA[\", \"]]>\", or \"<\/script\"\n\t\/\/ in case custom marshallers produce output containing those.\n\n\t\/\/ TODO: Maybe abbreviate \\u00ab to \\xab to produce more compact output.\n\n\t\/\/ TODO: JSON allows arbitrary unicode codepoints, but EcmaScript\n\t\/\/ defines a SourceCharacter as either a UTF-16 or UCS-2 code-unit.\n\t\/\/ Determine whether supplemental codepoints in UTF-8 encoded JS inside\n\t\/\/ string literals are properly interpreted by major interpreters.\n\n\tb, err := json.Marshal(a)\n\tif err != nil {\n\t\t\/\/ Put a space before comment so that if it is flush against\n\t\t\/\/ a division operator it is not turned into a line comment:\n\t\t\/\/ x\/{{y}}\n\t\t\/\/ turning into\n\t\t\/\/ x\/\/* error marshalling y:\n\t\t\/\/ second line of error message *\/null\n\t\treturn fmt.Sprintf(\" \/* %s *\/null \", strings.Replace(err.String(), \"*\/\", \"* \/\", -1))\n\t}\n\tif len(b) != 0 {\n\t\tfirst, _ := utf8.DecodeRune(b)\n\t\tlast, _ := utf8.DecodeLastRune(b)\n\t\tif isJSIdentPart(first) || isJSIdentPart(last) {\n\t\t\treturn \" \" + string(b) + \" \"\n\t\t}\n\t}\n\treturn string(b)\n}\n\n\/\/ jsStrEscaper produces a string that can be included between quotes in\n\/\/ JavaScript source, in JavaScript embedded in an HTML5 <script> element,\n\/\/ or in an HTML5 event handler attribute such as onclick.\nfunc jsStrEscaper(args ...interface{}) string {\n\treturn replace(stringify(args...), jsStrReplacementTable)\n}\n\n\/\/ jsRegexpEscaper behaves like jsStrEscaper but escapes regular expression\n\/\/ specials so the result is treated literally when included in a regular\n\/\/ expression literal. \/foo{{.X}}bar\/ matches the string \"foo\" followed by\n\/\/ the literal text of {{.X}} followed by the string \"bar\".\nfunc jsRegexpEscaper(args ...interface{}) string {\n\treturn replace(stringify(args...), jsRegexpReplacementTable)\n}\n\n\/\/ stringify is an optimized form of fmt.Sprint.\nfunc stringify(args ...interface{}) string {\n\tif len(args) == 1 {\n\t\tif s, ok := args[0].(string); ok {\n\t\t\treturn s\n\t\t}\n\t}\n\treturn fmt.Sprint(args...)\n}\n\n\/\/ replace replaces each rune r of s with replacementTable[r], provided that\n\/\/ r < len(replacementTable). If replacementTable[r] is the empty string then\n\/\/ no replacement is made.\n\/\/ It also replaces the runes '\\u2028' and '\\u2029' with the strings\n\/\/ `\\u2028` and `\\u2029`. Note the different quotes used.\nfunc replace(s string, replacementTable []string) string {\n\tvar b bytes.Buffer\n\twritten := 0\n\tfor i, r := range s {\n\t\tvar repl string\n\t\tswitch {\n\t\tcase r < len(replacementTable) && replacementTable[r] != \"\":\n\t\t\trepl = replacementTable[r]\n\t\tcase r == '\\u2028':\n\t\t\trepl = `\\u2028`\n\t\tcase r == '\\u2029':\n\t\t\trepl = `\\u2029`\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\t\tb.WriteString(s[written:i])\n\t\tb.WriteString(repl)\n\t\twritten = i + utf8.RuneLen(r)\n\t}\n\tif written == 0 {\n\t\treturn s\n\t}\n\tb.WriteString(s[written:])\n\treturn b.String()\n}\n\nvar jsStrReplacementTable = []string{\n\t0: `\\0`,\n\t'\\t': `\\t`,\n\t'\\n': `\\n`,\n\t'\\v': `\\x0b`, \/\/ \"\\v\" == \"v\" on IE 6.\n\t'\\f': `\\f`,\n\t'\\r': `\\r`,\n\t\/\/ Encode HTML specials as hex so the output can be embedded\n\t\/\/ in HTML attributes without further encoding.\n\t'\"': `\\x22`,\n\t'&': `\\x26`,\n\t'\\'': `\\x27`,\n\t'+': `\\x2b`,\n\t'\/': `\\\/`,\n\t'<': `\\x3c`,\n\t'>': `\\x3e`,\n\t'\\\\': `\\\\`,\n}\n\nvar jsRegexpReplacementTable = []string{\n\t0: `\\0`,\n\t'\\t': `\\t`,\n\t'\\n': `\\n`,\n\t'\\v': `\\x0b`, \/\/ \"\\v\" == \"v\" on IE 6.\n\t'\\f': `\\f`,\n\t'\\r': `\\r`,\n\t\/\/ Encode HTML specials as hex so the output can be embedded\n\t\/\/ in HTML attributes without further encoding.\n\t'\"': `\\x22`,\n\t'$': `\\$`,\n\t'&': `\\x26`,\n\t'\\'': `\\x27`,\n\t'(': `\\(`,\n\t')': `\\)`,\n\t'*': `\\*`,\n\t'+': `\\x2b`,\n\t'-': `\\-`,\n\t'.': `\\.`,\n\t'\/': `\\\/`,\n\t'<': `\\x3c`,\n\t'>': `\\x3e`,\n\t'?': `\\?`,\n\t'[': `\\[`,\n\t'\\\\': `\\\\`,\n\t']': `\\]`,\n\t'^': `\\^`,\n\t'{': `\\{`,\n\t'|': `\\|`,\n\t'}': `\\}`,\n}\n\n\/\/ isJSIdentPart is true if the given rune is a JS identifier part.\n\/\/ It does not handle all the non-Latin letters, joiners, and combining marks,\n\/\/ but it does handle every codepoint that can occur in a numeric literal or\n\/\/ a keyword.\nfunc isJSIdentPart(rune int) bool {\n\tswitch {\n\tcase '$' == rune:\n\t\treturn true\n\tcase '0' <= rune && rune <= '9':\n\t\treturn true\n\tcase 'A' <= rune && rune <= 'Z':\n\t\treturn true\n\tcase '_' == rune:\n\t\treturn true\n\tcase 'a' <= rune && rune <= 'z':\n\t\treturn true\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nfunc main() {\n\tfoo(43)\n}\n\nfunc foo(f int) {\n\tdefer func(o, f int) {\n\t\tprintln(\"defer\", o, f)\n\t}(one(), f+10)\n\tprintln(\"foo\")\n\tf++\n\tprintln(\"end\", f)\n}\n\nfunc one() int {\n\tprintln(\"one\")\n\treturn 1\n}\n<commit_msg>remove defer<commit_after><|endoftext|>"} {"text":"<commit_before><commit_msg>lint pfs-volume-driver<commit_after><|endoftext|>"} {"text":"<commit_before>package tests_test\n\nimport (\n\t\"fmt\"\n\n\texpect \"github.com\/google\/goexpect\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\tv1network \"k8s.io\/api\/networking\/v1\"\n\tv13 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\n\tv1 \"kubevirt.io\/client-go\/api\/v1\"\n\t\"kubevirt.io\/client-go\/kubecli\"\n\t\"kubevirt.io\/kubevirt\/tests\"\n\tcd \"kubevirt.io\/kubevirt\/tests\/containerdisk\"\n)\n\nfunc assertPingSucceed(ip string, vmi *v1.VirtualMachineInstance) {\n\texpecter, err := tests.LoggedInCirrosExpecter(vmi)\n\tExpect(err).ToNot(HaveOccurred())\n\tdefer expecter.Close()\n\n\terr = tests.CheckForTextExpecter(vmi, []expect.Batcher{\n\t\t&expect.BSnd{S: fmt.Sprintf(\"ping -w 3 %s \\n\", ip)},\n\t\t&expect.BExp{R: \"0% packet loss\"},\n\t}, 60)\n\tExpect(err).ToNot(HaveOccurred())\n}\n\nfunc assertPingFail(ip string, vmi *v1.VirtualMachineInstance) {\n\texpecter, err := tests.LoggedInCirrosExpecter(vmi)\n\tExpect(err).ToNot(HaveOccurred())\n\tdefer expecter.Close()\n\n\terr = tests.CheckForTextExpecter(vmi, []expect.Batcher{\n\t\t&expect.BSnd{S: fmt.Sprintf(\"ping -w 3 %s \\n\", ip)},\n\t\t&expect.BExp{R: \"100% packet loss\"},\n\t}, 60)\n\tExpect(err).ToNot(HaveOccurred())\n}\n\nvar _ = Describe(\"[rfe_id:150][crit:high][vendor:cnv-qe@redhat.com][level:component]Networkpolicy\", func() {\n\n\tvar virtClient kubecli.KubevirtClient\n\n\tvar vmia *v1.VirtualMachineInstance\n\tvar vmib *v1.VirtualMachineInstance\n\tvar vmic *v1.VirtualMachineInstance\n\n\ttests.BeforeAll(func() {\n\t\tvar err error\n\n\t\tvirtClient, err = kubecli.GetKubevirtClient()\n\t\ttests.PanicOnError(err)\n\n\t\ttests.SkipIfUseFlannel(virtClient)\n\t\ttests.BeforeTestCleanup()\n\t\t\/\/ Create three vmis, vmia and vmib are in same namespace, vmic is in different namespace\n\t\tvmia = tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), \"#!\/bin\/bash\\necho 'hello'\\n\")\n\t\tvmia.Labels = map[string]string{\"type\": \"test\"}\n\t\tvmia, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(vmia)\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\tvmib = tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), \"#!\/bin\/bash\\necho 'hello'\\n\")\n\t\t_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(vmib)\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\tvmic = tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), \"#!\/bin\/bash\\necho 'hello'\\n\")\n\t\tvmic.Namespace = tests.NamespaceTestAlternative\n\t\t_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestAlternative).Create(vmic)\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\ttests.WaitForSuccessfulVMIStart(vmia)\n\t\ttests.WaitForSuccessfulVMIStart(vmib)\n\t\ttests.WaitForSuccessfulVMIStart(vmic)\n\n\t\tvmia, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Get(vmia.Name, &v13.GetOptions{})\n\t\tExpect(err).ToNot(HaveOccurred())\n\t\tvmib, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Get(vmib.Name, &v13.GetOptions{})\n\t\tExpect(err).ToNot(HaveOccurred())\n\t\tvmic, err = virtClient.VirtualMachineInstance(tests.NamespaceTestAlternative).Get(vmic.Name, &v13.GetOptions{})\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t})\n\n\tContext(\"vms limited by Default-deny networkpolicy\", func() {\n\n\t\tBeforeEach(func() {\n\t\t\t\/\/ deny-by-default networkpolicy will deny all the traffice to the vms in the namespace\n\t\t\tBy(\"Create deny-by-default networkpolicy\")\n\t\t\tnetworkpolicy := &v1network.NetworkPolicy{\n\t\t\t\tObjectMeta: v13.ObjectMeta{\n\t\t\t\t\tName: \"deny-by-default\",\n\t\t\t\t},\n\t\t\t\tSpec: v1network.NetworkPolicySpec{\n\t\t\t\t\tPodSelector: v13.LabelSelector{},\n\t\t\t\t\tIngress: []v1network.NetworkPolicyIngressRule{},\n\t\t\t\t},\n\t\t\t}\n\t\t\t_, err := virtClient.NetworkingV1().NetworkPolicies(vmia.Namespace).Create(networkpolicy)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t})\n\n\t\tIt(\"[test_id:1511] should be failed to reach vmia from vmib\", func() {\n\t\t\tBy(\"Connect vmia from vmib\")\n\t\t\tip := vmia.Status.Interfaces[0].IP\n\t\t\tassertPingFail(ip, vmib)\n\t\t})\n\n\t\tIt(\"[test_id:1512] should be failed to reach vmib from vmia\", func() {\n\t\t\tBy(\"Connect vmib from vmia\")\n\t\t\tip := vmib.Status.Interfaces[0].IP\n\t\t\tassertPingFail(ip, vmia)\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tExpect(virtClient.NetworkingV1().NetworkPolicies(vmia.Namespace).Delete(\"deny-by-default\", &v13.DeleteOptions{})).To(Succeed())\n\t\t})\n\n\t})\n\n\tContext(\"vms limited by allow same namespace networkpolicy\", func() {\n\t\tBeforeEach(func() {\n\t\t\t\/\/ allow-same-namespave networkpolicy will only allow the traffice inside the namespace\n\t\t\tBy(\"Create allow-same-namespace networkpolicy\")\n\t\t\tnetworkpolicy := &v1network.NetworkPolicy{\n\t\t\t\tObjectMeta: v13.ObjectMeta{\n\t\t\t\t\tName: \"allow-same-namespace\",\n\t\t\t\t},\n\t\t\t\tSpec: v1network.NetworkPolicySpec{\n\t\t\t\t\tPodSelector: v13.LabelSelector{},\n\t\t\t\t\tIngress: []v1network.NetworkPolicyIngressRule{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tFrom: []v1network.NetworkPolicyPeer{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tPodSelector: &v13.LabelSelector{},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\t_, err := virtClient.NetworkingV1().NetworkPolicies(vmia.Namespace).Create(networkpolicy)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t})\n\n\t\tIt(\"[test_id:1513] should be successful to reach vmia from vmib\", func() {\n\t\t\tBy(\"Connect vmia from vmib in same namespace\")\n\t\t\tip := vmia.Status.Interfaces[0].IP\n\t\t\tassertPingSucceed(ip, vmib)\n\t\t})\n\n\t\tIt(\"[test_id:1514] should be failed to reach vmia from vmic\", func() {\n\t\t\tBy(\"Connect vmia from vmic in differnet namespace\")\n\t\t\tip := vmia.Status.Interfaces[0].IP\n\t\t\tassertPingFail(ip, vmic)\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tExpect(virtClient.NetworkingV1().NetworkPolicies(vmia.Namespace).Delete(\"allow-same-namespace\", &v13.DeleteOptions{})).To(Succeed())\n\t\t})\n\n\t})\n\n\tContext(\"vms limited by deny by label networkpolicy\", func() {\n\t\tBeforeEach(func() {\n\t\t\t\/\/ deny-by-label networkpolicy will deny the traffice for the vm which have the same label\n\t\t\tBy(\"Create deny-by-label networkpolicy\")\n\t\t\tnetworkpolicy := &v1network.NetworkPolicy{\n\t\t\t\tObjectMeta: v13.ObjectMeta{\n\t\t\t\t\tName: \"deny-by-label\",\n\t\t\t\t},\n\t\t\t\tSpec: v1network.NetworkPolicySpec{\n\t\t\t\t\tPodSelector: v13.LabelSelector{\n\t\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\t\t\"type\": \"test\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tIngress: []v1network.NetworkPolicyIngressRule{},\n\t\t\t\t},\n\t\t\t}\n\t\t\t_, err := virtClient.NetworkingV1().NetworkPolicies(vmia.Namespace).Create(networkpolicy)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t})\n\n\t\tIt(\"[test_id:1515] should be failed to reach vmia from vmic\", func() {\n\t\t\tBy(\"Connect vmia from vmic\")\n\t\t\tip := vmia.Status.Interfaces[0].IP\n\t\t\tassertPingFail(ip, vmic)\n\t\t})\n\n\t\tIt(\"[test_id:1516] should be failed to reach vmia from vmib\", func() {\n\t\t\tBy(\"Connect vmia from vmib\")\n\t\t\tip := vmia.Status.Interfaces[0].IP\n\t\t\tassertPingFail(ip, vmib)\n\t\t})\n\n\t\tIt(\"[test_id:1517] should be successful to reach vmib from vmic\", func() {\n\t\t\tBy(\"Connect vmib from vmic\")\n\t\t\tip := vmib.Status.Interfaces[0].IP\n\t\t\tassertPingSucceed(ip, vmic)\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tExpect(virtClient.NetworkingV1().NetworkPolicies(vmia.Namespace).Delete(\"deny-by-label\", &v13.DeleteOptions{})).To(Succeed())\n\t\t})\n\n\t})\n\n})\n<commit_msg>tests, Add skipNetworkPolicyRunningOnKindInfra for NetworkPolicy tests<commit_after>package tests_test\n\nimport (\n\t\"fmt\"\n\n\texpect \"github.com\/google\/goexpect\"\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\tv1network \"k8s.io\/api\/networking\/v1\"\n\tv13 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\n\tv1 \"kubevirt.io\/client-go\/api\/v1\"\n\t\"kubevirt.io\/client-go\/kubecli\"\n\t\"kubevirt.io\/kubevirt\/tests\"\n\tcd \"kubevirt.io\/kubevirt\/tests\/containerdisk\"\n)\n\nfunc assertPingSucceed(ip string, vmi *v1.VirtualMachineInstance) {\n\texpecter, err := tests.LoggedInCirrosExpecter(vmi)\n\tExpect(err).ToNot(HaveOccurred())\n\tdefer expecter.Close()\n\n\terr = tests.CheckForTextExpecter(vmi, []expect.Batcher{\n\t\t&expect.BSnd{S: fmt.Sprintf(\"ping -w 3 %s \\n\", ip)},\n\t\t&expect.BExp{R: \"0% packet loss\"},\n\t}, 60)\n\tExpect(err).ToNot(HaveOccurred())\n}\n\nfunc assertPingFail(ip string, vmi *v1.VirtualMachineInstance) {\n\texpecter, err := tests.LoggedInCirrosExpecter(vmi)\n\tExpect(err).ToNot(HaveOccurred())\n\tdefer expecter.Close()\n\n\terr = tests.CheckForTextExpecter(vmi, []expect.Batcher{\n\t\t&expect.BSnd{S: fmt.Sprintf(\"ping -w 3 %s \\n\", ip)},\n\t\t&expect.BExp{R: \"100% packet loss\"},\n\t}, 60)\n\tExpect(err).ToNot(HaveOccurred())\n}\n\nvar _ = Describe(\"[rfe_id:150][crit:high][vendor:cnv-qe@redhat.com][level:component]Networkpolicy\", func() {\n\n\tvar virtClient kubecli.KubevirtClient\n\n\tvar vmia *v1.VirtualMachineInstance\n\tvar vmib *v1.VirtualMachineInstance\n\tvar vmic *v1.VirtualMachineInstance\n\n\ttests.BeforeAll(func() {\n\t\tvar err error\n\n\t\tvirtClient, err = kubecli.GetKubevirtClient()\n\t\ttests.PanicOnError(err)\n\n\t\ttests.SkipIfUseFlannel(virtClient)\n\t\tskipNetworkPolicyRunningOnKindInfra()\n\t\ttests.BeforeTestCleanup()\n\t\t\/\/ Create three vmis, vmia and vmib are in same namespace, vmic is in different namespace\n\t\tvmia = tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), \"#!\/bin\/bash\\necho 'hello'\\n\")\n\t\tvmia.Labels = map[string]string{\"type\": \"test\"}\n\t\tvmia, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(vmia)\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\tvmib = tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), \"#!\/bin\/bash\\necho 'hello'\\n\")\n\t\t_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Create(vmib)\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\tvmic = tests.NewRandomVMIWithEphemeralDiskAndUserdata(cd.ContainerDiskFor(cd.ContainerDiskCirros), \"#!\/bin\/bash\\necho 'hello'\\n\")\n\t\tvmic.Namespace = tests.NamespaceTestAlternative\n\t\t_, err = virtClient.VirtualMachineInstance(tests.NamespaceTestAlternative).Create(vmic)\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t\ttests.WaitForSuccessfulVMIStart(vmia)\n\t\ttests.WaitForSuccessfulVMIStart(vmib)\n\t\ttests.WaitForSuccessfulVMIStart(vmic)\n\n\t\tvmia, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Get(vmia.Name, &v13.GetOptions{})\n\t\tExpect(err).ToNot(HaveOccurred())\n\t\tvmib, err = virtClient.VirtualMachineInstance(tests.NamespaceTestDefault).Get(vmib.Name, &v13.GetOptions{})\n\t\tExpect(err).ToNot(HaveOccurred())\n\t\tvmic, err = virtClient.VirtualMachineInstance(tests.NamespaceTestAlternative).Get(vmic.Name, &v13.GetOptions{})\n\t\tExpect(err).ToNot(HaveOccurred())\n\n\t})\n\n\tContext(\"vms limited by Default-deny networkpolicy\", func() {\n\n\t\tBeforeEach(func() {\n\t\t\t\/\/ deny-by-default networkpolicy will deny all the traffice to the vms in the namespace\n\t\t\tBy(\"Create deny-by-default networkpolicy\")\n\t\t\tnetworkpolicy := &v1network.NetworkPolicy{\n\t\t\t\tObjectMeta: v13.ObjectMeta{\n\t\t\t\t\tName: \"deny-by-default\",\n\t\t\t\t},\n\t\t\t\tSpec: v1network.NetworkPolicySpec{\n\t\t\t\t\tPodSelector: v13.LabelSelector{},\n\t\t\t\t\tIngress: []v1network.NetworkPolicyIngressRule{},\n\t\t\t\t},\n\t\t\t}\n\t\t\t_, err := virtClient.NetworkingV1().NetworkPolicies(vmia.Namespace).Create(networkpolicy)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t})\n\n\t\tIt(\"[test_id:1511] should be failed to reach vmia from vmib\", func() {\n\t\t\tBy(\"Connect vmia from vmib\")\n\t\t\tip := vmia.Status.Interfaces[0].IP\n\t\t\tassertPingFail(ip, vmib)\n\t\t})\n\n\t\tIt(\"[test_id:1512] should be failed to reach vmib from vmia\", func() {\n\t\t\tBy(\"Connect vmib from vmia\")\n\t\t\tip := vmib.Status.Interfaces[0].IP\n\t\t\tassertPingFail(ip, vmia)\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tExpect(virtClient.NetworkingV1().NetworkPolicies(vmia.Namespace).Delete(\"deny-by-default\", &v13.DeleteOptions{})).To(Succeed())\n\t\t})\n\n\t})\n\n\tContext(\"vms limited by allow same namespace networkpolicy\", func() {\n\t\tBeforeEach(func() {\n\t\t\t\/\/ allow-same-namespave networkpolicy will only allow the traffice inside the namespace\n\t\t\tBy(\"Create allow-same-namespace networkpolicy\")\n\t\t\tnetworkpolicy := &v1network.NetworkPolicy{\n\t\t\t\tObjectMeta: v13.ObjectMeta{\n\t\t\t\t\tName: \"allow-same-namespace\",\n\t\t\t\t},\n\t\t\t\tSpec: v1network.NetworkPolicySpec{\n\t\t\t\t\tPodSelector: v13.LabelSelector{},\n\t\t\t\t\tIngress: []v1network.NetworkPolicyIngressRule{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tFrom: []v1network.NetworkPolicyPeer{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tPodSelector: &v13.LabelSelector{},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\t_, err := virtClient.NetworkingV1().NetworkPolicies(vmia.Namespace).Create(networkpolicy)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t})\n\n\t\tIt(\"[test_id:1513] should be successful to reach vmia from vmib\", func() {\n\t\t\tBy(\"Connect vmia from vmib in same namespace\")\n\t\t\tip := vmia.Status.Interfaces[0].IP\n\t\t\tassertPingSucceed(ip, vmib)\n\t\t})\n\n\t\tIt(\"[test_id:1514] should be failed to reach vmia from vmic\", func() {\n\t\t\tBy(\"Connect vmia from vmic in differnet namespace\")\n\t\t\tip := vmia.Status.Interfaces[0].IP\n\t\t\tassertPingFail(ip, vmic)\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tExpect(virtClient.NetworkingV1().NetworkPolicies(vmia.Namespace).Delete(\"allow-same-namespace\", &v13.DeleteOptions{})).To(Succeed())\n\t\t})\n\n\t})\n\n\tContext(\"vms limited by deny by label networkpolicy\", func() {\n\t\tBeforeEach(func() {\n\t\t\t\/\/ deny-by-label networkpolicy will deny the traffice for the vm which have the same label\n\t\t\tBy(\"Create deny-by-label networkpolicy\")\n\t\t\tnetworkpolicy := &v1network.NetworkPolicy{\n\t\t\t\tObjectMeta: v13.ObjectMeta{\n\t\t\t\t\tName: \"deny-by-label\",\n\t\t\t\t},\n\t\t\t\tSpec: v1network.NetworkPolicySpec{\n\t\t\t\t\tPodSelector: v13.LabelSelector{\n\t\t\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\t\t\t\"type\": \"test\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tIngress: []v1network.NetworkPolicyIngressRule{},\n\t\t\t\t},\n\t\t\t}\n\t\t\t_, err := virtClient.NetworkingV1().NetworkPolicies(vmia.Namespace).Create(networkpolicy)\n\t\t\tExpect(err).ToNot(HaveOccurred())\n\t\t})\n\n\t\tIt(\"[test_id:1515] should be failed to reach vmia from vmic\", func() {\n\t\t\tBy(\"Connect vmia from vmic\")\n\t\t\tip := vmia.Status.Interfaces[0].IP\n\t\t\tassertPingFail(ip, vmic)\n\t\t})\n\n\t\tIt(\"[test_id:1516] should be failed to reach vmia from vmib\", func() {\n\t\t\tBy(\"Connect vmia from vmib\")\n\t\t\tip := vmia.Status.Interfaces[0].IP\n\t\t\tassertPingFail(ip, vmib)\n\t\t})\n\n\t\tIt(\"[test_id:1517] should be successful to reach vmib from vmic\", func() {\n\t\t\tBy(\"Connect vmib from vmic\")\n\t\t\tip := vmib.Status.Interfaces[0].IP\n\t\t\tassertPingSucceed(ip, vmic)\n\t\t})\n\n\t\tAfterEach(func() {\n\t\t\tExpect(virtClient.NetworkingV1().NetworkPolicies(vmia.Namespace).Delete(\"deny-by-label\", &v13.DeleteOptions{})).To(Succeed())\n\t\t})\n\n\t})\n\n})\n\nfunc skipNetworkPolicyRunningOnKindInfra() {\n\tif tests.IsRunningOnKindInfra() {\n\t\tSkip(\"Skip Network Policy tests till issue https:\/\/github.com\/kubevirt\/kubevirt\/issues\/4081 is fixed\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package logging\n\nimport (\n\t\"testing\"\n)\n\nfunc TestLog(t *testing.T) {\n\t\/\/t.Skipf(\"skipping...\")\n\n\t\/\/Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.\n\tvar log = ContextLogger\n\n\tlog.Debug(\"Useful debugging information.\")\n\tlog.Info(\"Something noteworthy happened!\")\n\tlog.Warn(\"You should probably take a look at this.\")\n\tlog.Error(\"Something failed but I'm not quitting.\")\n\t\/\/ Calls panic() after logging\n\t\/\/log.Panic(\"I'm bailing.\")\n\t\/\/ Calls os.Exit(1) after logging\n\t\/\/log.Fatal(\"Bye.\")\n}\n<commit_msg>fix logger test<commit_after>package logging\n\nimport (\n\t\"testing\"\n)\n\nfunc TestLog(t *testing.T) {\n\t\/\/t.Skipf(\"skipping...\")\n\n\t\/\/Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.\n\tvar log = Logger()\n\n\tlog.Debug(\"Useful debugging information.\")\n\tlog.Info(\"Something noteworthy happened!\")\n\tlog.Warn(\"You should probably take a look at this.\")\n\tlog.Error(\"Something failed but I'm not quitting.\")\n\t\/\/ Calls panic() after logging\n\t\/\/log.Panic(\"I'm bailing.\")\n\t\/\/ Calls os.Exit(1) after logging\n\t\/\/log.Fatal(\"Bye.\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build !js\n\npackage glfw\n\nimport (\n\t\"fmt\"\n\t\"unsafe\"\n\n\t\"golang.org\/x\/sys\/windows\"\n\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/glfw\"\n)\n\nconst (\n\tsmCyCaption = 4\n\tmonitorDefaultToNearest = 2\n)\n\ntype rect struct {\n\tleft int32\n\ttop int32\n\tright int32\n\tbottom int32\n}\n\ntype monitorInfo struct {\n\tcbSize uint32\n\trcMonitor rect\n\trcWork rect\n\tdwFlags uint32\n}\n\nvar (\n\t\/\/ user32 is defined at hideconsole_windows.go\n\tprocGetSystemMetrics = user32.NewProc(\"GetSystemMetrics\")\n\tprocGetActiveWindow = user32.NewProc(\"GetActiveWindow\")\n\tprocGetForegroundWindow = user32.NewProc(\"GetForegroundWindow\")\n\tprocMonitorFromWindow = user32.NewProc(\"MonitorFromWindow\")\n\tprocGetMonitorInfoW = user32.NewProc(\"GetMonitorInfoW\")\n)\n\nfunc getSystemMetrics(nIndex int) (int, error) {\n\tr, _, e := procGetSystemMetrics.Call(uintptr(nIndex))\n\tif e != nil && e.(windows.Errno) != 0 {\n\t\treturn 0, fmt.Errorf(\"ui: GetSystemMetrics failed: error code: %d\", e)\n\t}\n\treturn int(r), nil\n}\n\nfunc getActiveWindow() (uintptr, error) {\n\tr, _, e := procGetActiveWindow.Call()\n\tif e != nil && e.(windows.Errno) != 0 {\n\t\treturn 0, fmt.Errorf(\"ui: GetActiveWindow failed: error code: %d\", e)\n\t}\n\treturn r, nil\n}\n\nfunc getForegroundWindow() (uintptr, error) {\n\tr, _, e := procGetForegroundWindow.Call()\n\tif e != nil && e.(windows.Errno) != 0 {\n\t\treturn 0, fmt.Errorf(\"ui: GetForegroundWindow failed: error code: %d\", e)\n\t}\n\treturn r, nil\n}\n\nfunc monitorFromWindow(hwnd uintptr, dwFlags uint32) (uintptr, error) {\n\tr, _, e := procMonitorFromWindow.Call(hwnd, uintptr(dwFlags))\n\tif e != nil && e.(windows.Errno) != 0 {\n\t\treturn 0, fmt.Errorf(\"ui: MonitorFromWindow failed: error code: %d\", e)\n\t}\n\tif r == 0 {\n\t\treturn 0, fmt.Errorf(\"ui: MonitorFromWindow failed: returned value: %d\", r)\n\t}\n\treturn r, nil\n}\n\nfunc getMonitorInfoW(hMonitor uintptr, lpmi *monitorInfo) error {\n\tr, _, e := procGetMonitorInfoW.Call(hMonitor, uintptr(unsafe.Pointer(lpmi)))\n\tif e != nil && e.(windows.Errno) != 0 {\n\t\treturn fmt.Errorf(\"ui: GetMonitorInfoW failed: error code: %d\", e)\n\t}\n\tif r == 0 {\n\t\treturn fmt.Errorf(\"ui: GetMonitorInfoW failed: returned value: %d\", r)\n\t}\n\treturn nil\n}\n\nfunc (u *UserInterface) glfwScale() float64 {\n\treturn u.deviceScaleFactor()\n}\n\nfunc (u *UserInterface) adjustWindowPosition(x, y int) (int, int) {\n\tmx, my := u.currentMonitor().GetPos()\n\t\/\/ As the video width\/height might be wrong,\n\t\/\/ adjust x\/y at least to enable to handle the window (#328)\n\tif x < mx {\n\t\tx = mx\n\t}\n\tt, err := getSystemMetrics(smCyCaption)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif y < my+t {\n\t\ty = my + t\n\t}\n\treturn x, y\n}\n\nfunc (u *UserInterface) currentMonitorFromPosition() *glfw.Monitor {\n\t\/\/ TODO: Should we use u.window.GetWin32Window() here?\n\tw, err := getActiveWindow()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif w == 0 {\n\t\t\/\/ There is no window at launching, but there is a hidden initialized window.\n\t\t\/\/ Get the foreground window, that is common among multiple processes.\n\t\tw, err = getForegroundWindow()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif w == 0 {\n\t\t\t\/\/ GetForegroundWindow can return null according to the document. Use\n\t\t\t\/\/ the primary monitor instead.\n\t\t\treturn glfw.GetPrimaryMonitor()\n\t\t}\n\t}\n\n\t\/\/ Get the current monitor by the window handle instead of the window position. It is because the window\n\t\/\/ position is not relaiable in some cases e.g. when the window is put across multiple monitors.\n\n\tm, err := monitorFromWindow(w, monitorDefaultToNearest)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tmi := monitorInfo{}\n\tmi.cbSize = uint32(unsafe.Sizeof(mi))\n\tif err := getMonitorInfoW(m, &mi); err != nil {\n\t\tpanic(err)\n\t}\n\n\tx, y := int(mi.rcMonitor.left), int(mi.rcMonitor.top)\n\tfor _, m := range glfw.GetMonitors() {\n\t\tmx, my := m.GetPos()\n\t\tif mx == x && my == y {\n\t\t\treturn m\n\t\t}\n\t}\n\treturn glfw.GetPrimaryMonitor()\n}\n\nfunc (u *UserInterface) nativeWindow() unsafe.Pointer {\n\treturn u.window.GetWin32Window()\n}\n<commit_msg>uidriver\/glfw: Bug fix: Crash on Wine when getting a monitor<commit_after>\/\/ Copyright 2016 Hajime Hoshi\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ +build !js\n\npackage glfw\n\nimport (\n\t\"fmt\"\n\t\"unsafe\"\n\n\t\"golang.org\/x\/sys\/windows\"\n\n\t\"github.com\/hajimehoshi\/ebiten\/internal\/glfw\"\n)\n\nconst (\n\tsmCyCaption = 4\n\tmonitorDefaultToNearest = 2\n)\n\ntype rect struct {\n\tleft int32\n\ttop int32\n\tright int32\n\tbottom int32\n}\n\ntype monitorInfo struct {\n\tcbSize uint32\n\trcMonitor rect\n\trcWork rect\n\tdwFlags uint32\n}\n\nvar (\n\t\/\/ user32 is defined at hideconsole_windows.go\n\tprocGetSystemMetrics = user32.NewProc(\"GetSystemMetrics\")\n\tprocGetActiveWindow = user32.NewProc(\"GetActiveWindow\")\n\tprocGetForegroundWindow = user32.NewProc(\"GetForegroundWindow\")\n\tprocMonitorFromWindow = user32.NewProc(\"MonitorFromWindow\")\n\tprocGetMonitorInfoW = user32.NewProc(\"GetMonitorInfoW\")\n)\n\nfunc getSystemMetrics(nIndex int) (int, error) {\n\tr, _, e := procGetSystemMetrics.Call(uintptr(nIndex))\n\tif e != nil && e.(windows.Errno) != 0 {\n\t\treturn 0, fmt.Errorf(\"ui: GetSystemMetrics failed: error code: %d\", e)\n\t}\n\treturn int(r), nil\n}\n\nfunc getActiveWindow() (uintptr, error) {\n\tr, _, e := procGetActiveWindow.Call()\n\tif e != nil && e.(windows.Errno) != 0 {\n\t\treturn 0, fmt.Errorf(\"ui: GetActiveWindow failed: error code: %d\", e)\n\t}\n\treturn r, nil\n}\n\nfunc getForegroundWindow() (uintptr, error) {\n\tr, _, e := procGetForegroundWindow.Call()\n\tif e != nil && e.(windows.Errno) != 0 {\n\t\treturn 0, fmt.Errorf(\"ui: GetForegroundWindow failed: error code: %d\", e)\n\t}\n\treturn r, nil\n}\n\nfunc monitorFromWindow(hwnd uintptr, dwFlags uint32) (uintptr, error) {\n\tr, _, e := procMonitorFromWindow.Call(hwnd, uintptr(dwFlags))\n\tif e != nil && e.(windows.Errno) != 0 {\n\t\treturn 0, fmt.Errorf(\"ui: MonitorFromWindow failed: error code: %d\", e)\n\t}\n\tif r == 0 {\n\t\treturn 0, fmt.Errorf(\"ui: MonitorFromWindow failed: returned value: %d\", r)\n\t}\n\treturn r, nil\n}\n\nfunc getMonitorInfoW(hMonitor uintptr, lpmi *monitorInfo) error {\n\tr, _, e := procGetMonitorInfoW.Call(hMonitor, uintptr(unsafe.Pointer(lpmi)))\n\tif e != nil && e.(windows.Errno) != 0 {\n\t\treturn fmt.Errorf(\"ui: GetMonitorInfoW failed: error code: %d\", e)\n\t}\n\tif r == 0 {\n\t\treturn fmt.Errorf(\"ui: GetMonitorInfoW failed: returned value: %d\", r)\n\t}\n\treturn nil\n}\n\nfunc (u *UserInterface) glfwScale() float64 {\n\treturn u.deviceScaleFactor()\n}\n\nfunc (u *UserInterface) adjustWindowPosition(x, y int) (int, int) {\n\tmx, my := u.currentMonitor().GetPos()\n\t\/\/ As the video width\/height might be wrong,\n\t\/\/ adjust x\/y at least to enable to handle the window (#328)\n\tif x < mx {\n\t\tx = mx\n\t}\n\tt, err := getSystemMetrics(smCyCaption)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif y < my+t {\n\t\ty = my + t\n\t}\n\treturn x, y\n}\n\nfunc (u *UserInterface) currentMonitorFromPosition() *glfw.Monitor {\n\t\/\/ TODO: Should we use u.window.GetWin32Window() here?\n\tw, err := getActiveWindow()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif w == 0 {\n\t\t\/\/ There is no window at launching, but there is a hidden initialized window.\n\t\t\/\/ Get the foreground window, that is common among multiple processes.\n\t\tw, err = getForegroundWindow()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif w == 0 {\n\t\t\t\/\/ GetForegroundWindow can return null according to the document. Use\n\t\t\t\/\/ the primary monitor instead.\n\t\t\treturn glfw.GetPrimaryMonitor()\n\t\t}\n\t}\n\n\t\/\/ Get the current monitor by the window handle instead of the window position. It is because the window\n\t\/\/ position is not relaiable in some cases e.g. when the window is put across multiple monitors.\n\n\tm, err := monitorFromWindow(w, monitorDefaultToNearest)\n\tif err != nil {\n\t\t\/\/ monitorFromWindow can return error on Wine. Ignore this.\n\t\treturn glfw.GetPrimaryMonitor()\n\t}\n\n\tmi := monitorInfo{}\n\tmi.cbSize = uint32(unsafe.Sizeof(mi))\n\tif err := getMonitorInfoW(m, &mi); err != nil {\n\t\tpanic(err)\n\t}\n\n\tx, y := int(mi.rcMonitor.left), int(mi.rcMonitor.top)\n\tfor _, m := range glfw.GetMonitors() {\n\t\tmx, my := m.GetPos()\n\t\tif mx == x && my == y {\n\t\t\treturn m\n\t\t}\n\t}\n\treturn glfw.GetPrimaryMonitor()\n}\n\nfunc (u *UserInterface) nativeWindow() unsafe.Pointer {\n\treturn u.window.GetWin32Window()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file contains the code dealing with package directory trees.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"go\/doc\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"unicode\"\n)\n\ntype Directory struct {\n\tDepth int\n\tPath string \/\/ includes Name\n\tName string\n\tText string \/\/ package documentation, if any\n\tDirs []*Directory \/\/ subdirectories\n}\n\nfunc isGoFile(fi os.FileInfo) bool {\n\tname := fi.Name()\n\treturn !fi.IsDir() &&\n\t\tlen(name) > 0 && name[0] != '.' && \/\/ ignore .files\n\t\tfilepath.Ext(name) == \".go\"\n}\n\nfunc isPkgFile(fi os.FileInfo) bool {\n\treturn isGoFile(fi) &&\n\t\t!strings.HasSuffix(fi.Name(), \"_test.go\") \/\/ ignore test files\n}\n\nfunc isPkgDir(fi os.FileInfo) bool {\n\tname := fi.Name()\n\treturn fi.IsDir() && len(name) > 0 &&\n\t\tname[0] != '_' && name[0] != '.' \/\/ ignore _files and .files\n}\n\nfunc firstSentence(s string) string {\n\ti := -1 \/\/ index+1 of first terminator (punctuation ending a sentence)\n\tj := -1 \/\/ index+1 of first terminator followed by white space\n\tprev := 'A'\n\tfor k, ch := range s {\n\t\tk1 := k + 1\n\t\tif ch == '.' || ch == '!' || ch == '?' {\n\t\t\tif i < 0 {\n\t\t\t\ti = k1 \/\/ first terminator\n\t\t\t}\n\t\t\tif k1 < len(s) && s[k1] <= ' ' {\n\t\t\t\tif j < 0 {\n\t\t\t\t\tj = k1 \/\/ first terminator followed by white space\n\t\t\t\t}\n\t\t\t\tif !unicode.IsUpper(prev) {\n\t\t\t\t\tj = k1\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tprev = ch\n\t}\n\n\tif j < 0 {\n\t\t\/\/ use the next best terminator\n\t\tj = i\n\t\tif j < 0 {\n\t\t\t\/\/ no terminator at all, use the entire string\n\t\t\tj = len(s)\n\t\t}\n\t}\n\n\treturn s[0:j]\n}\n\ntype treeBuilder struct {\n\tpathFilter func(string) bool\n\tmaxDepth int\n}\n\nfunc (b *treeBuilder) newDirTree(fset *token.FileSet, path, name string, depth int) *Directory {\n\tif b.pathFilter != nil && !b.pathFilter(path) {\n\t\treturn nil\n\t}\n\n\tif depth >= b.maxDepth {\n\t\t\/\/ return a dummy directory so that the parent directory\n\t\t\/\/ doesn't get discarded just because we reached the max\n\t\t\/\/ directory depth\n\t\treturn &Directory{depth, path, name, \"\", nil}\n\t}\n\n\tlist, err := fs.ReadDir(path)\n\tif err != nil {\n\t\t\/\/ newDirTree is called with a path that should be a package\n\t\t\/\/ directory; errors here should not happen, but if they do,\n\t\t\/\/ we want to know about them\n\t\tlog.Printf(\"ReadDir(%s): %s\", path, err)\n\t}\n\n\t\/\/ determine number of subdirectories and if there are package files\n\tndirs := 0\n\thasPkgFiles := false\n\tvar synopses [4]string \/\/ prioritized package documentation (0 == highest priority)\n\tfor _, d := range list {\n\t\tswitch {\n\t\tcase isPkgDir(d):\n\t\t\tndirs++\n\t\tcase isPkgFile(d):\n\t\t\t\/\/ looks like a package file, but may just be a file ending in \".go\";\n\t\t\t\/\/ don't just count it yet (otherwise we may end up with hasPkgFiles even\n\t\t\t\/\/ though the directory doesn't contain any real package files - was bug)\n\t\t\tif synopses[0] == \"\" {\n\t\t\t\t\/\/ no \"optimal\" package synopsis yet; continue to collect synopses\n\t\t\t\tfile, err := parseFile(fset, filepath.Join(path, d.Name()),\n\t\t\t\t\tparser.ParseComments|parser.PackageClauseOnly)\n\t\t\t\tif err == nil {\n\t\t\t\t\thasPkgFiles = true\n\t\t\t\t\tif file.Doc != nil {\n\t\t\t\t\t\t\/\/ prioritize documentation\n\t\t\t\t\t\ti := -1\n\t\t\t\t\t\tswitch file.Name.Name {\n\t\t\t\t\t\tcase name:\n\t\t\t\t\t\t\ti = 0 \/\/ normal case: directory name matches package name\n\t\t\t\t\t\tcase fakePkgName:\n\t\t\t\t\t\t\ti = 1 \/\/ synopses for commands\n\t\t\t\t\t\tcase \"main\":\n\t\t\t\t\t\t\ti = 2 \/\/ directory contains a main package\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\ti = 3 \/\/ none of the above\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif 0 <= i && i < len(synopses) && synopses[i] == \"\" {\n\t\t\t\t\t\t\tsynopses[i] = doc.Synopsis(file.Doc.Text())\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ create subdirectory tree\n\tvar dirs []*Directory\n\tif ndirs > 0 {\n\t\tdirs = make([]*Directory, ndirs)\n\t\ti := 0\n\t\tfor _, d := range list {\n\t\t\tif isPkgDir(d) {\n\t\t\t\tname := d.Name()\n\t\t\t\tdd := b.newDirTree(fset, filepath.Join(path, name), name, depth+1)\n\t\t\t\tif dd != nil {\n\t\t\t\t\tdirs[i] = dd\n\t\t\t\t\ti++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdirs = dirs[0:i]\n\t}\n\n\t\/\/ if there are no package files and no subdirectories\n\t\/\/ containing package files, ignore the directory\n\tif !hasPkgFiles && len(dirs) == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ select the highest-priority synopsis for the directory entry, if any\n\tsynopsis := \"\"\n\tfor _, synopsis = range synopses {\n\t\tif synopsis != \"\" {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn &Directory{depth, path, name, synopsis, dirs}\n}\n\n\/\/ newDirectory creates a new package directory tree with at most maxDepth\n\/\/ levels, anchored at root. The result tree is pruned such that it only\n\/\/ contains directories that contain package files or that contain\n\/\/ subdirectories containing package files (transitively). If a non-nil\n\/\/ pathFilter is provided, directory paths additionally must be accepted\n\/\/ by the filter (i.e., pathFilter(path) must be true). If a value >= 0 is\n\/\/ provided for maxDepth, nodes at larger depths are pruned as well; they\n\/\/ are assumed to contain package files even if their contents are not known\n\/\/ (i.e., in this case the tree may contain directories w\/o any package files).\n\/\/\nfunc newDirectory(root string, pathFilter func(string) bool, maxDepth int) *Directory {\n\t\/\/ The root could be a symbolic link so use Stat not Lstat.\n\td, err := fs.Stat(root)\n\t\/\/ If we fail here, report detailed error messages; otherwise\n\t\/\/ is is hard to see why a directory tree was not built.\n\tswitch {\n\tcase err != nil:\n\t\tlog.Printf(\"newDirectory(%s): %s\", root, err)\n\t\treturn nil\n\tcase !isPkgDir(d):\n\t\tlog.Printf(\"newDirectory(%s): not a package directory\", root)\n\t\treturn nil\n\t}\n\tif maxDepth < 0 {\n\t\tmaxDepth = 1e6 \/\/ \"infinity\"\n\t}\n\tb := treeBuilder{pathFilter, maxDepth}\n\t\/\/ the file set provided is only for local parsing, no position\n\t\/\/ information escapes and thus we don't need to save the set\n\treturn b.newDirTree(token.NewFileSet(), root, d.Name(), 0)\n}\n\nfunc (dir *Directory) writeLeafs(buf *bytes.Buffer) {\n\tif dir != nil {\n\t\tif len(dir.Dirs) == 0 {\n\t\t\tbuf.WriteString(dir.Path)\n\t\t\tbuf.WriteByte('\\n')\n\t\t\treturn\n\t\t}\n\n\t\tfor _, d := range dir.Dirs {\n\t\t\td.writeLeafs(buf)\n\t\t}\n\t}\n}\n\nfunc (dir *Directory) walk(c chan<- *Directory, skipRoot bool) {\n\tif dir != nil {\n\t\tif !skipRoot {\n\t\t\tc <- dir\n\t\t}\n\t\tfor _, d := range dir.Dirs {\n\t\t\td.walk(c, false)\n\t\t}\n\t}\n}\n\nfunc (dir *Directory) iter(skipRoot bool) <-chan *Directory {\n\tc := make(chan *Directory)\n\tgo func() {\n\t\tdir.walk(c, skipRoot)\n\t\tclose(c)\n\t}()\n\treturn c\n}\n\nfunc (dir *Directory) lookupLocal(name string) *Directory {\n\tfor _, d := range dir.Dirs {\n\t\tif d.Name == name {\n\t\t\treturn d\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ lookup looks for the *Directory for a given path, relative to dir.\nfunc (dir *Directory) lookup(path string) *Directory {\n\td := strings.Split(dir.Path, string(filepath.Separator))\n\tp := strings.Split(path, string(filepath.Separator))\n\ti := 0\n\tfor i < len(d) {\n\t\tif i >= len(p) || d[i] != p[i] {\n\t\t\treturn nil\n\t\t}\n\t\ti++\n\t}\n\tfor dir != nil && i < len(p) {\n\t\tdir = dir.lookupLocal(p[i])\n\t\ti++\n\t}\n\treturn dir\n}\n\n\/\/ DirEntry describes a directory entry. The Depth and Height values\n\/\/ are useful for presenting an entry in an indented fashion.\n\/\/\ntype DirEntry struct {\n\tDepth int \/\/ >= 0\n\tHeight int \/\/ = DirList.MaxHeight - Depth, > 0\n\tPath string \/\/ includes Name, relative to DirList root\n\tName string\n\tSynopsis string\n}\n\ntype DirList struct {\n\tMaxHeight int \/\/ directory tree height, > 0\n\tList []DirEntry\n}\n\n\/\/ listing creates a (linear) directory listing from a directory tree.\n\/\/ If skipRoot is set, the root directory itself is excluded from the list.\n\/\/\nfunc (root *Directory) listing(skipRoot bool) *DirList {\n\tif root == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ determine number of entries n and maximum height\n\tn := 0\n\tminDepth := 1 << 30 \/\/ infinity\n\tmaxDepth := 0\n\tfor d := range root.iter(skipRoot) {\n\t\tn++\n\t\tif minDepth > d.Depth {\n\t\t\tminDepth = d.Depth\n\t\t}\n\t\tif maxDepth < d.Depth {\n\t\t\tmaxDepth = d.Depth\n\t\t}\n\t}\n\tmaxHeight := maxDepth - minDepth + 1\n\n\tif n == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ create list\n\tlist := make([]DirEntry, n)\n\ti := 0\n\tfor d := range root.iter(skipRoot) {\n\t\tp := &list[i]\n\t\tp.Depth = d.Depth - minDepth\n\t\tp.Height = maxHeight - p.Depth\n\t\t\/\/ the path is relative to root.Path - remove the root.Path\n\t\t\/\/ prefix (the prefix should always be present but avoid\n\t\t\/\/ crashes and check)\n\t\tpath := d.Path\n\t\tif strings.HasPrefix(d.Path, root.Path) {\n\t\t\tpath = d.Path[len(root.Path):]\n\t\t}\n\t\t\/\/ remove trailing separator if any - path must be relative\n\t\tif len(path) > 0 && path[0] == filepath.Separator {\n\t\t\tpath = path[1:]\n\t\t}\n\t\tp.Path = path\n\t\tp.Name = d.Name\n\t\tp.Synopsis = d.Text\n\t\ti++\n\t}\n\n\treturn &DirList{maxHeight, list}\n}\n<commit_msg>godoc: remove dead code<commit_after>\/\/ Copyright 2010 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file contains the code dealing with package directory trees.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"go\/doc\"\n\t\"go\/parser\"\n\t\"go\/token\"\n\t\"log\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"strings\"\n)\n\ntype Directory struct {\n\tDepth int\n\tPath string \/\/ includes Name\n\tName string\n\tText string \/\/ package documentation, if any\n\tDirs []*Directory \/\/ subdirectories\n}\n\nfunc isGoFile(fi os.FileInfo) bool {\n\tname := fi.Name()\n\treturn !fi.IsDir() &&\n\t\tlen(name) > 0 && name[0] != '.' && \/\/ ignore .files\n\t\tfilepath.Ext(name) == \".go\"\n}\n\nfunc isPkgFile(fi os.FileInfo) bool {\n\treturn isGoFile(fi) &&\n\t\t!strings.HasSuffix(fi.Name(), \"_test.go\") \/\/ ignore test files\n}\n\nfunc isPkgDir(fi os.FileInfo) bool {\n\tname := fi.Name()\n\treturn fi.IsDir() && len(name) > 0 &&\n\t\tname[0] != '_' && name[0] != '.' \/\/ ignore _files and .files\n}\n\ntype treeBuilder struct {\n\tpathFilter func(string) bool\n\tmaxDepth int\n}\n\nfunc (b *treeBuilder) newDirTree(fset *token.FileSet, path, name string, depth int) *Directory {\n\tif b.pathFilter != nil && !b.pathFilter(path) {\n\t\treturn nil\n\t}\n\n\tif depth >= b.maxDepth {\n\t\t\/\/ return a dummy directory so that the parent directory\n\t\t\/\/ doesn't get discarded just because we reached the max\n\t\t\/\/ directory depth\n\t\treturn &Directory{depth, path, name, \"\", nil}\n\t}\n\n\tlist, err := fs.ReadDir(path)\n\tif err != nil {\n\t\t\/\/ newDirTree is called with a path that should be a package\n\t\t\/\/ directory; errors here should not happen, but if they do,\n\t\t\/\/ we want to know about them\n\t\tlog.Printf(\"ReadDir(%s): %s\", path, err)\n\t}\n\n\t\/\/ determine number of subdirectories and if there are package files\n\tndirs := 0\n\thasPkgFiles := false\n\tvar synopses [4]string \/\/ prioritized package documentation (0 == highest priority)\n\tfor _, d := range list {\n\t\tswitch {\n\t\tcase isPkgDir(d):\n\t\t\tndirs++\n\t\tcase isPkgFile(d):\n\t\t\t\/\/ looks like a package file, but may just be a file ending in \".go\";\n\t\t\t\/\/ don't just count it yet (otherwise we may end up with hasPkgFiles even\n\t\t\t\/\/ though the directory doesn't contain any real package files - was bug)\n\t\t\tif synopses[0] == \"\" {\n\t\t\t\t\/\/ no \"optimal\" package synopsis yet; continue to collect synopses\n\t\t\t\tfile, err := parseFile(fset, filepath.Join(path, d.Name()),\n\t\t\t\t\tparser.ParseComments|parser.PackageClauseOnly)\n\t\t\t\tif err == nil {\n\t\t\t\t\thasPkgFiles = true\n\t\t\t\t\tif file.Doc != nil {\n\t\t\t\t\t\t\/\/ prioritize documentation\n\t\t\t\t\t\ti := -1\n\t\t\t\t\t\tswitch file.Name.Name {\n\t\t\t\t\t\tcase name:\n\t\t\t\t\t\t\ti = 0 \/\/ normal case: directory name matches package name\n\t\t\t\t\t\tcase fakePkgName:\n\t\t\t\t\t\t\ti = 1 \/\/ synopses for commands\n\t\t\t\t\t\tcase \"main\":\n\t\t\t\t\t\t\ti = 2 \/\/ directory contains a main package\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\ti = 3 \/\/ none of the above\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif 0 <= i && i < len(synopses) && synopses[i] == \"\" {\n\t\t\t\t\t\t\tsynopses[i] = doc.Synopsis(file.Doc.Text())\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t\/\/ create subdirectory tree\n\tvar dirs []*Directory\n\tif ndirs > 0 {\n\t\tdirs = make([]*Directory, ndirs)\n\t\ti := 0\n\t\tfor _, d := range list {\n\t\t\tif isPkgDir(d) {\n\t\t\t\tname := d.Name()\n\t\t\t\tdd := b.newDirTree(fset, filepath.Join(path, name), name, depth+1)\n\t\t\t\tif dd != nil {\n\t\t\t\t\tdirs[i] = dd\n\t\t\t\t\ti++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tdirs = dirs[0:i]\n\t}\n\n\t\/\/ if there are no package files and no subdirectories\n\t\/\/ containing package files, ignore the directory\n\tif !hasPkgFiles && len(dirs) == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ select the highest-priority synopsis for the directory entry, if any\n\tsynopsis := \"\"\n\tfor _, synopsis = range synopses {\n\t\tif synopsis != \"\" {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn &Directory{depth, path, name, synopsis, dirs}\n}\n\n\/\/ newDirectory creates a new package directory tree with at most maxDepth\n\/\/ levels, anchored at root. The result tree is pruned such that it only\n\/\/ contains directories that contain package files or that contain\n\/\/ subdirectories containing package files (transitively). If a non-nil\n\/\/ pathFilter is provided, directory paths additionally must be accepted\n\/\/ by the filter (i.e., pathFilter(path) must be true). If a value >= 0 is\n\/\/ provided for maxDepth, nodes at larger depths are pruned as well; they\n\/\/ are assumed to contain package files even if their contents are not known\n\/\/ (i.e., in this case the tree may contain directories w\/o any package files).\n\/\/\nfunc newDirectory(root string, pathFilter func(string) bool, maxDepth int) *Directory {\n\t\/\/ The root could be a symbolic link so use Stat not Lstat.\n\td, err := fs.Stat(root)\n\t\/\/ If we fail here, report detailed error messages; otherwise\n\t\/\/ is is hard to see why a directory tree was not built.\n\tswitch {\n\tcase err != nil:\n\t\tlog.Printf(\"newDirectory(%s): %s\", root, err)\n\t\treturn nil\n\tcase !isPkgDir(d):\n\t\tlog.Printf(\"newDirectory(%s): not a package directory\", root)\n\t\treturn nil\n\t}\n\tif maxDepth < 0 {\n\t\tmaxDepth = 1e6 \/\/ \"infinity\"\n\t}\n\tb := treeBuilder{pathFilter, maxDepth}\n\t\/\/ the file set provided is only for local parsing, no position\n\t\/\/ information escapes and thus we don't need to save the set\n\treturn b.newDirTree(token.NewFileSet(), root, d.Name(), 0)\n}\n\nfunc (dir *Directory) writeLeafs(buf *bytes.Buffer) {\n\tif dir != nil {\n\t\tif len(dir.Dirs) == 0 {\n\t\t\tbuf.WriteString(dir.Path)\n\t\t\tbuf.WriteByte('\\n')\n\t\t\treturn\n\t\t}\n\n\t\tfor _, d := range dir.Dirs {\n\t\t\td.writeLeafs(buf)\n\t\t}\n\t}\n}\n\nfunc (dir *Directory) walk(c chan<- *Directory, skipRoot bool) {\n\tif dir != nil {\n\t\tif !skipRoot {\n\t\t\tc <- dir\n\t\t}\n\t\tfor _, d := range dir.Dirs {\n\t\t\td.walk(c, false)\n\t\t}\n\t}\n}\n\nfunc (dir *Directory) iter(skipRoot bool) <-chan *Directory {\n\tc := make(chan *Directory)\n\tgo func() {\n\t\tdir.walk(c, skipRoot)\n\t\tclose(c)\n\t}()\n\treturn c\n}\n\nfunc (dir *Directory) lookupLocal(name string) *Directory {\n\tfor _, d := range dir.Dirs {\n\t\tif d.Name == name {\n\t\t\treturn d\n\t\t}\n\t}\n\treturn nil\n}\n\n\/\/ lookup looks for the *Directory for a given path, relative to dir.\nfunc (dir *Directory) lookup(path string) *Directory {\n\td := strings.Split(dir.Path, string(filepath.Separator))\n\tp := strings.Split(path, string(filepath.Separator))\n\ti := 0\n\tfor i < len(d) {\n\t\tif i >= len(p) || d[i] != p[i] {\n\t\t\treturn nil\n\t\t}\n\t\ti++\n\t}\n\tfor dir != nil && i < len(p) {\n\t\tdir = dir.lookupLocal(p[i])\n\t\ti++\n\t}\n\treturn dir\n}\n\n\/\/ DirEntry describes a directory entry. The Depth and Height values\n\/\/ are useful for presenting an entry in an indented fashion.\n\/\/\ntype DirEntry struct {\n\tDepth int \/\/ >= 0\n\tHeight int \/\/ = DirList.MaxHeight - Depth, > 0\n\tPath string \/\/ includes Name, relative to DirList root\n\tName string\n\tSynopsis string\n}\n\ntype DirList struct {\n\tMaxHeight int \/\/ directory tree height, > 0\n\tList []DirEntry\n}\n\n\/\/ listing creates a (linear) directory listing from a directory tree.\n\/\/ If skipRoot is set, the root directory itself is excluded from the list.\n\/\/\nfunc (root *Directory) listing(skipRoot bool) *DirList {\n\tif root == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ determine number of entries n and maximum height\n\tn := 0\n\tminDepth := 1 << 30 \/\/ infinity\n\tmaxDepth := 0\n\tfor d := range root.iter(skipRoot) {\n\t\tn++\n\t\tif minDepth > d.Depth {\n\t\t\tminDepth = d.Depth\n\t\t}\n\t\tif maxDepth < d.Depth {\n\t\t\tmaxDepth = d.Depth\n\t\t}\n\t}\n\tmaxHeight := maxDepth - minDepth + 1\n\n\tif n == 0 {\n\t\treturn nil\n\t}\n\n\t\/\/ create list\n\tlist := make([]DirEntry, n)\n\ti := 0\n\tfor d := range root.iter(skipRoot) {\n\t\tp := &list[i]\n\t\tp.Depth = d.Depth - minDepth\n\t\tp.Height = maxHeight - p.Depth\n\t\t\/\/ the path is relative to root.Path - remove the root.Path\n\t\t\/\/ prefix (the prefix should always be present but avoid\n\t\t\/\/ crashes and check)\n\t\tpath := d.Path\n\t\tif strings.HasPrefix(d.Path, root.Path) {\n\t\t\tpath = d.Path[len(root.Path):]\n\t\t}\n\t\t\/\/ remove trailing separator if any - path must be relative\n\t\tif len(path) > 0 && path[0] == filepath.Separator {\n\t\t\tpath = path[1:]\n\t\t}\n\t\tp.Path = path\n\t\tp.Name = d.Name\n\t\tp.Synopsis = d.Text\n\t\ti++\n\t}\n\n\treturn &DirList{maxHeight, list}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage tikv\n\nimport (\n\t\"bytes\"\n\t\"sync\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/ngaut\/log\"\n\tpb \"github.com\/pingcap\/kvproto\/pkg\/kvrpcpb\"\n\t\"github.com\/pingcap\/tidb\/kv\"\n\t\"github.com\/pingcap\/tidb\/terror\"\n)\n\ntype txnCommitter struct {\n\tstore *tikvStore\n\ttxn *tikvTxn\n\tstartTS uint64\n\tkeys [][]byte\n\tmutations map[string]*pb.Mutation\n\tcommitTS uint64\n\tmu sync.RWMutex\n\twrittenKeys [][]byte\n\tcommitted bool\n}\n\nfunc newTxnCommitter(txn *tikvTxn) (*txnCommitter, error) {\n\tvar keys [][]byte\n\tmutations := make(map[string]*pb.Mutation)\n\terr := txn.us.WalkBuffer(func(k kv.Key, v []byte) error {\n\t\tif len(v) > 0 {\n\t\t\tmutations[string(k)] = &pb.Mutation{\n\t\t\t\tOp: pb.Op_Put.Enum(),\n\t\t\t\tKey: k,\n\t\t\t\tValue: v,\n\t\t\t}\n\t\t} else {\n\t\t\tmutations[string(k)] = &pb.Mutation{\n\t\t\t\tOp: pb.Op_Del.Enum(),\n\t\t\t\tKey: k,\n\t\t\t}\n\t\t}\n\t\tkeys = append(keys, k)\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\t\/\/ Transactions without Put\/Del, only Locks are readonly.\n\t\/\/ We can skip commit directly.\n\tif len(keys) == 0 {\n\t\treturn nil, nil\n\t}\n\tfor _, lockKey := range txn.lockKeys {\n\t\tif _, ok := mutations[string(lockKey)]; !ok {\n\t\t\tmutations[string(lockKey)] = &pb.Mutation{\n\t\t\t\tOp: pb.Op_Lock.Enum(),\n\t\t\t\tKey: lockKey,\n\t\t\t}\n\t\t\tkeys = append(keys, lockKey)\n\t\t}\n\t}\n\treturn &txnCommitter{\n\t\tstore: txn.store,\n\t\ttxn: txn,\n\t\tstartTS: txn.StartTS(),\n\t\tkeys: keys,\n\t\tmutations: mutations,\n\t}, nil\n}\n\nfunc (c *txnCommitter) primary() []byte {\n\treturn c.keys[0]\n}\n\n\/\/ iterKeys groups keys into batches, then applies `f` to them. If the flag\n\/\/ asyncNonPrimary is set, it will return as soon as the primary batch is\n\/\/ processed.\nfunc (c *txnCommitter) iterKeys(keys [][]byte, f func(batchKeys) error, sizeFn func([]byte) int, asyncNonPrimary bool) error {\n\tif len(keys) == 0 {\n\t\treturn nil\n\t}\n\tgroups, firstRegion, err := c.store.regionCache.GroupKeysByRegion(keys)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tfirstIsPrimary := bytes.Equal(keys[0], c.primary())\n\n\tvar batches []batchKeys\n\t\/\/ Make sure the group that contains primary key goes first.\n\tif firstIsPrimary {\n\t\tbatches = appendBatchBySize(batches, firstRegion, groups[firstRegion], sizeFn, txnCommitBatchSize)\n\t\tdelete(groups, firstRegion)\n\t}\n\tfor id, g := range groups {\n\t\tbatches = appendBatchBySize(batches, id, g, sizeFn, txnCommitBatchSize)\n\t}\n\n\tif firstIsPrimary {\n\t\terr = c.doBatches(batches[:1], f)\n\t\tif err != nil {\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t\tbatches = batches[1:]\n\t}\n\tif asyncNonPrimary {\n\t\tgo func() {\n\t\t\tc.doBatches(batches, f)\n\t\t}()\n\t\treturn nil\n\t}\n\terr = c.doBatches(batches, f)\n\treturn errors.Trace(err)\n}\n\n\/\/ doBatches applies f to batches parallelly.\nfunc (c *txnCommitter) doBatches(batches []batchKeys, f func(batchKeys) error) error {\n\tif len(batches) == 0 {\n\t\treturn nil\n\t}\n\tif len(batches) == 1 {\n\t\te := f(batches[0])\n\t\tif e != nil {\n\t\t\tlog.Warnf(\"txnCommitter doBatches failed: %v, tid: %d\", e, c.startTS)\n\t\t}\n\t\treturn errors.Trace(e)\n\t}\n\n\t\/\/ TODO: For prewrite, stop sending other requests after receiving first error.\n\tch := make(chan error)\n\tfor _, batch := range batches {\n\t\tgo func(batch batchKeys) {\n\t\t\tch <- f(batch)\n\t\t}(batch)\n\t}\n\tvar err error\n\tfor i := 0; i < len(batches); i++ {\n\t\tif e := <-ch; e != nil {\n\t\t\tlog.Warnf(\"txnCommitter doBatches failed: %v, tid: %d\", e, c.startTS)\n\t\t\terr = e\n\t\t}\n\t}\n\treturn errors.Trace(err)\n}\n\nfunc (c *txnCommitter) keyValueSize(key []byte) int {\n\tsize := c.keySize(key)\n\tif mutation := c.mutations[string(key)]; mutation != nil {\n\t\tsize += len(mutation.Value)\n\t}\n\treturn size\n}\n\nfunc (c *txnCommitter) keySize(key []byte) int {\n\treturn len(key)\n}\n\nfunc (c *txnCommitter) prewriteSingleRegion(batch batchKeys) error {\n\tmutations := make([]*pb.Mutation, len(batch.keys))\n\tfor i, k := range batch.keys {\n\t\tmutations[i] = c.mutations[string(k)]\n\t}\n\treq := &pb.Request{\n\t\tType: pb.MessageType_CmdPrewrite.Enum(),\n\t\tCmdPrewriteReq: &pb.CmdPrewriteRequest{\n\t\t\tMutations: mutations,\n\t\t\tPrimaryLock: c.primary(),\n\t\t\tStartVersion: proto.Uint64(c.startTS),\n\t\t},\n\t}\n\n\tvar backoffErr error\n\tfor backoff := txnLockBackoff(); backoffErr == nil; backoffErr = backoff() {\n\t\tresp, err := c.store.SendKVReq(req, batch.region)\n\t\tif err != nil {\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t\tif regionErr := resp.GetRegionError(); regionErr != nil {\n\t\t\t\/\/ re-split keys and prewrite again.\n\t\t\t\/\/ TODO: The recursive maybe not able to exit if TiKV &\n\t\t\t\/\/ PD are implemented incorrectly. A possible fix is\n\t\t\t\/\/ introducing a 'max backoff time'.\n\t\t\terr = c.prewriteKeys(batch.keys)\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t\tprewriteResp := resp.GetCmdPrewriteResp()\n\t\tif prewriteResp == nil {\n\t\t\treturn errors.Trace(errBodyMissing)\n\t\t}\n\t\tkeyErrs := prewriteResp.GetErrors()\n\t\tif len(keyErrs) == 0 {\n\t\t\t\/\/ We need to cleanup all written keys if transaction aborts.\n\t\t\tc.mu.Lock()\n\t\t\tdefer c.mu.Unlock()\n\t\t\tc.writtenKeys = append(c.writtenKeys, batch.keys...)\n\t\t\treturn nil\n\t\t}\n\t\tfor _, keyErr := range keyErrs {\n\t\t\tlockInfo, err := extractLockInfoFromKeyErr(keyErr)\n\t\t\tif err != nil {\n\t\t\t\t\/\/ It could be `Retryable` or `Abort`.\n\t\t\t\treturn errors.Trace(err)\n\t\t\t}\n\t\t\tlock := newLock(c.store, lockInfo.GetPrimaryLock(), lockInfo.GetLockVersion(), lockInfo.GetKey(), c.startTS)\n\t\t\t_, err = lock.cleanup()\n\t\t\tif err != nil && terror.ErrorNotEqual(err, errInnerRetryable) {\n\t\t\t\treturn errors.Trace(err)\n\t\t\t}\n\t\t}\n\t}\n\treturn errors.Annotate(backoffErr, txnRetryableMark)\n}\n\nfunc (c *txnCommitter) commitSingleRegion(batch batchKeys) error {\n\treq := &pb.Request{\n\t\tType: pb.MessageType_CmdCommit.Enum(),\n\t\tCmdCommitReq: &pb.CmdCommitRequest{\n\t\t\tStartVersion: proto.Uint64(c.startTS),\n\t\t\tKeys: batch.keys,\n\t\t\tCommitVersion: proto.Uint64(c.commitTS),\n\t\t},\n\t}\n\n\tresp, err := c.store.SendKVReq(req, batch.region)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tif regionErr := resp.GetRegionError(); regionErr != nil {\n\t\t\/\/ re-split keys and commit again.\n\t\terr = c.commitKeys(batch.keys)\n\t\treturn errors.Trace(err)\n\t}\n\tcommitResp := resp.GetCmdCommitResp()\n\tif commitResp == nil {\n\t\treturn errors.Trace(errBodyMissing)\n\t}\n\tif keyErr := commitResp.GetError(); keyErr != nil {\n\t\tc.mu.RLock()\n\t\tdefer c.mu.RUnlock()\n\t\terr = errors.Errorf(\"commit failed: %v\", keyErr.String())\n\t\tif c.committed {\n\t\t\t\/\/ No secondary key could be rolled back after it's primary key is committed.\n\t\t\t\/\/ There must be a serious bug somewhere.\n\t\t\tlog.Errorf(\"txn failed commit key after primary key committed: %v, tid: %d\", err, c.startTS)\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t\t\/\/ The transaction maybe rolled back by concurrent transactions.\n\t\tlog.Warnf(\"txn failed commit primary key: %v, retry later, tid: %d\", err, c.startTS)\n\t\treturn errors.Annotate(err, txnRetryableMark)\n\t}\n\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\t\/\/ Group that contains primary key is always the first.\n\t\/\/ We mark transaction's status committed when we receive the first success response.\n\tc.committed = true\n\treturn nil\n}\n\nfunc (c *txnCommitter) cleanupSingleRegion(batch batchKeys) error {\n\treq := &pb.Request{\n\t\tType: pb.MessageType_CmdBatchRollback.Enum(),\n\t\tCmdBatchRollbackReq: &pb.CmdBatchRollbackRequest{\n\t\t\tKeys: batch.keys,\n\t\t\tStartVersion: proto.Uint64(c.startTS),\n\t\t},\n\t}\n\tresp, err := c.store.SendKVReq(req, batch.region)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tif regionErr := resp.GetRegionError(); regionErr != nil {\n\t\terr = c.cleanupKeys(batch.keys)\n\t\treturn errors.Trace(err)\n\t}\n\tif keyErr := resp.GetCmdBatchRollbackResp().GetError(); keyErr != nil {\n\t\terr = errors.Errorf(\"cleanup failed: %s\", keyErr)\n\t\tlog.Errorf(\"txn failed cleanup key: %v, tid: %d\", err, c.startTS)\n\t\treturn errors.Trace(err)\n\t}\n\treturn nil\n}\n\nfunc (c *txnCommitter) prewriteKeys(keys [][]byte) error {\n\treturn c.iterKeys(keys, c.prewriteSingleRegion, c.keyValueSize, false)\n}\n\nfunc (c *txnCommitter) commitKeys(keys [][]byte) error {\n\treturn c.iterKeys(keys, c.commitSingleRegion, c.keySize, true)\n}\n\nfunc (c *txnCommitter) cleanupKeys(keys [][]byte) error {\n\treturn c.iterKeys(keys, c.cleanupSingleRegion, c.keySize, false)\n}\n\nfunc (c *txnCommitter) Commit() error {\n\terr := c.prewriteKeys(c.keys)\n\tif err != nil {\n\t\tlog.Warnf(\"txn commit failed on prewrite: %v, tid: %d\", err, c.startTS)\n\t\tgo func() {\n\t\t\tc.cleanupKeys(c.writtenKeys)\n\t\t}()\n\t\treturn errors.Trace(err)\n\t}\n\n\tcommitTS, err := c.store.getTimestampWithRetry()\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tc.commitTS = commitTS\n\n\terr = c.commitKeys(c.keys)\n\tif err != nil {\n\t\tif !c.committed {\n\t\t\tgo func() {\n\t\t\t\tc.cleanupKeys(c.writtenKeys)\n\t\t\t}()\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t\tlog.Warnf(\"txn commit succeed with error: %v, tid: %d\", err, c.startTS)\n\t}\n\treturn nil\n}\n\n\/\/ TiKV recommends each RPC packet should be less than ~1MB. We keep each packet's\n\/\/ Key+Value size below 512KB.\nconst txnCommitBatchSize = 512 * 1024\n\n\/\/ batchKeys is a batch of keys in the same region.\ntype batchKeys struct {\n\tregion RegionVerID\n\tkeys [][]byte\n}\n\n\/\/ appendBatchBySize appends keys to []batchKeys. It may split the keys to make\n\/\/ sure each batch's size does not exceed the limit.\nfunc appendBatchBySize(b []batchKeys, region RegionVerID, keys [][]byte, sizeFn func([]byte) int, limit int) []batchKeys {\n\tvar start, end int\n\tfor start = 0; start < len(keys); start = end {\n\t\tvar size int\n\t\tfor end = start; end < len(keys) && size < limit; end++ {\n\t\t\tsize += sizeFn(keys[end])\n\t\t}\n\t\tb = append(b, batchKeys{\n\t\t\tregion: region,\n\t\t\tkeys: keys[start:end],\n\t\t})\n\t}\n\treturn b\n}\n<commit_msg>store\/tikv: use defer to cleanup keys. (#1474)<commit_after>\/\/ Copyright 2016 PingCAP, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage tikv\n\nimport (\n\t\"bytes\"\n\t\"sync\"\n\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/ngaut\/log\"\n\tpb \"github.com\/pingcap\/kvproto\/pkg\/kvrpcpb\"\n\t\"github.com\/pingcap\/tidb\/kv\"\n\t\"github.com\/pingcap\/tidb\/terror\"\n)\n\ntype txnCommitter struct {\n\tstore *tikvStore\n\ttxn *tikvTxn\n\tstartTS uint64\n\tkeys [][]byte\n\tmutations map[string]*pb.Mutation\n\tcommitTS uint64\n\tmu sync.RWMutex\n\twrittenKeys [][]byte\n\tcommitted bool\n}\n\nfunc newTxnCommitter(txn *tikvTxn) (*txnCommitter, error) {\n\tvar keys [][]byte\n\tmutations := make(map[string]*pb.Mutation)\n\terr := txn.us.WalkBuffer(func(k kv.Key, v []byte) error {\n\t\tif len(v) > 0 {\n\t\t\tmutations[string(k)] = &pb.Mutation{\n\t\t\t\tOp: pb.Op_Put.Enum(),\n\t\t\t\tKey: k,\n\t\t\t\tValue: v,\n\t\t\t}\n\t\t} else {\n\t\t\tmutations[string(k)] = &pb.Mutation{\n\t\t\t\tOp: pb.Op_Del.Enum(),\n\t\t\t\tKey: k,\n\t\t\t}\n\t\t}\n\t\tkeys = append(keys, k)\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\t\/\/ Transactions without Put\/Del, only Locks are readonly.\n\t\/\/ We can skip commit directly.\n\tif len(keys) == 0 {\n\t\treturn nil, nil\n\t}\n\tfor _, lockKey := range txn.lockKeys {\n\t\tif _, ok := mutations[string(lockKey)]; !ok {\n\t\t\tmutations[string(lockKey)] = &pb.Mutation{\n\t\t\t\tOp: pb.Op_Lock.Enum(),\n\t\t\t\tKey: lockKey,\n\t\t\t}\n\t\t\tkeys = append(keys, lockKey)\n\t\t}\n\t}\n\treturn &txnCommitter{\n\t\tstore: txn.store,\n\t\ttxn: txn,\n\t\tstartTS: txn.StartTS(),\n\t\tkeys: keys,\n\t\tmutations: mutations,\n\t}, nil\n}\n\nfunc (c *txnCommitter) primary() []byte {\n\treturn c.keys[0]\n}\n\n\/\/ iterKeys groups keys into batches, then applies `f` to them. If the flag\n\/\/ asyncNonPrimary is set, it will return as soon as the primary batch is\n\/\/ processed.\nfunc (c *txnCommitter) iterKeys(keys [][]byte, f func(batchKeys) error, sizeFn func([]byte) int, asyncNonPrimary bool) error {\n\tif len(keys) == 0 {\n\t\treturn nil\n\t}\n\tgroups, firstRegion, err := c.store.regionCache.GroupKeysByRegion(keys)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tfirstIsPrimary := bytes.Equal(keys[0], c.primary())\n\n\tvar batches []batchKeys\n\t\/\/ Make sure the group that contains primary key goes first.\n\tif firstIsPrimary {\n\t\tbatches = appendBatchBySize(batches, firstRegion, groups[firstRegion], sizeFn, txnCommitBatchSize)\n\t\tdelete(groups, firstRegion)\n\t}\n\tfor id, g := range groups {\n\t\tbatches = appendBatchBySize(batches, id, g, sizeFn, txnCommitBatchSize)\n\t}\n\n\tif firstIsPrimary {\n\t\terr = c.doBatches(batches[:1], f)\n\t\tif err != nil {\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t\tbatches = batches[1:]\n\t}\n\tif asyncNonPrimary {\n\t\tgo func() {\n\t\t\tc.doBatches(batches, f)\n\t\t}()\n\t\treturn nil\n\t}\n\terr = c.doBatches(batches, f)\n\treturn errors.Trace(err)\n}\n\n\/\/ doBatches applies f to batches parallelly.\nfunc (c *txnCommitter) doBatches(batches []batchKeys, f func(batchKeys) error) error {\n\tif len(batches) == 0 {\n\t\treturn nil\n\t}\n\tif len(batches) == 1 {\n\t\te := f(batches[0])\n\t\tif e != nil {\n\t\t\tlog.Warnf(\"txnCommitter doBatches failed: %v, tid: %d\", e, c.startTS)\n\t\t}\n\t\treturn errors.Trace(e)\n\t}\n\n\t\/\/ TODO: For prewrite, stop sending other requests after receiving first error.\n\tch := make(chan error)\n\tfor _, batch := range batches {\n\t\tgo func(batch batchKeys) {\n\t\t\tch <- f(batch)\n\t\t}(batch)\n\t}\n\tvar err error\n\tfor i := 0; i < len(batches); i++ {\n\t\tif e := <-ch; e != nil {\n\t\t\tlog.Warnf(\"txnCommitter doBatches failed: %v, tid: %d\", e, c.startTS)\n\t\t\terr = e\n\t\t}\n\t}\n\treturn errors.Trace(err)\n}\n\nfunc (c *txnCommitter) keyValueSize(key []byte) int {\n\tsize := c.keySize(key)\n\tif mutation := c.mutations[string(key)]; mutation != nil {\n\t\tsize += len(mutation.Value)\n\t}\n\treturn size\n}\n\nfunc (c *txnCommitter) keySize(key []byte) int {\n\treturn len(key)\n}\n\nfunc (c *txnCommitter) prewriteSingleRegion(batch batchKeys) error {\n\tmutations := make([]*pb.Mutation, len(batch.keys))\n\tfor i, k := range batch.keys {\n\t\tmutations[i] = c.mutations[string(k)]\n\t}\n\treq := &pb.Request{\n\t\tType: pb.MessageType_CmdPrewrite.Enum(),\n\t\tCmdPrewriteReq: &pb.CmdPrewriteRequest{\n\t\t\tMutations: mutations,\n\t\t\tPrimaryLock: c.primary(),\n\t\t\tStartVersion: proto.Uint64(c.startTS),\n\t\t},\n\t}\n\n\tvar backoffErr error\n\tfor backoff := txnLockBackoff(); backoffErr == nil; backoffErr = backoff() {\n\t\tresp, err := c.store.SendKVReq(req, batch.region)\n\t\tif err != nil {\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t\tif regionErr := resp.GetRegionError(); regionErr != nil {\n\t\t\t\/\/ re-split keys and prewrite again.\n\t\t\t\/\/ TODO: The recursive maybe not able to exit if TiKV &\n\t\t\t\/\/ PD are implemented incorrectly. A possible fix is\n\t\t\t\/\/ introducing a 'max backoff time'.\n\t\t\terr = c.prewriteKeys(batch.keys)\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t\tprewriteResp := resp.GetCmdPrewriteResp()\n\t\tif prewriteResp == nil {\n\t\t\treturn errors.Trace(errBodyMissing)\n\t\t}\n\t\tkeyErrs := prewriteResp.GetErrors()\n\t\tif len(keyErrs) == 0 {\n\t\t\t\/\/ We need to cleanup all written keys if transaction aborts.\n\t\t\tc.mu.Lock()\n\t\t\tdefer c.mu.Unlock()\n\t\t\tc.writtenKeys = append(c.writtenKeys, batch.keys...)\n\t\t\treturn nil\n\t\t}\n\t\tfor _, keyErr := range keyErrs {\n\t\t\tlockInfo, err := extractLockInfoFromKeyErr(keyErr)\n\t\t\tif err != nil {\n\t\t\t\t\/\/ It could be `Retryable` or `Abort`.\n\t\t\t\treturn errors.Trace(err)\n\t\t\t}\n\t\t\tlock := newLock(c.store, lockInfo.GetPrimaryLock(), lockInfo.GetLockVersion(), lockInfo.GetKey(), c.startTS)\n\t\t\t_, err = lock.cleanup()\n\t\t\tif err != nil && terror.ErrorNotEqual(err, errInnerRetryable) {\n\t\t\t\treturn errors.Trace(err)\n\t\t\t}\n\t\t}\n\t}\n\treturn errors.Annotate(backoffErr, txnRetryableMark)\n}\n\nfunc (c *txnCommitter) commitSingleRegion(batch batchKeys) error {\n\treq := &pb.Request{\n\t\tType: pb.MessageType_CmdCommit.Enum(),\n\t\tCmdCommitReq: &pb.CmdCommitRequest{\n\t\t\tStartVersion: proto.Uint64(c.startTS),\n\t\t\tKeys: batch.keys,\n\t\t\tCommitVersion: proto.Uint64(c.commitTS),\n\t\t},\n\t}\n\n\tresp, err := c.store.SendKVReq(req, batch.region)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tif regionErr := resp.GetRegionError(); regionErr != nil {\n\t\t\/\/ re-split keys and commit again.\n\t\terr = c.commitKeys(batch.keys)\n\t\treturn errors.Trace(err)\n\t}\n\tcommitResp := resp.GetCmdCommitResp()\n\tif commitResp == nil {\n\t\treturn errors.Trace(errBodyMissing)\n\t}\n\tif keyErr := commitResp.GetError(); keyErr != nil {\n\t\tc.mu.RLock()\n\t\tdefer c.mu.RUnlock()\n\t\terr = errors.Errorf(\"commit failed: %v\", keyErr.String())\n\t\tif c.committed {\n\t\t\t\/\/ No secondary key could be rolled back after it's primary key is committed.\n\t\t\t\/\/ There must be a serious bug somewhere.\n\t\t\tlog.Errorf(\"txn failed commit key after primary key committed: %v, tid: %d\", err, c.startTS)\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t\t\/\/ The transaction maybe rolled back by concurrent transactions.\n\t\tlog.Warnf(\"txn failed commit primary key: %v, retry later, tid: %d\", err, c.startTS)\n\t\treturn errors.Annotate(err, txnRetryableMark)\n\t}\n\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\t\/\/ Group that contains primary key is always the first.\n\t\/\/ We mark transaction's status committed when we receive the first success response.\n\tc.committed = true\n\treturn nil\n}\n\nfunc (c *txnCommitter) cleanupSingleRegion(batch batchKeys) error {\n\treq := &pb.Request{\n\t\tType: pb.MessageType_CmdBatchRollback.Enum(),\n\t\tCmdBatchRollbackReq: &pb.CmdBatchRollbackRequest{\n\t\t\tKeys: batch.keys,\n\t\t\tStartVersion: proto.Uint64(c.startTS),\n\t\t},\n\t}\n\tresp, err := c.store.SendKVReq(req, batch.region)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tif regionErr := resp.GetRegionError(); regionErr != nil {\n\t\terr = c.cleanupKeys(batch.keys)\n\t\treturn errors.Trace(err)\n\t}\n\tif keyErr := resp.GetCmdBatchRollbackResp().GetError(); keyErr != nil {\n\t\terr = errors.Errorf(\"cleanup failed: %s\", keyErr)\n\t\tlog.Errorf(\"txn failed cleanup key: %v, tid: %d\", err, c.startTS)\n\t\treturn errors.Trace(err)\n\t}\n\treturn nil\n}\n\nfunc (c *txnCommitter) prewriteKeys(keys [][]byte) error {\n\treturn c.iterKeys(keys, c.prewriteSingleRegion, c.keyValueSize, false)\n}\n\nfunc (c *txnCommitter) commitKeys(keys [][]byte) error {\n\treturn c.iterKeys(keys, c.commitSingleRegion, c.keySize, true)\n}\n\nfunc (c *txnCommitter) cleanupKeys(keys [][]byte) error {\n\treturn c.iterKeys(keys, c.cleanupSingleRegion, c.keySize, false)\n}\n\nfunc (c *txnCommitter) Commit() error {\n\tdefer func() {\n\t\t\/\/ Always clean up all written keys if the txn does not commit.\n\t\tif !c.committed {\n\t\t\tgo func() {\n\t\t\t\tc.cleanupKeys(c.writtenKeys)\n\t\t\t\tlog.Infof(\"txn clean up done, tid: %d\", c.startTS)\n\t\t\t}()\n\t\t}\n\t}()\n\n\terr := c.prewriteKeys(c.keys)\n\tif err != nil {\n\t\tlog.Warnf(\"txn commit failed on prewrite: %v, tid: %d\", err, c.startTS)\n\t\treturn errors.Trace(err)\n\t}\n\n\tcommitTS, err := c.store.getTimestampWithRetry()\n\tif err != nil {\n\t\tlog.Warnf(\"txn get commitTS failed: %v, tid: %d\", err, c.startTS)\n\t\treturn errors.Trace(err)\n\t}\n\tc.commitTS = commitTS\n\n\terr = c.commitKeys(c.keys)\n\tif err != nil {\n\t\tif !c.committed {\n\t\t\tlog.Warnf(\"txn commit failed on commit: %v, tid: %d\", err, c.startTS)\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t\tlog.Warnf(\"txn commit succeed with error: %v, tid: %d\", err, c.startTS)\n\t}\n\treturn nil\n}\n\n\/\/ TiKV recommends each RPC packet should be less than ~1MB. We keep each packet's\n\/\/ Key+Value size below 512KB.\nconst txnCommitBatchSize = 512 * 1024\n\n\/\/ batchKeys is a batch of keys in the same region.\ntype batchKeys struct {\n\tregion RegionVerID\n\tkeys [][]byte\n}\n\n\/\/ appendBatchBySize appends keys to []batchKeys. It may split the keys to make\n\/\/ sure each batch's size does not exceed the limit.\nfunc appendBatchBySize(b []batchKeys, region RegionVerID, keys [][]byte, sizeFn func([]byte) int, limit int) []batchKeys {\n\tvar start, end int\n\tfor start = 0; start < len(keys); start = end {\n\t\tvar size int\n\t\tfor end = start; end < len(keys) && size < limit; end++ {\n\t\t\tsize += sizeFn(keys[end])\n\t\t}\n\t\tb = append(b, batchKeys{\n\t\t\tregion: region,\n\t\t\tkeys: keys[start:end],\n\t\t})\n\t}\n\treturn b\n}\n<|endoftext|>"} {"text":"<commit_before>package amazonproduct\n\nimport (\n\t\"crypto\/hmac\"\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype AmazonProductAPI struct {\n\tAccessKey string\n\tSecretKey string\n\tAssociateTag string\n\tHost string\n}\n\n\/*\nItemSearchByKeyword takes a string containg keywords and returns the search results\n*\/\nfunc (api AmazonProductAPI) ItemSearchByKeyword(Keywords string, page int) (string, error) {\n\tparams := map[string]string{\n\t\t\"Keywords\": Keywords,\n\t\t\"ResponseGroup\": \"Images,ItemAttributes,Small,EditorialReview\",\n\t\t\"ItemPage\": strconv.FormatInt(int64(page), 10),\n\t}\n\treturn api.ItemSearch(\"All\", params)\n}\n\nfunc (api AmazonProductAPI) ItemSearchByKeywordWithResponseGroup(Keywords string, ResponseGroup string) (string, error) {\n\tparams := map[string]string{\n\t\t\"Keywords\": Keywords,\n\t\t\"ResponseGroup\": ResponseGroup,\n\t}\n\treturn api.ItemSearch(\"All\", params)\n}\n\nfunc (api AmazonProductAPI) ItemSearch(SearchIndex string, Parameters map[string]string) (string, error) {\n\tParameters[\"SearchIndex\"] = SearchIndex\n\tgenUrl, err := GenerateAmazonUrl(api, \"ItemSearch\", Parameters)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tSetTimestamp(genUrl)\n\n\tsignedurl, err := SignAmazonUrl(genUrl, api)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tresp, err := http.Get(signedurl)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(body), nil\n}\n\nfunc GenerateAmazonUrl(api AmazonProductAPI, Operation string, Parameters map[string]string) (finalUrl *url.URL, err error) {\n\n\tresult, err := url.Parse(api.Host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresult.Host = api.Host\n\tresult.Scheme = \"http\"\n\tresult.Path = \"\/onca\/xml\"\n\n\tvalues := url.Values{}\n\tvalues.Add(\"Operation\", Operation)\n\tvalues.Add(\"Service\", \"AWSECommerceService\")\n\tvalues.Add(\"AWSAccessKeyId\", api.AccessKey)\n\tvalues.Add(\"Version\", \"2009-01-01\")\n\tvalues.Add(\"AssociateTag\", api.AssociateTag)\n\n\tfor k, v := range Parameters {\n\t\tvalues.Set(k, v)\n\t}\n\n\tparams := values.Encode()\n\tresult.RawQuery = params\n\n\treturn result, nil\n}\n\nfunc SetTimestamp(origUrl *url.URL) (err error) {\n\tvalues, err := url.ParseQuery(origUrl.RawQuery)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvalues.Set(\"Timestamp\", time.Now().UTC().Format(time.RFC3339))\n\torigUrl.RawQuery = values.Encode()\n\n\treturn nil\n}\n\nfunc SignAmazonUrl(origUrl *url.URL, api AmazonProductAPI) (signedUrl string, err error) {\n\n\tescapeUrl := strings.Replace(origUrl.RawQuery, \",\", \"%2C\", -1)\n\tescapeUrl = strings.Replace(escapeUrl, \":\", \"%3A\", -1)\n\n\tparams := strings.Split(escapeUrl, \"&\")\n\tsort.Strings(params)\n\tsortedParams := strings.Join(params, \"&\")\n\n\ttoSign := fmt.Sprintf(\"GET\\n%s\\n%s\\n%s\", origUrl.Host, origUrl.Path, sortedParams)\n\n\thasher := hmac.New(sha256.New, []byte(api.SecretKey))\n\t_, err = hasher.Write([]byte(toSign))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\thash := base64.StdEncoding.EncodeToString(hasher.Sum(nil))\n\n\thash = url.QueryEscape(hash)\n\n\tnewParams := fmt.Sprintf(\"%s&Signature=%s\", sortedParams, hash)\n\n\torigUrl.RawQuery = newParams\n\n\treturn origUrl.String(), nil\n}\n<commit_msg>Revert \"ItemPage support\"<commit_after>package amazonproduct \n\nimport (\n\t\"net\/url\"\n\t\"sort\"\n\t\"fmt\"\n\t\"strings\"\n\t\"crypto\/hmac\"\n\t\"crypto\/sha256\"\n\t\"encoding\/base64\"\n\t\"time\"\n\t\"net\/http\"\n\t\"io\/ioutil\"\n)\n\ntype AmazonProductAPI struct {\n\tAccessKey string\n\tSecretKey string\n\tAssociateTag string\n\tHost string\n}\n\nfunc (api AmazonProductAPI) ItemSearchByKeyword(Keywords string) (string, error) {\n\tparams := map[string] string {\n\t\t\"Keywords\": Keywords,\n\t\t\"ResponseGroup\" : \"Images,ItemAttributes,Small,EditorialReview\",\n\t}\n\treturn api.ItemSearch(\"All\", params)\n}\n\nfunc (api AmazonProductAPI) ItemSearchByKeywordWithResponseGroup(Keywords string, ResponseGroup string) (string, error) {\t\n\tparams := map[string] string {\n\t\t\"Keywords\": Keywords,\n\t\t\"ResponseGroup\" : ResponseGroup,\n\t}\n\treturn api.ItemSearch(\"All\", params)\n}\n\nfunc (api AmazonProductAPI) ItemSearch(SearchIndex string, Parameters map[string] string) (string,error){\n\tParameters[\"SearchIndex\"] = SearchIndex\n\tgenUrl, err := GenerateAmazonUrl(api, \"ItemSearch\", Parameters)\n\tif (err != nil) {\n\t\treturn \"\", err\n\t}\n\n\tSetTimestamp(genUrl)\n\n\tsignedurl,err := SignAmazonUrl(genUrl, api)\n\tif (err != nil) {\n\t\treturn \"\", err\n\t}\n\n\tresp, err := http.Get(signedurl)\n\tif (err != nil) {\n\t\treturn \"\", err\n\t}\t\n\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif (err != nil) {\n\t\treturn \"\", err\n\t}\n\n\treturn string(body), nil\n}\n\nfunc GenerateAmazonUrl(api AmazonProductAPI, Operation string, Parameters map[string] string) (finalUrl *url.URL, err error) {\n\n\tresult,err := url.Parse(api.Host)\n\tif (err != nil) {\n\t\treturn nil, err\n\t}\n\n\tresult.Host = api.Host\n\tresult.Scheme = \"http\"\n\tresult.Path = \"\/onca\/xml\"\n\n\tvalues := url.Values{}\n\tvalues.Add(\"Operation\", Operation)\n\tvalues.Add(\"Service\", \"AWSECommerceService\")\n\tvalues.Add(\"AWSAccessKeyId\", api.AccessKey)\n\tvalues.Add(\"Version\", \"2009-01-01\")\n\tvalues.Add(\"AssociateTag\", api.AssociateTag)\n\n\tfor k, v := range Parameters {\n\t\tvalues.Set(k, v)\n\t}\n\n\tparams := values.Encode()\n\tresult.RawQuery = params\n\n\treturn result, nil\n}\n\nfunc SetTimestamp(origUrl *url.URL) (err error) {\n\tvalues, err := url.ParseQuery(origUrl.RawQuery)\n\tif (err != nil) {\n\t\treturn err\n\t}\n\tvalues.Set(\"Timestamp\", time.Now().UTC().Format(time.RFC3339))\n\torigUrl.RawQuery = values.Encode()\n\n\treturn nil\n}\n\nfunc SignAmazonUrl(origUrl *url.URL, api AmazonProductAPI) (signedUrl string , err error){\n\n\tescapeUrl := strings.Replace(origUrl.RawQuery, \",\", \"%2C\", -1)\n\tescapeUrl = strings.Replace(escapeUrl, \":\", \"%3A\", -1)\n\n\tparams := strings.Split(escapeUrl, \"&\")\n\tsort.Strings(params)\n\tsortedParams := strings.Join(params, \"&\")\n\n\ttoSign := fmt.Sprintf(\"GET\\n%s\\n%s\\n%s\", origUrl.Host, origUrl.Path, sortedParams)\n\n\thasher := hmac.New(sha256.New, []byte(api.SecretKey))\n\t_, err = hasher.Write([]byte(toSign))\n\tif (err != nil) {\n\t\treturn \"\", err\n\t}\n\n\thash := base64.StdEncoding.EncodeToString(hasher.Sum(nil))\n\n\thash = url.QueryEscape(hash)\n\n\tnewParams := fmt.Sprintf(\"%s&Signature=%s\", sortedParams, hash)\n\n\torigUrl.RawQuery = newParams\n\n\treturn origUrl.String(), nil\n}\n<|endoftext|>"} {"text":"<commit_before>package pinentry\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/keybase\/client\/go\/logger\"\n\tkeybase1 \"github.com\/keybase\/client\/protocol\/go\"\n)\n\n\/\/\n\/\/ some borrowed from here:\n\/\/\n\/\/ https:\/\/github.com\/bradfitz\/camlistore\/blob\/master\/pkg\/misc\/pinentry\/pinentry.go\n\/\/\n\/\/ Under the Apache 2.0 license\n\/\/\n\ntype Pinentry struct {\n\tinitRes *error\n\tpath string\n\tterm string\n\ttty string\n\tprog string\n\tlog *logger.Logger\n}\n\nfunc New(envprog string, log *logger.Logger) *Pinentry {\n\treturn &Pinentry{\n\t\tprog: envprog,\n\t\tlog: log,\n\t}\n}\n\nfunc (pe *Pinentry) Init() (error, error) {\n\tif pe.initRes != nil {\n\t\treturn *pe.initRes, nil\n\t}\n\terr, fatalerr := pe.FindProgram()\n\tif err == nil {\n\t\terr = pe.GetTerminalName()\n\t}\n\tpe.term = os.Getenv(\"TERM\")\n\tpe.initRes = &err\n\treturn err, fatalerr\n}\n\nfunc (pe *Pinentry) SetInitError(e error) {\n\tpe.initRes = &e\n}\n\nfunc (pe *Pinentry) FindProgram() (error, error) {\n\tprog := pe.prog\n\tvar err, fatalerr error\n\tif len(prog) > 0 {\n\t\tif err = canExec(prog); err == nil {\n\t\t\tpe.path = prog\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Can't execute given pinentry program '%s': %s\",\n\t\t\t\tprog, err)\n\t\t\tfatalerr = err\n\t\t}\n\t} else if prog, err = FindPinentry(pe.log); err == nil {\n\t\tpe.path = prog\n\t}\n\treturn err, fatalerr\n}\n\nfunc (pe *Pinentry) GetTerminalName() error {\n\ttty, err := os.Readlink(\"\/proc\/self\/fd\/0\")\n\tif err != nil {\n\t\tpe.log.Debug(\"| Can't find terminal name via \/proc lookup: %s\", err)\n\t} else {\n\t\tpe.log.Debug(\"| found tty=%s\", tty)\n\t\tpe.tty = tty\n\t}\n\t\/\/ Tis not a fatal error. In particular, it won't work on OSX\n\treturn nil\n}\n\nfunc (pe *Pinentry) Get(arg keybase1.SecretEntryArg) (res *keybase1.SecretEntryRes, err error) {\n\n\tpe.log.Debug(\"+ Pinentry::Get()\")\n\n\t\/\/ Do a lazy initialization\n\tif err, _ = pe.Init(); err != nil {\n\t\treturn\n\t}\n\n\tinst := pinentryInstance{parent: pe}\n\tdefer inst.Close()\n\n\tif err = inst.Init(); err != nil {\n\t\t\/\/ We probably shouldn't try to use this thing again if we failed\n\t\t\/\/ to set it up.\n\t\tpe.SetInitError(err)\n\t\treturn\n\t}\n\tres, err = inst.Run(arg)\n\tpe.log.Debug(\"- Pinentry::Get() -> %v\", err)\n\treturn\n}\n\nfunc (pi *pinentryInstance) Close() {\n\tpi.stdin.Close()\n\tpi.cmd.Wait()\n}\n\ntype pinentryInstance struct {\n\tparent *Pinentry\n\tcmd *exec.Cmd\n\tstdout io.ReadCloser\n\tstdin io.WriteCloser\n\tbr *bufio.Reader\n}\n\nfunc (pi *pinentryInstance) Set(cmd, val string, errp *error) {\n\tif val == \"\" {\n\t\treturn\n\t}\n\tfmt.Fprintf(pi.stdin, \"%s %s\\n\", cmd, val)\n\tline, _, err := pi.br.ReadLine()\n\tif err != nil {\n\t\t*errp = err\n\t\treturn\n\t}\n\tif string(line) != \"OK\" {\n\t\t*errp = fmt.Errorf(\"Response to \" + cmd + \" was \" + string(line))\n\t}\n\treturn\n}\n\nfunc (pi *pinentryInstance) Init() (err error) {\n\n\tparent := pi.parent\n\n\tparent.log.Debug(\"+ pinentryInstance::Init()\")\n\n\tpi.cmd = exec.Command(parent.path)\n\tpi.stdin, _ = pi.cmd.StdinPipe()\n\tpi.stdout, _ = pi.cmd.StdoutPipe()\n\n\tif err = pi.cmd.Start(); err != nil {\n\t\tparent.log.Warning(\"unexpected error running pinentry (%s): %s\", parent.path, err)\n\t\treturn\n\t}\n\n\tpi.br = bufio.NewReader(pi.stdout)\n\tlineb, _, err := pi.br.ReadLine()\n\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Failed to get getpin greeting: %s\", err)\n\t\treturn\n\t}\n\n\tline := string(lineb)\n\tif !strings.HasPrefix(line, \"OK\") {\n\t\terr = fmt.Errorf(\"getpin greeting didn't say 'OK', said: %q\", line)\n\t\treturn\n\t}\n\n\tif len(parent.tty) > 0 {\n\t\tpi.Set(\"OPTION\", \"ttyname=\"+parent.tty, &err)\n\t}\n\tif len(parent.term) > 0 {\n\t\tpi.Set(\"OPTION\", \"ttytype=\"+parent.term, &err)\n\t}\n\n\tparent.log.Debug(\"- pinentryInstance::Init() -> %v\", err)\n\treturn\n}\n\nfunc descEncode(s string) string {\n\ts = strings.Replace(s, \"%\", \"%%\", -1)\n\ts = strings.Replace(s, \"\\n\", \"%0A\", -1)\n\treturn s\n}\n\nfunc resDecode(s string) string {\n\ts = strings.Replace(s, \"%25\", \"%\", -1)\n\treturn s\n}\n\nfunc (pi *pinentryInstance) Run(arg keybase1.SecretEntryArg) (res *keybase1.SecretEntryRes, err error) {\n\n\tpi.Set(\"SETPROMPT\", arg.Prompt, &err)\n\tpi.Set(\"SETDESC\", descEncode(arg.Desc), &err)\n\tpi.Set(\"SETOK\", arg.Ok, &err)\n\tpi.Set(\"SETCANCEL\", arg.Cancel, &err)\n\tpi.Set(\"SETERROR\", arg.Err, &err)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tpinentrySecretStoreInfo, err := pi.useSecretStore(arg.UseSecretStore)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfmt.Fprintf(pi.stdin, \"GETPIN\\n\")\n\tvar lineb []byte\n\tlineb, _, err = pi.br.ReadLine()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Failed to read line after GETPIN: %v\", err)\n\t\treturn\n\t}\n\tline := string(lineb)\n\tswitch {\n\tcase strings.HasPrefix(line, \"D \"):\n\t\tres = &keybase1.SecretEntryRes{Text: resDecode(line[2:])}\n\tcase strings.HasPrefix(line, \"ERR 83886179 canceled\"):\n\t\tres = &keybase1.SecretEntryRes{Canceled: true}\n\tcase line == \"OK\":\n\t\tres = &keybase1.SecretEntryRes{}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"GETPIN response didn't start with D; got %q\", line)\n\t}\n\n\tres.StoreSecret = pi.shouldStoreSecret(pinentrySecretStoreInfo)\n\n\treturn\n}\n<commit_msg>fixed GetTerminalName on OSs that don't have \/proc but do have \/dev\/tty.<commit_after>package pinentry\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"strings\"\n\n\t\"github.com\/keybase\/client\/go\/logger\"\n\tkeybase1 \"github.com\/keybase\/client\/protocol\/go\"\n)\n\n\/\/\n\/\/ some borrowed from here:\n\/\/\n\/\/ https:\/\/github.com\/bradfitz\/camlistore\/blob\/master\/pkg\/misc\/pinentry\/pinentry.go\n\/\/\n\/\/ Under the Apache 2.0 license\n\/\/\n\ntype Pinentry struct {\n\tinitRes *error\n\tpath string\n\tterm string\n\ttty string\n\tprog string\n\tlog *logger.Logger\n}\n\nfunc New(envprog string, log *logger.Logger) *Pinentry {\n\treturn &Pinentry{\n\t\tprog: envprog,\n\t\tlog: log,\n\t}\n}\n\nfunc (pe *Pinentry) Init() (error, error) {\n\tif pe.initRes != nil {\n\t\treturn *pe.initRes, nil\n\t}\n\terr, fatalerr := pe.FindProgram()\n\tif err == nil {\n\t\tpe.GetTerminalName()\n\t}\n\tpe.term = os.Getenv(\"TERM\")\n\tpe.initRes = &err\n\treturn err, fatalerr\n}\n\nfunc (pe *Pinentry) SetInitError(e error) {\n\tpe.initRes = &e\n}\n\nfunc (pe *Pinentry) FindProgram() (error, error) {\n\tprog := pe.prog\n\tvar err, fatalerr error\n\tif len(prog) > 0 {\n\t\tif err = canExec(prog); err == nil {\n\t\t\tpe.path = prog\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"Can't execute given pinentry program '%s': %s\",\n\t\t\t\tprog, err)\n\t\t\tfatalerr = err\n\t\t}\n\t} else if prog, err = FindPinentry(pe.log); err == nil {\n\t\tpe.path = prog\n\t}\n\treturn err, fatalerr\n}\n\nfunc (pe *Pinentry) GetTerminalName() {\n\ttty, err := os.Readlink(\"\/proc\/self\/fd\/0\")\n\tif err != nil {\n\t\tpe.log.Debug(\"| Can't find terminal name via \/proc lookup: %s\", err)\n\n\t\t\/\/ try \/dev\/tty\n\t\ttty = \"\/dev\/tty\"\n\t\t_, err = os.Stat(\"\/dev\/tty\")\n\t\tif err != nil {\n\t\t\tpe.log.Debug(\"| stat \/dev\/tty failed: %s\", err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tpe.log.Debug(\"| found tty=%s\", tty)\n\tpe.tty = tty\n}\n\nfunc (pe *Pinentry) Get(arg keybase1.SecretEntryArg) (res *keybase1.SecretEntryRes, err error) {\n\n\tpe.log.Debug(\"+ Pinentry::Get()\")\n\n\t\/\/ Do a lazy initialization\n\tif err, _ = pe.Init(); err != nil {\n\t\treturn\n\t}\n\n\tinst := pinentryInstance{parent: pe}\n\tdefer inst.Close()\n\n\tif err = inst.Init(); err != nil {\n\t\t\/\/ We probably shouldn't try to use this thing again if we failed\n\t\t\/\/ to set it up.\n\t\tpe.SetInitError(err)\n\t\treturn\n\t}\n\tres, err = inst.Run(arg)\n\tpe.log.Debug(\"- Pinentry::Get() -> %v\", err)\n\treturn\n}\n\nfunc (pi *pinentryInstance) Close() {\n\tpi.stdin.Close()\n\tpi.cmd.Wait()\n}\n\ntype pinentryInstance struct {\n\tparent *Pinentry\n\tcmd *exec.Cmd\n\tstdout io.ReadCloser\n\tstdin io.WriteCloser\n\tbr *bufio.Reader\n}\n\nfunc (pi *pinentryInstance) Set(cmd, val string, errp *error) {\n\tif val == \"\" {\n\t\treturn\n\t}\n\tfmt.Fprintf(pi.stdin, \"%s %s\\n\", cmd, val)\n\tline, _, err := pi.br.ReadLine()\n\tif err != nil {\n\t\t*errp = err\n\t\treturn\n\t}\n\tif string(line) != \"OK\" {\n\t\t*errp = fmt.Errorf(\"Response to \" + cmd + \" was \" + string(line))\n\t}\n\treturn\n}\n\nfunc (pi *pinentryInstance) Init() (err error) {\n\n\tparent := pi.parent\n\n\tparent.log.Debug(\"+ pinentryInstance::Init()\")\n\n\tpi.cmd = exec.Command(parent.path)\n\tpi.stdin, _ = pi.cmd.StdinPipe()\n\tpi.stdout, _ = pi.cmd.StdoutPipe()\n\n\tif err = pi.cmd.Start(); err != nil {\n\t\tparent.log.Warning(\"unexpected error running pinentry (%s): %s\", parent.path, err)\n\t\treturn\n\t}\n\n\tpi.br = bufio.NewReader(pi.stdout)\n\tlineb, _, err := pi.br.ReadLine()\n\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Failed to get getpin greeting: %s\", err)\n\t\treturn\n\t}\n\n\tline := string(lineb)\n\tif !strings.HasPrefix(line, \"OK\") {\n\t\terr = fmt.Errorf(\"getpin greeting didn't say 'OK', said: %q\", line)\n\t\treturn\n\t}\n\n\tif len(parent.tty) > 0 {\n\t\tparent.log.Debug(\"setting ttyname to %s\", parent.tty)\n\t\tpi.Set(\"OPTION\", \"ttyname=\"+parent.tty, &err)\n\t\tif err != nil {\n\t\t\tparent.log.Debug(\"error setting ttyname: %s\", err)\n\t\t}\n\t}\n\tif len(parent.term) > 0 {\n\t\tparent.log.Info(\"setting ttytype to %s\", parent.term)\n\t\tpi.Set(\"OPTION\", \"ttytype=\"+parent.term, &err)\n\t\tif err != nil {\n\t\t\tparent.log.Debug(\"error setting ttytype: %s\", err)\n\t\t}\n\t}\n\n\tparent.log.Debug(\"- pinentryInstance::Init() -> %v\", err)\n\treturn\n}\n\nfunc descEncode(s string) string {\n\ts = strings.Replace(s, \"%\", \"%%\", -1)\n\ts = strings.Replace(s, \"\\n\", \"%0A\", -1)\n\treturn s\n}\n\nfunc resDecode(s string) string {\n\ts = strings.Replace(s, \"%25\", \"%\", -1)\n\treturn s\n}\n\nfunc (pi *pinentryInstance) Run(arg keybase1.SecretEntryArg) (res *keybase1.SecretEntryRes, err error) {\n\n\tpi.Set(\"SETPROMPT\", arg.Prompt, &err)\n\tpi.Set(\"SETDESC\", descEncode(arg.Desc), &err)\n\tpi.Set(\"SETOK\", arg.Ok, &err)\n\tpi.Set(\"SETCANCEL\", arg.Cancel, &err)\n\tpi.Set(\"SETERROR\", arg.Err, &err)\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\tpinentrySecretStoreInfo, err := pi.useSecretStore(arg.UseSecretStore)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfmt.Fprintf(pi.stdin, \"GETPIN\\n\")\n\tvar lineb []byte\n\tlineb, _, err = pi.br.ReadLine()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Failed to read line after GETPIN: %v\", err)\n\t\treturn\n\t}\n\tline := string(lineb)\n\tswitch {\n\tcase strings.HasPrefix(line, \"D \"):\n\t\tres = &keybase1.SecretEntryRes{Text: resDecode(line[2:])}\n\tcase strings.HasPrefix(line, \"ERR 83886179 canceled\"):\n\t\tres = &keybase1.SecretEntryRes{Canceled: true}\n\tcase line == \"OK\":\n\t\tres = &keybase1.SecretEntryRes{}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"GETPIN response didn't start with D; got %q\", line)\n\t}\n\n\tres.StoreSecret = pi.shouldStoreSecret(pinentrySecretStoreInfo)\n\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage runtime\n\nimport \"internal\/cpu\"\n\nconst (\n\t\/\/ bit masks taken from bits\/hwcap.h\n\t_HWCAP_S390_ZARCH = 2\n\t_HWCAP_S390_STFLE = 4\n\t_HWCAP_S390_MSA = 8\n\t_HWCAP_S390_LDISP = 16\n\t_HWCAP_S390_EIMM = 32\n\t_HWCAP_S390_DFP = 64\n\t_HWCAP_S390_ETF3EH = 256\n\t_HWCAP_S390_VX = 2048 \/\/ vector facility\n)\n\nfunc archauxv(tag, val uintptr) {\n\tswitch tag {\n\tcase _AT_HWCAP: \/\/ CPU capability bit flags\n\t\tcpu.S390X.HasZArch = val&_HWCAP_S390_ZARCH != 0\n\t\tcpu.S390X.HasSTFLE = val&_HWCAP_S390_STFLE != 0\n\t\tcpu.S390X.HasMSA = val&_HWCAP_S390_MSA != 0\n\t\tcpu.S390X.HasLDisp = val&_HWCAP_S390_LDISP != 0\n\t\tcpu.S390X.HasEImm = val&_HWCAP_S390_EIMM != 0\n\t\tcpu.S390X.HasDFP = val&_HWCAP_S390_DFP != 0\n\t\tcpu.S390X.HasETF3Enhanced = val&_HWCAP_S390_ETF3EH != 0\n\t\tcpu.S390X.HasVX = val&_HWCAP_S390_VX != 0\n\t}\n}\n<commit_msg>runtime: correct facilities names in s390 CPU support<commit_after>\/\/ Copyright 2016 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage runtime\n\nimport \"internal\/cpu\"\n\nconst (\n\t\/\/ bit masks taken from bits\/hwcap.h\n\t_HWCAP_S390_ZARCH = 2\n\t_HWCAP_S390_STFLE = 4\n\t_HWCAP_S390_MSA = 8\n\t_HWCAP_S390_LDISP = 16\n\t_HWCAP_S390_EIMM = 32\n\t_HWCAP_S390_DFP = 64\n\t_HWCAP_S390_ETF3EH = 256\n\t_HWCAP_S390_VX = 2048 \/\/ vector facility\n\t_HWCAP_S390_VXE = 8192\n)\n\nfunc archauxv(tag, val uintptr) {\n\tswitch tag {\n\tcase _AT_HWCAP: \/\/ CPU capability bit flags\n\t\tcpu.S390X.HasZARCH = val&_HWCAP_S390_ZARCH != 0\n\t\tcpu.S390X.HasSTFLE = val&_HWCAP_S390_STFLE != 0\n\t\tcpu.S390X.HasLDISP = val&_HWCAP_S390_LDISP != 0\n\t\tcpu.S390X.HasEIMM = val&_HWCAP_S390_EIMM != 0\n\t\tcpu.S390X.HasDFP = val&_HWCAP_S390_DFP != 0\n\t\tcpu.S390X.HasETF3EH = val&_HWCAP_S390_ETF3EH != 0\n\t\tcpu.S390X.HasMSA = val&_HWCAP_S390_MSA != 0\n\t\tcpu.S390X.HasVX = val&_HWCAP_S390_VX != 0\n\t\tcpu.S390X.HasVXE = val&_HWCAP_S390_VXE != 0\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ UVa 423 - MPI Maelstrom\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"strconv\"\n)\n\nvar n int\n\nfunc min(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc floydWarshall(matrix [][]int) {\n\tfor k := 0; k < n; k++ {\n\t\tfor i := 0; i < n; i++ {\n\t\t\tfor j := 0; j < n; j++ {\n\t\t\t\tmatrix[i][j] = min(matrix[i][j], matrix[i][k]+matrix[k][j])\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc main() {\n\tin, _ := os.Open(\"423.in\")\n\tdefer in.Close()\n\tout, _ := os.Create(\"423.out\")\n\tdefer out.Close()\n\n\tfmt.Fscanf(in, \"%d\", &n)\n\tmatrix := make([][]int, n)\n\tfor i := range matrix {\n\t\tmatrix[i] = make([]int, n)\n\t}\n\tvar token string\n\tfor i := 1; i < n; i++ {\n\t\tfor j := 0; j < i; j++ {\n\t\t\tfmt.Fscanf(in, \"%s\", &token)\n\t\t\tif token == \"x\" {\n\t\t\t\tmatrix[i][j] = math.MaxInt32\n\t\t\t} else {\n\t\t\t\tmatrix[i][j], _ = strconv.Atoi(token)\n\t\t\t}\n\t\t\tmatrix[j][i] = matrix[i][j]\n\t\t}\n\t}\n\tfloydWarshall(matrix)\n\tvar max int\n\tfor i := 1; i < n; i++ {\n\t\tif matrix[0][i] > max {\n\t\t\tmax = matrix[0][i]\n\t\t}\n\t}\n\tfmt.Fprintln(out, max)\n}\n<commit_msg>clean up<commit_after>\/\/ UVa 423 - MPI Maelstrom\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"strconv\"\n)\n\nvar n int\n\nfunc floydWarshall(matrix [][]int) {\n\tfor k := 0; k < n; k++ {\n\t\tfor i := 0; i < n; i++ {\n\t\t\tfor j := 0; j < n; j++ {\n\t\t\t\tif matrix[i][k]+matrix[k][j] < matrix[i][j] {\n\t\t\t\t\tmatrix[i][j] = matrix[i][k] + matrix[k][j]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc main() {\n\tin, _ := os.Open(\"423.in\")\n\tdefer in.Close()\n\tout, _ := os.Create(\"423.out\")\n\tdefer out.Close()\n\n\tfmt.Fscanf(in, \"%d\", &n)\n\tmatrix := make([][]int, n)\n\tfor i := range matrix {\n\t\tmatrix[i] = make([]int, n)\n\t}\n\tvar token string\n\tfor i := 1; i < n; i++ {\n\t\tfor j := 0; j < i; j++ {\n\t\t\tfmt.Fscanf(in, \"%s\", &token)\n\t\t\tif token == \"x\" {\n\t\t\t\tmatrix[i][j] = math.MaxInt32\n\t\t\t} else {\n\t\t\t\tmatrix[i][j], _ = strconv.Atoi(token)\n\t\t\t}\n\t\t\tmatrix[j][i] = matrix[i][j]\n\t\t}\n\t}\n\tfloydWarshall(matrix)\n\tvar max int\n\tfor i := 1; i < n; i++ {\n\t\tif matrix[0][i] > max {\n\t\t\tmax = matrix[0][i]\n\t\t}\n\t}\n\tfmt.Fprintln(out, max)\n}\n<|endoftext|>"} {"text":"<commit_before>package models\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/laincloud\/hagrid\/config\"\n\t\"github.com\/mijia\/sweb\/log\"\n)\n\ntype Alert struct {\n\tID int `gorm:\"primary_key\"`\n\tName string `gorm:\"type:varchar(64);not null;unique\"`\n\tEnabled bool `gorm:\"not null\"`\n\tServices []Service `gorm:\"ForeignKey:AlertID\"`\n\tTemplates []Template `gorm:\"ForeignKey:AlertID\"`\n\tNotifiers []User `gorm:\"many2many:alert_to_user_notify\"`\n\tAdmins []User `gorm:\"many2many:alert_to_user_admin\"`\n\n\tCreatedAt time.Time\n}\n\ntype Icinga2Apply struct {\n\tName string\n\tNotificationType string\n\tUsers string\n\tServiceName string\n\tInterval int\n}\n\ntype Icinga2Service struct {\n\tID string\n\tName string\n\tWarning string\n\tCritical string\n\tCheckAttempts int\n\tResendTime int\n\tMetricURL string\n\tMetricType string\n}\n\nvar metricType = map[string]string{\n\tGreater: \"greater\",\n\tLess: \"less\",\n\tNotEqual: \"notequal\",\n\tEqual: \"equal\",\n}\n\nfunc (s *Icinga2Service) generateApplies(notifiersStr string) []Icinga2Apply {\n\tvar icinga2Applies []Icinga2Apply\n\tfor _, notificationType := range config.GetIcinga2NotificationTypes() {\n\t\tnewNotification := Icinga2Apply{\n\t\t\tName: fmt.Sprintf(\"%s[%s]\", s.Name, notificationType),\n\t\t\tNotificationType: notificationType,\n\t\t\tUsers: notifiersStr,\n\t\t\tServiceName: s.Name,\n\t\t\tInterval: 60 * s.ResendTime, \/\/The metric of interval is second here\n\t\t}\n\t\ticinga2Applies = append(icinga2Applies, newNotification)\n\t}\n\treturn icinga2Applies\n}\n\nfunc (al *Alert) generateIcinga2Config() ([]Icinga2Apply, []Icinga2Service) {\n\tnotifiersList := make([]string, 0, len(al.Notifiers))\n\tfor _, notifier := range al.Notifiers {\n\t\tnotifiersList = append(notifiersList, notifier.Name)\n\t}\n\tnotifiersBytes, _ := json.Marshal(notifiersList)\n\tnotifiersStr := string(notifiersBytes)\n\tvar icinga2Applies []Icinga2Apply\n\tvar icinga2Services []Icinga2Service\n\tfor _, service := range al.Services {\n\t\tif service.Enabled {\n\t\t\tif !strings.ContainsRune(service.Metric, '$') {\n\t\t\t\t\/\/ This service is not using template\n\t\t\t\tnewService := Icinga2Service{\n\t\t\t\t\tID: strconv.Itoa(service.ID),\n\t\t\t\t\tName: fmt.Sprintf(\"%s-%s\", al.Name, service.Name),\n\t\t\t\t\tWarning: service.Warning,\n\t\t\t\t\tCritical: service.Critical,\n\t\t\t\t\tCheckAttempts: service.CheckAttempts,\n\t\t\t\t\tResendTime: service.ResendTime,\n\t\t\t\t\tMetricURL: fmt.Sprintf(\"%s\/render?target=%s\", config.GetSource(), service.Metric),\n\t\t\t\t\tMetricType: metricType[service.CheckType],\n\t\t\t\t}\n\t\t\t\ticinga2Services = append(icinga2Services, newService)\n\t\t\t\ticinga2Applies = append(icinga2Applies, newService.generateApplies(notifiersStr)...)\n\n\t\t\t} else {\n\t\t\t\t\/\/ This service is using template\n\t\t\t\tfor _, tmpl := range al.Templates {\n\t\t\t\t\treplacedStr := \"$\" + strings.TrimSpace(tmpl.Name)\n\t\t\t\t\tif strings.Contains(service.Metric, replacedStr) {\n\t\t\t\t\t\tfor _, value := range strings.Split(tmpl.Values, \",\") {\n\t\t\t\t\t\t\ttrimedValue := strings.TrimSpace(value)\n\t\t\t\t\t\t\tif trimedValue != \"\" {\n\t\t\t\t\t\t\t\trealMetric := strings.Replace(service.Metric, replacedStr, trimedValue, -1)\n\t\t\t\t\t\t\t\tnewService := Icinga2Service{\n\t\t\t\t\t\t\t\t\tID: fmt.Sprintf(\"%d-%d-%s\", service.ID, tmpl.ID, trimedValue),\n\t\t\t\t\t\t\t\t\tName: fmt.Sprintf(\"%s-%s[%s]\", al.Name, service.Name, trimedValue),\n\t\t\t\t\t\t\t\t\tWarning: service.Warning,\n\t\t\t\t\t\t\t\t\tCritical: service.Critical,\n\t\t\t\t\t\t\t\t\tCheckAttempts: service.CheckAttempts,\n\t\t\t\t\t\t\t\t\tResendTime: service.ResendTime,\n\t\t\t\t\t\t\t\t\tMetricURL: fmt.Sprintf(\"%s\/render?target=%s\", config.GetSource(), realMetric),\n\t\t\t\t\t\t\t\t\tMetricType: metricType[service.CheckType],\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\ticinga2Services = append(icinga2Services, newService)\n\t\t\t\t\t\t\t\ticinga2Applies = append(icinga2Applies, newService.generateApplies(notifiersStr)...)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn icinga2Applies, icinga2Services\n}\n\nfunc SaveAlert(alert *Alert) error {\n\tif err := db.Save(alert).Error; err != nil {\n\t\tlog.Errorf(\"Saving alert failed: %s\", err.Error())\n\t\treturn fmt.Errorf(\"Saving alert failed. Ask admin for help\")\n\t}\n\treturn nil\n}\n\nfunc DeleteAlert(id int) error {\n\tif err := db.Delete(Alert{}, \"id = ?\", id).Error; err != nil {\n\t\tlog.Errorf(\"Deleting alert failed: %s\", err.Error())\n\t\treturn fmt.Errorf(\"Deleting alert failed. Ask admin for help\")\n\t}\n\treturn nil\n}\n\nfunc GetAlert(alert *Alert, id int) error {\n\tif err := db.First(alert, id).Error; err != nil {\n\t\tlog.Errorf(\"Getting alert %d failed: %s\", id, err.Error())\n\t\treturn fmt.Errorf(\"Getting alert failed. Ask admin for help\")\n\t}\n\treturn nil\n}\n\nfunc GetAllAlerts(alerts *[]Alert) error {\n\tif err := db.Find(alerts).Error; err != nil {\n\t\tlog.Errorf(\"Getting all alerts failed: %s\", err.Error())\n\t\treturn fmt.Errorf(\"Getting all alerts failed. Ask admin for help\")\n\t}\n\treturn nil\n}\n\nfunc IsAlertDuplicated(name string) bool {\n\tvar count int\n\terr := db.Model(&Alert{}).Where(\"name = ?\", name).Count(&count).Error\n\treturn count != 0 || err != nil\n}\n\nfunc GetDetailedAlert(alert *Alert, id int) error {\n\tif err := GetAlert(alert, id); err != nil {\n\t\treturn err\n\t}\n\tif err := db.Model(alert).Association(\"Services\").Find(&(alert.Services)).Error; err != nil {\n\t\tlog.Errorf(\"Getting associated services of %d failed: %s\", id, err.Error())\n\t\treturn fmt.Errorf(\"Getting associated services failed. Ask admin for help\")\n\t}\n\tif err := db.Model(alert).Association(\"Templates\").Find(&(alert.Templates)).Error; err != nil {\n\t\tlog.Errorf(\"Getting associated templates of %d failed: %s\", id, err.Error())\n\t\treturn fmt.Errorf(\"Getting associated templates failed. Ask admin for help\")\n\t}\n\tif err := db.Model(alert).Association(\"Notifiers\").Find(&(alert.Notifiers)).Error; err != nil {\n\t\tlog.Errorf(\"Getting associated notifiers of %d failed: %s\", id, err.Error())\n\t\treturn fmt.Errorf(\"Getting notifiers services failed. Ask admin for help\")\n\t}\n\tif err := db.Model(alert).Association(\"Admins\").Find(&(alert.Admins)).Error; err != nil {\n\t\tlog.Errorf(\"Getting associated admins of %d failed: %s\", id, err.Error())\n\t\treturn fmt.Errorf(\"Getting associated admins failed. Ask admin for help\")\n\t}\n\treturn nil\n}\n\nfunc SynchronizeAlert(id int) error {\n\tsyncLock.Lock()\n\tdefer syncLock.Unlock()\n\tvar (\n\t\tappliesFile string\n\t\tservicesFile string\n\t\terr error\n\t\tnewStage string\n\t)\n\talert := &Alert{}\n\tif err = GetDetailedAlert(alert, id); err != nil {\n\t\treturn err\n\t}\n\tpkgName := alertPackagePrefix + strconv.Itoa(id)\n\t\/\/ If the alert is not enabled, nothing will sync to icinga2, we only need to remove the package\n\tif !alert.Enabled || config.GetSource() == \"\" {\n\t\ticinga2Client.DeletePackage(pkgName)\n\t\treturn nil\n\t}\n\ticinga2Client.CreatePackage(pkgName)\n\ticinga2Applies, icinga2Services := alert.generateIcinga2Config()\n\tappliesFile, err = renderTemplate(\"applies.tmpl\", map[string][]Icinga2Apply{\"Applies\": icinga2Applies})\n\tif err != nil {\n\t\tlog.Errorf(\"Rendering template applies.tmpl failed: %s\", err.Error())\n\t\treturn fmt.Errorf(\"Rendering template failed. Ask admin for help\")\n\t}\n\tservicesFile, err = renderTemplate(\"services.tmpl\", map[string][]Icinga2Service{\"Services\": icinga2Services})\n\tif err != nil {\n\t\tlog.Errorf(\"Rendering template services.tmpl failed: %s\", err.Error())\n\t\treturn fmt.Errorf(\"Rendering template failed. Ask admin for help\")\n\t}\n\n\tfiles := make(map[string]string)\n\tfiles[\"conf.d\/applies.conf\"] = appliesFile\n\tfiles[\"conf.d\/services.conf\"] = servicesFile\n\tif newStage, err = icinga2Client.UploadFiles(pkgName, files); err != nil {\n\t\tlog.Errorf(\"Uploading alert config files failed: %s\", err.Error())\n\t\treturn fmt.Errorf(\"Uploading alert config files failed. Ask admin for help\")\n\t}\n\n\t\/\/ Clear old packages\n\tpkg, _ := icinga2Client.GetPackage(pkgName)\n\tif pkg != nil {\n\t\tfor _, stage := range pkg.Stages {\n\t\t\tif stage != newStage && stage != pkg.ActiveStage {\n\t\t\t\ticinga2Client.DeleteStage(pkgName, stage)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<commit_msg>Fix comparing template members of metric<commit_after>package models\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/laincloud\/hagrid\/config\"\n\t\"github.com\/mijia\/sweb\/log\"\n)\n\ntype Alert struct {\n\tID int `gorm:\"primary_key\"`\n\tName string `gorm:\"type:varchar(64);not null;unique\"`\n\tEnabled bool `gorm:\"not null\"`\n\tServices []Service `gorm:\"ForeignKey:AlertID\"`\n\tTemplates []Template `gorm:\"ForeignKey:AlertID\"`\n\tNotifiers []User `gorm:\"many2many:alert_to_user_notify\"`\n\tAdmins []User `gorm:\"many2many:alert_to_user_admin\"`\n\n\tCreatedAt time.Time\n}\n\ntype Icinga2Apply struct {\n\tName string\n\tNotificationType string\n\tUsers string\n\tServiceName string\n\tInterval int\n}\n\ntype Icinga2Service struct {\n\tID string\n\tName string\n\tWarning string\n\tCritical string\n\tCheckAttempts int\n\tResendTime int\n\tMetricURL string\n\tMetricType string\n}\n\nvar metricType = map[string]string{\n\tGreater: \"greater\",\n\tLess: \"less\",\n\tNotEqual: \"notequal\",\n\tEqual: \"equal\",\n}\n\nfunc (s *Icinga2Service) generateApplies(notifiersStr string) []Icinga2Apply {\n\tvar icinga2Applies []Icinga2Apply\n\tfor _, notificationType := range config.GetIcinga2NotificationTypes() {\n\t\tnewNotification := Icinga2Apply{\n\t\t\tName: fmt.Sprintf(\"%s[%s]\", s.Name, notificationType),\n\t\t\tNotificationType: notificationType,\n\t\t\tUsers: notifiersStr,\n\t\t\tServiceName: s.Name,\n\t\t\tInterval: 60 * s.ResendTime, \/\/The metric of interval is second here\n\t\t}\n\t\ticinga2Applies = append(icinga2Applies, newNotification)\n\t}\n\treturn icinga2Applies\n}\n\nfunc (al *Alert) generateIcinga2Config() ([]Icinga2Apply, []Icinga2Service) {\n\tnotifiersList := make([]string, 0, len(al.Notifiers))\n\tfor _, notifier := range al.Notifiers {\n\t\tnotifiersList = append(notifiersList, notifier.Name)\n\t}\n\tnotifiersBytes, _ := json.Marshal(notifiersList)\n\tnotifiersStr := string(notifiersBytes)\n\tvar icinga2Applies []Icinga2Apply\n\tvar icinga2Services []Icinga2Service\n\tfor _, service := range al.Services {\n\t\tif service.Enabled {\n\t\t\tif !strings.ContainsRune(service.Metric, '$') {\n\t\t\t\t\/\/ This service is not using template\n\t\t\t\tnewService := Icinga2Service{\n\t\t\t\t\tID: strconv.Itoa(service.ID),\n\t\t\t\t\tName: fmt.Sprintf(\"%s-%s\", al.Name, service.Name),\n\t\t\t\t\tWarning: service.Warning,\n\t\t\t\t\tCritical: service.Critical,\n\t\t\t\t\tCheckAttempts: service.CheckAttempts,\n\t\t\t\t\tResendTime: service.ResendTime,\n\t\t\t\t\tMetricURL: fmt.Sprintf(\"%s\/render?target=%s\", config.GetSource(), service.Metric),\n\t\t\t\t\tMetricType: metricType[service.CheckType],\n\t\t\t\t}\n\t\t\t\ticinga2Services = append(icinga2Services, newService)\n\t\t\t\ticinga2Applies = append(icinga2Applies, newService.generateApplies(notifiersStr)...)\n\n\t\t\t} else {\n\t\t\t\t\/\/ This service is using template\n\t\t\t\tfor _, tmpl := range al.Templates {\n\t\t\t\t\treplacedStr := \"$\" + strings.TrimSpace(tmpl.Name)\n\t\t\t\t\tif strings.Contains(service.Metric, replacedStr + \".\") || strings.HasSuffix(service.Metric, replacedStr) {\n\t\t\t\t\t\tfor _, value := range strings.Split(tmpl.Values, \",\") {\n\t\t\t\t\t\t\ttrimedValue := strings.TrimSpace(value)\n\t\t\t\t\t\t\tif trimedValue != \"\" {\n\t\t\t\t\t\t\t\trealMetric := strings.Replace(service.Metric, replacedStr, trimedValue, -1)\n\t\t\t\t\t\t\t\tnewService := Icinga2Service{\n\t\t\t\t\t\t\t\t\tID: fmt.Sprintf(\"%d-%d-%s\", service.ID, tmpl.ID, trimedValue),\n\t\t\t\t\t\t\t\t\tName: fmt.Sprintf(\"%s-%s[%s]\", al.Name, service.Name, trimedValue),\n\t\t\t\t\t\t\t\t\tWarning: service.Warning,\n\t\t\t\t\t\t\t\t\tCritical: service.Critical,\n\t\t\t\t\t\t\t\t\tCheckAttempts: service.CheckAttempts,\n\t\t\t\t\t\t\t\t\tResendTime: service.ResendTime,\n\t\t\t\t\t\t\t\t\tMetricURL: fmt.Sprintf(\"%s\/render?target=%s\", config.GetSource(), realMetric),\n\t\t\t\t\t\t\t\t\tMetricType: metricType[service.CheckType],\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\ticinga2Services = append(icinga2Services, newService)\n\t\t\t\t\t\t\t\ticinga2Applies = append(icinga2Applies, newService.generateApplies(notifiersStr)...)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn icinga2Applies, icinga2Services\n}\n\nfunc SaveAlert(alert *Alert) error {\n\tif err := db.Save(alert).Error; err != nil {\n\t\tlog.Errorf(\"Saving alert failed: %s\", err.Error())\n\t\treturn fmt.Errorf(\"Saving alert failed. Ask admin for help\")\n\t}\n\treturn nil\n}\n\nfunc DeleteAlert(id int) error {\n\tif err := db.Delete(Alert{}, \"id = ?\", id).Error; err != nil {\n\t\tlog.Errorf(\"Deleting alert failed: %s\", err.Error())\n\t\treturn fmt.Errorf(\"Deleting alert failed. Ask admin for help\")\n\t}\n\treturn nil\n}\n\nfunc GetAlert(alert *Alert, id int) error {\n\tif err := db.First(alert, id).Error; err != nil {\n\t\tlog.Errorf(\"Getting alert %d failed: %s\", id, err.Error())\n\t\treturn fmt.Errorf(\"Getting alert failed. Ask admin for help\")\n\t}\n\treturn nil\n}\n\nfunc GetAllAlerts(alerts *[]Alert) error {\n\tif err := db.Find(alerts).Error; err != nil {\n\t\tlog.Errorf(\"Getting all alerts failed: %s\", err.Error())\n\t\treturn fmt.Errorf(\"Getting all alerts failed. Ask admin for help\")\n\t}\n\treturn nil\n}\n\nfunc IsAlertDuplicated(name string) bool {\n\tvar count int\n\terr := db.Model(&Alert{}).Where(\"name = ?\", name).Count(&count).Error\n\treturn count != 0 || err != nil\n}\n\nfunc GetDetailedAlert(alert *Alert, id int) error {\n\tif err := GetAlert(alert, id); err != nil {\n\t\treturn err\n\t}\n\tif err := db.Model(alert).Association(\"Services\").Find(&(alert.Services)).Error; err != nil {\n\t\tlog.Errorf(\"Getting associated services of %d failed: %s\", id, err.Error())\n\t\treturn fmt.Errorf(\"Getting associated services failed. Ask admin for help\")\n\t}\n\tif err := db.Model(alert).Association(\"Templates\").Find(&(alert.Templates)).Error; err != nil {\n\t\tlog.Errorf(\"Getting associated templates of %d failed: %s\", id, err.Error())\n\t\treturn fmt.Errorf(\"Getting associated templates failed. Ask admin for help\")\n\t}\n\tif err := db.Model(alert).Association(\"Notifiers\").Find(&(alert.Notifiers)).Error; err != nil {\n\t\tlog.Errorf(\"Getting associated notifiers of %d failed: %s\", id, err.Error())\n\t\treturn fmt.Errorf(\"Getting notifiers services failed. Ask admin for help\")\n\t}\n\tif err := db.Model(alert).Association(\"Admins\").Find(&(alert.Admins)).Error; err != nil {\n\t\tlog.Errorf(\"Getting associated admins of %d failed: %s\", id, err.Error())\n\t\treturn fmt.Errorf(\"Getting associated admins failed. Ask admin for help\")\n\t}\n\treturn nil\n}\n\nfunc SynchronizeAlert(id int) error {\n\tsyncLock.Lock()\n\tdefer syncLock.Unlock()\n\tvar (\n\t\tappliesFile string\n\t\tservicesFile string\n\t\terr error\n\t\tnewStage string\n\t)\n\talert := &Alert{}\n\tif err = GetDetailedAlert(alert, id); err != nil {\n\t\treturn err\n\t}\n\tpkgName := alertPackagePrefix + strconv.Itoa(id)\n\t\/\/ If the alert is not enabled, nothing will sync to icinga2, we only need to remove the package\n\tif !alert.Enabled || config.GetSource() == \"\" {\n\t\ticinga2Client.DeletePackage(pkgName)\n\t\treturn nil\n\t}\n\ticinga2Client.CreatePackage(pkgName)\n\ticinga2Applies, icinga2Services := alert.generateIcinga2Config()\n\tappliesFile, err = renderTemplate(\"applies.tmpl\", map[string][]Icinga2Apply{\"Applies\": icinga2Applies})\n\tif err != nil {\n\t\tlog.Errorf(\"Rendering template applies.tmpl failed: %s\", err.Error())\n\t\treturn fmt.Errorf(\"Rendering template failed. Ask admin for help\")\n\t}\n\tservicesFile, err = renderTemplate(\"services.tmpl\", map[string][]Icinga2Service{\"Services\": icinga2Services})\n\tif err != nil {\n\t\tlog.Errorf(\"Rendering template services.tmpl failed: %s\", err.Error())\n\t\treturn fmt.Errorf(\"Rendering template failed. Ask admin for help\")\n\t}\n\n\tfiles := make(map[string]string)\n\tfiles[\"conf.d\/applies.conf\"] = appliesFile\n\tfiles[\"conf.d\/services.conf\"] = servicesFile\n\tif newStage, err = icinga2Client.UploadFiles(pkgName, files); err != nil {\n\t\tlog.Errorf(\"Uploading alert config files failed: %s\", err.Error())\n\t\treturn fmt.Errorf(\"Uploading alert config files failed. Ask admin for help\")\n\t}\n\n\t\/\/ Clear old packages\n\tpkg, _ := icinga2Client.GetPackage(pkgName)\n\tif pkg != nil {\n\t\tfor _, stage := range pkg.Stages {\n\t\t\tif stage != newStage && stage != pkg.ActiveStage {\n\t\t\t\ticinga2Client.DeleteStage(pkgName, stage)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package brain\n\nimport (\n\t\"github.com\/BytemarkHosting\/bytemark-client\/lib\/output\"\n)\n\n\/\/ TODO (tom): Add pretty print & test file for this function\n\/\/ VMDefaultSpec represents a VM Default specification.\ntype VMDefaultSpec struct {\n\tVMDefault VMDefault `json:\"vm_default,omitempty\"`\n\tDiscs []Disc `json:\"disc,omitempty\"`\n\tReimage *ImageInstall `json:\"reimage,omitempty\"` \/\/ may want to be null, so is a pointer\n}\n\n\/\/ DefaultFields returns the list of default fields to feed to github.com\/BytemarkHosting\/row.From for this type.\nfunc (spec VMDefaultSpec) DefaultFields(f output.Format) string {\n\tswitch f {\n\tcase output.List:\n\t\treturn \"VirtualMachine, Discs, Reimage\"\n\t}\n\treturn \"VirtualMachine, Discs, Reimage\"\n}<commit_msg>added pretty print function for the vm default specification<commit_after>package brain\n\nimport (\n\t\"io\"\n\n\t\"github.com\/BytemarkHosting\/bytemark-client\/lib\/output\"\n\t\"github.com\/BytemarkHosting\/bytemark-client\/lib\/output\/prettyprint\"\n)\n\n\/\/ TODO(tom): Add pretty print & test file for this function\n\/\/ VMDefaultSpec represents a VM Default specification.\ntype VMDefaultSpec struct {\n\tVMDefault VMDefault `json:\"vm_default,omitempty\"`\n\tDiscs []Disc `json:\"disc,omitempty\"`\n\tReimage *ImageInstall `json:\"reimage,omitempty\"` \/\/ may want to be null, so is a pointer\n}\n\n\/\/ DefaultFields returns the list of default fields to feed to github.com\/BytemarkHosting\/row.From for this type.\nfunc (spec VMDefaultSpec) DefaultFields(f output.Format) string {\n\tswitch f {\n\tcase output.List:\n\t\treturn \"VirtualMachine, Discs, Reimage\"\n\t}\n\treturn \"VirtualMachine, Discs, Reimage\"\n}\n\n\/\/ TODO(tom): add test file for this\n\/\/ TODO(tom): add backup schedules to prettyprint\n\n\/\/ PrettyPrint outputs a nice human-readable overview of the VM Default Specification to the given writer.\nfunc (spec VMDefaultSpec) PrettyPrint(wr io.Writer, detail prettyprint.DetailLevel) error {\n\tconst template = `{{ define \"vmdspec_sgl\" }} ▸ {{.VMDefault.Name }} in {{capitalize .VMDefault.ZoneName}}{{ end }}\n{{ define \"vmdspec_vmd\" }} {{ pluralize \"core\" \"cores\" .VMDefault.Cores }}, {{ mibgib .VMDefault.Memory }}, {{pluralize \"disc\" \"discs\" }}{{ else }}no discs{{ end }}{{ end }}\n{{ define \"vmdspec_reimage\" }} {{ .Reimage.Distribution }}{{ end }}\n\n{{ define \"vmdspec_discs\" }}\n{{- if .Discs }} discs:\n{{- range .VMDefault.Discs }}\n • {{ prettysprint . \"_sgl\" }}\n{{- end }}\n{{ end -}}\n{{ end }}\n\n{{ define \"vmdspec_medium\" }}{{ template \"vmdspec_sgl\" . }}\n{{ template \"vmdspec_vmd\" . }}{{ end }}\n\n{{ define \"vmdspec_full\" -}}\n{{ template \"vmdspec_medium\" . }}\n\n{{ template \"vmdspec_vmd\" . }}\n{{ template \"vmdspec_discs\" . -}}\n{{ template \"vmdspec_reimage\" . }}\n{{ end }}`\n\treturn prettyprint.Run(wr, template, \"vmdspec\"+string(detail), spec)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright © 2013-2017 Pierre Neidhardt <ambrevar@gmail.com>\n\/\/ Use of this file is governed by the license that can be found in LICENSE.\n\npackage main\n\nimport (\n\t\"io\/ioutil\"\n\t\"testing\"\n\n\t\"github.com\/ambrevar\/demlo\/cuesheet\"\n)\n\nconst (\n\tsampleCuesheet = \"cuesheet\/testdata\/sample.cue\"\n\tscriptCase = \"scripts\/30-case.lua\"\n\tscriptPunctuation = \"scripts\/40-punctuation.lua\"\n)\n\nfunc TestFixPunctuation(t *testing.T) {\n\tinput := inputInfo{}\n\toutput := outputInfo{\n\t\tTags: map[string]string{\n\t\t\t\"a b\": \"a_b\",\n\t\t\t\".a\": \".a\",\n\t\t\t\"a (\": \"a(\",\n\t\t\t\"(a\": \"( a\",\n\t\t\t\"a c\": \"a \tc\",\n\t\t\t\"a\": \"\t a \t\",\n\t\t\t\"Some i.n.i.t.i.a.l.s.\": \"Some i.n.i.t.i.a.l.s.\",\n\t\t},\n\t}\n\n\tbuf, err := ioutil.ReadFile(scriptPunctuation)\n\tif err != nil {\n\t\tt.Fatal(\"Script is not readable\", err)\n\t}\n\n\t\/\/ Compile scripts.\n\tL, err := MakeSandbox(nil)\n\tSandboxCompileScript(L, \"punctuation\", string(buf))\n\tif err != nil {\n\t\tt.Fatal(\"Spurious sandbox\", err)\n\t}\n\tdefer L.Close()\n\n\terr = RunScript(L, \"punctuation\", &input, &output)\n\tif err != nil {\n\t\tt.Fatalf(\"script punctuation: %s\", err)\n\t}\n\n\tfor want, got := range output.Tags {\n\t\tif got != want {\n\t\t\tt.Errorf(`Got \"%v\", want \"%v\"`, got, want)\n\t\t}\n\t}\n}\n\nfunc TestTitleCase(t *testing.T) {\n\tinput := inputInfo{}\n\toutput := outputInfo{\n\t\tTags: map[string]string{\n\t\t\t\"All Lowercase Words\": \"all lowercase words\",\n\t\t\t\"All Uppercase Words\": \"ALL UPPERCASE WORDS\",\n\t\t\t\"All Crazy Case Words\": \"aLl cRaZY cASE WordS\",\n\t\t\t\"With Common Preps in a CD Into the Box.\": \"With common preps in a cd INTO the box.\",\n\t\t\t\"Feat and feat. The Machines.\": \"Feat and Feat. the machines.\",\n\t\t\t\"Unicode Apos´trophe\": \"unicode apos´trophe\",\n\t\t\t\"...\": \"...\",\n\t\t\t\".'?\": \".'?\",\n\t\t\t\"I'll Be Ill'\": \"i'll be ill'\",\n\t\t\t\"Names Like O'Hara, D’Arcy\": \"Names like o'hara, d’arcy\",\n\t\t\t\"Names Like McDonald and MacNeil\": \"Names like mcdonald and macneil\",\n\t\t\t\"Éléanor\": \"élÉanor\",\n\t\t\t\"XIV LIV Xiv Liv. Liv. Xiv.\": \"XIV LIV xiv liv. liv. xiv.\",\n\t\t\t\"A Start With a Lowercase Constant\": \"a start with a lowercase constant\",\n\t\t\t`\"A Double Quoted Sentence\" and 'One Single Quoted'.`: `\"a double quoted sentence\" and 'one single quoted'.`,\n\t\t\t`Another \"Double Quoted Sentence\", and \"A Sentence More\".`: `another \"double quoted sentence\", and \"a sentence more\".`,\n\t\t\t\"Some I.N.I.T.I.A.L.S.\": \"Some i.n.i.t.i.a.l.s.\",\n\t\t},\n\t}\n\n\tbuf, err := ioutil.ReadFile(scriptCase)\n\tif err != nil {\n\t\tt.Fatal(\"Script is not readable\", err)\n\t}\n\n\t\/\/ Compile scripts.\n\tL, err := MakeSandbox(nil)\n\tSandboxCompileScript(L, \"case\", string(buf))\n\tif err != nil {\n\t\tt.Fatal(\"Spurious sandbox\", err)\n\t}\n\tdefer L.Close()\n\n\terr = RunScript(L, \"case\", &input, &output)\n\tif err != nil {\n\t\tt.Fatalf(\"script case: %s\", err)\n\t}\n\n\tfor want, got := range output.Tags {\n\t\tif got != want {\n\t\t\tt.Errorf(`Got \"%v\", want \"%v\"`, got, want)\n\t\t}\n\t}\n}\n\nfunc TestSentenceCase(t *testing.T) {\n\tinput := inputInfo{}\n\toutput := outputInfo{\n\t\tTags: map[string]string{\n\t\t\t\"Capitalized words\": \"capitalized words\",\n\t\t\t\"Machine\": \"machine\",\n\t\t\t\"Rise of the machines\": \"Rise Of The Machines\",\n\t\t\t\"Chanson d'avant\": \"Chanson D'Avant\",\n\t\t\t\"Names like o'hara, d’arcy\": \"Names LIKE O'HARA, D’ARCY\",\n\t\t\t\"Names like McDonald and MacNeil\": \"Names LIKE MCDONALD AND MACNEIL\",\n\t\t\t\"XIV LIV xiv liv. Liv. Xiv.\": \"XIV LIV xiv liv. liv. xiv.\",\n\t\t},\n\t}\n\n\tbuf, err := ioutil.ReadFile(scriptCase)\n\tif err != nil {\n\t\tt.Fatal(\"Script is not readable\", err)\n\t}\n\n\t\/\/ Compile scripts.\n\tL, err := MakeSandbox(nil)\n\tSandboxCompileScript(L, \"case\", string(buf))\n\tif err != nil {\n\t\tt.Fatal(\"Spurious sandbox\", err)\n\t}\n\tdefer L.Close()\n\n\t\/\/ Set setencecase.\n\tL.PushBoolean(true)\n\tL.SetGlobal(\"scase\")\n\n\terr = RunScript(L, \"case\", &input, &output)\n\tif err != nil {\n\t\tt.Fatalf(\"script case: %s\", err)\n\t}\n\n\tfor want, got := range output.Tags {\n\t\tif got != want {\n\t\t\tt.Errorf(`Got \"%v\", want \"%v\"`, got, want)\n\t\t}\n\t}\n}\n\nfunc TestStringNorm(t *testing.T) {\n\twant := []struct {\n\t\ts string\n\t\tnorm string\n\t}{\n\t\t{s: \"A\", norm: \"a\"},\n\t\t{s: \"0a\", norm: \"a\"},\n\t\t{s: \"00a\", norm: \"a\"},\n\t\t{s: \"a0\", norm: \"a0\"},\n\t\t{s: \"a.0\", norm: \"a\"},\n\t\t{s: \"a0a\", norm: \"a0a\"},\n\t\t{s: \"a.0a\", norm: \"aa\"},\n\t\t{s: \"10\", norm: \"10\"},\n\t\t{s: \"01\", norm: \"1\"},\n\t\t{s: \".a\", norm: \"a\"},\n\t\t{s: \"..a\", norm: \"a\"},\n\t}\n\n\tfor _, v := range want {\n\t\tn := stringNorm(v.s)\n\t\tif n != v.norm {\n\t\t\tt.Errorf(`Got \"%v\", want norm(\"%v\")==\"%v\"`, n, v.s, v.norm)\n\t\t}\n\t}\n}\n\nfunc TestStringRel(t *testing.T) {\n\twant := []struct {\n\t\ta string\n\t\tb string\n\t\trel float64\n\t}{\n\t\t{a: \"foo\", b: \"bar\", rel: 0.0},\n\t\t{a: \"foo\", b: \"foo\", rel: 1.0},\n\t\t{a: \"foobar\", b: \"foobaz\", rel: 1 - float64(1)\/float64(6)},\n\t\t{a: \"\", b: \"b\", rel: 0.0},\n\t\t{a: \"a\", b: \"\", rel: 0.0},\n\t\t{a: \"\", b: \"\", rel: 1.0},\n\t\t{a: \"ab\", b: \"ba\", rel: 0.5},\n\t\t{a: \"abba\", b: \"aba\", rel: 0.75},\n\t\t{a: \"aba\", b: \"abba\", rel: 0.75},\n\t\t{a: \"résumé\", b: \"resume\", rel: 1 - float64(2)\/float64(6)},\n\t}\n\n\tfor _, v := range want {\n\t\tr := stringRel(v.a, v.b)\n\t\tif r != v.rel {\n\t\t\tt.Errorf(`Got %v, want rel(\"%v\", \"%v\")==%v`, r, v.a, v.b, v.rel)\n\t\t}\n\t}\n}\n\nfunc TestFFmpegSplitTimes(t *testing.T) {\n\t\/\/ We need to make up last track's duration: 3 minutes.\n\ttotaltime := float64(17*60 + 4 + 3*60)\n\n\twant := []struct {\n\t\ttrack int\n\t\tstart string\n\t\tduration string\n\t}{\n\t\t{track: 0, start: \"00:00:00.000\", duration: \"00:06:40.360\"},\n\t\t{track: 1, start: \"00:06:40.360\", duration: \"00:04:13.640\"},\n\t\t{track: 3, start: \"00:17:04.000\", duration: \"00:03:00.000\"},\n\t\t{track: 4, start: \"\", duration: \"\"},\n\t\t{track: 8, start: \"\", duration: \"\"},\n\t}\n\n\tbuf, err := ioutil.ReadFile(sampleCuesheet)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tsheet, err := cuesheet.New(buf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfor _, v := range want {\n\t\tstart, duration := ffmpegSplitTimes(sheet, \"Faithless - Live in Berlin (CD1).mp3\", v.track, totaltime)\n\t\tif start != v.start || duration != v.duration {\n\t\t\tt.Errorf(\"Got {start: %v, duration: %v}, want {start: %v, duration: %v}\", start, duration, v.start, v.duration)\n\t\t}\n\t}\n}\n<commit_msg>Fix MakeSandbox result count in tests<commit_after>\/\/ Copyright © 2013-2017 Pierre Neidhardt <ambrevar@gmail.com>\n\/\/ Use of this file is governed by the license that can be found in LICENSE.\n\npackage main\n\nimport (\n\t\"io\/ioutil\"\n\t\"testing\"\n\n\t\"github.com\/ambrevar\/demlo\/cuesheet\"\n)\n\nconst (\n\tsampleCuesheet = \"cuesheet\/testdata\/sample.cue\"\n\tscriptCase = \"scripts\/30-case.lua\"\n\tscriptPunctuation = \"scripts\/40-punctuation.lua\"\n)\n\nfunc TestFixPunctuation(t *testing.T) {\n\tinput := inputInfo{}\n\toutput := outputInfo{\n\t\tTags: map[string]string{\n\t\t\t\"a b\": \"a_b\",\n\t\t\t\".a\": \".a\",\n\t\t\t\"a (\": \"a(\",\n\t\t\t\"(a\": \"( a\",\n\t\t\t\"a c\": \"a \tc\",\n\t\t\t\"a\": \"\t a \t\",\n\t\t\t\"Some i.n.i.t.i.a.l.s.\": \"Some i.n.i.t.i.a.l.s.\",\n\t\t},\n\t}\n\n\tbuf, err := ioutil.ReadFile(scriptPunctuation)\n\tif err != nil {\n\t\tt.Fatal(\"Script is not readable\", err)\n\t}\n\n\t\/\/ Compile scripts.\n\tL := MakeSandbox(nil)\n\tSandboxCompileScript(L, \"punctuation\", string(buf))\n\tif err != nil {\n\t\tt.Fatal(\"Spurious sandbox\", err)\n\t}\n\tdefer L.Close()\n\n\terr = RunScript(L, \"punctuation\", &input, &output)\n\tif err != nil {\n\t\tt.Fatalf(\"script punctuation: %s\", err)\n\t}\n\n\tfor want, got := range output.Tags {\n\t\tif got != want {\n\t\t\tt.Errorf(`Got \"%v\", want \"%v\"`, got, want)\n\t\t}\n\t}\n}\n\nfunc TestTitleCase(t *testing.T) {\n\tinput := inputInfo{}\n\toutput := outputInfo{\n\t\tTags: map[string]string{\n\t\t\t\"All Lowercase Words\": \"all lowercase words\",\n\t\t\t\"All Uppercase Words\": \"ALL UPPERCASE WORDS\",\n\t\t\t\"All Crazy Case Words\": \"aLl cRaZY cASE WordS\",\n\t\t\t\"With Common Preps in a CD Into the Box.\": \"With common preps in a cd INTO the box.\",\n\t\t\t\"Feat and feat. The Machines.\": \"Feat and Feat. the machines.\",\n\t\t\t\"Unicode Apos´trophe\": \"unicode apos´trophe\",\n\t\t\t\"...\": \"...\",\n\t\t\t\".'?\": \".'?\",\n\t\t\t\"I'll Be Ill'\": \"i'll be ill'\",\n\t\t\t\"Names Like O'Hara, D’Arcy\": \"Names like o'hara, d’arcy\",\n\t\t\t\"Names Like McDonald and MacNeil\": \"Names like mcdonald and macneil\",\n\t\t\t\"Éléanor\": \"élÉanor\",\n\t\t\t\"XIV LIV Xiv Liv. Liv. Xiv.\": \"XIV LIV xiv liv. liv. xiv.\",\n\t\t\t\"A Start With a Lowercase Constant\": \"a start with a lowercase constant\",\n\t\t\t`\"A Double Quoted Sentence\" and 'One Single Quoted'.`: `\"a double quoted sentence\" and 'one single quoted'.`,\n\t\t\t`Another \"Double Quoted Sentence\", and \"A Sentence More\".`: `another \"double quoted sentence\", and \"a sentence more\".`,\n\t\t\t\"Some I.N.I.T.I.A.L.S.\": \"Some i.n.i.t.i.a.l.s.\",\n\t\t},\n\t}\n\n\tbuf, err := ioutil.ReadFile(scriptCase)\n\tif err != nil {\n\t\tt.Fatal(\"Script is not readable\", err)\n\t}\n\n\t\/\/ Compile scripts.\n\tL := MakeSandbox(nil)\n\tSandboxCompileScript(L, \"case\", string(buf))\n\tif err != nil {\n\t\tt.Fatal(\"Spurious sandbox\", err)\n\t}\n\tdefer L.Close()\n\n\terr = RunScript(L, \"case\", &input, &output)\n\tif err != nil {\n\t\tt.Fatalf(\"script case: %s\", err)\n\t}\n\n\tfor want, got := range output.Tags {\n\t\tif got != want {\n\t\t\tt.Errorf(`Got \"%v\", want \"%v\"`, got, want)\n\t\t}\n\t}\n}\n\nfunc TestSentenceCase(t *testing.T) {\n\tinput := inputInfo{}\n\toutput := outputInfo{\n\t\tTags: map[string]string{\n\t\t\t\"Capitalized words\": \"capitalized words\",\n\t\t\t\"Machine\": \"machine\",\n\t\t\t\"Rise of the machines\": \"Rise Of The Machines\",\n\t\t\t\"Chanson d'avant\": \"Chanson D'Avant\",\n\t\t\t\"Names like o'hara, d’arcy\": \"Names LIKE O'HARA, D’ARCY\",\n\t\t\t\"Names like McDonald and MacNeil\": \"Names LIKE MCDONALD AND MACNEIL\",\n\t\t\t\"XIV LIV xiv liv. Liv. Xiv.\": \"XIV LIV xiv liv. liv. xiv.\",\n\t\t},\n\t}\n\n\tbuf, err := ioutil.ReadFile(scriptCase)\n\tif err != nil {\n\t\tt.Fatal(\"Script is not readable\", err)\n\t}\n\n\t\/\/ Compile scripts.\n\tL := MakeSandbox(nil)\n\tSandboxCompileScript(L, \"case\", string(buf))\n\tif err != nil {\n\t\tt.Fatal(\"Spurious sandbox\", err)\n\t}\n\tdefer L.Close()\n\n\t\/\/ Set setencecase.\n\tL.PushBoolean(true)\n\tL.SetGlobal(\"scase\")\n\n\terr = RunScript(L, \"case\", &input, &output)\n\tif err != nil {\n\t\tt.Fatalf(\"script case: %s\", err)\n\t}\n\n\tfor want, got := range output.Tags {\n\t\tif got != want {\n\t\t\tt.Errorf(`Got \"%v\", want \"%v\"`, got, want)\n\t\t}\n\t}\n}\n\nfunc TestStringNorm(t *testing.T) {\n\twant := []struct {\n\t\ts string\n\t\tnorm string\n\t}{\n\t\t{s: \"A\", norm: \"a\"},\n\t\t{s: \"0a\", norm: \"a\"},\n\t\t{s: \"00a\", norm: \"a\"},\n\t\t{s: \"a0\", norm: \"a0\"},\n\t\t{s: \"a.0\", norm: \"a\"},\n\t\t{s: \"a0a\", norm: \"a0a\"},\n\t\t{s: \"a.0a\", norm: \"aa\"},\n\t\t{s: \"10\", norm: \"10\"},\n\t\t{s: \"01\", norm: \"1\"},\n\t\t{s: \".a\", norm: \"a\"},\n\t\t{s: \"..a\", norm: \"a\"},\n\t}\n\n\tfor _, v := range want {\n\t\tn := stringNorm(v.s)\n\t\tif n != v.norm {\n\t\t\tt.Errorf(`Got \"%v\", want norm(\"%v\")==\"%v\"`, n, v.s, v.norm)\n\t\t}\n\t}\n}\n\nfunc TestStringRel(t *testing.T) {\n\twant := []struct {\n\t\ta string\n\t\tb string\n\t\trel float64\n\t}{\n\t\t{a: \"foo\", b: \"bar\", rel: 0.0},\n\t\t{a: \"foo\", b: \"foo\", rel: 1.0},\n\t\t{a: \"foobar\", b: \"foobaz\", rel: 1 - float64(1)\/float64(6)},\n\t\t{a: \"\", b: \"b\", rel: 0.0},\n\t\t{a: \"a\", b: \"\", rel: 0.0},\n\t\t{a: \"\", b: \"\", rel: 1.0},\n\t\t{a: \"ab\", b: \"ba\", rel: 0.5},\n\t\t{a: \"abba\", b: \"aba\", rel: 0.75},\n\t\t{a: \"aba\", b: \"abba\", rel: 0.75},\n\t\t{a: \"résumé\", b: \"resume\", rel: 1 - float64(2)\/float64(6)},\n\t}\n\n\tfor _, v := range want {\n\t\tr := stringRel(v.a, v.b)\n\t\tif r != v.rel {\n\t\t\tt.Errorf(`Got %v, want rel(\"%v\", \"%v\")==%v`, r, v.a, v.b, v.rel)\n\t\t}\n\t}\n}\n\nfunc TestFFmpegSplitTimes(t *testing.T) {\n\t\/\/ We need to make up last track's duration: 3 minutes.\n\ttotaltime := float64(17*60 + 4 + 3*60)\n\n\twant := []struct {\n\t\ttrack int\n\t\tstart string\n\t\tduration string\n\t}{\n\t\t{track: 0, start: \"00:00:00.000\", duration: \"00:06:40.360\"},\n\t\t{track: 1, start: \"00:06:40.360\", duration: \"00:04:13.640\"},\n\t\t{track: 3, start: \"00:17:04.000\", duration: \"00:03:00.000\"},\n\t\t{track: 4, start: \"\", duration: \"\"},\n\t\t{track: 8, start: \"\", duration: \"\"},\n\t}\n\n\tbuf, err := ioutil.ReadFile(sampleCuesheet)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tsheet, err := cuesheet.New(buf)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfor _, v := range want {\n\t\tstart, duration := ffmpegSplitTimes(sheet, \"Faithless - Live in Berlin (CD1).mp3\", v.track, totaltime)\n\t\tif start != v.start || duration != v.duration {\n\t\t\tt.Errorf(\"Got {start: %v, duration: %v}, want {start: %v, duration: %v}\", start, duration, v.start, v.duration)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Stig Bakken (based on the works of Markus Lindenberg)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"regexp\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/common\/log\"\n)\n\nconst (\n\tnamespace = \"varnish_request\"\n)\n\ntype str_mapping struct {\n\tPattern *regexp.Regexp\n\tReplacement string\n}\n\nfunc main() {\n\t\/\/ TODO: add support for multiple Varnish instances (-S)\n\tvar (\n\t\tlistenAddress = flag.String(\"http.port\", \":9169\", \"Host\/port for HTTP server\")\n\t\tmetricsPath = flag.String(\"http.metricsurl\", \"\/metrics\", \"Prometheus metrics path\")\n\t\thttpHost = flag.String(\"varnish.host\", \"\", \"Virtual host to look for in Varnish logs (defaults to all hosts)\")\n\t\tmappings = flag.String(\"varnish.path-mappings\", \"\", \"Path mappings formatted like this: 'regexp->replace regex2->replace2'\")\n\t)\n\tflag.Parse()\n\n\t\/\/ Listen to signals\n\tsigchan := make(chan os.Signal, 1)\n\tsignal.Notify(sigchan, syscall.SIGTERM, syscall.SIGINT)\n\n\t\/\/ Set up 'varnishncsa' pipe\n\tcmdName := \"varnishncsa\"\n\tcmdArgs := buildVarnishncsaArgs(*httpHost)\n\tlog.Infof(\"Running command: %v %v\\n\", cmdName, cmdArgs)\n\tcmd := exec.Command(cmdName, cmdArgs...)\n\tcmdReader, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tscanner := bufio.NewScanner(cmdReader)\n\n\tpath_mappings, err := parseMappings(*mappings)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Setup metrics\n\tvarnishMessages := prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: namespace,\n\t\tName: \"exporter_log_messages\",\n\t\tHelp: \"Current total log messages received.\",\n\t})\n\terr = prometheus.Register(varnishMessages)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvarnishParseFailures := prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: namespace,\n\t\tName: \"exporter_log_parse_failure\",\n\t\tHelp: \"Number of errors while parsing log messages.\",\n\t})\n\terr = prometheus.Register(varnishParseFailures)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar msgs int64\n\n\tgo func() {\n\t\tfor scanner.Scan() {\n\t\t\tvarnishMessages.Inc()\n\t\t\tcontent := scanner.Text()\n\t\t\tmsgs++\n\t\t\tmetrics, labels, err := parseMessage(content, path_mappings)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, metric := range metrics {\n\t\t\t\tvar collector prometheus.Collector\n\t\t\t\tcollector, err = prometheus.RegisterOrGet(prometheus.NewHistogramVec(prometheus.HistogramOpts{\n\t\t\t\t\tNamespace: namespace,\n\t\t\t\t\tName: metric.Name,\n\t\t\t\t\tHelp: fmt.Sprintf(\"Varnish request log value for %s\", metric.Name),\n\t\t\t\t}, labels.Names))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcollector.(*prometheus.HistogramVec).WithLabelValues(labels.Values...).Observe(metric.Value)\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ Setup HTTP server\n\thttp.Handle(*metricsPath, prometheus.Handler())\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Write([]byte(`<html>\n <head><title>Varnish Request Exporter<\/title><\/head>\n <body>\n <h1>Varnish Request Exporter<\/h1>\n <p><a href='` + *metricsPath + `'>Metrics<\/a><\/p>\n <\/body>\n <\/html>`))\n\t})\n\n\tgo func() {\n\t\tlog.Infof(\"Starting Server: %s\", *listenAddress)\n\t\tlog.Fatal(http.ListenAndServe(*listenAddress, nil))\n\t}()\n\n\tgo func() {\n\t\terr = cmd.Start()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\terr = cmd.Wait()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlog.Infof(\"varnishncsa command exited\")\n\t\tlog.Infof(\"Messages received: %d\", msgs)\n\t\tos.Exit(0)\n\t}()\n\n\ts := <-sigchan\n\tlog.Infof(\"Received %v, terminating\", s)\n\tlog.Infof(\"Messages received: %d\", msgs)\n\n\n\tos.Exit(0)\n}\n\nfunc parseMappings(input string) (mappings []str_mapping, err error) {\n\tmappings = make([]str_mapping, 0)\n\tstr_mappings := strings.Split(input, \" \")\n\tfor i := range str_mappings {\n\t\tonemapping := str_mappings[i]\n\t\tif len(onemapping) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tparts := strings.Split(onemapping, \"->\")\n\t\tif len(parts) != 2 {\n\t\t\terr = fmt.Errorf(\"URL mapping must have two elements separated by \\\"->\\\", got \\\"%s\\\"\", onemapping)\n\t\t\treturn\n\t\t}\n\t\tmappings = append(mappings, str_mapping{ regexp.MustCompile(parts[0]), parts[1] })\n\t}\n\treturn\n}\n\nfunc buildVarnishncsaArgs(httpHost string) (args []string) {\n\targs = make([]string, 2, 4)\n\targs = append(args, \"-F\")\n\tif len(httpHost) == 0 {\n\t\targs = append(args, \"time:%D method=\\\"%m\\\" status=%s path=\\\"%U\\\" host=\\\"%{host}i\\\"\")\n\t} else {\n\t\targs = append(args, \"time:%D method=\\\"%m\\\" status=%s path=\\\"%U\\\"\")\n\t\targs = append(args, \"-q\")\n\t\targs = append(args, \"ReqHeader:host eq \\\"\" + httpHost + \"\\\"\")\n\t}\n\treturn\n}\n<commit_msg>Change default port to 9151 (registered in the Prometheus wiki)<commit_after>\/\/ Copyright 2016 Stig Bakken (based on the works of Markus Lindenberg)\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/http\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"os\/signal\"\n\t\"regexp\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com\/prometheus\/client_golang\/prometheus\"\n\t\"github.com\/prometheus\/common\/log\"\n)\n\nconst (\n\tnamespace = \"varnish_request\"\n)\n\ntype str_mapping struct {\n\tPattern *regexp.Regexp\n\tReplacement string\n}\n\nfunc main() {\n\t\/\/ TODO: add support for multiple Varnish instances (-S)\n\tvar (\n\t\tlistenAddress = flag.String(\"http.port\", \":9151\", \"Host\/port for HTTP server\")\n\t\tmetricsPath = flag.String(\"http.metricsurl\", \"\/metrics\", \"Prometheus metrics path\")\n\t\thttpHost = flag.String(\"varnish.host\", \"\", \"Virtual host to look for in Varnish logs (defaults to all hosts)\")\n\t\tmappings = flag.String(\"varnish.path-mappings\", \"\", \"Path mappings formatted like this: 'regexp->replace regex2->replace2'\")\n\t)\n\tflag.Parse()\n\n\t\/\/ Listen to signals\n\tsigchan := make(chan os.Signal, 1)\n\tsignal.Notify(sigchan, syscall.SIGTERM, syscall.SIGINT)\n\n\t\/\/ Set up 'varnishncsa' pipe\n\tcmdName := \"varnishncsa\"\n\tcmdArgs := buildVarnishncsaArgs(*httpHost)\n\tlog.Infof(\"Running command: %v %v\\n\", cmdName, cmdArgs)\n\tcmd := exec.Command(cmdName, cmdArgs...)\n\tcmdReader, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tscanner := bufio.NewScanner(cmdReader)\n\n\tpath_mappings, err := parseMappings(*mappings)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t\/\/ Setup metrics\n\tvarnishMessages := prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: namespace,\n\t\tName: \"exporter_log_messages\",\n\t\tHelp: \"Current total log messages received.\",\n\t})\n\terr = prometheus.Register(varnishMessages)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvarnishParseFailures := prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: namespace,\n\t\tName: \"exporter_log_parse_failure\",\n\t\tHelp: \"Number of errors while parsing log messages.\",\n\t})\n\terr = prometheus.Register(varnishParseFailures)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tvar msgs int64\n\n\tgo func() {\n\t\tfor scanner.Scan() {\n\t\t\tvarnishMessages.Inc()\n\t\t\tcontent := scanner.Text()\n\t\t\tmsgs++\n\t\t\tmetrics, labels, err := parseMessage(content, path_mappings)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, metric := range metrics {\n\t\t\t\tvar collector prometheus.Collector\n\t\t\t\tcollector, err = prometheus.RegisterOrGet(prometheus.NewHistogramVec(prometheus.HistogramOpts{\n\t\t\t\t\tNamespace: namespace,\n\t\t\t\t\tName: metric.Name,\n\t\t\t\t\tHelp: fmt.Sprintf(\"Varnish request log value for %s\", metric.Name),\n\t\t\t\t}, labels.Names))\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Error(err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcollector.(*prometheus.HistogramVec).WithLabelValues(labels.Values...).Observe(metric.Value)\n\t\t\t}\n\t\t}\n\t}()\n\n\t\/\/ Setup HTTP server\n\thttp.Handle(*metricsPath, prometheus.Handler())\n\thttp.HandleFunc(\"\/\", func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Write([]byte(`<html>\n <head><title>Varnish Request Exporter<\/title><\/head>\n <body>\n <h1>Varnish Request Exporter<\/h1>\n <p><a href='` + *metricsPath + `'>Metrics<\/a><\/p>\n <\/body>\n <\/html>`))\n\t})\n\n\tgo func() {\n\t\tlog.Infof(\"Starting Server: %s\", *listenAddress)\n\t\tlog.Fatal(http.ListenAndServe(*listenAddress, nil))\n\t}()\n\n\tgo func() {\n\t\terr = cmd.Start()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\terr = cmd.Wait()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tlog.Infof(\"varnishncsa command exited\")\n\t\tlog.Infof(\"Messages received: %d\", msgs)\n\t\tos.Exit(0)\n\t}()\n\n\ts := <-sigchan\n\tlog.Infof(\"Received %v, terminating\", s)\n\tlog.Infof(\"Messages received: %d\", msgs)\n\n\n\tos.Exit(0)\n}\n\nfunc parseMappings(input string) (mappings []str_mapping, err error) {\n\tmappings = make([]str_mapping, 0)\n\tstr_mappings := strings.Split(input, \" \")\n\tfor i := range str_mappings {\n\t\tonemapping := str_mappings[i]\n\t\tif len(onemapping) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tparts := strings.Split(onemapping, \"->\")\n\t\tif len(parts) != 2 {\n\t\t\terr = fmt.Errorf(\"URL mapping must have two elements separated by \\\"->\\\", got \\\"%s\\\"\", onemapping)\n\t\t\treturn\n\t\t}\n\t\tmappings = append(mappings, str_mapping{ regexp.MustCompile(parts[0]), parts[1] })\n\t}\n\treturn\n}\n\nfunc buildVarnishncsaArgs(httpHost string) (args []string) {\n\targs = make([]string, 2, 4)\n\targs = append(args, \"-F\")\n\tif len(httpHost) == 0 {\n\t\targs = append(args, \"time:%D method=\\\"%m\\\" status=%s path=\\\"%U\\\" host=\\\"%{host}i\\\"\")\n\t} else {\n\t\targs = append(args, \"time:%D method=\\\"%m\\\" status=%s path=\\\"%U\\\"\")\n\t\targs = append(args, \"-q\")\n\t\targs = append(args, \"ReqHeader:host eq \\\"\" + httpHost + \"\\\"\")\n\t}\n\treturn\n}\n<|endoftext|>"} {"text":"<commit_before>\/**\n * (C) Copyright 2014, Deft Labs\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at:\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage dlshared\n\nimport \"testing\"\nimport \"fmt\"\n\nconst (\n\tEmailTestAccessKeyId = \"HUH?\"\n\tEmailTestSecretAccessKey = \"WHAT?\"\n\tEmailTestFrom = \"Me <noreply@someplace.com>\"\n\tEmailTestTo = \"you@someplace.com\"\n)\n\n\/\/ These tests are difficult to run in an open source repo because they require aws credentials.\nfunc TestAwsTextEmail(t *testing.T) {\n\t\/*\n\tlogger := Logger { Prefix: \"test\", Appenders: []Appender{ LevelFilter(Debug, StdErrAppender()) } }\n\n\temailDs := NewAwsEmailDs(EmailTestAccessKeyId, EmailTestSecretAccessKey, NewDefaultHttpRequestClient(), logger)\n\n\tresponse, err := emailDs.SendTextEmailToOneAddress(EmailTestFrom, EmailTestTo, \"test subject\", \"this is a test text body\")\n\n\tif err != nil {\n\t\tt.Errorf(\"TestAwsTextEmail is broken: %v\")\n\t\treturn\n\t}\n\n\tfmt.Println(\"message id:\", response.(*AwsEmailResponse).MessageId)\n\tfmt.Println(\"request id:\", response.(*AwsEmailResponse).RequestId)\n\tfmt.Println(\"status code:\", response.(*AwsEmailResponse).HttpStatusCode)\n\t*\/\n}\n\nfunc TestAwsHtmlEmail(t *testing.T) {\n\t\/*\n\tbodyHtml := `<table width=\"100%\" border=\"0\" cellspacing=\"0\" cellpadding=\"0\">\n\t<tr><td><a href=\"http:\/\/deftlabs.com\">Test Link<\/a><\/td><\/tr>\n\t<\/table>\n\t`\n\tlogger := Logger { Prefix: \"test\", Appenders: []Appender{ LevelFilter(Debug, StdErrAppender()) } }\n\n\temailDs := NewAwsEmailDs(EmailTestAccessKeyId, EmailTestSecretAccessKey, NewDefaultHttpRequestClient(), logger)\n\n\tresponse, err := emailDs.SendHtmlEmailToOneAddress(EmailTestFrom, EmailTestTo, \"test subject\", bodyHtml, \"Test Link: http:\/\/deftlabs.com\")\n\n\tif err != nil {\n\t\tt.Errorf(\"TestAwsHtmlEmail is broken: %v\")\n\t\treturn\n\t}\n\n\tfmt.Println(\"message id:\", response.(*AwsEmailResponse).MessageId)\n\tfmt.Println(\"request id:\", response.(*AwsEmailResponse).RequestId)\n\tfmt.Println(\"status code:\", response.(*AwsEmailResponse).HttpStatusCode)\n\t*\/\n}\n\n<commit_msg>fixed build<commit_after>\/**\n * (C) Copyright 2014, Deft Labs\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at:\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\npackage dlshared\n\nimport (\n\t\/\/ \"fmt\"\n\t\"testing\"\n)\n\nconst (\n\tEmailTestAccessKeyId = \"HUH?\"\n\tEmailTestSecretAccessKey = \"WHAT?\"\n\tEmailTestFrom = \"Me <noreply@someplace.com>\"\n\tEmailTestTo = \"you@someplace.com\"\n)\n\n\/\/ These tests are difficult to run in an open source repo because they require aws credentials.\nfunc TestAwsTextEmail(t *testing.T) {\n\t\/*\n\tlogger := Logger { Prefix: \"test\", Appenders: []Appender{ LevelFilter(Debug, StdErrAppender()) } }\n\n\temailDs := NewAwsEmailDs(EmailTestAccessKeyId, EmailTestSecretAccessKey, NewDefaultHttpRequestClient(), logger)\n\n\tresponse, err := emailDs.SendTextEmailToOneAddress(EmailTestFrom, EmailTestTo, \"test subject\", \"this is a test text body\")\n\n\tif err != nil {\n\t\tt.Errorf(\"TestAwsTextEmail is broken: %v\")\n\t\treturn\n\t}\n\n\tfmt.Println(\"message id:\", response.(*AwsEmailResponse).MessageId)\n\tfmt.Println(\"request id:\", response.(*AwsEmailResponse).RequestId)\n\tfmt.Println(\"status code:\", response.(*AwsEmailResponse).HttpStatusCode)\n\t*\/\n}\n\nfunc TestAwsHtmlEmail(t *testing.T) {\n\t\/*\n\tbodyHtml := `<table width=\"100%\" border=\"0\" cellspacing=\"0\" cellpadding=\"0\">\n\t<tr><td><a href=\"http:\/\/deftlabs.com\">Test Link<\/a><\/td><\/tr>\n\t<\/table>\n\t`\n\tlogger := Logger { Prefix: \"test\", Appenders: []Appender{ LevelFilter(Debug, StdErrAppender()) } }\n\n\temailDs := NewAwsEmailDs(EmailTestAccessKeyId, EmailTestSecretAccessKey, NewDefaultHttpRequestClient(), logger)\n\n\tresponse, err := emailDs.SendHtmlEmailToOneAddress(EmailTestFrom, EmailTestTo, \"test subject\", bodyHtml, \"Test Link: http:\/\/deftlabs.com\")\n\n\tif err != nil {\n\t\tt.Errorf(\"TestAwsHtmlEmail is broken: %v\")\n\t\treturn\n\t}\n\n\tfmt.Println(\"message id:\", response.(*AwsEmailResponse).MessageId)\n\tfmt.Println(\"request id:\", response.(*AwsEmailResponse).RequestId)\n\tfmt.Println(\"status code:\", response.(*AwsEmailResponse).HttpStatusCode)\n\t*\/\n}\n\n<|endoftext|>"} {"text":"<commit_before>package walking\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/containerd\/containerd\/archive\"\n\t\"github.com\/containerd\/containerd\/archive\/compression\"\n\t\"github.com\/containerd\/containerd\/content\"\n\t\"github.com\/containerd\/containerd\/diff\"\n\t\"github.com\/containerd\/containerd\/errdefs\"\n\t\"github.com\/containerd\/containerd\/images\"\n\t\"github.com\/containerd\/containerd\/log\"\n\t\"github.com\/containerd\/containerd\/metadata\"\n\t\"github.com\/containerd\/containerd\/mount\"\n\t\"github.com\/containerd\/containerd\/platforms\"\n\t\"github.com\/containerd\/containerd\/plugin\"\n\tdigest \"github.com\/opencontainers\/go-digest\"\n\tocispec \"github.com\/opencontainers\/image-spec\/specs-go\/v1\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc init() {\n\tplugin.Register(&plugin.Registration{\n\t\tType: plugin.DiffPlugin,\n\t\tID: \"walking\",\n\t\tRequires: []plugin.Type{\n\t\t\tplugin.MetadataPlugin,\n\t\t},\n\t\tInitFn: func(ic *plugin.InitContext) (interface{}, error) {\n\t\t\tmd, err := ic.Get(plugin.MetadataPlugin)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec())\n\t\t\treturn NewWalkingDiff(md.(*metadata.DB).ContentStore())\n\t\t},\n\t})\n}\n\ntype walkingDiff struct {\n\tstore content.Store\n}\n\nvar emptyDesc = ocispec.Descriptor{}\n\n\/\/ NewWalkingDiff is a generic implementation of diff.Differ.\n\/\/ NewWalkingDiff is expected to work with any filesystem.\nfunc NewWalkingDiff(store content.Store) (diff.Differ, error) {\n\treturn &walkingDiff{\n\t\tstore: store,\n\t}, nil\n}\n\n\/\/ Apply applies the content associated with the provided digests onto the\n\/\/ provided mounts. Archive content will be extracted and decompressed if\n\/\/ necessary.\nfunc (s *walkingDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount) (d ocispec.Descriptor, err error) {\n\tt1 := time.Now()\n\tdefer func() {\n\t\tif err == nil {\n\t\t\tlog.G(ctx).WithFields(logrus.Fields{\n\t\t\t\t\"d\": time.Now().Sub(t1),\n\t\t\t\t\"dgst\": desc.Digest,\n\t\t\t\t\"size\": desc.Size,\n\t\t\t\t\"media\": desc.MediaType,\n\t\t\t}).Debugf(\"diff applied\")\n\t\t}\n\t}()\n\tvar isCompressed bool\n\tswitch desc.MediaType {\n\tcase ocispec.MediaTypeImageLayer, images.MediaTypeDockerSchema2Layer:\n\tcase ocispec.MediaTypeImageLayerGzip, images.MediaTypeDockerSchema2LayerGzip:\n\t\tisCompressed = true\n\tdefault:\n\t\t\/\/ Still apply all generic media types *.tar[.+]gzip and *.tar\n\t\tif strings.HasSuffix(desc.MediaType, \".tar.gzip\") || strings.HasSuffix(desc.MediaType, \".tar+gzip\") {\n\t\t\tisCompressed = true\n\t\t} else if !strings.HasSuffix(desc.MediaType, \".tar\") {\n\t\t\treturn emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, \"unsupported diff media type: %v\", desc.MediaType)\n\t\t}\n\t}\n\n\tdir, err := ioutil.TempDir(\"\", \"extract-\")\n\tif err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to create temporary directory\")\n\t}\n\tdefer os.Remove(dir)\n\n\tif err := mount.All(mounts, dir); err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to mount\")\n\t}\n\tdefer mount.Unmount(dir, 0)\n\n\tra, err := s.store.ReaderAt(ctx, desc.Digest)\n\tif err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to get reader from content store\")\n\t}\n\tdefer ra.Close()\n\n\tr := content.NewReader(ra)\n\tif isCompressed {\n\t\tds, err := compression.DecompressStream(r)\n\t\tif err != nil {\n\t\t\treturn emptyDesc, err\n\t\t}\n\t\tdefer ds.Close()\n\t\tr = ds\n\t}\n\n\tdigester := digest.Canonical.Digester()\n\trc := &readCounter{\n\t\tr: io.TeeReader(r, digester.Hash()),\n\t}\n\n\tif _, err := archive.Apply(ctx, dir, rc); err != nil {\n\t\treturn emptyDesc, err\n\t}\n\n\t\/\/ Read any trailing data\n\tif _, err := io.Copy(ioutil.Discard, rc); err != nil {\n\t\treturn emptyDesc, err\n\t}\n\n\treturn ocispec.Descriptor{\n\t\tMediaType: ocispec.MediaTypeImageLayer,\n\t\tSize: rc.c,\n\t\tDigest: digester.Digest(),\n\t}, nil\n}\n\n\/\/ DiffMounts creates a diff between the given mounts and uploads the result\n\/\/ to the content store.\nfunc (s *walkingDiff) DiffMounts(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) {\n\tvar config diff.Config\n\tfor _, opt := range opts {\n\t\tif err := opt(&config); err != nil {\n\t\t\treturn emptyDesc, err\n\t\t}\n\t}\n\n\tif config.MediaType == \"\" {\n\t\tconfig.MediaType = ocispec.MediaTypeImageLayerGzip\n\t}\n\n\tvar isCompressed bool\n\tswitch config.MediaType {\n\tcase ocispec.MediaTypeImageLayer:\n\tcase ocispec.MediaTypeImageLayerGzip:\n\t\tisCompressed = true\n\tdefault:\n\t\treturn emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, \"unsupported diff media type: %v\", config.MediaType)\n\t}\n\taDir, err := ioutil.TempDir(\"\", \"left-\")\n\tif err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to create temporary directory\")\n\t}\n\tdefer os.Remove(aDir)\n\n\tbDir, err := ioutil.TempDir(\"\", \"right-\")\n\tif err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to create temporary directory\")\n\t}\n\tdefer os.Remove(bDir)\n\n\tif err := mount.All(lower, aDir); err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to mount\")\n\t}\n\tdefer mount.Unmount(aDir, 0)\n\n\tif err := mount.All(upper, bDir); err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to mount\")\n\t}\n\tdefer mount.Unmount(bDir, 0)\n\n\tvar newReference bool\n\tif config.Reference == \"\" {\n\t\tnewReference = true\n\t\tconfig.Reference = uniqueRef()\n\t}\n\n\tcw, err := s.store.Writer(ctx, config.Reference, 0, \"\")\n\tif err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to open writer\")\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tcw.Close()\n\t\t\tif newReference {\n\t\t\t\tif err := s.store.Abort(ctx, config.Reference); err != nil {\n\t\t\t\t\tlog.G(ctx).WithField(\"ref\", config.Reference).Warnf(\"failed to delete diff upload\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\tif !newReference {\n\t\tif err := cw.Truncate(0); err != nil {\n\t\t\treturn emptyDesc, err\n\t\t}\n\t}\n\n\tif isCompressed {\n\t\tdgstr := digest.SHA256.Digester()\n\t\tcompressed, err := compression.CompressStream(cw, compression.Gzip)\n\t\tif err != nil {\n\t\t\treturn emptyDesc, errors.Wrap(err, \"failed to get compressed stream\")\n\t\t}\n\t\terr = archive.WriteDiff(ctx, io.MultiWriter(compressed, dgstr.Hash()), aDir, bDir)\n\t\tcompressed.Close()\n\t\tif err != nil {\n\t\t\treturn emptyDesc, errors.Wrap(err, \"failed to write compressed diff\")\n\t\t}\n\n\t\tif config.Labels == nil {\n\t\t\tconfig.Labels = map[string]string{}\n\t\t}\n\t\tconfig.Labels[\"containerd.io\/uncompressed\"] = dgstr.Digest().String()\n\t} else {\n\t\tif err = archive.WriteDiff(ctx, cw, aDir, bDir); err != nil {\n\t\t\treturn emptyDesc, errors.Wrap(err, \"failed to write diff\")\n\t\t}\n\t}\n\n\tvar commitopts []content.Opt\n\tif config.Labels != nil {\n\t\tcommitopts = append(commitopts, content.WithLabels(config.Labels))\n\t}\n\n\tdgst := cw.Digest()\n\tif err := cw.Commit(ctx, 0, dgst, commitopts...); err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to commit\")\n\t}\n\n\tinfo, err := s.store.Info(ctx, dgst)\n\tif err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to get info from content store\")\n\t}\n\n\treturn ocispec.Descriptor{\n\t\tMediaType: config.MediaType,\n\t\tSize: info.Size,\n\t\tDigest: info.Digest,\n\t}, nil\n}\n\ntype readCounter struct {\n\tr io.Reader\n\tc int64\n}\n\nfunc (rc *readCounter) Read(p []byte) (n int, err error) {\n\tn, err = rc.r.Read(p)\n\trc.c += int64(n)\n\treturn\n}\n\nfunc uniqueRef() string {\n\tt := time.Now()\n\tvar b [3]byte\n\t\/\/ Ignore read failures, just decreases uniqueness\n\trand.Read(b[:])\n\treturn fmt.Sprintf(\"%d-%s\", t.UnixNano(), base64.URLEncoding.EncodeToString(b[:]))\n}\n<commit_msg>output error when unmount fail in diff.Apply<commit_after>package walking\n\nimport (\n\t\"crypto\/rand\"\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/containerd\/containerd\/archive\"\n\t\"github.com\/containerd\/containerd\/archive\/compression\"\n\t\"github.com\/containerd\/containerd\/content\"\n\t\"github.com\/containerd\/containerd\/diff\"\n\t\"github.com\/containerd\/containerd\/errdefs\"\n\t\"github.com\/containerd\/containerd\/images\"\n\t\"github.com\/containerd\/containerd\/log\"\n\t\"github.com\/containerd\/containerd\/metadata\"\n\t\"github.com\/containerd\/containerd\/mount\"\n\t\"github.com\/containerd\/containerd\/platforms\"\n\t\"github.com\/containerd\/containerd\/plugin\"\n\tdigest \"github.com\/opencontainers\/go-digest\"\n\tocispec \"github.com\/opencontainers\/image-spec\/specs-go\/v1\"\n\t\"github.com\/pkg\/errors\"\n\t\"github.com\/sirupsen\/logrus\"\n\t\"golang.org\/x\/net\/context\"\n)\n\nfunc init() {\n\tplugin.Register(&plugin.Registration{\n\t\tType: plugin.DiffPlugin,\n\t\tID: \"walking\",\n\t\tRequires: []plugin.Type{\n\t\t\tplugin.MetadataPlugin,\n\t\t},\n\t\tInitFn: func(ic *plugin.InitContext) (interface{}, error) {\n\t\t\tmd, err := ic.Get(plugin.MetadataPlugin)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tic.Meta.Platforms = append(ic.Meta.Platforms, platforms.DefaultSpec())\n\t\t\treturn NewWalkingDiff(md.(*metadata.DB).ContentStore())\n\t\t},\n\t})\n}\n\ntype walkingDiff struct {\n\tstore content.Store\n}\n\nvar emptyDesc = ocispec.Descriptor{}\n\n\/\/ NewWalkingDiff is a generic implementation of diff.Differ.\n\/\/ NewWalkingDiff is expected to work with any filesystem.\nfunc NewWalkingDiff(store content.Store) (diff.Differ, error) {\n\treturn &walkingDiff{\n\t\tstore: store,\n\t}, nil\n}\n\n\/\/ Apply applies the content associated with the provided digests onto the\n\/\/ provided mounts. Archive content will be extracted and decompressed if\n\/\/ necessary.\nfunc (s *walkingDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount) (d ocispec.Descriptor, err error) {\n\tt1 := time.Now()\n\tdefer func() {\n\t\tif err == nil {\n\t\t\tlog.G(ctx).WithFields(logrus.Fields{\n\t\t\t\t\"d\": time.Now().Sub(t1),\n\t\t\t\t\"dgst\": desc.Digest,\n\t\t\t\t\"size\": desc.Size,\n\t\t\t\t\"media\": desc.MediaType,\n\t\t\t}).Debugf(\"diff applied\")\n\t\t}\n\t}()\n\tvar isCompressed bool\n\tswitch desc.MediaType {\n\tcase ocispec.MediaTypeImageLayer, images.MediaTypeDockerSchema2Layer:\n\tcase ocispec.MediaTypeImageLayerGzip, images.MediaTypeDockerSchema2LayerGzip:\n\t\tisCompressed = true\n\tdefault:\n\t\t\/\/ Still apply all generic media types *.tar[.+]gzip and *.tar\n\t\tif strings.HasSuffix(desc.MediaType, \".tar.gzip\") || strings.HasSuffix(desc.MediaType, \".tar+gzip\") {\n\t\t\tisCompressed = true\n\t\t} else if !strings.HasSuffix(desc.MediaType, \".tar\") {\n\t\t\treturn emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, \"unsupported diff media type: %v\", desc.MediaType)\n\t\t}\n\t}\n\n\tdir, err := ioutil.TempDir(\"\", \"extract-\")\n\tif err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to create temporary directory\")\n\t}\n\t\/\/ We change RemoveAll to Remove so that we either leak a temp dir\n\t\/\/ if it fails but not RM snapshot data. refer to #1868 #1785\n\tdefer os.Remove(dir)\n\n\tif err := mount.All(mounts, dir); err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to mount\")\n\t}\n\tdefer func() {\n\t\tif uerr := mount.Unmount(dir, 0); uerr != nil {\n\t\t\tif err == nil {\n\t\t\t\terr = uerr\n\t\t\t}\n\t\t}\n\t}()\n\n\tra, err := s.store.ReaderAt(ctx, desc.Digest)\n\tif err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to get reader from content store\")\n\t}\n\tdefer ra.Close()\n\n\tr := content.NewReader(ra)\n\tif isCompressed {\n\t\tds, err := compression.DecompressStream(r)\n\t\tif err != nil {\n\t\t\treturn emptyDesc, err\n\t\t}\n\t\tdefer ds.Close()\n\t\tr = ds\n\t}\n\n\tdigester := digest.Canonical.Digester()\n\trc := &readCounter{\n\t\tr: io.TeeReader(r, digester.Hash()),\n\t}\n\n\tif _, err := archive.Apply(ctx, dir, rc); err != nil {\n\t\treturn emptyDesc, err\n\t}\n\n\t\/\/ Read any trailing data\n\tif _, err := io.Copy(ioutil.Discard, rc); err != nil {\n\t\treturn emptyDesc, err\n\t}\n\n\treturn ocispec.Descriptor{\n\t\tMediaType: ocispec.MediaTypeImageLayer,\n\t\tSize: rc.c,\n\t\tDigest: digester.Digest(),\n\t}, nil\n}\n\n\/\/ DiffMounts creates a diff between the given mounts and uploads the result\n\/\/ to the content store.\nfunc (s *walkingDiff) DiffMounts(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) {\n\tvar config diff.Config\n\tfor _, opt := range opts {\n\t\tif err := opt(&config); err != nil {\n\t\t\treturn emptyDesc, err\n\t\t}\n\t}\n\n\tif config.MediaType == \"\" {\n\t\tconfig.MediaType = ocispec.MediaTypeImageLayerGzip\n\t}\n\n\tvar isCompressed bool\n\tswitch config.MediaType {\n\tcase ocispec.MediaTypeImageLayer:\n\tcase ocispec.MediaTypeImageLayerGzip:\n\t\tisCompressed = true\n\tdefault:\n\t\treturn emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, \"unsupported diff media type: %v\", config.MediaType)\n\t}\n\taDir, err := ioutil.TempDir(\"\", \"left-\")\n\tif err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to create temporary directory\")\n\t}\n\tdefer os.Remove(aDir)\n\n\tbDir, err := ioutil.TempDir(\"\", \"right-\")\n\tif err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to create temporary directory\")\n\t}\n\tdefer os.Remove(bDir)\n\n\tif err := mount.All(lower, aDir); err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to mount\")\n\t}\n\tdefer func() {\n\t\tif uerr := mount.Unmount(aDir, 0); uerr != nil {\n\t\t\tif err == nil {\n\t\t\t\terr = uerr\n\t\t\t}\n\t\t}\n\t}()\n\n\tif err := mount.All(upper, bDir); err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to mount\")\n\t}\n\tdefer func() {\n\t\tif uerr := mount.Unmount(bDir, 0); uerr != nil {\n\t\t\tif err == nil {\n\t\t\t\terr = uerr\n\t\t\t}\n\t\t}\n\t}()\n\n\tvar newReference bool\n\tif config.Reference == \"\" {\n\t\tnewReference = true\n\t\tconfig.Reference = uniqueRef()\n\t}\n\n\tcw, err := s.store.Writer(ctx, config.Reference, 0, \"\")\n\tif err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to open writer\")\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tcw.Close()\n\t\t\tif newReference {\n\t\t\t\tif err := s.store.Abort(ctx, config.Reference); err != nil {\n\t\t\t\t\tlog.G(ctx).WithField(\"ref\", config.Reference).Warnf(\"failed to delete diff upload\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\tif !newReference {\n\t\tif err := cw.Truncate(0); err != nil {\n\t\t\treturn emptyDesc, err\n\t\t}\n\t}\n\n\tif isCompressed {\n\t\tdgstr := digest.SHA256.Digester()\n\t\tcompressed, err := compression.CompressStream(cw, compression.Gzip)\n\t\tif err != nil {\n\t\t\treturn emptyDesc, errors.Wrap(err, \"failed to get compressed stream\")\n\t\t}\n\t\terr = archive.WriteDiff(ctx, io.MultiWriter(compressed, dgstr.Hash()), aDir, bDir)\n\t\tcompressed.Close()\n\t\tif err != nil {\n\t\t\treturn emptyDesc, errors.Wrap(err, \"failed to write compressed diff\")\n\t\t}\n\n\t\tif config.Labels == nil {\n\t\t\tconfig.Labels = map[string]string{}\n\t\t}\n\t\tconfig.Labels[\"containerd.io\/uncompressed\"] = dgstr.Digest().String()\n\t} else {\n\t\tif err = archive.WriteDiff(ctx, cw, aDir, bDir); err != nil {\n\t\t\treturn emptyDesc, errors.Wrap(err, \"failed to write diff\")\n\t\t}\n\t}\n\n\tvar commitopts []content.Opt\n\tif config.Labels != nil {\n\t\tcommitopts = append(commitopts, content.WithLabels(config.Labels))\n\t}\n\n\tdgst := cw.Digest()\n\tif err := cw.Commit(ctx, 0, dgst, commitopts...); err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to commit\")\n\t}\n\n\tinfo, err := s.store.Info(ctx, dgst)\n\tif err != nil {\n\t\treturn emptyDesc, errors.Wrap(err, \"failed to get info from content store\")\n\t}\n\n\treturn ocispec.Descriptor{\n\t\tMediaType: config.MediaType,\n\t\tSize: info.Size,\n\t\tDigest: info.Digest,\n\t}, nil\n}\n\ntype readCounter struct {\n\tr io.Reader\n\tc int64\n}\n\nfunc (rc *readCounter) Read(p []byte) (n int, err error) {\n\tn, err = rc.r.Read(p)\n\trc.c += int64(n)\n\treturn\n}\n\nfunc uniqueRef() string {\n\tt := time.Now()\n\tvar b [3]byte\n\t\/\/ Ignore read failures, just decreases uniqueness\n\trand.Read(b[:])\n\treturn fmt.Sprintf(\"%d-%s\", t.UnixNano(), base64.URLEncoding.EncodeToString(b[:]))\n}\n<|endoftext|>"} {"text":"<commit_before>package algoliasearch\n\nimport (\n\"crypto\/hmac\"\n\"crypto\/sha256\"\n\"encoding\/hex\"\n\"errors\"\n\"time\"\n\"reflect\"\n)\n\ntype Client struct {\n transport *Transport\n}\n\nfunc NewClient(appID, apiKey string) *Client {\n client := new(Client)\n client.transport = NewTransport(appID, apiKey)\n return client\n}\n\nfunc NewClientWithHosts(appID, apiKey string, hosts []string) *Client {\n client := new(Client)\n client.transport = NewTransportWithHosts(appID, apiKey, hosts)\n return client\n}\n\nfunc (c *Client) SetExtraHeader(key string, value string) {\n c.transport.setExtraHeader(key, value)\n}\n\nfunc (c *Client) SetTimeout(connectTimeout int, readTimeout int) {\n c.transport.setTimeout(time.Duration(connectTimeout) * time.Millisecond, time.Duration(readTimeout) * time.Millisecond)\n}\n\nfunc (c *Client) ListIndexes() (interface{}, error) {\n return c.transport.request(\"GET\", \"\/1\/indexes\", nil, read)\n}\n\nfunc (c *Client) InitIndex(indexName string) *Index {\n return NewIndex(indexName, c)\n}\n\nfunc (c *Client) ListKeys() (interface{}, error) {\n return c.transport.request(\"GET\", \"\/1\/keys\", nil, read)\n}\n\nfunc (c *Client) AddKey(acl, indexes []string, validity int, maxQueriesPerIPPerHour int, maxHitsPerQuery int) (interface{}, error) {\n body := make(map[string]interface{})\n body[\"acl\"] = acl\n body[\"maxHitsPerQuery\"] = maxHitsPerQuery\n body[\"maxQueriesPerIPPerHour\"] = maxQueriesPerIPPerHour\n body[\"validity\"] = validity\n body[\"indexes\"] = indexes\n return c.AddKeyWithParam(body)\n}\n\nfunc (c *Client) AddKeyWithParam(params interface{}) (interface{}, error) {\n return c.transport.request(\"POST\", \"\/1\/keys\/\", params, read)\n}\n\nfunc (c *Client) UpdateKey(key string, acl, indexes []string, validity int, maxQueriesPerIPPerHour int, maxHitsPerQuery int) (interface{}, error) {\n body := make(map[string]interface{})\n body[\"acl\"] = acl\n body[\"maxHitsPerQuery\"] = maxHitsPerQuery\n body[\"maxQueriesPerIPPerHour\"] = maxQueriesPerIPPerHour\n body[\"validity\"] = validity\n body[\"indexes\"] = indexes\n return c.UpdateKeyWithParam(key, body)\n}\n\nfunc (c *Client) UpdateKeyWithParam(key string, params interface{}) (interface{}, error) {\n return c.transport.request(\"PUT\", \"\/1\/keys\/\" + key, params, write)\n}\n\n\nfunc (c *Client) GetKey(key string) (interface{}, error) {\n return c.transport.request(\"GET\", \"\/1\/keys\/\" + key, nil, read)\n}\n\nfunc (c *Client) DeleteKey(key string) (interface{}, error) {\n return c.transport.request(\"DELETE\", \"\/1\/keys\/\" + key, nil, write)\n}\n\nfunc (c *Client) GetLogs(offset, length int, logType string) (interface{}, error) {\n body := make(map[string]interface{})\n body[\"offset\"] = offset\n body[\"length\"] = length\n body[\"type\"] = logType\n return c.transport.request(\"GET\", \"\/1\/logs\", body, write)\n}\n\nfunc (c *Client) GenerateSecuredApiKey(apiKey string, public interface{}, userToken ...string) (string, error) {\n if len(userToken) > 1 {\n return \"\", errors.New(\"Too many parameters\")\n }\n key := []byte(apiKey)\n h := hmac.New(sha256.New, key)\n var userTokenStr string\n if len(userToken) == 1 {\n userTokenStr = userToken[0]\n } else {\n userTokenStr = \"\"\n }\n if reflect.TypeOf(public).Name() != \"string\" {\n public = c.transport.EncodeParams(public)\n }\n\n message := public.(string) + userTokenStr\n h.Write([]byte(message))\n return hex.EncodeToString(h.Sum(nil)), nil\n}\n\nfunc (c *Client) MultipleQueries(queries []interface{}, optionals ...string) (interface{}, error) {\n if len(optionals) > 2 {\n return \"\", errors.New(\"Too many parametters\")\n }\n var nameKey string\n if len(optionals) >= 1 {\n nameKey = optionals[0]\n } else {\n nameKey = \"indexName\"\n }\n var strategy string = \"none\"\n if len(optionals) == 2 {\n strategy = optionals[1]\n }\n requests := make([]map[string]interface{}, len(queries))\n for i := range queries {\n requests[i] = make(map[string]interface{})\n requests[i][\"indexName\"] = queries[i].(map[string]interface{})[nameKey].(string)\n delete(queries[i].(map[string]interface{}), nameKey)\n requests[i][\"params\"] = c.transport.EncodeParams(queries[i])\n }\n body := make(map[string]interface{})\n body[\"requests\"] = requests\n return c.transport.request(\"POST\", \"\/1\/indexes\/*\/queries?strategy=\" + strategy, body, search)\n}\n<commit_msg>Expose the custom batch to allow batch on multiple indexes<commit_after>package algoliasearch\n\nimport (\n\"crypto\/hmac\"\n\"crypto\/sha256\"\n\"encoding\/hex\"\n\"errors\"\n\"time\"\n\"reflect\"\n)\n\ntype Client struct {\n transport *Transport\n}\n\nfunc NewClient(appID, apiKey string) *Client {\n client := new(Client)\n client.transport = NewTransport(appID, apiKey)\n return client\n}\n\nfunc NewClientWithHosts(appID, apiKey string, hosts []string) *Client {\n client := new(Client)\n client.transport = NewTransportWithHosts(appID, apiKey, hosts)\n return client\n}\n\nfunc (c *Client) SetExtraHeader(key string, value string) {\n c.transport.setExtraHeader(key, value)\n}\n\nfunc (c *Client) SetTimeout(connectTimeout int, readTimeout int) {\n c.transport.setTimeout(time.Duration(connectTimeout) * time.Millisecond, time.Duration(readTimeout) * time.Millisecond)\n}\n\nfunc (c *Client) ListIndexes() (interface{}, error) {\n return c.transport.request(\"GET\", \"\/1\/indexes\", nil, read)\n}\n\nfunc (c *Client) InitIndex(indexName string) *Index {\n return NewIndex(indexName, c)\n}\n\nfunc (c *Client) ListKeys() (interface{}, error) {\n return c.transport.request(\"GET\", \"\/1\/keys\", nil, read)\n}\n\nfunc (c *Client) AddKey(acl, indexes []string, validity int, maxQueriesPerIPPerHour int, maxHitsPerQuery int) (interface{}, error) {\n body := make(map[string]interface{})\n body[\"acl\"] = acl\n body[\"maxHitsPerQuery\"] = maxHitsPerQuery\n body[\"maxQueriesPerIPPerHour\"] = maxQueriesPerIPPerHour\n body[\"validity\"] = validity\n body[\"indexes\"] = indexes\n return c.AddKeyWithParam(body)\n}\n\nfunc (c *Client) AddKeyWithParam(params interface{}) (interface{}, error) {\n return c.transport.request(\"POST\", \"\/1\/keys\/\", params, read)\n}\n\nfunc (c *Client) UpdateKey(key string, acl, indexes []string, validity int, maxQueriesPerIPPerHour int, maxHitsPerQuery int) (interface{}, error) {\n body := make(map[string]interface{})\n body[\"acl\"] = acl\n body[\"maxHitsPerQuery\"] = maxHitsPerQuery\n body[\"maxQueriesPerIPPerHour\"] = maxQueriesPerIPPerHour\n body[\"validity\"] = validity\n body[\"indexes\"] = indexes\n return c.UpdateKeyWithParam(key, body)\n}\n\nfunc (c *Client) UpdateKeyWithParam(key string, params interface{}) (interface{}, error) {\n return c.transport.request(\"PUT\", \"\/1\/keys\/\" + key, params, write)\n}\n\n\nfunc (c *Client) GetKey(key string) (interface{}, error) {\n return c.transport.request(\"GET\", \"\/1\/keys\/\" + key, nil, read)\n}\n\nfunc (c *Client) DeleteKey(key string) (interface{}, error) {\n return c.transport.request(\"DELETE\", \"\/1\/keys\/\" + key, nil, write)\n}\n\nfunc (c *Client) GetLogs(offset, length int, logType string) (interface{}, error) {\n body := make(map[string]interface{})\n body[\"offset\"] = offset\n body[\"length\"] = length\n body[\"type\"] = logType\n return c.transport.request(\"GET\", \"\/1\/logs\", body, write)\n}\n\nfunc (c *Client) GenerateSecuredApiKey(apiKey string, public interface{}, userToken ...string) (string, error) {\n if len(userToken) > 1 {\n return \"\", errors.New(\"Too many parameters\")\n }\n key := []byte(apiKey)\n h := hmac.New(sha256.New, key)\n var userTokenStr string\n if len(userToken) == 1 {\n userTokenStr = userToken[0]\n } else {\n userTokenStr = \"\"\n }\n if reflect.TypeOf(public).Name() != \"string\" {\n public = c.transport.EncodeParams(public)\n }\n\n message := public.(string) + userTokenStr\n h.Write([]byte(message))\n return hex.EncodeToString(h.Sum(nil)), nil\n}\n\nfunc (c *Client) MultipleQueries(queries []interface{}, optionals ...string) (interface{}, error) {\n if len(optionals) > 2 {\n return \"\", errors.New(\"Too many parametters\")\n }\n var nameKey string\n if len(optionals) >= 1 {\n nameKey = optionals[0]\n } else {\n nameKey = \"indexName\"\n }\n var strategy string = \"none\"\n if len(optionals) == 2 {\n strategy = optionals[1]\n }\n requests := make([]map[string]interface{}, len(queries))\n for i := range queries {\n requests[i] = make(map[string]interface{})\n requests[i][\"indexName\"] = queries[i].(map[string]interface{})[nameKey].(string)\n delete(queries[i].(map[string]interface{}), nameKey)\n requests[i][\"params\"] = c.transport.EncodeParams(queries[i])\n }\n body := make(map[string]interface{})\n body[\"requests\"] = requests\n return c.transport.request(\"POST\", \"\/1\/indexes\/*\/queries?strategy=\" + strategy, body, search)\n}\n\nfunc (i *Index) CustomBatch(queries interface{}) (interface{}, error) {\n request := make(map[string]interface{})\n request[\"requests\"] = queries\n return i.client.transport.request(\"POST\", \"\/1\/indexes\/*\/batch\", request, write)\n}\n<|endoftext|>"} {"text":"<commit_before>package apiserver\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com\/openshift\/origin\/test\/extended\/operators\"\n\t\"io\/ioutil\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/onsi\/ginkgo\"\n\t\"github.com\/onsi\/gomega\"\n\tconfigv1 \"github.com\/openshift\/api\/config\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\ttypes \"k8s.io\/apimachinery\/pkg\/types\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/uuid\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/client-go\/rest\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\te2eskipper \"k8s.io\/kubernetes\/test\/e2e\/framework\/skipper\"\n\n\t\"github.com\/openshift\/origin\/test\/extended\/scheme\"\n\t\"github.com\/openshift\/origin\/test\/extended\/single_node\"\n\texutil \"github.com\/openshift\/origin\/test\/extended\/util\"\n\te2e \"k8s.io\/kubernetes\/test\/e2e\/framework\"\n)\n\nvar _ = ginkgo.Describe(\"[Conformance][sig-sno][Serial] Cluster\", func() {\n\tf := framework.NewDefaultFramework(\"cluster-resiliency\")\n\tf.SkipNamespaceCreation = true\n\tf.SkipPrivilegedPSPBinding = true\n\n\toc := exutil.NewCLIWithoutNamespace(\"cluster-resiliency\")\n\n\tginkgo.It(\"should allow a fast rollout of kube-apiserver with no pods restarts during API disruption\", func() {\n\t\tcontrolPlaneTopology, _ := single_node.GetTopologies(f)\n\n\t\tif controlPlaneTopology != configv1.SingleReplicaTopologyMode {\n\t\t\te2eskipper.Skipf(\"Test is only relevant for single replica topologies\")\n\t\t}\n\n\t\tconfig, err := framework.LoadConfig()\n\t\tframework.ExpectNoError(err)\n\n\t\tsetRESTConfigDefaults(config)\n\t\trestClient, err := rest.RESTClientFor(config)\n\t\tframework.ExpectNoError(err)\n\n\t\thttpClient := restClient.Client\n\n\t\tginkgo.By(\"Making sure no previous rollout is in progress\")\n\t\tclusterApiServer, err := oc.AdminOperatorClient().OperatorV1().KubeAPIServers().Get(context.Background(), \"cluster\", metav1.GetOptions{})\n\t\tframework.ExpectNoError(err)\n\t\tgomega.Expect(clusterApiServer.Status.NodeStatuses[0].TargetRevision).To(gomega.Equal(int32(0)))\n\n\t\tginkgo.By(\"Initialize pods restart count\")\n\t\trestartingContainers := make(map[operators.ContainerName]int)\n\t\tc, err := e2e.LoadClientset()\n\t\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n\t\t\/\/ This will just load the restartingContainers map with the current restart count\n\t\t\/\/ The current restart count is the baseline for validating that there was no restarts during the API rollout\n\t\t_ = GetRestartedPods(c, restartingContainers)\n\n\t\tginkgo.By(\"Forcing API rollout\")\n\t\tforceApiRollout(oc)\n\n\t\tginkgo.By(\"Expecting API to become unavailable\")\n\t\twait.PollImmediate(time.Second, time.Minute, func() (bool, error) {\n\t\t\tready, _, err := isApiReady(config, httpClient)\n\t\t\treturn !ready, err\n\t\t})\n\n\t\tstart := time.Now()\n\n\t\tginkgo.By(\"Expecting API to become ready\")\n\t\twait.PollImmediate(time.Second, time.Minute, func() (bool, error) {\n\t\t\tready, _, _ := isApiReady(config, httpClient)\n\t\t\treturn ready, nil\n\t\t})\n\n\t\tend := time.Now()\n\n\t\tginkgo.By(\"Measuring disruption duration time\")\n\t\tdisruptionDuration := end.Sub(start)\n\t\t\/\/ For more information: https:\/\/github.com\/openshift\/origin\/pull\/26337\/files#r698435488\n\t\tgomega.Expect(disruptionDuration).To(gomega.BeNumerically(\"<\", 40*time.Second),\n\t\t\tfmt.Sprintf(\"Total time of disruption is %v which is more than 40 seconds. \", disruptionDuration)+\n\t\t\t\t\"Actual SLO for this is 60 seconds, yet we want to be notified about major regressions\")\n\n\t\tginkgo.By(\"with no pods restarts during API disruption\")\n\t\tnames := GetRestartedPods(c, restartingContainers)\n\t\tgomega.Expect(len(names)).To(gomega.Equal(0), \"Some pods in got restarted during kube-apiserver rollout: %s\", strings.Join(names, \", \"))\n\n\t\t\/\/ Workaround for issues identified in https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=2059581\n\t\t\/\/ TODO: Remove this sleep once that bug is resolved\n\t\ttime.Sleep(60 * time.Second)\n\t})\n\n})\n\nfunc GetRestartedPods(c *kubernetes.Clientset, restartingContainers map[operators.ContainerName]int) (names []string) {\n\tpods := operators.GetPodsWithFilter(c, []operators.PodFilter{operators.InCoreNamespaces, ignoreNamespaces})\n\tfor _, pod := range pods {\n\t\tif pod.Status.Phase == corev1.PodSucceeded {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ This will just load the restartingContainers map with the current restart count\n\t\tif operators.HasExcessiveRestarts(pod, 1, restartingContainers) {\n\t\t\tkey := fmt.Sprintf(\"%s\/%s\", pod.Namespace, pod.Name)\n\t\t\tnames = append(names, key)\n\t\t}\n\t}\n\treturn names\n}\n\nfunc setRESTConfigDefaults(config *rest.Config) {\n\tif config.GroupVersion == nil {\n\t\tconfig.GroupVersion = &schema.GroupVersion{Group: \"\", Version: \"v1\"}\n\t}\n\n\tif config.NegotiatedSerializer == nil {\n\t\tconfig.NegotiatedSerializer = scheme.Codecs\n\t}\n}\n\nfunc forceApiRollout(oc *exutil.CLI) {\n\tredeploymentReason := fmt.Sprintf(`{\"spec\":{\"forceRedeploymentReason\":\"resiliency-test-%v\"}}`, uuid.NewUUID())\n\n\t_, err := oc.AdminOperatorClient().OperatorV1().KubeAPIServers().Patch(context.Background(), \"cluster\", types.MergePatchType,\n\t\t[]byte(redeploymentReason), metav1.PatchOptions{})\n\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n}\n\nfunc isApiReady(clusterConfig *rest.Config, httpClient *http.Client) (ready bool, reason string, err error) {\n\tresp, err := httpClient.Get(clusterConfig.Host + \"\/readyz\")\n\tif resp != nil && resp.Body != nil {\n\t\tdefer resp.Body.Close()\n\t}\n\n\tif err != nil {\n\t\treturn false, \"client failed to make the request\", err\n\t}\n\n\tif resp.StatusCode < 200 || resp.StatusCode >= 400 {\n\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\tif err == nil {\n\t\t\treturn false, fmt.Sprintf(\"got status code %v from the server: %v\", resp.Status, body), nil\n\t\t}\n\n\t\treturn false, fmt.Sprintf(\"got status code %v from the server\", resp.Status), err\n\t}\n\n\treturn true, \"kube-apiserver is ready\", nil\n}\n\nfunc ignoreNamespaces(pod *corev1.Pod) bool {\n\treturn !(strings.HasPrefix(pod.Namespace, \"openshift-kube-apiserver\") ||\n\t\tstrings.HasPrefix(pod.Namespace, \"openshift-kube-controller-manager\")) \/\/ remove this once https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=2001330 is fixed\n}\n<commit_msg>MGMT-9440: Fix single node serial tests API crash<commit_after>package apiserver\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"github.com\/openshift\/origin\/test\/extended\/operators\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\t\"k8s.io\/client-go\/kubernetes\"\n\t\"net\/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/onsi\/ginkgo\"\n\t\"github.com\/onsi\/gomega\"\n\tconfigv1 \"github.com\/openshift\/api\/config\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\t\"k8s.io\/apimachinery\/pkg\/runtime\/schema\"\n\ttypes \"k8s.io\/apimachinery\/pkg\/types\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/uuid\"\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\t\"k8s.io\/client-go\/rest\"\n\t\"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\te2eskipper \"k8s.io\/kubernetes\/test\/e2e\/framework\/skipper\"\n\n\t\"github.com\/openshift\/origin\/test\/extended\/scheme\"\n\t\"github.com\/openshift\/origin\/test\/extended\/single_node\"\n\texutil \"github.com\/openshift\/origin\/test\/extended\/util\"\n\te2e \"k8s.io\/kubernetes\/test\/e2e\/framework\"\n)\n\nvar _ = ginkgo.Describe(\"[Conformance][sig-sno][Serial] Cluster\", func() {\n\tf := framework.NewDefaultFramework(\"cluster-resiliency\")\n\tf.SkipNamespaceCreation = true\n\tf.SkipPrivilegedPSPBinding = true\n\n\toc := exutil.NewCLIWithoutNamespace(\"cluster-resiliency\")\n\n\tginkgo.It(\"should allow a fast rollout of kube-apiserver with no pods restarts during API disruption\", func() {\n\t\tcontrolPlaneTopology, _ := single_node.GetTopologies(f)\n\n\t\tif controlPlaneTopology != configv1.SingleReplicaTopologyMode {\n\t\t\te2eskipper.Skipf(\"Test is only relevant for single replica topologies\")\n\t\t}\n\n\t\tconfig, err := framework.LoadConfig()\n\t\tframework.ExpectNoError(err)\n\n\t\tsetRESTConfigDefaults(config)\n\t\trestClient, err := rest.RESTClientFor(config)\n\t\tframework.ExpectNoError(err)\n\n\t\thttpClient := restClient.Client\n\n\t\tginkgo.By(\"Making sure no previous rollout is in progress\")\n\t\tclusterApiServer, err := oc.AdminOperatorClient().OperatorV1().KubeAPIServers().Get(context.Background(), \"cluster\", metav1.GetOptions{})\n\t\tframework.ExpectNoError(err)\n\t\tgomega.Expect(clusterApiServer.Status.NodeStatuses[0].TargetRevision).To(gomega.Equal(int32(0)))\n\n\t\tginkgo.By(\"Initialize pods restart count\")\n\t\trestartingContainers := make(map[operators.ContainerName]int)\n\t\tc, err := e2e.LoadClientset()\n\t\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n\t\t\/\/ This will just load the restartingContainers map with the current restart count\n\t\t\/\/ The current restart count is the baseline for validating that there was no restarts during the API rollout\n\t\t_ = GetRestartedPods(c, restartingContainers)\n\n\t\tginkgo.By(\"Forcing API rollout\")\n\t\tforceApiRollout(oc)\n\n\t\t\/\/ We are taking the API down, this can often take more than a minute so we have provided a reasonably generous timeout.\n\t\tginkgo.By(\"Expecting API to become unavailable\")\n\t\terr = wait.PollImmediate(time.Second, 5*time.Minute, func() (bool, error) {\n\t\t\tready := isApiReady(config, httpClient)\n\t\t\treturn !ready, nil\n\t\t})\n\n\t\tgomega.Expect(err).NotTo(gomega.HaveOccurred(), \"The API failed to become unavailable within the desired timeout\")\n\n\t\tstart := time.Now()\n\n\t\tginkgo.By(\"Expecting API to become ready\")\n\t\terr = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {\n\t\t\tready := isApiReady(config, httpClient)\n\t\t\treturn ready, nil\n\t\t})\n\n\t\tgomega.Expect(err).NotTo(gomega.HaveOccurred(), \"The API failed to become available again within the desired timeout\")\n\n\t\tend := time.Now()\n\n\t\tginkgo.By(\"Measuring disruption duration time\")\n\t\tdisruptionDuration := end.Sub(start)\n\t\t\/\/ For more information: https:\/\/github.com\/openshift\/origin\/pull\/26337\/files#r698435488\n\t\tgomega.Expect(disruptionDuration).To(gomega.BeNumerically(\"<\", 40*time.Second),\n\t\t\tfmt.Sprintf(\"Total time of disruption is %v which is more than 40 seconds. \", disruptionDuration)+\n\t\t\t\t\"Actual SLO for this is 60 seconds, yet we want to be notified about major regressions\")\n\n\t\tginkgo.By(\"with no pods restarts during API disruption\")\n\t\tnames := GetRestartedPods(c, restartingContainers)\n\t\tgomega.Expect(len(names)).To(gomega.Equal(0), \"Some pods in got restarted during kube-apiserver rollout: %s\", strings.Join(names, \", \"))\n\t})\n\n})\n\nfunc GetRestartedPods(c *kubernetes.Clientset, restartingContainers map[operators.ContainerName]int) (names []string) {\n\tpods := operators.GetPodsWithFilter(c, []operators.PodFilter{operators.InCoreNamespaces, ignoreNamespaces})\n\tfor _, pod := range pods {\n\t\tif pod.Status.Phase == corev1.PodSucceeded {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ This will just load the restartingContainers map with the current restart count\n\t\tif operators.HasExcessiveRestarts(pod, 1, restartingContainers) {\n\t\t\tkey := fmt.Sprintf(\"%s\/%s\", pod.Namespace, pod.Name)\n\t\t\tnames = append(names, key)\n\t\t}\n\t}\n\treturn names\n}\n\nfunc setRESTConfigDefaults(config *rest.Config) {\n\tif config.GroupVersion == nil {\n\t\tconfig.GroupVersion = &schema.GroupVersion{Group: \"\", Version: \"v1\"}\n\t}\n\n\tif config.NegotiatedSerializer == nil {\n\t\tconfig.NegotiatedSerializer = scheme.Codecs\n\t}\n}\n\nfunc forceApiRollout(oc *exutil.CLI) {\n\tredeploymentReason := fmt.Sprintf(`{\"spec\":{\"forceRedeploymentReason\":\"resiliency-test-%v\"}}`, uuid.NewUUID())\n\n\t_, err := oc.AdminOperatorClient().OperatorV1().KubeAPIServers().Patch(context.Background(), \"cluster\", types.MergePatchType,\n\t\t[]byte(redeploymentReason), metav1.PatchOptions{})\n\tgomega.Expect(err).NotTo(gomega.HaveOccurred())\n}\n\nfunc isApiReady(clusterConfig *rest.Config, httpClient *http.Client) (ready bool) {\n\tresp, err := httpClient.Get(clusterConfig.Host + \"\/readyz\")\n\tif resp != nil && resp.Body != nil {\n\t\tdefer resp.Body.Close()\n\t}\n\tif err != nil || resp.StatusCode < 200 || resp.StatusCode >= 400 {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc ignoreNamespaces(pod *corev1.Pod) bool {\n\treturn !(strings.HasPrefix(pod.Namespace, \"openshift-kube-apiserver\") ||\n\t\tstrings.HasPrefix(pod.Namespace, \"openshift-kube-controller-manager\")) \/\/ remove this once https:\/\/bugzilla.redhat.com\/show_bug.cgi?id=2001330 is fixed\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/olekukonko\/tablewriter\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype Projects struct{}\n\n\/\/ New creates a new project\nfunc (projects *Projects) New(name string, language string, source string, script string) {\n\n\timage := &Image{}\n\timage.Name = name\n\timage.Language = language\n\timage.MakeBin = true\n\timage.Source = source\n\timage.SystemVolumes = true\n\timage.Buildable = true\n\n\tdata, err := ioutil.ReadFile(script)\n\tif err == nil {\n\t\timage.Script = string(data)\n\t}\n\n\tb, err := json.Marshal(image)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tresponse, err := Cli.Postit(b, APIBase+\"\/image\/new\")\n\tif err != nil {\n\t\tfmt.Println(RedBold(response))\n\t} else {\n\t\tfmt.Println(GreenBold(response))\n\t}\n}\n\n\/\/ Delete deletes a project\nfunc (projects *Projects) Delete(name string) {\n\n\timage := &Image{}\n\timage.Name = name\n\tb, err := json.Marshal(image)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tresponse, err := Cli.Postit(b, APIBase+\"\/image\/remove\")\n\tif err != nil {\n\t\tfmt.Println(RedBold(response))\n\t} else {\n\t\tfmt.Println(GreenBold(response))\n\t}\n}\n\n\/\/ NewFromImage creates a new project from an image\nfunc (projects *Projects) NewFromImage(name string, imagePath string) {\n\tdata, err := ioutil.ReadFile(imagePath)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\tresponse, err := Cli.Postit(data, putURL+\"\/\"+url.QueryEscape(name))\n\tif err != nil {\n\t\tfmt.Println(RedBold(response))\n\t} else {\n\t\tfmt.Println(GreenBold(response))\n\t}\n\n}\n\ntype Project struct {\n\tID int\n\tName string\n\tBuildable string\n\tLanguage string\n\tSource string\n\tBuildStatus string\n\tFilename string\n\tAddon string\n\tCreatedAt time.Time\n}\n\ntype ProjectsResponse struct {\n\tTitle string\n\tError string\n\tProjects []Project\n}\n\n\/\/ List lists all your projects\nfunc (projects *Projects) List() {\n\tpr := ProjectsResponse{}\n\terr := Cli.GetJSON(APIBase+\"\/image\/display\", &pr)\n\tif err != nil {\n\t\tfmt.Println(RedBold(err.Error()))\n\t} else {\n\t\tfmt.Println(GreenBold(pr.Title))\n\n\t\ttable := tablewriter.NewWriter(os.Stdout)\n\t\ttable.SetAutoFormatHeaders(false)\n\n\t\ttable.SetHeader([]string{GreenBold(\"ID\"), GreenBold(\"Name\"),\n\t\t\tGreenBold(\"Buildable\"), GreenBold(\"Language\"), GreenBold(\"Source\"),\n\t\t\tGreenBold(\"BuildStatus\"), GreenBold(\"Filename\"), GreenBold(\"Addon\")})\n\n\t\tfor i := 0; i < len(pr.Projects); i++ {\n\t\t\tsid := strconv.Itoa(pr.Projects[i].ID)\n\n\t\t\ttable.Append([]string{sid,\n\t\t\t\tpr.Projects[i].Name,\n\t\t\t\tpr.Projects[i].Buildable,\n\t\t\t\tpr.Projects[i].Language,\n\t\t\t\tpr.Projects[i].Source,\n\t\t\t\tpr.Projects[i].BuildStatus,\n\t\t\t\tpr.Projects[i].Filename,\n\t\t\t\tpr.Projects[i].Addon,\n\t\t\t})\n\t\t}\n\n\t\ttable.Render()\n\t}\n\n}\n\n\/\/ Log shows the log output for your project\nfunc (projects *Projects) Log(name string) {\n\timage := &Image{}\n\n\timage.Name = name\n\tb, err := json.Marshal(image)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\tresponse, err := Cli.Postit(b, APIBase+\"\/image\/makelog\")\n\tif err != nil {\n\t\tfmt.Println(RedBold(response))\n\t} else {\n\t\tfmt.Println(GreenBold(response))\n\t}\n\n}\n\n\/\/ Download a project root image\nfunc (projects *Projects) Download(name string) {\n\n\timage := &Image{}\n\timage.Name = name\n\tb, err := json.Marshal(image)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\terr = Cli.GrabFile(b, imageURL+\"\/get\", name)\n\tif err != nil {\n\t\tfmt.Println(RedBold(err.Error()))\n\t} else {\n\t\tfmt.Println(GreenBold(\"file saved\"))\n\t}\n\n}\n\n\/\/ Upload uploads a project\nfunc (projects *Projects) Upload(name string, binary string) {\n\tdata, err := ioutil.ReadFile(binary)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tresponse, err := Cli.Postit(data, putURL+\"\/\"+url.QueryEscape(name))\n\tif err != nil {\n\t\tfmt.Println(RedBold(response))\n\t} else {\n\t\tfmt.Println(GreenBold(response))\n\t}\n\n}\n\ntype ManifestVolume struct {\n\tId int\n\tFile string\n\tMount string\n}\n\ntype ManifestProcess struct {\n\tMemory int\n\tKernel string\n\tHash string\n\tCmdline string\n\tEnv string\n\tVolumes []ManifestVolume\n}\n\ntype Manifest struct {\n\tProcesses []ManifestProcess\n}\n\n\/\/ Manifest is a json representation for your project\n\/\/ useful for running things locally\nfunc (projects *Projects) Manifest(name string) {\n\tm := Manifest{}\n\terr := Cli.GetJSON(APIBase+\"\/projects\/manifest\/\"+name, &m)\n\tif err != nil {\n\t\tfmt.Println(RedBold(err.Error()))\n\t\treturn\n\t}\n\n\t\/\/ dupey-dupe\n\tjs, err := json.Marshal(m)\n\tif err != nil {\n\t\tfmt.Println(RedBold(err.Error()))\n\t\treturn\n\t}\n\n\terr = ioutil.WriteFile(name+\".manifest\", js, 0644)\n\tif err != nil {\n\t\tfmt.Println(RedBold(err.Error()))\n\t}\n\n}\n<commit_msg>multiboot<commit_after>package api\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"github.com\/olekukonko\/tablewriter\"\n\t\"io\/ioutil\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n)\n\ntype Projects struct{}\n\n\/\/ New creates a new project\nfunc (projects *Projects) New(name string, language string, source string, script string) {\n\n\timage := &Image{}\n\timage.Name = name\n\timage.Language = language\n\timage.MakeBin = true\n\timage.Source = source\n\timage.SystemVolumes = true\n\timage.Buildable = true\n\n\tdata, err := ioutil.ReadFile(script)\n\tif err == nil {\n\t\timage.Script = string(data)\n\t}\n\n\tb, err := json.Marshal(image)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tresponse, err := Cli.Postit(b, APIBase+\"\/image\/new\")\n\tif err != nil {\n\t\tfmt.Println(RedBold(response))\n\t} else {\n\t\tfmt.Println(GreenBold(response))\n\t}\n}\n\n\/\/ Delete deletes a project\nfunc (projects *Projects) Delete(name string) {\n\n\timage := &Image{}\n\timage.Name = name\n\tb, err := json.Marshal(image)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tresponse, err := Cli.Postit(b, APIBase+\"\/image\/remove\")\n\tif err != nil {\n\t\tfmt.Println(RedBold(response))\n\t} else {\n\t\tfmt.Println(GreenBold(response))\n\t}\n}\n\n\/\/ NewFromImage creates a new project from an image\nfunc (projects *Projects) NewFromImage(name string, imagePath string) {\n\tdata, err := ioutil.ReadFile(imagePath)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\tresponse, err := Cli.Postit(data, putURL+\"\/\"+url.QueryEscape(name))\n\tif err != nil {\n\t\tfmt.Println(RedBold(response))\n\t} else {\n\t\tfmt.Println(GreenBold(response))\n\t}\n\n}\n\ntype Project struct {\n\tID int\n\tName string\n\tBuildable string\n\tLanguage string\n\tSource string\n\tBuildStatus string\n\tFilename string\n\tAddon string\n\tCreatedAt time.Time\n}\n\ntype ProjectsResponse struct {\n\tTitle string\n\tError string\n\tProjects []Project\n}\n\n\/\/ List lists all your projects\nfunc (projects *Projects) List() {\n\tpr := ProjectsResponse{}\n\terr := Cli.GetJSON(APIBase+\"\/image\/display\", &pr)\n\tif err != nil {\n\t\tfmt.Println(RedBold(err.Error()))\n\t} else {\n\t\tfmt.Println(GreenBold(pr.Title))\n\n\t\ttable := tablewriter.NewWriter(os.Stdout)\n\t\ttable.SetAutoFormatHeaders(false)\n\n\t\ttable.SetHeader([]string{GreenBold(\"ID\"), GreenBold(\"Name\"),\n\t\t\tGreenBold(\"Buildable\"), GreenBold(\"Language\"), GreenBold(\"Source\"),\n\t\t\tGreenBold(\"BuildStatus\"), GreenBold(\"Filename\"), GreenBold(\"Addon\")})\n\n\t\tfor i := 0; i < len(pr.Projects); i++ {\n\t\t\tsid := strconv.Itoa(pr.Projects[i].ID)\n\n\t\t\ttable.Append([]string{sid,\n\t\t\t\tpr.Projects[i].Name,\n\t\t\t\tpr.Projects[i].Buildable,\n\t\t\t\tpr.Projects[i].Language,\n\t\t\t\tpr.Projects[i].Source,\n\t\t\t\tpr.Projects[i].BuildStatus,\n\t\t\t\tpr.Projects[i].Filename,\n\t\t\t\tpr.Projects[i].Addon,\n\t\t\t})\n\t\t}\n\n\t\ttable.Render()\n\t}\n\n}\n\n\/\/ Log shows the log output for your project\nfunc (projects *Projects) Log(name string) {\n\timage := &Image{}\n\n\timage.Name = name\n\tb, err := json.Marshal(image)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\tresponse, err := Cli.Postit(b, APIBase+\"\/image\/makelog\")\n\tif err != nil {\n\t\tfmt.Println(RedBold(response))\n\t} else {\n\t\tfmt.Println(GreenBold(response))\n\t}\n\n}\n\n\/\/ Download a project root image\nfunc (projects *Projects) Download(name string) {\n\n\timage := &Image{}\n\timage.Name = name\n\tb, err := json.Marshal(image)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\terr = Cli.GrabFile(b, imageURL+\"\/get\", name)\n\tif err != nil {\n\t\tfmt.Println(RedBold(err.Error()))\n\t} else {\n\t\tfmt.Println(GreenBold(\"file saved\"))\n\t}\n\n}\n\n\/\/ Upload uploads a project\nfunc (projects *Projects) Upload(name string, binary string) {\n\tdata, err := ioutil.ReadFile(binary)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tresponse, err := Cli.Postit(data, putURL+\"\/\"+url.QueryEscape(name))\n\tif err != nil {\n\t\tfmt.Println(RedBold(response))\n\t} else {\n\t\tfmt.Println(GreenBold(response))\n\t}\n\n}\n\ntype ManifestVolume struct {\n\tId int\n\tFile string\n\tMount string\n}\n\ntype ManifestProcess struct {\n\tMemory int\n\tKernel string\n\tMultiboot bool\n\tHash string\n\tCmdline string\n\tEnv string\n\tVolumes []ManifestVolume\n}\n\ntype Manifest struct {\n\tProcesses []ManifestProcess\n}\n\n\/\/ Manifest is a json representation for your project\n\/\/ useful for running things locally\nfunc (projects *Projects) Manifest(name string) {\n\tm := Manifest{}\n\terr := Cli.GetJSON(APIBase+\"\/projects\/manifest\/\"+name, &m)\n\tif err != nil {\n\t\tfmt.Println(RedBold(err.Error()))\n\t\treturn\n\t}\n\n\t\/\/ dupey-dupe\n\tjs, err := json.Marshal(m)\n\tif err != nil {\n\t\tfmt.Println(RedBold(err.Error()))\n\t\treturn\n\t}\n\n\terr = ioutil.WriteFile(name+\".manifest\", js, 0644)\n\tif err != nil {\n\t\tfmt.Println(RedBold(err.Error()))\n\t}\n\n}\n<|endoftext|>"} {"text":"<commit_before>package univedo\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\n\t\"code.google.com\/p\/go.net\/websocket\"\n\t\/\/ TODO remove\n\t_ \"fmt\"\n\t\"net\/url\"\n)\n\n\/\/ registeredRemoteObjects is a map from RO name to factory function\nvar registeredRemoteObjects = make(map[string]func(id uint64, s sender) RemoteObject)\n\n\/\/ RegisterRemoteObject adds a remote object factory for a RO name\nfunc RegisterRemoteObject(name string, factory func(id uint64, session sender) RemoteObject) {\n\tregisteredRemoteObjects[name] = factory\n}\n\n\/\/ A Connection with an univedo server\ntype Connection struct {\n\tws *websocket.Conn\n\turologin RemoteObject\n\tremoteObjects map[uint64]RemoteObject\n}\n\n\/\/ originForURL returns an origin matching the given URL\nfunc originForURL(s string) (string, error) {\n\tu, err := url.Parse(s)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\torigin := &url.URL{Scheme: \"http\", Host: u.Host}\n\treturn origin.String(), nil\n}\n\n\/\/ Dial opens a new connection with an univedo server\nfunc Dial(url string) (*Connection, error) {\n\t\/\/ Extract the origin from the URL\n\torigin, err := originForURL(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Dial the websocket\n\turl += \"v1\"\n\tws, err := websocket.Dial(url, \"\", origin)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &Connection{ws: ws, remoteObjects: make(map[uint64]RemoteObject)}\n\tgo func() {\n\t\t\/\/ TODO error handling\n\t\terr := c.handleWebsocket()\n\t\t\/*\t\tfmt.Printf(\"%s\\n\", err.Error())*\/\n\t\t_ = err\n\t}()\n\n\t\/\/ Login\n\tc.urologin = NewBasicRO(0, c)\n\tc.remoteObjects[0] = c.urologin\n\n\treturn c, nil\n}\n\n\/\/ Close the connection\nfunc (c *Connection) Close() {\n\tc.ws.Close()\n}\n\n\/\/ GetSession connects to a bucket with credentials\nfunc (c *Connection) GetSession(bucket string, creds map[string]interface{}) (*Session, error) {\n\tiSession, err := c.urologin.CallROM(\"getSession\", bucket, creds)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsession, ok := iSession.(*Session)\n\tif !ok {\n\t\treturn nil, errors.New(\"getSession did not return a remote object\")\n\t}\n\treturn session, nil\n}\n\nfunc (c *Connection) sendMessage(data ...interface{}) error {\n\tm := &message{buffer: &bytes.Buffer{}}\n\tfor _, v := range data {\n\t\tm.send(v)\n\t}\n\treturn websocket.Message.Send(c.ws, m.buffer.Bytes())\n}\n\nfunc (c *Connection) handleWebsocket() error {\n\tfor {\n\t\tvar buffer []byte\n\t\terr := websocket.Message.Receive(c.ws, &buffer)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tmsg := &message{buffer: bytes.NewBuffer(buffer), createRO: c.receiveRO}\n\n\t\tiRoID, err := msg.read()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\troID, ok := iRoID.(uint64)\n\t\tif !ok {\n\t\t\treturn errors.New(\"ro id should be int\")\n\t\t}\n\n\t\tro := c.remoteObjects[roID]\n\t\tif ro == nil {\n\t\t\treturn errors.New(\"ro not known\")\n\t\t}\n\n\t\tvar data []interface{}\n\t\tfor !msg.empty() {\n\t\t\tv, err := msg.read()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdata = append(data, v)\n\t\t}\n\n\t\terr = ro.receive(data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\nfunc (c *Connection) receiveRO(id uint64, name string) interface{} {\n\tvar ro RemoteObject\n\tfactory := registeredRemoteObjects[name]\n\tif factory != nil {\n\t\tro = factory(id, c)\n\t} else {\n\t\tro = NewBasicRO(id, c)\n\t}\n\tc.remoteObjects[id] = ro\n\treturn ro\n}\n<commit_msg>This commit allows univedo uri's w\/o trailing \/<commit_after>package univedo\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"strings\"\n\n\t\"code.google.com\/p\/go.net\/websocket\"\n\t\/\/ TODO remove\n\t_ \"fmt\"\n\t\"net\/url\"\n)\n\n\/\/ registeredRemoteObjects is a map from RO name to factory function\nvar registeredRemoteObjects = make(map[string]func(id uint64, s sender) RemoteObject)\n\n\/\/ RegisterRemoteObject adds a remote object factory for a RO name\nfunc RegisterRemoteObject(name string, factory func(id uint64, session sender) RemoteObject) {\n\tregisteredRemoteObjects[name] = factory\n}\n\n\/\/ A Connection with an univedo server\ntype Connection struct {\n\tws *websocket.Conn\n\turologin RemoteObject\n\tremoteObjects map[uint64]RemoteObject\n}\n\n\/\/ originForURL returns an origin matching the given URL\nfunc originForURL(s string) (string, error) {\n\tu, err := url.Parse(s)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\torigin := &url.URL{Scheme: \"http\", Host: u.Host}\n\treturn origin.String(), nil\n}\n\n\/\/ Dial opens a new connection with an univedo server\nfunc Dial(url string) (*Connection, error) {\n\t\/\/ Extract the origin from the URL\n\torigin, err := originForURL(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Dial the websocket\n\tif !strings.HasSuffix(url, \"\/\") {\n\t\turl += \"\/\"\n\t}\n\n\turl += \"v1\"\n\tws, err := websocket.Dial(url, \"\", origin)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := &Connection{ws: ws, remoteObjects: make(map[uint64]RemoteObject)}\n\tgo func() {\n\t\t\/\/ TODO error handling\n\t\terr := c.handleWebsocket()\n\t\t\/*\t\tfmt.Printf(\"%s\\n\", err.Error())*\/\n\t\t_ = err\n\t}()\n\n\t\/\/ Login\n\tc.urologin = NewBasicRO(0, c)\n\tc.remoteObjects[0] = c.urologin\n\n\treturn c, nil\n}\n\n\/\/ Close the connection\nfunc (c *Connection) Close() {\n\tc.ws.Close()\n}\n\n\/\/ GetSession connects to a bucket with credentials\nfunc (c *Connection) GetSession(bucket string, creds map[string]interface{}) (*Session, error) {\n\tiSession, err := c.urologin.CallROM(\"getSession\", bucket, creds)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsession, ok := iSession.(*Session)\n\tif !ok {\n\t\treturn nil, errors.New(\"getSession did not return a remote object\")\n\t}\n\treturn session, nil\n}\n\nfunc (c *Connection) sendMessage(data ...interface{}) error {\n\tm := &message{buffer: &bytes.Buffer{}}\n\tfor _, v := range data {\n\t\tm.send(v)\n\t}\n\treturn websocket.Message.Send(c.ws, m.buffer.Bytes())\n}\n\nfunc (c *Connection) handleWebsocket() error {\n\tfor {\n\t\tvar buffer []byte\n\t\terr := websocket.Message.Receive(c.ws, &buffer)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tmsg := &message{buffer: bytes.NewBuffer(buffer), createRO: c.receiveRO}\n\n\t\tiRoID, err := msg.read()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\troID, ok := iRoID.(uint64)\n\t\tif !ok {\n\t\t\treturn errors.New(\"ro id should be int\")\n\t\t}\n\n\t\tro := c.remoteObjects[roID]\n\t\tif ro == nil {\n\t\t\treturn errors.New(\"ro not known\")\n\t\t}\n\n\t\tvar data []interface{}\n\t\tfor !msg.empty() {\n\t\t\tv, err := msg.read()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdata = append(data, v)\n\t\t}\n\n\t\terr = ro.receive(data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\nfunc (c *Connection) receiveRO(id uint64, name string) interface{} {\n\tvar ro RemoteObject\n\tfactory := registeredRemoteObjects[name]\n\tif factory != nil {\n\t\tro = factory(id, c)\n\t} else {\n\t\tro = NewBasicRO(id, c)\n\t}\n\tc.remoteObjects[id] = ro\n\treturn ro\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build !ignore_autogenerated\n\n\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ This file was autogenerated by conversion-gen. Do not edit it manually!\n\npackage v1beta1\n\nimport (\n\tfederation \"k8s.io\/kubernetes\/federation\/apis\/federation\"\n\tapi \"k8s.io\/kubernetes\/pkg\/api\"\n\tv1 \"k8s.io\/kubernetes\/pkg\/api\/v1\"\n\tconversion \"k8s.io\/kubernetes\/pkg\/conversion\"\n\truntime \"k8s.io\/kubernetes\/pkg\/runtime\"\n)\n\nfunc init() {\n\tSchemeBuilder.Register(RegisterConversions)\n}\n\n\/\/ RegisterConversions adds conversion functions to the given scheme.\n\/\/ Public to allow building arbitrary schemes.\nfunc RegisterConversions(scheme *runtime.Scheme) error {\n\treturn scheme.AddGeneratedConversionFuncs(\n\t\tConvert_v1beta1_Cluster_To_federation_Cluster,\n\t\tConvert_federation_Cluster_To_v1beta1_Cluster,\n\t\tConvert_v1beta1_ClusterCondition_To_federation_ClusterCondition,\n\t\tConvert_federation_ClusterCondition_To_v1beta1_ClusterCondition,\n\t\tConvert_v1beta1_ClusterList_To_federation_ClusterList,\n\t\tConvert_federation_ClusterList_To_v1beta1_ClusterList,\n\t\tConvert_v1beta1_ClusterSpec_To_federation_ClusterSpec,\n\t\tConvert_federation_ClusterSpec_To_v1beta1_ClusterSpec,\n\t\tConvert_v1beta1_ClusterStatus_To_federation_ClusterStatus,\n\t\tConvert_federation_ClusterStatus_To_v1beta1_ClusterStatus,\n\t\tConvert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR,\n\t\tConvert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR,\n\t)\n}\n\nfunc autoConvert_v1beta1_Cluster_To_federation_Cluster(in *Cluster, out *federation.Cluster, s conversion.Scope) error {\n\tif err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {\n\t\treturn err\n\t}\n\t\/\/ TODO: Inefficient conversion - can we improve it?\n\tif err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {\n\t\treturn err\n\t}\n\tif err := Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec(&in.Spec, &out.Spec, s); err != nil {\n\t\treturn err\n\t}\n\tif err := Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus(&in.Status, &out.Status, s); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc Convert_v1beta1_Cluster_To_federation_Cluster(in *Cluster, out *federation.Cluster, s conversion.Scope) error {\n\treturn autoConvert_v1beta1_Cluster_To_federation_Cluster(in, out, s)\n}\n\nfunc autoConvert_federation_Cluster_To_v1beta1_Cluster(in *federation.Cluster, out *Cluster, s conversion.Scope) error {\n\tif err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {\n\t\treturn err\n\t}\n\t\/\/ TODO: Inefficient conversion - can we improve it?\n\tif err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {\n\t\treturn err\n\t}\n\tif err := Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec(&in.Spec, &out.Spec, s); err != nil {\n\t\treturn err\n\t}\n\tif err := Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus(&in.Status, &out.Status, s); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc Convert_federation_Cluster_To_v1beta1_Cluster(in *federation.Cluster, out *Cluster, s conversion.Scope) error {\n\treturn autoConvert_federation_Cluster_To_v1beta1_Cluster(in, out, s)\n}\n\nfunc autoConvert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in *ClusterCondition, out *federation.ClusterCondition, s conversion.Scope) error {\n\tout.Type = federation.ClusterConditionType(in.Type)\n\tout.Status = api.ConditionStatus(in.Status)\n\tif err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil {\n\t\treturn err\n\t}\n\tif err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil {\n\t\treturn err\n\t}\n\tout.Reason = in.Reason\n\tout.Message = in.Message\n\treturn nil\n}\n\nfunc Convert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in *ClusterCondition, out *federation.ClusterCondition, s conversion.Scope) error {\n\treturn autoConvert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in, out, s)\n}\n\nfunc autoConvert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in *federation.ClusterCondition, out *ClusterCondition, s conversion.Scope) error {\n\tout.Type = ClusterConditionType(in.Type)\n\tout.Status = v1.ConditionStatus(in.Status)\n\tif err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastProbeTime, &out.LastProbeTime, s); err != nil {\n\t\treturn err\n\t}\n\tif err := api.Convert_unversioned_Time_To_unversioned_Time(&in.LastTransitionTime, &out.LastTransitionTime, s); err != nil {\n\t\treturn err\n\t}\n\tout.Reason = in.Reason\n\tout.Message = in.Message\n\treturn nil\n}\n\nfunc Convert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in *federation.ClusterCondition, out *ClusterCondition, s conversion.Scope) error {\n\treturn autoConvert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in, out, s)\n}\n\nfunc autoConvert_v1beta1_ClusterList_To_federation_ClusterList(in *ClusterList, out *federation.ClusterList, s conversion.Scope) error {\n\tif err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {\n\t\treturn err\n\t}\n\tif err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {\n\t\treturn err\n\t}\n\tif in.Items != nil {\n\t\tin, out := &in.Items, &out.Items\n\t\t*out = make([]federation.Cluster, len(*in))\n\t\tfor i := range *in {\n\t\t\tif err := Convert_v1beta1_Cluster_To_federation_Cluster(&(*in)[i], &(*out)[i], s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tout.Items = nil\n\t}\n\treturn nil\n}\n\nfunc Convert_v1beta1_ClusterList_To_federation_ClusterList(in *ClusterList, out *federation.ClusterList, s conversion.Scope) error {\n\treturn autoConvert_v1beta1_ClusterList_To_federation_ClusterList(in, out, s)\n}\n\nfunc autoConvert_federation_ClusterList_To_v1beta1_ClusterList(in *federation.ClusterList, out *ClusterList, s conversion.Scope) error {\n\tif err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {\n\t\treturn err\n\t}\n\tif err := api.Convert_unversioned_ListMeta_To_unversioned_ListMeta(&in.ListMeta, &out.ListMeta, s); err != nil {\n\t\treturn err\n\t}\n\tif in.Items != nil {\n\t\tin, out := &in.Items, &out.Items\n\t\t*out = make([]Cluster, len(*in))\n\t\tfor i := range *in {\n\t\t\tif err := Convert_federation_Cluster_To_v1beta1_Cluster(&(*in)[i], &(*out)[i], s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tout.Items = nil\n\t}\n\treturn nil\n}\n\nfunc Convert_federation_ClusterList_To_v1beta1_ClusterList(in *federation.ClusterList, out *ClusterList, s conversion.Scope) error {\n\treturn autoConvert_federation_ClusterList_To_v1beta1_ClusterList(in, out, s)\n}\n\nfunc autoConvert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in *ClusterSpec, out *federation.ClusterSpec, s conversion.Scope) error {\n\tif in.ServerAddressByClientCIDRs != nil {\n\t\tin, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs\n\t\t*out = make([]federation.ServerAddressByClientCIDR, len(*in))\n\t\tfor i := range *in {\n\t\t\tif err := Convert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(&(*in)[i], &(*out)[i], s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tout.ServerAddressByClientCIDRs = nil\n\t}\n\tif in.SecretRef != nil {\n\t\tin, out := &in.SecretRef, &out.SecretRef\n\t\t*out = new(api.LocalObjectReference)\n\t\t\/\/ TODO: Inefficient conversion - can we improve it?\n\t\tif err := s.Convert(*in, *out, 0); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tout.SecretRef = nil\n\t}\n\treturn nil\n}\n\nfunc Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in *ClusterSpec, out *federation.ClusterSpec, s conversion.Scope) error {\n\treturn autoConvert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in, out, s)\n}\n\nfunc autoConvert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in *federation.ClusterSpec, out *ClusterSpec, s conversion.Scope) error {\n\tif in.ServerAddressByClientCIDRs != nil {\n\t\tin, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs\n\t\t*out = make([]ServerAddressByClientCIDR, len(*in))\n\t\tfor i := range *in {\n\t\t\tif err := Convert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(&(*in)[i], &(*out)[i], s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tout.ServerAddressByClientCIDRs = nil\n\t}\n\tif in.SecretRef != nil {\n\t\tin, out := &in.SecretRef, &out.SecretRef\n\t\t*out = new(v1.LocalObjectReference)\n\t\t\/\/ TODO: Inefficient conversion - can we improve it?\n\t\tif err := s.Convert(*in, *out, 0); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tout.SecretRef = nil\n\t}\n\treturn nil\n}\n\nfunc Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in *federation.ClusterSpec, out *ClusterSpec, s conversion.Scope) error {\n\treturn autoConvert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in, out, s)\n}\n\nfunc autoConvert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in *ClusterStatus, out *federation.ClusterStatus, s conversion.Scope) error {\n\tif in.Conditions != nil {\n\t\tin, out := &in.Conditions, &out.Conditions\n\t\t*out = make([]federation.ClusterCondition, len(*in))\n\t\tfor i := range *in {\n\t\t\tif err := Convert_v1beta1_ClusterCondition_To_federation_ClusterCondition(&(*in)[i], &(*out)[i], s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tout.Conditions = nil\n\t}\n\tout.Zones = in.Zones\n\tout.Region = in.Region\n\treturn nil\n}\n\nfunc Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in *ClusterStatus, out *federation.ClusterStatus, s conversion.Scope) error {\n\treturn autoConvert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in, out, s)\n}\n\nfunc autoConvert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in *federation.ClusterStatus, out *ClusterStatus, s conversion.Scope) error {\n\tif in.Conditions != nil {\n\t\tin, out := &in.Conditions, &out.Conditions\n\t\t*out = make([]ClusterCondition, len(*in))\n\t\tfor i := range *in {\n\t\t\tif err := Convert_federation_ClusterCondition_To_v1beta1_ClusterCondition(&(*in)[i], &(*out)[i], s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tout.Conditions = nil\n\t}\n\tout.Zones = in.Zones\n\tout.Region = in.Region\n\treturn nil\n}\n\nfunc Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in *federation.ClusterStatus, out *ClusterStatus, s conversion.Scope) error {\n\treturn autoConvert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in, out, s)\n}\n\nfunc autoConvert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in *ServerAddressByClientCIDR, out *federation.ServerAddressByClientCIDR, s conversion.Scope) error {\n\tout.ClientCIDR = in.ClientCIDR\n\tout.ServerAddress = in.ServerAddress\n\treturn nil\n}\n\nfunc Convert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in *ServerAddressByClientCIDR, out *federation.ServerAddressByClientCIDR, s conversion.Scope) error {\n\treturn autoConvert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in, out, s)\n}\n\nfunc autoConvert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in *federation.ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, s conversion.Scope) error {\n\tout.ClientCIDR = in.ClientCIDR\n\tout.ServerAddress = in.ServerAddress\n\treturn nil\n}\n\nfunc Convert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in *federation.ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, s conversion.Scope) error {\n\treturn autoConvert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in, out, s)\n}\n<commit_msg>generated: conversions<commit_after>\/\/ +build !ignore_autogenerated\n\n\/*\nCopyright 2016 The Kubernetes Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/\/ This file was autogenerated by conversion-gen. Do not edit it manually!\n\npackage v1beta1\n\nimport (\n\tfederation \"k8s.io\/kubernetes\/federation\/apis\/federation\"\n\tapi \"k8s.io\/kubernetes\/pkg\/api\"\n\tv1 \"k8s.io\/kubernetes\/pkg\/api\/v1\"\n\tconversion \"k8s.io\/kubernetes\/pkg\/conversion\"\n\truntime \"k8s.io\/kubernetes\/pkg\/runtime\"\n)\n\nfunc init() {\n\tSchemeBuilder.Register(RegisterConversions)\n}\n\n\/\/ RegisterConversions adds conversion functions to the given scheme.\n\/\/ Public to allow building arbitrary schemes.\nfunc RegisterConversions(scheme *runtime.Scheme) error {\n\treturn scheme.AddGeneratedConversionFuncs(\n\t\tConvert_v1beta1_Cluster_To_federation_Cluster,\n\t\tConvert_federation_Cluster_To_v1beta1_Cluster,\n\t\tConvert_v1beta1_ClusterCondition_To_federation_ClusterCondition,\n\t\tConvert_federation_ClusterCondition_To_v1beta1_ClusterCondition,\n\t\tConvert_v1beta1_ClusterList_To_federation_ClusterList,\n\t\tConvert_federation_ClusterList_To_v1beta1_ClusterList,\n\t\tConvert_v1beta1_ClusterSpec_To_federation_ClusterSpec,\n\t\tConvert_federation_ClusterSpec_To_v1beta1_ClusterSpec,\n\t\tConvert_v1beta1_ClusterStatus_To_federation_ClusterStatus,\n\t\tConvert_federation_ClusterStatus_To_v1beta1_ClusterStatus,\n\t\tConvert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR,\n\t\tConvert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR,\n\t)\n}\n\nfunc autoConvert_v1beta1_Cluster_To_federation_Cluster(in *Cluster, out *federation.Cluster, s conversion.Scope) error {\n\t\/\/ TODO: Inefficient conversion - can we improve it?\n\tif err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {\n\t\treturn err\n\t}\n\tif err := Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec(&in.Spec, &out.Spec, s); err != nil {\n\t\treturn err\n\t}\n\tif err := Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus(&in.Status, &out.Status, s); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc Convert_v1beta1_Cluster_To_federation_Cluster(in *Cluster, out *federation.Cluster, s conversion.Scope) error {\n\treturn autoConvert_v1beta1_Cluster_To_federation_Cluster(in, out, s)\n}\n\nfunc autoConvert_federation_Cluster_To_v1beta1_Cluster(in *federation.Cluster, out *Cluster, s conversion.Scope) error {\n\t\/\/ TODO: Inefficient conversion - can we improve it?\n\tif err := s.Convert(&in.ObjectMeta, &out.ObjectMeta, 0); err != nil {\n\t\treturn err\n\t}\n\tif err := Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec(&in.Spec, &out.Spec, s); err != nil {\n\t\treturn err\n\t}\n\tif err := Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus(&in.Status, &out.Status, s); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc Convert_federation_Cluster_To_v1beta1_Cluster(in *federation.Cluster, out *Cluster, s conversion.Scope) error {\n\treturn autoConvert_federation_Cluster_To_v1beta1_Cluster(in, out, s)\n}\n\nfunc autoConvert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in *ClusterCondition, out *federation.ClusterCondition, s conversion.Scope) error {\n\tout.Type = federation.ClusterConditionType(in.Type)\n\tout.Status = api.ConditionStatus(in.Status)\n\tout.LastProbeTime = in.LastProbeTime\n\tout.LastTransitionTime = in.LastTransitionTime\n\tout.Reason = in.Reason\n\tout.Message = in.Message\n\treturn nil\n}\n\nfunc Convert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in *ClusterCondition, out *federation.ClusterCondition, s conversion.Scope) error {\n\treturn autoConvert_v1beta1_ClusterCondition_To_federation_ClusterCondition(in, out, s)\n}\n\nfunc autoConvert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in *federation.ClusterCondition, out *ClusterCondition, s conversion.Scope) error {\n\tout.Type = ClusterConditionType(in.Type)\n\tout.Status = v1.ConditionStatus(in.Status)\n\tout.LastProbeTime = in.LastProbeTime\n\tout.LastTransitionTime = in.LastTransitionTime\n\tout.Reason = in.Reason\n\tout.Message = in.Message\n\treturn nil\n}\n\nfunc Convert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in *federation.ClusterCondition, out *ClusterCondition, s conversion.Scope) error {\n\treturn autoConvert_federation_ClusterCondition_To_v1beta1_ClusterCondition(in, out, s)\n}\n\nfunc autoConvert_v1beta1_ClusterList_To_federation_ClusterList(in *ClusterList, out *federation.ClusterList, s conversion.Scope) error {\n\tout.ListMeta = in.ListMeta\n\tif in.Items != nil {\n\t\tin, out := &in.Items, &out.Items\n\t\t*out = make([]federation.Cluster, len(*in))\n\t\tfor i := range *in {\n\t\t\tif err := Convert_v1beta1_Cluster_To_federation_Cluster(&(*in)[i], &(*out)[i], s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tout.Items = nil\n\t}\n\treturn nil\n}\n\nfunc Convert_v1beta1_ClusterList_To_federation_ClusterList(in *ClusterList, out *federation.ClusterList, s conversion.Scope) error {\n\treturn autoConvert_v1beta1_ClusterList_To_federation_ClusterList(in, out, s)\n}\n\nfunc autoConvert_federation_ClusterList_To_v1beta1_ClusterList(in *federation.ClusterList, out *ClusterList, s conversion.Scope) error {\n\tout.ListMeta = in.ListMeta\n\tif in.Items != nil {\n\t\tin, out := &in.Items, &out.Items\n\t\t*out = make([]Cluster, len(*in))\n\t\tfor i := range *in {\n\t\t\tif err := Convert_federation_Cluster_To_v1beta1_Cluster(&(*in)[i], &(*out)[i], s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tout.Items = nil\n\t}\n\treturn nil\n}\n\nfunc Convert_federation_ClusterList_To_v1beta1_ClusterList(in *federation.ClusterList, out *ClusterList, s conversion.Scope) error {\n\treturn autoConvert_federation_ClusterList_To_v1beta1_ClusterList(in, out, s)\n}\n\nfunc autoConvert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in *ClusterSpec, out *federation.ClusterSpec, s conversion.Scope) error {\n\tif in.ServerAddressByClientCIDRs != nil {\n\t\tin, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs\n\t\t*out = make([]federation.ServerAddressByClientCIDR, len(*in))\n\t\tfor i := range *in {\n\t\t\tif err := Convert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(&(*in)[i], &(*out)[i], s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tout.ServerAddressByClientCIDRs = nil\n\t}\n\tif in.SecretRef != nil {\n\t\tin, out := &in.SecretRef, &out.SecretRef\n\t\t*out = new(api.LocalObjectReference)\n\t\t\/\/ TODO: Inefficient conversion - can we improve it?\n\t\tif err := s.Convert(*in, *out, 0); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tout.SecretRef = nil\n\t}\n\treturn nil\n}\n\nfunc Convert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in *ClusterSpec, out *federation.ClusterSpec, s conversion.Scope) error {\n\treturn autoConvert_v1beta1_ClusterSpec_To_federation_ClusterSpec(in, out, s)\n}\n\nfunc autoConvert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in *federation.ClusterSpec, out *ClusterSpec, s conversion.Scope) error {\n\tif in.ServerAddressByClientCIDRs != nil {\n\t\tin, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs\n\t\t*out = make([]ServerAddressByClientCIDR, len(*in))\n\t\tfor i := range *in {\n\t\t\tif err := Convert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(&(*in)[i], &(*out)[i], s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tout.ServerAddressByClientCIDRs = nil\n\t}\n\tif in.SecretRef != nil {\n\t\tin, out := &in.SecretRef, &out.SecretRef\n\t\t*out = new(v1.LocalObjectReference)\n\t\t\/\/ TODO: Inefficient conversion - can we improve it?\n\t\tif err := s.Convert(*in, *out, 0); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tout.SecretRef = nil\n\t}\n\treturn nil\n}\n\nfunc Convert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in *federation.ClusterSpec, out *ClusterSpec, s conversion.Scope) error {\n\treturn autoConvert_federation_ClusterSpec_To_v1beta1_ClusterSpec(in, out, s)\n}\n\nfunc autoConvert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in *ClusterStatus, out *federation.ClusterStatus, s conversion.Scope) error {\n\tif in.Conditions != nil {\n\t\tin, out := &in.Conditions, &out.Conditions\n\t\t*out = make([]federation.ClusterCondition, len(*in))\n\t\tfor i := range *in {\n\t\t\tif err := Convert_v1beta1_ClusterCondition_To_federation_ClusterCondition(&(*in)[i], &(*out)[i], s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tout.Conditions = nil\n\t}\n\tout.Zones = in.Zones\n\tout.Region = in.Region\n\treturn nil\n}\n\nfunc Convert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in *ClusterStatus, out *federation.ClusterStatus, s conversion.Scope) error {\n\treturn autoConvert_v1beta1_ClusterStatus_To_federation_ClusterStatus(in, out, s)\n}\n\nfunc autoConvert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in *federation.ClusterStatus, out *ClusterStatus, s conversion.Scope) error {\n\tif in.Conditions != nil {\n\t\tin, out := &in.Conditions, &out.Conditions\n\t\t*out = make([]ClusterCondition, len(*in))\n\t\tfor i := range *in {\n\t\t\tif err := Convert_federation_ClusterCondition_To_v1beta1_ClusterCondition(&(*in)[i], &(*out)[i], s); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t} else {\n\t\tout.Conditions = nil\n\t}\n\tout.Zones = in.Zones\n\tout.Region = in.Region\n\treturn nil\n}\n\nfunc Convert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in *federation.ClusterStatus, out *ClusterStatus, s conversion.Scope) error {\n\treturn autoConvert_federation_ClusterStatus_To_v1beta1_ClusterStatus(in, out, s)\n}\n\nfunc autoConvert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in *ServerAddressByClientCIDR, out *federation.ServerAddressByClientCIDR, s conversion.Scope) error {\n\tout.ClientCIDR = in.ClientCIDR\n\tout.ServerAddress = in.ServerAddress\n\treturn nil\n}\n\nfunc Convert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in *ServerAddressByClientCIDR, out *federation.ServerAddressByClientCIDR, s conversion.Scope) error {\n\treturn autoConvert_v1beta1_ServerAddressByClientCIDR_To_federation_ServerAddressByClientCIDR(in, out, s)\n}\n\nfunc autoConvert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in *federation.ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, s conversion.Scope) error {\n\tout.ClientCIDR = in.ClientCIDR\n\tout.ServerAddress = in.ServerAddress\n\treturn nil\n}\n\nfunc Convert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in *federation.ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, s conversion.Scope) error {\n\treturn autoConvert_federation_ServerAddressByClientCIDR_To_v1beta1_ServerAddressByClientCIDR(in, out, s)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage command\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/Godeps\/_workspace\/src\/github.com\/bgentry\/speakeasy\"\n\t\"github.com\/coreos\/etcd\/Godeps\/_workspace\/src\/github.com\/codegangsta\/cli\"\n\t\"github.com\/coreos\/etcd\/Godeps\/_workspace\/src\/golang.org\/x\/net\/context\"\n\t\"github.com\/coreos\/etcd\/client\"\n\t\"github.com\/coreos\/etcd\/pkg\/transport\"\n)\n\nvar (\n\tErrNoAvailSrc = errors.New(\"no available argument and stdin\")\n\n\t\/\/ the maximum amount of time a dial will wait for a connection to setup.\n\t\/\/ 30s is long enough for most of the network conditions.\n\tdefaultDialTimeout = 30 * time.Second\n)\n\n\/\/ trimsplit slices s into all substrings separated by sep and returns a\n\/\/ slice of the substrings between the separator with all leading and trailing\n\/\/ white space removed, as defined by Unicode.\nfunc trimsplit(s, sep string) []string {\n\traw := strings.Split(s, \",\")\n\ttrimmed := make([]string, 0)\n\tfor _, r := range raw {\n\t\ttrimmed = append(trimmed, strings.TrimSpace(r))\n\t}\n\treturn trimmed\n}\n\nfunc argOrStdin(args []string, stdin io.Reader, i int) (string, error) {\n\tif i < len(args) {\n\t\treturn args[i], nil\n\t}\n\tbytes, err := ioutil.ReadAll(stdin)\n\tif string(bytes) == \"\" || err != nil {\n\t\treturn \"\", ErrNoAvailSrc\n\t}\n\treturn string(bytes), nil\n}\n\nfunc getPeersFlagValue(c *cli.Context) []string {\n\tpeerstr := c.GlobalString(\"endpoint\")\n\n\tif peerstr == \"\" {\n\t\tpeerstr = os.Getenv(\"ETCDCTL_ENDPOINT\")\n\t}\n\n\tif peerstr == \"\" {\n\t\tpeerstr = c.GlobalString(\"peers\")\n\t}\n\n\tif peerstr == \"\" {\n\t\tpeerstr = os.Getenv(\"ETCDCTL_PEERS\")\n\t}\n\n\t\/\/ If we still don't have peers, use a default\n\tif peerstr == \"\" {\n\t\tpeerstr = \"http:\/\/127.0.0.1:4001,http:\/\/127.0.0.1:2379\"\n\t}\n\n\treturn strings.Split(peerstr, \",\")\n}\n\nfunc getDomainDiscoveryFlagValue(c *cli.Context) ([]string, error) {\n\tdomainstr := c.GlobalString(\"discovery-srv\")\n\n\t\/\/ Use an environment variable if nothing was supplied on the\n\t\/\/ command line\n\tif domainstr == \"\" {\n\t\tdomainstr = os.Getenv(\"ETCDCTL_DISCOVERY_SRV\")\n\t}\n\n\t\/\/ If we still don't have domain discovery, return nothing\n\tif domainstr == \"\" {\n\t\treturn []string{}, nil\n\t}\n\n\tdiscoverer := client.NewSRVDiscover()\n\teps, err := discoverer.Discover(domainstr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn eps, err\n}\n\nfunc getEndpoints(c *cli.Context) ([]string, error) {\n\teps, err := getDomainDiscoveryFlagValue(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If domain discovery returns no endpoints, check peer flag\n\tif len(eps) == 0 {\n\t\teps = getPeersFlagValue(c)\n\t}\n\n\tfor i, ep := range eps {\n\t\tu, err := url.Parse(ep)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif u.Scheme == \"\" {\n\t\t\tu.Scheme = \"http\"\n\t\t}\n\n\t\teps[i] = u.String()\n\t}\n\n\treturn eps, nil\n}\n\nfunc getTransport(c *cli.Context) (*http.Transport, error) {\n\tcafile := c.GlobalString(\"ca-file\")\n\tcertfile := c.GlobalString(\"cert-file\")\n\tkeyfile := c.GlobalString(\"key-file\")\n\n\t\/\/ Use an environment variable if nothing was supplied on the\n\t\/\/ command line\n\tif cafile == \"\" {\n\t\tcafile = os.Getenv(\"ETCDCTL_CA_FILE\")\n\t}\n\tif certfile == \"\" {\n\t\tcertfile = os.Getenv(\"ETCDCTL_CERT_FILE\")\n\t}\n\tif keyfile == \"\" {\n\t\tkeyfile = os.Getenv(\"ETCDCTL_KEY_FILE\")\n\t}\n\n\ttls := transport.TLSInfo{\n\t\tCAFile: cafile,\n\t\tCertFile: certfile,\n\t\tKeyFile: keyfile,\n\t}\n\treturn transport.NewTransport(tls, defaultDialTimeout)\n}\n\nfunc getUsernamePasswordFromFlag(usernameFlag string) (username string, password string, err error) {\n\tcolon := strings.Index(usernameFlag, \":\")\n\tif colon == -1 {\n\t\tusername = usernameFlag\n\t\t\/\/ Prompt for the password.\n\t\tpassword, err = speakeasy.Ask(\"Password: \")\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t} else {\n\t\tusername = usernameFlag[:colon]\n\t\tpassword = usernameFlag[colon+1:]\n\t}\n\treturn username, password, nil\n}\n\nfunc mustNewKeyAPI(c *cli.Context) client.KeysAPI {\n\treturn client.NewKeysAPI(mustNewClient(c))\n}\n\nfunc mustNewMembersAPI(c *cli.Context) client.MembersAPI {\n\treturn client.NewMembersAPI(mustNewClient(c))\n}\n\nfunc mustNewClient(c *cli.Context) client.Client {\n\thc, err := newClient(c)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tdebug := c.GlobalBool(\"debug\")\n\tif debug {\n\t\tclient.EnablecURLDebug()\n\t}\n\n\tif !c.GlobalBool(\"no-sync\") {\n\t\tif debug {\n\t\t\tfmt.Fprintf(os.Stderr, \"start to sync cluster using endpoints(%s)\\n\", strings.Join(hc.Endpoints(), \",\"))\n\t\t}\n\t\tctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)\n\t\terr := hc.Sync(ctx)\n\t\tcancel()\n\t\tif err != nil {\n\t\t\tif err == client.ErrNoEndpoints {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"etcd cluster has no published client endpoints.\\n\")\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Try '--no-sync' if you want to access non-published client endpoints(%s).\\n\", strings.Join(hc.Endpoints(), \",\"))\n\t\t\t}\n\t\t\thandleError(ExitServerError, err)\n\t\t}\n\t\tif debug {\n\t\t\tfmt.Fprintf(os.Stderr, \"got endpoints(%s) after sync\\n\", strings.Join(hc.Endpoints(), \",\"))\n\t\t}\n\t}\n\n\tif debug {\n\t\tfmt.Fprintf(os.Stderr, \"Cluster-Endpoints: %s\\n\", strings.Join(hc.Endpoints(), \", \"))\n\t}\n\n\treturn hc\n}\n\nfunc mustNewClientNoSync(c *cli.Context) client.Client {\n\thc, err := newClient(c)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tif c.GlobalBool(\"debug\") {\n\t\tfmt.Fprintf(os.Stderr, \"Cluster-Endpoints: %s\\n\", strings.Join(hc.Endpoints(), \", \"))\n\t\tclient.EnablecURLDebug()\n\t}\n\n\treturn hc\n}\n\nfunc newClient(c *cli.Context) (client.Client, error) {\n\teps, err := getEndpoints(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttr, err := getTransport(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcfg := client.Config{\n\t\tTransport: tr,\n\t\tEndpoints: eps,\n\t\tHeaderTimeoutPerRequest: c.GlobalDuration(\"timeout\"),\n\t}\n\n\tuFlag := c.GlobalString(\"username\")\n\tif uFlag != \"\" {\n\t\tusername, password, err := getUsernamePasswordFromFlag(uFlag)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcfg.Username = username\n\t\tcfg.Password = password\n\t}\n\n\treturn client.New(cfg)\n}\n\nfunc contextWithTotalTimeout(c *cli.Context) (context.Context, context.CancelFunc) {\n\treturn context.WithTimeout(context.Background(), c.GlobalDuration(\"total-timeout\"))\n}\n<commit_msg>etcdctl: support basic operations with etcd 0.4.<commit_after>\/\/ Copyright 2015 CoreOS, Inc.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage command\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com\/coreos\/etcd\/Godeps\/_workspace\/src\/github.com\/bgentry\/speakeasy\"\n\t\"github.com\/coreos\/etcd\/Godeps\/_workspace\/src\/github.com\/codegangsta\/cli\"\n\t\"github.com\/coreos\/etcd\/Godeps\/_workspace\/src\/golang.org\/x\/net\/context\"\n\t\"github.com\/coreos\/etcd\/client\"\n\t\"github.com\/coreos\/etcd\/pkg\/transport\"\n)\n\nvar (\n\tErrNoAvailSrc = errors.New(\"no available argument and stdin\")\n\n\t\/\/ the maximum amount of time a dial will wait for a connection to setup.\n\t\/\/ 30s is long enough for most of the network conditions.\n\tdefaultDialTimeout = 30 * time.Second\n)\n\n\/\/ trimsplit slices s into all substrings separated by sep and returns a\n\/\/ slice of the substrings between the separator with all leading and trailing\n\/\/ white space removed, as defined by Unicode.\nfunc trimsplit(s, sep string) []string {\n\traw := strings.Split(s, \",\")\n\ttrimmed := make([]string, 0)\n\tfor _, r := range raw {\n\t\ttrimmed = append(trimmed, strings.TrimSpace(r))\n\t}\n\treturn trimmed\n}\n\nfunc argOrStdin(args []string, stdin io.Reader, i int) (string, error) {\n\tif i < len(args) {\n\t\treturn args[i], nil\n\t}\n\tbytes, err := ioutil.ReadAll(stdin)\n\tif string(bytes) == \"\" || err != nil {\n\t\treturn \"\", ErrNoAvailSrc\n\t}\n\treturn string(bytes), nil\n}\n\nfunc getPeersFlagValue(c *cli.Context) []string {\n\tpeerstr := c.GlobalString(\"endpoint\")\n\n\tif peerstr == \"\" {\n\t\tpeerstr = os.Getenv(\"ETCDCTL_ENDPOINT\")\n\t}\n\n\tif peerstr == \"\" {\n\t\tpeerstr = c.GlobalString(\"peers\")\n\t}\n\n\tif peerstr == \"\" {\n\t\tpeerstr = os.Getenv(\"ETCDCTL_PEERS\")\n\t}\n\n\t\/\/ If we still don't have peers, use a default\n\tif peerstr == \"\" {\n\t\tpeerstr = \"http:\/\/127.0.0.1:4001,http:\/\/127.0.0.1:2379\"\n\t}\n\n\treturn strings.Split(peerstr, \",\")\n}\n\nfunc getDomainDiscoveryFlagValue(c *cli.Context) ([]string, error) {\n\tdomainstr := c.GlobalString(\"discovery-srv\")\n\n\t\/\/ Use an environment variable if nothing was supplied on the\n\t\/\/ command line\n\tif domainstr == \"\" {\n\t\tdomainstr = os.Getenv(\"ETCDCTL_DISCOVERY_SRV\")\n\t}\n\n\t\/\/ If we still don't have domain discovery, return nothing\n\tif domainstr == \"\" {\n\t\treturn []string{}, nil\n\t}\n\n\tdiscoverer := client.NewSRVDiscover()\n\teps, err := discoverer.Discover(domainstr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn eps, err\n}\n\nfunc getEndpoints(c *cli.Context) ([]string, error) {\n\teps, err := getDomainDiscoveryFlagValue(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If domain discovery returns no endpoints, check peer flag\n\tif len(eps) == 0 {\n\t\teps = getPeersFlagValue(c)\n\t}\n\n\tfor i, ep := range eps {\n\t\tu, err := url.Parse(ep)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif u.Scheme == \"\" {\n\t\t\tu.Scheme = \"http\"\n\t\t}\n\n\t\teps[i] = u.String()\n\t}\n\n\treturn eps, nil\n}\n\nfunc getTransport(c *cli.Context) (*http.Transport, error) {\n\tcafile := c.GlobalString(\"ca-file\")\n\tcertfile := c.GlobalString(\"cert-file\")\n\tkeyfile := c.GlobalString(\"key-file\")\n\n\t\/\/ Use an environment variable if nothing was supplied on the\n\t\/\/ command line\n\tif cafile == \"\" {\n\t\tcafile = os.Getenv(\"ETCDCTL_CA_FILE\")\n\t}\n\tif certfile == \"\" {\n\t\tcertfile = os.Getenv(\"ETCDCTL_CERT_FILE\")\n\t}\n\tif keyfile == \"\" {\n\t\tkeyfile = os.Getenv(\"ETCDCTL_KEY_FILE\")\n\t}\n\n\ttls := transport.TLSInfo{\n\t\tCAFile: cafile,\n\t\tCertFile: certfile,\n\t\tKeyFile: keyfile,\n\t}\n\treturn transport.NewTransport(tls, defaultDialTimeout)\n}\n\nfunc getUsernamePasswordFromFlag(usernameFlag string) (username string, password string, err error) {\n\tcolon := strings.Index(usernameFlag, \":\")\n\tif colon == -1 {\n\t\tusername = usernameFlag\n\t\t\/\/ Prompt for the password.\n\t\tpassword, err = speakeasy.Ask(\"Password: \")\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t} else {\n\t\tusername = usernameFlag[:colon]\n\t\tpassword = usernameFlag[colon+1:]\n\t}\n\treturn username, password, nil\n}\n\nfunc mustNewKeyAPI(c *cli.Context) client.KeysAPI {\n\treturn client.NewKeysAPI(mustNewClient(c))\n}\n\nfunc mustNewMembersAPI(c *cli.Context) client.MembersAPI {\n\treturn client.NewMembersAPI(mustNewClient(c))\n}\n\nfunc mustNewClient(c *cli.Context) client.Client {\n\thc, err := newClient(c)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tdebug := c.GlobalBool(\"debug\")\n\tif debug {\n\t\tclient.EnablecURLDebug()\n\t}\n\n\tif !c.GlobalBool(\"no-sync\") {\n\t\tif debug {\n\t\t\tfmt.Fprintf(os.Stderr, \"start to sync cluster using endpoints(%s)\\n\", strings.Join(hc.Endpoints(), \",\"))\n\t\t}\n\t\tctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)\n\t\terr := hc.Sync(ctx)\n\t\tcancel()\n\t\tif err != nil {\n\t\t\tif err == client.ErrNoEndpoints {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"etcd cluster has no published client endpoints.\\n\")\n\t\t\t\tfmt.Fprintf(os.Stderr, \"Try '--no-sync' if you want to access non-published client endpoints(%s).\\n\", strings.Join(hc.Endpoints(), \",\"))\n\t\t\t\thandleError(ExitServerError, err)\n\t\t\t}\n\n\t\t\t\/\/ fail-back to try sync cluster with peer API. this is for making etcdctl work with etcd 0.4.x.\n\t\t\t\/\/ TODO: remove this when we deprecate the support for etcd 0.4.\n\t\t\teps, serr := syncWithPeerAPI(c, ctx, hc.Endpoints())\n\t\t\tif serr != nil {\n\t\t\t\thandleError(ExitServerError, serr)\n\t\t\t}\n\t\t\terr = hc.SetEndpoints(eps)\n\t\t\tif err != nil {\n\t\t\t\thandleError(ExitServerError, err)\n\t\t\t}\n\t\t}\n\t\tif debug {\n\t\t\tfmt.Fprintf(os.Stderr, \"got endpoints(%s) after sync\\n\", strings.Join(hc.Endpoints(), \",\"))\n\t\t}\n\t}\n\n\tif debug {\n\t\tfmt.Fprintf(os.Stderr, \"Cluster-Endpoints: %s\\n\", strings.Join(hc.Endpoints(), \", \"))\n\t}\n\n\treturn hc\n}\n\nfunc mustNewClientNoSync(c *cli.Context) client.Client {\n\thc, err := newClient(c)\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\tos.Exit(1)\n\t}\n\n\tif c.GlobalBool(\"debug\") {\n\t\tfmt.Fprintf(os.Stderr, \"Cluster-Endpoints: %s\\n\", strings.Join(hc.Endpoints(), \", \"))\n\t\tclient.EnablecURLDebug()\n\t}\n\n\treturn hc\n}\n\nfunc newClient(c *cli.Context) (client.Client, error) {\n\teps, err := getEndpoints(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttr, err := getTransport(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcfg := client.Config{\n\t\tTransport: tr,\n\t\tEndpoints: eps,\n\t\tHeaderTimeoutPerRequest: c.GlobalDuration(\"timeout\"),\n\t}\n\n\tuFlag := c.GlobalString(\"username\")\n\tif uFlag != \"\" {\n\t\tusername, password, err := getUsernamePasswordFromFlag(uFlag)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcfg.Username = username\n\t\tcfg.Password = password\n\t}\n\n\treturn client.New(cfg)\n}\n\nfunc contextWithTotalTimeout(c *cli.Context) (context.Context, context.CancelFunc) {\n\treturn context.WithTimeout(context.Background(), c.GlobalDuration(\"total-timeout\"))\n}\n\n\/\/ syncWithPeerAPI syncs cluster with peer API defined at\n\/\/ https:\/\/github.com\/coreos\/etcd\/blob\/v0.4.9\/server\/server.go#L311.\n\/\/ This exists for backward compatibility with etcd 0.4.x.\nfunc syncWithPeerAPI(c *cli.Context, ctx context.Context, knownPeers []string) ([]string, error) {\n\ttr, err := getTransport(c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar (\n\t\tbody []byte\n\t\tresp *http.Response\n\t)\n\tfor _, p := range knownPeers {\n\t\tvar req *http.Request\n\t\treq, err = http.NewRequest(\"GET\", p+\"\/v2\/peers\", nil)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tresp, err = tr.RoundTrip(req)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\tresp.Body.Close()\n\t\t\tcontinue\n\t\t}\n\t\tbody, err = ioutil.ReadAll(resp.Body)\n\t\tresp.Body.Close()\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t\/\/ Parse the peers API format: https:\/\/github.com\/coreos\/etcd\/blob\/v0.4.9\/server\/server.go#L311\n\treturn strings.Split(string(body), \", \"), nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/go:build !js\n\/\/ +build !js\n\npackage webrtc\n\nimport (\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/pion\/rtp\"\n\t\"github.com\/pion\/webrtc\/v3\/internal\/util\"\n\t\"github.com\/pion\/webrtc\/v3\/pkg\/media\"\n)\n\n\/\/ trackBinding is a single bind for a Track\n\/\/ Bind can be called multiple times, this stores the\n\/\/ result for a single bind call so that it can be used when writing\ntype trackBinding struct {\n\tid string\n\tssrc SSRC\n\tpayloadType PayloadType\n\twriteStream TrackLocalWriter\n}\n\n\/\/ TrackLocalStaticRTP is a TrackLocal that has a pre-set codec and accepts RTP Packets.\n\/\/ If you wish to send a media.Sample use TrackLocalStaticSample\ntype TrackLocalStaticRTP struct {\n\tmu sync.RWMutex\n\tbindings []trackBinding\n\tcodec RTPCodecCapability\n\tid, rid, streamID string\n}\n\n\/\/ NewTrackLocalStaticRTP returns a TrackLocalStaticRTP.\nfunc NewTrackLocalStaticRTP(c RTPCodecCapability, id, streamID string, options ...func(*TrackLocalStaticRTP)) (*TrackLocalStaticRTP, error) {\n\tt := &TrackLocalStaticRTP{\n\t\tcodec: c,\n\t\tbindings: []trackBinding{},\n\t\tid: id,\n\t\tstreamID: streamID,\n\t}\n\n\tfor _, option := range options {\n\t\toption(t)\n\t}\n\n\treturn t, nil\n}\n\n\/\/ WithRTPStreamID sets the RTP stream ID for this TrackLocalStaticRTP.\nfunc WithRTPStreamID(rid string) func(*TrackLocalStaticRTP) {\n\treturn func(t *TrackLocalStaticRTP) {\n\t\tt.rid = rid\n\t}\n}\n\n\/\/ Bind is called by the PeerConnection after negotiation is complete\n\/\/ This asserts that the code requested is supported by the remote peer.\n\/\/ If so it setups all the state (SSRC and PayloadType) to have a call\nfunc (s *TrackLocalStaticRTP) Bind(t TrackLocalContext) (RTPCodecParameters, error) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tparameters := RTPCodecParameters{RTPCodecCapability: s.codec}\n\tif codec, matchType := codecParametersFuzzySearch(parameters, t.CodecParameters()); matchType != codecMatchNone {\n\t\ts.bindings = append(s.bindings, trackBinding{\n\t\t\tssrc: t.SSRC(),\n\t\t\tpayloadType: codec.PayloadType,\n\t\t\twriteStream: t.WriteStream(),\n\t\t\tid: t.ID(),\n\t\t})\n\t\treturn codec, nil\n\t}\n\n\treturn RTPCodecParameters{}, ErrUnsupportedCodec\n}\n\n\/\/ Unbind implements the teardown logic when the track is no longer needed. This happens\n\/\/ because a track has been stopped.\nfunc (s *TrackLocalStaticRTP) Unbind(t TrackLocalContext) error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tfor i := range s.bindings {\n\t\tif s.bindings[i].id == t.ID() {\n\t\t\ts.bindings[i] = s.bindings[len(s.bindings)-1]\n\t\t\ts.bindings = s.bindings[:len(s.bindings)-1]\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn ErrUnbindFailed\n}\n\n\/\/ ID is the unique identifier for this Track. This should be unique for the\n\/\/ stream, but doesn't have to globally unique. A common example would be 'audio' or 'video'\n\/\/ and StreamID would be 'desktop' or 'webcam'\nfunc (s *TrackLocalStaticRTP) ID() string { return s.id }\n\n\/\/ StreamID is the group this track belongs too. This must be unique\nfunc (s *TrackLocalStaticRTP) StreamID() string { return s.streamID }\n\n\/\/ RID is the RTP stream identifier.\nfunc (s *TrackLocalStaticRTP) RID() string { return s.rid }\n\n\/\/ Kind controls if this TrackLocal is audio or video\nfunc (s *TrackLocalStaticRTP) Kind() RTPCodecType {\n\tswitch {\n\tcase strings.HasPrefix(s.codec.MimeType, \"audio\/\"):\n\t\treturn RTPCodecTypeAudio\n\tcase strings.HasPrefix(s.codec.MimeType, \"video\/\"):\n\t\treturn RTPCodecTypeVideo\n\tdefault:\n\t\treturn RTPCodecType(0)\n\t}\n}\n\n\/\/ Codec gets the Codec of the track\nfunc (s *TrackLocalStaticRTP) Codec() RTPCodecCapability {\n\treturn s.codec\n}\n\n\/\/ packetPool is a pool of packets used by WriteRTP and Write below\n\/\/ nolint:gochecknoglobals\nvar rtpPacketPool = sync.Pool{\n\tNew: func() interface{} {\n\t\treturn &rtp.Packet{}\n\t},\n}\n\n\/\/ WriteRTP writes a RTP Packet to the TrackLocalStaticRTP\n\/\/ If one PeerConnection fails the packets will still be sent to\n\/\/ all PeerConnections. The error message will contain the ID of the failed\n\/\/ PeerConnections so you can remove them\nfunc (s *TrackLocalStaticRTP) WriteRTP(p *rtp.Packet) error {\n\tipacket := rtpPacketPool.Get()\n\tpacket := ipacket.(*rtp.Packet)\n\tdefer func() {\n\t\t*packet = rtp.Packet{}\n\t\trtpPacketPool.Put(ipacket)\n\t}()\n\t*packet = *p\n\treturn s.writeRTP(packet)\n}\n\n\/\/ writeRTP is like WriteRTP, except that it may modify the packet p\nfunc (s *TrackLocalStaticRTP) writeRTP(p *rtp.Packet) error {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\twriteErrs := []error{}\n\n\tfor _, b := range s.bindings {\n\t\tp.Header.SSRC = uint32(b.ssrc)\n\t\tp.Header.PayloadType = uint8(b.payloadType)\n\t\tif _, err := b.writeStream.WriteRTP(&p.Header, p.Payload); err != nil {\n\t\t\twriteErrs = append(writeErrs, err)\n\t\t}\n\t}\n\n\treturn util.FlattenErrs(writeErrs)\n}\n\n\/\/ Write writes a RTP Packet as a buffer to the TrackLocalStaticRTP\n\/\/ If one PeerConnection fails the packets will still be sent to\n\/\/ all PeerConnections. The error message will contain the ID of the failed\n\/\/ PeerConnections so you can remove them\nfunc (s *TrackLocalStaticRTP) Write(b []byte) (n int, err error) {\n\tipacket := rtpPacketPool.Get()\n\tpacket := ipacket.(*rtp.Packet)\n\tdefer func() {\n\t\t*packet = rtp.Packet{}\n\t\trtpPacketPool.Put(ipacket)\n\t}()\n\n\tif err = packet.Unmarshal(b); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn len(b), s.writeRTP(packet)\n}\n\n\/\/ TrackLocalStaticSample is a TrackLocal that has a pre-set codec and accepts Samples.\n\/\/ If you wish to send a RTP Packet use TrackLocalStaticRTP\ntype TrackLocalStaticSample struct {\n\tpacketizer rtp.Packetizer\n\tsequencer rtp.Sequencer\n\trtpTrack *TrackLocalStaticRTP\n\tclockRate float64\n}\n\n\/\/ NewTrackLocalStaticSample returns a TrackLocalStaticSample\nfunc NewTrackLocalStaticSample(c RTPCodecCapability, id, streamID string, options ...func(*TrackLocalStaticRTP)) (*TrackLocalStaticSample, error) {\n\trtpTrack, err := NewTrackLocalStaticRTP(c, id, streamID, options...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &TrackLocalStaticSample{\n\t\trtpTrack: rtpTrack,\n\t}, nil\n}\n\n\/\/ ID is the unique identifier for this Track. This should be unique for the\n\/\/ stream, but doesn't have to globally unique. A common example would be 'audio' or 'video'\n\/\/ and StreamID would be 'desktop' or 'webcam'\nfunc (s *TrackLocalStaticSample) ID() string { return s.rtpTrack.ID() }\n\n\/\/ StreamID is the group this track belongs too. This must be unique\nfunc (s *TrackLocalStaticSample) StreamID() string { return s.rtpTrack.StreamID() }\n\n\/\/ RID is the RTP stream identifier.\nfunc (s *TrackLocalStaticSample) RID() string { return s.rtpTrack.RID() }\n\n\/\/ Kind controls if this TrackLocal is audio or video\nfunc (s *TrackLocalStaticSample) Kind() RTPCodecType { return s.rtpTrack.Kind() }\n\n\/\/ Codec gets the Codec of the track\nfunc (s *TrackLocalStaticSample) Codec() RTPCodecCapability {\n\treturn s.rtpTrack.Codec()\n}\n\n\/\/ Bind is called by the PeerConnection after negotiation is complete\n\/\/ This asserts that the code requested is supported by the remote peer.\n\/\/ If so it setups all the state (SSRC and PayloadType) to have a call\nfunc (s *TrackLocalStaticSample) Bind(t TrackLocalContext) (RTPCodecParameters, error) {\n\tcodec, err := s.rtpTrack.Bind(t)\n\tif err != nil {\n\t\treturn codec, err\n\t}\n\n\ts.rtpTrack.mu.Lock()\n\tdefer s.rtpTrack.mu.Unlock()\n\n\t\/\/ We only need one packetizer\n\tif s.packetizer != nil {\n\t\treturn codec, nil\n\t}\n\n\tpayloader, err := payloaderForCodec(codec.RTPCodecCapability)\n\tif err != nil {\n\t\treturn codec, err\n\t}\n\n\ts.sequencer = rtp.NewRandomSequencer()\n\ts.packetizer = rtp.NewPacketizer(\n\t\trtpOutboundMTU,\n\t\t0, \/\/ Value is handled when writing\n\t\t0, \/\/ Value is handled when writing\n\t\tpayloader,\n\t\ts.sequencer,\n\t\tcodec.ClockRate,\n\t)\n\ts.clockRate = float64(codec.RTPCodecCapability.ClockRate)\n\treturn codec, nil\n}\n\n\/\/ Unbind implements the teardown logic when the track is no longer needed. This happens\n\/\/ because a track has been stopped.\nfunc (s *TrackLocalStaticSample) Unbind(t TrackLocalContext) error {\n\treturn s.rtpTrack.Unbind(t)\n}\n\n\/\/ WriteSample writes a Sample to the TrackLocalStaticSample\n\/\/ If one PeerConnection fails the packets will still be sent to\n\/\/ all PeerConnections. The error message will contain the ID of the failed\n\/\/ PeerConnections so you can remove them\nfunc (s *TrackLocalStaticSample) WriteSample(sample media.Sample) error {\n\ts.rtpTrack.mu.RLock()\n\tp := s.packetizer\n\tclockRate := s.clockRate\n\ts.rtpTrack.mu.RUnlock()\n\n\tif p == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ skip packets by the number of previously dropped packets\n\tfor i := uint16(0); i < sample.PrevDroppedPackets; i++ {\n\t\ts.sequencer.NextSequenceNumber()\n\t}\n\n\tsamples := uint32(sample.Duration.Seconds() * clockRate)\n\tif sample.PrevDroppedPackets > 0 {\n\t\tp.(rtp.Packetizer).SkipSamples(samples * uint32(sample.PrevDroppedPackets))\n\t}\n\tpackets := p.(rtp.Packetizer).Packetize(sample.Data, samples)\n\n\twriteErrs := []error{}\n\tfor _, p := range packets {\n\t\tif err := s.rtpTrack.WriteRTP(p); err != nil {\n\t\t\twriteErrs = append(writeErrs, err)\n\t\t}\n\t}\n\n\treturn util.FlattenErrs(writeErrs)\n}\n<commit_msg>Remove unnecessary type assertion<commit_after>\/\/go:build !js\n\/\/ +build !js\n\npackage webrtc\n\nimport (\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com\/pion\/rtp\"\n\t\"github.com\/pion\/webrtc\/v3\/internal\/util\"\n\t\"github.com\/pion\/webrtc\/v3\/pkg\/media\"\n)\n\n\/\/ trackBinding is a single bind for a Track\n\/\/ Bind can be called multiple times, this stores the\n\/\/ result for a single bind call so that it can be used when writing\ntype trackBinding struct {\n\tid string\n\tssrc SSRC\n\tpayloadType PayloadType\n\twriteStream TrackLocalWriter\n}\n\n\/\/ TrackLocalStaticRTP is a TrackLocal that has a pre-set codec and accepts RTP Packets.\n\/\/ If you wish to send a media.Sample use TrackLocalStaticSample\ntype TrackLocalStaticRTP struct {\n\tmu sync.RWMutex\n\tbindings []trackBinding\n\tcodec RTPCodecCapability\n\tid, rid, streamID string\n}\n\n\/\/ NewTrackLocalStaticRTP returns a TrackLocalStaticRTP.\nfunc NewTrackLocalStaticRTP(c RTPCodecCapability, id, streamID string, options ...func(*TrackLocalStaticRTP)) (*TrackLocalStaticRTP, error) {\n\tt := &TrackLocalStaticRTP{\n\t\tcodec: c,\n\t\tbindings: []trackBinding{},\n\t\tid: id,\n\t\tstreamID: streamID,\n\t}\n\n\tfor _, option := range options {\n\t\toption(t)\n\t}\n\n\treturn t, nil\n}\n\n\/\/ WithRTPStreamID sets the RTP stream ID for this TrackLocalStaticRTP.\nfunc WithRTPStreamID(rid string) func(*TrackLocalStaticRTP) {\n\treturn func(t *TrackLocalStaticRTP) {\n\t\tt.rid = rid\n\t}\n}\n\n\/\/ Bind is called by the PeerConnection after negotiation is complete\n\/\/ This asserts that the code requested is supported by the remote peer.\n\/\/ If so it setups all the state (SSRC and PayloadType) to have a call\nfunc (s *TrackLocalStaticRTP) Bind(t TrackLocalContext) (RTPCodecParameters, error) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tparameters := RTPCodecParameters{RTPCodecCapability: s.codec}\n\tif codec, matchType := codecParametersFuzzySearch(parameters, t.CodecParameters()); matchType != codecMatchNone {\n\t\ts.bindings = append(s.bindings, trackBinding{\n\t\t\tssrc: t.SSRC(),\n\t\t\tpayloadType: codec.PayloadType,\n\t\t\twriteStream: t.WriteStream(),\n\t\t\tid: t.ID(),\n\t\t})\n\t\treturn codec, nil\n\t}\n\n\treturn RTPCodecParameters{}, ErrUnsupportedCodec\n}\n\n\/\/ Unbind implements the teardown logic when the track is no longer needed. This happens\n\/\/ because a track has been stopped.\nfunc (s *TrackLocalStaticRTP) Unbind(t TrackLocalContext) error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tfor i := range s.bindings {\n\t\tif s.bindings[i].id == t.ID() {\n\t\t\ts.bindings[i] = s.bindings[len(s.bindings)-1]\n\t\t\ts.bindings = s.bindings[:len(s.bindings)-1]\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn ErrUnbindFailed\n}\n\n\/\/ ID is the unique identifier for this Track. This should be unique for the\n\/\/ stream, but doesn't have to globally unique. A common example would be 'audio' or 'video'\n\/\/ and StreamID would be 'desktop' or 'webcam'\nfunc (s *TrackLocalStaticRTP) ID() string { return s.id }\n\n\/\/ StreamID is the group this track belongs too. This must be unique\nfunc (s *TrackLocalStaticRTP) StreamID() string { return s.streamID }\n\n\/\/ RID is the RTP stream identifier.\nfunc (s *TrackLocalStaticRTP) RID() string { return s.rid }\n\n\/\/ Kind controls if this TrackLocal is audio or video\nfunc (s *TrackLocalStaticRTP) Kind() RTPCodecType {\n\tswitch {\n\tcase strings.HasPrefix(s.codec.MimeType, \"audio\/\"):\n\t\treturn RTPCodecTypeAudio\n\tcase strings.HasPrefix(s.codec.MimeType, \"video\/\"):\n\t\treturn RTPCodecTypeVideo\n\tdefault:\n\t\treturn RTPCodecType(0)\n\t}\n}\n\n\/\/ Codec gets the Codec of the track\nfunc (s *TrackLocalStaticRTP) Codec() RTPCodecCapability {\n\treturn s.codec\n}\n\n\/\/ packetPool is a pool of packets used by WriteRTP and Write below\n\/\/ nolint:gochecknoglobals\nvar rtpPacketPool = sync.Pool{\n\tNew: func() interface{} {\n\t\treturn &rtp.Packet{}\n\t},\n}\n\n\/\/ WriteRTP writes a RTP Packet to the TrackLocalStaticRTP\n\/\/ If one PeerConnection fails the packets will still be sent to\n\/\/ all PeerConnections. The error message will contain the ID of the failed\n\/\/ PeerConnections so you can remove them\nfunc (s *TrackLocalStaticRTP) WriteRTP(p *rtp.Packet) error {\n\tipacket := rtpPacketPool.Get()\n\tpacket := ipacket.(*rtp.Packet)\n\tdefer func() {\n\t\t*packet = rtp.Packet{}\n\t\trtpPacketPool.Put(ipacket)\n\t}()\n\t*packet = *p\n\treturn s.writeRTP(packet)\n}\n\n\/\/ writeRTP is like WriteRTP, except that it may modify the packet p\nfunc (s *TrackLocalStaticRTP) writeRTP(p *rtp.Packet) error {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\n\twriteErrs := []error{}\n\n\tfor _, b := range s.bindings {\n\t\tp.Header.SSRC = uint32(b.ssrc)\n\t\tp.Header.PayloadType = uint8(b.payloadType)\n\t\tif _, err := b.writeStream.WriteRTP(&p.Header, p.Payload); err != nil {\n\t\t\twriteErrs = append(writeErrs, err)\n\t\t}\n\t}\n\n\treturn util.FlattenErrs(writeErrs)\n}\n\n\/\/ Write writes a RTP Packet as a buffer to the TrackLocalStaticRTP\n\/\/ If one PeerConnection fails the packets will still be sent to\n\/\/ all PeerConnections. The error message will contain the ID of the failed\n\/\/ PeerConnections so you can remove them\nfunc (s *TrackLocalStaticRTP) Write(b []byte) (n int, err error) {\n\tipacket := rtpPacketPool.Get()\n\tpacket := ipacket.(*rtp.Packet)\n\tdefer func() {\n\t\t*packet = rtp.Packet{}\n\t\trtpPacketPool.Put(ipacket)\n\t}()\n\n\tif err = packet.Unmarshal(b); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn len(b), s.writeRTP(packet)\n}\n\n\/\/ TrackLocalStaticSample is a TrackLocal that has a pre-set codec and accepts Samples.\n\/\/ If you wish to send a RTP Packet use TrackLocalStaticRTP\ntype TrackLocalStaticSample struct {\n\tpacketizer rtp.Packetizer\n\tsequencer rtp.Sequencer\n\trtpTrack *TrackLocalStaticRTP\n\tclockRate float64\n}\n\n\/\/ NewTrackLocalStaticSample returns a TrackLocalStaticSample\nfunc NewTrackLocalStaticSample(c RTPCodecCapability, id, streamID string, options ...func(*TrackLocalStaticRTP)) (*TrackLocalStaticSample, error) {\n\trtpTrack, err := NewTrackLocalStaticRTP(c, id, streamID, options...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &TrackLocalStaticSample{\n\t\trtpTrack: rtpTrack,\n\t}, nil\n}\n\n\/\/ ID is the unique identifier for this Track. This should be unique for the\n\/\/ stream, but doesn't have to globally unique. A common example would be 'audio' or 'video'\n\/\/ and StreamID would be 'desktop' or 'webcam'\nfunc (s *TrackLocalStaticSample) ID() string { return s.rtpTrack.ID() }\n\n\/\/ StreamID is the group this track belongs too. This must be unique\nfunc (s *TrackLocalStaticSample) StreamID() string { return s.rtpTrack.StreamID() }\n\n\/\/ RID is the RTP stream identifier.\nfunc (s *TrackLocalStaticSample) RID() string { return s.rtpTrack.RID() }\n\n\/\/ Kind controls if this TrackLocal is audio or video\nfunc (s *TrackLocalStaticSample) Kind() RTPCodecType { return s.rtpTrack.Kind() }\n\n\/\/ Codec gets the Codec of the track\nfunc (s *TrackLocalStaticSample) Codec() RTPCodecCapability {\n\treturn s.rtpTrack.Codec()\n}\n\n\/\/ Bind is called by the PeerConnection after negotiation is complete\n\/\/ This asserts that the code requested is supported by the remote peer.\n\/\/ If so it setups all the state (SSRC and PayloadType) to have a call\nfunc (s *TrackLocalStaticSample) Bind(t TrackLocalContext) (RTPCodecParameters, error) {\n\tcodec, err := s.rtpTrack.Bind(t)\n\tif err != nil {\n\t\treturn codec, err\n\t}\n\n\ts.rtpTrack.mu.Lock()\n\tdefer s.rtpTrack.mu.Unlock()\n\n\t\/\/ We only need one packetizer\n\tif s.packetizer != nil {\n\t\treturn codec, nil\n\t}\n\n\tpayloader, err := payloaderForCodec(codec.RTPCodecCapability)\n\tif err != nil {\n\t\treturn codec, err\n\t}\n\n\ts.sequencer = rtp.NewRandomSequencer()\n\ts.packetizer = rtp.NewPacketizer(\n\t\trtpOutboundMTU,\n\t\t0, \/\/ Value is handled when writing\n\t\t0, \/\/ Value is handled when writing\n\t\tpayloader,\n\t\ts.sequencer,\n\t\tcodec.ClockRate,\n\t)\n\ts.clockRate = float64(codec.RTPCodecCapability.ClockRate)\n\treturn codec, nil\n}\n\n\/\/ Unbind implements the teardown logic when the track is no longer needed. This happens\n\/\/ because a track has been stopped.\nfunc (s *TrackLocalStaticSample) Unbind(t TrackLocalContext) error {\n\treturn s.rtpTrack.Unbind(t)\n}\n\n\/\/ WriteSample writes a Sample to the TrackLocalStaticSample\n\/\/ If one PeerConnection fails the packets will still be sent to\n\/\/ all PeerConnections. The error message will contain the ID of the failed\n\/\/ PeerConnections so you can remove them\nfunc (s *TrackLocalStaticSample) WriteSample(sample media.Sample) error {\n\ts.rtpTrack.mu.RLock()\n\tp := s.packetizer\n\tclockRate := s.clockRate\n\ts.rtpTrack.mu.RUnlock()\n\n\tif p == nil {\n\t\treturn nil\n\t}\n\n\t\/\/ skip packets by the number of previously dropped packets\n\tfor i := uint16(0); i < sample.PrevDroppedPackets; i++ {\n\t\ts.sequencer.NextSequenceNumber()\n\t}\n\n\tsamples := uint32(sample.Duration.Seconds() * clockRate)\n\tif sample.PrevDroppedPackets > 0 {\n\t\tp.SkipSamples(samples * uint32(sample.PrevDroppedPackets))\n\t}\n\tpackets := p.Packetize(sample.Data, samples)\n\n\twriteErrs := []error{}\n\tfor _, p := range packets {\n\t\tif err := s.rtpTrack.WriteRTP(p); err != nil {\n\t\t\twriteErrs = append(writeErrs, err)\n\t\t}\n\t}\n\n\treturn util.FlattenErrs(writeErrs)\n}\n<|endoftext|>"} {"text":"<commit_before>package adsi\n\nimport (\n\t\"runtime\"\n\n\t\"github.com\/go-ole\/go-ole\"\n)\n\ntype action func() error\n\nfunc run(a action) error {\n\t\/\/ COM is initialized per-thread so we want to prevent the scheduler from\n\t\/\/ switching us over to another thread while we're executing.\n\truntime.LockOSThread()\n\tdefer runtime.UnlockOSThread()\n\n\t\/\/ TODO: Determine whether the cost of using a dedicated thread is less than\n\t\/\/ the cost of setting up and tearing down the COM interface for every call.\n\n\t\/\/ FIXME: Find out whether CoInitializeEx can handle simultaneous calls. If\n\t\/\/ not, guard it with a global mutex.\n\n\t\/\/ Initialize COM with a multithreaded compartment.\n\t\/\/ See: https:\/\/msdn.microsoft.com\/en-us\/library\/ms809971\n\tif err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED|ole.COINIT_DISABLE_OLE1DDE); err != nil {\n\t\toleerr := err.(*ole.OleError)\n\t\t\/\/ S_FALSE = 0x00000001 \/\/ CoInitializeEx was already called on this thread\n\t\tif oleerr.Code() != ole.S_OK && oleerr.Code() != 0x00000001 {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t\/\/ From [MSDN](https:\/\/msdn.microsoft.com\/en-us\/library\/windows\/desktop\/ms688715):\n\t\/\/\n\t\/\/ Closes the COM library on the current thread, unloads all DLLs loaded by\n\t\/\/ the thread, frees any other resources that the thread maintains, and\n\t\/\/ forces all RPC connections on the thread to close.\n\t\/\/\n\t\/\/ A thread must call CoUninitialize once for each successful call it has\n\t\/\/ made to the CoInitialize or CoInitializeEx function, including any call\n\t\/\/ that returns S_FALSE.\n\tdefer ole.CoUninitialize()\n\n\treturn a()\n}\n<commit_msg>Disabled thread locking<commit_after>package adsi\n\ntype action func() error\n\nfunc run(a action) error {\n\t\/*\n\t\t\/\/ COM is initialized per-thread so we want to prevent the scheduler from\n\t\t\/\/ switching us over to another thread while we're executing.\n\t\truntime.LockOSThread()\n\t\tdefer runtime.UnlockOSThread()\n\n\t\t\/\/ TODO: Determine whether the cost of using a dedicated thread is less than\n\t\t\/\/ the cost of setting up and tearing down the COM interface for every call.\n\n\t\t\/\/ FIXME: Find out whether CoInitializeEx can handle simultaneous calls. If\n\t\t\/\/ not, guard it with a global mutex.\n\n\t\t\/\/ Initialize COM with a multithreaded compartment.\n\t\t\/\/ See: https:\/\/msdn.microsoft.com\/en-us\/library\/ms809971\n\t\tif err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED|ole.COINIT_DISABLE_OLE1DDE); err != nil {\n\t\t\toleerr := err.(*ole.OleError)\n\t\t\t\/\/ S_FALSE = 0x00000001 \/\/ CoInitializeEx was already called on this thread\n\t\t\tif oleerr.Code() != ole.S_OK && oleerr.Code() != 0x00000001 {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t\/\/ From [MSDN](https:\/\/msdn.microsoft.com\/en-us\/library\/windows\/desktop\/ms688715):\n\t\t\/\/\n\t\t\/\/ Closes the COM library on the current thread, unloads all DLLs loaded by\n\t\t\/\/ the thread, frees any other resources that the thread maintains, and\n\t\t\/\/ forces all RPC connections on the thread to close.\n\t\t\/\/\n\t\t\/\/ A thread must call CoUninitialize once for each successful call it has\n\t\t\/\/ made to the CoInitialize or CoInitializeEx function, including any call\n\t\t\/\/ that returns S_FALSE.\n\t\tdefer ole.CoUninitialize()\n\t*\/\n\n\treturn a()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"testing\"\n\t\"time\"\n\t\"unicode\/utf8\"\n)\n\nfunc TestExactly16Bytes(t *testing.T) {\n\tvar tests = []string{\n\t\t\"\",\n\t\t\"a\",\n\t\t\"日本語\",\n\t\t\"1234567890123456\",\n\t\t\"12345678901234567890\",\n\t\t\"1234567890123本語4567890\",\n\t\t\"12345678901234日本語567890\",\n\t\t\"123456789012345日本語67890\",\n\t\t\"1234567890123456日本語7890\",\n\t\t\"1234567890123456日本語7日本語890\",\n\t}\n\tfor _, str := range tests {\n\t\tgot := exactly16Bytes(str)\n\t\tif len(got) != 16 {\n\t\t\tt.Errorf(\"exactly16Bytes(%q) is %q, length %d\", str, got, len(got))\n\t\t}\n\t\t\/\/ Make sure it is full runes.\n\t\tfor _, c := range got {\n\t\t\tif c == utf8.RuneError {\n\t\t\t\tt.Errorf(\"exactly16Bytes(%q) is %q, has partial rune\", str, got)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ tmpDir creates a temporary directory and returns its name.\nfunc tmpDir(t *testing.T) string {\n\tname, err := ioutil.TempDir(\"\", \"pack\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn name\n}\n\n\/\/ Test that we can create an archive, write to it, and get the same contents back.\n\/\/ Tests the rv and then the pv command on a new archive.\nfunc TestCreate(t *testing.T) {\n\tdir := tmpDir(t)\n\tdefer os.RemoveAll(dir)\n\tname := filepath.Join(dir, \"pack.a\")\n\tar := archive(name, os.O_RDWR, nil)\n\t\/\/ Add an entry by hand.\n\tar.addFile(helloFile.Reset())\n\tar.fd.Close()\n\t\/\/ Now check it.\n\tar = archive(name, os.O_RDONLY, []string{helloFile.name})\n\tvar buf bytes.Buffer\n\tstdout = &buf\n\tverbose = true\n\tdefer func() {\n\t\tstdout = os.Stdout\n\t\tverbose = false\n\t}()\n\tar.scan(ar.printContents)\n\tar.fd.Close()\n\tresult := buf.String()\n\t\/\/ Expect verbose output plus file contents.\n\texpect := fmt.Sprintf(\"%s\\n%s\", helloFile.name, helloFile.contents)\n\tif result != expect {\n\t\tt.Fatalf(\"expected %q got %q\", expect, result)\n\t}\n}\n\n\/\/ Test that we can create an archive, put some files in it, and get back a correct listing.\n\/\/ Tests the tv command.\nfunc TestTableOfContents(t *testing.T) {\n\tdir := tmpDir(t)\n\tdefer os.RemoveAll(dir)\n\tname := filepath.Join(dir, \"pack.a\")\n\tar := archive(name, os.O_RDWR, nil)\n\t\/\/ Add some entries by hand.\n\tar.addFile(helloFile.Reset())\n\tar.addFile(goodbyeFile.Reset())\n\tar.fd.Close()\n\t\/\/ Now print it.\n\tar = archive(name, os.O_RDONLY, nil)\n\tvar buf bytes.Buffer\n\tstdout = &buf\n\tverbose = true\n\tdefer func() {\n\t\tstdout = os.Stdout\n\t\tverbose = false\n\t}()\n\tar.scan(ar.tableOfContents)\n\tar.fd.Close()\n\tresult := buf.String()\n\t\/\/ Expect verbose listing.\n\texpect := fmt.Sprintf(\"%s\\n%s\\n\", helloFile.Entry(), goodbyeFile.Entry())\n\tif result != expect {\n\t\tt.Fatalf(\"expected %q got %q\", expect, result)\n\t}\n\t\/\/ Do it again without verbose.\n\tverbose = false\n\tbuf.Reset()\n\tar = archive(name, os.O_RDONLY, nil)\n\tar.scan(ar.tableOfContents)\n\tar.fd.Close()\n\tresult = buf.String()\n\t\/\/ Expect non-verbose listing.\n\texpect = fmt.Sprintf(\"%s\\n%s\\n\", helloFile.name, goodbyeFile.name)\n\tif result != expect {\n\t\tt.Fatalf(\"expected %q got %q\", expect, result)\n\t}\n}\n\n\/\/ Test that we can create an archive, put some files in it, and get back a file.\n\/\/ Tests the x command.\nfunc TestExtract(t *testing.T) {\n\tdir := tmpDir(t)\n\tdefer os.RemoveAll(dir)\n\tname := filepath.Join(dir, \"pack.a\")\n\tar := archive(name, os.O_RDWR, nil)\n\t\/\/ Add some entries by hand.\n\tar.addFile(helloFile.Reset())\n\tar.addFile(goodbyeFile.Reset())\n\tar.fd.Close()\n\t\/\/ Now extract one file. We chdir to the directory of the archive for simplicity.\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\tt.Fatal(\"os.Getwd: \", err)\n\t}\n\terr = os.Chdir(dir)\n\tif err != nil {\n\t\tt.Fatal(\"os.Chdir: \", err)\n\t}\n\tdefer func() {\n\t\terr := os.Chdir(pwd)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"os.Chdir: \", err)\n\t\t}\n\t}()\n\tar = archive(name, os.O_RDONLY, []string{goodbyeFile.name})\n\tar.scan(ar.extractContents)\n\tar.fd.Close()\n\tdata, err := ioutil.ReadFile(goodbyeFile.name)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Expect contents of file.\n\tresult := string(data)\n\texpect := goodbyeFile.contents\n\tif result != expect {\n\t\tt.Fatalf(\"expected %q got %q\", expect, result)\n\t}\n}\n\n\/\/ Test that pack-created archives can be understood by the tools.\nfunc TestHello(t *testing.T) {\n\tdir := tmpDir(t)\n\tdefer os.RemoveAll(dir)\n\thello := filepath.Join(dir, \"hello.go\")\n\tprog := `\n\t\tpackage main\n\t\tfunc main() {\n\t\t\tprintln(\"hello world\")\n\t\t}\n\t`\n\terr := ioutil.WriteFile(hello, []byte(prog), 0666)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trun := func(args ...string) string {\n\t\tcmd := exec.Command(args[0], args[1:]...)\n\t\tcmd.Dir = dir\n\t\tout, err := cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%v: %v\\n%s\", args, err, string(out))\n\t\t}\n\t\treturn string(out)\n\t}\n\n\tout := run(\"go\", \"env\")\n\tre, err := regexp.Compile(`\\s*GOCHAR=['\"]?(\\w)['\"]?`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfields := re.FindStringSubmatch(out)\n\tif fields == nil {\n\t\tt.Fatal(\"cannot find GOCHAR in 'go env' output:\\n\", out)\n\t}\n\tchar := fields[1]\n\trun(\"go\", \"build\", \"cmd\/pack\") \/\/ writes pack binary to dir\n\trun(\"go\", \"tool\", char+\"g\", \"hello.go\")\n\trun(\".\/pack\", \"grc\", \"hello.a\", \"hello.\"+char)\n\trun(\"go\", \"tool\", char+\"l\", \"-o\", \"a.out\", \"hello.a\")\n\tout = run(\".\/a.out\")\n\tif out != \"hello world\\n\" {\n\t\tt.Fatal(\"incorrect output: %q, want %q\", out, \"hello world\\n\")\n\t}\n}\n\n\/\/ Fake implementation of files.\n\nvar helloFile = &FakeFile{\n\tname: \"hello\",\n\tcontents: \"hello world\", \/\/ 11 bytes, an odd number.\n\tmode: 0644,\n}\n\nvar goodbyeFile = &FakeFile{\n\tname: \"goodbye\",\n\tcontents: \"Sayonara, Jim\", \/\/ 13 bytes, another odd number.\n\tmode: 0644,\n}\n\n\/\/ FakeFile implements FileLike and also os.FileInfo.\ntype FakeFile struct {\n\tname string\n\tcontents string\n\tmode os.FileMode\n\toffset int\n}\n\n\/\/ Reset prepares a FakeFile for reuse.\nfunc (f *FakeFile) Reset() *FakeFile {\n\tf.offset = 0\n\treturn f\n}\n\n\/\/ FileLike methods.\n\nfunc (f *FakeFile) Name() string {\n\t\/\/ A bit of a cheat: we only have a basename, so that's also ok for FileInfo.\n\treturn f.name\n}\n\nfunc (f *FakeFile) Stat() (os.FileInfo, error) {\n\treturn f, nil\n}\n\nfunc (f *FakeFile) Read(p []byte) (int, error) {\n\tif f.offset >= len(f.contents) {\n\t\treturn 0, io.EOF\n\t}\n\tn := copy(p, f.contents[f.offset:])\n\tf.offset += n\n\treturn n, nil\n}\n\nfunc (f *FakeFile) Close() error {\n\treturn nil\n}\n\n\/\/ os.FileInfo methods.\n\nfunc (f *FakeFile) Size() int64 {\n\treturn int64(len(f.contents))\n}\n\nfunc (f *FakeFile) Mode() os.FileMode {\n\treturn f.mode\n}\n\nfunc (f *FakeFile) ModTime() time.Time {\n\treturn time.Time{}\n}\n\nfunc (f *FakeFile) IsDir() bool {\n\treturn false\n}\n\nfunc (f *FakeFile) Sys() interface{} {\n\treturn nil\n}\n\n\/\/ Special helpers.\n\nfunc (f *FakeFile) Entry() *Entry {\n\treturn &Entry{\n\t\tname: f.name,\n\t\tmtime: 0, \/\/ Defined to be zero.\n\t\tuid: 0, \/\/ Ditto.\n\t\tgid: 0, \/\/ Ditto.\n\t\tmode: f.mode,\n\t\tsize: int64(len(f.contents)),\n\t}\n}\n<commit_msg>cmd\/pack: provide executable name in TestHello<commit_after>\/\/ Copyright 2014 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"regexp\"\n\t\"testing\"\n\t\"time\"\n\t\"unicode\/utf8\"\n)\n\nfunc TestExactly16Bytes(t *testing.T) {\n\tvar tests = []string{\n\t\t\"\",\n\t\t\"a\",\n\t\t\"日本語\",\n\t\t\"1234567890123456\",\n\t\t\"12345678901234567890\",\n\t\t\"1234567890123本語4567890\",\n\t\t\"12345678901234日本語567890\",\n\t\t\"123456789012345日本語67890\",\n\t\t\"1234567890123456日本語7890\",\n\t\t\"1234567890123456日本語7日本語890\",\n\t}\n\tfor _, str := range tests {\n\t\tgot := exactly16Bytes(str)\n\t\tif len(got) != 16 {\n\t\t\tt.Errorf(\"exactly16Bytes(%q) is %q, length %d\", str, got, len(got))\n\t\t}\n\t\t\/\/ Make sure it is full runes.\n\t\tfor _, c := range got {\n\t\t\tif c == utf8.RuneError {\n\t\t\t\tt.Errorf(\"exactly16Bytes(%q) is %q, has partial rune\", str, got)\n\t\t\t}\n\t\t}\n\t}\n}\n\n\/\/ tmpDir creates a temporary directory and returns its name.\nfunc tmpDir(t *testing.T) string {\n\tname, err := ioutil.TempDir(\"\", \"pack\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\treturn name\n}\n\n\/\/ Test that we can create an archive, write to it, and get the same contents back.\n\/\/ Tests the rv and then the pv command on a new archive.\nfunc TestCreate(t *testing.T) {\n\tdir := tmpDir(t)\n\tdefer os.RemoveAll(dir)\n\tname := filepath.Join(dir, \"pack.a\")\n\tar := archive(name, os.O_RDWR, nil)\n\t\/\/ Add an entry by hand.\n\tar.addFile(helloFile.Reset())\n\tar.fd.Close()\n\t\/\/ Now check it.\n\tar = archive(name, os.O_RDONLY, []string{helloFile.name})\n\tvar buf bytes.Buffer\n\tstdout = &buf\n\tverbose = true\n\tdefer func() {\n\t\tstdout = os.Stdout\n\t\tverbose = false\n\t}()\n\tar.scan(ar.printContents)\n\tar.fd.Close()\n\tresult := buf.String()\n\t\/\/ Expect verbose output plus file contents.\n\texpect := fmt.Sprintf(\"%s\\n%s\", helloFile.name, helloFile.contents)\n\tif result != expect {\n\t\tt.Fatalf(\"expected %q got %q\", expect, result)\n\t}\n}\n\n\/\/ Test that we can create an archive, put some files in it, and get back a correct listing.\n\/\/ Tests the tv command.\nfunc TestTableOfContents(t *testing.T) {\n\tdir := tmpDir(t)\n\tdefer os.RemoveAll(dir)\n\tname := filepath.Join(dir, \"pack.a\")\n\tar := archive(name, os.O_RDWR, nil)\n\t\/\/ Add some entries by hand.\n\tar.addFile(helloFile.Reset())\n\tar.addFile(goodbyeFile.Reset())\n\tar.fd.Close()\n\t\/\/ Now print it.\n\tar = archive(name, os.O_RDONLY, nil)\n\tvar buf bytes.Buffer\n\tstdout = &buf\n\tverbose = true\n\tdefer func() {\n\t\tstdout = os.Stdout\n\t\tverbose = false\n\t}()\n\tar.scan(ar.tableOfContents)\n\tar.fd.Close()\n\tresult := buf.String()\n\t\/\/ Expect verbose listing.\n\texpect := fmt.Sprintf(\"%s\\n%s\\n\", helloFile.Entry(), goodbyeFile.Entry())\n\tif result != expect {\n\t\tt.Fatalf(\"expected %q got %q\", expect, result)\n\t}\n\t\/\/ Do it again without verbose.\n\tverbose = false\n\tbuf.Reset()\n\tar = archive(name, os.O_RDONLY, nil)\n\tar.scan(ar.tableOfContents)\n\tar.fd.Close()\n\tresult = buf.String()\n\t\/\/ Expect non-verbose listing.\n\texpect = fmt.Sprintf(\"%s\\n%s\\n\", helloFile.name, goodbyeFile.name)\n\tif result != expect {\n\t\tt.Fatalf(\"expected %q got %q\", expect, result)\n\t}\n}\n\n\/\/ Test that we can create an archive, put some files in it, and get back a file.\n\/\/ Tests the x command.\nfunc TestExtract(t *testing.T) {\n\tdir := tmpDir(t)\n\tdefer os.RemoveAll(dir)\n\tname := filepath.Join(dir, \"pack.a\")\n\tar := archive(name, os.O_RDWR, nil)\n\t\/\/ Add some entries by hand.\n\tar.addFile(helloFile.Reset())\n\tar.addFile(goodbyeFile.Reset())\n\tar.fd.Close()\n\t\/\/ Now extract one file. We chdir to the directory of the archive for simplicity.\n\tpwd, err := os.Getwd()\n\tif err != nil {\n\t\tt.Fatal(\"os.Getwd: \", err)\n\t}\n\terr = os.Chdir(dir)\n\tif err != nil {\n\t\tt.Fatal(\"os.Chdir: \", err)\n\t}\n\tdefer func() {\n\t\terr := os.Chdir(pwd)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"os.Chdir: \", err)\n\t\t}\n\t}()\n\tar = archive(name, os.O_RDONLY, []string{goodbyeFile.name})\n\tar.scan(ar.extractContents)\n\tar.fd.Close()\n\tdata, err := ioutil.ReadFile(goodbyeFile.name)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t\/\/ Expect contents of file.\n\tresult := string(data)\n\texpect := goodbyeFile.contents\n\tif result != expect {\n\t\tt.Fatalf(\"expected %q got %q\", expect, result)\n\t}\n}\n\n\/\/ Test that pack-created archives can be understood by the tools.\nfunc TestHello(t *testing.T) {\n\tdir := tmpDir(t)\n\tdefer os.RemoveAll(dir)\n\thello := filepath.Join(dir, \"hello.go\")\n\tprog := `\n\t\tpackage main\n\t\tfunc main() {\n\t\t\tprintln(\"hello world\")\n\t\t}\n\t`\n\terr := ioutil.WriteFile(hello, []byte(prog), 0666)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\trun := func(args ...string) string {\n\t\tcmd := exec.Command(args[0], args[1:]...)\n\t\tcmd.Dir = dir\n\t\tout, err := cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"%v: %v\\n%s\", args, err, string(out))\n\t\t}\n\t\treturn string(out)\n\t}\n\n\tout := run(\"go\", \"env\")\n\tre, err := regexp.Compile(`\\s*GOCHAR=['\"]?(\\w)['\"]?`)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfields := re.FindStringSubmatch(out)\n\tif fields == nil {\n\t\tt.Fatal(\"cannot find GOCHAR in 'go env' output:\\n\", out)\n\t}\n\tchar := fields[1]\n\trun(\"go\", \"build\", \"-o\", \"pack\", \"cmd\/pack\") \/\/ writes pack binary to dir\n\trun(\"go\", \"tool\", char+\"g\", \"hello.go\")\n\trun(\".\/pack\", \"grc\", \"hello.a\", \"hello.\"+char)\n\trun(\"go\", \"tool\", char+\"l\", \"-o\", \"a.out\", \"hello.a\")\n\tout = run(\".\/a.out\")\n\tif out != \"hello world\\n\" {\n\t\tt.Fatal(\"incorrect output: %q, want %q\", out, \"hello world\\n\")\n\t}\n}\n\n\/\/ Fake implementation of files.\n\nvar helloFile = &FakeFile{\n\tname: \"hello\",\n\tcontents: \"hello world\", \/\/ 11 bytes, an odd number.\n\tmode: 0644,\n}\n\nvar goodbyeFile = &FakeFile{\n\tname: \"goodbye\",\n\tcontents: \"Sayonara, Jim\", \/\/ 13 bytes, another odd number.\n\tmode: 0644,\n}\n\n\/\/ FakeFile implements FileLike and also os.FileInfo.\ntype FakeFile struct {\n\tname string\n\tcontents string\n\tmode os.FileMode\n\toffset int\n}\n\n\/\/ Reset prepares a FakeFile for reuse.\nfunc (f *FakeFile) Reset() *FakeFile {\n\tf.offset = 0\n\treturn f\n}\n\n\/\/ FileLike methods.\n\nfunc (f *FakeFile) Name() string {\n\t\/\/ A bit of a cheat: we only have a basename, so that's also ok for FileInfo.\n\treturn f.name\n}\n\nfunc (f *FakeFile) Stat() (os.FileInfo, error) {\n\treturn f, nil\n}\n\nfunc (f *FakeFile) Read(p []byte) (int, error) {\n\tif f.offset >= len(f.contents) {\n\t\treturn 0, io.EOF\n\t}\n\tn := copy(p, f.contents[f.offset:])\n\tf.offset += n\n\treturn n, nil\n}\n\nfunc (f *FakeFile) Close() error {\n\treturn nil\n}\n\n\/\/ os.FileInfo methods.\n\nfunc (f *FakeFile) Size() int64 {\n\treturn int64(len(f.contents))\n}\n\nfunc (f *FakeFile) Mode() os.FileMode {\n\treturn f.mode\n}\n\nfunc (f *FakeFile) ModTime() time.Time {\n\treturn time.Time{}\n}\n\nfunc (f *FakeFile) IsDir() bool {\n\treturn false\n}\n\nfunc (f *FakeFile) Sys() interface{} {\n\treturn nil\n}\n\n\/\/ Special helpers.\n\nfunc (f *FakeFile) Entry() *Entry {\n\treturn &Entry{\n\t\tname: f.name,\n\t\tmtime: 0, \/\/ Defined to be zero.\n\t\tuid: 0, \/\/ Ditto.\n\t\tgid: 0, \/\/ Ditto.\n\t\tmode: f.mode,\n\t\tsize: int64(len(f.contents)),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2015 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage state\n\nimport (\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/juju\/loggo\"\n\t\"github.com\/juju\/names\"\n\n\t\"github.com\/juju\/juju\/workload\"\n\t\"github.com\/juju\/juju\/workload\/persistence\"\n)\n\nvar logger = loggo.GetLogger(\"juju.workload.state\")\n\n\/\/ TODO(ericsnow) Add names.WorkloadTag and use it here?\n\n\/\/ TODO(ericsnow) We need a worker to clean up dying workloads.\n\n\/\/ The persistence methods needed for workloads in state.\ntype workloadsPersistence interface {\n\tTrack(info workload.Info) (bool, error)\n\t\/\/ SetStatus updates the status for a payload.\n\tSetStatus(docID, status string) (bool, error)\n\tList(ids ...string) ([]workload.Info, []string, error)\n\tListAll() ([]workload.Info, error)\n\tUntrack(id string) (bool, error)\n}\n\n\/\/ UnitWorkloads provides the functionality related to a unit's\n\/\/ workloads, as needed by state.\ntype UnitWorkloads struct {\n\t\/\/ Persist is the persistence layer that will be used.\n\tPersist workloadsPersistence\n\t\/\/ Unit identifies the unit associated with the workloads.\n\tUnit names.UnitTag\n}\n\n\/\/ NewUnitWorkloads builds a UnitWorkloads for a unit.\nfunc NewUnitWorkloads(st persistence.PersistenceBase, unit names.UnitTag) *UnitWorkloads {\n\tpersist := persistence.NewPersistence(st, unit)\n\treturn &UnitWorkloads{\n\t\tPersist: persist,\n\t\tUnit: unit,\n\t}\n}\n\n\/\/ Track inserts the provided workload info in state.\nfunc (ps UnitWorkloads) Track(info workload.Info) error {\n\tlogger.Tracef(\"tracking %#v\", info)\n\tif err := info.Validate(); err != nil {\n\t\treturn errors.NewNotValid(err, \"bad workload info\")\n\t}\n\n\tok, err := ps.Persist.Track(info)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tif !ok {\n\t\treturn errors.NotValidf(\"workload %s (already in state)\", info.ID())\n\t}\n\n\treturn nil\n}\n\n\/\/ SetStatus updates the raw status for the identified workload to the\n\/\/ provided value.\nfunc (ps UnitWorkloads) SetStatus(docID, status string) error {\n\tlogger.Tracef(\"setting payload status for %q\/%q to %q\", docID, status)\n\n\tif err := workload.ValidateState(status); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tfound, err := ps.Persist.SetStatus(docID, status)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tif !found {\n\t\treturn errors.NotFoundf(docID)\n\t}\n\treturn nil\n}\n\nfunc validateStatus(status string) error {\n\tswitch status {\n\tcase workload.StateStarting,\n\t\tworkload.StateRunning,\n\t\tworkload.StateStopping,\n\t\tworkload.StateStopped:\n\t\treturn nil\n\t}\n\treturn errors.Errorf(\"invalid status, must be one of the following: starting, running, stopping, stopped\")\n}\n\n\/\/ List builds the list of workload information for the provided workload\n\/\/ IDs. If none are provided then the list contains the info for all\n\/\/ workloads associated with the unit. Missing workloads\n\/\/ are ignored.\nfunc (ps UnitWorkloads) List(ids ...string) ([]workload.Info, error) {\n\tlogger.Tracef(\"listing %v\", ids)\n\tif len(ids) == 0 {\n\t\tresults, err := ps.Persist.ListAll()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\treturn results, nil\n\t}\n\n\tresults, _, err := ps.Persist.List(ids...)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\treturn results, nil\n}\n\n\/\/ Untrack removes the identified workload from state. It does not\n\/\/ trigger the actual destruction of the workload.\nfunc (ps UnitWorkloads) Untrack(id string) error {\n\tlogger.Tracef(\"untracking %q\", id)\n\t\/\/ If the record wasn't found then we're already done.\n\t_, err := ps.Persist.Untrack(id)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\treturn nil\n}\n<commit_msg>fixing trace output<commit_after>\/\/ Copyright 2015 Canonical Ltd.\n\/\/ Licensed under the AGPLv3, see LICENCE file for details.\n\npackage state\n\nimport (\n\t\"github.com\/juju\/errors\"\n\t\"github.com\/juju\/loggo\"\n\t\"github.com\/juju\/names\"\n\n\t\"github.com\/juju\/juju\/workload\"\n\t\"github.com\/juju\/juju\/workload\/persistence\"\n)\n\nvar logger = loggo.GetLogger(\"juju.workload.state\")\n\n\/\/ TODO(ericsnow) Add names.WorkloadTag and use it here?\n\n\/\/ TODO(ericsnow) We need a worker to clean up dying workloads.\n\n\/\/ The persistence methods needed for workloads in state.\ntype workloadsPersistence interface {\n\tTrack(info workload.Info) (bool, error)\n\t\/\/ SetStatus updates the status for a payload.\n\tSetStatus(docID, status string) (bool, error)\n\tList(ids ...string) ([]workload.Info, []string, error)\n\tListAll() ([]workload.Info, error)\n\tUntrack(id string) (bool, error)\n}\n\n\/\/ UnitWorkloads provides the functionality related to a unit's\n\/\/ workloads, as needed by state.\ntype UnitWorkloads struct {\n\t\/\/ Persist is the persistence layer that will be used.\n\tPersist workloadsPersistence\n\t\/\/ Unit identifies the unit associated with the workloads.\n\tUnit names.UnitTag\n}\n\n\/\/ NewUnitWorkloads builds a UnitWorkloads for a unit.\nfunc NewUnitWorkloads(st persistence.PersistenceBase, unit names.UnitTag) *UnitWorkloads {\n\tpersist := persistence.NewPersistence(st, unit)\n\treturn &UnitWorkloads{\n\t\tPersist: persist,\n\t\tUnit: unit,\n\t}\n}\n\n\/\/ Track inserts the provided workload info in state.\nfunc (ps UnitWorkloads) Track(info workload.Info) error {\n\tlogger.Tracef(\"tracking %#v\", info)\n\tif err := info.Validate(); err != nil {\n\t\treturn errors.NewNotValid(err, \"bad workload info\")\n\t}\n\n\tok, err := ps.Persist.Track(info)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tif !ok {\n\t\treturn errors.NotValidf(\"workload %s (already in state)\", info.ID())\n\t}\n\n\treturn nil\n}\n\n\/\/ SetStatus updates the raw status for the identified workload to the\n\/\/ provided value.\nfunc (ps UnitWorkloads) SetStatus(docID, status string) error {\n\tlogger.Tracef(\"setting payload status for %q to %q\", docID, status)\n\n\tif err := workload.ValidateState(status); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\tfound, err := ps.Persist.SetStatus(docID, status)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tif !found {\n\t\treturn errors.NotFoundf(docID)\n\t}\n\treturn nil\n}\n\nfunc validateStatus(status string) error {\n\tswitch status {\n\tcase workload.StateStarting,\n\t\tworkload.StateRunning,\n\t\tworkload.StateStopping,\n\t\tworkload.StateStopped:\n\t\treturn nil\n\t}\n\treturn errors.Errorf(\"invalid status, must be one of the following: starting, running, stopping, stopped\")\n}\n\n\/\/ List builds the list of workload information for the provided workload\n\/\/ IDs. If none are provided then the list contains the info for all\n\/\/ workloads associated with the unit. Missing workloads\n\/\/ are ignored.\nfunc (ps UnitWorkloads) List(ids ...string) ([]workload.Info, error) {\n\tlogger.Tracef(\"listing %v\", ids)\n\tif len(ids) == 0 {\n\t\tresults, err := ps.Persist.ListAll()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\treturn results, nil\n\t}\n\n\tresults, _, err := ps.Persist.List(ids...)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\treturn results, nil\n}\n\n\/\/ Untrack removes the identified workload from state. It does not\n\/\/ trigger the actual destruction of the workload.\nfunc (ps UnitWorkloads) Untrack(id string) error {\n\tlogger.Tracef(\"untracking %q\", id)\n\t\/\/ If the record wasn't found then we're already done.\n\t_, err := ps.Persist.Untrack(id)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package summa\n\nimport (\n\t\"database\/sql\"\n\t_ \"go-sqlite3\"\n)\n\ntype snippetComment struct {\n\tID int64 `json:\"id\"`\n\tSnippetID int64 `json:\"-\"`\n\tUsername string `json:\"username\"`\n\tDisplayName string `json:\"display_name\"`\n\tMessage string `json:\"message\"`\n\tCreated int64 `json:\"created\"`\n\tUpdated int64 `json:\"updated\"`\n}\n\ntype snippetFile struct {\n\tSnippetID int64 `json:\"-\"`\n\tFilename string `json:\"filename\"`\n\tLanguage string `json:\"language\"`\n\tContents string `json:\"contents,omitempty\"`\n}\n\ntype snippetComments []snippetComment\ntype snippetFiles []snippetFile\n\ntype snippet struct {\n\tID int64 `json:\"-\"`\n\tID36 string `json:\"id\"`\n\tUsername string `json:\"username\"`\n\tDisplayName string `json:\"display_name\"`\n\tDescription string `json:\"description\"`\n\tCreated int64 `json:\"created\"`\n\tUpdated int64 `json:\"updated\"`\n\tFiles *snippetFiles `json:\"files,omitempty\"`\n\tComments *snippetComments `json:\"comments,omitempty\"`\n\tRevisions []string `json:\"revisions,omitempty\"`\n}\n\ntype snippets []snippet\n\nfunc snippetsUnread(db *sql.DB, username string) (*snippets, error) {\n\tvar snips snippets\n\n\trows, err := db.Query(\n\t\t\"SELECT id_base36,username,display_name,description,created,updated \"+\n\t\t\t\"FROM snippet JOIN user USING (username) WHERE snippet_id NOT IN \"+\n\t\t\t\"(SELECT snippet_id FROM snippet_view WHERE username=?)\",\n\t\tusername,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor rows.Next() {\n\t\tvar snip snippet\n\n\t\trows.Scan(\n\t\t\t&snip.ID36,\n\t\t\t&snip.Username,\n\t\t\t&snip.DisplayName,\n\t\t\t&snip.Description,\n\t\t\t&snip.Created,\n\t\t\t&snip.Updated,\n\t\t)\n\n\t\tsnips = append(snips, snip)\n\t}\n\n\tif err = rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &snips, nil\n}\n\nfunc snippetFetch(db *sql.DB, id int64) (*snippet, error) {\n\tvar snip snippet\n\n\trow := db.QueryRow(\n\t\t\"SELECT id_base36,username,display_name,description,created,updated \"+\n\t\t\t\"FROM snippet JOIN user USING (username) WHERE snippet_id=?\",\n\t\tid,\n\t)\n\n\terr := row.Scan(\n\t\t&snip.ID36,\n\t\t&snip.Username,\n\t\t&snip.DisplayName,\n\t\t&snip.Description,\n\t\t&snip.Created,\n\t\t&snip.Updated,\n\t)\n\n\tswitch {\n\tcase err == sql.ErrNoRows:\n\t\treturn nil, nil\n\tcase err != nil:\n\t\treturn nil, err\n\t}\n\n\tsnip.Files, err = snippetFetchFiles(db, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsnip.Comments, err = snippetFetchComments(db, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &snip, nil\n}\n\nfunc snippetFetchComments(db *sql.DB, id int64) (*snippetComments, error) {\n\tvar comments snippetComments\n\n\trows, err := db.Query(\n\t\t\"SELECT comment_id,username,display_name,message,created,updated FROM \"+\n\t\t\t\"snippet_comment JOIN user USING (username) WHERE snippet_id=? ORDER BY created\",\n\t\tid,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor rows.Next() {\n\t\tvar comment snippetComment\n\n\t\trows.Scan(\n\t\t\t&comment.ID,\n\t\t\t&comment.Username,\n\t\t\t&comment.DisplayName,\n\t\t\t&comment.Message,\n\t\t\t&comment.Created,\n\t\t\t&comment.Updated,\n\t\t)\n\n\t\tcomments = append(comments, comment)\n\t}\n\n\tif err = rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &comments, nil\n}\n\nfunc snippetFetchFiles(db *sql.DB, id int64) (*snippetFiles, error) {\n\tvar files snippetFiles\n\n\trows, err := db.Query(\n\t\t\"SELECT filename,language FROM \"+\n\t\t\t\"snippet_file WHERE snippet_id=? ORDER BY filename\",\n\t\tid,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor rows.Next() {\n\t\tvar file snippetFile\n\n\t\trows.Scan(\n\t\t\t&file.Filename,\n\t\t\t&file.Language,\n\t\t)\n\n\t\tfiles = append(files, file)\n\t}\n\n\tif err = rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &files, nil\n}\n<commit_msg>Add snippetCreate(), snippetDelete() and snippetIsOwnedBy()<commit_after>package summa\n\nimport (\n\t\"database\/sql\"\n\t_ \"go-sqlite3\"\n)\n\ntype snippetComment struct {\n\tID int64 `json:\"id\"`\n\tSnippetID string `json:\"-\"`\n\tUsername string `json:\"username\"`\n\tDisplayName string `json:\"display_name\"`\n\tMessage string `json:\"message\"`\n\tCreated int64 `json:\"created\"`\n\tUpdated int64 `json:\"updated\"`\n}\n\ntype snippetFile struct {\n\tSnippetID string `json:\"-\"`\n\tFilename string `json:\"filename\"`\n\tLanguage string `json:\"language\"`\n\tContents string `json:\"contents,omitempty\"`\n}\n\ntype snippetComments []snippetComment\ntype snippetFiles []snippetFile\n\ntype snippet struct {\n\tID string `json:\"id\"`\n\tUsername string `json:\"username\"`\n\tDisplayName string `json:\"display_name\"`\n\tDescription string `json:\"description\"`\n\tCreated int64 `json:\"created\"`\n\tUpdated int64 `json:\"updated\"`\n\tFiles snippetFiles `json:\"files,omitempty\"`\n\tComments snippetComments `json:\"comments,omitempty\"`\n\tRevisions []string `json:\"revisions,omitempty\"`\n}\n\ntype snippets []snippet\n\n\/\/ snippetCreate will create a new snippet and return it's id\nfunc snippetCreate(db *sql.DB, snip *snippet) (string, error) {\n\tvar err error\n\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer (func() {\n\t\tif err == nil {\n\t\t\ttx.Commit()\n\t\t} else {\n\t\t\ttx.Rollback()\n\t\t}\n\t})()\n\n\tms := UnixMilliseconds()\n\tvar id string\n\tfor {\n\t\tid = Reverse(ToBase36(ms))\n\t\tvar count int64\n\t\trow := db.QueryRow(\"SELECT COUNT(*) FROM snippet WHERE snippet_id=?\", id)\n\t\terr = row.Scan(&count)\n\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif count == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tms--\n\t}\n\n\t_, err = db.Exec(\n\t\t\"INSERT INTO snippet VALUES (?,?,?,?,0)\",\n\t\tid,\n\t\tsnip.Username,\n\t\tsnip.Description,\n\t\tms,\n\t)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfor _, file := range snip.Files {\n\t\t_, err = db.Exec(\n\t\t\t\"INSERT INTO snippet_file VALUES (?,?,?)\",\n\t\t\tid,\n\t\t\tfile.Filename,\n\t\t\tfile.Language,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\terr = repoCreate(id, nil, snip.Files)\n\n\treturn id, nil\n}\n\n\/\/ snippetDelete permanently removes a snippet\nfunc snippetDelete(db *sql.DB, id string) error {\n\tqueries := []string{\n\t\t\"DELETE FROM snippet WHERE snippet_id=?\",\n\t\t\"DELETE FROM snippet_comment WHERE snippet_id=?\",\n\t\t\"DELETE FROM snippet_file WHERE snippet_id=?\",\n\t\t\"DELETE FROM snippet_view WHERE snippet_id=?\",\n\t}\n\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, q := range queries {\n\t\t_, err = tx.Exec(q, id)\n\t\tif err != nil {\n\t\t\ttx.Rollback()\n\t\t\treturn err\n\t\t}\n\t}\n\ttx.Commit()\n\n\t\/\/ TODO: Remove git repository\n\n\treturn nil\n}\n\n\/\/ snippetIsOwnedBy returns true if the snippet with the given id is\n\/\/ owned by the given username\nfunc snippetIsOwnedBy(db *sql.DB, id, username string) (bool, error) {\n\tvar count int64\n\trow := db.QueryRow(\n\t\t\"SELECT COUNT(*) FROM snippet WHERE snippet_id=? AND username=?\",\n\t\tid,\n\t\tusername,\n\t)\n\terr := row.Scan(&count)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn count == 1, nil\n}\n\n\/\/ snippetsUnread will return unread snippets for a specific user\nfunc snippetsUnread(db *sql.DB, username string) (*snippets, error) {\n\tvar snips snippets\n\n\trows, err := db.Query(\n\t\t\"SELECT snippet_id,username,display_name,description,created,updated \"+\n\t\t\t\"FROM snippet JOIN user USING (username) WHERE snippet_id NOT IN \"+\n\t\t\t\"(SELECT snippet_id FROM snippet_view WHERE username=?)\",\n\t\tusername,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor rows.Next() {\n\t\tvar snip snippet\n\n\t\trows.Scan(\n\t\t\t&snip.ID,\n\t\t\t&snip.Username,\n\t\t\t&snip.DisplayName,\n\t\t\t&snip.Description,\n\t\t\t&snip.Created,\n\t\t\t&snip.Updated,\n\t\t)\n\n\t\tsnips = append(snips, snip)\n\t}\n\n\tif err = rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &snips, nil\n}\n\n\/\/ snippetFetch will fetch an individual snippet by ID\nfunc snippetFetch(db *sql.DB, id string) (*snippet, error) {\n\tvar snip snippet\n\n\trow := db.QueryRow(\n\t\t\"SELECT snippet_id,username,display_name,description,created,updated \"+\n\t\t\t\"FROM snippet JOIN user USING (username) WHERE snippet_id=?\",\n\t\tid,\n\t)\n\n\terr := row.Scan(\n\t\t&snip.ID,\n\t\t&snip.Username,\n\t\t&snip.DisplayName,\n\t\t&snip.Description,\n\t\t&snip.Created,\n\t\t&snip.Updated,\n\t)\n\n\tswitch {\n\tcase err == sql.ErrNoRows:\n\t\treturn nil, nil\n\tcase err != nil:\n\t\treturn nil, err\n\t}\n\n\tsnip.Files, err = snippetFetchFiles(db, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsnip.Comments, err = snippetFetchComments(db, id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfoLog.Printf(\"%+v\", snip.Comments)\n\n\treturn &snip, nil\n}\n\n\/\/ snippetFetchComments will fetch the comments for a specific snippet\nfunc snippetFetchComments(db *sql.DB, id string) (snippetComments, error) {\n\tvar comments snippetComments\n\n\trows, err := db.Query(\n\t\t\"SELECT comment_id,username,display_name,message,created,updated FROM \"+\n\t\t\t\"snippet_comment JOIN user USING (username) WHERE snippet_id=? ORDER BY created\",\n\t\tid,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor rows.Next() {\n\t\tvar comment snippetComment\n\n\t\trows.Scan(\n\t\t\t&comment.ID,\n\t\t\t&comment.Username,\n\t\t\t&comment.DisplayName,\n\t\t\t&comment.Message,\n\t\t\t&comment.Created,\n\t\t\t&comment.Updated,\n\t\t)\n\n\t\tcomments = append(comments, comment)\n\t}\n\n\tif err = rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn comments, nil\n}\n\n\/\/ snippetFetchFiles will fetch the files for a sepcific snippet\nfunc snippetFetchFiles(db *sql.DB, id string) (snippetFiles, error) {\n\tvar files snippetFiles\n\n\trows, err := db.Query(\n\t\t\"SELECT filename,language FROM \"+\n\t\t\t\"snippet_file WHERE snippet_id=? ORDER BY filename\",\n\t\tid,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor rows.Next() {\n\t\tvar file snippetFile\n\n\t\trows.Scan(\n\t\t\t&file.Filename,\n\t\t\t&file.Language,\n\t\t)\n\n\t\tfiles = append(files, file)\n\t}\n\n\tif err = rows.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn files, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"github.com\/FidelityInternational\/chaos-galago\/processor\/Godeps\/_workspace\/src\/chaos-galago\/shared\/utils\"\n\t\"github.com\/FidelityInternational\/chaos-galago\/processor\/Godeps\/_workspace\/src\/github.com\/cloudfoundry-community\/go-cfclient\"\n\t\"github.com\/FidelityInternational\/chaos-galago\/processor\/utils\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n)\n\nvar (\n\tdbConnectionString string\n\terr error\n\tconfig *cfclient.Config\n)\n\nfunc init() {\n\tdbConnectionString, err = sharedUtils.GetDBConnectionDetails()\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\tos.Exit(1)\n\t}\n\tconfig = utils.LoadCFConfig()\n\tfmt.Println(\"\\nConfig loaded:\")\n\tfmt.Println(\"ApiAddress: \", config.ApiAddress)\n\tfmt.Println(\"LoginAddress: \", config.LoginAddress)\n\tfmt.Println(\"Username: \", config.Username)\n\tfmt.Println(\"SkipSslValidation: \", config.SkipSslValidation)\n}\n\nfunc freakOut(err error) bool {\n\tif err != nil {\n\t\tfmt.Println(\"An error has occured\")\n\t\tfmt.Println(err.Error())\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc main() {\n\tcfClient := cfclient.NewClient(config)\n\n\tticker := time.NewTicker(1 * time.Minute)\n\n\tfor _ = range ticker.C {\n\t\tdb, err := sql.Open(\"mysql\", dbConnectionString)\n\t\tif freakOut(err) {\n\t\t\tdb.Close()\n\t\t\tcontinue\n\t\t}\n\t\tservices := utils.GetBoundApps(db)\n\t\tif len(services) == 0 {\n\t\t\tdb.Close()\n\t\t\tcontinue\n\t\t}\n\n\tSERVICES:\n\t\tfor _, service := range services {\n\t\t\tif utils.ShouldProcess(service.Frequency, service.LastProcessed) {\n\t\t\t\tfmt.Printf(\"\\nProcessing chaos for %s\", service.AppID)\n\t\t\t\terr = utils.UpdateLastProcessed(db, service.AppID, utils.TimeNow())\n\t\t\t\tif freakOut(err) {\n\t\t\t\t\tcontinue SERVICES\n\t\t\t\t}\n\t\t\t\tif utils.ShouldRun(service.Probability) {\n\t\t\t\t\tfmt.Printf(\"\\nRunning chaos for %s\", service.AppID)\n\t\t\t\t\tappInstances := cfClient.GetAppInstances(service.AppID)\n\t\t\t\t\tif utils.IsAppHealthy(appInstances) {\n\t\t\t\t\t\tfmt.Printf(\"\\nApp %s is Healthy\\n\", service.AppID)\n\t\t\t\t\t\tchaosInstance := strconv.Itoa(utils.PickAppInstance(appInstances))\n\t\t\t\t\t\tfmt.Printf(\"\\nAbout to kill app instance: %s at index: %s\", service.AppID, chaosInstance)\n\t\t\t\t\t\tcfClient.KillAppInstance(service.AppID, chaosInstance)\n\t\t\t\t\t\terr = utils.UpdateLastProcessed(db, service.AppID, utils.TimeNow())\n\t\t\t\t\t\tif freakOut(err) {\n\t\t\t\t\t\t\tcontinue SERVICES\n\t\t\t\t\t\t}\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfmt.Printf(\"\\nApp %s is unhealthy, skipping\\n\", service.AppID)\n\t\t\t\t\t\tcontinue SERVICES\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Printf(\"\\nNot running chaos for %s\", service.AppID)\n\t\t\t\t\terr = utils.UpdateLastProcessed(db, service.AppID, utils.TimeNow())\n\t\t\t\t\tif freakOut(err) {\n\t\t\t\t\t\tcontinue SERVICES\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"\\nSkipping processing chaos for %s\", service.AppID)\n\t\t\t\tcontinue SERVICES\n\t\t\t}\n\t\t}\n\t\tdb.Close()\n\t}\n}\n<commit_msg>extract function to reintroduce check-then-sleep behaviour<commit_after>package main\n\nimport (\n\t\"database\/sql\"\n\t\"fmt\"\n\t\"github.com\/FidelityInternational\/chaos-galago\/processor\/Godeps\/_workspace\/src\/chaos-galago\/shared\/utils\"\n\t\"github.com\/FidelityInternational\/chaos-galago\/processor\/Godeps\/_workspace\/src\/github.com\/cloudfoundry-community\/go-cfclient\"\n\t\"github.com\/FidelityInternational\/chaos-galago\/processor\/utils\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n)\n\nvar (\n\tdbConnectionString string\n\terr error\n\tconfig *cfclient.Config\n)\n\nfunc init() {\n\tdbConnectionString, err = sharedUtils.GetDBConnectionDetails()\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\tos.Exit(1)\n\t}\n\tconfig = utils.LoadCFConfig()\n\tfmt.Println(\"\\nConfig loaded:\")\n\tfmt.Println(\"ApiAddress: \", config.ApiAddress)\n\tfmt.Println(\"LoginAddress: \", config.LoginAddress)\n\tfmt.Println(\"Username: \", config.Username)\n\tfmt.Println(\"SkipSslValidation: \", config.SkipSslValidation)\n}\n\nfunc freakOut(err error) bool {\n\tif err != nil {\n\t\tfmt.Println(\"An error has occured\")\n\t\tfmt.Println(err.Error())\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc main() {\n\tcfClient := cfclient.NewClient(config)\n\n\tticker := time.NewTicker(1 * time.Minute)\n\n\tprocessServices(cfClient)\n\tfor _ = range ticker.C {\n\t\tprocessServices(cfClient)\n\t}\n}\n\nfunc processServices(cfClient *cfclient.Client) {\n\tdb, err := sql.Open(\"mysql\", dbConnectionString)\n\tif freakOut(err) {\n\t\tdb.Close()\n\t\treturn\n\t}\n\tservices := utils.GetBoundApps(db)\n\tif len(services) == 0 {\n\t\tdb.Close()\n\t\treturn\n\t}\n\nSERVICES:\n\tfor _, service := range services {\n\t\tif utils.ShouldProcess(service.Frequency, service.LastProcessed) {\n\t\t\tfmt.Printf(\"\\nProcessing chaos for %s\", service.AppID)\n\t\t\terr = utils.UpdateLastProcessed(db, service.AppID, utils.TimeNow())\n\t\t\tif freakOut(err) {\n\t\t\t\tcontinue SERVICES\n\t\t\t}\n\t\t\tif utils.ShouldRun(service.Probability) {\n\t\t\t\tfmt.Printf(\"\\nRunning chaos for %s\", service.AppID)\n\t\t\t\tappInstances := cfClient.GetAppInstances(service.AppID)\n\t\t\t\tif utils.IsAppHealthy(appInstances) {\n\t\t\t\t\tfmt.Printf(\"\\nApp %s is Healthy\\n\", service.AppID)\n\t\t\t\t\tchaosInstance := strconv.Itoa(utils.PickAppInstance(appInstances))\n\t\t\t\t\tfmt.Printf(\"\\nAbout to kill app instance: %s at index: %s\", service.AppID, chaosInstance)\n\t\t\t\t\tcfClient.KillAppInstance(service.AppID, chaosInstance)\n\t\t\t\t\terr = utils.UpdateLastProcessed(db, service.AppID, utils.TimeNow())\n\t\t\t\t\tif freakOut(err) {\n\t\t\t\t\t\tcontinue SERVICES\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Printf(\"\\nApp %s is unhealthy, skipping\\n\", service.AppID)\n\t\t\t\t\tcontinue SERVICES\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"\\nNot running chaos for %s\", service.AppID)\n\t\t\t\terr = utils.UpdateLastProcessed(db, service.AppID, utils.TimeNow())\n\t\t\t\tif freakOut(err) {\n\t\t\t\t\tcontinue SERVICES\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfmt.Printf(\"\\nSkipping processing chaos for %s\", service.AppID)\n\t\t\tcontinue SERVICES\n\t\t}\n\t}\n\tdb.Close()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Code generated by \"stringer -type=SymKind\"; DO NOT EDIT.\n\npackage objabi\n\nimport \"strconv\"\n\nconst _SymKind_name = \"SxxxSTEXTSRODATASNOPTRDATASDATASBSSSNOPTRBSSSTLSBSSSDWARFINFOSDWARFRANGESDWARFLOCSDWARFMISCSABIALIAS\"\n\nvar _SymKind_index = [...]uint8{0, 4, 9, 16, 26, 31, 35, 44, 51, 61, 72, 81, 91, 100}\n\nfunc (i SymKind) String() string {\n\tif i >= SymKind(len(_SymKind_index)-1) {\n\t\treturn \"SymKind(\" + strconv.FormatInt(int64(i), 10) + \")\"\n\t}\n\treturn _SymKind_name[_SymKind_index[i]:_SymKind_index[i+1]]\n}\n<commit_msg>[dev.link] cmd\/internal\/objabi: rerun stringer for sym kind<commit_after>\/\/ Code generated by \"stringer -type=SymKind\"; DO NOT EDIT.\n\npackage objabi\n\nimport \"strconv\"\n\nfunc _() {\n\t\/\/ An \"invalid array index\" compiler error signifies that the constant values have changed.\n\t\/\/ Re-run the stringer command to generate them again.\n\tvar x [1]struct{}\n\t_ = x[Sxxx-0]\n\t_ = x[STEXT-1]\n\t_ = x[SRODATA-2]\n\t_ = x[SNOPTRDATA-3]\n\t_ = x[SDATA-4]\n\t_ = x[SBSS-5]\n\t_ = x[SNOPTRBSS-6]\n\t_ = x[STLSBSS-7]\n\t_ = x[SDWARFINFO-8]\n\t_ = x[SDWARFRANGE-9]\n\t_ = x[SDWARFLOC-10]\n\t_ = x[SDWARFLINES-11]\n\t_ = x[SABIALIAS-12]\n\t_ = x[SLIBFUZZER_EXTRA_COUNTER-13]\n}\n\nconst _SymKind_name = \"SxxxSTEXTSRODATASNOPTRDATASDATASBSSSNOPTRBSSSTLSBSSSDWARFINFOSDWARFRANGESDWARFLOCSDWARFLINESSABIALIASSLIBFUZZER_EXTRA_COUNTER\"\n\nvar _SymKind_index = [...]uint8{0, 4, 9, 16, 26, 31, 35, 44, 51, 61, 72, 81, 92, 101, 125}\n\nfunc (i SymKind) String() string {\n\tif i >= SymKind(len(_SymKind_index)-1) {\n\t\treturn \"SymKind(\" + strconv.FormatInt(int64(i), 10) + \")\"\n\t}\n\treturn _SymKind_name[_SymKind_index[i]:_SymKind_index[i+1]]\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"encoding\/csv\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/ziutek\/mymysql\/mysql\"\n\t_ \"github.com\/ziutek\/mymysql\/native\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n)\n\nconst _DB_CONFIG_FILENAME = \"db_config.SECRET.json\"\nconst _OUTPUT_FILENAME = \"output.csv\"\nconst QUERY_LIMIT = 100\nconst _PENALTY_PERCENTAGE_CUTOFF = 0.10\n\n\/\/How many solves a user must have to have their relative scale included.\n\/\/A low value gives you far more very low or very high scores than you shoul get.\nconst _MINIMUM_SOLVES = 10\n\nvar noLimitFlag bool\nvar printPuzzleDataFlag bool\nvar cullCheaterPercentageFlag float64\nvar minimumSolvesFlag int\nvar useMockData bool\n\nfunc init() {\n\tflag.BoolVar(&noLimitFlag, \"a\", false, \"Specify to execute the solves query with no limit.\")\n\tflag.BoolVar(&printPuzzleDataFlag, \"p\", false, \"Specify that you want puzzle data printed out in the output.\")\n\tflag.Float64Var(&cullCheaterPercentageFlag, \"c\", _PENALTY_PERCENTAGE_CUTOFF, \"What percentage of solve time must be penalty for someone to be considered a cheater.\")\n\tflag.IntVar(&minimumSolvesFlag, \"n\", _MINIMUM_SOLVES, \"How many solves a user must have their scores considered.\")\n\tflag.BoolVar(&useMockData, \"m\", false, \"Use mock data (useful if you don't have a real database to test with).\")\n}\n\ntype dbConfig struct {\n\tUrl string\n\tUsername string\n\tPassword string\n\tDbName string\n\tSolvesTable string\n\tSolvesID string\n\tSolvesPuzzleID string\n\tSolvesTotalTime string\n\tSolvesPenaltyTime string\n\tSolvesUser string\n\tPuzzlesTable string\n\tPuzzlesID string\n\tPuzzlesDifficulty string\n\tPuzzlesName string\n\tPuzzlesPuzzle string\n}\n\ntype solve struct {\n\tpuzzleID int\n\ttotalTime int\n\tpenaltyTime int\n}\n\ntype userSolvesCollection struct {\n\tsolves []solve\n\tmax int\n\tmin int\n}\n\ntype puzzle struct {\n\tid int\n\tuserRelativeDifficulty float32\n\tdifficultyRating int\n\tname string\n\tpuzzle string\n}\n\ntype puzzles []puzzle\n\ntype byUserRelativeDifficulty struct {\n\tpuzzles\n}\n\nfunc (self puzzles) Len() int {\n\treturn len(self)\n}\n\nfunc (self puzzles) Swap(i, j int) {\n\tself[i], self[j] = self[j], self[i]\n}\n\nfunc (self byUserRelativeDifficulty) Less(i, j int) bool {\n\treturn self.puzzles[i].userRelativeDifficulty < self.puzzles[j].userRelativeDifficulty\n}\n\nfunc (self *userSolvesCollection) addSolve(solve solve) bool {\n\t\/\/Cull obviously incorrect solves.\n\tif solve.totalTime == 0 {\n\t\treturn false\n\t}\n\n\t\/\/Cull solves that leaned too heavily on hints.\n\tif float64(solve.penaltyTime)\/float64(solve.totalTime) > cullCheaterPercentageFlag {\n\t\treturn false\n\t}\n\n\tself.solves = append(self.solves, solve)\n\tif len(self.solves) == 1 {\n\t\tself.max = solve.totalTime\n\t\tself.min = solve.totalTime\n\t} else {\n\t\tif self.max < solve.totalTime {\n\t\t\tself.max = solve.totalTime\n\t\t}\n\t\tif self.min > solve.totalTime {\n\t\t\tself.min = solve.totalTime\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/Whehter or not this should be included in calculation.\n\/\/Basically, whether the reltaiveDifficulties will all be valid.\n\/\/Normally this returns false if there is only one solve by the user, but could also\n\/\/happen when there are multiple solves but (crazily enough) they all have exactly the same solveTime.\n\/\/This DOES happen in the production dataset.\nfunc (self *userSolvesCollection) valid() bool {\n\tif self.max == self.min {\n\t\treturn false\n\t}\n\n\tif len(self.solves) < minimumSolvesFlag {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (self *userSolvesCollection) relativeDifficulties() map[int]float32 {\n\t\/\/Returns a map of puzzle id to relative difficulty, normalized by our max and min.\n\tavgSolveTimes := make(map[int]float32)\n\t\/\/Keep track of how many times we've seen each puzzle solved by this user so we can do correct averaging.\n\tavgSolveTimesCount := make(map[int]int)\n\n\t\/\/First, collect the average solve time (in case the same user has solved more than once the same puzzle)\n\n\tfor _, solve := range self.solves {\n\t\tcurrentAvgSolveTime := avgSolveTimes[solve.puzzleID]\n\n\t\tavgSolveTimes[solve.puzzleID] = (currentAvgSolveTime*float32(avgSolveTimesCount[solve.puzzleID]) + float32(solve.totalTime)) \/ float32(avgSolveTimesCount[solve.puzzleID]+1)\n\n\t\tavgSolveTimesCount[solve.puzzleID]++\n\t}\n\n\t\/\/Now, relativize all of the scores.\n\n\tresult := make(map[int]float32)\n\n\tfor puzzleID, avgSolveTime := range avgSolveTimes {\n\t\tresult[puzzleID] = (avgSolveTime - float32(self.min)) \/ float32(self.max-self.min)\n\t}\n\n\treturn result\n}\n\nfunc main() {\n\n\tflag.Parse()\n\n\tfile, err := os.Open(_DB_CONFIG_FILENAME)\n\tif err != nil {\n\t\tlog.Fatal(\"Could not find the config file at \", _DB_CONFIG_FILENAME, \". You should copy the SAMPLE one to that filename and configure.\")\n\t\tos.Exit(1)\n\t}\n\tdefer file.Close()\n\tdecoder := json.NewDecoder(file)\n\tvar config dbConfig\n\tif err := decoder.Decode(&config); err != nil {\n\t\tlog.Fatal(\"There was an error parsing JSON from the config file: \", err)\n\t\tos.Exit(1)\n\t}\n\n\tdifficutlyRatingsChan := make(chan map[int]puzzle)\n\n\tgo getPuzzleDifficultyRatings(&config, difficutlyRatingsChan)\n\n\tvar db mysql.Conn\n\n\tif useMockData {\n\t\tdb = &mockConnection{}\n\t} else {\n\t\tdb = mysql.New(\"tcp\", \"\", config.Url, config.Username, config.Password, config.DbName)\n\t}\n\n\tif err := db.Connect(); err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\n\tvar solvesQuery string\n\n\tif noLimitFlag {\n\t\tlog.Println(\"Running without a limit for number of solves to retrieve.\")\n\t\tsolvesQuery = \"select %s, %s, %s, %s from %s\"\n\t} else {\n\t\tlog.Println(\"Running with a limit of \", QUERY_LIMIT, \" for number of solves to retrieve.\")\n\t\tsolvesQuery = \"select %s, %s, %s, %s from %s limit \" + strconv.Itoa(QUERY_LIMIT)\n\t}\n\n\tres, err := db.Start(solvesQuery, config.SolvesUser, config.SolvesPuzzleID, config.SolvesTotalTime, config.SolvesPenaltyTime, config.SolvesTable)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\n\tsolvesByUser := make(map[string]*userSolvesCollection)\n\n\tvar userSolves *userSolvesCollection\n\tvar ok bool\n\tvar i int\n\tvar skippedSolves int\n\n\t\/\/First, process all user records in the DB to collect all solves by userName.\n\tfor {\n\n\t\trow, _ := res.GetRow()\n\n\t\tif row == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tuserSolves, ok = solvesByUser[row.Str(0)]\n\n\t\tif !ok {\n\t\t\tuserSolves = new(userSolvesCollection)\n\t\t\tsolvesByUser[row.Str(0)] = userSolves\n\t\t}\n\n\t\tif !userSolves.addSolve(solve{row.Int(1), row.Int(2), row.Int(3)}) {\n\t\t\tskippedSolves++\n\t\t}\n\t\ti++\n\t}\n\n\tlog.Println(\"Processed \", i, \" solves by \", len(solvesByUser), \" users.\")\n\tlog.Println(\"Skipped \", skippedSolves, \" solves that cheated too much.\")\n\n\t\/\/Now get the relative difficulty for each user's puzzles, and collect them.\n\n\trelativeDifficultiesByPuzzle := make(map[int][]float32)\n\n\tvar skippedUsers int\n\n\tfor _, collection := range solvesByUser {\n\n\t\tif !collection.valid() {\n\t\t\tskippedUsers++\n\t\t\tcontinue\n\t\t}\n\n\t\tfor puzzleID, relativeDifficulty := range collection.relativeDifficulties() {\n\t\t\trelativeDifficultiesByPuzzle[puzzleID] = append(relativeDifficultiesByPuzzle[puzzleID], relativeDifficulty)\n\t\t}\n\n\t}\n\n\tlog.Println(\"Skipped \", skippedUsers, \" users because they did not have enough solve times.\")\n\n\tpuzzles := make([]puzzle, len(relativeDifficultiesByPuzzle))\n\n\tvar index int\n\n\tfor puzzleID, difficulties := range relativeDifficultiesByPuzzle {\n\t\tvar sum float32\n\t\tfor _, difficulty := range difficulties {\n\t\t\tsum += difficulty\n\t\t}\n\t\tpuzzles[index] = puzzle{id: puzzleID, userRelativeDifficulty: sum \/ float32(len(difficulties)), difficultyRating: -1}\n\t\tindex++\n\t}\n\n\t\/\/Sort the puzzles by relative user difficulty\n\t\/\/We actually don't need the wrapper, since it will modify the underlying slice.\n\tsort.Sort(byUserRelativeDifficulty{puzzles})\n\n\t\/\/Merge in the difficulty ratings from the server.\n\tdifficultyRatings := <-difficutlyRatingsChan\n\n\tfor i, puzzle := range puzzles {\n\t\tinfo, ok := difficultyRatings[puzzle.id]\n\t\tif ok {\n\t\t\tpuzzle.difficultyRating = info.difficultyRating\n\t\t\tpuzzle.name = info.name\n\t\t\tpuzzle.puzzle = info.puzzle\n\t\t}\n\t\t\/\/It's not a pointer so we have to copy it back.\n\t\tpuzzles[i] = puzzle\n\t}\n\n\t\/\/Now print the results to stdout.\n\n\tcsvOut := csv.NewWriter(os.Stdout)\n\n\tfor _, puzzle := range puzzles {\n\t\ttemp := []string{strconv.Itoa(puzzle.id), strconv.Itoa(puzzle.difficultyRating), fmt.Sprintf(\"%g\", puzzle.userRelativeDifficulty), puzzle.name}\n\t\tif printPuzzleDataFlag {\n\t\t\ttemp = append(temp, puzzle.puzzle)\n\t\t}\n\t\tcsvOut.Write(temp)\n\t}\n\n\tcsvOut.Flush()\n\n}\n\nfunc getPuzzleDifficultyRatings(config *dbConfig, result chan map[int]puzzle) {\n\n\tdb := mysql.New(\"tcp\", \"\", config.Url, config.Username, config.Password, config.DbName)\n\n\tif err := db.Connect(); err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\n\tres, err := db.Start(\"select %s, %s, %s, %s from %s\", config.PuzzlesID, config.PuzzlesDifficulty, config.PuzzlesName, config.PuzzlesPuzzle, config.PuzzlesTable)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\n\tpuzzles := make(map[int]puzzle)\n\n\tfor {\n\n\t\trow, _ := res.GetRow()\n\n\t\tif row == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tpuzzles[row.Int(0)] = puzzle{id: row.Int(0), difficultyRating: row.Int(1), name: row.Str(2), puzzle: row.Str(3)}\n\t}\n\n\tresult <- puzzles\n\n}\n<commit_msg>Also use the mock connection to get puzzle difficulty ratings, if provided.<commit_after>package main\n\nimport (\n\t\"encoding\/csv\"\n\t\"encoding\/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"github.com\/ziutek\/mymysql\/mysql\"\n\t_ \"github.com\/ziutek\/mymysql\/native\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n)\n\nconst _DB_CONFIG_FILENAME = \"db_config.SECRET.json\"\nconst _OUTPUT_FILENAME = \"output.csv\"\nconst QUERY_LIMIT = 100\nconst _PENALTY_PERCENTAGE_CUTOFF = 0.10\n\n\/\/How many solves a user must have to have their relative scale included.\n\/\/A low value gives you far more very low or very high scores than you shoul get.\nconst _MINIMUM_SOLVES = 10\n\nvar noLimitFlag bool\nvar printPuzzleDataFlag bool\nvar cullCheaterPercentageFlag float64\nvar minimumSolvesFlag int\nvar useMockData bool\n\nfunc init() {\n\tflag.BoolVar(&noLimitFlag, \"a\", false, \"Specify to execute the solves query with no limit.\")\n\tflag.BoolVar(&printPuzzleDataFlag, \"p\", false, \"Specify that you want puzzle data printed out in the output.\")\n\tflag.Float64Var(&cullCheaterPercentageFlag, \"c\", _PENALTY_PERCENTAGE_CUTOFF, \"What percentage of solve time must be penalty for someone to be considered a cheater.\")\n\tflag.IntVar(&minimumSolvesFlag, \"n\", _MINIMUM_SOLVES, \"How many solves a user must have their scores considered.\")\n\tflag.BoolVar(&useMockData, \"m\", false, \"Use mock data (useful if you don't have a real database to test with).\")\n}\n\ntype dbConfig struct {\n\tUrl string\n\tUsername string\n\tPassword string\n\tDbName string\n\tSolvesTable string\n\tSolvesID string\n\tSolvesPuzzleID string\n\tSolvesTotalTime string\n\tSolvesPenaltyTime string\n\tSolvesUser string\n\tPuzzlesTable string\n\tPuzzlesID string\n\tPuzzlesDifficulty string\n\tPuzzlesName string\n\tPuzzlesPuzzle string\n}\n\ntype solve struct {\n\tpuzzleID int\n\ttotalTime int\n\tpenaltyTime int\n}\n\ntype userSolvesCollection struct {\n\tsolves []solve\n\tmax int\n\tmin int\n}\n\ntype puzzle struct {\n\tid int\n\tuserRelativeDifficulty float32\n\tdifficultyRating int\n\tname string\n\tpuzzle string\n}\n\ntype puzzles []puzzle\n\ntype byUserRelativeDifficulty struct {\n\tpuzzles\n}\n\nfunc (self puzzles) Len() int {\n\treturn len(self)\n}\n\nfunc (self puzzles) Swap(i, j int) {\n\tself[i], self[j] = self[j], self[i]\n}\n\nfunc (self byUserRelativeDifficulty) Less(i, j int) bool {\n\treturn self.puzzles[i].userRelativeDifficulty < self.puzzles[j].userRelativeDifficulty\n}\n\nfunc (self *userSolvesCollection) addSolve(solve solve) bool {\n\t\/\/Cull obviously incorrect solves.\n\tif solve.totalTime == 0 {\n\t\treturn false\n\t}\n\n\t\/\/Cull solves that leaned too heavily on hints.\n\tif float64(solve.penaltyTime)\/float64(solve.totalTime) > cullCheaterPercentageFlag {\n\t\treturn false\n\t}\n\n\tself.solves = append(self.solves, solve)\n\tif len(self.solves) == 1 {\n\t\tself.max = solve.totalTime\n\t\tself.min = solve.totalTime\n\t} else {\n\t\tif self.max < solve.totalTime {\n\t\t\tself.max = solve.totalTime\n\t\t}\n\t\tif self.min > solve.totalTime {\n\t\t\tself.min = solve.totalTime\n\t\t}\n\t}\n\treturn true\n}\n\n\/\/Whehter or not this should be included in calculation.\n\/\/Basically, whether the reltaiveDifficulties will all be valid.\n\/\/Normally this returns false if there is only one solve by the user, but could also\n\/\/happen when there are multiple solves but (crazily enough) they all have exactly the same solveTime.\n\/\/This DOES happen in the production dataset.\nfunc (self *userSolvesCollection) valid() bool {\n\tif self.max == self.min {\n\t\treturn false\n\t}\n\n\tif len(self.solves) < minimumSolvesFlag {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (self *userSolvesCollection) relativeDifficulties() map[int]float32 {\n\t\/\/Returns a map of puzzle id to relative difficulty, normalized by our max and min.\n\tavgSolveTimes := make(map[int]float32)\n\t\/\/Keep track of how many times we've seen each puzzle solved by this user so we can do correct averaging.\n\tavgSolveTimesCount := make(map[int]int)\n\n\t\/\/First, collect the average solve time (in case the same user has solved more than once the same puzzle)\n\n\tfor _, solve := range self.solves {\n\t\tcurrentAvgSolveTime := avgSolveTimes[solve.puzzleID]\n\n\t\tavgSolveTimes[solve.puzzleID] = (currentAvgSolveTime*float32(avgSolveTimesCount[solve.puzzleID]) + float32(solve.totalTime)) \/ float32(avgSolveTimesCount[solve.puzzleID]+1)\n\n\t\tavgSolveTimesCount[solve.puzzleID]++\n\t}\n\n\t\/\/Now, relativize all of the scores.\n\n\tresult := make(map[int]float32)\n\n\tfor puzzleID, avgSolveTime := range avgSolveTimes {\n\t\tresult[puzzleID] = (avgSolveTime - float32(self.min)) \/ float32(self.max-self.min)\n\t}\n\n\treturn result\n}\n\nfunc main() {\n\n\tflag.Parse()\n\n\tfile, err := os.Open(_DB_CONFIG_FILENAME)\n\tif err != nil {\n\t\tlog.Fatal(\"Could not find the config file at \", _DB_CONFIG_FILENAME, \". You should copy the SAMPLE one to that filename and configure.\")\n\t\tos.Exit(1)\n\t}\n\tdefer file.Close()\n\tdecoder := json.NewDecoder(file)\n\tvar config dbConfig\n\tif err := decoder.Decode(&config); err != nil {\n\t\tlog.Fatal(\"There was an error parsing JSON from the config file: \", err)\n\t\tos.Exit(1)\n\t}\n\n\tdifficutlyRatingsChan := make(chan map[int]puzzle)\n\n\tgo getPuzzleDifficultyRatings(&config, difficutlyRatingsChan)\n\n\tvar db mysql.Conn\n\n\tif useMockData {\n\t\tdb = &mockConnection{}\n\t} else {\n\t\tdb = mysql.New(\"tcp\", \"\", config.Url, config.Username, config.Password, config.DbName)\n\t}\n\n\tif err := db.Connect(); err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\n\tvar solvesQuery string\n\n\tif noLimitFlag {\n\t\tlog.Println(\"Running without a limit for number of solves to retrieve.\")\n\t\tsolvesQuery = \"select %s, %s, %s, %s from %s\"\n\t} else {\n\t\tlog.Println(\"Running with a limit of \", QUERY_LIMIT, \" for number of solves to retrieve.\")\n\t\tsolvesQuery = \"select %s, %s, %s, %s from %s limit \" + strconv.Itoa(QUERY_LIMIT)\n\t}\n\n\tres, err := db.Start(solvesQuery, config.SolvesUser, config.SolvesPuzzleID, config.SolvesTotalTime, config.SolvesPenaltyTime, config.SolvesTable)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\n\tsolvesByUser := make(map[string]*userSolvesCollection)\n\n\tvar userSolves *userSolvesCollection\n\tvar ok bool\n\tvar i int\n\tvar skippedSolves int\n\n\t\/\/First, process all user records in the DB to collect all solves by userName.\n\tfor {\n\n\t\trow, _ := res.GetRow()\n\n\t\tif row == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tuserSolves, ok = solvesByUser[row.Str(0)]\n\n\t\tif !ok {\n\t\t\tuserSolves = new(userSolvesCollection)\n\t\t\tsolvesByUser[row.Str(0)] = userSolves\n\t\t}\n\n\t\tif !userSolves.addSolve(solve{row.Int(1), row.Int(2), row.Int(3)}) {\n\t\t\tskippedSolves++\n\t\t}\n\t\ti++\n\t}\n\n\tlog.Println(\"Processed \", i, \" solves by \", len(solvesByUser), \" users.\")\n\tlog.Println(\"Skipped \", skippedSolves, \" solves that cheated too much.\")\n\n\t\/\/Now get the relative difficulty for each user's puzzles, and collect them.\n\n\trelativeDifficultiesByPuzzle := make(map[int][]float32)\n\n\tvar skippedUsers int\n\n\tfor _, collection := range solvesByUser {\n\n\t\tif !collection.valid() {\n\t\t\tskippedUsers++\n\t\t\tcontinue\n\t\t}\n\n\t\tfor puzzleID, relativeDifficulty := range collection.relativeDifficulties() {\n\t\t\trelativeDifficultiesByPuzzle[puzzleID] = append(relativeDifficultiesByPuzzle[puzzleID], relativeDifficulty)\n\t\t}\n\n\t}\n\n\tlog.Println(\"Skipped \", skippedUsers, \" users because they did not have enough solve times.\")\n\n\tpuzzles := make([]puzzle, len(relativeDifficultiesByPuzzle))\n\n\tvar index int\n\n\tfor puzzleID, difficulties := range relativeDifficultiesByPuzzle {\n\t\tvar sum float32\n\t\tfor _, difficulty := range difficulties {\n\t\t\tsum += difficulty\n\t\t}\n\t\tpuzzles[index] = puzzle{id: puzzleID, userRelativeDifficulty: sum \/ float32(len(difficulties)), difficultyRating: -1}\n\t\tindex++\n\t}\n\n\t\/\/Sort the puzzles by relative user difficulty\n\t\/\/We actually don't need the wrapper, since it will modify the underlying slice.\n\tsort.Sort(byUserRelativeDifficulty{puzzles})\n\n\t\/\/Merge in the difficulty ratings from the server.\n\tdifficultyRatings := <-difficutlyRatingsChan\n\n\tfor i, puzzle := range puzzles {\n\t\tinfo, ok := difficultyRatings[puzzle.id]\n\t\tif ok {\n\t\t\tpuzzle.difficultyRating = info.difficultyRating\n\t\t\tpuzzle.name = info.name\n\t\t\tpuzzle.puzzle = info.puzzle\n\t\t}\n\t\t\/\/It's not a pointer so we have to copy it back.\n\t\tpuzzles[i] = puzzle\n\t}\n\n\t\/\/Now print the results to stdout.\n\n\tcsvOut := csv.NewWriter(os.Stdout)\n\n\tfor _, puzzle := range puzzles {\n\t\ttemp := []string{strconv.Itoa(puzzle.id), strconv.Itoa(puzzle.difficultyRating), fmt.Sprintf(\"%g\", puzzle.userRelativeDifficulty), puzzle.name}\n\t\tif printPuzzleDataFlag {\n\t\t\ttemp = append(temp, puzzle.puzzle)\n\t\t}\n\t\tcsvOut.Write(temp)\n\t}\n\n\tcsvOut.Flush()\n\n}\n\nfunc getPuzzleDifficultyRatings(config *dbConfig, result chan map[int]puzzle) {\n\n\tvar db mysql.Conn\n\n\tif useMockData {\n\t\tdb = &mockConnection{}\n\t} else {\n\t\tdb = mysql.New(\"tcp\", \"\", config.Url, config.Username, config.Password, config.DbName)\n\t}\n\n\tif err := db.Connect(); err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\n\tres, err := db.Start(\"select %s, %s, %s, %s from %s\", config.PuzzlesID, config.PuzzlesDifficulty, config.PuzzlesName, config.PuzzlesPuzzle, config.PuzzlesTable)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t\tos.Exit(1)\n\t}\n\n\tpuzzles := make(map[int]puzzle)\n\n\tfor {\n\n\t\trow, _ := res.GetRow()\n\n\t\tif row == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tpuzzles[row.Int(0)] = puzzle{id: row.Int(0), difficultyRating: row.Int(1), name: row.Str(2), puzzle: row.Str(3)}\n\t}\n\n\tresult <- puzzles\n\n}\n<|endoftext|>"} {"text":"<commit_before>package emil\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n)\n\nvar errNowNewAnalysis = errors.New(\"errNowNewAnalysis\")\n\ntype analysis struct {\n\tanalysisDone bool\n\tdtm int \/\/ Depth to mate\n\n\tboard *Board\n\tmove *Move\n}\n\n\/\/ EndGameDb to query for mate in 1,2, etc.\ntype EndGameDb struct {\n\tpositionDb map[string]*analysis\n\n\tdtmDb []map[string]bool\n\n\tsearchedPositions int\n}\n\nfunc (db *EndGameDb) Find(board *Board) (bestMove *Move) {\n\tif DEBUG {\n\t\tfmt.Printf(\"Find:\\n%s\\n\", board.String())\n\t}\n\ta := db.positionDb[board.String()]\n\tif DEBUG {\n\t\tfmt.Printf(\"Found: positionDb with dtm %d\\n\", a.dtm)\n\t}\n\treturn a.move\n}\nfunc (db *EndGameDb) FindMates() (boards []*Board) {\n\tfor str := range db.dtmDb[0] {\n\t\ta := db.positionDb[str]\n\t\tboards = append(boards, a.board)\n\t}\n\treturn boards\n}\n\nfunc (db *EndGameDb) FindMate(piece, square int) (boards []*Board) {\n\tfor str := range db.dtmDb[0] {\n\t\ta := db.positionDb[str]\n\t\tif a.board.squares[square] == piece {\n\t\t\tboards = append(boards, a.board)\n\t\t}\n\t}\n\treturn boards\n}\n\nfunc (db *EndGameDb) addPosition(board *Board) {\n\tdb.addAnalysis(board, -1, nil)\n}\n\nfunc (db *EndGameDb) addAnalysis(board *Board, dtm int, move *Move) {\n\ta := &analysis{\n\t\tdtm: dtm,\n\t\tboard: board}\n\tif move != nil {\n\t\ta.move = move.reverse()\n\t}\n\tdone := dtm >= 0\n\tif done {\n\t\tdb.dtmDb[dtm][a.board.String()] = true\n\t}\n\ta.analysisDone = done\n\n\tdb.positionDb[a.board.String()] = a\n}\n\nfunc (db *EndGameDb) positions() int {\n\treturn len(db.positionDb)\n}\n\n\/\/ find positions where black is checkmate\nfunc (db *EndGameDb) retrogradeAnalysisStep1() {\n\tdb.dtmDb = append(db.dtmDb, make(map[string]bool))\n\n\tstart := time.Now()\n\n\tplayer := BLACK\n\tfor boardStr, a := range db.positionDb {\n\t\t\/\/ mate only on border square\n\t\tblackKingSquare := BoardSquares[a.board.blackKing]\n\t\tif !blackKingSquare.isBorder {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ mate only with help from king\n\t\tif squaresDistances[a.board.blackKing][a.board.whiteKing] > 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\tp := NewPosition(a.board, player)\n\n\t\tmove := Search(p)\n\t\tdb.searchedPositions++\n\t\tif move == nil {\n\t\t\tif isKingInCheck(p) {\n\t\t\t\ta.dtm = 0\n\t\t\t\tdb.addAnalysis(a.board, 0, nil)\n\t\t\t\tif DEBUG {\n\t\t\t\t\tfmt.Printf(\"mate:\\n%s\\n\", boardStr)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tend := time.Now()\n\tif DEBUG {\n\t\tfmt.Printf(\"searchedPositions %d\\n\", db.searchedPositions)\n\t\tfmt.Printf(\"db.dtmDb[0] %d\\n\", len(db.dtmDb[0]))\n\t\tfmt.Printf(\"duration %v\\n\\n\\n\", end.Sub(start))\n\t}\n}\nfunc playerForStepN(dtm int) (player int) {\n\tif dtm%2 == 0 {\n\t\tplayer = BLACK\n\t\tif DEBUG {\n\t\t\tfmt.Printf(\"retrogradeAnalysis BLACK %d\\n\", dtm)\n\t\t}\n\t} else {\n\t\tplayer = WHITE\n\t\tif DEBUG {\n\t\t\tfmt.Printf(\"retrogradeAnalysis WHITE %d\\n\", dtm)\n\t\t}\n\t}\n\treturn player\n}\n\nfunc (db *EndGameDb) retrogradeAnalysisStepN(dtm int) (noError error) {\n\tstart := time.Now()\n\tdb.dtmDb = append(db.dtmDb, make(map[string]bool))\n\n\tplayer := playerForStepN(dtm)\n\n\tif player == WHITE {\n\t\tif DEBUG {\n\t\t\tfmt.Printf(\"WHITE Start positions %d\\n\", len(db.dtmDb[dtm-1]))\n\t\t}\n\t\tfor str := range db.dtmDb[dtm-1] {\n\t\t\ta := db.positionDb[str]\n\t\t\tp := NewPosition(a.board, player)\n\t\t\tlist := generateMoves(p)\n\t\t\tmoves := filterKingCaptures(p, list)\n\t\t\tmoves = filterKingCaptures(NewPosition(a.board, otherPlayer(player)), list)\n\t\t\tfor _, m := range moves {\n\t\t\t\tnewBoard := a.board.doMove(m)\n\t\t\t\tnewAnalysis, ok := db.positionDb[newBoard.String()]\n\t\t\t\tif ok && !newAnalysis.analysisDone {\n\t\t\t\t\tdb.addAnalysis(newBoard, dtm, m)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\t\/\/ Suche alle Stellungen, bei denen Schwarz am Zug ist und\n\t\t\/\/ **jeder** Zug von ihm zu einer Stellung unter 2. führt.\n\t\t\/\/ Schwarz kann hier Matt in einem Zug nicht verhindern.\n\t\t\/\/ Markiere diese Stellungen in der Datei.\n\t\tif DEBUG {\n\t\t\tfmt.Printf(\"BLACK Start positions %d\\n\", len(db.positionDb))\n\t\t}\n\t\tfor _, a := range db.positionDb {\n\t\t\tp := NewPosition(a.board, player)\n\t\t\tlist := generateMoves(p)\n\t\t\tmoves := filterKingCaptures(p, list)\n\t\t\tmoves = filterKingCaptures(NewPosition(a.board, otherPlayer(player)), list)\n\n\t\t\tfound := 0\n\t\t\tfor _, m := range moves {\n\t\t\t\tnewBoard := a.board.doMove(m)\n\t\t\t\t_, ok := db.dtmDb[dtm-1][newBoard.String()]\n\t\t\t\tif ok {\n\t\t\t\t\tfound++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif found == len(moves) {\n\t\t\t\tfor _, m := range moves {\n\t\t\t\t\tnewBoard := a.board.doMove(m)\n\t\t\t\t\tdb.addAnalysis(newBoard, dtm, m)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tend := time.Now()\n\n\tif DEBUG {\n\t\tfmt.Printf(\"db.dtmDb[%d] %d\\n\", dtm, len(db.dtmDb[dtm]))\n\t\tfmt.Printf(\"duration %v\\n\\n\\n\", end.Sub(start))\n\t}\n\n\tif len(db.dtmDb[dtm]) == 0 {\n\t\treturn errNowNewAnalysis\n\t}\n\treturn noError\n}\n\nfunc (db *EndGameDb) retrogradeAnalysis() {\n\t\/\/ find positions where black is checkmate\n\tdb.retrogradeAnalysisStep1()\n\tdtm := 1\n\tfor {\n\t\terr := db.retrogradeAnalysisStepN(dtm)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tdtm++\n\t}\n}\n\nfunc generateMoves(p *position) (list []*Move) {\n\tfor src, piece := range p.board.squares {\n\t\tif isOwnPiece(p.player, piece) {\n\t\t\tswitch abs(piece) {\n\t\t\tcase kingValue:\n\t\t\t\tfor _, dst := range kingDestinationsFrom(src) {\n\t\t\t\t\tcapture := p.board.squares[dst]\n\t\t\t\t\tif isOtherKing(p.player, capture) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif capture == Empty {\n\t\t\t\t\t\tlist = append(list, newSilentMove(p.player, piece, src, dst))\n\t\t\t\t\t} else if !isOwnPiece(p.player, capture) {\n\t\t\t\t\t\tlist = append(list, newCaptureMove(p.player, piece, capture, src, dst))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase rockValue:\n\t\t\t\tfor _, dsts := range rockDestinationsFrom(src) {\n\t\t\t\t\tfor _, dst := range dsts {\n\t\t\t\t\t\tcapture := p.board.squares[dst]\n\t\t\t\t\t\tif isOtherKing(p.player, capture) {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif capture == Empty {\n\t\t\t\t\t\t\tlist = append(list, newSilentMove(p.player, piece, src, dst))\n\t\t\t\t\t\t} else if !isOwnPiece(p.player, capture) {\n\t\t\t\t\t\t\tlist = append(list, newCaptureMove(p.player, piece, capture, src, dst))\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tbreak \/\/ onOwnPiece\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn list\n}\n\n\/\/ NewEndGameDb generates an end game DB for KRK\nfunc NewEndGameDb() *EndGameDb {\n\tvar err error\n\tstart := time.Now()\n\n\tendGames := &EndGameDb{\n\t\tpositionDb: make(map[string]*analysis),\n\t\tdtmDb: make([]map[string]bool, 0)}\n\n\tfor wk := A1; wk <= H8; wk++ {\n\t\t\/\/for wk := E3; wk <= E3; wk++ {\n\t\tif DEBUG {\n\t\t\tfmt.Printf(\"White king on %s\\n\", BoardSquares[wk])\n\t\t}\n\t\tfor wr := A1; wr <= H8; wr++ {\n\t\t\tfor bk := A1; bk <= H8; bk++ {\n\n\t\t\t\tboard := NewBoard()\n\n\t\t\t\terr = board.Setup(WhiteKing, wk)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = board.Setup(WhiteRock, wr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = board.Setup(BlackKing, bk)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = board.kingsToClose()\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tendGames.addPosition(board)\n\t\t\t}\n\t\t}\n\t}\n\tend := time.Now()\n\tif DEBUG {\n\t\tfmt.Printf(\"all positions %d\\n\", 64*63*62)\n\t\tfmt.Printf(\"endGames.positions() %d\\n\", endGames.positions())\n\t\tfmt.Printf(\"difference %d\\n\", 64*63*62-endGames.positions())\n\t\tfmt.Printf(\"duration %v\\n\", end.Sub(start))\n\t}\n\tendGames.retrogradeAnalysis()\n\n\treturn endGames\n}\n<commit_msg>isMateForBlack<commit_after>package emil\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n)\n\nvar errNowNewAnalysis = errors.New(\"errNowNewAnalysis\")\n\ntype analysis struct {\n\tanalysisDone bool\n\tdtm int \/\/ Depth to mate\n\n\tboard *Board\n\tmove *Move\n}\n\n\/\/ EndGameDb to query for mate in 1,2, etc.\ntype EndGameDb struct {\n\tpositionDb map[string]*analysis\n\n\tdtmDb []map[string]bool\n\n\tsearchedPositions int\n}\n\nfunc (db *EndGameDb) Find(board *Board) (bestMove *Move) {\n\tif DEBUG {\n\t\tfmt.Printf(\"Find:\\n%s\\n\", board.String())\n\t}\n\ta := db.positionDb[board.String()]\n\tif DEBUG {\n\t\tfmt.Printf(\"Found: positionDb with dtm %d\\n\", a.dtm)\n\t}\n\treturn a.move\n}\nfunc (db *EndGameDb) FindMates() (boards []*Board) {\n\tfor str := range db.dtmDb[0] {\n\t\ta := db.positionDb[str]\n\t\tboards = append(boards, a.board)\n\t}\n\treturn boards\n}\n\nfunc (db *EndGameDb) FindMate(piece, square int) (boards []*Board) {\n\tfor str := range db.dtmDb[0] {\n\t\ta := db.positionDb[str]\n\t\tif a.board.squares[square] == piece {\n\t\t\tboards = append(boards, a.board)\n\t\t}\n\t}\n\treturn boards\n}\n\nfunc (db *EndGameDb) addPosition(board *Board) {\n\tdb.addAnalysis(board, -1, nil)\n}\n\nfunc (db *EndGameDb) addAnalysis(board *Board, dtm int, move *Move) {\n\ta := &analysis{\n\t\tdtm: dtm,\n\t\tboard: board}\n\tif move != nil {\n\t\ta.move = move.reverse()\n\t}\n\tdone := dtm >= 0\n\tif done {\n\t\tdb.dtmDb[dtm][a.board.String()] = true\n\t}\n\ta.analysisDone = done\n\n\tdb.positionDb[a.board.String()] = a\n}\n\nfunc (db *EndGameDb) positions() int {\n\treturn len(db.positionDb)\n}\n\n\/\/ find positions where black is checkmate\nfunc (db *EndGameDb) retrogradeAnalysisStep1() {\n\tdb.dtmDb = append(db.dtmDb, make(map[string]bool))\n\n\tstart := time.Now()\n\n\tplayer := BLACK\n\tfor boardStr, a := range db.positionDb {\n\t\t\/\/ mate only on border square\n\t\tblackKingSquare := BoardSquares[a.board.blackKing]\n\t\tif !blackKingSquare.isBorder {\n\t\t\tcontinue\n\t\t}\n\t\t\/\/ mate only with help from king\n\t\tif squaresDistances[a.board.blackKing][a.board.whiteKing] > 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\tp := NewPosition(a.board, player)\n\n\t\tmove := Search(p)\n\t\tdb.searchedPositions++\n\t\tif move == nil {\n\t\t\tif isKingInCheck(p) {\n\t\t\t\ta.dtm = 0\n\t\t\t\tdb.addAnalysis(a.board, 0, nil)\n\t\t\t\tif DEBUG {\n\t\t\t\t\tfmt.Printf(\"mate:\\n%s\\n\", boardStr)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tend := time.Now()\n\tif DEBUG {\n\t\tfmt.Printf(\"searchedPositions %d\\n\", db.searchedPositions)\n\t\tfmt.Printf(\"db.dtmDb[0] %d\\n\", len(db.dtmDb[0]))\n\t\tfmt.Printf(\"duration %v\\n\\n\\n\", end.Sub(start))\n\t}\n}\nfunc playerForStepN(dtm int) (player int) {\n\tif dtm%2 == 0 {\n\t\tplayer = BLACK\n\t\tif DEBUG {\n\t\t\tfmt.Printf(\"retrogradeAnalysis BLACK %d\\n\", dtm)\n\t\t}\n\t} else {\n\t\tplayer = WHITE\n\t\tif DEBUG {\n\t\t\tfmt.Printf(\"retrogradeAnalysis WHITE %d\\n\", dtm)\n\t\t}\n\t}\n\treturn player\n}\n\nfunc (db *EndGameDb) retrogradeAnalysisStepN(dtm int) (noError error) {\n\tstart := time.Now()\n\tdb.dtmDb = append(db.dtmDb, make(map[string]bool))\n\n\tplayer := playerForStepN(dtm)\n\n\tif player == WHITE {\n\t\tif DEBUG {\n\t\t\tfmt.Printf(\"WHITE Start positions %d\\n\", len(db.dtmDb[dtm-1]))\n\t\t}\n\t\tfor str := range db.dtmDb[dtm-1] {\n\t\t\ta := db.positionDb[str]\n\t\t\tp := NewPosition(a.board, player)\n\t\t\tlist := generateMoves(p)\n\t\t\tmoves := filterKingCaptures(p, list)\n\t\t\tmoves = filterKingCaptures(NewPosition(a.board, otherPlayer(player)), list)\n\t\t\tfor _, m := range moves {\n\t\t\t\tnewBoard := a.board.doMove(m)\n\t\t\t\tnewAnalysis, ok := db.positionDb[newBoard.String()]\n\t\t\t\tif ok && !newAnalysis.analysisDone {\n\t\t\t\t\tdb.addAnalysis(newBoard, dtm, m)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\t\/\/ Suche alle Stellungen, bei denen Schwarz am Zug ist und\n\t\t\/\/ **jeder** Zug von ihm zu einer Stellung unter 2. führt.\n\t\t\/\/ Schwarz kann hier Matt in einem Zug nicht verhindern.\n\t\t\/\/ Markiere diese Stellungen in der Datei.\n\t\tif DEBUG {\n\t\t\tfmt.Printf(\"BLACK Start positions %d\\n\", len(db.positionDb))\n\t\t}\n\t\tfor _, a := range db.positionDb {\n\t\t\tp := NewPosition(a.board, player)\n\t\t\tlist := generateMoves(p)\n\t\t\tmoves := filterKingCaptures(p, list)\n\t\t\tmoves = filterKingCaptures(NewPosition(a.board, otherPlayer(player)), list)\n\n\t\t\tfound := 0\n\t\t\tfor _, m := range moves {\n\t\t\t\tnewBoard := a.board.doMove(m)\n\t\t\t\tif db.isMateForBlack(newBoard, dtm) {\n\t\t\t\t\tfound++\n\t\t\t\t}\n\t\t\t}\n\t\t\tif found == len(moves) {\n\t\t\t\tfor _, m := range moves {\n\t\t\t\t\tnewBoard := a.board.doMove(m)\n\t\t\t\t\tdb.addAnalysis(newBoard, dtm, m)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tend := time.Now()\n\n\tif DEBUG {\n\t\tfmt.Printf(\"db.dtmDb[%d] %d\\n\", dtm, len(db.dtmDb[dtm]))\n\t\tfmt.Printf(\"duration %v\\n\\n\\n\", end.Sub(start))\n\t}\n\n\tif len(db.dtmDb[dtm]) == 0 {\n\t\treturn errNowNewAnalysis\n\t}\n\treturn noError\n}\nfunc (db *EndGameDb) isMateForBlack(board *Board, maxDtm int) bool {\n\tfor dtm := 0; dtm < maxDtm; dtm += 2 {\n\t\t_, ok := db.dtmDb[dtm][board.String()]\n\t\tif ok {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (db *EndGameDb) retrogradeAnalysis() {\n\t\/\/ find positions where black is checkmate\n\tdb.retrogradeAnalysisStep1()\n\tdtm := 1\n\tfor {\n\t\terr := db.retrogradeAnalysisStepN(dtm)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tdtm++\n\t}\n}\n\nfunc generateMoves(p *position) (list []*Move) {\n\tfor src, piece := range p.board.squares {\n\t\tif isOwnPiece(p.player, piece) {\n\t\t\tswitch abs(piece) {\n\t\t\tcase kingValue:\n\t\t\t\tfor _, dst := range kingDestinationsFrom(src) {\n\t\t\t\t\tcapture := p.board.squares[dst]\n\t\t\t\t\tif isOtherKing(p.player, capture) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tif capture == Empty {\n\t\t\t\t\t\tlist = append(list, newSilentMove(p.player, piece, src, dst))\n\t\t\t\t\t} else if !isOwnPiece(p.player, capture) {\n\t\t\t\t\t\tlist = append(list, newCaptureMove(p.player, piece, capture, src, dst))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase rockValue:\n\t\t\t\tfor _, dsts := range rockDestinationsFrom(src) {\n\t\t\t\t\tfor _, dst := range dsts {\n\t\t\t\t\t\tcapture := p.board.squares[dst]\n\t\t\t\t\t\tif isOtherKing(p.player, capture) {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif capture == Empty {\n\t\t\t\t\t\t\tlist = append(list, newSilentMove(p.player, piece, src, dst))\n\t\t\t\t\t\t} else if !isOwnPiece(p.player, capture) {\n\t\t\t\t\t\t\tlist = append(list, newCaptureMove(p.player, piece, capture, src, dst))\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tbreak \/\/ onOwnPiece\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn list\n}\n\n\/\/ NewEndGameDb generates an end game DB for KRK\nfunc NewEndGameDb() *EndGameDb {\n\tvar err error\n\tstart := time.Now()\n\n\tendGames := &EndGameDb{\n\t\tpositionDb: make(map[string]*analysis),\n\t\tdtmDb: make([]map[string]bool, 0)}\n\n\tfor wk := A1; wk <= H8; wk++ {\n\t\t\/\/for wk := E3; wk <= E3; wk++ {\n\t\tif DEBUG {\n\t\t\tfmt.Printf(\"White king on %s\\n\", BoardSquares[wk])\n\t\t}\n\t\tfor wr := A1; wr <= H8; wr++ {\n\t\t\tfor bk := A1; bk <= H8; bk++ {\n\n\t\t\t\tboard := NewBoard()\n\n\t\t\t\terr = board.Setup(WhiteKing, wk)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = board.Setup(WhiteRock, wr)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = board.Setup(BlackKing, bk)\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = board.kingsToClose()\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tendGames.addPosition(board)\n\t\t\t}\n\t\t}\n\t}\n\tend := time.Now()\n\tif DEBUG {\n\t\tfmt.Printf(\"all positions %d\\n\", 64*63*62)\n\t\tfmt.Printf(\"endGames.positions() %d\\n\", endGames.positions())\n\t\tfmt.Printf(\"difference %d\\n\", 64*63*62-endGames.positions())\n\t\tfmt.Printf(\"duration %v\\n\", end.Sub(start))\n\t}\n\tendGames.retrogradeAnalysis()\n\n\treturn endGames\n}\n<|endoftext|>"} {"text":"<commit_before>package fileutils\n\nimport (\n\t\"io\"\n\t\"os\"\n)\n\n\/\/ CopyFile copies the file at source to dest\nfunc CopyFile(source string, dest string) error {\n\tsf, err := os.Open(source)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer sf.Close()\n\n\tdf, err := os.Create(dest)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer df.Close()\n\n\t_, err = io.Copy(df, sf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n<commit_msg>Add support for handling symlinks and device files<commit_after>package fileutils\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"syscall\"\n)\n\n\/\/ CopyFile copies the file at source to dest\nfunc CopyFile(source string, dest string) error {\n\tsi, err := os.Lstat(source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t\/\/ Handle symlinks\n\tif si.Mode()&os.ModeSymlink != 0 {\n\t\ttarget, err := os.Readlink(source)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := os.Symlink(target, dest); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\t\/\/ Handle device files\n\tst, ok := si.Sys().(*syscall.Stat_t)\n\tif !ok {\n\t\treturn fmt.Errorf(\"could not convert to syscall.Stat_t\")\n\t}\n\n\tif st.Mode&syscall.S_IFBLK != 0 || st.Mode&syscall.S_IFCHR != 0 {\n\t\tdevMajor := int64(major(uint64(st.Rdev)))\n\t\tdevMinor := int64(minor(uint64(st.Rdev)))\n\t\tmode := uint32(si.Mode() & 07777)\n\t\tif st.Mode&syscall.S_IFBLK != 0 {\n\t\t\tfmt.Printf(\"BLOCK\")\n\t\t\tmode |= syscall.S_IFBLK\n\t\t}\n\t\tif st.Mode&syscall.S_IFCHR != 0 {\n\t\t\tfmt.Printf(\"CHAR\")\n\t\t\tmode |= syscall.S_IFCHR\n\t\t}\n\t\tif err := syscall.Mknod(dest, mode, int(mkdev(devMajor, devMinor))); err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\t\/\/ Handle regular files\n\tif si.Mode().IsRegular() {\n\t\tsf, err := os.Open(source)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer sf.Close()\n\n\t\tdf, err := os.Create(dest)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer df.Close()\n\n\t\t_, err = io.Copy(df, sf)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc major(device uint64) uint64 {\n\treturn (device >> 8) & 0xfff\n}\n\nfunc minor(device uint64) uint64 {\n\treturn (device & 0xff) | ((device >> 12) & 0xfff00)\n}\n\nfunc mkdev(major int64, minor int64) uint32 {\n\treturn uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright ©2011-2013 The bíogo Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage dp\n\nimport (\n\t\"github.com\/biogo\/biogo\/align\/pals\/filter\"\n\t\"github.com\/biogo\/biogo\/alphabet\"\n\t\"github.com\/biogo\/biogo\/seq\/linear\"\n)\n\n\/\/ A kernel handles the actual dp alignment process.\ntype kernel struct {\n\ttarget, query *linear.Seq\n\n\tminLen int\n\tmaxDiff float64\n\n\tvalueToCode alphabet.Index\n\n\tCosts\n\n\tlowEnd Hit\n\thighEnd Hit\n\tvectors [2][]int\n\ttrapezoids []*filter.Trapezoid\n\tcovered []bool\n\tslot int\n\tresult chan Hit\n}\n\n\/\/ An offset slice seems to be the easiest way to implement the C idiom used in PALS to implement\n\/\/ an offset (by o) view (v) on an array (a):\n\/\/\n\/\/ int *v, o;\n\/\/ int [n]a;\n\/\/ v = a - o;\n\/\/\n\/\/ now v[i] is a view on a[i-o]\ntype offsetSlice struct {\n\toffset int\n\tslice []int\n}\n\nfunc (o *offsetSlice) at(i int) int {\n\ti -= o.offset\n\tif i == -1 || i == len(o.slice) {\n\t\treturn 0\n\t}\n\treturn o.slice[i]\n}\nfunc (o *offsetSlice) set(i, v int) { o.slice[i-o.offset] = v }\n\nvar vecBuffering int = 100000\n\n\/\/ Handle the recursive search for alignable segments.\nfunc (k *kernel) alignRecursion(t *filter.Trapezoid) {\n\tmid := (t.Bottom + t.Top) \/ 2\n\n\tk.traceForward(mid, mid-t.Right, mid-t.Left)\n\n\tfor x := 1; x == 1 || k.highEnd.Bbpos > mid+x*k.MaxIGap && k.highEnd.Score < k.lowEnd.Score; x++ {\n\t\tk.traceReverse(k.lowEnd.Bepos, k.lowEnd.Aepos, k.lowEnd.Aepos, mid+k.MaxIGap, k.BlockCost+2*x*k.DiffCost)\n\t}\n\n\tk.highEnd.Aepos, k.highEnd.Bepos = k.lowEnd.Aepos, k.lowEnd.Bepos\n\n\tlowTrap, highTrap := *t, *t\n\tlowTrap.Top = k.highEnd.Bbpos - k.MaxIGap\n\thighTrap.Bottom = k.highEnd.Bepos + k.MaxIGap\n\n\tif k.highEnd.Bepos-k.highEnd.Bbpos >= k.minLen && k.highEnd.Aepos-k.highEnd.Abpos >= k.minLen {\n\t\tindel := (k.highEnd.Abpos - k.highEnd.Bbpos) - (k.highEnd.Aepos - k.highEnd.Bepos)\n\t\tif indel < 0 {\n\t\t\tif indel == -indel {\n\t\t\t\tpanic(\"dp: weird number overflow\")\n\t\t\t}\n\t\t\tindel = -indel\n\t\t}\n\t\tidentity := ((1 \/ k.RMatchCost) - float64(k.highEnd.Score-indel)\/(k.RMatchCost*float64(k.highEnd.Bepos-k.highEnd.Bbpos)))\n\n\t\tif identity <= k.maxDiff {\n\t\t\tk.highEnd.Error = identity\n\n\t\t\tfor i, trap := range k.trapezoids[k.slot+1:] {\n\t\t\t\tvar trapAProjection, trapBProjection, coverageA, coverageB int\n\n\t\t\t\tif trap.Bottom >= k.highEnd.Bepos {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\ttrapBProjection = trap.Top - trap.Bottom + 1\n\t\t\t\ttrapAProjection = trap.Right - trap.Left + 1\n\t\t\t\tif trap.Left < k.highEnd.LowDiagonal {\n\t\t\t\t\tcoverageA = k.highEnd.LowDiagonal\n\t\t\t\t} else {\n\t\t\t\t\tcoverageA = trap.Left\n\t\t\t\t}\n\t\t\t\tif trap.Right > k.highEnd.HighDiagonal {\n\t\t\t\t\tcoverageB = k.highEnd.HighDiagonal\n\t\t\t\t} else {\n\t\t\t\t\tcoverageB = trap.Right\n\t\t\t\t}\n\n\t\t\t\tif coverageA > coverageB {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tcoverageA = coverageB - coverageA + 1\n\t\t\t\tif trap.Top > k.highEnd.Bepos {\n\t\t\t\t\tcoverageB = k.highEnd.Bepos - trap.Bottom + 1\n\t\t\t\t} else {\n\t\t\t\t\tcoverageB = trapBProjection\n\t\t\t\t}\n\n\t\t\t\tif (float64(coverageA)\/float64(trapAProjection))*(float64(coverageB)\/float64(trapBProjection)) > 0.99 {\n\t\t\t\t\tk.covered[i] = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Diagonals to this point are query-target, not target-query.\n\t\t\tk.highEnd.LowDiagonal, k.highEnd.HighDiagonal = -k.highEnd.HighDiagonal, -k.highEnd.LowDiagonal\n\n\t\t\tk.result <- k.highEnd\n\t\t}\n\t}\n\n\tif lowTrap.Top-lowTrap.Bottom > k.minLen && lowTrap.Top < t.Top-k.MaxIGap {\n\t\tk.alignRecursion(&lowTrap)\n\t}\n\tif highTrap.Top-highTrap.Bottom > k.minLen {\n\t\tk.alignRecursion(&highTrap)\n\t}\n}\n\nfunc (k *kernel) allocateVectors(required int) {\n\tvecMax := required + required>>2 + vecBuffering\n\tk.vectors[0] = make([]int, vecMax)\n\tk.vectors[1] = make([]int, vecMax)\n}\n\n\/\/ Forward and Reverse D.P. Extension Routines\n\/\/ Called at the mid-point of trapezoid -- mid X [low,high], the extension\n\/\/ is computed to an end point and the lowest and highest diagonals\n\/\/ are recorded. These are returned in a partially filled DPHit\n\/\/ record, that will be merged with that returned for extension in the\n\/\/ opposite direction.\nfunc (k *kernel) traceForward(mid, low, high int) {\n\todd := false\n\tvar (\n\t\tmaxScore int\n\t\tmaxLeft, maxRight int\n\t\tmaxI, maxJ int\n\t\ti, j int\n\t)\n\n\t\/\/ Set basis from (mid,low) .. (mid,high).\n\tif low < 0 {\n\t\tlow = 0\n\t}\n\tif high > k.target.Len() {\n\t\thigh = k.target.Len()\n\t}\n\n\tif required := (high - low) + k.MaxIGap; required >= len(k.vectors[0]) {\n\t\tk.allocateVectors(required)\n\t}\n\n\tthisVector := &offsetSlice{\n\t\tslice: k.vectors[0],\n\t\toffset: low,\n\t}\n\n\tfor j = low; j <= high; j++ {\n\t\tthisVector.set(j, 0)\n\t}\n\n\thigh += k.MaxIGap\n\tif high > k.target.Len() {\n\t\thigh = k.target.Len()\n\t}\n\n\tfor ; j <= high; j++ {\n\t\tthisVector.set(j, thisVector.at(j-1)-k.DiffCost)\n\t}\n\n\tmaxScore = 0\n\tmaxRight = mid - low\n\tmaxLeft = mid - high\n\tmaxI = mid\n\tmaxJ = low\n\n\t\/\/ Advance to next row.\n\tthatVector := &offsetSlice{}\n\tfor i = mid; low <= high && i < k.query.Len(); i++ {\n\t\tvar cost, score int\n\n\t\t*thatVector = *thisVector\n\t\tif !odd {\n\t\t\tthisVector.slice = k.vectors[1]\n\t\t} else {\n\t\t\tthisVector.slice = k.vectors[0]\n\t\t}\n\t\tthisVector.offset = low\n\t\todd = !odd\n\n\t\tscore = thatVector.at(low)\n\t\tthisVector.set(low, score-k.DiffCost)\n\t\tcost = thisVector.at(low)\n\n\t\tfor j = low + 1; j <= high; j++ {\n\t\t\tvar ratchet, temp int\n\n\t\t\ttemp = cost\n\t\t\tcost = score\n\t\t\tscore = thatVector.at(j)\n\t\t\tif k.query.Seq[i] == k.target.Seq[j-1] && k.valueToCode[k.query.Seq[i]] >= 0 {\n\t\t\t\tcost += k.MatchCost\n\t\t\t}\n\n\t\t\tratchet = cost\n\t\t\tif score > ratchet {\n\t\t\t\tratchet = score\n\t\t\t}\n\t\t\tif temp > ratchet {\n\t\t\t\tratchet = temp\n\t\t\t}\n\n\t\t\tcost = ratchet - k.DiffCost\n\t\t\tthisVector.set(j, cost)\n\t\t\tif cost >= maxScore {\n\t\t\t\tmaxScore = cost\n\t\t\t\tmaxI = i + 1\n\t\t\t\tmaxJ = j\n\t\t\t}\n\t\t}\n\n\t\tif j <= k.target.Len() {\n\t\t\tvar ratchet int\n\n\t\t\tif k.query.Seq[i] == k.target.Seq[j-1] && k.valueToCode[k.query.Seq[i]] >= 0 {\n\t\t\t\tscore += k.MatchCost\n\t\t\t}\n\n\t\t\tratchet = score\n\t\t\tif cost > ratchet {\n\t\t\t\tratchet = cost\n\t\t\t}\n\n\t\t\tscore = ratchet - k.DiffCost\n\t\t\tthisVector.set(j, score)\n\t\t\tif score > maxScore {\n\t\t\t\tmaxScore = score\n\t\t\t\tmaxI = i + 1\n\t\t\t\tmaxJ = j\n\t\t\t}\n\n\t\t\tfor j++; j <= k.target.Len(); j++ {\n\t\t\t\tscore -= k.DiffCost\n\t\t\t\tif score < maxScore-k.BlockCost {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tthisVector.set(j, score)\n\t\t\t}\n\t\t}\n\n\t\thigh = j - 1\n\n\t\tfor low <= high && thisVector.at(low) < maxScore-k.BlockCost {\n\t\t\tlow++\n\t\t}\n\t\tfor low <= high && thisVector.at(high) < maxScore-k.BlockCost {\n\t\t\thigh--\n\t\t}\n\n\t\tif required := (high - low) + 2; required > len(k.vectors[0]) {\n\t\t\tk.allocateVectors(required)\n\t\t}\n\n\t\tif (i+1)-low > maxRight {\n\t\t\tmaxRight = (i + 1) - low\n\t\t}\n\t\tif (i+1)-high < maxLeft {\n\t\t\tmaxLeft = (i + 1) - high\n\t\t}\n\t}\n\n\tk.lowEnd.Aepos = maxJ\n\tk.lowEnd.Bepos = maxI\n\tk.lowEnd.LowDiagonal = maxLeft\n\tk.lowEnd.HighDiagonal = maxRight\n\tk.lowEnd.Score = maxScore\n}\n\nfunc (k *kernel) traceReverse(top, low, high, bottom, xfactor int) {\n\todd := false\n\tvar (\n\t\tmaxScore int\n\t\tmaxLeft, maxRight int\n\t\tmaxI, maxJ int\n\t\ti, j int\n\t)\n\n\t\/\/ Set basis from (top,low) .. (top,high).\n\tif low < 0 {\n\t\tlow = 0\n\t}\n\tif high > k.target.Len() {\n\t\thigh = k.target.Len()\n\t}\n\n\tif required := (high - low) + k.MaxIGap; required >= len(k.vectors[0]) {\n\t\tk.allocateVectors(required)\n\t}\n\n\tthisVector := &offsetSlice{\n\t\tslice: k.vectors[0],\n\t\toffset: high - (len(k.vectors[0]) - 1),\n\t}\n\tfor j = high; j >= low; j-- {\n\t\tthisVector.set(j, 0)\n\t}\n\n\tlow -= k.MaxIGap\n\tif low < 0 {\n\t\tlow = 0\n\t}\n\n\tfor ; j >= low; j-- {\n\t\tthisVector.set(j, thisVector.at(j+1)-k.DiffCost)\n\t}\n\n\tmaxScore = 0\n\tmaxRight = top - low\n\tmaxLeft = top - high\n\tmaxI = top\n\tmaxJ = low\n\n\t\/\/ Advance to next row.\n\tif top-1 <= bottom {\n\t\txfactor = k.BlockCost\n\t}\n\n\tthatVector := &offsetSlice{}\n\tfor i = top - 1; low <= high && i >= 0; i-- {\n\t\tvar cost, score int\n\n\t\t*thatVector = *thisVector\n\t\tif !odd {\n\t\t\tthisVector.slice = k.vectors[1]\n\t\t} else {\n\t\t\tthisVector.slice = k.vectors[0]\n\t\t}\n\t\tthisVector.offset = high - (len(k.vectors[0]) - 1)\n\t\todd = !odd\n\n\t\tscore = thatVector.at(high)\n\t\tthisVector.set(high, score-k.DiffCost)\n\t\tcost = thisVector.at(high)\n\n\t\tfor j = high - 1; j >= low; j-- {\n\t\t\tvar ratchet, temp int\n\n\t\t\ttemp = cost\n\t\t\tcost = score\n\t\t\tscore = thatVector.at(j)\n\t\t\tif k.query.Seq[i] == k.target.Seq[j] && k.valueToCode[k.query.Seq[i]] >= 0 {\n\t\t\t\tcost += k.MatchCost\n\t\t\t}\n\n\t\t\tratchet = cost\n\t\t\tif score > ratchet {\n\t\t\t\tratchet = score\n\t\t\t}\n\t\t\tif temp > ratchet {\n\t\t\t\tratchet = temp\n\t\t\t}\n\n\t\t\tcost = ratchet - k.DiffCost\n\t\t\tthisVector.set(j, cost)\n\t\t\tif cost >= maxScore {\n\t\t\t\tmaxScore = cost\n\t\t\t\tmaxI = i\n\t\t\t\tmaxJ = j\n\t\t\t}\n\t\t}\n\n\t\tif j >= 0 {\n\t\t\tvar ratchet int\n\n\t\t\tif k.query.Seq[i] == k.target.Seq[j] && k.valueToCode[k.query.Seq[i]] >= 0 {\n\t\t\t\tscore += k.MatchCost\n\t\t\t}\n\n\t\t\tratchet = score\n\t\t\tif cost > ratchet {\n\t\t\t\tratchet = cost\n\t\t\t}\n\n\t\t\tscore = ratchet - k.DiffCost\n\t\t\tthisVector.set(j, score)\n\t\t\tif score > maxScore {\n\t\t\t\tmaxScore = score\n\t\t\t\tmaxI = i\n\t\t\t\tmaxJ = j\n\t\t\t}\n\n\t\t\tfor j--; j >= 0; j-- {\n\t\t\t\tscore -= k.DiffCost\n\t\t\t\tif score < maxScore-xfactor {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tthisVector.set(j, score)\n\t\t\t}\n\t\t}\n\n\t\tlow = j + 1\n\n\t\tfor low <= high && thisVector.at(low) < maxScore-xfactor {\n\t\t\tlow++\n\t\t}\n\t\tfor low <= high && thisVector.at(high) < maxScore-xfactor {\n\t\t\thigh--\n\t\t}\n\n\t\tif i == bottom {\n\t\t\txfactor = k.BlockCost\n\t\t}\n\n\t\tif required := (high - low) + 2; required > len(k.vectors[0]) {\n\t\t\tk.allocateVectors(required)\n\t\t}\n\n\t\tif i-low > maxRight {\n\t\t\tmaxRight = i - low\n\t\t}\n\t\tif i-high < maxLeft {\n\t\t\tmaxLeft = i - high\n\t\t}\n\t}\n\n\tk.highEnd.Abpos = maxJ\n\tk.highEnd.Bbpos = maxI\n\tk.highEnd.LowDiagonal = maxLeft\n\tk.highEnd.HighDiagonal = maxRight\n\tk.highEnd.Score = maxScore\n}\n<commit_msg>Fix pals dp indexing fault<commit_after>\/\/ Copyright ©2011-2013 The bíogo Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage dp\n\nimport (\n\t\"github.com\/biogo\/biogo\/align\/pals\/filter\"\n\t\"github.com\/biogo\/biogo\/alphabet\"\n\t\"github.com\/biogo\/biogo\/seq\/linear\"\n)\n\n\/\/ A kernel handles the actual dp alignment process.\ntype kernel struct {\n\ttarget, query *linear.Seq\n\n\tminLen int\n\tmaxDiff float64\n\n\tvalueToCode alphabet.Index\n\n\tCosts\n\n\tlowEnd Hit\n\thighEnd Hit\n\tvectors [2][]int\n\ttrapezoids []*filter.Trapezoid\n\tcovered []bool\n\tslot int\n\tresult chan Hit\n}\n\n\/\/ An offset slice seems to be the easiest way to implement the C idiom used in PALS to implement\n\/\/ an offset (by o) view (v) on an array (a):\n\/\/\n\/\/ int *v, o;\n\/\/ int [n]a;\n\/\/ v = a - o;\n\/\/\n\/\/ now v[i] is a view on a[i-o]\ntype offsetSlice struct {\n\toffset int\n\tslice []int\n}\n\nfunc (o *offsetSlice) at(i int) int { return o.slice[i-o.offset] }\nfunc (o *offsetSlice) set(i, v int) { o.slice[i-o.offset] = v }\n\nvar vecBuffering int = 100000\n\n\/\/ Handle the recursive search for alignable segments.\nfunc (k *kernel) alignRecursion(t *filter.Trapezoid) {\n\tmid := (t.Bottom + t.Top) \/ 2\n\n\tk.traceForward(mid, mid-t.Right, mid-t.Left)\n\n\tfor x := 1; x == 1 || k.highEnd.Bbpos > mid+x*k.MaxIGap && k.highEnd.Score < k.lowEnd.Score; x++ {\n\t\tk.traceReverse(k.lowEnd.Bepos, k.lowEnd.Aepos, k.lowEnd.Aepos, mid+k.MaxIGap, k.BlockCost+2*x*k.DiffCost)\n\t}\n\n\tk.highEnd.Aepos, k.highEnd.Bepos = k.lowEnd.Aepos, k.lowEnd.Bepos\n\n\tlowTrap, highTrap := *t, *t\n\tlowTrap.Top = k.highEnd.Bbpos - k.MaxIGap\n\thighTrap.Bottom = k.highEnd.Bepos + k.MaxIGap\n\n\tif k.highEnd.Bepos-k.highEnd.Bbpos >= k.minLen && k.highEnd.Aepos-k.highEnd.Abpos >= k.minLen {\n\t\tindel := (k.highEnd.Abpos - k.highEnd.Bbpos) - (k.highEnd.Aepos - k.highEnd.Bepos)\n\t\tif indel < 0 {\n\t\t\tif indel == -indel {\n\t\t\t\tpanic(\"dp: weird number overflow\")\n\t\t\t}\n\t\t\tindel = -indel\n\t\t}\n\t\tidentity := ((1 \/ k.RMatchCost) - float64(k.highEnd.Score-indel)\/(k.RMatchCost*float64(k.highEnd.Bepos-k.highEnd.Bbpos)))\n\n\t\tif identity <= k.maxDiff {\n\t\t\tk.highEnd.Error = identity\n\n\t\t\tfor i, trap := range k.trapezoids[k.slot+1:] {\n\t\t\t\tvar trapAProjection, trapBProjection, coverageA, coverageB int\n\n\t\t\t\tif trap.Bottom >= k.highEnd.Bepos {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\n\t\t\t\ttrapBProjection = trap.Top - trap.Bottom + 1\n\t\t\t\ttrapAProjection = trap.Right - trap.Left + 1\n\t\t\t\tif trap.Left < k.highEnd.LowDiagonal {\n\t\t\t\t\tcoverageA = k.highEnd.LowDiagonal\n\t\t\t\t} else {\n\t\t\t\t\tcoverageA = trap.Left\n\t\t\t\t}\n\t\t\t\tif trap.Right > k.highEnd.HighDiagonal {\n\t\t\t\t\tcoverageB = k.highEnd.HighDiagonal\n\t\t\t\t} else {\n\t\t\t\t\tcoverageB = trap.Right\n\t\t\t\t}\n\n\t\t\t\tif coverageA > coverageB {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tcoverageA = coverageB - coverageA + 1\n\t\t\t\tif trap.Top > k.highEnd.Bepos {\n\t\t\t\t\tcoverageB = k.highEnd.Bepos - trap.Bottom + 1\n\t\t\t\t} else {\n\t\t\t\t\tcoverageB = trapBProjection\n\t\t\t\t}\n\n\t\t\t\tif (float64(coverageA)\/float64(trapAProjection))*(float64(coverageB)\/float64(trapBProjection)) > 0.99 {\n\t\t\t\t\tk.covered[i] = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t\/\/ Diagonals to this point are query-target, not target-query.\n\t\t\tk.highEnd.LowDiagonal, k.highEnd.HighDiagonal = -k.highEnd.HighDiagonal, -k.highEnd.LowDiagonal\n\n\t\t\tk.result <- k.highEnd\n\t\t}\n\t}\n\n\tif lowTrap.Top-lowTrap.Bottom > k.minLen && lowTrap.Top < t.Top-k.MaxIGap {\n\t\tk.alignRecursion(&lowTrap)\n\t}\n\tif highTrap.Top-highTrap.Bottom > k.minLen {\n\t\tk.alignRecursion(&highTrap)\n\t}\n}\n\nfunc (k *kernel) allocateVectors(required int) {\n\tvecMax := required + required>>2 + vecBuffering\n\tk.vectors[0] = make([]int, vecMax)\n\tk.vectors[1] = make([]int, vecMax)\n}\n\n\/\/ Forward and Reverse D.P. Extension Routines\n\/\/ Called at the mid-point of trapezoid -- mid X [low,high], the extension\n\/\/ is computed to an end point and the lowest and highest diagonals\n\/\/ are recorded. These are returned in a partially filled DPHit\n\/\/ record, that will be merged with that returned for extension in the\n\/\/ opposite direction.\nfunc (k *kernel) traceForward(mid, low, high int) {\n\todd := false\n\tvar (\n\t\tmaxScore int\n\t\tmaxLeft, maxRight int\n\t\tmaxI, maxJ int\n\t\ti, j int\n\t)\n\n\t\/\/ Set basis from (mid,low) .. (mid,high).\n\tif low < 0 {\n\t\tlow = 0\n\t}\n\tif high > k.target.Len() {\n\t\thigh = k.target.Len()\n\t}\n\tif high < low {\n\t\thigh = low\n\t}\n\n\tif required := (high - low) + k.MaxIGap; required >= len(k.vectors[0]) {\n\t\tk.allocateVectors(required)\n\t}\n\n\tthisVector := &offsetSlice{\n\t\tslice: k.vectors[0],\n\t\toffset: low,\n\t}\n\n\tfor j = low; j <= high; j++ {\n\t\tthisVector.set(j, 0)\n\t}\n\n\thigh += k.MaxIGap\n\tif high > k.target.Len() {\n\t\thigh = k.target.Len()\n\t}\n\n\tfor ; j <= high; j++ {\n\t\tthisVector.set(j, thisVector.at(j-1)-k.DiffCost)\n\t}\n\n\tmaxScore = 0\n\tmaxRight = mid - low\n\tmaxLeft = mid - high\n\tmaxI = mid\n\tmaxJ = low\n\n\t\/\/ Advance to next row.\n\tthatVector := &offsetSlice{}\n\tfor i = mid; low <= high && i < k.query.Len(); i++ {\n\t\tvar cost, score int\n\n\t\t*thatVector = *thisVector\n\t\tif !odd {\n\t\t\tthisVector.slice = k.vectors[1]\n\t\t} else {\n\t\t\tthisVector.slice = k.vectors[0]\n\t\t}\n\t\tthisVector.offset = low\n\t\todd = !odd\n\n\t\tscore = thatVector.at(low)\n\t\tthisVector.set(low, score-k.DiffCost)\n\t\tcost = thisVector.at(low)\n\n\t\tfor j = low + 1; j <= high; j++ {\n\t\t\tvar ratchet, temp int\n\n\t\t\ttemp = cost\n\t\t\tcost = score\n\t\t\tscore = thatVector.at(j)\n\t\t\tif k.query.Seq[i] == k.target.Seq[j-1] && k.valueToCode[k.query.Seq[i]] >= 0 {\n\t\t\t\tcost += k.MatchCost\n\t\t\t}\n\n\t\t\tratchet = cost\n\t\t\tif score > ratchet {\n\t\t\t\tratchet = score\n\t\t\t}\n\t\t\tif temp > ratchet {\n\t\t\t\tratchet = temp\n\t\t\t}\n\n\t\t\tcost = ratchet - k.DiffCost\n\t\t\tthisVector.set(j, cost)\n\t\t\tif cost >= maxScore {\n\t\t\t\tmaxScore = cost\n\t\t\t\tmaxI = i + 1\n\t\t\t\tmaxJ = j\n\t\t\t}\n\t\t}\n\n\t\tif j <= k.target.Len() {\n\t\t\tvar ratchet int\n\n\t\t\tif k.query.Seq[i] == k.target.Seq[j-1] && k.valueToCode[k.query.Seq[i]] >= 0 {\n\t\t\t\tscore += k.MatchCost\n\t\t\t}\n\n\t\t\tratchet = score\n\t\t\tif cost > ratchet {\n\t\t\t\tratchet = cost\n\t\t\t}\n\n\t\t\tscore = ratchet - k.DiffCost\n\t\t\tthisVector.set(j, score)\n\t\t\tif score > maxScore {\n\t\t\t\tmaxScore = score\n\t\t\t\tmaxI = i + 1\n\t\t\t\tmaxJ = j\n\t\t\t}\n\n\t\t\tfor j++; j <= k.target.Len(); j++ {\n\t\t\t\tscore -= k.DiffCost\n\t\t\t\tif score < maxScore-k.BlockCost {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tthisVector.set(j, score)\n\t\t\t}\n\t\t}\n\n\t\thigh = j - 1\n\n\t\tfor low <= high && thisVector.at(low) < maxScore-k.BlockCost {\n\t\t\tlow++\n\t\t}\n\t\tfor low <= high && thisVector.at(high) < maxScore-k.BlockCost {\n\t\t\thigh--\n\t\t}\n\n\t\tif required := (high - low) + 2; required > len(k.vectors[0]) {\n\t\t\tk.allocateVectors(required)\n\t\t}\n\n\t\tif (i+1)-low > maxRight {\n\t\t\tmaxRight = (i + 1) - low\n\t\t}\n\t\tif (i+1)-high < maxLeft {\n\t\t\tmaxLeft = (i + 1) - high\n\t\t}\n\t}\n\n\tk.lowEnd.Aepos = maxJ\n\tk.lowEnd.Bepos = maxI\n\tk.lowEnd.LowDiagonal = maxLeft\n\tk.lowEnd.HighDiagonal = maxRight\n\tk.lowEnd.Score = maxScore\n}\n\nfunc (k *kernel) traceReverse(top, low, high, bottom, xfactor int) {\n\todd := false\n\tvar (\n\t\tmaxScore int\n\t\tmaxLeft, maxRight int\n\t\tmaxI, maxJ int\n\t\ti, j int\n\t)\n\n\t\/\/ Set basis from (top,low) .. (top,high).\n\tif low < 0 {\n\t\tlow = 0\n\t}\n\tif high > k.target.Len() {\n\t\thigh = k.target.Len()\n\t}\n\tif high < low {\n\t\thigh = low\n\t}\n\n\tif required := (high - low) + k.MaxIGap; required >= len(k.vectors[0]) {\n\t\tk.allocateVectors(required)\n\t}\n\n\tthisVector := &offsetSlice{\n\t\tslice: k.vectors[0],\n\t\toffset: high - (len(k.vectors[0]) - 1),\n\t}\n\tfor j = high; j >= low; j-- {\n\t\tthisVector.set(j, 0)\n\t}\n\n\tlow -= k.MaxIGap\n\tif low < 0 {\n\t\tlow = 0\n\t}\n\n\tfor ; j >= low; j-- {\n\t\tthisVector.set(j, thisVector.at(j+1)-k.DiffCost)\n\t}\n\n\tmaxScore = 0\n\tmaxRight = top - low\n\tmaxLeft = top - high\n\tmaxI = top\n\tmaxJ = low\n\n\t\/\/ Advance to next row.\n\tif top-1 <= bottom {\n\t\txfactor = k.BlockCost\n\t}\n\n\tthatVector := &offsetSlice{}\n\tfor i = top - 1; low <= high && i >= 0; i-- {\n\t\tvar cost, score int\n\n\t\t*thatVector = *thisVector\n\t\tif !odd {\n\t\t\tthisVector.slice = k.vectors[1]\n\t\t} else {\n\t\t\tthisVector.slice = k.vectors[0]\n\t\t}\n\t\tthisVector.offset = high - (len(k.vectors[0]) - 1)\n\t\todd = !odd\n\n\t\tscore = thatVector.at(high)\n\t\tthisVector.set(high, score-k.DiffCost)\n\t\tcost = thisVector.at(high)\n\n\t\tfor j = high - 1; j >= low; j-- {\n\t\t\tvar ratchet, temp int\n\n\t\t\ttemp = cost\n\t\t\tcost = score\n\t\t\tscore = thatVector.at(j)\n\t\t\tif k.query.Seq[i] == k.target.Seq[j] && k.valueToCode[k.query.Seq[i]] >= 0 {\n\t\t\t\tcost += k.MatchCost\n\t\t\t}\n\n\t\t\tratchet = cost\n\t\t\tif score > ratchet {\n\t\t\t\tratchet = score\n\t\t\t}\n\t\t\tif temp > ratchet {\n\t\t\t\tratchet = temp\n\t\t\t}\n\n\t\t\tcost = ratchet - k.DiffCost\n\t\t\tthisVector.set(j, cost)\n\t\t\tif cost >= maxScore {\n\t\t\t\tmaxScore = cost\n\t\t\t\tmaxI = i\n\t\t\t\tmaxJ = j\n\t\t\t}\n\t\t}\n\n\t\tif j >= 0 {\n\t\t\tvar ratchet int\n\n\t\t\tif k.query.Seq[i] == k.target.Seq[j] && k.valueToCode[k.query.Seq[i]] >= 0 {\n\t\t\t\tscore += k.MatchCost\n\t\t\t}\n\n\t\t\tratchet = score\n\t\t\tif cost > ratchet {\n\t\t\t\tratchet = cost\n\t\t\t}\n\n\t\t\tscore = ratchet - k.DiffCost\n\t\t\tthisVector.set(j, score)\n\t\t\tif score > maxScore {\n\t\t\t\tmaxScore = score\n\t\t\t\tmaxI = i\n\t\t\t\tmaxJ = j\n\t\t\t}\n\n\t\t\tfor j--; j >= 0; j-- {\n\t\t\t\tscore -= k.DiffCost\n\t\t\t\tif score < maxScore-xfactor {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tthisVector.set(j, score)\n\t\t\t}\n\t\t}\n\n\t\tlow = j + 1\n\n\t\tfor low <= high && thisVector.at(low) < maxScore-xfactor {\n\t\t\tlow++\n\t\t}\n\t\tfor low <= high && thisVector.at(high) < maxScore-xfactor {\n\t\t\thigh--\n\t\t}\n\n\t\tif i == bottom {\n\t\t\txfactor = k.BlockCost\n\t\t}\n\n\t\tif required := (high - low) + 2; required > len(k.vectors[0]) {\n\t\t\tk.allocateVectors(required)\n\t\t}\n\n\t\tif i-low > maxRight {\n\t\t\tmaxRight = i - low\n\t\t}\n\t\tif i-high < maxLeft {\n\t\t\tmaxLeft = i - high\n\t\t}\n\t}\n\n\tk.highEnd.Abpos = maxJ\n\tk.highEnd.Bbpos = maxI\n\tk.highEnd.LowDiagonal = maxLeft\n\tk.highEnd.HighDiagonal = maxRight\n\tk.highEnd.Score = maxScore\n}\n<|endoftext|>"} {"text":"<commit_before>package single_node\n\nimport (\n\t\"context\"\n\t\"strings\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\tv1 \"github.com\/openshift\/api\/config\/v1\"\n\texutil \"github.com\/openshift\/origin\/test\/extended\/util\"\n\tappsv1 \"k8s.io\/api\/apps\/v1\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\te2e \"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\te2eskipper \"k8s.io\/kubernetes\/test\/e2e\/framework\/skipper\"\n)\n\nfunc getOpenshiftNamespaces(f *e2e.Framework) []corev1.Namespace {\n\tlist, err := f.ClientSet.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})\n\tExpect(err).NotTo(HaveOccurred())\n\n\tvar openshiftNamespaces []corev1.Namespace\n\tfor _, namespace := range list.Items {\n\t\tif strings.HasPrefix(namespace.Name, \"openshift-\") {\n\t\t\topenshiftNamespaces = append(openshiftNamespaces, namespace)\n\t\t}\n\t}\n\n\treturn openshiftNamespaces\n}\n\nfunc getNamespaceDeployments(f *e2e.Framework, namespace corev1.Namespace) []appsv1.Deployment {\n\tlist, err := f.ClientSet.AppsV1().Deployments(namespace.Name).List(context.Background(), metav1.ListOptions{})\n\tExpect(err).NotTo(HaveOccurred())\n\n\treturn list.Items\n}\n\nfunc getTopologies(f *e2e.Framework) (controlPlaneTopology, infraTopology v1.TopologyMode) {\n\toc := exutil.NewCLIWithFramework(f)\n\tinfra, err := oc.AdminConfigClient().ConfigV1().Infrastructures().Get(context.Background(),\n\t\t\"cluster\", metav1.GetOptions{})\n\tExpect(err).NotTo(HaveOccurred())\n\n\treturn infra.Status.ControlPlaneTopology, infra.Status.InfrastructureTopology\n}\n\nfunc isInfrastructureDeployment(deployment appsv1.Deployment) bool {\n\tinfrastructureNamespaces := map[string][]string{\n\t\t\"openshift-ingress\": {\n\t\t\t\"router-default\",\n\t\t},\n\t}\n\n\tnamespaceInfraDeployments, ok := infrastructureNamespaces[deployment.Namespace]\n\n\tif !ok {\n\t\treturn false\n\t}\n\n\tfor _, infraDeploymentName := range namespaceInfraDeployments {\n\t\tif deployment.Name == infraDeploymentName {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc validateReplicas(deployment appsv1.Deployment,\n\tcontrolPlaneTopology, infraTopology v1.TopologyMode, failureAllowed bool) {\n\tif isInfrastructureDeployment(deployment) {\n\t\tif infraTopology != v1.SingleReplicaTopologyMode {\n\t\t\treturn\n\t\t}\n\t} else if controlPlaneTopology != v1.SingleReplicaTopologyMode {\n\t\treturn\n\t}\n\n\tExpect(deployment.Spec.Replicas).ToNot(BeNil())\n\n\treplicas := int(*deployment.Spec.Replicas)\n\n\tif !failureAllowed {\n\t\tExpect(replicas).To(Equal(1),\n\t\t\t\"%s in %s namespace has wrong number of replicas\", deployment.Name, deployment.Namespace)\n\t} else {\n\t\tif replicas == 1 {\n\t\t\tt := GinkgoT()\n\t\t\tt.Logf(\"Deployment %s in namespace %s has one replica, consider taking it off the topology allow-list\",\n\t\t\t\tdeployment.Name, deployment.Namespace)\n\t\t}\n\t}\n}\n\nfunc isAllowedToFail(deployment appsv1.Deployment) bool {\n\t\/\/ allowedToFail is a list of deployments that currently have 2 replicas even in single-replica\n\t\/\/ topology deployments, because their operator has yet to be made aware of the new API.\n\t\/\/ We will slowly remove deployments from this list once their operators have been made\n\t\/\/ aware until this list is empty and this function will be removed.\n\tallowedToFail := map[string][]string{\n\t\t\"openshift-authentication\": {\n\t\t\t\"oauth-openshift\",\n\t\t},\n\t\t\"openshift-image-registry\": {\n\t\t\t\"image-registry\",\n\t\t},\n\t\t\"openshift-monitoring\": {\n\t\t\t\"prometheus-adapter\",\n\t\t\t\"thanos-querier\",\n\t\t},\n\t\t\"openshift-operator-lifecycle-manager\": {\n\t\t\t\"packageserver\",\n\t\t},\n\t}\n\n\tnamespaceAllowedToFailDeployments, ok := allowedToFail[deployment.Namespace]\n\n\tif !ok {\n\t\treturn false\n\t}\n\n\tfor _, allowedToFailDeploymentName := range namespaceAllowedToFailDeployments {\n\t\tif deployment.Name == allowedToFailDeploymentName {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nvar _ = Describe(\"[sig-arch] Cluster topology single node tests\", func() {\n\tf := e2e.NewDefaultFramework(\"single-node\")\n\n\tIt(\"Verify that OpenShift components deploy one replica in SingleReplica topology mode\", func() {\n\t\tcontrolPlaneTopology, infraTopology := getTopologies(f)\n\n\t\tif controlPlaneTopology != v1.SingleReplicaTopologyMode && infraTopology != v1.SingleReplicaTopologyMode {\n\t\t\te2eskipper.Skipf(\"Test is only relevant for single replica topologies\")\n\t\t}\n\n\t\tfor _, namespace := range getOpenshiftNamespaces(f) {\n\t\t\tfor _, deployment := range getNamespaceDeployments(f, namespace) {\n\t\t\t\tvalidateReplicas(deployment, controlPlaneTopology, infraTopology, isAllowedToFail(deployment))\n\t\t\t}\n\t\t}\n\t})\n})\n<commit_msg>test\/extended\/single_node: update for monitoring components<commit_after>package single_node\n\nimport (\n\t\"context\"\n\t\"strings\"\n\n\t. \"github.com\/onsi\/ginkgo\"\n\t. \"github.com\/onsi\/gomega\"\n\tv1 \"github.com\/openshift\/api\/config\/v1\"\n\texutil \"github.com\/openshift\/origin\/test\/extended\/util\"\n\tappsv1 \"k8s.io\/api\/apps\/v1\"\n\tcorev1 \"k8s.io\/api\/core\/v1\"\n\tmetav1 \"k8s.io\/apimachinery\/pkg\/apis\/meta\/v1\"\n\te2e \"k8s.io\/kubernetes\/test\/e2e\/framework\"\n\te2eskipper \"k8s.io\/kubernetes\/test\/e2e\/framework\/skipper\"\n)\n\nfunc getOpenshiftNamespaces(f *e2e.Framework) []corev1.Namespace {\n\tlist, err := f.ClientSet.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})\n\tExpect(err).NotTo(HaveOccurred())\n\n\tvar openshiftNamespaces []corev1.Namespace\n\tfor _, namespace := range list.Items {\n\t\tif strings.HasPrefix(namespace.Name, \"openshift-\") {\n\t\t\topenshiftNamespaces = append(openshiftNamespaces, namespace)\n\t\t}\n\t}\n\n\treturn openshiftNamespaces\n}\n\nfunc getNamespaceDeployments(f *e2e.Framework, namespace corev1.Namespace) []appsv1.Deployment {\n\tlist, err := f.ClientSet.AppsV1().Deployments(namespace.Name).List(context.Background(), metav1.ListOptions{})\n\tExpect(err).NotTo(HaveOccurred())\n\n\treturn list.Items\n}\n\nfunc getTopologies(f *e2e.Framework) (controlPlaneTopology, infraTopology v1.TopologyMode) {\n\toc := exutil.NewCLIWithFramework(f)\n\tinfra, err := oc.AdminConfigClient().ConfigV1().Infrastructures().Get(context.Background(),\n\t\t\"cluster\", metav1.GetOptions{})\n\tExpect(err).NotTo(HaveOccurred())\n\n\treturn infra.Status.ControlPlaneTopology, infra.Status.InfrastructureTopology\n}\n\nfunc isInfrastructureDeployment(deployment appsv1.Deployment) bool {\n\tinfrastructureNamespaces := map[string][]string{\n\t\t\"openshift-ingress\": {\n\t\t\t\"router-default\",\n\t\t},\n\t}\n\n\tnamespaceInfraDeployments, ok := infrastructureNamespaces[deployment.Namespace]\n\n\tif !ok {\n\t\treturn false\n\t}\n\n\tfor _, infraDeploymentName := range namespaceInfraDeployments {\n\t\tif deployment.Name == infraDeploymentName {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc validateReplicas(deployment appsv1.Deployment,\n\tcontrolPlaneTopology, infraTopology v1.TopologyMode, failureAllowed bool) {\n\tif isInfrastructureDeployment(deployment) {\n\t\tif infraTopology != v1.SingleReplicaTopologyMode {\n\t\t\treturn\n\t\t}\n\t} else if controlPlaneTopology != v1.SingleReplicaTopologyMode {\n\t\treturn\n\t}\n\n\tExpect(deployment.Spec.Replicas).ToNot(BeNil())\n\n\treplicas := int(*deployment.Spec.Replicas)\n\n\tif !failureAllowed {\n\t\tExpect(replicas).To(Equal(1),\n\t\t\t\"%s in %s namespace has wrong number of replicas\", deployment.Name, deployment.Namespace)\n\t} else {\n\t\tif replicas == 1 {\n\t\t\tt := GinkgoT()\n\t\t\tt.Logf(\"Deployment %s in namespace %s has one replica, consider taking it off the topology allow-list\",\n\t\t\t\tdeployment.Name, deployment.Namespace)\n\t\t}\n\t}\n}\n\nfunc isAllowedToFail(deployment appsv1.Deployment) bool {\n\t\/\/ allowedToFail is a list of deployments that currently have 2 replicas even in single-replica\n\t\/\/ topology deployments, because their operator has yet to be made aware of the new API.\n\t\/\/ We will slowly remove deployments from this list once their operators have been made\n\t\/\/ aware until this list is empty and this function will be removed.\n\tallowedToFail := map[string][]string{\n\t\t\"openshift-authentication\": {\n\t\t\t\"oauth-openshift\",\n\t\t},\n\t\t\"openshift-image-registry\": {\n\t\t\t\"image-registry\",\n\t\t},\n\t\t\"openshift-operator-lifecycle-manager\": {\n\t\t\t\"packageserver\",\n\t\t},\n\t}\n\n\tnamespaceAllowedToFailDeployments, ok := allowedToFail[deployment.Namespace]\n\n\tif !ok {\n\t\treturn false\n\t}\n\n\tfor _, allowedToFailDeploymentName := range namespaceAllowedToFailDeployments {\n\t\tif deployment.Name == allowedToFailDeploymentName {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nvar _ = Describe(\"[sig-arch] Cluster topology single node tests\", func() {\n\tf := e2e.NewDefaultFramework(\"single-node\")\n\n\tIt(\"Verify that OpenShift components deploy one replica in SingleReplica topology mode\", func() {\n\t\tcontrolPlaneTopology, infraTopology := getTopologies(f)\n\n\t\tif controlPlaneTopology != v1.SingleReplicaTopologyMode && infraTopology != v1.SingleReplicaTopologyMode {\n\t\t\te2eskipper.Skipf(\"Test is only relevant for single replica topologies\")\n\t\t}\n\n\t\tfor _, namespace := range getOpenshiftNamespaces(f) {\n\t\t\tfor _, deployment := range getNamespaceDeployments(f, namespace) {\n\t\t\t\tvalidateReplicas(deployment, controlPlaneTopology, infraTopology, isAllowedToFail(deployment))\n\t\t\t}\n\t\t}\n\t})\n})\n<|endoftext|>"} {"text":"<commit_before>package api\n\n\/\/ RootResponse object which will be formatted to json and sent back to google and onto the user.\ntype RootResponse struct {\n\tConversationToken *string `json:\"conversation_token\"`\n\tExpectUserResponse bool `json:\"expect_user_response\"`\n\tExpectedInputs []ExpectedInput `json:\"expected_inputs,omitempty\"`\n\tFinalResponse_ FinalResponse `json:\"final_response\"`\n}\n\ntype ExpectedInput struct {\n\tPossibleIntents []ExpectedIntent `json:\"possible_intents\"`\n\tInputPrompt_ InputPrompt `json:\"input_prompt\"`\n}\n\ntype ExpectedIntent struct {\n\tIntent string `json:\"intent\"`\n\tInputValueSpec_ InputValueSpec `json:\"input_value_spec\"`\n}\n\ntype FinalResponse struct {\n\tSpeechResponse_ SpeechResponse `json:\"speech_response\"`\n}\n\ntype InputValueSpec struct {\n\tPermissionValueSpec_ PermissionValueSpec `json:\"permission_value_spec\"`\n}\n\ntype PermissionValueSpec struct {\n\tOptContext *string `json:\"opt_context\"`\n\tPermissions []string `json:\"permissions\"`\n}\n\ntype SpeechResponse struct {\n\tTextToSpeech *string `json:\"text_to_speech\"`\n\tSSML *string `json:\"ssml\"`\n}\n\ntype InputPrompt struct {\n\tInitialPrompts []SpeechResponse `json:\"initial_prompts\"`\n\tNoInputPrompts []SpeechResponse `json:\"no_input_prompts\"`\n}\n<commit_msg>either SSML or text to speech can be defined, not both<commit_after>package api\n\n\/\/ RootResponse object which will be formatted to json and sent back to google and onto the user.\ntype RootResponse struct {\n\tConversationToken *string `json:\"conversation_token\"`\n\tExpectUserResponse bool `json:\"expect_user_response\"`\n\tExpectedInputs []ExpectedInput `json:\"expected_inputs,omitempty\"`\n\tFinalResponse_ FinalResponse `json:\"final_response\"`\n}\n\ntype ExpectedInput struct {\n\tPossibleIntents []ExpectedIntent `json:\"possible_intents\"`\n\tInputPrompt_ InputPrompt `json:\"input_prompt\"`\n}\n\ntype ExpectedIntent struct {\n\tIntent string `json:\"intent\"`\n\tInputValueSpec_ InputValueSpec `json:\"input_value_spec\"`\n}\n\ntype FinalResponse struct {\n\tSpeechResponse_ SpeechResponse `json:\"speech_response\"`\n}\n\ntype InputValueSpec struct {\n\tPermissionValueSpec_ PermissionValueSpec `json:\"permission_value_spec\"`\n}\n\ntype PermissionValueSpec struct {\n\tOptContext *string `json:\"opt_context\"`\n\tPermissions []string `json:\"permissions\"`\n}\n\ntype SpeechResponse struct {\n\tTextToSpeech *string `json:\"text_to_speech,omitempty\"`\n\tSSML *string `json:\"ssml,omitempty\"`\n}\n\ntype InputPrompt struct {\n\tInitialPrompts []SpeechResponse `json:\"initial_prompts\"`\n\tNoInputPrompts []SpeechResponse `json:\"no_input_prompts\"`\n}\n<|endoftext|>"} {"text":"<commit_before>package jsonmap\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"reflect\"\n)\n\ntype ValidationError struct {\n\treason string\n}\n\nfunc NewValidationError(reason string, a ...interface{}) *ValidationError {\n\treturn &ValidationError{\n\t\treason: fmt.Sprintf(reason, a...),\n\t}\n}\n\nfunc (e *ValidationError) Error() string {\n\treturn e.reason\n}\n\ntype Validator interface {\n\tValidate(interface{}) (interface{}, error)\n}\n\ntype Encoder interface {\n\tUnmarshal(partial interface{}, dstValue reflect.Value) error\n\tMarshal(reflect.Value) (json.Marshaler, error)\n}\n\ntype MappedField struct {\n\tStructFieldName string\n\tJSONFieldName string\n\tContains Encoder\n\tValidator Validator\n\tOptional bool\n}\n\ntype TypeMap struct {\n\tUnderlyingType interface{}\n\tFields []MappedField\n}\n\ntype russellRawMessage struct {\n\tData []byte\n}\n\nfunc (rm russellRawMessage) MarshalJSON() ([]byte, error) {\n\treturn rm.Data, nil\n}\n\nfunc (tm TypeMap) Unmarshal(partial interface{}, dstValue reflect.Value) error {\n\tdata, ok := partial.(map[string]interface{})\n\tif !ok {\n\t\treturn NewValidationError(\"expected a JSON object\")\n\t}\n\n\tfor _, field := range tm.Fields {\n\t\tdstField := dstValue.FieldByName(field.StructFieldName)\n\t\tif !dstField.IsValid() {\n\t\t\tpanic(\"No such underlying field: \" + field.StructFieldName)\n\t\t}\n\n\t\tval, ok := data[field.JSONFieldName]\n\t\tif !ok {\n\t\t\tif field.Optional {\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\treturn NewValidationError(\"missing required field: %s\", field.JSONFieldName)\n\t\t\t}\n\t\t}\n\n\t\tif field.Contains != nil {\n\t\t\treturn field.Contains.Unmarshal(val, dstField)\n\t\t} else {\n\t\t\tval, err := field.Validator.Validate(val)\n\t\t\tif err != nil {\n\t\t\t\tif ve, ok := err.(*ValidationError); ok {\n\t\t\t\t\treturn NewValidationError(\"error validating field '%s': %s\", field.JSONFieldName, ve.Error())\n\t\t\t\t} else {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tdstField.Set(reflect.ValueOf(val))\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (tm TypeMap) Marshal(src reflect.Value) (json.Marshaler, error) {\n\tif src.Kind() == reflect.Ptr {\n\t\tsrc = src.Elem()\n\t}\n\tresult := map[string]interface{}{}\n\n\tfor _, field := range tm.Fields {\n\t\tsrcField := src.FieldByName(field.StructFieldName)\n\t\tif !srcField.IsValid() {\n\t\t\tpanic(\"No such underlying field: \" + field.StructFieldName)\n\t\t}\n\n\t\tif field.Contains != nil {\n\t\t\tval, err := field.Contains.Marshal(srcField)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresult[field.JSONFieldName] = val\n\t\t} else {\n\t\t\tresult[field.JSONFieldName] = srcField.Interface()\n\t\t}\n\n\t}\n\n\tdata, err := json.Marshal(result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn russellRawMessage{data}, nil\n}\n\ntype TypeMapper struct {\n\ttypeMaps map[reflect.Type]TypeMap\n}\n\nfunc NewTypeMapper(maps ...TypeMap) *TypeMapper {\n\tt := &TypeMapper{\n\t\ttypeMaps: make(map[reflect.Type]TypeMap),\n\t}\n\tfor _, m := range maps {\n\t\tt.typeMaps[reflect.TypeOf(m.UnderlyingType)] = m\n\t}\n\treturn t\n}\n\nfunc (tm *TypeMapper) getTypeMap(obj interface{}) TypeMap {\n\tif reflect.TypeOf(obj).Kind() != reflect.Ptr {\n\t\tpanic(\"dst is not a pointer\")\n\t}\n\n\tt := reflect.TypeOf(obj).Elem()\n\tm, ok := tm.typeMaps[t]\n\n\tif !ok {\n\t\tpanic(\"no TypeMap registered for type: \" + t.String())\n\t}\n\n\treturn m\n}\n\nfunc (tm *TypeMapper) Unmarshal(data []byte, dest interface{}) error {\n\tm := tm.getTypeMap(dest)\n\tpartial := map[string]interface{}{}\n\n\terr := json.Unmarshal(data, &partial)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn m.Unmarshal(partial, reflect.ValueOf(dest).Elem())\n}\n\nfunc (tm *TypeMapper) Marshal(src interface{}) ([]byte, error) {\n\tm := tm.getTypeMap(src)\n\tdata, err := m.Marshal(reflect.ValueOf(src))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn data.MarshalJSON()\n}\n<commit_msg>Fix a bug that prevented marshaling of fields listed after a nested struct<commit_after>package jsonmap\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"reflect\"\n)\n\ntype ValidationError struct {\n\treason string\n}\n\nfunc NewValidationError(reason string, a ...interface{}) *ValidationError {\n\treturn &ValidationError{\n\t\treason: fmt.Sprintf(reason, a...),\n\t}\n}\n\nfunc (e *ValidationError) Error() string {\n\treturn e.reason\n}\n\ntype Validator interface {\n\tValidate(interface{}) (interface{}, error)\n}\n\ntype Encoder interface {\n\tUnmarshal(partial interface{}, dstValue reflect.Value) error\n\tMarshal(reflect.Value) (json.Marshaler, error)\n}\n\ntype MappedField struct {\n\tStructFieldName string\n\tJSONFieldName string\n\tContains Encoder\n\tValidator Validator\n\tOptional bool\n}\n\ntype TypeMap struct {\n\tUnderlyingType interface{}\n\tFields []MappedField\n}\n\ntype russellRawMessage struct {\n\tData []byte\n}\n\nfunc (rm russellRawMessage) MarshalJSON() ([]byte, error) {\n\treturn rm.Data, nil\n}\n\nfunc (tm TypeMap) Unmarshal(partial interface{}, dstValue reflect.Value) error {\n\tdata, ok := partial.(map[string]interface{})\n\tif !ok {\n\t\treturn NewValidationError(\"expected a JSON object\")\n\t}\n\n\tfor _, field := range tm.Fields {\n\t\tdstField := dstValue.FieldByName(field.StructFieldName)\n\t\tif !dstField.IsValid() {\n\t\t\tpanic(\"No such underlying field: \" + field.StructFieldName)\n\t\t}\n\n\t\tval, ok := data[field.JSONFieldName]\n\t\tif !ok {\n\t\t\tif field.Optional {\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\treturn NewValidationError(\"missing required field: %s\", field.JSONFieldName)\n\t\t\t}\n\t\t}\n\n\t\tvar err error\n\n\t\tif field.Contains != nil {\n\t\t\terr = field.Contains.Unmarshal(val, dstField)\n\t\t} else {\n\t\t\tval, err = field.Validator.Validate(val)\n\t\t\tif err == nil {\n\t\t\t\tdstField.Set(reflect.ValueOf(val))\n\t\t\t}\n\t\t}\n\n\t\tif err != nil {\n\t\t\tif ve, ok := err.(*ValidationError); ok {\n\t\t\t\treturn NewValidationError(\"error validating field '%s': %s\", field.JSONFieldName, ve.Error())\n\t\t\t} else {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (tm TypeMap) Marshal(src reflect.Value) (json.Marshaler, error) {\n\tif src.Kind() == reflect.Ptr {\n\t\tsrc = src.Elem()\n\t}\n\tresult := map[string]interface{}{}\n\n\tfor _, field := range tm.Fields {\n\t\tsrcField := src.FieldByName(field.StructFieldName)\n\t\tif !srcField.IsValid() {\n\t\t\tpanic(\"No such underlying field: \" + field.StructFieldName)\n\t\t}\n\n\t\tif field.Contains != nil {\n\t\t\tval, err := field.Contains.Marshal(srcField)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tresult[field.JSONFieldName] = val\n\t\t} else {\n\t\t\tresult[field.JSONFieldName] = srcField.Interface()\n\t\t}\n\n\t}\n\n\tdata, err := json.Marshal(result)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn russellRawMessage{data}, nil\n}\n\ntype TypeMapper struct {\n\ttypeMaps map[reflect.Type]TypeMap\n}\n\nfunc NewTypeMapper(maps ...TypeMap) *TypeMapper {\n\tt := &TypeMapper{\n\t\ttypeMaps: make(map[reflect.Type]TypeMap),\n\t}\n\tfor _, m := range maps {\n\t\tt.typeMaps[reflect.TypeOf(m.UnderlyingType)] = m\n\t}\n\treturn t\n}\n\nfunc (tm *TypeMapper) getTypeMap(obj interface{}) TypeMap {\n\tif reflect.TypeOf(obj).Kind() != reflect.Ptr {\n\t\tpanic(\"dst is not a pointer\")\n\t}\n\n\tt := reflect.TypeOf(obj).Elem()\n\tm, ok := tm.typeMaps[t]\n\n\tif !ok {\n\t\tpanic(\"no TypeMap registered for type: \" + t.String())\n\t}\n\n\treturn m\n}\n\nfunc (tm *TypeMapper) Unmarshal(data []byte, dest interface{}) error {\n\tm := tm.getTypeMap(dest)\n\tpartial := map[string]interface{}{}\n\n\terr := json.Unmarshal(data, &partial)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn m.Unmarshal(partial, reflect.ValueOf(dest).Elem())\n}\n\nfunc (tm *TypeMapper) Marshal(src interface{}) ([]byte, error) {\n\tm := tm.getTypeMap(src)\n\tdata, err := m.Marshal(reflect.ValueOf(src))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn data.MarshalJSON()\n}\n<|endoftext|>"} {"text":"<commit_before>package ethchain\n\nimport (\n\t\"bytes\"\n\t\"github.com\/ethereum\/eth-go\/ethutil\"\n\t\"log\"\n\t\"math\"\n\t\"math\/big\"\n)\n\ntype BlockChain struct {\n\t\/\/ The famous, the fabulous Mister GENESIIIIIIS (block)\n\tgenesisBlock *Block\n\t\/\/ Last known total difficulty\n\tTD *big.Int\n\n\tLastBlockNumber uint64\n\n\tCurrentBlock *Block\n\tLastBlockHash []byte\n}\n\nfunc NewBlockChain() *BlockChain {\n\tbc := &BlockChain{}\n\tbc.genesisBlock = NewBlockFromData(ethutil.Encode(Genesis))\n\n\tbc.setLastBlock()\n\n\treturn bc\n}\n\nfunc (bc *BlockChain) Genesis() *Block {\n\treturn bc.genesisBlock\n}\n\nfunc (bc *BlockChain) NewBlock(coinbase []byte, txs []*Transaction) *Block {\n\tvar root interface{}\n\tvar lastBlockTime int64\n\thash := ZeroHash256\n\n\tif bc.CurrentBlock != nil {\n\t\troot = bc.CurrentBlock.State().Root\n\t\thash = bc.LastBlockHash\n\t\tlastBlockTime = bc.CurrentBlock.Time\n\t}\n\n\tblock := CreateBlock(\n\t\troot,\n\t\thash,\n\t\tcoinbase,\n\t\tethutil.BigPow(2, 32),\n\t\tnil,\n\t\t\"\",\n\t\ttxs)\n\n\tif bc.CurrentBlock != nil {\n\t\tvar mul *big.Int\n\t\tif block.Time < lastBlockTime+42 {\n\t\t\tmul = big.NewInt(1)\n\t\t} else {\n\t\t\tmul = big.NewInt(-1)\n\t\t}\n\n\t\tdiff := new(big.Int)\n\t\tdiff.Add(diff, bc.CurrentBlock.Difficulty)\n\t\tdiff.Div(diff, big.NewInt(1024))\n\t\tdiff.Mul(diff, mul)\n\t\tdiff.Add(diff, bc.CurrentBlock.Difficulty)\n\t\tblock.Difficulty = diff\n\t}\n\n\treturn block\n}\n\nfunc (bc *BlockChain) HasBlock(hash []byte) bool {\n\tdata, _ := ethutil.Config.Db.Get(hash)\n\treturn len(data) != 0\n}\n\nfunc (bc *BlockChain) GenesisBlock() *Block {\n\treturn bc.genesisBlock\n}\n\n\/\/ Get chain return blocks from hash up to max in RLP format\nfunc (bc *BlockChain) GetChainFromHash(hash []byte, max uint64) []interface{} {\n\tvar chain []interface{}\n\t\/\/ Get the current hash to start with\n\tcurrentHash := bc.CurrentBlock.Hash()\n\t\/\/ Get the last number on the block chain\n\tlastNumber := bc.BlockInfo(bc.CurrentBlock).Number\n\t\/\/ Get the parents number\n\tparentNumber := bc.BlockInfoByHash(hash).Number\n\t\/\/ Get the min amount. We might not have max amount of blocks\n\tcount := uint64(math.Min(float64(lastNumber-parentNumber), float64(max)))\n\tstartNumber := parentNumber + count\n\n\tnum := lastNumber\n\tfor ; num > startNumber; currentHash = bc.GetBlock(currentHash).PrevHash {\n\t\tnum--\n\t}\n\tfor i := uint64(0); bytes.Compare(currentHash, hash) != 0 && num >= parentNumber && i < count; i++ {\n\t\t\/\/ Get the block of the chain\n\t\tblock := bc.GetBlock(currentHash)\n\t\tcurrentHash = block.PrevHash\n\n\t\tchain = append(chain, block.Value().Val)\n\n\t\tnum--\n\t}\n\n\treturn chain\n}\n\nfunc (bc *BlockChain) setLastBlock() {\n\tdata, _ := ethutil.Config.Db.Get([]byte(\"LastBlock\"))\n\tif len(data) != 0 {\n\t\tblock := NewBlockFromBytes(data)\n\t\tinfo := bc.BlockInfo(block)\n\t\tbc.CurrentBlock = block\n\t\tbc.LastBlockHash = block.Hash()\n\t\tbc.LastBlockNumber = info.Number\n\n\t\tlog.Printf(\"[CHAIN] Last known block height #%d\\n\", bc.LastBlockNumber)\n\t}\n\n\t\/\/ Set the last know difficulty (might be 0x0 as initial value, Genesis)\n\tbc.TD = ethutil.BigD(ethutil.Config.Db.LastKnownTD())\n}\n\nfunc (bc *BlockChain) SetTotalDifficulty(td *big.Int) {\n\tethutil.Config.Db.Put([]byte(\"LastKnownTotalDifficulty\"), td.Bytes())\n\tbc.TD = td\n}\n\n\/\/ Add a block to the chain and record addition information\nfunc (bc *BlockChain) Add(block *Block) {\n\tbc.writeBlockInfo(block)\n\n\t\/\/ Prepare the genesis block\n\tbc.CurrentBlock = block\n\tbc.LastBlockHash = block.Hash()\n\n\tencodedBlock := block.RlpEncode()\n\tethutil.Config.Db.Put(block.Hash(), encodedBlock)\n\tethutil.Config.Db.Put([]byte(\"LastBlock\"), encodedBlock)\n}\n\nfunc (bc *BlockChain) GetBlock(hash []byte) *Block {\n\tdata, _ := ethutil.Config.Db.Get(hash)\n\n\treturn NewBlockFromData(data)\n}\n\nfunc (bc *BlockChain) BlockInfoByHash(hash []byte) BlockInfo {\n\tbi := BlockInfo{}\n\tdata, _ := ethutil.Config.Db.Get(append(hash, []byte(\"Info\")...))\n\tbi.RlpDecode(data)\n\n\treturn bi\n}\n\nfunc (bc *BlockChain) BlockInfo(block *Block) BlockInfo {\n\tbi := BlockInfo{}\n\tdata, _ := ethutil.Config.Db.Get(append(block.Hash(), []byte(\"Info\")...))\n\tbi.RlpDecode(data)\n\n\treturn bi\n}\n\n\/\/ Unexported method for writing extra non-essential block info to the db\nfunc (bc *BlockChain) writeBlockInfo(block *Block) {\n\tbc.LastBlockNumber++\n\tbi := BlockInfo{Number: bc.LastBlockNumber, Hash: block.Hash(), Parent: block.PrevHash}\n\n\t\/\/ For now we use the block hash with the words \"info\" appended as key\n\tethutil.Config.Db.Put(append(block.Hash(), []byte(\"Info\")...), bi.RlpEncode())\n}\n\nfunc (bc *BlockChain) Stop() {\n\tif bc.CurrentBlock != nil {\n\t\tlog.Println(\"[CHAIN] Stopped\")\n\t}\n}\n<commit_msg>Get a chain of blocks made simple<commit_after>package ethchain\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"github.com\/ethereum\/eth-go\/ethutil\"\n\t\"log\"\n\t\"math\"\n\t\"math\/big\"\n)\n\ntype BlockChain struct {\n\t\/\/ The famous, the fabulous Mister GENESIIIIIIS (block)\n\tgenesisBlock *Block\n\t\/\/ Last known total difficulty\n\tTD *big.Int\n\n\tLastBlockNumber uint64\n\n\tCurrentBlock *Block\n\tLastBlockHash []byte\n}\n\nfunc NewBlockChain() *BlockChain {\n\tbc := &BlockChain{}\n\tbc.genesisBlock = NewBlockFromData(ethutil.Encode(Genesis))\n\n\tbc.setLastBlock()\n\n\treturn bc\n}\n\nfunc (bc *BlockChain) Genesis() *Block {\n\treturn bc.genesisBlock\n}\n\nfunc (bc *BlockChain) NewBlock(coinbase []byte, txs []*Transaction) *Block {\n\tvar root interface{}\n\tvar lastBlockTime int64\n\thash := ZeroHash256\n\n\tif bc.CurrentBlock != nil {\n\t\troot = bc.CurrentBlock.State().Root\n\t\thash = bc.LastBlockHash\n\t\tlastBlockTime = bc.CurrentBlock.Time\n\t}\n\n\tblock := CreateBlock(\n\t\troot,\n\t\thash,\n\t\tcoinbase,\n\t\tethutil.BigPow(2, 32),\n\t\tnil,\n\t\t\"\",\n\t\ttxs)\n\n\tif bc.CurrentBlock != nil {\n\t\tvar mul *big.Int\n\t\tif block.Time < lastBlockTime+42 {\n\t\t\tmul = big.NewInt(1)\n\t\t} else {\n\t\t\tmul = big.NewInt(-1)\n\t\t}\n\n\t\tdiff := new(big.Int)\n\t\tdiff.Add(diff, bc.CurrentBlock.Difficulty)\n\t\tdiff.Div(diff, big.NewInt(1024))\n\t\tdiff.Mul(diff, mul)\n\t\tdiff.Add(diff, bc.CurrentBlock.Difficulty)\n\t\tblock.Difficulty = diff\n\t}\n\n\treturn block\n}\n\nfunc (bc *BlockChain) HasBlock(hash []byte) bool {\n\tdata, _ := ethutil.Config.Db.Get(hash)\n\treturn len(data) != 0\n}\n\nfunc (bc *BlockChain) GenesisBlock() *Block {\n\treturn bc.genesisBlock\n}\n\n\/\/ Get chain return blocks from hash up to max in RLP format\nfunc (bc *BlockChain) GetChainFromHash(hash []byte, max uint64) []interface{} {\n\tvar chain []interface{}\n\t\/\/ Get the current hash to start with\n\tcurrentHash := bc.CurrentBlock.Hash()\n\t\/\/ Get the last number on the block chain\n\tlastNumber := bc.BlockInfo(bc.CurrentBlock).Number\n\t\/\/ Get the parents number\n\tparentNumber := bc.BlockInfoByHash(hash).Number\n\t\/\/ Get the min amount. We might not have max amount of blocks\n\tcount := uint64(math.Min(float64(lastNumber-parentNumber), float64(max)))\n\tstartNumber := parentNumber + count\n\n\tnum := lastNumber\n\tfor ; num > startNumber; currentHash = bc.GetBlock(currentHash).PrevHash {\n\t\tnum--\n\t}\n\tfor i := uint64(0); bytes.Compare(currentHash, hash) != 0 && num >= parentNumber && i < count; i++ {\n\t\t\/\/ Get the block of the chain\n\t\tblock := bc.GetBlock(currentHash)\n\t\tcurrentHash = block.PrevHash\n\n\t\tchain = append(chain, block.Value().Val)\n\n\t\tnum--\n\t}\n\n\treturn chain\n}\n\nfunc (bc *BlockChain) GetChain(hash []byte, amount int) []*Block {\n\tgenHash := bc.genesisBlock.Hash()\n\n\tblock := bc.GetBlock(hash)\n\tvar blocks []*Block\n\n\tfor i := 0; i < amount && block != nil; block = bc.GetBlock(block.PrevHash) {\n\t\tfmt.Println(block)\n\t\tblocks = append([]*Block{block}, blocks...)\n\n\t\tif bytes.Compare(genHash, block.Hash()) == 0 {\n\t\t\tbreak\n\t\t}\n\t\ti++\n\t}\n\n\treturn blocks\n}\n\nfunc (bc *BlockChain) setLastBlock() {\n\tdata, _ := ethutil.Config.Db.Get([]byte(\"LastBlock\"))\n\tif len(data) != 0 {\n\t\tblock := NewBlockFromBytes(data)\n\t\tinfo := bc.BlockInfo(block)\n\t\tbc.CurrentBlock = block\n\t\tbc.LastBlockHash = block.Hash()\n\t\tbc.LastBlockNumber = info.Number\n\n\t\tlog.Printf(\"[CHAIN] Last known block height #%d\\n\", bc.LastBlockNumber)\n\t}\n\n\t\/\/ Set the last know difficulty (might be 0x0 as initial value, Genesis)\n\tbc.TD = ethutil.BigD(ethutil.Config.Db.LastKnownTD())\n}\n\nfunc (bc *BlockChain) SetTotalDifficulty(td *big.Int) {\n\tethutil.Config.Db.Put([]byte(\"LastKnownTotalDifficulty\"), td.Bytes())\n\tbc.TD = td\n}\n\n\/\/ Add a block to the chain and record addition information\nfunc (bc *BlockChain) Add(block *Block) {\n\tbc.writeBlockInfo(block)\n\n\t\/\/ Prepare the genesis block\n\tbc.CurrentBlock = block\n\tbc.LastBlockHash = block.Hash()\n\n\tencodedBlock := block.RlpEncode()\n\tethutil.Config.Db.Put(block.Hash(), encodedBlock)\n\tethutil.Config.Db.Put([]byte(\"LastBlock\"), encodedBlock)\n}\n\nfunc (bc *BlockChain) GetBlock(hash []byte) *Block {\n\tdata, _ := ethutil.Config.Db.Get(hash)\n\tif len(data) == 0 {\n\t\treturn nil\n\t}\n\n\treturn NewBlockFromData(data)\n}\n\nfunc (bc *BlockChain) BlockInfoByHash(hash []byte) BlockInfo {\n\tbi := BlockInfo{}\n\tdata, _ := ethutil.Config.Db.Get(append(hash, []byte(\"Info\")...))\n\tbi.RlpDecode(data)\n\n\treturn bi\n}\n\nfunc (bc *BlockChain) BlockInfo(block *Block) BlockInfo {\n\tbi := BlockInfo{}\n\tdata, _ := ethutil.Config.Db.Get(append(block.Hash(), []byte(\"Info\")...))\n\tbi.RlpDecode(data)\n\n\treturn bi\n}\n\n\/\/ Unexported method for writing extra non-essential block info to the db\nfunc (bc *BlockChain) writeBlockInfo(block *Block) {\n\tbc.LastBlockNumber++\n\tbi := BlockInfo{Number: bc.LastBlockNumber, Hash: block.Hash(), Parent: block.PrevHash}\n\n\t\/\/ For now we use the block hash with the words \"info\" appended as key\n\tethutil.Config.Db.Put(append(block.Hash(), []byte(\"Info\")...), bi.RlpEncode())\n}\n\nfunc (bc *BlockChain) Stop() {\n\tif bc.CurrentBlock != nil {\n\t\tlog.Println(\"[CHAIN] Stopped\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ 22-24 august 2012\npackage main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"sort\"\n\n\t\/\/ for drawing the progress bar\n\t\"image\"\n\t\"image\/color\"\n\t\"image\/draw\"\n\t\"image\/png\"\n\t\"encoding\/base64\"\n\t\"bytes\"\n)\n\ntype SortOrder int\nconst (\n\tSortByRegion SortOrder = iota\n\tSortByBoxState\n\tSortByMediaState\n)\n\ntype GameScan struct {\n\tName\t\tstring\n\tHasNoScans\tbool\t\t\t\/\/ for whole games\n\tRegion\t\tstring\n\tBoxState\t\tScanState\n\tMediaState\tScanState\n\tError\t\t\terror\n}\n\ntype ScanSet []*GameScan\n\nfunc getMediaState(scan Scan) ScanState {\n\tif scan.Cart == \"\" && scan.Disc == \"\" {\n\t\treturn Missing\n\t}\n\tif scan.Cart != \"\" && scan.Disc == \"\" {\n\t\treturn scan.CartScanState()\n\t}\n\tif scan.Cart == \"\" && scan.Disc != \"\" {\n\t\treturn scan.DiscScanState()\n\t}\n\treturn scan.CartScanState().Join(scan.DiscScanState())\t\/\/ else\n}\n\n\nfunc GetConsoleScans(console string) (ScanSet, error) {\n\tvar gameScans ScanSet\n\n\tgames, err := GetGameList(console)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting %s game list: %v\", console, err)\n\t}\n\tfor _, game := range games {\n\/\/fmt.Println(game)\n\t\tif strings.HasPrefix(game, \"List of \" + console + \" games\") {\t\/\/ omit list from report\n\t\t\tcontinue\n\t\t}\n\t\tscans, err := GetScans(game, console)\n\t\tif err == ErrGameNoScans {\t\t\/\/ omit games for this console that will not have scans\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\tgameScans = append(gameScans, &GameScan{\n\t\t\t\tName:\tgame,\n\t\t\t\tError:\terr,\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\t\tif len(scans) == 0 {\t\t\t\t\/\/ there are no scans at all\n\t\t\tgameScans = append(gameScans, &GameScan{\n\t\t\t\tName:\t\tgame,\n\t\t\t\tHasNoScans:\ttrue,\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\t\tnScans := 0\n\t\tfor _, scan := range scans {\n\t\t\tvar mediaState ScanState\n\n\t\t\tif scan.Console != console {\t\/\/ omit scans from other consoles\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnScans++\n\t\t\tboxState := scan.BoxScanState()\n\t\t\tmediaState = getMediaState(scan)\n\t\t\tgameScans = append(gameScans, &GameScan{\n\t\t\t\tName:\t\tgame,\n\t\t\t\tRegion:\t\tscan.Region,\n\t\t\t\tBoxState:\t\tboxState,\n\t\t\t\tMediaState:\tmediaState,\n\t\t\t})\n\t\t}\n\t\tif nScans == 0 {\t\t\t\t\t\/\/ there are no scans for the specified console\n\t\t\tgameScans = append(gameScans, &GameScan{\n\t\t\t\tName:\t\tgame,\n\t\t\t\tHasNoScans:\ttrue,\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\t}\n\treturn gameScans, nil\n}\n\ntype sorter struct {\n\tscans\t\tScanSet\n\tsortOrder\t\tSortOrder\n}\n\n\/\/ for sort.Interface\nfunc (s sorter) Len() int {\n\treturn len(s.scans)\n}\n\nfunc (s sorter) Less(i, j int) bool {\n\tscans := s.scans\n\t\/\/ the sort orders make no sense if there is either an error or no scans for a game, so handle those cases first\n\tif scans[i].Error != nil && scans[j].Error != nil {\t\t\/\/ errors go first\n\t\treturn scans[i].Name < scans[j].Name\t\t\/\/ by title if they both error\n\t}\n\tif scans[i].Error != nil {\n\t\treturn true\n\t}\n\tif scans[j].Error != nil {\n\t\treturn false\n\t}\n\tif scans[i].HasNoScans && scans[j].HasNoScans {\t\/\/ then lack of scans\n\t\treturn scans[i].Name < scans[j].Name\t\t\/\/ by title if they both lack scans\n\t}\n\tif scans[i].HasNoScans {\n\t\treturn true\n\t}\n\tif scans[j].HasNoScans {\n\t\treturn false\n\t}\n\tswitch s.sortOrder {\n\tcase SortByRegion:\t\t\/\/ sort by region, then by name\n\t\tif scans[i].Region == scans[j].Region {\t\t\t\/\/ then if they have the same region, alphabetically\n\t\t\treturn scans[i].Name < scans[j].Name\n\t\t}\n\t\treturn scans[i].Region < scans[j].Region\t\t\t\/\/ finally\n\tcase SortByBoxState:\n\t\tif scans[i].BoxState == scans[j].BoxState {\t\t\t\/\/ then if they have the same region, alphabetically\n\t\t\treturn scans[i].Name < scans[j].Name\n\t\t}\n\t\treturn scans[i].BoxState < scans[j].BoxState\t\t\/\/ finally\n\tcase SortByMediaState:\n\t\tif scans[i].MediaState == scans[j].MediaState {\t\t\/\/ then if they have the same region, alphabetically\n\t\t\treturn scans[i].Name < scans[j].Name\n\t\t}\n\t\treturn scans[i].MediaState < scans[j].MediaState\t\/\/ finally\n\t}\n\tpanic(fmt.Sprintf(\"invalid sort order %d\", int(s.sortOrder)))\n}\n\nfunc (s sorter) Swap(i, j int) {\n\ts.scans[i], s.scans[j] = s.scans[j], s.scans[i]\n}\n\nfunc (scans ScanSet) Sort(so SortOrder) {\n\tsort.Sort(sorter{\n\t\tscans:\t\tscans,\n\t\tsortOrder:\t\tso,\n\t})\n}\n\ntype Stats struct {\n\tnBoxScans\t\tint\n\tnBoxHave\t\t\tint\n\tnBoxGood\t\tint\n\tnBoxBad\t\t\tint\n\tpBoxHave\t\t\tfloat64\n\tpBoxGood\t\tfloat64\n\tpBoxGoodAll\t\tfloat64\n\tpBoxBad\t\t\tfloat64\n\tpBoxBadAll\t\tfloat64\n\tnMediaScans\t\tint\n\tnMediaHave\t\tint\n\tnMediaGood\t\tint\n\tnMediaBad\t\tint\n\tpMediaHave\t\tfloat64\n\tpMediaGood\t\tfloat64\n\tpMediaGoodAll\t\tfloat64\n\tpMediaBad\t\tfloat64\n\tpMediaBadAll\t\tfloat64\n}\n\nfunc pcnt(_a, _b int) float64 {\n\tif _a != 0 && _b == 0 {\t\/\/ sanity check\n\t\tpanic(\"we somehow have scans where none are expected\")\n\t}\n\tif _b == 0 {\n\t\treturn 0.0\n\t}\n\ta, b := float64(_a), float64(_b)\n\treturn (a \/ b) * 100.0\n}\n\nfunc (scans ScanSet) GetStats(filterRegion string) (stats Stats) {\n\tfor _, scan := range scans {\n\t\tif scan.Error != nil || scan.HasNoScans {\t\t\/\/ TODO really skip entries without scans?\n\t\t\tcontinue\n\t\t}\n\t\tif filterRegion != \"\" &&\n\t\t\t!strings.HasPrefix(scan.Region, filterRegion) {\n\t\t\tcontinue\n\t\t}\n\t\tstats.nBoxScans++\n\t\tswitch scan.BoxState {\n\t\tcase Good:\n\t\t\tstats.nBoxGood++\n\t\t\tstats.nBoxHave++\n\t\tcase Bad:\n\t\t\tstats.nBoxBad++\n\t\t\tfallthrough\n\t\tcase Incomplete:\n\t\t\tstats.nBoxHave++\n\t\t}\n\t\tstats.nMediaScans++\n\t\tswitch scan.MediaState {\n\t\tcase Good:\n\t\t\tstats.nMediaGood++\n\t\t\tstats.nMediaHave++\n\t\tcase Bad:\n\t\t\tstats.nMediaBad++\n\t\t\tfallthrough\n\t\tcase Incomplete:\n\t\t\tstats.nMediaHave++\n\t\t}\n\t}\n\tstats.pBoxHave = pcnt(stats.nBoxHave, stats.nBoxScans)\n\tstats.pBoxGood = pcnt(stats.nBoxGood, stats.nBoxHave)\n\tstats.pBoxGoodAll = pcnt(stats.nBoxGood, stats.nBoxScans)\n\tstats.pBoxBad = pcnt(stats.nBoxBad, stats.nBoxHave)\n\tstats.pBoxBadAll = pcnt(stats.nBoxBad, stats.nBoxScans)\n\tstats.pMediaHave = pcnt(stats.nMediaHave, stats.nMediaScans)\n\tstats.pMediaGood = pcnt(stats.nMediaGood, stats.nMediaHave)\n\tstats.pMediaGoodAll = pcnt(stats.nMediaGood, stats.nMediaScans)\n\tstats.pMediaBad = pcnt(stats.nMediaBad, stats.nMediaHave)\n\tstats.pMediaBadAll = pcnt(stats.nMediaBad, stats.nMediaScans)\n\treturn\n}\n\nconst pbarWidth = 300\nconst pbarHeight = 20\nconst pbarPercentFactor = 3\nconst pbarBorderThickness = 2\n\nvar (\n\tblack = image.NewUniform(color.Black)\n\twhite = image.NewUniform(color.White)\n\tred = image.NewUniform(color.RGBA{255, 0, 0, 255})\n\tgreen = image.NewUniform(color.RGBA{0, 255, 0, 255})\n)\n\nfunc progressbar(pGoodAll float64, pBadAll float64) string {\n\tpbar := image.NewRGBA(image.Rect(0, 0,\n\t\tpbarWidth + (pbarBorderThickness * 2),\n\t\tpbarHeight + (pbarBorderThickness * 2)))\n\t\/\/ 1) fill black for border\n\tdraw.Draw(pbar, pbar.Rect, black, image.ZP, draw.Src)\n\t\/\/ 2) draw white for what we have\n\tdraw.Draw(pbar, image.Rect(\n\t\tpbarBorderThickness, pbarBorderThickness,\n\t\tpbarBorderThickness + pbarWidth,\n\t\tpbarBorderThickness + pbarHeight), white, image.ZP, draw.Src)\n\t\/\/ 3) figure out the rectanges for good and bad\n\tgoodWid := int(pGoodAll + 0.5) * pbarPercentFactor\n\tbadWid := int(pBadAll + 0.5) * pbarPercentFactor\n\tgoodRect := image.Rect(\n\t\tpbarBorderThickness, pbarBorderThickness,\n\t\tpbarBorderThickness + goodWid,\n\t\tpbarBorderThickness + pbarHeight)\n\tbadRect := image.Rect(\n\t\tpbarBorderThickness + goodWid, pbarBorderThickness,\n\t\tpbarBorderThickness + goodWid + badWid,\n\t\tpbarBorderThickness + pbarHeight)\n\t\/\/ 4) draw good and bad\n\tdraw.Draw(pbar, goodRect, green, image.ZP, draw.Src)\n\tdraw.Draw(pbar, badRect, red, image.ZP, draw.Src)\n\t\/\/ 5) convert to base64 and return\n\t_pngDat := new(bytes.Buffer)\n\tpngDat := base64.NewEncoder(base64.StdEncoding, _pngDat)\n\tdefer pngDat.Close()\n\terr := png.Encode(pngDat, pbar)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"error producing progress bar PNG: %v\\n\", err))\n\t}\n\treturn _pngDat.String()\n}\n\nfunc (s Stats) BoxProgressBar() string {\n\treturn progressbar(s.pBoxGoodAll, s.pBoxBadAll)\n}\n\nfunc (s Stats) MediaProgressBar() string {\n\treturn progressbar(s.pMediaGoodAll, s.pMediaBadAll)\n}\n<commit_msg>Split out statistics percentage calculation and added an Add() function for combining multiple statistics. This will be used for an \"overal status\" feature on the front page.<commit_after>\/\/ 22-24 august 2012\npackage main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"sort\"\n\n\t\/\/ for drawing the progress bar\n\t\"image\"\n\t\"image\/color\"\n\t\"image\/draw\"\n\t\"image\/png\"\n\t\"encoding\/base64\"\n\t\"bytes\"\n)\n\ntype SortOrder int\nconst (\n\tSortByRegion SortOrder = iota\n\tSortByBoxState\n\tSortByMediaState\n)\n\ntype GameScan struct {\n\tName\t\tstring\n\tHasNoScans\tbool\t\t\t\/\/ for whole games\n\tRegion\t\tstring\n\tBoxState\t\tScanState\n\tMediaState\tScanState\n\tError\t\t\terror\n}\n\ntype ScanSet []*GameScan\n\nfunc getMediaState(scan Scan) ScanState {\n\tif scan.Cart == \"\" && scan.Disc == \"\" {\n\t\treturn Missing\n\t}\n\tif scan.Cart != \"\" && scan.Disc == \"\" {\n\t\treturn scan.CartScanState()\n\t}\n\tif scan.Cart == \"\" && scan.Disc != \"\" {\n\t\treturn scan.DiscScanState()\n\t}\n\treturn scan.CartScanState().Join(scan.DiscScanState())\t\/\/ else\n}\n\n\nfunc GetConsoleScans(console string) (ScanSet, error) {\n\tvar gameScans ScanSet\n\n\tgames, err := GetGameList(console)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error getting %s game list: %v\", console, err)\n\t}\n\tfor _, game := range games {\n\/\/fmt.Println(game)\n\t\tif strings.HasPrefix(game, \"List of \" + console + \" games\") {\t\/\/ omit list from report\n\t\t\tcontinue\n\t\t}\n\t\tscans, err := GetScans(game, console)\n\t\tif err == ErrGameNoScans {\t\t\/\/ omit games for this console that will not have scans\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\tgameScans = append(gameScans, &GameScan{\n\t\t\t\tName:\tgame,\n\t\t\t\tError:\terr,\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\t\tif len(scans) == 0 {\t\t\t\t\/\/ there are no scans at all\n\t\t\tgameScans = append(gameScans, &GameScan{\n\t\t\t\tName:\t\tgame,\n\t\t\t\tHasNoScans:\ttrue,\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\t\tnScans := 0\n\t\tfor _, scan := range scans {\n\t\t\tvar mediaState ScanState\n\n\t\t\tif scan.Console != console {\t\/\/ omit scans from other consoles\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnScans++\n\t\t\tboxState := scan.BoxScanState()\n\t\t\tmediaState = getMediaState(scan)\n\t\t\tgameScans = append(gameScans, &GameScan{\n\t\t\t\tName:\t\tgame,\n\t\t\t\tRegion:\t\tscan.Region,\n\t\t\t\tBoxState:\t\tboxState,\n\t\t\t\tMediaState:\tmediaState,\n\t\t\t})\n\t\t}\n\t\tif nScans == 0 {\t\t\t\t\t\/\/ there are no scans for the specified console\n\t\t\tgameScans = append(gameScans, &GameScan{\n\t\t\t\tName:\t\tgame,\n\t\t\t\tHasNoScans:\ttrue,\n\t\t\t})\n\t\t\tcontinue\n\t\t}\n\t}\n\treturn gameScans, nil\n}\n\ntype sorter struct {\n\tscans\t\tScanSet\n\tsortOrder\t\tSortOrder\n}\n\n\/\/ for sort.Interface\nfunc (s sorter) Len() int {\n\treturn len(s.scans)\n}\n\nfunc (s sorter) Less(i, j int) bool {\n\tscans := s.scans\n\t\/\/ the sort orders make no sense if there is either an error or no scans for a game, so handle those cases first\n\tif scans[i].Error != nil && scans[j].Error != nil {\t\t\/\/ errors go first\n\t\treturn scans[i].Name < scans[j].Name\t\t\/\/ by title if they both error\n\t}\n\tif scans[i].Error != nil {\n\t\treturn true\n\t}\n\tif scans[j].Error != nil {\n\t\treturn false\n\t}\n\tif scans[i].HasNoScans && scans[j].HasNoScans {\t\/\/ then lack of scans\n\t\treturn scans[i].Name < scans[j].Name\t\t\/\/ by title if they both lack scans\n\t}\n\tif scans[i].HasNoScans {\n\t\treturn true\n\t}\n\tif scans[j].HasNoScans {\n\t\treturn false\n\t}\n\tswitch s.sortOrder {\n\tcase SortByRegion:\t\t\/\/ sort by region, then by name\n\t\tif scans[i].Region == scans[j].Region {\t\t\t\/\/ then if they have the same region, alphabetically\n\t\t\treturn scans[i].Name < scans[j].Name\n\t\t}\n\t\treturn scans[i].Region < scans[j].Region\t\t\t\/\/ finally\n\tcase SortByBoxState:\n\t\tif scans[i].BoxState == scans[j].BoxState {\t\t\t\/\/ then if they have the same region, alphabetically\n\t\t\treturn scans[i].Name < scans[j].Name\n\t\t}\n\t\treturn scans[i].BoxState < scans[j].BoxState\t\t\/\/ finally\n\tcase SortByMediaState:\n\t\tif scans[i].MediaState == scans[j].MediaState {\t\t\/\/ then if they have the same region, alphabetically\n\t\t\treturn scans[i].Name < scans[j].Name\n\t\t}\n\t\treturn scans[i].MediaState < scans[j].MediaState\t\/\/ finally\n\t}\n\tpanic(fmt.Sprintf(\"invalid sort order %d\", int(s.sortOrder)))\n}\n\nfunc (s sorter) Swap(i, j int) {\n\ts.scans[i], s.scans[j] = s.scans[j], s.scans[i]\n}\n\nfunc (scans ScanSet) Sort(so SortOrder) {\n\tsort.Sort(sorter{\n\t\tscans:\t\tscans,\n\t\tsortOrder:\t\tso,\n\t})\n}\n\ntype Stats struct {\n\tnBoxScans\t\tint\n\tnBoxHave\t\t\tint\n\tnBoxGood\t\tint\n\tnBoxBad\t\t\tint\n\tpBoxHave\t\t\tfloat64\n\tpBoxGood\t\tfloat64\n\tpBoxGoodAll\t\tfloat64\n\tpBoxBad\t\t\tfloat64\n\tpBoxBadAll\t\tfloat64\n\tnMediaScans\t\tint\n\tnMediaHave\t\tint\n\tnMediaGood\t\tint\n\tnMediaBad\t\tint\n\tpMediaHave\t\tfloat64\n\tpMediaGood\t\tfloat64\n\tpMediaGoodAll\t\tfloat64\n\tpMediaBad\t\tfloat64\n\tpMediaBadAll\t\tfloat64\n}\n\nfunc pcnt(_a, _b int) float64 {\n\tif _a != 0 && _b == 0 {\t\/\/ sanity check\n\t\tpanic(\"we somehow have scans where none are expected\")\n\t}\n\tif _b == 0 {\n\t\treturn 0.0\n\t}\n\ta, b := float64(_a), float64(_b)\n\treturn (a \/ b) * 100.0\n}\n\nfunc (scans ScanSet) GetStats(filterRegion string) (stats Stats) {\n\tfor _, scan := range scans {\n\t\tif scan.Error != nil || scan.HasNoScans {\t\t\/\/ TODO really skip entries without scans?\n\t\t\tcontinue\n\t\t}\n\t\tif filterRegion != \"\" &&\n\t\t\t!strings.HasPrefix(scan.Region, filterRegion) {\n\t\t\tcontinue\n\t\t}\n\t\tstats.nBoxScans++\n\t\tswitch scan.BoxState {\n\t\tcase Good:\n\t\t\tstats.nBoxGood++\n\t\t\tstats.nBoxHave++\n\t\tcase Bad:\n\t\t\tstats.nBoxBad++\n\t\t\tfallthrough\n\t\tcase Incomplete:\n\t\t\tstats.nBoxHave++\n\t\t}\n\t\tstats.nMediaScans++\n\t\tswitch scan.MediaState {\n\t\tcase Good:\n\t\t\tstats.nMediaGood++\n\t\t\tstats.nMediaHave++\n\t\tcase Bad:\n\t\t\tstats.nMediaBad++\n\t\t\tfallthrough\n\t\tcase Incomplete:\n\t\t\tstats.nMediaHave++\n\t\t}\n\t}\n\tstats.CalculatePercents()\n\treturn\n}\n\nfunc (stats *Stats) Add(stats2 Stats) {\n\tstats.nBoxScans += stats2.nBoxScans\n\tstats.nBoxHave += stats2.nBoxHave\n\tstats.nBoxGood += stats2.nBoxGood\n\tstats.nBoxBad += stats2.nBoxBad\n\tstats.nMediaScans += stats2.nMediaScans\n\tstats.nMediaHave += stats2.nMediaHave\n\tstats.nMediaGood += stats2.nMediaGood\n\tstats.nMediaBad += stats2.nMediaBad\n\tstats.CalculatePercents()\t\t\/\/ TODO move out for optimization?\n}\n\nfunc (stats *Stats) CalculatePercents() {\n\tstats.pBoxHave = pcnt(stats.nBoxHave, stats.nBoxScans)\n\tstats.pBoxGood = pcnt(stats.nBoxGood, stats.nBoxHave)\n\tstats.pBoxGoodAll = pcnt(stats.nBoxGood, stats.nBoxScans)\n\tstats.pBoxBad = pcnt(stats.nBoxBad, stats.nBoxHave)\n\tstats.pBoxBadAll = pcnt(stats.nBoxBad, stats.nBoxScans)\n\tstats.pMediaHave = pcnt(stats.nMediaHave, stats.nMediaScans)\n\tstats.pMediaGood = pcnt(stats.nMediaGood, stats.nMediaHave)\n\tstats.pMediaGoodAll = pcnt(stats.nMediaGood, stats.nMediaScans)\n\tstats.pMediaBad = pcnt(stats.nMediaBad, stats.nMediaHave)\n\tstats.pMediaBadAll = pcnt(stats.nMediaBad, stats.nMediaScans)\n}\n\nconst pbarWidth = 300\nconst pbarHeight = 20\nconst pbarPercentFactor = 3\nconst pbarBorderThickness = 2\n\nvar (\n\tblack = image.NewUniform(color.Black)\n\twhite = image.NewUniform(color.White)\n\tred = image.NewUniform(color.RGBA{255, 0, 0, 255})\n\tgreen = image.NewUniform(color.RGBA{0, 255, 0, 255})\n)\n\nfunc progressbar(pGoodAll float64, pBadAll float64) string {\n\tpbar := image.NewRGBA(image.Rect(0, 0,\n\t\tpbarWidth + (pbarBorderThickness * 2),\n\t\tpbarHeight + (pbarBorderThickness * 2)))\n\t\/\/ 1) fill black for border\n\tdraw.Draw(pbar, pbar.Rect, black, image.ZP, draw.Src)\n\t\/\/ 2) draw white for what we have\n\tdraw.Draw(pbar, image.Rect(\n\t\tpbarBorderThickness, pbarBorderThickness,\n\t\tpbarBorderThickness + pbarWidth,\n\t\tpbarBorderThickness + pbarHeight), white, image.ZP, draw.Src)\n\t\/\/ 3) figure out the rectanges for good and bad\n\tgoodWid := int(pGoodAll + 0.5) * pbarPercentFactor\n\tbadWid := int(pBadAll + 0.5) * pbarPercentFactor\n\tgoodRect := image.Rect(\n\t\tpbarBorderThickness, pbarBorderThickness,\n\t\tpbarBorderThickness + goodWid,\n\t\tpbarBorderThickness + pbarHeight)\n\tbadRect := image.Rect(\n\t\tpbarBorderThickness + goodWid, pbarBorderThickness,\n\t\tpbarBorderThickness + goodWid + badWid,\n\t\tpbarBorderThickness + pbarHeight)\n\t\/\/ 4) draw good and bad\n\tdraw.Draw(pbar, goodRect, green, image.ZP, draw.Src)\n\tdraw.Draw(pbar, badRect, red, image.ZP, draw.Src)\n\t\/\/ 5) convert to base64 and return\n\t_pngDat := new(bytes.Buffer)\n\tpngDat := base64.NewEncoder(base64.StdEncoding, _pngDat)\n\tdefer pngDat.Close()\n\terr := png.Encode(pngDat, pbar)\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"error producing progress bar PNG: %v\\n\", err))\n\t}\n\treturn _pngDat.String()\n}\n\nfunc (s Stats) BoxProgressBar() string {\n\treturn progressbar(s.pBoxGoodAll, s.pBoxBadAll)\n}\n\nfunc (s Stats) MediaProgressBar() string {\n\treturn progressbar(s.pMediaGoodAll, s.pMediaBadAll)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file tests types.Check by using it to\n\/\/ typecheck the standard library and tests.\n\npackage types\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/build\"\n\t\"go\/parser\"\n\t\"go\/scanner\"\n\t\"go\/token\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar verbose = flag.Bool(\"types.v\", false, \"verbose mode\")\n\nvar (\n\tpkgCount int \/\/ number of packages processed\n\tstart = time.Now()\n)\n\nfunc TestStdlib(t *testing.T) {\n\twalkDirs(t, filepath.Join(runtime.GOROOT(), \"src\/pkg\"))\n\tif *verbose {\n\t\tfmt.Println(pkgCount, \"packages typechecked in\", time.Since(start))\n\t}\n}\n\n\/\/ firstComment returns the contents of the first comment in\n\/\/ the given file, assuming there's one within the first KB.\nfunc firstComment(filename string) string {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tdefer f.Close()\n\n\tvar src [1 << 10]byte \/\/ read at most 1KB\n\tn, _ := f.Read(src[:])\n\n\tvar s scanner.Scanner\n\ts.Init(fset.AddFile(\"\", fset.Base(), n), src[:n], nil, scanner.ScanComments)\n\tfor {\n\t\t_, tok, lit := s.Scan()\n\t\tswitch tok {\n\t\tcase token.COMMENT:\n\t\t\t\/\/ remove trailing *\/ of multi-line comment\n\t\t\tif lit[1] == '*' {\n\t\t\t\tlit = lit[:len(lit)-2]\n\t\t\t}\n\t\t\treturn strings.TrimSpace(lit[2:])\n\t\tcase token.EOF:\n\t\t\treturn \"\"\n\t\t}\n\t}\n}\n\nfunc testTestDir(t *testing.T, path string, ignore ...string) {\n\tfiles, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\texcluded := make(map[string]bool)\n\tfor _, filename := range ignore {\n\t\texcluded[filename] = true\n\t}\n\n\tfset := token.NewFileSet()\n\tfor _, f := range files {\n\t\t\/\/ filter directory contents\n\t\tif f.IsDir() || !strings.HasSuffix(f.Name(), \".go\") || excluded[f.Name()] {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ get per-file instructions\n\t\texpectErrors := false\n\t\tfilename := filepath.Join(path, f.Name())\n\t\tif cmd := firstComment(filename); cmd != \"\" {\n\t\t\tswitch cmd {\n\t\t\tcase \"skip\", \"compiledir\":\n\t\t\t\tcontinue \/\/ ignore this file\n\t\t\tcase \"errorcheck\":\n\t\t\t\texpectErrors = true\n\t\t\t}\n\t\t}\n\n\t\t\/\/ parse and type-check file\n\t\tfile, err := parser.ParseFile(fset, filename, nil, 0)\n\t\tif err == nil {\n\t\t\t_, err = Check(filename, fset, []*ast.File{file})\n\t\t}\n\n\t\tif expectErrors {\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"expected errors but found none in %s\", filename)\n\t\t\t}\n\t\t} else {\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestStdtest(t *testing.T) {\n\ttestTestDir(t, filepath.Join(runtime.GOROOT(), \"test\"),\n\t\t\"cmplxdivide.go\", \/\/ also needs file cmplxdivide1.go - ignore\n\t\t\"mapnan.go\", \"sigchld.go\", \/\/ don't work on Windows; testTestDir should consult build tags\n\t)\n}\n\nfunc TestStdfixed(t *testing.T) {\n\ttestTestDir(t, filepath.Join(runtime.GOROOT(), \"test\", \"fixedbugs\"),\n\t\t\"bug165.go\", \/\/ TODO(gri) isComparable not working for incomplete struct type\n\t\t\"bug223.go\", \"bug413.go\", \"bug459.go\", \/\/ TODO(gri) complete initialization checks\n\t\t\"bug248.go\", \"bug302.go\", \"bug369.go\", \/\/ complex test instructions - ignore\n\t\t\"issue3924.go\", \/\/ TODO(gri) && and || produce bool result (not untyped bool)\n\t\t\"issue4847.go\", \/\/ TODO(gri) initialization cycle error not found\n\t)\n}\n\nfunc TestStdken(t *testing.T) {\n\ttestTestDir(t, filepath.Join(runtime.GOROOT(), \"test\", \"ken\"))\n}\n\n\/\/ Package paths of excluded packages.\nvar excluded = map[string]bool{\n\t\"builtin\": true,\n}\n\n\/\/ typecheck typechecks the given package files.\nfunc typecheck(t *testing.T, path string, filenames []string) {\n\tfset := token.NewFileSet()\n\n\t\/\/ parse package files\n\tvar files []*ast.File\n\tfor _, filename := range filenames {\n\t\tfile, err := parser.ParseFile(fset, filename, nil, parser.AllErrors)\n\t\tif err != nil {\n\t\t\t\/\/ the parser error may be a list of individual errors; report them all\n\t\t\tif list, ok := err.(scanner.ErrorList); ok {\n\t\t\t\tfor _, err := range list {\n\t\t\t\t\tt.Error(err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\n\t\tif *verbose {\n\t\t\tif len(files) == 0 {\n\t\t\t\tfmt.Println(\"package\", file.Name.Name)\n\t\t\t}\n\t\t\tfmt.Println(\"\\t\", filename)\n\t\t}\n\n\t\tfiles = append(files, file)\n\t}\n\n\t\/\/ typecheck package files\n\tvar conf Config\n\tconf.Error = func(err error) { t.Error(err) }\n\tconf.Check(path, fset, files, nil)\n\tpkgCount++\n}\n\n\/\/ pkgfiles returns the list of package files for the given directory.\nfunc pkgfiles(t *testing.T, dir string) []string {\n\tctxt := build.Default\n\tctxt.CgoEnabled = false\n\tpkg, err := ctxt.ImportDir(dir, 0)\n\tif err != nil {\n\t\tif _, nogo := err.(*build.NoGoError); !nogo {\n\t\t\tt.Error(err)\n\t\t}\n\t\treturn nil\n\t}\n\tif excluded[pkg.ImportPath] {\n\t\treturn nil\n\t}\n\tvar filenames []string\n\tfor _, name := range pkg.GoFiles {\n\t\tfilenames = append(filenames, filepath.Join(pkg.Dir, name))\n\t}\n\tfor _, name := range pkg.TestGoFiles {\n\t\tfilenames = append(filenames, filepath.Join(pkg.Dir, name))\n\t}\n\treturn filenames\n}\n\n\/\/ Note: Could use filepath.Walk instead of walkDirs but that wouldn't\n\/\/ necessarily be shorter or clearer after adding the code to\n\/\/ terminate early for -short tests.\n\nfunc walkDirs(t *testing.T, dir string) {\n\t\/\/ limit run time for short tests\n\tif testing.Short() && time.Since(start) >= 750*time.Millisecond {\n\t\treturn\n\t}\n\n\tfis, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\t\/\/ typecheck package in directory\n\tif files := pkgfiles(t, dir); files != nil {\n\t\ttypecheck(t, dir, files)\n\t}\n\n\t\/\/ traverse subdirectories, but don't walk into testdata\n\tfor _, fi := range fis {\n\t\tif fi.IsDir() && fi.Name() != \"testdata\" {\n\t\t\twalkDirs(t, filepath.Join(dir, fi.Name()))\n\t\t}\n\t}\n}\n<commit_msg>go.tools\/go\/types: enable std lib test of map key types<commit_after>\/\/ Copyright 2013 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\n\/\/ This file tests types.Check by using it to\n\/\/ typecheck the standard library and tests.\n\npackage types\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"go\/ast\"\n\t\"go\/build\"\n\t\"go\/parser\"\n\t\"go\/scanner\"\n\t\"go\/token\"\n\t\"io\/ioutil\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar verbose = flag.Bool(\"types.v\", false, \"verbose mode\")\n\nvar (\n\tpkgCount int \/\/ number of packages processed\n\tstart = time.Now()\n)\n\nfunc TestStdlib(t *testing.T) {\n\twalkDirs(t, filepath.Join(runtime.GOROOT(), \"src\/pkg\"))\n\tif *verbose {\n\t\tfmt.Println(pkgCount, \"packages typechecked in\", time.Since(start))\n\t}\n}\n\n\/\/ firstComment returns the contents of the first comment in\n\/\/ the given file, assuming there's one within the first KB.\nfunc firstComment(filename string) string {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tdefer f.Close()\n\n\tvar src [1 << 10]byte \/\/ read at most 1KB\n\tn, _ := f.Read(src[:])\n\n\tvar s scanner.Scanner\n\ts.Init(fset.AddFile(\"\", fset.Base(), n), src[:n], nil, scanner.ScanComments)\n\tfor {\n\t\t_, tok, lit := s.Scan()\n\t\tswitch tok {\n\t\tcase token.COMMENT:\n\t\t\t\/\/ remove trailing *\/ of multi-line comment\n\t\t\tif lit[1] == '*' {\n\t\t\t\tlit = lit[:len(lit)-2]\n\t\t\t}\n\t\t\treturn strings.TrimSpace(lit[2:])\n\t\tcase token.EOF:\n\t\t\treturn \"\"\n\t\t}\n\t}\n}\n\nfunc testTestDir(t *testing.T, path string, ignore ...string) {\n\tfiles, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\texcluded := make(map[string]bool)\n\tfor _, filename := range ignore {\n\t\texcluded[filename] = true\n\t}\n\n\tfset := token.NewFileSet()\n\tfor _, f := range files {\n\t\t\/\/ filter directory contents\n\t\tif f.IsDir() || !strings.HasSuffix(f.Name(), \".go\") || excluded[f.Name()] {\n\t\t\tcontinue\n\t\t}\n\n\t\t\/\/ get per-file instructions\n\t\texpectErrors := false\n\t\tfilename := filepath.Join(path, f.Name())\n\t\tif cmd := firstComment(filename); cmd != \"\" {\n\t\t\tswitch cmd {\n\t\t\tcase \"skip\", \"compiledir\":\n\t\t\t\tcontinue \/\/ ignore this file\n\t\t\tcase \"errorcheck\":\n\t\t\t\texpectErrors = true\n\t\t\t}\n\t\t}\n\n\t\t\/\/ parse and type-check file\n\t\tfile, err := parser.ParseFile(fset, filename, nil, 0)\n\t\tif err == nil {\n\t\t\t_, err = Check(filename, fset, []*ast.File{file})\n\t\t}\n\n\t\tif expectErrors {\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"expected errors but found none in %s\", filename)\n\t\t\t}\n\t\t} else {\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestStdtest(t *testing.T) {\n\ttestTestDir(t, filepath.Join(runtime.GOROOT(), \"test\"),\n\t\t\"cmplxdivide.go\", \/\/ also needs file cmplxdivide1.go - ignore\n\t\t\"mapnan.go\", \"sigchld.go\", \/\/ don't work on Windows; testTestDir should consult build tags\n\t)\n}\n\nfunc TestStdfixed(t *testing.T) {\n\ttestTestDir(t, filepath.Join(runtime.GOROOT(), \"test\", \"fixedbugs\"),\n\t\t\"bug223.go\", \"bug413.go\", \"bug459.go\", \/\/ TODO(gri) complete initialization checks\n\t\t\"bug248.go\", \"bug302.go\", \"bug369.go\", \/\/ complex test instructions - ignore\n\t\t\"issue3924.go\", \/\/ TODO(gri) && and || produce bool result (not untyped bool)\n\t\t\"issue4847.go\", \/\/ TODO(gri) initialization cycle error not found\n\t)\n}\n\nfunc TestStdken(t *testing.T) {\n\ttestTestDir(t, filepath.Join(runtime.GOROOT(), \"test\", \"ken\"))\n}\n\n\/\/ Package paths of excluded packages.\nvar excluded = map[string]bool{\n\t\"builtin\": true,\n}\n\n\/\/ typecheck typechecks the given package files.\nfunc typecheck(t *testing.T, path string, filenames []string) {\n\tfset := token.NewFileSet()\n\n\t\/\/ parse package files\n\tvar files []*ast.File\n\tfor _, filename := range filenames {\n\t\tfile, err := parser.ParseFile(fset, filename, nil, parser.AllErrors)\n\t\tif err != nil {\n\t\t\t\/\/ the parser error may be a list of individual errors; report them all\n\t\t\tif list, ok := err.(scanner.ErrorList); ok {\n\t\t\t\tfor _, err := range list {\n\t\t\t\t\tt.Error(err)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.Error(err)\n\t\t\treturn\n\t\t}\n\n\t\tif *verbose {\n\t\t\tif len(files) == 0 {\n\t\t\t\tfmt.Println(\"package\", file.Name.Name)\n\t\t\t}\n\t\t\tfmt.Println(\"\\t\", filename)\n\t\t}\n\n\t\tfiles = append(files, file)\n\t}\n\n\t\/\/ typecheck package files\n\tvar conf Config\n\tconf.Error = func(err error) { t.Error(err) }\n\tconf.Check(path, fset, files, nil)\n\tpkgCount++\n}\n\n\/\/ pkgfiles returns the list of package files for the given directory.\nfunc pkgfiles(t *testing.T, dir string) []string {\n\tctxt := build.Default\n\tctxt.CgoEnabled = false\n\tpkg, err := ctxt.ImportDir(dir, 0)\n\tif err != nil {\n\t\tif _, nogo := err.(*build.NoGoError); !nogo {\n\t\t\tt.Error(err)\n\t\t}\n\t\treturn nil\n\t}\n\tif excluded[pkg.ImportPath] {\n\t\treturn nil\n\t}\n\tvar filenames []string\n\tfor _, name := range pkg.GoFiles {\n\t\tfilenames = append(filenames, filepath.Join(pkg.Dir, name))\n\t}\n\tfor _, name := range pkg.TestGoFiles {\n\t\tfilenames = append(filenames, filepath.Join(pkg.Dir, name))\n\t}\n\treturn filenames\n}\n\n\/\/ Note: Could use filepath.Walk instead of walkDirs but that wouldn't\n\/\/ necessarily be shorter or clearer after adding the code to\n\/\/ terminate early for -short tests.\n\nfunc walkDirs(t *testing.T, dir string) {\n\t\/\/ limit run time for short tests\n\tif testing.Short() && time.Since(start) >= 750*time.Millisecond {\n\t\treturn\n\t}\n\n\tfis, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\tt.Error(err)\n\t\treturn\n\t}\n\n\t\/\/ typecheck package in directory\n\tif files := pkgfiles(t, dir); files != nil {\n\t\ttypecheck(t, dir, files)\n\t}\n\n\t\/\/ traverse subdirectories, but don't walk into testdata\n\tfor _, fi := range fis {\n\t\tif fi.IsDir() && fi.Name() != \"testdata\" {\n\t\t\twalkDirs(t, filepath.Join(dir, fi.Name()))\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Parses files containing server descriptors.\n\npackage zoossh\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\t\/\/ The layout of the \"published\" field.\n\tpublishedTimeLayout = \"2006-01-02 15:04:05\"\n)\n\nvar descriptorAnnotations = map[Annotation]bool{\n\t\/\/ The file format we currently (try to) support.\n\tAnnotation{\"server-descriptor\", \"1\", \"0\"}: true,\n}\n\ntype GetDescriptor func() *RouterDescriptor\n\n\/\/ An exitpattern as defined in dirspec.txt, Section 2.1.3.\ntype ExitPattern struct {\n\tAddressSpec string\n\tPortSpec string\n}\n\n\/\/ An (incomplete) router descriptor as defined in dirspec.txt, Section 2.1.1.\ntype RouterDescriptor struct {\n\n\t\/\/ The single fields of a \"router\" line.\n\tNickname string\n\tAddress net.IP\n\tORPort uint16\n\tSOCKSPort uint16\n\tDirPort uint16\n\n\t\/\/ The single fields of a \"bandwidth\" line. All bandwidth values are in\n\t\/\/ bytes per second.\n\tBandwidthAvg uint64\n\tBandwidthBurst uint64\n\tBandwidthObs uint64\n\n\t\/\/ The single fields of a \"platform\" line.\n\tOperatingSystem string\n\tTorVersion string\n\n\t\/\/ The single fields of a \"published\" line.\n\tPublished time.Time\n\n\t\/\/ The single fields of an \"uptime\" line.\n\tUptime uint64\n\n\t\/\/ The single fields of a \"fingerprint\" line.\n\tFingerprint Fingerprint\n\n\t\/\/ The single fields of a \"hibernating\" line.\n\tHibernating bool\n\n\t\/\/ The single fields of a \"family\" line.\n\tFamily map[Fingerprint]bool\n\n\t\/\/ The single fields of a \"contact\" line.\n\tContact string\n\n\t\/\/ The \"hidden-service-dir\" line.\n\tHiddenServiceDir bool\n\n\tOnionKey string\n\tNTorOnionKey string\n\tSigningKey string\n\n\tRawAccept string\n\tRawReject string\n\tAccept []*ExitPattern\n\tReject []*ExitPattern\n}\n\ntype RouterDescriptors struct {\n\n\t\/\/ A map from relay fingerprint to a function which returns the router\n\t\/\/ descriptor.\n\tRouterDescriptors map[Fingerprint]GetDescriptor\n}\n\n\/\/ String implements the String as well as the Object interface. It returns\n\/\/ the descriptor's string representation.\nfunc (desc *RouterDescriptor) String() string {\n\n\treturn fmt.Sprintf(\"%s,%s,%s,%d,%d,%s,%d,%s,%s,%s\",\n\t\tdesc.Fingerprint,\n\t\tdesc.Nickname,\n\t\tdesc.Address,\n\t\tdesc.ORPort,\n\t\tdesc.DirPort,\n\t\tdesc.Published.Format(time.RFC3339),\n\t\tdesc.Uptime,\n\t\tstrings.Replace(desc.OperatingSystem, \",\", \"\", -1),\n\t\tstrings.Replace(desc.TorVersion, \",\", \"\", -1),\n\t\tstrings.Replace(desc.Contact, \",\", \"\", -1))\n}\n\n\/\/ GetFingerprint implements the Object interface. It returns the descriptor's\n\/\/ fingerprint.\nfunc (desc *RouterDescriptor) GetFingerprint() Fingerprint {\n\n\treturn desc.Fingerprint\n}\n\n\/\/ Length implements the ObjectSet interface. It returns the length of the\n\/\/ router descriptors.\nfunc (descs *RouterDescriptors) Length() int {\n\n\treturn len(descs.RouterDescriptors)\n}\n\n\/\/ Iterate implements the ObjectSet interface. Using a channel, it iterates\n\/\/ over and returns all router descriptors. The given object filter can be\n\/\/ used to filter descriptors, e.g., by fingerprint.\nfunc (descs *RouterDescriptors) Iterate(filter *ObjectFilter) <-chan Object {\n\n\tch := make(chan Object)\n\n\tgo func() {\n\t\tfor _, getDesc := range descs.RouterDescriptors {\n\t\t\tdesc := getDesc()\n\t\t\tif filter == nil || filter.IsEmpty() || filter.MatchesRouterDescriptor(desc) {\n\t\t\t\tch <- desc\n\t\t\t}\n\t\t}\n\t\tclose(ch)\n\t}()\n\n\treturn ch\n}\n\n\/\/ GetObject implements the ObjectSet interface. It returns the object\n\/\/ identified by the given fingerprint. If the object is not present in the\n\/\/ set, false is returned, otherwise true.\nfunc (desc *RouterDescriptors) GetObject(fingerprint Fingerprint) (Object, bool) {\n\n\treturn desc.Get(fingerprint)\n}\n\n\/\/ Merge merges the given object set with itself.\nfunc (descs *RouterDescriptors) Merge(objs ObjectSet) {\n\n\tfor desc := range descs.Iterate(nil) {\n\t\tfpr := desc.GetFingerprint()\n\t\t_, exists := descs.Get(fpr)\n\t\tif !exists {\n\t\t\tdescs.Set(fpr, desc.(*RouterDescriptor))\n\t\t}\n\t}\n}\n\n\/\/ NewRouterDescriptors serves as a constructor and returns a pointer to a\n\/\/ freshly allocated and empty RouterDescriptors struct.\nfunc NewRouterDescriptors() *RouterDescriptors {\n\n\treturn &RouterDescriptors{RouterDescriptors: make(map[Fingerprint]GetDescriptor)}\n}\n\n\/\/ NewRouterDescriptor serves as a constructor and returns a pointer to a\n\/\/ freshly allocated and empty RouterDescriptor struct.\nfunc NewRouterDescriptor() *RouterDescriptor {\n\n\treturn &RouterDescriptor{Family: make(map[Fingerprint]bool)}\n}\n\n\/\/ ToSlice converts the given router descriptors to a slice.\nfunc (rd *RouterDescriptors) ToSlice() []GetDescriptor {\n\n\tlength := rd.Length()\n\tdescs := make([]GetDescriptor, length)\n\n\ti := 0\n\tfor _, getDesc := range rd.RouterDescriptors {\n\t\tdescs[i] = getDesc\n\t\ti += 1\n\t}\n\n\treturn descs\n}\n\n\/\/ Get returns the router descriptor for the given fingerprint and a boolean\n\/\/ value indicating if the descriptor could be found.\nfunc (d *RouterDescriptors) Get(fingerprint Fingerprint) (*RouterDescriptor, bool) {\n\n\tgetDescriptor, exists := d.RouterDescriptors[SanitiseFingerprint(fingerprint)]\n\tif !exists {\n\t\treturn nil, exists\n\t}\n\n\treturn getDescriptor(), exists\n}\n\n\/\/ Set adds a new fingerprint mapping to a function returning the router\n\/\/ descriptor.\nfunc (d *RouterDescriptors) Set(fingerprint Fingerprint, descriptor *RouterDescriptor) {\n\n\td.RouterDescriptors[SanitiseFingerprint(fingerprint)] = func() *RouterDescriptor {\n\t\treturn descriptor\n\t}\n}\n\n\/\/ HasFamily returns true if the given relay identified by its fingerprint is\n\/\/ part of this relay's family.\nfunc (desc *RouterDescriptor) HasFamily(fingerprint Fingerprint) bool {\n\n\t_, ok := desc.Family[SanitiseFingerprint(fingerprint)]\n\treturn ok\n}\n\n\/\/ LazyParseRawDescriptor lazily parses a raw router descriptor (in string\n\/\/ format) and returns the descriptor's fingerprint, a function returning the\n\/\/ descriptor, and an error if the descriptor could not be parsed. Parsing is\n\/\/ delayed until the router descriptor is accessed.\nfunc LazyParseRawDescriptor(rawDescriptor string) (Fingerprint, GetDescriptor, error) {\n\n\tvar fingerprint Fingerprint\n\n\t\/\/ Delay parsing of the router descriptor until this function is executed.\n\tgetDescriptor := func() *RouterDescriptor {\n\t\t_, f, _ := ParseRawDescriptor(rawDescriptor)\n\t\treturn f()\n\t}\n\n\t\/\/ Only pull out the fingerprint.\n\tlines := strings.Split(rawDescriptor, \"\\n\")\n\tfor _, line := range lines {\n\t\twords := strings.Split(line, \" \")\n\t\tif words[0] == \"opt\" {\n\t\t\twords = words[1:]\n\t\t}\n\n\t\tif words[0] == \"fingerprint\" {\n\t\t\tfingerprint = Fingerprint(strings.Join(words[1:], \"\"))\n\t\t\treturn SanitiseFingerprint(fingerprint), getDescriptor, nil\n\t\t}\n\t}\n\n\treturn \"\", nil, fmt.Errorf(\"Could not extract descriptor fingerprint.\")\n}\n\n\/\/ ParseRawDescriptor parses a raw router descriptor (in string format) and\n\/\/ returns the descriptor's fingerprint, a function returning the descriptor,\n\/\/ and an error if the descriptor could not be parsed. In contrast to\n\/\/ LazyParseRawDescriptor, parsing is *not* delayed.\nfunc ParseRawDescriptor(rawDescriptor string) (Fingerprint, GetDescriptor, error) {\n\n\tvar descriptor *RouterDescriptor = NewRouterDescriptor()\n\n\tlines := strings.Split(rawDescriptor, \"\\n\")\n\n\t\/\/ Go over raw descriptor line by line and extract the fields we are\n\t\/\/ interested in.\n\tfor _, line := range lines {\n\n\t\twords := strings.Split(line, \" \")\n\n\t\t\/\/ Ignore lines starting with \"opt\".\n\t\tif words[0] == \"opt\" {\n\t\t\twords = words[1:]\n\t\t}\n\n\t\tswitch words[0] {\n\n\t\tcase \"router\":\n\t\t\tdescriptor.Nickname = words[1]\n\t\t\tdescriptor.Address = net.ParseIP(words[2])\n\t\t\tdescriptor.ORPort = StringToPort(words[3])\n\t\t\tdescriptor.SOCKSPort = StringToPort(words[4])\n\t\t\tdescriptor.DirPort = StringToPort(words[5])\n\n\t\tcase \"platform\":\n\t\t\tfor i := 0; i < len(words); i++ {\n\t\t\t\tif (strings.TrimSpace(words[i]) == \"on\") && (i < len(words)-1) {\n\t\t\t\t\tdescriptor.OperatingSystem = strings.Join(words[i+1:], \" \")\n\t\t\t\t\tdescriptor.TorVersion = strings.Join(words[1:i-1], \" \")\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"uptime\":\n\t\t\tdescriptor.Uptime, _ = strconv.ParseUint(words[1], 10, 64)\n\n\t\tcase \"published\":\n\t\t\ttime, _ := time.Parse(publishedTimeLayout, strings.Join(words[1:], \" \"))\n\t\t\tdescriptor.Published = time\n\n\t\tcase \"fingerprint\":\n\t\t\tdescriptor.Fingerprint = SanitiseFingerprint(Fingerprint(strings.Join(words[1:], \"\")))\n\n\t\tcase \"hibernating\":\n\t\t\tdescriptor.Hibernating, _ = strconv.ParseBool(words[1])\n\n\t\tcase \"bandwidth\":\n\t\t\tdescriptor.BandwidthAvg, _ = strconv.ParseUint(words[1], 10, 64)\n\t\t\tdescriptor.BandwidthBurst, _ = strconv.ParseUint(words[2], 10, 64)\n\t\t\tdescriptor.BandwidthObs, _ = strconv.ParseUint(words[3], 10, 64)\n\n\t\tcase \"family\":\n\t\t\tfor _, word := range words[1:] {\n\t\t\t\tfpr := Fingerprint(strings.Trim(word, \"$\"))\n\t\t\t\tdescriptor.Family[fpr] = true\n\t\t\t}\n\n\t\tcase \"contact\":\n\t\t\tdescriptor.Contact = strings.Join(words[1:], \" \")\n\n\t\tcase \"hidden-service-dir\":\n\t\t\tdescriptor.HiddenServiceDir = true\n\n\t\tcase \"reject\":\n\t\t\tdescriptor.RawReject += words[1] + \" \"\n\n\t\tcase \"accept\":\n\t\t\tdescriptor.RawAccept += words[1] + \" \"\n\t\t}\n\t}\n\n\treturn descriptor.Fingerprint, func() *RouterDescriptor { return descriptor }, nil\n}\n\n\/\/ extractDescriptor is a bufio.SplitFunc that extracts individual router\n\/\/ descriptors.\nfunc extractDescriptor(data []byte, atEOF bool) (advance int, token []byte, err error) {\n\n\tif atEOF && len(data) == 0 {\n\t\treturn 0, nil, nil\n\t}\n\n\tstart := 0\n\tif !bytes.HasPrefix(data, []byte(\"router \")) {\n\t\tstart = bytes.Index(data, []byte(\"\\nrouter \"))\n\t\tif start < 0 {\n\t\t\tif atEOF {\n\t\t\t\treturn 0, nil, fmt.Errorf(\"Cannot find beginning of descriptor: \\\"\\\\nrouter \\\"\")\n\t\t\t}\n\t\t\t\/\/ Request more data.\n\t\t\treturn 0, nil, nil\n\t\t}\n\t\tstart += 1\n\t}\n\n\tmarker := []byte(\"\\n-----END SIGNATURE-----\\n\")\n\tend := bytes.Index(data[start:], marker)\n\tif end >= 0 {\n\t\treturn start + end + len(marker), data[start : start+end+len(marker)], nil\n\t}\n\tif atEOF {\n\t\treturn start, nil, fmt.Errorf(\"Cannot find end of descriptor: %q\", marker)\n\t}\n\t\/\/ Request more data.\n\treturn start, nil, nil\n}\n\n\/\/ MatchesRouterDescriptor returns true if fields of the given router\n\/\/ descriptor are present in the object filter, e.g., the descriptor's nickname\n\/\/ is part of the object filter.\nfunc (filter *ObjectFilter) MatchesRouterDescriptor(desc *RouterDescriptor) bool {\n\n\tif filter.HasIPAddr(desc.Address) {\n\t\treturn true\n\t}\n\n\tif filter.HasFingerprint(desc.Fingerprint) {\n\t\treturn true\n\t}\n\n\tif filter.HasNickname(desc.Nickname) {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n\/\/ parseDescriptorUnchecked parses a descriptor of type \"server-descriptor\".\n\/\/ The input should be without a type annotation; i.e., the type annotation\n\/\/ should already have been read and checked to be the correct type. The\n\/\/ function returns a pointer to RouterDescriptors containing the router\n\/\/ descriptors. If there were any errors, an error string is returned. If the\n\/\/ lazy argument is set to true, parsing of the router descriptors is delayed\n\/\/ until they are accessed.\nfunc parseDescriptorUnchecked(r io.Reader, lazy bool) (*RouterDescriptors, error) {\n\n\tvar descriptors = NewRouterDescriptors()\n\tvar descriptorParser func(descriptor string) (Fingerprint, GetDescriptor, error)\n\n\tif lazy {\n\t\tdescriptorParser = LazyParseRawDescriptor\n\t} else {\n\t\tdescriptorParser = ParseRawDescriptor\n\t}\n\n\t\/\/ We will read raw router descriptors from this channel.\n\tqueue := make(chan QueueUnit)\n\tgo DissectFile(r, extractDescriptor, queue)\n\n\t\/\/ Parse incoming descriptors until the channel is closed by the remote\n\t\/\/ end.\n\tfor unit := range queue {\n\t\tif unit.Err != nil {\n\t\t\treturn nil, unit.Err\n\t\t}\n\n\t\tfingerprint, getDescriptor, err := descriptorParser(unit.Blurb)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdescriptors.RouterDescriptors[SanitiseFingerprint(fingerprint)] = getDescriptor\n\t}\n\n\treturn descriptors, nil\n}\n\n\/\/ parseDescriptor is a wrapper around parseDescriptorUnchecked that first reads\n\/\/ and checks the type annotation to make sure it belongs to\n\/\/ descriptorAnnotations.\nfunc parseDescriptor(r io.Reader, lazy bool) (*RouterDescriptors, error) {\n\n\tr, err := readAndCheckAnnotation(r, descriptorAnnotations)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn parseDescriptorUnchecked(r, lazy)\n}\n\n\/\/ parseDescriptorFile is a wrapper around parseDescriptor that opens the named\n\/\/ file for parsing.\nfunc parseDescriptorFile(fileName string, lazy bool) (*RouterDescriptors, error) {\n\n\tfd, err := os.Open(fileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer fd.Close()\n\n\treturn parseDescriptor(fd, lazy)\n}\n\n\/\/ LazilyParseDescriptorFile parses the given file and returns a pointer to\n\/\/ RouterDescriptors containing the router descriptors. If there were any\n\/\/ errors, an error string is returned. Note that parsing is done lazily which\n\/\/ means that it is delayed until a given router descriptor is accessed. That\n\/\/ pays off when you know that you will not parse most router descriptors.\nfunc LazilyParseDescriptorFile(fileName string) (*RouterDescriptors, error) {\n\n\treturn parseDescriptorFile(fileName, true)\n}\n\n\/\/ ParseDescriptorFile parses the given file and returns a pointer to\n\/\/ RouterDescriptors containing the router descriptors. If there were any\n\/\/ errors, an error string is returned. Note that in contrast to\n\/\/ LazilyParseDescriptorFile, parsing is *not* delayed. That pays off when you\n\/\/ know that you will parse most router descriptors.\nfunc ParseDescriptorFile(fileName string) (*RouterDescriptors, error) {\n\n\treturn parseDescriptorFile(fileName, false)\n}\n<commit_msg>Added RawExitPolicy to preserve accept\/reject order<commit_after>\/\/ Parses files containing server descriptors.\n\npackage zoossh\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\t\/\/ The layout of the \"published\" field.\n\tpublishedTimeLayout = \"2006-01-02 15:04:05\"\n)\n\nvar descriptorAnnotations = map[Annotation]bool{\n\t\/\/ The file format we currently (try to) support.\n\tAnnotation{\"server-descriptor\", \"1\", \"0\"}: true,\n}\n\ntype GetDescriptor func() *RouterDescriptor\n\n\/\/ An exitpattern as defined in dirspec.txt, Section 2.1.3.\ntype ExitPattern struct {\n\tAddressSpec string\n\tPortSpec string\n}\n\n\/\/ An (incomplete) router descriptor as defined in dirspec.txt, Section 2.1.1.\ntype RouterDescriptor struct {\n\n\t\/\/ The single fields of a \"router\" line.\n\tNickname string\n\tAddress net.IP\n\tORPort uint16\n\tSOCKSPort uint16\n\tDirPort uint16\n\n\t\/\/ The single fields of a \"bandwidth\" line. All bandwidth values are in\n\t\/\/ bytes per second.\n\tBandwidthAvg uint64\n\tBandwidthBurst uint64\n\tBandwidthObs uint64\n\n\t\/\/ The single fields of a \"platform\" line.\n\tOperatingSystem string\n\tTorVersion string\n\n\t\/\/ The single fields of a \"published\" line.\n\tPublished time.Time\n\n\t\/\/ The single fields of an \"uptime\" line.\n\tUptime uint64\n\n\t\/\/ The single fields of a \"fingerprint\" line.\n\tFingerprint Fingerprint\n\n\t\/\/ The single fields of a \"hibernating\" line.\n\tHibernating bool\n\n\t\/\/ The single fields of a \"family\" line.\n\tFamily map[Fingerprint]bool\n\n\t\/\/ The single fields of a \"contact\" line.\n\tContact string\n\n\t\/\/ The \"hidden-service-dir\" line.\n\tHiddenServiceDir bool\n\n\tOnionKey string\n\tNTorOnionKey string\n\tSigningKey string\n\n\tRawAccept string\n\tRawReject string\n\tRawExitPolicy string\n\n\tAccept []*ExitPattern\n\tReject []*ExitPattern\n}\n\ntype RouterDescriptors struct {\n\n\t\/\/ A map from relay fingerprint to a function which returns the router\n\t\/\/ descriptor.\n\tRouterDescriptors map[Fingerprint]GetDescriptor\n}\n\n\/\/ String implements the String as well as the Object interface. It returns\n\/\/ the descriptor's string representation.\nfunc (desc *RouterDescriptor) String() string {\n\n\treturn fmt.Sprintf(\"%s,%s,%s,%d,%d,%s,%d,%s,%s,%s\",\n\t\tdesc.Fingerprint,\n\t\tdesc.Nickname,\n\t\tdesc.Address,\n\t\tdesc.ORPort,\n\t\tdesc.DirPort,\n\t\tdesc.Published.Format(time.RFC3339),\n\t\tdesc.Uptime,\n\t\tstrings.Replace(desc.OperatingSystem, \",\", \"\", -1),\n\t\tstrings.Replace(desc.TorVersion, \",\", \"\", -1),\n\t\tstrings.Replace(desc.Contact, \",\", \"\", -1))\n}\n\n\/\/ GetFingerprint implements the Object interface. It returns the descriptor's\n\/\/ fingerprint.\nfunc (desc *RouterDescriptor) GetFingerprint() Fingerprint {\n\n\treturn desc.Fingerprint\n}\n\n\/\/ Length implements the ObjectSet interface. It returns the length of the\n\/\/ router descriptors.\nfunc (descs *RouterDescriptors) Length() int {\n\n\treturn len(descs.RouterDescriptors)\n}\n\n\/\/ Iterate implements the ObjectSet interface. Using a channel, it iterates\n\/\/ over and returns all router descriptors. The given object filter can be\n\/\/ used to filter descriptors, e.g., by fingerprint.\nfunc (descs *RouterDescriptors) Iterate(filter *ObjectFilter) <-chan Object {\n\n\tch := make(chan Object)\n\n\tgo func() {\n\t\tfor _, getDesc := range descs.RouterDescriptors {\n\t\t\tdesc := getDesc()\n\t\t\tif filter == nil || filter.IsEmpty() || filter.MatchesRouterDescriptor(desc) {\n\t\t\t\tch <- desc\n\t\t\t}\n\t\t}\n\t\tclose(ch)\n\t}()\n\n\treturn ch\n}\n\n\/\/ GetObject implements the ObjectSet interface. It returns the object\n\/\/ identified by the given fingerprint. If the object is not present in the\n\/\/ set, false is returned, otherwise true.\nfunc (desc *RouterDescriptors) GetObject(fingerprint Fingerprint) (Object, bool) {\n\n\treturn desc.Get(fingerprint)\n}\n\n\/\/ Merge merges the given object set with itself.\nfunc (descs *RouterDescriptors) Merge(objs ObjectSet) {\n\n\tfor desc := range descs.Iterate(nil) {\n\t\tfpr := desc.GetFingerprint()\n\t\t_, exists := descs.Get(fpr)\n\t\tif !exists {\n\t\t\tdescs.Set(fpr, desc.(*RouterDescriptor))\n\t\t}\n\t}\n}\n\n\/\/ NewRouterDescriptors serves as a constructor and returns a pointer to a\n\/\/ freshly allocated and empty RouterDescriptors struct.\nfunc NewRouterDescriptors() *RouterDescriptors {\n\n\treturn &RouterDescriptors{RouterDescriptors: make(map[Fingerprint]GetDescriptor)}\n}\n\n\/\/ NewRouterDescriptor serves as a constructor and returns a pointer to a\n\/\/ freshly allocated and empty RouterDescriptor struct.\nfunc NewRouterDescriptor() *RouterDescriptor {\n\n\treturn &RouterDescriptor{Family: make(map[Fingerprint]bool)}\n}\n\n\/\/ ToSlice converts the given router descriptors to a slice.\nfunc (rd *RouterDescriptors) ToSlice() []GetDescriptor {\n\n\tlength := rd.Length()\n\tdescs := make([]GetDescriptor, length)\n\n\ti := 0\n\tfor _, getDesc := range rd.RouterDescriptors {\n\t\tdescs[i] = getDesc\n\t\ti += 1\n\t}\n\n\treturn descs\n}\n\n\/\/ Get returns the router descriptor for the given fingerprint and a boolean\n\/\/ value indicating if the descriptor could be found.\nfunc (d *RouterDescriptors) Get(fingerprint Fingerprint) (*RouterDescriptor, bool) {\n\n\tgetDescriptor, exists := d.RouterDescriptors[SanitiseFingerprint(fingerprint)]\n\tif !exists {\n\t\treturn nil, exists\n\t}\n\n\treturn getDescriptor(), exists\n}\n\n\/\/ Set adds a new fingerprint mapping to a function returning the router\n\/\/ descriptor.\nfunc (d *RouterDescriptors) Set(fingerprint Fingerprint, descriptor *RouterDescriptor) {\n\n\td.RouterDescriptors[SanitiseFingerprint(fingerprint)] = func() *RouterDescriptor {\n\t\treturn descriptor\n\t}\n}\n\n\/\/ HasFamily returns true if the given relay identified by its fingerprint is\n\/\/ part of this relay's family.\nfunc (desc *RouterDescriptor) HasFamily(fingerprint Fingerprint) bool {\n\n\t_, ok := desc.Family[SanitiseFingerprint(fingerprint)]\n\treturn ok\n}\n\n\/\/ LazyParseRawDescriptor lazily parses a raw router descriptor (in string\n\/\/ format) and returns the descriptor's fingerprint, a function returning the\n\/\/ descriptor, and an error if the descriptor could not be parsed. Parsing is\n\/\/ delayed until the router descriptor is accessed.\nfunc LazyParseRawDescriptor(rawDescriptor string) (Fingerprint, GetDescriptor, error) {\n\n\tvar fingerprint Fingerprint\n\n\t\/\/ Delay parsing of the router descriptor until this function is executed.\n\tgetDescriptor := func() *RouterDescriptor {\n\t\t_, f, _ := ParseRawDescriptor(rawDescriptor)\n\t\treturn f()\n\t}\n\n\t\/\/ Only pull out the fingerprint.\n\tlines := strings.Split(rawDescriptor, \"\\n\")\n\tfor _, line := range lines {\n\t\twords := strings.Split(line, \" \")\n\t\tif words[0] == \"opt\" {\n\t\t\twords = words[1:]\n\t\t}\n\n\t\tif words[0] == \"fingerprint\" {\n\t\t\tfingerprint = Fingerprint(strings.Join(words[1:], \"\"))\n\t\t\treturn SanitiseFingerprint(fingerprint), getDescriptor, nil\n\t\t}\n\t}\n\n\treturn \"\", nil, fmt.Errorf(\"Could not extract descriptor fingerprint.\")\n}\n\n\/\/ ParseRawDescriptor parses a raw router descriptor (in string format) and\n\/\/ returns the descriptor's fingerprint, a function returning the descriptor,\n\/\/ and an error if the descriptor could not be parsed. In contrast to\n\/\/ LazyParseRawDescriptor, parsing is *not* delayed.\nfunc ParseRawDescriptor(rawDescriptor string) (Fingerprint, GetDescriptor, error) {\n\n\tvar descriptor *RouterDescriptor = NewRouterDescriptor()\n\n\tlines := strings.Split(rawDescriptor, \"\\n\")\n\n\t\/\/ Go over raw descriptor line by line and extract the fields we are\n\t\/\/ interested in.\n\tfor _, line := range lines {\n\n\t\twords := strings.Split(line, \" \")\n\n\t\t\/\/ Ignore lines starting with \"opt\".\n\t\tif words[0] == \"opt\" {\n\t\t\twords = words[1:]\n\t\t}\n\n\t\tswitch words[0] {\n\n\t\tcase \"router\":\n\t\t\tdescriptor.Nickname = words[1]\n\t\t\tdescriptor.Address = net.ParseIP(words[2])\n\t\t\tdescriptor.ORPort = StringToPort(words[3])\n\t\t\tdescriptor.SOCKSPort = StringToPort(words[4])\n\t\t\tdescriptor.DirPort = StringToPort(words[5])\n\n\t\tcase \"platform\":\n\t\t\tfor i := 0; i < len(words); i++ {\n\t\t\t\tif (strings.TrimSpace(words[i]) == \"on\") && (i < len(words)-1) {\n\t\t\t\t\tdescriptor.OperatingSystem = strings.Join(words[i+1:], \" \")\n\t\t\t\t\tdescriptor.TorVersion = strings.Join(words[1:i-1], \" \")\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase \"uptime\":\n\t\t\tdescriptor.Uptime, _ = strconv.ParseUint(words[1], 10, 64)\n\n\t\tcase \"published\":\n\t\t\ttime, _ := time.Parse(publishedTimeLayout, strings.Join(words[1:], \" \"))\n\t\t\tdescriptor.Published = time\n\n\t\tcase \"fingerprint\":\n\t\t\tdescriptor.Fingerprint = SanitiseFingerprint(Fingerprint(strings.Join(words[1:], \"\")))\n\n\t\tcase \"hibernating\":\n\t\t\tdescriptor.Hibernating, _ = strconv.ParseBool(words[1])\n\n\t\tcase \"bandwidth\":\n\t\t\tdescriptor.BandwidthAvg, _ = strconv.ParseUint(words[1], 10, 64)\n\t\t\tdescriptor.BandwidthBurst, _ = strconv.ParseUint(words[2], 10, 64)\n\t\t\tdescriptor.BandwidthObs, _ = strconv.ParseUint(words[3], 10, 64)\n\n\t\tcase \"family\":\n\t\t\tfor _, word := range words[1:] {\n\t\t\t\tfpr := Fingerprint(strings.Trim(word, \"$\"))\n\t\t\t\tdescriptor.Family[fpr] = true\n\t\t\t}\n\n\t\tcase \"contact\":\n\t\t\tdescriptor.Contact = strings.Join(words[1:], \" \")\n\n\t\tcase \"hidden-service-dir\":\n\t\t\tdescriptor.HiddenServiceDir = true\n\n\t\tcase \"reject\":\n\t\t\tdescriptor.RawReject += words[1] + \" \"\n\t\t\tdescriptor.RawExitPolicy += words[0] + \" \" + words[1] + \"\\n\"\n\n\t\tcase \"accept\":\n\t\t\tdescriptor.RawAccept += words[1] + \" \"\n\t\t\tdescriptor.RawExitPolicy += words[0] + \" \" + words[1] + \"\\n\"\n\t\t}\n\t}\n\n\treturn descriptor.Fingerprint, func() *RouterDescriptor { return descriptor }, nil\n}\n\n\/\/ extractDescriptor is a bufio.SplitFunc that extracts individual router\n\/\/ descriptors.\nfunc extractDescriptor(data []byte, atEOF bool) (advance int, token []byte, err error) {\n\n\tif atEOF && len(data) == 0 {\n\t\treturn 0, nil, nil\n\t}\n\n\tstart := 0\n\tif !bytes.HasPrefix(data, []byte(\"router \")) {\n\t\tstart = bytes.Index(data, []byte(\"\\nrouter \"))\n\t\tif start < 0 {\n\t\t\tif atEOF {\n\t\t\t\treturn 0, nil, fmt.Errorf(\"Cannot find beginning of descriptor: \\\"\\\\nrouter \\\"\")\n\t\t\t}\n\t\t\t\/\/ Request more data.\n\t\t\treturn 0, nil, nil\n\t\t}\n\t\tstart += 1\n\t}\n\n\tmarker := []byte(\"\\n-----END SIGNATURE-----\\n\")\n\tend := bytes.Index(data[start:], marker)\n\tif end >= 0 {\n\t\treturn start + end + len(marker), data[start : start+end+len(marker)], nil\n\t}\n\tif atEOF {\n\t\treturn start, nil, fmt.Errorf(\"Cannot find end of descriptor: %q\", marker)\n\t}\n\t\/\/ Request more data.\n\treturn start, nil, nil\n}\n\n\/\/ MatchesRouterDescriptor returns true if fields of the given router\n\/\/ descriptor are present in the object filter, e.g., the descriptor's nickname\n\/\/ is part of the object filter.\nfunc (filter *ObjectFilter) MatchesRouterDescriptor(desc *RouterDescriptor) bool {\n\n\tif filter.HasIPAddr(desc.Address) {\n\t\treturn true\n\t}\n\n\tif filter.HasFingerprint(desc.Fingerprint) {\n\t\treturn true\n\t}\n\n\tif filter.HasNickname(desc.Nickname) {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n\/\/ parseDescriptorUnchecked parses a descriptor of type \"server-descriptor\".\n\/\/ The input should be without a type annotation; i.e., the type annotation\n\/\/ should already have been read and checked to be the correct type. The\n\/\/ function returns a pointer to RouterDescriptors containing the router\n\/\/ descriptors. If there were any errors, an error string is returned. If the\n\/\/ lazy argument is set to true, parsing of the router descriptors is delayed\n\/\/ until they are accessed.\nfunc parseDescriptorUnchecked(r io.Reader, lazy bool) (*RouterDescriptors, error) {\n\n\tvar descriptors = NewRouterDescriptors()\n\tvar descriptorParser func(descriptor string) (Fingerprint, GetDescriptor, error)\n\n\tif lazy {\n\t\tdescriptorParser = LazyParseRawDescriptor\n\t} else {\n\t\tdescriptorParser = ParseRawDescriptor\n\t}\n\n\t\/\/ We will read raw router descriptors from this channel.\n\tqueue := make(chan QueueUnit)\n\tgo DissectFile(r, extractDescriptor, queue)\n\n\t\/\/ Parse incoming descriptors until the channel is closed by the remote\n\t\/\/ end.\n\tfor unit := range queue {\n\t\tif unit.Err != nil {\n\t\t\treturn nil, unit.Err\n\t\t}\n\n\t\tfingerprint, getDescriptor, err := descriptorParser(unit.Blurb)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tdescriptors.RouterDescriptors[SanitiseFingerprint(fingerprint)] = getDescriptor\n\t}\n\n\treturn descriptors, nil\n}\n\n\/\/ parseDescriptor is a wrapper around parseDescriptorUnchecked that first reads\n\/\/ and checks the type annotation to make sure it belongs to\n\/\/ descriptorAnnotations.\nfunc parseDescriptor(r io.Reader, lazy bool) (*RouterDescriptors, error) {\n\n\tr, err := readAndCheckAnnotation(r, descriptorAnnotations)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn parseDescriptorUnchecked(r, lazy)\n}\n\n\/\/ parseDescriptorFile is a wrapper around parseDescriptor that opens the named\n\/\/ file for parsing.\nfunc parseDescriptorFile(fileName string, lazy bool) (*RouterDescriptors, error) {\n\n\tfd, err := os.Open(fileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer fd.Close()\n\n\treturn parseDescriptor(fd, lazy)\n}\n\n\/\/ LazilyParseDescriptorFile parses the given file and returns a pointer to\n\/\/ RouterDescriptors containing the router descriptors. If there were any\n\/\/ errors, an error string is returned. Note that parsing is done lazily which\n\/\/ means that it is delayed until a given router descriptor is accessed. That\n\/\/ pays off when you know that you will not parse most router descriptors.\nfunc LazilyParseDescriptorFile(fileName string) (*RouterDescriptors, error) {\n\n\treturn parseDescriptorFile(fileName, true)\n}\n\n\/\/ ParseDescriptorFile parses the given file and returns a pointer to\n\/\/ RouterDescriptors containing the router descriptors. If there were any\n\/\/ errors, an error string is returned. Note that in contrast to\n\/\/ LazilyParseDescriptorFile, parsing is *not* delayed. That pays off when you\n\/\/ know that you will parse most router descriptors.\nfunc ParseDescriptorFile(fileName string) (*RouterDescriptors, error) {\n\n\treturn parseDescriptorFile(fileName, false)\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2019 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Binary itest runs some code against Datastore.\npackage main\n\nimport (\n\t\"context\"\n\t\"flag\"\n\n\tglog \"github.com\/golang\/glog\" \/* copybara-comment *\/\n\t\"cloud.google.com\/go\/datastore\" \/* copybara-comment: datastore *\/\n\t\"google.golang.org\/grpc\/codes\" \/* copybara-comment *\/\n\t\"google.golang.org\/grpc\/status\" \/* copybara-comment *\/\n\t\"github.com\/GoogleCloudPlatform\/healthcare-federated-access-services\/lib\/dsstore\" \/* copybara-comment: dsstore *\/\n\t\"github.com\/GoogleCloudPlatform\/healthcare-federated-access-services\/lib\/storage\" \/* copybara-comment: storage *\/\n\n\tdpb \"github.com\/golang\/protobuf\/ptypes\/duration\" \/* copybara-comment *\/\n)\n\nvar (\n\tfakeProjectID = flag.String(\"project_id\", \"fake-project-id\", \"\")\n\tfakeServiceName = \"fake-service-name\"\n\tfakeConfigPath = \"fake-config-path\"\n)\n\nfunc main() {\n\tctx := context.Background()\n\tflag.Parse()\n\n\tc, err := datastore.NewClient(ctx, *fakeProjectID)\n\tif err != nil {\n\t\tglog.Exitf(\"datastore.NewClient(...) failed: %v\", err)\n\t}\n\n\ts := dsstore.New(c, *fakeProjectID, fakeServiceName, fakeConfigPath)\n\n\t{\n\t\t\/\/ Write\n\t\ttx, err := s.Tx(true)\n\t\tif err != nil {\n\t\t\tglog.Exitf(\"store.Tx(true) failed: %v\", err)\n\t\t}\n\t\tentity := &dpb.Duration{Seconds: 60}\n\t\tif err := s.WriteTx(\"fake-datatype\", \"fake-realm\", \"fake-user\", \"fake-id\", storage.LatestRev, entity, nil, tx); err != nil {\n\t\t\tglog.Exitf(\"store.WriteTx(...) failed: %v\", err)\n\t\t}\n\t\tif err := tx.Finish(); err != nil {\n\t\t\tglog.Exitf(\"tx.Finish() failed: %v\", err)\n\t\t}\n\t}\n\n\t{\n\t\t\/\/ Read to check it is created.\n\t\tgot := &dpb.Duration{}\n\t\tif err := s.Read(\"fake-datatype\", \"fake-realm\", \"fake-user\", \"fake-id\", storage.LatestRev, got); err != nil {\n\t\t\tglog.Exitf(\"store.Read(...) failed: %v\", err)\n\t\t}\n\t\twant := &dpb.Duration{Seconds: 60}\n\t\tif got.GetSeconds() != want.GetSeconds() {\n\t\t\tglog.Exitf(\"store.Read(...) = %v, want %v\", got, want)\n\t\t}\n\t}\n\n\t{\n\t\t\/\/ RMW\n\t\ttx, err := s.Tx(true)\n\t\tif err != nil {\n\t\t\tglog.Exitf(\"store.Tx(true) failed: %v\", err)\n\t\t}\n\t\tresp := &dpb.Duration{}\n\t\tif err := s.ReadTx(\"fake-datatype\", \"fake-realm\", \"fake-user\", \"fake-id\", storage.LatestRev, resp, tx); err != nil {\n\t\t\tglog.Exitf(\"store.ReadTx(...) failed: %v\", err)\n\t\t}\n\n\t\tresp.Seconds = resp.Seconds + 60\n\n\t\tif err := s.WriteTx(\"fake-datatype\", \"fake-realm\", \"fake-user\", \"fake-id\", storage.LatestRev, resp, nil, tx); err != nil {\n\t\t\tglog.Exitf(\"store.WriteTx(...) failed: %v\", err)\n\t\t}\n\t\tif err := tx.Finish(); err != nil {\n\t\t\tglog.Exitf(\"tx.Finish() failed: %v\", err)\n\t\t}\n\t}\n\n\t{\n\t\t\/\/ Read to check it is updated.\n\t\tgot := &dpb.Duration{}\n\t\tif err := s.Read(\"fake-datatype\", \"fake-realm\", \"fake-user\", \"fake-id\", storage.LatestRev, got); err != nil {\n\t\t\tglog.Exitf(\"store.Read(...) failed: %v\", err)\n\t\t}\n\t\twant := &dpb.Duration{Seconds: 120}\n\t\tif got.GetSeconds() != want.GetSeconds() {\n\t\t\tglog.Exitf(\"store.Read(...) = %v, want %v\", got, want)\n\t\t}\n\t}\n\n\t{\n\t\t\/\/ Delete\n\t\ttx, err := s.Tx(true)\n\t\tif err != nil {\n\t\t\tglog.Exitf(\"store.Tx(true) failed: %v\", err)\n\t\t}\n\t\tif err := s.DeleteTx(\"fake-datatype\", \"fake-realm\", \"fake-user\", \"fake-id\", storage.LatestRev, tx); err != nil {\n\t\t\tglog.Exitf(\"store.DeleteTx(...) failed: %v\", err)\n\t\t}\n\t\tif err := tx.Finish(); err != nil {\n\t\t\tglog.Exitf(\"tx.Finish() failed: %v\", err)\n\t\t}\n\t}\n\n\t{\n\t\t\/\/ Read to check it is deleted.\n\t\tif err := s.Read(\"fake-datatype\", \"fake-realm\", \"fake-user\", \"fake-id\", storage.LatestRev, &dpb.Duration{}); status.Code(err) != codes.NotFound {\n\t\t\tglog.Exitf(\"store.Read(...) = %v, want error with code %v\", err, codes.NotFound)\n\t\t}\n\t}\n\n}\n<commit_msg>add more tests for storage transactions<commit_after>\/\/ Copyright 2019 Google LLC\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\n\/\/ Binary itest runs some code against Datastore.\npackage main\n\nimport (\n\t\"context\"\n\t\"flag\"\n\t\"fmt\"\n\n\tglog \"github.com\/golang\/glog\" \/* copybara-comment *\/\n\t\"cloud.google.com\/go\/datastore\" \/* copybara-comment: datastore *\/\n\t\"google.golang.org\/grpc\/codes\" \/* copybara-comment *\/\n\t\"google.golang.org\/grpc\/status\" \/* copybara-comment *\/\n\t\"github.com\/GoogleCloudPlatform\/healthcare-federated-access-services\/lib\/dsstore\" \/* copybara-comment: dsstore *\/\n\t\"github.com\/GoogleCloudPlatform\/healthcare-federated-access-services\/lib\/storage\" \/* copybara-comment: storage *\/\n\n\tdpb \"github.com\/golang\/protobuf\/ptypes\/duration\" \/* copybara-comment *\/\n)\n\nvar (\n\tfakeProjectID = flag.String(\"project_id\", \"fake-project-id\", \"\")\n\tfakeServiceName = \"fake-service-name\"\n\tfakeConfigPath = \"fake-config-path\"\n\tfakeDataType = \"fake-datatype\"\n\tfakeRealm = \"fake-realm\"\n\tfakeUser = \"fake-user\"\n)\n\nfunc main() {\n\tctx := context.Background()\n\tflag.Parse()\n\n\tc, err := datastore.NewClient(ctx, *fakeProjectID)\n\tif err != nil {\n\t\tglog.Exitf(\"datastore.NewClient(...) failed: %v\", err)\n\t}\n\n\tscenarioSimple(ctx, c)\n\tscenarioTransactionsConflictingLinearizable(ctx, c)\n\tscenarioTransactionsConflictingNonLinearizable(ctx, c)\n\tscenarioTransactionsReadAfterWrite(ctx, c)\n\n\tfmt.Println(\"All tests passed.\")\n}\n\nfunc scenarioSimple(ctx context.Context, c *datastore.Client) {\n\t\/\/ Scenario: write, read-check, read-modify-write, read-check, delete, read-check\n\tid := \"fake-id-simple\"\n\ts := dsstore.New(c, *fakeProjectID, fakeServiceName, fakeConfigPath)\n\n\t{\n\t\t\/\/ Write\n\t\ttx, err := s.Tx(true)\n\t\tif err != nil {\n\t\t\tglog.Exitf(\"store.Tx(true) failed: %v\", err)\n\t\t}\n\t\tentity := &dpb.Duration{Seconds: 60}\n\t\tif err := s.WriteTx(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, entity, nil, tx); err != nil {\n\t\t\tglog.Exitf(\"store.WriteTx(...) failed: %v\", err)\n\t\t}\n\t\tif err := tx.Finish(); err != nil {\n\t\t\tglog.Exitf(\"tx.Finish() failed: %v\", err)\n\t\t}\n\t}\n\n\t{\n\t\t\/\/ Read to check it is created.\n\t\tgot := &dpb.Duration{}\n\t\tif err := s.Read(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, got); err != nil {\n\t\t\tglog.Exitf(\"store.Read(...) failed: %v\", err)\n\t\t}\n\t\twant := &dpb.Duration{Seconds: 60}\n\t\tif got.GetSeconds() != want.GetSeconds() {\n\t\t\tglog.Exitf(\"store.Read(...) = %v, want %v\", got, want)\n\t\t}\n\t}\n\n\t{\n\t\t\/\/ RMW\n\t\ttx, err := s.Tx(true)\n\t\tif err != nil {\n\t\t\tglog.Exitf(\"store.Tx(true) failed: %v\", err)\n\t\t}\n\t\tresp := &dpb.Duration{}\n\t\tif err := s.ReadTx(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, resp, tx); err != nil {\n\t\t\tglog.Exitf(\"store.ReadTx(...) failed: %v\", err)\n\t\t}\n\n\t\tresp.Seconds = resp.Seconds + 60\n\n\t\tif err := s.WriteTx(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, resp, nil, tx); err != nil {\n\t\t\tglog.Exitf(\"store.WriteTx(...) failed: %v\", err)\n\t\t}\n\t\tif err := tx.Finish(); err != nil {\n\t\t\tglog.Exitf(\"tx.Finish() failed: %v\", err)\n\t\t}\n\t}\n\n\t{\n\t\t\/\/ Read to check it is updated.\n\t\tgot := &dpb.Duration{}\n\t\tif err := s.Read(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, got); err != nil {\n\t\t\tglog.Exitf(\"store.Read(...) failed: %v\", err)\n\t\t}\n\t\twant := &dpb.Duration{Seconds: 120}\n\t\tif got.GetSeconds() != want.GetSeconds() {\n\t\t\tglog.Exitf(\"store.Read(...) = %v, want %v\", got, want)\n\t\t}\n\t}\n\n\t{\n\t\t\/\/ Delete\n\t\ttx, err := s.Tx(true)\n\t\tif err != nil {\n\t\t\tglog.Exitf(\"store.Tx(true) failed: %v\", err)\n\t\t}\n\t\tif err := s.DeleteTx(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, tx); err != nil {\n\t\t\tglog.Exitf(\"store.DeleteTx(...) failed: %v\", err)\n\t\t}\n\t\tif err := tx.Finish(); err != nil {\n\t\t\tglog.Exitf(\"tx.Finish() failed: %v\", err)\n\t\t}\n\t}\n\n\t{\n\t\t\/\/ Read to check it is deleted.\n\t\tif err := s.Read(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, &dpb.Duration{}); status.Code(err) != codes.NotFound {\n\t\t\tglog.Exitf(\"store.Read(...) = %v, want error with code %v\", err, codes.NotFound)\n\t\t}\n\t}\n}\n\nfunc scenarioTransactionsConflictingLinearizable(ctx context.Context, c *datastore.Client) {\n\t\/\/ Scenario: two concurrent write transactions, the second to commit prevails.\n\tid := \"fake-id-TransactionsConflictingLinearizable\"\n\ts := dsstore.New(c, *fakeProjectID, fakeServiceName, fakeConfigPath)\n\n\ttx1, err := s.Tx(true)\n\tif err != nil {\n\t\tglog.Exitf(\"store.Tx(true) failed: %v\", err)\n\t}\n\ttx2, err := s.Tx(true)\n\tif err != nil {\n\t\tglog.Exitf(\"store.Tx(true) failed: %v\", err)\n\t}\n\n\tif err := s.WriteTx(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, &dpb.Duration{Seconds: 1}, nil, tx1); err != nil {\n\t\tglog.Exitf(\"store.WriteTx(...) failed: %v\", err)\n\t}\n\n\tif err := s.WriteTx(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, &dpb.Duration{Seconds: 2}, nil, tx2); err != nil {\n\t\tglog.Exitf(\"store.WriteTx(...) failed: %v\", err)\n\t}\n\n\tif err := tx1.Finish(); err != nil {\n\t\tglog.Exitf(\"tx.Finish() failed: %v\", err)\n\t}\n\tif err := tx2.Finish(); err != nil {\n\t\tglog.Exitf(\"tx.Finish() failed: %v\", err)\n\t}\n\n\tgot := &dpb.Duration{}\n\tif err := s.Read(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, got); err != nil {\n\t\tglog.Exitf(\"store.Read(...) failed: %v\", err)\n\t}\n\twant := &dpb.Duration{Seconds: 2}\n\tif got.GetSeconds() != want.GetSeconds() {\n\t\tglog.Exitf(\"store.Read(...) = %v, want %v\", got, want)\n\t}\n}\n\nfunc scenarioTransactionsConflictingNonLinearizable(ctx context.Context, c *datastore.Client) {\n\t\/\/ Scenario: two concurrent RMW transactions, the second to commit fails.\n\tid := \"fake-id-TransactionsConflictingNonLinearizable\"\n\ts := dsstore.New(c, *fakeProjectID, fakeServiceName, fakeConfigPath)\n\n\ttx1, err := s.Tx(true)\n\tif err != nil {\n\t\tglog.Exitf(\"store.Tx(true) failed: %v\", err)\n\t}\n\ttx2, err := s.Tx(true)\n\tif err != nil {\n\t\tglog.Exitf(\"store.Tx(true) failed: %v\", err)\n\t}\n\n\t\/\/ Read so the transactions cannot be linearized.\n\ts.ReadTx(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, &dpb.Duration{}, tx1)\n\tif err := s.WriteTx(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, &dpb.Duration{Seconds: 1}, nil, tx1); err != nil {\n\t\tglog.Exitf(\"store.WriteTx(...) failed: %v\", err)\n\t}\n\n\t\/\/ Read so the transactions cannot be linearized.\n\ts.ReadTx(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, &dpb.Duration{}, tx2)\n\tif err := s.WriteTx(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, &dpb.Duration{Seconds: 2}, nil, tx2); err != nil {\n\t\tglog.Exitf(\"store.WriteTx(...) failed: %v\", err)\n\t}\n\n\tif err := tx1.Finish(); err != nil {\n\t\tglog.Exitf(\"tx.Finish() failed: %v\", err)\n\t}\n\tif err := tx2.Finish(); err != datastore.ErrConcurrentTransaction {\n\t\tglog.Exitf(\"tx.Finish() failed: %v\", err)\n\t}\n\n\tgot := &dpb.Duration{}\n\tif err := s.Read(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, got); err != nil {\n\t\tglog.Exitf(\"store.Read(...) failed: %v\", err)\n\t}\n\twant := &dpb.Duration{Seconds: 1}\n\tif got.GetSeconds() != want.GetSeconds() {\n\t\tglog.Exitf(\"store.Read(...) = %v, want %v\", got, want)\n\t}\n}\n\nfunc scenarioTransactionsReadAfterWrite(ctx context.Context, c *datastore.Client) {\n\t\/\/ Scenario: one transaction, write followed by read, read doesn't see the write.\n\tid := \"fake-id-TransactionsReadAfterWrite\"\n\ts := dsstore.New(c, *fakeProjectID, fakeServiceName, fakeConfigPath)\n\n\t{\n\t\tif err := s.Write(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, &dpb.Duration{Seconds: 60}, nil); err != nil {\n\t\t\tglog.Exitf(\"store.WriteTx(...) failed: %v\", err)\n\t\t}\n\t}\n\n\t{\n\t\ttx, err := s.Tx(true)\n\t\tif err != nil {\n\t\t\tglog.Exitf(\"store.Tx(true) failed: %v\", err)\n\t\t}\n\n\t\te := &dpb.Duration{Seconds: 120}\n\t\tif err := s.WriteTx(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, e, nil, tx); err != nil {\n\t\t\tglog.Exitf(\"store.WriteTx(...) failed: %v\", err)\n\t\t}\n\n\t\tgot := &dpb.Duration{}\n\t\tif err := s.ReadTx(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, got, tx); err != nil {\n\t\t\tglog.Exitf(\"store.ReadTx(...) failed: %v\", err)\n\t\t}\n\t\twant := &dpb.Duration{Seconds: 60}\n\t\tif got.GetSeconds() != want.GetSeconds() {\n\t\t\tglog.Exitf(\"store.ReadTx(...) = %v, want %v\", got, want)\n\t\t}\n\n\t\tif err := tx.Finish(); err != nil {\n\t\t\tglog.Exitf(\"tx.Finish() failed: %v\", err)\n\t\t}\n\t}\n\n\t{\n\t\tgot := &dpb.Duration{}\n\t\tif err := s.Read(fakeDataType, fakeRealm, fakeUser, id, storage.LatestRev, got); err != nil {\n\t\t\tglog.Exitf(\"store.Read(...) failed: %v\", err)\n\t\t}\n\t\twant := &dpb.Duration{Seconds: 120}\n\t\tif got.GetSeconds() != want.GetSeconds() {\n\t\t\tglog.Exitf(\"store.Read(...) = %v, want %v\", got, want)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>\/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/*\nThis example program shows how the `finder` and `property` packages can\nbe used to navigate a vSphere inventory structure using govmomi.\n*\/\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/vmware\/govmomi\"\n\t\"github.com\/vmware\/govmomi\/find\"\n\t\"github.com\/vmware\/govmomi\/property\"\n\t\"github.com\/vmware\/govmomi\/vim25\/mo\"\n\t\"github.com\/vmware\/govmomi\/vim25\/types\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ GetEnvString returns string from environment variable.\nfunc GetEnvString(v string, def string) string {\n\tr := os.Getenv(v)\n\tif r == \"\" {\n\t\treturn def\n\t}\n\n\treturn r\n}\n\n\/\/ GetEnvBool returns boolean from environment variable.\nfunc GetEnvBool(v string, def bool) bool {\n\tr := os.Getenv(v)\n\tif r == \"\" {\n\t\treturn def\n\t}\n\n\tswitch strings.ToLower(r[0:1]) {\n\tcase \"t\", \"y\", \"1\":\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n\/\/ Humanize converts a number in bytes to a more readable format.\nfunc Humanize(v int64) string {\n\tconst KB = 1024\n\tconst MB = 1024 * KB\n\tconst GB = 1024 * MB\n\tconst TB = 1024 * GB\n\tconst PB = 1024 * TB\n\n\tswitch {\n\tcase v < KB:\n\t\treturn fmt.Sprintf(\"%dB\", v)\n\tcase v < MB:\n\t\treturn fmt.Sprintf(\"%.1fKB\", float32(v)\/KB)\n\tcase v < GB:\n\t\treturn fmt.Sprintf(\"%.1fMB\", float32(v)\/MB)\n\tcase v < TB:\n\t\treturn fmt.Sprintf(\"%.1fGB\", float32(v)\/GB)\n\tcase v < PB:\n\t\treturn fmt.Sprintf(\"%.1fTB\", float32(v)\/TB)\n\tdefault:\n\t\treturn \"a lot\"\n\t}\n}\n\nconst (\n\tenvURL = \"GOVMOMI_URL\"\n\tenvUserName = \"GOVMOMI_USERNAME\"\n\tenvPassword = \"GOVMOMI_PASSWORD\"\n\tenvInsecure = \"GOVMOMI_INSECURE\"\n)\n\nvar urlDescription = fmt.Sprintf(\"ESX or vCenter URL [%s]\", envURL)\nvar urlFlag = flag.String(\"url\", GetEnvString(envURL, \"https:\/\/username:password@host\/sdk\"), urlDescription)\n\nvar insecureDescription = fmt.Sprintf(\"Don't verify the server's certificate chain [%s]\", envInsecure)\nvar insecureFlag = flag.Bool(\"insecure\", GetEnvBool(envInsecure, false), insecureDescription)\n\nfunc processOverride(u *url.URL) {\n\tenvUsername := os.Getenv(envUserName)\n\tenvPassword := os.Getenv(envPassword)\n\n\t\/\/ Override username if provided\n\tif envUsername != \"\" {\n\t\tvar password string\n\t\tvar ok bool\n\n\t\tif u.User != nil {\n\t\t\tpassword, ok = u.User.Password()\n\t\t}\n\n\t\tif ok {\n\t\t\tu.User = url.UserPassword(envUsername, password)\n\t\t} else {\n\t\t\tu.User = url.User(envUsername)\n\t\t}\n\t}\n\n\t\/\/ Override password if provided\n\tif envPassword != \"\" {\n\t\tvar username string\n\n\t\tif u.User != nil {\n\t\t\tusername = u.User.Username()\n\t\t}\n\n\t\tu.User = url.UserPassword(username, envPassword)\n\t}\n}\n\nfunc exit(err error) {\n\tfmt.Fprintf(os.Stderr, \"Error: %s\\n\", err)\n\tos.Exit(1)\n}\n\nfunc main() {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tflag.Parse()\n\n\t\/\/ Parse URL from string\n\tu, err := url.Parse(*urlFlag)\n\tif err != nil {\n\t\texit(err)\n\t}\n\n\t\/\/ Override username and\/or password as required\n\tprocessOverride(u)\n\n\t\/\/ Connect and log in to ESX or vCenter\n\tc, err := govmomi.NewClient(ctx, u, *insecureFlag)\n\tif err != nil {\n\t\texit(err)\n\t}\n\n\tf := find.NewFinder(c.Client, true)\n\n\t\/\/ Find one and only datacenter\n\tdc, err := f.DefaultDatacenter(ctx)\n\tif err != nil {\n\t\texit(err)\n\t}\n\n\t\/\/ Make future calls local to this datacenter\n\tf.SetDatacenter(dc)\n\n\t\/\/ Find datastores in datacenter\n\tdss, err := f.DatastoreList(ctx, \"*\")\n\tif err != nil {\n\t\texit(err)\n\t}\n\n\tpc := property.DefaultCollector(c.Client)\n\n\t\/\/ Convert datastores into list of references\n\tvar refs []types.ManagedObjectReference\n\tfor _, ds := range dss {\n\t\trefs = append(refs, ds.Reference())\n\t}\n\n\t\/\/ Retrieve summary property for all datastores\n\tvar dst []mo.Datastore\n\terr = pc.Retrieve(ctx, refs, []string{\"summary\"}, &dst)\n\tif err != nil {\n\t\texit(err)\n\t}\n\n\t\/\/ Print summary per datastore\n\ttw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)\n\tfmt.Fprintf(tw, \"Name:\\tType:\\tCapacity:\\tFree:\\n\")\n\tfor _, ds := range dst {\n\t\tfmt.Fprintf(tw, \"%s\\t\", ds.Summary.Name)\n\t\tfmt.Fprintf(tw, \"%s\\t\", ds.Summary.Type)\n\t\tfmt.Fprintf(tw, \"%s\\t\", Humanize(ds.Summary.Capacity))\n\t\tfmt.Fprintf(tw, \"%s\\t\", Humanize(ds.Summary.FreeSpace))\n\t\tfmt.Fprintf(tw, \"\\n\")\n\t}\n\ttw.Flush()\n}\n<commit_msg>Misc clean up<commit_after>\/*\nCopyright (c) 2015 VMware, Inc. All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\n\/*\nThis example program shows how the `finder` and `property` packages can\nbe used to navigate a vSphere inventory structure using govmomi.\n*\/\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"net\/url\"\n\t\"os\"\n\t\"strings\"\n\t\"text\/tabwriter\"\n\n\t\"github.com\/vmware\/govmomi\"\n\t\"github.com\/vmware\/govmomi\/find\"\n\t\"github.com\/vmware\/govmomi\/property\"\n\t\"github.com\/vmware\/govmomi\/vim25\/mo\"\n\t\"github.com\/vmware\/govmomi\/vim25\/types\"\n\t\"golang.org\/x\/net\/context\"\n)\n\n\/\/ GetEnvString returns string from environment variable.\nfunc GetEnvString(v string, def string) string {\n\tr := os.Getenv(v)\n\tif r == \"\" {\n\t\treturn def\n\t}\n\n\treturn r\n}\n\n\/\/ GetEnvBool returns boolean from environment variable.\nfunc GetEnvBool(v string, def bool) bool {\n\tr := os.Getenv(v)\n\tif r == \"\" {\n\t\treturn def\n\t}\n\n\tswitch strings.ToLower(r[0:1]) {\n\tcase \"t\", \"y\", \"1\":\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n\/\/ Humanize converts a number in bytes to a more readable format.\nfunc Humanize(v int64) string {\n\n\tconst (\n\t\t_ = iota\n\t\tKB = 1 << (10 * iota)\n\t\tMB\n\t\tGB\n\t\tTB\n\t\tPB\n\t)\n\n\tswitch {\n\tcase v < KB:\n\t\treturn fmt.Sprintf(\"%dB\", v)\n\tcase v < MB:\n\t\treturn fmt.Sprintf(\"%.1fKB\", float32(v)\/KB)\n\tcase v < GB:\n\t\treturn fmt.Sprintf(\"%.1fMB\", float32(v)\/MB)\n\tcase v < TB:\n\t\treturn fmt.Sprintf(\"%.1fGB\", float32(v)\/GB)\n\tcase v < PB:\n\t\treturn fmt.Sprintf(\"%.1fTB\", float32(v)\/TB)\n\tdefault:\n\t\treturn \"a lot\"\n\t}\n}\n\nconst (\n\tenvURL = \"GOVMOMI_URL\"\n\tenvUserName = \"GOVMOMI_USERNAME\"\n\tenvPassword = \"GOVMOMI_PASSWORD\"\n\tenvInsecure = \"GOVMOMI_INSECURE\"\n)\n\nvar urlDescription = fmt.Sprintf(\"ESX or vCenter URL [%s]\", envURL)\nvar urlFlag = flag.String(\"url\", GetEnvString(envURL, \"https:\/\/username:password@host\/sdk\"), urlDescription)\n\nvar insecureDescription = fmt.Sprintf(\"Don't verify the server's certificate chain [%s]\", envInsecure)\nvar insecureFlag = flag.Bool(\"insecure\", GetEnvBool(envInsecure, false), insecureDescription)\n\nfunc processOverride(u *url.URL) {\n\tenvUsername := os.Getenv(envUserName)\n\tenvPassword := os.Getenv(envPassword)\n\n\t\/\/ Override username if provided\n\tif envUsername != \"\" {\n\t\tvar password string\n\t\tvar ok bool\n\n\t\tif u.User != nil {\n\t\t\tpassword, ok = u.User.Password()\n\t\t}\n\n\t\tif ok {\n\t\t\tu.User = url.UserPassword(envUsername, password)\n\t\t} else {\n\t\t\tu.User = url.User(envUsername)\n\t\t}\n\t}\n\n\t\/\/ Override password if provided\n\tif envPassword != \"\" {\n\t\tvar username string\n\n\t\tif u.User != nil {\n\t\t\tusername = u.User.Username()\n\t\t}\n\n\t\tu.User = url.UserPassword(username, envPassword)\n\t}\n}\n\nfunc exit(err error) {\n\tfmt.Fprintf(os.Stderr, \"Error: %s\\n\", err)\n\tos.Exit(1)\n}\n\nfunc main() {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tflag.Parse()\n\n\t\/\/ Parse URL from string\n\tu, err := url.Parse(*urlFlag)\n\tif err != nil {\n\t\texit(err)\n\t}\n\n\t\/\/ Override username and\/or password as required\n\tprocessOverride(u)\n\n\t\/\/ Connect and log in to ESX or vCenter\n\tc, err := govmomi.NewClient(ctx, u, *insecureFlag)\n\tif err != nil {\n\t\texit(err)\n\t}\n\n\tf := find.NewFinder(c.Client, true)\n\n\t\/\/ Find one and only datacenter\n\tdc, err := f.DefaultDatacenter(ctx)\n\tif err != nil {\n\t\texit(err)\n\t}\n\n\t\/\/ Make future calls local to this datacenter\n\tf.SetDatacenter(dc)\n\n\t\/\/ Find datastores in datacenter\n\tdss, err := f.DatastoreList(ctx, \"*\")\n\tif err != nil {\n\t\texit(err)\n\t}\n\n\tpc := property.DefaultCollector(c.Client)\n\n\t\/\/ Convert datastores into list of references\n\tvar refs []types.ManagedObjectReference\n\tfor _, ds := range dss {\n\t\trefs = append(refs, ds.Reference())\n\t}\n\n\t\/\/ Retrieve summary property for all datastores\n\tvar dst []mo.Datastore\n\terr = pc.Retrieve(ctx, refs, []string{\"summary\"}, &dst)\n\tif err != nil {\n\t\texit(err)\n\t}\n\n\t\/\/ Print summary per datastore\n\ttw := tabwriter.NewWriter(os.Stdout, 2, 0, 2, ' ', 0)\n\tfmt.Fprintf(tw, \"Name:\\tType:\\tCapacity:\\tFree:\\n\")\n\tfor _, ds := range dst {\n\t\tfmt.Fprintf(tw, \"%s\\t\", ds.Summary.Name)\n\t\tfmt.Fprintf(tw, \"%s\\t\", ds.Summary.Type)\n\t\tfmt.Fprintf(tw, \"%s\\t\", Humanize(ds.Summary.Capacity))\n\t\tfmt.Fprintf(tw, \"%s\\t\", Humanize(ds.Summary.FreeSpace))\n\t\tfmt.Fprintf(tw, \"\\n\")\n\t}\n\ttw.Flush()\n}\n<|endoftext|>"} {"text":"<commit_before>package orders\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\tamazonpay \"github.com\/qor\/amazon-pay-sdk-go\"\n\t\"github.com\/qor\/qor-example\/config\"\n\t\"github.com\/qor\/qor-example\/utils\"\n\t\"github.com\/qor\/transition\"\n)\n\nvar (\n\t\/\/ OrderState order's state machine\n\tOrderState = transition.New(&Order{})\n\n\t\/\/ ItemState order item's state machine\n\tItemState = transition.New(&OrderItem{})\n)\n\nvar (\n\t\/\/ DraftState draft state\n\tDraftState = \"draft\"\n)\n\nfunc init() {\n\t\/\/ Define Order's States\n\tOrderState.Initial(\"draft\")\n\tOrderState.State(\"pending\")\n\tOrderState.State(\"open\")\n\tOrderState.State(\"cancelled\").Enter(func(value interface{}, tx *gorm.DB) error {\n\t\ttx.Model(value).UpdateColumn(\"cancelled_at\", time.Now())\n\t\treturn nil\n\t})\n\tOrderState.State(\"paid\").Enter(func(value interface{}, tx *gorm.DB) (err error) {\n\t\tvar orderItems []OrderItem\n\n\t\ttx.Model(value).Association(\"OrderItems\").Find(&orderItems)\n\t\tfor _, item := range orderItems {\n\t\t\tif err = ItemState.Trigger(\"pay\", &item, tx); err == nil {\n\t\t\t\tif err = tx.Select(\"state\").Save(&item).Error; err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ttx.Save(value)\n\t\t\/\/ freeze stock, change items's state\n\t\treturn nil\n\t})\n\tOrderState.State(\"paid_cancelled\").Enter(func(value interface{}, tx *gorm.DB) error {\n\t\t\/\/ do refund, release stock, change items's state\n\t\treturn nil\n\t})\n\tOrderState.State(\"processing\").Enter(func(value interface{}, tx *gorm.DB) (err error) {\n\t\tvar orderItems []OrderItem\n\t\ttx.Model(value).Association(\"OrderItems\").Find(&orderItems)\n\t\tfor _, item := range orderItems {\n\t\t\tif err = ItemState.Trigger(\"process\", &item, tx); err == nil {\n\t\t\t\tif err = tx.Select(\"state\").Save(&item).Error; err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tOrderState.State(\"shipped\").Enter(func(value interface{}, tx *gorm.DB) (err error) {\n\t\ttx.Model(value).UpdateColumn(\"shipped_at\", time.Now())\n\n\t\tvar orderItems []OrderItem\n\t\ttx.Model(value).Association(\"OrderItems\").Find(&orderItems)\n\t\tfor _, item := range orderItems {\n\t\t\tif err = ItemState.Trigger(\"ship\", &item, tx); err == nil {\n\t\t\t\tif err = tx.Select(\"state\").Save(&item).Error; err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tOrderState.State(\"returned\")\n\n\tOrderState.Event(\"checkout\").To(\"pending\").From(\"draft\").Before(func(value interface{}, tx *gorm.DB) (err error) {\n\t\torder := value.(*Order)\n\t\ttx.Model(order).Association(\"OrderItems\").Find(&order.OrderItems)\n\t\tif order.OrderReferenceID != \"\" {\n\t\t\tvar refAttrs amazonpay.OrderReferenceAttributes\n\n\t\t\trefAttrs, err = config.AmazonPay.SetOrderReferenceDetails(order.OrderReferenceID, amazonpay.OrderReferenceAttributes{\n\t\t\t\tOrderTotal: amazonpay.OrderTotal{CurrencyCode: \"JPY\", Amount: utils.FormatPrice(order.Amount())},\n\t\t\t})\n\n\t\t\tfmt.Printf(\"%#v \\n\", refAttrs)\n\n\t\t\tresult, _ := json.Marshal(refAttrs)\n\t\t\torder.PaymentLog += \"\\n\" + string(result)\n\t\t\torder.PaymentMethod = AmazonPay\n\t\t} else {\n\t\t\torder.PaymentMethod = COD\n\t\t}\n\n\t\tif err == nil {\n\t\t\tfor idx, orderItem := range order.OrderItems {\n\t\t\t\torder.OrderItems[idx].Price = orderItem.SellingPrice()\n\t\t\t}\n\t\t\torder.PaymentAmount = order.Amount()\n\t\t\torder.PaymentTotal = order.Total()\n\t\t}\n\t\treturn err\n\t})\n\n\tOrderState.Event(\"pay\").To(\"paid\").From(\"checkout\")\n\n\tcancelEvent := OrderState.Event(\"cancel\")\n\tcancelEvent.To(\"cancelled\").From(\"draft\", \"checkout\")\n\tcancelEvent.To(\"paid_cacelled\").From(\"paid\", \"processing\", \"shipped\")\n\n\tOrderState.Event(\"process\").To(\"processing\").From(\"paid\")\n\tOrderState.Event(\"ship\").To(\"shipped\").From(\"processing\")\n\tOrderState.Event(\"return\").To(\"returned\").From(\"shipped\")\n\n\t\/\/ Define ItemItem's States\n\tItemState.Initial(\"checkout\")\n\tItemState.State(\"cancelled\").Enter(func(value interface{}, tx *gorm.DB) error {\n\t\t\/\/ release stock, upate order state\n\t\treturn nil\n\t})\n\tItemState.State(\"paid\").Enter(func(value interface{}, tx *gorm.DB) error {\n\t\t\/\/ freeze stock, update order state\n\t\treturn nil\n\t})\n\tItemState.State(\"paid_cancelled\").Enter(func(value interface{}, tx *gorm.DB) error {\n\t\t\/\/ do refund, release stock, update order state\n\t\treturn nil\n\t})\n\tItemState.State(\"processing\")\n\tItemState.State(\"shipped\")\n\tItemState.State(\"returned\")\n\n\tItemState.Event(\"checkout\").To(\"checkout\").From(\"draft\")\n\tItemState.Event(\"pay\").To(\"paid\").From(\"checkout\")\n\tcancelItemEvent := ItemState.Event(\"cancel\")\n\tcancelItemEvent.To(\"cancelled\").From(\"checkout\")\n\tcancelItemEvent.To(\"paid_cancelled\").From(\"paid\")\n\tItemState.Event(\"process\").To(\"processing\").From(\"paid\")\n\tItemState.Event(\"ship\").To(\"shipped\").From(\"processing\")\n\tItemState.Event(\"return\").To(\"returned\").From(\"shipped\")\n}\n<commit_msg>GetOrderDetails after confirm<commit_after>package orders\n\nimport (\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com\/jinzhu\/gorm\"\n\tamazonpay \"github.com\/qor\/amazon-pay-sdk-go\"\n\t\"github.com\/qor\/qor-example\/config\"\n\t\"github.com\/qor\/qor-example\/utils\"\n\t\"github.com\/qor\/transition\"\n)\n\nvar (\n\t\/\/ OrderState order's state machine\n\tOrderState = transition.New(&Order{})\n\n\t\/\/ ItemState order item's state machine\n\tItemState = transition.New(&OrderItem{})\n)\n\nvar (\n\t\/\/ DraftState draft state\n\tDraftState = \"draft\"\n)\n\nfunc init() {\n\t\/\/ Define Order's States\n\tOrderState.Initial(\"draft\")\n\tOrderState.State(\"pending\")\n\tOrderState.State(\"open\")\n\tOrderState.State(\"cancelled\").Enter(func(value interface{}, tx *gorm.DB) error {\n\t\ttx.Model(value).UpdateColumn(\"cancelled_at\", time.Now())\n\t\treturn nil\n\t})\n\tOrderState.State(\"paid\").Enter(func(value interface{}, tx *gorm.DB) (err error) {\n\t\tvar orderItems []OrderItem\n\n\t\ttx.Model(value).Association(\"OrderItems\").Find(&orderItems)\n\t\tfor _, item := range orderItems {\n\t\t\tif err = ItemState.Trigger(\"pay\", &item, tx); err == nil {\n\t\t\t\tif err = tx.Select(\"state\").Save(&item).Error; err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\ttx.Save(value)\n\t\t\/\/ freeze stock, change items's state\n\t\treturn nil\n\t})\n\tOrderState.State(\"paid_cancelled\").Enter(func(value interface{}, tx *gorm.DB) error {\n\t\t\/\/ do refund, release stock, change items's state\n\t\treturn nil\n\t})\n\tOrderState.State(\"processing\").Enter(func(value interface{}, tx *gorm.DB) (err error) {\n\t\tvar orderItems []OrderItem\n\t\ttx.Model(value).Association(\"OrderItems\").Find(&orderItems)\n\t\tfor _, item := range orderItems {\n\t\t\tif err = ItemState.Trigger(\"process\", &item, tx); err == nil {\n\t\t\t\tif err = tx.Select(\"state\").Save(&item).Error; err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tOrderState.State(\"shipped\").Enter(func(value interface{}, tx *gorm.DB) (err error) {\n\t\ttx.Model(value).UpdateColumn(\"shipped_at\", time.Now())\n\n\t\tvar orderItems []OrderItem\n\t\ttx.Model(value).Association(\"OrderItems\").Find(&orderItems)\n\t\tfor _, item := range orderItems {\n\t\t\tif err = ItemState.Trigger(\"ship\", &item, tx); err == nil {\n\t\t\t\tif err = tx.Select(\"state\").Save(&item).Error; err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\tOrderState.State(\"returned\")\n\n\tOrderState.Event(\"checkout\").To(\"pending\").From(\"draft\").Before(func(value interface{}, tx *gorm.DB) (err error) {\n\t\torder := value.(*Order)\n\t\ttx.Model(order).Association(\"OrderItems\").Find(&order.OrderItems)\n\t\tif order.OrderReferenceID != \"\" {\n\t\t\tvar refAttrs amazonpay.OrderReferenceAttributes\n\t\t\tvar refDetails amazonpay.OrderReferenceDetails\n\n\t\t\trefAttrs, err = config.AmazonPay.SetOrderReferenceDetails(order.OrderReferenceID, amazonpay.OrderReferenceAttributes{\n\t\t\t\tOrderTotal: amazonpay.OrderTotal{CurrencyCode: \"JPY\", Amount: utils.FormatPrice(order.Amount())},\n\t\t\t})\n\n\t\t\trefDetails, err = config.AmazonPay.GetOrderReferenceDetails(order.OrderReferenceID, order.AddressAccessToken)\n\t\t\tfmt.Printf(\"%#v \\n\", refAttrs)\n\t\t\tfmt.Printf(\"%#v \\n\", refDetails)\n\n\t\t\tresult, _ := json.Marshal(refAttrs)\n\t\t\torder.PaymentLog += \"\\n\" + string(result)\n\t\t\torder.PaymentMethod = AmazonPay\n\t\t} else {\n\t\t\torder.PaymentMethod = COD\n\t\t}\n\n\t\tif err == nil {\n\t\t\tfor idx, orderItem := range order.OrderItems {\n\t\t\t\torder.OrderItems[idx].Price = orderItem.SellingPrice()\n\t\t\t}\n\t\t\torder.PaymentAmount = order.Amount()\n\t\t\torder.PaymentTotal = order.Total()\n\t\t}\n\t\treturn err\n\t})\n\n\tOrderState.Event(\"pay\").To(\"paid\").From(\"checkout\")\n\n\tcancelEvent := OrderState.Event(\"cancel\")\n\tcancelEvent.To(\"cancelled\").From(\"draft\", \"checkout\")\n\tcancelEvent.To(\"paid_cacelled\").From(\"paid\", \"processing\", \"shipped\")\n\n\tOrderState.Event(\"process\").To(\"processing\").From(\"paid\")\n\tOrderState.Event(\"ship\").To(\"shipped\").From(\"processing\")\n\tOrderState.Event(\"return\").To(\"returned\").From(\"shipped\")\n\n\t\/\/ Define ItemItem's States\n\tItemState.Initial(\"checkout\")\n\tItemState.State(\"cancelled\").Enter(func(value interface{}, tx *gorm.DB) error {\n\t\t\/\/ release stock, upate order state\n\t\treturn nil\n\t})\n\tItemState.State(\"paid\").Enter(func(value interface{}, tx *gorm.DB) error {\n\t\t\/\/ freeze stock, update order state\n\t\treturn nil\n\t})\n\tItemState.State(\"paid_cancelled\").Enter(func(value interface{}, tx *gorm.DB) error {\n\t\t\/\/ do refund, release stock, update order state\n\t\treturn nil\n\t})\n\tItemState.State(\"processing\")\n\tItemState.State(\"shipped\")\n\tItemState.State(\"returned\")\n\n\tItemState.Event(\"checkout\").To(\"checkout\").From(\"draft\")\n\tItemState.Event(\"pay\").To(\"paid\").From(\"checkout\")\n\tcancelItemEvent := ItemState.Event(\"cancel\")\n\tcancelItemEvent.To(\"cancelled\").From(\"checkout\")\n\tcancelItemEvent.To(\"paid_cancelled\").From(\"paid\")\n\tItemState.Event(\"process\").To(\"processing\").From(\"paid\")\n\tItemState.Event(\"ship\").To(\"shipped\").From(\"processing\")\n\tItemState.Event(\"return\").To(\"returned\").From(\"shipped\")\n}\n<|endoftext|>"} {"text":"<commit_before>package cluster\n\nimport (\n\t\"crypto\/tls\"\n\t\"encoding\/base64\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/lxc\/lxd\/client\"\n\tclusterRequest \"github.com\/lxc\/lxd\/lxd\/cluster\/request\"\n\t\"github.com\/lxc\/lxd\/lxd\/db\"\n\t\"github.com\/lxc\/lxd\/lxd\/instance\/instancetype\"\n\t\"github.com\/lxc\/lxd\/lxd\/request\"\n\t\"github.com\/lxc\/lxd\/lxd\/state\"\n\tstoragePools \"github.com\/lxc\/lxd\/lxd\/storage\"\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n\t\"github.com\/lxc\/lxd\/shared\/version\"\n)\n\n\/\/ ErrCertificateExists indicates that a certificate already exists.\nvar ErrCertificateExists error = fmt.Errorf(\"Certificate already in trust store\")\n\n\/\/ Connect is a convenience around lxd.ConnectLXD that configures the client\n\/\/ with the correct parameters for node-to-node communication.\n\/\/\n\/\/ If 'notify' switch is true, then the user agent will be set to the special\n\/\/ to the UserAgentNotifier value, which can be used in some cases to distinguish\n\/\/ between a regular client request and an internal cluster request.\nfunc Connect(address string, networkCert *shared.CertInfo, serverCert *shared.CertInfo, r *http.Request, notify bool) (lxd.InstanceServer, error) {\n\t\/\/ Wait for a connection to the events API first for non-notify connections.\n\tif !notify {\n\t\tconnected := false\n\t\tfor i := 0; i < 20; i++ {\n\t\t\tlistenersLock.Lock()\n\t\t\t_, ok := listeners[address]\n\t\t\tlistenersLock.Unlock()\n\n\t\t\tif ok {\n\t\t\t\tconnected = true\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\ttime.Sleep(500 * time.Millisecond)\n\t\t}\n\n\t\tif !connected {\n\t\t\treturn nil, fmt.Errorf(\"Missing event connection with target cluster member\")\n\t\t}\n\t}\n\n\targs := &lxd.ConnectionArgs{\n\t\tTLSServerCert: string(networkCert.PublicKey()),\n\t\tTLSClientCert: string(serverCert.PublicKey()),\n\t\tTLSClientKey: string(serverCert.PrivateKey()),\n\t\tSkipGetServer: true,\n\t\tUserAgent: version.UserAgent,\n\t}\n\n\tif notify {\n\t\targs.UserAgent = clusterRequest.UserAgentNotifier\n\t}\n\n\tif r != nil {\n\t\tproxy := func(req *http.Request) (*url.URL, error) {\n\t\t\tctx := r.Context()\n\n\t\t\tval, ok := ctx.Value(request.CtxUsername).(string)\n\t\t\tif ok {\n\t\t\t\treq.Header.Add(request.HeaderForwardedUsername, val)\n\t\t\t}\n\n\t\t\tval, ok = ctx.Value(request.CtxProtocol).(string)\n\t\t\tif ok {\n\t\t\t\treq.Header.Add(request.HeaderForwardedProtocol, val)\n\t\t\t}\n\n\t\t\treq.Header.Add(request.HeaderForwardedAddress, r.RemoteAddr)\n\n\t\t\treturn shared.ProxyFromEnvironment(req)\n\t\t}\n\n\t\targs.Proxy = proxy\n\t}\n\n\turl := fmt.Sprintf(\"https:\/\/%s\", address)\n\treturn lxd.ConnectLXD(url, args)\n}\n\n\/\/ ConnectIfInstanceIsRemote figures out the address of the node which is\n\/\/ running the container with the given name. If it's not the local node will\n\/\/ connect to it and return the connected client, otherwise it will just return\n\/\/ nil.\nfunc ConnectIfInstanceIsRemote(cluster *db.Cluster, projectName string, name string, networkCert *shared.CertInfo, serverCert *shared.CertInfo, r *http.Request, instanceType instancetype.Type) (lxd.InstanceServer, error) {\n\tvar address string \/\/ Node address\n\terr := cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\tvar err error\n\t\taddress, err = tx.GetNodeAddressOfInstance(projectName, name, db.InstanceTypeFilter(instanceType))\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif address == \"\" {\n\t\t\/\/ The instance is running right on this node, no need to connect.\n\t\treturn nil, nil\n\t}\n\n\treturn Connect(address, networkCert, serverCert, r, false)\n}\n\n\/\/ ConnectIfVolumeIsRemote figures out the address of the cluster member on which the volume with the given name is\n\/\/ defined. If it's not the local cluster member it will connect to it and return the connected client, otherwise\n\/\/ it just returns nil. If there is more than one cluster member with a matching volume name, an error is returned.\nfunc ConnectIfVolumeIsRemote(s *state.State, poolName string, projectName string, volumeName string, volumeType int, networkCert *shared.CertInfo, serverCert *shared.CertInfo, r *http.Request) (lxd.InstanceServer, error) {\n\tlocalNodeID := s.Cluster.GetNodeID()\n\tvar err error\n\tvar nodes []db.NodeInfo\n\tvar poolID int64\n\terr = s.Cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\tpoolID, err = tx.GetStoragePoolID(poolName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tnodes, err = tx.GetStorageVolumeNodes(poolID, projectName, volumeName, volumeType)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil && err != db.ErrNoClusterMember {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If volume uses a remote storage driver and so has no explicit cluster member, then we need to check\n\t\/\/ whether it is exclusively attached to remote instance, and if so then we need to forward the request to\n\t\/\/ the node whereit is currently used. This avoids conflicting with another member when using it locally.\n\tif err == db.ErrNoClusterMember {\n\t\t\/\/ GetLocalStoragePoolVolume returns a volume with an empty Location field for remote drivers.\n\t\t_, vol, err := s.Cluster.GetLocalStoragePoolVolume(projectName, volumeName, volumeType, poolID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tremoteInstance, err := storagePools.VolumeUsedByExclusiveRemoteInstancesWithProfiles(s, poolName, projectName, vol)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"Failed checking if volume %q is available\", volumeName)\n\t\t}\n\n\t\tif remoteInstance != nil {\n\t\t\tvar instNode db.NodeInfo\n\t\t\terr := s.Cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\t\t\tinstNode, err = tx.GetNodeByName(remoteInstance.Node)\n\t\t\t\treturn err\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"Failed getting cluster member info for %q\", remoteInstance.Node)\n\t\t\t}\n\n\t\t\t\/\/ Replace node list with instance's cluster member node (which might be local member).\n\t\t\tnodes = []db.NodeInfo{instNode}\n\t\t} else {\n\t\t\t\/\/ Volume isn't exclusively attached to an instance. Use local cluster member.\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\n\tnodeCount := len(nodes)\n\tif nodeCount > 1 {\n\t\treturn nil, fmt.Errorf(\"More than one cluster member has a volume named %q. Please target a specific member\", volumeName)\n\t} else if nodeCount < 1 {\n\t\t\/\/ Should never get here.\n\t\treturn nil, fmt.Errorf(\"Volume %q has empty cluster member list\", volumeName)\n\t}\n\n\tnode := nodes[0]\n\tif node.ID == localNodeID {\n\t\t\/\/ Use local cluster member if volume belongs to this local node.\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Connect to remote cluster member.\n\treturn Connect(node.Address, networkCert, serverCert, r, false)\n}\n\n\/\/ SetupTrust is a convenience around InstanceServer.CreateCertificate that adds the given server certificate to\n\/\/ the trusted pool of the cluster at the given address, using the given password. The certificate is added as\n\/\/ type CertificateTypeServer to allow intra-member communication. If a certificate with the same fingerprint\n\/\/ already exists with a different name or type, then no error is returned.\nfunc SetupTrust(serverCert *shared.CertInfo, serverName string, targetAddress string, targetCert string, targetPassword string) error {\n\t\/\/ Connect to the target cluster node.\n\targs := &lxd.ConnectionArgs{\n\t\tTLSServerCert: targetCert,\n\t\tUserAgent: version.UserAgent,\n\t}\n\n\ttarget, err := lxd.ConnectLXD(fmt.Sprintf(\"https:\/\/%s\", targetAddress), args)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed to connect to target cluster node %q\", targetAddress)\n\t}\n\n\tcert, err := generateTrustCertificate(serverCert, serverName)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed generating trust certificate\")\n\t}\n\n\tpost := api.CertificatesPost{\n\t\tCertificatePut: cert.CertificatePut,\n\t\tPassword: targetPassword,\n\t}\n\n\terr = target.CreateCertificate(post)\n\tif err != nil && err.Error() != ErrCertificateExists.Error() {\n\t\treturn errors.Wrap(err, \"Failed to add server cert to cluster\")\n\t}\n\n\treturn nil\n}\n\n\/\/ UpdateTrust ensures that the supplied certificate is stored in the target trust store with the correct name\n\/\/ and type to ensure correct cluster operation. Should be called after SetupTrust. If a certificate with the same\n\/\/ fingerprint is already in the trust store, but is of the wrong type or name then the existing certificate is\n\/\/ updated to the correct type and name. If the existing certificate is the correct type but the wrong name then an\n\/\/ error is returned. And if the existing certificate is the correct type and name then nothing more is done.\nfunc UpdateTrust(serverCert *shared.CertInfo, serverName string, targetAddress string, targetCert string) error {\n\t\/\/ Connect to the target cluster node.\n\targs := &lxd.ConnectionArgs{\n\t\tTLSClientCert: string(serverCert.PublicKey()),\n\t\tTLSClientKey: string(serverCert.PrivateKey()),\n\t\tTLSServerCert: targetCert,\n\t\tUserAgent: version.UserAgent,\n\t}\n\n\ttarget, err := lxd.ConnectLXD(fmt.Sprintf(\"https:\/\/%s\", targetAddress), args)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed to connect to target cluster node %q\", targetAddress)\n\t}\n\n\tcert, err := generateTrustCertificate(serverCert, serverName)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed generating trust certificate\")\n\t}\n\n\texistingCert, _, err := target.GetCertificate(cert.Fingerprint)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed getting existing certificate\")\n\t}\n\n\tif existingCert.Name != serverName && existingCert.Type == api.CertificateTypeServer {\n\t\t\/\/ Don't alter an existing server certificate that has our fingerprint but not our name.\n\t\t\/\/ Something is wrong as this shouldn't happen.\n\t\treturn fmt.Errorf(\"Existing server certificate with different name %q already in trust store\", existingCert.Name)\n\t} else if existingCert.Name != serverName && existingCert.Type != api.CertificateTypeServer {\n\t\t\/\/ Ensure that if a client certificate already exists that matches our fingerprint, that it\n\t\t\/\/ has the correct name and type for cluster operation, to allow us to associate member\n\t\t\/\/ server names to certificate names.\n\t\terr = target.UpdateCertificate(cert.Fingerprint, cert.CertificatePut, \"\")\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"Failed updating certificate name and type in trust store\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ generateTrustCertificate converts the specified serverCert and serverName into an api.Certificate suitable for\n\/\/ use as a trusted cluster server certificate.\nfunc generateTrustCertificate(serverCert *shared.CertInfo, serverName string) (*api.Certificate, error) {\n\tblock, _ := pem.Decode(serverCert.PublicKey())\n\tif block == nil {\n\t\treturn nil, fmt.Errorf(\"Failed to decode certificate\")\n\t}\n\n\tfingerprint, err := shared.CertFingerprintStr(string(serverCert.PublicKey()))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to calculate fingerprint\")\n\t}\n\n\tcertificate := base64.StdEncoding.EncodeToString(block.Bytes)\n\tcert := api.Certificate{\n\t\tCertificatePut: api.CertificatePut{\n\t\t\tCertificate: certificate,\n\t\t\tName: serverName,\n\t\t\tType: api.CertificateTypeServer, \/\/ Server type for intra-member communication.\n\t\t},\n\t\tFingerprint: fingerprint,\n\t}\n\n\treturn &cert, nil\n}\n\n\/\/ HasConnectivity probes the member with the given address for connectivity.\nfunc HasConnectivity(networkCert *shared.CertInfo, serverCert *shared.CertInfo, address string) bool {\n\tconfig, err := tlsClientConfig(networkCert, serverCert)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tvar conn net.Conn\n\tdialer := &net.Dialer{Timeout: time.Second}\n\tconn, err = tls.DialWithDialer(dialer, \"tcp\", address, config)\n\tif err == nil {\n\t\tconn.Close()\n\t\treturn true\n\t}\n\treturn false\n}\n<commit_msg>lxd\/cluster\/connect: Switch Connect to use EventListenerWait<commit_after>package cluster\n\nimport (\n\t\"context\"\n\t\"crypto\/tls\"\n\t\"encoding\/base64\"\n\t\"encoding\/pem\"\n\t\"fmt\"\n\t\"net\"\n\t\"net\/http\"\n\t\"net\/url\"\n\t\"time\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/lxc\/lxd\/client\"\n\tclusterRequest \"github.com\/lxc\/lxd\/lxd\/cluster\/request\"\n\t\"github.com\/lxc\/lxd\/lxd\/db\"\n\t\"github.com\/lxc\/lxd\/lxd\/instance\/instancetype\"\n\t\"github.com\/lxc\/lxd\/lxd\/request\"\n\t\"github.com\/lxc\/lxd\/lxd\/state\"\n\tstoragePools \"github.com\/lxc\/lxd\/lxd\/storage\"\n\t\"github.com\/lxc\/lxd\/shared\"\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n\t\"github.com\/lxc\/lxd\/shared\/version\"\n)\n\n\/\/ ErrCertificateExists indicates that a certificate already exists.\nvar ErrCertificateExists error = fmt.Errorf(\"Certificate already in trust store\")\n\n\/\/ Connect is a convenience around lxd.ConnectLXD that configures the client\n\/\/ with the correct parameters for node-to-node communication.\n\/\/\n\/\/ If 'notify' switch is true, then the user agent will be set to the special\n\/\/ to the UserAgentNotifier value, which can be used in some cases to distinguish\n\/\/ between a regular client request and an internal cluster request.\nfunc Connect(address string, networkCert *shared.CertInfo, serverCert *shared.CertInfo, r *http.Request, notify bool) (lxd.InstanceServer, error) {\n\t\/\/ Wait for a connection to the events API first for non-notify connections.\n\tif !notify {\n\t\tctx, cancel := context.WithTimeout(context.Background(), time.Duration(10)*time.Second)\n\t\tdefer cancel()\n\t\t_, err := EventListenerWait(ctx, address)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Missing event connection with target cluster member\")\n\t\t}\n\t}\n\n\targs := &lxd.ConnectionArgs{\n\t\tTLSServerCert: string(networkCert.PublicKey()),\n\t\tTLSClientCert: string(serverCert.PublicKey()),\n\t\tTLSClientKey: string(serverCert.PrivateKey()),\n\t\tSkipGetServer: true,\n\t\tUserAgent: version.UserAgent,\n\t}\n\n\tif notify {\n\t\targs.UserAgent = clusterRequest.UserAgentNotifier\n\t}\n\n\tif r != nil {\n\t\tproxy := func(req *http.Request) (*url.URL, error) {\n\t\t\tctx := r.Context()\n\n\t\t\tval, ok := ctx.Value(request.CtxUsername).(string)\n\t\t\tif ok {\n\t\t\t\treq.Header.Add(request.HeaderForwardedUsername, val)\n\t\t\t}\n\n\t\t\tval, ok = ctx.Value(request.CtxProtocol).(string)\n\t\t\tif ok {\n\t\t\t\treq.Header.Add(request.HeaderForwardedProtocol, val)\n\t\t\t}\n\n\t\t\treq.Header.Add(request.HeaderForwardedAddress, r.RemoteAddr)\n\n\t\t\treturn shared.ProxyFromEnvironment(req)\n\t\t}\n\n\t\targs.Proxy = proxy\n\t}\n\n\turl := fmt.Sprintf(\"https:\/\/%s\", address)\n\treturn lxd.ConnectLXD(url, args)\n}\n\n\/\/ ConnectIfInstanceIsRemote figures out the address of the node which is\n\/\/ running the container with the given name. If it's not the local node will\n\/\/ connect to it and return the connected client, otherwise it will just return\n\/\/ nil.\nfunc ConnectIfInstanceIsRemote(cluster *db.Cluster, projectName string, name string, networkCert *shared.CertInfo, serverCert *shared.CertInfo, r *http.Request, instanceType instancetype.Type) (lxd.InstanceServer, error) {\n\tvar address string \/\/ Node address\n\terr := cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\tvar err error\n\t\taddress, err = tx.GetNodeAddressOfInstance(projectName, name, db.InstanceTypeFilter(instanceType))\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif address == \"\" {\n\t\t\/\/ The instance is running right on this node, no need to connect.\n\t\treturn nil, nil\n\t}\n\n\treturn Connect(address, networkCert, serverCert, r, false)\n}\n\n\/\/ ConnectIfVolumeIsRemote figures out the address of the cluster member on which the volume with the given name is\n\/\/ defined. If it's not the local cluster member it will connect to it and return the connected client, otherwise\n\/\/ it just returns nil. If there is more than one cluster member with a matching volume name, an error is returned.\nfunc ConnectIfVolumeIsRemote(s *state.State, poolName string, projectName string, volumeName string, volumeType int, networkCert *shared.CertInfo, serverCert *shared.CertInfo, r *http.Request) (lxd.InstanceServer, error) {\n\tlocalNodeID := s.Cluster.GetNodeID()\n\tvar err error\n\tvar nodes []db.NodeInfo\n\tvar poolID int64\n\terr = s.Cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\tpoolID, err = tx.GetStoragePoolID(poolName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tnodes, err = tx.GetStorageVolumeNodes(poolID, projectName, volumeName, volumeType)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil && err != db.ErrNoClusterMember {\n\t\treturn nil, err\n\t}\n\n\t\/\/ If volume uses a remote storage driver and so has no explicit cluster member, then we need to check\n\t\/\/ whether it is exclusively attached to remote instance, and if so then we need to forward the request to\n\t\/\/ the node whereit is currently used. This avoids conflicting with another member when using it locally.\n\tif err == db.ErrNoClusterMember {\n\t\t\/\/ GetLocalStoragePoolVolume returns a volume with an empty Location field for remote drivers.\n\t\t_, vol, err := s.Cluster.GetLocalStoragePoolVolume(projectName, volumeName, volumeType, poolID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tremoteInstance, err := storagePools.VolumeUsedByExclusiveRemoteInstancesWithProfiles(s, poolName, projectName, vol)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"Failed checking if volume %q is available\", volumeName)\n\t\t}\n\n\t\tif remoteInstance != nil {\n\t\t\tvar instNode db.NodeInfo\n\t\t\terr := s.Cluster.Transaction(func(tx *db.ClusterTx) error {\n\t\t\t\tinstNode, err = tx.GetNodeByName(remoteInstance.Node)\n\t\t\t\treturn err\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"Failed getting cluster member info for %q\", remoteInstance.Node)\n\t\t\t}\n\n\t\t\t\/\/ Replace node list with instance's cluster member node (which might be local member).\n\t\t\tnodes = []db.NodeInfo{instNode}\n\t\t} else {\n\t\t\t\/\/ Volume isn't exclusively attached to an instance. Use local cluster member.\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\n\tnodeCount := len(nodes)\n\tif nodeCount > 1 {\n\t\treturn nil, fmt.Errorf(\"More than one cluster member has a volume named %q. Please target a specific member\", volumeName)\n\t} else if nodeCount < 1 {\n\t\t\/\/ Should never get here.\n\t\treturn nil, fmt.Errorf(\"Volume %q has empty cluster member list\", volumeName)\n\t}\n\n\tnode := nodes[0]\n\tif node.ID == localNodeID {\n\t\t\/\/ Use local cluster member if volume belongs to this local node.\n\t\treturn nil, nil\n\t}\n\n\t\/\/ Connect to remote cluster member.\n\treturn Connect(node.Address, networkCert, serverCert, r, false)\n}\n\n\/\/ SetupTrust is a convenience around InstanceServer.CreateCertificate that adds the given server certificate to\n\/\/ the trusted pool of the cluster at the given address, using the given password. The certificate is added as\n\/\/ type CertificateTypeServer to allow intra-member communication. If a certificate with the same fingerprint\n\/\/ already exists with a different name or type, then no error is returned.\nfunc SetupTrust(serverCert *shared.CertInfo, serverName string, targetAddress string, targetCert string, targetPassword string) error {\n\t\/\/ Connect to the target cluster node.\n\targs := &lxd.ConnectionArgs{\n\t\tTLSServerCert: targetCert,\n\t\tUserAgent: version.UserAgent,\n\t}\n\n\ttarget, err := lxd.ConnectLXD(fmt.Sprintf(\"https:\/\/%s\", targetAddress), args)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed to connect to target cluster node %q\", targetAddress)\n\t}\n\n\tcert, err := generateTrustCertificate(serverCert, serverName)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed generating trust certificate\")\n\t}\n\n\tpost := api.CertificatesPost{\n\t\tCertificatePut: cert.CertificatePut,\n\t\tPassword: targetPassword,\n\t}\n\n\terr = target.CreateCertificate(post)\n\tif err != nil && err.Error() != ErrCertificateExists.Error() {\n\t\treturn errors.Wrap(err, \"Failed to add server cert to cluster\")\n\t}\n\n\treturn nil\n}\n\n\/\/ UpdateTrust ensures that the supplied certificate is stored in the target trust store with the correct name\n\/\/ and type to ensure correct cluster operation. Should be called after SetupTrust. If a certificate with the same\n\/\/ fingerprint is already in the trust store, but is of the wrong type or name then the existing certificate is\n\/\/ updated to the correct type and name. If the existing certificate is the correct type but the wrong name then an\n\/\/ error is returned. And if the existing certificate is the correct type and name then nothing more is done.\nfunc UpdateTrust(serverCert *shared.CertInfo, serverName string, targetAddress string, targetCert string) error {\n\t\/\/ Connect to the target cluster node.\n\targs := &lxd.ConnectionArgs{\n\t\tTLSClientCert: string(serverCert.PublicKey()),\n\t\tTLSClientKey: string(serverCert.PrivateKey()),\n\t\tTLSServerCert: targetCert,\n\t\tUserAgent: version.UserAgent,\n\t}\n\n\ttarget, err := lxd.ConnectLXD(fmt.Sprintf(\"https:\/\/%s\", targetAddress), args)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed to connect to target cluster node %q\", targetAddress)\n\t}\n\n\tcert, err := generateTrustCertificate(serverCert, serverName)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed generating trust certificate\")\n\t}\n\n\texistingCert, _, err := target.GetCertificate(cert.Fingerprint)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed getting existing certificate\")\n\t}\n\n\tif existingCert.Name != serverName && existingCert.Type == api.CertificateTypeServer {\n\t\t\/\/ Don't alter an existing server certificate that has our fingerprint but not our name.\n\t\t\/\/ Something is wrong as this shouldn't happen.\n\t\treturn fmt.Errorf(\"Existing server certificate with different name %q already in trust store\", existingCert.Name)\n\t} else if existingCert.Name != serverName && existingCert.Type != api.CertificateTypeServer {\n\t\t\/\/ Ensure that if a client certificate already exists that matches our fingerprint, that it\n\t\t\/\/ has the correct name and type for cluster operation, to allow us to associate member\n\t\t\/\/ server names to certificate names.\n\t\terr = target.UpdateCertificate(cert.Fingerprint, cert.CertificatePut, \"\")\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"Failed updating certificate name and type in trust store\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ generateTrustCertificate converts the specified serverCert and serverName into an api.Certificate suitable for\n\/\/ use as a trusted cluster server certificate.\nfunc generateTrustCertificate(serverCert *shared.CertInfo, serverName string) (*api.Certificate, error) {\n\tblock, _ := pem.Decode(serverCert.PublicKey())\n\tif block == nil {\n\t\treturn nil, fmt.Errorf(\"Failed to decode certificate\")\n\t}\n\n\tfingerprint, err := shared.CertFingerprintStr(string(serverCert.PublicKey()))\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Failed to calculate fingerprint\")\n\t}\n\n\tcertificate := base64.StdEncoding.EncodeToString(block.Bytes)\n\tcert := api.Certificate{\n\t\tCertificatePut: api.CertificatePut{\n\t\t\tCertificate: certificate,\n\t\t\tName: serverName,\n\t\t\tType: api.CertificateTypeServer, \/\/ Server type for intra-member communication.\n\t\t},\n\t\tFingerprint: fingerprint,\n\t}\n\n\treturn &cert, nil\n}\n\n\/\/ HasConnectivity probes the member with the given address for connectivity.\nfunc HasConnectivity(networkCert *shared.CertInfo, serverCert *shared.CertInfo, address string) bool {\n\tconfig, err := tlsClientConfig(networkCert, serverCert)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tvar conn net.Conn\n\tdialer := &net.Dialer{Timeout: time.Second}\n\tconn, err = tls.DialWithDialer(dialer, \"tcp\", address, config)\n\tif err == nil {\n\t\tconn.Close()\n\t\treturn true\n\t}\n\treturn false\n}\n<|endoftext|>"} {"text":"<commit_before>package models\n\nimport (\n\t\"sort\"\n\t\"strings\"\n)\n\ntype GroupID string\n\nconst (\n\tNilGroup GroupID = \"\"\n)\n\ntype Dimensions struct {\n\tByName bool\n\tTagNames []string\n}\n\nfunc (d Dimensions) Equal(o Dimensions) bool {\n\tif d.ByName != o.ByName || len(d.TagNames) != len(o.TagNames) {\n\t\treturn false\n\t}\n\tfor i := range d.TagNames {\n\t\tif d.TagNames[i] != o.TagNames[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\nfunc (d Dimensions) Copy() Dimensions {\n\ttags := make([]string, len(d.TagNames))\n\tcopy(tags, d.TagNames)\n\treturn Dimensions{ByName: d.ByName, TagNames: tags}\n}\n\nfunc (d Dimensions) ToSet() map[string]bool {\n\tset := make(map[string]bool, len(d.TagNames))\n\tfor _, dim := range d.TagNames {\n\t\tset[dim] = true\n\t}\n\treturn set\n}\n\ntype Fields map[string]interface{}\n\nfunc (f Fields) Copy() Fields {\n\tcf := make(Fields, len(f))\n\tfor k, v := range f {\n\t\tcf[k] = v\n\t}\n\treturn cf\n}\n\nfunc SortedFields(fields Fields) []string {\n\ta := make([]string, 0, len(fields))\n\tfor k := range fields {\n\t\ta = append(a, k)\n\t}\n\tsort.Strings(a)\n\treturn a\n}\n\ntype Tags map[string]string\n\nfunc (t Tags) Copy() Tags {\n\tct := make(Tags, len(t))\n\tfor k, v := range t {\n\t\tct[k] = v\n\t}\n\treturn ct\n}\n\nfunc SortedKeys(tags map[string]string) []string {\n\ta := make([]string, 0, len(tags))\n\tfor k := range tags {\n\t\ta = append(a, k)\n\t}\n\tsort.Strings(a)\n\treturn a\n}\n\nfunc ToGroupID(name string, tags map[string]string, dims Dimensions) GroupID {\n\tif len(dims.TagNames) == 0 {\n\t\tif dims.ByName {\n\t\t\treturn GroupID(name)\n\t\t}\n\t\treturn NilGroup\n\t}\n\tvar buf strings.Builder\n\tif dims.ByName {\n\t\tbuf.WriteString(name)\n\t\t\/\/ Add delimiter that is not allowed in name.\n\t\tbuf.WriteRune('\\n')\n\t}\n\tfor i, d := range dims.TagNames {\n\t\tif i != 0 {\n\t\t\tbuf.WriteRune(',')\n\t\t}\n\t\tbuf.WriteString(d)\n\t\tbuf.WriteRune('=')\n\t\tbuf.WriteString(tags[d])\n\n\t}\n\treturn GroupID(buf.String())\n}\n<commit_msg>perf: preallocate GroupID<commit_after>package models\n\nimport (\n\t\"sort\"\n\t\"strings\"\n)\n\ntype GroupID string\n\nconst (\n\tNilGroup GroupID = \"\"\n)\n\ntype Dimensions struct {\n\tByName bool\n\tTagNames []string\n}\n\nfunc (d Dimensions) Equal(o Dimensions) bool {\n\tif d.ByName != o.ByName || len(d.TagNames) != len(o.TagNames) {\n\t\treturn false\n\t}\n\tfor i := range d.TagNames {\n\t\tif d.TagNames[i] != o.TagNames[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\nfunc (d Dimensions) Copy() Dimensions {\n\ttags := make([]string, len(d.TagNames))\n\tcopy(tags, d.TagNames)\n\treturn Dimensions{ByName: d.ByName, TagNames: tags}\n}\n\nfunc (d Dimensions) ToSet() map[string]bool {\n\tset := make(map[string]bool, len(d.TagNames))\n\tfor _, dim := range d.TagNames {\n\t\tset[dim] = true\n\t}\n\treturn set\n}\n\ntype Fields map[string]interface{}\n\nfunc (f Fields) Copy() Fields {\n\tcf := make(Fields, len(f))\n\tfor k, v := range f {\n\t\tcf[k] = v\n\t}\n\treturn cf\n}\n\nfunc SortedFields(fields Fields) []string {\n\ta := make([]string, 0, len(fields))\n\tfor k := range fields {\n\t\ta = append(a, k)\n\t}\n\tsort.Strings(a)\n\treturn a\n}\n\ntype Tags map[string]string\n\nfunc (t Tags) Copy() Tags {\n\tct := make(Tags, len(t))\n\tfor k, v := range t {\n\t\tct[k] = v\n\t}\n\treturn ct\n}\n\nfunc SortedKeys(tags map[string]string) []string {\n\ta := make([]string, 0, len(tags))\n\tfor k := range tags {\n\t\ta = append(a, k)\n\t}\n\tsort.Strings(a)\n\treturn a\n}\n\nfunc ToGroupID(name string, tags map[string]string, dims Dimensions) GroupID {\n\tif len(dims.TagNames) == 0 {\n\t\tif dims.ByName {\n\t\t\treturn GroupID(name)\n\t\t}\n\t\treturn NilGroup\n\t}\n\tvar buf strings.Builder\n\tl := 0\n\tif dims.ByName {\n\t\tl += len(name) + 1\n\t}\n\tfor i, d := range dims.TagNames {\n\t\tif i != 0 {\n\t\t\tl++\n\t\t}\n\t\tl += len(d) + len(tags[d]) + 1\n\t}\n\tbuf.Grow(l)\n\tif dims.ByName {\n\t\tbuf.WriteString(name)\n\t\t\/\/ Add delimiter that is not allowed in name.\n\t\tbuf.WriteRune('\\n')\n\t}\n\tfor i, d := range dims.TagNames {\n\t\tif i != 0 {\n\t\t\tbuf.WriteRune(',')\n\t\t}\n\t\tbuf.WriteString(d)\n\t\tbuf.WriteRune('=')\n\t\tbuf.WriteString(tags[d])\n\n\t}\n\treturn GroupID(buf.String())\n}\n<|endoftext|>"} {"text":"<commit_before>package structTag\n\n<commit_msg>add simple test<commit_after>package structTag\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n)\n\ntype testStruct struct {\n\tA int `cfg:\"A\" cfgDefault:\"100\"`\n\tB string `cfg:\"B\" cfgDefault:\"200\"`\n\tS testSub `cfg:\"S\"`\n}\n\ntype testSub struct {\n\tA int `cfg:\"A\" cfgDefault:\"300\"`\n\tB int `cfg:\"B\" cfgDefault:\"400\"`\n\tC string `cfg:\"C\" cfgDefault:\"500\"`\n\tS testSubSub `cfg:\"S\"`\n}\ntype testSubSub struct {\n\tA int `cfg:\"A\" cfgDefault:\"600\"`\n\tB int `cfg:\"B\" cfgDefault:\"700\"`\n\tC string `cfg:\"S\" cfgDefault:\"900\"`\n}\n\nfunc ReflectTestFunc(field *reflect.StructField, value *reflect.Value, tag string) (err error) {\n\treturn\n}\n\nfunc TestParse(t *testing.T) {\n\tTag = \"cfg\"\n\tTagDefault = \"cfgDefault\"\n\n\ts := &testStruct{A: 1, S: testSub{A: 1, B: 2, C: \"test\"}}\n\n\terr := Parse(s, \"\")\n\tif err != ErrTypeNotSupported {\n\t\tt.Fatal(\"ErrTypeNotSupported error expected\")\n\t}\n\n\tParseMap[reflect.Int] = ReflectTestFunc\n\tParseMap[reflect.String] = ReflectTestFunc\n\terr = Parse(s, \"\")\n\tif err != nil {\n\t\tt.Fatal(\"teste\", err)\n\t}\n\n\tfmt.Printf(\"\\n\\nTestParseTags: %#v\\n\\n\", s)\n\n\ts1 := \"test\"\n\terr = Parse(s1, \"\")\n\tif err != ErrNotAPointer {\n\t\tt.Fatal(\"ErrNotAPointer error expected\")\n\t}\n\n\terr = Parse(&s1, \"\")\n\tif err != ErrNotAStruct {\n\t\tt.Fatal(\"ErrNotAStruct error expected\")\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package json\n\nimport (\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/cristianoliveira\/apitogo\/common\"\n\t\"github.com\/gorilla\/mux\"\n)\n\nfunc HandleGetAll(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=UTF-8\")\n\tsettings := common.Settings()\n\tvars := mux.Vars(r)\n\n\tpath := settings.PathFile(vars[\"collection\"])\n\tcollection, err := CollectionLoad(path)\n\tif err != nil {\n\t\thandleNotFound(w, err)\n\t\treturn\n\t}\n\n\tdata, err := collection.AsBytes()\n\tif err != nil {\n\t\thandleNotFound(w, err)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(data)\n}\n\nfunc HandleGetById(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=UTF-8\")\n\tsettings := common.Settings()\n\tvars := mux.Vars(r)\n\n\tpath := settings.PathFile(vars[\"collection\"])\n\tcollection, err := CollectionLoad(path)\n\n\tpId, err := strconv.ParseFloat(vars[\"id\"], 64)\n\tif err != nil {\n\t\thandleBadRequest(w, err)\n\t\treturn\n\t}\n\n\tselected, err := collection.GetById(pId).AsBytes()\n\tif err != nil {\n\t\thandleNotFound(w, err)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(selected)\n}\n\nfunc handleNotFound(w http.ResponseWriter, err error) {\n\tw.WriteHeader(http.StatusNotFound)\n\tw.Write(NewError(http.StatusNotFound, err).AsBytes())\n}\n\nfunc handleBadRequest(w http.ResponseWriter, err error) {\n\tw.WriteHeader(http.StatusBadRequest)\n\tw.Write(NewError(http.StatusBadRequest, err).AsBytes())\n}\n\nfunc handleServerError(w http.ResponseWriter, err error) {\n\tw.WriteHeader(http.StatusInternalServerError)\n\tw.Write(NewError(http.StatusInternalServerError, err).AsBytes())\n}\n<commit_msg>feature: add CORS support<commit_after>package json\n\nimport (\n\t\"net\/http\"\n\t\"strconv\"\n\n\t\"github.com\/cristianoliveira\/apitogo\/common\"\n\t\"github.com\/gorilla\/mux\"\n)\n\nfunc HandleGetAll(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=UTF-8\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tsettings := common.Settings()\n\tvars := mux.Vars(r)\n\n\tpath := settings.PathFile(vars[\"collection\"])\n\tcollection, err := CollectionLoad(path)\n\tif err != nil {\n\t\thandleNotFound(w, err)\n\t\treturn\n\t}\n\n\tdata, err := collection.AsBytes()\n\tif err != nil {\n\t\thandleNotFound(w, err)\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(data)\n}\n\nfunc HandleGetById(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=UTF-8\")\n\tw.Header().Set(\"Access-Control-Allow-Origin\", \"*\")\n\tsettings := common.Settings()\n\tvars := mux.Vars(r)\n\n\tpath := settings.PathFile(vars[\"collection\"])\n\tcollection, err := CollectionLoad(path)\n\n\tpId, err := strconv.ParseFloat(vars[\"id\"], 64)\n\tif err != nil {\n\t\thandleBadRequest(w, err)\n\t\treturn\n\t}\n\n\tselected, err := collection.GetById(pId).AsBytes()\n\tif err != nil {\n\t\thandleNotFound(w, err)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application\/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(selected)\n}\n\nfunc handleNotFound(w http.ResponseWriter, err error) {\n\tw.WriteHeader(http.StatusNotFound)\n\tw.Write(NewError(http.StatusNotFound, err).AsBytes())\n}\n\nfunc handleBadRequest(w http.ResponseWriter, err error) {\n\tw.WriteHeader(http.StatusBadRequest)\n\tw.Write(NewError(http.StatusBadRequest, err).AsBytes())\n}\n\nfunc handleServerError(w http.ResponseWriter, err error) {\n\tw.WriteHeader(http.StatusInternalServerError)\n\tw.Write(NewError(http.StatusInternalServerError, err).AsBytes())\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build integration\n\n\/*\nCopyright 2018 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage integration\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"k8s.io\/minikube\/pkg\/kapi\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/config\"\n\t\"k8s.io\/minikube\/pkg\/util\"\n\t\"k8s.io\/minikube\/pkg\/util\/retry\"\n)\n\nvar tunnelSession StartSession\n\nvar (\n\thostname = \"\"\n\tdomain = \"nginx-svc.default.svc.cluster.local.\"\n)\n\nfunc validateTunnelCmd(ctx context.Context, t *testing.T, profile string) {\n\tctx, cancel := context.WithTimeout(ctx, Minutes(20))\n\ttype validateFunc func(context.Context, *testing.T, string)\n\tdefer cancel()\n\n\t\/\/ Serial tests\n\tt.Run(\"serial\", func(t *testing.T) {\n\t\ttests := []struct {\n\t\t\tname string\n\t\t\tvalidator validateFunc\n\t\t}{\n\t\t\t{\"StartTunnel\", validateTunnelStart}, \/\/ Start tunnel\n\t\t\t{\"WaitService\", validateServiceStable}, \/\/ Wait for service is stable\n\t\t\t{\"AccessDirect\", validateAccessDirect}, \/\/ Access test for loadbalancer IP\n\t\t\t{\"DNSResolutionByDig\", validateDNSDig}, \/\/ DNS forwarding test by dig\n\t\t\t{\"DNSResolutionByDscacheutil\", validateDNSDscacheutil}, \/\/ DNS forwarding test by dscacheutil\n\t\t\t{\"AccessThroughDNS\", validateAccessDNS}, \/\/ Access test for absolute dns name\n\t\t\t{\"DeleteTunnel\", validateTunnelDelete}, \/\/ Stop tunnel and delete cluster\n\t\t}\n\t\tfor _, tc := range tests {\n\t\t\ttc := tc\n\t\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t\ttc.validator(ctx, t, profile)\n\t\t\t})\n\t\t}\n\t})\n}\n\n\/\/ checkRoutePassword skips tunnel test if sudo password required for route\nfunc checkRoutePassword(t *testing.T) {\n\tif !KicDriver() && runtime.GOOS != \"windows\" {\n\t\tif err := exec.Command(\"sudo\", \"-n\", \"ifconfig\").Run(); err != nil {\n\t\t\tt.Skipf(\"password required to execute 'route', skipping testTunnel: %v\", err)\n\t\t}\n\t}\n}\n\n\/\/ checkDNSForward skips DNS forwarding test if runtime is not supported\nfunc checkDNSForward(t *testing.T) {\n\t\/\/ Not all platforms support DNS forwarding\n\tif runtime.GOOS != \"darwin\" {\n\t\tt.Skip(\"DNS forwarding is supported for darwin only now, skipping test DNS forwarding\")\n\t}\n}\n\n\/\/ getKubeDNSIP returns kube-dns ClusterIP\nfunc getKubeDNSIP(t *testing.T, profile string) string {\n\t\/\/ Load ClusterConfig\n\tc, err := config.Load(profile)\n\tif err != nil {\n\t\tt.Errorf(\"failed to load cluster config: %v\", err)\n\t}\n\t\/\/ Get ipNet\n\t_, ipNet, err := net.ParseCIDR(c.KubernetesConfig.ServiceCIDR)\n\tif err != nil {\n\t\tt.Errorf(\"failed to parse service CIDR: %v\", err)\n\t}\n\t\/\/ Get kube-dns ClusterIP\n\tip, err := util.GetDNSIP(ipNet.String())\n\tif err != nil {\n\t\tt.Errorf(\"failed to get kube-dns IP: %v\", err)\n\t}\n\n\treturn ip.String()\n}\n\n\/\/ validateTunnelStart starts `minikube tunnel`\nfunc validateTunnelStart(ctx context.Context, t *testing.T, profile string) {\n\tcheckRoutePassword(t)\n\n\targs := []string{\"-p\", profile, \"tunnel\", \"--alsologtostderr\"}\n\tss, err := Start(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Errorf(\"failed to start a tunnel: args %q: %v\", args, err)\n\t}\n\ttunnelSession = *ss\n}\n\n\/\/ validateServiceStable starts nginx pod, nginx service and waits nginx having loadbalancer ingress IP\nfunc validateServiceStable(ctx context.Context, t *testing.T, profile string) {\n\tcheckRoutePassword(t)\n\n\tclient, err := kapi.Client(profile)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get Kubernetes client for %q: %v\", profile, err)\n\t}\n\n\t\/\/ Start the \"nginx\" pod.\n\trr, err := Run(t, exec.CommandContext(ctx, \"kubectl\", \"--context\", profile, \"apply\", \"-f\", filepath.Join(*testdataDir, \"testsvc.yaml\")))\n\tif err != nil {\n\t\tt.Fatalf(\"%s failed: %v\", rr.Command(), err)\n\t}\n\tif _, err := PodWait(ctx, t, profile, \"default\", \"run=nginx-svc\", Minutes(4)); err != nil {\n\t\tt.Fatalf(\"wait: %v\", err)\n\t}\n\n\tif err := kapi.WaitForService(client, \"default\", \"nginx-svc\", true, 1*time.Second, Minutes(2)); err != nil {\n\t\tt.Fatal(errors.Wrap(err, \"Error waiting for nginx service to be up\"))\n\t}\n\n\t\/\/ Wait until the nginx-svc has a loadbalancer ingress IP\n\terr = wait.PollImmediate(5*time.Second, Minutes(3), func() (bool, error) {\n\t\tcmd := exec.CommandContext(ctx, \"kubectl\", \"--context\", profile, \"get\", \"svc\", \"nginx-svc\", \"-o\", \"jsonpath={.status.loadBalancer.ingress[0].ip}\")\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tcmd = exec.CommandContext(ctx, \"powershell.exe\", \"-NoProfile\", \"-NonInteractive\", \"kubectl --context \"+profile+\" get svc nginx-svc -o jsonpath='{.status.loadBalancer.ingress[0].ip}'\")\n\t\t}\n\t\trr, err := Run(t, cmd)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif len(rr.Stdout.String()) > 0 {\n\t\t\thostname = rr.Stdout.String()\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"nginx-svc svc.status.loadBalancer.ingress never got an IP: %v\", err)\n\t\tcmd := exec.CommandContext(ctx, \"kubectl\", \"--context\", profile, \"get\", \"svc\", \"nginx-svc\")\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tcmd = exec.CommandContext(ctx, \"powershell.exe\", \"-NoProfile\", \"-NonInteractive\", \"kubectl --context \"+profile+\" get svc nginx-svc\")\n\t\t}\n\n\t\trr, err := Run(t, cmd)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s failed: %v\", rr.Command(), err)\n\t\t}\n\t\tt.Logf(\"failed to kubectl get svc nginx-svc:\\n%s\", rr.Stdout)\n\t}\n}\n\n\/\/ validateAccessDirect validates if the test service can be accessed with LoadBalancer IP from host\nfunc validateAccessDirect(ctx context.Context, t *testing.T, profile string) {\n\tif runtime.GOOS == \"windows\" {\n\t\tt.Skip(\"skipping: access direct test is broken on windows: https:\/\/github.com\/kubernetes\/minikube\/issues\/8304\")\n\t}\n\n\tcheckRoutePassword(t)\n\n\tgot := []byte{}\n\turl := fmt.Sprintf(\"http:\/\/%s\", hostname)\n\n\tfetch := func() error {\n\t\th := &http.Client{Timeout: time.Second * 10}\n\t\tresp, err := h.Get(url)\n\t\tif err != nil {\n\t\t\treturn &retry.RetriableError{Err: err}\n\t\t}\n\t\tif resp.Body == nil {\n\t\t\treturn &retry.RetriableError{Err: fmt.Errorf(\"no body\")}\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tgot, err = ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn &retry.RetriableError{Err: err}\n\t\t}\n\t\treturn nil\n\t}\n\n\t\/\/ Check if the nginx service can be accessed\n\tif err := retry.Expo(fetch, 3*time.Second, Minutes(2), 13); err != nil {\n\t\tt.Errorf(\"failed to hit nginx at %q: %v\", url, err)\n\n\t\trr, err := Run(t, exec.CommandContext(ctx, \"kubectl\", \"--context\", profile, \"get\", \"svc\", \"nginx-svc\"))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s failed: %v\", rr.Command(), err)\n\t\t}\n\t\tt.Logf(\"failed to kubectl get svc nginx-svc:\\n%s\", rr.Stdout)\n\t}\n\n\twant := \"Welcome to nginx!\"\n\tif strings.Contains(string(got), want) {\n\t\tt.Logf(\"tunnel at %s is working!\", url)\n\t} else {\n\t\tt.Errorf(\"expected body to contain %q, but got *%q*\", want, got)\n\t}\n}\n\n\/\/ validateDNSDig validates if the DNS forwarding works by dig command DNS lookup\n\/\/ NOTE: DNS forwarding is experimental: https:\/\/minikube.sigs.k8s.io\/docs\/handbook\/accessing\/#dns-resolution-experimental\nfunc validateDNSDig(ctx context.Context, t *testing.T, profile string) {\n\tcheckRoutePassword(t)\n\tcheckDNSForward(t)\n\n\tip := getKubeDNSIP(t, profile)\n\tdnsIP := fmt.Sprintf(\"@%s\", ip)\n\n\t\/\/ Check if the dig DNS lookup works toward kube-dns IP\n\trr, err := Run(t, exec.CommandContext(ctx, \"dig\", \"+time=5\", \"+tries=3\", dnsIP, domain, \"A\"))\n\t\/\/ dig command returns its output for stdout only. So we don't check stderr output.\n\tif err != nil {\n\t\tt.Errorf(\"failed to resolve DNS name: %v\", err)\n\t}\n\n\twant := \"ANSWER: 1\"\n\tif strings.Contains(rr.Stdout.String(), want) {\n\t\tt.Logf(\"DNS resolution by dig for %s is working!\", domain)\n\t} else {\n\t\tt.Errorf(\"expected body to contain %q, but got *%q*\", want, rr.Stdout.String())\n\n\t\t\/\/ debug DNS configuration\n\t\trr, err := Run(t, exec.CommandContext(ctx, \"scutil\", \"--dns\"))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s failed: %v\", rr.Command(), err)\n\t\t}\n\t\tt.Logf(\"debug for DNS configuration:\\n%s\", rr.Stdout.String())\n\t}\n}\n\n\/\/ validateDNSDscacheutil validates if the DNS forwarding works by dscacheutil command DNS lookup\n\/\/ NOTE: DNS forwarding is experimental: https:\/\/minikube.sigs.k8s.io\/docs\/handbook\/accessing\/#dns-resolution-experimental\nfunc validateDNSDscacheutil(ctx context.Context, t *testing.T, profile string) {\n\tcheckRoutePassword(t)\n\tcheckDNSForward(t)\n\n\t\/\/ Check if the dscacheutil DNS lookup works toward target domain\n\trr, err := Run(t, exec.CommandContext(ctx, \"dscacheutil\", \"-q\", \"host\", \"-a\", \"name\", domain))\n\t\/\/ If dscacheutil cannot lookup dns record, it returns no output. So we don't check stderr output.\n\tif err != nil {\n\t\tt.Errorf(\"failed to resolve DNS name: %v\", err)\n\t}\n\n\twant := hostname\n\tif strings.Contains(rr.Stdout.String(), want) {\n\t\tt.Logf(\"DNS resolution by dscacheutil for %s is working!\", domain)\n\t} else {\n\t\tt.Errorf(\"expected body to contain %q, but got *%q*\", want, rr.Stdout.String())\n\t}\n}\n\n\/\/ validateAccessDNS validates if the test service can be accessed with DNS forwarding from host\n\/\/ NOTE: DNS forwarding is experimental: https:\/\/minikube.sigs.k8s.io\/docs\/handbook\/accessing\/#dns-resolution-experimental\nfunc validateAccessDNS(ctx context.Context, t *testing.T, profile string) {\n\tcheckRoutePassword(t)\n\tcheckDNSForward(t)\n\n\tgot := []byte{}\n\turl := fmt.Sprintf(\"http:\/\/%s\", domain)\n\n\tip := getKubeDNSIP(t, profile)\n\tdnsIP := fmt.Sprintf(\"%s:53\", ip)\n\n\t\/\/ Set kube-dns dial\n\tkubeDNSDial := func(ctx context.Context, network, address string) (net.Conn, error) {\n\t\td := net.Dialer{}\n\t\treturn d.DialContext(ctx, \"udp\", dnsIP)\n\t}\n\n\t\/\/ Set kube-dns resolver\n\tr := net.Resolver{\n\t\tPreferGo: true,\n\t\tDial: kubeDNSDial,\n\t}\n\tdialer := net.Dialer{Resolver: &r}\n\n\t\/\/ Use kube-dns resolver\n\ttransport := &http.Transport{\n\t\tDial: dialer.Dial,\n\t\tDialContext: dialer.DialContext,\n\t}\n\n\tfetch := func() error {\n\t\th := &http.Client{Timeout: time.Second * 10, Transport: transport}\n\t\tresp, err := h.Get(url)\n\t\tif err != nil {\n\t\t\treturn &retry.RetriableError{Err: err}\n\t\t}\n\t\tif resp.Body == nil {\n\t\t\treturn &retry.RetriableError{Err: fmt.Errorf(\"no body\")}\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tgot, err = ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn &retry.RetriableError{Err: err}\n\t\t}\n\t\treturn nil\n\t}\n\n\t\/\/ Access nginx-svc through DNS resolution\n\tif err := retry.Expo(fetch, 3*time.Second, Seconds(30), 10); err != nil {\n\t\tt.Errorf(\"failed to hit nginx with DNS forwarded %q: %v\", url, err)\n\t}\n\n\twant := \"Welcome to nginx!\"\n\tif strings.Contains(string(got), want) {\n\t\tt.Logf(\"tunnel at %s is working!\", url)\n\t} else {\n\t\tt.Errorf(\"expected body to contain %q, but got *%q*\", want, got)\n\t}\n}\n\n\/\/ validateTunnelDelete stops `minikube tunnel`\nfunc validateTunnelDelete(ctx context.Context, t *testing.T, profile string) {\n\tcheckRoutePassword(t)\n\t\/\/ Stop tunnel\n\ttunnelSession.Stop(t)\n}\n<commit_msg>tune time<commit_after>\/\/ +build integration\n\n\/*\nCopyright 2018 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*\/\n\npackage integration\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\"\n\t\"net\/http\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"k8s.io\/apimachinery\/pkg\/util\/wait\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"k8s.io\/minikube\/pkg\/kapi\"\n\t\"k8s.io\/minikube\/pkg\/minikube\/config\"\n\t\"k8s.io\/minikube\/pkg\/util\"\n\t\"k8s.io\/minikube\/pkg\/util\/retry\"\n)\n\nvar tunnelSession StartSession\n\nvar (\n\thostname = \"\"\n\tdomain = \"nginx-svc.default.svc.cluster.local.\"\n)\n\nfunc validateTunnelCmd(ctx context.Context, t *testing.T, profile string) {\n\tctx, cancel := context.WithTimeout(ctx, Minutes(20))\n\ttype validateFunc func(context.Context, *testing.T, string)\n\tdefer cancel()\n\n\t\/\/ Serial tests\n\tt.Run(\"serial\", func(t *testing.T) {\n\t\ttests := []struct {\n\t\t\tname string\n\t\t\tvalidator validateFunc\n\t\t}{\n\t\t\t{\"StartTunnel\", validateTunnelStart}, \/\/ Start tunnel\n\t\t\t{\"WaitService\", validateServiceStable}, \/\/ Wait for service is stable\n\t\t\t{\"AccessDirect\", validateAccessDirect}, \/\/ Access test for loadbalancer IP\n\t\t\t{\"DNSResolutionByDig\", validateDNSDig}, \/\/ DNS forwarding test by dig\n\t\t\t{\"DNSResolutionByDscacheutil\", validateDNSDscacheutil}, \/\/ DNS forwarding test by dscacheutil\n\t\t\t{\"AccessThroughDNS\", validateAccessDNS}, \/\/ Access test for absolute dns name\n\t\t\t{\"DeleteTunnel\", validateTunnelDelete}, \/\/ Stop tunnel and delete cluster\n\t\t}\n\t\tfor _, tc := range tests {\n\t\t\ttc := tc\n\t\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t\ttc.validator(ctx, t, profile)\n\t\t\t})\n\t\t}\n\t})\n}\n\n\/\/ checkRoutePassword skips tunnel test if sudo password required for route\nfunc checkRoutePassword(t *testing.T) {\n\tif !KicDriver() && runtime.GOOS != \"windows\" {\n\t\tif err := exec.Command(\"sudo\", \"-n\", \"ifconfig\").Run(); err != nil {\n\t\t\tt.Skipf(\"password required to execute 'route', skipping testTunnel: %v\", err)\n\t\t}\n\t}\n}\n\n\/\/ checkDNSForward skips DNS forwarding test if runtime is not supported\nfunc checkDNSForward(t *testing.T) {\n\t\/\/ Not all platforms support DNS forwarding\n\tif runtime.GOOS != \"darwin\" {\n\t\tt.Skip(\"DNS forwarding is supported for darwin only now, skipping test DNS forwarding\")\n\t}\n}\n\n\/\/ getKubeDNSIP returns kube-dns ClusterIP\nfunc getKubeDNSIP(t *testing.T, profile string) string {\n\t\/\/ Load ClusterConfig\n\tc, err := config.Load(profile)\n\tif err != nil {\n\t\tt.Errorf(\"failed to load cluster config: %v\", err)\n\t}\n\t\/\/ Get ipNet\n\t_, ipNet, err := net.ParseCIDR(c.KubernetesConfig.ServiceCIDR)\n\tif err != nil {\n\t\tt.Errorf(\"failed to parse service CIDR: %v\", err)\n\t}\n\t\/\/ Get kube-dns ClusterIP\n\tip, err := util.GetDNSIP(ipNet.String())\n\tif err != nil {\n\t\tt.Errorf(\"failed to get kube-dns IP: %v\", err)\n\t}\n\n\treturn ip.String()\n}\n\n\/\/ validateTunnelStart starts `minikube tunnel`\nfunc validateTunnelStart(ctx context.Context, t *testing.T, profile string) {\n\tcheckRoutePassword(t)\n\n\targs := []string{\"-p\", profile, \"tunnel\", \"--alsologtostderr\"}\n\tss, err := Start(t, exec.CommandContext(ctx, Target(), args...))\n\tif err != nil {\n\t\tt.Errorf(\"failed to start a tunnel: args %q: %v\", args, err)\n\t}\n\ttunnelSession = *ss\n}\n\n\/\/ validateServiceStable starts nginx pod, nginx service and waits nginx having loadbalancer ingress IP\nfunc validateServiceStable(ctx context.Context, t *testing.T, profile string) {\n\tcheckRoutePassword(t)\n\n\tclient, err := kapi.Client(profile)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get Kubernetes client for %q: %v\", profile, err)\n\t}\n\n\t\/\/ Start the \"nginx\" pod.\n\trr, err := Run(t, exec.CommandContext(ctx, \"kubectl\", \"--context\", profile, \"apply\", \"-f\", filepath.Join(*testdataDir, \"testsvc.yaml\")))\n\tif err != nil {\n\t\tt.Fatalf(\"%s failed: %v\", rr.Command(), err)\n\t}\n\tif _, err := PodWait(ctx, t, profile, \"default\", \"run=nginx-svc\", Minutes(4)); err != nil {\n\t\tt.Fatalf(\"wait: %v\", err)\n\t}\n\n\tif err := kapi.WaitForService(client, \"default\", \"nginx-svc\", true, 1*time.Second, Minutes(2)); err != nil {\n\t\tt.Fatal(errors.Wrap(err, \"Error waiting for nginx service to be up\"))\n\t}\n\n\t\/\/ Wait until the nginx-svc has a loadbalancer ingress IP\n\terr = wait.PollImmediate(1*time.Second, Minutes(4), func() (bool, error) {\n\t\tcmd := exec.CommandContext(ctx, \"kubectl\", \"--context\", profile, \"get\", \"svc\", \"nginx-svc\", \"-o\", \"jsonpath={.status.loadBalancer.ingress[0].ip}\")\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tcmd = exec.CommandContext(ctx, \"powershell.exe\", \"-NoProfile\", \"-NonInteractive\", \"kubectl --context \"+profile+\" get svc nginx-svc -o jsonpath={.status.loadBalancer.ingress[0].ip}\")\n\t\t}\n\t\trr, err := Run(t, cmd)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif len(rr.Stdout.String()) > 0 {\n\t\t\thostname = rr.Stdout.String()\n\t\t\treturn true, nil\n\t\t}\n\t\treturn false, nil\n\t})\n\tif err != nil {\n\t\tt.Errorf(\"nginx-svc svc.status.loadBalancer.ingress never got an IP: %v\", err)\n\t\tcmd := exec.CommandContext(ctx, \"kubectl\", \"--context\", profile, \"get\", \"svc\", \"nginx-svc\")\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tcmd = exec.CommandContext(ctx, \"powershell.exe\", \"-NoProfile\", \"-NonInteractive\", \"kubectl --context \"+profile+\" get svc nginx-svc\")\n\t\t}\n\n\t\trr, err := Run(t, cmd)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s failed: %v\", rr.Command(), err)\n\t\t}\n\t\tt.Logf(\"failed to kubectl get svc nginx-svc:\\n%s\", rr.Stdout)\n\t}\n}\n\n\/\/ validateAccessDirect validates if the test service can be accessed with LoadBalancer IP from host\nfunc validateAccessDirect(ctx context.Context, t *testing.T, profile string) {\n\tif runtime.GOOS == \"windows\" {\n\t\tt.Skip(\"skipping: access direct test is broken on windows: https:\/\/github.com\/kubernetes\/minikube\/issues\/8304\")\n\t}\n\n\tcheckRoutePassword(t)\n\n\tgot := []byte{}\n\turl := fmt.Sprintf(\"http:\/\/%s\", hostname)\n\n\tfetch := func() error {\n\t\th := &http.Client{Timeout: time.Second * 10}\n\t\tresp, err := h.Get(url)\n\t\tif err != nil {\n\t\t\treturn &retry.RetriableError{Err: err}\n\t\t}\n\t\tif resp.Body == nil {\n\t\t\treturn &retry.RetriableError{Err: fmt.Errorf(\"no body\")}\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tgot, err = ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn &retry.RetriableError{Err: err}\n\t\t}\n\t\treturn nil\n\t}\n\n\t\/\/ Check if the nginx service can be accessed\n\tif err := retry.Expo(fetch, 3*time.Second, Minutes(2), 13); err != nil {\n\t\tt.Errorf(\"failed to hit nginx at %q: %v\", url, err)\n\n\t\trr, err := Run(t, exec.CommandContext(ctx, \"kubectl\", \"--context\", profile, \"get\", \"svc\", \"nginx-svc\"))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s failed: %v\", rr.Command(), err)\n\t\t}\n\t\tt.Logf(\"failed to kubectl get svc nginx-svc:\\n%s\", rr.Stdout)\n\t}\n\n\twant := \"Welcome to nginx!\"\n\tif strings.Contains(string(got), want) {\n\t\tt.Logf(\"tunnel at %s is working!\", url)\n\t} else {\n\t\tt.Errorf(\"expected body to contain %q, but got *%q*\", want, got)\n\t}\n}\n\n\/\/ validateDNSDig validates if the DNS forwarding works by dig command DNS lookup\n\/\/ NOTE: DNS forwarding is experimental: https:\/\/minikube.sigs.k8s.io\/docs\/handbook\/accessing\/#dns-resolution-experimental\nfunc validateDNSDig(ctx context.Context, t *testing.T, profile string) {\n\tcheckRoutePassword(t)\n\tcheckDNSForward(t)\n\n\tip := getKubeDNSIP(t, profile)\n\tdnsIP := fmt.Sprintf(\"@%s\", ip)\n\n\t\/\/ Check if the dig DNS lookup works toward kube-dns IP\n\trr, err := Run(t, exec.CommandContext(ctx, \"dig\", \"+time=5\", \"+tries=3\", dnsIP, domain, \"A\"))\n\t\/\/ dig command returns its output for stdout only. So we don't check stderr output.\n\tif err != nil {\n\t\tt.Errorf(\"failed to resolve DNS name: %v\", err)\n\t}\n\n\twant := \"ANSWER: 1\"\n\tif strings.Contains(rr.Stdout.String(), want) {\n\t\tt.Logf(\"DNS resolution by dig for %s is working!\", domain)\n\t} else {\n\t\tt.Errorf(\"expected body to contain %q, but got *%q*\", want, rr.Stdout.String())\n\n\t\t\/\/ debug DNS configuration\n\t\trr, err := Run(t, exec.CommandContext(ctx, \"scutil\", \"--dns\"))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s failed: %v\", rr.Command(), err)\n\t\t}\n\t\tt.Logf(\"debug for DNS configuration:\\n%s\", rr.Stdout.String())\n\t}\n}\n\n\/\/ validateDNSDscacheutil validates if the DNS forwarding works by dscacheutil command DNS lookup\n\/\/ NOTE: DNS forwarding is experimental: https:\/\/minikube.sigs.k8s.io\/docs\/handbook\/accessing\/#dns-resolution-experimental\nfunc validateDNSDscacheutil(ctx context.Context, t *testing.T, profile string) {\n\tcheckRoutePassword(t)\n\tcheckDNSForward(t)\n\n\t\/\/ Check if the dscacheutil DNS lookup works toward target domain\n\trr, err := Run(t, exec.CommandContext(ctx, \"dscacheutil\", \"-q\", \"host\", \"-a\", \"name\", domain))\n\t\/\/ If dscacheutil cannot lookup dns record, it returns no output. So we don't check stderr output.\n\tif err != nil {\n\t\tt.Errorf(\"failed to resolve DNS name: %v\", err)\n\t}\n\n\twant := hostname\n\tif strings.Contains(rr.Stdout.String(), want) {\n\t\tt.Logf(\"DNS resolution by dscacheutil for %s is working!\", domain)\n\t} else {\n\t\tt.Errorf(\"expected body to contain %q, but got *%q*\", want, rr.Stdout.String())\n\t}\n}\n\n\/\/ validateAccessDNS validates if the test service can be accessed with DNS forwarding from host\n\/\/ NOTE: DNS forwarding is experimental: https:\/\/minikube.sigs.k8s.io\/docs\/handbook\/accessing\/#dns-resolution-experimental\nfunc validateAccessDNS(ctx context.Context, t *testing.T, profile string) {\n\tcheckRoutePassword(t)\n\tcheckDNSForward(t)\n\n\tgot := []byte{}\n\turl := fmt.Sprintf(\"http:\/\/%s\", domain)\n\n\tip := getKubeDNSIP(t, profile)\n\tdnsIP := fmt.Sprintf(\"%s:53\", ip)\n\n\t\/\/ Set kube-dns dial\n\tkubeDNSDial := func(ctx context.Context, network, address string) (net.Conn, error) {\n\t\td := net.Dialer{}\n\t\treturn d.DialContext(ctx, \"udp\", dnsIP)\n\t}\n\n\t\/\/ Set kube-dns resolver\n\tr := net.Resolver{\n\t\tPreferGo: true,\n\t\tDial: kubeDNSDial,\n\t}\n\tdialer := net.Dialer{Resolver: &r}\n\n\t\/\/ Use kube-dns resolver\n\ttransport := &http.Transport{\n\t\tDial: dialer.Dial,\n\t\tDialContext: dialer.DialContext,\n\t}\n\n\tfetch := func() error {\n\t\th := &http.Client{Timeout: time.Second * 10, Transport: transport}\n\t\tresp, err := h.Get(url)\n\t\tif err != nil {\n\t\t\treturn &retry.RetriableError{Err: err}\n\t\t}\n\t\tif resp.Body == nil {\n\t\t\treturn &retry.RetriableError{Err: fmt.Errorf(\"no body\")}\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tgot, err = ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn &retry.RetriableError{Err: err}\n\t\t}\n\t\treturn nil\n\t}\n\n\t\/\/ Access nginx-svc through DNS resolution\n\tif err := retry.Expo(fetch, 3*time.Second, Seconds(30), 10); err != nil {\n\t\tt.Errorf(\"failed to hit nginx with DNS forwarded %q: %v\", url, err)\n\t}\n\n\twant := \"Welcome to nginx!\"\n\tif strings.Contains(string(got), want) {\n\t\tt.Logf(\"tunnel at %s is working!\", url)\n\t} else {\n\t\tt.Errorf(\"expected body to contain %q, but got *%q*\", want, got)\n\t}\n}\n\n\/\/ validateTunnelDelete stops `minikube tunnel`\nfunc validateTunnelDelete(ctx context.Context, t *testing.T, profile string) {\n\tcheckRoutePassword(t)\n\t\/\/ Stop tunnel\n\ttunnelSession.Stop(t)\n}\n<|endoftext|>"} {"text":"<commit_before>package progress\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/git-lfs\/git-lfs\/tasklog\"\n\t\"github.com\/git-lfs\/git-lfs\/tools\"\n\t\"github.com\/git-lfs\/git-lfs\/tools\/humanize\"\n)\n\n\/\/ ProgressMeter provides a progress bar type output for the TransferQueue. It\n\/\/ is given an estimated file count and size up front and tracks the number of\n\/\/ files and bytes transferred as well as the number of files and bytes that\n\/\/ get skipped because the transfer is unnecessary.\ntype ProgressMeter struct {\n\tfinishedFiles int64 \/\/ int64s must come first for struct alignment\n\tskippedFiles int64\n\ttransferringFiles int64\n\testimatedBytes int64\n\tcurrentBytes int64\n\tskippedBytes int64\n\testimatedFiles int32\n\tpaused uint32\n\tlogToFile uint32\n\tlogger *tools.SyncWriter\n\tfileIndex map[string]int64 \/\/ Maps a file name to its transfer number\n\tfileIndexMutex *sync.Mutex\n\tdryRun bool\n\tupdates chan *tasklog.Update\n}\n\ntype env interface {\n\tGet(key string) (val string, ok bool)\n}\n\ntype meterOption func(*ProgressMeter)\n\n\/\/ DryRun is an option for NewMeter() that determines whether updates should be\n\/\/ sent to stdout.\nfunc DryRun(dryRun bool) meterOption {\n\treturn func(m *ProgressMeter) {\n\t\tm.dryRun = dryRun\n\t}\n}\n\n\/\/ WithLogFile is an option for NewMeter() that sends updates to a text file.\nfunc WithLogFile(name string) meterOption {\n\tprintErr := func(err string) {\n\t\tfmt.Fprintf(os.Stderr, \"Error creating progress logger: %s\\n\", err)\n\t}\n\n\treturn func(m *ProgressMeter) {\n\t\tif len(name) == 0 {\n\t\t\treturn\n\t\t}\n\n\t\tif !filepath.IsAbs(name) {\n\t\t\tprintErr(\"GIT_LFS_PROGRESS must be an absolute path\")\n\t\t\treturn\n\t\t}\n\n\t\tcbDir := filepath.Dir(name)\n\t\tif err := os.MkdirAll(cbDir, 0755); err != nil {\n\t\t\tprintErr(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tfile, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)\n\t\tif err != nil {\n\t\t\tprintErr(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tm.logToFile = 1\n\t\tm.logger = tools.NewSyncWriter(file)\n\t}\n}\n\n\/\/ WithOSEnv is an option for NewMeter() that sends updates to the text file\n\/\/ path specified in the OS Env.\nfunc WithOSEnv(os env) meterOption {\n\tname, _ := os.Get(\"GIT_LFS_PROGRESS\")\n\treturn WithLogFile(name)\n}\n\n\/\/ NewMeter creates a new ProgressMeter.\nfunc NewMeter(options ...meterOption) *ProgressMeter {\n\tm := &ProgressMeter{\n\t\tfileIndex: make(map[string]int64),\n\t\tfileIndexMutex: &sync.Mutex{},\n\t\tupdates: make(chan *tasklog.Update),\n\t}\n\n\tfor _, opt := range options {\n\t\topt(m)\n\t}\n\n\treturn m\n}\n\n\/\/ Start begins sending status updates to the optional log file, and stdout.\nfunc (p *ProgressMeter) Start() {\n\tatomic.StoreUint32(&p.paused, 0)\n}\n\n\/\/ Pause stops sending status updates temporarily, until Start() is called again.\nfunc (p *ProgressMeter) Pause() {\n\tatomic.StoreUint32(&p.paused, 1)\n}\n\n\/\/ Add tells the progress meter that a single file of the given size will\n\/\/ possibly be transferred. If a file doesn't need to be transferred for some\n\/\/ reason, be sure to call Skip(int64) with the same size.\nfunc (p *ProgressMeter) Add(size int64) {\n\tdefer p.update()\n\tatomic.AddInt32(&p.estimatedFiles, 1)\n\tatomic.AddInt64(&p.estimatedBytes, size)\n}\n\n\/\/ Skip tells the progress meter that a file of size `size` is being skipped\n\/\/ because the transfer is unnecessary.\nfunc (p *ProgressMeter) Skip(size int64) {\n\tdefer p.update()\n\tatomic.AddInt64(&p.skippedFiles, 1)\n\tatomic.AddInt64(&p.skippedBytes, size)\n\t\/\/ Reduce bytes and files so progress easier to parse\n\tatomic.AddInt32(&p.estimatedFiles, -1)\n\tatomic.AddInt64(&p.estimatedBytes, -size)\n}\n\n\/\/ StartTransfer tells the progress meter that a transferring file is being\n\/\/ added to the TransferQueue.\nfunc (p *ProgressMeter) StartTransfer(name string) {\n\tdefer p.update()\n\tidx := atomic.AddInt64(&p.transferringFiles, 1)\n\tp.fileIndexMutex.Lock()\n\tp.fileIndex[name] = idx\n\tp.fileIndexMutex.Unlock()\n}\n\n\/\/ TransferBytes increments the number of bytes transferred\nfunc (p *ProgressMeter) TransferBytes(direction, name string, read, total int64, current int) {\n\tdefer p.update()\n\tatomic.AddInt64(&p.currentBytes, int64(current))\n\tp.logBytes(direction, name, read, total)\n}\n\n\/\/ FinishTransfer increments the finished transfer count\nfunc (p *ProgressMeter) FinishTransfer(name string) {\n\tdefer p.update()\n\tatomic.AddInt64(&p.finishedFiles, 1)\n\tp.fileIndexMutex.Lock()\n\tdelete(p.fileIndex, name)\n\tp.fileIndexMutex.Unlock()\n}\n\n\/\/ Finish shuts down the ProgressMeter\nfunc (p *ProgressMeter) Finish() {\n\tp.update()\n\tclose(p.updates)\n}\n\nfunc (p *ProgressMeter) Updates() <-chan *tasklog.Update {\n\treturn p.updates\n}\n\nfunc (p *ProgressMeter) Throttled() bool {\n\treturn true\n}\n\nfunc (p *ProgressMeter) update() {\n\tif p.skipUpdate() {\n\t\treturn\n\t}\n\n\tp.updates <- &tasklog.Update{\n\t\tS: p.str(),\n\t\tAt: time.Now(),\n\t}\n}\n\nfunc (p *ProgressMeter) skipUpdate() bool {\n\treturn p.dryRun ||\n\t\t(p.estimatedFiles == 0 && p.skippedFiles == 0) ||\n\t\tatomic.LoadUint32(&p.paused) == 1\n}\n\nfunc (p *ProgressMeter) str() string {\n\t\/\/ (%d of %d files, %d skipped) %f B \/ %f B, %f B skipped\n\t\/\/ skipped counts only show when > 0\n\n\tout := fmt.Sprintf(\"\\rGit LFS: (%d of %d files\",\n\t\tp.finishedFiles,\n\t\tp.estimatedFiles)\n\tif p.skippedFiles > 0 {\n\t\tout += fmt.Sprintf(\", %d skipped\", p.skippedFiles)\n\t}\n\tout += fmt.Sprintf(\") %s \/ %s\",\n\t\thumanize.FormatBytes(uint64(p.currentBytes)),\n\t\thumanize.FormatBytes(uint64(p.estimatedBytes)))\n\tif p.skippedBytes > 0 {\n\t\tout += fmt.Sprintf(\", %s skipped\",\n\t\t\thumanize.FormatBytes(uint64(p.skippedBytes)))\n\t}\n\n\treturn out\n}\n\nfunc (p *ProgressMeter) logBytes(direction, name string, read, total int64) {\n\tp.fileIndexMutex.Lock()\n\tidx := p.fileIndex[name]\n\tp.fileIndexMutex.Unlock()\n\tline := fmt.Sprintf(\"%s %d\/%d %d\/%d %s\\n\", direction, idx, p.estimatedFiles, read, total, name)\n\tif atomic.LoadUint32(&p.logToFile) == 1 {\n\t\tif err := p.logger.Write([]byte(line)); err != nil {\n\t\t\tatomic.StoreUint32(&p.logToFile, 0)\n\t\t}\n\t}\n}\n<commit_msg>progress: make *ProgressMeter respond to 'nil` receiver<commit_after>package progress\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\/filepath\"\n\t\"sync\"\n\t\"sync\/atomic\"\n\t\"time\"\n\n\t\"github.com\/git-lfs\/git-lfs\/tasklog\"\n\t\"github.com\/git-lfs\/git-lfs\/tools\"\n\t\"github.com\/git-lfs\/git-lfs\/tools\/humanize\"\n)\n\n\/\/ ProgressMeter provides a progress bar type output for the TransferQueue. It\n\/\/ is given an estimated file count and size up front and tracks the number of\n\/\/ files and bytes transferred as well as the number of files and bytes that\n\/\/ get skipped because the transfer is unnecessary.\ntype ProgressMeter struct {\n\tfinishedFiles int64 \/\/ int64s must come first for struct alignment\n\tskippedFiles int64\n\ttransferringFiles int64\n\testimatedBytes int64\n\tcurrentBytes int64\n\tskippedBytes int64\n\testimatedFiles int32\n\tpaused uint32\n\tlogToFile uint32\n\tlogger *tools.SyncWriter\n\tfileIndex map[string]int64 \/\/ Maps a file name to its transfer number\n\tfileIndexMutex *sync.Mutex\n\tdryRun bool\n\tupdates chan *tasklog.Update\n}\n\ntype env interface {\n\tGet(key string) (val string, ok bool)\n}\n\ntype meterOption func(*ProgressMeter)\n\n\/\/ DryRun is an option for NewMeter() that determines whether updates should be\n\/\/ sent to stdout.\nfunc DryRun(dryRun bool) meterOption {\n\treturn func(m *ProgressMeter) {\n\t\tm.dryRun = dryRun\n\t}\n}\n\n\/\/ WithLogFile is an option for NewMeter() that sends updates to a text file.\nfunc WithLogFile(name string) meterOption {\n\tprintErr := func(err string) {\n\t\tfmt.Fprintf(os.Stderr, \"Error creating progress logger: %s\\n\", err)\n\t}\n\n\treturn func(m *ProgressMeter) {\n\t\tif len(name) == 0 {\n\t\t\treturn\n\t\t}\n\n\t\tif !filepath.IsAbs(name) {\n\t\t\tprintErr(\"GIT_LFS_PROGRESS must be an absolute path\")\n\t\t\treturn\n\t\t}\n\n\t\tcbDir := filepath.Dir(name)\n\t\tif err := os.MkdirAll(cbDir, 0755); err != nil {\n\t\t\tprintErr(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tfile, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)\n\t\tif err != nil {\n\t\t\tprintErr(err.Error())\n\t\t\treturn\n\t\t}\n\n\t\tm.logToFile = 1\n\t\tm.logger = tools.NewSyncWriter(file)\n\t}\n}\n\n\/\/ WithOSEnv is an option for NewMeter() that sends updates to the text file\n\/\/ path specified in the OS Env.\nfunc WithOSEnv(os env) meterOption {\n\tname, _ := os.Get(\"GIT_LFS_PROGRESS\")\n\treturn WithLogFile(name)\n}\n\n\/\/ NewMeter creates a new ProgressMeter.\nfunc NewMeter(options ...meterOption) *ProgressMeter {\n\tm := &ProgressMeter{\n\t\tfileIndex: make(map[string]int64),\n\t\tfileIndexMutex: &sync.Mutex{},\n\t\tupdates: make(chan *tasklog.Update),\n\t}\n\n\tfor _, opt := range options {\n\t\topt(m)\n\t}\n\n\treturn m\n}\n\n\/\/ Start begins sending status updates to the optional log file, and stdout.\nfunc (p *ProgressMeter) Start() {\n\tif p == nil {\n\t\treturn\n\t}\n\tatomic.StoreUint32(&p.paused, 0)\n}\n\n\/\/ Pause stops sending status updates temporarily, until Start() is called again.\nfunc (p *ProgressMeter) Pause() {\n\tif p == nil {\n\t\treturn\n\t}\n\tatomic.StoreUint32(&p.paused, 1)\n}\n\n\/\/ Add tells the progress meter that a single file of the given size will\n\/\/ possibly be transferred. If a file doesn't need to be transferred for some\n\/\/ reason, be sure to call Skip(int64) with the same size.\nfunc (p *ProgressMeter) Add(size int64) {\n\tif p == nil {\n\t\treturn\n\t}\n\n\tdefer p.update()\n\tatomic.AddInt32(&p.estimatedFiles, 1)\n\tatomic.AddInt64(&p.estimatedBytes, size)\n}\n\n\/\/ Skip tells the progress meter that a file of size `size` is being skipped\n\/\/ because the transfer is unnecessary.\nfunc (p *ProgressMeter) Skip(size int64) {\n\tif p == nil {\n\t\treturn\n\t}\n\n\tdefer p.update()\n\tatomic.AddInt64(&p.skippedFiles, 1)\n\tatomic.AddInt64(&p.skippedBytes, size)\n\t\/\/ Reduce bytes and files so progress easier to parse\n\tatomic.AddInt32(&p.estimatedFiles, -1)\n\tatomic.AddInt64(&p.estimatedBytes, -size)\n}\n\n\/\/ StartTransfer tells the progress meter that a transferring file is being\n\/\/ added to the TransferQueue.\nfunc (p *ProgressMeter) StartTransfer(name string) {\n\tif p == nil {\n\t\treturn\n\t}\n\n\tdefer p.update()\n\tidx := atomic.AddInt64(&p.transferringFiles, 1)\n\tp.fileIndexMutex.Lock()\n\tp.fileIndex[name] = idx\n\tp.fileIndexMutex.Unlock()\n}\n\n\/\/ TransferBytes increments the number of bytes transferred\nfunc (p *ProgressMeter) TransferBytes(direction, name string, read, total int64, current int) {\n\tif p == nil {\n\t\treturn\n\t}\n\n\tdefer p.update()\n\tatomic.AddInt64(&p.currentBytes, int64(current))\n\tp.logBytes(direction, name, read, total)\n}\n\n\/\/ FinishTransfer increments the finished transfer count\nfunc (p *ProgressMeter) FinishTransfer(name string) {\n\tif p == nil {\n\t\treturn\n\t}\n\n\tdefer p.update()\n\tatomic.AddInt64(&p.finishedFiles, 1)\n\tp.fileIndexMutex.Lock()\n\tdelete(p.fileIndex, name)\n\tp.fileIndexMutex.Unlock()\n}\n\n\/\/ Finish shuts down the ProgressMeter\nfunc (p *ProgressMeter) Finish() {\n\tif p == nil {\n\t\treturn\n\t}\n\n\tp.update()\n\tclose(p.updates)\n}\n\nfunc (p *ProgressMeter) Updates() <-chan *tasklog.Update {\n\tif p == nil {\n\t\treturn nil\n\t}\n\treturn p.updates\n}\n\nfunc (p *ProgressMeter) Throttled() bool {\n\treturn true\n}\n\nfunc (p *ProgressMeter) update() {\n\tif p.skipUpdate() {\n\t\treturn\n\t}\n\n\tp.updates <- &tasklog.Update{\n\t\tS: p.str(),\n\t\tAt: time.Now(),\n\t}\n}\n\nfunc (p *ProgressMeter) skipUpdate() bool {\n\treturn p.dryRun ||\n\t\t(p.estimatedFiles == 0 && p.skippedFiles == 0) ||\n\t\tatomic.LoadUint32(&p.paused) == 1\n}\n\nfunc (p *ProgressMeter) str() string {\n\t\/\/ (%d of %d files, %d skipped) %f B \/ %f B, %f B skipped\n\t\/\/ skipped counts only show when > 0\n\n\tout := fmt.Sprintf(\"\\rGit LFS: (%d of %d files\",\n\t\tp.finishedFiles,\n\t\tp.estimatedFiles)\n\tif p.skippedFiles > 0 {\n\t\tout += fmt.Sprintf(\", %d skipped\", p.skippedFiles)\n\t}\n\tout += fmt.Sprintf(\") %s \/ %s\",\n\t\thumanize.FormatBytes(uint64(p.currentBytes)),\n\t\thumanize.FormatBytes(uint64(p.estimatedBytes)))\n\tif p.skippedBytes > 0 {\n\t\tout += fmt.Sprintf(\", %s skipped\",\n\t\t\thumanize.FormatBytes(uint64(p.skippedBytes)))\n\t}\n\n\treturn out\n}\n\nfunc (p *ProgressMeter) logBytes(direction, name string, read, total int64) {\n\tp.fileIndexMutex.Lock()\n\tidx := p.fileIndex[name]\n\tp.fileIndexMutex.Unlock()\n\tline := fmt.Sprintf(\"%s %d\/%d %d\/%d %s\\n\", direction, idx, p.estimatedFiles, read, total, name)\n\tif atomic.LoadUint32(&p.logToFile) == 1 {\n\t\tif err := p.logger.Write([]byte(line)); err != nil {\n\t\t\tatomic.StoreUint32(&p.logToFile, 0)\n\t\t}\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package character\n\nimport (\n\t\"net\/http\"\n\t\"sort\"\n\n\t\"github.com\/aerogo\/aero\"\n\t\"github.com\/animenotifier\/arn\"\n\t\"github.com\/animenotifier\/notify.moe\/components\"\n\t\"github.com\/animenotifier\/notify.moe\/utils\"\n)\n\n\/\/ Get character.\nfunc Get(ctx *aero.Context) string {\n\tuser := utils.GetUser(ctx)\n\tid := ctx.Get(\"id\")\n\tcharacter, err := arn.GetCharacter(id)\n\n\tif err != nil {\n\t\treturn ctx.Error(http.StatusNotFound, \"Character not found\", err)\n\t}\n\n\tcharacterAnime := character.Anime()\n\n\tsort.Slice(characterAnime, func(i, j int) bool {\n\t\tif characterAnime[i].StartDate == \"\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif characterAnime[j].StartDate == \"\" {\n\t\t\treturn true\n\t\t}\n\n\t\treturn characterAnime[i].StartDate < characterAnime[j].StartDate\n\t})\n\n\t\/\/ Set OpenGraph attributes\n\tctx.Data = &arn.OpenGraph{\n\t\tTags: map[string]string{\n\t\t\t\"og:title\": character.Name,\n\t\t\t\"og:image\": character.Image,\n\t\t\t\"og:url\": \"https:\/\/\" + ctx.App.Config.Domain + character.Link(),\n\t\t\t\"og:site_name\": \"notify.moe\",\n\t\t\t\"og:description\": character.Description,\n\t\t},\n\t\tMeta: map[string]string{\n\t\t\t\"description\": character.Description,\n\t\t\t\"keywords\": character.Name + \",anime,character\",\n\t\t},\n\t}\n\n\treturn ctx.HTML(components.CharacterDetails(character, characterAnime, user))\n}\n<commit_msg>Improved character OG data<commit_after>package character\n\nimport (\n\t\"net\/http\"\n\t\"sort\"\n\n\t\"github.com\/aerogo\/aero\"\n\t\"github.com\/animenotifier\/arn\"\n\t\"github.com\/animenotifier\/notify.moe\/components\"\n\t\"github.com\/animenotifier\/notify.moe\/utils\"\n)\n\nconst maxDescriptionLength = 170\n\n\/\/ Get character.\nfunc Get(ctx *aero.Context) string {\n\tuser := utils.GetUser(ctx)\n\tid := ctx.Get(\"id\")\n\tcharacter, err := arn.GetCharacter(id)\n\n\tif err != nil {\n\t\treturn ctx.Error(http.StatusNotFound, \"Character not found\", err)\n\t}\n\n\tcharacterAnime := character.Anime()\n\n\tsort.Slice(characterAnime, func(i, j int) bool {\n\t\tif characterAnime[i].StartDate == \"\" {\n\t\t\treturn false\n\t\t}\n\n\t\tif characterAnime[j].StartDate == \"\" {\n\t\t\treturn true\n\t\t}\n\n\t\treturn characterAnime[i].StartDate < characterAnime[j].StartDate\n\t})\n\n\t\/\/ Set OpenGraph attributes\n\tdescription := character.Description\n\n\tif len(description) > maxDescriptionLength {\n\t\tdescription = description[:maxDescriptionLength-3] + \"...\"\n\t}\n\n\tctx.Data = &arn.OpenGraph{\n\t\tTags: map[string]string{\n\t\t\t\"og:title\": character.Name,\n\t\t\t\"og:image\": character.Image,\n\t\t\t\"og:url\": \"https:\/\/\" + ctx.App.Config.Domain + character.Link(),\n\t\t\t\"og:site_name\": \"notify.moe\",\n\t\t\t\"og:description\": description,\n\n\t\t\t\/\/ The OpenGraph type \"profile\" is meant for real-life persons but I think it's okay in this context.\n\t\t\t\/\/ An alternative would be to use \"article\" which is mostly used for blog posts and news.\n\t\t\t\"og:type\": \"profile\",\n\t\t},\n\t\tMeta: map[string]string{\n\t\t\t\"description\": description,\n\t\t\t\"keywords\": character.Name + \",anime,character\",\n\t\t},\n\t}\n\n\treturn ctx.HTML(components.CharacterDetails(character, characterAnime, user))\n}\n<|endoftext|>"} {"text":"<commit_before>package api\n\nimport (\n\t\"encoding\/base64\"\n\t\"fmt\"\n\t\"github.com\/danjac\/podbaby\/api\/Godeps\/_workspace\/src\/github.com\/labstack\/echo\"\n\t\"github.com\/danjac\/podbaby\/config\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/danjac\/podbaby\/api\/Godeps\/_workspace\/src\/github.com\/gorilla\/securecookie\"\n)\n\nconst sessionTimeout = 24 * 30\n\ntype session interface {\n\twrite(*echo.Context, string, interface{}) error\n\tread(*echo.Context, string, interface{}) (bool, error)\n\treadInt(*echo.Context, string) (int, bool, error)\n}\n\ntype secureCookieSession struct {\n\t*securecookie.SecureCookie\n\tisSecure bool\n}\n\nfunc (s *secureCookieSession) write(c *echo.Context, key string, value interface{}) error {\n\tencoded, err := s.Encode(key, value)\n\tif err == nil {\n\t\tcookie := &http.Cookie{\n\t\t\tName: key,\n\t\t\tValue: encoded,\n\t\t\tExpires: time.Now().Add(time.Hour * sessionTimeout),\n\t\t\tSecure: s.isSecure,\n\t\t\tHttpOnly: true,\n\t\t\tPath: \"\/\",\n\t\t}\n\t\thttp.SetCookie(c.Response(), cookie)\n\t}\n\treturn err\n}\n\nfunc (s *secureCookieSession) read(c *echo.Context, key string, dst interface{}) (bool, error) {\n\tcookie, err := c.Request().Cookie(key)\n\tif err == http.ErrNoCookie {\n\t\treturn false, nil\n\t}\n\tif err != nil {\n\t\treturn false, err\n\t}\n\terr = s.Decode(key, cookie.Value, dst)\n\tfmt.Println(\"ERROR\", err)\n\treturn err != nil, err\n}\n\nfunc (s *secureCookieSession) readInt(c *echo.Context, key string) (int, bool, error) {\n\tvar rv int\n\tok, err := s.read(c, key, &rv)\n\treturn rv, ok, err\n}\n\nfunc newSession(cfg *config.Config) session {\n\tsecureCookieKey, _ := base64.StdEncoding.DecodeString(cfg.SecureCookieKey)\n\tcookie := securecookie.New(\n\t\t[]byte(cfg.SecretKey),\n\t\tsecureCookieKey,\n\t)\n\treturn &secureCookieSession{\n\t\tSecureCookie: cookie,\n\t\tisSecure: !cfg.IsDev(),\n\t}\n}\n<commit_msg>Test fixes<commit_after>package api\n\nimport (\n\t\"encoding\/base64\"\n\t\"github.com\/danjac\/podbaby\/api\/Godeps\/_workspace\/src\/github.com\/labstack\/echo\"\n\t\"github.com\/danjac\/podbaby\/config\"\n\t\"net\/http\"\n\t\"time\"\n\n\t\"github.com\/danjac\/podbaby\/api\/Godeps\/_workspace\/src\/github.com\/gorilla\/securecookie\"\n)\n\nconst sessionTimeout = 24 * 30\n\ntype session interface {\n\twrite(*echo.Context, string, interface{}) error\n\tread(*echo.Context, string, interface{}) (bool, error)\n\treadInt(*echo.Context, string) (int, bool, error)\n}\n\ntype secureCookieSession struct {\n\t*securecookie.SecureCookie\n\tisSecure bool\n}\n\nfunc (s *secureCookieSession) write(c *echo.Context, key string, value interface{}) error {\n\tencoded, err := s.Encode(key, value)\n\tif err == nil {\n\t\tcookie := &http.Cookie{\n\t\t\tName: key,\n\t\t\tValue: encoded,\n\t\t\tExpires: time.Now().Add(time.Hour * sessionTimeout),\n\t\t\tSecure: s.isSecure,\n\t\t\tHttpOnly: true,\n\t\t\tPath: \"\/\",\n\t\t}\n\t\thttp.SetCookie(c.Response(), cookie)\n\t}\n\treturn err\n}\n\nfunc (s *secureCookieSession) read(c *echo.Context, key string, dst interface{}) (bool, error) {\n\tcookie, err := c.Request().Cookie(key)\n\tif err == http.ErrNoCookie {\n\t\treturn false, nil\n\t}\n\tif err != nil {\n\t\treturn false, err\n\t}\n\terr = s.Decode(key, cookie.Value, dst)\n\treturn err == nil, err\n}\n\nfunc (s *secureCookieSession) readInt(c *echo.Context, key string) (int, bool, error) {\n\tvar rv int\n\tok, err := s.read(c, key, &rv)\n\treturn rv, ok, err\n}\n\nfunc newSession(cfg *config.Config) session {\n\tsecureCookieKey, _ := base64.StdEncoding.DecodeString(cfg.SecureCookieKey)\n\tcookie := securecookie.New(\n\t\t[]byte(cfg.SecretKey),\n\t\tsecureCookieKey,\n\t)\n\treturn &secureCookieSession{\n\t\tSecureCookie: cookie,\n\t\tisSecure: !cfg.IsDev(),\n\t}\n}\n<|endoftext|>"} {"text":"<commit_before>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ssoadmin\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/helper\/schema\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/helper\/validation\"\n\t\"github.com\/terraform-providers\/terraform-provider-aws\/aws\/internal\/keyvaluetags\"\n)\n\nfunc resourceAwsSsoPermissionSet() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsSsoPermissionSetCreate,\n\t\tRead: resourceAwsSsoPermissionSetRead,\n\t\tUpdate: resourceAwsSsoPermissionSetUpdate,\n\t\tDelete: resourceAwsSsoPermissionSetDelete,\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: schema.ImportStatePassthrough,\n\t\t},\n\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"instance_arn\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t},\n\t\t\t\n\t\t\t\"permission_set_arn\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t},\n\n\t\t\t\"created_date\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t},\n\n\t\t\t\"description\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t},\n\n\t\t\t\"name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t},\n\n\t\t\t\"relay_state\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t},\n\n\t\t\t\"session_duration\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t},\n\n\t\t\t\"inline_policy\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t},\n\n\t\t\t\"managed_policies\": {\n\t\t\t\tType: schema.TypeList,\n\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\tType: schema.TypeString\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"tags\": tagsSchema(),\n\t\t},\n\t}\n}\n\nfunc resourceAwsSsoPermissionSetCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ssoadminconn\n\t\/\/ TODO\n\t\/\/ d.SetId(*resp.PermissionSetArn)\n\treturn resourceAwsSsoPermissionSetRead(d, meta)\n}\n\nfunc resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ssoadminconn\n\t\/\/ TODO\n\treturn nil\n}\n\nfunc resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ssoadminconn\n\t\/\/ TODO\n\treturn resourceAwsSsoPermissionSetRead(d, meta)\n}\n\nfunc resourceAwsSsoPermissionSetDelete(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ssoadminconn\n\t\/\/ TODO\n\treturn nil\n}\n\nfunc waitForPermissionSetProvisioning(conn *identitystore.IdentityStore, arn string) error {\n\n}\n<commit_msg>update schema<commit_after>package aws\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com\/aws\/aws-sdk-go\/aws\"\n\t\"github.com\/aws\/aws-sdk-go\/service\/ssoadmin\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/helper\/resource\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/helper\/schema\"\n\t\"github.com\/hashicorp\/terraform-plugin-sdk\/v2\/helper\/validation\"\n\t\"github.com\/terraform-providers\/terraform-provider-aws\/aws\/internal\/keyvaluetags\"\n)\n\nfunc resourceAwsSsoPermissionSet() *schema.Resource {\n\treturn &schema.Resource{\n\t\tCreate: resourceAwsSsoPermissionSetCreate,\n\t\tRead: resourceAwsSsoPermissionSetRead,\n\t\tUpdate: resourceAwsSsoPermissionSetUpdate,\n\t\tDelete: resourceAwsSsoPermissionSetDelete,\n\t\tImporter: &schema.ResourceImporter{\n\t\t\tState: schema.ImportStatePassthrough,\n\t\t},\n\t\tSchema: map[string]*schema.Schema{\n\t\t\t\"arn\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"created_date\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tComputed: true,\n\t\t\t},\n\n\t\t\t\"instance_arn\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validation.All(\n\t\t\t\t\tvalidation.StringLenBetween(10, 1224),\n\t\t\t\t\tvalidation.StringMatch(regexp.MustCompile(`^arn:aws:sso:::instance\/(sso)?ins-[a-zA-Z0-9-.]{16}$`), \"must match arn:aws:sso:::instance\/(sso)?ins-[a-zA-Z0-9-.]{16}\"),\n\t\t\t\t),\n\t\t\t},\n\n\t\t\t\"name\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tRequired: true,\n\t\t\t\tForceNew: true,\n\t\t\t\tValidateFunc: validation.All(\n\t\t\t\t\tvalidation.StringLenBetween(1, 32),\n\t\t\t\t\tvalidation.StringMatch(regexp.MustCompile(`^[\\w+=,.@-]+$`), \"must match [\\\\w+=,.@-]\"),\n\t\t\t\t),\n\t\t\t},\n\n\t\t\t\"description\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tValidateFunc: validation.All(\n\t\t\t\t\tvalidation.StringLenBetween(1, 700),\n\t\t\t\t\tvalidation.StringMatch(regexp.MustCompile(`^[\\p{L}\\p{M}\\p{Z}\\p{S}\\p{N}\\p{P}]*$`), \"must match [\\\\p{L}\\\\p{M}\\\\p{Z}\\\\p{S}\\\\p{N}\\\\p{P}]\"),\n\t\t\t\t),\n\t\t\t},\n\n\t\t\t\"session_duration\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tValidateFunc: validation.StringLenBetween(1, 100),\n\t\t\t},\n\n\t\t\t\"relay_state\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tValidateFunc: validation.All(\n\t\t\t\t\tvalidation.StringLenBetween(1, 240),\n\t\t\t\t\tvalidation.StringMatch(regexp.MustCompile(`^[a-zA-Z0-9&$@#\\\\\\\/%?=~\\-_'\"|!:,.;*+\\[\\]\\(\\)\\{\\} ]+$`), \"must match [a-zA-Z0-9&$@#\\\\\\\\\\\\\/%?=~\\\\-_'\\\"|!:,.;*+\\\\[\\\\]\\\\(\\\\)\\\\{\\\\} ]\"),\n\t\t\t\t),\n\t\t\t},\n\n\t\t\t\"inline_policy\": {\n\t\t\t\tType: schema.TypeString,\n\t\t\t\tOptional: true,\n\t\t\t\tValidateFunc: validateIAMPolicyJson,\n\t\t\t\tDiffSuppressFunc: suppressEquivalentAwsPolicyDiffs,\n\t\t\t},\n\n\t\t\t\"managed_policies\": {\n\t\t\t\tType: schema.TypeSet,\n\t\t\t\tOptional: true,\n\t\t\t\tElem: &schema.Schema{\n\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\tValidateFunc: validateArn,\n\t\t\t\t},\n\t\t\t},\n\n\t\t\t\"tags\": tagsSchema(),\n\t\t},\n\t}\n}\n\nfunc resourceAwsSsoPermissionSetCreate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ssoadminconn\n\n\n\n\t\/\/ d.SetId(*resp.PermissionSetArn)\n\treturn resourceAwsSsoPermissionSetRead(d, meta)\n}\n\nfunc resourceAwsSsoPermissionSetRead(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ssoadminconn\n\t\/\/ TODO\n\treturn nil\n}\n\nfunc resourceAwsSsoPermissionSetUpdate(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ssoadminconn\n\t\/\/ TODO\n\treturn resourceAwsSsoPermissionSetRead(d, meta)\n}\n\nfunc resourceAwsSsoPermissionSetDelete(d *schema.ResourceData, meta interface{}) error {\n\tconn := meta.(*AWSClient).ssoadminconn\n\t\/\/ TODO\n\treturn nil\n}\n\n\/\/ func waitForPermissionSetProvisioning(conn *identitystore.IdentityStore, arn string) error {\n\/\/ }\n<|endoftext|>"} {"text":"<commit_before>package solr\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\nfunc HTTPPost(path string, data *[]byte, headers [][]string) ([]byte, error) {\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(\"POST\", path, bytes.NewReader(*data))\n\tif len(headers) > 0 {\n\t\tfor i := range headers {\n\t\t\treq.Header.Add(headers[i][0], headers[i][1])\n\t\t}\n\t}\n\n\tresp, err := client.Do(req)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}\n\nfunc HTTPGet(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn body, nil\n}\n\nfunc bytes2Json(data *[]byte) (*interface{}, error) {\n\tvar container interface{}\n\terr := json.Unmarshal(*data, &container)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Response decode error\")\n\t}\n\n\treturn &container, nil\n}\n\nfunc json2Bytes(data map[string]interface{}) (*[]byte, error) {\n\tb, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to encode JSON\")\n\t}\n\n\treturn &b, nil\n}\n\ntype SelectResponse struct {\n\tresults *Collection\n\tstatus int\n\tqtime int\n}\n\ntype UpdateResponse struct {\n\tsuccess bool\n}\n\ntype ErrorResponse struct {\n\tmessage string\n\tstatus int\n}\n\ntype Connection struct {\n\turl *url.URL\n}\n\nfunc NewConnection(solrUrl string) (*Connection, error) {\n\tu, err := url.Parse(solrUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\n\treturn &Connection{url: u}, nil\n}\n\nfunc (c *Connection) Select(selectUrl string) (*SelectResponse, error) {\n\treturn nil, nil\n}\n\nfunc (c *Connection) Update(data map[string]interface{}) (*UpdateResponse, error) {\n\tb, err := json2Bytes(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr, err := HTTPPost(c.url.String(), b, nil)\n\tif err != nil || r == nil {\n\t\treturn nil, err\n\t}\n\t\/\/resp, err := bytes2Json(&r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ check error in resp\n\n\treturn &UpdateResponse{true}, nil\n}\n\nfunc (c *Connection) Commit() (*UpdateResponse, error) {\n\treturn nil, nil\n}\n\nfunc (c *Connection) Optimize() (*UpdateResponse, error) {\n\treturn nil, nil\n}\n\nfunc (c *Connection) Rollback() (*UpdateResponse, error) {\n\treturn nil, nil\n}\n<commit_msg>Temporary \\\"use\\\" resp<commit_after>package solr\n\nimport (\n\t\"bytes\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"net\/url\"\n)\n\nfunc HTTPPost(path string, data *[]byte, headers [][]string) ([]byte, error) {\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(\"POST\", path, bytes.NewReader(*data))\n\tif len(headers) > 0 {\n\t\tfor i := range headers {\n\t\t\treq.Header.Add(headers[i][0], headers[i][1])\n\t\t}\n\t}\n\n\tresp, err := client.Do(req)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}\n\nfunc HTTPGet(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn body, nil\n}\n\nfunc bytes2Json(data *[]byte) (*interface{}, error) {\n\tvar container interface{}\n\terr := json.Unmarshal(*data, &container)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Response decode error\")\n\t}\n\n\treturn &container, nil\n}\n\nfunc json2Bytes(data map[string]interface{}) (*[]byte, error) {\n\tb, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to encode JSON\")\n\t}\n\n\treturn &b, nil\n}\n\ntype SelectResponse struct {\n\tresults *Collection\n\tstatus int\n\tqtime int\n}\n\ntype UpdateResponse struct {\n\tsuccess bool\n}\n\ntype ErrorResponse struct {\n\tmessage string\n\tstatus int\n}\n\ntype Connection struct {\n\turl *url.URL\n}\n\nfunc NewConnection(solrUrl string) (*Connection, error) {\n\tu, err := url.Parse(solrUrl)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\n\treturn &Connection{url: u}, nil\n}\n\nfunc (c *Connection) Select(selectUrl string) (*SelectResponse, error) {\n\treturn nil, nil\n}\n\nfunc (c *Connection) Update(data map[string]interface{}) (*UpdateResponse, error) {\n\tb, err := json2Bytes(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr, err := HTTPPost(c.url.String(), b, nil)\n\tif err != nil || r == nil {\n\t\treturn nil, err\n\t}\n\tresp, err := bytes2Json(&r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t\/\/ check error in resp\n\t_ = resp\n\t\n\treturn &UpdateResponse{true}, nil\n}\n\nfunc (c *Connection) Commit() (*UpdateResponse, error) {\n\treturn nil, nil\n}\n\nfunc (c *Connection) Optimize() (*UpdateResponse, error) {\n\treturn nil, nil\n}\n\nfunc (c *Connection) Rollback() (*UpdateResponse, error) {\n\treturn nil, nil\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2017 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage app\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n\n\t\"golang.org\/x\/perf\/benchstat\"\n\t\"golang.org\/x\/perf\/storage\/benchfmt\"\n\t\"golang.org\/x\/perf\/storage\/query\"\n)\n\n\/\/ A resultGroup holds a list of results and tracks the distinct labels found in that list.\ntype resultGroup struct {\n\t\/\/ The (partial) query string that resulted in this group.\n\tQ string\n\t\/\/ Raw list of results.\n\tresults []*benchfmt.Result\n\t\/\/ LabelValues is the count of results found with each distinct (key, value) pair found in labels.\n\t\/\/ A value of \"\" counts results missing that key.\n\tLabelValues map[string]valueSet\n}\n\n\/\/ add adds res to the resultGroup.\nfunc (g *resultGroup) add(res *benchfmt.Result) {\n\tg.results = append(g.results, res)\n\tif g.LabelValues == nil {\n\t\tg.LabelValues = make(map[string]valueSet)\n\t}\n\tfor k, v := range res.Labels {\n\t\tif g.LabelValues[k] == nil {\n\t\t\tg.LabelValues[k] = make(valueSet)\n\t\t\tif len(g.results) > 1 {\n\t\t\t\tg.LabelValues[k][\"\"] = len(g.results) - 1\n\t\t\t}\n\t\t}\n\t\tg.LabelValues[k][v]++\n\t}\n\tfor k := range g.LabelValues {\n\t\tif res.Labels[k] == \"\" {\n\t\t\tg.LabelValues[k][\"\"]++\n\t\t}\n\t}\n}\n\n\/\/ splitOn returns a new set of groups sharing a common value for key.\nfunc (g *resultGroup) splitOn(key string) []*resultGroup {\n\tgroups := make(map[string]*resultGroup)\n\tvar values []string\n\tfor _, res := range g.results {\n\t\tvalue := res.Labels[key]\n\t\tif groups[value] == nil {\n\t\t\tgroups[value] = &resultGroup{Q: key + \":\" + value}\n\t\t\tvalues = append(values, value)\n\t\t}\n\t\tgroups[value].add(res)\n\t}\n\n\tsort.Strings(values)\n\tvar out []*resultGroup\n\tfor _, value := range values {\n\t\tout = append(out, groups[value])\n\t}\n\treturn out\n}\n\n\/\/ valueSet is a set of values and the number of results with each value.\ntype valueSet map[string]int\n\n\/\/ valueCount and byCount are used for sorting a valueSet\ntype valueCount struct {\n\tValue string\n\tCount int\n}\ntype byCount []valueCount\n\nfunc (s byCount) Len() int { return len(s) }\nfunc (s byCount) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\n\nfunc (s byCount) Less(i, j int) bool {\n\tif s[i].Count != s[j].Count {\n\t\treturn s[i].Count > s[j].Count\n\t}\n\treturn s[i].Value < s[j].Value\n}\n\n\/\/ TopN returns a slice containing n valueCount entries, and if any labels were omitted, an extra entry with value \"…\".\nfunc (vs valueSet) TopN(n int) []valueCount {\n\tvar s []valueCount\n\tvar total int\n\tfor v, count := range vs {\n\t\ts = append(s, valueCount{v, count})\n\t\ttotal += count\n\t}\n\tsort.Sort(byCount(s))\n\tout := s\n\tif len(out) > n {\n\t\tout = s[:n]\n\t}\n\tif len(out) < len(s) {\n\t\tvar outTotal int\n\t\tfor _, vc := range out {\n\t\t\toutTotal += vc.Count\n\t\t}\n\t\tout = append(out, valueCount{\"…\", total - outTotal})\n\t}\n\treturn out\n}\n\n\/\/ addToQuery returns a new query string with add applied as a filter.\nfunc addToQuery(query, add string) string {\n\tif strings.ContainsAny(add, \" \\t\\\\\\\"\") {\n\t\tadd = strings.Replace(add, `\\`, `\\\\`, -1)\n\t\tadd = strings.Replace(add, `\"`, `\\\"`, -1)\n\t\tadd = `\"` + add + `\"`\n\t}\n\tif strings.Contains(query, \"|\") {\n\t\treturn add + \" \" + query\n\t}\n\treturn add + \" | \" + query\n}\n\n\/\/ compare handles queries that require comparison of the groups in the query.\nfunc (a *App) compare(w http.ResponseWriter, r *http.Request) {\n\tif err := r.ParseForm(); err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tq := r.Form.Get(\"q\")\n\n\ttmpl, err := ioutil.ReadFile(filepath.Join(a.BaseDir, \"template\/compare.html\"))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tt, err := template.New(\"main\").Funcs(template.FuncMap{\n\t\t\"addToQuery\": addToQuery,\n\t}).Parse(string(tmpl))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tdata := a.compareQuery(q)\n\n\tw.Header().Set(\"Content-Type\", \"text\/html; charset=utf-8\")\n\tif err := t.Execute(w, data); err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n}\n\ntype compareData struct {\n\tQ string\n\tError string\n\tBenchstat template.HTML\n\tGroups []*resultGroup\n\tLabels map[string]bool\n\tCommonLabels benchfmt.Labels\n}\n\n\/\/ queryKeys returns the keys that are exact-matched by q.\nfunc queryKeys(q string) map[string]bool {\n\tout := make(map[string]bool)\n\tfor _, part := range query.SplitWords(q) {\n\t\t\/\/ TODO(quentin): This func is shared with db.go; refactor?\n\t\ti := strings.IndexFunc(part, func(r rune) bool {\n\t\t\treturn r == ':' || r == '>' || r == '<' || unicode.IsSpace(r) || unicode.IsUpper(r)\n\t\t})\n\t\tif i >= 0 && part[i] == ':' {\n\t\t\tout[part[:i]] = true\n\t\t}\n\t}\n\treturn out\n}\n\n\/\/ elideKeyValues returns content, a benchmark format line, with the\n\/\/ values of any keys in keys elided.\nfunc elideKeyValues(content string, keys map[string]bool) string {\n\tvar end string\n\tif i := strings.IndexFunc(content, unicode.IsSpace); i >= 0 {\n\t\tcontent, end = content[:i], content[i:]\n\t}\n\t\/\/ Check for gomaxprocs value\n\tif i := strings.LastIndex(content, \"-\"); i >= 0 {\n\t\t_, err := strconv.Atoi(content[i+1:])\n\t\tif err == nil {\n\t\t\tif keys[\"gomaxprocs\"] {\n\t\t\t\tcontent, end = content[:i], \"-*\"+end\n\t\t\t} else {\n\t\t\t\tcontent, end = content[:i], content[i:]+end\n\t\t\t}\n\t\t}\n\t}\n\tparts := strings.Split(content, \"\/\")\n\tfor i, part := range parts {\n\t\tif equals := strings.Index(part, \"=\"); equals >= 0 {\n\t\t\tif keys[part[:equals]] {\n\t\t\t\tparts[i] = part[:equals] + \"=*\"\n\t\t\t}\n\t\t} else if i == 0 {\n\t\t\tif keys[\"name\"] {\n\t\t\t\tparts[i] = \"Benchmark*\"\n\t\t\t}\n\t\t} else if keys[fmt.Sprintf(\"sub%d\", i)] {\n\t\t\tparts[i] = \"*\"\n\t\t}\n\t}\n\treturn strings.Join(parts, \"\/\") + end\n}\n\n\/\/ fetchCompareResults fetches the matching results for a given query string.\n\/\/ The results will be grouped into one or more groups based on either the query string or heuristics.\nfunc (a *App) fetchCompareResults(q string) ([]*resultGroup, error) {\n\t\/\/ Parse query\n\tprefix, queries := parseQueryString(q)\n\n\t\/\/ Send requests\n\t\/\/ TODO(quentin): Issue requests in parallel?\n\tvar groups []*resultGroup\n\tvar found int\n\tfor _, qPart := range queries {\n\t\tkeys := queryKeys(qPart)\n\t\tgroup := &resultGroup{Q: qPart}\n\t\tif prefix != \"\" {\n\t\t\tqPart = prefix + \" \" + qPart\n\t\t}\n\t\tres := a.StorageClient.Query(qPart)\n\t\tfor res.Next() {\n\t\t\tresult := res.Result()\n\t\t\tresult.Content = elideKeyValues(result.Content, keys)\n\t\t\tgroup.add(result)\n\t\t\tfound++\n\t\t}\n\t\terr := res.Err()\n\t\tres.Close()\n\t\tif err != nil {\n\t\t\t\/\/ TODO: If the query is invalid, surface that to the user.\n\t\t\treturn nil, err\n\t\t}\n\t\tgroups = append(groups, group)\n\t}\n\n\tif found == 0 {\n\t\treturn nil, errors.New(\"no results matched the query string\")\n\t}\n\n\t\/\/ Attempt to automatically split results.\n\tif len(groups) == 1 {\n\t\tgroup := groups[0]\n\t\t\/\/ Matching a single upload with multiple files -> split by file\n\t\tif len(group.LabelValues[\"upload\"]) == 1 && len(group.LabelValues[\"upload-part\"]) > 1 {\n\t\t\tgroups = group.splitOn(\"upload-part\")\n\t\t}\n\t}\n\n\treturn groups, nil\n}\n\nfunc (a *App) compareQuery(q string) *compareData {\n\tif len(q) == 0 {\n\t\treturn &compareData{}\n\t}\n\n\tgroups, err := a.fetchCompareResults(q)\n\tif err != nil {\n\t\treturn &compareData{\n\t\t\tQ: q,\n\t\t\tError: err.Error(),\n\t\t}\n\t}\n\n\tvar buf bytes.Buffer\n\t\/\/ Compute benchstat\n\tc := new(benchstat.Collection)\n\tfor _, g := range groups {\n\t\tc.AddResults(g.Q, g.results)\n\t}\n\tbenchstat.FormatHTML(&buf, c.Tables())\n\n\t\/\/ Prepare struct for template.\n\tlabels := make(map[string]bool)\n\t\/\/ commonLabels are the key: value of every label that has an\n\t\/\/ identical value on every result.\n\tcommonLabels := make(benchfmt.Labels)\n\t\/\/ Scan the first group for common labels.\n\tfor k, vs := range groups[0].LabelValues {\n\t\tif len(vs) == 1 {\n\t\t\tfor v := range vs {\n\t\t\t\tcommonLabels[k] = v\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ Remove any labels not common in later groups.\n\tfor _, g := range groups[1:] {\n\t\tfor k, v := range commonLabels {\n\t\t\tif len(g.LabelValues[k]) != 1 || g.LabelValues[k][v] == 0 {\n\t\t\t\tdelete(commonLabels, k)\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ List all labels present and not in commonLabels.\n\tfor _, g := range groups {\n\t\tfor k := range g.LabelValues {\n\t\t\tif commonLabels[k] != \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlabels[k] = true\n\t\t}\n\t}\n\tdata := &compareData{\n\t\tQ: q,\n\t\tBenchstat: template.HTML(buf.String()),\n\t\tGroups: groups,\n\t\tLabels: labels,\n\t\tCommonLabels: commonLabels,\n\t}\n\treturn data\n}\n\n\/\/ textCompare is called if benchsave is requesting a text-only analysis.\nfunc (a *App) textCompare(w http.ResponseWriter, r *http.Request) {\n\tif err := r.ParseForm(); err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text\/plain; charset=utf-8\")\n\n\tq := r.Form.Get(\"q\")\n\n\tgroups, err := a.fetchCompareResults(q)\n\tif err != nil {\n\t\t\/\/ TODO(quentin): Should we serve this with a 500 or 404? This means the query was invalid or had no results.\n\t\tfmt.Fprintf(w, \"unable to analyze results: %v\", err)\n\t}\n\n\t\/\/ Compute benchstat\n\tc := new(benchstat.Collection)\n\tfor _, g := range groups {\n\t\tc.AddResults(g.Q, g.results)\n\t}\n\tbenchstat.FormatText(w, c.Tables())\n}\n<commit_msg>analysis\/app: show the geomean<commit_after>\/\/ Copyright 2017 The Go Authors. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage app\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"html\/template\"\n\t\"io\/ioutil\"\n\t\"net\/http\"\n\t\"path\/filepath\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n\n\t\"golang.org\/x\/perf\/benchstat\"\n\t\"golang.org\/x\/perf\/storage\/benchfmt\"\n\t\"golang.org\/x\/perf\/storage\/query\"\n)\n\n\/\/ A resultGroup holds a list of results and tracks the distinct labels found in that list.\ntype resultGroup struct {\n\t\/\/ The (partial) query string that resulted in this group.\n\tQ string\n\t\/\/ Raw list of results.\n\tresults []*benchfmt.Result\n\t\/\/ LabelValues is the count of results found with each distinct (key, value) pair found in labels.\n\t\/\/ A value of \"\" counts results missing that key.\n\tLabelValues map[string]valueSet\n}\n\n\/\/ add adds res to the resultGroup.\nfunc (g *resultGroup) add(res *benchfmt.Result) {\n\tg.results = append(g.results, res)\n\tif g.LabelValues == nil {\n\t\tg.LabelValues = make(map[string]valueSet)\n\t}\n\tfor k, v := range res.Labels {\n\t\tif g.LabelValues[k] == nil {\n\t\t\tg.LabelValues[k] = make(valueSet)\n\t\t\tif len(g.results) > 1 {\n\t\t\t\tg.LabelValues[k][\"\"] = len(g.results) - 1\n\t\t\t}\n\t\t}\n\t\tg.LabelValues[k][v]++\n\t}\n\tfor k := range g.LabelValues {\n\t\tif res.Labels[k] == \"\" {\n\t\t\tg.LabelValues[k][\"\"]++\n\t\t}\n\t}\n}\n\n\/\/ splitOn returns a new set of groups sharing a common value for key.\nfunc (g *resultGroup) splitOn(key string) []*resultGroup {\n\tgroups := make(map[string]*resultGroup)\n\tvar values []string\n\tfor _, res := range g.results {\n\t\tvalue := res.Labels[key]\n\t\tif groups[value] == nil {\n\t\t\tgroups[value] = &resultGroup{Q: key + \":\" + value}\n\t\t\tvalues = append(values, value)\n\t\t}\n\t\tgroups[value].add(res)\n\t}\n\n\tsort.Strings(values)\n\tvar out []*resultGroup\n\tfor _, value := range values {\n\t\tout = append(out, groups[value])\n\t}\n\treturn out\n}\n\n\/\/ valueSet is a set of values and the number of results with each value.\ntype valueSet map[string]int\n\n\/\/ valueCount and byCount are used for sorting a valueSet\ntype valueCount struct {\n\tValue string\n\tCount int\n}\ntype byCount []valueCount\n\nfunc (s byCount) Len() int { return len(s) }\nfunc (s byCount) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\n\nfunc (s byCount) Less(i, j int) bool {\n\tif s[i].Count != s[j].Count {\n\t\treturn s[i].Count > s[j].Count\n\t}\n\treturn s[i].Value < s[j].Value\n}\n\n\/\/ TopN returns a slice containing n valueCount entries, and if any labels were omitted, an extra entry with value \"…\".\nfunc (vs valueSet) TopN(n int) []valueCount {\n\tvar s []valueCount\n\tvar total int\n\tfor v, count := range vs {\n\t\ts = append(s, valueCount{v, count})\n\t\ttotal += count\n\t}\n\tsort.Sort(byCount(s))\n\tout := s\n\tif len(out) > n {\n\t\tout = s[:n]\n\t}\n\tif len(out) < len(s) {\n\t\tvar outTotal int\n\t\tfor _, vc := range out {\n\t\t\toutTotal += vc.Count\n\t\t}\n\t\tout = append(out, valueCount{\"…\", total - outTotal})\n\t}\n\treturn out\n}\n\n\/\/ addToQuery returns a new query string with add applied as a filter.\nfunc addToQuery(query, add string) string {\n\tif strings.ContainsAny(add, \" \\t\\\\\\\"\") {\n\t\tadd = strings.Replace(add, `\\`, `\\\\`, -1)\n\t\tadd = strings.Replace(add, `\"`, `\\\"`, -1)\n\t\tadd = `\"` + add + `\"`\n\t}\n\tif strings.Contains(query, \"|\") {\n\t\treturn add + \" \" + query\n\t}\n\treturn add + \" | \" + query\n}\n\n\/\/ compare handles queries that require comparison of the groups in the query.\nfunc (a *App) compare(w http.ResponseWriter, r *http.Request) {\n\tif err := r.ParseForm(); err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tq := r.Form.Get(\"q\")\n\n\ttmpl, err := ioutil.ReadFile(filepath.Join(a.BaseDir, \"template\/compare.html\"))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tt, err := template.New(\"main\").Funcs(template.FuncMap{\n\t\t\"addToQuery\": addToQuery,\n\t}).Parse(string(tmpl))\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tdata := a.compareQuery(q)\n\n\tw.Header().Set(\"Content-Type\", \"text\/html; charset=utf-8\")\n\tif err := t.Execute(w, data); err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n}\n\ntype compareData struct {\n\tQ string\n\tError string\n\tBenchstat template.HTML\n\tGroups []*resultGroup\n\tLabels map[string]bool\n\tCommonLabels benchfmt.Labels\n}\n\n\/\/ queryKeys returns the keys that are exact-matched by q.\nfunc queryKeys(q string) map[string]bool {\n\tout := make(map[string]bool)\n\tfor _, part := range query.SplitWords(q) {\n\t\t\/\/ TODO(quentin): This func is shared with db.go; refactor?\n\t\ti := strings.IndexFunc(part, func(r rune) bool {\n\t\t\treturn r == ':' || r == '>' || r == '<' || unicode.IsSpace(r) || unicode.IsUpper(r)\n\t\t})\n\t\tif i >= 0 && part[i] == ':' {\n\t\t\tout[part[:i]] = true\n\t\t}\n\t}\n\treturn out\n}\n\n\/\/ elideKeyValues returns content, a benchmark format line, with the\n\/\/ values of any keys in keys elided.\nfunc elideKeyValues(content string, keys map[string]bool) string {\n\tvar end string\n\tif i := strings.IndexFunc(content, unicode.IsSpace); i >= 0 {\n\t\tcontent, end = content[:i], content[i:]\n\t}\n\t\/\/ Check for gomaxprocs value\n\tif i := strings.LastIndex(content, \"-\"); i >= 0 {\n\t\t_, err := strconv.Atoi(content[i+1:])\n\t\tif err == nil {\n\t\t\tif keys[\"gomaxprocs\"] {\n\t\t\t\tcontent, end = content[:i], \"-*\"+end\n\t\t\t} else {\n\t\t\t\tcontent, end = content[:i], content[i:]+end\n\t\t\t}\n\t\t}\n\t}\n\tparts := strings.Split(content, \"\/\")\n\tfor i, part := range parts {\n\t\tif equals := strings.Index(part, \"=\"); equals >= 0 {\n\t\t\tif keys[part[:equals]] {\n\t\t\t\tparts[i] = part[:equals] + \"=*\"\n\t\t\t}\n\t\t} else if i == 0 {\n\t\t\tif keys[\"name\"] {\n\t\t\t\tparts[i] = \"Benchmark*\"\n\t\t\t}\n\t\t} else if keys[fmt.Sprintf(\"sub%d\", i)] {\n\t\t\tparts[i] = \"*\"\n\t\t}\n\t}\n\treturn strings.Join(parts, \"\/\") + end\n}\n\n\/\/ fetchCompareResults fetches the matching results for a given query string.\n\/\/ The results will be grouped into one or more groups based on either the query string or heuristics.\nfunc (a *App) fetchCompareResults(q string) ([]*resultGroup, error) {\n\t\/\/ Parse query\n\tprefix, queries := parseQueryString(q)\n\n\t\/\/ Send requests\n\t\/\/ TODO(quentin): Issue requests in parallel?\n\tvar groups []*resultGroup\n\tvar found int\n\tfor _, qPart := range queries {\n\t\tkeys := queryKeys(qPart)\n\t\tgroup := &resultGroup{Q: qPart}\n\t\tif prefix != \"\" {\n\t\t\tqPart = prefix + \" \" + qPart\n\t\t}\n\t\tres := a.StorageClient.Query(qPart)\n\t\tfor res.Next() {\n\t\t\tresult := res.Result()\n\t\t\tresult.Content = elideKeyValues(result.Content, keys)\n\t\t\tgroup.add(result)\n\t\t\tfound++\n\t\t}\n\t\terr := res.Err()\n\t\tres.Close()\n\t\tif err != nil {\n\t\t\t\/\/ TODO: If the query is invalid, surface that to the user.\n\t\t\treturn nil, err\n\t\t}\n\t\tgroups = append(groups, group)\n\t}\n\n\tif found == 0 {\n\t\treturn nil, errors.New(\"no results matched the query string\")\n\t}\n\n\t\/\/ Attempt to automatically split results.\n\tif len(groups) == 1 {\n\t\tgroup := groups[0]\n\t\t\/\/ Matching a single upload with multiple files -> split by file\n\t\tif len(group.LabelValues[\"upload\"]) == 1 && len(group.LabelValues[\"upload-part\"]) > 1 {\n\t\t\tgroups = group.splitOn(\"upload-part\")\n\t\t}\n\t}\n\n\treturn groups, nil\n}\n\nfunc (a *App) compareQuery(q string) *compareData {\n\tif len(q) == 0 {\n\t\treturn &compareData{}\n\t}\n\n\tgroups, err := a.fetchCompareResults(q)\n\tif err != nil {\n\t\treturn &compareData{\n\t\t\tQ: q,\n\t\t\tError: err.Error(),\n\t\t}\n\t}\n\n\tvar buf bytes.Buffer\n\t\/\/ Compute benchstat\n\tc := &benchstat.Collection{\n\t\tAddGeoMean: true,\n\t}\n\tfor _, g := range groups {\n\t\tc.AddResults(g.Q, g.results)\n\t}\n\tbenchstat.FormatHTML(&buf, c.Tables())\n\n\t\/\/ Prepare struct for template.\n\tlabels := make(map[string]bool)\n\t\/\/ commonLabels are the key: value of every label that has an\n\t\/\/ identical value on every result.\n\tcommonLabels := make(benchfmt.Labels)\n\t\/\/ Scan the first group for common labels.\n\tfor k, vs := range groups[0].LabelValues {\n\t\tif len(vs) == 1 {\n\t\t\tfor v := range vs {\n\t\t\t\tcommonLabels[k] = v\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ Remove any labels not common in later groups.\n\tfor _, g := range groups[1:] {\n\t\tfor k, v := range commonLabels {\n\t\t\tif len(g.LabelValues[k]) != 1 || g.LabelValues[k][v] == 0 {\n\t\t\t\tdelete(commonLabels, k)\n\t\t\t}\n\t\t}\n\t}\n\t\/\/ List all labels present and not in commonLabels.\n\tfor _, g := range groups {\n\t\tfor k := range g.LabelValues {\n\t\t\tif commonLabels[k] != \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tlabels[k] = true\n\t\t}\n\t}\n\tdata := &compareData{\n\t\tQ: q,\n\t\tBenchstat: template.HTML(buf.String()),\n\t\tGroups: groups,\n\t\tLabels: labels,\n\t\tCommonLabels: commonLabels,\n\t}\n\treturn data\n}\n\n\/\/ textCompare is called if benchsave is requesting a text-only analysis.\nfunc (a *App) textCompare(w http.ResponseWriter, r *http.Request) {\n\tif err := r.ParseForm(); err != nil {\n\t\thttp.Error(w, err.Error(), 500)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"text\/plain; charset=utf-8\")\n\n\tq := r.Form.Get(\"q\")\n\n\tgroups, err := a.fetchCompareResults(q)\n\tif err != nil {\n\t\t\/\/ TODO(quentin): Should we serve this with a 500 or 404? This means the query was invalid or had no results.\n\t\tfmt.Fprintf(w, \"unable to analyze results: %v\", err)\n\t}\n\n\t\/\/ Compute benchstat\n\tc := new(benchstat.Collection)\n\tfor _, g := range groups {\n\t\tc.AddResults(g.Q, g.results)\n\t}\n\tbenchstat.FormatText(w, c.Tables())\n}\n<|endoftext|>"} {"text":"<commit_before><commit_msg>added function to return Ints from an interface<commit_after><|endoftext|>"} {"text":"<commit_before>package goFlags\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n)\n\ntype testStruct struct {\n\tA int `flag:\"A\" flagDefault:\"100\"`\n\tB string `flag:\"B\" flagDefault:\"200\"`\n\tC string\n\tN string `flag:\"-\"`\n\tp string\n\tS testSub `flag:\"S\"`\n}\n\ntype testSub struct {\n\tA int `flag:\"A\" flagDefault:\"300\"`\n\tB string `flag:\"C\" flagDefault:\"400\"`\n\tS testSubSub `flag:\"S\"`\n}\ntype testSubSub struct {\n\tA int `flag:\"A\" flagDefault:\"500\"`\n\tB string `flag:\"S\" flagDefault:\"600\"`\n}\n\nfunc TestParse(t *testing.T) {\n\n\t\/\/os.Args = []string{\"noop\", \"-flag1=val1\", \"arg1\", \"arg2\"}\n\t\/\/os.Args = []string{\"program\", \"-h\"}\n\n\tos.Args = []string{\n\t\t\"program\",\n\t\t\"-a=8888\",\n\t\t\"-b=TEST\",\n\t\t\"-s_s_a=9999\",\n\t}\n\n\ts := &testStruct{A: 1, S: testSub{A: 1, B: \"2\"}}\n\n\terr := Parse(s)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfmt.Printf(\"\\n\\nTestParseTags: %#v\\n\\n\", s)\n\n}\n<commit_msg>test preset function<commit_after>package goFlags\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n)\n\ntype testStruct struct {\n\tA int `flag:\"A\" flagDefault:\"100\"`\n\tB string `flag:\"B\" flagDefault:\"200\"`\n\tC string\n\tN string `flag:\"-\"`\n\tp string\n\tS testSub `flag:\"S\"`\n}\n\ntype testSub struct {\n\tA int `flag:\"A\" flagDefault:\"300\"`\n\tB string `flag:\"C\" flagDefault:\"400\"`\n\tS testSubSub `flag:\"S\"`\n}\ntype testSubSub struct {\n\tA int `flag:\"A\" flagDefault:\"500\"`\n\tB string `flag:\"S\" flagDefault:\"600\"`\n}\n\nfunc TestParse(t *testing.T) {\n\n\t\/\/os.Args = []string{\"noop\", \"-flag1=val1\", \"arg1\", \"arg2\"}\n\t\/\/os.Args = []string{\"program\", \"-h\"}\n\n\tos.Args = []string{\n\t\t\"program\",\n\t\t\"-a=8888\",\n\t\t\"-b=TEST\",\n\t\t\"-s_s_a=9999\",\n\t}\n\n\ts := &testStruct{A: 1, S: testSub{A: 1, B: \"2\"}}\n\n\tPreserve = false\n\terr := Parse(s)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfmt.Printf(\"\\n\\nTestParseTags: %#v\\n\\n\", s)\n\n\tReset()\n\terr = Parse(s)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfmt.Printf(\"\\n\\nTestParseTags: %#v\\n\\n\", s)\n}\n\nfunc TestPreserve(t *testing.T) {\n\n\t\/\/os.Args = []string{\"noop\", \"-flag1=val1\", \"arg1\", \"arg2\"}\n\t\/\/os.Args = []string{\"program\", \"-h\"}\n\n\tos.Args = []string{\n\t\t\"program\",\n\t\t\"-a=8888\",\n\t\t\"-b=TEST\",\n\t\t\"-s_s_a=9999\",\n\t}\n\n\ts := &testStruct{A: 1, S: testSub{A: 1, B: \"2\"}}\n\n\tReset()\n\tPreserve = true\n\terr := Parse(s)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfmt.Printf(\"\\n\\nTestPreserve: %#v\\n\\n\", s)\n}\n<|endoftext|>"} {"text":"<commit_before>package graph\n\n\/\/ Copyright ©2012 Dan Kortschak <dan.kortschak@adelaide.edu.au>\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU General Public License\n\/\/ along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\nimport (\n\t\"math\"\n\t\"runtime\"\n\t\"sync\"\n)\n\n\/\/ FIXME Use Index() instead of ID() on edges and nodes - this requires a change to node.go\n\nconst sqrt2 = 1.4142135623730950488016887242096980785696718753769480\n\nvar MaxProcs = runtime.GOMAXPROCS(0)\n\nfunc FastRandMinCut(g *Undirected, iter int) (c []*Edge, w float64) {\n\tk := newKargerR(g)\n\tk.init()\n\tw = math.Inf(1)\n\tfor i := 0; i < iter; i++ {\n\t\tk.fastRandMinCut()\n\t\tif k.w < w {\n\t\t\tw = k.w\n\t\t\tc = k.c\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ parallelised outside the recursion tree\n\nfunc FastRandMinCutPar(g *Undirected, iter, thread int) (c []*Edge, w float64) {\n\tif thread > MaxProcs {\n\t\tthread = MaxProcs\n\t}\n\tif thread > iter {\n\t\tthread = iter\n\t}\n\titer, rem := iter\/thread+1, iter%thread\n\n\ttype r struct {\n\t\tc []*Edge\n\t\tw float64\n\t}\n\trs := make([]*r, thread)\n\n\twg := &sync.WaitGroup{}\n\tfor j := 0; j < thread; j++ {\n\t\tif rem == 0 {\n\t\t\titer--\n\t\t}\n\t\tif rem >= 0 {\n\t\t\trem--\n\t\t}\n\t\twg.Add(1)\n\t\tgo func(j, iter int) {\n\t\t\tdefer wg.Done()\n\t\t\tk := newKargerR(g)\n\t\t\tk.init()\n\t\t\tvar (\n\t\t\t\tw = math.Inf(1)\n\t\t\t\tc []*Edge\n\t\t\t)\n\t\t\tfor i := 0; i < iter; i++ {\n\t\t\t\tk.fastRandMinCut()\n\t\t\t\tif k.w < w {\n\t\t\t\t\tw = k.w\n\t\t\t\t\tc = k.c\n\t\t\t\t}\n\t\t\t}\n\n\t\t\trs[j] = &r{c, w}\n\t\t}(j, iter)\n\t}\n\n\tw = math.Inf(1)\n\twg.Wait()\n\tfor _, subr := range rs {\n\t\tif subr.w < w {\n\t\t\tw = subr.w\n\t\t\tc = subr.c\n\t\t}\n\t}\n\n\treturn\n}\n\ntype kargerR struct {\n\tg *Undirected\n\torder int\n\tind []super\n\tsel Selector\n\tc []*Edge\n\tw float64\n}\n\nfunc newKargerR(g *Undirected) *kargerR {\n\treturn &kargerR{\n\t\tg: g,\n\t\tind: make([]super, g.NextNodeID()),\n\t\tsel: make(Selector, g.Size()),\n\t}\n}\n\nfunc (self *kargerR) init() {\n\tself.order = self.g.Order()\n\tfor i := range self.ind {\n\t\tself.ind[i].label = -1\n\t\tself.ind[i].nodes = nil\n\t}\n\tfor _, n := range self.g.Nodes() {\n\t\tid := n.ID()\n\t\tself.ind[id].label = id\n\t}\n\tfor i, e := range self.g.Edges() {\n\t\tself.sel[i] = WeightedItem{Index: e.ID(), Weight: e.Weight()}\n\t}\n\tself.sel.Init()\n}\n\nfunc (self *kargerR) copy(t *kargerR) {\n\tself.order = t.order\n\tcopy(self.sel, t.sel)\n\tfor i, n := range t.ind {\n\t\ts := &self.ind[i]\n\t\ts.label = n.label\n\t\tif n.nodes != nil {\n\t\t\ts.nodes = make([]int, len(n.nodes))\n\t\t\tcopy(s.nodes, n.nodes)\n\t\t}\n\t}\n}\n\nfunc (self *kargerR) fastRandMinCut() {\n\tif self.order <= 6 {\n\t\tself.randCompact(2)\n\t\treturn\n\t}\n\n\tt := int(math.Ceil(float64(self.order)\/sqrt2 + 1))\n\n\tsub := []*kargerR{self, newKargerR(self.g)}\n\tsub[1].copy(self)\n\tfor i := range sub {\n\t\tsub[i].randContract(t)\n\t\tsub[i].fastRandMinCut()\n\t}\n\n\tif sub[0].w < sub[1].w {\n\t\t*self = *sub[0]\n\t\treturn\n\t}\n\t*self = *sub[1]\n}\n\nfunc (self *kargerR) randContract(k int) {\n\tfor self.order > k {\n\t\tid, err := self.sel.Select()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\te := self.g.Edge(id)\n\t\tif self.loop(e) {\n\t\t\tcontinue\n\t\t}\n\n\t\thid, tid := e.Head().ID(), e.Tail().ID()\n\t\thl, tl := self.ind[hid].label, self.ind[tid].label\n\t\tif len(self.ind[hl].nodes) < len(self.ind[tl].nodes) {\n\t\t\thid, tid = tid, hid\n\t\t\thl, tl = tl, hl\n\t\t}\n\n\t\tif self.ind[hl].nodes == nil {\n\t\t\tself.ind[hl].nodes = []int{hid}\n\t\t}\n\t\tif self.ind[tl].nodes == nil {\n\t\t\tself.ind[hl].nodes = append(self.ind[hl].nodes, tid)\n\t\t} else {\n\t\t\tself.ind[hl].nodes = append(self.ind[hl].nodes, self.ind[tl].nodes...)\n\t\t\tself.ind[tl].nodes = nil\n\t\t}\n\t\tfor _, i := range self.ind[hl].nodes {\n\t\t\tself.ind[i].label = self.ind[hid].label\n\t\t}\n\n\t\tself.order--\n\t}\n}\n\nfunc (self *kargerR) randCompact(k int) {\n\tfor self.order > k {\n\t\tid, err := self.sel.Select()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\te := self.g.Edge(id)\n\t\tif self.loop(e) {\n\t\t\tcontinue\n\t\t}\n\n\t\thid, tid := e.Head().ID(), e.Tail().ID()\n\t\thl, tl := self.ind[hid].label, self.ind[tid].label\n\t\tif len(self.ind[hl].nodes) < len(self.ind[tl].nodes) {\n\t\t\thid, tid = tid, hid\n\t\t\thl, tl = tl, hl\n\t\t}\n\n\t\tif self.ind[hl].nodes == nil {\n\t\t\tself.ind[hl].nodes = []int{hid}\n\t\t}\n\t\tif self.ind[tl].nodes == nil {\n\t\t\tself.ind[hl].nodes = append(self.ind[hl].nodes, tid)\n\t\t} else {\n\t\t\tself.ind[hl].nodes = append(self.ind[hl].nodes, self.ind[tl].nodes...)\n\t\t\tself.ind[tl].nodes = nil\n\t\t}\n\t\tfor _, i := range self.ind[hl].nodes {\n\t\t\tself.ind[i].label = self.ind[hid].label\n\t\t}\n\n\t\tself.order--\n\t}\n\n\tself.c, self.w = []*Edge{}, 0\n\tfor _, e := range self.g.Edges() {\n\t\tif self.loop(e) {\n\t\t\tcontinue\n\t\t}\n\t\tself.c = append(self.c, e)\n\t\tself.w += e.Weight()\n\t}\n}\n\nfunc (self *kargerR) loop(e *Edge) bool {\n\treturn self.ind[e.Head().ID()].label == self.ind[e.Tail().ID()].label\n}\n\n\/\/ parallelised within the recursion tree\n\nfunc ParFastRandMinCut(g *Undirected, iter, threads int) (c []*Edge, w float64) {\n\tk := newKargerRP(g)\n\tk.split = bits(threads)\n\tif k.split == 0 {\n\t\tk.split = -1\n\t}\n\tk.init()\n\tw = math.Inf(1)\n\tfor i := 0; i < iter; i++ {\n\t\tk.fastRandMinCut()\n\t\tif k.w < w {\n\t\t\tw = k.w\n\t\t\tc = k.c\n\t\t}\n\t}\n\n\treturn\n}\n\ntype kargerRP struct {\n\tg *Undirected\n\torder int\n\tind []super\n\tsel Selector\n\tc []*Edge\n\tw float64\n\tdepth int\n\tsplit int\n}\n\nfunc bits(i int) (b int) {\n\tfor ; i > 1; i >>= 1 {\n\t\tb++\n\t}\n\treturn\n}\n\nfunc newKargerRP(g *Undirected) *kargerRP {\n\treturn &kargerRP{\n\t\tg: g,\n\t\tind: make([]super, g.NextNodeID()),\n\t\tsel: make(Selector, g.Size()),\n\t}\n}\n\nfunc (self *kargerRP) init() {\n\tself.order = self.g.Order()\n\tfor i := range self.ind {\n\t\tself.ind[i].label = -1\n\t\tself.ind[i].nodes = nil\n\t}\n\tfor _, n := range self.g.Nodes() {\n\t\tid := n.ID()\n\t\tself.ind[id].label = id\n\t}\n\tfor i, e := range self.g.Edges() {\n\t\tself.sel[i] = WeightedItem{Index: e.ID(), Weight: e.Weight()}\n\t}\n\tself.sel.Init()\n}\n\nfunc (self *kargerRP) copy(t *kargerRP) {\n\tself.order = t.order\n\tself.depth = t.depth\n\tself.split = t.split\n\tcopy(self.sel, t.sel)\n\tfor i, n := range t.ind {\n\t\ts := &self.ind[i]\n\t\ts.label = n.label\n\t\tif n.nodes != nil {\n\t\t\ts.nodes = make([]int, len(n.nodes))\n\t\t\tcopy(s.nodes, n.nodes)\n\t\t}\n\t}\n}\n\nfunc (self *kargerRP) fastRandMinCut() {\n\tself.depth++\n\tif self.order <= 6 {\n\t\tself.randCompact(2)\n\t\treturn\n\t}\n\n\tt := int(math.Ceil(float64(self.order)\/sqrt2 + 1))\n\n\tvar wg *sync.WaitGroup\n\tif self.depth < self.split {\n\t\twg = &sync.WaitGroup{}\n\t}\n\n\tsub := []*kargerRP{self, newKargerRP(self.g)}\n\tsub[1].copy(self)\n\tfor i := range sub {\n\t\tif wg != nil {\n\t\t\twg.Add(1)\n\t\t\tgo func(i int) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tsub[i].randContract(t)\n\t\t\t\tsub[i].fastRandMinCut()\n\t\t\t}(i)\n\t\t} else {\n\t\t\tsub[i].randContract(t)\n\t\t\tsub[i].fastRandMinCut()\n\t\t}\n\t}\n\n\tif wg != nil {\n\t\twg.Wait()\n\t}\n\n\tif sub[0].w < sub[1].w {\n\t\t*self = *sub[0]\n\t\treturn\n\t}\n\t*self = *sub[1]\n}\n\nfunc (self *kargerRP) randContract(k int) {\n\tfor self.order > k {\n\t\tid, err := self.sel.Select()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\te := self.g.Edge(id)\n\t\tif self.loop(e) {\n\t\t\tcontinue\n\t\t}\n\n\t\thid, tid := e.Head().ID(), e.Tail().ID()\n\t\thl, tl := self.ind[hid].label, self.ind[tid].label\n\t\tif len(self.ind[hl].nodes) < len(self.ind[tl].nodes) {\n\t\t\thid, tid = tid, hid\n\t\t\thl, tl = tl, hl\n\t\t}\n\n\t\tif self.ind[hl].nodes == nil {\n\t\t\tself.ind[hl].nodes = []int{hid}\n\t\t}\n\t\tif self.ind[tl].nodes == nil {\n\t\t\tself.ind[hl].nodes = append(self.ind[hl].nodes, tid)\n\t\t} else {\n\t\t\tself.ind[hl].nodes = append(self.ind[hl].nodes, self.ind[tl].nodes...)\n\t\t\tself.ind[tl].nodes = nil\n\t\t}\n\t\tfor _, i := range self.ind[hl].nodes {\n\t\t\tself.ind[i].label = self.ind[hid].label\n\t\t}\n\n\t\tself.order--\n\t}\n}\n\nfunc (self *kargerRP) randCompact(k int) {\n\tfor self.order > k {\n\t\tid, err := self.sel.Select()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\te := self.g.Edge(id)\n\t\tif self.loop(e) {\n\t\t\tcontinue\n\t\t}\n\n\t\thid, tid := e.Head().ID(), e.Tail().ID()\n\t\thl, tl := self.ind[hid].label, self.ind[tid].label\n\t\tif len(self.ind[hl].nodes) < len(self.ind[tl].nodes) {\n\t\t\thid, tid = tid, hid\n\t\t\thl, tl = tl, hl\n\t\t}\n\n\t\tif self.ind[hl].nodes == nil {\n\t\t\tself.ind[hl].nodes = []int{hid}\n\t\t}\n\t\tif self.ind[tl].nodes == nil {\n\t\t\tself.ind[hl].nodes = append(self.ind[hl].nodes, tid)\n\t\t} else {\n\t\t\tself.ind[hl].nodes = append(self.ind[hl].nodes, self.ind[tl].nodes...)\n\t\t\tself.ind[tl].nodes = nil\n\t\t}\n\t\tfor _, i := range self.ind[hl].nodes {\n\t\t\tself.ind[i].label = self.ind[hid].label\n\t\t}\n\n\t\tself.order--\n\t}\n\n\tself.c, self.w = []*Edge{}, 0\n\tfor _, e := range self.g.Edges() {\n\t\tif self.loop(e) {\n\t\t\tcontinue\n\t\t}\n\t\tself.c = append(self.c, e)\n\t\tself.w += e.Weight()\n\t}\n}\n\nfunc (self *kargerRP) loop(e *Edge) bool {\n\treturn self.ind[e.Head().ID()].label == self.ind[e.Tail().ID()].label\n}\n<commit_msg>Performance egression due to previous commit<commit_after>package graph\n\n\/\/ Copyright ©2012 Dan Kortschak <dan.kortschak@adelaide.edu.au>\n\/\/\n\/\/ This program is free software: you can redistribute it and\/or modify\n\/\/ it under the terms of the GNU General Public License as published by\n\/\/ the Free Software Foundation, either version 3 of the License, or\n\/\/ (at your option) any later version.\n\/\/\n\/\/ This program is distributed in the hope that it will be useful,\n\/\/ but WITHOUT ANY WARRANTY; without even the implied warranty of\n\/\/ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\/\/ GNU General Public License for more details.\n\/\/\n\/\/ You should have received a copy of the GNU General Public License\n\/\/ along with this program. If not, see <http:\/\/www.gnu.org\/licenses\/>.\n\nimport (\n\t\"math\"\n\t\"runtime\"\n\t\"sync\"\n)\n\n\/\/ FIXME Use Index() instead of ID() on edges and nodes - this requires a change to node.go\n\nconst sqrt2 = 1.4142135623730950488016887242096980785696718753769480\n\nvar MaxProcs = runtime.GOMAXPROCS(0)\n\nfunc FastRandMinCut(g *Undirected, iter int) (c []*Edge, w float64) {\n\tk := newKargerR(g)\n\tk.init()\n\tw = math.Inf(1)\n\tfor i := 0; i < iter; i++ {\n\t\tk.fastRandMinCut()\n\t\tif k.w < w {\n\t\t\tw = k.w\n\t\t\tc = k.c\n\t\t}\n\t}\n\n\treturn\n}\n\n\/\/ parallelised outside the recursion tree\n\nfunc FastRandMinCutPar(g *Undirected, iter, thread int) (c []*Edge, w float64) {\n\tif thread > MaxProcs {\n\t\tthread = MaxProcs\n\t}\n\tif thread > iter {\n\t\tthread = iter\n\t}\n\titer, rem := iter\/thread+1, iter%thread\n\n\ttype r struct {\n\t\tc []*Edge\n\t\tw float64\n\t}\n\trs := make([]*r, thread)\n\n\twg := &sync.WaitGroup{}\n\tfor j := 0; j < thread; j++ {\n\t\tif rem == 0 {\n\t\t\titer--\n\t\t}\n\t\tif rem >= 0 {\n\t\t\trem--\n\t\t}\n\t\twg.Add(1)\n\t\tgo func(j, iter int) {\n\t\t\tdefer wg.Done()\n\t\t\tk := newKargerR(g)\n\t\t\tk.init()\n\t\t\tvar (\n\t\t\t\tw = math.Inf(1)\n\t\t\t\tc []*Edge\n\t\t\t)\n\t\t\tfor i := 0; i < iter; i++ {\n\t\t\t\tk.fastRandMinCut()\n\t\t\t\tif k.w < w {\n\t\t\t\t\tw = k.w\n\t\t\t\t\tc = k.c\n\t\t\t\t}\n\t\t\t}\n\n\t\t\trs[j] = &r{c, w}\n\t\t}(j, iter)\n\t}\n\n\tw = math.Inf(1)\n\twg.Wait()\n\tfor _, subr := range rs {\n\t\tif subr.w < w {\n\t\t\tw = subr.w\n\t\t\tc = subr.c\n\t\t}\n\t}\n\n\treturn\n}\n\ntype kargerR struct {\n\tg *Undirected\n\torder int\n\tind []super\n\tsel Selector\n\tc []*Edge\n\tw float64\n}\n\nfunc newKargerR(g *Undirected) *kargerR {\n\treturn &kargerR{\n\t\tg: g,\n\t\tind: make([]super, g.NextNodeID()),\n\t\tsel: make(Selector, g.Size()),\n\t}\n}\n\nfunc (self *kargerR) init() {\n\tself.order = self.g.Order()\n\tfor i := range self.ind {\n\t\tself.ind[i].label = -1\n\t\tself.ind[i].nodes = nil\n\t}\n\tfor _, n := range self.g.Nodes() {\n\t\tid := n.ID()\n\t\tself.ind[id].label = id\n\t}\n\tfor i, e := range self.g.Edges() {\n\t\tself.sel[i] = WeightedItem{Index: e.ID(), Weight: e.Weight()}\n\t}\n\tself.sel.Init()\n}\n\nfunc (self *kargerR) clone() (c *kargerR) {\n\tc = &kargerR{\n\t\tg: self.g,\n\t\tind: make([]super, self.g.NextNodeID()),\n\t\tsel: make(Selector, self.g.Size()),\n\t\torder: self.order,\n\t}\n\n\tcopy(c.sel, self.sel)\n\tfor i, n := range self.ind {\n\t\ts := &c.ind[i]\n\t\ts.label = n.label\n\t\tif n.nodes != nil {\n\t\t\ts.nodes = make([]int, len(n.nodes))\n\t\t\tcopy(s.nodes, n.nodes)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (self *kargerR) fastRandMinCut() {\n\tif self.order <= 6 {\n\t\tself.randCompact(2)\n\t\treturn\n\t}\n\n\tt := int(math.Ceil(float64(self.order)\/sqrt2 + 1))\n\n\tsub := []*kargerR{self, self.clone()}\n\tfor i := range sub {\n\t\tsub[i].randContract(t)\n\t\tsub[i].fastRandMinCut()\n\t}\n\n\tif sub[0].w < sub[1].w {\n\t\t*self = *sub[0]\n\t\treturn\n\t}\n\t*self = *sub[1]\n}\n\nfunc (self *kargerR) randContract(k int) {\n\tfor self.order > k {\n\t\tid, err := self.sel.Select()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\te := self.g.Edge(id)\n\t\tif self.loop(e) {\n\t\t\tcontinue\n\t\t}\n\n\t\thid, tid := e.Head().ID(), e.Tail().ID()\n\t\thl, tl := self.ind[hid].label, self.ind[tid].label\n\t\tif len(self.ind[hl].nodes) < len(self.ind[tl].nodes) {\n\t\t\thid, tid = tid, hid\n\t\t\thl, tl = tl, hl\n\t\t}\n\n\t\tif self.ind[hl].nodes == nil {\n\t\t\tself.ind[hl].nodes = []int{hid}\n\t\t}\n\t\tif self.ind[tl].nodes == nil {\n\t\t\tself.ind[hl].nodes = append(self.ind[hl].nodes, tid)\n\t\t} else {\n\t\t\tself.ind[hl].nodes = append(self.ind[hl].nodes, self.ind[tl].nodes...)\n\t\t\tself.ind[tl].nodes = nil\n\t\t}\n\t\tfor _, i := range self.ind[hl].nodes {\n\t\t\tself.ind[i].label = self.ind[hid].label\n\t\t}\n\n\t\tself.order--\n\t}\n}\n\nfunc (self *kargerR) randCompact(k int) {\n\tfor self.order > k {\n\t\tid, err := self.sel.Select()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\te := self.g.Edge(id)\n\t\tif self.loop(e) {\n\t\t\tcontinue\n\t\t}\n\n\t\thid, tid := e.Head().ID(), e.Tail().ID()\n\t\thl, tl := self.ind[hid].label, self.ind[tid].label\n\t\tif len(self.ind[hl].nodes) < len(self.ind[tl].nodes) {\n\t\t\thid, tid = tid, hid\n\t\t\thl, tl = tl, hl\n\t\t}\n\n\t\tif self.ind[hl].nodes == nil {\n\t\t\tself.ind[hl].nodes = []int{hid}\n\t\t}\n\t\tif self.ind[tl].nodes == nil {\n\t\t\tself.ind[hl].nodes = append(self.ind[hl].nodes, tid)\n\t\t} else {\n\t\t\tself.ind[hl].nodes = append(self.ind[hl].nodes, self.ind[tl].nodes...)\n\t\t\tself.ind[tl].nodes = nil\n\t\t}\n\t\tfor _, i := range self.ind[hl].nodes {\n\t\t\tself.ind[i].label = self.ind[hid].label\n\t\t}\n\n\t\tself.order--\n\t}\n\n\tself.c, self.w = []*Edge{}, 0\n\tfor _, e := range self.g.Edges() {\n\t\tif self.loop(e) {\n\t\t\tcontinue\n\t\t}\n\t\tself.c = append(self.c, e)\n\t\tself.w += e.Weight()\n\t}\n}\n\nfunc (self *kargerR) loop(e *Edge) bool {\n\treturn self.ind[e.Head().ID()].label == self.ind[e.Tail().ID()].label\n}\n\n\/\/ parallelised within the recursion tree\n\nfunc ParFastRandMinCut(g *Undirected, iter, threads int) (c []*Edge, w float64) {\n\tk := newKargerRP(g)\n\tk.split = threads\n\tif k.split == 0 {\n\t\tk.split = -1\n\t}\n\tk.init()\n\tw = math.Inf(1)\n\tfor i := 0; i < iter; i++ {\n\t\tk.fastRandMinCut()\n\t\tif k.w < w {\n\t\t\tw = k.w\n\t\t\tc = k.c\n\t\t}\n\t}\n\n\treturn\n}\n\ntype kargerRP struct {\n\tg *Undirected\n\torder int\n\tind []super\n\tsel Selector\n\tc []*Edge\n\tw float64\n\tcount int\n\tsplit int\n}\n\nfunc newKargerRP(g *Undirected) *kargerRP {\n\treturn &kargerRP{\n\t\tg: g,\n\t\tind: make([]super, g.NextNodeID()),\n\t\tsel: make(Selector, g.Size()),\n\t}\n}\n\nfunc (self *kargerRP) init() {\n\tself.order = self.g.Order()\n\tfor i := range self.ind {\n\t\tself.ind[i].label = -1\n\t\tself.ind[i].nodes = nil\n\t}\n\tfor _, n := range self.g.Nodes() {\n\t\tid := n.ID()\n\t\tself.ind[id].label = id\n\t}\n\tfor i, e := range self.g.Edges() {\n\t\tself.sel[i] = WeightedItem{Index: e.ID(), Weight: e.Weight()}\n\t}\n\tself.sel.Init()\n}\n\nfunc (self *kargerRP) clone() (c *kargerRP) {\n\tc = &kargerRP{\n\t\tg: self.g,\n\t\tind: make([]super, self.g.NextNodeID()),\n\t\tsel: make(Selector, self.g.Size()),\n\t\torder: self.order,\n\t\tcount: self.count,\n\t}\n\n\tcopy(c.sel, self.sel)\n\tfor i, n := range self.ind {\n\t\ts := &c.ind[i]\n\t\ts.label = n.label\n\t\tif n.nodes != nil {\n\t\t\ts.nodes = make([]int, len(n.nodes))\n\t\t\tcopy(s.nodes, n.nodes)\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (self *kargerRP) fastRandMinCut() {\n\tif self.order <= 6 {\n\t\tself.randCompact(2)\n\t\treturn\n\t}\n\n\tt := int(math.Ceil(float64(self.order)\/sqrt2 + 1))\n\n\tvar wg *sync.WaitGroup\n\tif self.count < self.split {\n\t\twg = &sync.WaitGroup{}\n\t}\n\tself.count++\n\n\tsub := []*kargerRP{self, self.clone()}\n\tfor i := range sub {\n\t\tif wg != nil {\n\t\t\twg.Add(1)\n\t\t\tgo func(i int) {\n\t\t\t\truntime.LockOSThread()\n\t\t\t\tdefer wg.Done()\n\t\t\t\tsub[i].randContract(t)\n\t\t\t\tsub[i].fastRandMinCut()\n\t\t\t}(i)\n\t\t} else {\n\t\t\tsub[i].randContract(t)\n\t\t\tsub[i].fastRandMinCut()\n\t\t}\n\t}\n\n\tif wg != nil {\n\t\twg.Wait()\n\t}\n\n\tif sub[0].w < sub[1].w {\n\t\t*self = *sub[0]\n\t\treturn\n\t}\n\t*self = *sub[1]\n}\n\nfunc (self *kargerRP) randContract(k int) {\n\tfor self.order > k {\n\t\tid, err := self.sel.Select()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\te := self.g.Edge(id)\n\t\tif self.loop(e) {\n\t\t\tcontinue\n\t\t}\n\n\t\thid, tid := e.Head().ID(), e.Tail().ID()\n\t\thl, tl := self.ind[hid].label, self.ind[tid].label\n\t\tif len(self.ind[hl].nodes) < len(self.ind[tl].nodes) {\n\t\t\thid, tid = tid, hid\n\t\t\thl, tl = tl, hl\n\t\t}\n\n\t\tif self.ind[hl].nodes == nil {\n\t\t\tself.ind[hl].nodes = []int{hid}\n\t\t}\n\t\tif self.ind[tl].nodes == nil {\n\t\t\tself.ind[hl].nodes = append(self.ind[hl].nodes, tid)\n\t\t} else {\n\t\t\tself.ind[hl].nodes = append(self.ind[hl].nodes, self.ind[tl].nodes...)\n\t\t\tself.ind[tl].nodes = nil\n\t\t}\n\t\tfor _, i := range self.ind[hl].nodes {\n\t\t\tself.ind[i].label = self.ind[hid].label\n\t\t}\n\n\t\tself.order--\n\t}\n}\n\nfunc (self *kargerRP) randCompact(k int) {\n\tfor self.order > k {\n\t\tid, err := self.sel.Select()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\n\t\te := self.g.Edge(id)\n\t\tif self.loop(e) {\n\t\t\tcontinue\n\t\t}\n\n\t\thid, tid := e.Head().ID(), e.Tail().ID()\n\t\thl, tl := self.ind[hid].label, self.ind[tid].label\n\t\tif len(self.ind[hl].nodes) < len(self.ind[tl].nodes) {\n\t\t\thid, tid = tid, hid\n\t\t\thl, tl = tl, hl\n\t\t}\n\n\t\tif self.ind[hl].nodes == nil {\n\t\t\tself.ind[hl].nodes = []int{hid}\n\t\t}\n\t\tif self.ind[tl].nodes == nil {\n\t\t\tself.ind[hl].nodes = append(self.ind[hl].nodes, tid)\n\t\t} else {\n\t\t\tself.ind[hl].nodes = append(self.ind[hl].nodes, self.ind[tl].nodes...)\n\t\t\tself.ind[tl].nodes = nil\n\t\t}\n\t\tfor _, i := range self.ind[hl].nodes {\n\t\t\tself.ind[i].label = self.ind[hid].label\n\t\t}\n\n\t\tself.order--\n\t}\n\n\tself.c, self.w = []*Edge{}, 0\n\tfor _, e := range self.g.Edges() {\n\t\tif self.loop(e) {\n\t\t\tcontinue\n\t\t}\n\t\tself.c = append(self.c, e)\n\t\tself.w += e.Weight()\n\t}\n}\n\nfunc (self *kargerRP) loop(e *Edge) bool {\n\treturn self.ind[e.Head().ID()].label == self.ind[e.Tail().ID()].label\n}\n<|endoftext|>"} {"text":"<commit_before>package spdy\n\nimport (\n \"bufio\"\n \"crypto\/tls\"\n \"fmt\"\n \"log\"\n \"net\/http\"\n \"runtime\"\n \"sync\"\n \"time\"\n)\n\ntype connection struct {\n sync.RWMutex\n remoteAddr string \/\/ network address of remote side\n server *http.Server\n conn *tls.Conn\n buf *bufio.Reader\n tlsState *tls.ConnectionState\n tlsConfig *tls.Config\n streams map[uint32]*stream\n streamInputs map[uint32]chan<- []byte\n buffer []Frame\n queue []Frame\n nextServerStreamID uint32 \/\/ even\n nextClientStreamID uint32 \/\/ odd\n goaway bool\n version int\n}\n\nfunc (conn *connection) newStream(frame *SynStreamFrame, input <-chan []byte) *stream {\n newStream := new(stream)\n newStream.conn = conn\n newStream.streamID = frame.StreamID\n newStream.state = STATE_OPEN\n newStream.priority = frame.Priority\n newStream.input = input\n newStream.request = new(Request)\n newStream.certificates = make([]Certificate, 1)\n newStream.headers = frame.Headers\n newStream.settings = make([]*Setting, 1)\n newStream.unidirectional = frame.Flags&FLAG_UNIDIRECTIONAL != 0\n\n return newStream\n}\n\nfunc (conn *connection) WriteFrame(frame Frame) error {\n return nil\n}\n\nfunc (conn *connection) handleSynStream(frame *SynStreamFrame) {\n \/\/ Check stream creation is allowed.\n conn.RLock()\n defer conn.RUnlock()\n\n if conn.goaway {\n return\n }\n\n \/\/ Check version.\n if frame.Version != uint16(conn.version) {\n log.Printf(\"Warning: Received frame with SPDY version %d on connection with version %d.\\n\",\n frame.Version, conn.version)\n if frame.Version > SPDY_VERSION {\n log.Printf(\"Error: Received frame with SPDY version %d, which is not supported.\\n\",\n frame.Version)\n reply := new(RstStreamFrame)\n reply.Version = SPDY_VERSION\n reply.StreamID = frame.StreamID\n reply.StatusCode = RST_STREAM_UNSUPPORTED_VERSION\n conn.WriteFrame(reply)\n return\n }\n }\n\n \/\/ Check Stream ID is odd.\n if frame.StreamID&1 == 0 {\n log.Printf(\"Error: Received SYN_STREAM with Stream ID %d, which should be odd.\\n\",\n frame.StreamID)\n reply := new(RstStreamFrame)\n reply.Version = SPDY_VERSION\n reply.StreamID = frame.StreamID\n reply.StatusCode = RST_STREAM_PROTOCOL_ERROR\n conn.WriteFrame(reply)\n return\n }\n\n \/\/ Check Stream ID is the right number.\n if frame.StreamID != conn.nextClientStreamID+2 && frame.StreamID != 1 &&\n conn.nextClientStreamID != 0 {\n log.Printf(\"Error: Received SYN_STREAM with Stream ID %d, which should be %d.\\n\",\n frame.StreamID, conn.nextClientStreamID+2)\n reply := new(RstStreamFrame)\n reply.Version = SPDY_VERSION\n reply.StreamID = frame.StreamID\n reply.StatusCode = RST_STREAM_PROTOCOL_ERROR\n conn.WriteFrame(reply)\n return\n }\n\n \/\/ Check Stream ID is not too large.\n if frame.StreamID > MAX_STREAM_ID {\n log.Printf(\"Error: Received SYN_STREAM with Stream ID %d, which is too large.\\n\",\n frame.StreamID)\n reply := new(RstStreamFrame)\n reply.Version = SPDY_VERSION\n reply.StreamID = frame.StreamID\n reply.StatusCode = RST_STREAM_PROTOCOL_ERROR\n conn.WriteFrame(reply)\n return\n }\n\n \/\/ Stream ID is fine.\n\n \/\/ Create and start new stream.\n conn.RUnlock()\n conn.Lock()\n input := make(chan []byte)\n conn.streamInputs[frame.StreamID] = input\n conn.streams[frame.StreamID] = conn.newStream(frame, input)\n conn.Unlock()\n conn.RLock()\n\n go conn.streams[frame.StreamID].run()\n\n return\n}\n\nfunc (conn *connection) readFrames() {\n if d := conn.server.ReadTimeout; d != 0 {\n conn.conn.SetReadDeadline(time.Now().Add(d))\n }\n if d := conn.server.WriteTimeout; d != 0 {\n defer func() {\n conn.conn.SetWriteDeadline(time.Now().Add(d))\n }()\n }\n\n for {\n frame, err := ReadFrame(conn.buf)\n if err != nil {\n \/\/ TODO: handle error\n panic(err)\n }\n\n FrameHandling:\n switch frame := frame.(type) {\n default:\n panic(fmt.Sprintf(\"unexpected frame type %T\", frame))\n\n \/******************\n *** SYN_STREAM ***\n ******************\/\n case *SynStreamFrame:\n conn.handleSynStream(frame)\n\n case *SynReplyFrame:\n \/\/\n\n case *RstStreamFrame:\n \/\/\n\n case *SettingsFrame:\n \/\/\n\n case *PingFrame:\n \/\/\n\n case *GoawayFrame:\n \/\/ Check version.\n if frame.Version != uint16(conn.version) {\n log.Printf(\"Warning: Received frame with SPDY version %d on connection with version %d.\\n\",\n frame.Version, conn.version)\n if frame.Version > SPDY_VERSION {\n log.Printf(\"Error: Received frame with SPDY version %d, which is not supported.\\n\",\n frame.Version)\n reply := new(RstStreamFrame)\n reply.Version = SPDY_VERSION\n reply.StatusCode = RST_STREAM_UNSUPPORTED_VERSION\n conn.WriteFrame(reply)\n break FrameHandling\n }\n }\n\n conn.Lock()\n conn.goaway = true\n conn.Unlock()\n\n case *HeadersFrame:\n \/\/\n\n case *WindowUpdateFrame:\n \/\/\n\n case *CredentialFrame:\n \/\/\n\n case *DataFrame:\n \/\/\n }\n }\n}\n\nfunc (conn *connection) serve() {\n defer func() {\n if err := recover(); err != nil {\n const size = 4096\n buf := make([]byte, size)\n buf = buf[:runtime.Stack(buf, false)]\n log.Printf(\"spdy: panic serving %v: %v\\n%s\", conn.remoteAddr, err, buf)\n }\n }()\n\n conn.readFrames()\n}\n\nfunc acceptSPDYVersion2(server *http.Server, tlsConn *tls.Conn, _ http.Handler) {\n conn := newConn(tlsConn)\n conn.server = server\n conn.tlsConfig = server.TLSConfig\n conn.version = 2\n\n conn.serve()\n}\n\nfunc acceptSPDYVersion3(server *http.Server, tlsConn *tls.Conn, _ http.Handler) {\n conn := newConn(tlsConn)\n conn.server = server\n conn.tlsConfig = server.TLSConfig\n conn.version = 3\n\n conn.serve()\n}\n\nfunc newConn(tlsConn *tls.Conn) *connection {\n conn := new(connection)\n conn.remoteAddr = tlsConn.RemoteAddr().String()\n conn.conn = tlsConn\n conn.buf = bufio.NewReader(tlsConn)\n *conn.tlsState = tlsConn.ConnectionState()\n conn.streams = make(map[uint32]*stream)\n conn.streamInputs = make(map[uint32]chan<- []byte)\n conn.buffer = make([]Frame, 0, 10)\n conn.queue = make([]Frame, 0, 10)\n\n return conn\n}\n<commit_msg>Added further frame handling<commit_after>package spdy\n\nimport (\n \"bufio\"\n \"crypto\/tls\"\n \"fmt\"\n \"log\"\n \"net\/http\"\n \"runtime\"\n \"sync\"\n \"time\"\n)\n\ntype connection struct {\n sync.RWMutex\n remoteAddr string \/\/ network address of remote side\n server *http.Server\n conn *tls.Conn\n buf *bufio.Reader\n tlsState *tls.ConnectionState\n tlsConfig *tls.Config\n streams map[uint32]*stream\n streamInputs map[uint32]chan<- []byte\n buffer []Frame\n queue []Frame\n nextServerStreamID uint32 \/\/ even\n nextClientStreamID uint32 \/\/ odd\n goaway bool\n version int\n numInvalidStreamIDs int\n}\n\nfunc (conn *connection) newStream(frame *SynStreamFrame, input <-chan []byte) *stream {\n newStream := new(stream)\n newStream.conn = conn\n newStream.streamID = frame.StreamID\n newStream.state = STATE_OPEN\n newStream.priority = frame.Priority\n newStream.input = input\n newStream.request = new(Request)\n newStream.certificates = make([]Certificate, 1)\n newStream.headers = frame.Headers\n newStream.settings = make([]*Setting, 1)\n newStream.unidirectional = frame.Flags&FLAG_UNIDIRECTIONAL != 0\n\n return newStream\n}\n\nfunc (conn *connection) WriteFrame(frame Frame) error {\n return nil\n}\n\nfunc (conn *connection) handleSynStream(frame *SynStreamFrame) {\n conn.RLock()\n defer conn.RUnlock()\n\n \/\/ Check stream creation is allowed.\n if conn.goaway {\n return\n }\n\n \/\/ Check version.\n if frame.Version != uint16(conn.version) {\n log.Printf(\"Warning: Received frame with SPDY version %d on connection with version %d.\\n\",\n frame.Version, conn.version)\n if frame.Version > SPDY_VERSION {\n log.Printf(\"Error: Received frame with SPDY version %d, which is not supported.\\n\",\n frame.Version)\n reply := new(RstStreamFrame)\n reply.Version = SPDY_VERSION\n reply.StreamID = frame.StreamID\n reply.StatusCode = RST_STREAM_UNSUPPORTED_VERSION\n conn.WriteFrame(reply)\n return\n }\n }\n\n \/\/ Check Stream ID is odd.\n if frame.StreamID&1 == 0 {\n log.Printf(\"Error: Received SYN_STREAM with Stream ID %d, which should be odd.\\n\",\n frame.StreamID)\n reply := new(RstStreamFrame)\n reply.Version = SPDY_VERSION\n reply.StreamID = frame.StreamID\n reply.StatusCode = RST_STREAM_PROTOCOL_ERROR\n conn.WriteFrame(reply)\n return\n }\n\n \/\/ Check Stream ID is the right number.\n if frame.StreamID != conn.nextClientStreamID+2 && frame.StreamID != 1 &&\n conn.nextClientStreamID != 0 {\n log.Printf(\"Error: Received SYN_STREAM with Stream ID %d, which should be %d.\\n\",\n frame.StreamID, conn.nextClientStreamID+2)\n reply := new(RstStreamFrame)\n reply.Version = SPDY_VERSION\n reply.StreamID = frame.StreamID\n reply.StatusCode = RST_STREAM_PROTOCOL_ERROR\n conn.WriteFrame(reply)\n return\n }\n\n \/\/ Check Stream ID is not too large.\n if frame.StreamID > MAX_STREAM_ID {\n log.Printf(\"Error: Received SYN_STREAM with Stream ID %d, which is too large.\\n\",\n frame.StreamID)\n reply := new(RstStreamFrame)\n reply.Version = SPDY_VERSION\n reply.StreamID = frame.StreamID\n reply.StatusCode = RST_STREAM_PROTOCOL_ERROR\n conn.WriteFrame(reply)\n return\n }\n\n \/\/ Stream ID is fine.\n\n \/\/ Create and start new stream.\n conn.RUnlock()\n conn.Lock()\n input := make(chan []byte)\n conn.streamInputs[frame.StreamID] = input\n conn.streams[frame.StreamID] = conn.newStream(frame, input)\n conn.Unlock()\n conn.RLock()\n\n go conn.streams[frame.StreamID].run()\n\n return\n}\n\nfunc (conn *connection) handleDataFrame(frame *DataFrame) {\n conn.RLock()\n defer conn.RUnlock()\n\n \/\/ Check Stream ID is odd.\n if frame.StreamID&1 == 0 {\n log.Printf(\"Error: Received DATA with Stream ID %d, which should be odd.\\n\",\n frame.StreamID)\n reply := new(RstStreamFrame)\n reply.Version = SPDY_VERSION\n reply.StreamID = frame.StreamID\n reply.StatusCode = RST_STREAM_PROTOCOL_ERROR\n conn.WriteFrame(reply)\n return\n }\n\n \/\/ Check stream is open.\n if frame.StreamID != conn.nextClientStreamID+2 && frame.StreamID != 1 &&\n conn.nextClientStreamID != 0 {\n log.Printf(\"Error: Received SYN_STREAM with Stream ID %d, which should be %d.\\n\",\n frame.StreamID, conn.nextClientStreamID+2)\n reply := new(RstStreamFrame)\n reply.Version = SPDY_VERSION\n reply.StreamID = frame.StreamID\n reply.StatusCode = RST_STREAM_PROTOCOL_ERROR\n conn.WriteFrame(reply)\n return\n }\n\n \/\/ Stream ID is fine.\n\n \/\/ Handle flags.\n if frame.Flags&FLAG_FIN != 0 {\n stream := conn.streams[frame.StreamID]\n stream.Lock()\n stream.state = STATE_HALF_CLOSED_THERE\n stream.Unlock()\n }\n\n \/\/ Send data to stream.\n conn.streamInputs[frame.StreamID] <- frame.Data\n\n return\n}\n\nfunc (conn *connection) readFrames() {\n if d := conn.server.ReadTimeout; d != 0 {\n conn.conn.SetReadDeadline(time.Now().Add(d))\n }\n if d := conn.server.WriteTimeout; d != 0 {\n defer func() {\n conn.conn.SetWriteDeadline(time.Now().Add(d))\n }()\n }\n\n for {\n frame, err := ReadFrame(conn.buf)\n if err != nil {\n \/\/ TODO: handle error\n panic(err)\n }\n\n FrameHandling:\n switch frame := frame.(type) {\n default:\n panic(fmt.Sprintf(\"unexpected frame type %T\", frame))\n\n \/*** COMPLETE! ***\/\n case *SynStreamFrame:\n conn.handleSynStream(frame)\n\n case *SynReplyFrame:\n log.Println(\"Got SYN_REPLY\")\n\n case *RstStreamFrame:\n log.Printf(\"Received RST_STREAM on stream %d with status %q.\\n\", frame.StreamID,\n StatusCodeText(int(frame.StatusCode)))\n\n case *SettingsFrame:\n log.Println(\"Received SETTINGS. Ignoring...\")\n\n \/*** COMPLETE! ***\/\n case *PingFrame:\n \/\/ Check Ping ID is odd.\n if frame.PingID&1 == 0 {\n log.Printf(\"Error: Received PING with Stream ID %d, which should be odd.\\n\", frame.PingID)\n reply := new(RstStreamFrame)\n reply.Version = SPDY_VERSION\n reply.StatusCode = RST_STREAM_PROTOCOL_ERROR\n conn.WriteFrame(reply)\n break FrameHandling\n }\n log.Println(\"Received PING. Replying...\")\n conn.WriteFrame(frame)\n\n case *GoawayFrame:\n \/\/ Check version.\n if frame.Version != uint16(conn.version) {\n log.Printf(\"Warning: Received frame with SPDY version %d on connection with version %d.\\n\",\n frame.Version, conn.version)\n if frame.Version > SPDY_VERSION {\n log.Printf(\"Error: Received frame with SPDY version %d, which is not supported.\\n\",\n frame.Version)\n reply := new(RstStreamFrame)\n reply.Version = SPDY_VERSION\n reply.StatusCode = RST_STREAM_UNSUPPORTED_VERSION\n conn.WriteFrame(reply)\n break FrameHandling\n }\n }\n\n \/\/ TODO: inform push streams that they haven't been processed if\n \/\/ the last good stream ID is less than their ID.\n\n conn.Lock()\n conn.goaway = true\n conn.Unlock()\n\n case *HeadersFrame:\n log.Println(\"Got HEADERS\")\n\n case *WindowUpdateFrame:\n log.Println(\"Got WINDOW_UPDATE\")\n\n case *CredentialFrame:\n log.Println(\"Got CREDENTIAL\")\n\n \/*** COMPLETE! ***\/\n case *DataFrame:\n conn.handleDataFrame(frame)\n }\n }\n}\n\nfunc (conn *connection) serve() {\n defer func() {\n if err := recover(); err != nil {\n const size = 4096\n buf := make([]byte, size)\n buf = buf[:runtime.Stack(buf, false)]\n log.Printf(\"spdy: panic serving %v: %v\\n%s\", conn.remoteAddr, err, buf)\n }\n }()\n\n conn.readFrames()\n}\n\nfunc acceptSPDYVersion2(server *http.Server, tlsConn *tls.Conn, _ http.Handler) {\n conn := newConn(tlsConn)\n conn.server = server\n conn.tlsConfig = server.TLSConfig\n conn.version = 2\n\n conn.serve()\n}\n\nfunc acceptSPDYVersion3(server *http.Server, tlsConn *tls.Conn, _ http.Handler) {\n conn := newConn(tlsConn)\n conn.server = server\n conn.tlsConfig = server.TLSConfig\n conn.version = 3\n\n conn.serve()\n}\n\nfunc newConn(tlsConn *tls.Conn) *connection {\n conn := new(connection)\n conn.remoteAddr = tlsConn.RemoteAddr().String()\n conn.conn = tlsConn\n conn.buf = bufio.NewReader(tlsConn)\n conn.tlsState = new(tls.ConnectionState)\n *conn.tlsState = tlsConn.ConnectionState()\n conn.streams = make(map[uint32]*stream)\n conn.streamInputs = make(map[uint32]chan<- []byte)\n conn.buffer = make([]Frame, 0, 10)\n conn.queue = make([]Frame, 0, 10)\n\n return conn\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/google\/gopacket\"\n\t\"github.com\/google\/gopacket\/layers\"\n\t\"github.com\/google\/gopacket\/pcapgo\"\n\t\"golang.org\/x\/sync\/errgroup\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ reconstructOptions represents all options for reconstruction\ntype reconstructOptions struct {\n\tbpP int\n\tlimitX int\n}\n\nfunc createPacket(ch chan<- []byte, packet []int, bpP int) error {\n\tvar buf []byte\n\tvar tmp int\n\tswitch bpP {\n\tcase 24:\n\t\tfor _, i := range packet {\n\t\t\tbuf = append(buf, byte(i))\n\t\t}\n\tcase 3, 6, 9, 12, 15, 18, 21:\n\t\tvar slice []int\n\t\tfor i := 0; i < len(packet); i = i + 1 {\n\t\t\tif i%(bpP*8) == 0 && i != 0 {\n\t\t\t\tbytes := createBytes(slice, bpP\/3)\n\t\t\t\tbuf = append(buf, bytes...)\n\t\t\t\tslice = slice[:0]\n\t\t\t}\n\t\t\tslice = append(slice, packet[i])\n\t\t}\n\t\tbytes := createBytes(slice, bpP\/3)\n\t\tbuf = append(buf, bytes...)\n\tcase 1:\n\t\tvar j int\n\t\tfor i := 0; i < len(packet); i = i + 3 {\n\t\t\tif j%8 == 0 && j != 0 {\n\t\t\t\tbuf = append(buf, byte(tmp))\n\t\t\t\ttmp = 0\n\t\t\t}\n\t\t\tif packet[i] != 0 {\n\t\t\t\ttmp = tmp | (1 << uint8(7-j%8))\n\t\t\t}\n\t\t\tj = j + 1\n\t\t}\n\t\tif tmp != 0 {\n\t\t\tbuf = append(buf, byte(tmp))\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"This format is not supported so far\")\n\t}\n\n\tch <- buf\n\n\treturn nil\n}\n\nfunc compareVersion(variant, minimalVersion string) (bool, error) {\n\tversionIs := strings.Split(variant, \".\")\n\tversionShould := strings.Split(minimalVersion, \".\")\n\n\tif len(versionIs) != len(versionShould) {\n\t\treturn false, fmt.Errorf(\"Versions don't have the same length\")\n\t}\n\n\tfor i := range versionShould {\n\t\tif versionIs[i] < versionShould[i] {\n\t\t\treturn false, fmt.Errorf(\"At least version %s is needed\", minimalVersion)\n\t\t} else if versionIs[i] > versionShould[i] {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn true, nil\n}\n\nfunc checkHeader(svg *bufio.Scanner) (reconstructOptions, error) {\n\tvar options reconstructOptions\n\tvar limitX, limitY, bpP int\n\tvar variant string\n\tvar header bool = false\n\n\tlimits, err := regexp.Compile(\"^<svg width=\\\"(\\\\d+)\\\" height=\\\"(\\\\d+)\\\">$\")\n\tif err != nil {\n\t\treturn options, err\n\t}\n\theaderStart, err := regexp.Compile(\"^<!--$\")\n\tif err != nil {\n\t\treturn options, err\n\t}\n\theaderEnd, err := regexp.Compile(\"^-->$\")\n\tif err != nil {\n\t\treturn options, err\n\t}\n\tversion, err := regexp.Compile(\"\\\\s+goNetViz \\\"([0-9.]+)\\\"$\")\n\tif err != nil {\n\t\treturn options, err\n\t}\n\tbpPconfig, err := regexp.Compile(\"\\\\s+BitsPerPixel=(\\\\d+)$\")\n\tif err != nil {\n\t\treturn options, err\n\t}\n\n\tfor svg.Scan() {\n\t\tswitch {\n\t\tcase limitX == 0 && limitY == 0 && header == false:\n\t\t\tmatches := limits.FindStringSubmatch(svg.Text())\n\t\t\tif len(matches) == 3 {\n\t\t\t\tlimitX, _ = strconv.Atoi(matches[1])\n\t\t\t\tlimitY, _ = strconv.Atoi(matches[2])\n\t\t\t}\n\t\tcase header == false:\n\t\t\tmatches := headerStart.FindStringSubmatch(svg.Text())\n\t\t\tif len(matches) == 2 {\n\t\t\t\theader = true\n\t\t\t}\n\t\tcase header == true:\n\t\t\tmatches := headerEnd.FindStringSubmatch(svg.Text())\n\t\t\tif len(matches) == 2 {\n\t\t\t\treturn options, nil\n\t\t\t}\n\t\tcase len(variant) == 0:\n\t\t\tmatches := version.FindStringSubmatch(svg.Text())\n\t\t\tif len(matches) == 2 {\n\t\t\t\tvariant = matches[1]\n\t\t\t}\n\t\tcase bpP == 0:\n\t\t\tmatches := bpPconfig.FindStringSubmatch(svg.Text())\n\t\t\tif len(matches) == 2 {\n\t\t\t\tbpP, _ = strconv.Atoi(matches[1])\n\t\t\t}\n\t\t}\n\t}\n\treturn options, fmt.Errorf(\"No end of header found\")\n}\n\nfunc extractInformation(g *errgroup.Group, ch chan []byte, cfg configs) error {\n\tinputfile, err := os.Open(cfg.file)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not open file %s: %s\\n\", cfg.file, err.Error())\n\t}\n\tdefer inputfile.Close()\n\tsvg := bufio.NewScanner(inputfile)\n\tvar yLast int\n\tvar packet []int\n\tdefer close(ch)\n\n\topt, err := checkHeader(svg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpixel, err := regexp.Compile(\"^<rect x=\\\"(\\\\d+)\\\" y=\\\"(\\\\d+)\\\" width=\\\"\\\\d+\\\" height=\\\"\\\\d+\\\" style=\\\"fill:rgb\\\\((\\\\d+),(\\\\d+),(\\\\d+)\\\\)\\\" \/>$\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tsvgEnd, err := regexp.Compile(\"^<\/svg>$\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor svg.Scan() {\n\t\tmatches := pixel.FindStringSubmatch(svg.Text())\n\t\tif len(matches) == 6 {\n\t\t\tpixelX, _ := strconv.Atoi(matches[1])\n\t\t\tpixelY, _ := strconv.Atoi(matches[2])\n\t\t\tif pixelY != yLast {\n\t\t\t\tyLast = pixelY\n\t\t\t\tif err := createPacket(ch, packet, opt.bpP); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tpacket = packet[:0]\n\t\t\t}\n\t\t\tif pixelX >= opt.limitX {\n\t\t\t\treturn fmt.Errorf(\"x-coordinate (%d) is bigger than the limit (%d)\\n\", pixelX, opt.limitX)\n\t\t\t}\n\t\t\tr, _ := strconv.Atoi(matches[3])\n\t\t\tg, _ := strconv.Atoi(matches[4])\n\t\t\tb, _ := strconv.Atoi(matches[5])\n\t\t\tpacket = append(packet, r, g, b)\n\t\t} else {\n\t\t\tend := svgEnd.FindStringSubmatch(svg.Text())\n\t\t\tif len(end) == 1 && len(packet) != 0 {\n\t\t\t\treturn createPacket(ch, packet, opt.bpP)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc createPcap(g *errgroup.Group, ch chan []byte, cfg configs) error {\n\tfilename := cfg.prefix\n\tfilename += \".pcap\"\n\toutput, err := os.Create(filename)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not create file %s: %s\\n\", filename, err.Error())\n\t}\n\tdefer output.Close()\n\tw := pcapgo.NewWriter(output)\n\tw.WriteFileHeader(65536, layers.LinkTypeEthernet)\n\n\tfor i, ok := <-ch; ok; i, ok = <-ch {\n\t\tw.WritePacket(gopacket.CaptureInfo{CaptureLength: len(i), Length: len(i), InterfaceIndex: 0}, i)\n\t}\n\n\treturn nil\n}\n\nfunc reconstruct(g *errgroup.Group, cfg configs) error {\n\tch := make(chan []byte)\n\n\tgo extractInformation(g, ch, cfg)\n\n\tg.Go(func() error {\n\t\treturn createPcap(g, ch, cfg)\n\t})\n\n\treturn g.Wait()\n}\n<commit_msg>Introduce reconstruct options<commit_after>package main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"github.com\/google\/gopacket\"\n\t\"github.com\/google\/gopacket\/layers\"\n\t\"github.com\/google\/gopacket\/pcapgo\"\n\t\"golang.org\/x\/sync\/errgroup\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n\/\/ reconstructOptions represents all options for reconstruction\ntype reconstructOptions struct {\n\tBpP int\n\tLimitX int\n\tScale int\n\tDtg string\n\tSource string\n\tFilter string\n}\n\n\/\/ svgOptions represents various options for reconstruction\ntype svgOptions struct {\n\tregex string\n\texpectedType string\n\treconstructOption string\n}\n\nfunc createPacket(ch chan<- []byte, packet []int, bpP int) error {\n\tvar buf []byte\n\tvar tmp int\n\tswitch bpP {\n\tcase 24:\n\t\tfor _, i := range packet {\n\t\t\tbuf = append(buf, byte(i))\n\t\t}\n\tcase 3, 6, 9, 12, 15, 18, 21:\n\t\tvar slice []int\n\t\tfor i := 0; i < len(packet); i = i + 1 {\n\t\t\tif i%(bpP*8) == 0 && i != 0 {\n\t\t\t\tbytes := createBytes(slice, bpP\/3)\n\t\t\t\tbuf = append(buf, bytes...)\n\t\t\t\tslice = slice[:0]\n\t\t\t}\n\t\t\tslice = append(slice, packet[i])\n\t\t}\n\t\tbytes := createBytes(slice, bpP\/3)\n\t\tbuf = append(buf, bytes...)\n\tcase 1:\n\t\tvar j int\n\t\tfor i := 0; i < len(packet); i = i + 3 {\n\t\t\tif j%8 == 0 && j != 0 {\n\t\t\t\tbuf = append(buf, byte(tmp))\n\t\t\t\ttmp = 0\n\t\t\t}\n\t\t\tif packet[i] != 0 {\n\t\t\t\ttmp = tmp | (1 << uint8(7-j%8))\n\t\t\t}\n\t\t\tj = j + 1\n\t\t}\n\t\tif tmp != 0 {\n\t\t\tbuf = append(buf, byte(tmp))\n\t\t}\n\tdefault:\n\t\treturn fmt.Errorf(\"This format is not supported so far\")\n\t}\n\n\tch <- buf\n\n\treturn nil\n}\n\nfunc compareVersion(variant, minimalVersion string) (bool, error) {\n\tversionIs := strings.Split(variant, \".\")\n\tversionShould := strings.Split(minimalVersion, \".\")\n\n\tif len(versionIs) != len(versionShould) {\n\t\treturn false, fmt.Errorf(\"Versions don't have the same length\")\n\t}\n\n\tfor i := range versionShould {\n\t\tif versionIs[i] < versionShould[i] {\n\t\t\treturn false, fmt.Errorf(\"At least version %s is needed\", minimalVersion)\n\t\t} else if versionIs[i] > versionShould[i] {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn true, nil\n}\n\nfunc checkHeader(svg *bufio.Scanner) (reconstructOptions, error) {\n\tvar options reconstructOptions\n\tvar limitX, limitY int\n\tvar variant string\n\tvar header bool = false\n\tvar parseOptions []svgOptions\n\tvar optionIndex int\n\n\tlimits, err := regexp.Compile(\"^<svg width=\\\"(\\\\d+)\\\" height=\\\"(\\\\d+)\\\">$\")\n\tif err != nil {\n\t\treturn options, err\n\t}\n\theaderStart, err := regexp.Compile(\"^<!--$\")\n\tif err != nil {\n\t\treturn options, err\n\t}\n\theaderEnd, err := regexp.Compile(\"^-->$\")\n\tif err != nil {\n\t\treturn options, err\n\t}\n\tversion, err := regexp.Compile(\"\\\\s+goNetViz \\\"([0-9.]+)\\\"$\")\n\tif err != nil {\n\t\treturn options, err\n\t}\n\n\tscale := svgOptions{regex: \"\\\\s+Scale=(\\\\d+)$\", expectedType: \"int\", reconstructOption: \"Scale\"}\n\tparseOptions = append(parseOptions, scale)\n\tbpP := svgOptions{regex: \"\\\\s+BitsPerPixel=(\\\\d+)$\", expectedType: \"int\", reconstructOption: \"BpP\"}\n\tparseOptions = append(parseOptions, bpP)\n\tdtg := svgOptions{regex: \"\\\\s+DTG=(\\\\w+)$\", expectedType: \"string\", reconstructOption: \"dtg\"}\n\tparseOptions = append(parseOptions, dtg)\n\tsource := svgOptions{regex: \"\\\\s+Source=(\\\\w+)$\", expectedType: \"string\", reconstructOption: \"source\"}\n\tparseOptions = append(parseOptions, source)\n\tfilter := svgOptions{regex: \"\\\\s+Filter=(\\\\w+)$\", expectedType: \"string\", reconstructOption: \"filter\"}\n\tparseOptions = append(parseOptions, filter)\n\n\tfor svg.Scan() {\n\t\tswitch {\n\t\tcase limitX == 0 && limitY == 0 && header == false:\n\t\t\tmatches := limits.FindStringSubmatch(svg.Text())\n\t\t\tif len(matches) == 3 {\n\t\t\t\tlimitX, _ = strconv.Atoi(matches[1])\n\t\t\t\tlimitY, _ = strconv.Atoi(matches[2])\n\t\t\t}\n\t\tcase header == false:\n\t\t\tif headerStart.MatchString(svg.Text()) {\n\t\t\t\theader = true\n\t\t\t}\n\t\tcase len(variant) == 0:\n\t\t\tmatches := version.FindStringSubmatch(svg.Text())\n\t\t\tif len(matches) == 2 {\n\t\t\t\tvariant = matches[1]\n\t\t\t}\n\t\tdefault:\n\t\t\tif optionIndex > len(parseOptions) {\n\t\t\t\treturn options, fmt.Errorf(\"Option index is out of range\")\n\t\t\t}\n\t\t\tregex, err := regexp.Compile(parseOptions[optionIndex].regex)\n\t\t\tif err != nil {\n\t\t\t\treturn options, err\n\t\t\t}\n\t\t\tmatches := regex.FindStringSubmatch(svg.Text())\n\t\t\tif len(matches) == 2 {\n\t\t\t\tif parseOptions[optionIndex].expectedType == \"int\" {\n\t\t\t\t\tnew, _ := strconv.Atoi(matches[1])\n\t\t\t\t\treflect.ValueOf(&options).Elem().FieldByName(parseOptions[optionIndex].reconstructOption).SetInt(int64(new))\n\t\t\t\t} else if parseOptions[optionIndex].expectedType == \"string\" {\n\t\t\t\t\treflect.ValueOf(&options).Elem().FieldByName(parseOptions[optionIndex].reconstructOption).SetString(matches[1])\n\t\t\t\t}\n\t\t\t\toptionIndex += 1\n\t\t\t} else {\n\t\t\t\tif headerEnd.MatchString(svg.Text()) {\n\t\t\t\t\treturn options, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn options, fmt.Errorf(\"No end of header found\")\n}\n\nfunc extractInformation(g *errgroup.Group, ch chan []byte, cfg configs) error {\n\tinputfile, err := os.Open(cfg.file)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not open file %s: %s\\n\", cfg.file, err.Error())\n\t}\n\tdefer inputfile.Close()\n\tsvg := bufio.NewScanner(inputfile)\n\tvar yLast int\n\tvar packet []int\n\tdefer close(ch)\n\n\topt, err := checkHeader(svg)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpixel, err := regexp.Compile(\"^<rect x=\\\"(\\\\d+)\\\" y=\\\"(\\\\d+)\\\" width=\\\"\\\\d+\\\" height=\\\"\\\\d+\\\" style=\\\"fill:rgb\\\\((\\\\d+),(\\\\d+),(\\\\d+)\\\\)\\\" \/>$\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tsvgEnd, err := regexp.Compile(\"<\/svg>\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor svg.Scan() {\n\t\tmatches := pixel.FindStringSubmatch(svg.Text())\n\t\tif len(matches) == 6 {\n\t\t\tpixelX, _ := strconv.Atoi(matches[1])\n\t\t\tpixelY, _ := strconv.Atoi(matches[2])\n\t\t\tif pixelY != yLast {\n\t\t\t\tyLast = pixelY\n\t\t\t\tif err := createPacket(ch, packet, opt.BpP); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tpacket = packet[:0]\n\t\t\t}\n\t\t\tif pixelX >= opt.LimitX {\n\t\t\t\treturn fmt.Errorf(\"x-coordinate (%d) is bigger than the limit (%d)\\n\", pixelX, opt.LimitX)\n\t\t\t}\n\t\t\tr, _ := strconv.Atoi(matches[3])\n\t\t\tg, _ := strconv.Atoi(matches[4])\n\t\t\tb, _ := strconv.Atoi(matches[5])\n\t\t\tpacket = append(packet, r, g, b)\n\t\t} else if svgEnd.MatchString(svg.Text()) {\n\t\t\tfmt.Println(\"End matches\")\n\t\t\tif len(packet) != 0 {\n\t\t\t\treturn createPacket(ch, packet, opt.BpP)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc createPcap(g *errgroup.Group, ch chan []byte, cfg configs) error {\n\tfilename := cfg.prefix\n\tfilename += \".pcap\"\n\toutput, err := os.Create(filename)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Could not create file %s: %s\\n\", filename, err.Error())\n\t}\n\tdefer output.Close()\n\tw := pcapgo.NewWriter(output)\n\tw.WriteFileHeader(65536, layers.LinkTypeEthernet)\n\n\tfor i, ok := <-ch; ok; i, ok = <-ch {\n\t\tw.WritePacket(gopacket.CaptureInfo{CaptureLength: len(i), Length: len(i), InterfaceIndex: 0}, i)\n\t}\n\n\treturn nil\n}\n\nfunc reconstruct(g *errgroup.Group, cfg configs) error {\n\tch := make(chan []byte)\n\n\tgo extractInformation(g, ch, cfg)\n\n\tg.Go(func() error {\n\t\treturn createPcap(g, ch, cfg)\n\t})\n\n\treturn g.Wait()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ +build linux,cgo,!agent\n\npackage db\n\nimport (\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n)\n\n\/\/ GetNetworkACLs returns the names of existing Network ACLs.\nfunc (c *Cluster) GetNetworkACLs(project string) ([]string, error) {\n\tq := `SELECT name FROM networks_acls\n\t\tWHERE project_id = (SELECT id FROM projects WHERE name = ? LIMIT 1)\n\t\tORDER BY id\n\t`\n\tinargs := []interface{}{project}\n\n\tvar name string\n\toutfmt := []interface{}{name}\n\tresult, err := queryScan(c, q, inargs, outfmt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := []string{}\n\tfor _, r := range result {\n\t\tresponse = append(response, r[0].(string))\n\t}\n\n\treturn response, nil\n}\n\n\/\/ GetNetworkACL returns the Network ACL with the given name in the given project.\nfunc (c *Cluster) GetNetworkACL(projectName string, name string) (int64, *api.NetworkACL, error) {\n\tvar id int64 = int64(-1)\n\tvar ingressJSON string\n\tvar egressJSON string\n\n\tacl := api.NetworkACL{\n\t\tNetworkACLPost: api.NetworkACLPost{\n\t\t\tName: name,\n\t\t},\n\t}\n\n\tq := `\n\t\tSELECT id, description, ingress, egress\n\t\tFROM networks_acls\n\t\tWHERE project_id = (SELECT id FROM projects WHERE name = ? LIMIT 1) AND name=?\n\t\tLIMIT 1\n\t`\n\targ1 := []interface{}{projectName, name}\n\targ2 := []interface{}{&id, &acl.Description, &ingressJSON, &egressJSON}\n\n\terr := dbQueryRowScan(c, q, arg1, arg2)\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn -1, nil, ErrNoSuchObject\n\t\t}\n\n\t\treturn -1, nil, err\n\t}\n\n\tacl.Ingress = []api.NetworkACLRule{}\n\tif ingressJSON != \"\" {\n\t\terr = json.Unmarshal([]byte(ingressJSON), &acl.Ingress)\n\t\tif err != nil {\n\t\t\treturn -1, nil, errors.Wrapf(err, \"Failed unmarshalling ingress rules\")\n\t\t}\n\t}\n\n\tacl.Egress = []api.NetworkACLRule{}\n\tif egressJSON != \"\" {\n\t\terr = json.Unmarshal([]byte(egressJSON), &acl.Egress)\n\t\tif err != nil {\n\t\t\treturn -1, nil, errors.Wrapf(err, \"Failed unmarshalling egress rules\")\n\t\t}\n\t}\n\n\tacl.Config, err = c.networkACLConfig(id)\n\tif err != nil {\n\t\treturn -1, nil, errors.Wrapf(err, \"Failed loading config\")\n\t}\n\n\treturn id, &acl, nil\n}\n\n\/\/ networkACLConfig returns the config map of the Network ACL with the given ID.\nfunc (c *Cluster) networkACLConfig(id int64) (map[string]string, error) {\n\tvar key, value string\n\tquery := `\n\t\tSELECT key, value\n\t\tFROM networks_acls_config\n\t\tWHERE network_acl_id=?\n\t`\n\tinargs := []interface{}{id}\n\toutfmt := []interface{}{key, value}\n\tresults, err := queryScan(c, query, inargs, outfmt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig := make(map[string]string, len(results))\n\n\tfor _, r := range results {\n\t\tkey = r[0].(string)\n\t\tvalue = r[1].(string)\n\n\t\t_, found := config[key]\n\t\tif found {\n\t\t\treturn nil, fmt.Errorf(\"Duplicate config row found for key %q for network ACL ID %d\", key, id)\n\t\t}\n\n\t\tconfig[key] = value\n\t}\n\n\treturn config, nil\n}\n\n\/\/ CreateNetworkACL creates a new Network ACL.\nfunc (c *Cluster) CreateNetworkACL(projectName string, info *api.NetworkACLsPost) (int64, error) {\n\tvar id int64\n\tvar err error\n\tvar ingressJSON, egressJSON []byte\n\n\tif info.Ingress != nil {\n\t\tingressJSON, err = json.Marshal(info.Ingress)\n\t\tif err != nil {\n\t\t\treturn -1, errors.Wrapf(err, \"Failed marshalling ingress rules\")\n\t\t}\n\t}\n\n\tif info.Egress != nil {\n\t\tegressJSON, err = json.Marshal(info.Egress)\n\t\tif err != nil {\n\t\t\treturn -1, errors.Wrapf(err, \"Failed marshalling egress rules\")\n\t\t}\n\t}\n\n\terr = c.Transaction(func(tx *ClusterTx) error {\n\t\t\/\/ Insert a new Network ACL record.\n\t\tresult, err := tx.tx.Exec(`\n\t\t\tINSERT INTO networks_acls (project_id, name, description, ingress, egress)\n\t\t\tVALUES ((SELECT id FROM projects WHERE name = ? LIMIT 1), ?, ?, ?, ?)\n\t\t`, projectName, info.Name, info.Description, string(ingressJSON), string(egressJSON))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tid, err := result.LastInsertId()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = networkACLConfigAdd(tx.tx, id, info.Config)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tid = -1\n\t}\n\n\treturn id, err\n}\n\n\/\/ networkACLConfigAdd inserts Network ACL config keys.\nfunc networkACLConfigAdd(tx *sql.Tx, id int64, config map[string]string) error {\n\tsql := \"INSERT INTO networks_acls_config (network_acl_id, key, value) VALUES(?, ?, ?)\"\n\tstmt, err := tx.Prepare(sql)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stmt.Close()\n\n\tfor k, v := range config {\n\t\tif v == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err = stmt.Exec(id, k, v)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Failed inserting config\")\n\t\t}\n\t}\n\n\treturn nil\n}\n<commit_msg>lxd\/db\/network\/acls: Adds UpdateNetworkACL function<commit_after>\/\/ +build linux,cgo,!agent\n\npackage db\n\nimport (\n\t\"database\/sql\"\n\t\"encoding\/json\"\n\t\"fmt\"\n\n\t\"github.com\/pkg\/errors\"\n\n\t\"github.com\/lxc\/lxd\/shared\/api\"\n)\n\n\/\/ GetNetworkACLs returns the names of existing Network ACLs.\nfunc (c *Cluster) GetNetworkACLs(project string) ([]string, error) {\n\tq := `SELECT name FROM networks_acls\n\t\tWHERE project_id = (SELECT id FROM projects WHERE name = ? LIMIT 1)\n\t\tORDER BY id\n\t`\n\tinargs := []interface{}{project}\n\n\tvar name string\n\toutfmt := []interface{}{name}\n\tresult, err := queryScan(c, q, inargs, outfmt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := []string{}\n\tfor _, r := range result {\n\t\tresponse = append(response, r[0].(string))\n\t}\n\n\treturn response, nil\n}\n\n\/\/ GetNetworkACL returns the Network ACL with the given name in the given project.\nfunc (c *Cluster) GetNetworkACL(projectName string, name string) (int64, *api.NetworkACL, error) {\n\tvar id int64 = int64(-1)\n\tvar ingressJSON string\n\tvar egressJSON string\n\n\tacl := api.NetworkACL{\n\t\tNetworkACLPost: api.NetworkACLPost{\n\t\t\tName: name,\n\t\t},\n\t}\n\n\tq := `\n\t\tSELECT id, description, ingress, egress\n\t\tFROM networks_acls\n\t\tWHERE project_id = (SELECT id FROM projects WHERE name = ? LIMIT 1) AND name=?\n\t\tLIMIT 1\n\t`\n\targ1 := []interface{}{projectName, name}\n\targ2 := []interface{}{&id, &acl.Description, &ingressJSON, &egressJSON}\n\n\terr := dbQueryRowScan(c, q, arg1, arg2)\n\tif err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn -1, nil, ErrNoSuchObject\n\t\t}\n\n\t\treturn -1, nil, err\n\t}\n\n\tacl.Ingress = []api.NetworkACLRule{}\n\tif ingressJSON != \"\" {\n\t\terr = json.Unmarshal([]byte(ingressJSON), &acl.Ingress)\n\t\tif err != nil {\n\t\t\treturn -1, nil, errors.Wrapf(err, \"Failed unmarshalling ingress rules\")\n\t\t}\n\t}\n\n\tacl.Egress = []api.NetworkACLRule{}\n\tif egressJSON != \"\" {\n\t\terr = json.Unmarshal([]byte(egressJSON), &acl.Egress)\n\t\tif err != nil {\n\t\t\treturn -1, nil, errors.Wrapf(err, \"Failed unmarshalling egress rules\")\n\t\t}\n\t}\n\n\tacl.Config, err = c.networkACLConfig(id)\n\tif err != nil {\n\t\treturn -1, nil, errors.Wrapf(err, \"Failed loading config\")\n\t}\n\n\treturn id, &acl, nil\n}\n\n\/\/ networkACLConfig returns the config map of the Network ACL with the given ID.\nfunc (c *Cluster) networkACLConfig(id int64) (map[string]string, error) {\n\tvar key, value string\n\tquery := `\n\t\tSELECT key, value\n\t\tFROM networks_acls_config\n\t\tWHERE network_acl_id=?\n\t`\n\tinargs := []interface{}{id}\n\toutfmt := []interface{}{key, value}\n\tresults, err := queryScan(c, query, inargs, outfmt)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig := make(map[string]string, len(results))\n\n\tfor _, r := range results {\n\t\tkey = r[0].(string)\n\t\tvalue = r[1].(string)\n\n\t\t_, found := config[key]\n\t\tif found {\n\t\t\treturn nil, fmt.Errorf(\"Duplicate config row found for key %q for network ACL ID %d\", key, id)\n\t\t}\n\n\t\tconfig[key] = value\n\t}\n\n\treturn config, nil\n}\n\n\/\/ CreateNetworkACL creates a new Network ACL.\nfunc (c *Cluster) CreateNetworkACL(projectName string, info *api.NetworkACLsPost) (int64, error) {\n\tvar id int64\n\tvar err error\n\tvar ingressJSON, egressJSON []byte\n\n\tif info.Ingress != nil {\n\t\tingressJSON, err = json.Marshal(info.Ingress)\n\t\tif err != nil {\n\t\t\treturn -1, errors.Wrapf(err, \"Failed marshalling ingress rules\")\n\t\t}\n\t}\n\n\tif info.Egress != nil {\n\t\tegressJSON, err = json.Marshal(info.Egress)\n\t\tif err != nil {\n\t\t\treturn -1, errors.Wrapf(err, \"Failed marshalling egress rules\")\n\t\t}\n\t}\n\n\terr = c.Transaction(func(tx *ClusterTx) error {\n\t\t\/\/ Insert a new Network ACL record.\n\t\tresult, err := tx.tx.Exec(`\n\t\t\tINSERT INTO networks_acls (project_id, name, description, ingress, egress)\n\t\t\tVALUES ((SELECT id FROM projects WHERE name = ? LIMIT 1), ?, ?, ?, ?)\n\t\t`, projectName, info.Name, info.Description, string(ingressJSON), string(egressJSON))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tid, err := result.LastInsertId()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = networkACLConfigAdd(tx.tx, id, info.Config)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tid = -1\n\t}\n\n\treturn id, err\n}\n\n\/\/ networkACLConfigAdd inserts Network ACL config keys.\nfunc networkACLConfigAdd(tx *sql.Tx, id int64, config map[string]string) error {\n\tsql := \"INSERT INTO networks_acls_config (network_acl_id, key, value) VALUES(?, ?, ?)\"\n\tstmt, err := tx.Prepare(sql)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stmt.Close()\n\n\tfor k, v := range config {\n\t\tif v == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err = stmt.Exec(id, k, v)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Failed inserting config\")\n\t\t}\n\t}\n\n\treturn nil\n}\n\n\/\/ UpdateNetworkACL updates the Network ACL with the given ID.\nfunc (c *Cluster) UpdateNetworkACL(id int64, config *api.NetworkACLPut) error {\n\tvar err error\n\tvar ingressJSON, egressJSON []byte\n\n\tif config.Ingress != nil {\n\t\tingressJSON, err = json.Marshal(config.Ingress)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Failed marshalling ingress rules\")\n\t\t}\n\t}\n\n\tif config.Egress != nil {\n\t\tegressJSON, err = json.Marshal(config.Egress)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"Failed marshalling egress rules\")\n\t\t}\n\t}\n\n\treturn c.Transaction(func(tx *ClusterTx) error {\n\t\t_, err := tx.tx.Exec(`\n\t\t\tUPDATE networks_acls\n\t\t\tSET description=?, ingress = ?, egress = ?\n\t\t\tWHERE id=?\n\t\t`, config.Description, ingressJSON, egressJSON, id)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = networkACLConfigUpdate(tx.tx, id, config.Config)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t})\n}\n\n\/\/ networkACLConfigUpdate updates Network ACL config keys.\nfunc networkACLConfigUpdate(tx *sql.Tx, id int64, config map[string]string) error {\n\t_, err := tx.Exec(\"DELETE FROM networks_acls_config WHERE network_acl_id=?\", id)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstr := fmt.Sprintf(\"INSERT INTO networks_acls_config (network_acl_id, key, value) VALUES(?, ?, ?)\")\n\tstmt, err := tx.Prepare(str)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer stmt.Close()\n\n\tfor k, v := range config {\n\t\tif v == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t_, err = stmt.Exec(id, k, v)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"labix.org\/v2\/mgo\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t\"log\"\n)\n\ntype mongoEngine struct {\n\tsession *mgo.Session\n\tdbName string\n}\n\nfunc (eng mongoEngine) Close() {\n\t\/\/ TODO\n}\n\n\/\/ NewMongoEngine returns an Engine based on a mongodb batabase backend\nfunc NewMongoEngine(dbName string, hostPorts string) Engine {\n\tlog.Printf(\"connecting to mongodb '%s'\\n\", hostPorts)\n\ts, err := mgo.Dial(hostPorts)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\ts.SetMode(mgo.Monotonic, true)\n\teng := &mongoEngine{\n\t\tsession: s,\n\t\tdbName: dbName,\n\t}\n\n\teng.EnsureIndexes(\"organisations\") \/\/TODOL parameterise this\n\n\treturn eng\n}\n\nfunc (eng *mongoEngine) EnsureIndexes(collection string) {\n\tc := eng.session.DB(eng.dbName).C(collection)\n\n\teng.session.ResetIndexCache()\n\n\t\/\/ create collection if it's not there\n\tc.Create(&mgo.CollectionInfo{})\n\n\terr := c.EnsureIndex(mgo.Index{\n\t\tKey: []string{\"uuid\"}, \/\/TODO: parameterise this\n\t\tUnique: true,\n\t\tDropDups: true,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}\n\nfunc (eng *mongoEngine) Drop(collection string) {\n\terr := eng.session.DB(\"ftindex\").C(collection).DropCollection()\n\tif err != nil {\n\t\tlog.Printf(\"failed to drop collection\")\n\t}\n\teng.EnsureIndexes(collection)\n}\n\nfunc (eng *mongoEngine) Write(collection string, id string, cont Document) error {\n\tcoll := eng.session.DB(eng.dbName).C(collection)\n\tif id == \"\" {\n\t\tpanic(\"missing id\")\n\t}\n\t_, err := coll.Upsert(bson.D{{\"uuid\", id}}, cont) \/\/TODO: parameterise uuid\n\tif err != nil {\n\t\tlog.Printf(\"insert failed: %v\\n\", err)\n\t}\n\treturn nil\n}\n\nfunc (eng *mongoEngine) Count(collection string) int {\n\tcoll := eng.session.DB(eng.dbName).C(collection)\n\tcount, err := coll.Count()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}\n\nfunc (eng *mongoEngine) Load(collection, id string) (bool, Document, error) {\n\tc := eng.session.DB(eng.dbName).C(collection)\n\tvar content Document\n\terr := c.Find(bson.M{\"uuid\": id}).One(&content)\n\tif err == mgo.ErrNotFound {\n\t\treturn false, Document{}, nil\n\t}\n\tif err != nil {\n\t\treturn false, Document{}, err\n\t}\n\treturn true, content, nil\n}\n\nfunc (eng mongoEngine) All(collection string, stopchan chan struct{}) (chan Document, error) {\n\tcont := make(chan Document)\n\n\tgo func() {\n\t\tdefer close(cont)\n\t\tcoll := eng.session.DB(eng.dbName).C(collection)\n\t\titer := coll.Find(nil).Iter()\n\t\tvar result Document\n\t\tfor iter.Next(&result) {\n\t\t\tselect {\n\t\t\tcase <-stopchan:\n\t\t\t\tbreak\n\t\t\tcase cont <- result:\n\t\t\t}\n\t\t}\n\t\tif err := iter.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\treturn cont, nil\n}\n<commit_msg>fix hardcoded db name<commit_after>package main\n\nimport (\n\t\"labix.org\/v2\/mgo\"\n\t\"labix.org\/v2\/mgo\/bson\"\n\t\"log\"\n)\n\ntype mongoEngine struct {\n\tsession *mgo.Session\n\tdbName string\n}\n\nfunc (eng mongoEngine) Close() {\n\t\/\/ TODO\n}\n\n\/\/ NewMongoEngine returns an Engine based on a mongodb batabase backend\nfunc NewMongoEngine(dbName string, hostPorts string) Engine {\n\tlog.Printf(\"connecting to mongodb '%s'\\n\", hostPorts)\n\ts, err := mgo.Dial(hostPorts)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\ts.SetMode(mgo.Monotonic, true)\n\teng := &mongoEngine{\n\t\tsession: s,\n\t\tdbName: dbName,\n\t}\n\n\teng.EnsureIndexes(\"organisations\") \/\/TODOL parameterise this\n\n\treturn eng\n}\n\nfunc (eng *mongoEngine) EnsureIndexes(collection string) {\n\tc := eng.session.DB(eng.dbName).C(collection)\n\n\teng.session.ResetIndexCache()\n\n\t\/\/ create collection if it's not there\n\tc.Create(&mgo.CollectionInfo{})\n\n\terr := c.EnsureIndex(mgo.Index{\n\t\tKey: []string{\"uuid\"}, \/\/TODO: parameterise this\n\t\tUnique: true,\n\t\tDropDups: true,\n\t\tBackground: false,\n\t\tSparse: false,\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n}\n\nfunc (eng *mongoEngine) Drop(collection string) {\n\terr := eng.session.DB(eng.dbName).C(collection).DropCollection()\n\tif err != nil {\n\t\tlog.Printf(\"failed to drop collection\")\n\t}\n\teng.EnsureIndexes(collection)\n}\n\nfunc (eng *mongoEngine) Write(collection string, id string, cont Document) error {\n\tcoll := eng.session.DB(eng.dbName).C(collection)\n\tif id == \"\" {\n\t\tpanic(\"missing id\")\n\t}\n\t_, err := coll.Upsert(bson.D{{\"uuid\", id}}, cont) \/\/TODO: parameterise uuid\n\tif err != nil {\n\t\tlog.Printf(\"insert failed: %v\\n\", err)\n\t}\n\treturn nil\n}\n\nfunc (eng *mongoEngine) Count(collection string) int {\n\tcoll := eng.session.DB(eng.dbName).C(collection)\n\tcount, err := coll.Count()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn count\n}\n\nfunc (eng *mongoEngine) Load(collection, id string) (bool, Document, error) {\n\tc := eng.session.DB(eng.dbName).C(collection)\n\tvar content Document\n\terr := c.Find(bson.M{\"uuid\": id}).One(&content)\n\tif err == mgo.ErrNotFound {\n\t\treturn false, Document{}, nil\n\t}\n\tif err != nil {\n\t\treturn false, Document{}, err\n\t}\n\treturn true, content, nil\n}\n\nfunc (eng mongoEngine) All(collection string, stopchan chan struct{}) (chan Document, error) {\n\tcont := make(chan Document)\n\n\tgo func() {\n\t\tdefer close(cont)\n\t\tcoll := eng.session.DB(eng.dbName).C(collection)\n\t\titer := coll.Find(nil).Iter()\n\t\tvar result Document\n\t\tfor iter.Next(&result) {\n\t\t\tselect {\n\t\t\tcase <-stopchan:\n\t\t\t\tbreak\n\t\t\tcase cont <- result:\n\t\t\t}\n\t\t}\n\t\tif err := iter.Close(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\n\treturn cont, nil\n}\n<|endoftext|>"} {"text":"<commit_before>package main\n\nimport (\n\t\"fmt\"\n\t\"ppd\/gpubsub\"\n\t\/\/\"time\"\n)\n\nfunc main() {\n\tb := gpubsub.Broker{}\n\tport := \":8999\"\n\tfmt.Printf(\"Server listening on %s\\n\", port);\n\t\n\t\/*go func(b *gpubsub.Broker) {\n\t\tfor {\n\t\t\ttime.Sleep(5*time.Second)\n\t\t\tb.PrintTopics();\n\t\t}\t\n\t}(&b)*\/\n\t\n\tb.Start(port, 10, 20)\n\n}<commit_msg>Passing parameters to Broker<commit_after>package main\n\nimport (\n\t\"fmt\"\n\t\"ppd\/gpubsub\"\n\t\"os\"\n\t\"strconv\"\n)\n\nfunc main() {\n\t\n\tif len(os.Args) < 2 {\n\t\tfmt.Printf(\"Informe a porta e o tamanho do buffer em cada topico.\\n\");\n\t\treturn;\n\t}\n\t\t\n\tb := gpubsub.Broker{}\n\tport := os.Args[1]\n\tfmt.Printf(\"Server listening on %s\\n\", port);\n\t\n\t\/*go func(b *gpubsub.Broker) {\n\t\tfor {\n\t\t\ttime.Sleep(5*time.Second)\n\t\t\tb.PrintTopics();\n\t\t}\t\n\t}(&b)*\/\n\t\n\tk, _ := strconv.Atoi(os.Args[2])\n\tb.Start(port, k, 20)\n\n}<|endoftext|>"} {"text":"<commit_before>package diag\n\n\/\/ Ranger wraps the Range method.\ntype Ranger interface {\n\t\/\/ Range returns the range associated with the value.\n\tRange() Ranging\n}\n\n\/\/ Ranging represents a range [From, To) within an indexable sequence. Structs\n\/\/ can embed Ranging to satisfy the Ranger interface.\ntype Ranging struct {\n\tFrom int\n\tTo int\n}\n\n\/\/ Ranging returns itself.\nfunc (r Ranging) Range() Ranging { return r }\n<commit_msg>diag: Fix a godoc.<commit_after>package diag\n\n\/\/ Ranger wraps the Range method.\ntype Ranger interface {\n\t\/\/ Range returns the range associated with the value.\n\tRange() Ranging\n}\n\n\/\/ Ranging represents a range [From, To) within an indexable sequence. Structs\n\/\/ can embed Ranging to satisfy the Ranger interface.\ntype Ranging struct {\n\tFrom int\n\tTo int\n}\n\n\/\/ Range returns the Ranging itself.\nfunc (r Ranging) Range() Ranging { return r }\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2016 Stratumn SAS. All rights reserved.\n\/\/ Use of this source code is governed by an Apache License 2.0\n\/\/ that can be found in the LICENSE file.\n\n\/\/ Package bcbatchfossilizer implements a fossilizer that fossilize batches of hashes on a blockchain.\npackage bcbatchfossilizer\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/stratumn\/go\/fossilizer\"\n\t\"github.com\/stratumn\/goprivate\/batchfossilizer\"\n\t\"github.com\/stratumn\/goprivate\/blockchain\"\n\t\"github.com\/stratumn\/goprivate\/types\"\n)\n\nconst (\n\t\/\/ Name is the name set in the fossilizer's information.\n\tName = \"bcbatch\"\n\n\t\/\/ Description is the description set in the fossilizer's information.\n\tDescription = \"Stratumn Blockchain Batch Fossilizer\"\n)\n\n\/\/ Config contains configuration options for the fossilizer.\ntype Config struct {\n\tHashTimestamper blockchain.HashTimestamper\n}\n\n\/\/ Info is the info returned by GetInfo.\ntype Info struct {\n\tName string `json:\"name\"`\n\tDescription string `json:\"description\"`\n\tVersion string `json:\"version\"`\n\tCommit string `json:\"commit\"`\n\tBlockchain string `json:\"blockchain\"`\n}\n\n\/\/ Evidence is the evidence sent to the result channel.\ntype Evidence struct {\n\t*batchfossilizer.Evidence\n\tTransactionID blockchain.TransactionID `json:\"txid\"`\n}\n\n\/\/ Fossilizer is the type that implements github.com\/stratumn\/go\/fossilizer.Adapter.\ntype Fossilizer struct {\n\t*batchfossilizer.Fossilizer\n\tconfig *Config\n\tresultChans []chan *fossilizer.Result\n\tresultChan chan *fossilizer.Result\n}\n\n\/\/ New creates an instance of a Fossilizer.\nfunc New(config *Config, batchConfig *batchfossilizer.Config) (*Fossilizer, error) {\n\tb, err := batchfossilizer.New(batchConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Fossilizer{\n\t\tFossilizer: b,\n\t\tconfig: config,\n\t}, err\n}\n\n\/\/ GetInfo implements github.com\/stratumn\/go\/fossilizer.Adapter.GetInfo.\nfunc (a *Fossilizer) GetInfo() (interface{}, error) {\n\tbatchInfo, err := a.Fossilizer.GetInfo()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo, ok := batchInfo.(*batchfossilizer.Info)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unexpected batchfossilizer info %#v\", batchInfo)\n\t}\n\n\treturn &Info{\n\t\tName: Name,\n\t\tDescription: Description,\n\t\tVersion: info.Version,\n\t\tCommit: info.Commit,\n\t\tBlockchain: a.config.HashTimestamper.Network().String(),\n\t}, nil\n}\n\n\/\/ AddResultChan implements github.com\/stratumn\/go\/fossilizer.Adapter.AddResultChan.\nfunc (a *Fossilizer) AddResultChan(resultChan chan *fossilizer.Result) {\n\ta.resultChans = append(a.resultChans, resultChan)\n}\n\n\/\/ Start starts the fossilizer.\nfunc (a *Fossilizer) Start() error {\n\ta.resultChan = make(chan *fossilizer.Result)\n\ta.Fossilizer.AddResultChan(a.resultChan)\n\n\tgo func() {\n\t\tvar (\n\t\t\terr error\n\t\t\tlastRoot *types.Bytes32\n\t\t\tlastTransactionID blockchain.TransactionID\n\t\t)\n\n\t\tfor r := range a.resultChan {\n\t\t\tbatchEvidenceWrapper, ok := r.Evidence.(*batchfossilizer.EvidenceWrapper)\n\t\t\tif !ok {\n\t\t\t\tlog.Printf(\"Error: unexpected batchfossilizer evidence %#v\\n\", batchEvidenceWrapper)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\troot := batchEvidenceWrapper.Evidence.Root\n\n\t\t\tif lastRoot == nil || *root != *lastRoot {\n\t\t\t\tlastTransactionID, err = a.config.HashTimestamper.TimestampHash(root)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"Error: %s\\n\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tevidenceWrapper := map[string]*Evidence{}\n\t\t\tevidenceWrapper[a.config.HashTimestamper.Network().String()] = &Evidence{\n\t\t\t\tEvidence: batchEvidenceWrapper.Evidence,\n\t\t\t\tTransactionID: lastTransactionID,\n\t\t\t}\n\t\t\tr.Evidence = evidenceWrapper\n\n\t\t\tfor _, c := range a.resultChans {\n\t\t\t\tc <- r\n\t\t\t}\n\n\t\t\tlastRoot = root\n\t\t}\n\t}()\n\n\treturn a.Fossilizer.Start()\n}\n\n\/\/ Stop stops the fossilizer.\nfunc (a *Fossilizer) Stop() error {\n\terr := a.Fossilizer.Stop()\n\tclose(a.resultChan)\n\treturn err\n}\n<commit_msg>bcbatchfossilizer: Clean output<commit_after>\/\/ Copyright 2016 Stratumn SAS. All rights reserved.\n\/\/ Use of this source code is governed by an Apache License 2.0\n\/\/ that can be found in the LICENSE file.\n\n\/\/ Package bcbatchfossilizer implements a fossilizer that fossilize batches of hashes on a blockchain.\npackage bcbatchfossilizer\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com\/stratumn\/go\/fossilizer\"\n\t\"github.com\/stratumn\/goprivate\/batchfossilizer\"\n\t\"github.com\/stratumn\/goprivate\/blockchain\"\n\t\"github.com\/stratumn\/goprivate\/types\"\n)\n\nconst (\n\t\/\/ Name is the name set in the fossilizer's information.\n\tName = \"bcbatch\"\n\n\t\/\/ Description is the description set in the fossilizer's information.\n\tDescription = \"Stratumn Blockchain Batch Fossilizer\"\n)\n\n\/\/ Config contains configuration options for the fossilizer.\ntype Config struct {\n\tHashTimestamper blockchain.HashTimestamper\n}\n\n\/\/ Info is the info returned by GetInfo.\ntype Info struct {\n\tName string `json:\"name\"`\n\tDescription string `json:\"description\"`\n\tVersion string `json:\"version\"`\n\tCommit string `json:\"commit\"`\n\tBlockchain string `json:\"blockchain\"`\n}\n\n\/\/ Evidence is the evidence sent to the result channel.\ntype Evidence struct {\n\t*batchfossilizer.Evidence\n\tTransactionID blockchain.TransactionID `json:\"txid\"`\n}\n\n\/\/ Fossilizer is the type that implements github.com\/stratumn\/go\/fossilizer.Adapter.\ntype Fossilizer struct {\n\t*batchfossilizer.Fossilizer\n\tconfig *Config\n\tresultChans []chan *fossilizer.Result\n\tresultChan chan *fossilizer.Result\n}\n\n\/\/ New creates an instance of a Fossilizer.\nfunc New(config *Config, batchConfig *batchfossilizer.Config) (*Fossilizer, error) {\n\tb, err := batchfossilizer.New(batchConfig)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Fossilizer{\n\t\tFossilizer: b,\n\t\tconfig: config,\n\t}, err\n}\n\n\/\/ GetInfo implements github.com\/stratumn\/go\/fossilizer.Adapter.GetInfo.\nfunc (a *Fossilizer) GetInfo() (interface{}, error) {\n\tbatchInfo, err := a.Fossilizer.GetInfo()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo, ok := batchInfo.(*batchfossilizer.Info)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unexpected batchfossilizer info %#v\", batchInfo)\n\t}\n\n\treturn &Info{\n\t\tName: Name,\n\t\tDescription: Description,\n\t\tVersion: info.Version,\n\t\tCommit: info.Commit,\n\t\tBlockchain: a.config.HashTimestamper.Network().String(),\n\t}, nil\n}\n\n\/\/ AddResultChan implements github.com\/stratumn\/go\/fossilizer.Adapter.AddResultChan.\nfunc (a *Fossilizer) AddResultChan(resultChan chan *fossilizer.Result) {\n\ta.resultChans = append(a.resultChans, resultChan)\n}\n\n\/\/ Start starts the fossilizer.\nfunc (a *Fossilizer) Start() error {\n\ta.resultChan = make(chan *fossilizer.Result)\n\ta.Fossilizer.AddResultChan(a.resultChan)\n\n\tgo func() {\n\t\tvar (\n\t\t\terr error\n\t\t\tlastRoot *types.Bytes32\n\t\t\tlastTransactionID blockchain.TransactionID\n\t\t)\n\n\t\tfor r := range a.resultChan {\n\t\t\tbatchEvidenceWrapper, ok := r.Evidence.(*batchfossilizer.EvidenceWrapper)\n\t\t\tif !ok {\n\t\t\t\tlog.Printf(\"Error: unexpected batchfossilizer evidence %#v\", batchEvidenceWrapper)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\troot := batchEvidenceWrapper.Evidence.Root\n\n\t\t\tif lastRoot == nil || *root != *lastRoot {\n\t\t\t\tlastTransactionID, err = a.config.HashTimestamper.TimestampHash(root)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"Error: %s\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tevidenceWrapper := map[string]*Evidence{}\n\t\t\tevidenceWrapper[a.config.HashTimestamper.Network().String()] = &Evidence{\n\t\t\t\tEvidence: batchEvidenceWrapper.Evidence,\n\t\t\t\tTransactionID: lastTransactionID,\n\t\t\t}\n\t\t\tr.Evidence = evidenceWrapper\n\n\t\t\tfor _, c := range a.resultChans {\n\t\t\t\tc <- r\n\t\t\t}\n\n\t\t\tlastRoot = root\n\t\t}\n\t}()\n\n\treturn a.Fossilizer.Start()\n}\n\n\/\/ Stop stops the fossilizer.\nfunc (a *Fossilizer) Stop() error {\n\terr := a.Fossilizer.Stop()\n\tclose(a.resultChan)\n\treturn err\n}\n<|endoftext|>"} {"text":"<commit_before>package memdtool\n\nconst version = \"0.0.0\"\n\nvar revision = \"Devel\"\n<commit_msg>Checking in changes prior to tagging of version v0.1.0<commit_after>package memdtool\n\nconst version = \"0.1.0\"\n\nvar revision = \"Devel\"\n<|endoftext|>"} {"text":"<commit_before>package main\n\n\/*\n * Copyright 2016 Albert P. Tobey <atobey@netflix.com>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\nimport (\n\t\"github.com\/netflix\/hal-9001\/brokers\/generic\"\n\t\"github.com\/netflix\/hal-9001\/hal\"\n)\n\n\/\/ This bot doesn't do anything except set up the generic broker and then\n\/\/ block forever. The generic broker doesn't produce anything so nothing\n\/\/ will happen and this is totally useless except to demonstrate the minimum\n\/\/ amount of hal's API required to start the system.\n\/\/\n\/\/ Most of hal's functionality is optional. It's still built along with the\n\/\/ rest of hal but is not active unless it's used in main or a plugin.\n\nfunc main() {\n\tconf := generic.Config{}\n\tbroker := conf.NewBroker(\"generic\")\n\n\trouter := hal.Router()\n\trouter.AddBroker(broker)\n\trouter.Route()\n\n\t\/\/ TODO: maybe add a timer loop to inject some messages and exercise\n\t\/\/ the system a little.\n}\n<commit_msg>remove the no-long-existing generic broker<commit_after>package main\n\n\/*\n * Copyright 2016 Albert P. Tobey <atobey@netflix.com>\n *\n * Licensed under the Apache License, Version 2.0 (the \"License\");\n * you may not use this file except in compliance with the License.\n * You may obtain a copy of the License at\n *\n * http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n *\n * Unless required by applicable law or agreed to in writing, software\n * distributed under the License is distributed on an \"AS IS\" BASIS,\n * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n * See the License for the specific language governing permissions and\n * limitations under the License.\n *\/\n\nimport \"github.com\/netflix\/hal-9001\/hal\"\n\n\/\/ This bot doesn't do anything except start the router and wait forever\n\/\/ for messages that will never come.\n\/\/\n\/\/ Most of hal's functionality is optional. It's still built along with the\n\/\/ rest of hal but is not active unless it's used in main or a plugin.\n\nfunc main() {\n\trouter := hal.Router()\n\trouter.Route()\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014 James McGuire. All rights reserved.\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\t\"sync\"\n)\n\nconst baseTable = `CREATE TABLE Words (\n Nick VARCHAR(32),\n %s\n primary KEY (Nick)) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n `\n\nfunc updateWords(nick, message string) error {\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor word, regex := range badWords {\n\t\tnumwords := len(regex.FindAllString(strings.ToLower(message), -1))\n\t\tif numwords == 0 {\n\t\t\tcontinue\n\t\t}\n\t\t_, err = tx.Exec(fmt.Sprintf(`INSERT INTO Words (Nick, %[1]s) VALUES (?, ?)`+\n\t\t\t` ON DUPLICATE KEY UPDATE %[1]s=%[1]s+VALUES(%[1]s)`, word), nick, numwords)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc genTables() {\n\tlog.Println(\"Regenerating Words table\")\n\twordList := \"\"\n\tfor _, word := range config.BadWords {\n\t\twordList += word.Word + \" INT(32) NOT NULL DEFAULT 0, \"\n\t}\n\n\t_, err := db.Exec(`DROP TABLE IF EXISTS Words`)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t_, err = db.Exec(fmt.Sprintf(baseTable, wordList))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgenChan := make(chan struct {\n\t\tNick string\n\t\tMessage string\n\t})\n\twg := sync.WaitGroup{}\n\tfor i := 0; i < 100; i++ {\n\t\tgo func() {\n\t\t\tfor post := range genChan {\n\t\t\t\terr := updateWords(post.Nick, post.Message)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t\twg.Done()\n\t\t\t}\n\t\t}()\n\t}\n\n\trows, err := db.Query(`SELECT Nick, Message from messages WHERE channel='#geekhack'`)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor rows.Next() {\n\t\tvar nick, message string\n\t\terr := rows.Scan(&nick, &message)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\twg.Add(1)\n\t\tgenChan <- struct {\n\t\t\tNick string\n\t\t\tMessage string\n\t\t}{nick, message}\n\t}\n\n\twg.Wait()\n\tclose(genChan)\n\n\tif err = rows.Err(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Println(\"Finished generating Words!\")\n}\n<commit_msg>rename words table<commit_after>\/\/ Copyright 2014 James McGuire. All rights reserved.\n\/\/ Use of this source code is governed by the MIT\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\t\"sync\"\n)\n\nconst baseTable = `CREATE TABLE words (\n Nick VARCHAR(32),\n %s\n primary KEY (Nick)) ENGINE=InnoDB DEFAULT CHARSET=utf8;\n `\n\nfunc updateWords(nick, message string) error {\n\ttx, err := db.Begin()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor word, regex := range badWords {\n\t\tnumwords := len(regex.FindAllString(strings.ToLower(message), -1))\n\t\tif numwords == 0 {\n\t\t\tcontinue\n\t\t}\n\t\t_, err = tx.Exec(fmt.Sprintf(`INSERT INTO words (Nick, %[1]s) VALUES (?, ?)`+\n\t\t\t` ON DUPLICATE KEY UPDATE %[1]s=%[1]s+VALUES(%[1]s)`, word), nick, numwords)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr = tx.Commit()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc genTables() {\n\tlog.Println(\"Regenerating Words table\")\n\twordList := \"\"\n\tfor _, word := range config.BadWords {\n\t\twordList += word.Word + \" INT(32) NOT NULL DEFAULT 0, \"\n\t}\n\n\t_, err := db.Exec(`DROP TABLE IF EXISTS words`)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t_, err = db.Exec(fmt.Sprintf(baseTable, wordList))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tgenChan := make(chan struct {\n\t\tNick string\n\t\tMessage string\n\t})\n\twg := sync.WaitGroup{}\n\tfor i := 0; i < 100; i++ {\n\t\tgo func() {\n\t\t\tfor post := range genChan {\n\t\t\t\terr := updateWords(post.Nick, post.Message)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t\twg.Done()\n\t\t\t}\n\t\t}()\n\t}\n\n\trows, err := db.Query(`SELECT Nick, Message from messages WHERE channel='#geekhack'`)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor rows.Next() {\n\t\tvar nick, message string\n\t\terr := rows.Scan(&nick, &message)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\twg.Add(1)\n\t\tgenChan <- struct {\n\t\t\tNick string\n\t\t\tMessage string\n\t\t}{nick, message}\n\t}\n\n\twg.Wait()\n\tclose(genChan)\n\n\tif err = rows.Err(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Println(\"Finished generating Words!\")\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2014-2015 Apptimist, Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"crypto\/rand\"\n\t\"os\"\n\t\"strings\"\n\t\"syscall\"\n)\n\ntype Ses struct {\n\tname string\n\tASN *ASN\n\tsrv *Server\n\tKeys struct {\n\t\tServer struct {\n\t\t\tEphemeral EncrPub\n\t\t}\n\t\tClient struct {\n\t\t\tEphemeral, Login EncrPub\n\t\t}\n\t}\n\n\tLat, Lon, Range int32\n\n\tasnsrv bool \/\/ true if: asnsrv CONFIG ...\n}\n\nvar SesPool chan *Ses\n\nfunc init() { SesPool = make(chan *Ses, 16) }\n\nfunc NewSes() (ses *Ses) {\n\tselect {\n\tcase ses = <-SesPool:\n\tdefault:\n\t\tses = &Ses{}\n\t}\n\tses.ASN = NewASN()\n\treturn\n}\n\nfunc SesPoolFlush() {\n\tfor {\n\t\tselect {\n\t\tcase <-SesPool:\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (ses *Ses) DN() string { return ses.srv.cmd.Cfg.Dir }\n\n\/\/ dist pdu list to online sessions. Any sessions to other servers receive the\n\/\/ first link which is the RESPO\/SHA. All user sessions receive \"asn\/mark\". Any\n\/\/ other named blob named REPOS\/USER\/PATH goes to the associated USER sessions.\nfunc (ses *Ses) dist(pdus []*PDU) {\n\tses.srv.ForEachSession(func(x *Ses) {\n\t\tif x == ses {\n\t\t\treturn\n\t\t}\n\t\tlogin := x.Keys.Client.Login\n\t\tslogin := login.String()\n\t\tserver := x.srv.cmd.Cfg.Keys.Server.Pub.Encr\n\t\tif login.Equal(server) {\n\t\t\tif pdus[0] != nil {\n\t\t\t\tx.ASN.Tx(pdus[0])\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tfor _, pdu := range pdus[1:] {\n\t\t\tif pdu != nil {\n\t\t\t\tsuser, _ := x.srv.repos.ParsePath(pdu.FN)\n\t\t\t\tif suser != \"\" &&\n\t\t\t\t\tsuser == slogin[:len(suser)] {\n\t\t\t\t\tx.ASN.Tx(pdu)\n\t\t\t\t\t\/\/ be sure to send only one per session\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\tfor i := range pdus {\n\t\tpdus[i].Free()\n\t\tpdus[i] = nil\n\t}\n}\n\n\/\/ Free the Ses by pooling or release it to GC if pool is full.\nfunc (ses *Ses) Free() {\n\tif ses != nil {\n\t\tses.name = \"\"\n\t\tses.ASN.Free()\n\t\tses.ASN = nil\n\t\tses.srv = nil\n\t\tselect {\n\t\tcase SesPool <- ses:\n\t\tdefault:\n\t\t}\n\t}\n}\n\nfunc (ses *Ses) IsAdmin(key *EncrPub) bool {\n\treturn *key == *ses.srv.cmd.Cfg.Keys.Admin.Pub.Encr\n}\n\nfunc (ses *Ses) IsService(key *EncrPub) bool {\n\treturn *key == *ses.srv.cmd.Cfg.Keys.Server.Pub.Encr\n}\n\nfunc (ses *Ses) Rekey(req Requester) {\n\tvar nonce Nonce\n\trand.Reader.Read(nonce[:])\n\tpub, sec, _ := NewRandomEncrKeys()\n\tses.Keys.Server.Ephemeral = *pub\n\tses.ASN.Ack(req, pub[:], nonce[:])\n\tses.ASN.SetStateEstablished()\n\tses.ASN.SetBox(NewBox(2, &nonce, &ses.Keys.Client.Ephemeral,\n\t\tpub, sec))\n\tses.ASN.Println(\"rekeyed with\", pub.String()[:8]+\"...\")\n}\n\n\/\/ removals: if pdus[1] is a filename containing \"asn\/removals\",\n\/\/ remove the referenced files, then the \"asn\/removals\/\" link.\n\/\/ However, keep the SUM file to distribute with clone.\nfunc (ses *Ses) removals(pdus []*PDU) {\n\tif len(pdus) == 2 && pdus[1] != nil &&\n\t\tstrings.Contains(pdus[1].FN, \"asn\/removals\") {\n\t\tf, err := os.Open(pdus[1].FN)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer func() {\n\t\t\tf.Close()\n\t\t\tsyscall.Unlink(pdus[1].FN)\n\t\t\tpdus[1].Free()\n\t\t\tpdus[1] = nil\n\t\t}()\n\t\tBlobSeek(f)\n\t\tscanner := bufio.NewScanner(f)\n\t\tfor scanner.Scan() {\n\t\t\tfn := ses.srv.repos.Join(scanner.Text())\n\t\t\tses.ASN.Diag(\"unlinked\", fn)\n\t\t\tsyscall.Unlink(fn)\n\t\t}\n\t\tscanner = nil\n\t}\n}\n\nfunc (ses *Ses) RxBlob(pdu *PDU) (err error) {\n\tblob, err := NewBlobFrom(pdu)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tblob.Free()\n\t\tblob = nil\n\t}()\n\tsum, fn, err := ses.srv.repos.File(blob, pdu)\n\tif err != nil {\n\t\treturn\n\t}\n\tlinks, err := ses.srv.repos.MkLinks(blob, sum, fn)\n\tif err != nil {\n\t\treturn\n\t}\n\tses.removals(links)\n\tses.dist(links)\n\tlinks = nil\n\treturn\n}\n\nfunc (ses *Ses) RxLogin(pdu *PDU) (err error) {\n\tvar (\n\t\treq Requester\n\t\tsig AuthSig\n\t)\n\treq.ReadFrom(pdu)\n\t_, err = pdu.Read(ses.Keys.Client.Login[:])\n\tif err == nil {\n\t\t_, err = pdu.Read(sig[:])\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\terr = ErrFailure\n\tswitch {\n\tcase ses.Keys.Client.Login.Equal(ses.srv.cmd.Cfg.Keys.Admin.Pub.Encr):\n\t\tif sig.Verify(ses.srv.cmd.Cfg.Keys.Admin.Pub.Auth,\n\t\t\tses.Keys.Client.Login[:]) {\n\t\t\tses.ASN.Name.Remote = \"admin\"\n\t\t\terr = nil\n\t\t}\n\tcase ses.Keys.Client.Login.Equal(ses.srv.cmd.Cfg.Keys.Server.Pub.Encr):\n\t\tif sig.Verify(ses.srv.cmd.Cfg.Keys.Server.Pub.Auth,\n\t\t\tses.Keys.Client.Login[:]) {\n\t\t\tses.ASN.Name.Remote = \"server\"\n\t\t\terr = nil\n\t\t}\n\tdefault:\n\t\tlogin := ses.Keys.Client.Login\n\t\tuser := ses.srv.repos.Users.Search(login)\n\t\tif user != nil && sig.Verify(&user.ASN.Auth, login[:]) {\n\t\t\tses.ASN.Name.Remote = login.String()[:8]\n\t\t\terr = nil\n\t\t}\n\t}\n\tses.ASN.Name.Session = ses.ASN.Name.Local + \":\" + ses.ASN.Name.Remote\n\tif err != nil {\n\t\tses.ASN.Println(\"login\", err)\n\t} else {\n\t\tses.Rekey(req)\n\t\tses.ASN.Println(\"login\")\n\t}\n\treturn\n}\n\nfunc (ses *Ses) RxPause(pdu *PDU) error {\n\tvar req Requester\n\treq.ReadFrom(pdu)\n\tses.ASN.Println(\"suspending\")\n\tses.ASN.Ack(req)\n\tses.ASN.SetStateSuspended()\n\treturn nil\n}\n\nfunc (ses *Ses) RxQuit(pdu *PDU) error {\n\tvar req Requester\n\treq.ReadFrom(pdu)\n\tses.ASN.Println(\"quitting\")\n\tses.ASN.Ack(req)\n\tses.ASN.SetStateQuitting()\n\treturn nil\n}\n\nfunc (ses *Ses) RxResume(pdu *PDU) error {\n\tvar req Requester\n\treq.ReadFrom(pdu)\n\tses.ASN.Println(\"resuming\")\n\tses.Rekey(req)\n\treturn nil\n}\n\nfunc (ses *Ses) Send(fn string, keys ...*EncrPub) {\n\t\/\/ FIXME\n}\n<commit_msg>fix login crash<commit_after>\/\/ Copyright 2014-2015 Apptimist, Inc. All rights reserved.\n\/\/ Use of this source code is governed by a BSD-style\n\/\/ license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"crypto\/rand\"\n\t\"os\"\n\t\"strings\"\n\t\"syscall\"\n)\n\ntype Ses struct {\n\tname string\n\tASN *ASN\n\tsrv *Server\n\tKeys struct {\n\t\tServer struct {\n\t\t\tEphemeral EncrPub\n\t\t}\n\t\tClient struct {\n\t\t\tEphemeral, Login EncrPub\n\t\t}\n\t}\n\n\tLat, Lon, Range int32\n\n\tasnsrv bool \/\/ true if: asnsrv CONFIG ...\n}\n\nvar SesPool chan *Ses\n\nfunc init() { SesPool = make(chan *Ses, 16) }\n\nfunc NewSes() (ses *Ses) {\n\tselect {\n\tcase ses = <-SesPool:\n\tdefault:\n\t\tses = &Ses{}\n\t}\n\tses.ASN = NewASN()\n\treturn\n}\n\nfunc SesPoolFlush() {\n\tfor {\n\t\tselect {\n\t\tcase <-SesPool:\n\t\tdefault:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (ses *Ses) DN() string { return ses.srv.cmd.Cfg.Dir }\n\n\/\/ dist pdu list to online sessions. Any sessions to other servers receive the\n\/\/ first link which is the RESPO\/SHA. All user sessions receive \"asn\/mark\". Any\n\/\/ other named blob named REPOS\/USER\/PATH goes to the associated USER sessions.\nfunc (ses *Ses) dist(pdus []*PDU) {\n\tses.srv.ForEachSession(func(x *Ses) {\n\t\tif x == ses {\n\t\t\treturn\n\t\t}\n\t\tlogin := x.Keys.Client.Login\n\t\tslogin := login.String()\n\t\tserver := x.srv.cmd.Cfg.Keys.Server.Pub.Encr\n\t\tif login.Equal(server) {\n\t\t\tif pdus[0] != nil {\n\t\t\t\tx.ASN.Tx(pdus[0])\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tfor _, pdu := range pdus[1:] {\n\t\t\tif pdu != nil {\n\t\t\t\tsuser, _ := x.srv.repos.ParsePath(pdu.FN)\n\t\t\t\tif suser != \"\" &&\n\t\t\t\t\tsuser == slogin[:len(suser)] {\n\t\t\t\t\tx.ASN.Tx(pdu)\n\t\t\t\t\t\/\/ be sure to send only one per session\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\tfor i := range pdus {\n\t\tpdus[i].Free()\n\t\tpdus[i] = nil\n\t}\n}\n\n\/\/ Free the Ses by pooling or release it to GC if pool is full.\nfunc (ses *Ses) Free() {\n\tif ses != nil {\n\t\tses.name = \"\"\n\t\tses.ASN.Free()\n\t\tses.ASN = nil\n\t\tses.srv = nil\n\t\tselect {\n\t\tcase SesPool <- ses:\n\t\tdefault:\n\t\t}\n\t}\n}\n\nfunc (ses *Ses) IsAdmin(key *EncrPub) bool {\n\treturn *key == *ses.srv.cmd.Cfg.Keys.Admin.Pub.Encr\n}\n\nfunc (ses *Ses) IsService(key *EncrPub) bool {\n\treturn *key == *ses.srv.cmd.Cfg.Keys.Server.Pub.Encr\n}\n\nfunc (ses *Ses) Rekey(req Requester) {\n\tvar nonce Nonce\n\trand.Reader.Read(nonce[:])\n\tpub, sec, _ := NewRandomEncrKeys()\n\tses.Keys.Server.Ephemeral = *pub\n\tses.ASN.Ack(req, pub[:], nonce[:])\n\tses.ASN.SetStateEstablished()\n\tses.ASN.SetBox(NewBox(2, &nonce, &ses.Keys.Client.Ephemeral,\n\t\tpub, sec))\n\tses.ASN.Println(\"rekeyed with\", pub.String()[:8]+\"...\")\n}\n\n\/\/ removals: if pdus[1] is a filename containing \"asn\/removals\",\n\/\/ remove the referenced files, then the \"asn\/removals\/\" link.\n\/\/ However, keep the SUM file to distribute with clone.\nfunc (ses *Ses) removals(pdus []*PDU) {\n\tif len(pdus) == 2 && pdus[1] != nil &&\n\t\tstrings.Contains(pdus[1].FN, \"asn\/removals\") {\n\t\tf, err := os.Open(pdus[1].FN)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tdefer func() {\n\t\t\tf.Close()\n\t\t\tsyscall.Unlink(pdus[1].FN)\n\t\t\tpdus[1].Free()\n\t\t\tpdus[1] = nil\n\t\t}()\n\t\tBlobSeek(f)\n\t\tscanner := bufio.NewScanner(f)\n\t\tfor scanner.Scan() {\n\t\t\tfn := ses.srv.repos.Join(scanner.Text())\n\t\t\tses.ASN.Diag(\"unlinked\", fn)\n\t\t\tsyscall.Unlink(fn)\n\t\t}\n\t\tscanner = nil\n\t}\n}\n\nfunc (ses *Ses) RxBlob(pdu *PDU) (err error) {\n\tblob, err := NewBlobFrom(pdu)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\tblob.Free()\n\t\tblob = nil\n\t}()\n\tsum, fn, err := ses.srv.repos.File(blob, pdu)\n\tif err != nil {\n\t\treturn\n\t}\n\tlinks, err := ses.srv.repos.MkLinks(blob, sum, fn)\n\tif err != nil {\n\t\treturn\n\t}\n\tses.removals(links)\n\tses.dist(links)\n\tlinks = nil\n\treturn\n}\n\nfunc (ses *Ses) RxLogin(pdu *PDU) (err error) {\n\tvar (\n\t\treq Requester\n\t\tsig AuthSig\n\t)\n\treq.ReadFrom(pdu)\n\t_, err = pdu.Read(ses.Keys.Client.Login[:])\n\tif err == nil {\n\t\t_, err = pdu.Read(sig[:])\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\terr = ErrFailure\n\tswitch {\n\tcase ses.Keys.Client.Login.Equal(ses.srv.cmd.Cfg.Keys.Admin.Pub.Encr):\n\t\tif sig.Verify(ses.srv.cmd.Cfg.Keys.Admin.Pub.Auth,\n\t\t\tses.Keys.Client.Login[:]) {\n\t\t\tses.ASN.Name.Remote = \"admin\"\n\t\t\terr = nil\n\t\t}\n\tcase ses.Keys.Client.Login.Equal(ses.srv.cmd.Cfg.Keys.Server.Pub.Encr):\n\t\tif sig.Verify(ses.srv.cmd.Cfg.Keys.Server.Pub.Auth,\n\t\t\tses.Keys.Client.Login[:]) {\n\t\t\tses.ASN.Name.Remote = \"server\"\n\t\t\terr = nil\n\t\t}\n\tdefault:\n\t\tlogin := ses.Keys.Client.Login\n\t\tuser := ses.srv.repos.Users.Search(&login)\n\t\tif user != nil && sig.Verify(&user.ASN.Auth, login[:]) {\n\t\t\tses.ASN.Name.Remote = login.String()[:8]\n\t\t\terr = nil\n\t\t}\n\t}\n\tses.ASN.Name.Session = ses.ASN.Name.Local + \":\" + ses.ASN.Name.Remote\n\tif err != nil {\n\t\tses.ASN.Println(\"login\", err)\n\t} else {\n\t\tses.Rekey(req)\n\t\tses.ASN.Println(\"login\")\n\t}\n\treturn\n}\n\nfunc (ses *Ses) RxPause(pdu *PDU) error {\n\tvar req Requester\n\treq.ReadFrom(pdu)\n\tses.ASN.Println(\"suspending\")\n\tses.ASN.Ack(req)\n\tses.ASN.SetStateSuspended()\n\treturn nil\n}\n\nfunc (ses *Ses) RxQuit(pdu *PDU) error {\n\tvar req Requester\n\treq.ReadFrom(pdu)\n\tses.ASN.Println(\"quitting\")\n\tses.ASN.Ack(req)\n\tses.ASN.SetStateQuitting()\n\treturn nil\n}\n\nfunc (ses *Ses) RxResume(pdu *PDU) error {\n\tvar req Requester\n\treq.ReadFrom(pdu)\n\tses.ASN.Println(\"resuming\")\n\tses.Rekey(req)\n\treturn nil\n}\n\nfunc (ses *Ses) Send(fn string, keys ...*EncrPub) {\n\t\/\/ FIXME\n}\n<|endoftext|>"} {"text":"<commit_before>\/\/ Copyright 2018 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"cloud.google.com\/go\/bigquery\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/golang\/protobuf\/protoc-gen-go\/descriptor\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/api\/option\"\n\n\t\"go.chromium.org\/luci\/auth\"\n\t\"go.chromium.org\/luci\/common\/errors\"\n\t\"go.chromium.org\/luci\/common\/flag\/stringlistflag\"\n\t\"go.chromium.org\/luci\/common\/proto\/google\/descutil\"\n\t\"go.chromium.org\/luci\/hardcoded\/chromeinfra\"\n)\n\nvar (\n\tcanceledByUser = errors.BoolTag{\n\t\tKey: errors.NewTagKey(\"operation canceled by user\"),\n\t}\n\terrCanceledByUser = errors.Reason(\"operation canceled by user\").Tag(canceledByUser).Err()\n)\n\ntype tableDef struct {\n\tProjectID string\n\tDataSetID string\n\tTableID string\n\tFriendlyName string\n\tDescription string\n\tPartitioningDisabled bool\n\tPartitioningExpiration time.Duration\n\tPartitioningField string\n\tSchema bigquery.Schema\n}\n\nfunc updateFromTableDef(ctx context.Context, force bool, ts tableStore, td tableDef) error {\n\ttableID := fmt.Sprintf(\"%s.%s.%s\", td.ProjectID, td.DataSetID, td.TableID)\n\tshouldContinue := func() bool {\n\t\tif force {\n\t\t\treturn true\n\t\t}\n\t\treturn confirm(\"Continue\")\n\t}\n\n\tmd, err := ts.getTableMetadata(ctx, td.DataSetID, td.TableID)\n\tswitch {\n\tcase isNotFound(err): \/\/ new table\n\t\tfmt.Printf(\"Table %q does not exist.\\n\", tableID)\n\t\tfmt.Println(\"It will be created with the following schema:\")\n\t\tfmt.Println(strings.Repeat(\"=\", 80))\n\t\tfmt.Println(schemaString(td.Schema))\n\t\tfmt.Println(strings.Repeat(\"=\", 80))\n\t\tif !shouldContinue() {\n\t\t\treturn errCanceledByUser\n\t\t}\n\n\t\tmd := &bigquery.TableMetadata{\n\t\t\tName: td.FriendlyName,\n\t\t\tDescription: td.Description,\n\t\t\tSchema: td.Schema,\n\t\t}\n\t\tif !td.PartitioningDisabled {\n\t\t\tmd.TimePartitioning = &bigquery.TimePartitioning{\n\t\t\t\tExpiration: td.PartitioningExpiration,\n\t\t\t\tField: td.PartitioningField,\n\t\t\t}\n\t\t}\n\t\terr := ts.createTable(ctx, td.DataSetID, td.TableID, md)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(\"Table is created.\")\n\t\tfmt.Println(\"Please update the documentation in https:\/\/chromium.googlesource.com\/infra\/infra\/+\/master\/doc\/bigquery_tables.md or the internal equivalent.\")\n\t\treturn nil\n\n\tcase err != nil:\n\t\treturn err\n\n\tdefault: \/\/ existing table\n\t\tfmt.Printf(\"Updating table %q\\n\", tableID)\n\t\tif diff := schemaDiff(md.Schema, td.Schema); diff == \"\" {\n\t\t\tfmt.Println(\"No changes to schema detected.\")\n\t\t} else {\n\t\t\tfmt.Println(\"The following changes to the schema will be made:\")\n\t\t\tfmt.Println(strings.Repeat(\"=\", 80))\n\t\t\tfmt.Println(diff)\n\t\t\tfmt.Println(strings.Repeat(\"=\", 80))\n\t\t\tif !shouldContinue() {\n\t\t\t\treturn errCanceledByUser\n\t\t\t}\n\t\t}\n\n\t\tupdate := bigquery.TableMetadataToUpdate{\n\t\t\tName: td.FriendlyName,\n\t\t\tDescription: td.Description,\n\t\t\tSchema: td.Schema,\n\t\t}\n\t\tif err := ts.updateTable(ctx, td.DataSetID, td.TableID, update); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(\"Finished updating the table.\")\n\t\treturn nil\n\t}\n}\n\ntype flags struct {\n\ttableDef\n\tprotoDir string\n\tmessageName string\n\tforce bool\n\timportPaths stringlistflag.Flag\n}\n\nfunc parseFlags() (*flags, error) {\n\tvar f flags\n\ttable := flag.String(\"table\", \"\", `Table name with format \"<project id>.<dataset id>.<table id>\"`)\n\tflag.StringVar(&f.FriendlyName, \"friendly-name\", \"\", \"Friendly name for the table.\")\n\tflag.StringVar(&f.PartitioningField, \"partitioning-field\", \"\", \"Name of a timestamp field to use for table partitioning (beta).\")\n\tflag.BoolVar(&f.PartitioningDisabled, \"disable-partitioning\", false, \"Makes the table not time-partitioned.\")\n\tflag.DurationVar(&f.PartitioningExpiration, \"partition-expiration\", 0, \"Expiration for partitions. 0 for no expiration.\")\n\tflag.StringVar(&f.protoDir, \"message-dir\", \".\", \"path to directory with the .proto file that defines the schema message.\")\n\tflag.BoolVar(&f.force, \"force\", false, \"proceed without a user confirmation.\")\n\t\/\/ -I matches protoc's flag and its error message suggesting to pass -I.\n\tflag.Var(&f.importPaths, \"I\", \"path to directory with the imported .proto file; can be specified multiple times.\")\n\n\tflag.StringVar(&f.messageName,\n\t\t\"message\",\n\t\t\"\",\n\t\t\"Full name of the protobuf message that defines the table schema. The name must contain proto package name.\")\n\n\tflag.Parse()\n\n\tswitch {\n\tcase len(flag.Args()) > 0:\n\t\treturn nil, fmt.Errorf(\"unexpected arguments: %q\", flag.Args())\n\tcase *table == \"\":\n\t\treturn nil, fmt.Errorf(\"-table is required\")\n\tcase f.messageName == \"\":\n\t\treturn nil, fmt.Errorf(\"-message is required (the name must contain the proto package name)\")\n\tcase f.PartitioningField != \"\" && f.PartitioningDisabled:\n\t\treturn nil, fmt.Errorf(\"partitioning field cannot be non-empty with disabled partitioning\")\n\t}\n\tif parts := strings.Split(*table, \".\"); len(parts) == 3 {\n\t\tf.ProjectID = parts[0]\n\t\tf.DataSetID = parts[1]\n\t\tf.TableID = parts[2]\n\t} else {\n\t\treturn nil, fmt.Errorf(\"expected exactly 2 dots in table name %q\", *table)\n\t}\n\n\treturn &f, nil\n}\n\nfunc run(ctx context.Context) error {\n\tflags, err := parseFlags()\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"failed to parse flags\").Err()\n\t}\n\n\ttd := flags.tableDef\n\n\tdesc, err := loadProtoDescription(flags.protoDir, flags.importPaths)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"failed to load proto descriptor\").Err()\n\t}\n\ttd.Schema, td.Description, err = schemaFromMessage(desc, flags.messageName)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"could not derive schema from message %q at path %q\", flags.messageName, flags.protoDir).Err()\n\t}\n\tfile, _, _ := descutil.Resolve(desc, flags.messageName)\n\ttd.Description = fmt.Sprintf(\n\t\t\"Proto: https:\/\/cs.chromium.org\/%s\\nTable Description:\\n%s\",\n\t\turl.PathEscape(fmt.Sprintf(\"%s file:%s\", flags.messageName, file.GetName())),\n\t\ttd.Description)\n\n\t\/\/ Create an Authenticator and use it for BigQuery operations.\n\tauthOpts := chromeinfra.DefaultAuthOptions()\n\tauthOpts.Scopes = []string{bigquery.Scope}\n\tauthenticator := auth.NewAuthenticator(ctx, auth.InteractiveLogin, authOpts)\n\n\tauthTS, err := authenticator.TokenSource()\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"could not get authentication credentials\").Err()\n\t}\n\n\tc, err := bigquery.NewClient(ctx, td.ProjectID, option.WithTokenSource(authTS))\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"could not create BigQuery client\").Err()\n\t}\n\treturn updateFromTableDef(ctx, flags.force, bqTableStore{c}, td)\n}\n\nfunc main() {\n\tswitch err := run(context.Background()); {\n\tcase canceledByUser.In(err):\n\t\tos.Exit(1)\n\tcase err != nil:\n\t\tlog.Fatal(err)\n\t}\n}\n\n\/\/ schemaFromMessage loads a message by name from .proto files in dir\n\/\/ and converts the message to a bigquery schema.\nfunc schemaFromMessage(desc *descriptor.FileDescriptorSet, messageName string) (schema bigquery.Schema, description string, err error) {\n\tconv := schemaConverter{\n\t\tdesc: desc,\n\t\tsourceCodeInfo: make(map[*descriptor.FileDescriptorProto]sourceCodeInfoMap, len(desc.File)),\n\t}\n\tfor _, f := range desc.File {\n\t\tconv.sourceCodeInfo[f], err = descutil.IndexSourceCodeInfo(f)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", errors.Annotate(err, \"failed to index source code info in file %q\", f.GetName()).Err()\n\t\t}\n\t}\n\treturn conv.schema(messageName)\n}\n\nfunc protoImportPaths(dir string, userDefinedImportPaths []string) ([]string, error) {\n\t\/\/ In Go mode, import paths are all $GOPATH\/src directories because we like\n\t\/\/ go-style absolute import paths,\n\t\/\/ e.g. \"go.chromium.org\/luci\/logdog\/api\/logpb\/log.proto\"\n\tvar goSources []string\n\tinGopath := false\n\tfor _, p := range goPaths() {\n\t\tsrc := filepath.Join(p, \"src\")\n\t\tswitch info, err := os.Stat(src); {\n\t\tcase os.IsNotExist(err):\n\n\t\tcase err != nil:\n\t\t\treturn nil, err\n\n\t\tcase !info.IsDir():\n\n\t\tdefault:\n\t\t\tgoSources = append(goSources, src)\n\t\t\t\/\/ note: does not respect case insensitive file systems (e.g. on windows)\n\t\t\tinGopath = inGopath || strings.HasPrefix(dir, src)\n\t\t}\n\t}\n\n\tswitch {\n\tcase !inGopath:\n\t\t\/\/ Python mode.\n\n\t\t\/\/ loadProtoDescription passes absolute paths to .proto files,\n\t\t\/\/ so unless we pass -I with a directory containing them,\n\t\t\/\/ protoc will complain. Do that for the user.\n\t\treturn append([]string{dir}, userDefinedImportPaths...), nil\n\n\tcase len(userDefinedImportPaths) > 0:\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"%q is in $GOPATH. \"+\n\t\t\t\t\"Please do not use -I flag. \"+\n\t\t\t\t\"Use go-style absolute paths to imported .proto files, \"+\n\t\t\t\t\"e.g. github.com\/user\/repo\/path\/to\/file.proto\", dir)\n\tdefault:\n\t\treturn goSources, nil\n\t}\n}\n\n\/\/ loadProtoDescription compiles .proto files in the dir\n\/\/ and returns their descriptor.\nfunc loadProtoDescription(dir string, importPaths []string) (*descriptor.FileDescriptorSet, error) {\n\tdir, err := filepath.Abs(dir)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"could make path %q absolute\", dir).Err()\n\t}\n\n\ttempDir, err := ioutil.TempDir(\"\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer os.RemoveAll(tempDir)\n\tdescFile := filepath.Join(tempDir, \"desc\")\n\n\timportPaths, err = protoImportPaths(dir, importPaths)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\targs := []string{\n\t\t\"--descriptor_set_out=\" + descFile,\n\t\t\"--include_imports\",\n\t\t\"--include_source_info\",\n\t}\n\tfor _, p := range importPaths {\n\t\targs = append(args, \"-I=\"+p)\n\t}\n\tprotoFiles, err := filepath.Glob(filepath.Join(dir, \"*.proto\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(protoFiles) == 0 {\n\t\treturn nil, fmt.Errorf(\"no .proto files found in directory %q\", dir)\n\t}\n\targs = append(args, protoFiles...)\n\n\tprotoc := exec.Command(\"protoc\", args...)\n\tprotoc.Stderr = os.Stderr\n\tif err := protoc.Run(); err != nil {\n\t\treturn nil, errors.Annotate(err, \"protoc run failed\").Err()\n\t}\n\n\tdescBytes, err := ioutil.ReadFile(descFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar desc descriptor.FileDescriptorSet\n\terr = proto.Unmarshal(descBytes, &desc)\n\treturn &desc, err\n}\n\nfunc goPaths() []string {\n\tgopath := strings.TrimSpace(os.Getenv(\"GOPATH\"))\n\tif gopath == \"\" {\n\t\treturn nil\n\t}\n\treturn strings.Split(gopath, string(filepath.ListSeparator))\n}\n\n\/\/ confirm asks for a user confirmation for an action, with No as default.\n\/\/ Only \"y\" or \"Y\" responses is treated as yes.\nfunc confirm(action string) (response bool) {\n\tfmt.Printf(\"%s? [y\/N] \", action)\n\tvar res string\n\tfmt.Scanln(&res)\n\treturn res == \"y\" || res == \"Y\"\n}\n<commit_msg>[bqschemaupdater] rename -parition-expiration flag<commit_after>\/\/ Copyright 2018 The LUCI Authors.\n\/\/\n\/\/ Licensed under the Apache License, Version 2.0 (the \"License\");\n\/\/ you may not use this file except in compliance with the License.\n\/\/ You may obtain a copy of the License at\n\/\/\n\/\/ http:\/\/www.apache.org\/licenses\/LICENSE-2.0\n\/\/\n\/\/ Unless required by applicable law or agreed to in writing, software\n\/\/ distributed under the License is distributed on an \"AS IS\" BASIS,\n\/\/ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\/\/ See the License for the specific language governing permissions and\n\/\/ limitations under the License.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"io\/ioutil\"\n\t\"log\"\n\t\"net\/url\"\n\t\"os\"\n\t\"os\/exec\"\n\t\"path\/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"cloud.google.com\/go\/bigquery\"\n\t\"github.com\/golang\/protobuf\/proto\"\n\t\"github.com\/golang\/protobuf\/protoc-gen-go\/descriptor\"\n\t\"golang.org\/x\/net\/context\"\n\t\"google.golang.org\/api\/option\"\n\n\t\"go.chromium.org\/luci\/auth\"\n\t\"go.chromium.org\/luci\/common\/errors\"\n\t\"go.chromium.org\/luci\/common\/flag\/stringlistflag\"\n\t\"go.chromium.org\/luci\/common\/proto\/google\/descutil\"\n\t\"go.chromium.org\/luci\/hardcoded\/chromeinfra\"\n)\n\nvar (\n\tcanceledByUser = errors.BoolTag{\n\t\tKey: errors.NewTagKey(\"operation canceled by user\"),\n\t}\n\terrCanceledByUser = errors.Reason(\"operation canceled by user\").Tag(canceledByUser).Err()\n)\n\ntype tableDef struct {\n\tProjectID string\n\tDataSetID string\n\tTableID string\n\tFriendlyName string\n\tDescription string\n\tPartitioningDisabled bool\n\tPartitioningExpiration time.Duration\n\tPartitioningField string\n\tSchema bigquery.Schema\n}\n\nfunc updateFromTableDef(ctx context.Context, force bool, ts tableStore, td tableDef) error {\n\ttableID := fmt.Sprintf(\"%s.%s.%s\", td.ProjectID, td.DataSetID, td.TableID)\n\tshouldContinue := func() bool {\n\t\tif force {\n\t\t\treturn true\n\t\t}\n\t\treturn confirm(\"Continue\")\n\t}\n\n\tmd, err := ts.getTableMetadata(ctx, td.DataSetID, td.TableID)\n\tswitch {\n\tcase isNotFound(err): \/\/ new table\n\t\tfmt.Printf(\"Table %q does not exist.\\n\", tableID)\n\t\tfmt.Println(\"It will be created with the following schema:\")\n\t\tfmt.Println(strings.Repeat(\"=\", 80))\n\t\tfmt.Println(schemaString(td.Schema))\n\t\tfmt.Println(strings.Repeat(\"=\", 80))\n\t\tif !shouldContinue() {\n\t\t\treturn errCanceledByUser\n\t\t}\n\n\t\tmd := &bigquery.TableMetadata{\n\t\t\tName: td.FriendlyName,\n\t\t\tDescription: td.Description,\n\t\t\tSchema: td.Schema,\n\t\t}\n\t\tif !td.PartitioningDisabled {\n\t\t\tmd.TimePartitioning = &bigquery.TimePartitioning{\n\t\t\t\tExpiration: td.PartitioningExpiration,\n\t\t\t\tField: td.PartitioningField,\n\t\t\t}\n\t\t}\n\t\terr := ts.createTable(ctx, td.DataSetID, td.TableID, md)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(\"Table is created.\")\n\t\tfmt.Println(\"Please update the documentation in https:\/\/chromium.googlesource.com\/infra\/infra\/+\/master\/doc\/bigquery_tables.md or the internal equivalent.\")\n\t\treturn nil\n\n\tcase err != nil:\n\t\treturn err\n\n\tdefault: \/\/ existing table\n\t\tfmt.Printf(\"Updating table %q\\n\", tableID)\n\t\tif diff := schemaDiff(md.Schema, td.Schema); diff == \"\" {\n\t\t\tfmt.Println(\"No changes to schema detected.\")\n\t\t} else {\n\t\t\tfmt.Println(\"The following changes to the schema will be made:\")\n\t\t\tfmt.Println(strings.Repeat(\"=\", 80))\n\t\t\tfmt.Println(diff)\n\t\t\tfmt.Println(strings.Repeat(\"=\", 80))\n\t\t\tif !shouldContinue() {\n\t\t\t\treturn errCanceledByUser\n\t\t\t}\n\t\t}\n\n\t\tupdate := bigquery.TableMetadataToUpdate{\n\t\t\tName: td.FriendlyName,\n\t\t\tDescription: td.Description,\n\t\t\tSchema: td.Schema,\n\t\t}\n\t\tif err := ts.updateTable(ctx, td.DataSetID, td.TableID, update); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Println(\"Finished updating the table.\")\n\t\treturn nil\n\t}\n}\n\ntype flags struct {\n\ttableDef\n\tprotoDir string\n\tmessageName string\n\tforce bool\n\timportPaths stringlistflag.Flag\n}\n\nfunc parseFlags() (*flags, error) {\n\tvar f flags\n\ttable := flag.String(\"table\", \"\", `Table name with format \"<project id>.<dataset id>.<table id>\"`)\n\tflag.StringVar(&f.FriendlyName, \"friendly-name\", \"\", \"Friendly name for the table.\")\n\tflag.StringVar(&f.PartitioningField, \"partitioning-field\", \"\", \"Name of a timestamp field to use for table partitioning (beta).\")\n\tflag.BoolVar(&f.PartitioningDisabled, \"disable-partitioning\", false, \"Makes the table not time-partitioned.\")\n\tflag.DurationVar(&f.PartitioningExpiration, \"partitioning-expiration\", 0, \"Expiration for partitions. 0 for no expiration.\")\n\tflag.StringVar(&f.protoDir, \"message-dir\", \".\", \"path to directory with the .proto file that defines the schema message.\")\n\tflag.BoolVar(&f.force, \"force\", false, \"proceed without a user confirmation.\")\n\t\/\/ -I matches protoc's flag and its error message suggesting to pass -I.\n\tflag.Var(&f.importPaths, \"I\", \"path to directory with the imported .proto file; can be specified multiple times.\")\n\n\tflag.StringVar(&f.messageName,\n\t\t\"message\",\n\t\t\"\",\n\t\t\"Full name of the protobuf message that defines the table schema. The name must contain proto package name.\")\n\n\tflag.Parse()\n\n\tswitch {\n\tcase len(flag.Args()) > 0:\n\t\treturn nil, fmt.Errorf(\"unexpected arguments: %q\", flag.Args())\n\tcase *table == \"\":\n\t\treturn nil, fmt.Errorf(\"-table is required\")\n\tcase f.messageName == \"\":\n\t\treturn nil, fmt.Errorf(\"-message is required (the name must contain the proto package name)\")\n\tcase f.PartitioningField != \"\" && f.PartitioningDisabled:\n\t\treturn nil, fmt.Errorf(\"partitioning field cannot be non-empty with disabled partitioning\")\n\t}\n\tif parts := strings.Split(*table, \".\"); len(parts) == 3 {\n\t\tf.ProjectID = parts[0]\n\t\tf.DataSetID = parts[1]\n\t\tf.TableID = parts[2]\n\t} else {\n\t\treturn nil, fmt.Errorf(\"expected exactly 2 dots in table name %q\", *table)\n\t}\n\n\treturn &f, nil\n}\n\nfunc run(ctx context.Context) error {\n\tflags, err := parseFlags()\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"failed to parse flags\").Err()\n\t}\n\n\ttd := flags.tableDef\n\n\tdesc, err := loadProtoDescription(flags.protoDir, flags.importPaths)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"failed to load proto descriptor\").Err()\n\t}\n\ttd.Schema, td.Description, err = schemaFromMessage(desc, flags.messageName)\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"could not derive schema from message %q at path %q\", flags.messageName, flags.protoDir).Err()\n\t}\n\tfile, _, _ := descutil.Resolve(desc, flags.messageName)\n\ttd.Description = fmt.Sprintf(\n\t\t\"Proto: https:\/\/cs.chromium.org\/%s\\nTable Description:\\n%s\",\n\t\turl.PathEscape(fmt.Sprintf(\"%s file:%s\", flags.messageName, file.GetName())),\n\t\ttd.Description)\n\n\t\/\/ Create an Authenticator and use it for BigQuery operations.\n\tauthOpts := chromeinfra.DefaultAuthOptions()\n\tauthOpts.Scopes = []string{bigquery.Scope}\n\tauthenticator := auth.NewAuthenticator(ctx, auth.InteractiveLogin, authOpts)\n\n\tauthTS, err := authenticator.TokenSource()\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"could not get authentication credentials\").Err()\n\t}\n\n\tc, err := bigquery.NewClient(ctx, td.ProjectID, option.WithTokenSource(authTS))\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"could not create BigQuery client\").Err()\n\t}\n\treturn updateFromTableDef(ctx, flags.force, bqTableStore{c}, td)\n}\n\nfunc main() {\n\tswitch err := run(context.Background()); {\n\tcase canceledByUser.In(err):\n\t\tos.Exit(1)\n\tcase err != nil:\n\t\tlog.Fatal(err)\n\t}\n}\n\n\/\/ schemaFromMessage loads a message by name from .proto files in dir\n\/\/ and converts the message to a bigquery schema.\nfunc schemaFromMessage(desc *descriptor.FileDescriptorSet, messageName string) (schema bigquery.Schema, description string, err error) {\n\tconv := schemaConverter{\n\t\tdesc: desc,\n\t\tsourceCodeInfo: make(map[*descriptor.FileDescriptorProto]sourceCodeInfoMap, len(desc.File)),\n\t}\n\tfor _, f := range desc.File {\n\t\tconv.sourceCodeInfo[f], err = descutil.IndexSourceCodeInfo(f)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", errors.Annotate(err, \"failed to index source code info in file %q\", f.GetName()).Err()\n\t\t}\n\t}\n\treturn conv.schema(messageName)\n}\n\nfunc protoImportPaths(dir string, userDefinedImportPaths []string) ([]string, error) {\n\t\/\/ In Go mode, import paths are all $GOPATH\/src directories because we like\n\t\/\/ go-style absolute import paths,\n\t\/\/ e.g. \"go.chromium.org\/luci\/logdog\/api\/logpb\/log.proto\"\n\tvar goSources []string\n\tinGopath := false\n\tfor _, p := range goPaths() {\n\t\tsrc := filepath.Join(p, \"src\")\n\t\tswitch info, err := os.Stat(src); {\n\t\tcase os.IsNotExist(err):\n\n\t\tcase err != nil:\n\t\t\treturn nil, err\n\n\t\tcase !info.IsDir():\n\n\t\tdefault:\n\t\t\tgoSources = append(goSources, src)\n\t\t\t\/\/ note: does not respect case insensitive file systems (e.g. on windows)\n\t\t\tinGopath = inGopath || strings.HasPrefix(dir, src)\n\t\t}\n\t}\n\n\tswitch {\n\tcase !inGopath:\n\t\t\/\/ Python mode.\n\n\t\t\/\/ loadProtoDescription passes absolute paths to .proto files,\n\t\t\/\/ so unless we pass -I with a directory containing them,\n\t\t\/\/ protoc will complain. Do that for the user.\n\t\treturn append([]string{dir}, userDefinedImportPaths...), nil\n\n\tcase len(userDefinedImportPaths) > 0:\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"%q is in $GOPATH. \"+\n\t\t\t\t\"Please do not use -I flag. \"+\n\t\t\t\t\"Use go-style absolute paths to imported .proto files, \"+\n\t\t\t\t\"e.g. github.com\/user\/repo\/path\/to\/file.proto\", dir)\n\tdefault:\n\t\treturn goSources, nil\n\t}\n}\n\n\/\/ loadProtoDescription compiles .proto files in the dir\n\/\/ and returns their descriptor.\nfunc loadProtoDescription(dir string, importPaths []string) (*descriptor.FileDescriptorSet, error) {\n\tdir, err := filepath.Abs(dir)\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"could make path %q absolute\", dir).Err()\n\t}\n\n\ttempDir, err := ioutil.TempDir(\"\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer os.RemoveAll(tempDir)\n\tdescFile := filepath.Join(tempDir, \"desc\")\n\n\timportPaths, err = protoImportPaths(dir, importPaths)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\targs := []string{\n\t\t\"--descriptor_set_out=\" + descFile,\n\t\t\"--include_imports\",\n\t\t\"--include_source_info\",\n\t}\n\tfor _, p := range importPaths {\n\t\targs = append(args, \"-I=\"+p)\n\t}\n\tprotoFiles, err := filepath.Glob(filepath.Join(dir, \"*.proto\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(protoFiles) == 0 {\n\t\treturn nil, fmt.Errorf(\"no .proto files found in directory %q\", dir)\n\t}\n\targs = append(args, protoFiles...)\n\n\tprotoc := exec.Command(\"protoc\", args...)\n\tprotoc.Stderr = os.Stderr\n\tif err := protoc.Run(); err != nil {\n\t\treturn nil, errors.Annotate(err, \"protoc run failed\").Err()\n\t}\n\n\tdescBytes, err := ioutil.ReadFile(descFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar desc descriptor.FileDescriptorSet\n\terr = proto.Unmarshal(descBytes, &desc)\n\treturn &desc, err\n}\n\nfunc goPaths() []string {\n\tgopath := strings.TrimSpace(os.Getenv(\"GOPATH\"))\n\tif gopath == \"\" {\n\t\treturn nil\n\t}\n\treturn strings.Split(gopath, string(filepath.ListSeparator))\n}\n\n\/\/ confirm asks for a user confirmation for an action, with No as default.\n\/\/ Only \"y\" or \"Y\" responses is treated as yes.\nfunc confirm(action string) (response bool) {\n\tfmt.Printf(\"%s? [y\/N] \", action)\n\tvar res string\n\tfmt.Scanln(&res)\n\treturn res == \"y\" || res == \"Y\"\n}\n<|endoftext|>"}